diff --git a/.b4-config b/.b4-config new file mode 100644 index 00000000000..126f503ded7 --- /dev/null +++ b/.b4-config @@ -0,0 +1,13 @@ +# +# Common b4 settings that can be used to send patches to QEMU upstream. +# https://b4.docs.kernel.org/ +# + +[b4] + send-series-to = qemu-devel@nongnu.org + send-auto-to-cmd = echo + send-auto-cc-cmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback + am-perpatch-check-cmd = scripts/checkpatch.pl -q --terse --no-summary --mailback - + prep-perpatch-check-cmd = scripts/checkpatch.pl -q --terse --no-summary --mailback - + searchmask = https://lore.kernel.org/qemu-devel/?x=m&t=1&q=%s + linkmask = https://lore.kernel.org/qemu-devel/%s diff --git a/.editorconfig b/.editorconfig index 7303759ed7d..a04cb9054cb 100644 --- a/.editorconfig +++ b/.editorconfig @@ -47,3 +47,16 @@ emacs_mode = glsl [*.json] indent_style = space emacs_mode = python + +# by default follow QEMU's style +[*.pl] +indent_style = space +indent_size = 4 +emacs_mode = perl + +# but user kernel "style" for imported scripts +[scripts/{kernel-doc,get_maintainer.pl,checkpatch.pl}] +indent_style = tab +indent_size = 8 +emacs_mode = perl + diff --git a/.gitattributes b/.gitattributes index a217cb7bfe9..9ce7a19581a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -2,3 +2,8 @@ *.h.inc diff=c *.m diff=objc *.py diff=python +*.rs diff=rust +*.rs.inc diff=rust +Cargo.lock diff=toml merge=binary + +*.patch -text -whitespace diff --git a/.github/workflows/build-source-package.yml b/.github/workflows/build-source-package.yml new file mode 100644 index 00000000000..dba46ced431 --- /dev/null +++ b/.github/workflows/build-source-package.yml @@ -0,0 +1,115 @@ +# Builds a Debian source package for QEMU + +name: Build Debian Source Package + +on: + push: + branches: + - 'nvidia/latest' + - 'nvidia_stable-10.1' + + # Allow manual triggering of the workflow from the GitHub UI + workflow_dispatch: + +jobs: + build-source-package: + # arm64 is the primary platform of interest for this project + runs-on: ubuntu-24.04 + + steps: + # Step 1: Check out the repository's code + - name: Checkout repository + uses: actions/checkout@v4 + with: + # Fetch all history for all tags and branches, which can be necessary + # for tools like git-buildpackage to determine version numbers. + fetch-depth: 0 + + # Step 2: Install Debian packaging tools and build dependencies + - name: Install Build Dependencies + run: | + sudo apt-get update + + sudo DEBIAN_FRONTEND=noninteractive apt-get install -y devscripts equivs fakeroot git-buildpackage + + # Use mk-build-deps to parse debian/control, create a dummy package + # with all build dependencies, and install it. This ensures all + # required build dependencies are present. + # The --no-install-recommends flag keeps the environment minimal. + sudo mk-build-deps -i --tool="apt-get -y --no-install-recommends" + # Remove debs that this generates from the working directory after install + rm -f qemu-build-deps* + + # Step 3: Generate Upstream Tarball + - name: Generate Upstream Tarball + run: | + # Extract source name and upstream version from debian/changelog. + # The first sed command strips any epoch (e.g., "1:") from the start of the version. + # The second sed command strips the Debian revision from the end of the version string. + # dpkg-parsechangelog is in the dpkg-dev package, a dependency of devscripts. 
+ export SOURCE_NAME=$(dpkg-parsechangelog -S Source) + export UPSTREAM_VERSION=$(dpkg-parsechangelog -S Version | sed 's/^[0-9]\+://' | sed 's/-[^-]*$//') + + git config --global user.email "robot@builder.local" + git config --global user.name "Builder Robot" + # Create the .orig.tar.gz in the parent directory. + ./debian/build-source-package-from-git.sh + + # Step 4: Upload Workspace for Debugging + - name: Upload Workspace for Debugging + uses: actions/upload-artifact@v4 + with: + name: pre-build-workspace + path: . + + # Step 5: Build the Debian source package + - name: Build Source Package + run: | + # Use debuild to create the source package. + # -S: Build a source package only (no binaries). + # -us: Do not sign the source package. + # -uc: Do not sign the .changes file. + # This command will find and use the .orig.tar.gz we just created. + current_dir=$(pwd) + cd deb + # There should only be one thing in here, which is the tarball we'll extract + # so wildcard should be OK in this instance. + ls + tar -xvf $(ls -d * | head -n 1) + ls + cd $(ls -d */ | head -n 1) + debuild -S -us -uc + cd "$current_dir" + + # Step 6: Log MD5 Checksums + - name: Log MD5 Checksums as Annotations + run: | + # Change to the parent directory where artifacts were created. + current_dir=$(pwd) + cd deb + mkdir source_package_artifacts + + # Iterate over all the generated build artifacts. + for file in *.dsc *.orig.tar.* *.debian.tar.* *.changes *source.build *source.buildinfo; do + # Check if files matching the pattern exist before processing. + if [ -f "$file" ]; then + # Calculate the MD5 sum and format it for the annotation. + # The output of md5sum is "CHECKSUM FILENAME". + checksum=$(md5sum "$file") + + # Echo the checksum as a workflow notice annotation. + # This will make it easily visible in the GitHub Actions UI. 
+ echo "::notice title=MD5 Checksum::$checksum" + mv "$file" source_package_artifacts + fi + done + mv source_package_artifacts "$current_dir" + + # Step 7: Upload the generated source package files as artifacts + - name: Upload Debian Source Package Artifacts + uses: actions/upload-artifact@v4 + with: + # Name of the artifact bundle to be displayed in the GitHub UI + name: debian-source-package + + path: source_package_artifacts diff --git a/.gitlab-ci.d/base.yml b/.gitlab-ci.d/base.yml index bf3d8efab6a..60a24a9d14d 100644 --- a/.gitlab-ci.d/base.yml +++ b/.gitlab-ci.d/base.yml @@ -69,10 +69,6 @@ variables: - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM' when: never - # Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set - - if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM' - when: never - ############################################################# # Stage 2: fine tune execution of jobs in specific scenarios @@ -101,8 +97,8 @@ variables: when: manual allow_failure: true - # Avocado jobs can be manually start in forks if $QEMU_CI_AVOCADO_TESTING is unset - - if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM' + # Functional jobs can be manually started in forks + - if: '$QEMU_JOB_FUNCTIONAL && $QEMU_CI_FUNCTIONAL != "1" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM' when: manual allow_failure: true @@ -128,7 +124,7 @@ variables: when: manual # Jobs can run if any jobs they depend on were successful - - if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/' + - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/' when: on_success variables: QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG diff --git a/.gitlab-ci.d/buildtest-template.yml b/.gitlab-ci.d/buildtest-template.yml index 844c26623d9..038c3c9540a 100644 --- a/.gitlab-ci.d/buildtest-template.yml +++ b/.gitlab-ci.d/buildtest-template.yml @@ -8,8 +8,11 @@ key: "$CI_JOB_NAME" when: always before_script: + - source scripts/ci/gitlab-ci-section + - section_start setup "Pre-script setup" - JOBS=$(expr $(nproc) + 1) - cat /packages.txt + - section_end setup script: - export CCACHE_BASEDIR="$(pwd)" - export CCACHE_DIR="$CCACHE_BASEDIR/ccache" @@ -19,7 +22,9 @@ - mkdir build - cd build - ccache --zero-stats + - section_start configure "Running configure" - ../configure --enable-werror --disable-docs --enable-fdt=system + --disable-debug-info ${TARGETS:+--target-list="$TARGETS"} $CONFIGURE_ARGS || { cat config.log meson-logs/meson-log.txt && exit 1; } @@ -27,11 +32,16 @@ then pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ; fi || exit 1; + - section_end configure + - section_start build "Building QEMU" - $MAKE -j"$JOBS" + - section_end build + - section_start test "Running tests" - if test -n "$MAKE_CHECK_ARGS"; then $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ; fi + - section_end test - ccache --show-stats # We jump some hoops in common_test_job_template to avoid @@ -54,12 +64,22 @@ stage: test image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG script: + - source scripts/ci/gitlab-ci-section + - section_start buildenv "Setting up to run tests" - scripts/git-submodule.sh update roms/SLOF - - meson subprojects download $(cd build/subprojects && echo *) + - build/pyvenv/bin/meson subprojects download $(cd build/subprojects && echo *) - cd build - find . 
-type f -exec touch {} + # Avoid recompiling by hiding ninja with NINJA=":" - - $MAKE NINJA=":" $MAKE_CHECK_ARGS + # We also have to pre-cache the functional tests manually in this case + - if [ "x${QEMU_TEST_CACHE_DIR}" != "x" ]; then + $MAKE precache-functional ; + fi + - section_end buildenv + - section_start test "Running tests" + # doctests need all the compilation artifacts + - $MAKE NINJA=":" MTESTARGS="--no-suite doc" $MAKE_CHECK_ARGS + - section_end test .native_test_job_template: extends: .common_test_job_template @@ -72,12 +92,12 @@ reports: junit: build/meson-logs/testlog.junit.xml -.avocado_test_job_template: +.functional_test_job_template: extends: .common_test_job_template cache: key: "${CI_JOB_NAME}-cache" paths: - - ${CI_PROJECT_DIR}/avocado-cache + - ${CI_PROJECT_DIR}/functional-cache policy: pull-push artifacts: name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" @@ -86,21 +106,41 @@ paths: - build/tests/results/latest/results.xml - build/tests/results/latest/test-results + - build/tests/functional/*/*/*.log reports: junit: build/tests/results/latest/results.xml before_script: - - mkdir -p ~/.config/avocado - - echo "[datadir.paths]" > ~/.config/avocado/avocado.conf - - echo "cache_dirs = ['${CI_PROJECT_DIR}/avocado-cache']" - >> ~/.config/avocado/avocado.conf - - echo -e '[job.output.testlogs]\nstatuses = ["FAIL", "INTERRUPT"]' - >> ~/.config/avocado/avocado.conf - - if [ -d ${CI_PROJECT_DIR}/avocado-cache ]; then - du -chs ${CI_PROJECT_DIR}/avocado-cache ; - fi - - export AVOCADO_ALLOW_UNTRUSTED_CODE=1 + - export QEMU_TEST_ALLOW_UNTRUSTED_CODE=1 + - export QEMU_TEST_CACHE_DIR=${CI_PROJECT_DIR}/functional-cache after_script: - cd build - - du -chs ${CI_PROJECT_DIR}/avocado-cache + - du -chs ${CI_PROJECT_DIR}/*-cache variables: - QEMU_JOB_AVOCADO: 1 + QEMU_JOB_FUNCTIONAL: 1 + +.wasm_build_job_template: + extends: .base_job_template + stage: build + image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG + before_script: + - source scripts/ci/gitlab-ci-section + - section_start setup "Pre-script setup" + - JOBS=$(expr $(nproc) + 1) + - section_end setup + script: + - du -sh .git + - mkdir build + - cd build + - section_start configure "Running configure" + - emconfigure ../configure --disable-docs + ${TARGETS:+--target-list="$TARGETS"} + $CONFIGURE_ARGS || + { cat config.log meson-logs/meson-log.txt && exit 1; } + - if test -n "$LD_JOBS"; + then + pyvenv/bin/meson configure . 
-Dbackend_max_links="$LD_JOBS" ; + fi || exit 1; + - section_end configure + - section_start build "Building QEMU" + - emmake make -j"$JOBS" + - section_end build diff --git a/.gitlab-ci.d/buildtest.yml b/.gitlab-ci.d/buildtest.yml index aa327824052..d888a600637 100644 --- a/.gitlab-ci.d/buildtest.yml +++ b/.gitlab-ci.d/buildtest.yml @@ -22,15 +22,14 @@ check-system-alpine: IMAGE: alpine MAKE_CHECK_ARGS: check-unit check-qtest -avocado-system-alpine: - extends: .avocado_test_job_template +functional-system-alpine: + extends: .functional_test_job_template needs: - job: build-system-alpine artifacts: true variables: IMAGE: alpine - MAKE_CHECK_ARGS: check-avocado - AVOCADO_TAGS: arch:avr arch:loongarch64 arch:mips64 arch:mipsel + MAKE_CHECK_ARGS: check-functional build-system-ubuntu: extends: @@ -40,9 +39,9 @@ build-system-ubuntu: job: amd64-ubuntu2204-container variables: IMAGE: ubuntu2204 - CONFIGURE_ARGS: --enable-docs + CONFIGURE_ARGS: --enable-docs --enable-rust TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu - MAKE_CHECK_ARGS: check-build + MAKE_CHECK_ARGS: check-build check-doc check-system-ubuntu: extends: .native_test_job_template @@ -53,15 +52,14 @@ check-system-ubuntu: IMAGE: ubuntu2204 MAKE_CHECK_ARGS: check -avocado-system-ubuntu: - extends: .avocado_test_job_template +functional-system-ubuntu: + extends: .functional_test_job_template needs: - job: build-system-ubuntu artifacts: true variables: IMAGE: ubuntu2204 - MAKE_CHECK_ARGS: check-avocado - AVOCADO_TAGS: arch:alpha arch:microblazeel arch:mips64el + MAKE_CHECK_ARGS: check-functional build-system-debian: extends: @@ -71,7 +69,7 @@ build-system-debian: job: amd64-debian-container variables: IMAGE: debian - CONFIGURE_ARGS: --with-coroutine=sigaltstack + CONFIGURE_ARGS: --with-coroutine=sigaltstack --enable-rust TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu sparc-softmmu xtensa-softmmu MAKE_CHECK_ARGS: check-build @@ -85,15 +83,14 @@ check-system-debian: IMAGE: debian MAKE_CHECK_ARGS: check -avocado-system-debian: - extends: .avocado_test_job_template +functional-system-debian: + extends: .functional_test_job_template needs: - job: build-system-debian artifacts: true variables: IMAGE: debian - MAKE_CHECK_ARGS: check-avocado - AVOCADO_TAGS: arch:arm arch:i386 arch:riscv64 arch:sh4 arch:sparc arch:xtensa + MAKE_CHECK_ARGS: check-functional crash-test-debian: extends: .native_test_job_template @@ -115,10 +112,24 @@ build-system-fedora: job: amd64-fedora-container variables: IMAGE: fedora - CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs + CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs --enable-crypto-afalg --enable-rust TARGETS: microblaze-softmmu mips-softmmu xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu - MAKE_CHECK_ARGS: check-build + MAKE_CHECK_ARGS: check-build check-doc + +build-system-fedora-rust-nightly: + extends: + - .native_build_job_template + - .native_build_artifact_template + needs: + job: amd64-fedora-rust-nightly-container + variables: + IMAGE: fedora-rust-nightly + CONFIGURE_ARGS: --disable-docs --enable-rust --enable-strict-rust-lints + TARGETS: aarch64-softmmu + MAKE_CHECK_ARGS: check-build check-doc + + allow_failure: true check-system-fedora: extends: .native_test_job_template @@ -129,16 +140,14 @@ check-system-fedora: IMAGE: fedora MAKE_CHECK_ARGS: check -avocado-system-fedora: - extends: .avocado_test_job_template +functional-system-fedora: + extends: .functional_test_job_template needs: - job: build-system-fedora 
artifacts: true variables: IMAGE: fedora - MAKE_CHECK_ARGS: check-avocado - AVOCADO_TAGS: arch:microblaze arch:mips arch:xtensa arch:m68k - arch:riscv32 arch:ppc arch:sparc64 + MAKE_CHECK_ARGS: check-functional crash-test-fedora: extends: .native_test_job_template @@ -174,12 +183,11 @@ build-previous-qemu: when: on_success expire_in: 2 days paths: - - build-previous - exclude: - - build-previous/**/*.p - - build-previous/**/*.a.p - - build-previous/**/*.c.o - - build-previous/**/*.c.o.d + - build-previous/qemu-bundle + - build-previous/qemu-system-aarch64 + - build-previous/qemu-system-x86_64 + - build-previous/tests/qtest/migration-test + - build-previous/scripts needs: job: amd64-opensuse-leap-container variables: @@ -188,6 +196,12 @@ build-previous-qemu: # Override the default flags as we need more to grab the old version GIT_FETCH_EXTRA_FLAGS: --prune --quiet before_script: + - source scripts/ci/gitlab-ci-section + # Skip if this series contains the release bump commit. During the + # release process there might be a window of commits when the + # version tag is not yet present in the remote and git fetch would + # fail. + - if grep -q "\.0$" VERSION; then exit 0; fi - export QEMU_PREV_VERSION="$(sed 's/\([0-9.]*\)\.[0-9]*/v\1.0/' VERSION)" - git remote add upstream https://gitlab.com/qemu-project/qemu - git fetch upstream refs/tags/$QEMU_PREV_VERSION:refs/tags/$QEMU_PREV_VERSION @@ -208,6 +222,9 @@ build-previous-qemu: IMAGE: opensuse-leap MAKE_CHECK_ARGS: check-build script: + # Skip for round release numbers, this job is only relevant for + # testing a development tree. + - if grep -q "\.0$" VERSION; then exit 0; fi # Use the migration-tests from the older QEMU tree. This avoids # testing an old QEMU against new features/tests that it is not # compatible with. @@ -243,16 +260,14 @@ check-system-centos: IMAGE: centos9 MAKE_CHECK_ARGS: check -avocado-system-centos: - extends: .avocado_test_job_template +functional-system-centos: + extends: .functional_test_job_template needs: - job: build-system-centos artifacts: true variables: IMAGE: centos9 - MAKE_CHECK_ARGS: check-avocado - AVOCADO_TAGS: arch:ppc64 arch:or1k arch:s390x arch:x86_64 arch:rx - arch:sh4 + MAKE_CHECK_ARGS: check-functional build-system-opensuse: extends: @@ -274,15 +289,14 @@ check-system-opensuse: IMAGE: opensuse-leap MAKE_CHECK_ARGS: check -avocado-system-opensuse: - extends: .avocado_test_job_template +functional-system-opensuse: + extends: .functional_test_job_template needs: - job: build-system-opensuse artifacts: true variables: IMAGE: opensuse-leap - MAKE_CHECK_ARGS: check-avocado - AVOCADO_TAGS: arch:s390x arch:x86_64 arch:aarch64 + MAKE_CHECK_ARGS: check-functional # # Flaky tests. We don't run these by default and they are allow fail @@ -302,18 +316,17 @@ build-system-flaky: ppc64-softmmu rx-softmmu s390x-softmmu sh4-softmmu x86_64-softmmu MAKE_CHECK_ARGS: check-build -avocado-system-flaky: - extends: .avocado_test_job_template +functional-system-flaky: + extends: .functional_test_job_template needs: - job: build-system-flaky artifacts: true allow_failure: true variables: IMAGE: debian - MAKE_CHECK_ARGS: check-avocado + MAKE_CHECK_ARGS: check-functional QEMU_JOB_OPTIONAL: 1 QEMU_TEST_FLAKY_TESTS: 1 - AVOCADO_TAGS: flaky # This jobs explicitly disable TCG (--disable-tcg), KVM is detected by # the configure script. 
The container doesn't contain Xen headers so @@ -345,6 +358,8 @@ build-tcg-disabled: 124 132 139 142 144 145 151 152 155 157 165 194 196 200 202 208 209 216 218 227 234 246 247 248 250 254 255 257 258 260 261 262 263 264 270 272 273 277 279 image-fleecing + - cd ../.. + - make distclean build-user: extends: .native_build_job_template @@ -428,9 +443,8 @@ clang-system: job: amd64-fedora-container variables: IMAGE: fedora - CONFIGURE_ARGS: --cc=clang --cxx=clang++ - --extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined - --extra-cflags=-fno-sanitize=function + CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-ubsan + --extra-cflags=-fno-sanitize-recover=undefined TARGETS: alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu s390x-softmmu MAKE_CHECK_ARGS: check-qtest check-tcg @@ -441,10 +455,9 @@ clang-user: timeout: 70m variables: IMAGE: debian-all-test-cross - CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system + CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system --enable-ubsan --target-list-exclude=alpha-linux-user,microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user - --extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined - --extra-cflags=-fno-sanitize=function + --extra-cflags=-fno-sanitize-recover=undefined MAKE_CHECK_ARGS: check-unit check-tcg # Set LD_JOBS=1 because this requires LTO and ld consumes a large amount of memory. @@ -454,8 +467,8 @@ clang-user: # Since slirp callbacks are used in QEMU Timers, we cannot use libslirp with # CFI builds, and thus have to disable it here. # -# Split in three sets of build/check/avocado to limit the execution time of each -# job +# Split in three sets of build/check/functional to limit the execution time +# of each job build-cfi-aarch64: extends: - .native_build_job_template @@ -485,14 +498,14 @@ check-cfi-aarch64: IMAGE: fedora MAKE_CHECK_ARGS: check -avocado-cfi-aarch64: - extends: .avocado_test_job_template +functional-cfi-aarch64: + extends: .functional_test_job_template needs: - job: build-cfi-aarch64 artifacts: true variables: IMAGE: fedora - MAKE_CHECK_ARGS: check-avocado + MAKE_CHECK_ARGS: check-functional build-cfi-ppc64-s390x: extends: @@ -523,14 +536,14 @@ check-cfi-ppc64-s390x: IMAGE: fedora MAKE_CHECK_ARGS: check -avocado-cfi-ppc64-s390x: - extends: .avocado_test_job_template +functional-cfi-ppc64-s390x: + extends: .functional_test_job_template needs: - job: build-cfi-ppc64-s390x artifacts: true variables: IMAGE: fedora - MAKE_CHECK_ARGS: check-avocado + MAKE_CHECK_ARGS: check-functional build-cfi-x86_64: extends: @@ -557,14 +570,14 @@ check-cfi-x86_64: IMAGE: fedora MAKE_CHECK_ARGS: check -avocado-cfi-x86_64: - extends: .avocado_test_job_template +functional-cfi-x86_64: + extends: .functional_test_job_template needs: - job: build-cfi-x86_64 artifacts: true variables: IMAGE: fedora - MAKE_CHECK_ARGS: check-avocado + MAKE_CHECK_ARGS: check-functional tsan-build: extends: .native_build_job_template @@ -619,12 +632,15 @@ build-oss-fuzz: - CC="clang" CXX="clang++" CFLAGS="-fsanitize=address" ./scripts/oss-fuzz/build.sh - export ASAN_OPTIONS="fast_unwind_on_malloc=0" + - failures=0 - for fuzzer in $(find ./build-oss-fuzz/DEST_DIR/ -executable -type f | grep -v slirp); do grep "LLVMFuzzerTestOneInput" ${fuzzer} > /dev/null 2>&1 || continue ; echo Testing ${fuzzer} ... 
; - "${fuzzer}" -runs=1 -seed=1 || exit 1 ; + "${fuzzer}" -runs=1 -seed=1 || { echo "FAILED:"" ${fuzzer} exit code is $?"; failures=$(($failures+1)); }; done + - echo "Number of failures:"" $failures" + - test $failures = 0 build-tci: extends: .native_build_job_template @@ -651,9 +667,6 @@ build-tci: - make check-tcg # Check our reduced build configurations -# requires libfdt: aarch64, arm, loongarch64, microblaze, microblazeel, -# or1k, ppc64, riscv32, riscv64, rx -# fails qtest without boards: i386, x86_64 build-without-defaults: extends: .native_build_job_template needs: @@ -667,11 +680,7 @@ build-without-defaults: --disable-pie --disable-qom-cast-debug --disable-strip - TARGETS: alpha-softmmu avr-softmmu cris-softmmu hppa-softmmu m68k-softmmu - mips-softmmu mips64-softmmu mipsel-softmmu mips64el-softmmu - ppc-softmmu s390x-softmmu sh4-softmmu sh4eb-softmmu sparc-softmmu - sparc64-softmmu tricore-softmmu xtensa-softmmu xtensaeb-softmmu - hexagon-linux-user i386-linux-user s390x-linux-user + --target-list-exclude=aarch64-softmmu,microblaze-softmmu,mips64-softmmu,mipsel-softmmu,ppc64-softmmu,sh4el-softmmu,xtensa-softmmu,x86_64-softmmu MAKE_CHECK_ARGS: check build-libvhost-user: @@ -777,3 +786,12 @@ coverity: when: never # Always manual on forks even if $QEMU_CI == "2" - when: manual + +build-wasm: + extends: .wasm_build_job_template + timeout: 2h + needs: + job: wasm-emsdk-cross-container + variables: + IMAGE: emsdk-wasm32-cross + CONFIGURE_ARGS: --static --disable-tools --enable-debug --enable-tcg-interpreter diff --git a/.gitlab-ci.d/check-dco.py b/.gitlab-ci.d/check-dco.py index 632c8bcce87..2fd56683dc6 100755 --- a/.gitlab-ci.d/check-dco.py +++ b/.gitlab-ci.d/check-dco.py @@ -19,10 +19,9 @@ reponame = os.path.basename(cwd) repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame) +print(f"adding upstream git repo @ {repourl}") subprocess.check_call(["git", "remote", "add", "check-dco", repourl]) -subprocess.check_call(["git", "fetch", "check-dco", "master"], - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) +subprocess.check_call(["git", "fetch", "--refetch", "check-dco", "master"]) ancestor = subprocess.check_output(["git", "merge-base", "check-dco/master", "HEAD"], @@ -79,7 +78,10 @@ To indicate acceptance of the DCO every commit must have a tag - Signed-off-by: REAL NAME + Signed-off-by: YOUR NAME + +where "YOUR NAME" is your commonly known identity in the context +of the community. This can be achieved by passing the "-s" flag to the "git commit" command. diff --git a/.gitlab-ci.d/check-patch.py b/.gitlab-ci.d/check-patch.py index 39e2b403c9e..be13e6f77d7 100755 --- a/.gitlab-ci.d/check-patch.py +++ b/.gitlab-ci.d/check-patch.py @@ -19,13 +19,12 @@ reponame = os.path.basename(cwd) repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame) +print(f"adding upstream git repo @ {repourl}") # GitLab CI environment does not give us any direct info about the # base for the user's branch. We thus need to figure out a common # ancestor between the user's branch and current git master. 
subprocess.check_call(["git", "remote", "add", "check-patch", repourl]) -subprocess.check_call(["git", "fetch", "check-patch", "master"], - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) +subprocess.check_call(["git", "fetch", "--refetch", "check-patch", "master"]) ancestor = subprocess.check_output(["git", "merge-base", "check-patch/master", "HEAD"], diff --git a/.gitlab-ci.d/check-units.py b/.gitlab-ci.d/check-units.py new file mode 100755 index 00000000000..cebef0e8be5 --- /dev/null +++ b/.gitlab-ci.d/check-units.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +# +# check-units.py: check the number of compilation units and identify +# those that are rebuilt multiple times +# +# Copyright (C) 2025 Linaro Ltd. +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from os import access, R_OK, path +from sys import exit +import json +import argparse +from pathlib import Path +from collections import Counter + + ++def extract_build_units(cc_path): + """ + Extract the build units and their counts from compile_commands.json file. + + Returns: + Hash table of ["unit"] = count + """ + + j = json.load(open(cc_path, 'r')) + files = [f['file'] for f in j] + build_units = Counter(files) + + return build_units + + ++def analyse_units(build_units, top_n): + """ + Analyse the build units and report stats and the most/least rebuilt units + """ + + print(f"Total source files: {len(build_units.keys())}") + print(f"Total build units: {sum(build_units.values())}") + + # Create a sorted list by number of rebuilds + sorted_build_units = sorted(build_units.items(), + key=lambda item: item[1], + reverse=True) + + print("Most rebuilt units:") + for unit, count in sorted_build_units[:top_n]: + print(f" {unit} built {count} times") + + print("Least rebuilt units:") + for unit, count in sorted_build_units[-10:]: + print(f" {unit} built {count} times") + + ++if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="analyse number of build units in compile_commands.json") + parser.add_argument("cc_path", type=Path, default=None, + help="Path to compile_commands.json") + parser.add_argument("-n", type=int, default=20, + help="Dump the top entries") + + args = parser.parse_args() + + if path.isfile(args.cc_path) and access(args.cc_path, R_OK): + units = extract_build_units(args.cc_path) + analyse_units(units, args.n) + exit(0) + else: + print(f"{args.cc_path} doesn't exist or isn't readable") + exit(1) diff --git a/.gitlab-ci.d/cirrus.yml b/.gitlab-ci.d/cirrus.yml index 75df1273bc5..75b611418e7 100644 --- a/.gitlab-ci.d/cirrus.yml +++ b/.gitlab-ci.d/cirrus.yml @@ -15,44 +15,29 @@ stage: build image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:latest needs: [] + allow_failure: + exit_codes: 3 # 20 mins larger than "timeout_in" in cirrus/build.yml # as there's often a 5-10 minute delay before Cirrus CI # actually starts the task timeout: 80m script: + - set -o allexport - source .gitlab-ci.d/cirrus/$NAME.vars - - sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g" - -e "s|[@]CI_COMMIT_REF_NAME@|$CI_COMMIT_REF_NAME|g" - -e "s|[@]CI_COMMIT_SHA@|$CI_COMMIT_SHA|g" - -e "s|[@]CIRRUS_VM_INSTANCE_TYPE@|$CIRRUS_VM_INSTANCE_TYPE|g" - -e "s|[@]CIRRUS_VM_IMAGE_SELECTOR@|$CIRRUS_VM_IMAGE_SELECTOR|g" - -e "s|[@]CIRRUS_VM_IMAGE_NAME@|$CIRRUS_VM_IMAGE_NAME|g" - -e "s|[@]CIRRUS_VM_CPUS@|$CIRRUS_VM_CPUS|g" - -e "s|[@]CIRRUS_VM_RAM@|$CIRRUS_VM_RAM|g" - -e "s|[@]UPDATE_COMMAND@|$UPDATE_COMMAND|g" - -e "s|[@]INSTALL_COMMAND@|$INSTALL_COMMAND|g" - -e "s|[@]PATH@|$PATH_EXTRA${PATH_EXTRA:+:}\$PATH|g" - -e
"s|[@]PKG_CONFIG_PATH@|$PKG_CONFIG_PATH|g" - -e "s|[@]PKGS@|$PKGS|g" - -e "s|[@]MAKE@|$MAKE|g" - -e "s|[@]PYTHON@|$PYTHON|g" - -e "s|[@]PIP3@|$PIP3|g" - -e "s|[@]PYPI_PKGS@|$PYPI_PKGS|g" - -e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g" - -e "s|[@]TEST_TARGETS@|$TEST_TARGETS|g" - <.gitlab-ci.d/cirrus/build.yml >.gitlab-ci.d/cirrus/$NAME.yml + - set +o allexport + - cirrus-vars <.gitlab-ci.d/cirrus/build.yml >.gitlab-ci.d/cirrus/$NAME.yml - cat .gitlab-ci.d/cirrus/$NAME.yml - cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml variables: QEMU_JOB_CIRRUS: 1 -x64-freebsd-13-build: +x64-freebsd-14-build: extends: .cirrus_build_job variables: - NAME: freebsd-13 + NAME: freebsd-14 CIRRUS_VM_INSTANCE_TYPE: freebsd_instance CIRRUS_VM_IMAGE_SELECTOR: image_family - CIRRUS_VM_IMAGE_NAME: freebsd-13-3 + CIRRUS_VM_IMAGE_NAME: freebsd-14-2 CIRRUS_VM_CPUS: 8 CIRRUS_VM_RAM: 8G UPDATE_COMMAND: pkg update; pkg upgrade -y @@ -60,34 +45,16 @@ x64-freebsd-13-build: CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblaze-softmmu,mips64el-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4eb-softmmu,xtensa-softmmu TEST_TARGETS: check -aarch64-macos-13-base-build: - extends: .cirrus_build_job - variables: - NAME: macos-13 - CIRRUS_VM_INSTANCE_TYPE: macos_instance - CIRRUS_VM_IMAGE_SELECTOR: image - CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-ventura-base:latest - CIRRUS_VM_CPUS: 12 - CIRRUS_VM_RAM: 24G - UPDATE_COMMAND: brew update - INSTALL_COMMAND: brew install - PATH_EXTRA: /opt/homebrew/ccache/libexec:/opt/homebrew/gettext/bin - PKG_CONFIG_PATH: /opt/homebrew/curl/lib/pkgconfig:/opt/homebrew/ncurses/lib/pkgconfig:/opt/homebrew/readline/lib/pkgconfig - CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblazeel-softmmu,mips64-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4-softmmu,xtensaeb-softmmu - TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64 - -aarch64-macos-14-base-build: +aarch64-macos-build: extends: .cirrus_build_job variables: NAME: macos-14 CIRRUS_VM_INSTANCE_TYPE: macos_instance CIRRUS_VM_IMAGE_SELECTOR: image - CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-sonoma-base:latest - CIRRUS_VM_CPUS: 12 - CIRRUS_VM_RAM: 24G + CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-runner:sonoma UPDATE_COMMAND: brew update INSTALL_COMMAND: brew install PATH_EXTRA: /opt/homebrew/ccache/libexec:/opt/homebrew/gettext/bin PKG_CONFIG_PATH: /opt/homebrew/curl/lib/pkgconfig:/opt/homebrew/ncurses/lib/pkgconfig:/opt/homebrew/readline/lib/pkgconfig + CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblazeel-softmmu,mips64-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4-softmmu,xtensaeb-softmmu TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64 - QEMU_JOB_OPTIONAL: 1 diff --git a/.gitlab-ci.d/cirrus/build.yml b/.gitlab-ci.d/cirrus/build.yml index 43dd52dd19e..41abd0b31ac 100644 --- a/.gitlab-ci.d/cirrus/build.yml +++ b/.gitlab-ci.d/cirrus/build.yml @@ -8,7 +8,7 @@ env: CI_REPOSITORY_URL: "@CI_REPOSITORY_URL@" CI_COMMIT_REF_NAME: "@CI_COMMIT_REF_NAME@" CI_COMMIT_SHA: "@CI_COMMIT_SHA@" - PATH: "@PATH@" + PATH: "@PATH_EXTRA@:$PATH" PKG_CONFIG_PATH: "@PKG_CONFIG_PATH@" PYTHON: "@PYTHON@" MAKE: "@MAKE@" @@ -26,7 +26,7 @@ build_task: - git clone --depth 100 "$CI_REPOSITORY_URL" . 
- git fetch origin "$CI_COMMIT_REF_NAME" - git reset --hard "$CI_COMMIT_SHA" - build_script: + step_script: - mkdir build - cd build - ../configure --enable-werror $CONFIGURE_ARGS diff --git a/.gitlab-ci.d/cirrus/freebsd-13.vars b/.gitlab-ci.d/cirrus/freebsd-13.vars deleted file mode 100644 index 29ab9645f98..00000000000 --- a/.gitlab-ci.d/cirrus/freebsd-13.vars +++ /dev/null @@ -1,16 +0,0 @@ -# THIS FILE WAS AUTO-GENERATED -# -# $ lcitool variables freebsd-13 qemu -# -# https://gitlab.com/libvirt/libvirt-ci - -CCACHE='/usr/local/bin/ccache' -CPAN_PKGS='' -CROSS_PKGS='' -MAKE='/usr/local/bin/gmake' -NINJA='/usr/local/bin/ninja' -PACKAGING_COMMAND='pkg' -PIP3='/usr/local/bin/pip-3.8' -PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py311-numpy py311-pillow py311-pip py311-sphinx py311-sphinx_rtd_theme py311-tomli py311-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd' -PYPI_PKGS='' -PYTHON='/usr/local/bin/python3' diff --git a/.gitlab-ci.d/cirrus/freebsd-14.vars b/.gitlab-ci.d/cirrus/freebsd-14.vars new file mode 100644 index 00000000000..19ca0d36638 --- /dev/null +++ b/.gitlab-ci.d/cirrus/freebsd-14.vars @@ -0,0 +1,16 @@ +# THIS FILE WAS AUTO-GENERATED +# +# $ lcitool variables freebsd-14 qemu +# +# https://gitlab.com/libvirt/libvirt-ci + +CCACHE='/usr/local/bin/ccache' +CPAN_PKGS='' +CROSS_PKGS='' +MAKE='/usr/local/bin/gmake' +NINJA='/usr/local/bin/ninja' +PACKAGING_COMMAND='pkg' +PIP3='/usr/local/bin/pip' +PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache4 cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk-vnc gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py311-numpy py311-pillow py311-pip py311-pyyaml py311-sphinx py311-sphinx_rtd_theme py311-tomli python3 rpm2cpio rust rust-bindgen-cli sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 vulkan-tools xorriso zstd' +PYPI_PKGS='' +PYTHON='/usr/local/bin/python3' diff --git a/.gitlab-ci.d/cirrus/macos-13.vars b/.gitlab-ci.d/cirrus/macos-13.vars deleted file mode 100644 index 534f0299560..00000000000 --- a/.gitlab-ci.d/cirrus/macos-13.vars +++ /dev/null @@ -1,16 +0,0 @@ -# THIS FILE WAS AUTO-GENERATED -# -# $ lcitool variables macos-13 qemu -# -# https://gitlab.com/libvirt/libvirt-ci - -CCACHE='/opt/homebrew/bin/ccache' -CPAN_PKGS='' -CROSS_PKGS='' -MAKE='/opt/homebrew/bin/gmake' -NINJA='/opt/homebrew/bin/ninja' -PACKAGING_COMMAND='brew' -PIP3='/opt/homebrew/bin/pip3' -PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 xorriso zlib zstd' -PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli' -PYTHON='/opt/homebrew/bin/python3' diff --git a/.gitlab-ci.d/cirrus/macos-14.vars b/.gitlab-ci.d/cirrus/macos-14.vars index 
43070f4a265..b039465f56f 100644 --- a/.gitlab-ci.d/cirrus/macos-14.vars +++ b/.gitlab-ci.d/cirrus/macos-14.vars @@ -11,6 +11,6 @@ MAKE='/opt/homebrew/bin/gmake' NINJA='/opt/homebrew/bin/ninja' PACKAGING_COMMAND='brew' PIP3='/opt/homebrew/bin/pip3' -PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 xorriso zlib zstd' +PKGS='bash bc bindgen bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 gtk-vnc jemalloc jpeg-turbo json-c libcbor libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio rust sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 vulkan-tools xorriso zlib zstd' PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli' PYTHON='/opt/homebrew/bin/python3' diff --git a/.gitlab-ci.d/container-cross.yml b/.gitlab-ci.d/container-cross.yml index e3103940a0e..8d3be53b75b 100644 --- a/.gitlab-ci.d/container-cross.yml +++ b/.gitlab-ci.d/container-cross.yml @@ -22,12 +22,6 @@ arm64-debian-cross-container: variables: NAME: debian-arm64-cross -armel-debian-cross-container: - extends: .container_job_template - stage: containers - variables: - NAME: debian-armel-cross - armhf-debian-cross-container: extends: .container_job_template stage: containers @@ -73,11 +67,8 @@ ppc64el-debian-cross-container: riscv64-debian-cross-container: extends: .container_job_template stage: containers - # as we are currently based on 'sid/unstable' we may break so... 
- allow_failure: true variables: NAME: debian-riscv64-cross - QEMU_JOB_OPTIONAL: 1 s390x-debian-cross-container: extends: .container_job_template @@ -96,12 +87,12 @@ xtensa-debian-cross-container: variables: NAME: debian-xtensa-cross -cris-fedora-cross-container: +win64-fedora-cross-container: extends: .container_job_template variables: - NAME: fedora-cris-cross + NAME: fedora-win64-cross -win64-fedora-cross-container: +wasm-emsdk-cross-container: extends: .container_job_template variables: - NAME: fedora-win64-cross + NAME: emsdk-wasm32-cross diff --git a/.gitlab-ci.d/containers.yml b/.gitlab-ci.d/containers.yml index ae79d4c58bc..db9b4d5e57f 100644 --- a/.gitlab-ci.d/containers.yml +++ b/.gitlab-ci.d/containers.yml @@ -27,3 +27,9 @@ python-container: extends: .container_job_template variables: NAME: python + +amd64-fedora-rust-nightly-container: + extends: .container_job_template + variables: + NAME: fedora-rust-nightly + allow_failure: true diff --git a/.gitlab-ci.d/crossbuild-template.yml b/.gitlab-ci.d/crossbuild-template.yml index 53051ec793c..303943f818f 100644 --- a/.gitlab-ci.d/crossbuild-template.yml +++ b/.gitlab-ci.d/crossbuild-template.yml @@ -9,7 +9,11 @@ when: always timeout: 80m before_script: + - source scripts/ci/gitlab-ci-section + - section_start setup "Pre-script setup" + - JOBS=$(expr $(nproc) + 1) - cat /packages.txt + - section_end setup script: - export CCACHE_BASEDIR="$(pwd)" - export CCACHE_DIR="$CCACHE_BASEDIR/ccache" @@ -18,18 +22,30 @@ - mkdir build - cd build - ccache --zero-stats + - section_start configure "Running configure" - ../configure --enable-werror --disable-docs --enable-fdt=system --disable-user $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS - --target-list-exclude="arm-softmmu cris-softmmu + --target-list-exclude="arm-softmmu i386-softmmu microblaze-softmmu mips-softmmu mipsel-softmmu mips64-softmmu ppc-softmmu riscv32-softmmu sh4-softmmu sparc-softmmu xtensa-softmmu $CROSS_SKIP_TARGETS" - - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS + - section_end configure + - section_start build "Building QEMU" + - make -j"$JOBS" all check-build + - section_end build + - section_start test "Running tests" + - if test -n "$MAKE_CHECK_ARGS"; + then + $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ; + fi + - section_end test + - section_start installer "Building the installer" - if grep -q "EXESUF=.exe" config-host.mak; then make installer; version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)"; mv -v qemu-setup*.exe qemu-setup-${version}.exe; fi + - section_end installer - ccache --show-stats # Job to cross-build specific accelerators. 
@@ -41,11 +57,14 @@ extends: .base_job_template stage: build image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG - timeout: 30m + timeout: 60m cache: paths: - ccache/ key: "$CI_JOB_NAME" + before_script: + - source scripts/ci/gitlab-ci-section + - JOBS=$(expr $(nproc) + 1) script: - export CCACHE_BASEDIR="$(pwd)" - export CCACHE_DIR="$CCACHE_BASEDIR/ccache" @@ -53,9 +72,19 @@ - export PATH="$CCACHE_WRAPPERSDIR:$PATH" - mkdir build - cd build + - section_start configure "Running configure" - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS --disable-tools --enable-${ACCEL:-kvm} $EXTRA_CONFIGURE_OPTS - - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS + - section_end configure + - section_start build "Building QEMU" + - make -j"$JOBS" all check-build + - section_end build + - section_start test "Running tests" + - if test -n "$MAKE_CHECK_ARGS"; + then + $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ; + fi + - section_end test .cross_user_build_job: extends: .base_job_template @@ -65,18 +94,31 @@ paths: - ccache/ key: "$CI_JOB_NAME" + before_script: + - source scripts/ci/gitlab-ci-section + - JOBS=$(expr $(nproc) + 1) script: - export CCACHE_BASEDIR="$(pwd)" - export CCACHE_DIR="$CCACHE_BASEDIR/ccache" - export CCACHE_MAXSIZE="500M" - mkdir build - cd build + - section_start configure "Running configure" - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS --disable-system --target-list-exclude="aarch64_be-linux-user - alpha-linux-user cris-linux-user m68k-linux-user microblazeel-linux-user + alpha-linux-user m68k-linux-user microblazeel-linux-user or1k-linux-user ppc-linux-user sparc-linux-user xtensa-linux-user $CROSS_SKIP_TARGETS" - - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS + - section_end configure + - section_start build "Building QEMU" + - make -j"$JOBS" all check-build + - section_end build + - section_start test "Running tests" + - if test -n "$MAKE_CHECK_ARGS"; + then + $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ; + fi + - section_end test # We can still run some tests on some of our cross build jobs. They can add this # template to their extends to save the build logs and test results diff --git a/.gitlab-ci.d/crossbuilds.yml b/.gitlab-ci.d/crossbuilds.yml index cb499e4ee0d..3f76c901ba8 100644 --- a/.gitlab-ci.d/crossbuilds.yml +++ b/.gitlab-ci.d/crossbuilds.yml @@ -1,13 +1,6 @@ include: - local: '/.gitlab-ci.d/crossbuild-template.yml' -cross-armel-user: - extends: .cross_user_build_job - needs: - job: armel-debian-cross-container - variables: - IMAGE: debian-armel-cross - cross-armhf-user: extends: .cross_user_build_job needs: @@ -68,8 +61,12 @@ cross-i686-tci: variables: IMAGE: debian-i686-cross ACCEL: tcg-interpreter - EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins --disable-kvm - MAKE_CHECK_ARGS: check check-tcg + EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,arm-softmmu,arm-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins --disable-kvm + # Force tests to run with reduced parallelism, to see whether this + # reduces the flakiness of this CI job. The CI + # environment by default shows us 8 CPUs and so we + # would otherwise be using a parallelism of 9. 
+ MAKE_CHECK_ARGS: check check-tcg -j2 cross-mipsel-system: extends: .cross_system_build_job @@ -121,12 +118,8 @@ cross-ppc64el-kvm-only: IMAGE: debian-ppc64el-cross EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-devices -# The riscv64 cross-builds currently use a 'sid' container to get -# compilers and libraries. Until something more stable is found we -# allow_failure so as not to block CI. cross-riscv64-system: extends: .cross_system_build_job - allow_failure: true needs: job: riscv64-debian-cross-container variables: @@ -134,7 +127,6 @@ cross-riscv64-system: cross-riscv64-user: extends: .cross_user_build_job - allow_failure: true needs: job: riscv64-debian-cross-container variables: diff --git a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml index 263a3c2140e..ca2f1404710 100644 --- a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml +++ b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml @@ -103,7 +103,7 @@ ubuntu-22.04-aarch64-clang: script: - mkdir build - cd build - - ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers + - ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-ubsan || { cat config.log meson-logs/meson-log.txt; exit 1; } - make --output-sync -j`nproc --ignore=40` - make --output-sync -j`nproc --ignore=40` check diff --git a/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml b/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml index 69ddd3e7d57..e62ff1763fa 100644 --- a/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml +++ b/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml @@ -31,7 +31,9 @@ ubuntu-22.04-s390x-all-system: timeout: 75m rules: - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + allow_failure: true - if: "$S390X_RUNNER_AVAILABLE" + allow_failure: true script: - mkdir build - cd build @@ -80,7 +82,7 @@ ubuntu-22.04-s390x-clang: script: - mkdir build - cd build - - ../configure --cc=clang --cxx=clang++ --enable-sanitizers + - ../configure --cc=clang --cxx=clang++ --enable-ubsan || { cat config.log meson-logs/meson-log.txt; exit 1; } - make --output-sync -j`nproc` - make --output-sync -j`nproc` check diff --git a/.gitlab-ci.d/static_checks.yml b/.gitlab-ci.d/static_checks.yml index ad9f426a52f..c3ed6de453d 100644 --- a/.gitlab-ci.d/static_checks.yml +++ b/.gitlab-ci.d/static_checks.yml @@ -46,3 +46,49 @@ check-python-tox: QEMU_JOB_OPTIONAL: 1 needs: job: python-container + +check-rust-tools-nightly: + extends: .base_job_template + stage: test + image: $CI_REGISTRY_IMAGE/qemu/fedora-rust-nightly:$QEMU_CI_CONTAINER_TAG + script: + - source scripts/ci/gitlab-ci-section + - section_start test "Running Rust code checks" + - cd build + - pyvenv/bin/meson devenv -w ../rust ${CARGO-cargo} fmt --check + - make clippy + - make rustdoc + - section_end test + variables: + GIT_DEPTH: 1 + allow_failure: true + needs: + - job: build-system-fedora-rust-nightly + artifacts: true + artifacts: + when: on_success + expire_in: 2 days + paths: + - rust/target/doc + +check-build-units: + extends: .base_job_template + stage: build + image: $CI_REGISTRY_IMAGE/qemu/debian:$QEMU_CI_CONTAINER_TAG + needs: + job: amd64-debian-container + before_script: + - source scripts/ci/gitlab-ci-section + - section_start setup "Install Tools" + - apt install --assume-yes --no-install-recommends jq + - section_end setup + script: + - mkdir build + - cd build + - section_start configure "Running configure" + - ../configure + - cd .. 
+ - section_end configure + - section_start analyse "Analyse" + - .gitlab-ci.d/check-units.py build/compile_commands.json + - section_end analyse diff --git a/.gitlab-ci.d/windows.yml b/.gitlab-ci.d/windows.yml index a83f23a786e..45ed0c96fea 100644 --- a/.gitlab-ci.d/windows.yml +++ b/.gitlab-ci.d/windows.yml @@ -17,12 +17,7 @@ msys2-64bit: # This feature doesn't (currently) work with PowerShell, it stops # the echo'ing of commands being run and doesn't show any timing FF_SCRIPT_SECTIONS: 0 - # do not remove "--without-default-devices"! - # commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices" - # changed to compile QEMU with the --without-default-devices switch - # for this job, because otherwise the build could not complete within - # the project timeout. - CONFIGURE_ARGS: --target-list=sparc-softmmu --without-default-devices -Ddebug=false -Doptimization=0 + CONFIGURE_ARGS: --disable-system --enable-tools -Ddebug=false -Doptimization=0 # The Windows git is a bit older so override the default GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet artifacts: @@ -81,35 +76,19 @@ msys2-64bit: bison diffutils flex git grep make sed mingw-w64-x86_64-binutils - mingw-w64-x86_64-capstone mingw-w64-x86_64-ccache mingw-w64-x86_64-curl - mingw-w64-x86_64-cyrus-sasl - mingw-w64-x86_64-dtc mingw-w64-x86_64-gcc mingw-w64-x86_64-glib2 - mingw-w64-x86_64-gnutls - mingw-w64-x86_64-gtk3 - mingw-w64-x86_64-libgcrypt - mingw-w64-x86_64-libjpeg-turbo mingw-w64-x86_64-libnfs - mingw-w64-x86_64-libpng mingw-w64-x86_64-libssh - mingw-w64-x86_64-libtasn1 - mingw-w64-x86_64-libusb - mingw-w64-x86_64-lzo2 - mingw-w64-x86_64-nettle mingw-w64-x86_64-ninja mingw-w64-x86_64-pixman mingw-w64-x86_64-pkgconf mingw-w64-x86_64-python - mingw-w64-x86_64-SDL2 - mingw-w64-x86_64-SDL2_image - mingw-w64-x86_64-snappy - mingw-w64-x86_64-spice - mingw-w64-x86_64-usbredir mingw-w64-x86_64-zstd" - Write-Output "Running build at $(Get-Date -Format u)" + - $env:JOBS = $(.\msys64\usr\bin\bash -lc nproc) - $env:CHERE_INVOKING = 'yes' # Preserve the current working directory - $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink - $env:CCACHE_BASEDIR = "$env:CI_PROJECT_DIR" @@ -120,8 +99,8 @@ msys2-64bit: - mkdir build - cd build - ..\msys64\usr\bin\bash -lc "ccache --zero-stats" - - ..\msys64\usr\bin\bash -lc "../configure --enable-fdt=system $CONFIGURE_ARGS" - - ..\msys64\usr\bin\bash -lc "make" + - ..\msys64\usr\bin\bash -lc "../configure $CONFIGURE_ARGS" + - ..\msys64\usr\bin\bash -lc "make -j$env:JOBS" - ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;" - ..\msys64\usr\bin\bash -lc "ccache --show-stats" - Write-Output "Finished build at $(Get-Date -Format u)" diff --git a/.mailmap b/.mailmap index ef1b8a53f44..e7271852dc6 100644 --- a/.mailmap +++ b/.mailmap @@ -67,6 +67,8 @@ Andrey Drobyshev Andrey Drobyshev via BALATON Zoltan via # Next, replace old addresses by a more recent one. 
+Akihiko Odaki +Akihiko Odaki Aleksandar Markovic Aleksandar Markovic Aleksandar Markovic @@ -75,6 +77,8 @@ Aleksandar Rikalo Alexander Graf Ani Sinha Anthony Liguori Anthony Liguori +Brian Cain +Brian Cain Christian Borntraeger Damien Hedde Filip Bozuta @@ -85,8 +89,9 @@ Huacai Chen Huacai Chen James Hogan Juan Quintela -Leif Lindholm -Leif Lindholm +Leif Lindholm +Leif Lindholm +Leif Lindholm Luc Michel Luc Michel Luc Michel diff --git a/.readthedocs.yml b/.readthedocs.yml index 0b262469ce6..639f628612c 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -21,5 +21,3 @@ python: install: - requirements: docs/requirements.txt -# We want all the document formats -formats: all diff --git a/.travis.yml b/.travis.yml index 8fc1ae0cf22..0a634d7b4a0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -79,41 +79,6 @@ after_script: jobs: include: - - name: "[aarch64] GCC check-tcg" - arch: arm64 - addons: - apt_packages: - - libaio-dev - - libattr1-dev - - libbrlapi-dev - - libcacard-dev - - libcap-ng-dev - - libfdt-dev - - libgcrypt20-dev - - libgnutls28-dev - - libgtk-3-dev - - libiscsi-dev - - liblttng-ust-dev - - libncurses5-dev - - libnfs-dev - - libpixman-1-dev - - libpng-dev - - librados-dev - - libsdl2-dev - - libseccomp-dev - - liburcu-dev - - libusb-1.0-0-dev - - libvdeplug-dev - - libvte-2.91-dev - - ninja-build - - python3-tomli - # Tests dependencies - - genisoimage - env: - - TEST_CMD="make check check-tcg V=1" - - CONFIG="--disable-containers --enable-fdt=system - --target-list=${MAIN_SYSTEM_TARGETS} --cxx=/bin/false" - - name: "[ppc64] Clang check-tcg" arch: ppc64le compiler: clang diff --git a/COPYING b/COPYING index 00ccfbb6288..8095135d50d 100644 --- a/COPYING +++ b/COPYING @@ -2,7 +2,7 @@ Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -304,8 +304,7 @@ the "copyright" line and a pointer to where the full notice is found. GNU General Public License for more details. You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + with this program; if not, see . Also add information on how to contact you by electronic and paper mail. diff --git a/COPYING.LIB b/COPYING.LIB index 4362b49151d..99f47575b5b 100644 --- a/COPYING.LIB +++ b/COPYING.LIB @@ -2,7 +2,7 @@ Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -484,8 +484,7 @@ convey the exclusion of warranty; and each file should have at least the Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + License along with this library; if not, see . Also add information on how to contact you by electronic and paper mail. 
diff --git a/Kconfig b/Kconfig index fb6a24a2de8..63ca7f46df7 100644 --- a/Kconfig +++ b/Kconfig @@ -4,3 +4,4 @@ source accel/Kconfig source target/Kconfig source hw/Kconfig source semihosting/Kconfig +source rust/Kconfig diff --git a/Kconfig.host b/Kconfig.host index 17f405004b3..933425c74b4 100644 --- a/Kconfig.host +++ b/Kconfig.host @@ -5,6 +5,12 @@ config LINUX bool +config LIBCBOR + bool + +config GNUTLS + bool + config OPENGL bool @@ -52,3 +58,9 @@ config VFIO_USER_SERVER_ALLOWED config HV_BALLOON_POSSIBLE bool + +config HAVE_RUST + bool + +config MAC_PVG + bool diff --git a/MAINTAINERS b/MAINTAINERS index e34c2bd4cda..54fb8781280 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -72,11 +72,14 @@ R: Markus Armbruster R: Philippe Mathieu-Daudé W: https://www.qemu.org/docs/master/devel/index.html S: Odd Fixes -F: docs/devel/style.rst +F: docs/devel/build-environment.rst F: docs/devel/code-of-conduct.rst +F: docs/devel/codebase.rst F: docs/devel/conflict-resolution.rst +F: docs/devel/style.rst F: docs/devel/submitting-a-patch.rst F: docs/devel/submitting-a-pull-request.rst +F: docs/glossary.rst Responsible Disclosure, Reporting Security Issues ------------------------------------------------- @@ -109,6 +112,7 @@ F: hw/intc/s390_flic.c F: hw/intc/s390_flic_kvm.c F: hw/s390x/ F: hw/vfio/ap.c +F: hw/s390x/ap-stub.c F: hw/vfio/ccw.c F: hw/watchdog/wdt_diag288.c F: include/hw/s390x/ @@ -118,7 +122,7 @@ F: pc-bios/s390-ccw.img F: target/s390x/ F: docs/system/target-s390x.rst F: docs/system/s390x/ -F: tests/migration/s390x/ +F: tests/qtest/migration/s390x/ K: ^Subject:.*(?i)s390x? L: qemu-s390x@nongnu.org @@ -132,6 +136,7 @@ F: configs/targets/mips* X86 general architecture support M: Paolo Bonzini +R: Zhao Liu S: Maintained F: configs/devices/i386-softmmu/default.mak F: configs/targets/i386-softmmu.mak @@ -148,10 +153,7 @@ Overall TCG CPUs M: Richard Henderson R: Paolo Bonzini S: Maintained -F: system/cpus.c F: system/watchpoint.c -F: cpu-common.c -F: cpu-target.c F: page-vary-target.c F: page-vary-common.c F: accel/tcg/ @@ -161,17 +163,13 @@ F: util/cacheflush.c F: scripts/decodetree.py F: docs/devel/decodetree.rst F: docs/devel/tcg* -F: include/exec/cpu*.h -F: include/exec/exec-all.h F: include/exec/tb-flush.h -F: include/exec/target_long.h F: include/exec/helper*.h F: include/exec/helper*.h.inc F: include/exec/helper-info.c.inc F: include/exec/page-protection.h -F: include/sysemu/cpus.h -F: include/sysemu/tcg.h -F: include/hw/core/tcg-cpu-ops.h +F: include/system/tcg.h +F: include/accel/tcg/ F: host/include/*/host/cpuinfo.h F: util/cpuinfo-*.c F: include/tcg/ @@ -214,7 +212,7 @@ L: qemu-arm@nongnu.org S: Maintained F: hw/arm/smmu* F: include/hw/arm/smmu* -F: tests/avocado/smmu.py +F: tests/functional/test_aarch64_smmu.py AVR TCG CPUs M: Michael Rolnik @@ -222,19 +220,10 @@ S: Maintained F: docs/system/target-avr.rst F: gdb-xml/avr-cpu.xml F: target/avr/ -F: tests/avocado/machine_avr6.py - -CRIS TCG CPUs -M: Edgar E. 
Iglesias -S: Maintained -F: target/cris/ -F: hw/cris/ -F: include/hw/cris/ -F: tests/tcg/cris/ -F: disas/cris.c +F: tests/functional/test_avr_*.py Hexagon TCG CPUs -M: Brian Cain +M: Brian Cain S: Supported F: target/hexagon/ X: target/hexagon/idef-parser/ @@ -245,6 +234,7 @@ F: disas/hexagon.c F: configs/targets/hexagon-linux-user/default.mak F: docker/dockerfiles/debian-hexagon-cross.docker F: gdb-xml/hexagon*.xml +T: git https://github.com/quic/qemu.git hex-next Hexagon idef-parser M: Alessandro Di Federico @@ -255,6 +245,7 @@ F: target/hexagon/gen_idef_parser_funcs.py HPPA (PA-RISC) TCG CPUs M: Richard Henderson +M: Helge Deller S: Maintained F: target/hppa/ F: disas/hppa.c @@ -265,7 +256,7 @@ M: Song Gao S: Maintained F: target/loongarch/ F: tests/tcg/loongarch64/ -F: tests/avocado/machine_loongarch.py +F: tests/functional/test_loongarch64_virt.py M68K TCG CPUs M: Laurent Vivier @@ -304,7 +295,7 @@ F: tests/tcg/openrisc/ PowerPC TCG CPUs M: Nicholas Piggin -M: Daniel Henrique Barboza +R: Chinmay Rath L: qemu-ppc@nongnu.org S: Odd Fixes F: target/ppc/ @@ -317,11 +308,11 @@ F: configs/devices/ppc* F: docs/system/ppc/embedded.rst F: docs/system/target-ppc.rst F: tests/tcg/ppc*/* +F: tests/functional/test_ppc_74xx.py RISC-V TCG CPUs M: Palmer Dabbelt M: Alistair Francis -M: Bin Meng R: Weiwei Li R: Daniel Henrique Barboza R: Liu Zhiwei @@ -329,12 +320,17 @@ L: qemu-riscv@nongnu.org S: Supported F: configs/targets/riscv* F: docs/system/target-riscv.rst +F: docs/specs/riscv-iommu.rst F: target/riscv/ +F: hw/char/riscv_htif.c F: hw/riscv/ F: hw/intc/riscv* +F: include/hw/char/riscv_htif.h F: include/hw/riscv/ F: linux-user/host/riscv32/ F: linux-user/host/riscv64/ +F: common-user/host/riscv* +F: tests/functional/test_riscv* F: tests/tcg/riscv64/ RISC-V XThead* extensions @@ -356,7 +352,7 @@ F: target/riscv/insn_trans/trans_xventanacondops.c.inc F: disas/riscv-xventana* RENESAS RX CPUs -R: Yoshinori Sato +R: Yoshinori Sato S: Orphan F: target/rx/ @@ -372,7 +368,7 @@ F: tests/tcg/s390x/ L: qemu-s390x@nongnu.org SH4 TCG CPUs -R: Yoshinori Sato +R: Yoshinori Sato S: Orphan F: target/sh4/ F: hw/sh4/ @@ -439,7 +435,7 @@ F: */*/kvm* F: accel/kvm/ F: accel/stubs/kvm-stub.c F: include/hw/kvm/ -F: include/sysemu/kvm*.h +F: include/system/kvm*.h F: scripts/kvm/kvm_flightrecorder ARM KVM CPUs @@ -452,17 +448,19 @@ MIPS KVM CPUs M: Huacai Chen S: Odd Fixes F: target/mips/kvm* -F: target/mips/sysemu/ +F: target/mips/system/ PPC KVM CPUs M: Nicholas Piggin -R: Daniel Henrique Barboza +R: Harsh Prateek Bora S: Odd Fixes F: target/ppc/kvm.c S390 KVM CPUs M: Halil Pasic M: Christian Borntraeger +R: Eric Farman +R: Matthew Rosato S: Supported F: target/s390x/kvm/ F: target/s390x/machine.c @@ -481,37 +479,50 @@ F: docs/system/i386/sgx.rst F: target/i386/kvm/ F: target/i386/sev* F: scripts/kvm/vmxcap +F: tests/functional/test_x86_64_hotplug_cpu.py Xen emulation on X86 KVM CPUs M: David Woodhouse M: Paul Durrant S: Supported -F: include/sysemu/kvm_xen.h +F: include/system/kvm_xen.h F: target/i386/kvm/xen* F: hw/i386/kvm/xen* -F: tests/avocado/kvm_xen_guest.py +F: tests/functional/test_x86_64_kvm_xen.py Guest CPU Cores (other accelerators) ------------------------------------ Overall M: Richard Henderson R: Paolo Bonzini +R: Philippe Mathieu-Daudé S: Maintained +F: include/exec/cpu*.h +F: include/exec/target_long.h F: include/qemu/accel.h -F: include/sysemu/accel-*.h -F: include/hw/core/accel-cpu.h -F: accel/accel-*.c +F: include/system/accel-*.h +F: include/system/cpus.h +F: include/accel/accel-*.h +F: 
accel/accel-*.? +F: accel/dummy-cpus.? F: accel/Makefile.objs F: accel/stubs/Makefile.objs +F: cpu-common.c +F: cpu-target.c +F: qapi/accelerator.json +F: system/cpus.c Apple Silicon HVF CPUs M: Alexander Graf +R: Mads Ynddal S: Maintained F: target/arm/hvf/ +F: target/arm/hvf-stub.c X86 HVF CPUs M: Cameron Esfahani M: Roman Bolshakov +R: Phil Dennis-Jordan W: https://wiki.qemu.org/Features/HVF S: Maintained F: target/i386/hvf/ @@ -519,17 +530,29 @@ F: target/i386/hvf/ HVF M: Cameron Esfahani M: Roman Bolshakov +R: Phil Dennis-Jordan +R: Mads Ynddal W: https://wiki.qemu.org/Features/HVF S: Maintained F: accel/hvf/ -F: include/sysemu/hvf.h -F: include/sysemu/hvf_int.h +F: accel/stubs/hvf-stub.c +F: include/system/hvf.h +F: include/system/hvf_int.h WHPX CPUs M: Sunil Muthuswamy S: Supported F: target/i386/whpx/ -F: include/sysemu/whpx.h +F: accel/stubs/whpx-stub.c +F: include/system/whpx.h + +X86 Instruction Emulator +M: Cameron Esfahani +M: Roman Bolshakov +R: Phil Dennis-Jordan +R: Wei Liu +S: Maintained +F: target/i386/emulate/ Guest CPU Cores (Xen) --------------------- @@ -555,16 +578,19 @@ F: hw/i386/xen/ F: hw/pci-host/xen_igd_pt.c F: include/hw/block/dataplane/xen* F: include/hw/xen/ -F: include/sysemu/xen.h -F: include/sysemu/xen-mapcache.h +F: include/system/xen.h +F: include/system/xen-mapcache.h F: stubs/xen-hw-stub.c +F: docs/system/arm/xenpvh.rst +F: docs/system/i386/xenpvh.rst Guest CPU Cores (NVMM) ---------------------- NetBSD Virtual Machine Monitor (NVMM) CPU support M: Reinoud Zandijk S: Maintained -F: include/sysemu/nvmm.h +F: include/system/nvmm.h +F: accel/stubs/nvmm-stub.c F: target/i386/nvmm/ Hosts @@ -582,7 +608,7 @@ POSIX M: Paolo Bonzini S: Maintained F: os-posix.c -F: include/sysemu/os-posix.h +F: include/system/os-posix.h F: util/*posix*.c F: include/qemu/*posix*.h @@ -614,6 +640,15 @@ F: .gitlab-ci.d/cirrus/macos-* F: */*.m F: scripts/entitlement.sh +WebAssembly +M: Kohei Tokunaga +S: Maintained +F: include/system/os-wasm.h +F: os-wasm.c +F: util/coroutine-wasm.c +F: configs/meson/emscripten.txt +F: tests/docker/dockerfiles/emsdk-wasm32-cross.docker + Alpha Machines -------------- M: Richard Henderson @@ -621,6 +656,7 @@ S: Maintained F: hw/alpha/ F: hw/isa/smc37c669-superio.c F: tests/tcg/alpha/system/ +F: tests/functional/test_alpha_clipper.py ARM Machines ------------ @@ -636,6 +672,7 @@ F: include/hw/*/allwinner* F: hw/arm/cubieboard.c F: docs/system/arm/cubieboard.rst F: hw/misc/axp209.c +F: tests/functional/test_arm_cubieboard.py Allwinner-h3 M: Niek Linnenbank @@ -645,6 +682,7 @@ F: hw/*/allwinner-h3* F: include/hw/*/allwinner-h3* F: hw/arm/orangepi.c F: docs/system/arm/orangepi.rst +F: tests/functional/test_arm_orangepi.py ARM PrimeCell and CMSDK devices M: Peter Maydell @@ -706,6 +744,24 @@ F: include/hw/timer/armv7m_systick.h F: include/hw/misc/armv7m_ras.h F: tests/qtest/test-arm-mptimer.c +Bananapi M2U +M: Peter Maydell +L: qemu-arm@nongnu.org +S: Odd Fixes +F: docs/system/arm/bananapi_m2u.rst +F: hw/*/allwinner-r40*.c +F: hw/arm/bananapi_m2u.c +F: include/hw/*/allwinner-r40*.h +F: tests/functional/test_arm_bpim2u.py + +B-L475E-IOT01A IoT Node +M: Samuel Tardieu +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/b-l475e-iot01a.c +F: hw/display/dm163.c +F: tests/qtest/dm163-test.c + Exynos M: Igor Mitsyanko M: Peter Maydell @@ -713,6 +769,8 @@ L: qemu-arm@nongnu.org S: Odd Fixes F: hw/*/exynos* F: include/hw/*/exynos* +F: docs/system/arm/exynos.rst +F: tests/functional/test_arm_smdkc210.py Calxeda Highbank M: Rob Herring @@ -731,7 +789,7 @@ S: Odd 
Fixes F: include/hw/arm/digic.h F: hw/*/digic* F: include/hw/*/digic* -F: tests/avocado/machine_arm_canona1100.py +F: tests/functional/test_arm_canona1100.py F: docs/system/arm/digic.rst Goldfish RTC @@ -742,14 +800,6 @@ S: Maintained F: hw/rtc/goldfish_rtc.c F: include/hw/rtc/goldfish_rtc.h -Gumstix -M: Peter Maydell -R: Philippe Mathieu-Daudé -L: qemu-arm@nongnu.org -S: Odd Fixes -F: hw/arm/gumstix.c -F: docs/system/arm/gumstix.rst - i.MX25 PDK M: Peter Maydell R: Jean-Christophe Dubois @@ -778,11 +828,11 @@ F: docs/system/arm/kzm.rst Integrator CP M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/arm/integratorcp.c F: hw/misc/arm_integrator_debug.c F: include/hw/misc/arm_integrator_debug.h -F: tests/avocado/machine_arm_integratorcp.py +F: tests/functional/test_arm_integratorcp.py F: docs/system/arm/integratorcp.rst MCIMX6UL EVK / i.MX6ul @@ -795,6 +845,7 @@ F: hw/arm/fsl-imx6ul.c F: hw/misc/imx6ul_ccm.c F: include/hw/arm/fsl-imx6ul.h F: include/hw/misc/imx6ul_ccm.h +F: docs/system/arm/mcimx6ul-evk.rst MCIMX7D SABRE / i.MX7 M: Peter Maydell @@ -808,6 +859,23 @@ F: include/hw/arm/fsl-imx7.h F: include/hw/misc/imx7_*.h F: hw/pci-host/designware.c F: include/hw/pci-host/designware.h +F: docs/system/arm/mcimx7d-sabre.rst + +MCIMX8MP-EVK / i.MX8MP +M: Bernhard Beschow +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/imx8mp-evk.c +F: hw/arm/fsl-imx8mp.c +F: hw/misc/imx8mp_*.c +F: hw/pci-host/fsl_imx8m_phy.c +F: hw/rtc/rs5c372.c +F: include/hw/arm/fsl-imx8mp.h +F: include/hw/misc/imx8mp_*.h +F: include/hw/pci-host/fsl_imx8m_phy.h +F: docs/system/arm/imx8mp-evk.rst +F: tests/functional/test_aarch64_imx8mp_evk.py +F: tests/qtest/rs5c372-test.c MPS2 / MPS3 M: Peter Maydell @@ -842,7 +910,7 @@ F: docs/system/arm/mps2.rst Musca M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/arm/musca.c F: docs/system/arm/musca.rst @@ -867,34 +935,10 @@ F: include/hw/*/npcm* F: tests/qtest/npcm* F: tests/qtest/adm1266-test.c F: pc-bios/npcm7xx_bootrom.bin +F: pc-bios/npcm8xx_bootrom.bin F: roms/vbootrom F: docs/system/arm/nuvoton.rst - -nSeries -M: Peter Maydell -L: qemu-arm@nongnu.org -S: Odd Fixes -F: hw/arm/nseries.c -F: hw/display/blizzard.c -F: hw/input/lm832x.c -F: hw/input/tsc2005.c -F: hw/misc/cbus.c -F: hw/rtc/twl92230.c -F: include/hw/display/blizzard.h -F: include/hw/input/lm832x.h -F: include/hw/input/tsc2xxx.h -F: include/hw/misc/cbus.h -F: tests/avocado/machine_arm_n8x0.py -F: docs/system/arm/nseries.rst - -Palm -M: Peter Maydell -L: qemu-arm@nongnu.org -S: Odd Fixes -F: hw/arm/palm.c -F: hw/input/tsc210x.c -F: include/hw/input/tsc2xxx.h -F: docs/system/arm/palm.rst +F: tests/functional/test_arm_quanta_gsj.py Raspberry Pi M: Peter Maydell @@ -907,38 +951,20 @@ F: hw/*/bcm283* F: include/hw/arm/rasp* F: include/hw/*/bcm283* F: docs/system/arm/raspi.rst +F: tests/functional/test_arm_raspi2.py +F: tests/functional/test_aarch64_raspi3.py +F: tests/functional/test_aarch64_raspi4.py Real View M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/arm/realview* F: hw/cpu/realview_mpcore.c F: hw/intc/realview_gic.c F: include/hw/intc/realview_gic.h F: docs/system/arm/realview.rst - -PXA2XX -M: Peter Maydell -L: qemu-arm@nongnu.org -S: Odd Fixes -F: hw/arm/mainstone.c -F: hw/arm/spitz.c -F: hw/arm/tosa.c -F: hw/arm/z2.c -F: hw/*/pxa2xx* -F: hw/display/tc6393xb.c -F: hw/gpio/max7310.c -F: hw/gpio/zaurus.c -F: hw/input/ads7846.c -F: hw/misc/mst_fpga.c -F: hw/adc/max111x.c -F: include/hw/adc/max111x.h -F: include/hw/arm/pxa.h -F: 
include/hw/arm/sharpsl.h -F: include/hw/display/tc6393xb.h -F: docs/system/arm/xscale.rst -F: docs/system/arm/mainstone.rst +F: tests/functional/test_arm_realview.py SABRELITE / i.MX6 M: Peter Maydell @@ -959,8 +985,7 @@ F: include/hw/ssi/imx_spi.h SBSA-REF M: Radoslaw Biernacki M: Peter Maydell -R: Leif Lindholm -R: Marcin Juszkiewicz +R: Leif Lindholm L: qemu-arm@nongnu.org S: Maintained F: hw/arm/sbsa-ref.c @@ -968,7 +993,7 @@ F: hw/misc/sbsa_ec.c F: hw/watchdog/sbsa_gwdt.c F: include/hw/watchdog/sbsa_gwdt.h F: docs/system/arm/sbsa.rst -F: tests/avocado/machine_aarch64_sbsaref.py +F: tests/functional/test_aarch64_*sbsaref*.py Sharp SL-5500 (Collie) PDA M: Peter Maydell @@ -976,17 +1001,34 @@ L: qemu-arm@nongnu.org S: Odd Fixes F: hw/arm/collie.c F: hw/arm/strongarm* +F: hw/gpio/zaurus.c +F: include/hw/arm/sharpsl.h F: docs/system/arm/collie.rst +F: tests/functional/test_arm_collie.py Stellaris M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/*/stellaris* F: hw/display/ssd03* -F: include/hw/input/gamepad.h +F: include/hw/input/stellaris_gamepad.h F: include/hw/timer/stellaris-gptm.h F: docs/system/arm/stellaris.rst +F: tests/functional/test_arm_stellaris.py + +STM32L4x5 SoC Family +M: Samuel Tardieu +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/stm32l4x5_soc.c +F: hw/char/stm32l4x5_usart.c +F: hw/misc/stm32l4x5_exti.c +F: hw/misc/stm32l4x5_syscfg.c +F: hw/misc/stm32l4x5_rcc.c +F: hw/gpio/stm32l4x5_gpio.c +F: include/hw/*/stm32l4x5_*.h +F: tests/qtest/stm32l4x5* STM32VLDISCOVERY M: Alexandre Iooss @@ -998,15 +1040,16 @@ F: docs/system/arm/stm32.rst Versatile Express M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/arm/vexpress.c F: hw/display/sii9022.c F: docs/system/arm/vexpress.rst +F: tests/functional/test_arm_vexpress.py Versatile PB M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/*/versatile* F: hw/i2c/arm_sbcon_i2c.c F: include/hw/i2c/arm_sbcon_i2c.h @@ -1020,7 +1063,10 @@ S: Maintained F: hw/arm/virt* F: include/hw/arm/virt.h F: docs/system/arm/virt.rst -F: tests/avocado/machine_aarch64_virt.py +F: tests/functional/test_aarch64_*virt*.py +F: tests/functional/test_aarch64_tuxrun.py +F: tests/functional/test_arm_tuxrun.py +F: tests/functional/test_arm_virt.py Xilinx Zynq M: Edgar E. 
Iglesias @@ -1049,9 +1095,11 @@ F: include/hw/ssi/xilinx_spips.h F: hw/display/dpcd.c F: include/hw/display/dpcd.h F: docs/system/arm/xlnx-versal-virt.rst +F: docs/system/arm/xlnx-zcu102.rst +F: tests/functional/test_aarch64_xlnx_versal.py Xilinx Versal OSPI -M: Francisco Iglesias +M: Francisco Iglesias S: Maintained F: hw/ssi/xlnx-versal-ospi.c F: include/hw/ssi/xlnx-versal-ospi.h @@ -1093,6 +1141,8 @@ S: Maintained F: hw/arm/stm32f405_soc.c F: hw/misc/stm32f4xx_syscfg.c F: hw/misc/stm32f4xx_exti.c +F: hw/misc/stm32_rcc.c +F: include/hw/misc/stm32_rcc.h Netduino 2 M: Alistair Francis @@ -1114,26 +1164,6 @@ L: qemu-arm@nongnu.org S: Maintained F: hw/arm/olimex-stm32-h405.c -STM32L4x5 SoC Family -M: Arnaud Minier -M: Inès Varhol -L: qemu-arm@nongnu.org -S: Maintained -F: hw/arm/stm32l4x5_soc.c -F: hw/char/stm32l4x5_usart.c -F: hw/misc/stm32l4x5_exti.c -F: hw/misc/stm32l4x5_syscfg.c -F: hw/misc/stm32l4x5_rcc.c -F: hw/gpio/stm32l4x5_gpio.c -F: include/hw/*/stm32l4x5_*.h - -B-L475E-IOT01A IoT Node -M: Arnaud Minier -M: Inès Varhol -L: qemu-arm@nongnu.org -S: Maintained -F: hw/arm/b-l475e-iot01a.c - SmartFusion2 M: Subbaraya Sundeep M: Peter Maydell @@ -1157,6 +1187,7 @@ L: qemu-arm@nongnu.org S: Maintained F: hw/arm/msf2-som.c F: docs/system/arm/emcraft-sf2.rst +F: tests/functional/test_arm_emcraft_sf2.py ASPEED BMCs M: Cédric Le Goater @@ -1173,8 +1204,11 @@ F: include/hw/*/*aspeed* F: hw/net/ftgmac100.c F: include/hw/net/ftgmac100.h F: docs/system/arm/aspeed.rst +F: docs/system/arm/fby35.rst F: tests/*/*aspeed* +F: tests/*/*ast2700* F: hw/arm/fby35.c +F: pc-bios/ast27x0_bootrom.bin NRF51 M: Joel Stanley @@ -1186,8 +1220,14 @@ F: hw/*/microbit*.c F: include/hw/*/nrf51*.h F: include/hw/*/microbit*.h F: tests/qtest/microbit-test.c +F: tests/functional/test_arm_microbit.py F: docs/system/arm/nrf.rst +ARM PL011 Rust device +M: Manos Pitsidianakis +S: Maintained +F: rust/hw/char/pl011/ + AVR Machines ------------- @@ -1207,22 +1247,16 @@ Arduino M: Philippe Mathieu-Daudé S: Maintained F: hw/avr/arduino.c - -CRIS Machines -------------- -Axis Dev88 -M: Edgar E. 
Iglesias -S: Maintained -F: hw/cris/axis_dev88.c -F: hw/*/etraxfs_*.c +F: tests/functional/test_avr_uno.py HP-PARISC Machines ------------------ HP B160L, HP C3700 M: Richard Henderson -R: Helge Deller -S: Odd Fixes +M: Helge Deller +S: Maintained F: configs/devices/hppa-softmmu/default.mak +F: hw/char/diva-gsp.c F: hw/display/artist.c F: hw/hppa/ F: hw/input/lasips2.c @@ -1237,11 +1271,13 @@ F: include/hw/pci-host/astro.h F: include/hw/pci-host/dino.h F: pc-bios/hppa-firmware.img F: roms/seabios-hppa/ +F: tests/functional/test_hppa_seabios.py LoongArch Machines ------------------ Virt M: Song Gao +M: Bibo Mao R: Jiaxun Yang S: Maintained F: docs/system/loongarch/virt.rst @@ -1250,9 +1286,9 @@ F: configs/devices/loongarch64-softmmu/default.mak F: hw/loongarch/ F: include/hw/loongarch/virt.h F: include/hw/intc/loongarch_*.h -F: include/hw/intc/loongson_ipi.h +F: include/hw/intc/loongson_ipi_common.h F: hw/intc/loongarch_*.c -F: hw/intc/loongson_ipi.c +F: hw/intc/loongson_ipi_common.c F: include/hw/pci-host/ls7a.h F: hw/rtc/ls7a_rtc.c F: gdb-xml/loongarch*.xml @@ -1273,6 +1309,7 @@ F: hw/m68k/mcf_intc.c F: hw/char/mcf_uart.c F: hw/net/mcf_fec.c F: include/hw/m68k/mcf*.h +F: tests/functional/test_m68k_mcf5208evb.py NeXTcube M: Thomas Huth @@ -1280,6 +1317,7 @@ S: Odd Fixes F: hw/m68k/next-*.c F: hw/display/next-fb.c F: include/hw/m68k/next-cube.h +F: tests/functional/test_m68k_nextcube.py q800 M: Laurent Vivier @@ -1305,6 +1343,7 @@ F: include/hw/m68k/q800-glue.h F: include/hw/misc/djmemc.h F: include/hw/misc/iosb.h F: include/hw/audio/asc.h +F: tests/functional/test_m68k_q800.py virt M: Laurent Vivier @@ -1319,6 +1358,7 @@ F: include/hw/intc/goldfish_pic.h F: include/hw/intc/m68k_irqc.h F: include/hw/misc/virt_ctrl.h F: docs/specs/virt-ctlr.rst +F: tests/functional/test_m68k_tuxrun.py MicroBlaze Machines ------------------- @@ -1327,7 +1367,7 @@ M: Edgar E. Iglesias S: Maintained F: hw/microblaze/petalogix_s3adsp1800_mmu.c F: include/hw/char/xilinx_uartlite.h -F: tests/avocado/machine_microblaze.py +F: tests/functional/test_microblaze*.py petalogix_ml605 M: Edgar E. 
Iglesias @@ -1363,8 +1403,8 @@ F: hw/acpi/piix4.c F: hw/mips/malta.c F: hw/pci-host/gt64120.c F: include/hw/southbridge/piix.h -F: tests/avocado/linux_ssh_mips_malta.py -F: tests/avocado/machine_mips_malta.py +F: tests/functional/test_mips*_malta.py +F: tests/functional/test_mips*_tuxrun.py Mipssim R: Aleksandar Rikalo @@ -1380,20 +1420,22 @@ S: Odd Fixes F: hw/mips/fuloong2e.c F: hw/pci-host/bonito.c F: include/hw/pci-host/bonito.h -F: tests/avocado/machine_mips_fuloong2e.py +F: tests/functional/test_mips64el_fuloong2e.py Loongson-3 virtual platforms M: Huacai Chen R: Jiaxun Yang S: Maintained +F: hw/intc/loongson_ipi_common.c F: hw/intc/loongson_ipi.c F: hw/intc/loongson_liointc.c F: hw/mips/loongson3_bootp.c F: hw/mips/loongson3_bootp.h F: hw/mips/loongson3_virt.c +F: include/hw/intc/loongson_ipi_common.h F: include/hw/intc/loongson_ipi.h F: include/hw/intc/loongson_liointc.h -F: tests/avocado/machine_mips_loongson3v.py +F: tests/functional/test_mips64el_loongson3v.py Boston M: Paul Burton @@ -1412,25 +1454,21 @@ S: Maintained F: docs/system/openrisc/or1k-sim.rst F: hw/intc/ompic.c F: hw/openrisc/openrisc_sim.c +F: tests/functional/test_or1k_sim.py PowerPC Machines ---------------- -405 (ref405ep) -L: qemu-ppc@nongnu.org -S: Orphan -F: hw/ppc/ppc405* -F: tests/avocado/ppc_405.py - Bamboo L: qemu-ppc@nongnu.org S: Orphan F: hw/ppc/ppc440_bamboo.c F: hw/pci-host/ppc4xx_pci.c -F: tests/avocado/ppc_bamboo.py +F: tests/functional/test_ppc_bamboo.py e500 +M: Bernhard Beschow L: qemu-ppc@nongnu.org -S: Orphan +S: Odd Fixes F: hw/ppc/e500* F: hw/ppc/ppce500_spin.c F: hw/gpio/mpc8xxx.c @@ -1443,13 +1481,16 @@ F: pc-bios/u-boot.e500 F: hw/intc/openpic_kvm.c F: include/hw/ppc/openpic_kvm.h F: docs/system/ppc/ppce500.rst +F: tests/functional/test_ppc64_e500.py +F: tests/functional/test_ppc_tuxrun.py mpc8544ds +M: Bernhard Beschow L: qemu-ppc@nongnu.org -S: Orphan +S: Odd Fixes F: hw/ppc/mpc8544ds.c F: hw/ppc/mpc8544_guts.c -F: tests/avocado/ppc_mpc8544ds.py +F: tests/functional/test_ppc_mpc8544ds.py New World (mac99) M: Mark Cave-Ayland @@ -1471,6 +1512,8 @@ F: include/hw/ppc/mac_dbdma.h F: include/hw/pci-host/uninorth.h F: include/hw/input/adb* F: pc-bios/qemu_vga.ndrv +F: tests/functional/test_ppc_mac.py +F: tests/functional/test_ppc64_mac99.py Old World (g3beige) M: Mark Cave-Ayland @@ -1486,6 +1529,7 @@ F: include/hw/intc/heathrow_pic.h F: include/hw/input/adb* F: include/hw/pci-host/grackle.h F: pc-bios/qemu_vga.ndrv +F: tests/functional/test_ppc_mac.py PReP M: Hervé Poussineau @@ -1502,12 +1546,11 @@ F: hw/dma/i82374.c F: hw/rtc/m48t59-isa.c F: include/hw/isa/pc87312.h F: include/hw/rtc/m48t59.h -F: tests/avocado/ppc_prep_40p.py +F: tests/functional/test_ppc_40p.py sPAPR (pseries) M: Nicholas Piggin -R: Daniel Henrique Barboza -R: David Gibson +M: Harsh Prateek Bora R: Harsh Prateek Bora L: qemu-ppc@nongnu.org S: Odd Fixes @@ -1526,13 +1569,13 @@ F: tests/qtest/spapr* F: tests/qtest/libqos/*spapr* F: tests/qtest/rtas* F: tests/qtest/libqos/rtas* -F: tests/avocado/ppc_pseries.py -F: tests/avocado/ppc_hv_tests.py +F: tests/functional/test_ppc64_pseries.py +F: tests/functional/test_ppc64_hv.py +F: tests/functional/test_ppc64_tuxrun.py PowerNV (Non-Virtualized) -M: Cédric Le Goater M: Nicholas Piggin -R: Frédéric Barrat +R: Aditya Gupta L: qemu-ppc@nongnu.org S: Odd Fixes F: docs/system/ppc/powernv.rst @@ -1540,10 +1583,14 @@ F: hw/ppc/pnv* F: hw/intc/pnv* F: hw/intc/xics_pnv.c F: hw/pci-host/pnv* +F: hw/ssi/pnv_spi.c F: include/hw/ppc/pnv* F: include/hw/pci-host/pnv* +F: include/hw/ssi/pnv_spi* 
F: pc-bios/skiboot.lid +F: pc-bios/pnv-pnor.bin F: tests/qtest/pnv* +F: tests/functional/test_ppc64_powernv.py pca955x M: Glenn Miles @@ -1558,7 +1605,7 @@ M: Edgar E. Iglesias L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/ppc/virtex_ml507.c -F: tests/avocado/ppc_virtex_ml507.py +F: tests/functional/test_ppc_virtex_ml507.py sam460ex M: BALATON Zoltan @@ -1570,10 +1617,11 @@ F: hw/pci-host/ppc440_pcix.c F: hw/display/sm501* F: hw/ide/sii3112.c F: hw/rtc/m41t80.c -F: pc-bios/canyonlands.dt[sb] +F: pc-bios/dtb/canyonlands.dt[sb] F: pc-bios/u-boot-sam460ex-20100605.bin F: roms/u-boot-sam460ex F: docs/system/ppc/amigang.rst +F: tests/functional/test_ppc_sam460ex.py pegasos2 M: BALATON Zoltan @@ -1591,10 +1639,10 @@ S: Maintained F: hw/ppc/amigaone.c F: hw/pci-host/articia.c F: include/hw/pci-host/articia.h +F: tests/functional/test_ppc_amiga.py Virtual Open Firmware (VOF) M: Alexey Kardashevskiy -R: David Gibson L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/ppc/spapr_vof* @@ -1615,7 +1663,6 @@ F: include/hw/riscv/opentitan.h F: include/hw/*/ibex_*.h Microchip PolarFire SoC Icicle Kit -M: Bin Meng L: qemu-riscv@nongnu.org S: Supported F: docs/system/riscv/microchip-icicle-kit.rst @@ -1642,27 +1689,40 @@ F: include/hw/char/shakti_uart.h SiFive Machines M: Alistair Francis -M: Bin Meng M: Palmer Dabbelt L: qemu-riscv@nongnu.org S: Supported F: docs/system/riscv/sifive_u.rst F: hw/*/*sifive*.c F: include/hw/*/*sifive*.h +F: tests/functional/test_riscv64_sifive_u.py + +AMD Microblaze-V Generic Board +M: Sai Pavan Boddu +S: Maintained +F: hw/riscv/microblaze-v-generic.c +F: docs/system/riscv/microblaze-v-generic.rst + +Xiangshan Kunminghu +M: Ran Wang +S: Maintained +F: docs/system/riscv/xiangshan-kunminghu.rst +F: hw/riscv/xiangshan_kmh.c +F: include/hw/riscv/xiangshan_kmh.h RX Machines ----------- rx-gdbsim -R: Yoshinori Sato +R: Yoshinori Sato S: Orphan F: docs/system/target-rx.rst F: hw/rx/rx-gdbsim.c -F: tests/avocado/machine_rx_gdbsim.py +F: tests/functional/test_rx_gdbsim.py SH4 Machines ------------ R2D -R: Yoshinori Sato +R: Yoshinori Sato R: Magnus Damm S: Odd Fixes F: hw/char/sh_serial.c @@ -1672,17 +1732,8 @@ F: hw/pci-host/sh_pci.c F: hw/timer/sh_timer.c F: include/hw/sh4/sh_intc.h F: include/hw/timer/tmu012.h - -Shix -R: Yoshinori Sato -R: Magnus Damm -S: Odd Fixes -F: hw/block/tc58128.c -F: hw/char/sh_serial.c -F: hw/sh4/shix.c -F: hw/intc/sh_intc.c -F: hw/timer/sh_timer.c -F: include/hw/sh4/sh_intc.h +F: tests/functional/test_sh4*_r2d.py +F: tests/functional/test_sh4_tuxrun.py SPARC Machines -------------- @@ -1700,6 +1751,7 @@ F: include/hw/nvram/sun_nvram.h F: include/hw/sparc/sparc32_dma.h F: include/hw/sparc/sun4m_iommu.h F: pc-bios/openbios-sparc32 +F: tests/functional/test_sparc_sun4m.py Sun4u M: Mark Cave-Ayland @@ -1712,7 +1764,8 @@ F: include/hw/pci-host/sabre.h F: hw/pci-bridge/simba.c F: include/hw/pci-bridge/simba.h F: pc-bios/openbios-sparc64 -F: tests/avocado/machine_sparc64_sun4u.py +F: tests/functional/test_sparc64_sun4u.py +F: tests/functional/test_sparc64_tuxrun.py Sun4v M: Artyom Tarasenko @@ -1735,22 +1788,26 @@ S390 Virtio-ccw M: Halil Pasic M: Christian Borntraeger M: Eric Farman +R: Matthew Rosato S: Supported F: hw/s390x/ F: include/hw/s390x/ F: configs/devices/s390x-softmmu/default.mak -F: tests/avocado/machine_s390_ccw_virtio.py +F: tests/functional/test_s390x_* T: git https://github.com/borntraeger/qemu.git s390-next L: qemu-s390x@nongnu.org S390-ccw boot M: Christian Borntraeger M: Thomas Huth +R: Jared Rossi +R: Zhuoying Cai S: Supported F: hw/s390x/ipl.* F: 
pc-bios/s390-ccw/ F: pc-bios/s390-ccw.img F: docs/devel/s390-dasd-ipl.rst +F: tests/functional/test_s390x_pxelinux.py T: git https://github.com/borntraeger/qemu.git s390-next L: qemu-s390x@nongnu.org @@ -1766,6 +1823,7 @@ S390 channel subsystem M: Halil Pasic M: Christian Borntraeger M: Eric Farman +R: Farhan Ali S: Supported F: hw/s390x/ccw-device.[ch] F: hw/s390x/css.c @@ -1786,6 +1844,7 @@ L: qemu-s390x@nongnu.org S390 SCLP-backed devices M: Halil Pasic M: Christian Borntraeger +R: Jason Herne S: Supported F: include/hw/s390x/event-facility.h F: include/hw/s390x/sclp.h @@ -1802,7 +1861,7 @@ F: hw/s390x/cpu-topology.c F: target/s390x/kvm/stsi-topology.c F: docs/devel/s390-cpu-topology.rst F: docs/system/s390x/cpu-topology.rst -F: tests/avocado/s390_topology.py +F: tests/functional/test_s390x_topology.py X86 Machines ------------ @@ -1830,6 +1889,12 @@ F: hw/isa/apm.c F: include/hw/isa/apm.h F: tests/unit/test-x86-topo.c F: tests/qtest/test-x86-cpuid-compat.c +F: tests/functional/test_i386_tuxrun.py +F: tests/functional/test_linux_initrd.py +F: tests/functional/test_mem_addr_space.py +F: tests/functional/test_pc_cpu_hotplug_props.py +F: tests/functional/test_x86_64_tuxrun.py +F: tests/functional/test_x86_cpu_model_versions.py PC Chipset M: Michael S. Tsirkin @@ -1874,6 +1939,16 @@ F: hw/i386/microvm.c F: include/hw/i386/microvm.h F: pc-bios/bios-microvm.bin +nitro-enclave +M: Alexander Graf +M: Dorjoy Chowdhury +S: Maintained +F: hw/core/eif.c +F: hw/core/eif.h +F: hw/i386/nitro_enclave.c +F: include/hw/i386/nitro_enclave.h +F: docs/system/i386/nitro-enclave.rst + Machine core M: Eduardo Habkost M: Marcel Apfelbaum @@ -1882,7 +1957,7 @@ R: Yanan Wang R: Zhao Liu S: Supported F: hw/core/cpu-common.c -F: hw/core/cpu-sysemu.c +F: hw/core/cpu-system.c F: hw/core/machine-qmp-cmds.c F: hw/core/machine.c F: hw/core/machine-smp.c @@ -1891,14 +1966,22 @@ F: hw/core/numa.c F: hw/cpu/cluster.c F: qapi/machine.json F: qapi/machine-common.json -F: qapi/machine-target.json F: include/hw/boards.h F: include/hw/core/cpu.h F: include/hw/cpu/cluster.h -F: include/sysemu/numa.h +F: include/system/numa.h +F: tests/functional/test_cpu_queries.py +F: tests/functional/test_empty_cpu_model.py F: tests/unit/test-smp-parse.c T: git https://gitlab.com/ehabkost/qemu.git machine-next +TargetInfo API +M: Pierrick Bouvier +M: Philippe Mathieu-Daudé +S: Supported +F: include/qemu/target-info*.h +F: target-info*.c + Xtensa Machines --------------- sim @@ -1917,6 +2000,7 @@ S: Maintained F: hw/xtensa/xtfpga.c F: hw/net/opencores_eth.c F: include/hw/xtensa/mx_pic.h +F: tests/functional/test_xtensa_lx60.py Devices ------- @@ -1931,8 +2015,8 @@ F: tests/qtest/intel-hda-test.c F: tests/qtest/fuzz-sb16-test.c Xilinx CAN -M: Vikram Garhwal M: Francisco Iglesias +M: Vikram Garhwal S: Maintained F: hw/net/can/xlnx-* F: include/hw/net/xlnx-* @@ -1989,10 +2073,11 @@ F: include/hw/hyperv/vmbus*.h OMAP M: Peter Maydell L: qemu-arm@nongnu.org -S: Maintained +S: Odd Fixes F: hw/*/omap* F: include/hw/arm/omap.h F: docs/system/arm/sx1.rst +F: tests/functional/test_arm_sx1.py IPack M: Alberto Garcia @@ -2011,6 +2096,7 @@ F: hw/pci-bridge/* F: qapi/pci.json F: docs/pci* F: docs/specs/*pci* +F: docs/system/sriov.rst PCIE DOE M: Huai-Cheng Kuo @@ -2019,6 +2105,12 @@ S: Supported F: include/hw/pci/pcie_doe.h F: hw/pci/pcie_doe.c +ARM PCI Hotplug +M: Gustavo Romero +L: qemu-arm@nongnu.org +S: Supported +F: tests/functional/test_aarch64_hotplug_pci.py + ACPI/SMBIOS M: Michael S. 
Tsirkin M: Igor Mammedov @@ -2058,13 +2150,13 @@ S: Supported F: hw/acpi/viot.c F: hw/acpi/viot.h -ACPI/AVOCADO/BIOSBITS +ACPI/FUNCTIONAL/BIOSBITS M: Ani Sinha M: Michael S. Tsirkin S: Supported -F: tests/avocado/acpi-bits/* -F: tests/avocado/acpi-bits.py -F: docs/devel/acpi-bits.rst +F: tests/functional/acpi-bits/* +F: tests/functional/test_acpi_bits.py +F: docs/devel/testing/acpi-bits.rst ACPI/HEST/GHES R: Dongjiu Geng @@ -2100,7 +2192,8 @@ S: Odd Fixes F: hw/net/ F: include/hw/net/ F: tests/qtest/virtio-net-test.c -F: docs/virtio-net-failover.rst +F: tests/functional/test_info_usernet.py +F: docs/system/virtio-net-failover.rst T: git https://github.com/jasowang/qemu.git net Parallel NOR Flash devices @@ -2151,7 +2244,6 @@ F: tests/qtest/sdhci-test.c USB S: Orphan F: hw/usb/* -F: stubs/usb-dev-stub.c F: tests/qtest/usb-*-test.c F: docs/system/devices/usb.rst F: include/hw/usb.h @@ -2167,11 +2259,19 @@ M: Alex Williamson M: Cédric Le Goater S: Supported F: hw/vfio/* +F: util/vfio-helpers.c F: include/hw/vfio/ -F: docs/igd-assign.txt F: docs/devel/migration/vfio.rst F: qapi/vfio.json +vfio-igd +M: Alex Williamson +M: Cédric Le Goater +M: Tomita Moeko +S: Supported +F: hw/vfio/igd.c +F: docs/igd-assign.txt + vfio-ccw M: Eric Farman M: Matthew Rosato @@ -2201,10 +2301,11 @@ M: Eric Auger M: Zhenzhong Duan S: Supported F: backends/iommufd.c -F: include/sysemu/iommufd.h +F: include/system/iommufd.h F: backends/host_iommu_device.c -F: include/sysemu/host_iommu_device.h +F: include/system/host_iommu_device.h F: include/qemu/chardev_open.h +F: include/hw/iommu.h F: util/chardev_open.c F: docs/devel/vfio-iommufd.rst @@ -2213,12 +2314,17 @@ M: Michael S. Tsirkin R: Stefano Garzarella S: Supported F: hw/*/*vhost* -F: docs/interop/vhost-user.json -F: docs/interop/vhost-user.rst +F: docs/interop/vhost-user* +F: docs/system/devices/vhost-user* F: contrib/vhost-user-*/ -F: backends/vhost-user.c -F: include/sysemu/vhost-user-backend.h +F: backends/*vhost* +F: include/system/vhost-user-backend.h +F: include/hw/virtio/vhost* +F: include/*/vhost* F: subprojects/libvhost-user/ +F: block/export/vhost-user* +F: util/vhost-user-server.c +F: net/vhost* vhost-shadow-virtqueue R: Eugenio Pérez @@ -2235,6 +2341,7 @@ F: net/vhost-user.c F: include/hw/virtio/ F: docs/devel/virtio* F: docs/devel/migration/virtio.rst +F: tests/functional/test_virtio_version.py virtio-balloon M: Michael S. 
Tsirkin @@ -2244,29 +2351,22 @@ F: docs/interop/virtio-balloon-stats.rst F: hw/virtio/virtio-balloon*.c F: include/hw/virtio/virtio-balloon.h F: system/balloon.c -F: include/sysemu/balloon.h +F: include/system/balloon.h +F: tests/qtest/virtio-balloon-test.c +F: tests/functional/test_virtio_balloon.py virtio-9p -M: Greg Kurz M: Christian Schoenebeck +R: Greg Kurz S: Maintained W: https://wiki.qemu.org/Documentation/9p F: hw/9pfs/ X: hw/9pfs/xen-9p* -X: hw/9pfs/9p-proxy* F: fsdev/ -X: fsdev/virtfs-proxy-helper.c F: tests/qtest/virtio-9p-test.c F: tests/qtest/libqos/virtio-9p* -T: git https://gitlab.com/gkurz/qemu.git 9p-next T: git https://github.com/cschoenebeck/qemu.git 9p.next -virtio-9p-proxy -F: hw/9pfs/9p-proxy* -F: fsdev/virtfs-proxy-helper.c -F: docs/tools/virtfs-proxy-helper.rst -S: Obsolete - virtio-blk M: Stefan Hajnoczi L: qemu-block@nongnu.org @@ -2276,6 +2376,7 @@ F: hw/block/virtio-blk.c F: hw/block/dataplane/* F: include/hw/virtio/virtio-blk-common.h F: tests/qtest/virtio-blk-test.c +F: tests/functional/test_x86_64_hotplug_blk.py T: git https://github.com/stefanha/qemu.git block virtio-ccw @@ -2333,10 +2434,20 @@ R: Amit Shah S: Supported F: hw/virtio/virtio-rng.c F: include/hw/virtio/virtio-rng.h -F: include/sysemu/rng*.h +F: include/system/rng*.h F: backends/rng*.c F: tests/qtest/virtio-rng-test.c +virtio-nsm +M: Alexander Graf +M: Dorjoy Chowdhury +S: Maintained +F: hw/virtio/cbor-helpers.c +F: hw/virtio/virtio-nsm.c +F: hw/virtio/virtio-nsm-pci.c +F: include/hw/virtio/cbor-helpers.h +F: include/hw/virtio/virtio-nsm.h + vhost-user-stubs M: Alex Bennée S: Maintained @@ -2384,9 +2495,11 @@ F: include/hw/virtio/virtio-crypto.h virtio based memory device M: David Hildenbrand S: Supported -F: hw/virtio/virtio-md-pci.c +F: hw/s390x/virtio-ccw-md.c +F: hw/s390x/virtio-ccw-md.h +F: hw/s390x/virtio-ccw-md-stubs.c +F: hw/virtio/virtio-md-*.c F: include/hw/virtio/virtio-md-pci.h -F: stubs/virtio-md-pci.c virtio-mem M: David Hildenbrand @@ -2395,6 +2508,8 @@ W: https://virtio-mem.gitlab.io/ F: hw/virtio/virtio-mem.c F: hw/virtio/virtio-mem-pci.h F: hw/virtio/virtio-mem-pci.c +F: hw/s390x/virtio-ccw-mem.c +F: hw/s390x/virtio-ccw-mem.h F: include/hw/virtio/virtio-mem.h virtio-snd @@ -2436,17 +2551,17 @@ F: tests/qtest/fuzz-megasas-test.c Network packet abstractions M: Dmitry Fleytman -R: Akihiko Odaki +R: Akihiko Odaki S: Maintained F: include/net/eth.h F: net/eth.c F: hw/net/net_rx_pkt* F: hw/net/net_tx_pkt* -Vmware +VMware M: Dmitry Fleytman S: Maintained -F: docs/specs/vmw_pvscsi-spec.txt +F: docs/specs/vmw_pvscsi-spec.rst F: hw/display/vmware_vga.c F: hw/net/vmxnet* F: hw/scsi/vmw_pvscsi* @@ -2454,7 +2569,7 @@ F: pc-bios/efi-vmxnet3.rom F: pc-bios/vgabios-vmware.bin F: roms/config.vga-vmware F: tests/qtest/vmxnet3-test.c -F: docs/specs/vwm_pvscsi-spec.rst +F: docs/specs/vmw_pvscsi-spec.rst Rocker M: Jiri Pirko @@ -2462,17 +2577,17 @@ S: Maintained F: hw/net/rocker/ F: qapi/rocker.json F: tests/rocker/ -F: docs/specs/rocker.txt +F: docs/specs/rocker.rst e1000x M: Dmitry Fleytman -R: Akihiko Odaki +R: Akihiko Odaki S: Maintained F: hw/net/e1000x* e1000e M: Dmitry Fleytman -R: Akihiko Odaki +R: Akihiko Odaki S: Maintained F: hw/net/e1000e* F: tests/qtest/fuzz-e1000e-test.c @@ -2480,12 +2595,12 @@ F: tests/qtest/e1000e-test.c F: tests/qtest/libqos/e1000e.* igb -M: Akihiko Odaki +M: Akihiko Odaki R: Sriram Yagnaraman -S: Maintained +S: Odd Fixes F: docs/system/devices/igb.rst F: hw/net/igb* -F: tests/avocado/netdev-ethtool.py +F: tests/functional/test_netdev_ethtool.py F: 
tests/qtest/igb-test.c F: tests/qtest/libqos/igb.c @@ -2507,8 +2622,7 @@ F: hw/i2c/i2c_mux_pca954x.c F: include/hw/i2c/i2c_mux_pca954x.h pcf8574 -M: Dmitrii Sharikhin -S: Maintained +S: Orphaned F: hw/gpio/pcf8574.c F: include/gpio/pcf8574.h @@ -2525,7 +2639,7 @@ M: Alex Bennée S: Maintained F: hw/core/guest-loader.c F: docs/system/guest-loader.rst -F: tests/avocado/boot_xen.py +F: tests/functional/test_aarch64_xen.py Intel Hexadecimal Object File Loader M: Su Hang @@ -2586,11 +2700,15 @@ F: hw/display/ramfb*.c F: include/hw/display/ramfb.h virtio-gpu -S: Orphan +M: Alex Bennée +R: Akihiko Odaki +R: Dmitry Osipenko +S: Odd Fixes F: hw/display/virtio-gpu* F: hw/display/virtio-vga.* F: include/hw/virtio/virtio-gpu.h F: docs/system/devices/virtio-gpu.rst +F: tests/functional/test_aarch64_virt_gpu.py vhost-user-blk M: Raphael Norwitz @@ -2626,6 +2744,11 @@ F: hw/display/edid* F: include/hw/display/edid.h F: qemu-edid.c +macOS PV Graphics (apple-gfx) +M: Phil Dennis-Jordan +S: Maintained +F: hw/display/apple-gfx* + PIIX4 South Bridge (i82371AB) M: Hervé Poussineau M: Philippe Mathieu-Daudé @@ -2646,7 +2769,7 @@ Firmware configuration (fw_cfg) M: Philippe Mathieu-Daudé R: Gerd Hoffmann S: Supported -F: docs/specs/fw_cfg.txt +F: docs/specs/fw_cfg.rst F: hw/nvram/fw_cfg*.c F: stubs/fw_cfg.c F: include/hw/nvram/fw_cfg.h @@ -2656,16 +2779,16 @@ F: tests/qtest/fw_cfg-test.c T: git https://github.com/philmd/qemu.git fw_cfg-next XIVE -M: Cédric Le Goater -R: Frédéric Barrat +R: Gautam Menghani L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/*/*xive* F: include/hw/*/*xive* +F: tests/qtest/*xive* F: docs/*/*xive* Renesas peripherals -R: Yoshinori Sato +R: Yoshinori Sato R: Magnus Damm S: Odd Fixes F: hw/char/renesas_sci.c @@ -2677,7 +2800,7 @@ F: include/hw/sh4/sh.h F: include/hw/timer/renesas_*.h Renesas RX peripherals -R: Yoshinori Sato +R: Yoshinori Sato S: Orphan F: hw/intc/rx_icu.c F: hw/rx/ @@ -2686,7 +2809,8 @@ F: include/hw/rx/ CAN bus subsystem and hardware M: Pavel Pisa -M: Vikram Garhwal +M: Francisco Iglesias +M: Vikram Garhwal S: Maintained W: https://canbus.pages.fel.cvut.cz/ F: net/can/* @@ -2717,6 +2841,7 @@ F: include/hw/timer/mips_gictimer.h S390 3270 device M: Halil Pasic M: Christian Borntraeger +R: Collin Walling S: Odd fixes F: include/hw/s390x/3270-ccw.h F: hw/char/terminal3270.c @@ -2726,6 +2851,7 @@ L: qemu-s390x@nongnu.org S390 diag 288 watchdog M: Halil Pasic M: Christian Borntraeger +R: Collin Walling S: Supported F: hw/watchdog/wdt_diag288.c F: include/hw/watchdog/wdt_diag288.h @@ -2734,6 +2860,7 @@ L: qemu-s390x@nongnu.org S390 storage key device M: Halil Pasic M: Christian Borntraeger +R: Jason Herne S: Supported F: hw/s390x/storage-keys.h F: hw/s390x/s390-skeys*.c @@ -2742,6 +2869,7 @@ L: qemu-s390x@nongnu.org S390 storage attribute device M: Halil Pasic M: Christian Borntraeger +R: Jason Herne S: Supported F: hw/s390x/storage-attributes.h F: hw/s390x/s390-stattrib*.c @@ -2751,6 +2879,7 @@ S390 floating interrupt controller M: Halil Pasic M: Christian Borntraeger M: David Hildenbrand +R: Jason Herne S: Supported F: hw/intc/s390_flic*.c F: include/hw/s390x/s390_flic.h @@ -2772,6 +2901,27 @@ F: hw/hyperv/hv-balloon*.h F: include/hw/hyperv/dynmem-proto.h F: include/hw/hyperv/hv-balloon.h +ivshmem-flat +M: Gustavo Romero +S: Maintained +F: hw/misc/ivshmem-flat.c +F: include/hw/misc/ivshmem-flat.h +F: docs/system/devices/ivshmem-flat.rst + +UEFI variable service +M: Gerd Hoffmann +S: Maintained +F: hw/uefi/ +F: include/hw/uefi/ + +VMapple +M: Alexander Graf +M: Phil Dennis-Jordan +S: 
Maintained +F: hw/vmapple/* +F: include/hw/vmapple/* +F: docs/system/arm/vmapple.rst + Subsystems ---------- Overall Audio backends @@ -2780,7 +2930,7 @@ M: Marc-André Lureau S: Odd Fixes F: audio/ X: audio/alsaaudio.c -X: audio/coreaudio.c +X: audio/coreaudio.m X: audio/dsound* X: audio/jackaudio.c X: audio/ossaudio.c @@ -2800,9 +2950,9 @@ Core Audio framework backend M: Gerd Hoffmann M: Philippe Mathieu-Daudé R: Christian Schoenebeck -R: Akihiko Odaki +R: Akihiko Odaki S: Odd Fixes -F: audio/coreaudio.c +F: audio/coreaudio.m DSound Audio backend M: Gerd Hoffmann @@ -2848,7 +2998,7 @@ F: hw/block/ F: qapi/block*.json F: qapi/transaction.json F: include/block/ -F: include/sysemu/block-*.h +F: include/system/block-*.h F: qemu-img* F: docs/tools/qemu-img.rst F: qemu-io* @@ -2922,6 +3072,16 @@ F: include/qemu/co-shared-resource.h T: git https://gitlab.com/jsnow/qemu.git jobs T: git https://gitlab.com/vsementsov/qemu.git block +CheckPoint and Restart (CPR) +R: Steve Sistare +S: Supported +F: hw/vfio/cpr* +F: include/hw/vfio/vfio-cpr.h +F: include/migration/cpr.h +F: migration/cpr* +F: tests/qtest/migration/cpr* +F: docs/devel/migration/CPR.rst + Compute Express Link M: Jonathan Cameron R: Fan Ni @@ -2968,6 +3128,7 @@ S: Supported F: include/qemu/option.h F: tests/unit/test-keyval.c F: tests/unit/test-qemu-opts.c +F: tests/functional/test_version.py F: util/keyval.c F: util/qemu-option.c @@ -2986,21 +3147,23 @@ M: Alistair Francis R: David Gibson S: Maintained F: system/device_tree.c -F: include/sysemu/device_tree.h +F: include/system/device_tree.h Dump S: Supported M: Marc-André Lureau +R: Ani Sinha F: dump/ F: hw/misc/vmcoreinfo.c F: include/hw/misc/vmcoreinfo.h F: include/qemu/win_dump_defs -F: include/sysemu/dump-arch.h -F: include/sysemu/dump.h +F: include/system/dump-arch.h +F: include/system/dump.h F: qapi/dump.json F: scripts/dump-guest-memory.py F: stubs/dump.c F: docs/specs/vmcoreinfo.rst +F: tests/qtest/vmcoreinfo-test.c Error reporting M: Markus Armbruster @@ -3029,6 +3192,7 @@ F: gdb-xml/ F: tests/tcg/multiarch/gdbstub/* F: scripts/feature_to_c.py F: scripts/probe-gdb-support.py +T: git https://gitlab.com/stsquad/qemu gdbstub/next Memory API M: Paolo Bonzini @@ -3036,18 +3200,19 @@ M: Peter Xu M: David Hildenbrand R: Philippe Mathieu-Daudé S: Supported -F: include/exec/ioport.h +F: include/system/ioport.h F: include/exec/memop.h -F: include/exec/memory.h -F: include/exec/ram_addr.h -F: include/exec/ramblock.h -F: include/sysemu/memory_mapping.h +F: include/system/memory.h +F: include/system/ram_addr.h +F: include/system/ramblock.h +F: include/system/memory_mapping.h F: system/dma-helpers.c F: system/ioport.c F: system/memory.c F: system/memory_mapping.c F: system/physmem.c -F: include/exec/memory-internal.h +F: system/memory-internal.h +F: system/ram-block-attributes.c F: scripts/coccinelle/memory-region-housekeeping.cocci Memory devices @@ -3055,13 +3220,12 @@ M: David Hildenbrand M: Igor Mammedov R: Xiao Guangrong S: Supported -F: hw/mem/memory-device.c +F: hw/mem/memory-device*.c F: hw/mem/nvdimm.c F: hw/mem/pc-dimm.c F: include/hw/mem/memory-device.h F: include/hw/mem/nvdimm.h F: include/hw/mem/pc-dimm.h -F: stubs/memory_device.c F: docs/nvdimm.txt SPICE @@ -3082,11 +3246,12 @@ F: include/ui/ F: qapi/ui.json F: util/drm.c F: docs/devel/ui.rst +F: tests/functional/test_vnc.py Cocoa graphics M: Peter Maydell M: Philippe Mathieu-Daudé -R: Akihiko Odaki +R: Akihiko Odaki S: Odd Fixes F: ui/cocoa.m @@ -3094,33 +3259,35 @@ Main loop M: Paolo Bonzini S: Maintained F: 
include/qemu/main-loop.h -F: include/sysemu/runstate.h -F: include/sysemu/runstate-action.h +F: include/system/runstate.h +F: include/system/runstate-action.h F: util/main-loop.c F: util/qemu-timer*.c F: system/vl.c F: system/main.c F: system/cpus.c -F: system/cpu-throttle.c F: system/cpu-timers.c F: system/runstate* +F: migration/cpu-throttle.c F: qapi/run-state.json Read, Copy, Update (RCU) M: Paolo Bonzini S: Maintained -F: docs/devel/lockcnt.txt -F: docs/devel/rcu.txt +F: docs/devel/lockcnt.rst +F: docs/devel/rcu.rst F: include/qemu/rcu*.h +F: include/qemu/lockcnt.h F: tests/unit/rcutorture.c F: tests/unit/test-rcu-*.c +F: util/lockcnt.c F: util/rcu.c Human Monitor (HMP) M: Dr. David Alan Gilbert S: Maintained F: monitor/monitor-internal.h -F: monitor/misc.c +F: monitor/hmp-target.c F: monitor/monitor.c F: monitor/hmp* F: hmp.h @@ -3156,7 +3323,7 @@ M: David Hildenbrand M: Igor Mammedov S: Maintained F: backends/hostmem*.c -F: include/sysemu/hostmem.h +F: include/system/hostmem.h F: docs/system/vm-templating.rst T: git https://gitlab.com/ehabkost/qemu.git machine-next @@ -3164,14 +3331,13 @@ Cryptodev Backends M: Gonglei M: zhenwei pi S: Maintained -F: include/sysemu/cryptodev*.h +F: include/system/cryptodev*.h F: backends/cryptodev*.c F: qapi/cryptodev.json Python library M: John Snow M: Cleber Rosa -R: Beraldo Leal S: Maintained F: python/ T: git https://gitlab.com/jsnow/qemu.git python @@ -3203,8 +3369,6 @@ S: Supported F: qapi/ X: qapi/*.json F: include/qapi/ -X: include/qapi/qmp/ -F: include/qapi/qmp/dispatch.h F: tests/qapi-schema/ F: tests/unit/test-*-visitor.c F: tests/unit/test-qapi-*.c @@ -3228,8 +3392,7 @@ QObject M: Markus Armbruster S: Supported F: qobject/ -F: include/qapi/qmp/ -X: include/qapi/qmp/dispatch.h +F: include/qobject/ F: scripts/coccinelle/qobject.cocci F: tests/unit/check-qdict.c F: tests/unit/check-qjson.c @@ -3244,7 +3407,7 @@ T: git https://repo.or.cz/qemu/armbru.git qapi-next QEMU Guest Agent M: Michael Roth -M: Konstantin Kostiuk +M: Kostiantyn Kostiuk S: Maintained F: qga/ F: contrib/systemd/qemu-guest-agent.service @@ -3255,7 +3418,7 @@ F: tests/*/test-qga* T: git https://github.com/mdroth/qemu.git qga QEMU Guest Agent Win32 -M: Konstantin Kostiuk +M: Kostiantyn Kostiuk S: Maintained F: qga/*win32* F: qga/vss-win32/ @@ -3309,16 +3472,16 @@ F: tests/qtest/qmp-cmd-test.c T: git https://repo.or.cz/qemu/armbru.git qapi-next qtest -M: Thomas Huth +M: Fabiano Rosas M: Laurent Vivier R: Paolo Bonzini S: Maintained F: system/qtest.c -F: include/sysemu/qtest.h +F: include/system/qtest.h F: accel/qtest/ F: tests/qtest/ -F: docs/devel/qgraph.rst -F: docs/devel/qtest.rst +F: docs/devel/testing/qgraph.rst +F: docs/devel/testing/qtest.rst X: tests/qtest/bios-tables-test* X: tests/qtest/migration-* @@ -3327,7 +3490,7 @@ M: Alexander Bulekov R: Paolo Bonzini R: Bandan Das R: Stefan Hajnoczi -R: Thomas Huth +R: Fabiano Rosas R: Darren Kenny R: Qiuhao Li S: Maintained @@ -3336,7 +3499,7 @@ F: tests/qtest/fuzz-*test.c F: tests/docker/test-fuzz F: scripts/oss-fuzz/ F: hw/mem/sparse-mem.c -F: docs/devel/fuzzing.rst +F: docs/devel/testing/fuzzing.rst Register API M: Alistair Francis @@ -3345,6 +3508,19 @@ F: hw/core/register.c F: include/hw/register.h F: include/hw/registerfields.h +Rust +M: Manos Pitsidianakis +S: Maintained +F: rust/qemu-api +F: rust/qemu-api-macros +F: rust/rustfmt.toml +F: scripts/get-wraps-from-cargo-registry.py + +Rust-related patches CC here +L: qemu-rust@nongnu.org +F: tests/docker/test-rust +F: rust/ + SLIRP M: Samuel Thibault S: Maintained @@ 
-3354,7 +3530,7 @@ T: git https://people.debian.org/~sthibault/qemu.git slirp Stats S: Orphan -F: include/sysemu/stats.h +F: include/system/stats.h F: stats/ F: qapi/stats.json @@ -3395,7 +3571,7 @@ S: Maintained F: system/tpm* F: hw/tpm/* F: include/hw/acpi/tpm.h -F: include/sysemu/tpm* +F: include/system/tpm* F: qapi/tpm.json F: backends/tpm/ F: tests/qtest/*tpm* @@ -3406,7 +3582,7 @@ SPDM M: Alistair Francis S: Maintained F: backends/spdm-socket.c -F: include/sysemu/spdm-socket.h +F: include/system/spdm-socket.h Checkpatch S: Odd Fixes @@ -3422,11 +3598,13 @@ F: include/migration/ F: include/qemu/userfaultfd.h F: migration/ F: scripts/vmstate-static-checker.py +F: tests/functional/test_migration.py F: tests/vmstate-static-checker-data/ +F: tests/qtest/migration/ F: tests/qtest/migration-* F: docs/devel/migration/ F: qapi/migration.json -F: tests/migration/ +F: tests/migration-stress/ F: util/userfaultfd.c X: migration/rdma* @@ -3435,15 +3613,16 @@ R: Li Zhijian R: Peter Xu S: Odd Fixes F: migration/rdma* +F: scripts/rdma-migration-helper.sh Migration dirty limit and dirty page rate M: Hyman Huang S: Maintained F: system/dirtylimit.c -F: include/sysemu/dirtylimit.h +F: include/system/dirtylimit.h F: migration/dirtyrate.c F: migration/dirtyrate.h -F: include/sysemu/dirtyrate.h +F: include/system/dirtyrate.h F: docs/devel/migration/dirty-limit.rst Detached LUKS header @@ -3471,7 +3650,7 @@ Seccomp M: Daniel P. Berrange S: Odd Fixes F: system/qemu-seccomp.c -F: include/sysemu/seccomp.h +F: include/system/seccomp.h F: tests/unit/test-seccomp.c Cryptography @@ -3566,7 +3745,7 @@ F: include/migration/failover.h F: docs/COLO-FT.txt COLO Proxy -M: Zhang Chen +M: Zhang Chen M: Li Zhijian S: Supported F: docs/colo-proxy.txt @@ -3576,21 +3755,20 @@ F: net/filter-mirror.c F: tests/qtest/test-filter* Record/replay -M: Pavel Dovgalyuk R: Paolo Bonzini +R: Alex Bennée W: https://wiki.qemu.org/Features/record-replay -S: Supported +S: Odd Fixes F: replay/* F: block/blkreplay.c F: net/filter-replay.c F: include/exec/replay-core.h -F: include/sysemu/replay.h +F: include/system/replay.h F: docs/devel/replay.rst F: docs/system/replay.rst F: stubs/replay.c -F: tests/avocado/replay_kernel.py -F: tests/avocado/replay_linux.py -F: tests/avocado/reverse_debugging.py +F: tests/functional/*reverse_debug*.py +F: tests/functional/*replay*.py F: qapi/replay.json IOVA Tree @@ -3601,7 +3779,7 @@ F: util/iova-tree.c elf2dmp M: Viktor Prutyanov -R: Akihiko Odaki +R: Akihiko Odaki S: Maintained F: contrib/elf2dmp/ @@ -3669,17 +3847,20 @@ VT-d Emulation M: Michael S. Tsirkin R: Jason Wang R: Yi Liu +R: Clément Mathieu--Drif S: Supported F: hw/i386/intel_iommu.c F: hw/i386/intel_iommu_internal.h F: include/hw/i386/intel_iommu.h +F: tests/functional/test_intel_iommu.py +F: tests/qtest/intel-iommu-test.c AMD-Vi Emulation S: Orphan F: hw/i386/amd_iommu.? 
OpenSBI Firmware -M: Bin Meng +L: qemu-riscv@nongnu.org S: Supported F: pc-bios/opensbi-* F: .gitlab-ci.d/opensbi.yml @@ -3701,7 +3882,7 @@ M: Peter Maydell S: Maintained F: include/hw/resettable.h F: include/hw/core/resetcontainer.h -F: include/sysemu/reset.h +F: include/system/reset.h F: hw/core/reset.c F: hw/core/resettable.c F: hw/core/resetcontainer.c @@ -3712,6 +3893,7 @@ Overall usermode emulation M: Riku Voipio S: Maintained F: accel/tcg/user-exec*.c +F: hw/core/cpu-user.c F: include/user/ F: common-user/ @@ -3732,7 +3914,7 @@ F: configs/targets/*linux-user.mak F: scripts/qemu-binfmt-conf.sh F: scripts/update-syscalltbl.sh F: scripts/update-mips-syscall-args.sh -F: scripts/gensyscalls.sh +F: tests/functional/test_arm_bflt.py Tiny Code Generator (TCG) ------------------------- @@ -3744,6 +3926,7 @@ F: include/tcg/ TCG Plugins M: Alex Bennée +T: git https://gitlab.com/stsquad/qemu plugins/next R: Alexandre Iooss R: Mahmoud Mandour R: Pierrick Bouvier @@ -3751,8 +3934,9 @@ S: Maintained F: docs/devel/tcg-plugins.rst F: plugins/ F: tests/tcg/plugins/ -F: tests/avocado/tcg_plugins.py +F: tests/functional/test_aarch64_tcg_plugins.py F: contrib/plugins/ +F: scripts/qemu-plugin-symbols.py AArch64 TCG target M: Richard Henderson @@ -3868,7 +4052,7 @@ F: nbd/ F: include/block/nbd* F: qemu-nbd.* F: blockdev-nbd.c -F: docs/interop/nbd.txt +F: docs/interop/nbd.rst F: docs/tools/qemu-nbd.rst F: tests/qemu-iotests/tests/*nbd* T: git https://repo.or.cz/qemu/ericb.git nbd @@ -3935,6 +4119,7 @@ M: Stefan Hajnoczi L: qemu-block@nongnu.org S: Supported F: block/blkverify.c +F: docs/devel/testing/blkverify.rst bochs M: Stefan Hajnoczi @@ -3961,7 +4146,8 @@ L: qemu-block@nongnu.org S: Supported F: block/parallels.c F: block/parallels-ext.c -F: docs/interop/parallels.txt +F: docs/interop/parallels.rst +F: docs/interop/prl-xml.rst T: git https://src.openvz.org/scm/~den/qemu.git parallels qed @@ -3969,6 +4155,7 @@ M: Stefan Hajnoczi L: qemu-block@nongnu.org S: Supported F: block/qed.c +F: docs/interop/qed_spec.rst raw M: Kevin Wolf @@ -3997,7 +4184,7 @@ M: Hanna Reitz L: qemu-block@nongnu.org S: Supported F: block/qcow2* -F: docs/interop/qcow2.txt +F: docs/interop/qcow2.rst qcow M: Kevin Wolf @@ -4011,6 +4198,7 @@ M: Hanna Reitz L: qemu-block@nongnu.org S: Supported F: block/blkdebug.c +F: docs/devel/testing/blkdebug.rst vpc M: Kevin Wolf @@ -4095,11 +4283,22 @@ F: hw/remote/proxy-memory-listener.c F: include/hw/remote/proxy-memory-listener.h F: hw/remote/iohub.c F: include/hw/remote/iohub.h -F: subprojects/libvfio-user F: hw/remote/vfio-user-obj.c F: include/hw/remote/vfio-user-obj.h F: hw/remote/iommu.c F: include/hw/remote/iommu.h +F: tests/functional/test_multiprocess.py + +VFIO-USER: +M: John Levon +M: Thanos Makatos +M: Cédric Le Goater +S: Supported +F: docs/interop/vfio-user.rst +F: docs/system/devices/vfio-user.rst +F: hw/vfio-user/* +F: include/hw/vfio-user/* +F: subprojects/libvfio-user EBPF: M: Jason Wang @@ -4114,10 +4313,9 @@ Build and test automation ------------------------- Build and test automation, general continuous integration M: Alex Bennée +T: git https://gitlab.com/stsquad/qemu testing/next M: Philippe Mathieu-Daudé M: Thomas Huth -R: Wainer dos Santos Moschetta -R: Beraldo Leal S: Maintained F: .github/workflows/lockdown.yml F: .gitlab-ci.yml @@ -4128,9 +4326,10 @@ F: scripts/ci/ F: tests/docker/ F: tests/vm/ F: tests/lcitool/ -F: tests/avocado/tuxrun_baselines.py +F: tests/functional/test_*_tuxrun.py F: scripts/archive-source.sh -F: docs/devel/testing.rst +F: 
docs/devel/testing/ci* +F: docs/devel/testing/main.rst W: https://gitlab.com/qemu-project/qemu/pipelines W: https://travis-ci.org/qemu/qemu @@ -4142,6 +4341,13 @@ F: .gitlab-ci.d/cirrus/freebsd* F: tests/vm/freebsd W: https://cirrus-ci.com/github/qemu/qemu +Functional testing framework +M: Thomas Huth +R: Philippe Mathieu-Daudé +R: Daniel P. Berrange +F: docs/devel/testing/functional.rst +F: tests/functional/qemu_test/ + Windows Hosted Continuous Integration M: Yonggang Luo S: Maintained @@ -4153,15 +4359,6 @@ R: Philippe Mathieu-Daudé S: Maintained F: tests/tcg/Makefile.target -Integration Testing with the Avocado framework -W: https://trello.com/b/6Qi1pxVn/avocado-qemu -R: Cleber Rosa -R: Philippe Mathieu-Daudé -R: Wainer dos Santos Moschetta -R: Beraldo Leal -S: Odd Fixes -F: tests/avocado/ - GitLab custom runner (Works On Arm Sponsored) M: Alex Bennée M: Philippe Mathieu-Daudé @@ -4192,7 +4389,6 @@ Meson M: Paolo Bonzini R: Marc-André Lureau R: Daniel P. Berrange -R: Thomas Huth R: Philippe Mathieu-Daudé S: Maintained F: meson.build @@ -4231,13 +4427,26 @@ S: Orphan F: po/*.po Sphinx documentation configuration and build machinery +M: John Snow M: Peter Maydell S: Maintained F: docs/conf.py F: docs/*/conf.py +F: docs/requirements.txt F: docs/sphinx/ F: docs/_templates/ F: docs/devel/docs.rst +F: docs/devel/qapi-domain.rst + +Rust build system integration +M: Manos Pitsidianakis +L: qemu-rust@nongnu.org +S: Maintained +F: scripts/rust/ +F: rust/.gitignore +F: rust/Kconfig +F: rust/meson.build +F: rust/wrapper.h Miscellaneous ------------- diff --git a/Makefile b/Makefile index 02a257584ba..74c2da20372 100644 --- a/Makefile +++ b/Makefile @@ -78,7 +78,8 @@ x := $(shell rm -rf meson-private meson-info meson-logs) endif # 1. ensure config-host.mak is up-to-date -config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh $(SRC_PATH)/VERSION +config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh \ + $(SRC_PATH)/pythondeps.toml $(SRC_PATH)/VERSION @echo config-host.mak is out-of-date, running configure @if test -f meson-private/coredata.dat; then \ ./config.status --skip-meson; \ @@ -186,11 +187,6 @@ SUBDIR_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(SUBDIRS))) $(SUBDIR_RULES): $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),) -ifneq ($(filter contrib/plugins, $(SUBDIRS)),) -.PHONY: plugins -plugins: contrib/plugins/all -endif - .PHONY: recurse-all recurse-clean recurse-all: $(addsuffix /all, $(SUBDIRS)) recurse-clean: $(addsuffix /clean, $(SUBDIRS)) @@ -211,10 +207,10 @@ clean: recurse-clean VERSION = $(shell cat $(SRC_PATH)/VERSION) -dist: qemu-$(VERSION).tar.bz2 +dist: qemu-$(VERSION).tar.xz -qemu-%.tar.bz2: - $(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.bz2,%,$@)" +qemu-%.tar.xz: + $(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.xz,%,$@)" distclean: clean recurse-distclean -$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || : @@ -231,6 +227,7 @@ distclean: clean recurse-distclean rm -Rf .sdk qemu-bundle find-src-path = find "$(SRC_PATH)" -path "$(SRC_PATH)/meson" -prune -o \ + -path "$(SRC_PATH)/.pc" -prune -o \ -type l -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \) .PHONY: ctags @@ -306,11 +303,6 @@ help: $(call print-help,cscope,Generate cscope index) $(call print-help,sparse,Run sparse on the QEMU source) @echo '' -ifneq ($(filter contrib/plugins, $(SUBDIRS)),) - @echo 'Plugin targets:' - 
$(call print-help,plugins,Build the example TCG plugins) - @echo '' -endif @echo 'Cleaning targets:' $(call print-help,clean,Remove most generated files but keep the config) $(call print-help,distclean,Remove all generated files) diff --git a/VERSION b/VERSION index ef7c56c155f..4149c39eec6 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -9.0.90 +10.1.0 diff --git a/accel/Kconfig b/accel/Kconfig index 794e0d18d21..4263cab7227 100644 --- a/accel/Kconfig +++ b/accel/Kconfig @@ -16,4 +16,5 @@ config KVM config XEN bool select FSDEV_9P if VIRTFS + select PCI_EXPRESS_GENERIC_BRIDGE select XEN_BUS diff --git a/accel/accel-blocker.c b/accel/accel-blocker.c index e083f24aa80..51132d1b8a0 100644 --- a/accel/accel-blocker.c +++ b/accel/accel-blocker.c @@ -25,10 +25,11 @@ */ #include "qemu/osdep.h" +#include "qemu/lockcnt.h" #include "qemu/thread.h" #include "qemu/main-loop.h" #include "hw/core/cpu.h" -#include "sysemu/accel-blocker.h" +#include "system/accel-blocker.h" static QemuLockCnt accel_in_ioctl_lock; static QemuEvent accel_in_ioctl_event; diff --git a/accel/accel-common.c b/accel/accel-common.c new file mode 100644 index 00000000000..850c5ab4b8e --- /dev/null +++ b/accel/accel-common.c @@ -0,0 +1,144 @@ +/* + * QEMU accel class, components common to system emulation and user mode + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * SPDX-License-Identifier: MIT + */ + +#include "qemu/osdep.h" +#include "qemu/accel.h" +#include "qemu/target-info.h" +#include "accel/accel-ops.h" +#include "accel/accel-cpu.h" +#include "accel/accel-cpu-ops.h" +#include "accel-internal.h" + +/* Lookup AccelClass from opt_name. Returns NULL if not found */ +AccelClass *accel_find(const char *opt_name) +{ + char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name); + AccelClass *ac = ACCEL_CLASS(module_object_class_by_name(class_name)); + g_free(class_name); + return ac; +} + +/* Return the name of the current accelerator */ +const char *current_accel_name(void) +{ + AccelClass *ac = ACCEL_GET_CLASS(current_accel()); + + return ac->name; +} + +static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque) +{ + CPUClass *cc = CPU_CLASS(klass); + AccelCPUClass *accel_cpu = opaque; + + /* + * The first callback allows accel-cpu to run initializations + * for the CPU, customizing CPU behavior according to the accelerator. + * + * The second one allows the CPU to customize the accel-cpu + * behavior according to the CPU. + * + * The second is currently only used by TCG, to specialize the + * TCGCPUOps depending on the CPU type. 
+ */ + cc->accel_cpu = accel_cpu; + if (accel_cpu->cpu_class_init) { + accel_cpu->cpu_class_init(cc); + } + if (cc->init_accel_cpu) { + cc->init_accel_cpu(accel_cpu, cc); + } +} + +/* initialize the arch-specific accel CpuClass interfaces */ +static void accel_init_cpu_interfaces(AccelClass *ac) +{ + const char *ac_name; /* AccelClass name */ + char *acc_name; /* AccelCPUClass name */ + ObjectClass *acc; /* AccelCPUClass */ + const char *cpu_resolving_type = target_cpu_type(); + + ac_name = object_class_get_name(OBJECT_CLASS(ac)); + g_assert(ac_name != NULL); + + acc_name = g_strdup_printf("%s-%s", ac_name, cpu_resolving_type); + acc = object_class_by_name(acc_name); + g_free(acc_name); + + if (acc) { + object_class_foreach(accel_init_cpu_int_aux, + cpu_resolving_type, false, acc); + } +} + +void accel_init_interfaces(AccelClass *ac) +{ + accel_init_ops_interfaces(ac); + accel_init_cpu_interfaces(ac); +} + +void accel_cpu_instance_init(CPUState *cpu) +{ + if (cpu->cc->accel_cpu && cpu->cc->accel_cpu->cpu_instance_init) { + cpu->cc->accel_cpu->cpu_instance_init(cpu); + } +} + +bool accel_cpu_common_realize(CPUState *cpu, Error **errp) +{ + AccelState *accel = current_accel(); + AccelClass *acc = ACCEL_GET_CLASS(accel); + + /* target specific realization */ + if (cpu->cc->accel_cpu + && cpu->cc->accel_cpu->cpu_target_realize + && !cpu->cc->accel_cpu->cpu_target_realize(cpu, errp)) { + return false; + } + + /* generic realization */ + if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) { + return false; + } + + return true; +} + +void accel_cpu_common_unrealize(CPUState *cpu) +{ + AccelState *accel = current_accel(); + AccelClass *acc = ACCEL_GET_CLASS(accel); + + /* generic unrealization */ + if (acc->cpu_common_unrealize) { + acc->cpu_common_unrealize(cpu); + } +} + +int accel_supported_gdbstub_sstep_flags(void) +{ + AccelState *accel = current_accel(); + AccelClass *acc = ACCEL_GET_CLASS(accel); + if (acc->gdbstub_supported_sstep_flags) { + return acc->gdbstub_supported_sstep_flags(accel); + } + return 0; +} + +static const TypeInfo accel_types[] = { + { + .name = TYPE_ACCEL, + .parent = TYPE_OBJECT, + .class_size = sizeof(AccelClass), + .instance_size = sizeof(AccelState), + .abstract = true, + }, +}; + +DEFINE_TYPES(accel_types) diff --git a/accel/accel-internal.h b/accel/accel-internal.h new file mode 100644 index 00000000000..d3a4422cbf7 --- /dev/null +++ b/accel/accel-internal.h @@ -0,0 +1,17 @@ +/* + * QEMU accel internal functions + * + * Copyright 2021 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef ACCEL_INTERNAL_H +#define ACCEL_INTERNAL_H + +#include "qemu/accel.h" + +void accel_init_ops_interfaces(AccelClass *ac); + +#endif /* ACCEL_SYSTEM_H */ diff --git a/accel/accel-qmp.c b/accel/accel-qmp.c new file mode 100644 index 00000000000..5fb70c6631f --- /dev/null +++ b/accel/accel-qmp.c @@ -0,0 +1,35 @@ +/* + * QMP commands related to accelerators + * + * Copyright (c) Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/accel.h" +#include "qapi/type-helpers.h" +#include "qapi/qapi-commands-accelerator.h" +#include "accel/accel-ops.h" +#include "accel/accel-cpu-ops.h" +#include "hw/core/cpu.h" + +HumanReadableText *qmp_x_accel_stats(Error **errp) +{ + AccelState *accel = current_accel(); + AccelClass *acc = ACCEL_GET_CLASS(accel); + g_autoptr(GString) buf = g_string_new(""); + + if (acc->get_stats) { + acc->get_stats(accel, buf); + } + if (acc->ops->get_vcpu_stats) { + CPUState *cpu; + + CPU_FOREACH(cpu) { + acc->ops->get_vcpu_stats(cpu, buf); + } + } + + return human_readable_text_from_str(buf); +} diff --git a/accel/accel-system.c b/accel/accel-system.c index f6c947dd821..1e97c64fdca 100644 --- a/accel/accel-system.c +++ b/accel/accel-system.c @@ -25,10 +25,15 @@ #include "qemu/osdep.h" #include "qemu/accel.h" +#include "qapi/qapi-commands-accelerator.h" +#include "monitor/monitor.h" #include "hw/boards.h" -#include "sysemu/cpus.h" +#include "hw/core/cpu.h" +#include "accel/accel-ops.h" +#include "accel/accel-cpu-ops.h" +#include "system/cpus.h" #include "qemu/error-report.h" -#include "accel-system.h" +#include "accel-internal.h" int accel_init_machine(AccelState *accel, MachineState *ms) { @@ -36,7 +41,7 @@ int accel_init_machine(AccelState *accel, MachineState *ms) int ret; ms->accelerator = accel; *(acc->allowed) = true; - ret = acc->init_machine(ms); + ret = acc->init_machine(accel, ms); if (ret < 0) { ms->accelerator = NULL; *(acc->allowed) = false; @@ -57,12 +62,21 @@ void accel_setup_post(MachineState *ms) AccelState *accel = ms->accelerator; AccelClass *acc = ACCEL_GET_CLASS(accel); if (acc->setup_post) { - acc->setup_post(ms, accel); + acc->setup_post(accel); + } +} + +void accel_pre_resume(MachineState *ms, bool step_pending) +{ + AccelState *accel = ms->accelerator; + AccelClass *acc = ACCEL_GET_CLASS(accel); + if (acc->pre_resume_vm) { + acc->pre_resume_vm(accel, step_pending); } } /* initialize the arch-independent accel operation interfaces */ -void accel_system_init_ops_interfaces(AccelClass *ac) +void accel_init_ops_interfaces(AccelClass *ac) { const char *ac_name; char *ops_name; @@ -73,30 +87,35 @@ void accel_system_init_ops_interfaces(AccelClass *ac) g_assert(ac_name != NULL); ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name); - ops = ACCEL_OPS_CLASS(module_object_class_by_name(ops_name)); oc = module_object_class_by_name(ops_name); if (!oc) { error_report("fatal: could not load module for type '%s'", ops_name); exit(1); } g_free(ops_name); - ops = ACCEL_OPS_CLASS(oc); /* * all accelerators need to define ops, providing at least a mandatory * non-NULL create_vcpu_thread operation. 
*/ - g_assert(ops != NULL); + ops = ACCEL_OPS_CLASS(oc); + ac->ops = ops; if (ops->ops_init) { - ops->ops_init(ops); + ops->ops_init(ac); } cpus_register_accel(ops); } +static void accel_ops_class_init(ObjectClass *oc, const void *data) +{ + monitor_register_hmp_info_hrt("accel", qmp_x_accel_stats); +} + static const TypeInfo accel_ops_type_info = { .name = TYPE_ACCEL_OPS, .parent = TYPE_OBJECT, .abstract = true, .class_size = sizeof(AccelOpsClass), + .class_init = accel_ops_class_init, }; static void accel_system_register_types(void) diff --git a/accel/accel-system.h b/accel/accel-system.h deleted file mode 100644 index 2d37c73c97b..00000000000 --- a/accel/accel-system.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * QEMU System Emulation accel internal functions - * - * Copyright 2021 SUSE LLC - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef ACCEL_SYSTEM_H -#define ACCEL_SYSTEM_H - -void accel_system_init_ops_interfaces(AccelClass *ac); - -#endif /* ACCEL_SYSTEM_H */ diff --git a/accel/accel-target.c b/accel/accel-target.c index 08626c00c2d..7fd392fbc4a 100644 --- a/accel/accel-target.c +++ b/accel/accel-target.c @@ -24,141 +24,7 @@ */ #include "qemu/osdep.h" -#include "qemu/accel.h" - -#include "cpu.h" -#include "hw/core/accel-cpu.h" - -#ifndef CONFIG_USER_ONLY -#include "accel-system.h" -#endif /* !CONFIG_USER_ONLY */ - -static const TypeInfo accel_type = { - .name = TYPE_ACCEL, - .parent = TYPE_OBJECT, - .class_size = sizeof(AccelClass), - .instance_size = sizeof(AccelState), -}; - -/* Lookup AccelClass from opt_name. Returns NULL if not found */ -AccelClass *accel_find(const char *opt_name) -{ - char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name); - AccelClass *ac = ACCEL_CLASS(module_object_class_by_name(class_name)); - g_free(class_name); - return ac; -} - -/* Return the name of the current accelerator */ -const char *current_accel_name(void) -{ - AccelClass *ac = ACCEL_GET_CLASS(current_accel()); - - return ac->name; -} - -static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque) -{ - CPUClass *cc = CPU_CLASS(klass); - AccelCPUClass *accel_cpu = opaque; - - /* - * The first callback allows accel-cpu to run initializations - * for the CPU, customizing CPU behavior according to the accelerator. - * - * The second one allows the CPU to customize the accel-cpu - * behavior according to the CPU. - * - * The second is currently only used by TCG, to specialize the - * TCGCPUOps depending on the CPU type. 
- */ - cc->accel_cpu = accel_cpu; - if (accel_cpu->cpu_class_init) { - accel_cpu->cpu_class_init(cc); - } - if (cc->init_accel_cpu) { - cc->init_accel_cpu(accel_cpu, cc); - } -} - -/* initialize the arch-specific accel CpuClass interfaces */ -static void accel_init_cpu_interfaces(AccelClass *ac) -{ - const char *ac_name; /* AccelClass name */ - char *acc_name; /* AccelCPUClass name */ - ObjectClass *acc; /* AccelCPUClass */ - - ac_name = object_class_get_name(OBJECT_CLASS(ac)); - g_assert(ac_name != NULL); - - acc_name = g_strdup_printf("%s-%s", ac_name, CPU_RESOLVING_TYPE); - acc = object_class_by_name(acc_name); - g_free(acc_name); - - if (acc) { - object_class_foreach(accel_init_cpu_int_aux, - CPU_RESOLVING_TYPE, false, acc); - } -} - -void accel_init_interfaces(AccelClass *ac) -{ -#ifndef CONFIG_USER_ONLY - accel_system_init_ops_interfaces(ac); -#endif /* !CONFIG_USER_ONLY */ - - accel_init_cpu_interfaces(ac); -} - -void accel_cpu_instance_init(CPUState *cpu) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - - if (cc->accel_cpu && cc->accel_cpu->cpu_instance_init) { - cc->accel_cpu->cpu_instance_init(cpu); - } -} - -bool accel_cpu_common_realize(CPUState *cpu, Error **errp) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - AccelState *accel = current_accel(); - AccelClass *acc = ACCEL_GET_CLASS(accel); - - /* target specific realization */ - if (cc->accel_cpu && cc->accel_cpu->cpu_target_realize - && !cc->accel_cpu->cpu_target_realize(cpu, errp)) { - return false; - } - - /* generic realization */ - if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) { - return false; - } - - return true; -} - -void accel_cpu_common_unrealize(CPUState *cpu) -{ - AccelState *accel = current_accel(); - AccelClass *acc = ACCEL_GET_CLASS(accel); - - /* generic unrealization */ - if (acc->cpu_common_unrealize) { - acc->cpu_common_unrealize(cpu); - } -} - -int accel_supported_gdbstub_sstep_flags(void) -{ - AccelState *accel = current_accel(); - AccelClass *acc = ACCEL_GET_CLASS(accel); - if (acc->gdbstub_supported_sstep_flags) { - return acc->gdbstub_supported_sstep_flags(); - } - return 0; -} +#include "accel/accel-cpu-target.h" static const TypeInfo accel_cpu_type = { .name = TYPE_ACCEL_CPU, @@ -169,7 +35,6 @@ static const TypeInfo accel_cpu_type = { static void register_accel_types(void) { - type_register_static(&accel_type); type_register_static(&accel_cpu_type); } diff --git a/accel/accel-user.c b/accel/accel-user.c index 22b6a1a1a89..7d192306a65 100644 --- a/accel/accel-user.c +++ b/accel/accel-user.c @@ -9,6 +9,12 @@ #include "qemu/osdep.h" #include "qemu/accel.h" +#include "accel-internal.h" + +void accel_init_ops_interfaces(AccelClass *ac) +{ + /* nothing */ +} AccelState *current_accel(void) { diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c index f32d8c8dc3b..03cfc0fa01e 100644 --- a/accel/dummy-cpus.c +++ b/accel/dummy-cpus.c @@ -13,10 +13,11 @@ #include "qemu/osdep.h" #include "qemu/rcu.h" -#include "sysemu/cpus.h" +#include "system/cpus.h" #include "qemu/guest-random.h" #include "qemu/main-loop.h" #include "hw/core/cpu.h" +#include "accel/dummy-cpus.h" static void *dummy_cpu_thread_fn(void *arg) { diff --git a/accel/dummy-cpus.h b/accel/dummy-cpus.h new file mode 100644 index 00000000000..d18dd0fdc51 --- /dev/null +++ b/accel/dummy-cpus.h @@ -0,0 +1,14 @@ +/* + * Dummy cpu thread code + * + * Copyright IBM, Corp. 
2011 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef ACCEL_DUMMY_CPUS_H +#define ACCEL_DUMMY_CPUS_H + +void dummy_start_vcpu_thread(CPUState *cpu); + +#endif diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c index ac08cfb9f32..d488d6afbac 100644 --- a/accel/hvf/hvf-accel-ops.c +++ b/accel/hvf/hvf-accel-ops.c @@ -48,23 +48,20 @@ */ #include "qemu/osdep.h" -#include "qemu/error-report.h" +#include "qemu/guest-random.h" #include "qemu/main-loop.h" -#include "exec/address-spaces.h" -#include "exec/exec-all.h" +#include "qemu/queue.h" #include "gdbstub/enums.h" -#include "sysemu/cpus.h" -#include "sysemu/hvf.h" -#include "sysemu/hvf_int.h" -#include "sysemu/runstate.h" -#include "qemu/guest-random.h" +#include "exec/cpu-common.h" +#include "hw/core/cpu.h" +#include "accel/accel-cpu-ops.h" +#include "system/cpus.h" +#include "system/hvf.h" +#include "system/hvf_int.h" +#include HVFState *hvf_state; -#ifdef __aarch64__ -#define HV_VM_DEFAULT NULL -#endif - /* Memory slots */ hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size) @@ -81,138 +78,17 @@ hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size) return NULL; } -struct mac_slot { - int present; - uint64_t size; - uint64_t gpa_start; - uint64_t gva; -}; - -struct mac_slot mac_slots[32]; - -static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags) -{ - struct mac_slot *macslot; - hv_return_t ret; - - macslot = &mac_slots[slot->slot_id]; - - if (macslot->present) { - if (macslot->size != slot->size) { - macslot->present = 0; - ret = hv_vm_unmap(macslot->gpa_start, macslot->size); - assert_hvf_ok(ret); - } - } - - if (!slot->size) { - return 0; - } - - macslot->present = 1; - macslot->gpa_start = slot->start; - macslot->size = slot->size; - ret = hv_vm_map(slot->mem, slot->start, slot->size, flags); - assert_hvf_ok(ret); - return 0; -} - -static void hvf_set_phys_mem(MemoryRegionSection *section, bool add) -{ - hvf_slot *mem; - MemoryRegion *area = section->mr; - bool writable = !area->readonly && !area->rom_device; - hv_memory_flags_t flags; - uint64_t page_size = qemu_real_host_page_size(); - - if (!memory_region_is_ram(area)) { - if (writable) { - return; - } else if (!memory_region_is_romd(area)) { - /* - * If the memory device is not in romd_mode, then we actually want - * to remove the hvf memory slot so all accesses will trap. - */ - add = false; - } - } - - if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) || - !QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) { - /* Not page aligned, so we can not map as RAM */ - add = false; - } - - mem = hvf_find_overlap_slot( - section->offset_within_address_space, - int128_get64(section->size)); - - if (mem && add) { - if (mem->size == int128_get64(section->size) && - mem->start == section->offset_within_address_space && - mem->mem == (memory_region_get_ram_ptr(area) + - section->offset_within_region)) { - return; /* Same region was attempted to register, go away. */ - } - } - - /* Region needs to be reset. set the size to 0 and remap it. */ - if (mem) { - mem->size = 0; - if (do_hvf_set_memory(mem, 0)) { - error_report("Failed to reset overlapping slot"); - abort(); - } - } - - if (!add) { - return; - } - - if (area->readonly || - (!memory_region_is_ram(area) && memory_region_is_romd(area))) { - flags = HV_MEMORY_READ | HV_MEMORY_EXEC; - } else { - flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC; - } - - /* Now make a new slot. 
*/ - int x; - - for (x = 0; x < hvf_state->num_slots; ++x) { - mem = &hvf_state->slots[x]; - if (!mem->size) { - break; - } - } - - if (x == hvf_state->num_slots) { - error_report("No free slots"); - abort(); - } - - mem->size = int128_get64(section->size); - mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region; - mem->start = section->offset_within_address_space; - mem->region = area; - - if (do_hvf_set_memory(mem, flags)) { - error_report("Error registering new memory slot"); - abort(); - } -} - static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) { - if (!cpu->accel->dirty) { + if (!cpu->vcpu_dirty) { hvf_get_registers(cpu); - cpu->accel->dirty = true; + cpu->vcpu_dirty = true; } } static void hvf_cpu_synchronize_state(CPUState *cpu) { - if (!cpu->accel->dirty) { + if (!cpu->vcpu_dirty) { run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL); } } @@ -221,7 +97,7 @@ static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu, run_on_cpu_data arg) { /* QEMU state is the reference, push it to HVF now and on next entry */ - cpu->accel->dirty = true; + cpu->vcpu_dirty = true; } static void hvf_cpu_synchronize_post_reset(CPUState *cpu) @@ -239,137 +115,16 @@ static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu) run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL); } -static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on) -{ - hvf_slot *slot; - - slot = hvf_find_overlap_slot( - section->offset_within_address_space, - int128_get64(section->size)); - - /* protect region against writes; begin tracking it */ - if (on) { - slot->flags |= HVF_SLOT_LOG; - hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, - HV_MEMORY_READ | HV_MEMORY_EXEC); - /* stop tracking region*/ - } else { - slot->flags &= ~HVF_SLOT_LOG; - hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, - HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC); - } -} - -static void hvf_log_start(MemoryListener *listener, - MemoryRegionSection *section, int old, int new) -{ - if (old != 0) { - return; - } - - hvf_set_dirty_tracking(section, 1); -} - -static void hvf_log_stop(MemoryListener *listener, - MemoryRegionSection *section, int old, int new) -{ - if (new != 0) { - return; - } - - hvf_set_dirty_tracking(section, 0); -} - -static void hvf_log_sync(MemoryListener *listener, - MemoryRegionSection *section) -{ - /* - * sync of dirty pages is handled elsewhere; just make sure we keep - * tracking the region. 
- */ - hvf_set_dirty_tracking(section, 1); -} - -static void hvf_region_add(MemoryListener *listener, - MemoryRegionSection *section) -{ - hvf_set_phys_mem(section, true); -} - -static void hvf_region_del(MemoryListener *listener, - MemoryRegionSection *section) -{ - hvf_set_phys_mem(section, false); -} - -static MemoryListener hvf_memory_listener = { - .name = "hvf", - .priority = MEMORY_LISTENER_PRIORITY_ACCEL, - .region_add = hvf_region_add, - .region_del = hvf_region_del, - .log_start = hvf_log_start, - .log_stop = hvf_log_stop, - .log_sync = hvf_log_sync, -}; - static void dummy_signal(int sig) { } -bool hvf_allowed; - -static int hvf_accel_init(MachineState *ms) -{ - int x; - hv_return_t ret; - HVFState *s; - - ret = hv_vm_create(HV_VM_DEFAULT); - assert_hvf_ok(ret); - - s = g_new0(HVFState, 1); - - s->num_slots = ARRAY_SIZE(s->slots); - for (x = 0; x < s->num_slots; ++x) { - s->slots[x].size = 0; - s->slots[x].slot_id = x; - } - - QTAILQ_INIT(&s->hvf_sw_breakpoints); - - hvf_state = s; - memory_listener_register(&hvf_memory_listener, &address_space_memory); - - return hvf_arch_init(); -} - -static inline int hvf_gdbstub_sstep_flags(void) -{ - return SSTEP_ENABLE | SSTEP_NOIRQ; -} - -static void hvf_accel_class_init(ObjectClass *oc, void *data) -{ - AccelClass *ac = ACCEL_CLASS(oc); - ac->name = "HVF"; - ac->init_machine = hvf_accel_init; - ac->allowed = &hvf_allowed; - ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags; -} - -static const TypeInfo hvf_accel_type = { - .name = TYPE_HVF_ACCEL, - .parent = TYPE_ACCEL, - .class_init = hvf_accel_class_init, -}; - -static void hvf_type_init(void) +static void do_hvf_get_vcpu_exec_time(CPUState *cpu, run_on_cpu_data arg) { - type_register_static(&hvf_accel_type); + int r = hv_vcpu_get_exec_time(cpu->accel->fd, arg.host_ptr); + assert_hvf_ok(r); } -type_init(hvf_type_init); - static void hvf_vcpu_destroy(CPUState *cpu) { hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd); @@ -402,8 +157,8 @@ static int hvf_init_vcpu(CPUState *cpu) #else r = hv_vcpu_create(&cpu->accel->fd, HV_VCPU_DEFAULT); #endif - cpu->accel->dirty = true; assert_hvf_ok(r); + cpu->vcpu_dirty = true; cpu->accel->guest_debug_enabled = false; @@ -469,6 +224,34 @@ static void hvf_start_vcpu_thread(CPUState *cpu) cpu, QEMU_THREAD_JOINABLE); } +struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc) +{ + struct hvf_sw_breakpoint *bp; + + QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) { + if (bp->pc == pc) { + return bp; + } + } + return NULL; +} + +int hvf_sw_breakpoints_active(CPUState *cpu) +{ + return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints); +} + +static void do_hvf_update_guest_debug(CPUState *cpu, run_on_cpu_data arg) +{ + hvf_arch_update_guest_debug(cpu); +} + +int hvf_update_guest_debug(CPUState *cpu) +{ + run_on_cpu(cpu, do_hvf_update_guest_debug, RUN_ON_CPU_NULL); + return 0; +} + static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len) { struct hvf_sw_breakpoint *bp; @@ -571,12 +354,28 @@ static void hvf_remove_all_breakpoints(CPUState *cpu) } } -static void hvf_accel_ops_class_init(ObjectClass *oc, void *data) +static void hvf_get_vcpu_stats(CPUState *cpu, GString *buf) +{ + uint64_t time_mach; /* units of mach_absolute_time() */ + + run_on_cpu(cpu, do_hvf_get_vcpu_exec_time, RUN_ON_CPU_HOST_PTR(&time_mach)); + + mach_timebase_info_data_t timebase; + mach_timebase_info(&timebase); + uint64_t time_ns = time_mach * timebase.numer / timebase.denom; + + g_string_append_printf(buf, "HVF cumulative execution 
time: %llu.%.3llus\n", + time_ns / 1000000000, + (time_ns % 1000000000) / 1000000); +} + +static void hvf_accel_ops_class_init(ObjectClass *oc, const void *data) { AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); ops->create_vcpu_thread = hvf_start_vcpu_thread; ops->kick_vcpu_thread = hvf_kick_vcpu_thread; + ops->handle_interrupt = generic_handle_interrupt; ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset; ops->synchronize_post_init = hvf_cpu_synchronize_post_init; @@ -588,7 +387,10 @@ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data) ops->remove_all_breakpoints = hvf_remove_all_breakpoints; ops->update_guest_debug = hvf_update_guest_debug; ops->supports_guest_debug = hvf_arch_supports_guest_debug; + + ops->get_vcpu_stats = hvf_get_vcpu_stats; }; + static const TypeInfo hvf_accel_ops_type = { .name = ACCEL_OPS_NAME("hvf"), @@ -596,8 +398,10 @@ static const TypeInfo hvf_accel_ops_type = { .class_init = hvf_accel_ops_class_init, .abstract = true, }; + static void hvf_accel_ops_register_types(void) { type_register_static(&hvf_accel_ops_type); } + type_init(hvf_accel_ops_register_types); diff --git a/accel/hvf/hvf-all.c b/accel/hvf/hvf-all.c index 6ca0850b20e..0a4b498e836 100644 --- a/accel/hvf/hvf-all.c +++ b/accel/hvf/hvf-all.c @@ -10,8 +10,25 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" -#include "sysemu/hvf.h" -#include "sysemu/hvf_int.h" +#include "accel/accel-ops.h" +#include "system/address-spaces.h" +#include "system/memory.h" +#include "system/hvf.h" +#include "system/hvf_int.h" +#include "hw/core/cpu.h" +#include "hw/boards.h" +#include "trace.h" + +bool hvf_allowed; + +struct mac_slot { + int present; + uint64_t size; + uint64_t gpa_start; + uint64_t gva; +}; + +struct mac_slot mac_slots[32]; const char *hvf_return_string(hv_return_t ret) { @@ -41,25 +58,257 @@ void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line, abort(); } -struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc) +static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags) { - struct hvf_sw_breakpoint *bp; + struct mac_slot *macslot; + hv_return_t ret; + + macslot = &mac_slots[slot->slot_id]; + + if (macslot->present) { + if (macslot->size != slot->size) { + macslot->present = 0; + trace_hvf_vm_unmap(macslot->gpa_start, macslot->size); + ret = hv_vm_unmap(macslot->gpa_start, macslot->size); + assert_hvf_ok(ret); + } + } + + if (!slot->size) { + return 0; + } + + macslot->present = 1; + macslot->gpa_start = slot->start; + macslot->size = slot->size; + trace_hvf_vm_map(slot->start, slot->size, slot->mem, flags, + flags & HV_MEMORY_READ ? 'R' : '-', + flags & HV_MEMORY_WRITE ? 'W' : '-', + flags & HV_MEMORY_EXEC ? 'X' : '-'); + ret = hv_vm_map(slot->mem, slot->start, slot->size, flags); + assert_hvf_ok(ret); + return 0; +} + +static void hvf_set_phys_mem(MemoryRegionSection *section, bool add) +{ + hvf_slot *mem; + MemoryRegion *area = section->mr; + bool writable = !area->readonly && !area->rom_device; + hv_memory_flags_t flags; + uint64_t page_size = qemu_real_host_page_size(); + + if (!memory_region_is_ram(area)) { + if (writable) { + return; + } else if (!memory_region_is_romd(area)) { + /* + * If the memory device is not in romd_mode, then we actually want + * to remove the hvf memory slot so all accesses will trap. 
+ */ + add = false; + } + } + + if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) || + !QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) { + /* Not page aligned, so we can not map as RAM */ + add = false; + } + + mem = hvf_find_overlap_slot( + section->offset_within_address_space, + int128_get64(section->size)); + + if (mem && add) { + if (mem->size == int128_get64(section->size) && + mem->start == section->offset_within_address_space && + mem->mem == (memory_region_get_ram_ptr(area) + + section->offset_within_region)) { + return; /* Same region was attempted to register, go away. */ + } + } + + /* Region needs to be reset. set the size to 0 and remap it. */ + if (mem) { + mem->size = 0; + if (do_hvf_set_memory(mem, 0)) { + error_report("Failed to reset overlapping slot"); + abort(); + } + } + + if (!add) { + return; + } + + if (area->readonly || + (!memory_region_is_ram(area) && memory_region_is_romd(area))) { + flags = HV_MEMORY_READ | HV_MEMORY_EXEC; + } else { + flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC; + } + + /* Now make a new slot. */ + int x; - QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) { - if (bp->pc == pc) { - return bp; + for (x = 0; x < hvf_state->num_slots; ++x) { + mem = &hvf_state->slots[x]; + if (!mem->size) { + break; } } - return NULL; + + if (x == hvf_state->num_slots) { + error_report("No free slots"); + abort(); + } + + mem->size = int128_get64(section->size); + mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region; + mem->start = section->offset_within_address_space; + mem->region = area; + + if (do_hvf_set_memory(mem, flags)) { + error_report("Error registering new memory slot"); + abort(); + } } -int hvf_sw_breakpoints_active(CPUState *cpu) +static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on) { - return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints); + hvf_slot *slot; + + slot = hvf_find_overlap_slot( + section->offset_within_address_space, + int128_get64(section->size)); + + /* protect region against writes; begin tracking it */ + if (on) { + slot->flags |= HVF_SLOT_LOG; + hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, + HV_MEMORY_READ | HV_MEMORY_EXEC); + /* stop tracking region*/ + } else { + slot->flags &= ~HVF_SLOT_LOG; + hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, + HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC); + } } -int hvf_update_guest_debug(CPUState *cpu) +static void hvf_log_start(MemoryListener *listener, + MemoryRegionSection *section, int old, int new) { - hvf_arch_update_guest_debug(cpu); - return 0; + if (old != 0) { + return; + } + + hvf_set_dirty_tracking(section, 1); +} + +static void hvf_log_stop(MemoryListener *listener, + MemoryRegionSection *section, int old, int new) +{ + if (new != 0) { + return; + } + + hvf_set_dirty_tracking(section, 0); +} + +static void hvf_log_sync(MemoryListener *listener, + MemoryRegionSection *section) +{ + /* + * sync of dirty pages is handled elsewhere; just make sure we keep + * tracking the region. 
+ */ + hvf_set_dirty_tracking(section, 1); +} + +static void hvf_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + hvf_set_phys_mem(section, true); +} + +static void hvf_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + hvf_set_phys_mem(section, false); +} + +static MemoryListener hvf_memory_listener = { + .name = "hvf", + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, + .region_add = hvf_region_add, + .region_del = hvf_region_del, + .log_start = hvf_log_start, + .log_stop = hvf_log_stop, + .log_sync = hvf_log_sync, +}; + +static int hvf_accel_init(AccelState *as, MachineState *ms) +{ + int x; + hv_return_t ret; + HVFState *s = HVF_STATE(as); + int pa_range = 36; + MachineClass *mc = MACHINE_GET_CLASS(ms); + + if (mc->hvf_get_physical_address_range) { + pa_range = mc->hvf_get_physical_address_range(ms); + if (pa_range < 0) { + return -EINVAL; + } + } + + ret = hvf_arch_vm_create(ms, (uint32_t)pa_range); + if (ret == HV_DENIED) { + error_report("Could not access HVF. Is the executable signed" + " with com.apple.security.hypervisor entitlement?"); + exit(1); + } + assert_hvf_ok(ret); + + s->num_slots = ARRAY_SIZE(s->slots); + for (x = 0; x < s->num_slots; ++x) { + s->slots[x].size = 0; + s->slots[x].slot_id = x; + } + + QTAILQ_INIT(&s->hvf_sw_breakpoints); + + hvf_state = s; + memory_listener_register(&hvf_memory_listener, &address_space_memory); + + return hvf_arch_init(); } + +static int hvf_gdbstub_sstep_flags(AccelState *as) +{ + return SSTEP_ENABLE | SSTEP_NOIRQ; +} + +static void hvf_accel_class_init(ObjectClass *oc, const void *data) +{ + AccelClass *ac = ACCEL_CLASS(oc); + ac->name = "HVF"; + ac->init_machine = hvf_accel_init; + ac->allowed = &hvf_allowed; + ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags; +} + +static const TypeInfo hvf_accel_type = { + .name = TYPE_HVF_ACCEL, + .parent = TYPE_ACCEL, + .instance_size = sizeof(HVFState), + .class_init = hvf_accel_class_init, +}; + +static void hvf_type_init(void) +{ + type_register_static(&hvf_accel_type); +} + +type_init(hvf_type_init); diff --git a/accel/hvf/trace-events b/accel/hvf/trace-events new file mode 100644 index 00000000000..2fd3e127c74 --- /dev/null +++ b/accel/hvf/trace-events @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# See docs/devel/tracing.rst for syntax documentation. 
+ +# hvf-accel-ops.c +hvf_vm_map(uint64_t paddr, uint64_t size, void *vaddr, uint8_t flags, const char r, const char w, const char e) "paddr:0x%016"PRIx64" size:0x%08"PRIx64" vaddr:%p flags:0x%02x/%c%c%c" +hvf_vm_unmap(uint64_t paddr, uint64_t size) "paddr:0x%016"PRIx64" size:0x%08"PRIx64 diff --git a/accel/hvf/trace.h b/accel/hvf/trace.h new file mode 100644 index 00000000000..83a1883343a --- /dev/null +++ b/accel/hvf/trace.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#include "trace/trace-accel_hvf.h" diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c index c239dfc87ac..b709187c7d7 100644 --- a/accel/kvm/kvm-accel-ops.c +++ b/accel/kvm/kvm-accel-ops.c @@ -16,10 +16,11 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" -#include "sysemu/kvm.h" -#include "sysemu/kvm_int.h" -#include "sysemu/runstate.h" -#include "sysemu/cpus.h" +#include "accel/accel-cpu-ops.h" +#include "system/kvm.h" +#include "system/kvm_int.h" +#include "system/runstate.h" +#include "system/cpus.h" #include "qemu/guest-random.h" #include "qapi/error.h" @@ -89,7 +90,7 @@ static int kvm_update_guest_debug_ops(CPUState *cpu) } #endif -static void kvm_accel_ops_class_init(ObjectClass *oc, void *data) +static void kvm_accel_ops_class_init(ObjectClass *oc, const void *data) { AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); @@ -100,6 +101,7 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, void *data) ops->synchronize_post_init = kvm_cpu_synchronize_post_init; ops->synchronize_state = kvm_cpu_synchronize_state; ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm; + ops->handle_interrupt = generic_handle_interrupt; #ifdef TARGET_KVM_HAVE_GUEST_DEBUG ops->update_guest_debug = kvm_update_guest_debug_ops; diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index 75d11a07b2b..890d5ea9f86 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -28,13 +28,15 @@ #include "hw/pci/msix.h" #include "hw/s390x/adapter.h" #include "gdbstub/enums.h" -#include "sysemu/kvm_int.h" -#include "sysemu/runstate.h" -#include "sysemu/cpus.h" -#include "sysemu/accel-blocker.h" +#include "system/kvm_int.h" +#include "system/runstate.h" +#include "system/cpus.h" +#include "system/accel-blocker.h" +#include "accel/accel-ops.h" #include "qemu/bswap.h" -#include "exec/memory.h" -#include "exec/ram_addr.h" +#include "exec/tswap.h" +#include "system/memory.h" +#include "system/ram_addr.h" #include "qemu/event_notifier.h" #include "qemu/main-loop.h" #include "trace.h" @@ -42,21 +44,26 @@ #include "qapi/visitor.h" #include "qapi/qapi-types-common.h" #include "qapi/qapi-visit-common.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "qemu/guest-random.h" -#include "sysemu/hw_accel.h" +#include "system/hw_accel.h" #include "kvm-cpus.h" -#include "sysemu/dirtylimit.h" +#include "system/dirtylimit.h" #include "qemu/range.h" #include "hw/boards.h" -#include "sysemu/stats.h" +#include "system/stats.h" /* This check must be after config-host.h is included */ #ifdef CONFIG_EVENTFD #include <sys/eventfd.h> #endif +#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) +# define KVM_HAVE_MCE_INJECTION 1 +#endif + + /* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We * need to use the real host PAGE_SIZE, as that's what KVM will use. 
*/ @@ -69,6 +76,11 @@ #define KVM_GUESTDBG_BLOCKIRQ 0 #endif +/* Default num of memslots to be allocated when VM starts */ +#define KVM_MEMSLOTS_NR_ALLOC_DEFAULT 16 +/* Default max allowed memslots if kernel reported nothing */ +#define KVM_MEMSLOTS_NR_MAX_DEFAULT 32 + struct KVMParkedVcpu { unsigned long vcpu_id; int kvm_fd; @@ -88,6 +100,7 @@ bool kvm_allowed; bool kvm_readonly_mem_allowed; bool kvm_vm_attributes_allowed; bool kvm_msi_use_devid; +bool kvm_pre_fault_memory_supported; static bool kvm_has_guest_debug; static int kvm_sstep_flags; static bool kvm_immediate_exit; @@ -165,11 +178,62 @@ void kvm_resample_fd_notify(int gsi) } } +/** + * kvm_slots_grow(): Grow the slots[] array in the KVMMemoryListener + * + * @kml: The KVMMemoryListener* to grow the slots[] array + * @nr_slots_new: The new size of slots[] array + * + * Returns: True if the array grows larger, false otherwise. + */ +static bool kvm_slots_grow(KVMMemoryListener *kml, unsigned int nr_slots_new) +{ + unsigned int i, cur = kml->nr_slots_allocated; + KVMSlot *slots; + + if (nr_slots_new > kvm_state->nr_slots_max) { + nr_slots_new = kvm_state->nr_slots_max; + } + + if (cur >= nr_slots_new) { + /* Big enough, no need to grow, or we reached max */ + return false; + } + + if (cur == 0) { + slots = g_new0(KVMSlot, nr_slots_new); + } else { + assert(kml->slots); + slots = g_renew(KVMSlot, kml->slots, nr_slots_new); + /* + * g_renew() doesn't initialize extended buffers, however kvm + * memslots require fields to be zero-initialized. E.g. pointers, + * memory_size field, etc. + */ + memset(&slots[cur], 0x0, sizeof(slots[0]) * (nr_slots_new - cur)); + } + + for (i = cur; i < nr_slots_new; i++) { + slots[i].slot = i; + } + + kml->slots = slots; + kml->nr_slots_allocated = nr_slots_new; + trace_kvm_slots_grow(cur, nr_slots_new); + + return true; +} + +static bool kvm_slots_double(KVMMemoryListener *kml) +{ + return kvm_slots_grow(kml, kml->nr_slots_allocated * 2); +} + unsigned int kvm_get_max_memslots(void) { KVMState *s = KVM_STATE(current_accel()); - return s->nr_slots; + return s->nr_slots_max; } unsigned int kvm_get_free_memslots(void) @@ -183,25 +247,36 @@ unsigned int kvm_get_free_memslots(void) if (!s->as[i].ml) { continue; } - used_slots = MAX(used_slots, s->as[i].ml->nr_used_slots); + used_slots = MAX(used_slots, s->as[i].ml->nr_slots_used); } kvm_slots_unlock(); - return s->nr_slots - used_slots; + return s->nr_slots_max - used_slots; } /* Called with KVMMemoryListener.slots_lock held */ static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml) { - KVMState *s = kvm_state; + unsigned int n; int i; - for (i = 0; i < s->nr_slots; i++) { + for (i = 0; i < kml->nr_slots_allocated; i++) { if (kml->slots[i].memory_size == 0) { return &kml->slots[i]; } } + /* + * If no free slots, try to grow first by doubling. Cache the old size + * here to avoid another round of search: if the grow succeeded, it + * means slots[] now must have the existing "n" slots occupied, + * followed by one or more free slots starting from slots[n]. 
+ */ + n = kml->nr_slots_allocated; + if (kvm_slots_double(kml)) { + return &kml->slots[n]; + } + return NULL; } @@ -222,10 +297,9 @@ static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml, hwaddr start_addr, hwaddr size) { - KVMState *s = kvm_state; int i; - for (i = 0; i < s->nr_slots; i++) { + for (i = 0; i < kml->nr_slots_allocated; i++) { KVMSlot *mem = &kml->slots[i]; if (start_addr == mem->start_addr && size == mem->memory_size) { @@ -267,7 +341,7 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram, int i, ret = 0; kvm_slots_lock(); - for (i = 0; i < s->nr_slots; i++) { + for (i = 0; i < kml->nr_slots_allocated; i++) { KVMSlot *mem = &kml->slots[i]; if (ram >= mem->ram && ram < mem->ram + mem->memory_size) { @@ -371,7 +445,22 @@ int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id) return kvm_fd; } -int kvm_create_vcpu(CPUState *cpu) +static void kvm_reset_parked_vcpus(KVMState *s) +{ + struct KVMParkedVcpu *cpu; + + QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) { + kvm_arch_reset_parked_vcpu(cpu->vcpu_id, cpu->kvm_fd); + } +} + +/** + * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU + * @cpu: QOM CPUState object for which KVM vCPU has to be fetched/created. + * + * @returns: 0 when success, errno (<0) when failed. + */ +static int kvm_create_vcpu(CPUState *cpu) { unsigned long vcpu_id = kvm_arch_vcpu_id(cpu); KVMState *s = kvm_state; @@ -390,7 +479,9 @@ int kvm_create_vcpu(CPUState *cpu) cpu->kvm_fd = kvm_fd; cpu->kvm_state = s; - cpu->vcpu_dirty = true; + if (!s->guest_state_protected) { + cpu->vcpu_dirty = true; + } cpu->dirty_pages = 0; cpu->throttle_us_per_full = 0; @@ -414,7 +505,7 @@ int kvm_create_and_park_vcpu(CPUState *cpu) static int do_kvm_destroy_vcpu(CPUState *cpu) { KVMState *s = kvm_state; - long mmap_size; + int mmap_size; int ret = 0; trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); @@ -431,16 +522,23 @@ static int do_kvm_destroy_vcpu(CPUState *cpu) goto err; } + /* If I am the CPU that created coalesced_mmio_ring, then discard it */ + if (s->coalesced_mmio_ring == (void *)cpu->kvm_run + PAGE_SIZE) { + s->coalesced_mmio_ring = NULL; + } + ret = munmap(cpu->kvm_run, mmap_size); if (ret < 0) { goto err; } + cpu->kvm_run = NULL; if (cpu->kvm_dirty_gfns) { ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes); if (ret < 0) { goto err; } + cpu->kvm_dirty_gfns = NULL; } kvm_park_vcpu(cpu); @@ -459,11 +557,16 @@ void kvm_destroy_vcpu(CPUState *cpu) int kvm_init_vcpu(CPUState *cpu, Error **errp) { KVMState *s = kvm_state; - long mmap_size; + int mmap_size; int ret; trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); + ret = kvm_arch_pre_create_vcpu(cpu, errp); + if (ret < 0) { + goto err; + } + ret = kvm_create_vcpu(cpu); if (ret < 0) { error_setg_errno(errp, -ret, @@ -519,6 +622,31 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp) return ret; } +void kvm_close(void) +{ + CPUState *cpu; + + if (!kvm_state || kvm_state->fd == -1) { + return; + } + + CPU_FOREACH(cpu) { + cpu_remove_sync(cpu); + close(cpu->kvm_fd); + cpu->kvm_fd = -1; + close(cpu->kvm_vcpu_stats_fd); + cpu->kvm_vcpu_stats_fd = -1; + } + + if (kvm_state && kvm_state->fd != -1) { + close(kvm_state->vmfd); + kvm_state->vmfd = -1; + close(kvm_state->fd); + kvm_state->fd = -1; + } + kvm_state = NULL; +} + /* * dirty pages logging control */ @@ -1071,7 +1199,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml, kvm_slots_lock(); - for (i = 0; i < s->nr_slots; i++) { + for (i = 0; i < kml->nr_slots_allocated; i++) { mem = &kml->slots[i]; /* 
Discard slots that are empty or do not overlap the section */ if (!mem->memory_size || @@ -1212,7 +1340,7 @@ static void kvm_unpoison_all(void *param) QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) { QLIST_REMOVE(page, list); - qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE); + qemu_ram_remap(page->ram_addr); g_free(page); } } @@ -1238,21 +1366,22 @@ bool kvm_hwpoisoned_mem(void) static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size) { -#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN - /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN - * endianness, but the memory core hands them in target endianness. - * For example, PPC is always treated as big-endian even if running - * on KVM and on PPC64LE. Correct here. - */ - switch (size) { - case 2: - val = bswap16(val); - break; - case 4: - val = bswap32(val); - break; + if (target_needs_bswap()) { + /* + * The kernel expects ioeventfd values in HOST_BIG_ENDIAN + * endianness, but the memory core hands them in target endianness. + * For example, PPC is always treated as big-endian even if running + * on KVM and on PPC64LE. Correct here, swapping back. + */ + switch (size) { + case 2: + val = bswap16(val); + break; + case 4: + val = bswap32(val); + break; + } } -#endif return val; } @@ -1450,7 +1579,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml, } start_addr += slot_size; size -= slot_size; - kml->nr_used_slots--; + kml->nr_slots_used--; } while (size); return; } @@ -1489,7 +1618,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml, ram_start_offset += slot_size; ram += slot_size; size -= slot_size; - kml->nr_used_slots++; + kml->nr_slots_used++; } while (size); } @@ -1525,11 +1654,7 @@ static void *kvm_dirty_ring_reaper_thread(void *data) r->reaper_iteration++; } - trace_kvm_dirty_ring_reaper("exit"); - - rcu_unregister_thread(); - - return NULL; + g_assert_not_reached(); } static void kvm_dirty_ring_reaper_init(KVMState *s) @@ -1719,12 +1844,8 @@ static void kvm_log_sync_global(MemoryListener *l, bool last_stage) /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */ kvm_dirty_ring_flush(); - /* - * TODO: make this faster when nr_slots is big while there are - * only a few used slots (small VMs). - */ kvm_slots_lock(); - for (i = 0; i < s->nr_slots; i++) { + for (i = 0; i < kml->nr_slots_allocated; i++) { mem = &kml->slots[i]; if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { kvm_slot_sync_dirty_pages(mem); @@ -1839,12 +1960,9 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, { int i; - kml->slots = g_new0(KVMSlot, s->nr_slots); kml->as_id = as_id; - for (i = 0; i < s->nr_slots; i++) { - kml->slots[i].slot = i; - } + kvm_slots_grow(kml, KVM_MEMSLOTS_NR_ALLOC_DEFAULT); QSIMPLEQ_INIT(&kml->transaction_add); QSIMPLEQ_INIT(&kml->transaction_del); @@ -2355,7 +2473,7 @@ static int kvm_recommended_vcpus(KVMState *s) static int kvm_max_vcpus(KVMState *s) { - int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS); + int ret = kvm_vm_check_extension(s, KVM_CAP_MAX_VCPUS); return (ret) ? 
ret : kvm_recommended_vcpus(s); } @@ -2385,12 +2503,111 @@ uint32_t kvm_dirty_ring_size(void) return kvm_state->kvm_dirty_ring_size; } -static int kvm_init(MachineState *ms) +static int do_kvm_create_vm(KVMState *s, int type) +{ + int ret; + + do { + ret = kvm_ioctl(s, KVM_CREATE_VM, type); + } while (ret == -EINTR); + + if (ret < 0) { + error_report("ioctl(KVM_CREATE_VM) failed: %s", strerror(-ret)); + +#ifdef TARGET_S390X + if (ret == -EINVAL) { + error_printf("Host kernel setup problem detected." + " Please verify:\n"); + error_printf("- for kernels supporting the" + " switch_amode or user_mode parameters, whether"); + error_printf(" user space is running in primary address space\n"); + error_printf("- for kernels supporting the vm.allocate_pgste" + " sysctl, whether it is enabled\n"); + } +#elif defined(TARGET_PPC) + if (ret == -EINVAL) { + error_printf("PPC KVM module is not loaded. Try modprobe kvm_%s.\n", + (type == 2) ? "pr" : "hv"); + } +#endif + } + + return ret; +} + +static int find_kvm_machine_type(MachineState *ms) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + int type; + + if (object_property_find(OBJECT(current_machine), "kvm-type")) { + g_autofree char *kvm_type; + kvm_type = object_property_get_str(OBJECT(current_machine), + "kvm-type", + &error_abort); + type = mc->kvm_type(ms, kvm_type); + } else if (mc->kvm_type) { + type = mc->kvm_type(ms, NULL); + } else { + type = kvm_arch_get_default_type(ms); + } + return type; +} + +static int kvm_setup_dirty_ring(KVMState *s) +{ + uint64_t dirty_log_manual_caps; + int ret; + + /* + * Enable KVM dirty ring if supported, otherwise fall back to + * dirty logging mode + */ + ret = kvm_dirty_ring_init(s); + if (ret < 0) { + return ret; + } + + /* + * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is + * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no + * page is wr-protected initially, which is against how kvm dirty ring is + * usage - kvm dirty ring requires all pages are wr-protected at the very + * beginning. Enabling this feature for dirty ring causes data corruption. + * + * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log, + * we may expect a higher stall time when starting the migration. In the + * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too: + * instead of clearing dirty bit, it can be a way to explicitly wr-protect + * guest pages. + */ + if (!s->kvm_dirty_ring_size) { + dirty_log_manual_caps = + kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2); + dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | + KVM_DIRTY_LOG_INITIALLY_SET); + s->manual_dirty_log_protect = dirty_log_manual_caps; + if (dirty_log_manual_caps) { + ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, + dirty_log_manual_caps); + if (ret) { + warn_report("Trying to enable capability %"PRIu64" of " + "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. " + "Falling back to the legacy mode. 
", + dirty_log_manual_caps); + s->manual_dirty_log_protect = 0; + } + } + } + + return 0; +} + +static int kvm_init(AccelState *as, MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); static const char upgrade_note[] = - "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n" - "(see http://sourceforge.net/projects/kvm).\n"; + "Please upgrade to at least kernel 4.5.\n"; const struct { const char *name; int num; @@ -2400,16 +2617,13 @@ static int kvm_init(MachineState *ms) { /* end of list */ } }, *nc = num_cpus; int soft_vcpus_limit, hard_vcpus_limit; - KVMState *s; + KVMState *s = KVM_STATE(as); const KVMCapabilityInfo *missing_cap; int ret; int type; - uint64_t dirty_log_manual_caps; qemu_mutex_init(&kml_slots_lock); - s = KVM_STATE(ms->accelerator); - /* * On systems where the kernel can support different base page * sizes, host page size may be different from TARGET_PAGE_SIZE, @@ -2427,7 +2641,7 @@ static int kvm_init(MachineState *ms) QLIST_INIT(&s->kvm_parked_vcpus); s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR); if (s->fd == -1) { - fprintf(stderr, "Could not access KVM kernel module: %m\n"); + error_report("Could not access KVM kernel module: %m"); ret = -errno; goto err; } @@ -2437,84 +2651,43 @@ static int kvm_init(MachineState *ms) if (ret >= 0) { ret = -EINVAL; } - fprintf(stderr, "kvm version too old\n"); + error_report("kvm version too old"); goto err; } if (ret > KVM_API_VERSION) { ret = -EINVAL; - fprintf(stderr, "kvm version not supported\n"); + error_report("kvm version not supported"); goto err; } - kvm_supported_memory_attributes = kvm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES); - kvm_guest_memfd_supported = - kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) && - kvm_check_extension(s, KVM_CAP_USER_MEMORY2) && - (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE); - kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT); - s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); + s->nr_slots_max = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); /* If unspecified, use the default value */ - if (!s->nr_slots) { - s->nr_slots = 32; - } - - s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE); - if (s->nr_as <= 1) { - s->nr_as = 1; - } - s->as = g_new0(struct KVMAs, s->nr_as); - - if (object_property_find(OBJECT(current_machine), "kvm-type")) { - g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine), - "kvm-type", - &error_abort); - type = mc->kvm_type(ms, kvm_type); - } else if (mc->kvm_type) { - type = mc->kvm_type(ms, NULL); - } else { - type = kvm_arch_get_default_type(ms); + if (!s->nr_slots_max) { + s->nr_slots_max = KVM_MEMSLOTS_NR_MAX_DEFAULT; } + type = find_kvm_machine_type(ms); if (type < 0) { ret = -EINVAL; goto err; } - do { - ret = kvm_ioctl(s, KVM_CREATE_VM, type); - } while (ret == -EINTR); - + ret = do_kvm_create_vm(s, type); if (ret < 0) { - fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret, - strerror(-ret)); - -#ifdef TARGET_S390X - if (ret == -EINVAL) { - fprintf(stderr, - "Host kernel setup problem detected. Please verify:\n"); - fprintf(stderr, "- for kernels supporting the switch_amode or" - " user_mode parameters, whether\n"); - fprintf(stderr, - " user space is running in primary address space\n"); - fprintf(stderr, - "- for kernels supporting the vm.allocate_pgste sysctl, " - "whether it is enabled\n"); - } -#elif defined(TARGET_PPC) - if (ret == -EINVAL) { - fprintf(stderr, - "PPC KVM module is not loaded. Try modprobe kvm_%s.\n", - (type == 2) ? 
"pr" : "hv"); - } -#endif goto err; } s->vmfd = ret; + s->nr_as = kvm_vm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE); + if (s->nr_as <= 1) { + s->nr_as = 1; + } + s->as = g_new0(struct KVMAs, s->nr_as); + /* check the vcpu limits */ soft_vcpus_limit = kvm_recommended_vcpus(s); hard_vcpus_limit = kvm_max_vcpus(s); @@ -2526,9 +2699,9 @@ static int kvm_init(MachineState *ms) nc->name, nc->num, soft_vcpus_limit); if (nc->num > hard_vcpus_limit) { - fprintf(stderr, "Number of %s cpus requested (%d) exceeds " - "the maximum cpus supported by KVM (%d)\n", - nc->name, nc->num, hard_vcpus_limit); + error_report("Number of %s cpus requested (%d) exceeds " + "the maximum cpus supported by KVM (%d)", + nc->name, nc->num, hard_vcpus_limit); exit(1); } } @@ -2542,8 +2715,8 @@ static int kvm_init(MachineState *ms) } if (missing_cap) { ret = -EINVAL; - fprintf(stderr, "kvm does not support %s\n%s", - missing_cap->name, upgrade_note); + error_report("kvm does not support %s", missing_cap->name); + error_printf("%s", upgrade_note); goto err; } @@ -2551,47 +2724,11 @@ static int kvm_init(MachineState *ms) s->coalesced_pio = s->coalesced_mmio && kvm_check_extension(s, KVM_CAP_COALESCED_PIO); - /* - * Enable KVM dirty ring if supported, otherwise fall back to - * dirty logging mode - */ - ret = kvm_dirty_ring_init(s); + ret = kvm_setup_dirty_ring(s); if (ret < 0) { goto err; } - /* - * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is - * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no - * page is wr-protected initially, which is against how kvm dirty ring is - * usage - kvm dirty ring requires all pages are wr-protected at the very - * beginning. Enabling this feature for dirty ring causes data corruption. - * - * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log, - * we may expect a higher stall time when starting the migration. In the - * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too: - * instead of clearing dirty bit, it can be a way to explicitly wr-protect - * guest pages. - */ - if (!s->kvm_dirty_ring_size) { - dirty_log_manual_caps = - kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2); - dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | - KVM_DIRTY_LOG_INITIALLY_SET); - s->manual_dirty_log_protect = dirty_log_manual_caps; - if (dirty_log_manual_caps) { - ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, - dirty_log_manual_caps); - if (ret) { - warn_report("Trying to enable capability %"PRIu64" of " - "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. " - "Falling back to the legacy mode. 
", - dirty_log_manual_caps); - s->manual_dirty_log_protect = 0; - } - } - } - #ifdef KVM_CAP_VCPU_EVENTS s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); #endif @@ -2603,7 +2740,7 @@ static int kvm_init(MachineState *ms) } kvm_readonly_mem_allowed = - (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); + (kvm_vm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); kvm_resamplefds_allowed = (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0); @@ -2637,6 +2774,13 @@ static int kvm_init(MachineState *ms) goto err; } + kvm_supported_memory_attributes = kvm_vm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES); + kvm_guest_memfd_supported = + kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) && + kvm_check_extension(s, KVM_CAP_USER_MEMORY2) && + (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE); + kvm_pre_fault_memory_supported = kvm_vm_check_extension(s, KVM_CAP_PRE_FAULT_MEMORY); + if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) { s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; } @@ -2766,9 +2910,15 @@ void kvm_flush_coalesced_mmio_buffer(void) static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) { if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) { - int ret = kvm_arch_get_registers(cpu); + Error *err = NULL; + int ret = kvm_arch_get_registers(cpu, &err); if (ret) { - error_report("Failed to get registers: %s", strerror(-ret)); + if (err) { + error_reportf_err(err, "Failed to synchronize CPU state: "); + } else { + error_report("Failed to get registers: %s", strerror(-ret)); + } + cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); vm_stop(RUN_STATE_INTERNAL_ERROR); } @@ -2786,9 +2936,15 @@ void kvm_cpu_synchronize_state(CPUState *cpu) static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg) { - int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE); + Error *err = NULL; + int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE, &err); if (ret) { - error_report("Failed to put registers after reset: %s", strerror(-ret)); + if (err) { + error_reportf_err(err, "Restoring resisters after reset: "); + } else { + error_report("Failed to put registers after reset: %s", + strerror(-ret)); + } cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); vm_stop(RUN_STATE_INTERNAL_ERROR); } @@ -2799,13 +2955,23 @@ static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg void kvm_cpu_synchronize_post_reset(CPUState *cpu) { run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL); + + if (cpu == first_cpu) { + kvm_reset_parked_vcpus(kvm_state); + } } static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg) { - int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); + Error *err = NULL; + int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE, &err); if (ret) { - error_report("Failed to put registers after init: %s", strerror(-ret)); + if (err) { + error_reportf_err(err, "Putting registers after init: "); + } else { + error_report("Failed to put registers after init: %s", + strerror(-ret)); + } exit(1); } @@ -2895,17 +3061,17 @@ int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private) MemoryRegion *mr; RAMBlock *rb; void *addr; - int ret = -1; + int ret = -EINVAL; trace_kvm_convert_memory(start, size, to_private ? 
"shared_to_private" : "private_to_shared"); if (!QEMU_PTR_IS_ALIGNED(start, qemu_real_host_page_size()) || !QEMU_PTR_IS_ALIGNED(size, qemu_real_host_page_size())) { - return -1; + return ret; } if (!size) { - return -1; + return ret; } section = memory_region_find(get_system_memory(), start, size); @@ -2923,7 +3089,7 @@ int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private) if (!to_private) { return 0; } - return -1; + return ret; } if (!memory_region_has_guest_memfd(mr)) { @@ -2958,6 +3124,15 @@ int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private) addr = memory_region_get_ram_ptr(mr) + section.offset_within_region; rb = qemu_ram_block_from_host(addr, false, &offset); + ret = ram_block_attributes_state_change(RAM_BLOCK_ATTRIBUTES(mr->rdm), + offset, size, to_private); + if (ret) { + error_report("Failed to notify the listener the state change of " + "(0x%"HWADDR_PRIx" + 0x%"HWADDR_PRIx") to %s", + start, size, to_private ? "private" : "shared"); + goto out_unref; + } + if (to_private) { if (rb->page_size != qemu_real_host_page_size()) { /* @@ -2995,10 +3170,15 @@ int kvm_cpu_exec(CPUState *cpu) MemTxAttrs attrs; if (cpu->vcpu_dirty) { - ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE); + Error *err = NULL; + ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE, &err); if (ret) { - error_report("Failed to put registers after init: %s", - strerror(-ret)); + if (err) { + error_reportf_err(err, "Putting registers after init: "); + } else { + error_report("Failed to put registers after init: %s", + strerror(-ret)); + } ret = -1; break; } @@ -3170,7 +3350,7 @@ int kvm_cpu_exec(CPUState *cpu) return ret; } -int kvm_ioctl(KVMState *s, int type, ...) +int kvm_ioctl(KVMState *s, unsigned long type, ...) { int ret; void *arg; @@ -3188,7 +3368,7 @@ int kvm_ioctl(KVMState *s, int type, ...) return ret; } -int kvm_vm_ioctl(KVMState *s, int type, ...) +int kvm_vm_ioctl(KVMState *s, unsigned long type, ...) { int ret; void *arg; @@ -3208,7 +3388,7 @@ int kvm_vm_ioctl(KVMState *s, int type, ...) return ret; } -int kvm_vcpu_ioctl(CPUState *cpu, int type, ...) +int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...) { int ret; void *arg; @@ -3228,7 +3408,7 @@ int kvm_vcpu_ioctl(CPUState *cpu, int type, ...) return ret; } -int kvm_device_ioctl(int fd, int type, ...) +int kvm_device_ioctl(int fd, unsigned long type, ...) { int ret; void *arg; @@ -3638,10 +3818,10 @@ int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target) return r; } -static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as, +static bool kvm_accel_has_memory(AccelState *accel, AddressSpace *as, hwaddr start_addr, hwaddr size) { - KVMState *kvm = KVM_STATE(ms->accelerator); + KVMState *kvm = KVM_STATE(accel); int i; for (i = 0; i < kvm->nr_as; ++i) { @@ -3832,12 +4012,12 @@ static void kvm_accel_instance_init(Object *obj) * Returns: SSTEP_* flags that KVM supports for guest debug. 
The * support is probed during kvm_init() */ -static int kvm_gdbstub_sstep_flags(void) +static int kvm_gdbstub_sstep_flags(AccelState *as) { return kvm_sstep_flags; } -static void kvm_accel_class_init(ObjectClass *oc, void *data) +static void kvm_accel_class_init(ObjectClass *oc, const void *data) { AccelClass *ac = ACCEL_CLASS(oc); ac->name = "KVM"; diff --git a/accel/kvm/kvm-cpus.h b/accel/kvm/kvm-cpus.h index 171b22fd294..688511151c8 100644 --- a/accel/kvm/kvm-cpus.h +++ b/accel/kvm/kvm-cpus.h @@ -10,8 +10,6 @@ #ifndef KVM_CPUS_H #define KVM_CPUS_H -#include "sysemu/cpus.h" - int kvm_init_vcpu(CPUState *cpu, Error **errp); int kvm_cpu_exec(CPUState *cpu); void kvm_destroy_vcpu(CPUState *cpu); diff --git a/accel/kvm/trace-events b/accel/kvm/trace-events index 37626c1ac5d..e43d18a8692 100644 --- a/accel/kvm/trace-events +++ b/accel/kvm/trace-events @@ -1,11 +1,11 @@ # See docs/devel/tracing.rst for syntax documentation. # kvm-all.c -kvm_ioctl(int type, void *arg) "type 0x%x, arg %p" -kvm_vm_ioctl(int type, void *arg) "type 0x%x, arg %p" -kvm_vcpu_ioctl(int cpu_index, int type, void *arg) "cpu_index %d, type 0x%x, arg %p" +kvm_ioctl(unsigned long type, void *arg) "type 0x%lx, arg %p" +kvm_vm_ioctl(unsigned long type, void *arg) "type 0x%lx, arg %p" +kvm_vcpu_ioctl(int cpu_index, unsigned long type, void *arg) "cpu_index %d, type 0x%lx, arg %p" kvm_run_exit(int cpu_index, uint32_t reason) "cpu_index %d, reason %d" -kvm_device_ioctl(int fd, int type, void *arg) "dev fd %d, type 0x%x, arg %p" +kvm_device_ioctl(int fd, unsigned long type, void *arg) "dev fd %d, type 0x%lx, arg %p" kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s" kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s" kvm_init_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu" @@ -36,3 +36,4 @@ kvm_io_window_exit(void) "" kvm_run_exit_system_event(int cpu_index, uint32_t event_type) "cpu_index %d, system_even_type %"PRIu32 kvm_convert_memory(uint64_t start, uint64_t size, const char *msg) "start 0x%" PRIx64 " size 0x%" PRIx64 " %s" kvm_memory_fault(uint64_t start, uint64_t size, uint64_t flags) "start 0x%" PRIx64 " size 0x%" PRIx64 " flags 0x%" PRIx64 +kvm_slots_grow(unsigned int old, unsigned int new) "%u -> %u" diff --git a/accel/meson.build b/accel/meson.build index 5eaeb683385..25b0f100b51 100644 --- a/accel/meson.build +++ b/accel/meson.build @@ -1,5 +1,6 @@ +common_ss.add(files('accel-common.c')) specific_ss.add(files('accel-target.c')) -system_ss.add(files('accel-system.c', 'accel-blocker.c')) +system_ss.add(files('accel-system.c', 'accel-blocker.c', 'accel-qmp.c')) user_ss.add(files('accel-user.c')) subdir('tcg') diff --git a/accel/qtest/qtest.c b/accel/qtest/qtest.c index bf14032d294..1d4337d698e 100644 --- a/accel/qtest/qtest.c +++ b/accel/qtest/qtest.c @@ -18,11 +18,14 @@ #include "qemu/option.h" #include "qemu/config-file.h" #include "qemu/accel.h" -#include "sysemu/qtest.h" -#include "sysemu/cpus.h" +#include "accel/accel-ops.h" +#include "accel/accel-cpu-ops.h" +#include "system/qtest.h" +#include "system/cpus.h" #include "qemu/guest-random.h" #include "qemu/main-loop.h" #include "hw/core/cpu.h" +#include "accel/dummy-cpus.h" static int64_t qtest_clock_counter; @@ -36,12 +39,12 @@ static void qtest_set_virtual_clock(int64_t count) qatomic_set_i64(&qtest_clock_counter, count); } -static int qtest_init_accel(MachineState *ms) +static int qtest_init_accel(AccelState *as, MachineState *ms) { 
return 0; } -static void qtest_accel_class_init(ObjectClass *oc, void *data) +static void qtest_accel_class_init(ObjectClass *oc, const void *data) { AccelClass *ac = ACCEL_CLASS(oc); ac->name = "QTest"; @@ -58,13 +61,14 @@ static const TypeInfo qtest_accel_type = { }; module_obj(TYPE_QTEST_ACCEL); -static void qtest_accel_ops_class_init(ObjectClass *oc, void *data) +static void qtest_accel_ops_class_init(ObjectClass *oc, const void *data) { AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); ops->create_vcpu_thread = dummy_start_vcpu_thread; ops->get_virtual_clock = qtest_get_virtual_clock; ops->set_virtual_clock = qtest_set_virtual_clock; + ops->handle_interrupt = generic_handle_interrupt; }; static const TypeInfo qtest_accel_ops_type = { diff --git a/accel/stubs/hvf-stub.c b/accel/stubs/hvf-stub.c new file mode 100644 index 00000000000..42eadc5ca92 --- /dev/null +++ b/accel/stubs/hvf-stub.c @@ -0,0 +1,12 @@ +/* + * HVF stubs for QEMU + * + * Copyright (c) Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "system/hvf.h" + +bool hvf_allowed; diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c index 8e0eb22e61c..68cd33ba973 100644 --- a/accel/stubs/kvm-stub.c +++ b/accel/stubs/kvm-stub.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "hw/pci/msi.h" KVMState *kvm_state; @@ -29,10 +29,6 @@ void kvm_flush_coalesced_mmio_buffer(void) { } -void kvm_cpu_synchronize_state(CPUState *cpu) -{ -} - bool kvm_has_sync_mmu(void) { return false; @@ -105,11 +101,6 @@ unsigned int kvm_get_free_memslots(void) return 0; } -void kvm_init_cpu_signals(CPUState *cpu) -{ - abort(); -} - bool kvm_arm_supports_user_irq(void) { return false; diff --git a/accel/stubs/meson.build b/accel/stubs/meson.build index 91a2d219258..9dfc4f9ddaf 100644 --- a/accel/stubs/meson.build +++ b/accel/stubs/meson.build @@ -2,5 +2,8 @@ system_stubs_ss = ss.source_set() system_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c')) system_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c')) system_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c')) +system_stubs_ss.add(when: 'CONFIG_HVF', if_false: files('hvf-stub.c')) +system_stubs_ss.add(when: 'CONFIG_NVMM', if_false: files('nvmm-stub.c')) +system_stubs_ss.add(when: 'CONFIG_WHPX', if_false: files('whpx-stub.c')) specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: system_stubs_ss) diff --git a/accel/stubs/nvmm-stub.c b/accel/stubs/nvmm-stub.c new file mode 100644 index 00000000000..ec14837501a --- /dev/null +++ b/accel/stubs/nvmm-stub.c @@ -0,0 +1,12 @@ +/* + * NVMM stubs for QEMU + * + * Copyright (c) Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "system/nvmm.h" + +bool nvmm_allowed; diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c index 7f4208fddf2..3b76b8b17c1 100644 --- a/accel/stubs/tcg-stub.c +++ b/accel/stubs/tcg-stub.c @@ -11,12 +11,7 @@ */ #include "qemu/osdep.h" -#include "exec/tb-flush.h" -#include "exec/exec-all.h" - -void tb_flush(CPUState *cpu) -{ -} +#include "exec/cpu-common.h" G_NORETURN void cpu_loop_exit(CPUState *cpu) { diff --git a/accel/stubs/whpx-stub.c b/accel/stubs/whpx-stub.c new file mode 100644 index 00000000000..c564c89fd0b --- /dev/null +++ b/accel/stubs/whpx-stub.c @@ -0,0 +1,12 @@ +/* + * WHPX stubs for QEMU + * + * Copyright (c) Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "system/whpx.h" + +bool 
whpx_allowed; diff --git a/accel/stubs/xen-stub.c b/accel/stubs/xen-stub.c index 7054965c480..cf929b644b7 100644 --- a/accel/stubs/xen-stub.c +++ b/accel/stubs/xen-stub.c @@ -6,7 +6,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/xen.h" +#include "system/xen.h" #include "qapi/qapi-commands-migration.h" bool xen_allowed; diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc index 95a5c5ff12d..6056598c23d 100644 --- a/accel/tcg/atomic_common.c.inc +++ b/accel/tcg/atomic_common.c.inc @@ -14,9 +14,20 @@ */ static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr, + uint64_t read_value_low, + uint64_t read_value_high, + uint64_t write_value_low, + uint64_t write_value_high, MemOpIdx oi) { - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW); + if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) { + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, + read_value_low, read_value_high, + oi, QEMU_PLUGIN_MEM_R); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, + write_value_low, write_value_high, + oi, QEMU_PLUGIN_MEM_W); + } } /* diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h index 1dc2151dafd..08a475c10ca 100644 --- a/accel/tcg/atomic_template.h +++ b/accel/tcg/atomic_template.h @@ -53,6 +53,14 @@ # error unsupported data size #endif +#if DATA_SIZE == 16 +# define VALUE_LOW(val) int128_getlo(val) +# define VALUE_HIGH(val) int128_gethi(val) +#else +# define VALUE_LOW(val) val +# define VALUE_HIGH(val) 0 +#endif + #if DATA_SIZE >= 4 # define ABI_TYPE DATA_TYPE #else @@ -69,7 +77,7 @@ # define END _le #endif -ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr, +ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, vaddr addr, ABI_TYPE cmpv, ABI_TYPE newv, MemOpIdx oi, uintptr_t retaddr) { @@ -83,12 +91,17 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr, ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv); #endif ATOMIC_MMU_CLEANUP; - atomic_trace_rmw_post(env, addr, oi); + atomic_trace_rmw_post(env, addr, + VALUE_LOW(ret), + VALUE_HIGH(ret), + VALUE_LOW(newv), + VALUE_HIGH(newv), + oi); return ret; } #if DATA_SIZE < 16 -ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val, +ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, @@ -97,19 +110,29 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val, ret = qatomic_xchg__nocheck(haddr, val); ATOMIC_MMU_CLEANUP; - atomic_trace_rmw_post(env, addr, oi); + atomic_trace_rmw_post(env, addr, + VALUE_LOW(ret), + VALUE_HIGH(ret), + VALUE_LOW(val), + VALUE_HIGH(val), + oi); return ret; } #define GEN_ATOMIC_HELPER(X) \ -ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \ ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ { \ DATA_TYPE *haddr, ret; \ haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \ ret = qatomic_##X(haddr, val); \ ATOMIC_MMU_CLEANUP; \ - atomic_trace_rmw_post(env, addr, oi); \ + atomic_trace_rmw_post(env, addr, \ + VALUE_LOW(ret), \ + VALUE_HIGH(ret), \ + VALUE_LOW(val), \ + VALUE_HIGH(val), \ + oi); \ return ret; \ } @@ -133,7 +156,7 @@ GEN_ATOMIC_HELPER(xor_fetch) * of CF_PARALLEL's value, we'll trace just a read and a write. 
*/ #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \ -ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \ ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ { \ XDATA_TYPE *haddr, cmp, old, new, val = xval; \ @@ -145,7 +168,12 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \ cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \ } while (cmp != old); \ ATOMIC_MMU_CLEANUP; \ - atomic_trace_rmw_post(env, addr, oi); \ + atomic_trace_rmw_post(env, addr, \ + VALUE_LOW(old), \ + VALUE_HIGH(old), \ + VALUE_LOW(xval), \ + VALUE_HIGH(xval), \ + oi); \ return RET; \ } @@ -174,7 +202,7 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new) # define END _be #endif -ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr, +ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, vaddr addr, ABI_TYPE cmpv, ABI_TYPE newv, MemOpIdx oi, uintptr_t retaddr) { @@ -188,12 +216,17 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr, ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv)); #endif ATOMIC_MMU_CLEANUP; - atomic_trace_rmw_post(env, addr, oi); + atomic_trace_rmw_post(env, addr, + VALUE_LOW(ret), + VALUE_HIGH(ret), + VALUE_LOW(newv), + VALUE_HIGH(newv), + oi); return BSWAP(ret); } #if DATA_SIZE < 16 -ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val, +ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, @@ -202,19 +235,29 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val, ret = qatomic_xchg__nocheck(haddr, BSWAP(val)); ATOMIC_MMU_CLEANUP; - atomic_trace_rmw_post(env, addr, oi); + atomic_trace_rmw_post(env, addr, + VALUE_LOW(ret), + VALUE_HIGH(ret), + VALUE_LOW(val), + VALUE_HIGH(val), + oi); return BSWAP(ret); } #define GEN_ATOMIC_HELPER(X) \ -ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \ ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ { \ DATA_TYPE *haddr, ret; \ haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \ ret = qatomic_##X(haddr, BSWAP(val)); \ ATOMIC_MMU_CLEANUP; \ - atomic_trace_rmw_post(env, addr, oi); \ + atomic_trace_rmw_post(env, addr, \ + VALUE_LOW(ret), \ + VALUE_HIGH(ret), \ + VALUE_LOW(val), \ + VALUE_HIGH(val), \ + oi); \ return BSWAP(ret); \ } @@ -235,7 +278,7 @@ GEN_ATOMIC_HELPER(xor_fetch) * of CF_PARALLEL's value, we'll trace just a read and a write. 
*/ #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \ -ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \ +ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \ ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ { \ XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \ @@ -247,7 +290,12 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \ ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \ } while (ldo != ldn); \ ATOMIC_MMU_CLEANUP; \ - atomic_trace_rmw_post(env, addr, oi); \ + atomic_trace_rmw_post(env, addr, \ + VALUE_LOW(old), \ + VALUE_HIGH(old), \ + VALUE_LOW(xval), \ + VALUE_HIGH(xval), \ + oi); \ return RET; \ } @@ -281,3 +329,5 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new) #undef SUFFIX #undef DATA_SIZE #undef SHIFT +#undef VALUE_LOW +#undef VALUE_HIGH diff --git a/accel/tcg/backend-ldst.h b/accel/tcg/backend-ldst.h new file mode 100644 index 00000000000..9c3a407a5af --- /dev/null +++ b/accel/tcg/backend-ldst.h @@ -0,0 +1,41 @@ +/* + * Internal memory barrier helpers for QEMU (target agnostic) + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef ACCEL_TCG_BACKEND_LDST_H +#define ACCEL_TCG_BACKEND_LDST_H + +#include "tcg-target-mo.h" + +/** + * tcg_req_mo: + * @guest_mo: Guest default memory order + * @type: TCGBar + * + * Filter @type to the barrier that is required for the guest + * memory ordering vs the host memory ordering. A non-zero + * result indicates that some barrier is required. + */ +#define tcg_req_mo(guest_mo, type) \ + ((type) & guest_mo & ~TCG_TARGET_DEFAULT_MO) + +/** + * cpu_req_mo: + * @cpu: CPUState + * @type: TCGBar + * + * If tcg_req_mo indicates a barrier for @type is required + * for the guest memory model, issue a host memory barrier. + */ +#define cpu_req_mo(cpu, type) \ + do { \ + if (tcg_req_mo(cpu->cc->tcg_ops->guest_default_memory_order, type)) { \ + smp_mb(); \ + } \ + } while (0) + +#endif diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c index bc9b1a260e8..c5c513f1e4a 100644 --- a/accel/tcg/cpu-exec-common.c +++ b/accel/tcg/cpu-exec-common.c @@ -18,13 +18,45 @@ */ #include "qemu/osdep.h" -#include "sysemu/cpus.h" -#include "sysemu/tcg.h" +#include "exec/log.h" +#include "system/tcg.h" #include "qemu/plugin.h" #include "internal-common.h" bool tcg_allowed; +bool tcg_cflags_has(CPUState *cpu, uint32_t flags) +{ + return cpu->tcg_cflags & flags; +} + +void tcg_cflags_set(CPUState *cpu, uint32_t flags) +{ + cpu->tcg_cflags |= flags; +} + +uint32_t curr_cflags(CPUState *cpu) +{ + uint32_t cflags = cpu->tcg_cflags; + + /* + * Record gdb single-step. We should be exiting the TB by raising + * EXCP_DEBUG, but to simplify other tests, disable chaining too. + * + * For singlestep and -d nochain, suppress goto_tb so that + * we can log -d cpu,exec after every TB. 
+ */ + if (unlikely(cpu->singlestep_enabled)) { + cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1; + } else if (qatomic_read(&one_insn_per_tb)) { + cflags |= CF_NO_GOTO_TB | 1; + } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { + cflags |= CF_NO_GOTO_TB; + } + + return cflags; +} + /* exit the current TB, but without causing any exception to be raised */ void cpu_loop_exit_noexc(CPUState *cpu) { diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index 8163295f34b..713bdb20564 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -21,26 +21,30 @@ #include "qemu/qemu-print.h" #include "qapi/error.h" #include "qapi/type-helpers.h" -#include "hw/core/tcg-cpu-ops.h" +#include "hw/core/cpu.h" +#include "accel/tcg/cpu-ops.h" +#include "accel/tcg/helper-retaddr.h" #include "trace.h" #include "disas/disas.h" -#include "exec/exec-all.h" +#include "exec/cpu-common.h" +#include "exec/cpu-interrupt.h" +#include "exec/page-protection.h" +#include "exec/mmap-lock.h" +#include "exec/translation-block.h" #include "tcg/tcg.h" #include "qemu/atomic.h" #include "qemu/rcu.h" #include "exec/log.h" #include "qemu/main-loop.h" -#include "sysemu/cpus.h" -#include "exec/cpu-all.h" -#include "sysemu/cpu-timers.h" +#include "exec/icount.h" #include "exec/replay-core.h" -#include "sysemu/tcg.h" +#include "system/tcg.h" #include "exec/helper-proto-common.h" #include "tb-jmp-cache.h" #include "tb-hash.h" #include "tb-context.h" +#include "tb-internal.h" #include "internal-common.h" -#include "internal-target.h" /* -icount align implementation. */ @@ -144,45 +148,10 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu) } #endif /* CONFIG USER ONLY */ -bool tcg_cflags_has(CPUState *cpu, uint32_t flags) -{ - return cpu->tcg_cflags & flags; -} - -void tcg_cflags_set(CPUState *cpu, uint32_t flags) -{ - cpu->tcg_cflags |= flags; -} - -uint32_t curr_cflags(CPUState *cpu) -{ - uint32_t cflags = cpu->tcg_cflags; - - /* - * Record gdb single-step. We should be exiting the TB by raising - * EXCP_DEBUG, but to simplify other tests, disable chaining too. - * - * For singlestep and -d nochain, suppress goto_tb so that - * we can log -d cpu,exec after every TB. - */ - if (unlikely(cpu->singlestep_enabled)) { - cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1; - } else if (qatomic_read(&one_insn_per_tb)) { - cflags |= CF_NO_GOTO_TB | 1; - } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { - cflags |= CF_NO_GOTO_TB; - } - - return cflags; -} - struct tb_desc { - vaddr pc; - uint64_t cs_base; + TCGTBCPUState s; CPUArchState *env; tb_page_addr_t page_addr0; - uint32_t flags; - uint32_t cflags; }; static bool tb_lookup_cmp(const void *p, const void *d) @@ -190,11 +159,11 @@ static bool tb_lookup_cmp(const void *p, const void *d) const TranslationBlock *tb = p; const struct tb_desc *desc = d; - if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) && + if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->s.pc) && tb_page_addr0(tb) == desc->page_addr0 && - tb->cs_base == desc->cs_base && - tb->flags == desc->flags && - tb_cflags(tb) == desc->cflags) { + tb->cs_base == desc->s.cs_base && + tb->flags == desc->s.flags && + tb_cflags(tb) == desc->s.cflags) { /* check next page if needed */ tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb); if (tb_phys_page1 == -1) { @@ -212,7 +181,7 @@ static bool tb_lookup_cmp(const void *p, const void *d) * is different for the new TB. Therefore any exception raised * here by the faulting lookup is not premature. 
*/ - virt_page1 = TARGET_PAGE_ALIGN(desc->pc); + virt_page1 = TARGET_PAGE_ALIGN(desc->s.pc); phys_page1 = get_page_addr_code(desc->env, virt_page1); if (tb_phys_page1 == phys_page1) { return true; @@ -222,59 +191,65 @@ static bool tb_lookup_cmp(const void *p, const void *d) return false; } -static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc, - uint64_t cs_base, uint32_t flags, - uint32_t cflags) +static TranslationBlock *tb_htable_lookup(CPUState *cpu, TCGTBCPUState s) { tb_page_addr_t phys_pc; struct tb_desc desc; uint32_t h; + desc.s = s; desc.env = cpu_env(cpu); - desc.cs_base = cs_base; - desc.flags = flags; - desc.cflags = cflags; - desc.pc = pc; - phys_pc = get_page_addr_code(desc.env, pc); + phys_pc = get_page_addr_code(desc.env, s.pc); if (phys_pc == -1) { return NULL; } desc.page_addr0 = phys_pc; - h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc), - flags, cs_base, cflags); + h = tb_hash_func(phys_pc, (s.cflags & CF_PCREL ? 0 : s.pc), + s.flags, s.cs_base, s.cflags); return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp); } -/* Might cause an exception, so have a longjmp destination ready */ -static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc, - uint64_t cs_base, uint32_t flags, - uint32_t cflags) +/** + * tb_lookup: + * @cpu: CPU that will execute the returned translation block + * @pc: guest PC + * @cs_base: arch-specific value associated with translation block + * @flags: arch-specific translation block flags + * @cflags: CF_* flags + * + * Look up a translation block inside the QHT using @pc, @cs_base, @flags and + * @cflags. Uses @cpu's tb_jmp_cache. Might cause an exception, so have a + * longjmp destination ready. + * + * Returns: an existing translation block or NULL. + */ +static inline TranslationBlock *tb_lookup(CPUState *cpu, TCGTBCPUState s) { TranslationBlock *tb; CPUJumpCache *jc; uint32_t hash; /* we should never be trying to look up an INVALID tb */ - tcg_debug_assert(!(cflags & CF_INVALID)); + tcg_debug_assert(!(s.cflags & CF_INVALID)); - hash = tb_jmp_cache_hash_func(pc); + hash = tb_jmp_cache_hash_func(s.pc); jc = cpu->tb_jmp_cache; tb = qatomic_read(&jc->array[hash].tb); if (likely(tb && - jc->array[hash].pc == pc && - tb->cs_base == cs_base && - tb->flags == flags && - tb_cflags(tb) == cflags)) { + jc->array[hash].pc == s.pc && + tb->cs_base == s.cs_base && + tb->flags == s.flags && + tb_cflags(tb) == s.cflags)) { goto hit; } - tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags); + tb = tb_htable_lookup(cpu, s); if (tb == NULL) { return NULL; } - jc->array[hash].pc = pc; + jc->array[hash].pc = s.pc; qatomic_set(&jc->array[hash].tb, tb); hit: @@ -282,7 +257,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc, * As long as tb is not NULL, the contents are consistent. Therefore, * the virtual PC has to match for non-CF_PCREL translations. 
*/ - assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc); + assert((tb_cflags(tb) & CF_PCREL) || tb->pc == s.pc); return tb; } @@ -299,14 +274,11 @@ static void log_cpu_exec(vaddr pc, CPUState *cpu, if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) { FILE *logfile = qemu_log_trylock(); if (logfile) { - int flags = 0; + int flags = CPU_DUMP_CCOP; if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) { flags |= CPU_DUMP_FPU; } -#if defined(TARGET_I386) - flags |= CPU_DUMP_CCOP; -#endif if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) { flags |= CPU_DUMP_VPU; } @@ -402,9 +374,6 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env) { CPUState *cpu = env_cpu(env); TranslationBlock *tb; - vaddr pc; - uint64_t cs_base; - uint32_t flags, cflags; /* * By definition we've just finished a TB, so I/O is OK. @@ -414,25 +383,36 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env) * The next TB, if we chain to it, will clear the flag again. */ cpu->neg.can_do_io = true; - cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); - cflags = curr_cflags(cpu); - if (check_for_breakpoints(cpu, pc, &cflags)) { + TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu); + s.cflags = curr_cflags(cpu); + + if (check_for_breakpoints(cpu, s.pc, &s.cflags)) { cpu_loop_exit(cpu); } - tb = tb_lookup(cpu, pc, cs_base, flags, cflags); + tb = tb_lookup(cpu, s); if (tb == NULL) { return tcg_code_gen_epilogue; } if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) { - log_cpu_exec(pc, cpu, tb); + log_cpu_exec(s.pc, cpu, tb); } return tb->tc.ptr; } +/* Return the current PC from CPU, which may be cached in TB. */ +static vaddr log_pc(CPUState *cpu, const TranslationBlock *tb) +{ + if (tb_cflags(tb) & CF_PCREL) { + return cpu->cc->get_pc(cpu); + } else { + return tb->pc; + } +} + /* Execute a TB, and fix up the CPU state afterwards if necessary */ /* * Disable CFI checks. @@ -567,11 +547,7 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu) void cpu_exec_step_atomic(CPUState *cpu) { - CPUArchState *env = cpu_env(cpu); TranslationBlock *tb; - vaddr pc; - uint64_t cs_base; - uint32_t flags, cflags; int tb_exit; if (sigsetjmp(cpu->jmp_env, 0) == 0) { @@ -580,13 +556,13 @@ void cpu_exec_step_atomic(CPUState *cpu) g_assert(!cpu->running); cpu->running = true; - cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); + TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu); + s.cflags = curr_cflags(cpu); - cflags = curr_cflags(cpu); /* Execute in a serial context. */ - cflags &= ~CF_PARALLEL; + s.cflags &= ~CF_PARALLEL; /* After 1 insn, return and release the exclusive lock. */ - cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1; + s.cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1; /* * No need to check_for_breakpoints here. * We only arrive in cpu_exec_step_atomic after beginning execution @@ -594,16 +570,16 @@ void cpu_exec_step_atomic(CPUState *cpu) * Any breakpoint for this insn will have been recognized earlier. 
*/ - tb = tb_lookup(cpu, pc, cs_base, flags, cflags); + tb = tb_lookup(cpu, s); if (tb == NULL) { mmap_lock(); - tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); + tb = tb_gen_code(cpu, s); mmap_unlock(); } cpu_exec_enter(cpu); /* execute the generated code */ - trace_exec_tb(tb, pc); + trace_exec_tb(tb, s.pc); cpu_tb_exec(cpu, tb, &tb_exit); cpu_exec_exit(cpu); } else { @@ -671,7 +647,6 @@ static inline void tb_add_jump(TranslationBlock *tb, int n, out_unlock_next: qemu_spin_unlock(&tb_next->jmp_lock); - return; } static inline bool cpu_handle_halt(CPUState *cpu) @@ -737,10 +712,10 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) * If user mode only, we simulate a fake exception which will be * handled outside the cpu execution loop. */ -#if defined(TARGET_I386) const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; - tcg_ops->fake_user_interrupt(cpu); -#endif /* TARGET_I386 */ + if (tcg_ops->fake_user_interrupt) { + tcg_ops->fake_user_interrupt(cpu); + } *ret = cpu->exception_index; cpu->exception_index = -1; return true; @@ -827,33 +802,22 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, cpu->exception_index = EXCP_HLT; bql_unlock(); return true; - } -#if defined(TARGET_I386) - else if (interrupt_request & CPU_INTERRUPT_INIT) { - X86CPU *x86_cpu = X86_CPU(cpu); - CPUArchState *env = &x86_cpu->env; - replay_interrupt(); - cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0); - do_cpu_init(x86_cpu); - cpu->exception_index = EXCP_HALTED; - bql_unlock(); - return true; - } -#else - else if (interrupt_request & CPU_INTERRUPT_RESET) { - replay_interrupt(); - cpu_reset(cpu); - bql_unlock(); - return true; - } -#endif /* !TARGET_I386 */ - /* The target hook has 3 exit conditions: - False when the interrupt isn't processed, - True when it is, and we should restart on a new TB, - and via longjmp via cpu_loop_exit. */ - else { + } else { const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; + if (interrupt_request & CPU_INTERRUPT_RESET) { + replay_interrupt(); + tcg_ops->cpu_exec_reset(cpu); + bql_unlock(); + return true; + } + + /* + * The target hook has 3 exit conditions: + * False when the interrupt isn't processed, + * True when it is, and we should restart on a new TB, + * and via longjmp via cpu_loop_exit. + */ if (tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) { if (!tcg_ops->need_replay_interrupt || tcg_ops->need_replay_interrupt(interrupt_request)) { @@ -960,11 +924,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc) while (!cpu_handle_interrupt(cpu, &last_tb)) { TranslationBlock *tb; - vaddr pc; - uint64_t cs_base; - uint32_t flags, cflags; - - cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags); + TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu); + s.cflags = cpu->cflags_next_tb; /* * When requested, use an exact setting for cflags for the next @@ -973,33 +934,32 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc) * have CF_INVALID set, -1 is a convenient invalid value that * does not require tcg headers for cpu_common_reset. 
*/ - cflags = cpu->cflags_next_tb; - if (cflags == -1) { - cflags = curr_cflags(cpu); + if (s.cflags == -1) { + s.cflags = curr_cflags(cpu); } else { cpu->cflags_next_tb = -1; } - if (check_for_breakpoints(cpu, pc, &cflags)) { + if (check_for_breakpoints(cpu, s.pc, &s.cflags)) { break; } - tb = tb_lookup(cpu, pc, cs_base, flags, cflags); + tb = tb_lookup(cpu, s); if (tb == NULL) { CPUJumpCache *jc; uint32_t h; mmap_lock(); - tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); + tb = tb_gen_code(cpu, s); mmap_unlock(); /* * We add the TB in the virtual pc hash table * for the fast lookup */ - h = tb_jmp_cache_hash_func(pc); + h = tb_jmp_cache_hash_func(s.pc); jc = cpu->tb_jmp_cache; - jc->array[h].pc = pc; + jc->array[h].pc = s.pc; qatomic_set(&jc->array[h].tb, tb); } @@ -1019,7 +979,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc) tb_add_jump(last_tb, tb_exit, tb); } - cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit); + cpu_loop_exec_tb(cpu, tb, s.pc, &last_tb, &tb_exit); /* Try to align the host and virtual clocks if the guest is in advance */ @@ -1074,11 +1034,17 @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp) if (!tcg_target_initialized) { /* Check mandatory TCGCPUOps handlers */ + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; #ifndef CONFIG_USER_ONLY - assert(cpu->cc->tcg_ops->cpu_exec_halt); - assert(cpu->cc->tcg_ops->cpu_exec_interrupt); + assert(tcg_ops->cpu_exec_halt); + assert(tcg_ops->cpu_exec_interrupt); + assert(tcg_ops->cpu_exec_reset); + assert(tcg_ops->pointer_wrap); #endif /* !CONFIG_USER_ONLY */ - cpu->cc->tcg_ops->initialize(); + assert(tcg_ops->translate_code); + assert(tcg_ops->get_tb_cpu_state); + assert(tcg_ops->mmu_index); + tcg_ops->initialize(); tcg_target_initialized = true; } diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 117b516739d..87e14bde4f2 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -19,15 +19,17 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" -#include "hw/core/tcg-cpu-ops.h" -#include "exec/exec-all.h" +#include "qemu/target-info.h" +#include "accel/tcg/cpu-ops.h" +#include "accel/tcg/iommu.h" +#include "accel/tcg/probe.h" #include "exec/page-protection.h" -#include "exec/memory.h" -#include "exec/cpu_ldst.h" +#include "system/memory.h" +#include "accel/tcg/cpu-ldst-common.h" +#include "accel/tcg/cpu-mmu-index.h" #include "exec/cputlb.h" #include "exec/tb-flush.h" -#include "exec/memory-internal.h" -#include "exec/ram_addr.h" +#include "system/ram_addr.h" #include "exec/mmu-access-type.h" #include "exec/tlb-common.h" #include "exec/vaddr.h" @@ -35,18 +37,21 @@ #include "qemu/error-report.h" #include "exec/log.h" #include "exec/helper-proto-common.h" +#include "exec/tlb-flags.h" #include "qemu/atomic.h" #include "qemu/atomic128.h" -#include "exec/translate-all.h" +#include "tb-internal.h" #include "trace.h" #include "tb-hash.h" +#include "tb-internal.h" +#include "tlb-bounds.h" #include "internal-common.h" -#include "internal-target.h" #ifdef CONFIG_PLUGIN #include "qemu/plugin-memory.h" #endif #include "tcg/tcg-ldst.h" -#include "tcg/oversized-guest.h" +#include "backend-ldst.h" + /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ /* #define DEBUG_TLB */ @@ -104,26 +109,15 @@ static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry, { /* Do not rearrange the CPUTLBEntry structure members. 
*/ QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) != - MMU_DATA_LOAD * sizeof(uint64_t)); + MMU_DATA_LOAD * sizeof(uintptr_t)); QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) != - MMU_DATA_STORE * sizeof(uint64_t)); + MMU_DATA_STORE * sizeof(uintptr_t)); QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) != - MMU_INST_FETCH * sizeof(uint64_t)); + MMU_INST_FETCH * sizeof(uintptr_t)); -#if TARGET_LONG_BITS == 32 - /* Use qatomic_read, in case of addr_write; only care about low bits. */ - const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type]; - ptr += HOST_BIG_ENDIAN; - return qatomic_read(ptr); -#else - const uint64_t *ptr = &entry->addr_idx[access_type]; -# if TCG_OVERSIZED_GUEST - return *ptr; -# else + const uintptr_t *ptr = &entry->addr_idx[access_type]; /* ofs might correspond to .addr_write, so use qatomic_read */ return qatomic_read(ptr); -# endif -#endif } static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry) @@ -779,19 +773,19 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, assert_cpu_is_self(cpu); + /* If no page bits are significant, this devolves to tlb_flush. */ + if (bits < TARGET_PAGE_BITS) { + tlb_flush_by_mmuidx(cpu, idxmap); + return; + } /* * If all bits are significant, and len is small, * this devolves to tlb_flush_page. */ - if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { + if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) { tlb_flush_page_by_mmuidx(cpu, addr, idxmap); return; } - /* If no page bits are significant, this devolves to tlb_flush. */ - if (bits < TARGET_PAGE_BITS) { - tlb_flush_by_mmuidx(cpu, idxmap); - return; - } /* This should already be page aligned */ d.addr = addr & TARGET_PAGE_MASK; @@ -817,19 +811,19 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, TLBFlushRangeData d, *p; CPUState *dst_cpu; + /* If no page bits are significant, this devolves to tlb_flush. */ + if (bits < TARGET_PAGE_BITS) { + tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap); + return; + } /* * If all bits are significant, and len is small, * this devolves to tlb_flush_page. */ - if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { + if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) { tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap); return; } - /* If no page bits are significant, this devolves to tlb_flush. */ - if (bits < TARGET_PAGE_BITS) { - tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap); - return; - } /* This should already be page aligned */ d.addr = addr & TARGET_PAGE_MASK; @@ -893,26 +887,17 @@ void tlb_unprotect_code(ram_addr_t ram_addr) * * Called with tlb_c.lock held. 
*/ -static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry, +static void tlb_reset_dirty_range_locked(CPUTLBEntryFull *full, CPUTLBEntry *ent, uintptr_t start, uintptr_t length) { - uintptr_t addr = tlb_entry->addr_write; - - if ((addr & (TLB_INVALID_MASK | TLB_MMIO | - TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) { - addr &= TARGET_PAGE_MASK; - addr += tlb_entry->addend; - if ((addr - start) < length) { -#if TARGET_LONG_BITS == 32 - uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write; - ptr_write += HOST_BIG_ENDIAN; - qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY); -#elif TCG_OVERSIZED_GUEST - tlb_entry->addr_write |= TLB_NOTDIRTY; -#else - qatomic_set(&tlb_entry->addr_write, - tlb_entry->addr_write | TLB_NOTDIRTY); -#endif + const uintptr_t addr = ent->addr_write; + int flags = addr | full->slow_flags[MMU_DATA_STORE]; + + flags &= TLB_INVALID_MASK | TLB_MMIO | TLB_DISCARD_WRITE | TLB_NOTDIRTY; + if (flags == 0) { + uintptr_t host = (addr & TARGET_PAGE_MASK) + ent->addend; + if ((host - start) < length) { + qatomic_set(&ent->addr_write, addr | TLB_NOTDIRTY); } } } @@ -931,23 +916,25 @@ static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s) * We must take tlb_c.lock to avoid racing with another vCPU update. The only * thing actually updated is the target TLB entry ->addr_write flags. */ -void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) +void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length) { int mmu_idx; qemu_spin_lock(&cpu->neg.tlb.c.lock); for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx]; + CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx]; + unsigned int n = tlb_n_entries(fast); unsigned int i; - unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]); for (i = 0; i < n; i++) { - tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i], - start1, length); + tlb_reset_dirty_range_locked(&desc->fulltlb[i], &fast->table[i], + start, length); } for (i = 0; i < CPU_VTLB_SIZE; i++) { - tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i], - start1, length); + tlb_reset_dirty_range_locked(&desc->vfulltlb[i], &desc->vtable[i], + start, length); } } qemu_spin_unlock(&cpu->neg.tlb.c.lock); @@ -1199,7 +1186,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr, hwaddr paddr, MemTxAttrs attrs, int prot, - int mmu_idx, uint64_t size) + int mmu_idx, vaddr size) { CPUTLBEntryFull full = { .phys_addr = paddr, @@ -1214,29 +1201,65 @@ void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr, void tlb_set_page(CPUState *cpu, vaddr addr, hwaddr paddr, int prot, - int mmu_idx, uint64_t size) + int mmu_idx, vaddr size) { tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED, prot, mmu_idx, size); } +/** + * tlb_hit_page: return true if page aligned @addr is a hit against the + * TLB entry @tlb_addr + * + * @addr: virtual address to test (must be page aligned) + * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value) + */ +static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr) +{ + return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)); +} + +/** + * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr + * + * @addr: virtual address to test (need not be page aligned) + * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value) + */ +static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr) +{ + return tlb_hit_page(tlb_addr, addr & 
TARGET_PAGE_MASK); +} + /* - * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the - * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must - * be discarded and looked up again (e.g. via tlb_entry()). + * Note: tlb_fill_align() can trigger a resize of the TLB. + * This means that all of the caller's prior references to the TLB table + * (e.g. CPUTLBEntry pointers) must be discarded and looked up again + * (e.g. via tlb_entry()). */ -static void tlb_fill(CPUState *cpu, vaddr addr, int size, - MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) +static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type, + int mmu_idx, MemOp memop, int size, + bool probe, uintptr_t ra) { - bool ok; + const TCGCPUOps *ops = cpu->cc->tcg_ops; + CPUTLBEntryFull full; - /* - * This is not a probe, so only valid return is success; failure - * should result in exception + longjmp to the cpu loop. - */ - ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size, - access_type, mmu_idx, false, retaddr); - assert(ok); + if (ops->tlb_fill_align) { + if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx, + memop, size, probe, ra)) { + tlb_set_page_full(cpu, mmu_idx, addr, &full); + return true; + } + } else { + /* Legacy behaviour is alignment before paging. */ + if (addr & ((1u << memop_alignment_bits(memop)) - 1)) { + ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra); + } + if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) { + return true; + } + } + assert(probe); + return false; } static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, @@ -1319,7 +1342,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { - tb_invalidate_phys_range_fast(ram_addr, size, retaddr); + tb_invalidate_phys_range_fast(cpu, ram_addr, size, retaddr); } /* @@ -1351,22 +1374,22 @@ static int probe_access_internal(CPUState *cpu, vaddr addr, if (!tlb_hit_page(tlb_addr, page_addr)) { if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) { - if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type, - mmu_idx, nonfault, retaddr)) { + if (!tlb_fill_align(cpu, addr, access_type, mmu_idx, + 0, fault_size, nonfault, retaddr)) { /* Non-faulting page table read failed. */ *phost = NULL; *pfull = NULL; return TLB_INVALID_MASK; } - /* TLB resize via tlb_fill may have moved the entry. */ + /* TLB resize via tlb_fill_align may have moved the entry. */ index = tlb_index(cpu, mmu_idx, addr); entry = tlb_entry(cpu, mmu_idx, addr); /* * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately, - * to force the next access through tlb_fill. We've just - * called tlb_fill, so we know that this entry *is* valid. + * to force the next access through tlb_fill_align. We've just + * called tlb_fill_align, so we know that this entry *is* valid. 
*/ flags &= ~TLB_INVALID_MASK; } @@ -1491,7 +1514,7 @@ void *probe_access(CPUArchState *env, vaddr addr, int size, return host; } -void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, +void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr, MMUAccessType access_type, int mmu_idx) { CPUTLBEntryFull *full; @@ -1607,16 +1630,17 @@ typedef struct MMULookupLocals { * mmu_lookup1: translate one page * @cpu: generic cpu state * @data: lookup parameters + * @memop: memory operation for the access, or 0 * @mmu_idx: virtual address context * @access_type: load/store/code * @ra: return address into tcg generated code, or 0 * * Resolve the translation for the one page at @data.addr, filling in * the rest of @data with the results. If the translation fails, - * tlb_fill will longjmp out. Return true if the softmmu tlb for + * tlb_fill_align will longjmp out. Return true if the softmmu tlb for * @mmu_idx may have resized. */ -static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, +static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop, int mmu_idx, MMUAccessType access_type, uintptr_t ra) { vaddr addr = data->addr; @@ -1631,7 +1655,8 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, if (!tlb_hit(tlb_addr, addr)) { if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, addr & TARGET_PAGE_MASK)) { - tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra); + tlb_fill_align(cpu, addr, access_type, mmu_idx, + memop, data->size, false, ra); maybe_resized = true; index = tlb_index(cpu, mmu_idx, addr); entry = tlb_entry(cpu, mmu_idx, addr); @@ -1643,6 +1668,25 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW); flags |= full->slow_flags[access_type]; + if (likely(!maybe_resized)) { + /* Alignment has not been checked by tlb_fill_align. */ + int a_bits = memop_alignment_bits(memop); + + /* + * This alignment check differs from the one above, in that this is + * based on the atomicity of the operation. The intended use case is + * the ARM memory type field of each PTE, where access to pages with + * Device memory type require alignment. + */ + if (unlikely(flags & TLB_CHECK_ALIGNED)) { + int at_bits = memop_atomicity_bits(memop); + a_bits = MAX(a_bits, at_bits); + } + if (unlikely(addr & ((1 << a_bits) - 1))) { + cpu_unaligned_access(cpu, addr, access_type, mmu_idx, ra); + } + } + data->full = full; data->flags = flags; /* Compute haddr speculatively; depending on flags it might be invalid. 
*/ @@ -1699,7 +1743,6 @@ static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data, static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, uintptr_t ra, MMUAccessType type, MMULookupLocals *l) { - unsigned a_bits; bool crosspage; int flags; @@ -1708,12 +1751,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, tcg_debug_assert(l->mmu_idx < NB_MMU_MODES); - /* Handle CPU specific unaligned behaviour */ - a_bits = get_alignment_bits(l->memop); - if (addr & ((1 << a_bits) - 1)) { - cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra); - } - l->page[0].addr = addr; l->page[0].size = memop_size(l->memop); l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK; @@ -1721,7 +1758,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK; if (likely(!crosspage)) { - mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra); + mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra); flags = l->page[0].flags; if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { @@ -1736,12 +1773,15 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, l->page[1].size = l->page[0].size - size0; l->page[0].size = size0; + l->page[1].addr = cpu->cc->tcg_ops->pointer_wrap(cpu, l->mmu_idx, + l->page[1].addr, addr); + /* * Lookup both pages, recognizing exceptions from either. If the * second lookup potentially resized, refresh first CPUTLBEntryFull. */ - mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra); - if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) { + mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra); + if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) { uintptr_t index = tlb_index(cpu, l->mmu_idx, addr); l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index]; } @@ -1760,31 +1800,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, tcg_debug_assert((flags & TLB_BSWAP) == 0); } - /* - * This alignment check differs from the one above, in that this is - * based on the atomicity of the operation. The intended use case is - * the ARM memory type field of each PTE, where access to pages with - * Device memory type require alignment. - */ - if (unlikely(flags & TLB_CHECK_ALIGNED)) { - MemOp size = l->memop & MO_SIZE; - - switch (l->memop & MO_ATOM_MASK) { - case MO_ATOM_NONE: - size = MO_8; - break; - case MO_ATOM_IFALIGN_PAIR: - case MO_ATOM_WITHIN16_PAIR: - size = size ? size - 1 : 0; - break; - default: - break; - } - if (addr & ((1 << size) - 1)) { - cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra); - } - } - return crosspage; } @@ -1797,34 +1812,18 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, { uintptr_t mmu_idx = get_mmuidx(oi); MemOp mop = get_memop(oi); - int a_bits = get_alignment_bits(mop); uintptr_t index; CPUTLBEntry *tlbe; vaddr tlb_addr; void *hostaddr; CPUTLBEntryFull *full; + bool did_tlb_fill = false; tcg_debug_assert(mmu_idx < NB_MMU_MODES); /* Adjust the given return address. */ retaddr -= GETPC_ADJ; - /* Enforce guest required alignment. */ - if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { - /* ??? Maybe indicate atomic op to cpu_unaligned_access */ - cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, - mmu_idx, retaddr); - } - - /* Enforce qemu required alignment. */ - if (unlikely(addr & (size - 1))) { - /* We get here if guest alignment was not requested, - or was not enforced by cpu_unaligned_access above. 
- We might widen the access and emulate, but for now - mark an exception and exit the cpu loop. */ - goto stop_the_world; - } - index = tlb_index(cpu, mmu_idx, addr); tlbe = tlb_entry(cpu, mmu_idx, addr); @@ -1833,8 +1832,9 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, if (!tlb_hit(tlb_addr, addr)) { if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE, addr & TARGET_PAGE_MASK)) { - tlb_fill(cpu, addr, size, - MMU_DATA_STORE, mmu_idx, retaddr); + tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx, + mop, size, false, retaddr); + did_tlb_fill = true; index = tlb_index(cpu, mmu_idx, addr); tlbe = tlb_entry(cpu, mmu_idx, addr); } @@ -1848,17 +1848,38 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, * but addr_read will only be -1 if PAGE_READ was unset. */ if (unlikely(tlbe->addr_read == -1)) { - tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); + tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx, + 0, size, false, retaddr); /* * Since we don't support reads and writes to different * addresses, and we do have the proper page loaded for - * write, this shouldn't ever return. But just in case, - * handle via stop-the-world. + * write, this shouldn't ever return. + */ + g_assert_not_reached(); + } + + /* Enforce guest required alignment, if not handled by tlb_fill_align. */ + if (!did_tlb_fill && (addr & ((1 << memop_alignment_bits(mop)) - 1))) { + cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, mmu_idx, retaddr); + } + + /* Enforce qemu required alignment. */ + if (unlikely(addr & (size - 1))) { + /* + * We get here if guest alignment was not requested, or was not + * enforced by cpu_unaligned_access or tlb_fill_align above. + * We might widen the access and emulate, but for now + * mark an exception and exit the cpu loop. */ goto stop_the_world; } - /* Collect tlb flags for read. */ + + /* Finish collecting tlb flags for both read and write. 
*/ + full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; tlb_addr |= tlbe->addr_read; + tlb_addr &= TLB_FLAGS_MASK & ~TLB_FORCE_SLOW; + tlb_addr |= full->slow_flags[MMU_DATA_STORE]; + tlb_addr |= full->slow_flags[MMU_DATA_LOAD]; /* Notice an IO access or a needs-MMU-lookup access */ if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) { @@ -1868,13 +1889,12 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, } hostaddr = (void *)((uintptr_t)addr + tlbe->addend); - full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; if (unlikely(tlb_addr & TLB_NOTDIRTY)) { notdirty_write(cpu, addr, size, full, retaddr); } - if (unlikely(tlb_addr & TLB_FORCE_SLOW)) { + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { int wp_flags = 0; if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) { @@ -1883,10 +1903,8 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) { wp_flags |= BP_MEM_READ; } - if (wp_flags) { - cpu_check_watchpoint(cpu, addr, size, - full->attrs, wp_flags, retaddr); - } + cpu_check_watchpoint(cpu, addr, size, + full->attrs, wp_flags, retaddr); } return hostaddr; @@ -2313,7 +2331,7 @@ static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, MMULookupLocals l; bool crosspage; - cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD); crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); tcg_debug_assert(!crosspage); @@ -2328,7 +2346,7 @@ static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, uint16_t ret; uint8_t a, b; - cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD); crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); if (likely(!crosspage)) { return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); @@ -2352,7 +2370,7 @@ static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, bool crosspage; uint32_t ret; - cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD); crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); if (likely(!crosspage)) { return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); @@ -2373,7 +2391,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, bool crosspage; uint64_t ret; - cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD); crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); if (likely(!crosspage)) { return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); @@ -2396,7 +2414,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr, Int128 ret; int first; - cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD); crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l); if (likely(!crosspage)) { if (unlikely(l.page[0].flags & TLB_MMIO)) { @@ -2724,7 +2742,7 @@ static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val, MMULookupLocals l; bool crosspage; - cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); + cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST); crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); tcg_debug_assert(!crosspage); @@ -2738,7 +2756,7 @@ static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val, bool crosspage; uint8_t a, b; - cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); + cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST); crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); if (likely(!crosspage)) { do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); @@ -2760,7 
+2778,7 @@ static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val, MMULookupLocals l; bool crosspage; - cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); + cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST); crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); if (likely(!crosspage)) { do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); @@ -2781,7 +2799,7 @@ static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val, MMULookupLocals l; bool crosspage; - cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); + cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST); crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); if (likely(!crosspage)) { do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); @@ -2804,7 +2822,7 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val, uint64_t a, b; int first; - cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); + cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST); crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); if (likely(!crosspage)) { if (unlikely(l.page[0].flags & TLB_MMIO)) { @@ -2889,54 +2907,45 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val, /* Code access functions. */ -uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) -{ - CPUState *cs = env_cpu(env); - MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true)); - return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH); -} - -uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) -{ - CPUState *cs = env_cpu(env); - MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true)); - return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH); -} - -uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) -{ - CPUState *cs = env_cpu(env); - MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true)); - return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH); -} - -uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) -{ - CPUState *cs = env_cpu(env); - MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true)); - return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH); -} - -uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr, +uint8_t cpu_ldb_code_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t retaddr) { return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH); } -uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr, +uint16_t cpu_ldw_code_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t retaddr) { return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH); } -uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr, +uint32_t cpu_ldl_code_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t retaddr) { return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH); } -uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr, +uint64_t cpu_ldq_code_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t retaddr) { return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH); } + +/* + * Common pointer_wrap implementations. + */ + +/* + * To be used for strict alignment targets. + * Because no accesses are unaligned, no accesses wrap either. + */ +vaddr cpu_pointer_wrap_notreached(CPUState *cs, int idx, vaddr res, vaddr base) +{ + g_assert_not_reached(); +} + +/* To be used for strict 32-bit targets. 
*/ +vaddr cpu_pointer_wrap_uint32(CPUState *cs, int idx, vaddr res, vaddr base) +{ + return (uint32_t)res; +} diff --git a/accel/tcg/icount-common.c b/accel/tcg/icount-common.c index 8d3d3a7e9dc..d6471174a3a 100644 --- a/accel/tcg/icount-common.c +++ b/accel/tcg/icount-common.c @@ -27,17 +27,16 @@ #include "migration/vmstate.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "sysemu/cpus.h" -#include "sysemu/qtest.h" +#include "system/cpus.h" +#include "system/qtest.h" #include "qemu/main-loop.h" #include "qemu/option.h" #include "qemu/seqlock.h" -#include "sysemu/replay.h" -#include "sysemu/runstate.h" +#include "system/replay.h" +#include "system/runstate.h" #include "hw/core/cpu.h" -#include "sysemu/cpu-timers.h" -#include "sysemu/cpu-throttle.h" -#include "sysemu/cpu-timers-internal.h" +#include "exec/icount.h" +#include "system/cpu-timers-internal.h" /* * ICOUNT: Instruction Counter @@ -49,6 +48,8 @@ static bool icount_sleep = true; /* Arbitrarily pick 1MIPS as the minimum allowable speed. */ #define MAX_ICOUNT_SHIFT 10 +bool icount_align_option; + /* Do not count executed instructions */ ICountMode use_icount = ICOUNT_DISABLED; diff --git a/accel/tcg/internal-common.h b/accel/tcg/internal-common.h index a8fc3db7742..6adfeefe131 100644 --- a/accel/tcg/internal-common.h +++ b/accel/tcg/internal-common.h @@ -11,12 +11,16 @@ #include "exec/cpu-common.h" #include "exec/translation-block.h" +#include "exec/mmap-lock.h" +#include "accel/tcg/tb-cpu-state.h" extern int64_t max_delay; extern int64_t max_advance; extern bool one_insn_per_tb; +extern bool icount_align_option; + /* * Return true if CS is not running in parallel with other cpus, either * because there are no other cpus or we are within an exclusive context. @@ -43,9 +47,7 @@ static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu) #endif } -TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc, - uint64_t cs_base, uint32_t flags, - int cflags); +TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s); void page_init(void); void tb_htable_init(void); void tb_reset_jump(TranslationBlock *tb, int n); @@ -53,7 +55,90 @@ TranslationBlock *tb_link_page(TranslationBlock *tb); void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, uintptr_t host_pc); +/** + * tlb_init - initialize a CPU's TLB + * @cpu: CPU whose TLB should be initialized + */ +void tlb_init(CPUState *cpu); +/** + * tlb_destroy - destroy a CPU's TLB + * @cpu: CPU whose TLB should be destroyed + */ +void tlb_destroy(CPUState *cpu); + bool tcg_exec_realizefn(CPUState *cpu, Error **errp); void tcg_exec_unrealizefn(CPUState *cpu); +/* current cflags for hashing/comparison */ +uint32_t curr_cflags(CPUState *cpu); + +void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr); + +/** + * get_page_addr_code_hostp() + * @env: CPUArchState + * @addr: guest virtual address of guest code + * + * See get_page_addr_code() (full-system version) for documentation on the + * return value. + * + * Sets *@hostp (when @hostp is non-NULL) as follows. + * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp + * to the host address where @addr's content is kept. + * + * Note: this function can trigger an exception. + */ +tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr, + void **hostp); + +/** + * get_page_addr_code() + * @env: CPUArchState + * @addr: guest virtual address of guest code + * + * If we cannot translate and execute from the entire RAM page, or if + * the region is not backed by RAM, returns -1. 
Otherwise, returns the + * ram_addr_t corresponding to the guest code at @addr. + * + * Note: this function can trigger an exception. + */ +static inline tb_page_addr_t get_page_addr_code(CPUArchState *env, + vaddr addr) +{ + return get_page_addr_code_hostp(env, addr, NULL); +} + +/* + * Access to the various translations structures need to be serialised + * via locks for consistency. In user-mode emulation access to the + * memory related structures are protected with mmap_lock. + * In !user-mode we use per-page locks. + */ +#ifdef CONFIG_USER_ONLY +#define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) +#else +#define assert_memory_lock() +#endif + +#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG) +void assert_no_pages_locked(void); +#else +static inline void assert_no_pages_locked(void) { } +#endif + +#ifdef CONFIG_USER_ONLY +static inline void page_table_config_init(void) { } +#else +void page_table_config_init(void); +#endif + +#ifndef CONFIG_USER_ONLY +G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); +#endif /* CONFIG_USER_ONLY */ + +void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); +void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr); + +void tcg_get_stats(AccelState *accel, GString *buf); + #endif diff --git a/accel/tcg/internal-target.h b/accel/tcg/internal-target.h deleted file mode 100644 index fe109724c68..00000000000 --- a/accel/tcg/internal-target.h +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Internal execution defines for qemu (target specific) - * - * Copyright (c) 2003 Fabrice Bellard - * - * SPDX-License-Identifier: LGPL-2.1-or-later - */ - -#ifndef ACCEL_TCG_INTERNAL_TARGET_H -#define ACCEL_TCG_INTERNAL_TARGET_H - -#include "exec/exec-all.h" -#include "exec/translate-all.h" - -/* - * Access to the various translations structures need to be serialised - * via locks for consistency. In user-mode emulation access to the - * memory related structures are protected with mmap_lock. - * In !user-mode we use per-page locks. - */ -#ifdef CONFIG_USER_ONLY -#define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) -#else -#define assert_memory_lock() -#endif - -#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG) -void assert_no_pages_locked(void); -#else -static inline void assert_no_pages_locked(void) { } -#endif - -#ifdef CONFIG_USER_ONLY -static inline void page_table_config_init(void) { } -#else -void page_table_config_init(void); -#endif - -#ifdef CONFIG_USER_ONLY -/* - * For user-only, page_protect sets the page read-only. - * Since most execution is already on read-only pages, and we'd need to - * account for other TBs on the same page, defer undoing any page protection - * until we receive the write fault. 
- */ -static inline void tb_lock_page0(tb_page_addr_t p0) -{ - page_protect(p0); -} - -static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1) -{ - page_protect(p1); -} - -static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { } -static inline void tb_unlock_pages(TranslationBlock *tb) { } -#else -void tb_lock_page0(tb_page_addr_t); -void tb_lock_page1(tb_page_addr_t, tb_page_addr_t); -void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t); -void tb_unlock_pages(TranslationBlock *); -#endif - -#ifdef CONFIG_SOFTMMU -void tb_invalidate_phys_range_fast(ram_addr_t ram_addr, - unsigned size, - uintptr_t retaddr); -G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); -#endif /* CONFIG_SOFTMMU */ - -bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc); - -/* Return the current PC from CPU, which may be cached in TB. */ -static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb) -{ - if (tb_cflags(tb) & CF_PCREL) { - return cpu->cc->get_pc(cpu); - } else { - return tb->pc; - } -} - -/** - * tcg_req_mo: - * @type: TCGBar - * - * Filter @type to the barrier that is required for the guest - * memory ordering vs the host memory ordering. A non-zero - * result indicates that some barrier is required. - * - * If TCG_GUEST_DEFAULT_MO is not defined, assume that the - * guest requires strict ordering. - * - * This is a macro so that it's constant even without optimization. - */ -#ifdef TCG_GUEST_DEFAULT_MO -# define tcg_req_mo(type) \ - ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) -#else -# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO) -#endif - -/** - * cpu_req_mo: - * @type: TCGBar - * - * If tcg_req_mo indicates a barrier for @type is required - * for the guest memory model, issue a host memory barrier. - */ -#define cpu_req_mo(type) \ - do { \ - if (tcg_req_mo(type)) { \ - smp_mb(); \ - } \ - } while (0) - -#endif /* ACCEL_TCG_INTERNAL_H */ diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc index 134da3c1da6..c735add2615 100644 --- a/accel/tcg/ldst_atomicity.c.inc +++ b/accel/tcg/ldst_atomicity.c.inc @@ -168,6 +168,7 @@ static uint64_t load_atomic8_or_exit(CPUState *cpu, uintptr_t ra, void *pv) #endif /* Ultimate fallback: re-execute in serial context. */ + trace_load_atom8_or_exit_fallback(ra); cpu_loop_exit_atomic(cpu, ra); } @@ -212,6 +213,7 @@ static Int128 load_atomic16_or_exit(CPUState *cpu, uintptr_t ra, void *pv) } /* Ultimate fallback: re-execute in serial context. 
*/ + trace_load_atom16_or_exit_fallback(ra); cpu_loop_exit_atomic(cpu, ra); } @@ -519,6 +521,7 @@ static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra, if (HAVE_al8) { return load_atom_extract_al8x2(pv); } + trace_load_atom8_fallback(memop, ra); cpu_loop_exit_atomic(cpu, ra); default: g_assert_not_reached(); @@ -563,6 +566,7 @@ static Int128 load_atom_16(CPUState *cpu, uintptr_t ra, break; case MO_64: if (!HAVE_al8) { + trace_load_atom16_fallback(memop, ra); cpu_loop_exit_atomic(cpu, ra); } a = load_atomic8(pv); @@ -570,6 +574,7 @@ static Int128 load_atom_16(CPUState *cpu, uintptr_t ra, break; case -MO_64: if (!HAVE_al8) { + trace_load_atom16_fallback(memop, ra); cpu_loop_exit_atomic(cpu, ra); } a = load_atom_extract_al8x2(pv); @@ -897,6 +902,7 @@ static void store_atom_2(CPUState *cpu, uintptr_t ra, g_assert_not_reached(); } + trace_store_atom2_fallback(memop, ra); cpu_loop_exit_atomic(cpu, ra); } @@ -961,6 +967,7 @@ static void store_atom_4(CPUState *cpu, uintptr_t ra, return; } } + trace_store_atom4_fallback(memop, ra); cpu_loop_exit_atomic(cpu, ra); default: g_assert_not_reached(); @@ -1029,6 +1036,7 @@ static void store_atom_8(CPUState *cpu, uintptr_t ra, default: g_assert_not_reached(); } + trace_store_atom8_fallback(memop, ra); cpu_loop_exit_atomic(cpu, ra); } @@ -1107,5 +1115,6 @@ static void store_atom_16(CPUState *cpu, uintptr_t ra, default: g_assert_not_reached(); } + trace_store_atom16_fallback(memop, ra); cpu_loop_exit_atomic(cpu, ra); } diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc index 87ceb954873..57f3e061924 100644 --- a/accel/tcg/ldst_common.c.inc +++ b/accel/tcg/ldst_common.c.inc @@ -123,64 +123,69 @@ void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi) * Load helpers for cpu_ldst.h */ -static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) +static void plugin_load_cb(CPUArchState *env, vaddr addr, + uint64_t value_low, + uint64_t value_high, + MemOpIdx oi) { if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) { - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, + value_low, value_high, + oi, QEMU_PLUGIN_MEM_R); } } -uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) +uint8_t cpu_ldb_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra) { uint8_t ret; tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB); ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); - plugin_load_cb(env, addr, oi); + plugin_load_cb(env, addr, ret, 0, oi); return ret; } -uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr, +uint16_t cpu_ldw_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra) { uint16_t ret; tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); - plugin_load_cb(env, addr, oi); + plugin_load_cb(env, addr, ret, 0, oi); return ret; } -uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr, +uint32_t cpu_ldl_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra) { uint32_t ret; tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); - plugin_load_cb(env, addr, oi); + plugin_load_cb(env, addr, ret, 0, oi); return ret; } -uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr, +uint64_t cpu_ldq_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra) { uint64_t ret; tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); 
- plugin_load_cb(env, addr, oi); + plugin_load_cb(env, addr, ret, 0, oi); return ret; } -Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, +Int128 cpu_ld16_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra) { Int128 ret; tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); ret = do_ld16_mmu(env_cpu(env), addr, oi, ra); - plugin_load_cb(env, addr, oi); + plugin_load_cb(env, addr, int128_getlo(ret), int128_gethi(ret), oi); return ret; } @@ -188,363 +193,53 @@ Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, * Store helpers for cpu_ldst.h */ -static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) +static void plugin_store_cb(CPUArchState *env, vaddr addr, + uint64_t value_low, + uint64_t value_high, + MemOpIdx oi) { if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) { - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, + value_low, value_high, + oi, QEMU_PLUGIN_MEM_W); } } -void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, +void cpu_stb_mmu(CPUArchState *env, vaddr addr, uint8_t val, MemOpIdx oi, uintptr_t retaddr) { helper_stb_mmu(env, addr, val, oi, retaddr); - plugin_store_cb(env, addr, oi); + plugin_store_cb(env, addr, val, 0, oi); } -void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, +void cpu_stw_mmu(CPUArchState *env, vaddr addr, uint16_t val, MemOpIdx oi, uintptr_t retaddr) { tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); do_st2_mmu(env_cpu(env), addr, val, oi, retaddr); - plugin_store_cb(env, addr, oi); + plugin_store_cb(env, addr, val, 0, oi); } -void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, +void cpu_stl_mmu(CPUArchState *env, vaddr addr, uint32_t val, MemOpIdx oi, uintptr_t retaddr) { tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); do_st4_mmu(env_cpu(env), addr, val, oi, retaddr); - plugin_store_cb(env, addr, oi); + plugin_store_cb(env, addr, val, 0, oi); } -void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, +void cpu_stq_mmu(CPUArchState *env, vaddr addr, uint64_t val, MemOpIdx oi, uintptr_t retaddr) { tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); do_st8_mmu(env_cpu(env), addr, val, oi, retaddr); - plugin_store_cb(env, addr, oi); + plugin_store_cb(env, addr, val, 0, oi); } -void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val, +void cpu_st16_mmu(CPUArchState *env, vaddr addr, Int128 val, MemOpIdx oi, uintptr_t retaddr) { tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); do_st16_mmu(env_cpu(env), addr, val, oi, retaddr); - plugin_store_cb(env, addr, oi); -} - -/* - * Wrappers of the above - */ - -uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); - return cpu_ldb_mmu(env, addr, oi, ra); -} - -int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra); -} - -uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); - return cpu_ldw_mmu(env, addr, oi, ra); -} - -int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra); -} - -uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); - return cpu_ldl_mmu(env, addr, oi, ra); -} - 
-uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx); - return cpu_ldq_mmu(env, addr, oi, ra); -} - -uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); - return cpu_ldw_mmu(env, addr, oi, ra); -} - -int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra); -} - -uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); - return cpu_ldl_mmu(env, addr, oi, ra); -} - -uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx); - return cpu_ldq_mmu(env, addr, oi, ra); -} - -void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); - cpu_stb_mmu(env, addr, val, oi, ra); -} - -void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); - cpu_stw_mmu(env, addr, val, oi, ra); -} - -void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); - cpu_stl_mmu(env, addr, val, oi, ra); -} - -void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx); - cpu_stq_mmu(env, addr, val, oi, ra); -} - -void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); - cpu_stw_mmu(env, addr, val, oi, ra); -} - -void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); - cpu_stl_mmu(env, addr, val, oi, ra); -} - -void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, - int mmu_idx, uintptr_t ra) -{ - MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx); - cpu_stq_mmu(env, addr, val, oi, ra); -} - -/*--------------------------*/ - -uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - return cpu_ldub_mmuidx_ra(env, addr, mmu_index, ra); -} - -int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) -{ - return (int8_t)cpu_ldub_data_ra(env, addr, ra); -} - -uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - return cpu_lduw_be_mmuidx_ra(env, addr, mmu_index, ra); -} - -int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) -{ - return (int16_t)cpu_lduw_be_data_ra(env, addr, ra); -} - -uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - return cpu_ldl_be_mmuidx_ra(env, addr, mmu_index, ra); -} - -uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - return cpu_ldq_be_mmuidx_ra(env, addr, mmu_index, ra); -} - -uint32_t cpu_lduw_le_data_ra(CPUArchState 
*env, abi_ptr addr, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - return cpu_lduw_le_mmuidx_ra(env, addr, mmu_index, ra); -} - -int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) -{ - return (int16_t)cpu_lduw_le_data_ra(env, addr, ra); -} - -uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - return cpu_ldl_le_mmuidx_ra(env, addr, mmu_index, ra); -} - -uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - return cpu_ldq_le_mmuidx_ra(env, addr, mmu_index, ra); -} - -void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - cpu_stb_mmuidx_ra(env, addr, val, mmu_index, ra); -} - -void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - cpu_stw_be_mmuidx_ra(env, addr, val, mmu_index, ra); -} - -void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - cpu_stl_be_mmuidx_ra(env, addr, val, mmu_index, ra); -} - -void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr, - uint64_t val, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - cpu_stq_be_mmuidx_ra(env, addr, val, mmu_index, ra); -} - -void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - cpu_stw_le_mmuidx_ra(env, addr, val, mmu_index, ra); -} - -void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - cpu_stl_le_mmuidx_ra(env, addr, val, mmu_index, ra); -} - -void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr, - uint64_t val, uintptr_t ra) -{ - int mmu_index = cpu_mmu_index(env_cpu(env), false); - cpu_stq_le_mmuidx_ra(env, addr, val, mmu_index, ra); -} - -/*--------------------------*/ - -uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr) -{ - return cpu_ldub_data_ra(env, addr, 0); -} - -int cpu_ldsb_data(CPUArchState *env, abi_ptr addr) -{ - return (int8_t)cpu_ldub_data(env, addr); -} - -uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr) -{ - return cpu_lduw_be_data_ra(env, addr, 0); -} - -int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr) -{ - return (int16_t)cpu_lduw_be_data(env, addr); -} - -uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr) -{ - return cpu_ldl_be_data_ra(env, addr, 0); -} - -uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr) -{ - return cpu_ldq_be_data_ra(env, addr, 0); -} - -uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr) -{ - return cpu_lduw_le_data_ra(env, addr, 0); -} - -int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr) -{ - return (int16_t)cpu_lduw_le_data(env, addr); -} - -uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr) -{ - return cpu_ldl_le_data_ra(env, addr, 0); -} - -uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr) -{ - return cpu_ldq_le_data_ra(env, addr, 0); -} - -void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val) -{ - cpu_stb_data_ra(env, addr, val, 0); -} - -void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val) -{ - cpu_stw_be_data_ra(env, addr, val, 0); -} - -void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val) -{ - 
cpu_stl_be_data_ra(env, addr, val, 0); -} - -void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val) -{ - cpu_stq_be_data_ra(env, addr, val, 0); -} - -void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val) -{ - cpu_stw_le_data_ra(env, addr, val, 0); -} - -void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val) -{ - cpu_stl_le_data_ra(env, addr, val, 0); -} - -void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val) -{ - cpu_stq_le_data_ra(env, addr, val, 0); + plugin_store_cb(env, addr, int128_getlo(val), int128_gethi(val), oi); } diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build index aef80de9676..002aa8f4588 100644 --- a/accel/tcg/meson.build +++ b/accel/tcg/meson.build @@ -1,36 +1,39 @@ -common_ss.add(when: 'CONFIG_TCG', if_true: files( - 'cpu-exec-common.c', -)) -tcg_specific_ss = ss.source_set() -tcg_specific_ss.add(files( - 'tcg-all.c', +if not have_tcg + subdir_done() +endif + +tcg_ss = ss.source_set() + +tcg_ss.add(files( 'cpu-exec.c', - 'tb-maint.c', - 'tcg-runtime-gvec.c', + 'cpu-exec-common.c', 'tcg-runtime.c', + 'tcg-runtime-gvec.c', + 'tb-maint.c', + 'tcg-all.c', + 'tcg-stats.c', 'translate-all.c', 'translator.c', )) -tcg_specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c')) -tcg_specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c')) if get_option('plugins') - tcg_specific_ss.add(files('plugin-gen.c')) + tcg_ss.add(files('plugin-gen.c')) endif -specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_specific_ss) -specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files( - 'cputlb.c', - 'watchpoint.c', +user_ss.add_all(tcg_ss) +system_ss.add_all(tcg_ss) + +user_ss.add(files( + 'user-exec.c', + 'user-exec-stub.c', )) -system_ss.add(when: ['CONFIG_TCG'], if_true: files( +system_ss.add(files( + 'cputlb.c', 'icount-common.c', 'monitor.c', -)) - -tcg_module_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files( 'tcg-accel-ops.c', - 'tcg-accel-ops-mttcg.c', 'tcg-accel-ops-icount.c', + 'tcg-accel-ops-mttcg.c', 'tcg-accel-ops-rr.c', + 'watchpoint.c', )) diff --git a/accel/tcg/monitor.c b/accel/tcg/monitor.c index 093efe97144..be5c1950177 100644 --- a/accel/tcg/monitor.c +++ b/accel/tcg/monitor.c @@ -7,197 +7,13 @@ */ #include "qemu/osdep.h" -#include "qemu/accel.h" -#include "qemu/qht.h" #include "qapi/error.h" #include "qapi/type-helpers.h" #include "qapi/qapi-commands-machine.h" #include "monitor/monitor.h" -#include "sysemu/cpus.h" -#include "sysemu/cpu-timers.h" -#include "sysemu/tcg.h" +#include "system/tcg.h" #include "tcg/tcg.h" #include "internal-common.h" -#include "tb-context.h" - - -static void dump_drift_info(GString *buf) -{ - if (!icount_enabled()) { - return; - } - - g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n", - (cpu_get_clock() - icount_get()) / SCALE_MS); - if (icount_align_option) { - g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n", - -max_delay / SCALE_MS); - g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n", - max_advance / SCALE_MS); - } else { - g_string_append_printf(buf, "Max guest delay NA\n"); - g_string_append_printf(buf, "Max guest advance NA\n"); - } -} - -static void dump_accel_info(GString *buf) -{ - AccelState *accel = current_accel(); - bool one_insn_per_tb = object_property_get_bool(OBJECT(accel), - "one-insn-per-tb", - &error_fatal); - - g_string_append_printf(buf, "Accelerator settings:\n"); - g_string_append_printf(buf, "one-insn-per-tb: %s\n\n", - one_insn_per_tb ? 
"on" : "off"); -} - -static void print_qht_statistics(struct qht_stats hst, GString *buf) -{ - uint32_t hgram_opts; - size_t hgram_bins; - char *hgram; - - if (!hst.head_buckets) { - return; - } - g_string_append_printf(buf, "TB hash buckets %zu/%zu " - "(%0.2f%% head buckets used)\n", - hst.used_head_buckets, hst.head_buckets, - (double)hst.used_head_buckets / - hst.head_buckets * 100); - - hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; - hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT; - if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) { - hgram_opts |= QDIST_PR_NODECIMAL; - } - hgram = qdist_pr(&hst.occupancy, 10, hgram_opts); - g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. " - "Histogram: %s\n", - qdist_avg(&hst.occupancy) * 100, hgram); - g_free(hgram); - - hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; - hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain); - if (hgram_bins > 10) { - hgram_bins = 10; - } else { - hgram_bins = 0; - hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE; - } - hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts); - g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. " - "Histogram: %s\n", - qdist_avg(&hst.chain), hgram); - g_free(hgram); -} - -struct tb_tree_stats { - size_t nb_tbs; - size_t host_size; - size_t target_size; - size_t max_target_size; - size_t direct_jmp_count; - size_t direct_jmp2_count; - size_t cross_page; -}; - -static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data) -{ - const TranslationBlock *tb = value; - struct tb_tree_stats *tst = data; - - tst->nb_tbs++; - tst->host_size += tb->tc.size; - tst->target_size += tb->size; - if (tb->size > tst->max_target_size) { - tst->max_target_size = tb->size; - } - if (tb->page_addr[1] != -1) { - tst->cross_page++; - } - if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) { - tst->direct_jmp_count++; - if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) { - tst->direct_jmp2_count++; - } - } - return false; -} - -static void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide) -{ - CPUState *cpu; - size_t full = 0, part = 0, elide = 0; - - CPU_FOREACH(cpu) { - full += qatomic_read(&cpu->neg.tlb.c.full_flush_count); - part += qatomic_read(&cpu->neg.tlb.c.part_flush_count); - elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count); - } - *pfull = full; - *ppart = part; - *pelide = elide; -} - -static void tcg_dump_info(GString *buf) -{ - g_string_append_printf(buf, "[TCG profiler not compiled]\n"); -} - -static void dump_exec_info(GString *buf) -{ - struct tb_tree_stats tst = {}; - struct qht_stats hst; - size_t nb_tbs, flush_full, flush_part, flush_elide; - - tcg_tb_foreach(tb_tree_stats_iter, &tst); - nb_tbs = tst.nb_tbs; - /* XXX: avoid using doubles ? */ - g_string_append_printf(buf, "Translation buffer state:\n"); - /* - * Report total code size including the padding and TB structs; - * otherwise users might think "-accel tcg,tb-size" is not honoured. - * For avg host size we use the precise numbers from tb_tree_stats though. - */ - g_string_append_printf(buf, "gen code size %zu/%zu\n", - tcg_code_size(), tcg_code_capacity()); - g_string_append_printf(buf, "TB count %zu\n", nb_tbs); - g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n", - nb_tbs ? tst.target_size / nb_tbs : 0, - tst.max_target_size); - g_string_append_printf(buf, "TB avg host size %zu bytes " - "(expansion ratio: %0.1f)\n", - nb_tbs ? tst.host_size / nb_tbs : 0, - tst.target_size ? 
- (double)tst.host_size / tst.target_size : 0); - g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n", - tst.cross_page, - nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0); - g_string_append_printf(buf, "direct jump count %zu (%zu%%) " - "(2 jumps=%zu %zu%%)\n", - tst.direct_jmp_count, - nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0, - tst.direct_jmp2_count, - nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0); - - qht_statistics_init(&tb_ctx.htable, &hst); - print_qht_statistics(hst, buf); - qht_statistics_destroy(&hst); - - g_string_append_printf(buf, "\nStatistics:\n"); - g_string_append_printf(buf, "TB flush count %u\n", - qatomic_read(&tb_ctx.tb_flush_count)); - g_string_append_printf(buf, "TB invalidate count %u\n", - qatomic_read(&tb_ctx.tb_phys_invalidate_count)); - - tlb_flush_counts(&flush_full, &flush_part, &flush_elide); - g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full); - g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part); - g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide); - tcg_dump_info(buf); -} HumanReadableText *qmp_x_query_jit(Error **errp) { @@ -208,29 +24,7 @@ HumanReadableText *qmp_x_query_jit(Error **errp) return NULL; } - dump_accel_info(buf); - dump_exec_info(buf); - dump_drift_info(buf); - - return human_readable_text_from_str(buf); -} - -static void tcg_dump_op_count(GString *buf) -{ - g_string_append_printf(buf, "[TCG profiler not compiled]\n"); -} - -HumanReadableText *qmp_x_query_opcount(Error **errp) -{ - g_autoptr(GString) buf = g_string_new(""); - - if (!tcg_enabled()) { - error_setg(errp, - "Opcode count information is only available with accel=tcg"); - return NULL; - } - - tcg_dump_op_count(buf); + tcg_dump_stats(buf); return human_readable_text_from_str(buf); } @@ -238,7 +32,6 @@ HumanReadableText *qmp_x_query_opcount(Error **errp) static void hmp_tcg_register(void) { monitor_register_hmp_info_hrt("jit", qmp_x_query_jit); - monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount); } type_init(hmp_tcg_register); diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c index ec89a085b43..9920381a84e 100644 --- a/accel/tcg/plugin-gen.c +++ b/accel/tcg/plugin-gen.c @@ -22,13 +22,12 @@ #include "qemu/osdep.h" #include "qemu/plugin.h" #include "qemu/log.h" -#include "cpu.h" #include "tcg/tcg.h" #include "tcg/tcg-temp-internal.h" -#include "tcg/tcg-op.h" -#include "exec/exec-all.h" +#include "tcg/tcg-op-common.h" #include "exec/plugin-gen.h" #include "exec/translator.h" +#include "exec/translation-block.h" enum plugin_gen_from { PLUGIN_GEN_FROM_TB, @@ -89,32 +88,49 @@ static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb, qemu_plugin_add_dyn_cb_arr(arr); tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env, - offsetof(CPUState, neg.plugin_mem_cbs) - - offsetof(ArchCPU, env)); + offsetof(CPUState, neg.plugin_mem_cbs) - sizeof(CPUState)); } static void gen_disable_mem_helper(void) { tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env, - offsetof(CPUState, neg.plugin_mem_cbs) - - offsetof(ArchCPU, env)); + offsetof(CPUState, neg.plugin_mem_cbs) - sizeof(CPUState)); } static TCGv_i32 gen_cpu_index(void) { + /* + * Optimize when we run with a single vcpu. All values using cpu_index, + * including scoreboard index, will be optimized out. + * User-mode calls tb_flush when setting this flag. In system-mode, all + * vcpus are created before generating code. 
+ */ + if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) { + return tcg_constant_i32(current_cpu->cpu_index); + } TCGv_i32 cpu_index = tcg_temp_ebb_new_i32(); tcg_gen_ld_i32(cpu_index, tcg_env, - -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); + offsetof(CPUState, cpu_index) - sizeof(CPUState)); return cpu_index; } static void gen_udata_cb(struct qemu_plugin_regular_cb *cb) { TCGv_i32 cpu_index = gen_cpu_index(); + enum qemu_plugin_cb_flags cb_flags = + tcg_call_to_qemu_plugin_cb_flags(cb->info->flags); + TCGv_i32 flags = tcg_constant_i32(cb_flags); + TCGv_i32 clear_flags = tcg_constant_i32(QEMU_PLUGIN_CB_NO_REGS); + tcg_gen_st_i32(flags, tcg_env, + offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState)); tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL, tcgv_i32_temp(cpu_index), tcgv_ptr_temp(tcg_constant_ptr(cb->userp))); + tcg_gen_st_i32(clear_flags, tcg_env, + offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState)); tcg_temp_free_i32(cpu_index); + tcg_temp_free_i32(flags); + tcg_temp_free_i32(clear_flags); } static TCGv_ptr gen_plugin_u64_ptr(qemu_plugin_u64 entry) @@ -167,10 +183,20 @@ static void gen_udata_cond_cb(struct qemu_plugin_conditional_cb *cb) tcg_gen_ld_i64(val, ptr, 0); tcg_gen_brcondi_i64(cond, val, cb->imm, after_cb); TCGv_i32 cpu_index = gen_cpu_index(); + enum qemu_plugin_cb_flags cb_flags = + tcg_call_to_qemu_plugin_cb_flags(cb->info->flags); + TCGv_i32 flags = tcg_constant_i32(cb_flags); + TCGv_i32 clear_flags = tcg_constant_i32(QEMU_PLUGIN_CB_NO_REGS); + tcg_gen_st_i32(flags, tcg_env, + offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState)); tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL, tcgv_i32_temp(cpu_index), tcgv_ptr_temp(tcg_constant_ptr(cb->userp))); + tcg_gen_st_i32(clear_flags, tcg_env, + offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState)); tcg_temp_free_i32(cpu_index); + tcg_temp_free_i32(flags); + tcg_temp_free_i32(clear_flags); gen_set_label(after_cb); tcg_temp_free_i64(val); @@ -204,12 +230,22 @@ static void gen_mem_cb(struct qemu_plugin_regular_cb *cb, qemu_plugin_meminfo_t meminfo, TCGv_i64 addr) { TCGv_i32 cpu_index = gen_cpu_index(); + enum qemu_plugin_cb_flags cb_flags = + tcg_call_to_qemu_plugin_cb_flags(cb->info->flags); + TCGv_i32 flags = tcg_constant_i32(cb_flags); + TCGv_i32 clear_flags = tcg_constant_i32(QEMU_PLUGIN_CB_NO_REGS); + tcg_gen_st_i32(flags, tcg_env, + offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState)); tcg_gen_call4(cb->f.vcpu_mem, cb->info, NULL, tcgv_i32_temp(cpu_index), tcgv_i32_temp(tcg_constant_i32(meminfo)), tcgv_i64_temp(addr), tcgv_ptr_temp(tcg_constant_ptr(cb->userp))); + tcg_gen_st_i32(clear_flags, tcg_env, + offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState)); tcg_temp_free_i32(cpu_index); + tcg_temp_free_i32(flags); + tcg_temp_free_i32(clear_flags); } static void inject_cb(struct qemu_plugin_dyn_cb *cb) @@ -251,7 +287,6 @@ static void inject_mem_cb(struct qemu_plugin_dyn_cb *cb, break; default: g_assert_not_reached(); - break; } } @@ -276,7 +311,7 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb) * that might be live within the existing opcode stream. * The simplest solution is to release them all and create new. 
*/ - memset(tcg_ctx->free_temps, 0, sizeof(tcg_ctx->free_temps)); + tcg_temp_ebb_reset_freed(tcg_ctx); QTAILQ_FOREACH_SAFE(op, &tcg_ctx->ops, link, next) { switch (op->opc) { @@ -468,4 +503,8 @@ void plugin_gen_tb_end(CPUState *cpu, size_t num_insns) /* inject the instrumentation at the appropriate places */ plugin_gen_inject(ptb); + + /* reset plugin translation state (plugin_tb is reused between blocks) */ + tcg_ctx->plugin_db = NULL; + tcg_ctx->plugin_insn = NULL; } diff --git a/accel/tcg/tb-hash.h b/accel/tcg/tb-hash.h index a0c61f25cda..f7b159f04cb 100644 --- a/accel/tcg/tb-hash.h +++ b/accel/tcg/tb-hash.h @@ -20,8 +20,9 @@ #ifndef EXEC_TB_HASH_H #define EXEC_TB_HASH_H -#include "exec/cpu-defs.h" -#include "exec/exec-all.h" +#include "exec/vaddr.h" +#include "exec/target_page.h" +#include "exec/translation-block.h" #include "qemu/xxhash.h" #include "tb-jmp-cache.h" diff --git a/accel/tcg/tb-internal.h b/accel/tcg/tb-internal.h new file mode 100644 index 00000000000..40439f03c3a --- /dev/null +++ b/accel/tcg/tb-internal.h @@ -0,0 +1,55 @@ +/* + * TranslationBlock internal declarations (target specific) + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef ACCEL_TCG_TB_INTERNAL_TARGET_H +#define ACCEL_TCG_TB_INTERNAL_TARGET_H + +#include "exec/translation-block.h" + +/* + * The true return address will often point to a host insn that is part of + * the next translated guest insn. Adjust the address backward to point to + * the middle of the call insn. Subtracting one would do the job except for + * several compressed mode architectures (arm, mips) which set the low bit + * to indicate the compressed mode; subtracting two works around that. It + * is also the case that there are no host isas that contain a call insn + * smaller than 4 bytes, so we don't worry about special-casing this. + */ +#define GETPC_ADJ 2 + +void tb_lock_page0(tb_page_addr_t); + +#ifdef CONFIG_USER_ONLY +/* + * For user-only, page_protect sets the page read-only. + * Since most execution is already on read-only pages, and we'd need to + * account for other TBs on the same page, defer undoing any page protection + * until we receive the write fault. 
+ */ +static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1) +{ + tb_lock_page0(p1); +} + +static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { } +static inline void tb_unlock_pages(TranslationBlock *tb) { } +#else +void tb_lock_page1(tb_page_addr_t, tb_page_addr_t); +void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t); +void tb_unlock_pages(TranslationBlock *); +#endif + +#ifdef CONFIG_SOFTMMU +void tb_invalidate_phys_range_fast(CPUState *cpu, ram_addr_t ram_addr, + unsigned size, uintptr_t retaddr); +#endif /* CONFIG_SOFTMMU */ + +bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr, + uintptr_t pc); + +#endif diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c index cc0f5afd475..0048316f99a 100644 --- a/accel/tcg/tb-maint.c +++ b/accel/tcg/tb-maint.c @@ -22,16 +22,21 @@ #include "qemu/qtree.h" #include "exec/cputlb.h" #include "exec/log.h" -#include "exec/exec-all.h" #include "exec/page-protection.h" +#include "exec/mmap-lock.h" #include "exec/tb-flush.h" -#include "exec/translate-all.h" -#include "sysemu/tcg.h" +#include "exec/target_page.h" +#include "accel/tcg/cpu-ops.h" +#include "tb-internal.h" +#include "system/tcg.h" #include "tcg/tcg.h" #include "tb-hash.h" #include "tb-context.h" +#include "tb-internal.h" #include "internal-common.h" -#include "internal-target.h" +#ifdef CONFIG_USER_ONLY +#include "user/page-protection.h" +#endif /* List iterators for lists of tagged pointers in TranslationBlock. */ @@ -152,11 +157,7 @@ static PageForEachNext foreach_tb_next(PageForEachNext tb, /* * In system mode we want L1_MAP to be based on ram offsets. */ -#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS -# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS -#else -# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS -#endif +#define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS /* Size of the L2 (and L3, etc) page tables. */ #define V_L2_BITS 10 @@ -1005,7 +1006,8 @@ TranslationBlock *tb_link_page(TranslationBlock *tb) * Called with mmap_lock held for user-mode emulation. * NOTE: this function must not be called while a TB is running. */ -void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) +void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start, + tb_page_addr_t last) { TranslationBlock *tb; PageForEachNext n; @@ -1028,17 +1030,16 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr) start = addr & TARGET_PAGE_MASK; last = addr | ~TARGET_PAGE_MASK; - tb_invalidate_phys_range(start, last); + tb_invalidate_phys_range(NULL, start, last); } /* * Called with mmap_lock held. If pc is not 0 then it indicates the * host PC of the faulting store instruction that caused this invalidate. - * Returns true if the caller needs to abort execution of the current - * TB (because it was modified by this store and the guest CPU has - * precise-SMC semantics). + * Returns true if the caller needs to abort execution of the current TB. */ -bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) +bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr, + uintptr_t pc) { TranslationBlock *current_tb; bool current_tb_modified; @@ -1050,10 +1051,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) * Without precise smc semantics, or when outside of a TB, * we can skip to invalidate. 
*/ -#ifndef TARGET_HAS_PRECISE_SMC - pc = 0; -#endif - if (!pc) { + if (!pc || !cpu || !cpu->cc->tcg_ops->precise_smc) { tb_invalidate_phys_page(addr); return false; } @@ -1076,15 +1074,14 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) * the CPU state. */ current_tb_modified = true; - cpu_restore_state_from_tb(current_cpu, current_tb, pc); + cpu_restore_state_from_tb(cpu, current_tb, pc); } tb_phys_invalidate__locked(tb); } if (current_tb_modified) { /* Force execution of one insn next time. */ - CPUState *cpu = current_cpu; - cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu); + cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); return true; } return false; @@ -1093,23 +1090,28 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc) /* * @p must be non-NULL. * Call with all @pages locked. + * (@cpu, @retaddr) may be (NULL, 0) outside of a cpu context, + * in which case precise_smc need not be detected. */ static void -tb_invalidate_phys_page_range__locked(struct page_collection *pages, +tb_invalidate_phys_page_range__locked(CPUState *cpu, + struct page_collection *pages, PageDesc *p, tb_page_addr_t start, tb_page_addr_t last, uintptr_t retaddr) { TranslationBlock *tb; PageForEachNext n; -#ifdef TARGET_HAS_PRECISE_SMC bool current_tb_modified = false; - TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL; -#endif /* TARGET_HAS_PRECISE_SMC */ + TranslationBlock *current_tb = NULL; /* Range may not cross a page. */ tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0); + if (retaddr && cpu && cpu->cc->tcg_ops->precise_smc) { + current_tb = tcg_tb_lookup(retaddr); + } + /* * We remove all the TBs in the range [start, last]. * XXX: see if in some cases it could be faster to invalidate all the code @@ -1127,8 +1129,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK); } if (!(tb_last < start || tb_start > last)) { -#ifdef TARGET_HAS_PRECISE_SMC - if (current_tb == tb && + if (unlikely(current_tb == tb) && (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { /* * If we are modifying the current TB, we must stop @@ -1138,9 +1139,8 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, * restore the CPU state. */ current_tb_modified = true; - cpu_restore_state_from_tb(current_cpu, current_tb, retaddr); + cpu_restore_state_from_tb(cpu, current_tb, retaddr); } -#endif /* TARGET_HAS_PRECISE_SMC */ tb_phys_invalidate__locked(tb); } } @@ -1150,15 +1150,13 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, tlb_unprotect_code(start); } -#ifdef TARGET_HAS_PRECISE_SMC - if (current_tb_modified) { + if (unlikely(current_tb_modified)) { page_collection_unlock(pages); /* Force execution of one insn next time. */ - current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu); + cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); mmap_unlock(); - cpu_loop_exit_noexc(current_cpu); + cpu_loop_exit_noexc(cpu); } -#endif } /* @@ -1168,7 +1166,8 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages, * access: the virtual CPU will exit the current TB if code is modified inside * this TB. 
*/ -void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) +void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start, + tb_page_addr_t last) { struct page_collection *pages; tb_page_addr_t index, index_last; @@ -1187,44 +1186,30 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last) page_start = index << TARGET_PAGE_BITS; page_last = page_start | ~TARGET_PAGE_MASK; page_last = MIN(page_last, last); - tb_invalidate_phys_page_range__locked(pages, pd, + tb_invalidate_phys_page_range__locked(cpu, pages, pd, page_start, page_last, 0); } page_collection_unlock(pages); } -/* - * Call with all @pages in the range [@start, @start + len[ locked. - */ -static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages, - tb_page_addr_t start, - unsigned len, uintptr_t ra) -{ - PageDesc *p; - - p = page_find(start >> TARGET_PAGE_BITS); - if (!p) { - return; - } - - assert_page_locked(p); - tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra); -} - /* * len must be <= 8 and start must be a multiple of len. * Called via softmmu_template.h when code areas are written to with * iothread mutex not held. */ -void tb_invalidate_phys_range_fast(ram_addr_t ram_addr, - unsigned size, - uintptr_t retaddr) +void tb_invalidate_phys_range_fast(CPUState *cpu, ram_addr_t start, + unsigned len, uintptr_t ra) { - struct page_collection *pages; + PageDesc *p = page_find(start >> TARGET_PAGE_BITS); - pages = page_collection_lock(ram_addr, ram_addr + size - 1); - tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr); - page_collection_unlock(pages); + if (p) { + ram_addr_t last = start + len - 1; + struct page_collection *pages = page_collection_lock(start, last); + + tb_invalidate_phys_page_range__locked(cpu, pages, p, + start, last, ra); + page_collection_unlock(pages); + } } #endif /* CONFIG_USER_ONLY */ diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c index 9e1ae66f651..d0f7b410fab 100644 --- a/accel/tcg/tcg-accel-ops-icount.c +++ b/accel/tcg/tcg-accel-ops-icount.c @@ -24,11 +24,11 @@ */ #include "qemu/osdep.h" -#include "sysemu/replay.h" -#include "sysemu/cpu-timers.h" +#include "system/replay.h" +#include "exec/icount.h" #include "qemu/main-loop.h" #include "qemu/guest-random.h" -#include "exec/exec-all.h" +#include "hw/core/cpu.h" #include "tcg-accel-ops.h" #include "tcg-accel-ops-icount.h" diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c index 49814ec4aff..337b993d3da 100644 --- a/accel/tcg/tcg-accel-ops-mttcg.c +++ b/accel/tcg/tcg-accel-ops-mttcg.c @@ -24,13 +24,12 @@ */ #include "qemu/osdep.h" -#include "sysemu/tcg.h" -#include "sysemu/replay.h" -#include "sysemu/cpu-timers.h" +#include "system/tcg.h" +#include "system/replay.h" +#include "exec/icount.h" #include "qemu/main-loop.h" #include "qemu/notify.h" #include "qemu/guest-random.h" -#include "exec/exec-all.h" #include "hw/boards.h" #include "tcg/startup.h" #include "tcg-accel-ops.h" @@ -114,7 +113,6 @@ static void *mttcg_cpu_thread_fn(void *arg) } } - qatomic_set_mb(&cpu->exit_request, 0); qemu_wait_io_event(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c index 48c38714bd6..6eec5c9eee9 100644 --- a/accel/tcg/tcg-accel-ops-rr.c +++ b/accel/tcg/tcg-accel-ops-rr.c @@ -25,13 +25,13 @@ #include "qemu/osdep.h" #include "qemu/lockable.h" -#include "sysemu/tcg.h" -#include "sysemu/replay.h" -#include "sysemu/cpu-timers.h" +#include 
"system/tcg.h" +#include "system/replay.h" +#include "exec/icount.h" #include "qemu/main-loop.h" #include "qemu/notify.h" #include "qemu/guest-random.h" -#include "exec/exec-all.h" +#include "exec/cpu-common.h" #include "tcg/startup.h" #include "tcg-accel-ops.h" #include "tcg-accel-ops-rr.h" @@ -109,7 +109,7 @@ static void rr_wait_io_event(void) { CPUState *cpu; - while (all_cpu_threads_idle() && replay_can_wait()) { + while (all_cpu_threads_idle()) { rr_stop_kick_timer(); qemu_cond_wait_bql(first_cpu->halt_cond); } @@ -302,9 +302,7 @@ static void *rr_cpu_thread_fn(void *arg) rr_deal_with_unplugged_cpus(); } - rcu_remove_force_rcu_notifier(&force_rcu); - rcu_unregister_thread(); - return NULL; + g_assert_not_reached(); } void rr_start_vcpu_thread(CPUState *cpu) diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c index 3c19e68a79e..3b0d7d298e6 100644 --- a/accel/tcg/tcg-accel-ops.c +++ b/accel/tcg/tcg-accel-ops.c @@ -26,15 +26,19 @@ */ #include "qemu/osdep.h" -#include "sysemu/tcg.h" -#include "sysemu/replay.h" -#include "sysemu/cpu-timers.h" +#include "accel/accel-ops.h" +#include "accel/accel-cpu-ops.h" +#include "system/tcg.h" +#include "system/replay.h" +#include "exec/icount.h" #include "qemu/main-loop.h" #include "qemu/guest-random.h" #include "qemu/timer.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/hwaddr.h" #include "exec/tb-flush.h" +#include "exec/translation-block.h" +#include "exec/watchpoint.h" #include "gdbstub/enums.h" #include "hw/core/cpu.h" @@ -77,6 +81,9 @@ int tcg_cpu_exec(CPUState *cpu) cpu_exec_start(cpu); ret = cpu_exec(cpu); cpu_exec_end(cpu); + + qatomic_set_mb(&cpu->exit_request, 0); + return ret; } @@ -90,8 +97,6 @@ static void tcg_cpu_reset_hold(CPUState *cpu) /* mask must never be zero, except for A20 change call */ void tcg_handle_interrupt(CPUState *cpu, int mask) { - g_assert(bql_locked()); - cpu->interrupt_request |= mask; /* @@ -119,10 +124,9 @@ static inline int xlat_gdb_type(CPUState *cpu, int gdbtype) [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS, }; - CPUClass *cc = CPU_GET_CLASS(cpu); int cputype = xlat[gdbtype]; - if (cc->gdb_stop_before_watchpoint) { + if (cpu->cc->gdb_stop_before_watchpoint) { cputype |= BP_STOP_BEFORE_ACCESS; } return cputype; @@ -196,8 +200,10 @@ static inline void tcg_remove_all_breakpoints(CPUState *cpu) cpu_watchpoint_remove_all(cpu, BP_GDB); } -static void tcg_accel_ops_init(AccelOpsClass *ops) +static void tcg_accel_ops_init(AccelClass *ac) { + AccelOpsClass *ops = ac->ops; + if (qemu_tcg_mttcg_enabled()) { ops->create_vcpu_thread = mttcg_start_vcpu_thread; ops->kick_vcpu_thread = mttcg_kick_vcpu_thread; @@ -222,7 +228,7 @@ static void tcg_accel_ops_init(AccelOpsClass *ops) ops->remove_all_breakpoints = tcg_remove_all_breakpoints; } -static void tcg_accel_ops_class_init(ObjectClass *oc, void *data) +static void tcg_accel_ops_class_init(ObjectClass *oc, const void *data) { AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); diff --git a/accel/tcg/tcg-accel-ops.h b/accel/tcg/tcg-accel-ops.h index 44c4079972a..6feeb3f3e9b 100644 --- a/accel/tcg/tcg-accel-ops.h +++ b/accel/tcg/tcg-accel-ops.h @@ -12,7 +12,7 @@ #ifndef TCG_ACCEL_OPS_H #define TCG_ACCEL_OPS_H -#include "sysemu/cpus.h" +#include "system/cpus.h" void tcg_cpu_destroy(CPUState *cpu); int tcg_cpu_exec(CPUState *cpu); diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c index 2090907dba4..5125e1a4e27 100644 --- a/accel/tcg/tcg-all.c +++ b/accel/tcg/tcg-all.c @@ -24,26 +24,31 @@ */ #include "qemu/osdep.h" -#include "sysemu/tcg.h" 
+#include "system/tcg.h" #include "exec/replay-core.h" -#include "sysemu/cpu-timers.h" +#include "exec/icount.h" #include "tcg/startup.h" -#include "tcg/oversized-guest.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/accel.h" #include "qemu/atomic.h" +#include "qapi/qapi-types-common.h" #include "qapi/qapi-builtin-visit.h" #include "qemu/units.h" -#if !defined(CONFIG_USER_ONLY) +#include "qemu/target-info.h" +#ifndef CONFIG_USER_ONLY #include "hw/boards.h" #endif +#include "accel/accel-ops.h" +#include "accel/accel-cpu-ops.h" +#include "accel/tcg/cpu-ops.h" #include "internal-common.h" + struct TCGState { AccelState parent_obj; - bool mttcg_enabled; + OnOffAuto mttcg_enabled; bool one_insn_per_tb; int splitwx_enabled; unsigned long tb_size; @@ -55,40 +60,18 @@ typedef struct TCGState TCGState; DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE, TYPE_TCG_ACCEL) -/* - * We default to false if we know other options have been enabled - * which are currently incompatible with MTTCG. Otherwise when each - * guest (target) has been updated to support: - * - atomic instructions - * - memory ordering primitives (barriers) - * they can set the appropriate CONFIG flags in ${target}-softmmu.mak - * - * Once a guest architecture has been converted to the new primitives - * there is one remaining limitation to check: - * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host) - */ - -static bool default_mttcg_enabled(void) +#ifndef CONFIG_USER_ONLY +bool qemu_tcg_mttcg_enabled(void) { - if (icount_enabled() || TCG_OVERSIZED_GUEST) { - return false; - } -#ifdef TARGET_SUPPORTS_MTTCG -# ifndef TCG_GUEST_DEFAULT_MO -# error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO" -# endif - return true; -#else - return false; -#endif + TCGState *s = TCG_STATE(current_accel()); + return s->mttcg_enabled == ON_OFF_AUTO_ON; } +#endif /* !CONFIG_USER_ONLY */ static void tcg_accel_instance_init(Object *obj) { TCGState *s = TCG_STATE(obj); - s->mttcg_enabled = default_mttcg_enabled(); - /* If debugging enabled, default "auto on", otherwise off. */ #if defined(CONFIG_DEBUG_TCG) && !defined(CONFIG_USER_ONLY) s->splitwx_enabled = -1; @@ -97,24 +80,57 @@ static void tcg_accel_instance_init(Object *obj) #endif } -bool mttcg_enabled; bool one_insn_per_tb; -static int tcg_init_machine(MachineState *ms) +static int tcg_init_machine(AccelState *as, MachineState *ms) { - TCGState *s = TCG_STATE(current_accel()); -#ifdef CONFIG_USER_ONLY - unsigned max_cpus = 1; -#else - unsigned max_cpus = ms->smp.max_cpus; + TCGState *s = TCG_STATE(as); + unsigned max_threads = 1; + +#ifndef CONFIG_USER_ONLY + CPUClass *cc = CPU_CLASS(object_class_by_name(target_cpu_type())); + bool mttcg_supported = cc->tcg_ops->mttcg_supported; + + switch (s->mttcg_enabled) { + case ON_OFF_AUTO_AUTO: + /* + * We default to false if we know other options have been enabled + * which are currently incompatible with MTTCG. Otherwise when each + * guest (target) has been updated to support: + * - atomic instructions + * - memory ordering primitives (barriers) + * they can set the appropriate CONFIG flags in ${target}-softmmu.mak + * + * Once a guest architecture has been converted to the new primitives + * there is one remaining limitation to check: + * - The guest can't be oversized (e.g. 
64 bit guest on 32 bit host) + */ + if (mttcg_supported && !icount_enabled()) { + s->mttcg_enabled = ON_OFF_AUTO_ON; + max_threads = ms->smp.max_cpus; + } else { + s->mttcg_enabled = ON_OFF_AUTO_OFF; + } + break; + case ON_OFF_AUTO_ON: + if (!mttcg_supported) { + warn_report("Guest not yet converted to MTTCG - " + "you may get unexpected results"); + } + max_threads = ms->smp.max_cpus; + break; + case ON_OFF_AUTO_OFF: + break; + default: + g_assert_not_reached(); + } #endif tcg_allowed = true; - mttcg_enabled = s->mttcg_enabled; page_init(); tb_htable_init(); - tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_cpus); + tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_threads); #if defined(CONFIG_SOFTMMU) /* @@ -124,6 +140,10 @@ static int tcg_init_machine(MachineState *ms) tcg_prologue_init(); #endif +#ifdef CONFIG_USER_ONLY + qdev_create_fake_machine(); +#endif + return 0; } @@ -131,7 +151,7 @@ static char *tcg_get_thread(Object *obj, Error **errp) { TCGState *s = TCG_STATE(obj); - return g_strdup(s->mttcg_enabled ? "multi" : "single"); + return g_strdup(s->mttcg_enabled == ON_OFF_AUTO_ON ? "multi" : "single"); } static void tcg_set_thread(Object *obj, const char *value, Error **errp) @@ -139,19 +159,13 @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp) TCGState *s = TCG_STATE(obj); if (strcmp(value, "multi") == 0) { - if (TCG_OVERSIZED_GUEST) { - error_setg(errp, "No MTTCG when guest word size > hosts"); - } else if (icount_enabled()) { + if (icount_enabled()) { error_setg(errp, "No MTTCG when icount is enabled"); } else { -#ifndef TARGET_SUPPORTS_MTTCG - warn_report("Guest not yet converted to MTTCG - " - "you may get unexpected results"); -#endif - s->mttcg_enabled = true; + s->mttcg_enabled = ON_OFF_AUTO_ON; } } else if (strcmp(value, "single") == 0) { - s->mttcg_enabled = false; + s->mttcg_enabled = ON_OFF_AUTO_OFF; } else { error_setg(errp, "Invalid 'thread' setting %s", value); } @@ -207,7 +221,7 @@ static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp) qatomic_set(&one_insn_per_tb, value); } -static int tcg_gdbstub_supported_sstep_flags(void) +static int tcg_gdbstub_supported_sstep_flags(AccelState *as) { /* * In replay mode all events will come from the log and can't be @@ -222,13 +236,14 @@ static int tcg_gdbstub_supported_sstep_flags(void) } } -static void tcg_accel_class_init(ObjectClass *oc, void *data) +static void tcg_accel_class_init(ObjectClass *oc, const void *data) { AccelClass *ac = ACCEL_CLASS(oc); ac->name = "tcg"; ac->init_machine = tcg_init_machine; ac->cpu_common_realize = tcg_exec_realizefn; ac->cpu_common_unrealize = tcg_exec_unrealizefn; + ac->get_stats = tcg_get_stats; ac->allowed = &tcg_allowed; ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags; diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c index afca89baa1c..ff927c5dd8d 100644 --- a/accel/tcg/tcg-runtime-gvec.c +++ b/accel/tcg/tcg-runtime-gvec.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "qemu/host-utils.h" -#include "cpu.h" #include "exec/helper-proto-common.h" #include "tcg/tcg-gvec-desc.h" diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c index 9fa539ad3d7..fa7ed9739c7 100644 --- a/accel/tcg/tcg-runtime.c +++ b/accel/tcg/tcg-runtime.c @@ -23,13 +23,9 @@ */ #include "qemu/osdep.h" #include "qemu/host-utils.h" -#include "cpu.h" +#include "exec/cpu-common.h" #include "exec/helper-proto-common.h" -#include "exec/cpu_ldst.h" -#include "exec/exec-all.h" -#include "disas/disas.h" -#include 
"exec/log.h" -#include "tcg/tcg.h" +#include "accel/tcg/getpc.h" #define HELPER_H "accel/tcg/tcg-runtime.h" #include "exec/helper-info.c.inc" diff --git a/accel/tcg/tcg-stats.c b/accel/tcg/tcg-stats.c new file mode 100644 index 00000000000..ced5dec0c4f --- /dev/null +++ b/accel/tcg/tcg-stats.c @@ -0,0 +1,219 @@ +/* + * SPDX-License-Identifier: LGPL-2.1-or-later + * + * QEMU TCG statistics + * + * Copyright (c) 2003-2005 Fabrice Bellard + */ + +#include "qemu/osdep.h" +#include "qemu/accel.h" +#include "qemu/qht.h" +#include "qapi/error.h" +#include "system/cpu-timers.h" +#include "exec/icount.h" +#include "hw/core/cpu.h" +#include "tcg/tcg.h" +#include "internal-common.h" +#include "tb-context.h" +#include + +static void dump_drift_info(GString *buf) +{ + if (!icount_enabled()) { + return; + } + + g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n", + (cpu_get_clock() - icount_get()) / SCALE_MS); + if (icount_align_option) { + g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n", + -max_delay / SCALE_MS); + g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n", + max_advance / SCALE_MS); + } else { + g_string_append_printf(buf, "Max guest delay NA\n"); + g_string_append_printf(buf, "Max guest advance NA\n"); + } +} + +static void dump_accel_info(AccelState *accel, GString *buf) +{ + bool one_insn_per_tb = object_property_get_bool(OBJECT(accel), + "one-insn-per-tb", + &error_fatal); + + g_string_append_printf(buf, "Accelerator settings:\n"); + g_string_append_printf(buf, "one-insn-per-tb: %s\n\n", + one_insn_per_tb ? "on" : "off"); +} + +static void print_qht_statistics(struct qht_stats hst, GString *buf) +{ + uint32_t hgram_opts; + size_t hgram_bins; + char *hgram; + double avg; + + if (!hst.head_buckets) { + return; + } + g_string_append_printf(buf, "TB hash buckets %zu/%zu " + "(%0.2f%% head buckets used)\n", + hst.used_head_buckets, hst.head_buckets, + (double)hst.used_head_buckets / + hst.head_buckets * 100); + + hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; + hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT; + if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) { + hgram_opts |= QDIST_PR_NODECIMAL; + } + hgram = qdist_pr(&hst.occupancy, 10, hgram_opts); + avg = qdist_avg(&hst.occupancy); + if (!isnan(avg)) { + g_string_append_printf(buf, "TB hash occupancy " + "%0.2f%% avg chain occ. " + "Histogram: %s\n", + avg * 100, hgram); + } + g_free(hgram); + + hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; + hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain); + if (hgram_bins > 10) { + hgram_bins = 10; + } else { + hgram_bins = 0; + hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE; + } + hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts); + avg = qdist_avg(&hst.chain); + if (!isnan(avg)) { + g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. 
" + "Histogram: %s\n", + avg, hgram); + } + g_free(hgram); +} + +struct tb_tree_stats { + size_t nb_tbs; + size_t host_size; + size_t target_size; + size_t max_target_size; + size_t direct_jmp_count; + size_t direct_jmp2_count; + size_t cross_page; +}; + +static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data) +{ + const TranslationBlock *tb = value; + struct tb_tree_stats *tst = data; + + tst->nb_tbs++; + tst->host_size += tb->tc.size; + tst->target_size += tb->size; + if (tb->size > tst->max_target_size) { + tst->max_target_size = tb->size; + } +#ifndef CONFIG_USER_ONLY + if (tb->page_addr[1] != -1) { + tst->cross_page++; + } +#endif + if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) { + tst->direct_jmp_count++; + if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) { + tst->direct_jmp2_count++; + } + } + return false; +} + +static void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide) +{ + CPUState *cpu; + size_t full = 0, part = 0, elide = 0; + + CPU_FOREACH(cpu) { + full += qatomic_read(&cpu->neg.tlb.c.full_flush_count); + part += qatomic_read(&cpu->neg.tlb.c.part_flush_count); + elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count); + } + *pfull = full; + *ppart = part; + *pelide = elide; +} + +static void tcg_dump_flush_info(GString *buf) +{ + size_t flush_full, flush_part, flush_elide; + + g_string_append_printf(buf, "TB flush count %u\n", + qatomic_read(&tb_ctx.tb_flush_count)); + g_string_append_printf(buf, "TB invalidate count %u\n", + qatomic_read(&tb_ctx.tb_phys_invalidate_count)); + + tlb_flush_counts(&flush_full, &flush_part, &flush_elide); + g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full); + g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part); + g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide); +} + +static void dump_exec_info(GString *buf) +{ + struct tb_tree_stats tst = {}; + struct qht_stats hst; + size_t nb_tbs; + + tcg_tb_foreach(tb_tree_stats_iter, &tst); + nb_tbs = tst.nb_tbs; + /* XXX: avoid using doubles ? */ + g_string_append_printf(buf, "Translation buffer state:\n"); + /* + * Report total code size including the padding and TB structs; + * otherwise users might think "-accel tcg,tb-size" is not honoured. + * For avg host size we use the precise numbers from tb_tree_stats though. + */ + g_string_append_printf(buf, "gen code size %zu/%zu\n", + tcg_code_size(), tcg_code_capacity()); + g_string_append_printf(buf, "TB count %zu\n", nb_tbs); + g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n", + nb_tbs ? tst.target_size / nb_tbs : 0, + tst.max_target_size); + g_string_append_printf(buf, "TB avg host size %zu bytes " + "(expansion ratio: %0.1f)\n", + nb_tbs ? tst.host_size / nb_tbs : 0, + tst.target_size ? + (double)tst.host_size / tst.target_size : 0); + g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n", + tst.cross_page, + nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0); + g_string_append_printf(buf, "direct jump count %zu (%zu%%) " + "(2 jumps=%zu %zu%%)\n", + tst.direct_jmp_count, + nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0, + tst.direct_jmp2_count, + nb_tbs ? 
(tst.direct_jmp2_count * 100) / nb_tbs : 0); + + qht_statistics_init(&tb_ctx.htable, &hst); + print_qht_statistics(hst, buf); + qht_statistics_destroy(&hst); + + g_string_append_printf(buf, "\nStatistics:\n"); + tcg_dump_flush_info(buf); +} + +void tcg_get_stats(AccelState *accel, GString *buf) +{ + dump_accel_info(accel, buf); + dump_exec_info(buf); + dump_drift_info(buf); +} + +void tcg_dump_stats(GString *buf) +{ + tcg_get_stats(current_accel(), buf); +} diff --git a/accel/tcg/tlb-bounds.h b/accel/tcg/tlb-bounds.h new file mode 100644 index 00000000000..f83d9ac9eed --- /dev/null +++ b/accel/tcg/tlb-bounds.h @@ -0,0 +1,13 @@ +/* + * softmmu size bounds + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef ACCEL_TCG_TLB_BOUNDS_H +#define ACCEL_TCG_TLB_BOUNDS_H + +#define CPU_TLB_DYN_MIN_BITS 6 +#define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS) +#define CPU_TLB_DYN_DEFAULT_BITS 8 + +#endif /* ACCEL_TCG_TLB_BOUNDS_H */ diff --git a/accel/tcg/trace-events b/accel/tcg/trace-events index 4e9b450520b..14f638810c6 100644 --- a/accel/tcg/trace-events +++ b/accel/tcg/trace-events @@ -12,3 +12,15 @@ memory_notdirty_set_dirty(uint64_t vaddr) "0x%" PRIx64 # translate-all.c translate_block(void *tb, uintptr_t pc, const void *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p" + +# ldst_atomicity +load_atom2_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR"" +load_atom4_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR"" +load_atom8_or_exit_fallback(uintptr_t ra) "ra:0x%"PRIxPTR"" +load_atom8_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR"" +load_atom16_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR"" +load_atom16_or_exit_fallback(uintptr_t ra) "ra:0x%"PRIxPTR"" +store_atom2_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR"" +store_atom4_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR"" +store_atom8_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR"" +store_atom16_fallback(uint32_t memop, uintptr_t ra) "mop:0x%"PRIx32", ra:0x%"PRIxPTR"" diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index fdf6d8ac191..d468667b0dd 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -21,48 +21,20 @@ #include "trace.h" #include "disas/disas.h" -#include "exec/exec-all.h" #include "tcg/tcg.h" -#if defined(CONFIG_USER_ONLY) -#include "qemu.h" -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) -#include -#if __FreeBSD_version >= 700104 -#define HAVE_KINFO_GETVMMAP -#define sigqueue sigqueue_freebsd /* avoid redefinition */ -#include -#include -#define _KERNEL -#include -#undef _KERNEL -#undef sigqueue -#include -#endif -#endif -#else -#include "exec/ram_addr.h" -#endif - -#include "exec/cputlb.h" -#include "exec/translate-all.h" -#include "exec/translator.h" +#include "exec/mmap-lock.h" +#include "tb-internal.h" #include "exec/tb-flush.h" -#include "qemu/bitmap.h" -#include "qemu/qemu-print.h" -#include "qemu/main-loop.h" #include "qemu/cacheinfo.h" -#include "qemu/timer.h" +#include "qemu/target-info.h" #include "exec/log.h" -#include "sysemu/cpus.h" -#include "sysemu/cpu-timers.h" -#include "sysemu/tcg.h" -#include "qapi/error.h" -#include "hw/core/tcg-cpu-ops.h" +#include "exec/icount.h" +#include "accel/tcg/cpu-ops.h" #include "tb-jmp-cache.h" #include "tb-hash.h" #include "tb-context.h" +#include "tb-internal.h" #include "internal-common.h" -#include "internal-target.h" #include "tcg/perf.h" #include 
"tcg/insn-start-words.h" @@ -105,7 +77,7 @@ static int64_t decode_sleb128(const uint8_t **pp) val |= (int64_t)(byte & 0x7f) << shift; shift += 7; } while (byte & 0x80); - if (shift < TARGET_LONG_BITS && (byte & 0x40)) { + if (shift < 64 && (byte & 0x40)) { val |= -(int64_t)1 << shift; } @@ -116,7 +88,7 @@ static int64_t decode_sleb128(const uint8_t **pp) /* Encode the data collected about the instructions while compiling TB. Place the data at BLOCK, and return the number of bytes consumed. - The logical table consists of TARGET_INSN_START_WORDS target_ulong's, + The logical table consists of INSN_START_WORDS uint64_t's, which come from the target's insn_start data, followed by a uintptr_t which comes from the host pc of the end of the code implementing the insn. @@ -136,13 +108,13 @@ static int encode_search(TranslationBlock *tb, uint8_t *block) for (i = 0, n = tb->icount; i < n; ++i) { uint64_t prev, curr; - for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { + for (j = 0; j < INSN_START_WORDS; ++j) { if (i == 0) { prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0); } else { - prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j]; + prev = insn_data[(i - 1) * INSN_START_WORDS + j]; } - curr = insn_data[i * TARGET_INSN_START_WORDS + j]; + curr = insn_data[i * INSN_START_WORDS + j]; p = encode_sleb128(p, curr - prev); } prev = (i == 0 ? 0 : insn_end_off[i - 1]); @@ -174,7 +146,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc, return -1; } - memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS); + memset(data, 0, sizeof(uint64_t) * INSN_START_WORDS); if (!(tb_cflags(tb) & CF_PCREL)) { data[0] = tb->pc; } @@ -184,7 +156,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc, * at which the end of the insn exceeds host_pc. */ for (i = 0; i < num_insns; ++i) { - for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { + for (j = 0; j < INSN_START_WORDS; ++j) { data[j] += decode_sleb128(&p); } iter_pc += decode_sleb128(&p); @@ -202,7 +174,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc, void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, uintptr_t host_pc) { - uint64_t data[TARGET_INSN_START_WORDS]; + uint64_t data[INSN_START_WORDS]; int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data); if (insns_left < 0) { @@ -274,8 +246,10 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb, tcg_func_start(tcg_ctx); - tcg_ctx->cpu = env_cpu(env); - gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc); + CPUState *cs = env_cpu(env); + tcg_ctx->cpu = cs; + cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc); + assert(tb->size != 0); tcg_ctx->cpu = NULL; *max_insns = tb->icount; @@ -284,9 +258,7 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb, } /* Called with mmap_lock held for user mode emulation. 
*/ -TranslationBlock *tb_gen_code(CPUState *cpu, - vaddr pc, uint64_t cs_base, - uint32_t flags, int cflags) +TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s) { CPUArchState *env = cpu_env(cpu); TranslationBlock *tb, *existing_tb; @@ -299,14 +271,14 @@ TranslationBlock *tb_gen_code(CPUState *cpu, assert_memory_lock(); qemu_thread_jit_write(); - phys_pc = get_page_addr_code_hostp(env, pc, &host_pc); + phys_pc = get_page_addr_code_hostp(env, s.pc, &host_pc); if (phys_pc == -1) { /* Generate a one-shot TB with 1 insn in it */ - cflags = (cflags & ~CF_COUNT_MASK) | 1; + s.cflags = (s.cflags & ~CF_COUNT_MASK) | 1; } - max_insns = cflags & CF_COUNT_MASK; + max_insns = s.cflags & CF_COUNT_MASK; if (max_insns == 0) { max_insns = TCG_MAX_INSNS; } @@ -326,12 +298,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu, gen_code_buf = tcg_ctx->code_gen_ptr; tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf); - if (!(cflags & CF_PCREL)) { - tb->pc = pc; + if (!(s.cflags & CF_PCREL)) { + tb->pc = s.pc; } - tb->cs_base = cs_base; - tb->flags = flags; - tb->cflags = cflags; + tb->cs_base = s.cs_base; + tb->flags = s.flags; + tb->cflags = s.cflags; tb_set_page_addr0(tb, phys_pc); tb_set_page_addr1(tb, -1); if (phys_pc != -1) { @@ -339,30 +311,20 @@ TranslationBlock *tb_gen_code(CPUState *cpu, } tcg_ctx->gen_tb = tb; - tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64; -#ifdef CONFIG_SOFTMMU - tcg_ctx->page_bits = TARGET_PAGE_BITS; - tcg_ctx->page_mask = TARGET_PAGE_MASK; - tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS; -#endif - tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS; -#ifdef TCG_GUEST_DEFAULT_MO - tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO; -#else - tcg_ctx->guest_mo = TCG_MO_ALL; -#endif + tcg_ctx->addr_type = target_long_bits() == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64; + tcg_ctx->guest_mo = cpu->cc->tcg_ops->guest_default_memory_order; restart_translate: - trace_translate_block(tb, pc, tb->tc.ptr); + trace_translate_block(tb, s.pc, tb->tc.ptr); - gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti); + gen_code_size = setjmp_gen_code(env, tb, s.pc, host_pc, &max_insns, &ti); if (unlikely(gen_code_size < 0)) { switch (gen_code_size) { case -1: /* * Overflow of code_gen_buffer, or the current slice of it. * - * TODO: We don't need to re-do gen_intermediate_code, nor + * TODO: We don't need to re-do tcg_ops->translate_code, nor * should we re-do the tcg optimization currently hidden * inside tcg_gen_code. All that should be required is to * flush the TBs, allocate a new TB, re-initialize it per @@ -432,10 +394,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu, * For CF_PCREL, attribute all executions of the generated code * to its first mapping. 
*/ - perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf)); + perf_report_code(s.pc, tb, tcg_splitwx_to_rx(gen_code_buf)); if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && - qemu_log_in_addr_range(pc)) { + qemu_log_in_addr_range(s.pc)) { FILE *logfile = qemu_log_trylock(); if (logfile) { int code_size, data_size; @@ -457,7 +419,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, fprintf(logfile, "OUT: [size=%d]\n", gen_code_size); fprintf(logfile, " -- guest addr 0x%016" PRIx64 " + tb prologue\n", - tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]); + tcg_ctx->gen_insn_data[insn * INSN_START_WORDS]); chunk_start = tcg_ctx->gen_insn_end_off[insn]; disas(logfile, tb->tc.ptr, chunk_start); @@ -470,7 +432,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, size_t chunk_end = tcg_ctx->gen_insn_end_off[insn]; if (chunk_end > chunk_start) { fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n", - tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]); + tcg_ctx->gen_insn_data[insn * INSN_START_WORDS]); disas(logfile, tb->tc.ptr + chunk_start, chunk_end - chunk_start); chunk_start = chunk_end; @@ -527,23 +489,32 @@ TranslationBlock *tb_gen_code(CPUState *cpu, tb_reset_jump(tb, 1); } + /* + * Insert TB into the corresponding region tree before publishing it + * through QHT. Otherwise rewinding happened in the TB might fail to + * lookup itself using host PC. + */ + tcg_tb_insert(tb); + /* * If the TB is not associated with a physical RAM page then it must be - * a temporary one-insn TB, and we have nothing left to do. Return early - * before attempting to link to other TBs or add to the lookup table. + * a temporary one-insn TB. + * + * Such TBs must be added to region trees in order to make sure that + * restore_state_to_opc() - which on some architectures is not limited to + * rewinding, but also affects exception handling! - is called when such a + * TB causes an exception. + * + * At the same time, temporary one-insn TBs must be executed at most once, + * because subsequent reads from, e.g., I/O memory may return different + * values. So return early before attempting to link to other TBs or add + * to the QHT. */ if (tb_page_addr0(tb) == -1) { assert_no_pages_locked(); return tb; } - /* - * Insert TB into the corresponding region tree before publishing it - * through QHT. Otherwise rewinding happened in the TB might fail to - * lookup itself using host PC. - */ - tcg_tb_insert(tb); - /* * No explicit memory barrier is required -- tb_link_page() makes the * TB visible in a consistent state. @@ -579,15 +550,11 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr) /* The exception probably happened in a helper. The CPU state should have been saved before calling it. Fetch the PC from there. */ CPUArchState *env = cpu_env(cpu); - vaddr pc; - uint64_t cs_base; - tb_page_addr_t addr; - uint32_t flags; + TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu); + tb_page_addr_t addr = get_page_addr_code(env, s.pc); - cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); - addr = get_page_addr_code(env, pc); if (addr != -1) { - tb_invalidate_phys_range(addr, addr); + tb_invalidate_phys_range(cpu, addr, addr); } } } @@ -618,7 +585,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) * to account for the re-execution of the branch. 
*/ n = 1; - cc = CPU_GET_CLASS(cpu); + cc = cpu->cc; if (cc->tcg_ops->io_recompile_replay_branch && cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) { cpu->neg.icount_decr.u16.low++; @@ -629,9 +596,10 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) * Exit the loop and potentially generate a new TB executing the * just the I/O insns. We also limit instrumentation to memory * operations only (which execute after completion) so we don't - * double instrument the instruction. + * double instrument the instruction. Also don't let an IRQ sneak + * in before we execute it. */ - cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n; + cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_NOIRQ | n; if (qemu_loglevel_mask(CPU_LOG_EXEC)) { vaddr pc = cpu->cc->get_pc(cpu); diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c index 113edcffe35..034f2f359ef 100644 --- a/accel/tcg/translator.c +++ b/accel/tcg/translator.c @@ -8,23 +8,24 @@ */ #include "qemu/osdep.h" +#include "qemu/bswap.h" #include "qemu/log.h" #include "qemu/error-report.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-ldst-common.h" +#include "accel/tcg/cpu-mmu-index.h" +#include "exec/target_page.h" #include "exec/translator.h" -#include "exec/cpu_ldst.h" #include "exec/plugin-gen.h" -#include "exec/cpu_ldst.h" #include "tcg/tcg-op-common.h" -#include "internal-target.h" +#include "internal-common.h" #include "disas/disas.h" +#include "tb-internal.h" static void set_can_do_io(DisasContextBase *db, bool val) { QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1); tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env, - offsetof(ArchCPU, parent_obj.neg.can_do_io) - - offsetof(ArchCPU, env)); + offsetof(CPUState, neg.can_do_io) - sizeof(CPUState)); } bool translator_io_start(DisasContextBase *db) @@ -47,8 +48,8 @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags) if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) { count = tcg_temp_new_i32(); tcg_gen_ld_i32(count, tcg_env, - offsetof(ArchCPU, parent_obj.neg.icount_decr.u32) - - offsetof(ArchCPU, env)); + offsetof(CPUState, neg.icount_decr.u32) - + sizeof(CPUState)); } if (cflags & CF_USE_ICOUNT) { @@ -77,8 +78,8 @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags) if (cflags & CF_USE_ICOUNT) { tcg_gen_st16_i32(count, tcg_env, - offsetof(ArchCPU, parent_obj.neg.icount_decr.u16.low) - - offsetof(ArchCPU, env)); + offsetof(CPUState, neg.icount_decr.u16.low) - + sizeof(CPUState)); } return icount_start_insn; @@ -102,6 +103,11 @@ static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags, } } +bool translator_is_same_page(const DisasContextBase *db, vaddr addr) +{ + return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0; +} + bool translator_use_goto_tb(DisasContextBase *db, vaddr dest) { /* Suppress goto_tb if requested. */ @@ -110,7 +116,7 @@ bool translator_use_goto_tb(DisasContextBase *db, vaddr dest) } /* Check for the dest on the same page as the start of the TB. 
*/ - return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0; + return translator_is_same_page(db, dest); } void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, @@ -129,13 +135,13 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, db->is_jmp = DISAS_NEXT; db->num_insns = 0; db->max_insns = *max_insns; - db->singlestep_enabled = cflags & CF_SINGLE_STEP; db->insn_start = NULL; db->fake_insn = false; db->host_addr[0] = host_pc; db->host_addr[1] = NULL; db->record_start = 0; db->record_len = 0; + db->code_mmuidx = cpu_mmu_index(cpu, true); ops->init_disas_context(db, cpu); tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ @@ -259,12 +265,14 @@ static bool translator_ld(CPUArchState *env, DisasContextBase *db, if (likely(((base ^ last) & TARGET_PAGE_MASK) == 0)) { /* Entire read is from the first page. */ - memcpy(dest, host + (pc - base), len); - return true; + goto do_read; } if (unlikely(((base ^ pc) & TARGET_PAGE_MASK) == 0)) { - /* Read begins on the first page and extends to the second. */ + /* + * Read begins on the first page and extends to the second. + * The unaligned read is never atomic. + */ size_t len0 = -(pc | TARGET_PAGE_MASK); memcpy(dest, host + (pc - base), len0); pc += len0; @@ -323,7 +331,39 @@ static bool translator_ld(CPUArchState *env, DisasContextBase *db, host = db->host_addr[1]; } - memcpy(dest, host + (pc - base), len); + do_read: + /* + * Assume aligned reads should be atomic, if possible. + * We're not in a position to jump out with EXCP_ATOMIC. + */ + host += pc - base; + switch (len) { + case 2: + if (QEMU_IS_ALIGNED(pc, 2)) { + uint16_t t = qatomic_read((uint16_t *)host); + stw_he_p(dest, t); + return true; + } + break; + case 4: + if (QEMU_IS_ALIGNED(pc, 4)) { + uint32_t t = qatomic_read((uint32_t *)host); + stl_he_p(dest, t); + return true; + } + break; +#ifdef CONFIG_ATOMIC64 + case 8: + if (QEMU_IS_ALIGNED(pc, 8)) { + uint64_t t = qatomic_read__nocheck((uint64_t *)host); + stq_he_p(dest, t); + return true; + } + break; +#endif + } + /* Unaligned or partial read from the second page is not atomic. 
*/ + memcpy(dest, host, len); return true; } @@ -417,55 +457,62 @@ bool translator_st(const DisasContextBase *db, void *dest, uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc) { - uint8_t raw; + uint8_t val; - if (!translator_ld(env, db, &raw, pc, sizeof(raw))) { - raw = cpu_ldub_code(env, pc); - record_save(db, pc, &raw, sizeof(raw)); + if (!translator_ld(env, db, &val, pc, sizeof(val))) { + MemOpIdx oi = make_memop_idx(MO_UB, db->code_mmuidx); + val = cpu_ldb_code_mmu(env, pc, oi, 0); + record_save(db, pc, &val, sizeof(val)); } - return raw; + return val; } -uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc) +uint16_t translator_lduw_end(CPUArchState *env, DisasContextBase *db, + vaddr pc, MemOp endian) { - uint16_t raw, tgt; + uint16_t val; - if (translator_ld(env, db, &raw, pc, sizeof(raw))) { - tgt = tswap16(raw); - } else { - tgt = cpu_lduw_code(env, pc); - raw = tswap16(tgt); - record_save(db, pc, &raw, sizeof(raw)); + if (!translator_ld(env, db, &val, pc, sizeof(val))) { + MemOpIdx oi = make_memop_idx(MO_UW, db->code_mmuidx); + val = cpu_ldw_code_mmu(env, pc, oi, 0); + record_save(db, pc, &val, sizeof(val)); } - return tgt; + if (endian & MO_BSWAP) { + val = bswap16(val); + } + return val; } -uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc) +uint32_t translator_ldl_end(CPUArchState *env, DisasContextBase *db, + vaddr pc, MemOp endian) { - uint32_t raw, tgt; + uint32_t val; - if (translator_ld(env, db, &raw, pc, sizeof(raw))) { - tgt = tswap32(raw); - } else { - tgt = cpu_ldl_code(env, pc); - raw = tswap32(tgt); - record_save(db, pc, &raw, sizeof(raw)); + if (!translator_ld(env, db, &val, pc, sizeof(val))) { + MemOpIdx oi = make_memop_idx(MO_UL, db->code_mmuidx); + val = cpu_ldl_code_mmu(env, pc, oi, 0); + record_save(db, pc, &val, sizeof(val)); + } + if (endian & MO_BSWAP) { + val = bswap32(val); } - return tgt; + return val; } -uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc) +uint64_t translator_ldq_end(CPUArchState *env, DisasContextBase *db, + vaddr pc, MemOp endian) { - uint64_t raw, tgt; + uint64_t val; - if (translator_ld(env, db, &raw, pc, sizeof(raw))) { - tgt = tswap64(raw); - } else { - tgt = cpu_ldq_code(env, pc); - raw = tswap64(tgt); - record_save(db, pc, &raw, sizeof(raw)); + if (!translator_ld(env, db, &val, pc, sizeof(val))) { + MemOpIdx oi = make_memop_idx(MO_UQ, db->code_mmuidx); + val = cpu_ldq_code_mmu(env, pc, oi, 0); + record_save(db, pc, &val, sizeof(val)); + } + if (endian & MO_BSWAP) { + val = bswap64(val); } - return tgt; + return val; } void translator_fake_ld(DisasContextBase *db, const void *data, size_t len) diff --git a/accel/tcg/user-exec-stub.c b/accel/tcg/user-exec-stub.c index 4fbe2dbdc88..1d52f48226a 100644 --- a/accel/tcg/user-exec-stub.c +++ b/accel/tcg/user-exec-stub.c @@ -1,6 +1,7 @@ #include "qemu/osdep.h" #include "hw/core/cpu.h" #include "exec/replay-core.h" +#include "internal-common.h" void cpu_resume(CPUState *cpu) { @@ -18,6 +19,16 @@ void cpu_exec_reset_hold(CPUState *cpu) { } +/* User mode emulation does not support softmmu yet. */ + +void tlb_init(CPUState *cpu) +{ +} + +void tlb_destroy(CPUState *cpu) +{ +} + /* User mode emulation does not support record/replay yet. */ bool replay_exception(void) diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 7ddc47b0ba4..f25d80e2dc2 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -17,22 +17,30 @@ * License along with this library; if not, see . 
*/ #include "qemu/osdep.h" -#include "hw/core/tcg-cpu-ops.h" +#include "accel/tcg/cpu-ops.h" #include "disas/disas.h" -#include "exec/exec-all.h" +#include "exec/vaddr.h" +#include "exec/tlb-flags.h" #include "tcg/tcg.h" #include "qemu/bitops.h" #include "qemu/rcu.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst-common.h" +#include "accel/tcg/helper-retaddr.h" +#include "accel/tcg/probe.h" +#include "user/cpu_loop.h" +#include "user/guest-host.h" #include "qemu/main-loop.h" -#include "exec/translate-all.h" +#include "user/page-protection.h" #include "exec/page-protection.h" -#include "exec/helper-proto.h" +#include "exec/helper-proto-common.h" #include "qemu/atomic128.h" -#include "trace/trace-root.h" +#include "qemu/bswap.h" +#include "qemu/int128.h" +#include "trace.h" #include "tcg/tcg-ldst.h" +#include "backend-ldst.h" #include "internal-common.h" -#include "internal-target.h" +#include "tb-internal.h" __thread uintptr_t helper_retaddr; @@ -118,9 +126,9 @@ MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write) * guest, we'd end up in an infinite loop of retrying the faulting access. */ bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set, - uintptr_t host_pc, abi_ptr guest_addr) + uintptr_t host_pc, vaddr guest_addr) { - switch (page_unprotect(guest_addr, host_pc)) { + switch (page_unprotect(cpu, guest_addr, host_pc)) { case 0: /* * Fault not caused by a page marked unwritable to protect @@ -154,7 +162,7 @@ typedef struct PageFlagsNode { static IntervalTreeRoot pageflags_root; -static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last) +static PageFlagsNode *pageflags_find(vaddr start, vaddr last) { IntervalTreeNode *n; @@ -162,8 +170,7 @@ static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last) return n ? container_of(n, PageFlagsNode, itree) : NULL; } -static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start, - target_ulong last) +static PageFlagsNode *pageflags_next(PageFlagsNode *p, vaddr start, vaddr last) { IntervalTreeNode *n; @@ -192,13 +199,22 @@ int walk_memory_regions(void *priv, walk_memory_regions_fn fn) return rc; } -static int dump_region(void *priv, target_ulong start, - target_ulong end, unsigned long prot) +static int dump_region(void *opaque, vaddr start, vaddr end, int prot) { - FILE *f = (FILE *)priv; + FILE *f = opaque; + uint64_t mask; + int width; - fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n", - start, end, end - start, + if (guest_addr_max <= UINT32_MAX) { + mask = UINT32_MAX, width = 8; + } else { + mask = UINT64_MAX, width = 16; + } + + fprintf(f, "%0*" PRIx64 "-%0*" PRIx64 " %0*" PRIx64 " %c%c%c\n", + width, start & mask, + width, end & mask, + width, (end - start) & mask, ((prot & PAGE_READ) ? 'r' : '-'), ((prot & PAGE_WRITE) ? 'w' : '-'), ((prot & PAGE_EXEC) ? 'x' : '-')); @@ -208,14 +224,14 @@ static int dump_region(void *priv, target_ulong start, /* dump memory mappings */ void page_dump(FILE *f) { - const int length = sizeof(target_ulong) * 2; + int width = guest_addr_max <= UINT32_MAX ? 8 : 16; fprintf(f, "%-*s %-*s %-*s %s\n", - length, "start", length, "end", length, "size", "prot"); + width, "start", width, "end", width, "size", "prot"); walk_memory_regions(f, dump_region); } -int page_get_flags(target_ulong address) +int page_get_flags(vaddr address) { PageFlagsNode *p = pageflags_find(address, address); @@ -238,7 +254,7 @@ int page_get_flags(target_ulong address) } /* A subroutine of page_set_flags: insert a new node for [start,last]. 
*/ -static void pageflags_create(target_ulong start, target_ulong last, int flags) +static void pageflags_create(vaddr start, vaddr last, int flags) { PageFlagsNode *p = g_new(PageFlagsNode, 1); @@ -249,13 +265,13 @@ static void pageflags_create(target_ulong start, target_ulong last, int flags) } /* A subroutine of page_set_flags: remove everything in [start,last]. */ -static bool pageflags_unset(target_ulong start, target_ulong last) +static bool pageflags_unset(vaddr start, vaddr last) { bool inval_tb = false; while (true) { PageFlagsNode *p = pageflags_find(start, last); - target_ulong p_last; + vaddr p_last; if (!p) { break; @@ -294,8 +310,7 @@ static bool pageflags_unset(target_ulong start, target_ulong last) * A subroutine of page_set_flags: nothing overlaps [start,last], * but check adjacent mappings and maybe merge into a single range. */ -static void pageflags_create_merge(target_ulong start, target_ulong last, - int flags) +static void pageflags_create_merge(vaddr start, vaddr last, int flags) { PageFlagsNode *next = NULL, *prev = NULL; @@ -346,11 +361,11 @@ static void pageflags_create_merge(target_ulong start, target_ulong last, #define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY) /* A subroutine of page_set_flags: add flags to [start,last]. */ -static bool pageflags_set_clear(target_ulong start, target_ulong last, +static bool pageflags_set_clear(vaddr start, vaddr last, int set_flags, int clear_flags) { PageFlagsNode *p; - target_ulong p_start, p_last; + vaddr p_start, p_last; int p_flags, merge_flags; bool inval_tb = false; @@ -485,12 +500,7 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last, return inval_tb; } -/* - * Modify the flags of a page and invalidate the code if necessary. - * The flag PAGE_WRITE_ORG is positioned automatically depending - * on PAGE_WRITE. The mmap_lock should already be held. - */ -void page_set_flags(target_ulong start, target_ulong last, int flags) +void page_set_flags(vaddr start, vaddr last, int flags) { bool reset = false; bool inval_tb = false; @@ -499,7 +509,7 @@ void page_set_flags(target_ulong start, target_ulong last, int flags) guest address space. If this assert fires, it probably indicates a missing call to h2g_valid. */ assert(start <= last); - assert(last <= GUEST_ADDR_MAX); + assert(last <= guest_addr_max); /* Only set PAGE_ANON with new mappings. */ assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET)); assert_memory_lock(); @@ -526,13 +536,13 @@ void page_set_flags(target_ulong start, target_ulong last, int flags) ~(reset ? 0 : PAGE_STICKY)); } if (inval_tb) { - tb_invalidate_phys_range(start, last); + tb_invalidate_phys_range(NULL, start, last); } } -bool page_check_range(target_ulong start, target_ulong len, int flags) +bool page_check_range(vaddr start, vaddr len, int flags) { - target_ulong last; + vaddr last; int locked; /* tri-state: =0: unlocked, +1: global, -1: local */ bool ret; @@ -581,7 +591,7 @@ bool page_check_range(target_ulong start, target_ulong len, int flags) break; } /* Asking about writable, but has been protected: undo. 
*/ - if (!page_unprotect(start, 0)) { + if (!page_unprotect(NULL, start, 0)) { ret = false; break; } @@ -608,20 +618,19 @@ bool page_check_range(target_ulong start, target_ulong len, int flags) return ret; } -bool page_check_range_empty(target_ulong start, target_ulong last) +bool page_check_range_empty(vaddr start, vaddr last) { assert(last >= start); assert_memory_lock(); return pageflags_find(start, last) == NULL; } -target_ulong page_find_range_empty(target_ulong min, target_ulong max, - target_ulong len, target_ulong align) +vaddr page_find_range_empty(vaddr min, vaddr max, vaddr len, vaddr align) { - target_ulong len_m1, align_m1; + vaddr len_m1, align_m1; assert(min <= max); - assert(max <= GUEST_ADDR_MAX); + assert(max <= guest_addr_max); assert(len != 0); assert(is_power_of_2(align)); assert_memory_lock(); @@ -656,10 +665,10 @@ target_ulong page_find_range_empty(target_ulong min, target_ulong max, } } -void page_protect(tb_page_addr_t address) +void tb_lock_page0(tb_page_addr_t address) { PageFlagsNode *p; - target_ulong start, last; + vaddr start, last; int host_page_size = qemu_real_host_page_size(); int prot; @@ -701,11 +710,13 @@ void page_protect(tb_page_addr_t address) * immediately exited. (We can only return 2 if the 'pc' argument is * non-zero.) */ -int page_unprotect(target_ulong address, uintptr_t pc) +int page_unprotect(CPUState *cpu, tb_page_addr_t address, uintptr_t pc) { PageFlagsNode *p; bool current_tb_invalidated; + assert((cpu == NULL) == (pc == 0)); + /* * Technically this isn't safe inside a signal handler. However we * know this only ever happens in a synchronous SEGV handler, so in @@ -728,15 +739,15 @@ int page_unprotect(target_ulong address, uintptr_t pc) * this thread raced with another one which got here first and * set the page to PAGE_WRITE and did the TB invalidate for us. */ -#ifdef TARGET_HAS_PRECISE_SMC - TranslationBlock *current_tb = tcg_tb_lookup(pc); - if (current_tb) { - current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID; + if (pc && cpu->cc->tcg_ops->precise_smc) { + TranslationBlock *current_tb = tcg_tb_lookup(pc); + if (current_tb) { + current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID; + } } -#endif } else { int host_page_size = qemu_real_host_page_size(); - target_ulong start, len, i; + vaddr start, len, i; int prot; if (host_page_size <= TARGET_PAGE_SIZE) { @@ -744,14 +755,15 @@ int page_unprotect(target_ulong address, uintptr_t pc) len = TARGET_PAGE_SIZE; prot = p->flags | PAGE_WRITE; pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0); - current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc); + current_tb_invalidated = + tb_invalidate_phys_page_unwind(cpu, start, pc); } else { start = address & -host_page_size; len = host_page_size; prot = 0; for (i = 0; i < len; i += TARGET_PAGE_SIZE) { - target_ulong addr = start + i; + vaddr addr = start + i; p = pageflags_find(addr, addr); if (p) { @@ -767,7 +779,7 @@ int page_unprotect(target_ulong address, uintptr_t pc) * the corresponding translated code. 
*/ current_tb_invalidated |= - tb_invalidate_phys_page_unwind(addr, pc); + tb_invalidate_phys_page_unwind(cpu, addr, pc); } } if (prot & PAGE_EXEC) { @@ -805,7 +817,7 @@ static int probe_access_internal(CPUArchState *env, vaddr addr, if (guest_addr_valid_untagged(addr)) { int page_flags = page_get_flags(addr); if (page_flags & acc_flag) { - if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE) + if (access_type != MMU_INST_FETCH && cpu_plugin_mem_cbs_enabled(env_cpu(env))) { return TLB_MMIO; } @@ -847,6 +859,12 @@ void *probe_access(CPUArchState *env, vaddr addr, int size, return size ? g2h(env_cpu(env), addr) : NULL; } +void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr, + MMUAccessType access_type, int mmu_idx) +{ + return g2h(env_cpu(env), addr); +} + tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr, void **hostp) { @@ -861,7 +879,6 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr, return addr; } -#ifdef TARGET_PAGE_DATA_SIZE /* * Allocate chunks of target data together. For the only current user, * if we allocate one hunk per page, we have overhead of 40/128 or 40%. @@ -877,10 +894,16 @@ typedef struct TargetPageDataNode { } TargetPageDataNode; static IntervalTreeRoot targetdata_root; +static size_t target_page_data_size; -void page_reset_target_data(target_ulong start, target_ulong last) +void page_reset_target_data(vaddr start, vaddr last) { IntervalTreeNode *n, *next; + size_t size = target_page_data_size; + + if (likely(size == 0)) { + return; + } assert_memory_lock(); @@ -892,7 +915,7 @@ void page_reset_target_data(target_ulong start, target_ulong last) n != NULL; n = next, next = next ? interval_tree_iter_next(n, start, last) : NULL) { - target_ulong n_start, n_last, p_ofs, p_len; + vaddr n_start, n_last, p_ofs, p_len; TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree); if (n->start >= start && n->last <= last) { @@ -911,16 +934,21 @@ void page_reset_target_data(target_ulong start, target_ulong last) n_last = MIN(last, n->last); p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS; - memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0, - p_len * TARGET_PAGE_DATA_SIZE); + memset(t->data + p_ofs * size, 0, p_len * size); } } -void *page_get_target_data(target_ulong address) +void *page_get_target_data(vaddr address, size_t size) { IntervalTreeNode *n; TargetPageDataNode *t; - target_ulong page, region, p_ofs; + vaddr page, region, p_ofs; + + /* Remember the size from the first call, and it should be constant. */ + if (unlikely(target_page_data_size != size)) { + assert(target_page_data_size == 0); + target_page_data_size = size; + } page = address & TARGET_PAGE_MASK; region = address & TBD_MASK; @@ -936,8 +964,7 @@ void *page_get_target_data(target_ulong address) mmap_lock(); n = interval_tree_iter_first(&targetdata_root, page, page); if (!n) { - t = g_malloc0(sizeof(TargetPageDataNode) - + TPD_PAGES * TARGET_PAGE_DATA_SIZE); + t = g_malloc0(sizeof(TargetPageDataNode) + TPD_PAGES * size); n = &t->itree; n->start = region; n->last = region | ~TBD_MASK; @@ -948,18 +975,15 @@ void *page_get_target_data(target_ulong address) t = container_of(n, TargetPageDataNode, itree); p_ofs = (page - region) >> TARGET_PAGE_BITS; - return t->data + p_ofs * TARGET_PAGE_DATA_SIZE; + return t->data + p_ofs * size; } -#else -void page_reset_target_data(target_ulong start, target_ulong last) { } -#endif /* TARGET_PAGE_DATA_SIZE */ /* The system-mode versions of these helpers are in cputlb.c. 
*/ static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr, MemOp mop, uintptr_t ra, MMUAccessType type) { - int a_bits = get_alignment_bits(mop); + int a_bits = memop_alignment_bits(mop); void *ret; /* Enforce guest required alignment. */ @@ -972,6 +996,85 @@ static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr, return ret; } +/* physical memory access (slow version, mainly for debug) */ +int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, + void *ptr, size_t len, bool is_write) +{ + int flags; + vaddr l, page; + uint8_t *buf = ptr; + ssize_t written; + int ret = -1; + int fd = -1; + + mmap_lock(); + + while (len > 0) { + page = addr & TARGET_PAGE_MASK; + l = (page + TARGET_PAGE_SIZE) - addr; + if (l > len) { + l = len; + } + flags = page_get_flags(page); + if (!(flags & PAGE_VALID)) { + goto out_close; + } + if (is_write) { + if (flags & PAGE_WRITE) { + memcpy(g2h(cpu, addr), buf, l); + } else { + /* Bypass the host page protection using ptrace. */ + if (fd == -1) { + fd = open("/proc/self/mem", O_WRONLY); + if (fd == -1) { + goto out; + } + } + /* + * If there is a TranslationBlock and we weren't bypassing the + * host page protection, the memcpy() above would SEGV, + * ultimately leading to page_unprotect(). So invalidate the + * translations manually. Both invalidation and pwrite() must + * be under mmap_lock() in order to prevent the creation of + * another TranslationBlock in between. + */ + tb_invalidate_phys_range(NULL, addr, addr + l - 1); + written = pwrite(fd, buf, l, + (off_t)(uintptr_t)g2h_untagged(addr)); + if (written != l) { + goto out_close; + } + } + } else if (flags & PAGE_READ) { + memcpy(buf, g2h(cpu, addr), l); + } else { + /* Bypass the host page protection using ptrace. */ + if (fd == -1) { + fd = open("/proc/self/mem", O_RDONLY); + if (fd == -1) { + goto out; + } + } + if (pread(fd, buf, l, + (off_t)(uintptr_t)g2h_untagged(addr)) != l) { + goto out_close; + } + } + len -= l; + buf += l; + addr += l; + } + ret = 0; +out_close: + if (fd != -1) { + close(fd); + } +out: + mmap_unlock(); + + return ret; +} + #include "ldst_atomicity.c.inc" static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, @@ -980,7 +1083,7 @@ static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, void *haddr; uint8_t ret; - cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD); haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type); ret = ldub_p(haddr); clear_helper_retaddr(); @@ -994,7 +1097,7 @@ static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, uint16_t ret; MemOp mop = get_memop(oi); - cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD); haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type); ret = load_atom_2(cpu, ra, haddr, mop); clear_helper_retaddr(); @@ -1012,7 +1115,7 @@ static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, uint32_t ret; MemOp mop = get_memop(oi); - cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD); haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type); ret = load_atom_4(cpu, ra, haddr, mop); clear_helper_retaddr(); @@ -1030,7 +1133,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, uint64_t ret; MemOp mop = get_memop(oi); - cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD); haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type); ret = load_atom_8(cpu, ra, haddr, mop); clear_helper_retaddr(); @@ -1041,7 +1144,7 @@ static uint64_t 
do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, return ret; } -static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr, +static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, uintptr_t ra) { void *haddr; @@ -1049,7 +1152,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr, MemOp mop = get_memop(oi); tcg_debug_assert((mop & MO_SIZE) == MO_128); - cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); + cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD); haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD); ret = load_atom_16(cpu, ra, haddr, mop); clear_helper_retaddr(); @@ -1065,7 +1168,7 @@ static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val, { void *haddr; - cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); + cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST); haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE); stb_p(haddr, val); clear_helper_retaddr(); @@ -1077,7 +1180,7 @@ static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val, void *haddr; MemOp mop = get_memop(oi); - cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); + cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST); haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE); if (mop & MO_BSWAP) { @@ -1093,7 +1196,7 @@ static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val, void *haddr; MemOp mop = get_memop(oi); - cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); + cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST); haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE); if (mop & MO_BSWAP) { @@ -1109,7 +1212,7 @@ static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val, void *haddr; MemOp mop = get_memop(oi); - cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); + cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST); haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE); if (mop & MO_BSWAP) { @@ -1125,7 +1228,7 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val, void *haddr; MemOpIdx mop = get_memop(oi); - cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); + cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST); haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE); if (mop & MO_BSWAP) { @@ -1135,101 +1238,28 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val, clear_helper_retaddr(); } -uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr) -{ - uint32_t ret; - - set_helper_retaddr(1); - ret = ldub_p(g2h_untagged(ptr)); - clear_helper_retaddr(); - return ret; -} - -uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr) -{ - uint32_t ret; - - set_helper_retaddr(1); - ret = lduw_p(g2h_untagged(ptr)); - clear_helper_retaddr(); - return ret; -} - -uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr) -{ - uint32_t ret; - - set_helper_retaddr(1); - ret = ldl_p(g2h_untagged(ptr)); - clear_helper_retaddr(); - return ret; -} - -uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr) -{ - uint64_t ret; - - set_helper_retaddr(1); - ret = ldq_p(g2h_untagged(ptr)); - clear_helper_retaddr(); - return ret; -} - -uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr, +uint8_t cpu_ldb_code_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra) { - void *haddr; - uint8_t ret; - - haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH); - ret = ldub_p(haddr); - clear_helper_retaddr(); - return ret; + return do_ld1_mmu(env_cpu(env), addr, oi, ra ? 
ra : 1, MMU_INST_FETCH); } -uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr, +uint16_t cpu_ldw_code_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra) { - void *haddr; - uint16_t ret; - - haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH); - ret = lduw_p(haddr); - clear_helper_retaddr(); - if (get_memop(oi) & MO_BSWAP) { - ret = bswap16(ret); - } - return ret; + return do_ld2_mmu(env_cpu(env), addr, oi, ra ? ra : 1, MMU_INST_FETCH); } -uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr, +uint32_t cpu_ldl_code_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra) { - void *haddr; - uint32_t ret; - - haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH); - ret = ldl_p(haddr); - clear_helper_retaddr(); - if (get_memop(oi) & MO_BSWAP) { - ret = bswap32(ret); - } - return ret; + return do_ld4_mmu(env_cpu(env), addr, oi, ra ? ra : 1, MMU_INST_FETCH); } -uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr, +uint64_t cpu_ldq_code_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra) { - void *haddr; - uint64_t ret; - - haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); - ret = ldq_p(haddr); - clear_helper_retaddr(); - if (get_memop(oi) & MO_BSWAP) { - ret = bswap64(ret); - } - return ret; + return do_ld8_mmu(env_cpu(env), addr, oi, ra ? ra : 1, MMU_INST_FETCH); } #include "ldst_common.c.inc" @@ -1241,7 +1271,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, int size, uintptr_t retaddr) { MemOp mop = get_memop(oi); - int a_bits = get_alignment_bits(mop); + int a_bits = memop_alignment_bits(mop); void *ret; /* Enforce guest required alignment. */ diff --git a/accel/tcg/vcpu-state.h b/accel/tcg/vcpu-state.h index e407d914dfd..2e3464b5eef 100644 --- a/accel/tcg/vcpu-state.h +++ b/accel/tcg/vcpu-state.h @@ -1,6 +1,11 @@ /* - * SPDX-FileContributor: Philippe Mathieu-Daudé - * SPDX-FileCopyrightText: 2023 Linaro Ltd. + * TaskState helpers for QEMU + * + * Copyright (c) 2023 Linaro Ltd. 
+ * + * Authors: + * Philippe Mathieu-Daudé + * * SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef ACCEL_TCG_VCPU_STATE_H diff --git a/accel/tcg/watchpoint.c b/accel/tcg/watchpoint.c index d3aab114588..cfb37a49e72 100644 --- a/accel/tcg/watchpoint.c +++ b/accel/tcg/watchpoint.c @@ -19,13 +19,15 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" -#include "qemu/error-report.h" -#include "exec/exec-all.h" -#include "exec/translate-all.h" -#include "sysemu/tcg.h" -#include "sysemu/replay.h" -#include "hw/core/tcg-cpu-ops.h" +#include "exec/breakpoint.h" +#include "exec/cpu-interrupt.h" +#include "exec/page-protection.h" +#include "exec/translation-block.h" +#include "system/tcg.h" +#include "system/replay.h" +#include "accel/tcg/cpu-ops.h" #include "hw/core/cpu.h" +#include "internal-common.h" /* * Return true if this watchpoint address matches the specified @@ -66,7 +68,6 @@ int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len) void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, MemTxAttrs attrs, int flags, uintptr_t ra) { - CPUClass *cc = CPU_GET_CLASS(cpu); CPUWatchpoint *wp; assert(tcg_enabled()); @@ -82,9 +83,9 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, return; } - if (cc->tcg_ops->adjust_watchpoint_address) { + if (cpu->cc->tcg_ops->adjust_watchpoint_address) { /* this is currently used only by ARM BE32 */ - addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len); + addr = cpu->cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len); } assert((flags & ~BP_MEM_ACCESS) == 0); @@ -116,24 +117,21 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, wp->hitattrs = attrs; if (wp->flags & BP_CPU - && cc->tcg_ops->debug_check_watchpoint - && !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) { + && cpu->cc->tcg_ops->debug_check_watchpoint + && !cpu->cc->tcg_ops->debug_check_watchpoint(cpu, wp)) { wp->flags &= ~BP_WATCHPOINT_HIT; continue; } cpu->watchpoint_hit = wp; - mmap_lock(); /* This call also restores vCPU state */ tb_check_watchpoint(cpu, ra); if (wp->flags & BP_STOP_BEFORE_ACCESS) { cpu->exception_index = EXCP_DEBUG; - mmap_unlock(); cpu_loop_exit(cpu); } else { /* Force execution of one insn next time. 
*/ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); - mmap_unlock(); cpu_loop_exit_noexc(cpu); } } else { diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c index 0bdefce5375..97377d67d1c 100644 --- a/accel/xen/xen-all.c +++ b/accel/xen/xen-all.c @@ -18,9 +18,12 @@ #include "hw/xen/xen_igd.h" #include "chardev/char.h" #include "qemu/accel.h" -#include "sysemu/cpus.h" -#include "sysemu/xen.h" -#include "sysemu/runstate.h" +#include "accel/dummy-cpus.h" +#include "accel/accel-ops.h" +#include "accel/accel-cpu-ops.h" +#include "system/cpus.h" +#include "system/xen.h" +#include "system/runstate.h" #include "migration/misc.h" #include "migration/global_state.h" #include "hw/boards.h" @@ -62,7 +65,7 @@ static void xen_set_igd_gfx_passthru(Object *obj, bool value, Error **errp) xen_igd_gfx_pt_set(value, errp); } -static void xen_setup_post(MachineState *ms, AccelState *accel) +static void xen_setup_post(AccelState *as) { int rc; @@ -75,7 +78,7 @@ static void xen_setup_post(MachineState *ms, AccelState *accel) } } -static int xen_init(MachineState *ms) +static int xen_init(AccelState *as, MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); @@ -115,7 +118,7 @@ static int xen_init(MachineState *ms) return 0; } -static void xen_accel_class_init(ObjectClass *oc, void *data) +static void xen_accel_class_init(ObjectClass *oc, const void *data) { AccelClass *ac = ACCEL_CLASS(oc); static GlobalProperty compat[] = { @@ -146,11 +149,12 @@ static const TypeInfo xen_accel_type = { .class_init = xen_accel_class_init, }; -static void xen_accel_ops_class_init(ObjectClass *oc, void *data) +static void xen_accel_ops_class_init(ObjectClass *oc, const void *data) { AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); ops->create_vcpu_thread = dummy_start_vcpu_thread; + ops->handle_interrupt = generic_handle_interrupt; } static const TypeInfo xen_accel_ops_type = { diff --git a/audio/alsaaudio.c b/audio/alsaaudio.c index cacae1ea59c..9b6c01c0ef5 100644 --- a/audio/alsaaudio.c +++ b/audio/alsaaudio.c @@ -899,7 +899,7 @@ static void alsa_enable_in(HWVoiceIn *hw, bool enable) static void alsa_init_per_direction(AudiodevAlsaPerDirectionOptions *apdo) { if (!apdo->has_try_poll) { - apdo->try_poll = true; + apdo->try_poll = false; apdo->has_try_poll = true; } } diff --git a/audio/audio-hmp-cmds.c b/audio/audio-hmp-cmds.c index c9608b715b8..8774c09f185 100644 --- a/audio/audio-hmp-cmds.c +++ b/audio/audio-hmp-cmds.c @@ -27,7 +27,7 @@ #include "monitor/hmp.h" #include "monitor/monitor.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" static QLIST_HEAD (capture_list_head, CaptureState) capture_head; diff --git a/audio/audio.c b/audio/audio.c index af0ae33fedb..89f091bc882 100644 --- a/audio/audio.c +++ b/audio/audio.c @@ -32,15 +32,15 @@ #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-visit-audio.h" #include "qapi/qapi-commands-audio.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/cutils.h" #include "qemu/error-report.h" #include "qemu/log.h" #include "qemu/module.h" #include "qemu/help_option.h" -#include "sysemu/sysemu.h" -#include "sysemu/replay.h" -#include "sysemu/runstate.h" +#include "system/system.h" +#include "system/replay.h" +#include "system/runstate.h" #include "ui/qemu-spice.h" #include "trace.h" @@ -905,6 +905,14 @@ size_t AUD_read(SWVoiceIn *sw, void *buf, size_t size) int AUD_get_buffer_size_out(SWVoiceOut *sw) { + if (!sw) { + return 0; + } + + if (audio_get_pdo_out(sw->s->dev)->mixing_engine) { + return sw->resample_buf.size * 
sw->info.bytes_per_frame; + } + return sw->hw->samples * sw->hw->info.bytes_per_frame; } @@ -1884,7 +1892,8 @@ CaptureVoiceOut *AUD_add_capture( cap->buf = g_malloc0_n(hw->mix_buf.size, hw->info.bytes_per_frame); if (hw->info.is_float) { - hw->clip = mixeng_clip_float[hw->info.nchannels == 2]; + hw->clip = mixeng_clip_float[hw->info.nchannels == 2] + [hw->info.swap_endianness]; } else { hw->clip = mixeng_clip [hw->info.nchannels == 2] @@ -2274,17 +2283,19 @@ size_t audio_rate_peek_bytes(RateCtl *rate, struct audio_pcm_info *info) ticks = now - rate->start_ticks; bytes = muldiv64(ticks, info->bytes_per_second, NANOSECONDS_PER_SECOND); frames = (bytes - rate->bytes_sent) / info->bytes_per_frame; - if (frames < 0 || frames > 65536) { - AUD_log(NULL, "Resetting rate control (%" PRId64 " frames)\n", frames); - audio_rate_start(rate); - frames = 0; - } + rate->peeked_frames = frames; - return frames * info->bytes_per_frame; + return frames < 0 ? 0 : frames * info->bytes_per_frame; } void audio_rate_add_bytes(RateCtl *rate, size_t bytes_used) { + if (rate->peeked_frames < 0 || rate->peeked_frames > 65536) { + AUD_log(NULL, "Resetting rate control (%" PRId64 " frames)\n", + rate->peeked_frames); + audio_rate_start(rate); + } + rate->bytes_sent += bytes_used; } diff --git a/audio/audio_int.h b/audio/audio_int.h index 2d079d00a25..f78ca05f92f 100644 --- a/audio/audio_int.h +++ b/audio/audio_int.h @@ -255,6 +255,7 @@ const char *audio_application_name(void); typedef struct RateCtl { int64_t start_ticks; int64_t bytes_sent; + int64_t peeked_frames; } RateCtl; void audio_rate_start(RateCtl *rate); diff --git a/audio/audio_template.h b/audio/audio_template.h index 7ccfec01168..c29d79c4439 100644 --- a/audio/audio_template.h +++ b/audio/audio_template.h @@ -174,9 +174,11 @@ static int glue (audio_pcm_sw_init_, TYPE) ( if (sw->info.is_float) { #ifdef DAC - sw->conv = mixeng_conv_float[sw->info.nchannels == 2]; + sw->conv = mixeng_conv_float[sw->info.nchannels == 2] + [sw->info.swap_endianness]; #else - sw->clip = mixeng_clip_float[sw->info.nchannels == 2]; + sw->clip = mixeng_clip_float[sw->info.nchannels == 2] + [sw->info.swap_endianness]; #endif } else { #ifdef DAC @@ -303,9 +305,11 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s, if (hw->info.is_float) { #ifdef DAC - hw->clip = mixeng_clip_float[hw->info.nchannels == 2]; + hw->clip = mixeng_clip_float[hw->info.nchannels == 2] + [hw->info.swap_endianness]; #else - hw->conv = mixeng_conv_float[hw->info.nchannels == 2]; + hw->conv = mixeng_conv_float[hw->info.nchannels == 2] + [hw->info.swap_endianness]; #endif } else { #ifdef DAC diff --git a/audio/dbusaudio.c b/audio/dbusaudio.c index 095e7393820..b44fdd15111 100644 --- a/audio/dbusaudio.c +++ b/audio/dbusaudio.c @@ -43,9 +43,10 @@ #define DBUS_DISPLAY1_AUDIO_PATH DBUS_DISPLAY1_ROOT "/Audio" -#define DBUS_AUDIO_NSAMPLES 1024 /* could be configured? 
*/ +#define DBUS_DEFAULT_AUDIO_NSAMPLES 480 typedef struct DBusAudio { + Audiodev *dev; GDBusObjectManagerServer *server; bool p2p; GDBusObjectSkeleton *audio; @@ -151,6 +152,18 @@ dbus_init_out_listener(QemuDBusDisplay1AudioOutListener *listener, G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL, NULL); } +static guint +dbus_audio_get_nsamples(DBusAudio *da) +{ + AudiodevDBusOptions *opts = &da->dev->u.dbus; + + if (opts->has_nsamples && opts->nsamples) { + return opts->nsamples; + } else { + return DBUS_DEFAULT_AUDIO_NSAMPLES; + } +} + static int dbus_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque) { @@ -160,7 +173,7 @@ dbus_init_out(HWVoiceOut *hw, struct audsettings *as, void *drv_opaque) QemuDBusDisplay1AudioOutListener *listener = NULL; audio_pcm_init_info(&hw->info, as); - hw->samples = DBUS_AUDIO_NSAMPLES; + hw->samples = dbus_audio_get_nsamples(da); audio_rate_start(&vo->rate); g_hash_table_iter_init(&iter, da->out_listeners); @@ -274,7 +287,7 @@ dbus_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque) QemuDBusDisplay1AudioInListener *listener = NULL; audio_pcm_init_info(&hw->info, as); - hw->samples = DBUS_AUDIO_NSAMPLES; + hw->samples = dbus_audio_get_nsamples(da); audio_rate_start(&vo->rate); g_hash_table_iter_init(&iter, da->in_listeners); @@ -399,6 +412,7 @@ dbus_audio_init(Audiodev *dev, Error **errp) { DBusAudio *da = g_new0(DBusAudio, 1); + da->dev = dev; da->out_listeners = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_object_unref); da->in_listeners = g_hash_table_new_full(g_str_hash, g_str_equal, @@ -524,11 +538,17 @@ dbus_audio_register_listener(AudioState *s, ); } + GDBusConnectionFlags flags = + G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER; +#ifdef WIN32 + flags |= G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_ALLOW_ANONYMOUS; +#endif + listener_conn = g_dbus_connection_new_sync( G_IO_STREAM(socket_conn), guid, - G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER, + flags, NULL, NULL, &err); if (err) { error_report("Failed to setup peer connection: %s", err->message); @@ -646,6 +666,7 @@ dbus_audio_set_server(AudioState *s, GDBusObjectManagerServer *server, bool p2p) "swapped-signal::handle-register-out-listener", dbus_audio_register_out_listener, s, NULL); + qemu_dbus_display1_audio_set_nsamples(da->iface, dbus_audio_get_nsamples(da)); g_dbus_object_skeleton_add_interface(G_DBUS_OBJECT_SKELETON(da->audio), G_DBUS_INTERFACE_SKELETON(da->iface)); diff --git a/audio/mixeng.c b/audio/mixeng.c index 69f6549224f..703ee5448fa 100644 --- a/audio/mixeng.c +++ b/audio/mixeng.c @@ -283,10 +283,15 @@ static const float float_scale_reciprocal = 1.f / ((int64_t)INT32_MAX + 1); #endif #endif +#define F32_TO_F32S(v) \ + bswap32((union { uint32_t i; float f; }){ .f = (v) }.i) +#define F32S_TO_F32(v) \ + ((union { uint32_t i; float f; }){ .i = bswap32(v) }.f) + static void conv_natural_float_to_mono(struct st_sample *dst, const void *src, int samples) { - float *in = (float *)src; + const float *in = src; while (samples--) { dst->r = dst->l = CONV_NATURAL_FLOAT(*in++); @@ -294,10 +299,21 @@ static void conv_natural_float_to_mono(struct st_sample *dst, const void *src, } } +static void conv_swap_float_to_mono(struct st_sample *dst, const void *src, + int samples) +{ + const uint32_t *in_f32s = src; + + while (samples--) { + dst->r = dst->l = CONV_NATURAL_FLOAT(F32S_TO_F32(*in_f32s++)); + dst++; + } +} + static void conv_natural_float_to_stereo(struct st_sample *dst, const void *src, int samples) { - float *in = (float *)src; + const float *in = src; while 
(samples--) { dst->l = CONV_NATURAL_FLOAT(*in++); @@ -306,15 +322,33 @@ static void conv_natural_float_to_stereo(struct st_sample *dst, const void *src, } } -t_sample *mixeng_conv_float[2] = { - conv_natural_float_to_mono, - conv_natural_float_to_stereo, +static void conv_swap_float_to_stereo(struct st_sample *dst, const void *src, + int samples) +{ + const uint32_t *in_f32s = src; + + while (samples--) { + dst->l = CONV_NATURAL_FLOAT(F32S_TO_F32(*in_f32s++)); + dst->r = CONV_NATURAL_FLOAT(F32S_TO_F32(*in_f32s++)); + dst++; + } +} + +t_sample *mixeng_conv_float[2][2] = { + { + conv_natural_float_to_mono, + conv_swap_float_to_mono, + }, + { + conv_natural_float_to_stereo, + conv_swap_float_to_stereo, + } }; static void clip_natural_float_from_mono(void *dst, const struct st_sample *src, int samples) { - float *out = (float *)dst; + float *out = dst; while (samples--) { *out++ = CLIP_NATURAL_FLOAT(src->l + src->r); @@ -322,10 +356,21 @@ static void clip_natural_float_from_mono(void *dst, const struct st_sample *src, } } +static void clip_swap_float_from_mono(void *dst, const struct st_sample *src, + int samples) +{ + uint32_t *out_f32s = dst; + + while (samples--) { + *out_f32s++ = F32_TO_F32S(CLIP_NATURAL_FLOAT(src->l + src->r)); + src++; + } +} + static void clip_natural_float_from_stereo( void *dst, const struct st_sample *src, int samples) { - float *out = (float *)dst; + float *out = dst; while (samples--) { *out++ = CLIP_NATURAL_FLOAT(src->l); @@ -334,9 +379,27 @@ static void clip_natural_float_from_stereo( } } -f_sample *mixeng_clip_float[2] = { - clip_natural_float_from_mono, - clip_natural_float_from_stereo, +static void clip_swap_float_from_stereo( + void *dst, const struct st_sample *src, int samples) +{ + uint32_t *out_f32s = dst; + + while (samples--) { + *out_f32s++ = F32_TO_F32S(CLIP_NATURAL_FLOAT(src->l)); + *out_f32s++ = F32_TO_F32S(CLIP_NATURAL_FLOAT(src->r)); + src++; + } +} + +f_sample *mixeng_clip_float[2][2] = { + { + clip_natural_float_from_mono, + clip_swap_float_from_mono, + }, + { + clip_natural_float_from_stereo, + clip_swap_float_from_stereo, + } }; void audio_sample_to_uint64(const void *samples, int pos, diff --git a/audio/mixeng.h b/audio/mixeng.h index a5f56d2c268..ead93ac2f78 100644 --- a/audio/mixeng.h +++ b/audio/mixeng.h @@ -42,9 +42,9 @@ typedef void (f_sample) (void *dst, const struct st_sample *src, int samples); extern t_sample *mixeng_conv[2][2][2][3]; extern f_sample *mixeng_clip[2][2][2][3]; -/* indices: [stereo] */ -extern t_sample *mixeng_conv_float[2]; -extern f_sample *mixeng_clip_float[2]; +/* indices: [stereo][swap endianness] */ +extern t_sample *mixeng_conv_float[2][2]; +extern f_sample *mixeng_clip_float[2][2]; void *st_rate_start (int inrate, int outrate); void st_rate_flow(void *opaque, st_sample *ibuf, st_sample *obuf, diff --git a/audio/pwaudio.c b/audio/pwaudio.c index 3b14e04fbb0..8e13b582865 100644 --- a/audio/pwaudio.c +++ b/audio/pwaudio.c @@ -769,13 +769,15 @@ qpw_audio_init(Audiodev *dev, Error **errp) pw->core = pw_context_connect(pw->context, NULL, 0); if (pw->core == NULL) { pw_thread_loop_unlock(pw->thread_loop); - goto fail_error; + error_setg_errno(errp, errno, "Failed to connect to PipeWire instance"); + goto fail; } if (pw_core_add_listener(pw->core, &pw->core_listener, &core_events, pw) < 0) { pw_thread_loop_unlock(pw->thread_loop); - goto fail_error; + error_setg(errp, "Failed to add PipeWire listener"); + goto fail; } if (wait_resync(pw) < 0) { pw_thread_loop_unlock(pw->thread_loop); @@ -785,8 +787,6 @@ 
qpw_audio_init(Audiodev *dev, Error **errp) return g_steal_pointer(&pw); -fail_error: - error_setg(errp, "Failed to initialize PW context"); fail: if (pw->thread_loop) { pw_thread_loop_stop(pw->thread_loop); diff --git a/authz/list.c b/authz/list.c index 0e17eed8974..17aa0efd80e 100644 --- a/authz/list.c +++ b/authz/list.c @@ -116,7 +116,7 @@ qauthz_list_finalize(Object *obj) static void -qauthz_list_class_init(ObjectClass *oc, void *data) +qauthz_list_class_init(ObjectClass *oc, const void *data) { QAuthZClass *authz = QAUTHZ_CLASS(oc); @@ -253,7 +253,7 @@ static const TypeInfo qauthz_list_info = { .instance_size = sizeof(QAuthZList), .instance_finalize = qauthz_list_finalize, .class_init = qauthz_list_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/authz/listfile.c b/authz/listfile.c index 45a60e987df..13741d5a722 100644 --- a/authz/listfile.c +++ b/authz/listfile.c @@ -28,8 +28,8 @@ #include "qemu/filemonitor.h" #include "qom/object_interfaces.h" #include "qapi/qapi-visit-authz.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qobject.h" +#include "qobject/qjson.h" +#include "qobject/qobject.h" #include "qapi/qobject-input-visitor.h" @@ -220,7 +220,7 @@ qauthz_list_file_finalize(Object *obj) static void -qauthz_list_file_class_init(ObjectClass *oc, void *data) +qauthz_list_file_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); QAuthZClass *authz = QAUTHZ_CLASS(oc); @@ -272,7 +272,7 @@ static const TypeInfo qauthz_list_file_info = { .instance_size = sizeof(QAuthZListFile), .instance_finalize = qauthz_list_file_finalize, .class_init = qauthz_list_file_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/authz/pamacct.c b/authz/pamacct.c index c862d9ff39b..c0ad67479a7 100644 --- a/authz/pamacct.c +++ b/authz/pamacct.c @@ -103,7 +103,7 @@ qauthz_pam_finalize(Object *obj) static void -qauthz_pam_class_init(ObjectClass *oc, void *data) +qauthz_pam_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); QAuthZClass *authz = QAUTHZ_CLASS(oc); @@ -136,7 +136,7 @@ static const TypeInfo qauthz_pam_info = { .instance_size = sizeof(QAuthZPAM), .instance_finalize = qauthz_pam_finalize, .class_init = qauthz_pam_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/authz/simple.c b/authz/simple.c index 0597dcd8ea5..f8f2b98518a 100644 --- a/authz/simple.c +++ b/authz/simple.c @@ -78,7 +78,7 @@ qauthz_simple_complete(UserCreatable *uc, Error **errp) static void -qauthz_simple_class_init(ObjectClass *oc, void *data) +qauthz_simple_class_init(ObjectClass *oc, const void *data) { QAuthZClass *authz = QAUTHZ_CLASS(oc); UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); @@ -111,7 +111,7 @@ static const TypeInfo qauthz_simple_info = { .instance_size = sizeof(QAuthZSimple), .instance_finalize = qauthz_simple_finalize, .class_init = qauthz_simple_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/backends/confidential-guest-support.c b/backends/confidential-guest-support.c index 052fde8db04..156dd15e667 100644 --- a/backends/confidential-guest-support.c +++ b/backends/confidential-guest-support.c @@ -13,15 +13,59 @@ #include "qemu/osdep.h" -#include "exec/confidential-guest-support.h" 
+#include "system/confidential-guest-support.h" +#include "qapi/error.h" OBJECT_DEFINE_ABSTRACT_TYPE(ConfidentialGuestSupport, confidential_guest_support, CONFIDENTIAL_GUEST_SUPPORT, OBJECT) -static void confidential_guest_support_class_init(ObjectClass *oc, void *data) +static bool check_support(ConfidentialGuestPlatformType platform, + uint16_t platform_version, uint8_t highest_vtl, + uint64_t shared_gpa_boundary) { + /* Default: no support. */ + return false; +} + +static int set_guest_state(hwaddr gpa, uint8_t *ptr, uint64_t len, + ConfidentialGuestPageType memory_type, + uint16_t cpu_index, Error **errp) +{ + error_setg(errp, + "Setting confidential guest state is not supported for this platform"); + return -1; +} + +static int set_guest_policy(ConfidentialGuestPolicyType policy_type, + uint64_t policy, + void *policy_data1, uint32_t policy_data1_size, + void *policy_data2, uint32_t policy_data2_size, + Error **errp) +{ + error_setg(errp, + "Setting confidential guest policy is not supported for this platform"); + return -1; +} + +static int get_mem_map_entry(int index, ConfidentialGuestMemoryMapEntry *entry, + Error **errp) +{ + error_setg( + errp, + "Obtaining the confidential guest memory map is not supported for this platform"); + return -1; +} + +static void confidential_guest_support_class_init(ObjectClass *oc, + const void *data) +{ + ConfidentialGuestSupportClass *cgsc = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc); + cgsc->check_support = check_support; + cgsc->set_guest_state = set_guest_state; + cgsc->set_guest_policy = set_guest_policy; + cgsc->get_mem_map_entry = get_mem_map_entry; } static void confidential_guest_support_init(Object *obj) diff --git a/backends/cryptodev-builtin.c b/backends/cryptodev-builtin.c index 940104ee552..0414c01e06d 100644 --- a/backends/cryptodev-builtin.c +++ b/backends/cryptodev-builtin.c @@ -22,7 +22,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/cryptodev.h" +#include "system/cryptodev.h" #include "qemu/error-report.h" #include "qapi/error.h" #include "standard-headers/linux/virtio_crypto.h" @@ -64,11 +64,11 @@ static void cryptodev_builtin_init_akcipher(CryptoDevBackend *backend) { QCryptoAkCipherOptions opts; - opts.alg = QCRYPTO_AKCIPHER_ALG_RSA; - opts.u.rsa.padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW; + opts.alg = QCRYPTO_AK_CIPHER_ALGO_RSA; + opts.u.rsa.padding_alg = QCRYPTO_RSA_PADDING_ALGO_RAW; if (qcrypto_akcipher_supports(&opts)) { backend->conf.crypto_services |= - (1u << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER); + (1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_AKCIPHER); backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA; } } @@ -93,9 +93,9 @@ static void cryptodev_builtin_init( backend->conf.peers.ccs[0] = cc; backend->conf.crypto_services = - 1u << QCRYPTODEV_BACKEND_SERVICE_CIPHER | - 1u << QCRYPTODEV_BACKEND_SERVICE_HASH | - 1u << QCRYPTODEV_BACKEND_SERVICE_MAC; + 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_CIPHER | + 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_HASH | + 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_MAC; backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC; backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1; /* @@ -138,18 +138,18 @@ cryptodev_builtin_get_aes_algo(uint32_t key_len, int mode, Error **errp) int algo; if (key_len == AES_KEYSIZE_128) { - algo = QCRYPTO_CIPHER_ALG_AES_128; + algo = QCRYPTO_CIPHER_ALGO_AES_128; } else if (key_len == AES_KEYSIZE_192) { - algo = QCRYPTO_CIPHER_ALG_AES_192; + algo = QCRYPTO_CIPHER_ALGO_AES_192; } else if (key_len == AES_KEYSIZE_256) { /* equals AES_KEYSIZE_128_XTS */ if (mode == 
QCRYPTO_CIPHER_MODE_XTS) { - algo = QCRYPTO_CIPHER_ALG_AES_128; + algo = QCRYPTO_CIPHER_ALGO_AES_128; } else { - algo = QCRYPTO_CIPHER_ALG_AES_256; + algo = QCRYPTO_CIPHER_ALGO_AES_256; } } else if (key_len == AES_KEYSIZE_256_XTS) { if (mode == QCRYPTO_CIPHER_MODE_XTS) { - algo = QCRYPTO_CIPHER_ALG_AES_256; + algo = QCRYPTO_CIPHER_ALGO_AES_256; } else { goto err; } @@ -169,16 +169,16 @@ static int cryptodev_builtin_get_rsa_hash_algo( { switch (virtio_rsa_hash) { case VIRTIO_CRYPTO_RSA_MD5: - return QCRYPTO_HASH_ALG_MD5; + return QCRYPTO_HASH_ALGO_MD5; case VIRTIO_CRYPTO_RSA_SHA1: - return QCRYPTO_HASH_ALG_SHA1; + return QCRYPTO_HASH_ALGO_SHA1; case VIRTIO_CRYPTO_RSA_SHA256: - return QCRYPTO_HASH_ALG_SHA256; + return QCRYPTO_HASH_ALGO_SHA256; case VIRTIO_CRYPTO_RSA_SHA512: - return QCRYPTO_HASH_ALG_SHA512; + return QCRYPTO_HASH_ALGO_SHA512; default: error_setg(errp, "Unsupported rsa hash algo: %d", virtio_rsa_hash); @@ -200,12 +200,12 @@ static int cryptodev_builtin_set_rsa_options( return -1; } opt->hash_alg = hash_alg; - opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1; + opt->padding_alg = QCRYPTO_RSA_PADDING_ALGO_PKCS1; return 0; } if (virtio_padding_algo == VIRTIO_CRYPTO_RSA_RAW_PADDING) { - opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW; + opt->padding_alg = QCRYPTO_RSA_PADDING_ALGO_RAW; return 0; } @@ -271,15 +271,15 @@ static int cryptodev_builtin_create_cipher_session( break; case VIRTIO_CRYPTO_CIPHER_3DES_ECB: mode = QCRYPTO_CIPHER_MODE_ECB; - algo = QCRYPTO_CIPHER_ALG_3DES; + algo = QCRYPTO_CIPHER_ALGO_3DES; break; case VIRTIO_CRYPTO_CIPHER_3DES_CBC: mode = QCRYPTO_CIPHER_MODE_CBC; - algo = QCRYPTO_CIPHER_ALG_3DES; + algo = QCRYPTO_CIPHER_ALGO_3DES; break; case VIRTIO_CRYPTO_CIPHER_3DES_CTR: mode = QCRYPTO_CIPHER_MODE_CTR; - algo = QCRYPTO_CIPHER_ALG_3DES; + algo = QCRYPTO_CIPHER_ALGO_3DES; break; default: error_setg(errp, "Unsupported cipher alg :%u", @@ -318,7 +318,7 @@ static int cryptodev_builtin_create_akcipher_session( switch (sess_info->algo) { case VIRTIO_CRYPTO_AKCIPHER_RSA: - opts.alg = QCRYPTO_AKCIPHER_ALG_RSA; + opts.alg = QCRYPTO_AK_CIPHER_ALGO_RSA; if (cryptodev_builtin_set_rsa_options(sess_info->u.rsa.padding_algo, sess_info->u.rsa.hash_algo, &opts.u.rsa, errp) != 0) { return -1; @@ -334,11 +334,11 @@ static int cryptodev_builtin_create_akcipher_session( switch (sess_info->keytype) { case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC: - type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC; + type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC; break; case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE: - type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE; + type = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE; break; default: @@ -549,7 +549,7 @@ static int cryptodev_builtin_operation( CryptoDevBackendBuiltinSession *sess; CryptoDevBackendSymOpInfo *sym_op_info; CryptoDevBackendAsymOpInfo *asym_op_info; - QCryptodevBackendAlgType algtype = op_info->algtype; + QCryptodevBackendAlgoType algtype = op_info->algtype; int status = -VIRTIO_CRYPTO_ERR; Error *local_error = NULL; @@ -561,11 +561,11 @@ static int cryptodev_builtin_operation( } sess = builtin->sessions[op_info->session_id]; - if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) { + if (algtype == QCRYPTODEV_BACKEND_ALGO_TYPE_SYM) { sym_op_info = op_info->u.sym_op_info; status = cryptodev_builtin_sym_operation(sess, sym_op_info, &local_error); - } else if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) { + } else if (algtype == QCRYPTODEV_BACKEND_ALGO_TYPE_ASYM) { asym_op_info = op_info->u.asym_op_info; status = cryptodev_builtin_asym_operation(sess, op_info->op_code, asym_op_info, 
&local_error); @@ -608,7 +608,7 @@ static void cryptodev_builtin_cleanup( } static void -cryptodev_builtin_class_init(ObjectClass *oc, void *data) +cryptodev_builtin_class_init(ObjectClass *oc, const void *data) { CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_CLASS(oc); diff --git a/backends/cryptodev-hmp-cmds.c b/backends/cryptodev-hmp-cmds.c index 4f7220bb133..01396d227c8 100644 --- a/backends/cryptodev-hmp-cmds.c +++ b/backends/cryptodev-hmp-cmds.c @@ -14,7 +14,7 @@ #include "monitor/hmp.h" #include "monitor/monitor.h" #include "qapi/qapi-commands-cryptodev.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" void hmp_info_cryptodev(Monitor *mon, const QDict *qdict) diff --git a/backends/cryptodev-lkcf.c b/backends/cryptodev-lkcf.c index 45aba1ff67b..bb7a81d5d06 100644 --- a/backends/cryptodev-lkcf.c +++ b/backends/cryptodev-lkcf.c @@ -30,7 +30,7 @@ #include "qemu/error-report.h" #include "qemu/queue.h" #include "qom/object.h" -#include "sysemu/cryptodev.h" +#include "system/cryptodev.h" #include "standard-headers/linux/virtio_crypto.h" #include @@ -133,20 +133,20 @@ static int cryptodev_lkcf_set_op_desc(QCryptoAkCipherOptions *opts, Error **errp) { QCryptoAkCipherOptionsRSA *rsa_opt; - if (opts->alg != QCRYPTO_AKCIPHER_ALG_RSA) { + if (opts->alg != QCRYPTO_AK_CIPHER_ALGO_RSA) { error_setg(errp, "Unsupported alg: %u", opts->alg); return -1; } rsa_opt = &opts->u.rsa; - if (rsa_opt->padding_alg == QCRYPTO_RSA_PADDING_ALG_PKCS1) { + if (rsa_opt->padding_alg == QCRYPTO_RSA_PADDING_ALGO_PKCS1) { snprintf(key_desc, desc_len, "enc=%s hash=%s", - QCryptoRSAPaddingAlgorithm_str(rsa_opt->padding_alg), - QCryptoHashAlgorithm_str(rsa_opt->hash_alg)); + QCryptoRSAPaddingAlgo_str(rsa_opt->padding_alg), + QCryptoHashAlgo_str(rsa_opt->hash_alg)); } else { snprintf(key_desc, desc_len, "enc=%s", - QCryptoRSAPaddingAlgorithm_str(rsa_opt->padding_alg)); + QCryptoRSAPaddingAlgo_str(rsa_opt->padding_alg)); } return 0; } @@ -157,23 +157,23 @@ static int cryptodev_lkcf_set_rsa_opt(int virtio_padding_alg, Error **errp) { if (virtio_padding_alg == VIRTIO_CRYPTO_RSA_PKCS1_PADDING) { - opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1; + opt->padding_alg = QCRYPTO_RSA_PADDING_ALGO_PKCS1; switch (virtio_hash_alg) { case VIRTIO_CRYPTO_RSA_MD5: - opt->hash_alg = QCRYPTO_HASH_ALG_MD5; + opt->hash_alg = QCRYPTO_HASH_ALGO_MD5; break; case VIRTIO_CRYPTO_RSA_SHA1: - opt->hash_alg = QCRYPTO_HASH_ALG_SHA1; + opt->hash_alg = QCRYPTO_HASH_ALGO_SHA1; break; case VIRTIO_CRYPTO_RSA_SHA256: - opt->hash_alg = QCRYPTO_HASH_ALG_SHA256; + opt->hash_alg = QCRYPTO_HASH_ALGO_SHA256; break; case VIRTIO_CRYPTO_RSA_SHA512: - opt->hash_alg = QCRYPTO_HASH_ALG_SHA512; + opt->hash_alg = QCRYPTO_HASH_ALGO_SHA512; break; default: @@ -184,7 +184,7 @@ static int cryptodev_lkcf_set_rsa_opt(int virtio_padding_alg, } if (virtio_padding_alg == VIRTIO_CRYPTO_RSA_RAW_PADDING) { - opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW; + opt->padding_alg = QCRYPTO_RSA_PADDING_ALGO_RAW; return 0; } @@ -230,7 +230,7 @@ static void cryptodev_lkcf_init(CryptoDevBackend *backend, Error **errp) backend->conf.peers.ccs[0] = cc; backend->conf.crypto_services = - 1u << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER; + 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_AKCIPHER; backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA; lkcf->running = true; @@ -322,7 +322,7 @@ static void cryptodev_lkcf_execute_task(CryptoDevLKCFTask *task) * 2. generally, public key related compution is fast, just compute it with * thread-pool. 
*/ - if (session->keytype == QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE) { + if (session->keytype == QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE) { if (qcrypto_akcipher_export_p8info(&session->akcipher_opts, session->key, session->keylen, &p8info, &p8info_len, @@ -330,6 +330,8 @@ static void cryptodev_lkcf_execute_task(CryptoDevLKCFTask *task) cryptodev_lkcf_set_op_desc(&session->akcipher_opts, op_desc, sizeof(op_desc), &local_error) != 0) { error_report_err(local_error); + status = -VIRTIO_CRYPTO_ERR; + goto out; } else { key_id = add_key(KCTL_KEY_TYPE_PKEY, "lkcf-backend-priv-key", p8info, p8info_len, KCTL_KEY_RING); @@ -346,6 +348,7 @@ static void cryptodev_lkcf_execute_task(CryptoDevLKCFTask *task) session->key, session->keylen, &local_error); if (!akcipher) { + error_report_err(local_error); status = -VIRTIO_CRYPTO_ERR; goto out; } @@ -474,7 +477,7 @@ static int cryptodev_lkcf_operation( CryptoDevBackendLKCF *lkcf = CRYPTODEV_BACKEND_LKCF(backend); CryptoDevBackendLKCFSession *sess; - QCryptodevBackendAlgType algtype = op_info->algtype; + QCryptodevBackendAlgoType algtype = op_info->algtype; CryptoDevLKCFTask *task; if (op_info->session_id >= MAX_SESSIONS || @@ -485,7 +488,7 @@ static int cryptodev_lkcf_operation( } sess = lkcf->sess[op_info->session_id]; - if (algtype != QCRYPTODEV_BACKEND_ALG_ASYM) { + if (algtype != QCRYPTODEV_BACKEND_ALGO_TYPE_ASYM) { error_report("algtype not supported: %u", algtype); return -VIRTIO_CRYPTO_NOTSUPP; } @@ -518,7 +521,7 @@ static int cryptodev_lkcf_create_asym_session( switch (sess_info->algo) { case VIRTIO_CRYPTO_AKCIPHER_RSA: - sess->akcipher_opts.alg = QCRYPTO_AKCIPHER_ALG_RSA; + sess->akcipher_opts.alg = QCRYPTO_AK_CIPHER_ALGO_RSA; if (cryptodev_lkcf_set_rsa_opt( sess_info->u.rsa.padding_algo, sess_info->u.rsa.hash_algo, &sess->akcipher_opts.u.rsa, &local_error) != 0) { @@ -534,11 +537,11 @@ static int cryptodev_lkcf_create_asym_session( switch (sess_info->keytype) { case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC: - sess->keytype = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC; + sess->keytype = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC; break; case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE: - sess->keytype = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE; + sess->keytype = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE; break; default: @@ -616,7 +619,7 @@ static int cryptodev_lkcf_close_session(CryptoDevBackend *backend, return 0; } -static void cryptodev_lkcf_class_init(ObjectClass *oc, void *data) +static void cryptodev_lkcf_class_init(ObjectClass *oc, const void *data) { CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_CLASS(oc); diff --git a/backends/cryptodev-vhost-user.c b/backends/cryptodev-vhost-user.c index c3283ba84a5..cb04e68b022 100644 --- a/backends/cryptodev-vhost-user.c +++ b/backends/cryptodev-vhost-user.c @@ -27,9 +27,9 @@ #include "qemu/error-report.h" #include "hw/virtio/vhost-user.h" #include "standard-headers/linux/virtio_crypto.h" -#include "sysemu/cryptodev-vhost.h" +#include "system/cryptodev-vhost.h" #include "chardev/char-fe.h" -#include "sysemu/cryptodev-vhost-user.h" +#include "system/cryptodev-vhost-user.h" #include "qom/object.h" @@ -221,9 +221,9 @@ static void cryptodev_vhost_user_init( cryptodev_vhost_user_event, NULL, s, NULL, true); backend->conf.crypto_services = - 1u << QCRYPTODEV_BACKEND_SERVICE_CIPHER | - 1u << QCRYPTODEV_BACKEND_SERVICE_HASH | - 1u << QCRYPTODEV_BACKEND_SERVICE_MAC; + 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_CIPHER | + 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_HASH | + 1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_MAC; backend->conf.cipher_algo_l = 1u << 
VIRTIO_CRYPTO_CIPHER_AES_CBC; backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1; @@ -281,8 +281,7 @@ static int cryptodev_vhost_user_create_session( break; default: - error_setg(&local_error, "Unsupported opcode :%" PRIu32 "", - sess_info->op_code); + error_report("Unsupported opcode :%" PRIu32 "", sess_info->op_code); return -VIRTIO_CRYPTO_NOTSUPP; } @@ -394,7 +393,7 @@ static void cryptodev_vhost_user_finalize(Object *obj) } static void -cryptodev_vhost_user_class_init(ObjectClass *oc, void *data) +cryptodev_vhost_user_class_init(ObjectClass *oc, const void *data) { CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_CLASS(oc); diff --git a/backends/cryptodev-vhost.c b/backends/cryptodev-vhost.c index 93523732f39..943680a23af 100644 --- a/backends/cryptodev-vhost.c +++ b/backends/cryptodev-vhost.c @@ -24,13 +24,13 @@ #include "qemu/osdep.h" #include "hw/virtio/virtio-bus.h" -#include "sysemu/cryptodev-vhost.h" +#include "system/cryptodev-vhost.h" #ifdef CONFIG_VHOST_CRYPTO #include "qapi/error.h" #include "qemu/error-report.h" #include "hw/virtio/virtio-crypto.h" -#include "sysemu/cryptodev-vhost-user.h" +#include "system/cryptodev-vhost-user.h" uint64_t cryptodev_vhost_get_max_queues( @@ -53,7 +53,7 @@ cryptodev_vhost_init( CryptoDevBackendVhost *crypto; Error *local_err = NULL; - crypto = g_new(CryptoDevBackendVhost, 1); + crypto = g_new0(CryptoDevBackendVhost, 1); crypto->dev.max_queues = 1; crypto->dev.nvqs = 1; crypto->dev.vqs = crypto->vqs; diff --git a/backends/cryptodev.c b/backends/cryptodev.c index fff89fd62ae..79f8882d3be 100644 --- a/backends/cryptodev.c +++ b/backends/cryptodev.c @@ -22,8 +22,8 @@ */ #include "qemu/osdep.h" -#include "sysemu/cryptodev.h" -#include "sysemu/stats.h" +#include "system/cryptodev.h" +#include "system/stats.h" #include "qapi/error.h" #include "qapi/qapi-commands-cryptodev.h" #include "qapi/qapi-types-stats.h" @@ -74,7 +74,7 @@ static int qmp_query_cryptodev_foreach(Object *obj, void *data) backend = CRYPTODEV_BACKEND(obj); services = backend->conf.crypto_services; - for (i = 0; i < QCRYPTODEV_BACKEND_SERVICE__MAX; i++) { + for (i = 0; i < QCRYPTODEV_BACKEND_SERVICE_TYPE__MAX; i++) { if (services & (1 << i)) { QAPI_LIST_PREPEND(info->service, i); } @@ -97,7 +97,7 @@ static int qmp_query_cryptodev_foreach(Object *obj, void *data) QCryptodevInfoList *qmp_query_cryptodev(Error **errp) { QCryptodevInfoList *list = NULL; - Object *objs = container_get(object_get_root(), "/objects"); + Object *objs = object_get_container("objects"); object_child_foreach(objs, qmp_query_cryptodev_foreach, &list); @@ -185,10 +185,10 @@ static int cryptodev_backend_operation( static int cryptodev_backend_account(CryptoDevBackend *backend, CryptoDevBackendOpInfo *op_info) { - enum QCryptodevBackendAlgType algtype = op_info->algtype; + enum QCryptodevBackendAlgoType algtype = op_info->algtype; int len; - if (algtype == QCRYPTODEV_BACKEND_ALG_ASYM) { + if (algtype == QCRYPTODEV_BACKEND_ALGO_TYPE_ASYM) { CryptoDevBackendAsymOpInfo *asym_op_info = op_info->u.asym_op_info; len = asym_op_info->src_len; @@ -212,7 +212,7 @@ static int cryptodev_backend_account(CryptoDevBackend *backend, default: return -VIRTIO_CRYPTO_NOTSUPP; } - } else if (algtype == QCRYPTODEV_BACKEND_ALG_SYM) { + } else if (algtype == QCRYPTODEV_BACKEND_ALGO_TYPE_SYM) { CryptoDevBackendSymOpInfo *sym_op_info = op_info->u.sym_op_info; len = sym_op_info->src_len; @@ -424,11 +424,11 @@ cryptodev_backend_complete(UserCreatable *uc, Error **errp) } services = backend->conf.crypto_services; - if (services & (1 << 
QCRYPTODEV_BACKEND_SERVICE_CIPHER)) { + if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_CIPHER)) { backend->sym_stat = g_new0(CryptodevBackendSymStat, 1); } - if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER)) { + if (services & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_AKCIPHER)) { backend->asym_stat = g_new0(CryptodevBackendAsymStat, 1); } } @@ -557,7 +557,7 @@ static void cryptodev_backend_stats_cb(StatsResultList **result, switch (target) { case STATS_TARGET_CRYPTODEV: { - Object *objs = container_get(object_get_root(), "/objects"); + Object *objs = object_get_container("objects"); StatsArgs stats_args; stats_args.result.stats = result; stats_args.names = names; @@ -608,7 +608,7 @@ static void cryptodev_backend_schemas_cb(StatsSchemaList **result, } static void -cryptodev_backend_class_init(ObjectClass *oc, void *data) +cryptodev_backend_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); @@ -641,7 +641,7 @@ static const TypeInfo cryptodev_backend_info = { .instance_finalize = cryptodev_backend_finalize, .class_size = sizeof(CryptoDevBackendClass), .class_init = cryptodev_backend_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/backends/dbus-vmstate.c b/backends/dbus-vmstate.c index be6c4d8e0ae..7d5b58b4c90 100644 --- a/backends/dbus-vmstate.c +++ b/backends/dbus-vmstate.c @@ -485,7 +485,7 @@ dbus_vmstate_get_id(VMStateIf *vmif) } static void -dbus_vmstate_class_init(ObjectClass *oc, void *data) +dbus_vmstate_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); VMStateIfClass *vc = VMSTATE_IF_CLASS(oc); @@ -505,7 +505,7 @@ static const TypeInfo dbus_vmstate_info = { .instance_size = sizeof(DBusVMState), .instance_finalize = dbus_vmstate_finalize, .class_init = dbus_vmstate_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { TYPE_VMSTATE_IF }, { } diff --git a/backends/host_iommu_device.c b/backends/host_iommu_device.c index 8f2dda1beb9..f6965e4027b 100644 --- a/backends/host_iommu_device.c +++ b/backends/host_iommu_device.c @@ -10,14 +10,14 @@ */ #include "qemu/osdep.h" -#include "sysemu/host_iommu_device.h" +#include "system/host_iommu_device.h" OBJECT_DEFINE_ABSTRACT_TYPE(HostIOMMUDevice, host_iommu_device, HOST_IOMMU_DEVICE, OBJECT) -static void host_iommu_device_class_init(ObjectClass *oc, void *data) +static void host_iommu_device_class_init(ObjectClass *oc, const void *data) { } diff --git a/backends/hostmem-epc.c b/backends/hostmem-epc.c index 6c024d6217d..ab20b182331 100644 --- a/backends/hostmem-epc.c +++ b/backends/hostmem-epc.c @@ -14,7 +14,7 @@ #include #include "qom/object_interfaces.h" #include "qapi/error.h" -#include "sysemu/hostmem.h" +#include "system/hostmem.h" #include "hw/i386/hostmem-epc.h" static bool @@ -36,7 +36,7 @@ sgx_epc_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) backend->aligned = true; name = object_get_canonical_path(OBJECT(backend)); - ram_flags = (backend->share ? RAM_SHARED : 0) | RAM_PROTECTED; + ram_flags = (backend->share ? 
RAM_SHARED : RAM_PRIVATE) | RAM_PROTECTED; return memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name, backend->size, ram_flags, fd, 0, errp); } @@ -50,7 +50,7 @@ static void sgx_epc_backend_instance_init(Object *obj) m->dump = false; } -static void sgx_epc_backend_class_init(ObjectClass *oc, void *data) +static void sgx_epc_backend_class_init(ObjectClass *oc, const void *data) { HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc); diff --git a/backends/hostmem-file.c b/backends/hostmem-file.c index 7e5072e33ef..8e3219c0617 100644 --- a/backends/hostmem-file.c +++ b/backends/hostmem-file.c @@ -15,7 +15,7 @@ #include "qemu/error-report.h" #include "qemu/module.h" #include "qemu/madvise.h" -#include "sysemu/hostmem.h" +#include "system/hostmem.h" #include "qom/object_interfaces.h" #include "qom/object.h" #include "qapi/visitor.h" @@ -82,7 +82,7 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) backend->aligned = true; name = host_memory_backend_get_name(backend); - ram_flags = backend->share ? RAM_SHARED : 0; + ram_flags = backend->share ? RAM_SHARED : RAM_PRIVATE; ram_flags |= fb->readonly ? RAM_READONLY_FD : 0; ram_flags |= fb->rom == ON_OFF_AUTO_ON ? RAM_READONLY : 0; ram_flags |= backend->reserve ? 0 : RAM_NORESERVE; @@ -270,7 +270,7 @@ static void file_backend_unparent(Object *obj) } static void -file_backend_class_init(ObjectClass *oc, void *data) +file_backend_class_init(ObjectClass *oc, const void *data) { HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc); diff --git a/backends/hostmem-memfd.c b/backends/hostmem-memfd.c index 6a3c89a12b2..923239f9cf6 100644 --- a/backends/hostmem-memfd.c +++ b/backends/hostmem-memfd.c @@ -11,14 +11,13 @@ */ #include "qemu/osdep.h" -#include "sysemu/hostmem.h" +#include "system/hostmem.h" #include "qom/object_interfaces.h" #include "qemu/memfd.h" #include "qemu/module.h" #include "qapi/error.h" #include "qom/object.h" - -#define TYPE_MEMORY_BACKEND_MEMFD "memory-backend-memfd" +#include "migration/cpr.h" OBJECT_DECLARE_SIMPLE_TYPE(HostMemoryBackendMemfd, MEMORY_BACKEND_MEMFD) @@ -35,15 +34,19 @@ static bool memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) { HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(backend); - g_autofree char *name = NULL; + g_autofree char *name = host_memory_backend_get_name(backend); + int fd = cpr_find_fd(name, 0); uint32_t ram_flags; - int fd; if (!backend->size) { error_setg(errp, "can't create backend with size 0"); return false; } + if (fd >= 0) { + goto have_fd; + } + fd = qemu_memfd_create(TYPE_MEMORY_BACKEND_MEMFD, backend->size, m->hugetlb, m->hugetlbsize, m->seal ? F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL : 0, @@ -51,10 +54,11 @@ memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) if (fd == -1) { return false; } + cpr_save_fd(name, 0, fd); +have_fd: backend->aligned = true; - name = host_memory_backend_get_name(backend); - ram_flags = backend->share ? RAM_SHARED : 0; + ram_flags = backend->share ? RAM_SHARED : RAM_PRIVATE; ram_flags |= backend->reserve ? 0 : RAM_NORESERVE; ram_flags |= backend->guest_memfd ? 
RAM_GUEST_MEMFD : 0; return memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name, @@ -129,7 +133,7 @@ memfd_backend_instance_init(Object *obj) } static void -memfd_backend_class_init(ObjectClass *oc, void *data) +memfd_backend_class_init(ObjectClass *oc, const void *data) { HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc); diff --git a/backends/hostmem-ram.c b/backends/hostmem-ram.c index f7d81af783a..062b1abb116 100644 --- a/backends/hostmem-ram.c +++ b/backends/hostmem-ram.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/hostmem.h" +#include "system/hostmem.h" #include "qapi/error.h" #include "qemu/module.h" #include "qom/object_interfaces.h" @@ -28,7 +28,7 @@ ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) } name = host_memory_backend_get_name(backend); - ram_flags = backend->share ? RAM_SHARED : 0; + ram_flags = backend->share ? RAM_SHARED : RAM_PRIVATE; ram_flags |= backend->reserve ? 0 : RAM_NORESERVE; ram_flags |= backend->guest_memfd ? RAM_GUEST_MEMFD : 0; return memory_region_init_ram_flags_nomigrate(&backend->mr, OBJECT(backend), @@ -37,7 +37,7 @@ ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) } static void -ram_backend_class_init(ObjectClass *oc, void *data) +ram_backend_class_init(ObjectClass *oc, const void *data) { HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc); diff --git a/backends/hostmem-shm.c b/backends/hostmem-shm.c index 374edc3db87..f66211a2ec9 100644 --- a/backends/hostmem-shm.c +++ b/backends/hostmem-shm.c @@ -11,8 +11,9 @@ */ #include "qemu/osdep.h" -#include "sysemu/hostmem.h" +#include "system/hostmem.h" #include "qapi/error.h" +#include "migration/cpr.h" #define TYPE_MEMORY_BACKEND_SHM "memory-backend-shm" @@ -25,11 +26,9 @@ struct HostMemoryBackendShm { static bool shm_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) { - g_autoptr(GString) shm_name = g_string_new(NULL); - g_autofree char *backend_name = NULL; + g_autofree char *backend_name = host_memory_backend_get_name(backend); uint32_t ram_flags; - int fd, oflag; - mode_t mode; + int fd = cpr_find_fd(backend_name, 0); if (!backend->size) { error_setg(errp, "can't create shm backend with size 0"); @@ -41,48 +40,18 @@ shm_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) return false; } - /* - * Let's use `mode = 0` because we don't want other processes to open our - * memory unless we share the file descriptor with them. - */ - mode = 0; - oflag = O_RDWR | O_CREAT | O_EXCL; - backend_name = host_memory_backend_get_name(backend); - - /* - * Some operating systems allow creating anonymous POSIX shared memory - * objects (e.g. FreeBSD provides the SHM_ANON constant), but this is not - * defined by POSIX, so let's create a unique name. - * - * From Linux's shm_open(3) man-page: - * For portable use, a shared memory object should be identified - * by a name of the form /somename;" - */ - g_string_printf(shm_name, "/qemu-" FMT_pid "-shm-%s", getpid(), - backend_name); - - fd = shm_open(shm_name->str, oflag, mode); - if (fd < 0) { - error_setg_errno(errp, errno, - "failed to create POSIX shared memory"); - return false; + if (fd >= 0) { + goto have_fd; } - /* - * We have the file descriptor, so we no longer need to expose the - * POSIX shared memory object. However it will remain allocated as long as - * there are file descriptors pointing to it. 
- */ - shm_unlink(shm_name->str); - - if (ftruncate(fd, backend->size) == -1) { - error_setg_errno(errp, errno, - "failed to resize POSIX shared memory to %" PRIu64, - backend->size); - close(fd); + fd = qemu_shm_alloc(backend->size, errp); + if (fd < 0) { return false; } + cpr_save_fd(backend_name, 0, fd); +have_fd: + /* Let's do the same as memory-backend-ram,share=on would do. */ ram_flags = RAM_SHARED; ram_flags |= backend->reserve ? 0 : RAM_NORESERVE; @@ -100,7 +69,7 @@ shm_backend_instance_init(Object *obj) } static void -shm_backend_class_init(ObjectClass *oc, void *data) +shm_backend_class_init(ObjectClass *oc, const void *data) { HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc); diff --git a/backends/hostmem.c b/backends/hostmem.c index 4e5576a4ada..35734d6f4d1 100644 --- a/backends/hostmem.c +++ b/backends/hostmem.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/hostmem.h" +#include "system/hostmem.h" #include "hw/boards.h" #include "qapi/error.h" #include "qapi/qapi-builtin-visit.h" @@ -178,7 +178,7 @@ static void host_memory_backend_set_merge(Object *obj, bool value, Error **errp) return; } - if (!host_memory_backend_mr_inited(backend) && + if (host_memory_backend_mr_inited(backend) && value != backend->merge) { void *ptr = memory_region_get_ram_ptr(&backend->mr); uint64_t sz = memory_region_size(&backend->mr); @@ -501,7 +501,7 @@ host_memory_backend_set_use_canonical_path(Object *obj, bool value, } static void -host_memory_backend_class_init(ObjectClass *oc, void *data) +host_memory_backend_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); @@ -586,7 +586,7 @@ static const TypeInfo host_memory_backend_info = { .instance_size = sizeof(HostMemoryBackend), .instance_init = host_memory_backend_init, .instance_post_init = host_memory_backend_post_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/backends/igvm-cfg.c b/backends/igvm-cfg.c new file mode 100644 index 00000000000..45df63e06c1 --- /dev/null +++ b/backends/igvm-cfg.c @@ -0,0 +1,51 @@ +/* + * QEMU IGVM interface + * + * Copyright (C) 2023-2024 SUSE + * + * Authors: + * Roy Hopkins + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" + +#include "system/igvm-cfg.h" +#include "igvm.h" +#include "qom/object_interfaces.h" + +static char *get_igvm(Object *obj, Error **errp) +{ + IgvmCfg *igvm = IGVM_CFG(obj); + return g_strdup(igvm->filename); +} + +static void set_igvm(Object *obj, const char *value, Error **errp) +{ + IgvmCfg *igvm = IGVM_CFG(obj); + g_free(igvm->filename); + igvm->filename = g_strdup(value); +} + +OBJECT_DEFINE_TYPE_WITH_INTERFACES(IgvmCfg, igvm_cfg, IGVM_CFG, OBJECT, + { TYPE_USER_CREATABLE }, { NULL }) + +static void igvm_cfg_class_init(ObjectClass *oc, const void *data) +{ + IgvmCfgClass *igvmc = IGVM_CFG_CLASS(oc); + + object_class_property_add_str(oc, "file", get_igvm, set_igvm); + object_class_property_set_description(oc, "file", + "Set the IGVM filename to use"); + + igvmc->process = qigvm_process_file; +} + +static void igvm_cfg_init(Object *obj) +{ +} + +static void igvm_cfg_finalize(Object *obj) +{ +} diff --git a/backends/igvm.c b/backends/igvm.c new file mode 100644 index 00000000000..9ad41582ee5 --- /dev/null +++ b/backends/igvm.c @@ -0,0 +1,988 @@ +/* + * QEMU IGVM configuration backend for guests + * + * Copyright (C) 2023-2024 SUSE + * + * Authors: + * Roy Hopkins + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ 
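+/*
+ * Processing overview for this new file: qigvm_process_file() first scans
+ * the platform headers and picks the strongest isolation type offered by
+ * the file that the current system supports (SEV-SNP, then SEV-ES, then
+ * SEV, then native), recording its compatibility mask.  It then walks the
+ * directive headers, dispatching each through the handlers[] table (page
+ * data, VP context, parameter areas, memory map, vCPU count, ...), walks
+ * the initialization headers (guest policy), flushes the last accumulated
+ * page region and finally applies the SEV policy and optional ID block.
+ *
+ * A minimal usage sketch, assuming a caller that owns an IgvmCfg object
+ * and an optional ConfidentialGuestSupport object (the real wiring goes
+ * through the class hook installed in igvm-cfg.c and is outside this hunk):
+ *
+ *     if (qigvm_process_file(cfg, cgs, false, errp) < 0) {
+ *         return;   // errp has been set by qigvm_process_file()
+ *     }
+ */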
+ +#include "qemu/osdep.h" + +#include "igvm.h" +#include "qapi/error.h" +#include "system/memory.h" +#include "system/address-spaces.h" +#include "hw/core/cpu.h" + +#include +#include + +typedef struct QIgvmParameterData { + QTAILQ_ENTRY(QIgvmParameterData) next; + uint8_t *data; + uint32_t size; + uint32_t index; +} QIgvmParameterData; + +/* + * Some directives are specific to particular confidential computing platforms. + * Define required types for each of those platforms here. + */ + +/* SEV/SEV-ES/SEV-SNP */ + +/* + * These structures are defined in "SEV Secure Nested Paging Firmware ABI + * Specification" Rev 1.58, section 8.18. + */ +struct QEMU_PACKED sev_id_block { + uint8_t ld[48]; + uint8_t family_id[16]; + uint8_t image_id[16]; + uint32_t version; + uint32_t guest_svn; + uint64_t policy; +}; + +struct QEMU_PACKED sev_id_authentication { + uint32_t id_key_alg; + uint32_t auth_key_algo; + uint8_t reserved[56]; + uint8_t id_block_sig[512]; + uint8_t id_key[1028]; + uint8_t reserved2[60]; + uint8_t id_key_sig[512]; + uint8_t author_key[1028]; + uint8_t reserved3[892]; +}; + +#define IGVM_SEV_ID_BLOCK_VERSION 1 + +/* + * QIgvm contains the information required during processing + * of a single IGVM file. + */ +typedef struct QIgvm { + IgvmHandle file; + ConfidentialGuestSupport *cgs; + ConfidentialGuestSupportClass *cgsc; + uint32_t compatibility_mask; + unsigned current_header_index; + QTAILQ_HEAD(, QIgvmParameterData) parameter_data; + IgvmPlatformType platform_type; + + /* + * SEV-SNP platforms can contain an ID block and authentication + * that should be verified by the guest. + */ + struct sev_id_block *id_block; + struct sev_id_authentication *id_auth; + + /* Define the guest policy for SEV guests */ + uint64_t sev_policy; + + /* These variables keep track of contiguous page regions */ + IGVM_VHS_PAGE_DATA region_prev_page_data; + uint64_t region_start; + unsigned region_start_index; + unsigned region_last_index; + unsigned region_page_count; +} QIgvm; + +static int qigvm_directive_page_data(QIgvm *ctx, const uint8_t *header_data, + Error **errp); +static int qigvm_directive_vp_context(QIgvm *ctx, const uint8_t *header_data, + Error **errp); +static int qigvm_directive_parameter_area(QIgvm *ctx, + const uint8_t *header_data, + Error **errp); +static int qigvm_directive_parameter_insert(QIgvm *ctx, + const uint8_t *header_data, + Error **errp); +static int qigvm_directive_memory_map(QIgvm *ctx, const uint8_t *header_data, + Error **errp); +static int qigvm_directive_vp_count(QIgvm *ctx, const uint8_t *header_data, + Error **errp); +static int qigvm_directive_environment_info(QIgvm *ctx, + const uint8_t *header_data, + Error **errp); +static int qigvm_directive_required_memory(QIgvm *ctx, + const uint8_t *header_data, + Error **errp); +static int qigvm_directive_snp_id_block(QIgvm *ctx, const uint8_t *header_data, + Error **errp); +static int qigvm_initialization_guest_policy(QIgvm *ctx, + const uint8_t *header_data, + Error **errp); + +struct QIGVMHandler { + uint32_t type; + uint32_t section; + int (*handler)(QIgvm *ctx, const uint8_t *header_data, Error **errp); +}; + +static struct QIGVMHandler handlers[] = { + { IGVM_VHT_PAGE_DATA, IGVM_HEADER_SECTION_DIRECTIVE, + qigvm_directive_page_data }, + { IGVM_VHT_VP_CONTEXT, IGVM_HEADER_SECTION_DIRECTIVE, + qigvm_directive_vp_context }, + { IGVM_VHT_PARAMETER_AREA, IGVM_HEADER_SECTION_DIRECTIVE, + qigvm_directive_parameter_area }, + { IGVM_VHT_PARAMETER_INSERT, IGVM_HEADER_SECTION_DIRECTIVE, + qigvm_directive_parameter_insert }, + 
{ IGVM_VHT_MEMORY_MAP, IGVM_HEADER_SECTION_DIRECTIVE, + qigvm_directive_memory_map }, + { IGVM_VHT_VP_COUNT_PARAMETER, IGVM_HEADER_SECTION_DIRECTIVE, + qigvm_directive_vp_count }, + { IGVM_VHT_ENVIRONMENT_INFO_PARAMETER, IGVM_HEADER_SECTION_DIRECTIVE, + qigvm_directive_environment_info }, + { IGVM_VHT_REQUIRED_MEMORY, IGVM_HEADER_SECTION_DIRECTIVE, + qigvm_directive_required_memory }, + { IGVM_VHT_SNP_ID_BLOCK, IGVM_HEADER_SECTION_DIRECTIVE, + qigvm_directive_snp_id_block }, + { IGVM_VHT_GUEST_POLICY, IGVM_HEADER_SECTION_INITIALIZATION, + qigvm_initialization_guest_policy }, +}; + +static int qigvm_handler(QIgvm *ctx, uint32_t type, Error **errp) +{ + size_t handler; + IgvmHandle header_handle; + const uint8_t *header_data; + int result; + + for (handler = 0; handler < G_N_ELEMENTS(handlers); handler++) { + if (handlers[handler].type != type) { + continue; + } + header_handle = igvm_get_header(ctx->file, handlers[handler].section, + ctx->current_header_index); + if (header_handle < 0) { + error_setg( + errp, + "IGVM file is invalid: Failed to read directive header (code: %d)", + (int)header_handle); + return -1; + } + header_data = igvm_get_buffer(ctx->file, header_handle) + + sizeof(IGVM_VHS_VARIABLE_HEADER); + result = handlers[handler].handler(ctx, header_data, errp); + igvm_free_buffer(ctx->file, header_handle); + return result; + } + error_setg(errp, + "IGVM: Unknown header type encountered when processing file: " + "(type 0x%X)", + type); + return -1; +} + +static void *qigvm_prepare_memory(QIgvm *ctx, uint64_t addr, uint64_t size, + int region_identifier, Error **errp) +{ + ERRP_GUARD(); + MemoryRegion *igvm_pages = NULL; + Int128 gpa_region_size; + MemoryRegionSection mrs = + memory_region_find(get_system_memory(), addr, size); + if (mrs.mr) { + if (!memory_region_is_ram(mrs.mr)) { + memory_region_unref(mrs.mr); + error_setg( + errp, + "Processing of IGVM file failed: Could not prepare memory " + "at address 0x%lX due to existing non-RAM region", + addr); + return NULL; + } + + gpa_region_size = int128_make64(size); + if (int128_lt(mrs.size, gpa_region_size)) { + memory_region_unref(mrs.mr); + error_setg( + errp, + "Processing of IGVM file failed: Could not prepare memory " + "at address 0x%lX: region size exceeded", + addr); + return NULL; + } + return qemu_map_ram_ptr(mrs.mr->ram_block, mrs.offset_within_region); + } else { + /* + * The region_identifier is the is the index of the IGVM directive that + * contains the page with the lowest GPA in the region. This will + * generate a unique region name. + */ + g_autofree char *region_name = + g_strdup_printf("igvm.%X", region_identifier); + igvm_pages = g_new0(MemoryRegion, 1); + if (ctx->cgs && ctx->cgs->require_guest_memfd) { + if (!memory_region_init_ram_guest_memfd(igvm_pages, NULL, + region_name, size, errp)) { + return NULL; + } + } else { + if (!memory_region_init_ram(igvm_pages, NULL, region_name, size, + errp)) { + return NULL; + } + } + memory_region_add_subregion(get_system_memory(), addr, igvm_pages); + return memory_region_get_ram_ptr(igvm_pages); + } +} + +static int qigvm_type_to_cgs_type(IgvmPageDataType memory_type, bool unmeasured, + bool zero) +{ + switch (memory_type) { + case IGVM_PAGE_DATA_TYPE_NORMAL: { + if (unmeasured) { + return CGS_PAGE_TYPE_UNMEASURED; + } else { + return zero ? 
CGS_PAGE_TYPE_ZERO : CGS_PAGE_TYPE_NORMAL; + } + } + case IGVM_PAGE_DATA_TYPE_SECRETS: + return CGS_PAGE_TYPE_SECRETS; + case IGVM_PAGE_DATA_TYPE_CPUID_DATA: + return CGS_PAGE_TYPE_CPUID; + case IGVM_PAGE_DATA_TYPE_CPUID_XF: + return CGS_PAGE_TYPE_CPUID; + default: + return -1; + } +} + +static bool qigvm_page_attrs_equal(IgvmHandle igvm, unsigned header_index, + const IGVM_VHS_PAGE_DATA *page_1, + const IGVM_VHS_PAGE_DATA *page_2) +{ + IgvmHandle data_handle1, data_handle2; + + /* + * If one page has data and the other doesn't then this results in different + * page types: NORMAL vs ZERO. + */ + data_handle1 = igvm_get_header_data(igvm, IGVM_HEADER_SECTION_DIRECTIVE, + header_index - 1); + data_handle2 = + igvm_get_header_data(igvm, IGVM_HEADER_SECTION_DIRECTIVE, header_index); + if ((data_handle1 == IGVMAPI_NO_DATA || + data_handle2 == IGVMAPI_NO_DATA) && + data_handle1 != data_handle2) { + return false; + } + return ((*(const uint32_t *)&page_1->flags == + *(const uint32_t *)&page_2->flags) && + (page_1->data_type == page_2->data_type) && + (page_1->compatibility_mask == page_2->compatibility_mask)); +} + +static int qigvm_process_mem_region(QIgvm *ctx, unsigned start_index, + uint64_t gpa_start, unsigned page_count, + const IgvmPageDataFlags *flags, + const IgvmPageDataType page_type, + Error **errp) +{ + uint8_t *region; + IgvmHandle data_handle; + const void *data; + uint32_t data_size; + unsigned page_index; + bool zero = true; + const uint64_t page_size = flags->is_2mb_page ? 0x200000 : 0x1000; + int result; + int cgs_page_type; + + region = qigvm_prepare_memory(ctx, gpa_start, page_count * page_size, + start_index, errp); + if (!region) { + return -1; + } + + for (page_index = 0; page_index < page_count; page_index++) { + data_handle = igvm_get_header_data( + ctx->file, IGVM_HEADER_SECTION_DIRECTIVE, page_index + start_index); + if (data_handle == IGVMAPI_NO_DATA) { + /* No data indicates a zero page */ + memset(®ion[page_index * page_size], 0, page_size); + } else if (data_handle < 0) { + error_setg( + errp, + "IGVM file contains invalid page data for directive with " + "index %d", + page_index + start_index); + return -1; + } else { + zero = false; + data_size = igvm_get_buffer_size(ctx->file, data_handle); + if (data_size < page_size) { + memset(®ion[page_index * page_size], 0, page_size); + } else if (data_size > page_size) { + error_setg(errp, + "IGVM file contains page data with invalid size for " + "directive with index %d", + page_index + start_index); + return -1; + } + data = igvm_get_buffer(ctx->file, data_handle); + memcpy(®ion[page_index * page_size], data, data_size); + igvm_free_buffer(ctx->file, data_handle); + } + } + + /* + * If a confidential guest support object is provided then use it to set the + * guest state. + */ + if (ctx->cgs) { + cgs_page_type = + qigvm_type_to_cgs_type(page_type, flags->unmeasured, zero); + if (cgs_page_type < 0) { + error_setg(errp, + "Invalid page type in IGVM file. 
Directives: %d to %d, " + "page type: %d", + start_index, start_index + page_count, page_type); + return -1; + } + + result = ctx->cgsc->set_guest_state( + gpa_start, region, page_size * page_count, cgs_page_type, 0, errp); + if (result < 0) { + return result; + } + } + return 0; +} + +static int qigvm_process_mem_page(QIgvm *ctx, + const IGVM_VHS_PAGE_DATA *page_data, + Error **errp) +{ + if (page_data) { + if (ctx->region_page_count == 0) { + ctx->region_start = page_data->gpa; + ctx->region_start_index = ctx->current_header_index; + } else { + if (!qigvm_page_attrs_equal(ctx->file, ctx->current_header_index, + page_data, + &ctx->region_prev_page_data) || + ((ctx->region_prev_page_data.gpa + + (ctx->region_prev_page_data.flags.is_2mb_page ? 0x200000 : + 0x1000)) != + page_data->gpa) || + (ctx->region_last_index != (ctx->current_header_index - 1))) { + /* End of current region */ + if (qigvm_process_mem_region( + ctx, ctx->region_start_index, ctx->region_start, + ctx->region_page_count, + &ctx->region_prev_page_data.flags, + ctx->region_prev_page_data.data_type, errp) < 0) { + return -1; + } + ctx->region_page_count = 0; + ctx->region_start = page_data->gpa; + ctx->region_start_index = ctx->current_header_index; + } + } + memcpy(&ctx->region_prev_page_data, page_data, + sizeof(ctx->region_prev_page_data)); + ctx->region_last_index = ctx->current_header_index; + ctx->region_page_count++; + } else { + if (ctx->region_page_count > 0) { + if (qigvm_process_mem_region( + ctx, ctx->region_start_index, ctx->region_start, + ctx->region_page_count, &ctx->region_prev_page_data.flags, + ctx->region_prev_page_data.data_type, errp) < 0) { + return -1; + } + ctx->region_page_count = 0; + } + } + return 0; +} + +static int qigvm_directive_page_data(QIgvm *ctx, const uint8_t *header_data, + Error **errp) +{ + const IGVM_VHS_PAGE_DATA *page_data = + (const IGVM_VHS_PAGE_DATA *)header_data; + if (page_data->compatibility_mask & ctx->compatibility_mask) { + return qigvm_process_mem_page(ctx, page_data, errp); + } + return 0; +} + +static int qigvm_directive_vp_context(QIgvm *ctx, const uint8_t *header_data, + Error **errp) +{ + const IGVM_VHS_VP_CONTEXT *vp_context = + (const IGVM_VHS_VP_CONTEXT *)header_data; + IgvmHandle data_handle; + uint8_t *data; + int result; + + if (!(vp_context->compatibility_mask & ctx->compatibility_mask)) { + return 0; + } + + /* + * A confidential guest support object must be provided for setting + * a VP context. + */ + if (!ctx->cgs) { + error_setg( + errp, + "A VP context is present in the IGVM file but is not supported " + "by the current system."); + return -1; + } + + data_handle = igvm_get_header_data(ctx->file, IGVM_HEADER_SECTION_DIRECTIVE, + ctx->current_header_index); + if (data_handle < 0) { + error_setg(errp, "Invalid VP context in IGVM file. 
Error code: %X", + data_handle); + return -1; + } + + data = (uint8_t *)igvm_get_buffer(ctx->file, data_handle); + result = ctx->cgsc->set_guest_state( + vp_context->gpa, data, igvm_get_buffer_size(ctx->file, data_handle), + CGS_PAGE_TYPE_VMSA, vp_context->vp_index, errp); + igvm_free_buffer(ctx->file, data_handle); + if (result < 0) { + return result; + } + return 0; +} + +static int qigvm_directive_parameter_area(QIgvm *ctx, + const uint8_t *header_data, + Error **errp) +{ + const IGVM_VHS_PARAMETER_AREA *param_area = + (const IGVM_VHS_PARAMETER_AREA *)header_data; + QIgvmParameterData *param_entry; + + param_entry = g_new0(QIgvmParameterData, 1); + param_entry->size = param_area->number_of_bytes; + param_entry->index = param_area->parameter_area_index; + param_entry->data = g_malloc0(param_entry->size); + + QTAILQ_INSERT_TAIL(&ctx->parameter_data, param_entry, next); + return 0; +} + +static int qigvm_directive_parameter_insert(QIgvm *ctx, + const uint8_t *header_data, + Error **errp) +{ + const IGVM_VHS_PARAMETER_INSERT *param = + (const IGVM_VHS_PARAMETER_INSERT *)header_data; + QIgvmParameterData *param_entry; + int result; + void *region; + + if (!(param->compatibility_mask & ctx->compatibility_mask)) { + return 0; + } + + QTAILQ_FOREACH(param_entry, &ctx->parameter_data, next) + { + if (param_entry->index == param->parameter_area_index) { + region = qigvm_prepare_memory(ctx, param->gpa, param_entry->size, + ctx->current_header_index, errp); + if (!region) { + return -1; + } + memcpy(region, param_entry->data, param_entry->size); + g_free(param_entry->data); + param_entry->data = NULL; + + /* + * If a confidential guest support object is provided then use it to + * set the guest state. + */ + if (ctx->cgs) { + result = ctx->cgsc->set_guest_state(param->gpa, region, + param_entry->size, + CGS_PAGE_TYPE_UNMEASURED, 0, + errp); + if (result < 0) { + return -1; + } + } + } + } + return 0; +} + +static int qigvm_cmp_mm_entry(const void *a, const void *b) +{ + const IGVM_VHS_MEMORY_MAP_ENTRY *entry_a = + (const IGVM_VHS_MEMORY_MAP_ENTRY *)a; + const IGVM_VHS_MEMORY_MAP_ENTRY *entry_b = + (const IGVM_VHS_MEMORY_MAP_ENTRY *)b; + if (entry_a->starting_gpa_page_number < entry_b->starting_gpa_page_number) { + return -1; + } else if (entry_a->starting_gpa_page_number > + entry_b->starting_gpa_page_number) { + return 1; + } else { + return 0; + } +} + +static int qigvm_directive_memory_map(QIgvm *ctx, const uint8_t *header_data, + Error **errp) +{ + const IGVM_VHS_PARAMETER *param = (const IGVM_VHS_PARAMETER *)header_data; + QIgvmParameterData *param_entry; + int max_entry_count; + int entry = 0; + IGVM_VHS_MEMORY_MAP_ENTRY *mm_entry; + ConfidentialGuestMemoryMapEntry cgmm_entry; + int retval = 0; + + if (!ctx->cgs) { + error_setg(errp, + "IGVM file contains a memory map but this is not supported " + "by the current system."); + return -1; + } + + /* Find the parameter area that should hold the memory map */ + QTAILQ_FOREACH(param_entry, &ctx->parameter_data, next) + { + if (param_entry->index == param->parameter_area_index) { + max_entry_count = + param_entry->size / sizeof(IGVM_VHS_MEMORY_MAP_ENTRY); + mm_entry = (IGVM_VHS_MEMORY_MAP_ENTRY *)param_entry->data; + + retval = ctx->cgsc->get_mem_map_entry(entry, &cgmm_entry, errp); + while (retval == 0) { + if (entry > max_entry_count) { + error_setg( + errp, + "IGVM: guest memory map size exceeds parameter area defined in IGVM file"); + return -1; + } + mm_entry[entry].starting_gpa_page_number = cgmm_entry.gpa >> 12; + 
mm_entry[entry].number_of_pages = cgmm_entry.size >> 12; + + switch (cgmm_entry.type) { + case CGS_MEM_RAM: + mm_entry[entry].entry_type = + IGVM_MEMORY_MAP_ENTRY_TYPE_MEMORY; + break; + case CGS_MEM_RESERVED: + mm_entry[entry].entry_type = + IGVM_MEMORY_MAP_ENTRY_TYPE_PLATFORM_RESERVED; + break; + case CGS_MEM_ACPI: + mm_entry[entry].entry_type = + IGVM_MEMORY_MAP_ENTRY_TYPE_PLATFORM_RESERVED; + break; + case CGS_MEM_NVS: + mm_entry[entry].entry_type = + IGVM_MEMORY_MAP_ENTRY_TYPE_PERSISTENT; + break; + case CGS_MEM_UNUSABLE: + mm_entry[entry].entry_type = + IGVM_MEMORY_MAP_ENTRY_TYPE_PLATFORM_RESERVED; + break; + } + retval = + ctx->cgsc->get_mem_map_entry(++entry, &cgmm_entry, errp); + } + if (retval < 0) { + return retval; + } + /* The entries need to be sorted */ + qsort(mm_entry, entry, sizeof(IGVM_VHS_MEMORY_MAP_ENTRY), + qigvm_cmp_mm_entry); + + break; + } + } + return 0; +} + +static int qigvm_directive_vp_count(QIgvm *ctx, const uint8_t *header_data, + Error **errp) +{ + const IGVM_VHS_PARAMETER *param = (const IGVM_VHS_PARAMETER *)header_data; + QIgvmParameterData *param_entry; + uint32_t *vp_count; + CPUState *cpu; + + QTAILQ_FOREACH(param_entry, &ctx->parameter_data, next) + { + if (param_entry->index == param->parameter_area_index) { + vp_count = (uint32_t *)(param_entry->data + param->byte_offset); + *vp_count = 0; + CPU_FOREACH(cpu) + { + (*vp_count)++; + } + break; + } + } + return 0; +} + +static int qigvm_directive_environment_info(QIgvm *ctx, + const uint8_t *header_data, + Error **errp) +{ + const IGVM_VHS_PARAMETER *param = (const IGVM_VHS_PARAMETER *)header_data; + QIgvmParameterData *param_entry; + IgvmEnvironmentInfo *environmental_state; + + QTAILQ_FOREACH(param_entry, &ctx->parameter_data, next) + { + if (param_entry->index == param->parameter_area_index) { + environmental_state = + (IgvmEnvironmentInfo *)(param_entry->data + param->byte_offset); + environmental_state->memory_is_shared = 1; + break; + } + } + return 0; +} + +static int qigvm_directive_required_memory(QIgvm *ctx, + const uint8_t *header_data, + Error **errp) +{ + const IGVM_VHS_REQUIRED_MEMORY *mem = + (const IGVM_VHS_REQUIRED_MEMORY *)header_data; + uint8_t *region; + int result; + + if (!(mem->compatibility_mask & ctx->compatibility_mask)) { + return 0; + } + + region = qigvm_prepare_memory(ctx, mem->gpa, mem->number_of_bytes, + ctx->current_header_index, errp); + if (!region) { + return -1; + } + if (ctx->cgs) { + result = + ctx->cgsc->set_guest_state(mem->gpa, region, mem->number_of_bytes, + CGS_PAGE_TYPE_REQUIRED_MEMORY, 0, errp); + if (result < 0) { + return result; + } + } + return 0; +} + +static int qigvm_directive_snp_id_block(QIgvm *ctx, const uint8_t *header_data, + Error **errp) +{ + const IGVM_VHS_SNP_ID_BLOCK *igvm_id = + (const IGVM_VHS_SNP_ID_BLOCK *)header_data; + + if (!(igvm_id->compatibility_mask & ctx->compatibility_mask)) { + return 0; + } + + if (ctx->id_block) { + error_setg(errp, "IGVM: Multiple ID blocks encountered " + "in IGVM file."); + return -1; + } + ctx->id_block = g_new0(struct sev_id_block, 1); + ctx->id_auth = g_new0(struct sev_id_authentication, 1); + + memcpy(ctx->id_block->family_id, igvm_id->family_id, + sizeof(ctx->id_block->family_id)); + memcpy(ctx->id_block->image_id, igvm_id->image_id, + sizeof(ctx->id_block->image_id)); + ctx->id_block->guest_svn = igvm_id->guest_svn; + ctx->id_block->version = IGVM_SEV_ID_BLOCK_VERSION; + memcpy(ctx->id_block->ld, igvm_id->ld, sizeof(ctx->id_block->ld)); + + ctx->id_auth->id_key_alg = igvm_id->id_key_algorithm; + 
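+    /*
+     * The SEV ID-authentication structure uses fixed-size buffers, so the
+     * two signature copies below assert that the IGVM source fields fit in
+     * the destination fields before each memcpy().
+     */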
assert(sizeof(igvm_id->id_key_signature) <= + sizeof(ctx->id_auth->id_block_sig)); + memcpy(ctx->id_auth->id_block_sig, &igvm_id->id_key_signature, + sizeof(igvm_id->id_key_signature)); + + ctx->id_auth->auth_key_algo = igvm_id->author_key_algorithm; + assert(sizeof(igvm_id->author_key_signature) <= + sizeof(ctx->id_auth->id_key_sig)); + memcpy(ctx->id_auth->id_key_sig, &igvm_id->author_key_signature, + sizeof(igvm_id->author_key_signature)); + + /* + * SEV and IGVM public key structure population are slightly different. + * See SEV Secure Nested Paging Firmware ABI Specification, Chapter 10. + */ + *((uint32_t *)ctx->id_auth->id_key) = igvm_id->id_public_key.curve; + memcpy(&ctx->id_auth->id_key[4], &igvm_id->id_public_key.qx, 72); + memcpy(&ctx->id_auth->id_key[76], &igvm_id->id_public_key.qy, 72); + + *((uint32_t *)ctx->id_auth->author_key) = + igvm_id->author_public_key.curve; + memcpy(&ctx->id_auth->author_key[4], &igvm_id->author_public_key.qx, + 72); + memcpy(&ctx->id_auth->author_key[76], &igvm_id->author_public_key.qy, + 72); + + return 0; +} + +static int qigvm_initialization_guest_policy(QIgvm *ctx, + const uint8_t *header_data, Error **errp) +{ + const IGVM_VHS_GUEST_POLICY *guest = + (const IGVM_VHS_GUEST_POLICY *)header_data; + + if (guest->compatibility_mask & ctx->compatibility_mask) { + ctx->sev_policy = guest->policy; + } + return 0; +} + +static int qigvm_supported_platform_compat_mask(QIgvm *ctx, Error **errp) +{ + int32_t header_count; + unsigned header_index; + IgvmHandle header_handle; + IGVM_VHS_SUPPORTED_PLATFORM *platform; + uint32_t compatibility_mask_sev = 0; + uint32_t compatibility_mask_sev_es = 0; + uint32_t compatibility_mask_sev_snp = 0; + uint32_t compatibility_mask = 0; + + header_count = igvm_header_count(ctx->file, IGVM_HEADER_SECTION_PLATFORM); + if (header_count < 0) { + error_setg(errp, + "Invalid platform header count in IGVM file. Error code: %X", + header_count); + return -1; + } + + for (header_index = 0; header_index < (unsigned)header_count; + header_index++) { + IgvmVariableHeaderType typ = igvm_get_header_type( + ctx->file, IGVM_HEADER_SECTION_PLATFORM, header_index); + if (typ == IGVM_VHT_SUPPORTED_PLATFORM) { + header_handle = igvm_get_header( + ctx->file, IGVM_HEADER_SECTION_PLATFORM, header_index); + if (header_handle < 0) { + error_setg(errp, + "Invalid platform header in IGVM file. 
" + "Index: %d, Error code: %X", + header_index, header_handle); + return -1; + } + platform = + (IGVM_VHS_SUPPORTED_PLATFORM *)(igvm_get_buffer(ctx->file, + header_handle) + + sizeof( + IGVM_VHS_VARIABLE_HEADER)); + if ((platform->platform_type == IGVM_PLATFORM_TYPE_SEV_ES) && + ctx->cgs) { + if (ctx->cgsc->check_support( + CGS_PLATFORM_SEV_ES, platform->platform_version, + platform->highest_vtl, platform->shared_gpa_boundary)) { + compatibility_mask_sev_es = platform->compatibility_mask; + } + } else if ((platform->platform_type == IGVM_PLATFORM_TYPE_SEV) && + ctx->cgs) { + if (ctx->cgsc->check_support( + CGS_PLATFORM_SEV, platform->platform_version, + platform->highest_vtl, platform->shared_gpa_boundary)) { + compatibility_mask_sev = platform->compatibility_mask; + } + } else if ((platform->platform_type == + IGVM_PLATFORM_TYPE_SEV_SNP) && + ctx->cgs) { + if (ctx->cgsc->check_support( + CGS_PLATFORM_SEV_SNP, platform->platform_version, + platform->highest_vtl, platform->shared_gpa_boundary)) { + compatibility_mask_sev_snp = platform->compatibility_mask; + } + } else if (platform->platform_type == IGVM_PLATFORM_TYPE_NATIVE) { + compatibility_mask = platform->compatibility_mask; + } + igvm_free_buffer(ctx->file, header_handle); + } + } + /* Choose the strongest supported isolation technology */ + if (compatibility_mask_sev_snp != 0) { + ctx->compatibility_mask = compatibility_mask_sev_snp; + ctx->platform_type = IGVM_PLATFORM_TYPE_SEV_SNP; + } else if (compatibility_mask_sev_es != 0) { + ctx->compatibility_mask = compatibility_mask_sev_es; + ctx->platform_type = IGVM_PLATFORM_TYPE_SEV_ES; + } else if (compatibility_mask_sev != 0) { + ctx->compatibility_mask = compatibility_mask_sev; + ctx->platform_type = IGVM_PLATFORM_TYPE_SEV; + } else if (compatibility_mask != 0) { + ctx->compatibility_mask = compatibility_mask; + ctx->platform_type = IGVM_PLATFORM_TYPE_NATIVE; + } else { + error_setg( + errp, + "IGVM file does not describe a compatible supported platform"); + return -1; + } + return 0; +} + +static int qigvm_handle_policy(QIgvm *ctx, Error **errp) +{ + if (ctx->platform_type == IGVM_PLATFORM_TYPE_SEV_SNP) { + int id_block_len = 0; + int id_auth_len = 0; + if (ctx->id_block) { + ctx->id_block->policy = ctx->sev_policy; + id_block_len = sizeof(struct sev_id_block); + id_auth_len = sizeof(struct sev_id_authentication); + } + return ctx->cgsc->set_guest_policy(GUEST_POLICY_SEV, ctx->sev_policy, + ctx->id_block, id_block_len, + ctx->id_auth, id_auth_len, errp); + } + return 0; +} + +static IgvmHandle qigvm_file_init(char *filename, Error **errp) +{ + IgvmHandle igvm; + g_autofree uint8_t *buf = NULL; + unsigned long len; + g_autoptr(GError) gerr = NULL; + + if (!g_file_get_contents(filename, (gchar **)&buf, &len, &gerr)) { + error_setg(errp, "Unable to load %s: %s", filename, gerr->message); + return -1; + } + + igvm = igvm_new_from_binary(buf, len); + if (igvm < 0) { + error_setg(errp, "Unable to parse IGVM file %s: %d", filename, igvm); + return -1; + } + return igvm; +} + +int qigvm_process_file(IgvmCfg *cfg, ConfidentialGuestSupport *cgs, + bool onlyVpContext, Error **errp) +{ + int32_t header_count; + QIgvmParameterData *parameter; + int retval = -1; + QIgvm ctx; + + memset(&ctx, 0, sizeof(ctx)); + ctx.file = qigvm_file_init(cfg->filename, errp); + if (ctx.file < 0) { + return -1; + } + + /* + * The ConfidentialGuestSupport object is optional and allows a confidential + * guest platform to perform extra processing, such as page measurement, on + * IGVM directives. 
+ */ + ctx.cgs = cgs; + ctx.cgsc = cgs ? CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs) : NULL; + + /* + * Check that the IGVM file provides configuration for the current + * platform + */ + if (qigvm_supported_platform_compat_mask(&ctx, errp) < 0) { + goto cleanup; + } + + header_count = igvm_header_count(ctx.file, IGVM_HEADER_SECTION_DIRECTIVE); + if (header_count <= 0) { + error_setg( + errp, "Invalid directive header count in IGVM file. Error code: %X", + header_count); + goto cleanup; + } + + QTAILQ_INIT(&ctx.parameter_data); + + for (ctx.current_header_index = 0; + ctx.current_header_index < (unsigned)header_count; + ctx.current_header_index++) { + IgvmVariableHeaderType type = igvm_get_header_type( + ctx.file, IGVM_HEADER_SECTION_DIRECTIVE, ctx.current_header_index); + if (!onlyVpContext || (type == IGVM_VHT_VP_CONTEXT)) { + if (qigvm_handler(&ctx, type, errp) < 0) { + goto cleanup_parameters; + } + } + } + + /* + * If only processing the VP context then we don't need to process + * any more of the file. + */ + if (onlyVpContext) { + retval = 0; + goto cleanup_parameters; + } + + header_count = + igvm_header_count(ctx.file, IGVM_HEADER_SECTION_INITIALIZATION); + if (header_count < 0) { + error_setg( + errp, + "Invalid initialization header count in IGVM file. Error code: %X", + header_count); + goto cleanup_parameters; + } + + for (ctx.current_header_index = 0; + ctx.current_header_index < (unsigned)header_count; + ctx.current_header_index++) { + IgvmVariableHeaderType type = + igvm_get_header_type(ctx.file, IGVM_HEADER_SECTION_INITIALIZATION, + ctx.current_header_index); + if (qigvm_handler(&ctx, type, errp) < 0) { + goto cleanup_parameters; + } + } + + /* + * Contiguous pages of data with compatible flags are grouped together in + * order to reduce the number of memory regions we create. Make sure the + * last group is processed with this call. 
+ */ + retval = qigvm_process_mem_page(&ctx, NULL, errp); + + if (retval == 0) { + retval = qigvm_handle_policy(&ctx, errp); + } + +cleanup_parameters: + QTAILQ_FOREACH(parameter, &ctx.parameter_data, next) + { + g_free(parameter->data); + parameter->data = NULL; + } + g_free(ctx.id_block); + g_free(ctx.id_auth); + +cleanup: + igvm_free(ctx.file); + + return retval; +} diff --git a/backends/igvm.h b/backends/igvm.h new file mode 100644 index 00000000000..a4abab043a1 --- /dev/null +++ b/backends/igvm.h @@ -0,0 +1,22 @@ +/* + * QEMU IGVM configuration backend for Confidential Guests + * + * Copyright (C) 2023-2024 SUSE + * + * Authors: + * Roy Hopkins + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef BACKENDS_IGVM_H +#define BACKENDS_IGVM_H + +#include "system/confidential-guest-support.h" +#include "system/igvm-cfg.h" +#include "qapi/error.h" + +int qigvm_process_file(IgvmCfg *igvm, ConfidentialGuestSupport *cgs, + bool onlyVpContext, Error **errp); + +#endif diff --git a/backends/iommufd.c b/backends/iommufd.c index 9bc466a89c4..a82f0d65a38 100644 --- a/backends/iommufd.c +++ b/backends/iommufd.c @@ -11,17 +11,23 @@ */ #include "qemu/osdep.h" -#include "sysemu/iommufd.h" +#include "system/iommufd.h" #include "qapi/error.h" #include "qemu/module.h" #include "qom/object_interfaces.h" #include "qemu/error-report.h" +#include "migration/cpr.h" #include "monitor/monitor.h" #include "trace.h" -#include "hw/vfio/vfio-common.h" +#include "hw/vfio/vfio-device.h" #include <sys/ioctl.h> #include <linux/iommufd.h> +static const char *iommufd_fd_name(IOMMUFDBackend *be) +{ + return object_get_canonical_path_component(OBJECT(be)); +} + static void iommufd_backend_init(Object *obj) { IOMMUFDBackend *be = IOMMUFD_BACKEND(obj); @@ -64,26 +70,73 @@ static bool iommufd_backend_can_be_deleted(UserCreatable *uc) return !be->users; } -static void iommufd_backend_class_init(ObjectClass *oc, void *data) +static void iommufd_backend_complete(UserCreatable *uc, Error **errp) +{ + IOMMUFDBackend *be = IOMMUFD_BACKEND(uc); + const char *name = iommufd_fd_name(be); + + if (!be->owned) { + /* fd came from the command line. Fetch updated value from cpr state. */ + if (cpr_is_incoming()) { + be->fd = cpr_find_fd(name, 0); + } else { + cpr_save_fd(name, 0, be->fd); + } + } +} + +static void iommufd_backend_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); ucc->can_be_deleted = iommufd_backend_can_be_deleted; + ucc->complete = iommufd_backend_complete; object_class_property_add_str(oc, "fd", NULL, iommufd_backend_set_fd); } +bool iommufd_change_process_capable(IOMMUFDBackend *be) +{ + struct iommu_ioas_change_process args = {.size = sizeof(args)}; + + /* + * Call IOMMU_IOAS_CHANGE_PROCESS to verify it is a recognized ioctl. + * This is a no-op if the process has not changed since DMA was mapped.
+ */ + return !ioctl(be->fd, IOMMU_IOAS_CHANGE_PROCESS, &args); +} + +bool iommufd_change_process(IOMMUFDBackend *be, Error **errp) +{ + struct iommu_ioas_change_process args = {.size = sizeof(args)}; + bool ret = !ioctl(be->fd, IOMMU_IOAS_CHANGE_PROCESS, &args); + + if (!ret) { + error_setg_errno(errp, errno, "IOMMU_IOAS_CHANGE_PROCESS fd %d failed", + be->fd); + } + trace_iommufd_change_process(be->fd, ret); + return ret; +} + bool iommufd_backend_connect(IOMMUFDBackend *be, Error **errp) { int fd; if (be->owned && !be->users) { - fd = qemu_open("/dev/iommu", O_RDWR, errp); + fd = cpr_open_fd("/dev/iommu", O_RDWR, iommufd_fd_name(be), 0, errp); if (fd < 0) { return false; } be->fd = fd; } + if (!be->users && !vfio_iommufd_cpr_register_iommufd(be, errp)) { + if (be->owned) { + close(be->fd); + be->fd = -1; + } + return false; + } be->users++; trace_iommufd_backend_connect(be->fd, be->owned, be->users); @@ -96,9 +149,13 @@ void iommufd_backend_disconnect(IOMMUFDBackend *be) goto out; } be->users--; - if (!be->users && be->owned) { - close(be->fd); - be->fd = -1; + if (!be->users) { + vfio_iommufd_cpr_unregister_iommufd(be); + if (be->owned) { + cpr_delete_fd(iommufd_fd_name(be), 0); + close(be->fd); + be->fd = -1; + } } out: trace_iommufd_backend_disconnect(be->fd, be->users); @@ -167,8 +224,44 @@ int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova, /* TODO: Not support mapping hardware PCI BAR region for now. */ if (errno == EFAULT) { warn_report("IOMMU_IOAS_MAP failed: %m, PCI BAR?"); - } else { - error_report("IOMMU_IOAS_MAP failed: %m"); + } + } + return ret; +} + +int iommufd_backend_map_file_dma(IOMMUFDBackend *be, uint32_t ioas_id, + hwaddr iova, ram_addr_t size, + int mfd, unsigned long start, bool readonly) +{ + int ret, fd = be->fd; + struct iommu_ioas_map_file map = { + .size = sizeof(map), + .flags = IOMMU_IOAS_MAP_READABLE | + IOMMU_IOAS_MAP_FIXED_IOVA, + .ioas_id = ioas_id, + .fd = mfd, + .start = start, + .iova = iova, + .length = size, + }; + + if (cpr_is_incoming()) { + return 0; + } + + if (!readonly) { + map.flags |= IOMMU_IOAS_MAP_WRITEABLE; + } + + ret = ioctl(fd, IOMMU_IOAS_MAP_FILE, &map); + trace_iommufd_backend_map_file_dma(fd, ioas_id, iova, size, mfd, start, + readonly, ret); + if (ret) { + ret = -errno; + + /* TODO: Not support mapping hardware PCI BAR region for now. */ + if (errno == EFAULT) { + warn_report("IOMMU_IOAS_MAP_FILE failed: %m, PCI BAR?"); } } return ret; @@ -185,6 +278,10 @@ int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id, .length = size, }; + if (cpr_is_incoming()) { + return 0; + } + ret = ioctl(fd, IOMMU_IOAS_UNMAP, &unmap); /* * IOMMUFD takes mapping as some kind of object, unmapping @@ -203,7 +300,6 @@ int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id, if (ret) { ret = -errno; - error_report("IOMMU_IOAS_UNMAP failed: %m"); } return ret; } @@ -290,15 +386,23 @@ bool iommufd_backend_get_dirty_bitmap(IOMMUFDBackend *be, return true; } +/* + * @type can carry a desired HW info type defined in the uapi headers. If caller + * doesn't have one, indicating it wants the default type, then @type should be + * zeroed (i.e. IOMMU_HW_INFO_TYPE_DEFAULT). + */ bool iommufd_backend_get_device_info(IOMMUFDBackend *be, uint32_t devid, uint32_t *type, void *data, uint32_t len, - uint64_t *caps, Error **errp) + uint64_t *caps, uint8_t *pasid_log2, + Error **errp) { struct iommu_hw_info info = { + .flags = (*type) ? 
IOMMU_HW_INFO_FLAG_INPUT_TYPE : 0, .size = sizeof(info), .dev_id = devid, .data_len = len, .data_uptr = (uintptr_t)data, + .in_data_type = *type, }; if (ioctl(be->fd, IOMMU_GET_HW_INFO, &info)) { @@ -311,9 +415,214 @@ bool iommufd_backend_get_device_info(IOMMUFDBackend *be, uint32_t devid, g_assert(caps); *caps = info.out_capabilities; + if (pasid_log2) { + *pasid_log2 = info.out_max_pasid_log2; + } return true; } +bool iommufd_backend_invalidate_cache(IOMMUFDBackend *be, uint32_t id, + uint32_t data_type, uint32_t entry_len, + uint32_t *entry_num, void *data, + Error **errp) +{ + int ret, fd = be->fd; + uint32_t total_entries = *entry_num; + struct iommu_hwpt_invalidate cache = { + .size = sizeof(cache), + .hwpt_id = id, + .data_type = data_type, + .entry_len = entry_len, + .entry_num = total_entries, + .data_uptr = (uintptr_t)data, + }; + + ret = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cache); + trace_iommufd_backend_invalidate_cache(fd, id, data_type, entry_len, + total_entries, cache.entry_num, + (uintptr_t)data, ret ? errno : 0); + *entry_num = cache.entry_num; + + if (ret) { + error_setg_errno(errp, errno, "IOMMU_HWPT_INVALIDATE failed:" + " total %d entries, processed %d entries", + total_entries, cache.entry_num); + } else if (total_entries != cache.entry_num) { + error_setg(errp, "IOMMU_HWPT_INVALIDATE succeed but with unprocessed" + " entries: total %d entries, processed %d entries." + " Kernel BUG?!", total_entries, cache.entry_num); + return false; + } + + return !ret; +} + +bool iommufd_backend_alloc_viommu(IOMMUFDBackend *be, uint32_t dev_id, + uint32_t viommu_type, uint32_t hwpt_id, + void *data_ptr, uint32_t len, + uint32_t *out_viommu_id, Error **errp) +{ + int ret; + struct iommu_viommu_alloc alloc_viommu = { + .size = sizeof(alloc_viommu), + .type = viommu_type, + .dev_id = dev_id, + .hwpt_id = hwpt_id, + .data_len = len, + .data_uptr = (uintptr_t)data_ptr, + }; + + ret = ioctl(be->fd, IOMMU_VIOMMU_ALLOC, &alloc_viommu); + + trace_iommufd_backend_alloc_viommu(be->fd, dev_id, viommu_type, hwpt_id, + alloc_viommu.out_viommu_id, ret); + if (ret) { + error_setg_errno(errp, errno, "IOMMU_VIOMMU_ALLOC failed"); + return false; + } + + g_assert(out_viommu_id); + *out_viommu_id = alloc_viommu.out_viommu_id; + return true; +} + +bool iommufd_backend_alloc_vdev(IOMMUFDBackend *be, uint32_t dev_id, + uint32_t viommu_id, uint64_t virt_id, + uint32_t *out_vdev_id, Error **errp) +{ + int ret; + struct iommu_vdevice_alloc alloc_vdev = { + .size = sizeof(alloc_vdev), + .viommu_id = viommu_id, + .dev_id = dev_id, + .virt_id = virt_id, + }; + + ret = ioctl(be->fd, IOMMU_VDEVICE_ALLOC, &alloc_vdev); + + trace_iommufd_backend_alloc_vdev(be->fd, dev_id, viommu_id, virt_id, + alloc_vdev.out_vdevice_id, ret); + + if (ret) { + error_setg_errno(errp, errno, "IOMMU_VDEVICE_ALLOC failed"); + return false; + } + + g_assert(out_vdev_id); + *out_vdev_id = alloc_vdev.out_vdevice_id; + return true; +} + +bool iommufd_backend_alloc_veventq(IOMMUFDBackend *be, uint32_t viommu_id, + uint32_t type, uint32_t depth, + uint32_t *out_veventq_id, + uint32_t *out_veventq_fd, Error **errp) +{ + int ret; + struct iommu_veventq_alloc alloc_veventq = { + .size = sizeof(alloc_veventq), + .flags = 0, + .type = type, + .veventq_depth = depth, + .viommu_id = viommu_id, + }; + + ret = ioctl(be->fd, IOMMU_VEVENTQ_ALLOC, &alloc_veventq); + + trace_iommufd_backend_alloc_veventq(be->fd, viommu_id, type, + alloc_veventq.out_veventq_id, + alloc_veventq.out_veventq_fd, ret); + if (ret) { + error_setg_errno(errp, errno, 
"IOMMU_VEVENTQ_ALLOC failed"); + return false; + } + + g_assert(out_veventq_id); + g_assert(out_veventq_fd); + *out_veventq_id = alloc_veventq.out_veventq_id; + *out_veventq_fd = alloc_veventq.out_veventq_fd; + return true; +} + +bool iommufd_backend_alloc_hw_queue(IOMMUFDBackend *be, uint32_t viommu_id, + uint32_t data_type, uint32_t index, + uint64_t addr, uint64_t length, + uint32_t *out_hw_queue_id, Error **errp) +{ + int ret; + struct iommu_hw_queue_alloc alloc_hw_queue = { + .size = sizeof(alloc_hw_queue), + .flags = 0, + .viommu_id = viommu_id, + .type = data_type, + .index = index, + .nesting_parent_iova = addr, + .length = length, + }; + + ret = ioctl(be->fd, IOMMU_HW_QUEUE_ALLOC, &alloc_hw_queue); + + trace_iommufd_backend_alloc_hw_queue(be->fd, viommu_id, data_type, + index, addr, length, + alloc_hw_queue.out_hw_queue_id, ret); + if (ret) { + error_setg_errno(errp, errno, "IOMMU_HW_QUEUE_ALLOC failed"); + return false; + } + + g_assert(out_hw_queue_id); + *out_hw_queue_id = alloc_hw_queue.out_hw_queue_id; + return true; +} + +/* Caller is responsible for doing munmap() */ +bool iommufd_backend_viommu_mmap(IOMMUFDBackend *be, uint32_t viommu_id, + uint64_t size, off_t offset, void **ptr) +{ + g_assert(viommu_id); + g_assert(ptr); + + *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, be->fd, offset); + + trace_iommufd_backend_viommu_mmap(be->fd, viommu_id, size, offset); + + if (*ptr == MAP_FAILED) { + error_report("failed to mmap (size=0x%" PRIx64 ") for viommu (id=%d)", + size, viommu_id); + return false; + } + + return true; +} + +bool host_iommu_device_iommufd_attach_hwpt(HostIOMMUDeviceIOMMUFD *idev, + uint32_t hwpt_id, Error **errp) +{ + HostIOMMUDeviceIOMMUFDClass *idevc = + HOST_IOMMU_DEVICE_IOMMUFD_GET_CLASS(idev); + + g_assert(idevc->attach_hwpt); + return idevc->attach_hwpt(idev, hwpt_id, errp); +} + +bool host_iommu_device_iommufd_detach_hwpt(HostIOMMUDeviceIOMMUFD *idev, + Error **errp) +{ + HostIOMMUDeviceIOMMUFDClass *idevc = + HOST_IOMMU_DEVICE_IOMMUFD_GET_CLASS(idev); + + g_assert(idevc->detach_hwpt); + return idevc->detach_hwpt(idev, errp); +} + +static uint8_t hiod_iommufd_get_pasid(HostIOMMUDevice *hiod, uint64_t *hw_caps) +{ + HostIOMMUDeviceCaps *caps = &hiod->caps; + + *hw_caps = caps->hw_caps; + return caps->max_pasid_log2; +} + static int hiod_iommufd_get_cap(HostIOMMUDevice *hiod, int cap, Error **errp) { HostIOMMUDeviceCaps *caps = &hiod->caps; @@ -329,11 +638,12 @@ static int hiod_iommufd_get_cap(HostIOMMUDevice *hiod, int cap, Error **errp) } } -static void hiod_iommufd_class_init(ObjectClass *oc, void *data) +static void hiod_iommufd_class_init(ObjectClass *oc, const void *data) { HostIOMMUDeviceClass *hioc = HOST_IOMMU_DEVICE_CLASS(oc); hioc->get_cap = hiod_iommufd_get_cap; + hioc->get_pasid = hiod_iommufd_get_pasid; }; static const TypeInfo types[] = { @@ -345,13 +655,15 @@ static const TypeInfo types[] = { .instance_finalize = iommufd_backend_finalize, .class_size = sizeof(IOMMUFDBackendClass), .class_init = iommufd_backend_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } }, { .name = TYPE_HOST_IOMMU_DEVICE_IOMMUFD, .parent = TYPE_HOST_IOMMU_DEVICE, + .instance_size = sizeof(HostIOMMUDeviceIOMMUFD), + .class_size = sizeof(HostIOMMUDeviceIOMMUFDClass), .class_init = hiod_iommufd_class_init, .abstract = true, } diff --git a/backends/meson.build b/backends/meson.build index da714b93d1e..60021f45d12 100644 --- a/backends/meson.build +++ b/backends/meson.build @@ -12,8 
+12,10 @@ system_ss.add([files( if host_os != 'windows' system_ss.add(files('rng-random.c')) - system_ss.add(files('hostmem-file.c')) - system_ss.add([files('hostmem-shm.c'), rt]) + if host_os != 'emscripten' + system_ss.add(files('hostmem-file.c')) + system_ss.add([files('hostmem-shm.c'), rt]) + endif endif if host_os == 'linux' system_ss.add(files('hostmem-memfd.c')) @@ -32,6 +34,11 @@ if have_vhost_user_crypto endif system_ss.add(when: gio, if_true: files('dbus-vmstate.c')) system_ss.add(when: 'CONFIG_SGX', if_true: files('hostmem-epc.c')) +if igvm.found() + system_ss.add(igvm) + system_ss.add(files('igvm-cfg.c'), igvm) + system_ss.add(files('igvm.c'), igvm) +endif system_ss.add(when: 'CONFIG_SPDM_SOCKET', if_true: files('spdm-socket.c')) diff --git a/backends/rng-builtin.c b/backends/rng-builtin.c index f367eb665cf..41b7bfaa27f 100644 --- a/backends/rng-builtin.c +++ b/backends/rng-builtin.c @@ -6,11 +6,11 @@ */ #include "qemu/osdep.h" -#include "sysemu/rng.h" +#include "system/rng.h" #include "qemu/main-loop.h" #include "qemu/guest-random.h" #include "qom/object.h" -#include "sysemu/replay.h" +#include "system/replay.h" OBJECT_DECLARE_SIMPLE_TYPE(RngBuiltin, RNG_BUILTIN) @@ -55,7 +55,7 @@ static void rng_builtin_finalize(Object *obj) qemu_bh_delete(s->bh); } -static void rng_builtin_class_init(ObjectClass *klass, void *data) +static void rng_builtin_class_init(ObjectClass *klass, const void *data) { RngBackendClass *rbc = RNG_BACKEND_CLASS(klass); diff --git a/backends/rng-egd.c b/backends/rng-egd.c index 684c3cf3d61..9fd3393ede2 100644 --- a/backends/rng-egd.c +++ b/backends/rng-egd.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/rng.h" +#include "system/rng.h" #include "chardev/char-fe.h" #include "qapi/error.h" #include "qapi/qmp/qerror.h" @@ -143,7 +143,7 @@ static void rng_egd_finalize(Object *obj) g_free(s->chr_name); } -static void rng_egd_class_init(ObjectClass *klass, void *data) +static void rng_egd_class_init(ObjectClass *klass, const void *data) { RngBackendClass *rbc = RNG_BACKEND_CLASS(klass); diff --git a/backends/rng-random.c b/backends/rng-random.c index 489c0917f09..820bf48c9b4 100644 --- a/backends/rng-random.c +++ b/backends/rng-random.c @@ -11,8 +11,8 @@ */ #include "qemu/osdep.h" -#include "sysemu/rng-random.h" -#include "sysemu/rng.h" +#include "system/rng-random.h" +#include "system/rng.h" #include "qapi/error.h" #include "qapi/qmp/qerror.h" #include "qemu/main-loop.h" @@ -121,7 +121,7 @@ static void rng_random_finalize(Object *obj) g_free(s->filename); } -static void rng_random_class_init(ObjectClass *klass, void *data) +static void rng_random_class_init(ObjectClass *klass, const void *data) { RngBackendClass *rbc = RNG_BACKEND_CLASS(klass); diff --git a/backends/rng.c b/backends/rng.c index 9bbd0c77b69..ab94dfea850 100644 --- a/backends/rng.c +++ b/backends/rng.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/rng.h" +#include "system/rng.h" #include "qapi/error.h" #include "qemu/module.h" #include "qom/object_interfaces.h" @@ -99,7 +99,7 @@ static void rng_backend_finalize(Object *obj) rng_backend_free_requests(s); } -static void rng_backend_class_init(ObjectClass *oc, void *data) +static void rng_backend_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); @@ -119,7 +119,7 @@ static const TypeInfo rng_backend_info = { .class_size = sizeof(RngBackendClass), .class_init = rng_backend_class_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const 
InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/backends/spdm-socket.c b/backends/spdm-socket.c index d0663d696ca..2c709c68c87 100644 --- a/backends/spdm-socket.c +++ b/backends/spdm-socket.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/spdm-socket.h" +#include "system/spdm-socket.h" #include "qapi/error.h" static bool read_bytes(const int socket, uint8_t *buffer, diff --git a/backends/tpm/tpm_backend.c b/backends/tpm/tpm_backend.c index 485a20b9e09..8cf80043ac5 100644 --- a/backends/tpm/tpm_backend.c +++ b/backends/tpm/tpm_backend.c @@ -13,9 +13,9 @@ */ #include "qemu/osdep.h" -#include "sysemu/tpm_backend.h" +#include "system/tpm_backend.h" #include "qapi/error.h" -#include "sysemu/tpm.h" +#include "system/tpm.h" #include "qemu/thread.h" #include "qemu/main-loop.h" #include "qemu/module.h" diff --git a/backends/tpm/tpm_emulator.c b/backends/tpm/tpm_emulator.c index 5a8fba9bded..4a234ab2c0b 100644 --- a/backends/tpm/tpm_emulator.c +++ b/backends/tpm/tpm_emulator.c @@ -32,9 +32,9 @@ #include "qemu/sockets.h" #include "qemu/lockable.h" #include "io/channel-socket.h" -#include "sysemu/runstate.h" -#include "sysemu/tpm_backend.h" -#include "sysemu/tpm_util.h" +#include "system/runstate.h" +#include "system/tpm_backend.h" +#include "system/tpm_util.h" #include "tpm_int.h" #include "tpm_ioctl.h" #include "migration/blocker.h" @@ -72,7 +72,7 @@ struct TPMEmulator { CharBackend ctrl_chr; QIOChannel *data_ioc; TPMVersion tpm_version; - ptm_cap caps; /* capabilities of the TPM */ + uint32_t caps; /* capabilities of the TPM */ uint8_t cur_locty_number; /* last set locality */ Error *migration_blocker; @@ -123,15 +123,17 @@ static const char *tpm_emulator_strerror(uint32_t tpm_result) } static int tpm_emulator_ctrlcmd(TPMEmulator *tpm, unsigned long cmd, void *msg, - size_t msg_len_in, size_t msg_len_out) + size_t msg_len_in, size_t msg_len_out_err, + size_t msg_len_out_total) { CharBackend *dev = &tpm->ctrl_chr; uint32_t cmd_no = cpu_to_be32(cmd); ssize_t n = sizeof(uint32_t) + msg_len_in; - uint8_t *buf = NULL; + ptm_res res; WITH_QEMU_LOCK_GUARD(&tpm->mutex) { - buf = g_alloca(n); + g_autofree uint8_t *buf = g_malloc(n); + memcpy(buf, &cmd_no, sizeof(cmd_no)); memcpy(buf + sizeof(cmd_no), msg, msg_len_in); @@ -140,8 +142,25 @@ static int tpm_emulator_ctrlcmd(TPMEmulator *tpm, unsigned long cmd, void *msg, return -1; } - if (msg_len_out != 0) { - n = qemu_chr_fe_read_all(dev, msg, msg_len_out); + if (msg_len_out_total > 0) { + assert(msg_len_out_total >= msg_len_out_err); + + n = qemu_chr_fe_read_all(dev, (uint8_t *)msg, msg_len_out_err); + if (n <= 0) { + return -1; + } + if (msg_len_out_err == msg_len_out_total) { + return 0; + } + /* result error code is always in the first 4 bytes */ + assert(sizeof(res) <= msg_len_out_err); + memcpy(&res, msg, sizeof(res)); + if (res) { + return 0; + } + + n = qemu_chr_fe_read_all(dev, (uint8_t *)msg + msg_len_out_err, + msg_len_out_total - msg_len_out_err); if (n <= 0) { return -1; } @@ -204,7 +223,8 @@ static int tpm_emulator_set_locality(TPMEmulator *tpm_emu, uint8_t locty_number, memset(&loc, 0, sizeof(loc)); loc.u.req.loc = locty_number; if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SET_LOCALITY, &loc, - sizeof(loc), sizeof(loc)) < 0) { + sizeof(loc), sizeof(loc.u.resp.tpm_result), + sizeof(loc)) < 0) { error_setg(errp, "tpm-emulator: could not set locality : %s", strerror(errno)); return -1; @@ -239,13 +259,16 @@ static void tpm_emulator_handle_request(TPMBackend *tb, TPMBackendCmd *cmd, static int 
tpm_emulator_probe_caps(TPMEmulator *tpm_emu) { - if (tpm_emulator_ctrlcmd(tpm_emu, CMD_GET_CAPABILITY, - &tpm_emu->caps, 0, sizeof(tpm_emu->caps)) < 0) { + ptm_cap_n cap_n; + + if (tpm_emulator_ctrlcmd(tpm_emu, CMD_GET_CAPABILITY, &cap_n, 0, + sizeof(cap_n.u.resp.tpm_result), + sizeof(cap_n)) < 0) { error_report("tpm-emulator: probing failed : %s", strerror(errno)); return -1; } - tpm_emu->caps = be64_to_cpu(tpm_emu->caps); + tpm_emu->caps = be32_to_cpu(cap_n.u.resp.caps); trace_tpm_emulator_probe_caps(tpm_emu->caps); @@ -254,7 +277,7 @@ static int tpm_emulator_probe_caps(TPMEmulator *tpm_emu) static int tpm_emulator_check_caps(TPMEmulator *tpm_emu) { - ptm_cap caps = 0; + uint32_t caps = 0; const char *tpm = NULL; /* check for min. required capabilities */ @@ -290,7 +313,8 @@ static int tpm_emulator_stop_tpm(TPMBackend *tb) TPMEmulator *tpm_emu = TPM_EMULATOR(tb); ptm_res res; - if (tpm_emulator_ctrlcmd(tpm_emu, CMD_STOP, &res, 0, sizeof(res)) < 0) { + if (tpm_emulator_ctrlcmd(tpm_emu, CMD_STOP, &res, 0, + sizeof(ptm_res), sizeof(res)) < 0) { error_report("tpm-emulator: Could not stop TPM: %s", strerror(errno)); return -1; @@ -317,8 +341,9 @@ static int tpm_emulator_lock_storage(TPMEmulator *tpm_emu) /* give failing side 300 * 10ms time to release lock */ pls.u.req.retries = cpu_to_be32(300); - if (tpm_emulator_ctrlcmd(tpm_emu, CMD_LOCK_STORAGE, &pls, - sizeof(pls.u.req), sizeof(pls.u.resp)) < 0) { + if (tpm_emulator_ctrlcmd(tpm_emu, CMD_LOCK_STORAGE, &pls, sizeof(pls.u.req), + sizeof(pls.u.resp.tpm_result), + sizeof(pls.u.resp)) < 0) { error_report("tpm-emulator: Could not lock storage within 3 seconds: " "%s", strerror(errno)); return -1; @@ -349,7 +374,8 @@ static int tpm_emulator_set_buffer_size(TPMBackend *tb, psbs.u.req.buffersize = cpu_to_be32(wanted_size); if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SET_BUFFERSIZE, &psbs, - sizeof(psbs.u.req), sizeof(psbs.u.resp)) < 0) { + sizeof(psbs.u.req), sizeof(psbs.u.resp.tpm_result), + sizeof(psbs.u.resp)) < 0) { error_report("tpm-emulator: Could not set buffer size: %s", strerror(errno)); return -1; @@ -396,6 +422,7 @@ static int tpm_emulator_startup_tpm_resume(TPMBackend *tb, size_t buffersize, } if (tpm_emulator_ctrlcmd(tpm_emu, CMD_INIT, &init, sizeof(init), + sizeof(init.u.resp.tpm_result), sizeof(init)) < 0) { error_report("tpm-emulator: could not send INIT: %s", strerror(errno)); @@ -437,8 +464,9 @@ static bool tpm_emulator_get_tpm_established_flag(TPMBackend *tb) return tpm_emu->established_flag; } - if (tpm_emulator_ctrlcmd(tpm_emu, CMD_GET_TPMESTABLISHED, &est, - 0, sizeof(est)) < 0) { + if (tpm_emulator_ctrlcmd(tpm_emu, CMD_GET_TPMESTABLISHED, &est, 0, + sizeof(est) /* always returns resp.bit */, + sizeof(est)) < 0) { error_report("tpm-emulator: Could not get the TPM established flag: %s", strerror(errno)); return false; @@ -466,6 +494,7 @@ static int tpm_emulator_reset_tpm_established_flag(TPMBackend *tb, reset_est.u.req.loc = tpm_emu->cur_locty_number; if (tpm_emulator_ctrlcmd(tpm_emu, CMD_RESET_TPMESTABLISHED, &reset_est, sizeof(reset_est), + sizeof(reset_est.u.resp.tpm_result), sizeof(reset_est)) < 0) { error_report("tpm-emulator: Could not reset the establishment bit: %s", strerror(errno)); @@ -497,7 +526,7 @@ static void tpm_emulator_cancel_cmd(TPMBackend *tb) /* FIXME: make the function non-blocking, or it may block a VCPU */ if (tpm_emulator_ctrlcmd(tpm_emu, CMD_CANCEL_TPM_CMD, &res, 0, - sizeof(res)) < 0) { + sizeof(ptm_res), sizeof(res)) < 0) { error_report("tpm-emulator: Could not cancel command: %s", strerror(errno)); } 
else if (res != 0) { @@ -527,8 +556,8 @@ static size_t tpm_emulator_get_buffer_size(TPMBackend *tb) static int tpm_emulator_block_migration(TPMEmulator *tpm_emu) { Error *err = NULL; - ptm_cap caps = PTM_CAP_GET_STATEBLOB | PTM_CAP_SET_STATEBLOB | - PTM_CAP_STOP; + uint32_t caps = PTM_CAP_GET_STATEBLOB | PTM_CAP_SET_STATEBLOB | + PTM_CAP_STOP; if (!TPM_EMULATOR_IMPLEMENTS_ALL_CAPS(tpm_emu, caps)) { error_setg(&tpm_emu->migration_blocker, @@ -557,7 +586,7 @@ static int tpm_emulator_prepare_data_fd(TPMEmulator *tpm_emu) qemu_chr_fe_set_msgfds(&tpm_emu->ctrl_chr, fds + 1, 1); if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SET_DATAFD, &res, 0, - sizeof(res)) < 0 || res != 0) { + sizeof(ptm_res), sizeof(res)) < 0 || res != 0) { error_report("tpm-emulator: Failed to send CMD_SET_DATAFD: %s", strerror(errno)); goto err_exit; @@ -704,6 +733,8 @@ static int tpm_emulator_get_state_blob(TPMEmulator *tpm_emu, if (tpm_emulator_ctrlcmd(tpm_emu, CMD_GET_STATEBLOB, &pgs, sizeof(pgs.u.req), + /* always returns up to resp.data */ + offsetof(ptm_getstate, u.resp.data), offsetof(ptm_getstate, u.resp.data)) < 0) { error_report("tpm-emulator: could not get state blob type %d : %s", type, strerror(errno)); @@ -806,7 +837,7 @@ static int tpm_emulator_set_state_blob(TPMEmulator *tpm_emu, /* write the header only */ if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SET_STATEBLOB, &pss, - offsetof(ptm_setstate, u.req.data), 0) < 0) { + offsetof(ptm_setstate, u.req.data), 0, 0) < 0) { error_report("tpm-emulator: could not set state blob type %d : %s", type, strerror(errno)); return -1; @@ -990,7 +1021,8 @@ static void tpm_emulator_shutdown(TPMEmulator *tpm_emu) return; } - if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SHUTDOWN, &res, 0, sizeof(res)) < 0) { + if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SHUTDOWN, &res, 0, + sizeof(ptm_res), sizeof(res)) < 0) { error_report("tpm-emulator: Could not cleanly shutdown the TPM: %s", strerror(errno)); } else if (res != 0) { @@ -1024,7 +1056,7 @@ static void tpm_emulator_inst_finalize(Object *obj) vmstate_unregister(NULL, &vmstate_tpm_emulator, obj); } -static void tpm_emulator_class_init(ObjectClass *klass, void *data) +static void tpm_emulator_class_init(ObjectClass *klass, const void *data) { TPMBackendClass *tbc = TPM_BACKEND_CLASS(klass); diff --git a/backends/tpm/tpm_int.h b/backends/tpm/tpm_int.h index ba6109306e5..2319a1ce0c4 100644 --- a/backends/tpm/tpm_int.h +++ b/backends/tpm/tpm_int.h @@ -13,7 +13,7 @@ #define BACKENDS_TPM_INT_H #include "qemu/option.h" -#include "sysemu/tpm.h" +#include "system/tpm.h" #define TPM_STANDARD_CMDLINE_OPTS \ { \ diff --git a/backends/tpm/tpm_ioctl.h b/backends/tpm/tpm_ioctl.h index 1933ab68557..ee2dd15d35e 100644 --- a/backends/tpm/tpm_ioctl.h +++ b/backends/tpm/tpm_ioctl.h @@ -29,6 +29,16 @@ typedef uint32_t ptm_res; +/* PTM_GET_CAPABILITY: Get supported capabilities (ioctl's) */ +struct ptm_cap_n { + union { + struct { + ptm_res tpm_result; /* will always be TPM_SUCCESS (0) */ + uint32_t caps; + } resp; /* response */ + } u; +}; + /* PTM_GET_TPMESTABLISHED: get the establishment bit */ struct ptm_est { union { @@ -242,7 +252,8 @@ struct ptm_lockstorage { } u; }; -typedef uint64_t ptm_cap; +typedef uint64_t ptm_cap; /* CUSE-only; use ptm_cap_n otherwise */ +typedef struct ptm_cap_n ptm_cap_n; typedef struct ptm_est ptm_est; typedef struct ptm_reset_est ptm_reset_est; typedef struct ptm_loc ptm_loc; diff --git a/backends/tpm/tpm_passthrough.c b/backends/tpm/tpm_passthrough.c index 179697a3a94..b7c7074c2aa 100644 --- a/backends/tpm/tpm_passthrough.c +++ 
b/backends/tpm/tpm_passthrough.c @@ -26,8 +26,8 @@ #include "qemu/error-report.h" #include "qemu/module.h" #include "qemu/sockets.h" -#include "sysemu/tpm_backend.h" -#include "sysemu/tpm_util.h" +#include "system/tpm_backend.h" +#include "system/tpm_util.h" #include "tpm_int.h" #include "qapi/clone-visitor.h" #include "qapi/qapi-visit-tpm.h" @@ -364,7 +364,7 @@ static void tpm_passthrough_inst_finalize(Object *obj) qapi_free_TPMPassthroughOptions(tpm_pt->options); } -static void tpm_passthrough_class_init(ObjectClass *klass, void *data) +static void tpm_passthrough_class_init(ObjectClass *klass, const void *data) { TPMBackendClass *tbc = TPM_BACKEND_CLASS(klass); diff --git a/backends/tpm/tpm_util.c b/backends/tpm/tpm_util.c index cf138551df1..f2d1739e336 100644 --- a/backends/tpm/tpm_util.c +++ b/backends/tpm/tpm_util.c @@ -21,13 +21,14 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" +#include "qemu/cutils.h" #include "qapi/error.h" #include "qapi/visitor.h" #include "tpm_int.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/qdev-properties.h" -#include "sysemu/tpm_backend.h" -#include "sysemu/tpm_util.h" +#include "system/tpm_backend.h" +#include "system/tpm_util.h" #include "trace.h" /* tpm backend property */ @@ -46,7 +47,7 @@ static void get_tpm(Object *obj, Visitor *v, const char *name, void *opaque, static void set_tpm(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; TPMBackend *s, **be = object_field_prop_ptr(obj, prop); char *str; @@ -66,7 +67,7 @@ static void set_tpm(Object *obj, Visitor *v, const char *name, void *opaque, static void release_tpm(Object *obj, const char *name, void *opaque) { - Property *prop = opaque; + const Property *prop = opaque; TPMBackend **be = object_field_prop_ptr(obj, prop); if (*be) { @@ -75,7 +76,7 @@ static void release_tpm(Object *obj, const char *name, void *opaque) } const PropertyInfo qdev_prop_tpm = { - .name = "str", + .type = "str", .description = "ID of a tpm to use as a backend", .get = get_tpm, .set = set_tpm, @@ -336,8 +337,8 @@ void tpm_sized_buffer_reset(TPMSizedBuffer *tsb) void tpm_util_show_buffer(const unsigned char *buffer, size_t buffer_size, const char *string) { - size_t len, i; - char *line_buffer, *p; + g_autoptr(GString) str = NULL; + size_t len, i, l; if (!trace_event_get_state_backends(TRACE_TPM_UTIL_SHOW_BUFFER_CONTENT)) { return; @@ -345,19 +346,14 @@ void tpm_util_show_buffer(const unsigned char *buffer, len = MIN(tpm_cmd_get_size(buffer), buffer_size); trace_tpm_util_show_buffer_header(string, len); - /* - * allocate enough room for 3 chars per buffer entry plus a - * newline after every 16 chars and a final null terminator. 
- */ - line_buffer = g_malloc(len * 3 + (len / 16) + 1); - - for (i = 0, p = line_buffer; i < len; i++) { - if (i && !(i % 16)) { - p += sprintf(p, "\n"); + for (i = 0; i < len; i += l) { + if (str) { + g_string_append_c(str, '\n'); } - p += sprintf(p, "%.2X ", buffer[i]); + l = MIN(len, 16); + str = qemu_hexdump_line(str, buffer, l, 1, 0); } - trace_tpm_util_show_buffer_content(line_buffer); - g_free(line_buffer); + g_string_ascii_up(str); + trace_tpm_util_show_buffer_content(str->str); } diff --git a/backends/tpm/trace-events b/backends/tpm/trace-events index cb5cfa65100..05e30533ce2 100644 --- a/backends/tpm/trace-events +++ b/backends/tpm/trace-events @@ -16,7 +16,7 @@ tpm_util_show_buffer_content(const char *buf) "%s" # tpm_emulator.c tpm_emulator_set_locality(uint8_t locty) "setting locality to %d" tpm_emulator_handle_request(void) "processing TPM command" -tpm_emulator_probe_caps(uint64_t caps) "capabilities: 0x%"PRIx64 +tpm_emulator_probe_caps(uint32_t caps) "capabilities: 0x%x" tpm_emulator_set_buffer_size(uint32_t buffersize, uint32_t minsize, uint32_t maxsize) "buffer size: %u, min: %u, max: %u" tpm_emulator_startup_tpm_resume(bool is_resume, size_t buffersize) "is_resume: %d, buffer size: %zu" tpm_emulator_get_tpm_established_flag(uint8_t flag) "got established flag: %d" diff --git a/backends/trace-events b/backends/trace-events index 40811a31621..d7480c7cdd7 100644 --- a/backends/trace-events +++ b/backends/trace-events @@ -7,10 +7,12 @@ dbus_vmstate_loading(const char *id) "id: %s" dbus_vmstate_saving(const char *id) "id: %s" # iommufd.c +iommufd_change_process(int fd, bool ret) "fd=%d (%d)" iommufd_backend_connect(int fd, bool owned, uint32_t users) "fd=%d owned=%d users=%d" iommufd_backend_disconnect(int fd, uint32_t users) "fd=%d users=%d" iommu_backend_set_fd(int fd) "pre-opened /dev/iommu fd=%d" iommufd_backend_map_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, void *vaddr, bool readonly, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" addr=%p readonly=%d (%d)" +iommufd_backend_map_file_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int fd, unsigned long start, bool readonly, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" fd=%d start=%ld readonly=%d (%d)" iommufd_backend_unmap_dma_non_exist(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " Unmap nonexistent mapping: iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)" iommufd_backend_unmap_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)" iommufd_backend_alloc_ioas(int iommufd, uint32_t ioas) " iommufd=%d ioas=%d" @@ -18,3 +20,9 @@ iommufd_backend_alloc_hwpt(int iommufd, uint32_t dev_id, uint32_t pt_id, uint32_ iommufd_backend_free_id(int iommufd, uint32_t id, int ret) " iommufd=%d id=%d (%d)" iommufd_backend_set_dirty(int iommufd, uint32_t hwpt_id, bool start, int ret) " iommufd=%d hwpt=%u enable=%d (%d)" iommufd_backend_get_dirty_bitmap(int iommufd, uint32_t hwpt_id, uint64_t iova, uint64_t size, uint64_t page_size, int ret) " iommufd=%d hwpt=%u iova=0x%"PRIx64" size=0x%"PRIx64" page_size=0x%"PRIx64" (%d)" +iommufd_backend_invalidate_cache(int iommufd, uint32_t id, uint32_t data_type, uint32_t entry_len, uint32_t entry_num, uint32_t done_num, uint64_t data_ptr, int ret) " iommufd=%d id=%u data_type=%u entry_len=%u entry_num=%u done_num=%u data_ptr=0x%"PRIx64" (%d)" +iommufd_backend_alloc_viommu(int iommufd, uint32_t dev_id, uint32_t type, uint32_t 
hwpt_id, uint32_t viommu_id, int ret) " iommufd=%d type=%u dev_id=%u hwpt_id=%u viommu_id=%u (%d)" +iommufd_backend_alloc_vdev(int iommufd, uint32_t dev_id, uint32_t viommu_id, uint64_t virt_id, uint32_t vdev_id, int ret) " iommufd=%d dev_id=%u viommu_id=%u virt_id=0x%"PRIx64" vdev_id=%u (%d)" +iommufd_backend_alloc_veventq(int iommufd, uint32_t viommu_id, uint32_t type, uint32_t veventq_id, uint32_t veventq_fd, int ret) " iommufd=%d viommu_id=%u type=%u veventq_id=%u veventq_fd=%u (%d)" +iommufd_backend_alloc_hw_queue(int iommufd, uint32_t viommu_id, uint32_t vqueue_type, uint32_t index, uint64_t addr, uint64_t size, uint32_t vqueue_id, int ret) " iommufd=%d viommu_id=%u vqueue_type=%u index=%u addr=0x%"PRIx64" size=0x%"PRIx64" vqueue_id=%u (%d)" +iommufd_backend_viommu_mmap(int iommufd, uint32_t viommu_id, uint64_t size, uint64_t offset) " iommufd=%d viommu_id=%u size=0x%"PRIx64" offset=0x%"PRIx64 diff --git a/backends/vhost-user.c b/backends/vhost-user.c index 94c6a82d526..42845329e72 100644 --- a/backends/vhost-user.c +++ b/backends/vhost-user.c @@ -15,8 +15,8 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "qom/object_interfaces.h" -#include "sysemu/vhost-user-backend.h" -#include "sysemu/kvm.h" +#include "system/vhost-user-backend.h" +#include "system/kvm.h" #include "io/channel-command.h" #include "hw/virtio/virtio-bus.h" @@ -97,30 +97,28 @@ vhost_user_backend_start(VhostUserBackend *b) vhost_dev_disable_notifiers(&b->dev, b->vdev); } -void +int vhost_user_backend_stop(VhostUserBackend *b) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(b->vdev))); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - int ret = 0; + int ret; if (!b->started) { - return; + return 0; } - vhost_dev_stop(&b->dev, b->vdev, true); + ret = vhost_dev_stop(&b->dev, b->vdev, true); - if (k->set_guest_notifiers) { - ret = k->set_guest_notifiers(qbus->parent, - b->dev.nvqs, false); - if (ret < 0) { - error_report("vhost guest notifier cleanup failed: %d", ret); - } + if (k->set_guest_notifiers && + k->set_guest_notifiers(qbus->parent, b->dev.nvqs, false) < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return -1; } - assert(ret >= 0); vhost_dev_disable_notifiers(&b->dev, b->vdev); b->started = false; + return ret; } static void set_chardev(Object *obj, const char *value, Error **errp) @@ -163,7 +161,7 @@ static char *get_chardev(Object *obj, Error **errp) return NULL; } -static void vhost_user_backend_class_init(ObjectClass *oc, void *data) +static void vhost_user_backend_class_init(ObjectClass *oc, const void *data) { object_class_property_add_str(oc, "chardev", get_chardev, set_chardev); } diff --git a/block.c b/block.c index c317de9eaa6..8848e9a7ed6 100644 --- a/block.c +++ b/block.c @@ -36,13 +36,13 @@ #include "qemu/main-loop.h" #include "qemu/module.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qnull.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qjson.h" +#include "qobject/qnull.h" +#include "qobject/qstring.h" #include "qapi/qobject-output-visitor.h" #include "qapi/qapi-visit-block-core.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/notify.h" #include "qemu/option.h" #include "qemu/coroutine.h" @@ -106,9 +106,9 @@ static void bdrv_reopen_abort(BDRVReopenState *reopen_state); static bool bdrv_backing_overridden(BlockDriverState *bs); -static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, - GHashTable *visited, 
Transaction *tran, - Error **errp); +static bool GRAPH_RDLOCK +bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, + GHashTable *visited, Transaction *tran, Error **errp); /* If non-zero, use only whitelisted block drivers */ static int use_bdrv_whitelist; @@ -431,7 +431,7 @@ BlockDriverState *bdrv_new(void) bs->block_status_cache = g_new0(BdrvBlockStatusCache, 1); for (i = 0; i < bdrv_drain_all_count; i++) { - bdrv_drained_begin(bs); + bdrv_do_drained_begin_quiesce(bs, NULL); } QTAILQ_INSERT_TAIL(&all_bdrv_states, bs, bs_list); @@ -1226,9 +1226,10 @@ static int bdrv_child_cb_inactivate(BdrvChild *child) return 0; } -static bool bdrv_child_cb_change_aio_ctx(BdrvChild *child, AioContext *ctx, - GHashTable *visited, Transaction *tran, - Error **errp) +static bool GRAPH_RDLOCK +bdrv_child_cb_change_aio_ctx(BdrvChild *child, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp) { BlockDriverState *bs = child->opaque; return bdrv_change_aio_context(bs, ctx, visited, tran, errp); @@ -1573,6 +1574,10 @@ static void update_flags_from_options(int *flags, QemuOpts *opts) if (qemu_opt_get_bool_del(opts, BDRV_OPT_AUTO_READ_ONLY, false)) { *flags |= BDRV_O_AUTO_RDONLY; } + + if (!qemu_opt_get_bool_del(opts, BDRV_OPT_ACTIVE, true)) { + *flags |= BDRV_O_INACTIVE; + } } static void update_options_from_flags(QDict *options, int flags) @@ -1716,7 +1721,7 @@ bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name, open_failed: bs->drv = NULL; - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); if (bs->file != NULL) { bdrv_unref_child(bs, bs->file); assert(!bs->file); @@ -1799,6 +1804,11 @@ QemuOptsList bdrv_runtime_opts = { .type = QEMU_OPT_BOOL, .help = "Ignore flush requests", }, + { + .name = BDRV_OPT_ACTIVE, + .type = QEMU_OPT_BOOL, + .help = "Node is activated", + }, { .name = BDRV_OPT_READ_ONLY, .type = QEMU_OPT_BOOL, @@ -3018,7 +3028,8 @@ static void GRAPH_WRLOCK bdrv_attach_child_common_abort(void *opaque) bdrv_replace_child_noperm(s->child, NULL); if (bdrv_get_aio_context(bs) != s->old_child_ctx) { - bdrv_try_change_aio_context(bs, s->old_child_ctx, NULL, &error_abort); + bdrv_try_change_aio_context_locked(bs, s->old_child_ctx, NULL, + &error_abort); } if (bdrv_child_get_parent_aio_context(s->child) != s->old_parent_ctx) { @@ -3060,6 +3071,9 @@ static TransactionActionDrv bdrv_attach_child_common_drv = { * * Both @parent_bs and @child_bs can move to a different AioContext in this * function. + * + * All block nodes must be drained before this function is called until after + * the transaction is finalized. 
*/ static BdrvChild * GRAPH_WRLOCK bdrv_attach_child_common(BlockDriverState *child_bs, @@ -3077,6 +3091,13 @@ bdrv_attach_child_common(BlockDriverState *child_bs, assert(child_class->get_parent_desc); GLOBAL_STATE_CODE(); + if (bdrv_is_inactive(child_bs) && (perm & ~BLK_PERM_CONSISTENT_READ)) { + g_autofree char *perm_names = bdrv_perm_names(perm); + error_setg(errp, "Permission '%s' unavailable on inactive node", + perm_names); + return NULL; + } + new_child = g_new(BdrvChild, 1); *new_child = (BdrvChild) { .bs = NULL, @@ -3096,8 +3117,8 @@ bdrv_attach_child_common(BlockDriverState *child_bs, parent_ctx = bdrv_child_get_parent_aio_context(new_child); if (child_ctx != parent_ctx) { Error *local_err = NULL; - int ret = bdrv_try_change_aio_context(child_bs, parent_ctx, NULL, - &local_err); + int ret = bdrv_try_change_aio_context_locked(child_bs, parent_ctx, NULL, + &local_err); if (ret < 0 && child_class->change_aio_ctx) { Transaction *aio_ctx_tran = tran_new(); @@ -3137,7 +3158,7 @@ bdrv_attach_child_common(BlockDriverState *child_bs, * stop new requests from coming in. This is fine, we don't care about the * old requests here, they are not for this child. If another place enters a * drain section for the same parent, but wants it to be fully quiesced, it - * will not run most of the the code in .drained_begin() again (which is not + * will not run most of the code in .drained_begin() again (which is not * a problem, we already did this), but it will still poll until the parent * is fully quiesced, so it will not be negatively affected either. */ @@ -3163,6 +3184,9 @@ bdrv_attach_child_common(BlockDriverState *child_bs, * * After calling this function, the transaction @tran may only be completed * while holding a writer lock for the graph. + * + * All block nodes must be drained before this function is called until after + * the transaction is finalized. */ static BdrvChild * GRAPH_WRLOCK bdrv_attach_child_noperm(BlockDriverState *parent_bs, @@ -3183,6 +3207,11 @@ bdrv_attach_child_noperm(BlockDriverState *parent_bs, child_bs->node_name, child_name, parent_bs->node_name); return NULL; } + if (bdrv_is_inactive(child_bs) && !bdrv_is_inactive(parent_bs)) { + error_setg(errp, "Inactive '%s' can't be a %s child of active '%s'", + child_bs->node_name, child_name, parent_bs->node_name); + return NULL; + } bdrv_get_cumulative_perm(parent_bs, &perm, &shared_perm); bdrv_child_perm(parent_bs, child_bs, NULL, child_role, NULL, @@ -3199,6 +3228,8 @@ bdrv_attach_child_noperm(BlockDriverState *parent_bs, * * On failure NULL is returned, errp is set and the reference to * child_bs is also dropped. + * + * All block nodes must be drained. */ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, const char *child_name, @@ -3238,6 +3269,8 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, * * On failure NULL is returned, errp is set and the reference to * child_bs is also dropped. + * + * All block nodes must be drained. */ BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, BlockDriverState *child_bs, @@ -3272,7 +3305,11 @@ BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, return ret < 0 ? NULL : child; } -/* Callers must ensure that child->frozen is false. */ +/* + * Callers must ensure that child->frozen is false. + * + * All block nodes must be drained. 
+ */ void bdrv_root_unref_child(BdrvChild *child) { BlockDriverState *child_bs = child->bs; @@ -3293,8 +3330,8 @@ void bdrv_root_unref_child(BdrvChild *child) * When the parent requiring a non-default AioContext is removed, the * node moves back to the main AioContext */ - bdrv_try_change_aio_context(child_bs, qemu_get_aio_context(), NULL, - NULL); + bdrv_try_change_aio_context_locked(child_bs, qemu_get_aio_context(), + NULL, NULL); } bdrv_schedule_unref(child_bs); @@ -3367,7 +3404,11 @@ bdrv_unset_inherits_from(BlockDriverState *root, BdrvChild *child, } } -/* Callers must ensure that child->frozen is false. */ +/* + * Callers must ensure that child->frozen is false. + * + * All block nodes must be drained. + */ void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child) { GLOBAL_STATE_CODE(); @@ -3432,6 +3473,9 @@ static BdrvChildRole bdrv_backing_role(BlockDriverState *bs) * * After calling this function, the transaction @tran may only be completed * while holding a writer lock for the graph. + * + * All block nodes must be drained before this function is called until after + * the transaction is finalized. */ static int GRAPH_WRLOCK bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs, @@ -3524,12 +3568,10 @@ bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs, * Both @bs and @backing_hd can move to a different AioContext in this * function. * - * If a backing child is already present (i.e. we're detaching a node), that - * child node must be drained. + * All block nodes must be drained. */ -int bdrv_set_backing_hd_drained(BlockDriverState *bs, - BlockDriverState *backing_hd, - Error **errp) +int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, + Error **errp) { int ret; Transaction *tran = tran_new(); @@ -3551,28 +3593,6 @@ int bdrv_set_backing_hd_drained(BlockDriverState *bs, return ret; } -int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, - Error **errp) -{ - BlockDriverState *drain_bs; - int ret; - GLOBAL_STATE_CODE(); - - bdrv_graph_rdlock_main_loop(); - drain_bs = bs->backing ? 
bs->backing->bs : bs; - bdrv_graph_rdunlock_main_loop(); - - bdrv_ref(drain_bs); - bdrv_drained_begin(drain_bs); - bdrv_graph_wrlock(); - ret = bdrv_set_backing_hd_drained(bs, backing_hd, errp); - bdrv_graph_wrunlock(); - bdrv_drained_end(drain_bs); - bdrv_unref(drain_bs); - - return ret; -} - /* * Opens the backing file for a BlockDriverState if not yet open * @@ -3598,7 +3618,8 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, Error *local_err = NULL; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); + + bdrv_graph_rdlock_main_loop(); if (bs->backing != NULL) { goto free_exit; @@ -3679,7 +3700,11 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, /* Hook up the backing file link; drop our reference, bs owns the * backing_hd reference now */ + bdrv_graph_rdunlock_main_loop(); + bdrv_graph_wrlock_drained(); ret = bdrv_set_backing_hd(bs, backing_hd, errp); + bdrv_graph_wrunlock(); + bdrv_graph_rdlock_main_loop(); bdrv_unref(backing_hd); if (ret < 0) { @@ -3691,6 +3716,7 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, free_exit: g_free(backing_filename); qobject_unref(tmp_parent_options); + bdrv_graph_rdunlock_main_loop(); return ret; } @@ -3740,13 +3766,12 @@ bdrv_open_child_bs(const char *filename, QDict *options, const char *bdref_key, return bs; } -static BdrvChild *bdrv_open_child_common(const char *filename, - QDict *options, const char *bdref_key, - BlockDriverState *parent, - const BdrvChildClass *child_class, - BdrvChildRole child_role, - bool allow_none, bool parse_filename, - Error **errp) +static BdrvChild * GRAPH_UNLOCKED +bdrv_open_child_common(const char *filename, QDict *options, + const char *bdref_key, BlockDriverState *parent, + const BdrvChildClass *child_class, + BdrvChildRole child_role, bool allow_none, + bool parse_filename, Error **errp) { BlockDriverState *bs; BdrvChild *child; @@ -3759,7 +3784,7 @@ static BdrvChild *bdrv_open_child_common(const char *filename, return NULL; } - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); child = bdrv_attach_child(parent, bs, bdref_key, child_class, child_role, errp); bdrv_graph_wrunlock(); @@ -4337,9 +4362,7 @@ bdrv_recurse_has_child(BlockDriverState *bs, BlockDriverState *child) * returns a pointer to bs_queue, which is either the newly allocated * bs_queue, or the existing bs_queue being used. * - * bs is drained here and undrained by bdrv_reopen_queue_free(). - * - * To be called with bs->aio_context locked. + * bs must be drained. */ static BlockReopenQueue * GRAPH_RDLOCK bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, BlockDriverState *bs, @@ -4358,12 +4381,7 @@ bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, BlockDriverState *bs, GLOBAL_STATE_CODE(); - /* - * Strictly speaking, draining is illegal under GRAPH_RDLOCK. We know that - * we've been called with bdrv_graph_rdlock_main_loop(), though, so it's ok - * in practice. - */ - bdrv_drained_begin(bs); + assert(bs->quiesce_counter > 0); if (bs_queue == NULL) { bs_queue = g_new0(BlockReopenQueue, 1); @@ -4498,12 +4516,17 @@ bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, BlockDriverState *bs, return bs_queue; } -/* To be called with bs->aio_context locked */ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, BlockDriverState *bs, QDict *options, bool keep_old_opts) { GLOBAL_STATE_CODE(); + + if (bs_queue == NULL) { + /* Paired with bdrv_drain_all_end() in bdrv_reopen_queue_free(). 
*/ + bdrv_drain_all_begin(); + } + GRAPH_RDLOCK_GUARD_MAINLOOP(); return bdrv_reopen_queue_child(bs_queue, bs, options, NULL, 0, false, @@ -4516,12 +4539,14 @@ void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue) if (bs_queue) { BlockReopenQueueEntry *bs_entry, *next; QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { - bdrv_drained_end(bs_entry->state.bs); qobject_unref(bs_entry->state.explicit_options); qobject_unref(bs_entry->state.options); g_free(bs_entry); } g_free(bs_queue); + + /* Paired with bdrv_drain_all_begin() in bdrv_reopen_queue(). */ + bdrv_drain_all_end(); } } @@ -4688,6 +4713,9 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only, * Return 0 on success, otherwise return < 0 and set @errp. * * @reopen_state->bs can move to a different AioContext in this function. + * + * All block nodes must be drained before this function is called until after + * the transaction is finalized. */ static int GRAPH_UNLOCKED bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, @@ -4781,7 +4809,7 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, if (old_child_bs) { bdrv_ref(old_child_bs); - bdrv_drained_begin(old_child_bs); + assert(old_child_bs->quiesce_counter > 0); } bdrv_graph_rdunlock_main_loop(); @@ -4793,7 +4821,6 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, bdrv_graph_wrunlock(); if (old_child_bs) { - bdrv_drained_end(old_child_bs); bdrv_unref(old_child_bs); } @@ -4822,6 +4849,9 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, * * After calling this function, the transaction @change_child_tran may only be * completed while holding a writer lock for the graph. + * + * All block nodes must be drained before this function is called until after + * the transaction is finalized. 
*/ static int GRAPH_UNLOCKED bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue, @@ -5115,7 +5145,7 @@ static void GRAPH_UNLOCKED bdrv_reopen_abort(BDRVReopenState *reopen_state) } -static void bdrv_close(BlockDriverState *bs) +static void GRAPH_UNLOCKED bdrv_close(BlockDriverState *bs) { BdrvAioNotifier *ban, *ban_next; BdrvChild *child, *next; @@ -5135,7 +5165,7 @@ static void bdrv_close(BlockDriverState *bs) bs->drv = NULL; } - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); QLIST_FOREACH_SAFE(child, &bs->children, next, next) { bdrv_unref_child(bs, child); } @@ -5468,10 +5498,7 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, assert(!bs_new->backing); bdrv_graph_rdunlock_main_loop(); - bdrv_drained_begin(bs_top); - bdrv_drained_begin(bs_new); - - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); child = bdrv_attach_child_noperm(bs_new, bs_top, "backing", &child_of_bds, bdrv_backing_role(bs_new), @@ -5493,9 +5520,6 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, bdrv_refresh_limits(bs_top, NULL, NULL); bdrv_graph_wrunlock(); - bdrv_drained_end(bs_top); - bdrv_drained_end(bs_new); - return ret; } @@ -6351,7 +6375,7 @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp) if (!*name) { name = allocated_name = blk_get_attached_dev_id(blk); } - xdbg_graph_add_node(gr, blk, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_BACKEND, + xdbg_graph_add_node(gr, blk, XDBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_BACKEND, name); g_free(allocated_name); if (blk_root(blk)) { @@ -6364,7 +6388,7 @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp) job = block_job_next_locked(job)) { GSList *el; - xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB, + xdbg_graph_add_node(gr, job, XDBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB, job->job.id); for (el = job->nodes; el; el = el->next) { xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data); @@ -6373,7 +6397,7 @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp) } QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) { - xdbg_graph_add_node(gr, bs, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_DRIVER, + xdbg_graph_add_node(gr, bs, XDBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_DRIVER, bs->node_name); QLIST_FOREACH(child, &bs->children, next) { xdbg_graph_add_edge(gr, bs, child); @@ -6824,6 +6848,10 @@ void bdrv_init_with_whitelist(void) bdrv_init(); } +bool bdrv_is_inactive(BlockDriverState *bs) { + return bs->open_flags & BDRV_O_INACTIVE; +} + int bdrv_activate(BlockDriverState *bs, Error **errp) { BdrvChild *child, *parent; @@ -6955,7 +6983,8 @@ bdrv_has_bds_parent(BlockDriverState *bs, bool only_active) return false; } -static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs) +static int GRAPH_RDLOCK +bdrv_inactivate_recurse(BlockDriverState *bs, bool top_level) { BdrvChild *child, *parent; int ret; @@ -6963,6 +6992,8 @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs) GLOBAL_STATE_CODE(); + assert(bs->quiesce_counter > 0); + if (!bs->drv) { return -ENOMEDIUM; } @@ -6973,7 +7004,14 @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs) return 0; } - assert(!(bs->open_flags & BDRV_O_INACTIVE)); + /* + * Inactivating an already inactive node on user request is harmless, but if + * a child is already inactive before its parent, that's bad. 
+ */ + if (bs->open_flags & BDRV_O_INACTIVE) { + assert(top_level); + return 0; + } /* Inactivate this node */ if (bs->drv->bdrv_inactivate) { @@ -7010,7 +7048,7 @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs) /* Recursively inactivate children */ QLIST_FOREACH(child, &bs->children, next) { - ret = bdrv_inactivate_recurse(child->bs); + ret = bdrv_inactivate_recurse(child->bs, false); if (ret < 0) { return ret; } @@ -7019,6 +7057,27 @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs) return 0; } +/* All block nodes must be drained. */ +int bdrv_inactivate(BlockDriverState *bs, Error **errp) +{ + int ret; + + GLOBAL_STATE_CODE(); + + if (bdrv_has_bds_parent(bs, true)) { + error_setg(errp, "Node has active parent node"); + return -EPERM; + } + + ret = bdrv_inactivate_recurse(bs, true); + if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to inactivate node"); + return ret; + } + + return 0; +} + int bdrv_inactivate_all(void) { BlockDriverState *bs = NULL; @@ -7026,7 +7085,9 @@ int bdrv_inactivate_all(void) int ret = 0; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); + + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { /* Nodes with BDS parents are covered by recursion from the last @@ -7035,13 +7096,16 @@ int bdrv_inactivate_all(void) if (bdrv_has_bds_parent(bs, false)) { continue; } - ret = bdrv_inactivate_recurse(bs); + ret = bdrv_inactivate_recurse(bs, true); if (ret < 0) { bdrv_next_cleanup(&it); break; } } + bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); + return ret; } @@ -7222,10 +7286,6 @@ bool bdrv_op_blocker_is_empty(BlockDriverState *bs) return true; } -/* - * Must not be called while holding the lock of an AioContext other than the - * current one. - */ void bdrv_img_create(const char *filename, const char *fmt, const char *base_filename, const char *base_fmt, char *options, uint64_t img_size, int flags, bool quiet, @@ -7512,10 +7572,21 @@ typedef struct BdrvStateSetAioContext { BlockDriverState *bs; } BdrvStateSetAioContext; -static bool bdrv_parent_change_aio_context(BdrvChild *c, AioContext *ctx, - GHashTable *visited, - Transaction *tran, - Error **errp) +/* + * Changes the AioContext of @child to @ctx and recursively for the associated + * block nodes and all their children and parents. Returns true if the change is + * possible and the transaction @tran can be continued. Returns false and sets + * @errp if not and the transaction must be aborted. + * + * @visited will accumulate all visited BdrvChild objects. The caller is + * responsible for freeing the list afterwards. + * + * Must be called with the affected block nodes drained. + */ +static bool GRAPH_RDLOCK +bdrv_parent_change_aio_context(BdrvChild *c, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp) { GLOBAL_STATE_CODE(); if (g_hash_table_contains(visited, c)) { @@ -7540,6 +7611,17 @@ static bool bdrv_parent_change_aio_context(BdrvChild *c, AioContext *ctx, return true; } +/* + * Changes the AioContext of @c->bs to @ctx and recursively for all its children + * and parents. Returns true if the change is possible and the transaction @tran + * can be continued. Returns false and sets @errp if not and the transaction + * must be aborted. + * + * @visited will accumulate all visited BdrvChild objects. The caller is + * responsible for freeing the list afterwards. + * + * Must be called with the affected block nodes drained. 
+ */ bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx, GHashTable *visited, Transaction *tran, Error **errp) @@ -7555,10 +7637,6 @@ bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx, static void bdrv_set_aio_context_clean(void *opaque) { BdrvStateSetAioContext *state = (BdrvStateSetAioContext *) opaque; - BlockDriverState *bs = (BlockDriverState *) state->bs; - - /* Paired with bdrv_drained_begin in bdrv_change_aio_context() */ - bdrv_drained_end(bs); g_free(state); } @@ -7586,10 +7664,12 @@ static TransactionActionDrv set_aio_context = { * * @visited will accumulate all visited BdrvChild objects. The caller is * responsible for freeing the list afterwards. + * + * @bs must be drained. */ -static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, - GHashTable *visited, Transaction *tran, - Error **errp) +static bool GRAPH_RDLOCK +bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, + GHashTable *visited, Transaction *tran, Error **errp) { BdrvChild *c; BdrvStateSetAioContext *state; @@ -7600,21 +7680,17 @@ static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, return true; } - bdrv_graph_rdlock_main_loop(); QLIST_FOREACH(c, &bs->parents, next_parent) { if (!bdrv_parent_change_aio_context(c, ctx, visited, tran, errp)) { - bdrv_graph_rdunlock_main_loop(); return false; } } QLIST_FOREACH(c, &bs->children, next) { if (!bdrv_child_change_aio_context(c, ctx, visited, tran, errp)) { - bdrv_graph_rdunlock_main_loop(); return false; } } - bdrv_graph_rdunlock_main_loop(); state = g_new(BdrvStateSetAioContext, 1); *state = (BdrvStateSetAioContext) { @@ -7622,8 +7698,7 @@ static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, .bs = bs, }; - /* Paired with bdrv_drained_end in bdrv_set_aio_context_clean() */ - bdrv_drained_begin(bs); + assert(bs->quiesce_counter > 0); tran_add(tran, &set_aio_context, state); @@ -7636,9 +7711,13 @@ static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, * * If ignore_child is not NULL, that child (and its subgraph) will not * be touched. + * + * Called with the graph lock held. + * + * Called while all bs are drained. */ -int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, - BdrvChild *ignore_child, Error **errp) +int bdrv_try_change_aio_context_locked(BlockDriverState *bs, AioContext *ctx, + BdrvChild *ignore_child, Error **errp) { Transaction *tran; GHashTable *visited; @@ -7647,9 +7726,9 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, /* * Recursion phase: go through all nodes of the graph. - * Take care of checking that all nodes support changing AioContext - * and drain them, building a linear list of callbacks to run if everything - * is successful (the transaction itself). + * Take care of checking that all nodes support changing AioContext, + * building a linear list of callbacks to run if everything is successful + * (the transaction itself). */ tran = tran_new(); visited = g_hash_table_new(NULL, NULL); @@ -7676,6 +7755,29 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, return 0; } +/* + * Change bs's and recursively all of its parents' and children's AioContext + * to the given new context, returning an error if that isn't possible. + * + * If ignore_child is not NULL, that child (and its subgraph) will not + * be touched. 
+ */ +int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, + BdrvChild *ignore_child, Error **errp) +{ + int ret; + + GLOBAL_STATE_CODE(); + + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); + ret = bdrv_try_change_aio_context_locked(bs, ctx, ignore_child, errp); + bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); + + return ret; +} + void bdrv_add_aio_context_notifier(BlockDriverState *bs, void (*attached_aio_context)(AioContext *new_context, void *opaque), void (*detach_aio_context)(void *opaque), void *opaque) @@ -8103,8 +8205,10 @@ char *bdrv_dirname(BlockDriverState *bs, Error **errp) } /* - * Hot add/remove a BDS's child. So the user can take a child offline when - * it is broken and take a new child online + * Hot add a BDS's child. Used in combination with bdrv_del_child, so the user + * can take a child offline when it is broken and take a new child online. + * + * All block nodes must be drained. */ void bdrv_add_child(BlockDriverState *parent_bs, BlockDriverState *child_bs, Error **errp) @@ -8144,6 +8248,12 @@ void bdrv_add_child(BlockDriverState *parent_bs, BlockDriverState *child_bs, parent_bs->drv->bdrv_add_child(parent_bs, child_bs, errp); } +/* + * Hot remove a BDS's child. Used in combination with bdrv_add_child, so the + * user can take a child offline when it is broken and take a new child online. + * + * All block nodes must be drained. + */ void bdrv_del_child(BlockDriverState *parent_bs, BdrvChild *child, Error **errp) { BdrvChild *tmp; diff --git a/block/accounting.c b/block/accounting.c index 2829745377a..3e46159569e 100644 --- a/block/accounting.c +++ b/block/accounting.c @@ -27,7 +27,7 @@ #include "block/accounting.h" #include "block/block_int.h" #include "qemu/timer.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" static QEMUClockType clock_type = QEMU_CLOCK_REALTIME; static const int qtest_latency_ns = NANOSECONDS_PER_SECOND / 1000; diff --git a/block/aio_task.c b/block/aio_task.c index 9bd17ea2c13..bb5c05f455d 100644 --- a/block/aio_task.c +++ b/block/aio_task.c @@ -119,8 +119,3 @@ int aio_task_pool_status(AioTaskPool *pool) return pool->status; } - -bool aio_task_pool_empty(AioTaskPool *pool) -{ - return pool->busy_tasks == 0; -} diff --git a/block/backup.c b/block/backup.c index 3dd2e229d2e..d4713fa1cd4 100644 --- a/block/backup.c +++ b/block/backup.c @@ -23,7 +23,7 @@ #include "block/dirty-bitmap.h" #include "qapi/error.h" #include "qemu/cutils.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/bitmap.h" #include "qemu/error-report.h" @@ -361,6 +361,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, BackupPerf *perf, BlockdevOnError on_source_error, BlockdevOnError on_target_error, + OnCbwError on_cbw_error, int creation_flags, BlockCompletionFunc *cb, void *opaque, JobTxn *txn, Error **errp) @@ -458,7 +459,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, } cbw = bdrv_cbw_append(bs, target, filter_node_name, discard_source, - &bcs, errp); + perf->min_cluster_size, &bcs, on_cbw_error, errp); if (!cbw) { goto error; } @@ -497,7 +498,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, block_copy_set_speed(bcs, speed); /* Required permissions are taken by copy-before-write filter target */ - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL, &error_abort); bdrv_graph_wrunlock(); diff --git a/block/blkdebug.c b/block/blkdebug.c index 
c95c818c388..c54aee0c84b 100644 --- a/block/blkdebug.c +++ b/block/blkdebug.c @@ -33,11 +33,11 @@ #include "qemu/module.h" #include "qemu/option.h" #include "qapi/qapi-visit-block-core.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qstring.h" #include "qapi/qobject-input-visitor.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" /* All APIs are thread-safe */ @@ -751,9 +751,9 @@ blkdebug_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) } static int coroutine_fn GRAPH_RDLOCK -blkdebug_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset, - int64_t bytes, int64_t *pnum, int64_t *map, - BlockDriverState **file) +blkdebug_co_block_status(BlockDriverState *bs, unsigned int mode, + int64_t offset, int64_t bytes, int64_t *pnum, + int64_t *map, BlockDriverState **file) { int err; diff --git a/block/blkio.c b/block/blkio.c index 3d9a2e764c3..41426739847 100644 --- a/block/blkio.c +++ b/block/blkio.c @@ -11,15 +11,15 @@ #include "qemu/osdep.h" #include #include "block/block_int.h" -#include "exec/memory.h" +#include "system/memory.h" #include "exec/cpu-common.h" /* for qemu_ram_get_fd() */ #include "qemu/defer-call.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/module.h" -#include "sysemu/block-backend.h" -#include "exec/memory.h" /* for ram_block_discard_disable() */ +#include "system/block-backend.h" +#include "system/memory.h" /* for ram_block_discard_disable() */ #include "block/block-io.h" @@ -899,8 +899,10 @@ static int blkio_open(BlockDriverState *bs, QDict *options, int flags, } bs->supported_write_flags = BDRV_REQ_FUA | BDRV_REQ_REGISTERED_BUF; - bs->supported_zero_flags = BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | - BDRV_REQ_NO_FALLBACK; + bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK; +#ifdef CONFIG_BLKIO_WRITE_ZEROS_FUA + bs->supported_zero_flags |= BDRV_REQ_FUA; +#endif qemu_mutex_init(&s->blkio_lock); qemu_co_mutex_init(&s->bounce_lock); diff --git a/block/blklogwrites.c b/block/blklogwrites.c index ed38a93f21b..aa1f8888690 100644 --- a/block/blklogwrites.c +++ b/block/blklogwrites.c @@ -14,8 +14,8 @@ #include "qemu/sockets.h" /* for EINPROGRESS on Windows */ #include "block/block-io.h" #include "block/block_int.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "qemu/cutils.h" #include "qemu/module.h" #include "qemu/option.h" @@ -281,7 +281,7 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags, ret = 0; fail_log: if (ret < 0) { - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_unref_child(bs, s->log_file); bdrv_graph_wrunlock(); s->log_file = NULL; @@ -296,7 +296,7 @@ static void blk_log_writes_close(BlockDriverState *bs) { BDRVBlkLogWritesState *s = bs->opaque; - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_unref_child(bs, s->log_file); s->log_file = NULL; bdrv_graph_wrunlock(); diff --git a/block/blkreplay.c b/block/blkreplay.c index 792d980aa9d..16d8b12dd99 100644 --- a/block/blkreplay.c +++ b/block/blkreplay.c @@ -13,7 +13,7 @@ #include "qemu/module.h" #include "block/block-io.h" #include "block/block_int.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "qapi/error.h" typedef struct Request { diff --git a/block/blkverify.c b/block/blkverify.c index 5a9bf674d9c..72efcbe7ef6 100644 
--- a/block/blkverify.c +++ b/block/blkverify.c @@ -12,8 +12,8 @@ #include "qemu/sockets.h" /* for EINPROGRESS on Windows */ #include "block/block-io.h" #include "block/block_int.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "qemu/cutils.h" #include "qemu/module.h" #include "qemu/option.h" @@ -151,7 +151,7 @@ static void blkverify_close(BlockDriverState *bs) { BDRVBlkverifyState *s = bs->opaque; - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_unref_child(bs, s->test_file); s->test_file = NULL; bdrv_graph_wrunlock(); diff --git a/block/block-backend.c b/block/block-backend.c index db6f9b92a39..f8d6ba65c11 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -11,15 +11,15 @@ */ #include "qemu/osdep.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "block/block_int.h" #include "block/blockjob.h" #include "block/coroutines.h" #include "block/throttle-groups.h" #include "hw/qdev-core.h" -#include "sysemu/blockdev.h" -#include "sysemu/runstate.h" -#include "sysemu/replay.h" +#include "system/blockdev.h" +#include "system/runstate.h" +#include "system/replay.h" #include "qapi/error.h" #include "qapi/qapi-events-block.h" #include "qemu/id.h" @@ -136,9 +136,9 @@ static void blk_root_drained_end(BdrvChild *child); static void blk_root_change_media(BdrvChild *child, bool load); static void blk_root_resize(BdrvChild *child); -static bool blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx, - GHashTable *visited, Transaction *tran, - Error **errp); +static bool GRAPH_RDLOCK +blk_root_change_aio_ctx(BdrvChild *child, AioContext *ctx, GHashTable *visited, + Transaction *tran, Error **errp); static char *blk_root_get_parent_desc(BdrvChild *child) { @@ -253,7 +253,7 @@ static bool blk_can_inactivate(BlockBackend *blk) * guest. For block job BBs that satisfy this, we can just allow * it. This is the case for mirror job source, which is required * by libvirt non-shared block migration. */ - if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) { + if (!(blk->perm & ~BLK_PERM_CONSISTENT_READ)) { return true; } @@ -853,15 +853,6 @@ BlockBackendPublic *blk_get_public(BlockBackend *blk) return &blk->public; } -/* - * Returns a BlockBackend given the associated @public fields. - */ -BlockBackend *blk_by_public(BlockBackendPublic *public) -{ - GLOBAL_STATE_CODE(); - return container_of(public, BlockBackend, public); -} - /* * Disassociates the currently associated BlockDriverState from @blk. 
*/ @@ -898,7 +889,7 @@ void blk_remove_bs(BlockBackend *blk) root = blk->root; blk->root = NULL; - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_root_unref_child(root); bdrv_graph_wrunlock(); } @@ -909,14 +900,24 @@ void blk_remove_bs(BlockBackend *blk) int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) { ThrottleGroupMember *tgm = &blk->public.throttle_group_member; + uint64_t perm, shared_perm; GLOBAL_STATE_CODE(); bdrv_ref(bs); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); + + if ((bs->open_flags & BDRV_O_INACTIVE) && blk_can_inactivate(blk)) { + blk->disable_perm = true; + perm = 0; + shared_perm = BLK_PERM_ALL; + } else { + perm = blk->perm; + shared_perm = blk->shared_perm; + } + blk->root = bdrv_root_attach_child(bs, "root", &child_root, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - blk->perm, blk->shared_perm, - blk, errp); + perm, shared_perm, blk, errp); bdrv_graph_wrunlock(); if (blk->root == NULL) { return -EPERM; @@ -1028,22 +1029,38 @@ DeviceState *blk_get_attached_dev(BlockBackend *blk) return blk->dev; } -/* Return the qdev ID, or if no ID is assigned the QOM path, of the block - * device attached to the BlockBackend. */ -char *blk_get_attached_dev_id(BlockBackend *blk) +/* + * The caller is responsible for releasing the value returned + * with g_free() after use. + */ +static char *blk_get_attached_dev_id_or_path(BlockBackend *blk, bool want_id) { DeviceState *dev = blk->dev; IO_CODE(); if (!dev) { return g_strdup(""); - } else if (dev->id) { + } else if (want_id && dev->id) { return g_strdup(dev->id); } return object_get_canonical_path(OBJECT(dev)) ?: g_strdup(""); } +char *blk_get_attached_dev_id(BlockBackend *blk) +{ + return blk_get_attached_dev_id_or_path(blk, true); +} + +/* + * The caller is responsible for releasing the value returned + * with g_free() after use. + */ +static char *blk_get_attached_dev_path(BlockBackend *blk) +{ + return blk_get_attached_dev_id_or_path(blk, false); +} + /* * Return the BlockBackend which has the device model @dev attached if it * exists, else null. @@ -1214,12 +1231,6 @@ BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk) return blk->iostatus; } -void blk_iostatus_disable(BlockBackend *blk) -{ - GLOBAL_STATE_CODE(); - blk->iostatus_enabled = false; -} - void blk_iostatus_reset(BlockBackend *blk) { GLOBAL_STATE_CODE(); @@ -2137,9 +2148,10 @@ static void send_qmp_error_event(BlockBackend *blk, { IoOperationType optype; BlockDriverState *bs = blk_bs(blk); + g_autofree char *path = blk_get_attached_dev_path(blk); optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE; - qapi_event_send_block_io_error(blk_name(blk), + qapi_event_send_block_io_error(path, blk_name(blk), bs ? bdrv_get_node_name(bs) : NULL, optype, action, blk_iostatus_is_enabled(blk), error == ENOSPC, strerror(error)); @@ -2228,28 +2240,6 @@ void blk_set_enable_write_cache(BlockBackend *blk, bool wce) blk->enable_write_cache = wce; } -void blk_activate(BlockBackend *blk, Error **errp) -{ - BlockDriverState *bs = blk_bs(blk); - GLOBAL_STATE_CODE(); - - if (!bs) { - error_setg(errp, "Device '%s' has no medium", blk->name); - return; - } - - /* - * Migration code can call this function in coroutine context, so leave - * coroutine context if necessary. 
- */ - if (qemu_in_coroutine()) { - bdrv_co_activate(bs, errp); - } else { - GRAPH_RDLOCK_GUARD_MAINLOOP(); - bdrv_activate(bs, errp); - } -} - bool coroutine_fn blk_co_is_inserted(BlockBackend *blk) { BlockDriverState *bs = blk_bs(blk); @@ -2367,48 +2357,6 @@ void *blk_blockalign(BlockBackend *blk, size_t size) return qemu_blockalign(blk ? blk_bs(blk) : NULL, size); } -bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp) -{ - BlockDriverState *bs = blk_bs(blk); - GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); - - if (!bs) { - return false; - } - - return bdrv_op_is_blocked(bs, op, errp); -} - -void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason) -{ - BlockDriverState *bs = blk_bs(blk); - GLOBAL_STATE_CODE(); - - if (bs) { - bdrv_op_unblock(bs, op, reason); - } -} - -void blk_op_block_all(BlockBackend *blk, Error *reason) -{ - BlockDriverState *bs = blk_bs(blk); - GLOBAL_STATE_CODE(); - - if (bs) { - bdrv_op_block_all(bs, reason); - } -} - -void blk_op_unblock_all(BlockBackend *blk, Error *reason) -{ - BlockDriverState *bs = blk_bs(blk); - GLOBAL_STATE_CODE(); - - if (bs) { - bdrv_op_unblock_all(bs, reason); - } -} /** * Return BB's current AioContext. Note that this context may change @@ -2564,12 +2512,6 @@ void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify) notifier_list_add(&blk->remove_bs_notifiers, notify); } -void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify) -{ - GLOBAL_STATE_CODE(); - notifier_list_add(&blk->insert_bs_notifiers, notify); -} - BlockAcctStats *blk_get_stats(BlockBackend *blk) { IO_CODE(); diff --git a/block/block-copy.c b/block/block-copy.c index 7e3b3785282..1826c2e1c7b 100644 --- a/block/block-copy.c +++ b/block/block-copy.c @@ -20,7 +20,7 @@ #include "block/block_int-io.h" #include "block/dirty-bitmap.h" #include "block/reqlist.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/units.h" #include "qemu/co-shared-resource.h" #include "qemu/coroutine.h" @@ -310,6 +310,7 @@ void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range, } static int64_t block_copy_calculate_cluster_size(BlockDriverState *target, + int64_t min_cluster_size, Error **errp) { int ret; @@ -319,6 +320,9 @@ static int64_t block_copy_calculate_cluster_size(BlockDriverState *target, GLOBAL_STATE_CODE(); GRAPH_RDLOCK_GUARD_MAINLOOP(); + min_cluster_size = MAX(min_cluster_size, + (int64_t)BLOCK_COPY_CLUSTER_SIZE_DEFAULT); + target_does_cow = bdrv_backing_chain_next(target); /* @@ -329,13 +333,13 @@ static int64_t block_copy_calculate_cluster_size(BlockDriverState *target, ret = bdrv_get_info(target, &bdi); if (ret == -ENOTSUP && !target_does_cow) { /* Cluster size is not defined */ - warn_report("The target block device doesn't provide " - "information about the block size and it doesn't have a " - "backing file. The default block size of %u bytes is " - "used. If the actual block size of the target exceeds " - "this default, the backup may be unusable", - BLOCK_COPY_CLUSTER_SIZE_DEFAULT); - return BLOCK_COPY_CLUSTER_SIZE_DEFAULT; + warn_report("The target block device doesn't provide information about " + "the block size and it doesn't have a backing file. The " + "(default) block size of %" PRIi64 " bytes is used. 
If the " + "actual block size of the target exceeds this value, the " + "backup may be unusable", + min_cluster_size); + return min_cluster_size; } else if (ret < 0 && !target_does_cow) { error_setg_errno(errp, -ret, "Couldn't determine the cluster size of the target image, " @@ -345,16 +349,17 @@ static int64_t block_copy_calculate_cluster_size(BlockDriverState *target, return ret; } else if (ret < 0 && target_does_cow) { /* Not fatal; just trudge on ahead. */ - return BLOCK_COPY_CLUSTER_SIZE_DEFAULT; + return min_cluster_size; } - return MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, bdi.cluster_size); + return MAX(min_cluster_size, bdi.cluster_size); } BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, BlockDriverState *copy_bitmap_bs, const BdrvDirtyBitmap *bitmap, bool discard_source, + uint64_t min_cluster_size, Error **errp) { ERRP_GUARD(); @@ -365,7 +370,18 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, GLOBAL_STATE_CODE(); - cluster_size = block_copy_calculate_cluster_size(target->bs, errp); + if (min_cluster_size > INT64_MAX) { + error_setg(errp, "min-cluster-size too large: %" PRIu64 " > %" PRIi64, + min_cluster_size, INT64_MAX); + return NULL; + } else if (min_cluster_size && !is_power_of_2(min_cluster_size)) { + error_setg(errp, "min-cluster-size needs to be a power of 2"); + return NULL; + } + + cluster_size = block_copy_calculate_cluster_size(target->bs, + (int64_t)min_cluster_size, + errp); if (cluster_size < 0) { return NULL; } @@ -568,7 +584,7 @@ static coroutine_fn int block_copy_task_entry(AioTask *task) BlockCopyState *s = t->s; bool error_is_read = false; BlockCopyMethod method = t->method; - int ret; + int ret = -1; WITH_GRAPH_RDLOCK_GUARD() { ret = block_copy_do_copy(s, t->req.offset, t->req.bytes, &method, @@ -595,7 +611,9 @@ static coroutine_fn int block_copy_task_entry(AioTask *task) if (s->discard_source && ret == 0) { int64_t nbytes = MIN(t->req.offset + t->req.bytes, s->len) - t->req.offset; - bdrv_co_pdiscard(s->source, t->req.offset, nbytes); + WITH_GRAPH_RDLOCK_GUARD() { + bdrv_co_pdiscard(s->source, t->req.offset, nbytes); + } } return ret; diff --git a/block/block-ram-registrar.c b/block/block-ram-registrar.c index 25dbafa789c..fcda2b86afb 100644 --- a/block/block-ram-registrar.c +++ b/block/block-ram-registrar.c @@ -5,8 +5,8 @@ */ #include "qemu/osdep.h" -#include "sysemu/block-backend.h" -#include "sysemu/block-ram-registrar.h" +#include "system/block-backend.h" +#include "system/block-ram-registrar.h" #include "qapi/error.h" static void ram_block_added(RAMBlockNotifier *n, void *host, size_t size, diff --git a/block/commit.c b/block/commit.c index 7c3fdcb0cae..0d9e1a16d7a 100644 --- a/block/commit.c +++ b/block/commit.c @@ -15,12 +15,14 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" #include "trace.h" +#include "block/block-common.h" +#include "block/coroutines.h" #include "block/block_int.h" #include "block/blockjob_int.h" #include "qapi/error.h" #include "qemu/ratelimit.h" #include "qemu/memalign.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" enum { /* @@ -66,7 +68,7 @@ static int commit_prepare(Job *job) s->backing_mask_protocol); } -static void commit_abort(Job *job) +static void GRAPH_UNLOCKED commit_abort(Job *job) { CommitBlockJob *s = container_of(job, CommitBlockJob, common.job); BlockDriverState *top_bs = blk_bs(s->top); @@ -126,6 +128,84 @@ static void commit_clean(Job *job) blk_unref(s->top); } +static int commit_iteration(CommitBlockJob *s, int64_t offset, + 
int64_t *requested_bytes, void *buf) +{ + BlockErrorAction action; + int64_t bytes = *requested_bytes; + int ret = 0; + bool error_in_source = true; + + /* Copy if allocated above the base */ + WITH_GRAPH_RDLOCK_GUARD() { + ret = bdrv_co_common_block_status_above(blk_bs(s->top), + s->base_overlay, true, true, offset, COMMIT_BUFFER_SIZE, + &bytes, NULL, NULL, NULL); + } + + trace_commit_one_iteration(s, offset, bytes, ret); + + if (ret < 0) { + goto fail; + } + + if (ret & BDRV_BLOCK_ALLOCATED) { + if (ret & BDRV_BLOCK_ZERO) { + /* + * If the top (sub)clusters are smaller than the base + * (sub)clusters, this will not unmap unless the underlying device + * does some tracking of these requests. Ideally, we would find + * the maximal extent of the zero clusters. + */ + ret = blk_co_pwrite_zeroes(s->base, offset, bytes, + BDRV_REQ_MAY_UNMAP); + if (ret < 0) { + error_in_source = false; + goto fail; + } + } else { + assert(bytes < SIZE_MAX); + + ret = blk_co_pread(s->top, offset, bytes, buf, 0); + if (ret < 0) { + goto fail; + } + + ret = blk_co_pwrite(s->base, offset, bytes, buf, 0); + if (ret < 0) { + error_in_source = false; + goto fail; + } + } + + /* + * Whether zeroes actually end up on disk depends on the details of + * the underlying driver. Therefore, this might rate limit more than + * is necessary. + */ + block_job_ratelimit_processed_bytes(&s->common, bytes); + } + + /* Publish progress */ + + job_progress_update(&s->common.job, bytes); + + *requested_bytes = bytes; + + return 0; + +fail: + action = block_job_error_action(&s->common, s->on_error, + error_in_source, -ret); + if (action == BLOCK_ERROR_ACTION_REPORT) { + return ret; + } + + *requested_bytes = 0; + + return 0; +} + static int coroutine_fn commit_run(Job *job, Error **errp) { CommitBlockJob *s = container_of(job, CommitBlockJob, common.job); @@ -156,9 +236,6 @@ static int coroutine_fn commit_run(Job *job, Error **errp) buf = blk_blockalign(s->top, COMMIT_BUFFER_SIZE); for (offset = 0; offset < len; offset += n) { - bool copy; - bool error_in_source = true; - /* Note that even when no rate limit is applied we need to yield * with no pending I/O here so that bdrv_drain_all() returns. */ @@ -166,38 +243,11 @@ static int coroutine_fn commit_run(Job *job, Error **errp) if (job_is_cancelled(&s->common.job)) { break; } - /* Copy if allocated above the base */ - ret = blk_co_is_allocated_above(s->top, s->base_overlay, true, - offset, COMMIT_BUFFER_SIZE, &n); - copy = (ret > 0); - trace_commit_one_iteration(s, offset, n, ret); - if (copy) { - assert(n < SIZE_MAX); - - ret = blk_co_pread(s->top, offset, n, buf, 0); - if (ret >= 0) { - ret = blk_co_pwrite(s->base, offset, n, buf, 0); - if (ret < 0) { - error_in_source = false; - } - } - } - if (ret < 0) { - BlockErrorAction action = - block_job_error_action(&s->common, s->on_error, - error_in_source, -ret); - if (action == BLOCK_ERROR_ACTION_REPORT) { - return ret; - } else { - n = 0; - continue; - } - } - /* Publish progress */ - job_progress_update(&s->common.job, n); - if (copy) { - block_job_ratelimit_processed_bytes(&s->common, n); + ret = commit_iteration(s, offset, &n, buf); + + if (ret < 0) { + return ret; } } @@ -342,7 +392,7 @@ void commit_start(const char *job_id, BlockDriverState *bs, * this is the responsibility of the interface (i.e. whoever calls * commit_start()). 
*/ - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); s->base_overlay = bdrv_find_overlay(top, base); assert(s->base_overlay); @@ -464,28 +514,32 @@ int bdrv_commit(BlockDriverState *bs) Error *local_err = NULL; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); if (!drv) return -ENOMEDIUM; + bdrv_graph_rdlock_main_loop(); + backing_file_bs = bdrv_cow_bs(bs); if (!backing_file_bs) { - return -ENOTSUP; + ret = -ENOTSUP; + goto out; } if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, NULL) || bdrv_op_is_blocked(backing_file_bs, BLOCK_OP_TYPE_COMMIT_TARGET, NULL)) { - return -EBUSY; + ret = -EBUSY; + goto out; } ro = bdrv_is_read_only(backing_file_bs); if (ro) { if (bdrv_reopen_set_read_only(backing_file_bs, false, NULL)) { - return -EACCES; + ret = -EACCES; + goto out; } } @@ -509,8 +563,14 @@ int bdrv_commit(BlockDriverState *bs) goto ro_cleanup; } + bdrv_graph_rdunlock_main_loop(); + + bdrv_graph_wrlock_drained(); bdrv_set_backing_hd(commit_top_bs, backing_file_bs, &error_abort); bdrv_set_backing_hd(bs, commit_top_bs, &error_abort); + bdrv_graph_wrunlock(); + + bdrv_graph_rdlock_main_loop(); ret = blk_insert_bs(backing, backing_file_bs, &local_err); if (ret < 0) { @@ -585,9 +645,14 @@ int bdrv_commit(BlockDriverState *bs) ret = 0; ro_cleanup: blk_unref(backing); + + bdrv_graph_rdunlock_main_loop(); + bdrv_graph_wrlock_drained(); if (bdrv_cow_bs(bs) != backing_file_bs) { bdrv_set_backing_hd(bs, backing_file_bs, &error_abort); } + bdrv_graph_wrunlock(); + bdrv_graph_rdlock_main_loop(); bdrv_unref(commit_top_bs); blk_unref(src); @@ -596,5 +661,8 @@ int bdrv_commit(BlockDriverState *bs) bdrv_reopen_set_read_only(backing_file_bs, true, NULL); } +out: + bdrv_graph_rdunlock_main_loop(); + return ret; } diff --git a/block/copy-before-write.c b/block/copy-before-write.c index 853e01a1eb5..36d5d3ed9b0 100644 --- a/block/copy-before-write.c +++ b/block/copy-before-write.c @@ -24,9 +24,9 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/qjson.h" +#include "qobject/qjson.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/cutils.h" #include "qapi/error.h" #include "block/block_int.h" @@ -66,7 +66,8 @@ typedef struct BDRVCopyBeforeWriteState { /* * @frozen_read_reqs: current read requests for fleecing user in bs->file - * node. These areas must not be rewritten by guest. + * node. These areas must not be rewritten by guest. There can be multiple + * overlapping read requests. 
*/ BlockReqList frozen_read_reqs; @@ -290,8 +291,8 @@ cbw_co_preadv_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes, } static int coroutine_fn GRAPH_RDLOCK -cbw_co_snapshot_block_status(BlockDriverState *bs, - bool want_zero, int64_t offset, int64_t bytes, +cbw_co_snapshot_block_status(BlockDriverState *bs, unsigned int mode, + int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) { @@ -417,6 +418,7 @@ static BlockdevOptions *cbw_parse_options(QDict *options, Error **errp) qdict_extract_subqdict(options, NULL, "bitmap"); qdict_del(options, "on-cbw-error"); qdict_del(options, "cbw-timeout"); + qdict_del(options, "min-cluster-size"); out: visit_free(v); @@ -476,8 +478,10 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, bs->file->bs->supported_zero_flags); s->discard_source = flags & BDRV_O_CBW_DISCARD_SOURCE; + s->bcs = block_copy_state_new(bs->file, s->target, bs, bitmap, - flags & BDRV_O_CBW_DISCARD_SOURCE, errp); + flags & BDRV_O_CBW_DISCARD_SOURCE, + opts->min_cluster_size, errp); if (!s->bcs) { error_prepend(errp, "Cannot create block-copy-state: "); return -EINVAL; @@ -545,7 +549,9 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source, BlockDriverState *target, const char *filter_node_name, bool discard_source, + uint64_t min_cluster_size, BlockCopyState **bcs, + OnCbwError on_cbw_error, Error **errp) { BDRVCopyBeforeWriteState *state; @@ -563,6 +569,15 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source, } qdict_put_str(opts, "file", bdrv_get_node_name(source)); qdict_put_str(opts, "target", bdrv_get_node_name(target)); + qdict_put_str(opts, "on-cbw-error", OnCbwError_str(on_cbw_error)); + + if (min_cluster_size > INT64_MAX) { + error_setg(errp, "min-cluster-size too large: %" PRIu64 " > %" PRIi64, + min_cluster_size, INT64_MAX); + qobject_unref(opts); + return NULL; + } + qdict_put_int(opts, "min-cluster-size", (int64_t)min_cluster_size); top = bdrv_insert_node(source, opts, flags, errp); if (!top) { diff --git a/block/copy-before-write.h b/block/copy-before-write.h index 01af0cd3c43..eb93364e854 100644 --- a/block/copy-before-write.h +++ b/block/copy-before-write.h @@ -40,7 +40,9 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source, BlockDriverState *target, const char *filter_node_name, bool discard_source, + uint64_t min_cluster_size, BlockCopyState **bcs, + OnCbwError on_cbw_error, Error **errp); void bdrv_cbw_drop(BlockDriverState *bs); diff --git a/block/copy-on-read.c b/block/copy-on-read.c index c36f253d169..accf1402f09 100644 --- a/block/copy-on-read.c +++ b/block/copy-on-read.c @@ -25,7 +25,7 @@ #include "block/block_int.h" #include "qemu/module.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "block/copy-on-read.h" diff --git a/block/coroutines.h b/block/coroutines.h index f3226682d6c..892646bb7aa 100644 --- a/block/coroutines.h +++ b/block/coroutines.h @@ -28,7 +28,7 @@ #include "block/block_int.h" /* For blk_bs() in generated block/block-gen.c */ -#include "sysemu/block-backend.h" +#include "system/block-backend.h" /* * I/O API functions. These functions are thread-safe. 
@@ -47,7 +47,7 @@ int coroutine_fn GRAPH_RDLOCK bdrv_co_common_block_status_above(BlockDriverState *bs, BlockDriverState *base, bool include_base, - bool want_zero, + unsigned int mode, int64_t offset, int64_t bytes, int64_t *pnum, @@ -78,7 +78,7 @@ int co_wrapper_mixed_bdrv_rdlock bdrv_common_block_status_above(BlockDriverState *bs, BlockDriverState *base, bool include_base, - bool want_zero, + unsigned int mode, int64_t offset, int64_t bytes, int64_t *pnum, diff --git a/block/crypto.c b/block/crypto.c index 4eed3ffa6ad..d4226cc68a4 100644 --- a/block/crypto.c +++ b/block/crypto.c @@ -22,7 +22,7 @@ #include "block/block_int.h" #include "block/qdict.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "crypto/block.h" #include "qapi/opts-visitor.h" #include "qapi/qapi-visit-crypto.h" @@ -682,7 +682,7 @@ static BlockMeasureInfo *block_crypto_measure(QemuOpts *opts, static int block_crypto_probe_luks(const uint8_t *buf, int buf_size, const char *filename) { - return block_crypto_probe_generic(Q_CRYPTO_BLOCK_FORMAT_LUKS, + return block_crypto_probe_generic(QCRYPTO_BLOCK_FORMAT_LUKS, buf, buf_size, filename); } @@ -691,7 +691,7 @@ static int block_crypto_open_luks(BlockDriverState *bs, int flags, Error **errp) { - return block_crypto_open_generic(Q_CRYPTO_BLOCK_FORMAT_LUKS, + return block_crypto_open_generic(QCRYPTO_BLOCK_FORMAT_LUKS, &block_crypto_runtime_opts_luks, bs, options, flags, errp); } @@ -724,7 +724,7 @@ block_crypto_co_create_luks(BlockdevCreateOptions *create_options, Error **errp) } create_opts = (QCryptoBlockCreateOptions) { - .format = Q_CRYPTO_BLOCK_FORMAT_LUKS, + .format = QCRYPTO_BLOCK_FORMAT_LUKS, .u.luks = *qapi_BlockdevCreateOptionsLUKS_base(luks_opts), }; @@ -889,7 +889,7 @@ block_crypto_get_specific_info_luks(BlockDriverState *bs, Error **errp) if (!info) { return NULL; } - assert(info->format == Q_CRYPTO_BLOCK_FORMAT_LUKS); + assert(info->format == QCRYPTO_BLOCK_FORMAT_LUKS); spec_info = g_new(ImageInfoSpecific, 1); spec_info->type = IMAGE_INFO_SPECIFIC_KIND_LUKS; @@ -1002,7 +1002,7 @@ coroutine_fn block_crypto_co_amend_luks(BlockDriverState *bs, QCryptoBlockAmendOptions amend_opts; amend_opts = (QCryptoBlockAmendOptions) { - .format = Q_CRYPTO_BLOCK_FORMAT_LUKS, + .format = QCRYPTO_BLOCK_FORMAT_LUKS, .u.luks = *qapi_BlockdevAmendOptionsLUKS_base(&opts->u.luks), }; return block_crypto_amend_options_generic_luks(bs, &amend_opts, diff --git a/block/curl.c b/block/curl.c index 0fdb6d39acf..5467678024f 100644 --- a/block/curl.c +++ b/block/curl.c @@ -29,8 +29,8 @@ #include "qemu/option.h" #include "block/block-io.h" #include "block/block_int.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "crypto/secret.h" #include #include "qemu/cutils.h" diff --git a/block/export/export.c b/block/export/export.c index 6d51ae8ed78..f3bbf11070d 100644 --- a/block/export/export.c +++ b/block/export/export.c @@ -14,8 +14,8 @@ #include "qemu/osdep.h" #include "block/block.h" -#include "sysemu/block-backend.h" -#include "sysemu/iothread.h" +#include "system/block-backend.h" +#include "system/iothread.h" #include "block/export.h" #include "block/fuse.h" #include "block/nbd.h" @@ -75,6 +75,7 @@ static const BlockExportDriver *blk_exp_find_driver(BlockExportType type) BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) { bool fixed_iothread = export->has_fixed_iothread && export->fixed_iothread; + bool allow_inactive = export->has_allow_inactive && export->allow_inactive; 
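In blk_exp_add() above, has_allow_inactive / allow_inactive follow the usual QAPI convention for optional booleans: an option that was not given reads as false. A small standalone sketch of that defaulting pattern, with names invented for the example:

#include <stdbool.h>
#include <stdio.h>

typedef struct ExportOpts {
    bool has_allow_inactive;   /* was the option present at all? */
    bool allow_inactive;       /* only meaningful when has_... is true */
} ExportOpts;

static bool export_allows_inactive(const ExportOpts *o)
{
    /* An absent option defaults to false. */
    return o->has_allow_inactive && o->allow_inactive;
}

int main(void)
{
    ExportOpts unset = { .allow_inactive = true };  /* not actually given */
    ExportOpts given = { .has_allow_inactive = true, .allow_inactive = true };

    printf("unset=%d given=%d\n",
           export_allows_inactive(&unset), export_allows_inactive(&given));
    return 0;
}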
const BlockExportDriver *drv; BlockExport *exp = NULL; BlockDriverState *bs; @@ -138,14 +139,25 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) } } - /* - * Block exports are used for non-shared storage migration. Make sure - * that BDRV_O_INACTIVE is cleared and the image is ready for write - * access since the export could be available before migration handover. - * ctx was acquired in the caller. - */ bdrv_graph_rdlock_main_loop(); - bdrv_activate(bs, NULL); + if (allow_inactive) { + if (!drv->supports_inactive) { + error_setg(errp, "Export type does not support inactive exports"); + bdrv_graph_rdunlock_main_loop(); + goto fail; + } + } else { + /* + * Block exports are used for non-shared storage migration. Make sure + * that BDRV_O_INACTIVE is cleared and the image is ready for write + * access since the export could be available before migration handover. + */ + ret = bdrv_activate(bs, errp); + if (ret < 0) { + bdrv_graph_rdunlock_main_loop(); + goto fail; + } + } bdrv_graph_rdunlock_main_loop(); perm = BLK_PERM_CONSISTENT_READ; @@ -158,6 +170,9 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) if (!fixed_iothread) { blk_set_allow_aio_context_change(blk, true); } + if (allow_inactive) { + blk_set_force_allow_inactivate(blk); + } ret = blk_insert_bs(blk, bs, errp); if (ret < 0) { diff --git a/block/export/fuse.c b/block/export/fuse.c index 3307b640896..465cc9891d6 100644 --- a/block/export/fuse.c +++ b/block/export/fuse.c @@ -28,7 +28,7 @@ #include "qapi/error.h" #include "qapi/qapi-commands-block.h" #include "qemu/main-loop.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include #include diff --git a/block/export/vduse-blk.c b/block/export/vduse-blk.c index 172f73cef44..bd852e538dc 100644 --- a/block/export/vduse-blk.c +++ b/block/export/vduse-blk.c @@ -273,7 +273,6 @@ static int vduse_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, uint64_t logical_block_size = VIRTIO_BLK_SECTOR_SIZE; uint16_t num_queues = VDUSE_DEFAULT_NUM_QUEUE; uint16_t queue_size = VDUSE_DEFAULT_QUEUE_SIZE; - Error *local_err = NULL; struct virtio_blk_config config = { 0 }; uint64_t features; int i, ret; @@ -297,10 +296,8 @@ static int vduse_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, if (vblk_opts->has_logical_block_size) { logical_block_size = vblk_opts->logical_block_size; - check_block_size(exp->id, "logical-block-size", logical_block_size, - &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!check_block_size("logical-block-size", logical_block_size, + errp)) { return -EINVAL; } } diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c index 50c358e8cd5..d9d2014d9b7 100644 --- a/block/export/vhost-user-blk-server.c +++ b/block/export/vhost-user-blk-server.c @@ -319,7 +319,6 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, { VuBlkExport *vexp = container_of(exp, VuBlkExport, export); BlockExportOptionsVhostUserBlk *vu_opts = &opts->u.vhost_user_blk; - Error *local_err = NULL; uint64_t logical_block_size; uint16_t num_queues = VHOST_USER_BLK_NUM_QUEUES_DEFAULT; @@ -330,10 +329,7 @@ static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts, } else { logical_block_size = VIRTIO_BLK_SECTOR_SIZE; } - check_block_size(exp->id, "logical-block-size", logical_block_size, - &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!check_block_size("logical-block-size", logical_block_size, errp)) { return -EINVAL; 
} diff --git a/block/export/virtio-blk-handler.h b/block/export/virtio-blk-handler.h index 150d44cff24..cca1544e9fe 100644 --- a/block/export/virtio-blk-handler.h +++ b/block/export/virtio-blk-handler.h @@ -13,7 +13,7 @@ #ifndef VIRTIO_BLK_HANDLER_H #define VIRTIO_BLK_HANDLER_H -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #define VIRTIO_BLK_SECTOR_BITS 9 #define VIRTIO_BLK_SECTOR_SIZE (1ULL << VIRTIO_BLK_SECTOR_BITS) diff --git a/block/file-posix.c b/block/file-posix.c index ff928b5e858..8c738674ced 100644 --- a/block/file-posix.c +++ b/block/file-posix.c @@ -36,11 +36,12 @@ #include "block/thread-pool.h" #include "qemu/iov.h" #include "block/raw-aio.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "scsi/pr-manager.h" #include "scsi/constants.h" +#include "scsi/utils.h" #if defined(__APPLE__) && (__MACH__) #include @@ -72,6 +73,7 @@ #include #endif #include +#include #include #include #include @@ -110,6 +112,10 @@ #include #endif +#ifdef EMSCRIPTEN +#include +#endif + /* OS X does not have O_DSYNC */ #ifndef O_DSYNC #ifdef O_SYNC @@ -134,6 +140,22 @@ #define RAW_LOCK_PERM_BASE 100 #define RAW_LOCK_SHARED_BASE 200 +/* + * Multiple retries are mostly meant for two separate scenarios: + * + * - DM_MPATH_PROBE_PATHS returns success, but before SG_IO completes, another + * path goes down. + * + * - DM_MPATH_PROBE_PATHS failed all paths in the current path group, so we have + * to send another SG_IO to switch to another path group to probe the paths in + * it. + * + * Even if each path is in a separate path group (path_grouping_policy set to + * failover), it's rare to have more than eight path groups - and even then + * pretty unlikely that only bad path groups would be chosen in eight retries. + */ +#define SG_IO_MAX_RETRIES 8 + typedef struct BDRVRawState { int fd; bool use_lock; @@ -161,6 +183,7 @@ typedef struct BDRVRawState { bool use_linux_aio:1; bool has_laio_fdsync:1; bool use_linux_io_uring:1; + bool use_mpath:1; int page_cache_inconsistent; /* errno from fdatasync failure */ bool has_fallocate; bool needs_alignment; @@ -194,6 +217,7 @@ static int fd_open(BlockDriverState *bs) } static int64_t raw_getlength(BlockDriverState *bs); +static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs); typedef struct RawPosixAIOData { BlockDriverState *bs; @@ -780,17 +804,6 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, } #endif - if (S_ISBLK(st.st_mode)) { -#ifdef __linux__ - /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do - * not rely on the contents of discarded blocks unless using O_DIRECT. - * Same for BLKZEROOUT. 
- */ - if (!(bs->open_flags & BDRV_O_NOCACHE)) { - s->has_write_zeroes = false; - } -#endif - } #ifdef __FreeBSD__ if (S_ISCHR(st.st_mode)) { /* @@ -804,6 +817,13 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, #endif s->needs_alignment = raw_needs_alignment(bs); + bs->supported_write_flags = BDRV_REQ_FUA; + if (s->use_linux_aio && !laio_has_fua()) { + bs->supported_write_flags &= ~BDRV_REQ_FUA; + } else if (s->use_linux_io_uring && !luring_has_fua()) { + bs->supported_write_flags &= ~BDRV_REQ_FUA; + } + bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK; if (S_ISREG(st.st_mode)) { /* When extending regular files, we get zeros from the OS */ @@ -1268,10 +1288,10 @@ static int get_sysfs_zoned_model(struct stat *st, BlockZoneModel *zoned) } #endif /* defined(CONFIG_BLKZONED) */ +#ifdef CONFIG_LINUX /* * Get a sysfs attribute value as a long integer. */ -#ifdef CONFIG_LINUX static long get_sysfs_long_val(struct stat *st, const char *attribute) { g_autofree char *str = NULL; @@ -1291,6 +1311,30 @@ static long get_sysfs_long_val(struct stat *st, const char *attribute) } return ret; } + +/* + * Get a sysfs attribute value as a uint32_t. + */ +static int get_sysfs_u32_val(struct stat *st, const char *attribute, + uint32_t *u32) +{ + g_autofree char *str = NULL; + const char *end; + unsigned int val; + int ret; + + ret = get_sysfs_str_val(st, attribute, &str); + if (ret < 0) { + return ret; + } + + /* The file is ended with '\n', pass 'end' to accept that. */ + ret = qemu_strtoui(str, &end, 10, &val); + if (ret == 0 && end && *end == '\0') { + *u32 = val; + } + return ret; +} #endif static int hdev_get_max_segments(int fd, struct stat *st) @@ -1310,6 +1354,23 @@ static int hdev_get_max_segments(int fd, struct stat *st) #endif } +/* + * Fills in *dalign with the discard alignment and returns 0 on success, + * -errno otherwise. + */ +static int hdev_get_pdiscard_alignment(struct stat *st, uint32_t *dalign) +{ +#ifdef CONFIG_LINUX + /* + * Note that Linux "discard_granularity" is QEMU "discard_alignment". Linux + * "discard_alignment" is something else. 
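hdev_get_pdiscard_alignment() here maps the Linux sysfs attribute discard_granularity onto QEMU's pdiscard_alignment limit. A rough standalone illustration of reading such an attribute, tolerating the trailing newline that sysfs appends (the device path is a placeholder, not something taken from this patch):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Read a sysfs attribute such as
 *   /sys/block/sda/queue/discard_granularity
 * and parse it as an unsigned 32-bit value.
 * Returns 0 on success, -errno on failure. */
static int read_sysfs_u32(const char *path, uint32_t *out)
{
    char buf[64];
    FILE *f = fopen(path, "r");

    if (!f) {
        return -errno;
    }
    if (!fgets(buf, sizeof(buf), f)) {
        fclose(f);
        return -EIO;
    }
    fclose(f);

    /* sysfs values end with '\n'; strip it before parsing. */
    buf[strcspn(buf, "\n")] = '\0';

    errno = 0;
    char *end;
    unsigned long val = strtoul(buf, &end, 10);
    if (errno || end == buf || *end != '\0' || val > UINT32_MAX) {
        return -EINVAL;
    }

    *out = (uint32_t)val;
    return 0;
}

int main(void)
{
    uint32_t granularity;
    /* Placeholder device name; adjust for the system under test. */
    int ret = read_sysfs_u32("/sys/block/sda/queue/discard_granularity",
                             &granularity);

    if (ret == 0) {
        printf("discard_granularity: %u bytes\n", granularity);
    } else {
        printf("failed: %s\n", strerror(-ret));
    }
    return 0;
}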
+ */ + return get_sysfs_u32_val(st, "discard_granularity", dalign); +#else + return -ENOTSUP; +#endif +} + #if defined(CONFIG_BLKZONED) /* * If the reset_all flag is true, then the wps of zone whose state is @@ -1398,7 +1459,7 @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st, Error **errp) { BDRVRawState *s = bs->opaque; - BlockZoneModel zoned; + BlockZoneModel zoned = BLK_Z_NONE; int ret; ret = get_sysfs_zoned_model(st, &zoned); @@ -1519,6 +1580,30 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp) } } + if (S_ISBLK(st.st_mode)) { + uint32_t dalign = 0; + int ret; + + ret = hdev_get_pdiscard_alignment(&st, &dalign); + if (ret == 0 && dalign != 0) { + uint32_t ralign = bs->bl.request_alignment; + + /* Probably never happens, but handle it just in case */ + if (dalign < ralign && (ralign % dalign == 0)) { + dalign = ralign; + } + + /* The block layer requires a multiple of request_alignment */ + if (dalign % ralign != 0) { + error_setg(errp, "Invalid pdiscard_alignment limit %u is not a " + "multiple of request_alignment %u", dalign, ralign); + return; + } + + bs->bl.pdiscard_alignment = dalign; + } + } + raw_refresh_zoned_limits(bs, &st, errp); } @@ -2003,8 +2088,11 @@ static int handle_aiocb_write_zeroes_unmap(void *opaque) } #ifndef HAVE_COPY_FILE_RANGE -static off_t copy_file_range(int in_fd, off_t *in_off, int out_fd, - off_t *out_off, size_t len, unsigned int flags) +#ifndef EMSCRIPTEN +static +#endif +ssize_t copy_file_range(int in_fd, off_t *in_off, int out_fd, + off_t *out_off, size_t len, unsigned int flags) { #ifdef __NR_copy_file_range return syscall(__NR_copy_file_range, in_fd, in_off, out_fd, @@ -2476,8 +2564,9 @@ static inline bool raw_check_linux_aio(BDRVRawState *s) } #endif -static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr, - uint64_t bytes, QEMUIOVector *qiov, int type) +static int coroutine_fn GRAPH_RDLOCK +raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr, uint64_t bytes, + QEMUIOVector *qiov, int type, int flags) { BDRVRawState *s = bs->opaque; RawPosixAIOData acb; @@ -2508,13 +2597,13 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr, #ifdef CONFIG_LINUX_IO_URING } else if (raw_check_linux_io_uring(s)) { assert(qiov->size == bytes); - ret = luring_co_submit(bs, s->fd, offset, qiov, type); + ret = luring_co_submit(bs, s->fd, offset, qiov, type, flags); goto out; #endif #ifdef CONFIG_LINUX_AIO } else if (raw_check_linux_aio(s)) { assert(qiov->size == bytes); - ret = laio_co_submit(s->fd, offset, qiov, type, + ret = laio_co_submit(s->fd, offset, qiov, type, flags, s->aio_max_batch); goto out; #endif @@ -2534,6 +2623,10 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr, assert(qiov->size == bytes); ret = raw_thread_pool_submit(handle_aiocb_rw, &acb); + if (ret == 0 && (flags & BDRV_REQ_FUA)) { + /* TODO Use pwritev2() instead if it's available */ + ret = bdrv_co_flush(bs); + } goto out; /* Avoid the compiler err of unused label */ out: @@ -2567,18 +2660,18 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr, return ret; } -static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset, - int64_t bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags) +static int coroutine_fn GRAPH_RDLOCK +raw_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { - return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_READ); + return raw_co_prw(bs, &offset, 
bytes, qiov, QEMU_AIO_READ, flags); } -static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset, - int64_t bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags) +static int coroutine_fn GRAPH_RDLOCK +raw_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { - return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_WRITE); + return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_WRITE, flags); } static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs) @@ -2600,12 +2693,12 @@ static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs) #ifdef CONFIG_LINUX_IO_URING if (raw_check_linux_io_uring(s)) { - return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH); + return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH, 0); } #endif #ifdef CONFIG_LINUX_AIO if (s->has_laio_fdsync && raw_check_linux_aio(s)) { - return laio_co_submit(s->fd, 0, NULL, QEMU_AIO_FLUSH, 0); + return laio_co_submit(s->fd, 0, NULL, QEMU_AIO_FLUSH, 0, 0); } #endif return raw_thread_pool_submit(handle_aiocb_flush, &acb); @@ -3188,7 +3281,7 @@ static int find_allocation(BlockDriverState *bs, off_t start, * well exceed it. */ static int coroutine_fn raw_co_block_status(BlockDriverState *bs, - bool want_zero, + unsigned int mode, int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, @@ -3204,7 +3297,8 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs, return ret; } - if (!want_zero) { + if (!(mode & BDRV_WANT_ZERO)) { + /* There is no backing file - all bytes are allocated in this file. */ *pnum = bytes; *map = offset; *file = bs; @@ -3512,10 +3606,11 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, #endif #if defined(CONFIG_BLKZONED) -static int coroutine_fn raw_co_zone_append(BlockDriverState *bs, - int64_t *offset, - QEMUIOVector *qiov, - BdrvRequestFlags flags) { +static int coroutine_fn GRAPH_RDLOCK +raw_co_zone_append(BlockDriverState *bs, + int64_t *offset, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { assert(flags == 0); int64_t zone_size_mask = bs->bl.zone_size - 1; int64_t iov_len = 0; @@ -3540,7 +3635,7 @@ static int coroutine_fn raw_co_zone_append(BlockDriverState *bs, } trace_zbd_zone_append(bs, *offset >> BDRV_SECTOR_BITS); - return raw_co_prw(bs, offset, len, qiov, QEMU_AIO_ZONE_APPEND); + return raw_co_prw(bs, offset, len, qiov, QEMU_AIO_ZONE_APPEND, 0); } #endif @@ -4178,15 +4273,105 @@ static int hdev_open(BlockDriverState *bs, QDict *options, int flags, /* Since this does ioctl the device must be already opened */ bs->sg = hdev_is_sg(bs); + /* sg devices aren't even block devices and can't use dm-mpath */ + s->use_mpath = !bs->sg; + return ret; } #if defined(__linux__) +#if defined(DM_MPATH_PROBE_PATHS) +static bool coroutine_fn sgio_path_error(int ret, sg_io_hdr_t *io_hdr) +{ + if (ret < 0) { + switch (ret) { + case -ENODEV: + return true; + case -EAGAIN: + /* + * The device is probably suspended. This happens while the dm table + * is reloaded, e.g. because a path is added or removed. This is an + * operation that should complete within 1ms, so just wait a bit and + * retry. + * + * If the device was suspended for another reason, we'll wait and + * retry SG_IO_MAX_RETRIES times. This is a tolerable delay before + * we return an error and potentially stop the VM. 
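The retry policy described in this comment and in the SG_IO_MAX_RETRIES note earlier reduces to a bounded loop around the submission: retry only while the failure still looks transient and the retry budget is not exhausted, sleeping briefly between attempts. A standalone sketch of that shape, with the actual I/O and the error classification stubbed out:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MAX_RETRIES 8

/* Stand-in for the real submission; fails twice, then succeeds. */
static int submit_request(void)
{
    static int calls;
    return ++calls < 3 ? -1 : 0;
}

/* Stand-in for "is this failure worth retrying?", e.g. a path error. */
static bool error_is_transient(int ret)
{
    return ret < 0;
}

int main(void)
{
    int ret = -1;

    for (int attempt = 0; attempt <= MAX_RETRIES; attempt++) {
        ret = submit_request();
        if (ret == 0 || !error_is_transient(ret)) {
            break;
        }
        /* Brief pause (1ms) before retrying, mirroring the wait for a
         * suspended device-mapper table to finish reloading. */
        nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
    }

    printf("result after retries: %d\n", ret);
    return 0;
}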
+ */ + qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000); + return true; + default: + return false; + } + } + + if (io_hdr->host_status != SCSI_HOST_OK) { + return true; + } + + switch (io_hdr->status) { + case GOOD: + case CONDITION_GOOD: + case INTERMEDIATE_GOOD: + case INTERMEDIATE_C_GOOD: + case RESERVATION_CONFLICT: + case COMMAND_TERMINATED: + return false; + case CHECK_CONDITION: + return !scsi_sense_buf_is_guest_recoverable(io_hdr->sbp, + io_hdr->mx_sb_len); + default: + return true; + } +} + +static bool coroutine_fn hdev_co_ioctl_sgio_retry(RawPosixAIOData *acb, int ret) +{ + BDRVRawState *s = acb->bs->opaque; + RawPosixAIOData probe_acb; + + if (!s->use_mpath) { + return false; + } + + if (!sgio_path_error(ret, acb->ioctl.buf)) { + return false; + } + + probe_acb = (RawPosixAIOData) { + .bs = acb->bs, + .aio_type = QEMU_AIO_IOCTL, + .aio_fildes = s->fd, + .aio_offset = 0, + .ioctl = { + .buf = NULL, + .cmd = DM_MPATH_PROBE_PATHS, + }, + }; + + ret = raw_thread_pool_submit(handle_aiocb_ioctl, &probe_acb); + if (ret == -ENOTTY) { + s->use_mpath = false; + } else if (ret == -EAGAIN) { + /* The device might be suspended for a table reload, worth retrying */ + return true; + } + + return ret == 0; +} +#else +static bool coroutine_fn hdev_co_ioctl_sgio_retry(RawPosixAIOData *acb, int ret) +{ + return false; +} +#endif /* DM_MPATH_PROBE_PATHS */ + static int coroutine_fn hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) { BDRVRawState *s = bs->opaque; RawPosixAIOData acb; + int retries = SG_IO_MAX_RETRIES; int ret; ret = fd_open(bs); @@ -4214,7 +4399,11 @@ hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) }, }; - return raw_thread_pool_submit(handle_aiocb_ioctl, &acb); + do { + ret = raw_thread_pool_submit(handle_aiocb_ioctl, &acb); + } while (req == SG_IO && retries-- && hdev_co_ioctl_sgio_retry(&acb, ret)); + + return ret; } #endif /* linux */ diff --git a/block/file-win32.c b/block/file-win32.c index 7e1baa1ece6..af9aea631cb 100644 --- a/block/file-win32.c +++ b/block/file-win32.c @@ -33,8 +33,8 @@ #include "trace.h" #include "block/thread-pool.h" #include "qemu/iov.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include #include diff --git a/block/gluster.c b/block/gluster.c index f8b415f3812..89abd40f313 100644 --- a/block/gluster.c +++ b/block/gluster.c @@ -15,7 +15,7 @@ #include "block/block_int.h" #include "block/qdict.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/qmp/qerror.h" #include "qemu/error-report.h" #include "qemu/module.h" @@ -514,7 +514,6 @@ static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf, SocketAddressList **tail; QDict *backing_options = NULL; Error *local_err = NULL; - char *str = NULL; const char *ptr; int i, type, num_servers; @@ -547,7 +546,8 @@ static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf, tail = &gconf->server; for (i = 0; i < num_servers; i++) { - str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.", i); + g_autofree char *str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.", + i); qdict_extract_subqdict(options, &backing_options, str); /* create opts info from runtime_type_opts list */ @@ -658,8 +658,6 @@ static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf, qobject_unref(backing_options); backing_options = NULL; - g_free(str); - str = NULL; } return 0; @@ -668,7 +666,6 @@ static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf, 
error_propagate(errp, local_err); qapi_free_SocketAddress(gsconf); qemu_opts_del(opts); - g_free(str); qobject_unref(backing_options); errno = EINVAL; return -errno; @@ -809,6 +806,8 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options, goto out; } + warn_report_once("'gluster' is deprecated"); + filename = qemu_opt_get(opts, GLUSTER_OPT_FILENAME); s->debug = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG, @@ -973,8 +972,6 @@ static void qemu_gluster_reopen_commit(BDRVReopenState *state) g_free(state->opaque); state->opaque = NULL; - - return; } @@ -994,8 +991,6 @@ static void qemu_gluster_reopen_abort(BDRVReopenState *state) g_free(state->opaque); state->opaque = NULL; - - return; } #ifdef CONFIG_GLUSTERFS_ZEROFILL @@ -1466,7 +1461,7 @@ static int find_allocation(BlockDriverState *bs, off_t start, * (Based on raw_co_block_status() from file-posix.c.) */ static int coroutine_fn qemu_gluster_co_block_status(BlockDriverState *bs, - bool want_zero, + unsigned int mode, int64_t offset, int64_t bytes, int64_t *pnum, @@ -1483,7 +1478,7 @@ static int coroutine_fn qemu_gluster_co_block_status(BlockDriverState *bs, return ret; } - if (!want_zero) { + if (!(mode & BDRV_WANT_ZERO)) { *pnum = bytes; *map = offset; *file = bs; diff --git a/block/graph-lock.c b/block/graph-lock.c index c81162b1473..b7319473a17 100644 --- a/block/graph-lock.c +++ b/block/graph-lock.c @@ -33,6 +33,17 @@ static QemuMutex aio_context_list_lock; /* Written and read with atomic operations. */ static int has_writer; +/* + * Many write-locked sections are also drained sections. There is a convenience + * wrapper bdrv_graph_wrlock_drained() which begins a drained section before + * acquiring the lock. This variable here is used so bdrv_graph_wrunlock() knows + * if it also needs to end such a drained section. It needs to be a counter, + * because the aio_poll() call in bdrv_graph_wrlock() might re-enter + * bdrv_graph_wrlock_drained(). And note that aio_bh_poll() in + * bdrv_graph_wrunlock() might also re-enter a write-locked section. + */ +static int wrlock_quiesced_counter; + /* * A reader coroutine could move from an AioContext to another. * If this happens, there is no problem from the point of view of @@ -112,8 +123,14 @@ void no_coroutine_fn bdrv_graph_wrlock(void) assert(!qatomic_read(&has_writer)); assert(!qemu_in_coroutine()); - /* Make sure that constantly arriving new I/O doesn't cause starvation */ - bdrv_drain_all_begin_nopoll(); + bool need_drain = wrlock_quiesced_counter == 0; + + if (need_drain) { + /* + * Make sure that constantly arriving new I/O doesn't cause starvation + */ + bdrv_drain_all_begin_nopoll(); + } /* * reader_count == 0: this means writer will read has_reader as 1 @@ -139,7 +156,18 @@ void no_coroutine_fn bdrv_graph_wrlock(void) smp_mb(); } while (reader_count() >= 1); - bdrv_drain_all_end(); + if (need_drain) { + bdrv_drain_all_end(); + } +} + +void no_coroutine_fn bdrv_graph_wrlock_drained(void) +{ + GLOBAL_STATE_CODE(); + + bdrv_drain_all_begin(); + wrlock_quiesced_counter++; + bdrv_graph_wrlock(); } void no_coroutine_fn bdrv_graph_wrunlock(void) @@ -168,6 +196,12 @@ void no_coroutine_fn bdrv_graph_wrunlock(void) * progress. 
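The wrlock_quiesced_counter bookkeeping above ensures every bdrv_graph_wrlock_drained() gets its drained section ended by the matching bdrv_graph_wrunlock(), even when such sections re-enter. A stripped-down standalone model of that counting, with the drain and lock primitives reduced to stubs (this is not the QEMU implementation):

#include <assert.h>
#include <stdio.h>

static int quiesce_counter;   /* open drained sections */
static int wrlock_quiesced;   /* of those, opened by wrlock_drained() */

static void drain_begin(void) { quiesce_counter++; }

static void drain_end(void)
{
    assert(quiesce_counter > 0);
    quiesce_counter--;
}

static void wrlock(void)
{
    if (wrlock_quiesced == 0) {
        /* A plain wrlock still performs its own temporary drain. */
        drain_begin();
        drain_end();
    }
    /* ... acquire the writer side here ... */
}

static void wrlock_drained(void)
{
    drain_begin();
    wrlock_quiesced++;
    wrlock();
}

static void wrunlock(void)
{
    /* ... release the writer side here ... */
    if (wrlock_quiesced > 0) {
        drain_end();
        wrlock_quiesced--;
    }
}

int main(void)
{
    wrlock_drained();
    wrlock_drained();   /* a bottom half re-entering the drained section */
    wrunlock();
    wrunlock();

    printf("open drained sections: %d (expected 0)\n", quiesce_counter);
    return 0;
}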
*/ aio_bh_poll(qemu_get_aio_context()); + + if (wrlock_quiesced_counter > 0) { + bdrv_drain_all_end(); + wrlock_quiesced_counter--; + } + } void coroutine_fn bdrv_graph_co_rdlock(void) diff --git a/block/io.c b/block/io.c index 301514c8808..9bd8ba84319 100644 --- a/block/io.c +++ b/block/io.c @@ -24,7 +24,7 @@ #include "qemu/osdep.h" #include "trace.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "block/aio-wait.h" #include "block/blockjob.h" #include "block/blockjob_int.h" @@ -37,11 +37,15 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" -#include "sysemu/replay.h" +#include "system/replay.h" +#include "qemu/units.h" /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */ #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS) +/* Maximum read size for checking if data reads as zero, in bytes */ +#define MAX_ZERO_CHECK_BUFFER (128 * KiB) + static void coroutine_fn GRAPH_RDLOCK bdrv_parent_cb_resize(BlockDriverState *bs); @@ -357,7 +361,7 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent, GLOBAL_STATE_CODE(); /* Stop things in parent-to-child order */ - if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) { + if (bs->quiesce_counter++ == 0) { GRAPH_RDLOCK_GUARD_MAINLOOP(); bdrv_parent_drained_begin(bs, parent); if (bs->drv && bs->drv->bdrv_drain_begin) { @@ -397,8 +401,6 @@ bdrv_drained_begin(BlockDriverState *bs) */ static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent) { - int old_quiesce_counter; - IO_OR_GS_CODE(); if (qemu_in_coroutine()) { @@ -409,11 +411,9 @@ static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent) /* At this point, we should be always running in the main loop. */ GLOBAL_STATE_CODE(); assert(bs->quiesce_counter > 0); - GLOBAL_STATE_CODE(); /* Re-enable things in child-to-parent order */ - old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter); - if (old_quiesce_counter == 1) { + if (--bs->quiesce_counter == 0) { GRAPH_RDLOCK_GUARD_MAINLOOP(); if (bs->drv && bs->drv->bdrv_drain_end) { bs->drv->bdrv_drain_end(bs); @@ -1058,6 +1058,10 @@ bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, return -ENOMEDIUM; } + if (bs->open_flags & BDRV_O_NO_FLUSH) { + flags &= ~BDRV_REQ_FUA; + } + if ((flags & BDRV_REQ_FUA) && (~bs->supported_write_flags & BDRV_REQ_FUA)) { flags &= ~BDRV_REQ_FUA; @@ -2360,10 +2364,8 @@ int bdrv_flush_all(void) * Drivers not implementing the functionality are assumed to not support * backing files, hence all their sectors are reported as allocated. * - * If 'want_zero' is true, the caller is querying for mapping - * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and - * _ZERO where possible; otherwise, the result favors larger 'pnum', - * with a focus on accurate BDRV_BLOCK_ALLOCATED. + * 'mode' serves as a hint as to which results are favored; see the + * BDRV_WANT_* macros for details. * * If 'offset' is beyond the end of the disk image the return value is * BDRV_BLOCK_EOF and 'pnum' is set to 0. @@ -2383,7 +2385,7 @@ int bdrv_flush_all(void) * set to the host mapping and BDS corresponding to the guest offset. 
*/ static int coroutine_fn GRAPH_RDLOCK -bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero, +bdrv_co_do_block_status(BlockDriverState *bs, unsigned int mode, int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) { @@ -2472,7 +2474,7 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero, local_file = bs; local_map = aligned_offset; } else { - ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset, + ret = bs->drv->bdrv_co_block_status(bs, mode, aligned_offset, aligned_bytes, pnum, &local_map, &local_file); @@ -2484,10 +2486,10 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero, * the cache requires an RCU update, so double check here to avoid * such an update if possible. * - * Check want_zero, because we only want to update the cache when we + * Check mode, because we only want to update the cache when we * have accurate information about what is zero and what is data. */ - if (want_zero && + if (mode == BDRV_WANT_PRECISE && ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) && QLIST_EMPTY(&bs->children)) { @@ -2544,7 +2546,7 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero, if (ret & BDRV_BLOCK_RAW) { assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file); - ret = bdrv_co_do_block_status(local_file, want_zero, local_map, + ret = bdrv_co_do_block_status(local_file, mode, local_map, *pnum, pnum, &local_map, &local_file); goto out; } @@ -2556,7 +2558,7 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero, if (!cow_bs) { ret |= BDRV_BLOCK_ZERO; - } else if (want_zero) { + } else if (mode == BDRV_WANT_PRECISE) { int64_t size2 = bdrv_co_getlength(cow_bs); if (size2 >= 0 && offset >= size2) { @@ -2565,14 +2567,14 @@ bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero, } } - if (want_zero && ret & BDRV_BLOCK_RECURSE && + if (mode == BDRV_WANT_PRECISE && ret & BDRV_BLOCK_RECURSE && local_file && local_file != bs && (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) && (ret & BDRV_BLOCK_OFFSET_VALID)) { int64_t file_pnum; int ret2; - ret2 = bdrv_co_do_block_status(local_file, want_zero, local_map, + ret2 = bdrv_co_do_block_status(local_file, mode, local_map, *pnum, &file_pnum, NULL, NULL); if (ret2 >= 0) { /* Ignore errors. 
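/*
 * Illustrative sketch, not part of the patch: with the old 'want_zero'
 * bool widened into a 'mode' bitmask, a driver's block-status callback
 * can test the individual BDRV_WANT_* hints, following the same pattern
 * as the gluster hunk above. 'example_co_block_status' is a hypothetical
 * driver callback used only for illustration.
 */
static int coroutine_fn
example_co_block_status(BlockDriverState *bs, unsigned int mode,
                        int64_t offset, int64_t bytes, int64_t *pnum,
                        int64_t *map, BlockDriverState **file)
{
    if (!(mode & BDRV_WANT_ZERO)) {
        /* Caller did not ask for zero information: answer cheaply. */
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }
    /* ... otherwise run the slower, precise zero/allocation probe ... */
    return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
}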
This is just providing extra information, it @@ -2623,7 +2625,7 @@ int coroutine_fn bdrv_co_common_block_status_above(BlockDriverState *bs, BlockDriverState *base, bool include_base, - bool want_zero, + unsigned int mode, int64_t offset, int64_t bytes, int64_t *pnum, @@ -2650,7 +2652,7 @@ bdrv_co_common_block_status_above(BlockDriverState *bs, return 0; } - ret = bdrv_co_do_block_status(bs, want_zero, offset, bytes, pnum, + ret = bdrv_co_do_block_status(bs, mode, offset, bytes, pnum, map, file); ++*depth; if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) { @@ -2667,7 +2669,7 @@ bdrv_co_common_block_status_above(BlockDriverState *bs, for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base; p = bdrv_filter_or_cow_bs(p)) { - ret = bdrv_co_do_block_status(p, want_zero, offset, bytes, pnum, + ret = bdrv_co_do_block_status(p, mode, offset, bytes, pnum, map, file); ++*depth; if (ret < 0) { @@ -2730,7 +2732,8 @@ int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs, BlockDriverState **file) { IO_CODE(); - return bdrv_co_common_block_status_above(bs, base, false, true, offset, + return bdrv_co_common_block_status_above(bs, base, false, + BDRV_WANT_PRECISE, offset, bytes, pnum, map, file, NULL); } @@ -2748,27 +2751,89 @@ int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, int64_t offset, * by @offset and @bytes is known to read as zeroes. * Return 1 if that is the case, 0 otherwise and -errno on error. * This test is meant to be fast rather than accurate so returning 0 - * does not guarantee non-zero data. + * does not guarantee non-zero data; but a return of 1 is reliable. */ int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, int64_t bytes) { int ret; - int64_t pnum = bytes; + int64_t pnum; IO_CODE(); - if (!bytes) { - return 1; + while (bytes) { + ret = bdrv_co_common_block_status_above(bs, NULL, false, + BDRV_WANT_ZERO, offset, bytes, + &pnum, NULL, NULL, NULL); + + if (ret < 0) { + return ret; + } + if (!(ret & BDRV_BLOCK_ZERO)) { + return 0; + } + offset += pnum; + bytes -= pnum; } - ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset, - bytes, &pnum, NULL, NULL, NULL); + return 1; +} + +/* + * Check @bs (and its backing chain) to see if the entire image is known + * to read as zeroes. + * Return 1 if that is the case, 0 otherwise and -errno on error. + * This test is meant to be fast rather than accurate so returning 0 + * does not guarantee non-zero data; however, a return of 1 is reliable, + * and this function can report 1 in more cases than bdrv_co_is_zero_fast. + */ +int coroutine_fn bdrv_co_is_all_zeroes(BlockDriverState *bs) +{ + int ret; + int64_t pnum, bytes; + char *buf; + QEMUIOVector local_qiov; + IO_CODE(); + + bytes = bdrv_co_getlength(bs); + if (bytes < 0) { + return bytes; + } + /* First probe - see if the entire image reads as zero */ + ret = bdrv_co_common_block_status_above(bs, NULL, false, BDRV_WANT_ZERO, + 0, bytes, &pnum, NULL, NULL, + NULL); if (ret < 0) { return ret; } + if (ret & BDRV_BLOCK_ZERO) { + return bdrv_co_is_zero_fast(bs, pnum, bytes - pnum); + } - return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO); + /* + * Because of the way 'blockdev-create' works, raw files tend to + * be created with a non-sparse region at the front to make + * alignment probing easier. 
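/*
 * Illustrative sketch, not part of the patch: the intended use of the new
 * bdrv_co_is_all_zeroes() helper, as the mirror changes further below do,
 * is to skip an explicit pre-zeroing pass when the target already reads
 * as zeroes. 'example_prepare_target' is a hypothetical caller used only
 * for illustration.
 */
static int coroutine_fn example_prepare_target(BlockDriverState *target_bs)
{
    int ret = bdrv_co_is_all_zeroes(target_bs);

    if (ret < 0) {
        return ret;     /* probing the target failed */
    }
    if (ret == 1) {
        return 0;       /* already reads as zero: no pre-zeroing needed */
    }
    /* ... otherwise fall back to an explicit zeroing pass ... */
    return 0;
}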
If the block starts with only a + * small allocated region, it is still worth the effort to see if + * the rest of the image is still sparse, coupled with manually + * reading the first region to see if it reads zero after all. + */ + if (pnum > MAX_ZERO_CHECK_BUFFER) { + return 0; + } + ret = bdrv_co_is_zero_fast(bs, pnum, bytes - pnum); + if (ret <= 0) { + return ret; + } + /* Only the head of the image is unknown, and it's small. Read it. */ + buf = qemu_blockalign(bs, pnum); + qemu_iovec_init_buf(&local_qiov, buf, pnum); + ret = bdrv_driver_preadv(bs, 0, pnum, &local_qiov, 0, 0); + if (ret >= 0) { + ret = buffer_is_zero(buf, pnum); + } + qemu_vfree(buf); + return ret; } int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, @@ -2778,9 +2843,9 @@ int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, int64_t dummy; IO_CODE(); - ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset, - bytes, pnum ? pnum : &dummy, NULL, - NULL, NULL); + ret = bdrv_co_common_block_status_above(bs, bs, true, BDRV_WANT_ALLOCATED, + offset, bytes, pnum ? pnum : &dummy, + NULL, NULL, NULL); if (ret < 0) { return ret; } @@ -2813,7 +2878,8 @@ int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *bs, int ret; IO_CODE(); - ret = bdrv_co_common_block_status_above(bs, base, include_base, false, + ret = bdrv_co_common_block_status_above(bs, base, include_base, + BDRV_WANT_ALLOCATED, offset, bytes, pnum, NULL, NULL, &depth); if (ret < 0) { @@ -3098,18 +3164,19 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, return 0; } - if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { + if (!bs->drv->bdrv_co_pdiscard) { return 0; } /* Invalidate the cached block-status data range if this discard overlaps */ bdrv_bsc_invalidate_range(bs, offset, bytes); - /* Discard is advisory, but some devices track and coalesce + /* + * Discard is advisory, but some devices track and coalesce * unaligned requests, so we must pass everything down rather than - * round here. Still, most devices will just silently ignore - * unaligned requests (by returning -ENOTSUP), so we must fragment - * the request accordingly. */ + * round here. Still, most devices reject unaligned requests with + * -EINVAL or -ENOTSUP, so we must fragment the request accordingly. 
+ */ align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); assert(align % bs->bl.request_alignment == 0); head = offset % align; @@ -3157,27 +3224,15 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, ret = -ENOMEDIUM; goto out; } - if (bs->drv->bdrv_co_pdiscard) { - ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); - } else { - BlockAIOCB *acb; - CoroutineIOCompletion co = { - .coroutine = qemu_coroutine_self(), - }; - - acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, - bdrv_co_io_em_complete, &co); - if (acb == NULL) { - ret = -EIO; - goto out; + + ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); + if (ret && ret != -ENOTSUP) { + if (ret == -EINVAL && (offset % align != 0 || num % align != 0)) { + /* Silently skip rejected unaligned head/tail requests */ } else { - qemu_coroutine_yield(); - ret = co.ret; + goto out; /* bail out */ } } - if (ret && ret != -ENOTSUP) { - goto out; - } offset += num; bytes -= num; @@ -3705,8 +3760,8 @@ bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes, } int coroutine_fn -bdrv_co_snapshot_block_status(BlockDriverState *bs, - bool want_zero, int64_t offset, int64_t bytes, +bdrv_co_snapshot_block_status(BlockDriverState *bs, unsigned int mode, + int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) { @@ -3724,7 +3779,7 @@ bdrv_co_snapshot_block_status(BlockDriverState *bs, } bdrv_inc_in_flight(bs); - ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes, + ret = drv->bdrv_co_snapshot_block_status(bs, mode, offset, bytes, pnum, map, file); bdrv_dec_in_flight(bs); diff --git a/block/io_uring.c b/block/io_uring.c index d11b2051abd..dd4f304910b 100644 --- a/block/io_uring.c +++ b/block/io_uring.c @@ -17,7 +17,7 @@ #include "qemu/coroutine.h" #include "qemu/defer-call.h" #include "qapi/error.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "trace.h" /* Only used for assertions. */ @@ -335,15 +335,24 @@ static void luring_deferred_fn(void *opaque) * */ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s, - uint64_t offset, int type) + uint64_t offset, int type, BdrvRequestFlags flags) { int ret; struct io_uring_sqe *sqes = &luringcb->sqeq; switch (type) { case QEMU_AIO_WRITE: +#ifdef HAVE_IO_URING_PREP_WRITEV2 + { + int luring_flags = (flags & BDRV_REQ_FUA) ? RWF_DSYNC : 0; + io_uring_prep_writev2(sqes, fd, luringcb->qiov->iov, + luringcb->qiov->niov, offset, luring_flags); + } +#else + assert(flags == 0); io_uring_prep_writev(sqes, fd, luringcb->qiov->iov, luringcb->qiov->niov, offset); +#endif break; case QEMU_AIO_ZONE_APPEND: io_uring_prep_writev(sqes, fd, luringcb->qiov->iov, @@ -380,7 +389,8 @@ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s, } int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset, - QEMUIOVector *qiov, int type) + QEMUIOVector *qiov, int type, + BdrvRequestFlags flags) { int ret; AioContext *ctx = qemu_get_current_aio_context(); @@ -393,7 +403,7 @@ int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset, }; trace_luring_co_submit(bs, s, &luringcb, fd, offset, qiov ? 
qiov->size : 0, type); - ret = luring_do_submit(fd, &luringcb, s, offset, type); + ret = luring_do_submit(fd, &luringcb, s, offset, type, flags); if (ret < 0) { return ret; @@ -448,3 +458,12 @@ void luring_cleanup(LuringState *s) trace_luring_cleanup_state(s); g_free(s); } + +bool luring_has_fua(void) +{ +#ifdef HAVE_IO_URING_PREP_WRITEV2 + return true; +#else + return false; +#endif +} diff --git a/block/iscsi.c b/block/iscsi.c index 979bf90cb79..15b96ee8800 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -28,7 +28,7 @@ #include #include #include -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qemu/config-file.h" #include "qemu/error-report.h" #include "qemu/bitops.h" @@ -41,11 +41,11 @@ #include "qemu/module.h" #include "qemu/option.h" #include "qemu/uuid.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "qapi/error.h" #include "qapi/qapi-commands-machine.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "crypto/secret.h" #include "scsi/utils.h" #include "trace.h" @@ -694,9 +694,9 @@ iscsi_co_writev(BlockDriverState *bs, int64_t sector_num, int nb_sectors, static int coroutine_fn iscsi_co_block_status(BlockDriverState *bs, - bool want_zero, int64_t offset, - int64_t bytes, int64_t *pnum, - int64_t *map, + unsigned int mode, + int64_t offset, int64_t bytes, + int64_t *pnum, int64_t *map, BlockDriverState **file) { IscsiLun *iscsilun = bs->opaque; diff --git a/block/linux-aio.c b/block/linux-aio.c index e3b5ec9abae..c200e7ad20f 100644 --- a/block/linux-aio.c +++ b/block/linux-aio.c @@ -16,7 +16,7 @@ #include "qemu/coroutine.h" #include "qemu/defer-call.h" #include "qapi/error.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" /* Only used for assertions. */ #include "qemu/coroutine_int.h" @@ -291,7 +291,7 @@ static void ioq_submit(LinuxAioState *s) { int ret, len; struct qemu_laiocb *aiocb; - struct iocb *iocbs[MAX_EVENTS]; + QEMU_UNINITIALIZED struct iocb *iocbs[MAX_EVENTS]; QSIMPLEQ_HEAD(, qemu_laiocb) completed; do { @@ -368,7 +368,8 @@ static void laio_deferred_fn(void *opaque) } static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, - int type, uint64_t dev_max_batch) + int type, BdrvRequestFlags flags, + uint64_t dev_max_batch) { LinuxAioState *s = laiocb->ctx; struct iocb *iocbs = &laiocb->iocb; @@ -376,7 +377,15 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, switch (type) { case QEMU_AIO_WRITE: +#ifdef HAVE_IO_PREP_PWRITEV2 + { + int laio_flags = (flags & BDRV_REQ_FUA) ? 
RWF_DSYNC : 0; + io_prep_pwritev2(iocbs, fd, qiov->iov, qiov->niov, offset, laio_flags); + } +#else + assert(flags == 0); io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset); +#endif break; case QEMU_AIO_ZONE_APPEND: io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset); @@ -409,7 +418,8 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, } int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov, - int type, uint64_t dev_max_batch) + int type, BdrvRequestFlags flags, + uint64_t dev_max_batch) { int ret; AioContext *ctx = qemu_get_current_aio_context(); @@ -422,7 +432,7 @@ int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov, .qiov = qiov, }; - ret = laio_do_submit(fd, &laiocb, offset, type, dev_max_batch); + ret = laio_do_submit(fd, &laiocb, offset, type, flags, dev_max_batch); if (ret < 0) { return ret; } @@ -505,3 +515,12 @@ bool laio_has_fdsync(int fd) io_destroy(ctx); return (ret == -EINVAL) ? false : true; } + +bool laio_has_fua(void) +{ +#ifdef HAVE_IO_PREP_PWRITEV2 + return true; +#else + return false; +#endif +} diff --git a/block/meson.build b/block/meson.build index f1262ec2ba8..34b1b2a3063 100644 --- a/block/meson.build +++ b/block/meson.build @@ -154,8 +154,8 @@ block_gen_c = custom_target('block-gen.c', '../include/block/dirty-bitmap.h', '../include/block/block_int-io.h', '../include/block/block-global-state.h', - '../include/sysemu/block-backend-global-state.h', - '../include/sysemu/block-backend-io.h', + '../include/system/block-backend-global-state.h', + '../include/system/block-backend-io.h', 'coroutines.h' ), command: [wrapper_py, '@OUTPUT@', '@INPUT@']) @@ -163,7 +163,7 @@ block_ss.add(block_gen_c) block_ss.add(files('stream.c')) -system_ss.add(files('qapi-sysemu.c')) +system_ss.add(files('qapi-system.c')) subdir('export') subdir('monitor') diff --git a/block/mirror.c b/block/mirror.c index 61f0a717b75..b344182c747 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -19,7 +19,7 @@ #include "block/blockjob_int.h" #include "block/block_int.h" #include "block/dirty-bitmap.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qapi/error.h" #include "qemu/ratelimit.h" #include "qemu/bitmap.h" @@ -51,10 +51,10 @@ typedef struct MirrorBlockJob { BlockDriverState *to_replace; /* Used to block operations on the drive-mirror-replace target */ Error *replace_blocker; - bool is_none_mode; + MirrorSyncMode sync_mode; BlockMirrorBackingMode backing_mode; - /* Whether the target image requires explicit zero-initialization */ - bool zero_target; + /* Whether the target should be assumed to be already zero initialized */ + bool target_is_zero; /* * To be accesssed with atomics. Written only under the BQL (required by the * current implementation of mirror_change()). 
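/*
 * Illustrative sketch, not part of the patch: the linux-aio and io_uring
 * hunks above map BDRV_REQ_FUA onto RWF_DSYNC when pwritev2 support is
 * compiled in, and export laio_has_fua()/luring_has_fua() so callers can
 * detect that. A hypothetical caller could advertise per-request FUA only
 * when the chosen backend can honor it; otherwise the generic block layer
 * keeps emulating FUA with a separate flush.
 * 'example_refresh_write_flags' is a hypothetical helper for illustration.
 */
static void example_refresh_write_flags(BlockDriverState *bs,
                                        bool use_linux_aio)
{
    bool fua = use_linux_aio ? laio_has_fua() : luring_has_fua();

    if (fua) {
        bs->supported_write_flags |= BDRV_REQ_FUA;
    }
}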
@@ -73,6 +73,7 @@ typedef struct MirrorBlockJob { size_t buf_size; int64_t bdev_length; unsigned long *cow_bitmap; + unsigned long *zero_bitmap; BdrvDirtyBitmap *dirty_bitmap; BdrvDirtyBitmapIter *dbi; uint8_t *buf; @@ -108,9 +109,12 @@ struct MirrorOp { int64_t offset; uint64_t bytes; - /* The pointee is set by mirror_co_read(), mirror_co_zero(), and - * mirror_co_discard() before yielding for the first time */ + /* + * These pointers are set by mirror_co_read(), mirror_co_zero(), and + * mirror_co_discard() before yielding for the first time + */ int64_t *bytes_handled; + bool *io_skipped; bool is_pseudo_op; bool is_active_write; @@ -349,7 +353,7 @@ static void coroutine_fn mirror_co_read(void *opaque) MirrorOp *op = opaque; MirrorBlockJob *s = op->s; int nb_chunks; - uint64_t ret; + int ret = -1; uint64_t max_bytes; max_bytes = s->granularity * s->max_iov; @@ -408,15 +412,34 @@ static void coroutine_fn mirror_co_read(void *opaque) static void coroutine_fn mirror_co_zero(void *opaque) { MirrorOp *op = opaque; - int ret; + bool write_needed = true; + int ret = 0; op->s->in_flight++; op->s->bytes_in_flight += op->bytes; *op->bytes_handled = op->bytes; op->is_in_flight = true; - ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes, - op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0); + if (op->s->zero_bitmap) { + unsigned long end = DIV_ROUND_UP(op->offset + op->bytes, + op->s->granularity); + assert(QEMU_IS_ALIGNED(op->offset, op->s->granularity)); + assert(QEMU_IS_ALIGNED(op->bytes, op->s->granularity) || + op->offset + op->bytes == op->s->bdev_length); + if (find_next_zero_bit(op->s->zero_bitmap, end, + op->offset / op->s->granularity) == end) { + write_needed = false; + *op->io_skipped = true; + } + } + if (write_needed) { + ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes, + op->s->unmap ? 
BDRV_REQ_MAY_UNMAP : 0); + } + if (ret >= 0 && op->s->zero_bitmap) { + bitmap_set(op->s->zero_bitmap, op->offset / op->s->granularity, + DIV_ROUND_UP(op->bytes, op->s->granularity)); + } mirror_write_complete(op, ret); } @@ -435,29 +458,43 @@ static void coroutine_fn mirror_co_discard(void *opaque) } static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset, - unsigned bytes, MirrorMethod mirror_method) + unsigned bytes, MirrorMethod mirror_method, + bool *io_skipped) { MirrorOp *op; Coroutine *co; int64_t bytes_handled = -1; + assert(QEMU_IS_ALIGNED(offset, s->granularity)); + assert(QEMU_IS_ALIGNED(bytes, s->granularity) || + offset + bytes == s->bdev_length); op = g_new(MirrorOp, 1); *op = (MirrorOp){ .s = s, .offset = offset, .bytes = bytes, .bytes_handled = &bytes_handled, + .io_skipped = io_skipped, }; qemu_co_queue_init(&op->waiting_requests); switch (mirror_method) { case MIRROR_METHOD_COPY: + if (s->zero_bitmap) { + bitmap_clear(s->zero_bitmap, offset / s->granularity, + DIV_ROUND_UP(bytes, s->granularity)); + } co = qemu_coroutine_create(mirror_co_read, op); break; case MIRROR_METHOD_ZERO: + /* s->zero_bitmap handled in mirror_co_zero */ co = qemu_coroutine_create(mirror_co_zero, op); break; case MIRROR_METHOD_DISCARD: + if (s->zero_bitmap) { + bitmap_clear(s->zero_bitmap, offset / s->granularity, + DIV_ROUND_UP(bytes, s->granularity)); + } co = qemu_coroutine_create(mirror_co_discard, op); break; default: @@ -565,9 +602,10 @@ static void coroutine_fn GRAPH_UNLOCKED mirror_iteration(MirrorBlockJob *s) bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks); while (nb_chunks > 0 && offset < s->bdev_length) { - int ret; + int ret = -1; int64_t io_bytes; int64_t io_bytes_acct; + bool io_skipped = false; MirrorMethod mirror_method = MIRROR_METHOD_COPY; assert(!(offset % s->granularity)); @@ -611,8 +649,10 @@ static void coroutine_fn GRAPH_UNLOCKED mirror_iteration(MirrorBlockJob *s) } io_bytes = mirror_clip_bytes(s, offset, io_bytes); - io_bytes = mirror_perform(s, offset, io_bytes, mirror_method); - if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) { + io_bytes = mirror_perform(s, offset, io_bytes, mirror_method, + &io_skipped); + if (io_skipped || + (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok)) { io_bytes_acct = 0; } else { io_bytes_acct = io_bytes; @@ -721,11 +761,16 @@ static int mirror_exit_common(Job *job) bdrv_graph_rdlock_main_loop(); bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing, &error_abort); + bdrv_graph_rdunlock_main_loop(); if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) { - BlockDriverState *backing = s->is_none_mode ? src : s->base; - BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs); + BlockDriverState *backing; + BlockDriverState *unfiltered_target; + bdrv_graph_wrlock_drained(); + unfiltered_target = bdrv_skip_filters(target_bs); + + backing = s->sync_mode == MIRROR_SYNC_MODE_NONE ? 
src : s->base; if (bdrv_cow_bs(unfiltered_target) != backing) { bdrv_set_backing_hd(unfiltered_target, backing, &local_err); if (local_err) { @@ -734,16 +779,18 @@ static int mirror_exit_common(Job *job) ret = -EPERM; } } + bdrv_graph_wrunlock(); } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) { + bdrv_graph_rdlock_main_loop(); assert(!bdrv_backing_chain_next(target_bs)); ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL, "backing", &local_err); + bdrv_graph_rdunlock_main_loop(); if (ret < 0) { error_report_err(local_err); local_err = NULL; } } - bdrv_graph_rdunlock_main_loop(); if (s->should_complete && !abort) { BlockDriverState *to_replace = s->to_replace ?: src; @@ -841,15 +888,54 @@ static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s) int64_t offset; BlockDriverState *bs; BlockDriverState *target_bs = blk_bs(s->target); - int ret; + int ret = -EIO; int64_t count; + bool punch_holes = + target_bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP && + bdrv_can_write_zeroes_with_unmap(target_bs); + int64_t bitmap_length = DIV_ROUND_UP(s->bdev_length, s->granularity); + /* Determine if the image is already zero, regardless of sync mode. */ + s->zero_bitmap = bitmap_new(bitmap_length); bdrv_graph_co_rdlock(); bs = s->mirror_top_bs->backing->bs; + if (s->target_is_zero) { + ret = 1; + } else { + ret = bdrv_co_is_all_zeroes(target_bs); + } bdrv_graph_co_rdunlock(); - if (s->zero_target) { - if (!bdrv_can_write_zeroes_with_unmap(target_bs)) { + /* Determine if a pre-zeroing pass is necessary. */ + if (ret < 0) { + return ret; + } else if (s->sync_mode == MIRROR_SYNC_MODE_TOP) { + /* + * In TOP mode, there is no benefit to a pre-zeroing pass, but + * the zero bitmap can be set if the destination already reads + * as zero and we are not punching holes. + */ + if (ret > 0 && !punch_holes) { + bitmap_set(s->zero_bitmap, 0, bitmap_length); + } + } else if (ret == 0 || punch_holes) { + /* + * Here, we are in FULL mode; our goal is to avoid writing + * zeroes if the destination already reads as zero, except + * when we are trying to punch holes. This is possible if + * zeroing happened externally (ret > 0) or if we have a fast + * way to pre-zero the image (the dirty bitmap will be + * populated later by the non-zero portions, the same as for + * TOP mode). If pre-zeroing is not fast, or we need to visit + * the entire image in order to punch holes even in the + * non-allocated regions of the source, then just mark the + * entire image dirty and leave the zero bitmap clear at this + * point in time. Otherwise, it can be faster to pre-zero the + * image now, even if we re-write the allocated portions of + * the disk later, and the pre-zero pass will populate the + * zero bitmap. 
+ */ + if (!bdrv_can_write_zeroes_with_unmap(target_bs) || punch_holes) { bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length); return 0; } @@ -858,6 +944,7 @@ static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s) for (offset = 0; offset < s->bdev_length; ) { int bytes = MIN(s->bdev_length - offset, QEMU_ALIGN_DOWN(INT_MAX, s->granularity)); + bool ignored; mirror_throttle(s); @@ -873,12 +960,15 @@ static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s) continue; } - mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO); + mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO, &ignored); offset += bytes; } mirror_wait_for_all_io(s); s->initial_zeroing_ongoing = false; + } else { + /* In FULL mode, and image already reads as zero. */ + bitmap_set(s->zero_bitmap, 0, bitmap_length); } /* First part, loop on the sectors and initialize the dirty bitmap. */ @@ -931,7 +1021,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque; BlockDriverState *target_bs = blk_bs(s->target); bool need_drain = true; - BlockDeviceIoStatus iostatus; + BlockDeviceIoStatus iostatus = BLOCK_DEVICE_IO_STATUS__MAX; int64_t length; int64_t target_length; BlockDriverInfo bdi; @@ -1020,7 +1110,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) mirror_free_init(s); s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); - if (!s->is_none_mode) { + if (s->sync_mode != MIRROR_SYNC_MODE_NONE) { ret = mirror_dirty_init(s); if (ret < 0 || job_is_cancelled(&s->common.job)) { goto immediate_exit; @@ -1163,6 +1253,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) assert(s->in_flight == 0); qemu_vfree(s->buf); g_free(s->cow_bitmap); + g_free(s->zero_bitmap); g_free(s->in_flight_bitmap); bdrv_dirty_iter_free(s->dbi); @@ -1341,7 +1432,8 @@ do_sync_target_write(MirrorBlockJob *job, MirrorMethod method, { int ret; size_t qiov_offset = 0; - int64_t bitmap_offset, bitmap_end; + int64_t dirty_bitmap_offset, dirty_bitmap_end; + int64_t zero_bitmap_offset, zero_bitmap_end; if (!QEMU_IS_ALIGNED(offset, job->granularity) && bdrv_dirty_bitmap_get(job->dirty_bitmap, offset)) @@ -1385,31 +1477,54 @@ do_sync_target_write(MirrorBlockJob *job, MirrorMethod method, } /* - * Tails are either clean or shrunk, so for bitmap resetting - * we safely align the range down. + * Tails are either clean or shrunk, so for dirty bitmap resetting + * we safely align the range narrower. But for zero bitmap, round + * range wider for checking or clearing, and narrower for setting. 
*/ - bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity); - bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity); - if (bitmap_offset < bitmap_end) { - bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset, - bitmap_end - bitmap_offset); + dirty_bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity); + dirty_bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity); + if (dirty_bitmap_offset < dirty_bitmap_end) { + bdrv_reset_dirty_bitmap(job->dirty_bitmap, dirty_bitmap_offset, + dirty_bitmap_end - dirty_bitmap_offset); } + zero_bitmap_offset = offset / job->granularity; + zero_bitmap_end = DIV_ROUND_UP(offset + bytes, job->granularity); job_progress_increase_remaining(&job->common.job, bytes); job->active_write_bytes_in_flight += bytes; switch (method) { case MIRROR_METHOD_COPY: + if (job->zero_bitmap) { + bitmap_clear(job->zero_bitmap, zero_bitmap_offset, + zero_bitmap_end - zero_bitmap_offset); + } ret = blk_co_pwritev_part(job->target, offset, bytes, qiov, qiov_offset, flags); break; case MIRROR_METHOD_ZERO: + if (job->zero_bitmap) { + if (find_next_zero_bit(job->zero_bitmap, zero_bitmap_end, + zero_bitmap_offset) == zero_bitmap_end) { + ret = 0; + break; + } + } assert(!qiov); ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags); + if (job->zero_bitmap && ret >= 0) { + bitmap_set(job->zero_bitmap, dirty_bitmap_offset / job->granularity, + (dirty_bitmap_end - dirty_bitmap_offset) / + job->granularity); + } break; case MIRROR_METHOD_DISCARD: + if (job->zero_bitmap) { + bitmap_clear(job->zero_bitmap, zero_bitmap_offset, + zero_bitmap_end - zero_bitmap_offset); + } assert(!qiov); ret = blk_co_pdiscard(job->target, offset, bytes); break; @@ -1430,10 +1545,10 @@ do_sync_target_write(MirrorBlockJob *job, MirrorMethod method, * at function start, and they must be still dirty, as we've locked * the region for in-flight op. 
*/ - bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity); - bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity); - bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset, - bitmap_end - bitmap_offset); + dirty_bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity); + dirty_bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity); + bdrv_set_dirty_bitmap(job->dirty_bitmap, dirty_bitmap_offset, + dirty_bitmap_end - dirty_bitmap_offset); qatomic_set(&job->actively_synced, false); action = mirror_error_action(job, false, -ret); @@ -1711,15 +1826,16 @@ static BlockJob *mirror_start_job( int creation_flags, BlockDriverState *target, const char *replaces, int64_t speed, uint32_t granularity, int64_t buf_size, + MirrorSyncMode sync_mode, BlockMirrorBackingMode backing_mode, - bool zero_target, + bool target_is_zero, BlockdevOnError on_source_error, BlockdevOnError on_target_error, bool unmap, BlockCompletionFunc *cb, void *opaque, const BlockJobDriver *driver, - bool is_none_mode, BlockDriverState *base, + BlockDriverState *base, bool auto_complete, const char *filter_node_name, bool is_mirror, MirrorCopyMode copy_mode, bool base_ro, @@ -1878,9 +1994,9 @@ static BlockJob *mirror_start_job( s->replaces = g_strdup(replaces); s->on_source_error = on_source_error; s->on_target_error = on_target_error; - s->is_none_mode = is_none_mode; + s->sync_mode = sync_mode; s->backing_mode = backing_mode; - s->zero_target = zero_target; + s->target_is_zero = target_is_zero; qatomic_set(&s->copy_mode, copy_mode); s->base = base; s->base_overlay = bdrv_find_overlay(bs, base); @@ -1904,7 +2020,7 @@ static BlockJob *mirror_start_job( */ bdrv_disable_dirty_bitmap(s->dirty_bitmap); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); ret = block_job_add_bdrv(&s->common, "source", bs, 0, BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE | BLK_PERM_CONSISTENT_READ, @@ -2009,13 +2125,12 @@ void mirror_start(const char *job_id, BlockDriverState *bs, int creation_flags, int64_t speed, uint32_t granularity, int64_t buf_size, MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, - bool zero_target, + bool target_is_zero, BlockdevOnError on_source_error, BlockdevOnError on_target_error, bool unmap, const char *filter_node_name, MirrorCopyMode copy_mode, Error **errp) { - bool is_none_mode; BlockDriverState *base; GLOBAL_STATE_CODE(); @@ -2028,14 +2143,13 @@ void mirror_start(const char *job_id, BlockDriverState *bs, } bdrv_graph_rdlock_main_loop(); - is_none_mode = mode == MIRROR_SYNC_MODE_NONE; base = mode == MIRROR_SYNC_MODE_TOP ? 
bdrv_backing_chain_next(bs) : NULL; bdrv_graph_rdunlock_main_loop(); mirror_start_job(job_id, bs, creation_flags, target, replaces, - speed, granularity, buf_size, backing_mode, zero_target, - on_source_error, on_target_error, unmap, NULL, NULL, - &mirror_job_driver, is_none_mode, base, false, + speed, granularity, buf_size, mode, backing_mode, + target_is_zero, on_source_error, on_target_error, unmap, + NULL, NULL, &mirror_job_driver, base, false, filter_node_name, true, copy_mode, false, errp); } @@ -2061,9 +2175,9 @@ BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs, job = mirror_start_job( job_id, bs, creation_flags, base, NULL, speed, 0, 0, - MIRROR_LEAVE_BACKING_CHAIN, false, + MIRROR_SYNC_MODE_TOP, MIRROR_LEAVE_BACKING_CHAIN, false, on_error, on_error, true, cb, opaque, - &commit_active_job_driver, false, base, auto_complete, + &commit_active_job_driver, base, auto_complete, filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND, base_read_only, errp); if (!job) { diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c index d954bec6f1e..282d1c386e5 100644 --- a/block/monitor/block-hmp-cmds.c +++ b/block/monitor/block-hmp-cmds.c @@ -37,11 +37,11 @@ #include "qemu/osdep.h" #include "hw/boards.h" -#include "sysemu/block-backend.h" -#include "sysemu/blockdev.h" +#include "system/block-backend.h" +#include "system/blockdev.h" #include "qapi/qapi-commands-block.h" #include "qapi/qapi-commands-block-export.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/error.h" #include "qapi/qmp/qerror.h" #include "qemu/config-file.h" @@ -49,7 +49,7 @@ #include "qemu/sockets.h" #include "qemu/cutils.h" #include "qemu/error-report.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "monitor/monitor.h" #include "monitor/hmp.h" #include "block/nbd.h" @@ -144,7 +144,7 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict) Error *local_err = NULL; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); + bdrv_graph_rdlock_main_loop(); bs = bdrv_find_node(id); if (bs) { @@ -152,29 +152,31 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict) if (local_err) { error_report_err(local_err); } - return; + goto unlock; } blk = blk_by_name(id); if (!blk) { error_report("Device '%s' not found", id); - return; + goto unlock; } if (!blk_legacy_dinfo(blk)) { error_report("Deleting device added with blockdev-add" " is not supported"); - return; + goto unlock; } bs = blk_bs(blk); if (bs) { if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, &local_err)) { error_report_err(local_err); - return; + goto unlock; } + bdrv_graph_rdunlock_main_loop(); blk_remove_bs(blk); + bdrv_graph_rdlock_main_loop(); } /* Make the BlockBackend and the attached BlockDriverState anonymous */ @@ -191,6 +193,9 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict) } else { blk_unref(blk); } + +unlock: + bdrv_graph_rdunlock_main_loop(); } void hmp_commit(Monitor *mon, const QDict *qdict) @@ -402,7 +407,8 @@ void hmp_nbd_server_start(Monitor *mon, const QDict *qdict) goto exit; } - nbd_server_start(addr, NULL, NULL, 0, &local_err); + nbd_server_start(addr, NBD_DEFAULT_HANDSHAKE_MAX_SECS, NULL, NULL, + NBD_DEFAULT_MAX_CONNECTIONS, &local_err); qapi_free_SocketAddress(addr); if (local_err != NULL) { goto exit; @@ -629,11 +635,12 @@ static void print_block_info(Monitor *mon, BlockInfo *info, } if (inserted) { - monitor_printf(mon, ": %s (%s%s%s)\n", + monitor_printf(mon, ": %s (%s%s%s%s)\n", inserted->file, inserted->drv, inserted->ro ? 
", read-only" : "", - inserted->encrypted ? ", encrypted" : ""); + inserted->encrypted ? ", encrypted" : "", + inserted->active ? "" : ", inactive"); } else { monitor_printf(mon, ": [not inserted]\n"); } diff --git a/block/nbd.c b/block/nbd.c index d464315766e..d5a2b21c6d1 100644 --- a/block/nbd.c +++ b/block/nbd.c @@ -36,7 +36,7 @@ #include "qemu/main-loop.h" #include "qapi/qapi-visit-sockets.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qstring.h" #include "qapi/clone-visitor.h" #include "block/qdict.h" @@ -1397,8 +1397,8 @@ nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) } static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status( - BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes, - int64_t *pnum, int64_t *map, BlockDriverState **file) + BlockDriverState *bs, unsigned int mode, int64_t offset, + int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) { int ret, request_ret; NBDExtent64 extent = { 0 }; diff --git a/block/nfs.c b/block/nfs.c index 0500f60c08f..0a7d38db095 100644 --- a/block/nfs.c +++ b/block/nfs.c @@ -39,10 +39,10 @@ #include "qemu/module.h" #include "qemu/option.h" #include "qemu/cutils.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "qapi/qapi-visit-block-core.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qobject-output-visitor.h" #include diff --git a/block/null.c b/block/null.c index 4730acc1eb2..4e448d593d7 100644 --- a/block/null.c +++ b/block/null.c @@ -12,13 +12,13 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "qemu/module.h" #include "qemu/option.h" #include "block/block-io.h" #include "block/block_int.h" -#include "sysemu/replay.h" +#include "system/replay.h" #define NULL_OPT_LATENCY "latency-ns" #define NULL_OPT_ZEROES "read-zeroes" @@ -227,9 +227,9 @@ static int null_reopen_prepare(BDRVReopenState *reopen_state, } static int coroutine_fn null_co_block_status(BlockDriverState *bs, - bool want_zero, int64_t offset, - int64_t bytes, int64_t *pnum, - int64_t *map, + unsigned int mode, + int64_t offset, int64_t bytes, + int64_t *pnum, int64_t *map, BlockDriverState **file) { BDRVNullState *s = bs->opaque; diff --git a/block/nvme.c b/block/nvme.c index 3b588b139f6..8df53ee4cac 100644 --- a/block/nvme.c +++ b/block/nvme.c @@ -14,10 +14,11 @@ #include "qemu/osdep.h" #include #include "qapi/error.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "qemu/defer-call.h" #include "qemu/error-report.h" +#include "qemu/host-pci-mmio.h" #include "qemu/main-loop.h" #include "qemu/module.h" #include "qemu/cutils.h" @@ -26,8 +27,8 @@ #include "qemu/vfio-helpers.h" #include "block/block-io.h" #include "block/block_int.h" -#include "sysemu/block-backend.h" -#include "sysemu/replay.h" +#include "system/block-backend.h" +#include "system/replay.h" #include "trace.h" #include "block/nvme.h" @@ -60,7 +61,7 @@ typedef struct { uint8_t *queue; uint64_t iova; /* Hardware MMIO register */ - volatile uint32_t *doorbell; + uint32_t *doorbell; } NVMeQueue; typedef struct { @@ -100,7 +101,7 @@ struct BDRVNVMeState { QEMUVFIOState *vfio; void *bar0_wo_map; /* Memory mapped registers */ - volatile struct { + struct { uint32_t sq_tail; uint32_t cq_head; } *doorbells; @@ 
-292,7 +293,7 @@ static void nvme_kick(NVMeQueuePair *q) assert(!(q->sq.tail & 0xFF00)); /* Fence the write to submission queue entry before notifying the device. */ smp_wmb(); - *q->sq.doorbell = cpu_to_le32(q->sq.tail); + host_pci_stl_le_p(q->sq.doorbell, q->sq.tail); q->inflight += q->need_kick; q->need_kick = 0; } @@ -441,7 +442,7 @@ static bool nvme_process_completion(NVMeQueuePair *q) if (progress) { /* Notify the device so it can post more completions. */ smp_mb_release(); - *q->cq.doorbell = cpu_to_le32(q->cq.head); + host_pci_stl_le_p(q->cq.doorbell, q->cq.head); nvme_wake_free_req_locked(q); } @@ -460,7 +461,7 @@ static void nvme_process_completion_bh(void *opaque) * so notify the device that it has space to fill in more completions now. */ smp_mb_release(); - *q->cq.doorbell = cpu_to_le32(q->cq.head); + host_pci_stl_le_p(q->cq.doorbell, q->cq.head); nvme_wake_free_req_locked(q); nvme_process_completion(q); @@ -749,9 +750,10 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, int ret; uint64_t cap; uint32_t ver; + uint32_t cc; uint64_t timeout_ms; uint64_t deadline, now; - volatile NvmeBar *regs = NULL; + NvmeBar *regs = NULL; qemu_co_mutex_init(&s->dma_map_lock); qemu_co_queue_init(&s->dma_flush_queue); @@ -779,7 +781,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, /* Perform initialize sequence as described in NVMe spec "7.6.1 * Initialization". */ - cap = le64_to_cpu(regs->cap); + cap = host_pci_ldq_le_p(®s->cap); trace_nvme_controller_capability_raw(cap); trace_nvme_controller_capability("Maximum Queue Entries Supported", 1 + NVME_CAP_MQES(cap)); @@ -805,16 +807,17 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, bs->bl.request_alignment = s->page_size; timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000); - ver = le32_to_cpu(regs->vs); + ver = host_pci_ldl_le_p(®s->vs); trace_nvme_controller_spec_version(extract32(ver, 16, 16), extract32(ver, 8, 8), extract32(ver, 0, 8)); /* Reset device to get a clean state. */ - regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE); + cc = host_pci_ldl_le_p(®s->cc); + host_pci_stl_le_p(®s->cc, cc & 0xFE); /* Wait for CSTS.RDY = 0. */ deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS; - while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) { + while (NVME_CSTS_RDY(host_pci_ldl_le_p(®s->csts))) { if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) { error_setg(errp, "Timeout while waiting for device to reset (%" PRId64 " ms)", @@ -843,19 +846,21 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, s->queues[INDEX_ADMIN] = q; s->queue_count = 1; QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000); - regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) | - ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT)); - regs->asq = cpu_to_le64(q->sq.iova); - regs->acq = cpu_to_le64(q->cq.iova); + host_pci_stl_le_p(®s->aqa, + ((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) | + ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT)); + host_pci_stq_le_p(®s->asq, q->sq.iova); + host_pci_stq_le_p(®s->acq, q->cq.iova); /* After setting up all control registers we can enable device now. */ - regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) | - (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) | - CC_EN_MASK); + host_pci_stl_le_p(®s->cc, + (ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) | + (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) | + CC_EN_MASK); /* Wait for CSTS.RDY = 1. 
*/ now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); deadline = now + timeout_ms * SCALE_MS; - while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) { + while (!NVME_CSTS_RDY(host_pci_ldl_le_p(®s->csts))) { if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) { error_setg(errp, "Timeout while waiting for device to start (%" PRId64 " ms)", diff --git a/block/parallels-ext.c b/block/parallels-ext.c index b4e14c88f2b..778b8f684ef 100644 --- a/block/parallels-ext.c +++ b/block/parallels-ext.c @@ -206,7 +206,7 @@ parallels_parse_format_extension(BlockDriverState *bs, uint8_t *ext_cluster, goto fail; } - ret = qcrypto_hash_bytes(QCRYPTO_HASH_ALG_MD5, (char *)pos, remaining, + ret = qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_MD5, (char *)pos, remaining, &hash, &hash_len, errp); if (ret < 0) { goto fail; diff --git a/block/parallels.c b/block/parallels.c index 9205a0864fa..3a375e2a8ab 100644 --- a/block/parallels.c +++ b/block/parallels.c @@ -33,10 +33,10 @@ #include "qapi/error.h" #include "block/block_int.h" #include "block/qdict.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/module.h" #include "qemu/option.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-visit-block-core.h" #include "qemu/bswap.h" @@ -184,11 +184,11 @@ static int mark_used(BlockDriverState *bs, unsigned long *bitmap, BDRVParallelsState *s = bs->opaque; uint32_t cluster_index = host_cluster_index(s, off); unsigned long next_used; - if (cluster_index + count > bitmap_size) { + if ((uint64_t)cluster_index + count > bitmap_size) { return -E2BIG; } next_used = find_next_bit(bitmap, bitmap_size, cluster_index); - if (next_used < cluster_index + count) { + if (next_used < (uint64_t)cluster_index + count) { return -EBUSY; } bitmap_set(bitmap, cluster_index, count); @@ -416,9 +416,9 @@ parallels_co_flush_to_os(BlockDriverState *bs) } static int coroutine_fn GRAPH_RDLOCK -parallels_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset, - int64_t bytes, int64_t *pnum, int64_t *map, - BlockDriverState **file) +parallels_co_block_status(BlockDriverState *bs, unsigned int mode, + int64_t offset, int64_t bytes, int64_t *pnum, + int64_t *map, BlockDriverState **file) { BDRVParallelsState *s = bs->opaque; int count; @@ -1298,6 +1298,10 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags, error_setg(errp, "Catalog too large"); return -EFBIG; } + if (le64_to_cpu(ph.ext_off) >= (INT64_MAX >> BDRV_SECTOR_BITS)) { + error_setg(errp, "Invalid image: Too big offset"); + return -EFBIG; + } size = bat_entry_off(s->bat_size); s->header_size = ROUND_UP(size, bdrv_opt_mem_align(bs->file->bs)); diff --git a/block/qapi-sysemu.c b/block/qapi-sysemu.c deleted file mode 100644 index e4282631d23..00000000000 --- a/block/qapi-sysemu.c +++ /dev/null @@ -1,574 +0,0 @@ -/* - * QMP command handlers specific to the system emulators - * - * Copyright (c) 2003-2008 Fabrice Bellard - * - * This work is licensed under the terms of the GNU GPL, version 2 or - * later. See the COPYING file in the top-level directory. 
- * - * This file incorporates work covered by the following copyright and - * permission notice: - * - * Copyright (c) 2003-2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "qemu/osdep.h" - -#include "block/block_int.h" -#include "qapi/error.h" -#include "qapi/qapi-commands-block.h" -#include "qapi/qmp/qdict.h" -#include "sysemu/block-backend.h" -#include "sysemu/blockdev.h" - -static BlockBackend *qmp_get_blk(const char *blk_name, const char *qdev_id, - Error **errp) -{ - BlockBackend *blk; - - if (!blk_name == !qdev_id) { - error_setg(errp, "Need exactly one of 'device' and 'id'"); - return NULL; - } - - if (qdev_id) { - blk = blk_by_qdev_id(qdev_id, errp); - } else { - blk = blk_by_name(blk_name); - if (blk == NULL) { - error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, - "Device '%s' not found", blk_name); - } - } - - return blk; -} - -/* - * Attempt to open the tray of @device. - * If @force, ignore its tray lock. - * Else, if the tray is locked, don't open it, but ask the guest to open it. - * On error, store an error through @errp and return -errno. - * If @device does not exist, return -ENODEV. - * If it has no removable media, return -ENOTSUP. - * If it has no tray, return -ENOSYS. - * If the guest was asked to open the tray, return -EINPROGRESS. - * Else, return 0. 
- */ -static int do_open_tray(const char *blk_name, const char *qdev_id, - bool force, Error **errp) -{ - BlockBackend *blk; - const char *device = qdev_id ?: blk_name; - bool locked; - - blk = qmp_get_blk(blk_name, qdev_id, errp); - if (!blk) { - return -ENODEV; - } - - if (!blk_dev_has_removable_media(blk)) { - error_setg(errp, "Device '%s' is not removable", device); - return -ENOTSUP; - } - - if (!blk_dev_has_tray(blk)) { - error_setg(errp, "Device '%s' does not have a tray", device); - return -ENOSYS; - } - - if (blk_dev_is_tray_open(blk)) { - return 0; - } - - locked = blk_dev_is_medium_locked(blk); - if (locked) { - blk_dev_eject_request(blk, force); - } - - if (!locked || force) { - blk_dev_change_media_cb(blk, false, &error_abort); - } - - if (locked && !force) { - error_setg(errp, "Device '%s' is locked and force was not specified, " - "wait for tray to open and try again", device); - return -EINPROGRESS; - } - - return 0; -} - -void qmp_blockdev_open_tray(const char *device, - const char *id, - bool has_force, bool force, - Error **errp) -{ - Error *local_err = NULL; - int rc; - - if (!has_force) { - force = false; - } - rc = do_open_tray(device, id, force, &local_err); - if (rc && rc != -ENOSYS && rc != -EINPROGRESS) { - error_propagate(errp, local_err); - return; - } - error_free(local_err); -} - -void qmp_blockdev_close_tray(const char *device, - const char *id, - Error **errp) -{ - BlockBackend *blk; - Error *local_err = NULL; - - blk = qmp_get_blk(device, id, errp); - if (!blk) { - return; - } - - if (!blk_dev_has_removable_media(blk)) { - error_setg(errp, "Device '%s' is not removable", device ?: id); - return; - } - - if (!blk_dev_has_tray(blk)) { - /* Ignore this command on tray-less devices */ - return; - } - - if (!blk_dev_is_tray_open(blk)) { - return; - } - - blk_dev_change_media_cb(blk, true, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } -} - -static void GRAPH_UNLOCKED -blockdev_remove_medium(const char *device, const char *id, Error **errp) -{ - BlockBackend *blk; - BlockDriverState *bs; - bool has_attached_device; - - GLOBAL_STATE_CODE(); - - blk = qmp_get_blk(device, id, errp); - if (!blk) { - return; - } - - /* For BBs without a device, we can exchange the BDS tree at will */ - has_attached_device = blk_get_attached_dev(blk); - - if (has_attached_device && !blk_dev_has_removable_media(blk)) { - error_setg(errp, "Device '%s' is not removable", device ?: id); - return; - } - - if (has_attached_device && blk_dev_has_tray(blk) && - !blk_dev_is_tray_open(blk)) - { - error_setg(errp, "Tray of device '%s' is not open", device ?: id); - return; - } - - bs = blk_bs(blk); - if (!bs) { - return; - } - - bdrv_graph_rdlock_main_loop(); - if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) { - bdrv_graph_rdunlock_main_loop(); - return; - } - bdrv_graph_rdunlock_main_loop(); - - blk_remove_bs(blk); - - if (!blk_dev_has_tray(blk)) { - /* For tray-less devices, blockdev-open-tray is a no-op (or may not be - * called at all); therefore, the medium needs to be ejected here. - * Do it after blk_remove_bs() so blk_is_inserted(blk) returns the @load - * value passed here (i.e. false). 
*/ - blk_dev_change_media_cb(blk, false, &error_abort); - } -} - -void qmp_blockdev_remove_medium(const char *id, Error **errp) -{ - blockdev_remove_medium(NULL, id, errp); -} - -static void qmp_blockdev_insert_anon_medium(BlockBackend *blk, - BlockDriverState *bs, Error **errp) -{ - Error *local_err = NULL; - bool has_device; - int ret; - - /* For BBs without a device, we can exchange the BDS tree at will */ - has_device = blk_get_attached_dev(blk); - - if (has_device && !blk_dev_has_removable_media(blk)) { - error_setg(errp, "Device is not removable"); - return; - } - - if (has_device && blk_dev_has_tray(blk) && !blk_dev_is_tray_open(blk)) { - error_setg(errp, "Tray of the device is not open"); - return; - } - - if (blk_bs(blk)) { - error_setg(errp, "There already is a medium in the device"); - return; - } - - ret = blk_insert_bs(blk, bs, errp); - if (ret < 0) { - return; - } - - if (!blk_dev_has_tray(blk)) { - /* For tray-less devices, blockdev-close-tray is a no-op (or may not be - * called at all); therefore, the medium needs to be pushed into the - * slot here. - * Do it after blk_insert_bs() so blk_is_inserted(blk) returns the @load - * value passed here (i.e. true). */ - blk_dev_change_media_cb(blk, true, &local_err); - if (local_err) { - error_propagate(errp, local_err); - blk_remove_bs(blk); - return; - } - } -} - -static void blockdev_insert_medium(const char *device, const char *id, - const char *node_name, Error **errp) -{ - BlockBackend *blk; - BlockDriverState *bs; - - GRAPH_RDLOCK_GUARD_MAINLOOP(); - - blk = qmp_get_blk(device, id, errp); - if (!blk) { - return; - } - - bs = bdrv_find_node(node_name); - if (!bs) { - error_setg(errp, "Node '%s' not found", node_name); - return; - } - - if (bdrv_has_blk(bs)) { - error_setg(errp, "Node '%s' is already in use", node_name); - return; - } - - qmp_blockdev_insert_anon_medium(blk, bs, errp); -} - -void qmp_blockdev_insert_medium(const char *id, const char *node_name, - Error **errp) -{ - blockdev_insert_medium(NULL, id, node_name, errp); -} - -void qmp_blockdev_change_medium(const char *device, - const char *id, - const char *filename, - const char *format, - bool has_force, bool force, - bool has_read_only, - BlockdevChangeReadOnlyMode read_only, - Error **errp) -{ - BlockBackend *blk; - BlockDriverState *medium_bs = NULL; - int bdrv_flags; - bool detect_zeroes; - int rc; - QDict *options = NULL; - Error *err = NULL; - - blk = qmp_get_blk(device, id, errp); - if (!blk) { - goto fail; - } - - if (blk_bs(blk)) { - blk_update_root_state(blk); - } - - bdrv_flags = blk_get_open_flags_from_root_state(blk); - bdrv_flags &= ~(BDRV_O_TEMPORARY | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | - BDRV_O_PROTOCOL | BDRV_O_AUTO_RDONLY); - - if (!has_read_only) { - read_only = BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN; - } - - switch (read_only) { - case BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN: - break; - - case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_ONLY: - bdrv_flags &= ~BDRV_O_RDWR; - break; - - case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_WRITE: - bdrv_flags |= BDRV_O_RDWR; - break; - - default: - abort(); - } - - options = qdict_new(); - detect_zeroes = blk_get_detect_zeroes_from_root_state(blk); - qdict_put_str(options, "detect-zeroes", detect_zeroes ? 
"on" : "off"); - - if (format) { - qdict_put_str(options, "driver", format); - } - - medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp); - - if (!medium_bs) { - goto fail; - } - - rc = do_open_tray(device, id, force, &err); - if (rc && rc != -ENOSYS) { - error_propagate(errp, err); - goto fail; - } - error_free(err); - err = NULL; - - blockdev_remove_medium(device, id, &err); - if (err) { - error_propagate(errp, err); - goto fail; - } - - qmp_blockdev_insert_anon_medium(blk, medium_bs, &err); - if (err) { - error_propagate(errp, err); - goto fail; - } - - qmp_blockdev_close_tray(device, id, errp); - -fail: - /* If the medium has been inserted, the device has its own reference, so - * ours must be relinquished; and if it has not been inserted successfully, - * the reference must be relinquished anyway */ - bdrv_unref(medium_bs); -} - -void qmp_eject(const char *device, const char *id, - bool has_force, bool force, Error **errp) -{ - Error *local_err = NULL; - int rc; - - if (!has_force) { - force = false; - } - - rc = do_open_tray(device, id, force, &local_err); - if (rc && rc != -ENOSYS) { - error_propagate(errp, local_err); - return; - } - error_free(local_err); - - blockdev_remove_medium(device, id, errp); -} - -/* throttling disk I/O limits */ -void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp) -{ - ThrottleConfig cfg; - BlockDriverState *bs; - BlockBackend *blk; - - blk = qmp_get_blk(arg->device, arg->id, errp); - if (!blk) { - return; - } - - bs = blk_bs(blk); - if (!bs) { - error_setg(errp, "Device has no medium"); - return; - } - - throttle_config_init(&cfg); - cfg.buckets[THROTTLE_BPS_TOTAL].avg = arg->bps; - cfg.buckets[THROTTLE_BPS_READ].avg = arg->bps_rd; - cfg.buckets[THROTTLE_BPS_WRITE].avg = arg->bps_wr; - - cfg.buckets[THROTTLE_OPS_TOTAL].avg = arg->iops; - cfg.buckets[THROTTLE_OPS_READ].avg = arg->iops_rd; - cfg.buckets[THROTTLE_OPS_WRITE].avg = arg->iops_wr; - - if (arg->has_bps_max) { - cfg.buckets[THROTTLE_BPS_TOTAL].max = arg->bps_max; - } - if (arg->has_bps_rd_max) { - cfg.buckets[THROTTLE_BPS_READ].max = arg->bps_rd_max; - } - if (arg->has_bps_wr_max) { - cfg.buckets[THROTTLE_BPS_WRITE].max = arg->bps_wr_max; - } - if (arg->has_iops_max) { - cfg.buckets[THROTTLE_OPS_TOTAL].max = arg->iops_max; - } - if (arg->has_iops_rd_max) { - cfg.buckets[THROTTLE_OPS_READ].max = arg->iops_rd_max; - } - if (arg->has_iops_wr_max) { - cfg.buckets[THROTTLE_OPS_WRITE].max = arg->iops_wr_max; - } - - if (arg->has_bps_max_length) { - cfg.buckets[THROTTLE_BPS_TOTAL].burst_length = arg->bps_max_length; - } - if (arg->has_bps_rd_max_length) { - cfg.buckets[THROTTLE_BPS_READ].burst_length = arg->bps_rd_max_length; - } - if (arg->has_bps_wr_max_length) { - cfg.buckets[THROTTLE_BPS_WRITE].burst_length = arg->bps_wr_max_length; - } - if (arg->has_iops_max_length) { - cfg.buckets[THROTTLE_OPS_TOTAL].burst_length = arg->iops_max_length; - } - if (arg->has_iops_rd_max_length) { - cfg.buckets[THROTTLE_OPS_READ].burst_length = arg->iops_rd_max_length; - } - if (arg->has_iops_wr_max_length) { - cfg.buckets[THROTTLE_OPS_WRITE].burst_length = arg->iops_wr_max_length; - } - - if (arg->has_iops_size) { - cfg.op_size = arg->iops_size; - } - - if (!throttle_is_valid(&cfg, errp)) { - return; - } - - if (throttle_enabled(&cfg)) { - /* Enable I/O limits if they're not enabled yet, otherwise - * just update the throttling group. 
*/ - if (!blk_get_public(blk)->throttle_group_member.throttle_state) { - blk_io_limits_enable(blk, arg->group ?: arg->device ?: arg->id); - } else if (arg->group) { - blk_io_limits_update_group(blk, arg->group); - } - /* Set the new throttling configuration */ - blk_set_io_limits(blk, &cfg); - } else if (blk_get_public(blk)->throttle_group_member.throttle_state) { - /* If all throttling settings are set to 0, disable I/O limits */ - blk_io_limits_disable(blk); - } -} - -void qmp_block_latency_histogram_set( - const char *id, - bool has_boundaries, uint64List *boundaries, - bool has_boundaries_read, uint64List *boundaries_read, - bool has_boundaries_write, uint64List *boundaries_write, - bool has_boundaries_append, uint64List *boundaries_append, - bool has_boundaries_flush, uint64List *boundaries_flush, - Error **errp) -{ - BlockBackend *blk = qmp_get_blk(NULL, id, errp); - BlockAcctStats *stats; - int ret; - - if (!blk) { - return; - } - - stats = blk_get_stats(blk); - - if (!has_boundaries && !has_boundaries_read && !has_boundaries_write && - !has_boundaries_flush) - { - block_latency_histograms_clear(stats); - return; - } - - if (has_boundaries || has_boundaries_read) { - ret = block_latency_histogram_set( - stats, BLOCK_ACCT_READ, - has_boundaries_read ? boundaries_read : boundaries); - if (ret) { - error_setg(errp, "Device '%s' set read boundaries fail", id); - return; - } - } - - if (has_boundaries || has_boundaries_write) { - ret = block_latency_histogram_set( - stats, BLOCK_ACCT_WRITE, - has_boundaries_write ? boundaries_write : boundaries); - if (ret) { - error_setg(errp, "Device '%s' set write boundaries fail", id); - return; - } - } - - if (has_boundaries || has_boundaries_append) { - ret = block_latency_histogram_set( - stats, BLOCK_ACCT_ZONE_APPEND, - has_boundaries_append ? boundaries_append : boundaries); - if (ret) { - error_setg(errp, "Device '%s' set append write boundaries fail", id); - return; - } - } - - if (has_boundaries || has_boundaries_flush) { - ret = block_latency_histogram_set( - stats, BLOCK_ACCT_FLUSH, - has_boundaries_flush ? boundaries_flush : boundaries); - if (ret) { - error_setg(errp, "Device '%s' set flush boundaries fail", id); - return; - } - } -} diff --git a/block/qapi-system.c b/block/qapi-system.c new file mode 100644 index 00000000000..54b7409b2b9 --- /dev/null +++ b/block/qapi-system.c @@ -0,0 +1,574 @@ +/* + * QMP command handlers specific to the system emulators + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" + +#include "block/block_int.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-block.h" +#include "qobject/qdict.h" +#include "system/block-backend.h" +#include "system/blockdev.h" + +static BlockBackend *qmp_get_blk(const char *blk_name, const char *qdev_id, + Error **errp) +{ + BlockBackend *blk; + + if (!blk_name == !qdev_id) { + error_setg(errp, "Need exactly one of 'device' and 'id'"); + return NULL; + } + + if (qdev_id) { + blk = blk_by_qdev_id(qdev_id, errp); + } else { + blk = blk_by_name(blk_name); + if (blk == NULL) { + error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, + "Device '%s' not found", blk_name); + } + } + + return blk; +} + +/* + * Attempt to open the tray of @device. + * If @force, ignore its tray lock. + * Else, if the tray is locked, don't open it, but ask the guest to open it. + * On error, store an error through @errp and return -errno. + * If @device does not exist, return -ENODEV. + * If it has no removable media, return -ENOTSUP. + * If it has no tray, return -ENOSYS. + * If the guest was asked to open the tray, return -EINPROGRESS. + * Else, return 0. + */ +static int do_open_tray(const char *blk_name, const char *qdev_id, + bool force, Error **errp) +{ + BlockBackend *blk; + const char *device = qdev_id ?: blk_name; + bool locked; + + blk = qmp_get_blk(blk_name, qdev_id, errp); + if (!blk) { + return -ENODEV; + } + + if (!blk_dev_has_removable_media(blk)) { + error_setg(errp, "Device '%s' is not removable", device); + return -ENOTSUP; + } + + if (!blk_dev_has_tray(blk)) { + error_setg(errp, "Device '%s' does not have a tray", device); + return -ENOSYS; + } + + if (blk_dev_is_tray_open(blk)) { + return 0; + } + + locked = blk_dev_is_medium_locked(blk); + if (locked) { + blk_dev_eject_request(blk, force); + } + + if (!locked || force) { + blk_dev_change_media_cb(blk, false, &error_abort); + } + + if (locked && !force) { + error_setg(errp, "Device '%s' is locked and force was not specified, " + "wait for tray to open and try again", device); + return -EINPROGRESS; + } + + return 0; +} + +void qmp_blockdev_open_tray(const char *device, + const char *id, + bool has_force, bool force, + Error **errp) +{ + Error *local_err = NULL; + int rc; + + if (!has_force) { + force = false; + } + rc = do_open_tray(device, id, force, &local_err); + if (rc && rc != -ENOSYS && rc != -EINPROGRESS) { + error_propagate(errp, local_err); + return; + } + error_free(local_err); +} + +void qmp_blockdev_close_tray(const char *device, + const char *id, + Error **errp) +{ + BlockBackend *blk; + Error *local_err = NULL; + + blk = qmp_get_blk(device, id, errp); + if (!blk) { + return; + } + + if (!blk_dev_has_removable_media(blk)) { + error_setg(errp, "Device '%s' is not removable", device ?: id); + return; + } + + if (!blk_dev_has_tray(blk)) { + /* Ignore this command on tray-less devices */ + return; + } + + if (!blk_dev_is_tray_open(blk)) { + return; + } + + blk_dev_change_media_cb(blk, true, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + 
} +} + +static void GRAPH_UNLOCKED +blockdev_remove_medium(const char *device, const char *id, Error **errp) +{ + BlockBackend *blk; + BlockDriverState *bs; + bool has_attached_device; + + GLOBAL_STATE_CODE(); + + blk = qmp_get_blk(device, id, errp); + if (!blk) { + return; + } + + /* For BBs without a device, we can exchange the BDS tree at will */ + has_attached_device = blk_get_attached_dev(blk); + + if (has_attached_device && !blk_dev_has_removable_media(blk)) { + error_setg(errp, "Device '%s' is not removable", device ?: id); + return; + } + + if (has_attached_device && blk_dev_has_tray(blk) && + !blk_dev_is_tray_open(blk)) + { + error_setg(errp, "Tray of device '%s' is not open", device ?: id); + return; + } + + bs = blk_bs(blk); + if (!bs) { + return; + } + + bdrv_graph_rdlock_main_loop(); + if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) { + bdrv_graph_rdunlock_main_loop(); + return; + } + bdrv_graph_rdunlock_main_loop(); + + blk_remove_bs(blk); + + if (!blk_dev_has_tray(blk)) { + /* For tray-less devices, blockdev-open-tray is a no-op (or may not be + * called at all); therefore, the medium needs to be ejected here. + * Do it after blk_remove_bs() so blk_is_inserted(blk) returns the @load + * value passed here (i.e. false). */ + blk_dev_change_media_cb(blk, false, &error_abort); + } +} + +void qmp_blockdev_remove_medium(const char *id, Error **errp) +{ + blockdev_remove_medium(NULL, id, errp); +} + +static void qmp_blockdev_insert_anon_medium(BlockBackend *blk, + BlockDriverState *bs, Error **errp) +{ + Error *local_err = NULL; + bool has_device; + int ret; + + /* For BBs without a device, we can exchange the BDS tree at will */ + has_device = blk_get_attached_dev(blk); + + if (has_device && !blk_dev_has_removable_media(blk)) { + error_setg(errp, "Device is not removable"); + return; + } + + if (has_device && blk_dev_has_tray(blk) && !blk_dev_is_tray_open(blk)) { + error_setg(errp, "Tray of the device is not open"); + return; + } + + if (blk_bs(blk)) { + error_setg(errp, "There already is a medium in the device"); + return; + } + + ret = blk_insert_bs(blk, bs, errp); + if (ret < 0) { + return; + } + + if (!blk_dev_has_tray(blk)) { + /* For tray-less devices, blockdev-close-tray is a no-op (or may not be + * called at all); therefore, the medium needs to be pushed into the + * slot here. + * Do it after blk_insert_bs() so blk_is_inserted(blk) returns the @load + * value passed here (i.e. true). 
*/ + blk_dev_change_media_cb(blk, true, &local_err); + if (local_err) { + error_propagate(errp, local_err); + blk_remove_bs(blk); + return; + } + } +} + +static void blockdev_insert_medium(const char *device, const char *id, + const char *node_name, Error **errp) +{ + BlockBackend *blk; + BlockDriverState *bs; + + GRAPH_RDLOCK_GUARD_MAINLOOP(); + + blk = qmp_get_blk(device, id, errp); + if (!blk) { + return; + } + + bs = bdrv_find_node(node_name); + if (!bs) { + error_setg(errp, "Node '%s' not found", node_name); + return; + } + + if (bdrv_has_blk(bs)) { + error_setg(errp, "Node '%s' is already in use", node_name); + return; + } + + qmp_blockdev_insert_anon_medium(blk, bs, errp); +} + +void qmp_blockdev_insert_medium(const char *id, const char *node_name, + Error **errp) +{ + blockdev_insert_medium(NULL, id, node_name, errp); +} + +void qmp_blockdev_change_medium(const char *device, + const char *id, + const char *filename, + const char *format, + bool has_force, bool force, + bool has_read_only, + BlockdevChangeReadOnlyMode read_only, + Error **errp) +{ + BlockBackend *blk; + BlockDriverState *medium_bs = NULL; + int bdrv_flags; + bool detect_zeroes; + int rc; + QDict *options = NULL; + Error *err = NULL; + + blk = qmp_get_blk(device, id, errp); + if (!blk) { + goto fail; + } + + if (blk_bs(blk)) { + blk_update_root_state(blk); + } + + bdrv_flags = blk_get_open_flags_from_root_state(blk); + bdrv_flags &= ~(BDRV_O_TEMPORARY | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | + BDRV_O_PROTOCOL | BDRV_O_AUTO_RDONLY); + + if (!has_read_only) { + read_only = BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN; + } + + switch (read_only) { + case BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN: + break; + + case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_ONLY: + bdrv_flags &= ~BDRV_O_RDWR; + break; + + case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_WRITE: + bdrv_flags |= BDRV_O_RDWR; + break; + + default: + abort(); + } + + options = qdict_new(); + detect_zeroes = blk_get_detect_zeroes_from_root_state(blk); + qdict_put_str(options, "detect-zeroes", detect_zeroes ? 
"on" : "off"); + + if (format) { + qdict_put_str(options, "driver", format); + } + + medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp); + + if (!medium_bs) { + goto fail; + } + + rc = do_open_tray(device, id, force, &err); + if (rc && rc != -ENOSYS) { + error_propagate(errp, err); + goto fail; + } + error_free(err); + err = NULL; + + blockdev_remove_medium(device, id, &err); + if (err) { + error_propagate(errp, err); + goto fail; + } + + qmp_blockdev_insert_anon_medium(blk, medium_bs, &err); + if (err) { + error_propagate(errp, err); + goto fail; + } + + qmp_blockdev_close_tray(device, id, errp); + +fail: + /* If the medium has been inserted, the device has its own reference, so + * ours must be relinquished; and if it has not been inserted successfully, + * the reference must be relinquished anyway */ + bdrv_unref(medium_bs); +} + +void qmp_eject(const char *device, const char *id, + bool has_force, bool force, Error **errp) +{ + Error *local_err = NULL; + int rc; + + if (!has_force) { + force = false; + } + + rc = do_open_tray(device, id, force, &local_err); + if (rc && rc != -ENOSYS) { + error_propagate(errp, local_err); + return; + } + error_free(local_err); + + blockdev_remove_medium(device, id, errp); +} + +/* throttling disk I/O limits */ +void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp) +{ + ThrottleConfig cfg; + BlockDriverState *bs; + BlockBackend *blk; + + blk = qmp_get_blk(arg->device, arg->id, errp); + if (!blk) { + return; + } + + bs = blk_bs(blk); + if (!bs) { + error_setg(errp, "Device has no medium"); + return; + } + + throttle_config_init(&cfg); + cfg.buckets[THROTTLE_BPS_TOTAL].avg = arg->bps; + cfg.buckets[THROTTLE_BPS_READ].avg = arg->bps_rd; + cfg.buckets[THROTTLE_BPS_WRITE].avg = arg->bps_wr; + + cfg.buckets[THROTTLE_OPS_TOTAL].avg = arg->iops; + cfg.buckets[THROTTLE_OPS_READ].avg = arg->iops_rd; + cfg.buckets[THROTTLE_OPS_WRITE].avg = arg->iops_wr; + + if (arg->has_bps_max) { + cfg.buckets[THROTTLE_BPS_TOTAL].max = arg->bps_max; + } + if (arg->has_bps_rd_max) { + cfg.buckets[THROTTLE_BPS_READ].max = arg->bps_rd_max; + } + if (arg->has_bps_wr_max) { + cfg.buckets[THROTTLE_BPS_WRITE].max = arg->bps_wr_max; + } + if (arg->has_iops_max) { + cfg.buckets[THROTTLE_OPS_TOTAL].max = arg->iops_max; + } + if (arg->has_iops_rd_max) { + cfg.buckets[THROTTLE_OPS_READ].max = arg->iops_rd_max; + } + if (arg->has_iops_wr_max) { + cfg.buckets[THROTTLE_OPS_WRITE].max = arg->iops_wr_max; + } + + if (arg->has_bps_max_length) { + cfg.buckets[THROTTLE_BPS_TOTAL].burst_length = arg->bps_max_length; + } + if (arg->has_bps_rd_max_length) { + cfg.buckets[THROTTLE_BPS_READ].burst_length = arg->bps_rd_max_length; + } + if (arg->has_bps_wr_max_length) { + cfg.buckets[THROTTLE_BPS_WRITE].burst_length = arg->bps_wr_max_length; + } + if (arg->has_iops_max_length) { + cfg.buckets[THROTTLE_OPS_TOTAL].burst_length = arg->iops_max_length; + } + if (arg->has_iops_rd_max_length) { + cfg.buckets[THROTTLE_OPS_READ].burst_length = arg->iops_rd_max_length; + } + if (arg->has_iops_wr_max_length) { + cfg.buckets[THROTTLE_OPS_WRITE].burst_length = arg->iops_wr_max_length; + } + + if (arg->has_iops_size) { + cfg.op_size = arg->iops_size; + } + + if (!throttle_is_valid(&cfg, errp)) { + return; + } + + if (throttle_enabled(&cfg)) { + /* Enable I/O limits if they're not enabled yet, otherwise + * just update the throttling group. 
*/ + if (!blk_get_public(blk)->throttle_group_member.throttle_state) { + blk_io_limits_enable(blk, arg->group ?: arg->device ?: arg->id); + } else if (arg->group) { + blk_io_limits_update_group(blk, arg->group); + } + /* Set the new throttling configuration */ + blk_set_io_limits(blk, &cfg); + } else if (blk_get_public(blk)->throttle_group_member.throttle_state) { + /* If all throttling settings are set to 0, disable I/O limits */ + blk_io_limits_disable(blk); + } +} + +void qmp_block_latency_histogram_set( + const char *id, + bool has_boundaries, uint64List *boundaries, + bool has_boundaries_read, uint64List *boundaries_read, + bool has_boundaries_write, uint64List *boundaries_write, + bool has_boundaries_append, uint64List *boundaries_append, + bool has_boundaries_flush, uint64List *boundaries_flush, + Error **errp) +{ + BlockBackend *blk = qmp_get_blk(NULL, id, errp); + BlockAcctStats *stats; + int ret; + + if (!blk) { + return; + } + + stats = blk_get_stats(blk); + + if (!has_boundaries && !has_boundaries_read && !has_boundaries_write && + !has_boundaries_flush) + { + block_latency_histograms_clear(stats); + return; + } + + if (has_boundaries || has_boundaries_read) { + ret = block_latency_histogram_set( + stats, BLOCK_ACCT_READ, + has_boundaries_read ? boundaries_read : boundaries); + if (ret) { + error_setg(errp, "Device '%s' set read boundaries fail", id); + return; + } + } + + if (has_boundaries || has_boundaries_write) { + ret = block_latency_histogram_set( + stats, BLOCK_ACCT_WRITE, + has_boundaries_write ? boundaries_write : boundaries); + if (ret) { + error_setg(errp, "Device '%s' set write boundaries fail", id); + return; + } + } + + if (has_boundaries || has_boundaries_append) { + ret = block_latency_histogram_set( + stats, BLOCK_ACCT_ZONE_APPEND, + has_boundaries_append ? boundaries_append : boundaries); + if (ret) { + error_setg(errp, "Device '%s' set append write boundaries fail", id); + return; + } + } + + if (has_boundaries || has_boundaries_flush) { + ret = block_latency_histogram_set( + stats, BLOCK_ACCT_FLUSH, + has_boundaries_flush ? 
boundaries_flush : boundaries); + if (ret) { + error_setg(errp, "Device '%s' set flush boundaries fail", id); + return; + } + } +} diff --git a/block/qapi.c b/block/qapi.c index 2b5793f1d9b..12fbf8d1b7f 100644 --- a/block/qapi.c +++ b/block/qapi.c @@ -33,13 +33,13 @@ #include "qapi/qapi-commands-block-core.h" #include "qapi/qobject-output-visitor.h" #include "qapi/qapi-visit-block-core.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" #include "qemu/qemu-print.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk, BlockDriverState *bs, @@ -51,6 +51,8 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk, ImageInfo *backing_info; BlockDriverState *backing; BlockDeviceInfo *info; + BlockdevChildList **children_list_tail; + BdrvChild *child; if (!bs->drv) { error_setg(errp, "Block device %s is ejected", bs->node_name); @@ -63,6 +65,7 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk, info->file = g_strdup(bs->filename); info->ro = bdrv_is_read_only(bs); info->drv = g_strdup(bs->drv->format_name); + info->active = !bdrv_is_inactive(bs); info->encrypted = bs->encrypted; info->cache = g_new(BlockdevCacheInfo, 1); @@ -72,8 +75,14 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk, .no_flush = !!(bs->open_flags & BDRV_O_NO_FLUSH), }; - if (bs->node_name[0]) { - info->node_name = g_strdup(bs->node_name); + info->node_name = g_strdup(bs->node_name); + + children_list_tail = &info->children; + QLIST_FOREACH(child, &bs->children, next) { + BlockdevChild *child_ref = g_new0(BlockdevChild, 1); + child_ref->child = g_strdup(child->name); + child_ref->node_name = g_strdup(child->bs->node_name); + QAPI_LIST_APPEND(children_list_tail, child_ref); } backing = bdrv_cow_bs(bs); diff --git a/block/qcow.c b/block/qcow.c index c2f89db0553..8a3e7591a92 100644 --- a/block/qcow.c +++ b/block/qcow.c @@ -27,15 +27,15 @@ #include "qemu/error-report.h" #include "block/block_int.h" #include "block/qdict.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/module.h" #include "qemu/option.h" #include "qemu/bswap.h" #include "qemu/cutils.h" #include "qemu/memalign.h" #include <zlib.h> -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-visit-block-core.h" @@ -530,7 +530,7 @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate, } static int coroutine_fn GRAPH_RDLOCK -qcow_co_block_status(BlockDriverState *bs, bool want_zero, +qcow_co_block_status(BlockDriverState *bs, unsigned int mode, int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) { @@ -831,7 +831,7 @@ qcow_co_create(BlockdevCreateOptions *opts, Error **errp) } if (qcow_opts->encrypt && - qcow_opts->encrypt->format != Q_CRYPTO_BLOCK_FORMAT_QCOW) + qcow_opts->encrypt->format != QCRYPTO_BLOCK_FORMAT_QCOW) { error_setg(errp, "Unsupported encryption format"); return -EINVAL; diff --git a/block/qcow2-snapshot.c b/block/qcow2-snapshot.c index 92e47978bf9..1e8dc48be1c 100644 --- a/block/qcow2-snapshot.c +++ b/block/qcow2-snapshot.c @@ -23,7 +23,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/block-backend.h"
+#include "system/block-backend.h" #include "qapi/error.h" #include "qcow2.h" #include "qemu/bswap.h" diff --git a/block/qcow2.c b/block/qcow2.c index 70b19730a39..4aa9f9e068e 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -25,15 +25,15 @@ #include "qemu/osdep.h" #include "block/qdict.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/main-loop.h" #include "qemu/module.h" #include "qcow2.h" #include "qemu/error-report.h" #include "qapi/error.h" #include "qapi/qapi-events-block-core.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "trace.h" #include "qemu/option_int.h" #include "qemu/cutils.h" @@ -1721,7 +1721,7 @@ qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, ret = -EINVAL; goto fail; } - } else if (!(flags & BDRV_O_NO_IO)) { + } else { error_setg(errp, "Missing CRYPTO header for crypt method %d", s->crypt_method_header); ret = -EINVAL; @@ -1895,7 +1895,9 @@ qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, g_free(s->image_data_file); if (open_data_file && has_data_file(bs)) { bdrv_graph_co_rdunlock(); + bdrv_drain_all_begin(); bdrv_co_unref_child(bs, s->data_file); + bdrv_drain_all_end(); bdrv_graph_co_rdlock(); s->data_file = NULL; } @@ -1976,7 +1978,7 @@ static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp) { BDRVQcow2State *s = bs->opaque; - if (bs->encrypted) { + if (s->crypto) { /* Encryption works on a sector granularity */ bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto); } @@ -2141,9 +2143,9 @@ static void qcow2_join_options(QDict *options, QDict *old_options) } static int coroutine_fn GRAPH_RDLOCK -qcow2_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset, - int64_t count, int64_t *pnum, int64_t *map, - BlockDriverState **file) +qcow2_co_block_status(BlockDriverState *bs, unsigned int mode, + int64_t offset, int64_t count, int64_t *pnum, + int64_t *map, BlockDriverState **file) { BDRVQcow2State *s = bs->opaque; uint64_t host_offset; @@ -2821,7 +2823,7 @@ qcow2_do_close(BlockDriverState *bs, bool close_data_file) if (close_data_file && has_data_file(bs)) { GLOBAL_STATE_CODE(); bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_unref_child(bs, s->data_file); bdrv_graph_wrunlock(); s->data_file = NULL; @@ -3214,10 +3216,10 @@ qcow2_set_up_encryption(BlockDriverState *bs, int fmt, ret; switch (cryptoopts->format) { - case Q_CRYPTO_BLOCK_FORMAT_LUKS: + case QCRYPTO_BLOCK_FORMAT_LUKS: fmt = QCOW_CRYPT_LUKS; break; - case Q_CRYPTO_BLOCK_FORMAT_QCOW: + case QCRYPTO_BLOCK_FORMAT_QCOW: fmt = QCOW_CRYPT_AES; break; default: @@ -5299,17 +5301,17 @@ qcow2_get_specific_info(BlockDriverState *bs, Error **errp) } else { /* if this assertion fails, this probably means a new version was * added without having it covered here */ - assert(false); + g_assert_not_reached(); } if (encrypt_info) { ImageInfoSpecificQCow2Encryption *qencrypt = g_new(ImageInfoSpecificQCow2Encryption, 1); switch (encrypt_info->format) { - case Q_CRYPTO_BLOCK_FORMAT_QCOW: + case QCRYPTO_BLOCK_FORMAT_QCOW: qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES; break; - case Q_CRYPTO_BLOCK_FORMAT_LUKS: + case QCRYPTO_BLOCK_FORMAT_LUKS: qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS; qencrypt->u.luks = encrypt_info->u.luks; break; @@ -5948,7 +5950,7 @@ static int coroutine_fn qcow2_co_amend(BlockDriverState *bs, return -EOPNOTSUPP; } - if (qopts->encrypt->format != 
Q_CRYPTO_BLOCK_FORMAT_LUKS) { + if (qopts->encrypt->format != QCRYPTO_BLOCK_FORMAT_LUKS) { error_setg(errp, "Amend can't be used to change the qcow2 encryption format"); return -EOPNOTSUPP; diff --git a/block/qed.c b/block/qed.c index fa5bc110855..4a36fb39294 100644 --- a/block/qed.c +++ b/block/qed.c @@ -23,8 +23,8 @@ #include "qemu/memalign.h" #include "trace.h" #include "qed.h" -#include "sysemu/block-backend.h" -#include "qapi/qmp/qdict.h" +#include "system/block-backend.h" +#include "qobject/qdict.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-visit-block-core.h" @@ -353,6 +353,7 @@ static void bdrv_qed_detach_aio_context(BlockDriverState *bs) qed_cancel_need_check_timer(s); timer_free(s->need_check_timer); + s->need_check_timer = NULL; } static void bdrv_qed_attach_aio_context(BlockDriverState *bs, @@ -832,9 +833,9 @@ bdrv_qed_co_create_opts(BlockDriver *drv, const char *filename, } static int coroutine_fn GRAPH_RDLOCK -bdrv_qed_co_block_status(BlockDriverState *bs, bool want_zero, int64_t pos, - int64_t bytes, int64_t *pnum, int64_t *map, - BlockDriverState **file) +bdrv_qed_co_block_status(BlockDriverState *bs, unsigned int mode, + int64_t pos, int64_t bytes, int64_t *pnum, + int64_t *map, BlockDriverState **file) { BDRVQEDState *s = bs->opaque; size_t len = MIN(bytes, SIZE_MAX); diff --git a/block/quorum.c b/block/quorum.c index db8fe891c4b..76a4feb2d9f 100644 --- a/block/quorum.c +++ b/block/quorum.c @@ -23,10 +23,10 @@ #include "block/qdict.h" #include "qapi/error.h" #include "qapi/qapi-events-block.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/qmp/qerror.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qlist.h" +#include "qobject/qstring.h" #include "crypto/hash.h" #define HASH_LENGTH 32 @@ -393,7 +393,7 @@ static int quorum_compute_hash(QuorumAIOCB *acb, int i, QuorumVoteValue *hash) /* XXX - would be nice if we could pass in the Error ** * and propagate that back, but this quorum code is * restricted to just errno values currently */ - if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALG_SHA256, + if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALGO_SHA256, qiov->iov, qiov->niov, &data, &len, NULL) < 0) { @@ -1037,7 +1037,7 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags, close_exit: /* cleanup on error */ - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); for (i = 0; i < s->num_children; i++) { if (!opened[i]) { continue; @@ -1057,7 +1057,7 @@ static void quorum_close(BlockDriverState *bs) BDRVQuorumState *s = bs->opaque; int i; - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); for (i = 0; i < s->num_children; i++) { bdrv_unref_child(bs, s->children[i]); } @@ -1226,7 +1226,7 @@ static void quorum_child_perm(BlockDriverState *bs, BdrvChild *c, * region contains zeroes, and BDRV_BLOCK_DATA otherwise. 
*/ static int coroutine_fn GRAPH_RDLOCK -quorum_co_block_status(BlockDriverState *bs, bool want_zero, +quorum_co_block_status(BlockDriverState *bs, unsigned int mode, int64_t offset, int64_t count, int64_t *pnum, int64_t *map, BlockDriverState **file) { @@ -1238,7 +1238,7 @@ quorum_co_block_status(BlockDriverState *bs, bool want_zero, for (i = 0; i < s->num_children; i++) { int64_t bytes; ret = bdrv_co_common_block_status_above(s->children[i]->bs, NULL, false, - want_zero, offset, count, + mode, offset, count, &bytes, NULL, NULL, NULL); if (ret < 0) { quorum_report_bad(QUORUM_OP_TYPE_READ, offset, count, @@ -1308,7 +1308,7 @@ static BlockDriver bdrv_quorum = { static void bdrv_quorum_init(void) { - if (!qcrypto_hash_supports(QCRYPTO_HASH_ALG_SHA256)) { + if (!qcrypto_hash_supports(QCRYPTO_HASH_ALGO_SHA256)) { /* SHA256 hash support is required for quorum device */ return; } diff --git a/block/raw-format.c b/block/raw-format.c index ac7e8495f66..df16ac1ea25 100644 --- a/block/raw-format.c +++ b/block/raw-format.c @@ -111,7 +111,7 @@ raw_apply_options(BlockDriverState *bs, BDRVRawState *s, uint64_t offset, if (offset > real_size) { error_setg(errp, "Offset (%" PRIu64 ") cannot be greater than " "size of the containing file (%" PRId64 ")", - s->offset, real_size); + offset, real_size); return -EINVAL; } @@ -119,7 +119,7 @@ raw_apply_options(BlockDriverState *bs, BDRVRawState *s, uint64_t offset, error_setg(errp, "The sum of offset (%" PRIu64 ") and size " "(%" PRIu64 ") has to be smaller or equal to the " " actual size of the containing file (%" PRId64 ")", - s->offset, s->size, real_size); + offset, size, real_size); return -EINVAL; } @@ -283,8 +283,8 @@ raw_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, } static int coroutine_fn GRAPH_RDLOCK -raw_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset, - int64_t bytes, int64_t *pnum, int64_t *map, +raw_co_block_status(BlockDriverState *bs, unsigned int mode, + int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) { BDRVRawState *s = bs->opaque; diff --git a/block/rbd.c b/block/rbd.c index 9c0fd0cb3f7..3611dc81cf1 100644 --- a/block/rbd.c +++ b/block/rbd.c @@ -23,11 +23,11 @@ #include "block/qdict.h" #include "crypto/secret.h" #include "qemu/cutils.h" -#include "sysemu/replay.h" -#include "qapi/qmp/qstring.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qlist.h" +#include "system/replay.h" +#include "qobject/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qjson.h" +#include "qobject/qlist.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-visit-block-core.h" @@ -99,6 +99,14 @@ typedef struct BDRVRBDState { char *namespace; uint64_t image_size; uint64_t object_size; + + /* + * If @bs->encrypted is true, this is the encryption format actually loaded + * at the librbd level. If it is false, it is the result of probing. + * RBD_IMAGE_ENCRYPTION_FORMAT__MAX means that encryption is not enabled and + * probing didn't find any known encryption header either. 
+ */ + RbdImageEncryptionFormat encryption_format; } BDRVRBDState; typedef struct RBDTask { @@ -254,7 +262,6 @@ static void qemu_rbd_parse_filename(const char *filename, QDict *options, done: g_free(buf); qobject_unref(keypairs); - return; } static int qemu_rbd_set_auth(rados_t cluster, BlockdevOptionsRbd *opts, @@ -367,11 +374,11 @@ static int qemu_rbd_convert_luks_create_options( if (luks_opts->has_cipher_alg) { switch (luks_opts->cipher_alg) { - case QCRYPTO_CIPHER_ALG_AES_128: { + case QCRYPTO_CIPHER_ALGO_AES_128: { *alg = RBD_ENCRYPTION_ALGORITHM_AES128; break; } - case QCRYPTO_CIPHER_ALG_AES_256: { + case QCRYPTO_CIPHER_ALGO_AES_256: { *alg = RBD_ENCRYPTION_ALGORITHM_AES256; break; } @@ -471,10 +478,12 @@ static int qemu_rbd_encryption_format(rbd_image_t image, return 0; } -static int qemu_rbd_encryption_load(rbd_image_t image, +static int qemu_rbd_encryption_load(BlockDriverState *bs, + rbd_image_t image, RbdEncryptionOptions *encrypt, Error **errp) { + BDRVRBDState *s = bs->opaque; int r = 0; g_autofree char *passphrase = NULL; rbd_encryption_luks1_format_options_t luks_opts; @@ -545,15 +554,19 @@ static int qemu_rbd_encryption_load(rbd_image_t image, error_setg_errno(errp, -r, "encryption load fail"); return r; } + bs->encrypted = true; + s->encryption_format = encrypt->format; return 0; } #ifdef LIBRBD_SUPPORTS_ENCRYPTION_LOAD2 -static int qemu_rbd_encryption_load2(rbd_image_t image, +static int qemu_rbd_encryption_load2(BlockDriverState *bs, + rbd_image_t image, RbdEncryptionOptions *encrypt, Error **errp) { + BDRVRBDState *s = bs->opaque; int r = 0; int encrypt_count = 1; int i; @@ -639,6 +652,8 @@ static int qemu_rbd_encryption_load2(rbd_image_t image, error_setg_errno(errp, -r, "layered encryption load fail"); goto exit; } + bs->encrypted = true; + s->encryption_format = encrypt->format; exit: for (i = 0; i < encrypt_count; ++i) { @@ -672,6 +687,45 @@ static int qemu_rbd_encryption_load2(rbd_image_t image, #endif #endif +/* + * For an image without encryption enabled on the rbd layer, probe the start of + * the image if it could be opened as an encrypted image so that we can display + * it when the user queries the node (most importantly in qemu-img). + * + * If the guest writes an encryption header to its disk after this probing, this + * won't be reflected when queried, but that's okay. There is no reason why the + * user should want to apply encryption at the rbd level while the image is + * still in use. This is just guest data. 
+ */ +static void qemu_rbd_encryption_probe(BlockDriverState *bs) +{ + BDRVRBDState *s = bs->opaque; + char buf[RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN] = {0}; + int r; + + assert(s->encryption_format == RBD_IMAGE_ENCRYPTION_FORMAT__MAX); + + r = rbd_read(s->image, 0, + RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN, buf); + if (r < RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) { + return; + } + + if (memcmp(buf, rbd_luks_header_verification, + RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { + s->encryption_format = RBD_IMAGE_ENCRYPTION_FORMAT_LUKS; + } else if (memcmp(buf, rbd_luks2_header_verification, + RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { + s->encryption_format = RBD_IMAGE_ENCRYPTION_FORMAT_LUKS2; + } else if (memcmp(buf, rbd_layered_luks_header_verification, + RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { + s->encryption_format = RBD_IMAGE_ENCRYPTION_FORMAT_LUKS; + } else if (memcmp(buf, rbd_layered_luks2_header_verification, + RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { + s->encryption_format = RBD_IMAGE_ENCRYPTION_FORMAT_LUKS2; + } +} + /* FIXME Deprecate and remove keypairs or make it available in QMP. */ static int qemu_rbd_do_create(BlockdevCreateOptions *options, const char *keypairs, const char *password_secret, @@ -1134,17 +1188,18 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags, goto failed_open; } + s->encryption_format = RBD_IMAGE_ENCRYPTION_FORMAT__MAX; if (opts->encrypt) { #ifdef LIBRBD_SUPPORTS_ENCRYPTION if (opts->encrypt->parent) { #ifdef LIBRBD_SUPPORTS_ENCRYPTION_LOAD2 - r = qemu_rbd_encryption_load2(s->image, opts->encrypt, errp); + r = qemu_rbd_encryption_load2(bs, s->image, opts->encrypt, errp); #else r = -ENOTSUP; error_setg(errp, "RBD library does not support layered encryption"); #endif } else { - r = qemu_rbd_encryption_load(s->image, opts->encrypt, errp); + r = qemu_rbd_encryption_load(bs, s->image, opts->encrypt, errp); } if (r < 0) { goto failed_post_open; @@ -1154,6 +1209,8 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags, error_setg(errp, "RBD library does not support image encryption"); goto failed_post_open; #endif + } else { + qemu_rbd_encryption_probe(bs); } r = rbd_stat(s->image, &info, sizeof(info)); @@ -1413,17 +1470,6 @@ static ImageInfoSpecific *qemu_rbd_get_specific_info(BlockDriverState *bs, { BDRVRBDState *s = bs->opaque; ImageInfoSpecific *spec_info; - char buf[RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN] = {0}; - int r; - - if (s->image_size >= RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) { - r = rbd_read(s->image, 0, - RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN, buf); - if (r < 0) { - error_setg_errno(errp, -r, "cannot read image start for probe"); - return NULL; - } - } spec_info = g_new(ImageInfoSpecific, 1); *spec_info = (ImageInfoSpecific){ @@ -1431,28 +1477,13 @@ static ImageInfoSpecific *qemu_rbd_get_specific_info(BlockDriverState *bs, .u.rbd.data = g_new0(ImageInfoSpecificRbd, 1), }; - if (memcmp(buf, rbd_luks_header_verification, - RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { - spec_info->u.rbd.data->encryption_format = - RBD_IMAGE_ENCRYPTION_FORMAT_LUKS; - spec_info->u.rbd.data->has_encryption_format = true; - } else if (memcmp(buf, rbd_luks2_header_verification, - RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { - spec_info->u.rbd.data->encryption_format = - RBD_IMAGE_ENCRYPTION_FORMAT_LUKS2; - spec_info->u.rbd.data->has_encryption_format = true; - } else if (memcmp(buf, rbd_layered_luks_header_verification, - 
RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { - spec_info->u.rbd.data->encryption_format = - RBD_IMAGE_ENCRYPTION_FORMAT_LUKS; - spec_info->u.rbd.data->has_encryption_format = true; - } else if (memcmp(buf, rbd_layered_luks2_header_verification, - RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { - spec_info->u.rbd.data->encryption_format = - RBD_IMAGE_ENCRYPTION_FORMAT_LUKS2; - spec_info->u.rbd.data->has_encryption_format = true; + if (s->encryption_format == RBD_IMAGE_ENCRYPTION_FORMAT__MAX) { + assert(!bs->encrypted); } else { - spec_info->u.rbd.data->has_encryption_format = false; + ImageInfoSpecificRbd *rbd_info = spec_info->u.rbd.data; + + rbd_info->has_encryption_format = true; + rbd_info->encryption_format = s->encryption_format; } return spec_info; @@ -1504,9 +1535,9 @@ static int qemu_rbd_diff_iterate_cb(uint64_t offs, size_t len, } static int coroutine_fn qemu_rbd_co_block_status(BlockDriverState *bs, - bool want_zero, int64_t offset, - int64_t bytes, int64_t *pnum, - int64_t *map, + unsigned int mode, + int64_t offset, int64_t bytes, + int64_t *pnum, int64_t *map, BlockDriverState **file) { BDRVRBDState *s = bs->opaque; diff --git a/block/replication.c b/block/replication.c index 0415a5e8b78..3a431e908ca 100644 --- a/block/replication.c +++ b/block/replication.c @@ -19,9 +19,9 @@ #include "block/blockjob.h" #include "block/block_int.h" #include "block/block_backup.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "block/replication.h" typedef enum { @@ -176,7 +176,6 @@ static void replication_child_perm(BlockDriverState *bs, BdrvChild *c, *nshared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED; - return; } static int64_t coroutine_fn GRAPH_RDLOCK @@ -365,14 +364,15 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable, BlockReopenQueue *reopen_queue = NULL; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); + bdrv_graph_rdlock_main_loop(); /* * s->hidden_disk and s->secondary_disk may not be set yet, as they will * only be set after the children are writable. 
*/ hidden_disk = bs->file->bs->backing; secondary_disk = hidden_disk->bs->backing; + bdrv_graph_rdunlock_main_loop(); if (writable) { s->orig_hidden_read_only = bdrv_is_read_only(hidden_disk->bs); @@ -541,7 +541,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, return; } - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_ref(hidden_disk->bs); s->hidden_disk = bdrv_attach_child(bs, hidden_disk->bs, "hidden disk", @@ -576,7 +576,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, return; } bdrv_op_block_all(top_bs, s->blocker); - bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker); bdrv_graph_wrunlock(); @@ -585,7 +584,9 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, 0, MIRROR_SYNC_MODE_NONE, NULL, 0, false, false, NULL, &perf, BLOCKDEV_ON_ERROR_REPORT, - BLOCKDEV_ON_ERROR_REPORT, JOB_INTERNAL, + BLOCKDEV_ON_ERROR_REPORT, + ON_CBW_ERROR_BREAK_GUEST_WRITE, + JOB_INTERNAL, backup_job_completed, bs, NULL, &local_err); if (local_err) { error_propagate(errp, local_err); @@ -651,7 +652,7 @@ static void replication_done(void *opaque, int ret) if (ret == 0) { s->stage = BLOCK_REPLICATION_DONE; - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_unref_child(bs, s->secondary_disk); s->secondary_disk = NULL; bdrv_unref_child(bs, s->hidden_disk); diff --git a/block/reqlist.c b/block/reqlist.c index 08cb57cfa45..098e807378b 100644 --- a/block/reqlist.c +++ b/block/reqlist.c @@ -20,8 +20,6 @@ void reqlist_init_req(BlockReqList *reqs, BlockReq *req, int64_t offset, int64_t bytes) { - assert(!reqlist_find_conflict(reqs, offset, bytes)); - *req = (BlockReq) { .offset = offset, .bytes = bytes, diff --git a/block/snapshot-access.c b/block/snapshot-access.c index 84d0d13f867..17ed2402db8 100644 --- a/block/snapshot-access.c +++ b/block/snapshot-access.c @@ -22,7 +22,7 @@ #include "qemu/osdep.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/cutils.h" #include "block/block_int.h" @@ -41,11 +41,11 @@ snapshot_access_co_preadv_part(BlockDriverState *bs, static int coroutine_fn GRAPH_RDLOCK snapshot_access_co_block_status(BlockDriverState *bs, - bool want_zero, int64_t offset, + unsigned int mode, int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) { - return bdrv_co_snapshot_block_status(bs->file->bs, want_zero, offset, + return bdrv_co_snapshot_block_status(bs->file->bs, mode, offset, bytes, pnum, map, file); } diff --git a/block/snapshot.c b/block/snapshot.c index 8fd17567773..bd9d759b32c 100644 --- a/block/snapshot.c +++ b/block/snapshot.c @@ -27,10 +27,10 @@ #include "block/block_int.h" #include "block/qdict.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "qemu/option.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" QemuOptsList internal_snapshot_opts = { .name = "snapshot", @@ -291,11 +291,12 @@ int bdrv_snapshot_goto(BlockDriverState *bs, } /* .bdrv_open() will re-attach it */ - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_unref_child(bs, fallback); bdrv_graph_wrunlock(); ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp); + memset(bs->opaque, 0, drv->instance_size); open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err); qobject_unref(options); if (open_ret < 0) { @@ -326,7 +327,7 @@ int bdrv_snapshot_goto(BlockDriverState *bs, /** * Delete an internal snapshot by 
@snapshot_id and @name. - * @bs: block device used in the operation + * @bs: block device used in the operation, must be drained * @snapshot_id: unique snapshot ID, or NULL * @name: snapshot name, or NULL * @errp: location to store error @@ -357,6 +358,8 @@ int bdrv_snapshot_delete(BlockDriverState *bs, GLOBAL_STATE_CODE(); + assert(bs->quiesce_counter > 0); + if (!drv) { error_setg(errp, "Device '%s' has no medium", bdrv_get_device_name(bs)); @@ -367,9 +370,6 @@ int bdrv_snapshot_delete(BlockDriverState *bs, return -EINVAL; } - /* drain all pending i/o before deleting snapshot */ - bdrv_drained_begin(bs); - if (drv->bdrv_snapshot_delete) { ret = drv->bdrv_snapshot_delete(bs, snapshot_id, name, errp); } else if (fallback_bs) { @@ -381,7 +381,6 @@ int bdrv_snapshot_delete(BlockDriverState *bs, ret = -ENOTSUP; } - bdrv_drained_end(bs); return ret; } @@ -570,19 +569,22 @@ int bdrv_all_delete_snapshot(const char *name, ERRP_GUARD(); g_autoptr(GList) bdrvs = NULL; GList *iterbdrvs; + int ret = 0; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); - if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) { - return -1; + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); + + ret = bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp); + if (ret < 0) { + goto out; } iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; QEMUSnapshotInfo sn1, *snapshot = &sn1; - int ret = 0; if ((devices || bdrv_all_snapshots_includes_bs(bs)) && bdrv_snapshot_find(bs, snapshot, name) >= 0) @@ -593,13 +595,16 @@ int bdrv_all_delete_snapshot(const char *name, if (ret < 0) { error_prepend(errp, "Could not delete snapshot '%s' on '%s': ", name, bdrv_get_device_or_node_name(bs)); - return -1; + goto out; } iterbdrvs = iterbdrvs->next; } - return 0; +out: + bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); + return ret; } diff --git a/block/ssh.c b/block/ssh.c index 27d582e0e3d..70fe7cf86e5 100644 --- a/block/ssh.c +++ b/block/ssh.c @@ -39,8 +39,8 @@ #include "qemu/sockets.h" #include "qapi/qapi-visit-sockets.h" #include "qapi/qapi-visit-block-core.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qobject-output-visitor.h" #include "trace.h" @@ -364,7 +364,7 @@ static unsigned hex2decimal(char ch) return 10 + (ch - 'A'); } - return -1; + return UINT_MAX; } /* Compare the binary fingerprint (hash of host key) with the @@ -376,13 +376,15 @@ static int compare_fingerprint(const unsigned char *fingerprint, size_t len, unsigned c; while (len > 0) { + unsigned c0, c1; while (*host_key_check == ':') host_key_check++; - if (!qemu_isxdigit(host_key_check[0]) || - !qemu_isxdigit(host_key_check[1])) + c0 = hex2decimal(host_key_check[0]); + c1 = hex2decimal(host_key_check[1]); + if (c0 > 0xf || c1 > 0xf) { return 1; - c = hex2decimal(host_key_check[0]) * 16 + - hex2decimal(host_key_check[1]); + } + c = c0 * 16 + c1; if (c - *fingerprint != 0) return c - *fingerprint; fingerprint++; @@ -474,7 +476,6 @@ static int check_host_key(BDRVSSHState *s, SshHostKeyCheck *hkc, Error **errp) errp); } g_assert_not_reached(); - break; case SSH_HOST_KEY_CHECK_MODE_KNOWN_HOSTS: return check_host_key_knownhosts(s, errp); default: @@ -865,9 +866,6 @@ static int ssh_open(BlockDriverState *bs, QDict *options, int bdrv_flags, goto err; } - /* Go non-blocking. 
*/ - ssh_set_blocking(s->session, 0); - if (s->attrs->type == SSH_FILEXFER_TYPE_REGULAR) { bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE; } diff --git a/block/stream.c b/block/stream.c index 7031eef12b6..c0616b69e25 100644 --- a/block/stream.c +++ b/block/stream.c @@ -16,9 +16,9 @@ #include "block/block_int.h" #include "block/blockjob_int.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/ratelimit.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "block/copy-on-read.h" enum { @@ -51,7 +51,7 @@ static int coroutine_fn stream_populate(BlockBackend *blk, return blk_co_preadv(blk, offset, bytes, NULL, BDRV_REQ_PREFETCH); } -static int stream_prepare(Job *job) +static int GRAPH_UNLOCKED stream_prepare(Job *job) { StreamBlockJob *s = container_of(job, StreamBlockJob, common.job); BlockDriverState *unfiltered_bs; @@ -73,18 +73,16 @@ static int stream_prepare(Job *job) s->cor_filter_bs = NULL; /* - * bdrv_set_backing_hd() requires that the unfiltered_bs and the COW child - * of unfiltered_bs is drained. Drain already here and use - * bdrv_set_backing_hd_drained() instead because the polling during - * drained_begin() might change the graph, and if we do this only later, we - * may end up working with the wrong base node (or it might even have gone - * away by the time we want to use it). + * bdrv_set_backing_hd() requires that all block nodes are drained. Drain + * already here, because the polling during drained_begin() might change the + * graph, and if we do this only later, we may end up working with the wrong + * base node (or it might even have gone away by the time we want to use + * it). */ - bdrv_drained_begin(unfiltered_bs); if (unfiltered_bs_cow) { bdrv_ref(unfiltered_bs_cow); - bdrv_drained_begin(unfiltered_bs_cow); } + bdrv_drain_all_begin(); bdrv_graph_rdlock_main_loop(); base = bdrv_filter_or_cow_bs(s->above_base); @@ -106,7 +104,7 @@ static int stream_prepare(Job *job) } bdrv_graph_wrlock(); - bdrv_set_backing_hd_drained(unfiltered_bs, base, &local_err); + bdrv_set_backing_hd(unfiltered_bs, base, &local_err); bdrv_graph_wrunlock(); /* @@ -123,11 +121,10 @@ static int stream_prepare(Job *job) } out: + bdrv_drain_all_end(); if (unfiltered_bs_cow) { - bdrv_drained_end(unfiltered_bs_cow); bdrv_unref(unfiltered_bs_cow); } - bdrv_drained_end(unfiltered_bs); return ret; } @@ -155,8 +152,8 @@ static void stream_clean(Job *job) static int coroutine_fn stream_run(Job *job, Error **errp) { StreamBlockJob *s = container_of(job, StreamBlockJob, common.job); - BlockDriverState *unfiltered_bs; - int64_t len; + BlockDriverState *unfiltered_bs = NULL; + int64_t len = -1; int64_t offset = 0; int error = 0; int64_t n = 0; /* bytes */ @@ -177,7 +174,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp) for ( ; offset < len; offset += n) { bool copy; - int ret; + int ret = -1; /* Note that even when no rate limit is applied we need to yield * with no pending I/O here so that bdrv_drain_all() returns. @@ -373,7 +370,7 @@ void stream_start(const char *job_id, BlockDriverState *bs, * already have our own plans. Also don't allow resize as the image size is * queried only at the job start and then cached. 
*/ - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); if (block_job_add_bdrv(&s->common, "active node", bs, 0, basic_flags | BLK_PERM_WRITE, errp)) { bdrv_graph_wrunlock(); diff --git a/block/throttle-groups.c b/block/throttle-groups.c index f5c0fac5814..66fdce9a90e 100644 --- a/block/throttle-groups.c +++ b/block/throttle-groups.c @@ -23,13 +23,13 @@ */ #include "qemu/osdep.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "block/throttle-groups.h" #include "qemu/throttle-options.h" #include "qemu/main-loop.h" #include "qemu/queue.h" #include "qemu/thread.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" #include "qapi/error.h" #include "qapi/qapi-visit-block-core.h" #include "qom/object.h" @@ -908,7 +908,6 @@ static void throttle_group_set_limits(Object *obj, Visitor *v, qemu_mutex_unlock(&tg->lock); qapi_free_ThrottleLimits(argp); error_propagate(errp, local_err); - return; } static void throttle_group_get_limits(Object *obj, Visitor *v, @@ -934,7 +933,8 @@ static bool throttle_group_can_be_deleted(UserCreatable *uc) return OBJECT(uc)->ref == 1; } -static void throttle_group_obj_class_init(ObjectClass *klass, void *class_data) +static void throttle_group_obj_class_init(ObjectClass *klass, + const void *class_data) { size_t i = 0; UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass); @@ -967,7 +967,7 @@ static const TypeInfo throttle_group_info = { .instance_size = sizeof(ThrottleGroup), .instance_init = throttle_group_obj_init, .instance_finalize = throttle_group_obj_finalize, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } }, diff --git a/block/vdi.c b/block/vdi.c index 6363da08cee..3ddc62a5690 100644 --- a/block/vdi.c +++ b/block/vdi.c @@ -3,10 +3,12 @@ * * Copyright (c) 2009, 2012 Stefan Weil * + * SPDX-License-Identifier: GPL-2.0-or-later + * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or - * (at your option) version 3 or any later version. + * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -56,7 +58,7 @@ #include "qapi/qapi-visit-block-core.h" #include "block/block_int.h" #include "block/qdict.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/module.h" #include "qemu/option.h" #include "qemu/bswap.h" @@ -85,7 +87,7 @@ /* Command line option for static images. 
*/ #define BLOCK_OPT_STATIC "static" -#define SECTOR_SIZE 512 +#define SECTOR_SIZE 512ULL #define DEFAULT_CLUSTER_SIZE 1048576 /* Note: can't use 1 * MiB, because it's passed to stringify() */ @@ -440,7 +442,7 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags, goto fail; } else if (header.sector_size != SECTOR_SIZE) { error_setg(errp, "unsupported VDI image (sector size %" PRIu32 - " is not %u)", header.sector_size, SECTOR_SIZE); + " is not %llu)", header.sector_size, SECTOR_SIZE); ret = -ENOTSUP; goto fail; } else if (header.block_size != DEFAULT_CLUSTER_SIZE) { @@ -521,8 +523,8 @@ static int vdi_reopen_prepare(BDRVReopenState *state, } static int coroutine_fn GRAPH_RDLOCK -vdi_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset, - int64_t bytes, int64_t *pnum, int64_t *map, +vdi_co_block_status(BlockDriverState *bs, unsigned int mode, + int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) { BDRVVdiState *s = (BDRVVdiState *)bs->opaque; diff --git a/block/vhdx.c b/block/vhdx.c index 5aa1a135062..b2a4b813a0b 100644 --- a/block/vhdx.c +++ b/block/vhdx.c @@ -19,7 +19,7 @@ #include "qapi/error.h" #include "block/block_int.h" #include "block/qdict.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/module.h" #include "qemu/option.h" #include "qemu/crc32c.h" @@ -29,7 +29,7 @@ #include "vhdx.h" #include "migration/blocker.h" #include "qemu/uuid.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-visit-block-core.h" diff --git a/block/vmdk.c b/block/vmdk.c index 78f64336079..7b98debc2b9 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -26,8 +26,8 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "block/block_int.h" -#include "sysemu/block-backend.h" -#include "qapi/qmp/qdict.h" +#include "system/block-backend.h" +#include "qobject/qdict.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "qemu/option.h" @@ -271,7 +271,7 @@ static void vmdk_free_extents(BlockDriverState *bs) BDRVVmdkState *s = bs->opaque; VmdkExtent *e; - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); for (i = 0; i < s->num_extents; i++) { e = &s->extents[i]; g_free(e->l1_table); @@ -1229,9 +1229,11 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, extent_role |= BDRV_CHILD_METADATA; } + bdrv_graph_rdunlock_main_loop(); extent_file = bdrv_open_child(extent_path, options, extent_opt_prefix, bs, &child_of_bds, extent_role, false, &local_err); + bdrv_graph_rdlock_main_loop(); g_free(extent_path); if (!extent_file) { error_propagate(errp, local_err); @@ -1247,7 +1249,7 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, 0, 0, 0, 0, 0, &extent, errp); if (ret < 0) { bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_unref_child(bs, extent_file); bdrv_graph_wrunlock(); bdrv_graph_rdlock_main_loop(); @@ -1266,7 +1268,7 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, g_free(buf); if (ret) { bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_unref_child(bs, extent_file); bdrv_graph_wrunlock(); bdrv_graph_rdlock_main_loop(); @@ -1277,7 +1279,7 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, ret = vmdk_open_se_sparse(bs, extent_file, bs->open_flags, errp); if (ret) { bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(); + 
bdrv_graph_wrlock_drained(); bdrv_unref_child(bs, extent_file); bdrv_graph_wrunlock(); bdrv_graph_rdlock_main_loop(); @@ -1287,7 +1289,7 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, } else { error_setg(errp, "Unsupported extent type '%s'", type); bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_unref_child(bs, extent_file); bdrv_graph_wrunlock(); bdrv_graph_rdlock_main_loop(); @@ -1352,13 +1354,13 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags, BDRVVmdkState *s = bs->opaque; uint32_t magic; - GRAPH_RDLOCK_GUARD_MAINLOOP(); - ret = bdrv_open_file_child(NULL, options, "file", bs, errp); if (ret < 0) { return ret; } + GRAPH_RDLOCK_GUARD_MAINLOOP(); + buf = vmdk_read_desc(bs->file, 0, errp); if (!buf) { return -EINVAL; @@ -1777,7 +1779,7 @@ static inline uint64_t vmdk_find_offset_in_cluster(VmdkExtent *extent, } static int coroutine_fn GRAPH_RDLOCK -vmdk_co_block_status(BlockDriverState *bs, bool want_zero, +vmdk_co_block_status(BlockDriverState *bs, unsigned int mode, int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) { diff --git a/block/vpc.c b/block/vpc.c index d95a204612b..801ff5793f8 100644 --- a/block/vpc.c +++ b/block/vpc.c @@ -27,14 +27,14 @@ #include "qapi/error.h" #include "block/block_int.h" #include "block/qdict.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/module.h" #include "qemu/option.h" #include "migration/blocker.h" #include "qemu/bswap.h" #include "qemu/uuid.h" #include "qemu/memalign.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-visit-block-core.h" @@ -216,6 +216,39 @@ static void vpc_parse_options(BlockDriverState *bs, QemuOpts *opts, } } +/* + * Microsoft Virtual PC and Microsoft Hyper-V produce and read + * VHD image sizes differently. VPC will rely on CHS geometry, + * while Hyper-V and disk2vhd use the size specified in the footer. + * + * We use a couple of approaches to try and determine the correct method: + * look at the Creator App field, and look for images that have CHS + * geometry that is the maximum value. + * + * If the CHS geometry is the maximum CHS geometry, then we assume that + * the size is the footer->current_size to avoid truncation. Otherwise, + * we follow the table based on footer->creator_app: + * + * Known creator apps: + * 'vpc ' : CHS Virtual PC (uses disk geometry) + * 'qemu' : CHS QEMU (uses disk geometry) + * 'qem2' : current_size QEMU (uses current_size) + * 'win ' : current_size Hyper-V + * 'd2v ' : current_size Disk2vhd + * 'tap\0' : current_size XenServer + * 'CTXS' : current_size XenConverter + * 'wa\0\0': current_size Azure + * + * The user can override the table values via drive options, however + * even with an override we will still use current_size for images + * that have CHS geometry of the maximum size. + */ +static bool vpc_ignore_current_size(VHDFooter *footer) +{ + return !strncmp(footer->creator_app, "vpc ", 4) || + !strncmp(footer->creator_app, "qemu", 4); +} + static int vpc_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { @@ -304,36 +337,8 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags, bs->total_sectors = (int64_t) be16_to_cpu(footer->cyls) * footer->heads * footer->secs_per_cyl; - /* Microsoft Virtual PC and Microsoft Hyper-V produce and read - * VHD image sizes differently. 
VPC will rely on CHS geometry, - * while Hyper-V and disk2vhd use the size specified in the footer. - * - * We use a couple of approaches to try and determine the correct method: - * look at the Creator App field, and look for images that have CHS - * geometry that is the maximum value. - * - * If the CHS geometry is the maximum CHS geometry, then we assume that - * the size is the footer->current_size to avoid truncation. Otherwise, - * we follow the table based on footer->creator_app: - * - * Known creator apps: - * 'vpc ' : CHS Virtual PC (uses disk geometry) - * 'qemu' : CHS QEMU (uses disk geometry) - * 'qem2' : current_size QEMU (uses current_size) - * 'win ' : current_size Hyper-V - * 'd2v ' : current_size Disk2vhd - * 'tap\0' : current_size XenServer - * 'CTXS' : current_size XenConverter - * - * The user can override the table values via drive options, however - * even with an override we will still use current_size for images - * that have CHS geometry of the maximum size. - */ - use_chs = (!!strncmp(footer->creator_app, "win ", 4) && - !!strncmp(footer->creator_app, "qem2", 4) && - !!strncmp(footer->creator_app, "d2v ", 4) && - !!strncmp(footer->creator_app, "CTXS", 4) && - !!memcmp(footer->creator_app, "tap", 4)) || s->force_use_chs; + /* Use CHS or current_size to determine the image size. */ + use_chs = vpc_ignore_current_size(footer) || s->force_use_chs; if (!use_chs || bs->total_sectors == VHD_MAX_GEOMETRY || s->force_use_sz) { bs->total_sectors = be64_to_cpu(footer->current_size) / @@ -721,7 +726,7 @@ vpc_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, } static int coroutine_fn GRAPH_RDLOCK -vpc_co_block_status(BlockDriverState *bs, bool want_zero, +vpc_co_block_status(BlockDriverState *bs, unsigned int mode, int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file) diff --git a/block/vvfat.c b/block/vvfat.c index 086fedf4745..814796d9185 100644 --- a/block/vvfat.c +++ b/block/vvfat.c @@ -34,8 +34,8 @@ #include "qemu/option.h" #include "qemu/bswap.h" #include "migration/blocker.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "qemu/ctype.h" #include "qemu/cutils.h" #include "qemu/error-report.h" @@ -403,7 +403,6 @@ static direntry_t *create_long_filename(BDRVVVFATState *s, const char *filename) { int number_of_entries, i; glong length; - direntry_t *entry; gunichar2 *longname = g_utf8_to_utf16(filename, -1, NULL, &length, NULL); if (!longname) { @@ -414,24 +413,24 @@ static direntry_t *create_long_filename(BDRVVVFATState *s, const char *filename) number_of_entries = DIV_ROUND_UP(length * 2, 26); for(i=0;idirectory)); + direntry_t *entry=array_get_next(&(s->directory)); entry->attributes=0xf; entry->reserved[0]=0; entry->begin=0; entry->name[0]=(number_of_entries-i)|(i==0?0x40:0); } for(i=0;i<26*number_of_entries;i++) { + unsigned char *entry=array_get(&(s->directory),s->directory.next-1-(i/26)); int offset=(i%26); if(offset<10) offset=1+offset; else if(offset<22) offset=14+offset-10; else offset=28+offset-22; - entry=array_get(&(s->directory),s->directory.next-1-(i/26)); if (i >= 2 * length + 2) { - entry->name[offset] = 0xff; + entry[offset] = 0xff; } else if (i % 2 == 0) { - entry->name[offset] = longname[i / 2] & 0xff; + entry[offset] = longname[i / 2] & 0xff; } else { - entry->name[offset] = longname[i / 2] >> 8; + entry[offset] = longname[i / 2] >> 8; } } g_free(longname); @@ -1369,8 +1368,9 @@ static int open_file(BDRVVVFATState* s,mapping_t* 
mapping) return -1; vvfat_close_current_file(s); s->current_fd = fd; - s->current_mapping = mapping; } + + s->current_mapping = mapping; return 0; } @@ -1408,7 +1408,9 @@ static inline int read_cluster(BDRVVVFATState *s,int cluster_num) assert(s->current_fd); - offset=s->cluster_size*(cluster_num-s->current_mapping->begin)+s->current_mapping->info.file.offset; + offset = s->cluster_size * + ((cluster_num - s->current_mapping->begin) + + s->current_mapping->info.file.offset); if(lseek(s->current_fd, offset, SEEK_SET)!=offset) return -3; s->cluster=s->cluster_buffer; @@ -1878,7 +1880,6 @@ get_cluster_count_for_direntry(BDRVVVFATState* s, direntry_t* direntry, const ch uint32_t cluster_num = begin_of_direntry(direntry); uint32_t offset = 0; - int first_mapping_index = -1; mapping_t* mapping = NULL; const char* basename2 = NULL; @@ -1929,8 +1930,9 @@ get_cluster_count_for_direntry(BDRVVVFATState* s, direntry_t* direntry, const ch (mapping->mode & MODE_DIRECTORY) == 0) { /* was modified in qcow */ - if (offset != mapping->info.file.offset + s->cluster_size - * (cluster_num - mapping->begin)) { + if (offset != s->cluster_size + * ((cluster_num - mapping->begin) + + mapping->info.file.offset)) { /* offset of this cluster in file chain has changed */ abort(); copy_it = 1; @@ -1939,14 +1941,9 @@ get_cluster_count_for_direntry(BDRVVVFATState* s, direntry_t* direntry, const ch if (strcmp(basename, basename2)) copy_it = 1; - first_mapping_index = array_index(&(s->mapping), mapping); - } - - if (mapping->first_mapping_index != first_mapping_index - && mapping->info.file.offset > 0) { - abort(); - copy_it = 1; } + assert(mapping->first_mapping_index == -1 + || mapping->info.file.offset > 0); /* need to write out? */ if (!was_modified && is_file(direntry)) { @@ -2404,7 +2401,7 @@ static int commit_mappings(BDRVVVFATState* s, (mapping->end - mapping->begin); } else next_mapping->info.file.offset = mapping->info.file.offset + - mapping->end - mapping->begin; + (mapping->end - mapping->begin); mapping = next_mapping; } @@ -2525,8 +2522,9 @@ commit_one_file(BDRVVVFATState* s, int dir_index, uint32_t offset) return -1; } - for (i = s->cluster_size; i < offset; i += s->cluster_size) + for (i = 0; i < offset; i += s->cluster_size) { c = modified_fat_get(s, c); + } fd = qemu_open_old(mapping->path, O_RDWR | O_CREAT | O_BINARY, 0666); if (fd < 0) { @@ -3136,9 +3134,9 @@ vvfat_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, } static int coroutine_fn vvfat_co_block_status(BlockDriverState *bs, - bool want_zero, int64_t offset, - int64_t bytes, int64_t *n, - int64_t *map, + unsigned int mode, + int64_t offset, int64_t bytes, + int64_t *n, int64_t *map, BlockDriverState **file) { *n = bytes; diff --git a/blockdev-nbd.c b/blockdev-nbd.c index 213012435f4..1e3e634b87d 100644 --- a/blockdev-nbd.c +++ b/blockdev-nbd.c @@ -10,8 +10,8 @@ */ #include "qemu/osdep.h" -#include "sysemu/blockdev.h" -#include "sysemu/block-backend.h" +#include "system/blockdev.h" +#include "system/block-backend.h" #include "hw/block/block.h" #include "qapi/error.h" #include "qapi/clone-visitor.h" @@ -21,12 +21,19 @@ #include "io/channel-socket.h" #include "io/net-listener.h" +typedef struct NBDConn { + QIOChannelSocket *cioc; + QLIST_ENTRY(NBDConn) next; +} NBDConn; + typedef struct NBDServerData { QIONetListener *listener; + uint32_t handshake_max_secs; QCryptoTLSCreds *tlscreds; char *tlsauthz; uint32_t max_connections; uint32_t connections; + QLIST_HEAD(, NBDConn) conns; } NBDServerData; static NBDServerData *nbd_server; @@ 
-51,6 +58,14 @@ int nbd_server_max_connections(void) static void nbd_blockdev_client_closed(NBDClient *client, bool ignored) { + NBDConn *conn = nbd_client_owner(client); + + assert(qemu_in_main_thread() && nbd_server); + + object_unref(OBJECT(conn->cioc)); + QLIST_REMOVE(conn, next); + g_free(conn); + nbd_client_put(client); assert(nbd_server->connections > 0); nbd_server->connections--; @@ -60,31 +75,55 @@ static void nbd_blockdev_client_closed(NBDClient *client, bool ignored) static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc, gpointer opaque) { + NBDConn *conn = g_new0(NBDConn, 1); + + assert(qemu_in_main_thread() && nbd_server); nbd_server->connections++; + object_ref(OBJECT(cioc)); + conn->cioc = cioc; + QLIST_INSERT_HEAD(&nbd_server->conns, conn, next); nbd_update_server_watch(nbd_server); qio_channel_set_name(QIO_CHANNEL(cioc), "nbd-server"); - nbd_client_new(cioc, nbd_server->tlscreds, nbd_server->tlsauthz, - nbd_blockdev_client_closed); + nbd_client_new(cioc, nbd_server->handshake_max_secs, + nbd_server->tlscreds, nbd_server->tlsauthz, + nbd_blockdev_client_closed, conn); } static void nbd_update_server_watch(NBDServerData *s) { - if (!s->max_connections || s->connections < s->max_connections) { - qio_net_listener_set_client_func(s->listener, nbd_accept, NULL, NULL); - } else { - qio_net_listener_set_client_func(s->listener, NULL, NULL, NULL); + if (s->listener) { + if (!s->max_connections || s->connections < s->max_connections) { + qio_net_listener_set_client_func(s->listener, nbd_accept, NULL, + NULL); + } else { + qio_net_listener_set_client_func(s->listener, NULL, NULL, NULL); + } } } static void nbd_server_free(NBDServerData *server) { + NBDConn *conn, *tmp; + if (!server) { return; } + /* + * Forcefully close the listener socket, and any clients that have + * not yet disconnected on their own. 
+ */ qio_net_listener_disconnect(server->listener); object_unref(OBJECT(server->listener)); + server->listener = NULL; + QLIST_FOREACH_SAFE(conn, &server->conns, next, tmp) { + qio_channel_shutdown(QIO_CHANNEL(conn->cioc), QIO_CHANNEL_SHUTDOWN_BOTH, + NULL); + } + + AIO_WAIT_WHILE_UNLOCKED(NULL, server->connections > 0); + if (server->tlscreds) { object_unref(OBJECT(server->tlscreds)); } @@ -123,9 +162,9 @@ static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp) } -void nbd_server_start(SocketAddress *addr, const char *tls_creds, - const char *tls_authz, uint32_t max_connections, - Error **errp) +void nbd_server_start(SocketAddress *addr, uint32_t handshake_max_secs, + const char *tls_creds, const char *tls_authz, + uint32_t max_connections, Error **errp) { if (nbd_server) { error_setg(errp, "NBD server already running"); @@ -134,6 +173,7 @@ void nbd_server_start(SocketAddress *addr, const char *tls_creds, nbd_server = g_new0(NBDServerData, 1); nbd_server->max_connections = max_connections; + nbd_server->handshake_max_secs = handshake_max_secs; nbd_server->listener = qio_net_listener_new(); qio_net_listener_set_name(nbd_server->listener, @@ -168,19 +208,36 @@ void nbd_server_start(SocketAddress *addr, const char *tls_creds, void nbd_server_start_options(NbdServerOptions *arg, Error **errp) { - nbd_server_start(arg->addr, arg->tls_creds, arg->tls_authz, - arg->max_connections, errp); + if (!arg->has_max_connections) { + arg->max_connections = NBD_DEFAULT_MAX_CONNECTIONS; + } + if (!arg->has_handshake_max_seconds) { + arg->handshake_max_seconds = NBD_DEFAULT_HANDSHAKE_MAX_SECS; + } + + nbd_server_start(arg->addr, arg->handshake_max_seconds, arg->tls_creds, + arg->tls_authz, arg->max_connections, errp); } -void qmp_nbd_server_start(SocketAddressLegacy *addr, +void qmp_nbd_server_start(bool has_handshake_max_secs, + uint32_t handshake_max_secs, const char *tls_creds, const char *tls_authz, bool has_max_connections, uint32_t max_connections, + SocketAddressLegacy *addr, Error **errp) { SocketAddress *addr_flat = socket_address_flatten(addr); - nbd_server_start(addr_flat, tls_creds, tls_authz, max_connections, errp); + if (!has_max_connections) { + max_connections = NBD_DEFAULT_MAX_CONNECTIONS; + } + if (!has_handshake_max_secs) { + handshake_max_secs = NBD_DEFAULT_HANDSHAKE_MAX_SECS; + } + + nbd_server_start(addr_flat, handshake_max_secs, tls_creds, tls_authz, + max_connections, errp); qapi_free_SocketAddress(addr_flat); } diff --git a/blockdev.c b/blockdev.c index 835064ed039..b451fee6e1f 100644 --- a/blockdev.c +++ b/blockdev.c @@ -31,8 +31,8 @@ */ #include "qemu/osdep.h" -#include "sysemu/block-backend.h" -#include "sysemu/blockdev.h" +#include "system/block-backend.h" +#include "system/blockdev.h" #include "hw/block/block.h" #include "block/blockjob.h" #include "block/dirty-bitmap.h" @@ -46,19 +46,19 @@ #include "qapi/qapi-commands-block.h" #include "qapi/qapi-commands-transaction.h" #include "qapi/qapi-visit-block-core.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" #include "qapi/error.h" #include "qapi/qmp/qerror.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qlist.h" #include "qapi/qobject-output-visitor.h" -#include "sysemu/sysemu.h" -#include "sysemu/iothread.h" +#include "system/system.h" +#include "system/iothread.h" #include "block/block_int.h" #include "block/trace.h" -#include "sysemu/runstate.h" -#include "sysemu/replay.h" +#include 
"system/runstate.h" +#include "system/replay.h" #include "qemu/cutils.h" #include "qemu/help_option.h" #include "qemu/main-loop.h" @@ -1132,39 +1132,41 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, int ret; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); + + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); bs = qmp_get_root_bs(device, errp); if (!bs) { - return NULL; + goto error; } if (!id && !name) { error_setg(errp, "Name or id must be provided"); - return NULL; + goto error; } if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) { - return NULL; + goto error; } ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err); if (local_err) { error_propagate(errp, local_err); - return NULL; + goto error; } if (!ret) { error_setg(errp, "Snapshot with id '%s' and name '%s' does not exist on " "device '%s'", STR_OR_NULL(id), STR_OR_NULL(name), device); - return NULL; + goto error; } bdrv_snapshot_delete(bs, id, name, &local_err); if (local_err) { error_propagate(errp, local_err); - return NULL; + goto error; } info = g_new0(SnapshotInfo, 1); @@ -1180,6 +1182,9 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, info->has_icount = true; } +error: + bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); return info; } @@ -1203,7 +1208,7 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, Error *local_err = NULL; const char *device; const char *name; - BlockDriverState *bs; + BlockDriverState *bs, *check_bs; QEMUSnapshotInfo old_sn, *sn; bool ret; int64_t rt; @@ -1211,7 +1216,7 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, int ret1; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); + bdrv_graph_rdlock_main_loop(); tran_add(tran, &internal_snapshot_drv, state); @@ -1220,14 +1225,29 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, bs = qmp_get_root_bs(device, errp); if (!bs) { + bdrv_graph_rdunlock_main_loop(); return; } state->bs = bs; + /* Need to drain while unlocked. */ + bdrv_graph_rdunlock_main_loop(); /* Paired with .clean() */ bdrv_drained_begin(bs); + GRAPH_RDLOCK_GUARD_MAINLOOP(); + + /* Make sure the root bs did not change with the drain. 
*/ + check_bs = qmp_get_root_bs(device, errp); + if (bs != check_bs) { + if (check_bs) { + error_setg(errp, "Block node of device '%s' unexpectedly changed", + device); + } /* else errp is already set */ + return; + } + if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) { return; } @@ -1295,12 +1315,14 @@ static void internal_snapshot_abort(void *opaque) Error *local_error = NULL; GLOBAL_STATE_CODE(); - GRAPH_RDLOCK_GUARD_MAINLOOP(); if (!state->created) { return; } + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); + if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) { error_reportf_err(local_error, "Failed to delete snapshot with id '%s' and " @@ -1308,6 +1330,8 @@ static void internal_snapshot_abort(void *opaque) sn->id_str, sn->name, bdrv_get_device_name(bs)); } + bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); } static void internal_snapshot_clean(void *opaque) @@ -1353,9 +1377,10 @@ static void external_snapshot_action(TransactionAction *action, const char *new_image_file; ExternalSnapshotState *state = g_new0(ExternalSnapshotState, 1); uint64_t perm, shared; + BlockDriverState *check_bs; /* TODO We'll eventually have to take a writer lock in this function */ - GRAPH_RDLOCK_GUARD_MAINLOOP(); + bdrv_graph_rdlock_main_loop(); tran_add(tran, &external_snapshot_drv, state); @@ -1388,21 +1413,35 @@ static void external_snapshot_action(TransactionAction *action, state->old_bs = bdrv_lookup_bs(device, node_name, errp); if (!state->old_bs) { + bdrv_graph_rdunlock_main_loop(); return; } + /* Need to drain while unlocked. */ + bdrv_graph_rdunlock_main_loop(); /* Paired with .clean() */ bdrv_drained_begin(state->old_bs); + bdrv_graph_rdlock_main_loop(); + + /* Make sure the associated bs did not change with the drain. 
*/ + check_bs = bdrv_lookup_bs(device, node_name, errp); + if (state->old_bs != check_bs) { + if (check_bs) { + error_setg(errp, "Block node of device '%s' unexpectedly changed", + device); + } /* else errp is already set */ + goto unlock; + } if (!bdrv_is_inserted(state->old_bs)) { error_setg(errp, "Device '%s' has no medium", bdrv_get_device_or_node_name(state->old_bs)); - return; + goto unlock; } if (bdrv_op_is_blocked(state->old_bs, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, errp)) { - return; + goto unlock; } if (!bdrv_is_read_only(state->old_bs)) { @@ -1410,7 +1449,7 @@ static void external_snapshot_action(TransactionAction *action, if (ret < 0) { error_setg_errno(errp, -ret, "Write to node '%s' failed", bdrv_get_device_or_node_name(state->old_bs)); - return; + goto unlock; } } @@ -1422,13 +1461,13 @@ static void external_snapshot_action(TransactionAction *action, if (node_name && !snapshot_node_name) { error_setg(errp, "New overlay node-name missing"); - return; + goto unlock; } if (snapshot_node_name && bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) { error_setg(errp, "New overlay node-name already in use"); - return; + goto unlock; } flags = state->old_bs->open_flags; @@ -1441,7 +1480,7 @@ static void external_snapshot_action(TransactionAction *action, int64_t size = bdrv_getlength(state->old_bs); if (size < 0) { error_setg_errno(errp, -size, "bdrv_getlength failed"); - return; + goto unlock; } bdrv_refresh_filename(state->old_bs); @@ -1452,7 +1491,7 @@ static void external_snapshot_action(TransactionAction *action, if (local_err) { error_propagate(errp, local_err); - return; + goto unlock; } } @@ -1468,7 +1507,7 @@ static void external_snapshot_action(TransactionAction *action, /* We will manually add the backing_hd field to the bs later */ if (!state->new_bs) { - return; + goto unlock; } /* @@ -1479,29 +1518,51 @@ static void external_snapshot_action(TransactionAction *action, bdrv_get_cumulative_perm(state->new_bs, &perm, &shared); if (perm & BLK_PERM_CONSISTENT_READ) { error_setg(errp, "The overlay is already in use"); - return; + goto unlock; } if (state->new_bs->drv->is_filter) { error_setg(errp, "Filters cannot be used as overlays"); - return; + goto unlock; } if (bdrv_cow_child(state->new_bs)) { error_setg(errp, "The overlay already has a backing image"); - return; + goto unlock; } if (!state->new_bs->drv->supports_backing) { error_setg(errp, "The overlay does not support backing images"); - return; + goto unlock; + } + + /* + * Older QEMU versions have allowed adding an active parent node to an + * inactive child node. This is unsafe in the general case, but there is an + * important use case, which is taking a VM snapshot with migration to file + * and then adding an external snapshot while the VM is still stopped and + * images are inactive. Requiring the user to explicitly create the overlay + * as inactive would break compatibility, so just do it automatically here + * to keep this working. 
+ */ + if (bdrv_is_inactive(state->old_bs) && !bdrv_is_inactive(state->new_bs)) { + bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); + ret = bdrv_inactivate(state->new_bs, errp); + bdrv_drain_all_end(); + if (ret < 0) { + goto unlock; + } } ret = bdrv_append(state->new_bs, state->old_bs, errp); if (ret < 0) { - return; + goto unlock; } state->overlay_appended = true; +unlock: + bdrv_graph_rdunlock_main_loop(); } static void external_snapshot_commit(void *opaque) @@ -1525,10 +1586,18 @@ static void external_snapshot_abort(void *opaque) AioContext *tmp_context; int ret; + bdrv_graph_wrlock_drained(); + aio_context = bdrv_get_aio_context(state->old_bs); - bdrv_ref(state->old_bs); /* we can't let bdrv_set_backind_hd() - close state->old_bs; we need it */ + /* + * Note that state->old_bs would not disappear during the + * write-locked section, because the unref from + * bdrv_set_backing_hd() only happens at the end of the write-locked + * section. However, just be explicit about keeping a reference and + * don't rely on that implicit detail. + */ + bdrv_ref(state->old_bs); bdrv_set_backing_hd(state->new_bs, NULL, &error_abort); /* @@ -1538,16 +1607,14 @@ static void external_snapshot_abort(void *opaque) */ tmp_context = bdrv_get_aio_context(state->old_bs); if (aio_context != tmp_context) { - ret = bdrv_try_change_aio_context(state->old_bs, - aio_context, NULL, NULL); + ret = bdrv_try_change_aio_context_locked(state->old_bs, + aio_context, NULL, + NULL); assert(ret == 0); } - bdrv_drained_begin(state->new_bs); - bdrv_graph_wrlock(); bdrv_replace_node(state->new_bs, state->old_bs, &error_abort); bdrv_graph_wrunlock(); - bdrv_drained_end(state->new_bs); bdrv_unref(state->old_bs); /* bdrv_replace_node() ref'ed old_bs */ } @@ -1715,7 +1782,10 @@ static void drive_backup_action(DriveBackup *backup, } if (set_backing_hd) { - if (bdrv_set_backing_hd(target_bs, source, errp) < 0) { + bdrv_graph_wrlock_drained(); + ret = bdrv_set_backing_hd(target_bs, source, errp); + bdrv_graph_wrunlock(); + if (ret < 0) { goto unref; } } @@ -2625,6 +2695,7 @@ static BlockJob *do_backup_common(BackupCommon *backup, BdrvDirtyBitmap *bmap = NULL; BackupPerf perf = { .max_workers = 64 }; int job_flags = JOB_DEFAULT; + OnCbwError on_cbw_error = ON_CBW_ERROR_BREAK_GUEST_WRITE; if (!backup->has_speed) { backup->speed = 0; @@ -2655,6 +2726,9 @@ static BlockJob *do_backup_common(BackupCommon *backup, if (backup->x_perf->has_max_chunk) { perf.max_chunk = backup->x_perf->max_chunk; } + if (backup->x_perf->has_min_cluster_size) { + perf.min_cluster_size = backup->x_perf->min_cluster_size; + } } if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) || @@ -2726,6 +2800,10 @@ static BlockJob *do_backup_common(BackupCommon *backup, job_flags |= JOB_MANUAL_DISMISS; } + if (backup->has_on_cbw_error) { + on_cbw_error = backup->on_cbw_error; + } + job = backup_job_create(backup->job_id, bs, target_bs, backup->speed, backup->sync, bmap, backup->bitmap_mode, backup->compress, backup->discard_source, @@ -2733,6 +2811,7 @@ static BlockJob *do_backup_common(BackupCommon *backup, &perf, backup->on_source_error, backup->on_target_error, + on_cbw_error, job_flags, NULL, NULL, txn, errp); return job; } @@ -2779,7 +2858,7 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, const char *replaces, enum MirrorSyncMode sync, BlockMirrorBackingMode backing_mode, - bool zero_target, + bool target_is_zero, bool has_speed, int64_t speed, bool has_granularity, uint32_t granularity, bool 
has_buf_size, int64_t buf_size, @@ -2846,10 +2925,6 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, return; } - if (!bdrv_backing_chain_next(bs) && sync == MIRROR_SYNC_MODE_TOP) { - sync = MIRROR_SYNC_MODE_FULL; - } - if (!replaces) { /* We want to mirror from @bs, but keep implicit filters on top */ unfiltered_bs = bdrv_skip_implicit_filters(bs); @@ -2890,11 +2965,10 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, /* pass the node name to replace to mirror start since it's loose coupling * and will allow to check whether the node still exist at mirror completion */ - mirror_start(job_id, bs, target, - replaces, job_flags, - speed, granularity, buf_size, sync, backing_mode, zero_target, - on_source_error, on_target_error, unmap, filter_node_name, - copy_mode, errp); + mirror_start(job_id, bs, target, replaces, job_flags, + speed, granularity, buf_size, sync, backing_mode, + target_is_zero, on_source_error, on_target_error, unmap, + filter_node_name, copy_mode, errp); } void qmp_drive_mirror(DriveMirror *arg, Error **errp) @@ -2908,7 +2982,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) int flags; int64_t size; const char *format = arg->format; - bool zero_target; + bool target_is_zero; int ret; bs = qmp_get_root_bs(arg->device, errp); @@ -3022,9 +3096,8 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) } bdrv_graph_rdlock_main_loop(); - zero_target = (arg->sync == MIRROR_SYNC_MODE_FULL && - (arg->mode == NEW_IMAGE_MODE_EXISTING || - !bdrv_has_zero_init(target_bs))); + target_is_zero = (arg->mode != NEW_IMAGE_MODE_EXISTING && + bdrv_has_zero_init(target_bs)); bdrv_graph_rdunlock_main_loop(); @@ -3036,7 +3109,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) blockdev_mirror_common(arg->job_id, bs, target_bs, arg->replaces, arg->sync, - backing_mode, zero_target, + backing_mode, target_is_zero, arg->has_speed, arg->speed, arg->has_granularity, arg->granularity, arg->has_buf_size, arg->buf_size, @@ -3066,13 +3139,13 @@ void qmp_blockdev_mirror(const char *job_id, bool has_copy_mode, MirrorCopyMode copy_mode, bool has_auto_finalize, bool auto_finalize, bool has_auto_dismiss, bool auto_dismiss, + bool has_target_is_zero, bool target_is_zero, Error **errp) { BlockDriverState *bs; BlockDriverState *target_bs; AioContext *aio_context; BlockMirrorBackingMode backing_mode = MIRROR_LEAVE_BACKING_CHAIN; - bool zero_target; int ret; bs = qmp_get_root_bs(device, errp); @@ -3085,8 +3158,6 @@ void qmp_blockdev_mirror(const char *job_id, return; } - zero_target = (sync == MIRROR_SYNC_MODE_FULL); - aio_context = bdrv_get_aio_context(bs); ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); @@ -3096,7 +3167,8 @@ void qmp_blockdev_mirror(const char *job_id, blockdev_mirror_common(job_id, bs, target_bs, replaces, sync, backing_mode, - zero_target, has_speed, speed, + has_target_is_zero && target_is_zero, + has_speed, speed, has_granularity, granularity, has_buf_size, buf_size, has_on_source_error, on_source_error, @@ -3452,6 +3524,49 @@ void qmp_blockdev_del(const char *node_name, Error **errp) bdrv_unref(bs); } +void qmp_blockdev_set_active(const char *node_name, bool active, Error **errp) +{ + BlockDriverState *bs; + int ret; + + GLOBAL_STATE_CODE(); + + if (!node_name) { + if (active) { + bdrv_activate_all(errp); + } else { + ret = bdrv_inactivate_all(); + if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to inactivate all nodes"); + } + } + return; + } + + if (!active) { + bdrv_drain_all_begin(); 
+ } + bdrv_graph_rdlock_main_loop(); + + bs = bdrv_find_node(node_name); + if (!bs) { + error_setg(errp, "Failed to find node with node-name='%s'", + node_name); + goto unlock; + } + if (active) { + bdrv_activate(bs, errp); + } else { + bdrv_inactivate(bs, errp); + } + +unlock: + bdrv_graph_rdunlock_main_loop(); + if (!active) { + bdrv_drain_all_end(); + } +} + static BdrvChild * GRAPH_RDLOCK bdrv_find_child(BlockDriverState *parent_bs, const char *child_name) { @@ -3472,7 +3587,7 @@ void qmp_x_blockdev_change(const char *parent, const char *child, BlockDriverState *parent_bs, *new_bs = NULL; BdrvChild *p_child; - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); parent_bs = bdrv_lookup_bs(parent, parent, errp); if (!parent_bs) { @@ -3542,12 +3657,13 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread, AioContext *new_context; BlockDriverState *bs; - GRAPH_RDLOCK_GUARD_MAINLOOP(); + bdrv_drain_all_begin(); + bdrv_graph_rdlock_main_loop(); bs = bdrv_find_node(node_name); if (!bs) { error_setg(errp, "Failed to find node with node-name='%s'", node_name); - return; + goto out; } /* Protects against accidents. */ @@ -3555,14 +3671,14 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread, error_setg(errp, "Node %s is associated with a BlockBackend and could " "be in use (use force=true to override this check)", node_name); - return; + goto out; } if (iothread->type == QTYPE_QSTRING) { IOThread *obj = iothread_by_id(iothread->u.s); if (!obj) { error_setg(errp, "Cannot find iothread %s", iothread->u.s); - return; + goto out; } new_context = iothread_get_aio_context(obj); @@ -3570,7 +3686,11 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread, new_context = qemu_get_aio_context(); } - bdrv_try_change_aio_context(bs, new_context, NULL, errp); + bdrv_try_change_aio_context_locked(bs, new_context, NULL, errp); + +out: + bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); } QemuOptsList qemu_common_drive_opts = { diff --git a/blockjob.c b/blockjob.c index d5f29e14af2..db7c3a69a0c 100644 --- a/blockjob.c +++ b/blockjob.c @@ -29,7 +29,7 @@ #include "block/blockjob_int.h" #include "block/block_int.h" #include "block/trace.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qapi/error.h" #include "qapi/qapi-events-block-core.h" #include "qapi/qmp/qerror.h" @@ -144,9 +144,9 @@ static TransactionActionDrv change_child_job_context = { .clean = g_free, }; -static bool child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx, - GHashTable *visited, Transaction *tran, - Error **errp) +static bool GRAPH_RDLOCK +child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx, GHashTable *visited, + Transaction *tran, Error **errp) { BlockJob *job = c->opaque; BdrvStateChildJobContext *s; @@ -198,7 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job) * one to make sure that such a concurrent access does not attempt * to process an already freed BdrvChild. 
*/ - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); while (job->nodes) { GSList *l = job->nodes; BdrvChild *c = l->data; @@ -496,7 +496,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, int ret; GLOBAL_STATE_CODE(); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); if (job_id == NULL && !(flags & JOB_INTERNAL)) { job_id = bdrv_get_device_name(bs); @@ -539,8 +539,6 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, goto fail; } - bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker); - if (!block_job_set_speed(job, speed, errp)) { goto fail; } diff --git a/bsd-user/aarch64/target_arch_cpu.h b/bsd-user/aarch64/target_arch_cpu.h index b288e0d069b..87fbf6d6775 100644 --- a/bsd-user/aarch64/target_arch_cpu.h +++ b/bsd-user/aarch64/target_arch_cpu.h @@ -43,7 +43,7 @@ static inline void target_cpu_init(CPUARMState *env, } -static inline void target_cpu_loop(CPUARMState *env) +static inline G_NORETURN void target_cpu_loop(CPUARMState *env) { CPUState *cs = env_cpu(env); int trapnr, ec, fsc, si_code, si_signo; diff --git a/bsd-user/arm/target_arch_cpu.h b/bsd-user/arm/target_arch_cpu.h index 517d0087644..bc2eaa0bf4e 100644 --- a/bsd-user/arm/target_arch_cpu.h +++ b/bsd-user/arm/target_arch_cpu.h @@ -37,7 +37,7 @@ static inline void target_cpu_init(CPUARMState *env, } } -static inline void target_cpu_loop(CPUARMState *env) +static inline G_NORETURN void target_cpu_loop(CPUARMState *env) { int trapnr, si_signo, si_code; CPUState *cs = env_cpu(env); diff --git a/bsd-user/bsd-mem.h b/bsd-user/bsd-mem.h index eef6b222d9e..1be906c5914 100644 --- a/bsd-user/bsd-mem.h +++ b/bsd-user/bsd-mem.h @@ -56,7 +56,9 @@ #include #include "qemu-bsd.h" +#include "exec/mmap-lock.h" #include "exec/page-protection.h" +#include "user/page-protection.h" extern struct bsd_shm_regions bsd_shm_regions[]; extern abi_ulong target_brk; @@ -369,9 +371,11 @@ static inline abi_long do_bsd_shmat(int shmid, abi_ulong shmaddr, int shmflg) if (shmaddr) { host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg); } else { + abi_ulong alignment; abi_ulong mmap_start; - mmap_start = mmap_find_vma(0, shm_info.shm_segsz); + alignment = 0; /* alignment above page size not required */ + mmap_start = mmap_find_vma(0, shm_info.shm_segsz, alignment); if (mmap_start == -1) { return -TARGET_ENOMEM; diff --git a/bsd-user/elfload.c b/bsd-user/elfload.c index 833fa3bd057..3bca0cc9ede 100644 --- a/bsd-user/elfload.c +++ b/bsd-user/elfload.c @@ -44,7 +44,7 @@ static inline void memcpy_fromfs(void *to, const void *from, unsigned long n) memcpy(to, from, n); } -#ifdef BSWAP_NEEDED +#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN static void bswap_ehdr(struct elfhdr *ehdr) { bswap16s(&ehdr->e_type); /* Object file type */ @@ -111,7 +111,7 @@ static void bswap_note(struct elf_note *en) bswap32s(&en->n_type); } -#else /* ! BSWAP_NEEDED */ +#else static void bswap_ehdr(struct elfhdr *ehdr) { } static void bswap_phdr(struct elf_phdr *phdr, int phnum) { } @@ -119,7 +119,7 @@ static void bswap_shdr(struct elf_shdr *shdr, int shnum) { } static void bswap_sym(struct elf_sym *sym) { } static void bswap_note(struct elf_note *en) { } -#endif /* ! 
BSWAP_NEEDED */ +#endif /* HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN */ #include "elfcore.c" diff --git a/bsd-user/i386/target_arch_cpu.h b/bsd-user/i386/target_arch_cpu.h index 9bf2c4244b7..5d4c931decd 100644 --- a/bsd-user/i386/target_arch_cpu.h +++ b/bsd-user/i386/target_arch_cpu.h @@ -102,7 +102,7 @@ static inline void target_cpu_init(CPUX86State *env, env->segs[R_FS].selector = 0; } -static inline void target_cpu_loop(CPUX86State *env) +static inline G_NORETURN void target_cpu_loop(CPUX86State *env) { CPUState *cs = env_cpu(env); int trapnr; diff --git a/bsd-user/main.c b/bsd-user/main.c index cc980e6f401..7e5d4bbce09 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -36,8 +36,9 @@ #include "qemu/help_option.h" #include "qemu/module.h" #include "qemu/plugin.h" -#include "exec/exec-all.h" #include "user/guest-base.h" +#include "user/page-protection.h" +#include "accel/accel-ops.h" #include "tcg/startup.h" #include "qemu/timer.h" #include "qemu/envlist.h" @@ -60,6 +61,7 @@ uintptr_t qemu_host_page_size; intptr_t qemu_host_page_mask; static bool opt_one_insn_per_tb; +static unsigned long opt_tb_size; uintptr_t guest_base; bool have_guest_base; /* @@ -88,6 +90,7 @@ bool have_guest_base; #endif unsigned long reserved_va; +unsigned long guest_addr_max; const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX; const char *qemu_uname_release; @@ -169,9 +172,13 @@ static void usage(void) " (use '-d help' for a list of log items)\n" "-D logfile write logs to 'logfile' (default stderr)\n" "-one-insn-per-tb run with one guest instruction per emulated TB\n" + "-tb-size size TCG translation block cache size\n" "-strace log system calls\n" "-trace [[enable=]][,events=][,file=]\n" " specify tracing options\n" +#ifdef CONFIG_PLUGIN + "-plugin [file=][,=]\n" +#endif "\n" "Environment variables:\n" "QEMU_STRACE Print system calls and arguments similar to the\n" @@ -222,6 +229,8 @@ static void init_task_state(TaskState *ts) }; } +static QemuPluginList plugins = QTAILQ_HEAD_INITIALIZER(plugins); + void gemu_log(const char *fmt, ...) { va_list ap; @@ -304,6 +313,7 @@ int main(int argc, char **argv) cpu_model = NULL; qemu_add_opts(&qemu_trace_opts); + qemu_plugin_add_opts(); optind = 1; for (;;) { @@ -387,10 +397,20 @@ int main(int argc, char **argv) seed_optarg = optarg; } else if (!strcmp(r, "one-insn-per-tb")) { opt_one_insn_per_tb = true; + } else if (!strcmp(r, "tb-size")) { + r = argv[optind++]; + if (qemu_strtoul(r, NULL, 0, &opt_tb_size)) { + usage(); + } } else if (!strcmp(r, "strace")) { do_strace = 1; } else if (!strcmp(r, "trace")) { trace_opt_parse(optarg); +#ifdef CONFIG_PLUGIN + } else if (!strcmp(r, "plugin")) { + r = argv[optind++]; + qemu_plugin_opt_parse(r, &plugins); +#endif } else if (!strcmp(r, "0")) { argv0 = argv[optind++]; } else { @@ -425,6 +445,7 @@ int main(int argc, char **argv) exit(1); } trace_init_file(); + qemu_plugin_load_list(&plugins, &error_fatal); /* Zero out regs */ memset(regs, 0, sizeof(struct target_pt_regs)); @@ -452,7 +473,9 @@ int main(int argc, char **argv) accel_init_interfaces(ac); object_property_set_bool(OBJECT(accel), "one-insn-per-tb", opt_one_insn_per_tb, &error_abort); - ac->init_machine(NULL); + object_property_set_int(OBJECT(accel), "tb-size", + opt_tb_size, &error_abort); + ac->init_machine(accel, NULL); } /* @@ -491,6 +514,13 @@ int main(int argc, char **argv) /* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. 
*/ reserved_va = max_reserved_va; } + if (reserved_va != 0) { + guest_addr_max = reserved_va; + } else if (MIN(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) { + guest_addr_max = UINT32_MAX; + } else { + guest_addr_max = ~0ul; + } if (getenv("QEMU_STRACE")) { do_strace = 1; @@ -601,6 +631,7 @@ int main(int argc, char **argv) init_task_state(ts); ts->info = info; ts->bprm = &bprm; + ts->ts_tid = qemu_get_thread_id(); cpu->opaque = ts; target_set_brk(info->brk); @@ -617,8 +648,7 @@ int main(int argc, char **argv) target_cpu_init(env, regs); if (gdbstub) { - gdbserver_start(gdbstub); - gdb_handlesig(cpu, 0, NULL, NULL, 0); + gdbserver_start(gdbstub, &error_fatal); } cpu_loop(env); /* never exits */ diff --git a/bsd-user/meson.build b/bsd-user/meson.build index 39bad0ae33e..37b7cd6de87 100644 --- a/bsd-user/meson.build +++ b/bsd-user/meson.build @@ -13,6 +13,7 @@ bsd_user_ss.add(files( 'elfload.c', 'main.c', 'mmap.c', + 'plugin-api.c', 'signal.c', 'strace.c', 'uaccess.c', diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c index f3a4f1712da..47e317517cb 100644 --- a/bsd-user/mmap.c +++ b/bsd-user/mmap.c @@ -17,7 +17,9 @@ * along with this program; if not, see . */ #include "qemu/osdep.h" +#include "exec/mmap-lock.h" #include "exec/page-protection.h" +#include "user/page-protection.h" #include "qemu.h" @@ -128,6 +130,40 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot) return ret; } +/* + * Perform a pread on behalf of target_mmap. We can reach EOF, we can be + * interrupted by signals, and in general there's no good error return path. + * If @zero, zero the rest of the block at EOF. + * Return true on success. + */ +static bool mmap_pread(int fd, void *p, size_t len, off_t offset, bool zero) +{ + while (1) { + ssize_t r = pread(fd, p, len, offset); + + if (likely(r == len)) { + /* Complete */ + return true; + } + if (r == 0) { + /* EOF */ + if (zero) { + memset(p, 0, len); + } + return true; + } + if (r > 0) { + /* Short read */ + p += r; + len -= r; + offset += r; + } else if (errno != EINTR) { + /* Error */ + return false; + } + } +} + /* * map an incomplete host page * @@ -190,7 +226,7 @@ static int mmap_frag(abi_ulong real_start, mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE); /* read the corresponding file data */ - if (pread(fd, g2h_untagged(start), end - start, offset) == -1) { + if (!mmap_pread(fd, g2h_untagged(start), end - start, offset, true)) { return -1; } @@ -240,8 +276,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size, * It must be called with mmap_lock() held. * Return -1 if error. */ -static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size, - abi_ulong alignment) +abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong alignment) { void *ptr, *prev; abi_ulong addr; @@ -360,11 +395,6 @@ static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size, } } -abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size) -{ - return mmap_find_vma_aligned(start, size, 0); -} - /* NOTE: all the constants are the HOST ones */ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, int flags, int fd, off_t offset) @@ -454,13 +484,12 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, * before we truncate the length for mapping files below. 
*/ if (!(flags & MAP_FIXED)) { + abi_ulong alignment; + host_len = len + offset - host_offset; host_len = HOST_PAGE_ALIGN(host_len); - if ((flags & MAP_ALIGNMENT_MASK) != 0) - start = mmap_find_vma_aligned(real_start, host_len, - (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT); - else - start = mmap_find_vma(real_start, host_len); + alignment = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT; + start = mmap_find_vma(real_start, host_len, alignment); if (start == (abi_ulong)-1) { errno = ENOMEM; goto fail; @@ -565,7 +594,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, -1, 0); if (retaddr == -1) goto fail; - if (pread(fd, g2h_untagged(start), len, offset) == -1) { + if (!mmap_pread(fd, g2h_untagged(start), len, offset, false)) { goto fail; } if (!(prot & PROT_WRITE)) { diff --git a/bsd-user/plugin-api.c b/bsd-user/plugin-api.c new file mode 100644 index 00000000000..6ccef7eaa04 --- /dev/null +++ b/bsd-user/plugin-api.c @@ -0,0 +1,15 @@ +/* + * QEMU Plugin API - bsd-user-mode only implementations + * + * Common user-mode only APIs are in plugins/api-user. These helpers + * are only specific to bsd-user. + * + * Copyright (C) 2017, Emilio G. Cota + * Copyright (C) 2019-2025, Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "common-user/plugin-api.c.inc" diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h index 3736c417860..93388e7c34e 100644 --- a/bsd-user/qemu.h +++ b/bsd-user/qemu.h @@ -22,14 +22,16 @@ #include "qemu/int128.h" #include "cpu.h" #include "qemu/units.h" -#include "exec/cpu_ldst.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-ldst.h" #include "user/abitypes.h" +#include "user/cpu_loop.h" +#include "user/page-protection.h" extern char **environ; #include "user/thunk.h" +#include "user/mmap.h" #include "target_arch.h" #include "syscall_defs.h" #include "target_syscall.h" @@ -38,7 +40,6 @@ extern char **environ; #include "target.h" #include "exec/gdbstub.h" #include "exec/page-protection.h" -#include "qemu/clang-tsa.h" #include "accel/tcg/vcpu-state.h" #include "qemu-os.h" @@ -186,7 +187,6 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1, abi_long arg5, abi_long arg6); void gemu_log(const char *fmt, ...) 
G_GNUC_PRINTF(1, 2); extern __thread CPUState *thread_cpu; -void cpu_loop(CPUArchState *env); char *target_strerror(int err); int get_osversion(void); void fork_start(void); @@ -233,19 +233,8 @@ void print_taken_signal(int target_signum, const target_siginfo_t *tinfo); extern int do_strace; /* mmap.c */ -int target_mprotect(abi_ulong start, abi_ulong len, int prot); -abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, - int flags, int fd, off_t offset); -int target_munmap(abi_ulong start, abi_ulong len); -abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, - abi_ulong new_size, unsigned long flags, - abi_ulong new_addr); int target_msync(abi_ulong start, abi_ulong len, int flags); -extern abi_ulong mmap_next_start; -abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size); void mmap_reserve(abi_ulong start, abi_ulong size); -void TSA_NO_TSA mmap_fork_start(void); -void TSA_NO_TSA mmap_fork_end(int child); /* main.c */ extern char qemu_proc_pathname[]; diff --git a/bsd-user/riscv/signal.c b/bsd-user/riscv/signal.c new file mode 100644 index 00000000000..10c940cd491 --- /dev/null +++ b/bsd-user/riscv/signal.c @@ -0,0 +1,170 @@ +/* + * RISC-V signal definitions + * + * Copyright (c) 2019 Mark Corbin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ +#include "qemu/osdep.h" + +#include "qemu.h" + +/* + * Compare with sendsig() in riscv/riscv/exec_machdep.c + * Assumes that target stack frame memory is locked. + */ +abi_long +set_sigtramp_args(CPURISCVState *regs, int sig, struct target_sigframe *frame, + abi_ulong frame_addr, struct target_sigaction *ka) +{ + /* + * Arguments to signal handler: + * a0 (10) = signal number + * a1 (11) = siginfo pointer + * a2 (12) = ucontext pointer + * pc = signal pointer handler + * sp (2) = sigframe pointer + * ra (1) = sigtramp at base of user stack + */ + + regs->gpr[xA0] = sig; + regs->gpr[xA1] = frame_addr + + offsetof(struct target_sigframe, sf_si); + regs->gpr[xA2] = frame_addr + + offsetof(struct target_sigframe, sf_uc); + regs->pc = ka->_sa_handler; + regs->gpr[xSP] = frame_addr; + regs->gpr[xRA] = TARGET_PS_STRINGS - TARGET_SZSIGCODE; + return 0; +} + +/* + * Compare to riscv/riscv/exec_machdep.c sendsig() + * Assumes that the memory is locked if frame points to user memory. + */ +abi_long setup_sigframe_arch(CPURISCVState *env, abi_ulong frame_addr, + struct target_sigframe *frame, int flags) +{ + target_mcontext_t *mcp = &frame->sf_uc.uc_mcontext; + + get_mcontext(env, mcp, flags); + return 0; +} + +/* + * Compare with get_mcontext() in riscv/riscv/machdep.c + * Assumes that the memory is locked if mcp points to user memory. 
+ */ +abi_long get_mcontext(CPURISCVState *regs, target_mcontext_t *mcp, + int flags) +{ + + mcp->mc_gpregs.gp_t[0] = tswap64(regs->gpr[5]); + mcp->mc_gpregs.gp_t[1] = tswap64(regs->gpr[6]); + mcp->mc_gpregs.gp_t[2] = tswap64(regs->gpr[7]); + mcp->mc_gpregs.gp_t[3] = tswap64(regs->gpr[28]); + mcp->mc_gpregs.gp_t[4] = tswap64(regs->gpr[29]); + mcp->mc_gpregs.gp_t[5] = tswap64(regs->gpr[30]); + mcp->mc_gpregs.gp_t[6] = tswap64(regs->gpr[31]); + + mcp->mc_gpregs.gp_s[0] = tswap64(regs->gpr[8]); + mcp->mc_gpregs.gp_s[1] = tswap64(regs->gpr[9]); + mcp->mc_gpregs.gp_s[2] = tswap64(regs->gpr[18]); + mcp->mc_gpregs.gp_s[3] = tswap64(regs->gpr[19]); + mcp->mc_gpregs.gp_s[4] = tswap64(regs->gpr[20]); + mcp->mc_gpregs.gp_s[5] = tswap64(regs->gpr[21]); + mcp->mc_gpregs.gp_s[6] = tswap64(regs->gpr[22]); + mcp->mc_gpregs.gp_s[7] = tswap64(regs->gpr[23]); + mcp->mc_gpregs.gp_s[8] = tswap64(regs->gpr[24]); + mcp->mc_gpregs.gp_s[9] = tswap64(regs->gpr[25]); + mcp->mc_gpregs.gp_s[10] = tswap64(regs->gpr[26]); + mcp->mc_gpregs.gp_s[11] = tswap64(regs->gpr[27]); + + mcp->mc_gpregs.gp_a[0] = tswap64(regs->gpr[10]); + mcp->mc_gpregs.gp_a[1] = tswap64(regs->gpr[11]); + mcp->mc_gpregs.gp_a[2] = tswap64(regs->gpr[12]); + mcp->mc_gpregs.gp_a[3] = tswap64(regs->gpr[13]); + mcp->mc_gpregs.gp_a[4] = tswap64(regs->gpr[14]); + mcp->mc_gpregs.gp_a[5] = tswap64(regs->gpr[15]); + mcp->mc_gpregs.gp_a[6] = tswap64(regs->gpr[16]); + mcp->mc_gpregs.gp_a[7] = tswap64(regs->gpr[17]); + + if (flags & TARGET_MC_GET_CLEAR_RET) { + mcp->mc_gpregs.gp_a[0] = 0; /* a0 */ + mcp->mc_gpregs.gp_a[1] = 0; /* a1 */ + mcp->mc_gpregs.gp_t[0] = 0; /* clear syscall error */ + } + + mcp->mc_gpregs.gp_ra = tswap64(regs->gpr[1]); + mcp->mc_gpregs.gp_sp = tswap64(regs->gpr[2]); + mcp->mc_gpregs.gp_gp = tswap64(regs->gpr[3]); + mcp->mc_gpregs.gp_tp = tswap64(regs->gpr[4]); + mcp->mc_gpregs.gp_sepc = tswap64(regs->pc); + + return 0; +} + +/* Compare with set_mcontext() in riscv/riscv/exec_machdep.c */ +abi_long set_mcontext(CPURISCVState *regs, target_mcontext_t *mcp, + int srflag) +{ + + regs->gpr[5] = tswap64(mcp->mc_gpregs.gp_t[0]); + regs->gpr[6] = tswap64(mcp->mc_gpregs.gp_t[1]); + regs->gpr[7] = tswap64(mcp->mc_gpregs.gp_t[2]); + regs->gpr[28] = tswap64(mcp->mc_gpregs.gp_t[3]); + regs->gpr[29] = tswap64(mcp->mc_gpregs.gp_t[4]); + regs->gpr[30] = tswap64(mcp->mc_gpregs.gp_t[5]); + regs->gpr[31] = tswap64(mcp->mc_gpregs.gp_t[6]); + + regs->gpr[8] = tswap64(mcp->mc_gpregs.gp_s[0]); + regs->gpr[9] = tswap64(mcp->mc_gpregs.gp_s[1]); + regs->gpr[18] = tswap64(mcp->mc_gpregs.gp_s[2]); + regs->gpr[19] = tswap64(mcp->mc_gpregs.gp_s[3]); + regs->gpr[20] = tswap64(mcp->mc_gpregs.gp_s[4]); + regs->gpr[21] = tswap64(mcp->mc_gpregs.gp_s[5]); + regs->gpr[22] = tswap64(mcp->mc_gpregs.gp_s[6]); + regs->gpr[23] = tswap64(mcp->mc_gpregs.gp_s[7]); + regs->gpr[24] = tswap64(mcp->mc_gpregs.gp_s[8]); + regs->gpr[25] = tswap64(mcp->mc_gpregs.gp_s[9]); + regs->gpr[26] = tswap64(mcp->mc_gpregs.gp_s[10]); + regs->gpr[27] = tswap64(mcp->mc_gpregs.gp_s[11]); + + regs->gpr[10] = tswap64(mcp->mc_gpregs.gp_a[0]); + regs->gpr[11] = tswap64(mcp->mc_gpregs.gp_a[1]); + regs->gpr[12] = tswap64(mcp->mc_gpregs.gp_a[2]); + regs->gpr[13] = tswap64(mcp->mc_gpregs.gp_a[3]); + regs->gpr[14] = tswap64(mcp->mc_gpregs.gp_a[4]); + regs->gpr[15] = tswap64(mcp->mc_gpregs.gp_a[5]); + regs->gpr[16] = tswap64(mcp->mc_gpregs.gp_a[6]); + regs->gpr[17] = tswap64(mcp->mc_gpregs.gp_a[7]); + + + regs->gpr[1] = tswap64(mcp->mc_gpregs.gp_ra); + regs->gpr[2] = tswap64(mcp->mc_gpregs.gp_sp); + regs->gpr[3] 
= tswap64(mcp->mc_gpregs.gp_gp); + regs->gpr[4] = tswap64(mcp->mc_gpregs.gp_tp); + regs->pc = tswap64(mcp->mc_gpregs.gp_sepc); + + return 0; +} + +/* Compare with sys_sigreturn() in riscv/riscv/machdep.c */ +abi_long get_ucontext_sigreturn(CPURISCVState *regs, + abi_ulong target_sf, abi_ulong *target_uc) +{ + + *target_uc = target_sf; + return 0; +} diff --git a/bsd-user/riscv/target.h b/bsd-user/riscv/target.h new file mode 100644 index 00000000000..036ddd185e2 --- /dev/null +++ b/bsd-user/riscv/target.h @@ -0,0 +1,20 @@ +/* + * Riscv64 general target stuff that's common to all aarch details + * + * Copyright (c) 2022 M. Warner Losh + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef TARGET_H +#define TARGET_H + +/* + * riscv64 ABI does not 'lump' the registers for 64-bit args. + */ +static inline bool regpairs_aligned(void *cpu_env) +{ + return false; +} + +#endif /* TARGET_H */ diff --git a/bsd-user/riscv/target_arch.h b/bsd-user/riscv/target_arch.h new file mode 100644 index 00000000000..26ce07f343f --- /dev/null +++ b/bsd-user/riscv/target_arch.h @@ -0,0 +1,27 @@ +/* + * RISC-V specific prototypes + * + * Copyright (c) 2019 Mark Corbin + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#ifndef TARGET_ARCH_H +#define TARGET_ARCH_H + +#include "qemu.h" + +void target_cpu_set_tls(CPURISCVState *env, target_ulong newtls); + +#endif /* TARGET_ARCH_H */ diff --git a/bsd-user/riscv/target_arch_cpu.c b/bsd-user/riscv/target_arch_cpu.c new file mode 100644 index 00000000000..44e25d2ddf5 --- /dev/null +++ b/bsd-user/riscv/target_arch_cpu.c @@ -0,0 +1,29 @@ +/* + * RISC-V CPU related code + * + * Copyright (c) 2019 Mark Corbin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ +#include "qemu/osdep.h" + +#include "target_arch.h" + +#define TP_OFFSET 16 + +/* Compare with cpu_set_user_tls() in riscv/riscv/vm_machdep.c */ +void target_cpu_set_tls(CPURISCVState *env, target_ulong newtls) +{ + env->gpr[xTP] = newtls + TP_OFFSET; +} diff --git a/bsd-user/riscv/target_arch_cpu.h b/bsd-user/riscv/target_arch_cpu.h new file mode 100644 index 00000000000..ef92f004803 --- /dev/null +++ b/bsd-user/riscv/target_arch_cpu.h @@ -0,0 +1,148 @@ +/* + * RISC-V CPU init and loop + * + * Copyright (c) 2019 Mark Corbin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_CPU_H +#define TARGET_ARCH_CPU_H + +#include "target_arch.h" +#include "signal-common.h" + +#define TARGET_DEFAULT_CPU_MODEL "max" + +static inline void target_cpu_init(CPURISCVState *env, + struct target_pt_regs *regs) +{ + int i; + + for (i = 1; i < 32; i++) { + env->gpr[i] = regs->regs[i]; + } + + env->pc = regs->sepc; +} + +static inline G_NORETURN void target_cpu_loop(CPURISCVState *env) +{ + CPUState *cs = env_cpu(env); + int trapnr; + abi_long ret; + unsigned int syscall_num; + int32_t signo, code; + + for (;;) { + cpu_exec_start(cs); + trapnr = cpu_exec(cs); + cpu_exec_end(cs); + process_queued_cpu_work(cs); + + signo = 0; + + switch (trapnr) { + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCP_ATOMIC: + cpu_exec_step_atomic(cs); + break; + case RISCV_EXCP_U_ECALL: + syscall_num = env->gpr[xT0]; + env->pc += TARGET_INSN_SIZE; + /* Compare to cpu_fetch_syscall_args() in riscv/riscv/trap.c */ + if (TARGET_FREEBSD_NR___syscall == syscall_num || + TARGET_FREEBSD_NR_syscall == syscall_num) { + ret = do_freebsd_syscall(env, + env->gpr[xA0], + env->gpr[xA1], + env->gpr[xA2], + env->gpr[xA3], + env->gpr[xA4], + env->gpr[xA5], + env->gpr[xA6], + env->gpr[xA7], + 0); + } else { + ret = do_freebsd_syscall(env, + syscall_num, + env->gpr[xA0], + env->gpr[xA1], + env->gpr[xA2], + env->gpr[xA3], + env->gpr[xA4], + env->gpr[xA5], + env->gpr[xA6], + env->gpr[xA7] + ); + } + + /* + * Compare to cpu_set_syscall_retval() in + * riscv/riscv/vm_machdep.c + */ + if (ret >= 0) { + env->gpr[xA0] = ret; + env->gpr[xT0] = 0; + } else if (ret == -TARGET_ERESTART) { + env->pc -= TARGET_INSN_SIZE; + } else if (ret != -TARGET_EJUSTRETURN) { + env->gpr[xA0] = -ret; + env->gpr[xT0] = 1; + } + break; + case RISCV_EXCP_ILLEGAL_INST: + signo = TARGET_SIGILL; + code = TARGET_ILL_ILLOPC; + break; + case RISCV_EXCP_BREAKPOINT: + signo = TARGET_SIGTRAP; + code = TARGET_TRAP_BRKPT; + break; + case EXCP_DEBUG: + signo = TARGET_SIGTRAP; + code = TARGET_TRAP_BRKPT; + break; + default: + fprintf(stderr, "qemu: unhandled CPU exception " + "0x%x - aborting\n", trapnr); + cpu_dump_state(cs, stderr, 0); + abort(); + } + + if (signo) { + force_sig_fault(signo, code, env->pc); + } + + process_pending_signals(env); + } +} + +static inline void target_cpu_clone_regs(CPURISCVState *env, target_ulong newsp) +{ + 
if (newsp) { + env->gpr[xSP] = newsp; + } + + env->gpr[xA0] = 0; + env->gpr[xT0] = 0; +} + +static inline void target_cpu_reset(CPUArchState *env) +{ +} + +#endif /* TARGET_ARCH_CPU_H */ diff --git a/bsd-user/riscv/target_arch_elf.h b/bsd-user/riscv/target_arch_elf.h new file mode 100644 index 00000000000..4eb915e61ec --- /dev/null +++ b/bsd-user/riscv/target_arch_elf.h @@ -0,0 +1,42 @@ +/* + * RISC-V ELF definitions + * + * Copyright (c) 2019 Mark Corbin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_ELF_H +#define TARGET_ARCH_ELF_H + +#define elf_check_arch(x) ((x) == EM_RISCV) +#define ELF_START_MMAP 0x80000000 +#define ELF_ET_DYN_LOAD_ADDR 0x100000 +#define ELF_CLASS ELFCLASS64 + +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_RISCV + +#define ELF_HWCAP get_elf_hwcap() +static uint32_t get_elf_hwcap(void) +{ + RISCVCPU *cpu = RISCV_CPU(thread_cpu); + + return cpu->env.misa_ext_mask; +} + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 + +#endif /* TARGET_ARCH_ELF_H */ diff --git a/bsd-user/riscv/target_arch_reg.h b/bsd-user/riscv/target_arch_reg.h new file mode 100644 index 00000000000..12b1c96b611 --- /dev/null +++ b/bsd-user/riscv/target_arch_reg.h @@ -0,0 +1,88 @@ +/* + * RISC-V register structures + * + * Copyright (c) 2019 Mark Corbin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef TARGET_ARCH_REG_H +#define TARGET_ARCH_REG_H + +/* Compare with riscv/include/reg.h */ +typedef struct target_reg { + uint64_t ra; /* return address */ + uint64_t sp; /* stack pointer */ + uint64_t gp; /* global pointer */ + uint64_t tp; /* thread pointer */ + uint64_t t[7]; /* temporaries */ + uint64_t s[12]; /* saved registers */ + uint64_t a[8]; /* function arguments */ + uint64_t sepc; /* exception program counter */ + uint64_t sstatus; /* status register */ +} target_reg_t; + +typedef struct target_fpreg { + uint64_t fp_x[32][2]; /* Floating point registers */ + uint64_t fp_fcsr; /* Floating point control reg */ +} target_fpreg_t; + +#define tswapreg(ptr) tswapal(ptr) + +/* Compare with struct trapframe in riscv/include/frame.h */ +static inline void target_copy_regs(target_reg_t *regs, + const CPURISCVState *env) +{ + + regs->ra = tswapreg(env->gpr[1]); + regs->sp = tswapreg(env->gpr[2]); + regs->gp = tswapreg(env->gpr[3]); + regs->tp = tswapreg(env->gpr[4]); + + regs->t[0] = tswapreg(env->gpr[5]); + regs->t[1] = tswapreg(env->gpr[6]); + regs->t[2] = tswapreg(env->gpr[7]); + regs->t[3] = tswapreg(env->gpr[28]); + regs->t[4] = tswapreg(env->gpr[29]); + regs->t[5] = tswapreg(env->gpr[30]); + regs->t[6] = tswapreg(env->gpr[31]); + + regs->s[0] = tswapreg(env->gpr[8]); + regs->s[1] = tswapreg(env->gpr[9]); + regs->s[2] = tswapreg(env->gpr[18]); + regs->s[3] = tswapreg(env->gpr[19]); + regs->s[4] = tswapreg(env->gpr[20]); + regs->s[5] = tswapreg(env->gpr[21]); + regs->s[6] = tswapreg(env->gpr[22]); + regs->s[7] = tswapreg(env->gpr[23]); + regs->s[8] = tswapreg(env->gpr[24]); + regs->s[9] = tswapreg(env->gpr[25]); + regs->s[10] = tswapreg(env->gpr[26]); + regs->s[11] = tswapreg(env->gpr[27]); + + regs->a[0] = tswapreg(env->gpr[10]); + regs->a[1] = tswapreg(env->gpr[11]); + regs->a[2] = tswapreg(env->gpr[12]); + regs->a[3] = tswapreg(env->gpr[13]); + regs->a[4] = tswapreg(env->gpr[14]); + regs->a[5] = tswapreg(env->gpr[15]); + regs->a[6] = tswapreg(env->gpr[16]); + regs->a[7] = tswapreg(env->gpr[17]); + + regs->sepc = tswapreg(env->pc); +} + +#undef tswapreg + +#endif /* TARGET_ARCH_REG_H */ diff --git a/bsd-user/riscv/target_arch_signal.h b/bsd-user/riscv/target_arch_signal.h new file mode 100644 index 00000000000..1a634b865b3 --- /dev/null +++ b/bsd-user/riscv/target_arch_signal.h @@ -0,0 +1,75 @@ +/* + * RISC-V signal definitions + * + * Copyright (c) 2019 Mark Corbin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_SIGNAL_H +#define TARGET_ARCH_SIGNAL_H + +#include "cpu.h" + + +#define TARGET_INSN_SIZE 4 /* riscv instruction size */ + +/* Size of the signal trampoline code placed on the stack. 
*/ +#define TARGET_SZSIGCODE ((abi_ulong)(7 * TARGET_INSN_SIZE)) + +/* Compare with riscv/include/_limits.h */ +#define TARGET_MINSIGSTKSZ (1024 * 4) +#define TARGET_SIGSTKSZ (TARGET_MINSIGSTKSZ + 32768) + +struct target_gpregs { + uint64_t gp_ra; + uint64_t gp_sp; + uint64_t gp_gp; + uint64_t gp_tp; + uint64_t gp_t[7]; + uint64_t gp_s[12]; + uint64_t gp_a[8]; + uint64_t gp_sepc; + uint64_t gp_sstatus; +}; + +struct target_fpregs { + uint64_t fp_x[32][2]; + uint64_t fp_fcsr; + uint32_t fp_flags; + uint32_t pad; +}; + +typedef struct target_mcontext { + struct target_gpregs mc_gpregs; + struct target_fpregs mc_fpregs; + uint32_t mc_flags; +#define TARGET_MC_FP_VALID 0x01 + uint32_t mc_pad; + uint64_t mc_spare[8]; +} target_mcontext_t; + +#define TARGET_MCONTEXT_SIZE 864 +#define TARGET_UCONTEXT_SIZE 936 + +#include "target_os_ucontext.h" + +struct target_sigframe { + target_ucontext_t sf_uc; /* = *sf_uncontext */ + target_siginfo_t sf_si; /* = *sf_siginfo (SA_SIGINFO case)*/ +}; + +#define TARGET_SIGSTACK_ALIGN 16 + +#endif /* TARGET_ARCH_SIGNAL_H */ diff --git a/bsd-user/riscv/target_arch_sigtramp.h b/bsd-user/riscv/target_arch_sigtramp.h new file mode 100644 index 00000000000..dfe50767390 --- /dev/null +++ b/bsd-user/riscv/target_arch_sigtramp.h @@ -0,0 +1,41 @@ +/* + * RISC-V sigcode + * + * Copyright (c) 2019 Mark Corbin + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#ifndef TARGET_ARCH_SIGTRAMP_H +#define TARGET_ARCH_SIGTRAMP_H + +/* Compare with sigcode() in riscv/riscv/locore.S */ +static inline abi_long setup_sigtramp(abi_ulong offset, unsigned sigf_uc, + unsigned sys_sigreturn) +{ + uint32_t sys_exit = TARGET_FREEBSD_NR_exit; + + uint32_t sigtramp_code[] = { + /*1*/ const_le32(0x00010513), /*mv a0, sp*/ + /*2*/ const_le32(0x00050513 + (sigf_uc << 20)), /*addi a0,a0,sigf_uc*/ + /*3*/ const_le32(0x00000293 + (sys_sigreturn << 20)),/*li t0,sys_sigreturn*/ + /*4*/ const_le32(0x00000073), /*ecall*/ + /*5*/ const_le32(0x00000293 + (sys_exit << 20)), /*li t0,sys_exit*/ + /*6*/ const_le32(0x00000073), /*ecall*/ + /*7*/ const_le32(0xFF1FF06F) /*b -16*/ + }; + + return memcpy_to_target(offset, sigtramp_code, TARGET_SZSIGCODE); +} +#endif /* TARGET_ARCH_SIGTRAMP_H */ diff --git a/bsd-user/riscv/target_arch_sysarch.h b/bsd-user/riscv/target_arch_sysarch.h new file mode 100644 index 00000000000..9af42331b4d --- /dev/null +++ b/bsd-user/riscv/target_arch_sysarch.h @@ -0,0 +1,41 @@ +/* + * RISC-V sysarch() system call emulation + * + * Copyright (c) 2019 Mark Corbin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_SYSARCH_H +#define TARGET_ARCH_SYSARCH_H + +#include "target_syscall.h" +#include "target_arch.h" + +static inline abi_long do_freebsd_arch_sysarch(CPURISCVState *env, int op, + abi_ulong parms) +{ + + return -TARGET_EOPNOTSUPP; +} + +static inline void do_freebsd_arch_print_sysarch( + const struct syscallname *name, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) +{ + + gemu_log("UNKNOWN OP: %d, " TARGET_ABI_FMT_lx ")", (int)arg1, arg2); +} + +#endif /* TARGET_ARCH_SYSARCH_H */ diff --git a/bsd-user/riscv/target_arch_thread.h b/bsd-user/riscv/target_arch_thread.h new file mode 100644 index 00000000000..95cd0b6ad7e --- /dev/null +++ b/bsd-user/riscv/target_arch_thread.h @@ -0,0 +1,47 @@ +/* + * RISC-V thread support + * + * Copyright (c) 2019 Mark Corbin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef TARGET_ARCH_THREAD_H +#define TARGET_ARCH_THREAD_H + +/* Compare with cpu_set_upcall() in riscv/riscv/vm_machdep.c */ +static inline void target_thread_set_upcall(CPURISCVState *regs, + abi_ulong entry, abi_ulong arg, abi_ulong stack_base, + abi_ulong stack_size) +{ + abi_ulong sp; + + sp = ROUND_DOWN(stack_base + stack_size, 16); + + regs->gpr[xSP] = sp; + regs->pc = entry; + regs->gpr[xA0] = arg; +} + +/* Compare with exec_setregs() in riscv/riscv/machdep.c */ +static inline void target_thread_init(struct target_pt_regs *regs, + struct image_info *infop) +{ + regs->sepc = infop->entry; + regs->regs[xRA] = infop->entry; + regs->regs[xA0] = infop->start_stack; + regs->regs[xSP] = ROUND_DOWN(infop->start_stack, 16); +} + +#endif /* TARGET_ARCH_THREAD_H */ diff --git a/bsd-user/riscv/target_arch_vmparam.h b/bsd-user/riscv/target_arch_vmparam.h new file mode 100644 index 00000000000..0f2486def1d --- /dev/null +++ b/bsd-user/riscv/target_arch_vmparam.h @@ -0,0 +1,53 @@ +/* + * RISC-V VM parameters definitions + * + * Copyright (c) 2019 Mark Corbin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef TARGET_ARCH_VMPARAM_H +#define TARGET_ARCH_VMPARAM_H + +#include "cpu.h" + +/* Compare with riscv/include/vmparam.h */ +#define TARGET_MAXTSIZ (1 * GiB) /* max text size */ +#define TARGET_DFLDSIZ (128 * MiB) /* initial data size limit */ +#define TARGET_MAXDSIZ (1 * GiB) /* max data size */ +#define TARGET_DFLSSIZ (128 * MiB) /* initial stack size limit */ +#define TARGET_MAXSSIZ (1 * GiB) /* max stack size */ +#define TARGET_SGROWSIZ (128 * KiB) /* amount to grow stack */ + +#define TARGET_VM_MINUSER_ADDRESS (0x0000000000000000UL) +#define TARGET_VM_MAXUSER_ADDRESS (0x0000004000000000UL) + +#define TARGET_USRSTACK (TARGET_VM_MAXUSER_ADDRESS - TARGET_PAGE_SIZE) + +static inline abi_ulong get_sp_from_cpustate(CPURISCVState *state) +{ + return state->gpr[xSP]; +} + +static inline void set_second_rval(CPURISCVState *state, abi_ulong retval2) +{ + state->gpr[xA1] = retval2; +} + +static inline abi_ulong get_second_rval(CPURISCVState *state) +{ + return state->gpr[xA1]; +} + +#endif /* TARGET_ARCH_VMPARAM_H */ diff --git a/bsd-user/riscv/target_syscall.h b/bsd-user/riscv/target_syscall.h new file mode 100644 index 00000000000..e7e52313095 --- /dev/null +++ b/bsd-user/riscv/target_syscall.h @@ -0,0 +1,38 @@ +/* + * RISC-V system call definitions + * + * Copyright (c) Mark Corbin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef BSD_USER_RISCV_TARGET_SYSCALL_H +#define BSD_USER_RISCV_TARGET_SYSCALL_H + +/* + * struct target_pt_regs defines the way the registers are stored on the stack + * during a system call. + */ + +struct target_pt_regs { + abi_ulong regs[32]; + abi_ulong sepc; +}; + +#define UNAME_MACHINE "riscv64" + +#define TARGET_HW_MACHINE "riscv" +#define TARGET_HW_MACHINE_ARCH UNAME_MACHINE + +#endif /* BSD_USER_RISCV_TARGET_SYSCALL_H */ diff --git a/bsd-user/signal-common.h b/bsd-user/signal-common.h index 77d7c7a78b7..4e634e04a30 100644 --- a/bsd-user/signal-common.h +++ b/bsd-user/signal-common.h @@ -42,7 +42,6 @@ void process_pending_signals(CPUArchState *env); void queue_signal(CPUArchState *env, int sig, int si_type, target_siginfo_t *info); void signal_init(void); -int target_to_host_signal(int sig); void target_to_host_sigset(sigset_t *d, const target_sigset_t *s); /* diff --git a/bsd-user/signal.c b/bsd-user/signal.c index da49b9bffc1..dadcc037dc3 100644 --- a/bsd-user/signal.c +++ b/bsd-user/signal.c @@ -21,12 +21,15 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "qemu.h" +#include "user/cpu_loop.h" #include "exec/page-protection.h" +#include "user/page-protection.h" +#include "user/signal.h" #include "user/tswap-target.h" #include "gdbstub/user.h" #include "signal-common.h" #include "trace.h" -#include "hw/core/tcg-cpu-ops.h" +#include "accel/tcg/cpu-ops.h" #include "host-signal.h" /* target_siginfo_t must fit in gdbstub's siginfo save area. */ @@ -48,6 +51,8 @@ static inline int sas_ss_flags(TaskState *ts, unsigned long sp) on_sig_stack(ts, sp) ? 
SS_ONSTACK : 0; } +int host_interrupt_signal = SIGRTMAX; + /* * The BSD ABIs use the same signal numbers across all the CPU architectures, so * (unlike Linux) these functions are just the identity mapping. This might not @@ -436,7 +441,6 @@ void queue_signal(CPUArchState *env, int sig, int si_type, ts->sync_signal.pending = sig; /* Signal that a new signal is pending. */ qatomic_set(&ts->signal_pending, 1); - return; } static int fatal_signal(int sig) @@ -488,6 +492,12 @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc) uintptr_t pc = 0; bool sync_sig = false; + if (host_sig == host_interrupt_signal) { + ts->signal_pending = 1; + cpu_exit(thread_cpu); + return; + } + /* * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special * handling wrt signal blocking and unwinding. @@ -851,6 +861,9 @@ void signal_init(void) for (i = 1; i <= TARGET_NSIG; i++) { host_sig = target_to_host_signal(i); + if (host_sig == host_interrupt_signal) { + continue; + } sigaction(host_sig, NULL, &oact); if (oact.sa_sigaction == (void *)SIG_IGN) { sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN; @@ -869,6 +882,7 @@ void signal_init(void) sigaction(host_sig, &act, NULL); } } + sigaction(host_interrupt_signal, &act, NULL); } static void handle_pending_signal(CPUArchState *env, int sig, @@ -1016,10 +1030,10 @@ void process_pending_signals(CPUArchState *env) ts->in_sigsuspend = false; } -void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, +void cpu_loop_exit_sigsegv(CPUState *cpu, vaddr addr, MMUAccessType access_type, bool maperr, uintptr_t ra) { - const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; if (tcg_ops->record_sigsegv) { tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra); @@ -1032,10 +1046,10 @@ void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, cpu_loop_exit_restore(cpu, ra); } -void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr, +void cpu_loop_exit_sigbus(CPUState *cpu, vaddr addr, MMUAccessType access_type, uintptr_t ra) { - const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; if (tcg_ops->record_sigbus) { tcg_ops->record_sigbus(cpu, addr, access_type, ra); diff --git a/bsd-user/x86_64/target_arch_cpu.h b/bsd-user/x86_64/target_arch_cpu.h index 4094d61da1a..f82042e30af 100644 --- a/bsd-user/x86_64/target_arch_cpu.h +++ b/bsd-user/x86_64/target_arch_cpu.h @@ -110,7 +110,7 @@ static inline void target_cpu_init(CPUX86State *env, cpu_x86_load_seg(env, R_GS, 0); } -static inline void target_cpu_loop(CPUX86State *env) +static inline G_NORETURN void target_cpu_loop(CPUX86State *env) { CPUState *cs = env_cpu(env); int trapnr; diff --git a/bsd-user/x86_64/target_arch_thread.h b/bsd-user/x86_64/target_arch_thread.h index 52c28906d6d..7739bb2154b 100644 --- a/bsd-user/x86_64/target_arch_thread.h +++ b/bsd-user/x86_64/target_arch_thread.h @@ -31,7 +31,7 @@ static inline void target_thread_init(struct target_pt_regs *regs, struct image_info *infop) { regs->rax = 0; - regs->rsp = infop->start_stack; + regs->rsp = ((infop->start_stack - 8) & ~0xfUL) + 8; regs->rip = infop->entry; regs->rdi = infop->start_stack; } diff --git a/chardev/baum.c b/chardev/baum.c index a1d9784d92d..f3e8cd27f06 100644 --- a/chardev/baum.c +++ b/chardev/baum.c @@ -668,7 +668,7 @@ static void baum_chr_open(Chardev *chr, qemu_set_fd_handler(baum->brlapi_fd, baum_chr_read, NULL, baum); } -static void char_braille_class_init(ObjectClass *oc, void *data) +static void 
char_braille_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-console.c b/chardev/char-console.c index 6c4ce5dbce7..7e1bf642ebd 100644 --- a/chardev/char-console.c +++ b/chardev/char-console.c @@ -34,7 +34,7 @@ static void qemu_chr_open_win_con(Chardev *chr, win_chr_set_file(chr, GetStdHandle(STD_OUTPUT_HANDLE), true); } -static void char_console_class_init(ObjectClass *oc, void *data) +static void char_console_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-fd.c b/chardev/char-fd.c index d2c49233598..6f03adf8725 100644 --- a/chardev/char-fd.c +++ b/chardev/char-fd.c @@ -50,7 +50,7 @@ static gboolean fd_chr_read(QIOChannel *chan, GIOCondition cond, void *opaque) Chardev *chr = CHARDEV(opaque); FDChardev *s = FD_CHARDEV(opaque); int len; - uint8_t buf[CHR_READ_BUF_LEN]; + QEMU_UNINITIALIZED uint8_t buf[CHR_READ_BUF_LEN]; ssize_t ret; len = sizeof(buf); @@ -238,7 +238,7 @@ void qemu_chr_open_fd(Chardev *chr, } } -static void char_fd_class_init(ObjectClass *oc, void *data) +static void char_fd_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-fe.c b/chardev/char-fe.c index b214ba3802b..158a5f4f551 100644 --- a/chardev/char-fe.c +++ b/chardev/char-fe.c @@ -24,7 +24,7 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "qapi/error.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "chardev/char-fe.h" #include "chardev/char-io.h" @@ -191,22 +191,15 @@ bool qemu_chr_fe_backend_open(CharBackend *be) bool qemu_chr_fe_init(CharBackend *b, Chardev *s, Error **errp) { - int tag = 0; + unsigned int tag = 0; if (s) { if (CHARDEV_IS_MUX(s)) { MuxChardev *d = MUX_CHARDEV(s); - if (d->mux_cnt >= MAX_MUX) { - error_setg(errp, - "too many uses of multiplexed chardev '%s'" - " (maximum is " stringify(MAX_MUX) ")", - s->label); + if (!mux_chr_attach_frontend(d, b, &tag, errp)) { return false; } - - d->backends[d->mux_cnt] = b; - tag = d->mux_cnt++; } else if (s->be) { error_setg(errp, "chardev '%s' is already in use", s->label); return false; @@ -232,7 +225,7 @@ void qemu_chr_fe_deinit(CharBackend *b, bool del) } if (CHARDEV_IS_MUX(b->chr)) { MuxChardev *d = MUX_CHARDEV(b->chr); - d->backends[b->tag] = NULL; + mux_chr_detach_frontend(d, b->tag); } if (del) { Object *obj = OBJECT(b->chr); diff --git a/chardev/char-file.c b/chardev/char-file.c index 263e6da5636..a9e8c5e0d7f 100644 --- a/chardev/char-file.c +++ b/chardev/char-file.c @@ -123,7 +123,7 @@ static void qemu_chr_parse_file_out(QemuOpts *opts, ChardevBackend *backend, file->append = qemu_opt_get_bool(opts, "append", false); } -static void char_file_class_init(ObjectClass *oc, void *data) +static void char_file_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-hmp-cmds.c b/chardev/char-hmp-cmds.c index 287c2b1bcd8..8e9e1c1c021 100644 --- a/chardev/char-hmp-cmds.c +++ b/chardev/char-hmp-cmds.c @@ -19,7 +19,7 @@ #include "monitor/monitor.h" #include "qapi/error.h" #include "qapi/qapi-commands-char.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/config-file.h" #include "qemu/option.h" diff --git a/chardev/char-hub.c b/chardev/char-hub.c new file mode 100644 index 00000000000..16ffee2017a --- /dev/null +++ b/chardev/char-hub.c @@ -0,0 +1,301 @@ +/* + * QEMU Character Hub Device + * + * Author: Roman Penyaev + * + * Permission is hereby granted, free of 
charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/option.h" +#include "chardev/char.h" +#include "chardev-internal.h" + +/* + * The character hub device aggregates input from multiple backend devices + * and forwards it to a single frontend device. Additionally, the hub + * device takes the output from the frontend device and sends it back + * to all the connected backend devices. + */ + +/* + * Write to all backends. Different backend devices accept data at + * various rates, so it is quite possible that one device returns less + * than the others. In this case we return the minimum to the caller, + * expecting the caller to repeat the operation soon. When the repeat + * happens, writing to the devices which have already consumed the data + * must be avoided, so that no data is sent twice. + * Called with chr_write_lock held.
+ */ +static int hub_chr_write(Chardev *chr, const uint8_t *buf, int len) +{ + HubChardev *d = HUB_CHARDEV(chr); + int r, i, ret = len; + unsigned int written; + + /* Invalidate index on every write */ + d->be_eagain_ind = -1; + + for (i = 0; i < d->be_cnt; i++) { + if (!d->backends[i].be.chr->be_open) { + /* Skip closed backend */ + continue; + } + written = d->be_written[i] - d->be_min_written; + if (written) { + /* Written in the previous call so take into account */ + ret = MIN(written, ret); + continue; + } + r = qemu_chr_fe_write(&d->backends[i].be, buf, len); + if (r < 0) { + if (errno == EAGAIN) { + /* Set index and expect to be called soon on watch wake up */ + d->be_eagain_ind = i; + } + return r; + } + d->be_written[i] += r; + ret = MIN(r, ret); + } + d->be_min_written += ret; + + + return ret; +} + +static int hub_chr_can_read(void *opaque) +{ + HubCharBackend *backend = opaque; + CharBackend *fe = backend->hub->parent.be; + + if (fe && fe->chr_can_read) { + return fe->chr_can_read(fe->opaque); + } + + return 0; +} + +static void hub_chr_read(void *opaque, const uint8_t *buf, int size) +{ + HubCharBackend *backend = opaque; + CharBackend *fe = backend->hub->parent.be; + + if (fe && fe->chr_read) { + fe->chr_read(fe->opaque, buf, size); + } +} + +static void hub_chr_event(void *opaque, QEMUChrEvent event) +{ + HubCharBackend *backend = opaque; + HubChardev *d = backend->hub; + CharBackend *fe = d->parent.be; + + if (event == CHR_EVENT_OPENED) { + /* + * Catch up with what was already written while this backend + * was closed + */ + d->be_written[backend->be_ind] = d->be_min_written; + + if (d->be_event_opened_cnt++) { + /* Ignore subsequent open events from other backends */ + return; + } + } else if (event == CHR_EVENT_CLOSED) { + if (!d->be_event_opened_cnt) { + /* Don't go below zero. 
Probably assert is better */ + return; + } + if (--d->be_event_opened_cnt) { + /* Serve only the last one close event */ + return; + } + } + + if (fe && fe->chr_event) { + fe->chr_event(fe->opaque, event); + } +} + +static GSource *hub_chr_add_watch(Chardev *s, GIOCondition cond) +{ + HubChardev *d = HUB_CHARDEV(s); + Chardev *chr; + ChardevClass *cc; + + if (d->be_eagain_ind == -1) { + return NULL; + } + + assert(d->be_eagain_ind < d->be_cnt); + chr = qemu_chr_fe_get_driver(&d->backends[d->be_eagain_ind].be); + cc = CHARDEV_GET_CLASS(chr); + if (!cc->chr_add_watch) { + return NULL; + } + + return cc->chr_add_watch(chr, cond); +} + +static bool hub_chr_attach_chardev(HubChardev *d, Chardev *chr, + Error **errp) +{ + bool ret; + + if (d->be_cnt >= MAX_HUB) { + error_setg(errp, "hub: too many uses of chardevs '%s'" + " (maximum is " stringify(MAX_HUB) ")", + d->parent.label); + return false; + } + ret = qemu_chr_fe_init(&d->backends[d->be_cnt].be, chr, errp); + if (ret) { + d->backends[d->be_cnt].hub = d; + d->backends[d->be_cnt].be_ind = d->be_cnt; + d->be_cnt += 1; + } + + return ret; +} + +static void char_hub_finalize(Object *obj) +{ + HubChardev *d = HUB_CHARDEV(obj); + int i; + + for (i = 0; i < d->be_cnt; i++) { + qemu_chr_fe_deinit(&d->backends[i].be, false); + } +} + +static void hub_chr_update_read_handlers(Chardev *chr) +{ + HubChardev *d = HUB_CHARDEV(chr); + int i; + + for (i = 0; i < d->be_cnt; i++) { + qemu_chr_fe_set_handlers_full(&d->backends[i].be, + hub_chr_can_read, + hub_chr_read, + hub_chr_event, + NULL, + &d->backends[i], + chr->gcontext, true, false); + } +} + +static void qemu_chr_open_hub(Chardev *chr, + ChardevBackend *backend, + bool *be_opened, + Error **errp) +{ + ChardevHub *hub = backend->u.hub.data; + HubChardev *d = HUB_CHARDEV(chr); + strList *list = hub->chardevs; + + d->be_eagain_ind = -1; + + if (list == NULL) { + error_setg(errp, "hub: 'chardevs' list is not defined"); + return; + } + + while (list) { + Chardev *s; + + s = qemu_chr_find(list->value); + if (s == NULL) { + error_setg(errp, "hub: chardev can't be found by id '%s'", + list->value); + return; + } + if (CHARDEV_IS_HUB(s) || CHARDEV_IS_MUX(s)) { + error_setg(errp, "hub: multiplexers and hub devices can't be " + "stacked, check chardev '%s', chardev should not " + "be a hub device or have 'mux=on' enabled", + list->value); + return; + } + if (!hub_chr_attach_chardev(d, s, errp)) { + return; + } + list = list->next; + } + + /* Closed until an explicit event from backend */ + *be_opened = false; +} + +static void qemu_chr_parse_hub(QemuOpts *opts, ChardevBackend *backend, + Error **errp) +{ + ChardevHub *hub; + strList **tail; + int i; + + backend->type = CHARDEV_BACKEND_KIND_HUB; + hub = backend->u.hub.data = g_new0(ChardevHub, 1); + qemu_chr_parse_common(opts, qapi_ChardevHub_base(hub)); + + tail = &hub->chardevs; + + for (i = 0; i < MAX_HUB; i++) { + char optbuf[16]; + const char *dev; + + snprintf(optbuf, sizeof(optbuf), "chardevs.%u", i); + dev = qemu_opt_get(opts, optbuf); + if (!dev) { + break; + } + + QAPI_LIST_APPEND(tail, g_strdup(dev)); + } +} + +static void char_hub_class_init(ObjectClass *oc, const void *data) +{ + ChardevClass *cc = CHARDEV_CLASS(oc); + + cc->parse = qemu_chr_parse_hub; + cc->open = qemu_chr_open_hub; + cc->chr_write = hub_chr_write; + cc->chr_add_watch = hub_chr_add_watch; + /* We handle events from backends only */ + cc->chr_be_event = NULL; + cc->chr_update_read_handler = hub_chr_update_read_handlers; +} + +static const TypeInfo char_hub_type_info = { + .name = 
TYPE_CHARDEV_HUB, + .parent = TYPE_CHARDEV, + .class_init = char_hub_class_init, + .instance_size = sizeof(HubChardev), + .instance_finalize = char_hub_finalize, +}; + +static void register_types(void) +{ + type_register_static(&char_hub_type_info); +} + +type_init(register_types); diff --git a/chardev/char-mux.c b/chardev/char-mux.c index ee2d47b20d9..6b36290e2c4 100644 --- a/chardev/char-mux.c +++ b/chardev/char-mux.c @@ -26,8 +26,9 @@ #include "qapi/error.h" #include "qemu/module.h" #include "qemu/option.h" +#include "qemu/bitops.h" #include "chardev/char.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qapi/qapi-commands-control.h" #include "chardev-internal.h" @@ -73,11 +74,11 @@ static int mux_chr_write(Chardev *chr, const uint8_t *buf, int len) * qemu_chr_fe_write and background I/O callbacks */ qemu_chr_fe_write_all(&d->chr, (uint8_t *)buf1, strlen(buf1)); - d->linestart = 0; + d->linestart = false; } ret += qemu_chr_fe_write(&d->chr, buf + i, 1); if (buf[i] == '\n') { - d->linestart = 1; + d->linestart = true; } } } @@ -124,7 +125,8 @@ static void mux_print_help(Chardev *chr) } } -static void mux_chr_send_event(MuxChardev *d, int mux_nr, QEMUChrEvent event) +static void mux_chr_send_event(MuxChardev *d, unsigned int mux_nr, + QEMUChrEvent event) { CharBackend *be = d->backends[mux_nr]; @@ -145,7 +147,7 @@ static void mux_chr_be_event(Chardev *chr, QEMUChrEvent event) static int mux_proc_byte(Chardev *chr, MuxChardev *d, int ch) { if (d->term_got_escape) { - d->term_got_escape = 0; + d->term_got_escape = false; if (ch == term_escape_char) { goto send_char; } @@ -167,19 +169,26 @@ static int mux_proc_byte(Chardev *chr, MuxChardev *d, int ch) case 'b': qemu_chr_be_event(chr, CHR_EVENT_BREAK); break; - case 'c': - assert(d->mux_cnt > 0); /* handler registered with first fe */ + case 'c': { + unsigned int bit; + + /* Handler registered with first fe */ + assert(d->mux_bitset != 0); /* Switch to the next registered device */ - mux_set_focus(chr, (d->focus + 1) % d->mux_cnt); + bit = find_next_bit(&d->mux_bitset, MAX_MUX, d->focus + 1); + if (bit >= MAX_MUX) { + bit = find_next_bit(&d->mux_bitset, MAX_MUX, 0); + } + mux_set_focus(chr, bit); break; - case 't': + } case 't': d->timestamps = !d->timestamps; d->timestamps_start = -1; - d->linestart = 0; + d->linestart = false; break; } } else if (ch == term_escape_char) { - d->term_got_escape = 1; + d->term_got_escape = true; } else { send_char: return 1; @@ -242,15 +251,16 @@ static void mux_chr_read(void *opaque, const uint8_t *buf, int size) void mux_chr_send_all_event(Chardev *chr, QEMUChrEvent event) { MuxChardev *d = MUX_CHARDEV(chr); - int i; + int bit; if (!muxes_opened) { return; } /* Send the event to all registered listeners */ - for (i = 0; i < d->mux_cnt; i++) { - mux_chr_send_event(d, i, event); + bit = -1; + while ((bit = find_next_bit(&d->mux_bitset, MAX_MUX, bit + 1)) < MAX_MUX) { + mux_chr_send_event(d, bit, event); } } @@ -275,14 +285,15 @@ static GSource *mux_chr_add_watch(Chardev *s, GIOCondition cond) static void char_mux_finalize(Object *obj) { MuxChardev *d = MUX_CHARDEV(obj); - int i; + int bit; - for (i = 0; i < d->mux_cnt; i++) { - CharBackend *be = d->backends[i]; - if (be) { - be->chr = NULL; - } + bit = -1; + while ((bit = find_next_bit(&d->mux_bitset, MAX_MUX, bit + 1)) < MAX_MUX) { + CharBackend *be = d->backends[bit]; + be->chr = NULL; + d->backends[bit] = NULL; } + d->mux_bitset = 0; qemu_chr_fe_deinit(&d->chr, false); } @@ -300,12 +311,46 @@ static void 
mux_chr_update_read_handlers(Chardev *chr) chr->gcontext, true, false); } -void mux_set_focus(Chardev *chr, int focus) +bool mux_chr_attach_frontend(MuxChardev *d, CharBackend *b, + unsigned int *tag, Error **errp) +{ + unsigned int bit; + + QEMU_BUILD_BUG_ON(MAX_MUX > (sizeof(d->mux_bitset) * BITS_PER_BYTE)); + + bit = find_next_zero_bit(&d->mux_bitset, MAX_MUX, 0); + if (bit >= MAX_MUX) { + error_setg(errp, + "too many uses of multiplexed chardev '%s'" + " (maximum is " stringify(MAX_MUX) ")", + d->parent.label); + return false; + } + + d->mux_bitset |= (1ul << bit); + d->backends[bit] = b; + *tag = bit; + + return true; +} + +bool mux_chr_detach_frontend(MuxChardev *d, unsigned int tag) +{ + if (!(d->mux_bitset & (1ul << tag))) { + return false; + } + + d->mux_bitset &= ~(1ul << tag); + d->backends[tag] = NULL; + + return true; +} + +void mux_set_focus(Chardev *chr, unsigned int focus) { MuxChardev *d = MUX_CHARDEV(chr); - assert(focus >= 0); - assert(focus < d->mux_cnt); + assert(d->mux_bitset & (1ul << focus)); if (d->focus != -1) { mux_chr_send_event(d, d->focus, CHR_EVENT_MUX_OUT); @@ -402,7 +447,7 @@ void resume_mux_open(void) chardev_options_parsed_cb, NULL); } -static void char_mux_class_init(ObjectClass *oc, void *data) +static void char_mux_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-null.c b/chardev/char-null.c index 1c6a2900f9b..89cb85da792 100644 --- a/chardev/char-null.c +++ b/chardev/char-null.c @@ -34,7 +34,7 @@ static void null_chr_open(Chardev *chr, *be_opened = false; } -static void char_null_class_init(ObjectClass *oc, void *data) +static void char_null_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-parallel.c b/chardev/char-parallel.c index 78697d7522d..62a44b2f969 100644 --- a/chardev/char-parallel.c +++ b/chardev/char-parallel.c @@ -270,7 +270,7 @@ static void qemu_chr_parse_parallel(QemuOpts *opts, ChardevBackend *backend, parallel->device = g_strdup(device); } -static void char_parallel_class_init(ObjectClass *oc, void *data) +static void char_parallel_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-pipe.c b/chardev/char-pipe.c index 5ad30bcc599..3d1b0ce2d2e 100644 --- a/chardev/char-pipe.c +++ b/chardev/char-pipe.c @@ -171,7 +171,7 @@ static void qemu_chr_parse_pipe(QemuOpts *opts, ChardevBackend *backend, dev->device = g_strdup(device); } -static void char_pipe_class_init(ObjectClass *oc, void *data) +static void char_pipe_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-pty.c b/chardev/char-pty.c index cc2f7617fe7..674e9b3f144 100644 --- a/chardev/char-pty.c +++ b/chardev/char-pty.c @@ -29,6 +29,7 @@ #include "qemu/sockets.h" #include "qemu/error-report.h" #include "qemu/module.h" +#include "qemu/option.h" #include "qemu/qemu-print.h" #include "chardev/char-io.h" @@ -41,6 +42,7 @@ struct PtyChardev { int connected; GSource *timer_src; + char *path; }; typedef struct PtyChardev PtyChardev; @@ -152,7 +154,7 @@ static gboolean pty_chr_read(QIOChannel *chan, GIOCondition cond, void *opaque) Chardev *chr = CHARDEV(opaque); PtyChardev *s = PTY_CHARDEV(opaque); gsize len; - uint8_t buf[CHR_READ_BUF_LEN]; + QEMU_UNINITIALIZED uint8_t buf[CHR_READ_BUF_LEN]; ssize_t ret; len = sizeof(buf); @@ -179,6 +181,9 @@ static void pty_chr_state(Chardev *chr, int connected) if (!connected) { remove_fd_in_watch(chr); + if 
(s->connected) { + qemu_chr_be_event(chr, CHR_EVENT_CLOSED); + } s->connected = 0; /* (re-)connect poll interval for idle guests: once per second. * We check more frequently in case the guests sends data to @@ -204,10 +209,15 @@ static void char_pty_finalize(Object *obj) Chardev *chr = CHARDEV(obj); PtyChardev *s = PTY_CHARDEV(obj); + /* unlink symlink */ + if (s->path) { + unlink(s->path); + g_free(s->path); + } + pty_chr_state(chr, 0); object_unref(OBJECT(s->ioc)); pty_chr_timer_cancel(s); - qemu_chr_be_event(chr, CHR_EVENT_CLOSED); } #if defined HAVE_PTY_H @@ -330,6 +340,7 @@ static void char_pty_open(Chardev *chr, int master_fd, slave_fd; char pty_name[PATH_MAX]; char *name; + char *path = backend->u.pty.data->path; master_fd = qemu_openpty_raw(&slave_fd, pty_name); if (master_fd < 0) { @@ -354,12 +365,36 @@ static void char_pty_open(Chardev *chr, g_free(name); s->timer_src = NULL; *be_opened = false; + + /* create symbolic link */ + if (path) { + int res = symlink(pty_name, path); + + if (res != 0) { + error_setg_errno(errp, errno, "Failed to create PTY symlink"); + } else { + s->path = g_strdup(path); + } + } +} + +static void char_pty_parse(QemuOpts *opts, ChardevBackend *backend, + Error **errp) +{ + const char *path = qemu_opt_get(opts, "path"); + ChardevPty *pty; + + backend->type = CHARDEV_BACKEND_KIND_PTY; + pty = backend->u.pty.data = g_new0(ChardevPty, 1); + qemu_chr_parse_common(opts, qapi_ChardevPty_base(pty)); + pty->path = g_strdup(path); } -static void char_pty_class_init(ObjectClass *oc, void *data) +static void char_pty_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); + cc->parse = char_pty_parse; cc->open = char_pty_open; cc->chr_write = char_pty_chr_write; cc->chr_update_read_handler = pty_chr_update_read_handler; diff --git a/chardev/char-ringbuf.c b/chardev/char-ringbuf.c index d40d21d3cf7..98aadb6acfb 100644 --- a/chardev/char-ringbuf.c +++ b/chardev/char-ringbuf.c @@ -223,7 +223,7 @@ static void qemu_chr_parse_ringbuf(QemuOpts *opts, ChardevBackend *backend, } } -static void char_ringbuf_class_init(ObjectClass *oc, void *data) +static void char_ringbuf_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-serial.c b/chardev/char-serial.c index 4b0b83d5b45..0a68b4b4e0b 100644 --- a/chardev/char-serial.c +++ b/chardev/char-serial.c @@ -298,7 +298,7 @@ static void qemu_chr_parse_serial(QemuOpts *opts, ChardevBackend *backend, serial->device = g_strdup(device); } -static void char_serial_class_init(ObjectClass *oc, void *data) +static void char_serial_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-socket.c b/chardev/char-socket.c index 1ca9441b1b5..1e8313915b5 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -74,7 +74,7 @@ static void qemu_chr_socket_restart_timer(Chardev *chr) assert(!s->reconnect_timer); name = g_strdup_printf("chardev-socket-reconnect-%s", chr->label); s->reconnect_timer = qemu_chr_timeout_add_ms(chr, - s->reconnect_time * 1000, + s->reconnect_time_ms, socket_reconnect_timeout, chr); g_source_set_name(s->reconnect_timer, name); @@ -481,7 +481,7 @@ static void tcp_chr_disconnect_locked(Chardev *chr) if (emit_close) { qemu_chr_be_event(chr, CHR_EVENT_CLOSED); } - if (s->reconnect_time && !s->reconnect_timer) { + if (s->reconnect_time_ms && !s->reconnect_timer) { qemu_chr_socket_restart_timer(chr); } } @@ -497,7 +497,7 @@ static gboolean tcp_chr_read(QIOChannel *chan, 
GIOCondition cond, void *opaque) { Chardev *chr = CHARDEV(opaque); SocketChardev *s = SOCKET_CHARDEV(opaque); - uint8_t buf[CHR_READ_BUF_LEN]; + QEMU_UNINITIALIZED uint8_t buf[CHR_READ_BUF_LEN]; int len, size; if ((s->state != TCP_CHARDEV_STATE_CONNECTED) || @@ -571,9 +571,13 @@ static char *qemu_chr_compute_filename(SocketChardev *s) switch (ss->ss_family) { case AF_UNIX: - return g_strdup_printf("unix:%s%s", - ((struct sockaddr_un *)(ss))->sun_path, - s->is_listen ? ",server=on" : ""); + if (s->is_listen) { + return g_strdup_printf("unix:%s,server=on", + ((struct sockaddr_un *)(ss))->sun_path); + } else { + return g_strdup_printf("unix:%s", + ((struct sockaddr_un *)(ps))->sun_path); + } case AF_INET6: left = "["; right = "]"; @@ -1080,9 +1084,9 @@ static int tcp_chr_wait_connected(Chardev *chr, Error **errp) } else { Error *err = NULL; if (tcp_chr_connect_client_sync(chr, &err) < 0) { - if (s->reconnect_time) { + if (s->reconnect_time_ms) { error_free(err); - g_usleep(s->reconnect_time * 1000ULL * 1000ULL); + g_usleep(s->reconnect_time_ms * 1000ULL); } else { error_propagate(errp, err); return -1; @@ -1267,13 +1271,13 @@ static int qmp_chardev_open_socket_server(Chardev *chr, static int qmp_chardev_open_socket_client(Chardev *chr, - int64_t reconnect, + int64_t reconnect_ms, Error **errp) { SocketChardev *s = SOCKET_CHARDEV(chr); - if (reconnect > 0) { - s->reconnect_time = reconnect; + if (reconnect_ms > 0) { + s->reconnect_time_ms = reconnect_ms; tcp_chr_connect_client_async(chr); return 0; } else { @@ -1354,6 +1358,12 @@ static bool qmp_chardev_validate_socket(ChardevSocket *sock, } } + if (sock->has_reconnect_ms && sock->has_reconnect) { + error_setg(errp, + "'reconnect' and 'reconnect-ms' are mutually exclusive"); + return false; + } + return true; } @@ -1371,7 +1381,7 @@ static void qmp_chardev_open_socket(Chardev *chr, bool is_tn3270 = sock->has_tn3270 ? sock->tn3270 : false; bool is_waitconnect = sock->has_wait ? sock->wait : false; bool is_websock = sock->has_websocket ? sock->websocket : false; - int64_t reconnect = sock->has_reconnect ? 
sock->reconnect : 0; + int64_t reconnect_ms = 0; SocketAddress *addr; s->is_listen = is_listen; @@ -1443,7 +1453,13 @@ static void qmp_chardev_open_socket(Chardev *chr, return; } } else { - if (qmp_chardev_open_socket_client(chr, reconnect, errp) < 0) { + if (sock->has_reconnect) { + reconnect_ms = sock->reconnect * 1000ULL; + } else if (sock->has_reconnect_ms) { + reconnect_ms = sock->reconnect_ms; + } + + if (qmp_chardev_open_socket_client(chr, reconnect_ms, errp) < 0) { return; } } @@ -1509,6 +1525,9 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend, sock->wait = qemu_opt_get_bool(opts, "wait", true); sock->has_reconnect = qemu_opt_find(opts, "reconnect"); sock->reconnect = qemu_opt_get_number(opts, "reconnect", 0); + sock->has_reconnect_ms = qemu_opt_find(opts, "reconnect-ms"); + sock->reconnect_ms = qemu_opt_get_number(opts, "reconnect-ms", 0); + sock->tls_creds = g_strdup(qemu_opt_get(opts, "tls-creds")); sock->tls_authz = g_strdup(qemu_opt_get(opts, "tls-authz")); @@ -1562,7 +1581,7 @@ char_socket_get_connected(Object *obj, Error **errp) return s->state == TCP_CHARDEV_STATE_CONNECTED; } -static void char_socket_class_init(ObjectClass *oc, void *data) +static void char_socket_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-stdio.c b/chardev/char-stdio.c index b960ddd4e4c..48db8d2f30f 100644 --- a/chardev/char-stdio.c +++ b/chardev/char-stdio.c @@ -136,7 +136,7 @@ static void qemu_chr_parse_stdio(QemuOpts *opts, ChardevBackend *backend, stdio->signal = qemu_opt_get_bool(opts, "signal", true); } -static void char_stdio_class_init(ObjectClass *oc, void *data) +static void char_stdio_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-udp.c b/chardev/char-udp.c index 3d9a2d5e772..572fab0ad13 100644 --- a/chardev/char-udp.c +++ b/chardev/char-udp.c @@ -219,7 +219,7 @@ static void qmp_chardev_open_udp(Chardev *chr, *be_opened = false; } -static void char_udp_class_init(ObjectClass *oc, void *data) +static void char_udp_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-win-stdio.c b/chardev/char-win-stdio.c index 13325ca967c..fb802a00b13 100644 --- a/chardev/char-win-stdio.c +++ b/chardev/char-win-stdio.c @@ -256,7 +256,7 @@ static int win_stdio_write(Chardev *chr, const uint8_t *buf, int len) return len - len1; } -static void char_win_stdio_class_init(ObjectClass *oc, void *data) +static void char_win_stdio_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char-win.c b/chardev/char-win.c index d4fb44c4dcc..fef45e83aad 100644 --- a/chardev/char-win.c +++ b/chardev/char-win.c @@ -220,7 +220,7 @@ void win_chr_set_file(Chardev *chr, HANDLE file, bool keep_open) s->file = file; } -static void char_win_class_init(ObjectClass *oc, void *data) +static void char_win_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/char.c b/chardev/char.c index 3c43fb1278f..bbebd246c3a 100644 --- a/chardev/char.c +++ b/chardev/char.c @@ -33,7 +33,7 @@ #include "qapi/error.h" #include "qapi/qapi-commands-char.h" #include "qapi/qmp/qerror.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "qemu/help_option.h" #include "qemu/module.h" #include "qemu/option.h" @@ -48,7 +48,7 @@ Object *get_chardevs_root(void) { - return container_get(object_get_root(), "/chardevs"); + return 
object_get_container("chardevs"); } static void chr_be_event(Chardev *s, QEMUChrEvent event) @@ -295,7 +295,7 @@ static int null_chr_write(Chardev *chr, const uint8_t *buf, int len) return len; } -static void char_class_init(ObjectClass *oc, void *data) +static void char_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); @@ -333,7 +333,7 @@ static bool qemu_chr_is_busy(Chardev *s) { if (CHARDEV_IS_MUX(s)) { MuxChardev *d = MUX_CHARDEV(s); - return d->mux_cnt >= 0; + return d->mux_bitset != 0; } else { return s->be != NULL; } @@ -428,6 +428,11 @@ QemuOpts *qemu_chr_parse_compat(const char *label, const char *filename, qemu_opt_set(opts, "path", p, &error_abort); return opts; } + if (strstart(filename, "pty:", &p)) { + qemu_opt_set(opts, "backend", "pty", &error_abort); + qemu_opt_set(opts, "path", p, &error_abort); + return opts; + } if (strstart(filename, "tcp:", &p) || strstart(filename, "telnet:", &p) || strstart(filename, "tn3270:", &p) || @@ -615,11 +620,24 @@ ChardevBackend *qemu_chr_parse_opts(QemuOpts *opts, Error **errp) return backend; } -Chardev *qemu_chr_new_from_opts(QemuOpts *opts, GMainContext *context, - Error **errp) +static void qemu_chardev_set_replay(Chardev *chr, Error **errp) +{ + if (replay_mode != REPLAY_MODE_NONE) { + if (CHARDEV_GET_CLASS(chr)->chr_ioctl) { + error_setg(errp, "Replay: ioctl is not supported " + "for serial devices yet"); + return; + } + qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_REPLAY); + replay_register_char_driver(chr); + } +} + +static Chardev *do_qemu_chr_new_from_opts(QemuOpts *opts, GMainContext *context, + bool replay, Error **errp) { const ChardevClass *cc; - Chardev *chr = NULL; + Chardev *base = NULL, *chr = NULL; ChardevBackend *backend = NULL; const char *name = qemu_opt_get(opts, "backend"); const char *id = qemu_opts_id(opts); @@ -657,11 +675,11 @@ Chardev *qemu_chr_new_from_opts(QemuOpts *opts, GMainContext *context, chr = qemu_chardev_new(bid ? bid : id, object_class_get_name(OBJECT_CLASS(cc)), backend, context, errp); - if (chr == NULL) { goto out; } + base = chr; if (bid) { Chardev *mux; qapi_free_ChardevBackend(backend); @@ -681,11 +699,25 @@ Chardev *qemu_chr_new_from_opts(QemuOpts *opts, GMainContext *context, out: qapi_free_ChardevBackend(backend); g_free(bid); + + if (replay && base) { + /* RR should be set on the base device, not the mux */ + qemu_chardev_set_replay(base, errp); + } + return chr; } -Chardev *qemu_chr_new_noreplay(const char *label, const char *filename, - bool permit_mux_mon, GMainContext *context) +Chardev *qemu_chr_new_from_opts(QemuOpts *opts, GMainContext *context, + Error **errp) +{ + /* XXX: should this really not record/replay? 
*/ + return do_qemu_chr_new_from_opts(opts, context, false, errp); +} + +static Chardev *qemu_chr_new_from_name(const char *label, const char *filename, + bool permit_mux_mon, + GMainContext *context, bool replay) { const char *p; Chardev *chr; @@ -693,14 +725,22 @@ Chardev *qemu_chr_new_noreplay(const char *label, const char *filename, Error *err = NULL; if (strstart(filename, "chardev:", &p)) { - return qemu_chr_find(p); + chr = qemu_chr_find(p); + if (replay && chr) { + qemu_chardev_set_replay(chr, &err); + if (err) { + error_report_err(err); + return NULL; + } + } + return chr; } opts = qemu_chr_parse_compat(label, filename, permit_mux_mon); if (!opts) return NULL; - chr = qemu_chr_new_from_opts(opts, context, &err); + chr = do_qemu_chr_new_from_opts(opts, context, replay, &err); if (!chr) { error_report_err(err); goto out; @@ -722,24 +762,20 @@ Chardev *qemu_chr_new_noreplay(const char *label, const char *filename, return chr; } +Chardev *qemu_chr_new_noreplay(const char *label, const char *filename, + bool permit_mux_mon, GMainContext *context) +{ + return qemu_chr_new_from_name(label, filename, permit_mux_mon, context, + false); +} + static Chardev *qemu_chr_new_permit_mux_mon(const char *label, const char *filename, bool permit_mux_mon, GMainContext *context) { - Chardev *chr; - chr = qemu_chr_new_noreplay(label, filename, permit_mux_mon, context); - if (chr) { - if (replay_mode != REPLAY_MODE_NONE) { - qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_REPLAY); - } - if (qemu_chr_replay(chr) && CHARDEV_GET_CLASS(chr)->chr_ioctl) { - error_report("Replay: ioctl is not supported " - "for serial devices yet"); - } - replay_register_char_driver(chr); - } - return chr; + return qemu_chr_new_from_name(label, filename, permit_mux_mon, context, + true); } Chardev *qemu_chr_new(const char *label, const char *filename, @@ -859,6 +895,9 @@ QemuOptsList qemu_chardev_opts = { },{ .name = "reconnect", .type = QEMU_OPT_NUMBER, + },{ + .name = "reconnect-ms", + .type = QEMU_OPT_NUMBER, },{ .name = "telnet", .type = QEMU_OPT_BOOL, @@ -904,7 +943,26 @@ QemuOptsList qemu_chardev_opts = { },{ .name = "chardev", .type = QEMU_OPT_STRING, + }, + /* + * Multiplexer options. Follows QAPI array syntax. + * See MAX_HUB macro to obtain array capacity. + */ + { + .name = "chardevs.0", + .type = QEMU_OPT_STRING, + },{ + .name = "chardevs.1", + .type = QEMU_OPT_STRING, },{ + .name = "chardevs.2", + .type = QEMU_OPT_STRING, + },{ + .name = "chardevs.3", + .type = QEMU_OPT_STRING, + }, + + { .name = "append", .type = QEMU_OPT_BOOL, },{ @@ -1067,8 +1125,8 @@ ChardevReturn *qmp_chardev_change(const char *id, ChardevBackend *backend, return NULL; } - if (CHARDEV_IS_MUX(chr)) { - error_setg(errp, "Mux device hotswap not supported yet"); + if (CHARDEV_IS_MUX(chr) || CHARDEV_IS_HUB(chr)) { + error_setg(errp, "For mux or hub device hotswap is not supported yet"); return NULL; } diff --git a/chardev/chardev-internal.h b/chardev/chardev-internal.h index 4e03af31476..9752dd75f71 100644 --- a/chardev/chardev-internal.h +++ b/chardev/chardev-internal.h @@ -29,38 +29,89 @@ #include "chardev/char-fe.h" #include "qom/object.h" +#define MAX_HUB 4 #define MAX_MUX 4 #define MUX_BUFFER_SIZE 32 /* Must be a power of 2. 
*/ #define MUX_BUFFER_MASK (MUX_BUFFER_SIZE - 1) struct MuxChardev { Chardev parent; + /* Linked frontends */ CharBackend *backends[MAX_MUX]; + /* Linked backend */ CharBackend chr; + unsigned long mux_bitset; int focus; - int mux_cnt; - int term_got_escape; - int max_size; + bool term_got_escape; /* Intermediate input buffer catches escape sequences even if the currently active device is not accepting any input - but only until it is full as well. */ unsigned char buffer[MAX_MUX][MUX_BUFFER_SIZE]; - int prod[MAX_MUX]; - int cons[MAX_MUX]; + unsigned int prod[MAX_MUX]; + unsigned int cons[MAX_MUX]; int timestamps; /* Protected by the Chardev chr_write_lock. */ - int linestart; + bool linestart; int64_t timestamps_start; }; typedef struct MuxChardev MuxChardev; +typedef struct HubChardev HubChardev; +typedef struct HubCharBackend HubCharBackend; + +/* + * Back-pointer to the hub, the actual backend, and its index in + * the `hub->backends` array + */ +struct HubCharBackend { + HubChardev *hub; + CharBackend be; + unsigned int be_ind; +}; + +struct HubChardev { + Chardev parent; + /* Linked backends */ + HubCharBackend backends[MAX_HUB]; + /* + * Number of backends attached to this hub. Once attached, a + * backend can't be detached, so the counter only increases. + * To safely remove a backend, the hub has to be removed first. + */ + unsigned int be_cnt; + /* + * Number of CHR_EVENT_OPENED events from all backends. Needed to + * send CHR_EVENT_CLOSED only when the counter goes to zero. + */ + unsigned int be_event_opened_cnt; + /* + * Counters of written bytes from a single frontend device + * to multiple backend devices. + */ + unsigned int be_written[MAX_HUB]; + unsigned int be_min_written; + /* + * Index of the backend device which got EAGAIN on the last write; + * -1 means no such backend.
+ */ + int be_eagain_ind; +}; +typedef struct HubChardev HubChardev; DECLARE_INSTANCE_CHECKER(MuxChardev, MUX_CHARDEV, TYPE_CHARDEV_MUX) -#define CHARDEV_IS_MUX(chr) \ +DECLARE_INSTANCE_CHECKER(HubChardev, HUB_CHARDEV, + TYPE_CHARDEV_HUB) + +#define CHARDEV_IS_MUX(chr) \ object_dynamic_cast(OBJECT(chr), TYPE_CHARDEV_MUX) +#define CHARDEV_IS_HUB(chr) \ + object_dynamic_cast(OBJECT(chr), TYPE_CHARDEV_HUB) -void mux_set_focus(Chardev *chr, int focus); +bool mux_chr_attach_frontend(MuxChardev *d, CharBackend *b, + unsigned int *tag, Error **errp); +bool mux_chr_detach_frontend(MuxChardev *d, unsigned int tag); +void mux_set_focus(Chardev *chr, unsigned int focus); void mux_chr_send_all_event(Chardev *chr, QEMUChrEvent event); Object *get_chardevs_root(void); diff --git a/chardev/meson.build b/chardev/meson.build index 70070a8279a..56ee39ac0b0 100644 --- a/chardev/meson.build +++ b/chardev/meson.build @@ -3,6 +3,7 @@ chardev_ss.add(files( 'char-file.c', 'char-io.c', 'char-mux.c', + 'char-hub.c', 'char-null.c', 'char-pipe.c', 'char-ringbuf.c', diff --git a/chardev/msmouse.c b/chardev/msmouse.c index 2279694cfab..1a55755d397 100644 --- a/chardev/msmouse.c +++ b/chardev/msmouse.c @@ -267,7 +267,7 @@ static void msmouse_chr_open(Chardev *chr, fifo8_create(&mouse->outbuf, MSMOUSE_BUF_SZ); } -static void char_msmouse_class_init(ObjectClass *oc, void *data) +static void char_msmouse_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/spice.c b/chardev/spice.c index e843d961a78..db53b49da26 100644 --- a/chardev/spice.c +++ b/chardev/spice.c @@ -347,7 +347,7 @@ static void qemu_chr_parse_spice_port(QemuOpts *opts, ChardevBackend *backend, spiceport->fqdn = g_strdup(name); } -static void char_spice_class_init(ObjectClass *oc, void *data) +static void char_spice_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); @@ -366,7 +366,7 @@ static const TypeInfo char_spice_type_info = { }; module_obj(TYPE_CHARDEV_SPICE); -static void char_spicevmc_class_init(ObjectClass *oc, void *data) +static void char_spicevmc_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); @@ -382,7 +382,7 @@ static const TypeInfo char_spicevmc_type_info = { }; module_obj(TYPE_CHARDEV_SPICEVMC); -static void char_spiceport_class_init(ObjectClass *oc, void *data) +static void char_spiceport_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/testdev.c b/chardev/testdev.c index a92caca3c33..e91f4e8343a 100644 --- a/chardev/testdev.c +++ b/chardev/testdev.c @@ -110,7 +110,7 @@ static int testdev_chr_write(Chardev *chr, const uint8_t *buf, int len) return orig_len; } -static void char_testdev_class_init(ObjectClass *oc, void *data) +static void char_testdev_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/chardev/wctablet.c b/chardev/wctablet.c index f4008bf35b7..0dc6ef08f59 100644 --- a/chardev/wctablet.c +++ b/chardev/wctablet.c @@ -342,7 +342,7 @@ static void wctablet_chr_open(Chardev *chr, &wctablet_handler); } -static void wctablet_chr_class_init(ObjectClass *oc, void *data) +static void wctablet_chr_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 00000000000..90161729838 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,3 @@ +doc-valid-idents = ["IrDA", "PrimeCell", ".."] +allow-mixed-uninlined-format-args = false 
+msrv = "1.77.0" diff --git a/common-user/host/riscv/safe-syscall.inc.S b/common-user/host/riscv/safe-syscall.inc.S index dfe83c300eb..c8b81e33d0b 100644 --- a/common-user/host/riscv/safe-syscall.inc.S +++ b/common-user/host/riscv/safe-syscall.inc.S @@ -69,11 +69,11 @@ safe_syscall_end: /* code path setting errno */ 0: neg a0, a0 - j safe_syscall_set_errno_tail + tail safe_syscall_set_errno_tail /* code path when we didn't execute the syscall */ 2: li a0, QEMU_ERESTARTSYS - j safe_syscall_set_errno_tail + tail safe_syscall_set_errno_tail .cfi_endproc .size safe_syscall_base, .-safe_syscall_base diff --git a/common-user/plugin-api.c.inc b/common-user/plugin-api.c.inc new file mode 100644 index 00000000000..63f39832711 --- /dev/null +++ b/common-user/plugin-api.c.inc @@ -0,0 +1,44 @@ +/* + * QEMU Plugin API - *-user-mode only implementations + * + * Common user-mode only APIs are in plugins/api-user. These helpers + * are only specific to the *-user frontends. + * + * Copyright (C) 2017, Emilio G. Cota + * Copyright (C) 2019-2025, Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qemu/plugin.h" +#include "accel/tcg/vcpu-state.h" +#include "qemu.h" + +/* + * Binary path, start and end locations. Host specific due to TaskState. + */ +const char *qemu_plugin_path_to_binary(void) +{ + TaskState *ts = get_task_state(current_cpu); + return g_strdup(ts->bprm->filename); +} + +uint64_t qemu_plugin_start_code(void) +{ + TaskState *ts = get_task_state(current_cpu); + return ts->info->start_code; +} + +uint64_t qemu_plugin_end_code(void) +{ + TaskState *ts = get_task_state(current_cpu); + return ts->info->end_code; +} + +uint64_t qemu_plugin_entry_code(void) +{ + TaskState *ts = get_task_state(current_cpu); + return ts->info->entry; +} diff --git a/configs/devices/aarch64-softmmu/default.mak b/configs/devices/aarch64-softmmu/default.mak index f82a04c27d1..ad8028cfd48 100644 --- a/configs/devices/aarch64-softmmu/default.mak +++ b/configs/devices/aarch64-softmmu/default.mak @@ -8,3 +8,5 @@ include ../arm-softmmu/default.mak # CONFIG_XLNX_ZYNQMP_ARM=n # CONFIG_XLNX_VERSAL=n # CONFIG_SBSA_REF=n +# CONFIG_NPCM8XX=n +CONFIG_VMAPPLE=n diff --git a/configs/devices/arm-softmmu/default.mak b/configs/devices/arm-softmmu/default.mak index 31f77c20269..57ef1b8a702 100644 --- a/configs/devices/arm-softmmu/default.mak +++ b/configs/devices/arm-softmmu/default.mak @@ -18,9 +18,7 @@ # CONFIG_MUSICPAL=n # CONFIG_MPS3R=n # CONFIG_MUSCA=n -# CONFIG_CHEETAH=n # CONFIG_SX1=n -# CONFIG_NSERIES=n # CONFIG_STELLARIS=n # CONFIG_STM32VLDISCOVERY=n # CONFIG_B_L475E_IOT01A=n @@ -28,11 +26,6 @@ # CONFIG_VERSATILE=n # CONFIG_VEXPRESS=n # CONFIG_ZYNQ=n -# CONFIG_MAINSTONE=n -# CONFIG_GUMSTIX=n -# CONFIG_SPITZ=n -# CONFIG_TOSA=n -# CONFIG_Z2=n # CONFIG_NPCM7XX=n # CONFIG_COLLIE=n # CONFIG_ASPEED_SOC=n diff --git a/configs/devices/cris-softmmu/default.mak b/configs/devices/cris-softmmu/default.mak deleted file mode 100644 index ff73cd40847..00000000000 --- a/configs/devices/cris-softmmu/default.mak +++ /dev/null @@ -1,4 +0,0 @@ -# Default configuration for cris-softmmu - -# Boards are selected by default, uncomment to keep out of the build. 
-# CONFIG_AXIS=n diff --git a/configs/devices/i386-softmmu/default.mak b/configs/devices/i386-softmmu/default.mak index 448e3e3b1ba..bc0479a7e0a 100644 --- a/configs/devices/i386-softmmu/default.mak +++ b/configs/devices/i386-softmmu/default.mak @@ -18,6 +18,7 @@ #CONFIG_QXL=n #CONFIG_SEV=n #CONFIG_SGA=n +#CONFIG_TDX=n #CONFIG_TEST_DEVICES=n #CONFIG_TPM_CRB=n #CONFIG_TPM_TIS_ISA=n @@ -29,3 +30,4 @@ # CONFIG_I440FX=n # CONFIG_Q35=n # CONFIG_MICROVM=n +# CONFIG_NITRO_ENCLAVE=n diff --git a/configs/devices/microblaze-softmmu/default.mak b/configs/devices/microblaze-softmmu/default.mak index 583e3959bb7..78941064655 100644 --- a/configs/devices/microblaze-softmmu/default.mak +++ b/configs/devices/microblaze-softmmu/default.mak @@ -2,5 +2,3 @@ # Boards are selected by default, uncomment to keep out of the build. # CONFIG_PETALOGIX_S3ADSP1800=n -# CONFIG_PETALOGIX_ML605=n -# CONFIG_XLNX_ZYNQMP_PMU=n diff --git a/configs/devices/microblazeel-softmmu/default.mak b/configs/devices/microblazeel-softmmu/default.mak index 29f7f13816c..4c1086435bf 100644 --- a/configs/devices/microblazeel-softmmu/default.mak +++ b/configs/devices/microblazeel-softmmu/default.mak @@ -1,3 +1,6 @@ # Default configuration for microblazeel-softmmu -include ../microblaze-softmmu/default.mak +# Boards are selected by default, uncomment to keep out of the build. +# CONFIG_PETALOGIX_S3ADSP1800=n +# CONFIG_PETALOGIX_ML605=n +# CONFIG_XLNX_ZYNQMP_PMU=n diff --git a/configs/devices/riscv64-softmmu/default.mak b/configs/devices/riscv64-softmmu/default.mak index 39ed3a0061a..e485bbd1a33 100644 --- a/configs/devices/riscv64-softmmu/default.mak +++ b/configs/devices/riscv64-softmmu/default.mak @@ -11,3 +11,4 @@ # CONFIG_RISCV_VIRT=n # CONFIG_MICROCHIP_PFSOC=n # CONFIG_SHAKTI_C=n +# CONFIG_XIANGSHAN_KUNMINGHU=n diff --git a/configs/devices/sh4-softmmu/default.mak b/configs/devices/sh4-softmmu/default.mak index c06a427053a..efb401bfb17 100644 --- a/configs/devices/sh4-softmmu/default.mak +++ b/configs/devices/sh4-softmmu/default.mak @@ -1,4 +1,4 @@ -# Default configuration for sh4eb-softmmu +# Default configuration for sh4-softmmu # Uncomment the following lines to disable these optional devices: # @@ -7,4 +7,3 @@ # Boards are selected by default, uncomment to keep out of the build. # CONFIG_R2D=n -# CONFIG_SHIX=n diff --git a/configs/meson/emscripten.txt b/configs/meson/emscripten.txt new file mode 100644 index 00000000000..4230e88005f --- /dev/null +++ b/configs/meson/emscripten.txt @@ -0,0 +1,8 @@ +[built-in options] +c_args = ['-pthread'] +cpp_args = ['-pthread'] +objc_args = ['-pthread'] +# -sPROXY_TO_PTHREAD link time flag always requires -pthread even during +# configuration so explicitly add the flag here. 
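configs/meson/emscripten.txt above is a Meson machine file: its [built-in options] section injects -pthread and the Emscripten link flags for every target built with it. How configure selects it for Emscripten hosts is not shown in this hunk; as an assumption, it is passed to Meson as an additional machine file alongside a toolchain cross file (cross-wasm.ini below is a hypothetical name):

    meson setup build-wasm --cross-file cross-wasm.ini --cross-file configs/meson/emscripten.txt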
+c_link_args = ['-pthread','-sASYNCIFY=1','-sPROXY_TO_PTHREAD=1','-sFORCE_FILESYSTEM','-sALLOW_TABLE_GROWTH','-sTOTAL_MEMORY=2GB','-sWASM_BIGINT','-sEXPORT_ES6=1','-sASYNCIFY_IMPORTS=ffi_call_js','-sEXPORTED_RUNTIME_METHODS=addFunction,removeFunction,TTY,FS'] +cpp_link_args = ['-pthread','-sASYNCIFY=1','-sPROXY_TO_PTHREAD=1','-sFORCE_FILESYSTEM','-sALLOW_TABLE_GROWTH','-sTOTAL_MEMORY=2GB','-sWASM_BIGINT','-sEXPORT_ES6=1','-sASYNCIFY_IMPORTS=ffi_call_js','-sEXPORTED_RUNTIME_METHODS=addFunction,removeFunction,TTY,FS'] diff --git a/configs/targets/aarch64-bsd-user.mak b/configs/targets/aarch64-bsd-user.mak index 8aaa5d8c802..f99c73377a9 100644 --- a/configs/targets/aarch64-bsd-user.mak +++ b/configs/targets/aarch64-bsd-user.mak @@ -1,3 +1,4 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml +TARGET_LONG_BITS=64 diff --git a/configs/targets/aarch64-linux-user.mak b/configs/targets/aarch64-linux-user.mak index 8f0ed21d764..b779ac3b4a0 100644 --- a/configs/targets/aarch64-linux-user.mak +++ b/configs/targets/aarch64-linux-user.mak @@ -4,3 +4,6 @@ TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch TARGET_HAS_BFLT=y CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y +TARGET_SYSTBL_ABI=common,64,renameat,rlimit,memfd_secret +TARGET_SYSTBL=syscall_64.tbl +TARGET_LONG_BITS=64 diff --git a/configs/targets/aarch64-softmmu.mak b/configs/targets/aarch64-softmmu.mak index 84cb32dc2f4..5dfeb35af90 100644 --- a/configs/targets/aarch64-softmmu.mak +++ b/configs/targets/aarch64-softmmu.mak @@ -1,7 +1,7 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm -TARGET_SUPPORTS_MTTCG=y TARGET_KVM_HAVE_GUEST_DEBUG=y TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml gdb-xml/aarch64-pauth.xml # needed by boot.c TARGET_NEED_FDT=y +TARGET_LONG_BITS=64 diff --git a/configs/targets/aarch64_be-linux-user.mak b/configs/targets/aarch64_be-linux-user.mak index acb5620cdbf..ef9be02290f 100644 --- a/configs/targets/aarch64_be-linux-user.mak +++ b/configs/targets/aarch64_be-linux-user.mak @@ -1,7 +1,10 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm TARGET_BIG_ENDIAN=y -TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml +TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml gdb-xml/aarch64-mte.xml TARGET_HAS_BFLT=y CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y +TARGET_SYSTBL_ABI=common,64,renameat,rlimit,memfd_secret +TARGET_SYSTBL=syscall_64.tbl +TARGET_LONG_BITS=64 diff --git a/configs/targets/alpha-linux-user.mak b/configs/targets/alpha-linux-user.mak index f7d3fb4afa9..aa25766236e 100644 --- a/configs/targets/alpha-linux-user.mak +++ b/configs/targets/alpha-linux-user.mak @@ -1,3 +1,5 @@ TARGET_ARCH=alpha TARGET_SYSTBL_ABI=common TARGET_SYSTBL=syscall.tbl +TARGET_LONG_BITS=64 +TARGET_XML_FILES= gdb-xml/alpha-core.xml diff --git a/configs/targets/alpha-softmmu.mak b/configs/targets/alpha-softmmu.mak index 9dbe1607403..e31f059a52d 100644 --- a/configs/targets/alpha-softmmu.mak +++ b/configs/targets/alpha-softmmu.mak @@ -1,2 +1,3 @@ TARGET_ARCH=alpha -TARGET_SUPPORTS_MTTCG=y +TARGET_LONG_BITS=64 +TARGET_XML_FILES= gdb-xml/alpha-core.xml diff --git a/configs/targets/arm-bsd-user.mak b/configs/targets/arm-bsd-user.mak index cb143e6426a..472a4f9fb11 
100644 --- a/configs/targets/arm-bsd-user.mak +++ b/configs/targets/arm-bsd-user.mak @@ -1,2 +1,3 @@ TARGET_ARCH=arm TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml +TARGET_LONG_BITS=32 diff --git a/configs/targets/arm-linux-user.mak b/configs/targets/arm-linux-user.mak index 7f5d65794c1..bf35ded7fea 100644 --- a/configs/targets/arm-linux-user.mak +++ b/configs/targets/arm-linux-user.mak @@ -5,3 +5,4 @@ TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml TARGET_HAS_BFLT=y CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/arm-softmmu.mak b/configs/targets/arm-softmmu.mak index bf390b7a8de..6a5a8eda949 100644 --- a/configs/targets/arm-softmmu.mak +++ b/configs/targets/arm-softmmu.mak @@ -1,5 +1,5 @@ TARGET_ARCH=arm -TARGET_SUPPORTS_MTTCG=y TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml # needed by boot.c TARGET_NEED_FDT=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/armeb-linux-user.mak b/configs/targets/armeb-linux-user.mak index 943d0d87bfd..35fa4d91b3c 100644 --- a/configs/targets/armeb-linux-user.mak +++ b/configs/targets/armeb-linux-user.mak @@ -6,3 +6,4 @@ TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml TARGET_HAS_BFLT=y CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/avr-softmmu.mak b/configs/targets/avr-softmmu.mak index e3f921c0199..b6157fc465d 100644 --- a/configs/targets/avr-softmmu.mak +++ b/configs/targets/avr-softmmu.mak @@ -1,2 +1,3 @@ TARGET_ARCH=avr TARGET_XML_FILES= gdb-xml/avr-cpu.xml +TARGET_LONG_BITS=32 diff --git a/configs/targets/cris-linux-user.mak b/configs/targets/cris-linux-user.mak deleted file mode 100644 index e483c420669..00000000000 --- a/configs/targets/cris-linux-user.mak +++ /dev/null @@ -1 +0,0 @@ -TARGET_ARCH=cris diff --git a/configs/targets/cris-softmmu.mak b/configs/targets/cris-softmmu.mak deleted file mode 100644 index e483c420669..00000000000 --- a/configs/targets/cris-softmmu.mak +++ /dev/null @@ -1 +0,0 @@ -TARGET_ARCH=cris diff --git a/configs/targets/hexagon-linux-user.mak b/configs/targets/hexagon-linux-user.mak index 2765a4c5638..aec1a04d1b4 100644 --- a/configs/targets/hexagon-linux-user.mak +++ b/configs/targets/hexagon-linux-user.mak @@ -1,2 +1,5 @@ TARGET_ARCH=hexagon TARGET_XML_FILES=gdb-xml/hexagon-core.xml gdb-xml/hexagon-hvx.xml +TARGET_SYSTBL=syscall.tbl +TARGET_SYSTBL_ABI=common,32,hexagon,time32,stat64,rlimit,renameat +TARGET_LONG_BITS=32 diff --git a/configs/targets/hppa-linux-user.mak b/configs/targets/hppa-linux-user.mak index 8e0a80492f6..59190f63358 100644 --- a/configs/targets/hppa-linux-user.mak +++ b/configs/targets/hppa-linux-user.mak @@ -3,3 +3,5 @@ TARGET_ABI32=y TARGET_SYSTBL_ABI=common,32 TARGET_SYSTBL=syscall.tbl TARGET_BIG_ENDIAN=y +# Compromise to ease maintenance vs system mode +TARGET_LONG_BITS=64 diff --git a/configs/targets/hppa-softmmu.mak b/configs/targets/hppa-softmmu.mak index a41662aa996..ea331107a08 100644 --- a/configs/targets/hppa-softmmu.mak +++ b/configs/targets/hppa-softmmu.mak @@ -1,3 +1,3 @@ TARGET_ARCH=hppa TARGET_BIG_ENDIAN=y -TARGET_SUPPORTS_MTTCG=y +TARGET_LONG_BITS=64 diff --git a/configs/targets/i386-bsd-user.mak 
b/configs/targets/i386-bsd-user.mak index 0283bb62a07..70e098da491 100644 --- a/configs/targets/i386-bsd-user.mak +++ b/configs/targets/i386-bsd-user.mak @@ -1,2 +1,3 @@ TARGET_ARCH=i386 TARGET_XML_FILES= gdb-xml/i386-32bit.xml +TARGET_LONG_BITS=32 diff --git a/configs/targets/i386-linux-user.mak b/configs/targets/i386-linux-user.mak index 5b2546a4309..ea68a266fce 100644 --- a/configs/targets/i386-linux-user.mak +++ b/configs/targets/i386-linux-user.mak @@ -1,4 +1,5 @@ TARGET_ARCH=i386 TARGET_SYSTBL_ABI=i386 TARGET_SYSTBL=syscall_32.tbl -TARGET_XML_FILES= gdb-xml/i386-32bit.xml +TARGET_XML_FILES= gdb-xml/i386-32bit.xml gdb-xml/i386-32bit-linux.xml +TARGET_LONG_BITS=32 diff --git a/configs/targets/i386-softmmu.mak b/configs/targets/i386-softmmu.mak index 2ac69d5ba37..e9d89e8ab41 100644 --- a/configs/targets/i386-softmmu.mak +++ b/configs/targets/i386-softmmu.mak @@ -1,4 +1,5 @@ TARGET_ARCH=i386 -TARGET_SUPPORTS_MTTCG=y TARGET_KVM_HAVE_GUEST_DEBUG=y +TARGET_KVM_HAVE_RESET_PARKED_VCPU=y TARGET_XML_FILES= gdb-xml/i386-32bit.xml +TARGET_LONG_BITS=32 diff --git a/configs/targets/loongarch64-linux-user.mak b/configs/targets/loongarch64-linux-user.mak index ea9b7e839aa..249a26a798c 100644 --- a/configs/targets/loongarch64-linux-user.mak +++ b/configs/targets/loongarch64-linux-user.mak @@ -2,3 +2,6 @@ TARGET_ARCH=loongarch64 TARGET_BASE_ARCH=loongarch TARGET_XML_FILES=gdb-xml/loongarch-base64.xml gdb-xml/loongarch-fpu.xml gdb-xml/loongarch-lsx.xml gdb-xml/loongarch-lasx.xml +TARGET_SYSTBL=syscall.tbl +TARGET_SYSTBL_ABI=common,64 +TARGET_LONG_BITS=64 diff --git a/configs/targets/loongarch64-softmmu.mak b/configs/targets/loongarch64-softmmu.mak index ce19ab6a160..fc44c54233d 100644 --- a/configs/targets/loongarch64-softmmu.mak +++ b/configs/targets/loongarch64-softmmu.mak @@ -1,7 +1,7 @@ TARGET_ARCH=loongarch64 TARGET_BASE_ARCH=loongarch TARGET_KVM_HAVE_GUEST_DEBUG=y -TARGET_SUPPORTS_MTTCG=y TARGET_XML_FILES= gdb-xml/loongarch-base32.xml gdb-xml/loongarch-base64.xml gdb-xml/loongarch-fpu.xml gdb-xml/loongarch-lsx.xml gdb-xml/loongarch-lasx.xml # all boards require libfdt TARGET_NEED_FDT=y +TARGET_LONG_BITS=64 diff --git a/configs/targets/m68k-linux-user.mak b/configs/targets/m68k-linux-user.mak index 579b5d299cc..2d9bae22707 100644 --- a/configs/targets/m68k-linux-user.mak +++ b/configs/targets/m68k-linux-user.mak @@ -4,3 +4,4 @@ TARGET_SYSTBL=syscall.tbl TARGET_BIG_ENDIAN=y TARGET_XML_FILES= gdb-xml/cf-core.xml gdb-xml/cf-fp.xml gdb-xml/m68k-core.xml gdb-xml/m68k-fp.xml TARGET_HAS_BFLT=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/m68k-softmmu.mak b/configs/targets/m68k-softmmu.mak index bbcd0bada69..bacc52e96a9 100644 --- a/configs/targets/m68k-softmmu.mak +++ b/configs/targets/m68k-softmmu.mak @@ -1,3 +1,4 @@ TARGET_ARCH=m68k TARGET_BIG_ENDIAN=y TARGET_XML_FILES= gdb-xml/cf-core.xml gdb-xml/cf-fp.xml gdb-xml/m68k-core.xml gdb-xml/m68k-fp.xml +TARGET_LONG_BITS=32 diff --git a/configs/targets/microblaze-linux-user.mak b/configs/targets/microblaze-linux-user.mak index 0a2322c249b..37727797695 100644 --- a/configs/targets/microblaze-linux-user.mak +++ b/configs/targets/microblaze-linux-user.mak @@ -4,3 +4,4 @@ TARGET_SYSTBL=syscall.tbl TARGET_BIG_ENDIAN=y TARGET_HAS_BFLT=y TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml +TARGET_LONG_BITS=32 diff --git a/configs/targets/microblaze-softmmu.mak b/configs/targets/microblaze-softmmu.mak index eea266d4f3d..bab7b498c24 100644 --- a/configs/targets/microblaze-softmmu.mak +++ b/configs/targets/microblaze-softmmu.mak @@ 
-1,6 +1,6 @@ TARGET_ARCH=microblaze TARGET_BIG_ENDIAN=y -TARGET_SUPPORTS_MTTCG=y # needed by boot.c TARGET_NEED_FDT=y TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml +TARGET_LONG_BITS=32 diff --git a/configs/targets/microblazeel-linux-user.mak b/configs/targets/microblazeel-linux-user.mak index 270743156a9..a51a05488d3 100644 --- a/configs/targets/microblazeel-linux-user.mak +++ b/configs/targets/microblazeel-linux-user.mak @@ -3,3 +3,4 @@ TARGET_SYSTBL_ABI=common TARGET_SYSTBL=syscall.tbl TARGET_HAS_BFLT=y TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml +TARGET_LONG_BITS=32 diff --git a/configs/targets/microblazeel-softmmu.mak b/configs/targets/microblazeel-softmmu.mak index 77b968acad3..8aee7ebc5cf 100644 --- a/configs/targets/microblazeel-softmmu.mak +++ b/configs/targets/microblazeel-softmmu.mak @@ -1,5 +1,5 @@ TARGET_ARCH=microblaze -TARGET_SUPPORTS_MTTCG=y # needed by boot.c TARGET_NEED_FDT=y TARGET_XML_FILES=gdb-xml/microblaze-core.xml gdb-xml/microblaze-stack-protect.xml +TARGET_LONG_BITS=32 diff --git a/configs/targets/mips-linux-user.mak b/configs/targets/mips-linux-user.mak index b4569a9893b..69bdc459b69 100644 --- a/configs/targets/mips-linux-user.mak +++ b/configs/targets/mips-linux-user.mak @@ -3,3 +3,4 @@ TARGET_ABI_MIPSO32=y TARGET_SYSTBL_ABI=o32 TARGET_SYSTBL=syscall_o32.tbl TARGET_BIG_ENDIAN=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/mips-softmmu.mak b/configs/targets/mips-softmmu.mak index d34b4083fc4..c9588066b8d 100644 --- a/configs/targets/mips-softmmu.mak +++ b/configs/targets/mips-softmmu.mak @@ -1,3 +1,3 @@ TARGET_ARCH=mips TARGET_BIG_ENDIAN=y -TARGET_SUPPORTS_MTTCG=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/mips64-linux-user.mak b/configs/targets/mips64-linux-user.mak index d2ff509a115..04e82b3ab11 100644 --- a/configs/targets/mips64-linux-user.mak +++ b/configs/targets/mips64-linux-user.mak @@ -4,3 +4,4 @@ TARGET_BASE_ARCH=mips TARGET_SYSTBL_ABI=n64 TARGET_SYSTBL=syscall_n64.tbl TARGET_BIG_ENDIAN=y +TARGET_LONG_BITS=64 diff --git a/configs/targets/mips64-softmmu.mak b/configs/targets/mips64-softmmu.mak index 12d9483bf07..7202655fcac 100644 --- a/configs/targets/mips64-softmmu.mak +++ b/configs/targets/mips64-softmmu.mak @@ -1,3 +1,4 @@ TARGET_ARCH=mips64 TARGET_BASE_ARCH=mips TARGET_BIG_ENDIAN=y +TARGET_LONG_BITS=64 diff --git a/configs/targets/mips64el-linux-user.mak b/configs/targets/mips64el-linux-user.mak index f9efeec8ea1..27f41694265 100644 --- a/configs/targets/mips64el-linux-user.mak +++ b/configs/targets/mips64el-linux-user.mak @@ -3,3 +3,4 @@ TARGET_ABI_MIPSN64=y TARGET_BASE_ARCH=mips TARGET_SYSTBL_ABI=n64 TARGET_SYSTBL=syscall_n64.tbl +TARGET_LONG_BITS=64 diff --git a/configs/targets/mips64el-softmmu.mak b/configs/targets/mips64el-softmmu.mak index 3864daa7364..3ebeadb29ea 100644 --- a/configs/targets/mips64el-softmmu.mak +++ b/configs/targets/mips64el-softmmu.mak @@ -1,2 +1,3 @@ TARGET_ARCH=mips64 TARGET_BASE_ARCH=mips +TARGET_LONG_BITS=64 diff --git a/configs/targets/mipsel-linux-user.mak b/configs/targets/mipsel-linux-user.mak index e8d7241d310..8b7e86ab288 100644 --- a/configs/targets/mipsel-linux-user.mak +++ b/configs/targets/mipsel-linux-user.mak @@ -2,3 +2,4 @@ TARGET_ARCH=mips TARGET_ABI_MIPSO32=y TARGET_SYSTBL_ABI=o32 TARGET_SYSTBL=syscall_o32.tbl +TARGET_LONG_BITS=32 diff --git a/configs/targets/mipsel-softmmu.mak b/configs/targets/mipsel-softmmu.mak index 0829659fc26..90e09bdc3e5 100644 --- a/configs/targets/mipsel-softmmu.mak +++ 
b/configs/targets/mipsel-softmmu.mak @@ -1,2 +1,2 @@ TARGET_ARCH=mips -TARGET_SUPPORTS_MTTCG=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/mipsn32-linux-user.mak b/configs/targets/mipsn32-linux-user.mak index 206095da641..39ae214633f 100644 --- a/configs/targets/mipsn32-linux-user.mak +++ b/configs/targets/mipsn32-linux-user.mak @@ -5,3 +5,4 @@ TARGET_BASE_ARCH=mips TARGET_SYSTBL_ABI=n32 TARGET_SYSTBL=syscall_n32.tbl TARGET_BIG_ENDIAN=y +TARGET_LONG_BITS=64 diff --git a/configs/targets/mipsn32el-linux-user.mak b/configs/targets/mipsn32el-linux-user.mak index ca2a3ed7536..d9b61d69902 100644 --- a/configs/targets/mipsn32el-linux-user.mak +++ b/configs/targets/mipsn32el-linux-user.mak @@ -4,3 +4,4 @@ TARGET_ABI32=y TARGET_BASE_ARCH=mips TARGET_SYSTBL_ABI=n32 TARGET_SYSTBL=syscall_n32.tbl +TARGET_LONG_BITS=64 diff --git a/configs/targets/or1k-linux-user.mak b/configs/targets/or1k-linux-user.mak index 39558f77ecf..810567a98f9 100644 --- a/configs/targets/or1k-linux-user.mak +++ b/configs/targets/or1k-linux-user.mak @@ -1,2 +1,5 @@ TARGET_ARCH=openrisc TARGET_BIG_ENDIAN=y +TARGET_SYSTBL_ABI=common,32,or1k,time32,stat64,rlimit,renameat +TARGET_SYSTBL=syscall.tbl +TARGET_LONG_BITS=32 diff --git a/configs/targets/or1k-softmmu.mak b/configs/targets/or1k-softmmu.mak index 0341cb2a6b3..0e47d9878b0 100644 --- a/configs/targets/or1k-softmmu.mak +++ b/configs/targets/or1k-softmmu.mak @@ -1,5 +1,5 @@ TARGET_ARCH=openrisc -TARGET_SUPPORTS_MTTCG=y TARGET_BIG_ENDIAN=y # needed by boot.c and all boards TARGET_NEED_FDT=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/ppc-linux-user.mak b/configs/targets/ppc-linux-user.mak index cc0439a5285..970d04a5ba1 100644 --- a/configs/targets/ppc-linux-user.mak +++ b/configs/targets/ppc-linux-user.mak @@ -3,3 +3,4 @@ TARGET_SYSTBL_ABI=common,nospu,32 TARGET_SYSTBL=syscall.tbl TARGET_BIG_ENDIAN=y TARGET_XML_FILES= gdb-xml/power-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml +TARGET_LONG_BITS=32 diff --git a/configs/targets/ppc-softmmu.mak b/configs/targets/ppc-softmmu.mak index 53120dab41d..9bfa7df6c36 100644 --- a/configs/targets/ppc-softmmu.mak +++ b/configs/targets/ppc-softmmu.mak @@ -2,3 +2,4 @@ TARGET_ARCH=ppc TARGET_BIG_ENDIAN=y TARGET_KVM_HAVE_GUEST_DEBUG=y TARGET_XML_FILES= gdb-xml/power-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml +TARGET_LONG_BITS=32 diff --git a/configs/targets/ppc64-linux-user.mak b/configs/targets/ppc64-linux-user.mak index 4d81969f4a2..461f1c67d15 100644 --- a/configs/targets/ppc64-linux-user.mak +++ b/configs/targets/ppc64-linux-user.mak @@ -5,3 +5,4 @@ TARGET_SYSTBL_ABI=common,nospu,64 TARGET_SYSTBL=syscall.tbl TARGET_BIG_ENDIAN=y TARGET_XML_FILES= gdb-xml/power64-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml gdb-xml/power-vsx.xml +TARGET_LONG_BITS=64 diff --git a/configs/targets/ppc64-softmmu.mak b/configs/targets/ppc64-softmmu.mak index 40881d93968..74572864b36 100644 --- a/configs/targets/ppc64-softmmu.mak +++ b/configs/targets/ppc64-softmmu.mak @@ -1,8 +1,8 @@ TARGET_ARCH=ppc64 TARGET_BASE_ARCH=ppc TARGET_BIG_ENDIAN=y -TARGET_SUPPORTS_MTTCG=y TARGET_KVM_HAVE_GUEST_DEBUG=y TARGET_XML_FILES= gdb-xml/power64-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml gdb-xml/power-vsx.xml # all boards require libfdt TARGET_NEED_FDT=y +TARGET_LONG_BITS=64 diff --git a/configs/targets/ppc64le-linux-user.mak b/configs/targets/ppc64le-linux-user.mak index 426d5a28d66..cf9d8a400d9 100644 --- 
a/configs/targets/ppc64le-linux-user.mak +++ b/configs/targets/ppc64le-linux-user.mak @@ -4,3 +4,4 @@ TARGET_ABI_DIR=ppc TARGET_SYSTBL_ABI=common,nospu,64 TARGET_SYSTBL=syscall.tbl TARGET_XML_FILES= gdb-xml/power64-core.xml gdb-xml/power-fpu.xml gdb-xml/power-altivec.xml gdb-xml/power-spe.xml gdb-xml/power-vsx.xml +TARGET_LONG_BITS=64 diff --git a/configs/targets/riscv32-linux-user.mak b/configs/targets/riscv32-linux-user.mak index 9761618e67f..a0ef03c0c3f 100644 --- a/configs/targets/riscv32-linux-user.mak +++ b/configs/targets/riscv32-linux-user.mak @@ -4,3 +4,7 @@ TARGET_ABI_DIR=riscv TARGET_XML_FILES= gdb-xml/riscv-32bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-32bit-virtual.xml CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y +TARGET_SYSTBL_ABI=32 +TARGET_SYSTBL_ABI=common,32,riscv,memfd_secret +TARGET_SYSTBL=syscall.tbl +TARGET_LONG_BITS=32 diff --git a/configs/targets/riscv32-softmmu.mak b/configs/targets/riscv32-softmmu.mak index 338182d5b89..db55275b868 100644 --- a/configs/targets/riscv32-softmmu.mak +++ b/configs/targets/riscv32-softmmu.mak @@ -1,6 +1,6 @@ TARGET_ARCH=riscv32 TARGET_BASE_ARCH=riscv -TARGET_SUPPORTS_MTTCG=y TARGET_XML_FILES= gdb-xml/riscv-32bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-32bit-virtual.xml # needed by boot.c TARGET_NEED_FDT=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/riscv64-bsd-user.mak b/configs/targets/riscv64-bsd-user.mak new file mode 100644 index 00000000000..c6348a79629 --- /dev/null +++ b/configs/targets/riscv64-bsd-user.mak @@ -0,0 +1,5 @@ +TARGET_ARCH=riscv64 +TARGET_BASE_ARCH=riscv +TARGET_ABI_DIR=riscv +TARGET_XML_FILES= gdb-xml/riscv-64bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-64bit-virtual.xml +TARGET_LONG_BITS=64 diff --git a/configs/targets/riscv64-linux-user.mak b/configs/targets/riscv64-linux-user.mak index cfd1fd382f9..aac7568305d 100644 --- a/configs/targets/riscv64-linux-user.mak +++ b/configs/targets/riscv64-linux-user.mak @@ -4,3 +4,7 @@ TARGET_ABI_DIR=riscv TARGET_XML_FILES= gdb-xml/riscv-64bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-64bit-virtual.xml CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y +TARGET_SYSTBL_ABI=64 +TARGET_SYSTBL_ABI=common,64,riscv,rlimit,memfd_secret +TARGET_SYSTBL=syscall.tbl +TARGET_LONG_BITS=64 diff --git a/configs/targets/riscv64-softmmu.mak b/configs/targets/riscv64-softmmu.mak index 917980e63e8..2bdd4a62cd2 100644 --- a/configs/targets/riscv64-softmmu.mak +++ b/configs/targets/riscv64-softmmu.mak @@ -1,7 +1,7 @@ TARGET_ARCH=riscv64 TARGET_BASE_ARCH=riscv -TARGET_SUPPORTS_MTTCG=y TARGET_KVM_HAVE_GUEST_DEBUG=y -TARGET_XML_FILES= gdb-xml/riscv-64bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-64bit-virtual.xml +TARGET_XML_FILES= gdb-xml/riscv-64bit-cpu.xml gdb-xml/riscv-32bit-fpu.xml gdb-xml/riscv-64bit-fpu.xml gdb-xml/riscv-64bit-virtual.xml gdb-xml/riscv-32bit-cpu.xml gdb-xml/riscv-32bit-virtual.xml # needed by boot.c TARGET_NEED_FDT=y +TARGET_LONG_BITS=64 diff --git a/configs/targets/rx-softmmu.mak b/configs/targets/rx-softmmu.mak index 706bbe6062c..1c250a6450d 100644 --- a/configs/targets/rx-softmmu.mak +++ b/configs/targets/rx-softmmu.mak @@ -2,3 +2,4 @@ TARGET_ARCH=rx TARGET_XML_FILES= gdb-xml/rx-core.xml # all boards require libfdt TARGET_NEED_FDT=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/s390x-linux-user.mak b/configs/targets/s390x-linux-user.mak index 
24c04c85894..68c2f288724 100644 --- a/configs/targets/s390x-linux-user.mak +++ b/configs/targets/s390x-linux-user.mak @@ -3,3 +3,4 @@ TARGET_SYSTBL_ABI=common,64 TARGET_SYSTBL=syscall.tbl TARGET_BIG_ENDIAN=y TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-virt-kvm.xml gdb-xml/s390-gs.xml +TARGET_LONG_BITS=64 diff --git a/configs/targets/s390x-softmmu.mak b/configs/targets/s390x-softmmu.mak index b22218aacc8..76dd5de6584 100644 --- a/configs/targets/s390x-softmmu.mak +++ b/configs/targets/s390x-softmmu.mak @@ -1,5 +1,5 @@ TARGET_ARCH=s390x TARGET_BIG_ENDIAN=y -TARGET_SUPPORTS_MTTCG=y TARGET_KVM_HAVE_GUEST_DEBUG=y TARGET_XML_FILES= gdb-xml/s390x-core64.xml gdb-xml/s390-acr.xml gdb-xml/s390-fpr.xml gdb-xml/s390-vx.xml gdb-xml/s390-cr.xml gdb-xml/s390-virt.xml gdb-xml/s390-virt-kvm.xml gdb-xml/s390-gs.xml +TARGET_LONG_BITS=64 diff --git a/configs/targets/sh4-linux-user.mak b/configs/targets/sh4-linux-user.mak index 99088875666..d58c5471b77 100644 --- a/configs/targets/sh4-linux-user.mak +++ b/configs/targets/sh4-linux-user.mak @@ -2,3 +2,4 @@ TARGET_ARCH=sh4 TARGET_SYSTBL_ABI=common TARGET_SYSTBL=syscall.tbl TARGET_HAS_BFLT=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/sh4-softmmu.mak b/configs/targets/sh4-softmmu.mak index f9d62d91e4d..787d349b501 100644 --- a/configs/targets/sh4-softmmu.mak +++ b/configs/targets/sh4-softmmu.mak @@ -1 +1,2 @@ TARGET_ARCH=sh4 +TARGET_LONG_BITS=32 diff --git a/configs/targets/sh4eb-linux-user.mak b/configs/targets/sh4eb-linux-user.mak index 9db6b3609c2..99007f0f2d6 100644 --- a/configs/targets/sh4eb-linux-user.mak +++ b/configs/targets/sh4eb-linux-user.mak @@ -3,3 +3,4 @@ TARGET_SYSTBL_ABI=common TARGET_SYSTBL=syscall.tbl TARGET_BIG_ENDIAN=y TARGET_HAS_BFLT=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/sh4eb-softmmu.mak b/configs/targets/sh4eb-softmmu.mak index 226b1fc698a..cdea2c61c58 100644 --- a/configs/targets/sh4eb-softmmu.mak +++ b/configs/targets/sh4eb-softmmu.mak @@ -1,2 +1,3 @@ TARGET_ARCH=sh4 TARGET_BIG_ENDIAN=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/sparc-linux-user.mak b/configs/targets/sparc-linux-user.mak index abcfb8fc624..4ff4b7287d2 100644 --- a/configs/targets/sparc-linux-user.mak +++ b/configs/targets/sparc-linux-user.mak @@ -2,3 +2,4 @@ TARGET_ARCH=sparc TARGET_SYSTBL_ABI=common,32 TARGET_SYSTBL=syscall.tbl TARGET_BIG_ENDIAN=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/sparc-softmmu.mak b/configs/targets/sparc-softmmu.mak index a5d92003826..57801faf1fc 100644 --- a/configs/targets/sparc-softmmu.mak +++ b/configs/targets/sparc-softmmu.mak @@ -1,3 +1,3 @@ TARGET_ARCH=sparc TARGET_BIG_ENDIAN=y -TARGET_SUPPORTS_MTTCG=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/sparc32plus-linux-user.mak b/configs/targets/sparc32plus-linux-user.mak index 6cc8fa516b4..7a16934fd17 100644 --- a/configs/targets/sparc32plus-linux-user.mak +++ b/configs/targets/sparc32plus-linux-user.mak @@ -5,3 +5,4 @@ TARGET_ABI_DIR=sparc TARGET_SYSTBL_ABI=common,32 TARGET_SYSTBL=syscall.tbl TARGET_BIG_ENDIAN=y +TARGET_LONG_BITS=64 diff --git a/configs/targets/sparc64-linux-user.mak b/configs/targets/sparc64-linux-user.mak index 52f05ec0008..7c2ecb7be06 100644 --- a/configs/targets/sparc64-linux-user.mak +++ b/configs/targets/sparc64-linux-user.mak @@ -4,3 +4,5 @@ TARGET_ABI_DIR=sparc TARGET_SYSTBL_ABI=common,64 TARGET_SYSTBL=syscall.tbl TARGET_BIG_ENDIAN=y +TARGET_XML_FILES=gdb-xml/sparc64-core.xml +TARGET_LONG_BITS=64 diff --git 
a/configs/targets/sparc64-softmmu.mak b/configs/targets/sparc64-softmmu.mak index 36ca64ec41d..d9d51d21e59 100644 --- a/configs/targets/sparc64-softmmu.mak +++ b/configs/targets/sparc64-softmmu.mak @@ -1,4 +1,5 @@ TARGET_ARCH=sparc64 TARGET_BASE_ARCH=sparc TARGET_BIG_ENDIAN=y -TARGET_SUPPORTS_MTTCG=y +TARGET_XML_FILES=gdb-xml/sparc64-core.xml +TARGET_LONG_BITS=64 diff --git a/configs/targets/tricore-softmmu.mak b/configs/targets/tricore-softmmu.mak index 96b10af8533..781ce49a62f 100644 --- a/configs/targets/tricore-softmmu.mak +++ b/configs/targets/tricore-softmmu.mak @@ -1 +1,2 @@ TARGET_ARCH=tricore +TARGET_LONG_BITS=32 diff --git a/configs/targets/x86_64-bsd-user.mak b/configs/targets/x86_64-bsd-user.mak index 799cd4acd41..d62d656f2c6 100644 --- a/configs/targets/x86_64-bsd-user.mak +++ b/configs/targets/x86_64-bsd-user.mak @@ -1,3 +1,4 @@ TARGET_ARCH=x86_64 TARGET_BASE_ARCH=i386 TARGET_XML_FILES= gdb-xml/i386-64bit.xml +TARGET_LONG_BITS=64 diff --git a/configs/targets/x86_64-linux-user.mak b/configs/targets/x86_64-linux-user.mak index 9ceefbb615a..b093ab5a167 100644 --- a/configs/targets/x86_64-linux-user.mak +++ b/configs/targets/x86_64-linux-user.mak @@ -2,4 +2,5 @@ TARGET_ARCH=x86_64 TARGET_BASE_ARCH=i386 TARGET_SYSTBL_ABI=common,64 TARGET_SYSTBL=syscall_64.tbl -TARGET_XML_FILES= gdb-xml/i386-64bit.xml +TARGET_XML_FILES= gdb-xml/i386-64bit.xml gdb-xml/i386-64bit-linux.xml +TARGET_LONG_BITS=64 diff --git a/configs/targets/x86_64-softmmu.mak b/configs/targets/x86_64-softmmu.mak index e12ac3dc59b..5619b2bc686 100644 --- a/configs/targets/x86_64-softmmu.mak +++ b/configs/targets/x86_64-softmmu.mak @@ -1,5 +1,6 @@ TARGET_ARCH=x86_64 TARGET_BASE_ARCH=i386 -TARGET_SUPPORTS_MTTCG=y TARGET_KVM_HAVE_GUEST_DEBUG=y +TARGET_KVM_HAVE_RESET_PARKED_VCPU=y TARGET_XML_FILES= gdb-xml/i386-64bit.xml +TARGET_LONG_BITS=64 diff --git a/configs/targets/xtensa-linux-user.mak b/configs/targets/xtensa-linux-user.mak index 420b30a68d9..cbec6e368af 100644 --- a/configs/targets/xtensa-linux-user.mak +++ b/configs/targets/xtensa-linux-user.mak @@ -2,3 +2,4 @@ TARGET_ARCH=xtensa TARGET_SYSTBL_ABI=common TARGET_SYSTBL=syscall.tbl TARGET_HAS_BFLT=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/xtensa-softmmu.mak b/configs/targets/xtensa-softmmu.mak index f075557bfa9..2a9797338a6 100644 --- a/configs/targets/xtensa-softmmu.mak +++ b/configs/targets/xtensa-softmmu.mak @@ -1,2 +1,2 @@ TARGET_ARCH=xtensa -TARGET_SUPPORTS_MTTCG=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/xtensaeb-linux-user.mak b/configs/targets/xtensaeb-linux-user.mak index bce2d1d65d2..f455b1c7801 100644 --- a/configs/targets/xtensaeb-linux-user.mak +++ b/configs/targets/xtensaeb-linux-user.mak @@ -3,3 +3,4 @@ TARGET_SYSTBL_ABI=common TARGET_SYSTBL=syscall.tbl TARGET_BIG_ENDIAN=y TARGET_HAS_BFLT=y +TARGET_LONG_BITS=32 diff --git a/configs/targets/xtensaeb-softmmu.mak b/configs/targets/xtensaeb-softmmu.mak index b02e11b8200..5204729af8b 100644 --- a/configs/targets/xtensaeb-softmmu.mak +++ b/configs/targets/xtensaeb-softmmu.mak @@ -1,3 +1,3 @@ TARGET_ARCH=xtensa TARGET_BIG_ENDIAN=y -TARGET_SUPPORTS_MTTCG=y +TARGET_LONG_BITS=32 diff --git a/configure b/configure index 019fcbd0ef7..274a7787642 100755 --- a/configure +++ b/configure @@ -13,7 +13,7 @@ export CCACHE_RECACHE=yes # make source path absolute source_path=$(cd "$(dirname -- "$0")"; pwd) -if test "$PWD" = "$source_path" +if test "$PWD" -ef "$source_path" then echo "Using './build' as the directory for build output" @@ -207,6 +207,10 @@ for opt do ;; --objcc=*) objcc="$optarg" ;; + 
--rustc=*) RUSTC="$optarg" + ;; + --rustdoc=*) RUSTDOC="$optarg" + ;; --cpu=*) cpu="$optarg" ;; --extra-cflags=*) @@ -252,6 +256,8 @@ python= download="enabled" skip_meson=no use_containers="yes" +rust="disabled" +rust_target_triple="" gdb_bin=$(command -v "gdb-multiarch" || command -v "gdb") gdb_arches="" @@ -310,6 +316,7 @@ objcopy="${OBJCOPY-${cross_prefix}objcopy}" ld="${LD-${cross_prefix}ld}" ranlib="${RANLIB-${cross_prefix}ranlib}" nm="${NM-${cross_prefix}nm}" +readelf="${READELF-${cross_prefix}readelf}" strip="${STRIP-${cross_prefix}strip}" widl="${WIDL-${cross_prefix}widl}" windres="${WINDRES-${cross_prefix}windres}" @@ -317,6 +324,9 @@ windmc="${WINDMC-${cross_prefix}windmc}" pkg_config="${PKG_CONFIG-${cross_prefix}pkg-config}" sdl2_config="${SDL2_CONFIG-${cross_prefix}sdl2-config}" +rustc="${RUSTC-rustc}" +rustdoc="${RUSTDOC-rustdoc}" + check_define() { cat > $TMPC <= 3.8. + # We require python >= 3.9. # NB: a True python conditional creates a non-zero return code (Failure) - "$1" -c 'import sys; sys.exit(sys.version_info < (3,8))' + "$1" -c 'import sys; sys.exit(sys.version_info < (3,9))' } first_python= if test -z "${PYTHON}"; then # A bare 'python' is traditionally python 2.x, but some distros # have it as python 3.x, so check in both places. - for binary in python3 python python3.12 python3.11 \ - python3.10 python3.9 python3.8; do + for binary in python3 python python3.13 python3.12 python3.11 \ + python3.10 python3.9 ; do if has "$binary"; then python=$(command -v "$binary") if check_py_version "$python"; then @@ -610,6 +631,9 @@ meson_option_parse() { exit 1 fi } +has_meson_option() { + test "${meson_options#*"$1"}" != "$meson_options" +} meson_add_machine_file() { if test "$cross_compile" = "yes"; then @@ -636,6 +660,10 @@ for opt do ;; --objcc=*) ;; + --rustc=*) + ;; + --rustdoc=*) + ;; --make=*) ;; --install=*) @@ -755,8 +783,14 @@ for opt do ;; --container-engine=*) container_engine="$optarg" ;; + --rust-target-triple=*) rust_target_triple="$optarg" + ;; --gdb=*) gdb_bin="$optarg" ;; + --enable-rust) rust=enabled + ;; + --disable-rust) rust=disabled + ;; # everything else has the same name in configure and meson --*) meson_option_parse "$opt" "$optarg" ;; @@ -859,6 +893,8 @@ Advanced options (experts only): at build time [$host_cc] --cxx=CXX use C++ compiler CXX [$cxx] --objcc=OBJCC use Objective-C compiler OBJCC [$objcc] + --rustc=RUSTC use Rust compiler RUSTC [$rustc] + --rustdoc=RUSTDOC use rustdoc binary RUSTDOC [$rustdoc] --extra-cflags=CFLAGS append extra C compiler flags CFLAGS --extra-cxxflags=CXXFLAGS append extra C++ compiler flags CXXFLAGS --extra-objcflags=OBJCFLAGS append extra Objective C compiler flags OBJCFLAGS @@ -869,8 +905,9 @@ Advanced options (experts only): --python=PYTHON use specified python [$python] --ninja=NINJA use specified ninja [$ninja] --static enable static build [$static] - --without-default-features default all --enable-* options to "disabled" - --without-default-devices do not include any device that is not needed to + --rust-target-triple=TRIPLE compilation target for Rust code [autodetect] + --without-default-features default all --enable-* options to "disabled" + --without-default-devices do not include any device that is not needed to start the emulator (only use if you are including desired devices in configs/devices/) --with-devices-ARCH=NAME override default configs/devices @@ -908,7 +945,7 @@ then # If first_python is set, there was a binary somewhere even though # it was not suitable. Use it for the error message. 
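The new --enable-rust, --rustc, --rustdoc and --rust-target-triple switches added to configure above wire a Rust toolchain into the build. A purely illustrative invocation, with placeholder paths and triple:

    ./configure --enable-rust \
        --rustc="$HOME/.cargo/bin/rustc" \
        --rustdoc="$HOME/.cargo/bin/rustdoc" \
        --rust-target-triple=aarch64-unknown-linux-gnu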
if test -n "$first_python"; then - error_exit "Cannot use '$first_python', Python >= 3.8 is required." \ + error_exit "Cannot use '$first_python', Python >= 3.9 is required." \ "Use --python=/path/to/python to specify a supported Python." else error_exit "Python not found. Use --python=/path/to/python" @@ -916,11 +953,11 @@ then fi if ! check_py_version "$python"; then - error_exit "Cannot use '$python', Python >= 3.8 is required." \ + error_exit "Cannot use '$python', Python >= 3.9 is required." \ "Use --python=/path/to/python to specify a supported Python." \ "Maybe try:" \ " openSUSE Leap 15.3+: zypper install python39" \ - " CentOS 8: dnf install python38" + " CentOS: dnf install python3.12" fi # Resolve PATH @@ -1029,8 +1066,11 @@ if test "$static" = "yes" ; then plugins="no" fi if test "$plugins" != "no"; then - plugins=yes - subdirs="$subdirs contrib/plugins" + if has_meson_option "-Dtcg_interpreter=true"; then + plugins="no" + else + plugins=yes + fi fi cat > $TMPC << EOF @@ -1103,8 +1143,10 @@ fi # gdb test if test -n "$gdb_bin"; then - gdb_version=$($gdb_bin --version | head -n 1) - if version_ge ${gdb_version##* } 9.1; then + gdb_version_string=$($gdb_bin --version | head -n 1) + # Extract last field in the version string + gdb_version=${gdb_version_string##* } + if version_ge $gdb_version 9.1; then gdb_arches=$($python "$source_path/scripts/probe-gdb-support.py" $gdb_bin) else gdb_bin="" @@ -1138,6 +1180,142 @@ EOF fi fi +########################################## +# detect rust triple + +meson_version=$($meson --version) +if test "$rust" != disabled && ! version_ge "$meson_version" 1.8.1; then + if test "$rust" = enabled; then + $mkvenv ensuregroup --dir "${source_path}/python/wheels" \ + ${source_path}/pythondeps.toml meson-rust || exit 1 + else + echo "Rust needs Meson 1.8.1, disabling" 2>&1 + rust=disabled + fi +fi +if test "$rust" != disabled && has "$rustc" && $rustc -vV > "${TMPDIR1}/${TMPB}.out"; then + rust_host_triple=$(sed -n 's/^host: //p' "${TMPDIR1}/${TMPB}.out") +else + if test "$rust" = enabled; then + error_exit "could not execute rustc binary \"$rustc\"" + fi + rust=disabled +fi +if test "$rust" != disabled && test -z "$rust_target_triple"; then + # arch and os generally matches between meson and rust + rust_arch=$host_arch + rust_os=$host_os + rust_machine=unknown + rust_osvariant= + + # tweak rust_os if needed; also, machine and variant depend on the OS + android=no + case "$host_os" in + darwin) + # e.g. aarch64-apple-darwin + rust_machine=apple + ;; + + linux) + # detect android/glibc/musl + if check_define __ANDROID__; then + rust_osvariant=android + android=yes + else + cat > $TMPC << EOF +#define _GNU_SOURCE +#include +#ifndef __USE_GNU +error using musl +#endif +EOF + if compile_object; then + rust_osvariant=gnu + else + rust_osvariant=musl + fi + fi + + case "$cpu" in + arm) + # e.g. arm-unknown-linux-gnueabi, arm-unknown-linux-gnueabihf + write_c_skeleton + compile_object + if $READELF -A $TMPO | grep Tag_API_VFP_args: > /dev/null; then + rust_osvariant=${rust_osvariant}eabihf + else + rust_osvariant=${rust_osvariant}eabi + fi + ;; + + mips64) + # e.g. mips64-unknown-linux-gnuabi64 + rust_osvariant=${rust_osvariant}abi64 + ;; + esac + ;; + + netbsd) + # e.g. arm-unknown-netbsd-eabihf + test "$host_arch" = arm && rust_osvariant=eabihf + ;; + + sunos) + rust_machine=pc + rust_os=solaris + ;; + + windows) + # e.g. 
aarch64-pc-windows-gnullvm, x86_64-pc-windows-gnu (MSVC not supported) + rust_machine=pc + if test "$host_arch" = aarch64; then + rust_osvariant=gnullvm + else + rust_osvariant=gnu + fi + ;; + esac + + # now tweak the architecture part, possibly based on pre-canonicalization --cpu + case "$host_arch" in + arm) + # preserve ISA version (armv7 etc.) from $raw_cpu if passed via --cpu + rust_arch=$raw_cpu + test "$rust_arch" = arm && test "$rust_os" != linux && rust_arch=armv7 + ;; + + mips) + # preserve ISA version (mipsisa64r6 etc.) and include endianness + rust_arch=${raw_cpu%el} + test "$bigendian" = no && rust_arch=${rust_arch}el + ;; + + riscv32|riscv64) + # e.g. riscv64gc-unknown-linux-gnu, but riscv64-linux-android + test "$android" = no && rust_arch=${rust_arch}gc + ;; + + sparc64) + if test "$rust_os" = solaris; then + rust_arch=sparcv9 + rust_machine=sun + fi + ;; + + x86_64) + # e.g. x86_64-unknown-linux-gnux32 + test "$raw_cpu" = x32 && rust_osvariant=${rust_osvariant}x32 + ;; + esac + + if test "$android" = yes; then + # e.g. aarch64-linux-android + rust_target_triple=$rust_arch-$rust_os-$rust_osvariant + else + rust_target_triple=$rust_arch-$rust_machine-$rust_os${rust_osvariant:+-$rust_osvariant} + fi +fi + ########################################## # functions to probe cross compilers @@ -1246,9 +1424,9 @@ probe_target_compiler() { target_arch=${1%%-*} case $target_arch in aarch64) container_hosts="x86_64 aarch64" ;; + aarch64_be) container_hosts="x86_64 aarch64" ;; alpha) container_hosts=x86_64 ;; arm) container_hosts="x86_64 aarch64" ;; - cris) container_hosts=x86_64 ;; hexagon) container_hosts=x86_64 ;; hppa) container_hosts=x86_64 ;; i386) container_hosts=x86_64 ;; @@ -1276,6 +1454,10 @@ probe_target_compiler() { case $target_arch in # debian-all-test-cross architectures + aarch64_be) + container_image=debian-all-test-cross + container_cross_prefix=aarch64-linux-gnu- + ;; hppa|m68k|mips|riscv64|sparc64) container_image=debian-all-test-cross ;; @@ -1307,9 +1489,6 @@ probe_target_compiler() { container_image=debian-armhf-cross container_cross_prefix=arm-linux-gnueabihf- ;; - cris) - container_image=fedora-cris-cross - ;; hexagon) container_cross_prefix=hexagon-unknown-linux-musl- container_cross_cc=${container_cross_prefix}clang @@ -1326,7 +1505,7 @@ probe_target_compiler() { container_cross_prefix=microblaze-linux-musl- ;; mips64el) - container_image=debian-mips64el-cross + container_image=debian-all-test-cross container_cross_prefix=mips64el-linux-gnuabi64- ;; tricore) @@ -1528,10 +1707,9 @@ LINKS="$LINKS pc-bios/optionrom/Makefile" LINKS="$LINKS pc-bios/s390-ccw/Makefile" LINKS="$LINKS pc-bios/vof/Makefile" LINKS="$LINKS .gdbinit scripts" # scripts needed by relative path in .gdbinit -LINKS="$LINKS tests/avocado tests/data" +LINKS="$LINKS tests/data" LINKS="$LINKS tests/qemu-iotests/check tests/qemu-iotests/Makefile" LINKS="$LINKS python" -LINKS="$LINKS contrib/plugins/Makefile " for f in $LINKS ; do if [ -e "$source_path/$f" ]; then symlink "$source_path/$f" "$f" @@ -1604,6 +1782,9 @@ if test "$container" != no; then echo "RUNC=$runc" >> $config_host_mak fi echo "SUBDIRS=$subdirs" >> $config_host_mak +if test "$rust" != disabled; then + echo "RUST_TARGET_TRIPLE=$rust_target_triple" >> $config_host_mak +fi echo "PYTHON=$python" >> $config_host_mak echo "MKVENV_ENSUREGROUP=$mkvenv ensuregroup $mkvenv_online_flag" >> $config_host_mak echo "GENISOIMAGE=$genisoimage" >> $config_host_mak @@ -1614,22 +1795,6 @@ if test "$default_targets" = "yes"; then echo 
"CONFIG_DEFAULT_TARGETS=y" >> $config_host_mak fi -# contrib/plugins configuration -echo "# Automatically generated by configure - do not modify" > contrib/plugins/$config_host_mak -echo "SRC_PATH=$source_path/contrib/plugins" >> contrib/plugins/$config_host_mak -echo "PKG_CONFIG=${pkg_config}" >> contrib/plugins/$config_host_mak -echo "CC=$cc $CPU_CFLAGS" >> contrib/plugins/$config_host_mak -echo "CFLAGS=${CFLAGS-$default_cflags} $EXTRA_CFLAGS" >> contrib/plugins/$config_host_mak -if test "$host_os" = windows; then - echo "DLLTOOL=$dlltool" >> contrib/plugins/$config_host_mak -fi -if test "$host_os" = darwin; then - echo "CONFIG_DARWIN=y" >> contrib/plugins/$config_host_mak -fi -if test "$host_os" = windows; then - echo "CONFIG_WIN32=y" >> contrib/plugins/$config_host_mak -fi - # tests/tcg configuration mkdir -p tests/tcg echo "# Automatically generated by configure - do not modify" > tests/tcg/$config_host_mak @@ -1637,6 +1802,7 @@ echo "SRC_PATH=$source_path" >> tests/tcg/$config_host_mak if test "$plugins" = "yes" ; then echo "CONFIG_PLUGIN=y" >> tests/tcg/$config_host_mak fi +echo "PYTHON=$python" >> tests/tcg/$config_host_mak tcg_tests_targets= for target in $target_list; do @@ -1673,10 +1839,15 @@ for target in $target_list; do echo "GDB=$gdb_bin" >> $config_target_mak fi - if test "${arch}" = "aarch64" && version_ge ${gdb_version##* } 15.0; then + if test "${gdb_arches#*aarch64}" != "$gdb_arches" && version_ge $gdb_version 15.1; then echo "GDB_HAS_MTE=y" >> $config_target_mak fi + if test "${gdb_arches#*aarch64}" != "$gdb_arches" && version_ge $gdb_version 16.0; then + # GDB has to support MTE in baremetal to allow debugging MTE in QEMU system mode + echo "GDB_SUPPORTS_MTE_IN_BAREMETAL=y" >> $config_target_mak + fi + echo "run-tcg-tests-$target: $qemu\$(EXESUF)" >> Makefile.prereqs tcg_tests_targets="$tcg_tests_targets $target" fi @@ -1735,12 +1906,22 @@ if test "$skip_meson" = no; then echo "c = [$(meson_quote $cc $CPU_CFLAGS)]" >> $cross test -n "$cxx" && echo "cpp = [$(meson_quote $cxx $CPU_CFLAGS)]" >> $cross test -n "$objcc" && echo "objc = [$(meson_quote $objcc $CPU_CFLAGS)]" >> $cross + if test "$rust" != disabled; then + if test "$rust_host_triple" != "$rust_target_triple"; then + echo "rust = [$(meson_quote $rustc --target "$rust_target_triple")]" >> $cross + echo "rustdoc = [$(meson_quote $rustdoc --target "$rust_target_triple")]" >> $cross + else + echo "rust = [$(meson_quote $rustc)]" >> $cross + echo "rustdoc = [$(meson_quote $rustdoc)]" >> $cross + fi + fi echo "ar = [$(meson_quote $ar)]" >> $cross echo "dlltool = [$(meson_quote $dlltool)]" >> $cross echo "nm = [$(meson_quote $nm)]" >> $cross echo "pkgconfig = [$(meson_quote $pkg_config)]" >> $cross echo "pkg-config = [$(meson_quote $pkg_config)]" >> $cross echo "ranlib = [$(meson_quote $ranlib)]" >> $cross + echo "readelf = [$(meson_quote $readelf)]" >> $cross if has $sdl2_config; then echo "sdl2-config = [$(meson_quote $sdl2_config)]" >> $cross fi @@ -1770,6 +1951,9 @@ if test "$skip_meson" = no; then echo "# Automatically generated by configure - do not modify" > $native echo "[binaries]" >> $native echo "c = [$(meson_quote $host_cc)]" >> $native + if test "$rust" != disabled; then + echo "rust = [$(meson_quote $rustc)]" >> $cross + fi mv $native config-meson.native meson_option_add --native-file meson_option_add config-meson.native @@ -1788,6 +1972,7 @@ if test "$skip_meson" = no; then test "$pie" = no && meson_option_add -Db_pie=false # QEMU options + test "$rust" != "disabled" && meson_option_add "-Drust=$rust" 
test "$cfi" != false && meson_option_add "-Dcfi=$cfi" "-Db_lto=$cfi" test "$docs" != auto && meson_option_add "-Ddocs=$docs" test -n "${LIB_FUZZING_ENGINE+xxx}" && meson_option_add "-Dfuzzing_engine=$LIB_FUZZING_ENGINE" @@ -1872,3 +2057,11 @@ echo ' "$@"' >>config.status chmod +x config.status rm -r "$TMPDIR1" + +if test "$rust" != disabled; then + echo + echo 'INFO: Rust bindings generation with `bindgen` might fail in some cases where' + echo 'the detected `libclang` does not match the expected `clang` version/target. In' + echo 'this case you must pass the path to `clang` and `libclang` to your build' + echo 'command invocation using the environment variables CLANG_PATH and LIBCLANG_PATH' +fi diff --git a/contrib/elf2dmp/pdb.c b/contrib/elf2dmp/pdb.c index 492aca4434c..47c5126fb86 100644 --- a/contrib/elf2dmp/pdb.c +++ b/contrib/elf2dmp/pdb.c @@ -14,8 +14,8 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA + * License along with this library; if not, see + * . */ #include "qemu/osdep.h" diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile deleted file mode 100644 index edf256cd9d1..00000000000 --- a/contrib/plugins/Makefile +++ /dev/null @@ -1,85 +0,0 @@ -# -*- Mode: makefile -*- -# -# This Makefile example is fairly independent from the main makefile -# so users can take and adapt it for their build. We only really -# include config-host.mak so we don't have to repeat probing for -# programs that the main configure has already done for us. -# - -include config-host.mak - -TOP_SRC_PATH = $(SRC_PATH)/../.. - -VPATH += $(SRC_PATH) - -NAMES := -NAMES += execlog -NAMES += hotblocks -NAMES += hotpages -NAMES += howvec - -# The lockstep example communicates using unix sockets, -# and can't be easily made to work on windows. -ifneq ($(CONFIG_WIN32),y) -NAMES += lockstep -endif - -NAMES += hwprofile -NAMES += cache -NAMES += drcov -NAMES += ips -NAMES += stoptrigger - -ifeq ($(CONFIG_WIN32),y) -SO_SUFFIX := .dll -LDLIBS += $(shell $(PKG_CONFIG) --libs glib-2.0) -else -SO_SUFFIX := .so -endif - -SONAMES := $(addsuffix $(SO_SUFFIX),$(addprefix lib,$(NAMES))) - -# The main QEMU uses Glib extensively so it is perfectly fine to use it -# in plugins (which many example do). 
-PLUGIN_CFLAGS := $(shell $(PKG_CONFIG) --cflags glib-2.0) -PLUGIN_CFLAGS += -fPIC -Wall -PLUGIN_CFLAGS += -I$(TOP_SRC_PATH)/include/qemu - -# Helper that honours V=1 so we get some output when compiling -quiet-@ = $(if $(V),,@$(if $1,printf " %-7s %s\n" "$(strip $1)" "$(strip $2)" && )) -quiet-command = $(call quiet-@,$2,$3)$1 - -# for including , in command strings -COMMA := , - -all: $(SONAMES) - -%.o: %.c - $(call quiet-command, \ - $(CC) $(CFLAGS) $(PLUGIN_CFLAGS) -c -o $@ $<, \ - BUILD, plugin $@) - -ifeq ($(CONFIG_WIN32),y) -lib%$(SO_SUFFIX): %.o win32_linker.o ../../plugins/libqemu_plugin_api.a - $(call quiet-command, \ - $(CC) -shared -o $@ $^ $(LDLIBS), \ - LINK, plugin $@) -else ifeq ($(CONFIG_DARWIN),y) -lib%$(SO_SUFFIX): %.o - $(call quiet-command, \ - $(CC) -bundle -Wl$(COMMA)-undefined$(COMMA)dynamic_lookup -o $@ $^ $(LDLIBS), \ - LINK, plugin $@) -else -lib%$(SO_SUFFIX): %.o - $(call quiet-command, \ - $(CC) -shared -o $@ $^ $(LDLIBS), \ - LINK, plugin $@) -endif - - -clean: - rm -f *.o *$(SO_SUFFIX) *.d - rm -Rf .libs - -.PHONY: all clean -.SECONDARY: diff --git a/contrib/plugins/bbv.c b/contrib/plugins/bbv.c new file mode 100644 index 00000000000..b9da6f815e3 --- /dev/null +++ b/contrib/plugins/bbv.c @@ -0,0 +1,158 @@ +/* + * Generate basic block vectors for use with the SimPoint analysis tool. + * SimPoint: https://cseweb.ucsd.edu/~calder/simpoint/ + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include + +#include + +typedef struct Bb { + uint64_t vaddr; + struct qemu_plugin_scoreboard *count; + unsigned int index; +} Bb; + +typedef struct Vcpu { + uint64_t count; + FILE *file; +} Vcpu; + +QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; +static GHashTable *bbs; +static GRWLock bbs_lock; +static char *filename; +static struct qemu_plugin_scoreboard *vcpus; +static uint64_t interval = 100000000; + +static void plugin_exit(qemu_plugin_id_t id, void *p) +{ + for (int i = 0; i < qemu_plugin_num_vcpus(); i++) { + fclose(((Vcpu *)qemu_plugin_scoreboard_find(vcpus, i))->file); + } + + g_hash_table_unref(bbs); + g_free(filename); + qemu_plugin_scoreboard_free(vcpus); +} + +static void free_bb(void *data) +{ + qemu_plugin_scoreboard_free(((Bb *)data)->count); + g_free(data); +} + +static qemu_plugin_u64 count_u64(void) +{ + return qemu_plugin_scoreboard_u64_in_struct(vcpus, Vcpu, count); +} + +static qemu_plugin_u64 bb_count_u64(Bb *bb) +{ + return qemu_plugin_scoreboard_u64(bb->count); +} + +static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index) +{ + g_autofree gchar *vcpu_filename = NULL; + Vcpu *vcpu = qemu_plugin_scoreboard_find(vcpus, vcpu_index); + + vcpu_filename = g_strdup_printf("%s.%u.bb", filename, vcpu_index); + vcpu->file = fopen(vcpu_filename, "w"); +} + +static void vcpu_interval_exec(unsigned int vcpu_index, void *udata) +{ + Vcpu *vcpu = qemu_plugin_scoreboard_find(vcpus, vcpu_index); + GHashTableIter iter; + void *value; + + if (!vcpu->file) { + return; + } + + vcpu->count -= interval; + + fputc('T', vcpu->file); + + g_rw_lock_reader_lock(&bbs_lock); + g_hash_table_iter_init(&iter, bbs); + + while (g_hash_table_iter_next(&iter, NULL, &value)) { + Bb *bb = value; + uint64_t bb_count = qemu_plugin_u64_get(bb_count_u64(bb), vcpu_index); + + if (!bb_count) { + continue; + } + + fprintf(vcpu->file, ":%u:%" PRIu64 " ", bb->index, bb_count); + qemu_plugin_u64_set(bb_count_u64(bb), vcpu_index, 0); + } + + g_rw_lock_reader_unlock(&bbs_lock); + fputc('\n', vcpu->file); +} + +static void vcpu_tb_trans(qemu_plugin_id_t id, 
struct qemu_plugin_tb *tb) +{ + uint64_t n_insns = qemu_plugin_tb_n_insns(tb); + uint64_t vaddr = qemu_plugin_tb_vaddr(tb); + Bb *bb; + + g_rw_lock_writer_lock(&bbs_lock); + bb = g_hash_table_lookup(bbs, &vaddr); + if (!bb) { + bb = g_new(Bb, 1); + bb->vaddr = vaddr; + bb->count = qemu_plugin_scoreboard_new(sizeof(uint64_t)); + bb->index = g_hash_table_size(bbs) + 1; + g_hash_table_replace(bbs, &bb->vaddr, bb); + } + g_rw_lock_writer_unlock(&bbs_lock); + + qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu( + tb, QEMU_PLUGIN_INLINE_ADD_U64, count_u64(), n_insns); + + qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu( + tb, QEMU_PLUGIN_INLINE_ADD_U64, bb_count_u64(bb), n_insns); + + qemu_plugin_register_vcpu_tb_exec_cond_cb( + tb, vcpu_interval_exec, QEMU_PLUGIN_CB_NO_REGS, + QEMU_PLUGIN_COND_GE, count_u64(), interval, NULL); +} + +QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, + const qemu_info_t *info, + int argc, char **argv) +{ + for (int i = 0; i < argc; i++) { + char *opt = argv[i]; + g_auto(GStrv) tokens = g_strsplit(opt, "=", 2); + if (g_strcmp0(tokens[0], "interval") == 0) { + interval = g_ascii_strtoull(tokens[1], NULL, 10); + } else if (g_strcmp0(tokens[0], "outfile") == 0) { + filename = tokens[1]; + tokens[1] = NULL; + } else { + fprintf(stderr, "option parsing failed: %s\n", opt); + return -1; + } + } + + if (!filename) { + fputs("outfile unspecified\n", stderr); + return -1; + } + + bbs = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, free_bb); + vcpus = qemu_plugin_scoreboard_new(sizeof(Vcpu)); + qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); + qemu_plugin_register_vcpu_init_cb(id, vcpu_init); + qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); + + return 0; +} diff --git a/contrib/plugins/cache.c b/contrib/plugins/cache.c index 512ef6776b7..56508587d3a 100644 --- a/contrib/plugins/cache.c +++ b/contrib/plugins/cache.c @@ -208,7 +208,7 @@ static int fifo_get_first_block(Cache *cache, int set) static void fifo_update_on_miss(Cache *cache, int set, int blk_idx) { GQueue *q = cache->sets[set].fifo_queue; - g_queue_push_head(q, GINT_TO_POINTER(blk_idx)); + g_queue_push_head(q, (gpointer)(intptr_t) blk_idx); } static void fifo_destroy(Cache *cache) @@ -471,13 +471,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) n_insns = qemu_plugin_tb_n_insns(tb); for (i = 0; i < n_insns; i++) { struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); - uint64_t effective_addr; - - if (sys) { - effective_addr = (uint64_t) qemu_plugin_insn_haddr(insn); - } else { - effective_addr = (uint64_t) qemu_plugin_insn_vaddr(insn); - } + uint64_t effective_addr = sys ? (uintptr_t) qemu_plugin_insn_haddr(insn) : + qemu_plugin_insn_vaddr(insn); /* * Instructions might get translated multiple times, we do not create @@ -485,14 +480,13 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) * entry from the hash table and register it for the callback again. 
*/ g_mutex_lock(&hashtable_lock); - data = g_hash_table_lookup(miss_ht, GUINT_TO_POINTER(effective_addr)); + data = g_hash_table_lookup(miss_ht, &effective_addr); if (data == NULL) { data = g_new0(InsnData, 1); data->disas_str = qemu_plugin_insn_disas(insn); data->symbol = qemu_plugin_insn_symbol(insn); data->addr = effective_addr; - g_hash_table_insert(miss_ht, GUINT_TO_POINTER(effective_addr), - (gpointer) data); + g_hash_table_insert(miss_ht, &data->addr, data); } g_mutex_unlock(&hashtable_lock); @@ -582,7 +576,7 @@ static void sum_stats(void) } } -static int dcmp(gconstpointer a, gconstpointer b) +static int dcmp(gconstpointer a, gconstpointer b, gpointer d) { InsnData *insn_a = (InsnData *) a; InsnData *insn_b = (InsnData *) b; @@ -590,7 +584,7 @@ static int dcmp(gconstpointer a, gconstpointer b) return insn_a->l1_dmisses < insn_b->l1_dmisses ? 1 : -1; } -static int icmp(gconstpointer a, gconstpointer b) +static int icmp(gconstpointer a, gconstpointer b, gpointer d) { InsnData *insn_a = (InsnData *) a; InsnData *insn_b = (InsnData *) b; @@ -598,7 +592,7 @@ static int icmp(gconstpointer a, gconstpointer b) return insn_a->l1_imisses < insn_b->l1_imisses ? 1 : -1; } -static int l2_cmp(gconstpointer a, gconstpointer b) +static int l2_cmp(gconstpointer a, gconstpointer b, gpointer d) { InsnData *insn_a = (InsnData *) a; InsnData *insn_b = (InsnData *) b; @@ -609,7 +603,7 @@ static int l2_cmp(gconstpointer a, gconstpointer b) static void log_stats(void) { int i; - Cache *icache, *dcache, *l2_cache; + Cache *icache, *dcache, *l2_cache = NULL; g_autoptr(GString) rep = g_string_new("core #, data accesses, data misses," " dmiss rate, insn accesses," @@ -651,7 +645,7 @@ static void log_top_insns(void) InsnData *insn; miss_insns = g_hash_table_get_values(miss_ht); - miss_insns = g_list_sort(miss_insns, dcmp); + miss_insns = g_list_sort_with_data(miss_insns, dcmp, NULL); g_autoptr(GString) rep = g_string_new(""); g_string_append_printf(rep, "%s", "address, data misses, instruction\n"); @@ -665,7 +659,7 @@ static void log_top_insns(void) insn->l1_dmisses, insn->disas_str); } - miss_insns = g_list_sort(miss_insns, icmp); + miss_insns = g_list_sort_with_data(miss_insns, icmp, NULL); g_string_append_printf(rep, "%s", "\naddress, fetch misses, instruction\n"); for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) { @@ -682,7 +676,7 @@ static void log_top_insns(void) goto finish; } - miss_insns = g_list_sort(miss_insns, l2_cmp); + miss_insns = g_list_sort_with_data(miss_insns, l2_cmp, NULL); g_string_append_printf(rep, "%s", "\naddress, L2 misses, instruction\n"); for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) { @@ -853,7 +847,7 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); - miss_ht = g_hash_table_new_full(NULL, g_direct_equal, NULL, insn_free); + miss_ht = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, insn_free); return 0; } diff --git a/contrib/plugins/cflow.c b/contrib/plugins/cflow.c new file mode 100644 index 00000000000..b5e33f25f9b --- /dev/null +++ b/contrib/plugins/cflow.c @@ -0,0 +1,393 @@ +/* + * Control Flow plugin + * + * This plugin will track changes to control flow and detect where + * instructions fault. 
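+ *
+ * Illustrative invocation (editorial note, not part of the original
+ * header; the plugin path and guest binary are placeholders):
+ *
+ *   qemu-x86_64 -plugin ./contrib/plugins/libcflow.so,sort=hottest ./a.out
+ *
+ * where "sort" may be "hottest", "early" or "exceptions", matching the
+ * options parsed in qemu_plugin_install() below.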
+ * + * Copyright (c) 2024 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include +#include +#include +#include +#include + +#include + +QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; + +typedef enum { + SORT_HOTTEST, /* hottest branch insn */ + SORT_EXCEPTION, /* most early exits */ + SORT_POPDEST, /* most destinations (usually ret's) */ +} ReportType; + +ReportType report = SORT_HOTTEST; +int topn = 10; + +typedef struct { + uint64_t daddr; + uint64_t dcount; +} DestData; + +/* A node is an address where we can go to multiple places */ +typedef struct { + GMutex lock; + /* address of the branch point */ + uint64_t addr; + /* array of DestData */ + GArray *dests; + /* early exit/fault count */ + uint64_t early_exit; + /* jump destination count */ + uint64_t dest_count; + /* instruction data */ + char *insn_disas; + /* symbol? */ + const char *symbol; + /* times translated as last in block? */ + int last_count; + /* times translated in the middle of block? */ + int mid_count; +} NodeData; + +typedef enum { + /* last insn in block, expected flow control */ + LAST_INSN = (1 << 0), + /* mid-block insn, can only be an exception */ + EXCP_INSN = (1 << 1), + /* multiple disassembly, may have changed */ + MULT_INSN = (1 << 2), +} InsnTypes; + +typedef struct { + /* address of the branch point */ + uint64_t addr; + /* disassembly */ + char *insn_disas; + /* symbol? */ + const char *symbol; + /* types */ + InsnTypes type_flag; +} InsnData; + +/* We use this to track the current execution state */ +typedef struct { + /* address of current translated block */ + uint64_t tb_pc; + /* address of end of block */ + uint64_t end_block; + /* next pc after end of block */ + uint64_t pc_after_block; + /* address of last executed PC */ + uint64_t last_pc; +} VCPUScoreBoard; + +/* descriptors for accessing the above scoreboard */ +static qemu_plugin_u64 tb_pc; +static qemu_plugin_u64 end_block; +static qemu_plugin_u64 pc_after_block; +static qemu_plugin_u64 last_pc; + + +static GMutex node_lock; +static GHashTable *nodes; +struct qemu_plugin_scoreboard *state; + +/* SORT_HOTTEST */ +static gint hottest(gconstpointer a, gconstpointer b, gpointer d) +{ + NodeData *na = (NodeData *) a; + NodeData *nb = (NodeData *) b; + + return na->dest_count > nb->dest_count ? -1 : + na->dest_count == nb->dest_count ? 0 : 1; +} + +static gint exception(gconstpointer a, gconstpointer b, gpointer d) +{ + NodeData *na = (NodeData *) a; + NodeData *nb = (NodeData *) b; + + return na->early_exit > nb->early_exit ? -1 : + na->early_exit == nb->early_exit ? 0 : 1; +} + +static gint popular(gconstpointer a, gconstpointer b, gpointer d) +{ + NodeData *na = (NodeData *) a; + NodeData *nb = (NodeData *) b; + + return na->dests->len > nb->dests->len ? -1 : + na->dests->len == nb->dests->len ? 
0 : 1; +} + +/* Filter out non-branches - returns true to remove entry */ +static gboolean filter_non_branches(gpointer key, gpointer value, + gpointer user_data) +{ + NodeData *node = (NodeData *) value; + + return node->dest_count == 0; +} + +static void plugin_exit(qemu_plugin_id_t id, void *p) +{ + g_autoptr(GString) result = g_string_new("collected "); + GList *data; + GCompareDataFunc sort = &hottest; + int i = 0; + + g_mutex_lock(&node_lock); + g_string_append_printf(result, "%d control flow nodes in the hash table\n", + g_hash_table_size(nodes)); + + /* remove all nodes that didn't branch */ + g_hash_table_foreach_remove(nodes, filter_non_branches, NULL); + + data = g_hash_table_get_values(nodes); + + switch (report) { + case SORT_HOTTEST: + sort = &hottest; + break; + case SORT_EXCEPTION: + sort = &exception; + break; + case SORT_POPDEST: + sort = &popular; + break; + } + + data = g_list_sort_with_data(data, sort, NULL); + + for (GList *l = data; + l != NULL && i < topn; + l = l->next, i++) { + NodeData *n = l->data; + const char *type = n->mid_count ? "sync fault" : "branch"; + g_string_append_printf(result, " addr: 0x%"PRIx64 " %s: %s (%s)\n", + n->addr, n->symbol, n->insn_disas, type); + if (n->early_exit) { + g_string_append_printf(result, " early exits %"PRId64"\n", + n->early_exit); + } + g_string_append_printf(result, " branches %"PRId64"\n", + n->dest_count); + for (int j = 0; j < n->dests->len; j++) { + DestData *dd = &g_array_index(n->dests, DestData, j); + g_string_append_printf(result, " to 0x%"PRIx64" (%"PRId64")\n", + dd->daddr, dd->dcount); + } + } + + qemu_plugin_outs(result->str); + + g_mutex_unlock(&node_lock); +} + +static void plugin_init(void) +{ + g_mutex_init(&node_lock); + nodes = g_hash_table_new(g_int64_hash, g_int64_equal); + state = qemu_plugin_scoreboard_new(sizeof(VCPUScoreBoard)); + + /* score board declarations */ + tb_pc = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, tb_pc); + end_block = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, + end_block); + pc_after_block = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, + pc_after_block); + last_pc = qemu_plugin_scoreboard_u64_in_struct(state, VCPUScoreBoard, + last_pc); +} + +static NodeData *create_node(uint64_t addr) +{ + NodeData *node = g_new0(NodeData, 1); + g_mutex_init(&node->lock); + node->addr = addr; + node->dests = g_array_new(true, true, sizeof(DestData)); + return node; +} + +static NodeData *fetch_node(uint64_t addr, bool create_if_not_found) +{ + NodeData *node = NULL; + + g_mutex_lock(&node_lock); + node = (NodeData *) g_hash_table_lookup(nodes, &addr); + if (!node && create_if_not_found) { + node = create_node(addr); + g_hash_table_insert(nodes, &node->addr, node); + } + g_mutex_unlock(&node_lock); + return node; +} + +/* + * Called when we detect a non-linear execution (pc != + * pc_after_block). This could be due to a fault causing some sort of + * exit exception (if last_pc != block_end) or just a taken branch. 
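+ *
+ * Worked example (addresses invented for this note, not taken from the
+ * original patch): for a block covering 0x1000..0x100c with
+ * pc_after_block == 0x1010, arriving at 0x4000 with last_pc == 0x100c
+ * is an ordinary taken branch, while arriving with last_pc != 0x100c
+ * means the block was left early, e.g. because an instruction in the
+ * middle faulted; that case increments early_exit below.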
+ */ +static void vcpu_tb_branched_exec(unsigned int cpu_index, void *udata) +{ + uint64_t lpc = qemu_plugin_u64_get(last_pc, cpu_index); + uint64_t ebpc = qemu_plugin_u64_get(end_block, cpu_index); + uint64_t npc = qemu_plugin_u64_get(pc_after_block, cpu_index); + uint64_t pc = qemu_plugin_u64_get(tb_pc, cpu_index); + + /* return early for address 0 */ + if (!lpc) { + return; + } + + NodeData *node = fetch_node(lpc, true); + DestData *data = NULL; + bool early_exit = (lpc != ebpc); + GArray *dests; + + /* the condition should never hit */ + g_assert(pc != npc); + + g_mutex_lock(&node->lock); + + if (early_exit) { + fprintf(stderr, "%s: pc=%"PRIx64", epbc=%"PRIx64 + " npc=%"PRIx64", lpc=%"PRIx64"\n", + __func__, pc, ebpc, npc, lpc); + node->early_exit++; + if (!node->mid_count) { + /* count now as we've only just allocated */ + node->mid_count++; + } + } + + dests = node->dests; + for (int i = 0; i < dests->len; i++) { + if (g_array_index(dests, DestData, i).daddr == pc) { + data = &g_array_index(dests, DestData, i); + } + } + + /* we've never seen this before, allocate a new entry */ + if (!data) { + DestData new_entry = { .daddr = pc }; + g_array_append_val(dests, new_entry); + data = &g_array_index(dests, DestData, dests->len - 1); + g_assert(data->daddr == pc); + } + + data->dcount++; + node->dest_count++; + + g_mutex_unlock(&node->lock); +} + +/* + * At the start of each block we need to resolve two things: + * + * - is last_pc == block_end, if not we had an early exit + * - is start of block last_pc + insn width, if not we jumped + * + * Once those are dealt with we can instrument the rest of the + * instructions for their execution. + * + */ +static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) +{ + uint64_t pc = qemu_plugin_tb_vaddr(tb); + size_t insns = qemu_plugin_tb_n_insns(tb); + struct qemu_plugin_insn *first_insn = qemu_plugin_tb_get_insn(tb, 0); + struct qemu_plugin_insn *last_insn = qemu_plugin_tb_get_insn(tb, insns - 1); + + /* + * check if we are executing linearly after the last block. We can + * handle both early block exits and normal branches in the + * callback if we hit it. + */ + qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu( + tb, QEMU_PLUGIN_INLINE_STORE_U64, tb_pc, pc); + qemu_plugin_register_vcpu_tb_exec_cond_cb( + tb, vcpu_tb_branched_exec, QEMU_PLUGIN_CB_NO_REGS, + QEMU_PLUGIN_COND_NE, pc_after_block, pc, NULL); + + /* + * Now we can set start/end for this block so the next block can + * check where we are at. Do this on the first instruction and not + * the TB so we don't get mixed up with above. + */ + qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn, + QEMU_PLUGIN_INLINE_STORE_U64, + end_block, qemu_plugin_insn_vaddr(last_insn)); + qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(first_insn, + QEMU_PLUGIN_INLINE_STORE_U64, + pc_after_block, + qemu_plugin_insn_vaddr(last_insn) + + qemu_plugin_insn_size(last_insn)); + + for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) { + struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx); + uint64_t ipc = qemu_plugin_insn_vaddr(insn); + /* + * If this is a potential branch point check if we could grab + * the disassembly for it. If it is the last instruction + * always create an entry. 
+ */ + NodeData *node = fetch_node(ipc, last_insn); + if (node) { + g_mutex_lock(&node->lock); + if (!node->insn_disas) { + node->insn_disas = qemu_plugin_insn_disas(insn); + } + if (!node->symbol) { + node->symbol = qemu_plugin_insn_symbol(insn); + } + if (last_insn == insn) { + node->last_count++; + } else { + node->mid_count++; + } + g_mutex_unlock(&node->lock); + } + + /* Store the PC of what we are about to execute */ + qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(insn, + QEMU_PLUGIN_INLINE_STORE_U64, + last_pc, ipc); + } +} + +QEMU_PLUGIN_EXPORT +int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, + int argc, char **argv) +{ + for (int i = 0; i < argc; i++) { + char *opt = argv[i]; + g_auto(GStrv) tokens = g_strsplit(opt, "=", 2); + if (g_strcmp0(tokens[0], "sort") == 0) { + if (g_strcmp0(tokens[1], "hottest") == 0) { + report = SORT_HOTTEST; + } else if (g_strcmp0(tokens[1], "early") == 0) { + report = SORT_EXCEPTION; + } else if (g_strcmp0(tokens[1], "exceptions") == 0) { + report = SORT_POPDEST; + } else { + fprintf(stderr, "failed to parse: %s\n", tokens[1]); + return -1; + } + } else { + fprintf(stderr, "option parsing failed: %s\n", opt); + return -1; + } + } + + plugin_init(); + + qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); + qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); + return 0; +} diff --git a/contrib/plugins/execlog.c b/contrib/plugins/execlog.c index 1c1601cc0b4..06ec76d6e9a 100644 --- a/contrib/plugins/execlog.c +++ b/contrib/plugins/execlog.c @@ -181,8 +181,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) bool check_regs_this = rmatches; bool check_regs_next = false; - size_t n = qemu_plugin_tb_n_insns(tb); - for (size_t i = 0; i < n; i++) { + size_t n_insns = qemu_plugin_tb_n_insns(tb); + for (size_t i = 0; i < n_insns; i++) { char *insn_disas; uint64_t insn_vaddr; @@ -232,12 +232,15 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) */ if (disas_assist && rmatches) { check_regs_next = false; - gchar *args = g_strstr_len(insn_disas, -1, " "); - for (int n = 0; n < all_reg_names->len; n++) { - gchar *reg = g_ptr_array_index(all_reg_names, n); - if (g_strrstr(args, reg)) { - check_regs_next = true; - skip = false; + g_auto(GStrv) args = g_strsplit_set(insn_disas, " \t", 2); + if (args && args[1]) { + for (int n = 0; n < all_reg_names->len; n++) { + const gchar *reg = g_ptr_array_index(all_reg_names, n); + if (g_strrstr(args[1], reg)) { + check_regs_next = true; + skip = false; + break; + } } } } diff --git a/contrib/plugins/hotblocks.c b/contrib/plugins/hotblocks.c index 02bc5078bdd..98404b68852 100644 --- a/contrib/plugins/hotblocks.c +++ b/contrib/plugins/hotblocks.c @@ -29,7 +29,7 @@ static guint64 limit = 20; * * The internals of the TCG are not exposed to plugins so we can only * get the starting PC for each block. We cheat this slightly by - * xor'ing the number of instructions to the hash to help + * checking the number of instructions as well to help * differentiate. */ typedef struct { @@ -39,7 +39,7 @@ typedef struct { unsigned long insns; } ExecCount; -static gint cmp_exec_count(gconstpointer a, gconstpointer b) +static gint cmp_exec_count(gconstpointer a, gconstpointer b, gpointer d) { ExecCount *ea = (ExecCount *) a; ExecCount *eb = (ExecCount *) b; @@ -50,6 +50,20 @@ static gint cmp_exec_count(gconstpointer a, gconstpointer b) return count_a > count_b ? 
-1 : 1; } +static guint exec_count_hash(gconstpointer v) +{ + const ExecCount *e = v; + return e->start_addr ^ e->insns; +} + +static gboolean exec_count_equal(gconstpointer v1, gconstpointer v2) +{ + const ExecCount *ea = v1; + const ExecCount *eb = v2; + return (ea->start_addr == eb->start_addr) && + (ea->insns == eb->insns); +} + static void exec_count_free(gpointer key, gpointer value, gpointer user_data) { ExecCount *cnt = value; @@ -65,7 +79,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) g_string_append_printf(report, "%d entries in the hash table\n", g_hash_table_size(hotblocks)); counts = g_hash_table_get_values(hotblocks); - it = g_list_sort(counts, cmp_exec_count); + it = g_list_sort_with_data(counts, cmp_exec_count, NULL); if (it) { g_string_append_printf(report, "pc, tcount, icount, ecount\n"); @@ -91,7 +105,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) static void plugin_init(void) { - hotblocks = g_hash_table_new(NULL, g_direct_equal); + hotblocks = g_hash_table_new(exec_count_hash, exec_count_equal); } static void vcpu_tb_exec(unsigned int cpu_index, void *udata) @@ -111,10 +125,15 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) ExecCount *cnt; uint64_t pc = qemu_plugin_tb_vaddr(tb); size_t insns = qemu_plugin_tb_n_insns(tb); - uint64_t hash = pc ^ insns; g_mutex_lock(&lock); - cnt = (ExecCount *) g_hash_table_lookup(hotblocks, (gconstpointer) hash); + { + ExecCount e; + e.start_addr = pc; + e.insns = insns; + cnt = (ExecCount *) g_hash_table_lookup(hotblocks, &e); + } + if (cnt) { cnt->trans_count++; } else { @@ -123,7 +142,7 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) cnt->trans_count = 1; cnt->insns = insns; cnt->exec_count = qemu_plugin_scoreboard_new(sizeof(uint64_t)); - g_hash_table_insert(hotblocks, (gpointer) hash, (gpointer) cnt); + g_hash_table_insert(hotblocks, cnt, cnt); } g_mutex_unlock(&lock); diff --git a/contrib/plugins/hotpages.c b/contrib/plugins/hotpages.c index 8316ae50c72..9d48ac969eb 100644 --- a/contrib/plugins/hotpages.c +++ b/contrib/plugins/hotpages.c @@ -48,7 +48,7 @@ typedef struct { static GMutex lock; static GHashTable *pages; -static gint cmp_access_count(gconstpointer a, gconstpointer b) +static gint cmp_access_count(gconstpointer a, gconstpointer b, gpointer d) { PageCounters *ea = (PageCounters *) a; PageCounters *eb = (PageCounters *) b; @@ -83,7 +83,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) if (counts && g_list_next(counts)) { GList *it; - it = g_list_sort(counts, cmp_access_count); + it = g_list_sort_with_data(counts, cmp_access_count, NULL); for (i = 0; i < limit && it->next; i++, it = it->next) { PageCounters *rec = (PageCounters *) it->data; @@ -103,7 +103,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) static void plugin_init(void) { page_mask = (page_size - 1); - pages = g_hash_table_new(NULL, g_direct_equal); + pages = g_hash_table_new(g_int64_hash, g_int64_equal); } static void vcpu_haddr(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo, @@ -130,12 +130,12 @@ static void vcpu_haddr(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo, page &= ~page_mask; g_mutex_lock(&lock); - count = (PageCounters *) g_hash_table_lookup(pages, GUINT_TO_POINTER(page)); + count = (PageCounters *) g_hash_table_lookup(pages, &page); if (!count) { count = g_new0(PageCounters, 1); count->page_address = page; - g_hash_table_insert(pages, GUINT_TO_POINTER(page), (gpointer) count); + g_hash_table_insert(pages, &count->page_address, 
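+                            /* Editorial note, not in the original patch:
+                             * the key now points at the entry's own
+                             * page_address field, so it stays valid for
+                             * exactly as long as the PageCounters value
+                             * and needs no separate allocation or free. */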
count); } if (qemu_plugin_mem_is_store(meminfo)) { count->writes++; diff --git a/contrib/plugins/howvec.c b/contrib/plugins/howvec.c index 9be67f74534..42bddb6566d 100644 --- a/contrib/plugins/howvec.c +++ b/contrib/plugins/howvec.c @@ -155,7 +155,7 @@ static ClassSelector class_tables[] = { static InsnClassExecCount *class_table; static int class_table_sz; -static gint cmp_exec_count(gconstpointer a, gconstpointer b) +static gint cmp_exec_count(gconstpointer a, gconstpointer b, gpointer d) { InsnExecCount *ea = (InsnExecCount *) a; InsnExecCount *eb = (InsnExecCount *) b; @@ -208,7 +208,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) counts = g_hash_table_get_values(insns); if (counts && g_list_next(counts)) { g_string_append_printf(report, "Individual Instructions:\n"); - counts = g_list_sort(counts, cmp_exec_count); + counts = g_list_sort_with_data(counts, cmp_exec_count, NULL); for (i = 0; i < limit && g_list_next(counts); i++, counts = g_list_next(counts)) { @@ -253,6 +253,8 @@ static struct qemu_plugin_scoreboard *find_counter( int i; uint64_t *cnt = NULL; uint32_t opcode = 0; + /* if opcode is greater than 32 bits, we should refactor insn hash table. */ + G_STATIC_ASSERT(sizeof(opcode) == sizeof(uint32_t)); InsnClassExecCount *class = NULL; /* @@ -284,7 +286,7 @@ static struct qemu_plugin_scoreboard *find_counter( g_mutex_lock(&lock); icount = (InsnExecCount *) g_hash_table_lookup(insns, - GUINT_TO_POINTER(opcode)); + (gpointer)(intptr_t) opcode); if (!icount) { icount = g_new0(InsnExecCount, 1); @@ -295,8 +297,7 @@ static struct qemu_plugin_scoreboard *find_counter( qemu_plugin_scoreboard_new(sizeof(uint64_t)); icount->count = qemu_plugin_scoreboard_u64(score); - g_hash_table_insert(insns, GUINT_TO_POINTER(opcode), - (gpointer) icount); + g_hash_table_insert(insns, (gpointer)(intptr_t) opcode, icount); } g_mutex_unlock(&lock); diff --git a/contrib/plugins/hwprofile.c b/contrib/plugins/hwprofile.c index 739ac0c66b5..a9838ccc879 100644 --- a/contrib/plugins/hwprofile.c +++ b/contrib/plugins/hwprofile.c @@ -43,6 +43,8 @@ typedef struct { static GMutex lock; static GHashTable *devices; +static struct qemu_plugin_scoreboard *source_pc_scoreboard; +static qemu_plugin_u64 source_pc; /* track the access pattern to a piece of HW */ static bool pattern; @@ -69,7 +71,7 @@ static void plugin_init(void) devices = g_hash_table_new(NULL, NULL); } -static gint sort_cmp(gconstpointer a, gconstpointer b) +static gint sort_cmp(gconstpointer a, gconstpointer b, gpointer d) { DeviceCounts *ea = (DeviceCounts *) a; DeviceCounts *eb = (DeviceCounts *) b; @@ -77,7 +79,7 @@ static gint sort_cmp(gconstpointer a, gconstpointer b) eb->totals.reads + eb->totals.writes ? -1 : 1; } -static gint sort_loc(gconstpointer a, gconstpointer b) +static gint sort_loc(gconstpointer a, gconstpointer b, gpointer d) { IOLocationCounts *ea = (IOLocationCounts *) a; IOLocationCounts *eb = (IOLocationCounts *) b; @@ -124,13 +126,13 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) if (counts && g_list_next(counts)) { GList *it; - it = g_list_sort(counts, sort_cmp); + it = g_list_sort_with_data(counts, sort_cmp, NULL); while (it) { DeviceCounts *rec = (DeviceCounts *) it->data; if (rec->detail) { GList *accesses = g_hash_table_get_values(rec->detail); - GList *io_it = g_list_sort(accesses, sort_loc); + GList *io_it = g_list_sort_with_data(accesses, sort_loc, NULL); const char *prefix = pattern ? 
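                /* Editorial note (not part of the patch): in "pattern"
                 * mode the per-device detail table is keyed by the
                 * offset into the device, in "source" mode by the guest
                 * PC recorded via source_pc above, hence the column
                 * label chosen here. */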
"off" : "pc"; g_string_append_printf(report, "%s @ 0x%"PRIx64"\n", rec->name, rec->base); @@ -159,7 +161,7 @@ static DeviceCounts *new_count(const char *name, uint64_t base) count->name = name; count->base = base; if (pattern || source) { - count->detail = g_hash_table_new(NULL, NULL); + count->detail = g_hash_table_new(g_int64_hash, g_int64_equal); } g_hash_table_insert(devices, (gpointer) name, count); return count; @@ -169,7 +171,7 @@ static IOLocationCounts *new_location(GHashTable *table, uint64_t off_or_pc) { IOLocationCounts *loc = g_new0(IOLocationCounts, 1); loc->off_or_pc = off_or_pc; - g_hash_table_insert(table, (gpointer) off_or_pc, loc); + g_hash_table_insert(table, &loc->off_or_pc, loc); return loc; } @@ -224,12 +226,12 @@ static void vcpu_haddr(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo, /* either track offsets or source of access */ if (source) { - off = (uint64_t) udata; + off = qemu_plugin_u64_get(source_pc, cpu_index); } if (pattern || source) { IOLocationCounts *io_count = g_hash_table_lookup(counts->detail, - (gpointer) off); + &off); if (!io_count) { io_count = new_location(counts->detail, off); } @@ -247,10 +249,14 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) for (i = 0; i < n; i++) { struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); - gpointer udata = (gpointer) (source ? qemu_plugin_insn_vaddr(insn) : 0); + if (source) { + uint64_t pc = qemu_plugin_insn_vaddr(insn); + qemu_plugin_register_vcpu_mem_inline_per_vcpu( + insn, rw, QEMU_PLUGIN_INLINE_STORE_U64, + source_pc, pc); + } qemu_plugin_register_vcpu_mem_cb(insn, vcpu_haddr, - QEMU_PLUGIN_CB_NO_REGS, - rw, udata); + QEMU_PLUGIN_CB_NO_REGS, rw, NULL); } } @@ -306,10 +312,9 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, return -1; } - /* Just warn about overflow */ - if (info->system.smp_vcpus > 64 || - info->system.max_vcpus > 64) { - fprintf(stderr, "hwprofile: can only track up to 64 CPUs\n"); + if (source) { + source_pc_scoreboard = qemu_plugin_scoreboard_new(sizeof(uint64_t)); + source_pc = qemu_plugin_scoreboard_u64(source_pc_scoreboard); } plugin_init(); diff --git a/contrib/plugins/ips.c b/contrib/plugins/ips.c index 29fa556d0ff..f110c565bc4 100644 --- a/contrib/plugins/ips.c +++ b/contrib/plugins/ips.c @@ -129,20 +129,62 @@ static void plugin_exit(qemu_plugin_id_t id, void *udata) qemu_plugin_scoreboard_free(vcpus); } +typedef struct { + const char *suffix; + unsigned long multipler; +} ScaleEntry; + +/* a bit like units.h but not binary */ +static const ScaleEntry scales[] = { + { "k", 1000 }, + { "m", 1000 * 1000 }, + { "g", 1000 * 1000 * 1000 }, +}; + QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, int argc, char **argv) { + bool ipq_set = false; + for (int i = 0; i < argc; i++) { char *opt = argv[i]; g_auto(GStrv) tokens = g_strsplit(opt, "=", 2); if (g_strcmp0(tokens[0], "ips") == 0) { - max_insn_per_second = g_ascii_strtoull(tokens[1], NULL, 10); + char *endptr = NULL; + max_insn_per_second = g_ascii_strtoull(tokens[1], &endptr, 10); if (!max_insn_per_second && errno) { fprintf(stderr, "%s: couldn't parse %s (%s)\n", __func__, tokens[1], g_strerror(errno)); return -1; } + + if (endptr && *endptr != 0) { + g_autofree gchar *lower = g_utf8_strdown(endptr, -1); + unsigned long scale = 0; + + for (int j = 0; j < G_N_ELEMENTS(scales); j++) { + if (g_strcmp0(lower, scales[j].suffix) == 0) { + scale = scales[j].multipler; + break; + } + } + + if (scale) { + max_insn_per_second *= 
scale; + } else { + fprintf(stderr, "bad suffix: %s\n", endptr); + return -1; + } + } + } else if (g_strcmp0(tokens[0], "ipq") == 0) { + max_insn_per_quantum = g_ascii_strtoull(tokens[1], NULL, 10); + + if (!max_insn_per_quantum) { + fprintf(stderr, "bad ipq value: %s\n", tokens[0]); + return -1; + } + ipq_set = true; } else { fprintf(stderr, "option parsing failed: %s\n", opt); return -1; @@ -150,7 +192,16 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, } vcpus = qemu_plugin_scoreboard_new(sizeof(vCPUTime)); - max_insn_per_quantum = max_insn_per_second / NUM_TIME_UPDATE_PER_SEC; + + if (!ipq_set) { + max_insn_per_quantum = max_insn_per_second / NUM_TIME_UPDATE_PER_SEC; + } + + if (max_insn_per_quantum == 0) { + fprintf(stderr, "minimum of %d instructions per second needed\n", + NUM_TIME_UPDATE_PER_SEC); + return -1; + } time_handle = qemu_plugin_request_time_control(); g_assert(time_handle); diff --git a/contrib/plugins/meson.build b/contrib/plugins/meson.build new file mode 100644 index 00000000000..1876bc78438 --- /dev/null +++ b/contrib/plugins/meson.build @@ -0,0 +1,30 @@ +contrib_plugins = ['bbv', 'cache', 'cflow', 'drcov', 'execlog', 'hotblocks', + 'hotpages', 'howvec', 'hwprofile', 'ips', 'stoptrigger'] +if host_os != 'windows' + # lockstep uses socket.h + contrib_plugins += 'lockstep' +endif + +t = [] +if get_option('plugins') + foreach i : contrib_plugins + if host_os == 'windows' + t += shared_module(i, files(i + '.c') + 'win32_linker.c', + include_directories: '../../include/qemu', + link_depends: [win32_qemu_plugin_api_lib], + link_args: win32_qemu_plugin_api_link_flags, + dependencies: glib) + else + t += shared_module(i, files(i + '.c'), + include_directories: '../../include/qemu', + dependencies: glib) + endif + endforeach +endif +if t.length() > 0 + alias_target('contrib-plugins', t) +else + run_target('contrib-plugins', command: [python, '-c', '']) +endif + +plugin_modules += t diff --git a/contrib/plugins/stoptrigger.c b/contrib/plugins/stoptrigger.c index 03ee22f4c6a..b3a6ed66a7b 100644 --- a/contrib/plugins/stoptrigger.c +++ b/contrib/plugins/stoptrigger.c @@ -21,9 +21,11 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; /* Scoreboard to track executed instructions count */ typedef struct { uint64_t insn_count; + uint64_t current_pc; } InstructionsCount; static struct qemu_plugin_scoreboard *insn_count_sb; static qemu_plugin_u64 insn_count; +static qemu_plugin_u64 current_pc; static uint64_t icount; static int icount_exit_code; @@ -34,6 +36,11 @@ static bool exit_on_address; /* Map trigger addresses to exit code */ static GHashTable *addrs_ht; +typedef struct { + uint64_t exit_addr; + int exit_code; +} ExitInfo; + static void exit_emulation(int return_code, char *message) { qemu_plugin_outs(message); @@ -43,23 +50,18 @@ static void exit_emulation(int return_code, char *message) static void exit_icount_reached(unsigned int cpu_index, void *udata) { - uint64_t insn_vaddr = GPOINTER_TO_UINT(udata); + uint64_t insn_vaddr = qemu_plugin_u64_get(current_pc, cpu_index); char *msg = g_strdup_printf("icount reached at 0x%" PRIx64 ", exiting\n", insn_vaddr); - exit_emulation(icount_exit_code, msg); } static void exit_address_reached(unsigned int cpu_index, void *udata) { - uint64_t insn_vaddr = GPOINTER_TO_UINT(udata); - char *msg = g_strdup_printf("0x%" PRIx64 " reached, exiting\n", insn_vaddr); - int exit_code; - - exit_code = GPOINTER_TO_INT( - g_hash_table_lookup(addrs_ht, GUINT_TO_POINTER(insn_vaddr))); - - exit_emulation(exit_code, 
msg); + ExitInfo *ei = udata; + g_assert(ei); + char *msg = g_strdup_printf("0x%" PRIx64 " reached, exiting\n", ei->exit_addr); + exit_emulation(ei->exit_code, msg); } static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) @@ -67,23 +69,25 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) size_t tb_n = qemu_plugin_tb_n_insns(tb); for (size_t i = 0; i < tb_n; i++) { struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); - gpointer insn_vaddr = GUINT_TO_POINTER(qemu_plugin_insn_vaddr(insn)); + uint64_t insn_vaddr = qemu_plugin_insn_vaddr(insn); if (exit_on_icount) { /* Increment and check scoreboard for each instruction */ qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu( insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, 1); + qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu( + insn, QEMU_PLUGIN_INLINE_STORE_U64, current_pc, insn_vaddr); qemu_plugin_register_vcpu_insn_exec_cond_cb( insn, exit_icount_reached, QEMU_PLUGIN_CB_NO_REGS, - QEMU_PLUGIN_COND_EQ, insn_count, icount + 1, insn_vaddr); + QEMU_PLUGIN_COND_EQ, insn_count, icount + 1, NULL); } if (exit_on_address) { - if (g_hash_table_contains(addrs_ht, insn_vaddr)) { + ExitInfo *ei = g_hash_table_lookup(addrs_ht, &insn_vaddr); + if (ei) { /* Exit triggered by address */ qemu_plugin_register_vcpu_insn_exec_cb( - insn, exit_address_reached, QEMU_PLUGIN_CB_NO_REGS, - insn_vaddr); + insn, exit_address_reached, QEMU_PLUGIN_CB_NO_REGS, ei); } } } @@ -99,11 +103,13 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, int argc, char **argv) { - addrs_ht = g_hash_table_new(NULL, g_direct_equal); + addrs_ht = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, g_free); insn_count_sb = qemu_plugin_scoreboard_new(sizeof(InstructionsCount)); insn_count = qemu_plugin_scoreboard_u64_in_struct( insn_count_sb, InstructionsCount, insn_count); + current_pc = qemu_plugin_scoreboard_u64_in_struct( + insn_count_sb, InstructionsCount, current_pc); for (int i = 0; i < argc; i++) { char *opt = argv[i]; @@ -124,13 +130,13 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, exit_on_icount = true; } else if (g_strcmp0(tokens[0], "addr") == 0) { g_auto(GStrv) addr_tokens = g_strsplit(tokens[1], ":", 2); - uint64_t exit_addr = g_ascii_strtoull(addr_tokens[0], NULL, 0); - int exit_code = 0; + ExitInfo *ei = g_malloc(sizeof(ExitInfo)); + ei->exit_addr = g_ascii_strtoull(addr_tokens[0], NULL, 0); + ei->exit_code = 0; if (addr_tokens[1]) { - exit_code = g_ascii_strtoull(addr_tokens[1], NULL, 0); + ei->exit_code = g_ascii_strtoull(addr_tokens[1], NULL, 0); } - g_hash_table_insert(addrs_ht, GUINT_TO_POINTER(exit_addr), - GINT_TO_POINTER(exit_code)); + g_hash_table_insert(addrs_ht, &ei->exit_addr, ei); exit_on_address = true; } else { fprintf(stderr, "option parsing failed: %s\n", opt); diff --git a/contrib/vmapple/uuid.sh b/contrib/vmapple/uuid.sh new file mode 100755 index 00000000000..f5637221d23 --- /dev/null +++ b/contrib/vmapple/uuid.sh @@ -0,0 +1,12 @@ +#!/bin/sh +# +# Used for converting a guest provisioned using Virtualization.framework +# for use with the QEMU 'vmapple' aarch64 machine type. +# +# Extracts the Machine UUID from Virtualization.framework VM JSON file. 
+# (as produced by 'macosvm', passed as command line argument) +# +# SPDX-License-Identifier: GPL-2.0-or-later + +plutil -extract machineId raw "$1" | base64 -d | plutil -extract ECID raw - + diff --git a/cpu-common.c b/cpu-common.c index 6b262233a3b..ef5757d23bf 100644 --- a/cpu-common.c +++ b/cpu-common.c @@ -21,7 +21,6 @@ #include "qemu/main-loop.h" #include "exec/cpu-common.h" #include "hw/core/cpu.h" -#include "sysemu/cpus.h" #include "qemu/lockable.h" #include "trace/trace-root.h" @@ -194,6 +193,9 @@ void start_exclusive(void) CPUState *other_cpu; int running_cpus; + /* Ensure we are not running, or start_exclusive will be blocked. */ + g_assert(!current_cpu->running); + if (current_cpu->exclusive_context_count) { current_cpu->exclusive_context_count++; return; @@ -386,11 +388,10 @@ void process_queued_cpu_work(CPUState *cpu) int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, CPUBreakpoint **breakpoint) { - CPUClass *cc = CPU_GET_CLASS(cpu); CPUBreakpoint *bp; - if (cc->gdb_adjust_breakpoint) { - pc = cc->gdb_adjust_breakpoint(cpu, pc); + if (cpu->cc->gdb_adjust_breakpoint) { + pc = cpu->cc->gdb_adjust_breakpoint(cpu, pc); } bp = g_malloc(sizeof(*bp)); @@ -416,11 +417,10 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, /* Remove a specific breakpoint. */ int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags) { - CPUClass *cc = CPU_GET_CLASS(cpu); CPUBreakpoint *bp; - if (cc->gdb_adjust_breakpoint) { - pc = cc->gdb_adjust_breakpoint(cpu, pc); + if (cpu->cc->gdb_adjust_breakpoint) { + pc = cpu->cc->gdb_adjust_breakpoint(cpu, pc); } QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { diff --git a/cpu-target.c b/cpu-target.c index 499facf7747..772e35495b8 100644 --- a/cpu-target.c +++ b/cpu-target.c @@ -18,305 +18,18 @@ */ #include "qemu/osdep.h" -#include "qapi/error.h" - -#include "exec/target_page.h" -#include "exec/page-protection.h" -#include "hw/qdev-core.h" -#include "hw/qdev-properties.h" -#include "qemu/error-report.h" -#include "qemu/qemu-print.h" -#include "migration/vmstate.h" -#ifdef CONFIG_USER_ONLY -#include "qemu.h" -#else -#include "hw/core/sysemu-cpu-ops.h" -#include "exec/address-spaces.h" -#include "exec/memory.h" -#endif -#include "sysemu/cpus.h" -#include "sysemu/tcg.h" -#include "exec/tswap.h" -#include "exec/replay-core.h" +#include "cpu.h" +#include "accel/accel-cpu-ops.h" +#include "system/cpus.h" #include "exec/cpu-common.h" -#include "exec/exec-all.h" -#include "exec/tb-flush.h" -#include "exec/translate-all.h" +#include "exec/replay-core.h" #include "exec/log.h" -#include "hw/core/accel-cpu.h" +#include "hw/core/cpu.h" #include "trace/trace-root.h" -#include "qemu/accel.h" - -#ifndef CONFIG_USER_ONLY -static int cpu_common_post_load(void *opaque, int version_id) -{ - CPUState *cpu = opaque; - - /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the - version_id is increased. */ - cpu->interrupt_request &= ~0x01; - tlb_flush(cpu); - - /* loadvm has just updated the content of RAM, bypassing the - * usual mechanisms that ensure we flush TBs for writes to - * memory we've translated code from. So we must flush all TBs, - * which will now be stale. 
- */ - tb_flush(cpu); - - return 0; -} - -static int cpu_common_pre_load(void *opaque) -{ - CPUState *cpu = opaque; - - cpu->exception_index = -1; - - return 0; -} - -static bool cpu_common_exception_index_needed(void *opaque) -{ - CPUState *cpu = opaque; - - return tcg_enabled() && cpu->exception_index != -1; -} - -static const VMStateDescription vmstate_cpu_common_exception_index = { - .name = "cpu_common/exception_index", - .version_id = 1, - .minimum_version_id = 1, - .needed = cpu_common_exception_index_needed, - .fields = (const VMStateField[]) { - VMSTATE_INT32(exception_index, CPUState), - VMSTATE_END_OF_LIST() - } -}; - -static bool cpu_common_crash_occurred_needed(void *opaque) -{ - CPUState *cpu = opaque; - - return cpu->crash_occurred; -} - -static const VMStateDescription vmstate_cpu_common_crash_occurred = { - .name = "cpu_common/crash_occurred", - .version_id = 1, - .minimum_version_id = 1, - .needed = cpu_common_crash_occurred_needed, - .fields = (const VMStateField[]) { - VMSTATE_BOOL(crash_occurred, CPUState), - VMSTATE_END_OF_LIST() - } -}; - -const VMStateDescription vmstate_cpu_common = { - .name = "cpu_common", - .version_id = 1, - .minimum_version_id = 1, - .pre_load = cpu_common_pre_load, - .post_load = cpu_common_post_load, - .fields = (const VMStateField[]) { - VMSTATE_UINT32(halted, CPUState), - VMSTATE_UINT32(interrupt_request, CPUState), - VMSTATE_END_OF_LIST() - }, - .subsections = (const VMStateDescription * const []) { - &vmstate_cpu_common_exception_index, - &vmstate_cpu_common_crash_occurred, - NULL - } -}; -#endif - -bool cpu_exec_realizefn(CPUState *cpu, Error **errp) -{ - /* cache the cpu class for the hotpath */ - cpu->cc = CPU_GET_CLASS(cpu); - - if (!accel_cpu_common_realize(cpu, errp)) { - return false; - } - - /* Wait until cpu initialization complete before exposing cpu. */ - cpu_list_add(cpu); - -#ifdef CONFIG_USER_ONLY - assert(qdev_get_vmsd(DEVICE(cpu)) == NULL || - qdev_get_vmsd(DEVICE(cpu))->unmigratable); -#else - if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { - vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu); - } - if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) { - vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu); - } -#endif /* CONFIG_USER_ONLY */ - - return true; -} - -void cpu_exec_unrealizefn(CPUState *cpu) -{ -#ifndef CONFIG_USER_ONLY - CPUClass *cc = CPU_GET_CLASS(cpu); - - if (cc->sysemu_ops->legacy_vmsd != NULL) { - vmstate_unregister(NULL, cc->sysemu_ops->legacy_vmsd, cpu); - } - if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { - vmstate_unregister(NULL, &vmstate_cpu_common, cpu); - } -#endif - - cpu_list_remove(cpu); - /* - * Now that the vCPU has been removed from the RCU list, we can call - * accel_cpu_common_unrealize, which may free fields using call_rcu. - */ - accel_cpu_common_unrealize(cpu); -} - -/* - * This can't go in hw/core/cpu.c because that file is compiled only - * once for both user-mode and system builds. - */ -static Property cpu_common_props[] = { -#ifdef CONFIG_USER_ONLY - /* - * Create a property for the user-only object, so users can - * adjust prctl(PR_SET_UNALIGN) from the command-line. - * Has no effect if the target does not support the feature. - */ - DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState, - prctl_unalign_sigbus, false), -#else - /* - * Create a memory property for system CPU object, so users can - * wire up its memory. The default if no link is set up is to use - * the system address space. 
- */ - DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION, - MemoryRegion *), -#endif - DEFINE_PROP_END_OF_LIST(), -}; - -#ifndef CONFIG_USER_ONLY -static bool cpu_get_start_powered_off(Object *obj, Error **errp) -{ - CPUState *cpu = CPU(obj); - return cpu->start_powered_off; -} - -static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp) -{ - CPUState *cpu = CPU(obj); - cpu->start_powered_off = value; -} -#endif - -void cpu_class_init_props(DeviceClass *dc) -{ -#ifndef CONFIG_USER_ONLY - ObjectClass *oc = OBJECT_CLASS(dc); - /* - * We can't use DEFINE_PROP_BOOL in the Property array for this - * property, because we want this to be settable after realize. - */ - object_class_property_add_bool(oc, "start-powered-off", - cpu_get_start_powered_off, - cpu_set_start_powered_off); -#endif - - device_class_set_props(dc, cpu_common_props); -} - -void cpu_exec_initfn(CPUState *cpu) -{ - cpu->as = NULL; - cpu->num_ases = 0; - -#ifndef CONFIG_USER_ONLY - cpu->memory = get_system_memory(); - object_ref(OBJECT(cpu->memory)); -#endif -} - -char *cpu_model_from_type(const char *typename) -{ - const char *suffix = "-" CPU_RESOLVING_TYPE; - - if (!object_class_by_name(typename)) { - return NULL; - } - - if (g_str_has_suffix(typename, suffix)) { - return g_strndup(typename, strlen(typename) - strlen(suffix)); - } - - return g_strdup(typename); -} - -const char *parse_cpu_option(const char *cpu_option) -{ - ObjectClass *oc; - CPUClass *cc; - gchar **model_pieces; - const char *cpu_type; - - model_pieces = g_strsplit(cpu_option, ",", 2); - if (!model_pieces[0]) { - error_report("-cpu option cannot be empty"); - exit(1); - } - - oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]); - if (oc == NULL) { - error_report("unable to find CPU model '%s'", model_pieces[0]); - g_strfreev(model_pieces); - exit(EXIT_FAILURE); - } - - cpu_type = object_class_get_name(oc); - cc = CPU_CLASS(oc); - cc->parse_features(cpu_type, model_pieces[1], &error_fatal); - g_strfreev(model_pieces); - return cpu_type; -} - -#ifndef cpu_list -static void cpu_list_entry(gpointer data, gpointer user_data) -{ - CPUClass *cc = CPU_CLASS(OBJECT_CLASS(data)); - const char *typename = object_class_get_name(OBJECT_CLASS(data)); - g_autofree char *model = cpu_model_from_type(typename); - - if (cc->deprecation_note) { - qemu_printf(" %s (deprecated)\n", model); - } else { - qemu_printf(" %s\n", model); - } -} - -static void cpu_list(void) -{ - GSList *list; - - list = object_class_get_list_sorted(TYPE_CPU, false); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, cpu_list_entry, NULL); - g_slist_free(list); -} -#endif - -void list_cpus(void) -{ - cpu_list(); -} +/* Validate correct placement of CPUArchState. */ +QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0); +QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState)); /* enable or disable single step mode. EXCP_DEBUG is returned by the CPU loop after each instruction */ @@ -371,104 +84,3 @@ void cpu_abort(CPUState *cpu, const char *fmt, ...) 
#endif abort(); } - -/* physical memory access (slow version, mainly for debug) */ -#if defined(CONFIG_USER_ONLY) -int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, - void *ptr, size_t len, bool is_write) -{ - int flags; - vaddr l, page; - void * p; - uint8_t *buf = ptr; - ssize_t written; - int ret = -1; - int fd = -1; - - while (len > 0) { - page = addr & TARGET_PAGE_MASK; - l = (page + TARGET_PAGE_SIZE) - addr; - if (l > len) - l = len; - flags = page_get_flags(page); - if (!(flags & PAGE_VALID)) { - goto out_close; - } - if (is_write) { - if (flags & PAGE_WRITE) { - /* XXX: this code should not depend on lock_user */ - p = lock_user(VERIFY_WRITE, addr, l, 0); - if (!p) { - goto out_close; - } - memcpy(p, buf, l); - unlock_user(p, addr, l); - } else { - /* Bypass the host page protection using ptrace. */ - if (fd == -1) { - fd = open("/proc/self/mem", O_WRONLY); - if (fd == -1) { - goto out; - } - } - /* - * If there is a TranslationBlock and we weren't bypassing the - * host page protection, the memcpy() above would SEGV, - * ultimately leading to page_unprotect(). So invalidate the - * translations manually. Both invalidation and pwrite() must - * be under mmap_lock() in order to prevent the creation of - * another TranslationBlock in between. - */ - mmap_lock(); - tb_invalidate_phys_range(addr, addr + l - 1); - written = pwrite(fd, buf, l, - (off_t)(uintptr_t)g2h_untagged(addr)); - mmap_unlock(); - if (written != l) { - goto out_close; - } - } - } else if (flags & PAGE_READ) { - /* XXX: this code should not depend on lock_user */ - p = lock_user(VERIFY_READ, addr, l, 1); - if (!p) { - goto out_close; - } - memcpy(buf, p, l); - unlock_user(p, addr, 0); - } else { - /* Bypass the host page protection using ptrace. */ - if (fd == -1) { - fd = open("/proc/self/mem", O_RDONLY); - if (fd == -1) { - goto out; - } - } - if (pread(fd, buf, l, - (off_t)(uintptr_t)g2h_untagged(addr)) != l) { - goto out_close; - } - } - len -= l; - buf += l; - addr += l; - } - ret = 0; -out_close: - if (fd != -1) { - close(fd); - } -out: - return ret; -} -#endif - -bool target_words_bigendian(void) -{ - return TARGET_BIG_ENDIAN; -} - -const char *target_name(void) -{ - return TARGET_NAME; -} diff --git a/crypto/afalg.c b/crypto/afalg.c index 52a491dbb51..246d0679d43 100644 --- a/crypto/afalg.c +++ b/crypto/afalg.c @@ -66,13 +66,13 @@ qcrypto_afalg_socket_bind(const char *type, const char *name, return sbind; } -QCryptoAFAlg * +QCryptoAFAlgo * qcrypto_afalg_comm_alloc(const char *type, const char *name, Error **errp) { - QCryptoAFAlg *afalg; + QCryptoAFAlgo *afalg; - afalg = g_new0(QCryptoAFAlg, 1); + afalg = g_new0(QCryptoAFAlgo, 1); /* initialize crypto API socket */ afalg->opfd = -1; afalg->tfmfd = qcrypto_afalg_socket_bind(type, name, errp); @@ -93,7 +93,7 @@ qcrypto_afalg_comm_alloc(const char *type, const char *name, return NULL; } -void qcrypto_afalg_comm_free(QCryptoAFAlg *afalg) +void qcrypto_afalg_comm_free(QCryptoAFAlgo *afalg) { if (!afalg) { return; diff --git a/crypto/afalgpriv.h b/crypto/afalgpriv.h index 5a2393f1b7e..3fdcc0f8316 100644 --- a/crypto/afalgpriv.h +++ b/crypto/afalgpriv.h @@ -30,9 +30,9 @@ #define ALG_OPTYPE_LEN 4 #define ALG_MSGIV_LEN(len) (sizeof(struct af_alg_iv) + (len)) -typedef struct QCryptoAFAlg QCryptoAFAlg; +typedef struct QCryptoAFAlgo QCryptoAFAlgo; -struct QCryptoAFAlg { +struct QCryptoAFAlgo { QCryptoCipher base; int tfmfd; @@ -46,22 +46,22 @@ struct QCryptoAFAlg { * @type: the type of crypto operation * @name: the name of crypto operation * - * Allocate a QCryptoAFAlg 
object and bind itself to + * Allocate a QCryptoAFAlgo object and bind itself to * a AF_ALG socket. * * Returns: - * a new QCryptoAFAlg object, or NULL in error. + * a new QCryptoAFAlgo object, or NULL in error. */ -QCryptoAFAlg * +QCryptoAFAlgo * qcrypto_afalg_comm_alloc(const char *type, const char *name, Error **errp); /** * afalg_comm_free: - * @afalg: the QCryptoAFAlg object + * @afalg: the QCryptoAFAlgo object * * Free the @afalg. */ -void qcrypto_afalg_comm_free(QCryptoAFAlg *afalg); +void qcrypto_afalg_comm_free(QCryptoAFAlgo *afalg); #endif diff --git a/crypto/afsplit.c b/crypto/afsplit.c index b1a5a208995..b2e383aa678 100644 --- a/crypto/afsplit.c +++ b/crypto/afsplit.c @@ -40,7 +40,7 @@ static void qcrypto_afsplit_xor(size_t blocklen, } -static int qcrypto_afsplit_hash(QCryptoHashAlgorithm hash, +static int qcrypto_afsplit_hash(QCryptoHashAlgo hash, size_t blocklen, uint8_t *block, Error **errp) @@ -85,7 +85,7 @@ static int qcrypto_afsplit_hash(QCryptoHashAlgorithm hash, } -int qcrypto_afsplit_encode(QCryptoHashAlgorithm hash, +int qcrypto_afsplit_encode(QCryptoHashAlgo hash, size_t blocklen, uint32_t stripes, const uint8_t *in, @@ -117,7 +117,7 @@ int qcrypto_afsplit_encode(QCryptoHashAlgorithm hash, } -int qcrypto_afsplit_decode(QCryptoHashAlgorithm hash, +int qcrypto_afsplit_decode(QCryptoHashAlgo hash, size_t blocklen, uint32_t stripes, const uint8_t *in, diff --git a/crypto/akcipher-gcrypt.c.inc b/crypto/akcipher-gcrypt.c.inc index abb1fb272e4..bcf030fdec4 100644 --- a/crypto/akcipher-gcrypt.c.inc +++ b/crypto/akcipher-gcrypt.c.inc @@ -26,14 +26,14 @@ #include "crypto/akcipher.h" #include "crypto/random.h" #include "qapi/error.h" -#include "sysemu/cryptodev.h" +#include "system/cryptodev.h" #include "rsakey.h" typedef struct QCryptoGcryptRSA { QCryptoAkCipher akcipher; gcry_sexp_t key; - QCryptoRSAPaddingAlgorithm padding_alg; - QCryptoHashAlgorithm hash_alg; + QCryptoRSAPaddingAlgo padding_alg; + QCryptoHashAlgo hash_alg; } QCryptoGcryptRSA; static void qcrypto_gcrypt_rsa_free(QCryptoAkCipher *akcipher) @@ -59,7 +59,7 @@ QCryptoAkCipher *qcrypto_akcipher_new(const QCryptoAkCipherOptions *opts, Error **errp) { switch (opts->alg) { - case QCRYPTO_AKCIPHER_ALG_RSA: + case QCRYPTO_AK_CIPHER_ALGO_RSA: return (QCryptoAkCipher *)qcrypto_gcrypt_rsa_new( &opts->u.rsa, type, key, keylen, errp); @@ -85,7 +85,7 @@ static int qcrypto_gcrypt_parse_rsa_private_key( const uint8_t *key, size_t keylen, Error **errp) { g_autoptr(QCryptoAkCipherRSAKey) rsa_key = qcrypto_akcipher_rsakey_parse( - QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, key, keylen, errp); + QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE, key, keylen, errp); gcry_mpi_t n = NULL, e = NULL, d = NULL, p = NULL, q = NULL, u = NULL; bool compute_mul_inv = false; int ret = -1; @@ -178,7 +178,7 @@ static int qcrypto_gcrypt_parse_rsa_public_key(QCryptoGcryptRSA *rsa, { g_autoptr(QCryptoAkCipherRSAKey) rsa_key = qcrypto_akcipher_rsakey_parse( - QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, key, keylen, errp); + QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC, key, keylen, errp); gcry_mpi_t n = NULL, e = NULL; int ret = -1; gcry_error_t err; @@ -241,7 +241,7 @@ static int qcrypto_gcrypt_rsa_encrypt(QCryptoAkCipher *akcipher, err = gcry_sexp_build(&data_sexp, NULL, "(data (flags %s) (value %b))", - QCryptoRSAPaddingAlgorithm_str(rsa->padding_alg), + QCryptoRSAPaddingAlgo_str(rsa->padding_alg), in_len, in); if (gcry_err_code(err) != 0) { error_setg(errp, "Failed to build plaintext: %s/%s", @@ -263,7 +263,7 @@ static int qcrypto_gcrypt_rsa_encrypt(QCryptoAkCipher *akcipher, goto 
cleanup; } - if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALG_RAW) { + if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALGO_RAW) { cipher_mpi = gcry_sexp_nth_mpi(cipher_sexp_item, 1, GCRYMPI_FMT_USG); if (!cipher_mpi) { error_setg(errp, "Invalid ciphertext result"); @@ -332,7 +332,7 @@ static int qcrypto_gcrypt_rsa_decrypt(QCryptoAkCipher *akcipher, err = gcry_sexp_build(&cipher_sexp, NULL, "(enc-val (flags %s) (rsa (a %b) ))", - QCryptoRSAPaddingAlgorithm_str(rsa->padding_alg), + QCryptoRSAPaddingAlgo_str(rsa->padding_alg), in_len, in); if (gcry_err_code(err) != 0) { error_setg(errp, "Failed to build ciphertext: %s/%s", @@ -348,7 +348,7 @@ static int qcrypto_gcrypt_rsa_decrypt(QCryptoAkCipher *akcipher, } /* S-expression of plaintext: (value plaintext) */ - if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALG_RAW) { + if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALGO_RAW) { data_mpi = gcry_sexp_nth_mpi(data_sexp, 1, GCRYMPI_FMT_USG); if (!data_mpi) { error_setg(errp, "Invalid plaintext result"); @@ -410,14 +410,14 @@ static int qcrypto_gcrypt_rsa_sign(QCryptoAkCipher *akcipher, return ret; } - if (rsa->padding_alg != QCRYPTO_RSA_PADDING_ALG_PKCS1) { + if (rsa->padding_alg != QCRYPTO_RSA_PADDING_ALGO_PKCS1) { error_setg(errp, "Invalid padding %u", rsa->padding_alg); return ret; } err = gcry_sexp_build(&dgst_sexp, NULL, "(data (flags pkcs1) (hash %s %b))", - QCryptoHashAlgorithm_str(rsa->hash_alg), + QCryptoHashAlgo_str(rsa->hash_alg), in_len, in); if (gcry_err_code(err) != 0) { error_setg(errp, "Failed to build dgst: %s/%s", @@ -482,7 +482,7 @@ static int qcrypto_gcrypt_rsa_verify(QCryptoAkCipher *akcipher, return ret; } - if (rsa->padding_alg != QCRYPTO_RSA_PADDING_ALG_PKCS1) { + if (rsa->padding_alg != QCRYPTO_RSA_PADDING_ALGO_PKCS1) { error_setg(errp, "Invalid padding %u", rsa->padding_alg); return ret; } @@ -497,7 +497,7 @@ static int qcrypto_gcrypt_rsa_verify(QCryptoAkCipher *akcipher, err = gcry_sexp_build(&dgst_sexp, NULL, "(data (flags pkcs1) (hash %s %b))", - QCryptoHashAlgorithm_str(rsa->hash_alg), + QCryptoHashAlgo_str(rsa->hash_alg), in2_len, in2); if (gcry_err_code(err) != 0) { error_setg(errp, "Failed to build dgst: %s/%s", @@ -540,13 +540,13 @@ static QCryptoGcryptRSA *qcrypto_gcrypt_rsa_new( rsa->akcipher.driver = &gcrypt_rsa; switch (type) { - case QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE: + case QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE: if (qcrypto_gcrypt_parse_rsa_private_key(rsa, key, keylen, errp) != 0) { goto error; } break; - case QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC: + case QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC: if (qcrypto_gcrypt_parse_rsa_public_key(rsa, key, keylen, errp) != 0) { goto error; } @@ -568,17 +568,17 @@ error: bool qcrypto_akcipher_supports(QCryptoAkCipherOptions *opts) { switch (opts->alg) { - case QCRYPTO_AKCIPHER_ALG_RSA: + case QCRYPTO_AK_CIPHER_ALGO_RSA: switch (opts->u.rsa.padding_alg) { - case QCRYPTO_RSA_PADDING_ALG_RAW: + case QCRYPTO_RSA_PADDING_ALGO_RAW: return true; - case QCRYPTO_RSA_PADDING_ALG_PKCS1: + case QCRYPTO_RSA_PADDING_ALGO_PKCS1: switch (opts->u.rsa.hash_alg) { - case QCRYPTO_HASH_ALG_MD5: - case QCRYPTO_HASH_ALG_SHA1: - case QCRYPTO_HASH_ALG_SHA256: - case QCRYPTO_HASH_ALG_SHA512: + case QCRYPTO_HASH_ALGO_MD5: + case QCRYPTO_HASH_ALGO_SHA1: + case QCRYPTO_HASH_ALGO_SHA256: + case QCRYPTO_HASH_ALGO_SHA512: return true; default: diff --git a/crypto/akcipher-nettle.c.inc b/crypto/akcipher-nettle.c.inc index 02699e6e6d1..1d4bd6960e0 100644 --- a/crypto/akcipher-nettle.c.inc +++ b/crypto/akcipher-nettle.c.inc @@ -26,15 +26,15 @@ #include "crypto/akcipher.h" 
#include "crypto/random.h" #include "qapi/error.h" -#include "sysemu/cryptodev.h" +#include "system/cryptodev.h" #include "rsakey.h" typedef struct QCryptoNettleRSA { QCryptoAkCipher akcipher; struct rsa_public_key pub; struct rsa_private_key priv; - QCryptoRSAPaddingAlgorithm padding_alg; - QCryptoHashAlgorithm hash_alg; + QCryptoRSAPaddingAlgo padding_alg; + QCryptoHashAlgo hash_alg; } QCryptoNettleRSA; static void qcrypto_nettle_rsa_free(QCryptoAkCipher *akcipher) @@ -61,7 +61,7 @@ QCryptoAkCipher *qcrypto_akcipher_new(const QCryptoAkCipherOptions *opts, Error **errp) { switch (opts->alg) { - case QCRYPTO_AKCIPHER_ALG_RSA: + case QCRYPTO_AK_CIPHER_ALGO_RSA: return qcrypto_nettle_rsa_new(&opts->u.rsa, type, key, keylen, errp); default: @@ -87,7 +87,7 @@ static int qcrypt_nettle_parse_rsa_private_key(QCryptoNettleRSA *rsa, Error **errp) { g_autoptr(QCryptoAkCipherRSAKey) rsa_key = qcrypto_akcipher_rsakey_parse( - QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, key, keylen, errp); + QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE, key, keylen, errp); if (!rsa_key) { return -1; @@ -137,7 +137,7 @@ static int qcrypt_nettle_parse_rsa_public_key(QCryptoNettleRSA *rsa, Error **errp) { g_autoptr(QCryptoAkCipherRSAKey) rsa_key = qcrypto_akcipher_rsakey_parse( - QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, key, keylen, errp); + QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC, key, keylen, errp); if (!rsa_key) { return -1; @@ -184,11 +184,11 @@ static int qcrypto_nettle_rsa_encrypt(QCryptoAkCipher *akcipher, /* Nettle do not support RSA encryption without any padding */ switch (rsa->padding_alg) { - case QCRYPTO_RSA_PADDING_ALG_RAW: + case QCRYPTO_RSA_PADDING_ALGO_RAW: error_setg(errp, "RSA with raw padding is not supported"); break; - case QCRYPTO_RSA_PADDING_ALG_PKCS1: + case QCRYPTO_RSA_PADDING_ALGO_PKCS1: mpz_init(c); if (rsa_encrypt(&rsa->pub, NULL, wrap_nettle_random_func, data_len, (uint8_t *)data, c) != 1) { @@ -223,11 +223,11 @@ static int qcrypto_nettle_rsa_decrypt(QCryptoAkCipher *akcipher, } switch (rsa->padding_alg) { - case QCRYPTO_RSA_PADDING_ALG_RAW: + case QCRYPTO_RSA_PADDING_ALGO_RAW: error_setg(errp, "RSA with raw padding is not supported"); break; - case QCRYPTO_RSA_PADDING_ALG_PKCS1: + case QCRYPTO_RSA_PADDING_ALGO_PKCS1: nettle_mpz_init_set_str_256_u(c, enc_len, enc); if (!rsa_decrypt(&rsa->priv, &data_len, (uint8_t *)data, c)) { error_setg(errp, "Failed to decrypt"); @@ -257,7 +257,7 @@ static int qcrypto_nettle_rsa_sign(QCryptoAkCipher *akcipher, * The RSA algorithm cannot be used for signature/verification * without padding. */ - if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALG_RAW) { + if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALGO_RAW) { error_setg(errp, "Try to make signature without padding"); return ret; } @@ -276,19 +276,19 @@ static int qcrypto_nettle_rsa_sign(QCryptoAkCipher *akcipher, mpz_init(s); switch (rsa->hash_alg) { - case QCRYPTO_HASH_ALG_MD5: + case QCRYPTO_HASH_ALGO_MD5: rv = rsa_md5_sign_digest(&rsa->priv, data, s); break; - case QCRYPTO_HASH_ALG_SHA1: + case QCRYPTO_HASH_ALGO_SHA1: rv = rsa_sha1_sign_digest(&rsa->priv, data, s); break; - case QCRYPTO_HASH_ALG_SHA256: + case QCRYPTO_HASH_ALGO_SHA256: rv = rsa_sha256_sign_digest(&rsa->priv, data, s); break; - case QCRYPTO_HASH_ALG_SHA512: + case QCRYPTO_HASH_ALGO_SHA512: rv = rsa_sha512_sign_digest(&rsa->priv, data, s); break; @@ -324,7 +324,7 @@ static int qcrypto_nettle_rsa_verify(QCryptoAkCipher *akcipher, * The RSA algorithm cannot be used for signature/verification * without padding. 
*/ - if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALG_RAW) { + if (rsa->padding_alg == QCRYPTO_RSA_PADDING_ALGO_RAW) { error_setg(errp, "Try to verify signature without padding"); return ret; } @@ -341,19 +341,19 @@ static int qcrypto_nettle_rsa_verify(QCryptoAkCipher *akcipher, nettle_mpz_init_set_str_256_u(s, sig_len, sig); switch (rsa->hash_alg) { - case QCRYPTO_HASH_ALG_MD5: + case QCRYPTO_HASH_ALGO_MD5: rv = rsa_md5_verify_digest(&rsa->pub, data, s); break; - case QCRYPTO_HASH_ALG_SHA1: + case QCRYPTO_HASH_ALGO_SHA1: rv = rsa_sha1_verify_digest(&rsa->pub, data, s); break; - case QCRYPTO_HASH_ALG_SHA256: + case QCRYPTO_HASH_ALGO_SHA256: rv = rsa_sha256_verify_digest(&rsa->pub, data, s); break; - case QCRYPTO_HASH_ALG_SHA512: + case QCRYPTO_HASH_ALGO_SHA512: rv = rsa_sha512_verify_digest(&rsa->pub, data, s); break; @@ -397,13 +397,13 @@ static QCryptoAkCipher *qcrypto_nettle_rsa_new( rsa_private_key_init(&rsa->priv); switch (type) { - case QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE: + case QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE: if (qcrypt_nettle_parse_rsa_private_key(rsa, key, keylen, errp) != 0) { goto error; } break; - case QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC: + case QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC: if (qcrypt_nettle_parse_rsa_public_key(rsa, key, keylen, errp) != 0) { goto error; } @@ -425,21 +425,21 @@ error: bool qcrypto_akcipher_supports(QCryptoAkCipherOptions *opts) { switch (opts->alg) { - case QCRYPTO_AKCIPHER_ALG_RSA: + case QCRYPTO_AK_CIPHER_ALGO_RSA: switch (opts->u.rsa.padding_alg) { - case QCRYPTO_RSA_PADDING_ALG_PKCS1: + case QCRYPTO_RSA_PADDING_ALGO_PKCS1: switch (opts->u.rsa.hash_alg) { - case QCRYPTO_HASH_ALG_MD5: - case QCRYPTO_HASH_ALG_SHA1: - case QCRYPTO_HASH_ALG_SHA256: - case QCRYPTO_HASH_ALG_SHA512: + case QCRYPTO_HASH_ALGO_MD5: + case QCRYPTO_HASH_ALGO_SHA1: + case QCRYPTO_HASH_ALGO_SHA256: + case QCRYPTO_HASH_ALGO_SHA512: return true; default: return false; } - case QCRYPTO_RSA_PADDING_ALG_RAW: + case QCRYPTO_RSA_PADDING_ALGO_RAW: default: return false; } diff --git a/crypto/akcipher.c b/crypto/akcipher.c index e4bbc6e5f1a..0a0576b7922 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -115,7 +115,7 @@ int qcrypto_akcipher_export_p8info(const QCryptoAkCipherOptions *opts, Error **errp) { switch (opts->alg) { - case QCRYPTO_AKCIPHER_ALG_RSA: + case QCRYPTO_AK_CIPHER_ALGO_RSA: qcrypto_akcipher_rsakey_export_p8info(key, keylen, dst, dst_len); return 0; diff --git a/crypto/akcipherpriv.h b/crypto/akcipherpriv.h index 739f639bcf2..3b33e54f081 100644 --- a/crypto/akcipherpriv.h +++ b/crypto/akcipherpriv.h @@ -27,7 +27,7 @@ typedef struct QCryptoAkCipherDriver QCryptoAkCipherDriver; struct QCryptoAkCipher { - QCryptoAkCipherAlgorithm alg; + QCryptoAkCipherAlgo alg; QCryptoAkCipherKeyType type; int max_plaintext_len; int max_ciphertext_len; diff --git a/crypto/block-luks.c b/crypto/block-luks.c index 45347adeeb7..0926ad28f0c 100644 --- a/crypto/block-luks.c +++ b/crypto/block-luks.c @@ -68,38 +68,38 @@ struct QCryptoBlockLUKSCipherNameMap { static const QCryptoBlockLUKSCipherSizeMap qcrypto_block_luks_cipher_size_map_aes[] = { - { 16, QCRYPTO_CIPHER_ALG_AES_128 }, - { 24, QCRYPTO_CIPHER_ALG_AES_192 }, - { 32, QCRYPTO_CIPHER_ALG_AES_256 }, + { 16, QCRYPTO_CIPHER_ALGO_AES_128 }, + { 24, QCRYPTO_CIPHER_ALGO_AES_192 }, + { 32, QCRYPTO_CIPHER_ALGO_AES_256 }, { 0, 0 }, }; static const QCryptoBlockLUKSCipherSizeMap qcrypto_block_luks_cipher_size_map_cast5[] = { - { 16, QCRYPTO_CIPHER_ALG_CAST5_128 }, + { 16, QCRYPTO_CIPHER_ALGO_CAST5_128 }, { 0, 0 }, }; static const 
QCryptoBlockLUKSCipherSizeMap qcrypto_block_luks_cipher_size_map_serpent[] = { - { 16, QCRYPTO_CIPHER_ALG_SERPENT_128 }, - { 24, QCRYPTO_CIPHER_ALG_SERPENT_192 }, - { 32, QCRYPTO_CIPHER_ALG_SERPENT_256 }, + { 16, QCRYPTO_CIPHER_ALGO_SERPENT_128 }, + { 24, QCRYPTO_CIPHER_ALGO_SERPENT_192 }, + { 32, QCRYPTO_CIPHER_ALGO_SERPENT_256 }, { 0, 0 }, }; static const QCryptoBlockLUKSCipherSizeMap qcrypto_block_luks_cipher_size_map_twofish[] = { - { 16, QCRYPTO_CIPHER_ALG_TWOFISH_128 }, - { 24, QCRYPTO_CIPHER_ALG_TWOFISH_192 }, - { 32, QCRYPTO_CIPHER_ALG_TWOFISH_256 }, + { 16, QCRYPTO_CIPHER_ALGO_TWOFISH_128 }, + { 24, QCRYPTO_CIPHER_ALGO_TWOFISH_192 }, + { 32, QCRYPTO_CIPHER_ALGO_TWOFISH_256 }, { 0, 0 }, }; #ifdef CONFIG_CRYPTO_SM4 static const QCryptoBlockLUKSCipherSizeMap qcrypto_block_luks_cipher_size_map_sm4[] = { - { 16, QCRYPTO_CIPHER_ALG_SM4}, + { 16, QCRYPTO_CIPHER_ALGO_SM4}, { 0, 0 }, }; #endif @@ -123,25 +123,25 @@ struct QCryptoBlockLUKS { QCryptoBlockLUKSHeader header; /* Main encryption algorithm used for encryption*/ - QCryptoCipherAlgorithm cipher_alg; + QCryptoCipherAlgo cipher_alg; /* Mode of encryption for the selected encryption algorithm */ QCryptoCipherMode cipher_mode; /* Initialization vector generation algorithm */ - QCryptoIVGenAlgorithm ivgen_alg; + QCryptoIVGenAlgo ivgen_alg; /* Hash algorithm used for IV generation*/ - QCryptoHashAlgorithm ivgen_hash_alg; + QCryptoHashAlgo ivgen_hash_alg; /* * Encryption algorithm used for IV generation. * Usually the same as main encryption algorithm */ - QCryptoCipherAlgorithm ivgen_cipher_alg; + QCryptoCipherAlgo ivgen_cipher_alg; /* Hash algorithm used in pbkdf2 function */ - QCryptoHashAlgorithm hash_alg; + QCryptoHashAlgo hash_alg; /* Name of the secret that was used to open the image */ char *secret; @@ -179,7 +179,7 @@ static int qcrypto_block_luks_cipher_name_lookup(const char *name, } static const char * -qcrypto_block_luks_cipher_alg_lookup(QCryptoCipherAlgorithm alg, +qcrypto_block_luks_cipher_alg_lookup(QCryptoCipherAlgo alg, Error **errp) { const QCryptoBlockLUKSCipherNameMap *map = @@ -195,7 +195,7 @@ qcrypto_block_luks_cipher_alg_lookup(QCryptoCipherAlgorithm alg, } error_setg(errp, "Algorithm '%s' not supported", - QCryptoCipherAlgorithm_str(alg)); + QCryptoCipherAlgo_str(alg)); return NULL; } @@ -223,13 +223,13 @@ static int qcrypto_block_luks_name_lookup(const char *name, #define qcrypto_block_luks_hash_name_lookup(name, errp) \ qcrypto_block_luks_name_lookup(name, \ - &QCryptoHashAlgorithm_lookup, \ + &QCryptoHashAlgo_lookup, \ "Hash algorithm", \ errp) #define qcrypto_block_luks_ivgen_name_lookup(name, errp) \ qcrypto_block_luks_name_lookup(name, \ - &QCryptoIVGenAlgorithm_lookup, \ + &QCryptoIVGenAlgo_lookup, \ "IV generator", \ errp) @@ -262,9 +262,9 @@ qcrypto_block_luks_has_format(const uint8_t *buf, * the cipher since that gets a key length matching the digest * size, not AES 128 with truncated digest as might be imagined */ -static QCryptoCipherAlgorithm -qcrypto_block_luks_essiv_cipher(QCryptoCipherAlgorithm cipher, - QCryptoHashAlgorithm hash, +static QCryptoCipherAlgo +qcrypto_block_luks_essiv_cipher(QCryptoCipherAlgo cipher, + QCryptoHashAlgo hash, Error **errp) { size_t digestlen = qcrypto_hash_digest_len(hash); @@ -274,54 +274,54 @@ qcrypto_block_luks_essiv_cipher(QCryptoCipherAlgorithm cipher, } switch (cipher) { - case QCRYPTO_CIPHER_ALG_AES_128: - case QCRYPTO_CIPHER_ALG_AES_192: - case QCRYPTO_CIPHER_ALG_AES_256: + case QCRYPTO_CIPHER_ALGO_AES_128: + case QCRYPTO_CIPHER_ALGO_AES_192: + case 
QCRYPTO_CIPHER_ALGO_AES_256: if (digestlen == qcrypto_cipher_get_key_len( - QCRYPTO_CIPHER_ALG_AES_128)) { - return QCRYPTO_CIPHER_ALG_AES_128; + QCRYPTO_CIPHER_ALGO_AES_128)) { + return QCRYPTO_CIPHER_ALGO_AES_128; } else if (digestlen == qcrypto_cipher_get_key_len( - QCRYPTO_CIPHER_ALG_AES_192)) { - return QCRYPTO_CIPHER_ALG_AES_192; + QCRYPTO_CIPHER_ALGO_AES_192)) { + return QCRYPTO_CIPHER_ALGO_AES_192; } else if (digestlen == qcrypto_cipher_get_key_len( - QCRYPTO_CIPHER_ALG_AES_256)) { - return QCRYPTO_CIPHER_ALG_AES_256; + QCRYPTO_CIPHER_ALGO_AES_256)) { + return QCRYPTO_CIPHER_ALGO_AES_256; } else { error_setg(errp, "No AES cipher with key size %zu available", digestlen); return 0; } break; - case QCRYPTO_CIPHER_ALG_SERPENT_128: - case QCRYPTO_CIPHER_ALG_SERPENT_192: - case QCRYPTO_CIPHER_ALG_SERPENT_256: + case QCRYPTO_CIPHER_ALGO_SERPENT_128: + case QCRYPTO_CIPHER_ALGO_SERPENT_192: + case QCRYPTO_CIPHER_ALGO_SERPENT_256: if (digestlen == qcrypto_cipher_get_key_len( - QCRYPTO_CIPHER_ALG_SERPENT_128)) { - return QCRYPTO_CIPHER_ALG_SERPENT_128; + QCRYPTO_CIPHER_ALGO_SERPENT_128)) { + return QCRYPTO_CIPHER_ALGO_SERPENT_128; } else if (digestlen == qcrypto_cipher_get_key_len( - QCRYPTO_CIPHER_ALG_SERPENT_192)) { - return QCRYPTO_CIPHER_ALG_SERPENT_192; + QCRYPTO_CIPHER_ALGO_SERPENT_192)) { + return QCRYPTO_CIPHER_ALGO_SERPENT_192; } else if (digestlen == qcrypto_cipher_get_key_len( - QCRYPTO_CIPHER_ALG_SERPENT_256)) { - return QCRYPTO_CIPHER_ALG_SERPENT_256; + QCRYPTO_CIPHER_ALGO_SERPENT_256)) { + return QCRYPTO_CIPHER_ALGO_SERPENT_256; } else { error_setg(errp, "No Serpent cipher with key size %zu available", digestlen); return 0; } break; - case QCRYPTO_CIPHER_ALG_TWOFISH_128: - case QCRYPTO_CIPHER_ALG_TWOFISH_192: - case QCRYPTO_CIPHER_ALG_TWOFISH_256: + case QCRYPTO_CIPHER_ALGO_TWOFISH_128: + case QCRYPTO_CIPHER_ALGO_TWOFISH_192: + case QCRYPTO_CIPHER_ALGO_TWOFISH_256: if (digestlen == qcrypto_cipher_get_key_len( - QCRYPTO_CIPHER_ALG_TWOFISH_128)) { - return QCRYPTO_CIPHER_ALG_TWOFISH_128; + QCRYPTO_CIPHER_ALGO_TWOFISH_128)) { + return QCRYPTO_CIPHER_ALGO_TWOFISH_128; } else if (digestlen == qcrypto_cipher_get_key_len( - QCRYPTO_CIPHER_ALG_TWOFISH_192)) { - return QCRYPTO_CIPHER_ALG_TWOFISH_192; + QCRYPTO_CIPHER_ALGO_TWOFISH_192)) { + return QCRYPTO_CIPHER_ALGO_TWOFISH_192; } else if (digestlen == qcrypto_cipher_get_key_len( - QCRYPTO_CIPHER_ALG_TWOFISH_256)) { - return QCRYPTO_CIPHER_ALG_TWOFISH_256; + QCRYPTO_CIPHER_ALGO_TWOFISH_256)) { + return QCRYPTO_CIPHER_ALGO_TWOFISH_256; } else { error_setg(errp, "No Twofish cipher with key size %zu available", digestlen); @@ -330,7 +330,7 @@ qcrypto_block_luks_essiv_cipher(QCryptoCipherAlgorithm cipher, break; default: error_setg(errp, "Cipher %s not supported with essiv", - QCryptoCipherAlgorithm_str(cipher)); + QCryptoCipherAlgo_str(cipher)); return 0; } } @@ -660,7 +660,7 @@ qcrypto_block_luks_parse_header(QCryptoBlockLUKS *luks, Error **errp) return -1; } - if (luks->ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) { + if (luks->ivgen_alg == QCRYPTO_IV_GEN_ALGO_ESSIV) { if (!ivhash_name) { error_setg(errp, "Missing IV generator hash specification"); return -1; @@ -1322,20 +1322,20 @@ qcrypto_block_luks_create(QCryptoBlock *block, luks_opts.iter_time = QCRYPTO_BLOCK_LUKS_DEFAULT_ITER_TIME_MS; } if (!luks_opts.has_cipher_alg) { - luks_opts.cipher_alg = QCRYPTO_CIPHER_ALG_AES_256; + luks_opts.cipher_alg = QCRYPTO_CIPHER_ALGO_AES_256; } if (!luks_opts.has_cipher_mode) { luks_opts.cipher_mode = QCRYPTO_CIPHER_MODE_XTS; } if (!luks_opts.has_ivgen_alg) 
{ - luks_opts.ivgen_alg = QCRYPTO_IVGEN_ALG_PLAIN64; + luks_opts.ivgen_alg = QCRYPTO_IV_GEN_ALGO_PLAIN64; } if (!luks_opts.has_hash_alg) { - luks_opts.hash_alg = QCRYPTO_HASH_ALG_SHA256; + luks_opts.hash_alg = QCRYPTO_HASH_ALGO_SHA256; } - if (luks_opts.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) { + if (luks_opts.ivgen_alg == QCRYPTO_IV_GEN_ALGO_ESSIV) { if (!luks_opts.has_ivgen_hash_alg) { - luks_opts.ivgen_hash_alg = QCRYPTO_HASH_ALG_SHA256; + luks_opts.ivgen_hash_alg = QCRYPTO_HASH_ALGO_SHA256; luks_opts.has_ivgen_hash_alg = true; } } @@ -1384,15 +1384,15 @@ qcrypto_block_luks_create(QCryptoBlock *block, } cipher_mode = QCryptoCipherMode_str(luks_opts.cipher_mode); - ivgen_alg = QCryptoIVGenAlgorithm_str(luks_opts.ivgen_alg); + ivgen_alg = QCryptoIVGenAlgo_str(luks_opts.ivgen_alg); if (luks_opts.has_ivgen_hash_alg) { - ivgen_hash_alg = QCryptoHashAlgorithm_str(luks_opts.ivgen_hash_alg); + ivgen_hash_alg = QCryptoHashAlgo_str(luks_opts.ivgen_hash_alg); cipher_mode_spec = g_strdup_printf("%s-%s:%s", cipher_mode, ivgen_alg, ivgen_hash_alg); } else { cipher_mode_spec = g_strdup_printf("%s-%s", cipher_mode, ivgen_alg); } - hash_alg = QCryptoHashAlgorithm_str(luks_opts.hash_alg); + hash_alg = QCryptoHashAlgo_str(luks_opts.hash_alg); if (strlen(cipher_alg) >= QCRYPTO_BLOCK_LUKS_CIPHER_NAME_LEN) { @@ -1411,7 +1411,7 @@ qcrypto_block_luks_create(QCryptoBlock *block, goto error; } - if (luks_opts.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) { + if (luks_opts.ivgen_alg == QCRYPTO_IV_GEN_ALGO_ESSIV) { luks->ivgen_cipher_alg = qcrypto_block_luks_essiv_cipher(luks_opts.cipher_alg, luks_opts.ivgen_hash_alg, @@ -1861,11 +1861,11 @@ qcrypto_block_luks_amend_options(QCryptoBlock *block, QCryptoBlockAmendOptionsLUKS *opts_luks = &options->u.luks; switch (opts_luks->state) { - case Q_CRYPTO_BLOCKLUKS_KEYSLOT_STATE_ACTIVE: + case QCRYPTO_BLOCK_LUKS_KEYSLOT_STATE_ACTIVE: return qcrypto_block_luks_amend_add_keyslot(block, readfunc, writefunc, opaque, opts_luks, force, errp); - case Q_CRYPTO_BLOCKLUKS_KEYSLOT_STATE_INACTIVE: + case QCRYPTO_BLOCK_LUKS_KEYSLOT_STATE_INACTIVE: return qcrypto_block_luks_amend_erase_keyslots(block, readfunc, writefunc, opaque, opts_luks, force, errp); @@ -1886,7 +1886,7 @@ static int qcrypto_block_luks_get_info(QCryptoBlock *block, info->u.luks.cipher_alg = luks->cipher_alg; info->u.luks.cipher_mode = luks->cipher_mode; info->u.luks.ivgen_alg = luks->ivgen_alg; - if (info->u.luks.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) { + if (info->u.luks.ivgen_alg == QCRYPTO_IV_GEN_ALGO_ESSIV) { info->u.luks.has_ivgen_hash_alg = true; info->u.luks.ivgen_hash_alg = luks->ivgen_hash_alg; } diff --git a/crypto/block-qcow.c b/crypto/block-qcow.c index 42e9556e425..054078b8950 100644 --- a/crypto/block-qcow.c +++ b/crypto/block-qcow.c @@ -62,16 +62,16 @@ qcrypto_block_qcow_init(QCryptoBlock *block, memcpy(keybuf, password, MIN(len, sizeof(keybuf))); g_free(password); - block->niv = qcrypto_cipher_get_iv_len(QCRYPTO_CIPHER_ALG_AES_128, + block->niv = qcrypto_cipher_get_iv_len(QCRYPTO_CIPHER_ALGO_AES_128, QCRYPTO_CIPHER_MODE_CBC); - block->ivgen = qcrypto_ivgen_new(QCRYPTO_IVGEN_ALG_PLAIN64, + block->ivgen = qcrypto_ivgen_new(QCRYPTO_IV_GEN_ALGO_PLAIN64, 0, 0, NULL, 0, errp); if (!block->ivgen) { ret = -ENOTSUP; goto fail; } - ret = qcrypto_block_init_cipher(block, QCRYPTO_CIPHER_ALG_AES_128, + ret = qcrypto_block_init_cipher(block, QCRYPTO_CIPHER_ALGO_AES_128, QCRYPTO_CIPHER_MODE_CBC, keybuf, G_N_ELEMENTS(keybuf), errp); diff --git a/crypto/block.c b/crypto/block.c index 3bcc4270c3c..96c83e60b9f 100644 --- 
a/crypto/block.c +++ b/crypto/block.c @@ -26,8 +26,8 @@ #include "block-luks.h" static const QCryptoBlockDriver *qcrypto_block_drivers[] = { - [Q_CRYPTO_BLOCK_FORMAT_QCOW] = &qcrypto_block_driver_qcow, - [Q_CRYPTO_BLOCK_FORMAT_LUKS] = &qcrypto_block_driver_luks, + [QCRYPTO_BLOCK_FORMAT_QCOW] = &qcrypto_block_driver_qcow, + [QCRYPTO_BLOCK_FORMAT_LUKS] = &qcrypto_block_driver_luks, }; @@ -267,7 +267,7 @@ static void qcrypto_block_push_cipher(QCryptoBlock *block, int qcrypto_block_init_cipher(QCryptoBlock *block, - QCryptoCipherAlgorithm alg, + QCryptoCipherAlgo alg, QCryptoCipherMode mode, const uint8_t *key, size_t nkey, Error **errp) @@ -332,7 +332,7 @@ QCryptoIVGen *qcrypto_block_get_ivgen(QCryptoBlock *block) } -QCryptoHashAlgorithm qcrypto_block_get_kdf_hash(QCryptoBlock *block) +QCryptoHashAlgo qcrypto_block_get_kdf_hash(QCryptoBlock *block) { return block->kdfhash; } diff --git a/crypto/blockpriv.h b/crypto/blockpriv.h index b8f77cb5eb5..edf0b3a3d9f 100644 --- a/crypto/blockpriv.h +++ b/crypto/blockpriv.h @@ -33,7 +33,7 @@ struct QCryptoBlock { void *opaque; /* Cipher parameters */ - QCryptoCipherAlgorithm alg; + QCryptoCipherAlgo alg; QCryptoCipherMode mode; uint8_t *key; size_t nkey; @@ -44,7 +44,7 @@ struct QCryptoBlock { QCryptoIVGen *ivgen; QemuMutex mutex; - QCryptoHashAlgorithm kdfhash; + QCryptoHashAlgo kdfhash; size_t niv; uint64_t payload_offset; /* In bytes */ uint64_t sector_size; /* In bytes */ @@ -132,7 +132,7 @@ int qcrypto_block_encrypt_helper(QCryptoBlock *block, Error **errp); int qcrypto_block_init_cipher(QCryptoBlock *block, - QCryptoCipherAlgorithm alg, + QCryptoCipherAlgo alg, QCryptoCipherMode mode, const uint8_t *key, size_t nkey, Error **errp); diff --git a/crypto/cipher-afalg.c b/crypto/cipher-afalg.c index 3df8fc54c05..4980d419c43 100644 --- a/crypto/cipher-afalg.c +++ b/crypto/cipher-afalg.c @@ -18,7 +18,7 @@ static char * -qcrypto_afalg_cipher_format_name(QCryptoCipherAlgorithm alg, +qcrypto_afalg_cipher_format_name(QCryptoCipherAlgo alg, QCryptoCipherMode mode, Error **errp) { @@ -27,22 +27,22 @@ qcrypto_afalg_cipher_format_name(QCryptoCipherAlgorithm alg, const char *mode_name; switch (alg) { - case QCRYPTO_CIPHER_ALG_AES_128: - case QCRYPTO_CIPHER_ALG_AES_192: - case QCRYPTO_CIPHER_ALG_AES_256: + case QCRYPTO_CIPHER_ALGO_AES_128: + case QCRYPTO_CIPHER_ALGO_AES_192: + case QCRYPTO_CIPHER_ALGO_AES_256: alg_name = "aes"; break; - case QCRYPTO_CIPHER_ALG_CAST5_128: + case QCRYPTO_CIPHER_ALGO_CAST5_128: alg_name = "cast5"; break; - case QCRYPTO_CIPHER_ALG_SERPENT_128: - case QCRYPTO_CIPHER_ALG_SERPENT_192: - case QCRYPTO_CIPHER_ALG_SERPENT_256: + case QCRYPTO_CIPHER_ALGO_SERPENT_128: + case QCRYPTO_CIPHER_ALGO_SERPENT_192: + case QCRYPTO_CIPHER_ALGO_SERPENT_256: alg_name = "serpent"; break; - case QCRYPTO_CIPHER_ALG_TWOFISH_128: - case QCRYPTO_CIPHER_ALG_TWOFISH_192: - case QCRYPTO_CIPHER_ALG_TWOFISH_256: + case QCRYPTO_CIPHER_ALGO_TWOFISH_128: + case QCRYPTO_CIPHER_ALGO_TWOFISH_192: + case QCRYPTO_CIPHER_ALGO_TWOFISH_256: alg_name = "twofish"; break; @@ -60,12 +60,12 @@ qcrypto_afalg_cipher_format_name(QCryptoCipherAlgorithm alg, static const struct QCryptoCipherDriver qcrypto_cipher_afalg_driver; QCryptoCipher * -qcrypto_afalg_cipher_ctx_new(QCryptoCipherAlgorithm alg, +qcrypto_afalg_cipher_ctx_new(QCryptoCipherAlgo alg, QCryptoCipherMode mode, const uint8_t *key, size_t nkey, Error **errp) { - QCryptoAFAlg *afalg; + QCryptoAFAlgo *afalg; size_t expect_niv; char *name; @@ -119,7 +119,7 @@ qcrypto_afalg_cipher_setiv(QCryptoCipher *cipher, const uint8_t *iv, 
size_t niv, Error **errp) { - QCryptoAFAlg *afalg = container_of(cipher, QCryptoAFAlg, base); + QCryptoAFAlgo *afalg = container_of(cipher, QCryptoAFAlgo, base); struct af_alg_iv *alg_iv; size_t expect_niv; @@ -143,7 +143,7 @@ qcrypto_afalg_cipher_setiv(QCryptoCipher *cipher, } static int -qcrypto_afalg_cipher_op(QCryptoAFAlg *afalg, +qcrypto_afalg_cipher_op(QCryptoAFAlgo *afalg, const void *in, void *out, size_t len, bool do_encrypt, Error **errp) @@ -202,7 +202,7 @@ qcrypto_afalg_cipher_encrypt(QCryptoCipher *cipher, const void *in, void *out, size_t len, Error **errp) { - QCryptoAFAlg *afalg = container_of(cipher, QCryptoAFAlg, base); + QCryptoAFAlgo *afalg = container_of(cipher, QCryptoAFAlgo, base); return qcrypto_afalg_cipher_op(afalg, in, out, len, true, errp); } @@ -212,14 +212,14 @@ qcrypto_afalg_cipher_decrypt(QCryptoCipher *cipher, const void *in, void *out, size_t len, Error **errp) { - QCryptoAFAlg *afalg = container_of(cipher, QCryptoAFAlg, base); + QCryptoAFAlgo *afalg = container_of(cipher, QCryptoAFAlgo, base); return qcrypto_afalg_cipher_op(afalg, in, out, len, false, errp); } static void qcrypto_afalg_comm_ctx_free(QCryptoCipher *cipher) { - QCryptoAFAlg *afalg = container_of(cipher, QCryptoAFAlg, base); + QCryptoAFAlgo *afalg = container_of(cipher, QCryptoAFAlgo, base); qcrypto_afalg_comm_free(afalg); } diff --git a/crypto/cipher-builtin.c.inc b/crypto/cipher-builtin.c.inc deleted file mode 100644 index b409089095c..00000000000 --- a/crypto/cipher-builtin.c.inc +++ /dev/null @@ -1,303 +0,0 @@ -/* - * QEMU Crypto cipher built-in algorithms - * - * Copyright (c) 2015 Red Hat, Inc. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - * - */ - -#include "crypto/aes.h" - -typedef struct QCryptoCipherBuiltinAESContext QCryptoCipherBuiltinAESContext; -struct QCryptoCipherBuiltinAESContext { - AES_KEY enc; - AES_KEY dec; -}; - -typedef struct QCryptoCipherBuiltinAES QCryptoCipherBuiltinAES; -struct QCryptoCipherBuiltinAES { - QCryptoCipher base; - QCryptoCipherBuiltinAESContext key; - uint8_t iv[AES_BLOCK_SIZE]; -}; - - -static inline bool qcrypto_length_check(size_t len, size_t blocksize, - Error **errp) -{ - if (unlikely(len & (blocksize - 1))) { - error_setg(errp, "Length %zu must be a multiple of block size %zu", - len, blocksize); - return false; - } - return true; -} - -static void qcrypto_cipher_ctx_free(QCryptoCipher *cipher) -{ - g_free(cipher); -} - -static int qcrypto_cipher_no_setiv(QCryptoCipher *cipher, - const uint8_t *iv, size_t niv, - Error **errp) -{ - error_setg(errp, "Setting IV is not supported"); - return -1; -} - -static void do_aes_encrypt_ecb(const void *vctx, - size_t len, - uint8_t *out, - const uint8_t *in) -{ - const QCryptoCipherBuiltinAESContext *ctx = vctx; - - /* We have already verified that len % AES_BLOCK_SIZE == 0. 
*/ - while (len) { - AES_encrypt(in, out, &ctx->enc); - in += AES_BLOCK_SIZE; - out += AES_BLOCK_SIZE; - len -= AES_BLOCK_SIZE; - } -} - -static void do_aes_decrypt_ecb(const void *vctx, - size_t len, - uint8_t *out, - const uint8_t *in) -{ - const QCryptoCipherBuiltinAESContext *ctx = vctx; - - /* We have already verified that len % AES_BLOCK_SIZE == 0. */ - while (len) { - AES_decrypt(in, out, &ctx->dec); - in += AES_BLOCK_SIZE; - out += AES_BLOCK_SIZE; - len -= AES_BLOCK_SIZE; - } -} - -static void do_aes_encrypt_cbc(const AES_KEY *key, - size_t len, - uint8_t *out, - const uint8_t *in, - uint8_t *ivec) -{ - uint8_t tmp[AES_BLOCK_SIZE]; - size_t n; - - /* We have already verified that len % AES_BLOCK_SIZE == 0. */ - while (len) { - for (n = 0; n < AES_BLOCK_SIZE; ++n) { - tmp[n] = in[n] ^ ivec[n]; - } - AES_encrypt(tmp, out, key); - memcpy(ivec, out, AES_BLOCK_SIZE); - len -= AES_BLOCK_SIZE; - in += AES_BLOCK_SIZE; - out += AES_BLOCK_SIZE; - } -} - -static void do_aes_decrypt_cbc(const AES_KEY *key, - size_t len, - uint8_t *out, - const uint8_t *in, - uint8_t *ivec) -{ - uint8_t tmp[AES_BLOCK_SIZE]; - size_t n; - - /* We have already verified that len % AES_BLOCK_SIZE == 0. */ - while (len) { - memcpy(tmp, in, AES_BLOCK_SIZE); - AES_decrypt(in, out, key); - for (n = 0; n < AES_BLOCK_SIZE; ++n) { - out[n] ^= ivec[n]; - } - memcpy(ivec, tmp, AES_BLOCK_SIZE); - len -= AES_BLOCK_SIZE; - in += AES_BLOCK_SIZE; - out += AES_BLOCK_SIZE; - } -} - -static int qcrypto_cipher_aes_encrypt_ecb(QCryptoCipher *cipher, - const void *in, void *out, - size_t len, Error **errp) -{ - QCryptoCipherBuiltinAES *ctx - = container_of(cipher, QCryptoCipherBuiltinAES, base); - - if (!qcrypto_length_check(len, AES_BLOCK_SIZE, errp)) { - return -1; - } - do_aes_encrypt_ecb(&ctx->key, len, out, in); - return 0; -} - -static int qcrypto_cipher_aes_decrypt_ecb(QCryptoCipher *cipher, - const void *in, void *out, - size_t len, Error **errp) -{ - QCryptoCipherBuiltinAES *ctx - = container_of(cipher, QCryptoCipherBuiltinAES, base); - - if (!qcrypto_length_check(len, AES_BLOCK_SIZE, errp)) { - return -1; - } - do_aes_decrypt_ecb(&ctx->key, len, out, in); - return 0; -} - -static int qcrypto_cipher_aes_encrypt_cbc(QCryptoCipher *cipher, - const void *in, void *out, - size_t len, Error **errp) -{ - QCryptoCipherBuiltinAES *ctx - = container_of(cipher, QCryptoCipherBuiltinAES, base); - - if (!qcrypto_length_check(len, AES_BLOCK_SIZE, errp)) { - return -1; - } - do_aes_encrypt_cbc(&ctx->key.enc, len, out, in, ctx->iv); - return 0; -} - -static int qcrypto_cipher_aes_decrypt_cbc(QCryptoCipher *cipher, - const void *in, void *out, - size_t len, Error **errp) -{ - QCryptoCipherBuiltinAES *ctx - = container_of(cipher, QCryptoCipherBuiltinAES, base); - - if (!qcrypto_length_check(len, AES_BLOCK_SIZE, errp)) { - return -1; - } - do_aes_decrypt_cbc(&ctx->key.dec, len, out, in, ctx->iv); - return 0; -} - -static int qcrypto_cipher_aes_setiv(QCryptoCipher *cipher, const uint8_t *iv, - size_t niv, Error **errp) -{ - QCryptoCipherBuiltinAES *ctx - = container_of(cipher, QCryptoCipherBuiltinAES, base); - - if (niv != AES_BLOCK_SIZE) { - error_setg(errp, "IV must be %d bytes not %zu", - AES_BLOCK_SIZE, niv); - return -1; - } - - memcpy(ctx->iv, iv, AES_BLOCK_SIZE); - return 0; -} - -static const struct QCryptoCipherDriver qcrypto_cipher_aes_driver_ecb = { - .cipher_encrypt = qcrypto_cipher_aes_encrypt_ecb, - .cipher_decrypt = qcrypto_cipher_aes_decrypt_ecb, - .cipher_setiv = qcrypto_cipher_no_setiv, - .cipher_free = qcrypto_cipher_ctx_free, 
-}; - -static const struct QCryptoCipherDriver qcrypto_cipher_aes_driver_cbc = { - .cipher_encrypt = qcrypto_cipher_aes_encrypt_cbc, - .cipher_decrypt = qcrypto_cipher_aes_decrypt_cbc, - .cipher_setiv = qcrypto_cipher_aes_setiv, - .cipher_free = qcrypto_cipher_ctx_free, -}; - -bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, - QCryptoCipherMode mode) -{ - switch (alg) { - case QCRYPTO_CIPHER_ALG_AES_128: - case QCRYPTO_CIPHER_ALG_AES_192: - case QCRYPTO_CIPHER_ALG_AES_256: - switch (mode) { - case QCRYPTO_CIPHER_MODE_ECB: - case QCRYPTO_CIPHER_MODE_CBC: - return true; - default: - return false; - } - break; - default: - return false; - } -} - -static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, - QCryptoCipherMode mode, - const uint8_t *key, - size_t nkey, - Error **errp) -{ - if (!qcrypto_cipher_validate_key_length(alg, mode, nkey, errp)) { - return NULL; - } - - switch (alg) { - case QCRYPTO_CIPHER_ALG_AES_128: - case QCRYPTO_CIPHER_ALG_AES_192: - case QCRYPTO_CIPHER_ALG_AES_256: - { - QCryptoCipherBuiltinAES *ctx; - const QCryptoCipherDriver *drv; - - switch (mode) { - case QCRYPTO_CIPHER_MODE_ECB: - drv = &qcrypto_cipher_aes_driver_ecb; - break; - case QCRYPTO_CIPHER_MODE_CBC: - drv = &qcrypto_cipher_aes_driver_cbc; - break; - default: - goto bad_mode; - } - - ctx = g_new0(QCryptoCipherBuiltinAES, 1); - ctx->base.driver = drv; - - if (AES_set_encrypt_key(key, nkey * 8, &ctx->key.enc)) { - error_setg(errp, "Failed to set encryption key"); - goto error; - } - if (AES_set_decrypt_key(key, nkey * 8, &ctx->key.dec)) { - error_setg(errp, "Failed to set decryption key"); - goto error; - } - - return &ctx->base; - - error: - g_free(ctx); - return NULL; - } - - default: - error_setg(errp, - "Unsupported cipher algorithm %s", - QCryptoCipherAlgorithm_str(alg)); - return NULL; - } - - bad_mode: - error_setg(errp, "Unsupported cipher mode %s", - QCryptoCipherMode_str(mode)); - return NULL; -} diff --git a/crypto/cipher-gcrypt.c.inc b/crypto/cipher-gcrypt.c.inc index 4a8314746db..12eb9ddb5a6 100644 --- a/crypto/cipher-gcrypt.c.inc +++ b/crypto/cipher-gcrypt.c.inc @@ -20,33 +20,33 @@ #include -static int qcrypto_cipher_alg_to_gcry_alg(QCryptoCipherAlgorithm alg) +static int qcrypto_cipher_alg_to_gcry_alg(QCryptoCipherAlgo alg) { switch (alg) { - case QCRYPTO_CIPHER_ALG_DES: + case QCRYPTO_CIPHER_ALGO_DES: return GCRY_CIPHER_DES; - case QCRYPTO_CIPHER_ALG_3DES: + case QCRYPTO_CIPHER_ALGO_3DES: return GCRY_CIPHER_3DES; - case QCRYPTO_CIPHER_ALG_AES_128: + case QCRYPTO_CIPHER_ALGO_AES_128: return GCRY_CIPHER_AES128; - case QCRYPTO_CIPHER_ALG_AES_192: + case QCRYPTO_CIPHER_ALGO_AES_192: return GCRY_CIPHER_AES192; - case QCRYPTO_CIPHER_ALG_AES_256: + case QCRYPTO_CIPHER_ALGO_AES_256: return GCRY_CIPHER_AES256; - case QCRYPTO_CIPHER_ALG_CAST5_128: + case QCRYPTO_CIPHER_ALGO_CAST5_128: return GCRY_CIPHER_CAST5; - case QCRYPTO_CIPHER_ALG_SERPENT_128: + case QCRYPTO_CIPHER_ALGO_SERPENT_128: return GCRY_CIPHER_SERPENT128; - case QCRYPTO_CIPHER_ALG_SERPENT_192: + case QCRYPTO_CIPHER_ALGO_SERPENT_192: return GCRY_CIPHER_SERPENT192; - case QCRYPTO_CIPHER_ALG_SERPENT_256: + case QCRYPTO_CIPHER_ALGO_SERPENT_256: return GCRY_CIPHER_SERPENT256; - case QCRYPTO_CIPHER_ALG_TWOFISH_128: + case QCRYPTO_CIPHER_ALGO_TWOFISH_128: return GCRY_CIPHER_TWOFISH128; - case QCRYPTO_CIPHER_ALG_TWOFISH_256: + case QCRYPTO_CIPHER_ALGO_TWOFISH_256: return GCRY_CIPHER_TWOFISH; #ifdef CONFIG_CRYPTO_SM4 - case QCRYPTO_CIPHER_ALG_SM4: + case QCRYPTO_CIPHER_ALGO_SM4: return GCRY_CIPHER_SM4; #endif default: @@ 
-70,23 +70,23 @@ static int qcrypto_cipher_mode_to_gcry_mode(QCryptoCipherMode mode) } } -bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, +bool qcrypto_cipher_supports(QCryptoCipherAlgo alg, QCryptoCipherMode mode) { switch (alg) { - case QCRYPTO_CIPHER_ALG_DES: - case QCRYPTO_CIPHER_ALG_3DES: - case QCRYPTO_CIPHER_ALG_AES_128: - case QCRYPTO_CIPHER_ALG_AES_192: - case QCRYPTO_CIPHER_ALG_AES_256: - case QCRYPTO_CIPHER_ALG_CAST5_128: - case QCRYPTO_CIPHER_ALG_SERPENT_128: - case QCRYPTO_CIPHER_ALG_SERPENT_192: - case QCRYPTO_CIPHER_ALG_SERPENT_256: - case QCRYPTO_CIPHER_ALG_TWOFISH_128: - case QCRYPTO_CIPHER_ALG_TWOFISH_256: + case QCRYPTO_CIPHER_ALGO_DES: + case QCRYPTO_CIPHER_ALGO_3DES: + case QCRYPTO_CIPHER_ALGO_AES_128: + case QCRYPTO_CIPHER_ALGO_AES_192: + case QCRYPTO_CIPHER_ALGO_AES_256: + case QCRYPTO_CIPHER_ALGO_CAST5_128: + case QCRYPTO_CIPHER_ALGO_SERPENT_128: + case QCRYPTO_CIPHER_ALGO_SERPENT_192: + case QCRYPTO_CIPHER_ALGO_SERPENT_256: + case QCRYPTO_CIPHER_ALGO_TWOFISH_128: + case QCRYPTO_CIPHER_ALGO_TWOFISH_256: #ifdef CONFIG_CRYPTO_SM4 - case QCRYPTO_CIPHER_ALG_SM4: + case QCRYPTO_CIPHER_ALGO_SM4: #endif break; default: @@ -228,7 +228,7 @@ static const struct QCryptoCipherDriver qcrypto_gcrypt_ctr_driver = { .cipher_free = qcrypto_gcrypt_ctx_free, }; -static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, +static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgo alg, QCryptoCipherMode mode, const uint8_t *key, size_t nkey, @@ -246,7 +246,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, gcryalg = qcrypto_cipher_alg_to_gcry_alg(alg); if (gcryalg == GCRY_CIPHER_NONE) { error_setg(errp, "Unsupported cipher algorithm %s", - QCryptoCipherAlgorithm_str(alg)); + QCryptoCipherAlgo_str(alg)); return NULL; } diff --git a/crypto/cipher-gnutls.c.inc b/crypto/cipher-gnutls.c.inc index d3e231c13c9..b9450d48b0f 100644 --- a/crypto/cipher-gnutls.c.inc +++ b/crypto/cipher-gnutls.c.inc @@ -27,7 +27,7 @@ #define QEMU_GNUTLS_XTS #endif -bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, +bool qcrypto_cipher_supports(QCryptoCipherAlgo alg, QCryptoCipherMode mode) { @@ -35,11 +35,11 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, case QCRYPTO_CIPHER_MODE_ECB: case QCRYPTO_CIPHER_MODE_CBC: switch (alg) { - case QCRYPTO_CIPHER_ALG_AES_128: - case QCRYPTO_CIPHER_ALG_AES_192: - case QCRYPTO_CIPHER_ALG_AES_256: - case QCRYPTO_CIPHER_ALG_DES: - case QCRYPTO_CIPHER_ALG_3DES: + case QCRYPTO_CIPHER_ALGO_AES_128: + case QCRYPTO_CIPHER_ALGO_AES_192: + case QCRYPTO_CIPHER_ALGO_AES_256: + case QCRYPTO_CIPHER_ALGO_DES: + case QCRYPTO_CIPHER_ALGO_3DES: return true; default: return false; @@ -47,8 +47,8 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, #ifdef QEMU_GNUTLS_XTS case QCRYPTO_CIPHER_MODE_XTS: switch (alg) { - case QCRYPTO_CIPHER_ALG_AES_128: - case QCRYPTO_CIPHER_ALG_AES_256: + case QCRYPTO_CIPHER_ALGO_AES_128: + case QCRYPTO_CIPHER_ALGO_AES_256: return true; default: return false; @@ -229,7 +229,7 @@ static struct QCryptoCipherDriver gnutls_driver = { .cipher_free = qcrypto_gnutls_cipher_free, }; -static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, +static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgo alg, QCryptoCipherMode mode, const uint8_t *key, size_t nkey, @@ -244,10 +244,10 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, #ifdef QEMU_GNUTLS_XTS case QCRYPTO_CIPHER_MODE_XTS: switch (alg) { - case QCRYPTO_CIPHER_ALG_AES_128: + case 
QCRYPTO_CIPHER_ALGO_AES_128: galg = GNUTLS_CIPHER_AES_128_XTS; break; - case QCRYPTO_CIPHER_ALG_AES_256: + case QCRYPTO_CIPHER_ALGO_AES_256: galg = GNUTLS_CIPHER_AES_256_XTS; break; default: @@ -259,19 +259,19 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, case QCRYPTO_CIPHER_MODE_ECB: case QCRYPTO_CIPHER_MODE_CBC: switch (alg) { - case QCRYPTO_CIPHER_ALG_AES_128: + case QCRYPTO_CIPHER_ALGO_AES_128: galg = GNUTLS_CIPHER_AES_128_CBC; break; - case QCRYPTO_CIPHER_ALG_AES_192: + case QCRYPTO_CIPHER_ALGO_AES_192: galg = GNUTLS_CIPHER_AES_192_CBC; break; - case QCRYPTO_CIPHER_ALG_AES_256: + case QCRYPTO_CIPHER_ALGO_AES_256: galg = GNUTLS_CIPHER_AES_256_CBC; break; - case QCRYPTO_CIPHER_ALG_DES: + case QCRYPTO_CIPHER_ALGO_DES: galg = GNUTLS_CIPHER_DES_CBC; break; - case QCRYPTO_CIPHER_ALG_3DES: + case QCRYPTO_CIPHER_ALGO_3DES: galg = GNUTLS_CIPHER_3DES_CBC; break; default: @@ -284,7 +284,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, if (galg == GNUTLS_CIPHER_UNKNOWN) { error_setg(errp, "Unsupported cipher algorithm %s with %s mode", - QCryptoCipherAlgorithm_str(alg), + QCryptoCipherAlgo_str(alg), QCryptoCipherMode_str(mode)); return NULL; } @@ -310,8 +310,8 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, } } - if (alg == QCRYPTO_CIPHER_ALG_DES || - alg == QCRYPTO_CIPHER_ALG_3DES) + if (alg == QCRYPTO_CIPHER_ALGO_DES || + alg == QCRYPTO_CIPHER_ALGO_3DES) ctx->blocksize = 8; else ctx->blocksize = 16; diff --git a/crypto/cipher-nettle.c.inc b/crypto/cipher-nettle.c.inc index 42b39e18a23..ae913637721 100644 --- a/crypto/cipher-nettle.c.inc +++ b/crypto/cipher-nettle.c.inc @@ -454,24 +454,24 @@ DEFINE_ECB(qcrypto_nettle_sm4, sm4_encrypt_native, sm4_decrypt_native) #endif -bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, +bool qcrypto_cipher_supports(QCryptoCipherAlgo alg, QCryptoCipherMode mode) { switch (alg) { - case QCRYPTO_CIPHER_ALG_DES: - case QCRYPTO_CIPHER_ALG_3DES: - case QCRYPTO_CIPHER_ALG_AES_128: - case QCRYPTO_CIPHER_ALG_AES_192: - case QCRYPTO_CIPHER_ALG_AES_256: - case QCRYPTO_CIPHER_ALG_CAST5_128: - case QCRYPTO_CIPHER_ALG_SERPENT_128: - case QCRYPTO_CIPHER_ALG_SERPENT_192: - case QCRYPTO_CIPHER_ALG_SERPENT_256: - case QCRYPTO_CIPHER_ALG_TWOFISH_128: - case QCRYPTO_CIPHER_ALG_TWOFISH_192: - case QCRYPTO_CIPHER_ALG_TWOFISH_256: + case QCRYPTO_CIPHER_ALGO_DES: + case QCRYPTO_CIPHER_ALGO_3DES: + case QCRYPTO_CIPHER_ALGO_AES_128: + case QCRYPTO_CIPHER_ALGO_AES_192: + case QCRYPTO_CIPHER_ALGO_AES_256: + case QCRYPTO_CIPHER_ALGO_CAST5_128: + case QCRYPTO_CIPHER_ALGO_SERPENT_128: + case QCRYPTO_CIPHER_ALGO_SERPENT_192: + case QCRYPTO_CIPHER_ALGO_SERPENT_256: + case QCRYPTO_CIPHER_ALGO_TWOFISH_128: + case QCRYPTO_CIPHER_ALGO_TWOFISH_192: + case QCRYPTO_CIPHER_ALGO_TWOFISH_256: #ifdef CONFIG_CRYPTO_SM4 - case QCRYPTO_CIPHER_ALG_SM4: + case QCRYPTO_CIPHER_ALGO_SM4: #endif break; default: @@ -489,7 +489,7 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, } } -static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, +static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgo alg, QCryptoCipherMode mode, const uint8_t *key, size_t nkey, @@ -510,7 +510,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, } switch (alg) { - case QCRYPTO_CIPHER_ALG_DES: + case QCRYPTO_CIPHER_ALGO_DES: { QCryptoNettleDES *ctx; const QCryptoCipherDriver *drv; @@ -525,8 +525,10 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, case 
QCRYPTO_CIPHER_MODE_CTR: drv = &qcrypto_nettle_des_driver_ctr; break; - default: + case QCRYPTO_CIPHER_MODE_XTS: goto bad_cipher_mode; + default: + g_assert_not_reached(); } ctx = g_new0(QCryptoNettleDES, 1); @@ -536,7 +538,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, return &ctx->base; } - case QCRYPTO_CIPHER_ALG_3DES: + case QCRYPTO_CIPHER_ALGO_3DES: { QCryptoNettleDES3 *ctx; const QCryptoCipherDriver *drv; @@ -551,8 +553,10 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, case QCRYPTO_CIPHER_MODE_CTR: drv = &qcrypto_nettle_des3_driver_ctr; break; - default: + case QCRYPTO_CIPHER_MODE_XTS: goto bad_cipher_mode; + default: + g_assert_not_reached(); } ctx = g_new0(QCryptoNettleDES3, 1); @@ -561,7 +565,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, return &ctx->base; } - case QCRYPTO_CIPHER_ALG_AES_128: + case QCRYPTO_CIPHER_ALGO_AES_128: { QCryptoNettleAES128 *ctx = g_new0(QCryptoNettleAES128, 1); @@ -590,7 +594,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, return &ctx->base; } - case QCRYPTO_CIPHER_ALG_AES_192: + case QCRYPTO_CIPHER_ALGO_AES_192: { QCryptoNettleAES192 *ctx = g_new0(QCryptoNettleAES192, 1); @@ -619,7 +623,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, return &ctx->base; } - case QCRYPTO_CIPHER_ALG_AES_256: + case QCRYPTO_CIPHER_ALGO_AES_256: { QCryptoNettleAES256 *ctx = g_new0(QCryptoNettleAES256, 1); @@ -648,7 +652,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, return &ctx->base; } - case QCRYPTO_CIPHER_ALG_CAST5_128: + case QCRYPTO_CIPHER_ALGO_CAST5_128: { QCryptoNettleCAST128 *ctx; const QCryptoCipherDriver *drv; @@ -663,8 +667,10 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, case QCRYPTO_CIPHER_MODE_CTR: drv = &qcrypto_nettle_cast128_driver_ctr; break; - default: + case QCRYPTO_CIPHER_MODE_XTS: goto bad_cipher_mode; + default: + g_assert_not_reached(); } ctx = g_new0(QCryptoNettleCAST128, 1); @@ -674,9 +680,9 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, return &ctx->base; } - case QCRYPTO_CIPHER_ALG_SERPENT_128: - case QCRYPTO_CIPHER_ALG_SERPENT_192: - case QCRYPTO_CIPHER_ALG_SERPENT_256: + case QCRYPTO_CIPHER_ALGO_SERPENT_128: + case QCRYPTO_CIPHER_ALGO_SERPENT_192: + case QCRYPTO_CIPHER_ALGO_SERPENT_256: { QCryptoNettleSerpent *ctx = g_new0(QCryptoNettleSerpent, 1); @@ -703,9 +709,9 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, return &ctx->base; } - case QCRYPTO_CIPHER_ALG_TWOFISH_128: - case QCRYPTO_CIPHER_ALG_TWOFISH_192: - case QCRYPTO_CIPHER_ALG_TWOFISH_256: + case QCRYPTO_CIPHER_ALGO_TWOFISH_128: + case QCRYPTO_CIPHER_ALGO_TWOFISH_192: + case QCRYPTO_CIPHER_ALGO_TWOFISH_256: { QCryptoNettleTwofish *ctx = g_new0(QCryptoNettleTwofish, 1); @@ -732,18 +738,25 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, return &ctx->base; } #ifdef CONFIG_CRYPTO_SM4 - case QCRYPTO_CIPHER_ALG_SM4: + case QCRYPTO_CIPHER_ALGO_SM4: { - QCryptoNettleSm4 *ctx = g_new0(QCryptoNettleSm4, 1); + QCryptoNettleSm4 *ctx; + const QCryptoCipherDriver *drv; switch (mode) { case QCRYPTO_CIPHER_MODE_ECB: - ctx->base.driver = &qcrypto_nettle_sm4_driver_ecb; + drv = &qcrypto_nettle_sm4_driver_ecb; break; - default: + case QCRYPTO_CIPHER_MODE_CBC: + case QCRYPTO_CIPHER_MODE_CTR: + case QCRYPTO_CIPHER_MODE_XTS: goto bad_cipher_mode; + default: + g_assert_not_reached(); } + ctx = g_new0(QCryptoNettleSm4, 
1); + ctx->base.driver = drv; sm4_set_encrypt_key(&ctx->key[0], key); sm4_set_decrypt_key(&ctx->key[1], key); @@ -753,7 +766,7 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, default: error_setg(errp, "Unsupported cipher algorithm %s", - QCryptoCipherAlgorithm_str(alg)); + QCryptoCipherAlgo_str(alg)); return NULL; } diff --git a/crypto/cipher-stub.c.inc b/crypto/cipher-stub.c.inc new file mode 100644 index 00000000000..1b7ea81eac4 --- /dev/null +++ b/crypto/cipher-stub.c.inc @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * QEMU Crypto cipher impl stub + * + * Copyright (c) 2025 Red Hat, Inc. + * + */ + +bool qcrypto_cipher_supports(QCryptoCipherAlgo alg, + QCryptoCipherMode mode) +{ + return false; +} + +static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgo alg, + QCryptoCipherMode mode, + const uint8_t *key, + size_t nkey, + Error **errp) +{ + if (!qcrypto_cipher_validate_key_length(alg, mode, nkey, errp)) { + return NULL; + } + + error_setg(errp, + "Unsupported cipher algorithm %s, no crypto library enabled in build", + QCryptoCipherAlgo_str(alg)); + return NULL; +} diff --git a/crypto/cipher.c b/crypto/cipher.c index 5f512768ea3..229710f76b4 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -25,39 +25,39 @@ #include "cipherpriv.h" -static const size_t alg_key_len[QCRYPTO_CIPHER_ALG__MAX] = { - [QCRYPTO_CIPHER_ALG_AES_128] = 16, - [QCRYPTO_CIPHER_ALG_AES_192] = 24, - [QCRYPTO_CIPHER_ALG_AES_256] = 32, - [QCRYPTO_CIPHER_ALG_DES] = 8, - [QCRYPTO_CIPHER_ALG_3DES] = 24, - [QCRYPTO_CIPHER_ALG_CAST5_128] = 16, - [QCRYPTO_CIPHER_ALG_SERPENT_128] = 16, - [QCRYPTO_CIPHER_ALG_SERPENT_192] = 24, - [QCRYPTO_CIPHER_ALG_SERPENT_256] = 32, - [QCRYPTO_CIPHER_ALG_TWOFISH_128] = 16, - [QCRYPTO_CIPHER_ALG_TWOFISH_192] = 24, - [QCRYPTO_CIPHER_ALG_TWOFISH_256] = 32, +static const size_t alg_key_len[QCRYPTO_CIPHER_ALGO__MAX] = { + [QCRYPTO_CIPHER_ALGO_AES_128] = 16, + [QCRYPTO_CIPHER_ALGO_AES_192] = 24, + [QCRYPTO_CIPHER_ALGO_AES_256] = 32, + [QCRYPTO_CIPHER_ALGO_DES] = 8, + [QCRYPTO_CIPHER_ALGO_3DES] = 24, + [QCRYPTO_CIPHER_ALGO_CAST5_128] = 16, + [QCRYPTO_CIPHER_ALGO_SERPENT_128] = 16, + [QCRYPTO_CIPHER_ALGO_SERPENT_192] = 24, + [QCRYPTO_CIPHER_ALGO_SERPENT_256] = 32, + [QCRYPTO_CIPHER_ALGO_TWOFISH_128] = 16, + [QCRYPTO_CIPHER_ALGO_TWOFISH_192] = 24, + [QCRYPTO_CIPHER_ALGO_TWOFISH_256] = 32, #ifdef CONFIG_CRYPTO_SM4 - [QCRYPTO_CIPHER_ALG_SM4] = 16, + [QCRYPTO_CIPHER_ALGO_SM4] = 16, #endif }; -static const size_t alg_block_len[QCRYPTO_CIPHER_ALG__MAX] = { - [QCRYPTO_CIPHER_ALG_AES_128] = 16, - [QCRYPTO_CIPHER_ALG_AES_192] = 16, - [QCRYPTO_CIPHER_ALG_AES_256] = 16, - [QCRYPTO_CIPHER_ALG_DES] = 8, - [QCRYPTO_CIPHER_ALG_3DES] = 8, - [QCRYPTO_CIPHER_ALG_CAST5_128] = 8, - [QCRYPTO_CIPHER_ALG_SERPENT_128] = 16, - [QCRYPTO_CIPHER_ALG_SERPENT_192] = 16, - [QCRYPTO_CIPHER_ALG_SERPENT_256] = 16, - [QCRYPTO_CIPHER_ALG_TWOFISH_128] = 16, - [QCRYPTO_CIPHER_ALG_TWOFISH_192] = 16, - [QCRYPTO_CIPHER_ALG_TWOFISH_256] = 16, +static const size_t alg_block_len[QCRYPTO_CIPHER_ALGO__MAX] = { + [QCRYPTO_CIPHER_ALGO_AES_128] = 16, + [QCRYPTO_CIPHER_ALGO_AES_192] = 16, + [QCRYPTO_CIPHER_ALGO_AES_256] = 16, + [QCRYPTO_CIPHER_ALGO_DES] = 8, + [QCRYPTO_CIPHER_ALGO_3DES] = 8, + [QCRYPTO_CIPHER_ALGO_CAST5_128] = 8, + [QCRYPTO_CIPHER_ALGO_SERPENT_128] = 16, + [QCRYPTO_CIPHER_ALGO_SERPENT_192] = 16, + [QCRYPTO_CIPHER_ALGO_SERPENT_256] = 16, + [QCRYPTO_CIPHER_ALGO_TWOFISH_128] = 16, + [QCRYPTO_CIPHER_ALGO_TWOFISH_192] = 16, + [QCRYPTO_CIPHER_ALGO_TWOFISH_256] = 16, #ifdef 
CONFIG_CRYPTO_SM4 - [QCRYPTO_CIPHER_ALG_SM4] = 16, + [QCRYPTO_CIPHER_ALGO_SM4] = 16, #endif }; @@ -69,21 +69,21 @@ static const bool mode_need_iv[QCRYPTO_CIPHER_MODE__MAX] = { }; -size_t qcrypto_cipher_get_block_len(QCryptoCipherAlgorithm alg) +size_t qcrypto_cipher_get_block_len(QCryptoCipherAlgo alg) { assert(alg < G_N_ELEMENTS(alg_key_len)); return alg_block_len[alg]; } -size_t qcrypto_cipher_get_key_len(QCryptoCipherAlgorithm alg) +size_t qcrypto_cipher_get_key_len(QCryptoCipherAlgo alg) { assert(alg < G_N_ELEMENTS(alg_key_len)); return alg_key_len[alg]; } -size_t qcrypto_cipher_get_iv_len(QCryptoCipherAlgorithm alg, +size_t qcrypto_cipher_get_iv_len(QCryptoCipherAlgo alg, QCryptoCipherMode mode) { if (alg >= G_N_ELEMENTS(alg_block_len)) { @@ -101,20 +101,20 @@ size_t qcrypto_cipher_get_iv_len(QCryptoCipherAlgorithm alg, static bool -qcrypto_cipher_validate_key_length(QCryptoCipherAlgorithm alg, +qcrypto_cipher_validate_key_length(QCryptoCipherAlgo alg, QCryptoCipherMode mode, size_t nkey, Error **errp) { - if ((unsigned)alg >= QCRYPTO_CIPHER_ALG__MAX) { + if ((unsigned)alg >= QCRYPTO_CIPHER_ALGO__MAX) { error_setg(errp, "Cipher algorithm %d out of range", alg); return false; } if (mode == QCRYPTO_CIPHER_MODE_XTS) { - if (alg == QCRYPTO_CIPHER_ALG_DES || - alg == QCRYPTO_CIPHER_ALG_3DES) { + if (alg == QCRYPTO_CIPHER_ALGO_DES || + alg == QCRYPTO_CIPHER_ALGO_3DES) { error_setg(errp, "XTS mode not compatible with DES/3DES"); return false; } @@ -145,10 +145,10 @@ qcrypto_cipher_validate_key_length(QCryptoCipherAlgorithm alg, #elif defined CONFIG_GNUTLS_CRYPTO #include "cipher-gnutls.c.inc" #else -#include "cipher-builtin.c.inc" +#include "cipher-stub.c.inc" #endif -QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgorithm alg, +QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgo alg, QCryptoCipherMode mode, const uint8_t *key, size_t nkey, Error **errp) diff --git a/crypto/cipherpriv.h b/crypto/cipherpriv.h index 396527857d6..64737ce9615 100644 --- a/crypto/cipherpriv.h +++ b/crypto/cipherpriv.h @@ -42,7 +42,7 @@ struct QCryptoCipherDriver { #include "afalgpriv.h" extern QCryptoCipher * -qcrypto_afalg_cipher_ctx_new(QCryptoCipherAlgorithm alg, +qcrypto_afalg_cipher_ctx_new(QCryptoCipherAlgo alg, QCryptoCipherMode mode, const uint8_t *key, size_t nkey, Error **errp); diff --git a/crypto/der.c b/crypto/der.c index ebbecfc3fe1..81367524c3a 100644 --- a/crypto/der.c +++ b/crypto/der.c @@ -408,19 +408,6 @@ void qcrypto_der_encode_octet_str(QCryptoEncodeContext *ctx, qcrypto_der_encode_prim(ctx, tag, src, src_len); } -void qcrypto_der_encode_octet_str_begin(QCryptoEncodeContext *ctx) -{ - uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV, - QCRYPTO_DER_TAG_ENC_PRIM, - QCRYPTO_DER_TYPE_TAG_OCT_STR); - qcrypto_der_encode_cons_begin(ctx, tag); -} - -void qcrypto_der_encode_octet_str_end(QCryptoEncodeContext *ctx) -{ - qcrypto_der_encode_cons_end(ctx); -} - size_t qcrypto_der_encode_ctx_buffer_len(QCryptoEncodeContext *ctx) { return ctx->root.dlen; diff --git a/crypto/der.h b/crypto/der.h index f4ba6da28a0..bcfa4a24952 100644 --- a/crypto/der.h +++ b/crypto/der.h @@ -242,28 +242,6 @@ void qcrypto_der_encode_null(QCryptoEncodeContext *ctx); void qcrypto_der_encode_octet_str(QCryptoEncodeContext *ctx, const uint8_t *src, size_t src_len); -/** - * qcrypto_der_encode_octet_str_begin: - * @ctx: the encode context. - * - * Start encoding a octet string, All fields between - * qcrypto_der_encode_octet_str_begin and qcrypto_der_encode_octet_str_end - * are encoded as an octet string. 
This is useful when we need to encode a - * encoded SEQUENCE as OCTET STRING. - */ -void qcrypto_der_encode_octet_str_begin(QCryptoEncodeContext *ctx); - -/** - * qcrypto_der_encode_octet_str_end: - * @ctx: the encode context. - * - * Finish encoding a octet string, All fields between - * qcrypto_der_encode_octet_str_begin and qcrypto_der_encode_octet_str_end - * are encoded as an octet string. This is useful when we need to encode a - * encoded SEQUENCE as OCTET STRING. - */ -void qcrypto_der_encode_octet_str_end(QCryptoEncodeContext *ctx); - /** * qcrypto_der_encode_ctx_buffer_len: * @ctx: the encode context. diff --git a/crypto/hash-afalg.c b/crypto/hash-afalg.c index 3ebea392920..bd3fe3b4272 100644 --- a/crypto/hash-afalg.c +++ b/crypto/hash-afalg.c @@ -1,6 +1,7 @@ /* * QEMU Crypto af_alg-backend hash/hmac support * + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates * Copyright (c) 2017 HUAWEI TECHNOLOGIES CO., LTD. * * Authors: @@ -20,7 +21,7 @@ #include "hmacpriv.h" static char * -qcrypto_afalg_hash_format_name(QCryptoHashAlgorithm alg, +qcrypto_afalg_hash_format_name(QCryptoHashAlgo alg, bool is_hmac, Error **errp) { @@ -28,25 +29,25 @@ qcrypto_afalg_hash_format_name(QCryptoHashAlgorithm alg, const char *alg_name; switch (alg) { - case QCRYPTO_HASH_ALG_MD5: + case QCRYPTO_HASH_ALGO_MD5: alg_name = "md5"; break; - case QCRYPTO_HASH_ALG_SHA1: + case QCRYPTO_HASH_ALGO_SHA1: alg_name = "sha1"; break; - case QCRYPTO_HASH_ALG_SHA224: + case QCRYPTO_HASH_ALGO_SHA224: alg_name = "sha224"; break; - case QCRYPTO_HASH_ALG_SHA256: + case QCRYPTO_HASH_ALGO_SHA256: alg_name = "sha256"; break; - case QCRYPTO_HASH_ALG_SHA384: + case QCRYPTO_HASH_ALGO_SHA384: alg_name = "sha384"; break; - case QCRYPTO_HASH_ALG_SHA512: + case QCRYPTO_HASH_ALGO_SHA512: alg_name = "sha512"; break; - case QCRYPTO_HASH_ALG_RIPEMD160: + case QCRYPTO_HASH_ALGO_RIPEMD160: alg_name = "rmd160"; break; @@ -58,18 +59,18 @@ qcrypto_afalg_hash_format_name(QCryptoHashAlgorithm alg, if (is_hmac) { name = g_strdup_printf("hmac(%s)", alg_name); } else { - name = g_strdup_printf("%s", alg_name); + name = g_strdup(alg_name); } return name; } -static QCryptoAFAlg * -qcrypto_afalg_hash_hmac_ctx_new(QCryptoHashAlgorithm alg, +static QCryptoAFAlgo * +qcrypto_afalg_hash_hmac_ctx_new(QCryptoHashAlgo alg, const uint8_t *key, size_t nkey, bool is_hmac, Error **errp) { - QCryptoAFAlg *afalg; + QCryptoAFAlgo *afalg; char *name; name = qcrypto_afalg_hash_format_name(alg, is_hmac, errp); @@ -98,89 +99,160 @@ qcrypto_afalg_hash_hmac_ctx_new(QCryptoHashAlgorithm alg, return afalg; } -static QCryptoAFAlg * -qcrypto_afalg_hash_ctx_new(QCryptoHashAlgorithm alg, +static QCryptoAFAlgo * +qcrypto_afalg_hash_ctx_new(QCryptoHashAlgo alg, Error **errp) { return qcrypto_afalg_hash_hmac_ctx_new(alg, NULL, 0, false, errp); } -QCryptoAFAlg * -qcrypto_afalg_hmac_ctx_new(QCryptoHashAlgorithm alg, +QCryptoAFAlgo * +qcrypto_afalg_hmac_ctx_new(QCryptoHashAlgo alg, const uint8_t *key, size_t nkey, Error **errp) { return qcrypto_afalg_hash_hmac_ctx_new(alg, key, nkey, true, errp); } -static int -qcrypto_afalg_hash_hmac_bytesv(QCryptoAFAlg *hmac, - QCryptoHashAlgorithm alg, - const struct iovec *iov, - size_t niov, uint8_t **result, - size_t *resultlen, - Error **errp) +static +QCryptoHash *qcrypto_afalg_hash_new(QCryptoHashAlgo alg, Error **errp) { - QCryptoAFAlg *afalg; - struct iovec outv; - int ret = 0; - bool is_hmac = (hmac != NULL) ? 
true : false; - const int expect_len = qcrypto_hash_digest_len(alg); + /* Check if hash algorithm is supported */ + char *alg_name = qcrypto_afalg_hash_format_name(alg, false, NULL); + QCryptoHash *hash; - if (*resultlen == 0) { - *resultlen = expect_len; - *result = g_new0(uint8_t, *resultlen); - } else if (*resultlen != expect_len) { - error_setg(errp, - "Result buffer size %zu is not match hash %d", - *resultlen, expect_len); - return -1; + if (alg_name == NULL) { + error_setg(errp, "Unknown hash algorithm %d", alg); + return NULL; } - if (is_hmac) { - afalg = hmac; - } else { - afalg = qcrypto_afalg_hash_ctx_new(alg, errp); - if (!afalg) { - return -1; - } + g_free(alg_name); + + hash = g_new(QCryptoHash, 1); + hash->alg = alg; + hash->opaque = qcrypto_afalg_hash_ctx_new(alg, errp); + if (!hash->opaque) { + free(hash); + return NULL; } + return hash; +} + +static +void qcrypto_afalg_hash_free(QCryptoHash *hash) +{ + QCryptoAFAlgo *ctx = hash->opaque; + + if (ctx) { + qcrypto_afalg_comm_free(ctx); + } + + g_free(hash); +} + +/** + * Send data to the kernel's crypto core. + * + * The more_data parameter is used to notify the crypto engine + * that this is an "update" operation, and that more data will + * be provided to calculate the final hash. + */ +static +int qcrypto_afalg_send_to_kernel(QCryptoAFAlgo *afalg, + const struct iovec *iov, + size_t niov, + bool more_data, + Error **errp) +{ + int ret = 0; + int flags = (more_data ? MSG_MORE : 0); + /* send data to kernel's crypto core */ - ret = iov_send_recv(afalg->opfd, iov, niov, - 0, iov_size(iov, niov), true); + ret = iov_send_recv_with_flags(afalg->opfd, flags, iov, niov, + 0, iov_size(iov, niov), true); if (ret < 0) { error_setg_errno(errp, errno, "Send data to afalg-core failed"); - goto out; + ret = -1; + } else { + /* No error, so return 0 */ + ret = 0; + } + + return ret; +} + +static +int qcrypto_afalg_recv_from_kernel(QCryptoAFAlgo *afalg, + QCryptoHashAlgo alg, + uint8_t **result, + size_t *result_len, + Error **errp) +{ + struct iovec outv; + int ret; + const int expected_len = qcrypto_hash_digest_len(alg); + + if (*result_len == 0) { + *result_len = expected_len; + *result = g_new0(uint8_t, *result_len); + } else if (*result_len != expected_len) { + error_setg(errp, + "Result buffer size %zu is not match hash %d", + *result_len, expected_len); + return -1; } /* hash && get result */ outv.iov_base = *result; - outv.iov_len = *resultlen; + outv.iov_len = *result_len; ret = iov_send_recv(afalg->opfd, &outv, 1, 0, iov_size(&outv, 1), false); if (ret < 0) { error_setg_errno(errp, errno, "Recv result from afalg-core failed"); - } else { - ret = 0; + return -1; } -out: - if (!is_hmac) { - qcrypto_afalg_comm_free(afalg); - } - return ret; + return 0; +} + +static +int qcrypto_afalg_hash_update(QCryptoHash *hash, + const struct iovec *iov, + size_t niov, + Error **errp) +{ + return qcrypto_afalg_send_to_kernel((QCryptoAFAlgo *) hash->opaque, + iov, niov, true, errp); +} + +static +int qcrypto_afalg_hash_finalize(QCryptoHash *hash, + uint8_t **result, + size_t *result_len, + Error **errp) +{ + return qcrypto_afalg_recv_from_kernel((QCryptoAFAlgo *) hash->opaque, + hash->alg, result, result_len, errp); } static int -qcrypto_afalg_hash_bytesv(QCryptoHashAlgorithm alg, - const struct iovec *iov, - size_t niov, uint8_t **result, - size_t *resultlen, - Error **errp) +qcrypto_afalg_hash_hmac_bytesv(QCryptoAFAlgo *hmac, + QCryptoHashAlgo alg, + const struct iovec *iov, + size_t niov, uint8_t **result, + size_t *resultlen, + Error **errp) 
{ - return qcrypto_afalg_hash_hmac_bytesv(NULL, alg, iov, niov, result, - resultlen, errp); + int ret = 0; + + ret = qcrypto_afalg_send_to_kernel(hmac, iov, niov, false, errp); + if (ret == 0) { + ret = qcrypto_afalg_recv_from_kernel(hmac, alg, result, + resultlen, errp); + } + + return ret; } static int @@ -197,14 +269,17 @@ qcrypto_afalg_hmac_bytesv(QCryptoHmac *hmac, static void qcrypto_afalg_hmac_ctx_free(QCryptoHmac *hmac) { - QCryptoAFAlg *afalg; + QCryptoAFAlgo *afalg; afalg = hmac->opaque; qcrypto_afalg_comm_free(afalg); } QCryptoHashDriver qcrypto_hash_afalg_driver = { - .hash_bytesv = qcrypto_afalg_hash_bytesv, + .hash_new = qcrypto_afalg_hash_new, + .hash_free = qcrypto_afalg_hash_free, + .hash_update = qcrypto_afalg_hash_update, + .hash_finalize = qcrypto_afalg_hash_finalize }; QCryptoHmacDriver qcrypto_hmac_afalg_driver = { diff --git a/crypto/hash-gcrypt.c b/crypto/hash-gcrypt.c index 829e48258d3..af61c4e75da 100644 --- a/crypto/hash-gcrypt.c +++ b/crypto/hash-gcrypt.c @@ -1,6 +1,7 @@ /* * QEMU Crypto hash algorithms * + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates * Copyright (c) 2016 Red Hat, Inc. * * This library is free software; you can redistribute it and/or @@ -25,92 +26,115 @@ #include "hashpriv.h" -static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALG__MAX] = { - [QCRYPTO_HASH_ALG_MD5] = GCRY_MD_MD5, - [QCRYPTO_HASH_ALG_SHA1] = GCRY_MD_SHA1, - [QCRYPTO_HASH_ALG_SHA224] = GCRY_MD_SHA224, - [QCRYPTO_HASH_ALG_SHA256] = GCRY_MD_SHA256, - [QCRYPTO_HASH_ALG_SHA384] = GCRY_MD_SHA384, - [QCRYPTO_HASH_ALG_SHA512] = GCRY_MD_SHA512, - [QCRYPTO_HASH_ALG_RIPEMD160] = GCRY_MD_RMD160, +static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALGO__MAX] = { + [QCRYPTO_HASH_ALGO_MD5] = GCRY_MD_MD5, + [QCRYPTO_HASH_ALGO_SHA1] = GCRY_MD_SHA1, + [QCRYPTO_HASH_ALGO_SHA224] = GCRY_MD_SHA224, + [QCRYPTO_HASH_ALGO_SHA256] = GCRY_MD_SHA256, + [QCRYPTO_HASH_ALGO_SHA384] = GCRY_MD_SHA384, + [QCRYPTO_HASH_ALGO_SHA512] = GCRY_MD_SHA512, + [QCRYPTO_HASH_ALGO_RIPEMD160] = GCRY_MD_RMD160, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = GCRY_MD_SM3, +#endif }; -gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg) +gboolean qcrypto_hash_supports(QCryptoHashAlgo alg) { if (alg < G_N_ELEMENTS(qcrypto_hash_alg_map) && qcrypto_hash_alg_map[alg] != GCRY_MD_NONE) { - return true; + return gcry_md_test_algo(qcrypto_hash_alg_map[alg]) == 0; } return false; } - -static int -qcrypto_gcrypt_hash_bytesv(QCryptoHashAlgorithm alg, - const struct iovec *iov, - size_t niov, - uint8_t **result, - size_t *resultlen, - Error **errp) +static +QCryptoHash *qcrypto_gcrypt_hash_new(QCryptoHashAlgo alg, Error **errp) { - int i, ret; - gcry_md_hd_t md; - unsigned char *digest; - - if (!qcrypto_hash_supports(alg)) { - error_setg(errp, - "Unknown hash algorithm %d", - alg); - return -1; - } + QCryptoHash *hash; + gcry_error_t ret; - ret = gcry_md_open(&md, qcrypto_hash_alg_map[alg], 0); + hash = g_new(QCryptoHash, 1); + hash->alg = alg; + hash->opaque = g_new(gcry_md_hd_t, 1); - if (ret < 0) { + ret = gcry_md_open((gcry_md_hd_t *) hash->opaque, + qcrypto_hash_alg_map[alg], 0); + if (ret != 0) { error_setg(errp, "Unable to initialize hash algorithm: %s", gcry_strerror(ret)); - return -1; + g_free(hash->opaque); + g_free(hash); + return NULL; } + return hash; +} - for (i = 0; i < niov; i++) { - gcry_md_write(md, iov[i].iov_base, iov[i].iov_len); +static +void qcrypto_gcrypt_hash_free(QCryptoHash *hash) +{ + gcry_md_hd_t *ctx = hash->opaque; + + if (ctx) { + gcry_md_close(*ctx); + g_free(ctx); } - ret = 
gcry_md_get_algo_dlen(qcrypto_hash_alg_map[alg]); - if (ret <= 0) { - error_setg(errp, - "Unable to get hash length: %s", - gcry_strerror(ret)); - goto error; + g_free(hash); +} + + +static +int qcrypto_gcrypt_hash_update(QCryptoHash *hash, + const struct iovec *iov, + size_t niov, + Error **errp) +{ + gcry_md_hd_t *ctx = hash->opaque; + + for (int i = 0; i < niov; i++) { + gcry_md_write(*ctx, iov[i].iov_base, iov[i].iov_len); } - if (*resultlen == 0) { - *resultlen = ret; - *result = g_new0(uint8_t, *resultlen); - } else if (*resultlen != ret) { - error_setg(errp, - "Result buffer size %zu is smaller than hash %d", - *resultlen, ret); - goto error; + + return 0; +} + +static +int qcrypto_gcrypt_hash_finalize(QCryptoHash *hash, + uint8_t **result, + size_t *result_len, + Error **errp) +{ + int ret; + unsigned char *digest; + gcry_md_hd_t *ctx = hash->opaque; + + ret = gcry_md_get_algo_dlen(qcrypto_hash_alg_map[hash->alg]); + if (ret == 0) { + error_setg(errp, "Unable to get hash length"); + return -1; } - digest = gcry_md_read(md, 0); - if (!digest) { + if (*result_len == 0) { + *result_len = ret; + *result = g_new(uint8_t, *result_len); + } else if (*result_len != ret) { error_setg(errp, - "No digest produced"); - goto error; + "Result buffer size %zu is smaller than hash %d", + *result_len, ret); + return -1; } - memcpy(*result, digest, *resultlen); - gcry_md_close(md); + /* Digest is freed by gcry_md_close(), copy it */ + digest = gcry_md_read(*ctx, 0); + memcpy(*result, digest, *result_len); return 0; - - error: - gcry_md_close(md); - return -1; } - QCryptoHashDriver qcrypto_hash_lib_driver = { - .hash_bytesv = qcrypto_gcrypt_hash_bytesv, + .hash_new = qcrypto_gcrypt_hash_new, + .hash_update = qcrypto_gcrypt_hash_update, + .hash_finalize = qcrypto_gcrypt_hash_finalize, + .hash_free = qcrypto_gcrypt_hash_free, }; diff --git a/crypto/hash-glib.c b/crypto/hash-glib.c index 82de9db705c..809cef98aee 100644 --- a/crypto/hash-glib.c +++ b/crypto/hash-glib.c @@ -1,6 +1,7 @@ /* * QEMU Crypto hash algorithms * + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates * Copyright (c) 2016 Red Hat, Inc. 
* * This library is free software; you can redistribute it and/or @@ -24,17 +25,17 @@ #include "hashpriv.h" -static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALG__MAX] = { - [QCRYPTO_HASH_ALG_MD5] = G_CHECKSUM_MD5, - [QCRYPTO_HASH_ALG_SHA1] = G_CHECKSUM_SHA1, - [QCRYPTO_HASH_ALG_SHA224] = -1, - [QCRYPTO_HASH_ALG_SHA256] = G_CHECKSUM_SHA256, - [QCRYPTO_HASH_ALG_SHA384] = -1, - [QCRYPTO_HASH_ALG_SHA512] = G_CHECKSUM_SHA512, - [QCRYPTO_HASH_ALG_RIPEMD160] = -1, +static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALGO__MAX] = { + [QCRYPTO_HASH_ALGO_MD5] = G_CHECKSUM_MD5, + [QCRYPTO_HASH_ALGO_SHA1] = G_CHECKSUM_SHA1, + [QCRYPTO_HASH_ALGO_SHA224] = -1, + [QCRYPTO_HASH_ALGO_SHA256] = G_CHECKSUM_SHA256, + [QCRYPTO_HASH_ALGO_SHA384] = G_CHECKSUM_SHA384, + [QCRYPTO_HASH_ALGO_SHA512] = G_CHECKSUM_SHA512, + [QCRYPTO_HASH_ALGO_RIPEMD160] = -1, }; -gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg) +gboolean qcrypto_hash_supports(QCryptoHashAlgo alg) { if (alg < G_N_ELEMENTS(qcrypto_hash_alg_map) && qcrypto_hash_alg_map[alg] != -1) { @@ -43,58 +44,78 @@ gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg) return false; } - -static int -qcrypto_glib_hash_bytesv(QCryptoHashAlgorithm alg, - const struct iovec *iov, - size_t niov, - uint8_t **result, - size_t *resultlen, - Error **errp) +static +QCryptoHash *qcrypto_glib_hash_new(QCryptoHashAlgo alg, + Error **errp) { - int i, ret; - GChecksum *cs; + QCryptoHash *hash; - if (!qcrypto_hash_supports(alg)) { - error_setg(errp, - "Unknown hash algorithm %d", - alg); - return -1; + hash = g_new(QCryptoHash, 1); + hash->alg = alg; + hash->opaque = g_checksum_new(qcrypto_hash_alg_map[alg]); + + return hash; +} + +static +void qcrypto_glib_hash_free(QCryptoHash *hash) +{ + if (hash->opaque) { + g_checksum_free(hash->opaque); } - cs = g_checksum_new(qcrypto_hash_alg_map[alg]); + g_free(hash); +} + + +static +int qcrypto_glib_hash_update(QCryptoHash *hash, + const struct iovec *iov, + size_t niov, + Error **errp) +{ + GChecksum *ctx = hash->opaque; - for (i = 0; i < niov; i++) { - g_checksum_update(cs, iov[i].iov_base, iov[i].iov_len); + for (int i = 0; i < niov; i++) { + g_checksum_update(ctx, iov[i].iov_base, iov[i].iov_len); } - ret = g_checksum_type_get_length(qcrypto_hash_alg_map[alg]); + return 0; +} + +static +int qcrypto_glib_hash_finalize(QCryptoHash *hash, + uint8_t **result, + size_t *result_len, + Error **errp) +{ + int ret; + GChecksum *ctx = hash->opaque; + + ret = g_checksum_type_get_length(qcrypto_hash_alg_map[hash->alg]); if (ret < 0) { - error_setg(errp, "%s", - "Unable to get hash length"); - goto error; + error_setg(errp, "Unable to get hash length"); + *result_len = 0; + return -1; } - if (*resultlen == 0) { - *resultlen = ret; - *result = g_new0(uint8_t, *resultlen); - } else if (*resultlen != ret) { + + if (*result_len == 0) { + *result_len = ret; + *result = g_new(uint8_t, *result_len); + } else if (*result_len != ret) { error_setg(errp, "Result buffer size %zu is smaller than hash %d", - *resultlen, ret); - goto error; + *result_len, ret); + return -1; } - g_checksum_get_digest(cs, *result, resultlen); - - g_checksum_free(cs); + g_checksum_get_digest(ctx, *result, result_len); return 0; - - error: - g_checksum_free(cs); - return -1; } - QCryptoHashDriver qcrypto_hash_lib_driver = { - .hash_bytesv = qcrypto_glib_hash_bytesv, + .hash_new = qcrypto_glib_hash_new, + .hash_update = qcrypto_glib_hash_update, + .hash_finalize = qcrypto_glib_hash_finalize, + .hash_free = qcrypto_glib_hash_free, }; diff --git a/crypto/hash-gnutls.c 
b/crypto/hash-gnutls.c index 17911ac5d1c..99fbe824ead 100644 --- a/crypto/hash-gnutls.c +++ b/crypto/hash-gnutls.c @@ -1,6 +1,7 @@ /* * QEMU Crypto hash algorithms * + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates * Copyright (c) 2021 Red Hat, Inc. * * This library is free software; you can redistribute it and/or @@ -25,17 +26,17 @@ #include "hashpriv.h" -static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALG__MAX] = { - [QCRYPTO_HASH_ALG_MD5] = GNUTLS_DIG_MD5, - [QCRYPTO_HASH_ALG_SHA1] = GNUTLS_DIG_SHA1, - [QCRYPTO_HASH_ALG_SHA224] = GNUTLS_DIG_SHA224, - [QCRYPTO_HASH_ALG_SHA256] = GNUTLS_DIG_SHA256, - [QCRYPTO_HASH_ALG_SHA384] = GNUTLS_DIG_SHA384, - [QCRYPTO_HASH_ALG_SHA512] = GNUTLS_DIG_SHA512, - [QCRYPTO_HASH_ALG_RIPEMD160] = GNUTLS_DIG_RMD160, +static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALGO__MAX] = { + [QCRYPTO_HASH_ALGO_MD5] = GNUTLS_DIG_MD5, + [QCRYPTO_HASH_ALGO_SHA1] = GNUTLS_DIG_SHA1, + [QCRYPTO_HASH_ALGO_SHA224] = GNUTLS_DIG_SHA224, + [QCRYPTO_HASH_ALGO_SHA256] = GNUTLS_DIG_SHA256, + [QCRYPTO_HASH_ALGO_SHA384] = GNUTLS_DIG_SHA384, + [QCRYPTO_HASH_ALGO_SHA512] = GNUTLS_DIG_SHA512, + [QCRYPTO_HASH_ALGO_RIPEMD160] = GNUTLS_DIG_RMD160, }; -gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg) +gboolean qcrypto_hash_supports(QCryptoHashAlgo alg) { size_t i; const gnutls_digest_algorithm_t *algs; @@ -52,53 +53,93 @@ gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg) return false; } - -static int -qcrypto_gnutls_hash_bytesv(QCryptoHashAlgorithm alg, - const struct iovec *iov, - size_t niov, - uint8_t **result, - size_t *resultlen, - Error **errp) +static +QCryptoHash *qcrypto_gnutls_hash_new(QCryptoHashAlgo alg, Error **errp) { - int i, ret; - gnutls_hash_hd_t hash; - - if (!qcrypto_hash_supports(alg)) { - error_setg(errp, - "Unknown hash algorithm %d", - alg); - return -1; - } + QCryptoHash *hash; + int ret; - ret = gnutls_hash_get_len(qcrypto_hash_alg_map[alg]); - if (*resultlen == 0) { - *resultlen = ret; - *result = g_new0(uint8_t, *resultlen); - } else if (*resultlen != ret) { - error_setg(errp, - "Result buffer size %zu is smaller than hash %d", - *resultlen, ret); - return -1; - } + hash = g_new(QCryptoHash, 1); + hash->alg = alg; + hash->opaque = g_new(gnutls_hash_hd_t, 1); - ret = gnutls_hash_init(&hash, qcrypto_hash_alg_map[alg]); + ret = gnutls_hash_init(hash->opaque, qcrypto_hash_alg_map[alg]); if (ret < 0) { error_setg(errp, "Unable to initialize hash algorithm: %s", gnutls_strerror(ret)); - return -1; + g_free(hash->opaque); + g_free(hash); + return NULL; } - for (i = 0; i < niov; i++) { - gnutls_hash(hash, iov[i].iov_base, iov[i].iov_len); + return hash; +} + +static +void qcrypto_gnutls_hash_free(QCryptoHash *hash) +{ + gnutls_hash_hd_t *ctx = hash->opaque; + + gnutls_hash_deinit(*ctx, NULL); + g_free(ctx); + g_free(hash); +} + + +static +int qcrypto_gnutls_hash_update(QCryptoHash *hash, + const struct iovec *iov, + size_t niov, + Error **errp) +{ + int ret = 0; + gnutls_hash_hd_t *ctx = hash->opaque; + + for (int i = 0; i < niov; i++) { + ret = gnutls_hash(*ctx, iov[i].iov_base, iov[i].iov_len); + if (ret != 0) { + error_setg(errp, "Failed to hash data: %s", + gnutls_strerror(ret)); + return -1; + } } - gnutls_hash_deinit(hash, *result); return 0; } +static +int qcrypto_gnutls_hash_finalize(QCryptoHash *hash, + uint8_t **result, + size_t *result_len, + Error **errp) +{ + gnutls_hash_hd_t *ctx = hash->opaque; + int ret; + + ret = gnutls_hash_get_len(qcrypto_hash_alg_map[hash->alg]); + if (ret == 0) { + error_setg(errp, "Unable to get hash 
length"); + return -1; + } + + if (*result_len == 0) { + *result_len = ret; + *result = g_new(uint8_t, *result_len); + } else if (*result_len != ret) { + error_setg(errp, + "Result buffer size %zu is smaller than hash %d", + *result_len, ret); + return -1; + } + + gnutls_hash_output(*ctx, *result); + return 0; +} QCryptoHashDriver qcrypto_hash_lib_driver = { - .hash_bytesv = qcrypto_gnutls_hash_bytesv, + .hash_new = qcrypto_gnutls_hash_new, + .hash_update = qcrypto_gnutls_hash_update, + .hash_finalize = qcrypto_gnutls_hash_finalize, + .hash_free = qcrypto_gnutls_hash_free, }; diff --git a/crypto/hash-nettle.c b/crypto/hash-nettle.c index 1ca1a410628..53f68301efb 100644 --- a/crypto/hash-nettle.c +++ b/crypto/hash-nettle.c @@ -1,6 +1,7 @@ /* * QEMU Crypto hash algorithms * + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates * Copyright (c) 2016 Red Hat, Inc. * * This library is free software; you can redistribute it and/or @@ -25,6 +26,9 @@ #include #include #include +#ifdef CONFIG_CRYPTO_SM3 +#include +#endif typedef void (*qcrypto_nettle_init)(void *ctx); typedef void (*qcrypto_nettle_write)(void *ctx, @@ -42,6 +46,9 @@ union qcrypto_hash_ctx { struct sha384_ctx sha384; struct sha512_ctx sha512; struct ripemd160_ctx ripemd160; +#ifdef CONFIG_CRYPTO_SM3 + struct sm3_ctx sm3; +#endif }; struct qcrypto_hash_alg { @@ -50,51 +57,59 @@ struct qcrypto_hash_alg { qcrypto_nettle_result result; size_t len; } qcrypto_hash_alg_map[] = { - [QCRYPTO_HASH_ALG_MD5] = { + [QCRYPTO_HASH_ALGO_MD5] = { .init = (qcrypto_nettle_init)md5_init, .write = (qcrypto_nettle_write)md5_update, .result = (qcrypto_nettle_result)md5_digest, .len = MD5_DIGEST_SIZE, }, - [QCRYPTO_HASH_ALG_SHA1] = { + [QCRYPTO_HASH_ALGO_SHA1] = { .init = (qcrypto_nettle_init)sha1_init, .write = (qcrypto_nettle_write)sha1_update, .result = (qcrypto_nettle_result)sha1_digest, .len = SHA1_DIGEST_SIZE, }, - [QCRYPTO_HASH_ALG_SHA224] = { + [QCRYPTO_HASH_ALGO_SHA224] = { .init = (qcrypto_nettle_init)sha224_init, .write = (qcrypto_nettle_write)sha224_update, .result = (qcrypto_nettle_result)sha224_digest, .len = SHA224_DIGEST_SIZE, }, - [QCRYPTO_HASH_ALG_SHA256] = { + [QCRYPTO_HASH_ALGO_SHA256] = { .init = (qcrypto_nettle_init)sha256_init, .write = (qcrypto_nettle_write)sha256_update, .result = (qcrypto_nettle_result)sha256_digest, .len = SHA256_DIGEST_SIZE, }, - [QCRYPTO_HASH_ALG_SHA384] = { + [QCRYPTO_HASH_ALGO_SHA384] = { .init = (qcrypto_nettle_init)sha384_init, .write = (qcrypto_nettle_write)sha384_update, .result = (qcrypto_nettle_result)sha384_digest, .len = SHA384_DIGEST_SIZE, }, - [QCRYPTO_HASH_ALG_SHA512] = { + [QCRYPTO_HASH_ALGO_SHA512] = { .init = (qcrypto_nettle_init)sha512_init, .write = (qcrypto_nettle_write)sha512_update, .result = (qcrypto_nettle_result)sha512_digest, .len = SHA512_DIGEST_SIZE, }, - [QCRYPTO_HASH_ALG_RIPEMD160] = { + [QCRYPTO_HASH_ALGO_RIPEMD160] = { .init = (qcrypto_nettle_init)ripemd160_init, .write = (qcrypto_nettle_write)ripemd160_update, .result = (qcrypto_nettle_result)ripemd160_digest, .len = RIPEMD160_DIGEST_SIZE, }, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = { + .init = (qcrypto_nettle_init)sm3_init, + .write = (qcrypto_nettle_write)sm3_update, + .result = (qcrypto_nettle_result)sm3_digest, + .len = SM3_DIGEST_SIZE, + }, +#endif }; -gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg) +gboolean qcrypto_hash_supports(QCryptoHashAlgo alg) { if (alg < G_N_ELEMENTS(qcrypto_hash_alg_map) && qcrypto_hash_alg_map[alg].init != NULL) { @@ -103,59 +118,72 @@ gboolean 
qcrypto_hash_supports(QCryptoHashAlgorithm alg) return false; } +static +QCryptoHash *qcrypto_nettle_hash_new(QCryptoHashAlgo alg, Error **errp) +{ + QCryptoHash *hash; + + hash = g_new(QCryptoHash, 1); + hash->alg = alg; + hash->opaque = g_new(union qcrypto_hash_ctx, 1); + + qcrypto_hash_alg_map[alg].init(hash->opaque); + return hash; +} -static int -qcrypto_nettle_hash_bytesv(QCryptoHashAlgorithm alg, - const struct iovec *iov, - size_t niov, - uint8_t **result, - size_t *resultlen, - Error **errp) +static +void qcrypto_nettle_hash_free(QCryptoHash *hash) { - size_t i; - union qcrypto_hash_ctx ctx; + union qcrypto_hash_ctx *ctx = hash->opaque; - if (!qcrypto_hash_supports(alg)) { - error_setg(errp, - "Unknown hash algorithm %d", - alg); - return -1; - } + g_free(ctx); + g_free(hash); +} - qcrypto_hash_alg_map[alg].init(&ctx); - - for (i = 0; i < niov; i++) { - /* Some versions of nettle have functions - * declared with 'int' instead of 'size_t' - * so to be safe avoid writing more than - * UINT_MAX bytes at a time - */ - size_t len = iov[i].iov_len; - uint8_t *base = iov[i].iov_base; - while (len) { - size_t shortlen = MIN(len, UINT_MAX); - qcrypto_hash_alg_map[alg].write(&ctx, len, base); - len -= shortlen; - base += len; - } +static +int qcrypto_nettle_hash_update(QCryptoHash *hash, + const struct iovec *iov, + size_t niov, + Error **errp) +{ + union qcrypto_hash_ctx *ctx = hash->opaque; + + for (int i = 0; i < niov; i++) { + qcrypto_hash_alg_map[hash->alg].write(ctx, + iov[i].iov_len, + iov[i].iov_base); } - if (*resultlen == 0) { - *resultlen = qcrypto_hash_alg_map[alg].len; - *result = g_new0(uint8_t, *resultlen); - } else if (*resultlen != qcrypto_hash_alg_map[alg].len) { + return 0; +} + +static +int qcrypto_nettle_hash_finalize(QCryptoHash *hash, + uint8_t **result, + size_t *result_len, + Error **errp) +{ + union qcrypto_hash_ctx *ctx = hash->opaque; + int ret = qcrypto_hash_alg_map[hash->alg].len; + + if (*result_len == 0) { + *result_len = ret; + *result = g_new(uint8_t, *result_len); + } else if (*result_len != ret) { error_setg(errp, - "Result buffer size %zu is smaller than hash %zu", - *resultlen, qcrypto_hash_alg_map[alg].len); + "Result buffer size %zu is smaller than hash %d", + *result_len, ret); return -1; } - qcrypto_hash_alg_map[alg].result(&ctx, *resultlen, *result); + qcrypto_hash_alg_map[hash->alg].result(ctx, *result_len, *result); return 0; } - QCryptoHashDriver qcrypto_hash_lib_driver = { - .hash_bytesv = qcrypto_nettle_hash_bytesv, + .hash_new = qcrypto_nettle_hash_new, + .hash_update = qcrypto_nettle_hash_update, + .hash_finalize = qcrypto_nettle_hash_finalize, + .hash_free = qcrypto_nettle_hash_free, }; diff --git a/crypto/hash.c b/crypto/hash.c index b0f8228bdcb..7513769e42d 100644 --- a/crypto/hash.c +++ b/crypto/hash.c @@ -1,6 +1,7 @@ /* * QEMU Crypto hash algorithms * + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates * Copyright (c) 2015 Red Hat, Inc. 
* * This library is free software; you can redistribute it and/or @@ -19,53 +20,53 @@ */ #include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi-types-crypto.h" #include "crypto/hash.h" #include "hashpriv.h" -static size_t qcrypto_hash_alg_size[QCRYPTO_HASH_ALG__MAX] = { - [QCRYPTO_HASH_ALG_MD5] = 16, - [QCRYPTO_HASH_ALG_SHA1] = 20, - [QCRYPTO_HASH_ALG_SHA224] = 28, - [QCRYPTO_HASH_ALG_SHA256] = 32, - [QCRYPTO_HASH_ALG_SHA384] = 48, - [QCRYPTO_HASH_ALG_SHA512] = 64, - [QCRYPTO_HASH_ALG_RIPEMD160] = 20, +static size_t qcrypto_hash_alg_size[QCRYPTO_HASH_ALGO__MAX] = { + [QCRYPTO_HASH_ALGO_MD5] = QCRYPTO_HASH_DIGEST_LEN_MD5, + [QCRYPTO_HASH_ALGO_SHA1] = QCRYPTO_HASH_DIGEST_LEN_SHA1, + [QCRYPTO_HASH_ALGO_SHA224] = QCRYPTO_HASH_DIGEST_LEN_SHA224, + [QCRYPTO_HASH_ALGO_SHA256] = QCRYPTO_HASH_DIGEST_LEN_SHA256, + [QCRYPTO_HASH_ALGO_SHA384] = QCRYPTO_HASH_DIGEST_LEN_SHA384, + [QCRYPTO_HASH_ALGO_SHA512] = QCRYPTO_HASH_DIGEST_LEN_SHA512, + [QCRYPTO_HASH_ALGO_RIPEMD160] = QCRYPTO_HASH_DIGEST_LEN_RIPEMD160, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = QCRYPTO_HASH_DIGEST_LEN_SM3, +#endif }; -size_t qcrypto_hash_digest_len(QCryptoHashAlgorithm alg) +size_t qcrypto_hash_digest_len(QCryptoHashAlgo alg) { assert(alg < G_N_ELEMENTS(qcrypto_hash_alg_size)); return qcrypto_hash_alg_size[alg]; } -int qcrypto_hash_bytesv(QCryptoHashAlgorithm alg, +int qcrypto_hash_bytesv(QCryptoHashAlgo alg, const struct iovec *iov, size_t niov, uint8_t **result, size_t *resultlen, Error **errp) { -#ifdef CONFIG_AF_ALG - int ret; - /* - * TODO: - * Maybe we should treat some afalg errors as fatal - */ - ret = qcrypto_hash_afalg_driver.hash_bytesv(alg, iov, niov, - result, resultlen, - NULL); - if (ret == 0) { - return ret; + g_autoptr(QCryptoHash) ctx = qcrypto_hash_new(alg, errp); + + if (!ctx) { + return -1; + } + + if (qcrypto_hash_updatev(ctx, iov, niov, errp) < 0 || + qcrypto_hash_finalize_bytes(ctx, result, resultlen, errp) < 0) { + return -1; } -#endif - return qcrypto_hash_lib_driver.hash_bytesv(alg, iov, niov, - result, resultlen, - errp); + return 0; } -int qcrypto_hash_bytes(QCryptoHashAlgorithm alg, +int qcrypto_hash_bytes(QCryptoHashAlgo alg, const char *buf, size_t len, uint8_t **result, @@ -77,33 +78,134 @@ int qcrypto_hash_bytes(QCryptoHashAlgorithm alg, return qcrypto_hash_bytesv(alg, &iov, 1, result, resultlen, errp); } +int qcrypto_hash_updatev(QCryptoHash *hash, + const struct iovec *iov, + size_t niov, + Error **errp) +{ + QCryptoHashDriver *drv = hash->driver; + + return drv->hash_update(hash, iov, niov, errp); +} + +int qcrypto_hash_update(QCryptoHash *hash, + const char *buf, + size_t len, + Error **errp) +{ + struct iovec iov = { .iov_base = (char *)buf, .iov_len = len }; + + return qcrypto_hash_updatev(hash, &iov, 1, errp); +} + +QCryptoHash *qcrypto_hash_new(QCryptoHashAlgo alg, Error **errp) +{ + QCryptoHash *hash = NULL; + + if (!qcrypto_hash_supports(alg)) { + error_setg(errp, "Unsupported hash algorithm %s", + QCryptoHashAlgo_str(alg)); + return NULL; + } + +#ifdef CONFIG_AF_ALG + hash = qcrypto_hash_afalg_driver.hash_new(alg, NULL); + if (hash) { + hash->driver = &qcrypto_hash_afalg_driver; + return hash; + } +#endif + + hash = qcrypto_hash_lib_driver.hash_new(alg, errp); + if (!hash) { + return NULL; + } + + hash->driver = &qcrypto_hash_lib_driver; + return hash; +} + +void qcrypto_hash_free(QCryptoHash *hash) +{ + QCryptoHashDriver *drv; + + if (hash) { + drv = hash->driver; + drv->hash_free(hash); + } +} + +int qcrypto_hash_finalize_bytes(QCryptoHash *hash, + uint8_t 
**result, + size_t *result_len, + Error **errp) +{ + QCryptoHashDriver *drv = hash->driver; + + return drv->hash_finalize(hash, result, result_len, errp); +} + static const char hex[] = "0123456789abcdef"; -int qcrypto_hash_digestv(QCryptoHashAlgorithm alg, +int qcrypto_hash_finalize_digest(QCryptoHash *hash, + char **digest, + Error **errp) +{ + int ret; + g_autofree uint8_t *result = NULL; + size_t resultlen = 0; + size_t i; + + ret = qcrypto_hash_finalize_bytes(hash, &result, &resultlen, errp); + if (ret == 0) { + *digest = g_new0(char, (resultlen * 2) + 1); + for (i = 0 ; i < resultlen ; i++) { + (*digest)[(i * 2)] = hex[(result[i] >> 4) & 0xf]; + (*digest)[(i * 2) + 1] = hex[result[i] & 0xf]; + } + (*digest)[resultlen * 2] = '\0'; + } + + return ret; +} + +int qcrypto_hash_finalize_base64(QCryptoHash *hash, + char **base64, + Error **errp) +{ + int ret; + g_autofree uint8_t *result = NULL; + size_t resultlen = 0; + + ret = qcrypto_hash_finalize_bytes(hash, &result, &resultlen, errp); + if (ret == 0) { + *base64 = g_base64_encode(result, resultlen); + } + + return ret; +} + +int qcrypto_hash_digestv(QCryptoHashAlgo alg, const struct iovec *iov, size_t niov, char **digest, Error **errp) { - uint8_t *result = NULL; - size_t resultlen = 0; - size_t i; + g_autoptr(QCryptoHash) ctx = qcrypto_hash_new(alg, errp); - if (qcrypto_hash_bytesv(alg, iov, niov, &result, &resultlen, errp) < 0) { + if (!ctx) { return -1; } - *digest = g_new0(char, (resultlen * 2) + 1); - for (i = 0 ; i < resultlen ; i++) { - (*digest)[(i * 2)] = hex[(result[i] >> 4) & 0xf]; - (*digest)[(i * 2) + 1] = hex[result[i] & 0xf]; + if (qcrypto_hash_updatev(ctx, iov, niov, errp) < 0 || + qcrypto_hash_finalize_digest(ctx, digest, errp) < 0) { + return -1; } - (*digest)[resultlen * 2] = '\0'; - g_free(result); + return 0; } -int qcrypto_hash_digest(QCryptoHashAlgorithm alg, +int qcrypto_hash_digest(QCryptoHashAlgo alg, const char *buf, size_t len, char **digest, @@ -114,25 +216,27 @@ int qcrypto_hash_digest(QCryptoHashAlgorithm alg, return qcrypto_hash_digestv(alg, &iov, 1, digest, errp); } -int qcrypto_hash_base64v(QCryptoHashAlgorithm alg, +int qcrypto_hash_base64v(QCryptoHashAlgo alg, const struct iovec *iov, size_t niov, char **base64, Error **errp) { - uint8_t *result = NULL; - size_t resultlen = 0; + g_autoptr(QCryptoHash) ctx = qcrypto_hash_new(alg, errp); + + if (!ctx) { + return -1; + } - if (qcrypto_hash_bytesv(alg, iov, niov, &result, &resultlen, errp) < 0) { + if (qcrypto_hash_updatev(ctx, iov, niov, errp) < 0 || + qcrypto_hash_finalize_base64(ctx, base64, errp) < 0) { return -1; } - *base64 = g_base64_encode(result, resultlen); - g_free(result); return 0; } -int qcrypto_hash_base64(QCryptoHashAlgorithm alg, +int qcrypto_hash_base64(QCryptoHashAlgo alg, const char *buf, size_t len, char **base64, diff --git a/crypto/hashpriv.h b/crypto/hashpriv.h index cee26ccb47e..83b9256886e 100644 --- a/crypto/hashpriv.h +++ b/crypto/hashpriv.h @@ -1,6 +1,7 @@ /* * QEMU Crypto hash driver supports * + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates * Copyright (c) 2017 HUAWEI TECHNOLOGIES CO., LTD. 
* * Authors: @@ -15,15 +16,21 @@ #ifndef QCRYPTO_HASHPRIV_H #define QCRYPTO_HASHPRIV_H +#include "crypto/hash.h" + typedef struct QCryptoHashDriver QCryptoHashDriver; struct QCryptoHashDriver { - int (*hash_bytesv)(QCryptoHashAlgorithm alg, + QCryptoHash *(*hash_new)(QCryptoHashAlgo alg, Error **errp); + int (*hash_update)(QCryptoHash *hash, const struct iovec *iov, size_t niov, - uint8_t **result, - size_t *resultlen, Error **errp); + int (*hash_finalize)(QCryptoHash *hash, + uint8_t **result, + size_t *resultlen, + Error **errp); + void (*hash_free)(QCryptoHash *hash); }; extern QCryptoHashDriver qcrypto_hash_lib_driver; diff --git a/crypto/hmac-gcrypt.c b/crypto/hmac-gcrypt.c index 0c6f9797114..5273086eb9a 100644 --- a/crypto/hmac-gcrypt.c +++ b/crypto/hmac-gcrypt.c @@ -18,14 +18,17 @@ #include "hmacpriv.h" #include -static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALG__MAX] = { - [QCRYPTO_HASH_ALG_MD5] = GCRY_MAC_HMAC_MD5, - [QCRYPTO_HASH_ALG_SHA1] = GCRY_MAC_HMAC_SHA1, - [QCRYPTO_HASH_ALG_SHA224] = GCRY_MAC_HMAC_SHA224, - [QCRYPTO_HASH_ALG_SHA256] = GCRY_MAC_HMAC_SHA256, - [QCRYPTO_HASH_ALG_SHA384] = GCRY_MAC_HMAC_SHA384, - [QCRYPTO_HASH_ALG_SHA512] = GCRY_MAC_HMAC_SHA512, - [QCRYPTO_HASH_ALG_RIPEMD160] = GCRY_MAC_HMAC_RMD160, +static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALGO__MAX] = { + [QCRYPTO_HASH_ALGO_MD5] = GCRY_MAC_HMAC_MD5, + [QCRYPTO_HASH_ALGO_SHA1] = GCRY_MAC_HMAC_SHA1, + [QCRYPTO_HASH_ALGO_SHA224] = GCRY_MAC_HMAC_SHA224, + [QCRYPTO_HASH_ALGO_SHA256] = GCRY_MAC_HMAC_SHA256, + [QCRYPTO_HASH_ALGO_SHA384] = GCRY_MAC_HMAC_SHA384, + [QCRYPTO_HASH_ALGO_SHA512] = GCRY_MAC_HMAC_SHA512, + [QCRYPTO_HASH_ALGO_RIPEMD160] = GCRY_MAC_HMAC_RMD160, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = GCRY_MAC_HMAC_SM3, +#endif }; typedef struct QCryptoHmacGcrypt QCryptoHmacGcrypt; @@ -33,17 +36,17 @@ struct QCryptoHmacGcrypt { gcry_mac_hd_t handle; }; -bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg) +bool qcrypto_hmac_supports(QCryptoHashAlgo alg) { if (alg < G_N_ELEMENTS(qcrypto_hmac_alg_map) && qcrypto_hmac_alg_map[alg] != GCRY_MAC_NONE) { - return true; + return gcry_mac_test_algo(qcrypto_hmac_alg_map[alg]) == 0; } return false; } -void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg, +void *qcrypto_hmac_ctx_new(QCryptoHashAlgo alg, const uint8_t *key, size_t nkey, Error **errp) { @@ -52,7 +55,7 @@ void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg, if (!qcrypto_hmac_supports(alg)) { error_setg(errp, "Unsupported hmac algorithm %s", - QCryptoHashAlgorithm_str(alg)); + QCryptoHashAlgo_str(alg)); return NULL; } diff --git a/crypto/hmac-glib.c b/crypto/hmac-glib.c index 509bbc74c2c..ea80c8d1b23 100644 --- a/crypto/hmac-glib.c +++ b/crypto/hmac-glib.c @@ -17,14 +17,14 @@ #include "crypto/hmac.h" #include "hmacpriv.h" -static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALG__MAX] = { - [QCRYPTO_HASH_ALG_MD5] = G_CHECKSUM_MD5, - [QCRYPTO_HASH_ALG_SHA1] = G_CHECKSUM_SHA1, - [QCRYPTO_HASH_ALG_SHA256] = G_CHECKSUM_SHA256, - [QCRYPTO_HASH_ALG_SHA512] = G_CHECKSUM_SHA512, - [QCRYPTO_HASH_ALG_SHA224] = -1, - [QCRYPTO_HASH_ALG_SHA384] = -1, - [QCRYPTO_HASH_ALG_RIPEMD160] = -1, +static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALGO__MAX] = { + [QCRYPTO_HASH_ALGO_MD5] = G_CHECKSUM_MD5, + [QCRYPTO_HASH_ALGO_SHA1] = G_CHECKSUM_SHA1, + [QCRYPTO_HASH_ALGO_SHA256] = G_CHECKSUM_SHA256, + [QCRYPTO_HASH_ALGO_SHA512] = G_CHECKSUM_SHA512, + [QCRYPTO_HASH_ALGO_SHA224] = -1, + [QCRYPTO_HASH_ALGO_SHA384] = -1, + [QCRYPTO_HASH_ALGO_RIPEMD160] = -1, }; typedef struct QCryptoHmacGlib QCryptoHmacGlib; @@ -32,7 +32,7 @@ struct 
QCryptoHmacGlib { GHmac *ghmac; }; -bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg) +bool qcrypto_hmac_supports(QCryptoHashAlgo alg) { if (alg < G_N_ELEMENTS(qcrypto_hmac_alg_map) && qcrypto_hmac_alg_map[alg] != -1) { @@ -42,7 +42,7 @@ bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg) return false; } -void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg, +void *qcrypto_hmac_ctx_new(QCryptoHashAlgo alg, const uint8_t *key, size_t nkey, Error **errp) { @@ -50,7 +50,7 @@ void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg, if (!qcrypto_hmac_supports(alg)) { error_setg(errp, "Unsupported hmac algorithm %s", - QCryptoHashAlgorithm_str(alg)); + QCryptoHashAlgo_str(alg)); return NULL; } diff --git a/crypto/hmac-gnutls.c b/crypto/hmac-gnutls.c index 24db383322c..822995505cd 100644 --- a/crypto/hmac-gnutls.c +++ b/crypto/hmac-gnutls.c @@ -20,14 +20,14 @@ #include "crypto/hmac.h" #include "hmacpriv.h" -static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALG__MAX] = { - [QCRYPTO_HASH_ALG_MD5] = GNUTLS_MAC_MD5, - [QCRYPTO_HASH_ALG_SHA1] = GNUTLS_MAC_SHA1, - [QCRYPTO_HASH_ALG_SHA224] = GNUTLS_MAC_SHA224, - [QCRYPTO_HASH_ALG_SHA256] = GNUTLS_MAC_SHA256, - [QCRYPTO_HASH_ALG_SHA384] = GNUTLS_MAC_SHA384, - [QCRYPTO_HASH_ALG_SHA512] = GNUTLS_MAC_SHA512, - [QCRYPTO_HASH_ALG_RIPEMD160] = GNUTLS_MAC_RMD160, +static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALGO__MAX] = { + [QCRYPTO_HASH_ALGO_MD5] = GNUTLS_MAC_MD5, + [QCRYPTO_HASH_ALGO_SHA1] = GNUTLS_MAC_SHA1, + [QCRYPTO_HASH_ALGO_SHA224] = GNUTLS_MAC_SHA224, + [QCRYPTO_HASH_ALGO_SHA256] = GNUTLS_MAC_SHA256, + [QCRYPTO_HASH_ALGO_SHA384] = GNUTLS_MAC_SHA384, + [QCRYPTO_HASH_ALGO_SHA512] = GNUTLS_MAC_SHA512, + [QCRYPTO_HASH_ALGO_RIPEMD160] = GNUTLS_MAC_RMD160, }; typedef struct QCryptoHmacGnutls QCryptoHmacGnutls; @@ -35,7 +35,7 @@ struct QCryptoHmacGnutls { gnutls_hmac_hd_t handle; }; -bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg) +bool qcrypto_hmac_supports(QCryptoHashAlgo alg) { size_t i; const gnutls_digest_algorithm_t *algs; @@ -52,7 +52,7 @@ bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg) return false; } -void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg, +void *qcrypto_hmac_ctx_new(QCryptoHashAlgo alg, const uint8_t *key, size_t nkey, Error **errp) { @@ -61,7 +61,7 @@ void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg, if (!qcrypto_hmac_supports(alg)) { error_setg(errp, "Unsupported hmac algorithm %s", - QCryptoHashAlgorithm_str(alg)); + QCryptoHashAlgo_str(alg)); return NULL; } diff --git a/crypto/hmac-nettle.c b/crypto/hmac-nettle.c index 1ad6c4f2530..dd5b2ab7a19 100644 --- a/crypto/hmac-nettle.c +++ b/crypto/hmac-nettle.c @@ -38,6 +38,9 @@ struct QCryptoHmacNettle { struct hmac_sha256_ctx sha256_ctx; /* equals hmac_sha224_ctx */ struct hmac_sha512_ctx sha512_ctx; /* equals hmac_sha384_ctx */ struct hmac_ripemd160_ctx ripemd160_ctx; +#ifdef CONFIG_CRYPTO_SM3 + struct hmac_sm3_ctx ctx; +#endif } u; }; @@ -46,52 +49,60 @@ struct qcrypto_nettle_hmac_alg { qcrypto_nettle_hmac_update update; qcrypto_nettle_hmac_digest digest; size_t len; -} qcrypto_hmac_alg_map[QCRYPTO_HASH_ALG__MAX] = { - [QCRYPTO_HASH_ALG_MD5] = { +} qcrypto_hmac_alg_map[QCRYPTO_HASH_ALGO__MAX] = { + [QCRYPTO_HASH_ALGO_MD5] = { .setkey = (qcrypto_nettle_hmac_setkey)hmac_md5_set_key, .update = (qcrypto_nettle_hmac_update)hmac_md5_update, .digest = (qcrypto_nettle_hmac_digest)hmac_md5_digest, .len = MD5_DIGEST_SIZE, }, - [QCRYPTO_HASH_ALG_SHA1] = { + [QCRYPTO_HASH_ALGO_SHA1] = { .setkey = (qcrypto_nettle_hmac_setkey)hmac_sha1_set_key, .update = 
(qcrypto_nettle_hmac_update)hmac_sha1_update, .digest = (qcrypto_nettle_hmac_digest)hmac_sha1_digest, .len = SHA1_DIGEST_SIZE, }, - [QCRYPTO_HASH_ALG_SHA224] = { + [QCRYPTO_HASH_ALGO_SHA224] = { .setkey = (qcrypto_nettle_hmac_setkey)hmac_sha224_set_key, .update = (qcrypto_nettle_hmac_update)hmac_sha224_update, .digest = (qcrypto_nettle_hmac_digest)hmac_sha224_digest, .len = SHA224_DIGEST_SIZE, }, - [QCRYPTO_HASH_ALG_SHA256] = { + [QCRYPTO_HASH_ALGO_SHA256] = { .setkey = (qcrypto_nettle_hmac_setkey)hmac_sha256_set_key, .update = (qcrypto_nettle_hmac_update)hmac_sha256_update, .digest = (qcrypto_nettle_hmac_digest)hmac_sha256_digest, .len = SHA256_DIGEST_SIZE, }, - [QCRYPTO_HASH_ALG_SHA384] = { + [QCRYPTO_HASH_ALGO_SHA384] = { .setkey = (qcrypto_nettle_hmac_setkey)hmac_sha384_set_key, .update = (qcrypto_nettle_hmac_update)hmac_sha384_update, .digest = (qcrypto_nettle_hmac_digest)hmac_sha384_digest, .len = SHA384_DIGEST_SIZE, }, - [QCRYPTO_HASH_ALG_SHA512] = { + [QCRYPTO_HASH_ALGO_SHA512] = { .setkey = (qcrypto_nettle_hmac_setkey)hmac_sha512_set_key, .update = (qcrypto_nettle_hmac_update)hmac_sha512_update, .digest = (qcrypto_nettle_hmac_digest)hmac_sha512_digest, .len = SHA512_DIGEST_SIZE, }, - [QCRYPTO_HASH_ALG_RIPEMD160] = { + [QCRYPTO_HASH_ALGO_RIPEMD160] = { .setkey = (qcrypto_nettle_hmac_setkey)hmac_ripemd160_set_key, .update = (qcrypto_nettle_hmac_update)hmac_ripemd160_update, .digest = (qcrypto_nettle_hmac_digest)hmac_ripemd160_digest, .len = RIPEMD160_DIGEST_SIZE, }, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = { + .setkey = (qcrypto_nettle_hmac_setkey)hmac_sm3_set_key, + .update = (qcrypto_nettle_hmac_update)hmac_sm3_update, + .digest = (qcrypto_nettle_hmac_digest)hmac_sm3_digest, + .len = SM3_DIGEST_SIZE, + }, +#endif }; -bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg) +bool qcrypto_hmac_supports(QCryptoHashAlgo alg) { if (alg < G_N_ELEMENTS(qcrypto_hmac_alg_map) && qcrypto_hmac_alg_map[alg].setkey != NULL) { @@ -101,7 +112,7 @@ bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg) return false; } -void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg, +void *qcrypto_hmac_ctx_new(QCryptoHashAlgo alg, const uint8_t *key, size_t nkey, Error **errp) { @@ -109,7 +120,7 @@ void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg, if (!qcrypto_hmac_supports(alg)) { error_setg(errp, "Unsupported hmac algorithm %s", - QCryptoHashAlgorithm_str(alg)); + QCryptoHashAlgo_str(alg)); return NULL; } diff --git a/crypto/hmac.c b/crypto/hmac.c index 4de7e8c9cbb..422e005182a 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -83,7 +83,7 @@ int qcrypto_hmac_digest(QCryptoHmac *hmac, return qcrypto_hmac_digestv(hmac, &iov, 1, digest, errp); } -QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgorithm alg, +QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgo alg, const uint8_t *key, size_t nkey, Error **errp) { diff --git a/crypto/hmacpriv.h b/crypto/hmacpriv.h index 62dfe8257af..f339596bd9a 100644 --- a/crypto/hmacpriv.h +++ b/crypto/hmacpriv.h @@ -28,7 +28,7 @@ struct QCryptoHmacDriver { void (*hmac_free)(QCryptoHmac *hmac); }; -void *qcrypto_hmac_ctx_new(QCryptoHashAlgorithm alg, +void *qcrypto_hmac_ctx_new(QCryptoHashAlgo alg, const uint8_t *key, size_t nkey, Error **errp); extern QCryptoHmacDriver qcrypto_hmac_lib_driver; @@ -37,7 +37,7 @@ extern QCryptoHmacDriver qcrypto_hmac_lib_driver; #include "afalgpriv.h" -QCryptoAFAlg *qcrypto_afalg_hmac_ctx_new(QCryptoHashAlgorithm alg, +QCryptoAFAlgo *qcrypto_afalg_hmac_ctx_new(QCryptoHashAlgo alg, const uint8_t *key, size_t nkey, Error **errp); extern 
QCryptoHmacDriver qcrypto_hmac_afalg_driver; diff --git a/crypto/ivgen.c b/crypto/ivgen.c index 12822f85198..6b7d24d8897 100644 --- a/crypto/ivgen.c +++ b/crypto/ivgen.c @@ -27,9 +27,9 @@ #include "ivgen-essiv.h" -QCryptoIVGen *qcrypto_ivgen_new(QCryptoIVGenAlgorithm alg, - QCryptoCipherAlgorithm cipheralg, - QCryptoHashAlgorithm hash, +QCryptoIVGen *qcrypto_ivgen_new(QCryptoIVGenAlgo alg, + QCryptoCipherAlgo cipheralg, + QCryptoHashAlgo hash, const uint8_t *key, size_t nkey, Error **errp) { @@ -40,13 +40,13 @@ QCryptoIVGen *qcrypto_ivgen_new(QCryptoIVGenAlgorithm alg, ivgen->hash = hash; switch (alg) { - case QCRYPTO_IVGEN_ALG_PLAIN: + case QCRYPTO_IV_GEN_ALGO_PLAIN: ivgen->driver = &qcrypto_ivgen_plain; break; - case QCRYPTO_IVGEN_ALG_PLAIN64: + case QCRYPTO_IV_GEN_ALGO_PLAIN64: ivgen->driver = &qcrypto_ivgen_plain64; break; - case QCRYPTO_IVGEN_ALG_ESSIV: + case QCRYPTO_IV_GEN_ALGO_ESSIV: ivgen->driver = &qcrypto_ivgen_essiv; break; default: @@ -73,19 +73,19 @@ int qcrypto_ivgen_calculate(QCryptoIVGen *ivgen, } -QCryptoIVGenAlgorithm qcrypto_ivgen_get_algorithm(QCryptoIVGen *ivgen) +QCryptoIVGenAlgo qcrypto_ivgen_get_algorithm(QCryptoIVGen *ivgen) { return ivgen->algorithm; } -QCryptoCipherAlgorithm qcrypto_ivgen_get_cipher(QCryptoIVGen *ivgen) +QCryptoCipherAlgo qcrypto_ivgen_get_cipher(QCryptoIVGen *ivgen) { return ivgen->cipher; } -QCryptoHashAlgorithm qcrypto_ivgen_get_hash(QCryptoIVGen *ivgen) +QCryptoHashAlgo qcrypto_ivgen_get_hash(QCryptoIVGen *ivgen) { return ivgen->hash; } diff --git a/crypto/ivgenpriv.h b/crypto/ivgenpriv.h index cecdbedfde4..e3388d30be1 100644 --- a/crypto/ivgenpriv.h +++ b/crypto/ivgenpriv.h @@ -40,9 +40,9 @@ struct QCryptoIVGen { QCryptoIVGenDriver *driver; void *private; - QCryptoIVGenAlgorithm algorithm; - QCryptoCipherAlgorithm cipher; - QCryptoHashAlgorithm hash; + QCryptoIVGenAlgo algorithm; + QCryptoCipherAlgo cipher; + QCryptoHashAlgo hash; }; diff --git a/crypto/meson.build b/crypto/meson.build index c46f9c22a7f..735635de1f3 100644 --- a/crypto/meson.build +++ b/crypto/meson.build @@ -24,6 +24,10 @@ crypto_ss.add(files( 'rsakey.c', )) +if gnutls.found() + crypto_ss.add(files('x509-utils.c')) +endif + if nettle.found() crypto_ss.add(nettle, files('hash-nettle.c', 'hmac-nettle.c', 'pbkdf-nettle.c')) if hogweed.found() diff --git a/crypto/pbkdf-gcrypt.c b/crypto/pbkdf-gcrypt.c index a8d8e64f4d4..e89b8b1c768 100644 --- a/crypto/pbkdf-gcrypt.c +++ b/crypto/pbkdf-gcrypt.c @@ -23,37 +23,43 @@ #include "qapi/error.h" #include "crypto/pbkdf.h" -bool qcrypto_pbkdf2_supports(QCryptoHashAlgorithm hash) +bool qcrypto_pbkdf2_supports(QCryptoHashAlgo hash) { switch (hash) { - case QCRYPTO_HASH_ALG_MD5: - case QCRYPTO_HASH_ALG_SHA1: - case QCRYPTO_HASH_ALG_SHA224: - case QCRYPTO_HASH_ALG_SHA256: - case QCRYPTO_HASH_ALG_SHA384: - case QCRYPTO_HASH_ALG_SHA512: - case QCRYPTO_HASH_ALG_RIPEMD160: - return true; + case QCRYPTO_HASH_ALGO_MD5: + case QCRYPTO_HASH_ALGO_SHA1: + case QCRYPTO_HASH_ALGO_SHA224: + case QCRYPTO_HASH_ALGO_SHA256: + case QCRYPTO_HASH_ALGO_SHA384: + case QCRYPTO_HASH_ALGO_SHA512: + case QCRYPTO_HASH_ALGO_RIPEMD160: +#ifdef CONFIG_CRYPTO_SM3 + case QCRYPTO_HASH_ALGO_SM3: +#endif + return qcrypto_hash_supports(hash); default: return false; } } -int qcrypto_pbkdf2(QCryptoHashAlgorithm hash, +int qcrypto_pbkdf2(QCryptoHashAlgo hash, const uint8_t *key, size_t nkey, const uint8_t *salt, size_t nsalt, uint64_t iterations, uint8_t *out, size_t nout, Error **errp) { - static const int hash_map[QCRYPTO_HASH_ALG__MAX] = { - [QCRYPTO_HASH_ALG_MD5] = 
GCRY_MD_MD5, - [QCRYPTO_HASH_ALG_SHA1] = GCRY_MD_SHA1, - [QCRYPTO_HASH_ALG_SHA224] = GCRY_MD_SHA224, - [QCRYPTO_HASH_ALG_SHA256] = GCRY_MD_SHA256, - [QCRYPTO_HASH_ALG_SHA384] = GCRY_MD_SHA384, - [QCRYPTO_HASH_ALG_SHA512] = GCRY_MD_SHA512, - [QCRYPTO_HASH_ALG_RIPEMD160] = GCRY_MD_RMD160, + static const int hash_map[QCRYPTO_HASH_ALGO__MAX] = { + [QCRYPTO_HASH_ALGO_MD5] = GCRY_MD_MD5, + [QCRYPTO_HASH_ALGO_SHA1] = GCRY_MD_SHA1, + [QCRYPTO_HASH_ALGO_SHA224] = GCRY_MD_SHA224, + [QCRYPTO_HASH_ALGO_SHA256] = GCRY_MD_SHA256, + [QCRYPTO_HASH_ALGO_SHA384] = GCRY_MD_SHA384, + [QCRYPTO_HASH_ALGO_SHA512] = GCRY_MD_SHA512, + [QCRYPTO_HASH_ALGO_RIPEMD160] = GCRY_MD_RMD160, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = GCRY_MD_SM3, +#endif }; int ret; @@ -68,7 +74,7 @@ int qcrypto_pbkdf2(QCryptoHashAlgorithm hash, hash_map[hash] == GCRY_MD_NONE) { error_setg_errno(errp, ENOSYS, "PBKDF does not support hash algorithm %s", - QCryptoHashAlgorithm_str(hash)); + QCryptoHashAlgo_str(hash)); return -1; } diff --git a/crypto/pbkdf-gnutls.c b/crypto/pbkdf-gnutls.c index 2dfbbd382c2..f34423f918b 100644 --- a/crypto/pbkdf-gnutls.c +++ b/crypto/pbkdf-gnutls.c @@ -23,37 +23,37 @@ #include "qapi/error.h" #include "crypto/pbkdf.h" -bool qcrypto_pbkdf2_supports(QCryptoHashAlgorithm hash) +bool qcrypto_pbkdf2_supports(QCryptoHashAlgo hash) { switch (hash) { - case QCRYPTO_HASH_ALG_MD5: - case QCRYPTO_HASH_ALG_SHA1: - case QCRYPTO_HASH_ALG_SHA224: - case QCRYPTO_HASH_ALG_SHA256: - case QCRYPTO_HASH_ALG_SHA384: - case QCRYPTO_HASH_ALG_SHA512: - case QCRYPTO_HASH_ALG_RIPEMD160: - return true; + case QCRYPTO_HASH_ALGO_MD5: + case QCRYPTO_HASH_ALGO_SHA1: + case QCRYPTO_HASH_ALGO_SHA224: + case QCRYPTO_HASH_ALGO_SHA256: + case QCRYPTO_HASH_ALGO_SHA384: + case QCRYPTO_HASH_ALGO_SHA512: + case QCRYPTO_HASH_ALGO_RIPEMD160: + return qcrypto_hash_supports(hash); default: return false; } } -int qcrypto_pbkdf2(QCryptoHashAlgorithm hash, +int qcrypto_pbkdf2(QCryptoHashAlgo hash, const uint8_t *key, size_t nkey, const uint8_t *salt, size_t nsalt, uint64_t iterations, uint8_t *out, size_t nout, Error **errp) { - static const int hash_map[QCRYPTO_HASH_ALG__MAX] = { - [QCRYPTO_HASH_ALG_MD5] = GNUTLS_DIG_MD5, - [QCRYPTO_HASH_ALG_SHA1] = GNUTLS_DIG_SHA1, - [QCRYPTO_HASH_ALG_SHA224] = GNUTLS_DIG_SHA224, - [QCRYPTO_HASH_ALG_SHA256] = GNUTLS_DIG_SHA256, - [QCRYPTO_HASH_ALG_SHA384] = GNUTLS_DIG_SHA384, - [QCRYPTO_HASH_ALG_SHA512] = GNUTLS_DIG_SHA512, - [QCRYPTO_HASH_ALG_RIPEMD160] = GNUTLS_DIG_RMD160, + static const int hash_map[QCRYPTO_HASH_ALGO__MAX] = { + [QCRYPTO_HASH_ALGO_MD5] = GNUTLS_DIG_MD5, + [QCRYPTO_HASH_ALGO_SHA1] = GNUTLS_DIG_SHA1, + [QCRYPTO_HASH_ALGO_SHA224] = GNUTLS_DIG_SHA224, + [QCRYPTO_HASH_ALGO_SHA256] = GNUTLS_DIG_SHA256, + [QCRYPTO_HASH_ALGO_SHA384] = GNUTLS_DIG_SHA384, + [QCRYPTO_HASH_ALGO_SHA512] = GNUTLS_DIG_SHA512, + [QCRYPTO_HASH_ALGO_RIPEMD160] = GNUTLS_DIG_RMD160, }; int ret; const gnutls_datum_t gkey = { (unsigned char *)key, nkey }; @@ -70,7 +70,7 @@ int qcrypto_pbkdf2(QCryptoHashAlgorithm hash, hash_map[hash] == GNUTLS_DIG_UNKNOWN) { error_setg_errno(errp, ENOSYS, "PBKDF does not support hash algorithm %s", - QCryptoHashAlgorithm_str(hash)); + QCryptoHashAlgo_str(hash)); return -1; } diff --git a/crypto/pbkdf-nettle.c b/crypto/pbkdf-nettle.c index d6293c25a13..3ef9c1b52c4 100644 --- a/crypto/pbkdf-nettle.c +++ b/crypto/pbkdf-nettle.c @@ -25,22 +25,25 @@ #include "crypto/pbkdf.h" -bool qcrypto_pbkdf2_supports(QCryptoHashAlgorithm hash) +bool qcrypto_pbkdf2_supports(QCryptoHashAlgo hash) { switch (hash) { - 
case QCRYPTO_HASH_ALG_SHA1: - case QCRYPTO_HASH_ALG_SHA224: - case QCRYPTO_HASH_ALG_SHA256: - case QCRYPTO_HASH_ALG_SHA384: - case QCRYPTO_HASH_ALG_SHA512: - case QCRYPTO_HASH_ALG_RIPEMD160: + case QCRYPTO_HASH_ALGO_SHA1: + case QCRYPTO_HASH_ALGO_SHA224: + case QCRYPTO_HASH_ALGO_SHA256: + case QCRYPTO_HASH_ALGO_SHA384: + case QCRYPTO_HASH_ALGO_SHA512: + case QCRYPTO_HASH_ALGO_RIPEMD160: +#ifdef CONFIG_CRYPTO_SM3 + case QCRYPTO_HASH_ALGO_SM3: +#endif return true; default: return false; } } -int qcrypto_pbkdf2(QCryptoHashAlgorithm hash, +int qcrypto_pbkdf2(QCryptoHashAlgo hash, const uint8_t *key, size_t nkey, const uint8_t *salt, size_t nsalt, uint64_t iterations, @@ -55,6 +58,9 @@ int qcrypto_pbkdf2(QCryptoHashAlgorithm hash, struct hmac_sha384_ctx sha384; struct hmac_sha512_ctx sha512; struct hmac_ripemd160_ctx ripemd160; +#ifdef CONFIG_CRYPTO_SM3 + struct hmac_sm3_ctx sm3; +#endif } ctx; if (iterations > UINT_MAX) { @@ -65,52 +71,59 @@ int qcrypto_pbkdf2(QCryptoHashAlgorithm hash, } switch (hash) { - case QCRYPTO_HASH_ALG_MD5: + case QCRYPTO_HASH_ALGO_MD5: hmac_md5_set_key(&ctx.md5, nkey, key); PBKDF2(&ctx.md5, hmac_md5_update, hmac_md5_digest, MD5_DIGEST_SIZE, iterations, nsalt, salt, nout, out); break; - case QCRYPTO_HASH_ALG_SHA1: + case QCRYPTO_HASH_ALGO_SHA1: hmac_sha1_set_key(&ctx.sha1, nkey, key); PBKDF2(&ctx.sha1, hmac_sha1_update, hmac_sha1_digest, SHA1_DIGEST_SIZE, iterations, nsalt, salt, nout, out); break; - case QCRYPTO_HASH_ALG_SHA224: + case QCRYPTO_HASH_ALGO_SHA224: hmac_sha224_set_key(&ctx.sha224, nkey, key); PBKDF2(&ctx.sha224, hmac_sha224_update, hmac_sha224_digest, SHA224_DIGEST_SIZE, iterations, nsalt, salt, nout, out); break; - case QCRYPTO_HASH_ALG_SHA256: + case QCRYPTO_HASH_ALGO_SHA256: hmac_sha256_set_key(&ctx.sha256, nkey, key); PBKDF2(&ctx.sha256, hmac_sha256_update, hmac_sha256_digest, SHA256_DIGEST_SIZE, iterations, nsalt, salt, nout, out); break; - case QCRYPTO_HASH_ALG_SHA384: + case QCRYPTO_HASH_ALGO_SHA384: hmac_sha384_set_key(&ctx.sha384, nkey, key); PBKDF2(&ctx.sha384, hmac_sha384_update, hmac_sha384_digest, SHA384_DIGEST_SIZE, iterations, nsalt, salt, nout, out); break; - case QCRYPTO_HASH_ALG_SHA512: + case QCRYPTO_HASH_ALGO_SHA512: hmac_sha512_set_key(&ctx.sha512, nkey, key); PBKDF2(&ctx.sha512, hmac_sha512_update, hmac_sha512_digest, SHA512_DIGEST_SIZE, iterations, nsalt, salt, nout, out); break; - case QCRYPTO_HASH_ALG_RIPEMD160: + case QCRYPTO_HASH_ALGO_RIPEMD160: hmac_ripemd160_set_key(&ctx.ripemd160, nkey, key); PBKDF2(&ctx.ripemd160, hmac_ripemd160_update, hmac_ripemd160_digest, RIPEMD160_DIGEST_SIZE, iterations, nsalt, salt, nout, out); break; +#ifdef CONFIG_CRYPTO_SM3 + case QCRYPTO_HASH_ALGO_SM3: + hmac_sm3_set_key(&ctx.sm3, nkey, key); + PBKDF2(&ctx.sm3, hmac_sm3_update, hmac_sm3_digest, + SM3_DIGEST_SIZE, iterations, nsalt, salt, nout, out); + break; +#endif default: error_setg_errno(errp, ENOSYS, "PBKDF does not support hash algorithm %s", - QCryptoHashAlgorithm_str(hash)); + QCryptoHashAlgo_str(hash)); return -1; } return 0; diff --git a/crypto/pbkdf-stub.c b/crypto/pbkdf-stub.c index 9c4622e4247..9f29d0eed71 100644 --- a/crypto/pbkdf-stub.c +++ b/crypto/pbkdf-stub.c @@ -22,12 +22,12 @@ #include "qapi/error.h" #include "crypto/pbkdf.h" -bool qcrypto_pbkdf2_supports(QCryptoHashAlgorithm hash G_GNUC_UNUSED) +bool qcrypto_pbkdf2_supports(QCryptoHashAlgo hash G_GNUC_UNUSED) { return false; } -int qcrypto_pbkdf2(QCryptoHashAlgorithm hash G_GNUC_UNUSED, +int qcrypto_pbkdf2(QCryptoHashAlgo hash G_GNUC_UNUSED, const uint8_t *key G_GNUC_UNUSED, 
size_t nkey G_GNUC_UNUSED, const uint8_t *salt G_GNUC_UNUSED, diff --git a/crypto/pbkdf.c b/crypto/pbkdf.c index 8d198c152cf..2989fc0a40f 100644 --- a/crypto/pbkdf.c +++ b/crypto/pbkdf.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/thread.h" #include "qapi/error.h" #include "crypto/pbkdf.h" #ifndef _WIN32 @@ -85,12 +86,28 @@ static int qcrypto_pbkdf2_get_thread_cpu(unsigned long long *val_ms, #endif } -uint64_t qcrypto_pbkdf2_count_iters(QCryptoHashAlgorithm hash, - const uint8_t *key, size_t nkey, - const uint8_t *salt, size_t nsalt, - size_t nout, - Error **errp) +typedef struct CountItersData { + QCryptoHashAlgo hash; + const uint8_t *key; + size_t nkey; + const uint8_t *salt; + size_t nsalt; + size_t nout; + uint64_t iterations; + Error **errp; +} CountItersData; + +static void *threaded_qcrypto_pbkdf2_count_iters(void *data) { + CountItersData *iters_data = (CountItersData *) data; + QCryptoHashAlgo hash = iters_data->hash; + const uint8_t *key = iters_data->key; + size_t nkey = iters_data->nkey; + const uint8_t *salt = iters_data->salt; + size_t nsalt = iters_data->nsalt; + size_t nout = iters_data->nout; + Error **errp = iters_data->errp; + size_t scaled = 0; uint64_t ret = -1; g_autofree uint8_t *out = g_new(uint8_t, nout); uint64_t iterations = (1 << 15); @@ -114,13 +131,27 @@ uint64_t qcrypto_pbkdf2_count_iters(QCryptoHashAlgorithm hash, delta_ms = end_ms - start_ms; - if (delta_ms > 500) { + /* + * For very small 'iterations' values, CPU (or crypto + * accelerator) might be fast enough that the scheduler + * hasn't incremented getrusage() data, or incremented + * it by a very small amount, resulting in delta_ms == 0. + * Once we've scaled 'iterations' x10, 5 times, we really + * should be seeing delta_ms != 0, so sanity check at + * that point. 
+ */ + if (scaled > 5 && + delta_ms == 0) { /* sanity check */ + error_setg(errp, "Unable to get accurate CPU usage"); + goto cleanup; + } else if (delta_ms > 500) { break; } else if (delta_ms < 100) { iterations = iterations * 10; } else { iterations = (iterations * 1000 / delta_ms); } + scaled++; } iterations = iterations * 1000 / delta_ms; @@ -129,5 +160,24 @@ uint64_t qcrypto_pbkdf2_count_iters(QCryptoHashAlgorithm hash, cleanup: memset(out, 0, nout); - return ret; + iters_data->iterations = ret; + return NULL; +} + +uint64_t qcrypto_pbkdf2_count_iters(QCryptoHashAlgo hash, + const uint8_t *key, size_t nkey, + const uint8_t *salt, size_t nsalt, + size_t nout, + Error **errp) +{ + CountItersData data = { + hash, key, nkey, salt, nsalt, nout, 0, errp + }; + QemuThread thread; + + qemu_thread_create(&thread, "pbkdf2", threaded_qcrypto_pbkdf2_count_iters, + &data, QEMU_THREAD_JOINABLE); + qemu_thread_join(&thread); + + return data.iterations; } diff --git a/crypto/rsakey-builtin.c.inc b/crypto/rsakey-builtin.c.inc index 46cc7afe87f..6337b84c54e 100644 --- a/crypto/rsakey-builtin.c.inc +++ b/crypto/rsakey-builtin.c.inc @@ -183,10 +183,10 @@ QCryptoAkCipherRSAKey *qcrypto_akcipher_rsakey_parse( size_t keylen, Error **errp) { switch (type) { - case QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE: + case QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE: return qcrypto_builtin_rsa_private_key_parse(key, keylen, errp); - case QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC: + case QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC: return qcrypto_builtin_rsa_public_key_parse(key, keylen, errp); default: diff --git a/crypto/rsakey-nettle.c.inc b/crypto/rsakey-nettle.c.inc index cc49872e78d..b7f34b02345 100644 --- a/crypto/rsakey-nettle.c.inc +++ b/crypto/rsakey-nettle.c.inc @@ -145,10 +145,10 @@ QCryptoAkCipherRSAKey *qcrypto_akcipher_rsakey_parse( size_t keylen, Error **errp) { switch (type) { - case QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE: + case QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE: return qcrypto_nettle_rsa_private_key_parse(key, keylen, errp); - case QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC: + case QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC: return qcrypto_nettle_rsa_public_key_parse(key, keylen, errp); default: diff --git a/crypto/secret.c b/crypto/secret.c index 44eaff16f60..61a4584286c 100644 --- a/crypto/secret.c +++ b/crypto/secret.c @@ -117,7 +117,7 @@ qcrypto_secret_finalize(Object *obj) } static void -qcrypto_secret_class_init(ObjectClass *oc, void *data) +qcrypto_secret_class_init(ObjectClass *oc, const void *data) { QCryptoSecretCommonClass *sic = QCRYPTO_SECRET_COMMON_CLASS(oc); sic->load_data = qcrypto_secret_load_data; diff --git a/crypto/secret_common.c b/crypto/secret_common.c index 3441c44ca89..a5ecb876aeb 100644 --- a/crypto/secret_common.c +++ b/crypto/secret_common.c @@ -71,7 +71,7 @@ static void qcrypto_secret_decrypt(QCryptoSecretCommon *secret, return; } - aes = qcrypto_cipher_new(QCRYPTO_CIPHER_ALG_AES_256, + aes = qcrypto_cipher_new(QCRYPTO_CIPHER_ALGO_AES_256, QCRYPTO_CIPHER_MODE_CBC, key, keylen, errp); @@ -191,15 +191,6 @@ qcrypto_secret_complete(UserCreatable *uc, Error **errp) } -static bool -qcrypto_secret_prop_get_loaded(Object *obj, - Error **errp G_GNUC_UNUSED) -{ - QCryptoSecretCommon *secret = QCRYPTO_SECRET_COMMON(obj); - return secret->rawdata != NULL; -} - - static void qcrypto_secret_prop_set_format(Object *obj, int value, @@ -272,15 +263,12 @@ qcrypto_secret_finalize(Object *obj) } static void -qcrypto_secret_class_init(ObjectClass *oc, void *data) +qcrypto_secret_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc 
= USER_CREATABLE_CLASS(oc); ucc->complete = qcrypto_secret_complete; - object_class_property_add_bool(oc, "loaded", - qcrypto_secret_prop_get_loaded, - NULL); object_class_property_add_enum(oc, "format", "QCryptoSecretFormat", &QCryptoSecretFormat_lookup, @@ -387,7 +375,7 @@ static const TypeInfo qcrypto_secret_info = { .class_size = sizeof(QCryptoSecretCommonClass), .class_init = qcrypto_secret_class_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/crypto/secret_keyring.c b/crypto/secret_keyring.c index 1b7edec84a0..78d7f09b3b9 100644 --- a/crypto/secret_keyring.c +++ b/crypto/secret_keyring.c @@ -103,7 +103,7 @@ qcrypto_secret_prop_get_key(Object *obj, Visitor *v, static void -qcrypto_secret_keyring_class_init(ObjectClass *oc, void *data) +qcrypto_secret_keyring_class_init(ObjectClass *oc, const void *data) { QCryptoSecretCommonClass *sic = QCRYPTO_SECRET_COMMON_CLASS(oc); sic->load_data = qcrypto_secret_keyring_load_data; diff --git a/crypto/tls-cipher-suites.c b/crypto/tls-cipher-suites.c index d0df4badc0f..d9b61d0c08a 100644 --- a/crypto/tls-cipher-suites.c +++ b/crypto/tls-cipher-suites.c @@ -102,7 +102,8 @@ static GByteArray *qcrypto_tls_cipher_suites_fw_cfg_gen_data(Object *obj, errp); } -static void qcrypto_tls_cipher_suites_class_init(ObjectClass *oc, void *data) +static void qcrypto_tls_cipher_suites_class_init(ObjectClass *oc, + const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); FWCfgDataGeneratorClass *fwgc = FW_CFG_DATA_GENERATOR_CLASS(oc); @@ -117,7 +118,7 @@ static const TypeInfo qcrypto_tls_cipher_suites_info = { .instance_size = sizeof(QCryptoTLSCipherSuites), .class_size = sizeof(QCryptoTLSCredsClass), .class_init = qcrypto_tls_cipher_suites_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { TYPE_FW_CFG_DATA_GENERATOR_INTERFACE }, { } diff --git a/crypto/tlscreds.c b/crypto/tlscreds.c index 084ce0d51ae..9e59594d673 100644 --- a/crypto/tlscreds.c +++ b/crypto/tlscreds.c @@ -223,7 +223,7 @@ qcrypto_tls_creds_prop_get_endpoint(Object *obj, static void -qcrypto_tls_creds_class_init(ObjectClass *oc, void *data) +qcrypto_tls_creds_class_init(ObjectClass *oc, const void *data) { object_class_property_add_bool(oc, "verify-peer", qcrypto_tls_creds_prop_get_verify, diff --git a/crypto/tlscredsanon.c b/crypto/tlscredsanon.c index c0d23a0ef3c..44af9e6c9af 100644 --- a/crypto/tlscredsanon.c +++ b/crypto/tlscredsanon.c @@ -127,37 +127,6 @@ qcrypto_tls_creds_anon_complete(UserCreatable *uc, Error **errp) } -#ifdef CONFIG_GNUTLS - - -static bool -qcrypto_tls_creds_anon_prop_get_loaded(Object *obj, - Error **errp G_GNUC_UNUSED) -{ - QCryptoTLSCredsAnon *creds = QCRYPTO_TLS_CREDS_ANON(obj); - - if (creds->parent_obj.endpoint == QCRYPTO_TLS_CREDS_ENDPOINT_SERVER) { - return creds->data.server != NULL; - } else { - return creds->data.client != NULL; - } -} - - -#else /* ! CONFIG_GNUTLS */ - - -static bool -qcrypto_tls_creds_anon_prop_get_loaded(Object *obj G_GNUC_UNUSED, - Error **errp G_GNUC_UNUSED) -{ - return false; -} - - -#endif /* ! 
CONFIG_GNUTLS */ - - static void qcrypto_tls_creds_anon_finalize(Object *obj) { @@ -168,15 +137,11 @@ qcrypto_tls_creds_anon_finalize(Object *obj) static void -qcrypto_tls_creds_anon_class_init(ObjectClass *oc, void *data) +qcrypto_tls_creds_anon_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); ucc->complete = qcrypto_tls_creds_anon_complete; - - object_class_property_add_bool(oc, "loaded", - qcrypto_tls_creds_anon_prop_get_loaded, - NULL); } @@ -187,7 +152,7 @@ static const TypeInfo qcrypto_tls_creds_anon_info = { .instance_finalize = qcrypto_tls_creds_anon_finalize, .class_size = sizeof(QCryptoTLSCredsAnonClass), .class_init = qcrypto_tls_creds_anon_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/crypto/tlscredspsk.c b/crypto/tlscredspsk.c index 546cad1c5a4..5b68a6b7ba2 100644 --- a/crypto/tlscredspsk.c +++ b/crypto/tlscredspsk.c @@ -206,43 +206,13 @@ qcrypto_tls_creds_psk_complete(UserCreatable *uc, Error **errp) } -#ifdef CONFIG_GNUTLS - - -static bool -qcrypto_tls_creds_psk_prop_get_loaded(Object *obj, - Error **errp G_GNUC_UNUSED) -{ - QCryptoTLSCredsPSK *creds = QCRYPTO_TLS_CREDS_PSK(obj); - - if (creds->parent_obj.endpoint == QCRYPTO_TLS_CREDS_ENDPOINT_SERVER) { - return creds->data.server != NULL; - } else { - return creds->data.client != NULL; - } -} - - -#else /* ! CONFIG_GNUTLS */ - - -static bool -qcrypto_tls_creds_psk_prop_get_loaded(Object *obj G_GNUC_UNUSED, - Error **errp G_GNUC_UNUSED) -{ - return false; -} - - -#endif /* ! CONFIG_GNUTLS */ - - static void qcrypto_tls_creds_psk_finalize(Object *obj) { QCryptoTLSCredsPSK *creds = QCRYPTO_TLS_CREDS_PSK(obj); qcrypto_tls_creds_psk_unload(creds); + g_free(creds->username); } static void @@ -266,15 +236,12 @@ qcrypto_tls_creds_psk_prop_get_username(Object *obj, } static void -qcrypto_tls_creds_psk_class_init(ObjectClass *oc, void *data) +qcrypto_tls_creds_psk_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); ucc->complete = qcrypto_tls_creds_psk_complete; - object_class_property_add_bool(oc, "loaded", - qcrypto_tls_creds_psk_prop_get_loaded, - NULL); object_class_property_add_str(oc, "username", qcrypto_tls_creds_psk_prop_get_username, qcrypto_tls_creds_psk_prop_set_username); @@ -288,7 +255,7 @@ static const TypeInfo qcrypto_tls_creds_psk_info = { .instance_finalize = qcrypto_tls_creds_psk_finalize, .class_size = sizeof(QCryptoTLSCredsPSKClass), .class_init = qcrypto_tls_creds_psk_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/crypto/tlscredsx509.c b/crypto/tlscredsx509.c index d14313925dd..cd1f5044715 100644 --- a/crypto/tlscredsx509.c +++ b/crypto/tlscredsx509.c @@ -426,9 +426,8 @@ qcrypto_tls_creds_load_cert(QCryptoTLSCredsX509 *creds, static int qcrypto_tls_creds_load_ca_cert_list(QCryptoTLSCredsX509 *creds, const char *certFile, - gnutls_x509_crt_t *certs, - unsigned int certMax, - size_t *ncerts, + gnutls_x509_crt_t **certs, + unsigned int *ncerts, Error **errp) { gnutls_datum_t data; @@ -449,20 +448,18 @@ qcrypto_tls_creds_load_ca_cert_list(QCryptoTLSCredsX509 *creds, data.data = (unsigned char *)buf; data.size = strlen(buf); - if (gnutls_x509_crt_list_import(certs, &certMax, &data, - GNUTLS_X509_FMT_PEM, 0) < 0) { + if (gnutls_x509_crt_list_import2(certs, ncerts, &data, + GNUTLS_X509_FMT_PEM, 0) < 0) { error_setg(errp, "Unable to import CA 
certificate list %s", certFile); return -1; } - *ncerts = certMax; return 0; } -#define MAX_CERTS 16 static int qcrypto_tls_creds_x509_sanity_check(QCryptoTLSCredsX509 *creds, bool isServer, @@ -471,12 +468,11 @@ qcrypto_tls_creds_x509_sanity_check(QCryptoTLSCredsX509 *creds, Error **errp) { gnutls_x509_crt_t cert = NULL; - gnutls_x509_crt_t cacerts[MAX_CERTS]; - size_t ncacerts = 0; + gnutls_x509_crt_t *cacerts = NULL; + unsigned int ncacerts = 0; size_t i; int ret = -1; - memset(cacerts, 0, sizeof(cacerts)); if (certFile && access(certFile, R_OK) == 0) { cert = qcrypto_tls_creds_load_cert(creds, @@ -488,8 +484,9 @@ qcrypto_tls_creds_x509_sanity_check(QCryptoTLSCredsX509 *creds, } if (access(cacertFile, R_OK) == 0) { if (qcrypto_tls_creds_load_ca_cert_list(creds, - cacertFile, cacerts, - MAX_CERTS, &ncacerts, + cacertFile, + &cacerts, + &ncacerts, errp) < 0) { goto cleanup; } @@ -526,6 +523,8 @@ qcrypto_tls_creds_x509_sanity_check(QCryptoTLSCredsX509 *creds, for (i = 0; i < ncacerts; i++) { gnutls_x509_crt_deinit(cacerts[i]); } + g_free(cacerts); + return ret; } @@ -695,33 +694,6 @@ qcrypto_tls_creds_x509_complete(UserCreatable *uc, Error **errp) } -#ifdef CONFIG_GNUTLS - - -static bool -qcrypto_tls_creds_x509_prop_get_loaded(Object *obj, - Error **errp G_GNUC_UNUSED) -{ - QCryptoTLSCredsX509 *creds = QCRYPTO_TLS_CREDS_X509(obj); - - return creds->data != NULL; -} - - -#else /* ! CONFIG_GNUTLS */ - - -static bool -qcrypto_tls_creds_x509_prop_get_loaded(Object *obj G_GNUC_UNUSED, - Error **errp G_GNUC_UNUSED) -{ - return false; -} - - -#endif /* ! CONFIG_GNUTLS */ - - static void qcrypto_tls_creds_x509_prop_set_sanity(Object *obj, bool value, @@ -829,7 +801,7 @@ qcrypto_tls_creds_x509_finalize(Object *obj) static void -qcrypto_tls_creds_x509_class_init(ObjectClass *oc, void *data) +qcrypto_tls_creds_x509_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); QCryptoTLSCredsClass *ctcc = QCRYPTO_TLS_CREDS_CLASS(oc); @@ -838,9 +810,6 @@ qcrypto_tls_creds_x509_class_init(ObjectClass *oc, void *data) ucc->complete = qcrypto_tls_creds_x509_complete; - object_class_property_add_bool(oc, "loaded", - qcrypto_tls_creds_x509_prop_get_loaded, - NULL); object_class_property_add_bool(oc, "sanity-check", qcrypto_tls_creds_x509_prop_get_sanity, qcrypto_tls_creds_x509_prop_set_sanity); @@ -858,7 +827,7 @@ static const TypeInfo qcrypto_tls_creds_x509_info = { .instance_finalize = qcrypto_tls_creds_x509_finalize, .class_size = sizeof(QCryptoTLSCredsX509Class), .class_init = qcrypto_tls_creds_x509_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/crypto/tlssession.c b/crypto/tlssession.c index 77286e23f46..86d407a1429 100644 --- a/crypto/tlssession.c +++ b/crypto/tlssession.c @@ -19,6 +19,8 @@ */ #include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/thread.h" #include "crypto/tlssession.h" #include "crypto/tlscredsanon.h" #include "crypto/tlscredspsk.h" @@ -51,6 +53,14 @@ struct QCryptoTLSSession { */ Error *rerr; Error *werr; + + /* + * Used to protect against broken GNUTLS thread safety + * https://gitlab.com/gnutls/gnutls/-/issues/1717 + */ + bool requireThreadSafety; + bool lockEnabled; + QemuMutex lock; }; @@ -69,6 +79,7 @@ qcrypto_tls_session_free(QCryptoTLSSession *session) g_free(session->peername); g_free(session->authzid); object_unref(OBJECT(session->creds)); + qemu_mutex_destroy(&session->lock); g_free(session); } @@ -84,10 +95,19 @@ 
qcrypto_tls_session_push(void *opaque, const void *buf, size_t len) return -1; }; + if (session->lockEnabled) { + qemu_mutex_unlock(&session->lock); + } + error_free(session->werr); session->werr = NULL; ret = session->writeFunc(buf, len, session->opaque, &session->werr); + + if (session->lockEnabled) { + qemu_mutex_lock(&session->lock); + } + if (ret == QCRYPTO_TLS_SESSION_ERR_BLOCK) { errno = EAGAIN; return -1; @@ -114,7 +134,16 @@ qcrypto_tls_session_pull(void *opaque, void *buf, size_t len) error_free(session->rerr); session->rerr = NULL; + if (session->lockEnabled) { + qemu_mutex_unlock(&session->lock); + } + ret = session->readFunc(buf, len, session->opaque, &session->rerr); + + if (session->lockEnabled) { + qemu_mutex_lock(&session->lock); + } + if (ret == QCRYPTO_TLS_SESSION_ERR_BLOCK) { errno = EAGAIN; return -1; @@ -153,6 +182,8 @@ qcrypto_tls_session_new(QCryptoTLSCreds *creds, session->creds = creds; object_ref(OBJECT(creds)); + qemu_mutex_init(&session->lock); + if (creds->endpoint != endpoint) { error_setg(errp, "Credentials endpoint doesn't match session"); goto error; @@ -289,6 +320,11 @@ qcrypto_tls_session_new(QCryptoTLSCreds *creds, return NULL; } +void qcrypto_tls_session_require_thread_safety(QCryptoTLSSession *sess) +{ + sess->requireThreadSafety = true; +} + static int qcrypto_tls_session_check_certificate(QCryptoTLSSession *session, Error **errp) @@ -480,7 +516,17 @@ qcrypto_tls_session_write(QCryptoTLSSession *session, size_t len, Error **errp) { - ssize_t ret = gnutls_record_send(session->handle, buf, len); + ssize_t ret; + + if (session->lockEnabled) { + qemu_mutex_lock(&session->lock); + } + + ret = gnutls_record_send(session->handle, buf, len); + + if (session->lockEnabled) { + qemu_mutex_unlock(&session->lock); + } if (ret < 0) { if (ret == GNUTLS_E_AGAIN) { @@ -509,7 +555,17 @@ qcrypto_tls_session_read(QCryptoTLSSession *session, bool gracefulTermination, Error **errp) { - ssize_t ret = gnutls_record_recv(session->handle, buf, len); + ssize_t ret; + + if (session->lockEnabled) { + qemu_mutex_lock(&session->lock); + } + + ret = gnutls_record_recv(session->handle, buf, len); + + if (session->lockEnabled) { + qemu_mutex_unlock(&session->lock); + } if (ret < 0) { if (ret == GNUTLS_E_AGAIN) { @@ -545,46 +601,108 @@ int qcrypto_tls_session_handshake(QCryptoTLSSession *session, Error **errp) { - int ret = gnutls_handshake(session->handle); - if (ret == 0) { + int ret; + ret = gnutls_handshake(session->handle); + + if (!ret) { +#ifdef CONFIG_GNUTLS_BUG1717_WORKAROUND + gnutls_cipher_algorithm_t cipher = + gnutls_cipher_get(session->handle); + + /* + * Any use of rekeying in TLS 1.3 is unsafe for + * a gnutls with bug 1717, however, we know that + * QEMU won't initiate manual rekeying. 
Thus we + * only have to protect against automatic rekeying + * which doesn't trigger with CHACHA20 + */ + trace_qcrypto_tls_session_parameters( + session, + session->requireThreadSafety, + gnutls_protocol_get_version(session->handle), + cipher); + + if (session->requireThreadSafety && + gnutls_protocol_get_version(session->handle) == + GNUTLS_TLS1_3 && + cipher != GNUTLS_CIPHER_CHACHA20_POLY1305) { + warn_report("WARNING: activating thread safety countermeasures " + "for potentially broken GNUTLS with TLS1.3 cipher=%d", + cipher); + trace_qcrypto_tls_session_bug1717_workaround(session); + session->lockEnabled = true; + } +#endif + session->handshakeComplete = true; + return QCRYPTO_TLS_HANDSHAKE_COMPLETE; + } + + if (ret == GNUTLS_E_INTERRUPTED || ret == GNUTLS_E_AGAIN) { + int direction = gnutls_record_get_direction(session->handle); + return direction ? QCRYPTO_TLS_HANDSHAKE_SENDING : + QCRYPTO_TLS_HANDSHAKE_RECVING; + } + + if (session->rerr || session->werr) { + error_setg(errp, "TLS handshake failed: %s: %s", + gnutls_strerror(ret), + error_get_pretty(session->rerr ? + session->rerr : session->werr)); } else { - if (ret == GNUTLS_E_INTERRUPTED || - ret == GNUTLS_E_AGAIN) { - ret = 1; - } else { - if (session->rerr || session->werr) { - error_setg(errp, "TLS handshake failed: %s: %s", - gnutls_strerror(ret), - error_get_pretty(session->rerr ? - session->rerr : session->werr)); - } else { - error_setg(errp, "TLS handshake failed: %s", - gnutls_strerror(ret)); - } - ret = -1; - } + error_setg(errp, "TLS handshake failed: %s", + gnutls_strerror(ret)); } + error_free(session->rerr); error_free(session->werr); session->rerr = session->werr = NULL; - return ret; + return -1; } -QCryptoTLSSessionHandshakeStatus -qcrypto_tls_session_get_handshake_status(QCryptoTLSSession *session) +int +qcrypto_tls_session_bye(QCryptoTLSSession *session, Error **errp) { - if (session->handshakeComplete) { - return QCRYPTO_TLS_HANDSHAKE_COMPLETE; - } else if (gnutls_record_get_direction(session->handle) == 0) { - return QCRYPTO_TLS_HANDSHAKE_RECVING; + int ret; + + if (!session->handshakeComplete) { + return 0; + } + + if (session->lockEnabled) { + qemu_mutex_lock(&session->lock); + } + ret = gnutls_bye(session->handle, GNUTLS_SHUT_WR); + + if (session->lockEnabled) { + qemu_mutex_unlock(&session->lock); + } + + if (!ret) { + return QCRYPTO_TLS_BYE_COMPLETE; + } + + if (ret == GNUTLS_E_INTERRUPTED || ret == GNUTLS_E_AGAIN) { + int direction = gnutls_record_get_direction(session->handle); + return direction ? QCRYPTO_TLS_BYE_SENDING : QCRYPTO_TLS_BYE_RECVING; + } + + if (session->rerr || session->werr) { + error_setg(errp, "TLS termination failed: %s: %s", gnutls_strerror(ret), + error_get_pretty(session->rerr ? 
+ session->rerr : session->werr)); } else { - return QCRYPTO_TLS_HANDSHAKE_SENDING; + error_setg(errp, "TLS termination failed: %s", gnutls_strerror(ret)); } -} + error_free(session->rerr); + error_free(session->werr); + session->rerr = session->werr = NULL; + + return -1; +} int qcrypto_tls_session_get_key_size(QCryptoTLSSession *session, @@ -627,6 +745,9 @@ qcrypto_tls_session_new(QCryptoTLSCreds *creds G_GNUC_UNUSED, return NULL; } +void qcrypto_tls_session_require_thread_safety(QCryptoTLSSession *sess) +{ +} void qcrypto_tls_session_free(QCryptoTLSSession *sess G_GNUC_UNUSED) @@ -692,10 +813,10 @@ qcrypto_tls_session_handshake(QCryptoTLSSession *sess, } -QCryptoTLSSessionHandshakeStatus -qcrypto_tls_session_get_handshake_status(QCryptoTLSSession *sess) +int +qcrypto_tls_session_bye(QCryptoTLSSession *session, Error **errp) { - return QCRYPTO_TLS_HANDSHAKE_COMPLETE; + return QCRYPTO_TLS_BYE_COMPLETE; } diff --git a/crypto/trace-events b/crypto/trace-events index bccd0bbf291..d0e33427faa 100644 --- a/crypto/trace-events +++ b/crypto/trace-events @@ -21,6 +21,8 @@ qcrypto_tls_creds_x509_load_cert_list(void *creds, const char *file) "TLS creds # tlssession.c qcrypto_tls_session_new(void *session, void *creds, const char *hostname, const char *authzid, int endpoint) "TLS session new session=%p creds=%p hostname=%s authzid=%s endpoint=%d" qcrypto_tls_session_check_creds(void *session, const char *status) "TLS session check creds session=%p status=%s" +qcrypto_tls_session_parameters(void *session, int threadSafety, int protocol, int cipher) "TLS session parameters session=%p threadSafety=%d protocol=%d cipher=%d" +qcrypto_tls_session_bug1717_workaround(void *session) "TLS session bug1717 workaround session=%p" # tls-cipher-suites.c qcrypto_tls_cipher_suite_priority(const char *name) "priority: %s" diff --git a/crypto/x509-utils.c b/crypto/x509-utils.c new file mode 100644 index 00000000000..39bb6d4d8c3 --- /dev/null +++ b/crypto/x509-utils.c @@ -0,0 +1,80 @@ +/* + * X.509 certificate related helpers + * + * Copyright (c) 2024 Dorjoy Chowdhury + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "crypto/x509-utils.h" +#include <gnutls/gnutls.h> +#include <gnutls/crypto.h> +#include <gnutls/x509.h> + +static const int qcrypto_to_gnutls_hash_alg_map[QCRYPTO_HASH_ALGO__MAX] = { + [QCRYPTO_HASH_ALGO_MD5] = GNUTLS_DIG_MD5, + [QCRYPTO_HASH_ALGO_SHA1] = GNUTLS_DIG_SHA1, + [QCRYPTO_HASH_ALGO_SHA224] = GNUTLS_DIG_SHA224, + [QCRYPTO_HASH_ALGO_SHA256] = GNUTLS_DIG_SHA256, + [QCRYPTO_HASH_ALGO_SHA384] = GNUTLS_DIG_SHA384, + [QCRYPTO_HASH_ALGO_SHA512] = GNUTLS_DIG_SHA512, + [QCRYPTO_HASH_ALGO_RIPEMD160] = GNUTLS_DIG_RMD160, +}; + +int qcrypto_get_x509_cert_fingerprint(uint8_t *cert, size_t size, + QCryptoHashAlgo alg, + uint8_t *result, + size_t *resultlen, + Error **errp) +{ + int ret = -1; + int hlen; + gnutls_x509_crt_t crt; + gnutls_datum_t datum = {.data = cert, .size = size}; + + if (alg >= G_N_ELEMENTS(qcrypto_to_gnutls_hash_alg_map)) { + error_setg(errp, "Unknown hash algorithm"); + return -1; + } + + if (result == NULL) { + error_setg(errp, "No valid buffer given"); + return -1; + } + + if (gnutls_x509_crt_init(&crt) < 0) { + error_setg(errp, "Unable to initialize certificate: %s", + gnutls_strerror(ret)); + return -1; + } + + if (gnutls_x509_crt_import(crt, &datum, GNUTLS_X509_FMT_PEM) != 0) { + error_setg(errp, "Failed to import certificate"); + goto cleanup; + } + + hlen = gnutls_hash_get_len(qcrypto_to_gnutls_hash_alg_map[alg]); + if (*resultlen < hlen) { + error_setg(errp, + "Result buffer size %zu is smaller than hash %d", + *resultlen, hlen); + goto cleanup; + } + + if (gnutls_x509_crt_get_fingerprint(crt, + qcrypto_to_gnutls_hash_alg_map[alg], + result, resultlen) != 0) { + error_setg(errp, "Failed to get fingerprint from certificate"); + goto cleanup; + } + + ret = 0; + + cleanup: + gnutls_x509_crt_deinit(crt); + return ret; +} diff --git a/debian/README-components-versions b/debian/README-components-versions new file mode 100644 index 00000000000..de78efe654d --- /dev/null +++ b/debian/README-components-versions @@ -0,0 +1,74 @@ +Evolution of qemu-related packages in debian + +kvm: was a binary package providing /usr/bin/kvm before squeeze. + + In squeeze it has been renamed to qemu-kvm, + so kvm becomes a transitional package. + + Probably can be gotten rid of now. + + No need to conflict with this package, but we can force-remove it + by conflicting with it. + + Some packages may still Require/Depend/Suggest: kvm. + + +qemu-kvm: was a binary package providing /usr/bin/kvm in wheezy (v. 1.1) + + This binary became a (deprecated) shell wrapper and was a part of + qemu-system-x86 package between versions 1.3.0+dfsg-5 (when qemu-system + was split) up to 1.7.0+dfsg-1, with qemu-kvm package being transitional. + + At version 1.7.0+dfsg-2, qemu-kvm package is not transitional anymore, + and the wrapper is provided by qemu-kvm, not qemu-system-x86. + + Some packages may Require/Depend/Suggest: qemu-kvm. + +qemu: initially it was a mix of all of qemu (unrelated, not conflicting +with qemu-kvm or kvm) + + at 0.11.0-1 it was split into qemu-user, qemu-user-static and qemu-system. + + Also qemu-keymaps was made a separate package at version 0.12.4+dfsg-4 + (for use together with qemu-kvm and xen and, on ubuntu, qemu-linaro). + + And qemu-utils became a separate package (look when? since the beginning?) + + Now it is a metapackage and a placeholder for some docs + +qemu-system: was a package with all qemu-system-* binaries.
+ + Were split into individual qemu-system-foo and qemu-system-common at version + 1.3.0+dfsg-5, so qemu-system becomes a metapackage without its own files. + + The split introduced a bunch of qemu-system-* conflicting+replacing + qemu-system<<1.3.0+dfsg-5. + + At version 1.3.0+dfsg-4exp (before split, in experimental), /usr/bin/kvm was + a part of qemu-system (and later qemu-system-x86). + + At the same version, virtfs-proxy-helper binary has been moved from qemu-utils + to qemu-system (and later to qemu-system-common). + +qemu-system-common: appeared at 1.3.0+dfsg-5, files common for all qemu-system* + + briefly (>=1.3.0+dfsg-4exp, <<1.3.0+dfsg-5), qemu-utils had + virtfs-proxy-helper binary which is in qemu-system-common + +qemu-keymaps: keymap definition for qemu, qemu-kvm and xen. + + Can be merged into qemu-system-common now, or after qemu-linaro. + + Were split out of qemu at version 0.12.4+dfsg-4 + +qemu-utils: common utilities (part of qemu-common in ubuntu?) + + In versions between 1.3.0+dfsg-1~exp1 and 1.3.0+dfsg-4exp, wrongly shipped + virtfs-proxy-helper binary + +qemu-user, qemu-user-static: user-mode qemu emulation + + Were split out of qemu at version 0.11.0-1 (pre-squeeze), + do not conflict/replace anything anymore. + +qemu-common: ubuntu package diff --git a/debian/TODO b/debian/TODO new file mode 100644 index 00000000000..5efed04b42f --- /dev/null +++ b/debian/TODO @@ -0,0 +1,16 @@ +* fix other binaries (s390-zipl, ppc_rom) + +* permissions/ownership for /dev/vhost_net??? + +* startup script for qemu-guest-agent: check dependencies & runlevels. + +* maybe provide activation for udev & systemd: + + SUBSYSTEM=="virtio-ports", ATTR{name}=="org.qemu.guest_agent.0", \ + TAG+="systemd" ENV{SYSTEMD_WANTS}="qemu-guest-agent.service" + + and dev-virtio/ports-org.qemu.guest_agent.0.device activation + for systemd. + +* decide what to do with qemu-bridge-helper (#691138) + diff --git a/debian/binfmt-install b/debian/binfmt-install new file mode 100755 index 00000000000..02536c381ef --- /dev/null +++ b/debian/binfmt-install @@ -0,0 +1,130 @@ +#!/bin/sh +set -e + +PACKAGE="$1" +case "$#$PACKAGE" in + 1qemu-user-static) SUFFIX=-static; FIXB=F; DOC_PACKAGE=$PACKAGE ;; + 1qemu-user-binfmt) SUFFIX=; FIXB= ; DOC_PACKAGE=qemu-user ;; + *) echo "usage: $0 "; exit 1 ;; +esac + +fmts="aarch64 alpha arm armeb cris hexagon hppa i386 loongarch64 m68k microblaze mips mipsel mipsn32 mipsn32el mips64 mips64el ppc ppc64 ppc64le riscv32 riscv64 s390x sh4 sh4eb sparc sparc32plus sparc64 x86_64 xtensa xtensaeb" +removed_fmts="ppc64abi32" + +# linux ELF_OSABI(byte7) can be 0 (traditional,SYSV) or 3 (GNU/LINUX extensions) +# binfmt registration does not allow an enum, only value&mask.
So we use broader mask +# to allow both 0 and 3 here, this also lets 1 (HPUX) and 2 (NETBSD) - 0xfc not 0xff +# alternative is to create 2 magic/mask pairs instead of one + + aarch64_magic='\x7f\x45\x4c\x46\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7\x00' + aarch64_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' + alpha_magic='\x7f\x45\x4c\x46\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x26\x90' + alpha_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' + arm_magic='\x7f\x45\x4c\x46\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28\x00' + arm_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' + armeb_magic='\x7f\x45\x4c\x46\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28' + armeb_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' + cris_magic='\x7f\x45\x4c\x46\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x4c\x00' + cris_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' + hexagon_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xa4\x00' + hexagon_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' + hppa_magic='\x7f\x45\x4c\x46\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x0f' + hppa_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' + i386_magic='\x7f\x45\x4c\x46\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x03\x00' + i386_mask='\xff\xff\xff\xff\xff\xfe\xfe\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' +loongarch64_magic='\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x02\x01' + loongarch64_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' + m68k_magic='\x7f\x45\x4c\x46\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x04' + m68k_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' + microblaze_magic='\x7f\x45\x4c\x46\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xba\xab' + microblaze_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' + mips_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + mips_mask='\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20' + mipsel_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + mipsel_mask='\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00' + mipsn32_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20' + mipsn32_mask='\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20' + mipsn32el_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00' + 
mipsn32el_mask='\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00' + mips64_magic='\x7f\x45\x4c\x46\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08' + mips64_mask='\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' + mips64el_magic='\x7f\x45\x4c\x46\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00' + mips64el_mask='\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' + ppc_magic='\x7f\x45\x4c\x46\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x14' + ppc_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' + ppc64_magic='\x7f\x45\x4c\x46\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x15' + ppc64_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' + ppc64le_magic='\x7f\x45\x4c\x46\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x15\x00' + ppc64le_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\x00' + riscv32_magic='\x7f\x45\x4c\x46\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xf3\x00' + riscv32_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' + riscv64_magic='\x7f\x45\x4c\x46\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xf3\x00' + riscv64_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' + s390x_magic='\x7f\x45\x4c\x46\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x16' + s390x_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' + sh4_magic='\x7f\x45\x4c\x46\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a\x00' + sh4_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' + sh4eb_magic='\x7f\x45\x4c\x46\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a' + sh4eb_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' + sparc_magic='\x7f\x45\x4c\x46\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x02' + sparc_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' +sparc32plus_magic='\x7f\x45\x4c\x46\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x12' + sparc32plus_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' + sparc64_magic='\x7f\x45\x4c\x46\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2b' + sparc64_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' + x86_64_magic='\x7f\x45\x4c\x46\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x3e\x00' + x86_64_mask='\xff\xff\xff\xff\xff\xfe\xfe\xfc\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' + xtensa_magic='\x7f\x45\x4c\x46\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x5e\x00' + xtensa_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' + xtensaeb_magic='\x7f\x45\x4c\x46\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x5e' + xtensaeb_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff' + +: ${DEB_HOST_ARCH:=$(dpkg-architecture -qDEB_HOST_ARCH)} + +# find which fmts needs to be filtered out, which is arch-dependent. 
+# Drop support for emulating amd64 on i386, http://bugs.debian.org/604712 +case "$DEB_HOST_ARCH" in + amd64 | i386 | x32) omit="i386|x86_64|x32" ;; + arm | armel | armhf | arm64) omit="arm|aarch64" ;; + ppc64 | powerpc) omit="ppc|ppc64|ppc64abi32" ;; + ppc64el) omit="ppc64le" ;; + riscv64) omit="riscv32|riscv64" ;; + s390x) omit="s390x" ;; + sparc | sparc64) omit="sparc|sparc32plus|sparc64" ;; + *) omit="$DEB_HOST_ARCH" ;; +esac + +mkdir -p debian/$PACKAGE/usr/lib/binfmt.d \ + debian/$DOC_PACKAGE/usr/share/doc/$DOC_PACKAGE \ + debian/$PACKAGE/usr/libexec/qemu-binfmt \ + +for fmt in $fmts ; do + eval "case $fmt in $omit) inst=;; *) inst=y;; esac; magic=\"\$${fmt}_magic\" mask=\"\$${fmt}_mask\"" + # for arch from the same family as host, install stuff to docdir + # so it is not enabled by default. One can (sym)link selected entries + # from docdir to /etc/binfmt.d/ to enable them + if [ "$inst" ]; then inst=debian/$PACKAGE/usr/lib/binfmt.d + else inst=debian/$DOC_PACKAGE/usr/share/doc/$DOC_PACKAGE + fi + ln -sf ../../bin/qemu-$fmt$SUFFIX debian/$PACKAGE/usr/libexec/qemu-binfmt/$fmt-binfmt-P + echo ":qemu-$fmt:M::$magic:$mask:/usr/libexec/qemu-binfmt/$fmt-binfmt-P:OP${FIXB}" \ + >$inst/qemu-$fmt.conf +done + +# generate postinst script to unregister all from binfmt-support +# when upgrading from older qemu. +cat >>debian/$PACKAGE.postinst.debhelper </dev/null || return 0 + for fmt in $fmts $removed_fmts; do + if [ -f /var/lib/binfmts/qemu-\$fmt ]; then + [ -f /usr/bin/qemu-\$fmt$SUFFIX ] || + echo "$PACKAGE.preinst: note: the warning from update-binfmts about missing qemu-\$fmt$SUFFIX is okay" >&2 + update-binfmts --package $PACKAGE --remove qemu-\$fmt /usr/bin/qemu-\$fmt$SUFFIX + fi + done +} +binfmt_update "\$1" "\$2" +EOF diff --git a/debian/build-deb-from-git.sh b/debian/build-deb-from-git.sh new file mode 100755 index 00000000000..ab16d10db4a --- /dev/null +++ b/debian/build-deb-from-git.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# +# Produce a deb source package that can be uploaded to a Debian/Ubuntu builder. + +if [[ $(git --no-optional-locks status -uno --porcelain) ]]; then + echo "ERROR: git repository is not clean" + echo "Try running:" + echo " git submodule foreach --recursive 'git reset --hard && git clean -fdx'" + exit 1 +fi + +# Update submodules +git clean -xdf +git submodule update --init --recursive + +# Clean up and include everything in the package +find subprojects/ -type d -name .git -exec rm -rf {} \; +find . -name .gitignore -exec rm -f {} \; + +meson subprojects download + +find subprojects/ -type d -name .git -exec rm -rf {} \; +find . -name .gitignore -exec rm -f {} \; + +# Import submodules in git +git add . +git commit -s -a -m "[DROP THIS] Include submodules for packaging" + +# Produce an orig tarball +mkdir -p deb +git archive --prefix=qemu-10.1.0/ -o deb/qemu_10.1.0+nvidia1.orig.tar.gz HEAD diff --git a/debian/build-source-package-from-git.sh b/debian/build-source-package-from-git.sh new file mode 100755 index 00000000000..ab16d10db4a --- /dev/null +++ b/debian/build-source-package-from-git.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# +# Produce a deb source package that can be uploaded to a Debian/Ubuntu builder. 
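+# +# Usage note (based on the commands below): this script commits the downloaded +# meson subprojects onto the current branch ("[DROP THIS] Include submodules +# for packaging") and writes the upstream tarball to +# deb/qemu_10.1.0+nvidia1.orig.tar.gz, so it is best run on a branch or +# checkout that can be discarded afterwards.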
+ +if [[ $(git --no-optional-locks status -uno --porcelain) ]]; then + echo "ERROR: git repository is not clean" + echo "Try running:" + echo " git submodule foreach --recursive 'git reset --hard && git clean -fdx'" + exit 1 +fi + +# Update submodules +git clean -xdf +git submodule update --init --recursive + +# Clean up and include everything in the package +find subprojects/ -type d -name .git -exec rm -rf {} \; +find . -name .gitignore -exec rm -f {} \; + +meson subprojects download + +find subprojects/ -type d -name .git -exec rm -rf {} \; +find . -name .gitignore -exec rm -f {} \; + +# Import submodules in git +git add . +git commit -s -a -m "[DROP THIS] Include submodules for packaging" + +# Produce an orig tarball +mkdir -p deb +git archive --prefix=qemu-10.1.0/ -o deb/qemu_10.1.0+nvidia1.orig.tar.gz HEAD diff --git a/debian/changelog b/debian/changelog new file mode 100644 index 00000000000..7bbf1857658 --- /dev/null +++ b/debian/changelog @@ -0,0 +1,11193 @@ +qemu (1:10.1.0+nvidia1-1) noble; urgency=medium + + * Update to QEMU 10.1.0 upstream with NVIDIA support + + -- Matthew R. Ochs Thu, 16 Oct 2025 14:30:00 -0700 + +qemu (1:8.2.2+ds-0ubuntu1.10) noble-security; urgency=medium + + * SECURITY UPDATE: double-free in QEMU virtio devices + - debian/patches/CVE-2024-3446-pre1.patch: introduce + virtio_bh_new_guarded() helper in hw/virtio/virtio.c, + include/hw/virtio/virtio.h. + - debian/patches/CVE-2024-3446-1.patch: protect from DMA re-entrancy + bugs in hw/virtio/virtio-crypto.c. + - debian/patches/CVE-2024-3446-2.patch: protect from DMA re-entrancy + bugs in hw/char/virtio-serial-bus.c. + - debian/patches/CVE-2024-3446-3.patch: protect from DMA re-entrancy + bugs in hw/display/virtio-gpu.c. + - CVE-2024-3446 + * SECURITY UPDATE: heap overflow in SDHCI device emulation + - debian/patches/CVE-2024-3447.patch: do not update TRNMOD when Command + Inhibit (DAT) is set in hw/sd/sdhci.c. + - CVE-2024-3447 + * SECURITY UPDATE: assert failure in checksum calculation + - debian/patches/CVE-2024-3567.patch: fix overrun in + update_sctp_checksum() in hw/net/net_tx_pkt.c. + - CVE-2024-3567 + * SECURITY UPDATE: resource consumption in disk utility + - debian/patches/CVE-2024-4467-1.patch: don't open data_file with + BDRV_O_NO_IO in block/qcow2.c, tests/qemu-iotests/061*. + - debian/patches/CVE-2024-4467-2.patch: don't store data-file with + protocol in image in tests/qemu-iotests/244. + - debian/patches/CVE-2024-4467-3.patch: don't store data-file with + json: prefix in image in tests/qemu-iotests/270. + - debian/patches/CVE-2024-4467-4.patch: parse filenames only when + explicitly requested in block.c. + - CVE-2024-4467 + * SECURITY UPDATE: heap overflow in virtio-net device RSS feature + - debian/patches/CVE-2024-6505.patch: ensure queue index fits with RSS + in hw/net/virtio-net.c. + - CVE-2024-6505 + * SECURITY UPDATE: Dos via improper synchronization during socket closure + - debian/patches/CVE-2024-7409-1.patch: plumb in new args to + nbd_client_add() in blockdev-nbd.c, include/block/nbd.h, + nbd/server.c, qemu-nbd.c. + - debian/patches/CVE-2024-7409-2.patch: cap default max-connections to + 100 in block/monitor/block-hmp-cmds.c, blockdev-nbd.c, + include/block/nbd.h, qapi/block-export.json. + - debian/patches/CVE-2024-7409-3.patch: close stray clients at + server-stop in blockdev-nbd.c. + - debian/patches/CVE-2024-7409-4.patch: drop non-negotiating clients in + nbd/server.c, nbd/trace-events. 
+ - debian/patches/CVE-2024-7409-5.patch: avoid use-after-free when + closing server in blockdev-nbd.c. + - CVE-2024-7409 + * SECURITY UPDATE: DoS via assert failure in usb_ep_get() + - debian/patches/CVE-2024-8354.patch: change ohci validation in + hw/usb/hcd-ohci.c, hw/usb/trace-events. + - CVE-2024-8354 + * SECURITY UPDATE: possibly binfmt privilege escalation (LP: #2120814) + - debian/binfmt-install: stop using C (Credentials) flag for + binfmt_misc registration. + + -- Marc Deslauriers Mon, 25 Aug 2025 14:10:37 -0400 + +qemu (1:8.2.2+ds-0ubuntu1.9) noble; urgency=medium + + * d/p/u/lp-2101053-pci-acpi-Windows-PCI-Label-Id-bug-workaround.patch: + fix windows virtio network by tolerarting a bad acpi call (LP: #2101053) + + -- Christian Ehrhardt Wed, 02 Jul 2025 11:36:20 +0200 + +qemu (1:8.2.2+ds-0ubuntu1.8) noble; urgency=medium + + * d/p/u/lp2101944/*: Synthesize IBPB_BRTYPE and SBPB CPUID bits to the guest + as described in AMD's Speculative Return Stack Overflow whitepaper. + (LP: #2101944) + + -- Lukas Märdian Wed, 19 Mar 2025 10:02:49 +0100 + +qemu (1:8.2.2+ds-0ubuntu1.7) noble; urgency=medium + + * d/p/u/lp2049698/*: Add full boot order support on s390x (LP: #2049698) + * Cherry-pick prerequisite for above backport (to avoid FTBFS): + - d/p/u/lp2049698/0-hw-s390x-sclp.c-include-s390-virtio-ccw.h-to-make.patch + * d/qemu-system-data.links: symlink s390-netboot.img -> s390-ccw.img for + backwards compatibility, as the code is now combined. + + [ Michael Tokarev ] + * d/rules: run ./configure in arch-indep build and build some roms from there. + After adding just a few more build-deps to common Build-Depends, + it is now possible to run ./configure in arch-indep step too. + Run ./configure, and switch s390-ccw and vof.bin builds from + ad-hoc instructions to using the regular qemu makefiles. + Move python3-venv dependency from Build-Depend-Arch to Build-Depend + so that ./configure can be run. + [cherry-pick https://salsa.debian.org/qemu-team/qemu/-/commit/5b5a97b] + + * Fix qemu-aarch64-static segfaults running ldconfig.real (LP: #2072564) + - lp-2072564-01-linux-user-Honor-elf-alignment-when-placing-images.patch + - lp-2072564-02-elfload-Fix-alignment-when-unmapping-excess-reservat.patch + Thanks to Dimitry Andric for identifying the fix. + + -- Lukas Märdian Thu, 13 Mar 2025 17:15:00 +0100 + +qemu (1:8.2.2+ds-0ubuntu1.6) noble; urgency=medium + + [ Gabriel B. Sant'Anna ] + * Fix emulation of RISC-V Vector instructions (LP: #2095169) + - d/p/u/lp2095169-riscv-vector-fixes-{01..12}.patch: ensure vstart_eq_zero + is updated at the end of each vector instruction. + - Changes come from upstream, but backporting to 8.2.2 required some + adjustment before patches could be applied cleanly. + - Thanks to Daniel Henrique Barboza + + -- Sergio Durigan Junior Fri, 24 Jan 2025 16:48:49 -0500 + +qemu (1:8.2.2+ds-0ubuntu1.5) noble; urgency=medium + + * d/p/u/lp-2091099-fix-9p-regression-cve-2023-2861.patch: Fix + regression regarding CVE-2023-2861 affecting 9p filesystems. 
+ (LP: #2091099) + + -- Sergio Durigan Junior Wed, 11 Dec 2024 22:06:49 -0500 + +qemu (1:8.2.2+ds-0ubuntu1.4) noble-security; urgency=medium + + * SECURITY UPDATE: denial of service + - debian/patches/CVE-2024-4693-1.patch: virtio-pci: fix use of a + released vector + - debian/patches/CVE-2024-4693-2.patch: virtio-pci: Fix the use of + an uninitialized irqfd + - CVE-2024-4693 + * SECURITY UPDATE: heap buffer overflow + - debian/patches/CVE-2024-7730.patch: add max size bounds check in + input cb + - CVE-2024-7730 + + -- Bruce Cable Tue, 22 Oct 2024 15:57:13 +1100 + +qemu (1:8.2.2+ds-0ubuntu1.3) noble; urgency=medium + + * d/p/u/lp2076927-fix-time-based-freq-kvm.patch: Fix + timebase-frequency when using KVM acceleration on RISC-V. (LP: #2076927) + * d/p/u/lp2028964-add-support-sierra-forest.patch: Add support for + Sierra Forest CPU model. (LP: #2028964) + * Fail gracefully when hotplugging a vCPU fails on PPC. (LP: #2076587) + - d/p/u/lp2076587-cpu-hotplug-crashes-guest-*.patch: Backport + patches for upstream fix. + + -- Sergio Durigan Junior Tue, 24 Sep 2024 22:57:11 -0400 + +qemu (1:8.2.2+ds-0ubuntu1.2) noble-security; urgency=medium + + * SECURITY UPDATE: buffer overflow + - debian/patches/CVE-2024-26327.patch: Check num_vfs size + - CVE-2024-26327 + * SECURITY UPDATE: out of bounds memory access + - debian/patches/CVE-2024-26328.patch: Use pcie_sriov_num_vfs to + get number of enabled vfs before and after config writes + - CVE-2024-26328 + + -- Bruce Cable Wed, 21 Aug 2024 11:53:08 +1000 + +qemu (1:8.2.2+ds-0ubuntu1.1) noble-proposed; urgency=medium + + * SRU: LP: #2076340: No-change rebuild to pick up changed build flags + on ppc64 and s390x. + + -- Matthias Klose Fri, 09 Aug 2024 04:33:21 +0200 + +qemu (1:8.2.2+ds-0ubuntu1) noble; urgency=medium + + * Merge version 8.2.2 from upstream. (LP: #2061005). Cherry-picks from + Debian: + - d/p/ui-clipboard-mark-type-as-not-available-when-no-data-CVE-2023-6683.patch: + Remove patch; included upstream. + - d/control: clarify qemu-system-gui description: this is not a + management gui for qemu + - d/rules: stop qemu-system-${arch} packages from providing + themselves (#1063233) + - d/control{,-in}: Fix typo on qemu-system-gui description. + + -- Sergio Durigan Junior Fri, 12 Apr 2024 18:13:51 -0400 + +qemu (1:8.2.1+ds-1ubuntu9) noble; urgency=medium + + * No-change rebuild for CVE-2024-3094 + + -- William Grant Mon, 01 Apr 2024 18:20:15 +1100 + +qemu (1:8.2.1+ds-1ubuntu8) noble; urgency=medium + + * d/p/u/lp2012763-maxcpus-too-low.patch: Actually set the max_cpus + property of the new Mantic machine types. (LP: #2012763) + + -- Sergio Durigan Junior Mon, 25 Mar 2024 14:58:39 -0400 + +qemu (1:8.2.1+ds-1ubuntu7) noble; urgency=medium + + * d/p/u/lp2012763-maxcpus-too-low.patch: Bump max_cpus to 1024 on + Jammy amd64 machine types. 
(LP: #2012763) + + -- Sergio Durigan Junior Mon, 18 Mar 2024 16:48:22 -0400 + +qemu (1:8.2.1+ds-1ubuntu6) noble; urgency=medium + + * No-change rebuild against libcurl3t64-gnutls + + -- Steve Langasek Sat, 16 Mar 2024 07:16:54 +0000 + +qemu (1:8.2.1+ds-1ubuntu5) noble; urgency=medium + + * No-change rebuild against libglib2.0-0t64 + + -- Steve Langasek Mon, 11 Mar 2024 23:31:21 +0000 + +qemu (1:8.2.1+ds-1ubuntu4) noble; urgency=medium + + * No-change rebuild against libgnutls30t64 + + -- Steve Langasek Sun, 10 Mar 2024 02:11:43 +0000 + +qemu (1:8.2.1+ds-1ubuntu3) noble; urgency=medium + + * No-change rebuild against libpng16-16t64 + + -- Steve Langasek Thu, 29 Feb 2024 07:54:00 +0000 + +qemu (1:8.2.1+ds-1ubuntu2) noble; urgency=medium + + * d/p/u/lp-2055003-*: Properly initialize max_cpus limit to + SPAPR_IRQ_NR_IPIS, fixing a segfault on ppc64el. (LP: #2055003) + + -- Sergio Durigan Junior Mon, 26 Feb 2024 15:32:25 -0500 + +qemu (1:8.2.1+ds-1ubuntu1) noble; urgency=medium + + * Merge with Debian unstable (LP: #2051883, #2049703). Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type + (LP 1304107 1621042 1776189 1761372 1761372 1776189) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types containing release versioned machine attributes + - Add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - tolerate ipxe size change on migrations to >=18.04 (LP 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - Ease the use of module retention on upgrades (LP 1913421) + - debian/qemu-block-extra.postinst: enable mount unit on install/upgrade + - Remaining GCC-12 FTBFS (LP 1988710 + LP 1921664) + + d/p/u/qboot-Disable-LTO-for-ELF-binary-build-step.patch: + fix qboot FTBFS with LTO + - d/rules: Enable/disable extra features on microvm + variant. (LP #2045594) + - Move glusterfs storage driver to Universe in a new package + (LP #2045063): + + d/control{,-in}: new package qemu-block-supplemental for drivers + we want in Universe + + d/rules: we only want block-gluster.so in the new + qemu-block-supplemental package. Adjust dynamically-created + maintainer scripts for qemu-block-extra and -supplemental. 
+ + -- Sergio Durigan Junior Wed, 07 Feb 2024 13:01:14 -0500 + +qemu (1:8.2.1+ds-1) unstable; urgency=medium + + * new upstream stable/bugfix release + * remove all upstream-applied patches + * d/patches/note-missing-module-pkg-name.diff: fixup + * replace fix for CVE-2023-6683 (A different fix from upstream) + * remove the mistakenly-added temp file in d/qemu-block-extra/ + * d/.gitignore: refresh + + -- Michael Tokarev Tue, 30 Jan 2024 10:32:17 +0300 + +qemu (1:8.2.0+ds-5) unstable; urgency=medium + + * d/rules, d/run-qemu.mount: use dh_installsystemd to install run-qemu.mount + (Closes: #1060087) + * update hppa and seabios-hppa patch series + * ui-clipboard-avoid-crash-upon-request-when-clipboard-CVE-2023-6683.patch + (Closes: #1060749, CVE-2023-6683) + * +target-s390x-Fix-LAE-setting-a-wrong-access-register.patch + * +tcg-s390x-Fix-encoding-of-VRIc-VRSa-VRSc-insns.patch + fix chacha20 issue on s390x + * update hw-vfio-fix-iteration-over-global-VFIODevice-list.patch + + -- Michael Tokarev Thu, 18 Jan 2024 10:16:31 +0300 + +qemu (1:8.2.0+ds-4ubuntu2) noble; urgency=medium + + * Move glusterfs storage driver to Universe in a new package + (LP: #2045063): + - d/control{,-in}: new package qemu-block-supplemental for drivers + we want in Universe + - d/rules: we only want block-gluster.so in the new + qemu-block-supplemental package. Adjust dynamically-created + maintainer scripts for qemu-block-extra and -supplemental. + + -- Andreas Hasenack Fri, 02 Feb 2024 14:07:00 -0300 + +qemu (1:8.2.0+ds-4ubuntu1) noble; urgency=medium + + * Merge with Debian unstable (LP: #2048802, #2048776). Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type + (LP 1304107 1621042 1776189 1761372 1761372 1776189) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types containing release versioned machine attributes + - Add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - tolerate ipxe size change on migrations to >=18.04 (LP 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - Ease the use of module retention on upgrades (LP 1913421) + - debian/qemu-block-extra.postinst: enable mount unit on install/upgrade + - Remaining GCC-12 FTBFS (LP 1988710 + LP 1921664) + + d/p/u/qboot-Disable-LTO-for-ELF-binary-build-step.patch: + fix qboot FTBFS with LTO + * Drop changes: + - d/p/u/lp2003673-*.patch: Enable passthrough of IBM Z crypto + hardware to Secure Execution guests. (LP #2003673) + [ Incorporated by upstream on version 8.2.0. ] + * Add changes: + - d/rules: Enable/disable extra features on microvm + variant. 
(LP: #2045594) + + -- Sergio Durigan Junior Wed, 10 Jan 2024 19:10:46 -0500 + +qemu (1:8.2.0+ds-4) unstable; urgency=medium + + * d/rules: fix "tail -20" usage + * note-missing-module-pkg-name.diff: update, to be much more accurate + No more sporadic warnings about missing audio backends etc + * d/control: clarify qemu-system-gui and qemu-system-modules-* package + descriptions a little bit (#1059457) + * more fixups from the ML targetting stable: + + hw-net-cadence_gem-fix-MDIO_OP_xxx-values.patch + + tcg-ppc-use-new-registers-for-LQ-destination.patch + + target-riscv-fix-mcycle-minstret-increment-behavior.patch + * a bunch of hppa and seabios-hppa fixes targetting -stable for + https://gitlab.com/qemu-project/qemu/-/issues/2044 + + -- Michael Tokarev Thu, 04 Jan 2024 22:47:59 +0300 + +qemu (1:8.2.0+ds-3) unstable; urgency=medium + + * +virtio-net-correctly-copy-vnet-header-when-flushing-TX-CVE-2023-6693.patch + Fix CVE-2023-6693 (virtio-net: stack buffer overflow in virtio_net_flush_tx) + * +target-i386-the-sgx_epc_get_section-stub-is-reachable.patch + * +target-xtensa-fix-OOB-TLB-entry-access.patch + * d/rules: print last 20 lines of config.log & meson.log if ./configure fails + + -- Michael Tokarev Tue, 02 Jan 2024 15:54:35 +0300 + +qemu (1:8.2.0+ds-2) unstable; urgency=medium + + * include-ui-rect.h-fix-qemu_rect_init-mis-assignment.patch + fixes virtio-gpu redraw issue (Closes: #1059211) + * hw-vfio-fix-iteration-over-global-VFIODevice-list.patch + fixes reboot issue with virtio-gpu + * target-i386-do-not-re-compute-new-pc-with-CF_PCREL.patch + fixes 4M edk2 stall in i386 tcg mode + * block-fix-crash-when-loading-snapshot-on-inactive-no.patch + fix possible assertion failure when loading snapshot + + -- Michael Tokarev Tue, 02 Jan 2024 12:10:14 +0300 + +qemu (1:8.2.0+ds-1) unstable; urgency=medium + + * new upstream release 8.2.0 + Closes: #1013952 + * d/rules: re-enable building static-pie binaries (the default) for + qemu-user-static again (formally Closes: #1053101, LP:#1908331) + * d/rules: add --disable-pie for static build on i386 due to #1056739 + * d/control: qemu-system-x86 depends on seabios >>1.16.3-1 due to ahci fix + * d/qemu-user-static.lintian-overrides: +shared-library-lacks-prerequisites + for static-pie executables + * d/rules: omit qemu-user-static package from dh_shlibdeps run + since dpkg-shlibdeps complains about static-pie binaries + * d/rules: fix bugzilla.redhat.com url (migrated to issues.redhat.com) + * d/patches: remove patches applied upstream + * d/patches, d/rules: use --disable-relocatable instead of a patch + * d/patches: refresh disable-xen-on-x32.patch + * d/control: --enable-pixman (which is optional now) + * d/rules: vnc needs pixman too (for xen and microvm builds) + * d/copyright: stop excluding subprojects/dtc (not included anymore) + * d/source/lintian-overrides: +source-is-missing for vdso.so files + + -- Michael Tokarev Wed, 20 Dec 2023 18:21:19 +0300 + +qemu (1:8.1.3+ds-1ubuntu2) noble; urgency=medium + + * d/p/u/define-ubuntu-machine-types.patch: Remove -hpb Noble machine + types, as they are not needed by OpenStack anymore. (LP: #2045592) + + -- Sergio Durigan Junior Mon, 04 Dec 2023 16:44:44 -0500 + +qemu (1:8.1.3+ds-1ubuntu1) noble; urgency=medium + + * Merge with Debian unstable (LP: #2044425, #2039700). 
Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type + (LP 1304107 1621042 1776189 1761372 1761372 1776189) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types containing release versioned machine attributes + - Add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - tolerate ipxe size change on migrations to >=18.04 (LP 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - Ease the use of module retention on upgrades (LP 1913421) + - debian/qemu-block-extra.postinst: enable mount unit on install/upgrade + - Remaining GCC-12 FTBFS (LP 1988710 + LP 1921664) + + d/p/u/qboot-Disable-LTO-for-ELF-binary-build-step.patch: + fix qboot FTBFS with LTO + - d/p/u/lp2003673-*.patch: Enable passthrough of IBM Z crypto + hardware to Secure Execution guests. (LP #2003673) + * Drop changes: + - d/rules: Incorporate the following changes from Debian unstable, in + order to fix the FTBFS caused by -fcf-protection: + + d/rules: move icons install rules to install-misc section + + d/rules: stop running whole thing with dh, take back *-indep sequence + + d/rules: implement arch-dependent install/build targets without dh too + [ Fixed in Debian. ] + - d/rules: Get rid of binary-helper target; explicitly invoke its + commands under binary-{arch,indep}. This makes the build succeed + again in Ubuntu, where binary-helper wasn't being properly invoked. + [ Fixed in Debian. ] + - d/p/u/lp2003673-update-linux-headers-6.3rc5.patch, + d/p/u/lp2003673-update-linux-headers-6.5rc1.patch, + d/p/u/lp2003673-s390x-fix-missing-subsystem-reset-registration.patch: + Drop some of the patches to Enable passthrough of IBM Z crypto + hardware to Secure Execution guests. (LP #2003673) + [ Applied upstream. 
] + + -- Sergio Durigan Junior Wed, 22 Nov 2023 21:34:19 -0500 + +qemu (1:8.1.3+ds-1) unstable; urgency=medium + + * new upstream stable/bugfix release + * remove patches applied upstream: + - linux-user-Fixes-for-zero_bss.patch + - target-mips-Fix-MSA-BZ-BNZ-opcodes-displacement.patch + - hw-ide-reset-cancel-async-DMA-operation-before-reset.patch + * d/control, d/qemu-system-gui.install: enable pipewire audio support + (Closes: #1055221) + + -- Michael Tokarev Wed, 22 Nov 2023 17:56:06 +0300 + +qemu (1:8.1.2+ds-1) unstable; urgency=medium + + * upstream 8.1.2 stable/bugfix release + * remove all stable-staging/ patches and two more + (all included into 8.1.2) + * d/rules: microvm build: do not explicitly enable avx2 + + -- Michael Tokarev Tue, 17 Oct 2023 09:44:39 +0300 + +qemu (1:8.1.1+ds-2) unstable; urgency=medium + + * d/rules: fix binary target to produce both arch and indep + binaries instead of omitting indep one(s) + * d/patches: sync with current staging-8.1 branch, many new fixes + * additional fixes: + +hw-ide-ahci-fix-legacy-software-reset.patch + +target-mips-Fix-MSA-BZ-BNZ-opcodes-displacement.patch + +hw-ide-reset-cancel-async-DMA-operation-before-reset.patch + + -- Michael Tokarev Sun, 08 Oct 2023 12:28:55 +0300 + +qemu (1:8.1.1+ds-1) unstable; urgency=medium + + * new upstream stable/bugfix release + * remove all stable-staging/ patches, keep + softmmu-Use-async_run_on_cpu-in-tcg_commit.patch + * vfio-display-fix-missing-update-to-set-backing-field.patch + * scsi-disk-disallow-small-block-sizes-CVE-2023-42467.patch + (Closes: #1051899, CVE-2023-42467) + * migration-qmp-Fix-crash-on-setting-tls-authz-with-nu.patch + * d/patches/move-vl-opts/ - stop linking everything with async-teardown.c, + un-FTBFS on ia64 + * d/control: minor: remove old todo comments + * d/control: disable rbd (ceph) on 32bit platforms (Closes: #1053172) + * d/control: enable rbd on riscv64 once it's built there + * d/copyright: also remove subprojects/dtc + + -- Michael Tokarev Sun, 01 Oct 2023 22:11:24 +0300 + +qemu (1:8.1.0+ds-6) unstable; urgency=medium + + * re-enable softmmu-Use-async_run_on_cpu-in-tcg_commit.patch + * add https://www.mail-archive.com/qemu-devel@nongnu.org/msg989073.html + fixing https://gitlab.com/qemu-project/qemu/-/issues/1866 + * d/rules: reorder some definitions to evaluate in proper order + + -- Michael Tokarev Wed, 20 Sep 2023 23:31:59 +0300 + +qemu (1:8.1.0+ds-5) unstable; urgency=medium + + * disable softmmu-Use-async_run_on_cpu-in-tcg_commit.patch + The change in softmmu-Use-async_run_on_cpu-in-tcg_commit.patch + which is a fix for https://gitlab.com/qemu-project/qemu/-/issues/1864 + (x86 VM with TCG and SMP fails to start on 8.1.0) + introduces https://gitlab.com/qemu-project/qemu/-/issues/1866 + * more patches from stable-staging + * re-introduce qemu-debootstrap for now until all users of it will + be converted to regular debootstrap + + -- Michael Tokarev Sun, 17 Sep 2023 19:10:34 +0300 + +qemu (1:8.1.0+ds-4) unstable; urgency=medium + + * d/changelog: fix spelling + * +linux-user-Fixes-for-zero_bss.patch: fix linux-user zero_bss bug + * many small changes for d/rules + * d/rules: split out qemu-user build out out of main qemu build + When both system and linux-user builds are enabled, linux-user + build is getting features only relevant for system (softmmu) + configuration, like linking with liburing, libnuma and other + softmmu-only stuff. So build it separately. 
+ This not only makes qemu-user smaller and neater, but it also + makes Built-Using field for qemu-user-static (which is generated + from Depends field of qemu-user) accurate. + * d/qemu-user[-static].docs: use unprocessed .rst doc instead of html + * d/rules: check for nocheck in DEB_BUILD_PROFILES too, + not only DEB_BUILD_OPTIONS + * d/rules, d/control: disable build-time test due to apparent dak bug + + -- Michael Tokarev Mon, 11 Sep 2023 14:19:32 +0300 + +qemu (1:8.1.0+ds-3) unstable; urgency=medium + + * d/control: split out most of Build-Depends to Build-Depends-Arch, in order + to break B-D loop on qemu-system-data (it is only needed for -Arch) and + to reduce arch-all build time. + Only very few things left in common Build-Depends. + * Removing most things from B-D discovered that skiboot includes openssl + header(s) (!), so add libssl-dev to Build-Depends-Indep. + * d/control,d/rules: introduce build profiles to omit building some packages + (mostly debugging aid, to reduce test build run time). + * d/rules: use ninja directly for various qemu builds + (non-verbose build now shows errors/warnings nicely) + * d/control: Rules-Requires-Root: "binary-targets", not "no", - + this enables building as non-root, finally + * d/rules: add forgotten -p for mkdir b/user-static + * d/not-installed: list 2 files from user/ manual + (which is built even on unsupported architectures) + + -- Michael Tokarev Sun, 10 Sep 2023 01:30:15 +0300 + +qemu (1:8.1.0+ds-2) unstable; urgency=medium + + * d/control: fix descriptions of qemu-system-gui and + qemu-system-modules-spice packages + * update lintian-overrides + * d/rules: enable verbose (-v) build for qboot + * d/rules: move lto control to where it actually works + * d/rules: remove usage of "standard dh sequencer". + It has multiple issues. To name a few: + - it exports CFLAGS &Co which breaks badly when trying to compile + bios/firmware code (fixes FTBFS with new -fcf-protection) + - it performs multiple recursive calls to d/rules which is + slow when make variables are set using $(shell), - annoying + when debugging + - it hides actual actions being done at install/binary stages + - it is confusing in override_dh_foo{,-indep,-arch} + - it does just too much unknown magic, - just give the control + back. + + -- Michael Tokarev Sat, 09 Sep 2023 22:37:02 +0300 + +qemu (1:8.1.0+ds-1) unstable; urgency=medium + + * d/changelog: mention closing of #984451, CVE-2021-20255 by 8.1 + * d/changelog: mention closing of #1041471 by 8.1 + * d/patches: add patches currenly staged for 8.1.1 + * d/gbp.conf: switch from experimental to master + * upload to unstable + + -- Michael Tokarev Sat, 09 Sep 2023 17:03:54 +0300 + +qemu (1:8.1.0+ds-1~exp2) experimental; urgency=medium + + * qemu-system-modules-spice & qemu-system-modules-opengl packages, + containing optional spice and opengl modules from qemu-system-common. + Both are recommended by all qemu-system-* but can be removed if not + used, to reduce list of dependencies. 
+ + -- Michael Tokarev Wed, 23 Aug 2023 12:37:55 +0300 + +qemu (1:8.1.0+ds-1~exp1) experimental; urgency=medium + + * new upstream release + Closes: #1041102, CVE-2023-3019 (NIC DMA reentrancy issue, problem class) + Closes: CVE-2021-3750 (DMA MMIO reentrancy issue, problem class) + Closes: #984451, CVE-2021-20255 (DMA reentrancy issue) + Closes: #1041471 (qemu-user armel commpage mapping bug) + * d/watch: change repack suffix to +ds + * d/patches: remove patches applied upstream + * disable-xen-on-x32.patch: refresh + * d/copyright: stop stripping dtc/ and meson/, removed upstream + * d/rules: replace --with-git-submodules=ignore with --disable-download + * d/control: build-depend on python3-venv + * d/control: bump minimum meson version to 0.63.0 + * d/control: build-depend on seabios & qemu-system-data for the testsuite. + qemu testsuite runs qemu-system binaries which require firmware even for + simple tests + * d/rules: run `make check-block' after the main build, as a minimal test + for now + * qemu-img-omit-errno-value-in-error-message.patch fixes check-block tests + on mips* where errno values are different from other architectures. + * late fix for 8.1 linux-user-Adjust-brk-for-load_bias.patch + + -- Michael Tokarev Wed, 23 Aug 2023 08:01:13 +0300 + +qemu (1:8.0.4+dfsg-3) unstable; urgency=medium + + * d/rules: export PYTHONDONTWRITEBYTECODE=1 to stop generating .pyc files + (Closes: #1046056) + * d/control: list more CPU types emulated by qemu in package descriptions + * d/control: refine qemu-system-gui package description + * d/rules: remove --interp-prefix= configure option + * late fix for 8.1: target-arm-Fix-SME-ST1Q.patch + * late fix for 8.1: target-arm-Fix-64-bit-SSRA.patch + * d/control: remove old versions from build-deps + + -- Michael Tokarev Tue, 22 Aug 2023 20:15:07 +0300 + +qemu (1:8.0.4+dfsg-2) unstable; urgency=medium + + * remove linux-user-show-heap-address-in-proc-pid-maps.patch + * pick 2 nvme fixes from upstream: + - hw-nvme-fix-oob-memory-read-in-fdp-events-log-CVE-2023-4135.patch + Closes: #1050142, CVE-2023-4135 + - hw-nvme-fix-null-pointer-access-in-directive-receive-CVE-2023-40360.patch + Closes: #1050140, CVE-2023-40360 + * d/rules: --enable-virtfs (--enable-attr --enable-cap-ng) for xen build + to enable 9pfs (Closes: #1049925) + * d/rules: run-qemu.mount is linux-specific too + (if we ever do non-linux system build) + * d/control: disable sndio on debian too (disabled on ubuntu), for now anyway + * d/*.install, d/rules: explicitly list all qemu-system modules + * d/control: build-depend on libglib2.0-dev (forgotten!) and zlib1g-dev, + move the two to the top before all optional deps + * d/changelog: fix 7.1+dfsg-1 changelog entry (qemu-user and qemu-system) + + -- Michael Tokarev Mon, 21 Aug 2023 09:57:59 +0300 + +qemu (1:8.0.4+dfsg-1ubuntu5) noble; urgency=medium + + * d/p/u/lp2003673-*.patch: Enable passthrough of IBM Z crypto + hardware to Secure Execution guests. (LP: #2003673) + + -- Sergio Durigan Junior Thu, 16 Nov 2023 10:35:58 -0500 + +qemu (1:8.0.4+dfsg-1ubuntu4) noble; urgency=medium + + * Rebuild against new libnfs14. + + -- Gianfranco Costamagna Fri, 27 Oct 2023 10:46:01 +0200 + +qemu (1:8.0.4+dfsg-1ubuntu3) mantic; urgency=medium + + * d/rules: Get rid of binary-helper target; explicitly invoke its + commands under binary-{arch,indep}. This makes the build succeed + again in Ubuntu, where binary-helper wasn't being properly invoked. 
+ + -- Sergio Durigan Junior Tue, 03 Oct 2023 18:13:20 -0400 + +qemu (1:8.0.4+dfsg-1ubuntu2) mantic; urgency=medium + + * d/rules: Incorporate the following changes from Debian unstable, in + order to fix the FTBFS caused by -fcf-protection: + - d/rules: implement arch-dependent install/build targets without dh too + - d/rules: stop running whole thing with dh, take back *-indep sequence + - d/rules: move icons install rules to install-misc section + + -- Sergio Durigan Junior Wed, 27 Sep 2023 14:53:27 -0400 + +qemu (1:8.0.4+dfsg-1ubuntu1) mantic; urgency=medium + + * Merge with Debian unstable. Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type + (LP 1304107 1621042 1776189 1761372 1761372 1776189) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types containing release versioned machine attributes + - Add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - tolerate ipxe size change on migrations to >=18.04 (LP 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - Ease the use of module retention on upgrades (LP 1913421) + - debian/qemu-block-extra.postinst: enable mount unit on install/upgrade + - Remaining GCC-12 FTBFS (LP 1988710 + LP 1921664) + + d/p/u/qboot-Disable-LTO-for-ELF-binary-build-step.patch: + fix qboot FTBFS with LTO + + -- Sergio Durigan Junior Mon, 14 Aug 2023 16:28:34 -0400 + +qemu (1:8.0.4+dfsg-1) unstable; urgency=medium + + * new upstream stable/bugfix release + Closes: CVE-2023-3180 (virtual crypto virtio_crypto_handle_sym_req) + Closes: CVE-2023-3354 (VNC server QIOChannel NULL ptr deref) + Closes: CVE-2023-3255 (VNC: infinite loop in inflate_buffer) + * d/patches: remove patches picked up from stable-staging branch + which are applied in 8.0.4 + * d/control: build-depend on libglib2.0-dev (forgotten!) and zlib1g-dev, + move the two to the top before all optional deps + * remove xen-specific wrapper for qemu-system-i386 + (needed for bookworm upgrade only) + + -- Michael Tokarev Fri, 11 Aug 2023 22:13:36 +0300 + +qemu (1:8.0.3+dfsg-5) unstable; urgency=medium + + * remove previous 2 mmap/brk patches for now + linux-user-optimize-memory-layout-for-static-and-dyn.patch + linux-user-load-pie-executables-at-upper-memory.patch + These are intended for 8.1, and causes other issues on 8.0. + Closes: #1042808 + Reopens: #1040981 + + -- Michael Tokarev Wed, 02 Aug 2023 10:55:50 +0300 + +qemu (1:8.0.3+dfsg-4ubuntu1) mantic; urgency=medium + + * Merge with Debian unstable (LP: #2028873, #2028124). 
Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type + (LP 1304107 1621042 1776189 1761372 1761372 1776189) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types containing release versioned machine attributes + - Add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - tolerate ipxe size change on migrations to >=18.04 (LP 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - Ease the use of module retention on upgrades (LP 1913421) + - debian/qemu-block-extra.postinst: enable mount unit on install/upgrade + - Remaining GCC-12 FTBFS (LP 1988710 + LP 1921664) + + d/p/u/qboot-Disable-LTO-for-ELF-binary-build-step.patch: + fix qboot FTBFS with LTO + + -- Sergio Durigan Junior Mon, 31 Jul 2023 23:09:27 -0400 + +qemu (1:8.0.3+dfsg-4) unstable; urgency=medium + + * more linux-user address fixes from Helge Deller + Remove + stable-staging/linux-user-fix-qemu-arm-to-run-static-armhf-binaries.patch + linux-user-limit-brk-adjustment-wrt-interp.brk-to-arm32.patch + Add + linux-user-show-heap-address-in-proc-pid-maps.patch + linux-user-optimize-memory-layout-for-static-and-dyn.patch + linux-user-load-pie-executables-at-upper-memory.patch + This *might* fix #1041859. + * stable-staging/tcg-ppc-fix-race-in-goto_tb-implementation.patch + fix qemu sigsegv on ppc -smp. Should fix autopkgtests (debvm, others) + * Stop passing --no-start to qga's dh_installsystemd. + qga is activated from an udev rule, but we need to restart it on upgrade. + Change by Sergio Durigan. Closes: LP#2028124. + + -- Michael Tokarev Wed, 26 Jul 2023 07:51:20 +0300 + +qemu (1:8.0.3+dfsg-3) unstable; urgency=medium + + * d/control: glusterfs: drop pre-buster glusterfs-common alternative, + restrict glusterfs support to 64bit (see #1039604) + * linux-user-limit-brk-adjustment-wrt-interp.brk-to-arm32.patch + Fix (band-aid for now) an unexpected breakage caused by the previous + patch in this area which fixes static executables loading on armhf. 
+ * d/binfmt-install: update mips* magic strings from upstream commit + 77d119dd335f910c7: + mips: allow nonzero EI_ABIVERSION, distinguish o32 and n32 + (Closes: #1041597) + + -- Michael Tokarev Sat, 22 Jul 2023 11:53:38 +0300 + +qemu (1:8.0.3+dfsg-2) unstable; urgency=medium + + * d/patches: set Forwarded: URLs for some patches + * add 5 qemu-user fixes staging for the next stable: + linux-user-make-sure-initial-brk-0-is-page-aligned.patch + linux-user-fix-qemu-brk-to-not-zero-bytes-on-current-page.patch + linux-user-prohibit-brk-to-to-shrink-below-initial-address.patch + linux-user-fix-signed-math-overflow-in-brk-syscall.patch + linux-user-fix-qemu-arm-to-run-static-armhf-binaries.patch + (Closes: #1040981) + + -- Michael Tokarev Thu, 20 Jul 2023 09:59:49 +0300 + +qemu (1:8.0.3+dfsg-1) unstable; urgency=medium + + * new upstream stable/bugfix release 8.0.3 + Including the following security fix(es): + - 9pfs: prevent opening special files (CVE-2023-2861) + * remove patches now included upstream: + - hw-mips-malta-fix-the-malta-machine-on-big-endian-hosts.patch + - qga-fix-suspend-on-linux-guests-without-systemd.patch + - hw_intc_allwinner-a10-pic-handle-IRQ-levels-other-than-0-or-1.patch + - linux-user-Avoid-mmap-of-the-last-byte-of-the-reserv.patch + * d/rules: omit qemu-systemd-data from dh_dwz run + * d/rules: create qemu-system-armhf & qemu-system-armel aliases + for qemu-system-arm (Closes: #1040209) + + -- Michael Tokarev Tue, 11 Jul 2023 15:07:04 +0300 + +qemu (1:8.0.2+dfsg-3) unstable; urgency=medium + + * d/patches/*: update, add DEP-3 headers + * d/rules: strip ../../ prefix from compile paths + to undo sub-subdir build (-ffile-prefix-map) + * linux-user-Avoid-mmap-of-the-last-byte-of-the-reserv.patch: + (hackish) fix for recent memory failures + + -- Michael Tokarev Thu, 29 Jun 2023 18:36:33 +0300 + +qemu (1:8.0.2+dfsg-2ubuntu1) mantic; urgency=medium + + * Merge with Debian unstable (LP: #2018103). Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type + (LP 1304107 1621042 1776189 1761372 1761372 1776189) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types containing release versioned machine attributes + - Add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - tolerate ipxe size change on migrations to >=18.04 (LP 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. 
+ - Ease the use of module retention on upgrades (LP 1913421) + - debian/qemu-block-extra.postinst: enable mount unit on install/upgrade + - Remaining GCC-12 FTBFS (LP 1988710 + LP 1921664) + + d/p/u/qboot-Disable-LTO-for-ELF-binary-build-step.patch: + fix qboot FTBFS with LTO + * Drop changes: + - d/control-in: libnfs is in main since focal, enable direct nfs + storage support (LP 1988704) + [ Adopted by Debian. ] + - d/control-in: libsndio is in universe in ubuntu + [ Adopted by Debian. ] + - Fix FTBFS with glibc >= 2.36. (LP #2015418) + + d/p/fix-ftbfs-glibc-*.patch: Revert now-unnecessary + upstream commits that were working around a glibc issue. + [ Incorporated upstream. ] + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + [ Debian linked the qemu-system-x86 documentation with the + qemu-system-common package, rendering this README file not + applicable. ] + - d/p/u/allow-repeating-hot-unplug-requests.patch: Allow repeating + hot-unplug requests by making ACPI PCI able to requeue them. + (LP #2018733) + [ Applied upstream. ] + + -- Sergio Durigan Junior Mon, 19 Jun 2023 15:45:09 -0400 + +qemu (1:8.0.2+dfsg-2) unstable; urgency=medium + + * d/rules: --enable-libusb for xen build (Closes: #1037341) + * reapply linux-user-binfmt-P.diff. + Re-rely on qemu-user's argv0 to detect it is running in binfmt context. + The problem is that while we ship kernel which can pass this info the + qemu way, there are many containers which are running on older kernels + still, including bullseye kernel (5.10) which does not have this feature. + Keep it for a bit more. + * hw_intc_allwinner-a10-pic-handle-IRQ-levels-other-than-0-or-1.patch + Pick a patch from upstream mailinglist to fix regression in 8.0.2 + + -- Michael Tokarev Thu, 15 Jun 2023 22:25:50 +0300 + +qemu (1:8.0.2+dfsg-1) unstable; urgency=medium + + * new upstream stable/bugfix release + Closes: #1029155, CVE-2023-0330: + A DMA-MMIO reentrancy problem in lsi53c895a device + * keep full upstream version number, not just first 2 components + (Closes: #855966) + * d/copyright: remove stray newline + * d/control: drop libuuid-dev build-dep (not used) + * clarify files in d/not-installed just a little bit + * fixup qemu(1) refs in qemu-storage-daemon(1) + * move qemu-storage-daemon and qemu-block-drivers.7 + from qemu-system-common to qemu-utils + * remove patches now included upstream: + - linux-user-fix-getgroups-setgroups-allocations.patch + - rtl8139-fix-large_send_mss-divide-by-zero.patch + - target_i386-Change-wrong-XFRM-value.patch + * qga-fix-suspend-on-linux-guests-without-systemd.patch + (hopefully Closes: #1004943) + * d/rules: disable pvrdma (Closes: #1034179, CVE-2023-1544) + CVE-2023-1544: + huge number of page tables for a ring of descriptors for CQ and + async events, potentially leading to an OOB read and crash + + -- Michael Tokarev Sun, 11 Jun 2023 11:49:17 +0300 + +qemu (1:8.0+dfsg-4) experimental; urgency=medium + + * d/control: do not use --enable-spice on sh4 and --enable-seccomp + on hppa where qemu-system is not being built + * spelling-information.patch: add headers + * merge d/qemu-system-x86.NEWS into d/qemu-system-common.NEWS + * d/rules: migrate docs for individual qemu-system-foo into symlinks + pointing to qemu-system-common docs + * d/rules: move qemu-user-binfmt doclink to the proper place, + and remove installdocs and installchangelogs overrides + * d/qemu-system-common.README.Debian, d/qemu-user*.README.Debian: + update statement about ppc64el in READMEs + * d/rules: switch to more 
declarative approach to generating various + qemu-system-foo packages, fix some bugs + * d/rules: switch to use "-" in variable names instead of "_" + to avoid exporting variables from environment + * add qemu-system-for-arch package (commented-out for now) + * refresh d/source/lintian-overrides + + -- Michael Tokarev Fri, 21 Apr 2023 19:11:53 +0300 + +qemu (1:8.0+dfsg-3) experimental; urgency=medium + + * Release highlights: + - build only tools on unsupported arches + - much easier arch control in a single place + - much easier dependency/options control + - stop building system targets on ia64 and kfreebsd + * d/control: generate Architecture: field dynamically from d/rules + * demote ia64 and kfreebsd from system arches to tools arches + * include m68k into list of utils arches + * optional dependencies and --enable-feature in d/control: + - update d/extract-config-opts to expect dpkg-like [arch] patterns + - d/control: unify and simplify arch strings for --enable-foo + and dependencies + - d/control: switch some build-deps to [:system-arch:] + * stop using --enable-tcg-interpreter for unsupported arches, + add --enable-tools for main qemu build + * provide --disable-xkbcommon to stop building qemu-keymap tool (!) + * d/rules: for !enable-system build, remove qemu.1 manpage + * install upstream qemu.desktop file instead of debian-specific + * d/*.install: list files relative to d/tmp/, use ${DEB_HOST_MULTIARCH} + * d/qemu-system-common.install: move 3 linux-specific files + from d/rules to here + * d/rules: move qemu-block-extra maintscript/savedir generation + to inside enable-system + + -- Michael Tokarev Thu, 20 Apr 2023 20:50:35 +0300 + +qemu (1:8.0+dfsg-2) experimental; urgency=medium + + * re-add the dropped-on-the-way Provides: qemu-system-any + * specify versions for all Provides: so it's possible to add versioned deps + (including qemu-system-any and qemu-kvm) + * d/control: collapse Depends: qemu-system-* into the new qemu-system-any + * drop Breaks:/Replaces: qemu-kvm (it was for old qemu-kvm binary pkg) + + -- Michael Tokarev Thu, 20 Apr 2023 13:09:57 +0300 + +qemu (1:8.0+dfsg-1) experimental; urgency=medium + + * New qemu release 8.0.0. + * remove binfmt-support registration, use systemd binfmt.d/ only + No more binfmt-support support. Unregister any entries on upgrades. + * binfmt: ship (but not enable) entries for all arches, do not omit native + Ship all really-foreign binfmt entries in /usr/lib/binfmt.d/ as usual, + to be enabled automatically at package install. Also ship the + same-cpu-family entries in /usr/share/doc/qemu-user-static/qemu-foo.conf - + this way it will not be enabled automatically but it will be possible to + (carefully) symlink the needed additional entries to /etc/binfmt.d/. + (Closes: #924667, #1016810, #1027781) + * qemu-system-*: add extra names to use as qemu-system-${DEB_HOST_ARCH_CPU}, + for both the Provides: line and executable file names. See + /usr/share/doc/qemu-system-common/README.Debian. + * qemu-system-*: also Provides: qemu-system-any + * qemu-system-ppc: provide qemu-kvm on ppc64el too, the same as ppc64 + * qemu-user, qemu-user-static: provide qemu-${DEB_HOST_ARCH}[-static] + aliases too, when qemu arch is different from debian arch. See + /usr/share/doc/qemu-user[-static]/README.Debian. 
+ * d/binfmt-install: fix disabled .conf entries install for qemu-user-binfmt + (those goes to qemu-user doc dir, not qemu-user-binfmt doc dir) + * d/control: remove old (pre-bullseye) Breaks/Replaces + * qemu-bridge-helper-path.patch: use the right path for qemu-bridge-helper + in docs (Closes: #1027447) + * d/qemu-system-common.NEWS: document dropping of virtiofsd + * d/rules: add comment saying why savemoddir block needs to be generated + * stop trying to provide os-specific qemu-ifup + * two more spelling fixes for mistyped "information" + + -- Michael Tokarev Thu, 20 Apr 2023 04:19:06 +0300 + +qemu (1:8.0~rc4+dfsg-2) experimental; urgency=medium + + [ Vagrant Cascadian ] + * debian/rules: Use 'printf' instead of 'echo' to avoid + differences in underlying /bin/sh implementations. + Closes: #1034431 + + [ Michael Tokarev ] + * Provide Debian architecture names for qemu-system-foo packages and + binaries, for arm64, armel, armhf, powerpc, amd64, loong64 and ppc64el. + It is now possible to run qemu-system-$debianarch binary or depend on + qemu-system-$debianarch package. This should simplify various tools + for cross compilation and the like. Also Closes: #825841. + * d/qemu-system-ppc.README.Debian: remove obsolete README about video.x + + -- Michael Tokarev Tue, 18 Apr 2023 05:04:04 +0300 + +qemu (1:8.0~rc4+dfsg-1) experimental; urgency=medium + + * update to 8.0.0-rc4 + * d/rules, d/qemu.desktop: install a simple .desktop file in + qemu-system-data so that qemu-system-foo has an icon under gnome/wayland + * re-enable build on x32 - disable new CONFIG_XEN_EMU which is now enabled + unconditionally on x86 + * d/patches: restore note-missing-module-pkg-name.diff + (lost in one of previous commits) + * pick 3 more fixes from qemu-devel@: + +rtl8139-fix-large_send_mss-divide-by-zero.patch + +target_i386-Change-wrong-XFRM-value.patch + +hw_mips_malta-Fix-malta-machine-on-big-endian-hosts.patch + + -- Michael Tokarev Fri, 14 Apr 2023 12:25:57 +0300 + +qemu (1:8.0~rc3+dfsg-2) experimental; urgency=medium + + * d/rules: fix qemu.svg install and remove .png fallback icons again + (qemu window still doesn't have an icon) + * d/binfmt-install: fix systemd binfmt registration broken + since previous upload + * +linux-user-fix-getgroups-setgroups-allocations.patch (Closes: #811087) + + -- Michael Tokarev Mon, 10 Apr 2023 12:33:01 +0300 + +qemu (1:8.0~rc3+dfsg-1) experimental; urgency=medium + + * new upstream release candidate (8.0.0-rc3) + * d/control: build-depend on gcc-powerpc-linux-gnu (for u-boot code) + * d/rules: build u-boot-sam460 ppc firmware (u-boot-sam460-20100605.bin) + * +u-boot-sam460ex-fdi.patch + * +u-boot-sam460ex-mstring.patch + * d/copyright: stop stripping roms/u-boot/, we need it for u-boot.e500 + * d/rules: build u-boot.e500 binary (Closes: #756833) + * d/rules: install all png icons too (for gtk display) + * d/rules: remove old compat qboot symlink + * remove skip-meson-pc-bios.diff (and skip-unpack-edk2-blobs.patch), + fix pc-bios/meson.build instead + * remove d/get-orig-source.sh now when d/copyright is set up + * d/source/options: stop diff-ignoring submodules + + -- Michael Tokarev Thu, 06 Apr 2023 09:50:41 +0300 + +qemu (1:8.0~rc2+dfsg-1) experimental; urgency=medium + + [ Michael Tokarev ] + * new upstream 8.0 (rc2) + Packaging changes: + * d/rules, d/qemu-system-common.lintian-overrides: + do not try to install virtiofsd, it is removed in qemu 8.0 + * d/rules: do not build sgabios, it is removed upstream in 8.0 + * spelling.diff: remove hunks which has been 
applied, + adopt virtio.c=>virtio-hmp.c for remaining + * patches: remove all patches from d/patches/master/ (picked from upstream) + * hw-pvrdma-protect-against-guest-driver-CVE-2022-1050.patch: + remove, also applied upstream + * microvm-default-machine-type.patch: adjust for 8.0 + * openbios-address-of-packet-member.patch: remove, not needed anymore + * d/control: build-depend on flex & bison + * d/rules: it is --disable-install-blobs, not --disable-blobs for xen too + * build microblaze firmware (petalogic-*.dtb) instead of using shipped one + * remove microblaze firmware (petalogic-*.dtb) for -dfsg + * remove previously deprecated qemu-debootstrap + * stop using custom $argv[0] for binfmt + * d/rules: always disable dwz if <<0.14 + * stop enabling avx512f for xen build (it is disabled by default) + * d/rules: install .bmp icon, not .png + + + [ Christian Ehrhardt ] + * d/control-in: libsndio is in universe in ubuntu + * d/control-in: libnfs is in main since focal, + enable direct nfs storage support (LP: #1988704) + + -- Michael Tokarev Fri, 31 Mar 2023 15:44:21 +0300 + +qemu (1:7.2+dfsg-5ubuntu3) mantic; urgency=medium + + * d/p/u/allow-repeating-hot-unplug-requests.patch: Allow repeating + hot-unplug requests by making ACPI PCI able to requeue them. + (LP: #2018733) + + -- Sergio Durigan Junior Thu, 18 May 2023 15:13:14 -0400 + +qemu (1:7.2+dfsg-5ubuntu2) lunar; urgency=medium + + * Fix FTBFS with glibc >= 2.36. (LP: #2015418) + - d/p/fix-ftbfs-glibc-*.patch: Revert now-unnecessary + upstream commits that were working around a glibc issue. + + -- Sergio Durigan Junior Wed, 05 Apr 2023 20:10:13 -0400 + +qemu (1:7.2+dfsg-5ubuntu1) lunar; urgency=medium + + * Re-merge with Debian unstable to pick up stabilization fixes + remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type + (LP: 1304107 1621042 1776189 1761372 1761372 1776189) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types containing release versioned machine attributes + - d/qemu-system-x86.NEWS Info on fixed machine type defintions + for host-phys-bits=true + - Add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - Ease the use of module retention on upgrades (LP 1913421) + - debian/qemu-block-extra.postinst: enable mount unit on install/upgrade + - d/control-in: switch qemu-system-x86-xen to qemu-system-xen as this + landed in Debian but under a different name. 
+ - Remaining GCC-12 FTBFS (LP 1988710 + LP 1921664) + + d/p/u/qboot-Disable-LTO-for-ELF-binary-build-step.patch: + fix qboot FTBFS with LTO + - d/control-in: libnfs is in main since focal, enable direct nfs + storage support (LP 1988704) + - d/control-in: libsndio is in universe in ubuntu + + -- Christian Ehrhardt Tue, 07 Mar 2023 08:50:45 +0100 + +qemu (1:7.2+dfsg-5) unstable; urgency=medium + + * d/qemu-guest-agent.udev: fix missing comma + (Christian Schneider , Closes: #1031838) + * remove qemu-make-debian-root. + Ths script debian/qemu-make-debian-root has been broken for ages. + In 2023, it creates /etc/fstab with a reference to /dev/hda1, and + edits /etc/inittab which does not exist. And no one noticed, - so + it's safe to assume it is not used anymore. Just remove it. + * re-pick qemu-stable patches from master (the same patch contents): + master/tests-tcg-i386-Introduce-and-use-reg_t-consistently.patch + master/target-i386-Fix-BEXTR-instruction.patch + master/target-i386-Fix-C-flag-for-BLSI-BLSMSK-BLSR.patch + master/target-i386-fix-ADOX-followed-by-ADCX.patch + * 20 more changes picked from upstream/master: + master/target-i386-Fix-BZHI-instruction.patch + master/block-iscsi-fix-double-free-on-BUSY-or-similar-status.patch + master/hw-smbios-fix-field-corruption-in-type-4-table.patch + master/Revert-x86-do-not-re-randomize-RNG-seed-on-snapshot-.patch + master/Revert-x86-re-initialize-RNG-seed-when-selecting-ker.patch + master/Revert-x86-reinitialize-RNG-seed-on-system-reboot.patch + master/Revert-x86-use-typedef-for-SetupData-struct.patch + master/Revert-x86-return-modified-setup_data-only-if-read-a.patch + master/Revert-hw-i386-pass-RNG-seed-via-setup_data-entry.patch + master/vhost-user-gpio-Configure-vhost_dev-when-connecting.patch + master/vhost-user-i2c-Back-up-vqs-before-cleaning-up-vhost_.patch + master/vhost-user-rng-Back-up-vqs-before-cleaning-up-vhost_.patch + master/virtio-rng-pci-fix-migration-compat-for-vectors.patch + master/virtio-rng-pci-fix-transitional-migration-compat-for.patch + master/hw-timer-hpet-Fix-expiration-time-overflow.patch + master/vdpa-stop-all-svq-on-device-deletion.patch + master/vhost-avoid-a-potential-use-of-an-uninitialized-vari.patch + master/libvhost-user-check-for-NULL-when-allocating-a-virtq.patch + master/chardev-char-socket-set-s-listener-NULL-in-char_sock.patch + master/intel-iommu-fail-MAP-notifier-without-caching-mode.patch + master/intel-iommu-fail-DEVIOTLB_UNMAP-without-dt-mode.patch + + -- Michael Tokarev Sun, 05 Mar 2023 20:09:04 +0300 + +qemu (1:7.2+dfsg-4ubuntu1) lunar; urgency=medium + + * Merge with Debian unstable (LP: #1993438), among many other fixes + this resolvs these bugs: + (LP: #1957924) - support for querying stats, + (LP: #1853307) - Enhanced Interpretation for PCI Functions (s390x) + (LP: #1959966) - guest dump encryption with customer keys (s390x) + (LP: #1999885) - pv: don't allow userspace to set the clock under PV + (LP: #1957924) - add filtering of statistics by target vCPU + remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type + (LP: 1304107 1621042 1776189 1761372 1761372 1776189) + - 
d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types containing release versioned machine attributes + - d/qemu-system-x86.NEWS Info on fixed machine type defintions + for host-phys-bits=true + - Add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - Ease the use of module retention on upgrades (LP 1913421) + - debian/qemu-block-extra.postinst: enable mount unit on install/upgrade + - d/control-in: switch qemu-system-x86-xen to qemu-system-xen as this + landed in Debian but under a different name. + - Remaining GCC-12 FTBFS (LP 1988710 + LP 1921664) + + d/p/u/qboot-Disable-LTO-for-ELF-binary-build-step.patch: + fix qboot FTBFS with LTO + * Dropped Changes [now part of upstream v7.2.0] + - d/p/u/lp1994002-migration-Read-state-once.patch: Fix for libvirt + error 'migration was active, but no RAM info was set' (LP 1994002) + - d/p/u/ebpf-replace-deprecated-bpf_program__set_socket_filt.patch: + Fix FTBFS with libbpf 1.0.1-2. + + Header updates that were added as part of the libbpf fixes + but not mentioned in changelog + - d/p/u/lp-1981339-*: fix s390x system emulation (LP 1981339) + - Fix I/O stalls when using NVMe storage (LP 1970737). + + d/p/lp1970737-linux-aio-*.patch: Fix unbalanced plugged counter + in laio_io_unplug. + - SECURITY UPDATE: heap overflow in floppy disk emulator + + debian/patches/CVE-2021-3507.patch: prevent end-of-track overrun in + hw/block/fdc.c. + - SECURITY UPDATE: use-after-free vulnerability + + debian/patches/CVE-2022-0216-*.patch: fix use-after-free in + lsi_do_msgout + - SECURITY UPDATE: heap overflow vulnerability + + debian/patches/CVE-2022-2962.patch: tulip: Restrict DMA engine to + memories + - SECURITY UPDATE: integer underflow vulnerability + + debian/patches/CVE-2022-3165.patch: fix integer underflow in + vnc_client_cut_text_ext + * Dropped Changes in regard to GCC-12 FTBFS (LP 1988710) + [not all are needed in lunar] + - d/p/u/lp1988710-silence-openbios-array-bounds-false-positive.patch. + Silence -Warray-bounds false positive [no more needed] + - d/rules: set -O1 for alpha firmware build + - d/p/u/lp1988710-opensbi-Makefile-fix-build-with-binutils-2.38.patch: + further FTBFS fixup + * Dropped Changes [in Debian 1:7.2+dfsg-3] + - d/rules: disable LTO on non-amd64 builds (LP 1921664) + * Added Changes + - d/control-in: libnfs is in main since focal, enable direct nfs + storage support (LP: #1988704) + - d/control-in: libsndio is in universe in ubuntu + + -- Christian Ehrhardt Wed, 04 Jan 2023 13:18:43 +0100 + +qemu (1:7.2+dfsg-4) unstable; urgency=medium + + * block-fix-detect-zeroes-with-BDRV_REQ_REGISTERED_BUF.patch: + re-pick now from master (the same patch, moved to master/). + * revert x86-don-t-let-decompressed-kernel-image-clobber-setu.patch + Closes: ##1031682 . + This turned out to be wrong move, breaking more stuff than fixing. + Upstream is going to revert it too. 
+ + -- Michael Tokarev Mon, 20 Feb 2023 21:00:18 +0300 + +qemu (1:7.2+dfsg-3) unstable; urgency=medium + + [ Paride Legovini ] + * Disable LTO on non-amd64 builds (LP: #1921664) + + [ Michael Tokarev ] + * target-arm-Fix-physical-address-resolution-for-Stage2.patch: + re-fetch now from master branch + * 4 more patches picked from master: + x86-don-t-let-decompressed-kernel-image-clobber-setu.patch + migration-ram-Fix-error-handling-in-ram_write_tracki.patch + migration-ram-Fix-populate_read_range.patch + qcow2-Fix-theoretical-corruption-in-store_bitmap-err.patch + * 5 fixes picked from current pullreqs: + block-fix-detect-zeroes-with-BDRV_REQ_REGISTERED_BUF.patch + tests_tcg_i386-introduce-and-use-reg_t-consistently.patch + target_i386-fix-BEXTR-instruction.patch + target_i386-fix-C-flag-for-BLSI-BLSMSK-BLSR.patch + target_i386-fix-ADOX-followed-by-ADCX.patch + * disable dwz on certain architectures for older dwz + (FTBFS on bullseye, #968670) + + -- Michael Tokarev Fri, 10 Feb 2023 14:29:12 +0300 + +qemu (1:7.2+dfsg-2) unstable; urgency=medium + + * d/rules: add -ffile-prefix-map when building skiboot + * d/control: provide qemu-kvm in qemu-system-misc on s390x + (Closes: #1029309) + * d/control: drop dependency of qemu-guest-agent on lsb-base + * Picked patches from qemu master branch tagged for qemu-stable + up to commit deabea6e88 (2023-02-02): + target-sh4-Mask-restore-of-env-flags-from-tb-flags.patch + vhost-fix-vq-dirty-bitmap-syncing-when-vIOMMU-is-ena.patch + virtio-mem-Fix-the-bitmap-index-of-the-section-offse.patch + virtio-mem-Fix-the-iterator-variable-in-a-vmem-rdl_l.patch + target-arm-fix-handling-of-HLT-semihosting-in-system.patch + meson-accept-relative-symlinks-in-meson-introspect-i.patch + target-riscv-Set-pc_succ_insn-for-rvc-illegal-insn.patch + acpi-cpuhp-fix-guest-visible-maximum-access-size-to-.patch + hw-nvme-fix-missing-endian-conversions-for-doorbell-.patch + hw-nvme-fix-missing-cq-eventidx-update.patch + configure-fix-GLIB_VERSION-for-cross-compilation.patch + target-arm-Fix-sve_probe_page.patch + target-arm-allow-writes-to-SCR_EL3.HXEn-bit-when-FEA.patch + target-arm-Fix-in_debug-path-in-S1_ptw_translate.patch + * Also: target-arm-Fix-physical-address-resolution-for-Stage.patch + + -- Michael Tokarev Thu, 02 Feb 2023 21:17:10 +0300 + +qemu (1:7.2+dfsg-1) unstable; urgency=medium + + * new upstream release + Closes: #1025123 CVE-2022-4172 + (erst: undefined behavior in memcpy in write_erst_record) + Closes: #1021981 qemu-user: faccessat2 is not implemented + Closes: #1021019 CVE-2022-3165 (VNC: integer underflow in + vnc_client_cut_text_ext leads to CPU exhaustion) + * remove patches applied upstream + * refresh note-missing-module-pkg-name.diff + * slirp is always external package now, not a submodule anymore + * d/control: require meson >> 0.61.5~ for build + * spelling.diff: update with more spelling error + * add some lintian-overrides + * fix minor spelling errors in patches + * d/control: Bump Standards-Version to 4.6.1 + * debian shell programs use "which" instead of the "command -v", + fix that (Closes: #1018254) + * Better fix for #1019011 (gcc ICE building palcode-clipper), use -O1 + instead of -O2 for the failing compile when it actually fails + (no need to depend on gcc-11, Closes: #1011003) + + -- Michael Tokarev Thu, 15 Dec 2022 17:17:28 +0300 + +qemu (1:7.1+dfsg-2) unstable; urgency=medium + + * tulip-restrict-DMA-engine-to-memories-CVE-2022-2962.patch + fix possible stack or heap overflow (tulip: DMA reentrancy issue) + Closes: #1018055, 
CVE-2022-2962 + * hw-pvrdma-protect-against-guest-driver-CVE-2022-1050.patch + fix possible use-after-free in paravirtual RDMA device. + Closes: #1014589, CVE-2022-1050 + * mention closing of #979677 (CVE-2020-14394) by 7.1 + * d/rules: parametrify extra-cflags & extra-ldflags + * d/rules: explicitly disable pie on arm64 due to + https://sourceware.org/bugzilla/show_bug.cgi?id=29514 + Fixes FTBFS. + + -- Michael Tokarev Tue, 13 Sep 2022 20:08:43 +0300 + +qemu (1:7.1+dfsg-1) unstable; urgency=medium + + * new upstream release (7.1) + Closes: #1014958, CVE-2022-35414 + Closes: #1014590, CVE-2022-0216 + Closes: #979677, CVE-2020-14394 + Closes: #987410, CVE-2021-3507 + Closes: #988333, #1018913 + * d/copyright: + - remove mentions of slirp (packaged separately) + - blindly convert to dep-5 (it needs a complete rewrite) + - add Files-Excluded from d/get-orig-source.sh + * d/gbp.conf: remove filter= (and whole [import-orig]) + * d/watch: verify upstream tarballs + * d/rules: stop faking skiboot version, it is now properly included in + roms/skiboot/.version file. Add a dependency on this file too + * d/patches: + - remove use-fixed-data-path.patch: not needed anymore + - linux-user-binfmt-P.diff: refresh + - remove patches applied upstream + * d/control: + - it is --enable-capstone now, not --enable-capstone=system + - it is --enable-png now, not --enable-vnc-png + * d/rules: fix --enable-vhost-* options + * d/rules: remove vnc-png for xen too + * openbios-array-bounds-gcc12.patch + * opensbi-fix-build-with-binutils-2.38.patch + * d/rules: adopt vof build changes + * d/qemu-system-data.docs: omit ccid.txt (removed) + * temporary workaround for gcc-12 bug #1019011: use gcc-11-alpha-linux-gnu + instead of gcc-alpha-linux-gnu (another option is to use -Os) + * d/control: temporarily build-depend on libva-dev till #1019485 is fixed + * add loongarch64 qemu-user and qemu-system arch + + -- Michael Tokarev Mon, 12 Sep 2022 11:50:53 +0300 + +qemu (1:7.0+dfsg-7ubuntu4) lunar; urgency=medium + + * SECURITY UPDATE: use-after-free vulnerability + - debian/patches/CVE-2022-0216-*.patch: fix use-after-free in + lsi_do_msgout + - CVE-2022-0216 + * SECURITY UPDATE: heap overflow vulnerability + - debian/patches/CVE-2022-2962.patch: tulip: Restrict DMA engine to + memories + - CVE-2022-2962 + * SECURITY UPDATE: integer underflow vulnerability + - debian/patches/CVE-2022-3165.patch: fix integer underflow in + vnc_client_cut_text_ext + - CVE-2022-3165 + + -- Nishit Majithia Fri, 09 Dec 2022 10:25:52 +0530 + +qemu (1:7.0+dfsg-7ubuntu3) lunar; urgency=medium + + [ Brett Milford ] + * d/p/u/lp1994002-migration-Read-state-once.patch: Fix for libvirt + error 'migration was active, but no RAM info was set' (LP: #1994002) + + [ Mauricio Faria de Oliveira ] + * d/p/u/ebpf-replace-deprecated-bpf_program__set_socket_filt.patch: + Fix FTBFS with libbpf 1.0.1-2. + + -- Mauricio Faria de Oliveira Wed, 30 Nov 2022 12:17:51 -0300 + +qemu (1:7.0+dfsg-7ubuntu2) kinetic; urgency=medium + + [ Paride Legovini ] + * d/rules: disable LTO on non-amd64 builds (LP: #1921664) + * GCC-12 FTBFS (LP: #1988710) + - d/p/u/lp1988710-silence-openbios-array-bounds-false-positive.patch. 
+ Silence -Warray-bounds false positive (treated as error) + + [ Christian Ehrhardt ] + * More on GCC-12 FTBFS (LP 1988710) + - d/rules: set -O1 for alpha firmware build + - d/p/u/lp1988710-opensbi-Makefile-fix-build-with-binutils-2.38.patch: + further FTBFS fixup + + -- Christian Ehrhardt Mon, 19 Sep 2022 08:07:24 +0200 + +qemu (1:7.0+dfsg-7ubuntu1) kinetic; urgency=medium + + * Merge with Debian unstable (LP: #1971315)(LP: #1980896), remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type + (LP: 1304107 1621042 1776189 1761372 1761372 1776189) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types containing release versioned machine attributes + - d/qemu-system-x86.NEWS Info on fixed machine type defintions + for host-phys-bits=true + - Add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - Ease the use of module retention on upgrades (LP 1913421) + - debian/qemu-block-extra.postinst: enable mount unit on install/upgrade + - Fix I/O stalls when using NVMe storage (LP 1970737). + - d/p/lp1970737-linux-aio-*.patch: Fix unbalanced plugged counter + in laio_io_unplug. + - SECURITY UPDATE: heap overflow in floppy disk emulator + - debian/patches/CVE-2021-3507.patch: prevent end-of-track overrun in + hw/block/fdc.c. + - CVE-2021-3507 + * Dropped Changes [now part of 1:7.0+dfsg-7]: + - d/rules: xen libexec dir is no more versioned + - d/rules: ensure xen is built on x86 + - d/kvm-spice: fix when acceleration is already defined on the commandline + - debian/control[-in]: no more disable glusterfs in Ubuntu (LP 1246924) + * Dropped Changes [now part of upstream v7.0.0] + - d/p/u/lp-1959984-s390x-ipl-support-extended-kernel-command-line-size.patch + Allow long kernel command lines for QEMU (LP 1959984) + - d/p/u/fix-virtiofsd-for-glibc2.35.patch: add rseq to seccomp allow list + - d/p/u/tcg-Remove-dh_alias-indirection-for-dh_typecode.patch: fix 32bit + tcg on s390x. + - Fix diff handling on ceph that can cause data corruption (LP 1968258) + - d/p/u/lp-1968258-block-rbd-fix-handling-of-holes-in-.bdrv_co.patch + - d/p/u/lp-1968258-block-rbd-workaround-for-ceph-issue-53784.patch + - d/p/u/lp-1970563-ui-vnc.c-Fixed-a-deadlock-bug.patch: avoid deadlock + in vnc connections (LP 1970563) + - All CVE fixes of 1:6.2+dfsg-2ubuntu8 except CVE-2021-3507 + * Dropped Changes + - d/p/lp-1952448-relax-skiboot-gcc-deprecation-errors.patch: + add patch to workaround FTBFS when building against OpenSSL 3.0. 
+ [ now working with OpenSSL 3.0 ] + - d/optionrom.mak, d/p/u/avoid-fcf-clashing-with-i486.patch: fix + -fcf-protection being unavailble on -march=i486 (LP 1940029) + [ fixed in compiler toolchain ] + - Make qemu-system-x86-microvm a transitional package as the binary is now + in qemu-system-x86 itself. + [ no more needed] + * Added Changes + - d/control-in: switch qemu-system-x86-xen to qemu-system-xen as this + landed in Debian but under a different name. + - d/p/u/qboot-Disable-LTO-for-ELF-binary-build-step.patch: fix qboot FTBFS + with LTO + - d/p/u/lp-1981339-*: fix s390x system emulation (LP: #1981339) + + -- Christian Ehrhardt Tue, 05 Jul 2022 12:07:19 +0200 + +qemu (1:7.0+dfsg-7) unstable; urgency=medium + + * d/tests/test-qemu-user: rework ls/glob test a bit + * d/tests/test-qemu-user: fix ppc64le qemu architecture name + * d/binfmt-install: use proper name for binfmt.d (*.conf) + Hopefully closes: #1011003 + * two virtio-scsi bugfixes from upstream: + virtio-scsi-fix-ctrl-and-event-handler-functions-in-dataplane.patch + virtio-scsi-don-t-waste-CPU-polling-the-event-virtqueue.patch + * 3 patches from upstream to fix possible coroutine crashes: + coroutine-use-QEMU_DEFINE_STATIC_CO_TLS.patch + coroutine-rename-qemu_coroutine_inc-dec_pool_size.patch + coroutine-revert-to-constant-batch-size.patch + * target-i386-do-not-consult-nonexistent-host-leaves.patch + * d/control: stop suggesting sudo for qemu-user-static + * Revert "d/rules: do not try to enable tcg-interpreter on unsupported + targets, it does not help anymore" - it does help but it needs a bit + more work + * disable xen support for qemu-system-x86 build and create a wrapper + for -i386 to redirect xen-related usage to xen-specific binary + with a warning (for bookworm only) + * common-user-no-user.patch: fix one of FTBFS on unsupported architectures + * d/rules: use regular variable assignment for BUILD_PACKAGES + * two trivial patches to fix spelling in roms: + openbios-spelling-endianess.patch + slof-spelling-seperator.patch + + -- Michael Tokarev Sun, 15 May 2022 15:49:12 +0300 + +qemu (1:7.0+dfsg-6) unstable; urgency=medium + + * d/rules: the forgotten --enable-xen-pci-passthrough for the xen build + * d/tests/test-qemu-user: rewrite to be more robust and complete and + include test for qemu-user-static too. + + -- Michael Tokarev Mon, 09 May 2022 01:37:56 +0300 + +qemu (1:7.0+dfsg-5) unstable; urgency=medium + + * d/tests/test-qemu-user.sh: more arch-specific debugging/updates + + -- Michael Tokarev Sat, 07 May 2022 12:22:26 +0300 + +qemu (1:7.0+dfsg-4) unstable; urgency=medium + + * d/tests/: fix failing tests. + - test-qemu-user: depend on gcc for dpkg-architecture to work, + and print debugging info for future switch to uname -m + - test-qemu-img: switch from using file to qemu-img info + + -- Michael Tokarev Sat, 07 May 2022 11:33:23 +0300 + +qemu (1:7.0+dfsg-3) unstable; urgency=medium + + [ Michael Tokarev ] + * d/binfmt-install: also generate binfmt.d/ entries for systemd + * d/control: use systemd as preferred alternative to binfmt-support + hopefully Closes: #789011 (Minimal dependencies to register binfmt) + Closes: #985889 (make binfmt setup configurable) + * d/control: remove Riku Voipio from Uploaders. Thank you Riku! 
+ * d/rules: simplify DEB_BUILD_OPTIONS=parallel=N parsing + + [ Guido Günther ] + * Add minimal autopkgtest (Closes: #832982) + + -- Michael Tokarev Sat, 07 May 2022 00:03:24 +0300 + +qemu (1:7.0+dfsg-2) unstable; urgency=medium + + * d/control: add Rules-Requires-Root: no + * d/control: switch to debhelper-compat=13 + * d/control: drop "qemu" empty/dummy pseudopackage + * d/control: do not build linux-user* on ia64 and powerpc + (not supported by upstream anymore) + * d/control: add Breaks for qemu-system-data for other packages from which + it borrowed files in the past (Closes: #1008095) + * d/rules: switch to the dh sequence (but keep build-{arch,indep}), + rearrange some rules. + This brings us dh_dwz (very slow) and dh_strip_nondeterminism. + * d/rules: do not explicitly turn off slirp & capstone (now properly + controlled by --with[out]-default-features option) + * d/rules: do not try to enable tcg-interpreter on the unsupported + targets, it does not help to build tools anymore + * d/rules: do not chown -w d/control, it breaks dpkg-source + * d/rules: clean up the clean target + * d/not-installed: list many documentation files and qemu-plugin.h + * configure-make-fortify_source-yes-by-default.patch: enable + fortify-source for minimal builds too + * d/changelog: mention #990562 (CVE-2021-3611) closed by 7.0 + + -- Michael Tokarev Sat, 30 Apr 2022 13:38:12 +0300 + +qemu (1:7.0+dfsg-1) unstable; urgency=medium + + * update to 7.0 release + + -- Michael Tokarev Thu, 21 Apr 2022 14:19:51 +0300 + +qemu (1:7.0~rc4+dfsg-1) experimental; urgency=medium + + * New upstream 7.0 (rc) + Closes: #990562, CVE-2021-3611 + * remove patches applied upstream + * remove new binary file, pc-bios/edk2-x86_64-microvm.fd.bz2 + * d/control: remove libxfs-dev build dependency, + the ioctl is implemented inline + * d/control: stop build-depend-indep on libc6.1-dev-alpha-cross, + not needed anymore + * d/rules: update skiboot version check (skiboot hasn't canged since 6.1) + * build & install vbootrom (npcm7xx_bootrom.bin), and + build-depend-indep on gcc-arm-none-eabi + * create a new binary package, qemu-system-xen, which provides + /usr/libexec/xen-qemu-system-i386 binary for use by xen only. + Once xen switches to use this binary instead of usual qemu-system-i386, + xen support will be removed from the regular qemu-system-x86 build + * use a fast inline version of /usr/share/dpkg/architecture.mk + + -- Michael Tokarev Sun, 17 Apr 2022 15:08:40 +0300 + +qemu (1:6.2+dfsg-3) unstable; urgency=medium + + [ Christian Ehrhardt ] + * d/rules: ensure xen is built on x86 + * d/rules: xen libexec dir is no more versioned + * d/kvm-spice: fix when acceleration is already defined on the commandline + + [ Michael Tokarev ] + * d/control, d/rules: do not compile xen support on i386, + since it is amd64-only now (since 4.16) + * d/control: add libbpf-dev & --enable-bpf for eBPF support + (Closes: #994573) + + -- Michael Tokarev Fri, 25 Feb 2022 12:01:46 +0300 + +qemu (1:6.2+dfsg-2ubuntu8) kinetic; urgency=medium + + [ Marc Deslauriers ] + * SECURITY UPDATE: heap overflow in floppy disk emulator + - debian/patches/CVE-2021-3507.patch: prevent end-of-track overrun in + hw/block/fdc.c. + - CVE-2021-3507 + * SECURITY UPDATE: use-after-free in nvme + - debian/patches/CVE-2021-3929.patch: deny DMA to the iomem of the + device itself in hw/nvme/ctrl.c. 
+ - CVE-2021-3929 + * SECURITY UPDATE: integer overflow in QXL display device emulation + - debian/patches/CVE-2021-4206.patch: check width and height in + hw/display/qxl-render.c, hw/display/vmware_vga.c, ui/cursor.c. + - CVE-2021-4206 + * SECURITY UPDATE: heap overflow in QXL display device emulation + - debian/patches/CVE-2021-4207.patch: fix race condition in qxl_cursor + in hw/display/qxl-render.c. + - CVE-2021-4207 + * SECURITY UPDATE: potential privilege escalation in virtiofsd + - debian/patches/CVE-2022-0358.patch: Drop membership of all + supplementary groups in tools/virtiofsd/passthrough_ll.c. + - CVE-2022-0358 + * SECURITY UPDATE: memory leakage in virtio-net device + - debian/patches/CVE-2022-26353.patch: fix map leaking on error during + receive in hw/net/virtio-net.c. + - CVE-2022-26353 + * SECURITY UPDATE: memory leakage in vhost-vsock device + - debian/patches/CVE-2022-26354.patch: detach the virqueue element in + case of error in hw/virtio/vhost-vsock-common.c. + - CVE-2022-26354 + + [ Sergio Durigan Junior ] + * Fix I/O stalls when using NVMe storage (LP: #1970737). + - d/p/lp1970737-linux-aio-*.patch: Fix unbalanced plugged counter + in laio_io_unplug. + + -- Sergio Durigan Junior Wed, 22 Jun 2022 15:38:37 -0400 + +qemu (1:6.2+dfsg-2ubuntu7) kinetic; urgency=medium + + * d/p/u/lp-1970563-ui-vnc.c-Fixed-a-deadlock-bug.patch: avoid deadlock + in vnc connections (LP: #1970563) + + -- Christian Ehrhardt Thu, 19 May 2022 08:25:20 +0200 + +qemu (1:6.2+dfsg-2ubuntu6) jammy; urgency=medium + + * debian/control[-in]: no more disable glusterfs in Ubuntu (LP: #1246924) + * Fix diff handling on ceph that can cause data corruption (LP: #1968258) + - d/p/u/lp-1968258-block-rbd-fix-handling-of-holes-in-.bdrv_co.patch + - d/p/u/lp-1968258-block-rbd-workaround-for-ceph-issue-53784.patch + + -- Christian Ehrhardt Fri, 08 Apr 2022 09:36:34 +0200 + +qemu (1:6.2+dfsg-2ubuntu5) jammy; urgency=medium + + * d/p/u/tcg-Remove-dh_alias-indirection-for-dh_typecode.patch: fix 32bit + tcg on s390x. 
+ + -- Christian Ehrhardt Thu, 17 Feb 2022 09:54:36 +0100 + +qemu (1:6.2+dfsg-2ubuntu4) jammy; urgency=medium + + * No-change rebuild to update maintainer scripts, see LP: 1959054 + + -- Dave Jones Wed, 16 Feb 2022 17:28:14 +0000 + +qemu (1:6.2+dfsg-2ubuntu3) jammy; urgency=medium + + * Merge with Debian unstable, remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type + (LP: 1304107 1621042 1776189 1761372 1761372 1776189) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types containing release versioned machine attributes + - d/qemu-system-x86.NEWS Info on fixed machine type defintions + for host-phys-bits=true + - Add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - d/p/lp-1952448-relax-skiboot-gcc-deprecation-errors.patch: + add patch to workaround FTBFS when building against OpenSSL 3.0. + - d/optionrom.mak, d/p/u/avoid-fcf-clashing-with-i486.patch: fix + -fcf-protection being unavailble on -march=i486 (LP 1940029) + - Ease the use of module retention on upgrades (LP 1913421) + - debian/qemu-block-extra.postinst: enable mount unit on install/upgrade + - Make qemu-system-x86-microvm a transitional package as the binary is now + in qemu-system-x86 itself. 
+ * Dropped Changes [now part of 1:6.1+dfsg-8]: + - updated debian/patches/linux-user-binfmt-P.diff to work with in-kernel code + (#993658) (LP 1947860) + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - d/control*, d/rules: disable xen by default, but provide universe + package qemu-system-x86-xen as alternative + [includes compat links changes of 5.0-5ubuntu4] + - d/p/ubuntu/lp-1929926-*: avoid segfaults by uretprobes (LP 1929926) + * Dropped Changes [now part of upstream] + - d/p/u/lp-1932175-s390x-cpumodel-add-3931-and-3932.patch: add new 3931 + and 3932 machines (LP 1932175) + - d/p/u/lp-1940288-audio-Never-send-migration-section.patch: fix + migration with audio devices present (LP 1940288) + * Added changes: + - update patches for qemu v6.2.0 + - d/p/u/enable-svm-by-default.patch + - d/p/u/define-ubuntu-machine-types.patch + - d/p/u/lp-1952448-relax-skiboot-gcc-deprecation-errors.patch + - d/rules: xen libexec dir is no more versioned + - d/rules: ensure xen is built on x86 + - d/p/u/lp-1959984-s390x-ipl-support-extended-kernel-command-line-size.patch + Allow long kernel command lines for QEMU (LP: #1959984) + - d/kvm-spice: fix when acceleration is already defined on the commandline + - d/p/u/fix-virtiofsd-for-glibc2.35.patch: add rseq to seccomp allow list + + -- Christian Ehrhardt Wed, 05 Jan 2022 12:18:25 +0100 + +qemu (1:6.2+dfsg-2) unstable; urgency=medium + + * bump meson build-dep to 0.59.3 + * build & include multiboot_dma.bin (Closes: #1003930) + * libxml2 is not needed for parallels. + Enable parallels block image format (Closes: #1003162) + * acpi-validate-hotplug-selector-on-access-CVE-2021-4158.patch + Closes: CVE-2021-4158 + * acpi-fix-QEMU-crash-when-started-with-SLIC-table.patch + (Closes: #1004017) + * acpi-fix-OEM_ID-padding.patch + * debian/get-orig-source.sh: repack dfsg archive differently + * mention closing of a few CVEs by 6.2.0 + + -- Michael Tokarev Thu, 20 Jan 2022 10:52:19 +0300 + +qemu (1:6.2+dfsg-1) unstable; urgency=medium + + [ Christian Ehrhardt ] + * 6.2.0 upstream release + Closes: #984452, CVE-2021-20203 + (integer overflow issue in the vmxnet3 NIC emulator) + Closes: #984453, CVE-2021-20196 + (fdc: check drive block device before usage) + Closes: #984451, CVE-2021-20255 + (infinite recursion / DMA reentrancy in eepro100 i8255x device emulator) + * d/get-orig-source.sh: remove pc-bios/multiboot_dma.bin in dfsg-clean + * Drop patches upstream in v6.2.0 + * d/p/spelling.diff: update for v6.2.0 (partially accepted) + * d/rules: use new --disable-install-blobs build arg + * Revert "make fuse debian-only, since libfuse3 in ubuntu is in universe", + it is now in main (LP: #1934510) + * d/rules: bump skiboot version for qemu v6.2.0 + * d/p/ignore-roms-dependency-in-qtest.patch: fix meson issue + due to dfsg removal of blobs + * d/rules: drop --disable-fdt on microvm builds + (now strictly required on any x86 build) + * d/rules: select default PARISC config for hppa-firmware + + -- Michael Tokarev Sun, 09 Jan 2022 12:52:10 +0300 + +qemu (1:6.1+dfsg-8) unstable; urgency=medium + + * fix keymaps definitions placement in last upload + (Closes: #997925, #997926) + + -- Michael Tokarev Wed, 27 Oct 2021 13:27:09 +0300 + +qemu (1:6.1+dfsg-7) unstable; urgency=medium + + * qemu-system-data: do not install qemu.desktop (Closes: #995628) + * remove qemu-user-static.README.Debian (#995633) + * d/rules: update configure rules for different qemu builds + * qemu-system-x86-xen: install 
only -i386 link to xen path, not -x86_64 + * promote qemu-system-x86-xen package on ubuntu to be like qemu-system-x86 + since it uses the same modules actually + * enable zstd compression support (Build-Depends) + * qemu-system-data: install usr/share/icons/hicolor/32x32/apps/qemu.bmp + for the sdl ui + * d/control: fix wrong relation (< vs <<) + * d/control: use :native version of python3-sphinx (Closes: #995622) + * do not make qemu-system-gui Multi-Arch:same due to vhost-user-gpu + * quieten gcc11 warnings/errors so roms will compile (Closes: #997082) + * move d/qemu-system-data.install to d/rules + + -- Michael Tokarev Tue, 26 Oct 2021 10:35:02 +0300 + +qemu (1:6.1+dfsg-6) unstable; urgency=medium + + * virtio-net-fix-use-after-unmap-free-for-sg-CVE-2021-3748.patch + Closes: #993401, CVE-2021-3748: use-after-free in virtio_net_receive_rcu + * ati_2d-fix-buffer-overflow-in-ati_2d_blt-CVE-2021-3638.patch + Closes: #992726, CVE-2021-3638: + inconsistent check in ati_2d_blt() may lead to out-of-bounds write + * refresh uas-add-stream-number-sanity-checks-CVE-2021-3713{.diff=>.patch} + from upstream + * hmp-unbreak-change-vnc.patch from upstream + to fix 'change vnc passwd' command + + -- Michael Tokarev Wed, 29 Sep 2021 13:41:47 +0300 + +qemu (1:6.1+dfsg-5) unstable; urgency=medium + + * updated debian/patches/linux-user-binfmt-P.diff + to work with in-kernel code + Closes: #993658 + * d/rules: do not mark configure target as .PHONY + since it is a real file + + -- Michael Tokarev Mon, 06 Sep 2021 01:20:59 +0300 + +qemu (1:6.1+dfsg-4) unstable; urgency=medium + + * qemu-sockets-fix-unix-socket-path-copy-again.patch + replacing socket-unix-maxlen.patch + Closes: #993145 + * enable more devices for the microvm build: + virtio-gpu & vhost-user-gpu + virtio-input-host & vhost_user_input + * move vhost-user-gpu files from qemu-system-common to qemu-system-gui + this eliminates X11 dependencies from non-gui qemu-system install + * build and install vof.bin firmware + * rearrange d/rules a bit to make different qemu builds + consistent with sysdata-components + * move ppc dtb firmware files from qemu-system-ppc to qemu-system-data + * device-tree-compiler is now needed in build-indep-depends, + not in build-depends + * d/rules: use CROSSPFX variables + * ubuntu only: + - Revert commit from the previous release which restores + relation between qemu-system-xen and qemu-system-gui + since -xen is not compatible with -gui modules + - qemu-system-xen does not suggest qemu-block-extra (incompatible too) + - qemu-system-s390x recommends qemu-block-extra rather than suggesting it + + -- Michael Tokarev Tue, 31 Aug 2021 22:27:25 +0300 + +qemu (1:6.1+dfsg-3) unstable; urgency=medium + + * fix brown-paper bag in last upload (--enable-libudev) + * ubuntu only: restore relations (depends/recommends) + between qemu-system-gui and qemu-system-xen since -xen + replaces full qemu-system-x86 and acts the same way + + -- Michael Tokarev Tue, 31 Aug 2021 02:50:52 +0300 + +qemu (1:6.1+dfsg-2) unstable; urgency=medium + + * rearrange d/rules to be able to configure/build/install + various different kinds of qemu builds (main/microvm/xen/static) + separately, by splitting targets of d/rules into subtargets + * enable many virtio devices for microvm build (Closes: #992029) + * disable libudev and fuse for microvm build + * rearrange options for microvm build in d/rules + * tidy newly added assert in unix-domain socket handling code + to account for extra \0 terminator for socket pathname, + socket-unix-maxlen.patch (Closes: 
#993145) + * upstream qemu added ignoring of *.patch to .gitignore, + unignore them in d/.gitignore + * re-add 4 patches which were lost from git + during preparation for 6.1 + (not affecting the source package) + * uas-add-stream-number-sanity-checks-CVE-2021-3713.diff + Closes: #992727, CVE-2021-3713 + * Mention (some) bugs closed by 6.1 upstream + * Mention closing of #947349 + + -- Michael Tokarev Tue, 31 Aug 2021 02:01:51 +0300 + +qemu (1:6.1+dfsg-1) unstable; urgency=medium + + * new upstream release (6.1.0) + Closes: CVE-2021-3607 (pvrdma: ensure correct input on ring init) + Closes: CVE-2021-3608 (pvrdma: unmap initialized dma address) + Closes: #989042, CVE-2021-3544 (vhost-user-gpu resource leaks) + Closes: #989042, CVE-2021-3545 (vhost-user-gpu memory disclosure) + Closes: #989042, CVE-2021-3546 (vhost-user-gpu OOBwr virgl_cmd_get_capset) + Closes: #991911, CVE-2021-3682 (pvrdma: possible mremap overflow) + * refresh patches, remove patches which were applied upstream + * remove newly appeared pc-bios/vof.bin in dfsg-clean + * add python3-sphinx-rtd-theme to build-depends + * removed qemu-system-moxie arch + * actually build many qemu modules as modules, and install + them in qemu-system-common. + * make strong versioned dependency between various qemu-system-* + packages, so that modules work correctly. + * drop very old versions from Build-Depends, Depends and Recommends + for packages which have long had much more recent versions in debian + * up qemu-block-extra dependency level from Suggests to Recommends + * d/control: stop suggesting sgabios by qemu-system-x86 + * (experimental for now, needs more work) print the name of the package + for a module which can't be loaded, to give a clue what other + package one may need to install for the requested functionality + * fix some spelling mistakes in visible messages (spelling.diff) + * enable jack audio backend (in qemu-system-gui) (Closes: #984726) + * other small/internal changes in packaging: + - removed --disable-sheepdog which was dropped upstream + - install gui modules in d/rules not in d/q-s-gui.install + to be able to use wildcard in d/q-s-common.install + - recommend qemu-block-extra, not suggest it and not depend on it (ubuntu) + for qemu-system-* and qemu-utils + - reformat qemu "deps" for qemu-system-gui, stop listing -xen there + (it can not satisfy -gui), qemu-system-s390x is :ubuntu:-only + - d/control: stop recommending -gui for xen package + (it is of no use for xen) + - d/control: reformat Depends for qemu-block-extra, do not include -xen + version there, mark -s390x as ubuntu-only, + and allow qemu-utils to satisfy the dependency + - do not install docs which do not exist anymore + - stop omitting Changelog from dh_installchangelog: the file is long gone + - d/rules: explicitly state version of skiboot as it is stored + in a git tag only, or else skiboot does not build (hack) + - put (new in 6.1, new in debian) hw-display-virtio-gpu-gl.so + to qemu-system-gui as it pulls in X11 + + -- Michael Tokarev Wed, 25 Aug 2021 15:59:26 +0300 + +qemu (1:6.0+dfsg-4) unstable; urgency=medium + + * d/rules: fix last ubuntu merge, xen is x86-only, not all-debian + + -- Michael Tokarev Tue, 17 Aug 2021 19:04:30 +0300 + +qemu (1:6.0+dfsg-3) unstable; urgency=medium + + [ Michael Tokarev ] + * enable /run/qemu mount on ubuntu only + * usbredir-fix-free-call-CVE-2021-3682.patchi + Closes: #991911, CVE-2021-3682 + + [ Christian Ehrhardt ] + * ubuntu-only changes: + - d/control-in: Make Ubuntu qemu-utils depend on qemu-block-extra 
+ - d/control-in: Make Ubuntu qemu-system-common depend on qemu-block-extra + - d/control*, d/rules: disable xen by default, but provide universe package + qemu-system-x86-xen as alternative + * d/p/target-s390x-Fix-translation-exception-on-illegal-in.patch: + avoid segfaults by uretprobes (LP 1929926) + + -- Michael Tokarev Tue, 17 Aug 2021 17:49:10 +0300 + +qemu (1:6.0+dfsg-2expubuntu4) jammy; urgency=medium + + * d/p/lp-1952448-relax-skiboot-gcc-deprecation-errors.patch: + add patch to workaround FTBFS when building against OpenSSL 3.0. + Thanks to Christian Ehrhardt (LP: #1952448) + + -- Paride Legovini Fri, 26 Nov 2021 15:47:51 +0100 + +qemu (1:6.0+dfsg-2expubuntu3) jammy; urgency=medium + + * No-change rebuild against liburing2 + + -- Paride Legovini Mon, 22 Nov 2021 18:00:26 +0100 + +qemu (1:6.0+dfsg-2expubuntu2) jammy; urgency=medium + + * updated debian/patches/linux-user-binfmt-P.diff to work with in-kernel code + (#993658) (LP: #1947860) + + -- Christian Ehrhardt Wed, 03 Nov 2021 14:10:56 +0100 + +qemu (1:6.0+dfsg-2expubuntu1) impish; urgency=medium + + * Merge with Debian experimental, remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type + (LP: 1304107 1621042 1776189 1761372 1761372 1776189) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types containing release versioned machine attributes + - d/qemu-system-x86.NEWS Info on fixed machine type defintions + for host-phys-bits=true + - Add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. 
+ - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - d/control*, d/rules: disable xen by default, but provide universe + package qemu-system-x86-xen as alternative + [includes compat links changes of 5.0-5ubuntu4] + - d/p/ubuntu/enable-svm-by-default.patch: update to match v6.0 + - d/p/ubuntu/define-ubuntu-machine-types.patch: add ubuntu machine types + for v6.0 + - d/p/ubuntu/lp-1929926-*: avoid segfaults by uretprobes (LP 1929926) + - Ease the use of module retention on upgrades (LP 1913421) + - debian/qemu-block-extra.postinst: enable mount unit on install/upgrade + * Dropped Changes [in 1:6.0+dfsg-2exp]: + - d/control-in: Disable capstone disassembler library support (universe) + - Disable fuse export (universe dependency) + - Ease the use of module retention on upgrades (LP 1913421) + - d/run-qemu.mount, d/rules: provide run-qemu.mount in qemu-block-extra + - d/rules: only save modules if /run/qemu isn't noexec + - d/rules: clear all (current and former) modules on purge + - d/control: qemu 6.0 broke libvirt <7.2 add a breaks to avoid partial + upgrade issues (LP 1932264) + - Enable SDL as secondary UI backend (LP 1256185) + - d/control: add build dependency libsdl2-dev + - d/control: enable sdl graphics on build + - d/qemu-system-gui.install: add ui-sdl.so + - d/control: add runtime dependency to libgl1 + * Dropped Changes [no more needed] + - let qemu-utils recommend sharutils + * Added changes: + - d/optionrom.mak, d/p/u/avoid-fcf-clashing-with-i486.patch: fix + -fcf-protection being unavailble on -march=i486 (LP: #1940029) + - d/p/u/lp-1932175-s390x-cpumodel-add-3931-and-3932.patch: add new 3931 + and 3932 machines (LP: #1932175) + - d/p/u/lp-1940288-audio-Never-send-migration-section.patch: fix + migration with audio devices present (LP: #1940288) + + -- Christian Ehrhardt Thu, 12 Aug 2021 15:35:12 +0200 + +qemu (1:6.0+dfsg-2exp) experimental; urgency=medium + + [ Christian Ehrhardt ] + * qemu 6.0 broke libvirt <7.2, add a Breaks + to avoid partial upgrade issues (LP: #1932264) + * enable SDL as secondary UI backend (LP: #1256185) (Closes: #947349) + * clear all (current and former) modules on purge + * only save modules if /run/qemu isn't noexec + * provide run-qemu.mount in qemu-block-extra + (disabled in debian for now) + * Disable capstone disassembler library support in ubuntu (universe) + + [ Michael Tokarev ] + * qemu does not ship Changelog file anymore + * drop version from libfuse-dev build-depends (noticed by Ville Skyttä) + * a few patches from upstream stable: + - target-ppc-fix-load-endianness-for-lxvwsx-lxvdsx.patch + fix various crashes in ppc system emulation. 
+ Thanks to Christian Ehrhardt for pointing this out + - pvrdma-fix-possible-mremap-overflow-in-pvrdma-device-CVE-2021-3582.patch + (Closes: #990565, CVE-2021-3582) + - pvrdma-ensure-correct-input-on-ring-init-CVE-2021-3607.patch + (Closes: #990564, CVE-2021-3607) + - pvrdma-fix-the-ring-init-error-flow-CVE-2021-3608.patch + (Closes: #990563, CVE-2021-3608) + - usb-limit-combined-packets-to-1-MiB-CVE-2021-3527.patch + usb-redir-avoid-dynamic-stack-allocation-CVE-2021-3527.patch + (Closes: #988157, CVE-2021-3527) + * mention closing of 3 bugs in am53c974 (ESP) device emulation by 6.0 + (Closes: #979679, CVE-2020-35504) + (Closes: #984455, CVE-2020-35505) + (Closes: #984454, CVE-2020-35506) + * make fuse debian-only, since libfuse3 in ubuntu is in universe + * fix microvm default machine type for a new build system (LP: #1936894) + + -- Michael Tokarev Wed, 21 Jul 2021 19:43:37 +0300 + +qemu (1:6.0+dfsg-1~ubuntu3) impish; urgency=medium + + * d/p/u/lp-1935617-target-ppc-Fix-load-endianness-for-lxvwsx-lxvdsx.patch: + fix TCG emulation for ppc64 (LP: #1935617) + + -- Christian Ehrhardt Tue, 13 Jul 2021 09:34:55 +0200 + +qemu (1:6.0+dfsg-1~ubuntu2) impish; urgency=medium + + * d/control: remove fuse2 trial-build (LP 1934510) + + -- Christian Ehrhardt Wed, 07 Jul 2021 10:26:08 +0200 + +qemu (1:6.0+dfsg-1~ubuntu1) impish; urgency=medium + + * Merge with Debian experimental, Among many other things this fixes LP Bugs: + (LP: #1907952) broken arrow keys in -display gtk on aarch64 + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type + (LP: 1304107 1621042 1776189 1761372 1761372 1776189) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types containing release versioned machine attributes + - d/qemu-system-x86.NEWS Info on fixed machine type defintions + for host-phys-bits=true + - Add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - Let qemu-utils recommend sharutils + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. 
+ - d/control-in: Disable capstone disassembler library support (universe) + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - d/control*, d/rules: disable xen by default, but provide universe + package qemu-system-x86-xen as alternative + [includes compat links changes of 5.0-5ubuntu4] + - Fix upgrade module handling (LP 1905377) + --enable-module-upgrades for qemu-xen which doesn't exist in Debian + * Dropped Changes [in 6.0]: + - d/p/ubuntu/lp-1907789-build-no-pie-is-no-functional-liker-flag.patch: fix + ld usage of -no-pie (LP 1907789) + - d/p/u/lp-1916230-hw-s390x-fix-build-for-virtio-9p-ccw.patch: fix + virtio-9p-ccw being missing (LP 1916230) + - d/p/u/lp-1916705-disas-Fix-build-with-glib2.0-2.67.3.patch: Fix FTBFS due + to glib2.0 >=2.67.3 (LP 1916705) + - d/p/u/lp-1921754*: add EPYC-Rome-v2 as v1 missed IBRS and thereby fails + on some HW/Guest combinations e.g. Windows 10 on Threadripper chips + (LP 1921754) + - d/p/u/lp-1921880*: add EPYC-Milan features and named cpu type support + (LP 1921880) + - d/p/u/lp-1922010-linux-user-s390x-Use-the-guest-pointer-for-the-sigre*: + fix go in qemu-s390x-static (LP 1922010) + * Dropped Changes [in Debian]: + - Allow qemu to load old modules post upgrade (LP 1847361) + - Drop d/qemu-block-extra.*.in, d/qemu-system-gui.*.in + - d/rules: Drop generating package version into maintainer scripts + * Dropped Changes [No more needed >21.04]: + - d/qemu-system-gui.prerm: add no-op prerm to overcome upgrade issues on + the bad old prerm (LP 1906245 1905377) + * Added Changes + - Disable fuse export (universe dependency) + - d/p/ubuntu/enable-svm-by-default.patch: update to match v6.0 + - d/p/ubuntu/define-ubuntu-machine-types.patch: add ubuntu machine types + for v6.0 + - d/p/ubuntu/lp-1929926-*: avoid segfaults by uretprobes (LP: #1929926) + - Ease the use of module retention on upgrades (LP: #1913421) + - d/run-qemu.mount, d/rules: provide run-qemu.mount in qemu-block-extra + - d/rules: only save modules if /run/qemu isn't noexec + - d/rules: clear all (current and former) modules on purge + - debian/qemu-block-extra.postinst: enable mount unit on install/upgrade + - d/control: qemu 6.0 broke libvirt <7.2, add a Breaks to avoid partial + upgrade issues (LP: #1932264) + - Enable SDL as secondary UI backend (LP: #1256185) + - d/control: add build dependency libsdl2-dev + - d/control: enable sdl graphics on build + - d/qemu-system-gui.install: add ui-sdl.so + - d/control: add runtime dependency to libgl1 + - d/rules: qemu-system-x86-xen builds modules as well now (follows the + other packages) + + -- Christian Ehrhardt Tue, 15 Jun 2021 12:41:33 +0200 + +qemu (1:6.0+dfsg-1~exp0) experimental; urgency=medium + + * new upstream release + Closes: #979679, CVE-2020-35504 + Closes: #984455, CVE-2020-35505 + Closes: #984454, CVE-2020-35506 + * remove obsolete patches, refresh use-fixed-data-path.patch + * use libncurses-dev, not old libncursesw5-dev + * enable fuse export (and build-depend on libfuse3-dev) + * install (new) manpages for qemu-storage-daemon + * enable new hexagon qemu-user target + * two patches to fix 3 new spelling mistakes + * remove now-unused shared-library-lacks-prerequisites lintian-overrides + for qemu-user-static + + -- Michael Tokarev Sat, 08 May 2021 10:16:05 +0300 + +qemu (1:5.2+dfsg-11) unstable; urgency=medium + + * i386-acpi-restore-device-paths-for-pre-5.1-vms.patch + This fixes a serious issue in some VMs (in particular, Windows & MacOS) + when migrating from buster qemu to bullseye qemu.
+ (Closes: #990675) + * pvrdma-fix-possible-mremap-overflow-in-pvrdma-device-CVE-2021-3582.patch + (Closes: #990565, CVE-2021-3582) + * pvrdma-ensure-correct-input-on-ring-init-CVE-2021-3607.patch + (Closes: #990564, CVE-2021-3607) + * pvrdma-fix-the-ring-init-error-flow-CVE-2021-3608.patch + (Closes: #990563, CVE-2021-3608) + * ide-atapi-check-logical-block-address-and-read-size-CVE-2020-29443.patch + (Closes: #983575, CVE-2020-29443) + * usb-limit-combined-packets-to-1-MiB-CVE-2021-3527.patch + usb-redir-avoid-dynamic-stack-allocation-CVE-2021-3527.patch + (Closes: #988157, CVE-2021-3527) + + -- Michael Tokarev Sun, 18 Jul 2021 16:14:41 +0300 + +qemu (1:5.2+dfsg-10) unstable; urgency=medium + + * 5 sdhci fixes from upstream: + dont-transfer-any-data-when-command-time-out.patch + dont-write-to-SDHC_SYSAD-register-when-transfer-is-in-progress.patch + correctly-set-the-controller-status-for-ADMA.patch + limit-block-size-only-when-SDHC_BLKSIZE-register-is-writable.patch + reset-the-data-pointer-of-s-fifo_buffer-when-a-different-block-size...patch + (Closes: #986795, #970937, CVE-2021-3409, CVE-2020-17380, CVE-2020-25085) + * mptsas-remove-unused-MPTSASState.pending-CVE-2021-3392.patch + fix possible use-after-free in mptsas_free_request + (Closes: #984449, CVE-2021-3392) + + -- Michael Tokarev Fri, 16 Apr 2021 12:43:36 +0300 + +qemu (1:5.2+dfsg-9ubuntu3) hirsute; urgency=medium + + * d/p/u/lp-1921754*: add EPYC-Rome-v2 as v1 missed IBRS and thereby fails + on some HW/Guest combinations e.g. Windows 10 on Threadripper chips + (LP: #1921754) + * d/p/u/lp-1921880*: add EPYC-Milan features and named cpu type support + (LP: #1921880) + + -- Christian Ehrhardt Wed, 07 Apr 2021 11:58:29 +0200 + +qemu (1:5.2+dfsg-9ubuntu2) hirsute; urgency=medium + + * d/p/u/lp-1922010-linux-user-s390x-Use-the-guest-pointer-for-the-sigre.patch: + fix go in qemu-s390x-static (LP: #1922010) + + -- Christian Ehrhardt Wed, 31 Mar 2021 10:01:40 +0200 + +qemu (1:5.2+dfsg-9ubuntu1) hirsute; urgency=medium + + * Merge with Debian unstable; Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type (LP: 1304107 1621042) + - d/p/ubuntu/define-ubuntu-machine-types.patch: distro machine types + - d/qemu-system-x86.NEWS Info on fixed machine type definitions + for host-phys-bits=true (LP: 1776189) + - add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - provide pseries-bionic-2.11-sxxm type as convenience with all + meltdown/spectre workarounds enabled by default. (LP: 1761372).
+ - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - d/control-in: Disable capstone disassembler library support (universe) + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - d/control*, d/rules: disable xen by default, but provide universe + package qemu-system-x86-xen as alternative + [includes compat links changes of 5.0-5ubuntu4] + - allow qemu to load old modules post upgrade (LP 1847361) + - Drop d/qemu-block-extra.*.in, d/qemu-system-gui.*.in + - d/rules: Drop generating package version into maintainer scripts + - d/qemu-system-gui.prerm: add no-op prerm to overcome upgrade issues on + the bad old prerm (LP 1906245 1905377) + - d/p/ubuntu/lp-1907789-build-no-pie-is-no-functional-liker-flag.patch: fix + ld usage of -no-pie (LP 1907789) + - d/p/u/lp-1916230-hw-s390x-fix-build-for-virtio-9p-ccw.patch: fix + virtio-9p-ccw being missing (LP 1916230) + - d/p/u/lp-1916705-disas-Fix-build-with-glib2.0-2.67.3.patch: Fix FTBFS due + to glib2.0 >=2.67.3 (LP 1916705) + + -- Christian Ehrhardt Thu, 18 Mar 2021 11:13:49 +0100 + +qemu (1:5.2+dfsg-9) unstable; urgency=medium + + * do not make qemu-system-data dependent on qemu-system-foo + (Closes: #985040) + * CVE-2021-20263 - implement dropping security.capability xattr + This adds two patches from upstream: + virtiofsd-save-error-code-early-at-the-failure-callsite.patch + virtiofsd-drop-remapped-security.capability-..-needed-CVE-2021-20263.patch + Closes: #985083, CVE-2021-20263 + * CVE-2021-3416 fix from upstream + Fixes infinite loop in loopback mode of various network devices, + adding 10 patches from upstream + Closes: #984448, CVE-2021-3416 + * net-e1000-fail-early-for-evil-descriptor-CVE-2021-20257.patch + Fix CVE-2021-20257 from upstream: e1000: infinite loop while processing + transmit descriptors + Closes: #984450, CVE-2021-20257 + + -- Michael Tokarev Wed, 17 Mar 2021 21:02:30 +0300 + +qemu (1:5.2+dfsg-8) unstable; urgency=medium + + * a no-change upload to fix broken previous upload + + -- Michael Tokarev Sun, 14 Mar 2021 12:21:37 +0300 + +qemu (1:5.2+dfsg-7) unstable; urgency=high + + * do not make qemu-system-common dependent on qemu-system-foo. + We removed modules from qemu-system-common for now, so there's no + need for it to depend on any of qemu-system-foo of the same version. + Among other things this fixes #983756 (which should be fixed some + other way anyway, but it should be ok for now). + Closes: #983756, #983921, #985195 + Urgency is high because a single bin-NMU of qemu package made it + uninstallable.
+ + -- Michael Tokarev Sun, 14 Mar 2021 11:32:54 +0300 + +qemu (1:5.2+dfsg-6ubuntu2) hirsute; urgency=medium + + * d/p/u/lp-1916705-disas-Fix-build-with-glib2.0-2.67.3.patch: Fix FTFBS due + to glib2.0 >=2.67.3 (LP: #1916705) + + -- Christian Ehrhardt Wed, 24 Feb 2021 08:39:09 +0100 + +qemu (1:5.2+dfsg-6ubuntu1) hirsute; urgency=medium + + * Merge with Debian unstable, includes fixes for + - build operates differently if source is a git repo (LP: #1887535) + Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type (LP: 1304107 1621042) + - d/p/ubuntu/define-ubuntu-machine-types.patch: distro machine types + - d/qemu-system-x86.NEWS Info on fixed machine type definitions + for host-phys-bits=true (LP: 1776189) + - add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - provide pseries-bionic-2.11-sxxm type as convenience with all + meltdown/spectre workarounds enabled by default. (LP: 1761372). + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - d/control-in: Disable capstone disassembler library support (universe) + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - d/control*, d/rules: disable xen by default, but provide universe + package qemu-system-x86-xen as alternative + [includes compat links changes of 5.0-5ubuntu4] + - allow qemu to load old modules post upgrade (LP 1847361) + - Drop d/qemu-block-extra.*.in, d/qemu-system-gui.*.in + - d/rules: Drop generating package version into maintainer scripts + - d/qemu-system-gui.prerm: add no-op prerm to overcome upgrade issues on + the bad old prerm (LP 1906245 1905377) + - d/p/ubuntu/lp-1907789-build-no-pie-is-no-functional-liker-flag.patch: fix + ld usage of -no-pie (LP 1907789) + * Added changes + - d/p/u/lp-1916230-hw-s390x-fix-build-for-virtio-9p-ccw.patch: fix + virtio-9p-ccw being missing (LP: #1916230) + + -- Christian Ehrhardt Mon, 22 Feb 2021 11:40:36 +0100 + +qemu (1:5.2+dfsg-6) unstable; urgency=medium + + * deprecate qemu-debootstrap. It is not needed anymore with + binfmt F flag, since everything now works without --foreign + debootstrap argument and copying the right qemu binary into + the chroot. 
Closes: #901197 + * fix the brown-paper bag bug: wrong argument order + in the linux-user-binfmt patch (really closes: #970460) + + -- Michael Tokarev Tue, 16 Feb 2021 12:11:20 +0300 + +qemu (1:5.2+dfsg-5) unstable; urgency=medium + + * d/rules: ensure b/ subdir exists before building palcode and qboot + * d/changelog: #959530 is not fixed by 5.2+dfsg-4 + * 3 virtiofsd patches Closes: #980814, CVE-2020-35517 + virtiofsd: potential privileged host device access from guest + - virtiofsd-extract-lo_do_open-from-lo_open.patch + - virtiofsd-optionally-return-inode-pointer-from-lo_do_lookup.patch + - virtiofsd-prevent-opening-of-special-files-CVE-2020-35517.patch + -- Michael Tokarev Sun, 14 Feb 2021 17:44:06 +0300 + +qemu (1:5.2+dfsg-4) unstable; urgency=medium + + [ Michael Tokarev ] + * require libfdt >= 1.5.0-2 due to #931046 + * qemu-user: attempt to preserve argv[0] when run under binfmt + (Closes: #970460) + This changes the interpreter name for all linux-user registered + binfmts, so it potentially can break stuff. The actual binary + being registered now is /usr/libexec/qemu-binfmt/foo-binfmt-P, + which is a symlink to the actual /usr/lib/qemu-foo[-static]. + * ignore .git-submodule-status when building source + * some security fixes from upstream: + o arm_gic-fix-interrupt-ID-in-GICD_SGIR-CVE-2021-20221.patch + Closes: CVE-2021-20221 + GIC (armv7): out-of-bound heap buffer access via an interrupt ID field + o 9pfs-Fully-restart-unreclaim-loop-CVE-2021-20181.patch + Closes: CVE-2021-20181 + * non-security fixes from upstream: + pc-bios-descriptors-fix-paths-in-json-files.patch - fixes wrong paths + in edk2-firmware-related json files introduced in 5.2 + + [ Christian Ehrhardt ] + * d/control-in: avoid version mismatch of installed binaries + (Closes: #956377) + + [ Dan Streetman ] + * Backport configure param --with-git-submodules and set to 'ignore' + + -- Michael Tokarev Sun, 14 Feb 2021 16:52:10 +0300 + +qemu (1:5.2+dfsg-3ubuntu2) hirsute; urgency=medium + + * No change rebuild to pick up liburing. (LP: #1914145) + + -- Mauricio Faria de Oliveira Wed, 03 Feb 2021 19:44:54 -0300 + +qemu (1:5.2+dfsg-3ubuntu1) hirsute; urgency=medium + + * Merge with Debian unstable, includes fixes for + - qemu-user-static are partially dynamically linked (LP: #1908331) + - qemu crashing when using spice without qemu-system-gui being + installed (LP: #1908577) + Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type (LP: 1304107 1621042) + - d/p/ubuntu/define-ubuntu-machine-types.patch: distro machine types + - d/qemu-system-x86.NEWS Info on fixed machine type definitions + for host-phys-bits=true (LP: 1776189) + - add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - provide pseries-bionic-2.11-sxxm type as convenience with all + meltdown/spectre workarounds enabled by default. (LP: 1761372).
+ - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - d/control-in: Disable capstone disassembler library support (universe) + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - d/control*, d/rules: disable xen by default, but provide universe + package qemu-system-x86-xen as alternative + [includes compat links changes of 5.0-5ubuntu4] + - allow qemu to load old modules post upgrade (LP 1847361) + - Drop d/qemu-block-extra.*.in, d/qemu-system-gui.*.in + - d/rules: Drop generating package version into maintainer scripts + - d/qemu-system-gui.prerm: add no-op prerm to overcome upgrade issues on + the bad old prerm (LP 1906245 1905377) + - d/p/ubuntu/lp-1907789-build-no-pie-is-no-functional-liker-flag.patch: fix + ld usage of -no-pie (LP 1907789) + + -- Christian Ehrhardt Tue, 05 Jan 2021 12:43:42 +0100 + +qemu (1:5.2+dfsg-3) unstable; urgency=medium + + [ Christian Ehrhardt ] + * d/rules: fix qemu-user-static to really be static (LP: #1908331) + + [ Michael Tokarev ] + * build most modules statically (besides block and gui parts). + This makes qemu-system-common package to be of less strict dependency + for other qemu-system-* packages, and also Closes: #977301, #978131 + * especially remove removed binfmts in qemu-user-{static,binfmt}.preinst + (really Closes: #977015) + * memory-clamp-cached-translation-MMIO-region-CVE-2020-27821.patch + (Closes: #977616, CVE-2020-27821) + + -- Michael Tokarev Tue, 29 Dec 2020 15:07:03 +0300 + +qemu (1:5.2+dfsg-2ubuntu1) hirsute; urgency=medium + + * Merge with Debian unstable + - includes fix for CVE-2020-17380 + - includes a fix for s390x PCI device reset (LP: #1907656) + Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type (LP: 1304107 1621042) + - d/p/ubuntu/define-ubuntu-machine-types.patch: distro machine types + - d/qemu-system-x86.NEWS Info on fixed machine type definitions + for host-phys-bits=true (LP: 1776189) + - add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - provide pseries-bionic-2.11-sxxm type as convenience with all + meltdown/spectre workarounds enabled by default. (LP: 1761372). 
+ - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - d/control-in: Disable capstone disassembler library support (universe) + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - d/control*, d/rules: disable xen by default, but provide universe + package qemu-system-x86-xen as alternative + [includes compat links changes of 5.0-5ubuntu4] + - allow qemu to load old modules post upgrade (LP 1847361) + - Drop d/qemu-block-extra.*.in, d/qemu-system-gui.*.in + - d/rules: Drop generating package version into maintainer scripts + - d/qemu-system-gui.prerm: add no-op prerm to overcome upgrade issues on + the bad old prerm (LP 1906245 1905377) + * Dropped Changes: + - d/control, d/rules: build with gcc-9 on armhf as workaround until + resolved in gcc-10 (LP: 1890435) [it is flaky still, but no more 100% + fails] + * Added Changes: + - Refreshed ubuntu machine types for hirsute@5.2 + - d/control: regenerated from d/control-in + - d/p/ubuntu/lp-1907789-build-no-pie-is-no-functional-liker-flag.patch: fix + ld usage of -no-pie (LP: #1907789) + + -- Christian Ehrhardt Wed, 09 Dec 2020 16:44:47 +0100 + +qemu (1:5.2+dfsg-2) unstable; urgency=medium + + * move ui-opengl.so module from qemu-system-gui to qemu-system-common, + as other modules want it (Closes: #976996, #977022) + * do not install dropped ppc64abi32 binfmt for qemu-user[-static] + (Closes: #977015) + + -- Michael Tokarev Thu, 10 Dec 2020 11:15:43 +0300 + +qemu (1:5.2+dfsg-1) unstable; urgency=medium + + * new upstream release + Closes: #965978, CVE-2020-15859 (22dc8663d9fc7baa22100544c600b6285a63c7a3) + Closes: #970539, CVE-2020-25084 (21bc31524e8ca487e976f713b878d7338ee00df2) + Closes: #970540, CVE-2020-25085 (dfba99f17feb6d4a129da19d38df1bcd8579d1c3) + Closes: #970541, CVE-2020-25624 (1328fe0c32d5474604105b8105310e944976b058) + Closes: #970542, CVE-2020-25625 (1be90ebecc95b09a2ee5af3f60c412b45a766c4f) + Closes: #974687, CVE-2020-25707 (c2cb511634012344e3d0fe49a037a33b12d8a98a) + Closes: #975276, CVE-2020-25723 (2fdb42d840400d58f2e706ecca82c142b97bcbd6) + Closes: #975265, CVE-2020-27616 (ca1f9cbfdce4d63b10d57de80fef89a89d92a540) + Closes: #973324, CVE-2020-27617 (7564bf7701f00214cdc8a678a9f7df765244def1) + Closes: #972864, CVE-2020-27661 (bea2a9e3e00b275dc40cfa09c760c715b8753e03) + Closes: CVE-2020-27821 (1370d61ae3c9934861d2349349447605202f04e9) + Closes: #976388, CVE-2020-28916 (c2cb511634012344e3d0fe49a037a33b12d8a98a) + * remove obsolete patches + * refresh use-fixed-data-path.patch and debian/get-orig-source.sh + * bump minimum meson version required for build to 0.55.3 + * update build rules for several components + * remove deprecated lm32 and unicore32 system emulators + * remove deprecated ppc64abi32 and tilegx linux-user emulators + * install ui-spice-core.so & chardev-spice.so in qemu-system-common + * install ui-egl-headless.so in 
qemu-system-common + * install hw-display-virtio-*.so in qemu-system-common + * install ui-opengl.so in qemu-system-gui + * install qemu-pr-helper.8 in qemu-system-common + * qemu-pr-helper moved to usr/bin/ again + * qboot.rom renamed from bios-microvm.bin + * remove several unused lintian overrides + * add spelling.diff patch to fix a few spelling errors + * update Standards-Version to 4.5.1 + * fix a few trailing whitespaces in d/control and d/changelog + * require libcapstone >= 4.0.2 (v4) for build + + -- Michael Tokarev Wed, 09 Dec 2020 08:57:41 +0300 + +qemu (1:5.1+dfsg-4ubuntu3) hirsute; urgency=medium + + * d/qemu-system-gui.prerm: add no-op prerm to overcome upgrade issues on + the bad old prerm (LP: #1906245) + + -- Christian Ehrhardt Mon, 30 Nov 2020 12:53:03 +0100 + +qemu (1:5.1+dfsg-4ubuntu2) hirsute; urgency=medium + + * Fix upgrade module handling (LP: #1905377) + This was accepted in a slightly different form in qemu_5.0-6 and therefore + allows dropping some former delta that is now conflicting. + Ubuntu still keeps enabling --enable-module-upgrades, but only for + qemu-xen which doesn't exist in Debian + - Drop d/qemu-block-extra.*.in, d/qemu-system-gui.*.in + - d/rules: Drop generating package version into maintainer scripts + + -- Christian Ehrhardt Tue, 24 Nov 2020 11:16:01 +0100 + +qemu (1:5.1+dfsg-4ubuntu1) hirsute; urgency=medium + + * Merge with Debian testing, remaining changes: + Fixes qemu-arm-static Assertion `guest_base != 0' failed (LP: #1897854) + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type (LP: 1304107 1621042) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types + - d/qemu-system-x86.NEWS Info on fixed machine type definitions + for host-phys-bits=true (LP: 1776189) + - add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - provide pseries-bionic-2.11-sxxm type as convenience with all + meltdown/spectre workarounds enabled by default. (LP: 1761372). + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases.
+ - d/control-in: Disable capstone disassembler library support (universe) + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - d/control*, d/rules: disable xen by default, but provide universe + package qemu-system-x86-xen as alternative + [includes compat links changes of 5.0-5ubuntu4] + - allow qemu to load old modules post upgrade (LP 1847361) + - d/qemu-block-extra.*.in, d/qemu-system-gui.*.in: save shared objects on + upgrade + - d/rules: generate maintainer scripts matching package version on build + - d/rules: enable --enable-module-upgrades where --enable-modules is set + - d/control: regenerate debian/control out of control-in + * Dropped changes [in Debian or no more needed] + - d/control-in: disable pmem on ppc64 as it is currently considered + experimental on that architecture (pmdk v1.8-1) + - d/rules: makefile definitions can't be recursive - sys_systems for s390x + - d/rules: report config log from the correct subdir + - d/control-in: disable rbd support unavailable on riscv (LP: 1872931) + - Pick further changes for groovy from debian/master since 5.0-5 + - ati-vga-check-mm_index-before-recursive-call-CVE-2020-13800.patch + - revert-memory-accept-mismatching-sizes-in-memory_region_access_...patch + - exec-set-map-length-to-zero-when-returning-NULL-CVE-2020-13659.patch + - megasas-use-unsigned-type-for-reply_queue_head-and-check-index...patch + - megasas-use-unsigned-type-for-positive-numeric-fields.patch + - megasas-fix-possible-out-of-bounds-array-access.patch + - nbd-server-avoid-long-error-message-assertions-CVE-2020-10761.patch + - es1370-check-total-frame-count-against-current-...-CVE-2020-13361.patch + - a few patches from the stable series: + - fix-tulip-breakage.patch + - 9p-lock-directory-streams-with-a-CoMutex.patch + Prevent deadlocks in 9pfs readdir code + - net-do-not-include-a-newline-in-the-id-of-nic-device.patch + Fix newline accidentally sneaked into id string of a nic + - qemu-nbd-close-inherited-stderr.patch + - virtio-balloon-fix-free-page-hinting-check-on-unreal.patch + - virtio-balloon-fix-free-page-hinting-without-an-iothread.patch + - virtio-balloon-unref-the-iothread-when-unrealizing.patch + - acpi-tmr-allow-2-byte-reads.patch + - reapply CVE-2020-13253 fixes from upstream + - linux-user-refactor-ipc-syscall-and-support-of-semtimedop.patch + - linux-user-add-netlink-RTM_SETLINK-command.patch + - d/control: since qemu-system-data now contains module(s), + it can't be multi-arch. Ditto for qemu-block-extra. + - qemu-system-foo: depend on exact version of qemu-system-data, + due to the latter having modules + - acpi-allow-accessing-acpi-cnt-register-by-byte.patch' + This is another incarnation of the recent bugfix which actually enabled + memory access constraints, like #964247 + - acpi-accept-byte-and-word-access-to-core-ACPI-registers.patch + this replace acpi-allow-accessing-acpi-cnt-register-by-byte.patch + and acpi-tmr-allow-2-byte-reads.patch, a more complete fix + - xhci-fix-valid.max_access_size-to-access-address-registers.patch + fix one more incarnation of the breakage after the CVE-2020-13754 fix + - do not install outdated (0.12 and before) Changelog + - xgmac-fix-buffer-overflow-in-xgmac_enet_send-CVE-2020-15863.patch + ARM-only XGMAC NIC, possible buffer overflow during packet transmission + Closes: CVE-2020-15863 + - sm501 OOB read/write due to integer overflow in sm501_2d_operation() + - riscv-allow-64-bit-access-to-SiFive-CLINT.patch + another fix for revert-memory-accept-.. 
CVE-2020-13754 + - seabios-hppa-fno-ipa-sra.patch fix ftbfs with gcc-10 + - d/control-in: build-dep libcap is no more needed + - arch aware kvm wrappers + [upstream now automatically enables KVM if available and called with + kvm* name, provides KVM as before but with auto-fallback to tcg. + Former behavior of KVM-or-die can be achieved via -machine accel=kvm ] + * Dropped changes [upstream now] + - d/p/u/usb-fix-setup_len-init-CVE-2020-14364.patch: sanity check usb + setup_len + - d/p/u/lp-1887930-*: Enable Channel Path Handling for vfio-ccw (LP 1887930) + - d/p/u/lp-1894942-*: fix virtio-ccw host/guest notification (LP 1894942) + - d/p/ubuntu/lp-1887935-vfio-ccw-allow-non-prefetch-ORBs.patch: fix boot + from vfio-ccw (LP 1887935) + - fix qemu-user-static initialization to allow executing systemd (LP 1890881) + - fix assertion failue in net_tx_pkt_add_raw_fragment (LP 1891187) + - d/p/ubuntu/lp-1883984-target-s390x-Fix-SQXBR.patch: avoid crash on + SQXBR (LP 1883984) + - d/p/lp-1890154-*: fix -no-reboot on s390x secure boot (LP 1890154) + - d/p/ubuntu/lp-1887763-*: fix TCG sizing that OOMed many small CI + environments (LP 1887763) + - d/p/ubuntu/lp-1835546-*: backport the s390x protvirt feature (LP 1835546) + - debian/patches/ubuntu/lp-1878973-*: fix assert in qemu-guest-agent that + crashes it on shutdown (LP 1878973) + - update d/p/ubuntu/lp-1835546-* to the final versions + - d/p/ubuntu/virtio-net-fix-rsc_ext-compat-handling.patch: fix + FTBFS in groovy + * Added Changes: + - update ubuntu machine types for hirsute@5.1 + - d/control: regenerated from d/control-in + - d/control, d/rules: build with gcc-9 on armhf as workaround until + resolved in gcc-10 (LP: 1890435) + + -- Christian Ehrhardt Thu, 29 Oct 2020 12:37:31 +0100 + +qemu (1:5.1+dfsg-4) unstable; urgency=high + + * mention closing of CVE-2020-16092 by 5.1 + * usb-fix-setup_len-init-CVE-2020-14364.patch + Closes: #968947, CVE-2020-14364 + (OOB r/w access in USB emulation) + + -- Michael Tokarev Wed, 02 Sep 2020 16:14:52 +0300 + +qemu (1:5.1+dfsg-3) unstable; urgency=medium + + * fix one more issue in last upload. This is what happens when + you do "obvious" stuff in a hurry without proper testing.. + + -- Michael Tokarev Mon, 17 Aug 2020 22:19:55 +0300 + +qemu (1:5.1+dfsg-2) unstable; urgency=medium + + * fix brown-paper bag bug in last upload + + -- Michael Tokarev Mon, 17 Aug 2020 20:58:52 +0300 + +qemu (1:5.1+dfsg-1) unstable; urgency=medium + + * hw-display-qxl.so depends on spice so install it + only if it is built just like ui-spice-app + * note #931046 for libfdt + + -- Michael Tokarev Mon, 17 Aug 2020 18:57:14 +0300 + +qemu (1:5.1+dfsg-0exp1) experimental; urgency=medium + + * new upstream release 5.1.0. 
Make source DFSG-clean again + Closes: #968088 + Closes: CVE-2020-16092 (net_tx_pkt_add_raw_fragment in e1000e & vmxnet3) + * remove all patches which are applied upstream + * do not install non-existing doc/qemu/*-ref.* + * qemu-pr-helper is now in /usr/lib/qemu not /usr/bin + * virtfs-proxy-helper is in /usr/lib/qemu now, not /usr/bin + * new architecture: qemu-system-avr + * refresh d/get-orig-source.sh + * d/get-orig-source.sh: report already removed files in dfsg-clean + * install common modules in qemu-system-common + * lintian tag renamed: shared-lib-without-dependency-information to + shared-library-lacks-prerequisites + + -- Michael Tokarev Wed, 12 Aug 2020 19:09:24 +0300 + +qemu (1:5.0-14) unstable; urgency=high + + * this is a bugfix release before breaking toys with the new upstream + * riscv-allow-64-bit-access-to-SiFive-CLINT.patch + (another fix for revert-memory-accept-..-CVE-2020-13754) + * install /usr/lib/*/qemu/ui-curses.so in qemu-system-common + Closes: #966517 + + -- Michael Tokarev Fri, 31 Jul 2020 11:45:25 +0300 + +qemu (1:5.0-13) unstable; urgency=medium + + * seabios-hppa-fno-ipa-sra.patch + fix ftbfs with gcc-10 + + -- Michael Tokarev Wed, 22 Jul 2020 22:16:41 +0300 + +qemu (1:5.0-12) unstable; urgency=medium + + * acpi-accept-byte-and-word-access-to-core-ACPI-registers.patch + this replaces acpi-allow-accessing-acpi-cnt-register-by-byte.patch + and acpi-tmr-allow-2-byte-reads.patch, a more complete fix + * xhci-fix-valid.max_access_size-to-access-address-registers.patch + fix one more incarnation of the breakage after the CVE-2020-13754 fix + * do not install outdated (0.12 and before) Changelog (Closes: #965381) + * xgmac-fix-buffer-overflow-in-xgmac_enet_send-CVE-2020-15863.patch + ARM-only XGMAC NIC, possible buffer overflow during packet transmission + Closes: CVE-2020-15863 + * sm501 OOB read/write due to integer overflow in sm501_2d_operation() + List of patches: + sm501-convert-printf-abort-to-qemu_log_mask.patch + sm501-shorten-long-variable-names-in-sm501_2d_operation.patch + sm501-use-BIT-macro-to-shorten-constant.patch + sm501-clean-up-local-variables-in-sm501_2d_operation.patch + sm501-replace-hand-written-implementation-with-pixman-CVE-2020-12829.patch + Closes: #961451, CVE-2020-12829 + + -- Michael Tokarev Wed, 22 Jul 2020 19:42:29 +0300 + +qemu (1:5.0-11) unstable; urgency=high + + * d/control-in: only enable opengl (libdrm&Co) on linux + * d/control-in: spice: drop versioned deps (even jessie version is enough), + drop libspice-protocol-dev (automatically pulled by libspice-server-dev), + and build on more architectures + * change from debhelper versioned dependency to debhelper-compat (=12) + * acpi-allow-accessing-acpi-cnt-register-by-byte.patch (Closes: #964793) + This is another incarnation of the recent bugfix which actually enabled + memory access constraints, like #964247 + Urgency = high due to this issue.
+ + -- Michael Tokarev Mon, 20 Jul 2020 18:41:17 +0300 + +qemu (1:5.0-10) unstable; urgency=medium + + * fix the wrong $(if) construct for s390x kvm link (FTBFS on s390x) + * use the same $(if) construct to simplify #ifdeffery + + -- Michael Tokarev Sat, 18 Jul 2020 10:02:41 +0300 + +qemu (1:5.0-9) unstable; urgency=medium + + * move kvm executable/script from qemu-kvm to qemu-system-foo, + make it multi-arch, and remove qemu-kvm package + * remove libcacard leftovers from d/.gitignore + * linux-user-refactor-ipc-syscall-and-support-of-semtimedop.patch + (Closes: #965109) + * linux-user-add-netlink-RTM_SETLINK-command.patch (Closes: #964289) + * libudev is linux-specific, do not build-depend on it + on kfreebsd and others + * install virtiofsd in d/rules (!sparc64) instead of + d/qemu-system-common.install (fixes FTBFS on sparc64) + * confirm -static-pie not working today still + * d/control: since qemu-system-data now contains module(s), + it can't be multi-arch. Ditto for qemu-block-extra. + * qemu-system-foo: depend on exact version of qemu-system-data, + due to the latter having modules + * build all modules since there are modules anyway, + no need to hack them in d/rules + * fix spelling in a patch name/subject in last upload + * d/rules: do not use dh_install and dh_movefiles for individual + pkgs, open-code mkdir+cp/mv, b/c dh_install acts on all files + listed in d/foo.install too, in addition to given on command-line + * remove trailing whitespace from d/changelog + + -- Michael Tokarev Sat, 18 Jul 2020 08:29:38 +0300 + +qemu (1:5.0-8) unstable; urgency=medium + + * d/control: rdma is linux-only, do not enable it on kfreebsd & hurd + * add comment about virtiofsd conditional to d/qemu-system-common.install + Now qemu FTBFS on sparc64 since virtiofsd is not built due to missing + seccomp on that platform, we should either make virtiofsd conditional + (!sparc64) or fix seccomp on sparc64 and build-depend on it + * openbios-use-source_date_epoch-in-makefile.patch (Closes: #963466) + * seabios-hppa-use-consistant-date-and-remove-hostname.patch (Closes: #963467) + * slof-remove-user-and-host-from-release-version.patch (Closes: #963472) + * slof-ensure-ld-is-called-with-C-locale.patch (Closes: #963470) + * update previous changelog, mention #945997 + * reapply CVE-2020-13253 fixes from upstream: + sdcard-simplify-realize-a-bit.patch (preparation for the next patch) + sdcard-dont-allow-invalid-SD-card-sizes.patch (half part of CVE-2020-13253) + sdcard-update-coding-style-to-make-checkpatch-happy.patch (preparational) + sdcard-dont-switch-to-ReceivingData-if-address-is-in..-CVE-2020-13253.patch + Closes: #961297, CVE-2020-13253 + + -- Michael Tokarev Fri, 17 Jul 2020 09:12:43 +0300 + +qemu (1:5.0-7) unstable; urgency=medium + + * Revert "d/rules: report config log from the correct subdir - base build" + * Revert "d/rules: report config log from the correct subdir - microvm build" + * acpi-tmr-allow-2-byte-reads.patch (Closes: #964247) + * remove sdcard-dont-switch-to-ReceivingData-if-add...-CVE-2020-13253.patch - + upstream decided to fix it differently (Reopens: #961297, CVE-2020-13253) + * explicitly specify --enable-tools on hppa and do the same trick + with --enable-tcg-interpreter --enable-tools on a few other unsupported + arches (Closes: #964372, #945997) + + -- Michael Tokarev Thu, 16 Jul 2020 18:36:08 +0300 + +qemu (1:5.0-6) unstable; urgency=medium + + [ Christian Ehrhardt ] + * d/control-in: disable pmem on ppc64 as it is currently considered + experimental on that
architecture + * d/rules: makefile definitions can't be recursive - sys_systems for s390x + * d/rules: report config log from the correct subdir - base build + * d/rules: report config log from the correct subdir - microvm build + * d/control-in: disable rbd support unavailable on riscv + * fix assert in qemu guest agent that crashes on shutdown (LP: #1878973) + * d/control-in: build-dep libcap is no more needed + * d/rules: update -spice compat (Ubuntu only) + + [ Michael Tokarev ] + * save block modules on upgrades (LP: #1847361) + After upgrade a still running qemu of a former version can't load the + new modules e.g. for extended storage support. Qemu 5.0 has the code to + allow defining a path that it will load these modules from. + * ati-vga-check-mm_index-before-recursive-call-CVE-2020-13800.patch + Closes: CVE-2020-13800, ati-vga allows guest OS users to trigger + infinite recursion via a crafted mm_index value during + ati_mm_read or ati_mm_write call. + * revert-memory-accept-mismatching-sizes-in-memory_region_access_valid...patch + Closes: CVE-2020-13754, possible OOB memory accesses in a bunch of qemu + devices which uses min_access_size and max_access_size Memory API fields. + Also closes: CVE-2020-13791 + * exec-set-map-length-to-zero-when-returning-NULL-CVE-2020-13659.patch + CVE-2020-13659: address_space_map in exec.c can trigger + a NULL pointer dereference related to BounceBuffer + * megasas-use-unsigned-type-for-reply_queue_head-and-check-index...patch + Closes: #961887, CVE-2020-13362, megasas_lookup_frame in hw/scsi/megasas.c + has an OOB read via a crafted reply_queue_head field from a guest OS user + * megasas-use-unsigned-type-for-positive-numeric-fields.patch + fix other possible cases like in CVE-2020-13362 (#961887) + * megasas-fix-possible-out-of-bounds-array-access.patch + Some tracepoints use a guest-controlled value as an index into the + mfi_frame_desc[] array. Thus a malicious guest could cause a very low + impact OOB errors here + * nbd-server-avoid-long-error-message-assertions-CVE-2020-10761.patch + Closes: CVE-2020-10761, An assertion failure issue in the QEMU NBD Server. + This flaw occurs when an nbd-client sends a spec-compliant request that is + near the boundary of maximum permitted request length. A remote nbd-client + could use this flaw to crash the qemu-nbd server resulting in a DoS. + * es1370-check-total-frame-count-against-current-frame-CVE-2020-13361.patch + Closes: CVE-2020-13361, es1370_transfer_audio in hw/audio/es1370.c does not + properly validate the frame count, which allows guest OS users to trigger + an out-of-bounds access during an es1370_write() operation + * sdcard-dont-switch-to-ReceivingData-if-address-is-in...-CVE-2020-13253.patch + CVE-2020-13253: sd_wp_addr in hw/sd/sd.c in QEMU 4.2.0 uses an unvalidated + address, which leads to an out-of-bounds read during sdhci_write() + operations. A guest OS user can crash the QEMU process. + And a preparational patch, + sdcard-update-coding-style-to-make-checkpatch-happy.patch + * a few patches from the stable series: + - fix-tulip-breakage.patch + The tulip network driver in a qemu-system-hppa emulation is broken in + the sense that bigger network packages aren't received any longer and + thus even running e.g. "apt update" inside the VM fails. Fix this. 
+ - 9p-lock-directory-streams-with-a-CoMutex.patch + Prevent deadlocks in 9pfs readdir code + - net-do-not-include-a-newline-in-the-id-of-nic-device.patch + Fix newline accidentally sneaked into id string of a nic + - qemu-nbd-close-inherited-stderr.patch + - virtio-balloon-fix-free-page-hinting-check-on-unreal.patch + - virtio-balloon-fix-free-page-hinting-without-an-iothread.patch + - virtio-balloon-unref-the-iothread-when-unrealizing.patch + + [ Aurelien Jarno ] + * Remove myself from maintainers + + -- Michael Tokarev Fri, 03 Jul 2020 18:24:48 +0300 + +qemu (1:5.0-5ubuntu11) hirsute; urgency=medium + + * d/p/ubuntu/define-ubuntu-machine-types.patch: update to fix 15.04 wily + machine type to match how it originally was released (LP: #1902654) + + -- Christian Ehrhardt Mon, 09 Nov 2020 08:19:07 +0100 + +qemu (1:5.0-5ubuntu10) hirsute; urgency=medium + + * No-change rebuild for brltty soname change. + + -- Matthias Klose Mon, 02 Nov 2020 16:59:33 +0100 + +qemu (1:5.0-5ubuntu9) groovy; urgency=medium + + * d/p/u/usb-fix-setup_len-init-CVE-2020-14364.patch: sanity check usb + setup_len + CVE-2020-14364 + + -- Christian Ehrhardt Tue, 22 Sep 2020 16:53:18 +0200 + +qemu (1:5.0-5ubuntu8) groovy; urgency=medium + + * d/p/u/lp-1887930-*: Enable Channel Path Handling for vfio-ccw (LP: #1887930) + + -- Christian Ehrhardt Mon, 14 Sep 2020 08:23:49 +0200 + +qemu (1:5.0-5ubuntu7) groovy; urgency=medium + + * d/p/u/lp-1894942-*: fix virtio-ccw host/guest notification (LP: #1894942) + + -- Christian Ehrhardt Wed, 09 Sep 2020 08:47:12 +0200 + +qemu (1:5.0-5ubuntu6) groovy; urgency=medium + + * d/p/ubuntu/lp-1887935-vfio-ccw-allow-non-prefetch-ORBs.patch: fix boot + from vfio-ccw (LP: #1887935) + + -- Christian Ehrhardt Tue, 25 Aug 2020 11:09:12 +0200 + +qemu (1:5.0-5ubuntu5) groovy; urgency=medium + + * fix qemu-user-static initialization to allow executing systemd + (LP: #1890881) + - d/p/u/lp1890881-linux-user-completely-re-write-init_guest_space.patch + - d/p/u/lp1890881-linux-user-deal-with-address-wrap-for-ARM_COMMPAGE-o.patch + - d/p/u/lp1890881-linux-user-don-t-use-MAP_FIXED-in-pgd_find_hole_fall.patch + - d/p/u/lp1890881-linux-user-elfload-use-MAP_FIXED_NOREPLACE-in-pgb_re.patch + - d/p/u/lp1890881-linux-user-limit-check-to-HOST_LONG_BITS-TARGET_ABI_.patch + - d/p/u/lp1890881-linux-user-provide-fallback-pgd_find_hole-for-bare-c.patch + * fix assertion failue in net_tx_pkt_add_raw_fragment (LP: #1891187) + CVE-2020-16092 + - d/p/u/lp-1891187-hw-net-net_tx_pkt-fix-assertion-failure-in-net_tx.patch + + -- Christian Ehrhardt Wed, 19 Aug 2020 07:19:42 +0200 + +qemu (1:5.0-5ubuntu4) groovy; urgency=medium + + * xen: provide compat links to what libxen-dev reports where to find + the binaries (LP: #1890005) + * d/p/ubuntu/lp-1883984-target-s390x-Fix-SQXBR.patch: avoid crash on + SQXBR (LP: #1883984) + * d/p/lp-1890154-*: fix -no-reboot on s390x secure boot (LP: #1890154) + + -- Christian Ehrhardt Mon, 03 Aug 2020 07:15:28 +0200 + +qemu (1:5.0-5ubuntu3) groovy; urgency=medium + + * d/p/ubuntu/lp-1887763-*: fix TCG sizing that OOMed many small CI + environments (LP: #1887763) + * Pick further changes for groovy from debian/master since 5.0-5 + - ati-vga-check-mm_index-before-recursive-call-CVE-2020-13800.patch + Closes: CVE-2020-13800, ati-vga allows guest OS users to trigger + infinite recursion via a crafted mm_index value during + ati_mm_read or ati_mm_write call. 
+ - revert-memory-accept-mismatching-sizes-in-memory_region_access_valid...patch + Closes: CVE-2020-13754, possible OOB memory accesses in a bunch of qemu + devices which uses min_access_size and max_access_size Memory API fields. + Also closes: CVE-2020-13791 + - exec-set-map-length-to-zero-when-returning-NULL-CVE-2020-13659.patch + CVE-2020-13659: address_space_map in exec.c can trigger + a NULL pointer dereference related to BounceBuffer + - megasas-use-unsigned-type-for-reply_queue_head-and-check-index...patch + Closes: #961887, CVE-2020-13362, megasas_lookup_frame in hw/scsi/megasas.c + has an OOB read via a crafted reply_queue_head field from a guest OS user + - megasas-use-unsigned-type-for-positive-numeric-fields.patch + fix other possible cases like in CVE-2020-13362 (#961887) + - megasas-fix-possible-out-of-bounds-array-access.patch + Some tracepoints use a guest-controlled value as an index into the + mfi_frame_desc[] array. Thus a malicious guest could cause a very low + impact OOB errors here + - nbd-server-avoid-long-error-message-assertions-CVE-2020-10761.patch + Closes: CVE-2020-10761, An assertion failure issue in the QEMU NBD Server. + This flaw occurs when an nbd-client sends a spec-compliant request that is + near the boundary of maximum permitted request length. A remote nbd-client + could use this flaw to crash the qemu-nbd server resulting in a DoS. + - es1370-check-total-frame-count-against-current-frame-CVE-2020-13361.patch + Closes: CVE-2020-13361, es1370_transfer_audio in hw/audio/es1370.c does not + properly validate the frame count, which allows guest OS users to trigger + an out-of-bounds access during an es1370_write() operation + - a few patches from the stable series: + - fix-tulip-breakage.patch + The tulip network driver in a qemu-system-hppa emulation is broken in + the sense that bigger network packages aren't received any longer and + thus even running e.g. "apt update" inside the VM fails. Fix this. + - 9p-lock-directory-streams-with-a-CoMutex.patch + Prevent deadlocks in 9pfs readdir code + - net-do-not-include-a-newline-in-the-id-of-nic-device.patch + Fix newline accidentally sneaked into id string of a nic + - qemu-nbd-close-inherited-stderr.patch + - virtio-balloon-fix-free-page-hinting-check-on-unreal.patch + - virtio-balloon-fix-free-page-hinting-without-an-iothread.patch + - virtio-balloon-unref-the-iothread-when-unrealizing.patch + - acpi-tmr-allow-2-byte-reads.patch (Closes: #964247) + - reapply CVE-2020-13253 fixed from upstream: + sdcard-simplify-realize-a-bit.patch (preparation for the next patch) + sdcard-dont-allow-invalid-SD-card-sizes.patch (half part of CVE-2020-13253) + sdcard-update-coding-style-to-make-checkpatch-happy.patch (preparational) + sdcard-dont-switch-to-ReceivingData-if-address-is-in..-CVE-2020-13253.patch + Closes: #961297, CVE-2020-13253 + - linux-user-refactor-ipc-syscall-and-support-of-semtimedop.patch + (Closes: #965109) + - linux-user-add-netlink-RTM_SETLINK-command.patch (Closes: #964289) + - d/control: since qemu-system-data now contains module(s), + it can't be multi-arch. Ditto for qemu-block-extra. 
+ - qemu-system-foo: depend on exact version of qemu-system-data, + due to the latter having modules + - acpi-allow-accessing-acpi-cnt-register-by-byte.patch' (Closes: #964793) + This is another incarnation of the recent bugfix which actually enabled + memory access constraints, like #964247 + - acpi-accept-byte-and-word-access-to-core-ACPI-registers.patch + this replace acpi-allow-accessing-acpi-cnt-register-by-byte.patch + and acpi-tmr-allow-2-byte-reads.patch, a more complete fix + - xhci-fix-valid.max_access_size-to-access-address-registers.patch + fix one more incarnation of the breakage after the CVE-2020-13754 fix + - do not install outdated (0.12 and before) Changelog (Closes: #965381) + - xgmac-fix-buffer-overflow-in-xgmac_enet_send-CVE-2020-15863.patch + ARM-only XGMAC NIC, possible buffer overflow during packet transmission + Closes: CVE-2020-15863 + - sm501 OOB read/write due to integer overflow in sm501_2d_operation() + List of patches: + sm501-convert-printf-abort-to-qemu_log_mask.patch + sm501-shorten-long-variable-names-in-sm501_2d_operation.patch + sm501-use-BIT-macro-to-shorten-constant.patch + sm501-clean-up-local-variables-in-sm501_2d_operation.patch + sm501-replace-hand-written-implementation-with-pixman-CVE-2020-12829.patch + Closes: #961451, CVE-2020-12829 + - riscv-allow-64-bit-access-to-SiFive-CLINT.patch + another fix for revert-memory-accept-.. CVE-2020-13754 + - seabios-hppa-fno-ipa-sra.patch fix ftbfs with gcc-10 + + -- Christian Ehrhardt Tue, 28 Jul 2020 13:21:31 +0200 + +qemu (1:5.0-5ubuntu2) groovy; urgency=medium + + * No change rebuild against new libnettle8 and libhogweed6 ABI. + + -- Dimitri John Ledkov Mon, 29 Jun 2020 22:32:55 +0100 + +qemu (1:5.0-5ubuntu1) groovy; urgency=medium + + * Merge with Debian testing (LP: #1749393), remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type (LP: 1304107 1621042) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types + - d/qemu-system-x86.NEWS Info on fixed machine type definitions + for host-phys-bits=true (LP: 1776189) + - add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - provide pseries-bionic-2.11-sxxm type as convenience with all + meltdown/spectre workarounds enabled by default. (LP: 1761372). + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - arch aware kvm wrappers + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. 
+ - d/control-in: Disable capstone disassembler library support (universe) + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - d/control*, d/rules: disable xen by default, but provide universe + package qemu-system-x86-xen as alternative + [includes --disable-xen for user-static builds] + - d/control-in: disable pmem on ppc64 as it is currently considered + experimental on that architecture (pmdk v1.8-1) + - d/rules: makefile definitions can't be recursive - sys_systems for s390x + - d/rules: report config log from the correct subdir + - allow qemu to load old modules post upgrade (LP 1847361) + - d/qemu-block-extra.*.in, d/qemu-system-gui.*.in: save shared objects on + upgrade + - d/rules: generate maintainer scripts matching package version on build + - d/rules: enable --enable-module-upgrades where --enable-modules is set + - d/p/ubuntu/lp-1835546-*: backport the s390x protvirt feature (LP 1835546) + - d/control-in: disable rbd support unavailable on riscv (LP: 1872931) + - debian/patches/ubuntu/lp-1878973-*: fix assert in qemu-guest-agent that + crashes it on shutdown (LP 1878973) + * Dropped changes (no more needed) + - d/qemu-system-common.maintscript: clean old sysv and upstart scripts + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: expose nested kvm by default + in qemu64 cpu type. + - d/control: avoid upgrade issues triggered by moving ivshmem tools after + Debian. Fixed by bumping the related Breaks/Replaces to the + Version Ubuntu introduced the change (LP 1862287) + * Dropped changes (in Debian) + - improved s390x support + - d/binfmt-update-in: fix binfmt being called in some containers + (LP 1840956) + - qemu-system-x86-microvm package + In addition to the generic multi-purpose qemu also provide a minimal + feature binary that is loading faster for use cases with microvm machine + type and qboot bios + - d/control-in: add a new qemu-system-x86-microvm package + - d/rules: add an extra config/build step to get the minimal qemu + - Security and packaging fixes (LP 1872937) + - arm-fix-PAuth-sbox-functions-CVE-2020-10702.patch + - net-tulip-check-frame-size-and-r-w-data-length-CVE-2020-11102.patch + CVE-2020-10702 + CVE-2020-11102 + - fix external spice UI + + install ui-spice-app.so in qemu-system-common + + install ui-spice-app.so only if built, spice is optional + - switch binfmt registration to use update-binfmts --[un]import (#866756) + - qemu-system-gui: Multi-Arch=same, not foreign (#956763) + - qemu-system-data: s/highcolor/hicolor/ (#955741) + - enable riscv build (LP 1872931) + [ changes picked from Debian ] + - enable support for riscv64 hosts + - only enable librbd on architectures where it is built + - ceph: do not list librados-dev as we only use librbd-dev and the latter + depends on the former + - seccomp grew up, no need in versioned build-dep + - enable seccomp only on architectures where it can be built + * Dropped changes (upstream) + - d/p/ubuntu/lp-1857033-*: add support for Cooper Lake cpu model + (LP 1857033) + - d/p/lp-1859527-*: avoid breakage on high virtqueue counts (LP 1859527) + - d/p/ubuntu/vhost-user-gpu-Drop-trailing-json-comma.patch: fix parsing of + vhost-user-gpu + - d/p/ubuntu/lp-1847361-vhost-correctly-turn-on-VIRTIO_F_IOMMU_PLATFORM.patch: + avoid unnecessary IOTLB transactions (LP 1866207) + - d/p/stable/lp-1867519-*: Stabilize qemu 4.2 with upstream + patches @qemu-stable (LP 1867519) + - remove d/p/ubuntu/expose-vmx_qemu64cpu.patch: Stop adding VMX to qemu64 + to avoid broken nesting (LP 1868692) + - 
d/p/ubuntu/lp-1871830-*: avoid crash when using QEMU_MODULE_DIR + (LP 1871830) + - d/p/ubuntu/lp-1872107*: fix migration while rebooting guests (LP 1872107) + - d/p/ubuntu/lp-1872931-*: fix build on non KVM platforms + - d/p/ubuntu/lp-1872945-*: fix riscv emulation errors that e.g. hung ssh + and clobbered doubles (LP 1872945) + - SECURITY UPDATE: DoS via integer overflow in ati_2d_blt() + - debian/patches/ubuntu/CVE-2020-11869.patch: fix checks in + ati_2d_blt() to avoid crash in hw/display/ati_2d.c. + - CVE-2020-11869 + - d/p/ubuntu/lp-1805256*: Fixes for QEMU on aarch64 ARM hosts + - async: use explicit memory barriers (LP 1805256) + - aio-wait: delegate polling of main AioContext if BQL not held + - d/p/ubuntu/lp-1882774-*: fix issues with VMX subfeatures on systems not + supporting to set them (LP 1882774) + - d/p/ubuntu/lp-1847361-modules-load-upgrade.patch: to fallback module + load to a versioned path + * Added Changes: + - d/control: regenerate debian/control out of control-in + - update d/p/ubuntu/lp-1835546-* to the final versions + - 11 patches dropped as they are in 5.0 + - 20 patches updated to how they will be in 5.1 + - d/p/ubuntu/virtio-net-fix-rsc_ext-compat-handling.patch: fix + FTBFS in groovy + - Make qemu-system-x86-microvm a transitional package as the binary is now + in qemu-system-x86 itself. + - d/control-in: build-dep libcap is no more needed + - d/rules: update arch aware kvm wrappers + - d/qemu-system-x86.README.Debian: fix typo + + -- Christian Ehrhardt Tue, 16 Jun 2020 16:50:09 +0200 + +qemu (1:5.0-5) unstable; urgency=medium + + * more binfmt-install updates + * CVE-2020-10717 fix from upstream: + virtiofsd-add-rlimit-nofile-NUM-option.patch (preparational) and + virtiofsd-stay-below-fs.file-max-CVE-2020-10717.patch + (Closes: #959746, CVE-2020-10717) + * 2 patches from upstream/stable to fix io_uring fd set buildup: + aio-posix-dont-duplicate-fd-handler-deletion-in-fdmon_io_uring_destroy.patch + aio-posix-disable-fdmon-io_uring-when-GSource-is-used.patch + * upstream stable fix: hostmem-dont-use-mbind-if-host-nodes-is-empty.patch + * upstream stable fix: + net-use-peer-when-purging-queue-in-qemu_flush_or_purge_queue_packets.patch + + -- Michael Tokarev Wed, 13 May 2020 12:57:19 +0300 + +qemu (1:5.0-4) unstable; urgency=medium + + * fix binfmt registration (Closes: #959222) + * disable PIE for user-static build on x32 too, not only i386 + + -- Michael Tokarev Fri, 01 May 2020 13:30:43 +0300 + +qemu (1:5.0-3) unstable; urgency=medium + + * do not explicitly enable -static-pie on non-i386 architectures. + Apparently only amd64 actually supports -static-pie for now, and + it is correctly detected.
+ + -- Michael Tokarev Thu, 30 Apr 2020 08:05:31 +0300 + +qemu (1:5.0-2) unstable; urgency=medium + + * (temporarily) disable pie on i386 static build + For now -static-pie fails on i386 with the following error message: + /usr/bin/ld: /usr/lib/i386-linux-gnu/libc.a(memset_chk-nonshared.o): + unsupported non-PIC call to IFUNC `memset' + * install qemu-system docs in qemu-system-common, not qemu-system-data, + since docs require ./configure run + + -- Michael Tokarev Wed, 29 Apr 2020 23:41:04 +0300 + +qemu (1:5.0-1) unstable; urgency=medium + + * new upstream release (5.0) + Closes: #958926 + Closes: CVE-2020-11869 + * refresh patches, remove patches applied upstream + * do not mention openhackware, it is not used anymore + * do not disable bluez (support removed) + * new system arch "rx" + * dont install qemu-doc.* for now, + but install virtiofsd & qemu-storage-daemon + * add shared-lib-without-dependency-information tag + to qemu-user-static.lintian-overrides + * add html docs to qemu-system-data (to /usr/share/doc/qemu-system-common) + * do not install usr/share/doc/qemu/specs & usr/share/doc/qemu/tools + * install qemu-user html docs for qemu-user & qemu-user-static + * build hppa-firmware.img from roms/seabios-hppa + (and Build-Depeds-Indep on gcc-hppa-linux-gnu) + * enable liburing on linux (build-depend on liburing-dev) + * add upstream signing-key.asc (Michael Roth ) + * build opensbi firmware + (for riscv64 only, riscv32 is possible with compiler flags) + * add source-level lintian-overrides for binaries-without-sources + (lintian can't find sources for a few firmware images which are in roms/) + + -- Michael Tokarev Wed, 29 Apr 2020 12:00:12 +0300 + +qemu (1:4.2-7) unstable; urgency=medium + + * qemu-system-gui: Multi-Arch=same, not foreign (Closes: #956763) + * x32 arch is in the same family as i386 & x86_64, omit binfmt registration + * check systemd-detect-virt before running update-binfmt + * gluster is de-facto linux-only, do not build-depend on it on non-linux + * virglrenderer is also essentially linux-specific + * qemu-user-static does not depend on shlibs + * disable parallel building of targets of d/rules + * add lintian overrides (arch-dependent static binaries) for openbios binaries + * separate binary-indep target into install-indep-prep and binary-indep + * split out various components of qemu-system-data into independent + build/install rules and add infrastructure for more components: + x86-optionrom, sgabios, qboot, openbios, skiboot, palcode-clipper, + slof, s390x-fw + * iscsi-fix-heap-buffer-overflow-in-iscsi_aio_ioctl_cb.patch + + -- Michael Tokarev Mon, 20 Apr 2020 18:30:00 +0300 + +qemu (1:4.2-6) unstable; urgency=medium + + * d/rules: fix FTBFS (brown-paper-bag bug) in last upload + + -- Michael Tokarev Tue, 14 Apr 2020 17:08:45 +0300 + +qemu (1:4.2-5) unstable; urgency=medium + + * no error-out on address-of-packet-member in openbios + * install ui-spice-app.so only if built, spice is optional + * arm-fix-PAuth-sbox-functions-CVE-2020-10702.patch - + Closes: CVE-2020-10702, weak signature generation + in Pointer Authentication support for ARM + * (temporarily) enable seccomp only on architectures where it can be built + (Closes: #956624) + * seccomp has grown up, no need in versioned build-dep + * do not list librados-dev in build-dep as we only use librbd-dev + and the latter depends on the former + * only enable librbd on architectures where it is buildable + + -- Michael Tokarev Tue, 14 Apr 2020 15:47:40 +0300 + +qemu (1:4.2-4) unstable; urgency=medium 
+ + [ Michael Tokarev ] + * d/rules: build minimal configuration for qboot/microvm usage + * set microvm to be the default machine type for microvm case + * install ui-spice-app.so in qemu-system-common + * do not depend on libattr-dev, functions are now in libc6 (Closes: #953910) + * net-tulip-check-frame-size-and-r-w-data-length-CVE-2020-11102.patch + (Closes: #956145, CVE-2020-11102, tulip nic buffer overflow) + * qemu-system-data: s/highcolor/hicolor/ (Closes: #955741) + * switch binfmt registration to use update-binfmts --[un]import + (Closes: #866756) + * build openbios-ppc & openbios-sparc binaries in qemu-system-data, + and replace corresponding binary packages. + Add gcc-sparc64-linux-gnu, fcode-utils & xsltproc to build-depend-indep + * build and provide/replace qemu-slof too + + [ Aurelien Jarno ] + * enable support for riscv64 hosts + + -- Michael Tokarev Tue, 14 Apr 2020 12:44:43 +0300 + +qemu (1:4.2-3ubuntu10) groovy; urgency=medium + + * No-change rebuild against libnettle8 + + -- Steve Langasek Mon, 20 Jul 2020 16:12:37 +0000 + +qemu (1:4.2-3ubuntu9) groovy; urgency=medium + + * debian/patches/ubuntu/lp-1878973-*: fix assert in qemu-guest-agent that + crashes it on shutdown (LP: #1878973) + * d/p/ubuntu/lp-1882774-*: fix issues with VMX subfeatures on systems not + supporting to set them (LP: #1882774) + + -- Christian Ehrhardt Tue, 02 Jun 2020 10:42:49 +0200 + +qemu (1:4.2-3ubuntu8) groovy; urgency=medium + + * d/p/ubuntu/lp-1805256*: Fixes for QEMU on aarch64 ARM hosts + - async: use explicit memory barriers (LP: #1805256) + - aio-wait: delegate polling of main AioContext if BQL not held + + -- Rafael David Tinoco Wed, 27 May 2020 21:47:21 +0000 + +qemu (1:4.2-3ubuntu7) groovy; urgency=medium + + * SECURITY UPDATE: DoS via integer overflow in ati_2d_blt() + - debian/patches/ubuntu/CVE-2020-11869.patch: fix checks in + ati_2d_blt() to avoid crash in hw/display/ati_2d.c. + - CVE-2020-11869 + + -- Marc Deslauriers Thu, 21 May 2020 14:43:19 -0400 + +qemu (1:4.2-3ubuntu6) focal; urgency=medium + + [ Christian Ehrhardt ] + * enable riscv build (LP: #1872931) + [ changes picked from Debian ] + - enable support for riscv64 hosts + - only enable librbd on architectures where it is built + - ceph: do not list librados-dev as we only use librbd-dev and the latter + depends on the former + - seccomp grew up, no need in versioned build-dep + - enable seccomp only on architectures where it can be built + * d/p/ubuntu/lp-1872931-*: fix build on non KVM platforms + * d/p/ubuntu/lp-1872945-*: fix riscv emulation errors that e.g. 
hung ssh + and clobbered doubles (LP: #1872945) + + [ William Grant ] + * d/control-in: disable rbd support unavailable on riscv (LP: 1872931) + + -- Christian Ehrhardt Wed, 15 Apr 2020 14:27:15 +0200 + +qemu (1:4.2-3ubuntu5) focal; urgency=medium + + [ Christian Ehrhardt ] + * d/p/ubuntu/lp-1871830-*: avoid crash when using QEMU_MODULE_DIR + (LP: #1871830) + * Security and packaging fixes (LP: #1872937) + - arm-fix-PAuth-sbox-functions-CVE-2020-10702.patch + - net-tulip-check-frame-size-and-r-w-data-length-CVE-2020-11102.patch + CVE-2020-10702 + CVE-2020-11102 + - fix external spice UI + + install ui-spice-app.so in qemu-system-common + + install ui-spice-app.so only if built, spice is optional + - switch binfmt registration to use update-binfmts --[un]import (#866756) + - qemu-system-gui: Multi-Arch=same, not foreign (#956763) + - qemu-system-data: s/highcolor/hicolor/ (#955741) + * d/p/ubuntu/lp-1872107*: fix migration while rebooting guests (LP: #1872107) + + -- Christian Ehrhardt Wed, 15 Apr 2020 11:26:44 +0200 + +qemu (1:4.2-3ubuntu4) focal; urgency=medium + + * d/p/ubuntu/lp-1835546-*: backport the s390x protvirt feature (LP: #1835546) + * remove d/p/ubuntu/expose-vmx_qemu64cpu.patch: Stop adding VMX to qemu64 + to avoid broken nesting (LP: #1868692) + + -- Christian Ehrhardt Fri, 20 Mar 2020 08:02:16 +0100 + +qemu (1:4.2-3ubuntu3) focal; urgency=medium + + * d/p/stable/lp-1867519-*: Stabilize qemu 4.2 with upstream + patches @qemu-stable (LP: #1867519) + + -- Christian Ehrhardt Wed, 18 Mar 2020 13:57:57 +0100 + +qemu (1:4.2-3ubuntu2) focal; urgency=medium + + * allow qemu to load old modules post upgrade (LP: #1847361) + - d/p/ubuntu/lp-1847361-modules-load-upgrade.patch: to fallback module + load to a versioned path + - d/qemu-block-extra.*.in, d/qemu-system-gui.*.in: save shared objects on + upgrade + - d/rules: generate maintainer scripts matching package version on build + - d/rules: enable --enable-module-upgrades where --enable-modules is set + * d/p/ubuntu/lp-1847361-vhost-correctly-turn-on-VIRTIO_F_IOMMU_PLATFORM.patch: + avoid unnecessary IOTLB transactions (LP: #1866207) + + -- Christian Ehrhardt Mon, 02 Mar 2020 15:21:27 +0100 + +qemu (1:4.2-3ubuntu1) focal; urgency=medium + + * Merge with Debian testing, remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.maintscript: clean old sysv and upstart scripts + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type (LP: 1304107 1621042) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types + - d/qemu-system-x86.NEWS Info on fixed machine type definitions + for host-phys-bits=true (LP: 1776189) + - add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - provide pseries-bionic-2.11-sxxm type as convenience with all + meltdown/spectre workarounds enabled by default. (LP: 1761372). + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - Enable nesting by default + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: expose nested kvm by default + in qemu64 cpu type. 
+ - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - improved s390x support + - d/rules: build s390-ccw.img with upstream Makefile + - d/rules: build s390-netboot.img with upstream Makefile + - arch aware kvm wrappers + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - d/control-in: Disable capstone disassembler library support (universe) + - d/binfmt-update-in: fix binfmt being called in some containers + (LP 1840956) + - d/p/ubuntu/lp-1857033-*: add support for Cooper Lake cpu model + (LP 1857033) + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - d/control*, d/rules: disable xen by default, but provide universe + package qemu-system-x86-xen as alternative + - d/p/lp-1859527-*: avoid breakage on high virtqueue counts (LP 1859527) + - Dropped changes [ in Debian ] + - d/control: update VCS links + - d/control-in: bump debhelper build-dep for compat 12 + - d/control: disable bluetooth being deprecated + - d/not-installed: ignore new interop docs and extra icons for now + - d/not-installed: do not install elf2dmp until namespaced + - d/qemu-utils.install: install new tools qemu-edid and qemu-keymap + [ not needed ] + - d/control-in: promote qemu-efi/ovmf in Ubuntu (LP 1570617) + - s390x support + - Create qemu-system-s390x package + - Enable numa support for s390x + - d/control*: enable libpmem support for nvdimms (LP 1790856) + * Added changes + - d/control: regenerate debian/control out of control-in + - qemu-system-x86-microvm package + In addition to the generic multi-purpose qemu also provide a minimal + feature binary that is loading faster for use cases with microvm machine + type and qboot bios + - d/control-in: add a new qemu-system-x86-microvm package + - d/rules: add an extra config/build step to get the minimal qemu + - d/control-in: disable pmem on ppc64 as it is currently considered + experimental on that architecture (pmdk v1.8-1) + - d/rules: makefile definitions can't be recursive - sys_systems for s390x + - d/p/ubuntu/vhost-user-gpu-Drop-trailing-json-comma.patch: fix parsing of + vhost-user-gpu + - d/rules: report config log from the correct subdir + - d/rules: --disable-xen for user-static builds + + -- Christian Ehrhardt Wed, 12 Feb 2020 15:21:56 +0100 + +qemu (1:4.2-3) unstable; urgency=medium + + * mention closing of #909743 in previous changelog (Closes: #909743) + * do not link to qemu-skiboot from qemu-system-ppc (Closes: #950431) + * provide+conflict qemu-skiboot from qemu-system-data, + as we are not using this package anymore + + -- Michael Tokarev Sat, 01 Feb 2020 22:10:57 +0300 + +qemu (1:4.2-2) unstable; urgency=medium + + [ Fabrice Bauzac ] + * Fix a typo in the description of the qemu binary package + + [ Frédéric Bonnard ] + * Enable powernv emulation with skiboot firmware + + [ Michael R. 
Crusoe ] + * Modernize watch file (Closes: #909743) + + [ Christian Ehrhardt ] + * d/control-in: promote qemu-efi/ovmf in Ubuntu + * d/control-in: bump debhelper build-dep for compat 12 + * - d/control-in: update VCS links + * - d/control-in: disable bluetooth being deprecated + * d/not-installed: ignore new interop docs and extra icons for now + * do not install elf2dmp until namespaced + * d/control-in: Enable numa support for s390x + * Create qemu-system-s390x package (Ubuntu only for now) + + [ Michael Tokarev ] + * stop using inttypes.h in qboot code; + this makes dependency on libc6-dev-i386 to be unnecessary + * qboot-no-jump-tables.diff - use #pragma for one file in qboot + * do not install qemu-edid and qemu-keymap for now + * no need in bluetooth patches as bluetooth is disabled + * scsi-cap-block-count-from-GET-LBA-STATUS-CVE-2020-1711.patch + (Closes: #949731, CVE-2020-1711) + * enable libpmem support on amd64|arm64|ppc64el (Closes: #935327) + + -- Michael Tokarev Fri, 31 Jan 2020 23:51:09 +0300 + +qemu (1:4.2-1ubuntu2) focal; urgency=medium + + * d/control: avoid upgrade issues triggered by moving ivshmem tools after + Debian. Fixed by by bumping the related Breaks/Replaces to the + Version Ubuntu introduced the change (LP: #1862287) + + -- Christian Ehrhardt Fri, 07 Feb 2020 07:31:21 +0100 + +qemu (1:4.2-1ubuntu1) focal; urgency=medium + + * Merge with Debian testing, Among many other things this fixes LP Bugs: + LP: #1847806 - add mff* instructions to not break on ppc64 with newer glibc + LP: #1812822 - avoid crashes on detaching vhost_net interfaces + LP: #1852744 - Crypto Passthrough Interrupt Support + LP: #1853316 - CCW IPL Support + Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.maintscript: clean old sysv and upstart scripts + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Distribution specific machine type (LP: 1304107 1621042) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types + - d/qemu-system-x86.NEWS Info on fixed machine type definitions + for host-phys-bits=true (LP: 1776189) + - add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - provide pseries-bionic-2.11-sxxm type as convenience with all + meltdown/spectre workarounds enabled by default. (LP: 1761372). + - Enable nesting by default + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: expose nested kvm by default + in qemu64 cpu type. 
+ - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + [ No more strictly needed, but required for backward compatibility ] + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - s390x support + - Create qemu-system-s390x package + - Enable numa support for s390x + - d/rules: build s390-ccw.img with upstream Makefile + - d/rules: build s390-netboot.img with upstream Makefile + - arch aware kvm wrappers + - d/control: update VCS links + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - d/control-in: Disable capstone disassembler library support (universe) + - d/control: disable bluetooth being deprecated + - d/not-installed: ignore new interop docs and extra icons for now + - d/not-installed: do not install elf2dmp until namespaced + - d/qemu-utils.install: install new tools qemu-edid and qemu-keymap + - d/control-in: promote qemu-efi/ovmf in Ubuntu (LP 1570617) + - d/binfmt-update-in: fix binfmt being called in some containers + (LP 1840956) + - Dropped changes (in Debian) + - qemu-guest-agent: freeze-hook fixes (LP: 1484990) + - d/qemu-guest-agent.install: provide /etc/qemu/fsfreeze-hook + - d/qemu-guest-agent.dirs: provide /etc/qemu/fsfreeze-hook.d + - d/control-in: enable RDMA support in qemu (LP: 1692476) + - enable RDMA config option + - add libibumad-dev build-dep + - d/p/ubuntu/lp-1790901-partial-SLOF-for-s390x-netboot.patch: bring back + some SLOF bits stripped in DFSG to be able to build s390x-netboot roms + As that hack to build s390-ccw.img rom can't build s390x-netboot.img + replace it with a build-indep using the upstream makefiles. 
+ This is less prone to miss future changes/fixes that are done to the + makefiles + - remove /dev/kvm permission handling (moved to systemd 239-6) (#892945) + - d/p/debianize-qemu-guest-service.patch: fix path of qemu-ga + - d/rules: fix qemu-kvm service for debhelper compat >=12 + - Refreshed patches for v4.0 context changes + - d/control*: remove sdlabi which was removed upstream + - d/control*: enable docs (now explicit) and provide new build-dep + python3-sphinx + - d/qemu-system-data.install: use new paths for formerly used icons + - Merge with Upstream release of qemu 4.0 + - d/p/ubuntu/lp-1790901-partial-SLOF-for-s390x-netboot.patch + - Dropped changes (Upstream) + - d/p/ubuntu/lp-1830243-*: s390x Secure Linux Boot Toleration (LP 1830243) + - d/p/ubuntu/lp-1830238-*: s390x hardware cpu model (LP 1830238) + - d/p/ubuntu/linux-user-fix-__NR_semtimedop-undeclared-error.patch: + fix i386 build error + - d/p/ubuntu/lp-1836066-s390-cpumodel-fix-description-for-the-new-vector-fac: + fix naming of the new vector facitlity (LP 1836066) + - d/p/ubuntu/lp-1836159-fix-with-latest-kernel.patch: fix build issues + for missing SIOCGSTAMP definition; final fix is still in discussion + upstream (LP: 1836159) + - d/p/ubuntu/lp-1836154-*: further fixups for HW CPU model for newer + s390x machines (LP 1836154) + - d/p/ubuntu/lp-1841066-*: fix detection of arch_capability flags + (LP 1841066) + - d/p/lp-1842774-s390x-cpumodel-Add-the-z15-name-to-the-description-o.patch: + update the z15 model name (LP 1842774) + - d/p/ubuntu/lp-1848556-curl-Handle-success-in-multi_check_completion.patch: + fix a potential hang when qemu or qemu-img where accessing http backed + disks via libcurl (LP 1848556) + - d/p/u/lp-1848497-virtio-balloon-fix-QEMU-4.0-config-size-migration-*: + fix migration issue from qemu <4.0 when using virtio-balloon (LP 1848497) + - d/p/ubuntu/lp-1830704-s390x-cpumodel-ignore-csske-for-expansion.patch + toleration for future machines (LP 1830704) + - SECURITY UPDATE: Add support for exposing md-clear functionality + to guests + - d/p/ubuntu/enable-md-clear.patch + - d/p/ubuntu/enable-md-no.patch + - CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + - SECURITY UPDATE: heap overflow when loading device tree blob + - d/p/ubuntu/CVE-2018-20815.patch: specify how large the buffer to + copy the device tree blob into is. + - CVE-2018-20815 + - SECURITY UPDATE: device driver denial of service via NULL pointer + dereference + - d/p/ubuntu/CVE-2019-5008.patch: Define skeleton 'power_mem_read' + routine + - CVE-2019-5008 + - SECURITY UPDATE: information leak in SLiRP + - d/p/ubuntu/CVE-2019-9824.patch: check sscanf result when + emulating ident. + - CVE-2019-9824 + - d/p/ubuntu/lp-1812384-s390x-Return-specification-exception-for- + unimplement.patch: properly return architecture defined exception + on bad subcodes of diag 308 (LP 1812384) + * Dropped changes (no more needed) + - d/qemu-guest-agent.pre{rm|inst}/.postrm: special handling for + mv_conffile since the new path is a directory in the old package + version which can not be handled by mv_conffile. + [ only needed between disco and eoan ] + - disable pvrdma + [ CVEs all fixed now ] + - d/p/ubuntu/Revert-target-i386-kvm-add-VMX-migration-blocker.patch: + avoid misdetection of simplified nesting blocking all migrations + [ qemu now detects and handles nesting - needs kernel >=4.20 ] + - Enable nesting by default + - d/qemu-system-x86.modprobe: set nested=1 module option on intel. 
+ (is default on amd) + - d/qemu-system-x86.postinst: re-load kvm_intel.ko if it was loaded + without nested=1 + [ nesting is default in kernel modules and default selected cpu types ] + * Added changes + - d/control: regenerate debian/control out of control-in + - updated ubuntu machine types to match qemu 4.2 in Ubuntu 20.04 Focal + - added ubuntu focal types for qemu 4.2 + - ubuntu-q35 alias added to auto-select the most recent q35 ubuntu type + - d/p/ubuntu/lp-1857033-*: add support for Cooper Lake cpu model + (LP: #1857033) + - d/qemu-system-x86.README.Debian: add info about updated nesting changes + - d/control*, d/rules: disable xen by default, but provide universe + package qemu-system-x86-xen as alternative + - fix typos in changelog and d/qemu-system-x86.NEWS + - d/p/lp-1859527-*: avoid breakage on high virtqueue counts (LP: #1859527) + - d/control*: enable libpmem support for nvdimms (LP: #1790856) + + -- Christian Ehrhardt Wed, 08 Jan 2020 15:27:42 +0100 + +qemu (1:4.2-1) unstable; urgency=medium + + * new upstream release (4.2.0) + * removed patches: v4.1.1.diff, enable-pschange-mc-no.patch + * do not make sgabios.bin executable (lintian) + * add s390-netboot.img lintian overrides for qemu-system-data + * build qboot (bios-microvm.bin) + * build-depend-indep on libc6-dev-i386 for qboot + (includes some system headers) + + -- Michael Tokarev Sat, 14 Dec 2019 14:07:27 +0300 + +qemu (1:4.1-3) unstable; urgency=medium + + * mention #939869 (CVE-2019-15890) in previous changelog entry + * add Provides: sgabios to qemu-data (Closes: #945924) + * fix qemu-debootsrtap (add hppa arch, print correct error message) + thanks to Helge Deller (Closes: #923410) + * enable long binfmt masks again for mips/mips32 (Closes: #829243) + + -- Michael Tokarev Mon, 02 Dec 2019 13:24:58 +0300 + +qemu (1:4.1-2) unstable; urgency=medium + + * build sgabios in build-indep, conflict with sgabios package + * qemu-system-ppc: build and install canyonlands.dtb in addition to bamboo.dtb + * remove duplicated CVE-2018-20123 & CVE-2018-20124 in prev changelog + * move s390 firmware build rules to debian/s390fw.mak, build s390-netboot.img + * imported v4.1.1.diff - upstream stable branch + Closes: CVE-2019-12068 + Closes: #945258, #945072 + * enable-pschange-mc-no.patch: i386: add PSCHANGE_MC_NO feature + to allow disabling ITLB multihit mitigations in nested hypervisors + Closes: #944623 + * build-depend on nettle-dev, enable nettle, and clarify --enable-lzo + * switch to system libslirp, build-depend on libslirp-dev + Closes: #939869, CVE-2019-15890 + + -- Michael Tokarev Mon, 25 Nov 2019 12:54:05 +0300 + +qemu (1:4.1-1) unstable; urgency=medium + + * new upstream release v4.1 + Closes: #933741, CVE-2019-14378 (slirp buff overflow in packet reassembly) + (use internal slirp copy for now) + Closes: #931351, CVE-2019-13164 (qemu-bridge-helper long IFNAME) + Closes: #922923, CVE-2019-8934 (ppc64 emulator leaks hw identity) + Closes: #916442, CVE-2018-20123 (pvrdma memory leak in device hotplug) + Closes: #922461, CVE-2018-20124 (pvrdma num_sge can exceed MAX_SGE) + Closes: #927924 (new upstream version) + Closes: #897054 (AMD Zen CPU support) + Closes: #935324 (FTBFS due to gluster API change) + Closes: CVE-2018-20125 (pvrdma: DoS in create_cq_ring|create_qp_rings) + Closes: CVE-2018-20126 (pvrdma: memleaks in create_cq_ring|create_qp_rings) + Closes: CVE-2018-20191 (pvrdma: DoS due to missing read operation impl.) 
+ Closes: CVE-2018-20216 (pvrdma: infinite loop in pvrdma_dev_ring.c) + * remove patches which are applied upstream, refresh remaining patches + (bt-use-size_t-...-CVE-2018-19665.patch hasn't been applied upstream, + bluetooth subsystem is going to be removed, we keep it for now) + * debian/source/options: ignore slirp/ submodule + * use python3 for building, not python + * debian/optionrom.mk: add pvh.bin + * switch from libssh2 to libssh, and enable libssh support in ubuntu + * bump spice version requirement to 0.12.5 + * enable pvrdma + * debian/control-in: remove reference to libsdl + * debian/rules: add new objects for s390-ccw fw + * debian/control: add build dependency on python3-sphinx for docs + * install ui/icons/qemu.svg and qemu.desktop + * debian/rules: remove pc-bios/bamboo.dtb before building it + * install vhost-user-gpu binary and 50-qemu-gpu.json + * debian/rules: remove old maintscript-helper invocations, not needed anymore + * remove +dfsg for now, upload whole upstream source, will trim it later + + -- Michael Tokarev Tue, 27 Aug 2019 12:43:43 +0300 + +qemu (1:4.0+dfsg-0ubuntu10) focal; urgency=medium + + * d/p/ubuntu/lp-1848556-curl-Handle-success-in-multi_check_completion.patch: + fix a potential hang when qemu or qemu-img were accessing http backed + disks via libcurl (LP: #1848556) + * d/p/u/lp-1848497-virtio-balloon-fix-QEMU-4.0-config-size-migration-in.patch: + fix migration issue from qemu <4.0 when using virtio-balloon (LP: #1848497) + + -- Christian Ehrhardt Mon, 21 Oct 2019 14:51:45 +0200 + +qemu (1:4.0+dfsg-0ubuntu9) eoan; urgency=medium + + * d/p/lp-1842774-s390x-cpumodel-Add-the-z15-name-to-the-description-o.patch: + update the z15 model name (LP: #1842774) + + -- Christian Ehrhardt Tue, 24 Sep 2019 11:42:58 +0200 + +qemu (1:4.0+dfsg-0ubuntu8) eoan; urgency=medium + + * d/binfmt-update-in: fix binfmt being called in some containers + (LP: #1840956) + + -- Christian Ehrhardt Mon, 09 Sep 2019 11:03:13 +0200 + +qemu (1:4.0+dfsg-0ubuntu7) eoan; urgency=medium + + * No-change upload with strops.h and sys/strops.h removed in glibc.
+ + -- Matthias Klose Thu, 05 Sep 2019 11:07:25 +0000 + +qemu (1:4.0+dfsg-0ubuntu6) eoan; urgency=medium + + * d/p/ubuntu/lp-1841066-*: fix detection of arch_capability flags + (LP: #1841066) + + -- Christian Ehrhardt Mon, 26 Aug 2019 12:08:04 +0200 + +qemu (1:4.0+dfsg-0ubuntu5) eoan; urgency=medium + + * d/p/ubuntu/lp-1836154-*: further fixups for HW CPU model for newer + s390x machines (LP: #1836154) + + -- Christian Ehrhardt Wed, 17 Jul 2019 13:20:42 +0200 + +qemu (1:4.0+dfsg-0ubuntu4) eoan; urgency=medium + + * d/control-in: promote qemu-efi/ovmf in Ubuntu (LP: #1570617) + - pick Debian change for (#889885) + move ovmf to recommends on debian and update aarch ovmf refs + - stop Ubuntu to drop ovmf/qemu-efi to a suggest + + -- Christian Ehrhardt Fri, 12 Jul 2019 12:48:24 +0200 + +qemu (1:4.0+dfsg-0ubuntu3) eoan; urgency=medium + + * d/p/ubuntu/lp-1836159-fix-with-latest-kernel.patch: fix build issues + for missing SIOCGSTAMP definition; final fix is still in discussion + upstream (LP: 1836159) + + -- Christian Ehrhardt Thu, 11 Jul 2019 10:10:00 +0200 + +qemu (1:4.0+dfsg-0ubuntu2) eoan; urgency=medium + + * d/p/ubuntu/lp-1836066-s390-cpumodel-fix-description-for-the-new-vector-fac: + fix naming of the new vector facitlity (LP: #1836066) + * d/control-in: update VCS links in control template as well + + -- Christian Ehrhardt Thu, 11 Jul 2019 08:18:44 +0200 + +qemu (1:4.0+dfsg-0ubuntu1) eoan; urgency=medium + + * Merge with Upstream release of qemu 4.0. + Among many other things this fixes LP Bugs: + LP: #1782206 - SnowRidge Accelerator Interfacing Architecture (AIA) + LP: #1828038 - Update s390x CPU Model for more HW support + LP: #1832622 - count cache flush Spectre v2 mitigation for ppc64el + Remaining Changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-system-common.qemu-kvm.service: systemd unit to call + qemu-kvm-init + - d/qemu-system-common.install: install helper script + - d/qemu-system-common.maintscript: clean old sysv and upstart scripts + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: call dh_installinit and dh_installsystemd for qemu-kvm + - Enable nesting by default + - d/qemu-system-x86.modprobe: set nested=1 module option on intel. + (is default on amd) + - d/qemu-system-x86.postinst: re-load kvm_intel.ko if it was loaded + without nested=1 + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: expose nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + - d/qemu-system-x86.README.Debian: document intention of nested being + default is comfort, not full support + - Distribution specific machine type (LP: 1304107 1621042) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types + - d/qemu-system-x86.NEWS Info on fixed machine type definitions + for host-phys-bits=true (LP: 1776189) + - add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - provide pseries-bionic-2.11-sxxm type as convenience with all + meltdown/spectre workarounds enabled by default. (LP: 1761372). 
+ - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - s390x support + - Create qemu-system-s390x package + - Enable numa support for s390x + - arch aware kvm wrappers + - d/control: update VCS links + - qemu-guest-agent: freeze-hook fixes (LP: 1484990) + - d/qemu-guest-agent.install: provide /etc/qemu/fsfreeze-hook + - d/qemu-guest-agent.dirs: provide /etc/qemu/fsfreeze-hook.d + - d/control-in: enable RDMA support in qemu (LP: 1692476) + - enable RDMA config option + - add libibumad-dev build-dep + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - d/control-in: Disable capstone disassembler library support (universe) + - Move s390x roms to a new qemu-system-data-s390x + - d/qemu-system-data.install: install s390x roms as architecture:all in + qemu-system-data + - d/rules: build s390-ccw.img with upstream Makefile + - d/rules: build s390-netboot.img with upstream Makefile + - d/p/ubuntu/lp-1790901-partial-SLOF-for-s390x-netboot.patch: bring back + some SLOF bits stripped in DFSG to be able to build s390x-netboot roms + As that hack to build s390-ccw.img rom can't build s390x-netboot.img + replace it with a build-indep using the upstream makefiles. + This is less prone to miss future changes/fixes that are done to the + makefiles + - d/control-in: add breaks/replaces for moving s390x roms from + qemu-system-s390x to qemu-system-data + - remove /dev/kvm permission handling (moved to systemd 239-6) (#892945) + [From not yet uploaded Debian branch] + - d/p/debianize-qemu-guest-service.patch: fix path of qemu-ga + - d/rules: fix qemu-kvm service for debhelper compat >=12 + - disable pvrdma - besides several security holes there are many other + bugs there as well + * Dropped patches that are upstream in v4.0 + - d/p/do-not-link-everything-with-xen.patch + - d/p/usb-mtp-use-O_NOFOLLOW-and-O_CLOEXEC-CVE-2018-16872.patch + - d/p/hw_usb-fix-mistaken-de-initialization-of-CCID-state.patch + - d/p/scsi-generic-avoid-possible-oob-access-to-r-buf-CVE-2019-6501.patch + - d/p/slirp-check-data-length-while-emulating-ident-function-CVE-2019-6778 + - d/p/i2c-ddc-fix-oob-read-CVE-2019-3812.patch + - d/p/ubuntu/lp-1759509-qmp-query-current-machine-with-wakeup-suspend-suppor + (LP: 1759509) + - d/p/ubuntu/lp-1759509-qga-update-guest-suspend-ram-and-guest-suspend-hybri + - d/p/ubuntu/lp-1759509-qmp-hmp-Make-system_wakeup-check-wake-up-support-and + - d/p/ubuntu/lp-1812384-s390x-Return-specification-exception-for-unimplement + - d/p/ubuntu/CVE-2018-20815.patch + - d/p/ubuntu/CVE-2019-5008.patch + - d/p/ubuntu/CVE-2019-9824.patch + - d/p/ubuntu/Revert-target-i386-kvm-add-VMX-migration-blocker.patch: + avoid misdetection of simplified nesting blocking all migrations + * Dropped further patches + d/p/bt-use-size_t-type-for-length-parameters-instead-of-int-CVE-2018-19665 + [upstream deprecated the whole subsystem instead of applying the fix] + * Added Changes + - updated ubuntu machine types for v4.0 + - added eoan types + - fixed s390x issue of upstream types having a "v" prefix + - add back dropped machine types to avoid more issues like LP: 1802944 + - fix kvm split irqchip default in ubuntu q35 machine type + - drop no more needed 
spapr_machine_2_11_sxxm_instance_options and + adapt updated CamelCase + - -hpb types now need to use GlobalProperties + - pc_compat_2_0 got a _fn suffix and slight changes + - d/p/ubuntu/lp-1790901-partial-SLOF-for-s390x-netboot.patch: update to + SLOF of qemu 4.0 + - Refreshed patches still needed for v4.0 context changes + - d/p/use-fixed-data-path.patch + - d/p/ubuntu/enable-svm-by-default.patch + - d/p/ubuntu/enable-md-clear.patch + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch + - d/p/ubuntu/lp-1830243-*: s390x Secure Linux Boot Toleration + (LP: #1830243) + - d/control: disable bluetooth being deprecated + - d/control*: remove sdlabi which was removed upstream + - d/p/ubuntu/lp-1830238-*: s390x hardware cpu model (LP: #1830238) + - d/control*: enable docs (now explicit) and provide new build-dep + python3-sphinx + - d/not-installed: ignore new interop docs and extra icons for now + - d/not-installed: do not install elf2dmp until namespaced + - d/qemu-utils.install: install new tools qemu-edid and qemu-keymap + - d/qemu-system-data.install: use new paths for formerly used icons + - d/p/ubuntu/linux-user-fix-__NR_semtimedop-undeclared-error.patch: + fix i386 build error + + -- Christian Ehrhardt Mon, 24 Jun 2019 16:33:19 +0200 + +qemu (1:3.1+dfsg-8) unstable; urgency=high + + * sun4u-add-power_mem_read-routine-CVE-2019-5008.patch + fixes a null-pointer dereference in sparc/sun4u emulated hw + Closes: #927439, CVE-2019-5008 + * enable-md-no.patch & enable-md-clear.patch + mitigation for MDS (Microarchitectural Data Sampling) issues + Closes: #929067, + CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + * qxl-check-release-info-object-CVE-2019-12155.patch + fixes null-pointer deref in qxl cleanup code + Closes: #929353, CVE-2019-12155 + * aarch32-exception-return-to-switch-from-hyp-mon.patch + fixes booting U-Boot in UEFI mode on aarch32 + Closes: #927763 + * stop qemu-system-common pre-depending on adduser + Closes: #929261 + + -- Michael Tokarev Mon, 27 May 2019 07:49:25 +0300 + +qemu (1:3.1+dfsg-7) unstable; urgency=high + + [ Michael Tokarev ] + * device_tree-don-t-use-load_image-CVE-2018-20815.patch + fix heap buffer overflow while loading device tree blob + (Closes: CVE-2018-20815) + + [ Christian Ehrhardt ] + * qemu-guest-agent: fix path of fsfreeze-hook (LP: #1820291) + - d/qemu-guest-agent.install: use correct path for fsfreeze-hook + - d/qemu-guest-agent.pre{rm|inst}/.postrm: special handling for + mv_conffile since the new path is a directory in the old package + version which can not be handled by mv_conffile. + + -- Michael Tokarev Wed, 27 Mar 2019 14:24:06 +0300 + +qemu (1:3.1+dfsg-6) unstable; urgency=high + + * slirp-check-sscanf-result-when-emulating-ident-CVE-2019-9824.patch + fix information leakage in slirp code (Closes: CVE-2019-9824) + + -- Michael Tokarev Mon, 18 Mar 2019 14:41:51 +0300 + +qemu (1:3.1+dfsg-5) unstable; urgency=high + + * i2c-ddc-fix-oob-read-CVE-2019-3812.patch fixes + OOB read in hw/i2c/i2c-ddc.c which allows for memory disclosure. + Closes: #922635, CVE-2019-3812 + + -- Michael Tokarev Mon, 11 Mar 2019 14:30:44 +0300 + +qemu (1:3.1+dfsg-4) unstable; urgency=medium + + * mention closing of #855043 by 3.1+dfsg-3 + * disable pvrdma for now, it is a bit too buggy. 
+ Besides several security holes there are many other bugs there as well, + and the amount of patches applied upstream after 3.1 release is large + (Closes, or really makes unimportant again: CVE-2018-20123 CVE-2018-20124 + CVE-2018-20125 CVE-2018-20126 CVE-2018-20191 CVE-2018-20216) + + -- Michael Tokarev Mon, 11 Feb 2019 14:00:09 +0300 + +qemu (1:3.1+dfsg-3) unstable; urgency=medium + + [ Michael Tokarev ] + * mention #696289 closed by 2.10 + * move ovmf to recommends on debian and update aarch ovmf refs + (Closes: #889885, #855043) + * remove /dev/kvm permission handling (moved to systemd 239-6) + (Closes: #892945) + * build qemu-palcode using alpha cross-compiler + (Closes: #913103) + * fix path in qemu-guest-agent.service (#918378), fixs Bind[s]To + (Closes: #918378 + * use int for sparc64 timeval.tv_usec + (Closes: #920032) + * build-depend on libglusterfs-dev not glusterfs-common + (Closes: #919668, #881527) + * add breaks: qemu-system-data to qemu-system-common, + to close #916279 completely (all this can be removed after buster) + (Closes: #916279) + * scsi-generic-avoid-possible-oob-access-to-r-buf-CVE-2019-6501.patch + (Closes: #920222, CVE-2019-6501) + * slirp-check-data-length-while-emulating-ident-function-CVE-2019-6778.patch + (Closes: #921525) + * pvrdma-release-device-resources-on-error-CVE-2018-20123.patch + (Closes: #916442, CVE-2018-20123) + * enable rdma and pvrdma, build-depend on + librdmacm-dev, libibverbs-dev, libibumad-dev + * sync debian/qemu-user-static.1 and debian/qemu-user.1 generate the latter + from the former (finally Closes: #901407) + * move ivshmem-server & ivshmem-client from qemu-utils to qemu-system-common + (the binaries are also specific to qemu-system, not useable alone) + * move qemu-pr-helper from qemu-utils to qemu-system-common - + this is an internal qemu-system helper, with possible socket activation, + not intended for use outside of qemu-system + + [ Christian Ehrhardt ] + * qemu-guest-agent: freeze-hook to ignore dpkg files (packaging changes) + + -- Michael Tokarev Wed, 06 Feb 2019 12:23:01 +0300 + +qemu (1:3.1+dfsg-2ubuntu5) eoan; urgency=medium + + * d/p/ubuntu/define-ubuntu-machine-types.patch: fix wily machine type being + broken since 2.11 due to 2.3/2.4 version mismatch in its definition to + fix migrations from old machines (LP: #1829868). + * d/p/ubuntu/lp-1830704-s390x-cpumodel-ignore-csske-for-expansion.patch + toleration for future machines (LP: #1830704 + + -- Christian Ehrhardt Tue, 28 May 2019 11:30:42 +0200 + +qemu (1:3.1+dfsg-2ubuntu4) eoan; urgency=medium + + * SECURITY UPDATE: Add support for exposing md-clear functionality + to guests + - d/p/ubuntu/enable-md-clear.patch + - d/p/ubuntu/enable-md-no.patch + - CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + * SECURITY UPDATE: heap overflow when loading device tree blob + - d/p/ubuntu/CVE-2018-20815.patch: specify how large the buffer to + copy the device tree blob into is. + - CVE-2018-20815 + * SECURITY UPDATE: device driver denial of service via NULL pointer + dereference + - d/p/ubuntu/CVE-2019-5008.patch: Define skeleton 'power_mem_read' + routine + - CVE-2019-5008 + * SECURITY UPDATE: information leak in SLiRP + - d/p/ubuntu/CVE-2019-9824.patch: check sscanf result when + emulating ident. 
+ - CVE-2019-9824 + + -- Steve Beattie Wed, 08 May 2019 09:27:53 -0700 + +qemu (1:3.1+dfsg-2ubuntu3) disco; urgency=medium + + * qemu-guest-agent: fix path of fsfreeze-hook (LP: #1820291) + - d/qemu-guest-agent.install: use correct path for fsfreeze-hook + - d/qemu-guest-agent.pre{rm|inst}/.postrm: special handling for + mv_conffile since the new path is a directory in the old package + version which can not be handled by mv_conffile. + * i2c-ddc-fix-oob-read-CVE-2019-3812.patch fixes + OOB read in hw/i2c/i2c-ddc.c which allows for memory disclosure. + Closes: #922635 (Thanks to Gerd Hoffmann and Michael Tokarev) + CVE-2019-3812 + + -- Christian Ehrhardt Mon, 18 Mar 2019 09:20:07 +0100 + +qemu (1:3.1+dfsg-2ubuntu2) disco; urgency=medium + + * disable pvrdma - besides several security holes there are many other + bugs there as well, and the amount of patches applied upstream after + 3.1 release is large (Closes, or actuallymakes unimportant again) + - CVE-2018-20123 + - CVE-2018-20124 + - CVE-2018-20125 + - CVE-2018-20126 + - CVE-2018-20191 + - CVE-2018-20216 + * scsi-generic-avoid-possible-oob-access-to-r-buf-CVE-2019-6501.patch + - CVE-2019-6501 + * slirp-check-data-length-while-emulating-ident-function-CVE-2019-6778.patch + - CVE-2019-6778 + + -- Christian Ehrhardt Tue, 19 Feb 2019 06:43:04 +0100 + +qemu (1:3.1+dfsg-2ubuntu1) disco; urgency=medium + + * Merge with Debian testing, Among many other things this fixes LP Bugs: + LP: #1806104 - fix misleading page size error on ppc64el + LP: #1782205 - SnowRidge enabled new ISAs + LP: #1786956 - upgrade to qemu >= 3.0 + LP: #1809083 - Backward migration to Xenial on ppc64el + LP: #1803315 - s390x Huge page enablement + LP: #1657409 - enable virglrenderer + Remaining Changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-kvm.service: systemd unit to call qemu-kvm-init + - d/qemu-system-common.install: install systemd unit and helper script + - d/qemu-system-common.maintscript: clean old sysv and upstart scripts + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: install /etc/default/qemu-kvm + - Enable nesting by default + - d/qemu-system-x86.modprobe: set nested=1 module option on intel. + (is default on amd) + - d/qemu-system-x86.postinst: re-load kvm_intel.ko if it was loaded + without nested=1 + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: expose nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + - d/qemu-system-x86.README.Debian: document intention of nested being + default is comfort, not full support + - Distribution specific machine type (LP: 1304107 1621042 1776189 1761372) + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types + - d/qemu-system-x86.NEWS Info on fixed machine type definitions + for host-phys-bits=true (LP: 1776189) + - add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - d/p/ubuntu/lp-1761372-*: provide pseries-bionic-2.11-sxxm type as + convenience with all meltdown/spectre workarounds enabled by default. + (LP: 1761372). 
+ - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - s390x support + - Create qemu-system-s390x package + - Enable numa support for s390x + - arch aware kvm wrappers + - d/control: update VCS links (updated to match latest Ubuntu) + - qemu-guest-agent: freeze-hook fixes (LP: 1484990) + - d/qemu-guest-agent.install: provide /etc/qemu/fsfreeze-hook + - d/qemu-guest-agent.dirs: provide /etc/qemu/fsfreeze-hook.d + - d/control-in: enable RDMA support in qemu (LP: 1692476) + - enable RDMA config option + - add libibumad-dev build-dep + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control-in: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - d/control-in: Disable capstone disassembler library support (universe) + * Added Changes: + - d/p/ubuntu/define-ubuntu-machine-types.patch: update machine type changes + for qemu 3.1 in the Ubuntu Disco release + - d/p/ubuntu/lp-1759509-* fix waking up VMs from dompmsuspend (LP: #1759509) + - Move s390x roms to a new qemu-system-data-s390x + - d/qemu-system-data.install: install s390x roms as architecture:all in + qemu-system-data + - d/rules: build s390-ccw.img with upstream Makefile + - d/rules: build s390x-netboot.img with upstream Makefile + - d/p/ubuntu/lp-1790901-partial-SLOF-for-s390x-netboot.patch: bring back + some SLOF bits stripped in DFSG to be able to build s390x-netboot roms + As that hack to build s390-ccw.img rom can't build s390x-netboot.img + replace it with a build-indep using the upstream makefiles. + This is less prone to miss future changes/fixes that are done to the + makefiles + - d/control-in: add breaks/replaces for moving s390x roms from + qemu-system-s390x to qemu-system-data + - remove /dev/kvm permission handling (moved to systemd 239-6) (#892945) + [From not yet uploaded Debian branch] + - d/p/debianize-qemu-guest-service.patch: fix path of qemu-ga + (Closes: #918378) + - d/rules: fix qemu-kvm service for debhelper compat >=12 + - d/p/ubuntu/Revert-target-i386-kvm-add-VMX-migration-blocker.patch: + avoid misdetection of simplified nesting blocking all migrations + - d/p/ubuntu/lp-1812384-s390x-Return-specification-exception-for- + unimplement.patch: properly return archicture defined exception + on bad subcodes of diag 308 (LP: #1812384) + * Dropped Changes: + - Include s390-ccw.img firmware (old style native build) + - d/rules enable install s390x-netboot.img (old style native build) + - libvirt/qemu user/group support + - qemu-system-common.postinst: remove acl placed by udev, and add udevadm + trigger. + [ Droppable since logind properly sets ACLs now ] + - qemu-system-common.preinst: add kvm group if needed + [ Droppable because systemd/udev take care of it since 239-6] + - d/p/guest-agent-freeze-hook-skip-dpkg-artifacts.patch of qemu-guest-agent + freeze-hook fixes (LP: 1484990) + [upstream] + - d/p/ubuntu/CVE-2018-3639/* update for qemu 2.12 using the final patches + merged upstream + [upstream] + - d/p/ubuntu/CVE-2018-11806-slirp-correct-size.patch: slirp: correct size + computation while concatenating mbuf. 
+ CVE-2018-11806 + [upstream] + - d/p/ubuntu/lp-1781526-powerpc64-align-memory-THP.patch: align to 2MB + for powerpc64 to speed up translation (LP: 1781526) + [upstream] + - d/p/ubuntu/lp-1780773-s390x-cpumodels-add-z14-Model-ZR1.patch: Add + cpu model for z14 ZR1 (LP: 1780773). + [upstream] + - Mark qemu-system-data foreign to be able to install it e.g. on i386 + (Closes: 903562) + [in Debian] + - d/control-in: qemu-keymaps is provided by qemu-system-data now (from yet + unreleased Debian version) + [in Debian] + - d/p/lp-1755912-qxl-fix-local-renderer-crash.patch: Fix an issue triggered + by migrations with UI frontends or frequent guest resolution changes + (LP #1755912) + [upstream] + - d//ubuntu/target-ppc-extend-eieio-for-POWER9.patch: Backport to + extend eieio for POWER9 emulation (LP: 1787408). + [upstream] + - d/p/ubuntu/lp-1789551-seccomp-set-the-seccomp-filter-to-all-threads.patch: + ensure that the seccomp blacklist is applied to all threads (LP: 1789551) + [upstream] + - improve s390x spectre mitigation with etoken facility (LP: 1790457) + [upstream] + - Update pxe netboot images for KVM s390x to qemu 3.0 level (LP: 1790901) + [upstream] + - d/control-in: our addition of a qemu-system-s390x package needs to follow + the split of qemu-system-data by adding a dependency to it (LP: 1798084) + [in Debian] + - debian/patches/ubuntu/lp1787405-*: Support guest dedicated Crypto + Adapters on s390x (LP: 1787405) + [upstream] + - enable opengl for vfio-MDEV support (LP: 1804766) + [in Debian] + - SECURITY UPDATE: integer overflow in NE2000 NIC emulation + [upstream] + - SECURITY UPDATE: integer overflow via crafted QMP command + [upstream] + - SECURITY UPDATE: OOB heap buffer r/w access in NVM Express Controller + [upstream] + - SECURITY UPDATE: buffer overflow in rtl8139 + [upstream] + - SECURITY UPDATE: buffer overflow in pcnet + [upstream] + - SECURITY UPDATE: DoS via large packet sizes + [upstream] + - SECURITY UPDATE: DoS in lsi53c895a + [upstream] + - SECURITY UPDATE: Out-of-bounds r/w stack access in ppc64 + [upstream] + - SECURITY UPDATE: race condition in 9p + [upstream] + + -- Christian Ehrhardt Tue, 08 Jan 2019 09:41:08 +0100 + +qemu (1:3.1+dfsg-2) unstable; urgency=medium + + * d/rules: split arch and indep builds + * enable s390x cross-compiler and build s390-ccw.img (Closes: #684909) + * build x86 optionrom in qemu-system-data (was in seabios/debian/) + * qemu-system-data: Multi-Arch: allowed=>foreign (Closes: #903562) + * fix Replaces: version for qemu-system-common (Closes: #916279) + * add simple udev rules file for systemd guest agent (Closes: #916674) + * usb-mtp-use-O_NOFOLLOW-and-O_CLOEXEC-CVE-2018-16872.patch + Race condition in usb_mtp implementation (Closes: #916397) + * bt-use-size_t-type-for-length-parameters-instead-of-int-CVE-2018-19665.patch + Memory corruption in bluetooth subsystem (Closes: #916278) + * hw_usb-fix-mistaken-de-initialization-of-CCID-state.patch (Closes: #917007) + * bump debhelper compat to 12 (>>11) + * d/rules: use dh_missing instead of dh_install --list-missing (compat=12) + * use dh_installsystemd for guest agent (Closes: #916625) + * mention closing by 3.1: Closes: #912655, CVE-2018-16847 + * mention closing by 2.10: + Closes: #849798, CVE-2016-10028 + Closes: CVE-2017-9060 + Closes: CVE-2017-8284 + + -- Michael Tokarev Fri, 21 Dec 2018 16:51:39 +0300 + +qemu (1:3.1+dfsg-1) unstable; urgency=medium + + * new upstream release (3.1) + * Security bugs fixed by upstream: + Closes: #910431, CVE-2018-10839: + integer overflow leads to 
buffer overflow issue + Closes: #911468, CVE-2018-17962 + pcnet: integer overflow leads to buffer overflow + Closes: #911469, CVE-2018-17963 + net: ignore packets with large size + Closes: #908682, CVE-2018-3639 + qemu should be able to pass the ssbd cpu flag + Closes: #901017, CVE-2018-11806 + m_cat in slirp/mbuf.c in Qemu has a heap-based buffer overflow + via incoming fragmented datagrams + Closes: #902725, CVE-2018-12617 + qmp_guest_file_read in qemu-ga has an integer overflow + Closes: #907500, CVE-2018-15746 + qemu-seccomp might allow local OS guest users to cause a denial of service + Closes: #915884, CVE-2018-16867 + dev-mtp: path traversal in usb_mtp_write_data of the MTP + Closes: #911499, CVE-2018-17958 + Buffer Overflow in rtl8139_do_receive in hw/net/rtl8139.c + because an incorrect integer data type is used + Closes: #911470, CVE-2018-18438 + integer overflows because IOReadHandler and its associated functions + use a signed integer data type for a size value + Closes: #912535, CVE-2018-18849 + lsi53c895a: OOB msg buffer access leads to DoS + Closes: #914604, CVE-2018-18954 + pnv_lpc_do_eccb function in hw/ppc/pnv_lpc.c in Qemu before 3.1 + allows out-of-bounds write or read access to PowerNV memory + Closes: #914599, CVE-2018-19364 + Use-after-free due to race condition while updating fid path + Closes: #914727, CVE-2018-19489 + 9pfs: crash due to race condition in renaming files + Closes: #912655, CVE-2018-16847 + Out-of-bounds r/w buffer access in cmb operations + * remove patches which were applied upstream + * add new manpage qemu-cpu-models.7 + * qemu-system-ppcemb is gone, use qemu-system-ppc[64] + * do-not-link-everything-with-xen.patch (trivial) + * get-orig-source: handle 3.x and 4.x, and remove roms again, as + upstream wants us to use separate source packages for that stuff + * move generated data from qemu-system-data back to qemu-system-common + * d/control: enable spice on arm64 (Closes: #902501) + (probably should enable on all) + * d/control: change git@salsa urls to https + * add qemu-guest-agent.service (Closes: #795486) + * enable opengl support and virglrenderer (Closes: #813658) + * simplify d/rules just a little bit + * build-depend on libudev-dev, for qga + + -- Michael Tokarev Sun, 02 Dec 2018 19:10:27 +0300 + +qemu (1:2.12+dfsg-3ubuntu9) disco; urgency=medium + + [ Marc Deslauriers ] + * SECURITY UPDATE: integer overflow in NE2000 NIC emulation + - debian/patches/CVE-2018-10839.patch: use proper type in + hw/net/ne2000.c. + - CVE-2018-10839 + * SECURITY UPDATE: integer overflow via crafted QMP command + - debian/patches/CVE-2018-12617.patch: check bytes count read by + guest-file-read in qga/commands-posix.c. + - CVE-2018-12617 + * SECURITY UPDATE: OOB heap buffer r/w access in NVM Express Controller + - debian/patches/CVE-2018-16847.patch: check size in hw/block/nvme.c. + - CVE-2018-16847 + * SECURITY UPDATE: buffer overflow in rtl8139 + - debian/patches/CVE-2018-17958.patch: use proper type in + hw/net/rtl8139.c. + - CVE-2018-17958 + * SECURITY UPDATE: buffer overflow in pcnet + - debian/patches/CVE-2018-17962.patch: use proper type in + hw/net/pcnet.c. + - CVE-2018-17962 + * SECURITY UPDATE: DoS via large packet sizes + - debian/patches/CVE-2018-17963.patch: check size in net/net.c. + - CVE-2018-17963 + * SECURITY UPDATE: DoS in lsi53c895a + - debian/patches/CVE-2018-18849.patch: check message length value is + valid in hw/scsi/lsi53c895a.c. 
+ - CVE-2018-18849 + * SECURITY UPDATE: Out-of-bounds r/w stack access in ppc64 + - debian/patches/CVE-2018-18954.patch: check size before data buffer + access in hw/ppc/pnv_lpc.c. + - CVE-2018-18954 + * SECURITY UPDATE: race condition in 9p + - debian/patches/CVE-2018-19364-1.patch: use write lock in + hw/9pfs/cofile.c. + - debian/patches/CVE-2018-19364-2.patch: use write lock in + hw/9pfs/9p.c. + - CVE-2018-19364 + + [ Christian Ehrhardt] + * debian/patches/ubuntu/lp1787405-*: Support guest dedicated Crypto + Adapters on s390x (LP: #1787405) + * enable opengl for vfio-MDEV support (LP: #1804766) + - d/control-in: set --enable-opengl + - d/control-in: add gl related build-dependencies + + -- Christian Ehrhardt Wed, 21 Nov 2018 13:17:01 -0500 + +qemu (1:2.12+dfsg-3ubuntu8) cosmic; urgency=medium + + * d/control-in: our addition of a qemu-system-s390x package needs to follow + the split of qemu-system-data by adding a dependency to it (LP: #1798084) + + -- Christian Ehrhardt Wed, 17 Oct 2018 10:50:27 +0200 + +qemu (1:2.12+dfsg-3ubuntu7) cosmic; urgency=medium + + * Update pxe netboot images for KVM s390x to qemu 3.0 level (LP: #1790901) + The SLOF source pieces in src:qemu are only used for s390x netboot, + which are independent ROMs (no linking). All other binaries out of this + are part of src:slof and independent. + - d/p/ubuntu/lp-1790901-partial-SLOF-for-s390x-netboot-2.12-to-3.0.patch + - d/p/ubuntu/lp-1790901-0*: backport s390x pxelinux netboot capabilities + and related fixes + + -- Christian Ehrhardt Tue, 25 Sep 2018 13:31:15 +0200 + +qemu (1:2.12+dfsg-3ubuntu6) cosmic; urgency=medium + + * improve s390x spectre mitigation with etoken facility (LP: #1790457) + - debian/patches/ubuntu/lp-1790457-s390x-kvm-add-etoken-facility.patch + - debian/patches/ubuntu/lp-1790457-partial-s390x-linux-headers-update.patch + + -- Christian Ehrhardt Wed, 12 Sep 2018 10:06:48 +0200 + +qemu (1:2.12+dfsg-3ubuntu5) cosmic; urgency=medium + + * d/p/ubuntu/lp-1789551-seccomp-set-the-seccomp-filter-to-all-threads.patch: + ensure that the seccomp blacklist is applied to all threads (LP: #1789551) + - CVE-2018-15746 + + -- Christian Ehrhardt Wed, 29 Aug 2018 08:50:36 +0200 + +qemu (1:2.12+dfsg-3ubuntu4) cosmic; urgency=medium + + [ Murilo Opsfelder Araujo ] + * d//ubuntu/target-ppc-extend-eieio-for-POWER9.patch: Backport to + extend eieio for POWER9 emulation (LP: #1787408). 
+ + -- Christian Ehrhardt Mon, 20 Aug 2018 11:52:39 +0200 + +qemu (1:2.12+dfsg-3ubuntu3) cosmic; urgency=medium + + * d/p/lp-1755912-qxl-fix-local-renderer-crash.patch: Fix an issue triggered + by migrations with UI frontends or frequent guest resolution changes + (LP: #1755912) + + -- Christian Ehrhardt Thu, 19 Jul 2018 08:26:52 +0200 + +qemu (1:2.12+dfsg-3ubuntu2) cosmic; urgency=medium + + * Disable capstone disassembler library support (universe dependency) + + -- Christian Ehrhardt Tue, 17 Jul 2018 08:35:32 +0200 + +qemu (1:2.12+dfsg-3ubuntu1) cosmic; urgency=medium + + * Merge with Debian testing, Remaining Changes: + - Among other things this fixes (LP: #1780768, LP: #1780769, LP: #1780772) + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-kvm.service: systemd unit to call qemu-kvm-init + - d/qemu-system-common.install: install systemd unit and helper script + - d/qemu-system-common.maintscript: clean old sysv and upstart scripts + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: install /etc/default/qemu-kvm + - Enable nesting by default + - set nested=1 module option on intel. (is default on amd) + - re-load kvm_intel.ko if it was loaded without nested=1 + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: expose nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + - d/qemu-system-x86.README.Debian: document intention of nested being + default is comfort, not full support + - libvirt/qemu user/group support + - qemu-system-common.postinst: remove acl placed by udev, and add udevadm + trigger. + - qemu-system-common.preinst: add kvm group if needed + - Distribution specific machine type + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types to ease future live vm migration. + - d/qemu-system-x86.NEWS Info on fixed machine type definitions + - d/p/ubuntu/machine-type-hpb.patch: add -hpb machine type + for host-phys-bits=true (LP: 1776189) + - add an info about -hpb machine type in debian/qemu-system-x86.NEWS + - d/p/ubuntu/lp-1761372-*: provide pseries-bionic-2.11-sxxm type as + convenience with all meltdown/spectre workarounds enabled by default. + (LP: 1761372). + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - s390x support + - Create qemu-system-s390x package + - Include s390-ccw.img firmware + - Enable numa support for s390x + - arch aware kvm wrappers + - update VCS-git (updated to match cosmic) + - qemu-guest-agent: freeze-hook fixes (LP: 1484990) + - d/p/guest-agent-freeze-hook-skip-dpkg-artifacts.patch + - d/qemu-guest-agent.install: provide /etc/qemu/fsfreeze-hook + - d/qemu-guest-agent.dirs: provide /etc/qemu/fsfreeze-hook.d + - Create and install pxe netboot images for KVM s390x (LP: 1732094) + - d/rules enable install s390x-netboot.img + - d/control-in: enable RDMA support in qemu (LP: 1692476) + - tolerate ipxe size change on migrations to >=18.04 (LP: 1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + reference 256k path + - d/control: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. 
+ - SECURITY UPDATE: Speculative Store Bypass + - debian/patches/ubuntu/CVE-2018-3639/0001*.patch: define the 'ssbd' + CPUID feature bit in target/i386/cpu.*. + - debian/patches/ubuntu/CVE-2018-3639/0002*.patch: define the AMD + 'virt-ssbd' CPUID feature bit in target/i386/cpu.c. + - debian/patches/ubuntu/CVE-2018-3639/0003*.patch: define the Virt SSBD + MSR and handling of it in target/i386/cpu.h, target/i386/kvm.c, + target/i386/machine.c. + - CVE-2018-3639 + * Added Changes: + - update machine type changes for qemu 2.12 and the Ubuntu Cosmic release + - add cosmic types for base and -hpb + - drop no more supported types (zesty and yakkety) + - d/p/series: group machine type changes + - d/p/ubuntu/CVE-2018-3639/* update for qemu 2.12 using the final patches + merged upstream + - d/p/ubuntu/CVE-2018-11806-slirp-correct-size.patch: slirp: correct size + computation while concatenating mbuf. + CVE-2018-11806 + - d/qemu-kvm-init, d/qemu-system-common.qemu-kvm.default: drop the + deprecated handling of VHOST_NET_ENABLED and KVM_HUGEPAGES. + - d/qemu-kvm-init: do not exit early on non x86/ppc64el (LP: #1763275) + - d/qemu-kvm-init, d/kvm.powerpc: clean up typos and shellcheck warnings + - d/qemu-kvm-init, d/kvm.powerpc: fix SMT detection and make it only apply + to POWER8 + - d/qemu-kvm-init: drop old VM detection that was broken in some cases and + is no more needed with systemd-detect-virt being more mature and always + present. + - d/kvm.powerpc: drop old powerpc (non-ppc64el) code. + - d/control-in: add libibumad-dev which is now needed for rdma + - d/rules: update s390x delta to match new Debian packaging + - d/p/ubuntu/lp-1781526-powerpc64-align-memory-THP.patch: align to 2MB + for powerpc64 to speed up translation (LP: #1781526) + - d/p/ubuntu/lp-1780773-s390x-cpumodels-add-z14-Model-ZR1.patch: Add + cpu model for z14 ZR1 (LP: #1780773). + - Mark qemu-system-data foreign to be able to install it e.g. on i386 + (Closes: 903562) + - d/control-in: qemu-keymaps is provided by qemu-system-data now (from yet + unreleased Debian version) + * Dropped Changes: + - debian/patches/ubuntu/partial-SLOF-for-s390x-netboot-compilation.patch + (No more removed when building DFSG orig tarball in Debian) + - sdl2 is yet too unstable for the LTS Ubuntu release given the reports + we still see upstream and in Debian - furthermore sdl2 isn't in main yet, + so we revert related changes to stick with the proven for now: + - 0fd25810 - do not build-depend on libx11-dev (libsdl2-dev already + depends on it) + - 9594f820 - switch from sdl1.2 to sdl2 (#870025) + (Debian switched to gtk which seems to work better and has all + dependencies in main.) + - d/control-in: enable seccomp on s390x (in Debian for Linux-any) + - Changes that are now upstream with qemu 2.12 + - d/p/ubuntu/lp1753826-memfd-fix-configure-test.patch: fix FTBFS with + newer versions of glibc >=2.27 (LP: 1753826) + - d/p/ubuntu/qemu-stable-2.11.1.patch: add stable release + - d/p/ubuntu/lp1739665-SSE-AVX-AVX512-cpu-features.patch: Enable new + SSE/AVX/AVX512 cpu features (LP: 1739665) + - d/p/ubuntu/lp1740219-continuous-space-commpage.patch: make Arm + space+commpage continuous which avoids long startup times on + qemu-user-static (LP: 1740219) + - provide pseries-2.12-sxxm type (LP: 1761372) + - d/p/ubuntu/lp-1704312-1-* provide means to manually handle + filesystem-dax with pmem by backporting align and unarmed options + (LP: 1704312). 
+ - d/p/ubuntu/lp-1762315-slirp-Add-domainname.patch: slirp: Add domainname
+ option to slirp's DHCP server (LP: 1762315)
+ - d/p/ubuntu/lp-1762854-*: fix issue with SCSI-2 devices denying
+ Protection information (LP: 1762854).
+ - d/p/ubuntu/lp-1763468-*: fix VSMT handling to fix ppc64el P8/P9
+ migration (LP: 1763468).
+ - SECURITY UPDATE: out-of-bounds access during migration via ps2
+ CVE-2017-16845
+ - SECURITY UPDATE: arbitrary code execution via load_multiboot
+ CVE-2018-7550
+ - SECURITY UPDATE: denial of service in Cirrus CLGD 54xx VGA
+ CVE-2018-7858
+
+ -- Christian Ehrhardt Thu, 21 Jun 2018 14:24:06 +0200
+
+qemu (1:2.12+dfsg-3) unstable; urgency=medium
+
+ * make qemu-system-foo depending
+ on qemu-system-data >>ver~, not >>ver
+ (Closes: #900585)
+ * do not build qemu-system-gui on hppa
+ * use dh_lintian for lintian overrides
+ * update VCS fields to point to salsa.debian.org
+
+ -- Michael Tokarev Fri, 01 Jun 2018 21:42:29 +0300
+
+qemu (1:2.12+dfsg-2) unstable; urgency=medium
+
+ * create new package, qemu-system-gui,
+ and package GTK module and audio modules in there
+ Closes: #850584
+ * add an item about qemu-system-gui to debian/qemu-system-common.NEWS
+ * qemu-system-*: require more recent qemu-system-common
+ * switch all builds to be in a single b/ subdir
+ * d/get-orig-source: remove .oco (object) files from roms/SLOF/
+ * refresh patches/use-fixed-data-path.patch: remove now-unused local var too
+ * ccid-card-passthru-fix-regression-in-realize.patch (Closes: #900006)
+ * debian/control-in: enable seccomp on linux-any (Closes: #900055)
+ * create new arch-indep package qemu-system-data, for data and firmware files.
+ Move common data files from qemu-system-common to it, for now
+ * fix sata/ahci stalls (ahci-fix-PxCI-register-race.patch)
+ * tcg-i386-Fix-dup_vec-in-non-AVX2-codepath.patch (Closes: #900372)
+
+ -- Michael Tokarev Thu, 31 May 2018 13:22:55 +0300
+
+qemu (1:2.12+dfsg-1) unstable; urgency=medium
+
+ * new upstream release
+ * get-orig-source: do not remove roms/* directories,
+ since we will use these to build the roms
+ * disable building on hppa arch (not supported upstream for a long time)
+ * Use https://download.qemu.org to download new tarballs (Closes: #895067)
+ * add Breaks: binfmt-support (<<2.1.7) so that --fix-binary works;
+ also fix qemu-user-static description (Closes: #896478)
+
+ -- Michael Tokarev Thu, 26 Apr 2018 20:29:36 +0300
+
+qemu (1:2.12~rc3+dfsg-2) unstable; urgency=medium
+
+ * fix typo in previous changelog entry
+ * add riscv32/riscv64 to qemu-debootstrap
+ * install gtk message catalogs into qemu-system-common (Closes: #878130)
+ (and install-gtk-message-catalogs-if-CONFIG_GTK.patch)
+ * tcg_mips-handle-large-offsets-from-target-env-to-tlb_table.patch:
+ fix FTBFS on mips targets
+
+ -- Michael Tokarev Sat, 14 Apr 2018 17:01:24 +0300
+
+qemu (1:2.12~rc3+dfsg-1) unstable; urgency=medium
+
+ * new upstream 2.12 release (Release Candidate 3)
+ Closes: #892041, CVE-2018-7550
+ Closes: #884806, CVE-2017-15124
+ Closes: #887392, CVE-2018-5683
+ Closes: #892497, CVE-2018-7858
+ Closes: #882136, CVE-2017-16845
+ Closes: #886532, #892947, #891375, #887892, #860822, #851694
+ * refresh local debian patches
+ * d/rules: enable new system (hppa riscv32 riscv64) and
+ user (aarch64_be xtensa xtensaeb riscv32 riscv64) targets
+ Closes: #893767
+ * fix d/source/options to match current reality
+ * drop use-data-path.patch, upstream now has --firmwarepath= option
+ * enable capstone disassembler library support
+ (build-depend on
libcapstone-dev) + * debian/extract-config-opts: use tab for option / condition separator + * qemu-block-extra: install only block modules + * make `qemu' metapackage to be dummy, to remove it in a future release + * do not suggest kmod, it is pointless + * install /usr/bin/qemu-pr-helper to qemu-utils package + * switch from sdl2 to gtk ui + Closes: #839695, #886671, #879536, #879534, #879532, #879193, #894852 + * qemu-system-ppc: forgotten qemu-system-ppc64le.1 link + * mention closing of #880582 by 2.11 + * package will built against spice 0.14, so Closes: #854959 + * check sfdisk presence in qemu-make-debian-root (Closes: #872098) + * check mke2fs presence in qemu-make-debian-root (Closes: #887207) + * debian/binfmt-update-in: include forgotten hppa (Closes: #891261) + * debian/TODO: removed some old ToDo items + * use binfmt-support --fix-binary option (Closes: #868030) + + -- Michael Tokarev Thu, 12 Apr 2018 19:04:03 +0300 + +qemu (1:2.11+dfsg-1ubuntu11) cosmic; urgency=medium + + * d/p/ubuntu/machine-type-hpb.patch: add -hpb machine type + for host-phys-bits=true (LP: #1776189) + - add an info about this change in debian/qemu-system-x86.NEWS + + -- Christian Ehrhardt Tue, 12 Jun 2018 09:01:00 +0200 + +qemu (1:2.11+dfsg-1ubuntu10) cosmic; urgency=medium + + * SECURITY UPDATE: Speculative Store Bypass + - debian/patches/ubuntu/CVE-2018-3639/0001*.patch: define the 'ssbd' + CPUID feature bit in target/i386/cpu.*. + - debian/patches/ubuntu/CVE-2018-3639/0002*.patch: define the AMD + 'virt-ssbd' CPUID feature bit in target/i386/cpu.c. + - debian/patches/ubuntu/CVE-2018-3639/0003*.patch: define the Virt SSBD + MSR and handling of it in target/i386/cpu.h, target/i386/kvm.c, + target/i386/machine.c. + - CVE-2018-3639 + + -- Marc Deslauriers Tue, 22 May 2018 09:34:52 -0400 + +qemu (1:2.11+dfsg-1ubuntu9) cosmic; urgency=medium + + * SECURITY UPDATE: out-of-bounds access during migration via ps2 + - debian/patches/ubuntu/CVE-2017-16845.patch: check PS2Queue pointers + in post_load routine in hw/input/ps2.c. + - CVE-2017-16845 + * SECURITY UPDATE: arbitrary code execution via load_multiboot + - debian/patches/ubuntu/CVE-2018-7550.patch: handle bss_end_addr being + zero in hw/i386/multiboot.c. + - CVE-2018-7550 + * SECURITY UPDATE: denial of service in Cirrus CLGD 54xx VGA + - debian/patches/ubuntu/CVE-2018-7858.patch: fix region calculation in + hw/display/vga.c. + - CVE-2018-7858 + + -- Marc Deslauriers Wed, 16 May 2018 14:14:20 -0400 + +qemu (1:2.11+dfsg-1ubuntu8) cosmic; urgency=medium + + * No-change rebuild for ncurses soname changes. + + -- Matthias Klose Thu, 03 May 2018 14:18:39 +0000 + +qemu (1:2.11+dfsg-1ubuntu7) bionic; urgency=medium + + * d/p/ubuntu/lp-1762854-*: fix issue with SCSI-2 devices denying Protection + information (LP: #1762854). + * d/p/ubuntu/lp-1763468-*: fix VSMT handling to fix ppc64el P8/P9 migration + (LP: #1763468). + + -- Christian Ehrhardt Wed, 11 Apr 2018 07:46:18 +0200 + +qemu (1:2.11+dfsg-1ubuntu6) bionic; urgency=medium + + * Remove LP: 1752026 changes to d/p/ubuntu/define-ubuntu-machine-types.patch. + The Kernel fixes are preferred and already committed to the kernel. 
+ Therefore remove the default disabling of the HTM feature (LP: #1761175) + * d/p/ubuntu/lp1739665-SSE-AVX-AVX512-cpu-features.patch: Enable new + SSE/AVX/AVX512 cpu features (LP: #1739665) + * d/p/ubuntu/lp1740219-continuous-space-commpage.patch: make Arm + space+commpage continuous which avoids long startup times on + qemu-user-static (LP: #1740219) + * d/p/ubuntu/lp-1761372-*: provide pseries-bionic-2.11-sxxm type as + convenience with all meltdown/spectre workarounds enabled by default. + This is not the default type following upstream and x86 on that. + (LP: #1761372). + * d/p/ubuntu/lp-1704312-1-* provide means to manually handle filesystem-dax + with pmem by backporting align and unarmed options (LP: #1704312). + * d/p/ubuntu/lp-1762315-slirp-Add-domainname.patch: slirp: Add domainname + option to slirp's DHCP server (LP: #1762315) + + -- Christian Ehrhardt Wed, 04 Apr 2018 15:16:07 +0200 + +qemu (1:2.11+dfsg-1ubuntu5) bionic; urgency=medium + + * Revert the slirp changes of 1:2.11+dfsg-1ubuntu3 until they are upstream + accepted to be better long term maintainable (LP: #1753938) + + -- Christian Ehrhardt Thu, 22 Mar 2018 10:31:23 +0100 + +qemu (1:2.11+dfsg-1ubuntu4) bionic; urgency=medium + + * d/p/ubuntu/define-ubuntu-machine-types.patch: Disable HTM feature for + ppc64el in spapr to let the defaults not fail on Power9 HW (LP: #1752026). + * d/p/ubuntu/lp1753826-memfd-fix-configure-test.patch: fix FTBFS with newer + versions of glibc >=2.27 (LP: #1753826) + + -- Christian Ehrhardt Mon, 05 Mar 2018 16:43:01 +0100 + +qemu (1:2.11+dfsg-1ubuntu3) bionic; urgency=medium + + * d/p/ubuntu/0001-slirp-Add-domainname-option-to-slirp-s-DHCP-server.patch, + d/p/ubuntu/0002-slirp-Add-classless-static-routes-support-to-DHCP-se.patch: + Add domainname option and classless static routes support to the user + networking's DHCP server + + -- Benjamin Drung Fri, 02 Mar 2018 21:08:54 +0100 + +qemu (1:2.11+dfsg-1ubuntu2) bionic; urgency=medium + + * d/p/ubuntu/qemu-stable-2.11.1.patch: add stable release + - among other fixes this adds code to: + - mitigate the Spectre/Meltdown attacks (LP: #1744882) (CVE-2017-5715) + However, enabling this functionality requires additional configuration + beyond just updating QEMU. Also migrations need special consideration. + Details about that can be found at: + https://www.qemu.org/2018/02/14/qemu-2-11-1-and-spectre-update/ + - Power9 allocation of max 8 threads per core (LP: #1750526) + * Drop changes that are part of the upstream stable release + - d/p/ubuntu/linux-headers-update-to-4.15-rc1.patch + - d/p/ubuntu/linux-headers-update-4.15-rc9.patch + - d/p/ubuntu/lp1743560-s390x-kvm-Handle-bpb-feature.patch + - d/p/ubuntu/lp1743560-s390x-kvm-provide-stfle.81.patch + * d/p/ubuntu/define-ubuntu-machine-types.patch: refresh to match stable update + * d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: unify to only change the + common compat.h header and add some extra info in the patch header. 
+ + -- Christian Ehrhardt Mon, 19 Feb 2018 11:03:11 +0100 + +qemu (1:2.11+dfsg-1ubuntu1) bionic; urgency=medium + + * Merge with Debian testing, among other fixes this includes + - fix fatal error on negative maxcpus (LP: #1722495) + - fix segfault on dump-guest-memory on guests without memory (LP: #1723381) + - linux user threading issues (LP: #1350435) + - TOD-Clock Epoch Extension Support on s390x (LP: #1732691) + Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-kvm.service: systemd unit to call qemu-kvm-init + - d/qemu-system-common.install: install systemd unit and helper script + - d/qemu-system-common.maintscript: clean old sysv and upstart scripts + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: install /etc/default/qemu-kvm + - Enable nesting by default + - set nested=1 module option on intel. (is default on amd) + - re-load kvm_intel.ko if it was loaded without nested=1 + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: expose nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + - libvirt/qemu user/group support + - qemu-system-common.postinst: remove acl placed by udev, and add udevadm + trigger. + - qemu-system-common.preinst: add kvm group if needed + - Distribution specific machine type + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types to ease future live vm migration. + - d/qemu-system-x86.NEWS Info on fixed machine type definitions + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - s390x support + - Create qemu-system-s390x package + - Include s390-ccw.img firmware + - Enable numa support for s390x + - ppc64[le] support + - d/qemu-system-ppc.links provide usr/bin/qemu-system-ppc64le symlink + - arch aware kvm wrappers + * Added Changes + - update VCS-git to match the bionic branch + - sdl2 is yet too unstable for the LTS Ubuntu release given the reports + we still see upstream and in Debian - furthermore sdl2 isn't in main yet, + so we revert related changes to stick with the proven for now: + - 0fd25810 - do not build-depend on libx11-dev (libsdl2-dev already + depends on it) + - 9594f820 - switch from sdl1.2 to sdl2 (#870025) + - d/qemu-system-x86.README.Debian: document intention of nested being + default is comfort, not full support + - update Ubuntu machine types for qemu 2.11 + - qemu-guest-agent: freeze-hook fixes (LP: #1484990) + - d/p/guest-agent-freeze-hook-skip-dpkg-artifacts.patch + - d/qemu-guest-agent.install: provide /etc/qemu/fsfreeze-hook + - d/qemu-guest-agent.dirs: provide /etc/qemu/fsfreeze-hook.d + - Create and install pxe netboot images for KVM s390x (LP: #1732094) + - d/rules enable install s390x-netboot.img + - debian/patches/ubuntu/partial-SLOF-for-s390x-netboot-compilation.patch + - d/control-in: enable RDMA support in qemu (LP: #1692476) + - on s390x provide facility bits 81 (ppa15) and 82 (bpb) (LP: #1743560) + - d/p/ubuntu/linux-headers-update-to-4.15-rc1.patch + - d/p/ubuntu/linux-headers-update-4.15-rc9.patch + - d/p/ubuntu/lp1743560-s390x-kvm-Handle-bpb-feature.patch + - d/p/ubuntu/lp1743560-s390x-kvm-provide-stfle.81.patch + - tolerate ipxe size change on migrations to >=18.04 (LP: #1713490) + - d/p/ubuntu/pre-bionic-256k-ipxe-efi-roms.patch: old machine types + 
reference 256k path + - d/control: depend on ipxe-qemu-256k-compat-efi-roms to be able to + handle incoming migrations from former releases. + - d/control-in: enable seccomp on s390x + * Dropped changes (no more needed): + - Dropped VHOST_NET_ENABLED and KVM_HUGEPAGES from /etc/default/qemu-kvm + The functionality is retained for upgraders, but is deprecated. + Post 18.04 the implementation for these configurations will be removed. + * Dropped changes (in Debian now): + - ppc64[le] support + - Enable seccomp for ppc64el + - bump libseccomp-dev dependency, 2.3 is the minimum for ppc64 + - disable missing x32 architecture + - d/rules: or32 is now named or1k (since 4a09d0bb) + - d/qemu-system-common.docs: new paths since (ac06724a) + - d/qemu-system-common.install: qmp-commands.txt removed, but replaced + by qapi-schema.json which is already packaged (since 4d8bb958) + - d/p/02_kfreebsd.patch: utimensat is no more optional upstream (Update + to Debian patch to match qemu 2.10) + - d/qemu-system-common.docs: adapt new path of live-block-operations.rst + since 8508eee7 + - d/qemu-system-common.docs: adapt q35 config paths since 9ca019c1 + - make nios2/hppa not installed explicitly until further stablized + - d/qemu-guest-agent.install: add the new guest agent reference man page + qemu-ga-ref + - d/qemu-system-common.install: add the now generated qapi/qmp reference + along the qapi intro + - d/not-installed: ignore further generated (since 56e8bdd4) files in + dh_missing that are already provided in other formats qemu-doc, + qemu-qmp-ref,qemu-ga-ref + * Dropped changes (integrated upstream): + - d/p/detect-ITS-and-skip-usage-on-older-kernel.patch to avoid crashes + on arm64 when doing suspend/resume and reboots due to older kernels not + supporting ITS (LP 1731051). + - Apply linux-user-return-EINVAL-from-prctl-PR_-_SECCOMP.patch from + James Cowgill to prevent qemu-user from forwarding prctl seccomp + calls (LP 1726394) + - update to upstream 2.10.1 point release (LP 1722808) + + + + -- Christian Ehrhardt Mon, 22 Jan 2018 14:35:18 +0100 + +qemu (1:2.11+dfsg-1) unstable; urgency=medium + + [ Michael Tokarev ] + * update to new upstream (2.11) release + Closes: #883625, CVE-2017-17381 + Closes: #880832, CVE-2017-15289 + Closes: #880836, CVE-2017-15268 + Closes: #883399, CVE-2017-15119 + Closes: #883406, CVE-2017-15118 + Closes: #880582 + * update to new upstream, remove old patches, refresh debian patches + * disable sdl audio driver (pulse or oss should work fine) + * do not build-depend on libx11-dev (libsdl2-dev already depends on it) + * move libpulse-dev build-dep to a better place + * clean up d/control from various old conflicts/replaces/provides + * remove --with-system-pixman, not used anymore + * remove ubuntu-specific qemu-system-aarch64 transitional package (trusty) + * remove ubuntu-specific mentions of old qemu-kvm-spice package (precise) + * remove old comment about /etc/kvm from qemu-kvm description + * add Suggests: openbios-sparc for qemu-system-sparc on ubuntu + (similar to what is done for qemu-system-ppc) + * update get-orig-source.sh with new blobs/submodules + * update debian/watch a bit + + [ Aurelien Jarno ] + * debian/control-in: build qemu-system and qemu-user on mips64 and + mips64el. Closes: #880485. 
+ + [ Christian Ehrhardt ] + * ppc64[le]: provide symlink matching arch name + * d/control-in: Enable seccomp for ppc64el, + this bumps minimum libseccomp version + + -- Michael Tokarev Thu, 11 Jan 2018 14:42:12 +0300 + +qemu (1:2.10.0+dfsg-2) unstable; urgency=medium + + * update to upstream 2.10.1 point release + Closes: #877160 + Closes: CVE-2017-13673 + * remove 3 patches included upstream: + multiboot-validate-multiboot-header-address-values-CVE-2017-14167.patch + vga-stop-passing-pointers-to-vga_draw_line-functions-CVE-2017-13672.patch + slirp-fix-clearing-ifq_so-from-pending-packets-CVE-2017-13711.patch + * 9pfs-use-g_malloc0-to-allocate-space-for-xattr-CVE-2017-15038.patch + Closes: #877890, CVE-2017-15038 + * remove-trailing-whitespace-from-qemu-options.hx.patch + Closes: #875711 + * drop dh_makeshlibs call (was for libcacard) + * drop linux-libc-dev build-dependency (it gets pulled by libc-dev) + * switch from sdl1 to sdl2 (Closes: #870025) + + -- Michael Tokarev Sun, 08 Oct 2017 12:51:09 +0300 + +qemu (1:2.10.0+dfsg-1) unstable; urgency=medium + + * remove blobs, to DFSG'ify it again (there's still + no source for some blobs included in upstream tarball) + There's no way to revert to 2-number version due to prev. upload + * update from upstream git (no changes but include date & commit-id): + multiboot-validate-multiboot-header-address-values-CVE-2017-14167.patch + * update previous changelog entry (fix bug/closes refs): + Closes: #873851, CVE-2017-13672 + Closes: #874606, CVE-2017-14167 + Closes: #873875, CVE-2017-13711 + + -- Michael Tokarev Mon, 25 Sep 2017 09:46:53 +0300 + +qemu (1:2.10.0-1) unstable; urgency=medium + + * new upstream release, 2.10 + Closes: #865754, CVE-2017-9503 + Closes: #864219, CVE-2017-9375 + Closes: #869945 + Closes: #867978 + Closes: #871648, #871702, #872257 + Closes: #851694 + Closes: #696289 + Closed in this upstream release: + #865755, CVE-2017-9524 + #863840, CVE-2017-9310 + #863943, CVE-2017-9330 + #864216, CVE-2017-9373 + #864568, CVE-2017-9374 + #869171, CVE-2017-11434 + #869173, CVE-2017-11334 + #869706, CVE-2017-10911 + #867751, CVE-2017-10806 + #866674, CVE-2017-10664 + #873849, CVE-2017-12809 + #849798, CVE-2016-10028 + CVE-2017-9060 + CVE-2017-8284 + * dropped all fixes, applied upstream + * dropped 02_kfreebsd.patch - apparently not relevant anymore + * dropped +dfsg, use upstream tarball directly: we do not use + binaries shipped there, and even for those, upstream tarball + contains the sources + * refreshed list of targets: + qemu-or32, qemu-system-or32 => qemu-or1k, qemu-system-or1k + +qemu-nios2, qemu-system-nios2 + +qemu-hppa + * added hppa binfmt entry + * refreshed docs lists for various packages + * new (security) patches: + vga-stop-passing-pointers-to-vga_draw_line-functions-CVE-2017-13672.patch + Closes: #873851, CVE-2017-13672 + multiboot-validate-multiboot-header-address-values-CVE-2017-14167.patch + Closes: #874606, CVE-2017-14167 + slirp-fix-clearing-ifq_so-from-pending-packets-CVE-2017-13711.patch + Closes: #873875, CVE-2017-13711 + + -- Michael Tokarev Sat, 23 Sep 2017 16:47:02 +0300 + +qemu (1:2.10+dfsg-0ubuntu5) bionic; urgency=medium + + * d/p/detect-ITS-and-skip-usage-on-older-kernel.patch to avoid crashes + on arm64 when doing suspend/resume and reboots due to older kernels not + supporting ITS (LP: #1731051). 
+ + -- Christian Ehrhardt Tue, 14 Nov 2017 08:30:29 +0100 + +qemu (1:2.10+dfsg-0ubuntu4) bionic; urgency=medium + + * Apply linux-user-return-EINVAL-from-prctl-PR_-_SECCOMP.patch from + James Cowgill to prevent qemu-user from forwarding prctl seccomp + calls (LP: #1726394) + + -- Julian Andres Klode Sat, 04 Nov 2017 00:21:14 +0100 + +qemu (1:2.10+dfsg-0ubuntu3) artful; urgency=medium + + * fix enablement of qemu-kvm service (LP: #1720397) + - rename d/qemu-kvm.service to d/qemu-system-common.qemu-kvm.service + - d/rules: add proper enablement debhelper calls + - d/qemu-system-common.install: install covered by dh_installinit + + -- Christian Ehrhardt Mon, 16 Oct 2017 11:28:39 +0200 + +qemu (1:2.10+dfsg-0ubuntu2) artful; urgency=medium + + * update to upstream 2.10.1 point release (LP: #1722808) + + -- Christian Ehrhardt Wed, 11 Oct 2017 15:33:40 +0200 + +qemu (1:2.10+dfsg-0ubuntu1) artful; urgency=medium + + * Merge with Upstream 2.10.0 to pick up final fixes of the 2.10 release + Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-kvm.service: systemd unit to call qemu-kvm-init + - d/qemu-system-common.install: install systemd unit and helper script + - d/qemu-system-common.maintscript: clean old sysv and upstart scripts + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: install /etc/default/qemu-kvm + - Enable nesting by default + - set nested=1 module option on intel. (is default on amd) + - re-load kvm_intel.ko if it was loaded without nested=1 + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: expose nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + - libvirt/qemu user/group support + - qemu-system-common.postinst: remove acl placed by udev, and add udevadm + trigger. + - qemu-system-common.preinst: add kvm group if needed + - Distribution specific machine type + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types to ease future live vm migration. 
+ - d/qemu-system-x86.NEWS Info on fixed machine type definitions + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - s390x support + - Create qemu-system-s390x package + - Include s390-ccw.img firmware + - Enable numa support for s390x + - ppc64[le] support + - d/qemu-system-ppc.links provide usr/bin/qemu-system-ppc64le symlink + - Enable seccomp for ppc64el + - bump libseccomp-dev dependency, 2.3 is the minimum for ppc64 + - arch aware kvm wrappers + - update VCS-git to match the Artful branch + - disable missing x32 architecture + - d/rules: or32 is now named or1k (since 4a09d0bb) + - d/qemu-system-common.docs: new paths since (ac06724a) + - d/qemu-system-common.install: qmp-commands.txt removed, but replaced + by qapi-schema.json which is already packaged (since 4d8bb958) + - d/p/02_kfreebsd.patch: utimensat is no more optional upstream (Update + to Debian patch to match qemu 2.10) + - s390x package now builds correctly on all architectures (LP 1710695) + - d/qemu-system-common.docs: adapt new path of live-block-operations.rst + since 8508eee7 + - d/qemu-system-common.docs: adapt q35 config paths since 9ca019c1 + - make nios2/hppa not installed explicitly until further stablized + - d/qemu-guest-agent.install: add the new guest agent reference man page + qemu-ga-ref + - d/qemu-system-common.install: add the now generated qapi/qmp reference + along the qapi intro + - d/not-installed: ignore further generated (since 56e8bdd4) files in + dh_missing that are already provided in other formats qemu-doc, + qemu-qmp-ref,qemu-ga-ref + + + -- Christian Ehrhardt Tue, 05 Sep 2017 08:31:26 +0200 + +qemu (1:2.10~rc4+dfsg-0ubuntu1) artful; urgency=medium + + * Merge with Upstream 2.10-rc4; This fixes a migration issue (LP: #1711602); + Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-kvm.service: systemd unit to call qemu-kvm-init + - d/qemu-system-common.install: install systemd unit and helper script + - d/qemu-system-common.maintscript: clean old sysv and upstart scripts + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: install /etc/default/qemu-kvm + - Enable nesting by default + - set nested=1 module option on intel. (is default on amd) + - re-load kvm_intel.ko if it was loaded without nested=1 + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: expose nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + - libvirt/qemu user/group support + - qemu-system-common.postinst: remove acl placed by udev, and add udevadm + trigger. + - qemu-system-common.preinst: add kvm group if needed + - Distribution specific machine type + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types to ease future live vm migration. 
+ - d/qemu-system-x86.NEWS Info on fixed machine type definitions + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - s390x support + - Create qemu-system-s390x package + - Include s390-ccw.img firmware + - Enable numa support for s390x + - ppc64[le] support + - d/qemu-system-ppc.links provide usr/bin/qemu-system-ppc64le symlink + - Enable seccomp for ppc64el + - bump libseccomp-dev dependency, 2.3 is the minimum for ppc64 + - arch aware kvm wrappers + - update VCS-git to match the Artful branch + - disable missing x32 architecture + - d/rules: or32 is now named or1k (since 4a09d0bb) + - d/qemu-system-common.docs: new paths since (ac06724a) + - d/qemu-system-common.install: qmp-commands.txt removed, but replaced + by qapi-schema.json which is already packaged (since 4d8bb958) + - d/p/02_kfreebsd.patch: utimensat is no more optional upstream (Update + to Debian patch to match qemu 2.10) + - s390x package now builds correctly on all architectures (LP 1710695) + * Added changes: + - d/qemu-system-common.docs: adapt new path of live-block-operations.rst + since 8508eee7 + - d/qemu-system-common.docs: adapt q35 config paths since 9ca019c1 + - make nios2/hppa not installed explicitly until further stablized + - d/qemu-guest-agent.install: add the new guest agent reference man page + qemu-ga-ref + - d/qemu-system-common.install: add the now generated qapi/qmp reference + along the qapi intro + - d/not-installed: ignore further generated (since 56e8bdd4) files in + dh_missing that are already provided in other formats qemu-doc, + qemu-qmp-ref,qemu-ga-ref + - d/p/ubuntu/define-ubuntu-machine-types.patch: update to match new + changes in 2.10-rc4 + + -- Christian Ehrhardt Fri, 25 Aug 2017 07:49:30 +0200 + +qemu (1:2.10~rc3+dfsg-0ubuntu1) artful; urgency=medium + + * Merge with Debian unstable (2.8) and Upstream 2.10-rci3; This fixes + a set of bugs + - [FFE] Qemu 2.10 in Artful (LP: #1699968) + - CPU hot unplug fails after migrating a CPU hotplugged guest + from source (LP: #1677552) + - [Feature] KNL/KNM: Numa Distance on KVM(LP: #1647902) + - New KVM 288 Pass Through (LP: #1672447) + - aarch64: MSI is not supported by interrupt controller (LP: #1706630) + * Remaining changes: + - qemu-kvm to systemd unit + - d/qemu-kvm-init: script for QEMU KVM preparation modules, ksm, + hugepages and architecture specifics + - d/qemu-kvm.service: systemd unit to call qemu-kvm-init + - d/qemu-system-common.install: install systemd unit and helper script + - d/qemu-system-common.maintscript: clean old sysv and upstart scripts + - d/qemu-system-common.qemu-kvm.default: defaults for + /etc/default/qemu-kvm + - d/rules: install /etc/default/qemu-kvm + - Enable nesting by default + - set nested=1 module option on intel. (is default on amd) + - re-load kvm_intel.ko if it was loaded without nested=1 + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: expose nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/enable-svm-by-default.patch: Enable nested svm by default + in qemu64 on amd + - libvirt/qemu user/group support + - qemu-system-common.postinst: remove acl placed by udev, and add udevadm + trigger. + - qemu-system-common.preinst: add kvm group if needed + - Distribution specific machine type + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types to ease future live vm migration. 
+ - d/qemu-system-x86.NEWS Info on fixed machine type definitions + - improved dependencies + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - let qemu-utils recommend sharutils + - s390x support + - Create qemu-system-s390x package + - Include s390-ccw.img firmware + - Enable numa support for s390x + - ppc64[le] support + - d/qemu-system-ppc.links provide usr/bin/qemu-system-ppc64le symlink + - Enable seccomp for ppc64el + - bump libseccomp-dev dependency, 2.3 is the minimum for ppc64 + - arch aware kvm wrappers + - disable missing x32 architecture + - update VCS links + * Added changes + - d/rules: or32 is now named or1k (since 4a09d0bb) + - d/qemu-system-common.docs: new paths since (ac06724a) + - d/qemu-system-common.install: qmp-commands.txt removed, but replaced + by qapi-schema.json which is already packaged (since 4d8bb958) + - Updates in debian/patches to match qemu 2.10 + - d/p/02_kfreebsd.patch: utimensat is no more optional upstream + - d/p/ubuntu/enable-svm-by-default.patch: target-i386 -> target/i386 + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: target-i386 -> target/i386 + - d/p/ubuntu/define-ubuntu-machine-types.patch: new 2.10 ubuntu types + - update VCS-git to match the Artful branch + - s390x package now builds correctly on all architectures (LP: #1710695) + * Dropped changes (integrated upstream): + - d/p/ubuntu/spapr-pci-populate-PCI-DT-in-reverse-order.patch: backport + "spapr/pci: populate PCI DT in reverse order" (LP 1670481). + - All CVE fixes formerly applied are upstream and thereby dropped. + + -- Christian Ehrhardt Tue, 08 Aug 2017 16:59:19 +0200 + +qemu (1:2.8+dfsg-7) unstable; urgency=medium + + * uploading to unstable all fixes which went to stretch-security + (exactly the same as 2.8+dfsg-6+deb9u2) + + -- Michael Tokarev Sat, 05 Aug 2017 16:35:01 +0300 + +qemu (1:2.8+dfsg-6+deb9u2) stretch-security; urgency=high + + * actually apply the nbd server patches, not only include in debian/patches/ + Really closes: #865755, CVE-2017-9524 + * slirp-check-len-against-dhcp-options-array-end-CVE-2017-11434.patch + Closes: #869171, CVE-2017-11434 + * exec-use-qemu_ram_ptr_length-to-access-guest-ram-CVE-2017-11334.patch + Closes: #869173, CVE-2017-11334 + * usb-redir-fix-stack-overflow-in-usbredir_log_data-CVE-2017-10806.patch + Closes: #867751, CVE-2017-10806 + * add reference to #869706 to + xen-disk-don-t-leak-stack-data-via-response-ring-CVE-2017-10911.patch + * disable xhci recursive calls fix for now, as it causes instant crash + (xhci-guard-xhci_kick_epctx-against-recursive-calls-CVE-2017-9375.patch) + Reopens: #864219, CVE-2017-9375 + Closes: #869945 + + -- Michael Tokarev Wed, 02 Aug 2017 16:57:34 +0300 + +qemu (1:2.8+dfsg-6+deb9u1) stretch-security; urgency=high + + * net-e1000e-fix-an-infinite-loop-issue-CVE-2017-9310.patch + Closes: #863840, CVE-2017-9310 + * usb-ohci-fix-error-return-code-in-servicing-iso-td-CVE-2017-9330.patch + Closes: #863943, CVE-2017-9330 + * ide-ahci-call-cleanup-function-in-ahci-unit-CVE-2017-9373.patch + Closes: #864216, CVE-2017-9373 + * xhci-guard-xhci_kick_epctx-against-recursive-calls-CVE-2017-9375.patch + Closes: #864219, CVE-2017-9375 + * usb-ehci-fix-memory-leak-in-ehci-CVE-2017-9374.patch + Closes: #864568, CVE-2017-9374 + * nbd-ignore-SIGPIPE-CVE-2017-10664.patch + Closes: #866674, CVE-2017-10664 + * nbd-fully-initialize-client-in-case-of-failed-negotiation-CVE-2017-9524.patch + nbd-fix-regression-on-resiliency-to-port-scan-CVE-2017-9524.patch + Closes: #865755, 
CVE-2017-9524 + * xen-disk-don-t-leak-stack-data-via-response-ring-CVE-2017-10911.patch + Closes: CVE-2017-10911 + + -- Michael Tokarev Wed, 12 Jul 2017 11:05:16 +0300 + +qemu (1:2.8+dfsg-6) unstable; urgency=high + + * 9pfs-local-forbid-client-access-to-metadata-CVE-2017-7493.patch + Closes: CVE-2017-7493 + * group all 9p patches together + * drop obsolete comment about libiscsi on ubuntu from d/control + + -- Michael Tokarev Tue, 23 May 2017 09:58:03 +0300 + +qemu (1:2.8+dfsg-5) unstable; urgency=high + + * Security fix release + * 9pfs-local-set-path-of-export-root-to-dot-CVE-2017-7471.patch + Closes: #860785, CVE-2017-7471 + * 9pfs-xattr-fix-memory-leak-in-v9fs_list_xattr-CVE-2017-8086.patch + Closes: #861348, CVE-2017-8086 + * vmw_pvscsi-check-message-ring-page-count-at-init-CVE-2017-8112.patch + Closes: #861351, CVE-2017-8112 + * scsi-avoid-an-off-by-one-error-in-megasas_mmio_write-CVE-2017-8380.patch + Closes: #862282, CVE-2017-8380 + * input-limit-kbd-queue-depth-CVE-2017-8379.patch + Closes: #862289, CVE-2017-8379 + * audio-release-capture-buffers-CVE-2017-8309.patch + Closes: #862280, CVE-2017-8309 + + -- Michael Tokarev Wed, 17 May 2017 09:01:24 +0300 + +qemu (1:2.8+dfsg-4) unstable; urgency=high + + * usb-ohci-limit-the-number-of-link-eds-CVE-2017-6505.patch + Closes: #856969, CVE-2017-6505 + * linux-user-fix-apt-get-update-on-linux-user-hppa.patch + Closes: #846084 + * update to 2.8.1 upstream stable/bugfix release + (v2.8.1.diff from upstream, except of seabios blob bits). + Closes: #857744, CVE-2016-9603 + Patches dropped because they're included in 2.8.1 release: + 9pfs-symlink-attack-fixes-CVE-2016-9602.patch + char-fix-ctrl-a-b-not-working.patch + cirrus-add-blit_is_unsafe-to-cirrus_bitblt_cputovideo-CVE-2017-2620.patch + cirrus-fix-oob-access-issue-CVE-2017-2615.patch + cirrus-ignore-source-pitch-as-needed-in-blit_is_unsafe.patch + linux-user-fix-s390x-safe-syscall-for-z900.patch + nbd_client-fix-drop_sync-CVE-2017-2630.patch + s390x-use-qemu-cpu-model-in-user-mode.patch + sd-sdhci-check-data-length-during-dma_memory_read-CVE-2017-5667.patch + virtio-crypto-fix-possible-integer-and-heap-overflow-CVE-2017-5931.patch + vmxnet3-fix-memory-corruption-on-vlan-header-stripping-CVE-2017-6058.patch + * bump seabios dependency to 1.10.2 due to ahci fix in 2.8.1 + * 9pfs-fix-file-descriptor-leak-CVE-2017-7377.patch + (Closes: #859854, CVE-2017-7377) + * dma-rc4030-limit-interval-timer-reload-value-CVE-2016-8667.patch + Closes: #840950, CVE-2016-8667 + * make d/control un-writable to stop users from changing a generated file + * two patches from upstream to fix user-mode network with IPv6 + slirp-make-RA-build-more-flexible.patch + slirp-send-RDNSS-in-RA-only-if-host-has-an-IPv6-DNS.patch + (Closes: #844566) + + -- Michael Tokarev Mon, 03 Apr 2017 16:28:49 +0300 + +qemu (1:2.8+dfsg-3ubuntu4) artful; urgency=medium + + * debian/rules: fix installation of /etc/default/qemu-kvm (LP: #1692530) + This was inadvertently dropped on 2.8 merge. + + -- Christian Ehrhardt Mon, 22 May 2017 15:45:58 +0200 + +qemu (1:2.8+dfsg-3ubuntu3) artful; urgency=medium + + * SECURITY UPDATE: denial of service via leak in virtFS + - debian/patches/CVE-2017-7377.patch: fix file descriptor leak in + hw/9pfs/9p.c. + - CVE-2017-7377 + * SECURITY UPDATE: denial of service in cirrus_vga + - debian/patches/CVE-2017-7718.patch: check parameters in + hw/display/cirrus_vga_rop.h. 
+ - CVE-2017-7718 + * SECURITY UPDATE: code execution via cirrus_vga OOB r/w + - debian/patches/CVE-2017-7980-1.patch: handle negative pitch in + hw/display/cirrus_vga.c. + - debian/patches/CVE-2017-7980-2.patch: allow zero source pitch in + hw/display/cirrus_vga.c. + - debian/patches/CVE-2017-7980-3.patch: fix blit address mask handling + in hw/display/cirrus_vga.c. + - debian/patches/CVE-2017-7980-4.patch: fix patterncopy checks in + hw/display/cirrus_vga.c. + - debian/patches/CVE-2017-7980-5.patch: revert allow zero source pitch + in hw/display/cirrus_vga.c. + - debian/patches/CVE-2017-7980-6.patch: stop passing around dst + pointers in hw/display/cirrus_vga.c, hw/display/cirrus_vga_rop.h, + hw/display/cirrus_vga_rop2.h. + - debian/patches/CVE-2017-7980-7.patch: stop passing around src + pointers in hw/display/cirrus_vga.c, hw/display/cirrus_vga_rop.h, + hw/display/cirrus_vga_rop2.h. + - debian/patches/CVE-2017-7980-8.patch: fix off-by-one in + hw/display/cirrus_vga_rop.h. + - debian/patches/CVE-2017-7980-9.patch: fix cirrus_invalidate_region in + hw/display/cirrus_vga.c. + - CVE-2017-7980 + * SECURITY UPDATE: denial of service via memory leak in virtFS + - debian/patches/CVE-2017-8086.patch: fix leak in hw/9pfs/9p-xattr.c. + - CVE-2017-8086 + * SECURITY UPDATE: denial of service via leak in audio + - debian/patches/CVE-2017-8309.patch: release capture buffers in + audio/audio.c. + - CVE-2017-8309 + * SECURITY UPDATE: denial of service via leak in keyboard + - debian/patches/CVE-2017-8379-1.patch: limit kbd queue depth in + ui/input.c. + - debian/patches/CVE-2017-8379-2.patch: don't queue delay if paused in + ui/input.c. + - CVE-2017-8379 + + -- Marc Deslauriers Thu, 18 May 2017 09:20:54 -0400 + +qemu (1:2.8+dfsg-3ubuntu2.1) zesty-security; urgency=medium + + * SECURITY UPDATE: DoS in virtio GPU device + - debian/patches/CVE-2016-10028.patch: check virgl capabilities + max_size in hw/display/virtio-gpu-3d.c. + - CVE-2016-10028 + * SECURITY UPDATE: DoS in JAZZ RC4030 chipset emulation + - debian/patches/CVE-2016-8667.patch: limit interval timer reload value + in hw/dma/rc4030.c. + - CVE-2016-8667 + * SECURITY UPDATE: host filesystem access via virtFS + - debian/patches/CVE-2016-9602.patch: don't follow symlinks in + hw/9pfs/*. + - CVE-2016-9602 + * SECURITY UPDATE: arbitrary code execution via Cirrus VGA + - debian/patches/CVE-2016-9603.patch: remove bitblit support from + console code in hw/display/cirrus_vga.c, include/ui/console.h, + ui/console.c, ui/vnc.c. + - CVE-2016-9603 + * SECURITY UPDATE: information leak in virtio GPU device + - debian/patches/CVE-2016-9908.patch: properly clear out memory in + hw/display/virtio-gpu-3d.c. + - CVE-2016-9908 + * SECURITY UPDATE: DoS via memory leak in virtio GPU device + - debian/patches/CVE-2016-9912.patch: properly free memory in + hw/display/virtio-gpu.c. + - CVE-2016-9912 + * SECURITY UPDATE: DoS via virtFS + - debian/patches/CVE-2016-9914.patch: add cleanup operations to + fsdev/file-op-9p.h, hw/9pfs/9p.c. + - CVE-2016-9914 + * SECURITY UPDATE: DoS via memory leak in virtio GPU device + - debian/patches/CVE-2017-5552.patch: check return value in + hw/display/virtio-gpu-3d.c. + - CVE-2017-5552 + * SECURITY UPDATE: DoS via memory leak in virtio GPU device + - debian/patches/CVE-2017-5578.patch: check res->iov in + hw/display/virtio-gpu.c. + - CVE-2017-5578 + * SECURITY UPDATE: DoS via infinite loop in SDHCI device emulation + - debian/patches/CVE-2017-5987-*.patch: fix transfer mode register + handling in hw/sd/sdhci.c. 
+ - CVE-2017-5987 + * SECURITY UPDATE: DoS via infinite loop in USB OHCI emulation + - debian/patches/CVE-2017-6505.patch: limit the number of link eds in + hw/usb/hcd-ohci.c. + - CVE-2017-6505 + + -- Marc Deslauriers Mon, 24 Apr 2017 07:30:11 -0400 + +qemu (1:2.8+dfsg-3ubuntu2) zesty; urgency=medium + + * d/p/ubuntu/spapr-pci-populate-PCI-DT-in-reverse-order.patch: backport + "spapr/pci: populate PCI DT in reverse order" (LP: #1670481). + + -- Christian Ehrhardt Tue, 07 Mar 2017 09:23:08 +0100 + +qemu (1:2.8+dfsg-3ubuntu1) zesty; urgency=medium + + * Merge with Debian; + This fixes several CVEs that were reported against qemu 2.8 and also + includes a few important functional backports (LP: #1667033); remaining + changes: + - add qemu-kvm init script and defaults file + (d/qemu-system-common.qemu-kvm.*) + - d/rules, d/qemu-kvm-init: add and install script loading kvm + modules and handling /etc/default/qemu-kvm + - qemu-system-common.preinst: add kvm group if needed + - Enable nesting by default on intel. + - set default module option + - re-load kvm_intel.ko if it was loaded without nested=1 + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by + default in qemu64 cpu type. + - Enable svm by default for qemu64 on amd + - d/p/ubuntu/define-ubuntu-machine-types.patch, d/qemu-system-x86.NEWS: + define distro machine types to ease future live vm migration (includes + all former follow up fixes). + - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - s390x support + - Create qemu-system-s390x package + - Include s390-ccw.img firmware + - qemu-system-common.postinst: + - change acl placed by udev, and add udevadm trigger. + - d/qemu-kvm-init, d/kvm.powerpc, d/control-in: check SMT on ppc64el + - Several changes were applied but missing in the changelog so far + - d/qemu-system-ppc.links provide usr/bin/qemu-system-ppc64le symlink + - arch aware kvm wrapper + - update VCS links + - let qemu-utils recommend sharutils + - disable x32 architecture + - Enable seccomp for ppc64el + - Enable numa support for s390x + - d/qemu-system-common.qemu-kvm.init: fix lintian error type + init.d-script-missing-dependency-on-remote_fs + - d/qemu-system-common.postinst: fix lintian error type + command-with-path-in-maintainer-script + - Transition qemu-kvm to a systemd unit + - d/qemu-kvm-init, d/kvm.powerpc ppc64el SMT check avoid unwanted output + - d/qemu-kvm-init, d/kvm.powerpc ppc64el SMT check keep output local so + that it shows up where the user expects (sytemctl status, kvm stdout) + - d/qemu-kvm-init ppc64el warn on expected second level kvm-hv load failure + - add arch aware kvm wrapper for s390x + * Dropped Changes (in Debian now): + - d/p/ubuntu/ctrl-a-b-fix-fb5e19d2.patch: char: fix ctrl-a b not working + - d/control-in: change dependencies for fix of wrong acl for newly + created device node on ubuntu + - have qemu-system-arm suggest: qemu-efi; this should be a stronger + relationship, but qemu-efi is still in universe right now. + - Disable glusterfs (Universe dependency) + - no more skip disable libiscsi on Ubuntu + - d/rules, d/control-in: avoid people editing d/control + * Added Changes: + - d/control: bump libseccomp-dev dependency as enabling libseccomp for + power makes 2.3 the minimum level. 
+ + -- Christian Ehrhardt Wed, 01 Mar 2017 14:23:16 +0100 + +qemu (1:2.8+dfsg-3) unstable; urgency=high + + * urgency high due to security fixes + + [ Michael Tokarev ] + * serial-fix-memory-leak-in-serial-exit-CVE-2017-5579.patch + Closes: #853002, CVE-2017-5579 + * cirrus-ignore-source-pitch-as-needed-in-blit_is_unsafe.patch + (needed for the next patch, CVE-2017-2620 fix) + * cirrus-add-blit_is_unsafe-to-cirrus_bitblt_cputovideo-CVE-2017-2620.patch + Closes: #855791, CVE-2017-2620 + * nbd_client-fix-drop_sync-CVE-2017-2630.diff + Closes: #855227, CVE-2017-2630 + * sd-sdhci-check-transfer-mode-register-in-multi-block-CVE-2017-5987.patch + Closes: #855159, CVE-2017-5987 + * vmxnet3-fix-memory-corruption-on-vlan-header-stripping-CVE-2017-6058.patch + Closes: #855616, CVE-2017-6058 + * 3 CVE fixes from upstream for #853996: + sd-sdhci-check-data-length-during-dma_memory_read-CVE-2017-5667.patch + megasas-fix-guest-triggered-memory-leak-CVE-2017-5856.patch + virtio-gpu-fix-resource-leak-in-virgl_cmd_resource-CVE-2017-5857.patch + Closes: #853996, CVE-2017-5667, CVE-2017-5856, CVE-2017-5857 + * usb-ccid-check-ccid-apdu-length-CVE-2017-5898.patch + Closes: #854729, CVE-2017-5898 + * virtio-crypto-fix-possible-integer-and-heap-overflow-CVE-2017-5931.patch + Closes: #854730, CVE-2017-5931 + * xhci-apply-limits-to-loops-CVE-2017-5973.patch + Closes: #855611, CVE-2017-5973 + * net-imx-limit-buffer-descriptor-count-CVE-2016-7907.patch + Closes: #839986, CVE-2016-7907 + * cirrus-fix-oob-access-issue-CVE-2017-2615.patch + Closes: #854731, CVE-2017-2615 + * 9pfs-symlink-attack-fixes-CVE-2016-9602.patch + Closes: #853006 + * vnc-do-not-disconnect-on-EAGAIN.patch + Closes: #854032 + * xhci-fix-event-queue-IRQ-handling.patch (win7 xhci issue fix) + * xhci-only-free-completed-transfers.patch + Closes: #855659 + * char-fix-ctrl-a-b-not-working.patch + Closes: https://bugs.launchpad.net/bugs/1654137 + * char-drop-data-written-to-a-disconnected-pty.patch + Closes: https://bugs.launchpad.net/bugs/1667033 + * s390x-use-qemu-cpu-model-in-user-mode.patch + Closes: #854893 + * d/control is autogenerated, add comment + * check if debootstrap is available in qemu-debootstrap + Closes: #846497 + + [ Christian Ehrhardt ] + * (ubuntu) no more skip enable libiscsi (now in main) + * (ubuntu) Disable glusterfs (Universe dependency) + * (ubuntu) have qemu-system-arm suggest: qemu-efi; + this should be a stronger relationship, but qemu-efi is still + in universe right now. + * (ubuntu) change dependencies for fix of wrong acl for newly + created device node on ubuntu + + -- Michael Tokarev Tue, 28 Feb 2017 11:40:18 +0300 + +qemu (1:2.8+dfsg-2ubuntu1) zesty; urgency=medium + + * Merge with Debian; remaining changes: + - add qemu-kvm init script and defaults file + (d/qemu-system-common.qemu-kvm.*) + - d/rules, d/qemu-kvm-init: add and install script loading kvm + modules and handling /etc/default/qemu-kvm + - qemu-system-common.preinst: add kvm group if needed + - Enable nesting by default on intel. + - set default module option + - re-load kvm_intel.ko if it was loaded without nested=1 + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by + default in qemu64 cpu type. + - Enable svm by default for qemu64 on amd + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types to ease future live vm migration. 
+ - Make qemu-system-common depend on qemu-block-extra + - Make qemu-utils depend on qemu-block-extra + - s390x support + - Create qemu-system-s390x package + - Include s390-ccw.img firmware + - qemu-system-common.postinst: + - change acl placed by udev, and add udevadm trigger. + - d/control-in: change dependencies for fix of wrong acl for newly + created device node on ubuntu + - have qemu-system-arm suggest: qemu-efi; this should be a stronger + relationship, but qemu-efi is still in universe right now. + - d/qemu-kvm-init, d/kvm.powerpc, d/control-in: check SMT on ppc64el + - Several changes were applied but missing in the changelog so far + - d/qemu-system-ppc.links provide usr/bin/qemu-system-ppc64le symlink + - arch aware kvm wrapper + - update VCS links + - no more skip disable libiscsi on Ubuntu + - let qemu-utils recommend sharutils + - disable x32 architecture + * Dropped Changes: + - Several changes were applied but missing in the changelog so far + but are no more needed + - no pie for relocatable LD calls, with toolchain defaulting to + pie (fixed upstream) + - enable libnuma-dev (now in Debian) + - transition for moved init scripts (can be dropped after LTS + containing >=2.5 which is Xenial) + - --enable-seccomp related whitespace change (had no effect) + - apport hook for qemu source package (In Debian) + - add upstart script (d/qemu-system-common.qemu-kvm.upstart) + - d/qemu-system-x86.maintscript: transition off of + /etc/init.d/qemu-system-x86 (can be dropped after Xenial) + - Enable pie by default, on ubuntu/s390x. (Is the default since + >=Xenial, no cloud archive backport <=Xenial to consider) + - no pie for relocatable LD calls (fixed upstream in commit + 7ecf44a5) + - CVEs: CVE-2016-5403, CVE-2016-6351, CVE-2016-6490 (now Upstream) + - Revert fix for CVE-2016-5403, causes regression see USN-3047-2. 
+ (Improved fix included by upstream) + - Enable GPU Passthru for ppc64le (is upstream in qemu 2.7) + - Fixed wrong migration blocker when vhost is used (is upstream in + qemu 2.8) + * Added Changes: + - d/rules, d/control-in: avoid people editing d/control by warning + header and non writable permissions + - fixed moving trusty machine type definition which made it + ambiguous (LP: #1641532) + - d/qemu-system-x86.NEWS describe the issue + - Enable seccomp for ppc64el (LP: #1644639) + - Enable numa support for s390x + - d/qemu-system-common.qemu-kvm.init: fix lintian error type + init.d-script-missing-dependency-on-remote_fs + - d/qemu-system-common.postinst: fix lintian error type + command-with-path-in-maintainer-script + - Transition qemu-kvm to a systemd unit + - Disable glusterfs (Universe dependency) + - d/qemu-kvm-init, d/kvm.powerpc ppc64el SMT check avoid unwanted output + - d/qemu-kvm-init, d/kvm.powerpc ppc64el SMT check keep output local so + that it shows up where the user expects (systemctl status, kvm stdout) + - d/qemu-kvm-init ppc64el warn on expected second level kvm-hv load failure + - add arch aware kvm wrapper for s390x + - d/p/ubuntu/ctrl-a-b-fix-fb5e19d2.patch: char: fix ctrl-a b not working + - Enable DDW in Yakkety machine type because "Enable GPU Passthru for + ppc64le" was released as part of qemu 2.6 (can be dropped at 18.10, + merged in d/p/ubuntu/define-ubuntu-machine-types.patch) + + -- Christian Ehrhardt Mon, 16 Jan 2017 16:27:11 +0100 + +qemu (1:2.8+dfsg-2) unstable; urgency=medium + + * Revert "update binfmt registration for mipsn32" + Reopens: #829243 + Closes: #843032 + Will re-enable it for stretch+1, since for now upgrades + from jessie are broken (jessie comes with 3.16 kernel), + and there's no easy fix for this + * Revert "enable virtio gpu (virglrenderer) and opengl support" + Revert "switch from sdl1 to gtk3" + Revert other gtk2/drm/vte/virgl-related changes + Reopens: #813658, #839695 + The changes were too close to stretch release and too large, + bringing too much graphics stuff for headless servers, + will re-think this for stretch+1.
+ sdl1 back: Closes: #851509 + virtio-3d bugs: Closes: #849798, #852119 + * mention closing of #769983 (multi-threaded linux-user) by 2.7 + * mention closing of #842455, CVE-2016-9101 by 2.8 + * audio-ac97-add-exit-function-CVE-2017-5525.patch (Closes: #852021) + * audio-es1370-add-exit-function-CVE-2017-5526.patch (Closes: #851910) + * watchdog-6300esb-add-exit-function-CVE-2016-10155.patch (Closes: #852232) + + -- Michael Tokarev Mon, 23 Jan 2017 14:06:54 +0300 + +qemu (1:2.8+dfsg-1) unstable; urgency=medium + + * new upstream release + Closes: #837191, CVE-2016-7156 + Closes: #837316, CVE-2016-7170 + Closes: #839835, CVE-2016-7908 + Closes: #839834, CVE-2016-7909 + Closes: #840228, CVE-2016-7994 + Closes: #840236, CVE-2016-7995 + Closes: #840343, CVE-2016-8576 + Closes: #840341, CVE-2016-8577 + Closes: #840340, CVE-2016-8578 + Closes: #840948, CVE-2016-8668 + Closes: #840945, CVE-2016-8669 + Closes: #841950, CVE-2016-8909 + Closes: #841955, CVE-2016-8910 + Closes: #842463, CVE-2016-9102 CVE-2016-9103 CVE-2016-9104 + CVE-2016-9105 CVE-2016-9106 + Closes: #846797, CVE-2016-9776 + Closes: #847381, CVE-2016-9845 + Closes: #847382, CVE-2017-9846 + Closes: #847953, CVE-2016-9907 + Closes: #847400, CVE-2016-9908 + Closes: #847951, CVE-2016-9911 + Closes: #847391, CVE-2016-9912 + Closes: #847496, CVE-2016-9913 CVE-2016-9914 CVE-2016-9915 CVE-2016-9916 + Closes: #847960, CVE-2016-9921 CVE-2016-9922 + Closes: #847957, CVE-2016-9923 + Closes: #842455, CVE-2016-9101 (git2634ab7fe29b3f75d0865b719caf8f310d634aae) + Closes: #819755, #833162 + Hopefully closes: #844361 + * remove unicore32 linux-user target, removed upstream + * remove all patches which were applied upstream (most of them) + * actually fix #841060 + * doc-don-t-mention-memory-it-is-m.patch, Closes: #833619 + * don't pass --enable-uuid (always enabled) + * build-depend on libncursesw5-dev, not libncurses5-dev + * install trace-events-all in qemu-system-common + * do not install qemu-tech.html (not provided by upstream anymore) + * switch from sdl1 to gtk3 (Closes: #839695) + * enable virtio gpu (virglrenderer) and opengl support (Closes: #813658) + * strip out -ldrm out of OPENGL_LIBS, since libdrm is actually not needed + * enable nfs support (libnfs-dev), in qemu-block-extra + * enable glusterfs support (glusterfs-common), in qemu-block-extra + (Closes: #775431) + * enable numa support (libnuma-dev) (Closes: #758189) + + -- Michael Tokarev Wed, 28 Dec 2016 15:31:37 +0300 + +qemu (1:2.7+dfsg-3) unstable; urgency=medium + + * add PIE.patch to change loadable modules linker flags, from Adrian + (Closes: #837574) + * linux-user-fix-s390x-safe-syscall-for-z900.patch - fix FTBFS on s390x + * mention CVE-2016-7466 for 2.7+dfsg-1 (Closes: #838687, CVE-2016-7466) + + -- Michael Tokarev Thu, 27 Oct 2016 19:38:01 +0300 + +qemu (1:2.7+dfsg-2) unstable; urgency=medium + + * fix distribution field in previous changelog entry + * add depends: on seabios >= 1.9 with linuxboot_dma.bin + (Closes: #840853, #841060, #842161) + * add more links for openbios-sparc to qemu-system-sparc, + bump dependency (Closes: #827456) + * include license for qemu logo files (Closes: #785362) + + -- Michael Tokarev Wed, 26 Oct 2016 20:04:15 +0300 + +qemu (1:2.7+dfsg-1) unstable; urgency=medium + + * Acknowledge the previous NMU. Thank you Andrew! 
+ * New upstream release, 2.7 (Closes: #748043, #839292) + Closes: #838850, CVE-2016-7161 + Closes: #473240 (qcow encryption support has been removed) + Closes: #769983 (multi-threaded linux-user) + * removed patches which went upstream, refreshed use-data-path.patch + * renamed remaining patches to include CVE#s and added Bug-Debian headers + * added Depends on lsb-base to qemu-guest-agent (Closes: #840740) + * update binfmt registration for mipsn32 (Closes: #829243) + Thank you Adam Borowski for investigation and the patch + * replace CVE-2016-7156 (#837339) patch with actual code from upstream + * scsi-mptsas-use-g_new0-to-allocate-MPTSASRequest-obj-CVE-2016-7423.patch + (Closes: #838145, CVE-2016-7423) + * virtio-add-check-for-descriptor-s-mapped-address-CVE-2016-7422.patch + (Closes: #838146, CVE-2016-7422) + * scsi-pvscsi-limit-process-IO-loop-to-ring-size-CVE-2016-7421.patch + (Closes: #838147, CVE-2016-7421) + * usb-xhci-fix-memory-leak-in-usb_xhci_exit-CVE-2016-7466.patch + (Closes: #838687, CVE-2016-7466) + + -- Michael Tokarev Fri, 14 Oct 2016 13:31:40 +0300 + +qemu (1:2.6.1+dfsg-0ubuntu5) yakkety; urgency=medium + + * No-change rebuild to compile against new libxen version. + + -- Stefan Bader Fri, 30 Sep 2016 14:24:37 +0200 + +qemu (1:2.6.1+dfsg-0ubuntu4) yakkety; urgency=medium + + * retain older xenial machine type to avoid issues starting guests + created on xenial prior to the SRU for bug 1621042. In that regard the old + broken xenial machine type and the new fixed one have both to be considered + as valid LTS machine types (LP: #1626070). + + -- Christian Ehrhardt Wed, 21 Sep 2016 14:57:09 +0200 + +qemu (1:2.6.1+dfsg-0ubuntu3) yakkety; urgency=medium + + * fix default ubuntu machine types. (LP: #1621042) + - add dep3 header to d/p/ubuntu/define-ubuntu-machine-types.patch + - remove double default and double ubuntu alias + - drop former devel releases utopic, vivid, wily + - add xenial and yakkety machine types + - add q35 based ubuntu machine type starting at xenial + - add ubuntu machine types on ppc64el and s390x starting at xenial + + -- Christian Ehrhardt Mon, 19 Sep 2016 07:50:50 +0200 + +qemu (1:2.6.1+dfsg-0ubuntu2) yakkety; urgency=medium + + * Enable GPU Passthru for ppc64le (LP: #1541902) + - 0001-spapr-ensure-device-trees-are-always-associated-with.patch + - 0002-spapr_pci-Use-correct-DMA-LIOBN-when-composing-the-d.patch + - 0003-spapr_iommu-Finish-renaming-vfio_accel-to-need_vfio.patch + - 0004-spapr_iommu-Move-table-allocation-to-helpers.patch + - 0005-vmstate-Define-VARRAY-with-VMS_ALLOC.patch + - 0006-spapr_iommu-Introduce-enabled-state-for-TCE-table.patch + - 0007-spapr_iommu-Migrate-full-state.patch + - 0008-spapr_iommu-Add-root-memory-region.patch + - 0009-spapr_pci-Reset-DMA-config-on-PHB-reset.patch + - 0010-spapr_pci-Add-and-export-DMA-resetting-helper.patch + - 0011-memory-Add-reporting-of-supported-page-sizes.patch + - 0012-memory-Add-MemoryRegionIOMMUOps.notify_started-stopp.patch + - 0013-intel_iommu-Throw-hw_error-on-notify_started.patch + - 0014-spapr_iommu-Realloc-guest-visible-TCE-table-when-sta.patch + - 0015-vfio-spapr-Add-DMA-memory-preregistering-SPAPR-IOMMU.patch + - 0016-vfio-Add-host-side-DMA-window-capabilities.patch + - 0017-vfio-spapr-Create-DMA-window-dynamically-SPAPR-IOMMU.patch + - 0018-spapr_pci-spapr_pci_vfio-Support-Dynamic-DMA-Windows.patch + - 0019-vfio-spapr-Remove-stale-ioctl-call.patch + - 0020-spapr-Fix-undefined-behaviour-in-spapr_tce_reset.patch + - 0021-memory-Fix-IOMMU-replay-base-address.patch + + -- Jon Grimm Fri, 
16 Sep 2016 14:14:47 -0500 + +qemu (1:2.6.1+dfsg-0ubuntu1) yakkety; urgency=medium + + * New upstream release. LP: #1617055. + * Revert fix for CVE-2016-5403, causes regression see USN-3047-2. + + -- Dimitri John Ledkov Fri, 09 Sep 2016 23:33:57 +0100 + +qemu (1:2.6+dfsg-3.1) unstable; urgency=high + + * Non-maintainer upload. + * Security fixes from upstream: + - virtio-error-out-if-guest-exceeds-virtqueue-size-CVE-2015-5403.patch + (Closes: #832619, CVE-2015-5403) + - scsi-pvscsi-avoid-infinite-loop-while-building-SG-list.patch + (Closes: #837339, CVE-2016-7156) + - scsi-pvscsi-check-page-count-while-initialising-descriptor-rings.patch + (Closes: #837174, CVE-2016-7155) + - CVE-2016-6351: scsi-esp-make-cmdbuf-big-enough-for-maximum-CDB-size.patch + and scsi-esp-fix-migration.patch (Closes: #832621, CVE-2016-6351) + - virtio-check-vring-descriptor-buffer-length.patch + (Closes: #832767, CVE-2016-6490) + - net-vmxnet3-check-for-device_active-before-write.patch + (Closes: #834904, CVE-2016-6833) + - net-check-fragment-length-during-fragmentation.patch + (Closes: #834905, CVE-2016-6834) + - net-vmxnet-check-IP-header-length.patch (Closes: #835031, CVE-2016-6835) + - net-vmxnet-initialise-local-tx-descriptor.patch + (Closes: #834944, CVE-2016-6836) + - net-vmxnet-use-g_new-for-pkt-initialisation.patch + (Closes: #834902, CVE-2016-6888) + - CVE-2016-7116: 9pfs-forbid-.-and-.-in-file-names.patch, + 9pfs-forbid-illegal-path-names.patch and + 9pfs-handle-walk-of-.-in-the-root-directory.patch + (Closes: #836502, CVE-2016-7116) + - CVE-2016-7157: scsi-mptconfig-fix-an-assert-expression.patch and + scsi-mptconfig-fix-misuse-of-MPTSAS_CONFIG_PACK.patch + (Closes: #837603, CVE-2016-7157) + + -- Andrew James Wed, 14 Sep 2016 00:56:18 -0600 + +qemu (1:2.6+dfsg-3ubuntu2) yakkety; urgency=medium + + * SECURITY UPDATE: DoS via unbounded memory allocation + - debian/patches/CVE-2016-5403.patch: check size in hw/virtio/virtio.c. + - CVE-2016-5403 + * SECURITY UPDATE: oob write access while reading ESP command + - debian/patches/CVE-2016-6351.patch: make cmdbuf big enough for + maximum CDB size and handle migration in hw/scsi/esp.c, + include/hw/scsi/esp.h, include/migration/vmstate.h. + - CVE-2016-6351 + * SECURITY UPDATE: infinite loop in virtqueue_pop + - debian/patches/CVE-2016-6490.patch: check vring descriptor buffer + length in hw/virtio/virtio.c. + - CVE-2016-6490 + + -- Marc Deslauriers Wed, 03 Aug 2016 08:36:16 -0400 + +qemu (1:2.6+dfsg-3ubuntu1) yakkety; urgency=medium + + * Merge with Debian; remaining changes: + - debian/rules: do not drop the init scripts loading kvm modules + (still needed in precise in cloud archive) + - qemu-system-common.postinst: + * remove acl placed by udev, and add udevadm trigger. + * reload kvm_intel if needed to set nested=1 + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types to ease future live vm migration. + - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + - Make qemu-system-common and qemu-utils depend on qemu-block-extra + to fix errors with missing block backends. 
+ - s390x: + * Create qemu-system-s390x package + * Enable pie by default, on ubuntu/s390x. + * Enable svm by default for qemu64 on amd + * Include s390-ccw.img firmware + * have qemu-system-aarch64 Suggest: qemu-efi; this should be a stronger + relationship, but qemu-efi is still in universe right now. + + -- Serge Hallyn Wed, 15 Jun 2016 16:49:49 -0500 + +qemu (1:2.6+dfsg-3) unstable; urgency=high + + * more security fixes picked from upstream: + - CVE-2016-4454 fix (vmsvga) (Closes: CVE-2016-4454) + vmsvga-add-more-fifo-checks-CVE-2016-4454.patch + vmsvga-move-fifo-sanity-checks-to-vmsvga_fifo_length-CVE-2016-4454.patch + vmsvga-shadow-fifo-registers-CVE-2016-4454.patch + - vmsvga-don-t-process-more-than-1024-fifo-commands-at-once-CVE-2016-4453.patch + (Closes: CVE-2016-4453) + - scsi-check-buffer-length-before-reading-scsi-command-CVE-2016-5238.patch + (Closes: #826152, CVE-2016-5238) + * set urgency to high due to the amount of + security fixes accumulated so far + + -- Michael Tokarev Wed, 15 Jun 2016 08:54:12 +0300 + +qemu (1:2.6+dfsg-2) unstable; urgency=medium + + * add missing log entries for previous upload, + remove closing of #807006 (it is not closed) + * Added vga-add-sr_vbe-register-set.patch from upstream + This fixes regression (in particular with win7 installer) + introduced by the fix for CVE-2016-3712 (commit fd3c136) + * fix-linking-relocatable-objects-on-sparc.patch (Closes: #807006) + * Lots of security patches from upstream: + - net-mipsnet-check-packet-length-against-buffer-CVE-2016-4002.patch + (Closes: #821061, CVE-2016-4002) + - i386-kvmvapic-initialise-imm32-variable-CVE-2016-4020.patch + (Closes: #821062, CVE-2016-4020) + - esp-check-command-buffer-length-before-write-CVE-2016-4439.patch, + esp-check-dma-length-before-reading-scsi-command-CVE-2016-4441.patch + (Closes: #824856, CVE-2016-4439, CVE-2016-4441) + - scsi-mptsas-infinite-loop-while-fetching-requests-CVE-2016-4964.patch + (Closes: #825207, CVE-2016-4964) + - scsi-pvscsi-check-command-descriptor-ring-buffer-size-CVE-2016-4952.patch + (Closes: #825210, CVE-2016-4952) + - scsi-megasas-use-appropriate-property-buffer-size-CVE-2016-5106.patch + (Closes: #825615, CVE-2016-5106) + - scsi-megasas-initialise-local-configuration-data-buffer-CVE-2016-5105.patch + (Closes: #825614, CVE-2016-5105) + - scsi-megasas-check-read_queue_head-index-value-CVE-2016-5107.patch + (Closes: #825616, CVE-2016-5107) + - block-iscsi-avoid-potential-overflow-of-acb-task-cdb-CVE-2016-5126.patch + (Closes: #826151, CVE-2016-5126) + - scsi-esp-check-TI-buffer-index-before-read-write-CVE-2016-5338.patch + (Closes: #827024, CVE-2016-5338) + - scsi-megasas-null-terminate-bios-version-buffer-CVE-2016-5337.patch + (Closes: #827026, CVE-2016-5337) + * hw-dma-omap-spelling-fix-endianness.patch (lintian) + * arm-spelling-fix-mismatch.patch (lintian) + + -- Michael Tokarev Mon, 13 Jun 2016 12:10:44 +0300 + +qemu (1:2.6+dfsg-1ubuntu1) yakkety; urgency=medium + + * Merge with Debian; remaining changes: (LP: #1583775) + - debian/rules: do not drop the init scripts loading kvm modules + (still needed in precise in cloud archive) + - qemu-system-common.postinst: + * remove acl placed by udev, and add udevadm trigger. 
+ * reload kvm_intel if needed to set nested=1 + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types to ease future live vm migration. + - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + - Make qemu-system-common and qemu-utils depend on qemu-block-extra + to fix errors with missing block backends. (LP: #1495895) + - s390x: + * Create qemu-system-s390x package + * Enable pie by default, on ubuntu/s390x. + * Enable svm by default for qemu64 on amd + * Include s390-ccw.img firmware + * have qemu-system-aarch64 Suggest: qemu-efi; this should be a stronger + relationship, but qemu-efi is still in universe right now. + * Drop patches which have been applied upstream: + + -- Serge Hallyn Thu, 19 May 2016 12:11:36 -0500 + +qemu (1:2.6+dfsg-1) unstable; urgency=medium + + * new upstream release + Closes: #799115 + Closes: #822369, #823588 + Closes: #813698 + Closes: #805827 + Closes: #813585 + Closes: #823830 CVE-2016-3710 CVE-2016-3712 + Closes: #813193 CVE-2016-2198 + Closes: #813194 CVE-2016-2197 + Closes: #815008 CVE-2016-2392 + Closes: #815009 CVE-2016-2391 + Closes: #815680 CVE-2016-2538 + Closes: #821038 CVE-2016-4001 + Closes: #822344 CVE-2016-4037 + Closes: #817181 CVE-2016-2841 + Closes: #817182 CVE-2016-2857 + Closes: #817183 CVE-2016-2858 + - removed all patches applied upstream + - removed mjt-set-oem-in-rsdt-like-slic.diff, feature has been + implemented in upstream differently + - refreshed local patches + * do not recommend sharutils for qemu-utils anymore (Closes: #820449) + * typo fix in qemu-system-misc description (Closes: #822883) + * allow qemu-debootstrap to create mips64el chroot (Closes: #817234) + * switch VCS URLs from http to https (lintian) + * Bump Standards-Version to 3.9.8 (no changes) + * code spelling fixes from upstream + * added s390x-virtio-ccw-fix-spelling.patch from upstream + * added hw-ipmi-fix-spelling.patch from upstream + * added docs-specify-spell-fix.patch from upstream + * added fsdev-spelling-fix.patch from upstream + * fold long list of supported arches in package descriptions + + -- Michael Tokarev Wed, 18 May 2016 14:44:14 +0300 + +qemu (1:2.5+dfsg-5ubuntu12) yakkety; urgency=medium + + * Cherrypick upstream patches to support the query-gic-version QMP command + (LP: #1566564) + + -- dann frazier Tue, 05 Apr 2016 16:56:11 -0600 + +qemu (1:2.5+dfsg-5ubuntu11) yakkety; urgency=medium + + [Stefan Bader] + * Enable svm by default for qemu64 on amd (LP: #1561019) + + -- Serge Hallyn Fri, 22 Apr 2016 16:53:55 -0500 + +qemu (1:2.5+dfsg-5ubuntu10) xenial; urgency=medium + + * qemu-system-s390x only available on s390x, so qemu-system should only + depend on it on this arch. + * have qemu-system-aarch64 Suggest: qemu-efi; this should be a stronger + relationship, but qemu-efi is still in universe right now. + + -- Steve Langasek Tue, 19 Apr 2016 13:41:37 -0700 + +qemu (1:2.5+dfsg-5ubuntu9) xenial; urgency=medium + + * And actually ship the right things in qemu-system-s390x. 
+ + -- Dimitri John Ledkov Tue, 19 Apr 2016 16:49:00 +0100 + +qemu (1:2.5+dfsg-5ubuntu8) xenial; urgency=medium + + * Create qemu-system-s390x package on ubuntu only. + + -- Dimitri John Ledkov Mon, 18 Apr 2016 10:16:19 +0100 + +qemu (1:2.5+dfsg-5ubuntu7) xenial; urgency=medium + + * Cherrypick patch from mailing list to fix qemu in sandbox. (LP: #1560149) + + -- Serge Hallyn Mon, 11 Apr 2016 15:13:06 -0500 + +qemu (1:2.5+dfsg-5ubuntu6) xenial; urgency=medium + + * Cherrypick upstream patch vhost-user-interrupt-management-fixes.patch + (LP: #1556306) + + -- Serge Hallyn Wed, 16 Mar 2016 16:35:22 -0700 + +qemu (1:2.5+dfsg-5ubuntu5) xenial; urgency=medium + + * Cherrypick upstream patch to fix snapshot regression (LP: #1533728) + + -- Serge Hallyn Mon, 07 Mar 2016 18:53:34 -0800 + +qemu (1:2.5+dfsg-5ubuntu4) xenial; urgency=medium + + * d/control{-in}: Re-generate and build with libiscsi-dev now + that its in Ubuntu main (LP: #1271653). + + -- James Page Wed, 24 Feb 2016 17:59:13 +0000 + +qemu (1:2.5+dfsg-5ubuntu3) xenial; urgency=medium + + * Make -no-pie conditional, on $(CC) supporting -no-pie flag. + + -- Dimitri John Ledkov Wed, 24 Feb 2016 14:40:19 +0000 + +qemu (1:2.5+dfsg-5ubuntu2) xenial; urgency=medium + + * No-change rebuild for gnutls transition. + + -- Matthias Klose Wed, 17 Feb 2016 22:27:20 +0000 + +qemu (1:2.5+dfsg-5ubuntu1) xenial; urgency=medium + + * Merge with Debian; remaining changes: + - debian/rules: do not drop the init scripts loading kvm modules + (still needed in precise in cloud archive) + - qemu-system-common.postinst: + * remove acl placed by udev, and add udevadm trigger. + * reload kvm_intel if needed to set nested=1 + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types to ease future live vm migration. + - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + - Make qemu-system-common and qemu-utils depend on qemu-block-extra + to fix errors with missing block backends. (LP: #1495895) + - Enable pie by default, on ubuntu/s390x. + - Include s390-ccw.img firmware. 
+ + -- Serge Hallyn Tue, 09 Feb 2016 10:24:49 -0800 + +qemu (1:2.5+dfsg-5) unstable; urgency=medium + + * fix misspellings in previous debian/changelog entry + * e1000-eliminate-infinite-loops-on-out-of-bounds-start-CVE-2016-1981.patch + (Closes: #812307, CVE-2016-1981) + * hmp-fix-sendkey-out-of-bounds-write-CVE-2015-8619.patch + (Closes: #809237, CVE-2015-8619) + * use `command -v' instead of `type' to check for command existence + + -- Michael Tokarev Thu, 28 Jan 2016 18:39:21 +0300 + +qemu (1:2.5+dfsg-4) unstable; urgency=medium + + * change misspelling of won't in NEWS (lintian) + * two patches from upstream to enable sigaltstack syscall (linux-user) + (Closes: #805826) + * word-wrapped last entry in debian/changelog + * use type to find out whenever update-binfmts is available + * fw_cfg-add-check-to-validate-current-entry-value-CVE-2016-1714.patch + (Partial) patch targeted 2.3 which fixes the read side of the issue + (Closes: CVE-2016-1714) + * i386-avoid-null-pointer-dereference-CVE-2016-1922.patch + (Closes: #811201, CVE-2016-1922) + + -- Michael Tokarev Thu, 21 Jan 2016 13:06:06 +0300 + +qemu (1:2.5+dfsg-3) unstable; urgency=high + + [ Aurelien Jarno ] + * debian/copyright: + fix a spelling error reported by lintian: dependecy -> dependency. + + [ Michael Tokarev ] + * net-vmxnet3-avoid-memory-leakage-in-activate_device patch + (Closes: #808145, CVE-2015-8567, CVE-2015-8568) + * scsi-initialise-info-object-with-appropriate-size-CVE-2015-8613.patch + (Closes: #809232, CVE-2015-8613) + * net-rocker-fix-an-incorrect-array-bounds-check-CVE-2015-8701.patch + (Closes: #809313, CVE-2015-8701) + + -- Michael Tokarev Sun, 10 Jan 2016 10:59:46 +0300 + +qemu (1:2.5+dfsg-2) unstable; urgency=high + + * ehci-make-idt-processing-more-robust-CVE-2015-8558.patch + (Closes: #808144, CVE-2015-8558) + * virtio-9p-use-accessor-to-get-thread_pool.patch (Closes: #808357) + * two upstream patches from xsa-155 fixing unsafe shared memory access in xen + (Closes: #809229, CVE-2015-8550) + * net-ne2000-fix-bounds-check-in-ioport-operations-CVE-2015-8743.patch + (Closes: #810519, CVE-2015-8743) + * ide-ahci-reset-ncq-object-to-unused-on-error-CVE-2016-1568.patch + (Closes: #810527, CVE-2016-1568) + * changed build-depends from libpng12-dev to libpng-dev (Closes: #810205) + + -- Michael Tokarev Sat, 09 Jan 2016 21:40:43 +0300 + +qemu (1:2.5+dfsg-1ubuntu5) xenial; urgency=medium + + * SECURITY UPDATE: paravirtualized drivers incautious about shared memory + contents + - debian/patches/CVE-2015-8550-1.patch: avoid double access in + hw/block/xen_blkif.h. + - debian/patches/CVE-2015-8550-2.patch: avoid reading twice in + hw/display/xenfb.c. + - CVE-2015-8550 + * SECURITY UPDATE: infinite loop in ehci_advance_state + - debian/patches/CVE-2015-8558.patch: make idt processing more robust + in hw/usb/hcd-ehci.c. + - CVE-2015-8558 + * SECURITY UPDATE: host memory leakage in vmxnet3 + - debian/patches/CVE-2015-856x.patch: avoid memory leakage in + hw/net/vmxnet3.c. + - CVE-2015-8567 + - CVE-2015-8568 + * SECURITY UPDATE: buffer overflow in megasas_ctrl_get_info + - debian/patches/CVE-2015-8613.patch: initialise info object with + appropriate size in hw/scsi/megasas.c. + - CVE-2015-8613 + * SECURITY UPDATE: DoS via Human Monitor Interface + - debian/patches/CVE-2015-8619.patch: fix sendkey out of bounds write + in hmp.c, include/ui/console.h, ui/input-legacy.c. 
+ - CVE-2015-8619 + * SECURITY UPDATE: incorrect array bounds check in rocker + - debian/patches/CVE-2015-8701.patch: fix an incorrect array bounds + check in hw/net/rocker/rocker.c. + - CVE-2015-8701 + * SECURITY UPDATE: ne2000 OOB r/w in ioport operations + - debian/patches/CVE-2015-8743.patch: fix bounds check in ioport + operations in hw/net/ne2000.c. + - CVE-2015-8743 + * SECURITY UPDATE: ahci use-after-free vulnerability in aio port commands + - debian/patches/CVE-2016-1568.patch: reset ncq object to unused on + error in hw/ide/ahci.c. + - CVE-2016-1568 + * SECURITY UPDATE: DoS via null pointer dereference in vapic_write() + - debian/patches/CVE-2016-1922.patch: avoid null pointer dereference in + hw/i386/kvmvapic.c. + - CVE-2016-1922 + * SECURITY UPDATE: e1000 infinite loop + - debian/patches/CVE-2016-1981.patch: eliminate infinite loops on + out-of-bounds transfer start in hw/net/e1000.c + - CVE-2016-1981 + * SECURITY UPDATE: AHCI NULL pointer dereference when using FIS CLB + engines + - debian/patches/CVE-2016-2197.patch: add check before calling + dma_memory_unmap in hw/ide/ahci.c. + - CVE-2016-2197 + * SECURITY UPDATE: ehci null pointer dereference in ehci_caps_write + - debian/patches/CVE-2016-2198.patch: add capability mmio write + function in hw/usb/hcd-ehci.c. + - CVE-2016-2198 + + -- Marc Deslauriers Mon, 01 Feb 2016 09:39:01 -0500 + +qemu (1:2.5+dfsg-1ubuntu4) xenial; urgency=medium + + * debian/qemu-kvm-init: Call systemd-detect-virt instead of the + Ubuntu specific running-in-container wrapper. (LP: #1539016) + + -- Martin Pitt Thu, 28 Jan 2016 13:24:51 +0100 + +qemu (1:2.5+dfsg-1ubuntu3) xenial; urgency=high + + * Include s390-ccw.img firmware. + + -- Dimitri John Ledkov Tue, 12 Jan 2016 15:53:43 +0000 + +qemu (1:2.5+dfsg-1ubuntu2) xenial; urgency=medium + + * Place qemu-kvm.defaults file in qemu-system-common, next to the init + scripts. Fix the comparison operator when checking KVM_HUGEPAGES. + Thanks Simon. (LP: #1531191) + + -- Serge Hallyn Wed, 06 Jan 2016 09:45:37 -0800 + +qemu (1:2.5+dfsg-1ubuntu1) xenial; urgency=medium + + * Merge with Debian; remaining changes: + - debian/rules: do not drop the init scripts loading kvm modules + (still needed in precise in cloud archive) + - qemu-system-common.postinst: + * remove acl placed by udev, and add udevadm trigger. + * reload kvm_intel if needed to set nested=1 + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-ubuntu-machine-types.patch: define distro machine + types to ease future live vm migration. + - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + - Make qemu-system-common and qemu-utils depend on qemu-block-extra + to fix errors with missing block backends. (LP: #1495895) + - Enable pie by default, on ubuntu/s390x. 
+ * Drop vGICv3 support patches - all is now upstream + * debian/qemu-kvm-init: handle KVM_HUGEPAGES being unset (LP: #1531191) + + -- Serge Hallyn Tue, 05 Jan 2016 15:42:50 -0800 + +qemu (1:2.5+dfsg-1) unstable; urgency=medium + + * new upstream release + (Closes: #801158) + Closes: #806373 CVE-2015-8345 + Closes: #806742 CVE-2015-7504 + Closes: #806741 CVE-2015-7512 + Closes: #808131 CVE-2015-7549 + Closes: #808130 CVE-2015-8504 + * adopt for the new upstream: + - removed patches which are upstream now + - build-depend on libcacard-dev and stop requiring libtool + - removed libcacard refs from debian/qemu-system-common.docs + - moved qmp docs out of subdir following upstream + - removed pc-bios/vgabios-virtio.bin + * enable new linux-user target: tilegx + * install qemu-ga manpage + * install ivshmem-server and ivshmem-client to qemu-utils + * stop using cylinders/heads/sectors for sfdisk + in qemu-make-debian-root (Closes: #785470) + * modify qemu-make-debian-root to use some current tools + (this simplifies things, removes usage of uudecode) + (usefulness of this utility is questionable anyway) + + -- Michael Tokarev Wed, 16 Dec 2015 20:00:04 +0300 + +qemu (1:2.4+dfsg-5ubuntu3) xenial; urgency=high + + * Enable pie by default, on ubuntu/s390x. + + -- Dimitri John Ledkov Mon, 07 Dec 2015 16:04:16 +0000 + +qemu (1:2.4+dfsg-5ubuntu2) xenial; urgency=medium + + * undo the libseccomp delta from debian. libseccomp is indeed available + on other arches, but we need qemu's configure script to be fixed before + we can use it on anything other than amd64|i386. Fixes FTBFS. + (LP: #1522531) + + -- Serge Hallyn Thu, 03 Dec 2015 12:44:46 -0600 + +qemu (1:2.4+dfsg-5ubuntu1) xenial; urgency=medium + + * Merge with Debian; remaining changes: + - Update the ubuntu machine types patch to reflect upstream churn + - debian/rules: do not drop the init scripts loading kvm modules + (still needed in precise in cloud archive) + - qemu-system-common.postinst: + * remove acl placed by udev, and add udevadm trigger. + * reload kvm_intel if needed to set nested=1 + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-trusty-machine-type.patch: define a default trusty + machine type to ease future live vm migration. + - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + - Make qemu-system-common and qemu-utils depend on qemu-block-extra + to fix errors with missing block backends. (LP: #1495895) + - control-in: build with libseccomp an all architectures + - Add vGICv3 support + + -- Matthias Klose Wed, 02 Dec 2015 21:31:36 +0100 + +qemu (1:2.4+dfsg-5) unstable; urgency=medium + + * trace-remove-malloc-tracing.patch from upstream. + (Closes: #802633) + * stop building libcacard, as it is now in its own separate + source package and has been removed from upstream qemu in 2.5. + Here we just stop producing libcacard binaries, but still use + embedded libcacard source to link with it statically. In 2.5 + we will switch to external libcacard. 
(Closes: #805410) + + -- Michael Tokarev Sun, 29 Nov 2015 12:22:52 +0300 + +qemu (1:2.4+dfsg-4ubuntu3) xenial; urgency=medium + + * SECURITY UPDATE: loopback mode heap overflow vulnerability in pcnet + - debian/patches/CVE-2015-7504.patch: leave room for CRC code in + hw/net/pcnet.c. + - CVE-2015-7504 + * SECURITY UPDATE: non-loopback mode buffer overflow in pcnet + - debian/patches/CVE-2015-7512.patch: check packet length in + hw/net/pcnet.c. + - CVE-2015-7512 + * SECURITY UPDATE: infinite loop in eepro100 + - debian/patches/CVE-2015-8345.patch: prevent endless loop in + hw/net/eepro100.c. + - CVE-2015-8345 + + -- Marc Deslauriers Tue, 01 Dec 2015 13:36:40 -0500 + +qemu (1:2.4+dfsg-4ubuntu2) xenial; urgency=medium + + * d/p/u/define-ubuntu-machine-type.patch: Fix typo in utopic definition. + + -- dann frazier Tue, 03 Nov 2015 08:05:46 -0700 + +qemu (1:2.4+dfsg-4ubuntu1) xenial; urgency=medium + + * Merge 2.4 from unstable. Remaining changes: + - Update the ubuntu machine types patch to reflect upstream churn + - debian/rules: do not drop the init scripts loading kvm modules + (still needed in precise in cloud archive) + - qemu-system-common.postinst: + * remove acl placed by udev, and add udevadm trigger. + * reload kvm_intel if needed to set nested=1 + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-trusty-machine-type.patch: define a default trusty + machine type to ease future live vm migration. + - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + - Make qemu-system-common and qemu-utils depend on qemu-block-extra + to fix errors with missing block backends. (LP: #1495895) + - control-in: build with libseccomp an all architectures. + * Add vGICv3 support + + -- Serge Hallyn Tue, 27 Oct 2015 13:28:58 -0500 + +qemu (1:2.4+dfsg-4) unstable; urgency=medium + + * applied 3 patches from upstream to fix virtio-net + possible remote DoS (Closes: #799452 CVE-2015-7295) + * remove now-unused /etc/qemu too (Closes: #797608) + + -- Michael Tokarev Thu, 08 Oct 2015 20:30:03 +0300 + +qemu (1:2.4+dfsg-3) unstable; urgency=high + + * ne2000-add-checks-to-validate-ring-buffer-pointers-CVE-2015-5279.patch + fix for Heap overflow vulnerability in ne2000_receive() function + (Closes: #799074 CVE-2015-5279) + * ne2000-avoid-infinite-loop-when-receiving-packets-CVE-2015-5278.patch + (Closes: #799073 CVE-2015-5278) + * some binfmt reorg: + - extend aarch64 to include one more byte as other arches do + - set OSABI mask to 0xfc for i386, ppc*, s390x, sparc*, to recognize + OSABI=3 (GNU/Linux) in addition to NONE/SysV + (Closes: #784605, #794737) + - tighten sh4 & sh4eb, fixing OSABI mask to be \xfc not 0 + + -- Michael Tokarev Tue, 15 Sep 2015 19:30:18 +0300 + +qemu (1:2.4+dfsg-2) unstable; urgency=high + + * Add e1000-avoid-infinite-loop-in-transmit-CVE-2015-6815.patch. + CVE-2015-6815: net: e1000 infinite loop issue in processing transmit + descriptor. (Closes: #798101 CVE-2015-6815) + * Add ide-fix-ATAPI-command-permissions-CVE-2015-6855.patch. 
+ CVE-2015-6855: ide: qemu allows arbitrary commands to be sent to an ATAPI + device from guest, while illegal comands might have security impact, + f.e. WIN_READ_NATIVE_MAX results in divide by zero error. + (Closes: CVE-2015-6855) + + -- Michael Tokarev Fri, 11 Sep 2015 19:54:07 +0300 + +qemu (1:2.4+dfsg-1a) unstable; urgency=medium + + * new upstream (2.4.0) release + Closes: #795461, #793811, #794610, #795087, #794611, #793388 + CVE-2015-3214 CVE-2015-5154 CVE-2015-5165 CVE-2015-5745 + CVE-2015-5166 CVE-2015-5158 + Closes: #793817 + * removed all upstreamed patches + * remove --enable-vnc-ws option (not used anymore) + * update mjt-set-oem-in-rsdt-like-slic.diff + * vnc-fix-memory-corruption-CVE-2015-5225.patch from upstream + Closes: #796465 CVE-2015-5225 + * remove now-unused /etc/qemu/target-x86_64.conf + + -- Michael Tokarev Mon, 31 Aug 2015 16:28:08 +0300 + +qemu (1:2.3+dfsg-6a) unstable; urgency=medium + + * fix d/copyright leftover in previous upload + + -- Michael Tokarev Thu, 11 Jun 2015 20:31:07 +0300 + +qemu (1:2.3+dfsg-6) unstable; urgency=high + + * pcnet-force-buffer-access-to-be-in-bounds-CVE-2015-3209.patch + from upstream (Closes: #788460 CVE-2015-3209) + + -- Michael Tokarev Thu, 11 Jun 2015 20:03:40 +0300 + +qemu (1:2.3+dfsg-5ubuntu10) xenial; urgency=medium + + * debian/patches/fix-curses-with-xterm-256.patch (LP: #1508466) + + -- Ryan Harper Wed, 21 Oct 2015 08:59:29 -0500 + +qemu (1:2.3+dfsg-5ubuntu9) wily; urgency=low + + * debian/patches/upstream-fix-irq-route-entries.patch + Fix "kvm_irqchip_commit_routes: Assertion 'ret == 0' failed" + (LP: #1465935) + + -- Stefan Bader Fri, 09 Oct 2015 15:38:53 +0200 + +qemu (1:2.3+dfsg-5ubuntu8) wily; urgency=medium + + * Build using libseccomp on all architectures. + + -- Matthias Klose Sat, 03 Oct 2015 21:12:15 +0200 + +qemu (1:2.3+dfsg-5ubuntu7) wily; urgency=medium + + * SECURITY UPDATE: denial of service via NE2000 driver + - debian/patches/CVE-2015-5278.patch: fix infinite loop in + hw/net/ne2000.c. + - CVE-2015-5278 + * SECURITY UPDATE: denial of service and possible code execution via + heap overflow in NE2000 driver + - debian/patches/CVE-2015-5279.patch: validate ring buffer pointers in + hw/net/ne2000.c. + - CVE-2015-5279 + * SECURITY UPDATE: denial of service via e1000 infinite loop + - debian/patches/CVE-2015-6815.patch: check bytes in hw/net/e1000.c. + - CVE-2015-6815 + * SECURITY UPDATE: denial of service via illegal ATAPI commands + - debian/patches/CVE-2015-6855.patch: fix ATAPI command permissions in + hw/ide/core.c. + - CVE-2015-6855 + + -- Marc Deslauriers Wed, 23 Sep 2015 15:05:51 -0400 + +qemu (1:2.3+dfsg-5ubuntu6) wily; urgency=medium + + * Make qemu-system-common and qemu-utils depend on qemu-block-extra + to fix errors with missing block backends. (LP: #1495895) + * Cherry pick fixes for vmdk stream-optimized subformat (LP: #1006655) + * Apply fix for memory corruption during live-migration in tcg mode + (LP: #1493049) + * Apply tracing patch to remove use of custom vtable in newer glibc + (LP: #1491972) + + -- Ryan Harper Tue, 15 Sep 2015 09:37:23 -0500 + +qemu (1:2.3+dfsg-5ubuntu5) wily; urgency=medium + + * Import qcow2-handle-eagain-from-update_refcount from upstream + to fix errors when using qemu-img convert -c. (LP: #1491050) + + -- Serge Hallyn Fri, 04 Sep 2015 16:35:56 -0500 + +qemu (1:2.3+dfsg-5ubuntu4) wily; urgency=medium + + * SECURITY UPDATE: process heap memory disclosure + - debian/patches/CVE-2015-5165.patch: check sizes in hw/net/rtl8139.c. 
+ - CVE-2015-5165 + * SECURITY UPDATE: privilege escalation via block device unplugging + - debian/patches/CVE-2015-5166.patch: properly unhook from BlockBackend + in hw/ide/piix.c. + - CVE-2015-5166 + * SECURITY UPDATE: privilege escalation via memory corruption in vnc + - debian/patches/CVE-2015-5225.patch: use bytes per scanline to apply + limits in ui/vnc.c. + - CVE-2015-5225 + * SECURITY UPDATE: denial of service via virtio-serial + - debian/patches/CVE-2015-5745.patch: don't assume a specific layout + for control messages in hw/char/virtio-serial-bus.c. + - CVE-2015-5745 + + -- Marc Deslauriers Tue, 25 Aug 2015 09:38:43 -0400 + +qemu (1:2.3+dfsg-5ubuntu3) wily; urgency=medium + + * SECURITY UPDATE: out-of-bounds memory access in pit_ioport_read() + - debian/patches/CVE-2015-3214.patch: ignore read in hw/timer/i8254.c. + - CVE-2015-3214 + * SECURITY UPDATE: heap overflow when processing ATAPI commands + - debian/patches/CVE-2015-5154.patch: check bounds and clear DRQ in + hw/ide/core.c, make sure command is completed in hw/ide/atapi.c. + - CVE-2015-5154 + * SECURITY UPDATE: buffer overflow in scsi_req_parse_cdb + - debian/patches/CVE-2015-5158.patch: check length in + hw/scsi/scsi-bus.c. + - CVE-2015-5158 + + -- Marc Deslauriers Mon, 27 Jul 2015 10:07:05 -0400 + +qemu (1:2.3+dfsg-5ubuntu2) wily; urgency=medium + + * SECURITY UPDATE: heap overflow in PCNET controller + - debian/patches/CVE-2015-3209.patch: check bounds in hw/net/pcnet.c. + - CVE-2015-3209 + + -- Marc Deslauriers Thu, 11 Jun 2015 14:25:05 -0400 + +qemu (1:2.3+dfsg-5ubuntu1) wily; urgency=medium + + * Merge 1:2.3+dfsg-5 from Debian. + * Remaining changes: + - debian/rules: do not drop the init scripts loading kvm modules + (still needed in precise in cloud archive) + - qemu-system-common.postinst: + * remove acl placed by udev, and add udevadm trigger. + * reload kvm_intel if needed to set nested=1 + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-trusty-machine-type.patch: define a default trusty + machine type to ease future live vm migration. + - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + * Refreshed patches: + - ubuntu/expose-vmx_qemu64cpu.patch + - ubuntu/define-ubuntu-machine-types.patch + + -- Serge Hallyn Wed, 10 Jun 2015 14:28:39 -0500 + +qemu (1:2.3+dfsg-5) unstable; urgency=high + + * slirp-use-less-predictable-directory-name-in-tmp-CVE-2015-4037.patch + (Closes: CVE-2015-4037) + * 11 patches for XEN PCI pass-through issues + (Closes: #787547 CVE-2015-4103 CVE-2015-4104 CVE-2015-4105 CVE-2015-4106) + * kbd-add-brazil-kbd-keys-*.patch, adding two keys found on Brazilian + keyboards (Closes: #772422) + + -- Michael Tokarev Wed, 03 Jun 2015 17:18:58 +0300 + +qemu (1:2.3+dfsg-4ubuntu1) wily; urgency=medium + + * Merge 1:2.3+dfsg-4 from Debian. + * Remaining changes: + - debian/rules: do not drop the init scripts loading kvm modules + (still needed in precise in cloud archive) + - qemu-system-common.postinst: + * remove acl placed by udev, and add udevadm trigger. 
+ * reload kvm_intel if needed to set nested=1 + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-trusty-machine-type.patch: define a default trusty + machine type to ease future live vm migration. + - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + * Dropped all patches which are applied upstream + * Move the upstart jobs to a generic script + - add new qemu-kvm-init script + - call that from upstart and sysvrc qemu-kvm scripts + - move to qemu-system-common, which must now B/R qemu-system-{x86,ppc} + + -- Serge Hallyn Wed, 03 Jun 2015 13:36:36 -0500 + +qemu (1:2.3+dfsg-4) unstable; urgency=medium + + * rules.mak-force-CFLAGS-for-all-objects-in-DSO.patch: + patch from upstream to fix FTBFS on some arches + * libcacard-dev: depend on libnss3-dev (Closes: #785798) + * libcacard-dev: do not depend on pkg-config + + -- Michael Tokarev Wed, 20 May 2015 14:21:09 +0300 + +qemu (1:2.3+dfsg-3) unstable; urgency=high + + * fdc-force-the-fifo-access-to-be-in-bounds-CVE-2015-3456.patch + (Closes: CVE-2015-3456) + * fix the OSABI binfmt mask for x86_64 arch, to actually fix #763043. + Original fix didn't work, because "someone" forgot arithmetics. + (Really Closes: #763043) + * align binfmt magics/masks to be in single column + + -- Michael Tokarev Tue, 12 May 2015 23:02:29 +0300 + +qemu (1:2.3+dfsg-2) unstable; urgency=medium + + * do not install upstream changelog file, it is unused for a long time + * mention closing of #781250 #769299 by 2.3 + * mention qemu-block-extra split in NEWS files + * fix spelling prob in the manpage + * bump Standards-Version to 3.9.6 (no changes needed) + * add mips64 and mips64el binfmt registration (Closes: #778624) + + -- Michael Tokarev Mon, 04 May 2015 13:01:03 +0300 + +qemu (1:2.3+dfsg-1) unstable; urgency=medium + + * new upstream release (2.3) + (Closes: #781250 #769299 #781250 #769299) + + -- Michael Tokarev Fri, 24 Apr 2015 17:33:46 +0300 + +qemu (1:2.2+dfsg-6exp) experimental; urgency=medium + + * qemu 2.2.1 stable/bugfix release (remove included upstream + exec-change-default-exception_index-value-for-migration-to--1.patch) + * added mips64(el) to list of architectures where qemu-utils is built + (Closes: #780200) + * added kvm-on-x32.patch from Adam Borowski (Closes: #778737) + * create qemu-block-extra package + * rules.mak-fix-module-build.patch from upstream to fix module build + * pass --enable-modules to configure + * pass multiarch --libdir to configure + * mjt-set-oem-in-rsdt-like-slic.diff: update FACP table too, + not only RSDT. FACP is needed for win7 booting in UEFI mode. + * enable libcacard (closes: #764971) + - build-depend on libnss3-dev & libtool-bin + - --enable-smartcard-nss + - run dh_makeshlibs + - rm libcacard.la + - add libcacard0, libcacard-dev and libcacard-tools packages + - add libcacard*.install and libcacard0.symbols + + -- Michael Tokarev Fri, 17 Apr 2015 21:54:53 +0300 + +qemu (1:2.2+dfsg-5expubuntu10) wily; urgency=medium + + * SECURITY UPDATE: denial of service in vnc web + - debian/patches/CVE-2015-1779-1.patch: incrementally decode websocket + frames in ui/vnc-ws.c, ui/vnc-ws.h, ui/vnc.h. 
+ - debian/patches/CVE-2015-1779-2.patch: limit size of HTTP headers from + websockets clients in ui/vnc-ws.c. + - CVE-2015-1779 + * SECURITY UPDATE: host code execution via floppy device (VENOM) + - debian/patches/CVE-2015-3456.patch: force the fifo access to be in + bounds of the allocated buffer in hw/block/fdc.c. + - CVE-2015-3456 + + -- Marc Deslauriers Wed, 13 May 2015 07:25:59 -0400 + +qemu (1:2.2+dfsg-5expubuntu9) vivid; urgency=low + + * CVE-2015-2756 / XSA-126 + - xen: limit guest control of PCI command register + + -- Stefan Bader Wed, 08 Apr 2015 10:17:45 +0200 + +qemu (1:2.2+dfsg-5expubuntu8) vivid; urgency=medium + + * debian/qemu-system-x86.qemu-kvm.upstart: fix redirection to not + accidentally create /1 + + -- Steve Beattie Thu, 12 Mar 2015 16:46:51 -0700 + +qemu (1:2.2+dfsg-5expubuntu7) vivid; urgency=low + + * No-change rebuild to pull in libxl-4.5 (take 2: step to the right). + + -- Stefan Bader Thu, 26 Feb 2015 08:55:35 +0100 + +qemu (1:2.2+dfsg-5expubuntu6) vivid; urgency=low + + * No-change rebuild to pull in libxl-4.5. + + -- Stefan Bader Wed, 25 Feb 2015 13:58:37 +0100 + +qemu (1:2.2+dfsg-5expubuntu5) vivid; urgency=medium + + * debian/control-in: enable numa on architectures where numa is built + (LP: #1417937) + + -- Serge Hallyn Thu, 12 Feb 2015 23:18:58 -0600 + +qemu (1:2.2+dfsg-5expubuntu4) vivid; urgency=medium + + [Scott Moser] + * update d/kvm.powerpc to avoid use of awk, which isn't allowed by aa + profile when started by libvirt. + + [Serge Hallyn] + * add symlink qemu-system-ppc64le -> qemu-system-ppc64 + * debian/rules: fix DEB_HOST_ARCH to ppc64el for installing qemu-kvm init script + (LP: #1419855) + + [Chris J Arges] + * Determine if we are running inside a virtual environment. If running inside + a virtualized environment do _not_ automatically enable KSM. (LP: #1414153) + + -- Serge Hallyn Thu, 12 Feb 2015 13:04:21 -0600 + +qemu (1:2.2+dfsg-5expubuntu1) vivid; urgency=medium + + * Merge 1:2.2+dfsg-5exp from Debian. (LP: #1409308) + - debian/rules: do not drop the init scripts loading kvm modules + (still needed in precise in cloud archive) + * Remaining changes: + - qemu-system-common.postinst: + * remove acl placed by udev, and add udevadm trigger. + * reload kvm_intel if needed to set nested=1 + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-trusty-machine-type.patch: define a default trusty + machine type to ease future live vm migration.
+ - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + * Dropped all patches which are applied upstream + * Update ubuntu-vivid machine type to default to std graphics (following + upstream's lead for pc-i440fx-2.2 machine type) + + -- Serge Hallyn Mon, 09 Feb 2015 22:31:09 -0600 + +qemu (1:2.2+dfsg-5exp) experimental; urgency=medium + + * fix initscript removal once again + + -- Michael Tokarev Fri, 23 Jan 2015 15:05:46 +0300 + +qemu (1:2.2+dfsg-4exp) experimental; urgency=medium + + * fix a brown-paper bag bug removing the qemu-system-x86 initscript + (Closes: #776004) + + -- Michael Tokarev Thu, 22 Jan 2015 20:33:38 +0300 + +qemu (1:2.2+dfsg-3exp) experimental; urgency=medium + + * mention closing of #753887 by 2.2 + * install only specific bamboo.dtb for ppc, not *.dtb + (Closes: #773033) + * install qemu-system-misc firmware in d/*.install not d/rules, + as other firmware files + * exec-change-default-exception_index-value-for-migration-to--1.patch: + cherry-picked commit adee64249ee37e from upstream + * stop messing up with alternatives (qemu for qemu-system-*) + * stop shipping qemu-system-x86 initscript to load kvm modules + (kernel since 3.4 does that automatically) (Closes: #751754) + + -- Michael Tokarev Thu, 22 Jan 2015 09:28:01 +0300 + +qemu (1:2.2+dfsg-2exp) experimental; urgency=medium + + * and finally uploading to experimental as it should be + + -- Michael Tokarev Wed, 10 Dec 2014 00:58:32 +0300 + +qemu (2.2+dfsg-1exp) unstable; urgency=medium + + * new upstream release 2.2.0 (Closes: #751078, #726629, #753887) + * removed all patches which was cherry-picked from upstream, + only keeping debian-specific changes + * refreshed mjt-set-oem-in-rsdt-like-slic.diff + * added tricore to qemu-system-misc package (new arch) + * restore upstream pc-bios/petalogix-*.dtb "blobs": + upstream says it is the canonical form, dtc is used + to convert from dts to dtb and back, the conversion + is reversible + * install petalogix firmware for microblaze (Closes: #769068) + + -- Michael Tokarev Tue, 09 Dec 2014 23:09:26 +0300 + +qemu (1:2.1+dfsg-11ubuntu2) vivid; urgency=medium + + * Cherrypick upstream patch needed to allow ESx hosts to run under + kvm (LP: #1411575) + + -- Serge Hallyn Fri, 16 Jan 2015 16:32:48 -0600 + +qemu (1:2.1+dfsg-11ubuntu1) vivid; urgency=medium + + * Merge 2.1+dfsg-11. Remaining changes: + - qemu-system-common.postinst: + * remove acl placed by udev, and add udevadm trigger. + * reload kvm_intel if needed to set nested=1 + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - debian/qemu-system-alternatives.in: use a later version as ubuntu + removed the alternatives bit later. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-trusty-machine-type.patch: define a default trusty + machine type to ease future live vm migration. 
+ - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + - debian/binfmt-update-in: support ppcle + * debian/binfmt-update-in + * Support-ppcle.patch + - Upstream patches to fix AArch64 emulation ignoring SPSel=0: + * d/p/target-arm-A64-Break-out-aarch64_save-restore_sp.patch + * d/p/target-arm-A64-Respect-SPSEL-in-ERET-SP-restore.patch + * d/p/target-arm-A64-Respect-SPSEL-when-taking-exceptions.patch: + * Dropped patches (upstream or now in debian's tree): + - upstream-xen_disk-fix-unmapping-of-persistent-grants.patch + - CVE-2014-7840.patch + - CVE-2014-8106.patch + + -- Serge Hallyn Wed, 17 Dec 2014 13:57:34 -0600 + +qemu (1:2.1+dfsg-11) unstable; urgency=medium + + * bump epoch and reupload to cancel 2.2+dfsg-1exp upload + mistakenly done to unstable. No other changes. + + -- Michael Tokarev Wed, 10 Dec 2014 00:52:28 +0300 + +qemu (2.1+dfsg-10) unstable; urgency=medium + + * make (debian-specific) x86 data path (with seabios and ipxe + in it) non-x86-specific, since other arches use firmware + files too (Closes: #772127) + * add seabios to Recommends to qemu-system-misc, qemu-system-mips, + qemu-system-ppc and qemu-system-sparc packages, because these + packages contains emulators using vgabios which is part of + seabios package (#772127). + * add ipxe-qemu to Recommends to qemu-system-misc, qemu-system-arm, + qemu-system-mips, qemu-system-ppc, qemu-system-sparc packages, + because these packages contains emulators using network boot + roms (#772127), in a similar way. + + -- Michael Tokarev Tue, 09 Dec 2014 13:47:36 +0300 + +qemu (2.1+dfsg-9) unstable; urgency=high + + * apply upstream patches for CVE-2014-8106 + (cirrus: insufficient blit region checks) + (Closes: #772025 CVE-2014-8106) + + -- Michael Tokarev Thu, 04 Dec 2014 00:10:43 +0300 + +qemu (2.1+dfsg-8) unstable; urgency=low + + [ Michael Tokarev ] + * add Built-Using control field for qemu-user-static package: + take contents of qemu-user ${shlibs:Depends} and transform it + into list of source packages with versions. (Closes: #768926) + * run remove-alternatives in qemu-system.postinst (the metapkg) + too, not only in qemu-system-XX.postinst, to handle upgrades + from wheezy (Closes: #768244) + * several fixes for debian/qemu-user.1 manpage. It needs more + work, but at least some easy and obvious errors are fixed now. + (Closes: #763841) + * migration-fix-parameter-validation-on-ram-load.patch from upstream + (Closes: #769451 CVE-2014-7840) + * fix x86_64 binfmt mask to allow more values in ELF_OSABI field + (byte7). Current gcc/binfmt sometimes produces binaries with + this field set to 3 (OSABI_GNU) not 0 (OSABI_SYSV) as used to be. + Set mask to 0xfb not 0xff here, to allow 0 (traditional SYSV), + 1 (HPUX), 2 (NETBSD) or 3 (GNU). This lets 2 more types than + necessary, but qemu will reject wrong types so no harm is done. + Some other binfmts ignore this field completely (with mask=0). + Maybe some day we'll have 2 different binfmt registrations for + the 2 different ABI types. (Closes: #763043) + * usb-host-fix-usb_host_speed_compat-tyops.patch -- fix host usb devices + attach, without this patch many USB devices does not work + * qdev-monitor-fix-segmentation-fault-on-qdev_device_h.patch - trivial + patch from upstream to fix segfault in -device foo,help (Closes: #770880) + + [ Aurelien Jarno ] + * Add tcg-mips-fix-store-softmmu-slow-path.patch from upstream to fix + TCG support on mips/mipsel hosts (Closes: #769470). 
+ + [ Ian Campbell ] + * Backport patch to fix unmapping of persistent grants in the Xen qdisk + backend (Closes: #770468). + + -- Michael Tokarev Thu, 27 Nov 2014 18:32:45 +0300 + +qemu (2.1+dfsg-7ubuntu5) vivid; urgency=medium + + * SECURITY UPDATE: code execution via savevm data + - debian/patches/CVE-2014-7840.patch: validate parameters in + arch_init.c. + - CVE-2014-7840 + * SECURITY UPDATE: code execution via cirrus vga blit regions + (LP: #1400775) + - debian/patches/CVE-2014-8106.patch: properly validate blit regions in + hw/display/cirrus_vga.c. + - CVE-2014-8106 + + -- Marc Deslauriers Thu, 11 Dec 2014 14:11:52 -0500 + +qemu (2.1+dfsg-7ubuntu4) vivid; urgency=low + + * d/rules: Fix vendor check to make kvm-spice symlinks (DEB_VENDOR got + dropped and VENDOR now will be all capital UBUNTU). + + -- Stefan Bader Mon, 08 Dec 2014 14:45:31 +0100 + +qemu (2.1+dfsg-7ubuntu3) vivid; urgency=medium + + * d/p/target-arm-A64-Break-out-aarch64_save-restore_sp.patch + d/p/target-arm-A64-Respect-SPSEL-in-ERET-SP-restore.patch + d/p/target-arm-A64-Respect-SPSEL-when-taking-exceptions.patch: + Cherry-pick of upstream patches in order to fix AArch64 emulation ignoring + SPSel=0 in certain conditions. (LP: #1349277) + + -- Chris J Arges Thu, 04 Dec 2014 14:17:01 -0600 + +qemu (2.1+dfsg-7ubuntu2) vivid; urgency=low + + * d/p/upstream-xen_disk-fix-unmapping-of-persistent-grants.patch: + Cherry-pick of qemu-upstream patch to fix issues with persistent + grants and the PV backend (Qdisk) (LP: #1394327). + + -- Stefan Bader Fri, 28 Nov 2014 13:14:37 +0100 + +qemu (2.1+dfsg-7ubuntu1) vivid; urgency=medium + + * Merge 2.1+dfsg-7. Remaining changes: + - qemu-system-common.postinst: + * remove acl placed by udev, and add udevadm trigger. + * reload kvm_intel if needed to set nested=1 + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - debian/qemu-system-alternatives.in: use a later version as ubuntu + removed the alternatives bit later. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-trusty-machine-type.patch: define a default trusty + machine type to ease future live vm migration. + - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + - debian/binfmt-update-in: support ppcle + * debian/binfmt-update-in + * Support-ppcle.patch + * Dropped patches (upstream or now in debian's tree): + - pc-reserve-more-memory-for-acpi.patch + - CVE-2014-5388.patch + - 501-block-raw-posix-fix-disk-corruption-in-try-fiemap and + 502-block-raw-posic-use-seek-hole-ahead-of-fiemap (combined + in debian) + - CVE-2014-3615.patch + - CVE-2014-3640.patch + - CVE-2014-3689.patch + - CVE-2014-7815.patch + + -- Serge Hallyn Sat, 22 Nov 2014 18:36:53 -0600 + +qemu (2.1+dfsg-7) unstable; urgency=high + + * urgency is high due to 2 security fixes + (one current and one forgotten in previous release) + and because of possible data corruption bugfix + * vnc-sanitize-bits_per_pixel-from-the-client-CVE-2014-7815.patch + from upstream (Closes: CVE-2014-7815) + * fix spelling mistake in previous changelog entry + * add two patches from upstream for block/raw-posix.c to work around + probs in FS_IOC_FIEMAP ioctl and to prefer seek_hole over fiemap. 
+ This should fix a long-standing ghost data corruption observed + in various places. + + -- Michael Tokarev Mon, 03 Nov 2014 18:44:34 +0300 + +qemu (2.1+dfsg-6) unstable; urgency=medium + + * mention closing of CVE-2014-3615 by 2.1.2 (2.1+dfsg-5) + * 9p-use-little-endian-format-for-xattr-values.patch (Closes: #755740) + * mention closing of #760386 + * mention closing of more CVEs by 2.1+dfsg-1 + * recognize ppc64el in qemu-debootstrap (Luca Falavigna) (Closes: #760949) + * use dpkg-vendor to let derived distros to use our d/rules + * use /usr/share/dpkg/architecture.mk to get DEB_HOST_* and DEB_BUILD_* + variables. This restores cross building support. + * use /usr/share/dpkg/buildflags.mk for CFLAGS LDFLAGS &Co + * pass -DVENDOR_{DEBIAN,UBUNTU} to compiler + * do not treat ppc* and ppc*le as compatible for binfmt registrations + * mention ACPI SLIC to RSDT id copying if slic table is supplied, + thank you Tim Small for the patch (Closes: #765075) + * apply 5 patches from upstream to fix a security issue in + vmware-vga (Closes: #765496 CVE-2014-3689) + * apply two patches from upstream to make qemu to work with samba4 + (Closes: #747636) + + -- Michael Tokarev Mon, 03 Nov 2014 18:07:48 +0300 + +qemu (2.1+dfsg-5) unstable; urgency=medium + + * upstream bugfix release v2.1.2 + (Closes: #762532 CVE-2014-3640 CVE-2014-5388 CVE-2014-3615) + * Add x32 to the list of supported architectures + (patch by Thorsten Glaser) (Closes: #760386) + * fix wrong reference in kvm.1 (Closes: #761137) + * removed patches (applied upstream): + l2tp-linux-only.patch + ide-only-constrain-read_write-requests-to-drive-size.diff + pc-reserve-more-memory-for-acpi.patch + + -- Michael Tokarev Fri, 26 Sep 2014 17:43:26 +0400 + +qemu (2.1+dfsg-4ubuntu9) vivid; urgency=medium + + * SECURITY UPDATE: information disclosure via vga driver + - debian/patches/CVE-2014-3615.patch: return the correct memory size, + sanity check register writes, and don't use fixed buffer sizes in + hw/display/qxl.c, hw/display/vga.c, hw/display/vga_int.h, + ui/spice-display.c. + - CVE-2014-3615 + * SECURITY UPDATE: denial of service via slirp NULL pointer deref + - debian/patches/CVE-2014-3640.patch: make sure socket is not just a + stub in slirp/udp.c. + - CVE-2014-3640 + * SECURITY UPDATE: possible privilege escalation via vmware-vga driver + - debian/patches/CVE-2014-3689.patch: verify rectangles in + hw/display/vmware_vga.c. + - CVE-2014-3689 + * SECURITY UPDATE: denial of service via VNC console + - debian/patches/CVE-2014-7815.patch: validate bits_per_pixel in + ui/vnc.c. + - CVE-2014-7815 + + -- Marc Deslauriers Thu, 13 Nov 2014 07:31:03 -0500 + +qemu (2.1+dfsg-4ubuntu8) vivid; urgency=medium + + * Support qemu-kvm on x32, arm64, ppc64 and pp64el architectures + (LP: #1389897) (Patch thanks to mwhudson, BenC, and infinity) + + -- Serge Hallyn Tue, 11 Nov 2014 15:51:47 -0600 + +qemu (2.1+dfsg-4ubuntu7) vivid; urgency=medium + + * Apply two patches to fix intermittent qemu-img corruption + (LP: #1368815) + - 501-block-raw-posix-fix-disk-corruption-in-try-fiemap + - 502-block-raw-posic-use-seek-hole-ahead-of-fiemap + + -- Serge Hallyn Wed, 29 Oct 2014 22:31:43 -0500 + +qemu (2.1+dfsg-4ubuntu6) utopic; urgency=medium + + * debian/control: slof is moving into main, so we can depend on qemu-slof as + debian does. + + -- Serge Hallyn Wed, 15 Oct 2014 22:01:27 +0200 + +qemu (2.1+dfsg-4ubuntu5) utopic; urgency=medium + + * debian/binfmt-update-in: don't blacklist ppc64le on ppc64 and vice + versa. 
+ * Drop Support-ppc64le.pach, as that architecture appears to not exist yet. + * update d/p/ubuntu/define-ubuntu-machine-types.patch to keep -M pc pointing + to latest upstream machine type, rather than distro one. Add 'ubuntu' + machine type for that. + + -- Serge Hallyn Mon, 06 Oct 2014 13:41:31 -0500 + +qemu (2.1+dfsg-4ubuntu4) utopic; urgency=medium + + * debian/qemu-system-x86.qemu-kvm.upstart: create /dev/kvm in a + container. (LP: #1370199) + * load kvm module on ppc64le at boot (LP: #1369785) + - debian/rules: install qemu-kvm on ppc64el + - add debian/qemu-system-ppc.qemu-kvm.{upstart,default} to autoload the + kvm-hv module if available + * qemu-system-x86.maintscript: remove accidentally installed + /etc/init.d/qemu-system-x86 (from 2.0.0+dfsg-6ubuntu1 and a few earlier) + * rename qemu-system-x86 init script to qemu-kvm so it gets installed in + ubuntu. + + -- Serge Hallyn Wed, 17 Sep 2014 14:20:12 -0500 + +qemu (2.1+dfsg-4ubuntu3) utopic; urgency=medium + + * Re-stick the trusty machine type to 2.0 (where it must always stay) and + define a new, default, pc-i440fx-utopic machine type (LP: #1369481) + + -- Serge Hallyn Mon, 15 Sep 2014 14:04:57 -0500 + +qemu (2.1+dfsg-4ubuntu2) utopic; urgency=medium + + * move kvm_intel nested setting to qemu-system-x86.postinst. + + -- Serge Hallyn Fri, 12 Sep 2014 23:12:52 +0000 + +qemu (2.1+dfsg-4ubuntu1) utopic; urgency=medium + + * Merge new debian release + * Remaining changes: + - qemu-system-common.postinst: + * remove acl placed by udev, and add udevadm trigger. + * reload kvm_intel if needed to set nested=1 + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - debian/qemu-system-alternatives.in: use a later version as ubuntu + removed the alternatives bit later. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-trusty-machine-type.patch: define a default trusty + machine type to ease future live vm migration. + - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + - debian/binfmt-update-in: support ppcle + * debian/binfmt-update-in + * Support-ppcle.patch + - d/p/CVE-2014-5388.patch + + -- Serge Hallyn Tue, 09 Sep 2014 17:56:15 -0500 + +qemu (2.1+dfsg-4) unstable; urgency=medium + + * mention libnuma-dev but not enable for now + * 9p-readdir.patch: fix readdir in 9p mapped mode (Closes: #755738) + * pc-reserve-more-memory-for-acpi.patch: fix linux -kernel not working + with new qemu (Closes: #759522) + * qemu-options-add-missing--drive-discard-option-to-cmdline-help.diff - + documentation fix + * mention that 2.1 closed #754336. + * move qemu-bridge-helper to /usr/lib/qemu/ subdir (lintian) + * debian/binfmt-update-in (Serge Hallyn): + - don't run in a container + - add ppc64le as target + * add apport hook from ubuntu package (ubuntu-only for now) + + -- Michael Tokarev Sun, 31 Aug 2014 09:32:59 +0400 + +qemu (2.1+dfsg-3ubuntu4) utopic; urgency=medium + + * SECURITY UPDATE: memory disclosure via out-of-bounds array access + - debian/patches/CVE-2014-5388.patch: fix check in hw/acpi/pcihp.c. 
+ - CVE-2014-5388 + + -- Marc Deslauriers Tue, 09 Sep 2014 08:26:24 -0400 + +qemu (2.1+dfsg-3ubuntu3) utopic; urgency=medium + + * replace d/p/revert-acpi-table-size-bump with + pc-reserve-more-memory-for-acpi.patch from upstream + * debian/binfmt-update-in + - don't run in a container + - add ppc64le as target (LP: #1358268) + * Add experimental ppcle support (LP: #1358268) + + -- Serge Hallyn Wed, 27 Aug 2014 18:24:32 -0500 + +qemu (2.1+dfsg-3ubuntu2) utopic; urgency=medium + + * revert-acpi-table-size-bump - get qemu -kernel working again. + + -- Serge Hallyn Fri, 15 Aug 2014 15:33:24 -0500 + +qemu (2.1+dfsg-3ubuntu1) utopic; urgency=medium + + * Merge new debian release + * Remaining changes: + - control-in: stick to libsdl1.2-dev. + - qemu-system-common.install: add debian/tmp/usr/lib to install the + qemu-bridge-helper + - qemu-system-common.postinst: remove acl placed by udev, + and add udevadm trigger. + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - debian/qemu-system-alternatives.in: use a later version as ubuntu + removed the alternatives bit later. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-trusty-machine-type.patch: define a default trusty + machine type to ease future live vm migration. + - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + * Upstart job: use getent group to check for kvm group + * apport: 'qemu' doesn't exist any more, so check for any qemu* tasks + + -- Serge Hallyn Fri, 15 Aug 2014 08:44:54 -0500 + +qemu (2.1+dfsg-3) unstable; urgency=medium + + * set SHELL = /bin/sh -e, so that more complex shell constructs + in d/rules will fail if any command fail inside + * check for pipe2() being a stub too, like utimensat() etc + * build-depend on gnutls-dev, not libgnutls*-dev, so the + buuld system will pick default gnutls impl (so it works for + backports and future versions) + * build-depend on libjpeg-dev not libjpeg8-dev + * minimum version of seabios is 1.7.5 (Closes: #757958) + * ide-only-constrain-read_write-requests-to-drive-size.diff + (Closes: #757927) + * added use-arch-data-path.patch, to be able to search for binary + blobs in several (arch-specific) data directories instead of just one. + * removed all blob/firmware symlinks from qemu-system-x86, using + arch-specific datapath instead (/usr/share/seabios:/usr/lib/ipxe/qemu) + * removed retry-pxe-after-efi.patch and depend on ipxe-qemu which + introduced efi boot roms. qemu should not try to load "wrong" + ROM, or else migration will fail due to rom size mismatch. + * include /usr/lib/qemu-bridge-helper binary, but not make it setuid + due to security concerns outlined in #691138 (Closes: #691138) + * make vnc-jpeg not debian-specific + * install kvm-spice symlinks on ubuntu + + -- Michael Tokarev Thu, 14 Aug 2014 14:30:24 +0400 + +qemu (2.1+dfsg-2ubuntu2) utopic; urgency=medium + + * reload kvm_intel if needed to set the nested=Y flag (LP: #1324174) + + -- Serge Hallyn Mon, 11 Aug 2014 12:58:50 -0500 + +qemu (2.1+dfsg-2ubuntu1) utopic; urgency=medium + + * Merge new debian release + * Remaining changes: + - qemu-system-x86.links: add eepro100.rom link, drop links which we + have in ipxe-qemu package. 
+ - control-in: stick to libsdl1.2-dev. + - qemu-system-common.install: add debian/tmp/usr/lib to install the + qemu-bridge-helper + - qemu-system-common.postinst: remove acl placed by udev, + and add udevadm trigger. + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - debian/rules: add qemu-kvm-spice + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - binfmt-update-in: make sure to filter out compat arches. + - debian/qemu-system-alternatives.in: use a later version as ubuntu + removed the alternatives bit later. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-trusty-machine-type.patch: define a default trusty + machine type to ease future live vm migration. + - apport hook for qemu source package: d/source_qemu-kvm.py, + d/qemu-system-common.install + + -- Serge Hallyn Tue, 05 Aug 2014 13:53:06 -0500 + +qemu (2.1+dfsg-2) unstable; urgency=medium + + * l2tp-linux-only.patch: fix FTBFS on kfreebsd + * imx_timer_TIMER_MAX_clash.diff: fix ITIMER_MAX definition clash + * remove kfreebsd hack which disabled usb support on this platform + since qemu-1.3: it isn't needed anymore + + -- Michael Tokarev Sat, 02 Aug 2014 00:51:04 +0400 + +qemu (2.1+dfsg-1) unstable; urgency=medium + + * new upstream release + Closes: #739589: + CVE-2013-4148 CVE-2013-4149 CVE-2013-4150 CVE-2013-4151 + CVE-2013-4526 CVE-2013-4527 CVE-2013-4528 + CVE-2013-4530 CVE-2013-4531 CVE-2013-4532 CVE-2013-4533 CVE-2013-4534 + CVE-2013-4535 CVE-2013-4536 CVE-2013-4537 CVE-2013-4538 CVE-2013-4539 + CVE-2013-4540 CVE-2013-4541 CVE-2013-4542 + CVE-2013-6399 CVE-2014-0182 CVE-2014-3461 + Closes: #735618 + Closes: #754336 + Closes: CVE-2014-3471 (pcie hotplug/hotunplug) + * versioned build-depend on libiscsi-dev (>>1.9.0~) + * added ppc64le user target + * fix description of qemu-user-binfmt wrt "empty" (Closes: #755988) + * use /usr/share/dpkg/pkg-info.mk instead of inventing the same locally + * added debian/get-orig-source.sh (and a d/rules target) + * set ubuntu vcs branch to ubuntu-utopic + * binfmt-update-in: make sure to filter out compat arches + + -- Michael Tokarev Fri, 01 Aug 2014 20:06:22 +0400 + +qemu (2.0.0+dfsg-7) unstable; urgency=medium + + * clarify description of qemu-user-binfmt a bit + * build-depend on acpica-tools (iasl) in order to rebuild .dsl files + * remove qemu-keymaps package, since it is not used by other tools + anymore, and ship keymaps in qemu-system-common. + * remove (and break by qemu-system-common) old qemu-common for + ubuntu too, since it was transitional-to-qemu-keymaps pkg + * reorganize docs (Closes: #751376): + - do not ship docs in qemu (meta)package, except of qemu-doc.html + - ship most of docs/* in qemu-system-common in /usr/share/doc/q-s-c/ + - make symlinks from /usr/share/doc/qemu-system-foo/common to ../q-s-c/ + - ship doc-base file for qemu-system-common too (for qemu-doc.html) + - rename qemu.1 manpage to qemu-system.1 + * qemu-user-static & qemu-user-binfmt conflict with each other, not break. + * mention that qemu-user-binfmt is empty package (lintian) + + -- Michael Tokarev Thu, 24 Jul 2014 16:51:16 +0400 + +qemu (2.0.0+dfsg-6ubuntu2) utopic; urgency=medium + + * d/qemu-system-x86.qemu-kvm.upstart: change the early-exit check from + /usr/bin/kvm to qemu-system-x86_64. 
(LP: #1348551) + + -- Serge Hallyn Fri, 25 Jul 2014 08:35:02 -0500 + +qemu (2.0.0+dfsg-6ubuntu1) utopic; urgency=medium + + * Merge 2.0.0+dfsg-6. Remaining changes: + - qemu-system-x86.links: add eepro100.rom link, drop links which we + have in ipxe-qemu package. + - control-in: stick to libgnutls-dev and libsdl1.2-dev. + - qemu-system-common.install: add debian/tmp/usr/lib to install the + qemu-bridge-helper + - qemu-system-common.postinst: remove acl placed by udev, + and add udevadm trigger. + - qemu-system-common.preinst: add kvm group if needed + - add qemu-kvm upstart job and defaults file (rules, + qemu-system-x86.qemu-kvm.default, qemu-system-x86.qemu-kvm.upstart) + - debian/rules: add qemu-kvm-spice + - rules,qemu-system-x86.modprobe: support use under older udevs which + do not auto-load the kvm kernel module. Enable nesting by default + on intel. + - binfmt-update-in: make sure to filter out compat arches. + - debian/qemu-system-alternatives.in: use a later version as ubuntu + removed the alternatives bit later. + - d/p/ubuntu/expose-vmx_qemu64cpu.patch: enable nested kvm by default + in qemu64 cpu type. + - d/p/ubuntu/define-trusty-machine-type.patch: define a default trusty + machine type to ease future live vm migration. + - re-introduce apport hook for qemu source package: + d/source_qemu-kvm.py, d/qemu-system-common.install + * enable-build-dep on libjpeg8-dev - which is now in main + + -- Serge Hallyn Mon, 23 Jun 2014 14:52:54 -0500 + +qemu (2.0.0+dfsg-6) unstable; urgency=medium + + * build-depend on libgnutls28-dev not libgnutls-dev + * added qcow1 block format validation patches from upstream: + block-fmt-validation/qcow1-check-maximum-cluster-size.patch + block-fmt-validation/qcow1-stricter-backing-file-length-check.patch + block-fmt-validation/qcow1-validate-image-size-CVE-2014-0223.patch + block-fmt-validation/qcow1-validate-L2-table-size-CVE-2014-0222.patch + (Finally closes: #742730, CVE-2014-0222, CVE-2014-0223) + + -- Michael Tokarev Fri, 23 May 2014 12:12:38 +0400 + +qemu (2.0.0+dfsg-5) unstable; urgency=medium + + * re-re-enable rbd (ceph) support again (Closes: #689239) + Should watch for breakage and for runtime dependencies closely from now on. + * fix qemu-kvm package description (stop mentioning it is transitional) + * move all binfmt handling from many files to d/binfmt-update-in + * introduce qemu-user-binfmt (dummy) package to support binfmt + registration of qemu-user binaries (Closes: #677529) + + -- Michael Tokarev Tue, 13 May 2014 21:15:48 +0400 + +qemu (2.0.0+dfsg-4) unstable; urgency=medium + + * suggests ovmf, not recommends it as it is not in -main (Closes: #745698) + * cputlb-fix-regression-with-TCG-interpreter.patch (Closes: #744342) + + -- Michael Tokarev Wed, 30 Apr 2014 10:03:55 +0400 + +qemu (2.0.0+dfsg-3) unstable; urgency=low + + * 2.0.0 closed #744213 (CVE-2013-4544) and #745157 (CVE-2014-2894) + * mjt-set-oem-in-rsdt-like-slic.diff: apply a (hackish) patch to simplify + running OEM versions of windows vista and 7 in qemu using SLIC table + from current hardware. 
+ * set VENDOR in d/rules + * added forgotten qemu-kvm Pre-Depends field + * switch back from sdl2 to sdl1, as the former apparently isn't ready yet + (Closes: #745269) + + -- Michael Tokarev Mon, 21 Apr 2014 12:34:03 +0400 + +qemu (2.0.0+dfsg-2ubuntu3) utopic; urgency=medium + + * remove alternatives for qemu: different architectures + aren't really alternatives and never had been (LP: #1316829) + + -- Serge Hallyn Wed, 07 May 2014 15:12:33 +0000 + +qemu (2.0.0+dfsg-2ubuntu2) utopic; urgency=medium + + * debian/rules: install the proper /etc/init/qemu-kvm.conf (LP: #1315402) + * debian/control: drop the versioning requirement from libfdt-dev + build-dependency, as it is longer needed (LP: #1295072) + + -- Serge Hallyn Fri, 02 May 2014 11:43:44 -0500 + +qemu (2.0.0+dfsg-2ubuntu1) trusty-proposed; urgency=medium + + * Merge 2.0.0+dfsg-2 + * Incorporates a fix for spice users (LP: #1309452) + * drop patch kvm_physical_sync_dirty_bitmap-ignore-ENOENT-from-kv.patch, as + the regression requiring it was reverted for 2.0 upstream. + * remove qemu-system-common depends on the qemu-system-aarch64 metapackage + * debian/qemu-debootstrap: add arm64 + * Remaining changes from debian: + - keep qemu 'alternative' (not something to change in SRU) + - debian/control and debian/control-in: + * versioned libfdt-dev check, until libfdt is fixed in precise + * enable rbd + * remove ovmf Recommends, as it is in multiverse + * use libsdl1.2, not libsdl2, since libsdl2-dev is in universe + * add a qemu-system-aarch64 metapackage for transitions from trusty + development version. This can be removed after trusty. + - qemu-system-common.install: add debian/tmp/usr/lib to install the + qemu-bridge-helper + - qemu-system-common.postinst: fix /dev/kvm acls + - qemu-system-common.preinst: add kvm group if needed + - qemu-system-x86.links: add eepro100.rom link, drop links which we + have in ipxe-qemu package. + - qemu-system-x86.modprobe: set module options for older releases + - qemu-system-x86.qemu-kvm.default: defaults for the upstart job + - qemu-system-x86.qemu-kvm.upstart: qemu-kvm upstart job + - qemu-user-static.postinst-in: remove qemu-arm64-static on arm64 + - debian/rules + * add legacy kvm-spice link + * fix ppc and arm slections + * add aarch64 to user_targets + - debian/patches/ubuntu/define-trusty-machine-type.patch: define a + pc-i440fx-trusty machine type as the default. + - debian/patches/ubuntu/expose-vmx_qemu64cpu.patch: support nesting by + default in qemu64 cpu time. + + -- Serge Hallyn Fri, 18 Apr 2014 09:23:27 -0500 + +qemu (2.0.0+dfsg-2) unstable; urgency=medium + + * resurrect 02_kfreebsd.patch, -- without it qemu FTBFS on current + Debian kFreeBSD system still. 
+ + -- Michael Tokarev Thu, 17 Apr 2014 22:04:38 +0400 + +qemu (2.0.0+dfsg-1) unstable; urgency=low + + * new upstream major release + Closes: #744213 CVE-2013-4544 + Closes: #745157 CVE-2014-2894 + * 2.0 actually does not close #739589, + remove it from from last changelog entry + * mention closing of #707629 by 2.0 + * mention a list of CVE IDs closed by #742730 + * mention closing of CVE-2013-4377 by 1.7.0-6 + * do not set --enable-uname-release=2.6.32 for qemu-user anymore + (was needed for old ubuntu builders) + * removed 02_kfreebsd.patch: it adds configure check for futimens() and + futimesat() syscalls on FreeBSD, however futimens() appeared in FreeBSD + 5.0, and futimesat() in 8.0, and 8.0 is the earliest supported version + * kmod dependency is linux-any + * doc-grammify-allows-to.patch: fix some lintian warnings + * remove alternatives for qemu: different architectures + aren't really alternatives and never had been + * update Standards-Version to 3.9.5 (no changes needed) + * exec-limit-translation-limiting-in-address_space_translate-to-xen.diff - + fixes windows BSOD with virtio-scsi when upgrading from 1.7.0 to 1.7.1 + or 2.0, among other things + + -- Michael Tokarev Thu, 17 Apr 2014 18:27:15 +0400 + +qemu (2.0.0~rc1+dfsg-1exp) experimental; urgency=low + + * new upstream release candidate (2.0-rc1) + Closes: #742730 -- image format processing issues: + CVE-2014-0142 CVE-2014-0143 CVE-2014-0144 CVE-2014-0145 + CVE-2014-0146 CVE-2014-0147 CVE-2014-0148 + Closes: #743235, #707629 + * refreshed patches: + 02_kfreebsd.patch + retry-pxe-after-efi.patch + use-fixed-data-path.patch + * removed patches applied upstream: + qemu-1.7.1.diff + address_space_translate-do-not-cross-page-boundaries.diff + fix-smb-security-share.patch + slirp-smb-redirect-port-445-too.patch + implement-posix-timers.diff + linux-user-fixed-s390x-clone-argument-order.patch + * added bios-256k.bin symlink and bump seabios dependency to >= 1.7.4-2 + * recommend ovmf package for qemu-system-x86 to support UEFI boot + (Closes: #714249) + * switch from sdl1 to sdl2 (build-depend on libsdl2-dev) + * output last 50 lines of config.log in case configure failed + + -- Michael Tokarev Sat, 05 Apr 2014 16:23:48 +0400 + +qemu (2.0.0~rc1+dfsg-0ubuntu3) trusty; urgency=medium + + * d/p/ubuntu/kvm_physical_sync_dirty_bitmap-ignore-ENOENT-from-kv.patch + don't abort() just because the kernel has no dirty bitmap. + (LP: #1303926) + + -- Serge Hallyn Tue, 08 Apr 2014 22:32:00 -0500 + +qemu (2.0.0~rc1+dfsg-0ubuntu2) trusty; urgency=medium + + * define-trusty-machine-type.patch: update the trusty machine type name to + pc-i440fx-trusty (LP: #1304107) + + -- Serge Hallyn Tue, 08 Apr 2014 11:49:04 -0500 + +qemu (2.0.0~rc1+dfsg-0ubuntu1) trusty; urgency=medium + + * Merge 2.0.0-rc1 + * debian/rules: consolidate ppc filter entries. + * Move qemu-system-arch64 into qemu-system-arm + * debian/patches/define-trusty-machine-type.patch: define a trusty machine + type, currently the same as pc-i440fx-2.0, to put is in a better position + to enable live migrations from trusty onward. (LP: #1294823) + * debian/control: build-dep on libfdt >= 1.4.0 (LP: #1295072) + * Merge latest upstream git to commit dc9528f + * Debian/rules: + - remove -enable-uname-release=2.6.32 + - don't make the aarch64 target Ubuntu-specific. 
+ * Remove patches which are now upstream: + - fix-smb-security-share.patch + - slirp-smb-redirect-port-445-too.patch + - linux-user-Implement-sendmmsg-syscall.patch (better version is upstream) + - signal-added-a-wrapper-for-sigprocmask-function.patch + - ubuntu/signal-sigsegv-protection-on-do_sigprocmask.patch + - ubuntu/Don-t-block-SIGSEGV-at-more-places.patch + - ubuntu/ppc-force-cpu-threads-count-to-be-power-of-2.patch + * add link for /usr/share/qemu/bios-256k.bin + * Remove all linaro patches. + * Remove all arm64/ patches. Many but not all are upstream. + * Remove CVE-2013-4377.patch which is upstream. + * debian/control-in: don't make qemu-system-aarch64 ubuntu-specific + + -- Serge Hallyn Tue, 25 Feb 2014 22:31:43 -0600 + +qemu (1.7.0+dfsg-9) unstable; urgency=medium + + * remove rbd/rados/ceph support *again*, till they'll actually provide + some symbol/library version mechanism + (Closes: #744364, Reopens: #729961, #689239) + + -- Michael Tokarev Sun, 13 Apr 2014 18:49:46 +0400 + +qemu (1.7.0+dfsg-8) unstable; urgency=low + + * fix a brown-paper-bag bug in the previous upload + + -- Michael Tokarev Fri, 11 Apr 2014 22:01:32 +0400 + +qemu (1.7.0+dfsg-7) unstable; urgency=high + + * fix guest-triggerable buffer overrun in virtio-net device + (Closes: #744221 CVE-2014-0150) + + -- Michael Tokarev Fri, 11 Apr 2014 20:27:16 +0400 + +qemu (1.7.0+dfsg-6) unstable; urgency=medium + + * make ceph (rbd) support linux-only, since it exists only on linux + + -- Michael Tokarev Sat, 05 Apr 2014 13:59:48 +0400 + +qemu (1.7.0+dfsg-5) unstable; urgency=medium + + * remove OVMF.fd symlink added in -4, it belongs to ovmf (Closes: #741494) + * qemu-debootstrap: add support for arm64 architecture (Closes: #740112) + * mention closing of #725176 by 1.7.0 + + -- Michael Tokarev Thu, 13 Mar 2014 06:21:01 +0400 + +qemu (1.7.0+dfsg-4) unstable; urgency=medium + + [ Michael Tokarev ] + * 1.7.1 stable upstream release (Closes: #719633) + Closes: CVE-2013-4377 + * implement-posix-timers.diff from upstream (Closes: #732258) + * address_space_translate-do-not-cross-page-boundaries.diff - + upstream bugfix for xen + * break libvirt << 1.2, not just 1.0, we need 1.2+ after qemu-1.6. + * add linux-user-fixed-s390x-clone-argument-order.patch (Closes: #739800) + * re-enable cepth (rbd) support (Closes: #729961, #689239) + + [ Steve Langasek ] + * (from Ubuntu) add symlink for OVMF.fd, which is now available in Debian + non-free. + * libusbredir is enabled in Ubuntu too, so sync debian/control. + * Enable building for ppc64el (in both Debian and Ubuntu): Debian does not + have a ppc64el port yet, but qemu builds out of the box there so it's + safe/appropriate to enable. + * Merge in Ubuntu-specific (and Ubuntu-tagged) debian/control changes. + * Enable building for arm64; the arm64 target is not yet merged, but the + package doesn't need arm64 target support to build for an arm64 host. + + [ Riku Voipio ] + * control: build-depend on python:any (change originally made + in Aug-2013 but reverted by mjt later) + + -- Michael Tokarev Wed, 12 Mar 2014 18:34:03 +0400 + +qemu (1.7.0+dfsg-3ubuntu7) trusty; urgency=low + + * No-change rebuild to build with libxen-4.4. + + -- Stefan Bader Fri, 21 Mar 2014 10:04:36 +0100 + +qemu (1.7.0+dfsg-3ubuntu6) trusty; urgency=medium + + * d/p/ubuntu/ppc-force-cpu-threads-count-to-be-power-of-2.patch: cherrypick + upstream patch to force cpu count on ppc to be a power of 2. 
(LP: #1279682) + + -- Serge Hallyn Tue, 11 Mar 2014 00:03:00 -0500 + +qemu (1.7.0+dfsg-3ubuntu5) trusty; urgency=medium + + [ dann frazier ] + * Add patches from the susematz tree to avoid intermittent segfaults: + - ubuntu/signal-added-a-wrapper-for-sigprocmask-function.patch + - ubuntu/signal-sigsegv-protection-on-do_sigprocmask.patch + - ubuntu/Don-t-block-SIGSEGV-at-more-places.patch + + [ Serge Hallyn ] + * Modify do_sigprocmask to only change behavior for aarch64. + (LP: #1285363) + + -- Serge Hallyn Thu, 06 Mar 2014 16:15:50 -0600 + +qemu (1.7.0+dfsg-3ubuntu4) trusty; urgency=medium + + [ Steve Langasek ] + * Merge debian/control with unreleased Debian branch: our architecture + lists should now be in sync. + + [ Dann Frazier ] + * ubuntu/linux-user-Implement-sendmmsg-syscall.patch: Fix user mode DNS + on arm64 and maybe others. (LP: #1284344) + + [ Serge Hallyn ] + * Move the OVMF.fd link to the ovmf package. + + -- Serge Hallyn Fri, 21 Feb 2014 12:14:53 -0800 + +qemu (1.7.0+dfsg-3ubuntu3) trusty; urgency=medium + + * Add ppc64el to the architecture list (supposedly added in the previous + upload, but really wasn't). + + -- Steve Langasek Thu, 20 Feb 2014 23:40:07 -0800 + +qemu (1.7.0+dfsg-3ubuntu2) trusty; urgency=medium + + * Backport changes to enable qemu-user-static support for aarch64 + * debian/control: add ppc64el to Architectures + * debian/rules: only install qemu-system-aarch64 on arm64. + Fixes a FTBFS when built twice in a row on non-arm64 due to a stale + debian/qemu-system-aarch64 directory + + -- dann frazier Tue, 11 Feb 2014 15:41:53 -0700 + +qemu (1.7.0+dfsg-3ubuntu1) trusty; urgency=medium + + * Fix broken filter_binfmts + * Remove use of dpkg-version in postinsts, as we're not Depending on + dpkg-dev. + + -- Serge Hallyn Wed, 05 Feb 2014 21:57:38 -0600 + +qemu (1.7.0+dfsg-3ubuntu1~ppa1) trusty; urgency=medium + + * Merge 1.7.0+dfsg-3 from debian. Remaining changes: + - debian/patches/ubuntu: + * expose-vmx_qemu64cpu.patch + * linaro (omap3) and arm64 patches + * ubuntu/target-ppc-add-stubs-for-kvm-breakpoints: fix FTBFS + on ppc + * ubuntu/CVE-2013-4377.patch: fix denial of service via virtio + - debian/qemu-system-x86.modprobe: set kvm_intel nested=1 options + - debian/control: + * add arm64 to Architectures + * add qemu-common and qemu-system-aarch64 packages + - debian/qemu-system-common.install: add debian/tmp/usr/lib + - debian/qemu-system-common.preinst: add kvm group + - debian/qemu-system-common.postinst: remove acl placed by udev, + and add udevadm trigger. + - qemu-system-x86.links: add eepro100.rom, remove pxe-virtio, + pxe-e1000 and pxe-rtl8139. + - add qemu-system-x86.qemu-kvm.upstart and .default + - qemu-user-static.postinst-in: remove arm64 binfmt + - debian/rules: + * allow parallel build + * add aarch64 to system_targets and sys_systems + * add qemu-kvm-spice links + * install qemu-system-x86.modprobe + - add debian/qemu-system-common.links for OVMF.fd link + * Remove kvm-img, kvm-nbd, kvm-ifup and kvm-ifdown symlinks. 
+ + -- Serge Hallyn Tue, 04 Feb 2014 12:13:08 -0600 + +qemu (1.7.0+dfsg-3) unstable; urgency=low + + * qemu-kvm: fix versions for Breaks/Replaces/Depends on qemu-system-x86 + * qemu-system-ppc: depend on openbios-ppc >= 1.1+svn1229 to fix boot issues + * qemu-system-sparc: depend on openbios-sparc >= 1.1+svn1229 too + * remove unused lintian overrides for qemu-user from qemu (meta)package + * qemu-system-*: depend on unversioned qemu-keymaps and qemu-system-common + packages (no particular version of any is hard-required) + * remove debian/README.source (was from quilt) + * add myself to debian/copyright + * reorder d/control to have Recommends:/Suggests: closer to Depends. + * rename d/control to d/control-in and add a d/rules rule to build it + based on ${VENDOR} + * allow different content in d/control for debian/ubuntu + * added debian/README-components-versions + * fixed qemu-armeb binfmt (Closes: #735078) + * added powerpcspe host arch (Closes: #734696) + * do not check for presence of update-alternatives which is part of dpkg + (Closes: #733222) + * do not call update-alternative --remove from postrm:remove + (lintian complains about this) + * add efi netrom links. This requires new ipxe-qemu. + + -- Michael Tokarev Thu, 16 Jan 2014 15:17:46 +0400 + +qemu (1.7.0+dfsg-2ubuntu9) trusty; urgency=medium + + * debian/qemu-user-static.postinst-in: remove arm64 qemu-user binfmt, which + may have been installed up to 1.6.0+dfsg-2ubuntu4 (LP: #1273654) + + -- Serge Hallyn Tue, 28 Jan 2014 14:41:20 +0000 + +qemu (1.7.0+dfsg-2ubuntu8) trusty; urgency=medium + + * SECURITY UPDATE: denial of service via virtio device hot-plugging + - debian/patches/CVE-2013-4377.patch: upstream commits to refactor + virtio device unplugging. + - CVE-2013-4377 + + -- Marc Deslauriers Mon, 27 Jan 2014 09:10:37 -0500 + +qemu (1.7.0+dfsg-2ubuntu7) trusty; urgency=medium + + * d/p/target-ppc-add-stubs-for-kvm-breakpoints: fix FTBFS on + powerpc. + + -- Serge Hallyn Wed, 22 Jan 2014 11:59:26 -0600 + +qemu (1.7.0+dfsg-2ubuntu6) trusty; urgency=medium + + [ Serge Hallyn ] + * add arm64 patchset from upstream. The three arm virt patches previously + pushed are in that set, so drop them. + + [ dann frazier ] + * Add packaging for qemu-system-aarch64. This package is currently only + available for arm64, as full software emulation is not yet supported. + + -- Serge Hallyn Fri, 10 Jan 2014 12:19:08 -0600 + +qemu (1.7.0+dfsg-2ubuntu5) trusty; urgency=medium + + * Drop d/p/fix-pci-add: upstream does not intend for pci_add to be + supported any longer. + * Add patchset from git://git.linaro.org/qemu/qemu-linaro.git#rebasing + * Refresh debian/patches/hw_arm_add_virt_platform.patch against context + churn caused by linaro patchset. + * debian/rules: enable parallel builds. + + -- Serge Hallyn Fri, 03 Jan 2014 10:53:17 -0600 + +qemu (1.7.0+dfsg-2ubuntu4) trusty; urgency=medium + + * d/control: enable usbredir (LP: 1126390) + + -- Serge Hallyn Thu, 02 Jan 2014 08:55:43 -0600 + +qemu (1.7.0+dfsg-2ubuntu3) trusty; urgency=medium + + * add missing arm virt patches from the mach-virt-v7 branch of + git://git.linaro.org/people/cdall/qemu-arm.git + + -- Serge Hallyn Wed, 18 Dec 2013 12:25:59 -0600 + +qemu (1.7.0+dfsg-2ubuntu2) trusty; urgency=medium + + * debian/control: add arm64 to list of architectures. + + -- Serge Hallyn Thu, 12 Dec 2013 10:22:47 -0600 + +qemu (1.7.0+dfsg-2ubuntu1) trusty; urgency=low + + * Merge 1.7.0+dfsg-2 from debian experimental. 
Remaining changes: + - debian/control + * update maintainer + * remove libiscsi, usb-redir, vde, vnc-jpeg, and libssh2-1-dev + from build-deps + * enable rbd + * add qemu-system and qemu-common B/R to qemu-keymaps + * add D:udev, R:qemu, R:qemu-common and B:qemu-common to + qemu-system-common + * qemu-system-arm, qemu-system-ppc, qemu-system-sparc: + - add qemu-common, qemu-kvm, kvm to B/R + - remove openbios-sparc from qemu-system-sparc D + - drop openbios-ppc and openhackware Depends to Suggests (for now) + * qemu-system-x86: + - add qemu-common to Breaks/Replaces. + - add cpu-checker to Recommends. + * qemu-user: add B/R:qemu-kvm + * qemu-kvm: + - add armhf armel powerpc sparc to Architecture + - C/R/P: qemu-kvm-spice + * add qemu-common package + * drop qemu-slof which is not packaged in ubuntu + - add qemu-system-common.links for tap ifup/down scripts and OVMF link. + - qemu-system-x86.links: + * remove pxe rom links which are in kvm-ipxe + - debian/rules + * add kvm-spice symlink to qemu-kvm + * call dh_installmodules for qemu-system-x86 + * update dh_installinit to install upstart script + * run dh_installman (Closes: #709241) (cherrypicked from 1.5.0+dfsg-2) + - Add qemu-utils.links for kvm-* symlinks. + - Add qemu-system-x86.qemu-kvm.upstart and .default + - Add qemu-system-x86.modprobe to set nesting=1 + - Add qemu-system-common.preinst to add kvm group + - qemu-system-common.postinst: remove bad group acl if there, then have + udev relabel /dev/kvm. + - New linaro patches from qemu-linaro rebasing branch + - Dropped patches: + * linaro patchset + * mach-virt patchset + - Kept patches: + * expose_vms_qemu64cpu.patch + * fix-pci-add + * qemu-system-common.install: add debian/tmp/usr/lib to install the + qemu-bridge-helper + + -- Serge Hallyn Sat, 07 Dec 2013 06:08:11 +0000 + +qemu (1.7.0+dfsg-2) unstable; urgency=low + + * switch from vgabios to seavgabios + * rework update-alternatives handling for qemu-system (Closes: #722914) + * mention closing of #326886, #390444, #706237 and CVE-2013-4375 for 1.7.0 + * rearrange libvte-dev build-dependency to come together with gtk, and + comment it out (since gtk frontend isn't being built) + * re-introduce qemu-kvm package with just a wrapper (/usr/bin/kvm) + and make this wrapper to force kvm mode (Closes: #727762) + * use less strict dependency on qemu-keymaps + * added fix-smb-security-share.patch by Michael Büsch (Closes: #727756) + * added move-ncalrpc-dir-to-tmp.patch by Michael Büsch (Closes: #728876) + + -- Michael Tokarev Fri, 29 Nov 2013 00:16:44 +0400 + +qemu (1.7.0+dfsg-1) unstable; urgency=low + + * new upstream release (Closes: #724758, #326886, #390444, #706237, #725176) + Also fixes CVE-2013-4375 (xen-specific qemu disk backend (qdisk) + resource leak) + * tweak kvm loading script to not load module for 3.4+ kernel + (kernel autoloads kvm modules since 3.4) (Closes: #717811) + * mention closing of #721713, #710971, #674201 + * refresh use-fixed-data-path.patch to contain just the min. changes + * fix pxe-eepro100.rom link (never worked in qemu due to wrong name) + * remove old $Id$ line from debian/rules + + -- Michael Tokarev Thu, 28 Nov 2013 03:14:21 +0400 + +qemu (1.6.0+dfsg-2ubuntu2) trusty; urgency=low + + * debian/control: qemu-utils must Replace: qemu-kvm as it did in raring, + to prevent lts-to-lts updates from breaking. (LP: #1243403) + + -- Serge Hallyn Wed, 23 Oct 2013 14:31:05 -0500 + +qemu (1.6.0+dfsg-2ubuntu1) trusty; urgency=low + + * Merge 1.6.0~rc0+dfsg-2exp from debian experimental. 
Remaining changes: + - debian/control + * update maintainer + * remove libiscsi, usb-redir, vde, vnc-jpeg, and libssh2-1-dev + from build-deps + * enable rbd + * add qemu-system and qemu-common B/R to qemu-keymaps + * add D:udev, R:qemu, R:qemu-common and B:qemu-common to + qemu-system-common + * qemu-system-arm, qemu-system-ppc, qemu-system-sparc: + - add qemu-kvm to Provides + - add qemu-common, qemu-kvm, kvm to B/R + - remove openbios-sparc from qemu-system-sparc D + - drop openbios-ppc and openhackware Depends to Suggests (for now) + * qemu-system-x86: + - add qemu-common to Breaks/Replaces. + - add cpu-checker to Recommends. + * qemu-user: add B/R:qemu-kvm + * qemu-kvm: + - add armhf armel powerpc sparc to Architecture + - C/R/P: qemu-kvm-spice + * add qemu-common package + * drop qemu-slof which is not packaged in ubuntu + - add qemu-system-common.links for tap ifup/down scripts and OVMF link. + - qemu-system-x86.links: + * remove pxe rom links which are in kvm-ipxe + * add symlink for kvm.1 manpage + - debian/rules + * add kvm-spice symlink to qemu-kvm + * call dh_installmodules for qemu-system-x86 + * update dh_installinit to install upstart script + * run dh_installman (Closes: #709241) (cherrypicked from 1.5.0+dfsg-2) + - Add qemu-utils.links for kvm-* symlinks. + - Add qemu-system-x86.qemu-kvm.upstart and .default + - Add qemu-system-x86.modprobe to set nesting=1 + - Add qemu-system-common.preinst to add kvm group + - qemu-system-common.postinst: remove bad group acl if there, then have + udev relabel /dev/kvm. + - New linaro patches from qemu-linaro rebasing branch + - Dropped patches: + * xen-simplify-xen_enabled.patch + * sparc-linux-user-fix-missing-symbols-in-.rel-.rela.plt-sections.patch + * main_loop-do-not-set-nonblocking-if-xen_enabled.patch + * xen_machine_pv-do-not-create-a-dummy-CPU-in-machine-.patch + * virtio-rng-fix-crash + - Kept patches: + * expose_vms_qemu64cpu.patch - updated + * linaro arm patches from qemu-linaro rebasing branch + - New patches: + * fix-pci-add: change CONFIG variable in ifdef to make sure that + pci_add is defined. + * Add linaro patches + * Add experimental mach-virt patches for arm virtualization. 
+ * qemu-system-common.install: add debian/tmp/usr/lib to install the + qemu-bridge-helper + + -- Serge Hallyn Tue, 22 Oct 2013 22:47:07 -0500 + +qemu (1.6.0+dfsg-2) unstable; urgency=low + + * Build-depend in seccomp again once it is in -testing + * 1.6.1 upstream bugfix release (Closes: #725944, #721713, #710971) + * fix "allows [one] to" in qemu-ga description + * fix descriptions for qemu-system and qemu-system-common packages + + -- Michael Tokarev Fri, 11 Oct 2013 01:15:48 +0400 + +qemu (1.6.0+dfsg-1) unstable; urgency=low + + * final upstream v1.6.0 (Closes: #718180, #714273, #605525, #701855, #674201) + * removed configure-explicitly-disable-virtfs-if-softmmu=no.patch + * mention closing of #717724 by 1.6 + * mention closing of #710971 by 1.5 (which disabled gtk support) + + [ Riku Voipio ] + * - set --cross-prefix in debian/rules when cross-compiling + + -- Michael Tokarev Mon, 02 Sep 2013 15:18:49 +0400 + +qemu (1.6.0~rc0+dfsg-1exp) experimental; urgency=low + + * uploading to experimental (rc0) + * new upstream release (release candidate) (Closes: #718016, #717724) + * removed patches: + - qemu-1.5.1.diff + - sparc-linux-user-fix-missing-symbols-in-.rel-.rela.plt-sections.patch + * refreshed use-fixed-data-path.patch + * ship new qemu_logo_no_text.svg + * stop shipping sgabios symlink, it is moved to sgabios package + * bump version of libseccomp build dependency to 2.1 (minimum + required to build) and disable libseccomp for now (because it + isn't available in debian yet) + + -- Michael Tokarev Wed, 31 Jul 2013 10:49:30 +0400 + +qemu (1.5.0+dfsg-5) unstable; urgency=low + + * new upstream 1.5.1 stable/bugfix release (as qemu-1.5.1.diff) + removed qemu_openpty_raw-helper.patch (included upstream) + * configure-explicitly-disable-virtfs-if-softmmu=no.patch -- do not + build virtfs-proxy-helper stuff if not building system emulator + (fix FTBFS on s390) + * disable gtk ui and build dependencies, as it adds almost nothing + compared with sdl (well, except bugs and limitations), and has + lots of additional dependencies (Closes: #710971) + * remove obsolete /etc/init.d/qemu-kvm (Closes: #712898) + * fix versions of obsolete qemu-kvm conffiles to be removed + * provide manpage for obsolete kvm (Closes: #716891, #586973) + * add --daemonize option to the guest-agent startup script (Closes: #715502) + * clarify what qemu-guest-agent does (Closes: #714270) and provide + its json schema as a doc + + -- Michael Tokarev Tue, 23 Jul 2013 22:39:54 +0400 + +qemu (1.5.0+dfsg-4) unstable; urgency=medium + + * urgency is medium to make it go faster because, on one hand, we've + been in unstable for quite a bit longer than needed already and + have nothing but (build) fixes in there, but on the other hand + we're holding migration of other packages which are waiting for + us, again, for too long already + * added qemu_openpty_raw-helper.patch - a cleanup patch submitted upstream + which removes #include from common header and hence works + around FTBFS problem on debian sparc where somehow, conflicts + with . + + -- Michael Tokarev Thu, 06 Jun 2013 01:50:32 +0400 + +qemu (1.5.0+dfsg-3ubuntu6) trusty; urgency=low + + * No change rebuild for new seccomp. 
+ + -- Stéphane Graber Mon, 21 Oct 2013 18:34:50 -0400 + +qemu (1.5.0+dfsg-3ubuntu5) saucy; urgency=low + + * Cherrypick upstream patch to fix crash with rng device (LP: #1235017) + - virtio-rng-fix-crash + + -- Serge Hallyn Wed, 09 Oct 2013 17:46:49 -0500 + +qemu (1.5.0+dfsg-3ubuntu4) saucy; urgency=low + + * Re-introduce snippet in upstart job to load kvm modules if needed. + (LP: #1218459) + + -- Serge Hallyn Mon, 16 Sep 2013 22:43:52 +0000 + +qemu (1.5.0+dfsg-3ubuntu3) saucy; urgency=low + + * Cherry-picking three Xen related patches targetted for qemu-stable: + * xen-simplify-xen_enabled.patch + * main_loop-do-not-set-nonblocking-if-xen_enabled.patch + * xen_machine_pv-do-not-create-a-dummy-CPU-in-machine-.patch + + -- Stefan Bader Fri, 26 Jul 2013 15:01:44 +0200 + +qemu (1.5.0+dfsg-3ubuntu2) saucy; urgency=low + + * Drop openbios-ppc and openhackware Depends to Suggests for now. + + -- Adam Conrad Wed, 05 Jun 2013 03:23:56 -0600 + +qemu (1.5.0+dfsg-3ubuntu1) saucy; urgency=low + + * Merge 1.5.0+dfs-3 from debian unstable. Remaining changes: + - debian/control + * update maintainer + * remove libiscsi, usb-redir, vde, vnc-jpeg, and libssh2-1-dev + from build-deps + * enable rbd + * add qemu-system and qemu-common B/R to qemu-keymaps + * add D:udev, R:qemu, R:qemu-common and B:qemu-common to + qemu-system-common + * qemu-system-arm, qemu-system-ppc, qemu-system-sparc: + - add qemu-kvm to Provides + - add qemu-common, qemu-kvm, kvm to B/R + - remove openbios-sparc from qemu-system-sparc D + * qemu-system-x86: + - add qemu-common to Breaks/Replaces. + - add cpu-checker to Recommends. + * qemu-user: add B/R:qemu-kvm + * qemu-kvm: + - add armhf armel powerpc sparc to Architecture + - C/R/P: qemu-kvm-spice + * add qemu-common package + * drop qemu-slof which is not packaged in ubuntu + - add qemu-system-common.links for tap ifup/down scripts and OVMF link. + - qemu-system-x86.links: + * remove pxe rom links which are in kvm-ipxe + * add symlink for kvm.1 manpage + - debian/rules + * add kvm-spice symlink to qemu-kvm + * call dh_installmodules for qemu-system-x86 + * update dh_installinit to install upstart script + * run dh_installman (Closes: #709241) (cherrypicked from 1.5.0+dfsg-2) + - Add qemu-utils.links for kvm-* symlinks. + - Add qemu-system-x86.qemu-kvm.upstart and .default + - Add qemu-system-x86.modprobe to set nesting=1 + - Add qemu-system-common.preinst to add kvm group + - qemu-system-common.postinst: remove bad group acl if there, then have + udev relabel /dev/kvm. 
+ - Dropped patches: + * 0001-fix-wrong-output-with-info-chardev-for-tcp-socket.patch + - Kept patches: + * expose_vms_qemu64cpu.patch - updated + * gridcentric patch - updated + * linaro arm patches from qemu-linaro rebasing branch + + -- Serge Hallyn Tue, 04 Jun 2013 22:56:43 +0200 + +qemu (1.5.0+dfsg-3) unstable; urgency=low + + * fix sections: misc => otherosfs + * remove obsolete conffiles (kvm-ifup, kvm-ifdown, target-x86_64.conf) + from /etc/kvm/ in qemu-kvm (Closes: #710328) + * rework debian/rules a bit, to build various bits depending on + which packages are requested, not depending on ad-hoc host/arch + logic + * do not fail at install if kvm module loading failed on x86 or + if modprobe isn't installed (Closes: #710496) + * also suggest kmod to be able to load x86 kvm modules + * suggest sgabios for qemu-system-x86 and put a symlink to sgabios.bin + (Closes: #696985) + * add rules to build just one of arch/indep parts, to make + buildd log scanner happier (E-binary-arch-produces-all) + * use verbose build by default (V=1) and let it to be overridden + + -- Michael Tokarev Sun, 02 Jun 2013 01:49:47 +0400 + +qemu (1.5.0+dfsg-2ubuntu1) saucy; urgency=low + + * Merge 1.5.0+dfs-2 from debian unstable. Remaining changes: + - debian/control + * update maintainer + * remove libiscsi, usb-redir, vde, vnc-jpeg, and libssh2-1-dev + from build-deps + * enable rbd + * add qemu-system and qemu-common B/R to qemu-keymaps + * add D:udev, R:qemu, R:qemu-common and B:qemu-common to + qemu-system-common + * qemu-system-arm, qemu-system-ppc, qemu-system-sparc: + - add qemu-kvm to Provides + - add qemu-common, qemu-kvm, kvm to B/R + - remove openbios-sparc from qemu-system-sparc D + * qemu-system-x86: + - add qemu-common to Breaks/Replaces. + - add cpu-checker to Recommends. + * qemu-user: add B/R:qemu-kvm + * qemu-kvm: + - add armhf armel powerpc sparc to Architecture + - C/R/P: qemu-kvm-spice + * add qemu-common package + * drop qemu-slof which is not packaged in ubuntu + - add qemu-system-common.links for tap ifup/down scripts and OVMF link. + - qemu-system-x86.links: + * remove pxe rom links which are in kvm-ipxe + * add symlink for kvm.1 manpage + - debian/rules + * add kvm-spice symlink to qemu-kvm + * call dh_installmodules for qemu-system-x86 + * update dh_installinit to install upstart script + * run dh_installman (Closes: #709241) (cherrypicked from 1.5.0+dfsg-2) + - Add qemu-utils.links for kvm-* symlinks. + - Add qemu-system-x86.qemu-kvm.upstart and .default + - Add qemu-system-x86.modprobe to set nesting=1 + - Add qemu-system-common.preinst to add kvm group + - qemu-system-common.postinst: remove bad group acl if there, then have + udev relabel /dev/kvm. + - Dropped patches: + * 0001-fix-wrong-output-with-info-chardev-for-tcp-socket.patch + - Kept patches: + * expose_vms_qemu64cpu.patch - updated + * gridcentric patch - updated + * linaro arm patches from qemu-linaro rebasing branch + + -- Serge Hallyn Tue, 28 May 2013 08:18:30 -0500 + +qemu (1.5.0+dfsg-2) unstable; urgency=low + + * merged development history of wheezy and experimental branches. + Now the history is ordered by version, but is not chronological. + As a base we now have wheezy (1.1.2+dfsg-6a) version. 
+ * removed trailing whitespaces from changelog file + * run dh_installinit properly (Closes: #709199) + * run dh_installman (Closes: #709241) + * remove build-dependendy on texi2html, upstream switched to makeinfo + + -- Michael Tokarev Tue, 28 May 2013 10:48:41 +0400 + +qemu (1.5.0+dfsg-1) unstable; urgency=low + + * update to 1.5.0 (Closes: #707732) + * upload to unstable + * mention that 1.5 closes #697641 and #705544 + * bump dependency on openbios (openbios-ppc for qemu-system-ppc, + openbios-sparc for qemu-system-sparc) from 1.0+svn1060 to 1.1 + (Closes: #707727) + * bump dependency on seabios to be >= 1.7.2-2 + * add retry-pxe-after-efi.patch to try pxe-XXXX.rom after unsuccessfully + trying efi-XXXX.rom - this is for NICs, until pxe-qemu package will be + able to provide necessary efi-XXXX.rom files. + * add (versioned) dependency on libusb-1.0 now when the right version + is available in debian + * use-fixed-data-path.patch: do not try to derive data path from + executable location, always use /usr/share/qemu + + -- Michael Tokarev Tue, 21 May 2013 00:49:47 +0400 + +qemu (1.5.0~rc0+dfsg-1) experimental; urgency=low + + * update to new upstream release candidate (1.5.0-rc0) + (Closes: #697641, #705544) + * remove --audio-card-list + * added new moxie system target + * added new linux-user targets: mips64 mips64el mipsn32 mipsn32el + * add libgtk2 and libvte to dependencies (new UI) + * added libssh2 to dependencies (new block device) + * s/libvdeplug2-dev/libvdeplug-dev/ + * define localstatedir (for guest-agent) + + -- Michael Tokarev Wed, 08 May 2013 01:01:01 +0400 + +qemu (1.4.0+dfsg-2exp) experimental; urgency=low + + [ Michael Tokarev ] + * set qemu-kvm priority to extra + * fix distribution field in last qemu-system.NEWS entry + * bump Standards-Version to 3.9.4 (no changes needed) + * fix update-rc.d args for qemu-system-x86 + * pre-Depend on adduser for qemu-system-common (Closes: #700840) + * move guest agent binary (qemu-ga) to /usr/sbin + * add versioned build-depends on libspice-protocol-dev (>= 0.12.3) + * refresh qemu-ifunc-sparc.patch, use proper submission from patchwork + (sparc-linux-user-fix-missing-symbols...) instead. + * apply 1.4.1 upstream stable patch + * release as 1.4.0 + + [ Aurelien Jarno ] + * debian/rules: don't build spapr-rtas.bin from .hex file. + * qemu-system-ppc: add a depends on qemu-slof and add the corresponding + links (Closes: #686979). + + -- Michael Tokarev Thu, 18 Apr 2013 14:45:30 +0400 + +qemu (1.4.0+dfsg-1expubuntu4) raring; urgency=low + + * re-add qemu-system-x86.modprobe to set nesting=1 (LP: #1155177) + * qemu-system-x86.qemu-kvm.upstart: + - remove NESTED workarounds from upstart file. + - remove loading of modules which is now always done + - remove TAPR define which is no longer used + * move customizable defines back to qemu-kvm.default + * copy creation of group kvm to preinst - the group must exist when the + kvm udev rule is installed (LP: #1103022) (LP: #1092715) + * add adduser to qemu-system-common Pre-Depends for use by preinst. + + -- Serge Hallyn Thu, 14 Mar 2013 14:21:53 -0500 + +qemu (1.4.0+dfsg-1expubuntu3) raring; urgency=low + + * debian/rules: add a symlink from kvm-spice to kvm in qemu-kvm, on + i386/amd64 targets. (LP: #1126258) + + -- Serge Hallyn Thu, 28 Feb 2013 15:17:16 -0600 + +qemu (1.4.0+dfsg-1expubuntu2) raring; urgency=low + + * substitute (apparently identical) patches from 1.4.0 qemu-linaro rebasing + tree. 
+ * add qemu-common to qemu-system-common B/R (was accidentally dropped from + 1.3.0 in 1.4.0 merge). + * debian/control: fix kvm P/C/B/R: + - make all C/B/R against kvm versioned + - don't have any qemu-system-* other than x86 Provides: kvm + + -- Serge Hallyn Fri, 22 Feb 2013 13:34:07 -0600 + +qemu (1.4.0+dfsg-1expubuntu1) raring; urgency=low + + * Merge 1.4.0+dfsg-1exp from debian. Remaining changes: + - debian/control: + * update maintainer + * remove libiscsi, usb-redir, vde, and vnc-jpeg from build-deps + * enable rbd + * add qemu-system and qemu-common B/R to qemu-keymaps + * add D:udev and R:qemu to qemu-system-common + * qemu-system-arm, qemu-system-ppc, qemu-system-sparc: + - add qemu-kvm and kvm to Provides + - add qemu-common and qemu-kvm to Breaks/Replaces qemu-system-ppc, + qemu-system-sparc: + - remove openbios-$arch from Depends + * qemu-system-x86: + - add qemu-common to Breaks/Replaces. + - add cpu-checker to Recommends. + * qemu-user: + - add B/R qemu-kvm + * qemu-utils: + - add B/R qemu-user and qemu-kvm + * qemu-kvm: add armhf armel powerpc sparc to Architecture + * add qemu-common package + - add qemu-system-common.links for tap ifup/down scripts and OVMF link. + - qemu-system-x86.links: + * remove pxe rom links which are in kvm-ipxe + * add symlink for kvm.1 manpage + - Add qemu-utils.links for kvm-* symlinks. + - Add qemu-kvm.conf upstart job to qemu-system + - Clear /dev/kvm acls on install + - Add linaro arm patches. + - Add gridcentric patches. + - Re-add expose_vms_qemu64cpu.patch (from Daviey) + * Add 0001-fix-wrong-output-with-info-chardev-for-tcp-socket.patch + + -- Serge Hallyn Wed, 20 Feb 2013 11:58:27 -0600 + +qemu (1.4.0+dfsg-1exp) experimental; urgency=low + + [ Michael Tokarev ] + * 1.4.0 final release + * remove fix-virtio-net-for-win-guests.patch (upstream now has better fix) + * fix debian/control arch fields. Build-Depends: foo [bar] + means foo will be selected for build on LINUX-bar, not any-bar. + So stop using [bar], always use [linux-bar] or [any-bar], + as appropriate. This fix spice and xen (non)selection on + kfreebsd-{i386,amd64}. + * fix manpage "links" generation (man qemu-system-* was broken) + * change Vcs fields to point to anonscm.debian.org (lintian) + * add a check for (lxc) container to qemu-system-x86 initscript + + [ Steve Langasek ] + * Pass --enable-uname-release=2.6.32 for the user emulation builds, so that + we have a sensible baseline kernel value regardless of what the + underlying host kernel is. This makes eglibc happier when running under + emulation on a very old kernel for instance (whose host syscall ABI has + nothing to do with what emulated syscalls are supported), and probably + also lets us steer clear for the moment of code that has problem with + the new kernel upstream versioning convention. LP: #921078. + + -- Michael Tokarev Sat, 16 Feb 2013 12:34:54 +0400 + +qemu (1.4.0~rc0+dfsg-1exp) experimental; urgency=low + + * new upstream 1.4.0-rc0 (first release candidate). + (Closes: #549195) + * remove patches included upstream: + e1000-discard-oversized-packets-based-on-SBP_LPE.patch + link-seccomp-only-with-softmmu-targets.patch + revert-serial-fix-retry-logic.patch + savevm.c-cleanup-system-includes.patch + * refresh qemu-ifunc-sparc.patch + * add fix-virtio-net-for-win-guests.patch bandaid to make virtio-net + in windows guest to work again + * don't install virtfs-proxy-helper in its own subdir in /usr/bin + * add qemu-io manpage. Thank you Asias He for the work! 
+ (Closes: #652518) + * move config options from debian/configure-opts into debian/control, + to keep list of build-deps & corresponding config flags in one place + * use initscript from old qemu-kvm package to load kvm modules for + qemu-system-x86, and clean it up (Closes: #699404) + * load vhost_net module in the initscript too + * mention default NIC change in qemu-kvm.NEWS and old conffiles + * remove mentions of (ubuntu-specific) qemu-common from debian/control + for now, as it does not help anyway (other changes are needed anyway + and it is better to keep them in one place) + * add a (preliminary) qemu-guest-agent startup script + * qemu-system-x86 break libvirt0 << 1.0, because older versions + didn't work with qemu 1.3+ correctly + + -- Michael Tokarev Sat, 02 Feb 2013 21:05:28 +0400 + +qemu (1.3.0+dfsg-5expubuntu5) raring; urgency=low + + * qemu-system-common.postinst: only run setfacl when /dev/kvm exists. + (LP: #1130591) + + -- Serge Hallyn Wed, 20 Feb 2013 08:58:53 -0600 + +qemu (1.3.0+dfsg-5expubuntu4) raring; urgency=low + + * Update workarounds for udev/inotify: (LP: #1092715) + - qemu-system-common.udev: go back to original, simple rule + - qemu-system-common.postinst: manually run setfacl + - (keep Depends: on acl as well) + - this can be removed once bug 1092715 is fixed. + + -- Serge Hallyn Tue, 19 Feb 2013 12:41:22 -0600 + +qemu (1.3.0+dfsg-5expubuntu3) raring; urgency=low + + * Now that qemu provides spice support, and qemu-kvm-spice is removed from + the archive, have qemu-kvm (which qemu-kvm-spice always depended on) + P/C/R qemu-kvm-spice. + + -- Serge Hallyn Thu, 14 Feb 2013 13:43:27 -0600 + +qemu (1.3.0+dfsg-5expubuntu2) raring; urgency=low + + * Enable spice. + * Address lintian warning by adding ${misc:Depends} to qemu-common and + qemu-kvm. + + -- Serge Hallyn Tue, 12 Feb 2013 16:07:04 -0600 + +qemu (1.3.0+dfsg-5expubuntu1) raring; urgency=low + + [ Serge Hallyn ] + * Merge 1.3.0+dfsg-5exp from Debian. + * remaining changes from 1.3.0+dfsg-1~exp3ubuntu1: + - debian/control: + * update maintainer + * remove vde2 recommends + * build-deps: remove libusbredir, libvdeplug2-dev, + libspice-server-dev, libspice-protocol-dev, libiscsi-dev + * qemu-system: + - break/replace qemu-common + - depend on udev + - remove openbios-ppc, openbios-sparc, and openhackware from + Depends. (Intend to add them back once we can build them.) + * qemu-utils: break/replace qemu-kvm + - qemu-kvm.upstart: + - add qemu-system.qemu-kvm.upstart + - debian/rules: add dh_installinit to get qemu-system.upstart installed. + - take the defaults from the old qemu-kvm.defaults, and move them into + the upstart job + - debian/patches: + - apply gridcentric patches from lp:~amscanne/+junk/gridcentric-qemu-patches + - apply arm patches from git://git.linaro.org/qemu/qemu-linaro.git + - add links for qemu-ifup/down in qemu-system-common.links + - debian/qemu-system-common.postinst + - udevadm trigger to fix up /dev/kvm perms + - debian/qemu-system.links: + - remove pxe-virtio, pxe-e1000 and pxe-rtl8139 links (which conflict + with ones from kvm-ipxe). We may want to move the links from kvm-ipxe + back to qemu-system at some point. + * remaining changes from after 1.3.0+dfsg-1~exp3ubuntu1: + - qemu-system-common.links: add link for OVMF + - Add qemu-utils.links for kvm-img and kvm-nbd utils and manpages. 
+ - qemu-system.links: + * Add link to usr/share/ovmf/OVMF.fd + * Fix target of /etc/kvm/kvm-if{up,down} links + - debian/control: qemu-system should Recommend cpu-checker + - Add qemu-kvm breaks/replaces to qemu-user, to handle conflict over + (i.e.) qemu-x86_64. + - add qemu-kvm, and qemu-common transitional packages. + - Add breaks/replaces to qemu-keymaps for qemu-system. + - Add provides: qemu-kvm and kvm to qemu-system-ppc. + - Add breaks/replaces to qemu-system-ppc for qemu-kvm and qemu-common. + - Add breaks/replaces to qemu-kvm for qemu-common. + - Add breaks/replaces to qemu-utils for qemu-user and qemu-kvm. + - Add armhf, armel, powerpc and sparc arches to qemu-kvm transitional + package. + - Add qemu-common package. + - Make sure /dev/kvm gets its acls cleared: + * Add acl to qemu-system.depends + * update qemu-system.udev to run setfacl to set g::rw acl + - Remove vnc-jpeg, libiscsi-dev, and vde from debian/configure-opts + * dropped debian/patches/CVE-2012-6075.patch (duplicate of + e1000-discard-oversize-packets-based-on-SBP_LPE.patch) + * debian/{control,configure-opts}: enable rbd (LP: #1118406) + * add symlink for kvm.1 -> qemu.1 manpage (LP: #1117636) + * add replaces to qemu-system-common for qemu - we briefly moved conflicting + docs to qemu, which debian moved to qemu-system-common. This can be + dropped after raring. + * move qemu-kvm.upstart from qemu-system to qemu-system-x86. + * Support upgrade from qemu-kvm on non-x86 arches: + - Add Provides: qemu-kvm, kvm to qemu-system-{arm,ppc,sparc,x86} + - Add Breaks/Replaces for qemu-{common,system,kvm} and kvm. + * Re-add expose_vms_qemu64cpu.patch (from Daviey) from quantal. + + [ Steve Langasek ] + * Pass --enable-uname-release=2.6.32 for the user emulation builds, so that + we have a sensible baseline kernel value regardless of what the + underlying host kernel is. This makes eglibc happier when running under + emulation on a very old kernel for instance (whose host syscall ABI has + nothing to do with what emulated syscalls are supported), and probably + also lets us steer clear for the moment of code that has problem with + the new kernel upstream versioning convention. LP: #921078. + + -- Serge Hallyn Thu, 07 Feb 2013 14:15:26 -0600 + +qemu (1.3.0+dfsg-5exp) experimental; urgency=low + + * qemu-system-split: split qemu-system into several target-specific packages: + qemu-system-arm, qemu-system-mips, qemu-system-ppc, qemu-system-sparc, + qemu-system-x86, and qemu-system-misc, plus qemu-system-common. + (Closes: #636000) + * add initial qemu-guest-agent package (just the binary for now, + no startup script) (Closes: #676959) + * do not try to install (linux-specific) virtfs-proxy-helper on kfreebsd + * change order of audio drivers, in particular put pulseaudio (pa) first + * ship OS-specific qemu-ifup (use trivial ifconfig invocation on kfreebsd) + * qemu-system replaces qemu-utils due to virtfs-proxy-helper binary + * chmod +x qemu-ifdown + + -- Michael Tokarev Mon, 28 Jan 2013 15:05:57 +0400 + +qemu (1.3.0+dfsg-4exp) experimental; urgency=low + + * install forgotten /etc/qemu-ifdown (dummy, but qemu complains without it) + * install virtfs-proxy-helper in qemu-system not qemu-utils + * add qemu-kvm.NEWS mentioning transition from qemu-kvm to qemu-system-x86_64. + * do not pass -cpu kvm64 to qemu in kvm wrapper script, previous qemu-kvm + used qemu64 cpu instead - the same as new qemu uses. + * install kvm wrapper on x86 only, and install it as /usr/bin/kvm not + /usr/bin/kvm/kvm (Closes: #698736). 
+ * stop shipping /usr/share/qemu/vapic.bin link (qemu uses kvmvapic.bin) + * stop shipping /usr/share/qemu/vgabios.bin link (qemu uses vgabios-*.bin) + * enable all guest audio devices + * add breaks/replaces/provides/conflicts with kvm (very old package), + qemu-kvm, and ubuntu's qemu-common. + * stop caring about old (pre-squeeze) qemu + + -- Michael Tokarev Wed, 23 Jan 2013 11:08:47 +0400 + +qemu (1.3.0+dfsg-3exp) experimental; urgency=low + + * add ability to specify os-arch in configure-opts + * libseccomp is linux-x86 not linux-any + * e1000-discard-oversized-packets-based-on-SBP_LPE.patch + CVE-2012-6075 (Closes: #696051) + + -- Michael Tokarev Mon, 21 Jan 2013 02:54:15 +0400 + +qemu (1.3.0+dfsg-2exp) experimental; urgency=low + + * qemu-nbd and qemu-io should be installed on kFreeBSD too + * install qemu-system docs into /usr/share/doc/qemu-system, + not .../qemu (Closes: #697085) + * do not depend on ipxe, it does not provide our ROMs + * move vde2 from Recommends to Suggests, since it isn't + used often + * require libspice-server-dev >= 0.12.2 and require it on i386 too, + enable spice support + * require libusbredirparser-dev >= 0.6, enable usb-redir + * enable xen explicitly on amd64|i386 + * enable xfsctl explicitly on linux + * sort build-deps in debian/control and add comments + * set permissions of /dev/kvm in qemu-system.postinst + the same way it is done in old qemu-kvm package + * set --localstatedir=/var (will be used later by guest agent) + * bump qemu-system dependency on seabios to 1.7.2 + and add symlinks for acpi-dsdt.aml and q35-acpi-dsdt.aml + * import qemu-ifup and qemu-ifdown scripts from qemu-kvm, + and modify qemu-ifup to allow usage of just `ip' command + from iproute package (if installed) instead of old brctl+ifconfig. + Add Breaks: for old iproute without bridge controls. + Add iproute to Recomments, so that the scripts will actually work + (previous script used sudo which should be in recommends too) + * enable seccomp (and libseccomp-dev b-d) on linux, + and add link-seccomp-only-with-softmmu-targets.patch + * use $(MAKE) not make when building spapr-rtas.bin + * update debian/watch (new place and new extensions) + * add qemu-kvm package (transitional, depends on qemu-system), + and add /usr/bin/kvm wrapper that calls qemu-system-x86_64 + with some arguments to match original qemu-kvm behavour. + (Closes: #560853) + + -- Michael Tokarev Sun, 20 Jan 2013 22:12:11 +0400 + +qemu (1.3.0+dfsg-1~exp3ubuntu8) raring; urgency=low + + * qemu-system.links: + - Add link to usr/share/ovmf/OVMF.fd (LP: #1074207) + - Fix target of /etc/kvm/kvm-if{up,down} links + + -- Serge Hallyn Tue, 29 Jan 2013 10:52:22 -0600 + +qemu (1.3.0+dfsg-1~exp3ubuntu7) raring; urgency=low + + * debian/control: qemu-system should Recommend cpu-checker (LP: #1103982) + + -- Serge Hallyn Mon, 28 Jan 2013 11:52:10 -0600 + +qemu (1.3.0+dfsg-1~exp3ubuntu6) raring; urgency=low + + * configure-opts: add audio-cards list (LP: #1102487) + * configure-opts: change order of audio-drv-list for ubuntu, putting pa + first. + + -- Serge Hallyn Mon, 21 Jan 2013 12:02:09 -0600 + +qemu (1.3.0+dfsg-1~exp3ubuntu5) raring; urgency=low + + * Add qemu-kvm breaks/replaces to qemu-user, to handle conflict over + (i.e.) qemu-x86_64. 
(LP: #1102332) + + -- Serge Hallyn Mon, 21 Jan 2013 08:58:07 -0600 + +qemu (1.3.0+dfsg-1~exp3ubuntu4) raring; urgency=low + + * Move three docs from qemu-system.install to qemu.docs (LP: #1101798) + + -- Adam Conrad Sat, 19 Jan 2013 20:12:48 -0700 + +qemu (1.3.0+dfsg-1~exp3ubuntu3) raring; urgency=low + + * debian/patches/CVE-2012-6075.patch: Fix guest denial of service and + possible code execution in hw/e1000.c by dropping oversize packets. + + -- Adam Conrad Sat, 19 Jan 2013 07:31:50 -0700 + +qemu (1.3.0+dfsg-1~exp3ubuntu2) raring; urgency=low + + * debian/rules: empty MAKEFLAGS when building spapr-rtas.bin on powerpc, to + fix FTBFS due to parallel compile. + + -- Serge Hallyn Fri, 18 Jan 2013 15:51:09 -0600 + +qemu (1.3.0+dfsg-1~exp3ubuntu1) raring; urgency=low + + * Merge 1.3.0+dfsg-1~exp3. Remaining ubuntu delta: + - debian/control: + * update maintainer + * remove vde2 recommends + * build-deps: remove libusbredir, libvdeplug2-dev, + libspice-server-dev, libspice-protocol-dev, libiscsi-dev, + and libxen-dev. + * qemu-keymaps: break/replace qemu-common + * qemu-system: + - break/replace qemu-common + - depend on udev + - remove openbios-ppc, openbios-sparc, and openhackware from + Depends. (Intend to add them back once we can build them.) + - provides: qemu-kvm + * qemu-utils: break/replace qemu-kvm + * set up transitional packages for qemu-kvm, qemu-common, and kvm. + - qemu-kvm.upstart: + - add qemu-system.qemu-kvm.upstart + - debian/rules: add dh_installinit to get qemu-system.upstart installed. + - take the defaults from the old qemu-kvm.defaults, and move them into + the upstart job + - debian/patches: + - apply gridcentric patches from lp:~amscanne/+junk/gridcentric-qemu-patches + - apply arm patches from git://git.linaro.org/qemu/qemu-linaro.git + - ifup/down: + - copy Debian qemu-kvm's kvm-ifup/down into debian/ + - fix dh_install for kvm-ifup/down in debian/rules + - add links for qemu-ifup/down in qemu-system.links + - remove (debian's original) qemu-ifup from qemu-system.install + - debian/qemu-system.postinst + - udevadm trigger to fix up /dev/kvm perms + - make the 'qemu' symlink point to qemu-system-x86_64, not -i386. + - debian/qemu-system.links: + - point 'kvm' to qemu-system-x86_64 + - remove pxe-virtio, pxe-e1000 and pxe-rtl8139 links (which conflict + with ones from kvm-ipxe). We may want to move the links from kvm-ipxe + back to qemu-system at some point. + * Add note about kvm to qemu-system.README.debian. + * Copy kvm-ifup and kvm-ifdown from debian's qemu-kvm + * Remove TAPBR from qemu-kvm.conf. + * Make sure /dev/kvm gets its acls cleared: + - Add acl to qemu-system.depends + - update qemu-system.udev to run setfacl to set g::rw acl + * qemu-system.qemu-kvm.conf: don't rmmod at stop + * Remove vnc-jpeg, libiscsi-dev, and vde from debian/configure-opts + * Remove hugepages sysctl file - qemu now supports transparent hugepages. 
+ + -- Serge Hallyn Mon, 14 Jan 2013 23:22:51 -0600 + +qemu (1.3.0+dfsg-1~exp3) experimental; urgency=low + + * enable vde on kFreebsd too (no idea why it was disabled) + * bluez (libbluetooth) is linux-specific + * savevm.c-cleanup-system-includes.patch: remove excessive #includes + from savevm.c (fixes FTBFS on kFreebsd due to wrong #include) + + -- Michael Tokarev Mon, 31 Dec 2012 15:52:23 +0400 + +qemu (1.3.0+dfsg-1~exp2) experimental; urgency=low + + * mention that 1.3: Closes #622319, #597527, #593547, #660154 + * libcap and libcap-ng are linux-specific + * include spapr-rtas.bin file in a pre-compiled pseudo-hex-with-assembly + form in debian/spapr-rtas.hex, "compile" it (using sed magic) in + build step and compare the result with the actually built binary + on ppc. This binary is needed for ppc system emulation (qemu-system-ppc*). + (Closes: #670909). + * rename system-build to qemu-build, and merge user-build to qemu-build, + building qemu-system and qemu-user in one go. Only qemu-system-static + is left in separate build dir. Note that current qemu-user is + linux-specific, even if qemu has bsd-user targets. + * add or32 user target + * debian/configure-opts - list of (possible arch- or os-specific) + features to enable/disable. This is in order to ensure we always + build with specific options enabled and error out if new upstream + will dislike our dependencies, or if the deps will not work. + Move config_audio_drv from debian/rules to this file too. + For now do not explicitly enable xen, spice, usb-redir, since + these requires some more work. + Also pass $(QEMU_CONFIGURE_OPTIONS) to qemu configure line, + to be able to quickly override some options. + * do not build-depend on sharutils, since no uuencoded binaries + are shipped anymore + * do not build-depend on nasm, we don't compile from assembly anymore + * remove libgpmg1-dev from build-depends. It was due to #267174, + because of static link of qemu with libsdl. Now only qemu-user-static + is linked statically, and this one does not use libsdl. + * add myself to uploaders, and remove dm-upload-allowed + * added main docs to qemu-system package + * specify --libexecdir=/usr/lib to configure + * mark all packages as Multi-Arch: foreign + (at least it is possible to install i386 versions on amd64 arch) + * replace "flags OC" with "credentials yes" in debian/binfmts/*, + since that's the format update-binfmts expects to enable setuid + binaries. (Closes: #683205) + * build-depend on debhelper 9, and set debian/compat to 9 + + -- Michael Tokarev Mon, 31 Dec 2012 01:40:35 +0400 + +qemu (1.3.0+dfsg-1~exp1) experimental; urgency=low + + [ Michael Tokarev ] + * new upstream version (1.3.0) + (Closes: #676374, #622319, #597527, #593547, #660154) + - Removed patches included upstream: + do-not-include-libutil.h.patch + configure-nss-usbredir.patch + tcg_s390-fix-ld_st-with-CONFIG_TCG_PASS_AREG0.patch + net-add--netdev-options-to-man-page.patch + - update 02_kfreebsd.patch + - do not build mpc8544ds.dtb + - include new targets + * Cleaned up the build system ALOT. Larger changes: + - used explicit lists of emulated targets in debian/rules + and generate everything else from there, instead of repeating + these lists in lots of places. + - stop using debian/$pkg.manpages and other auxilary files like this, + moving eveything to debian/$pkg.install, because with the number + of packages growing, amount of these small files becomes very + large and the result is difficult to maintain. 
+ * ship forgotten target-x86_64.conf in qemu-system. + * ship virtfs-proxy-helper in qemu-utils. + * stop shipping tundev.c, since it does not reflect the reality for + a long time now (Closes: #325761, #325754). + * re-introduce support parallel build using DEB_BUILD_OPTIONS=parallel=N, + this time by adding to $MAKEFLAGS instead of passing down to submakes + * build-depend on libcap-ng-dev (for virtfs-proxy-helper) + + [ Vagrant Cascadian ] + * Add libcap-dev to Build-Depends to support virtfs-proxy-helper. + + -- Michael Tokarev Sun, 30 Dec 2012 01:52:21 +0400 + +qemu (1.2.0.dfsg-1~exp1-0ubuntu2) raring; urgency=low + + * Remove kvm package + - make qemu-system P/C/B: kvm. + + -- Serge Hallyn Mon, 14 Jan 2013 12:03:19 -0600 + +qemu (1.2.0.dfsg-1~exp1-0ubuntu1) raring; urgency=low + + [ Serge Hallyn ] + * debian/control: + - update maintainer + - remove vde2 recommends + - build-deps: remove libusbredir, libvdeplug2-dev, + libspice-server-dev, libspice-protocol-dev, libiscsi-dev, + and libxen-dev. + - qemu-keymaps: break/replace qemu-common + - qemu-system: + - break/replace qemu-common + - depend on udev + - remove openbios-ppc, openbios-sparc, and openhackware from + Depends. (Intend to add them back once we can build them.) + - provides: qemu-kvm + - qemu-utils: break/replace qemu-kvm + - set up transitional packages for qemu-kvm, qemu-common, and kvm. + * debian/rules: + - install kvm-ifup and kvm-ifdown + - dh_installinit the qemu-kvm upstart job + * install a 30-qemu-kvm.conf into /etc/sysctl.c for nr_hugepages. + * qemu-kvm.upstart: + - add qemu-system.qemu-kvm.upstart + - add mv_confile to qemu-system.preinst, postinst, and .postrm to rename + /etc/init/qemu-kvm.conf to qemu-system.conf + - debian/rules: add dh_installinit to get qemu-system.upstart installed. + - take the defaults from the old qemu-kvm.defaults, and move them into + the upstart job + * debian/patches: + - apply gridcentric patches from lp:~amscanne/+junk/gridcentric-qemu-patches + - apply arm patches from git://git.linaro.org/qemu/qemu-linaro.git + - apply nbd-fixes-to-read-only-handling.patch from upstream to + make read-write mount after read-only mount work. (LP: #1077838) + * ifup/down: + - copy Ubuntu qemu-kvm's kvm-ifup/down into debian/ + - fix dh_install for kvm-ifup/down in debian/rules + - add links for qemu-ifup/down in qemu-system.links + - remove (debian's original) qemu-ifup from qemu-system.install + * debian/qemu-system.postinst + - udevadm trigger to fix up /dev/kvm perms + - make the 'qemu' symlink point to qemu-system-x86_64, not -i386. + * debian/qemu-system.links: + - point 'kvm' to qemu-system-x86_64 + - remove pxe-virtio, pxe-e1000 and pxe-rtl8139 links (which conflict + with ones from kvm-ipxe). We may want to move the links from kvm-ipxe + back to qemu-system at some point. + - add qemu-ifdown and qemu-ifup links + * debian/qemu-system.install: + - remove /etc/qemu-ifup link + - add /etc/sysctl.d/30-qemu-kvm.conf + + [ Adam Conrad ] + * Appease apt-get's dist-upgrade resolver by creating a qemu-common + transitional package to upgrade more gracefully to qemu-keymaps. + * Move all the empty transitional packages to the oldlibs section. + * Restore the versioned dep from qemu-kvm (and kvm) to qemu-system. 
+ + -- Serge Hallyn Fri, 04 Jan 2013 08:50:24 -0600 + +qemu (1.2.0+dfsg-1~exp1) UNRELEASED; urgency=low + + [ Michael Tokarev ] + * new upstream version (1.3.0) + (Closes: #676374, #622319, #597527, #593547, #660154) + - Removed patches included upstream: + do-not-include-libutil.h.patch + configure-nss-usbredir.patch + tcg_s390-fix-ld_st-with-CONFIG_TCG_PASS_AREG0.patch + net-add--netdev-options-to-man-page.patch + - update 02_kfreebsd.patch + - do not build mpc8544ds.dtb + - include new targets + * Cleaned up the build system ALOT. Larger changes: + - used explicit lists of emulated targets in debian/rules + and generate everything else from there, instead of repeating + these lists in lots of places. + - stop using debian/$pkg.manpages and other auxilary files like this, + moving eveything to debian/$pkg.install, because with the number + of packages growing, amount of these small files becomes very + large and the result is difficult to maintain. + * ship forgotten target-x86_64.conf in qemu-system. + * ship virtfs-proxy-helper in qemu-utils. + * stop shipping tundev.c, since it does not reflect the reality for + a long time now (Closes: #325761, #325754). + * re-introduce support parallel build using DEB_BUILD_OPTIONS=parallel=N, + this time by adding to $MAKEFLAGS instead of passing down to submakes + * build-depend on libcap-ng-dev (for virtfs-proxy-helper) + + [ Vagrant Cascadian ] + * Add libcap-dev to Build-Depends to support virtfs-proxy-helper. + + -- Michael Tokarev Sun, 30 Dec 2012 01:52:21 +0400 + +qemu (1.1.2+dfsg-6a) unstable; urgency=low + + * reupload to remove two unrelated files slipped in debian/ + + -- Michael Tokarev Mon, 18 Mar 2013 10:09:37 +0400 + +qemu (1.1.2+dfsg-6) unstable; urgency=low + + * another bugfix for USB, upstream from early days of past-1.1. + usb-split-endpoint-init-and-reset.patch. With certain redirected + to guest USB devices, qemu process may crash: + + usb_packet_complete: Assertion `((&ep->queue)->tqh_first) == p' failed. + + The patch fixes this by de-coupling reset and complete paths. + Big thanks goes to Joseph Price who found the fix by doing a + reverse git bisection. + (Closes: #701926) + + -- Michael Tokarev Mon, 18 Mar 2013 09:07:24 +0400 + +qemu (1.1.2+dfsg-5) unstable; urgency=low + + * fix USB regression introduced in 1.1 (Closes: #683983) + uhci-don-t-queue-up-packets-after-one-with-the-SPD-flag-set.patch + Big thanks to Peter Schaefer (https://bugs.launchpad.net/bugs/1033727) + for the help identifying the fix. + + -- Michael Tokarev Mon, 14 Jan 2013 12:20:29 +0400 + +qemu (1.1.2+dfsg-4) unstable; urgency=medium + + * linux-user-fix-mips-32-on-64-prealloc-case.patch (Closes: #668658) + * e1000-discard-oversized-packets-based-on-SBP_LPE.patch: the second + half of the fix for CVE-2012-6075. (Finally Closes: #696051) + + -- Michael Tokarev Wed, 09 Jan 2013 23:05:17 +0400 + +qemu (1.1.2+dfsg-3) unstable; urgency=low + + * add build-dependency on libcap-dev [linux-any] to enable virtfs support + which has been dropped in 1.1. (Closes: #677654) + * intel_hda-do-not-call-msi_reset-when-only-device-state-needs-resetting.patch + patch to fix Fixing reset of MSI function in intel-hda virtual device. + The fix (applied to stable-1.1.1) was partially wrong, as it actually + added the msi_reset() call to two code paths instead of one as planned. + Fix this by splitting the function in question into two parts. 
+ (Closes: #688964) + * blockdev-preserve-readonly-and-snapshot-states-across-media-changes.patch: + allow opening of read-only cdrom images/devices (Closes: #686776) + * ahci-properly-reset-PxCMD-on-HBA-reset.patch: fix windows install on ahci + (Closes: #696052) + * e1000-discard-packets-that-are-too-long-if-not-SBP-and-not-LPE.patch: + discard too long rx packets which may overflow guest buffer + (Closes: #696051) + * eepro100-fix-network-hang-when-rx-buffers-run-out.patch: + fix e100 stall (Closes: #696061) + * fix possible network stalls/slowness in e1000 device emulation: + net-notify-iothread-after-flushing-queue.patch + e1000-flush-queue-whenever-can_receive-can-go-from-false-to-true.patch + (Closes: #696063) + * fixes-related-to-processing-of-qemu-s-numa-option.patch: + fixes numa handling (Closes: #691343) + * qcow2-fix-avail_sectors-in-cluster-allocation-code.patch: + fixes data corruption in stacked qcow2 (Closes: #695905) + * qcow2-fix-refcount-table-size-calculation.patch: another possible + corruption or crash in qcow2 (Closes: #691569) + * tap-reset-vnet-header-size-on-open.patch: always ensure tap device is + in known state initially (Closes: #696057) + * vmdk-fix-data-corruption-bug-in-WRITE-and-READ-handling.patch: + possible data corruption bug in vmdk image format (Closes: #696050) + + -- Michael Tokarev Sun, 16 Dec 2012 23:08:40 +0400 + +qemu (1.1.2+dfsg-2) unstable; urgency=low + + * remove debian/patches/fix-armhf-prctl.patch, it is included + upstream in 1.1.0 version and is misapplied since 1.1.0~rc3+dfsg-1. + * drop -jN passing to downstream makes, as it breaks dpkg-buildpackage -j + and actually breaks build (Closes: #597524 - said to be fixed in 0.14.1 + but was still present) + * add revert-serial-fix-retry-logic.patch that restores + old (semi-)working behavour of a virtual serial port. + + -- Michael Tokarev Wed, 19 Sep 2012 13:54:05 +0400 + +qemu (1.1.2+dfsg-1) unstable; urgency=low + + [Michael Tokarev] + * new upstream stable/bugfix release, fixing a LOT of bugs, + including CVE-2012-3515 (Closes: #686973, #681985) + * bump versioned depends of seabios to 1.7.0~, since this version ships + kvmvapic.bin. + * ship /usr/share/qemu/qemu-icon.bmp (Closes: #681317) + * do not build-depend on ceph (librbd-dev librados-dev), since ceph is + having longstanding issues in wheezy. + * add tcg_s390-fix-ld_st-with-CONFIG_TCG_PASS_AREG0.patch - upstream fix + to un-break s390[x] emulation code. Similar fixes were included for + other platforms in 1.1.2 changeset. Without this fix, qemu is basically + useless on s390. + * document -netdev option in the manpage, a long-standing omission + (net-add--netdev-options-to-man-page.patch) + + [Vagrant Cascadian] + * qemu-system: Add symlinks for extboot.bin, kvmvapic.bin and vapic.bin to + binaries shipped in seabios. Closes: #678217, #679004. + * qemu-system: Remove dead link for ne2k_isa.rom, which is not included in + ipxe. Closes: #679004. + * qemu-system: Bump versioned openbios-sparc and openbios-ppc Depends to + 1.0+svn1060, to ensure we use at least version which is used by upstream. + Wheezy already has the right version, but we should not break partial + upgrades. + + -- Michael Tokarev Sun, 09 Sep 2012 18:52:57 +0400 + +qemu (1.1.0+dfsg-1) unstable; urgency=low + + [ Vagrant Cascadian ] + * New upstream release. (Closes: #655604, #655145) + + [ Michael Tokarev ] + * do-not-include-libutil.h.patch - don't include libutil.h&Co when not + needed. Fixes FTBFS on kFreebsd with recent libbsd-dev. 
+ * drop libbsd-dev support on kFreebsd - no longer needed. + * do not build USB host support on kFreebsd (qemu uses obsolete, + now removed, USB API). Use the same hack/technique as FreeBSD + qemu port does -- changing HOST_USB to "stub" after configure run. + + -- Vagrant Cascadian Thu, 07 Jun 2012 13:44:26 -0700 + +qemu (1.1.0~rc3+dfsg-1) experimental; urgency=low + + * New upstream release candidate. + * debian/patches: + - Update 02_kfreebsd. + - Remove dont-block-sigchld, applied upstream. + - Update configure-nss-usbredir. + - Update fix-armhf-prctl. + * debian/rules: Remove --disable-darwin-user from configure, as it is no + longer present. + * Apply patch to qemu-make-debian-root to improve argument handling. + (Closes: #671723). Thanks to Askar Safin. + * qemu-utils: Add recommends on sharutils, used by qemu-make-debian-root + (Closes: #660296). + * Add Build-Depends on libusbredirparser-dev to support usbredir protocol. + * Add Build-Depends on libbsd-dev for kfreebsd. + + -- Vagrant Cascadian Wed, 30 May 2012 20:24:59 -0700 + +qemu (1.0.1+dfsg-1) unstable; urgency=low + + [ Aurelien Jarno ] + * New upstream stable version: + - remove debian/patches/fix-malta-i8259 + - remove debian/patches/qemu-ifunc-ppc.patch + - remove debian/patches/x86-fix-cmpxchg.patch + + [ Michael Tokarev ] + * apply patch to change backticks `` in debian/rules variables + to $(shell) construct, by Allard Hoeve. (Closes: #660133) + * depend on vgabios >= 0.6c-3~ not 0.6c-3, to assist backporting + + [ Hector Oron ] + * Fix prctl syscall (Closes: #656926, #651083). + + [ Vagrant Cascadian ] + * Update to Standards-Version 3.9.3, no changes necessary. + + -- Vagrant Cascadian Mon, 05 Mar 2012 13:05:14 -0800 + +qemu (1.0+dfsg-3) unstable; urgency=low + + [ Aurelien Jarno ] + * Add a build-depends on libfdt-dev to enable some more emulated machines. + * Really add binfmt support for s390x. + + [ Michael Tokarev ] + * Depend on ipxe-qemu | ipxe (<< 1.0.0+git-20120202.f6840ba-2) + after ipxe package split. This will probably need to be changed + to just ipxe-qemu once it will be landed properly. + + [ Vagrant Cascadian ] + * Apply patch to use dpkg-buildflags (Closes: #656276). + Thanks to Moritz Muehlenhoff. + + -- Vagrant Cascadian Mon, 06 Feb 2012 17:56:15 -0800 + +qemu (1.0+dfsg-2) unstable; urgency=low + + [ Aurelien Jarno ] + * Add patch from upstream to fix cmpxchg on x86. + * Add patch to not link user builds with NSS (Closes: #648202). + * Add binfmt support for s390x. + * Bump depends on openbios-ppc and openbios-sparc on versions + compatible with version 1.0. + * Build on s390x (Closes: #651048). + + [ Vagrant Cascadian ] + * qemu-make-debian-root: Fix argument processing to handle when both -s and + -k are specified. Thanks to Mats Erik Andersson (Closes: #638047). + * Add Patch from upstream to fix regression on malta with i8259 interrupts. + * qemu-debootstrap: Add support for armhf and s390x. + * debian/rules: remove config.log in the clean target. + * qemu-make-debian-root: Use debootstrap's minbase variant, instead of a + long list of excludes. + + -- Vagrant Cascadian Mon, 09 Jan 2012 16:01:17 -0800 + +qemu (1.0+dfsg-1) experimental; urgency=low + + * New upstream version. + * Add build-dep on libxen-dev to enable xen support. + * Add build-dep on libiscsi-dev to enable iscsi support. + * Add patch from upstream to not block SIGCHLD (Closes: #618743). 
+ + -- Vagrant Cascadian Fri, 30 Dec 2011 16:12:03 -0800 + +qemu (1.0~rc4+dfsg-1) experimental; urgency=low + + * New upstream version: + - Fixes CVE-2011-4111. + * remove patches applied upstream: + - security/leftover.patch + - Move_QEMU_INCLUDES_before_QEMU_CFLAGS + - runnning-typo.patch + * Install new qemu-system variants: + xtensa, xtensaeb, alpha + * debian/rules: Drop hack to rename "qemu" binary to "qemu-system-i386", as + upstream now does the same. + * Enable spice support on amd64: + - Add Build-Depends: libspice-server-dev, libspice-protocol-dev + * debian/rules: use dh_prep instead of "dh_clean -k", which is deprecated. + * qemu-system: Bump dependency on seabios to 1.6.3~. + + -- Vagrant Cascadian Mon, 28 Nov 2011 23:40:24 -0800 + +qemu (0.15.1+dfsg-3) unstable; urgency=low + + * Add patch from upstream to fix FTBFS on ia64. + + -- Vagrant Cascadian Fri, 09 Dec 2011 23:48:21 -0800 + +qemu (0.15.1+dfsg-2) unstable; urgency=low + + * Add patch that fixes a buffer overrun (CVE-2011-4111). + * Enable spice support on amd64: + - Add Build-Depends: libspice-server-dev, libspice-protocol-dev + * debian/rules: Use dh_prep instead of "dh_clean -k", which is deprecated. + + -- Vagrant Cascadian Mon, 28 Nov 2011 20:34:50 -0800 + +qemu (0.15.1+dfsg-1) unstable; urgency=low + + * New upstream version. + + -- Vagrant Cascadian Sun, 06 Nov 2011 10:37:31 -0800 + +qemu (0.15.0+dfsg-1) unstable; urgency=low + + * New upstream version. + * Install new qemu-system, qemu-user and qemu-user-static variants: + lm32, microblazeel, s390x, unicore32 + * Patch from upstream to set QEMU_INCLUDES before QEMU_CFLAGS. + * Update debian/watch to check http://qemu.org/download. + + -- Vagrant Cascadian Mon, 03 Oct 2011 12:29:18 -0700 + +qemu (0.15.0~rc2+dfsg-1) experimental; urgency=low + + * New upstream version. + * qemu-debootstrap: Return the exit status of debootstrap instead of + hard-coding a sucessful exit status. + * Build-depend on librbd-dev. + * Remove qemu-mipsel-debian-rootfs.patch, a variation was applied upstream. + * Remove pcnet-ipxe patch, applied upstream. + * Add Build-Depends on python. + * Rename pxe rom symlinks to match upstream rename. + + -- Vagrant Cascadian Sun, 07 Aug 2011 13:27:43 +0200 + +qemu (0.14.1+dfsg-3) unstable; urgency=low + + [ Aurelien Jarno ] + * Add patches/qemu-ifunc-ppc.patch and patches/qemu-ifunc-sparc.patch + to fix FTBFS on ppc and sparc. + + [ Vagrant Cascadian ] + * Apply patch to fix qemu-user-static mipsel emulation (Closes: #562887). + * Drop support for esd (Closes: #633390). Thanks to Adrian Bunk. + * Add dummy debian/rules build-indep/build-arch targets to resolve lintian + warnings and future policy requirements. + * Remove needless mention of "Author(s)" which triggers a lintian warning. + * Fix maintainer-script-without-set-e lintian checks. + * Fix hyphen-used-as-minus-sign lintian check for qemu-debootstrap manpage. + + -- Vagrant Cascadian Sat, 23 Jul 2011 10:18:37 +0200 + +qemu (0.14.1+dfsg-2) unstable; urgency=low + + * Add override for qemu-user-static binaries which embed needed libraries. + * Add qemu-debootstrap manpage. + * Add patch to fix typo in qemu-system-* (runnning -> running). + * Update to Standards-Version 3.9.2, no changes necessary. + + -- Vagrant Cascadian Sat, 02 Jul 2011 22:29:17 -0700 + +qemu (0.14.1+dfsg-1) unstable; urgency=low + + * New upstream version. + * Depend on ipxe instead of etherboot-qemu. + * Add pcnet-ipxe.patch from upstream to fix ipxe with pcnet nic. 
+ * Add symlink for network boot with eepro100 cards (i82551, i82557b, + i82559er). + * Drop 01_rdb.patch, applied upstream. + * Add support for building on armhf architecture. + * Support debootstrap options that require arguments in qemu-debootstrap. + Thanks to Stefano Rivera for the patch. Closes: #605660. + + -- Vagrant Cascadian Fri, 01 Jul 2011 22:49:40 -0700 + +qemu (0.14.0+dfsg-5.1) unstable; urgency=low + + * Non-maintainer upload. + * Replace "librados1-dev" by "librados-dev" in Build-Dependencies. + + -- Mehdi Dogguy Fri, 29 Apr 2011 17:45:05 +0200 + +qemu (0.14.0+dfsg-5) unstable; urgency=low + + * Don't register qemu-mips(el) with binfmt on mips(el). Closes: + #618369. + + -- Aurelien Jarno Thu, 17 Mar 2011 20:13:27 +0100 + +qemu (0.14.0+dfsg-4) unstable; urgency=low + + * Reupload without automatically generated patch + debian-changes-0.14.0+dfsg-3. + + -- Aurelien Jarno Mon, 28 Feb 2011 15:10:04 +0100 + +qemu (0.14.0+dfsg-3) unstable; urgency=low + + [ Aurelien Jarno ] + * Depends on vgabios (>= 0.6c-3) and add symlinks for qxl, stdvga and + vmware bioses. Closes: #614252, #614169. + * Tighten build-depends on linux-libc-dev to (>= 2.6.34), to get + vhost-net support. + * Build-depends on xfslibs-dev in order to get TRIM support on XFS + filesystems. + * Build-depends on librados1-dev to get rdb support. Closes: #614150. + + -- Aurelien Jarno Mon, 28 Feb 2011 09:06:00 +0100 + +qemu (0.14.0+dfsg-2) unstable; urgency=low + + [ Aurelien Jarno ] + * Tighten dependencies on openbios-ppc, openbios-sparc and seabios to + the versions in upstream 0.14.0. + * patches/02_kfreebsd.patch: don't consider futimens/utimensat available + if it is a stub. + + -- Aurelien Jarno Sun, 20 Feb 2011 00:36:20 +0100 + +qemu (0.14.0+dfsg-1) unstable; urgency=low + + [ Vagrant Cascadian ] + * New upstream release candidate version. + * qemu-user-static: + - Drop binfmt support for emulating amd64 on i386, as it is broken and + including it interferes with environments capable of running amd64 + natively. Closes: #604712. + - Remove binfmt support for installed targets in postinst before installing + supported targets, to ensure no-longer-supported targets are actually + removed. + - Remove binfmt support for installed targets in prerm. + + [ Aurelien Jarno ] + * Fix configuration files directory. Closes: #600735. + * Enable AIO support. + + [ Vagrant Cascadian ] + * Update debian/copyright to refer to upstream git repositry and clarify + which binary blobs are removed to make the dfsg-free tarball. + * Refresh debian/patches/security/leftover.patch. + + -- Vagrant Cascadian Fri, 18 Feb 2011 21:07:01 -0800 + +qemu (0.13.0+dfsg-2) experimental; urgency=low + + * Fix Build-Depends to exclude kfreebsd-any wildcards where appropriate. + Thanks to Jon Severinsson. Closes: #592215 + + -- Vagrant Cascadian Sun, 24 Oct 2010 09:02:27 -0700 + +qemu (0.13.0+dfsg-1) experimental; urgency=low + + [ Aurelien Jarno ] + * mips/mipsel binfmt registration: also match EI_ABIVERSION=1, used by + OpenWRT. Closes: #591543. + * Build-depends on libattr1-dev to enable VirtFS (9p) support. Closes: + #592215. + * Use architecture wildcards instead of explicit architecture list. + + [ Vagrant Cascadian ] + * Switch to source format 3.0 (quilt). + * New upstream version. + * Drop 99_stable.diff, applied in new upstream version. + * debian/watch: update to properly handle upstream rc versions. 
+ + -- Vagrant Cascadian Mon, 18 Oct 2010 10:22:44 -0700 + +qemu (0.13.0~rc0+dfsg-2) experimental; urgency=low + + [ Aurelien Jarno ] + * Add ia64 to the list of supported architectures. + * Bump Standards-Version to 3.9.1 (no changes). + * Update seabios, openbios-ppc and openbios-sparc dependencies. + * Add 99_stable.diff to update from the stable-0.13 branch: + - Fix sparc FTBFS. Closes: #591249. + * Add qemu-debootstrap from Loïc Minier in qemu-static. Closes: + #572952. + + -- Aurelien Jarno Tue, 03 Aug 2010 07:25:06 +0200 + +qemu (0.13.0~rc0+dfsg-1) experimental; urgency=low + + * New upstream release candidate version. + * Do not configure audio drivers for qemu-user and qemu-user-static targets. + * Remove patches: + - 05_bochs_vbe, applied upstream. + - 06_sh4, applied upstream. + - 03_support_pselect_in_linux_user_arm, upstream implemented a simpler + workaround. + * Add Build-Depends on texinfo. + * Drop libqemu-dev package. + + -- Vagrant Cascadian Thu, 29 Jul 2010 19:51:01 -0400 + +qemu (0.12.5+dfsg-3) unstable; urgency=medium + + * qemu-user-static: + - Drop binfmt support for emulating amd64 on i386, as it is broken and + including it interferes with environments capable of running amd64 + natively. Closes: #604712. + - Remove binfmt support for installed targets in postinst before installing + supported targets, to ensure no-longer-supported targets are actually + removed. + - Remove binfmt support for installed targets in prerm. + + -- Vagrant Cascadian Sun, 28 Nov 2010 15:57:11 -0800 + +qemu (0.12.5+dfsg-2) unstable; urgency=low + + * mips/mipsel binfmt registration: also match EI_ABIVERSION=1, used by + OpenWRT. Closes: #591543. + * Update 99_stable.diff from the stable branch: + - Fix windows XP boot with libvirt. Closes: bug#579166. + + -- Aurelien Jarno Tue, 17 Aug 2010 12:56:30 +0200 + +qemu (0.12.5+dfsg-1) unstable; urgency=low + + * New upstream stable version. + * qemu-system: don't suggests kqemu-source. Closes: bug#589217. + * qemu-keymaps: fix short description. + + -- Aurelien Jarno Fri, 23 Jul 2010 19:02:14 +0200 + +qemu (0.12.4+dfsg-4) unstable; urgency=high + + * Update debian/copyright. Closes: bug#588911. + * Update 99_stable.diff from the stable branch: + - Add documentation for the stdio signal option. Closes: bug#588514. + * Split out keymaps in the qemu-keymaps package. Closes: bug#559174. + * Bump Standards-Version to 3.9.0 (no changes). + + -- Aurelien Jarno Wed, 14 Jul 2010 15:13:04 +0200 + +qemu (0.12.4+dfsg-3) unstable; urgency=low + + * Update 99_stable.diff from the stable branch. + + -- Aurelien Jarno Wed, 16 Jun 2010 23:07:36 +0200 + +qemu (0.12.4+dfsg-2) unstable; urgency=low + + [ Vagrant Cascadian ] + * qemu-system: Depend on etherboot-qemu package for PXE roms. + Closes: #552406. + + [ Aurelien Jarno ] + * Add 99_stable.diff to update from the stable branch. + * Use --with-pkgversion to set the packaging version. + + -- Aurelien Jarno Wed, 02 Jun 2010 21:24:26 +0200 + +qemu (0.12.4+dfsg-1) unstable; urgency=low + + * New upstream stable version: + - remove debian/patches/01_redir_doc.patch + - remove debian/patches/04_cmd646.patch + - update debian/patches/06_sh4.diff + + -- Aurelien Jarno Fri, 07 May 2010 19:43:48 +0200 + +qemu (0.12.3+dfsg-4) unstable; urgency=low + + * Add 05_bochs_vbe.diff backported from uptream to support vgabios + 0.6c. + * Add 06_sh4.diff containing a few SH4 specific fixes backported from + upstream. 
+ + -- Aurelien Jarno Fri, 09 Apr 2010 01:44:38 +0200 + +qemu (0.12.3+dfsg-3) unstable; urgency=low + + * Add symlink for seabios's multiboot.bin. + * Change configure-stamp depends to non-phony target $(QUILT_STAMPFN). + Closes: #574444. + * Fix a crash in cmd646 bmdma code that can be triggered by the guest. + Closes: #574539. + * Explain that KQEMU support has been removed in qemu-system.NEWS. + * Build-Conflicts with oss4-dev, as this package install a broken + header. Closes: #575320. + + -- Aurelien Jarno Sat, 03 Apr 2010 17:07:23 +0200 + +qemu (0.12.3+dfsg-2) unstable; urgency=low + + [ Aurelien Jarno ] + * Disable KVM support on PowerPC, as it needs at least 2.6.33 kernel + headers. + + [ Vagrant Cascadian ] + * Support pselect for linux-user arm target. Patch by Michael Casadevall. + * Add symlink for seabios's linuxboot.bin to fix -kernel option. Thanks to + Sami Liedes. Closes: #574174. + * qemu-system: Switch back to using versioned dependencies for vgabios, + bochsbios, openhackware, openbios-ppc and openbios-sparc rather than + recommends/conflicts, to ensure a proper upgrade path. Closes: #573397. + Reopens: #436094. + + -- Vagrant Cascadian Fri, 19 Mar 2010 09:31:29 -0700 + +qemu (0.12.3+dfsg-1) unstable; urgency=low + + [ Vagrant Cascadian ] + * New upstream version: + - Fix access to block devices on GNU/kFreeBSD. Closes: #558447. + - Correctly update clock when waking up from sleep. Closes: #414165. + - Slirp works with other network interfaces. Closes: #407702. + - Add the possibility to specify a host to bind to with the -redir + option. Closes: #366847. + - Fix cirrus graphics card with windows 98. Closes: #522124. + * Indicate repackaged upstream tarball by adding "+dfsg" to the version. + Closes: #388740. + * Remove second libgnutls-dev from build depends. + * Update debian/watch with current location of tarball releases. + * Drop binutils-gold patch, applied upstream. + * Switch from bochsbios to seabios. Update bios.bin symlink and + recommends/conflicts. + * Bump Standards-Version to 3.8.4 (no changes). + * Update my email address to vagrant@debian.org. + + [ Aurelien Jarno ] + * Create a kvm group in postinst and set the group of /dev/kvm to kvm. + Closes: #570544. + * Add mips and mipsel to the list of supported architectures. + * Add patches/01_redir_doc.patch to fix a mistake in the redirection + documentation. + * Add patches/02_kfreebsd.patch to use the legacy USB stack on + GNU/kFreeBSD. + * Force the depends from qemu on qemu-system, qemu-user and qemu-utils + to (>= {source:Version}). + * Update openbios related conflicts. + + -- Vagrant Cascadian Sun, 07 Mar 2010 09:20:43 -0800 + +qemu (0.11.1-2) unstable; urgency=low + + * Add versioned build-depends on etherboot. + * Add PXE boot support for virtio network adapters. + * Move qemu-make-debian-root to qemu-utils package, as it only produces disk + images not useable by qemu-user. Lower recommends on debootstrap to + suggests. Add Conflicts and Replaces on older versions of qemu-user. + * Register /usr/bin/qemu with the alternatives system. Closes: #413840. + * qemu: Add ${misc:Depends} so that debhelper can add dependencies if needed. + + -- Vagrant Cascadian Fri, 08 Jan 2010 09:26:11 -0800 + +qemu (0.11.1-1) unstable; urgency=low + + [ Aurelien Jarno ] + * New upstream version. + * Drop build-depends on libfreebsd-dev on GNU/kFreeBSD. + * qemu: suggests qemu-user-static. + * qemu-user-static: register QEMU with binfmt mecanism. Closes: + #306637. + * Bump conflicts on openbios-ppc to (<< 1.0+svn505-1). 
+ * Add 01-binutils-gold.diff to fix FTBFS with binutils-gold. Closes: + #556301. + * Add sparc64 support. + * Use new roms location in etherboot package. + + [ Vagrant Cascadian ] + * qemu-utils, qemu-user, qemu-system: Set both Conflicts and Replaces for + older versions of qemu to ensure proper upgrade path. + * Add versioned build-dep on linux-libc-dev to ensure that KVM support is + enabled. + * qemu-system: Lower dependencies on vgabios, bochsbios, openhackware, + openbios-ppc and openbios-sparc to recommends. Conflict with versions that + are incompatible. Closes: #436094. + * qemu-utils: Tighten the versioned conflicts with kvm, as not all older + versions actually conflict. + * qemu-make-debian-root: Apply modified patch from Nicolas Boulenguez that + documents usage of -s, exits on error, and mentions that it is normally + run as root. Closes: #447034. + + -- Aurelien Jarno Sun, 27 Dec 2009 12:09:11 +0100 + +qemu (0.11.0-6) unstable; urgency=low + + * Update from stable-0.11 branch. + * qemu-utils: add Replaces: qemu (<< 0.11.0-2). Closes: #556627, + #556860. + + -- Aurelien Jarno Fri, 20 Nov 2009 08:24:32 +0100 + +qemu (0.11.0-5) unstable; urgency=low + + * Change the Conflicts: into Replaces: to handle the move of /etc/ifup + from one package to another correctly. Tighten the version. Closes: + #556627. + + -- Aurelien Jarno Wed, 18 Nov 2009 16:30:39 +0000 + +qemu (0.11.0-4) unstable; urgency=low + + [ Aurelien Jarno ] + * Update from stable-0.11 branch. + * Default to alsa before OSS. Closes: #451234. + + [ Vagrant Cascadian ] + * Updated Vcs-Git to a url more likely to work with debcommit. + + -- Aurelien Jarno Wed, 18 Nov 2009 16:26:17 +0100 + +qemu (0.11.0-3) unstable; urgency=low + + * qemu-system, qemu-user: fix conflicts version. Closes: #556627. + * qemu-utils: conflicts with kvm (<= 85+dfsg-4.1), as it also provides + qemu-io. + + -- Aurelien Jarno Tue, 17 Nov 2009 09:49:24 +0100 + +qemu (0.11.0-2) unstable; urgency=low + + * Update from stable-0.11 branch. + * Move qemu-user.1 and qemu-make-debian-root.8 to the qemu-user + package. + * Add build-depends on uuid-dev. + * Use a specific install file for qemu-utils on GNU/kFreeBSD. + * Call dh_install with -s. + + -- Aurelien Jarno Tue, 17 Nov 2009 09:11:29 +0100 + +qemu (0.11.0-1) unstable; urgency=low + + [ Aurelien Jarno ] + * New upstream version. + - Documents virtio NIC. Closes: #541182. + - Increase the maximum TCG op a target instruction op can expand to. + Closes: #530645, #542297. + - KVM is enabled by default. Closes: #520894. + - Fix CVE-2009-3616. Closes: #553589. + * Drop 65_kfreebsd.patch. + * Split the qemu package and use out of tree building. Based on a patch + from Vagrant Cascadian. Closes: #524774. + * Only recommends debootstrap for qemu-user and qemu-user static. Closes: + #543356. + * Remove /usr/share/qemu/proll.elf. Closes: bug#542247. + * Add build-depends on libcurl4-gnutls-dev, libgnutls-dev and libsasl2-dev + to enable new upstream features. + * Bump Standards-Version to 3.8.3 (no changes). + * Update Vcs-* fields to point to the new git repository. + * Add Vagrant Cascadian to uploaders, and set + DM-Upload-Allowed to yes. + + -- Aurelien Jarno Mon, 26 Oct 2009 10:17:57 +0000 + +qemu (0.10.50+git20090729-1) experimental; urgency=low + + [ Josh Triplett ] + * Remove myself from Uploaders. 
+ + [ Riku Voipio ] + * new upstream RC version + * nuke all linux-user patches (applied upstream) + 06_exit_segfault + 12_signal_powerpc_support + 21_net_soopts + 30_syscall_ipc + 32_syscall_sysctl + 35_syscall_sockaddr + 48_signal_terminate + 55_unmux_socketcall + * nuke all other applied-upstream patches + 01_nostrip (better version upstream) + 07_i386_exec_name (can be reintroduced in debian/rules) + 50_linuxbios_isa_bios_ram (shouldn't be needed anymore) + 51_linuxbios_piix_ram_size (applied) + 56_dhcp (crap) + 60_ppc_ld (reintroduce if needed) + 64_ppc_asm_constraints (ditto) + 66_tls_ld.patch (ditto) + 81_compile_dtb.patch (applied upstream) + 82_qemu-img_decimal (ditto) + * move to git + * simplify build rules + * Correct my email address + + -- Riku Voipio Wed, 29 Jul 2009 13:28:05 +0300 + +qemu (0.10.6-1) unstable; urgency=low + + [ Josh Triplett ] + * Remove myself from Uploaders. + + [ Aurelien Jarno ] + * New upstream version. + * Bump Standards-Version to 3.8.2 (no changes). + * Update debian/watch (closes: bug#538781). + + -- Aurelien Jarno Fri, 31 Jul 2009 15:25:36 +0200 + +qemu (0.10.5-1) unstable; urgency=low + + * New upstream version. + + -- Aurelien Jarno Sun, 24 May 2009 16:15:35 +0200 + +qemu (0.10.4-1) unstable; urgency=low + + * New upstream version. + * debian/NEWS.Debian: new file, describing the cache policy options + (closes: bug#526832). + * debian/patches/70_versatile_memsize.patch: new patch to set a upper + limit on the memory size of the versatile boards (closes: + bug#527264). + + -- Aurelien Jarno Tue, 12 May 2009 18:31:29 +0200 + +qemu (0.10.3-1) unstable; urgency=low + + * New upstream version. + * Tighten dependency on bochsbios. + + -- Aurelien Jarno Sat, 02 May 2009 10:14:21 +0200 + +qemu (0.10.2-2) unstable; urgency=low + + * Add missing comma in build-depends (closes: bug#524207). + * Tighten dependency on vgabios. + + -- Aurelien Jarno Wed, 15 Apr 2009 22:30:43 +0200 + +qemu (0.10.2-1) unstable; urgency=low + + [ Aurelien Jarno ] + * New upstream stable release. + + -- Aurelien Jarno Tue, 07 Apr 2009 07:37:15 +0200 + +qemu (0.10.1-1) unstable; urgency=low + + [ Aurelien Jarno ] + * New upstream stable release: + - patches/80_stable-branch.patch: remove. + * debian/control: + - Remove depends on proll. + - Move depends on device-tree-compiler to build-depends. + - Bump Standards-Version to 3.8.1 (no changes). + * patches/82_qemu-img_decimal.patch: new patch from upstream to make + qemu-img accept sizes with decimal values (closes: bug#501400). + + -- Aurelien Jarno Sun, 22 Mar 2009 10:13:17 +0100 + +qemu (0.10.0-1) unstable; urgency=low + + [ Aurelien Jarno ] + * New upstream release: + - Fix fr-be keyboard mapping (closes: bug#514462). + - Fix stat64 structure on ppc-linux-user (closes: bug#470231). + - Add a chroot option (closes: bug#415996). + - Add evdev support (closes: bug#513210). + - Fix loop on symlinks in user mode (closes: bug#297572). + - Bump depends on openbios-sparc. + - Depends on openbios-ppc. + - Update 12_signal_powerpc_support.patch. + - Update 21_net_soopts.patch. + - Drop 44_socklen_t_check.patch (merged upstream). + - Drop 49_null_check.patch (merged upstream). + - Update 64_ppc_asm_constraints.patch. + - Drop security/CVE-2008-0928-fedora.patch (merged upstream). + - Drop security/CVE-2007-5730.patch (merged upstream). + * patches/80_stable-branch.patch: add patches from stable branch: + - Fix race condition between signal handler/execution loop (closes: + bug#474386, bug#501731). + * debian/copyright: update. 
+ * Compile and install .dtb files: + - debian/control: build-depends on device-tree-compiler. + - debian/patches/81_compile_dtb.patch: new patch from upstream. + - debian/rules: compile and install bamboo.dtb and mpc8544.dtb. + + -- Aurelien Jarno Sat, 07 Mar 2009 06:20:34 +0100 + +qemu (0.9.1+svn20090104-1) experimental; urgency=low + + [ Aurelien Jarno ] + * New upstream snapshot. + * Disable security/CVE-2008-0928-fedora.patch, it still breaks qcow + format. + + -- Aurelien Jarno Sun, 04 Jan 2009 16:31:40 +0100 + +qemu (0.9.1+svn20081223-1) experimental; urgency=low + + [ Aurelien Jarno ] + * New upstream snapshot. + - Fix CVE-2008-2382 + * Update patches/48_signal_terminate.patch. + * debian/rules: remove upstream flags from CFLAGS. + + -- Aurelien Jarno Tue, 23 Dec 2008 14:51:25 +0100 + +qemu (0.9.1+svn20081214-1) experimental; urgency=low + + [ Aurelien Jarno ] + * New upstream snapshot. + - Fix jmp im on x86_64 when executing 32-bit code. Fix grub + installation (Closes: bug#467148). + + -- Aurelien Jarno Sun, 14 Dec 2008 23:26:04 +0100 + +qemu (0.9.1+svn20081207-1) experimental; urgency=low + + [ Aurelien Jarno ] + * New upstream snapshot. + - Do not depend on gcc-3.4 anymore (Closes: bug#440425, bug#463066). + - Fix broken display introduced by CVE-2007-1320 (Closes: bug#422578). + * debian/control: remove build-dependency on gcc-3.4. + * debian/rules: remove code for dyngen targets. + * Split 90_security.patch into + - security/CVE-2007-5730.patch + - security/leftover.patch + * Replace 91_security.patch by security/CVE-2008-0928-fedora.patch taken + from fedora repository and enable it (Closes: #469649). + + [ Riku Voipio ] + * 2 patches gone, 19 to go: + - 10_signal_jobs.patch: drop, merged upstream + - 11_signal_sigaction.patch: drop, merged upstream + - series: update + + -- Aurelien Jarno Sun, 07 Dec 2008 19:40:09 +0100 + +qemu (0.9.1+svn20081128-1) experimental; urgency=low + + [ Aurelien Jarno ] + * New upstream snapshot. + - Include documentation for network downscript option (Closes: + bug#506994). + - Drop 00_bios.patch and pass --disable-blobs instead. + - Update 12_signal_powerpc_support.patch. + + [ Riku Voipio ] + * Drop 31_syscalls.patch as it makes no sense using host uselib to + load target code into qemu's host memoryspace. + + -- Aurelien Jarno Sat, 29 Nov 2008 09:04:41 +0100 + +qemu (0.9.1+svn20081112-1) experimental; urgency=low + + [ Aurelien Jarno ] + * New upstream snapshot. + - does not need a disk image anymore (Closes: bug#260935). + - 53_openbios_size.patch: drop (merged upstream). + - 90_security: update. + * debian/control: depend on openbios-sparc (>= 1.0~alpha2+20081109) + (Closes: bug#502411, bug#502414). + + -- Aurelien Jarno Sun, 09 Nov 2008 14:42:37 +0100 + +qemu (0.9.1+svn20081101-1) experimental; urgency=low + + [ Aurelien Jarno ] + * New upstream snapshot. + - fix a heap overflow in Cirrus emulation (CVE-2008-4539). + - 50_linuxbios_isa_bios_ram.patch: update. + - 90_security.patch: update. + + -- Aurelien Jarno Sat, 01 Nov 2008 09:26:45 +0100 + +qemu (0.9.1+svn20081023-1) experimental; urgency=low + + [ Aurelien Jarno ] + * New upstream snapshot. + - 12_signal_powerpc_support.patch: update. + - 50_linuxbios_isa_bios_ram.patch: update. + + -- Aurelien Jarno Thu, 23 Oct 2008 21:34:26 +0200 + +qemu (0.9.1+svn20081016-1) experimental; urgency=low + + [ Aurelien Jarno ] + * New upstream snapshot. + * patches/31_syscalls.patch: remove parts merged upstream. 
+ * debian/qemu-make-debian-root: + - Fix bug introduced when fixing bug#496394 (Closes: bug#502325). + + -- Aurelien Jarno Mon, 13 Oct 2008 23:11:15 +0200 + +qemu (0.9.1+svn20081012-1) experimental; urgency=low + + [ Riku Voipio ] + * Add a bunch of patches from scratchbox + - 44_socklen_t_check work better with badbehavin net apps + - 48_signal_terminate make qemu binary terminate on signals as expected + - 49_null_checks don't bother some syscalls when null/zero is passed + + [ Aurelien Jarno ] + * New upstream snapshot. + - alpha is now a TCG target. + - comma has been added to sendkey (closes: bug#414342). + * patches/31_syscalls.patch: remove parts merged upstream. + * patches/39_syscall_fadvise64.patch: remove (merged upstream). + * patches/90_security.patch: remove parts merged upstream. + * debian/control: build-depends on libbluetooth-dev. + + -- Aurelien Jarno Sun, 12 Oct 2008 18:46:54 +0200 + +qemu (0.9.1+svn20080905-1) experimental; urgency=low + + * New upstream snapshot. + - SH4 is now a TCG target. + * debian/watch: update URL location. + + -- Aurelien Jarno Tue, 02 Sep 2008 01:43:24 +0200 + +qemu (0.9.1+svn20080826-1) experimental; urgency=low + + * New upstream snapshot. + * debian/qemu-make-debian-root: + - Use mktemp instead of $$ to create temporary directories (Closes: + bug#496394). + * Ship a libqemu-dev package (Closes: bug#451618). + + -- Aurelien Jarno Tue, 26 Aug 2008 09:55:36 +0200 + +qemu (0.9.1+svn20080822-1) experimental; urgency=low + + * New upstream snapshot. + - Focus to monitor to ask password (Closes: bug#473240). + - TCG SPARC host support (Closes: bug#450817). + - Check KQEMU availability before allocating memory (Closes: bug#414566). + - Fix dead keys (Closes: bug#489594). + - Fix ES1370 emulation (Closes: bug#494462). + - New USB UHCI implemnation (Closes: bug#457651). + - Add debian/patches/00_bios.patch. + - Remove debian/patches/02_snapshot_use_tmpdir.patch (merged). + - Remove debian/patches/04_do_not_print_rtc_freq_if_ok.patch (merged). + - Remove patches/05_non-fatal_if_linux_hd_missing.patch (merged). + - Update debian/patches/07_i386_exec_name.patch + - Update debian/patches/12_signal_powerpc_support.patch + - Remove debian/patches/33_syscall_ppc_clone.patch (merged differently). + - Remove debian/patches/41_arm_fpa_sigfpe.patch (merged). + - Remove debian/patches/42_arm_tls.patch (merged differently). + - Update debian/patches/55_unmux_socketcall.patch. + - Remove debian/patches/63_sparc_build.patch (useless). + - Update debian/patches/65_kfreebsd.patch. + - Update debian/patches/66_tls_ld.patch. + - Remove debian/patches/70_manpage.patch (merged). + - Remove debian/patches/71_doc.patch (merged). + - Remove debian/patches/80_ui_curses.patch (merged). + - Remove debian/patches/81_mips32r2_fpu.patch (merged). + - Remove debian/patches/82_mips_abs.patch (merged). + - Remove debian/patches/83_usb-serial.patch (merged). + - Remove debian/patches/84_rtl8139.patch (merged). + - Remove debian/patches/85_vvfat.patch (merged). + - Remove debian/patches/86_df.patch (merged). + - Remove debian/patches/87_eoi.patch (merged). + - Remove debian/patches/88_dma.patch (merged). + - Remove debian/patches/89_braille.patch (merged). + - Remove debian/patches/92_no_shutdown.patch (merged). + - Remove debian/patches/93_tmpfs.patch (merged). + - Remove debian/patches/94_security.patch (merged). + * debian/README.source: new file. + * debian/patches/*: convert to patchlevel 1 (Closes: bug#484963). + * debian/control: + - Add build-depends on libesd0-dev. 
+ - Add build-depends on libpulse-dev. + - Add build-depends on libvdeplug2-dev. + - Add build-depends on etherboot. + - Update list of supported targets (Closes: bug#488339). + - Suggests kqemu-source. + - Bump Standards-Version to 3.8.0. + * debian/links: + - Add missing manpage symlinks. + * debian/rules: + - Enable audio drivers depending on the system. + - Enable DYNGEN targets depending on the system. + - Install PXE bios from etherboot (Closes: bug#412010). + - Don't ignore make clean errors. + - Don't build DYNGEN targets on kfreebsd-amd64 (Closes: bug#494353). + * debian/patches/22_net_tuntap_stall.patch: remove (outdated). + + -- Aurelien Jarno Fri, 22 Aug 2008 01:00:54 +0200 + +qemu (0.9.1-5) unstable; urgency=high + + [ Guillem Jover ] + * Add Homepage field. + * Add Vcs-Browser and Vcs-Svn fields. + * Remove packaging repository information from debian/copyright. + * Add former package co-maintainers to debian/copyright. + * Serialize patch and configure steps in debian/rules to support parallel + builds, as we are patching configure. + * Remove myself from Uploaders. + + [ Aurelien Jarno ] + * debian/patches/70_manpage.patch: remove curses documentation, it is already + in debian/patches/80_ui_curses.patch (Closes: bug#477369). + * debian/patches/94_security.patch: add format= to drive options + (CVE-2008-2004). + + -- Aurelien Jarno Mon, 28 Apr 2008 21:54:12 +0200 + +qemu (0.9.1-4) unstable; urgency=high + + * debian/patches/52_ne2000_return.patch: drop, the patch is wrong. + * Backports from upstream: + - Typo in curses_keys.h + - Documentation for the -curses option + - Fix broken absoluteness check for cabs.d.*. + - USB-to-serial device. + - rtl8139: fix endianness on big endian targets + - restore rw support for vvfat + - x86-64: recompute DF after eflags has been modified when emulating + SYSCALL + - ignore reads to the EOI register + - IDE: Improve DMA transfers by increasing the buffer size + - Braille device support + - Add -no-shutdown option (Closes: #326406) + - Ask to use "mount -o remount" instead of "umount" and "mount" + /dev/shm (Closes: #476539). + * debian/qemu.doc-base: fix section. + + -- Aurelien Jarno Sun, 20 Apr 2008 23:29:42 +0200 + +qemu (0.9.1-3) unstable; urgency=low + + [ Aurelien Jarno ] + * debian/patches/42_arm_tls.patch: fix to get qemu-system-arm working + again. (Closes: #471722). + * debian/patches/56_dhcp.patch: fix DHCP server to correctly support + MS-Windows guests. (Closes: #471452). + + -- Aurelien Jarno Wed, 19 Mar 2008 18:58:29 +0100 + +qemu (0.9.1-2) unstable; urgency=low + + [ Aurelien Jarno ] + * debian/patches/80_ui_curses.patch: pull new patch from upstream CVS + (Closes: #442274). + * debian/patches/65_kfreebsd.patch: link with -lfreebsd. (Closes: + #465932). + * debian/patches/81_mips32r2_fpu.patch: patch pulled from upstream + to fix FPU issue on MIPS32R2. + * debian/patches/42_arm_tls.patch: reenable, mistakenly disabled in the + previous upload. (Closes: #469743). + * debian/rules: fix parallel building. (Closes: #469981). + * debian/patches/07_i386_exec_name.patch: install the i386 emulator as + qemu-system-i386, and change qemu into a link pointing to the i386 + version. + * debian/README.Debian: add notes about qemu-system-ppc and video.x + (Closes: #388735). + * debian/patches/70_manpage.patch: describe the -curses option. + (Closes: #433658). + * debian/patches/71_doc.patch: fix the monitor change option. (Closes: + #467106). + * debian/patches/35_syscall_sockaddr.patch: fix sockaddr (Closes: + #469351). 
+ * debian/patches/43_arm_cpustate.patch: disable (Closes: #444171). + + -- Aurelien Jarno Mon, 17 Mar 2008 01:29:03 +0100 + +qemu (0.9.1-1) unstable; urgency=low + + [ Aurelien Jarno ] + * New upstream version. (Closes: #459801) + - Supports s390 host. (Closes: #441119) + - Fix PCI bar allocation. (Closes: #413315) + - Fix typo in keys name. (Closes: #426181) + - Fix segfault of qemu-i386 (Closes: #446868). + - debian/control: bump depends on openbios-sparc to + >= 1.0~alpha2+20080106. + - debian/patches/02_snapshot_use_tmpdir.patch: Refreshed. + - debian/patches/04_do_not_print_rtc_freq_if_ok.patch: Likewise. + - debian/patches/05_non-fatal_if_linux_hd_missing.patch: Likewise. + - debian/patches/06_exit_segfault.patch: Likewise. + - debian/patches/10_signal_jobs.patch: Likewise. + - debian/patches/11_signal_sigaction.patch: Likewise. + - debian/patches/12_signal_powerpc_support.patch: Likewise. + - debian/patches/21_net_soopts.patch: Likewise. + - debian/patches/30_syscall_ipc.patch: Likewise. + - debian/patches/31_syscalls.patch: Likewise. + - debian/patches/32_syscall_sysctl.patch: Likewise. + - debian/patches/33_syscall_ppc_clone.patch: Likewise. + - debian/patches/35_syscall_sockaddr.patch: Likewise. + - debian/patches/41_arm_fpa_sigfpe.patch: Likewise. + - debian/patches/42_arm_tls.patch: Likewise. + - debian/patches/50_linuxbios_isa_bios_ram.patch: Likewise + - debian/patches/51_linuxbios_piix_ram_size.patch: Likewise + - debian/patches/61_safe_64bit_int.patch: Removed, merged upstream. + - debian/patches/63_sparc_build.patch: Refreshed. + - debian/patches/80_ui_curses.patch: Likewise. + * debian/patches/90_security.patch: fix 64-bit overflow. (Closes: + #425634) + * debian/qemu-make-debian-root: add a -s option to create sparse + image. (Closes: #322325) + * debian/control: bump depends on bochsbios to >= 2.3.5-1. Use + BIOS-qemu-latest instead of BIOS-bochs-latest. (Closes: #402289, + #442822) + * debian/rules: build the non-dyngen part with default gcc. + * debian/rules: support DEB_BUILD_OPTIONS="parallel=n". + * debian/patches/70_manpage.patch: describe the arguments of the + -usbdevice option in the manpage. (Closes: #443801) + * debian/control: now using Standards-Version 3.7.3 (no changes needed). + * debian/control: build-depends on libgnutls-dev to enable TLS support + in VNC. + * debian/patches/01_nostrip.patch: don't strip binaries during make + install. (Closes: #437866) + * debian/patches/53_openbios_size.patch: increase maximum prom size to + support latest openbios. + + -- Aurelien Jarno Mon, 28 Jan 2008 21:24:14 +0100 + +qemu (0.9.0+20070816-1) unstable; urgency=low + + [ Guillem Jover ] + * New upstream snapshot. + - Fix hang on ARM during Etch installation. (Closes: #430164) + - Fix data corruption with qcow 2. (Closes: #440296) + - Fix errors with raw images > 4 GiB. (Closes: #425634) + - debian/patches/01_typo_qemu-img.patch: Removed, merged upstream. + - debian/patches/03_machines_list_no_error.patch: Likewise. + - debian/patches/36_syscall_prctl.patch: Likewise. + - debian/patches/37_syscall_mount.patch: Likewise. + - debian/patches/38_syscall_semctl.patch: Likewise. + - debian/patches/40_sparc_fp_to_int.patch: Likewise. + - debian/patches/44_arm_eabi_built_on_64bit_arches.patch: Likewise. + - debian/patches/62_linux_boot_nasm.patch: Likewise. + - debian/patches/04_do_not_print_rtc_freq_if_ok.patch: Synced. + - debian/patches/05_non-fatal_if_linux_hd_missing.patch: Likewise. + - debian/patches/31_syscalls.patch: Likewise. 
+ - debian/patches/35_syscall_sockaddr.patch: Likewise. + - debian/patches/42_arm_tls.patch: Likewise. + - debian/patches/43_arm_cpustate.patch: Likewise. + - debian/patches/51_linuxbios_piix_ram_size.patch: Likewise. + - debian/patches/55_unmux_socketcall.patch: Likewise. + - debian/patches/60_ppc_ld.patch: Likewise. + - debian/patches/65_kfreebsd.patch: Likewise. + - debian/patches/80_ui_curses.patch: Likewise. + - debian/patches/90_security.patch: Likewise. + * Remove Elrond and Guilherme de S. Pastore from Uploaders, with their + permission, and add Aurelien Jarno and Riku Voipio. + * Remove Tag field, this is better maintained outside of the package. + * Add openbios-sparc64 to qemu_bios_files in debian/rules. + + [ Aurelien Jarno ] + * Fix FTBFS on amd64. (Closes: #434296) + - Drop debian/patches/34_syscalls_types.patch + * debian/control: + - Suggest samba. (Closes: #430368) + * Add OpenBIOS for sparc. (Closes: #407076) + - debian/control: depends on openbios-sparc. + - debian/links: provide symlinks in /usr/share/qemu. + + -- Guillem Jover Tue, 04 Sep 2007 04:04:47 +0300 + +qemu (0.9.0-2) unstable; urgency=high + + [ Guillem Jover ] + * Fix several security issues. (Closes: #424070) + Thanks to Tavis Ormandy . + - Cirrus LGD-54XX "bitblt" heap overflow. CVE-2007-1320 + - NE2000 "mtu" heap overflow. + - QEMU "net socket" heap overflow. + - QEMU NE2000 "receive" integer signedness error. CVE-2007-1321 + - Infinite loop in the emulated SB16 device. + - Unprivileged "aam" instruction does not correctly handle the + undocumented divisor operand. CVE-2007-1322 + - Unprivileged "icebp" instruction will halt emulation. CVE-2007-1322 + - debian/patches/90_security.patch: New file. + * Enable adlib audio emulation. (Closes: #419170) + * Fix structure padding for target_eabi_flock64 when built for a 64 bit + architecture. (Closes: #414799) + Thanks to Stuart Anderson . + - debian/patches/44_arm_eabi_built_on_64bit_arches.patch: New file. + * Fix qemu to be able to use LinuxBios. (Closes: #412212) + Thanks to Ed Swierk . + - debian/patches/50_linuxbios_isa_bios_ram.patch: New file. + - 51_linuxbios_piix_ram_size.patch: Likewise. + * Fix segfault when booting a Linux kernel w/o a disk image, by exiting but + clarifying the message, as to use '/dev/null'. (Closes: #409817, #411780) + Thanks to Robert Millan . + - debian/patches/05_non-fatal_if_linux_hd_missing.patch: Updated. + * Fix segfault by using addrlen instead of target_addrlen in + do_getpeername()/do_getsockname(). (Closes: #411910) + Thanks to Stuart Anderson . + - debian/patches/35_syscall_sockaddr.patch: Updated. + * Fix semctl() for 32 bit targets on 64 bit hosts. (Closes: #414809) + Thanks to Stuart Anderson . + - debian/patches/38_syscall_semctl.patch: New file. + * Remove Elrond from Uploaders with consent, always welcome to join + back anytime. + + -- Guillem Jover Wed, 16 May 2007 08:08:31 +0300 + +qemu (0.9.0-1) experimental; urgency=low + + [ Guillem Jover ] + * New upstream release. (Closes: #409989) + - Support for relative paths in backing files for disk images. + (Closes: #390446) + - debian/patches/01_doc_typos.patch: Removed, merged upstream. + - debian/patches/38_syscall_arm_statfs64.patch: Likewise. + - debian/patches/51_serial_small_divider.patch: Likewise. + - debian/patches/67_ppc_ftbfs.patch: Likewise. + - debian/patches/21_net_soopts.patch: Synced. + - debian/patches/30_syscall_ipc.patch: Likewise. + - debian/patches/31_syscalls.patch: Likewise. + - debian/patches/35_syscall_sockaddr.patch: Likewise. 
+ - debian/patches/39_syscall_fadvise64.patch: Likewise. + - debian/patches/42_arm_tls.patch: Likewise. + - debian/patches/55_unmux_socketcall.patch: Likewise. + - debian/patches/80_ui_curses.patch: Likewise. + * Update the copyright information. + * The ACPI initialization code has been moved to bochsbios. + - debian/patches/acpi-dsdt.hex: Removed. + - debian/rules: Do not install acpi-dsdt.hex. + * Add more files to the list of roms removed from the tarball needed to + be touched so that upstream 'make install' does not fail. + * Added armeb and armel to Architecture fields and libgpmg1-dev + Build-Depends. + * Recommend vde2 instead of the transitional vde package. (Closes: #407251) + * Fix typo in qemu-img output. (Closes: #408542) + - debian/patches/01_typo_qemu-img.patch: New file. + Thanks to Adam Buchbinder . + * Symlink qemu-user(1) to qemu-m68k(1). + * Reduce redundancy in qemu-user(1) synopsis. + * Fix rounding in sparc floating point to integer conversions. + - debian/patches/40_sparc_fp_to_int.patch: New file. + Thanks to Aurelien Jarno . + + -- Guillem Jover Thu, 8 Feb 2007 01:01:29 +0200 + +qemu (0.8.2-5) unstable; urgency=low + + [ Guillem Jover ] + * Added a missing part to the ARM NPTL support patch, initially lost. + - debian/patches/42_arm_tls.patch: Updated. + + -- Guillem Jover Tue, 16 Jan 2007 11:44:00 +0200 + +qemu (0.8.2-4) unstable; urgency=medium + + [ Guillem Jover ] + * Disable using iasl for now until it's ported to big-endian systems and + include a locally built acpi-dsdt.hex file. + + -- Guillem Jover Sun, 3 Dec 2006 21:10:23 +0200 + +qemu (0.8.2-3) unstable; urgency=low + + [ Guillem Jover ] + * Hopefully really fix powerpc FTBFS. + + -- Guillem Jover Sun, 5 Nov 2006 17:09:53 +0200 + +qemu (0.8.2-2) unstable; urgency=low + + [ Guillem Jover ] + * Update Tag field to match new debtags vocabulary. + * Clean properly. (Closes: #390166) + - Remove the acpi generated files and the docs. + - Revert the docs regeneration forcing logic. + Thanks to Anderson Lizardo . + * On install use DESTDIR instead of specifying all paths. (Closes: #396139) + Thanks to Anderson Lizardo . + * Port to GNU/kFreeBSD. (Closes: #327622) + - Disable ALSA on non-linux systems. + - Add a Build-Depends on libfreebsd-dev on kfreebsd systems. + - Add kfreebsd-i386 and kfreebsd-amd64 to the Architecture field. + - debian/patches/65_kfreebsd.patch: New file. + Thanks Petr Salinger . + * In qemu-make-debian-root do not explicitely install in aptitude and + libsigc++-1.2-5c102, they are pulled now by default. And do not remove + aptitude afterwards. (Closes: #392481) + Thanks to Ted Percival . + * Add experimental ncurses ui support. (Closes: #369462) + - debian/patches/80_ui_curses.patch: New file. + Thanks to Andrzej Zaborowski . + * Add SO_PEERCRED and SO_SNDTIMEO support, and fix accept syscall when + being passed NULL pointers. + - debian/patches/21_net_sockopts.patch: Renamed to ... + - debian/patches/21_net_soopts.patch: ... here. Modify. + Thanks to Pablo Virolainen. + * Add a fadvise64 syscall stub. + - debian/patches/39_syscall_fadvise64.patch: New file. + Thanks to Pablo Virolainen. + * Add EABI unmuxed socket syscalls. + - debian/patches/55_unmux_socketcall.patch: New file. + Thanks to Riku Voipio. + * Add TLS sections to the ARM and x86 linker scripts so that qemu user + emulators can be linked statically. + - debian/patches/66_tls_ld.patch: New file. + * Move the documentation of the binary blob removals from the original + upstream tarball from README.Debian to debian/copyright. 
+ * Reword the emphasis on "FAST!" from the package description. + * Fix FTBFS on powerpc by adding the missing fp_status variable to the + int32_to_float32 function calls. + - debian/patches/67_ppc_ftbfs.patch: New file. + + -- Guillem Jover Sun, 5 Nov 2006 08:48:27 +0200 + +qemu (0.8.2-1) unstable; urgency=low + + [ Guillem Jover ] + * New upstream release. (Closes: #379461, #385029, #388810) + - Add ACPI BIOS emulation support. (Closes: #372533) + - Fix mouse invisible wall when using Windows XP. (Closes: #384666) + - debian/patches/01_doc_typos.patch: Sync. + - debian/patches/03_machines_list_no_error.patch: Likewise. + - debian/patches/04_do_not_print_rtc_freq_if_ok.patch: Likewise. + - debian/patches/05_non-fatal_if_linux_hd_missing.patch: Likewise. + - debian/patches/06_exit_segfault.patch: Likewise. + - debian/patches/12_signal_powerpc_support.patch: Likewise. + - debian/patches/21_net_sockopt.patch: Likewise. + - debian/patches/22_net_tuntap_stall.patch: Likewise. + - debian/patches/30_syscall_ipc.patch: Likewise. + - debian/patches/31_syscalls.patch: Likewise. + - debian/patches/32_syscall_sysctl.patch: Likewise. + - debian/patches/33_syscall_ppc_clone.patch: Likewise. + - debian/patches/35_syscall_sockaddr.patch: Likewise. + - debian/patches/36_syscall_prctl.patch: Likewise. + - debian/patches/37_syscall_mount.patch: Likewise. + - debian/patches/41_arm_fpa_sigfpe.patch: Likewise. + - debian/patches/42_arm_tls.patch: Likewise. + - debian/patches/61_safe_64bit_int.patch: Likewise. + - debian/patches/63_sparc_build.patch: Likewise. + - debian/patches/50_missing_keycodes.patch: Removed, integrated upstream. + * Switch to quilt: + - debian/control: Add quilt (>= 0.40) to Build-Depends. + - debian/patches/series: New file. + - debian/patch.mk: Removed. + - debian/rules: Include '/usr/share/quilt/quilt.make' instead of + 'debian/patch.mk'. + * Build the ACPI Source Language files with iasl. + * Add a Tag field to the binary package, using data from debtags. + * Add 2006 to the debian/copyright years. + * Add a Recommends on vde. (Closes: #386780) + * Fix spelling error in package description (peripherials -> peripherals). + (Closes: #388700) + Thanks to Rakesh 'arky' Ambati . + * Fix ne2000_can_receive return code to 0 when the command is STOP. + (Closes: #386209) + - debian/patches/52_ne2000_return.patch: New file. + Thanks to Samuel Thibault . + * Document the binary blob removals from the original upstream tarball in + README.Debian. (Closes: #388740) + + -- Guillem Jover Mon, 25 Sep 2006 04:16:25 +0300 + +qemu (0.8.1-1) unstable; urgency=low + + [ Guillem Jover ] + * New upstream release. (Closes: #366955, #366637) + - debian/patches/01_doc_typos.patch: Sync. + - debian/patches/04_do_not_print_rtc_freq_if_ok.patch: Likewise. + - debian/patches/05_non-fatal_if_linux_hd_missing.patch: Likewise. + - debian/patches/12_signal_powerpc_support.patch: Likewise. + - debian/patches/21_net_sockopt.patch: Likewise. + - debian/patches/22_net_tuntap_stall.patch: Likewise. + - debian/patches/30_syscall_ipc.patch: Likewise. + - debian/patches/31_syscalls.patch: Likewise. + - debian/patches/32_syscall_sysctl.patch: Likewise. + - debian/patches/33_syscall_ppc_clone.patch: Likewise. + - debian/patches/35_syscall_sockaddr.patch: Likewise. + - debian/patches/36_syscall_prctl.patch: Likewise. + - debian/patches/37_syscall_mount.patch: Likewise. + - debian/patches/41_arm_fpa_sigfpe.patch: Likewise. + - debian/patches/42_arm_tls.patch: Likewise. + - debian/patches/43_arm_cpustate.patch: Likewise. 
+ - debian/patches/50_missing_keycodes.patch: Likewise. + - debian/patches/51_serial_small_divider.patch: Likewise. + - debian/patches/61_safe_64bit_int.patch: Likewise. + - debian/patches/63_sparc_build.patch: Likewise. + - debian/patches/40_arm_nwfpe_cpsr.patch: Removed, integrated upstream. + * Make the patch system apply the patch on the first run. + - debian/patches/64_ppc_asm_constraints.patch: Add DPATCHLEVEL. + * Document how to use the images created with qemu-make-debian-root in the + man page. Thanks to Jacobo . (Closes: #343450) + * Add support for the -snapshot option to use the TMPDIR evironment + variable. (Closes: #353880) + - debian/patches/02_snapshot_use_tmpdir.patch: New file. + * Do not exit with an error when using '-M ?'. (Closes: #365209) + - debian/patches/03_machines_list_no_error.patch: New file. + * Added symlink for system-mipsel emulator man page. + * Build and clean the pc-bios directory. + * Avoid segfaulting by using _exit(2) instead of exit(3) in qemu user + emulators. (Closes: #338289) + - debian/patches/06_exit_segfault.patch: New file. + * Enable ALSA audio support and add libasound2-dev to the Build-Depends. + * Now using Standards-Version 3.7.2 (no changes needed). + + -- Guillem Jover Sun, 28 May 2006 20:51:10 +0300 + +qemu (0.8.0-3) unstable; urgency=low + + [ Josh Triplett ] + * Fix FTBFS on PowerPC caused by asm constraint problem. (Closes: #361727) + - debian/patches/64_ppc_asm_constraints.patch. + + [ Guillem Jover ] + * Clamp addrlen from host to target when using AF_UNIX. This fixes + socket problems when using EABI. + - debian/patches/35_syscall_sockaddr.patch: New file. + * Fix floating point comparison on ARM NWFPE, due to glue code missmatch. + (Closes: #356287) + - debian/patches/40_arm_nwfpe_cpsr.patch: New file. + - debian/patches/40_fpu_arm_sigfpe.patch: Rename to ... + - debian/patches/41_arm_fpa_sigfpe.patch: ... this. Resync. + Thanks to Ulrich Hecht. + * Fix POSIX threads creation on ARM hanging when initializing the cpu + structure being it cyclic. + - debian/patches/43_arm_cpustate.patch: New file. + * Add TLS support for ARM. Stolen from Scratchbox. + - debian/patches/42_arm_tls.patch: New file. + * Fix sysctl endian problem. + - debian/patches/32_syscall_sysctl.patch: Update. + Thanks to Timo Savola . + * Remove now default '--enable-slirp' build option. (Closes: #356284) + Thanks to Anderson Lizardo . + * Remove unused sharedir to 'make install'. (Closes: #356418) + Thanks to Anderson Lizardo . + * Fix package not cleaning properly. (Closes: #356279) + Thanks to Anderson Lizardo for the initial + patch. + * Add needed syscalls to make debootstrap work. (Closes: #356291) + - debian/patches/36_syscall_prctl.patch: New file. + - debian/patches/37_syscall_mount.patch: Likewise. + - debian/patches/38_syscall_arm_statfs64.patch: Likewise. + Thanks to Anderson Lizardo . + * Remove obsolete Build-Dependency xlibs-dev. + + -- Guillem Jover Thu, 13 Apr 2006 11:53:00 +0300 + +qemu (0.8.0-2) unstable; urgency=low + + [ Guillem Jover ] + * Switch away from cdbs to plain debhelper. + * Upgrade to debhelper compat level 5. + * Allow overriding CC compiler variable. (Closes: #345772) + * Do not redefine 64 bit types on 64 bit arches. + - debian/patches/61_safe_64bit_int.patch: New file. + * Allow linux_boot.bin to be built on any arch by switching to nasm, + and Build-Depending on it. + - debian/patches/62_linux_boot_nasm.patch: New file. + * The serial hw driver uses a small divider that gets zeroed when shifting + bits to the right. 
(Closes: #276276, #348098) + - debian/patches/51_serial_small_divider.patch: New file. + Thanks to Samuel Thibault . + * Escaped hyphens in qemu-user manpage, use italics for filenames and + parameters and bold for options. + * Partial build failure fix for Sparc. (Bugs: #317145, #336970) + Thanks to Jurij Smakov . + + -- Guillem Jover Mon, 20 Feb 2006 09:17:46 +0200 + +qemu (0.8.0-1) unstable; urgency=low + + [ Guillem Jover ] + * New upstream release. (Closes: #344339) + - Added support for Virtual FAT. (Closes: #313123) + - Emulate repeated keystrokes when holding a key. (Closes: #298864) + - debian/patches/01_doc_typos.patch: Sync. + - debian/patches/04_do_not_print_rtc_freq_if_ok.patch: Likewise. + - debian/patches/05_non-fatal_if_linux_hd_missing.patch: Likewise. + - debian/patches/12_signal_powerpc_support.patch: Likewise. + - debian/patches/21_net_sockopt.patch: Likewise. + - debian/patches/22_net_tuntap_stall.patch: Likewise. + - debian/patches/30_syscall_ipc.patch: Likewise. + - debian/patches/31_syscalls.patch: Likewise. + - debian/patches/32_syscall_sysctl.patch: Likewise. + - debian/patches/33_syscall_ppc_clone.patch: Likewise. + - debian/patches/40_fpu_arm_sigfpe.patch: Likewise. + - debian/patches/50_missing_keycodes.patch: Likewise. + * Added mips and mipsel to the lintian overrides for the user emulators + being shlib-with-non-pic-code. + * Added symlinks for mips, mipsel and system-arm emulator manpages. + + -- Guillem Jover Fri, 30 Dec 2005 05:44:53 +0200 + +qemu (0.7.2-2) unstable; urgency=low + + [ Josh Triplett ] + * Add support for signal handling on PowerPC. (Closes: #335509) + - debian/patches/12_signal_powerpc_support.patch: New file. + + [ Guillem Jover ] + * Add Josh Triplett to Uploaders and packaging team. + * Fix PowerPC build failure by reintroducing the ppc linker script and + adding the missing _SDA_BASE_ and _SDA2_BASE_ symbols. (Closes: #336983) + * Remove invalid patch making X11 fail at runtime. + - debian/patches/20_net_socket.patch: Remove. + - debian/patches/32_syscall_sysctl.patch: Sync. + Thanks to Daniel Gimpelevich . + * Avoid the patch system to try until it applies. + - debian/patches/05_non-fatal_if_linux_hd_missing.patch: Added patch level. + - debian/patches/12_signal_powerpc_support.patch: Likewise. + + -- Guillem Jover Wed, 21 Dec 2005 22:11:34 +0200 + +qemu (0.7.2-1) unstable; urgency=low + + [ Guillem Jover ] + * New upstream release. (Closes: #321232, #327168) + - debian/patches/12_signal_silent.patch: Integrated upstream, remove. + - debian/patches/50_ppc_ldscript.patch: Likewise. + - debian/patches/33_syscall_truncate64.patch: Likewise. + - debian/patches/01_doc_typos.patch: Resync with upstream. + - debian/patches/04_do_not_print_rtc_freq_if_ok.patch: Likewise. + - debian/patches/05_non-fatal_if_linux_hd_missing.patch: Likewise. + - debian/patches/10_signal_jobs.patch: Likewise. + - debian/patches/11_signal_sigaction.patch: Likewise. + - debian/patches/20_net_socket.patch: Likewise. + - debian/patches/21_net_sockopt.patch: Likewise. + - debian/patches/22_net_tuntap_stall.patch: Likewise. + - debian/patches/30_syscall_ipc.patch: Likewise. + - debian/patches/31_syscalls.patch: Likewise. + - debian/patches/32_syscall_sysctl.patch: Likewise. + - debian/patches/40_fpu_arm_sigfpe.patch: Likewise. + * Repackaged upstream source to deal with binaries w/o sources. + - pc-bios/video.x: New file removed. + * Create a new qemu-user(1) manpage and link all user emulator manpages + to it. 
(Closes: #335163) + * Add missing '-' and '=' keycodes for sendkey command. + - debian/patches/50_missing_keycodes.patch: New file. (Closes: #334071) + Thanks to Robert Millan . + * Add manpage link for qemu-system-mips. + * Make sysctl byte-swap the name values. + - debian/patches/32_syscall_sysctl.patch: Merge patch. (Closes: #334458) + Thanks to Josh Triplett . + * Change documentation menu section to "Apps/Emulators". (Closes: #335062) + Thanks to Frans Pop . + * On PowerPC, do not zero registers r7-r31 in do_fork and zero register r3. + Fixing segfaults on programs using the clone syscall. + - debian/patches/33_syscall_ppc_clone.patch: New file. (Closes: #335159) + Thanks to Josh Triplett + and Paul Brook . + * Tighten vgabios and bochsbios versioned Depends. + * Add video.x to the list of roms to touch to make qemu Makefile happy. + * Add lintian overrides for the user emulators being shlib-with-non-pic-code. + * Wrap lines in debian/control fields (knowingly breaking policy). + + [ Guilherme de S. Pastore ] + * debian/control: + - Updated my e-mail address. + * debian/copyright: + - Dropped André from team members list, not a single contribution ever. + + -- Guillem Jover Mon, 31 Oct 2005 05:01:45 +0200 + +qemu (0.7.0-4) unstable; urgency=low + + [ Guillem Jover ] + * Rebuild source with locally deborked dpkg-source. (Closes: #321019) + * Added the location of the Subversion repo used for the packages and + fixed the upstream URL in debian/copyright. + * Lower case title header in qemu-make-debian-root man page. + * Use dd instead of cat to generate the qemu debian root image. + (Closes: #315952) + + -- Guillem Jover Wed, 3 Aug 2005 05:53:30 +0300 + +qemu (0.7.0-3) unstable; urgency=low + + [ Guillem Jover ] + * Update watch file to version 3, use perlre and new upstream site. + * Now using Standards-Version 3.6.2 (no changes needed). + * Fix TUN/TAP network interface stalling the connection. (Closes: #290569) + Thanks to Vitaly Belostotsky . + * Link against librt, needed by the new clock_gettime syscall. + - debian/patches/31_syscalls.patch: Update. (Closes: #315388) + Thanks to Timo Savola for noticing. + * Force Build-Dependency on binutils >= 2.16-1 needed by the amd64 and + powerpc linker scripts. (Closes: #262655) + * Force usage of gcc-3.4. (Closes: #319527) + * Add missing Build-Dependency on zlib1g-dev. + Thanks to Reinhard Tartler . + * Include in syscall.c to avoid the broken headers in + linux-kernel-headers 2.6.12. + - debian/patches/34_syscalls_types.patch: New file. + Thanks to Octavian Cerna . + * Fix powerpc linker script. + - debian/patches/50_ppc_ldscript.patch: New file. + Thanks to Octavian Cerna . + + -- Guillem Jover Mon, 1 Aug 2005 02:48:09 +0300 + +qemu (0.7.0-2) unstable; urgency=low + + [ Guillem Jover ] + * Add alpha, sparc, arm and s390 to Architectures (and to the + libgpmg1-dev Build-Depends). + + * Forward SIGSTOP and SIGCONT sent to QEMU to the emulated application. + - debian/patches/10_signal_jobs.patch: New file. + Thanks to Ulrich Hecht. + * Return EINVAL on emulated sigaction when given invalid signal + parameters SIGKILL and SIGSTOP. + - debian/patches/11_signal_sigaction.patch: New fle. + Thanks to Valtteri Rahkonen. + * Do not print messsages for uncaught signal, thus fixing the case + were some applications want to kill their siblings. + - debian/patches/12_signal_silent.patch: New file. + Thanks to Valtteri Rahkonen + + * Fix Unix sockets by handling correctly AF_UNIX socket address + structure length. 
+ - debian/patches/20_net_socket.patch: New file. + Thanks to Timo Savola. + * Implement SO_LINGER, SO_RCVTIMEO, SO_SNDTIMEO, SO_PEERNAME and + SO_PEERCRED getsockoptions. + - debian/patches/21_net_sockopt.patch: New file. + Thanks to Valtteri Rahkonen. + + * Implement SysV IPC message and semaphore syscalls. + - debian/patches/30_syscall_ipc.patch: New file. + Thanks to Valtteri Rahkonen. + * Implement acct, umount2, uselib, swapon, syslog, ftruncate64, + mincore, madvise, readahead and clock_gettime syscalls. + - debian/patches/31_syscalls.patch: New file. + Thanks to Ulrich Hecht. + * Implement sysctl CTL_KERN/KERN_VERSION + - debian/patches/32_syscall_sysctl.patch: New file. + Thanks to Timo Savola. + * Implement truncate64 syscall. + - debian/patches/33_syscall_truncate64.patch: New file. + Thanks to Valtteri Rahkonen. + + * Implement ARM floating point exeption emulation. + - debian/patches/40_fpu_arm_sigfpe.patch: New file. + Thanks to Ulrich Hecht. + + -- Guillem Jover Sun, 19 Jun 2005 15:05:37 +0300 + +qemu (0.7.0-1) experimental; urgency=low + + [ Guillem Jover ] + * New upstream release. (Closes: #308459, #308494) + * Do not require a disk image when booting a Linux kernel. (Closes: #260935) + Thanks to Jonas Smedegaard . + + [ Guilherme de S. Pastore ] + * Rewrote README.Debian for more clarity + * Add support for amd64 as a host architecture. (Closes: #262655) + - Add build-depend on libgpmg1-dev on amd64. + * Fixed qemu-make-debian-root so that it shows the name by which + it was called on the usage notice, not "%s". (Closes: #303507) + Thanks to Micah Anderson . + + [ Elrond ] + * Clean up more files, so they don't end up in the final .diff.gz + * Switch to external proll and openhackware: + - Instead of patching qemu's Makefile, trick it by giving it empty + files to install and remove them straight after install. + - Don't ship the roms in debian/roms any more! + - Instead add more symlinks. + - Update Depends: apropiately. + + -- Guillem Jover Fri, 27 May 2005 02:06:20 +0300 + +qemu (0.6.1+20050407-1) unstable; urgency=low + + [ Guillem Jover ] + * New upstream snapshot. + - Fix -user-net. (Closes: #295019) + - Fix win2k and winxp image booting. (Closes: #285170, #292707) + - Fix installation of outdated documentation. (Closes: #286931) + - Provide qemu-img instead of qemu-mkcow. (Closes: #290713) + - Remove debian/patches/05_fix_openpic_timer_test.patch, integrated + upstream. + - Remove debian/patches/02_selectable_sdl_keyboard.patch, superseded + by new keyboard implementation. (Closes: #284510, #299432) + - Remove debian/patches/01_mkcow_section_and_hyphens.patch. + - Conditionalize qemu -g option for some architectures. (Closes: #298988) + * Added new copyright year to debian/copyright. + * Added initial qemu-make-debian-root man page. (Closes: #286932) + * Fixed typos in qemu documentation. (Closes: #301933) + Thanks to A Costa . + * Added Elrond to Uploaders and packaging team. + * Use the default target list: + - Do not build qemu-fast anymore as it is deprecated upstream anyway. + (Closes: #278602, #281510) + - New targets armeb and system-x86_64. + * Updated ROM images under debian/roms/: + - OpenHackWare 0.4. + - Proll 18 with qemu specific patches. + * Remove uudecoded files from pc-bios/ on clean. + * Fix qemu-make-debian-root to behave correctly even if the needed + Recommends are not installed. + + [ Guilherme de S. 
Pastore ] + * Create a doc-base entry for the package (Closes: #290669) + * debian/control: + - Add debootstrap to the 'Recommends: ' line, as needed by + qemu-make-debian-root (Closes: #302848) + - Moved sharutils from dependency to recommendation, as it is only + needed by qemu-make-debian-root + * debian/docs: + - Do not include README.distrib in the binary package (Closes: #302853) + + [ Elrond ] + * Replace "libgpmg1-dev | not+linux-gnu" by "libgpmg1-dev [i386 powerpc]" + in Build-Depends. qemu should not need to build-depend on it anyway, the + real problem is described in Bug#267174. When it is solved, we can + remove our dependency. Until then please remember to add any arch, which + we will build on and that has gpm. This change hopefully calms: + + * Add versions to the dependencies on bochsbios and vgabios + (Closes: #288997): + - vgabios: Use the current version from testing/unstable (0.4c+20041014-1), + according to Frans Pop , this fixed those + "blank screen" problems. + - bochsbios: Use the current version from unstable (2.1.1+20041109-3), as + Guillem Jover fixed the networking in that version. + + -- Guillem Jover Thu, 7 Apr 2005 01:26:01 +0300 + +qemu (0.6.1-1) unstable; urgency=low + + [ Guillem Jover ] + * New upstream release. (Closes: #281626) + - Booting from drive b is not supported anymore. (Closes: #275679) + - Fix ne2k network interface that was not working in some situations. + (Closes: #281862) + - Remove debian/patches/06_build_gcc3.4.patch, fixed upstream. + - Remove debian/patches/04_lfs.patch, fixed upstream. + - Remove debian/patches/02_fix_powerpc_FTBFS.patch, fixed upstream. + - Remove debian/patches/00_escape_manpage_hyphens.patch, not needed. + - Sync debian/patches/03_use_external_bios.patch. + * Include uuencoded source for proll 18, some build fixes and its binary + proll.bin on debian/roms/. + * Suggests sudo to be used by the qemu-ifup script. + Thanks to Elrond . + * Make sudo in qemu-ifup explain what the password is for. (Closes: #281380) + * Add an option to select the method to convert keyevent to keycode + in the SDL keyboard handling code. Added support for Right Shift in the + generic handler. (Closes: #282658) + Thanks to Elrond . + * Do not set RTC frequency to 1024 or warn about this if it has already + the correct value. (Closes: #281403) + * Enabled sparc-softmmu support. + + -- Guillem Jover Sat, 27 Nov 2004 23:23:49 +0100 + +qemu (0.6.0.dfsg.2-1) unstable; urgency=low + + [ Guillem Jover ] + * Repackaged upstream source to remove external included files. + - pc-bios/ppc-rom.bin: Removed. + - pc-bios/OpenHackWare_0.3.tar.bz2: Likewise. + - pc-bios/vgabios.bin: Likewise. + - pc-bios/vgabios-cirrus.bin: Likewise. + - pc-bios/vgabios-cvs-2004-06-17.tgz: Likewise. + * Include uuencoded source for OpenHackWare 0.3.1 and its binary + ppc-rom.bin on debian/roms/. Add a Build-Depends on sharutils. + * Update tundev.c. Pass -tun-dev to qemu without the equal sign. + Thanks to Isaac Clerencia . + * Fix README.Debian to point to the renamed qemu-make-debian-root. + * Add Depends on sharutils needed by qemu-make-debian-root. + (Closes: #272130) + * Use and depend on vgabios package, which is in sync with bochsbios + that checks for rom bios checksums. (Closes: #281202) + * Enable LFS globally, thus fixing problems with qemu-mkcow when using + an existing large image. + (Closes: #279925) + * Fix openpic timer write test, catched from a warning about a constant + value larger than the type it was casted to. + * Fix build failure with gcc 3.4. 
Patch stolen from Gentoo BTS. + + -- Guillem Jover Mon, 15 Nov 2004 10:46:54 +0100 + +qemu (0.6.0.dfsg.1-1) unstable; urgency=high + + [ Guillem Jover ] + * Repackaged upstream source to deal with binaries w/o sources. + (Closes: #268780) + - pc-bios/bios.bin: Removed binary without source. Now using + bochsbios package. + - pc-bios/vgabios.bin: Rebuilt from vgabios cvs 2004-06-17 snapshot, + source included. + - pc-bios/vgabios-cirrus.bin: Likewise. + - pc-bios/ppc-rom.bin: Rebuilt on voltaire, source included. + - pc-bios/linux_boot.bin: Rebuilt from source. + * Move make-debian-root.sh to /usr/sbin/qemu-make-debian-root. + (Closes: #268705) + + -- Guillem Jover Mon, 13 Sep 2004 01:28:54 +0200 + +qemu (0.6.0-2) unstable; urgency=high + + [ Guilherme de S. Pastore ] + * Fixed dangling symlinks under /usr/share/man/man1. (Closes: #264764) + + [ Guillem Jover ] + * Fix FTBFS on powerpc. + - debian/patches/02_fix_powerpc_FTBFS.patch: New file. + + -- Guillem Jover Wed, 18 Aug 2004 15:50:43 +0200 + +qemu (0.6.0-1) unstable; urgency=medium + + * New maintainers. (Closes: #258900) + * New upstream release. (Closes: #258732) + - Installs ppc BIOS ROM file. (Closes: #257492) + - Builds with -fno-strict-aliasing. (Closes: #257123) + + [ Guilherme de S. Pastore ] + * debian/rules: + - Cleaned up. + - Ported to use CDBS. + * 00_escape_manpage_hyphens.patch: + - Correct a little typo and escape hyphens in upstream manpage. + * 01_mkcow_section_and_hyphens.patch: + - Fix section mismatch and escape hyphens in the qemu-mkcow manpage. + * Added simple /etc/qemu-ifup helper script. (Closes: #245281) + Thanks to Martin Michlmayr . + * Cleaned debian/watch. + * UTF-8'ed debian/changelog. + * Updated Standards-Version to 3.6.1.1. + * Removed outdated and unnecessary debian/qemu-i386.sgml. + - Removed build dependency on docbook-to-man. + * Removed "x86" part from the description (hey, qemu is not x86-only + in any way). Deserves a complete rewrite, shall be done soon. + + [ Guillem Jover ] + * Lower-case package short description. + * Added missing CPU emulations to the description. + * Cleaned and updated debian/copyright. + * Removed manually added libx11-6 dependency. + * Only Build-Depends on libgpmg1-dev on GNU/Linux systems. + * Cosmetic unification to debian/changelog. + * debian/rules: + - Remove generated files. + - Give exec perms to qemu-ifup. + + -- Guillem Jover Sun, 8 Aug 2004 17:24:08 +0200 + +qemu (0.5.5-2) unstable; urgency=low + + * Re-enable SDL disabled while I was bugchasing. (Closes: #255014) + * Yes, this is really 0.5.5. (Closes: #254655) + * Enable slirp networking. (Closes: #253573) + * Add Build-Depends on libgpmg1-dev (found by Bastian Blank, probably breaks + Hurd but that's a problem for another day). + + -- Paul Russell Thu, 24 Jun 2004 06:26:42 +0200 + +qemu (0.5.5-1) unstable; urgency=low + + * New upstream release. (Closes: #237556, #237556) + * Applied patch to add options to make_debian_root.sh. (Closes: #238787) + * Applied patch for other archs: hmmm... (Closes: #251420) + * Do umount -d in make_debian_root.sh. (Closes: #251775) + + -- Paul Russell Tue, 1 Jun 2004 03:50:05 +0200 + +qemu (0.5.4-1) unstable; urgency=low + + * New upstream release. (Closes: #246634) + * qemu-mkcow included in upstream. + * Added tundev program source in doc, to see if people find it useful. + + -- Paul Russell Mon, 3 May 2004 08:14:49 +0200 + +qemu (0.5.3-1) unstable; urgency=low + + * New upstream release. (Closes: #237556) + * Use aalib-config --static-libs. 
(Closes: #243325) + * Document Control-Shift to release mouse pointer. (Closes: #238074) + + -- Paul Russell Tue, 13 Apr 2004 02:58:49 +0200 + +qemu (0.5.2-4) unstable; urgency=low + + * Fix PPC install (Michel Daenzer patch). (Closes: #238431) + * Simplify deps (might be wrong, but it's neater). (Closes: #238430) + + -- Paul Russell Wed, 17 Mar 2004 01:35:47 +0100 + +qemu (0.5.2-3) unstable; urgency=low + + * Make compile on woody. (Closes: #238163) + * Include qemu-doc.html. (Closes: #238076) + * Wrote qemu-i386 man page. (Closes: #238077) + + -- Paul Russell Mon, 15 Mar 2004 23:56:25 +0100 + +qemu (0.5.2-2) unstable; urgency=low + + * Fix build problem so bios.bin etc. can be found. (Closes: #237553) + + -- Paul Russell Fri, 12 Mar 2004 05:43:00 +0100 + +qemu (0.5.2-1) unstable; urgency=low + + * Initial Release. (Closes: #187407) + + -- Paul Russell Wed, 3 Mar 2004 02:18:54 +0100 diff --git a/debian/control b/debian/control new file mode 100644 index 00000000000..28d67bead1b --- /dev/null +++ b/debian/control @@ -0,0 +1,670 @@ +# autogenerated file, please edit debian/control-in +Source: qemu +Section: otherosfs +Priority: optional +Maintainer: NVIDIA BaseOS Team +XSBC-Original-Maintainer: Debian QEMU Team +Uploaders: Michael Tokarev +Build-Depends: debhelper-compat (= 13), + python3:any, + meson (>> 0.63.0~) | meson-1.5, ninja-build, + flex, bison, +Build-Depends-Arch: +# In comments below we also specify (system-specific) arguments +# to qemu's configure script, -- optional features which depend +# on build-dependencies. + python3-venv, +# --enable-docs +# for python3-sphinx:native see #995622 + texinfo, python3-sphinx:native, python3-sphinx-rtd-theme, +# always needed + libglib2.0-dev, + zlib1g-dev, +# iasl (from acpica-tools) is used only in a single test these days, not for building +# acpica-tools, +# libcapstone is in universe in ubuntu +# --enable-linux-aio linux-any + libaio-dev [linux-any], +# libsndio is in universe in ubuntu +# --disable-sndio +# --audio-drv-list=pipewire,pa,alsa,jack,oss,sdl amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 +##--audio-drv-list=pa,oss kfreebsd-any + libjack-dev [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32], + libpulse-dev, + libasound2-dev [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32], + libpipewire-0.3-dev [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32], +# for virtfs (now in libc6) +# --enable-attr +# --enable-bpf linux-any + libbpf-dev [linux-any], +# --enable-brlapi amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 + libbrlapi-dev [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32], +# --enable-virtfs linux-any +# needed for virtfs +# --enable-cap-ng linux-any + libcap-ng-dev [linux-any], +# --enable-curl + libcurl4-gnutls-dev, +# --enable-fdt + libfdt-dev, +# --enable-fuse + libfuse3-dev, +# --enable-gnutls + gnutls-dev, +# --enable-gtk --enable-vte amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 + libgtk-3-dev [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 
ppc64el riscv64 s390x sparc sparc64 x32], + libvte-2.91-dev [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32], +# --enable-libiscsi + libiscsi-dev, +# --enable-curses amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 + libncurses-dev [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32], +# --enable-virglrenderer amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 + libvirglrenderer-dev [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32], +# --enable-opengl amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 + libepoxy-dev [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32], + libdrm-dev [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32], + libgbm-dev [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32], +# --enable-libnfs + libnfs-dev, +# --enable-numa i386 amd64 ia64 mips mipsel mips64 mips64el powerpc powerpcspe x32 ppc64 ppc64el arm64 sparc s390x riscv64 + libnuma-dev [i386 amd64 ia64 mips mipsel mips64 mips64el powerpc powerpcspe x32 ppc64 ppc64el arm64 sparc s390x riscv64], +# --enable-smartcard + libcacard-dev, +# --enable-pixman + libpixman-1-dev, +# --enable-rbd amd64 arm64 mips64el ppc64el ppc64 riscv64 s390x sparc64 + librbd-dev [amd64 arm64 mips64el ppc64el ppc64 riscv64 s390x sparc64], +# gluster is 64bit-only: #1039604 +# --enable-glusterfs amd64 arm64 ppc64el ppc64 riscv64 mips64el s390x ia64 sparc64 + libglusterfs-dev [amd64 arm64 ppc64el ppc64 riscv64 mips64el s390x ia64 sparc64], +# --enable-vnc-sasl + libsasl2-dev, +# --enable-sdl amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 + libsdl2-dev [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32], +# --enable-seccomp amd64 arm64 armel armhf i386 mips64el mipsel ppc64 ppc64el powerpc riscv64 s390x x32 + libseccomp-dev [amd64 arm64 armel armhf i386 mips64el mipsel ppc64 ppc64el powerpc riscv64 s390x x32], +# --enable-slirp + libslirp-dev, +# --enable-spice amd64 arm64 armel armhf i386 mips64el mipsel ppc64el x32 + libspice-server-dev [amd64 arm64 armel armhf i386 mips64el mipsel ppc64el x32], +# --enable-rdma linux-any + librdmacm-dev [linux-any], libibverbs-dev [linux-any], libibumad-dev [linux-any], +# --enable-linux-io-uring linux-any + liburing-dev [linux-any], +# --enable-libusb linux-any + libusb-1.0-0-dev [linux-any], +# --enable-usb-redir linux-any + libusbredirparser-dev [linux-any], +# --enable-libssh + libssh-dev, +# --enable-zstd + libzstd-dev, +# vde is debian-only since ubuntu/vde2 is in universe + libxen-dev [linux-amd64], +# --enable-nettle + nettle-dev, +# other optional features we enable +# --enable-libudev +# needed for qga? 
+ libudev-dev [linux-any], +# --enable-vnc +# --enable-vnc-jpeg + libjpeg-dev, +# --enable-png + libpng-dev, +# --enable-libpmem amd64 arm64 + libpmem-dev [amd64 arm64], +# --enable-kvm linux-any +# --enable-vhost-net linux-any # is it really linux-specific? +##--enable-lzo todo, for (memory) dumps +##--enable-netmap todo bsd +##--enable-xen-pci-passthrough todo +## auth-pam - for auth for vnc&Co using PAM +# +# the testsuite: +#XXX-cyclic-test-dep-dak-bug seabios [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32] , +# ipxe-qemu [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32] , +# various firmware files (kvmvapic.bin &Co), older qemu-system-data should work +#XXX-cyclic-test-dep-dak-bug qemu-system-data [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32] , +Build-Depends-Indep: +# Firmware build dependencies disabled since we're not building firmware +# (sysdata-components commented out in debian/rules) +# pc-bios/*.dts => *.dtb (PPC firmware) + device-tree-compiler, +# gcc-s390x-linux-gnu, +# qemu-palcode/palcode-clipper +# gcc-alpha-linux-gnu, +# u-boot code +# gcc-powerpc-linux-gnu, bc, +# skiboot firmware, openbios +# gcc-powerpc64-linux-gnu, +# skiboot includes +# libssl-dev, +# openbios +# gcc-sparc64-linux-gnu, fcode-utils, xsltproc, +# hppa-firmware +# gcc-hppa-linux-gnu, +# opensbi +# gcc-riscv64-linux-gnu, +# vbootrom/npcm7xx_bootrom +# gcc-arm-none-eabi, +Build-Conflicts: oss4-dev +Standards-Version: 4.6.1 +Homepage: http://www.qemu.org/ +Rules-Requires-Root: binary-targets +XS-Debian-Vcs-Browser: https://salsa.debian.org/qemu-team/qemu +XS-Debian-Vcs-Git: https://salsa.debian.org/qemu-team/qemu.git +Vcs-Browser: https://git.launchpad.net/ubuntu/+source/qemu +Vcs-Git: https://git.launchpad.net/ubuntu/+source/qemu + +Package: qemu-system +Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 +Multi-Arch: foreign +Build-Profiles: +Depends: ${misc:Depends}, + qemu-system-arm, + qemu-system-mips, + qemu-system-ppc, + qemu-system-sparc, + qemu-system-x86, + qemu-system-s390x, + qemu-system-misc +Description: QEMU full system emulation binaries + QEMU is a fast processor emulator: currently the package supports Alpha, ARM, + CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V, + S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic + translation it achieves reasonable speed while being easy to port on new host + CPUs. + . + This metapackage provides the full system emulation binaries for all supported + targets, by depending on all per-architecture system emulation packages which + QEMU supports. 
+
+Package: qemu-block-extra
+Architecture: amd64 arm arm64 armel armhf hppa i386 ia64 kfreebsd-amd64 kfreebsd-i386 m68k mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sh4 sparc sparc64 x32
+Multi-Arch: no
+Depends: ${misc:Depends}, ${shlibs:Depends},
+# we need to ensure qemu-block-extra is upgraded with qemu-system-* or qemu-utils
+ qemu-system-any (= ${binary:Version}) [amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32] | qemu-utils (= ${binary:Version}),
+Enhances: qemu-utils, qemu-system-misc,
+ qemu-system-arm, qemu-system-mips, qemu-system-sparc, qemu-system-x86,
+Description: extra block backend modules for qemu-system and qemu-utils
+ QEMU is a fast processor emulator: currently the package supports Alpha, ARM,
+ CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V,
+ S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic
+ translation it achieves reasonable speed while being easy to port on new host
+ CPUs.
+ .
+ This package provides extra block device backend modules for qemu-system
+ emulation and qemu-img from the qemu-utils package, which are rarely used and
+ have extra dependencies.
+
+Package: qemu-block-supplemental
+# For now, this package only ships glusterfs, which does not build on 32-bit architectures
+Architecture: amd64 arm64 ppc64el riscv64 s390x
+Multi-Arch: no
+Depends: ${misc:Depends}, ${shlibs:Depends},
+# qemu-block-extra has the run-qemu.mount unit which we need
+ qemu-block-extra (= ${binary:Version}),
+# we need to ensure qemu-block-supplemental is upgraded with qemu-system-* or qemu-utils
+ qemu-system-any (= ${binary:Version}) [amd64 arm64 armhf ppc64el riscv64 s390x] | qemu-utils (= ${binary:Version}),
+Enhances: qemu-utils, qemu-system-misc,
+ qemu-system-arm, qemu-system-mips, qemu-system-sparc, qemu-system-x86,
+Breaks: qemu-block-extra (<< 1:8.2.0+ds-4ubuntu2~)
+Replaces: qemu-block-extra (<< 1:8.2.0+ds-4ubuntu2)
+Description: supplemental block backend modules for qemu-system and qemu-utils
+ QEMU is a fast processor emulator: currently the package supports Alpha, ARM,
+ CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V,
+ S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic
+ translation it achieves reasonable speed while being easy to port on new host
+ CPUs.
+ .
+ This package provides supplemental block device backend modules for qemu-system
+ emulation and qemu-img from the qemu-utils package.
+ .
+ Currently, the following module is shipped in this package: block-glusterfs
+
+Package: qemu-system-data
+Architecture: all
+Multi-Arch: foreign
+Conflicts: sgabios, qemu-skiboot, openbios-sparc, openbios-ppc, qemu-slof,
+Replaces: sgabios, openbios-sparc, openbios-ppc, qemu-slof,
+ qemu-system-ppc (<< 1:6.1-4~),
+Breaks: qemu-system-ppc (<< 1:6.1-4~),
+Provides: qemu-keymaps, sgabios, qemu-skiboot, openbios-sparc, openbios-ppc, qemu-slof,
+Depends: ${misc:Depends}
+Description: QEMU full system emulation (data files)
+ This package provides architecture-neutral data files
+ (such as keyboard definitions, icons) for system-mode
+ QEMU emulation (qemu-system-*) packages.
+ +Package: qemu-system-common +Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 +Multi-Arch: no +Build-Profiles: +Breaks: libvirt-daemon (<< 7.2.0-1) +Depends: ${misc:Depends}, ${shlibs:Depends}, +# to fix wrong acl for newly created device node on ubuntu: + acl +Description: QEMU full system emulation binaries (common files) + QEMU is a fast processor emulator: currently the package supports Alpha, ARM, + CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V, + S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic + translation it achieves reasonable speed while being easy to port on new host + CPUs. + . + This package provides common files needed for target-specific + full system emulation (qemu-system-*) packages. + +Package: qemu-system-gui +Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 +#XXX M-A: same does not really work for now due to /usr/lib/qemu/vhost-user-gpu +#XXX we'll deal with this if some actual need arises, +#XXX by moving that binary back to q-s-common or packaging it separately +#Multi-Arch: same +Depends: ${misc:Depends}, ${shlibs:Depends}, +# ui-* depends on ui-opengl + qemu-system-modules-opengl (= ${binary:Version}), +# we need to ensure qemu-system-gui is upgraded with qemu-system-* + qemu-system-any (= ${binary:Version}), +# libgl1 is dynamically loaded by sdl display code + libgl1, +# we moved vhost-user-gpu files here from qemu-system-common at 6.1-4 +Replaces: qemu-system-common (<< 1:6.1+dfsg-4~) +Description: QEMU full system emulation binaries (graphical display and audio modules) + This package provides optional graphical guest display modules (currently GTK + and SDL) and audio backend modules for full system emulation (qemu-system-*) + packages. + . + This package is not a management/control/GUI interface for qemu, use something + else (like virt-manager) for that. + +Package: qemu-system-modules-spice +Architecture: amd64 arm64 armel armhf i386 mips64el mipsel ppc64el x32 +Build-Profiles: +Depends: ${misc:Depends}, ${shlibs:Depends}, +# spice modules depends on ui-opengl + qemu-system-modules-opengl (= ${binary:Version}), +# we need to ensure qemu-system-modules-spice is upgraded with qemu-system-* + qemu-system-any (= ${binary:Version}), +Replaces: qemu-system-common (<< 1:8.1.0+ds-1~exp2~) +Breaks: qemu-system-common (<< 1:8.1.0+ds-1~exp2~) +Description: QEMU full system emulation binaries (spice display modules) + This package provides optional spice display (qxl and spice-app) and audio + support modules for QEMU full system emulation (qemu-system-*) packages. + +Package: qemu-system-modules-opengl +Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 +Build-Profiles: +Depends: ${misc:Depends}, ${shlibs:Depends}, +# we need to ensure qemu-system-modules-opengl is upgraded when qemu-system is upgraded + qemu-system-any (= ${binary:Version}), +Replaces: qemu-system-common (<< 1:8.1.0+ds-1~exp2~), qemu-system-gui (<< 1:8.1.0+ds-1~exp2~) +Breaks: qemu-system-common (<< 1:8.1.0+ds-1~exp2~), qemu-system-gui (<< 1:8.1.0+ds-1~exp2~) +Description: QEMU full system emulation binaries (OpenGL display modules) + This package provides optional OpenGL display support modules for QEMU full + system emulation (qemu-system-*) packages. 
It also provides the dbus display type.
+
+Package: qemu-system-misc
+Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32
+Multi-Arch: foreign
+Build-Profiles:
+Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~),
+Recommends: qemu-utils,
+# alpha uses vgabios
+# alpha m68k sh4 uses bootroms
+ seabios,
+ ipxe-qemu,
+ qemu-system-gui (= ${binary:Version}),
+ qemu-system-modules-spice (= ${binary:Version}) [amd64 arm64 armel armhf i386 mips64el mipsel ppc64el x32],
+ qemu-system-modules-opengl (= ${binary:Version}),
+ qemu-block-extra (= ${binary:Version}),
+Suggests: samba, vde2,
+Provides: ${qemu:Provides}
+Description: QEMU full system emulation binaries (miscellaneous)
+ QEMU is a fast processor emulator: currently the package supports Alpha, ARM,
+ CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V,
+ S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic
+ translation it achieves reasonable speed while being easy to port on new host
+ CPUs.
+ .
+ This package provides the full system emulation binaries to emulate
+ various other hardware which did not make it into separate packages.
+ Emulators for the following architectures are provided:
+ ${qemu:archlist}.
+ .
+ In system emulation mode QEMU emulates a full system, including a processor
+ and various peripherals. It enables easier testing and debugging of system
+ code. It can also be used to provide virtual hosting of several virtual
+ machines on a single server.
+
+Package: qemu-system-arm
+Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32
+Multi-Arch: foreign
+Build-Profiles:
+Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~),
+Recommends: qemu-utils,
+# aarch64 arm uses bootroms
+ ipxe-qemu,
+ ipxe-qemu-256k-compat-efi-roms,
+ qemu-efi-aarch64, qemu-efi-arm,
+ qemu-system-gui (= ${binary:Version}),
+ qemu-system-modules-spice (= ${binary:Version}) [amd64 arm64 armel armhf i386 mips64el mipsel ppc64el x32],
+ qemu-system-modules-opengl (= ${binary:Version}),
+ qemu-block-extra (= ${binary:Version}),
+Suggests: samba, vde2,
+Provides: ${qemu:Provides}
+Description: QEMU full system emulation binaries (arm)
+ QEMU is a fast processor emulator: currently the package supports
+ ARM emulation. By using dynamic translation it achieves
+ reasonable speed while being easy to port on new host CPUs.
+ .
+ This package provides the full system emulation binaries to emulate
+ the following arm hardware: ${qemu:archlist}.
+ .
+ In system emulation mode QEMU emulates a full system, including a processor
+ and various peripherals. It enables easier testing and debugging of system
+ code. It can also be used to provide virtual hosting of several virtual
+ machines on a single server.
+ +Package: qemu-system-mips +Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 +Multi-Arch: foreign +Build-Profiles: +Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~), +Recommends: qemu-utils, +# all mips targets uses vgabios and bootroms + seabios, + ipxe-qemu, + qemu-system-gui (= ${binary:Version}), + qemu-system-modules-spice (= ${binary:Version}) [amd64 arm64 armel armhf i386 mips64el mipsel ppc64el x32], + qemu-system-modules-opengl (= ${binary:Version}), + qemu-block-extra (= ${binary:Version}), +Suggests: samba, vde2, +Provides: ${qemu:Provides} +Description: QEMU full system emulation binaries (mips) + QEMU is a fast processor emulator: currently the package supports + MIPS emulation. By using dynamic translation it achieves + reasonable speed while being easy to port on new host CPUs. + . + This package provides the full system emulation binaries to emulate + the following mips hardware: ${qemu:archlist}. + . + In system emulation mode QEMU emulates a full system, including a processor + and various peripherals. It enables easier testing and debugging of system + code. It can also be used to provide virtual hosting of several virtual + machines on a single server. + +Package: qemu-system-ppc +Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 +Multi-Arch: foreign +Build-Profiles: +Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~), +Recommends: qemu-utils, + ipxe-qemu-256k-compat-efi-roms, +# ppc targets use vgabios-stdvga and bootroms + seabios, + ipxe-qemu, + qemu-system-gui (= ${binary:Version}), + qemu-system-modules-spice (= ${binary:Version}) [amd64 arm64 armel armhf i386 mips64el mipsel ppc64el x32], + qemu-system-modules-opengl (= ${binary:Version}), + qemu-block-extra (= ${binary:Version}), +Suggests: samba, vde2, +Provides: ${qemu:Provides} +Description: QEMU full system emulation binaries (ppc) + QEMU is a fast processor emulator: currently the package supports + PowerPC emulation. By using dynamic translation it achieves + reasonable speed while being easy to port on new host CPUs. + . + This package provides the full system emulation binaries to emulate + the following PowerPC hardware: ${qemu:archlist}. + . + In system emulation mode QEMU emulates a full system, including a processor + and various peripherals. It enables easier testing and debugging of system + code. It can also be used to provide virtual hosting of several virtual + machines on a single server. 
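A note on the versioned relation `qemu-system-data (>> ${source:Upstream-Version}~)` used in the Depends lines above: appending `~` to the upstream version yields a value that sorts below every Debian revision of that upstream version, so the strict `>>` accepts any revision of a matching (or newer) qemu-system-data while rejecting older upstream versions. A small sketch using dpkg's own comparator on a Debian-based host; the version strings are made-up examples:

```python
#!/usr/bin/env python3
# Why "qemu-system-data (>> ${source:Upstream-Version}~)" works: the trailing
# "~" sorts below everything, so the anchor version is smaller than any Debian
# revision of that upstream release.  Requires dpkg to be installed.
import subprocess

def deb_gt(a: str, b: str) -> bool:
    """True if Debian version a is strictly greater than b (dpkg 'gt')."""
    return subprocess.run(["dpkg", "--compare-versions", a, "gt", b]).returncode == 0

if __name__ == "__main__":
    anchor = "1:8.2+ds~"                        # ${source:Upstream-Version} + "~"
    print(deb_gt("1:8.2+ds-1", anchor))         # True: first revision qualifies
    print(deb_gt("1:8.2+ds-4ubuntu2", anchor))  # True: any later revision too
    print(deb_gt("1:8.1+ds-9", anchor))         # False: older upstream rejected
```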
+ +Package: qemu-system-sparc +Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 +Multi-Arch: foreign +Build-Profiles: +Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~), +Recommends: qemu-utils, +# sparc64 uses vgabios-stdvga and bootroms + seabios, + ipxe-qemu, + qemu-system-gui (= ${binary:Version}), + qemu-system-modules-spice (= ${binary:Version}) [amd64 arm64 armel armhf i386 mips64el mipsel ppc64el x32], + qemu-system-modules-opengl (= ${binary:Version}), + qemu-block-extra (= ${binary:Version}), +Suggests: samba, vde2, +Provides: ${qemu:Provides} +Description: QEMU full system emulation binaries (sparc) + QEMU is a fast processor emulator: currently the package supports + SPARC emulation. By using dynamic translation it achieves + reasonable speed while being easy to port on new host CPUs. + . + This package provides the full system emulation binaries to emulate + the following sparc hardware: ${qemu:archlist}. + . + In system emulation mode QEMU emulates a full system, including a processor + and various peripherals. It enables easier testing and debugging of system + code. It can also be used to provide virtual hosting of several virtual + machines on a single server. + +Package: qemu-system-s390x +Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 +Multi-Arch: foreign +Build-Profiles: +Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~), +Recommends: qemu-utils, + qemu-system-gui (= ${binary:Version}), + qemu-system-modules-spice (= ${binary:Version}) [amd64 arm64 armel armhf i386 mips64el mipsel ppc64el x32], + qemu-system-modules-opengl (= ${binary:Version}), + qemu-block-extra (= ${binary:Version}), +Provides: ${qemu:Provides} +Description: QEMU full system emulation binaries (s390x) + QEMU is a fast processor emulator: currently the package supports + s390x emulation. By using dynamic translation it achieves reasonable + speed while being easy to port on new host CPUs. + . + This package provides the full system emulation binaries to emulate + the following s390x hardware: ${qemu:archlist}. + . + In system emulation mode QEMU emulates a full system, including a processor + and various peripherals. It enables easier testing and debugging of system + code. It can also be used to provide virtual hosting of several virtual + machines on a single server. + +# xen support generally is disabled on ubuntu, for a while we used +# an extra build with xen enabled. In the meantime Debian followed that +# approach but with a different name, so add a transitional until +# after 24.04 +Package: qemu-system-x86-xen +Architecture: amd64 +Multi-Arch: foreign +Section: oldlibs +Depends: qemu-system-xen (>= 1:7.0+dfsg-7ubuntu1), ${misc:Depends} +Description: QEMU full system emulation binaries (x86) + The former qemu-system-x86-xen binaries are now in qemu-system-xen. + . + This is a transitional package. You can safely remove it. 
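Several Recommends entries above carry an architecture restriction, e.g. `qemu-system-modules-spice (= ${binary:Version}) [amd64 arm64 armel armhf i386 mips64el mipsel ppc64el x32]`. Such restrictions are resolved when the binary packages are built: the dependency is kept only on the listed architectures. A rough sketch of that membership test follows; real restriction lists can also contain wildcards such as linux-any, which this toy check ignores:

```python
#!/usr/bin/env python3
# Rough sketch of architecture-qualified dependencies: keep the dependency
# only if the architecture being built for appears in the bracketed list.
# Requires dpkg-dev (for dpkg-architecture) on a Debian-based host.
import subprocess

SPICE_ARCHES = "amd64 arm64 armel armhf i386 mips64el mipsel ppc64el x32"

def host_arch() -> str:
    # dpkg-architecture -qDEB_HOST_ARCH prints the architecture being built for
    return subprocess.run(["dpkg-architecture", "-qDEB_HOST_ARCH"],
                          capture_output=True, text=True, check=True).stdout.strip()

if __name__ == "__main__":
    arch = host_arch()
    keep = arch in SPICE_ARCHES.split()
    print(f"on {arch}: spice module dependency {'kept' if keep else 'dropped'}")
```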
+
+Package: qemu-system-x86
+Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32
+Multi-Arch: foreign
+Build-Profiles:
+Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~),
+ seabios (>> 1.16.3-1~),
+ ipxe-qemu,
+Recommends: qemu-utils,
+ ipxe-qemu-256k-compat-efi-roms,
+ ovmf,
+ qemu-system-gui (= ${binary:Version}),
+ qemu-system-modules-spice (= ${binary:Version}) [amd64 arm64 armel armhf i386 mips64el mipsel ppc64el x32],
+ qemu-system-modules-opengl (= ${binary:Version}),
+ qemu-block-extra (= ${binary:Version}),
+ cpu-checker,
+Suggests: samba, vde2,
+Provides: ${qemu:Provides}
+Description: QEMU full system emulation binaries (x86)
+ QEMU is a fast processor emulator: currently the package supports
+ i386 and x86-64 emulation. By using dynamic translation it achieves
+ reasonable speed while being easy to port on new host CPUs.
+ .
+ This package provides the full system emulation binaries to emulate
+ the following x86 hardware: ${qemu:archlist}.
+ .
+ In system emulation mode QEMU emulates a full system, including a processor
+ and various peripherals. It enables easier testing and debugging of system
+ code. It can also be used to provide virtual hosting of several virtual
+ machines on a single server.
+ .
+ On x86 host hardware this package also enables KVM kernel virtual machine
+ usage on systems which support it.
+
+Package: qemu-system-xen
+Architecture: amd64
+Multi-Arch: no
+Build-Profiles:
+# do we really need qemu-system-data? keymaps only?
+Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-data (>> ${source:Upstream-Version}~),
+ seabios
+Recommends: qemu-utils,
+ ovmf,
+# For the transition from the former qemu-system-x86-xen name
+Breaks: qemu-system-x86-xen (<<1:7.0+dfsg-7ubuntu1)
+Replaces: qemu-system-x86-xen (<<1:7.0+dfsg-7ubuntu1)
+Provides: qemu-system-x86-xen
+Description: QEMU full system emulation (Xen helper package)
+ This package provides the i386 system emulation binary to work
+ together with the Xen hypervisor for some types of DomUs.
+ This package is not useful on its own.
+
+Package: qemu-user
+Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel ppc64 ppc64el riscv64 s390x sparc sparc64 x32
+Multi-Arch: foreign
+Build-Profiles:
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Recommends: qemu-user-binfmt
+Description: QEMU user mode emulation binaries
+ QEMU is a fast processor emulator: currently the package supports Alpha, ARM,
+ CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V,
+ S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic
+ translation it achieves reasonable speed while being easy to port on new host
+ CPUs.
+ .
+ This package provides the user mode emulation binaries. In this mode
+ QEMU can launch Linux processes compiled for one CPU on another CPU.
+ .
+ If qemu-user-binfmt package is also installed, it will register binary
+ format handlers from this qemu-user package with the kernel so it will
+ be possible to run foreign binaries directly. However, this might not
+ be suitable for using inside foreign chroots, in which case it is
+ possible to use qemu-user-static package instead of qemu-user-binfmt --
+ qemu-user-static will register statically linked binfmt handlers instead.
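The qemu-user description above explains the split between qemu-user-binfmt (dynamically linked handlers) and qemu-user-static (statically linked handlers that keep working inside foreign chroots). Both register their interpreters through the kernel's binfmt_misc interface; below is a minimal, Linux-only sketch for inspecting what is currently registered, assuming binfmt_misc is mounted at the usual /proc/sys/fs/binfmt_misc path:

```python
#!/usr/bin/env python3
# Minimal sketch: list the binfmt_misc handlers currently registered with the
# kernel, e.g. the qemu-* entries that qemu-user-binfmt or qemu-user-static
# set up.  Linux-only; readable as an unprivileged user.
from pathlib import Path

BINFMT = Path("/proc/sys/fs/binfmt_misc")

def handlers():
    for entry in sorted(BINFMT.iterdir()):
        if entry.name in ("register", "status"):
            continue                       # control files, not format entries
        lines = entry.read_text().splitlines()
        interp = next((l.split(None, 1)[1] for l in lines
                       if l.startswith("interpreter")), "?")
        flags = next((l.split(":", 1)[1].strip() for l in lines
                      if l.startswith("flags:")), "")
        # 'F' (fix-binary) means the kernel opened the interpreter when the
        # format was registered, which is why statically linked handlers keep
        # working inside a foreign chroot or container.
        yield entry.name, interp, "F" in flags

if __name__ == "__main__":
    for name, interp, fixed in handlers():
        print(f"{name:20} -> {interp}{'  (fix-binary)' if fixed else ''}")
```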
+ +Package: qemu-user-static +Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel ppc64 ppc64el riscv64 s390x sparc sparc64 x32 +Built-Using: ${built-using} +Multi-Arch: foreign +Build-Profiles: +Depends: ${misc:Depends} +# Recommend systemd for binfmt-misc registration +Recommends: systemd +Provides: qemu-user-binfmt +Conflicts: qemu-user-binfmt +Description: QEMU user mode emulation binaries (static version) + QEMU is a fast processor emulator: currently the package supports Alpha, ARM, + CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V, + S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic + translation it achieves reasonable speed while being easy to port on new host + CPUs. + . + This package provides the user mode emulation binaries, built + statically. In this mode QEMU can launch Linux processes compiled for + one CPU on another CPU. + . + qemu-user-static package will register binary formats which the provided + emulators can handle, so that it will be possible to run foreign binaries + directly. + +Package: qemu-user-binfmt +Architecture: amd64 arm arm64 armel armhf i386 mips mips64 mips64el mipsel ppc64 ppc64el riscv64 s390x sparc sparc64 x32 +Multi-Arch: foreign +Build-Profiles: +Depends: ${misc:Depends}, qemu-user (= ${binary:Version}) +# Recommend systemd for binfmt-misc registration +Recommends: systemd +Conflicts: qemu-user-static +Description: QEMU user mode binfmt registration for qemu-user + QEMU is a fast processor emulator: currently the package supports Alpha, ARM, + CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V, + S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic + translation it achieves reasonable speed while being easy to port on new host + CPUs. + . + This package provides binfmt support registration for the user mode + emulation binaries from qemu-user. This is an empty package, it does + not contain any additional files, only registration scripts which run + at install and remove times. + +Package: qemu-utils +Architecture: amd64 arm arm64 armel armhf hppa i386 ia64 kfreebsd-amd64 kfreebsd-i386 m68k mips mips64 mips64el mipsel powerpc powerpcspe ppc64 ppc64el riscv64 s390x sh4 sparc sparc64 x32 +Multi-Arch: foreign +Depends: ${shlibs:Depends}, ${misc:Depends}, +Recommends: + qemu-block-extra (= ${binary:Version}), +Replaces: +# qemu-storage-daemon and qemu-block-driver.7 has been moved from q-s-c. + qemu-system-common (<< 1:8.0+dfsg-5~), +Breaks: + qemu-system-common (<< 1:8.0+dfsg-5~), +Description: QEMU utilities + QEMU is a fast processor emulator: currently the package supports Alpha, ARM, + CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V, + S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic + translation it achieves reasonable speed while being easy to port on new host + CPUs. + . + This package provides QEMU related utilities: + * qemu-img: QEMU disk image utility + * qemu-io: QEMU disk exerciser + * qemu-nbd: QEMU disk network block device server + +Package: qemu-guest-agent +Architecture: any +Multi-Arch: foreign +Depends: ${shlibs:Depends}, ${misc:Depends} +Pre-Depends: ${misc:Pre-Depends} +Description: Guest-side qemu-system agent + QEMU is a fast processor emulator: currently the package supports Alpha, ARM, + CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V, + S390x, SH4, SPARC, x86-64, Xtensa and other emulations. 
By using dynamic + translation it achieves reasonable speed while being easy to port on new host + CPUs. + . + This package provides a daemon (agent) to run inside qemu-system + guests (full system emulation). It communicates with the host using + a virtio-serial channel org.qemu.guest_agent.0, and allows one to perform + some functions in the guest from the host, including: + - querying and setting guest system time + - performing guest filesystem sync operation + - initiating guest shutdown or suspend to ram + - accessing guest files + - freezing/thawing guest filesystem operations + - others. + . + Install this package on a system which is running as guest inside + qemu virtual machine. It is not used on the host. + +#Package: qemu-system-for-host +## This is actually all architectures for which qemu-system (softmmu) target is implemented +#Architecture: any +#Multi-Arch: same +#Depends: ${qemu:for-host} (=${binary:Version}) +#Description: QEMU full system emulation (dependency-only package for the host architecture) +# This package pulls one of qemu-system-* subpackages which contains +# qemu-system-${}{DEB_HOST_ARCH_CPU} binary specific for the host +# architecture. This one depends on ${qemu:for-host}. +# diff --git a/debian/control-in b/debian/control-in new file mode 100644 index 00000000000..6785d4076e8 --- /dev/null +++ b/debian/control-in @@ -0,0 +1,676 @@ +Source: qemu +Section: otherosfs +Priority: optional +:debian:Maintainer: Debian QEMU Team +:ubuntu:Maintainer: Ubuntu Developers +:ubuntu:XSBC-Original-Maintainer: Debian QEMU Team +Uploaders: Michael Tokarev +Build-Depends: debhelper-compat (= 13), + python3:any, + python3-venv, + meson (>> 0.63.0~) | meson-1.5, ninja-build, + flex, bison, +Build-Depends-Arch: +# In comments below we also specify (system-specific) arguments +# to qemu's configure script, -- optional features which depend +# on build-dependencies. 
+# --enable-docs +# for python3-sphinx:native see #995622 + texinfo, python3-sphinx:native, python3-sphinx-rtd-theme, +# always needed + libglib2.0-dev, + zlib1g-dev, +# iasl (from acpica-tools) is used only in a single test these days, not for building +# acpica-tools, +# libcapstone is in universe in ubuntu +:debian:# --enable-capstone +:debian: libcapstone-dev, +# --enable-linux-aio linux-any + libaio-dev [linux-any], +# libsndio is in universe in ubuntu +# --disable-sndio +# --audio-drv-list=pipewire,pa,alsa,jack,oss,sdl :system-arch-linux: +##--audio-drv-list=pa,oss kfreebsd-any + libjack-dev [:system-arch-linux:], + libpulse-dev, + libasound2-dev [:system-arch-linux:], + libpipewire-0.3-dev [:system-arch-linux:], +# for virtfs (now in libc6) +# --enable-attr +# --enable-bpf linux-any + libbpf-dev [linux-any], +# --enable-brlapi :system-arch: + libbrlapi-dev [:system-arch:], +# --enable-virtfs linux-any +# needed for virtfs +# --enable-cap-ng linux-any + libcap-ng-dev [linux-any], +# --enable-curl + libcurl4-gnutls-dev, +# --enable-fdt + libfdt-dev, +# --enable-fuse + libfuse3-dev, +# --enable-gnutls + gnutls-dev, +# --enable-gtk --enable-vte :system-arch: + libgtk-3-dev [:system-arch:], + libvte-2.91-dev [:system-arch:], +# --enable-libiscsi + libiscsi-dev, +# --enable-curses :system-arch: + libncurses-dev [:system-arch:], +# --enable-virglrenderer :system-arch-linux: + libvirglrenderer-dev [:system-arch-linux:], +# --enable-opengl :system-arch-linux: + libepoxy-dev [:system-arch-linux:], + libdrm-dev [:system-arch-linux:], + libgbm-dev [:system-arch-linux:], +# --enable-libnfs + libnfs-dev, +# --enable-numa i386 amd64 ia64 mips mipsel mips64 mips64el powerpc powerpcspe x32 ppc64 ppc64el arm64 sparc s390x riscv64 + libnuma-dev [i386 amd64 ia64 mips mipsel mips64 mips64el powerpc powerpcspe x32 ppc64 ppc64el arm64 sparc s390x riscv64], +# --enable-smartcard + libcacard-dev, +# --enable-pixman + libpixman-1-dev, +# --enable-rbd amd64 arm64 mips64el ppc64el ppc64 riscv64 s390x sparc64 + librbd-dev [amd64 arm64 mips64el ppc64el ppc64 riscv64 s390x sparc64], +# gluster is 64bit-only: #1039604 +# --enable-glusterfs amd64 arm64 ppc64el ppc64 riscv64 mips64el s390x ia64 sparc64 + libglusterfs-dev [amd64 arm64 ppc64el ppc64 riscv64 mips64el s390x ia64 sparc64], +# --enable-vnc-sasl + libsasl2-dev, +# --enable-sdl :system-arch: + libsdl2-dev [:system-arch:], +# --enable-seccomp amd64 arm64 armel armhf i386 mips64el mipsel ppc64 ppc64el powerpc riscv64 s390x x32 + libseccomp-dev [amd64 arm64 armel armhf i386 mips64el mipsel ppc64 ppc64el powerpc riscv64 s390x x32], +# --enable-slirp + libslirp-dev, +# --enable-spice :spice-arch: + libspice-server-dev [:spice-arch:], +# --enable-rdma linux-any + librdmacm-dev [linux-any], libibverbs-dev [linux-any], libibumad-dev [linux-any], +# --enable-linux-io-uring linux-any + liburing-dev [linux-any], +# --enable-libusb linux-any + libusb-1.0-0-dev [linux-any], +# --enable-usb-redir linux-any + libusbredirparser-dev [linux-any], +# --enable-libssh + libssh-dev, +# --enable-zstd + libzstd-dev, +# vde is debian-only since ubuntu/vde2 is in universe +:debian:# --enable-vde +:debian: libvdeplug-dev, + libxen-dev [linux-amd64], +# --enable-nettle + nettle-dev, +# other optional features we enable +# --enable-libudev +# needed for qga? 
+ libudev-dev [linux-any], +# --enable-vnc +# --enable-vnc-jpeg + libjpeg-dev, +# --enable-png + libpng-dev, +# --enable-libpmem amd64 arm64 + libpmem-dev [amd64 arm64], +# --enable-kvm linux-any +# --enable-vhost-net linux-any # is it really linux-specific? +##--enable-lzo todo, for (memory) dumps +##--enable-netmap todo bsd +##--enable-xen-pci-passthrough todo +## auth-pam - for auth for vnc&Co using PAM +# +# the testsuite: +#XXX-cyclic-test-dep-dak-bug seabios [:system-arch-linux:] , +# ipxe-qemu [:system-arch-linux:] , +# various firmware files (kvmvapic.bin &Co), older qemu-system-data should work +#XXX-cyclic-test-dep-dak-bug qemu-system-data [:system-arch-linux:] , +Build-Depends-Indep: +# Firmware build dependencies disabled since we're not building firmware +# (sysdata-components commented out in debian/rules) +# pc-bios/*.dts => *.dtb (PPC firmware) + device-tree-compiler, +# gcc-s390x-linux-gnu, +# qemu-palcode/palcode-clipper +# gcc-alpha-linux-gnu, +# u-boot code +# gcc-powerpc-linux-gnu, bc, +# skiboot firmware, openbios +# gcc-powerpc64-linux-gnu, +# skiboot includes +# libssl-dev, +# openbios +# gcc-sparc64-linux-gnu, fcode-utils, xsltproc, +# hppa-firmware +# gcc-hppa-linux-gnu, +# opensbi +# gcc-riscv64-linux-gnu, +# vbootrom/npcm7xx_bootrom +# gcc-arm-none-eabi, +Build-Conflicts: oss4-dev +Standards-Version: 4.6.1 +Homepage: http://www.qemu.org/ +Rules-Requires-Root: binary-targets +:debian:Vcs-Browser: https://salsa.debian.org/qemu-team/qemu +:debian:Vcs-Git: https://salsa.debian.org/qemu-team/qemu.git +:ubuntu:XS-Debian-Vcs-Browser: https://salsa.debian.org/qemu-team/qemu +:ubuntu:XS-Debian-Vcs-Git: https://salsa.debian.org/qemu-team/qemu.git +:ubuntu:Vcs-Browser: https://git.launchpad.net/ubuntu/+source/qemu +:ubuntu:Vcs-Git: https://git.launchpad.net/ubuntu/+source/qemu + +Package: qemu-system +Architecture: :system-arch: +Multi-Arch: foreign +Build-Profiles: +Depends: ${misc:Depends}, + qemu-system-arm, + qemu-system-mips, + qemu-system-ppc, + qemu-system-sparc, + qemu-system-x86, +:ubuntu: qemu-system-s390x, + qemu-system-misc +Description: QEMU full system emulation binaries + QEMU is a fast processor emulator: currently the package supports Alpha, ARM, + CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V, + S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic + translation it achieves reasonable speed while being easy to port on new host + CPUs. + . + This metapackage provides the full system emulation binaries for all supported + targets, by depending on all per-architecture system emulation packages which + QEMU supports. + +Package: qemu-block-extra +Architecture: :utils-arch: +Multi-Arch: no +Depends: ${misc:Depends}, ${shlibs:Depends}, +# we need to ensure qemu-block-extra is upgraded with qemu-system-* or qemu-utils + qemu-system-any (= ${binary:Version}) [:system-arch:] | qemu-utils (= ${binary:Version}), +Enhances: qemu-utils, qemu-system-misc, + qemu-system-arm, qemu-system-mips, qemu-system-sparc, qemu-system-x86, +Description: extra block backend modules for qemu-system and qemu-utils + QEMU is a fast processor emulator: currently the package supports Alpha, ARM, + CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V, + S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic + translation it achieves reasonable speed while being easy to port on new host + CPUs. + . 
+ This package provides extra block device backend modules for qemu-system
+ emulation and qemu-img from qemu-utils package, which are rarely used and
+ have extra dependencies.
+:ubuntu:
+:ubuntu:Package: qemu-block-supplemental
+:ubuntu:# For now, this package only ships glusterfs, which does not build in 32bit architectures
+:ubuntu:Architecture: amd64 arm64 ppc64el riscv64 s390x
+:ubuntu:Multi-Arch: no
+:ubuntu:Depends: ${misc:Depends}, ${shlibs:Depends},
+:ubuntu:# qemu-block-extra has the run-qemu.mount unit which we need
+:ubuntu: qemu-block-extra (= ${binary:Version}),
+:ubuntu:# we need to ensure qemu-block-supplemental is upgraded with qemu-system-* or qemu-utils
+:ubuntu: qemu-system-any (= ${binary:Version}) [amd64 arm64 armhf ppc64el riscv64 s390x] | qemu-utils (= ${binary:Version}),
+:ubuntu:Enhances: qemu-utils, qemu-system-misc,
+:ubuntu: qemu-system-arm, qemu-system-mips, qemu-system-sparc, qemu-system-x86,
+:ubuntu:Breaks: qemu-block-extra (<< 1:8.2.0+ds-4ubuntu2~)
+:ubuntu:Replaces: qemu-block-extra (<< 1:8.2.0+ds-4ubuntu2)
+:ubuntu:Description: supplemental block backend modules for qemu-system and qemu-utils
+:ubuntu: QEMU is a fast processor emulator: currently the package supports Alpha, ARM,
+:ubuntu: CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V,
+:ubuntu: S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic
+:ubuntu: translation it achieves reasonable speed while being easy to port on new host
+:ubuntu: CPUs.
+:ubuntu: .
+:ubuntu: This package provides supplemental block device backend modules for qemu-system
+:ubuntu: emulation and qemu-img from qemu-utils package.
+:ubuntu: .
+:ubuntu: Currently, the following module is shipped in this package: block-glusterfs
+
+Package: qemu-system-data
+Architecture: all
+Multi-Arch: foreign
+Conflicts: sgabios, qemu-skiboot, openbios-sparc, openbios-ppc, qemu-slof,
+Replaces: sgabios, openbios-sparc, openbios-ppc, qemu-slof,
+ qemu-system-ppc (<< 1:6.1-4~),
+Breaks: qemu-system-ppc (<< 1:6.1-4~),
+Provides: qemu-keymaps, sgabios, qemu-skiboot, openbios-sparc, openbios-ppc, qemu-slof,
+Depends: ${misc:Depends}
+Description: QEMU full system emulation (data files)
+ This package provides architecture-neutral data files
+ (such as keyboard definitions, icons) for system-mode
+ QEMU emulation (qemu-system-*) packages.
+
+Package: qemu-system-common
+Architecture: :system-arch:
+Multi-Arch: no
+Build-Profiles:
+Breaks: libvirt-daemon (<< 7.2.0-1)
+Depends: ${misc:Depends}, ${shlibs:Depends},
+# to fix wrong acl for newly created device node on ubuntu:
+:ubuntu: acl
+Description: QEMU full system emulation binaries (common files)
+ QEMU is a fast processor emulator: currently the package supports Alpha, ARM,
+ CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V,
+ S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic
+ translation it achieves reasonable speed while being easy to port on new host
+ CPUs.
+ .
+ This package provides common files needed for target-specific
+ full system emulation (qemu-system-*) packages.
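The qemu-block-extra and qemu-block-supplemental packages above ship loadable block backend modules that both the system emulators and qemu-img from qemu-utils pick up at runtime: built-in formats such as qcow2 work without them, while protocol drivers like glusterfs only appear once the module package is installed. A small usage sketch for qemu-img; the file name and size are arbitrary, and qemu-utils must be installed for the commands to exist:

```python
#!/usr/bin/env python3
# Usage sketch for qemu-img from qemu-utils: create a qcow2 image and read
# its metadata back as JSON.
import json
import subprocess

def create_and_inspect(path: str = "demo.qcow2", size: str = "1G") -> dict:
    # qemu-img create -f qcow2 <file> <size>
    subprocess.run(["qemu-img", "create", "-f", "qcow2", path, size], check=True)
    # qemu-img info --output=json prints format, virtual-size, etc.
    info = subprocess.run(["qemu-img", "info", "--output=json", path],
                          capture_output=True, text=True, check=True)
    return json.loads(info.stdout)

if __name__ == "__main__":
    meta = create_and_inspect()
    print(meta["format"], meta["virtual-size"])
```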
+ +Package: qemu-system-gui +Architecture: :system-arch: +#XXX M-A: same does not really work for now due to /usr/lib/qemu/vhost-user-gpu +#XXX we'll deal with this if some actual need arises, +#XXX by moving that binary back to q-s-common or packaging it separately +#Multi-Arch: same +Depends: ${misc:Depends}, ${shlibs:Depends}, +# ui-* depends on ui-opengl + qemu-system-modules-opengl (= ${binary:Version}), +# we need to ensure qemu-system-gui is upgraded with qemu-system-* + qemu-system-any (= ${binary:Version}), +# libgl1 is dynamically loaded by sdl display code + libgl1, +# we moved vhost-user-gpu files here from qemu-system-common at 6.1-4 +Replaces: qemu-system-common (<< 1:6.1+dfsg-4~) +Description: QEMU full system emulation binaries (graphical display and audio modules) + This package provides optional graphical guest display modules (currently GTK + and SDL) and audio backend modules for full system emulation (qemu-system-*) + packages. + . + This package is not a management/control/GUI interface for qemu, use something + else (like virt-manager) for that. + +Package: qemu-system-modules-spice +Architecture: :spice-arch: +Build-Profiles: +Depends: ${misc:Depends}, ${shlibs:Depends}, +# spice modules depends on ui-opengl + qemu-system-modules-opengl (= ${binary:Version}), +# we need to ensure qemu-system-modules-spice is upgraded with qemu-system-* + qemu-system-any (= ${binary:Version}), +Replaces: qemu-system-common (<< 1:8.1.0+ds-1~exp2~) +Breaks: qemu-system-common (<< 1:8.1.0+ds-1~exp2~) +Description: QEMU full system emulation binaries (spice display modules) + This package provides optional spice display (qxl and spice-app) and audio + support modules for QEMU full system emulation (qemu-system-*) packages. + +Package: qemu-system-modules-opengl +Architecture: :system-arch: +Build-Profiles: +Depends: ${misc:Depends}, ${shlibs:Depends}, +# we need to ensure qemu-system-modules-opengl is upgraded when qemu-system is upgraded + qemu-system-any (= ${binary:Version}), +Replaces: qemu-system-common (<< 1:8.1.0+ds-1~exp2~), qemu-system-gui (<< 1:8.1.0+ds-1~exp2~) +Breaks: qemu-system-common (<< 1:8.1.0+ds-1~exp2~), qemu-system-gui (<< 1:8.1.0+ds-1~exp2~) +Description: QEMU full system emulation binaries (OpenGL display modules) + This package provides optional OpenGL display support modules for QEMU full + system emulation (qemu-system-*) packages. It also provides dbus display type. + +Package: qemu-system-misc +Architecture: :system-arch: +Multi-Arch: foreign +Build-Profiles: +Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~), +Recommends: qemu-utils, +# alpha uses vgabios +# alpha m68k sh4 uses bootroms + seabios, + ipxe-qemu, + qemu-system-gui (= ${binary:Version}), + qemu-system-modules-spice (= ${binary:Version}) [:spice-arch:], + qemu-system-modules-opengl (= ${binary:Version}), + qemu-block-extra (= ${binary:Version}), +Suggests: samba, vde2, +Provides: ${qemu:Provides} +Description: QEMU full system emulation binaries (miscellaneous) + QEMU is a fast processor emulator: currently the package supports Alpha, ARM, + CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V, + S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic + translation it achieves reasonable speed while being easy to port on new host + CPUs. + . 
+ This package provides the full system emulation binaries to emulate
+ various other hardware which did not make it into separate packages.
+ Emulators for the following architectures are provided:
+ ${qemu:archlist}.
+ .
+ In system emulation mode QEMU emulates a full system, including a processor
+ and various peripherals. It enables easier testing and debugging of system
+ code. It can also be used to provide virtual hosting of several virtual
+ machines on a single server.
+
+Package: qemu-system-arm
+Architecture: :system-arch:
+Multi-Arch: foreign
+Build-Profiles:
+Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~),
+Recommends: qemu-utils,
+# aarch64 arm uses bootroms
+ ipxe-qemu,
+:ubuntu: ipxe-qemu-256k-compat-efi-roms,
+ qemu-efi-aarch64, qemu-efi-arm,
+ qemu-system-gui (= ${binary:Version}),
+ qemu-system-modules-spice (= ${binary:Version}) [:spice-arch:],
+ qemu-system-modules-opengl (= ${binary:Version}),
+ qemu-block-extra (= ${binary:Version}),
+Suggests: samba, vde2,
+Provides: ${qemu:Provides}
+Description: QEMU full system emulation binaries (arm)
+ QEMU is a fast processor emulator: currently the package supports
+ ARM emulation. By using dynamic translation it achieves
+ reasonable speed while being easy to port on new host CPUs.
+ .
+ This package provides the full system emulation binaries to emulate
+ the following arm hardware: ${qemu:archlist}.
+ .
+ In system emulation mode QEMU emulates a full system, including a processor
+ and various peripherals. It enables easier testing and debugging of system
+ code. It can also be used to provide virtual hosting of several virtual
+ machines on a single server.
+
+Package: qemu-system-mips
+Architecture: :system-arch:
+Multi-Arch: foreign
+Build-Profiles:
+Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~),
+Recommends: qemu-utils,
+# all mips targets uses vgabios and bootroms
+ seabios,
+ ipxe-qemu,
+ qemu-system-gui (= ${binary:Version}),
+ qemu-system-modules-spice (= ${binary:Version}) [:spice-arch:],
+ qemu-system-modules-opengl (= ${binary:Version}),
+ qemu-block-extra (= ${binary:Version}),
+Suggests: samba, vde2,
+Provides: ${qemu:Provides}
+Description: QEMU full system emulation binaries (mips)
+ QEMU is a fast processor emulator: currently the package supports
+ MIPS emulation. By using dynamic translation it achieves
+ reasonable speed while being easy to port on new host CPUs.
+ .
+ This package provides the full system emulation binaries to emulate
+ the following mips hardware: ${qemu:archlist}.
+ .
+ In system emulation mode QEMU emulates a full system, including a processor
+ and various peripherals. It enables easier testing and debugging of system
+ code. It can also be used to provide virtual hosting of several virtual
+ machines on a single server.
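debian/control-in, shown here, is a template rather than the final control file: lines tagged `:debian:` or `:ubuntu:` are kept only for the matching distribution, and placeholders such as `:system-arch:` or `[:spice-arch:]` stand for architecture lists. The sketch below illustrates how such an expansion into debian/control could work; the actual rule lives in debian/rules and may differ, and the macro values here are examples rather than the authoritative lists:

```python
#!/usr/bin/env python3
# Illustrative expansion of a debian/control-in style template:
#   * ":debian:" / ":ubuntu:" line tags select per-distro content
#   * ":system-arch:"-style placeholders become architecture lists
# The macro values are examples, not the real lists used by the packaging.
import re
import sys

MACROS = {
    ":system-arch:":       "amd64 arm64 armhf i386 ppc64el riscv64 s390x",
    ":system-arch-linux:": "amd64 arm64 armhf i386 ppc64el riscv64 s390x",
    ":spice-arch:":        "amd64 arm64 armhf i386 ppc64el",
    ":user-arch:":         "amd64 arm64 armhf i386 ppc64el riscv64 s390x",
    ":utils-arch:":        "amd64 arm64 armhf i386 ppc64el riscv64 s390x",
}

TAG = re.compile(r"^:(debian|ubuntu):(.*)$")

def expand(template_path: str, distro: str) -> str:
    out = []
    with open(template_path) as f:
        for line in f:
            m = TAG.match(line)
            if m:
                if m.group(1) != distro:
                    continue              # line belongs to the other distro
                line = m.group(2) + "\n"  # keep it, with the tag stripped
            for macro, archs in MACROS.items():
                line = line.replace(macro, archs)
            out.append(line)
    return "".join(out)

if __name__ == "__main__":
    distro = sys.argv[1] if len(sys.argv) > 1 else "debian"
    sys.stdout.write(expand("debian/control-in", distro))
```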
+ +Package: qemu-system-ppc +Architecture: :system-arch: +Multi-Arch: foreign +Build-Profiles: +Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~), +Recommends: qemu-utils, +:ubuntu: ipxe-qemu-256k-compat-efi-roms, +# ppc targets use vgabios-stdvga and bootroms + seabios, + ipxe-qemu, + qemu-system-gui (= ${binary:Version}), + qemu-system-modules-spice (= ${binary:Version}) [:spice-arch:], + qemu-system-modules-opengl (= ${binary:Version}), + qemu-block-extra (= ${binary:Version}), +Suggests: samba, vde2, +Provides: ${qemu:Provides} +Description: QEMU full system emulation binaries (ppc) + QEMU is a fast processor emulator: currently the package supports + PowerPC emulation. By using dynamic translation it achieves + reasonable speed while being easy to port on new host CPUs. + . + This package provides the full system emulation binaries to emulate + the following PowerPC hardware: ${qemu:archlist}. + . + In system emulation mode QEMU emulates a full system, including a processor + and various peripherals. It enables easier testing and debugging of system + code. It can also be used to provide virtual hosting of several virtual + machines on a single server. + +Package: qemu-system-sparc +Architecture: :system-arch: +Multi-Arch: foreign +Build-Profiles: +Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~), +Recommends: qemu-utils, +# sparc64 uses vgabios-stdvga and bootroms + seabios, + ipxe-qemu, + qemu-system-gui (= ${binary:Version}), + qemu-system-modules-spice (= ${binary:Version}) [:spice-arch:], + qemu-system-modules-opengl (= ${binary:Version}), + qemu-block-extra (= ${binary:Version}), +Suggests: samba, vde2, +Provides: ${qemu:Provides} +Description: QEMU full system emulation binaries (sparc) + QEMU is a fast processor emulator: currently the package supports + SPARC emulation. By using dynamic translation it achieves + reasonable speed while being easy to port on new host CPUs. + . + This package provides the full system emulation binaries to emulate + the following sparc hardware: ${qemu:archlist}. + . + In system emulation mode QEMU emulates a full system, including a processor + and various peripherals. It enables easier testing and debugging of system + code. It can also be used to provide virtual hosting of several virtual + machines on a single server. +:ubuntu: +:ubuntu:Package: qemu-system-s390x +:ubuntu:Architecture: :system-arch: +:ubuntu:Multi-Arch: foreign +:ubuntu:Build-Profiles: +:ubuntu:Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~), +:ubuntu:Recommends: qemu-utils, +:ubuntu: qemu-system-gui (= ${binary:Version}), +:ubuntu: qemu-system-modules-spice (= ${binary:Version}) [:spice-arch:], +:ubuntu: qemu-system-modules-opengl (= ${binary:Version}), +:ubuntu: qemu-block-extra (= ${binary:Version}), +:ubuntu:Provides: ${qemu:Provides} +:ubuntu:Description: QEMU full system emulation binaries (s390x) +:ubuntu: QEMU is a fast processor emulator: currently the package supports +:ubuntu: s390x emulation. By using dynamic translation it achieves reasonable +:ubuntu: speed while being easy to port on new host CPUs. +:ubuntu: . +:ubuntu: This package provides the full system emulation binaries to emulate +:ubuntu: the following s390x hardware: ${qemu:archlist}. +:ubuntu: . 
+:ubuntu: In system emulation mode QEMU emulates a full system, including a processor
+:ubuntu: and various peripherals. It enables easier testing and debugging of system
+:ubuntu: code. It can also be used to provide virtual hosting of several virtual
+:ubuntu: machines on a single server.
+:ubuntu:
+:ubuntu:# xen support generally is disabled on ubuntu, for a while we used
+:ubuntu:# an extra build with xen enabled. In the meantime Debian followed that
+:ubuntu:# approach but with a different name, so add a transitional until
+:ubuntu:# after 24.04
+:ubuntu:Package: qemu-system-x86-xen
+:ubuntu:Architecture: amd64
+:ubuntu:Multi-Arch: foreign
+:ubuntu:Section: oldlibs
+:ubuntu:Depends: qemu-system-xen (>= 1:7.0+dfsg-7ubuntu1), ${misc:Depends}
+:ubuntu:Description: QEMU full system emulation binaries (x86)
+:ubuntu: The former qemu-system-x86-xen binaries are now in qemu-system-xen.
+:ubuntu: .
+:ubuntu: This is a transitional package. You can safely remove it.
+
+Package: qemu-system-x86
+Architecture: :system-arch:
+Multi-Arch: foreign
+Build-Profiles:
+Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-common (= ${binary:Version}), qemu-system-data (>> ${source:Upstream-Version}~),
+ seabios (>> 1.16.3-1~),
+ ipxe-qemu,
+Recommends: qemu-utils,
+:ubuntu: ipxe-qemu-256k-compat-efi-roms,
+ ovmf,
+ qemu-system-gui (= ${binary:Version}),
+ qemu-system-modules-spice (= ${binary:Version}) [:spice-arch:],
+ qemu-system-modules-opengl (= ${binary:Version}),
+ qemu-block-extra (= ${binary:Version}),
+:ubuntu: cpu-checker,
+Suggests: samba, vde2,
+Provides: ${qemu:Provides}
+Description: QEMU full system emulation binaries (x86)
+ QEMU is a fast processor emulator: currently the package supports
+ i386 and x86-64 emulation. By using dynamic translation it achieves
+ reasonable speed while being easy to port on new host CPUs.
+ .
+ This package provides the full system emulation binaries to emulate
+ the following x86 hardware: ${qemu:archlist}.
+ .
+ In system emulation mode QEMU emulates a full system, including a processor
+ and various peripherals. It enables easier testing and debugging of system
+ code. It can also be used to provide virtual hosting of several virtual
+ machines on a single server.
+ .
+ On x86 host hardware this package also enables KVM kernel virtual machine
+ usage on systems which support it.
+
+Package: qemu-system-xen
+Architecture: amd64
+Multi-Arch: no
+Build-Profiles:
+# do we really need qemu-system-data? keymaps only?
+Depends: ${shlibs:Depends}, ${misc:Depends}, qemu-system-data (>> ${source:Upstream-Version}~),
+ seabios
+Recommends: qemu-utils,
+ ovmf,
+:ubuntu:# For the transition from the former qemu-system-x86-xen name
+:ubuntu:Breaks: qemu-system-x86-xen (<<1:7.0+dfsg-7ubuntu1)
+:ubuntu:Replaces: qemu-system-x86-xen (<<1:7.0+dfsg-7ubuntu1)
+:ubuntu:Provides: qemu-system-x86-xen
+Description: QEMU full system emulation (Xen helper package)
+ This package provides the i386 system emulation binary to work
+ together with the Xen hypervisor for some types of DomUs.
+ This package is not useful on its own.
+
+Package: qemu-user
+Architecture: :user-arch:
+Multi-Arch: foreign
+Build-Profiles:
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Recommends: qemu-user-binfmt
+Description: QEMU user mode emulation binaries
+ QEMU is a fast processor emulator: currently the package supports Alpha, ARM,
+ CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V,
+ S390x, SH4, SPARC, x86-64, Xtensa and other emulations.
By using dynamic
+ translation it achieves reasonable speed while being easy to port on new host
+ CPUs.
+ .
+ This package provides the user mode emulation binaries. In this mode
+ QEMU can launch Linux processes compiled for one CPU on another CPU.
+ .
+ If qemu-user-binfmt package is also installed, it will register binary
+ format handlers from this qemu-user package with the kernel so it will
+ be possible to run foreign binaries directly. However, this might not
+ be suitable for using inside foreign chroots, in which case it is
+ possible to use qemu-user-static package instead of qemu-user-binfmt --
+ qemu-user-static will register statically linked binfmt handlers instead.
+
+Package: qemu-user-static
+Architecture: :user-arch:
+Built-Using: ${built-using}
+Multi-Arch: foreign
+Build-Profiles:
+Depends: ${misc:Depends}
+# Recommend systemd for binfmt-misc registration
+Recommends: systemd
+Provides: qemu-user-binfmt
+Conflicts: qemu-user-binfmt
+Description: QEMU user mode emulation binaries (static version)
+ QEMU is a fast processor emulator: currently the package supports Alpha, ARM,
+ CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V,
+ S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic
+ translation it achieves reasonable speed while being easy to port on new host
+ CPUs.
+ .
+ This package provides the user mode emulation binaries, built
+ statically. In this mode QEMU can launch Linux processes compiled for
+ one CPU on another CPU.
+ .
+ qemu-user-static package will register binary formats which the provided
+ emulators can handle, so that it will be possible to run foreign binaries
+ directly.
+
+Package: qemu-user-binfmt
+Architecture: :user-arch:
+Multi-Arch: foreign
+Build-Profiles:
+Depends: ${misc:Depends}, qemu-user (= ${binary:Version})
+# Recommend systemd for binfmt-misc registration
+Recommends: systemd
+Conflicts: qemu-user-static
+Description: QEMU user mode binfmt registration for qemu-user
+ QEMU is a fast processor emulator: currently the package supports Alpha, ARM,
+ CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V,
+ S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic
+ translation it achieves reasonable speed while being easy to port on new host
+ CPUs.
+ .
+ This package provides binfmt support registration for the user mode
+ emulation binaries from qemu-user. This is an empty package, it does
+ not contain any additional files, only registration scripts which run
+ at install and remove times.
+
+Package: qemu-utils
+Architecture: :utils-arch:
+Multi-Arch: foreign
+Depends: ${shlibs:Depends}, ${misc:Depends},
+Recommends:
+ qemu-block-extra (= ${binary:Version}),
+Replaces:
+# qemu-storage-daemon and qemu-block-driver.7 has been moved from q-s-c.
+ qemu-system-common (<< 1:8.0+dfsg-5~),
+Breaks:
+ qemu-system-common (<< 1:8.0+dfsg-5~),
+Description: QEMU utilities
+ QEMU is a fast processor emulator: currently the package supports Alpha, ARM,
+ CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V,
+ S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic
+ translation it achieves reasonable speed while being easy to port on new host
+ CPUs.
+ .
+ This package provides QEMU related utilities: + * qemu-img: QEMU disk image utility + * qemu-io: QEMU disk exerciser + * qemu-nbd: QEMU disk network block device server + +Package: qemu-guest-agent +Architecture: any +Multi-Arch: foreign +Depends: ${shlibs:Depends}, ${misc:Depends} +Pre-Depends: ${misc:Pre-Depends} +Description: Guest-side qemu-system agent + QEMU is a fast processor emulator: currently the package supports Alpha, ARM, + CRIS, i386, LoongArch, M68k (ColdFire), MicroBlaze, MIPS, PowerPC, RISC-V, + S390x, SH4, SPARC, x86-64, Xtensa and other emulations. By using dynamic + translation it achieves reasonable speed while being easy to port on new host + CPUs. + . + This package provides a daemon (agent) to run inside qemu-system + guests (full system emulation). It communicates with the host using + a virtio-serial channel org.qemu.guest_agent.0, and allows one to perform + some functions in the guest from the host, including: + - querying and setting guest system time + - performing guest filesystem sync operation + - initiating guest shutdown or suspend to ram + - accessing guest files + - freezing/thawing guest filesystem operations + - others. + . + Install this package on a system which is running as guest inside + qemu virtual machine. It is not used on the host. + +#Package: qemu-system-for-host +## This is actually all architectures for which qemu-system (softmmu) target is implemented +#Architecture: any +#Multi-Arch: same +#Depends: ${qemu:for-host} (=${binary:Version}) +#Description: QEMU full system emulation (dependency-only package for the host architecture) +# This package pulls one of qemu-system-* subpackages which contains +# qemu-system-${}{DEB_HOST_ARCH_CPU} binary specific for the host +# architecture. This one depends on ${qemu:for-host}. +# diff --git a/debian/copyright b/debian/copyright new file mode 100644 index 00000000000..ddec53dd95c --- /dev/null +++ b/debian/copyright @@ -0,0 +1,529 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Files-Excluded: + roms/QemuMacDrivers + roms/edk2 + roms/ipxe + roms/seabios +# roms/u-boot +# + roms/SLOF/board-js2x/rtas/i2c_bmc.oco + roms/SLOF/board-js2x/rtas/ipmi_oem.oco + roms/SLOF/clients/takeover/takeover.oco + roms/SLOF/lib/libipmi/libipmi.oco +# + pc-bios/QEMU,*.bin + pc-bios/bamboo.dtb pc-bios/canyonlands.dtb + pc-bios/bios.bin + pc-bios/bios-256k.bin + pc-bios/bios-microvm.bin + pc-bios/edk2-*.fd.bz2 + pc-bios/efi-*.rom + pc-bios/pxe-*.rom + pc-bios/hppa-firmware.img + pc-bios/kvmvapic.bin + pc-bios/linuxboot.bin + pc-bios/linuxboot_dma.bin + pc-bios/multiboot.bin + pc-bios/multiboot_dma.bin + pc-bios/openbios-ppc + pc-bios/openbios-sparc32 + pc-bios/openbios-sparc64 + pc-bios/opensbi-*.bin + pc-bios/palcode-clipper + pc-bios/petalogix-ml605.dtb + pc-bios/petalogix-s3adsp1800.dtb + pc-bios/pvh.bin + pc-bios/qboot.rom + pc-bios/qemu_vga.ndrv + pc-bios/s390-ccw.img + pc-bios/s390-netboot.img + pc-bios/skiboot.lid + pc-bios/slof.bin + pc-bios/u-boot-sam460-20100605.bin + pc-bios/u-boot.e500 + pc-bios/vgabios.bin + pc-bios/vgabios-*.bin + pc-bios/vof.bin + +Files: * +Copyright: + Copyright (C) 1982, 1986, 1988-1994 The Regents of the University of California + Copyright (C) 1986-2007 Free Software Foundation Inc. 
+ Copyright (C) 1988-1992 Richard Outerbridge + Copyright (C) 1991-1992, 1996 Linus Torvalds + Copyright (C) 1992 Graven Imagery + Copyright (C) 1995 Danny Gasparovski + Copyright (C) 1996-1999 Eduardo Horvath + Copyright (C) 1996 Paul Mackerras + Copyright (C) 1997-1999, 2001, 2006-2009 Red Hat Inc. + Copyright (C) 1998-1999 Philip Blundell + Copyright (C) 1998-2001, 2003, 2006 Thomas Sailer + Copyright (C) 1998, 2003-2008 Fabrice Bellard + Copyright (C) 1998-2004 Samuel Rydh + Copyright (C) 1998 Kenneth Albanowski + Copyright (C) 1998 The Silver Hammer Group Ltd. + Copyright (C) 1999-2000, 2002-2003 Maxim Krasnyansky + Copyright (C) 1999-2000 Tatsuyuki Satoh + Copyright (C) 1999-2006, 2008 Intel Corporation + Copyright (C) 1999 AT&T Laboratories Cambridge + Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright (C) 2000-2002, 2004-2009 Axis Communications AB. + Copyright (C) 2000-2003, 2005 Martin Schwidefsky + Copyright (C) 2000-2003 David McCullough + Copyright (C) 2000-2005 All Rights Reserved. + Copyright (C) 2000-2005 DENX Software Engineering + Copyright (C) 2000-2005 Silicon Graphics Inc. + Copyright (C) 2000-2005 Wolfgang Denk + Copyright (C) 2000-2007 Tibor "TS" Schütz + Copyright (C) 2001 OKTET Ltd. + Copyright (C) 2001 Xilinx Inc. + Copyright (C) 2002-2005 Vassili Karpov + Copyright (C) 2002-2006 Marcel Holtmann + Copyright (C) 2002 Greg Ungerer + Copyright (C) 2002 Paul Dale + Copyright (C) 2003-2004 James Yonan + Copyright (C) 2003-2007 Jocelyn Mayer + Copyright (C) 2003 Damion K. Wilson + Copyright (C) 2003 Thomas M. Ogrisegg + Copyright (C) 2004-2005 Johannes E. Schindelin + Copyright (C) 2004, 2007 Magnus Damm + Copyright (C) 2004 Antony T Curtis + Copyright (C) 2004 Gianni Tedesco + Copyright (C) 2004 Johannes Schindelin + Copyright (C) 2004 Makoto Suzuki + Copyright (C) 2005, 2007 Alex Beregszaszi + Copyright (C) 2005-2007 Anthony Liguori + Copyright (C) 2005-2008 Andrzej Zaborowski + Copyright (C) 2005-2009 Paul Brook + Copyright (C) 2005 Anthony Liguori + Copyright (C) 2005 Filip Navara + Copyright (C) 2005 International Business Machines Corp. + Copyright (C) 2005 LLC. Written + Copyright (C) 2005 Mike Kronenberg + Copyright (C) 2005 Samuel Tardieu + Copyright (C) 2006-2007, 2009 Aurelien Jarno + Copyright (C) 2006-2007, 2009 Stefan Weil + Copyright (C) 2006-2007 Thiemo Seufer + Copyright (C) 2006-2007 Thorsten Zitterell + Copyright (C) 2006-2008 Openedhand Ltd. + Copyright (C) 2006-2008 Qumranet Technologies + Copyright (C) 2006 Frederick Reeve + Copyright (C) 2006 Igor Kovalenko + Copyright (C) 2006 InnoTek Systemberatung GmbH + Copyright (C) 2006 Joachim Henke + Copyright (C) 2006 Lonnie Mendez + Copyright (C) 2006 Marius Groeger + Copyright (C) 2007-2008 Bull S.A.S. + Copyright (C) 2007-2008 IBM Corporation + Copyright (C) 2007-2008 Lauro Ramos Venancio + Copyright (C) 2007-2008 Nokia Corporation + Copyright (C) 2007-2008 OpenMoko Inc. + Copyright (C) 2007, 2009 Alexander Graf + Copyright (C) 2007-2009 Edgar E. Iglesias + Copyright (C) 2007-2009 Herve Poussineau + Copyright (C) 2007 Arastra Inc. + Copyright (C) 2007 Armin Kuster + Copyright (C) 2007 Dan Aloni + Copyright (C) 2007 Marko Kohtala + Copyright (C) 2007 MontaVista Software Inc. + Copyright (C) 2007 Robert Reif + Copyright (C) 2007 Vladimir Ananiev + Copyright (C) 2008-2009 Arnaud Patard + Copyright (C) 2008-2009 Citrix Systems Inc. 
+ Copyright (C) 2008-2009 Gerd Hoffmann + Copyright (C) 2008 Dell MessageOne + Copyright (C) 2008 Dmitry Baryshkov + Copyright (C) 2008 Gleb Natapov + Copyright (C) 2008 Jean-Christophe PLAGNIOL-VILLARD + Copyright (C) 2008 Lubomir Rintel + Copyright (C) 2008 Max Krasnyansky + Copyright (C) 2008 Paul Mundt + Copyright (C) 2008 Samuel Thibault + Copyright (C) 2008 Shin-ichiro KAWASAKI + Copyright (C) 2008 Takashi YOSHII + Copyright (C) 2008 TJ + Copyright (C) 2009 CodeSourcery + Copyright (C) 2009 Freescale Semiconductor Inc. + Copyright (C) 2009 Hewlett-Packard Development Company + Copyright (C) 2009 Isaku Yamahata + Copyright (C) 2009 Kevin Wolf + Copyright (C) 2009 Laurent Vivier + Copyright (C) 2009 Michael S. Tsirkin + Copyright (C) 2009 Novell Inc. + Copyright (C) 2009 Ulrich Hecht + Copyright (C) 2009 VA Linux Systems Japan +License: + QEMU as a whole is released under the GNU General Public License version 2. + On Debian systems, the complete text of the GNU General Public License + version 2 can be found in the file /usr/share/common-licenses/GPL-2. + . + Parts of QEMU have specific licenses which are compatible with the + GNU General Public License. Hence each source file contains its own + licensing information. + . + In particular, the QEMU virtual CPU core library (libqemu.a) is + released under the GNU Lesser General Public License version 2 or later. + On Debian systems, the complete text of the GNU Lesser General Public + License can be found in the file /usr/share/common-licenses/LGPL-2. + . + The BSD emulator is released under the following BSD license: + . + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + . + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. + . + The TCG code and many hardware device emulation sources are released under the + following MIT license: + . + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + . + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + . + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + . + The QEMU Logo, pc-bios/qemu_logo*.svg, and also icons and pictures derived from + it, pc-bios/qemu-icon.bmp, pc-bios/qemu-nsis.*, are licensed under Creative + Commons Attribution license version 3.0 (CC-BY-3.0): + . + Attribution 3.0 Unported + . + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR + DAMAGES RESULTING FROM ITS USE. + . + License + . + THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE + COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY + COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS + AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. + . + BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE + TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY + BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS + CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND + CONDITIONS. + . + 1. Definitions + . + a. "Adaptation" means a work based upon the Work, or upon the Work and + other pre-existing works, such as a translation, adaptation, + derivative work, arrangement of music or other alterations of a + literary or artistic work, or phonogram or performance and includes + cinematographic adaptations or any other form in which the Work may be + recast, transformed, or adapted including in any form recognizably + derived from the original, except that a work that constitutes a + Collection will not be considered an Adaptation for the purpose of + this License. For the avoidance of doubt, where the Work is a musical + work, performance or phonogram, the synchronization of the Work in + timed-relation with a moving image ("synching") will be considered an + Adaptation for the purpose of this License. + b. "Collection" means a collection of literary or artistic works, such as + encyclopedias and anthologies, or performances, phonograms or + broadcasts, or other works or subject matter other than works listed + in Section 1(f) below, which, by reason of the selection and + arrangement of their contents, constitute intellectual creations, in + which the Work is included in its entirety in unmodified form along + with one or more other contributions, each constituting separate and + independent works in themselves, which together are assembled into a + collective whole. A work that constitutes a Collection will not be + considered an Adaptation (as defined above) for the purposes of this + License. + c. "Distribute" means to make available to the public the original and + copies of the Work or Adaptation, as appropriate, through sale or + other transfer of ownership. + d. "Licensor" means the individual, individuals, entity or entities that + offer(s) the Work under the terms of this License. + e. 
"Original Author" means, in the case of a literary or artistic work, + the individual, individuals, entity or entities who created the Work + or if no individual or entity can be identified, the publisher; and in + addition (i) in the case of a performance the actors, singers, + musicians, dancers, and other persons who act, sing, deliver, declaim, + play in, interpret or otherwise perform literary or artistic works or + expressions of folklore; (ii) in the case of a phonogram the producer + being the person or legal entity who first fixes the sounds of a + performance or other sounds; and, (iii) in the case of broadcasts, the + organization that transmits the broadcast. + f. "Work" means the literary and/or artistic work offered under the terms + of this License including without limitation any production in the + literary, scientific and artistic domain, whatever may be the mode or + form of its expression including digital form, such as a book, + pamphlet and other writing; a lecture, address, sermon or other work + of the same nature; a dramatic or dramatico-musical work; a + choreographic work or entertainment in dumb show; a musical + composition with or without words; a cinematographic work to which are + assimilated works expressed by a process analogous to cinematography; + a work of drawing, painting, architecture, sculpture, engraving or + lithography; a photographic work to which are assimilated works + expressed by a process analogous to photography; a work of applied + art; an illustration, map, plan, sketch or three-dimensional work + relative to geography, topography, architecture or science; a + performance; a broadcast; a phonogram; a compilation of data to the + extent it is protected as a copyrightable work; or a work performed by + a variety or circus performer to the extent it is not otherwise + considered a literary or artistic work. + g. "You" means an individual or entity exercising rights under this + License who has not previously violated the terms of this License with + respect to the Work, or who has received express permission from the + Licensor to exercise rights under this License despite a previous + violation. + h. "Publicly Perform" means to perform public recitations of the Work and + to communicate to the public those public recitations, by any means or + process, including by wire or wireless means or public digital + performances; to make available to the public Works in such a way that + members of the public may access these Works from a place and at a + place individually chosen by them; to perform the Work to the public + by any means or process and the communication to the public of the + performances of the Work, including by public digital performance; to + broadcast and rebroadcast the Work by any means including signs, + sounds or images. + i. "Reproduce" means to make copies of the Work by any means including + without limitation by sound or visual recordings and the right of + fixation and reproducing fixations of the Work, including storage of a + protected performance or phonogram in digital form or other electronic + medium. + . + 2. Fair Dealing Rights. Nothing in this License is intended to reduce, + limit, or restrict any uses free from copyright or rights arising from + limitations or exceptions that are provided for in connection with the + copyright protection under copyright law or other applicable laws. + . + 3. License Grant. 
Subject to the terms and conditions of this License, + Licensor hereby grants You a worldwide, royalty-free, non-exclusive, + perpetual (for the duration of the applicable copyright) license to + exercise the rights in the Work as stated below: + . + a. to Reproduce the Work, to incorporate the Work into one or more + Collections, and to Reproduce the Work as incorporated in the + Collections; + b. to create and Reproduce Adaptations provided that any such Adaptation, + including any translation in any medium, takes reasonable steps to + clearly label, demarcate or otherwise identify that changes were made + to the original Work. For example, a translation could be marked "The + original work was translated from English to Spanish," or a + modification could indicate "The original work has been modified."; + c. to Distribute and Publicly Perform the Work including as incorporated + in Collections; and, + d. to Distribute and Publicly Perform Adaptations. + e. For the avoidance of doubt: + . + i. Non-waivable Compulsory License Schemes. In those jurisdictions in + which the right to collect royalties through any statutory or + compulsory licensing scheme cannot be waived, the Licensor + reserves the exclusive right to collect such royalties for any + exercise by You of the rights granted under this License; + ii. Waivable Compulsory License Schemes. In those jurisdictions in + which the right to collect royalties through any statutory or + compulsory licensing scheme can be waived, the Licensor waives the + exclusive right to collect such royalties for any exercise by You + of the rights granted under this License; and, + iii. Voluntary License Schemes. The Licensor waives the right to + collect royalties, whether individually or, in the event that the + Licensor is a member of a collecting society that administers + voluntary licensing schemes, via that society, from any exercise + by You of the rights granted under this License. + . + The above rights may be exercised in all media and formats whether now + known or hereafter devised. The above rights include the right to make + such modifications as are technically necessary to exercise the rights in + other media and formats. Subject to Section 8(f), all rights not expressly + granted by Licensor are hereby reserved. + . + 4. Restrictions. The license granted in Section 3 above is expressly made + subject to and limited by the following restrictions: + . + a. You may Distribute or Publicly Perform the Work only under the terms + of this License. You must include a copy of, or the Uniform Resource + Identifier (URI) for, this License with every copy of the Work You + Distribute or Publicly Perform. You may not offer or impose any terms + on the Work that restrict the terms of this License or the ability of + the recipient of the Work to exercise the rights granted to that + recipient under the terms of the License. You may not sublicense the + Work. You must keep intact all notices that refer to this License and + to the disclaimer of warranties with every copy of the Work You + Distribute or Publicly Perform. When You Distribute or Publicly + Perform the Work, You may not impose any effective technological + measures on the Work that restrict the ability of a recipient of the + Work from You to exercise the rights granted to that recipient under + the terms of the License. 
This Section 4(a) applies to the Work as + incorporated in a Collection, but this does not require the Collection + apart from the Work itself to be made subject to the terms of this + License. If You create a Collection, upon notice from any Licensor You + must, to the extent practicable, remove from the Collection any credit + as required by Section 4(b), as requested. If You create an + Adaptation, upon notice from any Licensor You must, to the extent + practicable, remove from the Adaptation any credit as required by + Section 4(b), as requested. + b. If You Distribute, or Publicly Perform the Work or any Adaptations or + Collections, You must, unless a request has been made pursuant to + Section 4(a), keep intact all copyright notices for the Work and + provide, reasonable to the medium or means You are utilizing: (i) the + name of the Original Author (or pseudonym, if applicable) if supplied, + and/or if the Original Author and/or Licensor designate another party + or parties (e.g., a sponsor institute, publishing entity, journal) for + attribution ("Attribution Parties") in Licensor's copyright notice, + terms of service or by other reasonable means, the name of such party + or parties; (ii) the title of the Work if supplied; (iii) to the + extent reasonably practicable, the URI, if any, that Licensor + specifies to be associated with the Work, unless such URI does not + refer to the copyright notice or licensing information for the Work; + and (iv) , consistent with Section 3(b), in the case of an Adaptation, + a credit identifying the use of the Work in the Adaptation (e.g., + "French translation of the Work by Original Author," or "Screenplay + based on original Work by Original Author"). The credit required by + this Section 4 (b) may be implemented in any reasonable manner; + provided, however, that in the case of a Adaptation or Collection, at + a minimum such credit will appear, if a credit for all contributing + authors of the Adaptation or Collection appears, then as part of these + credits and in a manner at least as prominent as the credits for the + other contributing authors. For the avoidance of doubt, You may only + use the credit required by this Section for the purpose of attribution + in the manner set out above and, by exercising Your rights under this + License, You may not implicitly or explicitly assert or imply any + connection with, sponsorship or endorsement by the Original Author, + Licensor and/or Attribution Parties, as appropriate, of You or Your + use of the Work, without the separate, express prior written + permission of the Original Author, Licensor and/or Attribution + Parties. + c. Except as otherwise agreed in writing by the Licensor or as may be + otherwise permitted by applicable law, if You Reproduce, Distribute or + Publicly Perform the Work either by itself or as part of any + Adaptations or Collections, You must not distort, mutilate, modify or + take other derogatory action in relation to the Work which would be + prejudicial to the Original Author's honor or reputation. Licensor + agrees that in those jurisdictions (e.g. 
Japan), in which any exercise + of the right granted in Section 3(b) of this License (the right to + make Adaptations) would be deemed to be a distortion, mutilation, + modification or other derogatory action prejudicial to the Original + Author's honor and reputation, the Licensor will waive or not assert, + as appropriate, this Section, to the fullest extent permitted by the + applicable national law, to enable You to reasonably exercise Your + right under Section 3(b) of this License (right to make Adaptations) + but not otherwise. + . + 5. Representations, Warranties and Disclaimer + . + UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR + OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY + KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, + INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, + FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF + LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, + WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION + OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. + . + 6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE + LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR + ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES + ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS + BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + . + 7. Termination + . + a. This License and the rights granted hereunder will terminate + automatically upon any breach by You of the terms of this License. + Individuals or entities who have received Adaptations or Collections + from You under this License, however, will not have their licenses + terminated provided such individuals or entities remain in full + compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will + survive any termination of this License. + b. Subject to the above terms and conditions, the license granted here is + perpetual (for the duration of the applicable copyright in the Work). + Notwithstanding the above, Licensor reserves the right to release the + Work under different license terms or to stop distributing the Work at + any time; provided, however that any such election will not serve to + withdraw this License (or any other license that has been, or is + required to be, granted under the terms of this License), and this + License will continue in full force and effect unless terminated as + stated above. + . + 8. Miscellaneous + . + a. Each time You Distribute or Publicly Perform the Work or a Collection, + the Licensor offers to the recipient a license to the Work on the same + terms and conditions as the license granted to You under this License. + b. Each time You Distribute or Publicly Perform an Adaptation, Licensor + offers to the recipient a license to the original Work on the same + terms and conditions as the license granted to You under this License. + c. If any provision of this License is invalid or unenforceable under + applicable law, it shall not affect the validity or enforceability of + the remainder of the terms of this License, and without further action + by the parties to this agreement, such provision shall be reformed to + the minimum extent necessary to make such provision valid and + enforceable. + d. 
No term or provision of this License shall be deemed waived and no + breach consented to unless such waiver or consent shall be in writing + and signed by the party to be charged with such waiver or consent. + e. This License constitutes the entire agreement between the parties with + respect to the Work licensed here. There are no understandings, + agreements or representations with respect to the Work not specified + here. Licensor shall not be bound by any additional provisions that + may appear in any communication from You. This License may not be + modified without the mutual written agreement of the Licensor and You. + f. The rights granted under, and the subject matter referenced, in this + License were drafted utilizing the terminology of the Berne Convention + for the Protection of Literary and Artistic Works (as amended on + September 28, 1979), the Rome Convention of 1961, the WIPO Copyright + Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996 + and the Universal Copyright Convention (as revised on July 24, 1971). + These rights and subject matter take effect in the relevant + jurisdiction in which the License terms are sought to be enforced + according to the corresponding provisions of the implementation of + those treaty provisions in the applicable national law. If the + standard suite of rights granted under applicable copyright law + includes additional rights not granted under this License, such + additional rights are deemed to be included in the License; this + License is not intended to restrict the license of any rights under + applicable law. + . + Creative Commons Notice + . + Creative Commons is not a party to this License, and makes no warranty + whatsoever in connection with the Work. Creative Commons will not be + liable to You or any party on any legal theory for any damages + whatsoever, including without limitation any general, special, + incidental or consequential damages arising in connection to this + license. Notwithstanding the foregoing two (2) sentences, if Creative + Commons has expressly identified itself as the Licensor hereunder, it + shall have all rights and obligations of Licensor. + . + Except for the limited purpose of indicating to the public that the + Work is licensed under the CCPL, Creative Commons does not authorize + the use by either party of the trademark "Creative Commons" or any + related trademark or logo of Creative Commons without the prior + written consent of Creative Commons. Any permitted use will be in + compliance with Creative Commons' then-current trademark usage + guidelines, as may be published on its website or otherwise made + available upon request from time to time. For the avoidance of doubt, + this trademark restriction does not form part of this License. + . + Creative Commons may be contacted at https://creativecommons.org/. diff --git a/debian/extract-config-opts b/debian/extract-config-opts new file mode 100755 index 00000000000..b75d63b8b84 --- /dev/null +++ b/debian/extract-config-opts @@ -0,0 +1,22 @@ +#! /bin/sh + +# This is a trivial script to parse comments in debian/control +# into a set of system-specific configure options. 
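For illustration, a hypothetical debian/control comment block and the resulting output could look like this (the option names and architectures below are examples only, not the options actually used by this packaging):

    $ grep '^# --' debian/control
    # --enable-rbd arm64
    # --disable-xen
    $ ./debian/extract-config-opts linux-arm64 debian/control
    --enable-rbd
    --disable-xen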
+# Usage: ./debian/extract-config-opts $OS-$ARCH debian/control + +osarch=$1 +control=$2 + +sed -n 's/^# \?--/--/p' $control | \ +while IFS=' ' read f p x; do + set -- $p + if [ $# = 0 ]; then echo $f; continue; fi + for p in "$@"; do + case "$p" in + *-any) p="${p%-any}-*" ;; + *-*) ;; + *) p="linux-$p" ;; + esac + eval "case \$osarch in ($p) echo \$f; continue;; esac" + done +done diff --git a/debian/gbp.conf b/debian/gbp.conf new file mode 100644 index 00000000000..b185b618e50 --- /dev/null +++ b/debian/gbp.conf @@ -0,0 +1,4 @@ +[DEFAULT] +sign-tags = True +pristine-tar = True +upstream-branch = upstream-8.2 diff --git a/debian/kvm-spice b/debian/kvm-spice new file mode 100755 index 00000000000..55f910d145b --- /dev/null +++ b/debian/kvm-spice @@ -0,0 +1,10 @@ +#! /bin/sh +echo "$0: W: this is an old compat wrapper script for kvm-spice" >&2 +echo "$0: W: please use qemu-system-x86_64 instead of $0" >&2 + +if echo "$@" | grep -q -E -e '(^|\s)-machine\s.*accel=' -e '(^|\s)-accel\s'; then + # acceleration already set via command-line option - adding -enable-kvm would conflict + exec qemu-system-x86_64 "$@" +else + exec qemu-system-x86_64 -enable-kvm "$@" +fi diff --git a/debian/kvm-spice.1 b/debian/kvm-spice.1 new file mode 100644 index 00000000000..896daab20e9 --- /dev/null +++ b/debian/kvm-spice.1 @@ -0,0 +1,17 @@ +.TH kvm-spice 1 2020-07 "5.0" Ubuntu +.SH NAME +kvm-spice, qemu-system-x86_64-spice \- compatibility names for qemu-system-x86_64 +.SH DESCRIPTION +The two names are aliases for +.BR qemu-system-x86_64 , +where kvm-spice enables kvm native hardware mode by default. +These names are kept for backward compatibility +with the old packages, when spice-enabled qemu was packaged +separately. Now the main qemu-system binary has spice functionality +built in. Please use +.B qemu-system-x86_64 +instead of the old compat names. +.SH SEE ALSO +.BR qemu-system-x86_64 (1). +.SH AUTHOR +This manual page was written by Michael Tokarev . diff --git a/debian/kvm.1 b/debian/kvm.1 new file mode 100644 index 00000000000..0b272026f11 --- /dev/null +++ b/debian/kvm.1 @@ -0,0 +1,20 @@ +.TH kvm 1 2020-07 "5.0" Debian +.SH NAME +kvm \- kvm-enabling link for qemu-system-@ARCH@ +.SH DESCRIPTION +When executed as +.BR kvm , +qemu-system is run with preference to native hardware-based +virtualization, instead of relying solely on emulation. +Essentially +.B kvm +is equivalent to +.B qemu-system-@ARCH@ +.I -machine accel=kvm:tcg +so when the host CPU supports the kvm mode, native hardware +virtualization is enabled, otherwise qemu-system-@ARCH@ falls back +to the emulation (TCG) mode. +.SH SEE ALSO +.BR qemu-system-@ARCH@ (1). +.SH AUTHOR +This manual page was written by Michael Tokarev .
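To illustrate the equivalence described in kvm.1 above, the following two invocations are expected to behave the same on an x86-64 host (a sketch only; the memory size and disk image are example values, not taken from this packaging):

    kvm -m 2048 -drive file=disk.img,if=virtio
    qemu-system-x86_64 -machine accel=kvm:tcg -m 2048 -drive file=disk.img,if=virtio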
diff --git a/debian/microvm-devices.mak b/debian/microvm-devices.mak new file mode 100644 index 00000000000..0e115f4dc1d --- /dev/null +++ b/debian/microvm-devices.mak @@ -0,0 +1,19 @@ +# see configs/devices/i386-softmmu/default.mak +# for additional devices which can be disabled +# +CONFIG_PCI_DEVICES=n + +CONFIG_MICROVM=y + +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_SERIAL=y +CONFIG_VIRTIO_INPUT=y +CONFIG_VIRTIO_INPUT_HOST=y +CONFIG_VHOST_USER_INPUT=y +CONFIG_VIRTIO_NET=y +CONFIG_VIRTIO_SCSI=y +CONFIG_VIRTIO_RNG=y +CONFIG_VIRTIO_CRYPTO=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_GPU=y +CONFIG_VHOST_USER_GPU=y diff --git a/debian/not-installed b/debian/not-installed new file mode 100644 index 00000000000..bda49a1ff50 --- /dev/null +++ b/debian/not-installed @@ -0,0 +1,21 @@ +usr/include/qemu-plugin.h +usr/share/doc/qemu/_static +usr/share/doc/qemu/about +usr/share/doc/qemu/devel +usr/share/doc/qemu/interop +usr/share/doc/qemu/specs +usr/share/doc/qemu/tools +usr/share/doc/qemu/*.* +usr/share/doc/qemu/.buildinfo +usr/bin/elf2dmp +# test tool +usr/bin/qemu-edid +# we install these files in d/rules into arch-all qemu-system-data +usr/share/icons +usr/share/applications/qemu.desktop +usr/share/qemu/keymaps +# these 2 are installed even if no user or system build is requested +usr/share/doc/qemu/user/index.html +usr/share/doc/qemu/user/main.html +# VMware-specific helper, not needed for NVIDIA QEMU +usr/bin/qemu-vmsr-helper diff --git a/debian/optionrom.mak b/debian/optionrom.mak new file mode 100644 index 00000000000..27dc7249a8d --- /dev/null +++ b/debian/optionrom.mak @@ -0,0 +1,31 @@ +LD = ld +OBJCOPY = objcopy +CC = cc +CFLAGS = -O2 -m16 -Wa,-32 -march=i486 \ + -ffreestanding -fno-stack-protector -fno-pie \ + -I${SRC_PATH}/include +VPATH = ${SRC_PATH}/pc-bios/optionrom + +BINS = kvmvapic.bin linuxboot.bin linuxboot_dma.bin \ + multiboot.bin multiboot_dma.bin pvh.bin +all: ${BINS} + +%.o: %.S + ${CC} ${CFLAGS} -c -o $@ $< +%.o: %.c + ${CC} ${CFLAGS} -c -o $@ $< +%.img: %.o + ${LD} -m elf_i386 -T ${SRC_PATH}/pc-bios/optionrom/flat.lds -s -o $@ $^ +pvh.img: pvh.o pvh_main.o +%.raw: %.img + ${OBJCOPY} -O binary -j .text $< $@ +%.bin: %.raw + python3 ${SRC_PATH}/scripts/signrom.py $< $@ + +clean: + rm -f ${BINS} + +install: ${BINS} + install -m 0644 -t "${DESTDIR}" ${BINS} + +.PHONY: all clean install diff --git a/debian/patches/linux-user-binfmt-P.diff b/debian/patches/linux-user-binfmt-P.diff new file mode 100644 index 00000000000..66fde6116b0 --- /dev/null +++ b/debian/patches/linux-user-binfmt-P.diff @@ -0,0 +1,92 @@ +Subject: [PATCH, HACK]: linux-user: handle binfmt-misc P flag as a separate exe name +From: Michael Tokarev +Date: Sat, 13 Feb 2021 13:57:52 +0300 +Updated: Wed, 31 Aug 2022 12:30:17 +0300 +Forwarded: https://patchwork.ozlabs.org/project/qemu-devel/patch/27dfe8eb-adce-8db4-f28b-c42858b086db@msgid.tls.msk.ru/ +Upstream-Status: rejected this simple userspace solution in favour of more complex in-kernel one + +A hackish way to distinguish the case when qemu-user binary is executed +using in-kernel binfmt-misc subsystem with P flag (preserve argv). +We register binfmt interpreter under name /usr/libexec/qemu-binfmt/qemu-foo-binfmt-P +(which is just a symlink to ../../bin/qemu-foo), and if run like that, +qemu-user binary will "know" it should interpret argv[1] & argv[2] +in a special way. 
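For illustration, a binfmt-misc registration of the kind described above could look roughly like this (a sketch only: the target architecture, the MAGIC/MASK placeholders standing in for the target's ELF magic bytes, and the exact flag set are assumptions, not taken from this packaging):

    # /usr/libexec/qemu-binfmt/qemu-aarch64-binfmt-P is a symlink to ../../bin/qemu-aarch64
    echo ':qemu-aarch64:M::MAGIC:MASK:/usr/libexec/qemu-binfmt/qemu-aarch64-binfmt-P:PF' \
        > /proc/sys/fs/binfmt_misc/register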
+ +diff --git a/linux-user/main.c b/linux-user/main.c +index e44bdb17b8..587bd02db2 100644 +--- a/linux-user/main.c ++++ b/linux-user/main.c +@@ -562,7 +562,7 @@ static void usage(int exitcode) + exit(exitcode); + } + +-static int parse_args(int argc, char **argv) ++static int parse_args(int argc, char **argv, bool *preserve_argv0) + { + const char *r; + int optind; +@@ -579,6 +579,28 @@ static int parse_args(int argc, char **argv) + } + } + ++ /* HACK alert. ++ * when run as an interpreter using kernel's binfmt-misc mechanism, ++ * we have to know where are we (our own binary), where's the binary being run, ++ * and what it's argv[0] element. ++ * Only with the P interpreter flag kernel passes all 3 elements as first 3 argv[], ++ * but we can't distinguish if we were run with or without this P flag. ++ * So we register a special name with binfmt-misc system, a name which ends up ++ * in "-binfmt-P", and if our argv[0] ends up with that, we assume we were run ++ * from kernel's binfmt with P flag and our first 3 args are from kernel. ++ */ ++ if (strlen(argv[0]) > sizeof("binfmt-P") && ++ strcmp(argv[0] + strlen(argv[0]) - sizeof("binfmt-P"), "-binfmt-P") == 0) { ++ if (argc < 3) { ++ (void) fprintf(stderr, "qemu: %s has to be run using kernel binfmt-misc subsystem\n", argv[0]); ++ exit(EXIT_FAILURE); ++ } ++ exec_path = argv[1]; ++ handle_arg_argv0(argv[2]); ++ *preserve_argv0 = true; ++ return 2; ++ } ++ + optind = 1; + for (;;) { + if (optind >= argc) { +@@ -648,7 +670,7 @@ int main(int argc, char **argv, char **envp) + int ret; + int execfd; + unsigned long max_reserved_va; +- bool preserve_argv0; ++ bool preserve_argv0 = 0; + + error_init(argv[0]); + module_call_init(MODULE_INIT_TRACE); +@@ -678,7 +700,7 @@ int main(int argc, char **argv, char **envp) + qemu_add_opts(&qemu_trace_opts); + qemu_plugin_add_opts(); + +- optind = parse_args(argc, argv); ++ optind = parse_args(argc, argv, &preserve_argv0); + + qemu_set_log_filename_flags(last_log_filename, + last_log_mask | (enable_strace * LOG_STRACE), +@@ -717,7 +739,9 @@ int main(int argc, char **argv, char **envp) + + /* + * get binfmt_misc flags ++ * but only if not already done by parse_args() above + */ ++ if (!preserve_argv0) { + preserve_argv0 = !!(qemu_getauxval(AT_FLAGS) & AT_FLAGS_PRESERVE_ARGV0); + + /* +@@ -728,6 +752,7 @@ int main(int argc, char **argv, char **envp) + if (optind + 1 < argc && preserve_argv0) { + optind++; + } ++ } + + if (cpu_model == NULL) { + cpu_model = cpu_get_model(get_elf_eflags(execfd)); diff --git a/debian/patches/note-missing-module-pkg-name.diff b/debian/patches/note-missing-module-pkg-name.diff new file mode 100644 index 00000000000..4ba7ac40841 --- /dev/null +++ b/debian/patches/note-missing-module-pkg-name.diff @@ -0,0 +1,97 @@ +Date: Sun, 22 Aug 2021 15:16:25 +0300 +Updated: Wed, 03 Jan 2024 14:12:39 +0300 +From: Michael Tokarev +Subject: Note missing module package name +Debian-Specific: yes +Forwarded: not-needed + +Debian ships different modules in different packages. +By default qemu ignores the fact that it can not load +a module, pretending this module never existed. +Give a useful hint about the package where the module +in question resides. + +This is a hack, but it makes qemu packaged in debian +more user-friendly. 
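The intended user-visible effect of the hints added below is roughly the following (a sketch; the invocation and the error prefix are illustrative, only the hint text itself comes from this patch):

    $ qemu-system-x86_64 -audiodev spice,id=audio0 ...
    qemu-system-x86_64: Unknown audio driver `spice'. Perhaps you want to install qemu-system-modules-spice package?
    $ apt install qemu-system-modules-spice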
+ +diff --git a/audio/audio.c b/audio/audio.c +index 8d1e4ad922..15f1071bdc 100644 +--- a/audio/audio.c ++++ b/audio/audio.c +@@ -1745,5 +1745,9 @@ static AudioState *audio_init(Audiodev *dev, Error **errp) + done = !audio_driver_init(s, driver, dev, errp); + } else { +- error_setg(errp, "Unknown audio driver `%s'", drvname); ++ error_setg(errp, "Unknown audio driver `%s'. Perhaps you want to install %s package?", ++ drvname, ++ !strcmp(drvname, "spice") ? "qemu-system-modules-spice" : ++ !strcmp(drvname, "dbus") ? "qemu-system-modules-opengl" : ++ "qemu-system-gui"); + } + if (!done) { +diff --git a/block.c b/block.c +index a097772238..d5e7cb19bb 100644 +--- a/block.c ++++ b/block.c +@@ -476,4 +476,5 @@ BlockDriver *bdrv_find_format(const char *format_name) + error_report_err(local_err); + } ++ else error_report("Unable to load block driver %s. Perhaps you want to install qemu-block-extra package?", block_driver_modules[i].library_name); + break; + } +@@ -966,5 +967,8 @@ BlockDriver *bdrv_find_protocol(const char *filename, + return NULL; + } +- break; ++ else { ++ error_setg(errp, "Unable to load block driver %s. Perhaps you want to install qemu-block-extra package?", block_driver_modules[i].library_name); ++ return NULL; ++ } + } + } +diff --git a/system/vl.c b/system/vl.c +index 6b87bfa32c..dfa7f6a228 100644 +--- a/system/vl.c ++++ b/system/vl.c +@@ -1049,5 +1049,7 @@ static void select_vgahw(const MachineClass *machine_class, const char *p) + if (ti->opt_name && strstart(p, ti->opt_name, &opts)) { + if (!vga_interface_available(t)) { +- error_report("%s not available", ti->name); ++ error_report("%s not available. Perhaps you want to install %s package?", ti->name, ++ /* qxl is in spice, some are in opengl, the some are in common */ ++ !strcmp(ti->opt_name, "qxl") ? "qemu-system-modules-spice" : "qemu-system-modules-opengl"); + exit(1); + } +@@ -3496,5 +3498,10 @@ void qemu_init(int argc, char **argv) + #ifdef CONFIG_SPICE + case QEMU_OPTION_spice: +- opts = qemu_opts_parse_noisily(qemu_find_opts("spice"), optarg, false); ++ olist = qemu_find_opts("spice"); ++ if (!olist) { ++ error_report("Perhaps you want to install qemu-system-modules-spice package?"); ++ exit(1); ++ } ++ opts = qemu_opts_parse_noisily(olist, optarg, false); + if (!opts) { + exit(1); +diff --git a/ui/console.c b/ui/console.c +index 7db921e3b7..07a473a385 100644 +--- a/ui/console.c ++++ b/ui/console.c +@@ -1644,5 +1644,4 @@ bool qemu_display_find_default(DisplayOptions *opts) + return false; + } +- + void qemu_display_early_init(DisplayOptions *opts) + { +@@ -1659,6 +1658,8 @@ void qemu_display_early_init(DisplayOptions *opts) + } + if (dpys[opts->type] == NULL) { +- error_report("Display '%s' is not available.", +- DisplayType_str(opts->type)); ++ error_report("Display '%s' is not available. Perhaps you want to install %s package?", ++ DisplayType_str(opts->type), ++ (!strcmp(DisplayType_str(opts->type), "dbus") || !strcmp(DisplayType_str(opts->type), "egl-headless")) ++ ? 
"qemu-system-modules-opengl" : "qemu-system-gui"); + exit(1); + } diff --git a/debian/patches/openbios-array-bounds.diff b/debian/patches/openbios-array-bounds.diff new file mode 100644 index 00000000000..f2ae71b99d8 --- /dev/null +++ b/debian/patches/openbios-array-bounds.diff @@ -0,0 +1,14 @@ +From: Michael Tokarev +Subject: openbios: drivers/usb.c: add pragma -Warray-bounds to work around gcc false positive + +diff --git a/roms/openbios/drivers/usb.c b/roms/openbios/drivers/usb.c +index 88b7580..03f6ebb 100644 +--- a/roms/openbios/drivers/usb.c ++++ b/roms/openbios/drivers/usb.c +@@ -32,4 +32,6 @@ + */ + ++#pragma GCC diagnostic warning "-Warray-bounds" ++ + #include "config.h" + #include "drivers/usb.h" diff --git a/tests/migration/guestperf/__init__.py b/debian/patches/series similarity index 100% rename from tests/migration/guestperf/__init__.py rename to debian/patches/series diff --git a/debian/qemu-block-extra.install b/debian/qemu-block-extra.install new file mode 100644 index 00000000000..d8e0569358e --- /dev/null +++ b/debian/qemu-block-extra.install @@ -0,0 +1 @@ +usr/lib/${DEB_HOST_MULTIARCH}/qemu/block-*.so diff --git a/debian/qemu-block-extra.postinst b/debian/qemu-block-extra.postinst new file mode 100644 index 00000000000..9108c498aaf --- /dev/null +++ b/debian/qemu-block-extra.postinst @@ -0,0 +1,59 @@ +#!/bin/sh +# postinst script for qemu-block-extra +# +# see: dh_installdeb(1) + +set -e + +# summary of how this script can be called: +# * `configure' +# * `abort-upgrade' +# * `abort-remove' `in-favour' +# +# * `abort-remove' +# * `abort-deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + configure) + ;; + + abort-upgrade|abort-remove|abort-deconfigure) + ;; + + *) + echo "postinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +# dh_installsystemd will not fully enable or start .mount units, but we want +# this to be ready after install. We want to avoid forcing users to fail once +# only to realize they need to activate anything. +if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ] || [ "$1" = "abort-deconfigure" ] || [ "$1" = "abort-remove" ] ; then + if [ -d /run/systemd/system ]; then + # enabling on install will not work through deb-systemd-helper + # It will do all the rest like d-s-h state files and such, but + # not enable (= symlink) it. Therefore on new install or + # upgrade from a former version enable it once. 
+ if dpkg --compare-versions "$2" lt "1:6.0+dfsg-1~ubuntu1~"; then + systemctl enable 'run-qemu.mount' >/dev/null || true + fi + fi + # start it on configure + if [ -d /run/systemd/system ]; then + systemctl --system daemon-reload >/dev/null || true + deb-systemd-invoke start 'run-qemu.mount' >/dev/null || true + fi +fi + +exit 0 diff --git a/debian/qemu-block-extra.run-qemu.mount b/debian/qemu-block-extra.run-qemu.mount new file mode 100644 index 00000000000..6ab120a8201 --- /dev/null +++ b/debian/qemu-block-extra.run-qemu.mount @@ -0,0 +1,14 @@ +[Unit] +Description=Prepare /run/qemu to allow still running qemu binaries of former builds (after package upgrades) to fallback-load modules from there +Before=libvirtd.service + +[Mount] +What=tmpfs +Where=/run/qemu +Type=tmpfs +Options=nosuid,nodev,mode=0755 +ReadWriteOnly=true +LazyUnmount=yes + +[Install] +WantedBy=multi-user.target diff --git a/debian/qemu-debootstrap b/debian/qemu-debootstrap new file mode 100755 index 00000000000..399e736c86c --- /dev/null +++ b/debian/qemu-debootstrap @@ -0,0 +1,15 @@ +#!/bin/sh +# this used to be qemu-debootstrap - set up qemu syscall emulation in a debootstrap chroot +# since the kernel binfmt-misc supports the F flag for the interpreter, and we use it, +# there is no need to copy the qemu-user binfmt interpreter binary into the chroot, +# so regular debootstrap can be used just fine without --foreign, since all +# commands inside the chroot will just run using qemu from the binfmt-misc subsystem. + +if ! command -v debootstrap >/dev/null; then + echo "E: debootstrap isn't found in \$PATH, is the debootstrap package installed?" >&2 + exit 1 +fi + +echo "W: qemu-debootstrap is deprecated. Please use regular debootstrap directly" >&2 +echo "I: Running command: debootstrap $*" >&2 +exec debootstrap "$@" diff --git a/debian/qemu-debootstrap.1 b/debian/qemu-debootstrap.1 new file mode 100644 index 00000000000..8e76a1b4825 --- /dev/null +++ b/debian/qemu-debootstrap.1 @@ -0,0 +1,19 @@ +.TH qemu\-debootstrap 1 2011-07-02 "0.14.1+dfsg" Debian +.SH NAME +qemu\-debootstrap \- QEMU debootstrap wrapper +.SH SYNOPSIS +.B qemu\-debootstrap +.RI [ options ] +.SH DESCRIPTION +The +.B qemu\-debootstrap +wrapper calls +.BR debootstrap (8) +making use of the \-\-foreign and \-\-second-stage options, and copies the appropriate +.BR qemu\-user\-static (1) +binary into place in order to install cross-architecture chroots. In order for it to work seamlessly, the binfmt-support package must be installed. +.SH SEE ALSO +.BR debootstrap (8), +.BR qemu\-user\-static (1). +.SH AUTHOR +This manual page was written by Vagrant Cascadian . diff --git a/debian/qemu-guest-agent.dirs b/debian/qemu-guest-agent.dirs new file mode 100644 index 00000000000..47e91b182d9 --- /dev/null +++ b/debian/qemu-guest-agent.dirs @@ -0,0 +1 @@ +/etc/qemu/fsfreeze-hook.d diff --git a/debian/qemu-guest-agent.init b/debian/qemu-guest-agent.init new file mode 100644 index 00000000000..e142478056d --- /dev/null +++ b/debian/qemu-guest-agent.init @@ -0,0 +1,131 @@ +#! 
/bin/sh +### BEGIN INIT INFO +# Provides: qemu-guest-agent +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: QEMU Guest Agent startup script +# Description: Start the QEMU Guest Agent if we're running +# in a QEMU virtual machine +### END INIT INFO + +# Author: Michael Tokarev + +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="QEMU Guest Agent" +NAME=qemu-ga +DAEMON=/usr/sbin/$NAME +PIDFILE=/var/run/$NAME.pid + +# config +DAEMON_ARGS="" +# default transport +TRANSPORT=virtio-serial:/dev/virtio-ports/org.qemu.guest_agent.0 + +# Exit if the package is not installed +[ -x "$DAEMON" ] || exit 0 + +# Read configuration variable file if it is present +[ -r /etc/default/qemu-guest-agent ] && . /etc/default/qemu-guest-agent + +# Load the VERBOSE setting and other rcS variables +. /lib/init/vars.sh + +# Define LSB log_* functions. +# Depend on lsb-base (>= 3.2-14) to ensure that this file is present +# and status_of_proc is working. +. /lib/lsb/init-functions + +# +# Function that checks whenever system has necessary environment +# It also splits $TRANSPORT into $method and $path +# +do_check_transport() { + method=${TRANSPORT%%:*}; path=${TRANSPORT#*:} + case "$method" in + virtio-serial | isa-serial) + if [ ! -e "$path" ]; then + log_warning_msg "$NAME: transport endpoint not found, not starting" + return 1 + fi + ;; + esac +} + +# +# Function that starts the daemon/service +# +do_start() +{ + # Return + # 0 if daemon has been started + # 1 if daemon was already running + # 2 if daemon could not be started + start-stop-daemon -Sq -p $PIDFILE -x $DAEMON --test > /dev/null \ + || return 1 + start-stop-daemon -Sq -p $PIDFILE -x $DAEMON -- --daemonize \ + $DAEMON_ARGS -m "$method" -p "$path" \ + || return 2 +} + +# +# Function that stops the daemon/service +# +do_stop() +{ + # Return + # 0 if daemon has been stopped + # 1 if daemon was already stopped + # 2 if daemon could not be stopped + # other if a failure occurred + start-stop-daemon -Kq --retry=TERM/30/KILL/5 -p $PIDFILE --name $NAME +} + +case "$1" in + start) + do_check_transport || exit 0 + [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" $NAME + do_start + case "$?" in + 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; + 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; + esac + ;; + stop) + [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" $NAME + do_stop + case "$?" in + 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; + 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; + esac + ;; + status) + status_of_proc "$DAEMON" $NAME && exit 0 || exit $? + ;; + restart|force-reload) # we do not support reload + do_check_transport || exit 0 + log_daemon_msg "Restarting $DESC" $NAME + do_stop + case "$?" in + 0|1) + do_start + case "$?" 
in + 0) log_end_msg 0 ;; + 1) log_end_msg 1 ;; # Old process is still running + *) log_end_msg 1 ;; # Failed to start + esac + ;; + *) + # Failed to stop + log_end_msg 1 + ;; + esac + ;; + *) + echo "Usage: /etc/init.d/qemu-guest-agent {start|stop|status|restart|force-reload}" >&2 + exit 3 + ;; +esac + +: diff --git a/debian/qemu-guest-agent.install b/debian/qemu-guest-agent.install new file mode 100644 index 00000000000..d48def15bde --- /dev/null +++ b/debian/qemu-guest-agent.install @@ -0,0 +1,8 @@ +# from source tree +qga/qapi-schema.json /usr/share/doc/qemu-guest-agent/ +scripts/qemu-guest-agent/fsfreeze-hook /etc/qemu/ + +# from staging area +usr/bin/qemu-ga /usr/sbin/ +usr/share/man/man8/qemu-ga.8 /usr/share/man/man8/ +usr/share/man/man7/qemu-ga-ref.7 /usr/share/man/man7/ diff --git a/debian/qemu-guest-agent.postinst b/debian/qemu-guest-agent.postinst new file mode 100644 index 00000000000..e479a0d21a7 --- /dev/null +++ b/debian/qemu-guest-agent.postinst @@ -0,0 +1,59 @@ +#!/bin/sh +# postinst script for qemu-guest-agent +# +# see: dh_installdeb(1) + +set -e + +# summary of how this script can be called: +# * `configure' +# * `abort-upgrade' +# * `abort-remove' `in-favour' +# +# * `abort-remove' +# * `abort-deconfigure' `in-favour' +# `removing' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + configure) + ;; + + abort-upgrade|abort-remove|abort-deconfigure) + ;; + + *) + echo "postinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +#DEBHELPER# + +# Normal mv_conffile alone would fail due to the new path being a DIR in the old package version (LP: 1820291) +case "$1" in + configure) + # From /usr/bin/dpkg-maintscript-helper modified to be able to cope with this edge case + if [ -n "$2" ] && dpkg --compare-versions -- "$2" le-nl "1:3.1+dfsg-7~"; then + TMPCONFFILE="/etc/qemu/fsfreeze-hook.old" + NEWCONFFILE="/etc/qemu/fsfreeze-hook" + ORIGCONFFILE="/etc/qemu/fsfreeze-hook/fsfreeze-hook" + rm -f "$TMPCONFFILE.dpkg-remove" + if [ -e "$TMPCONFFILE" ]; then + echo "Preserving user changes to $NEWCONFFILE (renamed from $ORIGCONFFILE)..." + if [ -e "$NEWCONFFILE" ]; then + mv -f "$NEWCONFFILE" "$NEWCONFFILE.dpkg-new" + fi + mv -f "$TMPCONFFILE" "$NEWCONFFILE" + fi + fi + ;; +esac + +exit 0 diff --git a/debian/qemu-guest-agent.postrm b/debian/qemu-guest-agent.postrm new file mode 100644 index 00000000000..525e3cbfe16 --- /dev/null +++ b/debian/qemu-guest-agent.postrm @@ -0,0 +1,56 @@ +#!/bin/sh +# postrm script for qemu-guest-agent +# +# see: dh_installdeb(1) + +set -e + +# summary of how this script can be called: +# * `remove' +# * `purge' +# * `upgrade' +# * `failed-upgrade' +# * `abort-install' +# * `abort-install' +# * `abort-upgrade' +# * `disappear' +# +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + purge|remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) + ;; + + *) + echo "postrm called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. 
+ +#DEBHELPER# + +# If needed revert the move we have made in preinst to compensate the new path being a DIR in the old package version (LP: 1820291) +case "$1" in + abort-install|abort-upgrade) + # From /usr/bin/dpkg-maintscript-helper modified to be able to cope with this edge case + if [ -n "$2" ] && dpkg --compare-versions -- "$2" le-nl "1:3.1+dfsg-7~"; then + TMPCONFFILE="/etc/qemu/fsfreeze-hook.old" + NEWCONFFILE="/etc/qemu/fsfreeze-hook" + ORIGCONFFILE="/etc/qemu/fsfreeze-hook/fsfreeze-hook" + if [ -e "$TMPCONFFILE.dpkg-remove" ]; then + echo "Reinstalling $ORIGCONFFILE that was moved away" + if [ -f "$NEWCONFFILE" ]; then + rm -f "$NEWCONFFILE" + fi + mkdir -p "$NEWCONFFILE" + mv "$TMPCONFFILE.dpkg-remove" "$ORIGCONFFILE" + fi + fi +esac + +exit 0 diff --git a/debian/qemu-guest-agent.preinst b/debian/qemu-guest-agent.preinst new file mode 100644 index 00000000000..f5b4cc7de97 --- /dev/null +++ b/debian/qemu-guest-agent.preinst @@ -0,0 +1,62 @@ +#!/bin/sh +# preinst script for qemu-guest-agent +# +# see: dh_installdeb(1) + +set -e + +# summary of how this script can be called: +# * `install' +# * `install' +# * `upgrade' +# * `abort-upgrade' +# for details, see https://www.debian.org/doc/debian-policy/ or +# the debian-policy package + + +case "$1" in + install|upgrade) + ;; + + abort-upgrade) + ;; + + *) + echo "preinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. + +# Normal mv_conffile alone would fail due to the new path being a DIR in the old package version (LP: 1820291) +case "$1" in + install|upgrade) + # From /usr/bin/dpkg-maintscript-helper modified to be able to cope with this edge case + if [ -n "$2" ] && dpkg --compare-versions -- "$2" le-nl "1:3.1+dfsg-7~"; then + TMPCONFFILE="/etc/qemu/fsfreeze-hook.old" + NEWCONFFILE="/etc/qemu/fsfreeze-hook" + ORIGCONFFILE="/etc/qemu/fsfreeze-hook/fsfreeze-hook" + if [ -f "$ORIGCONFFILE" ]; then + disk_md5sum="$(md5sum "$ORIGCONFFILE" | sed -e 's/ .*//')" + pkg_md5sum="$(dpkg-query -W -f='${Conffiles}' "qemu-guest-agent" | \ + sed -n -e "\'^ $ORIGCONFFILE ' { s/ obsolete$//; s/.* //; p }")" + if [ "$disk_md5sum" = "$pkg_md5sum" ]; then + # mark as having no custom content + mv -f "$ORIGCONFFILE" "${TMPCONFFILE}.dpkg-remove" + else + # keep the "old" name to reflect there is content to be preserved + mv -f "$ORIGCONFFILE" "$TMPCONFFILE" + fi + # In any case the old directory blocking the new conffile + # has to be removed before unpack happens + rmdir "$NEWCONFFILE" || echo "failed to remove $NEWCONFFILE" + fi + fi + ;; +esac + +#DEBHELPER# + +exit 0 diff --git a/debian/qemu-guest-agent.service b/debian/qemu-guest-agent.service new file mode 100644 index 00000000000..88ef9c7e5f5 --- /dev/null +++ b/debian/qemu-guest-agent.service @@ -0,0 +1,11 @@ +[Unit] +Description=QEMU Guest Agent +BindsTo=dev-virtio\x2dports-org.qemu.guest_agent.0.device +After=dev-virtio\x2dports-org.qemu.guest_agent.0.device + +[Service] +ExecStart=-/usr/sbin/qemu-ga +Restart=always +RestartSec=0 + +[Install] diff --git a/debian/qemu-guest-agent.udev b/debian/qemu-guest-agent.udev new file mode 100644 index 00000000000..47097057e3a --- /dev/null +++ b/debian/qemu-guest-agent.udev @@ -0,0 +1,2 @@ +SUBSYSTEM=="virtio-ports", ATTR{name}=="org.qemu.guest_agent.0", \ + TAG+="systemd", ENV{SYSTEMD_WANTS}="qemu-guest-agent.service" diff --git a/debian/qemu-ifdown b/debian/qemu-ifdown new file mode 100755 index 00000000000..b2dc471f048 --- 
/dev/null +++ b/debian/qemu-ifdown @@ -0,0 +1,6 @@ +#! /bin/sh +# Script to shut down a network (tap) device for qemu. +# Initially this script is empty, but you can configure, +# for example, accounting info here. + +: diff --git a/debian/qemu-ifup b/debian/qemu-ifup new file mode 100755 index 00000000000..76b68ea5777 --- /dev/null +++ b/debian/qemu-ifup @@ -0,0 +1,41 @@ +#! /bin/sh +# Script to bring a network (tap) device for qemu up. +# The idea is to add the tap device to the same bridge +# as we have default routing to. + +# in order to be able to find brctl +PATH=$PATH:/sbin:/usr/sbin +ip=$(command -v ip) + +if [ -n "$ip" ]; then + ip link set "$1" up +else + brctl=$(command -v brctl) + if [ -z "$ip$brctl" ]; then + echo "W: $0: not doing any bridge processing: neither ip nor brctl utility found" >&2 + exit 0 + fi + ifconfig "$1" 0.0.0.0 up +fi + +switch=$(ip route ls | \ + awk '/^default / { + for(i=0;i<NF;i++) { if ($i == "dev") { print $(i+1); next; } } + }') + +# only add the interface to the default-route bridge if such an +# interface exists and is actually a bridge +for br in $switch; do + if [ -d "/sys/class/net/$br/bridge/." ]; then + if [ -n "$ip" ]; then + ip link set "$1" master "$br" + else + brctl addif "$br" "$1" + fi + exit + fi +done + +echo "W: $0: no bridge found to attach the guest interface to" >&2 diff --git a/debian/qemu-io.1 b/debian/qemu-io.1 new file mode 100644 index 00000000000..cabd24e3016 --- /dev/null +++ b/debian/qemu-io.1 @@ -0,0 +1,43 @@ +.TH QEMU-IO 1 "December 18, 2011" +.SH NAME +qemu-io \- QEMU Disk exerciser +.SH SYNOPSIS +.B qemu-io [-h] [-V] [-rsnm] [-c cmd] ... [file] +.SH DESCRIPTION +qemu-io is a command line utility to exercise the QEMU I/O path. +.SH OPTIONS +.TP +.B \-c,\ \-\-cmd\ +Command to execute +.TP +.B \-r,\ \-\-read-only +Export read-only +.TP +.B \-s,\ \-\-snapshot +Use snapshot file +.TP +.B \-n,\ \-\-nocache +Disable host cache +.TP +.B \-g,\ \-\-growable +Allow file to grow (only applies to protocols) +.TP +.B \-m,\ \-\-misalign +Misalign allocations for O_DIRECT +.TP +.B \-k,\ \-\-native-aio +Use kernel AIO implementation (on Linux only) +.TP +.B \-h,\ \-\-help +Display help and exit +.TP +.B \-V,\ \-\-version +Output version information and exit +.SH SEE ALSO +.BR qemu-img (1), +.BR qemu-nbd (8) +.SH AUTHOR +QEMU project +.PP +This manual page was written by Asias He , for the Debian project (and may be used by others). Permission is granted to copy, distribute and/or modify this document under the terms of the Creative Commons Attribution-Share Alike 3.0 United States License. (See http://creativecommons.org/licenses/by-sa/3.0/us/legalcode) +. diff --git a/debian/qemu-kvm-init b/debian/qemu-kvm-init new file mode 100755 index 00000000000..af00b71848c --- /dev/null +++ b/debian/qemu-kvm-init @@ -0,0 +1,89 @@ +#!/bin/sh + +# Detect our host arch +arch=$(arch) +test -z "$arch" && exit 0 + +modlist="" +case "$arch" in + x86_64 | i686) + kvm=/usr/bin/qemu-system-x86_64 + if grep -qs "^flags.* vmx" /proc/cpuinfo; then + modlist="kvm_intel $KVM_NESTED" + elif grep -qs "^flags.* svm" /proc/cpuinfo; then + modlist="kvm_amd" + fi + ;; + ppc*) + SMT=$(/usr/sbin/ppc64_cpu --smt 2>&1 | grep "SMT=[248]") + if [ -n "$SMT" ] + then + if grep -q -e '^cpu\s*:\s*POWER8' /proc/cpuinfo; then + echo "Error: You must disable SMT if you want to run QEMU/KVM on Power8 based ppc64le architecture" + echo "In order to disable SMT, run: # ppc64_cpu --smt=off" + fi + fi + kvm=/usr/bin/qemu-system-ppc64 + if [ "$(uname -m)" != "ppc64le" ]; then + exit 0 + fi + if systemd-detect-virt --quiet --vm; then + echo "Info: second level virtualization not supported, kvm-hv load might fail" + fi + modlist="kvm-hv" + ;; +esac + +# Silently exit if the package isn't installed anymore +if [ -z "$kvm" -o ! -e "$kvm" ]; then + exit 0 +fi + +# shellcheck disable=SC1091 +[ -r /etc/default/qemu-kvm ] && . 
/etc/default/qemu-kvm + +start() { + if [ -n "$modlist" ]; then + modprobe -b $modlist || true + fi + + if systemd-detect-virt --quiet --container; then + mknod /dev/kvm c 10 232 || true + chown root:kvm /dev/kvm || true + chmod g+rw /dev/kvm || true + fi + + # Determine if we are running inside a VM + IS_VM=0 + if command -v systemd-detect-virt >/dev/null 2>&1; then + systemd-detect-virt -vq && IS_VM=1 + fi + + # Enable KSM, respecting the default configuration file. If 'AUTO' is + # set, enable only if we aren't running inside a VM. + if [ "$KSM_ENABLED" = "1" ] || [ "$KSM_ENABLED" = "AUTO" ] && [ "$IS_VM" = "0" ]; then + # shellcheck disable=SC2015 + [ -w /sys/kernel/mm/ksm/run ] && echo 1 > /sys/kernel/mm/ksm/run || true + if [ -w /sys/kernel/mm/ksm/sleep_millisecs ]; then + if [ -n "$SLEEP_MILLISECS" ]; then + echo "$SLEEP_MILLISECS" > /sys/kernel/mm/ksm/sleep_millisecs || true + fi + fi + else + # shellcheck disable=SC2015 + [ -w /sys/kernel/mm/ksm/run ] && echo 0 > /sys/kernel/mm/ksm/run || true + fi +} + + +# See how we were called. +case "$1" in + start) + start + ;; + + *) + exit 0 + ;; +esac + +exit $? diff --git a/debian/qemu-system-arm.maintscript b/debian/qemu-system-arm.maintscript new file mode 100644 index 00000000000..368fa9a1e19 --- /dev/null +++ b/debian/qemu-system-arm.maintscript @@ -0,0 +1 @@ +dir_to_symlink /usr/share/doc/qemu-system-arm qemu-system-common 1:8.0+dfsg-4~ qemu-system-arm diff --git a/debian/qemu-system-common.NEWS b/debian/qemu-system-common.NEWS new file mode 100644 index 00000000000..fc8fccc96f6 --- /dev/null +++ b/debian/qemu-system-common.NEWS @@ -0,0 +1,106 @@ +qemu (1:8.0+dfsg-1) experimental; urgency=medium + + Qemu upstream dropped support for the C-language virtiofsd daemon + implementation in version 8.0, so this package does not ship + virtiofsd anymore. A rust-language implementation is available + in a separate virtiofsd package. + + -- Michael Tokarev Tue, 18 Apr 2023 21:26:14 +0300 + +qemu (1:7.0+dfsg-7) unstable; urgency=medium + + Starting with this version of qemu-system-x86 on x86 architecture, xen + support is moved out to a separate package named qemu-system-xen. Xen + releases since 4.16.1-1 already use the qemu binary from this package. In + order to support the transition from old xen and xen-supporting qemu-system-x86 + to the separate xen build of qemu, the qemu-system-i386 binary is temporarily + replaced by a shell wrapper which detects xen usage and, if so, redirects + the call to the xen-enabled qemu binary if that is found (with a warning), or suggests + installing the qemu-system-xen package. If you used qemu-system-x86 (or + qemu-system-x86-xen on Ubuntu) to run xen, please install qemu-system-xen. + qemu-system-x86 is not used by xen anymore since 4.16.1-1. + + -- Michael Tokarev Sun, 15 May 2022 15:15:58 +0300 + +qemu (1:5.0-9) unstable; urgency=medium + + With this version, the kvm wrapper (initially from the separate kvm + package, which was later renamed to qemu-kvm) is merged back + into the qemu-system-x86 package, replacing the old qemu-kvm package. + The 'kvm' command name turned out to be very handy for manual + execution of qemu with kvm enabled, and we now rely on the + upstream behavior - when the executable name ends with "kvm" it + enables the kvm mode by default if available, and falls back + to TCG if not. + + -- Michael Tokarev Fri, 17 Jul 2020 12:54:35 +0300 + +qemu (1:2.12+dfsg-2) unstable; urgency=medium + + Since qemu 2.12, [G]UI display frontends can be built as modules. 
+ Debian creates a new package, qemu-system-gui, which currently + includes GTK3 support. This also switches the display from SDL to GTK. + Qemu-system-* packages recommend installing qemu-system-gui, so + by default on upgrade you will have the new package installed, and + the local GUI will continue to work. However, if you choose to not + install recommended packages, you might consider installing the + qemu-system-gui package separately, if you need local GUI support + as well. Without this package, qemu-system-* becomes "headless", + and can be used on servers to reduce the amount of dependencies - + this way, no X11 stuff is needed by qemu-system anymore. + + -- Michael Tokarev Sun, 27 Apr 2018 09:18:32 +0300 + +qemu (1:2.2+dfsg-6exp) unstable; urgency=medium + + Since Debian release 2.2+dfsg-6exp, a new package named qemu-block-extra + has been created and some less frequently used block backends have been + split out of the main qemu-system binaries and from the qemu-img binary into + this new package. The backends which have been split are: + curl + iscsi + rbd (ceph/rados) + ssh + If you use any of these, please install the qemu-block-extra package in + addition to the qemu-system-* or qemu-utils package, because without it + these block backends won't work anymore. + + -- Michael Tokarev Mon, 27 Apr 2015 09:29:55 +0300 + +qemu (2.0.0+dfsg-1) unstable; urgency=low + + qemu-system-* packages do not provide the /usr/bin/qemu alternative + anymore, and all the various alternatives will be unregistered when the new + individual qemu-system packages are installed. This is because different + architectures are not really alternatives, and never have been. + Historically, qemu emulated just one architecture, so the name "qemu" + was used for the binary. However when more architectures were added, + the old name "qemu" was used as an alternative, pointing to one of + the emulators. Upstream has not used the name "qemu" for binaries + for a long time. If you have scripts using the old name "qemu" + please update them to use the right qemu-system-* binary. + + -- Michael Tokarev Fri, 11 Apr 2014 19:57:22 +0400 + +qemu (1.7.0+dfsg-2) unstable; urgency=low + + Since version 1.7.0+dfsg-2, qemu-system-x86 switched from the vgabios of the + plex/bochs project to the vgabios implementation provided by seabios. + The latter is written almost entirely in the C language so it is much easier + to debug/develop, but it lacks some 16bit protected mode functions which + are present in vgabios. This means that it is possible that, for example, + some protected-mode games written for MS-DOS may not work starting with this + release. + + This also means that the vgabios package isn't used by qemu anymore, and might + be removed from the system if there are no other users of it left. + + + The /usr/bin/kvm shell wrapper has been moved back to the qemu-kvm package (it was + moved to qemu-system-x86 in version 1.3.0+dfsg-2exp). Please note that we + do not re-install the qemu-kvm package if it has been removed as obsolete, so + if you need the /usr/bin/kvm wrapper please install the qemu-kvm package again. + This change allows the qemu-system-x86 package to co-exist with the old qemu-kvm + binary (not shell-wrapper) package from wheezy. 
+ + -- Michael Tokarev Thu, 28 Nov 2013 18:40:56 +0400 diff --git a/debian/qemu-system-common.README.Debian b/debian/qemu-system-common.README.Debian new file mode 100644 index 00000000000..f282b7a0faa --- /dev/null +++ b/debian/qemu-system-common.README.Debian @@ -0,0 +1,35 @@ +Historically, there were different naming in use for the +same architectures within different groups/projects. For +example, amd64 architecture (Debian name) is also known as +x86_64; arm64 and aarch64; and so on. Qemu used one way +to name the binaries, debian used another, some architectures +are named the same way, while for other architectures there's +a difference. + +Starting with debian qemu-system-* packages version 8.0, we now +provide debian-compatible naming scheme for qemu-system-$arch +binaries, shipping symlinks to qemu names when qemu name is +different from debian's. For example, qemu-system-amd64 is a +symlink to qemu-system-x86_64. + +Since qemu-system emulates a CPU (it is agnostic to what +OS/kernel/abi is being actually used inside), we can use +qemu-system-${DEB_HOST_ARCH_CPU} from dpkg-architecture, +not qemu-system-${DEB_HOST_ARCH}. + +In particular, the following extra names are provided: + + amd64 => x86_64 + arm64 => aarch64 + loong64 => loongarch64 + powerpc => ppc + ppc64el => ppc64 + +The other names do match between qemu and debian. + +There's another name which is also provided but which is +neither in debian nor in qemu: it is ppc64le, which is +the name used for this architecture in the linux kernel. + +One can run qemu-system-$archos binary, and you can +list it in (Build-)Depends: field too. diff --git a/debian/qemu-system-common.doc-base b/debian/qemu-system-common.doc-base new file mode 100644 index 00000000000..76cd04b3295 --- /dev/null +++ b/debian/qemu-system-common.doc-base @@ -0,0 +1,12 @@ +Document: qemu-system-doc +Title: QEMU User Manual +Author: Fabrice Bellard +Abstract: The QEMU user manual intends to make the user understand what + qemu is/does, and to guide them through the first steps of getting + the emulator to work, documenting parameters and commands, among other + useful things. +Section: Emulators + +Format: HTML +Index: /usr/share/doc/qemu-system-common/system/index.html +Files: /usr/share/doc/qemu-system-common/system/index.html diff --git a/debian/qemu-system-common.install b/debian/qemu-system-common.install new file mode 100644 index 00000000000..a4a90b57df8 --- /dev/null +++ b/debian/qemu-system-common.install @@ -0,0 +1,33 @@ +debian/qemu-ifup etc/ +debian/qemu-ifdown etc/ + +usr/share/man/man1/qemu-system.1 +usr/share/qemu/trace-events-all +usr/bin/qemu-pr-helper +usr/share/man/man8/qemu-pr-helper.8 +usr/share/man/man7/qemu-qmp-ref.7 +usr/share/man/man7/qemu-cpu-models.7 +usr/share/doc/qemu/system usr/share/doc/qemu-system-common + +# linux-specific +usr/lib/qemu/qemu-bridge-helper +# virtfs-proxy-helper removed in QEMU 9.0+ (replaced by vhost-user-fs) +# usr/lib/qemu/virtfs-proxy-helper +# usr/share/man/man1/virtfs-proxy-helper.1 + +# common modules. 
Other gui modules are in qemu-system-gui +# accel-tcg-*.so modules no longer built as separate .so files in QEMU 10.1+ +# usr/lib/${DEB_HOST_MULTIARCH}/qemu/accel-tcg-*.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/audio-alsa.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/audio-oss.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/chardev-baum.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/hw-display-virtio-gpu-pci.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/hw-display-virtio-gpu.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/hw-display-virtio-vga.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/hw-usb-host.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/hw-usb-redirect.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/hw-usb-smartcard.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/hw-s390x-virtio-gpu-ccw.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/hw-uefi-vars.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/ui-curses.so +debian/qemu-kvm-init /usr/share/qemu/init diff --git a/debian/qemu-system-common.lintian-overrides b/debian/qemu-system-common.lintian-overrides new file mode 100644 index 00000000000..628f7111c8e --- /dev/null +++ b/debian/qemu-system-common.lintian-overrides @@ -0,0 +1,9 @@ +# probably a false positives +qemu-system-common: shared-library-lacks-prerequisites [usr/lib/*/qemu/hw-display-virtio-gpu-pci.so] +qemu-system-common: shared-library-lacks-prerequisites [usr/lib/*/qemu/hw-display-virtio-vga.so] +qemu-system-common: shared-library-lacks-prerequisites [usr/lib/*/qemu/hw-s390x-virtio-gpu-ccw.so] +qemu-system-common: hardening-no-fortify-functions [usr/lib/*/qemu/audio-alsa.so] +qemu-system-common: hardening-no-fortify-functions [usr/lib/*/qemu/audio-oss.so] +# this is a common/generic manpage used by all qemu-system-* binaries +qemu-system-common: spare-manual-page [usr/share/man/man1/qemu-system.1.gz] +qemu-system-common: spare-manual-page [usr/share/man/man1/virtfs-proxy-helper.1.gz] diff --git a/debian/qemu-system-common.qemu-kvm.default b/debian/qemu-system-common.qemu-kvm.default new file mode 100644 index 00000000000..08ab26c7688 --- /dev/null +++ b/debian/qemu-system-common.qemu-kvm.default @@ -0,0 +1,8 @@ +# Set to 1 to enable KSM, 0 to disable KSM, and AUTO to use default settings. +# After changing this setting restart the qemu-kvm service. 
+KSM_ENABLED=AUTO +SLEEP_MILLISECS=200 + +# Dropped VHOST_NET_ENABLED as this is auto-loaded in recent kernels + +# Dropped KVM_HUGEPAGES as systemd provides feasible hugepage moutpoints diff --git a/debian/qemu-system-common.qemu-kvm.service b/debian/qemu-system-common.qemu-kvm.service new file mode 100644 index 00000000000..f35e3e98bfa --- /dev/null +++ b/debian/qemu-system-common.qemu-kvm.service @@ -0,0 +1,16 @@ +[Unit] +Description=QEMU KVM preparation - module, ksm, hugepages +DefaultDependencies=no +After=local-fs.target +Before=shutdown.target +Conflicts=shutdown.target +RequiresMountsFor=/usr + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/share/qemu/init/qemu-kvm-init start +ExecReload=/usr/share/qemu/init/qemu-kvm-init start + +[Install] +WantedBy=multi-user.target diff --git a/debian/qemu-system-data.docs b/debian/qemu-system-data.docs new file mode 100644 index 00000000000..3c68b4688b1 --- /dev/null +++ b/debian/qemu-system-data.docs @@ -0,0 +1,5 @@ +docs/multiseat.txt +docs/qdev-device-use.txt +docs/qemupciserial.inf +docs/config/ +docs/specs/ diff --git a/debian/qemu-system-data.install b/debian/qemu-system-data.install new file mode 100644 index 00000000000..2b1b4196904 --- /dev/null +++ b/debian/qemu-system-data.install @@ -0,0 +1,2 @@ +usr/share/qemu/dtb/bamboo.dtb +usr/share/qemu/dtb/canyonlands.dtb diff --git a/debian/qemu-system-data.links b/debian/qemu-system-data.links new file mode 100644 index 00000000000..6ccba3d609d --- /dev/null +++ b/debian/qemu-system-data.links @@ -0,0 +1 @@ +usr/share/qemu/s390-ccw.img usr/share/qemu/s390-netboot.img diff --git a/debian/qemu-system-data.lintian-overrides b/debian/qemu-system-data.lintian-overrides new file mode 100644 index 00000000000..d1be0c48e07 --- /dev/null +++ b/debian/qemu-system-data.lintian-overrides @@ -0,0 +1,5 @@ +# firmware files are arch-dependent binaries and can be statically linked +qemu-system-data: arch-dependent-file-in-usr-share *usr/share/qemu/* +qemu-system-data: arch-independent-package-contains-binary-or-object *usr/share/qemu/* +qemu-system-data: statically-linked-binary *usr/share/qemu/* +qemu-system-data: shared-library-lacks-prerequisites *usr/share/qemu/s390-* diff --git a/debian/qemu-system-gui.install b/debian/qemu-system-gui.install new file mode 100644 index 00000000000..df9c3eb5572 --- /dev/null +++ b/debian/qemu-system-gui.install @@ -0,0 +1,10 @@ +usr/share/locale/*/LC_MESSAGES/qemu.mo +usr/share/qemu/vhost-user/50-qemu-gpu.json +usr/lib/qemu/vhost-user-gpu +# modules +usr/lib/${DEB_HOST_MULTIARCH}/qemu/ui-gtk.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/ui-sdl.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/audio-sdl.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/audio-jack.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/audio-pa.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/audio-pipewire.so diff --git a/debian/qemu-system-mips.maintscript b/debian/qemu-system-mips.maintscript new file mode 100644 index 00000000000..7f302e915d1 --- /dev/null +++ b/debian/qemu-system-mips.maintscript @@ -0,0 +1 @@ +dir_to_symlink /usr/share/doc/qemu-system-mips qemu-system-common 1:8.0+dfsg-4~ qemu-system-mips diff --git a/debian/qemu-system-misc.install b/debian/qemu-system-misc.install new file mode 100644 index 00000000000..13f6225a84f --- /dev/null +++ b/debian/qemu-system-misc.install @@ -0,0 +1,2 @@ +usr/share/qemu/dtb/petalogix-ml605.dtb +usr/share/qemu/dtb/petalogix-s3adsp1800.dtb diff --git a/debian/qemu-system-misc.lintian-overrides b/debian/qemu-system-misc.lintian-overrides new file mode 100644 index 
00000000000..ec61d802ca2 --- /dev/null +++ b/debian/qemu-system-misc.lintian-overrides @@ -0,0 +1,2 @@ +qemu-system-misc: spelling-error-in-binary addd add *usr/bin/qemu-* +qemu-system-misc: spelling-error-in-binary nott not *usr/bin/qemu-* diff --git a/debian/qemu-system-misc.maintscript b/debian/qemu-system-misc.maintscript new file mode 100644 index 00000000000..6f0082da19c --- /dev/null +++ b/debian/qemu-system-misc.maintscript @@ -0,0 +1 @@ +dir_to_symlink /usr/share/doc/qemu-system-misc qemu-system-common 1:8.0+dfsg-4~ qemu-system-misc diff --git a/debian/qemu-system-modules-opengl.install b/debian/qemu-system-modules-opengl.install new file mode 100644 index 00000000000..0ed5c82a9bc --- /dev/null +++ b/debian/qemu-system-modules-opengl.install @@ -0,0 +1,8 @@ +usr/lib/${DEB_HOST_MULTIARCH}/qemu/ui-opengl.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/ui-egl-headless.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/hw-display-virtio-gpu-pci-gl.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/hw-display-virtio-gpu-gl.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/hw-display-virtio-vga-gl.so +# ui-dbus requires ui-opengl +usr/lib/${DEB_HOST_MULTIARCH}/qemu/audio-dbus.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/ui-dbus.so diff --git a/debian/qemu-system-modules-opengl.lintian-overrides b/debian/qemu-system-modules-opengl.lintian-overrides new file mode 100644 index 00000000000..ea04ea1d64c --- /dev/null +++ b/debian/qemu-system-modules-opengl.lintian-overrides @@ -0,0 +1,3 @@ +# probably false positives +qemu-system-modules-opengl: shared-library-lacks-prerequisites [usr/lib/*/qemu/hw-display-virtio-gpu-pci-gl.so] +qemu-system-modules-opengl: shared-library-lacks-prerequisites [usr/lib/*/qemu/hw-display-virtio-vga-gl.so] diff --git a/debian/qemu-system-modules-spice.install b/debian/qemu-system-modules-spice.install new file mode 100644 index 00000000000..9aede779854 --- /dev/null +++ b/debian/qemu-system-modules-spice.install @@ -0,0 +1,5 @@ +usr/lib/${DEB_HOST_MULTIARCH}/qemu/audio-spice.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/chardev-spice.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/hw-display-qxl.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/ui-spice-app.so +usr/lib/${DEB_HOST_MULTIARCH}/qemu/ui-spice-core.so diff --git a/debian/qemu-system-ppc.lintian-overrides b/debian/qemu-system-ppc.lintian-overrides new file mode 100644 index 00000000000..b215e1b0a4d --- /dev/null +++ b/debian/qemu-system-ppc.lintian-overrides @@ -0,0 +1 @@ +qemu-system-ppc: spelling-error-in-binary buid build *usr/bin/qemu-system-ppc64* diff --git a/debian/qemu-system-ppc.maintscript b/debian/qemu-system-ppc.maintscript new file mode 100644 index 00000000000..760727fb056 --- /dev/null +++ b/debian/qemu-system-ppc.maintscript @@ -0,0 +1 @@ +dir_to_symlink /usr/share/doc/qemu-system-ppc qemu-system-common 1:8.0+dfsg-4~ qemu-system-ppc diff --git a/debian/qemu-system-s390x.maintscript b/debian/qemu-system-s390x.maintscript new file mode 100644 index 00000000000..cd86fbcacf7 --- /dev/null +++ b/debian/qemu-system-s390x.maintscript @@ -0,0 +1 @@ +dir_to_symlink /usr/share/doc/qemu-system-s390x qemu-system-common 1:8.0+dfsg-4~ qemu-system-s390x diff --git a/debian/qemu-system-sparc.maintscript b/debian/qemu-system-sparc.maintscript new file mode 100644 index 00000000000..1b51c3b5c7c --- /dev/null +++ b/debian/qemu-system-sparc.maintscript @@ -0,0 +1 @@ +dir_to_symlink /usr/share/doc/qemu-system-sparc qemu-system-common 1:8.0+dfsg-4~ qemu-system-sparc diff --git a/debian/qemu-system-x86.maintscript b/debian/qemu-system-x86.maintscript new file mode 100644 index 
00000000000..afaba5d56cd --- /dev/null +++ b/debian/qemu-system-x86.maintscript @@ -0,0 +1 @@ +dir_to_symlink /usr/share/doc/qemu-system-x86 qemu-system-common 1:8.0+dfsg-4~ qemu-system-x86 diff --git a/debian/qemu-system-x86_64-spice b/debian/qemu-system-x86_64-spice new file mode 100755 index 00000000000..a7bdb04e0c8 --- /dev/null +++ b/debian/qemu-system-x86_64-spice @@ -0,0 +1,5 @@ +#! /bin/sh +echo "$0: W: this is an old compat wrapper script for qemu-system-x86_64-spice" >&2 +echo "$0: W: please use qemu-system-x86_64 instead of $0" >&2 + +exec qemu-system-x86_64 "$@" diff --git a/debian/qemu-system-xen.lintian-overrides b/debian/qemu-system-xen.lintian-overrides new file mode 100644 index 00000000000..1df7552f5d6 --- /dev/null +++ b/debian/qemu-system-xen.lintian-overrides @@ -0,0 +1 @@ +qemu-system-xen: no-manual-page [usr/libexec/xen-qemu-system-i386] diff --git a/debian/qemu-system.NEWS b/debian/qemu-system.NEWS new file mode 100644 index 00000000000..550670bafb3 --- /dev/null +++ b/debian/qemu-system.NEWS @@ -0,0 +1,47 @@ +qemu (1.3.0+dfsg-4exp) experimental; urgency=low + + Starting from the 1.3.0 release, the qemu-system package, which provided + QEMU system emulation targets for many architectures, has been + split into several packages, each providing a related, + per-base-architecture set of emulators, plus a qemu-system-misc + package which contains some "other" architectures. There is + also a qemu-system-common package, which provides common files for + all other qemu-system-* packages. The former qemu-system package + has now become a meta-package which installs emulators for all + available targets as before. + + -- Michael Tokarev Tue, 22 Jan 2013 01:38:27 +0400 + +qemu (0.12.3+dfsg-3) unstable; urgency=low + + Starting with QEMU 0.12.0, KQEMU support has been removed from + upstream, as it has not seen active development for a few years now, + while causing regressions or limitations for non-KQEMU users. + + -- Aurelien Jarno Wed, 24 Mar 2010 23:14:35 +0100 + +qemu (0.10.3-2) unstable; urgency=low + + Starting with QEMU 0.10.0, it is possible to control how the host + cache is used to access block data, using the cache= suboption of the + -drive option. The following suboptions are available: + * none: The host page cache is entirely avoided. + * writeback (default in QEMU 0.9.x): Writeback caching reports data + writes as completed as soon as the data is present in the host page + cache. This is safe as long as you trust your host. If your host + crashes or loses power, then the guest may experience data + corruption. + * writethrough (default in QEMU 0.10.x): The host page cache is used + to read and write data, but write notification is sent to the guest + only when the data has been reported as written by the storage + subsystem. + + Note that depending on your configuration (filesystem, encryption, + kernel version, etc.), disk accesses can be very slow with the default + cache policy (writethrough). You can use the writeback cache policy + instead, but data integrity is no longer assured. + + See qemu(1) for more details. + + -- Aurelien Jarno Sun, 03 May 2009 23:22:29 +0200 + diff --git a/debian/qemu-user-static.1 b/debian/qemu-user-static.1 new file mode 100644 index 00000000000..bac2d391c22 --- /dev/null +++ b/debian/qemu-user-static.1 @@ -0,0 +1,37 @@ +.TH qemu\-user\-static 1 2007-02-08 "0.9.0" Debian +.SH NAME +qemu\-user\-static \- QEMU User Emulator (static version) +.SH SYNOPSIS +.B qemu\-user\-static +.RI [ options ] +.I program +.RI [ program-arguments...
] +.SH DESCRIPTION +The +.B qemu\-user\-static +emulator can run binaries built for other CPU architectures, as long as they +target the same operating system as the current one. +.SH OPTIONS +.TP +.BR \-h +Print this help. +.TP +.BR \-g " \fI\fP" +Wait for a gdb connection on port \fIport\fP. +.TP +.BR \-L " \fI\fP" +Set the ELF interpreter prefix (default=\fI/etc/qemu\-binfmt/%M\fP). +.TP +.BR \-s " \fI\fP" +Set the stack size in bytes (default=\fI524288\fP). +.TP +.BR \-d " \fI\fP" +Activate logging (logfile=\fI/tmp/qemu.log\fP). +.TP +.BR \-p " \fI\fP" +Set the host page size to 'pagesize'. +.SH SEE ALSO +.BR qemu-system (1) +(in the qemu-system-common package). +.SH AUTHOR +This manual page was written by Guillem Jover. diff --git a/debian/qemu-user-static.README.Debian b/debian/qemu-user-static.README.Debian new file mode 100644 index 00000000000..b22be69605c --- /dev/null +++ b/debian/qemu-user-static.README.Debian @@ -0,0 +1,28 @@ +Historically, different names have been in use for the +same architectures within different groups/projects. For +example, the amd64 architecture (Debian name) is also known as +x86_64; arm64 as aarch64; and so on. Qemu uses one way +to name its binaries and debian uses another; some architectures +are named the same way, while for other architectures the +names differ. + +Starting with the debian qemu-user-static package version 8.0, we +provide a debian-compatible naming scheme for the qemu-$arch-static +binaries, to be used as qemu-${DEB_HOST_ARCH}-static. For +example, qemu-armel-static is a symlink to qemu-arm-static. + +In particular, the following extra names are provided: + + amd64 => x86_64 + arm64 => aarch64 + armhf => arm + armel => arm + loong64 => loongarch64 + powerpc => ppc + ppc64el => ppc64 + +The other names do match between qemu and debian. + +There is one more name which is also provided but which is +neither a debian nor a qemu name: ppc64le, which is +the name used for this architecture in the linux kernel.
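For illustration only (this wrapper is not part of the packaging; the only assumption taken from the README above is the qemu-${DEB_HOST_ARCH}-static naming), a minimal sketch of a script that relies on the debian-compatible symlink names instead of mapping architecture names itself:

#!/bin/sh
# Minimal sketch: pick the static user-mode emulator via the
# debian-compatible names described above (e.g. qemu-armel-static
# is a symlink to qemu-arm-static), so no name mapping is needed here.
arch=$(dpkg --print-architecture)    # Debian name, e.g. amd64, armhf, ppc64el
emu=/usr/bin/qemu-${arch}-static     # provided as a symlink where the names differ
if [ ! -x "$emu" ]; then
    echo "qemu-${arch}-static is not installed" >&2
    exit 1
fi
exec "$emu" "$@"

Invoked as, say, ./run-foreign.sh /path/to/binary args..., it simply execs the matching emulator for the host's Debian architecture name.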
diff --git a/debian/qemu-user-static.docs b/debian/qemu-user-static.docs new file mode 100644 index 00000000000..b8472f7f0e6 --- /dev/null +++ b/debian/qemu-user-static.docs @@ -0,0 +1 @@ +docs/user/main.rst diff --git a/debian/qemu-user-static.install b/debian/qemu-user-static.install new file mode 100644 index 00000000000..42e8bca7c92 --- /dev/null +++ b/debian/qemu-user-static.install @@ -0,0 +1,3 @@ +debian/qemu-user-static.1 usr/share/man/man1/ +debian/qemu-debootstrap usr/sbin +debian/qemu-debootstrap.1 usr/share/man/man1/ diff --git a/debian/qemu-user-static.lintian-overrides b/debian/qemu-user-static.lintian-overrides new file mode 100644 index 00000000000..b9ed6553d30 --- /dev/null +++ b/debian/qemu-user-static.lintian-overrides @@ -0,0 +1,4 @@ +qemu-user-static: spelling-error-in-binary addd add *usr/bin/qemu-*-static* +qemu-user-static: spelling-error-in-binary nott not *usr/bin/qemu-*-static* +# these are static-pic executables, not shared executables: +qemu-user-static: shared-library-lacks-prerequisites *usr/bin/qemu-*-static* diff --git a/debian/qemu-user.docs b/debian/qemu-user.docs new file mode 100644 index 00000000000..b8472f7f0e6 --- /dev/null +++ b/debian/qemu-user.docs @@ -0,0 +1 @@ +docs/user/main.rst diff --git a/debian/qemu-user.lintian-overrides b/debian/qemu-user.lintian-overrides new file mode 100644 index 00000000000..5ee86b85bcb --- /dev/null +++ b/debian/qemu-user.lintian-overrides @@ -0,0 +1,3 @@ +qemu-user: spelling-error-in-binary addd add *usr/bin/qemu-* +qemu-user: spelling-error-in-binary nott not *usr/bin/qemu-* +qemu-user: spare-manual-page [usr/share/man/man1/qemu-user.1.gz] diff --git a/debian/qemu-utils.NEWS b/debian/qemu-utils.NEWS new file mode 100644 index 00000000000..406990ad04b --- /dev/null +++ b/debian/qemu-utils.NEWS @@ -0,0 +1,15 @@ +qemu (1:2.2+dfsg-6exp) unstable; urgency=medium + + Since Debian release 2.2+dfsg-6exp, a new package named qemu-block-extra + has been created, and some less frequently used block backends have been + split out of the main qemu-system binaries and the qemu-img binary into + this new package. The backends which have been split out are: + curl + iscsi + rbd (ceph/rados) + ssh + If you use any of these, please install the qemu-block-extra package in + addition to the qemu-system-* or qemu-utils packages, because without it + these block backends won't work anymore.
+ + -- Michael Tokarev Mon, 27 Apr 2015 09:29:55 +0300 diff --git a/debian/qemu-utils.install b/debian/qemu-utils.install new file mode 100644 index 00000000000..152109d222f --- /dev/null +++ b/debian/qemu-utils.install @@ -0,0 +1,10 @@ +usr/bin/qemu-img +usr/share/man/man1/qemu-img.1 +usr/bin/qemu-nbd +usr/share/man/man8/qemu-nbd.8 +usr/bin/qemu-io +usr/bin/qemu-storage-daemon +usr/share/man/man1/qemu-storage-daemon.1 +usr/share/man/man7/qemu-storage-daemon-qmp-ref.7 + +usr/share/man/man7/qemu-block-drivers.7 diff --git a/debian/qemu-utils.manpages b/debian/qemu-utils.manpages new file mode 100644 index 00000000000..7cba2a0b134 --- /dev/null +++ b/debian/qemu-utils.manpages @@ -0,0 +1 @@ +debian/qemu-io.1 diff --git a/debian/rules b/debian/rules new file mode 100755 index 00000000000..901d8ae8457 --- /dev/null +++ b/debian/rules @@ -0,0 +1,797 @@ +#!/usr/bin/make -f +SHELL = /bin/sh -e + +# stop python from generating .pyc caches +export PYTHONDONTWRITEBYTECODE=1 + +# to keep the output of different targets from being intermixed, disable parallel +# building of different targets in this d/rules but allow running parallel submakes +.NOTPARALLEL: + +ifeq ($(shell dpkg-vendor --derives-from Ubuntu && echo yes),yes) +VENDOR := UBUNTU +DEB_BUILD_PARALLEL = yes +else +VENDOR := DEBIAN +endif + +# get DEB_VERSION +include /usr/share/dpkg/pkg-info.mk + +ifeq ($(and ${DEB_HOST_MULTIARCH},${DEB_HOST_ARCH}),) +# Fast version of dpkg/architecture.mk defining all vars in one go + $(foreach d, $(shell dpkg-architecture | sed 's/=/?=/'), $(eval export $d)) +endif + +# we build in a sub-subdir so strip that from file paths too +export DEB_CFLAGS_MAINT_APPEND = -ffile-prefix-map=../../= + +# Disable LTO on non-amd64 builds, see: +# https://bugs.launchpad.net/ubuntu/+source/qemu/+bug/1921664 +# https://issues.redhat.com/browse/RHEL-7385 +ifneq ($(DEB_HOST_ARCH),amd64) + export DEB_BUILD_MAINT_OPTIONS += optimize=-lto +endif + +# get CFLAGS LDFLAGS etc (should come after DEB_*MAINT*) +include /usr/share/dpkg/buildflags.mk + +# Host architectures we produce packages for. +# when changing this list, also check d/control-in in case any changes +# need to be made for build deps and --enable options.
+system-arch-linux = \ + amd64 arm arm64 armel armhf i386 mips mipsel mips64 mips64el \ + powerpc powerpcspe ppc64 ppc64el riscv64 s390x sparc sparc64 x32 +system-arch = ${system-arch-linux} +user-arch = \ + amd64 arm arm64 armel armhf i386 mips mipsel mips64 mips64el \ + ppc64 ppc64el riscv64 s390x sparc sparc64 x32 +utils-arch = $(sort ${system-arch} ${user-arch} ia64 hppa m68k sh4 \ + kfreebsd-amd64 kfreebsd-i386) +# subset of system-arch +spice-arch = amd64 i386 arm64 armel armhf mips64el mipsel ppc64el x32 + +# DEB_BUILD_OPTIONS=parallel=N +MAKEFLAGS += $(subst parallel=,-j,$(filter parallel=%,${DEB_BUILD_OPTIONS})) + +# verbose build +V = $(if $(filter terse, ${DEB_BUILD_OPTIONS}),,1) + +NINJA = ninja $(if $V,-v) $(or $(filter -j%,${MAKEFLAGS}),-j1) + +# list of packages we're supposed to build +BUILD_PACKAGES := $(shell dh_listpackages) + +enable-system = $(if $(filter qemu-system,${BUILD_PACKAGES}),y) +enable-user = $(if $(filter qemu-user,${BUILD_PACKAGES}),y) +enable-user-static = $(if $(filter qemu-user-static,${BUILD_PACKAGES}),y) + +QEMU_XEN = /usr/libexec/xen-qemu-system-i386 +PKGVERSION = Debian ${DEB_VERSION} +SAVEMODDIR = /run/qemu/$(shell echo -n "${PKGVERSION}" | tr --complement '[:alnum:]+-.~' '_') +# New Ubuntu package qemu-block-supplemental to hold modules we want in Universe +# glusterfs is in universe, see LP: #2045063 +# This variable specifies a space-separated list of module filenames +# If altering this list, please also update the package description in d/control* +BLOCK_SUPPLEMENTAL_MODULES = block-gluster.so +sysdataidir = debian/qemu-system-data/usr/share/qemu +libdir = /usr/lib/${DEB_HOST_MULTIARCH} +FIRMWAREPATH = /usr/share/qemu:/usr/share/seabios:/usr/lib/ipxe/qemu + +ALPHAEV67_CROSSPFX = alpha-linux-gnu- +PPC_CROSSPFX = powerpc-linux-gnu- +PPC64_CROSSPFX = powerpc64-linux-gnu- +RISCV64_CROSSPFX = riscv64-linux-gnu- +ARM_CROSSPFX = arm-none-eabi- + +extra-cflags = ${CFLAGS} ${CPPFLAGS} +extra-ldflags = ${LDFLAGS} -Wl,--as-needed +# we add another set of configure options from debian/control +common_configure_opts = \ + --with-pkgversion="$(PKGVERSION)" \ + --extra-cflags="${extra-cflags}" \ + --extra-ldflags="${extra-ldflags}" \ + --prefix=/usr \ + --sysconfdir=/etc \ + --libdir=${libdir} \ + --libexecdir=/usr/lib/qemu \ + --firmwarepath=${FIRMWAREPATH} \ + --localstatedir=/var \ + --disable-install-blobs \ + --disable-strip \ + --localstatedir=/var \ + --disable-download \ + --disable-relocatable \ + +# this disables building of qemu-keymap tool (!) +# qemu-keymap might be helpful for qemu-system -k +# but is -k flag useful these days? 
+common_configure_opts += --disable-xkbcommon + +# pvrdma was removed in QEMU 9.1, no longer needs to be disabled +# (it was an extension for vmxnet3 vmware virtual network adapter) + +# Cross compiling support +ifneq ($(DEB_BUILD_GNU_TYPE), $(DEB_HOST_GNU_TYPE)) +common_configure_opts += --cross-prefix=$(DEB_HOST_GNU_TYPE)- +endif + +comma:=, + +# qemu-system (softmmu) targets, in multiple packages +# For each package: +# system-archlist-$pkg - list qemu architectures which should go to this pkg +# system-kvmcpus-$pkg - list of ${DEB_HOST_ARCH_CPU}s where we create +# kvm link for this package +# For each of ${system-archlist-*}, optional: +# system-alias-$qcpu - aliases for this qemu architecture +# For each of ${system-kvmcpus-*}, mandatory: +# system-kvmlink-$dcpu - where to point kvm link for this ${DEB_HOST_ARCH_CPU} + +system-archlist-arm = aarch64 arm +system-alias-aarch64 = arm64 +system-alias-arm = armel armhf +system-kvmcpus-arm = arm64 arm +system-kvmlink-arm64 = aarch64 +system-kvmlink-arm = arm + +system-archlist-mips = mips mipsel mips64 mips64el + +system-archlist-ppc = ppc ppc64 +system-alias-ppc = powerpc +system-alias-ppc64 = ppc64le ppc64el +system-kvmcpus-ppc = ppc64 ppc64el powerpc +system-kvmlink-ppc64 = ppc64 +system-kvmlink-ppc64el = ppc64 +system-kvmlink-powerpc = ppc + +system-archlist-sparc = sparc sparc64 + +system-s390x = $(if $(filter ${VENDOR},UBUNTU),s390x) + +system-archlist-s390x = ${system-s390x} +system-kvmcpus-s390x = s390x +system-kvmlink-s390x = s390x + +system-archlist-x86 = i386 x86_64 +system-alias-x86_64 = amd64 +system-kvmcpus-x86 = amd64 i386 +system-kvmlink-amd64 = x86_64 +system-kvmlink-i386 = x86_64 + +system-archlist-misc = alpha avr hppa m68k loongarch64 \ + microblaze microblazeel or1k riscv32 riscv64 rx sh4 sh4eb \ + $(if ${system-s390x},,s390x) \ + tricore xtensa xtensaeb +# Note: cris and nios2 removed in QEMU 9.1+ - system emulation targets no longer exist +system-alias-loongarch64 = loong64 +system-kvmcpus-misc = $(if ${system-s390x},,s390x) + +system-packages = arm mips ppc sparc x86 ${system-s390x} misc + +# install-system qemu-system-$pkg, ${system-archlist-$pkg}, kvm-link +# install qemu-arch-list binaries with manpages and aliases into pkg +# install kvm link to qemu-system-${kvm-link} if it is not empty +# fills in qemu:Provides and qemu:archlist substvars +define install-system + # installing $1 + dh_installdirs -p $1 usr/bin usr/share/man/man1 + mv -t debian/$1/usr/bin/ $(addprefix debian/tmp/usr/bin/qemu-system-,$2) +$(foreach q,$2,\ + @echo ".so man1/qemu-system.1" > debian/$1/usr/share/man/man1/qemu-system-$q.1 + $(foreach a,${system-alias-$q},\ + dh_link -p $1 usr/bin/qemu-system-$q usr/bin/qemu-system-$a + dh_link -p $1 usr/share/man/man1/qemu-system-$q.1 usr/share/man/man1/qemu-system-$a.1 +)) +$(if $3,\ + dh_link -p $1 usr/bin/$3 usr/bin/kvm + dh_link -p $1 usr/share/man/man1/$3.1 usr/share/man/man1/kvm.1 +) +# note: older make does not understand line-splitting inside $(functions ..) + echo 'qemu:Provides=$(if $3,qemu-kvm (=${DEB_VERSION})${comma})\ + $(patsubst %,% (=${DEB_VERSION})${comma},$(filter-out $1, $(patsubst %, qemu-system-%, any $2 $(foreach q,$2,${system-alias-$q}))))' \ + | tr _ - >> debian/$1.substvars +# construct list `arch1 arch2 (alias) arch3..'
for Description +# and word-wrap it into two lines if too long + list='$(foreach q,$2,$q$(if ${system-alias-$q}, (${system-alias-$q})))'; \ + len2=$$(($${#list}/2)); \ + if [ $$len2 -gt 36 ]; then \ + while expr substr "$$list" $$len2 1 != " " >/dev/null; do len2=$$(($$len2+1)); done; \ + list="$$(expr substr "$$list" 1 $$(($$len2-1)))\$${Newline} $$(expr substr "$$list" $$(($$len2+1)) $$len2)"; \ + fi; \ + echo "qemu:archlist=$$list" >> debian/$1.substvars + dh_installdocs -p $1 --link-doc=qemu-system-common +endef + +sysdata-components := +qemu-builds := + +# several builds of qemu binaries: + +############################################## +# main system and tools build +configure-qemu: b/qemu/configured +b/qemu/configured: configure + rm -rf b/qemu; mkdir -p b/qemu + cd b/qemu && \ + ../../configure ${common_configure_opts} \ + --$(if ${enable-system},enable,disable)-system \ + --disable-user --disable-linux-user \ + --enable-tools \ + --disable-xen \ + --enable-modules \ + $(if ${enable-system},--enable-module-upgrades) \ + $(shell sh debian/extract-config-opts \ + $(DEB_HOST_ARCH_OS)-$(DEB_HOST_ARCH) debian/control) \ + ${QEMU_CONFIGURE_OPTIONS} || \ + { tail -n20 config.log meson-logs/meson-log.txt && false; } + touch $@ + +build-qemu: b/qemu/built +b/qemu/built: b/qemu/configured + ${NINJA} -C b/qemu + touch $@ + +test-qemu: b/qemu/tested +b/qemu/tested: b/qemu/built +ifeq (${enable-system},y) +# copy all firmware files. An alternative is to remove qemu-bundle +# cp -a /usr/share/seabios/* /usr/lib/ipxe/qemu/* /usr/share/qemu/* \ +# b/qemu/qemu-bundle/usr/share/qemu/ + rm -rf b/qemu/qemu-bundle + QEMU_MODULE_DIR=${CURDIR}/b/qemu \ + ${MAKE} -C b/qemu check-block +endif + touch $@ + +install-qemu: b/qemu/built + DESTDIR=${CURDIR}/debian/tmp \ + ${NINJA} -C b/qemu install + +# remove qtest "accel" modules + rm -f debian/tmp${libdir}/qemu/accel-qtest-*.so + +# fixup the manpage +# Note: with no enable-system, qemu-system manpage will not be installed. 
+ sed -i 's/\\fBqemu(1)\\fP manual page/\\fBqemu-system(1)\\fP manual page (in qemu-system-common package)/' \ + debian/tmp/usr/share/man/man1/qemu-storage-daemon.1 + +ifeq (${enable-system},y) + + # qemu-system subpackages + mv debian/tmp/usr/share/man/man1/qemu.1 debian/tmp/usr/share/man/man1/qemu-system.1 + $(foreach p,${system-packages},\ + $(call install-system,qemu-system-$p,${system-archlist-$p}\ + ,$(if $(filter ${DEB_HOST_ARCH_CPU},${system-kvmcpus-$p}),qemu-system-${system-kvmlink-${DEB_HOST_ARCH_CPU}}))) + +ifeq ($(DEB_HOST_ARCH_OS),linux) + +ifeq (${VENDOR},UBUNTU) +ifneq ($(filter ${DEB_HOST_ARCH},amd64 i386),) +# on ubuntu *-spice existed, may be used in libvirt xml and scripts - keep links for compatibility +# The sunset for this will be when Ubuntu-Bionic goes out of support which is expected to happen in 2028 + install -p -t debian/qemu-system-x86/usr/bin debian/kvm-spice debian/qemu-system-x86_64-spice + install -p -t debian/qemu-system-x86/usr/share/man/man1/ debian/kvm-spice.1 + echo ".so man1/kvm-spice.1" > debian/qemu-system-x86/usr/share/man/man1/qemu-system-x86_64-spice.1 +endif # x86 +# apport hook is ubuntu-specific + install -p -D -t debian/qemu-system-common/usr/share/apport/package-hooks/ \ + debian/source_qemu.py +endif # ubuntu + +# for --enable-module-upgrades to work (also see run-qemu.mount install) +# save block-extra loadable modules on upgrades +# other module types for now (5.0) can't be loaded at runtime, only at startup +# the maintscript fragments include version string so we have to generate them + echo 'case $$1 in (upgrade|deconfigure) [ -d /run/qemu ] || exit 0; ! findmnt --noheadings --target /run/qemu/ | grep -q noexec || exit 0; mkdir -p ${SAVEMODDIR}; for m in ${libdir}/qemu/block-*.so; do if echo "${BLOCK_SUPPLEMENTAL_MODULES}" | grep -qF `basename "$$m"`; then continue; else cp -p $$m ${SAVEMODDIR}/; fi; done;; esac' \ + >> debian/qemu-block-extra.prerm.debhelper + echo 'case $$1 in (remove) for m in ${libdir}/qemu/block-*.so; do if echo "${BLOCK_SUPPLEMENTAL_MODULES}" | grep -qF `basename "$$m"`; then continue; else rm -f ${SAVEMODDIR}/`basename $$m`; fi; done;; esac' \ + >> debian/qemu-block-extra.postrm.debhelper + echo 'case $$1 in (purge) if systemctl is-active -q run-qemu.mount; then systemctl stop run-qemu.mount || true; fi; rm -rf "/run/qemu";; esac' \ + >> debian/qemu-block-extra.postrm.debhelper +# do mostly the same for qemu-block-supplemental +ifneq (,$(filter qemu-block-supplemental,${BUILD_PACKAGES})) + echo 'case $$1 in (upgrade|deconfigure) [ -d /run/qemu ] || exit 0; ! 
findmnt --noheadings --target /run/qemu/ | grep -q noexec || exit 0; mkdir -p ${SAVEMODDIR}; for m in ${BLOCK_SUPPLEMENTAL_MODULES}; do cp -p ${libdir}/qemu/$$m ${SAVEMODDIR}/; done;; esac' \ + >> debian/qemu-block-supplemental.prerm.debhelper + echo 'case $$1 in (remove) for m in ${BLOCK_SUPPLEMENTAL_MODULES}; do rm -f ${SAVEMODDIR}/$$m; done;; esac' \ + >> debian/qemu-block-supplemental.postrm.debhelper +endif + +endif # linux + +else # !enable-system + +# qemu-system manpage is built regardless of system target + rm -fv debian/tmp/usr/share/man/man1/qemu.1 + +endif # enable-system + +qemu-builds += qemu + +############################################## +# microvm build: +configure-microvm: b/microvm/configured +b/microvm/configured: configure debian/microvm-devices.mak + rm -rf b/microvm; mkdir -p b/microvm + cp -up debian/microvm-devices.mak configs/devices/x86_64-softmmu/microvm.mak + cd b/microvm && \ + ../../configure ${common_configure_opts} \ + --extra-cflags="${extra-cflags} -DCONFIG_MICROVM_DEFAULT=1" \ + --disable-docs \ + --without-default-features \ + --target-list=x86_64-softmmu --enable-kvm --disable-tcg \ + --enable-pixman \ + --enable-attr \ + --enable-coroutine-pool \ + --audio-drv-list="" \ + --without-default-devices \ + --with-devices-x86_64=microvm \ + --enable-vhost-kernel --enable-vhost-net \ + --enable-vhost-vdpa \ + --enable-vhost-user --enable-vhost-user-blk-server \ + --enable-vhost-crypto \ + --enable-linux-aio \ + --enable-numa \ + --enable-seccomp \ + --enable-virtfs \ + --disable-vnc \ + ${QEMU_MICROVM_CONFIGURE_OPTIONS} + touch $@ +build-microvm: b/microvm/qemu-system-x86_64 +b/microvm/qemu-system-x86_64: b/microvm/configured + ${NINJA} -C b/microvm qemu-system-x86_64 +install-microvm: b/microvm/qemu-system-x86_64 + cp b/microvm/qemu-system-x86_64 debian/qemu-system-x86/usr/bin/qemu-system-x86_64-microvm + echo ".so man1/qemu-system.1" > debian/qemu-system-x86/usr/share/man/man1/qemu-system-x86_64-microvm.1 +# build microvm on amd64 only if system build is enabled +qemu-builds += $(if $(filter ${DEB_HOST_ARCH}-${enable-system},amd64-y),microvm) + +############################################## +# xen build (amd64 arch only, i386-softmmu target only) +configure-xen: b/xen/configured +b/xen/configured: configure + # system build for qemu-system-xen + rm -rf b/xen; mkdir -p b/xen + cd b/xen && \ + ../../configure ${common_configure_opts} \ + --disable-install-blobs --disable-docs --disable-tools \ + --without-default-features \ + --enable-xen --target-list=i386-softmmu \ + --enable-xen-pci-passthrough \ + --disable-tcg --disable-kvm \ + --audio-drv-list= \ + --enable-libusb \ + --enable-pixman --enable-vnc --enable-vnc-jpeg \ + --enable-spice \ + --enable-virtfs --enable-attr --enable-cap-ng \ + ${QEMU_XEN_CONFIGURE_OPTIONS} + touch $@ +build-xen: b/xen/built +b/xen/built: b/xen/configured + ${NINJA} -C b/xen qemu-system-i386 + touch $@ +install-xen: b/xen/built + install -D b/xen/qemu-system-i386 \ + debian/qemu-system-xen${QEMU_XEN} +qemu-builds += $(if $(filter qemu-system-xen,${BUILD_PACKAGES}),xen) + +############################## + +# list of linux-user targets, from configs/targets/*-linux-user.mak +# Note: cris and nios2 removed in QEMU 9.1+ +user-targets = \ + aarch64 aarch64_be alpha arm armeb hexagon hppa i386 loongarch64 \ + m68k microblaze microblazeel mips mips64 mips64el mipsel mipsn32 mipsn32el \ + or1k ppc ppc64 ppc64le riscv32 riscv64 \ + s390x sh4 sh4eb sparc sparc32plus sparc64 \ + x86_64 xtensa xtensaeb +# aliases for missing 
${DEB_HOST_ARCH} names in qemu-user: +user-alias-aarch64 = arm64 +user-alias-arm = armel armhf +user-alias-loongarch64 = loong64 +user-alias-ppc = powerpc +user-alias-ppc64le = ppc64el +user-alias-x86_64 = amd64 + +# install-user [-static] +define install-user + dh_installdirs -p qemu-user$1 \ + usr/bin usr/share/man/man1 usr/share/doc/qemu-user$1 +$(foreach t,${user-targets},\ + cp -p b/user$1/qemu-$t debian/qemu-user$1/usr/bin/qemu-$t$1 + ln -sf qemu-user$1.1 debian/qemu-user$1/usr/share/man/man1/qemu-$t$1.1 +$(foreach a,${user-alias-$t},\ + ln -sf qemu-$t$1 debian/qemu-user$1/usr/bin/qemu-$a$1 + ln -sf qemu-$t$1.1 debian/qemu-user$1/usr/share/man/man1/qemu-$a$1.1 +)) +endef + +############################################## +# user build +configure-user: b/user/configured +b/user/configured: configure +# do not use debian/configure-opts here, all optional stuff will be enabled +# automatically, dependencies are already verified in the main build + rm -rf b/user; mkdir -p b/user + cd b/user && \ + ../../configure ${common_configure_opts} \ + --extra-cflags="${extra-cflags}" \ + --disable-plugins \ + --target-list="$(addsuffix -linux-user,${user-targets})" + touch $@ +build-user: b/user/built +b/user/built: b/user/configured + # we use this invocation to build just the binaries + ${NINJA} -C b/user $(addprefix qemu-,${user-targets}) + touch $@ +install-user: b/user/built + $(call install-user,) + sed -e 's/qemu\\-user\\-static/qemu\\-user/g' \ + -e 's/ (static version)//' \ + debian/qemu-user-static.1 > debian/qemu-user/usr/share/man/man1/qemu-user.1 + sed 's|-static||g' debian/qemu-user-static.README.Debian > \ + debian/qemu-user/usr/share/doc/qemu-user/README.Debian + dh_installdocs -p qemu-user-binfmt --link-doc=qemu-user + ./debian/binfmt-install qemu-user-binfmt +qemu-builds += $(if ${enable-user},user) + +############################################## +# user-static build +configure-user-static: b/user-static/configured +b/user-static/configured: configure +# do not use debian/configure-opts here, all optional stuff will be enabled +# automatically, dependencies are already verified in the main build +# See LP:#1908331 for --static-pie (the default in qemu) and #1053101 +# See https://sourceware.org/bugzilla/show_bug.cgi?id=29514 +# use --disable-pie on i386 for now due to #1056739 +# (maybe also add arm64 here for gcc12) + rm -rf b/user-static; mkdir -p b/user-static + cd b/user-static && \ + ../../configure ${common_configure_opts} \ + --extra-cflags="${extra-cflags}" \ + --static \ + $(if $(filter i386,${DEB_HOST_ARCH}),--disable-pie) \ + --disable-plugins \ + --target-list="$(addsuffix -linux-user,${user-targets})" + touch $@ +build-user-static: b/user-static/built +b/user-static/built: b/user-static/configured + # we use this invocation to build just the binaries + ${NINJA} -C b/user-static $(addprefix qemu-,${user-targets}) + touch $@ +install-user-static: b/user-static/built + $(call install-user,-static) + ./debian/binfmt-install qemu-user-static +qemu-builds += $(if ${enable-user-static},user-static) + +############################################## +# qemu-system-for-host dep-only package +# find the qemu-system-foo package which contains the right binary: +#qemu-system-for-host=$(strip \ +# $(foreach p,${system-packages},\ +# $(if \ +# $(filter \ +# ${system-archlist-$p} $(foreach a,${system-archlist-$p},${system-alias-$a}),\ +# ${DEB_HOST_ARCH_CPU}),\ +# qemu-system-$p))) +install-qemu-system-for-host: +# $(if ${qemu-system-for-host},,$(error no qemu-system-for-host found 
for ${DEB_HOST_ARCH_CPU})) +# echo 'qemu-for-host=${qemu-system-for-host}' + echo 'qemu:for-host=qemu-system-${DEB_HOST_ARCH_CPU}' \ + >> debian/qemu-system-for-host.substvars + dh_installdocs -p qemu-system-for-host --link-doc=qemu-system-common +configure-qemu-system-for-host: +build-qemu-system-for-host: +qemu-builds += $(filter ${BUILD_PACKAGES},qemu-system-for-host) + +############################################## +# common rules + +.PHONY: $(addprefix configure-, ${qemu-builds}) \ + $(addprefix build-, ${qemu-builds}) \ + $(addprefix install-, ${qemu-builds}) \ + configure-arch build-arch test-qemu \ + pre-install-arch install-arch binary-arch +configure-arch: $(addprefix configure-, ${qemu-builds}) +build-arch: $(addprefix build-, ${qemu-builds}) \ + +#XXX-cyclic-test-dep-dak-bug $(if $(findstring nocheck, ${DEB_BUILD_OPTIONS} ${DEB_BUILD_PROFILES}),, test-qemu) + +pre-install-arch: + dh_testroot + dh_prep -a + dh_installdirs -a +install-arch: pre-install-arch $(addprefix install-, ${qemu-builds}) + dh_install -a +ifneq (,$(filter qemu-block-supplemental,${BUILD_PACKAGES})) + # block-gluster.so should only be in qemu-block-supplemental, if it's built (it doesn't build on 32bit architectures) + dh_installdirs -p qemu-block-supplemental usr/lib/${DEB_HOST_MULTIARCH}/qemu + for m in ${BLOCK_SUPPLEMENTAL_MODULES}; do mv debian/qemu-block-extra/usr/lib/${DEB_HOST_MULTIARCH}/qemu/$$m debian/qemu-block-supplemental/usr/lib/${DEB_HOST_MULTIARCH}/qemu/; done + # This also introduces a "Depends: qemu-block-extra" to qemu-block-supplemental, which + # we want also because of the run-qemu.mount unit shipped there. + dh_installdocs -p qemu-block-supplemental --link-doc=qemu-block-extra +endif + dh_missing -a + dh_installdocs -a + dh_installchangelogs -a + dh_installman -a + dh_installinit -pqemu-guest-agent + # install /etc/default/qemu-kvm + dh_installinit -a -pqemu-system-common --name=qemu-kvm + dh_installsystemd -pqemu-guest-agent --no-enable + dh_installudev -pqemu-guest-agent + # install and enable qemu-kvm.service + dh_installsystemd -a -pqemu-system-common --no-restart-on-upgrade --name=qemu-kvm +# default-enable /run/qemu mount only on ubuntu, +# on debian let it be manually controlled and off by default + dh_installsystemd -pqemu-block-extra --no-restart-on-upgrade --name=run-qemu \ + $(if $(filter ${VENDOR},DEBIAN),--no-start --no-enable,) + dh_lintian -a + dh_strip_nondeterminism -a + dh_compress -a + dh_fixperms -a +#968670: skip dwz << 0.14 on these arches, remove in bookworm+1 + v="$$(dwz --version 2>&1 | sed -n 's/^dwz version //p')"; case "$$v" in \ + (0.1[0123]) echo "Skipping dwz version $$v on ${DEB_HOST_ARCH}: #968670" ;; \ + (*) dh_dwz -a ;; \ + esac + dh_strip -a +# qemu-user-static contains static binaries only, but they might be built +# as static-pie executables and dpkg-shlibdeps complains. Just omit it here. + dh_shlibdeps -a -Nqemu-user-static +ifeq (${enable-user-static},y) +# after shlibdeps finished, grab ${shlibs:Depends} from -user package +# and transform it into Built-Using field for -user-static. 
+ if [ -f debian/qemu-user.substvars ]; then \ + pkgs=$$(sed -n -e's/([^)]*)//g' -e's/,//g' -e's/^shlibs:Depends=//p' debian/qemu-user.substvars); \ + srcs="$$srcs $$(dpkg-query -f '$${source:Package} (= $${source:Version}),' -W $$pkgs)"; \ + echo "built-using=$$srcs" >> debian/qemu-user-static.substvars ; \ + fi +endif +binary-arch: install-arch binary-helper +binary-arch: a=-a + +############################################## +### firmware, qemu-system-data package + +### main qemu arch-indep build +# we configure qemu in b/data with default options, so that various +# subcomponents can be built from the main qemu source +b/data/.configured: | b + rm -rf b/data; mkdir -p b/data + cd b/data && ../../configure --skip-meson + touch $@ + +### openbios rules +b/openbios/config-host.mak: + mkdir -p b/openbios + cd b/openbios && ../../roms/openbios/config/scripts/switch-arch builtin-ppc builtin-sparc32 builtin-sparc64 +build-openbios: $(addprefix b/openbios/obj-, $(addsuffix /.built, ppc sparc32 sparc64)) +b/openbios/obj-%/.built: b/openbios/config-host.mak + ${MAKE} -C ${@D} V=${V} EXTRACFLAGS="-ffreestanding -fno-pic -fno-stack-protector" + @touch $@ +install-openbios: build-openbios + install -m 0644 b/openbios/obj-ppc/openbios-qemu.elf ${sysdataidir}/openbios-ppc + install -m 0644 b/openbios/obj-sparc32/openbios-builtin.elf ${sysdataidir}/openbios-sparc32 + install -m 0644 b/openbios/obj-sparc64/openbios-builtin.elf ${sysdataidir}/openbios-sparc64 + install -m 0644 -t ${sysdataidir} \ + b/openbios/obj-sparc32/QEMU,tcx.bin \ + b/openbios/obj-sparc32/QEMU,cgthree.bin \ + b/openbios/obj-sparc64/QEMU,VGA.bin +# sysdata-components += openbios + +### powernv firmware in roms/skiboot +build-skiboot: b/skiboot/skiboot.lid +b/skiboot/skiboot.lid: | roms/skiboot/.version + mkdir -p b/skiboot +# skiboot makefiles make it difficult to *add* an option to CFLAGS. +# Abuse OPTS= for this, with the default being -Os.
+ grep -q '^OPTS=-Os$$' roms/skiboot/Makefile.main || \ + { echo "review OPTS= in skiboot/Makefile.main"; false; } + ${MAKE} -C b/skiboot -f ${CURDIR}/roms/skiboot/Makefile \ + SRC=${CURDIR}/roms/skiboot \ + OPTS='-Os -ffile-prefix-map="${CURDIR}/roms/skiboot/"=' \ + CROSS_COMPILE=${PPC64_CROSSPFX} V=${V} +install-skiboot: b/skiboot/skiboot.lid + install -m 0644 -t ${sysdataidir} $< +# sysdata-components += skiboot + +### vof.bin +build-vof: b/data/pc-bios/vof/vof.bin +b/data/pc-bios/vof/vof.bin: b/data/.configured + ${MAKE} -C ${@D} V=$V ${@F} +install-vof: b/data/pc-bios/vof/vof.bin + install -m 0644 -t ${sysdataidir} $< +# sysdata-components += vof + +### u-boot-e500 (u-boot.e500) +build-u-boot-e500: b/u-boot/build-e500/u-boot +b/u-boot/build-e500/u-boot: | b + cp -alu roms/u-boot b/ + ${MAKE} -C b/u-boot O=build-e500 qemu-ppce500_config + ${MAKE} -C b/u-boot CROSS_COMPILE=${PPC_CROSSPFX} O=build-e500 V=$V + ${PPC_CROSSPFX}strip $@ +install-u-boot-e500: b/u-boot/build-e500/u-boot + install -m 0644 $< ${sysdataidir}/u-boot.e500 +# sysdata-components += u-boot-e500 + +### u-boot-sam460 (u-boot-sam460-20100605.bin) +build-u-boot-sam460: b/u-boot-sam460ex/u-boot.bin +b/u-boot-sam460ex/u-boot.bin: | b + cp -alu roms/u-boot-sam460ex b/ + ${MAKE} -C b/u-boot-sam460ex Sam460ex_config + env -u LDFLAGS -u OBJCFLAGS \ + ${MAKE} -C b/u-boot-sam460ex CROSS_COMPILE=${PPC_CROSSPFX} +# ${PPC_CROSSPFX}strip $@ +install-u-boot-sam460: b/u-boot-sam460ex/u-boot.bin | ${sysdataidir} + install -m 0644 $< ${sysdataidir}/u-boot-sam460-20100605.bin +# sysdata-components += u-boot-sam460 + +### x86 optionrom +build-x86-optionrom: b/optionrom/built +b/optionrom/built: + mkdir -p b/optionrom + ${MAKE} -f ${CURDIR}/debian/optionrom.mak -C b/optionrom SRC_PATH="${CURDIR}" all + touch $@ +install-x86-optionrom: build-x86-optionrom | ${sysdataidir} + ${MAKE} -f ${CURDIR}/debian/optionrom.mak -C b/optionrom SRC_PATH="${CURDIR}" install DESTDIR="${CURDIR}/${sysdataidir}" +# sysdata-components += x86-optionrom + +### qboot, aka bios-microvm +build-qboot: b/qboot/bios.bin +b/qboot/bios.bin: | b + rm -rf b/qboot + meson setup roms/qboot b/qboot + ninja -C b/qboot $(if $V,-v) +install-qboot: b/qboot/bios.bin + install -m 0644 $< ${sysdataidir}/qboot.rom +# sysdata-components += qboot + +### alpha firmware in roms/palcode-clipper +build-palcode-clipper: b/qemu-palcode/palcode-clipper +b/qemu-palcode/palcode-clipper: | b + cp -al roms/qemu-palcode b/ +#XXX #1019011 (remove OPT= alternative when fixed) + ${MAKE} -C b/qemu-palcode CROSS=${ALPHAEV67_CROSSPFX} -k || \ + ${MAKE} -C b/qemu-palcode CROSS=${ALPHAEV67_CROSSPFX} OPT=-O1 + ${ALPHAEV67_CROSSPFX}strip b/qemu-palcode/palcode-clipper +install-palcode-clipper: b/qemu-palcode/palcode-clipper + install -m 0644 $< ${sysdataidir}/palcode-clipper +# sysdata-components += palcode-clipper + +### SLOF +build-slof: b/SLOF/boot_rom.bin +b/SLOF/boot_rom.bin: | b + cp -al roms/SLOF b/ + env -u LDFLAGS -u CFLAGS $(MAKE) -C b/SLOF qemu CROSS=${PPC64_CROSSPFX} V=${V} +install-slof: b/SLOF/boot_rom.bin + install -m 0644 $< ${sysdataidir}/slof.bin +# sysdata-components += slof + +### s390x firmware in pc-bios/s390-ccw +build-s390x-fw: b/data/pc-bios/s390-ccw/.built +b/data/pc-bios/s390-ccw/.built: b/data/.configured + make -C ${@D} V=$V + touch $@ +install-s390x-fw: b/data/pc-bios/s390-ccw/.built + install -m 0644 -t ${sysdataidir} ${ roms/seabios-hppa/.config + ${MAKE} -C roms/seabios-hppa olddefconfig + ${MAKE} -C roms/seabios-hppa OUT=../../b/hppafw/ PYTHON=python3 parisc + 
hppa-linux-gnu-strip -R.note -R.comment $@ +install-hppa-fw: b/hppafw/hppa-firmware.img + install -m 0644 $< ${sysdataidir} +# sysdata-components += hppa-fw + +### opensbi (riscv firmware) +# we only build v64 variants, not v32 +build-opensbi: b/opensbi/.built +b/opensbi/.built: + mkdir -p b/opensbi + ${MAKE} -C roms/opensbi O=../../b/opensbi CROSS_COMPILE=${RISCV64_CROSSPFX} V=${V} PLATFORM=generic + ${RISCV64_CROSSPFX}strip --strip-unneeded -R.comment -R.note b/opensbi/platform/generic/firmware/fw_dynamic.elf + touch $@ +install-opensbi: build-opensbi + install -m 0644 b/opensbi/platform/generic/firmware/fw_dynamic.bin ${sysdataidir}/opensbi-riscv64-generic-fw_dynamic.bin + install -m 0644 b/opensbi/platform/generic/firmware/fw_dynamic.elf ${sysdataidir}/opensbi-riscv64-generic-fw_dynamic.elf +# sysdata-components += opensbi + +### vbootrom (npcm7xx) +build-vbootrom: b/vbootrom/.built +b/vbootrom/.built: | b + cp -pa roms/vbootrom b/ + ${MAKE} -C b/vbootrom CROSS_COMPILE=${ARM_CROSSPFX} + touch $@ +install-vbootrom: build-vbootrom + install -m 0644 b/vbootrom/npcm7xx_bootrom.bin ${sysdataidir}/ +# sysdata-components += vbootrom + +### misc firmware +build-misc: b/misc/.built +b/misc/.built: + mkdir -p b/misc + # Use pre-built DTB files from pc-bios/dtb/ + cp pc-bios/dtb/bamboo.dtb b/misc/bamboo.dtb + cp pc-bios/dtb/canyonlands.dtb b/misc/canyonlands.dtb + cp pc-bios/dtb/petalogix-ml605.dtb b/misc/petalogix-ml605.dtb + cp pc-bios/dtb/petalogix-s3adsp1800.dtb b/misc/petalogix-s3adsp1800.dtb + touch $@ +install-misc: build-misc + install -m 0644 b/misc/bamboo.dtb b/misc/canyonlands.dtb \ + -D -t ${sysdataidir}/dtb + install -m 0644 b/misc/petalogix-ml605.dtb b/misc/petalogix-s3adsp1800.dtb \ + -D -t debian/qemu-system-misc/usr/share/qemu/dtb/ +# icon for gtk ui + install -Dp -m0644 ui/icons/qemu.svg \ + -t debian/qemu-system-data/usr/share/icons/hicolor/scalable/apps/ + install -Dp -m0644 ui/qemu.desktop \ + -t debian/qemu-system-data/usr/share/applications/ +# icon for sdl2 ui (non-sdl-image version) + install -Dp -m0644 ui/icons/qemu_32x32.bmp \ + debian/qemu-system-data/usr/share/icons/hicolor/32x32/apps/qemu.bmp + install -Dp -m0644 -t debian/qemu-system-data/usr/share/qemu/keymaps/ \ + $$(ls -1 pc-bios/keymaps/* | fgrep -v /meson.build) +sysdata-components += misc + +${sysdataidir}: + mkdir -p -m 0755 $@ +b: + mkdir -p $@ + +.PHONY: $(addprefix build- , ${sysdata-components}) \ + $(addprefix install-, ${sysdata-components}) \ + build-indep pre-install-indep install-indep binary-indep +$(addprefix build- , ${sysdata-components}): | b +$(addprefix install-, ${sysdata-components}): | ${sysdataidir} +build-indep: $(addprefix build-, ${sysdata-components}) +pre-install-indep: + dh_testroot + dh_prep -i +install-indep: pre-install-indep $(addprefix install-, ${sysdata-components}) + dh_installdocs -i + # install qemu-system-data.links + dh_link -i + dh_installchangelogs -i + dh_lintian -i + dh_compress -i + dh_fixperms -i +binary-indep: install-indep binary-helper +binary-indep: a=-i + +build: build-arch build-indep +install: install-arch install-indep +binary: install-arch install-indep binary-helper +binary: a= +binary-helper: + dh_installdeb $a + dh_gencontrol $a + dh_md5sums $a + dh_builddeb $a + +clean: debian/control + dh_clean \ + b/ \ + configs/devices/x86_64-softmmu/microvm.mak \ + +.PHONY: build install binary binary-helper clean + +ifneq (,$(wildcard debian/control-in)) +# only include rules for debian/control if debian/control-in is present +debian/control: debian/control-in 
debian/rules + echo '# autogenerated file, please edit debian/control-in' > $@.tmp + sed -e 's/^:$(shell echo ${VENDOR} | tr '[A-Z]' '[a-z]')://' \ + -e '/^:[a-z]*:/D' \ + -e 's/:system-arch-linux:/$(sort ${system-arch-linux})/g' \ + -e 's/:system-arch:/$(sort ${system-arch})/g' \ + -e 's/:spice-arch:/$(sort ${spice-arch})/g' \ + -e 's/:user-arch:/$(sort ${user-arch})/g' \ + -e 's/:utils-arch:/${utils-arch}/g' \ + $< >> $@.tmp + mv -f $@.tmp $@ +endif diff --git a/debian/source/format b/debian/source/format new file mode 100644 index 00000000000..163aaf8d82b --- /dev/null +++ b/debian/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/debian/source/lintian-overrides b/debian/source/lintian-overrides new file mode 100644 index 00000000000..8d4f783e736 --- /dev/null +++ b/debian/source/lintian-overrides @@ -0,0 +1,14 @@ +# deliberate spacing to align Replaces +qemu source: debian-control-has-unusual-field-spacing Replaces * +# we have a ton of files with long lines, just shut up this warning +qemu source: very-long-line-length-in-source-file * +qemu source: package-does-not-install-examples *roms/*/examples/* +# we refer to versioned Provides coming from the same source so it's ok +qemu source: version-substvar-for-external-package Depends ${binary:Version} qemu-* -> qemu-system-any * +# yes we do NOT use dh sequencer. +# dh exports CFLAGS et all, which breaks firmware compilation badly +# also, it makes whole thing recursive, uncontrollable and hidden, +# hence difficult to debug. +qemu source: no-dh-sequencer [debian/rules] +# the source is available in linux-user/*/vdso.S: +qemu source: source-is-missing [linux-user/*/vdso*.so] diff --git a/debian/source/options b/debian/source/options new file mode 100644 index 00000000000..06e1ae9c8a7 --- /dev/null +++ b/debian/source/options @@ -0,0 +1 @@ +include-binaries diff --git a/debian/source_qemu.py b/debian/source_qemu.py new file mode 100644 index 00000000000..b86d6bd7a69 --- /dev/null +++ b/debian/source_qemu.py @@ -0,0 +1,25 @@ +'''apport package hook for qemu + +(c) 2009 Canonical Ltd. 
+''' + +from apport.hookutils import * +import subprocess + +def cmd_pipe(command1, command2, input = None, stderr = subprocess.STDOUT, stdin = None): + '''Try to pipe command1 into command2.''' + try: + sp1 = subprocess.Popen(command1, stdin=stdin, stdout=subprocess.PIPE, stderr=stderr, close_fds=True) + sp2 = subprocess.Popen(command2, stdin=sp1.stdout, stdout=subprocess.PIPE, stderr=stderr, close_fds=True) + except OSError as e: + return [127, str(e)] + + out = sp2.communicate(input)[0] + return [sp2.returncode,out] + +def add_info(report): + attach_hardware(report) + attach_related_packages(report, ['kvm*', '*libvirt*', 'virt-manager', 'qemu*']) + rc,output = cmd_pipe(['ps', '-eo', 'comm,stat,euid,ruid,pid,ppid,pcpu,args'], ['egrep', '(^COMMAND|^qemu|^kvm)']) + if rc == 0: + report['KvmCmdLine'] = output diff --git a/debian/tests/control b/debian/tests/control new file mode 100644 index 00000000000..9869a5f8aec --- /dev/null +++ b/debian/tests/control @@ -0,0 +1,11 @@ +Tests: test-qemu-img.sh +Depends: qemu-utils +Restrictions: superficial + +Tests: test-qemu-system.sh +Depends: qemu-system-x86 +Restrictions: superficial + +Tests: test-qemu-user.sh +Depends: qemu-user, qemu-user-static +Restrictions: superficial diff --git a/debian/tests/test-qemu-img.sh b/debian/tests/test-qemu-img.sh new file mode 100755 index 00000000000..ed7475f2faf --- /dev/null +++ b/debian/tests/test-qemu-img.sh @@ -0,0 +1,35 @@ +#!/bin/sh + +set -e + +cd "$AUTOPKGTEST_TMP" + +doit() { + echo "$1:" + shift + echo "$1" + if [ -n "$2" ]; then + out="$($1)" + eval "case \"\$out\" in ( $2 ) echo \"\$out\";; (*) echo \"unexpected output:\"; echo \" want $2\"; echo \" got \$out\"; return 1;; esac" + else + $1 + fi + echo ok. +} + +doit "Testing if qemu-img creates images" \ + "qemu-img create q.raw 12G" + +doit "Testing for correct image size" \ + "ls -l q.raw" '*\ 12884901888\ *' + +doit "Testing if file is sparse" \ + 'ls -s q.raw' '[04]\ *' + +doit "Testing if conversion to a qcow2 image works" \ + "qemu-img convert -f raw -O qcow2 q.raw q.qcow2" + +doit "Checking if image is qcow2" \ + 'qemu-img info q.qcow2' "*'file format: qcow2'*'size: 12 GiB (12884901888 bytes)'*" + +rm -f q.raw q.qcow2 diff --git a/debian/tests/test-qemu-system.sh b/debian/tests/test-qemu-system.sh new file mode 100755 index 00000000000..eca40951ca9 --- /dev/null +++ b/debian/tests/test-qemu-system.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +set -e + +for ARCH in x86_64 i386; do + echo -n "Checking for pc in ${ARCH}..." + qemu-system-${ARCH} -M help | grep -qs "pc\s\+Standard PC" + echo "done." 
+done + diff --git a/debian/tests/test-qemu-user.sh b/debian/tests/test-qemu-user.sh new file mode 100755 index 00000000000..da18b8af83c --- /dev/null +++ b/debian/tests/test-qemu-user.sh @@ -0,0 +1,34 @@ +#!/bin/sh + +set -e + +arch=$(dpkg --print-architecture) +echo "debian architecture: $arch" +case $arch in + ( amd64 | x86_64 ) arch=x86_64 ;; + ( i[3456]86 ) arch=i386 ;; + ( arm64 | aarch64 ) arch=aarch64 ;; + ( arm | armel | armhf ) arch=arm ;; + ( ppc64el | powerpc64le ) arch=ppc64le ;; + ( s390x ) ;; + ( * ) echo "Warning: unmapped architecture $arch" ;; +esac +echo "qemu architecture: $arch" + +tested= +for f in qemu-$arch qemu-$arch-static ; do + [ -x /usr/bin/$f ] || continue + echo "Checking if $f can run executables:" + echo "glob with sh: $f /bin/sh -c '$f /bin/ls -dCFl debian/*[t]*':" + ls="$($f /bin/sh -c "$f /bin/ls -dCFl debian/*[t]*")" + echo "$ls" + case "$ls" in + (*debian/control*) ;; + *) echo "Expected output not found" >&2; exit 1;; + esac + echo ok. + tested=y +done +if [ ! "$tested" ]; then + echo "Warning: qemu-$arch[-static] not found, not testing qemu-user" +fi diff --git a/debian/upstream/signing-key.asc b/debian/upstream/signing-key.asc new file mode 100644 index 00000000000..63904eafeab --- /dev/null +++ b/debian/upstream/signing-key.asc @@ -0,0 +1,44 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFJhQQ8BCAChk4A3y0VfqeGfuhBZK4nvpZP/cSIQntWDheF3Tx7m9CxEGbc+ +5aHxfrvm45LSjwPCK020WjeqYX2UFQfcvcjoW6iMbth1BLydu11vx6Gk/CJuB7Ss +8AbyvEXBcOfHbginUdqr4nwLD9e8qlVxRFbSHfbFRbuybZghke4y1pZzekkqbseT +kahkWHxr6o1EGAjyIdjAq1IQxewW6yJ4rkHWsRvfv3sUQTqBU+wT180kdwC8AAv6 +q6TX4um0HGR46uJ+5SG8DYb00kRMckQtYpTuwuUmlAvNh/qLg2fVVMEiHBpcuIiV +h7x8INuq94vc+tgxmr0bomIWIZljMQ7vp8ixABEBAAG0IE1pY2hhZWwgUm90aCA8 +bWRyb3RoQHV0ZXhhcy5lZHU+iQE4BBMBAgAiBQJSYUEPAhsDBgsJCAcDAgYVCAIJ +CgsEFgIDAQIeAQIXgAAKCRAzU8nO8Qi1hAHFB/9XZo74FYIGrSY814qun2wWF1Ui +wljt6pLrwJfpn5DQJmMRn6ZMAJxXb7S7NE1jVvo0E85m4tykjidWZvrFtTljDBiV +WKA+xHT8ip5+cLIOr5ogwsR7ujC+Rk1u5nb8FIafo3G7V8raXYI6T7sfOCJ+wUUI +s0A3Sw7P7GiAQLuafQ+jAUjg7S0REPNBvGamZ+tIAJxL7bAAACTWe4hZUGodXJuV +Ofk7ic2p5SpeT0E6/Pd2JtAcQdsdpRB4x5M99neRtyEdZO/3SauoHzIoneRxml1Y +rp4hr27Ggr8I9WMomU5E0IMz7HR3fRTQXVd5SsXKLsitUqeKHmTOLn1zuXtUtCFN +aWNoYWVsIFJvdGggPGZsdWtzaHVuQGdtYWlsLmNvbT6JATgEEwECACIFAlJhQl4C +GwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEDNTyc7xCLWEj2cH/21MIyGv +podSPX6e2y6sS0cvZ3rTNTm+qVq3VY2yJlHX8KVcvT6Su9s0yl9J5FdeIkQCqpo4 ++U/ANDK4R7rcnHl/gEw21932lVESPMMKhxlLluAb054nrqdwpdjtxf+VZcTt4c4B +zcF/K9po1LgZoOt6HJPxIC+tqYYQrsmrOZUtCJjgOXO6IkadrBmxoFNFxQ4qWB2u +oZ+/HEtceOIb56qNfOj36MtEKVwpScYPAxaePd9lO7P4vlCOx6yIzO3MzEl+Aen7 +/NYKU8WwwRnshAlW4GTn2XbrEQeJx4HzSzI5r8xfMOH3JLh/ZVANBR7c5cw1MPe7 +1fAj5WzXrr/scJu0KE1pY2hhZWwgUm90aCA8bWRyb3RoQGxpbnV4LnZuZXQuaWJt +LmNvbT6JATgEEwECACIFAlJhQjYCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheA +AAoJEDNTyc7xCLWEltAH/37ADf0YFPrJd3gcZyGC/zvtPhYPZ/kxnqoahoBIRF/s +ftGV5XqwwZOGm1Iws/VmTQ0PFBvU+Mw0vNT0HcreRT8PohtRlF+V8umHLVeL6x3C +tmJfsfyYBxLoE5/DLyYqBvpCothLm40ND4O6lRdLF5CUScGhgD1IblvmYBs0Uz71 +tFnHmC+Ov3fNC8YkTNymaxnJGYlPtmwUZ7SWh1XRl8pMQzL+3B9Be6ecO3J5lbFm +wL8XYawx0HtayvM5n9VbPy2eawAn++e7bLLywc39YyQ9/ckmSlZqtlUsEuij+h0T +qmr9XY7K8WEHqBJXfl8txIm8DTOqq4dcx3j7k+qwQJC5AQ0EUmFBDwEIAJDxMtsu +9ie8QN7eepcm+WuaY6Zbg3iDdPOOrQ4Ez+4oLaib5FHiZZjikdTsD7hlwcVuuhyE +P2/bT9f29pbsrUVjHRgqJPdcuoOlUzAekgz17895Wh1gRarsbDIJDgs1878OSvIC +/ek++qAWkzU4Sy8Psu9eJMTP6F0nPBOvet+iPwWDZO/dxrf+BnBb9wuBZnihpKMa +v2gJox0iYrqpnFOFlK/XdSYnZNYpIyBin1e+K2CG+TzF2M+KmdZE7FMhnTz95est +AG2kC37VIVkCq8yHNVZqsgyAfMqpB1ayQI2r3FUBM0Hxp6z2+8v/Ezp6zhYCI+Bi 
+UC7VbrWSSuTlp4UAEQEAAYkBHwQYAQIACQUCUmFBDwIbDAAKCRAzU8nO8Qi1hKWY +B/0R6ct3W2SEyoNuHTTKd5szIJigHYXrsqBa4XQGaVuFz7XZtcIbFFhEHjMrvTJp +BWhuZ091Gp0AjV2ACNi2z+dSpXi16QxdFb1/4us6mFEm86UIu4tcNN1V3WPiODpW +fFkEys/vmqQImLjfSsdxzhMdX7Yen1B3fxiKzwzsTlFbnNiBr2Mv7flDiUvMdbHm +b/n0/B6a69SRYfVkJ3MZdl0gptJlXhJVdwjwVVl3bjvlQd0aZoLwJ7ntrWeMxOkb +f8950vPVxemQ1frblB0zR98fuUNhX4cjrFTI9iJck7xLUwNZfgOz9PodfqUv4riM +LczMmw3nwGZO/aJg0m6uWSWk +=IvBA +-----END PGP PUBLIC KEY BLOCK----- diff --git a/debian/watch b/debian/watch new file mode 100644 index 00000000000..297e0b0c622 --- /dev/null +++ b/debian/watch @@ -0,0 +1,9 @@ +version=4 + +opts=\ +dversionmangle=auto,\ +repacksuffix=+ds,\ +uversionmangle=s/-rc/~rc/,\ +pgpsigurlmangle=s/$/.sig/,\ +compression=xz \ + https://download.qemu.org/ qemu-@ANY_VERSION@@ARCHIVE_EXT@ diff --git a/disas/cris.c b/disas/cris.c deleted file mode 100644 index 409a224c5d1..00000000000 --- a/disas/cris.c +++ /dev/null @@ -1,2863 +0,0 @@ -/* Disassembler code for CRIS. - Copyright 2000, 2001, 2002, 2004, 2005, 2006 Free Software Foundation, Inc. - Contributed by Axis Communications AB, Lund, Sweden. - Written by Hans-Peter Nilsson. - - This file is part of the GNU binutils and GDB, the GNU debugger. - - This program is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by the - Free Software Foundation; either version 2, or (at your option) any later - version. - - This program is distributed in the hope that it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, see . */ - -#include "qemu/osdep.h" -#include "disas/dis-asm.h" -#include "target/cris/opcode-cris.h" - -#define CONST_STRNEQ(STR1,STR2) (strncmp ((STR1), (STR2), sizeof (STR2) - 1) == 0) - -/* cris-opc.c -- Table of opcodes for the CRIS processor. - Copyright 2000, 2001, 2004 Free Software Foundation, Inc. - Contributed by Axis Communications AB, Lund, Sweden. - Originally written for GAS 1.38.1 by Mikael Asker. - Reorganized by Hans-Peter Nilsson. - -This file is part of GAS, GDB and the GNU binutils. - -GAS, GDB, and GNU binutils is free software; you can redistribute it -and/or modify it under the terms of the GNU General Public License as -published by the Free Software Foundation; either version 2, or (at your -option) any later version. - -GAS, GDB, and GNU binutils are distributed in the hope that they will be -useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, see . */ - -#ifndef NULL -#define NULL (0) -#endif - -/* This table isn't used for CRISv32 and the size of immediate operands. 
*/ -const struct cris_spec_reg -cris_spec_regs[] = -{ - {"bz", 0, 1, cris_ver_v32p, NULL}, - {"p0", 0, 1, 0, NULL}, - {"vr", 1, 1, 0, NULL}, - {"p1", 1, 1, 0, NULL}, - {"pid", 2, 1, cris_ver_v32p, NULL}, - {"p2", 2, 1, cris_ver_v32p, NULL}, - {"p2", 2, 1, cris_ver_warning, NULL}, - {"srs", 3, 1, cris_ver_v32p, NULL}, - {"p3", 3, 1, cris_ver_v32p, NULL}, - {"p3", 3, 1, cris_ver_warning, NULL}, - {"wz", 4, 2, cris_ver_v32p, NULL}, - {"p4", 4, 2, 0, NULL}, - {"ccr", 5, 2, cris_ver_v0_10, NULL}, - {"exs", 5, 4, cris_ver_v32p, NULL}, - {"p5", 5, 2, cris_ver_v0_10, NULL}, - {"p5", 5, 4, cris_ver_v32p, NULL}, - {"dcr0",6, 2, cris_ver_v0_3, NULL}, - {"eda", 6, 4, cris_ver_v32p, NULL}, - {"p6", 6, 2, cris_ver_v0_3, NULL}, - {"p6", 6, 4, cris_ver_v32p, NULL}, - {"dcr1/mof", 7, 4, cris_ver_v10p, - "Register `dcr1/mof' with ambiguous size specified. Guessing 4 bytes"}, - {"dcr1/mof", 7, 2, cris_ver_v0_3, - "Register `dcr1/mof' with ambiguous size specified. Guessing 2 bytes"}, - {"mof", 7, 4, cris_ver_v10p, NULL}, - {"dcr1",7, 2, cris_ver_v0_3, NULL}, - {"p7", 7, 4, cris_ver_v10p, NULL}, - {"p7", 7, 2, cris_ver_v0_3, NULL}, - {"dz", 8, 4, cris_ver_v32p, NULL}, - {"p8", 8, 4, 0, NULL}, - {"ibr", 9, 4, cris_ver_v0_10, NULL}, - {"ebp", 9, 4, cris_ver_v32p, NULL}, - {"p9", 9, 4, 0, NULL}, - {"irp", 10, 4, cris_ver_v0_10, NULL}, - {"erp", 10, 4, cris_ver_v32p, NULL}, - {"p10", 10, 4, 0, NULL}, - {"srp", 11, 4, 0, NULL}, - {"p11", 11, 4, 0, NULL}, - /* For disassembly use only. Accept at assembly with a warning. */ - {"bar/dtp0", 12, 4, cris_ver_warning, - "Ambiguous register `bar/dtp0' specified"}, - {"nrp", 12, 4, cris_ver_v32p, NULL}, - {"bar", 12, 4, cris_ver_v8_10, NULL}, - {"dtp0",12, 4, cris_ver_v0_3, NULL}, - {"p12", 12, 4, 0, NULL}, - /* For disassembly use only. Accept at assembly with a warning. */ - {"dccr/dtp1",13, 4, cris_ver_warning, - "Ambiguous register `dccr/dtp1' specified"}, - {"ccs", 13, 4, cris_ver_v32p, NULL}, - {"dccr",13, 4, cris_ver_v8_10, NULL}, - {"dtp1",13, 4, cris_ver_v0_3, NULL}, - {"p13", 13, 4, 0, NULL}, - {"brp", 14, 4, cris_ver_v3_10, NULL}, - {"usp", 14, 4, cris_ver_v32p, NULL}, - {"p14", 14, 4, cris_ver_v3p, NULL}, - {"usp", 15, 4, cris_ver_v10, NULL}, - {"spc", 15, 4, cris_ver_v32p, NULL}, - {"p15", 15, 4, cris_ver_v10p, NULL}, - {NULL, 0, 0, cris_ver_version_all, NULL} -}; - -/* Add version specifiers to this table when necessary. - The (now) regular coding of register names suggests a simpler - implementation. */ -const struct cris_support_reg cris_support_regs[] = -{ - {"s0", 0}, - {"s1", 1}, - {"s2", 2}, - {"s3", 3}, - {"s4", 4}, - {"s5", 5}, - {"s6", 6}, - {"s7", 7}, - {"s8", 8}, - {"s9", 9}, - {"s10", 10}, - {"s11", 11}, - {"s12", 12}, - {"s13", 13}, - {"s14", 14}, - {"s15", 15}, - {NULL, 0} -}; - -/* All CRIS opcodes are 16 bits. - - - The match component is a mask saying which bits must match a - particular opcode in order for an instruction to be an instance - of that opcode. - - - The args component is a string containing characters symbolically - matching the operands of an instruction. Used for both assembly - and disassembly. - - Operand-matching characters: - [ ] , space - Verbatim. - A The string "ACR" (case-insensitive). - B Not really an operand. It causes a "BDAP -size,SP" prefix to be - output for the PUSH alias-instructions and recognizes a push- - prefix at disassembly. This letter isn't recognized for v32. - Must be followed by a R or P letter. - ! Non-match pattern, will not match if there's a prefix insn. 
- b Non-matching operand, used for branches with 16-bit - displacement. Only recognized by the disassembler. - c 5-bit unsigned immediate in bits <4:0>. - C 4-bit unsigned immediate in bits <3:0>. - d At assembly, optionally (as in put other cases before this one) - ".d" or ".D" at the start of the operands, followed by one space - character. At disassembly, nothing. - D General register in bits <15:12> and <3:0>. - f List of flags in bits <15:12> and <3:0>. - i 6-bit signed immediate in bits <5:0>. - I 6-bit unsigned immediate in bits <5:0>. - M Size modifier (B, W or D) for CLEAR instructions. - m Size modifier (B, W or D) in bits <5:4> - N A 32-bit dword, like in the difference between s and y. - This has no effect on bits in the opcode. Can also be expressed - as "[pc+]" in input. - n As N, but PC-relative (to the start of the instruction). - o [-128..127] word offset in bits <7:1> and <0>. Used by 8-bit - branch instructions. - O [-128..127] offset in bits <7:0>. Also matches a comma and a - general register after the expression, in bits <15:12>. Used - only for the BDAP prefix insn (in v32 the ADDOQ insn; same opcode). - P Special register in bits <15:12>. - p Indicates that the insn is a prefix insn. Must be first - character. - Q As O, but don't relax; force an 8-bit offset. - R General register in bits <15:12>. - r General register in bits <3:0>. - S Source operand in bit <10> and a prefix; a 3-operand prefix - without side-effect. - s Source operand in bits <10> and <3:0>, optionally with a - side-effect prefix, except [pc] (the name, not R15 as in ACR) - isn't allowed for v32 and higher. - T Support register in bits <15:12>. - u 4-bit (PC-relative) unsigned immediate word offset in bits <3:0>. - U Relaxes to either u or n, instruction is assumed LAPCQ or LAPC. - Not recognized at disassembly. - x Register-dot-modifier, for example "r5.w" in bits <15:12> and <5:4>. - y Like 's' but do not allow an integer at assembly. - Y The difference s-y; only an integer is allowed. - z Size modifier (B or W) in bit <4>. */ - - -/* Please note the order of the opcodes in this table is significant. - The assembler requires that all instances of the same mnemonic must - be consecutive. If they aren't, the assembler might not recognize - them, or may indicate an internal error. - - The disassembler should not normally care about the order of the - opcodes, but will prefer an earlier alternative if the "match-score" - (see cris-dis.c) is computed as equal. - - It should not be significant for proper execution that this table is - in alphabetical order, but please follow that convention for an easy - overview. 
*/ - -const struct cris_opcode -cris_opcodes[] = -{ - {"abs", 0x06B0, 0x0940, "r,R", 0, SIZE_NONE, 0, - cris_abs_op}, - - {"add", 0x0600, 0x09c0, "m r,R", 0, SIZE_NONE, 0, - cris_reg_mode_add_sub_cmp_and_or_move_op}, - - {"add", 0x0A00, 0x01c0, "m s,R", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"add", 0x0A00, 0x01c0, "m S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"add", 0x0a00, 0x05c0, "m S,R,r", 0, SIZE_NONE, - cris_ver_v0_10, - cris_three_operand_add_sub_cmp_and_or_op}, - - {"add", 0x0A00, 0x01c0, "m s,R", 0, SIZE_FIELD, - cris_ver_v32p, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"addc", 0x0570, 0x0A80, "r,R", 0, SIZE_FIX_32, - cris_ver_v32p, - cris_not_implemented_op}, - - {"addc", 0x09A0, 0x0250, "s,R", 0, SIZE_FIX_32, - cris_ver_v32p, - cris_not_implemented_op}, - - {"addi", 0x0540, 0x0A80, "x,r,A", 0, SIZE_NONE, - cris_ver_v32p, - cris_addi_op}, - - {"addi", 0x0500, 0x0Ac0, "x,r", 0, SIZE_NONE, 0, - cris_addi_op}, - - /* This collates after "addo", but we want to disassemble as "addoq", - not "addo". */ - {"addoq", 0x0100, 0x0E00, "Q,A", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"addo", 0x0940, 0x0280, "m s,R,A", 0, SIZE_FIELD_SIGNED, - cris_ver_v32p, - cris_not_implemented_op}, - - /* This must be located after the insn above, lest we misinterpret - "addo.b -1,r0,acr" as "addo .b-1,r0,acr". FIXME: Sounds like a - parser bug. */ - {"addo", 0x0100, 0x0E00, "O,A", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"addq", 0x0200, 0x0Dc0, "I,R", 0, SIZE_NONE, 0, - cris_quick_mode_add_sub_op}, - - {"adds", 0x0420, 0x0Bc0, "z r,R", 0, SIZE_NONE, 0, - cris_reg_mode_add_sub_cmp_and_or_move_op}, - - /* FIXME: SIZE_FIELD_SIGNED and all necessary changes. */ - {"adds", 0x0820, 0x03c0, "z s,R", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"adds", 0x0820, 0x03c0, "z S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"adds", 0x0820, 0x07c0, "z S,R,r", 0, SIZE_NONE, - cris_ver_v0_10, - cris_three_operand_add_sub_cmp_and_or_op}, - - {"addu", 0x0400, 0x0be0, "z r,R", 0, SIZE_NONE, 0, - cris_reg_mode_add_sub_cmp_and_or_move_op}, - - /* FIXME: SIZE_FIELD_UNSIGNED and all necessary changes. */ - {"addu", 0x0800, 0x03e0, "z s,R", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"addu", 0x0800, 0x03e0, "z S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"addu", 0x0800, 0x07e0, "z S,R,r", 0, SIZE_NONE, - cris_ver_v0_10, - cris_three_operand_add_sub_cmp_and_or_op}, - - {"and", 0x0700, 0x08C0, "m r,R", 0, SIZE_NONE, 0, - cris_reg_mode_add_sub_cmp_and_or_move_op}, - - {"and", 0x0B00, 0x00C0, "m s,R", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"and", 0x0B00, 0x00C0, "m S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"and", 0x0B00, 0x04C0, "m S,R,r", 0, SIZE_NONE, - cris_ver_v0_10, - cris_three_operand_add_sub_cmp_and_or_op}, - - {"andq", 0x0300, 0x0CC0, "i,R", 0, SIZE_NONE, 0, - cris_quick_mode_and_cmp_move_or_op}, - - {"asr", 0x0780, 0x0840, "m r,R", 0, SIZE_NONE, 0, - cris_asr_op}, - - {"asrq", 0x03a0, 0x0c40, "c,R", 0, SIZE_NONE, 0, - cris_asrq_op}, - - {"ax", 0x15B0, 0xEA4F, "", 0, SIZE_NONE, 0, - cris_ax_ei_setf_op}, - - /* FIXME: Should use branch #defines. 
*/ - {"b", 0x0dff, 0x0200, "b", 1, SIZE_NONE, 0, - cris_sixteen_bit_offset_branch_op}, - - {"ba", - BA_QUICK_OPCODE, - 0x0F00+(0xF-CC_A)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - /* Needs to come after the usual "ba o", which might be relaxed to - this one. */ - {"ba", BA_DWORD_OPCODE, - 0xffff & (~BA_DWORD_OPCODE), "n", 0, SIZE_FIX_32, - cris_ver_v32p, - cris_none_reg_mode_jump_op}, - - {"bas", 0x0EBF, 0x0140, "n,P", 0, SIZE_FIX_32, - cris_ver_v32p, - cris_none_reg_mode_jump_op}, - - {"basc", 0x0EFF, 0x0100, "n,P", 0, SIZE_FIX_32, - cris_ver_v32p, - cris_none_reg_mode_jump_op}, - - {"bcc", - BRANCH_QUICK_OPCODE+CC_CC*0x1000, - 0x0f00+(0xF-CC_CC)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"bcs", - BRANCH_QUICK_OPCODE+CC_CS*0x1000, - 0x0f00+(0xF-CC_CS)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"bdap", - BDAP_INDIR_OPCODE, BDAP_INDIR_Z_BITS, "pm s,R", 0, SIZE_FIELD_SIGNED, - cris_ver_v0_10, - cris_bdap_prefix}, - - {"bdap", - BDAP_QUICK_OPCODE, BDAP_QUICK_Z_BITS, "pO", 0, SIZE_NONE, - cris_ver_v0_10, - cris_quick_mode_bdap_prefix}, - - {"beq", - BRANCH_QUICK_OPCODE+CC_EQ*0x1000, - 0x0f00+(0xF-CC_EQ)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - /* This is deliberately put before "bext" to trump it, even though not - in alphabetical order, since we don't do excluding version checks - for v0..v10. */ - {"bwf", - BRANCH_QUICK_OPCODE+CC_EXT*0x1000, - 0x0f00+(0xF-CC_EXT)*0x1000, "o", 1, SIZE_NONE, - cris_ver_v10, - cris_eight_bit_offset_branch_op}, - - {"bext", - BRANCH_QUICK_OPCODE+CC_EXT*0x1000, - 0x0f00+(0xF-CC_EXT)*0x1000, "o", 1, SIZE_NONE, - cris_ver_v0_3, - cris_eight_bit_offset_branch_op}, - - {"bge", - BRANCH_QUICK_OPCODE+CC_GE*0x1000, - 0x0f00+(0xF-CC_GE)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"bgt", - BRANCH_QUICK_OPCODE+CC_GT*0x1000, - 0x0f00+(0xF-CC_GT)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"bhi", - BRANCH_QUICK_OPCODE+CC_HI*0x1000, - 0x0f00+(0xF-CC_HI)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"bhs", - BRANCH_QUICK_OPCODE+CC_HS*0x1000, - 0x0f00+(0xF-CC_HS)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"biap", BIAP_OPCODE, BIAP_Z_BITS, "pm r,R", 0, SIZE_NONE, - cris_ver_v0_10, - cris_biap_prefix}, - - {"ble", - BRANCH_QUICK_OPCODE+CC_LE*0x1000, - 0x0f00+(0xF-CC_LE)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"blo", - BRANCH_QUICK_OPCODE+CC_LO*0x1000, - 0x0f00+(0xF-CC_LO)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"bls", - BRANCH_QUICK_OPCODE+CC_LS*0x1000, - 0x0f00+(0xF-CC_LS)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"blt", - BRANCH_QUICK_OPCODE+CC_LT*0x1000, - 0x0f00+(0xF-CC_LT)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"bmi", - BRANCH_QUICK_OPCODE+CC_MI*0x1000, - 0x0f00+(0xF-CC_MI)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"bmod", 0x0ab0, 0x0140, "s,R", 0, SIZE_FIX_32, - cris_ver_sim_v0_10, - cris_not_implemented_op}, - - {"bmod", 0x0ab0, 0x0140, "S,D", 0, SIZE_NONE, - cris_ver_sim_v0_10, - cris_not_implemented_op}, - - {"bmod", 0x0ab0, 0x0540, "S,R,r", 0, SIZE_NONE, - cris_ver_sim_v0_10, - cris_not_implemented_op}, - - {"bne", - BRANCH_QUICK_OPCODE+CC_NE*0x1000, - 0x0f00+(0xF-CC_NE)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"bound", 0x05c0, 0x0A00, "m r,R", 0, SIZE_NONE, 0, - 
cris_two_operand_bound_op}, - /* FIXME: SIZE_FIELD_UNSIGNED and all necessary changes. */ - {"bound", 0x09c0, 0x0200, "m s,R", 0, SIZE_FIELD, - cris_ver_v0_10, - cris_two_operand_bound_op}, - /* FIXME: SIZE_FIELD_UNSIGNED and all necessary changes. */ - {"bound", 0x0dcf, 0x0200, "m Y,R", 0, SIZE_FIELD, 0, - cris_two_operand_bound_op}, - {"bound", 0x09c0, 0x0200, "m S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_two_operand_bound_op}, - {"bound", 0x09c0, 0x0600, "m S,R,r", 0, SIZE_NONE, - cris_ver_v0_10, - cris_three_operand_bound_op}, - - {"bpl", - BRANCH_QUICK_OPCODE+CC_PL*0x1000, - 0x0f00+(0xF-CC_PL)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"break", 0xe930, 0x16c0, "C", 0, SIZE_NONE, - cris_ver_v3p, - cris_break_op}, - - {"bsb", - BRANCH_QUICK_OPCODE+CC_EXT*0x1000, - 0x0f00+(0xF-CC_EXT)*0x1000, "o", 1, SIZE_NONE, - cris_ver_v32p, - cris_eight_bit_offset_branch_op}, - - {"bsr", 0xBEBF, 0x4140, "n", 0, SIZE_FIX_32, - cris_ver_v32p, - cris_none_reg_mode_jump_op}, - - {"bsrc", 0xBEFF, 0x4100, "n", 0, SIZE_FIX_32, - cris_ver_v32p, - cris_none_reg_mode_jump_op}, - - {"bstore", 0x0af0, 0x0100, "s,R", 0, SIZE_FIX_32, - cris_ver_warning, - cris_not_implemented_op}, - - {"bstore", 0x0af0, 0x0100, "S,D", 0, SIZE_NONE, - cris_ver_warning, - cris_not_implemented_op}, - - {"bstore", 0x0af0, 0x0500, "S,R,r", 0, SIZE_NONE, - cris_ver_warning, - cris_not_implemented_op}, - - {"btst", 0x04F0, 0x0B00, "r,R", 0, SIZE_NONE, 0, - cris_btst_nop_op}, - {"btstq", 0x0380, 0x0C60, "c,R", 0, SIZE_NONE, 0, - cris_btst_nop_op}, - - {"bvc", - BRANCH_QUICK_OPCODE+CC_VC*0x1000, - 0x0f00+(0xF-CC_VC)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"bvs", - BRANCH_QUICK_OPCODE+CC_VS*0x1000, - 0x0f00+(0xF-CC_VS)*0x1000, "o", 1, SIZE_NONE, 0, - cris_eight_bit_offset_branch_op}, - - {"clear", 0x0670, 0x3980, "M r", 0, SIZE_NONE, 0, - cris_reg_mode_clear_op}, - - {"clear", 0x0A70, 0x3180, "M y", 0, SIZE_NONE, 0, - cris_none_reg_mode_clear_test_op}, - - {"clear", 0x0A70, 0x3180, "M S", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_clear_test_op}, - - {"clearf", 0x05F0, 0x0A00, "f", 0, SIZE_NONE, 0, - cris_clearf_di_op}, - - {"cmp", 0x06C0, 0x0900, "m r,R", 0, SIZE_NONE, 0, - cris_reg_mode_add_sub_cmp_and_or_move_op}, - - {"cmp", 0x0Ac0, 0x0100, "m s,R", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"cmp", 0x0Ac0, 0x0100, "m S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"cmpq", 0x02C0, 0x0D00, "i,R", 0, SIZE_NONE, 0, - cris_quick_mode_and_cmp_move_or_op}, - - /* FIXME: SIZE_FIELD_SIGNED and all necessary changes. */ - {"cmps", 0x08e0, 0x0300, "z s,R", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"cmps", 0x08e0, 0x0300, "z S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - /* FIXME: SIZE_FIELD_UNSIGNED and all necessary changes. 
*/ - {"cmpu", 0x08c0, 0x0320, "z s,R" , 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"cmpu", 0x08c0, 0x0320, "z S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"di", 0x25F0, 0xDA0F, "", 0, SIZE_NONE, 0, - cris_clearf_di_op}, - - {"dip", DIP_OPCODE, DIP_Z_BITS, "ps", 0, SIZE_FIX_32, - cris_ver_v0_10, - cris_dip_prefix}, - - {"div", 0x0980, 0x0640, "m R,r", 0, SIZE_FIELD, 0, - cris_not_implemented_op}, - - {"dstep", 0x06f0, 0x0900, "r,R", 0, SIZE_NONE, 0, - cris_dstep_logshift_mstep_neg_not_op}, - - {"ei", 0x25B0, 0xDA4F, "", 0, SIZE_NONE, 0, - cris_ax_ei_setf_op}, - - {"fidxd", 0x0ab0, 0xf540, "[r]", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"fidxi", 0x0d30, 0xF2C0, "[r]", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"ftagd", 0x1AB0, 0xE540, "[r]", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"ftagi", 0x1D30, 0xE2C0, "[r]", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"halt", 0xF930, 0x06CF, "", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"jas", 0x09B0, 0x0640, "r,P", 0, SIZE_NONE, - cris_ver_v32p, - cris_reg_mode_jump_op}, - - {"jas", 0x0DBF, 0x0240, "N,P", 0, SIZE_FIX_32, - cris_ver_v32p, - cris_reg_mode_jump_op}, - - {"jasc", 0x0B30, 0x04C0, "r,P", 0, SIZE_NONE, - cris_ver_v32p, - cris_reg_mode_jump_op}, - - {"jasc", 0x0F3F, 0x00C0, "N,P", 0, SIZE_FIX_32, - cris_ver_v32p, - cris_reg_mode_jump_op}, - - {"jbrc", 0x69b0, 0x9640, "r", 0, SIZE_NONE, - cris_ver_v8_10, - cris_reg_mode_jump_op}, - - {"jbrc", 0x6930, 0x92c0, "s", 0, SIZE_FIX_32, - cris_ver_v8_10, - cris_none_reg_mode_jump_op}, - - {"jbrc", 0x6930, 0x92c0, "S", 0, SIZE_NONE, - cris_ver_v8_10, - cris_none_reg_mode_jump_op}, - - {"jir", 0xA9b0, 0x5640, "r", 0, SIZE_NONE, - cris_ver_v8_10, - cris_reg_mode_jump_op}, - - {"jir", 0xA930, 0x52c0, "s", 0, SIZE_FIX_32, - cris_ver_v8_10, - cris_none_reg_mode_jump_op}, - - {"jir", 0xA930, 0x52c0, "S", 0, SIZE_NONE, - cris_ver_v8_10, - cris_none_reg_mode_jump_op}, - - {"jirc", 0x29b0, 0xd640, "r", 0, SIZE_NONE, - cris_ver_v8_10, - cris_reg_mode_jump_op}, - - {"jirc", 0x2930, 0xd2c0, "s", 0, SIZE_FIX_32, - cris_ver_v8_10, - cris_none_reg_mode_jump_op}, - - {"jirc", 0x2930, 0xd2c0, "S", 0, SIZE_NONE, - cris_ver_v8_10, - cris_none_reg_mode_jump_op}, - - {"jsr", 0xB9b0, 0x4640, "r", 0, SIZE_NONE, 0, - cris_reg_mode_jump_op}, - - {"jsr", 0xB930, 0x42c0, "s", 0, SIZE_FIX_32, - cris_ver_v0_10, - cris_none_reg_mode_jump_op}, - - {"jsr", 0xBDBF, 0x4240, "N", 0, SIZE_FIX_32, - cris_ver_v32p, - cris_none_reg_mode_jump_op}, - - {"jsr", 0xB930, 0x42c0, "S", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_jump_op}, - - {"jsrc", 0x39b0, 0xc640, "r", 0, SIZE_NONE, - cris_ver_v8_10, - cris_reg_mode_jump_op}, - - {"jsrc", 0x3930, 0xc2c0, "s", 0, SIZE_FIX_32, - cris_ver_v8_10, - cris_none_reg_mode_jump_op}, - - {"jsrc", 0x3930, 0xc2c0, "S", 0, SIZE_NONE, - cris_ver_v8_10, - cris_none_reg_mode_jump_op}, - - {"jsrc", 0xBB30, 0x44C0, "r", 0, SIZE_NONE, - cris_ver_v32p, - cris_reg_mode_jump_op}, - - {"jsrc", 0xBF3F, 0x40C0, "N", 0, SIZE_FIX_32, - cris_ver_v32p, - cris_reg_mode_jump_op}, - - {"jump", 0x09b0, 0xF640, "r", 0, SIZE_NONE, 0, - cris_reg_mode_jump_op}, - - {"jump", - JUMP_INDIR_OPCODE, JUMP_INDIR_Z_BITS, "s", 0, SIZE_FIX_32, - cris_ver_v0_10, - cris_none_reg_mode_jump_op}, - - {"jump", - JUMP_INDIR_OPCODE, JUMP_INDIR_Z_BITS, "S", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_jump_op}, - - {"jump", 0x09F0, 0x060F, "P", 0, 
SIZE_NONE, - cris_ver_v32p, - cris_none_reg_mode_jump_op}, - - {"jump", - JUMP_PC_INCR_OPCODE_V32, - (0xffff & ~JUMP_PC_INCR_OPCODE_V32), "N", 0, SIZE_FIX_32, - cris_ver_v32p, - cris_none_reg_mode_jump_op}, - - {"jmpu", 0x8930, 0x72c0, "s", 0, SIZE_FIX_32, - cris_ver_v10, - cris_none_reg_mode_jump_op}, - - {"jmpu", 0x8930, 0x72c0, "S", 0, SIZE_NONE, - cris_ver_v10, - cris_none_reg_mode_jump_op}, - - {"lapc", 0x0970, 0x0680, "U,R", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"lapc", 0x0D7F, 0x0280, "dn,R", 0, SIZE_FIX_32, - cris_ver_v32p, - cris_not_implemented_op}, - - {"lapcq", 0x0970, 0x0680, "u,R", 0, SIZE_NONE, - cris_ver_v32p, - cris_addi_op}, - - {"lsl", 0x04C0, 0x0B00, "m r,R", 0, SIZE_NONE, 0, - cris_dstep_logshift_mstep_neg_not_op}, - - {"lslq", 0x03c0, 0x0C20, "c,R", 0, SIZE_NONE, 0, - cris_dstep_logshift_mstep_neg_not_op}, - - {"lsr", 0x07C0, 0x0800, "m r,R", 0, SIZE_NONE, 0, - cris_dstep_logshift_mstep_neg_not_op}, - - {"lsrq", 0x03e0, 0x0C00, "c,R", 0, SIZE_NONE, 0, - cris_dstep_logshift_mstep_neg_not_op}, - - {"lz", 0x0730, 0x08C0, "r,R", 0, SIZE_NONE, - cris_ver_v3p, - cris_not_implemented_op}, - - {"mcp", 0x07f0, 0x0800, "P,r", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"move", 0x0640, 0x0980, "m r,R", 0, SIZE_NONE, 0, - cris_reg_mode_add_sub_cmp_and_or_move_op}, - - {"move", 0x0A40, 0x0180, "m s,R", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"move", 0x0A40, 0x0180, "m S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"move", 0x0630, 0x09c0, "r,P", 0, SIZE_NONE, 0, - cris_move_to_preg_op}, - - {"move", 0x0670, 0x0980, "P,r", 0, SIZE_NONE, 0, - cris_reg_mode_move_from_preg_op}, - - {"move", 0x0BC0, 0x0000, "m R,y", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"move", 0x0BC0, 0x0000, "m D,S", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"move", - MOVE_M_TO_PREG_OPCODE, MOVE_M_TO_PREG_ZBITS, - "s,P", 0, SIZE_SPEC_REG, 0, - cris_move_to_preg_op}, - - {"move", 0x0A30, 0x01c0, "S,P", 0, SIZE_NONE, - cris_ver_v0_10, - cris_move_to_preg_op}, - - {"move", 0x0A70, 0x0180, "P,y", 0, SIZE_SPEC_REG, 0, - cris_none_reg_mode_move_from_preg_op}, - - {"move", 0x0A70, 0x0180, "P,S", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_move_from_preg_op}, - - {"move", 0x0B70, 0x0480, "r,T", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"move", 0x0F70, 0x0080, "T,r", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"movem", 0x0BF0, 0x0000, "R,y", 0, SIZE_FIX_32, 0, - cris_move_reg_to_mem_movem_op}, - - {"movem", 0x0BF0, 0x0000, "D,S", 0, SIZE_NONE, - cris_ver_v0_10, - cris_move_reg_to_mem_movem_op}, - - {"movem", 0x0BB0, 0x0040, "s,R", 0, SIZE_FIX_32, 0, - cris_move_mem_to_reg_movem_op}, - - {"movem", 0x0BB0, 0x0040, "S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_move_mem_to_reg_movem_op}, - - {"moveq", 0x0240, 0x0D80, "i,R", 0, SIZE_NONE, 0, - cris_quick_mode_and_cmp_move_or_op}, - - {"movs", 0x0460, 0x0B80, "z r,R", 0, SIZE_NONE, 0, - cris_reg_mode_add_sub_cmp_and_or_move_op}, - - /* FIXME: SIZE_FIELD_SIGNED and all necessary changes. 
*/ - {"movs", 0x0860, 0x0380, "z s,R", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"movs", 0x0860, 0x0380, "z S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"movu", 0x0440, 0x0Ba0, "z r,R", 0, SIZE_NONE, 0, - cris_reg_mode_add_sub_cmp_and_or_move_op}, - - /* FIXME: SIZE_FIELD_UNSIGNED and all necessary changes. */ - {"movu", 0x0840, 0x03a0, "z s,R", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"movu", 0x0840, 0x03a0, "z S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"mstep", 0x07f0, 0x0800, "r,R", 0, SIZE_NONE, - cris_ver_v0_10, - cris_dstep_logshift_mstep_neg_not_op}, - - {"muls", 0x0d00, 0x02c0, "m r,R", 0, SIZE_NONE, - cris_ver_v10p, - cris_muls_op}, - - {"mulu", 0x0900, 0x06c0, "m r,R", 0, SIZE_NONE, - cris_ver_v10p, - cris_mulu_op}, - - {"neg", 0x0580, 0x0A40, "m r,R", 0, SIZE_NONE, 0, - cris_dstep_logshift_mstep_neg_not_op}, - - {"nop", NOP_OPCODE, NOP_Z_BITS, "", 0, SIZE_NONE, - cris_ver_v0_10, - cris_btst_nop_op}, - - {"nop", NOP_OPCODE_V32, NOP_Z_BITS_V32, "", 0, SIZE_NONE, - cris_ver_v32p, - cris_btst_nop_op}, - - {"not", 0x8770, 0x7880, "r", 0, SIZE_NONE, 0, - cris_dstep_logshift_mstep_neg_not_op}, - - {"or", 0x0740, 0x0880, "m r,R", 0, SIZE_NONE, 0, - cris_reg_mode_add_sub_cmp_and_or_move_op}, - - {"or", 0x0B40, 0x0080, "m s,R", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"or", 0x0B40, 0x0080, "m S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"or", 0x0B40, 0x0480, "m S,R,r", 0, SIZE_NONE, - cris_ver_v0_10, - cris_three_operand_add_sub_cmp_and_or_op}, - - {"orq", 0x0340, 0x0C80, "i,R", 0, SIZE_NONE, 0, - cris_quick_mode_and_cmp_move_or_op}, - - {"pop", 0x0E6E, 0x0191, "!R", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"pop", 0x0e3e, 0x01c1, "!P", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_move_from_preg_op}, - - {"push", 0x0FEE, 0x0011, "BR", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"push", 0x0E7E, 0x0181, "BP", 0, SIZE_NONE, - cris_ver_v0_10, - cris_move_to_preg_op}, - - {"rbf", 0x3b30, 0xc0c0, "y", 0, SIZE_NONE, - cris_ver_v10, - cris_not_implemented_op}, - - {"rbf", 0x3b30, 0xc0c0, "S", 0, SIZE_NONE, - cris_ver_v10, - cris_not_implemented_op}, - - {"rfe", 0x2930, 0xD6CF, "", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"rfg", 0x4930, 0xB6CF, "", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"rfn", 0x5930, 0xA6CF, "", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - {"ret", 0xB67F, 0x4980, "", 1, SIZE_NONE, - cris_ver_v0_10, - cris_reg_mode_move_from_preg_op}, - - {"ret", 0xB9F0, 0x460F, "", 1, SIZE_NONE, - cris_ver_v32p, - cris_reg_mode_move_from_preg_op}, - - {"retb", 0xe67f, 0x1980, "", 1, SIZE_NONE, - cris_ver_v0_10, - cris_reg_mode_move_from_preg_op}, - - {"rete", 0xA9F0, 0x560F, "", 1, SIZE_NONE, - cris_ver_v32p, - cris_reg_mode_move_from_preg_op}, - - {"reti", 0xA67F, 0x5980, "", 1, SIZE_NONE, - cris_ver_v0_10, - cris_reg_mode_move_from_preg_op}, - - {"retn", 0xC9F0, 0x360F, "", 1, SIZE_NONE, - cris_ver_v32p, - cris_reg_mode_move_from_preg_op}, - - {"sbfs", 0x3b70, 0xc080, "y", 0, SIZE_NONE, - cris_ver_v10, - cris_not_implemented_op}, - - {"sbfs", 0x3b70, 0xc080, "S", 0, SIZE_NONE, - cris_ver_v10, - cris_not_implemented_op}, - - {"sa", - 0x0530+CC_A*0x1000, - 0x0AC0+(0xf-CC_A)*0x1000, "r", 0, SIZE_NONE, 
0, - cris_scc_op}, - - {"ssb", - 0x0530+CC_EXT*0x1000, - 0x0AC0+(0xf-CC_EXT)*0x1000, "r", 0, SIZE_NONE, - cris_ver_v32p, - cris_scc_op}, - - {"scc", - 0x0530+CC_CC*0x1000, - 0x0AC0+(0xf-CC_CC)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"scs", - 0x0530+CC_CS*0x1000, - 0x0AC0+(0xf-CC_CS)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"seq", - 0x0530+CC_EQ*0x1000, - 0x0AC0+(0xf-CC_EQ)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"setf", 0x05b0, 0x0A40, "f", 0, SIZE_NONE, 0, - cris_ax_ei_setf_op}, - - {"sfe", 0x3930, 0xC6CF, "", 0, SIZE_NONE, - cris_ver_v32p, - cris_not_implemented_op}, - - /* Need to have "swf" in front of "sext" so it is the one displayed in - disassembly. */ - {"swf", - 0x0530+CC_EXT*0x1000, - 0x0AC0+(0xf-CC_EXT)*0x1000, "r", 0, SIZE_NONE, - cris_ver_v10, - cris_scc_op}, - - {"sext", - 0x0530+CC_EXT*0x1000, - 0x0AC0+(0xf-CC_EXT)*0x1000, "r", 0, SIZE_NONE, - cris_ver_v0_3, - cris_scc_op}, - - {"sge", - 0x0530+CC_GE*0x1000, - 0x0AC0+(0xf-CC_GE)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"sgt", - 0x0530+CC_GT*0x1000, - 0x0AC0+(0xf-CC_GT)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"shi", - 0x0530+CC_HI*0x1000, - 0x0AC0+(0xf-CC_HI)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"shs", - 0x0530+CC_HS*0x1000, - 0x0AC0+(0xf-CC_HS)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"sle", - 0x0530+CC_LE*0x1000, - 0x0AC0+(0xf-CC_LE)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"slo", - 0x0530+CC_LO*0x1000, - 0x0AC0+(0xf-CC_LO)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"sls", - 0x0530+CC_LS*0x1000, - 0x0AC0+(0xf-CC_LS)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"slt", - 0x0530+CC_LT*0x1000, - 0x0AC0+(0xf-CC_LT)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"smi", - 0x0530+CC_MI*0x1000, - 0x0AC0+(0xf-CC_MI)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"sne", - 0x0530+CC_NE*0x1000, - 0x0AC0+(0xf-CC_NE)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"spl", - 0x0530+CC_PL*0x1000, - 0x0AC0+(0xf-CC_PL)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"sub", 0x0680, 0x0940, "m r,R", 0, SIZE_NONE, 0, - cris_reg_mode_add_sub_cmp_and_or_move_op}, - - {"sub", 0x0a80, 0x0140, "m s,R", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"sub", 0x0a80, 0x0140, "m S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"sub", 0x0a80, 0x0540, "m S,R,r", 0, SIZE_NONE, - cris_ver_v0_10, - cris_three_operand_add_sub_cmp_and_or_op}, - - {"subq", 0x0280, 0x0d40, "I,R", 0, SIZE_NONE, 0, - cris_quick_mode_add_sub_op}, - - {"subs", 0x04a0, 0x0b40, "z r,R", 0, SIZE_NONE, 0, - cris_reg_mode_add_sub_cmp_and_or_move_op}, - - /* FIXME: SIZE_FIELD_SIGNED and all necessary changes. */ - {"subs", 0x08a0, 0x0340, "z s,R", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"subs", 0x08a0, 0x0340, "z S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"subs", 0x08a0, 0x0740, "z S,R,r", 0, SIZE_NONE, - cris_ver_v0_10, - cris_three_operand_add_sub_cmp_and_or_op}, - - {"subu", 0x0480, 0x0b60, "z r,R", 0, SIZE_NONE, 0, - cris_reg_mode_add_sub_cmp_and_or_move_op}, - - /* FIXME: SIZE_FIELD_UNSIGNED and all necessary changes. 
*/ - {"subu", 0x0880, 0x0360, "z s,R", 0, SIZE_FIELD, 0, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"subu", 0x0880, 0x0360, "z S,D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_add_sub_cmp_and_or_move_op}, - - {"subu", 0x0880, 0x0760, "z S,R,r", 0, SIZE_NONE, - cris_ver_v0_10, - cris_three_operand_add_sub_cmp_and_or_op}, - - {"svc", - 0x0530+CC_VC*0x1000, - 0x0AC0+(0xf-CC_VC)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - {"svs", - 0x0530+CC_VS*0x1000, - 0x0AC0+(0xf-CC_VS)*0x1000, "r", 0, SIZE_NONE, 0, - cris_scc_op}, - - /* The insn "swapn" is the same as "not" and will be disassembled as - such, but the swap* family of mnmonics are generally v8-and-higher - only, so count it in. */ - {"swapn", 0x8770, 0x7880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapw", 0x4770, 0xb880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapnw", 0xc770, 0x3880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapb", 0x2770, 0xd880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapnb", 0xA770, 0x5880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapwb", 0x6770, 0x9880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapnwb", 0xE770, 0x1880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapr", 0x1770, 0xe880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapnr", 0x9770, 0x6880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapwr", 0x5770, 0xa880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapnwr", 0xd770, 0x2880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapbr", 0x3770, 0xc880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapnbr", 0xb770, 0x4880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapwbr", 0x7770, 0x8880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"swapnwbr", 0xf770, 0x0880, "r", 0, SIZE_NONE, - cris_ver_v8p, - cris_not_implemented_op}, - - {"test", 0x0640, 0x0980, "m D", 0, SIZE_NONE, - cris_ver_v0_10, - cris_reg_mode_test_op}, - - {"test", 0x0b80, 0xf040, "m y", 0, SIZE_FIELD, 0, - cris_none_reg_mode_clear_test_op}, - - {"test", 0x0b80, 0xf040, "m S", 0, SIZE_NONE, - cris_ver_v0_10, - cris_none_reg_mode_clear_test_op}, - - {"xor", 0x07B0, 0x0840, "r,R", 0, SIZE_NONE, 0, - cris_xor_op}, - - {NULL, 0, 0, NULL, 0, 0, 0, cris_not_implemented_op} -}; - -/* Condition-names, indexed by the CC_* numbers as found in cris.h. */ -const char * const -cris_cc_strings[] = -{ - "hs", - "lo", - "ne", - "eq", - "vc", - "vs", - "pl", - "mi", - "ls", - "hi", - "ge", - "lt", - "gt", - "le", - "a", - /* This is a placeholder. In v0, this would be "ext". In v32, this - is "sb". */ - "wf" -}; - -/* - * Local variables: - * eval: (c-set-style "gnu") - * indent-tabs-mode: t - * End: - */ - - -/* No instruction will be disassembled longer than this. In theory, and - in silicon, address prefixes can be cascaded. In practice, cascading - is not used by GCC, and not supported by the assembler. */ -#ifndef MAX_BYTES_PER_CRIS_INSN -#define MAX_BYTES_PER_CRIS_INSN 8 -#endif - -/* Whether or not to decode prefixes, folding it into the following - instruction. FIXME: Make this optional later. */ -#ifndef PARSE_PREFIX -#define PARSE_PREFIX 1 -#endif - -/* Sometimes we prefix all registers with this character. 
*/ -#define REGISTER_PREFIX_CHAR '$' - -/* Whether or not to trace the following sequence: - sub* X,r%d - bound* Y,r%d - adds.w [pc+r%d.w],pc - - This is the assembly form of a switch-statement in C. - The "sub is optional. If there is none, then X will be zero. - X is the value of the first case, - Y is the number of cases (including default). - - This results in case offsets printed on the form: - case N: -> case_address - where N is an estimation on the corresponding 'case' operand in C, - and case_address is where execution of that case continues after the - sequence presented above. - - The old style of output was to print the offsets as instructions, - which made it hard to follow "case"-constructs in the disassembly, - and caused a lot of annoying warnings about undefined instructions. - - FIXME: Make this optional later. */ -#ifndef TRACE_CASE -#define TRACE_CASE (disdata->trace_case) -#endif - -enum cris_disass_family - { cris_dis_v0_v10, cris_dis_common_v10_v32, cris_dis_v32 }; - -/* Stored in the disasm_info->private_data member. */ -struct cris_disasm_data -{ - /* Whether to print something less confusing if we find something - matching a switch-construct. */ - bfd_boolean trace_case; - - /* Whether this code is flagged as crisv32. FIXME: Should be an enum - that includes "compatible". */ - enum cris_disass_family distype; -}; - -/* Value of first element in switch. */ -static long case_offset = 0; - -/* How many more case-offsets to print. */ -static long case_offset_counter = 0; - -/* Number of case offsets. */ -static long no_of_case_offsets = 0; - -/* Candidate for next case_offset. */ -static long last_immediate = 0; - -static int cris_constraint - (const char *, unsigned, unsigned, struct cris_disasm_data *); - -/* Parse disassembler options and store state in info. FIXME: For the - time being, we abuse static variables. */ - -static void -cris_parse_disassembler_options (struct cris_disasm_data *disdata, - char *disassembler_options, - enum cris_disass_family distype) -{ - /* Default true. */ - disdata->trace_case - = (disassembler_options == NULL - || (strcmp (disassembler_options, "nocase") != 0)); - - disdata->distype = distype; -} - -static const struct cris_spec_reg * -spec_reg_info (unsigned int sreg, enum cris_disass_family distype) -{ - int i; - - for (i = 0; cris_spec_regs[i].name != NULL; i++) - { - if (cris_spec_regs[i].number == sreg) - { - if (distype == cris_dis_v32) - switch (cris_spec_regs[i].applicable_version) - { - case cris_ver_warning: - case cris_ver_version_all: - case cris_ver_v3p: - case cris_ver_v8p: - case cris_ver_v10p: - case cris_ver_v32p: - /* No ambiguous sizes or register names with CRISv32. */ - if (cris_spec_regs[i].warning == NULL) - return &cris_spec_regs[i]; - default: - ; - } - else if (cris_spec_regs[i].applicable_version != cris_ver_v32p) - return &cris_spec_regs[i]; - } - } - - return NULL; -} - -/* Return the number of bits in the argument. */ - -static int -number_of_bits (unsigned int val) -{ - int bits; - - for (bits = 0; val != 0; val &= val - 1) - bits++; - - return bits; -} - -/* Get an entry in the opcode-table. */ - -static const struct cris_opcode * -get_opcode_entry (unsigned int insn, - unsigned int prefix_insn, - struct cris_disasm_data *disdata) -{ - /* For non-prefixed insns, we keep a table of pointers, indexed by the - insn code. Each entry is initialized when found to be NULL. 
*/ - static const struct cris_opcode **opc_table = NULL; - - const struct cris_opcode *max_matchedp = NULL; - const struct cris_opcode **prefix_opc_table = NULL; - - /* We hold a table for each prefix that need to be handled differently. */ - static const struct cris_opcode **dip_prefixes = NULL; - static const struct cris_opcode **bdapq_m1_prefixes = NULL; - static const struct cris_opcode **bdapq_m2_prefixes = NULL; - static const struct cris_opcode **bdapq_m4_prefixes = NULL; - static const struct cris_opcode **rest_prefixes = NULL; - - /* Allocate and clear the opcode-table. */ - if (opc_table == NULL) - { - opc_table = g_new0(const struct cris_opcode *, 65536); - dip_prefixes = g_new0(const struct cris_opcode *, 65536); - bdapq_m1_prefixes = g_new0(const struct cris_opcode *, 65536); - bdapq_m2_prefixes = g_new0(const struct cris_opcode *, 65536); - bdapq_m4_prefixes = g_new0(const struct cris_opcode *, 65536); - rest_prefixes = g_new0(const struct cris_opcode *, 65536); - } - - /* Get the right table if this is a prefix. - This code is connected to cris_constraints in that it knows what - prefixes play a role in recognition of patterns; the necessary - state is reflected by which table is used. If constraints - involving match or non-match of prefix insns are changed, then this - probably needs changing too. */ - if (prefix_insn != NO_CRIS_PREFIX) - { - const struct cris_opcode *popcodep - = (opc_table[prefix_insn] != NULL - ? opc_table[prefix_insn] - : get_opcode_entry (prefix_insn, NO_CRIS_PREFIX, disdata)); - - if (popcodep == NULL) - return NULL; - - if (popcodep->match == BDAP_QUICK_OPCODE) - { - /* Since some offsets are recognized with "push" macros, we - have to have different tables for them. */ - int offset = (prefix_insn & 255); - - if (offset > 127) - offset -= 256; - - switch (offset) - { - case -4: - prefix_opc_table = bdapq_m4_prefixes; - break; - - case -2: - prefix_opc_table = bdapq_m2_prefixes; - break; - - case -1: - prefix_opc_table = bdapq_m1_prefixes; - break; - - default: - prefix_opc_table = rest_prefixes; - break; - } - } - else if (popcodep->match == DIP_OPCODE) - /* We don't allow postincrement when the prefix is DIP, so use a - different table for DIP. 
*/ - prefix_opc_table = dip_prefixes; - else - prefix_opc_table = rest_prefixes; - } - - if (prefix_insn != NO_CRIS_PREFIX - && prefix_opc_table[insn] != NULL) - max_matchedp = prefix_opc_table[insn]; - else if (prefix_insn == NO_CRIS_PREFIX && opc_table[insn] != NULL) - max_matchedp = opc_table[insn]; - else - { - const struct cris_opcode *opcodep; - int max_level_of_match = -1; - - for (opcodep = cris_opcodes; - opcodep->name != NULL; - opcodep++) - { - int level_of_match; - - if (disdata->distype == cris_dis_v32) - { - switch (opcodep->applicable_version) - { - case cris_ver_version_all: - break; - - case cris_ver_v0_3: - case cris_ver_v0_10: - case cris_ver_v3_10: - case cris_ver_sim_v0_10: - case cris_ver_v8_10: - case cris_ver_v10: - case cris_ver_warning: - continue; - - case cris_ver_v3p: - case cris_ver_v8p: - case cris_ver_v10p: - case cris_ver_v32p: - break; - - case cris_ver_v8: - abort (); - default: - abort (); - } - } - else - { - switch (opcodep->applicable_version) - { - case cris_ver_version_all: - case cris_ver_v0_3: - case cris_ver_v3p: - case cris_ver_v0_10: - case cris_ver_v8p: - case cris_ver_v8_10: - case cris_ver_v10: - case cris_ver_sim_v0_10: - case cris_ver_v10p: - case cris_ver_warning: - break; - - case cris_ver_v32p: - continue; - - case cris_ver_v8: - abort (); - default: - abort (); - } - } - - /* We give a double lead for bits matching the template in - cris_opcodes. Not even, because then "move p8,r10" would - be given 2 bits lead over "clear.d r10". When there's a - tie, the first entry in the table wins. This is - deliberate, to avoid a more complicated recognition - formula. */ - if ((opcodep->match & insn) == opcodep->match - && (opcodep->lose & insn) == 0 - && ((level_of_match - = cris_constraint (opcodep->args, - insn, - prefix_insn, - disdata)) - >= 0) - && ((level_of_match - += 2 * number_of_bits (opcodep->match - | opcodep->lose)) - > max_level_of_match)) - { - max_matchedp = opcodep; - max_level_of_match = level_of_match; - - /* If there was a full match, never mind looking - further. */ - if (level_of_match >= 2 * 16) - break; - } - } - /* Fill in the new entry. - - If there are changes to the opcode-table involving prefixes, and - disassembly then does not work correctly, try removing the - else-clause below that fills in the prefix-table. If that - helps, you need to change the prefix_opc_table setting above, or - something related. */ - if (prefix_insn == NO_CRIS_PREFIX) - opc_table[insn] = max_matchedp; - else - prefix_opc_table[insn] = max_matchedp; - } - - return max_matchedp; -} - -/* Return -1 if the constraints of a bitwise-matched instruction say - that there is no match. Otherwise return a nonnegative number - indicating the confidence in the match (higher is better). */ - -static int -cris_constraint (const char *cs, - unsigned int insn, - unsigned int prefix_insn, - struct cris_disasm_data *disdata) -{ - int retval = 0; - int tmp; - int prefix_ok = 0; - const char *s; - - for (s = cs; *s; s++) - switch (*s) - { - case '!': - /* Do not recognize "pop" if there's a prefix and then only for - v0..v10. */ - if (prefix_insn != NO_CRIS_PREFIX - || disdata->distype != cris_dis_v0_v10) - return -1; - break; - - case 'U': - /* Not recognized at disassembly. */ - return -1; - - case 'M': - /* Size modifier for "clear", i.e. special register 0, 4 or 8. - Check that it is one of them. Only special register 12 could - be mismatched, but checking for matches is more logical than - checking for mismatches when there are only a few cases. 
*/ - tmp = ((insn >> 12) & 0xf); - if (tmp != 0 && tmp != 4 && tmp != 8) - return -1; - break; - - case 'm': - if ((insn & 0x30) == 0x30) - return -1; - break; - - case 'S': - /* A prefix operand without side-effect. */ - if (prefix_insn != NO_CRIS_PREFIX && (insn & 0x400) == 0) - { - prefix_ok = 1; - break; - } - else - return -1; - - case 's': - case 'y': - case 'Y': - /* If this is a prefixed insn with postincrement (side-effect), - the prefix must not be DIP. */ - if (prefix_insn != NO_CRIS_PREFIX) - { - if (insn & 0x400) - { - const struct cris_opcode *prefix_opcodep - = get_opcode_entry (prefix_insn, NO_CRIS_PREFIX, disdata); - - if (prefix_opcodep->match == DIP_OPCODE) - return -1; - } - - prefix_ok = 1; - } - break; - - case 'B': - /* If we don't fall through, then the prefix is ok. */ - prefix_ok = 1; - - /* A "push" prefix. Check for valid "push" size. - In case of special register, it may be != 4. */ - if (prefix_insn != NO_CRIS_PREFIX) - { - /* Match the prefix insn to BDAPQ. */ - const struct cris_opcode *prefix_opcodep - = get_opcode_entry (prefix_insn, NO_CRIS_PREFIX, disdata); - - if (prefix_opcodep->match == BDAP_QUICK_OPCODE) - { - int pushsize = (prefix_insn & 255); - - if (pushsize > 127) - pushsize -= 256; - - if (s[1] == 'P') - { - unsigned int spec_reg = (insn >> 12) & 15; - const struct cris_spec_reg *sregp - = spec_reg_info (spec_reg, disdata->distype); - - /* For a special-register, the "prefix size" must - match the size of the register. */ - if (sregp && sregp->reg_size == (unsigned int) -pushsize) - break; - } - else if (s[1] == 'R') - { - if ((insn & 0x30) == 0x20 && pushsize == -4) - break; - } - /* FIXME: Should abort here; next constraint letter - *must* be 'P' or 'R'. */ - } - } - return -1; - - case 'D': - retval = (((insn >> 12) & 15) == (insn & 15)); - if (!retval) - return -1; - else - retval += 4; - break; - - case 'P': - { - const struct cris_spec_reg *sregp - = spec_reg_info ((insn >> 12) & 15, disdata->distype); - - /* Since we match four bits, we will give a value of 4-1 = 3 - in a match. If there is a corresponding exact match of a - special register in another pattern, it will get a value of - 4, which will be higher. This should be correct in that an - exact pattern would match better than a general pattern. - - Note that there is a reason for not returning zero; the - pattern for "clear" is partly matched in the bit-pattern - (the two lower bits must be zero), while the bit-pattern - for a move from a special register is matched in the - register constraint. */ - - if (sregp != NULL) - { - retval += 3; - break; - } - else - return -1; - } - } - - if (prefix_insn != NO_CRIS_PREFIX && ! prefix_ok) - return -1; - - return retval; -} - -/* Format number as hex with a leading "0x" into outbuffer. */ - -static char * -format_hex (unsigned long number, - char *outbuffer, - struct cris_disasm_data *disdata) -{ - /* Truncate negative numbers on >32-bit hosts. */ - number &= 0xffffffff; - - sprintf (outbuffer, "0x%lx", number); - - /* Save this value for the "case" support. */ - if (TRACE_CASE) - last_immediate = number; - - return outbuffer + strlen (outbuffer); -} - -/* Format number as decimal into outbuffer. Parameter signedp says - whether the number should be formatted as signed (!= 0) or - unsigned (== 0). */ - -static char * -format_dec (long number, char *outbuffer, size_t outsize, int signedp) -{ - last_immediate = number; - snprintf (outbuffer, outsize, signedp ? 
"%ld" : "%lu", number); - - return outbuffer + strlen (outbuffer); -} - -/* Format the name of the general register regno into outbuffer. */ - -static char * -format_reg (struct cris_disasm_data *disdata, - int regno, - char *outbuffer_start, - bfd_boolean with_reg_prefix) -{ - char *outbuffer = outbuffer_start; - - if (with_reg_prefix) - *outbuffer++ = REGISTER_PREFIX_CHAR; - - switch (regno) - { - case 15: - /* For v32, there is no context in which we output PC. */ - if (disdata->distype == cris_dis_v32) - strcpy (outbuffer, "acr"); - else - strcpy (outbuffer, "pc"); - break; - - case 14: - strcpy (outbuffer, "sp"); - break; - - default: - sprintf (outbuffer, "r%d", regno); - break; - } - - return outbuffer_start + strlen (outbuffer_start); -} - -/* Format the name of a support register into outbuffer. */ - -static char * -format_sup_reg (unsigned int regno, - char *outbuffer_start, - bfd_boolean with_reg_prefix) -{ - char *outbuffer = outbuffer_start; - int i; - - if (with_reg_prefix) - *outbuffer++ = REGISTER_PREFIX_CHAR; - - for (i = 0; cris_support_regs[i].name != NULL; i++) - if (cris_support_regs[i].number == regno) - { - sprintf (outbuffer, "%s", cris_support_regs[i].name); - return outbuffer_start + strlen (outbuffer_start); - } - - /* There's supposed to be register names covering all numbers, though - some may be generic names. */ - sprintf (outbuffer, "format_sup_reg-BUG"); - return outbuffer_start + strlen (outbuffer_start); -} - -/* Return the length of an instruction. */ - -static unsigned -bytes_to_skip (unsigned int insn, - const struct cris_opcode *matchedp, - enum cris_disass_family distype, - const struct cris_opcode *prefix_matchedp) -{ - /* Each insn is a word plus "immediate" operands. */ - unsigned to_skip = 2; - const char *template = matchedp->args; - const char *s; - - for (s = template; *s; s++) - if ((*s == 's' || *s == 'N' || *s == 'Y') - && (insn & 0x400) && (insn & 15) == 15 - && prefix_matchedp == NULL) - { - /* Immediate via [pc+], so we have to check the size of the - operand. */ - int mode_size = 1 << ((insn >> 4) & (*template == 'z' ? 1 : 3)); - - if (matchedp->imm_oprnd_size == SIZE_FIX_32) - to_skip += 4; - else if (matchedp->imm_oprnd_size == SIZE_SPEC_REG) - { - const struct cris_spec_reg *sregp - = spec_reg_info ((insn >> 12) & 15, distype); - - /* FIXME: Improve error handling; should have been caught - earlier. */ - if (sregp == NULL) - return 2; - - /* PC is incremented by two, not one, for a byte. Except on - CRISv32, where constants are always DWORD-size for - special registers. */ - to_skip += - distype == cris_dis_v32 ? 4 : (sregp->reg_size + 1) & ~1; - } - else - to_skip += (mode_size + 1) & ~1; - } - else if (*s == 'n') - to_skip += 4; - else if (*s == 'b') - to_skip += 2; - - return to_skip; -} - -/* Print condition code flags. */ - -static char * -print_flags (struct cris_disasm_data *disdata, unsigned int insn, char *cp) -{ - /* Use the v8 (Etrax 100) flag definitions for disassembly. - The differences with v0 (Etrax 1..4) vs. Svinto are: - v0 'd' <=> v8 'm' - v0 'e' <=> v8 'b'. - FIXME: Emit v0..v3 flag names somehow. */ - static const char v8_fnames[] = "cvznxibm"; - static const char v32_fnames[] = "cvznxiup"; - const char *fnames - = disdata->distype == cris_dis_v32 ? 
v32_fnames : v8_fnames; - - unsigned char flagbits = (((insn >> 8) & 0xf0) | (insn & 15)); - int i; - - for (i = 0; i < 8; i++) - if (flagbits & (1 << i)) - *cp++ = fnames[i]; - - return cp; -} - -#define FORMAT_DEC(number, tp, signedp) \ - format_dec (number, tp, ({ \ - assert(tp >= temp && tp <= temp + sizeof(temp)); \ - temp + sizeof(temp) - tp; \ - }), signedp) - -/* Print out an insn with its operands, and update the info->insn_type - fields. The prefix_opcodep and the rest hold a prefix insn that is - supposed to be output as an address mode. */ - -static void -print_with_operands (const struct cris_opcode *opcodep, - unsigned int insn, - unsigned char *buffer, - bfd_vma addr, - disassemble_info *info, - /* If a prefix insn was before this insn (and is supposed - to be output as an address), here is a description of - it. */ - const struct cris_opcode *prefix_opcodep, - unsigned int prefix_insn, - unsigned char *prefix_buffer, - bfd_boolean with_reg_prefix) -{ - /* Get a buffer of somewhat reasonable size where we store - intermediate parts of the insn. */ - char temp[sizeof (".d [$r13=$r12-2147483648],$r10") * 2]; - char *tp = temp; - static const char mode_char[] = "bwd?"; - const char *s; - const char *cs; - struct cris_disasm_data *disdata - = (struct cris_disasm_data *) info->private_data; - - /* Print out the name first thing we do. */ - (*info->fprintf_func) (info->stream, "%s", opcodep->name); - - cs = opcodep->args; - s = cs; - - /* Ignore any prefix indicator. */ - if (*s == 'p') - s++; - - if (*s == 'm' || *s == 'M' || *s == 'z') - { - *tp++ = '.'; - - /* Get the size-letter. */ - *tp++ = *s == 'M' - ? (insn & 0x8000 ? 'd' - : insn & 0x4000 ? 'w' : 'b') - : mode_char[(insn >> 4) & (*s == 'z' ? 1 : 3)]; - - /* Ignore the size and the space character that follows. */ - s += 2; - } - - /* Add a space if this isn't a long-branch, because for those will add - the condition part of the name later. */ - if (opcodep->match != (BRANCH_PC_LOW + BRANCH_INCR_HIGH * 256)) - *tp++ = ' '; - - /* Fill in the insn-type if deducible from the name (and there's no - better way). */ - if (opcodep->name[0] == 'j') - { - if (CONST_STRNEQ (opcodep->name, "jsr")) - /* It's "jsr" or "jsrc". */ - info->insn_type = dis_jsr; - else - /* Any other jump-type insn is considered a branch. */ - info->insn_type = dis_branch; - } - - /* We might know some more fields right now. */ - info->branch_delay_insns = opcodep->delayed; - - /* Handle operands. */ - for (; *s; s++) - { - switch (*s) - { - case 'T': - tp = format_sup_reg ((insn >> 12) & 15, tp, with_reg_prefix); - break; - - case 'A': - if (with_reg_prefix) - *tp++ = REGISTER_PREFIX_CHAR; - *tp++ = 'a'; - *tp++ = 'c'; - *tp++ = 'r'; - break; - - case '[': - case ']': - case ',': - *tp++ = *s; - break; - - case '!': - /* Ignore at this point; used at earlier stages to avoid - recognition if there's a prefix at something that in other - ways looks like a "pop". */ - break; - - case 'd': - /* Ignore. This is an optional ".d " on the large one of - relaxable insns. */ - break; - - case 'B': - /* This was the prefix that made this a "push". We've already - handled it by recognizing it, so signal that the prefix is - handled by setting it to NULL. */ - prefix_opcodep = NULL; - break; - - case 'D': - case 'r': - tp = format_reg (disdata, insn & 15, tp, with_reg_prefix); - break; - - case 'R': - tp = format_reg (disdata, (insn >> 12) & 15, tp, with_reg_prefix); - break; - - case 'n': - { - /* Like N but pc-relative to the start of the insn. 
*/ - uint32_t number - = (buffer[2] + buffer[3] * 256 + buffer[4] * 65536 - + buffer[5] * 0x1000000 + addr); - - /* Finish off and output previous formatted bytes. */ - *tp = 0; - if (temp[0]) - (*info->fprintf_func) (info->stream, "%s", temp); - tp = temp; - - (*info->print_address_func) ((bfd_vma) number, info); - } - break; - - case 'u': - { - /* Like n but the offset is bits <3:0> in the instruction. */ - unsigned long number = (buffer[0] & 0xf) * 2 + addr; - - /* Finish off and output previous formatted bytes. */ - *tp = 0; - if (temp[0]) - (*info->fprintf_func) (info->stream, "%s", temp); - tp = temp; - - (*info->print_address_func) ((bfd_vma) number, info); - } - break; - - case 'N': - case 'y': - case 'Y': - case 'S': - case 's': - /* Any "normal" memory operand. */ - if ((insn & 0x400) && (insn & 15) == 15 && prefix_opcodep == NULL) - { - /* We're looking at [pc+], i.e. we need to output an immediate - number, where the size can depend on different things. */ - int32_t number; - int signedp - = ((*cs == 'z' && (insn & 0x20)) - || opcodep->match == BDAP_QUICK_OPCODE); - int nbytes; - - if (opcodep->imm_oprnd_size == SIZE_FIX_32) - nbytes = 4; - else if (opcodep->imm_oprnd_size == SIZE_SPEC_REG) - { - const struct cris_spec_reg *sregp - = spec_reg_info ((insn >> 12) & 15, disdata->distype); - - /* A NULL return should have been as a non-match earlier, - so catch it as an internal error in the error-case - below. */ - if (sregp == NULL) - /* Whatever non-valid size. */ - nbytes = 42; - else - /* PC is always incremented by a multiple of two. - For CRISv32, immediates are always 4 bytes for - special registers. */ - nbytes = disdata->distype == cris_dis_v32 - ? 4 : (sregp->reg_size + 1) & ~1; - } - else - { - int mode_size = 1 << ((insn >> 4) & (*cs == 'z' ? 1 : 3)); - - if (mode_size == 1) - nbytes = 2; - else - nbytes = mode_size; - } - - switch (nbytes) - { - case 1: - number = buffer[2]; - if (signedp && number > 127) - number -= 256; - break; - - case 2: - number = buffer[2] + buffer[3] * 256; - if (signedp && number > 32767) - number -= 65536; - break; - - case 4: - number - = buffer[2] + buffer[3] * 256 + buffer[4] * 65536 - + buffer[5] * 0x1000000; - break; - - default: - strcpy (tp, "bug"); - tp += 3; - number = 42; - } - - if ((*cs == 'z' && (insn & 0x20)) - || (opcodep->match == BDAP_QUICK_OPCODE - && (nbytes <= 2 || buffer[1 + nbytes] == 0))) - tp = FORMAT_DEC (number, tp, signedp); - else - { - unsigned int highbyte = (number >> 24) & 0xff; - - /* Either output this as an address or as a number. If it's - a dword with the same high-byte as the address of the - insn, assume it's an address, and also if it's a non-zero - non-0xff high-byte. If this is a jsr or a jump, then - it's definitely an address. */ - if (nbytes == 4 - && (highbyte == ((addr >> 24) & 0xff) - || (highbyte != 0 && highbyte != 0xff) - || info->insn_type == dis_branch - || info->insn_type == dis_jsr)) - { - /* Finish off and output previous formatted bytes. */ - *tp = 0; - tp = temp; - if (temp[0]) - (*info->fprintf_func) (info->stream, "%s", temp); - - (*info->print_address_func) ((bfd_vma) number, info); - - info->target = number; - } - else - tp = format_hex (number, tp, disdata); - } - } - else - { - /* Not an immediate number. Then this is a (possibly - prefixed) memory operand. */ - if (info->insn_type != dis_nonbranch) - { - int mode_size - = 1 << ((insn >> 4) - & (opcodep->args[0] == 'z' ? 
1 : 3)); - int size; - info->insn_type = dis_dref; - info->flags |= CRIS_DIS_FLAG_MEMREF; - - if (opcodep->imm_oprnd_size == SIZE_FIX_32) - size = 4; - else if (opcodep->imm_oprnd_size == SIZE_SPEC_REG) - { - const struct cris_spec_reg *sregp - = spec_reg_info ((insn >> 12) & 15, disdata->distype); - - /* FIXME: Improve error handling; should have been caught - earlier. */ - if (sregp == NULL) - size = 4; - else - size = sregp->reg_size; - } - else - size = mode_size; - - info->data_size = size; - } - - *tp++ = '['; - - if (prefix_opcodep - /* We don't match dip with a postincremented field - as a side-effect address mode. */ - && ((insn & 0x400) == 0 - || prefix_opcodep->match != DIP_OPCODE)) - { - if (insn & 0x400) - { - tp = format_reg (disdata, insn & 15, tp, with_reg_prefix); - *tp++ = '='; - } - - - /* We mainly ignore the prefix format string when the - address-mode syntax is output. */ - switch (prefix_opcodep->match) - { - case DIP_OPCODE: - /* It's [r], [r+] or [pc+]. */ - if ((prefix_insn & 0x400) && (prefix_insn & 15) == 15) - { - /* It's [pc+]. This cannot possibly be anything - but an address. */ - uint32_t number - = prefix_buffer[2] + prefix_buffer[3] * 256 - + prefix_buffer[4] * 65536 - + prefix_buffer[5] * 0x1000000; - - info->target = (bfd_vma) number; - - /* Finish off and output previous formatted - data. */ - *tp = 0; - tp = temp; - if (temp[0]) - (*info->fprintf_func) (info->stream, "%s", temp); - - (*info->print_address_func) ((bfd_vma) number, info); - } - else - { - /* For a memref in an address, we use target2. - In this case, target is zero. */ - info->flags - |= (CRIS_DIS_FLAG_MEM_TARGET2_IS_REG - | CRIS_DIS_FLAG_MEM_TARGET2_MEM); - - info->target2 = prefix_insn & 15; - - *tp++ = '['; - tp = format_reg (disdata, prefix_insn & 15, tp, - with_reg_prefix); - if (prefix_insn & 0x400) - *tp++ = '+'; - *tp++ = ']'; - } - break; - - case BDAP_QUICK_OPCODE: - { - int number; - - number = prefix_buffer[0]; - if (number > 127) - number -= 256; - - /* Output "reg+num" or, if num < 0, "reg-num". */ - tp = format_reg (disdata, (prefix_insn >> 12) & 15, tp, - with_reg_prefix); - if (number >= 0) - *tp++ = '+'; - tp = FORMAT_DEC (number, tp, 1); - - info->flags |= CRIS_DIS_FLAG_MEM_TARGET_IS_REG; - info->target = (prefix_insn >> 12) & 15; - info->target2 = (bfd_vma) number; - break; - } - - case BIAP_OPCODE: - /* Output "r+R.m". */ - tp = format_reg (disdata, prefix_insn & 15, tp, - with_reg_prefix); - *tp++ = '+'; - tp = format_reg (disdata, (prefix_insn >> 12) & 15, tp, - with_reg_prefix); - *tp++ = '.'; - *tp++ = mode_char[(prefix_insn >> 4) & 3]; - - info->flags - |= (CRIS_DIS_FLAG_MEM_TARGET2_IS_REG - | CRIS_DIS_FLAG_MEM_TARGET_IS_REG - - | ((prefix_insn & 0x8000) - ? CRIS_DIS_FLAG_MEM_TARGET2_MULT4 - : ((prefix_insn & 0x8000) - ? CRIS_DIS_FLAG_MEM_TARGET2_MULT2 : 0))); - - /* Is it the casejump? It's a "adds.w [pc+r%d.w],pc". */ - if (insn == 0xf83f && (prefix_insn & ~0xf000) == 0x55f) - /* Then start interpreting data as offsets. */ - case_offset_counter = no_of_case_offsets; - break; - - case BDAP_INDIR_OPCODE: - /* Output "r+s.m", or, if "s" is [pc+], "r+s" or - "r-s". */ - tp = format_reg (disdata, (prefix_insn >> 12) & 15, tp, - with_reg_prefix); - - if ((prefix_insn & 0x400) && (prefix_insn & 15) == 15) - { - int32_t number; - unsigned int nbytes; - - /* It's a value. Get its size. 
*/ - int mode_size = 1 << ((prefix_insn >> 4) & 3); - - if (mode_size == 1) - nbytes = 2; - else - nbytes = mode_size; - - switch (nbytes) - { - case 1: - number = prefix_buffer[2]; - if (number > 127) - number -= 256; - break; - - case 2: - number = prefix_buffer[2] + prefix_buffer[3] * 256; - if (number > 32767) - number -= 65536; - break; - - case 4: - number - = prefix_buffer[2] + prefix_buffer[3] * 256 - + prefix_buffer[4] * 65536 - + prefix_buffer[5] * 0x1000000; - break; - - default: - strcpy (tp, "bug"); - tp += 3; - number = 42; - } - - info->flags |= CRIS_DIS_FLAG_MEM_TARGET_IS_REG; - info->target2 = (bfd_vma) number; - - /* If the size is dword, then assume it's an - address. */ - if (nbytes == 4) - { - /* Finish off and output previous formatted - bytes. */ - *tp++ = '+'; - *tp = 0; - tp = temp; - (*info->fprintf_func) (info->stream, "%s", temp); - - (*info->print_address_func) ((bfd_vma) number, info); - } - else - { - if (number >= 0) - *tp++ = '+'; - tp = FORMAT_DEC (number, tp, 1); - } - } - else - { - /* Output "r+[R].m" or "r+[R+].m". */ - *tp++ = '+'; - *tp++ = '['; - tp = format_reg (disdata, prefix_insn & 15, tp, - with_reg_prefix); - if (prefix_insn & 0x400) - *tp++ = '+'; - *tp++ = ']'; - *tp++ = '.'; - *tp++ = mode_char[(prefix_insn >> 4) & 3]; - - info->flags - |= (CRIS_DIS_FLAG_MEM_TARGET2_IS_REG - | CRIS_DIS_FLAG_MEM_TARGET2_MEM - | CRIS_DIS_FLAG_MEM_TARGET_IS_REG - - | (((prefix_insn >> 4) == 2) - ? 0 - : (((prefix_insn >> 4) & 3) == 1 - ? CRIS_DIS_FLAG_MEM_TARGET2_MEM_WORD - : CRIS_DIS_FLAG_MEM_TARGET2_MEM_BYTE))); - } - break; - - default: - (*info->fprintf_func) (info->stream, "?prefix-bug"); - } - - /* To mark that the prefix is used, reset it. */ - prefix_opcodep = NULL; - } - else - { - tp = format_reg (disdata, insn & 15, tp, with_reg_prefix); - - info->flags |= CRIS_DIS_FLAG_MEM_TARGET_IS_REG; - info->target = insn & 15; - - if (insn & 0x400) - *tp++ = '+'; - } - *tp++ = ']'; - } - break; - - case 'x': - tp = format_reg (disdata, (insn >> 12) & 15, tp, with_reg_prefix); - *tp++ = '.'; - *tp++ = mode_char[(insn >> 4) & 3]; - break; - - case 'I': - tp = FORMAT_DEC (insn & 63, tp, 0); - break; - - case 'b': - { - int where = buffer[2] + buffer[3] * 256; - - if (where > 32767) - where -= 65536; - - where += addr + ((disdata->distype == cris_dis_v32) ? 0 : 4); - - if (insn == BA_PC_INCR_OPCODE) - info->insn_type = dis_branch; - else - info->insn_type = dis_condbranch; - - info->target = (bfd_vma) where; - - *tp = 0; - tp = temp; - (*info->fprintf_func) (info->stream, "%s%s ", - temp, cris_cc_strings[insn >> 12]); - - (*info->print_address_func) ((bfd_vma) where, info); - } - break; - - case 'c': - tp = FORMAT_DEC (insn & 31, tp, 0); - break; - - case 'C': - tp = FORMAT_DEC (insn & 15, tp, 0); - break; - - case 'o': - { - long offset = insn & 0xfe; - bfd_vma target; - - if (insn & 1) - offset |= ~0xff; - - if (opcodep->match == BA_QUICK_OPCODE) - info->insn_type = dis_branch; - else - info->insn_type = dis_condbranch; - - target = addr + ((disdata->distype == cris_dis_v32) ? 
0 : 2) + offset; - info->target = target; - *tp = 0; - tp = temp; - (*info->fprintf_func) (info->stream, "%s", temp); - (*info->print_address_func) (target, info); - } - break; - - case 'Q': - case 'O': - { - long number = buffer[0]; - - if (number > 127) - number = number - 256; - - tp = FORMAT_DEC (number, tp, 1); - *tp++ = ','; - tp = format_reg (disdata, (insn >> 12) & 15, tp, with_reg_prefix); - } - break; - - case 'f': - tp = print_flags (disdata, insn, tp); - break; - - case 'i': - tp = FORMAT_DEC ((insn & 32) ? (insn & 31) | ~31L : insn & 31, tp, 1); - break; - - case 'P': - { - const struct cris_spec_reg *sregp - = spec_reg_info ((insn >> 12) & 15, disdata->distype); - - if (sregp == NULL || sregp->name == NULL) - /* Should have been caught as a non-match earlier. */ - *tp++ = '?'; - else - { - if (with_reg_prefix) - *tp++ = REGISTER_PREFIX_CHAR; - strcpy (tp, sregp->name); - tp += strlen (tp); - } - } - break; - - default: - strcpy (tp, "???"); - tp += 3; - } - } - - *tp = 0; - - if (prefix_opcodep) - (*info->fprintf_func) (info->stream, " (OOPS unused prefix \"%s: %s\")", - prefix_opcodep->name, prefix_opcodep->args); - - (*info->fprintf_func) (info->stream, "%s", temp); - - /* Get info for matching case-tables, if we don't have any active. - We assume that the last constant seen is used; either in the insn - itself or in a "move.d const,rN, sub.d rN,rM"-like sequence. */ - if (TRACE_CASE && case_offset_counter == 0) - { - if (CONST_STRNEQ (opcodep->name, "sub")) - case_offset = last_immediate; - - /* It could also be an "add", if there are negative case-values. */ - else if (CONST_STRNEQ (opcodep->name, "add")) - /* The first case is the negated operand to the add. */ - case_offset = -last_immediate; - - /* A bound insn will tell us the number of cases. */ - else if (CONST_STRNEQ (opcodep->name, "bound")) - no_of_case_offsets = last_immediate + 1; - - /* A jump or jsr or branch breaks the chain of insns for a - case-table, so assume default first-case again. */ - else if (info->insn_type == dis_jsr - || info->insn_type == dis_branch - || info->insn_type == dis_condbranch) - case_offset = 0; - } -} - - -/* Print the CRIS instruction at address memaddr on stream. Returns - length of the instruction, in bytes. Prefix register names with `$' if - WITH_REG_PREFIX. */ - -static int -print_insn_cris_generic (bfd_vma memaddr, - disassemble_info *info, - bfd_boolean with_reg_prefix) -{ - int nbytes; - unsigned int insn; - const struct cris_opcode *matchedp; - int advance = 0; - struct cris_disasm_data *disdata - = (struct cris_disasm_data *) info->private_data; - - /* No instruction will be disassembled as longer than this number of - bytes; stacked prefixes will not be expanded. */ - unsigned char buffer[MAX_BYTES_PER_CRIS_INSN]; - unsigned char *bufp; - int status = 0; - bfd_vma addr; - - /* There will be an "out of range" error after the last instruction. - Reading pairs of bytes in decreasing number, we hope that we will get - at least the amount that we will consume. - - If we can't get any data, or we do not get enough data, we print - the error message. */ - - nbytes = info->buffer_length ? info->buffer_length - : MAX_BYTES_PER_CRIS_INSN; - nbytes = MIN(nbytes, MAX_BYTES_PER_CRIS_INSN); - status = (*info->read_memory_func) (memaddr, buffer, nbytes, info); - - /* If we did not get all we asked for, then clear the rest. - Hopefully this makes a reproducible result in case of errors. 
*/ - if (nbytes != MAX_BYTES_PER_CRIS_INSN) - memset (buffer + nbytes, 0, MAX_BYTES_PER_CRIS_INSN - nbytes); - - addr = memaddr; - bufp = buffer; - - /* Set some defaults for the insn info. */ - info->insn_info_valid = 1; - info->branch_delay_insns = 0; - info->data_size = 0; - info->insn_type = dis_nonbranch; - info->flags = 0; - info->target = 0; - info->target2 = 0; - - /* If we got any data, disassemble it. */ - if (nbytes != 0) - { - matchedp = NULL; - - insn = bufp[0] + bufp[1] * 256; - - /* If we're in a case-table, don't disassemble the offsets. */ - if (TRACE_CASE && case_offset_counter != 0) - { - info->insn_type = dis_noninsn; - advance += 2; - - /* If to print data as offsets, then shortcut here. */ - (*info->fprintf_func) (info->stream, "case %ld%s: -> ", - case_offset + no_of_case_offsets - - case_offset_counter, - case_offset_counter == 1 ? "/default" : - ""); - - (*info->print_address_func) ((bfd_vma) - ((short) (insn) - + (long) (addr - - (no_of_case_offsets - - case_offset_counter) - * 2)), info); - case_offset_counter--; - - /* The default case start (without a "sub" or "add") must be - zero. */ - if (case_offset_counter == 0) - case_offset = 0; - } - else if (insn == 0) - { - /* We're often called to disassemble zeroes. While this is a - valid "bcc .+2" insn, it is also useless enough and enough - of a nuiscance that we will just output "bcc .+2" for it - and signal it as a noninsn. */ - (*info->fprintf_func) (info->stream, - disdata->distype == cris_dis_v32 - ? "bcc ." : "bcc .+2"); - info->insn_type = dis_noninsn; - advance += 2; - } - else - { - const struct cris_opcode *prefix_opcodep = NULL; - unsigned char *prefix_buffer = bufp; - unsigned int prefix_insn = insn; - int prefix_size = 0; - - matchedp = get_opcode_entry (insn, NO_CRIS_PREFIX, disdata); - - /* Check if we're supposed to write out prefixes as address - modes and if this was a prefix. */ - if (matchedp != NULL && PARSE_PREFIX && matchedp->args[0] == 'p') - { - /* If it's a prefix, put it into the prefix vars and get the - main insn. */ - prefix_size = bytes_to_skip (prefix_insn, matchedp, - disdata->distype, NULL); - prefix_opcodep = matchedp; - - insn = bufp[prefix_size] + bufp[prefix_size + 1] * 256; - matchedp = get_opcode_entry (insn, prefix_insn, disdata); - - if (matchedp != NULL) - { - addr += prefix_size; - bufp += prefix_size; - advance += prefix_size; - } - else - { - /* The "main" insn wasn't valid, at least not when - prefixed. Put back things enough to output the - prefix insn only, as a normal insn. */ - matchedp = prefix_opcodep; - insn = prefix_insn; - prefix_opcodep = NULL; - } - } - - if (matchedp == NULL) - { - (*info->fprintf_func) (info->stream, "??0x%x", insn); - advance += 2; - - info->insn_type = dis_noninsn; - } - else - { - advance - += bytes_to_skip (insn, matchedp, disdata->distype, - prefix_opcodep); - - /* The info_type and assorted fields will be set according - to the operands. */ - print_with_operands (matchedp, insn, bufp, addr, info, - prefix_opcodep, prefix_insn, - prefix_buffer, with_reg_prefix); - } - } - } - else - info->insn_type = dis_noninsn; - - /* If we read less than MAX_BYTES_PER_CRIS_INSN, i.e. we got an error - status when reading that much, and the insn decoding indicated a - length exceeding what we read, there is an error. */ - if (status != 0 && (nbytes == 0 || advance > nbytes)) - { - (*info->memory_error_func) (status, memaddr, info); - return -1; - } - - /* Max supported insn size with one folded prefix insn. 
*/ - info->bytes_per_line = MAX_BYTES_PER_CRIS_INSN; - - /* I would like to set this to a fixed value larger than the actual - number of bytes to print in order to avoid spaces between bytes, - but objdump.c (2.9.1) does not like that, so we print 16-bit - chunks, which is the next choice. */ - info->bytes_per_chunk = 2; - - /* Printing bytes in order of increasing addresses makes sense, - especially on a little-endian target. - This is completely the opposite of what you think; setting this to - BFD_ENDIAN_LITTLE will print bytes in order N..0 rather than the 0..N - we want. */ - info->display_endian = BFD_ENDIAN_BIG; - - return advance; -} - -/* Disassemble, prefixing register names with `$'. CRIS v0..v10. */ -static int -print_insn_cris_with_register_prefix (bfd_vma vma, - disassemble_info *info) -{ - struct cris_disasm_data disdata; - info->private_data = &disdata; - cris_parse_disassembler_options (&disdata, info->disassembler_options, - cris_dis_v0_v10); - return print_insn_cris_generic (vma, info, true); -} -/* Disassemble, prefixing register names with `$'. CRIS v32. */ - -static int -print_insn_crisv32_with_register_prefix (bfd_vma vma, - disassemble_info *info) -{ - struct cris_disasm_data disdata; - info->private_data = &disdata; - cris_parse_disassembler_options (&disdata, info->disassembler_options, - cris_dis_v32); - return print_insn_cris_generic (vma, info, true); -} - -#if 0 -/* Disassemble, prefixing register names with `$'. - Common v10 and v32 subset. */ - -static int -print_insn_crisv10_v32_with_register_prefix (bfd_vma vma, - disassemble_info *info) -{ - struct cris_disasm_data disdata; - info->private_data = &disdata; - cris_parse_disassembler_options (&disdata, info->disassembler_options, - cris_dis_common_v10_v32); - return print_insn_cris_generic (vma, info, true); -} - -/* Disassemble, no prefixes on register names. CRIS v0..v10. */ - -static int -print_insn_cris_without_register_prefix (bfd_vma vma, - disassemble_info *info) -{ - struct cris_disasm_data disdata; - info->private_data = &disdata; - cris_parse_disassembler_options (&disdata, info->disassembler_options, - cris_dis_v0_v10); - return print_insn_cris_generic (vma, info, false); -} - -/* Disassemble, no prefixes on register names. CRIS v32. */ - -static int -print_insn_crisv32_without_register_prefix (bfd_vma vma, - disassemble_info *info) -{ - struct cris_disasm_data disdata; - info->private_data = &disdata; - cris_parse_disassembler_options (&disdata, info->disassembler_options, - cris_dis_v32); - return print_insn_cris_generic (vma, info, false); -} - -/* Disassemble, no prefixes on register names. - Common v10 and v32 subset. */ - -static int -print_insn_crisv10_v32_without_register_prefix (bfd_vma vma, - disassemble_info *info) -{ - struct cris_disasm_data disdata; - info->private_data = &disdata; - cris_parse_disassembler_options (&disdata, info->disassembler_options, - cris_dis_common_v10_v32); - return print_insn_cris_generic (vma, info, false); -} -#endif - -int -print_insn_crisv10 (bfd_vma vma, - disassemble_info *info) -{ - return print_insn_cris_with_register_prefix(vma, info); -} - -int -print_insn_crisv32 (bfd_vma vma, - disassemble_info *info) -{ - return print_insn_crisv32_with_register_prefix(vma, info); -} - -/* Return a disassembler-function that prints registers with a `$' prefix, - or one that prints registers without a prefix. - FIXME: We should improve the solution to avoid the multitude of - functions seen above. 
*/ -#if 0 -disassembler_ftype -cris_get_disassembler (bfd *abfd) -{ - /* If there's no bfd in sight, we return what is valid as input in all - contexts if fed back to the assembler: disassembly *with* register - prefix. Unfortunately this will be totally wrong for v32. */ - if (abfd == NULL) - return print_insn_cris_with_register_prefix; - - if (bfd_get_symbol_leading_char (abfd) == 0) - { - if (bfd_get_mach (abfd) == bfd_mach_cris_v32) - return print_insn_crisv32_with_register_prefix; - if (bfd_get_mach (abfd) == bfd_mach_cris_v10_v32) - return print_insn_crisv10_v32_with_register_prefix; - - /* We default to v10. This may be specifically specified in the - bfd mach, but is also the default setting. */ - return print_insn_cris_with_register_prefix; - } - - if (bfd_get_mach (abfd) == bfd_mach_cris_v32) - return print_insn_crisv32_without_register_prefix; - if (bfd_get_mach (abfd) == bfd_mach_cris_v10_v32) - return print_insn_crisv10_v32_without_register_prefix; - return print_insn_cris_without_register_prefix; -} -#endif -/* Local variables: - eval: (c-set-style "gnu") - indent-tabs-mode: t - End: */ diff --git a/disas/disas-common.c b/disas/disas-common.c index de61f6d8a12..21c2f03430b 100644 --- a/disas/disas-common.c +++ b/disas/disas-common.c @@ -7,7 +7,6 @@ #include "disas/disas.h" #include "disas/capstone.h" #include "hw/core/cpu.h" -#include "exec/tswap.h" #include "disas-internal.h" @@ -61,15 +60,11 @@ void disas_initialize_debug_target(CPUDebug *s, CPUState *cpu) s->cpu = cpu; s->info.print_address_func = print_address; - if (target_words_bigendian()) { - s->info.endian = BFD_ENDIAN_BIG; - } else { - s->info.endian = BFD_ENDIAN_LITTLE; - } + s->info.endian = BFD_ENDIAN_UNKNOWN; - CPUClass *cc = CPU_GET_CLASS(cpu); - if (cc->disas_set_info) { - cc->disas_set_info(cpu, &s->info); + if (cpu->cc->disas_set_info) { + cpu->cc->disas_set_info(cpu, &s->info); + g_assert(s->info.endian != BFD_ENDIAN_UNKNOWN); } } diff --git a/disas/disas-mon.c b/disas/disas-mon.c index 37bf16ac797..9c693618c27 100644 --- a/disas/disas-mon.c +++ b/disas/disas-mon.c @@ -7,7 +7,7 @@ #include "qemu/osdep.h" #include "disas-internal.h" #include "disas/disas.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/core/cpu.h" #include "monitor/monitor.h" diff --git a/disas/hppa.c b/disas/hppa.c index 49e2231ae62..2b58434966c 100644 --- a/disas/hppa.c +++ b/disas/hppa.c @@ -606,7 +606,7 @@ struct pa_opcode In the args field, the following characters are unused: - ' " - / 34 6789:; ' + ' " - / 34 678 :; ' '@ C M [\] ' '` e g } ' @@ -650,6 +650,7 @@ Also these: | 6 bit field length at 19,27:31 (fixed extract/deposit) A 13 bit immediate at 18 (to support the BREAK instruction) ^ like b, but describes a control register + 9 like b, but describes a diagnose register ! 
sar (cr11) register D 26 bit immediate at 31 (to support the DIAG instruction) $ 9 bit immediate at 28 (to support POPBTS) @@ -1322,13 +1323,19 @@ static const struct pa_opcode pa_opcodes[] = { "fdce", 0x040012c0, 0xfc00ffdf, "cZx(b)", pa10, 0}, { "fdce", 0x040012c0, 0xfc003fdf, "cZx(s,b)", pa10, 0}, { "fice", 0x040002c0, 0xfc001fdf, "cZx(S,b)", pa10, 0}, -{ "diag", 0x14000000, 0xfc000000, "D", pa10, 0}, { "idtlbt", 0x04001800, 0xfc00ffff, "x,b", pa20, FLAG_STRICT}, { "iitlbt", 0x04000800, 0xfc00ffff, "x,b", pa20, FLAG_STRICT}, +/* completely undocumented, but used by ODE, HP-UX and Linux: */ +{ "mfcpu_pcxu", 0x140008a0, 0xfc9fffe0, "9,t", pa20, 0}, /* PCXU: mfdiag */ +{ "mtcpu_pcxu", 0x14001840, 0xfc00ffff, "x,9", pa20, 0}, + /* These may be specific to certain versions of the PA. Joel claimed they were 72000 (7200?) specific. However, I'm almost certain the mtcpu/mfcpu were undocumented, but available in the older 700 machines. */ +{ "mfcpu_c", 0x14000600, 0xfc00ffff, "9,x", pa10, 0}, /* PCXL: for dr0 and dr8 only */ +{ "mfcpu_t", 0x14001400, 0xfc9fffe0, "9,t", pa10, 0}, /* PCXL: all dr except dr0 and dr8 */ +{ "mtcpu_pcxl", 0x14000240, 0xfc00ffff, "x,9", pa11, 0}, /* PCXL: mtcpu for dr0 and dr8 */ { "mtcpu", 0x14001600, 0xfc00ffff, "x,^", pa10, 0}, { "mfcpu", 0x14001A00, 0xfc00ffff, "^,x", pa10, 0}, { "tocen", 0x14403600, 0xffffffff, "", pa10, 0}, @@ -1336,6 +1343,9 @@ static const struct pa_opcode pa_opcodes[] = { "shdwgr", 0x14402600, 0xffffffff, "", pa10, 0}, { "grshdw", 0x14400620, 0xffffffff, "", pa10, 0}, +/* instead of showing D only, show all other registers too */ +{ "diag", 0x14000000, 0xfc000000, "D x,9,t", pa10, 0}, + /* gfw and gfr are not in the HP PA 1.1 manual, but they are in either the Timex FPU or the Mustang ERS (not sure which) manual. */ { "gfw", 0x04001680, 0xfc00ffdf, "cZx(b)", pa11, 0}, @@ -1801,6 +1811,12 @@ fput_creg (unsigned reg, disassemble_info *info) (*info->fprintf_func) (info->stream, "%s", control_reg[reg]); } +static void +fput_dreg (unsigned reg, disassemble_info *info) +{ + (*info->fprintf_func) (info->stream, "dr%d", reg); +} + /* Print constants with sign. 
*/ static void @@ -2007,6 +2023,9 @@ print_insn_hppa (bfd_vma memaddr, disassemble_info *info) case '^': fput_creg (GET_FIELD (insn, 6, 10), info); break; + case '9': + fput_dreg (GET_FIELD (insn, 6, 10), info); + break; case 't': fput_reg (GET_FIELD (insn, 27, 31), info); break; diff --git a/disas/meson.build b/disas/meson.build index 20d6aef9a71..bbfa1197835 100644 --- a/disas/meson.build +++ b/disas/meson.build @@ -1,5 +1,4 @@ common_ss.add(when: 'CONFIG_ALPHA_DIS', if_true: files('alpha.c')) -common_ss.add(when: 'CONFIG_CRIS_DIS', if_true: files('cris.c')) common_ss.add(when: 'CONFIG_HEXAGON_DIS', if_true: files('hexagon.c')) common_ss.add(when: 'CONFIG_HPPA_DIS', if_true: files('hppa.c')) common_ss.add(when: 'CONFIG_M68K_DIS', if_true: files('m68k.c')) diff --git a/disas/riscv.c b/disas/riscv.c index 5965574d874..85cd2a9c2ae 100644 --- a/disas/riscv.c +++ b/disas/riscv.c @@ -976,6 +976,14 @@ typedef enum { rv_op_amocas_h = 945, rv_op_wrs_sto = 946, rv_op_wrs_nto = 947, + rv_op_lpad = 948, + rv_op_sspush = 949, + rv_op_sspopchk = 950, + rv_op_ssrdp = 951, + rv_op_ssamoswap_w = 952, + rv_op_ssamoswap_d = 953, + rv_op_c_sspush = 954, + rv_op_c_sspopchk = 955, } rv_op; /* register names */ @@ -1654,7 +1662,7 @@ const rv_opcode_data rvi_opcode_data[] = { { "aes32esi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 }, { "aes32dsmi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 }, { "aes32dsi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 }, - { "aes64ks1i", rv_codec_k_rnum, rv_fmt_rd_rs1_rnum, NULL, 0, 0, 0 }, + { "aes64ks1i", rv_codec_k_rnum, rv_fmt_rd_rs1_rnum, NULL, 0, 0, 0 }, { "aes64ks2", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, { "aes64im", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 }, { "aes64esm", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, @@ -2206,11 +2214,11 @@ const rv_opcode_data rvi_opcode_data[] = { { "mop.rr.5", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, { "mop.rr.6", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, { "mop.rr.7", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, - { "c.mop.1", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 }, - { "c.mop.3", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 }, - { "c.mop.5", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 }, - { "c.mop.7", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 }, - { "c.mop.9", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "c.mop.1", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "c.mop.3", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "c.mop.5", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "c.mop.7", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "c.mop.9", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 }, { "c.mop.11", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 }, { "c.mop.13", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 }, { "c.mop.15", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 }, @@ -2236,6 +2244,16 @@ const rv_opcode_data rvi_opcode_data[] = { { "amocas.h", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, { "wrs.sto", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 }, { "wrs.nto", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "lpad", rv_codec_lp, rv_fmt_imm, NULL, 0, 0, 0 }, + { "sspush", rv_codec_r, rv_fmt_rs2, NULL, 0, 0, 0 }, + { "sspopchk", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 }, + { "ssrdp", rv_codec_r, rv_fmt_rd, NULL, 0, 0, 0 }, + { "ssamoswap.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "ssamoswap.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "c.sspush", rv_codec_cmop_ss, rv_fmt_rs2, NULL, rv_op_sspush, + 
rv_op_sspush, 0 }, + { "c.sspopchk", rv_codec_cmop_ss, rv_fmt_rs1, NULL, rv_op_sspopchk, + rv_op_sspopchk, 0 }, }; /* CSR names */ @@ -2253,6 +2271,7 @@ static const char *csr_name(int csrno) case 0x0009: return "vxsat"; case 0x000a: return "vxrm"; case 0x000f: return "vcsr"; + case 0x0011: return "ssp"; case 0x0015: return "seed"; case 0x0017: return "jvt"; case 0x0040: return "uscratch"; @@ -2419,9 +2438,11 @@ static const char *csr_name(int csrno) case 0x07a1: return "tdata1"; case 0x07a2: return "tdata2"; case 0x07a3: return "tdata3"; + case 0x07a4: return "tinfo"; case 0x07b0: return "dcsr"; case 0x07b1: return "dpc"; - case 0x07b2: return "dscratch"; + case 0x07b2: return "dscratch0"; + case 0x07b3: return "dscratch1"; case 0x0b00: return "mcycle"; case 0x0b01: return "mtime"; case 0x0b02: return "minstret"; @@ -2592,10 +2613,16 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) break; case 2: op = rv_op_c_li; break; case 3: - if (dec->cfg->ext_zcmop) { + if (dec->cfg && dec->cfg->ext_zcmop) { if ((((inst >> 2) & 0b111111) == 0b100000) && (((inst >> 11) & 0b11) == 0b0)) { - op = rv_c_mop_1 + ((inst >> 8) & 0b111); + unsigned int cmop_code = 0; + cmop_code = ((inst >> 8) & 0b111); + op = rv_c_mop_1 + cmop_code; + if (dec->cfg->ext_zicfiss) { + op = (cmop_code == 0) ? rv_op_c_sspush : op; + op = (cmop_code == 2) ? rv_op_c_sspopchk : op; + } break; } } @@ -2687,7 +2714,7 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) op = rv_op_c_sqsp; } else { op = rv_op_c_fsdsp; - if (dec->cfg->ext_zcmp && ((inst >> 12) & 0b01)) { + if (dec->cfg && dec->cfg->ext_zcmp && ((inst >> 12) & 0b01)) { switch ((inst >> 8) & 0b01111) { case 8: if (((inst >> 4) & 0b01111) >= 4) { @@ -2713,7 +2740,7 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) } else { switch ((inst >> 10) & 0b011) { case 0: - if (!dec->cfg->ext_zcmt) { + if (dec->cfg && !dec->cfg->ext_zcmt) { break; } if (((inst >> 2) & 0xFF) >= 32) { @@ -2723,7 +2750,7 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) } break; case 3: - if (!dec->cfg->ext_zcmp) { + if (dec->cfg && !dec->cfg->ext_zcmp) { break; } switch ((inst >> 5) & 0b011) { @@ -2929,7 +2956,13 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 7: op = rv_op_andi; break; } break; - case 5: op = rv_op_auipc; break; + case 5: + op = rv_op_auipc; + if (dec->cfg && dec->cfg->ext_zicfilp && + (((inst >> 7) & 0b11111) == 0b00000)) { + op = rv_op_lpad; + } + break; case 6: switch ((inst >> 12) & 0b111) { case 0: op = rv_op_addiw; break; @@ -3073,6 +3106,8 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 66: op = rv_op_amoor_w; break; case 67: op = rv_op_amoor_d; break; case 68: op = rv_op_amoor_q; break; + case 74: op = rv_op_ssamoswap_w; break; + case 75: op = rv_op_ssamoswap_d; break; case 96: op = rv_op_amoand_b; break; case 97: op = rv_op_amoand_h; break; case 98: op = rv_op_amoand_w; break; @@ -4025,8 +4060,8 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 2: op = rv_op_csrrs; break; case 3: op = rv_op_csrrc; break; case 4: - if (dec->cfg->ext_zimop) { - int imm_mop5, imm_mop3; + if (dec->cfg && dec->cfg->ext_zimop) { + int imm_mop5, imm_mop3, reg_num; if ((extract32(inst, 22, 10) & 0b1011001111) == 0b1000000111) { imm_mop5 = deposit32(deposit32(extract32(inst, 20, 2), @@ -4034,11 +4069,36 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) extract32(inst, 26, 2)), 4, 1, extract32(inst, 30, 1)); op = rv_mop_r_0 + imm_mop5; + /* if zicfiss enabled and mop5 is shadow stack */ + if 
(dec->cfg->ext_zicfiss && + ((imm_mop5 & 0b11100) == 0b11100)) { + /* rs1=0 means ssrdp */ + if ((inst & (0b011111 << 15)) == 0) { + op = rv_op_ssrdp; + } + /* rd=0 means sspopchk */ + reg_num = (inst >> 15) & 0b011111; + if (((inst & (0b011111 << 7)) == 0) && + ((reg_num == 1) || (reg_num == 5))) { + op = rv_op_sspopchk; + } + } } else if ((extract32(inst, 25, 7) & 0b1011001) == 0b1000001) { imm_mop3 = deposit32(extract32(inst, 26, 2), 2, 1, extract32(inst, 30, 1)); op = rv_mop_rr_0 + imm_mop3; + /* if zicfiss enabled and mop3 is shadow stack */ + if (dec->cfg->ext_zicfiss && + ((imm_mop3 & 0b111) == 0b111)) { + /* rs1=0 and rd=0 means sspush */ + reg_num = (inst >> 20) & 0b011111; + if (((inst & (0b011111 << 15)) == 0) && + ((inst & (0b011111 << 7)) == 0) && + ((reg_num == 1) || (reg_num == 5))) { + op = rv_op_sspush; + } + } } } break; @@ -4488,6 +4548,11 @@ static uint32_t operand_tbl_index(rv_inst inst) return ((inst << 54) >> 56); } +static uint32_t operand_lpl(rv_inst inst) +{ + return inst >> 12; +} + /* decode operands */ static void decode_inst_operands(rv_decode *dec, rv_isa isa) @@ -4808,7 +4873,7 @@ static void decode_inst_operands(rv_decode *dec, rv_isa isa) break; case rv_codec_vsetivli: dec->rd = operand_rd(inst); - dec->imm = operand_vimm(inst); + dec->imm = extract32(inst, 15, 5); dec->vzimm = operand_vzimm10(inst); break; case rv_codec_zcb_lb: @@ -4875,6 +4940,14 @@ static void decode_inst_operands(rv_decode *dec, rv_isa isa) dec->imm = sextract32(operand_rs2(inst), 0, 5); dec->imm1 = operand_imm2(inst); break; + case rv_codec_lp: + dec->imm = operand_lpl(inst); + break; + case rv_codec_cmop_ss: + dec->rd = rv_ireg_zero; + dec->rs1 = dec->rs2 = operand_crs1(inst); + dec->imm = 0; + break; }; } @@ -5041,28 +5114,28 @@ static GString *format_inst(size_t tab, rv_decode *dec) g_string_append(buf, rv_ireg_name_sym[dec->rs2]); break; case '3': - if (dec->cfg->ext_zfinx) { + if (dec->cfg && dec->cfg->ext_zfinx) { g_string_append(buf, rv_ireg_name_sym[dec->rd]); } else { g_string_append(buf, rv_freg_name_sym[dec->rd]); } break; case '4': - if (dec->cfg->ext_zfinx) { + if (dec->cfg && dec->cfg->ext_zfinx) { g_string_append(buf, rv_ireg_name_sym[dec->rs1]); } else { g_string_append(buf, rv_freg_name_sym[dec->rs1]); } break; case '5': - if (dec->cfg->ext_zfinx) { + if (dec->cfg && dec->cfg->ext_zfinx) { g_string_append(buf, rv_ireg_name_sym[dec->rs2]); } else { g_string_append(buf, rv_freg_name_sym[dec->rs2]); } break; case '6': - if (dec->cfg->ext_zfinx) { + if (dec->cfg && dec->cfg->ext_zfinx) { g_string_append(buf, rv_ireg_name_sym[dec->rs3]); } else { g_string_append(buf, rv_freg_name_sym[dec->rs3]); @@ -5368,7 +5441,8 @@ static GString *disasm_inst(rv_isa isa, uint64_t pc, rv_inst inst, const rv_opcode_data *opcode_data = decoders[i].opcode_data; void (*decode_func)(rv_decode *, rv_isa) = decoders[i].decode_func; - if (guard_func(cfg)) { + /* always_true_p don't dereference cfg */ + if (((i == 0) || cfg) && guard_func(cfg)) { dec.opcode_data = opcode_data; decode_func(&dec, isa); if (dec.op != rv_op_illegal) diff --git a/disas/riscv.h b/disas/riscv.h index 16a08e4895c..d211700cb2f 100644 --- a/disas/riscv.h +++ b/disas/riscv.h @@ -166,6 +166,8 @@ typedef enum { rv_codec_r2_immhl, rv_codec_r2_imm2_imm5, rv_codec_fli, + rv_codec_lp, + rv_codec_cmop_ss, } rv_codec; /* structures */ @@ -223,11 +225,13 @@ enum { #define rv_fmt_none "O\t" #define rv_fmt_rs1 "O\t1" +#define rv_fmt_rs2 "O\t2" #define rv_fmt_offset "O\to" #define rv_fmt_pred_succ "O\tp,s" #define rv_fmt_rs1_rs2 
"O\t1,2" #define rv_fmt_rd_imm "O\t0,i" #define rv_fmt_rd_uimm "O\t0,Ui" +#define rv_fmt_imm "O\ti" #define rv_fmt_rd_offset "O\t0,o" #define rv_fmt_rd_uoffset "O\t0,Uo" #define rv_fmt_rd_rs1_rs2 "O\t0,1,2" @@ -290,7 +294,7 @@ enum { #define rv_fmt_fd_vs2 "O\t3,F" #define rv_fmt_vd_vm "O\tDm" #define rv_fmt_vsetvli "O\t0,1,v" -#define rv_fmt_vsetivli "O\t0,u,v" +#define rv_fmt_vsetivli "O\t0,i,v" #define rv_fmt_rs1_rs2_zce_ldst "O\t2,i(1)" #define rv_fmt_push_rlist "O\tx,-i" #define rv_fmt_pop_rlist "O\tx,i" diff --git a/docs/COLO-FT.txt b/docs/COLO-FT.txt index 2e760a4aeec..2283a09c080 100644 --- a/docs/COLO-FT.txt +++ b/docs/COLO-FT.txt @@ -193,8 +193,8 @@ any IP's here, except for the $primary_ip variable. -device piix3-usb-uhci -device usb-tablet -name secondary \ -netdev tap,id=hn0,vhost=off,helper=/usr/lib/qemu/qemu-bridge-helper \ -device rtl8139,id=e0,netdev=hn0 \ - -chardev socket,id=red0,host=$primary_ip,port=9003,reconnect=1 \ - -chardev socket,id=red1,host=$primary_ip,port=9004,reconnect=1 \ + -chardev socket,id=red0,host=$primary_ip,port=9003,reconnect-ms=1000 \ + -chardev socket,id=red1,host=$primary_ip,port=9004,reconnect-ms=1000 \ -object filter-redirector,id=f1,netdev=hn0,queue=tx,indev=red0 \ -object filter-redirector,id=f2,netdev=hn0,queue=rx,outdev=red1 \ -object filter-rewriter,id=rew0,netdev=hn0,queue=all \ diff --git a/docs/about/build-platforms.rst b/docs/about/build-platforms.rst index 8fd7da140a3..8671c3be9cd 100644 --- a/docs/about/build-platforms.rst +++ b/docs/about/build-platforms.rst @@ -29,6 +29,9 @@ The `Repology`_ site is a useful resource to identify currently shipped versions of software in various operating systems, though it does not cover all distros listed below. +You can find how to install build dependencies for different systems on the +:ref:`setup-build-env` page. + Supported host architectures ---------------------------- @@ -40,8 +43,8 @@ Those hosts are officially supported, with various accelerators: * - CPU Architecture - Accelerators * - Arm - - kvm (64 bit only), tcg, xen - * - MIPS (little endian only) + - hvf (64 bit only), kvm (64 bit only), tcg, xen + * - MIPS (64 bit little endian only) - kvm, tcg * - PPC - kvm, tcg @@ -98,7 +101,7 @@ Python runtime option of the ``configure`` script to point QEMU to a supported version of the Python runtime. - As of QEMU |version|, the minimum supported version of Python is 3.7. + As of QEMU |version|, the minimum supported version of Python is 3.9. Python build dependencies Some of QEMU's build dependencies are written in Python. Usually these @@ -107,18 +110,42 @@ Python build dependencies required, it may be necessary to fetch python modules from the Python Package Index (PyPI) via ``pip``, in order to build QEMU. +Rust build dependencies + QEMU is generally conservative in adding new Rust dependencies, and all + of them are included in the distributed tarballs. One exception is the + bindgen tool, which is too big to package and distribute. The minimum + supported version of bindgen is 0.60.x. For distributions that do not + include bindgen or have an older version, it is recommended to install + a newer version using ``cargo install bindgen-cli``. + + QEMU requires Rust 1.77.0. This is available on all supported platforms + with one exception, namely the ``mips64el`` architecture on Debian bookworm. + For all other architectures, Debian bookworm provides a new-enough Rust + compiler in the ``rustc-web`` package. 
+ + Also, on Ubuntu 22.04 or 24.04 this requires the ``rustc-1.77`` + (or newer) package. The path to ``rustc`` and ``rustdoc`` must be + provided manually to the configure script. + + Some distros prefer to avoid vendored crate sources, and instead use + local sources from e.g. ``/usr/share/cargo/registry``. QEMU includes a + script, ``scripts/get-wraps-from-cargo-registry.py``, that automatically + performs this task. The script is meant to be invoked after unpacking + the QEMU tarball. QEMU also includes ``rust/Cargo.toml`` and + ``rust/Cargo.lock`` files that can be used to compute QEMU's build + dependencies, e.g. using ``cargo2rpm -p rust/Cargo.toml buildrequires``. + Optional build dependencies - Build components whose absence does not affect the ability to build - QEMU may not be available in distros, or may be too old for QEMU's - requirements. Many of these, such as the Avocado testing framework - or various linters, are written in Python and therefore can also - be installed using ``pip``. Cross compilers are another example + Build components whose absence does not affect the ability to build QEMU + may not be available in distros, or may be too old for our requirements. + Many of these, such as additional modules for the functional testing + framework or various linters, are written in Python and therefore can + also be installed using ``pip``. Cross compilers are another example of optional build-time dependency; in this case it is possible to download them from repositories such as EPEL, to use container-based cross compilation using ``docker`` or ``podman``, or to use pre-built binaries distributed with QEMU. - Windows ------- diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst index 88f0f037865..d50645a0711 100644 --- a/docs/about/deprecated.rst +++ b/docs/about/deprecated.rst @@ -24,12 +24,6 @@ should exclusively use a non-deprecated machine type, with use of the most recent version highly recommended. Non-versioned machine types follow the general feature deprecation policy. -Prior to the 2.10.0 release there was no official policy on how -long features would be deprecated prior to their removal, nor -any documented list of which features were deprecated. Thus -any features deprecated prior to 2.10.0 will be treated as if -they were first deprecated in the 2.10.0 release. - What follows is a list of all features currently marked as deprecated. @@ -74,11 +68,18 @@ configurations (e.g. -smp drawers=1,books=1,clusters=1 for x86 PC machine) is marked deprecated since 9.0, users have to ensure that all the topology members described with -smp are supported by the target machine. -``-runas`` (since 9.1) ----------------------- - -Use ``-run-with user=..`` instead. +``-old-param`` option for booting Arm kernels via param_struct (since 10.0) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +The ``-old-param`` command line option is specific to Arm targets: +it is used when directly booting a guest kernel to pass it the +command line and other information via the old ``param_struct`` ABI, +rather than the newer ATAGS or DTB mechanisms. This option was only +ever needed to support ancient kernels on some old board types +like the ``akita`` or ``terrier``; it has been deprecated in the +kernel since 2001. None of the board types QEMU supports need +``param_struct`` support, so this option has been deprecated and will +be removed in a future QEMU version. 
User-mode emulator command line arguments ----------------------------------------- @@ -147,32 +148,66 @@ options are removed in favor of using explicit ``blockdev-create`` and ``blockdev-add`` calls. See :doc:`/interop/live-block-operations` for details. -Incorrectly typed ``device_add`` arguments (since 6.2) -'''''''''''''''''''''''''''''''''''''''''''''''''''''' +``query-migrationthreads`` (since 9.2) +'''''''''''''''''''''''''''''''''''''' + +To be removed with no replacement, as it reports only a limited set of +threads (for example, it only reports source side of multifd threads, +without reporting any destination threads, or non-multifd source threads). +For debugging purpose, please use ``-name $VM,debug-threads=on`` instead. + +``block-job-pause`` (since 10.1) +'''''''''''''''''''''''''''''''' + +Use ``job-pause`` instead. The only difference is that ``job-pause`` +always reports GenericError on failure when ``block-job-pause`` reports +DeviceNotActive when block-job is not found. + +``block-job-resume`` (since 10.1) +''''''''''''''''''''''''''''''''' + +Use ``job-resume`` instead. The only difference is that ``job-resume`` +always reports GenericError on failure when ``block-job-resume`` reports +DeviceNotActive when block-job is not found. + +``block-job-complete`` (since 10.1) +''''''''''''''''''''''''''''''''''' -Due to shortcomings in the internal implementation of ``device_add``, QEMU -incorrectly accepts certain invalid arguments: Any object or list arguments are -silently ignored. Other argument types are not checked, but an implicit -conversion happens, so that e.g. string values can be assigned to integer -device properties or vice versa. +Use ``job-complete`` instead. The only difference is that ``job-complete`` +always reports GenericError on failure when ``block-job-complete`` reports +DeviceNotActive when block-job is not found. -This is a bug in QEMU that will be fixed in the future so that previously -accepted incorrect commands will return an error. Users should make sure that -all arguments passed to ``device_add`` are consistent with the documented -property types. +``block-job-dismiss`` (since 10.1) +'''''''''''''''''''''''''''''''''' + +Use ``job-dismiss`` instead. + +``block-job-finalize`` (since 10.1) +''''''''''''''''''''''''''''''''''' + +Use ``job-finalize`` instead. + +``migrate`` argument ``detach`` (since 10.1) +'''''''''''''''''''''''''''''''''''''''''''' + +This argument has always been ignored. Host Architectures ------------------ -BE MIPS (since 7.2) -''''''''''''''''''' +Big endian MIPS since 7.2; 32-bit little endian MIPS since 9.2 +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' As Debian 10 ("Buster") moved into LTS the big endian 32 bit version of MIPS moved out of support making it hard to maintain our cross-compilation CI tests of the architecture. As we no longer have CI coverage support may bitrot away before the deprecation process -completes. The little endian variants of MIPS (both 32 and 64 bit) are -still a supported host architecture. +completes. + +Likewise, the little endian variant of 32 bit MIPS is not supported by +Debian 13 ("Trixie") and newer. + +64 bit little endian MIPS is still a supported host architecture. System emulation on 32-bit x86 hosts (since 8.0) '''''''''''''''''''''''''''''''''''''''''''''''' @@ -184,6 +219,53 @@ be an effective use of its limited resources, and thus intends to discontinue it. 
Since all recent x86 hardware from the past >10 years is capable of the 64-bit x86 extensions, a corresponding 64-bit OS should be used instead. +TCG Plugin support not enabled by default on 32-bit hosts (since 9.2) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +While it is still possible to enable TCG plugin support for 32-bit +hosts there are a number of potential pitfalls when instrumenting +64-bit guests. The plugin APIs typically pass most addresses as +uint64_t but practices like encoding that address in a host pointer +for passing as user-data will lose data. As most software analysis +benefits from having plenty of host memory it seems reasonable to +encourage users to use 64 bit builds of QEMU for analysis work +whatever targets they are instrumenting. + +TCG Plugin support not enabled by default with TCI (since 9.2) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +While the TCG interpreter can interpret the TCG ops used by plugins it +is going to be so much slower it wouldn't make sense for any serious +instrumentation. Due to implementation differences there will also be +anomalies in things like memory instrumentation. + +32-bit host operating systems (since 10.0) +'''''''''''''''''''''''''''''''''''''''''' + +Keeping 32-bit host support alive is a substantial burden for the +QEMU project. Thus QEMU will in future drop the support for all +32-bit host systems. + +linux-user mode CPUs +-------------------- + +iwMMXt emulation and the ``pxa`` CPUs (since 10.0) +'''''''''''''''''''''''''''''''''''''''''''''''''' + +The ``pxa`` CPU family (``pxa250``, ``pxa255``, ``pxa260``, +``pxa261``, ``pxa262``, ``pxa270-a0``, ``pxa270-a1``, ``pxa270``, +``pxa270-b0``, ``pxa270-b1``, ``pxa270-c0``, ``pxa270-c5``) are no +longer used in system emulation, because all the machine types which +used these CPUs were removed in the QEMU 9.2 release. These CPUs can +now only be used in linux-user mode, and to do that you would have to +explicitly select one of these CPUs with the ``-cpu`` command line +option or the ``QEMU_CPU`` environment variable. + +We don't believe that anybody is using the iwMMXt emulation, and we do +not have any tests to validate it or any real hardware or similar +known-good implementation to test against. GCC is in the process of +dropping their support for iwMMXt codegen. These CPU types are +therefore deprecated in QEMU, and will be removed in a future release. System emulator CPUs -------------------- @@ -206,17 +288,25 @@ in the QEMU object model anymore. ``Sun-UltraSparc-IIIi+`` and but for consistency these will get removed in a future release, too. Use ``Sun-UltraSparc-IIIi-plus`` and ``Sun-UltraSparc-IV-plus`` instead. -CRIS CPU architecture (since 9.0) -''''''''''''''''''''''''''''''''' +PPC 405 CPUs (since 10.0) +''''''''''''''''''''''''' -The CRIS architecture was pulled from Linux in 4.17 and the compiler -is no longer packaged in any distro making it harder to run the -``check-tcg`` tests. Unless we can improve the testing situation there -is a chance the code will bitrot without anyone noticing. +The PPC 405 CPU has no known users and the ``ref405ep`` machine was +removed in QEMU 10.0. Since the IBM POWER [8-11] processors uses an +embedded 405 for power management (OCC) and other internal tasks, it +is theoretically possible to use QEMU to model them. Let's keep the +CPU implementation for a while before removing all support. 
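Looking back at the note above about TCG plugin support on 32-bit hosts: the data loss it describes comes from round-tripping a 64-bit guest address through a host pointer used as callback user-data. A minimal, self-contained C illustration of that pitfall (not QEMU code, just the arithmetic the note is warning about)::

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* A 64-bit guest virtual address, as the plugin API reports it. */
        uint64_t vaddr = 0x123456789abcdef0ULL;

        /* Common shortcut: smuggle the address through a void * user-data slot. */
        void *udata = (void *)(uintptr_t)vaddr;

        /* Recover it later in the callback. */
        uint64_t round_trip = (uint64_t)(uintptr_t)udata;

        /* On a 64-bit host the value survives; on a 32-bit host the upper
           32 bits are silently dropped and the comparison fails. */
        printf("%s\n", round_trip == vaddr ? "intact" : "truncated");
        return 0;
    }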
System emulator machines ------------------------ +Versioned machine types (aarch64, arm, i386, m68k, ppc64, s390x, x86_64) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +In accordance with our versioned machine type deprecation policy, all machine +types with version |VER_MACHINE_DEPRECATION_VERSION|, or older, have been +deprecated. + Arm ``virt`` machine ``dtb-kaslr-seed`` property (since 7.1) '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' @@ -225,57 +315,63 @@ deprecated; use the new name ``dtb-randomness`` instead. The new name better reflects the way this property affects all random data within the device tree blob, not just the ``kaslr-seed`` node. -``pc-i440fx-2.4`` up to ``pc-i440fx-2.12`` (since 9.1) -'''''''''''''''''''''''''''''''''''''''''''''''''''''' +Arm ``ast2700a0-evb`` machine (since 10.1) +'''''''''''''''''''''''''''''''''''''''''' + +The ``ast2700a0-evb`` machine represents the first revision of the AST2700 +and serves as the initial engineering sample rather than a production version. +A newer revision, A1, is now supported, and the ``ast2700a1-evb`` should +replace the older A0 version. -These old machine types are quite neglected nowadays and thus might have -various pitfalls with regards to live migration. Use a newer machine type +Mips ``mipssim`` machine (since 10.0) +''''''''''''''''''''''''''''''''''''' + +Linux dropped support for this virtual machine type in kernel v3.7, and +there does not seem to be anybody around who is still using this board +in QEMU: Most former MIPS-related people are working on other architectures +in their everyday job nowadays, and we are also not aware of anybody still +using old binaries with this board (i.e. there is also no binary available +online to check that this board did not completely bitrot yet). It is +recommended to use another MIPS machine for future MIPS code development instead. -``shix`` (since 9.0) -'''''''''''''''''''' +RISC-V default machine option (since 10.0) +'''''''''''''''''''''''''''''''''''''''''' -The machine is no longer in existence and has been long unmaintained -in QEMU. This also holds for the TC51828 16MiB flash that it uses. +RISC-V defines ``spike`` as the default machine if no machine option is +given in the command line. This happens because ``spike`` is the first +RISC-V machine implemented in QEMU and setting it as default was +convenient at that time. Now we have 7 riscv64 and 6 riscv32 machines +and having ``spike`` as a default is no longer justified. This default +will also promote situations where users think they're running ``virt`` +(the most used RISC-V machine type in 10.0) when in fact they're +running ``spike``. -``pseries-2.1`` up to ``pseries-2.12`` (since 9.0) -'''''''''''''''''''''''''''''''''''''''''''''''''' +Removing the default machine option forces users to always set the machine +they want to use and avoids confusion. Existing users of the ``spike`` +machine must ensure that they're setting the ``spike`` machine in the +command line (``-M spike``). + +Arm ``highbank`` and ``midway`` machines (since 10.1) +''''''''''''''''''''''''''''''''''''''''''''''''''''' + +There are no known users left for these machines (if you still use it, +please write a mail to the qemu-devel mailing list). If you just want to +boot a Cortex-A15 or Cortex-A9 Linux, use the ``virt`` machine instead. 
+ + +System emulator binaries +------------------------ + +``qemu-system-microblazeel`` (since 10.1) +''''''''''''''''''''''''''''''''''''''''' + +The ``qemu-system-microblaze`` binary can emulate little-endian machines +now, too, so the separate binary ``qemu-system-microblazeel`` (with the +``el`` suffix) for little-endian targets is not required anymore. The +``petalogix-s3adsp1800`` machine can now be switched to little endian by +setting its ``endianness`` property to ``little``. -Older pseries machines before version 3.0 have undergone many changes -to correct issues, mostly regarding migration compatibility. These are -no longer maintained and removing them will make the code easier to -read and maintain. Use versions 3.0 and above as a replacement. - -Arm machines ``akita``, ``borzoi``, ``cheetah``, ``connex``, ``mainstone``, ``n800``, ``n810``, ``spitz``, ``terrier``, ``tosa``, ``verdex``, ``z2`` (since 9.0) -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -QEMU includes models of some machine types where the QEMU code that -emulates their SoCs is very old and unmaintained. This code is now -blocking our ability to move forward with various changes across -the codebase, and over many years nobody has been interested in -trying to modernise it. We don't expect any of these machines to have -a large number of users, because they're all modelling hardware that -has now passed away into history. We are therefore dropping support -for all machine types using the PXA2xx and OMAP2 SoCs. We are also -dropping the ``cheetah`` OMAP1 board, because we don't have any -test images for it and don't know of anybody who does; the ``sx1`` -and ``sx1-v1`` OMAP1 machines remain supported for now. - -PPC 405 ``ref405ep`` machine (since 9.1) -'''''''''''''''''''''''''''''''''''''''' - -The ``ref405ep`` machine and PPC 405 CPU have no known users, firmware -images are not available, OpenWRT dropped support in 2019, U-Boot in -2017, Linux also is dropping support in 2024. It is time to let go of -this ancient hardware and focus on newer CPUs and platforms. - -Arm ``tacoma-bmc`` machine (since 9.1) -'''''''''''''''''''''''''''''''''''''''' - -The ``tacoma-bmc`` machine was a board including an AST2600 SoC based -BMC and a witherspoon like OpenPOWER system. It was used for bring up -of the AST2600 SoC in labs. It can be easily replaced by the -``rainier-bmc`` machine which is a real product. Backend options --------------- @@ -324,41 +420,6 @@ the addition of volatile memory support, it is now necessary to distinguish between persistent and volatile memory backends. As such, memdev is deprecated in favor of persistent-memdev. -``-fsdev proxy`` and ``-virtfs proxy`` (since 8.1) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The 9p ``proxy`` filesystem backend driver has been deprecated and will be -removed (along with its proxy helper daemon) in a future version of QEMU. Please -use ``-fsdev local`` or ``-virtfs local`` for using the 9p ``local`` filesystem -backend, or alternatively consider deploying virtiofsd instead. - -The 9p ``proxy`` backend was originally developed as an alternative to the 9p -``local`` backend. The idea was to enhance security by dispatching actual low -level filesystem operations from 9p server (QEMU process) over to a separate -process (the virtfs-proxy-helper binary). However this alternative never gained -momentum. 
The proxy backend is much slower than the local backend, hasn't seen -any development in years, and showed to be less secure, especially due to the -fact that its helper daemon must be run as root, whereas with the local backend -QEMU is typically run as unprivileged user and allows to tighten behaviour by -mapping permissions et al by using its 'mapped' security model option. - -Nowadays it would make sense to reimplement the ``proxy`` backend by using -QEMU's ``vhost`` feature, which would eliminate the high latency costs under -which the 9p ``proxy`` backend currently suffers. However as of to date nobody -has indicated plans for such kind of reimplementation unfortunately. - -RISC-V 'any' CPU type ``-cpu any`` (since 8.2) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The 'any' CPU type was introduced back in 2018 and has been around since the -initial RISC-V QEMU port. Its usage has always been unclear: users don't know -what to expect from a CPU called 'any', and in fact the CPU does not do anything -special that isn't already done by the default CPUs rv32/rv64. - -After the introduction of the 'max' CPU type, RISC-V now has a good coverage -of generic CPUs: rv32 and rv64 as default CPUs and 'max' as a feature complete -CPU for both 32 and 64 bit builds. Users are then discouraged to use the 'any' -CPU type starting in 8.2. RISC-V CPU properties which start with capital 'Z' (since 8.2) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -422,6 +483,15 @@ Specifying the iSCSI password in plain text on the command line using the used instead, to refer to a ``--object secret...`` instance that provides a password via a file, or encrypted. +``gluster`` backend (since 9.2) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +According to https://marc.info/?l=fedora-devel-list&m=171934833215726 +the GlusterFS development effectively ended. Unless the development +gains momentum again, the QEMU project will remove the gluster backend +in a future release. + + Character device options '''''''''''''''''''''''' @@ -430,16 +500,49 @@ Backend ``memory`` (since 9.0) ``memory`` is a deprecated synonym for ``ringbuf``. -CPU device properties -''''''''''''''''''''' +``reconnect`` (since 9.2) +^^^^^^^^^^^^^^^^^^^^^^^^^ -``pcommit`` on x86 (since 9.1) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The ``reconnect`` option only allows specifying second granularity timeouts, +which is not enough for all types of use cases, use ``reconnect-ms`` instead. -The PCOMMIT instruction was never included in any physical processor. -It was implemented as a no-op instruction in TCG up to QEMU 9.0, but -only with ``-cpu max`` (which does not guarantee migration compatibility -across versions). + +Net device options +'''''''''''''''''' + +Stream ``reconnect`` (since 9.2) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``reconnect`` option only allows specifying second granularity timeouts, +which is not enough for all types of use cases, use ``reconnect-ms`` instead. + +VFIO device options +''''''''''''''''''' + +``-device vfio-calxeda-xgmac`` (since 10.0) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The vfio-calxeda-xgmac device allows to assign a host Calxeda Highbank +10Gb XGMAC Ethernet controller device ("calxeda,hb-xgmac" compatibility +string) to a guest. Calxeda HW has been ewasted now and there is no point +keeping that device. 
+ +``-device vfio-amd-xgbe`` (since 10.0) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The vfio-amd-xgbe device allows to assign a host AMD 10GbE controller +to a guest ("amd,xgbe-seattle-v1a" compatibility string). AMD "Seattle" +is not supported anymore and there is no point keeping that device. + +``-device vfio-platform`` (since 10.0) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The vfio-platform device allows to assign a host platform device +to a guest in a generic manner. Integrating a new device into +the vfio-platform infrastructure requires some adaptation at +both kernel and qemu level. No such attempt has been done for years +and the conclusion is that vfio-platform has not got any traction. +PCIe passthrough shall be the mainline solution. + +CPU device properties +''''''''''''''''''''' ``pmu-num=n`` on RISC-V CPUs (since 8.2) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -450,6 +553,14 @@ be calculated with ``((2 ^ n) - 1) << 3``. The least significant three bits must be left clear. +``pcommit`` on x86 (since 9.1) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The PCOMMIT instruction was never included in any physical processor. +It was implemented as a no-op instruction in TCG up to QEMU 9.0, but +only with ``-cpu max`` (which does not guarantee migration compatibility +across versions). + Backwards compatibility ----------------------- @@ -503,3 +614,9 @@ usage of providing a file descriptor to a plain file has been deprecated in favor of explicitly using the ``file:`` URI with the file descriptor being passed as an ``fdset``. Refer to the ``add-fd`` command documentation for details on the ``fdset`` usage. + +``zero-blocks`` capability (since 9.2) +'''''''''''''''''''''''''''''''''''''' + +The ``zero-blocks`` capability was part of the block migration which +doesn't exist anymore since it was removed in QEMU v9.1. diff --git a/docs/about/emulation.rst b/docs/about/emulation.rst index c03033e4e95..456d01d5b08 100644 --- a/docs/about/emulation.rst +++ b/docs/about/emulation.rst @@ -26,10 +26,6 @@ depending on the guest architecture. - :ref:`Yes` - No - 8 bit micro controller, often used in maker projects - * - Cris - - Yes - - Yes - - Embedded RISC chip developed by AXIS * - Hexagon - No - Yes @@ -175,11 +171,13 @@ for that architecture. - Unified Hosting Interface (MD01069) * - RISC-V - System and User-mode - - https://github.com/riscv/riscv-semihosting-spec/blob/main/riscv-semihosting-spec.adoc + - https://github.com/riscv-non-isa/riscv-semihosting/blob/main/riscv-semihosting.adoc * - Xtensa - System - Tensilica ISS SIMCALL +.. _tcg-plugins: + TCG Plugins ----------- @@ -207,8 +205,8 @@ Once built a program can be run with multiple plugins loaded each with their own arguments:: $QEMU $OTHER_QEMU_ARGS \ - -plugin contrib/plugin/libhowvec.so,inline=on,count=hint \ - -plugin contrib/plugin/libhotblocks.so + -plugin contrib/plugins/libhowvec.so,inline=on,count=hint \ + -plugin contrib/plugins/libhotblocks.so Arguments are plugin specific and can be used to modify their behaviour. In this case the howvec plugin is being asked to use inline @@ -219,6 +217,14 @@ Linux user-mode emulation also evaluates the environment variable QEMU_PLUGIN="file=contrib/plugins/libhowvec.so,inline=on,count=hint" $QEMU +QEMU plugins avoid to write directly to stdin/stderr, and use the log provided +by the API (see function ``qemu_plugin_outs``). 
+To show output, you may use this additional parameter:: + + $QEMU $OTHER_QEMU_ARGS \ + -d plugin \ + -plugin contrib/plugins/libhowvec.so,inline=on,count=hint + Example Plugins ~~~~~~~~~~~~~~~ @@ -260,11 +266,40 @@ Behaviour can be tweaked with the following arguments: * - Option - Description * - inline=true|false - - Use faster inline addition of a single counter. Not per-cpu and not - thread safe. + - Use faster inline addition of a single counter. * - idle=true|false - Dump the current execution stats whenever the guest vCPU idles +Basic Block Vectors +................... + +``contrib/plugins/bbv.c`` + +The bbv plugin allows you to generate basic block vectors for use with the +`SimPoint `__ analysis tool. + +.. list-table:: Basic block vectors arguments + :widths: 20 80 + :header-rows: 1 + + * - Option + - Description + * - interval=N + - The interval to generate a basic block vector specified by the number of + instructions (Default: N = 100000000) + * - outfile=PATH + - The path to output files. + It will be suffixed with ``.N.bb`` where ``N`` is a vCPU index. + +Example:: + + $ qemu-aarch64 \ + -plugin contrib/plugins/libbbv.so,interval=100,outfile=sha1 \ + tests/tcg/aarch64-linux-user/sha1 + SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6 + $ du sha1.0.bb + 23128 sha1.0.bb + Instruction ........... @@ -381,6 +416,28 @@ run:: 160 1 0 135 1 0 +Behaviour can be tweaked with the following arguments: + +.. list-table:: Syscall plugin arguments + :widths: 20 80 + :header-rows: 1 + + * - Option + - Description + * - print=true|false + - Print the number of times each syscall is called + * - log_writes=true|false + - Log the buffer of each write syscall in hexdump format + +Test inline operations +...................... + +``tests/plugins/inline.c`` + +This plugin is used for testing all inline operations, conditional callbacks and +scoreboard. It prints a per-cpu summary of all events. + + Hot Blocks .......... @@ -394,9 +451,6 @@ with linux-user execution as system emulation tends to generate re-translations as blocks from different programs get swapped in and out of system memory. -If your program is single-threaded you can use the ``inline`` option for -slightly faster (but not thread safe) counters. - Example:: $ qemu-aarch64 \ @@ -736,10 +790,35 @@ The plugin will log the reason of exit, for example:: 0xd4 reached, exiting +Limit instructions per second +............................. + +This plugin can limit the number of Instructions Per Second that are executed:: + + # get number of instructions + $ num_insn=$(./build/qemu-x86_64 -plugin ./build/tests/plugin/libinsn.so -d plugin /bin/true |& grep total | sed -e 's/.*: //') + # limit speed to execute in 10 seconds + $ time ./build/qemu-x86_64 -plugin ./build/contrib/plugins/libips.so,ips=$(($num_insn/10)) /bin/true + real 10.000s + + +.. list-table:: IPS arguments + :widths: 20 80 + :header-rows: 1 + + * - Option + - Description + * - ips=N + - Maximum number of instructions per cpu that can be executed in one second. + The plugin will sleep when the given number of instructions is reached. + * - ipq=N + - Instructions per quantum. How many instructions before we re-calculate time. + The lower the number the more accurate time will be, but the less efficient the plugin. + Defaults to ips/10 + Other emulation features ------------------------ When running system emulation you can also enable deterministic execution which allows for repeatable record/replay debugging. See :ref:`Record/Replay` for more details. 
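To make the plugin documentation above a little more concrete, here is a minimal sketch of a plugin that counts translated blocks and reports the total through ``qemu_plugin_outs``, so its output is routed via ``-d plugin`` as described. It is only an illustrative sketch against the public plugin API, not one of the plugins shipped in ``contrib/plugins``::

    #include <inttypes.h>
    #include <glib.h>
    #include <qemu-plugin.h>

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    static uint64_t tb_count;

    static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        /* Translation can happen concurrently per vCPU, so count atomically. */
        __atomic_fetch_add(&tb_count, 1, __ATOMIC_RELAXED);
    }

    static void plugin_exit(qemu_plugin_id_t id, void *userdata)
    {
        g_autofree gchar *msg =
            g_strdup_printf("translated %" PRIu64 " blocks\n", tb_count);
        /* Use the plugin log rather than writing to stdout/stderr directly. */
        qemu_plugin_outs(msg);
    }

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_info_t *info,
                                               int argc, char **argv)
    {
        qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
        qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
        return 0;
    }

Loaded like the contrib plugins shown earlier (for example ``-d plugin -plugin ./libtbcount.so``, where the library name is hypothetical), it prints its one-line summary when the guest exits.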
- diff --git a/docs/about/removed-features.rst b/docs/about/removed-features.rst index fc7b28e6373..d7c2113fc3e 100644 --- a/docs/about/removed-features.rst +++ b/docs/about/removed-features.rst @@ -162,6 +162,12 @@ specified with ``-mem-path`` can actually provide the guest RAM configured with The ``name`` parameter of the ``-net`` option was a synonym for the ``id`` parameter, which should now be used instead. +RISC-V firmware not booted by default (removed in 5.1) +'''''''''''''''''''''''''''''''''''''''''''''''''''''' + +QEMU 5.1 changes the default behaviour from ``-bios none`` to ``-bios default`` +for the RISC-V ``virt`` machine and ``sifive_u`` machine. + ``-numa node,mem=...`` (removed in 5.1) ''''''''''''''''''''''''''''''''''''''' @@ -324,12 +330,6 @@ devices. Drives the board doesn't pick up can no longer be used with This option was undocumented and not used in the field. Use ``-device usb-ccid`` instead. -RISC-V firmware not booted by default (removed in 5.1) -'''''''''''''''''''''''''''''''''''''''''''''''''''''' - -QEMU 5.1 changes the default behaviour from ``-bios none`` to ``-bios default`` -for the RISC-V ``virt`` machine and ``sifive_u`` machine. - ``-no-quit`` (removed in 7.0) ''''''''''''''''''''''''''''' @@ -355,13 +355,13 @@ The ``-writeconfig`` option was not able to serialize the entire contents of the QEMU command line. It is thus considered a failed experiment and removed without a replacement. -``loaded`` property of ``secret`` and ``secret_keyring`` objects (removed in 7.1) -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +``loaded`` property of secret and TLS credential objects (removed in 9.2) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' The ``loaded=on`` option in the command line or QMP ``object-add`` either had no effect (if ``loaded`` was the last option) or caused options to be effectively ignored as if they were not given. The property is therefore -useless and should simply be removed. +useless and has been removed. ``opened`` property of ``rng-*`` objects (removed in 7.1) ''''''''''''''''''''''''''''''''''''''''''''''''''''''''' @@ -403,13 +403,13 @@ Sound card devices should be created using ``-device`` or ``-audio``. The exception is ``pcspk`` which can be activated using ``-machine pcspk-audiodev=``. -``-watchdog`` (since 7.2) -''''''''''''''''''''''''' +``-watchdog`` (removed in 7.2) +'''''''''''''''''''''''''''''' Use ``-device`` instead. -Hexadecimal sizes with scaling multipliers (since 8.0) -'''''''''''''''''''''''''''''''''''''''''''''''''''''' +Hexadecimal sizes with scaling multipliers (removed in 8.0) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' Input parameters that take a size value should only use a size suffix (such as 'k' or 'M') when the base is written in decimal, and not when @@ -510,13 +510,56 @@ than zero. Removed along with the ``compression`` migration capability. -``-device virtio-blk,scsi=on|off`` (since 9.1) -'''''''''''''''''''''''''''''''''''''''''''''' +``-device virtio-blk,scsi=on|off`` (removed in 9.1) +''''''''''''''''''''''''''''''''''''''''''''''''''' The virtio-blk SCSI passthrough feature is a legacy VIRTIO feature. VIRTIO 1.0 and later do not support it because the virtio-scsi device was introduced for full SCSI support. Use virtio-scsi instead when SCSI passthrough is required. 
+``-fsdev proxy`` and ``-virtfs proxy`` (removed in 9.2) +''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +The 9p ``proxy`` filesystem backend driver was originally developed to +enhance security by dispatching low level filesystem operations from 9p +server (QEMU process) over to a separate process (the virtfs-proxy-helper +binary). However the proxy backend was much slower than the local backend, +didn't see any development in years, and showed to be less secure, +especially due to the fact that its helper daemon must be run as root. + +Use ``local``, possibly mapping permissions et al by using its 'mapped' +security model option, or switch to ``virtiofs``. The virtiofs daemon +``virtiofsd`` uses vhost to eliminate the high latency costs of the 9p +``proxy`` backend. + +``-portrait`` and ``-rotate`` (removed in 9.2) +'''''''''''''''''''''''''''''''''''''''''''''' + +The ``-portrait`` and ``-rotate`` options were documented as only +working with the PXA LCD device, and all the machine types using +that display device were removed in 9.2, so these options also +have been dropped. + +These options were intended to simulate a mobile device being +rotated by the user, and had three effects: + +* the display output was rotated by 90, 180 or 270 degrees +* the mouse/trackpad input was rotated the opposite way +* the machine model would signal to the guest about its + orientation + +Of these three things, the input-rotation was coded without being +restricted to boards which supported the full set of device-rotation +handling, so in theory the options were usable on other machine models +to produce an odd effect (rotating input but not display output). But +this was never intended or documented behaviour, so we have dropped +the options along with the machine models they were intended for. + +``-runas`` (removed in 10.0) +'''''''''''''''''''''''''''' + +Use ``-run-with user=..`` instead. + User-mode emulator command line arguments ----------------------------------------- @@ -679,6 +722,15 @@ Use ``multifd-channels`` instead. Use ``multifd-compression`` instead. +Incorrectly typed ``device_add`` arguments (since 9.2) +'''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Due to shortcomings in the internal implementation of ``device_add``, +QEMU used to incorrectly accept certain invalid arguments. Any object +or list arguments were silently ignored. Other argument types were not +checked, but an implicit conversion happened, so that e.g. string +values could be assigned to integer device properties or vice versa. + QEMU Machine Protocol (QMP) events ---------------------------------- @@ -815,6 +867,15 @@ QEMU. Since all recent x86 hardware from the past >10 years is capable of the 64-bit x86 extensions, a corresponding 64-bit OS should be used instead. +32-bit hosts for 64-bit guests (removed in 10.0) +'''''''''''''''''''''''''''''''''''''''''''''''' + +In general, 32-bit hosts cannot support the memory space or atomicity +requirements of 64-bit guests. Prior to 10.0, QEMU attempted to +work around the atomicity issues in system mode by running all vCPUs +in a single thread context; in user mode atomicity was simply broken. +From 10.0, QEMU has disabled configuration of 64-bit guests on 32-bit hosts. + Guest Emulator ISAs ------------------- @@ -889,6 +950,21 @@ Nios II CPU (removed in 9.1) QEMU Nios II architecture was orphan; Intel has EOL'ed the Nios II processor IP (see `Intel discontinuance notification`_). 
+CRIS CPU architecture (removed in 9.2)
+''''''''''''''''''''''''''''''''''''''
+
+The CRIS architecture was pulled from Linux in 4.17 and the compiler
+was no longer packaged in any distro, making it harder to run the
+``check-tcg`` tests.
+
+RISC-V 'any' CPU type ``-cpu any`` (removed in 9.2)
+'''''''''''''''''''''''''''''''''''''''''''''''''''
+
+The 'any' CPU type was introduced back in 2018 and has been around since the
+initial RISC-V QEMU port. Its usage has always been unclear: users don't know
+what to expect from a CPU called 'any', and in fact the CPU does not do anything
+special that isn't already done by the default CPUs rv32/rv64.
+
System accelerators
-------------------

@@ -899,21 +975,28 @@ Userspace local APIC with KVM (x86, removed in 8.0)
a local APIC. The ``split`` setting is supported, as is using
``-M kernel-irqchip=off`` when the CPU does not have a local APIC.

-HAXM (``-accel hax``) (removed in 8.2)
-''''''''''''''''''''''''''''''''''''''
-
-The HAXM project has been retired (see https://github.com/intel/haxm#status).
-Use "whpx" (on Windows) or "hvf" (on macOS) instead.
-
MIPS "Trap-and-Emulate" KVM support (removed in 8.0)
'''''''''''''''''''''''''''''''''''''''''''''''''''

The MIPS "Trap-and-Emulate" KVM host and guest support was removed from
Linux in 2021, and is not supported anymore by QEMU either.

+HAXM (``-accel hax``) (removed in 8.2)
+''''''''''''''''''''''''''''''''''''''
+
+The HAXM project has been retired (see https://github.com/intel/haxm#status).
+Use "whpx" (on Windows) or "hvf" (on macOS) instead.
+
System emulator machines
------------------------

+Versioned machine types (aarch64, arm, i386, m68k, ppc64, s390x, x86_64)
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+In accordance with our versioned machine type deprecation policy, all machine
+types with version |VER_MACHINE_DELETION_VERSION|, or older, have been
+removed.
+
``s390-virtio`` (removed in 2.6)
''''''''''''''''''''''''''''''''

@@ -948,12 +1031,6 @@ mips ``fulong2e`` machine alias (removed in 6.0)

This machine has been renamed ``fuloong2e``.

-``pc-0.10`` up to ``pc-i440fx-2.3`` (removed in 4.0 up to 9.0)
-''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
-
-These machine types were very old and likely could not be used for live
-migration from old QEMU versions anymore. Use a newer machine type instead.
-
Raspberry Pi ``raspi2`` and ``raspi3`` machines (removed in 6.2)
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

@@ -978,6 +1055,51 @@ Nios II ``10m50-ghrd`` and ``nios2-generic-nommu`` machines (removed in 9.1)

The Nios II architecture was orphan.

+``shix`` (removed in 9.2)
+'''''''''''''''''''''''''
+
+The machine was unmaintained.
+
+Arm machines ``akita``, ``borzoi``, ``cheetah``, ``connex``, ``mainstone``, ``n800``, ``n810``, ``spitz``, ``terrier``, ``tosa``, ``verdex``, ``z2`` (removed in 9.2)
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+QEMU included models of some machine types where the QEMU code that
+emulates their SoCs was very old and unmaintained. This code was
+blocking our ability to move forward with various changes across
+the codebase, and over many years nobody has been interested in
+trying to modernise it. We don't expect any of these machines to have
+a large number of users, because they're all modelling hardware that
+has now passed away into history.
We are therefore dropping support
+for all machine types using the PXA2xx and OMAP2 SoCs. We are also
+dropping the ``cheetah`` OMAP1 board, because we don't have any
+test images for it and don't know of anybody who does.
+
+Aspeed ``tacoma-bmc`` machine (removed in 10.0)
+'''''''''''''''''''''''''''''''''''''''''''''''
+
+The ``tacoma-bmc`` machine was removed because it didn't bring much
+compared to the ``rainier-bmc`` machine. Also, the ``tacoma-bmc`` was
+a board used for bring-up of the AST2600 SoC that never left the
+labs. It can be easily replaced by the ``rainier-bmc`` machine, which
+was the actual final product, or by the ``ast2600-evb`` with some
+tweaks.
+
+ppc ``ref405ep`` machine (removed in 10.0)
+''''''''''''''''''''''''''''''''''''''''''
+
+This machine was removed because the PPC 405 CPU has no known users,
+firmware images are not available, OpenWRT dropped support in 2019,
+U-Boot in 2017, and Linux in 2024.
+
+Big-Endian variants of ``petalogix-ml605`` and ``xlnx-zynqmp-pmu`` machines (removed in 10.1)
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+Both the MicroBlaze ``petalogix-ml605`` and ``xlnx-zynqmp-pmu`` machines
+were added for little endian CPUs. Big endian support was never tested
+and likely never worked. Starting with QEMU v10.1, the machines are now
+only available as little-endian machines.
+
+
linux-user mode CPUs
--------------------

@@ -1006,8 +1128,8 @@ processor IP (see `Intel discontinuance notification`_).
TCG introspection features
--------------------------

-TCG trace-events (since 6.2)
-''''''''''''''''''''''''''''
+TCG trace-events (removed in 7.0)
+'''''''''''''''''''''''''''''''''

The ability to add new TCG trace points had bit rotted and as the
feature can be replicated with TCG plugins it was removed. If
diff --git a/docs/conf.py b/docs/conf.py
index 876f6768815..f892a6e1da3 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -60,7 +60,14 @@
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['kerneldoc', 'qmp_lexer', 'hxtool', 'depfile', 'qapidoc']
+extensions = [
+    'depfile',
+    'hxtool',
+    'kerneldoc',
+    'qapi_domain',
+    'qapidoc',
+    'qmp_lexer',
+]

if sphinx.version_info[:3] > (4, 0, 0):
    tags.add('sphinx4')
@@ -87,7 +94,7 @@

# General information about the project.
project = u'QEMU'
-copyright = u'2024, The QEMU Project Developers'
+copyright = u'2025, The QEMU Project Developers'
author = u'The QEMU Project Developers'

# The version info for the project you're documenting, acts as replacement for
@@ -110,6 +117,32 @@
else:
    version = release = "unknown version"

+bits = version.split(".")
+
+major = int(bits[0])
+minor = int(bits[1])
+micro = int(bits[2])
+
+# Check for a dev snapshot, so we can adjust to next
+# predicted release version.
+#
+# This assumes we do 3 releases per year, so must bump
+# major if minor == 2
+if micro >= 50:
+    micro = 0
+    if minor == 2:
+        major += 1
+        minor = 0
+    else:
+        minor += 1
+
+# These thresholds must match the constants
+# MACHINE_VER_DELETION_MAJOR & MACHINE_VER_DEPRECATION_MAJOR
+# defined in include/hw/boards.h and the introductory text in
+# docs/about/deprecated.rst
+ver_machine_deprecation_version = "%d.%d.0" % (major - 3, minor)
+ver_machine_deletion_version = "%d.%d.0" % (major - 6, minor)
+
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# @@ -138,7 +171,18 @@ # environment variable is not set is for the benefit of readthedocs # style document building; our Makefile always sets the variable. confdir = os.getenv('CONFDIR', "/etc/qemu") -rst_epilog = ".. |CONFDIR| replace:: ``" + confdir + "``\n" + +vars = { + "CONFDIR": confdir, + "VER_MACHINE_DEPRECATION_VERSION": ver_machine_deprecation_version, + "VER_MACHINE_DELETION_VERSION": ver_machine_deletion_version, +} + +rst_epilog = "".join([ + ".. |" + key + "| replace:: ``" + vars[key] + "``\n" + for key in vars.keys() +]) + # We slurp in the defs.rst.inc and literally include it into rst_epilog, # because Sphinx's include:: directive doesn't work with absolute paths # and there isn't any one single relative path that will work for all @@ -146,6 +190,22 @@ with open(os.path.join(qemu_docdir, 'defs.rst.inc')) as f: rst_epilog += f.read() + +# Normally, the QAPI domain is picky about what field lists you use to +# describe a QAPI entity. If you'd like to use arbitrary additional +# fields in source documentation, add them here. +qapi_allowed_fields = { + "see also", +} + +# Due to a limitation in Sphinx, we need to know which indices to +# generate in advance. Adding a namespace here allows that generation. +qapi_namespaces = { + "QGA", + "QMP", + "QSD", +} + # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for @@ -186,7 +246,7 @@ ] html_context = { - "display_gitlab": True, + "source_url_prefix": "https://gitlab.com/qemu-project/qemu/-/blob/master/docs/", "gitlab_user": "qemu-project", "gitlab_repo": "qemu", "gitlab_version": "master", @@ -275,9 +335,6 @@ ('tools/qemu-trace-stap', 'qemu-trace-stap', 'QEMU SystemTap trace tool', [], 1), - ('tools/virtfs-proxy-helper', 'virtfs-proxy-helper', - 'QEMU 9p virtfs proxy filesystem helper', - ['M. Mohan Kumar'], 1), ] man_make_section_directory = False diff --git a/docs/devel/acpi-bits.rst b/docs/devel/acpi-bits.rst deleted file mode 100644 index 1ec394f5fb3..00000000000 --- a/docs/devel/acpi-bits.rst +++ /dev/null @@ -1,167 +0,0 @@ -============================================================================= -ACPI/SMBIOS avocado tests using biosbits -============================================================================= -************ -Introduction -************ -Biosbits is a software written by Josh Triplett that can be downloaded -from https://biosbits.org/. The github codebase can be found -`here `__. It is a software that -executes the bios components such as acpi and smbios tables directly through -acpica bios interpreter (a freely available C based library written by Intel, -downloadable from https://acpica.org/ and is included with biosbits) without an -operating system getting involved in between. Bios-bits has python integration -with grub so actual routines that executes bios components can be written in -python instead of bash-ish (grub's native scripting language). -There are several advantages to directly testing the bios in a real physical -machine or in a VM as opposed to indirectly discovering bios issues through the -operating system (the OS). Operating systems tend to bypass bios problems and -hide them from the end user. We have more control of what we wanted to test and -how by being as close to the bios on a running system as possible without a -complicated software component such as an operating system coming in between. 
-Another issue is that we cannot exercise bios components such as ACPI and -SMBIOS without being in the highest hardware privilege level, ring 0 for -example in case of x86. Since the OS executes from ring 0 whereas normal user -land software resides in unprivileged ring 3, operating system must be modified -in order to write our test routines that exercise and test the bios. This is -not possible in all cases. Lastly, test frameworks and routines are preferably -written using a high level scripting language such as python. OSes and -OS modules are generally written using low level languages such as C and -low level assembly machine language. Writing test routines in a low level -language makes things more cumbersome. These and other reasons makes using -bios-bits very attractive for testing bioses. More details on the inspiration -for developing biosbits and its real life uses can be found in [#a]_ and [#b]_. - -For QEMU, we maintain a fork of bios bits in gitlab along with all the -dependent submodules `here `__. -This fork contains numerous fixes, a newer acpica and changes specific to -running this avocado QEMU tests using bits. The author of this document -is the sole maintainer of the QEMU fork of bios bits repository. For more -information, please see author's `FOSDEM talk on this bios-bits based test -framework `__. - -********************************* -Description of the test framework -********************************* - -Under the directory ``tests/avocado/``, ``acpi-bits.py`` is a QEMU avocado -test that drives all this. - -A brief description of the various test files follows. - -Under ``tests/avocado/`` as the root we have: - -:: - - ├── acpi-bits - │ ├── bits-config - │ │ └── bits-cfg.txt - │ ├── bits-tests - │ ├── smbios.py2 - │ ├── testacpi.py2 - │ └── testcpuid.py2 - ├── acpi-bits.py - -* ``tests/avocado``: - - ``acpi-bits.py``: - This is the main python avocado test script that generates a - biosbits iso. It then spawns a QEMU VM with it, collects the log and reports - test failures. This is the script one would be interested in if they wanted - to add or change some component of the log parsing, add a new command line - to alter how QEMU is spawned etc. Test writers typically would not need to - modify this script unless they wanted to enhance or change the log parsing - for their tests. In order to enable debugging, you can set **V=1** - environment variable. This enables verbose mode for the test and also dumps - the entire log from bios bits and more information in case failure happens. - You can also set **BITS_DEBUG=1** to turn on debug mode. It will enable - verbose logs and also retain the temporary work directory the test used for - you to inspect and run the specific commands manually. - - In order to run this test, please perform the following steps from the QEMU - build directory: - :: - - $ make check-venv (needed only the first time to create the venv) - $ ./pyvenv/bin/avocado run -t acpi tests/avocado - - The above will run all acpi avocado tests including this one. - In order to run the individual tests, perform the following: - :: - - $ ./pyvenv/bin/avocado run tests/avocado/acpi-bits.py --tap - - - The above will produce output in tap format. 
You can omit "--tap -" in the - end and it will produce output like the following: - :: - - $ ./pyvenv/bin/avocado run tests/avocado/acpi-bits.py - Fetching asset from tests/avocado/acpi-bits.py:AcpiBitsTest.test_acpi_smbios_bits - JOB ID : eab225724da7b64c012c65705dc2fa14ab1defef - JOB LOG : /home/anisinha/avocado/job-results/job-2022-10-10T17.58-eab2257/job.log - (1/1) tests/avocado/acpi-bits.py:AcpiBitsTest.test_acpi_smbios_bits: PASS (33.09 s) - RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0 - JOB TIME : 39.22 s - - You can inspect the log file for more information about the run or in order - to diagnoze issues. If you pass V=1 in the environment, more diagnostic logs - would be found in the test log. - -* ``tests/avocado/acpi-bits/bits-config``: - - This location contains biosbits configuration files that determine how the - software runs the tests. - - ``bits-config.txt``: - This is the biosbits config file that determines what tests - or actions are performed by bits. The description of the config options are - provided in the file itself. - -* ``tests/avocado/acpi-bits/bits-tests``: - - This directory contains biosbits python based tests that are run from within - the biosbits environment in the spawned VM. New additions of test cases can - be made in the appropriate test file. For example, new acpi tests can go - into testacpi.py2 and one would call testsuite.add_test() to register the new - test so that it gets executed as a part of the ACPI tests. - It might be occasionally necessary to disable some subtests or add a new - test that belongs to a test suite not already present in this directory. To - do this, please clone the bits source from - https://gitlab.com/qemu-project/biosbits-bits/-/tree/qemu-bits. - Note that this is the "qemu-bits" branch and not the "bits" branch of the - repository. "qemu-bits" is the branch where we have made all the QEMU - specific enhancements and we must use the source from this branch only. - Copy the test suite/script that needs modification (addition of new tests - or disabling them) from python directory into this directory. For - example, in order to change cpuid related tests, copy the following - file into this directory and rename it with .py2 extension: - https://gitlab.com/qemu-project/biosbits-bits/-/blob/qemu-bits/python/testcpuid.py - Then make your additions and changes here. Therefore, the steps are: - - (a) Copy unmodified test script to this directory from bits source. - (b) Add a SPDX license header. - (c) Perform modifications to the test. - - Commits (a), (b) and (c) preferably should go under separate commits so that - the original test script and the changes we have made are separated and - clear. (a) and (b) can sometimes be combined into a single step. - - The test framework will then use your modified test script to run the test. - No further changes would be needed. Please check the logs to make sure that - appropriate changes have taken effect. - - The tests have an extension .py2 in order to indicate that: - - (a) They are python2.7 based scripts and not python 3 scripts. - (b) They are run from within the bios bits VM and is not subjected to QEMU - build/test python script maintenance and dependency resolutions. - (c) They need not be loaded by avocado framework when running tests. - - -Author: Ani Sinha - -References: ------------ -.. [#a] https://blog.linuxplumbersconf.org/2011/ocw/system/presentations/867/original/bits.pdf -.. [#b] https://www.youtube.com/watch?v=36QIepyUuhg -.. 
[#c] https://fosdem.org/2024/schedule/event/fosdem-2024-2262-exercising-qemu-generated-acpi-smbios-tables-using-biosbits-from-within-a-guest-vm-/ diff --git a/docs/devel/atomics.rst b/docs/devel/atomics.rst index b77c6e13e18..95c7b77c01e 100644 --- a/docs/devel/atomics.rst +++ b/docs/devel/atomics.rst @@ -204,7 +204,7 @@ They come in six kinds: before the second with respect to the other components of the system. Therefore, unlike ``smp_rmb()`` or ``qatomic_load_acquire()``, ``smp_read_barrier_depends()`` can be just a compiler barrier on - weakly-ordered architectures such as Arm or PPC[#]_. + weakly-ordered architectures such as Arm or PPC\ [#alpha]_. Note that the first load really has to have a _data_ dependency and not a control dependency. If the address for the second load is dependent @@ -212,7 +212,7 @@ They come in six kinds: than actually loading the address itself, then it's a _control_ dependency and a full read barrier or better is required. -.. [#] The DEC Alpha is an exception, because ``smp_read_barrier_depends()`` +.. [#alpha] The DEC Alpha is an exception, because ``smp_read_barrier_depends()`` needs a processor barrier. On strongly-ordered architectures such as x86 or s390, ``smp_rmb()`` and ``qatomic_load_acquire()`` can also be compiler barriers only. @@ -295,7 +295,7 @@ Acquire/release pairing and the *synchronizes-with* relation ------------------------------------------------------------ Atomic operations other than ``qatomic_set()`` and ``qatomic_read()`` have -either *acquire* or *release* semantics [#rmw]_. This has two effects: +either *acquire* or *release* semantics\ [#rmw]_. This has two effects: .. [#rmw] Read-modify-write operations can have both---acquire applies to the read part, and release to the write. diff --git a/docs/devel/blkdebug.txt b/docs/devel/blkdebug.txt deleted file mode 100644 index 0b0c128d356..00000000000 --- a/docs/devel/blkdebug.txt +++ /dev/null @@ -1,162 +0,0 @@ -Block I/O error injection using blkdebug ----------------------------------------- -Copyright (C) 2014-2015 Red Hat Inc - -This work is licensed under the terms of the GNU GPL, version 2 or later. See -the COPYING file in the top-level directory. - -The blkdebug block driver is a rule-based error injection engine. It can be -used to exercise error code paths in block drivers including ENOSPC (out of -space) and EIO. - -This document gives an overview of the features available in blkdebug. - -Background ----------- -Block drivers have many error code paths that handle I/O errors. Image formats -are especially complex since metadata I/O errors during cluster allocation or -while updating tables happen halfway through request processing and require -discipline to keep image files consistent. - -Error injection allows test cases to trigger I/O errors at specific points. -This way, all error paths can be tested to make sure they are correct. - -Rules ------ -The blkdebug block driver takes a list of "rules" that tell the error injection -engine when to fail an I/O request. - -Each I/O request is evaluated against the rules. If a rule matches the request -then its "action" is executed. - -Rules can be placed in a configuration file; the configuration file -follows the same .ini-like format used by QEMU's -readconfig option, and -each section of the file represents a rule. - -The following configuration file defines a single rule: - - $ cat blkdebug.conf - [inject-error] - event = "read_aio" - errno = "28" - -This rule fails all aio read requests with ENOSPC (28). 
Note that the errno -value depends on the host. On Linux, see -/usr/include/asm-generic/errno-base.h for errno values. - -Invoke QEMU as follows: - - $ qemu-system-x86_64 - -drive if=none,cache=none,file=blkdebug:blkdebug.conf:test.img,id=drive0 \ - -device virtio-blk-pci,drive=drive0,id=virtio-blk-pci0 - -Rules support the following attributes: - - event - which type of operation to match (e.g. read_aio, write_aio, - flush_to_os, flush_to_disk). See the "Events" section for - information on events. - - state - (optional) the engine must be in this state number in order for this - rule to match. See the "State transitions" section for information - on states. - - errno - the numeric errno value to return when a request matches this rule. - The errno values depend on the host since the numeric values are not - standardized in the POSIX specification. - - sector - (optional) a sector number that the request must overlap in order to - match this rule - - once - (optional, default "off") only execute this action on the first - matching request - - immediately - (optional, default "off") return a NULL BlockAIOCB - pointer and fail without an errno instead. This - exercises the code path where BlockAIOCB fails and the - caller's BlockCompletionFunc is not invoked. - -Events ------- -Block drivers provide information about the type of I/O request they are about -to make so rules can match specific types of requests. For example, the qcow2 -block driver tells blkdebug when it accesses the L1 table so rules can match -only L1 table accesses and not other metadata or guest data requests. - -The core events are: - - read_aio - guest data read - - write_aio - guest data write - - flush_to_os - write out unwritten block driver state (e.g. cached metadata) - - flush_to_disk - flush the host block device's disk cache - -See qapi/block-core.json:BlkdebugEvent for the full list of events. -You may need to grep block driver source code to understand the -meaning of specific events. - -State transitions ------------------ -There are cases where more power is needed to match a particular I/O request in -a longer sequence of requests. For example: - - write_aio - flush_to_disk - write_aio - -How do we match the 2nd write_aio but not the first? This is where state -transitions come in. - -The error injection engine has an integer called the "state" that always starts -initialized to 1. The state integer is internal to blkdebug and cannot be -observed from outside but rules can interact with it for powerful matching -behavior. - -Rules can be conditional on the current state and they can transition to a new -state. - -When a rule's "state" attribute is non-zero then the current state must equal -the attribute in order for the rule to match. - -For example, to match the 2nd write_aio: - - [set-state] - event = "write_aio" - state = "1" - new_state = "2" - - [inject-error] - event = "write_aio" - state = "2" - errno = "5" - -The first write_aio request matches the set-state rule and transitions from -state 1 to state 2. Once state 2 has been entered, the set-state rule no -longer matches since it requires state 1. But the inject-error rule now -matches the next write_aio request and injects EIO (5). - -State transition rules support the following attributes: - - event - which type of operation to match (e.g. read_aio, write_aio, - flush_to_os, flush_to_disk). See the "Events" section for - information on events. 
- - state - (optional) the engine must be in this state number in order for this - rule to match - - new_state - transition to this state number - -Suspend and resume ------------------- -Exercising code paths in block drivers may require specific ordering amongst -concurrent requests. The "breakpoint" feature allows requests to be halted on -a blkdebug event and resumed later. This makes it possible to achieve -deterministic ordering when multiple requests are in flight. - -Breakpoints on blkdebug events are associated with a user-defined "tag" string. -This tag serves as an identifier by which the request can be resumed at a later -point. - -See the qemu-io(1) break, resume, remove_break, and wait_break commands for -details. diff --git a/docs/devel/blkverify.txt b/docs/devel/blkverify.txt deleted file mode 100644 index aca826c51cc..00000000000 --- a/docs/devel/blkverify.txt +++ /dev/null @@ -1,69 +0,0 @@ -= Block driver correctness testing with blkverify = - -== Introduction == - -This document describes how to use the blkverify protocol to test that a block -driver is operating correctly. - -It is difficult to test and debug block drivers against real guests. Often -processes inside the guest will crash because corrupt sectors were read as part -of the executable. Other times obscure errors are raised by a program inside -the guest. These issues are extremely hard to trace back to bugs in the block -driver. - -Blkverify solves this problem by catching data corruption inside QEMU the first -time bad data is read and reporting the disk sector that is corrupted. - -== How it works == - -The blkverify protocol has two child block devices, the "test" device and the -"raw" device. Read/write operations are mirrored to both devices so their -state should always be in sync. - -The "raw" device is a raw image, a flat file, that has identical starting -contents to the "test" image. The idea is that the "raw" device will handle -read/write operations correctly and not corrupt data. It can be used as a -reference for comparison against the "test" device. - -After a mirrored read operation completes, blkverify will compare the data and -raise an error if it is not identical. This makes it possible to catch the -first instance where corrupt data is read. - -== Example == - -Imagine raw.img has 0xcd repeated throughout its first sector: - - $ ./qemu-io -c 'read -v 0 512' raw.img - 00000000: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................ - 00000010: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................ - [...] - 000001e0: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................ - 000001f0: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................ - read 512/512 bytes at offset 0 - 512.000000 bytes, 1 ops; 0.0000 sec (97.656 MiB/sec and 200000.0000 ops/sec) - -And test.img is corrupt, its first sector is zeroed when it shouldn't be: - - $ ./qemu-io -c 'read -v 0 512' test.img - 00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ - 00000010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ - [...] - 000001e0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ - 000001f0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ 
- read 512/512 bytes at offset 0
- 512.000000 bytes, 1 ops; 0.0000 sec (81.380 MiB/sec and 166666.6667 ops/sec)
-
-This error is caught by blkverify:
-
-  $ ./qemu-io -c 'read 0 512' blkverify:a.img:b.img
-  blkverify: read sector_num=0 nb_sectors=4 contents mismatch in sector 0
-
-A more realistic scenario is verifying the installation of a guest OS:
-
-  $ ./qemu-img create raw.img 16G
-  $ ./qemu-img create -f qcow2 test.qcow2 16G
-  $ ./qemu-system-x86_64 -cdrom debian.iso \
-       -drive file=blkverify:raw.img:test.qcow2
-
-If the installation is aborted when blkverify detects corruption, use qemu-io
-to explore the contents of the disk image at the sector in question.
diff --git a/docs/devel/build-environment.rst b/docs/devel/build-environment.rst
new file mode 100644
index 00000000000..661f6ea8504
--- /dev/null
+++ b/docs/devel/build-environment.rst
@@ -0,0 +1,118 @@
+
+.. _setup-build-env:
+
+Setup build environment
+=======================
+
+QEMU has a lot of dependencies on the host system. glib2 is used everywhere in
+the code base, and most of the other dependencies are optional.
+
+We present here simple instructions to enable native builds on most popular
+systems.
+
+You can find additional instructions on `QEMU wiki `_:
+
+- `Linux `_
+- `MacOS `_
+- `Windows `_
+- `BSD `_
+
+Note: Installing build dependencies using your package manager may miss
+dependencies that have been newly introduced in qemu.git. Moreover, it misses
+dependencies the distribution has decided to exclude.
+
+Linux
+-----
+
+Fedora
+++++++
+
+::
+
+    sudo dnf update && sudo dnf builddep qemu
+
+Debian/Ubuntu
++++++++++++++
+
+You first need to enable `Sources List `_.
+Then, use apt to install dependencies:
+
+::
+
+    sudo apt update && sudo apt build-dep qemu
+
+MacOS
+-----
+
+You first need to install `Homebrew `_. Then, use it to
+install dependencies:
+
+::
+
+    brew update && brew install $(brew deps --include-build qemu)
+
+Windows
+-------
+
+You first need to install `MSYS2 `_.
+MSYS2 offers `different environments `_.
+x86_64 environments are based on GCC, while aarch64 is based on Clang.
+
+We recommend using MINGW64 for windows-x86_64 and CLANGARM64 for windows-aarch64
+(only available on windows-aarch64 hosts).
+
+Then, you can open a Windows shell and enter the MSYS2 environment using:
+
+::
+
+    c:/msys64/msys2_shell.cmd -defterm -here -no-start -mingw64
+    # Replace -mingw64 with -clangarm64 or -ucrt64 for other environments.
+
+The MSYS2 package manager does not offer a built-in way to install build
+dependencies. You can start with this list of packages using pacman:
+
+Note: Dependencies need to be installed again if you use a different MSYS2
+environment.
+
+::
+
+    # update MSYS2 itself, you need to reopen your shell at the end.
+    pacman -Syu
+    pacman -S \
+        base-devel binutils bison diffutils flex git grep make sed \
+        ${MINGW_PACKAGE_PREFIX}-toolchain \
+        ${MINGW_PACKAGE_PREFIX}-glib2 \
+        ${MINGW_PACKAGE_PREFIX}-gtk3 \
+        ${MINGW_PACKAGE_PREFIX}-libnfs \
+        ${MINGW_PACKAGE_PREFIX}-libssh \
+        ${MINGW_PACKAGE_PREFIX}-ninja \
+        ${MINGW_PACKAGE_PREFIX}-pixman \
+        ${MINGW_PACKAGE_PREFIX}-pkgconf \
+        ${MINGW_PACKAGE_PREFIX}-python \
+        ${MINGW_PACKAGE_PREFIX}-SDL2 \
+        ${MINGW_PACKAGE_PREFIX}-zstd
+
+If you want to install all dependencies, it's possible to use the recipe used
+to build QEMU in MSYS2 itself.
+ +:: + + pacman -S wget base-devel git + wget https://raw.githubusercontent.com/msys2/MINGW-packages/refs/heads/master/mingw-w64-qemu/PKGBUILD + # Some packages may be missing for your environment, installation will still + # be done though. + makepkg --syncdeps --nobuild PKGBUILD || true + +Build on windows-aarch64 +++++++++++++++++++++++++ + +When trying to cross compile meson for x86_64 using UCRT64 or MINGW64 env, +configure will run into an error because the cpu detected is not correct. + +Meson detects x86_64 processes emulated, so you need to manually set the cpu, +and force a cross compilation (with empty prefix). + +:: + + ./configure --cpu=x86_64 --cross-prefix= + diff --git a/docs/devel/build-system.rst b/docs/devel/build-system.rst index 79eceb179de..2c884197a20 100644 --- a/docs/devel/build-system.rst +++ b/docs/devel/build-system.rst @@ -134,7 +134,7 @@ in how the build process runs Python code. At this stage, ``configure`` also queries the chosen Python interpreter about QEMU's build dependencies. Note that the build process does *not* -look for ``meson``, ``sphinx-build`` or ``avocado`` binaries in the PATH; +look for ``meson`` or ``sphinx-build`` binaries in the PATH; likewise, there are no options such as ``--meson`` or ``--sphinx-build``. This avoids a potential mismatch, where Meson and Sphinx binaries on the PATH might operate in a different Python environment than the one chosen @@ -145,13 +145,13 @@ was installed in the ``site-packages`` directory of another interpreter, or with the wrong ``pip`` program. If a package is available for the chosen interpreter, ``configure`` -prepares a small script that invokes it from the venv itself[#distlib]_. +prepares a small script that invokes it from the venv itself\ [#distlib]_. If not, ``configure`` can also optionally install dependencies in the virtual environment with ``pip``, either from wheels in ``python/wheels`` or by downloading the package with PyPI. Downloading can be disabled with ``--disable-download``; and anyway, it only happens when a ``configure`` option (currently, only ``--enable-docs``) is explicitly enabled but -the dependencies are not present[#pip]_. +the dependencies are not present. .. [#distlib] The scripts are created based on the package's metadata, specifically the ``console_script`` entry points. This is the @@ -164,15 +164,11 @@ the dependencies are not present[#pip]_. because the Python Packaging Authority provides a package ``distlib.scripts`` to perform this task. -.. [#pip] ``pip`` might also be used when running ``make check-avocado`` - if downloading is enabled, to ensure that Avocado is - available. - The required versions of the packages are stored in a configuration file ``pythondeps.toml``. The format is custom to QEMU, but it is documented at the top of the file itself and it should be easy to understand. The requirements should make it possible to use the version that is packaged -that is provided by supported distros. +by QEMU's supported distros. When dependencies are downloaded, instead, ``configure`` uses a "known good" version that is also listed in ``pythondeps.toml``. In this @@ -260,7 +256,7 @@ Target-dependent emulator sourcesets: Each emulator also includes sources for files in the ``hw/`` and ``target/`` subdirectories. The subdirectory used for each emulator comes from the target's definition of ``TARGET_BASE_ARCH`` or (if missing) - ``TARGET_ARCH``, as found in ``default-configs/targets/*.mak``. + ``TARGET_ARCH``, as found in ``configs/targets/*.mak``. 
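For orientation, a target definition file of this kind is only a handful of variable assignments. The following is an abridged, illustrative sketch (not a verbatim copy) of what ``configs/targets/aarch64-softmmu.mak`` contains::

    # configs/targets/aarch64-softmmu.mak (abridged sketch)
    TARGET_ARCH=aarch64
    TARGET_BASE_ARCH=arm

Because ``TARGET_BASE_ARCH`` is ``arm``, the ``qemu-system-aarch64`` binary is built with the sourcesets registered under ``hw/arm`` and ``target/arm``.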
Each subdirectory in ``hw/`` adds one sourceset to the ``hw_arch`` dictionary, for example:: @@ -317,8 +313,8 @@ Utility sourcesets: The following files concur in the definition of which files are linked into each emulator: -``default-configs/devices/*.mak`` - The files under ``default-configs/devices/`` control the boards and devices +``configs/devices/*.mak`` + The files under ``configs/devices/`` control the boards and devices that are built into each QEMU system emulation targets. They merely contain a list of config variable definitions such as:: @@ -327,13 +323,13 @@ into each emulator: CONFIG_XLNX_VERSAL=y ``*/Kconfig`` - These files are processed together with ``default-configs/devices/*.mak`` and + These files are processed together with ``configs/devices/*.mak`` and describe the dependencies between various features, subsystems and device models. They are described in :ref:`kconfig` -``default-configs/targets/*.mak`` +``configs/targets/*.mak`` These files mostly define symbols that appear in the ``*-config-target.h`` - file for each emulator [#cfgtarget]_. However, the ``TARGET_ARCH`` + file for each emulator\ [#cfgtarget]_. However, the ``TARGET_ARCH`` and ``TARGET_BASE_ARCH`` will also be used to select the ``hw/`` and ``target/`` subdirectories that are compiled into each target. @@ -497,8 +493,7 @@ number of dynamically created files listed later. ``pyvenv/bin``, and calling ``pip`` to install dependencies. ``tests/Makefile.include`` - Rules for external test harnesses. These include the TCG tests - and the Avocado-based integration tests. + Rules for external test harnesses like the TCG tests. ``tests/docker/Makefile.include`` Rules for Docker tests. Like ``tests/Makefile.include``, this file is diff --git a/docs/devel/ci-definitions.rst.inc b/docs/devel/ci-definitions.rst.inc deleted file mode 100644 index 6d5c6fd9f20..00000000000 --- a/docs/devel/ci-definitions.rst.inc +++ /dev/null @@ -1,121 +0,0 @@ -Definition of terms -=================== - -This section defines the terms used in this document and correlates them with -what is currently used on QEMU. - -Automated tests ---------------- - -An automated test is written on a test framework using its generic test -functions/classes. The test framework can run the tests and report their -success or failure [1]_. - -An automated test has essentially three parts: - -1. The test initialization of the parameters, where the expected parameters, - like inputs and expected results, are set up; -2. The call to the code that should be tested; -3. An assertion, comparing the result from the previous call with the expected - result set during the initialization of the parameters. If the result - matches the expected result, the test has been successful; otherwise, it has - failed. - -Unit testing ------------- - -A unit test is responsible for exercising individual software components as a -unit, like interfaces, data structures, and functionality, uncovering errors -within the boundaries of a component. The verification effort is in the -smallest software unit and focuses on the internal processing logic and data -structures. A test case of unit tests should be designed to uncover errors due -to erroneous computations, incorrect comparisons, or improper control flow [2]_. - -On QEMU, unit testing is represented by the 'check-unit' target from 'make'. - -Functional testing ------------------- - -A functional test focuses on the functional requirement of the software. 
-Deriving sets of input conditions, the functional tests should fully exercise -all the functional requirements for a program. Functional testing is -complementary to other testing techniques, attempting to find errors like -incorrect or missing functions, interface errors, behavior errors, and -initialization and termination errors [3]_. - -On QEMU, functional testing is represented by the 'check-qtest' target from -'make'. - -System testing --------------- - -System tests ensure all application elements mesh properly while the overall -functionality and performance are achieved [4]_. Some or all system components -are integrated to create a complete system to be tested as a whole. System -testing ensures that components are compatible, interact correctly, and -transfer the right data at the right time across their interfaces. As system -testing focuses on interactions, use case-based testing is a practical approach -to system testing [5]_. Note that, in some cases, system testing may require -interaction with third-party software, like operating system images, databases, -networks, and so on. - -On QEMU, system testing is represented by the 'check-avocado' target from -'make'. - -Flaky tests ------------ - -A flaky test is defined as a test that exhibits both a passing and a failing -result with the same code on different runs. Some usual reasons for an -intermittent/flaky test are async wait, concurrency, and test order dependency -[6]_. - -Gating ------- - -A gate restricts the move of code from one stage to another on a -test/deployment pipeline. The step move is granted with approval. The approval -can be a manual intervention or a set of tests succeeding [7]_. - -On QEMU, the gating process happens during the pull request. The approval is -done by the project leader running its own set of tests. The pull request gets -merged when the tests succeed. - -Continuous Integration (CI) ---------------------------- - -Continuous integration (CI) requires the builds of the entire application and -the execution of a comprehensive set of automated tests every time there is a -need to commit any set of changes [8]_. The automated tests can be composed of -the unit, functional, system, and other tests. - -Keynotes about continuous integration (CI) [9]_: - -1. System tests may depend on external software (operating system images, - firmware, database, network). -2. It may take a long time to build and test. It may be impractical to build - the system being developed several times per day. -3. If the development platform is different from the target platform, it may - not be possible to run system tests in the developer’s private workspace. - There may be differences in hardware, operating system, or installed - software. Therefore, more time is required for testing the system. - -References ----------- - -.. [1] Sommerville, Ian (2016). Software Engineering. p. 233. -.. [2] Pressman, Roger S. & Maxim, Bruce R. (2020). Software Engineering, - A Practitioner’s Approach. p. 48, 376, 378, 381. -.. [3] Pressman, Roger S. & Maxim, Bruce R. (2020). Software Engineering, - A Practitioner’s Approach. p. 388. -.. [4] Pressman, Roger S. & Maxim, Bruce R. (2020). Software Engineering, - A Practitioner’s Approach. Software Engineering, p. 377. -.. [5] Sommerville, Ian (2016). Software Engineering. p. 59, 232, 240. -.. [6] Luo, Qingzhou, et al. An empirical analysis of flaky tests. - Proceedings of the 22nd ACM SIGSOFT International Symposium on - Foundations of Software Engineering. 2014. -.. 
[7] Humble, Jez & Farley, David (2010). Continuous Delivery: - Reliable Software Releases Through Build, Test, and Deployment, p. 122. -.. [8] Humble, Jez & Farley, David (2010). Continuous Delivery: - Reliable Software Releases Through Build, Test, and Deployment, p. 55. -.. [9] Sommerville, Ian (2016). Software Engineering. p. 743. diff --git a/docs/devel/ci.rst b/docs/devel/ci.rst deleted file mode 100644 index ed88a2010be..00000000000 --- a/docs/devel/ci.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. _ci: - -== -CI -== - -Most of QEMU's CI is run on GitLab's infrastructure although a number -of other CI services are used for specialised purposes. The most up to -date information about them and their status can be found on the -`project wiki testing page `_. - -.. include:: ci-definitions.rst.inc -.. include:: ci-jobs.rst.inc -.. include:: ci-runners.rst.inc diff --git a/docs/devel/clocks.rst b/docs/devel/clocks.rst index 177ee1c90d7..3f744f2be1e 100644 --- a/docs/devel/clocks.rst +++ b/docs/devel/clocks.rst @@ -358,6 +358,12 @@ humans (for instance in debugging), use ``clock_display_freq()``, which returns a prettified string-representation, e.g. "33.3 MHz". The caller must free the string with g_free() after use. +It's also possible to retrieve the clock period from a QTest by +accessing QOM property ``qtest-clock-period`` using a QMP command. +This property is only present when the device is being run under +the ``qtest`` accelerator; it is not available when QEMU is +being run normally. + Calculating expiry deadlines ---------------------------- diff --git a/docs/devel/code-provenance.rst b/docs/devel/code-provenance.rst new file mode 100644 index 00000000000..b5aae2e2532 --- /dev/null +++ b/docs/devel/code-provenance.rst @@ -0,0 +1,338 @@ +.. _code-provenance: + +Code provenance +=============== + +Certifying patch submissions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The QEMU community **mandates** all contributors to certify provenance of +patch submissions they make to the project. To put it another way, +contributors must indicate that they are legally permitted to contribute to +the project. + +Certification is achieved with a low overhead by adding a single line to the +bottom of every git commit:: + + Signed-off-by: YOUR NAME + +The addition of this line asserts that the author of the patch is contributing +in accordance with the clauses specified in the +`Developer's Certificate of Origin `__: + +.. _dco: + + Developer's Certificate of Origin 1.1 + + By making a contribution to this project, I certify that: + + (a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + + (b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + + (c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ + (d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. + +The name used with "Signed-off-by" does not need to be your legal name, nor +birth name, nor appear on any government ID. It is the identity you choose to +be known by in the community, but should not be anonymous, nor misrepresent +whom you are. + +It is generally expected that the name and email addresses used in one of the +``Signed-off-by`` lines, matches that of the git commit ``Author`` field. +It's okay if you subscribe or contribute to the list via more than one +address, but using multiple addresses in one commit just confuses +things. + +If the person sending the mail is not one of the patch authors, they are +nonetheless expected to add their own ``Signed-off-by`` to comply with the +DCO clause (c). + +Multiple authorship +~~~~~~~~~~~~~~~~~~~ + +It is not uncommon for a patch to have contributions from multiple authors. In +this scenario, git commits will usually be expected to have a ``Signed-off-by`` +line for each contributor involved in creation of the patch. Some edge cases: + + * The non-primary author's contributions were so trivial that they can be + considered not subject to copyright. In this case the secondary authors + need not include a ``Signed-off-by``. + + This case most commonly applies where QEMU reviewers give short snippets + of code as suggested fixes to a patch. The reviewers don't need to have + their own ``Signed-off-by`` added unless their code suggestion was + unusually large, but it is common to add ``Suggested-by`` as a credit + for non-trivial code. + + * Both contributors work for the same employer and the employer requires + copyright assignment. + + It can be said that in this case a ``Signed-off-by`` is indicating that + the person has permission to contribute from their employer who is the + copyright holder. It is nonetheless still preferable to include a + ``Signed-off-by`` for each contributor, as in some countries employees are + not able to assign copyright to their employer, and it also covers any + time invested outside working hours. + +When multiple ``Signed-off-by`` tags are present, they should be strictly kept +in order of authorship, from oldest to newest. + +Other commit tags +~~~~~~~~~~~~~~~~~ + +While the ``Signed-off-by`` tag is mandatory, there are a number of other tags +that are commonly used during QEMU development: + + * **``Reviewed-by``**: when a QEMU community member reviews a patch on the + mailing list, if they consider the patch acceptable, they should send an + email reply containing a ``Reviewed-by`` tag. Subsystem maintainers who + review a patch should add this even if they are also adding their + ``Signed-off-by`` to the same commit. + + * **``Acked-by``**: when a QEMU subsystem maintainer approves a patch that + touches their subsystem, but intends to allow a different maintainer to + queue it and send a pull request, they would send a mail containing a + ``Acked-by`` tag. Where a patch touches multiple subsystems, ``Acked-by`` + only implies review of the maintainers' own areas of responsibility. If a + maintainer wants to indicate they have done a full review they should use + a ``Reviewed-by`` tag. 
+ + * **``Tested-by``**: when a QEMU community member has functionally tested the + behaviour of the patch in some manner, they should send an email reply + containing a ``Tested-by`` tag. + + * **``Reported-by``**: when a QEMU community member reports a problem via the + mailing list, or some other informal channel that is not the issue tracker, + it is good practice to credit them by including a ``Reported-by`` tag on + any patch fixing the issue. When the problem is reported via the GitLab + issue tracker, however, it is sufficient to just include a link to the + issue. + + * **``Suggested-by``**: when a reviewer or other 3rd party makes non-trivial + suggestions for how to change a patch, it is good practice to credit them + by including a ``Suggested-by`` tag. + +Subsystem maintainer requirements +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When a subsystem maintainer accepts a patch from a contributor, in addition to +the normal code review points, they are expected to validate the presence of +suitable ``Signed-off-by`` tags. + +At the time they queue the patch in their subsystem tree, the maintainer +**must** also then add their own ``Signed-off-by`` to indicate that they have +done the aforementioned validation. This is in addition to any of their own +``Reviewed-by`` tags the subsystem maintainer may wish to include. + +When the maintainer modifies the patch after pulling into their tree, they +should record their contribution. This is typically done via a note in the +commit message, just prior to the maintainer's ``Signed-off-by``:: + + Signed-off-by: Cory Contributor + [Comment rephrased for clarity] + Signed-off-by: Mary Maintainer + + +Tools for adding ``Signed-off-by`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There are a variety of ways tools can support adding ``Signed-off-by`` tags +for patches, avoiding the need for contributors to manually type in this +repetitive text each time. + +git commands +^^^^^^^^^^^^ + +When creating, or amending, a commit the ``-s`` flag to ``git commit`` will +append a suitable line matching the configured git author details. + +If preparing patches using the ``git format-patch`` tool, the ``-s`` flag can +be used to append a suitable line in the emails it creates, without modifying +the local commits. Alternatively to modify all the local commits on a branch:: + + git rebase master -x 'git commit --amend --no-edit -s' + +emacs +^^^^^ + +In the file ``$HOME/.emacs.d/abbrev_defs`` add: + +.. code:: elisp + + (define-abbrev-table 'global-abbrev-table + '( + ("8rev" "Reviewed-by: YOUR NAME " nil 1) + ("8ack" "Acked-by: YOUR NAME " nil 1) + ("8test" "Tested-by: YOUR NAME " nil 1) + ("8sob" "Signed-off-by: YOUR NAME " nil 1) + )) + +with this change, if you type (for example) ``8rev`` followed by ```` +or ```` it will expand to the whole phrase. + +vim +^^^ + +In the file ``$HOME/.vimrc`` add:: + + iabbrev 8rev Reviewed-by: YOUR NAME + iabbrev 8ack Acked-by: YOUR NAME + iabbrev 8test Tested-by: YOUR NAME + iabbrev 8sob Signed-off-by: YOUR NAME + +with this change, if you type (for example) ``8rev`` followed by ```` +or ```` it will expand to the whole phrase. + +Re-starting abandoned work +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For a variety of reasons there are some patches that get submitted to QEMU but +never merged. An unrelated contributor may decide (months or years later) to +continue working from the abandoned patch and re-submit it with extra changes. 
+
+The general principles when picking up abandoned work are:
+
+ * Continue to credit the original author for their work, by maintaining their
+   original ``Signed-off-by``
+ * Indicate where the original patch was obtained from (mailing list, bug
+   tracker, author's git repo, etc) when sending it for review
+ * Acknowledge the extra work of the new contributor by including their
+   ``Signed-off-by`` in the patch in addition to the original author's
+ * Indicate who is responsible for what parts of the patch. This is typically
+   done via a note in the commit message, just prior to the new contributor's
+   ``Signed-off-by``::
+
+     Signed-off-by: Some Person
+     [Rebased and added support for 'foo']
+     Signed-off-by: New Person
+
+In complicated cases, or if otherwise unsure, ask for advice on the project
+mailing list.
+
+It is also recommended to attempt to contact the original author to let them
+know you are interested in taking over their work, in case they still intended
+to return to the work, or had any suggestions about the best way to continue.
+
+Inclusion of generated files
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Files in patches contributed to QEMU are generally expected to be provided
+only in the preferred format for making modifications. The implication of
+this is that the output of code generators or compilers is usually not
+appropriate to contribute to QEMU.
+
+For reasons of practicality there are some exceptions to this rule, where
+generated code is permitted, provided it is also accompanied by the
+corresponding preferred source format. This is done where it is impractical
+to expect those building QEMU to run the code generation or compilation
+process. A non-exhaustive list of examples is:
+
+ * Images: where a bitmap image is created from a vector file it is common
+   to include the rendered bitmaps at desired resolution(s), since subtle
+   changes in the rasterization process / tools may affect quality. The
+   original vector file is expected to accompany any generated bitmaps.
+
+ * Firmware: QEMU includes pre-compiled binary ROMs for a variety of guest
+   firmwares. When such binary ROMs are contributed, the corresponding source
+   must also be provided, either directly, or through a git submodule link.
+
+ * Dockerfiles: the majority of the dockerfiles are automatically generated
+   from a canonical list of build dependencies maintained in tree, together
+   with the libvirt-ci git submodule link. The generated dockerfiles are
+   included in tree because it is desirable to be able to directly build
+   container images from a clean git checkout.
+
+ * eBPF: QEMU includes some generated eBPF machine code, since the required
+   eBPF compilation tools are not broadly available on all targeted OS
+   distributions. The corresponding eBPF C code for the binary is also
+   provided. This is a time-limited exception until the eBPF toolchain is
+   sufficiently broadly available in distros.
+
+In all cases above, the existence of generated files must be acknowledged
+and justified in the commit that introduces them.
+
+Tools which perform changes to existing code with deterministic algorithmic
+manipulation, driven by user specified inputs, are not generally considered
+to be "generators".
+
+For instance, using Coccinelle to convert code from one pattern to another
+pattern, or fixing documentation typos with a spell checker, or transforming
+code using sed / awk / etc, are not considered to be acts of code
+generation. Where an automated manipulation is performed on code, however,
+this should be declared in the commit message.
+
+At times contributors may use or create scripts/tools to generate an initial
+boilerplate code template which is then filled in to produce the final patch.
+The output of such a tool would still be considered the "preferred format",
+since it is intended to be a foundation for further human authored changes.
+Such tools are acceptable to use, provided there is clearly defined copyright
+and licensing for their output. Note in particular the caveats applying to AI
+content generators below.
+
+Use of AI content generators
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+TL;DR:
+
+  **Current QEMU project policy is to DECLINE any contributions which are
+  believed to include or derive from AI generated content. This includes
+  ChatGPT, Claude, Copilot, Llama and similar tools.**
+
+The increasing prevalence of AI-assisted software development results in a
+number of difficult legal questions and risks for software projects, including
+QEMU. Of particular concern is content generated by `Large Language Models
+`__ (LLMs).
+
+The QEMU community requires that contributors certify their patch submissions
+are made in accordance with the rules of the `Developer's Certificate of
+Origin (DCO) `.
+
+To satisfy the DCO, the patch contributor has to fully understand the
+copyright and license status of content they are contributing to QEMU. With AI
+content generators, the copyright and license status of the output is
+ill-defined with no generally accepted, settled legal foundation.
+
+Where the training material is known, it is common for it to include large
+volumes of material under restrictive licensing/copyright terms. Even where
+the training material is all known to be under open source licenses, it is
+likely to be under a variety of terms, not all of which will be compatible
+with QEMU's licensing requirements.
+
+How contributors could comply with DCO terms (b) or (c) for the output of AI
+content generators commonly available today is unclear. The QEMU project is
+not willing or able to accept the legal risks of non-compliance.
+
+The QEMU project thus requires that contributors refrain from using AI content
+generators on patches intended to be submitted to the project, and will
+decline any contribution if use of AI is either known or suspected.
+
+This policy does not apply to other uses of AI, such as researching APIs or
+algorithms, static analysis, or debugging, provided their output is not to be
+included in contributions.
+
+Examples of tools impacted by this policy include GitHub's Copilot, OpenAI's
+ChatGPT, Anthropic's Claude, and Meta's Code Llama, and code/content
+generation agents which are built on top of such tools.
+
+This policy may evolve as AI tools mature and the legal situation is
+clarified. In the meantime, requests for exceptions to this policy will be
+evaluated by the QEMU project on a case by case basis. To be granted an
+exception, a contributor will need to demonstrate clarity of the license and
+copyright status for the tool's output in relation to its training model and
+code, to the satisfaction of the project maintainers.
diff --git a/docs/devel/codebase.rst b/docs/devel/codebase.rst
new file mode 100644
index 00000000000..2a3143787a6
--- /dev/null
+++ b/docs/devel/codebase.rst
@@ -0,0 +1,215 @@
+========
+Codebase
+========
+
+This section presents the various parts of QEMU and how the codebase is
+organized.
+ +Beyond giving succinct descriptions, the goal is to offer links to various +parts of the documentation/codebase. + +Subsystems +---------- + +An exhaustive list of subsystems and associated files can be found in the +`MAINTAINERS `_ +file. + +Some of the main QEMU subsystems are: + +- `Accelerators` +- Block devices and `disk images` support +- `CI` and `Tests` +- `Devices` & Board models +- `Documentation ` +- `GDB support` +- :ref:`Migration` +- `Monitor` +- :ref:`QOM (QEMU Object Model)` +- `System mode` +- :ref:`TCG (Tiny Code Generator)` +- `User mode` (`Linux` & `BSD`) +- User Interfaces + +More documentation on QEMU subsystems can be found on :ref:`internal-subsystem` +page. + +The Grand tour +-------------- + +We present briefly here what every folder in the top directory of the codebase +contains. Hop on! + +The folder name links here will take you to that folder in our gitlab +repository. Other links will take you to more detailed documentation for that +subsystem, where we have it. Unfortunately not every subsystem has documentation +yet, so sometimes the source code is all you have. + +* `accel `_: + Infrastructure and architecture agnostic code related to the various + `accelerators ` supported by QEMU + (TCG, KVM, hvf, whpx, xen, nvmm). + Contains interfaces for operations that will be implemented per + `target `_. +* `audio `_: + Audio (host) support. +* `authz `_: + `QEMU Authorization framework`. +* `backends `_: + Various backends that are used to access resources on the host (e.g. for + random number generation, memory backing or cryptographic functions). +* `block `_: + Block devices and `image formats` implementation. +* `bsd-user `_: + `BSD User mode`. +* build: Where the code built goes by default. You can tell the QEMU build + system to put the built code anywhere else you like. +* `chardev `_: + Various backends used by char devices. +* `common-user `_: + User-mode assembly code for dealing with signals occurring during syscalls. +* `configs `_: + Makefiles defining configurations to build QEMU. +* `contrib `_: + Community contributed devices/plugins/tools. +* `crypto `_: + Cryptographic algorithms used in QEMU. +* `disas `_: + Disassembly functions used by QEMU target code. +* `docs `_: + QEMU Documentation. +* `dump `_: + Code to dump memory of a running VM. +* `ebpf `_: + eBPF program support in QEMU. `virtio-net RSS` uses it. +* `fpu `_: + Floating-point software emulation. +* `fsdev `_: + `VirtFS `_ support. +* `gdbstub `_: + `GDB ` support. +* `gdb-xml `_: + Set of XML files describing architectures and used by `gdbstub `. +* `host `_: + Various architecture specific header files (crypto, atomic, memory + operations). +* `linux-headers `_: + A subset of headers imported from Linux kernel and used for implementing + KVM support and user-mode. +* `linux-user `_: + `User mode ` implementation. Contains one folder per target + architecture. +* `.gitlab-ci.d `_: + `CI ` yaml and scripts. +* `include `_: + All headers associated to different subsystems in QEMU. The hierarchy used + mirrors source code organization and naming. +* `hw `_: + `Devices ` and boards emulation. Devices are categorized by + type/protocol/architecture and located in associated subfolder. +* `io `_: + QEMU `I/O channels `_. +* `libdecnumber `_: + Import of gcc library, used to implement decimal number arithmetic. +* `migration `__: + :ref:`Migration framework `. +* `monitor `_: + `Monitor ` implementation (HMP & QMP). +* `nbd `_: + QEMU NBD (Network Block Device) server. 
+* `net `_: + Network (host) support. +* `pc-bios `_: + Contains pre-built firmware binaries and boot images, ready to use in + QEMU without compilation. +* `plugins `_: + :ref:`TCG plugins ` core implementation. Plugins can be found in + `tests `__ + and `contrib `__ + folders. +* `po `_: + Translation files. +* `python `_: + Python part of our build/test system. +* `qapi `_: + `QAPI ` implementation. +* `qobject `_: + QEMU Object implementation. +* `qga `_: + QEMU `Guest agent ` implementation. +* `qom `_: + QEMU :ref:`Object model ` implementation, with monitor associated commands. +* `replay `_: + QEMU :ref:`Record/replay ` implementation. +* `roms `_: + Contains source code for various firmware and ROMs, which can be compiled if + custom or updated versions are needed. +* `rust `_: + Rust integration in QEMU. It contains the new interfaces defined and + associated devices using it. +* `scripts `_: + Collection of scripts used in build and test systems, and various + tools for QEMU codebase and execution traces. +* `scsi `_: + Code related to SCSI support, used by SCSI devices. +* `semihosting `_: + QEMU `Semihosting ` implementation. +* `stats `_: + `Monitor ` stats commands implementation. +* `storage-daemon `_: + QEMU `Storage daemon ` implementation. +* `stubs `_: + Various stubs (empty functions) used to compile QEMU with specific + configurations. +* `subprojects `_: + QEMU submodules used by QEMU build system. +* `system `_: + QEMU `system mode ` implementation (cpu, mmu, boot support). +* `target `_: + Contains code for all target architectures supported (one subfolder + per arch). For every architecture, you can find accelerator specific + implementations. +* `tcg `_: + :ref:`TCG ` related code. + Contains one subfolder per host supported architecture. +* `tests `_: + QEMU `test ` suite + + - `data `_: + Data for various tests. + - `decode `_: + Testsuite for :ref:`decodetree ` implementation. + - `docker `_: + Code and scripts to create `containers ` used in `CI `. + - `fp `_: + QEMU testsuite for soft float implementation. + - `functional `_: + `Functional tests ` (full VM boot). + - `lcitool `_: + Generate dockerfiles for CI containers. + - `migration `_: + Test scripts and data for :ref:`Migration framework `. + - `multiboot `_: + Test multiboot functionality for x86_64/i386. + - `qapi-schema `_: + Test scripts and data for `QAPI `. + - `qemu-iotests `_: + `Disk image and block tests `. + - `qtest `_: + `Device emulation testing `. + - `tcg `__: + `TCG related tests `. Contains code per architecture + (subfolder) and multiarch tests as well. + - `tsan `_: + `Suppressions ` for thread sanitizer. + - `uefi-test-tools `_: + Test tool for UEFI support. + - `unit `_: + QEMU `Unit tests `. +* `trace `_: + :ref:`Tracing framework `. Used to print information associated to various + events during execution. +* `ui `_: + QEMU User interfaces. +* `util `_: + Utility code used by other parts of QEMU. diff --git a/docs/devel/control-flow-integrity.rst b/docs/devel/control-flow-integrity.rst index e6b73a4fe1a..3d5702fa4cc 100644 --- a/docs/devel/control-flow-integrity.rst +++ b/docs/devel/control-flow-integrity.rst @@ -1,3 +1,5 @@ +.. _cfi: + ============================ Control-Flow Integrity (CFI) ============================ diff --git a/docs/devel/decodetree.rst b/docs/devel/decodetree.rst index e3392aa7057..98ad33a4870 100644 --- a/docs/devel/decodetree.rst +++ b/docs/devel/decodetree.rst @@ -1,3 +1,5 @@ +.. 
_decodetree: + ======================== Decodetree Specification ======================== diff --git a/docs/devel/ebpf_rss.rst b/docs/devel/ebpf_rss.rst index 4a68682b31a..ed5d33767bd 100644 --- a/docs/devel/ebpf_rss.rst +++ b/docs/devel/ebpf_rss.rst @@ -1,3 +1,5 @@ +.. _ebpf-rss: + =========================== eBPF RSS virtio-net support =========================== diff --git a/docs/devel/index-api.rst b/docs/devel/index-api.rst index fe01b2b488d..1c487c152ab 100644 --- a/docs/devel/index-api.rst +++ b/docs/devel/index-api.rst @@ -9,6 +9,7 @@ generated from in-code annotations to function prototypes. bitops loads-stores + lockcnt memory modules pci diff --git a/docs/devel/index-build.rst b/docs/devel/index-build.rst index 90b406ca0ed..3f3cb21b9b4 100644 --- a/docs/devel/index-build.rst +++ b/docs/devel/index-build.rst @@ -1,20 +1,16 @@ -QEMU Build and Test System --------------------------- +QEMU Build System +----------------- -Details about how QEMU's build system works and how it is integrated -into our testing infrastructure. You will need to understand some of -the basics if you are adding new files and targets to the build. +Details about how QEMU's build system works. You will need to understand +some of the basics if you are adding new files and targets to the build. .. toctree:: :maxdepth: 3 build-system + build-environment kconfig docs - testing - acpi-bits - qtest - ci qapi-code-gen - fuzzing + qapi-domain control-flow-integrity diff --git a/docs/devel/index-internals.rst b/docs/devel/index-internals.rst index 4ac7725d728..7a0678cbdd3 100644 --- a/docs/devel/index-internals.rst +++ b/docs/devel/index-internals.rst @@ -1,3 +1,5 @@ +.. _internal-subsystem: + Internal Subsystem Information ------------------------------ @@ -8,6 +10,7 @@ Details about QEMU's various subsystems including how to add features to them. qom atomics + rcu block-coroutine-wrapper clocks ebpf_rss @@ -17,7 +20,9 @@ Details about QEMU's various subsystems including how to add features to them. s390-cpu-topology s390-dasd-ipl tracing + uefi-vars vfio-iommufd writing-monitor-commands virtio-backends crypto + multiple-iothreads diff --git a/docs/devel/index-process.rst b/docs/devel/index-process.rst index 362f97ee300..5807752d704 100644 --- a/docs/devel/index-process.rst +++ b/docs/devel/index-process.rst @@ -13,7 +13,9 @@ Notes about how to interact with the community and how and where to submit patch maintainers style submitting-a-patch + code-provenance trivial-patches stable-process submitting-a-pull-request secure-coding-practices + rust diff --git a/docs/devel/index.rst b/docs/devel/index.rst index abf60457c29..29f032d6a82 100644 --- a/docs/devel/index.rst +++ b/docs/devel/index.rst @@ -31,6 +31,8 @@ the :ref:`tcg_internals`. index-process index-build + testing/index index-api index-internals index-tcg + codebase diff --git a/docs/devel/kconfig.rst b/docs/devel/kconfig.rst index 52d4b905f67..493b76c4fbf 100644 --- a/docs/devel/kconfig.rst +++ b/docs/devel/kconfig.rst @@ -38,7 +38,7 @@ originated in the Linux kernel, though it was heavily simplified and the handling of dependencies is stricter in QEMU. Unlike Linux, there is no user interface to edit the configuration, which -is instead specified in per-target files under the ``default-configs/`` +is instead specified in per-target files under the ``configs/`` directory of the QEMU source tree. 
This is because, unlike Linux, configuration and dependencies can be treated as a black box when building QEMU; the default configuration that QEMU ships with should be okay in @@ -103,7 +103,7 @@ directives can be included: **default value**: ``default [if ]`` Default values are assigned to the config symbol if no other value was - set by the user via ``default-configs/*.mak`` files, and only if + set by the user via ``configs/*.mak`` files, and only if ``select`` or ``depends on`` directives do not force the value to true or false respectively. ```` can be ``y`` or ``n``; it cannot be an arbitrary Boolean expression. However, a condition for applying @@ -119,7 +119,7 @@ directives can be included: This is similar to ``select`` as it applies a lower limit of ``y`` to another symbol. However, the lower limit is only a default and the "implied" symbol's value may still be set to ``n`` from a - ``default-configs/*.mak`` files. The following two examples are + ``configs/*.mak`` files. The following two examples are equivalent:: config FOO @@ -146,7 +146,7 @@ declares its dependencies in different ways: bool Subsystems always default to false (they have no ``default`` directive) - and are never visible in ``default-configs/*.mak`` files. It's + and are never visible in ``configs/*.mak`` files. It's up to other symbols to ``select`` whatever subsystems they require. They sometimes have ``select`` directives to bring in other required @@ -238,7 +238,7 @@ declares its dependencies in different ways: include libraries (such as ``FDT``) or ``TARGET_BIG_ENDIAN`` (possibly negated). - Boards are listed for convenience in the ``default-configs/*.mak`` + Boards are listed for convenience in the ``configs/*.mak`` for the target they apply to. **internal elements** @@ -251,18 +251,18 @@ declares its dependencies in different ways: Internal elements group code that is useful in several boards or devices. They are usually enabled with ``select`` and in turn select - other elements; they are never visible in ``default-configs/*.mak`` + other elements; they are never visible in ``configs/*.mak`` files, and often not even in the Makefile. Writing and modifying default configurations -------------------------------------------- In addition to the Kconfig files under hw/, each target also includes -a file called ``default-configs/TARGETNAME-softmmu.mak``. These files +a file called ``configs/TARGETNAME-softmmu.mak``. These files initialize some Kconfig variables to non-default values and provide the starting point to turn on devices and subsystems. -A file in ``default-configs/`` looks like the following example:: +A file in ``configs/`` looks like the following example:: # Default configuration for alpha-softmmu diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst index ec627aa9c06..9471bac8599 100644 --- a/docs/devel/loads-stores.rst +++ b/docs/devel/loads-stores.rst @@ -95,7 +95,7 @@ guest CPU state in case of a guest CPU exception. This is passed to ``cpu_restore_state()``. Therefore the value should either be 0, to indicate that the guest CPU state is already synchronized, or the result of ``GETPC()`` from the top level ``HELPER(foo)`` -function, which is a return address into the generated code [#gpc]_. +function, which is a return address into the generated code\ [#gpc]_. .. 
[#gpc] Note that ``GETPC()`` should be used with great care: calling it in other functions that are *not* the top level diff --git a/docs/devel/lockcnt.rst b/docs/devel/lockcnt.rst new file mode 100644 index 00000000000..8b43578f6c7 --- /dev/null +++ b/docs/devel/lockcnt.rst @@ -0,0 +1,278 @@ +Locked Counters (aka ``QemuLockCnt``) +===================================== + +QEMU often uses reference counts to track data structures that are being +accessed and should not be freed. For example, a loop that invoke +callbacks like this is not safe:: + + QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) { + if (ioh->revents & G_IO_OUT) { + ioh->fd_write(ioh->opaque); + } + } + +``QLIST_FOREACH_SAFE`` protects against deletion of the current node (``ioh``) +by stashing away its ``next`` pointer. However, ``ioh->fd_write`` could +actually delete the next node from the list. The simplest way to +avoid this is to mark the node as deleted, and remove it from the +list in the above loop:: + + QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) { + if (ioh->deleted) { + QLIST_REMOVE(ioh, next); + g_free(ioh); + } else { + if (ioh->revents & G_IO_OUT) { + ioh->fd_write(ioh->opaque); + } + } + } + +If however this loop must also be reentrant, i.e. it is possible that +``ioh->fd_write`` invokes the loop again, some kind of counting is needed:: + + walking_handlers++; + QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) { + if (ioh->deleted) { + if (walking_handlers == 1) { + QLIST_REMOVE(ioh, next); + g_free(ioh); + } + } else { + if (ioh->revents & G_IO_OUT) { + ioh->fd_write(ioh->opaque); + } + } + } + walking_handlers--; + +One may think of using the RCU primitives, ``rcu_read_lock()`` and +``rcu_read_unlock()``; effectively, the RCU nesting count would take +the place of the walking_handlers global variable. Indeed, +reference counting and RCU have similar purposes, but their usage in +general is complementary: + +- reference counting is fine-grained and limited to a single data + structure; RCU delays reclamation of *all* RCU-protected data + structures; + +- reference counting works even in the presence of code that keeps + a reference for a long time; RCU critical sections in principle + should be kept short; + +- reference counting is often applied to code that is not thread-safe + but is reentrant; in fact, usage of reference counting in QEMU predates + the introduction of threads by many years. RCU is generally used to + protect readers from other threads freeing memory after concurrent + modifications to a data structure. + +- reclaiming data can be done by a separate thread in the case of RCU; + this can improve performance, but also delay reclamation undesirably. + With reference counting, reclamation is deterministic. + +This file documents ``QemuLockCnt``, an abstraction for using reference +counting in code that has to be both thread-safe and reentrant. + + +``QemuLockCnt`` concepts +------------------------ + +A ``QemuLockCnt`` comprises both a counter and a mutex; it has primitives +to increment and decrement the counter, and to take and release the +mutex. The counter notes how many visits to the data structures are +taking place (the visits could be from different threads, or there could +be multiple reentrant visits from the same thread). The basic rules +governing the counter/mutex pair then are the following: + +- Data protected by the QemuLockCnt must not be freed unless the + counter is zero and the mutex is taken. 
+ +- A new visit cannot be started while the counter is zero and the + mutex is taken. + +Most of the time, the mutex protects all writes to the data structure, +not just frees, though there could be cases where this is not necessary. + +Reads, instead, can be done without taking the mutex, as long as the +readers and writers use the same macros that are used for RCU, for +example ``qatomic_rcu_read``, ``qatomic_rcu_set``, ``QLIST_FOREACH_RCU``, +etc. This is because the reads are done outside a lock and a set +or ``QLIST_INSERT_HEAD`` +can happen concurrently with the read. The RCU API ensures that the +processor and the compiler see all required memory barriers. + +This could be implemented simply by protecting the counter with the +mutex, for example:: + + // (1) + qemu_mutex_lock(&walking_handlers_mutex); + walking_handlers++; + qemu_mutex_unlock(&walking_handlers_mutex); + + ... + + // (2) + qemu_mutex_lock(&walking_handlers_mutex); + if (--walking_handlers == 0) { + QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) { + if (ioh->deleted) { + QLIST_REMOVE(ioh, next); + g_free(ioh); + } + } + } + qemu_mutex_unlock(&walking_handlers_mutex); + +Here, no frees can happen in the code represented by the ellipsis. +If another thread is executing critical section (2), that part of +the code cannot be entered, because the thread will not be able +to increment the ``walking_handlers`` variable. And of course +during the visit any other thread will see a nonzero value for +``walking_handlers``, as in the single-threaded code. + +Note that it is possible for multiple concurrent accesses to delay +the cleanup arbitrarily; in other words, for the ``walking_handlers`` +counter to never become zero. For this reason, this technique is +more easily applicable if concurrent access to the structure is rare. + +However, critical sections are easy to forget since you have to do +them for each modification of the counter. ``QemuLockCnt`` ensures that +all modifications of the counter take the lock appropriately, and it +can also be more efficient in two ways: + +- it avoids taking the lock for many operations (for example + incrementing the counter while it is non-zero); + +- on some platforms, one can implement ``QemuLockCnt`` to hold the lock + and the mutex in a single word, making the fast path no more expensive + than simply managing a counter using atomic operations (see + :doc:`atomics`). This can be very helpful if concurrent access to + the data structure is expected to be rare. + + +Using the same mutex for frees and writes can still incur some small +inefficiencies; for example, a visit can never start if the counter is +zero and the mutex is taken -- even if the mutex is taken by a write, +which in principle need not block a visit of the data structure. +However, these are usually not a problem if any of the following +assumptions are valid: + +- concurrent access is possible but rare + +- writes are rare + +- writes are frequent, but this kind of write (e.g. appending to a + list) has a very small critical section. + +For example, QEMU uses ``QemuLockCnt`` to manage an ``AioContext``'s list of +bottom halves and file descriptor handlers. Modifications to the list +of file descriptor handlers are rare. Creation of a new bottom half is +frequent and can happen on a fast path; however: 1) it is almost never +concurrent with a visit to the list of bottom halves; 2) it only has +three instructions in the critical path, two assignments and a ``smp_wmb()``. 
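+
+The following is a minimal, self-contained sketch of the "single word" idea
+mentioned above. It is purely illustrative: the ``toy_lockcnt_*`` names and
+the exact bit layout are invented for this example and do not correspond to
+QEMU's real ``QemuLockCnt`` implementation or API. The low bit of an atomic
+word serves as the lock flag, the remaining bits hold the counter, so the
+common increment/decrement path costs a single atomic operation::
+
+    /* Illustrative sketch only; not QEMU code. */
+    #include <stdatomic.h>
+
+    typedef struct ToyLockCnt {
+        atomic_uint word;        /* bit 0: lock flag, upper bits: counter */
+    } ToyLockCnt;
+
+    #define TOY_LOCK_BIT 1u
+    #define TOY_CNT_UNIT 2u      /* the counter lives above the lock bit */
+
+    static void toy_lockcnt_inc(ToyLockCnt *lc)
+    {
+        unsigned old = atomic_load(&lc->word);
+
+        for (;;) {
+            if (old & TOY_LOCK_BIT) {
+                /*
+                 * Lock held: spin and re-read.  A real implementation
+                 * would sleep (e.g. on a futex) instead of spinning, and
+                 * only needs to wait at all when the counter is zero.
+                 */
+                old = atomic_load(&lc->word);
+                continue;
+            }
+            /* Fast path: one compare-and-swap, no mutex taken. */
+            if (atomic_compare_exchange_weak(&lc->word, &old,
+                                             old + TOY_CNT_UNIT)) {
+                return;
+            }
+        }
+    }
+
+    static void toy_lockcnt_dec(ToyLockCnt *lc)
+    {
+        atomic_fetch_sub(&lc->word, TOY_CNT_UNIT);
+    }
+
+    static unsigned toy_lockcnt_count(ToyLockCnt *lc)
+    {
+        return atomic_load(&lc->word) / TOY_CNT_UNIT;
+    }
+
+The lock/unlock side (which sets the lock bit and waits for the counter to
+drain) is omitted from the sketch. In the uncontended case the increment is
+just a load plus one compare-and-swap, which is why the fast path can be no
+more expensive than a plain atomic counter.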
+ + +``QemuLockCnt`` API +------------------- + +.. kernel-doc:: include/qemu/lockcnt.h + + +``QemuLockCnt`` usage +--------------------- + +This section explains the typical usage patterns for ``QemuLockCnt`` functions. + +Setting a variable to a non-NULL value can be done between +``qemu_lockcnt_lock`` and ``qemu_lockcnt_unlock``:: + + qemu_lockcnt_lock(&xyz_lockcnt); + if (!xyz) { + new_xyz = g_new(XYZ, 1); + ... + qatomic_rcu_set(&xyz, new_xyz); + } + qemu_lockcnt_unlock(&xyz_lockcnt); + +Accessing the value can be done between ``qemu_lockcnt_inc`` and +``qemu_lockcnt_dec``:: + + qemu_lockcnt_inc(&xyz_lockcnt); + if (xyz) { + XYZ *p = qatomic_rcu_read(&xyz); + ... + /* Accesses can now be done through "p". */ + } + qemu_lockcnt_dec(&xyz_lockcnt); + +Freeing the object can similarly use ``qemu_lockcnt_lock`` and +``qemu_lockcnt_unlock``, but you also need to ensure that the count +is zero (i.e. there is no concurrent visit). Because ``qemu_lockcnt_inc`` +takes the ``QemuLockCnt``'s lock, the count cannot become non-zero while +the object is being freed. Freeing an object looks like this:: + + qemu_lockcnt_lock(&xyz_lockcnt); + if (!qemu_lockcnt_count(&xyz_lockcnt)) { + g_free(xyz); + xyz = NULL; + } + qemu_lockcnt_unlock(&xyz_lockcnt); + +If an object has to be freed right after a visit, you can combine +the decrement, the locking and the check on count as follows:: + + qemu_lockcnt_inc(&xyz_lockcnt); + if (xyz) { + XYZ *p = qatomic_rcu_read(&xyz); + ... + /* Accesses can now be done through "p". */ + } + if (qemu_lockcnt_dec_and_lock(&xyz_lockcnt)) { + g_free(xyz); + xyz = NULL; + qemu_lockcnt_unlock(&xyz_lockcnt); + } + +``QemuLockCnt`` can also be used to access a list as follows:: + + qemu_lockcnt_inc(&io_handlers_lockcnt); + QLIST_FOREACH_RCU(ioh, &io_handlers, pioh) { + if (ioh->revents & G_IO_OUT) { + ioh->fd_write(ioh->opaque); + } + } + + if (qemu_lockcnt_dec_and_lock(&io_handlers_lockcnt)) { + QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) { + if (ioh->deleted) { + QLIST_REMOVE(ioh, next); + g_free(ioh); + } + } + qemu_lockcnt_unlock(&io_handlers_lockcnt); + } + +Again, the RCU primitives are used because new items can be added to the +list during the walk. ``QLIST_FOREACH_RCU`` ensures that the processor and +the compiler see the appropriate memory barriers. + +An alternative pattern uses ``qemu_lockcnt_dec_if_lock``:: + + qemu_lockcnt_inc(&io_handlers_lockcnt); + QLIST_FOREACH_SAFE_RCU(ioh, &io_handlers, next, pioh) { + if (ioh->deleted) { + if (qemu_lockcnt_dec_if_lock(&io_handlers_lockcnt)) { + QLIST_REMOVE(ioh, next); + g_free(ioh); + qemu_lockcnt_inc_and_unlock(&io_handlers_lockcnt); + } + } else { + if (ioh->revents & G_IO_OUT) { + ioh->fd_write(ioh->opaque); + } + } + } + qemu_lockcnt_dec(&io_handlers_lockcnt); + +Here you can use ``qemu_lockcnt_dec`` instead of ``qemu_lockcnt_dec_and_lock``, +because there is no special task to do if the count goes from 1 to 0. diff --git a/docs/devel/lockcnt.txt b/docs/devel/lockcnt.txt deleted file mode 100644 index a3fb3bc5d8d..00000000000 --- a/docs/devel/lockcnt.txt +++ /dev/null @@ -1,277 +0,0 @@ -DOCUMENTATION FOR LOCKED COUNTERS (aka QemuLockCnt) -=================================================== - -QEMU often uses reference counts to track data structures that are being -accessed and should not be freed. 
For example, a loop that invoke -callbacks like this is not safe: - - QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) { - if (ioh->revents & G_IO_OUT) { - ioh->fd_write(ioh->opaque); - } - } - -QLIST_FOREACH_SAFE protects against deletion of the current node (ioh) -by stashing away its "next" pointer. However, ioh->fd_write could -actually delete the next node from the list. The simplest way to -avoid this is to mark the node as deleted, and remove it from the -list in the above loop: - - QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) { - if (ioh->deleted) { - QLIST_REMOVE(ioh, next); - g_free(ioh); - } else { - if (ioh->revents & G_IO_OUT) { - ioh->fd_write(ioh->opaque); - } - } - } - -If however this loop must also be reentrant, i.e. it is possible that -ioh->fd_write invokes the loop again, some kind of counting is needed: - - walking_handlers++; - QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) { - if (ioh->deleted) { - if (walking_handlers == 1) { - QLIST_REMOVE(ioh, next); - g_free(ioh); - } - } else { - if (ioh->revents & G_IO_OUT) { - ioh->fd_write(ioh->opaque); - } - } - } - walking_handlers--; - -One may think of using the RCU primitives, rcu_read_lock() and -rcu_read_unlock(); effectively, the RCU nesting count would take -the place of the walking_handlers global variable. Indeed, -reference counting and RCU have similar purposes, but their usage in -general is complementary: - -- reference counting is fine-grained and limited to a single data - structure; RCU delays reclamation of *all* RCU-protected data - structures; - -- reference counting works even in the presence of code that keeps - a reference for a long time; RCU critical sections in principle - should be kept short; - -- reference counting is often applied to code that is not thread-safe - but is reentrant; in fact, usage of reference counting in QEMU predates - the introduction of threads by many years. RCU is generally used to - protect readers from other threads freeing memory after concurrent - modifications to a data structure. - -- reclaiming data can be done by a separate thread in the case of RCU; - this can improve performance, but also delay reclamation undesirably. - With reference counting, reclamation is deterministic. - -This file documents QemuLockCnt, an abstraction for using reference -counting in code that has to be both thread-safe and reentrant. - - -QemuLockCnt concepts --------------------- - -A QemuLockCnt comprises both a counter and a mutex; it has primitives -to increment and decrement the counter, and to take and release the -mutex. The counter notes how many visits to the data structures are -taking place (the visits could be from different threads, or there could -be multiple reentrant visits from the same thread). The basic rules -governing the counter/mutex pair then are the following: - -- Data protected by the QemuLockCnt must not be freed unless the - counter is zero and the mutex is taken. - -- A new visit cannot be started while the counter is zero and the - mutex is taken. - -Most of the time, the mutex protects all writes to the data structure, -not just frees, though there could be cases where this is not necessary. - -Reads, instead, can be done without taking the mutex, as long as the -readers and writers use the same macros that are used for RCU, for -example qatomic_rcu_read, qatomic_rcu_set, QLIST_FOREACH_RCU, etc. This is -because the reads are done outside a lock and a set or QLIST_INSERT_HEAD -can happen concurrently with the read. 
The RCU API ensures that the -processor and the compiler see all required memory barriers. - -This could be implemented simply by protecting the counter with the -mutex, for example: - - // (1) - qemu_mutex_lock(&walking_handlers_mutex); - walking_handlers++; - qemu_mutex_unlock(&walking_handlers_mutex); - - ... - - // (2) - qemu_mutex_lock(&walking_handlers_mutex); - if (--walking_handlers == 0) { - QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) { - if (ioh->deleted) { - QLIST_REMOVE(ioh, next); - g_free(ioh); - } - } - } - qemu_mutex_unlock(&walking_handlers_mutex); - -Here, no frees can happen in the code represented by the ellipsis. -If another thread is executing critical section (2), that part of -the code cannot be entered, because the thread will not be able -to increment the walking_handlers variable. And of course -during the visit any other thread will see a nonzero value for -walking_handlers, as in the single-threaded code. - -Note that it is possible for multiple concurrent accesses to delay -the cleanup arbitrarily; in other words, for the walking_handlers -counter to never become zero. For this reason, this technique is -more easily applicable if concurrent access to the structure is rare. - -However, critical sections are easy to forget since you have to do -them for each modification of the counter. QemuLockCnt ensures that -all modifications of the counter take the lock appropriately, and it -can also be more efficient in two ways: - -- it avoids taking the lock for many operations (for example - incrementing the counter while it is non-zero); - -- on some platforms, one can implement QemuLockCnt to hold the lock - and the mutex in a single word, making the fast path no more expensive - than simply managing a counter using atomic operations (see - docs/devel/atomics.rst). This can be very helpful if concurrent access to - the data structure is expected to be rare. - - -Using the same mutex for frees and writes can still incur some small -inefficiencies; for example, a visit can never start if the counter is -zero and the mutex is taken---even if the mutex is taken by a write, -which in principle need not block a visit of the data structure. -However, these are usually not a problem if any of the following -assumptions are valid: - -- concurrent access is possible but rare - -- writes are rare - -- writes are frequent, but this kind of write (e.g. appending to a - list) has a very small critical section. - -For example, QEMU uses QemuLockCnt to manage an AioContext's list of -bottom halves and file descriptor handlers. Modifications to the list -of file descriptor handlers are rare. Creation of a new bottom half is -frequent and can happen on a fast path; however: 1) it is almost never -concurrent with a visit to the list of bottom halves; 2) it only has -three instructions in the critical path, two assignments and a smp_wmb(). - - -QemuLockCnt API ---------------- - -The QemuLockCnt API is described in include/qemu/thread.h. - - -QemuLockCnt usage ------------------ - -This section explains the typical usage patterns for QemuLockCnt functions. - -Setting a variable to a non-NULL value can be done between -qemu_lockcnt_lock and qemu_lockcnt_unlock: - - qemu_lockcnt_lock(&xyz_lockcnt); - if (!xyz) { - new_xyz = g_new(XYZ, 1); - ... 
- qatomic_rcu_set(&xyz, new_xyz); - } - qemu_lockcnt_unlock(&xyz_lockcnt); - -Accessing the value can be done between qemu_lockcnt_inc and -qemu_lockcnt_dec: - - qemu_lockcnt_inc(&xyz_lockcnt); - if (xyz) { - XYZ *p = qatomic_rcu_read(&xyz); - ... - /* Accesses can now be done through "p". */ - } - qemu_lockcnt_dec(&xyz_lockcnt); - -Freeing the object can similarly use qemu_lockcnt_lock and -qemu_lockcnt_unlock, but you also need to ensure that the count -is zero (i.e. there is no concurrent visit). Because qemu_lockcnt_inc -takes the QemuLockCnt's lock, the count cannot become non-zero while -the object is being freed. Freeing an object looks like this: - - qemu_lockcnt_lock(&xyz_lockcnt); - if (!qemu_lockcnt_count(&xyz_lockcnt)) { - g_free(xyz); - xyz = NULL; - } - qemu_lockcnt_unlock(&xyz_lockcnt); - -If an object has to be freed right after a visit, you can combine -the decrement, the locking and the check on count as follows: - - qemu_lockcnt_inc(&xyz_lockcnt); - if (xyz) { - XYZ *p = qatomic_rcu_read(&xyz); - ... - /* Accesses can now be done through "p". */ - } - if (qemu_lockcnt_dec_and_lock(&xyz_lockcnt)) { - g_free(xyz); - xyz = NULL; - qemu_lockcnt_unlock(&xyz_lockcnt); - } - -QemuLockCnt can also be used to access a list as follows: - - qemu_lockcnt_inc(&io_handlers_lockcnt); - QLIST_FOREACH_RCU(ioh, &io_handlers, pioh) { - if (ioh->revents & G_IO_OUT) { - ioh->fd_write(ioh->opaque); - } - } - - if (qemu_lockcnt_dec_and_lock(&io_handlers_lockcnt)) { - QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) { - if (ioh->deleted) { - QLIST_REMOVE(ioh, next); - g_free(ioh); - } - } - qemu_lockcnt_unlock(&io_handlers_lockcnt); - } - -Again, the RCU primitives are used because new items can be added to the -list during the walk. QLIST_FOREACH_RCU ensures that the processor and -the compiler see the appropriate memory barriers. - -An alternative pattern uses qemu_lockcnt_dec_if_lock: - - qemu_lockcnt_inc(&io_handlers_lockcnt); - QLIST_FOREACH_SAFE_RCU(ioh, &io_handlers, next, pioh) { - if (ioh->deleted) { - if (qemu_lockcnt_dec_if_lock(&io_handlers_lockcnt)) { - QLIST_REMOVE(ioh, next); - g_free(ioh); - qemu_lockcnt_inc_and_unlock(&io_handlers_lockcnt); - } - } else { - if (ioh->revents & G_IO_OUT) { - ioh->fd_write(ioh->opaque); - } - } - } - qemu_lockcnt_dec(&io_handlers_lockcnt); - -Here you can use qemu_lockcnt_dec instead of qemu_lockcnt_dec_and_lock, -because there is no special task to do if the count goes from 1 to 0. diff --git a/docs/devel/maintainers.rst b/docs/devel/maintainers.rst index 5c907d901cd..88a613ed74f 100644 --- a/docs/devel/maintainers.rst +++ b/docs/devel/maintainers.rst @@ -99,9 +99,9 @@ members of the QEMU community, you should make arrangements to attend a `KeySigningParty `__ (for example at KVM Forum) or make alternative arrangements to have your key signed by an attendee. Key signing requires meeting another -community member **in person** [#]_ so please make appropriate +community member **in person**\ [#2020]_ so please make appropriate arrangements. -.. [#] In recent pandemic times we have had to exercise some +.. [#2020] In recent pandemic times we have had to exercise some flexibility here. Maintainers still need to sign their pull requests though. diff --git a/docs/devel/memory.rst b/docs/devel/memory.rst index 69c5e3f914a..57fb2aec76e 100644 --- a/docs/devel/memory.rst +++ b/docs/devel/memory.rst @@ -369,4 +369,4 @@ callbacks are called: API Reference ------------- -.. kernel-doc:: include/exec/memory.h +.. 
kernel-doc:: include/system/memory.h diff --git a/docs/devel/migration/CPR.rst b/docs/devel/migration/CPR.rst index 63c36470cf6..0a0fd4f6dc3 100644 --- a/docs/devel/migration/CPR.rst +++ b/docs/devel/migration/CPR.rst @@ -5,7 +5,7 @@ CPR is the umbrella name for a set of migration modes in which the VM is migrated to a new QEMU instance on the same host. It is intended for use when the goal is to update host software components that run the VM, such as QEMU or even the host kernel. At this time, -cpr-reboot is the only available mode. +the cpr-reboot and cpr-transfer modes are available. Because QEMU is restarted on the same host, with access to the same local devices, CPR is allowed in certain cases where normal migration @@ -53,7 +53,7 @@ RAM is copied to the migration URI. Outgoing: * Set the migration mode parameter to ``cpr-reboot``. * Set the ``x-ignore-shared`` capability if desired. - * Issue the ``migrate`` command. It is recommended the the URI be a + * Issue the ``migrate`` command. It is recommended the URI be a ``file`` type, but one can use other types such as ``exec``, provided the command captures all the data from the outgoing side, and provides all the data to the incoming side. @@ -145,3 +145,182 @@ Caveats cpr-reboot mode may not be used with postcopy, background-snapshot, or COLO. + +cpr-transfer mode +----------------- + +This mode allows the user to transfer a guest to a new QEMU instance +on the same host with minimal guest pause time, by preserving guest +RAM in place, albeit with new virtual addresses in new QEMU. Devices +and their pinned memory pages are also preserved for VFIO and IOMMUFD. + +The user starts new QEMU on the same host as old QEMU, with command- +line arguments to create the same machine, plus the ``-incoming`` +option for the main migration channel, like normal live migration. +In addition, the user adds a second -incoming option with channel +type ``cpr``. This CPR channel must support file descriptor transfer +with SCM_RIGHTS, i.e. it must be a UNIX domain socket. + +To initiate CPR, the user issues a migrate command to old QEMU, +adding a second migration channel of type ``cpr`` in the channels +argument. Old QEMU stops the VM, saves state to the migration +channels, and enters the postmigrate state. Execution resumes in +new QEMU. + +New QEMU reads the CPR channel before opening a monitor, hence +the CPR channel cannot be specified in the list of channels for a +migrate-incoming command. It may only be specified on the command +line. + +Usage +^^^^^ + +Memory backend objects must have the ``share=on`` attribute. + +The VM must be started with the ``-machine aux-ram-share=on`` +option. This causes implicit RAM blocks (those not described by +a memory-backend object) to be allocated by mmap'ing a memfd. +Examples include VGA and ROM. + +Outgoing: + * Set the migration mode parameter to ``cpr-transfer``. + * Issue the ``migrate`` command, containing a main channel and + a cpr channel. + +Incoming: + * Start new QEMU with two ``-incoming`` options. + * If the VM was running when the outgoing ``migrate`` command was + issued, then QEMU automatically resumes VM execution. + +Caveats +^^^^^^^ + +cpr-transfer mode may not be used with postcopy, background-snapshot, +or COLO. + +memory-backend-epc is not supported. + +The main incoming migration channel address cannot be a file type. + +If the main incoming channel address is an inet socket, then the port +cannot be 0 (meaning dynamically choose a port). 
+ +When using ``-incoming defer``, you must issue the migrate command to +old QEMU before issuing any monitor commands to new QEMU, because new +QEMU blocks waiting to read from the cpr channel before starting its +monitor, and old QEMU does not write to the channel until the migrate +command is issued. However, new QEMU does not open and read the +main migration channel until you issue the migrate incoming command. + +Example 1: incoming channel +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In these examples, we simply restart the same version of QEMU, but +in a real scenario one would start new QEMU on the incoming side. +Note that new QEMU does not print the monitor prompt until old QEMU +has issued the migrate command. The outgoing side uses QMP because +HMP cannot specify a CPR channel. Some QMP responses are omitted for +brevity. + +:: + + Outgoing: Incoming: + + # qemu-kvm -qmp stdio + -object memory-backend-file,id=ram0,size=4G, + mem-path=/dev/shm/ram0,share=on -m 4G + -machine memory-backend=ram0 + -machine aux-ram-share=on + ... + # qemu-kvm -monitor stdio + -incoming tcp:0:44444 + -incoming '{"channel-type": "cpr", + "addr": { "transport": "socket", + "type": "unix", "path": "cpr.sock"}}' + ... + {"execute":"qmp_capabilities"} + + {"execute": "query-status"} + {"return": {"status": "running", + "running": true}} + + {"execute":"migrate-set-parameters", + "arguments":{"mode":"cpr-transfer"}} + + {"execute": "migrate", "arguments": { "channels": [ + {"channel-type": "main", + "addr": { "transport": "socket", "type": "inet", + "host": "0", "port": "44444" }}, + {"channel-type": "cpr", + "addr": { "transport": "socket", "type": "unix", + "path": "cpr.sock" }}]}} + + QEMU 10.0.50 monitor + (qemu) info status + VM status: running + + {"execute": "query-status"} + {"return": {"status": "postmigrate", + "running": false}} + +Example 2: incoming defer +^^^^^^^^^^^^^^^^^^^^^^^^^ + +This example uses ``-incoming defer`` to hot plug a device before +accepting the main migration channel. Again note you must issue the +migrate command to old QEMU before you can issue any monitor +commands to new QEMU. + + +:: + + Outgoing: Incoming: + + # qemu-kvm -monitor stdio + -object memory-backend-file,id=ram0,size=4G, + mem-path=/dev/shm/ram0,share=on -m 4G + -machine memory-backend=ram0 + -machine aux-ram-share=on + ... + # qemu-kvm -monitor stdio + -incoming defer + -incoming '{"channel-type": "cpr", + "addr": { "transport": "socket", + "type": "unix", "path": "cpr.sock"}}' + ... + {"execute":"qmp_capabilities"} + + {"execute": "device_add", + "arguments": {"driver": "pcie-root-port"}} + + {"execute":"migrate-set-parameters", + "arguments":{"mode":"cpr-transfer"}} + + {"execute": "migrate", "arguments": { "channels": [ + {"channel-type": "main", + "addr": { "transport": "socket", "type": "inet", + "host": "0", "port": "44444" }}, + {"channel-type": "cpr", + "addr": { "transport": "socket", "type": "unix", + "path": "cpr.sock" }}]}} + + QEMU 10.0.50 monitor + (qemu) info status + VM status: paused (inmigrate) + (qemu) device_add pcie-root-port + (qemu) migrate_incoming tcp:0:44444 + (qemu) info status + VM status: running + + {"execute": "query-status"} + {"return": {"status": "postmigrate", + "running": false}} + +Futures +^^^^^^^ + +cpr-transfer mode is based on a capability to transfer open file +descriptors from old to new QEMU. 
In the future, descriptors for +vhost, and char devices could be transferred, +preserving those devices and their kernel state without interruption, +even if they do not explicitly support live migration. diff --git a/docs/devel/migration/compatibility.rst b/docs/devel/migration/compatibility.rst index 5a5417ef069..ecb887e3184 100644 --- a/docs/devel/migration/compatibility.rst +++ b/docs/devel/migration/compatibility.rst @@ -198,7 +198,7 @@ was done:: The relevant parts for migration are:: - @@ -1281,7 +1284,8 @@ static Property virtio_blk_properties[] = { + @@ -1281,7 +1284,8 @@ static const Property virtio_blk_properties[] = { #endif DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0, true), @@ -395,13 +395,12 @@ the old behaviour or the new behaviour:: index 8a87ccc8b0..5153ad63d6 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c - @@ -79,6 +79,8 @@ static Property pci_props[] = { + @@ -79,6 +79,8 @@ static const Property pci_props[] = { DEFINE_PROP_STRING("failover_pair_id", PCIDevice, failover_pair_id), DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0), + DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present, + QEMU_PCIE_ERR_UNC_MASK_BITNR, true), - DEFINE_PROP_END_OF_LIST() }; Notice that we enable the feature for new machine types. diff --git a/docs/devel/migration/features.rst b/docs/devel/migration/features.rst index 58f8fd9e16a..8f431d52f9d 100644 --- a/docs/devel/migration/features.rst +++ b/docs/devel/migration/features.rst @@ -14,3 +14,4 @@ Migration has plenty of features to support different use cases. CPR qpl-compression uadk-compression + qatzip-compression diff --git a/docs/devel/migration/main.rst b/docs/devel/migration/main.rst index 784c899dca6..6493c1d2bca 100644 --- a/docs/devel/migration/main.rst +++ b/docs/devel/migration/main.rst @@ -1,3 +1,5 @@ +.. _migration: + =================== Migration framework =================== @@ -465,6 +467,12 @@ Examples of such API functions are: - portio_list_set_address() - portio_list_set_enabled() +Since the order of device save/restore is not defined, you must +avoid accessing or changing any other device's state in one of these +callbacks. (For instance, don't do anything that calls ``update_irq()`` +in a ``post_load`` hook.) Otherwise, restore will not be deterministic, +and this will break execution record/replay. + Iterative device migration -------------------------- @@ -500,8 +508,8 @@ An iterative device must provide: the point that stream bandwidth limits tell it to stop. Each call generates one section. - - A ``save_live_complete_precopy`` function that must transmit the - last section for the device containing any remaining data. + - A ``save_complete`` function that must transmit the last section for + the device containing any remaining data. - A ``load_state`` function used to load sections generated by any of the save functions that generate sections. diff --git a/docs/devel/migration/mapped-ram.rst b/docs/devel/migration/mapped-ram.rst index d352b546e96..b08c2b433c4 100644 --- a/docs/devel/migration/mapped-ram.rst +++ b/docs/devel/migration/mapped-ram.rst @@ -44,7 +44,7 @@ Use-cases The mapped-ram feature was designed for use cases where the migration stream will be directed to a file in the filesystem and not -immediately restored on the destination VM [#]_. These could be +immediately restored on the destination VM\ [#alternatives]_. These could be thought of as snapshots. We can further categorize them into live and non-live. 
@@ -70,7 +70,7 @@ mapped-ram in this scenario is portability since background-snapshot depends on async dirty tracking (KVM_GET_DIRTY_LOG) which is not supported outside of Linux. -.. [#] While this same effect could be obtained with the usage of +.. [#alternatives] While this same effect could be obtained with the usage of snapshots or the ``file:`` migration alone, mapped-ram provides a performance increase for VMs with larger RAM sizes (10s to 100s of GiBs), specially if the VM has been stopped beforehand. diff --git a/docs/devel/migration/postcopy.rst b/docs/devel/migration/postcopy.rst index 82e7a848c63..e319388d8f8 100644 --- a/docs/devel/migration/postcopy.rst +++ b/docs/devel/migration/postcopy.rst @@ -33,25 +33,6 @@ will now cause the transition from precopy to postcopy. It can be issued immediately after migration is started or any time later on. Issuing it after the end of a migration is harmless. -Blocktime is a postcopy live migration metric, intended to show how -long the vCPU was in state of interruptible sleep due to pagefault. -That metric is calculated both for all vCPUs as overlapped value, and -separately for each vCPU. These values are calculated on destination -side. To enable postcopy blocktime calculation, enter following -command on destination monitor: - -``migrate_set_capability postcopy-blocktime on`` - -Postcopy blocktime can be retrieved by query-migrate qmp command. -postcopy-blocktime value of qmp command will show overlapped blocking -time for all vCPU, postcopy-vcpu-blocktime will show list of blocking -time per vCPU. - -.. note:: - During the postcopy phase, the bandwidth limits set using - ``migrate_set_parameter`` is ignored (to avoid delaying requested pages that - the destination is waiting for). - Postcopy internals ================== @@ -312,3 +293,20 @@ explicitly) to be sent in a separate preempt channel, rather than queued in the background migration channel. Anyone who cares about latencies of page faults during a postcopy migration should enable this feature. By default, it's not enabled. + +Postcopy blocktime statistics +----------------------------- + +Blocktime is a postcopy live migration metric, intended to show how +long the vCPU was in state of interruptible sleep due to pagefault. +That metric is calculated both for all vCPUs as overlapped value, and +separately for each vCPU. These values are calculated on destination +side. To enable postcopy blocktime calculation, enter following +command on destination monitor: + +``migrate_set_capability postcopy-blocktime on`` + +Postcopy blocktime can be retrieved by query-migrate qmp command. +postcopy-blocktime value of qmp command will show overlapped blocking +time for all vCPU, postcopy-vcpu-blocktime will show list of blocking +time per vCPU. diff --git a/docs/devel/migration/qatzip-compression.rst b/docs/devel/migration/qatzip-compression.rst new file mode 100644 index 00000000000..862b3831647 --- /dev/null +++ b/docs/devel/migration/qatzip-compression.rst @@ -0,0 +1,165 @@ +================== +QATzip Compression +================== +In scenarios with limited network bandwidth, the ``QATzip`` solution can help +users save a lot of host CPU resources by accelerating compression and +decompression through the Intel QuickAssist Technology(``QAT``) hardware. + + +The following test was conducted using 8 multifd channels and 10Gbps network +bandwidth. The results show that, compared to zstd, ``QATzip`` significantly +saves CPU resources on the sender and reduces migration time. 
Compared to the +uncompressed solution, ``QATzip`` greatly improves the dirty page processing +capability, indicated by the Pages per Second metric, and also reduces the +total migration time. + +:: + + VM Configuration: 16 vCPU and 64G memory + VM Workload: all vCPUs are idle and 54G memory is filled with Silesia data. + QAT Devices: 4 + |-----------|--------|---------|----------|----------|------|------| + |8 Channels |Total |down |throughput|pages per | send | recv | + | |time(ms)|time(ms) |(mbps) |second | cpu %| cpu% | + |-----------|--------|---------|----------|----------|------|------| + |qatzip | 16630| 28| 10467| 2940235| 160| 360| + |-----------|--------|---------|----------|----------|------|------| + |zstd | 20165| 24| 8579| 2391465| 810| 340| + |-----------|--------|---------|----------|----------|------|------| + |none | 46063| 40| 10848| 330240| 45| 85| + |-----------|--------|---------|----------|----------|------|------| + + +QATzip Compression Framework +============================ + +``QATzip`` is a user space library which builds on top of the Intel QuickAssist +Technology to provide extended accelerated compression and decompression +services. + +For more ``QATzip`` introduction, please refer to `QATzip Introduction +`_ + +:: + + +----------------+ + | MultiFd Thread | + +-------+--------+ + | + | compress/decompress + +-------+--------+ + | QATzip library | + +-------+--------+ + | + +-------+--------+ + | QAT library | + +-------+--------+ + | user space + --------+--------------------- + | kernel space + +------+-------+ + | QAT Driver | + +------+-------+ + | + +------+-------+ + | QAT Devices | + +--------------+ + + +QATzip Installation +------------------- + +The ``QATzip`` installation package has been integrated into some Linux +distributions and can be installed directly. For example, the Ubuntu Server +24.04 LTS system can be installed using below command + +.. code-block:: shell + + #apt search qatzip + libqatzip-dev/noble 1.2.0-0ubuntu3 amd64 + Intel QuickAssist user space library development files + + libqatzip3/noble 1.2.0-0ubuntu3 amd64 + Intel QuickAssist user space library + + qatzip/noble,now 1.2.0-0ubuntu3 amd64 [installed] + Compression user-space tool for Intel QuickAssist Technology + + #sudo apt install libqatzip-dev libqatzip3 qatzip + +If your system does not support the ``QATzip`` installation package, you can +use the source code to build and install, please refer to `QATzip source code installation +`_ + +QAT Hardware Deployment +----------------------- + +``QAT`` supports physical functions(PFs) and virtual functions(VFs) for +deployment, and users can configure ``QAT`` resources for migration according +to actual needs. For more details about ``QAT`` deployment, please refer to +`Intel QuickAssist Technology Documentation +`_ + +For more ``QAT`` hardware introduction, please refer to `intel-quick-assist-technology-overview +`_ + +How To Use QATzip Compression +============================= + +1 - Install ``QATzip`` library + +2 - Build ``QEMU`` with ``--enable-qatzip`` parameter + + E.g. configure --target-list=x86_64-softmmu --enable-kvm ``--enable-qatzip`` + +3 - Set ``migrate_set_parameter multifd-compression qatzip`` + +4 - Set ``migrate_set_parameter multifd-qatzip-level comp_level``, the default +comp_level value is 1, and it supports levels from 1 to 9 + +QAT Memory Requirements +======================= + +The user needs to reserve system memory for the QAT memory management to +allocate DMA memory. 
The size of the reserved system memory depends on the +number of devices used for migration and the number of multifd channels. + +Because memory usage depends on QAT configuration, please refer to `QAT Memory +Driver Queries +`_ +for memory usage calculation. + +.. list-table:: An example of a PF used for migration + :header-rows: 1 + + * - Number of channels + - Sender memory usage + - Receiver memory usage + * - 2 + - 10M + - 10M + * - 4 + - 12M + - 14M + * - 8 + - 16M + - 20M + +How To Choose Between QATzip and QPL +==================================== +Starting from 4th Gen Intel Xeon Scalable processors, codenamed Sapphire Rapids +processor(``SPR``), multiple built-in accelerators are supported including +``QAT`` and ``IAA``. The former can accelerate ``QATzip`` and the latter is +used to accelerate ``QPL``. + +Here are some suggestions: + +1 - If the live migration scenario is limited by network bandwidth and ``QAT`` +hardware resources exceed ``IAA``, use the ``QATzip`` method, which can save a +lot of host CPU resources for compression. + +2 - If the system cannot support shared virtual memory (SVM) technology, use +the ``QATzip`` method because ``QPL`` performance is not good without SVM +support. + +3 - For other scenarios, use the ``QPL`` method first. diff --git a/docs/devel/migration/uadk-compression.rst b/docs/devel/migration/uadk-compression.rst index 3f73345dd53..64cadebd219 100644 --- a/docs/devel/migration/uadk-compression.rst +++ b/docs/devel/migration/uadk-compression.rst @@ -114,7 +114,7 @@ Make sure all these above kernel configurations are selected. Accelerator dev node permissions -------------------------------- -Harware accelerators(eg: HiSilicon Kunpeng Zip accelerator) gets registered to +Hardware accelerators (eg: HiSilicon Kunpeng Zip accelerator) gets registered to UADK and char devices are created in dev directory. In order to access resources on hardware accelerator devices, write permission should be provided to user. @@ -134,7 +134,7 @@ How To Use UADK Compression In QEMU Migration Set ``migrate_set_parameter multifd-compression uadk`` Since UADK uses Shared Virtual Addressing(SVA) and device access virtual memory -directly it is possible that SMMUv3 may enounter page faults while walking the +directly it is possible that SMMUv3 may encounter page faults while walking the IO page tables. This may impact the performance. In order to mitigate this, please make sure to specify ``-mem-prealloc`` parameter to the destination VM boot parameters. diff --git a/docs/devel/migration/vfio.rst b/docs/devel/migration/vfio.rst index c49482eab66..0790e5031d8 100644 --- a/docs/devel/migration/vfio.rst +++ b/docs/devel/migration/vfio.rst @@ -67,15 +67,35 @@ VFIO implements the device hooks for the iterative approach as follows: * A ``switchover_ack_needed`` function that checks if the VFIO device uses "switchover-ack" migration capability when this capability is enabled. -* A ``save_state`` function to save the device config space if it is present. - -* A ``save_live_complete_precopy`` function that sets the VFIO device in - _STOP_COPY state and iteratively copies the data for the VFIO device until - the vendor driver indicates that no data remains. +* A ``switchover_start`` function that in the multifd mode starts a thread that + reassembles the multifd received data and loads it in-order into the device. + In the non-multifd mode this function is a NOP. + +* A ``save_state`` function to save the device config space if it is present + in the non-multifd mode. 
+ In the multifd mode it just emits either a dummy EOS marker. + +* A ``save_complete`` function that sets the VFIO device in _STOP_COPY + state and iteratively copies the data for the VFIO device until the + vendor driver indicates that no data remains. In the multifd mode it + just emits a dummy EOS marker. + +* A ``save_complete_precopy_thread`` function that in the multifd mode + provides thread handler performing multifd device state transfer. + It sets the VFIO device to _STOP_COPY state, iteratively reads the data + from the VFIO device and queues it for multifd transmission until the vendor + driver indicates that no data remains. + After that, it saves the device config space and queues it for multifd + transfer too. + In the non-multifd mode this thread is a NOP. * A ``load_state`` function that loads the config section and the data sections that are generated by the save functions above. +* A ``load_state_buffer`` function that loads the device state and the device + config that arrived via multifd channels. + It's used only in the multifd mode. + * ``cleanup`` functions for both save and load that perform any migration related cleanup. @@ -175,9 +195,12 @@ Live migration save path | Then the VFIO device is put in _STOP_COPY state (FINISH_MIGRATE, _ACTIVE, _STOP_COPY) - .save_live_complete_precopy() is called for each active device - For the VFIO device, iterate in .save_live_complete_precopy() until + .save_complete() is called for each active device + For the VFIO device: in the non-multifd mode iterate in + .save_complete() until pending data is 0 + In the multifd mode this iteration is done in + .save_complete_precopy_thread() instead. | (POSTMIGRATE, _COMPLETED, _STOP_COPY) Migraton thread schedules cleanup bottom half and exits @@ -194,6 +217,9 @@ Live migration resume path (RESTORE_VM, _ACTIVE, _STOP) | For each device, .load_state() is called for that device section data + transmitted via the main migration channel. + For data transmitted via multifd channels .load_state_buffer() is called + instead. (RESTORE_VM, _ACTIVE, _RESUMING) | At the end, .load_cleanup() is called for each device and vCPUs are started @@ -206,3 +232,37 @@ Postcopy ======== Postcopy migration is currently not supported for VFIO devices. + +Multifd +======= + +Starting from QEMU version 10.0 there's a possibility to transfer VFIO device +_STOP_COPY state via multifd channels. This helps reduce downtime - especially +with multiple VFIO devices or with devices having a large migration state. +As an additional benefit, setting the VFIO device to _STOP_COPY state and +saving its config space is also parallelized (run in a separate thread) in +such migration mode. + +The multifd VFIO device state transfer is controlled by +"x-migration-multifd-transfer" VFIO device property. This property defaults to +AUTO, which means that VFIO device state transfer via multifd channels is +attempted in configurations that otherwise support it. + +Since the target QEMU needs to load device state buffers in-order it needs to +queue incoming buffers until they can be loaded into the device. +This means that a malicious QEMU source could theoretically cause the target +QEMU to allocate unlimited amounts of memory for such buffers-in-flight. + +The "x-migration-max-queued-buffers-size" property allows capping the total size +of these VFIO device state buffers queued at the destination. 
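+
+The sketch below is illustrative only (the structure and helper names are
+hypothetical, this is not the actual QEMU implementation), but it shows the
+kind of accounting such a cap implies on the destination side:
+
+.. code-block:: c
+
+    typedef struct {
+        uint64_t max_queued_size;   /* UINT64_MAX means "no limit" */
+        uint64_t queued_size;       /* bytes queued but not yet loaded */
+    } MultifdRecvQueueSketch;
+
+    static bool queue_device_state_buffer(MultifdRecvQueueSketch *q,
+                                          uint64_t len, Error **errp)
+    {
+        /* Charge the buffer against the remaining budget before queuing. */
+        if (len > q->max_queued_size - q->queued_size) {
+            error_setg(errp, "queued VFIO device state buffers "
+                       "would exceed the configured limit");
+            return false;
+        }
+        q->queued_size += len;
+        /* ... append the buffer to the in-order reassembly queue ... */
+        return true;
+    }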
+ +Because a malicious QEMU source causing OOM on the target is not expected to be +a realistic threat in most of VFIO live migration use cases and the right value +depends on the particular setup by default this queued buffers size limit is +disabled by setting it to UINT64_MAX. + +Some host platforms (like ARM64) require that VFIO device config is loaded only +after all iterables were loaded, during non-iterables loading phase. +Such interlocking is controlled by "x-migration-load-config-after-iter" VFIO +device property, which in its default setting (AUTO) does so only on platforms +that actually require it. diff --git a/docs/devel/multi-thread-tcg.rst b/docs/devel/multi-thread-tcg.rst index d706c27ea74..da9a1530c9f 100644 --- a/docs/devel/multi-thread-tcg.rst +++ b/docs/devel/multi-thread-tcg.rst @@ -4,6 +4,8 @@ This work is licensed under the terms of the GNU GPL, version 2 or later. See the COPYING file in the top-level directory. +.. _mttcg: + ================== Multi-threaded TCG ================== @@ -26,16 +28,15 @@ vCPU Scheduling We introduce a new running mode where each vCPU will run on its own user-space thread. This is enabled by default for all FE/BE combinations where the host memory model is able to accommodate the -guest (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO is zero) and the -guest has had the required work done to support this safely -(TARGET_SUPPORTS_MTTCG). +guest (TCGCPUOps::guest_default_memory_order & ~TCG_TARGET_DEFAULT_MO is zero) +and the guest has had the required work done to support this safely +(TCGCPUOps::mttcg_supported). System emulation will fall back to the original round robin approach if: * forced by --accel tcg,thread=single * enabling --icount mode -* 64 bit guests on 32 bit hosts (TCG_OVERSIZED_GUEST) In the general case of running translated code there should be no inter-vCPU dependencies and all vCPUs should be able to run at full diff --git a/docs/devel/multiple-iothreads.rst b/docs/devel/multiple-iothreads.rst new file mode 100644 index 00000000000..d1f3fc4510a --- /dev/null +++ b/docs/devel/multiple-iothreads.rst @@ -0,0 +1,139 @@ +Using Multiple ``IOThread``\ s +============================== + +.. + Copyright (c) 2014-2017 Red Hat Inc. + + This work is licensed under the terms of the GNU GPL, version 2 or later. See + the COPYING file in the top-level directory. + + +This document explains the ``IOThread`` feature and how to write code that runs +outside the BQL. + +The main loop and ``IOThread``\ s +--------------------------------- +QEMU is an event-driven program that can do several things at once using an +event loop. The VNC server and the QMP monitor are both processed from the +same event loop, which monitors their file descriptors until they become +readable and then invokes a callback. + +The default event loop is called the main loop (see ``main-loop.c``). It is +possible to create additional event loop threads using +``-object iothread,id=my-iothread``. + +Side note: The main loop and ``IOThread`` are both event loops but their code is +not shared completely. Sometimes it is useful to remember that although they +are conceptually similar they are currently not interchangeable. + +Why ``IOThread``\ s are useful +------------------------------ +``IOThread``\ s allow the user to control the placement of work. The main loop is a +scalability bottleneck on hosts with many CPUs. Work can be spread across +several ``IOThread``\ s instead of just one main loop. 
When set up correctly this +can improve I/O latency and reduce jitter seen by the guest. + +The main loop is also deeply associated with the BQL, which is a +scalability bottleneck in itself. vCPU threads and the main loop use the BQL +to serialize execution of QEMU code. This mutex is necessary because a lot of +QEMU's code historically was not thread-safe. + +The fact that all I/O processing is done in a single main loop and that the +BQL is contended by all vCPU threads and the main loop explain +why it is desirable to place work into ``IOThread``\ s. + +The experimental ``virtio-blk`` data-plane implementation has been benchmarked and +shows these effects: +ftp://public.dhe.ibm.com/linux/pdfs/KVM_Virtualized_IO_Performance_Paper.pdf + +.. _how-to-program: + +How to program for ``IOThread``\ s +---------------------------------- +The main difference between legacy code and new code that can run in an +``IOThread`` is dealing explicitly with the event loop object, ``AioContext`` +(see ``include/block/aio.h``). Code that only works in the main loop +implicitly uses the main loop's ``AioContext``. Code that supports running +in ``IOThread``\ s must be aware of its ``AioContext``. + +AioContext supports the following services: + * File descriptor monitoring (read/write/error on POSIX hosts) + * Event notifiers (inter-thread signalling) + * Timers + * Bottom Halves (BH) deferred callbacks + +There are several old APIs that use the main loop AioContext: + * LEGACY ``qemu_aio_set_fd_handler()`` - monitor a file descriptor + * LEGACY ``qemu_aio_set_event_notifier()`` - monitor an event notifier + * LEGACY ``timer_new_ms()`` - create a timer + * LEGACY ``qemu_bh_new()`` - create a BH + * LEGACY ``qemu_bh_new_guarded()`` - create a BH with a device re-entrancy guard + * LEGACY ``qemu_aio_wait()`` - run an event loop iteration + +Since they implicitly work on the main loop they cannot be used in code that +runs in an ``IOThread``. They might cause a crash or deadlock if called from an +``IOThread`` since the BQL is not held. + +Instead, use the ``AioContext`` functions directly (see ``include/block/aio.h``): + * ``aio_set_fd_handler()`` - monitor a file descriptor + * ``aio_set_event_notifier()`` - monitor an event notifier + * ``aio_timer_new()`` - create a timer + * ``aio_bh_new()`` - create a BH + * ``aio_bh_new_guarded()`` - create a BH with a device re-entrancy guard + * ``aio_poll()`` - run an event loop iteration + +The ``qemu_bh_new_guarded``/``aio_bh_new_guarded`` APIs accept a +``MemReentrancyGuard`` +argument, which is used to check for and prevent re-entrancy problems. For +BHs associated with devices, the reentrancy-guard is contained in the +corresponding ``DeviceState`` and named ``mem_reentrancy_guard``. + +The ``AioContext`` can be obtained from the ``IOThread`` using +``iothread_get_aio_context()`` or for the main loop using +``qemu_get_aio_context()``. Code that takes an ``AioContext`` argument +works both in ``IOThread``\ s or the main loop, depending on which ``AioContext`` +instance the caller passes in. + +How to synchronize with an ``IOThread`` +--------------------------------------- +Variables that can be accessed by multiple threads require some form of +synchronization such as ``qemu_mutex_lock()``, ``rcu_read_lock()``, etc. + +``AioContext`` functions like ``aio_set_fd_handler()``, +``aio_set_event_notifier()``, ``aio_bh_new()``, and ``aio_timer_new()`` +are thread-safe. They can be used to trigger activity in an ``IOThread``. 
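+
+As a minimal sketch (the ``MyDevice``, ``my_device_do_work`` and
+``dev->iothread`` names are hypothetical, used purely for illustration),
+triggering work in an ``IOThread`` from another thread can look like this,
+using the one-shot bottom-half helper mentioned in the side note below:
+
+.. code-block:: c
+
+    static void my_device_process(void *opaque)
+    {
+        MyDevice *dev = opaque;
+
+        /* Runs inside the chosen event loop, not in the calling thread. */
+        my_device_do_work(dev);
+    }
+
+    static void my_device_kick(MyDevice *dev)
+    {
+        /* Use the IOThread's AioContext if one was configured,
+         * otherwise fall back to the main loop. */
+        AioContext *ctx = dev->iothread ?
+            iothread_get_aio_context(dev->iothread) : qemu_get_aio_context();
+
+        /* Thread-safe: queues a one-shot BH that will run in ctx. */
+        aio_bh_schedule_oneshot(ctx, my_device_process, dev);
+    }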
+ +Side note: the best way to schedule a function call across threads is to call +``aio_bh_schedule_oneshot()``. + +The main loop thread can wait synchronously for a condition using +``AIO_WAIT_WHILE()``. + +``AioContext`` and the block layer +---------------------------------- +The ``AioContext`` originates from the QEMU block layer, even though nowadays +``AioContext`` is a generic event loop that can be used by any QEMU subsystem. + +The block layer has support for ``AioContext`` integrated. Each +``BlockDriverState`` is associated with an ``AioContext`` using +``bdrv_try_change_aio_context()`` and ``bdrv_get_aio_context()``. +This allows block layer code to process I/O inside the +right ``AioContext``. Other subsystems may wish to follow a similar approach. + +Block layer code must therefore expect to run in an ``IOThread`` and avoid using +old APIs that implicitly use the main loop. See +`How to program for IOThreads`_ for information on how to do that. + +Code running in the monitor typically needs to ensure that past +requests from the guest are completed. When a block device is running +in an ``IOThread``, the ``IOThread`` can also process requests from the guest +(via ioeventfd). To achieve both objects, wrap the code between +``bdrv_drained_begin()`` and ``bdrv_drained_end()``, thus creating a "drained +section". + +Long-running jobs (usually in the form of coroutines) are often scheduled in +the ``BlockDriverState``'s ``AioContext``. The functions +``bdrv_add``/``remove_aio_context_notifier``, or alternatively +``blk_add``/``remove_aio_context_notifier`` if you use ``BlockBackends``, +can be used to get a notification whenever ``bdrv_try_change_aio_context()`` +moves a ``BlockDriverState`` to a different ``AioContext``. diff --git a/docs/devel/multiple-iothreads.txt b/docs/devel/multiple-iothreads.txt deleted file mode 100644 index de85767b124..00000000000 --- a/docs/devel/multiple-iothreads.txt +++ /dev/null @@ -1,130 +0,0 @@ -Copyright (c) 2014-2017 Red Hat Inc. - -This work is licensed under the terms of the GNU GPL, version 2 or later. See -the COPYING file in the top-level directory. - - -This document explains the IOThread feature and how to write code that runs -outside the BQL. - -The main loop and IOThreads ---------------------------- -QEMU is an event-driven program that can do several things at once using an -event loop. The VNC server and the QMP monitor are both processed from the -same event loop, which monitors their file descriptors until they become -readable and then invokes a callback. - -The default event loop is called the main loop (see main-loop.c). It is -possible to create additional event loop threads using -object -iothread,id=my-iothread. - -Side note: The main loop and IOThread are both event loops but their code is -not shared completely. Sometimes it is useful to remember that although they -are conceptually similar they are currently not interchangeable. - -Why IOThreads are useful ------------------------- -IOThreads allow the user to control the placement of work. The main loop is a -scalability bottleneck on hosts with many CPUs. Work can be spread across -several IOThreads instead of just one main loop. When set up correctly this -can improve I/O latency and reduce jitter seen by the guest. - -The main loop is also deeply associated with the BQL, which is a -scalability bottleneck in itself. vCPU threads and the main loop use the BQL -to serialize execution of QEMU code. 
This mutex is necessary because a lot of -QEMU's code historically was not thread-safe. - -The fact that all I/O processing is done in a single main loop and that the -BQL is contended by all vCPU threads and the main loop explain -why it is desirable to place work into IOThreads. - -The experimental virtio-blk data-plane implementation has been benchmarked and -shows these effects: -ftp://public.dhe.ibm.com/linux/pdfs/KVM_Virtualized_IO_Performance_Paper.pdf - -How to program for IOThreads ----------------------------- -The main difference between legacy code and new code that can run in an -IOThread is dealing explicitly with the event loop object, AioContext -(see include/block/aio.h). Code that only works in the main loop -implicitly uses the main loop's AioContext. Code that supports running -in IOThreads must be aware of its AioContext. - -AioContext supports the following services: - * File descriptor monitoring (read/write/error on POSIX hosts) - * Event notifiers (inter-thread signalling) - * Timers - * Bottom Halves (BH) deferred callbacks - -There are several old APIs that use the main loop AioContext: - * LEGACY qemu_aio_set_fd_handler() - monitor a file descriptor - * LEGACY qemu_aio_set_event_notifier() - monitor an event notifier - * LEGACY timer_new_ms() - create a timer - * LEGACY qemu_bh_new() - create a BH - * LEGACY qemu_bh_new_guarded() - create a BH with a device re-entrancy guard - * LEGACY qemu_aio_wait() - run an event loop iteration - -Since they implicitly work on the main loop they cannot be used in code that -runs in an IOThread. They might cause a crash or deadlock if called from an -IOThread since the BQL is not held. - -Instead, use the AioContext functions directly (see include/block/aio.h): - * aio_set_fd_handler() - monitor a file descriptor - * aio_set_event_notifier() - monitor an event notifier - * aio_timer_new() - create a timer - * aio_bh_new() - create a BH - * aio_bh_new_guarded() - create a BH with a device re-entrancy guard - * aio_poll() - run an event loop iteration - -The qemu_bh_new_guarded/aio_bh_new_guarded APIs accept a "MemReentrancyGuard" -argument, which is used to check for and prevent re-entrancy problems. For -BHs associated with devices, the reentrancy-guard is contained in the -corresponding DeviceState and named "mem_reentrancy_guard". - -The AioContext can be obtained from the IOThread using -iothread_get_aio_context() or for the main loop using qemu_get_aio_context(). -Code that takes an AioContext argument works both in IOThreads or the main -loop, depending on which AioContext instance the caller passes in. - -How to synchronize with an IOThread ------------------------------------ -Variables that can be accessed by multiple threads require some form of -synchronization such as qemu_mutex_lock(), rcu_read_lock(), etc. - -AioContext functions like aio_set_fd_handler(), aio_set_event_notifier(), -aio_bh_new(), and aio_timer_new() are thread-safe. They can be used to trigger -activity in an IOThread. - -Side note: the best way to schedule a function call across threads is to call -aio_bh_schedule_oneshot(). - -The main loop thread can wait synchronously for a condition using -AIO_WAIT_WHILE(). - -AioContext and the block layer ------------------------------- -The AioContext originates from the QEMU block layer, even though nowadays -AioContext is a generic event loop that can be used by any QEMU subsystem. - -The block layer has support for AioContext integrated. 
Each BlockDriverState -is associated with an AioContext using bdrv_try_change_aio_context() and -bdrv_get_aio_context(). This allows block layer code to process I/O inside the -right AioContext. Other subsystems may wish to follow a similar approach. - -Block layer code must therefore expect to run in an IOThread and avoid using -old APIs that implicitly use the main loop. See the "How to program for -IOThreads" above for information on how to do that. - -Code running in the monitor typically needs to ensure that past -requests from the guest are completed. When a block device is running -in an IOThread, the IOThread can also process requests from the guest -(via ioeventfd). To achieve both objects, wrap the code between -bdrv_drained_begin() and bdrv_drained_end(), thus creating a "drained -section". - -Long-running jobs (usually in the form of coroutines) are often scheduled in -the BlockDriverState's AioContext. The functions -bdrv_add/remove_aio_context_notifier, or alternatively -blk_add/remove_aio_context_notifier if you use BlockBackends, can be used to -get a notification whenever bdrv_try_change_aio_context() moves a -BlockDriverState to a different AioContext. diff --git a/docs/devel/nested-papr.txt b/docs/devel/nested-papr.txt deleted file mode 100644 index 90943650db9..00000000000 --- a/docs/devel/nested-papr.txt +++ /dev/null @@ -1,119 +0,0 @@ -Nested PAPR API (aka KVM on PowerVM) -==================================== - -This API aims at providing support to enable nested virtualization with -KVM on PowerVM. While the existing support for nested KVM on PowerNV was -introduced with cap-nested-hv option, however, with a slight design change, -to enable this on papr/pseries, a new cap-nested-papr option is added. eg: - - qemu-system-ppc64 -cpu POWER10 -machine pseries,cap-nested-papr=true ... - -Work by: - Michael Neuling - Vaibhav Jain - Jordan Niethe - Harsh Prateek Bora - Shivaprasad G Bhat - Kautuk Consul - -Below taken from the kernel documentation: - -Introduction -============ - -This document explains how a guest operating system can act as a -hypervisor and run nested guests through the use of hypercalls, if the -hypervisor has implemented them. The terms L0, L1, and L2 are used to -refer to different software entities. L0 is the hypervisor mode entity -that would normally be called the "host" or "hypervisor". L1 is a -guest virtual machine that is directly run under L0 and is initiated -and controlled by L0. L2 is a guest virtual machine that is initiated -and controlled by L1 acting as a hypervisor. A significant design change -wrt existing API is that now the entire L2 state is maintained within L0. - -Existing Nested-HV API -====================== - -Linux/KVM has had support for Nesting as an L0 or L1 since 2018 - -The L0 code was added:: - - commit 8e3f5fc1045dc49fd175b978c5457f5f51e7a2ce - Author: Paul Mackerras - Date: Mon Oct 8 16:31:03 2018 +1100 - KVM: PPC: Book3S HV: Framework and hcall stubs for nested virtualization - -The L1 code was added:: - - commit 360cae313702cdd0b90f82c261a8302fecef030a - Author: Paul Mackerras - Date: Mon Oct 8 16:31:04 2018 +1100 - KVM: PPC: Book3S HV: Nested guest entry via hypercall - -This API works primarily using a signal hcall h_enter_nested(). This -call made by the L1 to tell the L0 to start an L2 vCPU with the given -state. The L0 then starts this L2 and runs until an L2 exit condition -is reached. Once the L2 exits, the state of the L2 is given back to -the L1 by the L0. 
The full L2 vCPU state is always transferred from -and to L1 when the L2 is run. The L0 doesn't keep any state on the L2 -vCPU (except in the short sequence in the L0 on L1 -> L2 entry and L2 --> L1 exit). - -The only state kept by the L0 is the partition table. The L1 registers -it's partition table using the h_set_partition_table() hcall. All -other state held by the L0 about the L2s is cached state (such as -shadow page tables). - -The L1 may run any L2 or vCPU without first informing the L0. It -simply starts the vCPU using h_enter_nested(). The creation of L2s and -vCPUs is done implicitly whenever h_enter_nested() is called. - -In this document, we call this existing API the v1 API. - -New PAPR API -=============== - -The new PAPR API changes from the v1 API such that the creating L2 and -associated vCPUs is explicit. In this document, we call this the v2 -API. - -h_enter_nested() is replaced with H_GUEST_VCPU_RUN(). Before this can -be called the L1 must explicitly create the L2 using h_guest_create() -and any associated vCPUs() created with h_guest_create_vCPU(). Getting -and setting vCPU state can also be performed using h_guest_{g|s}et -hcall. - -The basic execution flow is for an L1 to create an L2, run it, and -delete it is: - -- L1 and L0 negotiate capabilities with H_GUEST_{G,S}ET_CAPABILITIES() - (normally at L1 boot time). - -- L1 requests the L0 to create an L2 with H_GUEST_CREATE() and receives a token - -- L1 requests the L0 to create an L2 vCPU with H_GUEST_CREATE_VCPU() - -- L1 and L0 communicate the vCPU state using the H_GUEST_{G,S}ET() hcall - -- L1 requests the L0 to run the vCPU using H_GUEST_RUN_VCPU() hcall - -- L1 deletes L2 with H_GUEST_DELETE() - -For more details, please refer: - -[1] Linux Kernel documentation (upstream documentation commit): - -commit 476652297f94a2e5e5ef29e734b0da37ade94110 -Author: Michael Neuling -Date: Thu Sep 14 13:06:00 2023 +1000 - - docs: powerpc: Document nested KVM on POWER - - Document support for nested KVM on POWER using the existing API as well - as the new PAPR API. This includes the new HCALL interface and how it - used by KVM. - - Signed-off-by: Michael Neuling - Signed-off-by: Jordan Niethe - Signed-off-by: Michael Ellerman - Link: https://msgid.link/20230914030600.16993-12-jniethe5@gmail.com diff --git a/docs/devel/qapi-code-gen.rst b/docs/devel/qapi-code-gen.rst index 583207a8ec2..d97602f464c 100644 --- a/docs/devel/qapi-code-gen.rst +++ b/docs/devel/qapi-code-gen.rst @@ -9,6 +9,7 @@ How to use the QAPI code generator This work is licensed under the terms of the GNU GPL, version 2 or later. See the COPYING file in the top-level directory. +.. _qapi: Introduction ============ @@ -228,7 +229,8 @@ These are of the form PREFIX_NAME, where PREFIX is derived from the enumeration type's name, and NAME from the value's name. For the example above, the generator maps 'MyEnum' to MY_ENUM and 'value1' to VALUE1, resulting in the enumeration constant MY_ENUM_VALUE1. The -optional 'prefix' member overrides PREFIX. +optional 'prefix' member overrides PREFIX. This is rarely necessary, +and should be used with restraint. The generated C enumeration constants have values 0, 1, ..., N-1 (in QAPI schema order), where N is the number of values. There is an @@ -644,9 +646,9 @@ Member 'event' names the event. This is the event name used in the Client JSON Protocol. Member 'data' defines the event-specific data. It defaults to an -empty MEMBERS object. +empty MEMBERS_ object. 
-If 'data' is a MEMBERS object, then MEMBERS defines event-specific +If 'data' is a MEMBERS_ object, then MEMBERS defines event-specific data just like a struct type's 'data' defines struct type members. If 'data' is a STRING, then STRING names a complex type whose members @@ -761,8 +763,8 @@ Names beginning with ``x-`` used to signify "experimental". This convention has been replaced by special feature "unstable". Pragmas ``command-name-exceptions`` and ``member-name-exceptions`` let -you violate naming rules. Use for new code is strongly discouraged. See -`Pragma directives`_ for details. +you violate naming rules. Use for new code is strongly discouraged. +See `Pragma directives`_ for details. Downstream extensions @@ -784,8 +786,8 @@ Configuring the schema Syntax:: COND = STRING - | { 'all: [ COND, ... ] } - | { 'any: [ COND, ... ] } + | { 'all': [ COND, ... ] } + | { 'any': [ COND, ... ] } | { 'not': COND } All definitions take an optional 'if' member. Its value must be a @@ -874,25 +876,35 @@ structuring content. Headings and subheadings ~~~~~~~~~~~~~~~~~~~~~~~~ -A free-form documentation comment containing a line which starts with -some ``=`` symbols and then a space defines a section heading:: +Free-form documentation does not start with ``@SYMBOL`` and can contain +arbitrary rST markup. Headings can be marked up using the standard rST +syntax:: ## - # = This is a top level heading + # ************************* + # This is a level 2 heading + # ************************* # # This is a free-form comment which will go under the # top level heading. ## ## - # == This is a second level heading + # This is a third level heading + # ============================== + # + # Level 4 + # _______ + # + # Level 5 + # ^^^^^^^ + # + # Level 6 + # """"""" ## -A heading line must be the first line of the documentation -comment block. - -Section headings must always be correctly nested, so you can only -define a third-level heading inside a second-level heading, and so on. +Level 1 headings are reserved for use by the generated documentation +page itself, leaving level 2 as the highest level that should be used. Documentation markup @@ -931,9 +943,14 @@ The usual ****strong****, *\*emphasized\** and ````literal```` markup should be used. If you need a single literal ``*``, you will need to backslash-escape it. -Use ``@foo`` to reference a name in the schema. This is an rST -extension. It is rendered the same way as ````foo````, but carries -additional meaning. +Use ```foo``` to reference a definition in the schema. This generates +a link to the definition. In the event that such a cross-reference is +ambiguous, you can use `QAPI cross-reference roles +` to disambiguate. + +Use @foo to reference a member description within the current +definition. This is an rST extension. It is currently rendered the +same way as ````foo````, but carries additional meaning. Example:: @@ -1011,7 +1028,7 @@ like this:: document the success and the error response, respectively. "Errors" sections should be formatted as an rST list, each entry -detailing a relevant error condition. For example:: +detailing a relevant error condition. For example:: # Errors: # - If @device does not exist, DeviceNotFound @@ -1024,31 +1041,28 @@ definition. QMP). In other sections, the text is formatted, and rST markup can be used. -QMP Examples can be added by using the ``.. qmp-example::`` -directive. 
In its simplest form, this can be used to contain a single -QMP code block which accepts standard JSON syntax with additional server -directionality indicators (``->`` and ``<-``), and elisions (``...``). +QMP Examples can be added by using the ``.. qmp-example::`` directive. +In its simplest form, this can be used to contain a single QMP code +block which accepts standard JSON syntax with additional server +directionality indicators (``->`` and ``<-``), and elisions. An +elision is commonly ``...``, but it can also be or a pair of ``...`` +with text in between. Optionally, a plaintext title may be provided by using the ``:title:`` -directive option. If the title is omitted, the example title will +directive option. If the title is omitted, the example title will default to "Example:". A simple QMP example:: # .. qmp-example:: - # :title: Using query-block # - # -> { "execute": "query-block" } - # <- { ... } - -More complex or multi-step examples where exposition is needed before or -between QMP code blocks can be created by using the ``:annotated:`` -directive option. When using this option, nested QMP code blocks must be -entered explicitly with rST's ``::`` syntax. + # -> { "execute": "query-name" } + # <- { "return": { "name": "Fred" } } -Highlighting in non-QMP languages can be accomplished by using the -``.. code-block:: lang`` directive, and non-highlighted text can be -achieved by omitting the language argument. +More complex or multi-step examples where exposition is needed before +or between QMP code blocks can be created by using the ``:annotated:`` +directive option. When using this option, nested QMP code blocks must +be entered explicitly with rST's ``::`` syntax. For example:: @@ -1059,11 +1073,21 @@ For example:: # This is a more complex example that can use # ``arbitrary rST syntax`` in its exposition:: # - # -> { "execute": "query-block" } - # <- { ... } + # -> { "execute": "query-block" } + # <- { "return": [ + # { + # "device": "ide0-hd0", + # ... + # } + # ... more ... + # ] } # # Above, lengthy output has been omitted for brevity. +Highlighting in non-QMP languages can be accomplished by using the +``.. code-block:: lang`` directive, and non-highlighted text can be +achieved by omitting the language argument. + Examples of complete definition documentation:: @@ -1464,7 +1488,9 @@ As an example, we'll use the following schema, which describes a single complex user-defined type, along with command which takes a list of that type as a parameter, and returns a single element of that type. The user is responsible for writing the implementation of -qmp_my_command(); everything else is produced by the generator. :: +qmp_my_command(); everything else is produced by the generator. 
+ +:: $ cat example-schema.json { 'struct': 'UserDefOne', @@ -1854,7 +1880,7 @@ Example:: #ifndef EXAMPLE_QAPI_INIT_COMMANDS_H #define EXAMPLE_QAPI_INIT_COMMANDS_H - #include "qapi/qmp/dispatch.h" + #include "qapi/qmp-registry.h" void example_qmp_init_marshal(QmpCommandList *cmds); @@ -1985,7 +2011,7 @@ Example:: #ifndef EXAMPLE_QAPI_INTROSPECT_H #define EXAMPLE_QAPI_INTROSPECT_H - #include "qapi/qmp/qlit.h" + #include "qobject/qlit.h" extern const QLitObject example_qmp_schema_qlit; diff --git a/docs/devel/qapi-domain.rst b/docs/devel/qapi-domain.rst new file mode 100644 index 00000000000..1924f12d42c --- /dev/null +++ b/docs/devel/qapi-domain.rst @@ -0,0 +1,748 @@ +====================== +The Sphinx QAPI Domain +====================== + +An extension to the `rST syntax +`_ +in Sphinx is provided by the QAPI Domain, located in +``docs/sphinx/qapi_domain.py``. This extension is analogous to the +`Python Domain +`_ +included with Sphinx, but provides special directives and roles +for annotating and documenting QAPI definitions +specifically. + +A `Domain +`_ +provides a set of special rST directives and cross-referencing roles to +Sphinx for understanding rST markup written to document a specific +language. By itself, this QAPI extension is only sufficient to parse rST +markup written by hand; the `autodoc +`_ +functionality is provided elsewhere, in ``docs/sphinx/qapidoc.py``, by +the "Transmogrifier". + +It is not expected that any developer nor documentation writer would +never need to write *nor* read these special rST forms. However, in the +event that something needs to be debugged, knowing the syntax of the +domain is quite handy. This reference may also be useful as a guide for +understanding the QAPI Domain extension code itself. Although most of +these forms will not be needed for documentation writing purposes, +understanding the cross-referencing syntax *will* be helpful when +writing rST documentation elsewhere, or for enriching the body of +QAPIDoc blocks themselves. + + +Concepts +======== + +The QAPI Domain itself provides no mechanisms for reading the QAPI +Schema or generating documentation from code that exists. It is merely +the rST syntax used to describe things. For instance, the Sphinx Python +domain adds syntax like ``:py:func:`` for describing Python functions in +documentation, but it's the autodoc module that is responsible for +reading Python code and generating such syntax. QAPI is analogous here: +qapidoc.py is responsible for reading the QAPI Schema and generating rST +syntax, and qapi_domain.py is responsible for translating that special +syntax and providing APIs for Sphinx internals. + +In other words: + +qapi_domain.py adds syntax like ``.. qapi:command::`` to Sphinx, and +qapidoc.py transforms the documentation in ``qapi/*.json`` into rST +using directives defined by the domain. + +Or even shorter: + +``:py:`` is to ``:qapi:`` as *autodoc* is to *qapidoc*. + + +Info Field Lists +================ + +`Field lists +`_ +are a standard syntax in reStructuredText. Sphinx `extends that syntax +`_ +to give certain field list entries special meaning and parsing to, for +example, add cross-references. The QAPI Domain takes advantage of this +field list extension to document things like Arguments, Members, Values, +and so on. + +The special parsing and handling of info field lists in Sphinx is provided by +three main classes; Field, GroupedField, and TypedField. 
The behavior +and formatting for each configured field list entry in the domain +changes depending on which class is used. + +Field: + * Creates an ungrouped field: i.e., each entry will create its own + section and they will not be combined. + * May *optionally* support an argument. + * May apply cross-reference roles to *either* the argument *or* the + content body, both, or neither. + +This is used primarily for entries which are not expected to be +repeated, i.e., items that may only show up at most once. The QAPI +domain uses this class for "Errors" section. + +GroupedField: + * Creates a grouped field: i.e. multiple adjacent entries will be + merged into one section, and the content will form a bulleted list. + * *Must* take an argument. + * May optionally apply a cross-reference role to the argument, but not + the body. + * Can be configured to remove the bulleted list if there is only a + single entry. + * All items will be generated with the form: "argument -- body" + +This is used for entries which are expected to be repeated, but aren't +expected to have two arguments, i.e. types without names, or names +without types. The QAPI domain uses this class for features, returns, +and enum values. + +TypedField: + * Creates a grouped, typed field. Multiple adjacent entries will be + merged into one section, and the content will form a bulleted list. + * *Must* take at least one argument, but supports up to two - + nominally, a name and a type. + * May optionally apply a cross-reference role to the type or the name + argument, but not the body. + * Can be configured to remove the bulleted list if there is only a + single entry. + * All items will be generated with the form "name (type) -- body" + +This is used for entries that are expected to be repeated and will have +a name, a type, and a description. The QAPI domain uses this class for +arguments, alternatives, and members. Wherever type names are referenced +below, They must be a valid, documented type that will be +cross-referenced in the HTML output; or one of the built-in JSON types +(string, number, int, boolean, null, value, q_empty). + + +``:feat:`` +---------- + +Document a feature attached to a QAPI definition. + +:availability: This field list is available in the body of Command, + Event, Enum, Object and Alternate directives. +:syntax: ``:feat name: Lorem ipsum, dolor sit amet...`` +:type: `sphinx.util.docfields.GroupedField + `_ + +Example:: + + .. qapi:object:: BlockdevOptionsVirtioBlkVhostVdpa + :since: 7.2 + :ifcond: CONFIG_BLKIO + + Driver specific block device options for the virtio-blk-vhost-vdpa + backend. + + :memb string path: path to the vhost-vdpa character device. + :feat fdset: Member ``path`` supports the special "/dev/fdset/N" path + (since 8.1) + + +``:arg:`` +--------- + +Document an argument to a QAPI command. + +:availability: This field list is only available in the body of the + Command directive. +:syntax: ``:arg type name: description`` +:type: `sphinx.util.docfields.TypedField + `_ + + +Example:: + + .. qapi:command:: job-pause + :since: 3.0 + + Pause an active job. + + This command returns immediately after marking the active job for + pausing. Pausing an already paused job is an error. + + The job will pause as soon as possible, which means transitioning + into the PAUSED state if it was RUNNING, or into STANDBY if it was + READY. The corresponding JOB_STATUS_CHANGE event will be emitted. + + Cancelling a paused job automatically resumes it. + + :arg string id: The job identifier. 
+ + +``:error:`` +----------- + +Document the error condition(s) of a QAPI command. + +:availability: This field list is only available in the body of the + Command directive. +:syntax: ``:error: Lorem ipsum dolor sit amet ...`` +:type: `sphinx.util.docfields.Field + `_ + +The format of the :errors: field list description is free-form rST. The +alternative spelling ":errors:" is also permitted, but strictly +analogous. + +Example:: + + .. qapi:command:: block-job-set-speed + :since: 1.1 + + Set maximum speed for a background block operation. + + This command can only be issued when there is an active block job. + + Throttling can be disabled by setting the speed to 0. + + :arg string device: The job identifier. This used to be a device + name (hence the name of the parameter), but since QEMU 2.7 it + can have other values. + :arg int speed: the maximum speed, in bytes per second, or 0 for + unlimited. Defaults to 0. + :error: + - If no background operation is active on this device, + DeviceNotActive + + +``:return:`` +------------- + +Document the return type(s) and value(s) of a QAPI command. + +:availability: This field list is only available in the body of the + Command directive. +:syntax: ``:return type: Lorem ipsum dolor sit amet ...`` +:type: `sphinx.util.docfields.GroupedField + `_ + + +Example:: + + .. qapi:command:: query-replay + :since: 5.2 + + Retrieve the record/replay information. It includes current + instruction count which may be used for ``replay-break`` and + ``replay-seek`` commands. + + :return ReplayInfo: record/replay information. + + .. qmp-example:: + + -> { "execute": "query-replay" } + <- { "return": { + "mode": "play", "filename": "log.rr", "icount": 220414 } + } + + +``:return-nodesc:`` +------------------- + +Document the return type of a QAPI command, without an accompanying +description. + +:availability: This field list is only available in the body of the + Command directive. +:syntax: ``:return-nodesc: type`` +:type: `sphinx.util.docfields.Field + `_ + + +Example:: + + .. qapi:command:: query-replay + :since: 5.2 + + Retrieve the record/replay information. It includes current + instruction count which may be used for ``replay-break`` and + ``replay-seek`` commands. + + :return-nodesc: ReplayInfo + + .. qmp-example:: + + -> { "execute": "query-replay" } + <- { "return": { + "mode": "play", "filename": "log.rr", "icount": 220414 } + } + +``:value:`` +----------- + +Document a possible value for a QAPI enum. + +:availability: This field list is only available in the body of the Enum + directive. +:syntax: ``:value name: Lorem ipsum, dolor sit amet ...`` +:type: `sphinx.util.docfields.GroupedField + `_ + +Example:: + + .. qapi:enum:: QapiErrorClass + :since: 1.2 + + QEMU error classes + + :value GenericError: this is used for errors that don't require a specific + error class. This should be the default case for most errors + :value CommandNotFound: the requested command has not been found + :value DeviceNotActive: a device has failed to be become active + :value DeviceNotFound: the requested device has not been found + :value KVMMissingCap: the requested operation can't be fulfilled because a + required KVM capability is missing + + +``:alt:`` +------------ + +Document a possible branch for a QAPI alternate. + +:availability: This field list is only available in the body of the + Alternate directive. 
+:syntax: ``:alt type name: Lorem ipsum, dolor sit amet ...`` +:type: `sphinx.util.docfields.TypedField + `_ + +As a limitation of Sphinx, we must document the "name" of the branch in +addition to the type, even though this information is not visible on the +wire in the QMP protocol format. This limitation *may* be lifted at a +future date. + +Example:: + + .. qapi:alternate:: StrOrNull + :since: 2.10 + + This is a string value or the explicit lack of a string (null + pointer in C). Intended for cases when 'optional absent' already + has a different meaning. + + :alt string s: the string value + :alt null n: no string value + + +``:memb:`` +---------- + +Document a member of an Event or Object. + +:availability: This field list is available in the body of Event or + Object directives. +:syntax: ``:memb type name: Lorem ipsum, dolor sit amet ...`` +:type: `sphinx.util.docfields.TypedField + `_ + +This is fundamentally the same as ``:arg:`` and ``:alt:``, but uses the +"Members" phrasing for Events and Objects (Structs and Unions). + +Example:: + + .. qapi:event:: JOB_STATUS_CHANGE + :since: 3.0 + + Emitted when a job transitions to a different status. + + :memb string id: The job identifier + :memb JobStatus status: The new job status + + +Arbitrary field lists +--------------------- + +Other field list names, while valid rST syntax, are prohibited inside of +QAPI directives to help prevent accidental misspellings of info field +list names. If you want to add a new arbitrary "non-value-added" field +list to QAPI documentation, you must add the field name to the allow +list in ``docs/conf.py`` + +For example:: + + qapi_allowed_fields = { + "see also", + } + +Will allow you to add arbitrary field lists in QAPI directives:: + + .. qapi:command:: x-fake-command + + :see also: Lorem ipsum, dolor sit amet ... + +.. _QAPI-domain-cross-references: + +Cross-references +================ + +Cross-reference `roles +`_ +in the QAPI domain are modeled closely after the `Python +cross-referencing syntax +`_. + +QAPI definitions can be referenced using the standard `any +`_ +role cross-reference syntax, such as with ```query-blockstats```. In +the event that disambiguation is needed, cross-references can also be +written using a number of explicit cross-reference roles: + +* ``:qapi:mod:`block-core``` -- Reference a QAPI module. The link will + take you to the beginning of that section in the documentation. +* ``:qapi:cmd:`query-block``` -- Reference a QAPI command. +* ``:qapi:event:`JOB_STATUS_CHANGE``` -- Reference a QAPI event. +* ``:qapi:enum:`QapiErrorClass``` -- Reference a QAPI enum. +* ``:qapi:obj:`BlockdevOptionsVirtioBlkVhostVdpa`` -- Reference a QAPI + object (struct or union) +* ``:qapi:alt:`StrOrNull``` -- Reference a QAPI alternate. +* ``:qapi:type:`BlockDirtyInfo``` -- Reference *any* QAPI type; this + excludes modules, commands, and events. +* ``:qapi:any:`block-job-set-speed``` -- Reference absolutely any QAPI entity. + +Type arguments in info field lists are converted into references as if +you had used the ``:qapi:type:`` role. All of the special syntax below +applies to both info field lists and standalone explicit +cross-references. + + +Type decorations +---------------- + +Type names in references can be surrounded by brackets, like +``[typename]``, to indicate an array of that type. The cross-reference +will apply only to the type name between the brackets. 
For example; +``:qapi:type:`[Qcow2BitmapInfoFlags]``` renders to: +:qapi:type:`[QMP:Qcow2BitmapInfoFlags]` + +To indicate an optional argument/member in a field list, the type name +can be suffixed with ``?``. The cross-reference will be transformed to +"type, Optional" with the link applying only to the type name. For +example; ``:qapi:type:`BitmapSyncMode?``` renders to: +:qapi:type:`QMP:BitmapSyncMode?` + + +Namespaces +---------- + +Mimicking the `Python domain target specification syntax +`_, +QAPI allows you to specify the fully qualified path for a data +type. + +* A namespace can be explicitly provided; + e.g. ``:qapi:type:`QMP:BitmapSyncMode`` +* A module can be explicitly provided; + ``:qapi:type:`QMP:block-core.BitmapSyncMode``` will render to: + :qapi:type:`QMP:block-core.BitmapSyncMode` +* If you don't want to display the "fully qualified" name, it can be + prefixed with a tilde; ``:qapi:type:`~QMP:block-core.BitmapSyncMode``` + will render to: :qapi:type:`~QMP:block-core.BitmapSyncMode` + + +Target resolution +----------------- + +Any cross-reference to a QAPI type, whether using the ```any``` style of +reference or the more explicit ```:qapi:any:`target``` syntax, allows +for the presence or absence of either the namespace or module +information. + +When absent, their value will be inferred from context by the presence +of any ``qapi:namespace`` or ``qapi:module`` directives preceding the +cross-reference. + +If no results are found when using the inferred values, other +namespaces/modules will be searched as a last resort; but any explicitly +provided values must always match in order to succeed. + +This allows for efficient cross-referencing with a minimum of syntax in +the large majority of cases, but additional context or namespace markup +may be required outside of the QAPI reference documents when linking to +items that share a name across multiple documented QAPI schema. + + +Custom link text +---------------- + +The name of a cross-reference link can be explicitly overridden like +`most stock Sphinx references +`_ +using the ``custom text `` syntax. + +For example, ``:qapi:cmd:`Merge dirty bitmaps +``` will render as: :qapi:cmd:`Merge dirty +bitmaps ` + + +Directives +========== + +The QAPI domain adds a number of custom directives for documenting +various QAPI/QMP entities. The syntax is plain rST, and follows this +general format:: + + .. qapi:directive:: argument + :option: + :another-option: with an argument + + Content body, arbitrary rST is allowed here. + + +Sphinx standard options +----------------------- + +All QAPI directives inherit a number of `standard options +`_ +from Sphinx's ObjectDescription class. + +The dashed spellings of the below options were added in Sphinx 7.2, the +undashed spellings are currently retained as aliases, but will be +removed in a future version. + +* ``:no-index:`` and ``:noindex:`` -- Do not add this item into the + Index, and do not make it available for cross-referencing. +* ``no-index-entry:`` and ``:noindexentry:`` -- Do not add this item + into the Index, but allow it to be cross-referenced. +* ``no-contents-entry`` and ``:nocontentsentry:`` -- Exclude this item + from the Table of Contents. +* ``no-typesetting`` -- Create TOC, Index and cross-referencing + entities, but don't actually display the content. + + +QAPI standard options +--------------------- + +All QAPI directives -- *except* for namespace and module -- support +these common options. 
+ +* ``:namespace: name`` -- This option allows you to override the + namespace association of a given definition. +* ``:module: modname`` -- Borrowed from the Python domain, this option allows + you to override the module association of a given definition. +* ``:since: x.y`` -- Allows the documenting of "Since" information, which is + displayed in the signature bar. +* ``:ifcond: CONDITION`` -- Allows the documenting of conditional availability + information, which is displayed in an eyecatch just below the + signature bar. +* ``:deprecated:`` -- Adds an eyecatch just below the signature bar that + advertises that this definition is deprecated and should be avoided. +* ``:unstable:`` -- Adds an eyecatch just below the signature bar that + advertises that this definition is unstable and should not be used in + production code. + + +qapi:namespace +-------------- + +The ``qapi:namespace`` directive marks the start of a QAPI namespace. It +does not take a content body, nor any options. All subsequent QAPI +directives are associated with the most recent namespace. This affects +the definition's "fully qualified name", allowing two different +namespaces to create an otherwise identically named definition. + +This directive also influences how reference resolution works for any +references that do not explicitly specify a namespace, so this directive +can be used to nudge references into preferring targets from within that +namespace. + +Example:: + + .. qapi:namespace:: QMP + + +This directive has no visible effect. + + +qapi:module +----------- + +The ``qapi:module`` directive marks the start of a QAPI module. It may have +a content body, but it can be omitted. All subsequent QAPI directives +are associated with the most recent module; this effects their "fully +qualified" name, but has no other effect. + +Example:: + + .. qapi:module:: block-core + + Welcome to the block-core module! + +Will be rendered as: + +.. qapi:module:: block-core + :noindex: + + Welcome to the block-core module! + + +qapi:command +------------ + +This directive documents a QMP command. It may use any of the standard +Sphinx or QAPI options, and the documentation body may contain +``:arg:``, ``:feat:``, ``:error:``, or ``:return:`` info field list +entries. + +Example:: + + .. qapi:command:: x-fake-command + :since: 42.0 + :unstable: + + This command is fake, so it can't hurt you! + + :arg int foo: Your favorite number. + :arg string? bar: Your favorite season. + :return [string]: A lovely computer-written poem for you. + + +Will be rendered as: + + .. qapi:command:: x-fake-command + :noindex: + :since: 42.0 + :unstable: + + This command is fake, so it can't hurt you! + + :arg int foo: Your favorite number. + :arg string? bar: Your favorite season. + :return [string]: A lovely computer-written poem for you. + + +qapi:event +---------- + +This directive documents a QMP event. It may use any of the standard +Sphinx or QAPI options, and the documentation body may contain +``:memb:`` or ``:feat:`` info field list entries. + +Example:: + + .. qapi:event:: COMPUTER_IS_RUINED + :since: 0.1 + :deprecated: + + This event is emitted when your computer is *extremely* ruined. + + :memb string reason: Diagnostics as to what caused your computer to + be ruined. + :feat sadness: When present, the diagnostic message will also + explain how sad the computer is as a result of your wrongdoings. + +Will be rendered as: + +.. 
qapi:event:: COMPUTER_IS_RUINED + :noindex: + :since: 0.1 + :deprecated: + + This event is emitted when your computer is *extremely* ruined. + + :memb string reason: Diagnostics as to what caused your computer to + be ruined. + :feat sadness: When present, the diagnostic message will also explain + how sad the computer is as a result of your wrongdoings. + + +qapi:enum +--------- + +This directive documents a QAPI enum. It may use any of the standard +Sphinx or QAPI options, and the documentation body may contain +``:value:`` or ``:feat:`` info field list entries. + +Example:: + + .. qapi:enum:: Mood + :ifcond: LIB_PERSONALITY + + This enum represents your virtual machine's current mood! + + :value Happy: Your VM is content and well-fed. + :value Hungry: Your VM needs food. + :value Melancholic: Your VM is experiencing existential angst. + :value Petulant: Your VM is throwing a temper tantrum. + +Will be rendered as: + +.. qapi:enum:: Mood + :noindex: + :ifcond: LIB_PERSONALITY + + This enum represents your virtual machine's current mood! + + :value Happy: Your VM is content and well-fed. + :value Hungry: Your VM needs food. + :value Melancholic: Your VM is experiencing existential angst. + :value Petulant: Your VM is throwing a temper tantrum. + + +qapi:object +----------- + +This directive documents a QAPI structure or union and represents a QMP +object. It may use any of the standard Sphinx or QAPI options, and the +documentation body may contain ``:memb:`` or ``:feat:`` info field list +entries. + +Example:: + + .. qapi:object:: BigBlobOfStuff + + This object has a bunch of disparate and unrelated things in it. + + :memb int Birthday: Your birthday, represented in seconds since the + UNIX epoch. + :memb [string] Fav-Foods: A list of your favorite foods. + :memb boolean? Bizarre-Docs: True if the documentation reference + should be strange. + +Will be rendered as: + +.. qapi:object:: BigBlobOfStuff + :noindex: + + This object has a bunch of disparate and unrelated things in it. + + :memb int Birthday: Your birthday, represented in seconds since the + UNIX epoch. + :memb [string] Fav-Foods: A list of your favorite foods. + :memb boolean? Bizarre-Docs: True if the documentation reference + should be strange. + + +qapi:alternate +-------------- + +This directive documents a QAPI alternate. It may use any of the +standard Sphinx or QAPI options, and the documentation body may contain +``:alt:`` or ``:feat:`` info field list entries. + +Example:: + + .. qapi:alternate:: ErrorCode + + This alternate represents an Error Code from the VM. + + :alt int ec: An error code, like the type you're used to. + :alt string em: An expletive-laced error message, if your + computer is feeling particularly cranky and tired of your + antics. + +Will be rendered as: + +.. qapi:alternate:: ErrorCode + :noindex: + + This alternate represents an Error Code from the VM. + + :alt int ec: An error code, like the type you're used to. + :alt string em: An expletive-laced error message, if your + computer is feeling particularly cranky and tired of your + antics. 
diff --git a/docs/devel/qom.rst b/docs/devel/qom.rst index 0889ca949c1..5870745ba27 100644 --- a/docs/devel/qom.rst +++ b/docs/devel/qom.rst @@ -147,7 +147,7 @@ to introduce an overridden virtual function: #include "qdev.h" - void my_device_class_init(ObjectClass *klass, void *class_data) + void my_device_class_init(ObjectClass *klass, const void *class_data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->reset = my_device_reset; @@ -249,7 +249,7 @@ class, which someone might choose to change at some point. // do something } - static void my_class_init(ObjectClass *oc, void *data) + static void my_class_init(ObjectClass *oc, const void *data) { MyClass *mc = MY_CLASS(oc); @@ -279,7 +279,7 @@ class, which someone might choose to change at some point. // do something else here } - static void derived_class_init(ObjectClass *oc, void *data) + static void derived_class_init(ObjectClass *oc, const void *data) { MyClass *mc = MY_CLASS(oc); DerivedClass *dc = DERIVED_CLASS(oc); @@ -363,7 +363,7 @@ This is equivalent to the following: :caption: Expansion from defining a simple type static void my_device_finalize(Object *obj); - static void my_device_class_init(ObjectClass *oc, void *data); + static void my_device_class_init(ObjectClass *oc, const void *data); static void my_device_init(Object *obj); static const TypeInfo my_device_info = { diff --git a/docs/devel/rcu.rst b/docs/devel/rcu.rst new file mode 100644 index 00000000000..dd07c1d9195 --- /dev/null +++ b/docs/devel/rcu.rst @@ -0,0 +1,394 @@ +Using RCU (Read-Copy-Update) for synchronization +================================================ + +Read-copy update (RCU) is a synchronization mechanism that is used to +protect read-mostly data structures. RCU is very efficient and scalable +on the read side (it is wait-free), and thus can make the read paths +extremely fast. + +RCU supports concurrency between a single writer and multiple readers, +thus it is not used alone. Typically, the write-side will use a lock to +serialize multiple updates, but other approaches are possible (e.g., +restricting updates to a single task). In QEMU, when a lock is used, +this will often be the "iothread mutex", also known as the "big QEMU +lock" (BQL). Also, restricting updates to a single task is done in +QEMU using the "bottom half" API. + +RCU is fundamentally a "wait-to-finish" mechanism. The read side marks +sections of code with "critical sections", and the update side will wait +for the execution of all *currently running* critical sections before +proceeding, or before asynchronously executing a callback. + +The key point here is that only the currently running critical sections +are waited for; critical sections that are started **after** the beginning +of the wait do not extend the wait, despite running concurrently with +the updater. This is the reason why RCU is more scalable than, +for example, reader-writer locks. It is so much more scalable that +the system will have a single instance of the RCU mechanism; a single +mechanism can be used for an arbitrary number of "things", without +having to worry about things such as contention or deadlocks. + +How is this possible? The basic idea is to split updates in two phases, +"removal" and "reclamation". During removal, we ensure that subsequent +readers will not be able to get a reference to the old data. After +removal has completed, a critical section will not be able to access +the old data. 
Therefore, critical sections that begin after removal +do not matter; as soon as all previous critical sections have finished, +there cannot be any readers who hold references to the data structure, +and these can now be safely reclaimed (e.g., freed or unref'ed). + +Here is a picture:: + + thread 1 thread 2 thread 3 + ------------------- ------------------------ ------------------- + enter RCU crit.sec. + | finish removal phase + | begin wait + | | enter RCU crit.sec. + exit RCU crit.sec | | + complete wait | + begin reclamation phase | + exit RCU crit.sec. + + +Note how thread 3 is still executing its critical section when thread 2 +starts reclaiming data. This is possible, because the old version of the +data structure was not accessible at the time thread 3 began executing +that critical section. + + +RCU API +------- + +The core RCU API is small: + +``void rcu_read_lock(void);`` + Used by a reader to inform the reclaimer that the reader is + entering an RCU read-side critical section. + +``void rcu_read_unlock(void);`` + Used by a reader to inform the reclaimer that the reader is + exiting an RCU read-side critical section. Note that RCU + read-side critical sections may be nested and/or overlapping. + +``void synchronize_rcu(void);`` + Blocks until all pre-existing RCU read-side critical sections + on all threads have completed. This marks the end of the removal + phase and the beginning of reclamation phase. + + Note that it would be valid for another update to come while + ``synchronize_rcu`` is running. Because of this, it is better that + the updater releases any locks it may hold before calling + ``synchronize_rcu``. If this is not possible (for example, because + the updater is protected by the BQL), you can use ``call_rcu``. + +``void call_rcu1(struct rcu_head * head, void (*func)(struct rcu_head *head));`` + This function invokes ``func(head)`` after all pre-existing RCU + read-side critical sections on all threads have completed. This + marks the end of the removal phase, with func taking care + asynchronously of the reclamation phase. + + The ``foo`` struct needs to have an ``rcu_head`` structure added, + perhaps as follows:: + + struct foo { + struct rcu_head rcu; + int a; + char b; + long c; + }; + + so that the reclaimer function can fetch the ``struct foo`` address + and free it:: + + call_rcu1(&foo.rcu, foo_reclaim); + + void foo_reclaim(struct rcu_head *rp) + { + struct foo *fp = container_of(rp, struct foo, rcu); + g_free(fp); + } + + ``call_rcu1`` is typically used via either the ``call_rcu`` or + ``g_free_rcu`` macros, which handle the common case where the + ``rcu_head`` member is the first of the struct. + +``void call_rcu(T *p, void (*func)(T *p), field-name);`` + If the ``struct rcu_head`` is the first field in the struct, you can + use this macro instead of ``call_rcu1``. + +``void g_free_rcu(T *p, field-name);`` + This is a special-case version of ``call_rcu`` where the callback + function is ``g_free``. + In the example given in ``call_rcu1``, one could have written simply:: + + g_free_rcu(&foo, rcu); + +``typeof(*p) qatomic_rcu_read(p);`` + ``qatomic_rcu_read()`` is similar to ``qatomic_load_acquire()``, but + it makes some assumptions on the code that calls it. This allows a + more optimized implementation. + + ``qatomic_rcu_read`` assumes that whenever a single RCU critical + section reads multiple shared data, these reads are either + data-dependent or need no ordering. 
This is almost always the + case when using RCU, because read-side critical sections typically + navigate one or more pointers (the pointers that are changed on + every update) until reaching a data structure of interest, + and then read from there. + + RCU read-side critical sections must use ``qatomic_rcu_read()`` to + read data, unless concurrent writes are prevented by another + synchronization mechanism. + + Furthermore, RCU read-side critical sections should traverse the + data structure in a single direction, opposite to the direction + in which the updater initializes it. + +``void qatomic_rcu_set(p, typeof(*p) v);`` + ``qatomic_rcu_set()`` is similar to ``qatomic_store_release()``, + though it also makes assumptions on the code that calls it in + order to allow a more optimized implementation. + + In particular, ``qatomic_rcu_set()`` suffices for synchronization + with readers, if the updater never mutates a field within a + data item that is already accessible to readers. This is the + case when initializing a new copy of the RCU-protected data + structure; just ensure that initialization of ``*p`` is carried out + before ``qatomic_rcu_set()`` makes the data item visible to readers. + If this rule is observed, writes will happen in the opposite + order as reads in the RCU read-side critical sections (or if + there is just one update), and there will be no need for other + synchronization mechanism to coordinate the accesses. + +The following APIs must be used before RCU is used in a thread: + +``void rcu_register_thread(void);`` + Mark a thread as taking part in the RCU mechanism. Such a thread + will have to report quiescent points regularly, either manually + or through the ``QemuCond``/``QemuSemaphore``/``QemuEvent`` APIs. + +``void rcu_unregister_thread(void);`` + Mark a thread as not taking part anymore in the RCU mechanism. + It is not a problem if such a thread reports quiescent points, + either manually or by using the + ``QemuCond``/``QemuSemaphore``/``QemuEvent`` APIs. + +Note that these APIs are relatively heavyweight, and should **not** be +nested. + +Convenience macros +------------------ + +Two macros are provided that automatically release the read lock at the +end of the scope. + +``RCU_READ_LOCK_GUARD()`` + Takes the lock and will release it at the end of the block it's + used in. + +``WITH_RCU_READ_LOCK_GUARD() { code }`` + Is used at the head of a block to protect the code within the block. + +Note that a ``goto`` out of the guarded block will also drop the lock. + +Differences with Linux +---------------------- + +- Waiting on a mutex is possible, though discouraged, within an RCU critical + section. This is because spinlocks are rarely (if ever) used in userspace + programming; not allowing this would prevent upgrading an RCU read-side + critical section to become an updater. + +- ``qatomic_rcu_read`` and ``qatomic_rcu_set`` replace ``rcu_dereference`` and + ``rcu_assign_pointer``. They take a **pointer** to the variable being accessed. + +- ``call_rcu`` is a macro that has an extra argument (the name of the first + field in the struct, which must be a struct ``rcu_head``), and expects the + type of the callback's argument to be the type of the first argument. + ``call_rcu1`` is the same as Linux's ``call_rcu``. + + +RCU Patterns +------------ + +Many patterns using read-writer locks translate directly to RCU, with +the advantages of higher scalability and deadlock immunity. 
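Many of the idioms below are written with explicit ``rcu_read_lock()`` and
``rcu_read_unlock()`` calls; with the guard macros described earlier, a read
side such as the reference-counting one can equivalently be written as
follows. This is only a minimal sketch, assuming the same hypothetical
RCU-protected ``foo`` pointer and ``foo_ref()``/``foo_unref()`` helpers used
in the idioms below::

    struct foo *get_foo_ref(void)
    {
        struct foo *p;

        /* The RCU read lock is dropped automatically when the block is left. */
        WITH_RCU_READ_LOCK_GUARD() {
            p = qatomic_rcu_read(&foo);
            foo_ref(p);
        }

        /* p remains valid here because we now hold our own reference. */
        return p;
    }

The caller eventually drops the reference with ``foo_unref()``; reclamation of
old versions still happens on the write side via ``synchronize_rcu()`` or
``call_rcu()``, exactly as in the examples below.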
+ +In general, RCU can be used whenever it is possible to create a new +"version" of a data structure every time the updater runs. This may +sound like a very strict restriction, however: + +- the updater does not mean "everything that writes to a data structure", + but rather "everything that involves a reclamation step". See the + array example below + +- in some cases, creating a new version of a data structure may actually + be very cheap. For example, modifying the "next" pointer of a singly + linked list is effectively creating a new version of the list. + +Here are some frequently-used RCU idioms that are worth noting. + + +RCU list processing +^^^^^^^^^^^^^^^^^^^ + +TBD (not yet used in QEMU) + + +RCU reference counting +^^^^^^^^^^^^^^^^^^^^^^ + +Because grace periods are not allowed to complete while there is an RCU +read-side critical section in progress, the RCU read-side primitives +may be used as a restricted reference-counting mechanism. For example, +consider the following code fragment:: + + rcu_read_lock(); + p = qatomic_rcu_read(&foo); + /* do something with p. */ + rcu_read_unlock(); + +The RCU read-side critical section ensures that the value of ``p`` remains +valid until after the ``rcu_read_unlock()``. In some sense, it is acquiring +a reference to ``p`` that is later released when the critical section ends. +The write side looks simply like this (with appropriate locking):: + + qemu_mutex_lock(&foo_mutex); + old = foo; + qatomic_rcu_set(&foo, new); + qemu_mutex_unlock(&foo_mutex); + synchronize_rcu(); + free(old); + +If the processing cannot be done purely within the critical section, it +is possible to combine this idiom with a "real" reference count:: + + rcu_read_lock(); + p = qatomic_rcu_read(&foo); + foo_ref(p); + rcu_read_unlock(); + /* do something with p. */ + foo_unref(p); + +The write side can be like this:: + + qemu_mutex_lock(&foo_mutex); + old = foo; + qatomic_rcu_set(&foo, new); + qemu_mutex_unlock(&foo_mutex); + synchronize_rcu(); + foo_unref(old); + +or with ``call_rcu``:: + + qemu_mutex_lock(&foo_mutex); + old = foo; + qatomic_rcu_set(&foo, new); + qemu_mutex_unlock(&foo_mutex); + call_rcu(foo_unref, old, rcu); + +In both cases, the write side only performs removal. Reclamation +happens when the last reference to a ``foo`` object is dropped. +Using ``synchronize_rcu()`` is undesirably expensive, because the +last reference may be dropped on the read side. Hence you can +use ``call_rcu()`` instead:: + + foo_unref(struct foo *p) { + if (qatomic_fetch_dec(&p->refcount) == 1) { + call_rcu(foo_destroy, p, rcu); + } + } + + +Note that the same idioms would be possible with reader/writer +locks:: + + read_lock(&foo_rwlock); write_mutex_lock(&foo_rwlock); + p = foo; p = foo; + /* do something with p. */ foo = new; + read_unlock(&foo_rwlock); free(p); + write_mutex_unlock(&foo_rwlock); + free(p); + + ------------------------------------------------------------------ + + read_lock(&foo_rwlock); write_mutex_lock(&foo_rwlock); + p = foo; old = foo; + foo_ref(p); foo = new; + read_unlock(&foo_rwlock); foo_unref(old); + /* do something with p. */ write_mutex_unlock(&foo_rwlock); + read_lock(&foo_rwlock); + foo_unref(p); + read_unlock(&foo_rwlock); + +``foo_unref`` could use a mechanism such as bottom halves to move deallocation +out of the write-side critical section. + + +RCU resizable arrays +^^^^^^^^^^^^^^^^^^^^ + +Resizable arrays can be used with RCU. The expensive RCU synchronization +(or ``call_rcu``) only needs to take place when the array is resized. 
+The two items to take care of are: + +- ensuring that the old version of the array is available between removal + and reclamation; + +- avoiding mismatches in the read side between the array data and the + array size. + +The first problem is avoided simply by not using ``realloc``. Instead, +each resize will allocate a new array and copy the old data into it. +The second problem would arise if the size and the data pointers were +two members of a larger struct:: + + struct mystuff { + ... + int data_size; + int data_alloc; + T *data; + ... + }; + +Instead, we store the size of the array with the array itself:: + + struct arr { + int size; + int alloc; + T data[]; + }; + struct arr *global_array; + + read side: + rcu_read_lock(); + struct arr *array = qatomic_rcu_read(&global_array); + x = i < array->size ? array->data[i] : -1; + rcu_read_unlock(); + return x; + + write side (running under a lock): + if (global_array->size == global_array->alloc) { + /* Creating a new version. */ + new_array = g_malloc(sizeof(struct arr) + + global_array->alloc * 2 * sizeof(T)); + new_array->size = global_array->size; + new_array->alloc = global_array->alloc * 2; + memcpy(new_array->data, global_array->data, + global_array->alloc * sizeof(T)); + + /* Removal phase. */ + old_array = global_array; + qatomic_rcu_set(&global_array, new_array); + synchronize_rcu(); + + /* Reclamation phase. */ + free(old_array); + } + + +References +---------- + +* The `Linux kernel RCU documentation `__ diff --git a/docs/devel/rcu.txt b/docs/devel/rcu.txt deleted file mode 100644 index 2e6cc607a17..00000000000 --- a/docs/devel/rcu.txt +++ /dev/null @@ -1,406 +0,0 @@ -Using RCU (Read-Copy-Update) for synchronization -================================================ - -Read-copy update (RCU) is a synchronization mechanism that is used to -protect read-mostly data structures. RCU is very efficient and scalable -on the read side (it is wait-free), and thus can make the read paths -extremely fast. - -RCU supports concurrency between a single writer and multiple readers, -thus it is not used alone. Typically, the write-side will use a lock to -serialize multiple updates, but other approaches are possible (e.g., -restricting updates to a single task). In QEMU, when a lock is used, -this will often be the "iothread mutex", also known as the "big QEMU -lock" (BQL). Also, restricting updates to a single task is done in -QEMU using the "bottom half" API. - -RCU is fundamentally a "wait-to-finish" mechanism. The read side marks -sections of code with "critical sections", and the update side will wait -for the execution of all *currently running* critical sections before -proceeding, or before asynchronously executing a callback. - -The key point here is that only the currently running critical sections -are waited for; critical sections that are started _after_ the beginning -of the wait do not extend the wait, despite running concurrently with -the updater. This is the reason why RCU is more scalable than, -for example, reader-writer locks. It is so much more scalable that -the system will have a single instance of the RCU mechanism; a single -mechanism can be used for an arbitrary number of "things", without -having to worry about things such as contention or deadlocks. - -How is this possible? The basic idea is to split updates in two phases, -"removal" and "reclamation". During removal, we ensure that subsequent -readers will not be able to get a reference to the old data. 
After -removal has completed, a critical section will not be able to access -the old data. Therefore, critical sections that begin after removal -do not matter; as soon as all previous critical sections have finished, -there cannot be any readers who hold references to the data structure, -and these can now be safely reclaimed (e.g., freed or unref'ed). - -Here is a picture: - - thread 1 thread 2 thread 3 - ------------------- ------------------------ ------------------- - enter RCU crit.sec. - | finish removal phase - | begin wait - | | enter RCU crit.sec. - exit RCU crit.sec | | - complete wait | - begin reclamation phase | - exit RCU crit.sec. - - -Note how thread 3 is still executing its critical section when thread 2 -starts reclaiming data. This is possible, because the old version of the -data structure was not accessible at the time thread 3 began executing -that critical section. - - -RCU API -======= - -The core RCU API is small: - - void rcu_read_lock(void); - - Used by a reader to inform the reclaimer that the reader is - entering an RCU read-side critical section. - - void rcu_read_unlock(void); - - Used by a reader to inform the reclaimer that the reader is - exiting an RCU read-side critical section. Note that RCU - read-side critical sections may be nested and/or overlapping. - - void synchronize_rcu(void); - - Blocks until all pre-existing RCU read-side critical sections - on all threads have completed. This marks the end of the removal - phase and the beginning of reclamation phase. - - Note that it would be valid for another update to come while - synchronize_rcu is running. Because of this, it is better that - the updater releases any locks it may hold before calling - synchronize_rcu. If this is not possible (for example, because - the updater is protected by the BQL), you can use call_rcu. - - void call_rcu1(struct rcu_head * head, - void (*func)(struct rcu_head *head)); - - This function invokes func(head) after all pre-existing RCU - read-side critical sections on all threads have completed. This - marks the end of the removal phase, with func taking care - asynchronously of the reclamation phase. - - The foo struct needs to have an rcu_head structure added, - perhaps as follows: - - struct foo { - struct rcu_head rcu; - int a; - char b; - long c; - }; - - so that the reclaimer function can fetch the struct foo address - and free it: - - call_rcu1(&foo.rcu, foo_reclaim); - - void foo_reclaim(struct rcu_head *rp) - { - struct foo *fp = container_of(rp, struct foo, rcu); - g_free(fp); - } - - For the common case where the rcu_head member is the first of the - struct, you can use the following macro. - - void call_rcu(T *p, - void (*func)(T *p), - field-name); - void g_free_rcu(T *p, - field-name); - - call_rcu1 is typically used through these macro, in the common case - where the "struct rcu_head" is the first field in the struct. If - the callback function is g_free, in particular, g_free_rcu can be - used. In the above case, one could have written simply: - - g_free_rcu(&foo, rcu); - - typeof(*p) qatomic_rcu_read(p); - - qatomic_rcu_read() is similar to qatomic_load_acquire(), but it makes - some assumptions on the code that calls it. This allows a more - optimized implementation. - - qatomic_rcu_read assumes that whenever a single RCU critical - section reads multiple shared data, these reads are either - data-dependent or need no ordering. 
This is almost always the - case when using RCU, because read-side critical sections typically - navigate one or more pointers (the pointers that are changed on - every update) until reaching a data structure of interest, - and then read from there. - - RCU read-side critical sections must use qatomic_rcu_read() to - read data, unless concurrent writes are prevented by another - synchronization mechanism. - - Furthermore, RCU read-side critical sections should traverse the - data structure in a single direction, opposite to the direction - in which the updater initializes it. - - void qatomic_rcu_set(p, typeof(*p) v); - - qatomic_rcu_set() is similar to qatomic_store_release(), though it also - makes assumptions on the code that calls it in order to allow a more - optimized implementation. - - In particular, qatomic_rcu_set() suffices for synchronization - with readers, if the updater never mutates a field within a - data item that is already accessible to readers. This is the - case when initializing a new copy of the RCU-protected data - structure; just ensure that initialization of *p is carried out - before qatomic_rcu_set() makes the data item visible to readers. - If this rule is observed, writes will happen in the opposite - order as reads in the RCU read-side critical sections (or if - there is just one update), and there will be no need for other - synchronization mechanism to coordinate the accesses. - -The following APIs must be used before RCU is used in a thread: - - void rcu_register_thread(void); - - Mark a thread as taking part in the RCU mechanism. Such a thread - will have to report quiescent points regularly, either manually - or through the QemuCond/QemuSemaphore/QemuEvent APIs. - - void rcu_unregister_thread(void); - - Mark a thread as not taking part anymore in the RCU mechanism. - It is not a problem if such a thread reports quiescent points, - either manually or by using the QemuCond/QemuSemaphore/QemuEvent - APIs. - -Note that these APIs are relatively heavyweight, and should _not_ be -nested. - -Convenience macros -================== - -Two macros are provided that automatically release the read lock at the -end of the scope. - - RCU_READ_LOCK_GUARD() - - Takes the lock and will release it at the end of the block it's - used in. - - WITH_RCU_READ_LOCK_GUARD() { code } - - Is used at the head of a block to protect the code within the block. - -Note that 'goto'ing out of the guarded block will also drop the lock. - -DIFFERENCES WITH LINUX -====================== - -- Waiting on a mutex is possible, though discouraged, within an RCU critical - section. This is because spinlocks are rarely (if ever) used in userspace - programming; not allowing this would prevent upgrading an RCU read-side - critical section to become an updater. - -- qatomic_rcu_read and qatomic_rcu_set replace rcu_dereference and - rcu_assign_pointer. They take a _pointer_ to the variable being accessed. - -- call_rcu is a macro that has an extra argument (the name of the first - field in the struct, which must be a struct rcu_head), and expects the - type of the callback's argument to be the type of the first argument. - call_rcu1 is the same as Linux's call_rcu. - - -RCU PATTERNS -============ - -Many patterns using read-writer locks translate directly to RCU, with -the advantages of higher scalability and deadlock immunity. - -In general, RCU can be used whenever it is possible to create a new -"version" of a data structure every time the updater runs. 
This may -sound like a very strict restriction, however: - -- the updater does not mean "everything that writes to a data structure", - but rather "everything that involves a reclamation step". See the - array example below - -- in some cases, creating a new version of a data structure may actually - be very cheap. For example, modifying the "next" pointer of a singly - linked list is effectively creating a new version of the list. - -Here are some frequently-used RCU idioms that are worth noting. - - -RCU list processing -------------------- - -TBD (not yet used in QEMU) - - -RCU reference counting ----------------------- - -Because grace periods are not allowed to complete while there is an RCU -read-side critical section in progress, the RCU read-side primitives -may be used as a restricted reference-counting mechanism. For example, -consider the following code fragment: - - rcu_read_lock(); - p = qatomic_rcu_read(&foo); - /* do something with p. */ - rcu_read_unlock(); - -The RCU read-side critical section ensures that the value of "p" remains -valid until after the rcu_read_unlock(). In some sense, it is acquiring -a reference to p that is later released when the critical section ends. -The write side looks simply like this (with appropriate locking): - - qemu_mutex_lock(&foo_mutex); - old = foo; - qatomic_rcu_set(&foo, new); - qemu_mutex_unlock(&foo_mutex); - synchronize_rcu(); - free(old); - -If the processing cannot be done purely within the critical section, it -is possible to combine this idiom with a "real" reference count: - - rcu_read_lock(); - p = qatomic_rcu_read(&foo); - foo_ref(p); - rcu_read_unlock(); - /* do something with p. */ - foo_unref(p); - -The write side can be like this: - - qemu_mutex_lock(&foo_mutex); - old = foo; - qatomic_rcu_set(&foo, new); - qemu_mutex_unlock(&foo_mutex); - synchronize_rcu(); - foo_unref(old); - -or with call_rcu: - - qemu_mutex_lock(&foo_mutex); - old = foo; - qatomic_rcu_set(&foo, new); - qemu_mutex_unlock(&foo_mutex); - call_rcu(foo_unref, old, rcu); - -In both cases, the write side only performs removal. Reclamation -happens when the last reference to a "foo" object is dropped. -Using synchronize_rcu() is undesirably expensive, because the -last reference may be dropped on the read side. Hence you can -use call_rcu() instead: - - foo_unref(struct foo *p) { - if (qatomic_fetch_dec(&p->refcount) == 1) { - call_rcu(foo_destroy, p, rcu); - } - } - - -Note that the same idioms would be possible with reader/writer -locks: - - read_lock(&foo_rwlock); write_mutex_lock(&foo_rwlock); - p = foo; p = foo; - /* do something with p. */ foo = new; - read_unlock(&foo_rwlock); free(p); - write_mutex_unlock(&foo_rwlock); - free(p); - - ------------------------------------------------------------------ - - read_lock(&foo_rwlock); write_mutex_lock(&foo_rwlock); - p = foo; old = foo; - foo_ref(p); foo = new; - read_unlock(&foo_rwlock); foo_unref(old); - /* do something with p. */ write_mutex_unlock(&foo_rwlock); - read_lock(&foo_rwlock); - foo_unref(p); - read_unlock(&foo_rwlock); - -foo_unref could use a mechanism such as bottom halves to move deallocation -out of the write-side critical section. - - -RCU resizable arrays --------------------- - -Resizable arrays can be used with RCU. The expensive RCU synchronization -(or call_rcu) only needs to take place when the array is resized. 
-The two items to take care of are: - -- ensuring that the old version of the array is available between removal - and reclamation; - -- avoiding mismatches in the read side between the array data and the - array size. - -The first problem is avoided simply by not using realloc. Instead, -each resize will allocate a new array and copy the old data into it. -The second problem would arise if the size and the data pointers were -two members of a larger struct: - - struct mystuff { - ... - int data_size; - int data_alloc; - T *data; - ... - }; - -Instead, we store the size of the array with the array itself: - - struct arr { - int size; - int alloc; - T data[]; - }; - struct arr *global_array; - - read side: - rcu_read_lock(); - struct arr *array = qatomic_rcu_read(&global_array); - x = i < array->size ? array->data[i] : -1; - rcu_read_unlock(); - return x; - - write side (running under a lock): - if (global_array->size == global_array->alloc) { - /* Creating a new version. */ - new_array = g_malloc(sizeof(struct arr) + - global_array->alloc * 2 * sizeof(T)); - new_array->size = global_array->size; - new_array->alloc = global_array->alloc * 2; - memcpy(new_array->data, global_array->data, - global_array->alloc * sizeof(T)); - - /* Removal phase. */ - old_array = global_array; - qatomic_rcu_set(&global_array, new_array); - synchronize_rcu(); - - /* Reclamation phase. */ - free(old_array); - } - - -SOURCES -======= - -* Documentation/RCU/ from the Linux kernel diff --git a/docs/devel/replay.rst b/docs/devel/replay.rst index effd856f0c6..40f58d9d4fc 100644 --- a/docs/devel/replay.rst +++ b/docs/devel/replay.rst @@ -202,6 +202,9 @@ into the log. Saving/restoring the VM state ----------------------------- +Record/replay relies on VM state save and restore being complete and +deterministic. + All fields in the device state structure (including virtual timers) should be restored by loadvm to the same values they had before savevm. diff --git a/docs/devel/reset.rst b/docs/devel/reset.rst index 9746a4e8a0b..c02fe0a405c 100644 --- a/docs/devel/reset.rst +++ b/docs/devel/reset.rst @@ -44,6 +44,26 @@ The Resettable interface handles reset types with an enum ``ResetType``: value on each cold reset, such as RNG seed information, and which they must not reinitialize on a snapshot-load reset. +``RESET_TYPE_WAKEUP`` + If the machine supports waking up from a suspended state and needs to reset + its devices during wake-up (from the ``MachineClass::wakeup()`` method), this + reset type should be used for such a request. Devices can utilize this reset + type to differentiate the reset requested during machine wake-up from other + reset requests. For example, RAM content must not be lost during wake-up, and + memory devices like virtio-mem that provide additional RAM must not reset + such state during wake-ups, but might do so during cold resets. However, this + reset type should not be used for wake-up detection, as not every machine + type issues a device reset request during wake-up. + +``RESET_TYPE_S390_CPU_NORMAL`` + This is only used for S390 CPU objects; it clears interrupts, stops + processing, and clears the TLB, but does not touch register contents. + +``RESET_TYPE_S390_CPU_INITIAL`` + This is only used for S390 CPU objects; it does everything + ``RESET_TYPE_S390_CPU_NORMAL`` does and also clears the PSW, prefix, + FPC, timer and control registers. It does not touch gprs, fprs or acrs. 
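As an illustration of the ``RESET_TYPE_WAKEUP`` behaviour described above, a
device's reset code can check the reset type before touching state that has to
survive a wake-up. The following is only a sketch, not code from the tree: the
``MyDevState`` type, its fields and ``mydev_reset_registers()`` are
hypothetical, and it assumes the three-phase reset hold callback, which
receives the ``ResetType``::

    static void mydev_reset_hold(Object *obj, ResetType type)
    {
        MyDevState *s = MYDEV(obj);

        if (type != RESET_TYPE_WAKEUP) {
            /* State that must survive wake-up is cleared only on other resets. */
            memset(s->backing_ram, 0, sizeof(s->backing_ram));
        }

        /* Register state is reinitialized for every reset type. */
        mydev_reset_registers(s);
    }

As noted above, the absence of such a request cannot be used to detect wake-up
itself, because not every machine type issues a device reset request during
wake-up.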
+ Devices which implement reset methods must treat any unknown ``ResetType`` as equivalent to ``RESET_TYPE_COLD``; this will reduce the amount of existing code we need to change if we add more types in future. @@ -123,6 +143,11 @@ The *exit* phase is executed only when the last reset operation ends. Therefore the object does not need to care how many of reset controllers it has and how many of them have started a reset. +DMA capable devices are expected to cancel all outstanding DMA operations +during either 'enter' or 'hold' phases. IOMMUs are expected to reset during +the 'exit' phase and this sequencing makes sure no outstanding DMA request +will fault. + Handling reset in a resettable object ------------------------------------- @@ -191,7 +216,7 @@ in reset. ResettablePhases parent_phases; } MyDevClass; - static void mydev_class_init(ObjectClass *class, void *data) + static void mydev_class_init(ObjectClass *class, const void *data) { MyDevClass *myclass = MYDEV_CLASS(class); ResettableClass *rc = RESETTABLE_CLASS(class); @@ -266,8 +291,8 @@ every reset child of the given resettable object. All children must be resettable too. Additional parameters (a reset type and an opaque pointer) must be passed to the callback too. -In ``DeviceClass`` and ``BusClass`` the ``ResettableState`` is located -``DeviceState`` and ``BusState`` structure. ``child_foreach()`` is implemented +In ``DeviceClass`` and ``BusClass`` the ``ResettableState`` is located in the +``DeviceState`` and ``BusState`` structures. ``child_foreach()`` is implemented to follow the bus hierarchy; for a bus, it calls the function on every child device; for a device, it calls the function on every bus child. When we reset the main system bus, we reset the whole machine bus tree. diff --git a/docs/devel/rust.rst b/docs/devel/rust.rst new file mode 100644 index 00000000000..b6737536c69 --- /dev/null +++ b/docs/devel/rust.rst @@ -0,0 +1,479 @@ +.. |msrv| replace:: 1.63.0 + +Rust in QEMU +============ + +Rust in QEMU is a project to enable using the Rust programming language +to add new functionality to QEMU. + +Right now, the focus is on making it possible to write devices that inherit +from ``SysBusDevice`` in `*safe*`__ Rust. Later, it may become possible +to write other kinds of devices (e.g. PCI devices that can do DMA), +complete boards, or backends (e.g. block device formats). + +__ https://doc.rust-lang.org/nomicon/meet-safe-and-unsafe.html + +Building the Rust in QEMU code +------------------------------ + +The Rust in QEMU code is included in the emulators via Meson. Meson +invokes rustc directly, building static libraries that are then linked +together with the C code. This is completely automatic when you run +``make`` or ``ninja``. + +However, QEMU's build system also tries to be easy to use for people who +are accustomed to the more "normal" Cargo-based development workflow. +In particular: + +* the set of warnings and lints that are used to build QEMU always + comes from the ``rust/Cargo.toml`` workspace file + +* it is also possible to use ``cargo`` for common Rust-specific coding + tasks, in particular to invoke ``clippy``, ``rustfmt`` and ``rustdoc``. + +To this end, QEMU includes a ``build.rs`` build script that picks up +generated sources from QEMU's build directory and puts it in Cargo's +output directory (typically ``rust/target/``). 
A vanilla invocation +of Cargo will complain that it cannot find the generated sources, +which can be fixed in different ways: + +* by using Makefile targets, provided by Meson, that run ``clippy`` or + ``rustdoc``: + + make clippy + make rustdoc + +A target for ``rustfmt`` is also declared in ``rust/meson.build``: + + make rustfmt + +* by invoking ``cargo`` through the Meson `development environment`__ + feature:: + + pyvenv/bin/meson devenv -w ../rust cargo clippy --tests + pyvenv/bin/meson devenv -w ../rust cargo fmt + + If you are going to use ``cargo`` repeatedly, ``pyvenv/bin/meson devenv`` + will enter a shell where commands like ``cargo fmt`` just work. + +__ https://mesonbuild.com/Commands.html#devenv + +* by pointing the ``MESON_BUILD_ROOT`` to the top of your QEMU build + tree. This third method is useful if you are using ``rust-analyzer``; + you can set the environment variable through the + ``rust-analyzer.cargo.extraEnv`` setting. + +As shown above, you can use the ``--tests`` option as usual to operate on test +code. Note however that you cannot *build* or run tests via ``cargo``, because +they need support C code from QEMU that Cargo does not know about. Tests can +be run via ``meson test`` or ``make``:: + + make check-rust + +Note that doctests require all ``.o`` files from the build to be available. + +Supported tools +''''''''''''''' + +QEMU supports rustc version 1.77.0 and newer. Notably, the following features +are missing: + +* inline const expression (stable in 1.79.0), currently worked around with + associated constants in the ``FnCall`` trait. + +* associated constants have to be explicitly marked ``'static`` (`changed in + 1.81.0`__) + +* ``&raw`` (stable in 1.82.0). Use ``addr_of!`` and ``addr_of_mut!`` instead, + though hopefully the need for raw pointers will go down over time. + +* ``new_uninit`` (stable in 1.82.0). This is used internally by the ``pinned_init`` + crate, which is planned for inclusion in QEMU, but it can be easily patched + out. + +* referencing statics in constants (stable in 1.83.0). For now use a const + function; this is an important limitation for QEMU's migration stream + architecture (VMState). Right now, VMState lacks type safety because + it is hard to place the ``VMStateField`` definitions in traits. + +* NUL-terminated file names with ``#[track_caller]`` are scheduled for + inclusion as ``#![feature(location_file_nul)]``, but it will be a while + before QEMU can use them. For now, there is special code in + ``util/error.c`` to support non-NUL-terminated file names. + +* associated const equality would be nice to have for some users of + ``callbacks::FnCall``, but is still experimental. ``ASSERT_IS_SOME`` + replaces it. + +__ https://github.com/rust-lang/rust/pull/125258 + +QEMU also supports version 0.60.x of bindgen, which is missing option +``--generate-cstr``. This option requires version 0.66.x and will +be adopted as soon as supporting these older versions is not necessary +anymore. + +Writing Rust code in QEMU +------------------------- + +QEMU includes four crates: + +* ``qemu_api`` for bindings to C code and useful functionality + +* ``qemu_api_macros`` defines several procedural macros that are useful when + writing C code + +* ``pl011`` (under ``rust/hw/char/pl011``) and ``hpet`` (under ``rust/hw/timer/hpet``) + are sample devices that demonstrate ``qemu_api`` and ``qemu_api_macros``, and are + used to further develop them. 
These two crates are functional\ [#issues]_ replacements + for the ``hw/char/pl011.c`` and ``hw/timer/hpet.c`` files. + +.. [#issues] The ``pl011`` crate is synchronized with ``hw/char/pl011.c`` + as of commit 3e0f118f82. The ``hpet`` crate is synchronized as of + commit 1433e38cc8. Both are lacking tracing functionality. + +This section explains how to work with them. + +Status +'''''' + +Modules of ``qemu_api`` can be defined as: + +- *complete*: ready for use in new devices; if applicable, the API supports the + full functionality available in C + +- *stable*: ready for production use, the API is safe and should not undergo + major changes + +- *proof of concept*: the API is subject to change but allows working with safe + Rust + +- *initial*: the API is in its initial stages; it requires large amount of + unsafe code; it might have soundness or type-safety issues + +The status of the modules is as follows: + +================ ====================== +module status +================ ====================== +``assertions`` stable +``bitops`` complete +``callbacks`` complete +``cell`` stable +``errno`` complete +``error`` stable +``irq`` complete +``log`` proof of concept +``memory`` stable +``module`` complete +``qdev`` stable +``qom`` stable +``sysbus`` stable +``timer`` stable +``vmstate`` proof of concept +``zeroable`` stable +================ ====================== + +.. note:: + API stability is not a promise, if anything because the C APIs are not a stable + interface either. Also, ``unsafe`` interfaces may be replaced by safe interfaces + later. + +Naming convention +''''''''''''''''' + +C function names usually are prefixed according to the data type that they +apply to, for example ``timer_mod`` or ``sysbus_connect_irq``. Furthermore, +both function and structs sometimes have a ``qemu_`` or ``QEMU`` prefix. +Generally speaking, these are all removed in the corresponding Rust functions: +``QEMUTimer`` becomes ``timer::Timer``, ``timer_mod`` becomes ``Timer::modify``, +``sysbus_connect_irq`` becomes ``SysBusDeviceMethods::connect_irq``. + +Sometimes however a name appears multiple times in the QOM class hierarchy, +and the only difference is in the prefix. An example is ``qdev_realize`` and +``sysbus_realize``. In such cases, whenever a name is not unique in +the hierarchy, always add the prefix to the classes that are lower in +the hierarchy; for the top class, decide on a case by case basis. + +For example: + +========================== ========================================= +``device_cold_reset()`` ``DeviceMethods::cold_reset()`` +``pci_device_reset()`` ``PciDeviceMethods::pci_device_reset()`` +``pci_bridge_reset()`` ``PciBridgeMethods::pci_bridge_reset()`` +========================== ========================================= + +Here, the name is not exactly the same, but nevertheless ``PciDeviceMethods`` +adds the prefix to avoid confusion, because the functionality of +``device_cold_reset()`` and ``pci_device_reset()`` is subtly different. + +In this case, however, no prefix is needed: + +========================== ========================================= +``device_realize()`` ``DeviceMethods::realize()`` +``sysbus_realize()`` ``SysbusDeviceMethods::sysbus_realize()`` +``pci_realize()`` ``PciDeviceMethods::pci_realize()`` +========================== ========================================= + +Here, the lower classes do not add any functionality, and mostly +provide extra compile-time checking; the basic *realize* functionality +is the same for all devices. 
Therefore, ``DeviceMethods`` does not
+add the prefix.
+
+Whenever a name is unique in the hierarchy, instead, you should
+always remove the class name prefix.
+
+Common pitfalls
+'''''''''''''''
+
+Rust has very strict rules with respect to how you get an exclusive (``&mut``)
+reference; failure to respect those rules is a source of undefined behavior.
+In particular, even if a value is loaded from a raw mutable pointer (``*mut``),
+it *cannot* be cast to ``&mut`` unless the value was stored to the ``*mut``
+from a mutable reference. Furthermore, it is undefined behavior if any
+shared reference was created between the store to the ``*mut`` and the load::
+
+    let mut p: u32 = 42;
+    let p_mut = &mut p; // 1
+    let p_raw = p_mut as *mut u32; // 2
+
+    // p_raw keeps the mutable reference "alive"
+
+    let p_shared = &p; // 3
+    println!("access from &u32: {}", *p_shared);
+
+    // Bring back the mutable reference, its lifetime overlaps
+    // with that of a shared reference.
+    let p_mut = unsafe { &mut *p_raw }; // 4
+    println!("access from &mut u32: {}", *p_mut);
+
+    println!("access from &u32: {}", *p_shared); // 5
+
+These rules can be tested with `MIRI`__, for example.
+
+__ https://github.com/rust-lang/miri
+
+Almost all Rust code in QEMU will involve QOM objects, and pointers to these
+objects are *shared*, for example because they are part of the QOM composition
+tree. This creates exactly the above scenario:
+
+1. a QOM object is created
+
+2. a ``*mut`` is created, for example as the opaque value for a ``MemoryRegion``
+
+3. the QOM object is placed in the composition tree
+
+4. a memory access dereferences the opaque value to a ``&mut``
+
+5. but the shared reference is still present in the composition tree
+
+Because of this, QOM objects should almost always use ``&self`` instead
+of ``&mut self``; access to internal fields must use *interior mutability*
+to go from a shared reference to a ``&mut``.
+
+Whenever C code provides you with an opaque ``void *``, avoid converting it
+to a Rust mutable reference, and use a shared reference instead. The
+``qemu_api::cell`` module provides wrappers that can be used to tell the
+Rust compiler about interior mutability, and optionally to enforce locking
+rules for the "Big QEMU Lock". In the future, similar cell types might
+also be provided for ``AioContext``-based locking.
+
+In particular, device code will usually rely on the ``BqlRefCell`` and
+``BqlCell`` types to ensure that data is accessed correctly under the
+"Big QEMU Lock". These cell types are also known to the ``vmstate``
+crate, which is able to "look inside" them when building an in-memory
+representation of a ``struct``'s layout. Note that the same is not true
+of a ``RefCell`` or ``Mutex``.
+
+Bindings code instead will usually use the ``Opaque`` type, which hides
+the contents of the underlying struct and can be easily converted to
+a raw pointer, for use in calls to C functions. It can be used for
+example as follows::
+
+    #[repr(transparent)]
+    #[derive(Debug, qemu_api_macros::Wrapper)]
+    pub struct Object(Opaque<bindings::Object>);
+
+where the special ``derive`` macro provides useful methods such as
+``from_raw``, ``as_ptr``, ``as_mut_ptr`` and ``raw_get``.
The bindings will +then manually check for the big QEMU lock with assertions, which allows +the wrapper to be declared thread-safe:: + + unsafe impl Send for Object {} + unsafe impl Sync for Object {} + +Writing bindings to C code +'''''''''''''''''''''''''' + +Here are some things to keep in mind when working on the ``qemu_api`` crate. + +**Look at existing code** + Very often, similar idioms in C code correspond to similar tricks in + Rust bindings. If the C code uses ``offsetof``, look at qdev properties + or ``vmstate``. If the C code has a complex const struct, look at + ``MemoryRegion``. Reuse existing patterns for handling lifetimes; + for example use ``&T`` for QOM objects that do not need a reference + count (including those that can be embedded in other objects) and + ``Owned`` for those that need it. + +**Use the type system** + Bindings often will need access information that is specific to a type + (either a builtin one or a user-defined one) in order to pass it to C + functions. Put them in a trait and access it through generic parameters. + The ``vmstate`` module has examples of how to retrieve type information + for the fields of a Rust ``struct``. + +**Prefer unsafe traits to unsafe functions** + Unsafe traits are much easier to prove correct than unsafe functions. + They are an excellent place to store metadata that can later be accessed + by generic functions. C code usually places metadata in global variables; + in Rust, they can be stored in traits and then turned into ``static`` + variables. Often, unsafe traits can be generated by procedural macros. + +**Document limitations due to old Rust versions** + If you need to settle for an inferior solution because of the currently + supported set of Rust versions, document it in the source and in this + file. This ensures that it can be fixed when the minimum supported + version is bumped. + +**Keep locking in mind**. + When marking a type ``Sync``, be careful of whether it needs the big + QEMU lock. Use ``BqlCell`` and ``BqlRefCell`` for interior data, + or assert ``bql_locked()``. + +**Don't be afraid of complexity, but document and isolate it** + It's okay to be tricky; device code is written more often than bindings + code and it's important that it is idiomatic. However, you should strive + to isolate any tricks in a place (for example a ``struct``, a trait + or a macro) where it can be documented and tested. If needed, include + toy versions of the code in the documentation. + +Writing procedural macros +''''''''''''''''''''''''' + +By conventions, procedural macros are split in two functions, one +returning ``Result`` with the body of +the procedural macro, and the second returning ``proc_macro::TokenStream`` +which is the actual procedural macro. The former's name is the same as +the latter with the ``_or_error`` suffix. The code for the latter is more +or less fixed; it follows the following template, which is fixed apart +from the type after ``as`` in the invocation of ``parse_macro_input!``:: + + #[proc_macro_derive(Object)] + pub fn derive_object(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + + derive_object_or_error(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() + } + +The ``qemu_api_macros`` crate has utility functions to examine a +``DeriveInput`` and perform common checks (e.g. looking for a struct +with named fields). 
These functions return ``Result<..., syn::Error>`` and can be used easily in the procedural macro function::
+
+    fn derive_object_or_error(input: DeriveInput) ->
+        Result<proc_macro2::TokenStream, syn::Error>
+    {
+        is_c_repr(&input, "#[derive(Object)]")?;
+
+        let name = &input.ident;
+        let parent = &get_fields(&input, "#[derive(Object)]")?[0].ident;
+        ...
+    }
+
+Use procedural macros with care. They are mostly useful for two purposes:
+
+* Performing consistency checks; for example ``#[derive(Object)]`` checks
+  that the structure has ``#[repr(C)]`` and that the type of the first field
+  is consistent with the ``ObjectType`` declaration.
+
+* Extracting information from Rust source code into traits, typically based
+  on types and attributes. For example, ``#[derive(TryInto)]`` builds an
+  implementation of ``TryFrom``, and it uses the ``#[repr(...)]`` attribute
+  as the ``TryFrom`` source and error types.
+
+Procedural macros can be hard to debug and test; if the code generation
+exceeds a few lines of code, it may be worthwhile to delegate work to
+"regular" declarative (``macro_rules!``) macros and write unit tests for
+those instead.
+
+
+Coding style
+''''''''''''
+
+Code should pass clippy and be formatted with rustfmt.
+
+Right now, only the nightly version of ``rustfmt`` is supported. This
+might change in the future. While CI checks for correct formatting via
+``cargo fmt --check``, maintainers can fix this for you when applying patches.
+
+It is expected that ``qemu_api`` provides full ``rustdoc`` documentation for
+bindings that are in their final shape or close.
+
+Adding dependencies
+-------------------
+
+Generally, the set of dependent crates is kept small. Think twice before
+adding a new external crate, especially if it comes with a large set of
+dependencies itself. Sometimes QEMU only needs a small subset of the
+functionality; see for example QEMU's ``assertions`` module.
+
+On top of this recommendation, adding external crates to QEMU is a
+slightly complicated process, mostly due to the need to teach Meson how
+to build them. While Meson has initial support for parsing ``Cargo.lock``
+files, it is still highly experimental and is therefore not used.
+
+Therefore, external crates must be added as subprojects for Meson to
+learn how to build them, as well as to the relevant ``Cargo.toml`` files.
+The versions specified in ``rust/Cargo.lock`` must be the same as the
+subprojects; note that the ``rust/`` directory forms a Cargo `workspace`__,
+and therefore there is a single lock file for the whole build.
+
+__ https://doc.rust-lang.org/cargo/reference/workspaces.html#virtual-workspace
+
+Choose a version of the crate that works with QEMU's minimum supported
+Rust version (|msrv|).
+
+Second, a new ``wrap`` file must be added to teach Meson how to download the
+crate. The wrap file must be named ``NAME-SEMVER-rs.wrap``, where ``NAME``
+is the name of the crate and ``SEMVER`` is the version up to and including the
+first non-zero number. For example, a crate with version ``0.2.3`` will use
+``0.2`` for its ``SEMVER``, while a crate with version ``1.0.84`` will use ``1``.
+
+Third, the Meson rules to build the crate must be added at
+``subprojects/NAME-SEMVER-rs/meson.build``.
Generally this includes: + +* ``subproject`` and ``dependency`` lines for all dependent crates + +* a ``static_library`` or ``rust.proc_macro`` line to perform the actual build + +* ``declare_dependency`` and a ``meson.override_dependency`` lines to expose + the result to QEMU and to other subprojects + +Remember to add ``native: true`` to ``dependency``, ``static_library`` and +``meson.override_dependency`` for dependencies of procedural macros. +If a crate is needed in both procedural macros and QEMU binaries, everything +apart from ``subproject`` must be duplicated to build both native and +non-native versions of the crate. + +It's important to specify the right compiler options. These include: + +* the language edition (which can be found in the ``Cargo.toml`` file) + +* the ``--cfg`` (which have to be "reverse engineered" from the ``build.rs`` + file of the crate). + +* usually, a ``--cap-lints allow`` argument to hide warnings from rustc + or clippy. + +After every change to the ``meson.build`` file you have to update the patched +version with ``meson subprojects update --reset ``NAME-SEMVER-rs``. This might +be automated in the future. + +Also, after every change to the ``meson.build`` file it is strongly suggested to +do a dummy change to the ``.wrap`` file (for example adding a comment like +``# version 2``), which will help Meson notice that the subproject is out of date. + +As a last step, add the new subproject to ``scripts/archive-source.sh``, +``scripts/make-release`` and ``subprojects/.gitignore``. diff --git a/docs/devel/style.rst b/docs/devel/style.rst index 2f68b500798..d025933808e 100644 --- a/docs/devel/style.rst +++ b/docs/devel/style.rst @@ -416,6 +416,26 @@ definitions instead of typedefs in headers and function prototypes; this avoids problems with duplicated typedefs and reduces the need to include headers from other headers. +Bitfields +--------- + +C bitfields can be a cause of non-portability issues, especially under windows +where `MSVC has a different way to lay them out than GCC +`_, or where +endianness matters. + +For this reason, we disallow usage of bitfields in packed structures and in any +structures which are supposed to exactly match a specific layout in guest +memory. Some existing code may use it, and we carefully ensured the layout was +the one expected. + +We also suggest avoiding bitfields even in structures where the exact +layout does not matter, unless you can show that they provide a significant +usability benefit. + +We encourage the usage of ``include/hw/registerfields.h`` as a safe replacement +for bitfields. + Reserved namespaces in C and POSIX ---------------------------------- diff --git a/docs/devel/submitting-a-patch.rst b/docs/devel/submitting-a-patch.rst index 83e9092b8c0..dd1cf32ad35 100644 --- a/docs/devel/submitting-a-patch.rst +++ b/docs/devel/submitting-a-patch.rst @@ -18,7 +18,7 @@ one-shot fix, the bare minimum we ask is that: * - Check - Reason - * - Patches contain Signed-off-by: Real Name + * - Patches contain Signed-off-by: Your Name - States you are legally able to contribute the code. See :ref:`patch_emails_must_include_a_signed_off_by_line` * - Sent as patch emails to ``qemu-devel@nongnu.org`` - The project uses an email list based workflow. See :ref:`submitting_your_patches` @@ -235,6 +235,63 @@ to another list.) ``git send-email`` (`step-by-step setup guide works best for delivering the patch without mangling it, but attachments can be used as a last resort on a first-time submission. +.. 
_use_b4: + +Use B4 +~~~~~~ + +The `b4`_ tool, used for Linux kernel development, can also be used for QEMU +development. It is packaged in most distros and PyPi. The QEMU source tree +includes a ``b4`` project configuration file at the root: ``.b4-config``. + +Example workflow to prepare a patch series: + +1. Start with a clean checkout of the ``master`` branch. +2. Create a new series with a topical branch name using ``b4 prep -n descriptive-name``. + ``b4`` will create a ``b4/descriptive-name`` branch and switch to it. +3. Commit your changes, following this page's guidelines about proper commit messages etc. +4. Write a descriptive cover letter with ``b4 prep --edit-cover``. +5. Add maintainer and reviewer CCs with ``b4 prep --auto-to-cc``. You can make + changes to Cc: and To: recipients by editing the cover letter. +6. Run patch checks with ``b4 prep --check``. +7. Optionally review the patches with ``b4 send --dry-run`` which will print the + raw patches in standard output. + +To send the patches, you can: + +- Setup ``git-send-email`` and use ``b4 send``, or +- Export the patches to files using ``b4 send -o OUTPUT_DIR`` and send them manually. + +For more details, consult the `b4 documentation`_. + +.. _b4 documentation: https://b4.docs.kernel.org/ +.. _b4: https://github.com/mricon/b4/ + +.. _use_git_publish: + +Use git-publish +~~~~~~~~~~~~~~~ + +If you already configured git send-email, you can simply use `git-publish +`__ to send series. + +:: + + $ git checkout master -b my-feature + $ # work on new commits, add your 'Signed-off-by' lines to each + $ git publish + $ ... more work, rebase on master, ... + $ git publish # will send a v2 + +Each time you post a series, git-publish will create a local tag with the format +``-v`` to record the patch series. + +When sending patch emails, 'git publish' will consult the output of +'scripts/get_maintainers.pl' and automatically CC anyone listed as maintainers +of the affected code. Generally you should accept the suggested CC list, but +there may sometimes be scenarios where it is appropriate to cut it down (eg on +certain large tree-wide cleanups), or augment it with other interested people. + .. _if_you_cannot_send_patch_emails: If you cannot send patch emails @@ -252,10 +309,7 @@ patches to the QEMU mailing list by following these steps: #. Send your patches to the QEMU mailing list using the web-based ``git-send-email`` UI at https://git.sr.ht/~USERNAME/qemu/send-email -`This video -`__ -shows the web-based ``git-send-email`` workflow. Documentation is -available `here +Documentation for sourcehut is available `here `__. .. _cc_the_relevant_maintainer: @@ -322,23 +376,9 @@ Patch emails must include a ``Signed-off-by:`` line Your patches **must** include a Signed-off-by: line. This is a hard requirement because it's how you say "I'm legally okay to contribute -this and happy for it to go into QEMU". The process is modelled after -the `Linux kernel -`__ -policy. - -If you wrote the patch, make sure your "From:" and "Signed-off-by:" -lines use the same spelling. It's okay if you subscribe or contribute to -the list via more than one address, but using multiple addresses in one -commit just confuses things. If someone else wrote the patch, git will -include a "From:" line in the body of the email (different from your -envelope From:) that will give credit to the correct author; but again, -that author's Signed-off-by: line is mandatory, with the same spelling. 
- -There are various tooling options for automatically adding these tags -include using ``git commit -s`` or ``git format-patch -s``. For more -information see `SubmittingPatches 1.12 -`__. +this and happy for it to go into QEMU". For full guidance, read the +:ref:`code-provenance` documentation. + .. _include_a_meaningful_cover_letter: @@ -406,6 +446,20 @@ For more details on how QEMU's stable process works, refer to the .. _participating_in_code_review: +Retrieve an existing series +--------------------------- + +If you want to apply an existing series on top of your tree, you can simply use +`b4`_. + +:: + + b4 shazam $msg-id + +The message id is related to the patch series that has been sent to the mailing +list. You need to retrieve the "Message-Id:" header from one of the patches. Any +of them can be used and b4 will apply the whole series. + Participating in Code Review ---------------------------- @@ -511,7 +565,11 @@ summary belongs. The `git-publish `__ script can help with tracking a good summary across versions. Also, the `git-backport-diff `__ script can help focus -reviewers on what changed between revisions. +reviewers on what changed between revisions. The ``b4`` tool automatically +generates a version history section in the cover letter, including links to the +previous versions on `Lore`_. + +.. _Lore: https://lore.kernel.org/ .. _tips_and_tricks: diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst index d46b625e0e8..f26b837a309 100644 --- a/docs/devel/tcg-ops.rst +++ b/docs/devel/tcg-ops.rst @@ -239,7 +239,7 @@ Jumps/Labels - | Jump to label. - * - brcond_i32/i64 *t0*, *t1*, *cond*, *label* + * - brcond *t0*, *t1*, *cond*, *label* - | Conditional jump if *t0* *cond* *t1* is true. *cond* can be: | @@ -261,98 +261,117 @@ Arithmetic .. list-table:: - * - add_i32/i64 *t0*, *t1*, *t2* + * - add *t0*, *t1*, *t2* - | *t0* = *t1* + *t2* - * - sub_i32/i64 *t0*, *t1*, *t2* + * - sub *t0*, *t1*, *t2* - | *t0* = *t1* - *t2* - * - neg_i32/i64 *t0*, *t1* + * - neg *t0*, *t1* - | *t0* = -*t1* (two's complement) - * - mul_i32/i64 *t0*, *t1*, *t2* + * - mul *t0*, *t1*, *t2* - | *t0* = *t1* * *t2* - * - div_i32/i64 *t0*, *t1*, *t2* + * - divs *t0*, *t1*, *t2* - | *t0* = *t1* / *t2* (signed) | Undefined behavior if division by zero or overflow. - * - divu_i32/i64 *t0*, *t1*, *t2* + * - divu *t0*, *t1*, *t2* - | *t0* = *t1* / *t2* (unsigned) | Undefined behavior if division by zero. - * - rem_i32/i64 *t0*, *t1*, *t2* + * - rems *t0*, *t1*, *t2* - | *t0* = *t1* % *t2* (signed) | Undefined behavior if division by zero or overflow. - * - remu_i32/i64 *t0*, *t1*, *t2* + * - remu *t0*, *t1*, *t2* - | *t0* = *t1* % *t2* (unsigned) | Undefined behavior if division by zero. + * - divs2 *q*, *r*, *nl*, *nh*, *d* + + - | *q* = *nh:nl* / *d* (signed) + | *r* = *nh:nl* % *d* + | Undefined behaviour if division by zero, or the double-word + numerator divided by the single-word divisor does not fit + within the single-word quotient. The code generator will + pass *nh* as a simple sign-extension of *nl*, so the only + overflow should be *INT_MIN* / -1. + + * - divu2 *q*, *r*, *nl*, *nh*, *d* + + - | *q* = *nh:nl* / *d* (unsigned) + | *r* = *nh:nl* % *d* + | Undefined behaviour if division by zero, or the double-word + numerator divided by the single-word divisor does not fit + within the single-word quotient. The code generator will + pass 0 to *nh* to make a simple zero-extension of *nl*, + so overflow should never occur. Logical ------- .. 
list-table:: - * - and_i32/i64 *t0*, *t1*, *t2* + * - and *t0*, *t1*, *t2* - | *t0* = *t1* & *t2* - * - or_i32/i64 *t0*, *t1*, *t2* + * - or *t0*, *t1*, *t2* - | *t0* = *t1* | *t2* - * - xor_i32/i64 *t0*, *t1*, *t2* + * - xor *t0*, *t1*, *t2* - | *t0* = *t1* ^ *t2* - * - not_i32/i64 *t0*, *t1* + * - not *t0*, *t1* - | *t0* = ~\ *t1* - * - andc_i32/i64 *t0*, *t1*, *t2* + * - andc *t0*, *t1*, *t2* - | *t0* = *t1* & ~\ *t2* - * - eqv_i32/i64 *t0*, *t1*, *t2* + * - eqv *t0*, *t1*, *t2* - | *t0* = ~(*t1* ^ *t2*), or equivalently, *t0* = *t1* ^ ~\ *t2* - * - nand_i32/i64 *t0*, *t1*, *t2* + * - nand *t0*, *t1*, *t2* - | *t0* = ~(*t1* & *t2*) - * - nor_i32/i64 *t0*, *t1*, *t2* + * - nor *t0*, *t1*, *t2* - | *t0* = ~(*t1* | *t2*) - * - orc_i32/i64 *t0*, *t1*, *t2* + * - orc *t0*, *t1*, *t2* - | *t0* = *t1* | ~\ *t2* - * - clz_i32/i64 *t0*, *t1*, *t2* + * - clz *t0*, *t1*, *t2* - | *t0* = *t1* ? clz(*t1*) : *t2* - * - ctz_i32/i64 *t0*, *t1*, *t2* + * - ctz *t0*, *t1*, *t2* - | *t0* = *t1* ? ctz(*t1*) : *t2* - * - ctpop_i32/i64 *t0*, *t1* + * - ctpop *t0*, *t1* - | *t0* = number of bits set in *t1* | - | With *ctpop* short for "count population", matching - | the function name used in ``include/qemu/host-utils.h``. + | The name *ctpop* is short for "count population", and matches + the function name used in ``include/qemu/host-utils.h``. Shifts/Rotates @@ -360,30 +379,30 @@ Shifts/Rotates .. list-table:: - * - shl_i32/i64 *t0*, *t1*, *t2* + * - shl *t0*, *t1*, *t2* - | *t0* = *t1* << *t2* - | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64) + | Unspecified behavior for negative or out-of-range shifts. - * - shr_i32/i64 *t0*, *t1*, *t2* + * - shr *t0*, *t1*, *t2* - | *t0* = *t1* >> *t2* (unsigned) - | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64) + | Unspecified behavior for negative or out-of-range shifts. - * - sar_i32/i64 *t0*, *t1*, *t2* + * - sar *t0*, *t1*, *t2* - | *t0* = *t1* >> *t2* (signed) - | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64) + | Unspecified behavior for negative or out-of-range shifts. - * - rotl_i32/i64 *t0*, *t1*, *t2* + * - rotl *t0*, *t1*, *t2* - | Rotation of *t2* bits to the left - | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64) + | Unspecified behavior for negative or out-of-range shifts. - * - rotr_i32/i64 *t0*, *t1*, *t2* + * - rotr *t0*, *t1*, *t2* - | Rotation of *t2* bits to the right. - | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64) + | Unspecified behavior for negative or out-of-range shifts. Misc @@ -391,26 +410,12 @@ Misc .. list-table:: - * - mov_i32/i64 *t0*, *t1* + * - mov *t0*, *t1* - | *t0* = *t1* - | Move *t1* to *t0* (both operands must have the same type). - - * - ext8s_i32/i64 *t0*, *t1* - - ext8u_i32/i64 *t0*, *t1* - - ext16s_i32/i64 *t0*, *t1* - - ext16u_i32/i64 *t0*, *t1* + | Move *t1* to *t0*. - ext32s_i64 *t0*, *t1* - - ext32u_i64 *t0*, *t1* - - - | 8, 16 or 32 bit sign/zero extension (both operands must have the same type) - - * - bswap16_i32/i64 *t0*, *t1*, *flags* + * - bswap16 *t0*, *t1*, *flags* - | 16 bit byte swap on the low bits of a 32/64 bit input. | @@ -420,24 +425,24 @@ Misc | | If neither ``TCG_BSWAP_OZ`` nor ``TCG_BSWAP_OS`` are set, then the bits of *t0* above bit 15 may contain any value. - * - bswap32_i64 *t0*, *t1*, *flags* - - - | 32 bit byte swap on a 64-bit value. The flags are the same as for bswap16, - except they apply from bit 31 instead of bit 15. + * - bswap32 *t0*, *t1*, *flags* - * - bswap32_i32 *t0*, *t1*, *flags* + - | 32 bit byte swap. 
The flags are the same as for bswap16, except + they apply from bit 31 instead of bit 15. On TCG_TYPE_I32, the + flags should be zero. - bswap64_i64 *t0*, *t1*, *flags* + * - bswap64 *t0*, *t1*, *flags* - - | 32/64 bit byte swap. The flags are ignored, but still present - for consistency with the other bswap opcodes. + - | 64 bit byte swap. The flags are ignored, but still present + for consistency with the other bswap opcodes. For future + compatibility, the flags should be zero. * - discard_i32/i64 *t0* - | Indicate that the value of *t0* won't be used later. It is useful to force dead code elimination. - * - deposit_i32/i64 *dest*, *t1*, *t2*, *pos*, *len* + * - deposit *dest*, *t1*, *t2*, *pos*, *len* - | Deposit *t2* as a bitfield into *t1*, placing the result in *dest*. | @@ -446,14 +451,16 @@ Misc | *len* - the length of the bitfield | *pos* - the position of the first bit, counting from the LSB | - | For example, "deposit_i32 dest, t1, t2, 8, 4" indicates a 4-bit field + | For example, "deposit dest, t1, t2, 8, 4" indicates a 4-bit field at bit 8. This operation would be equivalent to | | *dest* = (*t1* & ~0x0f00) | ((*t2* << 8) & 0x0f00) + | + | on TCG_TYPE_I32. - * - extract_i32/i64 *dest*, *t1*, *pos*, *len* + * - extract *dest*, *t1*, *pos*, *len* - sextract_i32/i64 *dest*, *t1*, *pos*, *len* + sextract *dest*, *t1*, *pos*, *len* - | Extract a bitfield from *t1*, placing the result in *dest*. | @@ -462,16 +469,16 @@ Misc to the left with zeros; for sextract_*, the result will be extended to the left with copies of the bitfield sign bit at *pos* + *len* - 1. | - | For example, "sextract_i32 dest, t1, 8, 4" indicates a 4-bit field + | For example, "sextract dest, t1, 8, 4" indicates a 4-bit field at bit 8. This operation would be equivalent to | | *dest* = (*t1* << 20) >> 28 | - | (using an arithmetic right shift). + | (using an arithmetic right shift) on TCG_TYPE_I32. - * - extract2_i32/i64 *dest*, *t1*, *t2*, *pos* + * - extract2 *dest*, *t1*, *t2*, *pos* - - | For N = {32,64}, extract an N-bit quantity from the concatenation + - | For TCG_TYPE_I{N}, extract an N-bit quantity from the concatenation of *t2*:*t1*, beginning at *pos*. The tcg_gen_extract2_{i32,i64} expander accepts 0 <= *pos* <= N as inputs. The backend code generator will not see either 0 or N as inputs for these opcodes. @@ -494,19 +501,19 @@ Conditional moves .. list-table:: - * - setcond_i32/i64 *dest*, *t1*, *t2*, *cond* + * - setcond *dest*, *t1*, *t2*, *cond* - | *dest* = (*t1* *cond* *t2*) | | Set *dest* to 1 if (*t1* *cond* *t2*) is true, otherwise set to 0. - * - negsetcond_i32/i64 *dest*, *t1*, *t2*, *cond* + * - negsetcond *dest*, *t1*, *t2*, *cond* - | *dest* = -(*t1* *cond* *t2*) | | Set *dest* to -1 if (*t1* *cond* *t2*) is true, otherwise set to 0. - * - movcond_i32/i64 *dest*, *c1*, *c2*, *v1*, *v2*, *cond* + * - movcond *dest*, *c1*, *c2*, *v1*, *v2*, *cond* - | *dest* = (*c1* *cond* *c2* ? *v1* : *v2*) | @@ -586,26 +593,79 @@ Multiword arithmetic support .. list-table:: - * - add2_i32/i64 *t0_low*, *t0_high*, *t1_low*, *t1_high*, *t2_low*, *t2_high* + * - addco *t0*, *t1*, *t2* + + - | Compute *t0* = *t1* + *t2* and in addition output to the + carry bit provided by the host architecture. + + * - addci *t0, *t1*, *t2* - sub2_i32/i64 *t0_low*, *t0_high*, *t1_low*, *t1_high*, *t2_low*, *t2_high* + - | Compute *t0* = *t1* + *t2* + *C*, where *C* is the + input carry bit provided by the host architecture. + The output carry bit need not be computed. 
- - | Similar to add/sub, except that the double-word inputs *t1* and *t2* are - formed from two single-word arguments, and the double-word output *t0* - is returned in two single-word outputs. + * - addcio *t0*, *t1*, *t2* - * - mulu2_i32/i64 *t0_low*, *t0_high*, *t1*, *t2* + - | Compute *t0* = *t1* + *t2* + *C*, where *C* is the + input carry bit provided by the host architecture, + and also compute the output carry bit. + + * - addc1o *t0*, *t1*, *t2* + + - | Compute *t0* = *t1* + *t2* + 1, and in addition output to the + carry bit provided by the host architecture. This is akin to + *addcio* with a fixed carry-in value of 1. + | This is intended to be used by the optimization pass, + intermediate to complete folding of the addition chain. + In some cases complete folding is not possible and this + opcode will remain until output. If this happens, the + code generator will use ``tcg_out_set_carry`` and then + the output routine for *addcio*. + + * - subbo *t0*, *t1*, *t2* + + - | Compute *t0* = *t1* - *t2* and in addition output to the + borrow bit provided by the host architecture. + | Depending on the host architecture, the carry bit may or may not be + identical to the borrow bit. Thus the addc\* and subb\* + opcodes must not be mixed. + + * - subbi *t0*, *t1*, *t2* + + - | Compute *t0* = *t1* - *t2* - *B*, where *B* is the + input borrow bit provided by the host architecture. + The output borrow bit need not be computed. + + * - subbio *t0*, *t1*, *t2* + + - | Compute *t0* = *t1* - *t2* - *B*, where *B* is the + input borrow bit provided by the host architecture, + and also compute the output borrow bit. + + * - subb1o *t0*, *t1*, *t2* + + - | Compute *t0* = *t1* - *t2* - 1, and in addition output to the + borrow bit provided by the host architecture. This is akin to + *subbio* with a fixed borrow-in value of 1. + | This is intended to be used by the optimization pass, + intermediate to complete folding of the subtraction chain. + In some cases complete folding is not possible and this + opcode will remain until output. If this happens, the + code generator will use ``tcg_out_set_borrow`` and then + the output routine for *subbio*. + + * - mulu2 *t0_low*, *t0_high*, *t1*, *t2* - | Similar to mul, except two unsigned inputs *t1* and *t2* yielding the full double-word product *t0*. The latter is returned in two single-word outputs. - * - muls2_i32/i64 *t0_low*, *t0_high*, *t1*, *t2* + * - muls2 *t0_low*, *t0_high*, *t1*, *t2* - | Similar to mulu2, except the two inputs *t1* and *t2* are signed. - * - mulsh_i32/i64 *t0*, *t1*, *t2* + * - mulsh *t0*, *t1*, *t2* - muluh_i32/i64 *t0*, *t1*, *t2* + muluh *t0*, *t1*, *t2* - | Provide the high part of a signed or unsigned multiply, respectively. | @@ -684,8 +744,6 @@ QEMU specific operations qemu_st_i32/i64/i128 *t0*, *t1*, *flags*, *memidx* - qemu_st8_i32 *t0*, *t1*, *flags*, *memidx* - - | Load data at the guest address *t1* into *t0*, or store data in *t0* at guest address *t1*. The _i32/_i64/_i128 size applies to the size of the input/output register *t0* only. The address *t1* is always sized according to the guest, @@ -703,19 +761,14 @@ QEMU specific operations 64-bit memory access specified in *flags*. | | For qemu_ld/st_i128, these are only supported for a 64-bit host. - | - | For i386, qemu_st8_i32 is exactly like qemu_st_i32, except the size of - the memory operation is known to be 8-bit. This allows the backend to - provide a different set of register constraints.
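To make the carry and borrow opcodes introduced above (*addco*, *addci*, *addcio*, *subbo*, *subbi*, *subbio*) easier to follow, here is a plain C model of their documented 64-bit semantics. This is only an illustrative sketch, not QEMU code: the carry flag is modelled as an explicit variable, whereas a real host keeps it in its flags register.

.. code-block:: c

   #include <stdint.h>
   #include <stdbool.h>

   static bool flag;   /* models the host carry (or borrow) bit */

   /* addco: t0 = t1 + t2, producing a carry-out */
   static uint64_t addco(uint64_t t1, uint64_t t2)
   {
       uint64_t t0 = t1 + t2;
       flag = t0 < t1;                  /* unsigned wrap-around => carry out */
       return t0;
   }

   /* addci: t0 = t1 + t2 + carry-in; the carry-out is not needed */
   static uint64_t addci(uint64_t t1, uint64_t t2)
   {
       return t1 + t2 + (flag ? 1 : 0);
   }

   /* addcio: t0 = t1 + t2 + carry-in, also producing a carry-out */
   static uint64_t addcio(uint64_t t1, uint64_t t2)
   {
       bool cin = flag;
       uint64_t t0 = t1 + t2 + (cin ? 1 : 0);
       flag = cin ? t0 <= t1 : t0 < t1;
       return t0;
   }

   /* A double-word addition can then be expressed as addco on the low
    * halves followed by addci (or addcio, if more words follow or the
    * final carry is wanted) on the high halves; the subb* opcodes model
    * subtraction with an explicit borrow in the same way. */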
Host vector operations ---------------------- -All of the vector ops have two parameters, ``TCGOP_VECL`` & ``TCGOP_VECE``. -The former specifies the length of the vector in log2 64-bit units; the -latter specifies the length of the element (if applicable) in log2 8-bit units. -E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32. +All of the vector ops have two parameters, ``TCGOP_TYPE`` & ``TCGOP_VECE``. +The former specifies the length of the vector as a TCGType; the latter +specifies the length of the element (if applicable) in log2 8-bit units. .. list-table:: @@ -729,7 +782,7 @@ E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32. * - dup_vec *v0*, *r1* - - | Duplicate the low N bits of *r1* into VECL/VECE copies across *v0*. + - | Duplicate the low N bits of *r1* into TYPE/VECE copies across *v0*. * - dupi_vec *v0*, *c* @@ -738,7 +791,7 @@ E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32. * - dup2_vec *v0*, *r1*, *r2* - - | Duplicate *r2*:*r1* into VECL/64 copies across *v0*. This opcode is + - | Duplicate *r2*:*r1* into TYPE/64 copies across *v0*. This opcode is only present for 32-bit hosts. * - add_vec *v0*, *v1*, *v2* @@ -810,7 +863,7 @@ E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32. .. code-block:: c - for (i = 0; i < VECL/VECE; ++i) { + for (i = 0; i < TYPE/VECE; ++i) { v0[i] = v1[i] << s2; } @@ -832,7 +885,7 @@ E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32. .. code-block:: c - for (i = 0; i < VECL/VECE; ++i) { + for (i = 0; i < TYPE/VECE; ++i) { v0[i] = v1[i] << v2[i]; } @@ -885,9 +938,9 @@ Assumptions The target word size (``TCG_TARGET_REG_BITS``) is expected to be 32 bit or 64 bit. It is expected that the pointer has the same size as the word. -On a 32 bit target, all 64 bit operations are converted to 32 bits. A -few specific operations must be implemented to allow it (see add2_i32, -sub2_i32, brcond2_i32). +On a 32 bit target, all 64 bit operations are converted to 32 bits. +A few specific operations must be implemented to allow it +(see brcond2_i32, setcond2_i32). On a 64 bit target, the values are transferred between 32 and 64-bit registers using the following ops: @@ -928,7 +981,9 @@ operation uses a constant input constraint which does not allow all constants, it must also accept registers in order to have a fallback. The constraint '``i``' is defined generically to accept any constant. The constraint '``r``' is not defined generically, but is consistently -used by each backend to indicate all registers. +used by each backend to indicate all registers. If ``TCG_REG_ZERO`` +is defined by the backend, the constraint '``z``' is defined generically +to map constant 0 to the hardware zero register. The movi_i32 and movi_i64 operations must accept any constants. diff --git a/docs/devel/tcg-plugins.rst b/docs/devel/tcg-plugins.rst index d8725c2854a..9463692c411 100644 --- a/docs/devel/tcg-plugins.rst +++ b/docs/devel/tcg-plugins.rst @@ -61,11 +61,14 @@ translation event the plugin has an option to enumerate the instructions in a block of instructions and optionally register callbacks to some or all instructions when they are executed. -There is also a facility to add an inline event where code to -increment a counter can be directly inlined with the translation. -Currently only a simple increment is supported. This is not atomic so -can miss counts. If you want absolute precision you should use a -callback which can then ensure atomicity itself. 
+There is also a facility to add inline instructions doing various operations, +like adding or storing an immediate value. It is also possible to execute a +callback conditionally, with condition being evaluated inline. All those inline +operations are associated to a ``scoreboard``, which is a thread-local storage +automatically expanded when new cores/threads are created and that can be +accessed/modified in a thread-safe way without any lock needed. Combining inline +operations and conditional callbacks offer a more efficient way to instrument +binaries, compared to classic callbacks. Finally when QEMU exits all the registered *atexit* callbacks are invoked. diff --git a/docs/devel/testing.rst b/docs/devel/testing.rst deleted file mode 100644 index af73d3d64fb..00000000000 --- a/docs/devel/testing.rst +++ /dev/null @@ -1,1557 +0,0 @@ -.. _testing: - -Testing in QEMU -=============== - -QEMU's testing infrastructure is fairly complex as it covers -everything from unit testing and exercising specific sub-systems all -the way to full blown acceptance tests. To get an overview of the -tests you can run ``make check-help`` from either the source or build -tree. - -Most (but not all) tests are also integrated into the meson build -system so can be run directly from the build tree, for example: - -.. code:: - - [./pyvenv/bin/]meson test --suite qemu:softfloat - -will run just the softfloat tests. - -The rest of this document will cover the details for specific test -groups. - -Testing with "make check" -------------------------- - -The "make check" testing family includes most of the C based tests in QEMU. - -The usual way to run these tests is: - -.. code:: - - make check - -which includes QAPI schema tests, unit tests, QTests and some iotests. -Different sub-types of "make check" tests will be explained below. - -Before running tests, it is best to build QEMU programs first. Some tests -expect the executables to exist and will fail with obscure messages if they -cannot find them. - -Unit tests -~~~~~~~~~~ - -Unit tests, which can be invoked with ``make check-unit``, are simple C tests -that typically link to individual QEMU object files and exercise them by -calling exported functions. - -If you are writing new code in QEMU, consider adding a unit test, especially -for utility modules that are relatively stateless or have few dependencies. To -add a new unit test: - -1. Create a new source file. For example, ``tests/unit/foo-test.c``. - -2. Write the test. Normally you would include the header file which exports - the module API, then verify the interface behaves as expected from your - test. The test code should be organized with the glib testing framework. - Copying and modifying an existing test is usually a good idea. - -3. Add the test to ``tests/unit/meson.build``. The unit tests are listed in a - dictionary called ``tests``. The values are any additional sources and - dependencies to be linked with the test. For a simple test whose source - is in ``tests/unit/foo-test.c``, it is enough to add an entry like:: - - { - ... - 'foo-test': [], - ... - } - -Since unit tests don't require environment variables, the simplest way to debug -a unit test failure is often directly invoking it or even running it under -``gdb``. However there can still be differences in behavior between ``make`` -invocations and your manual run, due to ``$MALLOC_PERTURB_`` environment -variable (which affects memory reclamation and catches invalid pointers better) -and gtester options. If necessary, you can run - -.. 
code:: - - make check-unit V=1 - -and copy the actual command line which executes the unit test, then run -it from the command line. - -QTest -~~~~~ - -QTest is a device emulation testing framework. It can be very useful to test -device models; it could also control certain aspects of QEMU (such as virtual -clock stepping), with a special purpose "qtest" protocol. Refer to -:doc:`qtest` for more details. - -QTest cases can be executed with - -.. code:: - - make check-qtest - -Writing portable test cases -~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Both unit tests and qtests can run on POSIX hosts as well as Windows hosts. -Care must be taken when writing portable test cases that can be built and run -successfully on various hosts. The following list shows some best practices: - -* Use portable APIs from glib whenever necessary, e.g.: g_setenv(), - g_mkdtemp(), g_mkdir(). -* Avoid using hardcoded /tmp for temporary file directory. - Use g_get_tmp_dir() instead. -* Bear in mind that Windows has different special string representation for - stdin/stdout/stderr and null devices. For example if your test case uses - "/dev/fd/2" and "/dev/null" on Linux, remember to use "2" and "nul" on - Windows instead. Also IO redirection does not work on Windows, so avoid - using "2>nul" whenever necessary. -* If your test cases uses the blkdebug feature, use relative path to pass - the config and image file paths in the command line as Windows absolute - path contains the delimiter ":" which will confuse the blkdebug parser. -* Use double quotes in your extra QEMU command line in your test cases - instead of single quotes, as Windows does not drop single quotes when - passing the command line to QEMU. -* Windows opens a file in text mode by default, while a POSIX compliant - implementation treats text files and binary files the same. So if your - test cases opens a file to write some data and later wants to compare the - written data with the original one, be sure to pass the letter 'b' as - part of the mode string to fopen(), or O_BINARY flag for the open() call. -* If a certain test case can only run on POSIX or Linux hosts, use a proper - #ifdef in the codes. If the whole test suite cannot run on Windows, disable - the build in the meson.build file. - -QAPI schema tests -~~~~~~~~~~~~~~~~~ - -The QAPI schema tests validate the QAPI parser used by QMP, by feeding -predefined input to the parser and comparing the result with the reference -output. - -The input/output data is managed under the ``tests/qapi-schema`` directory. -Each test case includes four files that have a common base name: - - * ``${casename}.json`` - the file contains the JSON input for feeding the - parser - * ``${casename}.out`` - the file contains the expected stdout from the parser - * ``${casename}.err`` - the file contains the expected stderr from the parser - * ``${casename}.exit`` - the expected error code - -Consider adding a new QAPI schema test when you are making a change on the QAPI -parser (either fixing a bug or extending/modifying the syntax). To do this: - -1. Add four files for the new case as explained above. For example: - - ``$EDITOR tests/qapi-schema/foo.{json,out,err,exit}``. - -2. Add the new test in ``tests/Makefile.include``. For example: - - ``qapi-schema += foo.json`` - -check-block -~~~~~~~~~~~ - -``make check-block`` runs a subset of the block layer iotests (the tests that -are in the "auto" group). -See the "QEMU iotests" section below for more information. 
- -QEMU iotests ------------- - -QEMU iotests, under the directory ``tests/qemu-iotests``, is the testing -framework widely used to test block layer related features. It is higher level -than "make check" tests and 99% of the code is written in bash or Python -scripts. The testing success criteria is golden output comparison, and the -test files are named with numbers. - -To run iotests, make sure QEMU is built successfully, then switch to the -``tests/qemu-iotests`` directory under the build directory, and run ``./check`` -with desired arguments from there. - -By default, "raw" format and "file" protocol is used; all tests will be -executed, except the unsupported ones. You can override the format and protocol -with arguments: - -.. code:: - - # test with qcow2 format - ./check -qcow2 - # or test a different protocol - ./check -nbd - -It's also possible to list test numbers explicitly: - -.. code:: - - # run selected cases with qcow2 format - ./check -qcow2 001 030 153 - -Cache mode can be selected with the "-c" option, which may help reveal bugs -that are specific to certain cache mode. - -More options are supported by the ``./check`` script, run ``./check -h`` for -help. - -Writing a new test case -~~~~~~~~~~~~~~~~~~~~~~~ - -Consider writing a tests case when you are making any changes to the block -layer. An iotest case is usually the choice for that. There are already many -test cases, so it is possible that extending one of them may achieve the goal -and save the boilerplate to create one. (Unfortunately, there isn't a 100% -reliable way to find a related one out of hundreds of tests. One approach is -using ``git grep``.) - -Usually an iotest case consists of two files. One is an executable that -produces output to stdout and stderr, the other is the expected reference -output. They are given the same number in file names. E.g. Test script ``055`` -and reference output ``055.out``. - -In rare cases, when outputs differ between cache mode ``none`` and others, a -``.out.nocache`` file is added. In other cases, when outputs differ between -image formats, more than one ``.out`` files are created ending with the -respective format names, e.g. ``178.out.qcow2`` and ``178.out.raw``. - -There isn't a hard rule about how to write a test script, but a new test is -usually a (copy and) modification of an existing case. There are a few -commonly used ways to create a test: - -* A Bash script. It will make use of several environmental variables related - to the testing procedure, and could source a group of ``common.*`` libraries - for some common helper routines. - -* A Python unittest script. Import ``iotests`` and create a subclass of - ``iotests.QMPTestCase``, then call ``iotests.main`` method. The downside of - this approach is that the output is too scarce, and the script is considered - harder to debug. - -* A simple Python script without using unittest module. This could also import - ``iotests`` for launching QEMU and utilities etc, but it doesn't inherit - from ``iotests.QMPTestCase`` therefore doesn't use the Python unittest - execution. This is a combination of 1 and 2. - -Pick the language per your preference since both Bash and Python have -comparable library support for invoking and interacting with QEMU programs. If -you opt for Python, it is strongly recommended to write Python 3 compatible -code. - -Both Python and Bash frameworks in iotests provide helpers to manage test -images. They can be used to create and clean up images under the test -directory. 
If no I/O or any protocol specific feature is needed, it is often -more convenient to use the pseudo block driver, ``null-co://``, as the test -image, which doesn't require image creation or cleaning up. Avoid system-wide -devices or files whenever possible, such as ``/dev/null`` or ``/dev/zero``. -Otherwise, image locking implications have to be considered. For example, -another application on the host may have locked the file, possibly leading to a -test failure. If using such devices are explicitly desired, consider adding -``locking=off`` option to disable image locking. - -Debugging a test case -~~~~~~~~~~~~~~~~~~~~~ - -The following options to the ``check`` script can be useful when debugging -a failing test: - -* ``-gdb`` wraps every QEMU invocation in a ``gdbserver``, which waits for a - connection from a gdb client. The options given to ``gdbserver`` (e.g. the - address on which to listen for connections) are taken from the ``$GDB_OPTIONS`` - environment variable. By default (if ``$GDB_OPTIONS`` is empty), it listens on - ``localhost:12345``. - It is possible to connect to it for example with - ``gdb -iex "target remote $addr"``, where ``$addr`` is the address - ``gdbserver`` listens on. - If the ``-gdb`` option is not used, ``$GDB_OPTIONS`` is ignored, - regardless of whether it is set or not. - -* ``-valgrind`` attaches a valgrind instance to QEMU. If it detects - warnings, it will print and save the log in - ``$TEST_DIR/.valgrind``. - The final command line will be ``valgrind --log-file=$TEST_DIR/ - .valgrind --error-exitcode=99 $QEMU ...`` - -* ``-d`` (debug) just increases the logging verbosity, showing - for example the QMP commands and answers. - -* ``-p`` (print) redirects QEMU’s stdout and stderr to the test output, - instead of saving it into a log file in - ``$TEST_DIR/qemu-machine-``. - -Test case groups -~~~~~~~~~~~~~~~~ - -"Tests may belong to one or more test groups, which are defined in the form -of a comment in the test source file. By convention, test groups are listed -in the second line of the test file, after the "#!/..." line, like this: - -.. code:: - - #!/usr/bin/env python3 - # group: auto quick - # - ... - -Another way of defining groups is creating the tests/qemu-iotests/group.local -file. This should be used only for downstream (this file should never appear -in upstream). This file may be used for defining some downstream test groups -or for temporarily disabling tests, like this: - -.. code:: - - # groups for some company downstream process - # - # ci - tests to run on build - # down - our downstream tests, not for upstream - # - # Format of each line is: - # TEST_NAME TEST_GROUP [TEST_GROUP ]... - - 013 ci - 210 disabled - 215 disabled - our-ugly-workaround-test down ci - -Note that the following group names have a special meaning: - -- quick: Tests in this group should finish within a few seconds. - -- auto: Tests in this group are used during "make check" and should be - runnable in any case. That means they should run with every QEMU binary - (also non-x86), with every QEMU configuration (i.e. must not fail if - an optional feature is not compiled in - but reporting a "skip" is ok), - work at least with the qcow2 file format, work with all kind of host - filesystems and users (e.g. "nobody" or "root") and must not take too - much memory and disk space (since CI pipelines tend to fail otherwise). - -- disabled: Tests in this group are disabled and ignored by check. - -.. 
_container-ref: - -Container based tests ---------------------- - -Introduction -~~~~~~~~~~~~ - -The container testing framework in QEMU utilizes public images to -build and test QEMU in predefined and widely accessible Linux -environments. This makes it possible to expand the test coverage -across distros, toolchain flavors and library versions. The support -was originally written for Docker although we also support Podman as -an alternative container runtime. Although many of the target -names and scripts are prefixed with "docker" the system will -automatically run on whichever is configured. - -The container images are also used to augment the generation of tests -for testing TCG. See :ref:`checktcg-ref` for more details. - -Docker Prerequisites -~~~~~~~~~~~~~~~~~~~~ - -Install "docker" with the system package manager and start the Docker service -on your development machine, then make sure you have the privilege to run -Docker commands. Typically it means setting up passwordless ``sudo docker`` -command or login as root. For example: - -.. code:: - - $ sudo yum install docker - $ # or `apt-get install docker` for Ubuntu, etc. - $ sudo systemctl start docker - $ sudo docker ps - -The last command should print an empty table, to verify the system is ready. - -An alternative method to set up permissions is by adding the current user to -"docker" group and making the docker daemon socket file (by default -``/var/run/docker.sock``) accessible to the group: - -.. code:: - - $ sudo groupadd docker - $ sudo usermod $USER -a -G docker - $ sudo chown :docker /var/run/docker.sock - -Note that any one of above configurations makes it possible for the user to -exploit the whole host with Docker bind mounting or other privileged -operations. So only do it on development machines. - -Podman Prerequisites -~~~~~~~~~~~~~~~~~~~~ - -Install "podman" with the system package manager. - -.. code:: - - $ sudo dnf install podman - $ podman ps - -The last command should print an empty table, to verify the system is ready. - -Quickstart -~~~~~~~~~~ - -From source tree, type ``make docker-help`` to see the help. Testing -can be started without configuring or building QEMU (``configure`` and -``make`` are done in the container, with parameters defined by the -make target): - -.. code:: - - make docker-test-build@debian - -This will create a container instance using the ``debian`` image (the image -is downloaded and initialized automatically), in which the ``test-build`` job -is executed. - -Registry -~~~~~~~~ - -The QEMU project has a container registry hosted by GitLab at -``registry.gitlab.com/qemu-project/qemu`` which will automatically be -used to pull in pre-built layers. This avoids unnecessary strain on -the distro archives created by multiple developers running the same -container build steps over and over again. This can be overridden -locally by using the ``NOCACHE`` build option: - -.. code:: - - make docker-image-debian-arm64-cross NOCACHE=1 - -Images -~~~~~~ - -Along with many other images, the ``debian`` image is defined in a Dockerfile -in ``tests/docker/dockerfiles/``, called ``debian.docker``. ``make docker-help`` -command will list all the available images. - -A ``.pre`` script can be added beside the ``.docker`` file, which will be -executed before building the image under the build context directory. This is -mainly used to do necessary host side setup. One such setup is ``binfmt_misc``, -for example, to make qemu-user powered cross build containers work. 
- -Most of the existing Dockerfiles were written by hand, simply by creating a -a new ``.docker`` file under the ``tests/docker/dockerfiles/`` directory. -This has led to an inconsistent set of packages being present across the -different containers. - -Thus going forward, QEMU is aiming to automatically generate the Dockerfiles -using the ``lcitool`` program provided by the ``libvirt-ci`` project: - - https://gitlab.com/libvirt/libvirt-ci - -``libvirt-ci`` contains an ``lcitool`` program as well as a list of -mappings to distribution package names for a wide variety of third -party projects. ``lcitool`` applies the mappings to a list of build -pre-requisites in ``tests/lcitool/projects/qemu.yml``, determines the -list of native packages to install on each distribution, and uses them -to generate build environments (dockerfiles and Cirrus CI variable files) -that are consistent across OS distribution. - - -Adding new build pre-requisites -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -When preparing a patch series that adds a new build -pre-requisite to QEMU, the prerequisites should to be added to -``tests/lcitool/projects/qemu.yml`` in order to make the dependency -available in the CI build environments. - -In the simple case where the pre-requisite is already known to ``libvirt-ci`` -the following steps are needed: - - * Edit ``tests/lcitool/projects/qemu.yml`` and add the pre-requisite - - * Run ``make lcitool-refresh`` to re-generate all relevant build environment - manifests - -It may be that ``libvirt-ci`` does not know about the new pre-requisite. -If that is the case, some extra preparation steps will be required -first to contribute the mapping to the ``libvirt-ci`` project: - - * Fork the ``libvirt-ci`` project on gitlab - - * Add an entry for the new build prerequisite to - ``lcitool/facts/mappings.yml``, listing its native package name on as - many OS distros as practical. Run ``python -m pytest --regenerate-output`` - and check that the changes are correct. - - * Commit the ``mappings.yml`` change together with the regenerated test - files, and submit a merge request to the ``libvirt-ci`` project. - Please note in the description that this is a new build pre-requisite - desired for use with QEMU. - - * CI pipeline will run to validate that the changes to ``mappings.yml`` - are correct, by attempting to install the newly listed package on - all OS distributions supported by ``libvirt-ci``. - - * Once the merge request is accepted, go back to QEMU and update - the ``tests/lcitool/libvirt-ci`` submodule to point to a commit that - contains the ``mappings.yml`` update. Then add the prerequisite and - run ``make lcitool-refresh``. - - * Please also trigger gitlab container generation pipelines on your change - for as many OS distros as practical to make sure that there are no - obvious breakages when adding the new pre-requisite. Please see - `CI `__ documentation - page on how to trigger gitlab CI pipelines on your change. - - * Please also trigger gitlab container generation pipelines on your change - for as many OS distros as practical to make sure that there are no - obvious breakages when adding the new pre-requisite. Please see - `CI `__ documentation - page on how to trigger gitlab CI pipelines on your change. - -For enterprise distros that default to old, end-of-life versions of the -Python runtime, QEMU uses a separate set of mappings that work with more -recent versions. These can be found in ``tests/lcitool/mappings.yml``. 
-Modifying this file should not be necessary unless the new pre-requisite -is a Python library or tool. - - -Adding new OS distros -^^^^^^^^^^^^^^^^^^^^^ - -In some cases ``libvirt-ci`` will not know about the OS distro that is -desired to be tested. Before adding a new OS distro, discuss the proposed -addition: - - * Send a mail to qemu-devel, copying people listed in the - MAINTAINERS file for ``Build and test automation``. - - There are limited CI compute resources available to QEMU, so the - cost/benefit tradeoff of adding new OS distros needs to be considered. - - * File an issue at https://gitlab.com/libvirt/libvirt-ci/-/issues - pointing to the qemu-devel mail thread in the archives. - - This alerts other people who might be interested in the work - to avoid duplication, as well as to get feedback from libvirt-ci - maintainers on any tips to ease the addition - -Assuming there is agreement to add a new OS distro then - - * Fork the ``libvirt-ci`` project on gitlab - - * Add metadata under ``lcitool/facts/targets/`` for the new OS - distro. There might be code changes required if the OS distro - uses a package format not currently known. The ``libvirt-ci`` - maintainers can advise on this when the issue is filed. - - * Edit the ``lcitool/facts/mappings.yml`` change to add entries for - the new OS, listing the native package names for as many packages - as practical. Run ``python -m pytest --regenerate-output`` and - check that the changes are correct. - - * Commit the changes to ``lcitool/facts`` and the regenerated test - files, and submit a merge request to the ``libvirt-ci`` project. - Please note in the description that this is a new build pre-requisite - desired for use with QEMU - - * CI pipeline will run to validate that the changes to ``mappings.yml`` - are correct, by attempting to install the newly listed package on - all OS distributions supported by ``libvirt-ci``. - - * Once the merge request is accepted, go back to QEMU and update - the ``libvirt-ci`` submodule to point to a commit that contains - the ``mappings.yml`` update. - - -Tests -~~~~~ - -Different tests are added to cover various configurations to build and test -QEMU. Docker tests are the executables under ``tests/docker`` named -``test-*``. They are typically shell scripts and are built on top of a shell -library, ``tests/docker/common.rc``, which provides helpers to find the QEMU -source and build it. - -The full list of tests is printed in the ``make docker-help`` help. - -Debugging a Docker test failure -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When CI tasks, maintainers or yourself report a Docker test failure, follow the -below steps to debug it: - -1. Locally reproduce the failure with the reported command line. E.g. run - ``make docker-test-mingw@fedora-win64-cross J=8``. -2. Add "V=1" to the command line, try again, to see the verbose output. -3. Further add "DEBUG=1" to the command line. This will pause in a shell prompt - in the container right before testing starts. You could either manually - build QEMU and run tests from there, or press Ctrl-D to let the Docker - testing continue. -4. If you press Ctrl-D, the same building and testing procedure will begin, and - will hopefully run into the error again. After that, you will be dropped to - the prompt for debug. - -Options -~~~~~~~ - -Various options can be used to affect how Docker tests are done. The full -list is in the ``make docker`` help text. The frequently used ones are: - -* ``V=1``: the same as in top level ``make``. 
It will be propagated to the - container and enable verbose output. -* ``J=$N``: the number of parallel tasks in make commands in the container, - similar to the ``-j $N`` option in top level ``make``. (The ``-j`` option in - top level ``make`` will not be propagated into the container.) -* ``DEBUG=1``: enables debug. See the previous "Debugging a Docker test - failure" section. - -Thread Sanitizer ----------------- - -Thread Sanitizer (TSan) is a tool which can detect data races. QEMU supports -building and testing with this tool. - -For more information on TSan: - -https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual - -Thread Sanitizer in Docker -~~~~~~~~~~~~~~~~~~~~~~~~~~ -TSan is currently supported in the ubuntu2204 docker. - -The test-tsan test will build using TSan and then run make check. - -.. code:: - - make docker-test-tsan@ubuntu2204 - -TSan warnings under docker are placed in files located at build/tsan/. - -We recommend using DEBUG=1 to allow launching the test from inside the docker, -and to allow review of the warnings generated by TSan. - -Building and Testing with TSan -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is possible to build and test with TSan, with a few additional steps. -These steps are normally done automatically in the docker. - -There is a one time patch needed in clang-9 or clang-10 at this time: - -.. code:: - - sed -i 's/^const/static const/g' \ - /usr/lib/llvm-10/lib/clang/10.0.0/include/sanitizer/tsan_interface.h - -To configure the build for TSan: - -.. code:: - - ../configure --enable-tsan --cc=clang-10 --cxx=clang++-10 \ - --disable-werror --extra-cflags="-O0" - -The runtime behavior of TSAN is controlled by the TSAN_OPTIONS environment -variable. - -More information on the TSAN_OPTIONS can be found here: - -https://github.com/google/sanitizers/wiki/ThreadSanitizerFlags - -For example: - -.. code:: - - export TSAN_OPTIONS=suppressions=/tests/tsan/suppressions.tsan \ - detect_deadlocks=false history_size=7 exitcode=0 \ - log_path=/tsan/tsan_warning - -The above exitcode=0 has TSan continue without error if any warnings are found. -This allows for running the test and then checking the warnings afterwards. -If you want TSan to stop and exit with error on warnings, use exitcode=66. - -TSan Suppressions -~~~~~~~~~~~~~~~~~ -Keep in mind that for any data race warning, although there might be a data race -detected by TSan, there might be no actual bug here. TSan provides several -different mechanisms for suppressing warnings. In general it is recommended -to fix the code if possible to eliminate the data race rather than suppress -the warning. - -A few important files for suppressing warnings are: - -tests/tsan/suppressions.tsan - Has TSan warnings we wish to suppress at runtime. -The comment on each suppression will typically indicate why we are -suppressing it. More information on the file format can be found here: - -https://github.com/google/sanitizers/wiki/ThreadSanitizerSuppressions - -tests/tsan/ignore.tsan - Has TSan warnings we wish to disable -at compile time for test or debug. -Add flags to configure to enable: - -"--extra-cflags=-fsanitize-blacklist=/tests/tsan/ignore.tsan" - -More information on the file format can be found here under "Blacklist Format": - -https://github.com/google/sanitizers/wiki/ThreadSanitizerFlags - -TSan Annotations -~~~~~~~~~~~~~~~~ -include/qemu/tsan.h defines annotations. See this file for more descriptions -of the annotations themselves. 
Annotations can be used to suppress -TSan warnings or give TSan more information so that it can detect proper -relationships between accesses of data. - -Annotation examples can be found here: - -https://github.com/llvm/llvm-project/tree/master/compiler-rt/test/tsan/ - -Good files to start with are: annotate_happens_before.cpp and ignore_race.cpp - -The full set of annotations can be found here: - -https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp - -docker-binfmt-image-debian-% targets ------------------------------------- - -It is possible to combine Debian's bootstrap scripts with a configured -``binfmt_misc`` to bootstrap a number of Debian's distros including -experimental ports not yet supported by a released OS. This can -simplify setting up a rootfs by using docker to contain the foreign -rootfs rather than manually invoking chroot. - -Setting up ``binfmt_misc`` -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can use the script ``qemu-binfmt-conf.sh`` to configure a QEMU -user binary to automatically run binaries for the foreign -architecture. While the scripts will try their best to work with -dynamically linked QEMU's a statically linked one will present less -potential complications when copying into the docker image. Modern -kernels support the ``F`` (fix binary) flag which will open the QEMU -executable on setup and avoids the need to find and re-open in the -chroot environment. This is triggered with the ``--persistent`` flag. - -Example invocation -~~~~~~~~~~~~~~~~~~ - -For example to setup the HPPA ports builds of Debian:: - - make docker-binfmt-image-debian-sid-hppa \ - DEB_TYPE=sid DEB_ARCH=hppa \ - DEB_URL=http://ftp.ports.debian.org/debian-ports/ \ - DEB_KEYRING=/usr/share/keyrings/debian-ports-archive-keyring.gpg \ - EXECUTABLE=(pwd)/qemu-hppa V=1 - -The ``DEB_`` variables are substitutions used by -``debian-bootstrap.pre`` which is called to do the initial debootstrap -of the rootfs before it is copied into the container. The second stage -is run as part of the build. The final image will be tagged as -``qemu/debian-sid-hppa``. - -VM testing ----------- - -This test suite contains scripts that bootstrap various guest images that have -necessary packages to build QEMU. The basic usage is documented in ``Makefile`` -help which is displayed with ``make vm-help``. - -Quickstart -~~~~~~~~~~ - -Run ``make vm-help`` to list available make targets. Invoke a specific make -command to run build test in an image. For example, ``make vm-build-freebsd`` -will build the source tree in the FreeBSD image. The command can be executed -from either the source tree or the build dir; if the former, ``./configure`` is -not needed. The command will then generate the test image in ``./tests/vm/`` -under the working directory. - -Note: images created by the scripts accept a well-known RSA key pair for SSH -access, so they SHOULD NOT be exposed to external interfaces if you are -concerned about attackers taking control of the guest and potentially -exploiting a QEMU security bug to compromise the host. - -QEMU binaries -~~~~~~~~~~~~~ - -By default, ``qemu-system-x86_64`` is searched in $PATH to run the guest. If -there isn't one, or if it is older than 2.10, the test won't work. In this case, -provide the QEMU binary in env var: ``QEMU=/path/to/qemu-2.10+``. - -Likewise the path to ``qemu-img`` can be set in QEMU_IMG environment variable. 
- -Make jobs -~~~~~~~~~ - -The ``-j$X`` option in the make command line is not propagated into the VM, -specify ``J=$X`` to control the make jobs in the guest. - -Debugging -~~~~~~~~~ - -Add ``DEBUG=1`` and/or ``V=1`` to the make command to allow interactive -debugging and verbose output. If this is not enough, see the next section. -``V=1`` will be propagated down into the make jobs in the guest. - -Manual invocation -~~~~~~~~~~~~~~~~~ - -Each guest script is an executable script with the same command line options. -For example to work with the netbsd guest, use ``$QEMU_SRC/tests/vm/netbsd``: - -.. code:: - - $ cd $QEMU_SRC/tests/vm - - # To bootstrap the image - $ ./netbsd --build-image --image /var/tmp/netbsd.img - <...> - - # To run an arbitrary command in guest (the output will not be echoed unless - # --debug is added) - $ ./netbsd --debug --image /var/tmp/netbsd.img uname -a - - # To build QEMU in guest - $ ./netbsd --debug --image /var/tmp/netbsd.img --build-qemu $QEMU_SRC - - # To get to an interactive shell - $ ./netbsd --interactive --image /var/tmp/netbsd.img sh - -Adding new guests -~~~~~~~~~~~~~~~~~ - -Please look at existing guest scripts for how to add new guests. - -Most importantly, create a subclass of BaseVM and implement ``build_image()`` -method and define ``BUILD_SCRIPT``, then finally call ``basevm.main()`` from -the script's ``main()``. - -* Usually in ``build_image()``, a template image is downloaded from a - predefined URL. ``BaseVM._download_with_cache()`` takes care of the cache and - the checksum, so consider using it. - -* Once the image is downloaded, users, SSH server and QEMU build deps should - be set up: - - - Root password set to ``BaseVM.ROOT_PASS`` - - User ``BaseVM.GUEST_USER`` is created, and password set to - ``BaseVM.GUEST_PASS`` - - SSH service is enabled and started on boot, - ``$QEMU_SRC/tests/keys/id_rsa.pub`` is added to ssh's ``authorized_keys`` - file of both root and the normal user - - DHCP client service is enabled and started on boot, so that it can - automatically configure the virtio-net-pci NIC and communicate with QEMU - user net (10.0.2.2) - - Necessary packages are installed to untar the source tarball and build - QEMU - -* Write a proper ``BUILD_SCRIPT`` template, which should be a shell script that - untars a raw virtio-blk block device, which is the tarball data blob of the - QEMU source tree, then configure/build it. Running "make check" is also - recommended. - -Image fuzzer testing --------------------- - -An image fuzzer was added to exercise format drivers. Currently only qcow2 is -supported. To start the fuzzer, run - -.. code:: - - tests/image-fuzzer/runner.py -c '[["qemu-img", "info", "$test_img"]]' /tmp/test qcow2 - -Alternatively, some command different from ``qemu-img info`` can be tested, by -changing the ``-c`` option. - -Integration tests using the Avocado Framework ---------------------------------------------- - -The ``tests/avocado`` directory hosts integration tests. They're usually -higher level tests, and may interact with external resources and with -various guest operating systems. - -These tests are written using the Avocado Testing Framework (which must -be installed separately) in conjunction with a the ``avocado_qemu.Test`` -class, implemented at ``tests/avocado/avocado_qemu``. 
- -Tests based on ``avocado_qemu.Test`` can easily: - - * Customize the command line arguments given to the convenience - ``self.vm`` attribute (a QEMUMachine instance) - - * Interact with the QEMU monitor, send QMP commands and check - their results - - * Interact with the guest OS, using the convenience console device - (which may be useful to assert the effectiveness and correctness of - command line arguments or QMP commands) - - * Interact with external data files that accompany the test itself - (see ``self.get_data()``) - - * Download (and cache) remote data files, such as firmware and kernel - images - - * Have access to a library of guest OS images (by means of the - ``avocado.utils.vmimage`` library) - - * Make use of various other test related utilities available at the - test class itself and at the utility library: - - - http://avocado-framework.readthedocs.io/en/latest/api/test/avocado.html#avocado.Test - - http://avocado-framework.readthedocs.io/en/latest/api/utils/avocado.utils.html - -Running tests -~~~~~~~~~~~~~ - -You can run the avocado tests simply by executing: - -.. code:: - - make check-avocado - -This involves the automatic installation, from PyPI, of all the -necessary avocado-framework dependencies into the QEMU venv within the -build tree (at ``./pyvenv``). Test results are also saved within the -build tree (at ``tests/results``). - -Note: the build environment must be using a Python 3 stack, and have -the ``venv`` and ``pip`` packages installed. If necessary, make sure -``configure`` is called with ``--python=`` and that those modules are -available. On Debian and Ubuntu based systems, depending on the -specific version, they may be on packages named ``python3-venv`` and -``python3-pip``. - -It is also possible to run tests based on tags using the -``make check-avocado`` command and the ``AVOCADO_TAGS`` environment -variable: - -.. code:: - - make check-avocado AVOCADO_TAGS=quick - -Note that tags separated with commas have an AND behavior, while tags -separated by spaces have an OR behavior. For more information on Avocado -tags, see: - - https://avocado-framework.readthedocs.io/en/latest/guides/user/chapters/tags.html - -To run a single test file, a couple of them, or a test within a file -using the ``make check-avocado`` command, set the ``AVOCADO_TESTS`` -environment variable with the test files or test names. To run all -tests from a single file, use: - - .. code:: - - make check-avocado AVOCADO_TESTS=$FILEPATH - -The same is valid to run tests from multiple test files: - - .. code:: - - make check-avocado AVOCADO_TESTS='$FILEPATH1 $FILEPATH2' - -To run a single test within a file, use: - - .. code:: - - make check-avocado AVOCADO_TESTS=$FILEPATH:$TESTCLASS.$TESTNAME - -The same is valid to run single tests from multiple test files: - - .. code:: - - make check-avocado AVOCADO_TESTS='$FILEPATH1:$TESTCLASS1.$TESTNAME1 $FILEPATH2:$TESTCLASS2.$TESTNAME2' - -The scripts installed inside the virtual environment may be used -without an "activation". For instance, the Avocado test runner -may be invoked by running: - - .. code:: - - pyvenv/bin/avocado run $OPTION1 $OPTION2 tests/avocado/ - -Note that if ``make check-avocado`` was not executed before, it is -possible to create the Python virtual environment with the dependencies -needed running: - - .. code:: - - make check-venv - -It is also possible to run tests from a single file or a single test within -a test file. To run tests from a single file within the build tree, use: - - .. 
code:: - - pyvenv/bin/avocado run tests/avocado/$TESTFILE - -To run a single test within a test file, use: - - .. code:: - - pyvenv/bin/avocado run tests/avocado/$TESTFILE:$TESTCLASS.$TESTNAME - -Valid test names are visible in the output from any previous execution -of Avocado or ``make check-avocado``, and can also be queried using: - - .. code:: - - pyvenv/bin/avocado list tests/avocado - -Manual Installation -~~~~~~~~~~~~~~~~~~~ - -To manually install Avocado and its dependencies, run: - -.. code:: - - pip install --user avocado-framework - -Alternatively, follow the instructions on this link: - - https://avocado-framework.readthedocs.io/en/latest/guides/user/chapters/installing.html - -Overview -~~~~~~~~ - -The ``tests/avocado/avocado_qemu`` directory provides the -``avocado_qemu`` Python module, containing the ``avocado_qemu.Test`` -class. Here's a simple usage example: - -.. code:: - - from avocado_qemu import QemuSystemTest - - - class Version(QemuSystemTest): - """ - :avocado: tags=quick - """ - def test_qmp_human_info_version(self): - self.vm.launch() - res = self.vm.cmd('human-monitor-command', - command_line='info version') - self.assertRegex(res, r'^(\d+\.\d+\.\d)') - -To execute your test, run: - -.. code:: - - avocado run version.py - -Tests may be classified according to a convention by using docstring -directives such as ``:avocado: tags=TAG1,TAG2``. To run all tests -in the current directory, tagged as "quick", run: - -.. code:: - - avocado run -t quick . - -The ``avocado_qemu.Test`` base test class -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The ``avocado_qemu.Test`` class has a number of characteristics that -are worth being mentioned right away. - -First of all, it attempts to give each test a ready to use QEMUMachine -instance, available at ``self.vm``. Because many tests will tweak the -QEMU command line, launching the QEMUMachine (by using ``self.vm.launch()``) -is left to the test writer. - -The base test class has also support for tests with more than one -QEMUMachine. The way to get machines is through the ``self.get_vm()`` -method which will return a QEMUMachine instance. The ``self.get_vm()`` -method accepts arguments that will be passed to the QEMUMachine creation -and also an optional ``name`` attribute so you can identify a specific -machine and get it more than once through the tests methods. A simple -and hypothetical example follows: - -.. code:: - - from avocado_qemu import QemuSystemTest - - - class MultipleMachines(QemuSystemTest): - def test_multiple_machines(self): - first_machine = self.get_vm() - second_machine = self.get_vm() - self.get_vm(name='third_machine').launch() - - first_machine.launch() - second_machine.launch() - - first_res = first_machine.cmd( - 'human-monitor-command', - command_line='info version') - - second_res = second_machine.cmd( - 'human-monitor-command', - command_line='info version') - - third_res = self.get_vm(name='third_machine').cmd( - 'human-monitor-command', - command_line='info version') - - self.assertEqual(first_res, second_res, third_res) - -At test "tear down", ``avocado_qemu.Test`` handles all the QEMUMachines -shutdown. - -The ``avocado_qemu.LinuxTest`` base test class -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The ``avocado_qemu.LinuxTest`` is further specialization of the -``avocado_qemu.Test`` class, so it contains all the characteristics of -the later plus some extra features. 
- -First of all, this base class is intended for tests that need to -interact with a fully booted and operational Linux guest. At this -time, it uses a Fedora 31 guest image. The most basic example looks -like this: - -.. code:: - - from avocado_qemu import LinuxTest - - - class SomeTest(LinuxTest): - - def test(self): - self.launch_and_wait() - self.ssh_command('some_command_to_be_run_in_the_guest') - -Please refer to tests that use ``avocado_qemu.LinuxTest`` under -``tests/avocado`` for more examples. - -QEMUMachine -~~~~~~~~~~~ - -The QEMUMachine API is already widely used in the Python iotests, -device-crash-test and other Python scripts. It's a wrapper around the -execution of a QEMU binary, giving its users: - - * the ability to set command line arguments to be given to the QEMU - binary - - * a ready to use QMP connection and interface, which can be used to - send commands and inspect its results, as well as asynchronous - events - - * convenience methods to set commonly used command line arguments in - a more succinct and intuitive way - -QEMU binary selection -^^^^^^^^^^^^^^^^^^^^^ - -The QEMU binary used for the ``self.vm`` QEMUMachine instance will -primarily depend on the value of the ``qemu_bin`` parameter. If it's -not explicitly set, its default value will be the result of a dynamic -probe in the same source tree. A suitable binary will be one that -targets the architecture matching host machine. - -Based on this description, test writers will usually rely on one of -the following approaches: - -1) Set ``qemu_bin``, and use the given binary - -2) Do not set ``qemu_bin``, and use a QEMU binary named like - "qemu-system-${arch}", either in the current - working directory, or in the current source tree. - -The resulting ``qemu_bin`` value will be preserved in the -``avocado_qemu.Test`` as an attribute with the same name. - -Attribute reference -~~~~~~~~~~~~~~~~~~~ - -Test -^^^^ - -Besides the attributes and methods that are part of the base -``avocado.Test`` class, the following attributes are available on any -``avocado_qemu.Test`` instance. - -vm -'' - -A QEMUMachine instance, initially configured according to the given -``qemu_bin`` parameter. - -arch -'''' - -The architecture can be used on different levels of the stack, e.g. by -the framework or by the test itself. At the framework level, it will -currently influence the selection of a QEMU binary (when one is not -explicitly given). - -Tests are also free to use this attribute value, for their own needs. -A test may, for instance, use the same value when selecting the -architecture of a kernel or disk image to boot a VM with. - -The ``arch`` attribute will be set to the test parameter of the same -name. If one is not given explicitly, it will either be set to -``None``, or, if the test is tagged with one (and only one) -``:avocado: tags=arch:VALUE`` tag, it will be set to ``VALUE``. - -cpu -''' - -The cpu model that will be set to all QEMUMachine instances created -by the test. - -The ``cpu`` attribute will be set to the test parameter of the same -name. If one is not given explicitly, it will either be set to -``None ``, or, if the test is tagged with one (and only one) -``:avocado: tags=cpu:VALUE`` tag, it will be set to ``VALUE``. - -machine -''''''' - -The machine type that will be set to all QEMUMachine instances created -by the test. - -The ``machine`` attribute will be set to the test parameter of the same -name. 
If one is not given explicitly, it will either be set to -``None``, or, if the test is tagged with one (and only one) -``:avocado: tags=machine:VALUE`` tag, it will be set to ``VALUE``. - -qemu_bin -'''''''' - -The preserved value of the ``qemu_bin`` parameter or the result of the -dynamic probe for a QEMU binary in the current working directory or -source tree. - -LinuxTest -^^^^^^^^^ - -Besides the attributes present on the ``avocado_qemu.Test`` base -class, the ``avocado_qemu.LinuxTest`` adds the following attributes: - -distro -'''''' - -The name of the Linux distribution used as the guest image for the -test. The name should match the **Provider** column on the list -of images supported by the avocado.utils.vmimage library: - -https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images - -distro_version -'''''''''''''' - -The version of the Linux distribution as the guest image for the -test. The name should match the **Version** column on the list -of images supported by the avocado.utils.vmimage library: - -https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images - -distro_checksum -''''''''''''''' - -The sha256 hash of the guest image file used for the test. - -If this value is not set in the code or by a test parameter (with the -same name), no validation on the integrity of the image will be -performed. - -Parameter reference -~~~~~~~~~~~~~~~~~~~ - -To understand how Avocado parameters are accessed by tests, and how -they can be passed to tests, please refer to:: - - https://avocado-framework.readthedocs.io/en/latest/guides/writer/chapters/writing.html#accessing-test-parameters - -Parameter values can be easily seen in the log files, and will look -like the following: - -.. code:: - - PARAMS (key=qemu_bin, path=*, default=./qemu-system-x86_64) => './qemu-system-x86_64 - -Test -^^^^ - -arch -'''' - -The architecture that will influence the selection of a QEMU binary -(when one is not explicitly given). - -Tests are also free to use this parameter value, for their own needs. -A test may, for instance, use the same value when selecting the -architecture of a kernel or disk image to boot a VM with. - -This parameter has a direct relation with the ``arch`` attribute. If -not given, it will default to None. - -cpu -''' - -The cpu model that will be set to all QEMUMachine instances created -by the test. - -machine -''''''' - -The machine type that will be set to all QEMUMachine instances created -by the test. - -qemu_bin -'''''''' - -The exact QEMU binary to be used on QEMUMachine. - -LinuxTest -^^^^^^^^^ - -Besides the parameters present on the ``avocado_qemu.Test`` base -class, the ``avocado_qemu.LinuxTest`` adds the following parameters: - -distro -'''''' - -The name of the Linux distribution used as the guest image for the -test. The name should match the **Provider** column on the list -of images supported by the avocado.utils.vmimage library: - -https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images - -distro_version -'''''''''''''' - -The version of the Linux distribution as the guest image for the -test. The name should match the **Version** column on the list -of images supported by the avocado.utils.vmimage library: - -https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images - -distro_checksum -''''''''''''''' - -The sha256 hash of the guest image file used for the test. 
- -If this value is not set in the code or by this parameter no -validation on the integrity of the image will be performed. - -Skipping tests -~~~~~~~~~~~~~~ - -The Avocado framework provides Python decorators which allow for easily skip -tests running under certain conditions. For example, on the lack of a binary -on the test system or when the running environment is a CI system. For further -information about those decorators, please refer to:: - - https://avocado-framework.readthedocs.io/en/latest/guides/writer/chapters/writing.html#skipping-tests - -While the conditions for skipping tests are often specifics of each one, there -are recurring scenarios identified by the QEMU developers and the use of -environment variables became a kind of standard way to enable/disable tests. - -Here is a list of the most used variables: - -AVOCADO_ALLOW_LARGE_STORAGE -^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Tests which are going to fetch or produce assets considered *large* are not -going to run unless that ``AVOCADO_ALLOW_LARGE_STORAGE=1`` is exported on -the environment. - -The definition of *large* is a bit arbitrary here, but it usually means an -asset which occupies at least 1GB of size on disk when uncompressed. - -SPEED -^^^^^ -Tests which have a long runtime will not be run unless ``SPEED=slow`` is -exported on the environment. - -The definition of *long* is a bit arbitrary here, and it depends on the -usefulness of the test too. A unique test is worth spending more time on, -small variations on existing tests perhaps less so. As a rough guide, -a test or set of similar tests which take more than 100 seconds to -complete. - -AVOCADO_ALLOW_UNTRUSTED_CODE -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -There are tests which will boot a kernel image or firmware that can be -considered not safe to run on the developer's workstation, thus they are -skipped by default. The definition of *not safe* is also arbitrary but -usually it means a blob which either its source or build process aren't -public available. - -You should export ``AVOCADO_ALLOW_UNTRUSTED_CODE=1`` on the environment in -order to allow tests which make use of those kind of assets. - -AVOCADO_TIMEOUT_EXPECTED -^^^^^^^^^^^^^^^^^^^^^^^^ -The Avocado framework has a timeout mechanism which interrupts tests to avoid the -test suite of getting stuck. The timeout value can be set via test parameter or -property defined in the test class, for further details:: - - https://avocado-framework.readthedocs.io/en/latest/guides/writer/chapters/writing.html#setting-a-test-timeout - -Even though the timeout can be set by the test developer, there are some tests -that may not have a well-defined limit of time to finish under certain -conditions. For example, tests that take longer to execute when QEMU is -compiled with debug flags. Therefore, the ``AVOCADO_TIMEOUT_EXPECTED`` variable -has been used to determine whether those tests should run or not. - -QEMU_TEST_FLAKY_TESTS -^^^^^^^^^^^^^^^^^^^^^ -Some tests are not working reliably and thus are disabled by default. -This includes tests that don't run reliably on GitLab's CI which -usually expose real issues that are rarely seen on developer machines -due to the constraints of the CI environment. If you encounter a -similar situation then raise a bug and then mark the test as shown on -the code snippet below: - -.. 
code:: - - # See https://gitlab.com/qemu-project/qemu/-/issues/nnnn - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - def test(self): - do_something() - -You can also add ``:avocado: tags=flaky`` to the test meta-data so -only the flaky tests can be run as a group: - -.. code:: - - env QEMU_TEST_FLAKY_TESTS=1 ./pyvenv/bin/avocado \ - run tests/avocado -filter-by-tags=flaky - -Tests should not live in this state forever and should either be fixed -or eventually removed. - - -Uninstalling Avocado -~~~~~~~~~~~~~~~~~~~~ - -If you've followed the manual installation instructions above, you can -easily uninstall Avocado. Start by listing the packages you have -installed:: - - pip list --user - -And remove any package you want with:: - - pip uninstall - -If you've used ``make check-avocado``, the Python virtual environment where -Avocado is installed will be cleaned up as part of ``make check-clean``. - -.. _checktcg-ref: - -Testing with "make check-tcg" ------------------------------ - -The check-tcg tests are intended for simple smoke tests of both -linux-user and softmmu TCG functionality. However to build test -programs for guest targets you need to have cross compilers available. -If your distribution supports cross compilers you can do something as -simple as:: - - apt install gcc-aarch64-linux-gnu - -The configure script will automatically pick up their presence. -Sometimes compilers have slightly odd names so the availability of -them can be prompted by passing in the appropriate configure option -for the architecture in question, for example:: - - $(configure) --cross-cc-aarch64=aarch64-cc - -There is also a ``--cross-cc-cflags-ARCH`` flag in case additional -compiler flags are needed to build for a given target. - -If you have the ability to run containers as the user the build system -will automatically use them where no system compiler is available. For -architectures where we also support building QEMU we will generally -use the same container to build tests. However there are a number of -additional containers defined that have a minimal cross-build -environment that is only suitable for building test cases. Sometimes -we may use a bleeding edge distribution for compiler features needed -for test cases that aren't yet in the LTS distros we support for QEMU -itself. - -See :ref:`container-ref` for more details. - -Running subset of tests -~~~~~~~~~~~~~~~~~~~~~~~ - -You can build the tests for one architecture:: - - make build-tcg-tests-$TARGET - -And run with:: - - make run-tcg-tests-$TARGET - -Adding ``V=1`` to the invocation will show the details of how to -invoke QEMU for the test which is useful for debugging tests. - -Running individual tests -~~~~~~~~~~~~~~~~~~~~~~~~ - -Tests can also be run directly from the test build directory. If you -run ``make help`` from the test build directory you will get a list of -all the tests that can be run. Please note that same binaries are used -in multiple tests, for example:: - - make run-plugin-test-mmap-with-libinline.so - -will run the mmap test with the ``libinline.so`` TCG plugin. The -gdbstub tests also re-use the test binaries but while exercising gdb. - -TCG test dependencies -~~~~~~~~~~~~~~~~~~~~~ - -The TCG tests are deliberately very light on dependencies and are -either totally bare with minimal gcc lib support (for system-mode tests) -or just glibc (for linux-user tests). This is because getting a cross -compiler to work with additional libraries can be challenging. 
- -Other TCG Tests ---------------- - -There are a number of out-of-tree test suites that are used for more -extensive testing of processor features. - -KVM Unit Tests -~~~~~~~~~~~~~~ - -The KVM unit tests are designed to run as a Guest OS under KVM but -there is no reason why they can't exercise the TCG as well. It -provides a minimal OS kernel with hooks for enabling the MMU as well -as reporting test results via a special device:: - - https://git.kernel.org/pub/scm/virt/kvm/kvm-unit-tests.git - -Linux Test Project -~~~~~~~~~~~~~~~~~~ - -The LTP is focused on exercising the syscall interface of a Linux -kernel. It checks that syscalls behave as documented and strives to -exercise as many corner cases as possible. It is a useful test suite -to run to exercise QEMU's linux-user code:: - - https://linux-test-project.github.io/ - -GCC gcov support ----------------- - -``gcov`` is a GCC tool to analyze the testing coverage by -instrumenting the tested code. To use it, configure QEMU with -``--enable-gcov`` option and build. Then run the tests as usual. - -If you want to gather coverage information on a single test the ``make -clean-gcda`` target can be used to delete any existing coverage -information before running a single test. - -You can generate a HTML coverage report by executing ``make -coverage-html`` which will create -``meson-logs/coveragereport/index.html``. - -Further analysis can be conducted by running the ``gcov`` command -directly on the various .gcda output files. Please read the ``gcov`` -documentation for more information. diff --git a/docs/devel/testing/acpi-bits.rst b/docs/devel/testing/acpi-bits.rst new file mode 100644 index 00000000000..9a4d716ebff --- /dev/null +++ b/docs/devel/testing/acpi-bits.rst @@ -0,0 +1,155 @@ +================================== +ACPI/SMBIOS testing using biosbits +================================== +************ +Introduction +************ +Biosbits is a software written by Josh Triplett that can be downloaded +from https://biosbits.org/. The github codebase can be found +`here `__. It is a software that +executes the bios components such as acpi and smbios tables directly through +acpica bios interpreter (a freely available C based library written by Intel, +downloadable from https://acpica.org/ and is included with biosbits) without an +operating system getting involved in between. Bios-bits has python integration +with grub so actual routines that executes bios components can be written in +python instead of bash-ish (grub's native scripting language). +There are several advantages to directly testing the bios in a real physical +machine or in a VM as opposed to indirectly discovering bios issues through the +operating system (the OS). Operating systems tend to bypass bios problems and +hide them from the end user. We have more control of what we wanted to test and +how by being as close to the bios on a running system as possible without a +complicated software component such as an operating system coming in between. +Another issue is that we cannot exercise bios components such as ACPI and +SMBIOS without being in the highest hardware privilege level, ring 0 for +example in case of x86. Since the OS executes from ring 0 whereas normal user +land software resides in unprivileged ring 3, operating system must be modified +in order to write our test routines that exercise and test the bios. This is +not possible in all cases. Lastly, test frameworks and routines are preferably +written using a high level scripting language such as python. 
OSes and +OS modules are generally written using low level languages such as C and +low level assembly machine language. Writing test routines in a low level +language makes things more cumbersome. These and other reasons makes using +bios-bits very attractive for testing bioses. More details on the inspiration +for developing biosbits and its real life uses were presented `at Plumbers +in 2011 `__ and `at Linux.conf.au in 2012 `__. + +For QEMU, we maintain a fork of bios bits in `gitlab`_, along with all +the dependent submodules. This fork contains numerous fixes, a newer +acpica and changes specific to running these functional QEMU tests using +bits. The author of this document is the current maintainer of the QEMU +fork of bios bits repository. For more information, please see `the +author's FOSDEM presentation `__ on this bios-bits based test framework. + +.. _Plumbers: https://blog.linuxplumbersconf.org/2011/ocw/system/presentations/867/original/bits.pdf +.. _Linux.conf.au: https://www.youtube.com/watch?v=36QIepyUuhg +.. _gitlab: https://gitlab.com/qemu-project/biosbits-bits +.. _FOSDEM: https://fosdem.org/2024/schedule/event/fosdem-2024-2262-exercising-qemu-generated-acpi-smbios-tables-using-biosbits-from-within-a-guest-vm-/ + +********************************* +Description of the test framework +********************************* + +Under the directory ``tests/functional/``, ``test_acpi_bits.py`` is a QEMU +functional test that drives all this. + +A brief description of the various test files follows. + +Under ``tests/functional/`` as the root we have: + +:: + + ├── acpi-bits + │ ├── bits-config + │ │ └── bits-cfg.txt + │ ├── bits-tests + │ ├── smbios.py2 + │ ├── testacpi.py2 + │ └── testcpuid.py2 + ├── test_acpi_bits.py + +* ``tests/functional``: + + ``test_acpi_bits.py``: + This is the main python functional test script that generates a + biosbits iso. It then spawns a QEMU VM with it, collects the log and reports + test failures. This is the script one would be interested in if they wanted + to add or change some component of the log parsing, add a new command line + to alter how QEMU is spawned etc. Test writers typically would not need to + modify this script unless they wanted to enhance or change the log parsing + for their tests. In order to enable debugging, you can set **V=1** + environment variable. This enables verbose mode for the test and also dumps + the entire log from bios bits and more information in case failure happens. + You can also set **BITS_DEBUG=1** to turn on debug mode. It will enable + verbose logs and also retain the temporary work directory the test used for + you to inspect and run the specific commands manually. + + In order to run this test, please perform the following steps from the QEMU + build directory (assuming that the sources are in ".."): + :: + + $ export PYTHONPATH=../python:../tests/functional + $ export QEMU_TEST_QEMU_BINARY=$PWD/qemu-system-x86_64 + $ python3 ../tests/functional/test_acpi_bits.py + + The above will run all acpi-bits functional tests (producing output in + tap format). + + You can inspect the log files in tests/functional/x86_64/test_acpi_bits.*/ + for more information about the run or in order to diagnoze issues. + If you pass V=1 in the environment, more diagnostic logs will be put into + the test log. + +* ``tests/functional/acpi-bits/bits-config``: + + This location contains biosbits configuration files that determine how the + software runs the tests. 
+ + ``bits-config.txt``: + This is the biosbits config file that determines what tests + or actions are performed by bits. The description of the config options are + provided in the file itself. + +* ``tests/functional/acpi-bits/bits-tests``: + + This directory contains biosbits python based tests that are run from within + the biosbits environment in the spawned VM. New additions of test cases can + be made in the appropriate test file. For example, new acpi tests can go + into testacpi.py2 and one would call testsuite.add_test() to register the new + test so that it gets executed as a part of the ACPI tests. + It might be occasionally necessary to disable some subtests or add a new + test that belongs to a test suite not already present in this directory. To + do this, please clone the bits source from + https://gitlab.com/qemu-project/biosbits-bits/-/tree/qemu-bits. + Note that this is the "qemu-bits" branch and not the "bits" branch of the + repository. "qemu-bits" is the branch where we have made all the QEMU + specific enhancements and we must use the source from this branch only. + Copy the test suite/script that needs modification (addition of new tests + or disabling them) from python directory into this directory. For + example, in order to change cpuid related tests, copy the following + file into this directory and rename it with .py2 extension: + https://gitlab.com/qemu-project/biosbits-bits/-/blob/qemu-bits/python/testcpuid.py + Then make your additions and changes here. Therefore, the steps are: + + (a) Copy unmodified test script to this directory from bits source. + (b) Add a SPDX license header. + (c) Perform modifications to the test. + + Commits (a), (b) and (c) preferably should go under separate commits so that + the original test script and the changes we have made are separated and + clear. (a) and (b) can sometimes be combined into a single step. + + The test framework will then use your modified test script to run the test. + No further changes would be needed. Please check the logs to make sure that + appropriate changes have taken effect. + + The tests have an extension .py2 in order to indicate that: + + (a) They are python2.7 based scripts and not python 3 scripts. + (b) They are run from within the bios bits VM and is not subjected to QEMU + build/test python script maintenance and dependency resolutions. + (c) They need not be loaded by the test framework by accident when running + tests. + + +Author: Ani Sinha + diff --git a/docs/devel/testing/blkdebug.rst b/docs/devel/testing/blkdebug.rst new file mode 100644 index 00000000000..63887c9aa9c --- /dev/null +++ b/docs/devel/testing/blkdebug.rst @@ -0,0 +1,177 @@ +Block I/O error injection using ``blkdebug`` +============================================ + +.. + Copyright (C) 2014-2015 Red Hat Inc + + This work is licensed under the terms of the GNU GPL, version 2 or later. See + the COPYING file in the top-level directory. + +The ``blkdebug`` block driver is a rule-based error injection engine. It can be +used to exercise error code paths in block drivers including ``ENOSPC`` (out of +space) and ``EIO``. + +This document gives an overview of the features available in ``blkdebug``. + +Background +---------- +Block drivers have many error code paths that handle I/O errors. Image formats +are especially complex since metadata I/O errors during cluster allocation or +while updating tables happen halfway through request processing and require +discipline to keep image files consistent. 
+ +Error injection allows test cases to trigger I/O errors at specific points. +This way, all error paths can be tested to make sure they are correct. + +Rules +----- +The ``blkdebug`` block driver takes a list of "rules" that tell the error injection +engine when to fail an I/O request. + +Each I/O request is evaluated against the rules. If a rule matches the request +then its "action" is executed. + +Rules can be placed in a configuration file; the configuration file +follows the same .ini-like format used by QEMU's ``-readconfig`` option, and +each section of the file represents a rule. + +The following configuration file defines a single rule:: + + $ cat blkdebug.conf + [inject-error] + event = "read_aio" + errno = "28" + +This rule fails all aio read requests with ``ENOSPC`` (28). Note that the errno +value depends on the host. On Linux, see +``/usr/include/asm-generic/errno-base.h`` for errno values. + +Invoke QEMU as follows:: + + $ qemu-system-x86_64 + -drive if=none,cache=none,file=blkdebug:blkdebug.conf:test.img,id=drive0 \ + -device virtio-blk-pci,drive=drive0,id=virtio-blk-pci0 + +Rules support the following attributes: + +``event`` + which type of operation to match (e.g. ``read_aio``, ``write_aio``, + ``flush_to_os``, ``flush_to_disk``). See `Events`_ for + information on events. + +``state`` + (optional) the engine must be in this state number in order for this + rule to match. See `State transitions`_ for information + on states. + +``errno`` + the numeric errno value to return when a request matches this rule. + The errno values depend on the host since the numeric values are not + standardized in the POSIX specification. + +``sector`` + (optional) a sector number that the request must overlap in order to + match this rule + +``once`` + (optional, default ``off``) only execute this action on the first + matching request + +``immediately`` + (optional, default ``off``) return a NULL ``BlockAIOCB`` + pointer and fail without an errno instead. This + exercises the code path where ``BlockAIOCB`` fails and the + caller's ``BlockCompletionFunc`` is not invoked. + +Events +------ +Block drivers provide information about the type of I/O request they are about +to make so rules can match specific types of requests. For example, the ``qcow2`` +block driver tells ``blkdebug`` when it accesses the L1 table so rules can match +only L1 table accesses and not other metadata or guest data requests. + +The core events are: + +``read_aio`` + guest data read + +``write_aio`` + guest data write + +``flush_to_os`` + write out unwritten block driver state (e.g. cached metadata) + +``flush_to_disk`` + flush the host block device's disk cache + +See ``qapi/block-core.json:BlkdebugEvent`` for the full list of events. +You may need to grep block driver source code to understand the +meaning of specific events. + +State transitions +----------------- +There are cases where more power is needed to match a particular I/O request in +a longer sequence of requests. For example:: + + write_aio + flush_to_disk + write_aio + +How do we match the 2nd ``write_aio`` but not the first? This is where state +transitions come in. + +The error injection engine has an integer called the "state" that always starts +initialized to 1. The state integer is internal to ``blkdebug`` and cannot be +observed from outside but rules can interact with it for powerful matching +behavior. + +Rules can be conditional on the current state and they can transition to a new +state. 
+ +When a rule's "state" attribute is non-zero then the current state must equal +the attribute in order for the rule to match. + +For example, to match the 2nd write_aio:: + + [set-state] + event = "write_aio" + state = "1" + new_state = "2" + + [inject-error] + event = "write_aio" + state = "2" + errno = "5" + +The first ``write_aio`` request matches the ``set-state`` rule and transitions from +state 1 to state 2. Once state 2 has been entered, the ``set-state`` rule no +longer matches since it requires state 1. But the ``inject-error`` rule now +matches the next ``write_aio`` request and injects ``EIO`` (5). + +State transition rules support the following attributes: + +``event`` + which type of operation to match (e.g. ``read_aio``, ``write_aio``, + ``flush_to_os`, ``flush_to_disk``). See `Events`_ for + information on events. + +``state`` + (optional) the engine must be in this state number in order for this + rule to match + +``new_state`` + transition to this state number + +Suspend and resume +------------------ +Exercising code paths in block drivers may require specific ordering amongst +concurrent requests. The "breakpoint" feature allows requests to be halted on +a ``blkdebug`` event and resumed later. This makes it possible to achieve +deterministic ordering when multiple requests are in flight. + +Breakpoints on ``blkdebug`` events are associated with a user-defined ``tag`` string. +This tag serves as an identifier by which the request can be resumed at a later +point. + +See the ``qemu-io(1)`` ``break``, ``resume``, ``remove_break``, and ``wait_break`` +commands for details. diff --git a/docs/devel/testing/blkverify.rst b/docs/devel/testing/blkverify.rst new file mode 100644 index 00000000000..2a71778b5e3 --- /dev/null +++ b/docs/devel/testing/blkverify.rst @@ -0,0 +1,73 @@ +Block driver correctness testing with ``blkverify`` +=================================================== + +Introduction +------------ + +This document describes how to use the ``blkverify`` protocol to test that a block +driver is operating correctly. + +It is difficult to test and debug block drivers against real guests. Often +processes inside the guest will crash because corrupt sectors were read as part +of the executable. Other times obscure errors are raised by a program inside +the guest. These issues are extremely hard to trace back to bugs in the block +driver. + +``blkverify`` solves this problem by catching data corruption inside QEMU the first +time bad data is read and reporting the disk sector that is corrupted. + +How it works +------------ + +The ``blkverify`` protocol has two child block devices, the "test" device and the +"raw" device. Read/write operations are mirrored to both devices so their +state should always be in sync. + +The "raw" device is a raw image, a flat file, that has identical starting +contents to the "test" image. The idea is that the "raw" device will handle +read/write operations correctly and not corrupt data. It can be used as a +reference for comparison against the "test" device. + +After a mirrored read operation completes, ``blkverify`` will compare the data and +raise an error if it is not identical. This makes it possible to catch the +first instance where corrupt data is read. + +Example +------- + +Imagine raw.img has 0xcd repeated throughout its first sector:: + + $ ./qemu-io -c 'read -v 0 512' raw.img + 00000000: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................ + 00000010: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................ 
+ [...] + 000001e0: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................ + 000001f0: cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd cd ................ + read 512/512 bytes at offset 0 + 512.000000 bytes, 1 ops; 0.0000 sec (97.656 MiB/sec and 200000.0000 ops/sec) + +And test.img is corrupt, its first sector is zeroed when it shouldn't be:: + + $ ./qemu-io -c 'read -v 0 512' test.img + 00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ + 00000010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ + [...] + 000001e0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ + 000001f0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ + read 512/512 bytes at offset 0 + 512.000000 bytes, 1 ops; 0.0000 sec (81.380 MiB/sec and 166666.6667 ops/sec) + +This error is caught by ``blkverify``:: + + $ ./qemu-io -c 'read 0 512' blkverify:a.img:b.img + blkverify: read sector_num=0 nb_sectors=4 contents mismatch in sector 0 + +A more realistic scenario is verifying the installation of a guest OS:: + + $ ./qemu-img create raw.img 16G + $ ./qemu-img create -f qcow2 test.qcow2 16G + $ ./qemu-system-x86_64 -cdrom debian.iso \ + -drive file=blkverify:raw.img:test.qcow2 + +If the installation is aborted when ``blkverify`` detects corruption, use ``qemu-io`` +to explore the contents of the disk image at the sector in question. diff --git a/docs/devel/ci-jobs.rst.inc b/docs/devel/testing/ci-jobs.rst.inc similarity index 92% rename from docs/devel/ci-jobs.rst.inc rename to docs/devel/testing/ci-jobs.rst.inc index 3756bbe3554..f1c541cc25b 100644 --- a/docs/devel/ci-jobs.rst.inc +++ b/docs/devel/testing/ci-jobs.rst.inc @@ -126,10 +126,10 @@ QEMU_JOB_PUBLISH The job is for publishing content after a branch has been merged into the upstream default branch. -QEMU_JOB_AVOCADO -~~~~~~~~~~~~~~~~ +QEMU_JOB_FUNCTIONAL +~~~~~~~~~~~~~~~~~~~ -The job runs the Avocado integration test suite +The job runs the functional test suite Contributor controlled runtime variables ---------------------------------------- @@ -149,13 +149,12 @@ the jobs to be manually started from the UI Set this variable to 2 to create the pipelines and run all the jobs immediately, as was the historical behaviour -QEMU_CI_AVOCADO_TESTING -~~~~~~~~~~~~~~~~~~~~~~~ -By default, tests using the Avocado framework are not run automatically in -the pipelines (because multiple artifacts have to be downloaded, and if -these artifacts are not already cached, downloading them make the jobs -reach the timeout limit). Set this variable to have the tests using the -Avocado framework run automatically. +QEMU_CI_FUNCTIONAL +~~~~~~~~~~~~~~~~~~ +By default, tests using the functional framework are not run automatically +in the pipelines (because multiple artifacts have to be downloaded, which +might cause a lot of network traffic). Set this variable to have the tests +using the functional framework run automatically. Other misc variables -------------------- diff --git a/docs/devel/ci-runners.rst.inc b/docs/devel/testing/ci-runners.rst.inc similarity index 100% rename from docs/devel/ci-runners.rst.inc rename to docs/devel/testing/ci-runners.rst.inc diff --git a/docs/devel/testing/ci.rst b/docs/devel/testing/ci.rst new file mode 100644 index 00000000000..e21d39db57a --- /dev/null +++ b/docs/devel/testing/ci.rst @@ -0,0 +1,34 @@ +.. 
_ci: + +Continuous Integration (CI) +=========================== + +Continuous integration (CI) requires the builds of the entire application and +the execution of a comprehensive set of automated tests every time there is a +need to commit any set of changes [1]_. The automated tests are composed +of unit, functional and other tests. + +Most of QEMU's CI is run on GitLab's infrastructure although a number +of other CI services are used for specialised purposes. The most up to +date information about them and their status can be found on the +`project wiki testing page `_. + +These tests are also used as gating tests before merging pull requests. +A gating test restricts the move of code from one stage to another on a +test/deployment pipeline. The step move is granted with approval. The approval +can be a manual intervention or a set of tests succeeding [2]_. + +On QEMU, the gating process happens during the pull request. The approval is +done by the project leader running its own set of tests. The pull request gets +merged when the tests succeed. + +.. include:: ci-jobs.rst.inc +.. include:: ci-runners.rst.inc + +References +---------- + +.. [1] Humble, Jez & Farley, David (2010). Continuous Delivery: + Reliable Software Releases Through Build, Test, and Deployment, p. 55. +.. [2] Humble, Jez & Farley, David (2010). Continuous Delivery: + Reliable Software Releases Through Build, Test, and Deployment, p. 122. diff --git a/docs/devel/testing/functional.rst b/docs/devel/testing/functional.rst new file mode 100644 index 00000000000..3728bab6c0c --- /dev/null +++ b/docs/devel/testing/functional.rst @@ -0,0 +1,380 @@ +.. _checkfunctional-ref: + +Functional testing with Python +============================== + +The ``tests/functional`` directory hosts functional tests written in +Python. They are usually higher level tests, and may interact with +external resources and with various guest operating systems. + +The tests should be written in the style of the Python `unittest`_ framework, +using stdio for the TAP protocol. The folder ``tests/functional/qemu_test`` +provides classes (e.g. the ``QemuBaseTest``, ``QemuUserTest`` and the +``QemuSystemTest`` classes) and utility functions that help to get your test +into the right shape, e.g. by replacing the 'stdout' python object to redirect +the normal output of your test to stderr instead. + +Note that if you don't use one of the QemuBaseTest based classes for your +test, or if you spawn subprocesses from your test, you have to make sure +that there is no TAP-incompatible output written to stdio, e.g. either by +prefixing every line with a "# " to mark the output as a TAP comment, or +e.g. by capturing the stdout output of subprocesses (redirecting it to +stderr is OK). + +Tests based on ``qemu_test.QemuSystemTest`` can easily: + + * Customize the command line arguments given to the convenience + ``self.vm`` attribute (a QEMUMachine instance) + + * Interact with the QEMU monitor, send QMP commands and check + their results + + * Interact with the guest OS, using the convenience console device + (which may be useful to assert the effectiveness and correctness of + command line arguments or QMP commands) + + * Download (and cache) remote data files, such as firmware and kernel + images + +Running tests +------------- + +You can run the functional tests simply by executing: + +.. 
code:: + + make check-functional + +It is also possible to run tests for a certain target only, for example +the following line will only run the tests for the x86_64 target: + +.. code:: + + make check-functional-x86_64 + +To run a single test file without the meson test runner, you can also +execute the file directly by specifying two environment variables first, +the PYTHONPATH that has to include the python folder and the tests/functional +folder of the source tree, and QEMU_TEST_QEMU_BINARY that has to point +to the QEMU binary that should be used for the test. The current working +directory should be your build folder. For example:: + + $ export PYTHONPATH=../python:../tests/functional + $ export QEMU_TEST_QEMU_BINARY=$PWD/qemu-system-x86_64 + $ pyvenv/bin/python3 ../tests/functional/test_file.py + +The test framework will automatically purge any scratch files created during +the tests. If needing to debug a failed test, it is possible to keep these +files around on disk by setting ``QEMU_TEST_KEEP_SCRATCH=1`` as an env +variable. Any preserved files will be deleted the next time the test is run +without this variable set. + +Logging +------- + +The framework collects log files for each test in the build directory +in the following subfolder:: + + /tests/functional//../ + +There are usually three log files: + +* ``base.log`` contains the generic logging information that is written + by the calls to the logging functions in the test code (e.g. by calling + the ``self.log.info()`` or ``self.log.debug()`` functions). +* ``console.log`` contains the output of the serial console of the guest. +* ``default.log`` contains the output of QEMU. This file could be named + differently if the test chooses to use a different identifier for + the guest VM (e.g. when the test spins up multiple VMs). + +Introduction to writing tests +----------------------------- + +The ``tests/functional/qemu_test`` directory provides the ``qemu_test`` +Python module, containing the ``qemu_test.QemuSystemTest`` class. +Here is a simple usage example: + +.. code:: + + #!/usr/bin/env python3 + + from qemu_test import QemuSystemTest + + class Version(QemuSystemTest): + + def test_qmp_human_info_version(self): + self.vm.launch() + res = self.vm.cmd('human-monitor-command', + command_line='info version') + self.assertRegex(res, r'^(\d+\.\d+\.\d)') + + if __name__ == '__main__': + QemuSystemTest.main() + +By providing the "hash bang" line at the beginning of the script, marking +the file as executable and by calling into QemuSystemTest.main(), the test +can also be run stand-alone, without a test runner. OTOH when run via a test +runner, the QemuSystemTest.main() function takes care of running the test +functions in the right fassion (e.g. with TAP output that is required by the +meson test runner). + +The ``qemu_test.QemuSystemTest`` base test class +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``qemu_test.QemuSystemTest`` class has a number of characteristics +that are worth being mentioned. + +First of all, it attempts to give each test a ready to use QEMUMachine +instance, available at ``self.vm``. Because many tests will tweak the +QEMU command line, launching the QEMUMachine (by using ``self.vm.launch()``) +is left to the test writer. + +The base test class has also support for tests with more than one +QEMUMachine. The way to get machines is through the ``self.get_vm()`` +method which will return a QEMUMachine instance. 
The ``self.get_vm()`` +method accepts arguments that will be passed to the QEMUMachine creation +and also an optional ``name`` attribute so you can identify a specific +machine and get it more than once through the tests methods. A simple +and hypothetical example follows: + +.. code:: + + from qemu_test import QemuSystemTest + + class MultipleMachines(QemuSystemTest): + def test_multiple_machines(self): + first_machine = self.get_vm() + second_machine = self.get_vm() + self.get_vm(name='third_machine').launch() + + first_machine.launch() + second_machine.launch() + + first_res = first_machine.cmd( + 'human-monitor-command', + command_line='info version') + + second_res = second_machine.cmd( + 'human-monitor-command', + command_line='info version') + + third_res = self.get_vm(name='third_machine').cmd( + 'human-monitor-command', + command_line='info version') + + self.assertEqual(first_res, second_res, third_res) + +At test "tear down", ``qemu_test.QemuSystemTest`` handles all the QEMUMachines +shutdown. + +QEMUMachine +----------- + +The QEMUMachine API is already widely used in the Python iotests, +device-crash-test and other Python scripts. It's a wrapper around the +execution of a QEMU binary, giving its users: + + * the ability to set command line arguments to be given to the QEMU + binary + + * a ready to use QMP connection and interface, which can be used to + send commands and inspect its results, as well as asynchronous + events + + * convenience methods to set commonly used command line arguments in + a more succinct and intuitive way + +QEMU binary selection +^^^^^^^^^^^^^^^^^^^^^ + +The QEMU binary used for the ``self.vm`` QEMUMachine instance will +primarily depend on the value of the ``qemu_bin`` instance attribute. +If it is not explicitly set by the test code, its default value will +be the result the QEMU_TEST_QEMU_BINARY environment variable. + +Debugging hung QEMU +^^^^^^^^^^^^^^^^^^^ + +When test cases go wrong it may be helpful to debug a stalled QEMU +process. While the QEMUMachine class owns the primary QMP monitor +socket, it is possible to request a second QMP monitor be created +by setting the ``QEMU_TEST_QMP_BACKDOOR`` env variable to refer +to a UNIX socket name. The ``qmp-shell`` command can then be +attached to the stalled QEMU to examine its live state. + +Attribute reference +------------------- + +QemuBaseTest +^^^^^^^^^^^^ + +The following attributes are available on any ``qemu_test.QemuBaseTest`` +instance. + +arch +"""" + +The target architecture of the QEMU binary. + +Tests are also free to use this attribute value, for their own needs. +A test may, for instance, use this value when selecting the architecture +of a kernel or disk image to boot a VM with. + +qemu_bin +"""""""" + +The preserved value of the ``QEMU_TEST_QEMU_BINARY`` environment +variable. + +QemuUserTest +^^^^^^^^^^^^ + +The QemuUserTest class can be used for running an executable via the +usermode emulation binaries. + +QemuSystemTest +^^^^^^^^^^^^^^ + +The QemuSystemTest class can be used for running tests via one of the +qemu-system-* binaries. + +vm +"" + +A QEMUMachine instance, initially configured according to the given +``qemu_bin`` parameter. + +cpu +""" + +The cpu model that will be set to all QEMUMachine instances created +by the test. + +machine +""""""" + +The machine type that will be set to all QEMUMachine instances created +by the test. 
By using the set_machine() function of the QemuSystemTest +class to set this attribute, you can automatically check whether the +machine is available to skip the test in case it is not built into the +QEMU binary. + +Asset handling +-------------- + +Many functional tests download assets (e.g. Linux kernels, initrds, +firmware images, etc.) from the internet to be able to run tests with +them. This imposes additional challenges to the test framework. + +First there is the problem that some people might not have an +unconstrained internet connection, so such tests should not be run by +default when running ``make check``. To accomplish this situation, +the tests that download files should only be added to the "thorough" +speed mode in the meson.build file, while the "quick" speed mode is +fine for functional tests that can be run without downloading files. +``make check`` then only runs the quick functional tests along with +the other quick tests from the other test suites. If you choose to +run only ``make check-functional``, the "thorough" tests will be +executed, too. And to run all functional tests along with the others, +you can use something like:: + + make -j$(nproc) check SPEED=thorough + +The second problem with downloading files from the internet are time +constraints. The time for downloading files should not be taken into +account when the test is running and the timeout of the test is ticking +(since downloading can be very slow, depending on the network bandwidth). +This problem is solved by downloading the assets ahead of time, before +the tests are run. This pre-caching is done with the qemu_test.Asset +class. To use it in your test, declare an asset in your test class with +its URL and SHA256 checksum like this:: + + from qemu_test import Asset + + ASSET_somename = Asset( + ('https://www.qemu.org/assets/images/qemu_head_200.png'), + '34b74cad46ea28a2966c1d04e102510daf1fd73e6582b6b74523940d5da029dd') + +In your test function, you can then get the file name of the cached +asset like this:: + + def test_function(self): + file_path = self.ASSET_somename.fetch() + +The pre-caching will be done automatically when running +``make check-functional`` (but not when running e.g. +``make check-functional-``). In case you just want to download +the assets without running the tests, you can do so by running:: + + make precache-functional + +The cache is populated in the ``~/.cache/qemu/download`` directory by +default, but the location can be changed by setting the +``QEMU_TEST_CACHE_DIR`` environment variable. + +Skipping tests +-------------- + +Since the test framework is based on the common Python unittest framework, +you can use the usual Python decorators which allow for easily skipping +tests running under certain conditions, for example, on the lack of a binary +on the test system or when the running environment is a CI system. For further +information about those decorators, please refer to: + + https://docs.python.org/3/library/unittest.html#skipping-tests-and-expected-failures + +While the conditions for skipping tests are often specifics of each one, there +are recurring scenarios identified by the QEMU developers and the use of +environment variables became a kind of standard way to enable/disable tests. 
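+
+A minimal sketch of what such an environment variable guard typically looks
+like in a test follows (the class name, test name and skip reason are
+illustrative; ``QEMU_TEST_ALLOW_LARGE_STORAGE`` itself is one of the
+variables listed below):
+
+.. code::
+
+    import os
+    from unittest import skipUnless
+
+    from qemu_test import QemuSystemTest
+
+    class LargeImageExample(QemuSystemTest):
+
+        @skipUnless(os.getenv('QEMU_TEST_ALLOW_LARGE_STORAGE'),
+                    'Test requires a large amount of scratch disk space')
+        def test_boot_large_image(self):
+            # Only reached when the variable is exported in the environment
+            self.vm.launch()
+
+    if __name__ == '__main__':
+        QemuSystemTest.main()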
+ +Here is a list of the most used variables: + +QEMU_TEST_ALLOW_LARGE_STORAGE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Tests which are going to fetch or produce assets considered *large* are not +going to run unless that ``QEMU_TEST_ALLOW_LARGE_STORAGE=1`` is exported on +the environment. + +The definition of *large* is a bit arbitrary here, but it usually means an +asset which occupies at least 1GB of size on disk when uncompressed. + +QEMU_TEST_ALLOW_UNTRUSTED_CODE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +There are tests which will boot a kernel image or firmware that can be +considered not safe to run on the developer's workstation, thus they are +skipped by default. The definition of *not safe* is also arbitrary but +usually it means a blob which either its source or build process aren't +public available. + +You should export ``QEMU_TEST_ALLOW_UNTRUSTED_CODE=1`` on the environment in +order to allow tests which make use of those kind of assets. + +QEMU_TEST_FLAKY_TESTS +^^^^^^^^^^^^^^^^^^^^^ +Some tests are not working reliably and thus are disabled by default. +This includes tests that don't run reliably on GitLab's CI which +usually expose real issues that are rarely seen on developer machines +due to the constraints of the CI environment. If you encounter a +similar situation then raise a bug and then mark the test as shown on +the code snippet below: + +.. code:: + + # See https://gitlab.com/qemu-project/qemu/-/issues/nnnn + @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') + def test(self): + do_something() + +Tests should not live in this state forever and should either be fixed +or eventually removed. + +QEMU_TEST_ALLOW_SLOW +^^^^^^^^^^^^^^^^^^^^ +Tests that have a very long runtime and might run into timeout issues +e.g. if the QEMU binary has been compiled with debugging options enabled. +To avoid these timeout issues by default and to save some precious CPU +cycles during normal testing, such tests are disabled by default unless +the QEMU_TEST_ALLOW_SLOW environment variable has been set. + + +.. _unittest: https://docs.python.org/3/library/unittest.html diff --git a/docs/devel/fuzzing.rst b/docs/devel/testing/fuzzing.rst similarity index 97% rename from docs/devel/fuzzing.rst rename to docs/devel/testing/fuzzing.rst index 3bfcb33fc4b..c3ac084311b 100644 --- a/docs/devel/fuzzing.rst +++ b/docs/devel/testing/fuzzing.rst @@ -21,11 +21,12 @@ Building the fuzzers To build the fuzzers, install a recent version of clang: Configure with (substitute the clang binaries with the version you installed). -Here, enable-sanitizers, is optional but it allows us to reliably detect bugs -such as out-of-bounds accesses, use-after-frees, double-frees etc.:: +Here, enable-asan and enable-ubsan are optional but they allow us to reliably +detect bugs such as out-of-bounds accesses, uses-after-free, double-frees +etc.:: - CC=clang-8 CXX=clang++-8 /path/to/configure --enable-fuzzing \ - --enable-sanitizers + CC=clang-8 CXX=clang++-8 /path/to/configure \ + --enable-fuzzing --enable-asan --enable-ubsan Fuzz targets are built similarly to system targets:: diff --git a/docs/devel/testing/index.rst b/docs/devel/testing/index.rst new file mode 100644 index 00000000000..ccc2fc6cbc1 --- /dev/null +++ b/docs/devel/testing/index.rst @@ -0,0 +1,17 @@ +Testing QEMU +------------ + +Details about how to test QEMU and how it is integrated into our CI +testing infrastructure. + +.. 
toctree:: + :maxdepth: 3 + + main + qtest + functional + acpi-bits + ci + fuzzing + blkdebug + blkverify diff --git a/docs/devel/testing/main.rst b/docs/devel/testing/main.rst new file mode 100644 index 00000000000..2b5cb0c1480 --- /dev/null +++ b/docs/devel/testing/main.rst @@ -0,0 +1,1059 @@ +.. _testing: + +Testing in QEMU +=============== + +QEMU's testing infrastructure is fairly complex as it covers +everything from unit testing and exercising specific sub-systems all +the way to full blown functional tests. To get an overview of the +tests you can run ``make check-help`` from either the source or build +tree. + +Most (but not all) tests are also integrated as an automated test into +the meson build system so can be run directly from the build tree, +for example:: + + [./pyvenv/bin/]meson test --suite qemu:softfloat + +will run just the softfloat tests. + +An automated test is written with one of the test frameworks using its +generic test functions/classes. The test framework can run the tests and +report their success or failure [1]_. + +An automated test has essentially three parts: + +1. The test initialization of the parameters, where the expected parameters, + like inputs and expected results, are set up; +2. The call to the code that should be tested; +3. An assertion, comparing the result from the previous call with the expected + result set during the initialization of the parameters. If the result + matches the expected result, the test has been successful; otherwise, it has + failed. + +The rest of this document will cover the details for specific test +groups. + +Testing with "make check" +------------------------- + +The "make check" testing family includes most of the C based tests in QEMU. + +The usual way to run these tests is: + +.. code:: + + make check + +which includes QAPI schema tests, unit tests, QTests and some iotests. +Different sub-types of "make check" tests will be explained below. + +Before running tests, it is best to build QEMU programs first. Some tests +expect the executables to exist and will fail with obscure messages if they +cannot find them. + +.. _unit-tests: + +Unit tests +~~~~~~~~~~ + +A unit test is responsible for exercising individual software components as a +unit, like interfaces, data structures, and functionality, uncovering errors +within the boundaries of a component. The verification effort is in the +smallest software unit and focuses on the internal processing logic and data +structures. A test case of unit tests should be designed to uncover errors +due to erroneous computations, incorrect comparisons, or improper control +flow [2]_. + +In QEMU, unit tests can be invoked with ``make check-unit``. They are +simple C tests that typically link to individual QEMU object files and +exercise them by calling exported functions. + +If you are writing new code in QEMU, consider adding a unit test, especially +for utility modules that are relatively stateless or have few dependencies. To +add a new unit test: + +1. Create a new source file. For example, ``tests/unit/foo-test.c``. + +2. Write the test. Normally you would include the header file which exports + the module API, then verify the interface behaves as expected from your + test. The test code should be organized with the glib testing framework. + Copying and modifying an existing test is usually a good idea. + +3. Add the test to ``tests/unit/meson.build``. The unit tests are listed in a + dictionary called ``tests``. 
The values are any additional sources and + dependencies to be linked with the test. For a simple test whose source + is in ``tests/unit/foo-test.c``, it is enough to add an entry like:: + + { + ... + 'foo-test': [], + ... + } + +Since unit tests don't require environment variables, the simplest way to debug +a unit test failure is often directly invoking it or even running it under +``gdb``. However there can still be differences in behavior between ``make`` +invocations and your manual run, due to ``$MALLOC_PERTURB_`` environment +variable (which affects memory reclamation and catches invalid pointers better) +and gtester options. If necessary, you can run + +.. code:: + + make check-unit V=1 + +and copy the actual command line which executes the unit test, then run +it from the command line. + +QTest +~~~~~ + +QTest is a device emulation testing framework. It can be very useful to test +device models; it could also control certain aspects of QEMU (such as virtual +clock stepping), with a special purpose "qtest" protocol. Refer to +:doc:`qtest` for more details. + +QTest cases can be executed with + +.. code:: + + make check-qtest + +Writing portable test cases +~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Both unit tests and qtests can run on POSIX hosts as well as Windows hosts. +Care must be taken when writing portable test cases that can be built and run +successfully on various hosts. The following list shows some best practices: + +* Use portable APIs from glib whenever necessary, e.g.: g_setenv(), + g_mkdtemp(), g_mkdir(). +* Avoid using hardcoded /tmp for temporary file directory. + Use g_get_tmp_dir() instead. +* Bear in mind that Windows has different special string representation for + stdin/stdout/stderr and null devices. For example if your test case uses + "/dev/fd/2" and "/dev/null" on Linux, remember to use "2" and "nul" on + Windows instead. Also IO redirection does not work on Windows, so avoid + using "2>nul" whenever necessary. +* If your test cases uses the blkdebug feature, use relative path to pass + the config and image file paths in the command line as Windows absolute + path contains the delimiter ":" which will confuse the blkdebug parser. +* Use double quotes in your extra QEMU command line in your test cases + instead of single quotes, as Windows does not drop single quotes when + passing the command line to QEMU. +* Windows opens a file in text mode by default, while a POSIX compliant + implementation treats text files and binary files the same. So if your + test cases opens a file to write some data and later wants to compare the + written data with the original one, be sure to pass the letter 'b' as + part of the mode string to fopen(), or O_BINARY flag for the open() call. +* If a certain test case can only run on POSIX or Linux hosts, use a proper + #ifdef in the codes. If the whole test suite cannot run on Windows, disable + the build in the meson.build file. + +.. _qapi-tests: + +QAPI schema tests +~~~~~~~~~~~~~~~~~ + +The QAPI schema tests validate the QAPI parser used by QMP, by feeding +predefined input to the parser and comparing the result with the reference +output. + +The input/output data is managed under the ``tests/qapi-schema`` directory. 
+Each test case includes four files that have a common base name: + + * ``${casename}.json`` - the file contains the JSON input for feeding the + parser + * ``${casename}.out`` - the file contains the expected stdout from the parser + * ``${casename}.err`` - the file contains the expected stderr from the parser + * ``${casename}.exit`` - the expected error code + +Consider adding a new QAPI schema test when you are making a change on the QAPI +parser (either fixing a bug or extending/modifying the syntax). To do this: + +1. Add four files for the new case as explained above. For example: + + ``$EDITOR tests/qapi-schema/foo.{json,out,err,exit}``. + +2. Add the new test in ``tests/Makefile.include``. For example: + + ``qapi-schema += foo.json`` + +check-block +~~~~~~~~~~~ + +``make check-block`` runs a subset of the block layer iotests (the tests that +are in the "auto" group). +See the "QEMU iotests" section below for more information. + +.. _qemu-iotests: + +QEMU iotests +------------ + +QEMU iotests, under the directory ``tests/qemu-iotests``, is the testing +framework widely used to test block layer related features. It is higher level +than "make check" tests and 99% of the code is written in bash or Python +scripts. The testing success criteria is golden output comparison, and the +test files are named with numbers. + +To run iotests, make sure QEMU is built successfully, then switch to the +``tests/qemu-iotests`` directory under the build directory, and run ``./check`` +with desired arguments from there. + +By default, "raw" format and "file" protocol is used; all tests will be +executed, except the unsupported ones. You can override the format and protocol +with arguments: + +.. code:: + + # test with qcow2 format + ./check -qcow2 + # or test a different protocol + ./check -nbd + +It's also possible to list test numbers explicitly: + +.. code:: + + # run selected cases with qcow2 format + ./check -qcow2 001 030 153 + +Cache mode can be selected with the "-c" option, which may help reveal bugs +that are specific to certain cache mode. + +More options are supported by the ``./check`` script, run ``./check -h`` for +help. + +Writing a new test case +~~~~~~~~~~~~~~~~~~~~~~~ + +Consider writing a tests case when you are making any changes to the block +layer. An iotest case is usually the choice for that. There are already many +test cases, so it is possible that extending one of them may achieve the goal +and save the boilerplate to create one. (Unfortunately, there isn't a 100% +reliable way to find a related one out of hundreds of tests. One approach is +using ``git grep``.) + +Usually an iotest case consists of two files. One is an executable that +produces output to stdout and stderr, the other is the expected reference +output. They are given the same number in file names. E.g. Test script ``055`` +and reference output ``055.out``. + +In rare cases, when outputs differ between cache mode ``none`` and others, a +``.out.nocache`` file is added. In other cases, when outputs differ between +image formats, more than one ``.out`` files are created ending with the +respective format names, e.g. ``178.out.qcow2`` and ``178.out.raw``. + +There isn't a hard rule about how to write a test script, but a new test is +usually a (copy and) modification of an existing case. There are a few +commonly used ways to create a test: + +* A Bash script. 
It will make use of several environmental variables related + to the testing procedure, and could source a group of ``common.*`` libraries + for some common helper routines. + +* A Python unittest script. Import ``iotests`` and create a subclass of + ``iotests.QMPTestCase``, then call ``iotests.main`` method. The downside of + this approach is that the output is too scarce, and the script is considered + harder to debug. + +* A simple Python script without using unittest module. This could also import + ``iotests`` for launching QEMU and utilities etc, but it doesn't inherit + from ``iotests.QMPTestCase`` therefore doesn't use the Python unittest + execution. This is a combination of 1 and 2. + +Pick the language per your preference since both Bash and Python have +comparable library support for invoking and interacting with QEMU programs. If +you opt for Python, it is strongly recommended to write Python 3 compatible +code. + +Both Python and Bash frameworks in iotests provide helpers to manage test +images. They can be used to create and clean up images under the test +directory. If no I/O or any protocol specific feature is needed, it is often +more convenient to use the pseudo block driver, ``null-co://``, as the test +image, which doesn't require image creation or cleaning up. Avoid system-wide +devices or files whenever possible, such as ``/dev/null`` or ``/dev/zero``. +Otherwise, image locking implications have to be considered. For example, +another application on the host may have locked the file, possibly leading to a +test failure. If using such devices are explicitly desired, consider adding +``locking=off`` option to disable image locking. + +Debugging a test case +~~~~~~~~~~~~~~~~~~~~~ + +The following options to the ``check`` script can be useful when debugging +a failing test: + +* ``-gdb`` wraps every QEMU invocation in a ``gdbserver``, which waits for a + connection from a gdb client. The options given to ``gdbserver`` (e.g. the + address on which to listen for connections) are taken from the ``$GDB_OPTIONS`` + environment variable. By default (if ``$GDB_OPTIONS`` is empty), it listens on + ``localhost:12345``. + It is possible to connect to it for example with + ``gdb -iex "target remote $addr"``, where ``$addr`` is the address + ``gdbserver`` listens on. + If the ``-gdb`` option is not used, ``$GDB_OPTIONS`` is ignored, + regardless of whether it is set or not. + +* ``-valgrind`` attaches a valgrind instance to QEMU. If it detects + warnings, it will print and save the log in + ``$TEST_DIR/.valgrind``. + The final command line will be ``valgrind --log-file=$TEST_DIR/ + .valgrind --error-exitcode=99 $QEMU ...`` + +* ``-d`` (debug) just increases the logging verbosity, showing + for example the QMP commands and answers. + +* ``-p`` (print) redirects QEMU’s stdout and stderr to the test output, + instead of saving it into a log file in + ``$TEST_DIR/qemu-machine-``. + +Test case groups +~~~~~~~~~~~~~~~~ + +"Tests may belong to one or more test groups, which are defined in the form +of a comment in the test source file. By convention, test groups are listed +in the second line of the test file, after the "#!/..." line, like this: + +.. code:: + + #!/usr/bin/env python3 + # group: auto quick + # + ... + +Another way of defining groups is creating the tests/qemu-iotests/group.local +file. This should be used only for downstream (this file should never appear +in upstream). 
This file may be used for defining some downstream test groups +or for temporarily disabling tests, like this: + +.. code:: + + # groups for some company downstream process + # + # ci - tests to run on build + # down - our downstream tests, not for upstream + # + # Format of each line is: + # TEST_NAME TEST_GROUP [TEST_GROUP ]... + + 013 ci + 210 disabled + 215 disabled + our-ugly-workaround-test down ci + +Note that the following group names have a special meaning: + +- quick: Tests in this group should finish within a few seconds. + +- auto: Tests in this group are used during "make check" and should be + runnable in any case. That means they should run with every QEMU binary + (also non-x86), with every QEMU configuration (i.e. must not fail if + an optional feature is not compiled in - but reporting a "skip" is ok), + work at least with the qcow2 file format, work with all kind of host + filesystems and users (e.g. "nobody" or "root") and must not take too + much memory and disk space (since CI pipelines tend to fail otherwise). + +- disabled: Tests in this group are disabled and ignored by check. + +.. _container-ref: + +Container based tests +--------------------- + +Introduction +~~~~~~~~~~~~ + +The container testing framework in QEMU utilizes public images to +build and test QEMU in predefined and widely accessible Linux +environments. This makes it possible to expand the test coverage +across distros, toolchain flavors and library versions. The support +was originally written for Docker although we also support Podman as +an alternative container runtime. Although many of the target +names and scripts are prefixed with "docker" the system will +automatically run on whichever is configured. + +The container images are also used to augment the generation of tests +for testing TCG. See :ref:`checktcg-ref` for more details. + +Docker Prerequisites +~~~~~~~~~~~~~~~~~~~~ + +Install "docker" with the system package manager and start the Docker service +on your development machine, then make sure you have the privilege to run +Docker commands. Typically it means setting up passwordless ``sudo docker`` +command or login as root. For example: + +.. code:: + + $ sudo yum install docker + $ # or `apt-get install docker` for Ubuntu, etc. + $ sudo systemctl start docker + $ sudo docker ps + +The last command should print an empty table, to verify the system is ready. + +An alternative method to set up permissions is by adding the current user to +"docker" group and making the docker daemon socket file (by default +``/var/run/docker.sock``) accessible to the group: + +.. code:: + + $ sudo groupadd docker + $ sudo usermod $USER -a -G docker + $ sudo chown :docker /var/run/docker.sock + +Note that any one of above configurations makes it possible for the user to +exploit the whole host with Docker bind mounting or other privileged +operations. So only do it on development machines. + +Podman Prerequisites +~~~~~~~~~~~~~~~~~~~~ + +Install "podman" with the system package manager. + +.. code:: + + $ sudo dnf install podman + $ podman ps + +The last command should print an empty table, to verify the system is ready. + +Quickstart +~~~~~~~~~~ + +From source tree, type ``make docker-help`` to see the help. Testing +can be started without configuring or building QEMU (``configure`` and +``make`` are done in the container, with parameters defined by the +make target): + +.. 
code::
+
+  make docker-test-build@debian
+
+This will create a container instance using the ``debian`` image (the image
+is downloaded and initialized automatically), in which the ``test-build`` job
+is executed.
+
+Registry
+~~~~~~~~
+
+The QEMU project has a container registry hosted by GitLab at
+``registry.gitlab.com/qemu-project/qemu`` which will automatically be
+used to pull in pre-built layers. This avoids unnecessary strain on
+the distro archives created by multiple developers running the same
+container build steps over and over again. This can be overridden
+locally by using the ``NOCACHE`` build option:
+
+.. code::
+
+  make docker-image-debian-arm64-cross NOCACHE=1
+
+Images
+~~~~~~
+
+Along with many other images, the ``debian`` image is defined in a Dockerfile
+in ``tests/docker/dockerfiles/``, called ``debian.docker``. The
+``make docker-help`` command lists all the available images.
+
+A ``.pre`` script can be added beside the ``.docker`` file, which will be
+executed before building the image under the build context directory. This is
+mainly used to do necessary host-side setup. One such setup is ``binfmt_misc``,
+for example, to make qemu-user powered cross build containers work.
+
+Most of the existing Dockerfiles were written by hand, simply by creating
+a new ``.docker`` file under the ``tests/docker/dockerfiles/`` directory.
+This has led to an inconsistent set of packages being present across the
+different containers.
+
+Thus going forward, QEMU is aiming to automatically generate the Dockerfiles
+using the ``lcitool`` program provided by the ``libvirt-ci`` project:
+
+  https://gitlab.com/libvirt/libvirt-ci
+
+``libvirt-ci`` contains an ``lcitool`` program as well as a list of
+mappings to distribution package names for a wide variety of third
+party projects. ``lcitool`` applies the mappings to a list of build
+pre-requisites in ``tests/lcitool/projects/qemu.yml``, determines the
+list of native packages to install on each distribution, and uses them
+to generate build environments (dockerfiles and Cirrus CI variable files)
+that are consistent across OS distributions.
+
+
+Adding new build pre-requisites
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When preparing a patch series that adds a new build
+pre-requisite to QEMU, the prerequisite should be added to
+``tests/lcitool/projects/qemu.yml`` in order to make the dependency
+available in the CI build environments.
+
+In the simple case where the pre-requisite is already known to ``libvirt-ci``
+the following steps are needed:
+
+ * Edit ``tests/lcitool/projects/qemu.yml`` and add the pre-requisite
+
+ * Run ``make lcitool-refresh`` to re-generate all relevant build environment
+   manifests
+
+It may be that ``libvirt-ci`` does not know about the new pre-requisite.
+If that is the case, some extra preparation steps will be required
+first to contribute the mapping to the ``libvirt-ci`` project:
+
+ * Fork the ``libvirt-ci`` project on gitlab
+
+ * Add an entry for the new build prerequisite to
+   ``lcitool/facts/mappings.yml``, listing its native package name on as
+   many OS distros as practical. Run ``python -m pytest --regenerate-output``
+   and check that the changes are correct.
+
+ * Commit the ``mappings.yml`` change together with the regenerated test
+   files, and submit a merge request to the ``libvirt-ci`` project.
+   Please note in the description that this is a new build pre-requisite
+   desired for use with QEMU.
+ + * CI pipeline will run to validate that the changes to ``mappings.yml`` + are correct, by attempting to install the newly listed package on + all OS distributions supported by ``libvirt-ci``. + + * Once the merge request is accepted, go back to QEMU and update + the ``tests/lcitool/libvirt-ci`` submodule to point to a commit that + contains the ``mappings.yml`` update. Then add the prerequisite and + run ``make lcitool-refresh``. + + * Please also trigger gitlab container generation pipelines on your change + for as many OS distros as practical to make sure that there are no + obvious breakages when adding the new pre-requisite. Please see + `CI `__ documentation + page on how to trigger gitlab CI pipelines on your change. + +For enterprise distros that default to old, end-of-life versions of the +Python runtime, QEMU uses a separate set of mappings that work with more +recent versions. These can be found in ``tests/lcitool/mappings.yml``. +Modifying this file should not be necessary unless the new pre-requisite +is a Python library or tool. + + +Adding new OS distros +^^^^^^^^^^^^^^^^^^^^^ + +In some cases ``libvirt-ci`` will not know about the OS distro that is +desired to be tested. Before adding a new OS distro, discuss the proposed +addition: + + * Send a mail to qemu-devel, copying people listed in the + MAINTAINERS file for ``Build and test automation``. + + There are limited CI compute resources available to QEMU, so the + cost/benefit tradeoff of adding new OS distros needs to be considered. + + * File an issue at https://gitlab.com/libvirt/libvirt-ci/-/issues + pointing to the qemu-devel mail thread in the archives. + + This alerts other people who might be interested in the work + to avoid duplication, as well as to get feedback from libvirt-ci + maintainers on any tips to ease the addition + +Assuming there is agreement to add a new OS distro then + + * Fork the ``libvirt-ci`` project on gitlab + + * Add metadata under ``lcitool/facts/targets/`` for the new OS + distro. There might be code changes required if the OS distro + uses a package format not currently known. The ``libvirt-ci`` + maintainers can advise on this when the issue is filed. + + * Edit the ``lcitool/facts/mappings.yml`` change to add entries for + the new OS, listing the native package names for as many packages + as practical. Run ``python -m pytest --regenerate-output`` and + check that the changes are correct. + + * Commit the changes to ``lcitool/facts`` and the regenerated test + files, and submit a merge request to the ``libvirt-ci`` project. + Please note in the description that this is a new build pre-requisite + desired for use with QEMU + + * CI pipeline will run to validate that the changes to ``mappings.yml`` + are correct, by attempting to install the newly listed package on + all OS distributions supported by ``libvirt-ci``. + + * Once the merge request is accepted, go back to QEMU and update + the ``libvirt-ci`` submodule to point to a commit that contains + the ``mappings.yml`` update. + + +Tests +~~~~~ + +Different tests are added to cover various configurations to build and test +QEMU. Docker tests are the executables under ``tests/docker`` named +``test-*``. They are typically shell scripts and are built on top of a shell +library, ``tests/docker/common.rc``, which provides helpers to find the QEMU +source and build it. + +The full list of tests is printed in the ``make docker-help`` help. 
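+
+As an illustration, a new test usually follows the same pattern as the
+existing scripts: source ``common.rc`` and call its helpers. The sketch
+below is not a real test; it assumes helper functions named ``build_qemu``
+and ``check_qemu`` and the ``$BUILD_DIR`` variable, so check
+``tests/docker/common.rc`` for the exact names available in your tree:
+
+.. code::
+
+  #!/bin/bash -e
+  #
+  # tests/docker/test-example (illustrative sketch only)
+  #
+  . common.rc
+
+  cd "$BUILD_DIR"
+
+  # configure and build QEMU inside the container
+  build_qemu
+  # run "make check" against the freshly built tree
+  check_qemu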
+ +Debugging a Docker test failure +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When CI tasks, maintainers or yourself report a Docker test failure, follow the +below steps to debug it: + +1. Locally reproduce the failure with the reported command line. E.g. run + ``make docker-test-mingw@fedora-win64-cross J=8``. +2. Add "V=1" to the command line, try again, to see the verbose output. +3. Further add "DEBUG=1" to the command line. This will pause in a shell prompt + in the container right before testing starts. You could either manually + build QEMU and run tests from there, or press :kbd:`Ctrl+d` to let the Docker + testing continue. +4. If you press :kbd:`Ctrl+d`, the same building and testing procedure will begin, and + will hopefully run into the error again. After that, you will be dropped to + the prompt for debug. + +Options +~~~~~~~ + +Various options can be used to affect how Docker tests are done. The full +list is in the ``make docker`` help text. The frequently used ones are: + +* ``V=1``: the same as in top level ``make``. It will be propagated to the + container and enable verbose output. +* ``J=$N``: the number of parallel tasks in make commands in the container, + similar to the ``-j $N`` option in top level ``make``. (The ``-j`` option in + top level ``make`` will not be propagated into the container.) +* ``DEBUG=1``: enables debug. See the previous "Debugging a Docker test + failure" section. + +Thread Sanitizer +---------------- + +Thread Sanitizer (TSan) is a tool which can detect data races. QEMU supports +building and testing with this tool. + +For more information on TSan: + +https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual + +Thread Sanitizer in Docker +~~~~~~~~~~~~~~~~~~~~~~~~~~ +TSan is currently supported in the ubuntu2204 docker. + +The test-tsan test will build using TSan and then run make check. + +.. code:: + + make docker-test-tsan@ubuntu2204 + +TSan warnings under docker are placed in files located at build/tsan/. + +We recommend using DEBUG=1 to allow launching the test from inside the docker, +and to allow review of the warnings generated by TSan. + +Building and Testing with TSan +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It is possible to build and test with TSan, with a few additional steps. +These steps are normally done automatically in the docker. + +TSan is supported for clang and gcc. +One particularity of sanitizers is that all the code, including shared objects +dependencies, should be built with it. +In the case of TSan, any synchronization primitive from glib (GMutex for +instance) will not be recognized, and will lead to false positives. + +To build a tsan version of glib: + +.. code:: + + $ git clone --depth=1 --branch=2.81.0 https://github.com/GNOME/glib.git + $ cd glib + $ CFLAGS="-O2 -g -fsanitize=thread" meson build + $ ninja -C build + +To configure the build for TSan: + +.. code:: + + ../configure --enable-tsan \ + --disable-werror --extra-cflags="-O0" + +When executing qemu, don't forget to point to tsan glib: + +.. code:: + + $ glib_dir=/path/to/glib + $ export LD_LIBRARY_PATH=$glib_dir/build/gio:$glib_dir/build/glib:$glib_dir/build/gmodule:$glib_dir/build/gobject:$glib_dir/build/gthread + # check correct version is used + $ ldd build/qemu-x86_64 | grep glib + $ qemu-system-x86_64 ... + +The runtime behavior of TSAN is controlled by the TSAN_OPTIONS environment +variable. + +More information on the TSAN_OPTIONS can be found here: + +https://github.com/google/sanitizers/wiki/ThreadSanitizerFlags + +For example: + +.. 
code:: + + export TSAN_OPTIONS=suppressions=/tests/tsan/suppressions.tsan \ + detect_deadlocks=false history_size=7 exitcode=0 \ + log_path=/tsan/tsan_warning + +The above exitcode=0 has TSan continue without error if any warnings are found. +This allows for running the test and then checking the warnings afterwards. +If you want TSan to stop and exit with error on warnings, use exitcode=66. + +.. _tsan-suppressions: + +TSan Suppressions +~~~~~~~~~~~~~~~~~ +Keep in mind that for any data race warning, although there might be a data race +detected by TSan, there might be no actual bug here. TSan provides several +different mechanisms for suppressing warnings. In general it is recommended +to fix the code if possible to eliminate the data race rather than suppress +the warning. + +A few important files for suppressing warnings are: + +tests/tsan/suppressions.tsan - Has TSan warnings we wish to suppress at runtime. +The comment on each suppression will typically indicate why we are +suppressing it. More information on the file format can be found here: + +https://github.com/google/sanitizers/wiki/ThreadSanitizerSuppressions + +tests/tsan/ignore.tsan - Has TSan warnings we wish to disable +at compile time for test or debug. +Add flags to configure to enable: + +"--extra-cflags=-fsanitize-blacklist=/tests/tsan/ignore.tsan" + +More information on the file format can be found here under "Blacklist Format": + +https://github.com/google/sanitizers/wiki/ThreadSanitizerFlags + +TSan Annotations +~~~~~~~~~~~~~~~~ +include/qemu/tsan.h defines annotations. See this file for more descriptions +of the annotations themselves. Annotations can be used to suppress +TSan warnings or give TSan more information so that it can detect proper +relationships between accesses of data. + +Annotation examples can be found here: + +https://github.com/llvm/llvm-project/tree/master/compiler-rt/test/tsan/ + +Good files to start with are: annotate_happens_before.cpp and ignore_race.cpp + +The full set of annotations can be found here: + +https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp + +docker-binfmt-image-debian-% targets +------------------------------------ + +It is possible to combine Debian's bootstrap scripts with a configured +``binfmt_misc`` to bootstrap a number of Debian's distros including +experimental ports not yet supported by a released OS. This can +simplify setting up a rootfs by using docker to contain the foreign +rootfs rather than manually invoking chroot. + +Setting up ``binfmt_misc`` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can use the script ``qemu-binfmt-conf.sh`` to configure a QEMU +user binary to automatically run binaries for the foreign +architecture. While the scripts will try their best to work with +dynamically linked QEMU's a statically linked one will present less +potential complications when copying into the docker image. Modern +kernels support the ``F`` (fix binary) flag which will open the QEMU +executable on setup and avoids the need to find and re-open in the +chroot environment. This is triggered with the ``--persistent`` flag. 
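+
+For instance, a statically linked ``qemu-hppa`` installed under ``/usr/bin``
+could be registered persistently with something along these lines (the exact
+set of flags accepted by the script may differ between QEMU versions):
+
+.. code::
+
+  $ sudo ./scripts/qemu-binfmt-conf.sh --qemu-path /usr/bin --persistent yes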
+ +Example invocation +~~~~~~~~~~~~~~~~~~ + +For example to setup the HPPA ports builds of Debian:: + + make docker-binfmt-image-debian-sid-hppa \ + DEB_TYPE=sid DEB_ARCH=hppa \ + DEB_URL=http://ftp.ports.debian.org/debian-ports/ \ + DEB_KEYRING=/usr/share/keyrings/debian-ports-archive-keyring.gpg \ + EXECUTABLE=(pwd)/qemu-hppa V=1 + +The ``DEB_`` variables are substitutions used by +``debian-bootstrap.pre`` which is called to do the initial debootstrap +of the rootfs before it is copied into the container. The second stage +is run as part of the build. The final image will be tagged as +``qemu/debian-sid-hppa``. + +VM testing +---------- + +This test suite contains scripts that bootstrap various guest images that have +necessary packages to build QEMU. The basic usage is documented in ``Makefile`` +help which is displayed with ``make vm-help``. + +Quickstart +~~~~~~~~~~ + +Run ``make vm-help`` to list available make targets. Invoke a specific make +command to run build test in an image. For example, ``make vm-build-freebsd`` +will build the source tree in the FreeBSD image. The command can be executed +from either the source tree or the build dir; if the former, ``./configure`` is +not needed. The command will then generate the test image in ``./tests/vm/`` +under the working directory. + +Note: images created by the scripts accept a well-known RSA key pair for SSH +access, so they SHOULD NOT be exposed to external interfaces if you are +concerned about attackers taking control of the guest and potentially +exploiting a QEMU security bug to compromise the host. + +QEMU binaries +~~~~~~~~~~~~~ + +By default, ``qemu-system-x86_64`` is searched in $PATH to run the guest. If +there isn't one, or if it is older than 2.10, the test won't work. In this case, +provide the QEMU binary in env var: ``QEMU=/path/to/qemu-2.10+``. + +Likewise the path to ``qemu-img`` can be set in QEMU_IMG environment variable. + +Make jobs +~~~~~~~~~ + +The ``-j$X`` option in the make command line is not propagated into the VM, +specify ``J=$X`` to control the make jobs in the guest. + +Debugging +~~~~~~~~~ + +Add ``DEBUG=1`` and/or ``V=1`` to the make command to allow interactive +debugging and verbose output. If this is not enough, see the next section. +``V=1`` will be propagated down into the make jobs in the guest. + +Manual invocation +~~~~~~~~~~~~~~~~~ + +Each guest script is an executable script with the same command line options. +For example to work with the netbsd guest, use ``$QEMU_SRC/tests/vm/netbsd``: + +.. code:: + + $ cd $QEMU_SRC/tests/vm + + # To bootstrap the image + $ ./netbsd --build-image --image /var/tmp/netbsd.img + <...> + + # To run an arbitrary command in guest (the output will not be echoed unless + # --debug is added) + $ ./netbsd --debug --image /var/tmp/netbsd.img uname -a + + # To build QEMU in guest + $ ./netbsd --debug --image /var/tmp/netbsd.img --build-qemu $QEMU_SRC + + # To get to an interactive shell + $ ./netbsd --interactive --image /var/tmp/netbsd.img sh + +Adding new guests +~~~~~~~~~~~~~~~~~ + +Please look at existing guest scripts for how to add new guests. + +Most importantly, create a subclass of BaseVM and implement ``build_image()`` +method and define ``BUILD_SCRIPT``, then finally call ``basevm.main()`` from +the script's ``main()``. + +* Usually in ``build_image()``, a template image is downloaded from a + predefined URL. ``BaseVM._download_with_cache()`` takes care of the cache and + the checksum, so consider using it. 
+ +* Once the image is downloaded, users, SSH server and QEMU build deps should + be set up: + + - Root password set to ``BaseVM.ROOT_PASS`` + - User ``BaseVM.GUEST_USER`` is created, and password set to + ``BaseVM.GUEST_PASS`` + - SSH service is enabled and started on boot, + ``$QEMU_SRC/tests/keys/id_rsa.pub`` is added to ssh's ``authorized_keys`` + file of both root and the normal user + - DHCP client service is enabled and started on boot, so that it can + automatically configure the virtio-net-pci NIC and communicate with QEMU + user net (10.0.2.2) + - Necessary packages are installed to untar the source tarball and build + QEMU + +* Write a proper ``BUILD_SCRIPT`` template, which should be a shell script that + untars a raw virtio-blk block device, which is the tarball data blob of the + QEMU source tree, then configure/build it. Running "make check" is also + recommended. + +Image fuzzer testing +-------------------- + +An image fuzzer was added to exercise format drivers. Currently only qcow2 is +supported. To start the fuzzer, run + +.. code:: + + tests/image-fuzzer/runner.py -c '[["qemu-img", "info", "$test_img"]]' /tmp/test qcow2 + +Alternatively, some command different from ``qemu-img info`` can be tested, by +changing the ``-c`` option. + +Functional tests using Python +----------------------------- + +A functional test focuses on the functional requirement of the software, +attempting to find errors like incorrect functions, interface errors, +behavior errors, and initialization and termination errors [3]_. + +The ``tests/functional`` directory hosts functional tests written in +Python. You can run the functional tests simply by executing: + +.. code:: + + make check-functional + +See :ref:`checkfunctional-ref` for more details. + +.. _checktcg-ref: + +Testing with "make check-tcg" +----------------------------- + +The check-tcg tests are intended for simple smoke tests of both +linux-user and softmmu TCG functionality. However to build test +programs for guest targets you need to have cross compilers available. +If your distribution supports cross compilers you can do something as +simple as:: + + apt install gcc-aarch64-linux-gnu + +The configure script will automatically pick up their presence. +Sometimes compilers have slightly odd names so the availability of +them can be prompted by passing in the appropriate configure option +for the architecture in question, for example:: + + $(configure) --cross-cc-aarch64=aarch64-cc + +There is also a ``--cross-cc-cflags-ARCH`` flag in case additional +compiler flags are needed to build for a given target. + +If you have the ability to run containers as the user the build system +will automatically use them where no system compiler is available. For +architectures where we also support building QEMU we will generally +use the same container to build tests. However there are a number of +additional containers defined that have a minimal cross-build +environment that is only suitable for building test cases. Sometimes +we may use a bleeding edge distribution for compiler features needed +for test cases that aren't yet in the LTS distros we support for QEMU +itself. + +See :ref:`container-ref` for more details. + +Running subset of tests +~~~~~~~~~~~~~~~~~~~~~~~ + +You can build the tests for one architecture:: + + make build-tcg-tests-$TARGET + +And run with:: + + make run-tcg-tests-$TARGET + +Adding ``V=1`` to the invocation will show the details of how to +invoke QEMU for the test which is useful for debugging tests. 
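+
+For example, assuming the ``aarch64-linux-user`` target was configured and a
+suitable cross compiler (or container) is available:
+
+.. code::
+
+  make build-tcg-tests-aarch64-linux-user
+  make run-tcg-tests-aarch64-linux-user V=1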
+ +Running individual tests +~~~~~~~~~~~~~~~~~~~~~~~~ + +Tests can also be run directly from the test build directory. If you +run ``make help`` from the test build directory you will get a list of +all the tests that can be run. Please note that same binaries are used +in multiple tests, for example:: + + make run-plugin-test-mmap-with-libinline.so + +will run the mmap test with the ``libinline.so`` TCG plugin. The +gdbstub tests also re-use the test binaries but while exercising gdb. + +TCG test dependencies +~~~~~~~~~~~~~~~~~~~~~ + +The TCG tests are deliberately very light on dependencies and are +either totally bare with minimal gcc lib support (for system-mode tests) +or just glibc (for linux-user tests). This is because getting a cross +compiler to work with additional libraries can be challenging. + +Other TCG Tests +--------------- + +There are a number of out-of-tree test suites that are used for more +extensive testing of processor features. + +KVM Unit Tests +~~~~~~~~~~~~~~ + +The KVM unit tests are designed to run as a Guest OS under KVM but +there is no reason why they can't exercise the TCG as well. It +provides a minimal OS kernel with hooks for enabling the MMU as well +as reporting test results via a special device:: + + https://git.kernel.org/pub/scm/virt/kvm/kvm-unit-tests.git + +Linux Test Project +~~~~~~~~~~~~~~~~~~ + +The LTP is focused on exercising the syscall interface of a Linux +kernel. It checks that syscalls behave as documented and strives to +exercise as many corner cases as possible. It is a useful test suite +to run to exercise QEMU's linux-user code:: + + https://linux-test-project.github.io/ + +GCC gcov support +---------------- + +``gcov`` is a GCC tool to analyze the testing coverage by +instrumenting the tested code. To use it, configure QEMU with +``--enable-gcov`` option and build. Then run the tests as usual. + +If you want to gather coverage information on a single test the ``make +clean-gcda`` target can be used to delete any existing coverage +information before running a single test. + +You can generate a HTML coverage report by executing ``make +coverage-html`` which will create +``meson-logs/coveragereport/index.html``. + +Further analysis can be conducted by running the ``gcov`` command +directly on the various .gcda output files. Please read the ``gcov`` +documentation for more information. + +Flaky tests +----------- + +A flaky test is defined as a test that exhibits both a passing and a failing +result with the same code on different runs. Some usual reasons for an +intermittent/flaky test are async wait, concurrency, and test order dependency +[4]_. + +In QEMU, tests that are identified to be flaky are normally disabled by +default. Set the QEMU_TEST_FLAKY_TESTS environment variable before running +the tests to enable them. + +References +---------- + +.. [1] Sommerville, Ian (2016). Software Engineering. p. 233. +.. [2] Pressman, Roger S. & Maxim, Bruce R. (2020). Software Engineering, + A Practitioner’s Approach. p. 48, 376, 378, 381. +.. [3] Pressman, Roger S. & Maxim, Bruce R. (2020). Software Engineering, + A Practitioner’s Approach. p. 388. +.. [4] Luo, Qingzhou, et al. An empirical analysis of flaky tests. + Proceedings of the 22nd ACM SIGSOFT International Symposium on + Foundations of Software Engineering. 2014. 
diff --git a/docs/devel/qgraph.rst b/docs/devel/testing/qgraph.rst similarity index 100% rename from docs/devel/qgraph.rst rename to docs/devel/testing/qgraph.rst diff --git a/docs/devel/qtest.rst b/docs/devel/testing/qtest.rst similarity index 99% rename from docs/devel/qtest.rst rename to docs/devel/testing/qtest.rst index c5b8546b3eb..73ef7702b7b 100644 --- a/docs/devel/qtest.rst +++ b/docs/devel/testing/qtest.rst @@ -1,3 +1,5 @@ +.. _qtest: + ======================================== QTest Device Emulation Testing Framework ======================================== diff --git a/docs/devel/tracing.rst b/docs/devel/tracing.rst index 043bed7fd0f..f4557ee20e4 100644 --- a/docs/devel/tracing.rst +++ b/docs/devel/tracing.rst @@ -76,7 +76,7 @@ The "io/trace.h" file must be created manually with an #include of the corresponding "trace/trace-.h" file that will be generated in the builddir:: - $ echo '#include "trace/trace-io.h"' >io/trace.h + $ (echo '/* SPDX-License-Identifier: GPL-2.0-or-later */' ; echo '#include "trace/trace-io.h"') >io/trace.h While it is possible to include a trace.h file from outside a source file's own sub-directory, this is discouraged in general. It is strongly preferred that diff --git a/docs/devel/uefi-vars.rst b/docs/devel/uefi-vars.rst new file mode 100644 index 00000000000..0151a26a0a6 --- /dev/null +++ b/docs/devel/uefi-vars.rst @@ -0,0 +1,68 @@ +============== +UEFI variables +============== + +Guest UEFI variable management +============================== + +The traditional approach for UEFI Variable storage in qemu guests is +to work as close as possible to physical hardware. That means +providing pflash as storage and leaving the management of variables +and flash to the guest. + +Secure boot support comes with the requirement that the UEFI variable +storage must be protected against direct access by the OS. All update +requests must pass the sanity checks. (Parts of) the firmware must +run with a higher privilege level than the OS so this can be enforced +by the firmware. On x86 this has been implemented using System +Management Mode (SMM) in qemu and kvm, which again is the same +approach taken by physical hardware. Only privileged code running in +SMM mode is allowed to access flash storage. + +Communication with the firmware code running in SMM mode works by +serializing the requests to a shared buffer, then trapping into SMM +mode via SMI. The SMM code processes the request, stores the reply in +the same buffer and returns. + +Host UEFI variable service +========================== + +Instead of running the privileged code inside the guest we can run it +on the host. The serialization protocol can be reused. The +communication with the host uses a virtual device, which essentially +configures the shared buffer location and size, and traps to the host +to process the requests. + +The ``uefi-vars`` device implements the UEFI virtual device. It comes +in ``uefi-vars-x86`` and ``uefi-vars-sysbus`` flavours. The device +reimplements the handlers needed, specifically +``EfiSmmVariableProtocol`` and ``VarCheckPolicyLibMmiHandler``. It +also consumes events (``EfiEndOfDxeEventGroup``, +``EfiEventReadyToBoot`` and ``EfiEventExitBootServices``). + +The advantage of the approach is that we do not need a special +privilege level for the firmware to protect itself, i.e. it does not +depend on SMM emulation on x64, which allows the removal of a bunch of +complex code for SMM emulation from the linux kernel +(CONFIG_KVM_SMM=n). 
It also allows support for secure boot on arm +without implementing secure world (el3) emulation in kvm. + +Of course there are also downsides. The added device increases the +attack surface of the host, and we are adding some code duplication +because we have to reimplement some edk2 functionality in qemu. + +usage on x86_64 +--------------- + +.. code:: + + qemu-system-x86_64 \ + -device uefi-vars-x86,jsonfile=/path/to/vars.json + +usage on aarch64 +---------------- + +.. code:: + + qemu-system-aarch64 -M virt \ + -device uefi-vars-sysbus,jsonfile=/path/to/vars.json diff --git a/docs/devel/virtio-backends.rst b/docs/devel/virtio-backends.rst index 9ff092e7a04..ebddc3b9f51 100644 --- a/docs/devel/virtio-backends.rst +++ b/docs/devel/virtio-backends.rst @@ -101,13 +101,12 @@ manually instantiated: VirtIOBlock vdev; }; - static Property virtio_blk_pci_properties[] = { + static const Property virtio_blk_pci_properties[] = { DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED), - DEFINE_PROP_END_OF_LIST(), }; static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -120,7 +119,7 @@ manually instantiated: qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } - static void virtio_blk_pci_class_init(ObjectClass *klass, void *data) + static void virtio_blk_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/docs/glossary.rst b/docs/glossary.rst new file mode 100644 index 00000000000..4fa044bfb6e --- /dev/null +++ b/docs/glossary.rst @@ -0,0 +1,280 @@ +.. _Glossary: + +-------- +Glossary +-------- + +This section of the manual presents brief definitions of acronyms and terms used +by QEMU developers. + +Accelerator +----------- + +A specific API used to accelerate execution of guest instructions. It can be +hardware-based, through a virtualization API provided by the host OS (kvm, hvf, +whpx, ...), or software-based (tcg). See this description of `supported +accelerators`. + +Board +----- + +Another name for :ref:`machine`. + +Block +----- + +Block drivers are the available `disk formats and front-ends +` available, and block devices `(see Block device section on +options page)` are using them to implement disks for a +virtual machine. + +CFI +--- + +Control Flow Integrity is a hardening technique used to prevent exploits +targeting QEMU by detecting unexpected branches during execution. QEMU `actively +supports` being compiled with CFI enabled. + +Device +------ + +In QEMU, a device is a piece of hardware visible to the guest. Examples include +UARTs, PCI controllers, PCI cards, VGA controllers, and many more. + +QEMU is able to emulate a CPU, and all the hardware interacting with it, +including `many devices`. When QEMU runs a virtual machine +using a hardware-based accelerator, it is responsible for emulating, using +software, all devices. + +EDK2 +---- + +EDK2, as known as `TianoCore `_, is an open source +implementation of UEFI standard. QEMU virtual machines that boot a UEFI firmware +usually use EDK2. + +gdbstub +------- + +QEMU implements a `gdb server `, allowing gdb to attach to it and +debug a running virtual machine, or a program in user-mode. This allows +debugging the guest code that is running inside QEMU. 
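+
+As a quick illustration, a system-mode guest can be started with ``-s``
+(shorthand for ``-gdb tcp::1234``) and ``-S`` (pause the CPU at startup),
+then gdb can attach to it:
+
+.. code::
+
+  $ qemu-system-x86_64 -s -S -kernel vmlinux ...
+  $ gdb vmlinux -ex 'target remote localhost:1234'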
+
+glib2
+-----
+
+`GLib2 `_ is one of the most important libraries we use throughout the
+codebase. It provides many data structures, macros, string and thread
+utilities and portable functions across different OSes. It's required to
+build QEMU.
+
+Guest agent
+-----------
+
+The `QEMU Guest Agent ` is a daemon intended to be run within virtual
+machines. It provides various services that help QEMU interact with the
+guest.
+
+.. _guest:
+
+Guest
+-----
+
+The guest is the architecture of the virtual machine being emulated.
+See also :ref:`host`.
+
+Sometimes this is called the :ref:`target` architecture, but that term
+can be ambiguous.
+
+.. _host:
+
+Host
+----
+
+The host is the native architecture that QEMU is running on.
+See also :ref:`guest`.
+
+Hypervisor
+----------
+
+The formal definition of a hypervisor is a program or API that can be used to
+manage a virtual machine. QEMU is a virtualizer that interacts with various
+hypervisors.
+
+In the context of QEMU, a hypervisor is an API, provided by the host OS,
+that allows virtual machines to be executed. The Linux implementation is KVM
+(which supports Xen as well). On macOS it is HVF, on Windows WHPX, and on
+NetBSD NVMM.
+
+.. _machine:
+
+Machine
+-------
+
+QEMU's system emulation models many different types of hardware. A machine model
+(sometimes called a board model) is the model of a complete virtual system with
+RAM, one or more CPUs, and various devices. It can be selected with the
+``-machine`` option of qemu-system. Our machine models can be found on this `page
+`.
+
+Migration
+---------
+
+QEMU can save and restore the execution of a virtual machine between different
+host systems. This is provided by the :ref:`Migration framework`.
+
+NBD
+---
+
+The `QEMU Network Block Device server ` is a tool that can be used to
+mount and access QEMU images, providing functionality similar to a loop device.
+
+Mailing List
+------------
+
+This is `where `_ all the
+development happens! Changes are posted as series, which all developers can
+review and give feedback on.
+
+For reporting issues, our `GitLab
+`_ tracker is the best place.
+
+.. _softmmu:
+
+MMU / softmmu
+-------------
+
+The Memory Management Unit is responsible for translating virtual addresses to
+physical addresses and managing memory protection. QEMU system mode is named
+"softmmu" precisely because it implements this in software, including a TLB
+(Translation lookaside buffer), for the guest virtual machine.
+
+QEMU user-mode does not implement a full software MMU, but "simply" translates
+virtual addresses by adding a specific offset, relying on the host MMU/OS
+instead.
+
+Monitor / QMP / HMP
+-------------------
+
+The `QEMU Monitor ` is a text interface which can be used to interact
+with a running virtual machine.
+
+QMP stands for QEMU Monitor Protocol and is a JSON-based interface.
+HMP stands for Human Monitor Protocol and is a set of text commands available
+for users who prefer natural language to JSON.
+
+MTTCG
+-----
+
+Multiple CPU support was first implemented using a round-robin algorithm
+running on a single thread. Later on, `Multi-threaded TCG ` was developed
+to take advantage of multiple host cores and speed up execution.
+
+Plugins
+-------
+
+`TCG Plugins ` is an API used to instrument guest code, in system
+and user mode. The end goal is to provide a set of functionality similar to
+`DynamoRIO `_ or `valgrind `_.
+
+One key advantage of QEMU plugins is that they can be used to perform
+architecture-agnostic instrumentation.
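+
+As an illustration, assuming a plugin such as ``libinsn.so`` has been built
+(its location depends on the build tree), it can be loaded like this:
+
+.. code::
+
+  $ qemu-x86_64 -plugin /path/to/libinsn.so -d plugin /bin/true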
+ +Patchew +------- + +`Patchew `_ is a website that tracks patches on the +Mailing List. + +PR +-- + +Once a series is reviewed and accepted by a subsystem maintainer, it will be +included in a PR (Pull Request) that the project maintainer will merge into QEMU +main branch, after running tests. + +The QEMU project doesn't currently expect most developers to directly submit +pull requests. + +QCOW2 +----- + +QEMU Copy On Write is a disk format developed by QEMU. It provides transparent +compression, automatic extension, and many other advantages over a raw image. + +qcow2 is the recommended format to use. + +QEMU +---- + +`QEMU (Quick Emulator) `_ is a generic and open source +machine emulator and virtualizer. + +QOM +--- + +:ref:`QEMU Object Model ` is an object oriented API used to define +various devices and hardware in the QEMU codebase. + +Record/replay +------------- + +:ref:`Record/replay ` is a feature of QEMU allowing to have a +deterministic and reproducible execution of a virtual machine. + +Rust +---- + +`A new programming language `_, memory safe by +default. There is a work in progress to integrate it in QEMU codebase for +various subsystems. + +System mode +----------- + +QEMU System mode provides a virtual model of an entire machine (CPU, memory and +emulated devices) to run a guest OS. In this mode the CPU may be fully emulated, +or it may work with a hypervisor such as KVM, Xen or Hypervisor.Framework to +allow the guest to run directly on the host CPU. + +QEMU System mode is called :ref:`softmmu ` as well. + +.. _target: + +Target +------ + +The term "target" can be ambiguous. In most places in QEMU it is used as a +synonym for :ref:`guest`. For example the code for emulating Arm CPUs is in +``target/arm/``. However in the :ref:`TCG subsystem ` "target" refers to the +architecture which QEMU is running on, i.e. the :ref:`host`. + +TCG +--- + +TCG is the QEMU `Tiny Code Generator `. It is the JIT (just-in-time) +compiler we use to emulate a guest CPU in software. + +It is one of the accelerators supported by QEMU, and supports a lot of +guest/host architectures. + +User mode +--------- + +QEMU User mode can launch processes compiled for one CPU on another CPU. In this +mode the CPU is always emulated. In this mode, QEMU translate system calls from +guest to host kernel. It is available for Linux and BSD. + +VirtIO +------ + +VirtIO is an open standard used to define and implement virtual devices with a +minimal overhead, defining a set of data structures and hypercalls (similar to +system calls, but targeting an hypervisor, which happens to be QEMU in our +case). It's designed to be more efficient than emulating a real device, by +minimizing the amount of interactions between a guest VM and its hypervisor. + +vhost-user +---------- + +`Vhost-user ` is an interface used to implement VirtIO devices +outside of QEMU itself. diff --git a/docs/igd-assign.txt b/docs/igd-assign.txt index e17bb50789a..e54040335ba 100644 --- a/docs/igd-assign.txt +++ b/docs/igd-assign.txt @@ -1,44 +1,71 @@ Intel Graphics Device (IGD) assignment with vfio-pci ==================================================== -IGD has two different modes for assignment using vfio-pci: - -1) Universal Pass-Through (UPT) mode: - - In this mode the IGD device is added as a *secondary* (ie. non-primary) - graphics device in combination with an emulated primary graphics device. - This mode *requires* guest driver support to remove the external - dependencies generally associated with IGD (see below). 
Those guest - drivers only support this mode for Broadwell and newer IGD, according to - Intel. Additionally, this mode by default, and as officially supported - by Intel, does not support direct video output. The intention is to use - this mode either to provide hardware acceleration to the emulated graphics - or to use this mode in combination with guest-based remote access software, - for example VNC (see below for optional output support). This mode - theoretically has no device specific handling dependencies on vfio-pci or - the VM firmware. - -2) "Legacy" mode: - - In this mode the IGD device is intended to be the primary and exclusive - graphics device in the VM[1], as such QEMU does not facilitate any sort - of remote graphics to the VM in this mode. A connected physical monitor - is the intended output device for IGD. This mode includes several - requirements and restrictions: - - * IGD must be given address 02.0 on the PCI root bus in the VM - * The host kernel must support vfio extensions for IGD (v4.6) - * vfio VGA support very likely needs to be enabled in the host kernel - * The VM firmware must support specific fw_cfg enablers for IGD - * The VM machine type must support a PCI host bridge at 00.0 (standard) - * The VM machine type must provide or allow to be created a special - ISA/LPC bridge device (vfio-pci-igd-lpc-bridge) on the root bus at - PCI address 1f.0. - * The IGD device must have a VGA ROM, either provided via the romfile - option or loaded automatically through vfio (standard). rombar=0 - will disable legacy mode support. - * Hotplug of the IGD device is not supported. - * The IGD device must be a SandyBridge or newer model device. +Using vfio-pci, we can passthrough Intel Graphics Device (IGD) to guest, either +serve as primary and exclusive graphics adapter, or used in combination with an +emulated primary graphics device, depending on the config and guest driver +support. However, IGD devices are not "clean" PCI devices, they use extra +memory regions other than BARs. Special handling is required to make them work +properly, including: + +* OpRegion for accessing Virtual BIOS Table (VBT) that contains display output + information. +* Data Stolen Memory (DSM) region used as VRAM at early stage (BIOS/UEFI) + +Certain guest software also depends on following conditions to work: +(*-Required by) + +| Condition | Linux | Windows | VBIOS | EFI GOP | +|---------------------------------------------|-------|---------|-------|---------| +| #1 IGD has a valid OpRegion containing VBT | * ^1 | * | * | * | +| #2 VID/DID of LPC bridge at 00:1f.0 matches | | | * | * | +| #3 IGD is assigned to BDF 00:02.0 | | | * | * | +| #4 IGD has VGA controller device class | | | * | * | +| #5 Host's VGA ranges are mapped to IGD | | | * | | +| #6 Guest has valid VBIOS or UEFI Option ROM | | | * | * | + +^1 Though i915 driver is able to mock a OpRegion, it is still recommended to + use the VBT copied from host OpRegion to prevent incorrect configuration. + +For #1, the "x-igd-opregion=on" option exposes a copy of host IGD OpRegion to +guest via fw_cfg, where guest firmware can set up guest OpRegion with it. + +For #2, "x-igd-lpc=on" option copies the IDs of host LPC bridge and host bridge +to guest. Currently this is only supported on i440fx machines as there is +already an ICH9 LPC bridge present on q35 machines, overwriting its IDs may +lead to unexpected behavior. + +For #3, "addr=2.0" assigns IGD to 00:02.0. + +For #4, the primary display must be set to IGD in host BIOS. 
+ +For #5, "x-vga=on" enables guest access to standard VGA IO/MMIO ranges. + +For #6, ROM either provided via the ROM BAR or romfile= option is needed, this +Intel document [1] shows how to dump VBIOS to file. For UEFI Option ROM, see +"Guest firmware" section. + +QEMU also provides a "Legacy" mode that implicitly enables full functionality +on IGD, it is automatically enabled when +* IGD generation is 6 to 9 (Sandy Bridge to Comet Lake) +* IGD claims VGA cycles on host (IGD is VGA controller on host) +* Machine type is i440fx +* IGD is assigned to guest BDF 00:02.0 +* ROM BAR or romfile is present + +In "Legacy" mode, QEMU will automatically setup OpRegion, LPC bridge IDs and +VGA range access, which is equivalent to: + x-igd-opregion=on,x-igd-lpc=on,x-vga=on + +By default, "Legacy" mode won't fail, it continues on error. User can set +"x-igd-legacy-mode=on" to force enabling legacy mode, this also checks if the +conditions above for legacy mode is met, and if any error occurs, QEMU will +fail immediately. Users can also set "x-igd-legacy-mode=off" to disable legacy +mode. + +In legacy mode, as the guest VGA ranges are assigned to IGD device, all other +graphics devices should be removed, this can be done using "-nographic" or +"-vga none" or "-nodefaults", along with adding the device using vfio-pci. For either mode, depending on the host kernel, the i915 driver in the host may generate faults and errors upon re-binding to an IGD device after it @@ -73,31 +100,39 @@ DVI, or DisplayPort) may be unsupported in some use cases. In the author's experience, even DP to VGA adapters can be troublesome while adapters between digital formats work well. -Usage -===== -The intention is for IGD assignment to be transparent for users and thus for -management tools like libvirt. To make use of legacy mode, simply remove all -other graphics options and use "-nographic" and either "-vga none" or -"-nodefaults", along with adding the device using vfio-pci: - -device vfio-pci,host=00:02.0,id=hostdev0,bus=pci.0,addr=0x2 +Options +======= +* x-igd-opregion=[*on*|off] + Copy host IGD OpRegion and expose it to guest with fw_cfg + +* x-igd-lpc=[on|*off*] + Creates a dummy LPC bridge at 00:1f:0 with host VID/DID (i440fx only) + +* x-igd-legacy-mode=[on|off|*auto*] + Enable/Disable legacy mode + +* x-igd-gms=[hex, default 0] + Overriding DSM region size in GGC register, 0 means uses host value. + Use this only when the DSM size cannot be changed through the + 'DVMT Pre-Allocated' option in host BIOS. + -For UPT mode, retain the default emulated graphics and simply add the vfio-pci -device making use of any other bus address other than 02.0. libvirt will -default to assigning the device a UPT compatible address while legacy mode -users will need to manually edit the XML if using a tool like virt-manager -where the VM device address is not expressly specified. +Examples +======== +* Adding IGD with automatically legacy mode support + -device vfio-pci,host=00:02.0,id=hostdev0,addr=2.0 -An experimental vfio-pci option also exists to enable OpRegion, and thus -external monitor support, for UPT mode. This can be enabled by adding -"x-igd-opregion=on" to the vfio-pci device options for the IGD device. As -with legacy mode, this requires the host to support features introduced in -the v4.6 kernel. If Intel chooses to embrace this support, the option may -be made non-experimental in the future, opening it to libvirt support. 
+* Adding IGD with OpRegion and LPC ID hack, but without VGA ranges
+  (For UEFI guests)
+  -device vfio-pci,host=00:02.0,id=hostdev0,addr=2.0,x-igd-legacy-mode=off,x-igd-lpc=on,romfile=efi_oprom.rom
-Developer ABI
-=============
-Legacy mode IGD support imposes two fw_cfg requirements on the VM firmware:
+
+Guest firmware
+==============
+Guest firmware is responsible for setting up OpRegion and Base of Data Stolen
+Memory (BDSM) in the guest address space. IGD passthrough support imposes two
+fw_cfg requirements on the VM firmware:
 1) "etc/igd-opregion"
@@ -117,17 +152,117 @@ Legacy mode IGD support imposes two fw_cfg requirements on the VM firmware:
    Firmware must allocate a reserved memory below 4GB with required 1MB
    alignment equal to this size. Additionally the base address of this
    reserved region must be written to the dword BDSM register in PCI config
-   space of the IGD device at offset 0x5C. As this support is related to
-   running the IGD ROM, which has other dependencies on the device appearing
-   at guest address 00:02.0, it's expected that this fw_cfg file is only
-   relevant to a single PCI class VGA device with Intel vendor ID, appearing
-   at PCI bus address 00:02.0.
+   space of the IGD device at offset 0x5C (or 0xC0 for Gen 11+ devices using
+   64-bit BDSM). As this support is related to running the IGD ROM, which
+   has other dependencies on the device appearing at guest address 00:02.0,
+   it's expected that this fw_cfg file is only relevant to a single PCI
+   class VGA device with Intel vendor ID, appearing at PCI bus address 00:02.0.
+
+   Starting from Meteor Lake, IGD devices access stolen memory via their MMIO
+   BAR2 (LMEMBAR) and no longer have the BDSM register in config space. There
+   is no need for guest firmware to allocate data stolen memory in the guest
+   address space and write it to the BDSM register. The value of this fw_cfg
+   file is 0 in that case.
+
+Upstream Seabios has OpRegion and BDSM (pre-Gen11 devices only) support.
+However, the support has not been accepted by upstream EDK2/OVMF. A recommended
+solution is to create a virtual OpRom with the following DXE drivers:
+
+* IgdAssignmentDxe: Set up OpRegion and BDSM according to fw_cfg (must)
+* IntelGopDriver: Closed-source Intel GOP driver
+* PlatformGopPolicy: Protocol required by IntelGopDriver
+
+IntelGopDriver and PlatformGopPolicy are only required when enabling GOP on IGD.
+
+The original IgdAssignmentDxe can be found at [3]. An Intel-maintained version
+with PlatformGopPolicy for industrial computing is at [4]. There is also an
+unofficially maintained version with newer Gen11+ device support at [5].
+You need to build them with EDK2.
+
+For the IntelGopDriver, Intel never released it to the public. You may contact
+Intel support to get one as [4] says, if you are an Intel Premier Support
+customer, or you can try extracting it from your host firmware using
+"UEFI BIOS Updater"[6].
+ +Once you got all the required DXE drivers, a Option ROM can be generated with +EfiRom utility in EDK2, using + EfiRom -f 0x8086 -i -o output.rom \ + -e IgdAssignmentDxe.efi PlatformGOPPolicy.efi IntelGopDriver.efi + + +Known issues +============ +When using OVMF as guest firmware, you may encounter the following warning: +warning: vfio_container_dma_map(0x55fab36ce610, 0x380010000000, 0x108000, 0x7fd336000000) = -22 (Invalid argument) + +Solution: +Set the host physical address bits to IOMMU address width using + -cpu host,host-phys-bits-limit= +Or in libvirt XML with + + + +The IOMMU address width can be determined with + echo $(( ((0x$(cat /sys/devices/virtual/iommu/dmar0/intel-iommu/cap) & 0x3F0000) >> 16) + 1 )) +Refer https://edk2.groups.io/g/devel/topic/patch_v1/102359124 for more details + + +Memory View +=========== +IGD has it own address space. To use system RAM as VRAM, a single-level page +table named Global Graphics Translation Table (GTT) is used for the address +translation. Each page table entry points a 4KB page. Illustration below shows +the translation flow on IGD with 64-bit GTT PTEs. + +(PTE_SIZE == 8) +-------------+---+ + | Address | V | V: Valid Bit + +-------------+---+ + | ... | | +IGD:0x01ae9010 0xd740| 0x70ffc000 | 1 | Mem:0x42ba3e010^ +-----------------------> 0xd748| 0x42ba3e000 | 1 +------------------> +(addr >> 12) * PTE_SIZE 0xd750| 0x42ba3f000 | 1 | + | ... | | + +-------------+---+ +^ The address may be remapped by IOMMU + +The memory region store GTT is called GTT Stolen Memory (GSM) it is located +right below the Data Stolen Memory (DSM). Accessing this region directly is +not allowed, any access will immediately freeze the whole system. The only way +to access it is through the second half of MMIO BAR0. + +The Data Stolen Memory is reserved by firmware, and acts as the VRAM in pre-OS +environments. In QEMU, guest firmware (Seabios/OVMF) is responsible for +reserving a continuous region and program its base address to BDSM register, +then let VBIOS/GOP driver initializing this region. Illustration below shows +how DSM is mapped. + + IGD Addr Space Host Addr Space Guest Addr Space + +-------------+ +-------------+ +-------------+ + | | | | | | + | | | | | | + | | +-------------+ +-------------+ + | | | Data Stolen | | Data Stolen | + | | | (Guest) | | (Guest) | + | | +------------>+-------------+<------->+-------------+<--Guest BDSM + | | | Passthrough | | EPT | | Emulated by QEMU +DSMSIZE+-------------+ | with IOMMU | | Mapping | | Programmed by guest FW + | | | | | | | + | | | | | | | + 0+-------------+--+ | | | | + | +-------------+ | | + | | Data Stolen | +-------------+ + | | (Host) | + +------------>+-------------+<--Host BDSM + Non- | | "real" one in HW + Passthrough | | Programmed by host FW + +-------------+ Footnotes ========= -[1] Nothing precludes adding additional emulated or assigned graphics devices - as non-primary, other than the combination typically not working. I only - intend to set user expectations, others are welcome to find working - combinations or fix whatever issues prevent this from working in the common - case. 
+[1] https://www.intel.com/content/www/us/en/docs/graphics-for-linux/developer-reference/1-0/dump-video-bios.html [2] # echo "vfio-pci" > /sys/bus/pci/devices/0000:00:02.0/driver_override +[3] https://web.archive.org/web/20240827012422/https://bugzilla.tianocore.org/show_bug.cgi?id=935 + Tianocore bugzilla was down since Jan 2025 :( +[4] https://eci.intel.com/docs/3.3/components/kvm-hypervisor.html, Patch 0001-0004 +[5] https://github.com/tomitamoeko/VfioIgdPkg +[6] https://winraid.level1techs.com/t/tool-guide-news-uefi-bios-updater-ubu/30357 diff --git a/docs/index.rst b/docs/index.rst index 0b9ee9901d9..5665de85cab 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -3,6 +3,8 @@ You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. +.. _documentation-root: + ================================ Welcome to QEMU's documentation! ================================ @@ -18,3 +20,4 @@ Welcome to QEMU's documentation! interop/index specs/index devel/index + glossary diff --git a/docs/interop/bitmaps.rst b/docs/interop/bitmaps.rst index ddf8947d548..7536f0ba5c3 100644 --- a/docs/interop/bitmaps.rst +++ b/docs/interop/bitmaps.rst @@ -97,7 +97,7 @@ time. - Persistent storage formats may impose their own requirements on bitmap names and namespaces. Presently, only qcow2 supports persistent bitmaps. See - docs/interop/qcow2.txt for more details on restrictions. Notably: + :doc:`qcow2` for more details on restrictions. Notably: - qcow2 bitmap names are limited to between 1 and 1023 bytes long. diff --git a/docs/interop/firmware.json b/docs/interop/firmware.json index 57f55f6c545..6bbe2cce0af 100644 --- a/docs/interop/firmware.json +++ b/docs/interop/firmware.json @@ -11,7 +11,9 @@ # later. See the COPYING file in the top-level directory. ## -# = Firmware +# ******** +# Firmware +# ******** ## { 'pragma': { @@ -57,10 +59,17 @@ # # @memory: The firmware is to be mapped into memory. # +# @igvm: The firmware is defined by a file conforming to the IGVM +# specification and mapped into memory according to directives +# defined in the file. This is similar to @memory but may +# include additional processing defined by the IGVM file +# including initial CPU state or population of metadata into +# the guest address space. Since: 10.1 +# # Since: 3.0 ## { 'enum' : 'FirmwareDevice', - 'data' : [ 'flash', 'kernel', 'memory' ] } + 'data' : [ 'flash', 'kernel', 'memory', 'igvm' ] } ## # @FirmwareArchitecture: @@ -214,13 +223,23 @@ # PL011 UART. @verbose-static is mutually exclusive # with @verbose-dynamic. # +# @host-uefi-vars: The firmware expects the host to provide an uefi +# variable store. qemu supports that via +# "uefi-vars-sysbus" (aarch64, riscv64, loongarch64) +# or "uefi-vars-x64" (x86_64) devices. The firmware +# will not use flash for nvram. When loading the +# firmware into flash the 'stateless' setup should be +# used. It is recommened to load the firmware into +# memory though. +# # Since: 3.0 ## { 'enum' : 'FirmwareFeature', 'data' : [ 'acpi-s3', 'acpi-s4', 'amd-sev', 'amd-sev-es', 'amd-sev-snp', 'intel-tdx', - 'enrolled-keys', 'requires-smm', 'secure-boot', + 'enrolled-keys', 'requires-smm', + 'secure-boot', 'host-uefi-vars', 'verbose-dynamic', 'verbose-static' ] } ## @@ -367,6 +386,24 @@ { 'struct' : 'FirmwareMappingMemory', 'data' : { 'filename' : 'str' } } +## +# @FirmwareMappingIgvm: +# +# Describes loading and mapping properties for the firmware executable, +# when @FirmwareDevice is @igvm. 
+# +# @filename: Identifies the IGVM file containing the firmware executable +# along with other information used to configure the initial +# state of the guest. The IGVM file may be shared by multiple +# virtual machine definitions. This corresponds to creating +# an object on the command line with "-object igvm-cfg, +# file=@filename". +# +# Since: 10.1 +## +{ 'struct' : 'FirmwareMappingIgvm', + 'data' : { 'filename' : 'str' } } + ## # @FirmwareMapping: # @@ -383,7 +420,8 @@ 'discriminator' : 'device', 'data' : { 'flash' : 'FirmwareMappingFlash', 'kernel' : 'FirmwareMappingKernel', - 'memory' : 'FirmwareMappingMemory' } } + 'memory' : 'FirmwareMappingMemory', + 'igvm' : 'FirmwareMappingIgvm' } } ## # @Firmware: diff --git a/docs/interop/index.rst b/docs/interop/index.rst index ed65395bfb2..d830c5c4104 100644 --- a/docs/interop/index.rst +++ b/docs/interop/index.rst @@ -14,12 +14,18 @@ are useful for making QEMU interoperate with other software. dbus-vmstate dbus-display live-block-operations + nbd + parallels + prl-xml + qcow2 + qed_spec pr-helper qmp-spec qemu-ga qemu-ga-ref qemu-qmp-ref qemu-storage-daemon-qmp-ref + vfio-user vhost-user vhost-user-gpu vhost-vdpa diff --git a/docs/interop/live-block-operations.rst b/docs/interop/live-block-operations.rst index 691429c7afe..6b549ede7c9 100644 --- a/docs/interop/live-block-operations.rst +++ b/docs/interop/live-block-operations.rst @@ -931,8 +931,8 @@ Shutdown the guest, by issuing the ``quit`` QMP command:: } -Live disk backup --- ``blockdev-backup`` and the deprecated``drive-backup`` ---------------------------------------------------------------------------- +Live disk backup --- ``blockdev-backup`` and the deprecated ``drive-backup`` +---------------------------------------------------------------------------- The ``blockdev-backup`` (and the deprecated ``drive-backup``) allows you to create a point-in-time snapshot. diff --git a/docs/interop/nbd.rst b/docs/interop/nbd.rst new file mode 100644 index 00000000000..de079d31fd8 --- /dev/null +++ b/docs/interop/nbd.rst @@ -0,0 +1,89 @@ +QEMU NBD protocol support +========================= + +QEMU supports the NBD protocol, and has an internal NBD client (see +``block/nbd.c``), an internal NBD server (see ``blockdev-nbd.c``), and an +external NBD server tool (see ``qemu-nbd.c``). The common code is placed +in ``nbd/*``. + +The NBD protocol is specified here: +https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md + +The following paragraphs describe some specific properties of NBD +protocol realization in QEMU. + +Metadata namespaces +------------------- + +QEMU supports the ``base:allocation`` metadata context as defined in the +NBD protocol specification, and also defines an additional metadata +namespace ``qemu``. + +``qemu`` namespace +------------------ + +The ``qemu`` namespace currently contains two available metadata context +types. The first is related to exposing the contents of a dirty +bitmap alongside the associated disk contents. 
That metadata context +is named with the following form:: + + qemu:dirty-bitmap: + +Each dirty-bitmap metadata context defines only one flag for extents +in reply for ``NBD_CMD_BLOCK_STATUS``: + +bit 0: + ``NBD_STATE_DIRTY``, set when the extent is "dirty" + +The second is related to exposing the source of various extents within +the image, with a single metadata context named:: + + qemu:allocation-depth + +In the allocation depth context, the entire 32-bit value represents a +depth of which layer in a thin-provisioned backing chain provided the +data (0 for unallocated, 1 for the active layer, 2 for the first +backing layer, and so forth). + +For ``NBD_OPT_LIST_META_CONTEXT`` the following queries are supported +in addition to the specific ``qemu:allocation-depth`` and +``qemu:dirty-bitmap:``: + +``qemu:`` + returns list of all available metadata contexts in the namespace +``qemu:dirty-bitmap:`` + returns list of all available dirty-bitmap metadata contexts + +Features by version +------------------- + +The following list documents which qemu version first implemented +various features (both as a server exposing the feature, and as a +client taking advantage of the feature when present), to make it +easier to plan for cross-version interoperability. Note that in +several cases, the initial release containing a feature may require +additional patches from the corresponding stable branch to fix bugs in +the operation of that feature. + +2.6 + ``NBD_OPT_STARTTLS`` with TLS X.509 Certificates +2.8 + ``NBD_CMD_WRITE_ZEROES`` +2.10 + ``NBD_OPT_GO``, ``NBD_INFO_BLOCK`` +2.11 + ``NBD_OPT_STRUCTURED_REPLY`` +2.12 + ``NBD_CMD_BLOCK_STATUS`` for ``base:allocation`` +3.0 + ``NBD_OPT_STARTTLS`` with TLS Pre-Shared Keys (PSK), + ``NBD_CMD_BLOCK_STATUS`` for ``qemu:dirty-bitmap:``, ``NBD_CMD_CACHE`` +4.2 + ``NBD_FLAG_CAN_MULTI_CONN`` for shareable read-only exports, + ``NBD_CMD_FLAG_FAST_ZERO`` +5.2 + ``NBD_CMD_BLOCK_STATUS`` for ``qemu:allocation-depth`` +7.1 + ``NBD_FLAG_CAN_MULTI_CONN`` for shareable writable exports +8.2 + ``NBD_OPT_EXTENDED_HEADERS``, ``NBD_FLAG_BLOCK_STATUS_PAYLOAD`` diff --git a/docs/interop/nbd.txt b/docs/interop/nbd.txt deleted file mode 100644 index 18efb251de9..00000000000 --- a/docs/interop/nbd.txt +++ /dev/null @@ -1,72 +0,0 @@ -QEMU supports the NBD protocol, and has an internal NBD client (see -block/nbd.c), an internal NBD server (see blockdev-nbd.c), and an -external NBD server tool (see qemu-nbd.c). The common code is placed -in nbd/*. - -The NBD protocol is specified here: -https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md - -The following paragraphs describe some specific properties of NBD -protocol realization in QEMU. - -= Metadata namespaces = - -QEMU supports the "base:allocation" metadata context as defined in the -NBD protocol specification, and also defines an additional metadata -namespace "qemu". - -== "qemu" namespace == - -The "qemu" namespace currently contains two available metadata context -types. The first is related to exposing the contents of a dirty -bitmap alongside the associated disk contents. 
That metadata context -is named with the following form: - - qemu:dirty-bitmap: - -Each dirty-bitmap metadata context defines only one flag for extents -in reply for NBD_CMD_BLOCK_STATUS: - - bit 0: NBD_STATE_DIRTY, set when the extent is "dirty" - -The second is related to exposing the source of various extents within -the image, with a single metadata context named: - - qemu:allocation-depth - -In the allocation depth context, the entire 32-bit value represents a -depth of which layer in a thin-provisioned backing chain provided the -data (0 for unallocated, 1 for the active layer, 2 for the first -backing layer, and so forth). - -For NBD_OPT_LIST_META_CONTEXT the following queries are supported -in addition to the specific "qemu:allocation-depth" and -"qemu:dirty-bitmap:": - -* "qemu:" - returns list of all available metadata contexts in the - namespace. -* "qemu:dirty-bitmap:" - returns list of all available dirty-bitmap - metadata contexts. - -= Features by version = - -The following list documents which qemu version first implemented -various features (both as a server exposing the feature, and as a -client taking advantage of the feature when present), to make it -easier to plan for cross-version interoperability. Note that in -several cases, the initial release containing a feature may require -additional patches from the corresponding stable branch to fix bugs in -the operation of that feature. - -* 2.6: NBD_OPT_STARTTLS with TLS X.509 Certificates -* 2.8: NBD_CMD_WRITE_ZEROES -* 2.10: NBD_OPT_GO, NBD_INFO_BLOCK -* 2.11: NBD_OPT_STRUCTURED_REPLY -* 2.12: NBD_CMD_BLOCK_STATUS for "base:allocation" -* 3.0: NBD_OPT_STARTTLS with TLS Pre-Shared Keys (PSK), -NBD_CMD_BLOCK_STATUS for "qemu:dirty-bitmap:", NBD_CMD_CACHE -* 4.2: NBD_FLAG_CAN_MULTI_CONN for shareable read-only exports, -NBD_CMD_FLAG_FAST_ZERO -* 5.2: NBD_CMD_BLOCK_STATUS for "qemu:allocation-depth" -* 7.1: NBD_FLAG_CAN_MULTI_CONN for shareable writable exports -* 8.2: NBD_OPT_EXTENDED_HEADERS, NBD_FLAG_BLOCK_STATUS_PAYLOAD diff --git a/docs/interop/parallels.rst b/docs/interop/parallels.rst new file mode 100644 index 00000000000..7b328a40c80 --- /dev/null +++ b/docs/interop/parallels.rst @@ -0,0 +1,240 @@ +Parallels Expandable Image File Format +====================================== + +.. + Copyright (c) 2015 Denis Lunev + Copyright (c) 2015 Vladimir Sementsov-Ogievskiy + + This work is licensed under the terms of the GNU GPL, version 2 or later. + See the COPYING file in the top-level directory. + + +A Parallels expandable image file consists of three consecutive parts: + +* header +* BAT +* data area + +All numbers in a Parallels expandable image are stored in little-endian byte +order. + + +Definitions +----------- + +Sector + A 512-byte data chunk. + +Cluster + A data chunk of the size specified in the image header. + Currently, the default size is 1MiB (2048 sectors). In previous + versions, cluster sizes of 63 sectors, 256 and 252 kilobytes were used. + +BAT + Block Allocation Table, an entity that contains information for + guest-to-host I/O data address translation. + +Header +------ + +The header is placed at the start of an image and contains the following +fields:: + + Bytes: + 0 - 15: magic + Must contain "WithoutFreeSpace" or "WithouFreSpacExt". + + 16 - 19: version + Must be 2. + + 20 - 23: heads + Disk geometry parameter for guest. + + 24 - 27: cylinders + Disk geometry parameter for guest. + + 28 - 31: tracks + Cluster size, in sectors. + + 32 - 35: nb_bat_entries + Disk size, in clusters (BAT size). 
+ + 36 - 43: nb_sectors + Disk size, in sectors. + + For "WithoutFreeSpace" images: + Only the lowest 4 bytes are used. The highest 4 bytes must be + cleared in this case. + + For "WithouFreSpacExt" images, there are no such + restrictions. + + 44 - 47: in_use + Set to 0x746F6E59 when the image is opened by software in R/W + mode; set to 0x312e3276 when the image is closed. + + A zero in this field means that the image was opened by an old + version of the software that doesn't support Format Extension + (see below). + + Other values are not allowed. + + 48 - 51: data_off + An offset, in sectors, from the start of the file to the start of + the data area. + + For "WithoutFreeSpace" images: + - If data_off is zero, the offset is calculated as the end of BAT + table plus some padding to ensure sector size alignment. + - If data_off is non-zero, the offset should be aligned to sector + size. However it is recommended to align it to cluster size for + newly created images. + + For "WithouFreSpacExt" images: + data_off must be non-zero and aligned to cluster size. + + 52 - 55: flags + Miscellaneous flags. + + Bit 0: Empty Image bit. If set, the image should be + considered clear. + + Bits 1-31: Unused. + + 56 - 63: ext_off + Format Extension offset, an offset, in sectors, from the start of + the file to the start of the Format Extension Cluster. + + ext_off must meet the same requirements as cluster offsets + defined by BAT entries (see below). + +BAT +--- + +BAT is placed immediately after the image header. In the file, BAT is a +contiguous array of 32-bit unsigned little-endian integers with +``(bat_entries * 4)`` bytes size. + +Each BAT entry contains an offset from the start of the file to the +corresponding cluster. The offset set in clusters for ``WithouFreSpacExt`` +images and in sectors for ``WithoutFreeSpace`` images. + +If a BAT entry is zero, the corresponding cluster is not allocated and should +be considered as filled with zeroes. + +Cluster offsets specified by BAT entries must meet the following requirements: + +- the value must not be lower than data offset (provided by ``header.data_off`` + or calculated as specified above) +- the value must be lower than the desired file size +- the value must be unique among all BAT entries +- the result of ``(cluster offset - data offset)`` must be aligned to + cluster size + +Data Area +--------- + +The data area is an area from the data offset (provided by ``header.data_off`` +or calculated as specified above) to the end of the file. It represents a +contiguous array of clusters. Most of them are allocated by the BAT, some may +be allocated by the ``ext_off`` field in the header while other may be +allocated by extensions. All clusters allocated by ``ext_off`` and extensions +should meet the same requirements as clusters specified by BAT entries. + + +Format Extension +---------------- + +The Format Extension is an area 1 cluster in size that provides additional +format features. This cluster is addressed by the ext_off field in the header. +The format of the Format Extension area is the following:: + + 0 - 7: magic + Must be 0xAB234CEF23DCEA87 + + 8 - 23: m_CheckSum + The MD5 checksum of the entire Header Extension cluster except + the first 24 bytes. + +The above are followed by feature sections or "extensions". The last +extension must be "End of features" (see below). 
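+
+As an illustration only (this is not part of the format specification), the
+``m_CheckSum`` field could be verified with a sketch like the following,
+assuming ``cluster`` holds the raw Format Extension cluster read from the
+image at ``ext_off * 512``::
+
+    import hashlib
+
+    def format_extension_checksum_ok(cluster: bytes) -> bool:
+        # m_CheckSum (bytes 8-23) is the MD5 of everything after the
+        # first 24 bytes of the cluster.
+        return hashlib.md5(cluster[24:]).digest() == cluster[8:24]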
+ +Each feature section has the following format:: + + 0 - 7: magic + The identifier of the feature: + 0x0000000000000000 - End of features + 0x20385FAE252CB34A - Dirty bitmap + + 8 - 15: flags + External flags for extension: + + Bit 0: NECESSARY + If the software cannot load the extension (due to an + unknown magic number or error), the file should not be + changed. If this flag is unset and there is an error on + loading the extension, said extension should be dropped. + + Bit 1: TRANSIT + If there is an unknown extension with this flag set, + said extension should be left as is. + + If neither NECESSARY nor TRANSIT are set, the extension should be + dropped. + + 16 - 19: data_size + The size of the following feature data, in bytes. + + 20 - 23: unused32 + Align header to 8 bytes boundary. + + variable: data (data_size bytes) + +The above is followed by padding to the next 8 bytes boundary, then the +next extension starts. + +The last extension must be "End of features" with all the fields set to 0. + + +Dirty bitmaps feature +--------------------- + +This feature provides a way of storing dirty bitmaps in the image. The fields +of its data area are:: + + 0 - 7: size + The bitmap size, should be equal to disk size in sectors. + + 8 - 23: id + An identifier for backup consistency checking. + + 24 - 27: granularity + Bitmap granularity, in sectors. I.e., the number of sectors + corresponding to one bit of the bitmap. Granularity must be + a power of 2. + + 28 - 31: l1_size + The number of entries in the L1 table of the bitmap. + + variable: L1 offset table (l1_table), size: 8 * l1_size bytes + +The dirty bitmap described by this feature extension is stored in a set of +clusters inside the Parallels image file. The offsets of these clusters are +saved in the L1 offset table specified by the feature extension. Each L1 table +entry is a 64 bit integer as described below: + +Given an offset in bytes into the bitmap data, corresponding L1 entry is:: + + l1_table[offset / cluster_size] + +If an L1 table entry is 0, all bits in the corresponding cluster of the bitmap +are assumed to be 0. + +If an L1 table entry is 1, all bits in the corresponding cluster of the bitmap +are assumed to be 1. + +If an L1 table entry is not 0 or 1, it contains the corresponding cluster +offset (in 512b sectors). Given an offset in bytes into the bitmap data the +offset in bytes into the image file can be obtained as follows:: + + offset = l1_table[offset / cluster_size] * 512 + (offset % cluster_size) diff --git a/docs/interop/parallels.txt b/docs/interop/parallels.txt deleted file mode 100644 index bb3fadf3692..00000000000 --- a/docs/interop/parallels.txt +++ /dev/null @@ -1,232 +0,0 @@ -= License = - -Copyright (c) 2015 Denis Lunev -Copyright (c) 2015 Vladimir Sementsov-Ogievskiy - -This work is licensed under the terms of the GNU GPL, version 2 or later. -See the COPYING file in the top-level directory. - -= Parallels Expandable Image File Format = - -A Parallels expandable image file consists of three consecutive parts: - * header - * BAT - * data area - -All numbers in a Parallels expandable image are stored in little-endian byte -order. - - -== Definitions == - - Sector A 512-byte data chunk. - - Cluster A data chunk of the size specified in the image header. - Currently, the default size is 1MiB (2048 sectors). In previous - versions, cluster sizes of 63 sectors, 256 and 252 kilobytes were - used. 
- - BAT Block Allocation Table, an entity that contains information for - guest-to-host I/O data address translation. - - -== Header == - -The header is placed at the start of an image and contains the following -fields: - -Bytes: - 0 - 15: magic - Must contain "WithoutFreeSpace" or "WithouFreSpacExt". - - 16 - 19: version - Must be 2. - - 20 - 23: heads - Disk geometry parameter for guest. - - 24 - 27: cylinders - Disk geometry parameter for guest. - - 28 - 31: tracks - Cluster size, in sectors. - - 32 - 35: nb_bat_entries - Disk size, in clusters (BAT size). - - 36 - 43: nb_sectors - Disk size, in sectors. - - For "WithoutFreeSpace" images: - Only the lowest 4 bytes are used. The highest 4 bytes must be - cleared in this case. - - For "WithouFreSpacExt" images, there are no such - restrictions. - - 44 - 47: in_use - Set to 0x746F6E59 when the image is opened by software in R/W - mode; set to 0x312e3276 when the image is closed. - - A zero in this field means that the image was opened by an old - version of the software that doesn't support Format Extension - (see below). - - Other values are not allowed. - - 48 - 51: data_off - An offset, in sectors, from the start of the file to the start of - the data area. - - For "WithoutFreeSpace" images: - - If data_off is zero, the offset is calculated as the end of BAT - table plus some padding to ensure sector size alignment. - - If data_off is non-zero, the offset should be aligned to sector - size. However it is recommended to align it to cluster size for - newly created images. - - For "WithouFreSpacExt" images: - data_off must be non-zero and aligned to cluster size. - - 52 - 55: flags - Miscellaneous flags. - - Bit 0: Empty Image bit. If set, the image should be - considered clear. - - Bits 1-31: Unused. - - 56 - 63: ext_off - Format Extension offset, an offset, in sectors, from the start of - the file to the start of the Format Extension Cluster. - - ext_off must meet the same requirements as cluster offsets - defined by BAT entries (see below). - - -== BAT == - -BAT is placed immediately after the image header. In the file, BAT is a -contiguous array of 32-bit unsigned little-endian integers with -(bat_entries * 4) bytes size. - -Each BAT entry contains an offset from the start of the file to the -corresponding cluster. The offset set in clusters for "WithouFreSpacExt" images -and in sectors for "WithoutFreeSpace" images. - -If a BAT entry is zero, the corresponding cluster is not allocated and should -be considered as filled with zeroes. - -Cluster offsets specified by BAT entries must meet the following requirements: - - the value must not be lower than data offset (provided by header.data_off - or calculated as specified above), - - the value must be lower than the desired file size, - - the value must be unique among all BAT entries, - - the result of (cluster offset - data offset) must be aligned to cluster - size. - - -== Data Area == - -The data area is an area from the data offset (provided by header.data_off or -calculated as specified above) to the end of the file. It represents a -contiguous array of clusters. Most of them are allocated by the BAT, some may -be allocated by the ext_off field in the header while other may be allocated by -extensions. All clusters allocated by ext_off and extensions should meet the -same requirements as clusters specified by BAT entries. - - -== Format Extension == - -The Format Extension is an area 1 cluster in size that provides additional -format features. 
This cluster is addressed by the ext_off field in the header. -The format of the Format Extension area is the following: - - 0 - 7: magic - Must be 0xAB234CEF23DCEA87 - - 8 - 23: m_CheckSum - The MD5 checksum of the entire Header Extension cluster except - the first 24 bytes. - - The above are followed by feature sections or "extensions". The last - extension must be "End of features" (see below). - -Each feature section has the following format: - - 0 - 7: magic - The identifier of the feature: - 0x0000000000000000 - End of features - 0x20385FAE252CB34A - Dirty bitmap - - 8 - 15: flags - External flags for extension: - - Bit 0: NECESSARY - If the software cannot load the extension (due to an - unknown magic number or error), the file should not be - changed. If this flag is unset and there is an error on - loading the extension, said extension should be dropped. - - Bit 1: TRANSIT - If there is an unknown extension with this flag set, - said extension should be left as is. - - If neither NECESSARY nor TRANSIT are set, the extension should be - dropped. - - 16 - 19: data_size - The size of the following feature data, in bytes. - - 20 - 23: unused32 - Align header to 8 bytes boundary. - - variable: data (data_size bytes) - - The above is followed by padding to the next 8 bytes boundary, then the - next extension starts. - - The last extension must be "End of features" with all the fields set to 0. - - -=== Dirty bitmaps feature === - -This feature provides a way of storing dirty bitmaps in the image. The fields -of its data area are: - - 0 - 7: size - The bitmap size, should be equal to disk size in sectors. - - 8 - 23: id - An identifier for backup consistency checking. - - 24 - 27: granularity - Bitmap granularity, in sectors. I.e., the number of sectors - corresponding to one bit of the bitmap. Granularity must be - a power of 2. - - 28 - 31: l1_size - The number of entries in the L1 table of the bitmap. - - variable: L1 offset table (l1_table), size: 8 * l1_size bytes - -The dirty bitmap described by this feature extension is stored in a set of -clusters inside the Parallels image file. The offsets of these clusters are -saved in the L1 offset table specified by the feature extension. Each L1 table -entry is a 64 bit integer as described below: - -Given an offset in bytes into the bitmap data, corresponding L1 entry is - - l1_table[offset / cluster_size] - -If an L1 table entry is 0, all bits in the corresponding cluster of the bitmap -are assumed to be 0. - -If an L1 table entry is 1, all bits in the corresponding cluster of the bitmap -are assumed to be 1. - -If an L1 table entry is not 0 or 1, it contains the corresponding cluster -offset (in 512b sectors). Given an offset in bytes into the bitmap data the -offset in bytes into the image file can be obtained as follows: - - offset = l1_table[offset / cluster_size] * 512 + (offset % cluster_size) diff --git a/docs/interop/prl-xml.rst b/docs/interop/prl-xml.rst new file mode 100644 index 00000000000..5bb63bb93a4 --- /dev/null +++ b/docs/interop/prl-xml.rst @@ -0,0 +1,192 @@ +Parallels Disk Format +===================== + +.. + Copyright (c) 2015-2017, Virtuozzo, Inc. + Authors: + 2015 Denis Lunev + 2015 Vladimir Sementsov-Ogievskiy + 2016-2017 Klim Kireev + 2016-2017 Edgar Kaziakhmedov + + This work is licensed under the terms of the GNU GPL, version 2 or later. + See the COPYING file in the top-level directory. + +This specification contains minimal information about Parallels Disk Format, +which is enough to properly work with QEMU. 
Nevertheless, Parallels Cloud Server +and Parallels Desktop are able to add some unspecified nodes to the xml and use +them, but they are for internal work and don't affect functionality. Also it +uses auxiliary xml ``Snapshot.xml``, which allows storage of optional snapshot +information, but this doesn't influence open/read/write functionality. QEMU and +other software should not use fields not covered in this document or the +``Snapshot.xml`` file, and must leave them as is. + +A Parallels disk consists of two parts: the set of snapshots and the disk +descriptor file, which stores information about all files and snapshots. + +Definitions +----------- + +Snapshot + a record of the contents captured at a particular time, capable + of storing current state. A snapshot has a UUID and a parent UUID. + +Snapshot image + an overlay representing the difference between this + snapshot and some earlier snapshot. + +Overlay + an image storing the different sectors between two captured states. + +Root image + a snapshot image with no parent, the root of the snapshot tree. + +Storage + the backing storage for a subset of the virtual disk. When + there is more than one storage in a Parallels disk then that + is referred to as a split image. In this case every storage + covers a specific address space area of the disk and has its + particular root image. Split images are not considered here + and are not supported. Each storage consists of disk + parameters and a list of images. The list of images always + contains a root image and may also contain overlays. The + root image can be an expandable Parallels image file or + plain. Overlays must be expandable. + +Description file + ``DiskDescriptor.xml`` stores information about disk parameters, + snapshots, and storages. + +Top Snapshot + The overlay between actual state and some previous snapshot. + It is not a snapshot in the classical sense because it + serves as the active image that the guest writes to. + +Sector + a 512-byte data chunk. + +Description file +---------------- + +All information is placed in a single XML element +``Parallels_disk_image``. +The element has only one attribute, ``Version``, which must be ``1.0``. + +The schema of ``DiskDescriptor.xml``:: + + + + ... + + + ... + + + ... + + + +``Disk_Parameters`` element +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``Disk_Parameters`` element describes the physical layout of the +virtual disk and some general settings. + +The ``Disk_Parameters`` element MUST contain the following child elements: + +* ``Disk_size`` - number of sectors in the disk, + desired size of the disk. +* ``Cylinders`` - number of the disk cylinders. +* ``Heads`` - number of the disk heads. +* ``Sectors`` - number of the disk sectors per cylinder + (sector size is 512 bytes) + Limitation: The product of the ``Heads``, ``Sectors`` and ``Cylinders`` + values MUST be equal to the value of the Disk_size parameter. +* ``Padding`` - must be 0. Parallels Cloud Server and Parallels Desktop may + use padding set to 1; however this case is not covered + by this specification. QEMU and other software should not open + such disks and should not create them. + +``StorageData`` element +^^^^^^^^^^^^^^^^^^^^^^^ + +This element of the file describes the root image and all snapshot images. + +The ``StorageData`` element consists of the ``Storage`` child element, +as shown below:: + + + + ... + + + +A ``Storage`` element has the following child elements: + +* ``Start`` - start sector of the storage, in case of non split storage + equals to 0. 
+* ``End`` - number of sector following the last sector, in case of non + split storage equals to ``Disk_size``. +* ``Blocksize`` - storage cluster size, number of sectors per one cluster. + The cluster size for each "Compressed" (see below) image in + a parallels disk must be equal to this field. Note: the cluster + size for a Parallels Expandable Image is in the ``tracks`` field of + its header (see :doc:`parallels`). +* Several ``Image`` child elements. + +Each ``Image`` element has the following child elements: + +* ``GUID`` - image identifier, UUID in curly brackets. + For instance, ``{12345678-9abc-def1-2345-6789abcdef12}.`` + The GUID is used by the Snapshots element to reference images + (see below) +* ``Type`` - image type of the element. It can be: + + * ``Plain`` for raw files. + * ``Compressed`` for expanding disks. + +* ``File`` - path to image file. The path can be relative to + ``DiskDescriptor.xml`` or absolute. + +``Snapshots`` element +^^^^^^^^^^^^^^^^^^^^^ + +The ``Snapshots`` element describes the snapshot relations with the snapshot tree. + +The element contains the set of ``Shot`` child elements, as shown below:: + + + ... /* Optional child element */ + + ... + + + ... + + ... + + +Each ``Shot`` element contains the following child elements: + +* ``GUID`` - an image GUID. +* ``ParentGUID`` - GUID of the image of the parent snapshot. + +The software may traverse snapshots from child to parent using the +```` field as reference. The ``ParentGUID`` of the root +snapshot is ``{00000000-0000-0000-0000-000000000000}``. +There should be only one root snapshot. + +The Top snapshot could be +described via two ways: via the ``TopGUID`` child +element of the ``Snapshots`` element, or via the predefined GUID +``{5fbaabe3-6958-40ff-92a7-860e329aab41}``. If ``TopGUID`` is defined, +the predefined GUID is interpreted as a normal GUID. All snapshot images +(except the Top Snapshot) should be +opened read-only. + +There is another predefined GUID, +``BackupID = {704718e1-2314-44c8-9087-d78ed36b0f4e}``, which is used by +original and some third-party software for backup. QEMU and other +software may operate with images with ``GUID = BackupID`` as usual. +However, it is not recommended to use this +GUID for new disks. The Top snapshot cannot have this GUID. diff --git a/docs/interop/prl-xml.txt b/docs/interop/prl-xml.txt deleted file mode 100644 index cf9b3fba265..00000000000 --- a/docs/interop/prl-xml.txt +++ /dev/null @@ -1,158 +0,0 @@ -= License = - -Copyright (c) 2015-2017, Virtuozzo, Inc. -Authors: - 2015 Denis Lunev - 2015 Vladimir Sementsov-Ogievskiy - 2016-2017 Klim Kireev - 2016-2017 Edgar Kaziakhmedov - -This work is licensed under the terms of the GNU GPL, version 2 or later. -See the COPYING file in the top-level directory. - -This specification contains minimal information about Parallels Disk Format, -which is enough to proper work with QEMU. Nevertheless, Parallels Cloud Server -and Parallels Desktop are able to add some unspecified nodes to xml and use -them, but they are for internal work and don't affect functionality. Also it -uses auxiliary xml "Snapshot.xml", which allows to store optional snapshot -information, but it doesn't influence open/read/write functionality. QEMU and -other software should not use fields not covered in this document and -Snapshot.xml file and must leave them as is. - -= Parallels Disk Format = - -Parallels disk consists of two parts: the set of snapshots and the disk -descriptor file, which stores information about all files and snapshots. 
- -== Definitions == - Snapshot a record of the contents captured at a particular time, - capable of storing current state. A snapshot has UUID and - parent UUID. - - Snapshot image an overlay representing the difference between this - snapshot and some earlier snapshot. - - Overlay an image storing the different sectors between two captured - states. - - Root image snapshot image with no parent, the root of snapshot tree. - - Storage the backing storage for a subset of the virtual disk. When - there is more than one storage in a Parallels disk then that - is referred to as a split image. In this case every storage - covers specific address space area of the disk and has its - particular root image. Split images are not considered here - and are not supported. Each storage consists of disk - parameters and a list of images. The list of images always - contains a root image and may also contain overlays. The - root image can be an expandable Parallels image file or - plain. Overlays must be expandable. - - Description DiskDescriptor.xml stores information about disk parameters, - file snapshots, storages. - - Top The overlay between actual state and some previous snapshot. - Snapshot It is not a snapshot in the classical sense because it - serves as the active image that the guest writes to. - - Sector a 512-byte data chunk. - -== Description file == -All information is placed in a single XML element Parallels_disk_image. -The element has only one attribute "Version", that must be 1.0. -Schema of DiskDescriptor.xml: - - - - ... - - - ... - - - ... - - - -== Disk_Parameters element == -The Disk_Parameters element describes the physical layout of the virtual disk -and some general settings. - -The Disk_Parameters element MUST contain the following child elements: - * Disk_size - number of sectors in the disk, - desired size of the disk. - * Cylinders - number of the disk cylinders. - * Heads - number of the disk heads. - * Sectors - number of the disk sectors per cylinder - (sector size is 512 bytes) - Limitation: Product of the Heads, Sectors and Cylinders - values MUST be equal to the value of the Disk_size parameter. - * Padding - must be 0. Parallels Cloud Server and Parallels Desktop may - use padding set to 1, however this case is not covered - by this spec, QEMU and other software should not open - such disks and should not create them. - -== StorageData element == -This element of the file describes the root image and all snapshot images. - -The StorageData element consists of the Storage child element, as shown below: - - - ... - - - -A Storage element has following child elements: - * Start - start sector of the storage, in case of non split storage - equals to 0. - * End - number of sector following the last sector, in case of non - split storage equals to Disk_size. - * Blocksize - storage cluster size, number of sectors per one cluster. - Cluster size for each "Compressed" (see below) image in - parallels disk must be equal to this field. Note: cluster - size for Parallels Expandable Image is in 'tracks' field of - its header (see docs/interop/parallels.txt). - * Several Image child elements. - -Each Image element has following child elements: - * GUID - image identifier, UUID in curly brackets. - For instance, {12345678-9abc-def1-2345-6789abcdef12}. - The GUID is used by the Snapshots element to reference images - (see below) - * Type - image type of the element. It can be: - "Plain" for raw files. - "Compressed" for expanding disks. - * File - path to image file. 
Path can be relative to DiskDescriptor.xml or - absolute. - -== Snapshots element == -The Snapshots element describes the snapshot relations with the snapshot tree. - -The element contains the set of Shot child elements, as shown below: - - ... /* Optional child element */ - - ... - - - ... - - ... - - -Each Shot element contains the following child elements: - * GUID - an image GUID. - * ParentGUID - GUID of the image of the parent snapshot. - -The software may traverse snapshots from child to parent using -field as reference. ParentGUID of root snapshot is -{00000000-0000-0000-0000-000000000000}. There should be only one root -snapshot. Top snapshot could be described via two ways: via TopGUID child -element of the Snapshots element or via predefined GUID -{5fbaabe3-6958-40ff-92a7-860e329aab41}. If TopGUID is defined, predefined GUID is -interpreted as usual GUID. All snapshot images (except Top Snapshot) should be -opened read-only. There is another predefined GUID, -BackupID = {704718e1-2314-44c8-9087-d78ed36b0f4e}, which is used by original and -some third-party software for backup, QEMU and other software may operate with -images with GUID = BackupID as usual, however, it is not recommended to use this -GUID for new disks. Top snapshot cannot have this GUID. diff --git a/docs/interop/qcow2.rst b/docs/interop/qcow2.rst new file mode 100644 index 00000000000..59485911073 --- /dev/null +++ b/docs/interop/qcow2.rst @@ -0,0 +1,937 @@ +======================= +Qcow2 Image File Format +======================= + +A ``qcow2`` image file is organized in units of constant size, which are called +(host) clusters. A cluster is the unit in which all allocations are done, +both for actual guest data and for image metadata. + +Likewise, the virtual disk as seen by the guest is divided into (guest) +clusters of the same size. + +All numbers in qcow2 are stored in Big Endian byte order. + +Header +------ + +The first cluster of a qcow2 image contains the file header:: + + Byte 0 - 3: magic + QCOW magic string ("QFI\xfb") + + 4 - 7: version + Version number (valid values are 2 and 3) + + 8 - 15: backing_file_offset + Offset into the image file at which the backing file name + is stored (NB: The string is not null terminated). 0 if the + image doesn't have a backing file. + + Note: backing files are incompatible with raw external data + files (auto-clear feature bit 1). + + 16 - 19: backing_file_size + Length of the backing file name in bytes. Must not be + longer than 1023 bytes. Undefined if the image doesn't have + a backing file. + + 20 - 23: cluster_bits + Number of bits that are used for addressing an offset + within a cluster (1 << cluster_bits is the cluster size). + Must not be less than 9 (i.e. 512 byte clusters). + + Note: QEMU as of today has an implementation limit of 2 MB + as the maximum cluster size and won't be able to open images + with larger cluster sizes. + + Note: if the image has Extended L2 Entries then cluster_bits + must be at least 14 (i.e. 16384 byte clusters). + + 24 - 31: size + Virtual disk size in bytes. + + Note: QEMU has an implementation limit of 32 MB as + the maximum L1 table size. With a 2 MB cluster + size, it is unable to populate a virtual cluster + beyond 2 EB (61 bits); with a 512 byte cluster + size, it is unable to populate a virtual size + larger than 128 GB (37 bits). Meanwhile, L1/L2 + table layouts limit an image to no more than 64 PB + (56 bits) of populated clusters, and an image may + hit other limits first (such as a file system's + maximum size). 
+ + 32 - 35: crypt_method + 0 for no encryption + 1 for AES encryption + 2 for LUKS encryption + + 36 - 39: l1_size + Number of entries in the active L1 table + + 40 - 47: l1_table_offset + Offset into the image file at which the active L1 table + starts. Must be aligned to a cluster boundary. + + 48 - 55: refcount_table_offset + Offset into the image file at which the refcount table + starts. Must be aligned to a cluster boundary. + + 56 - 59: refcount_table_clusters + Number of clusters that the refcount table occupies + + 60 - 63: nb_snapshots + Number of snapshots contained in the image + + 64 - 71: snapshots_offset + Offset into the image file at which the snapshot table + starts. Must be aligned to a cluster boundary. + +For version 2, the header is exactly 72 bytes in length, and finishes here. +For version 3 or higher, the header length is at least 104 bytes, including +the next fields through ``header_length``. +:: + + 72 - 79: incompatible_features + Bitmask of incompatible features. An implementation must + fail to open an image if an unknown bit is set. + + Bit 0: Dirty bit. If this bit is set then refcounts + may be inconsistent, make sure to scan L1/L2 + tables to repair refcounts before accessing the + image. + + Bit 1: Corrupt bit. If this bit is set then any data + structure may be corrupt and the image must not + be written to (unless for regaining + consistency). + + Bit 2: External data file bit. If this bit is set, an + external data file is used. Guest clusters are + then stored in the external data file. For such + images, clusters in the external data file are + not refcounted. The offset field in the + Standard Cluster Descriptor must match the + guest offset and neither compressed clusters + nor internal snapshots are supported. + + An External Data File Name header extension may + be present if this bit is set. + + Bit 3: Compression type bit. If this bit is set, + a non-default compression is used for compressed + clusters. The compression_type field must be + present and not zero. + + Bit 4: Extended L2 Entries. If this bit is set then + L2 table entries use an extended format that + allows subcluster-based allocation. See the + Extended L2 Entries section for more details. + + Bits 5-63: Reserved (set to 0) + + 80 - 87: compatible_features + Bitmask of compatible features. An implementation can + safely ignore any unknown bits that are set. + + Bit 0: Lazy refcounts bit. If this bit is set then + lazy refcount updates can be used. This means + marking the image file dirty and postponing + refcount metadata updates. + + Bits 1-63: Reserved (set to 0) + + 88 - 95: autoclear_features + Bitmask of auto-clear features. An implementation may only + write to an image with unknown auto-clear features if it + clears the respective bits from this field first. + + Bit 0: Bitmaps extension bit + This bit indicates consistency for the bitmaps + extension data. + + It is an error if this bit is set without the + bitmaps extension present. + + If the bitmaps extension is present but this + bit is unset, the bitmaps extension data must be + considered inconsistent. + + Bit 1: Raw external data bit + If this bit is set, the external data file can + be read as a consistent standalone raw image + without looking at the qcow2 metadata. + + Setting this bit has a performance impact for + some operations on the image (e.g. writing + zeros requires writing to the data file instead + of only setting the zero flag in the L2 table + entry) and conflicts with backing files. 
+ + This bit may only be set if the External Data + File bit (incompatible feature bit 1) is also + set. + + Bits 2-63: Reserved (set to 0) + + 96 - 99: refcount_order + Describes the width of a reference count block entry (width + in bits: refcount_bits = 1 << refcount_order). For version 2 + images, the order is always assumed to be 4 + (i.e. refcount_bits = 16). + This value may not exceed 6 (i.e. refcount_bits = 64). + + 100 - 103: header_length + Length of the header structure in bytes. For version 2 + images, the length is always assumed to be 72 bytes. + For version 3 it's at least 104 bytes and must be a multiple + of 8. + + +Additional fields (version 3 and higher) +---------------------------------------- + +In general, these fields are optional and may be safely ignored by the software, +as well as filled by zeros (which is equal to field absence), if software needs +to set field B, but does not care about field A which precedes B. More +formally, additional fields have the following compatibility rules: + +1. If the value of the additional field must not be ignored for correct + handling of the file, it will be accompanied by a corresponding incompatible + feature bit. + +2. If there are no unrecognized incompatible feature bits set, an unknown + additional field may be safely ignored other than preserving its value when + rewriting the image header. + +.. _ref_rules_3: + +3. An explicit value of 0 will have the same behavior as when the field is not + present*, if not altered by a specific incompatible bit. + +(*) A field is considered not present when ``header_length`` is less than or equal +to the field's offset. Also, all additional fields are not present for +version 2. + +:: + + 104: compression_type + + Defines the compression method used for compressed clusters. + All compressed clusters in an image use the same compression + type. + + If the incompatible bit "Compression type" is set: the field + must be present and non-zero (which means non-deflate + compression type). Otherwise, this field must not be present + or must be zero (which means deflate). + + Available compression type values: + - 0: deflate + - 1: zstd + + The deflate compression type is called "zlib" + in QEMU. However, clusters with the + deflate compression type do not have zlib headers. + + 105 - 111: Padding, contents defined below. + +Header padding +-------------- + +``header_length`` must be a multiple of 8, which means that if the end of the last +additional field is not aligned, some padding is needed. This padding must be +zeroed, so that if some existing (or future) additional field will fall into +the padding, it will be interpreted accordingly to point `[3.] <#ref_rules_3>`_ of the previous +paragraph, i.e. in the same manner as when this field is not present. + + +Header extensions +----------------- + +Directly after the image header, optional sections called header extensions can +be stored. Each extension has a structure like the following:: + + Byte 0 - 3: Header extension type: + 0x00000000 - End of the header extension area + 0xe2792aca - Backing file format name string + 0x6803f857 - Feature name table + 0x23852875 - Bitmaps extension + 0x0537be77 - Full disk encryption header pointer + 0x44415441 - External data file name string + other - Unknown header extension, can be safely + ignored + + 4 - 7: Length of the header extension data + + 8 - n: Header extension data + + n - m: Padding to round up the header extension size to the next + multiple of 8. 
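+
+As an illustration only (not part of the specification), a reader could walk
+the header extension area with a sketch like the following, assuming ``f`` is
+the image file opened in binary mode and ``header_length`` has already been
+read from a version 3 header::
+
+    import struct
+
+    def header_extensions(f, header_length):
+        # Extensions start directly after the header; all integers are
+        # big endian.
+        f.seek(header_length)
+        while True:
+            ext_type, length = struct.unpack(">II", f.read(8))
+            if ext_type == 0x00000000:      # end of the extension area
+                return
+            data = f.read(length)
+            f.seek(-length % 8, 1)          # skip padding to an 8-byte boundary
+            yield ext_type, data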
+ +Unless stated otherwise, each header extension type shall appear at most once +in the same image. + +If the image has a backing file then the backing file name should be stored in +the remaining space between the end of the header extension area and the end of +the first cluster. It is not allowed to store other data here, so that an +implementation can safely modify the header and add extensions without harming +data of compatible features that it doesn't support. Compatible features that +need space for additional data can use a header extension. + + +String header extensions +------------------------ + +Some header extensions (such as the backing file format name and the external +data file name) are just a single string. In this case, the header extension +length is the string length and the string is not ``\0`` terminated. (The header +extension padding can make it look like a string is ``\0`` terminated, but +neither is padding always necessary nor is there a guarantee that zero bytes +are used for padding.) + + +Feature name table +------------------ + +The feature name table is an optional header extension that contains the name +for features used by the image. It can be used by applications that don't know +the respective feature (e.g. because the feature was introduced only later) to +display a useful error message. + +The number of entries in the feature name table is determined by the length of +the header extension data. Each entry looks like this:: + + Byte 0: Type of feature (select feature bitmap) + 0: Incompatible feature + 1: Compatible feature + 2: Autoclear feature + + 1: Bit number within the selected feature bitmap (valid + values: 0-63) + + 2 - 47: Feature name (padded with zeros, but not necessarily null + terminated if it has full length) + + +Bitmaps extension +----------------- + +The bitmaps extension is an optional header extension. It provides the ability +to store bitmaps related to a virtual disk. For now, there is only one bitmap +type: the dirty tracking bitmap, which tracks virtual disk changes from some +point in time. + +The data of the extension should be considered consistent only if the +corresponding auto-clear feature bit is set, see ``autoclear_features`` above. + +The fields of the bitmaps extension are:: + + Byte 0 - 3: nb_bitmaps + The number of bitmaps contained in the image. Must be + greater than or equal to 1. + + Note: QEMU currently only supports up to 65535 bitmaps per + image. + + 4 - 7: Reserved, must be zero. + + 8 - 15: bitmap_directory_size + Size of the bitmap directory in bytes. It is the cumulative + size of all (nb_bitmaps) bitmap directory entries. + + 16 - 23: bitmap_directory_offset + Offset into the image file at which the bitmap directory + starts. Must be aligned to a cluster boundary. + +Full disk encryption header pointer +----------------------------------- + +The full disk encryption header must be present if, and only if, the +``crypt_method`` header requires metadata. Currently this is only true +of the ``LUKS`` crypt method. The header extension must be absent for +other methods. + +This header provides the offset at which the crypt method can store +its additional data, as well as the length of such data. +:: + + Byte 0 - 7: Offset into the image file at which the encryption + header starts in bytes. Must be aligned to a cluster + boundary. + Byte 8 - 15: Length of the written encryption header in bytes. 
+ Note actual space allocated in the qcow2 file may + be larger than this value, since it will be rounded + to the nearest multiple of the cluster size. Any + unused bytes in the allocated space will be initialized + to 0. + +For the LUKS crypt method, the encryption header works as follows. + +The first 592 bytes of the header clusters will contain the LUKS +partition header. This is then followed by the key material data areas. +The size of the key material data areas is determined by the number of +stripes in the key slot and key size. Refer to the LUKS format +specification (``docs/on-disk-format.pdf`` in the cryptsetup source +package) for details of the LUKS partition header format. + +In the LUKS partition header, the ``payload-offset`` field will be +calculated as normal for the LUKS spec. ie the size of the LUKS +header, plus key material regions, plus padding, relative to the +start of the LUKS header. This offset value is not required to be +qcow2 cluster aligned. Its value is currently never used in the +context of qcow2, since the qcow2 file format itself defines where +the real payload offset is, but none the less a valid payload offset +should always be present. + +In the LUKS key slots header, the ``key-material-offset`` is relative +to the start of the LUKS header clusters in the qcow2 container, +not the start of the qcow2 file. + +Logically the layout looks like +:: + + +-----------------------------+ + | QCow2 header | + | QCow2 header extension X | + | QCow2 header extension FDE | + | QCow2 header extension ... | + | QCow2 header extension Z | + +-----------------------------+ + | ....other QCow2 tables.... | + . . + . . + +-----------------------------+ + | +-------------------------+ | + | | LUKS partition header | | + | +-------------------------+ | + | | LUKS key material 1 | | + | +-------------------------+ | + | | LUKS key material 2 | | + | +-------------------------+ | + | | LUKS key material ... | | + | +-------------------------+ | + | | LUKS key material 8 | | + | +-------------------------+ | + +-----------------------------+ + | QCow2 cluster payload | + . . + . . + . . + | | + +-----------------------------+ + +Data encryption +--------------- + +When an encryption method is requested in the header, the image payload +data must be encrypted/decrypted on every write/read. The image headers +and metadata are never encrypted. + +The algorithms used for encryption vary depending on the method + + - ``AES``: + + The AES cipher, in CBC mode, with 256 bit keys. + + Initialization vectors generated using plain64 method, with + the virtual disk sector as the input tweak. + + This format is no longer supported in QEMU system emulators, due + to a number of design flaws affecting its security. It is only + supported in the command line tools for the sake of back compatibility + and data liberation. + + - ``LUKS``: + + The algorithms are specified in the LUKS header. + + Initialization vectors generated using the method specified + in the LUKS header, with the physical disk sector as the + input tweak. + +Host cluster management +----------------------- + +qcow2 manages the allocation of host clusters by maintaining a reference count +for each host cluster. A refcount of 0 means that the cluster is free, 1 means +that it is used, and >= 2 means that it is used and any write access must +perform a COW (copy on write) operation. + +The refcounts are managed in a two-level table. 
The first level is called +refcount table and has a variable size (which is stored in the header). The +refcount table can cover multiple clusters, however it needs to be contiguous +in the image file. + +It contains pointers to the second level structures which are called refcount +blocks and are exactly one cluster in size. + +Although a large enough refcount table can reserve clusters past 64 PB +(56 bits) (assuming the underlying protocol can even be sized that +large), note that some qcow2 metadata such as L1/L2 tables must point +to clusters prior to that point. + +.. note:: + QEMU has an implementation limit of 8 MB as the maximum refcount + table size. With a 2 MB cluster size and a default refcount_order of + 4, it is unable to reference host resources beyond 2 EB (61 bits); in + the worst case, with a 512 cluster size and refcount_order of 6, it is + unable to access beyond 32 GB (35 bits). + +Given an offset into the image file, the refcount of its cluster can be +obtained as follows:: + + refcount_block_entries = (cluster_size * 8 / refcount_bits) + + refcount_block_index = (offset / cluster_size) % refcount_block_entries + refcount_table_index = (offset / cluster_size) / refcount_block_entries + + refcount_block = load_cluster(refcount_table[refcount_table_index]); + return refcount_block[refcount_block_index]; + +Refcount table entry:: + + Bit 0 - 8: Reserved (set to 0) + + 9 - 63: Bits 9-63 of the offset into the image file at which the + refcount block starts. Must be aligned to a cluster + boundary. + + If this is 0, the corresponding refcount block has not yet + been allocated. All refcounts managed by this refcount block + are 0. + +Refcount block entry ``(x = refcount_bits - 1)``:: + + Bit 0 - x: Reference count of the cluster. If refcount_bits implies a + sub-byte width, note that bit 0 means the least significant + bit in this context. + + +Cluster mapping +--------------- + +Just as for refcounts, qcow2 uses a two-level structure for the mapping of +guest clusters to host clusters. They are called L1 and L2 table. + +The L1 table has a variable size (stored in the header) and may use multiple +clusters, however it must be contiguous in the image file. L2 tables are +exactly one cluster in size. + +The L1 and L2 tables have implications on the maximum virtual file +size; for a given L1 table size, a larger cluster size is required for +the guest to have access to more space. Furthermore, a virtual +cluster must currently map to a host offset below 64 PB (56 bits) +(although this limit could be relaxed by putting reserved bits into +use). Additionally, as cluster size increases, the maximum host +offset for a compressed cluster is reduced (a 2M cluster size requires +compressed clusters to reside below 512 TB (49 bits), and this limit +cannot be relaxed without an incompatible layout change). + +Given an offset into the virtual disk, the offset into the image file can be +obtained as follows:: + + l2_entries = (cluster_size / sizeof(uint64_t)) [*] + + l2_index = (offset / cluster_size) % l2_entries + l1_index = (offset / cluster_size) / l2_entries + + l2_table = load_cluster(l1_table[l1_index]); + cluster_offset = l2_table[l2_index]; + + return cluster_offset + (offset % cluster_size) + + [*] this changes if Extended L2 Entries are enabled, see next section + +L1 table entry:: + + Bit 0 - 8: Reserved (set to 0) + + 9 - 55: Bits 9-55 of the offset into the image file at which the L2 + table starts. Must be aligned to a cluster boundary. 
If the + offset is 0, the L2 table and all clusters described by this + L2 table are unallocated. + + 56 - 62: Reserved (set to 0) + + 63: 0 for an L2 table that is unused or requires COW, 1 if its + refcount is exactly one. This information is only accurate + in the active L1 table. + +L2 table entry:: + + Bit 0 - 61: Cluster descriptor + + 62: 0 for standard clusters + 1 for compressed clusters + + 63: 0 for clusters that are unused, compressed or require COW. + 1 for standard clusters whose refcount is exactly one. + This information is only accurate in L2 tables + that are reachable from the active L1 table. + + With external data files, all guest clusters have an + implicit refcount of 1 (because of the fixed host = guest + mapping for guest cluster offsets), so this bit should be 1 + for all allocated clusters. + +Standard Cluster Descriptor:: + + Bit 0: If set to 1, the cluster reads as all zeros. The host + cluster offset can be used to describe a preallocation, + but it won't be used for reading data from this cluster, + nor is data read from the backing file if the cluster is + unallocated. + + With version 2 or with extended L2 entries (see the next + section), this is always 0. + + 1 - 8: Reserved (set to 0) + + 9 - 55: Bits 9-55 of host cluster offset. Must be aligned to a + cluster boundary. If the offset is 0 and bit 63 is clear, + the cluster is unallocated. The offset may only be 0 with + bit 63 set (indicating a host cluster offset of 0) when an + external data file is used. + + 56 - 61: Reserved (set to 0) + + +Compressed Clusters Descriptor ``(x = 62 - (cluster_bits - 8))``:: + + Bit 0 - x-1: Host cluster offset. This is usually _not_ aligned to a + cluster or sector boundary! If cluster_bits is + small enough that this field includes bits beyond + 55, those upper bits must be set to 0. + + x - 61: Number of additional 512-byte sectors used for the + compressed data, beyond the sector containing the offset + in the previous field. Some of these sectors may reside + in the next contiguous host cluster. + + Note that the compressed data does not necessarily occupy + all of the bytes in the final sector; rather, decompression + stops when it has produced a cluster of data. + + Another compressed cluster may map to the tail of the final + sector used by this compressed cluster. + +If a cluster is unallocated, read requests shall read the data from the backing +file (except if bit 0 in the Standard Cluster Descriptor is set). If there is +no backing file or the backing file is smaller than the image, they shall read +zeros for all parts that are not covered by the backing file. + +Extended L2 Entries +------------------- + +An image uses Extended L2 Entries if bit 4 is set on the incompatible_features +field of the header. + +In these images standard data clusters are divided into 32 subclusters of the +same size. They are contiguous and start from the beginning of the cluster. +Subclusters can be allocated independently and the L2 entry contains information +indicating the status of each one of them. Compressed data clusters don't have +subclusters so they are treated the same as in images without this feature. + +The size of an extended L2 entry is 128 bits so the number of entries per table +is calculated using this formula: + +.. code:: + + l2_entries = (cluster_size / (2 * sizeof(uint64_t))) + +The first 64 bits have the same format as the standard L2 table entry described +in the previous section, with the exception of bit 0 of the standard cluster +descriptor. 
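+
+As an informal illustration (not part of the format definition), a reader of
+such an image could split each 128-bit entry into its two halves and reuse the
+standard decoding above for the first one. This is only a sketch under the
+assumption that the entry is taken directly from the raw L2 cluster bytes; the
+function name is hypothetical:
+
+.. code:: python
+
+   import struct
+
+   def read_extended_l2_entry(l2_cluster, index):
+       """Return (standard_part, subcluster_bitmap) for entry number 'index'.
+
+       l2_cluster holds the raw bytes of one L2 table cluster; each extended
+       entry occupies 16 bytes, stored as two big-endian 64-bit words.
+       """
+       standard_part, subcluster_bitmap = struct.unpack_from('>QQ', l2_cluster,
+                                                             index * 16)
+       # standard_part follows the L2 table entry layout from the previous
+       # section (except bit 0 of the standard cluster descriptor);
+       # subcluster_bitmap is interpreted as described below.
+       return standard_part, subcluster_bitmap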
+ +The last 64 bits contain a subcluster allocation bitmap with this format: + +Subcluster Allocation Bitmap (for standard clusters):: + + Bit 0 - 31: Allocation status (one bit per subcluster) + + 1: the subcluster is allocated. In this case the + host cluster offset field must contain a valid + offset. + 0: the subcluster is not allocated. In this case + read requests shall go to the backing file or + return zeros if there is no backing file data. + + Bits are assigned starting from the least significant + one (i.e. bit x is used for subcluster x). + + 32 - 63 Subcluster reads as zeros (one bit per subcluster) + + 1: the subcluster reads as zeros. In this case the + allocation status bit must be unset. The host + cluster offset field may or may not be set. + 0: no effect. + + Bits are assigned starting from the least significant + one (i.e. bit x is used for subcluster x - 32). + +Subcluster Allocation Bitmap (for compressed clusters):: + + Bit 0 - 63: Reserved (set to 0) + Compressed clusters don't have subclusters, + so this field is not used. + +Snapshots +--------- + +qcow2 supports internal snapshots. Their basic principle of operation is to +switch the active L1 table, so that a different set of host clusters are +exposed to the guest. + +When creating a snapshot, the L1 table should be copied and the refcount of all +L2 tables and clusters reachable from this L1 table must be increased, so that +a write causes a COW and isn't visible in other snapshots. + +When loading a snapshot, bit 63 of all entries in the new active L1 table and +all L2 tables referenced by it must be reconstructed from the refcount table +as it doesn't need to be accurate in inactive L1 tables. + +A directory of all snapshots is stored in the snapshot table, a contiguous area +in the image file, whose starting offset and length are given by the header +fields snapshots_offset and nb_snapshots. The entries of the snapshot table +have variable length, depending on the length of ID, name and extra data. + +Snapshot table entry:: + + Byte 0 - 7: Offset into the image file at which the L1 table for the + snapshot starts. Must be aligned to a cluster boundary. + + 8 - 11: Number of entries in the L1 table of the snapshots + + 12 - 13: Length of the unique ID string describing the snapshot + + 14 - 15: Length of the name of the snapshot + + 16 - 19: Time at which the snapshot was taken in seconds since the + Epoch + + 20 - 23: Subsecond part of the time at which the snapshot was taken + in nanoseconds + + 24 - 31: Time that the guest was running until the snapshot was + taken in nanoseconds + + 32 - 35: Size of the VM state in bytes. 0 if no VM state is saved. + If there is VM state, it starts at the first cluster + described by first L1 table entry that doesn't describe a + regular guest cluster (i.e. VM state is stored like guest + disk content, except that it is stored at offsets that are + larger than the virtual disk presented to the guest) + + 36 - 39: Size of extra data in the table entry (used for future + extensions of the format) + + variable: Extra data for future extensions. Unknown fields must be + ignored. Currently defined are (offset relative to snapshot + table entry): + + Byte 40 - 47: Size of the VM state in bytes. 0 if no VM + state is saved. If this field is present, + the 32-bit value in bytes 32-35 is ignored. + + Byte 48 - 55: Virtual disk size of the snapshot in bytes + + Byte 56 - 63: icount value which corresponds to + the record/replay instruction count + when the snapshot was taken. 
Set to -1 + if icount was disabled + + Version 3 images must include extra data at least up to + byte 55. + + variable: Unique ID string for the snapshot (not null terminated) + + variable: Name of the snapshot (not null terminated) + + variable: Padding to round up the snapshot table entry size to the + next multiple of 8. + + +Bitmaps +------- + +As mentioned above, the bitmaps extension provides the ability to store bitmaps +related to a virtual disk. This section describes how these bitmaps are stored. + +All stored bitmaps are related to the virtual disk stored in the same image, so +each bitmap size is equal to the virtual disk size. + +Each bit of the bitmap is responsible for strictly defined range of the virtual +disk. For bit number bit_nr the corresponding range (in bytes) will be: + +.. code:: + + [bit_nr * bitmap_granularity .. (bit_nr + 1) * bitmap_granularity - 1] + +Granularity is a property of the concrete bitmap, see below. + + +Bitmap directory +---------------- + +Each bitmap saved in the image is described in a bitmap directory entry. The +bitmap directory is a contiguous area in the image file, whose starting offset +and length are given by the header extension fields ``bitmap_directory_offset`` and +``bitmap_directory_size``. The entries of the bitmap directory have variable +length, depending on the lengths of the bitmap name and extra data. + +Structure of a bitmap directory entry:: + + Byte 0 - 7: bitmap_table_offset + Offset into the image file at which the bitmap table + (described below) for the bitmap starts. Must be aligned to + a cluster boundary. + + 8 - 11: bitmap_table_size + Number of entries in the bitmap table of the bitmap. + + 12 - 15: flags + Bit + 0: in_use + The bitmap was not saved correctly and may be + inconsistent. Although the bitmap metadata is still + well-formed from a qcow2 perspective, the metadata + (such as the auto flag or bitmap size) or data + contents may be outdated. + + 1: auto + The bitmap must reflect all changes of the virtual + disk by any application that would write to this qcow2 + file (including writes, snapshot switching, etc.). The + type of this bitmap must be 'dirty tracking bitmap'. + + 2: extra_data_compatible + This flags is meaningful when the extra data is + unknown to the software (currently any extra data is + unknown to QEMU). + If it is set, the bitmap may be used as expected, extra + data must be left as is. + If it is not set, the bitmap must not be used, but + both it and its extra data be left as is. + + Bits 3 - 31 are reserved and must be 0. + + 16: type + This field describes the sort of the bitmap. + Values: + 1: Dirty tracking bitmap + + Values 0, 2 - 255 are reserved. + + 17: granularity_bits + Granularity bits. Valid values: 0 - 63. + + Note: QEMU currently supports only values 9 - 31. + + Granularity is calculated as + granularity = 1 << granularity_bits + + A bitmap's granularity is how many bytes of the image + accounts for one bit of the bitmap. + + 18 - 19: name_size + Size of the bitmap name. Must be non-zero. + + Note: QEMU currently doesn't support values greater than + 1023. + + 20 - 23: extra_data_size + Size of type-specific extra data. + + For now, as no extra data is defined, extra_data_size is + reserved and should be zero. If it is non-zero the + behavior is defined by extra_data_compatible flag. + + variable: extra_data + Extra data for the bitmap, occupying extra_data_size bytes. 
+ Extra data must never contain references to clusters or in + some other way allocate additional clusters. + + variable: name + The name of the bitmap (not null terminated), occupying + name_size bytes. Must be unique among all bitmap names + within the bitmaps extension. + + variable: Padding to round up the bitmap directory entry size to the + next multiple of 8. All bytes of the padding must be zero. + + +Bitmap table +------------ + +Each bitmap is stored using a one-level structure (as opposed to two-level +structures like for refcounts and guest clusters mapping) for the mapping of +bitmap data to host clusters. This structure is called the bitmap table. + +Each bitmap table has a variable size (stored in the bitmap directory entry) +and may use multiple clusters, however, it must be contiguous in the image +file. + +Structure of a bitmap table entry:: + + Bit 0: Reserved and must be zero if bits 9 - 55 are non-zero. + If bits 9 - 55 are zero: + 0: Cluster should be read as all zeros. + 1: Cluster should be read as all ones. + + 1 - 8: Reserved and must be zero. + + 9 - 55: Bits 9 - 55 of the host cluster offset. Must be aligned to + a cluster boundary. If the offset is 0, the cluster is + unallocated; in that case, bit 0 determines how this + cluster should be treated during reads. + + 56 - 63: Reserved and must be zero. + + +Bitmap data +----------- + +As noted above, bitmap data is stored in separate clusters, described by the +bitmap table. Given an offset (in bytes) into the bitmap data, the offset into +the image file can be obtained as follows:: + + image_offset(bitmap_data_offset) = + bitmap_table[bitmap_data_offset / cluster_size] + + (bitmap_data_offset % cluster_size) + +This offset is not defined if bits 9 - 55 of bitmap table entry are zero (see +above). + +Given an offset byte_nr into the virtual disk and the bitmap's granularity, the +bit offset into the image file to the corresponding bit of the bitmap can be +calculated like this:: + + bit_offset(byte_nr) = + image_offset(byte_nr / granularity / 8) * 8 + + (byte_nr / granularity) % 8 + +If the size of the bitmap data is not a multiple of the cluster size then the +last cluster of the bitmap data contains some unused tail bits. These bits must +be zero. + + +Dirty tracking bitmaps +---------------------- + +Bitmaps with ``type`` field equal to one are dirty tracking bitmaps. + +When the virtual disk is in use dirty tracking bitmap may be ``enabled`` or +``disabled``. While the bitmap is ``enabled``, all writes to the virtual disk +should be reflected in the bitmap. A set bit in the bitmap means that the +corresponding range of the virtual disk (see above) was written to while the +bitmap was ``enabled``. An unset bit means that this range was not written to. + +The software doesn't have to sync the bitmap in the image file with its +representation in RAM after each write or metadata change. Flag ``in_use`` +should be set while the bitmap is not synced. + +In the image file the ``enabled`` state is reflected by the ``auto`` flag. If this +flag is set, the software must consider the bitmap as ``enabled`` and start +tracking virtual disk changes to this bitmap from the first write to the +virtual disk. If this flag is not set then the bitmap is disabled. 
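+
+As a purely illustrative sketch (not part of the format), the ``image_offset``
+and ``bit_offset`` formulas from the "Bitmap data" section above can be
+combined to test whether a stored bitmap marks a given guest offset. The byte
+access into the image file and the in-memory ``bitmap_table`` are hypothetical
+helpers, and least-significant-bit-first ordering within a byte is assumed:
+
+.. code:: python
+
+   def bitmap_bit_is_set(image, bitmap_table, cluster_size, granularity_bits,
+                         byte_nr):
+       """Return True if the bitmap bit covering guest offset byte_nr is set."""
+       granularity = 1 << granularity_bits
+       bit_nr = byte_nr // granularity            # bit number within the bitmap
+       data_offset = bit_nr // 8                  # byte offset into bitmap data
+       entry = bitmap_table[data_offset // cluster_size]
+       host_offset = entry & (((1 << 56) - 1) & ~0x1ff)   # bits 9 - 55
+       if host_offset == 0:
+           return bool(entry & 1)                 # all-zeros or all-ones cluster
+       byte = image[host_offset + data_offset % cluster_size]
+       return bool(byte & (1 << (bit_nr % 8)))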
diff --git a/docs/interop/qcow2.txt b/docs/interop/qcow2.txt deleted file mode 100644 index 2c4618375ad..00000000000 --- a/docs/interop/qcow2.txt +++ /dev/null @@ -1,906 +0,0 @@ -== General == - -A qcow2 image file is organized in units of constant size, which are called -(host) clusters. A cluster is the unit in which all allocations are done, -both for actual guest data and for image metadata. - -Likewise, the virtual disk as seen by the guest is divided into (guest) -clusters of the same size. - -All numbers in qcow2 are stored in Big Endian byte order. - - -== Header == - -The first cluster of a qcow2 image contains the file header: - - Byte 0 - 3: magic - QCOW magic string ("QFI\xfb") - - 4 - 7: version - Version number (valid values are 2 and 3) - - 8 - 15: backing_file_offset - Offset into the image file at which the backing file name - is stored (NB: The string is not null terminated). 0 if the - image doesn't have a backing file. - - Note: backing files are incompatible with raw external data - files (auto-clear feature bit 1). - - 16 - 19: backing_file_size - Length of the backing file name in bytes. Must not be - longer than 1023 bytes. Undefined if the image doesn't have - a backing file. - - 20 - 23: cluster_bits - Number of bits that are used for addressing an offset - within a cluster (1 << cluster_bits is the cluster size). - Must not be less than 9 (i.e. 512 byte clusters). - - Note: qemu as of today has an implementation limit of 2 MB - as the maximum cluster size and won't be able to open images - with larger cluster sizes. - - Note: if the image has Extended L2 Entries then cluster_bits - must be at least 14 (i.e. 16384 byte clusters). - - 24 - 31: size - Virtual disk size in bytes. - - Note: qemu has an implementation limit of 32 MB as - the maximum L1 table size. With a 2 MB cluster - size, it is unable to populate a virtual cluster - beyond 2 EB (61 bits); with a 512 byte cluster - size, it is unable to populate a virtual size - larger than 128 GB (37 bits). Meanwhile, L1/L2 - table layouts limit an image to no more than 64 PB - (56 bits) of populated clusters, and an image may - hit other limits first (such as a file system's - maximum size). - - 32 - 35: crypt_method - 0 for no encryption - 1 for AES encryption - 2 for LUKS encryption - - 36 - 39: l1_size - Number of entries in the active L1 table - - 40 - 47: l1_table_offset - Offset into the image file at which the active L1 table - starts. Must be aligned to a cluster boundary. - - 48 - 55: refcount_table_offset - Offset into the image file at which the refcount table - starts. Must be aligned to a cluster boundary. - - 56 - 59: refcount_table_clusters - Number of clusters that the refcount table occupies - - 60 - 63: nb_snapshots - Number of snapshots contained in the image - - 64 - 71: snapshots_offset - Offset into the image file at which the snapshot table - starts. Must be aligned to a cluster boundary. - -For version 2, the header is exactly 72 bytes in length, and finishes here. -For version 3 or higher, the header length is at least 104 bytes, including -the next fields through header_length. - - 72 - 79: incompatible_features - Bitmask of incompatible features. An implementation must - fail to open an image if an unknown bit is set. - - Bit 0: Dirty bit. If this bit is set then refcounts - may be inconsistent, make sure to scan L1/L2 - tables to repair refcounts before accessing the - image. - - Bit 1: Corrupt bit. 
If this bit is set then any data - structure may be corrupt and the image must not - be written to (unless for regaining - consistency). - - Bit 2: External data file bit. If this bit is set, an - external data file is used. Guest clusters are - then stored in the external data file. For such - images, clusters in the external data file are - not refcounted. The offset field in the - Standard Cluster Descriptor must match the - guest offset and neither compressed clusters - nor internal snapshots are supported. - - An External Data File Name header extension may - be present if this bit is set. - - Bit 3: Compression type bit. If this bit is set, - a non-default compression is used for compressed - clusters. The compression_type field must be - present and not zero. - - Bit 4: Extended L2 Entries. If this bit is set then - L2 table entries use an extended format that - allows subcluster-based allocation. See the - Extended L2 Entries section for more details. - - Bits 5-63: Reserved (set to 0) - - 80 - 87: compatible_features - Bitmask of compatible features. An implementation can - safely ignore any unknown bits that are set. - - Bit 0: Lazy refcounts bit. If this bit is set then - lazy refcount updates can be used. This means - marking the image file dirty and postponing - refcount metadata updates. - - Bits 1-63: Reserved (set to 0) - - 88 - 95: autoclear_features - Bitmask of auto-clear features. An implementation may only - write to an image with unknown auto-clear features if it - clears the respective bits from this field first. - - Bit 0: Bitmaps extension bit - This bit indicates consistency for the bitmaps - extension data. - - It is an error if this bit is set without the - bitmaps extension present. - - If the bitmaps extension is present but this - bit is unset, the bitmaps extension data must be - considered inconsistent. - - Bit 1: Raw external data bit - If this bit is set, the external data file can - be read as a consistent standalone raw image - without looking at the qcow2 metadata. - - Setting this bit has a performance impact for - some operations on the image (e.g. writing - zeros requires writing to the data file instead - of only setting the zero flag in the L2 table - entry) and conflicts with backing files. - - This bit may only be set if the External Data - File bit (incompatible feature bit 1) is also - set. - - Bits 2-63: Reserved (set to 0) - - 96 - 99: refcount_order - Describes the width of a reference count block entry (width - in bits: refcount_bits = 1 << refcount_order). For version 2 - images, the order is always assumed to be 4 - (i.e. refcount_bits = 16). - This value may not exceed 6 (i.e. refcount_bits = 64). - - 100 - 103: header_length - Length of the header structure in bytes. For version 2 - images, the length is always assumed to be 72 bytes. - For version 3 it's at least 104 bytes and must be a multiple - of 8. - - -=== Additional fields (version 3 and higher) === - -In general, these fields are optional and may be safely ignored by the software, -as well as filled by zeros (which is equal to field absence), if software needs -to set field B, but does not care about field A which precedes B. More -formally, additional fields have the following compatibility rules: - -1. If the value of the additional field must not be ignored for correct -handling of the file, it will be accompanied by a corresponding incompatible -feature bit. - -2. 
If there are no unrecognized incompatible feature bits set, an unknown -additional field may be safely ignored other than preserving its value when -rewriting the image header. - -3. An explicit value of 0 will have the same behavior as when the field is not -present*, if not altered by a specific incompatible bit. - -*. A field is considered not present when header_length is less than or equal -to the field's offset. Also, all additional fields are not present for -version 2. - - 104: compression_type - - Defines the compression method used for compressed clusters. - All compressed clusters in an image use the same compression - type. - - If the incompatible bit "Compression type" is set: the field - must be present and non-zero (which means non-deflate - compression type). Otherwise, this field must not be present - or must be zero (which means deflate). - - Available compression type values: - 0: deflate - 1: zstd - - The deflate compression type is called "zlib" - in QEMU. However, clusters with the - deflate compression type do not have zlib headers. - - 105 - 111: Padding, contents defined below. - -=== Header padding === - -@header_length must be a multiple of 8, which means that if the end of the last -additional field is not aligned, some padding is needed. This padding must be -zeroed, so that if some existing (or future) additional field will fall into -the padding, it will be interpreted accordingly to point [3.] of the previous -paragraph, i.e. in the same manner as when this field is not present. - - -=== Header extensions === - -Directly after the image header, optional sections called header extensions can -be stored. Each extension has a structure like the following: - - Byte 0 - 3: Header extension type: - 0x00000000 - End of the header extension area - 0xe2792aca - Backing file format name string - 0x6803f857 - Feature name table - 0x23852875 - Bitmaps extension - 0x0537be77 - Full disk encryption header pointer - 0x44415441 - External data file name string - other - Unknown header extension, can be safely - ignored - - 4 - 7: Length of the header extension data - - 8 - n: Header extension data - - n - m: Padding to round up the header extension size to the next - multiple of 8. - -Unless stated otherwise, each header extension type shall appear at most once -in the same image. - -If the image has a backing file then the backing file name should be stored in -the remaining space between the end of the header extension area and the end of -the first cluster. It is not allowed to store other data here, so that an -implementation can safely modify the header and add extensions without harming -data of compatible features that it doesn't support. Compatible features that -need space for additional data can use a header extension. - - -== String header extensions == - -Some header extensions (such as the backing file format name and the external -data file name) are just a single string. In this case, the header extension -length is the string length and the string is not '\0' terminated. (The header -extension padding can make it look like a string is '\0' terminated, but -neither is padding always necessary nor is there a guarantee that zero bytes -are used for padding.) - - -== Feature name table == - -The feature name table is an optional header extension that contains the name -for features used by the image. It can be used by applications that don't know -the respective feature (e.g. because the feature was introduced only later) to -display a useful error message. 
- -The number of entries in the feature name table is determined by the length of -the header extension data. Each entry look like this: - - Byte 0: Type of feature (select feature bitmap) - 0: Incompatible feature - 1: Compatible feature - 2: Autoclear feature - - 1: Bit number within the selected feature bitmap (valid - values: 0-63) - - 2 - 47: Feature name (padded with zeros, but not necessarily null - terminated if it has full length) - - -== Bitmaps extension == - -The bitmaps extension is an optional header extension. It provides the ability -to store bitmaps related to a virtual disk. For now, there is only one bitmap -type: the dirty tracking bitmap, which tracks virtual disk changes from some -point in time. - -The data of the extension should be considered consistent only if the -corresponding auto-clear feature bit is set, see autoclear_features above. - -The fields of the bitmaps extension are: - - Byte 0 - 3: nb_bitmaps - The number of bitmaps contained in the image. Must be - greater than or equal to 1. - - Note: QEMU currently only supports up to 65535 bitmaps per - image. - - 4 - 7: Reserved, must be zero. - - 8 - 15: bitmap_directory_size - Size of the bitmap directory in bytes. It is the cumulative - size of all (nb_bitmaps) bitmap directory entries. - - 16 - 23: bitmap_directory_offset - Offset into the image file at which the bitmap directory - starts. Must be aligned to a cluster boundary. - -== Full disk encryption header pointer == - -The full disk encryption header must be present if, and only if, the -'crypt_method' header requires metadata. Currently this is only true -of the 'LUKS' crypt method. The header extension must be absent for -other methods. - -This header provides the offset at which the crypt method can store -its additional data, as well as the length of such data. - - Byte 0 - 7: Offset into the image file at which the encryption - header starts in bytes. Must be aligned to a cluster - boundary. - Byte 8 - 15: Length of the written encryption header in bytes. - Note actual space allocated in the qcow2 file may - be larger than this value, since it will be rounded - to the nearest multiple of the cluster size. Any - unused bytes in the allocated space will be initialized - to 0. - -For the LUKS crypt method, the encryption header works as follows. - -The first 592 bytes of the header clusters will contain the LUKS -partition header. This is then followed by the key material data areas. -The size of the key material data areas is determined by the number of -stripes in the key slot and key size. Refer to the LUKS format -specification ('docs/on-disk-format.pdf' in the cryptsetup source -package) for details of the LUKS partition header format. - -In the LUKS partition header, the "payload-offset" field will be -calculated as normal for the LUKS spec. ie the size of the LUKS -header, plus key material regions, plus padding, relative to the -start of the LUKS header. This offset value is not required to be -qcow2 cluster aligned. Its value is currently never used in the -context of qcow2, since the qcow2 file format itself defines where -the real payload offset is, but none the less a valid payload offset -should always be present. - -In the LUKS key slots header, the "key-material-offset" is relative -to the start of the LUKS header clusters in the qcow2 container, -not the start of the qcow2 file. 
- -Logically the layout looks like - - +-----------------------------+ - | QCow2 header | - | QCow2 header extension X | - | QCow2 header extension FDE | - | QCow2 header extension ... | - | QCow2 header extension Z | - +-----------------------------+ - | ....other QCow2 tables.... | - . . - . . - +-----------------------------+ - | +-------------------------+ | - | | LUKS partition header | | - | +-------------------------+ | - | | LUKS key material 1 | | - | +-------------------------+ | - | | LUKS key material 2 | | - | +-------------------------+ | - | | LUKS key material ... | | - | +-------------------------+ | - | | LUKS key material 8 | | - | +-------------------------+ | - +-----------------------------+ - | QCow2 cluster payload | - . . - . . - . . - | | - +-----------------------------+ - -== Data encryption == - -When an encryption method is requested in the header, the image payload -data must be encrypted/decrypted on every write/read. The image headers -and metadata are never encrypted. - -The algorithms used for encryption vary depending on the method - - - AES: - - The AES cipher, in CBC mode, with 256 bit keys. - - Initialization vectors generated using plain64 method, with - the virtual disk sector as the input tweak. - - This format is no longer supported in QEMU system emulators, due - to a number of design flaws affecting its security. It is only - supported in the command line tools for the sake of back compatibility - and data liberation. - - - LUKS: - - The algorithms are specified in the LUKS header. - - Initialization vectors generated using the method specified - in the LUKS header, with the physical disk sector as the - input tweak. - -== Host cluster management == - -qcow2 manages the allocation of host clusters by maintaining a reference count -for each host cluster. A refcount of 0 means that the cluster is free, 1 means -that it is used, and >= 2 means that it is used and any write access must -perform a COW (copy on write) operation. - -The refcounts are managed in a two-level table. The first level is called -refcount table and has a variable size (which is stored in the header). The -refcount table can cover multiple clusters, however it needs to be contiguous -in the image file. - -It contains pointers to the second level structures which are called refcount -blocks and are exactly one cluster in size. - -Although a large enough refcount table can reserve clusters past 64 PB -(56 bits) (assuming the underlying protocol can even be sized that -large), note that some qcow2 metadata such as L1/L2 tables must point -to clusters prior to that point. - -Note: qemu has an implementation limit of 8 MB as the maximum refcount -table size. With a 2 MB cluster size and a default refcount_order of -4, it is unable to reference host resources beyond 2 EB (61 bits); in -the worst case, with a 512 cluster size and refcount_order of 6, it is -unable to access beyond 32 GB (35 bits). - -Given an offset into the image file, the refcount of its cluster can be -obtained as follows: - - refcount_block_entries = (cluster_size * 8 / refcount_bits) - - refcount_block_index = (offset / cluster_size) % refcount_block_entries - refcount_table_index = (offset / cluster_size) / refcount_block_entries - - refcount_block = load_cluster(refcount_table[refcount_table_index]); - return refcount_block[refcount_block_index]; - -Refcount table entry: - - Bit 0 - 8: Reserved (set to 0) - - 9 - 63: Bits 9-63 of the offset into the image file at which the - refcount block starts. 
Must be aligned to a cluster - boundary. - - If this is 0, the corresponding refcount block has not yet - been allocated. All refcounts managed by this refcount block - are 0. - -Refcount block entry (x = refcount_bits - 1): - - Bit 0 - x: Reference count of the cluster. If refcount_bits implies a - sub-byte width, note that bit 0 means the least significant - bit in this context. - - -== Cluster mapping == - -Just as for refcounts, qcow2 uses a two-level structure for the mapping of -guest clusters to host clusters. They are called L1 and L2 table. - -The L1 table has a variable size (stored in the header) and may use multiple -clusters, however it must be contiguous in the image file. L2 tables are -exactly one cluster in size. - -The L1 and L2 tables have implications on the maximum virtual file -size; for a given L1 table size, a larger cluster size is required for -the guest to have access to more space. Furthermore, a virtual -cluster must currently map to a host offset below 64 PB (56 bits) -(although this limit could be relaxed by putting reserved bits into -use). Additionally, as cluster size increases, the maximum host -offset for a compressed cluster is reduced (a 2M cluster size requires -compressed clusters to reside below 512 TB (49 bits), and this limit -cannot be relaxed without an incompatible layout change). - -Given an offset into the virtual disk, the offset into the image file can be -obtained as follows: - - l2_entries = (cluster_size / sizeof(uint64_t)) [*] - - l2_index = (offset / cluster_size) % l2_entries - l1_index = (offset / cluster_size) / l2_entries - - l2_table = load_cluster(l1_table[l1_index]); - cluster_offset = l2_table[l2_index]; - - return cluster_offset + (offset % cluster_size) - - [*] this changes if Extended L2 Entries are enabled, see next section - -L1 table entry: - - Bit 0 - 8: Reserved (set to 0) - - 9 - 55: Bits 9-55 of the offset into the image file at which the L2 - table starts. Must be aligned to a cluster boundary. If the - offset is 0, the L2 table and all clusters described by this - L2 table are unallocated. - - 56 - 62: Reserved (set to 0) - - 63: 0 for an L2 table that is unused or requires COW, 1 if its - refcount is exactly one. This information is only accurate - in the active L1 table. - -L2 table entry: - - Bit 0 - 61: Cluster descriptor - - 62: 0 for standard clusters - 1 for compressed clusters - - 63: 0 for clusters that are unused, compressed or require COW. - 1 for standard clusters whose refcount is exactly one. - This information is only accurate in L2 tables - that are reachable from the active L1 table. - - With external data files, all guest clusters have an - implicit refcount of 1 (because of the fixed host = guest - mapping for guest cluster offsets), so this bit should be 1 - for all allocated clusters. - -Standard Cluster Descriptor: - - Bit 0: If set to 1, the cluster reads as all zeros. The host - cluster offset can be used to describe a preallocation, - but it won't be used for reading data from this cluster, - nor is data read from the backing file if the cluster is - unallocated. - - With version 2 or with extended L2 entries (see the next - section), this is always 0. - - 1 - 8: Reserved (set to 0) - - 9 - 55: Bits 9-55 of host cluster offset. Must be aligned to a - cluster boundary. If the offset is 0 and bit 63 is clear, - the cluster is unallocated. The offset may only be 0 with - bit 63 set (indicating a host cluster offset of 0) when an - external data file is used. 
- - 56 - 61: Reserved (set to 0) - - -Compressed Clusters Descriptor (x = 62 - (cluster_bits - 8)): - - Bit 0 - x-1: Host cluster offset. This is usually _not_ aligned to a - cluster or sector boundary! If cluster_bits is - small enough that this field includes bits beyond - 55, those upper bits must be set to 0. - - x - 61: Number of additional 512-byte sectors used for the - compressed data, beyond the sector containing the offset - in the previous field. Some of these sectors may reside - in the next contiguous host cluster. - - Note that the compressed data does not necessarily occupy - all of the bytes in the final sector; rather, decompression - stops when it has produced a cluster of data. - - Another compressed cluster may map to the tail of the final - sector used by this compressed cluster. - -If a cluster is unallocated, read requests shall read the data from the backing -file (except if bit 0 in the Standard Cluster Descriptor is set). If there is -no backing file or the backing file is smaller than the image, they shall read -zeros for all parts that are not covered by the backing file. - -== Extended L2 Entries == - -An image uses Extended L2 Entries if bit 4 is set on the incompatible_features -field of the header. - -In these images standard data clusters are divided into 32 subclusters of the -same size. They are contiguous and start from the beginning of the cluster. -Subclusters can be allocated independently and the L2 entry contains information -indicating the status of each one of them. Compressed data clusters don't have -subclusters so they are treated the same as in images without this feature. - -The size of an extended L2 entry is 128 bits so the number of entries per table -is calculated using this formula: - - l2_entries = (cluster_size / (2 * sizeof(uint64_t))) - -The first 64 bits have the same format as the standard L2 table entry described -in the previous section, with the exception of bit 0 of the standard cluster -descriptor. - -The last 64 bits contain a subcluster allocation bitmap with this format: - -Subcluster Allocation Bitmap (for standard clusters): - - Bit 0 - 31: Allocation status (one bit per subcluster) - - 1: the subcluster is allocated. In this case the - host cluster offset field must contain a valid - offset. - 0: the subcluster is not allocated. In this case - read requests shall go to the backing file or - return zeros if there is no backing file data. - - Bits are assigned starting from the least significant - one (i.e. bit x is used for subcluster x). - - 32 - 63 Subcluster reads as zeros (one bit per subcluster) - - 1: the subcluster reads as zeros. In this case the - allocation status bit must be unset. The host - cluster offset field may or may not be set. - 0: no effect. - - Bits are assigned starting from the least significant - one (i.e. bit x is used for subcluster x - 32). - -Subcluster Allocation Bitmap (for compressed clusters): - - Bit 0 - 63: Reserved (set to 0) - Compressed clusters don't have subclusters, - so this field is not used. - -== Snapshots == - -qcow2 supports internal snapshots. Their basic principle of operation is to -switch the active L1 table, so that a different set of host clusters are -exposed to the guest. - -When creating a snapshot, the L1 table should be copied and the refcount of all -L2 tables and clusters reachable from this L1 table must be increased, so that -a write causes a COW and isn't visible in other snapshots. 
- -When loading a snapshot, bit 63 of all entries in the new active L1 table and -all L2 tables referenced by it must be reconstructed from the refcount table -as it doesn't need to be accurate in inactive L1 tables. - -A directory of all snapshots is stored in the snapshot table, a contiguous area -in the image file, whose starting offset and length are given by the header -fields snapshots_offset and nb_snapshots. The entries of the snapshot table -have variable length, depending on the length of ID, name and extra data. - -Snapshot table entry: - - Byte 0 - 7: Offset into the image file at which the L1 table for the - snapshot starts. Must be aligned to a cluster boundary. - - 8 - 11: Number of entries in the L1 table of the snapshots - - 12 - 13: Length of the unique ID string describing the snapshot - - 14 - 15: Length of the name of the snapshot - - 16 - 19: Time at which the snapshot was taken in seconds since the - Epoch - - 20 - 23: Subsecond part of the time at which the snapshot was taken - in nanoseconds - - 24 - 31: Time that the guest was running until the snapshot was - taken in nanoseconds - - 32 - 35: Size of the VM state in bytes. 0 if no VM state is saved. - If there is VM state, it starts at the first cluster - described by first L1 table entry that doesn't describe a - regular guest cluster (i.e. VM state is stored like guest - disk content, except that it is stored at offsets that are - larger than the virtual disk presented to the guest) - - 36 - 39: Size of extra data in the table entry (used for future - extensions of the format) - - variable: Extra data for future extensions. Unknown fields must be - ignored. Currently defined are (offset relative to snapshot - table entry): - - Byte 40 - 47: Size of the VM state in bytes. 0 if no VM - state is saved. If this field is present, - the 32-bit value in bytes 32-35 is ignored. - - Byte 48 - 55: Virtual disk size of the snapshot in bytes - - Byte 56 - 63: icount value which corresponds to - the record/replay instruction count - when the snapshot was taken. Set to -1 - if icount was disabled - - Version 3 images must include extra data at least up to - byte 55. - - variable: Unique ID string for the snapshot (not null terminated) - - variable: Name of the snapshot (not null terminated) - - variable: Padding to round up the snapshot table entry size to the - next multiple of 8. - - -== Bitmaps == - -As mentioned above, the bitmaps extension provides the ability to store bitmaps -related to a virtual disk. This section describes how these bitmaps are stored. - -All stored bitmaps are related to the virtual disk stored in the same image, so -each bitmap size is equal to the virtual disk size. - -Each bit of the bitmap is responsible for strictly defined range of the virtual -disk. For bit number bit_nr the corresponding range (in bytes) will be: - - [bit_nr * bitmap_granularity .. (bit_nr + 1) * bitmap_granularity - 1] - -Granularity is a property of the concrete bitmap, see below. - - -=== Bitmap directory === - -Each bitmap saved in the image is described in a bitmap directory entry. The -bitmap directory is a contiguous area in the image file, whose starting offset -and length are given by the header extension fields bitmap_directory_offset and -bitmap_directory_size. The entries of the bitmap directory have variable -length, depending on the lengths of the bitmap name and extra data. 
- -Structure of a bitmap directory entry: - - Byte 0 - 7: bitmap_table_offset - Offset into the image file at which the bitmap table - (described below) for the bitmap starts. Must be aligned to - a cluster boundary. - - 8 - 11: bitmap_table_size - Number of entries in the bitmap table of the bitmap. - - 12 - 15: flags - Bit - 0: in_use - The bitmap was not saved correctly and may be - inconsistent. Although the bitmap metadata is still - well-formed from a qcow2 perspective, the metadata - (such as the auto flag or bitmap size) or data - contents may be outdated. - - 1: auto - The bitmap must reflect all changes of the virtual - disk by any application that would write to this qcow2 - file (including writes, snapshot switching, etc.). The - type of this bitmap must be 'dirty tracking bitmap'. - - 2: extra_data_compatible - This flags is meaningful when the extra data is - unknown to the software (currently any extra data is - unknown to QEMU). - If it is set, the bitmap may be used as expected, extra - data must be left as is. - If it is not set, the bitmap must not be used, but - both it and its extra data be left as is. - - Bits 3 - 31 are reserved and must be 0. - - 16: type - This field describes the sort of the bitmap. - Values: - 1: Dirty tracking bitmap - - Values 0, 2 - 255 are reserved. - - 17: granularity_bits - Granularity bits. Valid values: 0 - 63. - - Note: QEMU currently supports only values 9 - 31. - - Granularity is calculated as - granularity = 1 << granularity_bits - - A bitmap's granularity is how many bytes of the image - accounts for one bit of the bitmap. - - 18 - 19: name_size - Size of the bitmap name. Must be non-zero. - - Note: QEMU currently doesn't support values greater than - 1023. - - 20 - 23: extra_data_size - Size of type-specific extra data. - - For now, as no extra data is defined, extra_data_size is - reserved and should be zero. If it is non-zero the - behavior is defined by extra_data_compatible flag. - - variable: extra_data - Extra data for the bitmap, occupying extra_data_size bytes. - Extra data must never contain references to clusters or in - some other way allocate additional clusters. - - variable: name - The name of the bitmap (not null terminated), occupying - name_size bytes. Must be unique among all bitmap names - within the bitmaps extension. - - variable: Padding to round up the bitmap directory entry size to the - next multiple of 8. All bytes of the padding must be zero. - - -=== Bitmap table === - -Each bitmap is stored using a one-level structure (as opposed to two-level -structures like for refcounts and guest clusters mapping) for the mapping of -bitmap data to host clusters. This structure is called the bitmap table. - -Each bitmap table has a variable size (stored in the bitmap directory entry) -and may use multiple clusters, however, it must be contiguous in the image -file. - -Structure of a bitmap table entry: - - Bit 0: Reserved and must be zero if bits 9 - 55 are non-zero. - If bits 9 - 55 are zero: - 0: Cluster should be read as all zeros. - 1: Cluster should be read as all ones. - - 1 - 8: Reserved and must be zero. - - 9 - 55: Bits 9 - 55 of the host cluster offset. Must be aligned to - a cluster boundary. If the offset is 0, the cluster is - unallocated; in that case, bit 0 determines how this - cluster should be treated during reads. - - 56 - 63: Reserved and must be zero. - - -=== Bitmap data === - -As noted above, bitmap data is stored in separate clusters, described by the -bitmap table. 
Given an offset (in bytes) into the bitmap data, the offset into -the image file can be obtained as follows: - - image_offset(bitmap_data_offset) = - bitmap_table[bitmap_data_offset / cluster_size] + - (bitmap_data_offset % cluster_size) - -This offset is not defined if bits 9 - 55 of bitmap table entry are zero (see -above). - -Given an offset byte_nr into the virtual disk and the bitmap's granularity, the -bit offset into the image file to the corresponding bit of the bitmap can be -calculated like this: - - bit_offset(byte_nr) = - image_offset(byte_nr / granularity / 8) * 8 + - (byte_nr / granularity) % 8 - -If the size of the bitmap data is not a multiple of the cluster size then the -last cluster of the bitmap data contains some unused tail bits. These bits must -be zero. - - -=== Dirty tracking bitmaps === - -Bitmaps with 'type' field equal to one are dirty tracking bitmaps. - -When the virtual disk is in use dirty tracking bitmap may be 'enabled' or -'disabled'. While the bitmap is 'enabled', all writes to the virtual disk -should be reflected in the bitmap. A set bit in the bitmap means that the -corresponding range of the virtual disk (see above) was written to while the -bitmap was 'enabled'. An unset bit means that this range was not written to. - -The software doesn't have to sync the bitmap in the image file with its -representation in RAM after each write or metadata change. Flag 'in_use' -should be set while the bitmap is not synced. - -In the image file the 'enabled' state is reflected by the 'auto' flag. If this -flag is set, the software must consider the bitmap as 'enabled' and start -tracking virtual disk changes to this bitmap from the first write to the -virtual disk. If this flag is not set then the bitmap is disabled. diff --git a/docs/interop/qed_spec.rst b/docs/interop/qed_spec.rst new file mode 100644 index 00000000000..cd6c7d90051 --- /dev/null +++ b/docs/interop/qed_spec.rst @@ -0,0 +1,219 @@ +=================================== +QED Image File Format Specification +=================================== + +The file format looks like this:: + + +----------+----------+----------+-----+ + | cluster0 | cluster1 | cluster2 | ... | + +----------+----------+----------+-----+ + +The first cluster begins with the ``header``. The header contains information +about where regular clusters start; this allows the header to be extensible and +store extra information about the image file. A regular cluster may be +a ``data cluster``, an ``L2``, or an ``L1 table``. L1 and L2 tables are composed +of one or more contiguous clusters. + +Normally the file size will be a multiple of the cluster size. If the file size +is not a multiple, extra information after the last cluster may not be preserved +if data is written. Legitimate extra information should use space between the header +and the first regular cluster. + +All fields are little-endian. 
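+
+Since all fields are little-endian, multi-byte values must be byte-swapped when
+read on a big-endian host. As a small, purely illustrative sketch (the complete
+field layout follows in the Header section below), the first two header fields
+of cluster 0 could be read like this; the file name is hypothetical:
+
+.. code:: python
+
+   import struct
+
+   with open('image.qed', 'rb') as f:
+       magic, cluster_size = struct.unpack('<II', f.read(8))
+
+   # 'Q', 'E', 'D', '\0' read as a little-endian uint32_t
+   assert magic == 0x00444551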
+ +Header +------ + +:: + + Header { + uint32_t magic; /* QED\0 */ + + uint32_t cluster_size; /* in bytes */ + uint32_t table_size; /* for L1 and L2 tables, in clusters */ + uint32_t header_size; /* in clusters */ + + uint64_t features; /* format feature bits */ + uint64_t compat_features; /* compat feature bits */ + uint64_t autoclear_features; /* self-resetting feature bits */ + + uint64_t l1_table_offset; /* in bytes */ + uint64_t image_size; /* total logical image size, in bytes */ + + /* if (features & QED_F_BACKING_FILE) */ + uint32_t backing_filename_offset; /* in bytes from start of header */ + uint32_t backing_filename_size; /* in bytes */ + } + +Field descriptions: +~~~~~~~~~~~~~~~~~~~ + +- ``cluster_size`` must be a power of 2 in range [2^12, 2^26]. +- ``table_size`` must be a power of 2 in range [1, 16]. +- ``header_size`` is the number of clusters used by the header and any additional + information stored before regular clusters. +- ``features``, ``compat_features``, and ``autoclear_features`` are file format + extension bitmaps. They work as follows: + + - An image with unknown ``features`` bits enabled must not be opened. File format + changes that are not backwards-compatible must use ``features`` bits. + - An image with unknown ``compat_features`` bits enabled can be opened safely. + The unknown features are simply ignored and represent backwards-compatible + changes to the file format. + - An image with unknown ``autoclear_features`` bits enable can be opened safely + after clearing the unknown bits. This allows for backwards-compatible changes + to the file format which degrade gracefully and can be re-enabled again by a + new program later. +- ``l1_table_offset`` is the offset of the first byte of the L1 table in the image + file and must be a multiple of ``cluster_size``. +- ``image_size`` is the block device size seen by the guest and must be a multiple + of 512 bytes. +- ``backing_filename_offset`` and ``backing_filename_size`` describe a string in + (byte offset, byte size) form. It is not NUL-terminated and has no alignment constraints. + The string must be stored within the first ``header_size`` clusters. The backing filename + may be an absolute path or relative to the image file. + +Feature bits: +~~~~~~~~~~~~~ + +- ``QED_F_BACKING_FILE = 0x01``. The image uses a backing file. +- ``QED_F_NEED_CHECK = 0x02``. The image needs a consistency check before use. +- ``QED_F_BACKING_FORMAT_NO_PROBE = 0x04``. The backing file is a raw disk image + and no file format autodetection should be attempted. This should be used to + ensure that raw backing files are never detected as an image format if they happen + to contain magic constants. + +There are currently no defined ``compat_features`` or ``autoclear_features`` bits. + +Fields predicated on a feature bit are only used when that feature is set. +The fields always take up header space, regardless of whether or not the feature +bit is set. + +Tables +------ + +Tables provide the translation from logical offsets in the block device to cluster +offsets in the file. + +:: + + #define TABLE_NOFFSETS (table_size * cluster_size / sizeof(uint64_t)) + + Table { + uint64_t offsets[TABLE_NOFFSETS]; + } + +The tables are organized as follows:: + + +----------+ + | L1 table | + +----------+ + ,------' | '------. + +----------+ | +----------+ + | L2 table | ... | L2 table | + +----------+ +----------+ + ,------' | '------. + +----------+ | +----------+ + | Data | ... 
| Data | + +----------+ +----------+ + +A table is made up of one or more contiguous clusters. The ``table_size`` header +field determines table size for an image file. For example, ``cluster_size=64 KB`` +and ``table_size=4`` results in 256 KB tables. + +The logical image size must be less than or equal to the maximum possible size of +clusters rooted by the L1 table: + +.. code:: + + header.image_size <= TABLE_NOFFSETS * TABLE_NOFFSETS * header.cluster_size + +L1, L2, and data cluster offsets must be aligned to ``header.cluster_size``. +The following offsets have special meanings: + +L2 table offsets +~~~~~~~~~~~~~~~~ + +- 0 - unallocated. The L2 table is not yet allocated. + +Data cluster offsets +~~~~~~~~~~~~~~~~~~~~ + +- 0 - unallocated. The data cluster is not yet allocated. +- 1 - zero. The data cluster contents are all zeroes and no cluster is allocated. + +Future format extensions may wish to store per-offset information. The least +significant 12 bits of an offset are reserved for this purpose and must be set +to zero. Image files with ``cluster_size`` > 2^12 will have more unused bits +which should also be zeroed. + +Unallocated L2 tables and data clusters +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Reads to an unallocated area of the image file access the backing file. If there +is no backing file, then zeroes are produced. The backing file may be smaller +than the image file and reads of unallocated areas beyond the end of the backing +file produce zeroes. + +Writes to an unallocated area cause a new data clusters to be allocated, and a new +L2 table if that is also unallocated. The new data cluster is populated with data +from the backing file (or zeroes if no backing file) and the data being written. + +Zero data clusters +~~~~~~~~~~~~~~~~~~ + +Zero data clusters are a space-efficient way of storing zeroed regions of the image. + +Reads to a zero data cluster produce zeroes. + +.. note:: + The difference between an unallocated and a zero data cluster is that zero data + clusters stop the reading of contents from the backing file. + +Writes to a zero data cluster cause a new data cluster to be allocated. The new +data cluster is populated with zeroes and the data being written. + +Logical offset translation +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Logical offsets are translated into cluster offsets as follows:: + + table_bits table_bits cluster_bits + <--------> <--------> <---------------> + +----------+----------+-----------------+ + | L1 index | L2 index | byte offset | + +----------+----------+-----------------+ + + Structure of a logical offset + + offset_mask = ~(cluster_size - 1) # mask for the image file byte offset + + def logical_to_cluster_offset(l1_index, l2_index, byte_offset): + l2_offset = l1_table[l1_index] + l2_table = load_table(l2_offset) + cluster_offset = l2_table[l2_index] & offset_mask + return cluster_offset + byte_offset + +Consistency checking +-------------------- + +This section is informational and included to provide background on the use +of the ``QED_F_NEED_CHECK features`` bit. + +The ``QED_F_NEED_CHECK`` bit is used to mark an image as dirty before starting +an operation that could leave the image in an inconsistent state if interrupted +by a crash or power failure. A dirty image must be checked on open because its +metadata may not be consistent. + +Consistency check includes the following invariants: + +- Each cluster is referenced once and only once. It is an inconsistency to have + a cluster referenced more than once by L1 or L2 tables. 
A cluster has been leaked + if it has no references. +- Offsets must be within the image file size and must be ``cluster_size`` aligned. +- Table offsets must at least ``table_size`` * ``cluster_size`` bytes from the end + of the image file so that there is space for the entire table. + +The consistency check process starts from ``l1_table_offset`` and scans all L2 tables. +After the check completes with no other errors besides leaks, the ``QED_F_NEED_CHECK`` +bit can be cleared and the image can be accessed. diff --git a/docs/interop/qed_spec.txt b/docs/interop/qed_spec.txt deleted file mode 100644 index 7982e058b2c..00000000000 --- a/docs/interop/qed_spec.txt +++ /dev/null @@ -1,138 +0,0 @@ -=Specification= - -The file format looks like this: - - +----------+----------+----------+-----+ - | cluster0 | cluster1 | cluster2 | ... | - +----------+----------+----------+-----+ - -The first cluster begins with the '''header'''. The header contains information about where regular clusters start; this allows the header to be extensible and store extra information about the image file. A regular cluster may be a '''data cluster''', an '''L2''', or an '''L1 table'''. L1 and L2 tables are composed of one or more contiguous clusters. - -Normally the file size will be a multiple of the cluster size. If the file size is not a multiple, extra information after the last cluster may not be preserved if data is written. Legitimate extra information should use space between the header and the first regular cluster. - -All fields are little-endian. - -==Header== - Header { - uint32_t magic; /* QED\0 */ - - uint32_t cluster_size; /* in bytes */ - uint32_t table_size; /* for L1 and L2 tables, in clusters */ - uint32_t header_size; /* in clusters */ - - uint64_t features; /* format feature bits */ - uint64_t compat_features; /* compat feature bits */ - uint64_t autoclear_features; /* self-resetting feature bits */ - - uint64_t l1_table_offset; /* in bytes */ - uint64_t image_size; /* total logical image size, in bytes */ - - /* if (features & QED_F_BACKING_FILE) */ - uint32_t backing_filename_offset; /* in bytes from start of header */ - uint32_t backing_filename_size; /* in bytes */ - } - -Field descriptions: -* ''cluster_size'' must be a power of 2 in range [2^12, 2^26]. -* ''table_size'' must be a power of 2 in range [1, 16]. -* ''header_size'' is the number of clusters used by the header and any additional information stored before regular clusters. -* ''features'', ''compat_features'', and ''autoclear_features'' are file format extension bitmaps. They work as follows: -** An image with unknown ''features'' bits enabled must not be opened. File format changes that are not backwards-compatible must use ''features'' bits. -** An image with unknown ''compat_features'' bits enabled can be opened safely. The unknown features are simply ignored and represent backwards-compatible changes to the file format. -** An image with unknown ''autoclear_features'' bits enable can be opened safely after clearing the unknown bits. This allows for backwards-compatible changes to the file format which degrade gracefully and can be re-enabled again by a new program later. -* ''l1_table_offset'' is the offset of the first byte of the L1 table in the image file and must be a multiple of ''cluster_size''. -* ''image_size'' is the block device size seen by the guest and must be a multiple of 512 bytes. -* ''backing_filename_offset'' and ''backing_filename_size'' describe a string in (byte offset, byte size) form. 
It is not NUL-terminated and has no alignment constraints. The string must be stored within the first ''header_size'' clusters. The backing filename may be an absolute path or relative to the image file. - -Feature bits: -* QED_F_BACKING_FILE = 0x01. The image uses a backing file. -* QED_F_NEED_CHECK = 0x02. The image needs a consistency check before use. -* QED_F_BACKING_FORMAT_NO_PROBE = 0x04. The backing file is a raw disk image and no file format autodetection should be attempted. This should be used to ensure that raw backing files are never detected as an image format if they happen to contain magic constants. - -There are currently no defined ''compat_features'' or ''autoclear_features'' bits. - -Fields predicated on a feature bit are only used when that feature is set. The fields always take up header space, regardless of whether or not the feature bit is set. - -==Tables== - -Tables provide the translation from logical offsets in the block device to cluster offsets in the file. - - #define TABLE_NOFFSETS (table_size * cluster_size / sizeof(uint64_t)) - - Table { - uint64_t offsets[TABLE_NOFFSETS]; - } - -The tables are organized as follows: - - +----------+ - | L1 table | - +----------+ - ,------' | '------. - +----------+ | +----------+ - | L2 table | ... | L2 table | - +----------+ +----------+ - ,------' | '------. - +----------+ | +----------+ - | Data | ... | Data | - +----------+ +----------+ - -A table is made up of one or more contiguous clusters. The table_size header field determines table size for an image file. For example, cluster_size=64 KB and table_size=4 results in 256 KB tables. - -The logical image size must be less than or equal to the maximum possible size of clusters rooted by the L1 table: - header.image_size <= TABLE_NOFFSETS * TABLE_NOFFSETS * header.cluster_size - -L1, L2, and data cluster offsets must be aligned to header.cluster_size. The following offsets have special meanings: - -===L2 table offsets=== -* 0 - unallocated. The L2 table is not yet allocated. - -===Data cluster offsets=== -* 0 - unallocated. The data cluster is not yet allocated. -* 1 - zero. The data cluster contents are all zeroes and no cluster is allocated. - -Future format extensions may wish to store per-offset information. The least significant 12 bits of an offset are reserved for this purpose and must be set to zero. Image files with cluster_size > 2^12 will have more unused bits which should also be zeroed. - -===Unallocated L2 tables and data clusters=== -Reads to an unallocated area of the image file access the backing file. If there is no backing file, then zeroes are produced. The backing file may be smaller than the image file and reads of unallocated areas beyond the end of the backing file produce zeroes. - -Writes to an unallocated area cause a new data clusters to be allocated, and a new L2 table if that is also unallocated. The new data cluster is populated with data from the backing file (or zeroes if no backing file) and the data being written. - -===Zero data clusters=== -Zero data clusters are a space-efficient way of storing zeroed regions of the image. - -Reads to a zero data cluster produce zeroes. Note that the difference between an unallocated and a zero data cluster is that zero data clusters stop the reading of contents from the backing file. - -Writes to a zero data cluster cause a new data cluster to be allocated. The new data cluster is populated with zeroes and the data being written. 
- -===Logical offset translation=== -Logical offsets are translated into cluster offsets as follows: - - table_bits table_bits cluster_bits - <--------> <--------> <---------------> - +----------+----------+-----------------+ - | L1 index | L2 index | byte offset | - +----------+----------+-----------------+ - - Structure of a logical offset - - offset_mask = ~(cluster_size - 1) # mask for the image file byte offset - - def logical_to_cluster_offset(l1_index, l2_index, byte_offset): - l2_offset = l1_table[l1_index] - l2_table = load_table(l2_offset) - cluster_offset = l2_table[l2_index] & offset_mask - return cluster_offset + byte_offset - -==Consistency checking== - -This section is informational and included to provide background on the use of the QED_F_NEED_CHECK ''features'' bit. - -The QED_F_NEED_CHECK bit is used to mark an image as dirty before starting an operation that could leave the image in an inconsistent state if interrupted by a crash or power failure. A dirty image must be checked on open because its metadata may not be consistent. - -Consistency check includes the following invariants: -# Each cluster is referenced once and only once. It is an inconsistency to have a cluster referenced more than once by L1 or L2 tables. A cluster has been leaked if it has no references. -# Offsets must be within the image file size and must be ''cluster_size'' aligned. -# Table offsets must at least ''table_size'' * ''cluster_size'' bytes from the end of the image file so that there is space for the entire table. - -The consistency check process starts by from ''l1_table_offset'' and scans all L2 tables. After the check completes with no other errors besides leaks, the QED_F_NEED_CHECK bit can be cleared and the image can be accessed. diff --git a/docs/interop/qemu-ga-ref.rst b/docs/interop/qemu-ga-ref.rst index 032d4924552..ea6652ae43e 100644 --- a/docs/interop/qemu-ga-ref.rst +++ b/docs/interop/qemu-ga-ref.rst @@ -1,7 +1,5 @@ QEMU Guest Agent Protocol Reference =================================== -.. contents:: - :depth: 3 - .. qapi-doc:: qga/qapi-schema.json + :namespace: QGA diff --git a/docs/interop/qemu-ga.rst b/docs/interop/qemu-ga.rst index 9c7380896ae..d16cc1b9f07 100644 --- a/docs/interop/qemu-ga.rst +++ b/docs/interop/qemu-ga.rst @@ -1,3 +1,5 @@ +.. _qemu-ga: + QEMU Guest Agent ================ @@ -50,7 +52,7 @@ Options .. option:: -c, --config=PATH Configuration file path (the default is |CONFDIR|\ ``/qemu-ga.conf``, - unless overriden by the QGA_CONF environment variable) + unless overridden by the QGA_CONF environment variable) .. option:: -m, --method=METHOD diff --git a/docs/interop/qemu-qmp-ref.rst b/docs/interop/qemu-qmp-ref.rst index f94614a0b2f..f0ce39ad67d 100644 --- a/docs/interop/qemu-qmp-ref.rst +++ b/docs/interop/qemu-qmp-ref.rst @@ -4,6 +4,7 @@ QEMU QMP Reference Manual ========================= .. contents:: - :depth: 3 + :local: .. qapi-doc:: qapi/qapi-schema.json + :namespace: QMP diff --git a/docs/interop/qemu-storage-daemon-qmp-ref.rst b/docs/interop/qemu-storage-daemon-qmp-ref.rst index 9fed68152f5..4dbb6a2cc83 100644 --- a/docs/interop/qemu-storage-daemon-qmp-ref.rst +++ b/docs/interop/qemu-storage-daemon-qmp-ref.rst @@ -2,6 +2,7 @@ QEMU Storage Daemon QMP Reference Manual ======================================== .. contents:: - :depth: 3 + :local: .. 
qapi-doc:: storage-daemon/qapi/qapi-schema.json + :namespace: QSD diff --git a/docs/interop/vfio-user.rst b/docs/interop/vfio-user.rst new file mode 100644 index 00000000000..0b06f026b0d --- /dev/null +++ b/docs/interop/vfio-user.rst @@ -0,0 +1,1520 @@ +.. include:: +.. SPDX-License-Identifier: GPL-2.0-or-later + +================================ +vfio-user Protocol Specification +================================ + +.. contents:: Table of Contents + +Introduction +============ +vfio-user is a protocol that allows a device to be emulated in a separate +process outside of a Virtual Machine Monitor (VMM). vfio-user devices consist +of a generic VFIO device type, living inside the VMM, which we call the client, +and the core device implementation, living outside the VMM, which we call the +server. + +The vfio-user specification is partly based on the +`Linux VFIO ioctl interface `_. + +VFIO is a mature and stable API, backed by an extensively used framework. The +existing VFIO client implementation in QEMU (``qemu/hw/vfio/``) can be largely +re-used, though there is nothing in this specification that requires that +particular implementation. None of the VFIO kernel modules are required for +supporting the protocol, on either the client or server side. Some source +definitions in VFIO are re-used for vfio-user. + +The main idea is to allow a virtual device to function in a separate process in +the same host over a UNIX domain socket. A UNIX domain socket (``AF_UNIX``) is +chosen because file descriptors can be trivially sent over it, which in turn +allows: + +* Sharing of client memory for DMA with the server. +* Sharing of server memory with the client for fast MMIO. +* Efficient sharing of eventfd's for triggering interrupts. + +Other socket types could be used which allow the server to run in a separate +guest in the same host (``AF_VSOCK``) or remotely (``AF_INET``). Theoretically +the underlying transport does not necessarily have to be a socket, however we do +not examine such alternatives. In this protocol version we focus on using a UNIX +domain socket and introduce basic support for the other two types of sockets +without considering performance implications. + +While passing of file descriptors is desirable for performance reasons, support +is not necessary for either the client or the server in order to implement the +protocol. There is always an in-band, message-passing fall back mechanism. + +Overview +======== + +VFIO is a framework that allows a physical device to be securely passed through +to a user space process; the device-specific kernel driver does not drive the +device at all. Typically, the user space process is a VMM and the device is +passed through to it in order to achieve high performance. VFIO provides an API +and the required functionality in the kernel. QEMU has adopted VFIO to allow a +guest to directly access physical devices, instead of emulating them in +software. + +vfio-user reuses the core VFIO concepts defined in its API, but implements them +as messages to be sent over a socket. It does not change the kernel-based VFIO +in any way, in fact none of the VFIO kernel modules need to be loaded to use +vfio-user. It is also possible for the client to concurrently use the current +kernel-based VFIO for one device, and vfio-user for another device. + +VFIO Device Model +----------------- + +A device under VFIO presents a standard interface to the user process. 
Many of
+the VFIO operations in the existing interface use the ``ioctl()`` system call, and
+references to the existing interface are called the ``ioctl()`` implementation in
+this document.
+
+The following sections describe the set of messages that implement the vfio-user
+interface over a socket. In many cases, the messages are analogous to data
+structures used in the ``ioctl()`` implementation. Messages derived from the
+``ioctl()`` will have a name derived from the ``ioctl()`` command name. E.g., the
+``VFIO_DEVICE_GET_INFO`` ``ioctl()`` command becomes a
+``VFIO_USER_DEVICE_GET_INFO`` message. The purpose of this reuse is to share as
+much code as feasible with the ``ioctl()`` implementation.
+
+Connection Initiation
+^^^^^^^^^^^^^^^^^^^^^
+
+After the client connects to the server, the initial client message is
+``VFIO_USER_VERSION`` to propose a protocol version and set of capabilities to
+apply to the session. The server replies with a compatible version and set of
+capabilities it supports, or closes the connection if it cannot support the
+advertised version.
+
+Device Information
+^^^^^^^^^^^^^^^^^^
+
+The client uses a ``VFIO_USER_DEVICE_GET_INFO`` message to query the server for
+information about the device. This information includes:
+
+* The device type and whether it supports reset (``VFIO_DEVICE_FLAGS_``),
+* the number of device regions, and
+* the number of interrupt types the device supports.
+
+Region Information
+^^^^^^^^^^^^^^^^^^
+
+The client uses ``VFIO_USER_DEVICE_GET_REGION_INFO`` messages to query the
+server for information about the device's regions. This information describes:
+
+* Read and write permissions, whether it can be memory mapped, and whether it
+  supports additional capabilities (``VFIO_REGION_INFO_CAP_``).
+* Region index, size, and offset.
+
+When a device region can be mapped by the client, the server provides a file
+descriptor which the client can ``mmap()``. The server is responsible for
+polling for client updates to memory mapped regions.
+
+Region Capabilities
+"""""""""""""""""""
+
+Some regions have additional capabilities that cannot be described adequately
+by the region info data structure. These capabilities are returned in the
+region info reply in a list similar to PCI capabilities in a PCI device's
+configuration space.
+
+Sparse Regions
+""""""""""""""
+A region can be memory-mappable in whole or in part. When only a subset of a
+region can be mapped by the client, a ``VFIO_REGION_INFO_CAP_SPARSE_MMAP``
+capability is included in the region info reply. This capability describes
+which portions can be mapped by the client.
+
+.. Note::
+   For example, in a virtual NVMe controller, sparse regions can be used so
+   that accesses to the NVMe registers (found in the beginning of BAR0) are
+   trapped (an infrequent event), while allowing direct access to the doorbells
+   (an extremely frequent event as every I/O submission requires a write to
+   BAR0), found in the next page after the NVMe registers in BAR0.
+
+Device-Specific Regions
+"""""""""""""""""""""""
+
+A device can define regions additional to the standard ones (e.g. PCI indexes
+0-8). This is achieved by including a ``VFIO_REGION_INFO_CAP_TYPE`` capability
+in the region info reply of a device-specific region. Such regions are reflected
+in ``struct vfio_user_device_info.num_regions``. Thus, for PCI devices this
+value can be equal to, or higher than, ``VFIO_PCI_NUM_REGIONS``.
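+
+To make the overview above concrete, the following pseudocode sketches the
+order of messages a client might issue while bringing up a device. It is
+purely illustrative and not part of the protocol: the ``conn`` object and the
+caller-supplied ``send``/``recv`` helpers are hypothetical stand-ins for
+whatever transport a real client implements, and the exact message payloads
+are defined later in this specification::
+
+  # Hypothetical client bring-up sequence (illustrative only, not normative).
+  def bring_up(conn, send, recv, proposed_version, capabilities):
+      # 1. Negotiate protocol version and capabilities.
+      send(conn, "VFIO_USER_VERSION", proposed_version, capabilities)
+      version_reply = recv(conn)
+
+      # 2. Learn how many regions and interrupt types the device exposes.
+      send(conn, "VFIO_USER_DEVICE_GET_INFO")
+      info = recv(conn)
+
+      # 3. Query each region; mappable regions arrive with a file descriptor.
+      for index in range(info.num_regions):
+          send(conn, "VFIO_USER_DEVICE_GET_REGION_INFO", index)
+          region = recv(conn)
+
+      # 4. Query each interrupt type before configuring it with SET_IRQS.
+      for index in range(info.num_irqs):
+          send(conn, "VFIO_USER_DEVICE_GET_IRQ_INFO", index)
+          irq = recv(conn)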
+ +Region I/O via file descriptors +------------------------------- + +For unmapped regions, region I/O from the client is done via +``VFIO_USER_REGION_READ/WRITE``. As an optimization, ioeventfds or ioregionfds +may be configured for sub-regions of some regions. A client may request +information on these sub-regions via ``VFIO_USER_DEVICE_GET_REGION_IO_FDS``; by +configuring the returned file descriptors as ioeventfds or ioregionfds, the +server can be directly notified of I/O (for example, by KVM) without taking a +trip through the client. + +Interrupts +^^^^^^^^^^ + +The client uses ``VFIO_USER_DEVICE_GET_IRQ_INFO`` messages to query the server +for the device's interrupt types. The interrupt types are specific to the bus +the device is attached to, and the client is expected to know the capabilities +of each interrupt type. The server can signal an interrupt by directly injecting +interrupts into the guest via an event file descriptor. The client configures +how the server signals an interrupt with ``VFIO_USER_SET_IRQS`` messages. + +Device Read and Write +^^^^^^^^^^^^^^^^^^^^^ + +When the guest executes load or store operations to an unmapped device region, +the client forwards these operations to the server with +``VFIO_USER_REGION_READ`` or ``VFIO_USER_REGION_WRITE`` messages. The server +will reply with data from the device on read operations or an acknowledgement on +write operations. See `Read and Write Operations`_. + +Client memory access +-------------------- + +The client uses ``VFIO_USER_DMA_MAP`` and ``VFIO_USER_DMA_UNMAP`` messages to +inform the server of the valid DMA ranges that the server can access on behalf +of a device (typically, VM guest memory). DMA memory may be accessed by the +server via ``VFIO_USER_DMA_READ`` and ``VFIO_USER_DMA_WRITE`` messages over the +socket. In this case, the "DMA" part of the naming is a misnomer. + +Actual direct memory access of client memory from the server is possible if the +client provides file descriptors the server can ``mmap()``. Note that ``mmap()`` +privileges cannot be revoked by the client, therefore file descriptors should +only be exported in environments where the client trusts the server not to +corrupt guest memory. + +See `Read and Write Operations`_. + +Client/server interactions +========================== + +Socket +------ + +A server can serve: + +1) one or more clients, and/or +2) one or more virtual devices, belonging to one or more clients. + +The current protocol specification requires a dedicated socket per +client/server connection. It is a server-side implementation detail whether a +single server handles multiple virtual devices from the same or multiple +clients. The location of the socket is implementation-specific. Multiplexing +clients, devices, and servers over the same socket is not supported in this +version of the protocol. + +Authentication +-------------- + +For ``AF_UNIX``, we rely on OS mandatory access controls on the socket files, +therefore it is up to the management layer to set up the socket as required. +Socket types that span guests or hosts will require a proper authentication +mechanism. Defining that mechanism is deferred to a future version of the +protocol. + +Command Concurrency +------------------- + +A client may pipeline multiple commands without waiting for previous command +replies. The server will process commands in the order they are received. 
A +consequence of this is if a client issues a command with the *No_reply* bit, +then subsequently issues a command without *No_reply*, the older command will +have been processed before the reply to the younger command is sent by the +server. The client must be aware of the device's capability to process +concurrent commands if pipelining is used. For example, pipelining allows +multiple client threads to concurrently access device regions; the client must +ensure these accesses obey device semantics. + +An example is a frame buffer device, where the device may allow concurrent +access to different areas of video memory, but may have indeterminate behavior +if concurrent accesses are performed to command or status registers. + +Note that unrelated messages sent from the server to the client can appear in +between a client to server request/reply and vice versa. + +Implementers should be prepared for certain commands to exhibit potentially +unbounded latencies. For example, ``VFIO_USER_DEVICE_RESET`` may take an +arbitrarily long time to complete; clients should take care not to block +unnecessarily. + +Socket Disconnection Behavior +----------------------------- +The server and the client can disconnect from each other, either intentionally +or unexpectedly. Both the client and the server need to know how to handle such +events. + +Server Disconnection +^^^^^^^^^^^^^^^^^^^^ +A server disconnecting from the client may indicate that: + +1) A virtual device has been restarted, either intentionally (e.g. because of a + device update) or unintentionally (e.g. because of a crash). +2) A virtual device has been shut down with no intention to be restarted. + +It is impossible for the client to know whether or not a failure is +intermittent or innocuous and should be retried, therefore the client should +reset the VFIO device when it detects the socket has been disconnected. +Error recovery will be driven by the guest's device error handling +behavior. + +Client Disconnection +^^^^^^^^^^^^^^^^^^^^ +The client disconnecting from the server primarily means that the client +has exited. Currently, this means that the guest is shut down so the device is +no longer needed therefore the server can automatically exit. However, there +can be cases where a client disconnection should not result in a server exit: + +1) A single server serving multiple clients. +2) A multi-process QEMU upgrading itself step by step, which is not yet + implemented. + +Therefore in order for the protocol to be forward compatible, the server should +respond to a client disconnection as follows: + + - all client memory regions are unmapped and cleaned up (including closing any + passed file descriptors) + - all IRQ file descriptors passed from the old client are closed + - the device state should otherwise be retained + +The expectation is that when a client reconnects, it will re-establish IRQ and +client memory mappings. + +If anything happens to the client (such as qemu really did exit), the control +stack will know about it and can clean up resources accordingly. + +Security Considerations +----------------------- + +Speaking generally, vfio-user clients should not trust servers, and vice versa. +Standard tools and mechanisms should be used on both sides to validate input and +prevent against denial of service scenarios, buffer overflow, etc. + +Request Retry and Response Timeout +---------------------------------- +A failed command is a command that has been successfully sent and has been +responded to with an error code. 
Failure to send the command in the first place +(e.g. because the socket is disconnected) is a different type of error examined +earlier in the disconnect section. + +.. Note:: + QEMU's VFIO retries certain operations if they fail. While this makes sense + for real HW, we don't know for sure whether it makes sense for virtual + devices. + +Defining a retry and timeout scheme is deferred to a future version of the +protocol. + +Message sizes +------------- + +Some requests have an ``argsz`` field. In a request, it defines the maximum +expected reply payload size, which should be at least the size of the fixed +reply payload headers defined here. The *request* payload size is defined by the +usual ``msg_size`` field in the header, not the ``argsz`` field. + +In a reply, the server sets ``argsz`` field to the size needed for a full +payload size. This may be less than the requested maximum size. This may be +larger than the requested maximum size: in that case, the full payload is not +included in the reply, but the ``argsz`` field in the reply indicates the needed +size, allowing a client to allocate a larger buffer for holding the reply before +trying again. + +In addition, during negotiation (see `Version`_), the client and server may +each specify a ``max_data_xfer_size`` value; this defines the maximum data that +may be read or written via one of the ``VFIO_USER_DMA/REGION_READ/WRITE`` +messages; see `Read and Write Operations`_. + +Protocol Specification +====================== + +To distinguish from the base VFIO symbols, all vfio-user symbols are prefixed +with ``vfio_user`` or ``VFIO_USER``. In this revision, all data is in the +endianness of the host system, although this may be relaxed in future +revisions in cases where the client and server run on different hosts +with different endianness. + +Unless otherwise specified, all sizes should be presumed to be in bytes. + +.. _Commands: + +Commands +-------- +The following table lists the VFIO message command IDs, and whether the +message command is sent from the client or the server. + +====================================== ========= ================= +Name Command Request Direction +====================================== ========= ================= +``VFIO_USER_VERSION`` 1 client -> server +``VFIO_USER_DMA_MAP`` 2 client -> server +``VFIO_USER_DMA_UNMAP`` 3 client -> server +``VFIO_USER_DEVICE_GET_INFO`` 4 client -> server +``VFIO_USER_DEVICE_GET_REGION_INFO`` 5 client -> server +``VFIO_USER_DEVICE_GET_REGION_IO_FDS`` 6 client -> server +``VFIO_USER_DEVICE_GET_IRQ_INFO`` 7 client -> server +``VFIO_USER_DEVICE_SET_IRQS`` 8 client -> server +``VFIO_USER_REGION_READ`` 9 client -> server +``VFIO_USER_REGION_WRITE`` 10 client -> server +``VFIO_USER_DMA_READ`` 11 server -> client +``VFIO_USER_DMA_WRITE`` 12 server -> client +``VFIO_USER_DEVICE_RESET`` 13 client -> server +``VFIO_USER_REGION_WRITE_MULTI`` 15 client -> server +====================================== ========= ================= + +Header +------ + +All messages, both command messages and reply messages, are preceded by a +16-byte header that contains basic information about the message. The header is +followed by message-specific data described in the sections below. 
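+
+As an illustration of the fixed-size header whose layout is given in the table
+below, the following sketch packs one with Python's ``struct`` module. The
+constant and function names are illustrative only (they are not defined by this
+specification), and native byte order is used because the protocol uses the
+endianness of the host system::
+
+  import struct
+
+  # Flag bits, per the header description below (names are illustrative).
+  VFIO_USER_F_TYPE_COMMAND = 0x0        # Type field, bits 0-3
+  VFIO_USER_F_TYPE_REPLY   = 0x1
+  VFIO_USER_F_NO_REPLY     = 1 << 4
+  VFIO_USER_F_ERROR        = 1 << 5
+
+  def pack_header(msg_id, command, payload_len, flags, error=0):
+      # Message ID (2), Command (2), Message size (4), Flags (4), Error (4).
+      # Message size covers the whole message, header included.
+      return struct.pack("=HHIII", msg_id, command, 16 + payload_len,
+                         flags, error)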
+ ++----------------+--------+-------------+ +| Name | Offset | Size | ++================+========+=============+ +| Message ID | 0 | 2 | ++----------------+--------+-------------+ +| Command | 2 | 2 | ++----------------+--------+-------------+ +| Message size | 4 | 4 | ++----------------+--------+-------------+ +| Flags | 8 | 4 | ++----------------+--------+-------------+ +| | +-----+------------+ | +| | | Bit | Definition | | +| | +=====+============+ | +| | | 0-3 | Type | | +| | +-----+------------+ | +| | | 4 | No_reply | | +| | +-----+------------+ | +| | | 5 | Error | | +| | +-----+------------+ | ++----------------+--------+-------------+ +| Error | 12 | 4 | ++----------------+--------+-------------+ +| | 16 | variable | ++----------------+--------+-------------+ + +* *Message ID* identifies the message, and is echoed in the command's reply + message. Message IDs belong entirely to the sender, can be re-used (even + concurrently) and the receiver must not make any assumptions about their + uniqueness. +* *Command* specifies the command to be executed, listed in Commands_. It is + also set in the reply header. +* *Message size* contains the size of the entire message, including the header. +* *Flags* contains attributes of the message: + + * The *Type* bits indicate the message type. + + * *Command* (value 0x0) indicates a command message. + * *Reply* (value 0x1) indicates a reply message acknowledging a previous + command with the same message ID. + * *No_reply* in a command message indicates that no reply is needed for this + command. This is commonly used when multiple commands are sent, and only + the last needs acknowledgement. + * *Error* in a reply message indicates the command being acknowledged had + an error. In this case, the *Error* field will be valid. + +* *Error* in a reply message is an optional UNIX errno value. It may be zero + even if the Error bit is set in Flags. It is reserved in a command message. + +Each command message in Commands_ must be replied to with a reply message, +unless the message sets the *No_Reply* bit. The reply consists of the header +with the *Reply* bit set, plus any additional data. + +If an error occurs, the reply message must only include the reply header. + +As the header is standard in both requests and replies, it is not included in +the command-specific specifications below; each message definition should be +appended to the standard header, and the offsets are given from the end of the +standard header. + +``VFIO_USER_VERSION`` +--------------------- + +.. _Version: + +This is the initial message sent by the client after the socket connection is +established; the same format is used for the server's reply. + +Upon establishing a connection, the client must send a ``VFIO_USER_VERSION`` +message proposing a protocol version and a set of capabilities. The server +compares these with the versions and capabilities it supports and sends a +``VFIO_USER_VERSION`` reply according to the following rules. + +* The major version in the reply must be the same as proposed. If the client + does not support the proposed major, it closes the connection. +* The minor version in the reply must be equal to or less than the minor + version proposed. +* The capability list must be a subset of those proposed. If the server + requires a capability the client did not include, it closes the connection. + +The protocol major version will only change when incompatible protocol changes +are made, such as changing the message format. 
The minor version may change +when compatible changes are made, such as adding new messages or capabilities, +Both the client and server must support all minor versions less than the +maximum minor version it supports. E.g., an implementation that supports +version 1.3 must also support 1.0 through 1.2. + +When making a change to this specification, the protocol version number must +be included in the form "added in version X.Y" + +Request +^^^^^^^ + +============== ====== ==== +Name Offset Size +============== ====== ==== +version major 0 2 +version minor 2 2 +version data 4 variable (including terminating NUL). Optional. +============== ====== ==== + +The version data is an optional UTF-8 encoded JSON byte array with the following +format: + ++--------------+--------+-----------------------------------+ +| Name | Type | Description | ++==============+========+===================================+ +| capabilities | object | Contains common capabilities that | +| | | the sender supports. Optional. | ++--------------+--------+-----------------------------------+ + +Capabilities: + ++--------------------+---------+------------------------------------------------+ +| Name | Type | Description | ++====================+=========+================================================+ +| max_msg_fds | number | Maximum number of file descriptors that can be | +| | | received by the sender in one message. | +| | | Optional. If not specified then the receiver | +| | | must assume a value of ``1``. | ++--------------------+---------+------------------------------------------------+ +| max_data_xfer_size | number | Maximum ``count`` for data transfer messages; | +| | | see `Read and Write Operations`_. Optional, | +| | | with a default value of 1048576 bytes. | ++--------------------+---------+------------------------------------------------+ +| pgsizes | number | Page sizes supported in DMA map operations | +| | | or'ed together. Optional, with a default value | +| | | of supporting only 4k pages. | ++--------------------+---------+------------------------------------------------+ +| max_dma_maps | number | Maximum number DMA map windows that can be | +| | | valid simultaneously. Optional, with a | +| | | value of 65535 (64k-1). | ++--------------------+---------+------------------------------------------------+ +| migration | object | Migration capability parameters. If missing | +| | | then migration is not supported by the sender. | ++--------------------+---------+------------------------------------------------+ +| write_multiple | boolean | ``VFIO_USER_REGION_WRITE_MULTI`` messages | +| | | are supported if the value is ``true``. | ++--------------------+---------+------------------------------------------------+ + +The migration capability contains the following name/value pairs: + ++-----------------+--------+--------------------------------------------------+ +| Name | Type | Description | ++=================+========+==================================================+ +| pgsize | number | Page size of dirty pages bitmap. The smallest | +| | | between the client and the server is used. | ++-----------------+--------+--------------------------------------------------+ +| max_bitmap_size | number | Maximum bitmap size in ``VFIO_USER_DIRTY_PAGES`` | +| | | and ``VFIO_DMA_UNMAP`` messages. Optional, | +| | | with a default value of 256MB. 
| ++-----------------+--------+--------------------------------------------------+ + +Reply +^^^^^ + +The same message format is used in the server's reply with the semantics +described above. + +``VFIO_USER_DMA_MAP`` +--------------------- + +This command message is sent by the client to the server to inform it of the +memory regions the server can access. It must be sent before the server can +perform any DMA to the client. It is normally sent directly after the version +handshake is completed, but may also occur when memory is added to the client, +or if the client uses a vIOMMU. + +Request +^^^^^^^ + +The request payload for this message is a structure of the following format: + ++-------------+--------+-------------+ +| Name | Offset | Size | ++=============+========+=============+ +| argsz | 0 | 4 | ++-------------+--------+-------------+ +| flags | 4 | 4 | ++-------------+--------+-------------+ +| | +-----+------------+ | +| | | Bit | Definition | | +| | +=====+============+ | +| | | 0 | readable | | +| | +-----+------------+ | +| | | 1 | writeable | | +| | +-----+------------+ | ++-------------+--------+-------------+ +| offset | 8 | 8 | ++-------------+--------+-------------+ +| address | 16 | 8 | ++-------------+--------+-------------+ +| size | 24 | 8 | ++-------------+--------+-------------+ + +* *argsz* is the size of the above structure. Note there is no reply payload, + so this field differs from other message types. +* *flags* contains the following region attributes: + + * *readable* indicates that the region can be read from. + + * *writeable* indicates that the region can be written to. + +* *offset* is the file offset of the region with respect to the associated file + descriptor, or zero if the region is not mappable +* *address* is the base DMA address of the region. +* *size* is the size of the region. + +This structure is 32 bytes in size, so the message size is 16 + 32 bytes. + +If the DMA region being added can be directly mapped by the server, a file +descriptor must be sent as part of the message meta-data. The region can be +mapped via the mmap() system call. On ``AF_UNIX`` sockets, the file descriptor +must be passed as ``SCM_RIGHTS`` type ancillary data. Otherwise, if the DMA +region cannot be directly mapped by the server, no file descriptor must be sent +as part of the message meta-data and the DMA region can be accessed by the +server using ``VFIO_USER_DMA_READ`` and ``VFIO_USER_DMA_WRITE`` messages, +explained in `Read and Write Operations`_. A command to map over an existing +region must be failed by the server with ``EEXIST`` set in error field in the +reply. + +Reply +^^^^^ + +There is no payload in the reply message. + +``VFIO_USER_DMA_UNMAP`` +----------------------- + +This command message is sent by the client to the server to inform it that a +DMA region, previously made available via a ``VFIO_USER_DMA_MAP`` command +message, is no longer available for DMA. It typically occurs when memory is +subtracted from the client or if the client uses a vIOMMU. 
The DMA region is +described by the following structure: + +Request +^^^^^^^ + +The request payload for this message is a structure of the following format: + ++--------------+--------+------------------------+ +| Name | Offset | Size | ++==============+========+========================+ +| argsz | 0 | 4 | ++--------------+--------+------------------------+ +| flags | 4 | 4 | ++--------------+--------+------------------------+ +| address | 8 | 8 | ++--------------+--------+------------------------+ +| size | 16 | 8 | ++--------------+--------+------------------------+ + +* *argsz* is the maximum size of the reply payload. +* *flags* is unused in this version. +* *address* is the base DMA address of the DMA region. +* *size* is the size of the DMA region. + +The address and size of the DMA region being unmapped must match exactly a +previous mapping. + +Reply +^^^^^ + +Upon receiving a ``VFIO_USER_DMA_UNMAP`` command, if the file descriptor is +mapped then the server must release all references to that DMA region before +replying, which potentially includes in-flight DMA transactions. + +The server responds with the original DMA entry in the request. + + +``VFIO_USER_DEVICE_GET_INFO`` +----------------------------- + +This command message is sent by the client to the server to query for basic +information about the device. + +Request +^^^^^^^ + ++-------------+--------+--------------------------+ +| Name | Offset | Size | ++=============+========+==========================+ +| argsz | 0 | 4 | ++-------------+--------+--------------------------+ +| flags | 4 | 4 | ++-------------+--------+--------------------------+ +| | +-----+-------------------------+ | +| | | Bit | Definition | | +| | +=====+=========================+ | +| | | 0 | VFIO_DEVICE_FLAGS_RESET | | +| | +-----+-------------------------+ | +| | | 1 | VFIO_DEVICE_FLAGS_PCI | | +| | +-----+-------------------------+ | ++-------------+--------+--------------------------+ +| num_regions | 8 | 4 | ++-------------+--------+--------------------------+ +| num_irqs | 12 | 4 | ++-------------+--------+--------------------------+ + +* *argsz* is the maximum size of the reply payload +* all other fields must be zero. + +Reply +^^^^^ + ++-------------+--------+--------------------------+ +| Name | Offset | Size | ++=============+========+==========================+ +| argsz | 0 | 4 | ++-------------+--------+--------------------------+ +| flags | 4 | 4 | ++-------------+--------+--------------------------+ +| | +-----+-------------------------+ | +| | | Bit | Definition | | +| | +=====+=========================+ | +| | | 0 | VFIO_DEVICE_FLAGS_RESET | | +| | +-----+-------------------------+ | +| | | 1 | VFIO_DEVICE_FLAGS_PCI | | +| | +-----+-------------------------+ | ++-------------+--------+--------------------------+ +| num_regions | 8 | 4 | ++-------------+--------+--------------------------+ +| num_irqs | 12 | 4 | ++-------------+--------+--------------------------+ + +* *argsz* is the size required for the full reply payload (16 bytes today) +* *flags* contains the following device attributes. + + * ``VFIO_DEVICE_FLAGS_RESET`` indicates that the device supports the + ``VFIO_USER_DEVICE_RESET`` message. + * ``VFIO_DEVICE_FLAGS_PCI`` indicates that the device is a PCI device. + +* *num_regions* is the number of memory regions that the device exposes. +* *num_irqs* is the number of distinct interrupt types that the device supports. + +This version of the protocol only supports PCI devices. 
Additional devices may +be supported in future versions. + +``VFIO_USER_DEVICE_GET_REGION_INFO`` +------------------------------------ + +This command message is sent by the client to the server to query for +information about device regions. The VFIO region info structure is defined in +```` (``struct vfio_region_info``). + +Request +^^^^^^^ + ++------------+--------+------------------------------+ +| Name | Offset | Size | ++============+========+==============================+ +| argsz | 0 | 4 | ++------------+--------+------------------------------+ +| flags | 4 | 4 | ++------------+--------+------------------------------+ +| index | 8 | 4 | ++------------+--------+------------------------------+ +| cap_offset | 12 | 4 | ++------------+--------+------------------------------+ +| size | 16 | 8 | ++------------+--------+------------------------------+ +| offset | 24 | 8 | ++------------+--------+------------------------------+ + +* *argsz* the maximum size of the reply payload +* *index* is the index of memory region being queried, it is the only field + that is required to be set in the command message. +* all other fields must be zero. + +Reply +^^^^^ + ++------------+--------+------------------------------+ +| Name | Offset | Size | ++============+========+==============================+ +| argsz | 0 | 4 | ++------------+--------+------------------------------+ +| flags | 4 | 4 | ++------------+--------+------------------------------+ +| | +-----+-----------------------------+ | +| | | Bit | Definition | | +| | +=====+=============================+ | +| | | 0 | VFIO_REGION_INFO_FLAG_READ | | +| | +-----+-----------------------------+ | +| | | 1 | VFIO_REGION_INFO_FLAG_WRITE | | +| | +-----+-----------------------------+ | +| | | 2 | VFIO_REGION_INFO_FLAG_MMAP | | +| | +-----+-----------------------------+ | +| | | 3 | VFIO_REGION_INFO_FLAG_CAPS | | +| | +-----+-----------------------------+ | ++------------+--------+------------------------------+ ++------------+--------+------------------------------+ +| index | 8 | 4 | ++------------+--------+------------------------------+ +| cap_offset | 12 | 4 | ++------------+--------+------------------------------+ +| size | 16 | 8 | ++------------+--------+------------------------------+ +| offset | 24 | 8 | ++------------+--------+------------------------------+ + +* *argsz* is the size required for the full reply payload (region info structure + plus the size of any region capabilities) +* *flags* are attributes of the region: + + * ``VFIO_REGION_INFO_FLAG_READ`` allows client read access to the region. + * ``VFIO_REGION_INFO_FLAG_WRITE`` allows client write access to the region. + * ``VFIO_REGION_INFO_FLAG_MMAP`` specifies the client can mmap() the region. + When this flag is set, the reply will include a file descriptor in its + meta-data. On ``AF_UNIX`` sockets, the file descriptors will be passed as + ``SCM_RIGHTS`` type ancillary data. + * ``VFIO_REGION_INFO_FLAG_CAPS`` indicates additional capabilities found in the + reply. + +* *index* is the index of memory region being queried, it is the only field + that is required to be set in the command message. +* *cap_offset* describes where additional region capabilities can be found. + cap_offset is relative to the beginning of the VFIO region info structure. + The data structure it points is a VFIO cap header defined in + ````. +* *size* is the size of the region. +* *offset* is the offset that should be given to the mmap() system call for + regions with the MMAP attribute. 
It is also used as the base offset when + mapping a VFIO sparse mmap area, described below. + +VFIO region capabilities +"""""""""""""""""""""""" + +The VFIO region information can also include a capabilities list. This list is +similar to a PCI capability list - each entry has a common header that +identifies a capability and where the next capability in the list can be found. +The VFIO capability header format is defined in ```` (``struct +vfio_info_cap_header``). + +VFIO cap header format +"""""""""""""""""""""" + ++---------+--------+------+ +| Name | Offset | Size | ++=========+========+======+ +| id | 0 | 2 | ++---------+--------+------+ +| version | 2 | 2 | ++---------+--------+------+ +| next | 4 | 4 | ++---------+--------+------+ + +* *id* is the capability identity. +* *version* is a capability-specific version number. +* *next* specifies the offset of the next capability in the capability list. It + is relative to the beginning of the VFIO region info structure. + +VFIO sparse mmap cap header +""""""""""""""""""""""""""" + ++------------------+----------------------------------+ +| Name | Value | ++==================+==================================+ +| id | VFIO_REGION_INFO_CAP_SPARSE_MMAP | ++------------------+----------------------------------+ +| version | 0x1 | ++------------------+----------------------------------+ +| next | | ++------------------+----------------------------------+ +| sparse mmap info | VFIO region info sparse mmap | ++------------------+----------------------------------+ + +This capability is defined when only a subrange of the region supports +direct access by the client via mmap(). The VFIO sparse mmap area is defined in +```` (``struct vfio_region_sparse_mmap_area`` and ``struct +vfio_region_info_cap_sparse_mmap``). + +VFIO region info cap sparse mmap +"""""""""""""""""""""""""""""""" + ++----------+--------+------+ +| Name | Offset | Size | ++==========+========+======+ +| nr_areas | 0 | 4 | ++----------+--------+------+ +| reserved | 4 | 4 | ++----------+--------+------+ +| offset | 8 | 8 | ++----------+--------+------+ +| size | 16 | 8 | ++----------+--------+------+ +| ... | | | ++----------+--------+------+ + +* *nr_areas* is the number of sparse mmap areas in the region. +* *offset* and size describe a single area that can be mapped by the client. + There will be *nr_areas* pairs of offset and size. The offset will be added to + the base offset given in the ``VFIO_USER_DEVICE_GET_REGION_INFO`` to form the + offset argument of the subsequent mmap() call. + +The VFIO sparse mmap area is defined in ```` (``struct +vfio_region_info_cap_sparse_mmap``). + + +``VFIO_USER_DEVICE_GET_REGION_IO_FDS`` +-------------------------------------- + +Clients can access regions via ``VFIO_USER_REGION_READ/WRITE`` or, if provided, by +``mmap()`` of a file descriptor provided by the server. + +``VFIO_USER_DEVICE_GET_REGION_IO_FDS`` provides an alternative access mechanism via +file descriptors. This is an optional feature intended for performance +improvements where an underlying sub-system (such as KVM) supports communication +across such file descriptors to the vfio-user server, without needing to +round-trip through the client. + +The server returns an array of sub-regions for the requested region. Each +sub-region describes a span (offset and size) of a region, along with the +requested file descriptor notification mechanism to use. Each sub-region in the +response message may choose to use a different method, as defined below. 
The +two mechanisms supported in this specification are ioeventfds and ioregionfds. + +The server in addition returns a file descriptor in the ancillary data; clients +are expected to configure each sub-region's file descriptor with the requested +notification method. For example, a client could configure KVM with the +requested ioeventfd via a ``KVM_IOEVENTFD`` ``ioctl()``. + +Request +^^^^^^^ + ++-------------+--------+------+ +| Name | Offset | Size | ++=============+========+======+ +| argsz | 0 | 4 | ++-------------+--------+------+ +| flags | 4 | 4 | ++-------------+--------+------+ +| index | 8 | 4 | ++-------------+--------+------+ +| count | 12 | 4 | ++-------------+--------+------+ + +* *argsz* the maximum size of the reply payload +* *index* is the index of memory region being queried +* all other fields must be zero + +The client must set ``flags`` to zero and specify the region being queried in +the ``index``. + +Reply +^^^^^ + ++-------------+--------+------+ +| Name | Offset | Size | ++=============+========+======+ +| argsz | 0 | 4 | ++-------------+--------+------+ +| flags | 4 | 4 | ++-------------+--------+------+ +| index | 8 | 4 | ++-------------+--------+------+ +| count | 12 | 4 | ++-------------+--------+------+ +| sub-regions | 16 | ... | ++-------------+--------+------+ + +* *argsz* is the size of the region IO FD info structure plus the + total size of the sub-region array. Thus, each array entry "i" is at offset + i * ((argsz - 32) / count). Note that currently this is 40 bytes for both IO + FD types, but this is not to be relied on. As elsewhere, this indicates the + full reply payload size needed. +* *flags* must be zero +* *index* is the index of memory region being queried +* *count* is the number of sub-regions in the array +* *sub-regions* is the array of Sub-Region IO FD info structures + +The reply message will additionally include at least one file descriptor in the +ancillary data. Note that more than one sub-region may share the same file +descriptor. + +Note that it is the client's responsibility to verify the requested values (for +example, that the requested offset does not exceed the region's bounds). + +Each sub-region given in the response has one of two possible structures, +depending whether *type* is ``VFIO_USER_IO_FD_TYPE_IOEVENTFD`` or +``VFIO_USER_IO_FD_TYPE_IOREGIONFD``: + +Sub-Region IO FD info format (ioeventfd) +"""""""""""""""""""""""""""""""""""""""" + ++-----------+--------+------+ +| Name | Offset | Size | ++===========+========+======+ +| offset | 0 | 8 | ++-----------+--------+------+ +| size | 8 | 8 | ++-----------+--------+------+ +| fd_index | 16 | 4 | ++-----------+--------+------+ +| type | 20 | 4 | ++-----------+--------+------+ +| flags | 24 | 4 | ++-----------+--------+------+ +| padding | 28 | 4 | ++-----------+--------+------+ +| datamatch | 32 | 8 | ++-----------+--------+------+ + +* *offset* is the offset of the start of the sub-region within the region + requested ("physical address offset" for the region) +* *size* is the length of the sub-region. This may be zero if the access size is + not relevant, which may allow for optimizations +* *fd_index* is the index in the ancillary data of the FD to use for ioeventfd + notification; it may be shared. +* *type* is ``VFIO_USER_IO_FD_TYPE_IOEVENTFD`` +* *flags* is any of: + + * ``KVM_IOEVENTFD_FLAG_DATAMATCH`` + * ``KVM_IOEVENTFD_FLAG_PIO`` + * ``KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY`` (FIXME: makes sense?) 
+ +* *datamatch* is the datamatch value if needed + +See https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt, *4.59 +KVM_IOEVENTFD* for further context on the ioeventfd-specific fields. + +Sub-Region IO FD info format (ioregionfd) +""""""""""""""""""""""""""""""""""""""""" + ++-----------+--------+------+ +| Name | Offset | Size | ++===========+========+======+ +| offset | 0 | 8 | ++-----------+--------+------+ +| size | 8 | 8 | ++-----------+--------+------+ +| fd_index | 16 | 4 | ++-----------+--------+------+ +| type | 20 | 4 | ++-----------+--------+------+ +| flags | 24 | 4 | ++-----------+--------+------+ +| padding | 28 | 4 | ++-----------+--------+------+ +| user_data | 32 | 8 | ++-----------+--------+------+ + +* *offset* is the offset of the start of the sub-region within the region + requested ("physical address offset" for the region) +* *size* is the length of the sub-region. This may be zero if the access size is + not relevant, which may allow for optimizations; ``KVM_IOREGION_POSTED_WRITES`` + must be set in *flags* in this case +* *fd_index* is the index in the ancillary data of the FD to use for ioregionfd + messages; it may be shared +* *type* is ``VFIO_USER_IO_FD_TYPE_IOREGIONFD`` +* *flags* is any of: + + * ``KVM_IOREGION_PIO`` + * ``KVM_IOREGION_POSTED_WRITES`` + +* *user_data* is an opaque value passed back to the server via a message on the + file descriptor + +For further information on the ioregionfd-specific fields, see: +https://lore.kernel.org/kvm/cover.1613828726.git.eafanasova@gmail.com/ + +(FIXME: update with final API docs.) + +``VFIO_USER_DEVICE_GET_IRQ_INFO`` +--------------------------------- + +This command message is sent by the client to the server to query for +information about device interrupt types. The VFIO IRQ info structure is +defined in ```` (``struct vfio_irq_info``). + +Request +^^^^^^^ + ++-------+--------+---------------------------+ +| Name | Offset | Size | ++=======+========+===========================+ +| argsz | 0 | 4 | ++-------+--------+---------------------------+ +| flags | 4 | 4 | ++-------+--------+---------------------------+ +| | +-----+--------------------------+ | +| | | Bit | Definition | | +| | +=====+==========================+ | +| | | 0 | VFIO_IRQ_INFO_EVENTFD | | +| | +-----+--------------------------+ | +| | | 1 | VFIO_IRQ_INFO_MASKABLE | | +| | +-----+--------------------------+ | +| | | 2 | VFIO_IRQ_INFO_AUTOMASKED | | +| | +-----+--------------------------+ | +| | | 3 | VFIO_IRQ_INFO_NORESIZE | | +| | +-----+--------------------------+ | ++-------+--------+---------------------------+ +| index | 8 | 4 | ++-------+--------+---------------------------+ +| count | 12 | 4 | ++-------+--------+---------------------------+ + +* *argsz* is the maximum size of the reply payload (16 bytes today) +* index is the index of IRQ type being queried (e.g. 
``VFIO_PCI_MSIX_IRQ_INDEX``) +* all other fields must be zero + +Reply +^^^^^ + ++-------+--------+---------------------------+ +| Name | Offset | Size | ++=======+========+===========================+ +| argsz | 0 | 4 | ++-------+--------+---------------------------+ +| flags | 4 | 4 | ++-------+--------+---------------------------+ +| | +-----+--------------------------+ | +| | | Bit | Definition | | +| | +=====+==========================+ | +| | | 0 | VFIO_IRQ_INFO_EVENTFD | | +| | +-----+--------------------------+ | +| | | 1 | VFIO_IRQ_INFO_MASKABLE | | +| | +-----+--------------------------+ | +| | | 2 | VFIO_IRQ_INFO_AUTOMASKED | | +| | +-----+--------------------------+ | +| | | 3 | VFIO_IRQ_INFO_NORESIZE | | +| | +-----+--------------------------+ | ++-------+--------+---------------------------+ +| index | 8 | 4 | ++-------+--------+---------------------------+ +| count | 12 | 4 | ++-------+--------+---------------------------+ + +* *argsz* is the size required for the full reply payload (16 bytes today) +* *flags* defines IRQ attributes: + + * ``VFIO_IRQ_INFO_EVENTFD`` indicates the IRQ type can support server eventfd + signalling. + * ``VFIO_IRQ_INFO_MASKABLE`` indicates that the IRQ type supports the ``MASK`` + and ``UNMASK`` actions in a ``VFIO_USER_DEVICE_SET_IRQS`` message. + * ``VFIO_IRQ_INFO_AUTOMASKED`` indicates the IRQ type masks itself after being + triggered, and the client must send an ``UNMASK`` action to receive new + interrupts. + * ``VFIO_IRQ_INFO_NORESIZE`` indicates ``VFIO_USER_SET_IRQS`` operations setup + interrupts as a set, and new sub-indexes cannot be enabled without disabling + the entire type. +* index is the index of IRQ type being queried +* count describes the number of interrupts of the queried type. + +``VFIO_USER_DEVICE_SET_IRQS`` +----------------------------- + +This command message is sent by the client to the server to set actions for +device interrupt types. The VFIO IRQ set structure is defined in +```` (``struct vfio_irq_set``). + +Request +^^^^^^^ + ++-------+--------+------------------------------+ +| Name | Offset | Size | ++=======+========+==============================+ +| argsz | 0 | 4 | ++-------+--------+------------------------------+ +| flags | 4 | 4 | ++-------+--------+------------------------------+ +| | +-----+-----------------------------+ | +| | | Bit | Definition | | +| | +=====+=============================+ | +| | | 0 | VFIO_IRQ_SET_DATA_NONE | | +| | +-----+-----------------------------+ | +| | | 1 | VFIO_IRQ_SET_DATA_BOOL | | +| | +-----+-----------------------------+ | +| | | 2 | VFIO_IRQ_SET_DATA_EVENTFD | | +| | +-----+-----------------------------+ | +| | | 3 | VFIO_IRQ_SET_ACTION_MASK | | +| | +-----+-----------------------------+ | +| | | 4 | VFIO_IRQ_SET_ACTION_UNMASK | | +| | +-----+-----------------------------+ | +| | | 5 | VFIO_IRQ_SET_ACTION_TRIGGER | | +| | +-----+-----------------------------+ | ++-------+--------+------------------------------+ +| index | 8 | 4 | ++-------+--------+------------------------------+ +| start | 12 | 4 | ++-------+--------+------------------------------+ +| count | 16 | 4 | ++-------+--------+------------------------------+ +| data | 20 | variable | ++-------+--------+------------------------------+ + +* *argsz* is the size of the VFIO IRQ set request payload, including any *data* + field. Note there is no reply payload, so this field differs from other + message types. +* *flags* defines the action performed on the interrupt range. 
The ``DATA`` + flags describe the data field sent in the message; the ``ACTION`` flags + describe the action to be performed. The flags are mutually exclusive for + both sets. + + * ``VFIO_IRQ_SET_DATA_NONE`` indicates there is no data field in the command. + The action is performed unconditionally. + * ``VFIO_IRQ_SET_DATA_BOOL`` indicates the data field is an array of boolean + bytes. The action is performed if the corresponding boolean is true. + * ``VFIO_IRQ_SET_DATA_EVENTFD`` indicates an array of event file descriptors + was sent in the message meta-data. These descriptors will be signalled when + the action defined by the action flags occurs. In ``AF_UNIX`` sockets, the + descriptors are sent as ``SCM_RIGHTS`` type ancillary data. + If no file descriptors are provided, this de-assigns the specified + previously configured interrupts. + * ``VFIO_IRQ_SET_ACTION_MASK`` indicates a masking event. It can be used with + ``VFIO_IRQ_SET_DATA_BOOL`` or ``VFIO_IRQ_SET_DATA_NONE`` to mask an interrupt, + or with ``VFIO_IRQ_SET_DATA_EVENTFD`` to generate an event when the guest masks + the interrupt. + * ``VFIO_IRQ_SET_ACTION_UNMASK`` indicates an unmasking event. It can be used + with ``VFIO_IRQ_SET_DATA_BOOL`` or ``VFIO_IRQ_SET_DATA_NONE`` to unmask an + interrupt, or with ``VFIO_IRQ_SET_DATA_EVENTFD`` to generate an event when the + guest unmasks the interrupt. + * ``VFIO_IRQ_SET_ACTION_TRIGGER`` indicates a triggering event. It can be used + with ``VFIO_IRQ_SET_DATA_BOOL`` or ``VFIO_IRQ_SET_DATA_NONE`` to trigger an + interrupt, or with ``VFIO_IRQ_SET_DATA_EVENTFD`` to generate an event when the + server triggers the interrupt. + +* *index* is the index of IRQ type being setup. +* *start* is the start of the sub-index being set. +* *count* describes the number of sub-indexes being set. As a special case, a + count (and start) of 0, with data flags of ``VFIO_IRQ_SET_DATA_NONE`` disables + all interrupts of the index. +* *data* is an optional field included when the + ``VFIO_IRQ_SET_DATA_BOOL`` flag is present. It contains an array of booleans + that specify whether the action is to be performed on the corresponding + index. It's used when the action is only performed on a subset of the range + specified. + +Not all interrupt types support every combination of data and action flags. +The client must know the capabilities of the device and IRQ index before it +sends a ``VFIO_USER_DEVICE_SET_IRQ`` message. + +In typical operation, a specific IRQ may operate as follows: + +1. The client sends a ``VFIO_USER_DEVICE_SET_IRQ`` message with + ``flags=(VFIO_IRQ_SET_DATA_EVENTFD|VFIO_IRQ_SET_ACTION_TRIGGER)`` along + with an eventfd. This associates the IRQ with a particular eventfd on the + server side. + +#. The client may send a ``VFIO_USER_DEVICE_SET_IRQ`` message with + ``flags=(VFIO_IRQ_SET_DATA_EVENTFD|VFIO_IRQ_SET_ACTION_MASK/UNMASK)`` along + with another eventfd. This associates the given eventfd with the + mask/unmask state on the server side. + +#. The server may trigger the IRQ by writing 1 to the eventfd. + +#. The server may mask/unmask an IRQ which will write 1 to the corresponding + mask/unmask eventfd, if there is one. + +5. A client may trigger a device IRQ itself, by sending a + ``VFIO_USER_DEVICE_SET_IRQ`` message with + ``flags=(VFIO_IRQ_SET_DATA_NONE/BOOL|VFIO_IRQ_SET_ACTION_TRIGGER)``. + +6. A client may mask or unmask the IRQ, by sending a + ``VFIO_USER_DEVICE_SET_IRQ`` message with + ``flags=(VFIO_IRQ_SET_DATA_NONE/BOOL|VFIO_IRQ_SET_ACTION_MASK/UNMASK)``. 
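+
+The sketch below is a minimal, illustrative example of step 1 above: it fills
+the fixed 20-byte ``VFIO_USER_DEVICE_SET_IRQS`` payload that associates a
+single eventfd with the trigger action. The structure and helper names are
+hypothetical; the 16-byte vfio-user message header, the ``AF_UNIX`` socket
+plumbing and the ``SCM_RIGHTS`` transfer of the eventfd itself are omitted::
+
+   #include <stdint.h>
+   #include <string.h>
+
+   struct vfio_user_irq_set {      /* offsets 0/4/8/12/16 as documented */
+       uint32_t argsz;
+       uint32_t flags;
+       uint32_t index;
+       uint32_t start;
+       uint32_t count;
+   };
+
+   #define IRQ_SET_DATA_EVENTFD   (1u << 2)   /* bit 2 of the flags field */
+   #define IRQ_SET_ACTION_TRIGGER (1u << 5)   /* bit 5 of the flags field */
+
+   static void build_trigger_eventfd(struct vfio_user_irq_set *req,
+                                     uint32_t irq_index, uint32_t subindex)
+   {
+       memset(req, 0, sizeof(*req));
+       req->argsz = sizeof(*req);   /* no inline data for DATA_EVENTFD */
+       req->flags = IRQ_SET_DATA_EVENTFD | IRQ_SET_ACTION_TRIGGER;
+       req->index = irq_index;      /* e.g. the MSI-X IRQ index */
+       req->start = subindex;
+       req->count = 1;              /* one eventfd in the ancillary data */
+   }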
+ +Reply +^^^^^ + +There is no payload in the reply. + +.. _Read and Write Operations: + +Note that all of these operations must be supported by the client and/or server, +even if the corresponding memory or device region has been shared as mappable. + +The ``count`` field must not exceed the value of ``max_data_xfer_size`` of the +peer, for both reads and writes. + +``VFIO_USER_REGION_READ`` +------------------------- + +If a device region is not mappable, it's not directly accessible by the client +via ``mmap()`` of the underlying file descriptor. In this case, a client can +read from a device region with this message. + +Request +^^^^^^^ + ++--------+--------+----------+ +| Name | Offset | Size | ++========+========+==========+ +| offset | 0 | 8 | ++--------+--------+----------+ +| region | 8 | 4 | ++--------+--------+----------+ +| count | 12 | 4 | ++--------+--------+----------+ + +* *offset* into the region being accessed. +* *region* is the index of the region being accessed. +* *count* is the size of the data to be transferred. + +Reply +^^^^^ + ++--------+--------+----------+ +| Name | Offset | Size | ++========+========+==========+ +| offset | 0 | 8 | ++--------+--------+----------+ +| region | 8 | 4 | ++--------+--------+----------+ +| count | 12 | 4 | ++--------+--------+----------+ +| data | 16 | variable | ++--------+--------+----------+ + +* *offset* into the region accessed. +* *region* is the index of the region accessed. +* *count* is the size of the data transferred. +* *data* is the data that was read from the device region. + +``VFIO_USER_REGION_WRITE`` +-------------------------- + +If a device region is not mappable, it's not directly accessible by the client +via mmap() of the underlying fd. In this case, a client can write to a device +region with this message. + +Request +^^^^^^^ + ++--------+--------+----------+ +| Name | Offset | Size | ++========+========+==========+ +| offset | 0 | 8 | ++--------+--------+----------+ +| region | 8 | 4 | ++--------+--------+----------+ +| count | 12 | 4 | ++--------+--------+----------+ +| data | 16 | variable | ++--------+--------+----------+ + +* *offset* into the region being accessed. +* *region* is the index of the region being accessed. +* *count* is the size of the data to be transferred. +* *data* is the data to write + +Reply +^^^^^ + ++--------+--------+----------+ +| Name | Offset | Size | ++========+========+==========+ +| offset | 0 | 8 | ++--------+--------+----------+ +| region | 8 | 4 | ++--------+--------+----------+ +| count | 12 | 4 | ++--------+--------+----------+ + +* *offset* into the region accessed. +* *region* is the index of the region accessed. +* *count* is the size of the data transferred. + +``VFIO_USER_DMA_READ`` +----------------------- + +If the client has not shared mappable memory, the server can use this message to +read from guest memory. + +Request +^^^^^^^ + ++---------+--------+----------+ +| Name | Offset | Size | ++=========+========+==========+ +| address | 0 | 8 | ++---------+--------+----------+ +| count | 8 | 8 | ++---------+--------+----------+ + +* *address* is the client DMA memory address being accessed. This address must have + been previously exported to the server with a ``VFIO_USER_DMA_MAP`` message. +* *count* is the size of the data to be transferred. 
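+
+As an illustration, the request payload is a fixed 16-byte structure; large
+transfers have to be split so that no single request's *count* exceeds the
+peer's ``max_data_xfer_size``. A minimal sketch, with hypothetical names and
+the message header omitted::
+
+   #include <stdint.h>
+
+   struct vfio_user_dma_read {
+       uint64_t address;   /* client DMA address, previously DMA-mapped */
+       uint64_t count;     /* bytes to read, <= max_data_xfer_size */
+   };
+
+   /* Number of requests needed to read 'total' bytes in 'max_xfer' chunks. */
+   static inline uint64_t dma_read_chunks(uint64_t total, uint64_t max_xfer)
+   {
+       return (total + max_xfer - 1) / max_xfer;
+   }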
+ +Reply +^^^^^ + ++---------+--------+----------+ +| Name | Offset | Size | ++=========+========+==========+ +| address | 0 | 8 | ++---------+--------+----------+ +| count | 8 | 8 | ++---------+--------+----------+ +| data | 16 | variable | ++---------+--------+----------+ + +* *address* is the client DMA memory address being accessed. +* *count* is the size of the data transferred. +* *data* is the data read. + +``VFIO_USER_DMA_WRITE`` +----------------------- + +If the client has not shared mappable memory, the server can use this message to +write to guest memory. + +Request +^^^^^^^ + ++---------+--------+----------+ +| Name | Offset | Size | ++=========+========+==========+ +| address | 0 | 8 | ++---------+--------+----------+ +| count | 8 | 8 | ++---------+--------+----------+ +| data | 16 | variable | ++---------+--------+----------+ + +* *address* is the client DMA memory address being accessed. This address must have + been previously exported to the server with a ``VFIO_USER_DMA_MAP`` message. +* *count* is the size of the data to be transferred. +* *data* is the data to write + +Reply +^^^^^ + ++---------+--------+----------+ +| Name | Offset | Size | ++=========+========+==========+ +| address | 0 | 8 | ++---------+--------+----------+ +| count | 8 | 4 | ++---------+--------+----------+ + +* *address* is the client DMA memory address being accessed. +* *count* is the size of the data transferred. + +``VFIO_USER_DEVICE_RESET`` +-------------------------- + +This command message is sent from the client to the server to reset the device. +Neither the request or reply have a payload. + +``VFIO_USER_REGION_WRITE_MULTI`` +-------------------------------- + +This message can be used to coalesce multiple device write operations +into a single messgage. It is only used as an optimization when the +outgoing message queue is relatively full. + +Request +^^^^^^^ + ++---------+--------+----------+ +| Name | Offset | Size | ++=========+========+==========+ +| wr_cnt | 0 | 8 | ++---------+--------+----------+ +| wrs | 8 | variable | ++---------+--------+----------+ + +* *wr_cnt* is the number of device writes coalesced in the message +* *wrs* is an array of device writes defined below + +Single Device Write Format +"""""""""""""""""""""""""" + ++--------+--------+----------+ +| Name | Offset | Size | ++========+========+==========+ +| offset | 0 | 8 | ++--------+--------+----------+ +| region | 8 | 4 | ++--------+--------+----------+ +| count | 12 | 4 | ++--------+--------+----------+ +| data | 16 | 8 | ++--------+--------+----------+ + +* *offset* into the region being accessed. +* *region* is the index of the region being accessed. +* *count* is the size of the data to be transferred. This format can + only describe writes of 8 bytes or less. +* *data* is the data to write. + +Reply +^^^^^ + ++---------+--------+----------+ +| Name | Offset | Size | ++=========+========+==========+ +| wr_cnt | 0 | 8 | ++---------+--------+----------+ + +* *wr_cnt* is the number of device writes completed. 
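+
+A minimal sketch of how a client might coalesce small writes into the *wrs*
+array is shown below. The structure names and the fixed element cap are
+illustrative only; each element is the 24-byte single device write format and
+can carry at most 8 bytes of data::
+
+   #include <stdint.h>
+   #include <string.h>
+
+   struct vfio_user_region_write_elem {
+       uint64_t offset;    /* offset within the region */
+       uint32_t region;    /* region index */
+       uint32_t count;     /* 1..8 bytes */
+       uint8_t  data[8];
+   };
+
+   struct vfio_user_region_write_multi {
+       uint64_t wr_cnt;                            /* number of valid elements */
+       struct vfio_user_region_write_elem wrs[8];  /* illustrative cap */
+   };
+
+   /* Returns 0 if the write cannot be coalesced into this message. */
+   static int multi_add_write(struct vfio_user_region_write_multi *m,
+                              uint32_t region, uint64_t offset,
+                              const void *buf, uint32_t count)
+   {
+       if (count == 0 || count > 8 || m->wr_cnt >= 8) {
+           return 0;
+       }
+       struct vfio_user_region_write_elem *e = &m->wrs[m->wr_cnt++];
+       e->offset = offset;
+       e->region = region;
+       e->count  = count;
+       memset(e->data, 0, sizeof(e->data));
+       memcpy(e->data, buf, count);
+       return 1;
+   }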
+ + +Appendices +========== + +Unused VFIO ``ioctl()`` commands +-------------------------------- + +The following VFIO commands do not have an equivalent vfio-user command: + +* ``VFIO_GET_API_VERSION`` +* ``VFIO_CHECK_EXTENSION`` +* ``VFIO_SET_IOMMU`` +* ``VFIO_GROUP_GET_STATUS`` +* ``VFIO_GROUP_SET_CONTAINER`` +* ``VFIO_GROUP_UNSET_CONTAINER`` +* ``VFIO_GROUP_GET_DEVICE_FD`` +* ``VFIO_IOMMU_GET_INFO`` + +However, once support for live migration for VFIO devices is finalized some +of the above commands may have to be handled by the client in their +corresponding vfio-user form. This will be addressed in a future protocol +version. + +VFIO groups and containers +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The current VFIO implementation includes group and container idioms that +describe how a device relates to the host IOMMU. In the vfio-user +implementation, the IOMMU is implemented in SW by the client, and is not +visible to the server. The simplest idea would be that the client put each +device into its own group and container. + +Backend Program Conventions +--------------------------- + +vfio-user backend program conventions are based on the vhost-user ones. + +* The backend program must not daemonize itself. +* No assumptions must be made as to what access the backend program has on the + system. +* File descriptors 0, 1 and 2 must exist, must have regular + stdin/stdout/stderr semantics, and can be redirected. +* The backend program must honor the SIGTERM signal. +* The backend program must accept the following commands line options: + + * ``--socket-path=PATH``: path to UNIX domain socket, + * ``--fd=FDNUM``: file descriptor for UNIX domain socket, incompatible with + ``--socket-path`` +* The backend program must be accompanied with a JSON file stored under + ``/usr/share/vfio-user``. + +TODO add schema similar to docs/interop/vhost-user.json. diff --git a/docs/interop/vhost-user.json b/docs/interop/vhost-user.json index b6ade9e4931..095eb99cf79 100644 --- a/docs/interop/vhost-user.json +++ b/docs/interop/vhost-user.json @@ -10,7 +10,9 @@ # later. See the COPYING file in the top-level directory. ## -# = vhost user backend discovery & capabilities +# ******************************************* +# vhost user backend discovery & capabilities +# ******************************************* ## ## diff --git a/docs/interop/vhost-user.rst b/docs/interop/vhost-user.rst index d8419fd2f17..2e50f2ddfad 100644 --- a/docs/interop/vhost-user.rst +++ b/docs/interop/vhost-user.rst @@ -167,6 +167,8 @@ A vring address description Note that a ring address is an IOVA if ``VIRTIO_F_IOMMU_PLATFORM`` has been negotiated. Otherwise it is a user address. +.. _memory_region_description: + Memory region description ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -180,7 +182,7 @@ Memory region description :user address: a 64-bit user address -:mmap offset: 64-bit offset where region starts in the mapped memory +:mmap offset: a 64-bit offset where region starts in the mapped memory When the ``VHOST_USER_PROTOCOL_F_XEN_MMAP`` protocol feature has been successfully negotiated, the memory region description contains two extra @@ -190,7 +192,7 @@ fields at the end. | guest address | size | user address | mmap offset | xen mmap flags | domid | +---------------+------+--------------+-------------+----------------+-------+ -:xen mmap flags: 32-bit bit field +:xen mmap flags: a 32-bit bit field - Bit 0 is set for Xen foreign memory mapping. - Bit 1 is set for Xen grant memory mapping. 
@@ -211,7 +213,7 @@ Single memory region description :padding: 64-bit -A region is represented by Memory region description. +:region: region is represented by :ref:`Memory region description `. Multiple Memory regions description ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -224,7 +226,7 @@ Multiple Memory regions description :padding: 32-bit -A region is represented by Memory region description. +:regions: regions field contains 8 regions of type :ref:`Memory region description `. Log description ^^^^^^^^^^^^^^^ @@ -233,9 +235,9 @@ Log description | log size | log offset | +----------+------------+ -:log size: size of area used for logging +:log size: a 64-bit size of area used for logging -:log offset: offset from start of supplied file descriptor where +:log offset: a 64-bit offset from start of supplied file descriptor where logging starts (i.e. where guest address 0 would be logged) @@ -382,7 +384,7 @@ the kernel implementation. The communication consists of the *front-end* sending message requests and the *back-end* sending message replies. Most of the requests don't require -replies. Here is a list of the ones that do: +replies, except for the following requests: * ``VHOST_USER_GET_FEATURES`` * ``VHOST_USER_GET_PROTOCOL_FEATURES`` @@ -1239,11 +1241,11 @@ Front-end message types (*a vring descriptor index for split virtqueues* vs. *vring descriptor indices for packed virtqueues*). - When and as long as all of a device’s vrings are stopped, it is + When and as long as all of a device's vrings are stopped, it is *suspended*, see :ref:`Suspended device state `. - The request payload’s *num* field is currently reserved and must be + The request payload's *num* field is currently reserved and must be set to 0. ``VHOST_USER_SET_VRING_KICK`` @@ -1662,7 +1664,7 @@ Front-end message types :reply payload: ``u64`` Front-end and back-end negotiate a channel over which to transfer the - back-end’s internal state during migration. Either side (front-end or + back-end's internal state during migration. Either side (front-end or back-end) may create the channel. The nature of this channel is not restricted or defined in this document, but whichever side creates it must create a file descriptor that is provided to the respectively @@ -1714,7 +1716,7 @@ Front-end message types :request payload: N/A :reply payload: ``u64`` - After transferring the back-end’s internal state during migration (see + After transferring the back-end's internal state during migration (see the :ref:`Migrating back-end state ` section), check whether the back-end was able to successfully fully process the state. diff --git a/docs/meson.build b/docs/meson.build index 322452c8778..3676f81c4d3 100644 --- a/docs/meson.build +++ b/docs/meson.build @@ -54,7 +54,6 @@ if build_docs 'qemu-pr-helper.8': (have_tools ? 'man8' : ''), 'qemu-storage-daemon.1': (have_tools ? 'man1' : ''), 'qemu-trace-stap.1': (stap.found() ? 'man1' : ''), - 'virtfs-proxy-helper.1': (have_virtfs_proxy_helper ? 'man1' : ''), 'qemu.1': 'man1', 'qemu-block-drivers.7': 'man7', 'qemu-cpu-models.7': 'man7' diff --git a/docs/pcie_sriov.txt b/docs/pcie_sriov.txt index a47aad0bfab..ab2142807f7 100644 --- a/docs/pcie_sriov.txt +++ b/docs/pcie_sriov.txt @@ -52,9 +52,11 @@ setting up a BAR for a VF. ... 
/* Add and initialize the SR/IOV capability */ - pcie_sriov_pf_init(d, 0x200, "your_virtual_dev", - vf_devid, initial_vfs, total_vfs, - fun_offset, stride); + if (!pcie_sriov_pf_init(d, 0x200, "your_virtual_dev", + vf_devid, initial_vfs, total_vfs, + fun_offset, stride, errp)) { + return; + } /* Set up individual VF BARs (parameters as for normal BARs) */ pcie_sriov_pf_init_vf_bar( ... ) diff --git a/docs/qcow2-cache.txt b/docs/qcow2-cache.txt index 5f763aa6bbf..204a5741ad2 100644 --- a/docs/qcow2-cache.txt +++ b/docs/qcow2-cache.txt @@ -15,7 +15,7 @@ not a straightforward operation. This document attempts to give an overview of the L2 and refcount caches, and how to configure them. -Please refer to the docs/interop/qcow2.txt file for an in-depth +Please refer to the docs/interop/qcow2.rst file for an in-depth technical description of the qcow2 file format. diff --git a/docs/requirements.txt b/docs/requirements.txt index 02583f209aa..87f7afcb2e7 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,5 @@ # Used by readthedocs.io # Should be in sync with the "installed" key of pythondeps.toml -sphinx==5.3.0 -sphinx_rtd_theme==1.1.1 +sphinx==6.2.1 +sphinx_rtd_theme==1.2.2 diff --git a/docs/specs/acpi_hest_ghes.rst b/docs/specs/acpi_hest_ghes.rst index 68f1fbe0a4a..c3e9f8d9a70 100644 --- a/docs/specs/acpi_hest_ghes.rst +++ b/docs/specs/acpi_hest_ghes.rst @@ -67,8 +67,10 @@ Design Details (3) The address registers table contains N Error Block Address entries and N Read Ack Register entries. The size for each entry is 8-byte. The Error Status Data Block table contains N Error Status Data Block - entries. The size for each entry is 4096(0x1000) bytes. The total size - for the "etc/hardware_errors" fw_cfg blob is (N * 8 * 2 + N * 4096) bytes. + entries. The size for each entry is defined at the source code as + ACPI_GHES_MAX_RAW_DATA_LENGTH (currently 1024 bytes). The total size + for the "etc/hardware_errors" fw_cfg blob is + (N * 8 * 2 + N * ACPI_GHES_MAX_RAW_DATA_LENGTH) bytes. N is the number of the kinds of hardware error sources. (4) QEMU generates the ACPI linker/loader script for the firmware. The diff --git a/docs/specs/aspeed-intc.rst b/docs/specs/aspeed-intc.rst new file mode 100644 index 00000000000..9cefd7f37f8 --- /dev/null +++ b/docs/specs/aspeed-intc.rst @@ -0,0 +1,136 @@ +=========================== +ASPEED Interrupt Controller +=========================== + +AST2700 +------- +There are a total of 480 interrupt sources in AST2700. Due to the limitation of +interrupt numbers of processors, the interrupts are merged every 32 sources for +interrupt numbers greater than 127. + +There are two levels of interrupt controllers, INTC (CPU Die) and INTCIO +(I/O Die). + +Interrupt Mapping +----------------- +- INTC: Handles interrupt sources 0 - 127 and integrates signals from INTCIO. +- INTCIO: Handles interrupt sources 128 - 319 independently. + +QEMU Support +------------ +Currently, only GIC 192 to 201 are supported, and their source interrupts are +from INTCIO and connected to INTC at input pin 0 and output pins 0 to 9 for +GIC 192-201. + +Design for GICINT 196 +--------------------- +The orgate has interrupt sources ranging from 0 to 31, with its output pin +connected to INTCIO "T0 GICINT_196". The output pin is then connected to INTC +"GIC_192_201" at bit 4, and its bit 4 output pin is connected to GIC 196. 
+ +INTC GIC_192_201 Output Pin Mapping +----------------------------------- +The design of INTC GIC_192_201 have 10 output pins, mapped as following: + +==== ==== +Bit GIC +==== ==== +0 192 +1 193 +2 194 +3 195 +4 196 +5 197 +6 198 +7 199 +8 200 +9 201 +==== ==== + +AST2700 A0 +---------- +It has only one INTC controller, and currently, only GIC 128-136 is supported. +To support both AST2700 A1 and AST2700 A0, there are 10 OR gates in the INTC, +with gates 1 to 9 supporting GIC 128-136. + +Design for GICINT 132 +--------------------- +The orgate has interrupt sources ranging from 0 to 31, with its output pin +connected to INTC. The output pin is then connected to GIC 132. + +Block Diagram of GICINT 196 for AST2700 A1 and GICINT 132 for AST2700 A0 +------------------------------------------------------------------------ + +.. code-block:: + + |-------------------------------------------------------------------------------------------------------| + | AST2700 A1 Design | + | To GICINT196 | + | | + | ETH1 |-----------| |--------------------------| |--------------| | + | -------->|0 | | INTCIO | | orgates[0] | | + | ETH2 | 4| orgates[0]------>|inpin[0]-------->outpin[0]|------->| 0 | | + | -------->|1 5| orgates[1]------>|inpin[1]-------->outpin[1]|------->| 1 | | + | ETH3 | 6| orgates[2]------>|inpin[2]-------->outpin[2]|------->| 2 | | + | -------->|2 19| orgates[3]------>|inpin[3]-------->outpin[3]|------->| 3 OR[0:9] |-----| | + | UART0 | 20|-->orgates[4]------>|inpin[4]-------->outpin[4]|------->| 4 | | | + | -------->|7 21| orgates[5]------>|inpin[5]-------->outpin[5]|------->| 5 | | | + | UART1 | 22| orgates[6]------>|inpin[6]-------->outpin[6]|------->| 6 | | | + | -------->|8 23| orgates[7]------>|inpin[7]-------->outpin[7]|------->| 7 | | | + | UART2 | 24| orgates[8]------>|inpin[8]-------->outpin[8]|------->| 8 | | | + | -------->|9 25| orgates[9]------>|inpin[9]-------->outpin[9]|------->| 9 | | | + | UART3 | 26| |--------------------------| |--------------| | | + | ---------|10 27| | | + | UART5 | 28| | | + | -------->|11 29| | | + | UART6 | | | | + | -------->|12 30| |-----------------------------------------------------------------------| | + | UART7 | 31| | | + | -------->|13 | | | + | UART8 | OR[0:31] | | |------------------------------| |----------| | + | -------->|14 | | | INTC | | GIC | | + | UART9 | | | |inpin[0:0]--------->outpin[0] |---------->|192 | | + | -------->|15 | | |inpin[0:1]--------->outpin[1] |---------->|193 | | + | UART10 | | | |inpin[0:2]--------->outpin[2] |---------->|194 | | + | -------->|16 | | |inpin[0:3]--------->outpin[3] |---------->|195 | | + | UART11 | | |--------------> |inpin[0:4]--------->outpin[4] |---------->|196 | | + | -------->|17 | |inpin[0:5]--------->outpin[5] |---------->|197 | | + | UART12 | | |inpin[0:6]--------->outpin[6] |---------->|198 | | + | -------->|18 | |inpin[0:7]--------->outpin[7] |---------->|199 | | + | |-----------| |inpin[0:8]--------->outpin[8] |---------->|200 | | + | |inpin[0:9]--------->outpin[9] |---------->|201 | | + |-------------------------------------------------------------------------------------------------------| + |-------------------------------------------------------------------------------------------------------| + | ETH1 |-----------| orgates[1]------->|inpin[1]----------->outpin[10]|---------->|128 | | + | -------->|0 | orgates[2]------->|inpin[2]----------->outpin[11]|---------->|129 | | + | ETH2 | 4| orgates[3]------->|inpin[3]----------->outpin[12]|---------->|130 | | + | -------->|1 5| 
orgates[4]------->|inpin[4]----------->outpin[13]|---------->|131 | | + | ETH3 | 6|---->orgates[5]------->|inpin[5]----------->outpin[14]|---------->|132 | | + | -------->|2 19| orgates[6]------->|inpin[6]----------->outpin[15]|---------->|133 | | + | UART0 | 20| orgates[7]------->|inpin[7]----------->outpin[16]|---------->|134 | | + | -------->|7 21| orgates[8]------->|inpin[8]----------->outpin[17]|---------->|135 | | + | UART1 | 22| orgates[9]------->|inpin[9]----------->outpin[18]|---------->|136 | | + | -------->|8 23| |------------------------------| |----------| | + | UART2 | 24| | + | -------->|9 25| AST2700 A0 Design | + | UART3 | 26| | + | -------->|10 27| | + | UART5 | 28| | + | -------->|11 29| GICINT132 | + | UART6 | | | + | -------->|12 30| | + | UART7 | 31| | + | -------->|13 | | + | UART8 | OR[0:31] | | + | -------->|14 | | + | UART9 | | | + | -------->|15 | | + | UART10 | | | + | -------->|16 | | + | UART11 | | | + | -------->|17 | | + | UART12 | | | + | -------->|18 | | + | |-----------| | + | | + |-------------------------------------------------------------------------------------------------------| diff --git a/docs/specs/fw_cfg.rst b/docs/specs/fw_cfg.rst index 5ad47a901c9..31ae31576b1 100644 --- a/docs/specs/fw_cfg.rst +++ b/docs/specs/fw_cfg.rst @@ -54,11 +54,11 @@ Data Register ------------- * Read/Write (writes ignored as of QEMU v2.4, but see the DMA interface) -* Location: platform dependent (IOport [#]_ or MMIO) +* Location: platform dependent (IOport\ [#placement]_ or MMIO) * Width: 8-bit (if IOport), 8/16/32/64-bit (if MMIO) * Endianness: string-preserving -.. [#] +.. [#placement] On platforms where the data register is exposed as an IOport, its port number will always be one greater than the port number of the selector register. In other words, the two ports overlap, and can not diff --git a/docs/specs/index.rst b/docs/specs/index.rst index be899b49c28..f19d73c9f6e 100644 --- a/docs/specs/index.rst +++ b/docs/specs/index.rst @@ -35,3 +35,7 @@ guest hardware that is specific to QEMU. vmcoreinfo vmgenid rapl-msr + rocker + riscv-iommu + riscv-aia + aspeed-intc diff --git a/docs/specs/pci-ids.rst b/docs/specs/pci-ids.rst index c0a3dec2e7a..261b0f359fe 100644 --- a/docs/specs/pci-ids.rst +++ b/docs/specs/pci-ids.rst @@ -77,13 +77,17 @@ PCI devices (other than virtio): 1b36:0008 PCIe host bridge 1b36:0009 - PCI Expander Bridge (-device pxb) + PCI Expander Bridge (``-device pxb``) 1b36:000a PCI-PCI bridge (multiseat) 1b36:000b - PCIe Expander Bridge (-device pxb-pcie) + PCIe Expander Bridge (``-device pxb-pcie``) +1b36:000c + PCIe Root Port (``-device pcie-root-port``) 1b36:000d PCI xhci usb host adapter +1b36:000e + PCIe-to-PCI bridge (``-device pcie-pci-bridge``) 1b36:000f mdpy (mdev sample device), ``linux/samples/vfio-mdev/mdpy.c`` 1b36:0010 @@ -94,6 +98,8 @@ PCI devices (other than virtio): PCI ACPI ERST device (``-device acpi-erst``) 1b36:0013 PCI UFS device (``-device ufs``) +1b36:0014 + PCI RISC-V IOMMU device All these devices are documented in :doc:`index`. diff --git a/docs/specs/rapl-msr.rst b/docs/specs/rapl-msr.rst index 1202ee89bee..aaf0db9f91b 100644 --- a/docs/specs/rapl-msr.rst +++ b/docs/specs/rapl-msr.rst @@ -9,11 +9,12 @@ The consumption is reported via MSRs (model specific registers) like MSR_PKG_ENERGY_STATUS for the CPU package power domain. These MSRs are 64 bits registers that represent the accumulated energy consumption in micro Joules. -Thanks to the MSR Filtering patch [#a]_ not all MSRs are handled by KVM. 
Some -of them can now be handled by the userspace (QEMU). It uses a mechanism called -"MSR filtering" where a list of MSRs is given at init time of a VM to KVM so -that a callback is put in place. The design of this patch uses only this -mechanism for handling the MSRs between guest/host. +Thanks to KVM's `MSR filtering `__ functionality, +not all MSRs are handled by KVM. Some of them can now be handled by the +userspace (QEMU); a list of MSRs is given at VM creation time to KVM, and +a userspace exit occurs when they are accessed. + +.. _msr-filter-patch: https://patchwork.kernel.org/project/kvm/patch/20200916202951.23760-7-graf@amazon.com/ At the moment the following MSRs are involved: @@ -92,9 +93,12 @@ found by the sysconf system call. A typical value of clock ticks per second is package has 4 cores, 400 ticks maximum can be scheduled on all the cores of the package for a period of 1 second. -The /proc/[pid]/stat [#b]_ is a sysfs file that can give the executed time of a -process with the [pid] as the process ID. It gives the amount of ticks the -process has been scheduled in userspace (utime) and kernel space (stime). +`/proc/[pid]/stat `__ is a procfs file that can give the executed +time of a process with the [pid] as the process ID. It gives the amount +of ticks the process has been scheduled in userspace (utime) and kernel +space (stime). + +.. _stat: https://man7.org/linux/man-pages/man5/proc.5.html By reading those metrics for a thread, one can calculate the ratio of time the package has spent executing the thread. @@ -148,8 +152,3 @@ Current Limitations - Only the Package Power-Plane (MSR_PKG_ENERGY_STATUS) is reported at the moment. -References ----------- - -.. [#a] https://patchwork.kernel.org/project/kvm/patch/20200916202951.23760-7-graf@amazon.com/ -.. [#b] https://man7.org/linux/man-pages/man5/proc.5.html diff --git a/docs/specs/riscv-aia.rst b/docs/specs/riscv-aia.rst new file mode 100644 index 00000000000..8097e2f8974 --- /dev/null +++ b/docs/specs/riscv-aia.rst @@ -0,0 +1,83 @@ +.. _riscv-aia: + +RISC-V AIA support for RISC-V machines +====================================== + +AIA (Advanced Interrupt Architecture) support is implemented in the ``virt`` +RISC-V machine for TCG and KVM accelerators. + +The support consists of two main modes: + +- "aia=aplic": adds one or more APLIC (Advanced Platform Level Interrupt Controller) + devices +- "aia=aplic-imsic": adds one or more APLIC device and an IMSIC (Incoming MSI + Controller) device for each CPU + +From an user standpoint, these modes will behave the same regardless of the accelerator +used. From a developer standpoint the accelerator settings will change what it being +emulated in userspace versus what is being emulated by an in-kernel irqchip. + +When running TCG, all controllers are emulated in userspace, including machine mode +(m-mode) APLIC and IMSIC (when applicable). + +When running KVM: + +- no m-mode is provided, so there is no m-mode APLIC or IMSIC emulation regardless of + the AIA mode chosen +- with "aia=aplic", s-mode APLIC will be emulated by userspace +- with "aia=aplic-imsic" there are two possibilities. If no additional KVM option + is provided there will be no APLIC or IMSIC emulation in userspace, and the virtual + machine will use the provided in-kernel APLIC and IMSIC controllers. 
If the user + chooses to use the irqchip in split mode via "-accel kvm,kernel-irqchip=split", + s-mode APLIC will be emulated while using the s-mode IMSIC from the irqchip + +The following table summarizes how the AIA and accelerator options defines what +we will emulate in userspace: + + +.. list-table:: How AIA and accel options changes controller emulation + :widths: 25 25 25 25 25 25 25 + :header-rows: 1 + + * - Accel + - Accel props + - AIA type + - APLIC m-mode + - IMSIC m-mode + - APLIC s-mode + - IMSIC s-mode + * - tcg + - --- + - aplic + - emul + - n/a + - emul + - n/a + * - tcg + - --- + - aplic-imsic + - emul + - emul + - emul + - emul + * - kvm + - --- + - aplic + - n/a + - n/a + - emul + - n/a + * - kvm + - none + - aplic-imsic + - n/a + - n/a + - in-kernel + - in-kernel + * - kvm + - irqchip=split + - aplic-imsic + - n/a + - n/a + - emul + - in-kernel diff --git a/docs/specs/riscv-iommu.rst b/docs/specs/riscv-iommu.rst new file mode 100644 index 00000000000..991d376fdc2 --- /dev/null +++ b/docs/specs/riscv-iommu.rst @@ -0,0 +1,116 @@ +.. _riscv-iommu: + +RISC-V IOMMU support for RISC-V machines +======================================== + +QEMU implements a RISC-V IOMMU emulation based on the RISC-V IOMMU spec +version 1.0 `iommu1.0.0`_. + +The emulation includes a PCI reference device (riscv-iommu-pci) and a platform +bus device (riscv-iommu-sys) that QEMU RISC-V boards can use. The 'virt' +RISC-V machine is compatible with both devices. + +riscv-iommu-pci reference device +-------------------------------- + +This device implements the RISC-V IOMMU emulation as recommended by the section +"Integrating an IOMMU as a PCIe device" of `iommu1.0.0`_: a PCI device with base +class 08h, sub-class 06h and programming interface 00h. + +As a reference device it doesn't implement anything outside of the specification, +so it uses a generic default PCI ID given by QEMU: 1b36:0014. + +To include the device in the 'virt' machine: + +.. code-block:: bash + + $ qemu-system-riscv64 -M virt -device riscv-iommu-pci,[optional_pci_opts] (...) + +This will add a RISC-V IOMMU PCI device in the board following any additional +PCI parameters (like PCI bus address). The behavior of the RISC-V IOMMU is +defined by the spec but its operation is OS dependent. + +As of this writing the existing Linux kernel support `linux-v8`_, not yet merged, +does not have support for features like VFIO passthrough. The IOMMU emulation +was tested using a public Ventana Micro Systems kernel repository in +`ventana-linux`_. This kernel is based on `linux-v8`_ with additional patches that +enable features like KVM VFIO passthrough with irqbypass. Until the kernel support +is feature complete feel free to use the kernel available in the Ventana Micro Systems +mirror. + +The current Linux kernel support will use the IOMMU device to create IOMMU groups +with any eligible cards available in the system, regardless of factors such as the +order in which the devices are added in the command line. + +This means that these command lines are equivalent as far as the current +IOMMU kernel driver behaves: + +.. code-block:: bash + + $ qemu-system-riscv64 \ + -M virt,aia=aplic-imsic,aia-guests=5 \ + -device riscv-iommu-pci,addr=1.0,vendor-id=0x1efd,device-id=0xedf1 \ + -device e1000e,netdev=net1 -netdev user,id=net1,net=192.168.0.0/24 \ + -device e1000e,netdev=net2 -netdev user,id=net2,net=192.168.200.0/24 \ + (...) 
+ + $ qemu-system-riscv64 \ + -M virt,aia=aplic-imsic,aia-guests=5 \ + -device e1000e,netdev=net1 -netdev user,id=net1,net=192.168.0.0/24 \ + -device e1000e,netdev=net2 -netdev user,id=net2,net=192.168.200.0/24 \ + -device riscv-iommu-pci,addr=1.0,vendor-id=0x1efd,device-id=0xedf1 \ + (...) + +Both will create iommu groups for the two e1000e cards. + +Another thing to notice on `linux-v8`_ and `ventana-linux`_ is that the kernel driver +considers an IOMMU identified as a Rivos device, i.e. it uses Rivos vendor ID. To +use the riscv-iommu-pci device with the existing kernel support we need to emulate +a Rivos PCI IOMMU by setting 'vendor-id' and 'device-id': + +.. code-block:: bash + + $ qemu-system-riscv64 -M virt \ + -device riscv-iommu-pci,vendor-id=0x1efd,device-id=0xedf1 (...) + +Several options are available to control the capabilities of the device, namely: + +- "bus": the bus that the IOMMU device uses +- "ioatc-limit": size of the Address Translation Cache (default to 2Mb) +- "intremap": enable/disable MSI support +- "ats": enable ATS support +- "off" (Out-of-reset translation mode: 'on' for DMA disabled, 'off' for 'BARE' (passthrough)) +- "s-stage": enable s-stage support +- "g-stage": enable g-stage support +- "hpm-counters": number of hardware performance counters available. Maximum value is 31. + Default value is 31. Use 0 (zero) to disable HPM support + +riscv-iommu-sys device +---------------------- + +This device implements the RISC-V IOMMU emulation as a platform bus device that +RISC-V boards can use. + +For the 'virt' board the device is disabled by default. To enable it use the +'iommu-sys' machine option: + +.. code-block:: bash + + $ qemu-system-riscv64 -M virt,iommu-sys=on (...) + +There is no options to configure the capabilities of this device in the 'virt' +board using the QEMU command line. The device is configured with the following +riscv-iommu options: + +- "ioatc-limit": default value (2Mb) +- "intremap": enabled +- "ats": enabled +- "off": on (DMA disabled) +- "s-stage": enabled +- "g-stage": enabled + +.. _iommu1.0.0: https://github.com/riscv-non-isa/riscv-iommu/releases/download/v1.0.0/riscv-iommu.pdf + +.. _linux-v8: https://lore.kernel.org/linux-riscv/cover.1718388908.git.tjeznach@rivosinc.com/ + +.. _ventana-linux: https://github.com/ventanamicro/linux/tree/dev-upstream diff --git a/docs/specs/rocker.rst b/docs/specs/rocker.rst new file mode 100644 index 00000000000..3a7fc6a7e0e --- /dev/null +++ b/docs/specs/rocker.rst @@ -0,0 +1,1015 @@ +Rocker Network Switch Register Programming Guide +************************************************ + +.. + Copyright (c) Scott Feldman + Copyright (c) Neil Horman + Version 0.11, 12/29/2014 + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + +Introduction +============ + +Overview +-------- + +This document describes the hardware/software interface for the Rocker switch +device. The intended audience is authors of OS drivers and device emulation +software. 
+ +Notations and Conventions +------------------------- + +* In register descriptions, [n:m] indicates a range from bit n to bit m, + inclusive. +* Use of leading 0x indicates a hexadecimal number. +* Use of leading 0b indicates a binary number. +* The use of RSVD or Reserved indicates that a bit or field is reserved for + future use. +* Field width is in bytes, unless otherwise noted. +* Register are (R) read-only, (R/W) read/write, (W) write-only, or (COR) clear + on read +* TLV values in network-byte-order are designated with (N). + + +PCI Configuration Registers +=========================== + +PCI Configuration Space +----------------------- + +Each switch instance registers as a PCI device with PCI configuration space:: + + offset width description value + --------------------------------------------- + 0x0 2 Vendor ID 0x1b36 + 0x2 2 Device ID 0x0006 + 0x4 4 Command/Status + 0x8 1 Revision ID 0x01 + 0x9 3 Class code 0x2800 + 0xC 1 Cache line size + 0xD 1 Latency timer + 0xE 1 Header type + 0xF 1 Built-in self test + 0x10 4 Base address low + 0x14 4 Base address high + 0x18-28 Reserved + 0x2C 2 Subsystem vendor ID * + 0x2E 2 Subsystem ID * + 0x30-38 Reserved + 0x3C 1 Interrupt line + 0x3D 1 Interrupt pin 0x00 + 0x3E 1 Min grant 0x00 + 0x3D 1 Max latency 0x00 + 0x40 1 TRDY timeout + 0x41 1 Retry count + 0x42 2 Reserved + + * Assigned by sub-system implementation + +Memory-Mapped Register Space +============================ + +There are two memory-mapped BARs. BAR0 maps device register space and is +0x2000 in size. BAR1 maps MSI-X vector and PBA tables and is also 0x2000 in +size, allowing for 256 MSI-X vectors. + +All registers are 4 or 8 bytes long. It is assumed host software will access 4 +byte registers with one 4-byte access, and 8 byte registers with either two +4-byte accesses or a single 8-byte access. In the case of two 4-byte accesses, +access must be lower and then upper 4-bytes, in that order. + +BAR0 device register space is organized as follows:: + + offset description + ------------------------------------------------------ + 0x0000-0x000f Bogus registers to catch misbehaving + drivers. Writes do nothing. Reads + back as 0xDEADBABE. + 0x0010-0x00ff Test registers + 0x0300-0x03ff General purpose registers + 0x1000-0x1fff Descriptor control + +Holes in register space are reserved. Writes to reserved registers do nothing. +Reads to reserved registers read back as 0. + +No fancy stuff like write-combining is enabled on any of the registers. + +BAR1 MSI-X register space is organized as follows:: + + offset description + ------------------------------------------------------ + 0x0000-0x0fff MSI-X vector table (256 vectors total) + 0x1000-0x1fff MSI-X PBA table + + +Interrupts, DMA, and Endianness +=============================== + +PCI Interrupts +-------------- + +The device supports only MSI-X interrupts. BAR1 memory-mapped region contains +the MSI-X vector and PBA tables, with support for up to 256 MSI-X vectors. 
+ +The vector assignment is:: + + vector description + ----------------------------------------------------- + 0 Command descriptor ring completion + 1 Event descriptor ring completion + 2 Test operation completion + 3 RSVD + 4-255 Tx and Rx descriptor ring completion + Tx vector is even + Rx vector is odd + +A MSI-X vector table entry is 16 bytes:: + + field offset width description + ------------------------------------------------------------- + lower_addr 0x0 4 [31:2] message address[31:2] + [1:0] Rsvd (4 byte alignment + required) + upper_addr 0x4 4 [31:19] Rsvd + [14:0] message address[46:32] + data 0x8 4 message data[31:0] + control 0xc 4 [31:1] Rsvd + [0] mask (0 = enable, + 1 = masked) + +Software should install the Interrupt Service Routine (ISR) before any ports +are enabled or any commands are issued on the command ring. + +DMA Operations +-------------- + +DMA operations are used for packet DMA to/from the CPU, command and event +processing. Command processing includes statistical counters and table dumps, +table insertion/deletion, and more. Event processing provides an async +notification method for device-originating events. Each DMA operation has a +set of control registers to manage a descriptor ring. The descriptor rings are +allocated from contiguous host DMA-able memory and registers specify the rings +base address, size and current head and tail indices. Software always writes +the head, and hardware always writes the tail. + +The higher-order bit of DMA_DESC_COMP_ERR is used to mark hardware completion +of a descriptor. Software will clear this bit when posting a descriptor to the +ring, and hardware will set this bit when the descriptor is complete. + +Descriptor ring sizes must be a power of 2 and range from 2 to 64K entries. +Descriptor rings' base address must be 8-byte aligned. Descriptors must be +packed within ring. Each descriptor in each ring must also be aligned on an 8 +byte boundary. Each descriptor ring will have these registers:: + + DMA_DESC_xxx_BASE_ADDR, offset 0x1000 + (x * 32), 64-bit, (R/W) + DMA_DESC_xxx_SIZE, offset 0x1008 + (x * 32), 32-bit, (R/W) + DMA_DESC_xxx_HEAD, offset 0x100c + (x * 32), 32-bit, (R/W) + DMA_DESC_xxx_TAIL, offset 0x1010 + (x * 32), 32-bit, (R) + DMA_DESC_xxx_CTRL, offset 0x1014 + (x * 32), 32-bit, (W) + DMA_DESC_xxx_CREDITS, offset 0x1018 + (x * 32), 32-bit, (R/W) + DMA_DESC_xxx_RSVD1, offset 0x101c + (x * 32), 32-bit, (R/W) + +Where x is descriptor ring index:: + + index ring + -------------------- + 0 CMD + 1 EVENT + 2 TX (port 0) + 3 RX (port 0) + 4 TX (port 1) + 5 RX (port 1) + . + . + . + 124 TX (port 61) + 125 RX (port 61) + 126 Resv + 127 Resv + +Writing BASE_ADDR or SIZE will reset HEAD and TAIL to zero. HEAD cannot be +written past TAIL. To do so would wrap the ring. An empty ring is when HEAD +== TAIL. A full ring is when HEAD is one position behind TAIL. Both HEAD and +TAIL increment and modulo wrap at the ring size. + +CTRL register bits:: + + bit name description + ------------------------------------------------------------------------ + [0] CTRL_RESET Reset the descriptor ring + [1:31] Reserved + +All descriptor types share some common fields:: + + field width description + ------------------------------------------------------------------- + DMA_DESC_BUF_ADDR 8 Phys addr of desc payload, 8-byte + aligned + DMA_DESC_COOKIE 8 Desc cookie for completion matching, + upper-most bit is reserved + DMA_DESC_BUF_SIZE 2 Desc payload size in bytes + DMA_DESC_TLV_SIZE 2 Desc payload total size in bytes + used for TLVs. 
Must be <= + DMA_DESC_BUF_SIZE. + DMA_DESC_COMP_ERR 2 Completion status of associated + desc payload. High order bit is + clear on new descs, toggled by + hw for completed items. + +To support forward- and backward-compatibility, descriptor and completion +payloads are specified in TLV format. Fields are packed with Type=field name, +Length=field length, and Value=field value. Software will ignore unknown fields +filled in by the switch. Likewise, the switch will ignore unknown fields +filled in by software. + +Descriptor payload buffer is 8-byte aligned and TLVs are 8-byte aligned. The +value within a TLV is also 8-byte aligned. The (packed, 8 byte) TLV header is:: + + field width description + ----------------------------- + type 4 TLV type + len 2 TLV value length + pad 2 Reserved + +The alignment requirements for descriptors and TLVs are to avoid unaligned +access exceptions in software. Note that the payload for each TLV is also +8 byte aligned. + +Figure 1 shows an example descriptor buffer with two TLVs:: + + <------- 8 bytes -------> + + 8-byte +––––+ +–––––––––––+–––––+–––––+ +–+ + align | type | len | pad | TLV#1 hdr | + +–––––––––––+–––––+–––––+ (len=22) | + | | | + | value | TVL#1 value | + | | (padded to 8-byte | + | +–––––+ alignment) | + | |/////| | + 8-byte +––––+ +–––––––––––+–––––––––––+ | + align | type | len | pad | TLV#2 hdr DESC_BUF_SIZE + +–––––+–––––+–––––+–––––+ (len=2) | + |value|/////////////////| TLV#2 value | + +–––––+/////////////////| | + |///////////////////////| | + |///////////////////////| | + |///////////////////////| | + |////////unused/////////| | + |////////space//////////| | + |///////////////////////| | + |///////////////////////| | + |///////////////////////| | + +–––––––––––––––––––––––+ +–+ + + fig. 1 + +TLVs can be nested within the NEST TLV type. + +Interrupt credits +^^^^^^^^^^^^^^^^^ + +MSI-X vectors used for descriptor ring completions use a credit mechanism for +efficient device, PCIe bus, OS and driver operations. Each descriptor ring has +a credit count which represents the number of outstanding descriptors to be +processed by the driver. As the device marks descriptors complete, the credit +count is incremented. As the driver processes those outstanding descriptors, +it returns credits back to the device. This way, the device knows the driver's +progress and can make decisions about when to fire the next interrupt or not. +When the credit count is zero, and the first descriptors are posted for the +driver, a single interrupt is fired. Once the interrupt is fired, the +interrupt is disabled (auto-masked*). In response to the interrupt, the driver +will process descriptors and PIO write a returned credit value for that +descriptor ring. If the driver returns all credits (the driver caught up with +the device and there is no outstanding work), then the interrupt is unmasked, +but not fired. If only partial credits are returned, the interrupt remains +masked but the device generates an interrupt, signaling the driver that more +outstanding work is available. + +(* this masking is unrelated to the MSI-X interrupt mask register) + +Endianness +---------- + +Device registers are hard-coded to little-endian (LE). The driver should +convert to/from host endianness to LE for device register accesses. + +Descriptors are LE. Descriptor buffer TLVs will have LE type and length +fields, but the value field can either be LE or network-byte-order, depending +on context. TLV values containing network packet data will be in network-byte +order. 
A TLV value containing a field or mask used to compare against network +packet data is network-byte order. For example, flow match fields (and masks) +are network-byte-order since they're matched directly, byte-by-byte, against +network packet data. All non-network-packet TLV multi-byte values will be LE. + +TLV values in network-byte-order are designated with (N). + + +Test Registers +============== + +Rocker has several test registers to support troubleshooting register access, +interrupt generation, and DMA operations:: + + TEST_REG, offset 0x0010, 32-bit (R/W) + TEST_REG64, offset 0x0018, 64-bit (R/W) + TEST_IRQ, offset 0x0020, 32-bit (R/W) + TEST_DMA_ADDR, offset 0x0028, 64-bit (R/W) + TEST_DMA_SIZE, offset 0x0030, 32-bit (R/W) + TEST_DMA_CTRL, offset 0x0034, 32-bit (R/W) + +Reads to TEST_REG and TEST_REG64 will read a value equal to twice the last +value written to the register. The 32-bit and 64-bit versions are for testing +32-bit and 64-bit host accesses. + +A vector can be written to TEST_IRQ and the device will generate an interrupt +for that vector. + +To test basic DMA operations, allocate a DMA-able host buffer and put the +buffer address into TEST_DMA_ADDR and size into TEST_DMA_SIZE. Then, write to +TEST_DMA_CTRL to manipulate the buffer contents. TEST_DMA_CTRL operations are:: + + operation value description + ----------------------------------------------------------- + TEST_DMA_CTRL_CLEAR 1 clear buffer + TEST_DMA_CTRL_FILL 2 fill buffer bytes with 0x96 + TEST_DMA_CTRL_INVERT 4 invert bytes in buffer + +Various buffer address and sizes should be tested to verify no address boundary +issue exists. In particular, buffers that start on odd-8-byte boundary and/or +span multiple PAGE sizes should be tested. + + +Ports +===== + +Physical and Logical Ports +------------------------------------ + +The switch supports up to 62 physical (front-panel) ports. Register +PORT_PHYS_COUNT returns the actual number of physical ports available:: + + PORT_PHYS_COUNT, offset 0x0304, 32-bit, (R) + +In addition to front-panel ports, the switch supports logical ports for +tunnels. + +Front-panel ports and logical tunnel ports are mapped into a single 32-bit port +space. A special CPU port is assigned port 0. The front-panel ports are +mapped to ports 1-62. A special loopback port is assigned port 63. Logical +tunnel ports are assigned ports 0x0001000-0x0001ffff. +To summarize the port assignments:: + + port mapping + ------------------------------------------------------- + 0 CPU port (for packets to/from host CPU) + 1-62 front-panel physical ports + 63 loopback port + 64-0x0000ffff RSVD + 0x00010000-0x0001ffff logical tunnel ports + 0x00020000-0xffffffff RSVD + +Physical Port Mode +------------------ + +Switch front-panel ports operate in a mode. Currently, the only mode is +OF-DPA. OF-DPA[1] mode is based on OpenFlow Data Plane Abstraction (OF-DPA) +Abstract Switch Specification, Version 1.0, from Broadcom Corporation. To +set/get the mode for front-panel ports, see port settings, below. + +Port Settings +------------- + +Link status for all front-panel ports is available via PORT_PHYS_LINK_STATUS:: + + PORT_PHYS_LINK_STATUS, offset 0x0310, 64-bit, (R) + + Value is port bitmap. Bits 0 and 63 always read 0. Bits 1-62 + read 1 for link UP and 0 for link DOWN for respective front-panel ports. 
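+
+For illustration, a driver might decode the link bitmap as in the following
+sketch; the helper assumes BAR0 has already been mapped, ignores host
+byte-swapping, and, as described above, reads the 64-bit register as two
+4-byte accesses, lower half first::
+
+   #include <stdbool.h>
+   #include <stdint.h>
+
+   #define ROCKER_PORT_PHYS_LINK_STATUS 0x0310
+
+   /* 'bar0' is the mapped little-endian BAR0 register space. */
+   static bool rocker_port_link_up(const volatile uint32_t *bar0, unsigned port)
+   {
+       uint64_t lo = bar0[ROCKER_PORT_PHYS_LINK_STATUS / 4];
+       uint64_t hi = bar0[ROCKER_PORT_PHYS_LINK_STATUS / 4 + 1];
+       uint64_t bitmap = lo | (hi << 32);
+
+       /* Bits 1-62 are front-panel ports; bits 0 and 63 always read 0. */
+       return port >= 1 && port <= 62 && (bitmap & (1ULL << port));
+   }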
+ +Other properties for front-panel ports are available via DMA CMD descriptors:: + + Get PORT_SETTINGS descriptor: + + field width description + ---------------------------------------------- + PORT_SETTINGS 2 CMD_GET + PPORT 4 Physical port # + + Get PORT_SETTINGS completion: + + field width description + ---------------------------------------------- + PPORT 4 Physical port # + SPEED 4 Current port interface speed, in Mbps + DUPLEX 1 1 = Full, 0 = Half + AUTONEG 1 1 = enabled, 0 = disabled + MACADDR 6 Port MAC address + MODE 1 0 = OF-DPA + LEARNING 1 MAC address learning on port + 1 = enabled + 0 = disabled + PHYS_NAME Physical port name (string) + + Set PORT_SETTINGS descriptor: + + field width description + ---------------------------------------------- + PORT_SETTINGS 2 CMD_SET + PPORT 4 Physical port # + SPEED 4 Port interface speed, in Mbps + DUPLEX 1 1 = Full, 0 = Half + AUTONEG 1 1 = enabled, 0 = disabled + MACADDR 6 Port MAC address + MODE 1 0 = OF-DPA + +Port Enable +----------- + +Front-panel ports are initially disabled, which means port ingress and egress +packets will be dropped. To enable or disable a port, use PORT_PHYS_ENABLE:: + + PORT_PHYS_ENABLE: offset 0x0318, 64-bit, (R/W) + + Value is bitmap of first 64 ports. Bits 0 and 63 are ignored + and always read as 0. Write 1 to enable port; write 0 to disable it. + Default is 0. + + +Switch Control +============== + +This section covers switch-wide register settings. + +Control +------- + +This register is used for low level control of the switch:: + + CONTROL: offset 0x0300, 32-bit, (W) + + bit name description + ------------------------------------------------------------------------ + [0] CONTROL_RESET If set, device will perform reset + [1:31] Reserved + +Switch ID +--------- + +The switch has a SWITCH_ID to be used by software to uniquely identify the +switch:: + + SWITCH_ID: offset 0x0320, 64-bit, (R) + + Value is opaque to switch software and no special encoding is implied. + + +Events +====== + +Non-I/O asynchronous events from the device are notified to the host using the +event ring. The TLV structure for events is:: + + field width description + --------------------------------------------------- + TYPE 4 Event type, one of: + 1: LINK_CHANGED + 2: MAC_VLAN_SEEN + INFO Event info (details below) + +Link Changed Event +------------------ + +When link status changes on a physical port, this event is generated:: + + field width description + --------------------------------------------------- + INFO + PPORT 4 Physical port + LINKUP 1 Link status: + 0: down + 1: up + +MAC VLAN Seen Event +------------------- + +When a packet ingresses on a port and the source MAC/VLAN isn't known to the +device, the device will generate this event. In response to the event, the +driver should install to the device the MAC/VLAN on the port into the bridge +table. Once installed, the MAC/VLAN is known on the port and this event will +no longer be generated. + +:: + + field width description + --------------------------------------------------- + INFO + PPORT 4 Physical port + MAC 6 MAC address + VLAN 2 VLAN ID + + +CPU Packet Processing +===================== + +Ingress packets directed to the host CPU for further processing are delivered +in the DMA RX ring. Likewise, host CPU originating packets destined to egress +on switch ports are scheduled by software using the DMA TX ring. + +Tx Packet Processing +-------------------- + +Software schedules packets for egress on switch ports using the DMA TX ring. 
A +TX descriptor buffer describes the packet location and size in host DMA-able +memory, the destination port, and any hardware-offload functions (such as L3 +payload checksum offload). Software then bumps the descriptor head to signal +hardware of new Tx work. In response, hardware will DMA read Tx descriptors up +to head, DMA read descriptor buffer and packet data, perform offloading +functions, and finally frame packet on wire (network). Once packet processing +is complete, hardware will writeback status to descriptor(s) to signal to +software that Tx is complete and software resources (e.g. skb) backing packet +can be released. + +Figure 2 shows an example 3-fragment packet queued with one Tx descriptor. A +TLV is used for each packet fragment:: + + pkt frag 1 + +–––––––+ +–+ + +–––+ | | + desc buf | | | | + +––––––––+ | | | | + Tx ring +–––+ +–––––+ | | | + +–––––––––+ | | TLVs | +–––––––+ | + | +–––+ +––––––––+ pkt frag 2 | + | desc 0 | | +–––––+ +–––––––+ | + +–––––––––+ | TLVs | +–––+ | | + head+–+ | +––––––––+ | | | + | desc 1 | | +–––––+ +–––––––+ |pkt + +–––––––––+ | TLVs | | | + | | +––––––––+ | pkt frag 3 | + | | | +–––––––+ | + +–––––––––+ +–––+ | | + | | | | | + | | | | | + +–––––––––+ | | | + | | | | | + | | | | | + +–––––––––+ | | | + | | +–––––––+ +–+ + | | + +–––––––––+ + + fig 2. + +The TLVs for Tx descriptor buffer are:: + + field width description + --------------------------------------------------------------------- + PPORT 4 Destination physical port # + TX_OFFLOAD 1 Hardware offload modes: + 0: no offload + 1: insert IP csum (ipv4 only) + 2: insert TCP/UDP csum + 3: L3 csum calc and insert + into csum offset (TX_L3_CSUM_OFF) + 16-bit 1's complement csum value. + IPv4 pseudo-header and IP + already calculated by OS + and inserted. + 4: TSO (TCP Segmentation Offload) + TX_L3_CSUM_OFF 2 For L3 csum offload mode, the offset, + from the beginning of the packet, + of the csum field in the L3 header + TX_TSO_MSS 2 For TSO offload mode, the + Maximum Segment Size in bytes + TX_TSO_HDR_LEN 2 For TSO offload mode, the + length of ethernet, IP, and + TCP/UDP headers, including IP + and TCP options. + TX_FRAGS Packet fragments + TX_FRAG Packet fragment + TX_FRAG_ADDR 8 DMA address of packet fragment + TX_FRAG_LEN 2 Packet fragment length + +Possible status return codes in descriptor on completion are:: + + DESC_COMP_ERR reason + -------------------------------------------------------------------- + 0 OK + -ROCKER_ENXIO address or data read err on desc buf or packet + fragment + -ROCKER_EINVAL bad pport or TSO or csum offloading error + -ROCKER_ENOMEM no memory for internal staging tx fragment + +Rx Packet Processing +-------------------- + +For packets ingressing on switch ports that are not forwarded by the switch but +rather directed to the host CPU for further processing are delivered in the DMA +RX ring. Rx descriptor buffers are allocated by software and placed on the +ring. Hardware will fill Rx descriptor buffers with packet data, write the +completion, and signal to software that a new packet is ready. Since Rx packet +size is not known a-priori, the Rx descriptor buffer must be allocated for +worst-case packet size. A single Rx descriptor will contain the entire Rx +packet data in one RX_FRAG. Other Rx TLVs describe and hardware offloads +performed on the packet, such as checksum validation. 
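+
+To illustrate the ring convention described earlier (software writes HEAD,
+hardware writes TAIL, both wrapping modulo the ring size), a driver's Rx
+buffer posting path might look like the following simplified sketch; the
+descriptor fill and the PIO write of HEAD are only indicated in comments,
+and all names are hypothetical::
+
+   #include <stdbool.h>
+   #include <stdint.h>
+
+   struct rocker_ring {
+       uint32_t size;   /* power of 2, between 2 and 64K entries */
+       uint32_t head;   /* owned by software */
+       uint32_t tail;   /* owned by hardware, read back from the TAIL register */
+   };
+
+   /* Full when HEAD is one position behind TAIL; empty when HEAD == TAIL. */
+   static bool ring_full(const struct rocker_ring *r)
+   {
+       return ((r->head + 1) & (r->size - 1)) == r->tail;
+   }
+
+   /* Post one worst-case-sized Rx buffer; returns false if the ring is full. */
+   static bool post_rx_buffer(struct rocker_ring *r)
+   {
+       if (ring_full(r)) {
+           return false;
+       }
+       /* fill descriptor r->head: DMA_DESC_BUF_ADDR, DMA_DESC_BUF_SIZE,
+        * cookie, and a cleared completion bit in DMA_DESC_COMP_ERR */
+       r->head = (r->head + 1) & (r->size - 1);
+       /* then write r->head to this ring's DMA_DESC_xxx_HEAD register */
+       return true;
+   }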
+
+The TLVs for Rx descriptor buffer are::
+
+  field                    width  description
+  ---------------------------------------------------
+  PPORT                    4      Source physical port #
+  RX_FLAGS                 2      Packet parsing flags:
+                                  (1 << 0): IPv4 packet
+                                  (1 << 1): IPv6 packet
+                                  (1 << 2): csum calculated
+                                  (1 << 3): IPv4 csum good
+                                  (1 << 4): IP fragment
+                                  (1 << 5): TCP packet
+                                  (1 << 6): UDP packet
+                                  (1 << 7): TCP/UDP csum good
+                                  (1 << 8): Offload forward
+  RX_CSUM                  2      IP calculated checksum:
+                                  IPv4: IP payload csum
+                                  IPv6: header and payload csum
+                                  (Only valid if RX_FLAGS:csum calc is set)
+  RX_FRAG_ADDR             8      DMA address of packet fragment
+  RX_FRAG_MAX_LEN          2      Packet maximum fragment length
+  RX_FRAG_LEN              2      Actual packet fragment length after receive
+
+The Offload forward RX_FLAG indicates that the device has already forwarded
+the packet, so the host CPU should not also forward the packet.
+
+Possible status return codes in descriptor on completion are::
+
+  DESC_COMP_ERR     reason
+  --------------------------------------------------------------------
+  0                 OK
+  -ROCKER_ENXIO     address or data read err on desc buf
+  -ROCKER_ENOMEM    no memory for internal staging desc buf
+  -ROCKER_EMSGSIZE  Rx descriptor buffer wasn't big enough to contain
+                    packet data TLV and other TLVs.
+
+
+OF-DPA Mode
+===========
+
+OF-DPA mode allows the switch to offload flow packet processing functions to
+hardware. An OpenFlow controller would communicate with an OpenFlow agent
+installed on the switch. The OpenFlow agent would (directly or indirectly)
+communicate with the Rocker switch driver, which in turn would program switch
+hardware with flow functionality, as defined in OF-DPA. The block diagram is::
+
+        +–––––––––––––––----–––+
+        |          OF          |
+        |   Remote Controller  |
+        +––––––––+––----–––––––+
+                 |
+                 |
+        +––––––––+–––––––––+
+        |        OF        |
+        |    Local Agent   |
+        +––––––––––––––––––+
+        |                  |
+        |   Rocker Driver  |
+        +––––––––––––––––––+
+
+        +––––––––––––––––––+
+        |                  |
+        |   Rocker Switch  |
+        +––––––––––––––––––+
+
+To participate in flow functions, ports must be configured for OF-DPA mode
+during switch initialization.
+
+OF-DPA Flow Table Interface
+---------------------------
+
+There are commands to add, modify, delete, and get stats of flow table entries.
+The commands are issued using the DMA CMD descriptor ring.
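+
+As an illustration only, the sketch below shows how such a command might be
+posted on the CMD descriptor ring (ring index 0) and its completion polled.
+The descriptor layout is inferred from the common descriptor fields listed
+earlier and is not normative; mmio_write32(), the DMA mapping of the TLV
+buffer, ring-full checks, locking and the interrupt/credit mechanism are all
+hypothetical or left out::
+
+  #include <stdint.h>
+
+  #define DMA_DESC_CMD_HEAD  0x100c   /* HEAD register of ring 0 (CMD) */
+  #define DESC_COMP_BIT      0x8000   /* high-order bit of DMA_DESC_COMP_ERR */
+
+  struct rocker_desc {                /* assumed in-memory descriptor shape */
+      uint64_t buf_addr;              /* DMA_DESC_BUF_ADDR */
+      uint64_t cookie;                /* DMA_DESC_COOKIE */
+      uint16_t buf_size;              /* DMA_DESC_BUF_SIZE */
+      uint16_t tlv_size;              /* DMA_DESC_TLV_SIZE */
+      uint16_t comp_err;              /* DMA_DESC_COMP_ERR */
+  };
+
+  void mmio_write32(uint32_t reg, uint32_t val);   /* hypothetical PIO helper */
+
+  /* Post one command descriptor and busy-wait for hardware to complete it.
+   * 'tlv_dma' is the bus address of an already-built, 8-byte aligned TLV
+   * buffer of 'tlv_len' bytes.  Returns the DESC_COMP_ERR status code. */
+  static int cmd_issue(volatile struct rocker_desc *ring, uint32_t *head,
+                       uint32_t size, uint64_t tlv_dma, uint16_t tlv_len)
+  {
+      volatile struct rocker_desc *desc = &ring[*head];
+
+      desc->buf_addr = tlv_dma;
+      desc->buf_size = tlv_len;
+      desc->tlv_size = tlv_len;
+      desc->comp_err &= ~DESC_COMP_BIT;        /* mark as not yet completed */
+
+      *head = (*head + 1) % size;              /* must not pass TAIL (not checked) */
+      mmio_write32(DMA_DESC_CMD_HEAD, *head);  /* tell hardware about new work */
+
+      while (!(desc->comp_err & DESC_COMP_BIT)) {
+          /* poll; a real driver would use the CMD completion interrupt */
+      }
+      return desc->comp_err & ~DESC_COMP_BIT;  /* 0 on success */
+  }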
The following +commands are defined:: + + CMD_ADD: add an entry to flow table + CMD_MOD: modify an entry in flow table + CMD_DEL: delete an entry from flow table + CMD_GET_STATS: get stats for flow entry + +TLVs for add and modify commands are:: + + field width description + ---------------------------------------------------- + OF_DPA_CMD 2 CMD_[ADD|MOD] + OF_DPA_TBL 2 Flow table ID + 0: ingress port + 10: vlan + 20: termination mac + 30: unicast routing + 40: multicast routing + 50: bridging + 60: ACL policy + OF_DPA_PRIORITY 4 Flow priority + OF_DPA_HARDTIME 4 Hard timeout for flow + OF_DPA_IDLETIME 4 Idle timeout for flow + OF_DPA_COOKIE 8 Cookie + +Additional TLVs based on flow table ID: + +Table ID 0: ingress port:: + + field width description + ---------------------------------------------------- + OF_DPA_IN_PPORT 4 ingress physical port number + OF_DPA_GOTO_TBL 2 goto table ID; zero to drop + +Table ID 10: vlan:: + + field width description + ---------------------------------------------------- + OF_DPA_IN_PPORT 4 ingress physical port number + OF_DPA_VLAN_ID 2 (N) vlan ID + OF_DPA_VLAN_ID_MASK 2 (N) vlan ID mask + OF_DPA_GOTO_TBL 2 goto table ID; zero to drop + OF_DPA_NEW_VLAN_ID 2 (N) new vlan ID + +Table ID 20: termination mac:: + + field width description + ---------------------------------------------------- + OF_DPA_IN_PPORT 4 ingress physical port number + OF_DPA_IN_PPORT_MASK 4 ingress physical port number mask + OF_DPA_ETHERTYPE 2 (N) must be either 0x0800 or 0x86dd + OF_DPA_DST_MAC 6 (N) destination MAC + OF_DPA_DST_MAC_MASK 6 (N) destination MAC mask + OF_DPA_VLAN_ID 2 (N) vlan ID + OF_DPA_VLAN_ID_MASK 2 (N) vlan ID mask + OF_DPA_GOTO_TBL 2 only acceptable values are + unicast or multicast routing + table IDs + OF_DPA_OUT_PPORT 2 if specified, must be + controller, set zero otherwise + +Table ID 30: unicast routing:: + + field width description + ---------------------------------------------------- + OF_DPA_ETHERTYPE 2 (N) must be either 0x0800 or 0x86dd + OF_DPA_DST_IP 4 (N) destination IPv4 address. + Must be unicast address + OF_DPA_DST_IP_MASK 4 (N) IP mask. Must be prefix mask + OF_DPA_DST_IPV6 16 (N) destination IPv6 address. + Must be unicast address + OF_DPA_DST_IPV6_MASK 16 (N) IPv6 mask. Must be prefix mask + OF_DPA_GOTO_TBL 2 goto table ID; zero to drop + OF_DPA_GROUP_ID 4 data for GROUP action must + be an L3 Unicast group entry + +Table ID 40: multicast routing:: + + field width description + ---------------------------------------------------- + OF_DPA_ETHERTYPE 2 (N) must be either 0x0800 or 0x86dd + OF_DPA_VLAN_ID 2 (N) vlan ID + OF_DPA_SRC_IP 4 (N) source IPv4. Optional, + can contain IPv4 address, + must be completely masked + if not used + OF_DPA_SRC_IP_MASK 4 (N) IP Mask + OF_DPA_DST_IP 4 (N) destination IPv4 address. + Must be multicast address + OF_DPA_SRC_IPV6 16 (N) source IPv6 Address. Optional. + Can contain IPv6 address, + must be completely masked + if not used + OF_DPA_SRC_IPV6_MASK 16 (N) IPv6 mask. + OF_DPA_DST_IPV6 16 (N) destination IPv6 Address. 
Must + be multicast address + Must be multicast address + OF_DPA_GOTO_TBL 2 goto table ID; zero to drop + OF_DPA_GROUP_ID 4 data for GROUP action must + be an L3 multicast group entry + +Table ID 50: bridging:: + + field width description + ---------------------------------------------------- + OF_DPA_VLAN_ID 2 (N) vlan ID + OF_DPA_TUNNEL_ID 4 tunnel ID + OF_DPA_DST_MAC 6 (N) destination MAC + OF_DPA_DST_MAC_MASK 6 (N) destination MAC mask + OF_DPA_GOTO_TBL 2 goto table ID; zero to drop + OF_DPA_GROUP_ID 4 data for GROUP action must + be a L2 Interface, L2 + Multicast, L2 Flood, + or L2 Overlay group entry + as appropriate + OF_DPA_TUNNEL_LPORT 4 unicast Tenant Bridging + flows specify a tunnel + logical port ID + OF_DPA_OUT_PPORT 2 data for OUTPUT action, + restricted to CONTROLLER, + set to 0 otherwise + +Table ID 60: acl policy:: + + field width description + ---------------------------------------------------- + OF_DPA_IN_PPORT 4 ingress physical port number + OF_DPA_IN_PPORT_MASK 4 ingress physical port number mask + OF_DPA_ETHERTYPE 2 (N) ethertype + OF_DPA_VLAN_ID 2 (N) vlan ID + OF_DPA_VLAN_ID_MASK 2 (N) vlan ID mask + OF_DPA_VLAN_PCP 2 (N) vlan Priority Code Point + OF_DPA_VLAN_PCP_MASK 2 (N) vlan Priority Code Point mask + OF_DPA_SRC_MAC 6 (N) source MAC + OF_DPA_SRC_MAC_MASK 6 (N) source MAC mask + OF_DPA_DST_MAC 6 (N) destination MAC + OF_DPA_DST_MAC_MASK 6 (N) destination MAC mask + OF_DPA_TUNNEL_ID 4 tunnel ID + OF_DPA_SRC_IP 4 (N) source IPv4. Optional, + can contain IPv4 address, + must be completely masked + if not used + OF_DPA_SRC_IP_MASK 4 (N) IP Mask + OF_DPA_DST_IP 4 (N) destination IPv4 address. + Must be multicast address + OF_DPA_DST_IP_MASK 4 (N) IP Mask + OF_DPA_SRC_IPV6 16 (N) source IPv6 Address. Optional. + Can contain IPv6 address, + must be completely masked + if not used + OF_DPA_SRC_IPV6_MASK 16 (N) IPv6 mask + OF_DPA_DST_IPV6 16 (N) destination IPv6 Address. Must + be multicast address. + OF_DPA_DST_IPV6_MASK 16 (N) IPv6 mask + OF_DPA_SRC_ARP_IP 4 (N) source IPv4 address in the ARP + payload. Only used if ethertype + == 0x0806. + OF_DPA_SRC_ARP_IP_MASK 4 (N) IP Mask + OF_DPA_IP_PROTO 1 IP protocol + OF_DPA_IP_PROTO_MASK 1 IP protocol mask + OF_DPA_IP_DSCP 1 DSCP + OF_DPA_IP_DSCP_MASK 1 DSCP mask + OF_DPA_IP_ECN 1 ECN + OF_DPA_IP_ECN_MASK 1 ECN mask + OF_DPA_L4_SRC_PORT 2 (N) L4 source port, only for + TCP, UDP, or SCTP + OF_DPA_L4_SRC_PORT_MASK 2 (N) L4 source port mask + OF_DPA_L4_DST_PORT 2 (N) L4 source port, only for + TCP, UDP, or SCTP + OF_DPA_L4_DST_PORT_MASK 2 (N) L4 source port mask + OF_DPA_ICMP_TYPE 1 ICMP type, only if IP + protocol is 1 + OF_DPA_ICMP_TYPE_MASK 1 ICMP type mask + OF_DPA_ICMP_CODE 1 ICMP code + OF_DPA_ICMP_CODE_MASK 1 ICMP code mask + OF_DPA_IPV6_LABEL 4 (N) IPv6 flow label + OF_DPA_IPV6_LABEL_MASK 4 (N) IPv6 flow label mask + OF_DPA_GROUP_ID 4 data for GROUP action + OF_DPA_QUEUE_ID_ACTION 1 write the queue ID + OF_DPA_NEW_QUEUE_ID 1 queue ID + OF_DPA_VLAN_PCP_ACTION 1 write the VLAN priority + OF_DPA_NEW_VLAN_PCP 1 VLAN priority + OF_DPA_IP_DSCP_ACTION 1 write the DSCP + OF_DPA_NEW_IP_DSCP 1 new DSCP + OF_DPA_TUNNEL_LPORT 4 restrct to valid tunnel + logical port, set to 0 + otherwise. 
+ OF_DPA_OUT_PPORT 2 data for OUTPUT action, + restricted to CONTROLLER, + set to 0 otherwise + OF_DPA_CLEAR_ACTIONS 4 if 1 packets matching flow are + dropped (all other instructions + ignored) + +TLVs for flow delete and get stats command are:: + + field width description + --------------------------------------------------- + OF_DPA_CMD 2 CMD_[DEL|GET_STATS] + OF_DPA_COOKIE 8 Cookie + +On completion of get stats command, the descriptor buffer is written back with +the following TLVs:: + + field width description + --------------------------------------------------- + OF_DPA_STAT_DURATION 4 Flow duration + OF_DPA_STAT_RX_PKTS 8 Received packets + OF_DPA_STAT_TX_PKTS 8 Transmit packets + +Possible status return codes in descriptor on completion are:: + + DESC_COMP_ERR command reason + -------------------------------------------------------------------- + 0 all OK + -ROCKER_EFAULT all head or tail index outside + of ring + -ROCKER_ENXIO all address or data read err on + desc buf + -ROCKER_EMSGSIZE GET_STATS cmd descriptor buffer wasn't + big enough to contain write-back + TLVs + -ROCKER_EINVAL all invalid parameters passed in + -ROCKER_EEXIST ADD entry already exists + -ROCKER_ENOSPC ADD no space left in flow table + -ROCKER_ENOENT MOD|DEL|GET_STATS cookie invalid + +Group Table Interface +--------------------- + +There are commands to add, modify, delete, and get stats of group table +entries. The commands are issued using the DMA CMD descriptor ring. The +following commands are defined:: + + CMD_ADD: add an entry to group table + CMD_MOD: modify an entry in group table + CMD_DEL: delete an entry from group table + CMD_GET_STATS: get stats for group entry + +TLVs for add and modify commands are:: + + field width description + ----------------------------------------------------------- + FLOW_GROUP_CMD 2 CMD_[ADD|MOD] + FLOW_GROUP_ID 2 Flow group ID + FLOW_GROUP_TYPE 1 Group type: + 0: L2 interface + 1: L2 rewrite + 2: L3 unicast + 3: L2 multicast + 4: L2 flood + 5: L3 interface + 6: L3 multicast + 7: L3 ECMP + 8: L2 overlay + FLOW_VLAN_ID 2 Vlan ID (types 0, 3, 4, 6) + FLOW_L2_PORT 2 Port (types 0) + FLOW_INDEX 4 Index (all types but 0) + FLOW_OVERLAY_TYPE 1 Overlay sub-type (type 8): + 0: Flood unicast tunnel + 1: Flood multicast tunnel + 2: Multicast unicast tunnel + 3: Multicast multicast tunnel + FLOW_GROUP_ACTION nest + FLOW_GROUP_ID 2 next group ID in chain (all + types except 0) + FLOW_OUT_PORT 4 egress port (types 0, 8) + FLOW_POP_VLAN_TAG 1 strip outer VLAN tag (type 1 + only) + FLOW_VLAN_ID 2 (types 1, 5) + FLOW_SRC_MAC 6 (types 1, 2, 5) + FLOW_DST_MAC 6 (types 1, 2) + +TLVs for flow delete and get stats command are:: + + field width description + ----------------------------------------------------------- + FLOW_GROUP_CMD 2 CMD_[DEL|GET_STATS] + FLOW_GROUP_ID 2 Flow group ID + +On completion of get stats command, the descriptor buffer is written back with +the following TLVs:: + + field width description + --------------------------------------------------- + FLOW_GROUP_ID 2 Flow group ID + FLOW_STAT_DURATION 4 Flow duration + FLOW_STAT_REF_COUNT 4 Flow reference count + FLOW_STAT_BUCKET_COUNT 4 Flow bucket count + +Possible status return codes in descriptor on completion are:: + + DESC_COMP_ERR command reason + -------------------------------------------------------------------- + 0 all OK + -ROCKER_EFAULT all head or tail index outside + of ring + -ROCKER_ENXIO all address or data read err on + desc buf + -ROCKER_ENOSPC GET_STATS cmd descriptor buffer wasn't + big enough to 
contain write-back + TLVs + -ROCKER_EINVAL ADD|MOD invalid parameters passed in + -ROCKER_EEXIST ADD entry already exists + -ROCKER_ENOSPC ADD no space left in flow table + -ROCKER_ENOENT MOD|DEL|GET_STATS group ID invalid + -ROCKER_EBUSY DEL group reference count non-zero + -ROCKER_ENODEV ADD next group ID doesn't exist + + + +References +========== + +[1] OpenFlow Data Plane Abstraction (OF-DPA) Abstract Switch Specification, +Version 1.0, from Broadcom Corporation, February 21, 2014. diff --git a/docs/specs/rocker.txt b/docs/specs/rocker.txt deleted file mode 100644 index 1857b317030..00000000000 --- a/docs/specs/rocker.txt +++ /dev/null @@ -1,1014 +0,0 @@ -Rocker Network Switch Register Programming Guide -Copyright (c) Scott Feldman -Copyright (c) Neil Horman -Version 0.11, 12/29/2014 - -LICENSE -======= - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -SECTION 1: Introduction -======================= - -Overview --------- - -This document describes the hardware/software interface for the Rocker switch -device. The intended audience is authors of OS drivers and device emulation -software. - -Notations and Conventions -------------------------- - -o In register descriptions, [n:m] indicates a range from bit n to bit m, -inclusive. -o Use of leading 0x indicates a hexadecimal number. -o Use of leading 0b indicates a binary number. -o The use of RSVD or Reserved indicates that a bit or field is reserved for -future use. -o Field width is in bytes, unless otherwise noted. -o Register are (R) read-only, (R/W) read/write, (W) write-only, or (COR) clear -on read -o TLV values in network-byte-order are designated with (N). - - -SECTION 2: PCI Configuration Registers -====================================== - -PCI Configuration Space ------------------------ - -Each switch instance registers as a PCI device with PCI configuration space: - - offset width description value - --------------------------------------------- - 0x0 2 Vendor ID 0x1b36 - 0x2 2 Device ID 0x0006 - 0x4 4 Command/Status - 0x8 1 Revision ID 0x01 - 0x9 3 Class code 0x2800 - 0xC 1 Cache line size - 0xD 1 Latency timer - 0xE 1 Header type - 0xF 1 Built-in self test - 0x10 4 Base address low - 0x14 4 Base address high - 0x18-28 Reserved - 0x2C 2 Subsystem vendor ID * - 0x2E 2 Subsystem ID * - 0x30-38 Reserved - 0x3C 1 Interrupt line - 0x3D 1 Interrupt pin 0x00 - 0x3E 1 Min grant 0x00 - 0x3D 1 Max latency 0x00 - 0x40 1 TRDY timeout - 0x41 1 Retry count - 0x42 2 Reserved - - -* Assigned by sub-system implementation - -SECTION 3: Memory-Mapped Register Space -======================================= - -There are two memory-mapped BARs. BAR0 maps device register space and is -0x2000 in size. BAR1 maps MSI-X vector and PBA tables and is also 0x2000 in -size, allowing for 256 MSI-X vectors. - -All registers are 4 or 8 bytes long. It is assumed host software will access 4 -byte registers with one 4-byte access, and 8 byte registers with either two -4-byte accesses or a single 8-byte access. In the case of two 4-byte accesses, -access must be lower and then upper 4-bytes, in that order. 
- -BAR0 device register space is organized as follows: - - offset description - ------------------------------------------------------ - 0x0000-0x000f Bogus registers to catch misbehaving - drivers. Writes do nothing. Reads - back as 0xDEADBABE. - 0x0010-0x00ff Test registers - 0x0300-0x03ff General purpose registers - 0x1000-0x1fff Descriptor control - -Holes in register space are reserved. Writes to reserved registers do nothing. -Reads to reserved registers read back as 0. - -No fancy stuff like write-combining is enabled on any of the registers. - -BAR1 MSI-X register space is organized as follows: - - offset description - ------------------------------------------------------ - 0x0000-0x0fff MSI-X vector table (256 vectors total) - 0x1000-0x1fff MSI-X PBA table - - -SECTION 4: Interrupts, DMA, and Endianness -========================================== - -PCI Interrupts --------------- - -The device supports only MSI-X interrupts. BAR1 memory-mapped region contains -the MSI-X vector and PBA tables, with support for up to 256 MSI-X vectors. - -The vector assignment is: - - vector description - ----------------------------------------------------- - 0 Command descriptor ring completion - 1 Event descriptor ring completion - 2 Test operation completion - 3 RSVD - 4-255 Tx and Rx descriptor ring completion - Tx vector is even - Rx vector is odd - -A MSI-X vector table entry is 16 bytes: - - field offset width description - ------------------------------------------------------------- - lower_addr 0x0 4 [31:2] message address[31:2] - [1:0] Rsvd (4 byte alignment - required) - upper_addr 0x4 4 [31:19] Rsvd - [14:0] message address[46:32] - data 0x8 4 message data[31:0] - control 0xc 4 [31:1] Rsvd - [0] mask (0 = enable, - 1 = masked) - -Software should install the Interrupt Service Routine (ISR) before any ports -are enabled or any commands are issued on the command ring. - -DMA Operations --------------- - -DMA operations are used for packet DMA to/from the CPU, command and event -processing. Command processing includes statistical counters and table dumps, -table insertion/deletion, and more. Event processing provides an async -notification method for device-originating events. Each DMA operation has a -set of control registers to manage a descriptor ring. The descriptor rings are -allocated from contiguous host DMA-able memory and registers specify the rings -base address, size and current head and tail indices. Software always writes -the head, and hardware always writes the tail. - -The higher-order bit of DMA_DESC_COMP_ERR is used to mark hardware completion -of a descriptor. Software will clear this bit when posting a descriptor to the -ring, and hardware will set this bit when the descriptor is complete. - -Descriptor ring sizes must be a power of 2 and range from 2 to 64K entries. -Descriptor rings' base address must be 8-byte aligned. Descriptors must be -packed within ring. Each descriptor in each ring must also be aligned on an 8 -byte boundary. 
Each descriptor ring will have these registers: - - DMA_DESC_xxx_BASE_ADDR, offset 0x1000 + (x * 32), 64-bit, (R/W) - DMA_DESC_xxx_SIZE, offset 0x1008 + (x * 32), 32-bit, (R/W) - DMA_DESC_xxx_HEAD, offset 0x100c + (x * 32), 32-bit, (R/W) - DMA_DESC_xxx_TAIL, offset 0x1010 + (x * 32), 32-bit, (R) - DMA_DESC_xxx_CTRL, offset 0x1014 + (x * 32), 32-bit, (W) - DMA_DESC_xxx_CREDITS, offset 0x1018 + (x * 32), 32-bit, (R/W) - DMA_DESC_xxx_RSVD1, offset 0x101c + (x * 32), 32-bit, (R/W) - -Where x is descriptor ring index: - - index ring - -------------------- - 0 CMD - 1 EVENT - 2 TX (port 0) - 3 RX (port 0) - 4 TX (port 1) - 5 RX (port 1) - . - . - . - 124 TX (port 61) - 125 RX (port 61) - 126 Resv - 127 Resv - -Writing BASE_ADDR or SIZE will reset HEAD and TAIL to zero. HEAD cannot be -written past TAIL. To do so would wrap the ring. An empty ring is when HEAD -== TAIL. A full ring is when HEAD is one position behind TAIL. Both HEAD and -TAIL increment and modulo wrap at the ring size. - -CTRL register bits: - - bit name description - ------------------------------------------------------------------------ - [0] CTRL_RESET Reset the descriptor ring - [1:31] Reserved - -All descriptor types share some common fields: - - field width description - ------------------------------------------------------------------- - DMA_DESC_BUF_ADDR 8 Phys addr of desc payload, 8-byte - aligned - DMA_DESC_COOKIE 8 Desc cookie for completion matching, - upper-most bit is reserved - DMA_DESC_BUF_SIZE 2 Desc payload size in bytes - DMA_DESC_TLV_SIZE 2 Desc payload total size in bytes - used for TLVs. Must be <= - DMA_DESC_BUF_SIZE. - DMA_DESC_COMP_ERR 2 Completion status of associated - desc payload. High order bit is - clear on new descs, toggled by - hw for completed items. - -To support forward- and backward-compatibility, descriptor and completion -payloads are specified in TLV format. Fields are packed with Type=field name, -Length=field length, and Value=field value. Software will ignore unknown fields -filled in by the switch. Likewise, the switch will ignore unknown fields -filled in by software. - -Descriptor payload buffer is 8-byte aligned and TLVs are 8-byte aligned. The -value within a TLV is also 8-byte aligned. The (packed, 8 byte) TLV header is: - - field width description - ----------------------------- - type 4 TLV type - len 2 TLV value length - pad 2 Reserved - -The alignment requirements for descriptors and TLVs are to avoid unaligned -access exceptions in software. Note that the payload for each TLV is also -8 byte aligned. - -Figure 1 shows an example descriptor buffer with two TLVs. - - <------- 8 bytes -------> - - 8-byte +––––+ +–––––––––––+–––––+–––––+ +–+ - align | type | len | pad | TLV#1 hdr | - +–––––––––––+–––––+–––––+ (len=22) | - | | | - | value | TVL#1 value | - | | (padded to 8-byte | - | +–––––+ alignment) | - | |/////| | - 8-byte +––––+ +–––––––––––+–––––––––––+ | - align | type | len | pad | TLV#2 hdr DESC_BUF_SIZE - +–––––+–––––+–––––+–––––+ (len=2) | - |value|/////////////////| TLV#2 value | - +–––––+/////////////////| | - |///////////////////////| | - |///////////////////////| | - |///////////////////////| | - |////////unused/////////| | - |////////space//////////| | - |///////////////////////| | - |///////////////////////| | - |///////////////////////| | - +–––––––––––––––––––––––+ +–+ - - fig. 1 - -TLVs can be nested within the NEST TLV type. 
- -Interrupt credits -^^^^^^^^^^^^^^^^^ - -MSI-X vectors used for descriptor ring completions use a credit mechanism for -efficient device, PCIe bus, OS and driver operations. Each descriptor ring has -a credit count which represents the number of outstanding descriptors to be -processed by the driver. As the device marks descriptors complete, the credit -count is incremented. As the driver processes those outstanding descriptors, -it returns credits back to the device. This way, the device knows the driver's -progress and can make decisions about when to fire the next interrupt or not. -When the credit count is zero, and the first descriptors are posted for the -driver, a single interrupt is fired. Once the interrupt is fired, the -interrupt is disabled (auto-masked*). In response to the interrupt, the driver -will process descriptors and PIO write a returned credit value for that -descriptor ring. If the driver returns all credits (the driver caught up with -the device and there is no outstanding work), then the interrupt is unmasked, -but not fired. If only partial credits are returned, the interrupt remains -masked but the device generates an interrupt, signaling the driver that more -outstanding work is available. - -(* this masking is unrelated to the MSI-X interrupt mask register) - -Endianness ----------- - -Device registers are hard-coded to little-endian (LE). The driver should -convert to/from host endianness to LE for device register accesses. - -Descriptors are LE. Descriptor buffer TLVs will have LE type and length -fields, but the value field can either be LE or network-byte-order, depending -on context. TLV values containing network packet data will be in network-byte -order. A TLV value containing a field or mask used to compare against network -packet data is network-byte order. For example, flow match fields (and masks) -are network-byte-order since they're matched directly, byte-by-byte, against -network packet data. All non-network-packet TLV multi-byte values will be LE. - -TLV values in network-byte-order are designated with (N). - - -SECTION 5: Test Registers -========================= - -Rocker has several test registers to support troubleshooting register access, -interrupt generation, and DMA operations: - - TEST_REG, offset 0x0010, 32-bit (R/W) - TEST_REG64, offset 0x0018, 64-bit (R/W) - TEST_IRQ, offset 0x0020, 32-bit (R/W) - TEST_DMA_ADDR, offset 0x0028, 64-bit (R/W) - TEST_DMA_SIZE, offset 0x0030, 32-bit (R/W) - TEST_DMA_CTRL, offset 0x0034, 32-bit (R/W) - -Reads to TEST_REG and TEST_REG64 will read a value equal to twice the last -value written to the register. The 32-bit and 64-bit versions are for testing -32-bit and 64-bit host accesses. - -A vector can be written to TEST_IRQ and the device will generate an interrupt -for that vector. - -To test basic DMA operations, allocate a DMA-able host buffer and put the -buffer address into TEST_DMA_ADDR and size into TEST_DMA_SIZE. Then, write to -TEST_DMA_CTRL to manipulate the buffer contents. TEST_DMA_CTRL operations are: - - operation value description - ----------------------------------------------------------- - TEST_DMA_CTRL_CLEAR 1 clear buffer - TEST_DMA_CTRL_FILL 2 fill buffer bytes with 0x96 - TEST_DMA_CTRL_INVERT 4 invert bytes in buffer - -Various buffer address and sizes should be tested to verify no address boundary -issue exists. In particular, buffers that start on odd-8-byte boundary and/or -span multiple PAGE sizes should be tested. 
- - -SECTION 6: Ports -================ - -Physical and Logical Ports ------------------------------------- - -The switch supports up to 62 physical (front-panel) ports. Register -PORT_PHYS_COUNT returns the actual number of physical ports available: - - PORT_PHYS_COUNT, offset 0x0304, 32-bit, (R) - -In addition to front-panel ports, the switch supports logical ports for -tunnels. - -Front-panel ports and logical tunnel ports are mapped into a single 32-bit port -space. A special CPU port is assigned port 0. The front-panel ports are -mapped to ports 1-62. A special loopback port is assigned port 63. Logical -tunnel ports are assigned ports 0x0001000-0x0001ffff. -To summarize the port assignments: - - port mapping - ------------------------------------------------------- - 0 CPU port (for packets to/from host CPU) - 1-62 front-panel physical ports - 63 loopback port - 64-0x0000ffff RSVD - 0x00010000-0x0001ffff logical tunnel ports - 0x00020000-0xffffffff RSVD - -Physical Port Mode ------------------- - -Switch front-panel ports operate in a mode. Currently, the only mode is -OF-DPA. OF-DPA[1] mode is based on OpenFlow Data Plane Abstraction (OF-DPA) -Abstract Switch Specification, Version 1.0, from Broadcom Corporation. To -set/get the mode for front-panel ports, see port settings, below. - -Port Settings -------------- - -Link status for all front-panel ports is available via PORT_PHYS_LINK_STATUS: - - PORT_PHYS_LINK_STATUS, offset 0x0310, 64-bit, (R) - - Value is port bitmap. Bits 0 and 63 always read 0. Bits 1-62 - read 1 for link UP and 0 for link DOWN for respective front-panel ports. - -Other properties for front-panel ports are available via DMA CMD descriptors: - - Get PORT_SETTINGS descriptor: - - field width description - ---------------------------------------------- - PORT_SETTINGS 2 CMD_GET - PPORT 4 Physical port # - - Get PORT_SETTINGS completion: - - field width description - ---------------------------------------------- - PPORT 4 Physical port # - SPEED 4 Current port interface speed, in Mbps - DUPLEX 1 1 = Full, 0 = Half - AUTONEG 1 1 = enabled, 0 = disabled - MACADDR 6 Port MAC address - MODE 1 0 = OF-DPA - LEARNING 1 MAC address learning on port - 1 = enabled - 0 = disabled - PHYS_NAME Physical port name (string) - - Set PORT_SETTINGS descriptor: - - field width description - ---------------------------------------------- - PORT_SETTINGS 2 CMD_SET - PPORT 4 Physical port # - SPEED 4 Port interface speed, in Mbps - DUPLEX 1 1 = Full, 0 = Half - AUTONEG 1 1 = enabled, 0 = disabled - MACADDR 6 Port MAC address - MODE 1 0 = OF-DPA - -Port Enable ------------ - -Front-panel ports are initially disabled, which means port ingress and egress -packets will be dropped. To enable or disable a port, use PORT_PHYS_ENABLE: - - PORT_PHYS_ENABLE: offset 0x0318, 64-bit, (R/W) - - Value is bitmap of first 64 ports. Bits 0 and 63 are ignored - and always read as 0. Write 1 to enable port; write 0 to disable it. - Default is 0. - - -SECTION 7: Switch Control -========================= - -This section covers switch-wide register settings. - -Control -------- - -This register is used for low level control of the switch. 
- - CONTROL: offset 0x0300, 32-bit, (W) - - bit name description - ------------------------------------------------------------------------ - [0] CONTROL_RESET If set, device will perform reset - [1:31] Reserved - -Switch ID ---------- - -The switch has a SWITCH_ID to be used by software to uniquely identify the -switch: - - SWITCH_ID: offset 0x0320, 64-bit, (R) - - Value is opaque to switch software and no special encoding is implied. - - -SECTION 8: Events -================= - -Non-I/O asynchronous events from the device are notified to the host using the -event ring. The TLV structure for events is: - - field width description - --------------------------------------------------- - TYPE 4 Event type, one of: - 1: LINK_CHANGED - 2: MAC_VLAN_SEEN - INFO Event info (details below) - -Link Changed Event ------------------- - -When link status changes on a physical port, this event is generated. - - field width description - --------------------------------------------------- - INFO - PPORT 4 Physical port - LINKUP 1 Link status: - 0: down - 1: up - -MAC VLAN Seen Event -------------------- - -When a packet ingresses on a port and the source MAC/VLAN isn't known to the -device, the device will generate this event. In response to the event, the -driver should install to the device the MAC/VLAN on the port into the bridge -table. Once installed, the MAC/VLAN is known on the port and this event will -no longer be generated. - - field width description - --------------------------------------------------- - INFO - PPORT 4 Physical port - MAC 6 MAC address - VLAN 2 VLAN ID - - -SECTION 9: CPU Packet Processing -================================ - -Ingress packets directed to the host CPU for further processing are delivered -in the DMA RX ring. Likewise, host CPU originating packets destined to egress -on switch ports are scheduled by software using the DMA TX ring. - -Tx Packet Processing --------------------- - -Software schedules packets for egress on switch ports using the DMA TX ring. A -TX descriptor buffer describes the packet location and size in host DMA-able -memory, the destination port, and any hardware-offload functions (such as L3 -payload checksum offload). Software then bumps the descriptor head to signal -hardware of new Tx work. In response, hardware will DMA read Tx descriptors up -to head, DMA read descriptor buffer and packet data, perform offloading -functions, and finally frame packet on wire (network). Once packet processing -is complete, hardware will writeback status to descriptor(s) to signal to -software that Tx is complete and software resources (e.g. skb) backing packet -can be released. - -Figure 2 shows an example 3-fragment packet queued with one Tx descriptor. A -TLV is used for each packet fragment. - - pkt frag 1 - +–––––––+ +–+ - +–––+ | | - desc buf | | | | - +––––––––+ | | | | - Tx ring +–––+ +–––––+ | | | - +–––––––––+ | | TLVs | +–––––––+ | - | +–––+ +––––––––+ pkt frag 2 | - | desc 0 | | +–––––+ +–––––––+ | - +–––––––––+ | TLVs | +–––+ | | - head+–+ | +––––––––+ | | | - | desc 1 | | +–––––+ +–––––––+ |pkt - +–––––––––+ | TLVs | | | - | | +––––––––+ | pkt frag 3 | - | | | +–––––––+ | - +–––––––––+ +–––+ | | - | | | | | - | | | | | - +–––––––––+ | | | - | | | | | - | | | | | - +–––––––––+ | | | - | | +–––––––+ +–+ - | | - +–––––––––+ - - fig 2. 
- -The TLVs for Tx descriptor buffer are: - - field width description - --------------------------------------------------------------------- - PPORT 4 Destination physical port # - TX_OFFLOAD 1 Hardware offload modes: - 0: no offload - 1: insert IP csum (ipv4 only) - 2: insert TCP/UDP csum - 3: L3 csum calc and insert - into csum offset (TX_L3_CSUM_OFF) - 16-bit 1's complement csum value. - IPv4 pseudo-header and IP - already calculated by OS - and inserted. - 4: TSO (TCP Segmentation Offload) - TX_L3_CSUM_OFF 2 For L3 csum offload mode, the offset, - from the beginning of the packet, - of the csum field in the L3 header - TX_TSO_MSS 2 For TSO offload mode, the - Maximum Segment Size in bytes - TX_TSO_HDR_LEN 2 For TSO offload mode, the - length of ethernet, IP, and - TCP/UDP headers, including IP - and TCP options. - TX_FRAGS Packet fragments - TX_FRAG Packet fragment - TX_FRAG_ADDR 8 DMA address of packet fragment - TX_FRAG_LEN 2 Packet fragment length - -Possible status return codes in descriptor on completion are: - - DESC_COMP_ERR reason - -------------------------------------------------------------------- - 0 OK - -ROCKER_ENXIO address or data read err on desc buf or packet - fragment - -ROCKER_EINVAL bad pport or TSO or csum offloading error - -ROCKER_ENOMEM no memory for internal staging tx fragment - -Rx Packet Processing --------------------- - -For packets ingressing on switch ports that are not forwarded by the switch but -rather directed to the host CPU for further processing are delivered in the DMA -RX ring. Rx descriptor buffers are allocated by software and placed on the -ring. Hardware will fill Rx descriptor buffers with packet data, write the -completion, and signal to software that a new packet is ready. Since Rx packet -size is not known a-priori, the Rx descriptor buffer must be allocated for -worst-case packet size. A single Rx descriptor will contain the entire Rx -packet data in one RX_FRAG. Other Rx TLVs describe and hardware offloads -performed on the packet, such as checksum validation. - -The TLVs for Rx descriptor buffer are: - - field width description - --------------------------------------------------- - PPORT 4 Source physical port # - RX_FLAGS 2 Packet parsing flags: - (1 << 0): IPv4 packet - (1 << 1): IPv6 packet - (1 << 2): csum calculated - (1 << 3): IPv4 csum good - (1 << 4): IP fragment - (1 << 5): TCP packet - (1 << 6): UDP packet - (1 << 7): TCP/UDP csum good - (1 << 8): Offload forward - RX_CSUM 2 IP calculated checksum: - IPv4: IP payload csum - IPv6: header and payload csum - (Only valid is RX_FLAGS:csum calc is set) - RX_FRAG_ADDR 8 DMA address of packet fragment - RX_FRAG_MAX_LEN 2 Packet maximum fragment length - RX_FRAG_LEN 2 Actual packet fragment length after receive - -Offload forward RX_FLAG indicates the device has already forwarded the packet -so the host CPU should not also forward the packet. - -Possible status return codes in descriptor on completion are: - - DESC_COMP_ERR reason - -------------------------------------------------------------------- - 0 OK - -ROCKER_ENXIO address or data read err on desc buf - -ROCKER_ENOMEM no memory for internal staging desc buf - -ROCKER_EMSGSIZE Rx descriptor buffer wasn't big enough to contain - packet data TLV and other TLVs. - - -SECTION 10: OF-DPA Mode -====================== - -OF-DPA mode allows the switch to offload flow packet processing functions to -hardware. An OpenFlow controller would communicate with an OpenFlow agent -installed on the switch. 
The OpenFlow agent would (directly or indirectly) -communicate with the Rocker switch driver, which in turn would program switch -hardware with flow functionality, as defined in OF-DPA. The block diagram is: - - +–––––––––––––––----–––+ - | OF | - | Remote Controller | - +––––––––+––----–––––––+ - | - | - +––––––––+–––––––––+ - | OF | - | Local Agent | - +––––––––––––––––––+ - | | - | Rocker Driver | - +––––––––––––––––––+ - - +––––––––––––––––––+ - | | - | Rocker Switch | - +––––––––––––––––––+ - -To participate in flow functions, ports must be configure for OF-DPA mode -during switch initialization. - -OF-DPA Flow Table Interface ---------------------------- - -There are commands to add, modify, delete, and get stats of flow table entries. -The commands are issued using the DMA CMD descriptor ring. The following -commands are defined: - - CMD_ADD: add an entry to flow table - CMD_MOD: modify an entry in flow table - CMD_DEL: delete an entry from flow table - CMD_GET_STATS: get stats for flow entry - -TLVs for add and modify commands are: - - field width description - ---------------------------------------------------- - OF_DPA_CMD 2 CMD_[ADD|MOD] - OF_DPA_TBL 2 Flow table ID - 0: ingress port - 10: vlan - 20: termination mac - 30: unicast routing - 40: multicast routing - 50: bridging - 60: ACL policy - OF_DPA_PRIORITY 4 Flow priority - OF_DPA_HARDTIME 4 Hard timeout for flow - OF_DPA_IDLETIME 4 Idle timeout for flow - OF_DPA_COOKIE 8 Cookie - -Additional TLVs based on flow table ID: - -Table ID 0: ingress port - - field width description - ---------------------------------------------------- - OF_DPA_IN_PPORT 4 ingress physical port number - OF_DPA_GOTO_TBL 2 goto table ID; zero to drop - -Table ID 10: vlan - - field width description - ---------------------------------------------------- - OF_DPA_IN_PPORT 4 ingress physical port number - OF_DPA_VLAN_ID 2 (N) vlan ID - OF_DPA_VLAN_ID_MASK 2 (N) vlan ID mask - OF_DPA_GOTO_TBL 2 goto table ID; zero to drop - OF_DPA_NEW_VLAN_ID 2 (N) new vlan ID - -Table ID 20: termination mac - - field width description - ---------------------------------------------------- - OF_DPA_IN_PPORT 4 ingress physical port number - OF_DPA_IN_PPORT_MASK 4 ingress physical port number mask - OF_DPA_ETHERTYPE 2 (N) must be either 0x0800 or 0x86dd - OF_DPA_DST_MAC 6 (N) destination MAC - OF_DPA_DST_MAC_MASK 6 (N) destination MAC mask - OF_DPA_VLAN_ID 2 (N) vlan ID - OF_DPA_VLAN_ID_MASK 2 (N) vlan ID mask - OF_DPA_GOTO_TBL 2 only acceptable values are - unicast or multicast routing - table IDs - OF_DPA_OUT_PPORT 2 if specified, must be - controller, set zero otherwise - -Table ID 30: unicast routing - - field width description - ---------------------------------------------------- - OF_DPA_ETHERTYPE 2 (N) must be either 0x0800 or 0x86dd - OF_DPA_DST_IP 4 (N) destination IPv4 address. - Must be unicast address - OF_DPA_DST_IP_MASK 4 (N) IP mask. Must be prefix mask - OF_DPA_DST_IPV6 16 (N) destination IPv6 address. - Must be unicast address - OF_DPA_DST_IPV6_MASK 16 (N) IPv6 mask. Must be prefix mask - OF_DPA_GOTO_TBL 2 goto table ID; zero to drop - OF_DPA_GROUP_ID 4 data for GROUP action must - be an L3 Unicast group entry - -Table ID 40: multicast routing - - field width description - ---------------------------------------------------- - OF_DPA_ETHERTYPE 2 (N) must be either 0x0800 or 0x86dd - OF_DPA_VLAN_ID 2 (N) vlan ID - OF_DPA_SRC_IP 4 (N) source IPv4. 
Optional, - can contain IPv4 address, - must be completely masked - if not used - OF_DPA_SRC_IP_MASK 4 (N) IP Mask - OF_DPA_DST_IP 4 (N) destination IPv4 address. - Must be multicast address - OF_DPA_SRC_IPV6 16 (N) source IPv6 Address. Optional. - Can contain IPv6 address, - must be completely masked - if not used - OF_DPA_SRC_IPV6_MASK 16 (N) IPv6 mask. - OF_DPA_DST_IPV6 16 (N) destination IPv6 Address. Must - be multicast address - Must be multicast address - OF_DPA_GOTO_TBL 2 goto table ID; zero to drop - OF_DPA_GROUP_ID 4 data for GROUP action must - be an L3 multicast group entry - -Table ID 50: bridging - - field width description - ---------------------------------------------------- - OF_DPA_VLAN_ID 2 (N) vlan ID - OF_DPA_TUNNEL_ID 4 tunnel ID - OF_DPA_DST_MAC 6 (N) destination MAC - OF_DPA_DST_MAC_MASK 6 (N) destination MAC mask - OF_DPA_GOTO_TBL 2 goto table ID; zero to drop - OF_DPA_GROUP_ID 4 data for GROUP action must - be a L2 Interface, L2 - Multicast, L2 Flood, - or L2 Overlay group entry - as appropriate - OF_DPA_TUNNEL_LPORT 4 unicast Tenant Bridging - flows specify a tunnel - logical port ID - OF_DPA_OUT_PPORT 2 data for OUTPUT action, - restricted to CONTROLLER, - set to 0 otherwise - -Table ID 60: acl policy - - field width description - ---------------------------------------------------- - OF_DPA_IN_PPORT 4 ingress physical port number - OF_DPA_IN_PPORT_MASK 4 ingress physical port number mask - OF_DPA_ETHERTYPE 2 (N) ethertype - OF_DPA_VLAN_ID 2 (N) vlan ID - OF_DPA_VLAN_ID_MASK 2 (N) vlan ID mask - OF_DPA_VLAN_PCP 2 (N) vlan Priority Code Point - OF_DPA_VLAN_PCP_MASK 2 (N) vlan Priority Code Point mask - OF_DPA_SRC_MAC 6 (N) source MAC - OF_DPA_SRC_MAC_MASK 6 (N) source MAC mask - OF_DPA_DST_MAC 6 (N) destination MAC - OF_DPA_DST_MAC_MASK 6 (N) destination MAC mask - OF_DPA_TUNNEL_ID 4 tunnel ID - OF_DPA_SRC_IP 4 (N) source IPv4. Optional, - can contain IPv4 address, - must be completely masked - if not used - OF_DPA_SRC_IP_MASK 4 (N) IP Mask - OF_DPA_DST_IP 4 (N) destination IPv4 address. - Must be multicast address - OF_DPA_DST_IP_MASK 4 (N) IP Mask - OF_DPA_SRC_IPV6 16 (N) source IPv6 Address. Optional. - Can contain IPv6 address, - must be completely masked - if not used - OF_DPA_SRC_IPV6_MASK 16 (N) IPv6 mask - OF_DPA_DST_IPV6 16 (N) destination IPv6 Address. Must - be multicast address. - OF_DPA_DST_IPV6_MASK 16 (N) IPv6 mask - OF_DPA_SRC_ARP_IP 4 (N) source IPv4 address in the ARP - payload. Only used if ethertype - == 0x0806. 
- OF_DPA_SRC_ARP_IP_MASK 4 (N) IP Mask - OF_DPA_IP_PROTO 1 IP protocol - OF_DPA_IP_PROTO_MASK 1 IP protocol mask - OF_DPA_IP_DSCP 1 DSCP - OF_DPA_IP_DSCP_MASK 1 DSCP mask - OF_DPA_IP_ECN 1 ECN - OF_DPA_IP_ECN_MASK 1 ECN mask - OF_DPA_L4_SRC_PORT 2 (N) L4 source port, only for - TCP, UDP, or SCTP - OF_DPA_L4_SRC_PORT_MASK 2 (N) L4 source port mask - OF_DPA_L4_DST_PORT 2 (N) L4 source port, only for - TCP, UDP, or SCTP - OF_DPA_L4_DST_PORT_MASK 2 (N) L4 source port mask - OF_DPA_ICMP_TYPE 1 ICMP type, only if IP - protocol is 1 - OF_DPA_ICMP_TYPE_MASK 1 ICMP type mask - OF_DPA_ICMP_CODE 1 ICMP code - OF_DPA_ICMP_CODE_MASK 1 ICMP code mask - OF_DPA_IPV6_LABEL 4 (N) IPv6 flow label - OF_DPA_IPV6_LABEL_MASK 4 (N) IPv6 flow label mask - OF_DPA_GROUP_ID 4 data for GROUP action - OF_DPA_QUEUE_ID_ACTION 1 write the queue ID - OF_DPA_NEW_QUEUE_ID 1 queue ID - OF_DPA_VLAN_PCP_ACTION 1 write the VLAN priority - OF_DPA_NEW_VLAN_PCP 1 VLAN priority - OF_DPA_IP_DSCP_ACTION 1 write the DSCP - OF_DPA_NEW_IP_DSCP 1 new DSCP - OF_DPA_TUNNEL_LPORT 4 restrct to valid tunnel - logical port, set to 0 - otherwise. - OF_DPA_OUT_PPORT 2 data for OUTPUT action, - restricted to CONTROLLER, - set to 0 otherwise - OF_DPA_CLEAR_ACTIONS 4 if 1 packets matching flow are - dropped (all other instructions - ignored) - -TLVs for flow delete and get stats command are: - - field width description - --------------------------------------------------- - OF_DPA_CMD 2 CMD_[DEL|GET_STATS] - OF_DPA_COOKIE 8 Cookie - -On completion of get stats command, the descriptor buffer is written back with -the following TLVs: - - field width description - --------------------------------------------------- - OF_DPA_STAT_DURATION 4 Flow duration - OF_DPA_STAT_RX_PKTS 8 Received packets - OF_DPA_STAT_TX_PKTS 8 Transmit packets - -Possible status return codes in descriptor on completion are: - - DESC_COMP_ERR command reason - -------------------------------------------------------------------- - 0 all OK - -ROCKER_EFAULT all head or tail index outside - of ring - -ROCKER_ENXIO all address or data read err on - desc buf - -ROCKER_EMSGSIZE GET_STATS cmd descriptor buffer wasn't - big enough to contain write-back - TLVs - -ROCKER_EINVAL all invalid parameters passed in - -ROCKER_EEXIST ADD entry already exists - -ROCKER_ENOSPC ADD no space left in flow table - -ROCKER_ENOENT MOD|DEL|GET_STATS cookie invalid - -Group Table Interface ---------------------- - -There are commands to add, modify, delete, and get stats of group table -entries. The commands are issued using the DMA CMD descriptor ring. 
The -following commands are defined: - - CMD_ADD: add an entry to group table - CMD_MOD: modify an entry in group table - CMD_DEL: delete an entry from group table - CMD_GET_STATS: get stats for group entry - -TLVs for add and modify commands are: - - field width description - ----------------------------------------------------------- - FLOW_GROUP_CMD 2 CMD_[ADD|MOD] - FLOW_GROUP_ID 2 Flow group ID - FLOW_GROUP_TYPE 1 Group type: - 0: L2 interface - 1: L2 rewrite - 2: L3 unicast - 3: L2 multicast - 4: L2 flood - 5: L3 interface - 6: L3 multicast - 7: L3 ECMP - 8: L2 overlay - FLOW_VLAN_ID 2 Vlan ID (types 0, 3, 4, 6) - FLOW_L2_PORT 2 Port (types 0) - FLOW_INDEX 4 Index (all types but 0) - FLOW_OVERLAY_TYPE 1 Overlay sub-type (type 8): - 0: Flood unicast tunnel - 1: Flood multicast tunnel - 2: Multicast unicast tunnel - 3: Multicast multicast tunnel - FLOW_GROUP_ACTION nest - FLOW_GROUP_ID 2 next group ID in chain (all - types except 0) - FLOW_OUT_PORT 4 egress port (types 0, 8) - FLOW_POP_VLAN_TAG 1 strip outer VLAN tag (type 1 - only) - FLOW_VLAN_ID 2 (types 1, 5) - FLOW_SRC_MAC 6 (types 1, 2, 5) - FLOW_DST_MAC 6 (types 1, 2) - -TLVs for flow delete and get stats command are: - - field width description - ----------------------------------------------------------- - FLOW_GROUP_CMD 2 CMD_[DEL|GET_STATS] - FLOW_GROUP_ID 2 Flow group ID - -On completion of get stats command, the descriptor buffer is written back with -the following TLVs: - - field width description - --------------------------------------------------- - FLOW_GROUP_ID 2 Flow group ID - FLOW_STAT_DURATION 4 Flow duration - FLOW_STAT_REF_COUNT 4 Flow reference count - FLOW_STAT_BUCKET_COUNT 4 Flow bucket count - -Possible status return codes in descriptor on completion are: - - DESC_COMP_ERR command reason - -------------------------------------------------------------------- - 0 all OK - -ROCKER_EFAULT all head or tail index outside - of ring - -ROCKER_ENXIO all address or data read err on - desc buf - -ROCKER_ENOSPC GET_STATS cmd descriptor buffer wasn't - big enough to contain write-back - TLVs - -ROCKER_EINVAL ADD|MOD invalid parameters passed in - -ROCKER_EEXIST ADD entry already exists - -ROCKER_ENOSPC ADD no space left in flow table - -ROCKER_ENOENT MOD|DEL|GET_STATS group ID invalid - -ROCKER_EBUSY DEL group reference count non-zero - -ROCKER_ENODEV ADD next group ID doesn't exist - - - -References -========== - -[1] OpenFlow Data Plane Abstraction (OF-DPA) Abstract Switch Specification, -Version 1.0, from Broadcom Corporation, February 21, 2014. diff --git a/docs/specs/tpm.rst b/docs/specs/tpm.rst index 1ad36ad7099..b630a351b4f 100644 --- a/docs/specs/tpm.rst +++ b/docs/specs/tpm.rst @@ -205,8 +205,8 @@ to be used with the passthrough backend or the swtpm backend. QEMU files related to TPM backends: - ``backends/tpm.c`` - - ``include/sysemu/tpm.h`` - - ``include/sysemu/tpm_backend.h`` + - ``include/system/tpm.h`` + - ``include/system/tpm_backend.h`` The QEMU TPM passthrough device ------------------------------- @@ -240,7 +240,7 @@ PCRs. QEMU files related to the TPM passthrough device: - ``backends/tpm/tpm_passthrough.c`` - ``backends/tpm/tpm_util.c`` - - ``include/sysemu/tpm_util.h`` + - ``include/system/tpm_util.h`` Command line to start QEMU with the TPM passthrough device using the host's @@ -301,7 +301,7 @@ command. 
QEMU files related to the TPM emulator device: - ``backends/tpm/tpm_emulator.c`` - ``backends/tpm/tpm_util.c`` - - ``include/sysemu/tpm_util.h`` + - ``include/system/tpm_util.h`` The following commands start the swtpm with a UnixIO control channel over a socket interface. They do not need to be run as root. diff --git a/docs/sphinx-static/theme_overrides.css b/docs/sphinx-static/theme_overrides.css index 965ecac54fd..b225bf706f5 100644 --- a/docs/sphinx-static/theme_overrides.css +++ b/docs/sphinx-static/theme_overrides.css @@ -18,8 +18,8 @@ h1, h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend { .rst-content dl:not(.docutils) dt { border-top: none; - border-left: solid 3px #ccc; - background-color: #f0f0f0; + border-left: solid 5px #bcc6d2; + background-color: #eaedf1; color: black; } @@ -208,3 +208,97 @@ div[class^="highlight"] pre { color: inherit; } } + +/* QAPI domain theming */ + +/* most content in a QAPI object definition should not eclipse about + 80ch, but nested field lists are explicitly exempt due to their + two-column nature */ +.qapi dd *:not(dl) { + max-width: 80ch; +} + +/* but the content column itself should still be less than ~80ch. */ +.qapi .field-list dd { + max-width: 80ch; +} + +.qapi-infopips { + margin-bottom: 1em; +} + +.qapi-infopip { + display: inline-block; + padding: 0em 0.5em 0em 0.5em; + margin: 0.25em; +} + +.qapi-deprecated,.qapi-unstable { + background-color: #fffef5; + border: solid #fff176 6px; + font-weight: bold; + padding: 8px; + border-radius: 15px; + margin: 5px; +} + +.qapi-unstable::before { + content: '🚧 '; +} + +.qapi-deprecated::before { + content: '⚠️ '; +} + +.qapi-ifcond::before { + /* gaze ye into the crystal ball to determine feature availability */ + content: '🔮 '; +} + +.qapi-ifcond { + background-color: #f9f5ff; + border: solid #dac2ff 6px; + padding: 8px; + border-radius: 15px; + margin: 5px; +} + +/* code blocks */ +.qapi div[class^="highlight"] { + width: fit-content; + background-color: #fffafd; + border: 2px solid #ffe1f3; +} + +/* note, warning, etc. */ +.qapi .admonition { + width: fit-content; +} + +/* pad the top of the field-list so the text doesn't start directly at + the top border; primarily for the field list labels, but adjust the + field bodies as well for parity. 
*/ +dl.field-list > dt:first-of-type, dl.field-list > dd:first-of-type { + padding-top: 0.3em; +} + +dl.field-list > dt:last-of-type, dl.field-list > dd:last-of-type { + padding-bottom: 0.3em; +} + +/* pad the field list labels so they don't crash into the border */ +dl.field-list > dt { + padding-left: 0.5em; + padding-right: 0.5em; +} + +/* Add a little padding between field list sections */ +dl.field-list > dd:not(:last-child) { + padding-bottom: 1em; +} + +/* Sphinx 3.x: unresolved xrefs */ +.rst-content *:not(a) > code.xref { + font-weight: 400; + color: #333333; +} diff --git a/docs/sphinx/compat.py b/docs/sphinx/compat.py new file mode 100644 index 00000000000..9cf7fe006e4 --- /dev/null +++ b/docs/sphinx/compat.py @@ -0,0 +1,230 @@ +""" +Sphinx cross-version compatibility goop +""" + +import re +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Optional, + Type, +) + +from docutils import nodes +from docutils.nodes import Element, Node, Text +from docutils.statemachine import StringList + +import sphinx +from sphinx import addnodes, util +from sphinx.directives import ObjectDescription +from sphinx.environment import BuildEnvironment +from sphinx.roles import XRefRole +from sphinx.util import docfields +from sphinx.util.docutils import ( + ReferenceRole, + SphinxDirective, + switch_source_input, +) +from sphinx.util.typing import TextlikeNode + + +MAKE_XREF_WORKAROUND = sphinx.version_info[:3] < (4, 1, 0) + + +SpaceNode: Callable[[str], Node] +KeywordNode: Callable[[str, str], Node] + +if sphinx.version_info[:3] >= (4, 0, 0): + SpaceNode = addnodes.desc_sig_space + KeywordNode = addnodes.desc_sig_keyword +else: + SpaceNode = Text + KeywordNode = addnodes.desc_annotation + + +def nested_parse_with_titles( + directive: SphinxDirective, content_node: Element +) -> None: + """ + This helper preserves error parsing context across sphinx versions. + """ + + # necessary so that the child nodes get the right source/line set + content_node.document = directive.state.document + + try: + # Modern sphinx (6.2.0+) supports proper offsetting for + # nested parse error context management + util.nodes.nested_parse_with_titles( + directive.state, + directive.content, + content_node, + content_offset=directive.content_offset, + ) + except TypeError: + # No content_offset argument. Fall back to SSI method. + with switch_source_input(directive.state, directive.content): + util.nodes.nested_parse_with_titles( + directive.state, directive.content, content_node + ) + + +# ########################################### +# xref compatibility hacks for Sphinx < 4.1 # +# ########################################### + +# When we require >= Sphinx 4.1, the following function and the +# subsequent 3 compatibility classes can be removed. Anywhere in +# qapi_domain that uses one of these Compat* types can be switched to +# using the garden-variety lib-provided classes with no trickery. + + +def _compat_make_xref( # pylint: disable=unused-argument + self: sphinx.util.docfields.Field, + rolename: str, + domain: str, + target: str, + innernode: Type[TextlikeNode] = addnodes.literal_emphasis, + contnode: Optional[Node] = None, + env: Optional[BuildEnvironment] = None, + inliner: Any = None, + location: Any = None, +) -> Node: + """ + Compatibility workaround for Sphinx versions prior to 4.1.0. 
+ + Older sphinx versions do not use the domain's XRefRole for parsing + and formatting cross-references, so we need to perform this magick + ourselves to avoid needing to write the parser/formatter in two + separate places. + + This workaround isn't brick-for-brick compatible with modern Sphinx + versions, because we do not have access to the parent directive's + state during this parsing like we do in more modern versions. + + It's no worse than what pre-Sphinx 4.1.0 does, so... oh well! + """ + + # Yes, this function is gross. Pre-4.1 support is a miracle. + # pylint: disable=too-many-locals + + assert env + # Note: Sphinx's own code ignores the type warning here, too. + if not rolename: + return contnode or innernode(target, target) # type: ignore[call-arg] + + # Get the role instance, but don't *execute it* - we lack the + # correct state to do so. Instead, we'll just use its public + # methods to do our reference formatting, and emulate the rest. + role = env.get_domain(domain).roles[rolename] + assert isinstance(role, XRefRole) + + # XRefRole features not supported by this compatibility shim; + # these were not supported in Sphinx 3.x either, so nothing of + # value is really lost. + assert not target.startswith("!") + assert not re.match(ReferenceRole.explicit_title_re, target) + assert not role.lowercase + assert not role.fix_parens + + # Code below based mostly on sphinx.roles.XRefRole; run() and + # create_xref_node() + options = { + "refdoc": env.docname, + "refdomain": domain, + "reftype": rolename, + "refexplicit": False, + "refwarn": role.warn_dangling, + } + refnode = role.nodeclass(target, **options) + title, target = role.process_link(env, refnode, False, target, target) + refnode["reftarget"] = target + classes = ["xref", domain, f"{domain}-{rolename}"] + refnode += role.innernodeclass(target, title, classes=classes) + + # This is the very gross part of the hack. Normally, + # result_nodes takes a document object to which we would pass + # self.inliner.document. Prior to Sphinx 4.1, we don't *have* an + # inliner to pass, so we have nothing to pass here. However, the + # actual implementation of role.result_nodes in this case + # doesn't actually use that argument, so this winds up being + # ... fine. Rest easy at night knowing this code only runs under + # old versions of Sphinx, so at least it won't change in the + # future on us and lead to surprising new failures. + # Gross, I know. + result_nodes, _messages = role.result_nodes( + None, # type: ignore + env, + refnode, + is_ref=True, + ) + return nodes.inline(target, "", *result_nodes) + + +class CompatField(docfields.Field): + if MAKE_XREF_WORKAROUND: + make_xref = _compat_make_xref + + +class CompatGroupedField(docfields.GroupedField): + if MAKE_XREF_WORKAROUND: + make_xref = _compat_make_xref + + +class CompatTypedField(docfields.TypedField): + if MAKE_XREF_WORKAROUND: + make_xref = _compat_make_xref + + +# ################################################################ +# Nested parsing error location fix for Sphinx 5.3.0 < x < 6.2.0 # +# ################################################################ + +# When we require Sphinx 4.x, the TYPE_CHECKING hack where we avoid +# subscripting ObjectDescription at runtime can be removed in favor of +# just always subscripting the class. + +# When we require Sphinx > 6.2.0, the rest of this compatibility hack +# can be dropped and QAPIObject can just inherit directly from +# ObjectDescription[Signature]. 
+ +SOURCE_LOCATION_FIX = (5, 3, 0) <= sphinx.version_info[:3] < (6, 2, 0) + +Signature = str + + +if TYPE_CHECKING: + _BaseClass = ObjectDescription[Signature] +else: + _BaseClass = ObjectDescription + + +class ParserFix(_BaseClass): + + _temp_content: StringList + _temp_offset: int + _temp_node: Optional[addnodes.desc_content] + + def before_content(self) -> None: + # Work around a sphinx bug and parse the content ourselves. + self._temp_content = self.content + self._temp_offset = self.content_offset + self._temp_node = None + + if SOURCE_LOCATION_FIX: + self._temp_node = addnodes.desc_content() + self.state.nested_parse( + self.content, self.content_offset, self._temp_node + ) + # Sphinx will try to parse the content block itself, + # Give it nothingness to parse instead. + self.content = StringList() + self.content_offset = 0 + + def transform_content(self, content_node: addnodes.desc_content) -> None: + # Sphinx workaround: Inject our parsed content and restore state. + if self._temp_node: + content_node += self._temp_node.children + self.content = self._temp_content + self.content_offset = self._temp_offset diff --git a/docs/sphinx/depfile.py b/docs/sphinx/depfile.py index e74be6af98b..d3c774d28b1 100644 --- a/docs/sphinx/depfile.py +++ b/docs/sphinx/depfile.py @@ -31,6 +31,9 @@ def get_infiles(env): for path in Path(static_path).rglob('*'): yield str(path) + # also include kdoc script + yield str(env.config.kerneldoc_bin[1]) + def write_depfile(app, exception): if exception: diff --git a/docs/sphinx/qapi_domain.py b/docs/sphinx/qapi_domain.py new file mode 100644 index 00000000000..f561dc465f8 --- /dev/null +++ b/docs/sphinx/qapi_domain.py @@ -0,0 +1,1063 @@ +""" +QAPI domain extension. +""" + +# The best laid plans of mice and men, ... +# pylint: disable=too-many-lines + +from __future__ import annotations + +import re +import types +from typing import ( + TYPE_CHECKING, + List, + NamedTuple, + Tuple, + Type, + cast, +) + +from docutils import nodes +from docutils.parsers.rst import directives +from sphinx import addnodes +from sphinx.directives import ObjectDescription +from sphinx.domains import ( + Domain, + Index, + IndexEntry, + ObjType, +) +from sphinx.locale import _, __ +from sphinx.roles import XRefRole +from sphinx.util import logging +from sphinx.util.docutils import SphinxDirective +from sphinx.util.nodes import make_id, make_refnode + +from compat import ( + CompatField, + CompatGroupedField, + CompatTypedField, + KeywordNode, + ParserFix, + Signature, + SpaceNode, +) + + +if TYPE_CHECKING: + from typing import ( + AbstractSet, + Any, + Dict, + Iterable, + Optional, + Union, + ) + + from docutils.nodes import Element, Node + from sphinx.addnodes import desc_signature, pending_xref + from sphinx.application import Sphinx + from sphinx.builders import Builder + from sphinx.environment import BuildEnvironment + from sphinx.util.typing import OptionSpec + + +logger = logging.getLogger(__name__) + + +def _unpack_field( + field: nodes.Node, +) -> Tuple[nodes.field_name, nodes.field_body]: + """ + docutils helper: unpack a field node in a type-safe manner. 
+ """ + assert isinstance(field, nodes.field) + assert len(field.children) == 2 + assert isinstance(field.children[0], nodes.field_name) + assert isinstance(field.children[1], nodes.field_body) + return (field.children[0], field.children[1]) + + +class ObjectEntry(NamedTuple): + docname: str + node_id: str + objtype: str + aliased: bool + + +class QAPIXRefRole(XRefRole): + + def process_link( + self, + env: BuildEnvironment, + refnode: Element, + has_explicit_title: bool, + title: str, + target: str, + ) -> tuple[str, str]: + refnode["qapi:namespace"] = env.ref_context.get("qapi:namespace") + refnode["qapi:module"] = env.ref_context.get("qapi:module") + + # Cross-references that begin with a tilde adjust the title to + # only show the reference without a leading module, even if one + # was provided. This is a Sphinx-standard syntax; give it + # priority over QAPI-specific type markup below. + hide_module = False + if target.startswith("~"): + hide_module = True + target = target[1:] + + # Type names that end with "?" are considered optional + # arguments and should be documented as such, but it's not + # part of the xref itself. + if target.endswith("?"): + refnode["qapi:optional"] = True + target = target[:-1] + + # Type names wrapped in brackets denote lists. strip the + # brackets and remember to add them back later. + if target.startswith("[") and target.endswith("]"): + refnode["qapi:array"] = True + target = target[1:-1] + + if has_explicit_title: + # Don't mess with the title at all if it was explicitly set. + # Explicit title syntax for references is e.g. + # :qapi:type:`target ` + # and this explicit title overrides everything else here. + return title, target + + title = target + if hide_module: + title = target.split(".")[-1] + + return title, target + + def result_nodes( + self, + document: nodes.document, + env: BuildEnvironment, + node: Element, + is_ref: bool, + ) -> Tuple[List[nodes.Node], List[nodes.system_message]]: + + # node here is the pending_xref node (or whatever nodeclass was + # configured at XRefRole class instantiation time). + results: List[nodes.Node] = [node] + + if node.get("qapi:array"): + results.insert(0, nodes.literal("[", "[")) + results.append(nodes.literal("]", "]")) + + if node.get("qapi:optional"): + results.append(nodes.Text(", ")) + results.append(nodes.emphasis("?", "optional")) + + return results, [] + + +class QAPIDescription(ParserFix): + """ + Generic QAPI description. + + This is meant to be an abstract class, not instantiated + directly. This class handles the abstract details of indexing, the + TOC, and reference targets for QAPI descriptions. + """ + + def handle_signature(self, sig: str, signode: desc_signature) -> Signature: + # pylint: disable=unused-argument + + # Do nothing. The return value here is the "name" of the entity + # being documented; for QAPI, this is the same as the + # "signature", which is just a name. + + # Normally this method must also populate signode with nodes to + # render the signature; here we do nothing instead - the + # subclasses will handle this. + return sig + + def get_index_text(self, name: Signature) -> Tuple[str, str]: + """Return the text for the index entry of the object.""" + + # NB: this is used for the global index, not the QAPI index. 
+ return ("single", f"{name} (QMP {self.objtype})") + + def _get_context(self) -> Tuple[str, str]: + namespace = self.options.get( + "namespace", self.env.ref_context.get("qapi:namespace", "") + ) + modname = self.options.get( + "module", self.env.ref_context.get("qapi:module", "") + ) + + return namespace, modname + + def _get_fqn(self, name: Signature) -> str: + namespace, modname = self._get_context() + + # If we're documenting a module, don't include the module as + # part of the FQN; we ARE the module! + if self.objtype == "module": + modname = "" + + if modname: + name = f"{modname}.{name}" + if namespace: + name = f"{namespace}:{name}" + return name + + def add_target_and_index( + self, name: Signature, sig: str, signode: desc_signature + ) -> None: + # pylint: disable=unused-argument + + # name is the return value of handle_signature. + # sig is the original, raw text argument to handle_signature. + # For QAPI, these are identical, currently. + + assert self.objtype + + if not (fullname := signode.get("fullname", "")): + fullname = self._get_fqn(name) + + node_id = make_id( + self.env, self.state.document, self.objtype, fullname + ) + signode["ids"].append(node_id) + + self.state.document.note_explicit_target(signode) + domain = cast(QAPIDomain, self.env.get_domain("qapi")) + domain.note_object(fullname, self.objtype, node_id, location=signode) + + if "no-index-entry" not in self.options: + arity, indextext = self.get_index_text(name) + assert self.indexnode is not None + if indextext: + self.indexnode["entries"].append( + (arity, indextext, node_id, "", None) + ) + + @staticmethod + def split_fqn(name: str) -> Tuple[str, str, str]: + if ":" in name: + ns, name = name.split(":") + else: + ns = "" + + if "." in name: + module, name = name.split(".") + else: + module = "" + + return (ns, module, name) + + def _object_hierarchy_parts( + self, sig_node: desc_signature + ) -> Tuple[str, ...]: + if "fullname" not in sig_node: + return () + return self.split_fqn(sig_node["fullname"]) + + def _toc_entry_name(self, sig_node: desc_signature) -> str: + # This controls the name in the TOC and on the sidebar. + + # This is the return type of _object_hierarchy_parts(). + toc_parts = cast(Tuple[str, ...], sig_node.get("_toc_parts", ())) + if not toc_parts: + return "" + + config = self.env.app.config + namespace, modname, name = toc_parts + + if config.toc_object_entries_show_parents == "domain": + ret = name + if modname and modname != self.env.ref_context.get( + "qapi:module", "" + ): + ret = f"{modname}.{name}" + if namespace and namespace != self.env.ref_context.get( + "qapi:namespace", "" + ): + ret = f"{namespace}:{ret}" + return ret + if config.toc_object_entries_show_parents == "hide": + return name + if config.toc_object_entries_show_parents == "all": + return sig_node.get("fullname", name) + return "" + + +class QAPIObject(QAPIDescription): + """ + Description of a generic QAPI object. + + It's not used directly, but is instead subclassed by specific directives. 
+ """ + + # Inherit some standard options from Sphinx's ObjectDescription + option_spec: OptionSpec = ( # type:ignore[misc] + ObjectDescription.option_spec.copy() + ) + option_spec.update( + { + # Context overrides: + "namespace": directives.unchanged, + "module": directives.unchanged, + # These are QAPI originals: + "since": directives.unchanged, + "ifcond": directives.unchanged, + "deprecated": directives.flag, + "unstable": directives.flag, + } + ) + + doc_field_types = [ + # :feat name: descr + CompatGroupedField( + "feature", + label=_("Features"), + names=("feat",), + can_collapse=False, + ), + ] + + def get_signature_prefix(self) -> List[nodes.Node]: + """Return a prefix to put before the object name in the signature.""" + assert self.objtype + return [ + KeywordNode("", self.objtype.title()), + SpaceNode(" "), + ] + + def get_signature_suffix(self) -> List[nodes.Node]: + """Return a suffix to put after the object name in the signature.""" + ret: List[nodes.Node] = [] + + if "since" in self.options: + ret += [ + SpaceNode(" "), + addnodes.desc_sig_element( + "", f"(Since: {self.options['since']})" + ), + ] + + return ret + + def handle_signature(self, sig: str, signode: desc_signature) -> Signature: + """ + Transform a QAPI definition name into RST nodes. + + This method was originally intended for handling function + signatures. In the QAPI domain, however, we only pass the + definition name as the directive argument and handle everything + else in the content body with field lists. + + As such, the only argument here is "sig", which is just the QAPI + definition name. + """ + # No module or domain info allowed in the signature! + assert ":" not in sig + assert "." not in sig + + namespace, modname = self._get_context() + signode["fullname"] = self._get_fqn(sig) + signode["namespace"] = namespace + signode["module"] = modname + + sig_prefix = self.get_signature_prefix() + if sig_prefix: + signode += addnodes.desc_annotation( + str(sig_prefix), "", *sig_prefix + ) + signode += addnodes.desc_name(sig, sig) + signode += self.get_signature_suffix() + + return sig + + def _add_infopips(self, contentnode: addnodes.desc_content) -> None: + # Add various eye-catches and things that go below the signature + # bar, but precede the user-defined content. 
+ infopips = nodes.container() + infopips.attributes["classes"].append("qapi-infopips") + + def _add_pip( + source: str, content: Union[str, List[nodes.Node]], classname: str + ) -> None: + node = nodes.container(source) + if isinstance(content, str): + node.append(nodes.Text(content)) + else: + node.extend(content) + node.attributes["classes"].extend(["qapi-infopip", classname]) + infopips.append(node) + + if "deprecated" in self.options: + _add_pip( + ":deprecated:", + f"This {self.objtype} is deprecated.", + "qapi-deprecated", + ) + + if "unstable" in self.options: + _add_pip( + ":unstable:", + f"This {self.objtype} is unstable/experimental.", + "qapi-unstable", + ) + + if self.options.get("ifcond", ""): + ifcond = self.options["ifcond"] + _add_pip( + f":ifcond: {ifcond}", + [ + nodes.emphasis("", "Availability"), + nodes.Text(": "), + nodes.literal(ifcond, ifcond), + ], + "qapi-ifcond", + ) + + if infopips.children: + contentnode.insert(0, infopips) + + def _validate_field(self, field: nodes.field) -> None: + """Validate field lists in this QAPI Object Description.""" + name, _ = _unpack_field(field) + allowed_fields = set(self.env.app.config.qapi_allowed_fields) + + field_label = name.astext() + if field_label in allowed_fields: + # Explicitly allowed field list name, OK. + return + + try: + # split into field type and argument (if provided) + # e.g. `:arg type name: descr` is + # field_type = "arg", field_arg = "type name". + field_type, field_arg = field_label.split(None, 1) + except ValueError: + # No arguments provided + field_type = field_label + field_arg = "" + + typemap = self.get_field_type_map() + if field_type in typemap: + # This is a special docfield, yet-to-be-processed. Catch + # correct names, but incorrect arguments. This mismatch WILL + # cause Sphinx to render this field incorrectly (without a + # warning), which is never what we want. + typedesc = typemap[field_type][0] + if typedesc.has_arg != bool(field_arg): + msg = f"docfield field list type {field_type!r} " + if typedesc.has_arg: + msg += "requires an argument." + else: + msg += "takes no arguments." + logger.warning(msg, location=field) + else: + # This is unrecognized entirely. It's valid rST to use + # arbitrary fields, but let's ensure the documentation + # writer has done this intentionally. + valid = ", ".join(sorted(set(typemap) | allowed_fields)) + msg = ( + f"Unrecognized field list name {field_label!r}.\n" + f"Valid fields for qapi:{self.objtype} are: {valid}\n" + "\n" + "If this usage is intentional, please add it to " + "'qapi_allowed_fields' in docs/conf.py." + ) + logger.warning(msg, location=field) + + def transform_content(self, content_node: addnodes.desc_content) -> None: + # This hook runs after before_content and the nested parse, but + # before the DocFieldTransformer is executed. + super().transform_content(content_node) + + self._add_infopips(content_node) + + # Validate field lists. + for child in content_node: + if isinstance(child, nodes.field_list): + for field in child.children: + assert isinstance(field, nodes.field) + self._validate_field(field) + + +class SpecialTypedField(CompatTypedField): + def make_field(self, *args: Any, **kwargs: Any) -> nodes.field: + ret = super().make_field(*args, **kwargs) + + # Look for the characteristic " -- " text node that Sphinx + # inserts for each TypedField entry ... 
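+        # (Entries named "q_dummy" are the placeholder fields that
+        # qapidoc.py generates for its "The members of ..." pointers.)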
+ for node in ret.traverse(lambda n: str(n) == " -- "): + par = node.parent + if par.children[0].astext() != "q_dummy": + continue + + # If the first node's text is q_dummy, this is a dummy + # field we want to strip down to just its contents. + del par.children[:-1] + + return ret + + +class QAPICommand(QAPIObject): + """Description of a QAPI Command.""" + + doc_field_types = QAPIObject.doc_field_types.copy() + doc_field_types.extend( + [ + # :arg TypeName ArgName: descr + SpecialTypedField( + "argument", + label=_("Arguments"), + names=("arg",), + typerolename="type", + can_collapse=False, + ), + # :error: descr + CompatField( + "error", + label=_("Errors"), + names=("error", "errors"), + has_arg=False, + ), + # :return TypeName: descr + CompatGroupedField( + "returnvalue", + label=_("Return"), + rolename="type", + names=("return",), + can_collapse=True, + ), + # :return-nodesc: TypeName + CompatField( + "returnvalue", + label=_("Return"), + names=("return-nodesc",), + bodyrolename="type", + has_arg=False, + ), + ] + ) + + +class QAPIEnum(QAPIObject): + """Description of a QAPI Enum.""" + + doc_field_types = QAPIObject.doc_field_types.copy() + doc_field_types.extend( + [ + # :value name: descr + CompatGroupedField( + "value", + label=_("Values"), + names=("value",), + can_collapse=False, + ) + ] + ) + + +class QAPIAlternate(QAPIObject): + """Description of a QAPI Alternate.""" + + doc_field_types = QAPIObject.doc_field_types.copy() + doc_field_types.extend( + [ + # :alt type name: descr + CompatTypedField( + "alternative", + label=_("Alternatives"), + names=("alt",), + typerolename="type", + can_collapse=False, + ), + ] + ) + + +class QAPIObjectWithMembers(QAPIObject): + """Base class for Events/Structs/Unions""" + + doc_field_types = QAPIObject.doc_field_types.copy() + doc_field_types.extend( + [ + # :member type name: descr + SpecialTypedField( + "member", + label=_("Members"), + names=("memb",), + typerolename="type", + can_collapse=False, + ), + ] + ) + + +class QAPIEvent(QAPIObjectWithMembers): + # pylint: disable=too-many-ancestors + """Description of a QAPI Event.""" + + +class QAPIJSONObject(QAPIObjectWithMembers): + # pylint: disable=too-many-ancestors + """Description of a QAPI Object: structs and unions.""" + + +class QAPIModule(QAPIDescription): + """ + Directive to mark description of a new module. + + This directive doesn't generate any special formatting, and is just + a pass-through for the content body. Named section titles are + allowed in the content body. + + Use this directive to create entries for the QAPI module in the + global index and the QAPI index; as well as to associate subsequent + definitions with the module they are defined in for purposes of + search and QAPI index organization. + + :arg: The name of the module. + :opt no-index: Don't add cross-reference targets or index entries. + :opt no-typesetting: Don't render the content body (but preserve any + cross-reference target IDs in the squelched output.) + + Example:: + + .. qapi:module:: block-core + :no-index: + :no-typesetting: + + Lorem ipsum, dolor sit amet ... + """ + + def run(self) -> List[Node]: + modname = self.arguments[0].strip() + self.env.ref_context["qapi:module"] = modname + ret = super().run() + + # ObjectDescription always creates a visible signature bar. We + # want module items to be "invisible", however. 
+ + # Extract the content body of the directive: + assert isinstance(ret[-1], addnodes.desc) + desc_node = ret.pop(-1) + assert isinstance(desc_node.children[1], addnodes.desc_content) + ret.extend(desc_node.children[1].children) + + # Re-home node_ids so anchor refs still work: + node_ids: List[str] + if node_ids := [ + node_id + for el in desc_node.children[0].traverse(nodes.Element) + for node_id in cast(List[str], el.get("ids", ())) + ]: + target_node = nodes.target(ids=node_ids) + ret.insert(1, target_node) + + return ret + + +class QAPINamespace(SphinxDirective): + has_content = False + required_arguments = 1 + + def run(self) -> List[Node]: + namespace = self.arguments[0].strip() + self.env.ref_context["qapi:namespace"] = namespace + + return [] + + +class QAPIIndex(Index): + """ + Index subclass to provide the QAPI definition index. + """ + + # pylint: disable=too-few-public-methods + + name = "index" + localname = _("QAPI Index") + shortname = _("QAPI Index") + namespace = "" + + def generate( + self, + docnames: Optional[Iterable[str]] = None, + ) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]: + assert isinstance(self.domain, QAPIDomain) + content: Dict[str, List[IndexEntry]] = {} + collapse = False + + for objname, obj in self.domain.objects.items(): + if docnames and obj.docname not in docnames: + continue + + ns, _mod, name = QAPIDescription.split_fqn(objname) + + if self.namespace != ns: + continue + + # Add an alphabetical entry: + entries = content.setdefault(name[0].upper(), []) + entries.append( + IndexEntry( + name, 0, obj.docname, obj.node_id, obj.objtype, "", "" + ) + ) + + # Add a categorical entry: + category = obj.objtype.title() + "s" + entries = content.setdefault(category, []) + entries.append( + IndexEntry(name, 0, obj.docname, obj.node_id, "", "", "") + ) + + # Sort entries within each category alphabetically + for category in content: + content[category] = sorted(content[category]) + + # Sort the categories themselves; type names first, ABC entries last. + sorted_content = sorted( + content.items(), + key=lambda x: (len(x[0]) == 1, x[0]), + ) + return sorted_content, collapse + + +class QAPIDomain(Domain): + """QAPI language domain.""" + + name = "qapi" + label = "QAPI" + + # This table associates cross-reference object types (key) with an + # ObjType instance, which defines the valid cross-reference roles + # for each object type. + # + # e.g., the :qapi:type: cross-reference role can refer to enum, + # struct, union, or alternate objects; but :qapi:obj: can refer to + # anything. Each object also gets its own targeted cross-reference role. + object_types: Dict[str, ObjType] = { + "module": ObjType(_("module"), "mod", "any"), + "command": ObjType(_("command"), "cmd", "any"), + "event": ObjType(_("event"), "event", "any"), + "enum": ObjType(_("enum"), "enum", "type", "any"), + "object": ObjType(_("object"), "obj", "type", "any"), + "alternate": ObjType(_("alternate"), "alt", "type", "any"), + } + + # Each of these provides a rST directive, + # e.g. .. qapi:module:: block-core + directives = { + "namespace": QAPINamespace, + "module": QAPIModule, + "command": QAPICommand, + "event": QAPIEvent, + "enum": QAPIEnum, + "object": QAPIJSONObject, + "alternate": QAPIAlternate, + } + + # These are all cross-reference roles; e.g. + # :qapi:cmd:`query-block`. The keys correlate to the names used in + # the object_types table values above. 
+ roles = { + "mod": QAPIXRefRole(), + "cmd": QAPIXRefRole(), + "event": QAPIXRefRole(), + "enum": QAPIXRefRole(), + "obj": QAPIXRefRole(), # specifically structs and unions. + "alt": QAPIXRefRole(), + # reference any data type (excludes modules, commands, events) + "type": QAPIXRefRole(), + "any": QAPIXRefRole(), # reference *any* type of QAPI object. + } + + # Moved into the data property at runtime; + # this is the internal index of reference-able objects. + initial_data: Dict[str, Dict[str, Tuple[Any]]] = { + "objects": {}, # fullname -> ObjectEntry + } + + # Index pages to generate; each entry is an Index class. + indices = [ + QAPIIndex, + ] + + @property + def objects(self) -> Dict[str, ObjectEntry]: + ret = self.data.setdefault("objects", {}) + return ret # type: ignore[no-any-return] + + def setup(self) -> None: + namespaces = set(self.env.app.config.qapi_namespaces) + for namespace in namespaces: + new_index: Type[QAPIIndex] = types.new_class( + f"{namespace}Index", bases=(QAPIIndex,) + ) + new_index.name = f"{namespace.lower()}-index" + new_index.localname = _(f"{namespace} Index") + new_index.shortname = _(f"{namespace} Index") + new_index.namespace = namespace + + self.indices.append(new_index) + + super().setup() + + def note_object( + self, + name: str, + objtype: str, + node_id: str, + aliased: bool = False, + location: Any = None, + ) -> None: + """Note a QAPI object for cross reference.""" + if name in self.objects: + other = self.objects[name] + if other.aliased and aliased is False: + # The original definition found. Override it! + pass + elif other.aliased is False and aliased: + # The original definition is already registered. + return + else: + # duplicated + logger.warning( + __( + "duplicate object description of %s, " + "other instance in %s, use :no-index: for one of them" + ), + name, + other.docname, + location=location, + ) + self.objects[name] = ObjectEntry( + self.env.docname, node_id, objtype, aliased + ) + + def clear_doc(self, docname: str) -> None: + for fullname, obj in list(self.objects.items()): + if obj.docname == docname: + del self.objects[fullname] + + def merge_domaindata( + self, docnames: AbstractSet[str], otherdata: Dict[str, Any] + ) -> None: + for fullname, obj in otherdata["objects"].items(): + if obj.docname in docnames: + # Sphinx's own python domain doesn't appear to bother to + # check for collisions. Assert they don't happen and + # we'll fix it if/when the case arises. + assert fullname not in self.objects, ( + "bug - collision on merge?" + f" {fullname=} {obj=} {self.objects[fullname]=}" + ) + self.objects[fullname] = obj + + def find_obj( + self, namespace: str, modname: str, name: str, typ: Optional[str] + ) -> List[Tuple[str, ObjectEntry]]: + """ + Find a QAPI object for "name", maybe using contextual information. + + Returns a list of (name, object entry) tuples. + + :param namespace: The current namespace context (if any!) under + which we are searching. + :param modname: The current module context (if any!) under + which we are searching. + :param name: The name of the x-ref to resolve; may or may not + include leading context. + :param type: The role name of the x-ref we're resolving, if + provided. This is absent for "any" role lookups. 
+ """ + if not name: + return [] + + # ## + # what to search for + # ## + + parts = list(QAPIDescription.split_fqn(name)) + explicit = tuple(bool(x) for x in parts) + + # Fill in the blanks where possible: + if namespace and not parts[0]: + parts[0] = namespace + if modname and not parts[1]: + parts[1] = modname + + implicit_fqn = "" + if all(parts): + implicit_fqn = f"{parts[0]}:{parts[1]}.{parts[2]}" + + if typ is None: + # :any: lookup, search everything: + objtypes: List[str] = list(self.object_types) + else: + # type is specified and will be a role (e.g. obj, mod, cmd) + # convert this to eligible object types (e.g. command, module) + # using the QAPIDomain.object_types table. + objtypes = self.objtypes_for_role(typ, []) + + # ## + # search! + # ## + + def _search(needle: str) -> List[str]: + if ( + needle + and needle in self.objects + and self.objects[needle].objtype in objtypes + ): + return [needle] + return [] + + if found := _search(name): + # Exact match! + pass + elif found := _search(implicit_fqn): + # Exact match using contextual information to fill in the gaps. + pass + else: + # No exact hits, perform applicable fuzzy searches. + searches = [] + + esc = tuple(re.escape(s) for s in parts) + + # Try searching for ns:*.name or ns:name + if explicit[0] and not explicit[1]: + searches.append(f"^{esc[0]}:([^\\.]+\\.)?{esc[2]}$") + # Try searching for *:module.name or module.name + if explicit[1] and not explicit[0]: + searches.append(f"(^|:){esc[1]}\\.{esc[2]}$") + # Try searching for context-ns:*.name or context-ns:name + if parts[0] and not (explicit[0] or explicit[1]): + searches.append(f"^{esc[0]}:([^\\.]+\\.)?{esc[2]}$") + # Try searching for *:context-mod.name or context-mod.name + if parts[1] and not (explicit[0] or explicit[1]): + searches.append(f"(^|:){esc[1]}\\.{esc[2]}$") + # Try searching for *:name, *.name, or name + if not (explicit[0] or explicit[1]): + searches.append(f"(^|:|\\.){esc[2]}$") + + for search in searches: + if found := [ + oname + for oname in self.objects + if re.search(search, oname) + and self.objects[oname].objtype in objtypes + ]: + break + + matches = [(oname, self.objects[oname]) for oname in found] + if len(matches) > 1: + matches = [m for m in matches if not m[1].aliased] + return matches + + def resolve_xref( + self, + env: BuildEnvironment, + fromdocname: str, + builder: Builder, + typ: str, + target: str, + node: pending_xref, + contnode: Element, + ) -> nodes.reference | None: + namespace = node.get("qapi:namespace") + modname = node.get("qapi:module") + matches = self.find_obj(namespace, modname, target, typ) + + if not matches: + # Normally, we could pass warn_dangling=True to QAPIXRefRole(), + # but that will trigger on references to these built-in types, + # which we'd like to ignore instead. + + # Take care of that warning here instead, so long as the + # reference isn't to one of our built-in core types. 
+ if target not in ( + "string", + "number", + "int", + "boolean", + "null", + "value", + "q_empty", + ): + logger.warning( + __("qapi:%s reference target not found: %r"), + typ, + target, + type="ref", + subtype="qapi", + location=node, + ) + return None + + if len(matches) > 1: + logger.warning( + __("more than one target found for cross-reference %r: %s"), + target, + ", ".join(match[0] for match in matches), + type="ref", + subtype="qapi", + location=node, + ) + + name, obj = matches[0] + return make_refnode( + builder, fromdocname, obj.docname, obj.node_id, contnode, name + ) + + def resolve_any_xref( + self, + env: BuildEnvironment, + fromdocname: str, + builder: Builder, + target: str, + node: pending_xref, + contnode: Element, + ) -> List[Tuple[str, nodes.reference]]: + results: List[Tuple[str, nodes.reference]] = [] + matches = self.find_obj( + node.get("qapi:namespace"), node.get("qapi:module"), target, None + ) + for name, obj in matches: + rolename = self.role_for_objtype(obj.objtype) + assert rolename is not None + role = f"qapi:{rolename}" + refnode = make_refnode( + builder, fromdocname, obj.docname, obj.node_id, contnode, name + ) + results.append((role, refnode)) + return results + + +def setup(app: Sphinx) -> Dict[str, Any]: + app.setup_extension("sphinx.directives") + app.add_config_value( + "qapi_allowed_fields", + set(), + "env", # Setting impacts parsing phase + types=set, + ) + app.add_config_value( + "qapi_namespaces", + set(), + "env", + types=set, + ) + app.add_domain(QAPIDomain) + + return { + "version": "1.0", + "env_version": 1, + "parallel_read_safe": True, + "parallel_write_safe": True, + } diff --git a/docs/sphinx/qapidoc.py b/docs/sphinx/qapidoc.py index 738b2450fb1..c2f09bac16c 100644 --- a/docs/sphinx/qapidoc.py +++ b/docs/sphinx/qapidoc.py @@ -2,6 +2,7 @@ # # QEMU qapidoc QAPI file parsing extension # +# Copyright (c) 2024-2025 Red Hat # Copyright (c) 2020 Linaro # # This work is licensed under the terms of the GNU GPLv2 or later. 
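The qapi_domain extension registers two configuration values in its setup() hook, qapi_allowed_fields and qapi_namespaces, and the qapi-doc directive (below) resolves its file argument against qapidoc_srctree. A minimal conf.py sketch wiring the extensions together could look like the following; the paths and values are illustrative assumptions, not QEMU's actual configuration:

    # docs/conf.py (sketch only; values below are illustrative assumptions)
    import os
    import sys

    # The extensions live in docs/sphinx/; Sphinx executes conf.py with
    # the configuration directory as the working directory.
    sys.path.insert(0, os.path.abspath("sphinx"))
    # qapidoc imports the qapi.* parser package, which lives under scripts/.
    sys.path.insert(0, os.path.abspath(os.path.join("..", "scripts")))

    extensions = ["qapi_domain", "qapidoc"]

    # Field list names accepted in qapi:* directives without triggering
    # the "Unrecognized field list name" warning.
    qapi_allowed_fields = {"example"}

    # Extra namespaces; each one gets its own "<Name> Index" page.
    qapi_namespaces = {"QMP"}

    # Directory that the qapi-doc directive's file argument is relative to,
    # e.g.  .. qapi-doc:: qapi/qapi-schema.json
    qapidoc_srctree = os.path.abspath("..")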
@@ -24,446 +25,411 @@ https://www.sphinx-doc.org/en/master/development/index.html """ +from __future__ import annotations + + +__version__ = "2.0" + +from contextlib import contextmanager import os +from pathlib import Path import re import sys -import textwrap -from typing import List +from typing import TYPE_CHECKING from docutils import nodes -from docutils.parsers.rst import Directive, directives -from docutils.statemachine import ViewList -from qapi.error import QAPIError, QAPISemError -from qapi.gen import QAPISchemaVisitor -from qapi.schema import QAPISchema - +from docutils.parsers.rst import directives +from docutils.statemachine import StringList +from qapi.error import QAPIError +from qapi.parser import QAPIDoc +from qapi.schema import ( + QAPISchema, + QAPISchemaArrayType, + QAPISchemaCommand, + QAPISchemaDefinition, + QAPISchemaEnumMember, + QAPISchemaEvent, + QAPISchemaFeature, + QAPISchemaMember, + QAPISchemaObjectType, + QAPISchemaObjectTypeMember, + QAPISchemaType, + QAPISchemaVisitor, +) +from qapi.source import QAPISourceInfo from sphinx import addnodes from sphinx.directives.code import CodeBlock from sphinx.errors import ExtensionError -from sphinx.util.docutils import switch_source_input +from sphinx.util import logging +from sphinx.util.docutils import SphinxDirective, switch_source_input from sphinx.util.nodes import nested_parse_with_titles -__version__ = "1.0" +if TYPE_CHECKING: + from typing import ( + Any, + Generator, + List, + Optional, + Sequence, + Union, + ) + from sphinx.application import Sphinx + from sphinx.util.typing import ExtensionMetadata -def dedent(text: str) -> str: - # Adjust indentation to make description text parse as paragraph. - lines = text.splitlines(True) - if re.match(r"\s+", lines[0]): - # First line is indented; description started on the line after - # the name. dedent the whole block. - return textwrap.dedent(text) +logger = logging.getLogger(__name__) - # Descr started on same line. Dedent line 2+. - return lines[0] + textwrap.dedent("".join(lines[1:])) +class Transmogrifier: + # pylint: disable=too-many-public-methods -# Disable black auto-formatter until re-enabled: -# fmt: off + # Field names used for different entity types: + field_types = { + "enum": "value", + "struct": "memb", + "union": "memb", + "event": "memb", + "command": "arg", + "alternate": "alt", + } + def __init__(self) -> None: + self._curr_ent: Optional[QAPISchemaDefinition] = None + self._result = StringList() + self.indent = 0 -class QAPISchemaGenRSTVisitor(QAPISchemaVisitor): - """A QAPI schema visitor which generates docutils/Sphinx nodes + @property + def result(self) -> StringList: + return self._result - This class builds up a tree of docutils/Sphinx nodes corresponding - to documentation for the various QAPI objects. To use it, first - create a QAPISchemaGenRSTVisitor object, and call its - visit_begin() method. Then you can call one of the two methods - 'freeform' (to add documentation for a freeform documentation - chunk) or 'symbol' (to add documentation for a QAPI symbol). These - will cause the visitor to build up the tree of document - nodes. Once you've added all the documentation via 'freeform' and - 'symbol' method calls, you can call 'get_document_nodes' to get - the final list of document nodes (in a form suitable for returning - from a Sphinx directive's 'run' method). 
- """ - def __init__(self, sphinx_directive): - self._cur_doc = None - self._sphinx_directive = sphinx_directive - self._top_node = nodes.section() - self._active_headings = [self._top_node] - - def _make_dlitem(self, term, defn): - """Return a dlitem node with the specified term and definition. - - term should be a list of Text and literal nodes. - defn should be one of: - - a string, which will be handed to _parse_text_into_node - - a list of Text and literal nodes, which will be put into - a paragraph node - """ - dlitem = nodes.definition_list_item() - dlterm = nodes.term('', '', *term) - dlitem += dlterm - if defn: - dldef = nodes.definition() - if isinstance(defn, list): - dldef += nodes.paragraph('', '', *defn) - else: - self._parse_text_into_node(defn, dldef) - dlitem += dldef - return dlitem - - def _make_section(self, title): - """Return a section node with optional title""" - section = nodes.section(ids=[self._sphinx_directive.new_serialno()]) - if title: - section += nodes.title(title, title) - return section - - def _nodes_for_ifcond(self, ifcond, with_if=True): - """Return list of Text, literal nodes for the ifcond - - Return a list which gives text like ' (If: condition)'. - If with_if is False, we don't return the "(If: " and ")". - """ + @property + def entity(self) -> QAPISchemaDefinition: + assert self._curr_ent is not None + return self._curr_ent - doc = ifcond.docgen() - if not doc: - return [] - doc = nodes.literal('', doc) - if not with_if: - return [doc] + @property + def member_field_type(self) -> str: + return self.field_types[self.entity.meta] - nodelist = [nodes.Text(' ('), nodes.strong('', 'If: ')] - nodelist.append(doc) - nodelist.append(nodes.Text(')')) - return nodelist + # General-purpose rST generation functions - def _nodes_for_one_member(self, member): - """Return list of Text, literal nodes for this member + def get_indent(self) -> str: + return " " * self.indent - Return a list of doctree nodes which give text like - 'name: type (optional) (If: ...)' suitable for use as the - 'term' part of a definition list item. - """ - term = [nodes.literal('', member.name)] - if member.type.doc_type(): - term.append(nodes.Text(': ')) - term.append(nodes.literal('', member.type.doc_type())) - if member.optional: - term.append(nodes.Text(' (optional)')) - if member.ifcond.is_present(): - term.extend(self._nodes_for_ifcond(member.ifcond)) - return term - - def _nodes_for_variant_when(self, branches, variant): - """Return list of Text, literal nodes for variant 'when' clause - - Return a list of doctree nodes which give text like - 'when tagname is variant (If: ...)' suitable for use in - the 'branches' part of a definition list. 
- """ - term = [nodes.Text(' when '), - nodes.literal('', branches.tag_member.name), - nodes.Text(' is '), - nodes.literal('', '"%s"' % variant.name)] - if variant.ifcond.is_present(): - term.extend(self._nodes_for_ifcond(variant.ifcond)) - return term - - def _nodes_for_members(self, doc, what, base=None, branches=None): - """Return list of doctree nodes for the table of members""" - dlnode = nodes.definition_list() - for section in doc.args.values(): - term = self._nodes_for_one_member(section.member) - # TODO drop fallbacks when undocumented members are outlawed - if section.text: - defn = dedent(section.text) - else: - defn = [nodes.Text('Not documented')] + @contextmanager + def indented(self) -> Generator[None]: + self.indent += 1 + try: + yield + finally: + self.indent -= 1 - dlnode += self._make_dlitem(term, defn) + def add_line_raw(self, line: str, source: str, *lineno: int) -> None: + """Append one line of generated reST to the output.""" - if base: - dlnode += self._make_dlitem([nodes.Text('The members of '), - nodes.literal('', base.doc_type())], - None) + # NB: Sphinx uses zero-indexed lines; subtract one. + lineno = tuple((n - 1 for n in lineno)) - if branches: - for v in branches.variants: - if v.type.name == 'q_empty': - continue - assert not v.type.is_implicit() - term = [nodes.Text('The members of '), - nodes.literal('', v.type.doc_type())] - term.extend(self._nodes_for_variant_when(branches, v)) - dlnode += self._make_dlitem(term, None) - - if not dlnode.children: - return [] - - section = self._make_section(what) - section += dlnode - return [section] - - def _nodes_for_enum_values(self, doc): - """Return list of doctree nodes for the table of enum values""" - seen_item = False - dlnode = nodes.definition_list() - for section in doc.args.values(): - termtext = [nodes.literal('', section.member.name)] - if section.member.ifcond.is_present(): - termtext.extend(self._nodes_for_ifcond(section.member.ifcond)) - # TODO drop fallbacks when undocumented members are outlawed - if section.text: - defn = dedent(section.text) - else: - defn = [nodes.Text('Not documented')] - - dlnode += self._make_dlitem(termtext, defn) - seen_item = True - - if not seen_item: - return [] - - section = self._make_section('Values') - section += dlnode - return [section] - - def _nodes_for_arguments(self, doc, arg_type): - """Return list of doctree nodes for the arguments section""" - if arg_type and not arg_type.is_implicit(): - assert not doc.args - section = self._make_section('Arguments') - dlnode = nodes.definition_list() - dlnode += self._make_dlitem( - [nodes.Text('The members of '), - nodes.literal('', arg_type.name)], - None) - section += dlnode - return [section] - - return self._nodes_for_members(doc, 'Arguments') - - def _nodes_for_features(self, doc): - """Return list of doctree nodes for the table of features""" - seen_item = False - dlnode = nodes.definition_list() - for section in doc.features.values(): - dlnode += self._make_dlitem( - [nodes.literal('', section.member.name)], dedent(section.text)) - seen_item = True - - if not seen_item: - return [] - - section = self._make_section('Features') - section += dlnode - return [section] - - def _nodes_for_example(self, exampletext): - """Return list of doctree nodes for a code example snippet""" - return [nodes.literal_block(exampletext, exampletext)] - - def _nodes_for_sections(self, doc): - """Return list of doctree nodes for additional sections""" - nodelist = [] - for section in doc.sections: - if section.tag and section.tag == 
'TODO': - # Hide TODO: sections - continue - - if not section.tag: - # Sphinx cannot handle sectionless titles; - # Instead, just append the results to the prior section. - container = nodes.container() - self._parse_text_into_node(section.text, container) - nodelist += container.children - continue - - snode = self._make_section(section.tag) - if section.tag.startswith('Example'): - snode += self._nodes_for_example(dedent(section.text)) - else: - self._parse_text_into_node(dedent(section.text), snode) - nodelist.append(snode) - return nodelist - - def _nodes_for_if_section(self, ifcond): - """Return list of doctree nodes for the "If" section""" - nodelist = [] - if ifcond.is_present(): - snode = self._make_section('If') - snode += nodes.paragraph( - '', '', *self._nodes_for_ifcond(ifcond, with_if=False) + if line.strip(): + # not a blank line + self._result.append( + self.get_indent() + line.rstrip("\n"), source, *lineno ) - nodelist.append(snode) - return nodelist + else: + self._result.append("", source, *lineno) + + def add_line(self, content: str, info: QAPISourceInfo) -> None: + # NB: We *require* an info object; this works out OK because we + # don't document built-in objects that don't have + # one. Everything else should. + self.add_line_raw(content, info.fname, info.line) + + def add_lines( + self, + content: str, + info: QAPISourceInfo, + ) -> None: + lines = content.splitlines(True) + for i, line in enumerate(lines): + self.add_line_raw(line, info.fname, info.line + i) + + def ensure_blank_line(self) -> None: + # Empty document -- no blank line required. + if not self._result: + return + + # Last line isn't blank, add one. + if self._result[-1].strip(): # pylint: disable=no-member + fname, line = self._result.info(-1) + assert isinstance(line, int) + # New blank line is credited to one-after the current last line. + # +2: correct for zero/one index, then increment by one. + self.add_line_raw("", fname, line + 2) + + def add_field( + self, + kind: str, + name: str, + body: str, + info: QAPISourceInfo, + typ: Optional[str] = None, + ) -> None: + if typ: + text = f":{kind} {typ} {name}: {body}" + else: + text = f":{kind} {name}: {body}" + self.add_lines(text, info) + + def format_type( + self, ent: Union[QAPISchemaDefinition | QAPISchemaMember] + ) -> Optional[str]: + if isinstance(ent, (QAPISchemaEnumMember, QAPISchemaFeature)): + return None + + qapi_type = ent + optional = False + if isinstance(ent, QAPISchemaObjectTypeMember): + qapi_type = ent.type + optional = ent.optional + + if isinstance(qapi_type, QAPISchemaArrayType): + ret = f"[{qapi_type.element_type.doc_type()}]" + else: + assert isinstance(qapi_type, QAPISchemaType) + tmp = qapi_type.doc_type() + assert tmp + ret = tmp + if optional: + ret += "?" + + return ret + + def generate_field( + self, + kind: str, + member: QAPISchemaMember, + body: str, + info: QAPISourceInfo, + ) -> None: + typ = self.format_type(member) + self.add_field(kind, member.name, body, info, typ) + + @staticmethod + def reformat_arobase(text: str) -> str: + """ reformats @var to ``var`` """ + return re.sub(r"@([\w-]+)", r"``\1``", text) + + # Transmogrification helpers + + def visit_paragraph(self, section: QAPIDoc.Section) -> None: + # Squelch empty paragraphs. 
+ if not section.text: + return + + self.ensure_blank_line() + self.add_lines(section.text, section.info) + self.ensure_blank_line() + + def visit_member(self, section: QAPIDoc.ArgSection) -> None: + # FIXME: ifcond for members + # TODO: features for members (documented at entity-level, + # but sometimes defined per-member. Should we add such + # information to member descriptions when we can?) + assert section.member + self.generate_field( + self.member_field_type, + section.member, + # TODO drop fallbacks when undocumented members are outlawed + section.text if section.text else "Not documented", + section.info, + ) - def _add_doc(self, typ, sections): - """Add documentation for a command/object/enum... + def visit_feature(self, section: QAPIDoc.ArgSection) -> None: + # FIXME - ifcond for features is not handled at all yet! + # Proposal: decorate the right-hand column with some graphical + # element to indicate conditional availability? + assert section.text # Guaranteed by parser.py + assert section.member - We assume we're documenting the thing defined in self._cur_doc. - typ is the type of thing being added ("Command", "Object", etc) + self.generate_field("feat", section.member, section.text, section.info) - sections is a list of nodes for sections to add to the definition. - """ + def visit_returns(self, section: QAPIDoc.Section) -> None: + assert isinstance(self.entity, QAPISchemaCommand) + rtype = self.entity.ret_type + # return statements will not be present (and won't be + # autogenerated) for any command that doesn't return + # *something*, so rtype will always be defined here. + assert rtype - doc = self._cur_doc - snode = nodes.section(ids=[self._sphinx_directive.new_serialno()]) - snode += nodes.title('', '', *[nodes.literal(doc.symbol, doc.symbol), - nodes.Text(' (' + typ + ')')]) - self._parse_text_into_node(doc.body.text, snode) - for s in sections: - if s is not None: - snode += s - self._add_node_to_current_heading(snode) - - def visit_enum_type(self, name, info, ifcond, features, members, prefix): - doc = self._cur_doc - self._add_doc('Enum', - self._nodes_for_enum_values(doc) - + self._nodes_for_features(doc) - + self._nodes_for_sections(doc) - + self._nodes_for_if_section(ifcond)) - - def visit_object_type(self, name, info, ifcond, features, - base, members, branches): - doc = self._cur_doc - if base and base.is_implicit(): - base = None - self._add_doc('Object', - self._nodes_for_members(doc, 'Members', base, branches) - + self._nodes_for_features(doc) - + self._nodes_for_sections(doc) - + self._nodes_for_if_section(ifcond)) - - def visit_alternate_type(self, name, info, ifcond, features, - alternatives): - doc = self._cur_doc - self._add_doc('Alternate', - self._nodes_for_members(doc, 'Members') - + self._nodes_for_features(doc) - + self._nodes_for_sections(doc) - + self._nodes_for_if_section(ifcond)) - - def visit_command(self, name, info, ifcond, features, arg_type, - ret_type, gen, success_response, boxed, allow_oob, - allow_preconfig, coroutine): - doc = self._cur_doc - self._add_doc('Command', - self._nodes_for_arguments(doc, arg_type) - + self._nodes_for_features(doc) - + self._nodes_for_sections(doc) - + self._nodes_for_if_section(ifcond)) - - def visit_event(self, name, info, ifcond, features, arg_type, boxed): - doc = self._cur_doc - self._add_doc('Event', - self._nodes_for_arguments(doc, arg_type) - + self._nodes_for_features(doc) - + self._nodes_for_sections(doc) - + self._nodes_for_if_section(ifcond)) - - def symbol(self, doc, entity): - """Add 
documentation for one symbol to the document tree - - This is the main entry point which causes us to add documentation - nodes for a symbol (which could be a 'command', 'object', 'event', - etc). We do this by calling 'visit' on the schema entity, which - will then call back into one of our visit_* methods, depending - on what kind of thing this symbol is. - """ - self._cur_doc = doc - entity.visit(self) - self._cur_doc = None + typ = self.format_type(rtype) + assert typ - def _start_new_heading(self, heading, level): - """Start a new heading at the specified heading level + if section.text: + self.add_field("return", typ, section.text, section.info) + else: + self.add_lines(f":return-nodesc: {typ}", section.info) + + def visit_errors(self, section: QAPIDoc.Section) -> None: + # If the section text does not start with a space, it means text + # began on the same line as the "Error:" string and we should + # not insert a newline in this case. + if section.text[0].isspace(): + text = f":error:\n{section.text}" + else: + text = f":error: {section.text}" + self.add_lines(text, section.info) - Create a new section whose title is 'heading' and which is placed - in the docutils node tree as a child of the most recent level-1 - heading. Subsequent document sections (commands, freeform doc chunks, - etc) will be placed as children of this new heading section. + def preamble(self, ent: QAPISchemaDefinition) -> None: """ - if len(self._active_headings) < level: - raise QAPISemError(self._cur_doc.info, - 'Level %d subheading found outside a ' - 'level %d heading' - % (level, level - 1)) - snode = self._make_section(heading) - self._active_headings[level - 1] += snode - self._active_headings = self._active_headings[:level] - self._active_headings.append(snode) - - def _add_node_to_current_heading(self, node): - """Add the node to whatever the current active heading is""" - self._active_headings[-1] += node - - def freeform(self, doc): - """Add a piece of 'freeform' documentation to the document tree - - A 'freeform' document chunk doesn't relate to any particular - symbol (for instance, it could be an introduction). - - If the freeform document starts with a line of the form - '= Heading text', this is a section or subsection heading, with - the heading level indicated by the number of '=' signs. + Generate option lines for QAPI entity directives. """ + if ent.doc and ent.doc.since: + assert ent.doc.since.kind == QAPIDoc.Kind.SINCE + # Generated from the entity's docblock; info location is exact. + self.add_line(f":since: {ent.doc.since.text}", ent.doc.since.info) + + if ent.ifcond.is_present(): + doc = ent.ifcond.docgen() + assert ent.info + # Generated from entity definition; info location is approximate. + self.add_line(f":ifcond: {doc}", ent.info) + + # Hoist special features such as :deprecated: and :unstable: + # into the options block for the entity. If, in the future, new + # special features are added, qapi-domain will chirp about + # unrecognized options and fail until they are handled in + # qapi-domain. + for feat in ent.features: + if feat.is_special(): + # FIXME: handle ifcond if present. How to display that + # information is TBD. + # Generated from entity def; info location is approximate. 
+ assert feat.info + self.add_line(f":{feat.name}:", feat.info) + + self.ensure_blank_line() + + def _insert_member_pointer(self, ent: QAPISchemaDefinition) -> None: + + def _get_target( + ent: QAPISchemaDefinition, + ) -> Optional[QAPISchemaDefinition]: + if isinstance(ent, (QAPISchemaCommand, QAPISchemaEvent)): + return ent.arg_type + if isinstance(ent, QAPISchemaObjectType): + return ent.base + return None + + target = _get_target(ent) + if target is not None and not target.is_implicit(): + assert ent.info + self.add_field( + self.member_field_type, + "q_dummy", + f"The members of :qapi:type:`{target.name}`.", + ent.info, + "q_dummy", + ) - # QAPIDoc documentation says free-form documentation blocks - # must have only a body section, nothing else. - assert not doc.sections - assert not doc.args - assert not doc.features - self._cur_doc = doc - - text = doc.body.text - if re.match(r'=+ ', text): - # Section/subsection heading (if present, will always be - # the first line of the block) - (heading, _, text) = text.partition('\n') - (leader, _, heading) = heading.partition(' ') - self._start_new_heading(heading, len(leader)) - if text == '': - return - - node = self._make_section(None) - self._parse_text_into_node(text, node) - self._add_node_to_current_heading(node) - self._cur_doc = None - - def _parse_text_into_node(self, doctext, node): - """Parse a chunk of QAPI-doc-format text into the node - - The doc comment can contain most inline rST markup, including - bulleted and enumerated lists. - As an extra permitted piece of markup, @var will be turned - into ``var``. - """ + if isinstance(ent, QAPISchemaObjectType) and ent.branches is not None: + for variant in ent.branches.variants: + if variant.type.name == "q_empty": + continue + assert ent.info + self.add_field( + self.member_field_type, + "q_dummy", + f" When ``{ent.branches.tag_member.name}`` is " + f"``{variant.name}``: " + f"The members of :qapi:type:`{variant.type.name}`.", + ent.info, + "q_dummy", + ) + + def visit_sections(self, ent: QAPISchemaDefinition) -> None: + sections = ent.doc.all_sections if ent.doc else [] + + # Determine the index location at which we should generate + # documentation for "The members of ..." pointers. This should + # go at the end of the members section(s) if any. Note that + # index 0 is assumed to be a plain intro section, even if it is + # empty; and that a members section if present will always + # immediately follow the opening PLAIN section. + gen_index = 1 + if len(sections) > 1: + while sections[gen_index].kind == QAPIDoc.Kind.MEMBER: + gen_index += 1 + if gen_index >= len(sections): + break + + # Add sections in source order: + for i, section in enumerate(sections): + section.text = self.reformat_arobase(section.text) + + if section.kind == QAPIDoc.Kind.PLAIN: + self.visit_paragraph(section) + elif section.kind == QAPIDoc.Kind.MEMBER: + assert isinstance(section, QAPIDoc.ArgSection) + self.visit_member(section) + elif section.kind == QAPIDoc.Kind.FEATURE: + assert isinstance(section, QAPIDoc.ArgSection) + self.visit_feature(section) + elif section.kind in (QAPIDoc.Kind.SINCE, QAPIDoc.Kind.TODO): + # Since is handled in preamble, TODO is skipped intentionally. + pass + elif section.kind == QAPIDoc.Kind.RETURNS: + self.visit_returns(section) + elif section.kind == QAPIDoc.Kind.ERRORS: + self.visit_errors(section) + else: + assert False + + # Generate "The members of ..." 
entries if necessary: + if i == gen_index - 1: + self._insert_member_pointer(ent) + + self.ensure_blank_line() - # Handle the "@var means ``var`` case - doctext = re.sub(r'@([\w-]+)', r'``\1``', doctext) - - rstlist = ViewList() - for line in doctext.splitlines(): - # The reported line number will always be that of the start line - # of the doc comment, rather than the actual location of the error. - # Being more precise would require overhaul of the QAPIDoc class - # to track lines more exactly within all the sub-parts of the doc - # comment, as well as counting lines here. - rstlist.append(line, self._cur_doc.info.fname, - self._cur_doc.info.line) - # Append a blank line -- in some cases rST syntax errors get - # attributed to the line after one with actual text, and if there - # isn't anything in the ViewList corresponding to that then Sphinx - # 1.6's AutodocReporter will then misidentify the source/line location - # in the error message (usually attributing it to the top-level - # .rst file rather than the offending .json file). The extra blank - # line won't affect the rendered output. - rstlist.append("", self._cur_doc.info.fname, self._cur_doc.info.line) - self._sphinx_directive.do_parse(rstlist, node) - - def get_document_nodes(self): - """Return the list of docutils nodes which make up the document""" - return self._top_node.children - - -# Turn the black formatter on for the rest of the file. -# fmt: on + # Transmogrification core methods + + def visit_module(self, path: str) -> None: + name = Path(path).stem + # module directives are credited to the first line of a module file. + self.add_line_raw(f".. qapi:module:: {name}", path, 1) + self.ensure_blank_line() + + def visit_freeform(self, doc: QAPIDoc) -> None: + assert len(doc.all_sections) == 1, doc.all_sections + body = doc.all_sections[0] + self.add_lines(self.reformat_arobase(body.text), doc.info) + self.ensure_blank_line() + + def visit_entity(self, ent: QAPISchemaDefinition) -> None: + assert ent.info + + try: + self._curr_ent = ent + + # Squish structs and unions together into an "object" directive. + meta = ent.meta + if meta in ("struct", "union"): + meta = "object" + + # This line gets credited to the start of the /definition/. + self.add_line(f".. qapi:{meta}:: {ent.name}", ent.info) + with self.indented(): + self.preamble(ent) + self.visit_sections(ent) + finally: + self._curr_ent = None + + def set_namespace(self, namespace: str, source: str, lineno: int) -> None: + self.add_line_raw( + f".. qapi:namespace:: {namespace}", source, lineno + 1 + ) + self.ensure_blank_line() class QAPISchemaGenDepVisitor(QAPISchemaVisitor): @@ -474,22 +440,22 @@ class QAPISchemaGenDepVisitor(QAPISchemaVisitor): schema file associated with each module in the QAPI input. 
""" - def __init__(self, env, qapidir): + def __init__(self, env: Any, qapidir: str) -> None: self._env = env self._qapidir = qapidir - def visit_module(self, name): + def visit_module(self, name: str) -> None: if name != "./builtin": qapifile = self._qapidir + "/" + name self._env.note_dependency(os.path.abspath(qapifile)) super().visit_module(name) -class NestedDirective(Directive): - def run(self): +class NestedDirective(SphinxDirective): + def run(self) -> Sequence[nodes.Node]: raise NotImplementedError - def do_parse(self, rstlist, node): + def do_parse(self, rstlist: StringList, node: nodes.Node) -> None: """ Parse rST source lines and add them to the specified node @@ -508,15 +474,90 @@ class QAPIDocDirective(NestedDirective): required_argument = 1 optional_arguments = 1 - option_spec = {"qapifile": directives.unchanged_required} + option_spec = { + "qapifile": directives.unchanged_required, + "namespace": directives.unchanged, + } has_content = False - def new_serialno(self): - """Return a unique new ID string suitable for use as a node's ID""" - env = self.state.document.settings.env - return "qapidoc-%d" % env.new_serialno("qapidoc") + def transmogrify(self, schema: QAPISchema) -> nodes.Element: + logger.info("Transmogrifying QAPI to rST ...") + vis = Transmogrifier() + modules = set() + + if "namespace" in self.options: + vis.set_namespace( + self.options["namespace"], *self.get_source_info() + ) - def run(self): + for doc in schema.docs: + module_source = doc.info.fname + if module_source not in modules: + vis.visit_module(module_source) + modules.add(module_source) + + if doc.symbol: + ent = schema.lookup_entity(doc.symbol) + assert isinstance(ent, QAPISchemaDefinition) + vis.visit_entity(ent) + else: + vis.visit_freeform(doc) + + logger.info("Transmogrification complete.") + + contentnode = nodes.section() + content = vis.result + titles_allowed = True + + logger.info("Transmogrifier running nested parse ...") + with switch_source_input(self.state, content): + if titles_allowed: + node: nodes.Element = nodes.section() + node.document = self.state.document + nested_parse_with_titles(self.state, content, contentnode) + else: + node = nodes.paragraph() + node.document = self.state.document + self.state.nested_parse(content, 0, contentnode) + logger.info("Transmogrifier's nested parse completed.") + + if self.env.app.verbosity >= 2 or os.environ.get("DEBUG"): + argname = "_".join(Path(self.arguments[0]).parts) + name = Path(argname).stem + ".ir" + self.write_intermediate(content, name) + + sys.stdout.flush() + return contentnode + + def write_intermediate(self, content: StringList, filename: str) -> None: + logger.info( + "writing intermediate rST for '%s' to '%s'", + self.arguments[0], + filename, + ) + + srctree = Path(self.env.app.config.qapidoc_srctree).resolve() + outlines = [] + lcol_width = 0 + + for i, line in enumerate(content): + src, lineno = content.info(i) + srcpath = Path(src).resolve() + srcpath = srcpath.relative_to(srctree) + + lcol = f"{srcpath}:{lineno:04d}" + lcol_width = max(lcol_width, len(lcol)) + outlines.append((lcol, line)) + + with open(filename, "w", encoding="UTF-8") as outfile: + for lcol, rcol in outlines: + outfile.write(lcol.rjust(lcol_width)) + outfile.write(" |") + if rcol: + outfile.write(f" {rcol}") + outfile.write("\n") + + def run(self) -> Sequence[nodes.Node]: env = self.state.document.settings.env qapifile = env.config.qapidoc_srctree + "/" + self.arguments[0] qapidir = os.path.dirname(qapifile) @@ -527,20 +568,14 @@ def run(self): # First 
tell Sphinx about all the schema files that the # output documentation depends on (including 'qapifile' itself) schema.visit(QAPISchemaGenDepVisitor(env, qapidir)) - - vis = QAPISchemaGenRSTVisitor(self) - vis.visit_begin(schema) - for doc in schema.docs: - if doc.symbol: - vis.symbol(doc, schema.lookup_entity(doc.symbol)) - else: - vis.freeform(doc) - return vis.get_document_nodes() except QAPIError as err: # Launder QAPI parse errors into Sphinx extension errors # so they are displayed nicely to the user raise ExtensionError(str(err)) from err + contentnode = self.transmogrify(schema) + return contentnode.children + class QMPExample(CodeBlock, NestedDirective): """ @@ -591,7 +626,7 @@ def _highlightlang(self) -> addnodes.highlightlang: ) return node - def admonition_wrap(self, *content) -> List[nodes.Node]: + def admonition_wrap(self, *content: nodes.Node) -> List[nodes.Node]: title = "Example:" if "title" in self.options: title = f"{title} {self.options['title']}" @@ -637,8 +672,9 @@ def run(self) -> List[nodes.Node]: return self.admonition_wrap(*content_nodes) -def setup(app): +def setup(app: Sphinx) -> ExtensionMetadata: """Register qapi-doc directive with Sphinx""" + app.setup_extension("qapi_domain") app.add_config_value("qapidoc_srctree", None, "env") app.add_directive("qapi-doc", QAPIDocDirective) app.add_directive("qmp-example", QMPExample) diff --git a/docs/sphinx/qmp_lexer.py b/docs/sphinx/qmp_lexer.py index a59de8a079c..7b3b808d124 100644 --- a/docs/sphinx/qmp_lexer.py +++ b/docs/sphinx/qmp_lexer.py @@ -24,7 +24,7 @@ class QMPExampleMarkersLexer(RegexLexer): 'root': [ (r'-> ', token.Generic.Prompt), (r'<- ', token.Generic.Prompt), - (r' ?\.{3} ?', token.Generic.Prompt), + (r'\.{3}( .* \.{3})?', token.Comment.Multiline), ] } diff --git a/docs/spin/tcg-exclusive.promela b/docs/spin/tcg-exclusive.promela index c91cfca9f73..1d03af850b7 100644 --- a/docs/spin/tcg-exclusive.promela +++ b/docs/spin/tcg-exclusive.promela @@ -1,6 +1,6 @@ /* * This model describes the implementation of exclusive sections in - * cpus-common.c (start_exclusive, end_exclusive, cpu_exec_start, + * cpu-common.c (start_exclusive, end_exclusive, cpu_exec_start, * cpu_exec_end). 
* * Author: Paolo Bonzini @@ -65,7 +65,7 @@ } #define COND_BROADCAST(c) c++ -// this is the logic from cpus-common.c +// this is the logic from cpu-common.c mutex_t mutex; cond_t exclusive_cond; diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst index 6733ffd2b94..bf18c563470 100644 --- a/docs/system/arm/aspeed.rst +++ b/docs/system/arm/aspeed.rst @@ -1,12 +1,11 @@ -Aspeed family boards (``*-bmc``, ``ast2500-evb``, ``ast2600-evb``, ``ast2700-evb``) -=================================================================================== +Aspeed family boards (``ast2500-evb``, ``ast2600-evb``, ``ast2700-evb``, ``bletchley-bmc``, ``fuji-bmc``, ``gb200nvl-bmc``, ``fby35-bmc``, ``fp5280g2-bmc``, ``g220a-bmc``, ``palmetto-bmc``, ``qcom-dc-scm-v1-bmc``, ``qcom-firework-bmc``, ``quanta-q71l-bmc``, ``rainier-bmc``, ``romulus-bmc``, ``sonorapass-bmc``, ``supermicrox11-bmc``, ``supermicrox11spi-bmc``, ``tiogapass-bmc``, ``witherspoon-bmc``, ``yosemitev2-bmc``) +==================================================================================================================================================================================================================================================================================================================================================================================================================================== The QEMU Aspeed machines model BMCs of various OpenPOWER systems and Aspeed evaluation boards. They are based on different releases of the Aspeed SoC : the AST2400 integrating an ARM926EJ-S CPU (400MHz), the AST2500 with an ARM1176JZS CPU (800MHz), the AST2600 -with dual cores ARM Cortex-A7 CPUs (1.2GHz) and more recently the AST2700 -with quad cores ARM Cortex-A35 64 bits CPUs (1.6GHz) +with dual cores ARM Cortex-A7 CPUs (1.2GHz). The SoC comes with RAM, Gigabit ethernet, USB, SD/MMC, USB, SPI, I2C, etc. @@ -15,7 +14,8 @@ AST2400 SoC based machines : - ``palmetto-bmc`` OpenPOWER Palmetto POWER8 BMC - ``quanta-q71l-bmc`` OpenBMC Quanta BMC -- ``supermicrox11-bmc`` Supermicro X11 BMC +- ``supermicrox11-bmc`` Supermicro X11 BMC (ARM926EJ-S) +- ``supermicrox11spi-bmc`` Supermicro X11 SPI BMC (ARM1176) AST2500 SoC based machines : @@ -31,18 +31,14 @@ AST2500 SoC based machines : AST2600 SoC based machines : - ``ast2600-evb`` Aspeed AST2600 Evaluation board (Cortex-A7) -- ``tacoma-bmc`` OpenPOWER Witherspoon POWER9 AST2600 BMC - ``rainier-bmc`` IBM Rainier POWER10 BMC - ``fuji-bmc`` Facebook Fuji BMC - ``bletchley-bmc`` Facebook Bletchley BMC - ``fby35-bmc`` Facebook fby35 BMC +- ``gb200nvl-bmc`` Nvidia GB200nvl BMC - ``qcom-dc-scm-v1-bmc`` Qualcomm DC-SCM V1 BMC - ``qcom-firework-bmc`` Qualcomm Firework BMC -AST2700 SoC based machines : - -- ``ast2700-evb`` Aspeed AST2700 Evaluation board (Cortex-A35) - Supported devices ----------------- @@ -105,6 +101,9 @@ or directly from the ASPEED Forked OpenBMC GitHub release repository : https://github.com/AspeedTech-BMC/openbmc/releases +Booting from a kernel image +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + To boot a kernel directly from a Linux build tree: .. code-block:: bash @@ -114,16 +113,10 @@ To boot a kernel directly from a Linux build tree: -dtb arch/arm/boot/dts/aspeed-ast2600-evb.dtb \ -initrd rootfs.cpio -To boot the machine from the flash image, use an MTD drive : - -.. 
code-block:: bash - - $ qemu-system-arm -M romulus-bmc -nic user \ - -drive file=obmc-phosphor-image-romulus.static.mtd,format=raw,if=mtd -nographic +Booting from a flash image +^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Options specific to Aspeed machines are : - - * ``boot-emmc`` to set or unset boot from eMMC (AST2600). +The machine options specific to Aspeed to boot from a flash image are : * ``execute-in-place`` which emulates the boot from the CE0 flash device by using the FMC controller to load the instructions, and @@ -134,10 +127,12 @@ Options specific to Aspeed machines are : * ``spi-model`` to change the default SPI Flash model. - * ``bmc-console`` to change the default console device. Most of the - machines use the ``UART5`` device for a boot console, which is - mapped on ``/dev/ttyS4`` under Linux, but it is not always the - case. +To boot the machine from the flash image, use an MTD drive : + +.. code-block:: bash + + $ qemu-system-arm -M romulus-bmc -nic user \ + -drive file=obmc-phosphor-image-romulus.static.mtd,format=raw,if=mtd -nographic To use other flash models, for instance a different FMC chip and a bigger (64M) SPI for the ``ast2500-evb`` machine, run : @@ -169,6 +164,78 @@ In that case, the machine boots fetching instructions from the FMC0 device. It is slower to start but closer to what HW does. Using the machine option ``execute-in-place`` has a similar effect. +Booting from an eMMC image +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The machine options specific to Aspeed machines to boot from an eMMC +image are : + + * ``boot-emmc`` to set or unset boot from eMMC (AST2600). + +Only the ``ast2600-evb`` and ``rainier-bmc`` machines have support to +boot from an eMMC device. In this case, the machine assumes that the +eMMC image includes special boot partitions. Such an image can be +built this way : + +.. code-block:: bash + + $ dd if=/dev/zero of=mmc-bootarea.img count=2 bs=1M + $ dd if=u-boot-spl.bin of=mmc-bootarea.img conv=notrunc + $ dd if=u-boot.bin of=mmc-bootarea.img conv=notrunc count=64 bs=1K + $ cat mmc-bootarea.img obmc-phosphor-image.wic > mmc.img + $ truncate --size 16GB mmc.img + +Boot the machine ``rainier-bmc`` with : + +.. code-block:: bash + + $ qemu-system-arm -M rainier-bmc \ + -drive file=mmc.img,format=raw,if=sd,index=2 \ + -nographic + +The ``boot-emmc`` option can be set or unset, to change the default +boot mode of the machine: SPI or eMMC. This can be useful to boot the +``ast2600-evb`` machine from an eMMC device (default being SPI) or to +boot the ``rainier-bmc`` machine from a flash device (default being +eMMC). + +As an example, here is how to boot the ``rainier-bmc`` machine from +the flash device with ``boot-emmc=false`` and let the machine use an +eMMC image : + +.. code-block:: bash + + $ qemu-system-arm -M rainier-bmc,boot-emmc=false \ + -drive file=flash.img,format=raw,if=mtd \ + -drive file=mmc.img,format=raw,if=sd,index=2 \ + -nographic + +It should be noted that in this case the eMMC device must not have +boot partitions, otherwise the contents will not be accessible to the +machine. This limitation is due to the use of the ``-drive`` +interface. + +Ideally, one should be able to define the eMMC device and the +associated backend directly on the command line, such as : + +.. code-block:: bash + + -blockdev node-name=emmc0,driver=file,filename=mmc.img \ + -device emmc,bus=sdhci-bus.2,drive=emmc0,boot-partition-size=1048576,boot-config=8 + +This is not yet supported (as of QEMU-10.0). Work is needed to refactor the sdhci bus model.
+ +Other booting options +^^^^^^^^^^^^^^^^^^^^^ + +Other machine options specific to Aspeed machines are : + + * ``bmc-console`` to change the default console device. Most of the + machines use the ``UART5`` device for a boot console, which is + mapped on ``/dev/ttyS4`` under Linux, but it is not always the + case. + To change the boot console and use device ``UART3`` (``/dev/ttyS2`` under Linux), use : @@ -176,8 +243,78 @@ under Linux), use : -M ast2500-evb,bmc-console=uart3 +Aspeed 2700 family boards (``ast2700-evb``) +================================================================== + +The QEMU Aspeed machines model BMCs of Aspeed evaluation boards. +They are based on different releases of the Aspeed SoC : +the AST2700 with quad cores ARM Cortex-A35 64 bits CPUs (1.6GHz). + +The SoC comes with RAM, Gigabit ethernet, USB, SD/MMC, SPI, I2C, +etc. + +AST2700 SoC based machines : -Boot the AST2700 machine from the flash image, use an MTD drive : +- ``ast2700-evb`` Aspeed AST2700 Evaluation board (Cortex-A35) +- ``ast2700fc`` Aspeed AST2700 Evaluation board (Cortex-A35 + Cortex-M4) + +Supported devices +----------------- + * Interrupt Controller + * Timer Controller + * RTC Controller + * I2C Controller + * System Control Unit (SCU) + * SRAM mapping + * X-DMA Controller (basic interface) + * Static Memory Controller (SMC or FMC) - Only SPI Flash support + * SPI Memory Controller + * USB 2.0 Controller + * SD/MMC storage controllers + * SDRAM controller (dummy interface for basic settings and training) + * Watchdog Controller + * GPIO Controller (Master only) + * UART + * Ethernet controllers + * Front LEDs (PCA9552 on I2C bus) + * LPC Peripheral Controller (a subset of subdevices are supported) + * Hash/Crypto Engine (HACE) - Hash support only. TODO: Crypto + * ADC + * eMMC Boot Controller (dummy) + * PECI Controller (minimal) + * I3C Controller + * Internal Bridge Controller (SLI dummy) + +Missing devices +--------------- + * PWM and Fan Controller + * Slave GPIO Controller + * Super I/O Controller + * PCI-Express 1 Controller + * Graphic Display Controller + * MCTP Controller + * Mailbox Controller + * Virtual UART + * eSPI Controller + +Boot options +------------ + +Images can be downloaded from the ASPEED Forked OpenBMC GitHub release repository : + + https://github.com/AspeedTech-BMC/openbmc/releases + +Booting the ast2700-evb machine +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Boot the AST2700 machine from the flash image. + +There are two supported methods for booting the AST2700 machine with a flash image: + +Manual boot using ``-device loader``: + +It causes all 4 CPU cores to start execution from address ``0x430000000``, which +corresponds to the BL31 image load address. .. code-block:: bash @@ -197,6 +334,89 @@ Boot the AST2700 machine from the flash image, use an MTD drive : -drive file=${IMGDIR}/image-bmc,format=raw,if=mtd \ -nographic +Boot using a virtual boot ROM (``-bios``): + +If users do not specify the ``-bios`` option, QEMU will attempt to load the +default vbootrom image ``ast27x0_bootrom.bin`` from either the current working +directory or the ``pc-bios`` directory within the QEMU source tree. + +.. code-block:: bash + + $ qemu-system-aarch64 -M ast2700-evb \ + -drive file=image-bmc,format=raw,if=mtd \ + -nographic + +The ``-bios`` option allows users to specify a custom path for the vbootrom +image to be loaded during boot. This will load the vbootrom image from the +specified path in the ${HOME} directory. + +.. 
code-block:: bash + + -bios ${HOME}/ast27x0_bootrom.bin + +Booting the ast2700fc machine +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +AST2700 features four Cortex-A35 primary processors and two Cortex-M4 coprocessors. +**ast2700-evb** machine focuses on emulating the four Cortex-A35 primary processors, +**ast2700fc** machine extends **ast2700-evb** by adding support for the two Cortex-M4 coprocessors. + +Steps to boot the AST2700fc machine: + +1. Ensure you have the following AST2700A1 binaries available in a directory + + * u-boot-nodtb.bin + * u-boot.dtb + * bl31.bin + * optee/tee-raw.bin + * image-bmc + * zephyr-aspeed-ssp.elf (for SSP firmware, CPU 5) + * zephyr-aspeed-tsp.elf (for TSP firmware, CPU 6) + +2. Execute the following command to start ``ast2700fc`` machine: + +.. code-block:: bash + + IMGDIR=ast2700-default + UBOOT_SIZE=$(stat --format=%s -L ${IMGDIR}/u-boot-nodtb.bin) + + $ qemu-system-aarch64 -M ast2700fc \ + -device loader,force-raw=on,addr=0x400000000,file=${IMGDIR}/u-boot-nodtb.bin \ + -device loader,force-raw=on,addr=$((0x400000000 + ${UBOOT_SIZE})),file=${IMGDIR}/u-boot.dtb \ + -device loader,force-raw=on,addr=0x430000000,file=${IMGDIR}/bl31.bin \ + -device loader,force-raw=on,addr=0x430080000,file=${IMGDIR}/optee/tee-raw.bin \ + -device loader,cpu-num=0,addr=0x430000000 \ + -device loader,cpu-num=1,addr=0x430000000 \ + -device loader,cpu-num=2,addr=0x430000000 \ + -device loader,cpu-num=3,addr=0x430000000 \ + -drive file=${IMGDIR}/image-bmc,if=mtd,format=raw \ + -device loader,file=${IMGDIR}/zephyr-aspeed-ssp.elf,cpu-num=4 \ + -device loader,file=${IMGDIR}/zephyr-aspeed-tsp.elf,cpu-num=5 \ + -serial pty -serial pty -serial pty \ + -snapshot \ + -S -nographic + +After launching QEMU, serial devices will be automatically redirected. +Example output: + +.. code-block:: bash + + char device redirected to /dev/pts/55 (label serial0) + char device redirected to /dev/pts/56 (label serial1) + char device redirected to /dev/pts/57 (label serial2) + +- serial0: Console for the four Cortex-A35 primary processors. +- serial1 and serial2: Consoles for the two Cortex-M4 coprocessors. + +Use ``tio`` or another terminal emulator to connect to the consoles: + +.. code-block:: bash + + $ tio /dev/pts/55 + $ tio /dev/pts/56 + $ tio /dev/pts/57 + + Aspeed minibmc family boards (``ast1030-evb``) ================================================================== @@ -257,51 +477,3 @@ To boot a kernel directly from a Zephyr build tree: $ qemu-system-arm -M ast1030-evb -nographic \ -kernel zephyr.elf - -Facebook Yosemite v3.5 Platform and CraterLake Server (``fby35``) -================================================================== - -Facebook has a series of multi-node compute server designs named -Yosemite. The most recent version released was -`Yosemite v3 `__. - -Yosemite v3.5 is an iteration on this design, and is very similar: there's a -baseboard with a BMC, and 4 server slots. The new server board design termed -"CraterLake" includes a Bridge IC (BIC), with room for expansion boards to -include various compute accelerators (video, inferencing, etc). At the moment, -only the first server slot's BIC is included. - -Yosemite v3.5 is itself a sled which fits into a 40U chassis, and 3 sleds -can be fit into a chassis. See `here `__ -for an example. - -In this generation, the BMC is an AST2600 and each BIC is an AST1030. The BMC -runs `OpenBMC `__, and the BIC runs -`OpenBIC `__. 
- -Firmware images can be retrieved from the Github releases or built from the -source code, see the README's for instructions on that. This image uses the -"fby35" machine recipe from OpenBMC, and the "yv35-cl" target from OpenBIC. -Some reference images can also be found here: - -.. code-block:: bash - - $ wget https://github.com/facebook/openbmc/releases/download/openbmc-e2294ff5d31d/fby35.mtd - $ wget https://github.com/peterdelevoryas/OpenBIC/releases/download/oby35-cl-2022.13.01/Y35BCL.elf - -Since this machine has multiple SoC's, each with their own serial console, the -recommended way to run it is to allocate a pseudoterminal for each serial -console and let the monitor use stdio. Also, starting in a paused state is -useful because it allows you to attach to the pseudoterminals before the boot -process starts. - -.. code-block:: bash - - $ qemu-system-arm -machine fby35 \ - -drive file=fby35.mtd,format=raw,if=mtd \ - -device loader,file=Y35BCL.elf,addr=0,cpu-num=2 \ - -serial pty -serial pty -serial mon:stdio \ - -display none -S - $ screen /dev/tty0 # In a separate TMUX pane, terminal window, etc. - $ screen /dev/tty1 - $ (qemu) c # Start the boot process once screen is setup. diff --git a/docs/system/arm/bananapi_m2u.rst b/docs/system/arm/bananapi_m2u.rst index 587b4886553..03cc5618c38 100644 --- a/docs/system/arm/bananapi_m2u.rst +++ b/docs/system/arm/bananapi_m2u.rst @@ -125,16 +125,15 @@ And then boot it. $ qemu-system-arm -M bpim2u -nographic -sd sd.img -Banana Pi M2U integration tests -""""""""""""""""""""""""""""""" +Banana Pi M2U functional tests +"""""""""""""""""""""""""""""" -The Banana Pi M2U machine has several integration tests included. +The Banana Pi M2U machine has several functional tests included. To run the whole set of tests, build QEMU from source and simply provide the following command: .. code-block:: bash $ cd qemu-build-dir - $ AVOCADO_ALLOW_LARGE_STORAGE=yes tests/venv/bin/avocado \ - --verbose --show=app,console run -t machine:bpim2u \ - ../tests/avocado/boot_linux_console.py + $ QEMU_TEST_ALLOW_LARGE_STORAGE=1 \ + pyvenv/bin/meson test --suite thorough func-arm-arm_bpim2u diff --git a/docs/system/arm/cpu-features.rst b/docs/system/arm/cpu-features.rst index a5fb929243c..37d5dfd15b3 100644 --- a/docs/system/arm/cpu-features.rst +++ b/docs/system/arm/cpu-features.rst @@ -219,8 +219,11 @@ Below is the list of TCG VCPU features and their descriptions. ``pauth-qarma3`` When ``pauth`` is enabled, select the architected QARMA3 algorithm. -Without either ``pauth-impdef`` or ``pauth-qarma3`` enabled, -the architected QARMA5 algorithm is used. The architected QARMA5 +``pauth-qarma5`` + When ``pauth`` is enabled, select the architected QARMA5 algorithm. + +Without ``pauth-impdef``, ``pauth-qarma3`` or ``pauth-qarma5`` enabled, +the QEMU impdef algorithm is used. The architected QARMA5 and QARMA3 algorithms have good cryptographic properties, but can be quite slow to emulate. The impdef algorithm used by QEMU is non-cryptographic but significantly faster. 
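A minimal sketch of how these properties are combined on the command line (the ``virt`` board is used only as an example here, and a kernel or firmware image would normally be added to the invocation):

.. code-block:: bash

    # Fast, non-cryptographic pointer-authentication algorithm under TCG
    $ qemu-system-aarch64 -M virt -nographic -cpu max,pauth=on,pauth-impdef=on

    # Architected QARMA3 (or QARMA5) algorithm instead, slower to emulate
    $ qemu-system-aarch64 -M virt -nographic -cpu max,pauth=on,pauth-qarma3=on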
diff --git a/docs/system/arm/cubieboard.rst b/docs/system/arm/cubieboard.rst index 58c4a2d3ea6..90d24c73a13 100644 --- a/docs/system/arm/cubieboard.rst +++ b/docs/system/arm/cubieboard.rst @@ -15,4 +15,5 @@ Emulated devices: - USB controller - SATA controller - TWI (I2C) controller +- SPI controller - Watchdog timer diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst index 3ab6e726679..890dc6fee21 100644 --- a/docs/system/arm/emulation.rst +++ b/docs/system/arm/emulation.rst @@ -3,8 +3,8 @@ A-profile CPU architecture support ================================== -QEMU's TCG emulation includes support for the Armv5, Armv6, Armv7 and -Armv8 versions of the A-profile architecture. It also has support for +QEMU's TCG emulation includes support for the Armv5, Armv6, Armv7, +Armv8 and Armv9 versions of the A-profile architecture. It also has support for the following architecture extensions: - FEAT_AA32BF16 (AArch32 BFloat16 instructions) @@ -20,12 +20,14 @@ the following architecture extensions: - FEAT_AA64EL3 (Support for AArch64 at EL3) - FEAT_AdvSIMD (Advanced SIMD Extension) - FEAT_AES (AESD and AESE instructions) +- FEAT_AFP (Alternate floating-point behavior) - FEAT_Armv9_Crypto (Armv9 Cryptographic Extension) - FEAT_ASID16 (16 bit ASID) - FEAT_BBM at level 2 (Translation table break-before-make levels) - FEAT_BF16 (AArch64 BFloat16 instructions) - FEAT_BTI (Branch Target Identification) - FEAT_CCIDX (Extended cache index) +- FEAT_CMOW (Control for cache maintenance permission) - FEAT_CRC32 (CRC32 instructions) - FEAT_Crypto (Cryptographic Extension) - FEAT_CSV2 (Cache speculation variant 2) @@ -36,6 +38,7 @@ the following architecture extensions: - FEAT_CSV3 (Cache speculation variant 3) - FEAT_DGH (Data gathering hint) - FEAT_DIT (Data Independent Timing instructions) +- FEAT_DoubleLock (Double Lock) - FEAT_DPB (DC CVAP instruction) - FEAT_DPB2 (DC CVADP instruction) - FEAT_Debugv8p1 (Debug with VHE) @@ -45,6 +48,7 @@ the following architecture extensions: - FEAT_DotProd (Advanced SIMD dot product instructions) - FEAT_DoubleFault (Double Fault Extension) - FEAT_E0PD (Preventing EL0 access to halves of address maps) +- FEAT_EBF16 (AArch64 Extended BFloat16 instructions) - FEAT_ECV (Enhanced Counter Virtualization) - FEAT_EL0 (Support for execution at EL0) - FEAT_EL1 (Support for execution at EL1) @@ -86,12 +90,13 @@ the following architecture extensions: - FEAT_LSE2 (Large System Extensions v2) - FEAT_LVA (Large Virtual Address space) - FEAT_MixedEnd (Mixed-endian support) -- FEAT_MixdEndEL0 (Mixed-endian support at EL0) +- FEAT_MixedEndEL0 (Mixed-endian support at EL0) - FEAT_MOPS (Standardization of memory operations) - FEAT_MTE (Memory Tagging Extension) - FEAT_MTE2 (Memory Tagging Extension) - FEAT_MTE3 (MTE Asymmetric Fault Handling) - FEAT_MTE_ASYM_FAULT (Memory tagging asymmetric faults) +- FEAT_MTE_ASYNC (Asynchronous reporting of Tag Check Fault) - FEAT_NMI (Non-maskable Interrupt) - FEAT_NV (Nested Virtualization) - FEAT_NV2 (Enhanced nested virtualization support) @@ -113,6 +118,7 @@ the following architecture extensions: - FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions) - FEAT_RME (Realm Management Extension) (NB: support status in QEMU is experimental) - FEAT_RNG (Random number generator) +- FEAT_RPRES (Increased precision of FRECPE and FRSQRTE) - FEAT_S2FWB (Stage 2 forced Write-Back) - FEAT_SB (Speculation Barrier) - FEAT_SEL2 (Secure EL2) @@ -123,18 +129,25 @@ the following architecture extensions: - FEAT_SM3 (Advanced SIMD 
SM3 instructions) - FEAT_SM4 (Advanced SIMD SM4 instructions) - FEAT_SME (Scalable Matrix Extension) +- FEAT_SME2 (Scalable Matrix Extension version 2) +- FEAT_SME2p1 (Scalable Matrix Extension version 2.1) +- FEAT_SME_B16B16 (Non-widening BFloat16 arithmetic for SME2) - FEAT_SME_FA64 (Full A64 instruction set in Streaming SVE mode) +- FEAT_SME_F16F16 (Non-widening half-precision FP16 arithmetic for SME2) - FEAT_SME_F64F64 (Double-precision floating-point outer product instructions) - FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions) - FEAT_SVE (Scalable Vector Extension) - FEAT_SVE_AES (Scalable Vector AES instructions) +- FEAT_SVE_B16B16 (Non-widening BFloat16 arithmetic for SVE2) - FEAT_SVE_BitPerm (Scalable Vector Bit Permutes instructions) - FEAT_SVE_PMULL128 (Scalable Vector PMULL instructions) - FEAT_SVE_SHA3 (Scalable Vector SHA3 instructions) - FEAT_SVE_SM4 (Scalable Vector SM4 instructions) - FEAT_SVE2 (Scalable Vector Extension version 2) +- FEAT_SVE2p1 (Scalable Vector Extension version 2.1) - FEAT_SPECRES (Speculation restriction instructions) - FEAT_SSBS (Speculative Store Bypass Safe) +- FEAT_SSBS2 (MRS and MSR instructions for SSBS version 2) - FEAT_TGran16K (Support for 16KB memory translation granule size at stage 1) - FEAT_TGran4K (Support for 4KB memory translation granule size at stage 1) - FEAT_TGran64K (Support for 64KB memory translation granule size at stage 1) @@ -149,9 +162,10 @@ the following architecture extensions: - FEAT_VMID16 (16-bit VMID) - FEAT_WFxT (WFE and WFI instructions with timeout) - FEAT_XNX (Translation table stage 2 Unprivileged Execute-never) +- FEAT_XS (XS attribute) For information on the specifics of these extensions, please refer -to the `Armv8-A Arm Architecture Reference Manual +to the `Arm Architecture Reference Manual for A-profile architecture `_. When a specific named CPU is being emulated, only those features which diff --git a/docs/system/arm/exynos.rst b/docs/system/arm/exynos.rst new file mode 100644 index 00000000000..86894bc02b7 --- /dev/null +++ b/docs/system/arm/exynos.rst @@ -0,0 +1,9 @@ +Exynos4 boards (``nuri``, ``smdkc210``) +======================================= + +These are machines which use the Samsung Exynos4210 SoC, which has Cortex-A9 CPUs. + +``nuri`` models the Samsung NURI board. + +``smdkc210`` models the Samsung SMDKC210 board. + diff --git a/docs/system/arm/fby35.rst b/docs/system/arm/fby35.rst new file mode 100644 index 00000000000..e19274e75c8 --- /dev/null +++ b/docs/system/arm/fby35.rst @@ -0,0 +1,52 @@ +Facebook Yosemite v3.5 Platform and CraterLake Server (``fby35``) +================================================================== + +Facebook has a series of multi-node compute server designs named +Yosemite. The most recent version released was +`Yosemite v3 `__. + +Yosemite v3.5 is an iteration on this design, and is very similar: there's a +baseboard with a BMC, and 4 server slots. The new server board design termed +"CraterLake" includes a Bridge IC (BIC), with room for expansion boards to +include various compute accelerators (video, inferencing, etc). At the moment, +only the first server slot's BIC is included. + +Yosemite v3.5 is itself a sled which fits into a 40U chassis, and 3 sleds +can be fit into a chassis. See `here `__ +for an example. + +In this generation, the BMC is an AST2600 and each BIC is an AST1030. The BMC +runs `OpenBMC `__, and the BIC runs +`OpenBIC `__. 
+ +Firmware images can be retrieved from the Github releases or built from the +source code, see the README's for instructions on that. This image uses the +"fby35" machine recipe from OpenBMC, and the "yv35-cl" target from OpenBIC. +Some reference images can also be found here: + +.. code-block:: bash + + $ wget https://github.com/facebook/openbmc/releases/download/openbmc-e2294ff5d31d/fby35.mtd + $ wget https://github.com/peterdelevoryas/OpenBIC/releases/download/oby35-cl-2022.13.01/Y35BCL.elf + +Since this machine has multiple SoC's, each with their own serial console, the +recommended way to run it is to allocate a pseudoterminal for each serial +console and let the monitor use stdio. Also, starting in a paused state is +useful because it allows you to attach to the pseudoterminals before the boot +process starts. + +.. code-block:: bash + + $ qemu-system-arm -machine fby35 \ + -drive file=fby35.mtd,format=raw,if=mtd \ + -device loader,file=Y35BCL.elf,addr=0,cpu-num=2 \ + -serial pty -serial pty -serial mon:stdio \ + -display none -S + $ screen /dev/tty0 # In a separate TMUX pane, terminal window, etc. + $ screen /dev/tty1 + $ (qemu) c # Start the boot process once screen is setup. + +This machine model supports emulation of the boot from the CE0 flash device by +setting option ``execute-in-place``. When using this option, the CPU fetches +instructions to execute by reading CE0 and not from a preloaded ROM +initialized at machine init time. As a result, execution will be slower. diff --git a/docs/system/arm/gumstix.rst b/docs/system/arm/gumstix.rst deleted file mode 100644 index cb373139dcb..00000000000 --- a/docs/system/arm/gumstix.rst +++ /dev/null @@ -1,21 +0,0 @@ -Gumstix Connex and Verdex (``connex``, ``verdex``) -================================================== - -These machines model the Gumstix Connex and Verdex boards. -The Connex has a PXA255 CPU and the Verdex has a PXA270. - -Implemented devices: - - * NOR flash - * SMC91C111 ethernet - * Interrupt controller - * DMA - * Timer - * GPIO - * MMC/SD card - * Fast infra-red communications port (FIR) - * LCD controller - * Synchronous serial ports (SPI) - * PCMCIA interface - * I2C - * I2S diff --git a/docs/system/arm/imx8mp-evk.rst b/docs/system/arm/imx8mp-evk.rst new file mode 100644 index 00000000000..b2f7d29ade5 --- /dev/null +++ b/docs/system/arm/imx8mp-evk.rst @@ -0,0 +1,62 @@ +NXP i.MX 8M Plus Evaluation Kit (``imx8mp-evk``) +================================================ + +The ``imx8mp-evk`` machine models the i.MX 8M Plus Evaluation Kit, based on an +i.MX 8M Plus SoC. + +Supported devices +----------------- + +The ``imx8mp-evk`` machine implements the following devices: + + * Up to 4 Cortex-A53 cores + * Generic Interrupt Controller (GICv3) + * 4 UARTs + * 3 USDHC Storage Controllers + * 1 Designware PCI Express Controller + * 1 Ethernet Controller + * 2 Designware USB 3 Controllers + * 5 GPIO Controllers + * 6 I2C Controllers + * 3 SPI Controllers + * 3 Watchdogs + * 6 General Purpose Timers + * Secure Non-Volatile Storage (SNVS) including an RTC + * Clock Tree + +Boot options +------------ + +The ``imx8mp-evk`` machine can start a Linux kernel directly using the standard +``-kernel`` functionality. + +Direct Linux Kernel Boot +'''''''''''''''''''''''' + +Probably the easiest way to get started with a whole Linux system on the machine +is to generate an image with Buildroot. Version 2024.11.1 is tested at the time +of writing and involves two steps. 
First run the following commands in the +toplevel directory of the Buildroot source tree: + +.. code-block:: bash + + $ make freescale_imx8mpevk_defconfig + $ make + +Once finished successfully, there is an ``output/images`` subfolder. Navigate into +it and resize the SD card image to a power of two: + +.. code-block:: bash + + $ qemu-img resize sdcard.img 256M + +Now that everything is prepared the machine can be started as follows: + +.. code-block:: bash + + $ qemu-system-aarch64 -M imx8mp-evk -smp 4 -m 3G \ + -display none -serial null -serial stdio \ + -kernel Image \ + -dtb imx8mp-evk.dtb \ + -append "root=/dev/mmcblk2p2" \ + -drive file=sdcard.img,if=sd,bus=2,format=raw,id=mmcblk2 diff --git a/docs/system/arm/mainstone.rst b/docs/system/arm/mainstone.rst deleted file mode 100644 index 05310f42c7f..00000000000 --- a/docs/system/arm/mainstone.rst +++ /dev/null @@ -1,25 +0,0 @@ -Intel Mainstone II board (``mainstone``) -======================================== - -The ``mainstone`` board emulates the Intel Mainstone II development -board, which uses a PXA270 CPU. - -Emulated devices: - -- Flash memory -- Keypad -- MMC controller -- 91C111 ethernet -- PIC -- Timer -- DMA -- GPIO -- FIR -- Serial -- LCD controller -- SSP -- USB controller -- RTC -- PCMCIA -- I2C -- I2S diff --git a/docs/system/arm/max78000.rst b/docs/system/arm/max78000.rst new file mode 100644 index 00000000000..3d95011fefd --- /dev/null +++ b/docs/system/arm/max78000.rst @@ -0,0 +1,37 @@ +.. SPDX-License-Identifier: GPL-2.0-or-later + +Analog Devices max78000 board (``max78000fthr``) +================================================ + +The max78000 is a Cortex-M4 based SOC with a RISC-V coprocessor. The RISC-V coprocessor is not supported. + +Supported devices +----------------- + + * Instruction Cache Controller + * UART + * Global Control Register + * True Random Number Generator + * AES + +Notable unsupported devices +--------------------------- + + * I2C + * CNN + * CRC + * SPI + +Boot options +------------ + +The max78000 can be started using the ``-kernel`` option to load a +firmware at address 0 as the ROM. As the ROM normally jumps to software loaded +from the internal flash at address 0x10000000, loading your program there is +generally advisable. If you don't have a copy of the ROM, the interrupt +vector table from user firmware will do. +Example: + +.. code-block:: bash + + $ qemu-system-arm -machine max78000fthr -kernel max78000.bin -device loader,file=max78000.bin,addr=0x10000000 diff --git a/docs/system/arm/mcimx6ul-evk.rst b/docs/system/arm/mcimx6ul-evk.rst new file mode 100644 index 00000000000..8871138ab3e --- /dev/null +++ b/docs/system/arm/mcimx6ul-evk.rst @@ -0,0 +1,5 @@ +NXP MCIMX6UL-EVK (``mcimx6ul-evk``) +=================================== + +The ``mcimx6ul-evk`` machine models the NXP i.MX6UltraLite Evaluation Kit +MCIMX6UL-EVK development board. It has a single Cortex-A7 CPU. diff --git a/docs/system/arm/mcimx7d-sabre.rst b/docs/system/arm/mcimx7d-sabre.rst new file mode 100644 index 00000000000..c5d35af1d44 --- /dev/null +++ b/docs/system/arm/mcimx7d-sabre.rst @@ -0,0 +1,5 @@ +NXP MCIMX7D Sabre (``mcimx7d-sabre``) +===================================== + +The ``mcimx7d-sabre`` machine models the NXP SABRE Board MCIMX7SABRE, +based on an i.MX7Dual SoC. 
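Neither of the two i.MX machine descriptions above includes a boot example. As a minimal sketch, assuming a kernel ``zImage``, a matching device tree and an initramfs built for the i.MX6UltraLite EVK (the file names below are placeholders, not files shipped with QEMU), a Linux kernel can be booted directly on the ``mcimx6ul-evk`` machine:

.. code-block:: bash

    # zImage, imx6ul-14x14-evk.dtb and rootfs.cpio are assumed build artefacts
    $ qemu-system-arm -M mcimx6ul-evk \
        -kernel zImage -dtb imx6ul-14x14-evk.dtb \
        -initrd rootfs.cpio \
        -append "console=ttymxc0" \
        -nographic

The ``mcimx7d-sabre`` machine can be started the same way with a kernel and device tree built for the i.MX7D SABRE board.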
diff --git a/docs/system/arm/nseries.rst b/docs/system/arm/nseries.rst deleted file mode 100644 index cd9edf5d88b..00000000000 --- a/docs/system/arm/nseries.rst +++ /dev/null @@ -1,33 +0,0 @@ -Nokia N800 and N810 tablets (``n800``, ``n810``) -================================================ - -Nokia N800 and N810 internet tablets (known also as RX-34 and RX-44 / -48) emulation supports the following elements: - -- Texas Instruments OMAP2420 System-on-chip (ARM1136 core) - -- RAM and non-volatile OneNAND Flash memories - -- Display connected to EPSON remote framebuffer chip and OMAP on-chip - display controller and a LS041y3 MIPI DBI-C controller - -- TI TSC2301 (in N800) and TI TSC2005 (in N810) touchscreen - controllers driven through SPI bus - -- National Semiconductor LM8323-controlled qwerty keyboard driven - through |I2C| bus - -- Secure Digital card connected to OMAP MMC/SD host - -- Three OMAP on-chip UARTs and on-chip STI debugging console - -- Mentor Graphics \"Inventra\" dual-role USB controller embedded in a - TI TUSB6010 chip - only USB host mode is supported - -- TI TMP105 temperature sensor driven through |I2C| bus - -- TI TWL92230C power management companion with an RTC on - |I2C| bus - -- Nokia RETU and TAHVO multi-purpose chips with an RTC, connected - through CBUS diff --git a/docs/system/arm/nuvoton.rst b/docs/system/arm/nuvoton.rst index 0424cae4b01..e4827fb43a1 100644 --- a/docs/system/arm/nuvoton.rst +++ b/docs/system/arm/nuvoton.rst @@ -1,12 +1,13 @@ -Nuvoton iBMC boards (``*-bmc``, ``npcm750-evb``, ``quanta-gsj``) -================================================================ +Nuvoton iBMC boards (``kudo-bmc``, ``mori-bmc``, ``npcm750-evb``, ``quanta-gbs-bmc``, ``quanta-gsj``, ``npcm845-evb``) +====================================================================================================================== -The `Nuvoton iBMC`_ chips (NPCM7xx) are a family of ARM-based SoCs that are +The `Nuvoton iBMC`_ chips are a family of Arm-based SoCs that are designed to be used as Baseboard Management Controllers (BMCs) in various -servers. They all feature one or two ARM Cortex-A9 CPU cores, as well as an -assortment of peripherals targeted for either Enterprise or Data Center / -Hyperscale applications. The former is a superset of the latter, so NPCM750 has -all the peripherals of NPCM730 and more. +servers. Currently there are two families: NPCM7XX series and +NPCM8XX series. NPCM7XX series feature one or two Arm Cortex-A9 CPU cores, +while NPCM8XX feature 4 Arm Cortex-A35 CPU cores. Both series contain a +different assortment of peripherals targeted for either Enterprise or Data +Center / Hyperscale applications. .. _Nuvoton iBMC: https://www.nuvoton.com/products/cloud-computing/ibmc/ @@ -27,6 +28,11 @@ There are also two more SoCs, NPCM710 and NPCM705, which are single-core variants of NPCM750 and NPCM730, respectively. These are currently not supported by QEMU. +The NPCM8xx SoC is the successor of the NPCM7xx SoC. It has 4 Cortex-A35 cores. 
+The following machines are based on this chip : + +- ``npcm845-evb`` Nuvoton NPCM845 Evaluation board + Supported devices ----------------- @@ -62,6 +68,8 @@ Missing devices * System Wake-up Control (SWC) * Shared memory (SHM) * eSPI slave interface + * Block-transfer interface (8XX only) + * Virtual UART (8XX only) * Ethernet controller (GMAC) * USB device (USBD) @@ -76,6 +84,11 @@ Missing devices * Video capture * Encoding compression engine * Security features + * I3C buses (8XX only) + * Temperature sensor interface (8XX only) + * Virtual UART (8XX only) + * Flash monitor (8XX only) + * JTAG master (8XX only) Boot options ------------ diff --git a/docs/system/arm/orangepi.rst b/docs/system/arm/orangepi.rst index 9afa54213b0..d81f6c3bfd2 100644 --- a/docs/system/arm/orangepi.rst +++ b/docs/system/arm/orangepi.rst @@ -119,7 +119,7 @@ Orange Pi PC images Note that the mainline kernel does not have a root filesystem. You may provide it with an official Orange Pi PC image from the official website: - http://www.orangepi.org/downloadresources/ + http://www.orangepi.org/html/serviceAndSupport/index.html Another possibility is to run an Armbian image for Orange Pi PC which can be downloaded from: @@ -213,7 +213,7 @@ including the Orange Pi PC. NetBSD 9.0 is known to work best for the Orange Pi P board and provides a fully working system with serial console, networking and storage. For the Orange Pi PC machine, get the 'evbarm-earmv7hf' based image from: - https://cdn.netbsd.org/pub/NetBSD/NetBSD-9.0/evbarm-earmv7hf/binary/gzimg/armv7.img.gz + https://archive.netbsd.org/pub/NetBSD-archive/NetBSD-9.0/evbarm-earmv7hf/binary/gzimg/armv7.img.gz The image requires manually installing U-Boot in the image. Build U-Boot with the orangepi_pc_defconfig configuration as described in the previous section. @@ -252,14 +252,14 @@ and set the following environment variables before booting: Optionally you may save the environment variables to SD card with 'saveenv'. To continue booting simply give the 'boot' command and NetBSD boots. -Orange Pi PC integration tests -"""""""""""""""""""""""""""""" +Orange Pi PC functional tests +""""""""""""""""""""""""""""" -The Orange Pi PC machine has several integration tests included. +The Orange Pi PC machine has several functional tests included. To run the whole set of tests, build QEMU from source and simply -provide the following command: +provide the following command from the build directory: .. 
code-block:: bash - $ AVOCADO_ALLOW_LARGE_STORAGE=yes avocado --show=app,console run \ - -t machine:orangepi-pc tests/avocado/boot_linux_console.py + $ QEMU_TEST_ALLOW_LARGE_STORAGE=1 \ + pyvenv/bin/meson test --suite thorough func-arm-arm_orangepi diff --git a/docs/system/arm/palm.rst b/docs/system/arm/palm.rst deleted file mode 100644 index 61bc8d34f40..00000000000 --- a/docs/system/arm/palm.rst +++ /dev/null @@ -1,23 +0,0 @@ -Palm Tungsten|E PDA (``cheetah``) -================================= - -The Palm Tungsten|E PDA (codename \"Cheetah\") emulation includes the -following elements: - -- Texas Instruments OMAP310 System-on-chip (ARM925T core) - -- ROM and RAM memories (ROM firmware image can be loaded with - -option-rom) - -- On-chip LCD controller - -- On-chip Real Time Clock - -- TI TSC2102i touchscreen controller / analog-digital converter / - Audio CODEC, connected through MicroWire and |I2S| buses - -- GPIO-connected matrix keypad - -- Secure Digital card connected to OMAP MMC/SD host - -- Three on-chip UARTs diff --git a/docs/system/arm/stm32.rst b/docs/system/arm/stm32.rst index 3b640f3ee07..511e3eb9ac1 100644 --- a/docs/system/arm/stm32.rst +++ b/docs/system/arm/stm32.rst @@ -1,5 +1,5 @@ -STMicroelectronics STM32 boards (``netduino2``, ``netduinoplus2``, ``stm32vldiscovery``) -======================================================================================== +STMicroelectronics STM32 boards (``netduino2``, ``netduinoplus2``, ``olimex-stm32-h405``, ``stm32vldiscovery``) +=============================================================================================================== The `STM32`_ chips are a family of 32-bit ARM-based microcontroller by STMicroelectronics. @@ -36,6 +36,7 @@ Supported devices * SPI controller * System configuration (SYSCFG) * Timer controller (TIMER) + * Reset and Clock Controller (RCC) (STM32F4 only, reset and enable only) Missing devices --------------- @@ -53,7 +54,7 @@ Missing devices * Power supply configuration (PWR) * Random Number Generator (RNG) * Real-Time Clock (RTC) controller - * Reset and Clock Controller (RCC) + * Reset and Clock Controller (RCC) (other features than reset and enable) * Secure Digital Input/Output (SDIO) interface * USB OTG * Watchdog controller (IWDG, WWDG) diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst index e67e7f0f7c5..10cbffc8a70 100644 --- a/docs/system/arm/virt.rst +++ b/docs/system/arm/virt.rst @@ -1,3 +1,5 @@ +.. _arm-virt: + 'virt' generic virtual platform (``virt``) ========================================== @@ -19,12 +21,17 @@ of the 5.0 release and ``virt-5.0`` of the 5.1 release. Migration is not guaranteed to work between different QEMU releases for the non-versioned ``virt`` machine type. +VM migration is not guaranteed when using ``-cpu max``, as features +supported may change between QEMU versions. To ensure your VM can be +migrated, it is recommended to use another cpu model instead. + Supported devices """"""""""""""""" The virt board supports: - PCI/PCIe devices +- CXL Fixed memory windows, root bridges and devices. 
- Flash memory - Either one or two PL011 UARTs for the NonSecure World - An RTC @@ -64,11 +71,11 @@ Supported guest CPU types: - ``cortex-a76`` (64-bit) - ``cortex-a710`` (64-bit) - ``a64fx`` (64-bit) -- ``host`` (with KVM only) +- ``host`` (with KVM and HVF only) - ``neoverse-n1`` (64-bit) - ``neoverse-v1`` (64-bit) - ``neoverse-n2`` (64-bit) -- ``max`` (same as ``host`` for KVM; best possible emulation with TCG) +- ``max`` (same as ``host`` for KVM and HVF; best possible emulation with TCG) Note that the default is ``cortex-a15``, so for an AArch64 guest you must specify a CPU type. @@ -138,6 +145,10 @@ highmem-mmio Set ``on``/``off`` to enable/disable the high memory region for PCI MMIO. The default is ``on``. +highmem-mmio-size + Set the high memory region size for PCI MMIO. Must be a power of 2 and + greater than or equal to the default size (512G). + gic-version Specify the version of the Generic Interrupt Controller (GIC) to provide. Valid values are: @@ -167,10 +178,26 @@ iommu ``smmuv3`` Create an SMMUv3 +default-bus-bypass-iommu + Set ``on``/``off`` to enable/disable `bypass_iommu + `_ + for default root bus. + ras Set ``on``/``off`` to enable/disable reporting host memory errors to a guest using ACPI and guest external abort exceptions. The default is off. +acpi + Set ``on``/``off``/``auto`` to enable/disable ACPI. + +cxl + Set ``on``/``off`` to enable/disable CXL. More details in + :doc:`../devices/cxl`. The default is off. + +cxl-fmw + Array of CXL fixed memory windows describing fixed address routing to + target CXL host bridges. See :doc:`../devices/cxl`. + dtb-randomness Set ``on``/``off`` to pass random seeds via the guest DTB rng-seed and kaslr-seed nodes (in both "/chosen" and @@ -184,6 +211,14 @@ dtb-randomness dtb-kaslr-seed A deprecated synonym for dtb-randomness. +x-oem-id + Set string (up to 6 bytes) to override the default value of field OEMID in ACPI + table header. + +x-oem-table-id + Set string (up to 8 bytes) to override the default value of field OEM Table ID + in ACPI table header. + Linux guest kernel configuration """""""""""""""""""""""""""""""" diff --git a/docs/system/arm/vmapple.rst b/docs/system/arm/vmapple.rst new file mode 100644 index 00000000000..35c329ea5a8 --- /dev/null +++ b/docs/system/arm/vmapple.rst @@ -0,0 +1,65 @@ +.. SPDX-License-Identifier: GPL-2.0-or-later + +VMApple machine emulation +======================================================================================== + +VMApple is the device model that the macOS built-in hypervisor called "Virtualization.framework" +exposes to Apple Silicon macOS guests. The "vmapple" machine model in QEMU implements the same +device model, but does not use any code from Virtualization.Framework. + +Prerequisites +------------- + +To run the vmapple machine model, you need to + + * Run on Apple Silicon + * Run on macOS 12.0 or above + * Have an already installed copy of a Virtualization.Framework macOS 12 virtual + machine. Note that newer versions than 12.x are currently NOT supported on + the guest side. I will assume that you installed it using the + `macosvm `__ CLI. + +First, we need to extract the UUID from the virtual machine that you installed. You can do this +by running the shell script in contrib/vmapple/uuid.sh on the macosvm.json file. + +.. code-block:: bash + :caption: uuid.sh script to extract the UUID from a macosvm.json file + + $ contrib/vmapple/uuid.sh "path/to/macosvm.json" + +Now we also need to trim the aux partition. It contains metadata that we can just discard: + +.. 
code-block:: bash + :caption: Command to trim the aux file + + $ dd if="aux.img" of="aux.img.trimmed" bs=$(( 0x4000 )) skip=1 + +How to run +---------- + +Then, we can launch QEMU with the Virtualization.Framework pre-boot environment and the readily +installed target disk images. I recommend to port forward the VM's ssh and vnc ports to the host +to get better interactive access into the target system: + +.. code-block:: bash + :caption: Example execution command line + + $ UUID="$(contrib/vmapple/uuid.sh 'macosvm.json')" + $ AVPBOOTER="/System/Library/Frameworks/Virtualization.framework/Resources/AVPBooter.vmapple2.bin" + $ AUX="aux.img.trimmed" + $ DISK="disk.img" + $ qemu-system-aarch64 \ + -serial mon:stdio \ + -m 4G \ + -accel hvf \ + -M vmapple,uuid="$UUID" \ + -bios "$AVPBOOTER" \ + -drive file="$AUX",if=pflash,format=raw \ + -drive file="$DISK",if=pflash,format=raw \ + -drive file="$AUX",if=none,id=aux,format=raw \ + -drive file="$DISK",if=none,id=root,format=raw \ + -device vmapple-virtio-blk-pci,variant=aux,drive=aux \ + -device vmapple-virtio-blk-pci,variant=root,drive=root \ + -netdev user,id=net0,ipv6=off,hostfwd=tcp::2222-:22,hostfwd=tcp::5901-:5900 \ + -device virtio-net-pci,netdev=net0 + diff --git a/docs/system/arm/xlnx-versal-virt.rst b/docs/system/arm/xlnx-versal-virt.rst index 0bafc76469d..c5f35f28e4f 100644 --- a/docs/system/arm/xlnx-versal-virt.rst +++ b/docs/system/arm/xlnx-versal-virt.rst @@ -178,6 +178,9 @@ Run the following at the U-Boot prompt: fdt set /chosen/dom0 reg <0x00000000 0x40000000 0x0 0x03100000> booti 30000000 - 20000000 +It's possible to change the OSPI flash model emulated by using the machine model +option ``ospi-flash``. + BBRAM File Backend """""""""""""""""" BBRAM can have an optional file backend, which must be a seekable diff --git a/docs/system/arm/xlnx-zcu102.rst b/docs/system/arm/xlnx-zcu102.rst new file mode 100644 index 00000000000..534cd1dc887 --- /dev/null +++ b/docs/system/arm/xlnx-zcu102.rst @@ -0,0 +1,19 @@ +Xilinx ZynqMP ZCU102 (``xlnx-zcu102``) +====================================== + +The ``xlnx-zcu102`` board models the Xilinx ZynqMP ZCU102 board. +This board has 4 Cortex-A53 CPUs and 2 Cortex-R5F CPUs. + +Machine-specific options +"""""""""""""""""""""""" + +The following machine-specific options are supported: + +secure + Set ``on``/``off`` to enable/disable emulating a guest CPU which implements the + Arm Security Extensions (TrustZone). The default is ``off``. + +virtualization + Set ``on``/``off`` to enable/disable emulating a guest CPU which implements the + Arm Virtualization Extensions. The default is ``off``. + diff --git a/docs/system/arm/xscale.rst b/docs/system/arm/xscale.rst deleted file mode 100644 index e239136c3c7..00000000000 --- a/docs/system/arm/xscale.rst +++ /dev/null @@ -1,35 +0,0 @@ -Sharp XScale-based PDA models (``akita``, ``borzoi``, ``spitz``, ``terrier``, ``tosa``) -======================================================================================= - -The Sharp Zaurus are PDAs based on XScale, able to run Linux ('SL series'). - -The SL-6000 (\"Tosa\"), released in 2005, uses a PXA255 System-on-chip. - -The SL-C3000 (\"Spitz\"), SL-C1000 (\"Akita\"), SL-C3100 (\"Borzoi\") and -SL-C3200 (\"Terrier\") use a PXA270. 
- -The clamshell PDA models emulation includes the following peripherals: - -- Intel PXA255/PXA270 System-on-chip (ARMv5TE core) - -- NAND Flash memory - not in \"Tosa\" - -- IBM/Hitachi DSCM microdrive in a PXA PCMCIA slot - not in \"Akita\" - -- On-chip OHCI USB controller - not in \"Tosa\" - -- On-chip LCD controller - -- On-chip Real Time Clock - -- TI ADS7846 touchscreen controller on SSP bus - -- Maxim MAX1111 analog-digital converter on |I2C| bus - -- GPIO-connected keyboard controller and LEDs - -- Secure Digital card connected to PXA MMC/SD host - -- Three on-chip UARTs - -- WM8750 audio CODEC on |I2C| and |I2S| buses diff --git a/docs/system/bootindex.rst b/docs/system/bootindex.rst index 8b057f812f2..5e1b33ee22b 100644 --- a/docs/system/bootindex.rst +++ b/docs/system/bootindex.rst @@ -49,10 +49,11 @@ Limitations ----------- Some firmware has limitations on which devices can be considered for -booting. For instance, the PC BIOS boot specification allows only one -disk to be bootable. If boot from disk fails for some reason, the BIOS +booting. For instance, the x86 PC BIOS boot specification allows only one +disk to be bootable. If boot from disk fails for some reason, the x86 BIOS won't retry booting from other disk. It can still try to boot from -floppy or net, though. +floppy or net, though. In the case of s390x BIOS, the BIOS will try up to +8 total devices, any number of which may be disks or virtio-net devices. Sometimes, firmware cannot map the device path QEMU wants firmware to boot from to a boot method. It doesn't happen for devices the firmware diff --git a/docs/system/confidential-guest-support.rst b/docs/system/confidential-guest-support.rst index 0c490dbda2b..66129fbab64 100644 --- a/docs/system/confidential-guest-support.rst +++ b/docs/system/confidential-guest-support.rst @@ -38,6 +38,7 @@ Supported mechanisms Currently supported confidential guest mechanisms are: * AMD Secure Encrypted Virtualization (SEV) (see :doc:`i386/amd-memory-encryption`) +* Intel Trust Domain Extension (TDX) (see :doc:`i386/tdx`) * POWER Protected Execution Facility (PEF) (see :ref:`power-papr-protected-execution-facility-pef`) * s390x Protected Virtualization (PV) (see :doc:`s390x/protvirt`) diff --git a/docs/system/cpu-hotplug.rst b/docs/system/cpu-hotplug.rst index 015ce2b6ec3..cc50937c36c 100644 --- a/docs/system/cpu-hotplug.rst +++ b/docs/system/cpu-hotplug.rst @@ -33,23 +33,23 @@ vCPU hotplug { "return": [ { - "type": "IvyBridge-IBRS-x86_64-cpu", - "vcpus-count": 1, "props": { - "socket-id": 1, - "core-id": 0, + "core-id": 1, + "socket-id": 0, "thread-id": 0 - } + }, + "type": "IvyBridge-IBRS-x86_64-cpu", + "vcpus-count": 1 }, { - "qom-path": "/machine/unattached/device[0]", - "type": "IvyBridge-IBRS-x86_64-cpu", - "vcpus-count": 1, "props": { - "socket-id": 0, "core-id": 0, + "socket-id": 0, "thread-id": 0 - } + }, + "qom-path": "/machine/unattached/device[0]", + "type": "IvyBridge-IBRS-x86_64-cpu", + "vcpus-count": 1 } ] } @@ -58,18 +58,18 @@ vCPU hotplug (4) The ``query-hotpluggable-cpus`` command returns an object for CPUs that are present (containing a "qom-path" member) or which may be hot-plugged (no "qom-path" member). 
From its output in step (3), we - can see that ``IvyBridge-IBRS-x86_64-cpu`` is present in socket 0, - while hot-plugging a CPU into socket 1 requires passing the listed + can see that ``IvyBridge-IBRS-x86_64-cpu`` is present in socket 0 core 0, + while hot-plugging a CPU into socket 0 core 1 requires passing the listed properties to QMP ``device_add``:: - (QEMU) device_add id=cpu-2 driver=IvyBridge-IBRS-x86_64-cpu socket-id=1 core-id=0 thread-id=0 + (QEMU) device_add id=cpu-2 driver=IvyBridge-IBRS-x86_64-cpu socket-id=0 core-id=1 thread-id=0 { "execute": "device_add", "arguments": { - "socket-id": 1, + "core-id": 1, "driver": "IvyBridge-IBRS-x86_64-cpu", "id": "cpu-2", - "core-id": 0, + "socket-id": 0, "thread-id": 0 } } @@ -83,34 +83,32 @@ vCPU hotplug (QEMU) query-cpus-fast { - "execute": "query-cpus-fast", "arguments": {} + "execute": "query-cpus-fast", } { "return": [ { - "qom-path": "/machine/unattached/device[0]", - "target": "x86_64", - "thread-id": 11534, "cpu-index": 0, "props": { - "socket-id": 0, "core-id": 0, + "socket-id": 0, "thread-id": 0 }, - "arch": "x86" + "qom-path": "/machine/unattached/device[0]", + "target": "x86_64", + "thread-id": 28957 }, { - "qom-path": "/machine/peripheral/cpu-2", - "target": "x86_64", - "thread-id": 12106, "cpu-index": 1, "props": { - "socket-id": 1, - "core-id": 0, + "core-id": 1, + "socket-id": 0, "thread-id": 0 }, - "arch": "x86" + "qom-path": "/machine/peripheral/cpu-2", + "target": "x86_64", + "thread-id": 29095 } ] } @@ -123,10 +121,10 @@ From the 'qmp-shell', invoke the QMP ``device_del`` command:: (QEMU) device_del id=cpu-2 { - "execute": "device_del", "arguments": { "id": "cpu-2" } + "execute": "device_del", } { "return": {} diff --git a/docs/system/cpu-models-x86.rst.inc b/docs/system/cpu-models-x86.rst.inc index ba27b5683fb..6a770ca8351 100644 --- a/docs/system/cpu-models-x86.rst.inc +++ b/docs/system/cpu-models-x86.rst.inc @@ -71,6 +71,16 @@ mixture of host CPU models between machines, if live migration compatibility is required, use the newest CPU model that is compatible across all desired hosts. +``ClearwaterForest`` + Intel Xeon Processor (ClearwaterForest, 2025) + +``SierraForest``, ``SierraForest-v2`` + Intel Xeon Processor (SierraForest, 2024), SierraForest-v2 mitigates + the GDS and RFDS vulnerabilities with stepping 3. + +``GraniteRapids``, ``GraniteRapids-v2`` + Intel Xeon Processor (GraniteRapids, 2024) + ``Cascadelake-Server``, ``Cascadelake-Server-noTSX`` Intel Xeon Processor (Cascade Lake, 2019), with "stepping" levels 6 or 7 only. (The Cascade Lake Xeon processor with *stepping 5 is @@ -181,7 +191,7 @@ features are included if using "Host passthrough" or "Host model". CVE-2018-12127, [MSBDS] CVE-2018-12126). This is an MSR (Model-Specific Register) feature rather than a CPUID feature, - so it will not appear in the Linux ``/proc/cpuinfo`` in the host or + therefore it will not appear in the Linux ``/proc/cpuinfo`` in the host or guest. Instead, the host kernel uses it to populate the MDS vulnerability file in ``sysfs``. @@ -189,10 +199,10 @@ features are included if using "Host passthrough" or "Host model". affected} in the ``/sys/devices/system/cpu/vulnerabilities/mds`` file. ``taa-no`` - Recommended to inform that the guest that the host is ``not`` + Recommended to inform the guest that the host is ``not`` vulnerable to CVE-2019-11135, TSX Asynchronous Abort (TAA). 
- This too is an MSR feature, so it does not show up in the Linux + This is also an MSR feature, therefore it does not show up in the Linux ``/proc/cpuinfo`` in the host or guest. It should only be enabled for VMs if the host reports ``Not affected`` @@ -214,7 +224,7 @@ features are included if using "Host passthrough" or "Host model". By disabling TSX, KVM-based guests can avoid paying the price of mitigating TSX-based attacks. - Note that ``tsx-ctrl`` too is an MSR feature, so it does not show + Note that ``tsx-ctrl`` is also an MSR feature, therefore it does not show up in the Linux ``/proc/cpuinfo`` in the host or guest. To validate that Intel TSX is indeed disabled for the guest, there are @@ -223,6 +233,38 @@ features are included if using "Host passthrough" or "Host model". ``/sys/devices/system/cpu/vulnerabilities/tsx_async_abort`` file in the guest should report ``Mitigation: TSX disabled``. +``bhi-no`` + Recommended to inform the guest that the host is ``not`` + vulnerable to CVE-2022-0001, Branch History Injection (BHI). + + This is also an MSR feature, therefore it does not show up in the Linux + ``/proc/cpuinfo`` in the host or guest. + + It should only be enabled for VMs if the host reports + ``BHI: Not affected`` in the + ``/sys/devices/system/cpu/vulnerabilities/spectre_v2`` file. + +``gds-no`` + Recommended to inform the guest that the host is ``not`` + vulnerable to CVE-2022-40982, Gather Data Sampling (GDS). + + This is also an MSR feature, therefore it does not show up in the Linux + ``/proc/cpuinfo`` in the host or guest. + + It should only be enabled for VMs if the host reports ``Not affected`` + in the ``/sys/devices/system/cpu/vulnerabilities/gather_data_sampling`` + file. + +``rfds-no`` + Recommended to inform the guest that the host is ``not`` + vulnerable to CVE-2023-28746, Register File Data Sampling (RFDS). + + This is also an MSR feature, therefore it does not show up in the Linux + ``/proc/cpuinfo`` in the host or guest. + + It should only be enabled for VMs if the host reports ``Not affected`` + in the ``/sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling`` + file. 
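A minimal sketch of how such bits are passed to a guest; the machine type, memory size and the ``Cascadelake-Server`` model are placeholders, and the bits should only be set when the corresponding host ``sysfs`` vulnerability files report ``Not affected``:

.. code-block:: bash

    # Advertise the BHI, GDS and RFDS "not vulnerable" bits on a named CPU model
    $ qemu-system-x86_64 -accel kvm -M q35 -m 2G -nographic \
        -cpu Cascadelake-Server,bhi-no=on,gds-no=on,rfds-no=on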
Preferred CPU models for AMD x86 hosts ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/system/device-emulation.rst b/docs/system/device-emulation.rst index f19777411cd..911381643f1 100644 --- a/docs/system/device-emulation.rst +++ b/docs/system/device-emulation.rst @@ -85,7 +85,9 @@ Emulated Devices devices/can.rst devices/ccid.rst devices/cxl.rst + devices/vfio-user.rst devices/ivshmem.rst + devices/ivshmem-flat.rst devices/keyboard.rst devices/net.rst devices/nvme.rst diff --git a/docs/system/devices/cxl.rst b/docs/system/devices/cxl.rst index 882b036f5e7..ca15a0da1c1 100644 --- a/docs/system/devices/cxl.rst +++ b/docs/system/devices/cxl.rst @@ -308,7 +308,7 @@ A very simple setup with just one directly attached CXL Type 3 Persistent Memory -object memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa.raw,size=256M \ -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \ -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \ - -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \ + -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0,sn=0x1 \ -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G A very simple setup with just one directly attached CXL Type 3 Volatile Memory device:: @@ -349,13 +349,13 @@ the CXL Type3 device directly attached (no switches).:: -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \ -device pxb-cxl,bus_nr=222,bus=pcie.0,id=cxl.2 \ -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \ - -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0 \ + -device cxl-type3,bus=root_port13,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0,sn=0x1 \ -device cxl-rp,port=1,bus=cxl.1,id=root_port14,chassis=0,slot=3 \ - -device cxl-type3,bus=root_port14,persistent-memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem1 \ + -device cxl-type3,bus=root_port14,persistent-memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem1,sn=0x2 \ -device cxl-rp,port=0,bus=cxl.2,id=root_port15,chassis=0,slot=5 \ - -device cxl-type3,bus=root_port15,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem2 \ + -device cxl-type3,bus=root_port15,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem2,sn=0x3 \ -device cxl-rp,port=1,bus=cxl.2,id=root_port16,chassis=0,slot=6 \ - -device cxl-type3,bus=root_port16,persistent-memdev=cxl-mem4,lsa=cxl-lsa4,id=cxl-pmem3 \ + -device cxl-type3,bus=root_port16,persistent-memdev=cxl-mem4,lsa=cxl-lsa4,id=cxl-pmem3,sn=0x4 \ -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.targets.1=cxl.2,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=8k An example of 4 devices below a switch suitable for 1, 2 or 4 way interleave:: @@ -375,15 +375,26 @@ An example of 4 devices below a switch suitable for 1, 2 or 4 way interleave:: -device cxl-rp,port=1,bus=cxl.1,id=root_port1,chassis=0,slot=1 \ -device cxl-upstream,bus=root_port0,id=us0 \ -device cxl-downstream,port=0,bus=us0,id=swport0,chassis=0,slot=4 \ - -device cxl-type3,bus=swport0,persistent-memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0 \ + -device cxl-type3,bus=swport0,persistent-memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0,sn=0x1 \ -device cxl-downstream,port=1,bus=us0,id=swport1,chassis=0,slot=5 \ - -device cxl-type3,bus=swport1,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem1 \ + -device cxl-type3,bus=swport1,persistent-memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem1,sn=0x2 \ -device cxl-downstream,port=2,bus=us0,id=swport2,chassis=0,slot=6 \ - -device cxl-type3,bus=swport2,persistent-memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem2 \ + -device 
cxl-type3,bus=swport2,persistent-memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem2,sn=0x3 \ -device cxl-downstream,port=3,bus=us0,id=swport3,chassis=0,slot=7 \ - -device cxl-type3,bus=swport3,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3 \ + -device cxl-type3,bus=swport3,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3,sn=0x4 \ -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=4k +A simple arm/virt example featuring a single direct connected CXL Type 3 +Volatile Memory device:: + + qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8g,slots=4 -cpu max -smp 4 \ + ... + -object memory-backend-ram,id=vmem0,share=on,size=256M \ + -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \ + -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \ + -device cxl-type3,bus=root_port13,volatile-memdev=vmem0,id=cxl-vmem0 \ + -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G + Deprecations ------------ diff --git a/docs/system/devices/igb.rst b/docs/system/devices/igb.rst index 04e79dfe549..71f31cb1160 100644 --- a/docs/system/devices/igb.rst +++ b/docs/system/devices/igb.rst @@ -57,11 +57,12 @@ directory: meson test qtest-x86_64/qos-test ethtool can test register accesses, interrupts, etc. It is automated as an -Avocado test and can be ran with the following command: +functional test and can be run from the build directory with the following +command: .. code:: shell - make check-avocado AVOCADO_TESTS=tests/avocado/netdev-ethtool.py + pyvenv/bin/meson test --suite thorough func-x86_64-netdev_ethtool References ========== diff --git a/docs/system/devices/ivshmem-flat.rst b/docs/system/devices/ivshmem-flat.rst new file mode 100644 index 00000000000..1f97052804a --- /dev/null +++ b/docs/system/devices/ivshmem-flat.rst @@ -0,0 +1,33 @@ +Inter-VM Shared Memory Flat Device +---------------------------------- + +The ivshmem-flat device is meant to be used on machines that lack a PCI bus, +making them unsuitable for the use of the traditional ivshmem device modeled as +a PCI device. Machines like those with a Cortex-M MCU are good candidates to use +the ivshmem-flat device. Also, since the flat version maps the control and +status registers directly to the memory, it requires a quite tiny "device +driver" to interact with other VMs, which is useful in some RTOSes, like +Zephyr, which usually run on constrained resource targets. + +Similar to the ivshmem device, the ivshmem-flat device supports both peer +notification via HW interrupts and Inter-VM shared memory. This allows the +device to be used together with the traditional ivshmem, enabling communication +between, for instance, an aarch64 VM (using the traditional ivshmem device and +running Linux), and an arm VM (using the ivshmem-flat device and running Zephyr +instead). + +The ivshmem-flat device does not support the use of a ``memdev`` option (see +ivshmem.rst for more details). It relies on the ivshmem server to create and +distribute the proper shared memory file descriptor and the eventfd(s) to notify +(interrupt) the peers. Therefore, to use this device, it is always necessary to +have an ivshmem server up and running for proper device creation. + +Although the ivshmem-flat supports both peer notification (interrupts) and +shared memory, the interrupt mechanism is optional. If no input IRQ is +specified for the device it is disabled, preventing the VM from notifying or +being notified by other VMs (a warning will be displayed to the user to inform +the IRQ mechanism is disabled). 
The shared memory region is always present. + +The MMRs (INTRMASK, INTRSTATUS, IVPOSITION, and DOORBELL registers) offsets at +the MMR region, and their functions, follow the ivshmem spec, so they work +exactly as in the ivshmem PCI device (see ./specs/ivshmem-spec.txt). diff --git a/docs/system/devices/net.rst b/docs/system/devices/net.rst index 2ab516d4b09..7d76fe88c45 100644 --- a/docs/system/devices/net.rst +++ b/docs/system/devices/net.rst @@ -21,11 +21,17 @@ configure it as if it was a real ethernet card. Linux host ^^^^^^^^^^ -As an example, you can download the ``linux-test-xxx.tar.gz`` archive -and copy the script ``qemu-ifup`` in ``/etc`` and configure properly -``sudo`` so that the command ``ifconfig`` contained in ``qemu-ifup`` can -be executed as root. You must verify that your host kernel supports the -TAP network interfaces: the device ``/dev/net/tun`` must be present. +A distribution will generally provide specific helper scripts when it +packages QEMU. By default these are found at ``/etc/qemu-ifup`` and +``/etc/qemu-ifdown`` and are called appropriately when QEMU wants to +change the network state. + +If QEMU is being run as a non-privileged user you may need properly +configure ``sudo`` so that network commands in the scripts can be +executed as root. + +You must verify that your host kernel supports the TAP network +interfaces: the device ``/dev/net/tun`` must be present. See :ref:`sec_005finvocation` to have examples of command lines using the TAP network interfaces. @@ -77,6 +83,152 @@ When using the ``'-netdev user,hostfwd=...'`` option, TCP or UDP connections can be redirected from the host to the guest. It allows for example to redirect X11, telnet or SSH connections. +Using passt as the user mode network stack +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +passt_ can be used as a simple replacement for SLIRP (``-net user``). +passt doesn't require any capability or privilege. passt has +better performance than ``-net user``, full IPv6 support and better security +as it's a daemon that is not executed in QEMU context. + +passt_ can be used in the same way as the user backend (using ``-net passt``, +``-netdev passt`` or ``-nic passt``) or it can be launched manually and +connected to QEMU either by using a socket (``-netdev stream``) or by using +the vhost-user interface (``-netdev vhost-user``). + +Using ``-netdev stream`` or ``-netdev vhost-user`` will allow the user to +enable functionalities not available through the passt backend interface +(like migration). + +See `passt(1)`_ for more details on passt. + +.. _passt: https://passt.top/ +.. _passt(1): https://passt.top/builds/latest/web/passt.1.html + +To use the passt backend interface +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +There is no need to start the daemon as QEMU will do it for you. + +By default, passt will be started in the socket-based mode. + +.. parsed-literal:: + |qemu_system| [...OPTIONS...] -nic passt + + (qemu) info network + e1000e.0: index=0,type=nic,model=e1000e,macaddr=52:54:00:12:34:56 + \ #net071: index=0,type=passt,stream,connected to pid 24846 + +.. parsed-literal:: + |qemu_system| [...OPTIONS...] -net nic -net passt,tcp-ports=10001,udp-ports=10001 + + (qemu) info network + hub 0 + \ hub0port1: #net136: index=0,type=passt,stream,connected to pid 25204 + \ hub0port0: e1000e.0: index=0,type=nic,model=e1000e,macaddr=52:54:00:12:34:56 + +.. parsed-literal:: + |qemu_system| [...OPTIONS...] 
-netdev passt,id=netdev0 -device virtio-net,mac=9a:2b:2c:2d:2e:2f,id=virtio0,netdev=netdev0 + + (qemu) info network + virtio0: index=0,type=nic,model=virtio-net-pci,macaddr=9a:2b:2c:2d:2e:2f + \ netdev0: index=0,type=passt,stream,connected to pid 25428 + +To use the vhost-based interface, add the ``vhost-user=on`` parameter and +select the virtio-net device: + +.. parsed-literal:: + |qemu_system| [...OPTIONS...] -nic passt,model=virtio,vhost-user=on + + (qemu) info network + virtio-net-pci.0: index=0,type=nic,model=virtio-net-pci,macaddr=52:54:00:12:34:56 + \ #net006: index=0,type=passt,vhost-user,connected to pid 25731 + +To use socket based passt interface: +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Start passt as a daemon:: + + passt --socket ~/passt.socket + +If ``--socket`` is not provided, passt will print the path of the UNIX domain socket QEMU can connect to (``/tmp/passt_1.socket``, ``/tmp/passt_2.socket``, +...). Then you can connect your QEMU instance to passt: + +.. parsed-literal:: + |qemu_system| [...OPTIONS...] -device virtio-net-pci,netdev=netdev0 -netdev stream,id=netdev0,server=off,addr.type=unix,addr.path=~/passt.socket + +Where ``~/passt.socket`` is the UNIX socket created by passt to +communicate with QEMU. + +To use vhost-based interface: +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Start passt with ``--vhost-user``:: + + passt --vhost-user --socket ~/passt.socket + +Then to connect QEMU: + +.. parsed-literal:: + |qemu_system| [...OPTIONS...] -m $RAMSIZE -chardev socket,id=chr0,path=~/passt.socket -netdev vhost-user,id=netdev0,chardev=chr0 -device virtio-net,netdev=netdev0 -object memory-backend-memfd,id=memfd0,share=on,size=$RAMSIZE -numa node,memdev=memfd0 + +Where ``$RAMSIZE`` is the memory size of your VM ``-m`` and ``-object memory-backend-memfd,size=`` must match. + +Migration of passt: +^^^^^^^^^^^^^^^^^^^ + +When passt is connected to QEMU using the vhost-user interface it can +be migrated with QEMU and the network connections are not interrupted. + +As passt runs with no privileges, it relies on passt-repair to save and +load the TCP connections state, using the TCP_REPAIR socket option. +The passt-repair helper needs to have the CAP_NET_ADMIN capability, or run as root. If passt-repair is not available, TCP connections will not be preserved. + +Example of migration of a guest on the same host +________________________________________________ + +Before being able to run passt-repair, the CAP_NET_ADMIN capability must be set +on the file, run as root:: + + setcap cap_net_admin+eip ./passt-repair + +Start passt for the source side:: + + passt --vhost-user --socket ~/passt_src.socket --repair-path ~/passt-repair_src.socket + +Where ``~/passt-repair_src.socket`` is the UNIX socket created by passt to +communicate with passt-repair. The default value is the ``--socket`` path +appended with ``.repair``. + +Start passt-repair:: + + passt-repair ~/passt-repair_src.socket + +Start source side QEMU with a monitor to be able to send the migrate command: + +.. parsed-literal:: + |qemu_system| [...OPTIONS...] [...VHOST USER OPTIONS...] -monitor stdio + +Start passt for the destination side:: + + passt --vhost-user --socket ~/passt_dst.socket --repair-path ~/passt-repair_dst.socket + +Start passt-repair:: + + passt-repair ~/passt-repair_dst.socket + +Start QEMU with the ``-incoming`` parameter: + +.. parsed-literal:: + |qemu_system| [...OPTIONS...] [...VHOST USER OPTIONS...] 
-incoming tcp:localhost:4444 + +Then in the source guest monitor the migration can be started:: + + (qemu) migrate tcp:localhost:4444 + +A separate passt-repair instance must be started for every migration. In the case of a failed migration, passt-repair also needs to be restarted before trying +again. + Hubs ~~~~ diff --git a/docs/system/devices/nvme.rst b/docs/system/devices/nvme.rst index d2b1ca96455..6509b35fcb4 100644 --- a/docs/system/devices/nvme.rst +++ b/docs/system/devices/nvme.rst @@ -53,6 +53,13 @@ parameters. Vendor ID. Set this to ``on`` to revert to the unallocated Intel ID previously used. +``ocp`` (default: ``off``) + The Open Compute Project defines the Datacenter NVMe SSD Specification that + sits on top of NVMe. It describes additional commands and NVMe behaviors + specific for the Datacenter. When this option is ``on`` OCP features such as + the SMART / Health information extended log become available in the + controller. We emulate version 5 of this log page. + Additional Namespaces --------------------- diff --git a/docs/system/devices/vfio-user.rst b/docs/system/devices/vfio-user.rst new file mode 100644 index 00000000000..b6dcaa5615e --- /dev/null +++ b/docs/system/devices/vfio-user.rst @@ -0,0 +1,26 @@ +.. SPDX-License-Identifier: GPL-2.0-or-later + +========= +vfio-user +========= + +QEMU includes a ``vfio-user`` client. The ``vfio-user`` specification allows for +implementing (PCI) devices in userspace outside of QEMU; it is similar to +``vhost-user`` in this respect (see :doc:`vhost-user`), but can emulate arbitrary +PCI devices, not just ``virtio``. Whereas ``vfio`` is handled by the host +kernel, ``vfio-user``, while similar in implementation, is handled entirely in +userspace. + +For example, SPDK includes a virtual PCI NVMe controller implementation; by +setting up a ``vfio-user`` UNIX socket between QEMU and SPDK, a VM can send NVMe +I/O to the SPDK process. + +Presuming a suitable ``vfio-user`` server has opened a socket at +``/tmp/vfio-user.sock``, a device can be configured with for example: + +.. code-block:: console + +-device '{"driver": "vfio-user-pci","socket": {"path": "/tmp/vfio-user.sock", "type": "unix"}}' + +See `libvfio-user `_ for further +information. diff --git a/docs/system/devices/virtio-gpu.rst b/docs/system/devices/virtio-gpu.rst index cb73dd79985..b7eb0fc0e72 100644 --- a/docs/system/devices/virtio-gpu.rst +++ b/docs/system/devices/virtio-gpu.rst @@ -71,6 +71,17 @@ representation back to OpenGL API calls. .. _Gallium3D: https://www.freedesktop.org/wiki/Software/gallium/ .. _virglrenderer: https://gitlab.freedesktop.org/virgl/virglrenderer/ +Translation of Vulkan API calls is supported since release of `virglrenderer`_ +v1.0.0 using `venus`_ protocol. ``Venus`` virtio-gpu capability set ("capset") +requires host blob support (``hostmem`` and ``blob`` fields) and should +be enabled using ``venus`` field. The ``hostmem`` field specifies the size +of virtio-gpu host memory window. This is typically between 256M and 8G. + +.. parsed-literal:: + -device virtio-gpu-gl,hostmem=8G,blob=true,venus=true + +.. _venus: https://gitlab.freedesktop.org/virgl/venus-protocol/ + virtio-gpu rutabaga ------------------- diff --git a/docs/system/gdb.rst b/docs/system/gdb.rst index 4228cb56bbb..d50470b135e 100644 --- a/docs/system/gdb.rst +++ b/docs/system/gdb.rst @@ -20,7 +20,7 @@ connection, use the ``-gdb dev`` option instead of ``-s``. See .. 
parsed-literal:: - |qemu_system| -s -S -kernel bzImage -hda rootdisk.img -append "root=/dev/hda" + |qemu_system| -s -S -kernel bzImage -drive file=rootdisk.img,format=raw -append "root=/dev/sda" QEMU will launch but will silently wait for gdb to connect. diff --git a/docs/system/i386/amd-memory-encryption.rst b/docs/system/i386/amd-memory-encryption.rst index 748f5094baf..6c23f3535f4 100644 --- a/docs/system/i386/amd-memory-encryption.rst +++ b/docs/system/i386/amd-memory-encryption.rst @@ -1,3 +1,5 @@ +.. _amd-sev: + AMD Secure Encrypted Virtualization (SEV) ========================================= diff --git a/docs/system/i386/hyperv.rst b/docs/system/i386/hyperv.rst index 2505dc4c86e..1c1de77feb6 100644 --- a/docs/system/i386/hyperv.rst +++ b/docs/system/i386/hyperv.rst @@ -262,14 +262,19 @@ Supplementary features ``hv-passthrough`` In some cases (e.g. during development) it may make sense to use QEMU in 'pass-through' mode and give Windows guests all enlightenments currently - supported by KVM. This pass-through mode is enabled by "hv-passthrough" CPU - flag. + supported by KVM. Note: ``hv-passthrough`` flag only enables enlightenments which are known to QEMU (have corresponding 'hv-' flag) and copies ``hv-spinlocks`` and ``hv-vendor-id`` values from KVM to QEMU. ``hv-passthrough`` overrides all other 'hv-' settings on - the command line. Also, enabling this flag effectively prevents migration as the - list of enabled enlightenments may differ between target and destination hosts. + the command line. + + Note: ``hv-passthrough`` does not enable ``hv-syndbg`` which can prevent certain + Windows guests from booting when used without proper configuration. If needed, + ``hv-syndbg`` can be enabled additionally. + + Note: ``hv-passthrough`` effectively prevents migration as the list of enabled + enlightenments may differ between target and destination hosts. ``hv-enforce-cpuid`` By default, KVM allows the guest to use all currently supported Hyper-V @@ -278,6 +283,36 @@ Supplementary features feature alters this behavior and only allows the guest to use exposed Hyper-V enlightenments. +Recommendations +--------------- + +To achieve the best performance of Windows and Hyper-V guests and unless there +are any specific requirements (e.g. migration to older QEMU/KVM versions, +emulating specific Hyper-V version, ...), it is recommended to enable all +currently implemented Hyper-V enlightenments with the following exceptions: + +- ``hv-syndbg``, ``hv-passthrough``, ``hv-enforce-cpuid`` should not be enabled + in production configurations as these are debugging/development features. +- ``hv-reset`` can be avoided as modern Hyper-V versions don't expose it. +- ``hv-evmcs`` can (and should) be enabled on Intel CPUs only. While the feature + is only used in nested configurations (Hyper-V, WSL2), enabling it for regular + Windows guests should not have any negative effects. +- ``hv-no-nonarch-coresharing`` must only be enabled if vCPUs are properly pinned + so no non-architectural core sharing is possible. +- ``hv-vendor-id``, ``hv-version-id-build``, ``hv-version-id-major``, + ``hv-version-id-minor``, ``hv-version-id-spack``, ``hv-version-id-sbranch``, + ``hv-version-id-snumber`` can be left unchanged, guests are not supposed to + behave differently when different Hyper-V version is presented to them. +- ``hv-crash`` must only be enabled if the crash information is consumed via + QAPI by higher levels of the virtualization stack. 
Enabling this feature + effectively prevents Windows from creating dumps upon crashes. +- ``hv-reenlightenment`` can only be used on hardware which supports TSC + scaling or when guest migration is not needed. +- ``hv-spinlocks`` should be set to e.g. 0xfff when host CPUs are overcommited + (meaning there are other scheduled tasks or guests) and can be left unchanged + from the default value (0xffffffff) otherwise. +- ``hv-avic``/``hv-apicv`` should not be enabled if the hardware does not + support APIC virtualization (Intel APICv, AMD AVIC). Useful links ------------ diff --git a/docs/system/i386/nitro-enclave.rst b/docs/system/i386/nitro-enclave.rst new file mode 100644 index 00000000000..7317f547dce --- /dev/null +++ b/docs/system/i386/nitro-enclave.rst @@ -0,0 +1,78 @@ +'nitro-enclave' virtual machine (``nitro-enclave``) +=================================================== + +``nitro-enclave`` is a machine type which emulates an *AWS nitro enclave* +virtual machine. `AWS nitro enclaves`_ is an Amazon EC2 feature that allows +creating isolated execution environments, called enclaves, from Amazon EC2 +instances which are used for processing highly sensitive data. Enclaves have +no persistent storage and no external networking. The enclave VMs are based +on Firecracker microvm with a vhost-vsock device for communication with the +parent EC2 instance that spawned it and a Nitro Secure Module (NSM) device +for cryptographic attestation. The parent instance VM always has CID 3 while +the enclave VM gets a dynamic CID. Enclaves use an EIF (`Enclave Image Format`_) +file which contains the necessary kernel, cmdline and ramdisk(s) to boot. + +In QEMU, ``nitro-enclave`` is a machine type based on ``microvm`` similar to how +AWS nitro enclaves look like a `Firecracker`_ microvm. This is useful for +local testing of EIF files using QEMU instead of running real AWS Nitro Enclaves +which can be difficult for debugging due to its roots in security. The vsock +device emulation is done using vhost-user-vsock which means another process that +can do the userspace emulation, like `vhost-device-vsock`_ from rust-vmm crate, +must be run alongside nitro-enclave for the vsock communication to work. + +``libcbor`` and ``gnutls`` are required dependencies for nitro-enclave machine +support to be added when building QEMU from source. + +.. _AWS nitro enclaves: https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html +.. _Enclave Image Format: https://github.com/aws/aws-nitro-enclaves-image-format +.. _vhost-device-vsock: https://github.com/rust-vmm/vhost-device/tree/main/vhost-device-vsock +.. _Firecracker: https://firecracker-microvm.github.io + +Using the nitro-enclave machine type +------------------------------------ + +Machine-specific options +~~~~~~~~~~~~~~~~~~~~~~~~ + +It supports the following machine-specific options: + +- nitro-enclave.vsock=string (required) (Id of the chardev from '-chardev' option that vhost-user-vsock device will use) +- nitro-enclave.id=string (optional) (Set enclave identifier) +- nitro-enclave.parent-role=string (optional) (Set parent instance IAM role ARN) +- nitro-enclave.parent-id=string (optional) (Set parent instance identifier) + + +Running a nitro-enclave VM +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +First, run `vhost-device-vsock`__ (or a similar tool that supports vhost-user-vsock). 
+The forward-cid option below with value 1 forwards all connections from the enclave +VM to the host machine and the forward-listen (port numbers separated by '+') is used +for forwarding connections from the host machine to the enclave VM:: + + $ vhost-device-vsock \ + --vm guest-cid=4,forward-cid=1,forward-listen=9001+9002,socket=/tmp/vhost4.socket + +__ https://github.com/rust-vmm/vhost-device/tree/main/vhost-device-vsock#using-the-vsock-backend + +Now run the necessary applications on the host machine so that the nitro-enclave VM +applications' vsock communication works. For example, the nitro-enclave VM's init +process connects to CID 3 and sends a single byte hello heartbeat (0xB7) to let the +parent VM know that it booted expecting a heartbeat (0xB7) response. So you must run +a AF_VSOCK server on the host machine that listens on port 9000 and sends the heartbeat +after it receives the heartbeat for enclave VM to boot successfully. You should run all +the applications on the host machine that would typically be running in the parent EC2 +VM for successful communication with the enclave VM. + +Then run the nitro-enclave VM using the following command where ``hello.eif`` is +an EIF file you would use to spawn a real AWS nitro enclave virtual machine:: + + $ qemu-system-x86_64 -M nitro-enclave,vsock=c,id=hello-world \ + -kernel hello-world.eif -nographic -m 4G --enable-kvm -cpu host \ + -chardev socket,id=c,path=/tmp/vhost4.socket + +In this example, the nitro-enclave VM has CID 4. If there are applications that +connect to the enclave VM, run them on the host machine after enclave VM starts. +You need to modify the applications to connect to CID 1 (instead of the enclave +VM's CID) and use the forward-listen (e.g., 9001+9002) option of vhost-device-vsock +to forward the ports they connect to. diff --git a/docs/system/i386/tdx.rst b/docs/system/i386/tdx.rst new file mode 100644 index 00000000000..8131750b64b --- /dev/null +++ b/docs/system/i386/tdx.rst @@ -0,0 +1,161 @@ +Intel Trusted Domain eXtension (TDX) +==================================== + +Intel Trusted Domain eXtensions (TDX) refers to an Intel technology that extends +Virtual Machine Extensions (VMX) and Multi-Key Total Memory Encryption (MKTME) +with a new kind of virtual machine guest called a Trust Domain (TD). A TD runs +in a CPU mode that is designed to protect the confidentiality of its memory +contents and its CPU state from any other software, including the hosting +Virtual Machine Monitor (VMM), unless explicitly shared by the TD itself. + +Prerequisites +------------- + +To run TD, the physical machine needs to have TDX module loaded and initialized +while KVM hypervisor has TDX support and has TDX enabled. If those requirements +are met, the ``KVM_CAP_VM_TYPES`` will report the support of ``KVM_X86_TDX_VM``. + +Trust Domain Virtual Firmware (TDVF) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Trust Domain Virtual Firmware (TDVF) is required to provide TD services to boot +TD Guest OS. TDVF needs to be copied to guest private memory and measured before +the TD boots. + +KVM vcpu ioctl ``KVM_TDX_INIT_MEM_REGION`` can be used to populate the TDVF +content into its private memory. + +Since TDX doesn't support readonly memslot, TDVF cannot be mapped as pflash +device and it actually works as RAM. "-bios" option is chosen to load TDVF. + +OVMF is the opensource firmware that implements the TDVF support. 
Thus the +command line to specify and load TDVF is ``-bios OVMF.fd`` + +Feature Configuration +--------------------- + +Unlike non-TDX VM, the CPU features (enumerated by CPU or MSR) of a TD are not +under full control of VMM. VMM can only configure part of features of a TD on +``KVM_TDX_INIT_VM`` command of VM scope ``MEMORY_ENCRYPT_OP`` ioctl. + +The configurable features have three types: + +- Attributes: + - PKS (bit 30) controls whether Supervisor Protection Keys is exposed to TD, + which determines related CPUID bit and CR4 bit; + - PERFMON (bit 63) controls whether PMU is exposed to TD. + +- XSAVE related features (XFAM): + XFAM is a 64b mask, which has the same format as XCR0 or IA32_XSS MSR. It + determines the set of extended features available for use by the guest TD. + +- CPUID features: + Only some bits of some CPUID leaves are directly configurable by VMM. + +What features can be configured is reported via TDX capabilities. + +TDX capabilities +~~~~~~~~~~~~~~~~ + +The VM scope ``MEMORY_ENCRYPT_OP`` ioctl provides command ``KVM_TDX_CAPABILITIES`` +to get the TDX capabilities from KVM. It returns a data structure of +``struct kvm_tdx_capabilities``, which tells the supported configuration of +attributes, XFAM and CPUIDs. + +TD attributes +~~~~~~~~~~~~~ + +QEMU supports configuring raw 64-bit TD attributes directly via "attributes" +property of "tdx-guest" object. Note, it's users' responsibility to provide a +valid value because some bits may not supported by current QEMU or KVM yet. + +QEMU also supports the configuration of individual attribute bits that are +supported by it, via properties of "tdx-guest" object. +E.g., "sept-ve-disable" (bit 28). + +MSR based features +~~~~~~~~~~~~~~~~~~ + +Current KVM doesn't support MSR based feature (e.g., MSR_IA32_ARCH_CAPABILITIES) +configuration for TDX, and it's a future work to enable it in QEMU when KVM adds +support of it. + +Feature check +~~~~~~~~~~~~~ + +QEMU checks if the final (CPU) features, determined by given cpu model and +explicit feature adjustment of "+featureA/-featureB", can be supported or not. +It can produce feature not supported warning like + + "warning: host doesn't support requested feature: CPUID.07H:EBX.intel-pt [bit 25]" + +It can also produce warning like + + "warning: TDX forcibly sets the feature: CPUID.80000007H:EDX.invtsc [bit 8]" + +if the fixed-1 feature is requested to be disabled explicitly. This is newly +added to QEMU for TDX because TDX has fixed-1 features that are forcibly enabled +by TDX module and VMM cannot disable them. + +Launching a TD (TDX VM) +----------------------- + +To launch a TD, the necessary command line options are tdx-guest object and +split kernel-irqchip, as below: + +.. parsed-literal:: + + |qemu_system_x86| \\ + -accel kvm \\ + -cpu host \\ + -object tdx-guest,id=tdx0 \\ + -machine ...,confidential-guest-support=tdx0 \\ + -bios OVMF.fd \\ + +Restrictions +------------ + + - kernel-irqchip must be split; + + This is set by default for TDX guest if kernel-irqchip is left on its default + 'auto' setting. + + - No readonly support for private memory; + + - No SMM support: SMM support requires manipulating the guest register states + which is not allowed; + +Debugging +--------- + +Bit 0 of TD attributes, is DEBUG bit, which decides if the TD runs in off-TD +debug mode. When in off-TD debug mode, TD's VCPU state and private memory are +accessible via given SEAMCALLs. This requires KVM to expose APIs to invoke those +SEAMCALLs and corresonponding QEMU change. 
+ +It's targeted as future work. + +TD attestation +-------------- + +In TD guest, the attestation process is used to verify the TDX guest +trustworthiness to other entities before provisioning secrets to the guest. + +TD attestation is initiated first by calling TDG.MR.REPORT inside TD to get the +REPORT. Then the REPORT data needs to be converted into a remotely verifiable +Quote by SGX Quoting Enclave (QE). + +It's a future work in QEMU to add support of TD attestation since it lacks +support in current KVM. + +Live Migration +-------------- + +Future work. + +References +---------- + +- `TDX Homepage `__ + +- `SGX QE `__ diff --git a/docs/system/i386/xenpvh.rst b/docs/system/i386/xenpvh.rst new file mode 100644 index 00000000000..354250f073f --- /dev/null +++ b/docs/system/i386/xenpvh.rst @@ -0,0 +1,49 @@ +Xen PVH machine (``xenpvh``) +========================================= + +Xen supports a spectrum of types of guests that vary in how they depend +on HW virtualization features, emulation models and paravirtualization. +PVH is a mode that uses HW virtualization features (like HVM) but tries +to avoid emulation models and instead use passthrough or +paravirtualized devices. + +QEMU can be used to provide PV virtio devices on an emulated PCIe controller. +That is the purpose of this minimal machine. + +Supported devices +----------------- + +The x86 Xen PVH QEMU machine provide the following devices: + +- RAM +- GPEX host bridge +- virtio-pci devices + +The idea is to only connect virtio-pci devices but in theory any compatible +PCI device model will work depending on Xen and guest support. + +Running +------- + +The Xen tools will typically construct a command-line and launch QEMU +for you when needed. But here's an example of what it can look like in +case you need to construct one manually: + +.. code-block:: console + + qemu-system-i386 -xen-domid 3 -no-shutdown \ + -chardev socket,id=libxl-cmd,path=/var/run/xen/qmp-libxl-3,server=on,wait=off \ + -mon chardev=libxl-cmd,mode=control \ + -chardev socket,id=libxenstat-cmd,path=/var/run/xen/qmp-libxenstat-3,server=on,wait=off \ + -mon chardev=libxenstat-cmd,mode=control \ + -nodefaults \ + -no-user-config \ + -xen-attach -name g0 \ + -vnc none \ + -display none \ + -device virtio-net-pci,id=nic0,netdev=net0,mac=00:16:3e:5c:81:78 \ + -netdev type=tap,id=net0,ifname=vif3.0-emu,br=xenbr0,script=no,downscript=no \ + -smp 4,maxcpus=4 \ + -nographic \ + -machine xenpvh,ram-low-base=0,ram-low-size=2147483648,ram-high-base=4294967296,ram-high-size=2147483648,pci-ecam-base=824633720832,pci-ecam-size=268435456,pci-mmio-base=4026531840,pci-mmio-size=33554432,pci-mmio-high-base=824902156288,pci-mmio-high-size=68719476736 \ + -m 4096 diff --git a/docs/system/igvm.rst b/docs/system/igvm.rst new file mode 100644 index 00000000000..79508d95880 --- /dev/null +++ b/docs/system/igvm.rst @@ -0,0 +1,173 @@ +Independent Guest Virtual Machine (IGVM) support +================================================ + +IGVM files are designed to encapsulate all the information required to launch a +virtual machine on any given virtualization stack in a deterministic way. This +allows the cryptographic measurement of initial guest state for Confidential +Guests to be calculated when the IGVM file is built, allowing a relying party to +verify the initial state of a guest via a remote attestation. + +Although IGVM files are designed with Confidential Computing in mind, they can +also be used to configure non-confidential guests. 
Multiple platforms can be +defined by a single IGVM file, allowing a single IGVM file to configure a +virtual machine that can run on, for example, TDX, SEV and non-confidential +hosts. + +QEMU supports IGVM files through the user-creatable ``igvm-cfg`` object. This +object is used to define the filename of the IGVM file to process. A reference +to the object is added to the ``-machine`` to configure the virtual machine +to use the IGVM file for configuration. + +Confidential platform support is provided through the use of +the ``ConfidentialGuestSupport`` object. If the virtual machine provides an +instance of this object then this is used by the IGVM loader to configure the +isolation properties of the directives within the file. + +Further Information on IGVM +--------------------------- + +Information about the IGVM format, including links to the format specification +and documentation for the Rust and C libraries can be found at the project +repository: + +https://github.com/microsoft/igvm + + +Supported Platforms +------------------- + +Currently, IGVM files can be provided for Confidential Guests on host systems +that support AMD SEV, SEV-ES and SEV-SNP with KVM. IGVM files can also be +provided for non-confidential guests. + + +Limitations when using IGVM with AMD SEV, SEV-ES and SEV-SNP +------------------------------------------------------------ + +IGVM files configure the initial state of the guest using a set of directives. +Not every directive is supported by every Confidential Guest type. For example, +AMD SEV does not support encrypted save state regions, therefore setting the +initial CPU state using IGVM for SEV is not possible. When an IGVM file contains +directives that are not supported for the active platform, an error is generated +and the guest launch is aborted. + +The table below describes the list of directives that are supported for SEV, +SEV-ES, SEV-SNP and non-confidential platforms. + +.. list-table:: SEV, SEV-ES, SEV-SNP & non-confidential Supported Directives + :widths: 35 65 + :header-rows: 1 + + * - IGVM directive + - Notes + * - IGVM_VHT_PAGE_DATA + - ``NORMAL`` zero, measured and unmeasured page types are supported. Other + page types result in an error. + * - IGVM_VHT_PARAMETER_AREA + - + * - IGVM_VHT_PARAMETER_INSERT + - + * - IGVM_VHT_VP_COUNT_PARAMETER + - The guest parameter page is populated with the CPU count. + * - IGVM_VHT_ENVIRONMENT_INFO_PARAMETER + - The ``memory_is_shared`` parameter is set to 1 in the guest parameter + page. + +.. list-table:: Additional SEV, SEV-ES & SEV_SNP Supported Directives + :widths: 25 75 + :header-rows: 1 + + * - IGVM directive + - Notes + * - IGVM_VHT_MEMORY_MAP + - The memory map page is populated using entries from the E820 table. + * - IGVM_VHT_REQUIRED_MEMORY + - Ensures memory is available in the guest at the specified range. + +.. list-table:: Additional SEV-ES & SEV-SNP Supported Directives + :widths: 25 75 + :header-rows: 1 + + * - IGVM directive + - Notes + * - IGVM_VHT_VP_CONTEXT + - Setting of the initial CPU state for the boot CPU and additional CPUs is + supported with limitations on the fields that can be provided in the + VMSA. See below for details on which fields are supported. + +Initial CPU state with VMSA +--------------------------- + +The initial state of guest CPUs can be defined in the IGVM file for AMD SEV-ES +and SEV-SNP. The state data is provided as a VMSA structure as defined in Table +B-4 in the AMD64 Architecture Programmer's Manual, Volume 2 [1]. 
+ +The IGVM VMSA is translated to CPU state in QEMU which is then synchronized +by KVM to the guest VMSA during the launch process where it contributes to the +launch measurement. See :ref:`amd-sev` for details on the launch process and +guest launch measurement. + +It is important that no information is lost or changed when translating the +VMSA provided by the IGVM file into the VSMA that is used to launch the guest. +Therefore, QEMU restricts the VMSA fields that can be provided in the IGVM +VMSA structure to the following registers: + +RAX, RCX, RDX, RBX, RBP, RSI, RDI, R8-R15, RSP, RIP, CS, DS, ES, FS, GS, SS, +CR0, CR3, CR4, XCR0, EFER, PAT, GDT, IDT, LDTR, TR, DR6, DR7, RFLAGS, X87_FCW, +MXCSR. + +When processing the IGVM file, QEMU will check if any fields other than the +above are non-zero and generate an error if this is the case. + +KVM uses a hardcoded GPA of 0xFFFFFFFFF000 for the VMSA. When an IGVM file +defines initial CPU state, the GPA for each VMSA must match this hardcoded +value. + +Firmware Images with IGVM +------------------------- + +When an IGVM filename is specified for a Confidential Guest Support object it +overrides the default handling of system firmware: the firmware image, such as +an OVMF binary should be contained as a payload of the IGVM file and not +provided as a flash drive or via the ``-bios`` parameter. The default QEMU +firmware is not automatically populated into the guest memory space. + +If an IGVM file is provided along with either the ``-bios`` parameter or pflash +devices then an error is displayed and the guest startup is aborted. + +Running a guest configured using IGVM +------------------------------------- + +To run a guest configured with IGVM you firstly need to generate an IGVM file +that contains a guest configuration compatible with the platform you are +targeting. + +The ``buildigvm`` tool [2] is an example of a tool that can be used to generate +IGVM files for non-confidential X86 platforms as well as for SEV, SEV-ES and +SEV-SNP confidential platforms. + +Example using this tool to generate an IGVM file for AMD SEV-SNP:: + + buildigvm --firmware /path/to/OVMF.fd --output sev-snp.igvm \ + --cpucount 4 sev-snp + +To run a guest configured with the generated IGVM you need to add an +``igvm-cfg`` object and refer to it from the ``-machine`` parameter: + +Example (for AMD SEV):: + + qemu-system-x86_64 \ + \ + -machine ...,confidential-guest-support=sev0,igvm-cfg=igvm0 \ + -object sev-guest,id=sev0,cbitpos=47,reduced-phys-bits=1 \ + -object igvm-cfg,id=igvm0,file=/path/to/sev-snp.igvm + +References +---------- + +[1] AMD64 Architecture Programmer's Manual, Volume 2: System Programming + Rev 3.41 + https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/24593.pdf + +[2] ``buildigvm`` - A tool to build example IGVM files containing OVMF firmware + https://github.com/roy-hopkins/buildigvm \ No newline at end of file diff --git a/docs/system/images.rst b/docs/system/images.rst index d000bd6b6f1..43706969fdd 100644 --- a/docs/system/images.rst +++ b/docs/system/images.rst @@ -30,7 +30,7 @@ Snapshot mode If you use the option ``-snapshot``, all disk images are considered as read only. When sectors in written, they are written in a temporary file created in ``/tmp``. You can however force the write back to the raw -disk images by using the ``commit`` monitor command (or C-a s in the +disk images by using the ``commit`` monitor command (or :kbd:`Ctrl+a s` in the serial console). .. 
_vm_005fsnapshots: @@ -82,4 +82,6 @@ VM snapshots currently have the following known limitations: - A few device drivers still have incomplete snapshot support so their state is not saved or restored properly (in particular USB). +.. _block-drivers: + .. include:: qemu-block-drivers.rst.inc diff --git a/docs/system/index.rst b/docs/system/index.rst index c21065e5193..427b0204831 100644 --- a/docs/system/index.rst +++ b/docs/system/index.rst @@ -38,4 +38,6 @@ or Hypervisor.Framework. security multi-process confidential-guest-support + igvm vm-templating + sriov diff --git a/docs/system/introduction.rst b/docs/system/introduction.rst index 746707eb00e..4cd46b5b8f9 100644 --- a/docs/system/introduction.rst +++ b/docs/system/introduction.rst @@ -81,7 +81,7 @@ may not be optimal for modern systems. For a non-x86 system where we emulate a broad range of machine types, the command lines are generally more explicit in defining the machine -and boot behaviour. You will find often find example command lines in +and boot behaviour. You will often find example command lines in the :ref:`system-targets-ref` section of the manual. While the project doesn't want to discourage users from using the @@ -169,7 +169,7 @@ would default to it anyway. .. code:: - -cpu max,pauth-impdef=on \ + -cpu max \ -smp 4 \ -accel tcg \ diff --git a/docs/system/keys.rst.inc b/docs/system/keys.rst.inc index 59966a3fe7c..c28ae1a2272 100644 --- a/docs/system/keys.rst.inc +++ b/docs/system/keys.rst.inc @@ -1,36 +1,37 @@ During the graphical emulation, you can use special key combinations from -the following table to change modes. By default the modifier is Ctrl-Alt +the following table to change modes. By default the modifier is :kbd:`Ctrl+Alt` (used in the table below) which can be changed with ``-display`` suboption ``mod=`` where appropriate. For example, ``-display sdl, -grab-mod=lshift-lctrl-lalt`` changes the modifier key to Ctrl-Alt-Shift, -while ``-display sdl,grab-mod=rctrl`` changes it to the right Ctrl key. +grab-mod=lshift-lctrl-lalt`` changes the modifier key to :kbd:`Ctrl+Alt+Shift`, +while ``-display sdl,grab-mod=rctrl`` changes it to the right :kbd:`Ctrl` key. -Ctrl-Alt-f - Toggle full screen +.. list-table:: Multiplexer Keys + :widths: 10 90 + :header-rows: 1 -Ctrl-Alt-+ - Enlarge the screen + * - Key Sequence + - Action -Ctrl-Alt\-- - Shrink the screen + * - :kbd:`Ctrl+Alt+f` + - Toggle full screen -Ctrl-Alt-u - Restore the screen's un-scaled dimensions + * - :kbd:`Ctrl+Alt++` + - Enlarge the screen -Ctrl-Alt-n - Switch to virtual console 'n'. Standard console mappings are: + * - :kbd:`Ctrl+Alt+-` + - Shrink the screen - *1* - Target system display + * - :kbd:`Ctrl+Alt+u` + - Restore the screen's un-scaled dimensions - *2* - Monitor + * - :kbd:`Ctrl+Alt+n` + - Switch to virtual console 'n'. Standard console mappings are: - *3* - Serial port + - *1*: Target system display + - *2*: Monitor + - *3*: Serial port + * - :kbd:`Ctrl+Alt+g` + - Toggle mouse and keyboard grab. -Ctrl-Alt-g - Toggle mouse and keyboard grab. - -In the virtual consoles, you can use Ctrl-Up, Ctrl-Down, Ctrl-PageUp and -Ctrl-PageDown to move in the back log. +In the virtual consoles, you can use :kbd:`Ctrl+Up`, :kbd:`Ctrl+Down`, :kbd:`Ctrl+PageUp` and +:kbd:`Ctrl+PageDown` to move in the back log. diff --git a/docs/system/linuxboot.rst b/docs/system/linuxboot.rst index 5db2e560dc5..f7573ab80aa 100644 --- a/docs/system/linuxboot.rst +++ b/docs/system/linuxboot.rst @@ -11,7 +11,7 @@ The syntax is: .. 
parsed-literal:: - |qemu_system| -kernel bzImage -hda rootdisk.img -append "root=/dev/hda" + |qemu_system| -kernel bzImage -drive file=rootdisk.img,format=raw -append "root=/dev/sda" Use ``-kernel`` to provide the Linux kernel image and ``-append`` to give the kernel command line arguments. The ``-initrd`` option can be @@ -23,8 +23,8 @@ virtual serial port and the QEMU monitor to the console with the .. parsed-literal:: - |qemu_system| -kernel bzImage -hda rootdisk.img \ - -append "root=/dev/hda console=ttyS0" -nographic + |qemu_system| -kernel bzImage -drive file=rootdisk.img,format=raw \ + -append "root=/dev/sda console=ttyS0" -nographic -Use Ctrl-a c to switch between the serial console and the monitor (see +Use :kbd:`Ctrl+a c` to switch between the serial console and the monitor (see :ref:`GUI_keys`). diff --git a/docs/system/loongarch/virt.rst b/docs/system/loongarch/virt.rst index 06d034b8ef2..78458784697 100644 --- a/docs/system/loongarch/virt.rst +++ b/docs/system/loongarch/virt.rst @@ -12,14 +12,15 @@ Supported devices ----------------- The ``virt`` machine supports: -- Gpex host bridge -- Ls7a RTC device -- Ls7a IOAPIC device -- ACPI GED device -- Fw_cfg device -- PCI/PCIe devices -- Memory device -- CPU device. Type: la464. + +* Gpex host bridge +* Ls7a RTC device +* Ls7a IOAPIC device +* ACPI GED device +* Fw_cfg device +* PCI/PCIe devices +* Memory device +* CPU device. Type: la464. CPU and machine Type -------------------- @@ -39,13 +40,7 @@ can be accessed by following steps. .. code-block:: bash - ./configure --disable-rdma --prefix=/usr \ - --target-list="loongarch64-softmmu" \ - --disable-libiscsi --disable-libnfs --disable-libpmem \ - --disable-glusterfs --enable-libusb --enable-usb-redir \ - --disable-opengl --disable-xen --enable-spice \ - --enable-debug --disable-capstone --disable-kvm \ - --enable-profiler + ./configure --target-list="loongarch64-softmmu" make -j8 (2) Set cross tools: @@ -53,9 +48,7 @@ can be accessed by following steps. .. code-block:: bash wget https://github.com/loongson/build-tools/releases/download/2022.09.06/loongarch64-clfs-6.3-cross-tools-gcc-glibc.tar.xz - tar -vxf loongarch64-clfs-6.3-cross-tools-gcc-glibc.tar.xz -C /opt - export PATH=/opt/cross-tools/bin:$PATH export LD_LIBRARY_PATH=/opt/cross-tools/lib:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=/opt/cross-tools/loongarch64-unknown-linux-gnu/lib/:$LD_LIBRARY_PATH @@ -64,7 +57,7 @@ Note: You need get the latest cross-tools at https://github.com/loongson/build-t (3) Build BIOS: - See: https://github.com/tianocore/edk2-platforms/tree/master/Platform/Loongson/LoongArchQemuPkg#readme + See: https://github.com/tianocore/edk2/tree/master/OvmfPkg/LoongArchVirt#readme Note: To build the release version of the bios, set --buildtarget=RELEASE, the bios file path: Build/LoongArchQemu/RELEASE_GCC5/FV/QEMU_EFI.fd @@ -74,13 +67,9 @@ Note: To build the release version of the bios, set --buildtarget=RELEASE, .. code-block:: bash git clone https://github.com/loongson/linux.git - cd linux - git checkout loongarch-next - make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- loongson3_defconfig - make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- -j32 Note: The branch of linux source code is loongarch-next. 
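(5) Boot the machine:

Once the firmware and kernel are built, a minimal ``virt`` machine command
line could look like the following sketch (the kernel path, ramdisk and
kernel arguments are illustrative and depend on your build):

.. code-block:: bash

   qemu-system-loongarch64 -machine virt -cpu la464 -smp 1 -m 4G \
       -bios Build/LoongArchQemu/RELEASE_GCC5/FV/QEMU_EFI.fd \
       -kernel /path/to/vmlinux \
       -initrd /path/to/ramdisk.img \
       -append "console=ttyS0,115200 rdinit=/sbin/init" \
       -nographic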
diff --git a/docs/system/mux-chardev.rst.inc b/docs/system/mux-chardev.rst.inc index 84ea12cbf58..c87ba313629 100644 --- a/docs/system/mux-chardev.rst.inc +++ b/docs/system/mux-chardev.rst.inc @@ -1,27 +1,33 @@ During emulation, if you are using a character backend multiplexer (which is the default if you are using ``-nographic``) then several commands are available via an escape sequence. These key sequences all -start with an escape character, which is Ctrl-a by default, but can be +start with an escape character, which is :kbd:`Ctrl+a` by default, but can be changed with ``-echr``. The list below assumes you're using the default. -Ctrl-a h - Print this help +.. list-table:: Multiplexer Keys + :widths: 20 80 + :header-rows: 1 -Ctrl-a x - Exit emulator + * - Key Sequence + - Action -Ctrl-a s - Save disk data back to file (if -snapshot) + * - :kbd:`Ctrl+a h` + - Print this help -Ctrl-a t - Toggle console timestamps + * - :kbd:`Ctrl+a x` + - Exit emulator -Ctrl-a b - Send break (magic sysrq in Linux) + * - :kbd:`Ctrl+a s` + - Save disk data back to file (if -snapshot) -Ctrl-a c - Rotate between the frontends connected to the multiplexer (usually - this switches between the monitor and the console) + * - :kbd:`Ctrl+a t` + - Toggle console timestamps -Ctrl-a Ctrl-a - Send the escape character to the frontend + * - :kbd:`Ctrl+a b` + - Send break (magic sysrq in Linux) + + * - :kbd:`Ctrl+a c` + - Rotate between the frontends connected to the multiplexer (usually this switches between the monitor and the console) + + * - :kbd:`Ctrl+a Ctrl+a` + - Send the escape character to the frontend diff --git a/docs/system/ppc/amigang.rst b/docs/system/ppc/amigang.rst index e2c9cb74b7f..21bb14ed099 100644 --- a/docs/system/ppc/amigang.rst +++ b/docs/system/ppc/amigang.rst @@ -21,6 +21,7 @@ Emulated devices * VIA VT82C686B south bridge * PCI VGA compatible card (guests may need other card instead) * PS/2 keyboard and mouse + * 4 KiB NVRAM (use ``-drive if=mtd,format=raw,file=nvram.bin`` to keep contents persistent) Firmware -------- @@ -54,14 +55,14 @@ To boot the system run: -cdrom "A1 Linux Net Installer.iso" \ -device ati-vga,model=rv100,romfile=VGABIOS-lgpl-latest.bin -From the firmware menu that appears select ``Boot sequence`` → -``Amiga Multiboot Options`` and set ``Boot device 1`` to -``Onboard VIA IDE CDROM``. Then hit escape until the main screen appears again, -hit escape once more and from the exit menu that appears select either -``Save settings and exit`` or ``Use settings for this session only``. It may -take a long time loading the kernel into memory but eventually it boots and the -installer becomes visible. The ``ati-vga`` RV100 emulation is not -complete yet so only frame buffer works, DRM and 3D is not available. +If a firmware menu appears, select ``Boot sequence`` → ``Amiga Multiboot Options`` +and set ``Boot device 1`` to ``Onboard VIA IDE CDROM``. Then hit escape until +the main screen appears again, hit escape once more and from the exit menu that +appears select either ``Save settings and exit`` or ``Use settings for this +session only``. It may take a long time loading the kernel into memory but +eventually it boots and the installer becomes visible. The ``ati-vga`` RV100 +emulation is not complete yet so only frame buffer works, DRM and 3D is not +available. 
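The NVRAM listed among the emulated devices above is backed by a plain
4 KiB file. One possible way to create it and keep the firmware settings
persistent across runs (the file name is arbitrary):

.. code-block:: bash

   # create an empty 4 KiB NVRAM image once
   dd if=/dev/zero of=nvram.bin bs=1024 count=4

   # then add the mtd drive to the boot command shown above
   qemu-system-ppc [...OPTIONS...] \
       -drive if=mtd,format=raw,file=nvram.bin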
Genesi/bPlan Pegasos II (``pegasos2``) ====================================== diff --git a/docs/system/ppc/embedded.rst b/docs/system/ppc/embedded.rst index af3b3d9fa46..5cb7d98b450 100644 --- a/docs/system/ppc/embedded.rst +++ b/docs/system/ppc/embedded.rst @@ -4,6 +4,5 @@ Embedded family boards - ``bamboo`` bamboo - ``mpc8544ds`` mpc8544ds - ``ppce500`` generic paravirt e500 platform -- ``ref405ep`` ref405ep - ``sam460ex`` aCube Sam460ex - ``virtex-ml507`` Xilinx Virtex ML507 reference design diff --git a/docs/system/ppc/powermac.rst b/docs/system/ppc/powermac.rst index 04334ba2102..3eac81c491c 100644 --- a/docs/system/ppc/powermac.rst +++ b/docs/system/ppc/powermac.rst @@ -4,8 +4,8 @@ PowerMac family boards (``g3beige``, ``mac99``) Use the executable ``qemu-system-ppc`` to simulate a complete PowerMac PowerPC system. -- ``g3beige`` Heathrow based PowerMAC -- ``mac99`` Mac99 based PowerMAC +- ``g3beige`` Heathrow based PowerMac +- ``mac99`` Mac99 based PowerMac Supported devices ----------------- diff --git a/docs/system/ppc/powernv.rst b/docs/system/ppc/powernv.rst index 09f39658587..f3ec2cc69c0 100644 --- a/docs/system/ppc/powernv.rst +++ b/docs/system/ppc/powernv.rst @@ -181,7 +181,7 @@ connected to a remote QEMU machine acting as BMC, using these options .. code-block:: bash - -chardev socket,id=ipmi0,host=localhost,port=9002,reconnect=10 \ + -chardev socket,id=ipmi0,host=localhost,port=9002,reconnect-ms=10000 \ -device ipmi-bmc-extern,id=bmc0,chardev=ipmi0 \ -device isa-ipmi-bt,bmc=bmc0,irq=10 \ -nodefaults @@ -195,6 +195,13 @@ Use a MTD drive to add a PNOR to the machine, and get a NVRAM : -drive file=./witherspoon.pnor,format=raw,if=mtd +If no mtd drive is provided, the powernv platform will create a default +PNOR device using a tiny formatted PNOR in pc-bios/pnv-pnor.bin opened +read-only (PNOR changes will be persistent across reboots but not across +invocations of QEMU). If no defaults are used, an erased 128MB PNOR is +provided (which skiboot will probably not recognize since it is not +formatted). + Maintainer contact information ------------------------------ diff --git a/docs/system/ppc/pseries.rst b/docs/system/ppc/pseries.rst index a876d897b6e..bbc51aa7fcd 100644 --- a/docs/system/ppc/pseries.rst +++ b/docs/system/ppc/pseries.rst @@ -14,10 +14,19 @@ virtualization capabilities. Supported devices ================= - * Multi processor support for many Power processors generations: POWER7, - POWER7+, POWER8, POWER8NVL, POWER9, and Power10. Support for POWER5+ exists, - but its state is unknown. - * Interrupt Controller, XICS (POWER8) and XIVE (POWER9 and Power10) + * Multi processor support for many Power processors generations: + - POWER7, POWER7+ + - POWER8, POWER8NVL + - POWER9 + - Power10 + - Power11 + - Support for POWER5+ also exists, works with correct kernel/userspace + * Interrupt Controller + - XICS (POWER8) + - XIVE (Supported by below:) + - POWER9 + - Power10 + - Power11 * vPHB PCIe Host bridge. * vscsi and vnet devices, compatible with the same devices available on a PowerVM hypervisor with VIOS managing LPARs. diff --git a/docs/system/riscv/microblaze-v-generic.rst b/docs/system/riscv/microblaze-v-generic.rst new file mode 100644 index 00000000000..5606f88d570 --- /dev/null +++ b/docs/system/riscv/microblaze-v-generic.rst @@ -0,0 +1,42 @@ +Microblaze-V generic board (``amd-microblaze-v-generic``) +========================================================= +The AMD MicroBlaze™ V processor is a soft-core RISC-V processor IP for AMD +adaptive SoCs and FPGAs. 
The MicroBlaze™ V processor is based on the 32-bit (or +64-bit) RISC-V instruction set architecture (ISA) and contains interfaces +compatible with the classic MicroBlaze™ V processor (i.e it is a drop in +replacement for the classic MicroBlaze™ processor in existing RTL designs). +More information can be found in below document. + +https://docs.amd.com/r/en-US/ug1629-microblaze-v-user-guide/MicroBlaze-V-Architecture + +The MicroBlaze™ V generic board in QEMU has following supported devices: + + - timer + - uartlite + - uart16550 + - emaclite + - timer2 + - axi emac + - axi dma + +The MicroBlaze™ V core in QEMU has the following configuration: + + - RV32I base integer instruction set + - "Zicsr" Control and Status register instructions + - "Zifencei" instruction-fetch + - Extensions: m, a, f, c + +Running +""""""" +Below is an example command line for launching mainline U-boot +(xilinx_mbv32_defconfig) on the Microblaze-V generic board. + +.. code-block:: bash + + $ qemu-system-riscv32 -M amd-microblaze-v-generic \ + -display none \ + -device loader,addr=0x80000000,file=u-boot-spl.bin,cpu-num=0 \ + -device loader,addr=0x80200000,file=u-boot.img \ + -serial mon:stdio \ + -device loader,addr=0x83000000,file=system.dtb \ + -m 2g diff --git a/docs/system/riscv/microchip-icicle-kit.rst b/docs/system/riscv/microchip-icicle-kit.rst index 40798b1aae5..9809e94b84b 100644 --- a/docs/system/riscv/microchip-icicle-kit.rst +++ b/docs/system/riscv/microchip-icicle-kit.rst @@ -5,10 +5,10 @@ Microchip PolarFire SoC Icicle Kit integrates a PolarFire SoC, with one SiFive's E51 plus four U54 cores and many on-chip peripherals and an FPGA. For more details about Microchip PolarFire SoC, please see: -https://www.microsemi.com/product-directory/soc-fpgas/5498-polarfire-soc-fpga +https://www.microchip.com/en-us/products/fpgas-and-plds/system-on-chip-fpgas/polarfire-soc-fpgas The Icicle Kit board information can be found here: -https://www.microsemi.com/existing-parts/parts/152514 +https://www.microchip.com/en-us/development-tool/mpfs-icicle-kit-es Supported devices ----------------- @@ -26,95 +26,48 @@ The ``microchip-icicle-kit`` machine supports the following devices: * 2 GEM Ethernet controllers * 1 SDHC storage controller +The memory is set to 1537 MiB by default. A sanity check on RAM size is +performed in the machine init routine to prompt user to increase the RAM size +to > 1537 MiB when less than 1537 MiB RAM is detected. + Boot options ------------ -The ``microchip-icicle-kit`` machine can start using the standard -bios -functionality for loading its BIOS image, aka Hart Software Services (HSS_). -HSS loads the second stage bootloader U-Boot from an SD card. Then a kernel -can be loaded from U-Boot. It also supports direct kernel booting via the --kernel option along with the device tree blob via -dtb. When direct kernel -boot is used, the OpenSBI fw_dynamic BIOS image is used to boot a payload -like U-Boot or OS kernel directly. 
- -The user provided DTB should have the following requirements: - -* The /cpus node should contain at least one subnode for E51 and the number - of subnodes should match QEMU's ``-smp`` option -* The /memory reg size should match QEMU’s selected ram_size via ``-m`` -* Should contain a node for the CLINT device with a compatible string - "riscv,clint0" - -QEMU follows below truth table to select which payload to execute: - -===== ========== ========== ======= --bios -kernel -dtb payload -===== ========== ========== ======= - N N don't care HSS - Y don't care don't care HSS - N Y Y kernel -===== ========== ========== ======= - -The memory is set to 1537 MiB by default which is the minimum required high -memory size by HSS. A sanity check on ram size is performed in the machine -init routine to prompt user to increase the RAM size to > 1537 MiB when less -than 1537 MiB ram is detected. - -Running HSS ------------ - -HSS 2020.12 release is tested at the time of writing. To build an HSS image -that can be booted by the ``microchip-icicle-kit`` machine, type the following -in the HSS source tree: - -.. code-block:: bash - - $ export CROSS_COMPILE=riscv64-linux- - $ cp boards/mpfs-icicle-kit-es/def_config .config - $ make BOARD=mpfs-icicle-kit-es - -Download the official SD card image released by Microchip and prepare it for -QEMU usage: - -.. code-block:: bash - - $ wget ftp://ftpsoc.microsemi.com/outgoing/core-image-minimal-dev-icicle-kit-es-sd-20201009141623.rootfs.wic.gz - $ gunzip core-image-minimal-dev-icicle-kit-es-sd-20201009141623.rootfs.wic.gz - $ qemu-img resize core-image-minimal-dev-icicle-kit-es-sd-20201009141623.rootfs.wic 4G - -Then we can boot the machine by: - -.. code-block:: bash - - $ qemu-system-riscv64 -M microchip-icicle-kit -smp 5 \ - -bios path/to/hss.bin -sd path/to/sdcard.img \ - -nic user,model=cadence_gem \ - -nic tap,ifname=tap,model=cadence_gem,script=no \ - -display none -serial stdio \ - -chardev socket,id=serial1,path=serial1.sock,server=on,wait=on \ - -serial chardev:serial1 +The ``microchip-icicle-kit`` machine provides some options to run a firmware +(BIOS) or a kernel image. QEMU follows below truth table to select the +firmware: -With above command line, current terminal session will be used for the first -serial port. Open another terminal window, and use ``minicom`` to connect the -second serial port. +============= =========== ====================================== +-bios -kernel firmware +============= =========== ====================================== +none N this is an error +none Y the kernel image +NULL, default N hss.bin +NULL, default Y opensbi-riscv64-generic-fw_dynamic.bin +other don't care the BIOS image +============= =========== ====================================== -.. code-block:: bash +Direct Kernel Boot +------------------ - $ minicom -D unix\#serial1.sock +Use the ``-kernel`` option to directly run a kernel image. When a direct +kernel boot is requested, a device tree blob may be specified via the ``-dtb`` +option. Unlike other QEMU machines, this machine does not generate a device +tree for the kernel. It shall be provided by the user. The user provided DTB +should meet the following requirements: -HSS output is on the first serial port (stdio) and U-Boot outputs on the -second serial port. U-Boot will automatically load the Linux kernel from -the SD card image. +* The ``/cpus`` node should contain at least one subnode for E51 and the number + of subnodes should match QEMU's ``-smp`` option. 
-Direct Kernel Boot
-------------------
+* The ``/memory`` reg size should match QEMU's selected RAM size via the ``-m``
+  option.
-Sometimes we just want to test booting a new kernel, and transforming the
-kernel image to the format required by the HSS bootflow is tedious. We can
-use '-kernel' for direct kernel booting just like other RISC-V machines do.
+* It should contain a node for the CLINT device with a compatible string
+  "riscv,clint0".
-In this mode, the OpenSBI fw_dynamic BIOS image for 'generic' platform is
-used to boot an S-mode payload like U-Boot or OS kernel directly.
+When ``-bios`` is not specified or set to ``default``, the OpenSBI
+``fw_dynamic`` BIOS image for the ``generic`` platform is used to boot an
+S-mode payload like U-Boot or OS kernel directly.
 For example, the following commands show building a U-Boot image from U-Boot
 mainline v2021.07 for the Microchip Icicle Kit board:
@@ -146,4 +99,13 @@ CAVEATS:
   ``u-boot.bin`` has to be used which does contain one. To use the ELF image,
   we need to change to CONFIG_OF_EMBED or CONFIG_OF_PRIOR_STAGE.
+Running HSS
+-----------
+
+The machine ``microchip-icicle-kit`` used to run the Hart Software Services
+(HSS_); however, HSS development has progressed and the QEMU machine
+implementation lags behind. Currently, running the HSS no longer works.
+Support is missing in the clock and memory controller devices. In
+particular, reading from the SD card does not work.
+
 .. _HSS: https://github.com/polarfire-soc/hart-software-services
diff --git a/docs/system/riscv/virt.rst b/docs/system/riscv/virt.rst
index 9a06f95a344..60850970ce8 100644
--- a/docs/system/riscv/virt.rst
+++ b/docs/system/riscv/virt.rst
@@ -84,6 +84,25 @@ none``, as in
 Firmware images used for pflash must be exactly 32 MiB in size.
+riscv-iommu support
+-------------------
+
+The board supports the riscv-iommu-pci device, which can be added with the
+following command line:
+
+.. code-block:: bash
+
+   $ qemu-system-riscv64 -M virt -device riscv-iommu-pci (...)
+
+It also supports the riscv-iommu-sys platform device:
+
+.. code-block:: bash
+
+   $ qemu-system-riscv64 -M virt,iommu-sys=on (...)
+
+Refer to :ref:`riscv-iommu` for more information on how the RISC-V IOMMU
+support works.
+
 Machine-specific options
 ------------------------
@@ -110,12 +129,23 @@ The following machine-specific options are supported:
   MSIs. When not specified, this option is assumed to be "none" which selects
   SiFive PLIC to handle wired interrupts.
+  This option also interacts with '-accel kvm'. When using "aia=aplic-imsic"
+  with KVM, the kernel irqchip can be put in split mode by using
+  "-accel kvm,kernel-irqchip=split". In this case the ``virt`` machine will
+  emulate the APLIC controller instead of using the APLIC controller from the
+  irqchip, as shown in the example after this option list. See :ref:`riscv-aia`
+  for more details on all available AIA modes.
+
 - aia-guests=nnn
   The number of per-HART VS-level AIA IMSIC pages to be emulated for a guest
   having AIA IMSIC (i.e. "aia=aplic-imsic" selected). When not specified,
   the default number of per-HART VS-level AIA IMSIC pages is 0.
+- iommu-sys=[on|off]
+
+  Enables the riscv-iommu-sys platform device. Defaults to 'off'.
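+
+For example, assuming a RISC-V host with KVM support, a guest using
+"aia=aplic-imsic" together with the split kernel irqchip could be started
+with a command line along the following lines (all other guest options
+omitted):
+
+.. code-block:: bash
+
+   $ qemu-system-riscv64 -M virt,aia=aplic-imsic \
+        -accel kvm,kernel-irqchip=split (...)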
+
 Running Linux kernel
 --------------------
diff --git a/docs/system/riscv/xiangshan-kunminghu.rst b/docs/system/riscv/xiangshan-kunminghu.rst
new file mode 100644
index 00000000000..46e7ceeda06
--- /dev/null
+++ b/docs/system/riscv/xiangshan-kunminghu.rst
@@ -0,0 +1,39 @@
+BOSC Xiangshan Kunminghu FPGA prototype platform (``xiangshan-kunminghu``)
+==========================================================================
+The ``xiangshan-kunminghu`` machine is compatible with our FPGA prototype
+platform.
+
+XiangShan is an open-source high-performance RISC-V processor project.
+The third generation processor is called Kunminghu. Kunminghu is a 64-bit
+RV64GCBSUHV processor core. More information can be found in our GitHub
+repository:
+https://github.com/OpenXiangShan/XiangShan
+
+Supported devices
+-----------------
+The ``xiangshan-kunminghu`` machine supports the following devices:
+
+* Up to 16 xiangshan-kunminghu cores
+* Core Local Interruptor (CLINT)
+* Incoming MSI Controller (IMSIC)
+* Advanced Platform-Level Interrupt Controller (APLIC)
+* 1 UART
+
+Boot options
+------------
+The ``xiangshan-kunminghu`` machine can start using the standard ``-bios``
+functionality for loading the boot image. You need to compile and link
+the firmware, kernel, and Device Tree (FDT) into a single binary file,
+such as ``fw_payload.bin``.
+
+Running
+-------
+Below is an example command line for running the ``xiangshan-kunminghu``
+machine:
+
+.. code-block:: bash
+
+   $ qemu-system-riscv64 -machine xiangshan-kunminghu \
+        -smp 16 -m 16G \
+        -bios path/to/opensbi/platform/generic/firmware/fw_payload.bin \
+        -nographic
diff --git a/docs/system/s390x/bootdevices.rst b/docs/system/s390x/bootdevices.rst
index 1a7a18b43b0..97b3914785a 100644
--- a/docs/system/s390x/bootdevices.rst
+++ b/docs/system/s390x/bootdevices.rst
@@ -6,9 +6,7 @@ Booting with bootindex parameter
 For classical mainframe guests (i.e. LPAR or z/VM installations), you always
 have to explicitly specify the disk where you want to boot from (or "IPL" from,
-in s390x-speak -- IPL means "Initial Program Load"). In particular, there can
-also be only one boot device according to the architecture specification, thus
-specifying multiple boot devices is not possible (yet).
+in s390x-speak -- IPL means "Initial Program Load").
 So for booting an s390x guest in QEMU, you should always mark the
 device where you want to boot from with the ``bootindex`` property, for
@@ -17,6 +15,11 @@ example::
   qemu-system-s390x -drive if=none,id=dr1,file=guest.qcow2 \
                     -device virtio-blk,drive=dr1,bootindex=1
+Multiple devices may have a bootindex. The device with the lowest bootindex is
+IPLed first. If that IPL fails, the device with the next lowest bootindex will
+be tried, and so on, until IPL is successful or there are no remaining boot
+devices to try.
+
 For booting from a CD-ROM ISO image (which needs to include El-Torito boot
 information in order to be bootable), it is recommended to specify a ``scsi-cd``
 device, for example like this::
@@ -76,29 +79,45 @@ The second way to use this parameter is to use a number in the range from 0
 to 31. The numbers that can be used here correspond to the numbers that are
 shown when using the ``PROMPT`` option, and the s390-ccw bios will then try
 to automatically boot the kernel that is associated with the given number.
-Note that ``0`` can be used to boot the default entry.
+Note that ``0`` can be used to boot the default entry.
If the machine +``loadparm`` is not assigned a value, then the default entry is used. + +By default, the machine ``loadparm`` applies to all boot devices. If multiple +devices are assigned a ``bootindex`` and the ``loadparm`` is to be different +between them, an independent ``loadparm`` may be assigned on a per-device basis. + +An example guest using per-device ``loadparm``:: + + qemu-system-s390x -drive if=none,id=dr1,file=primary.qcow2 \ + -device virtio-blk,drive=dr1,bootindex=1 \ + -drive if=none,id=dr2,file=secondary.qcow2 \ + -device virtio-blk,drive=dr2,bootindex=2,loadparm=3 + +In this case, the primary boot device will attempt to IPL using the default +entry (because no ``loadparm`` is specified for this device or for the +machine). If that device fails to boot, the secondary device will attempt to +IPL using entry number 3. + +If a ``loadparm`` is specified on both the machine and a device, the per-device +value will superseded the machine value. Per-device ``loadparm`` values are +only used for devices with an assigned ``bootindex``. The machine ``loadparm`` +is used when attempting to boot without a ``bootindex``. Booting from a network device ----------------------------- -Beside the normal guest firmware (which is loaded from the file ``s390-ccw.img`` -in the data directory of QEMU, or via the ``-bios`` option), QEMU ships with -a small TFTP network bootloader firmware for virtio-net-ccw devices, too. This -firmware is loaded from a file called ``s390-netboot.img`` in the QEMU data -directory. In case you want to load it from a different filename instead, -you can specify it via the ``-global s390-ipl.netboot_fw=filename`` -command line option. - -The ``bootindex`` property is especially important for booting via the network. -If you don't specify the ``bootindex`` property here, the network bootloader -firmware code won't get loaded into the guest memory so that the network boot -will fail. For a successful network boot, try something like this:: +The firmware that ships with QEMU includes a small TFTP network bootloader +for virtio-net-ccw devices. The ``bootindex`` property is especially +important for booting via the network. If you don't specify the ``bootindex`` +property here, the network bootloader won't be taken into consideration and +the network boot will fail. For a successful network boot, try something +like this:: qemu-system-s390x -netdev user,id=n1,tftp=...,bootfile=... \ -device virtio-net-ccw,netdev=n1,bootindex=1 -The network bootloader firmware also has basic support for pxelinux.cfg-style +The network bootloader also has basic support for pxelinux.cfg-style configuration files. See the `PXELINUX Configuration page `__ for details how to set up the configuration file on your TFTP server. diff --git a/docs/system/sriov.rst b/docs/system/sriov.rst new file mode 100644 index 00000000000..d12178f3c31 --- /dev/null +++ b/docs/system/sriov.rst @@ -0,0 +1,37 @@ +.. SPDX-License-Identifier: GPL-2.0-or-later + +Compsable SR-IOV device +======================= + +SR-IOV (Single Root I/O Virtualization) is an optional extended capability of a +PCI Express device. It allows a single physical function (PF) to appear as +multiple virtual functions (VFs) for the main purpose of eliminating software +overhead in I/O from virtual machines. + +There are devices with predefined SR-IOV configurations, but it is also possible +to compose an SR-IOV device yourself. Composing an SR-IOV device is currently +only supported by virtio-net-pci. 
+
+Users can configure an SR-IOV-capable virtio-net device by adding
+virtio-net-pci functions to a bus. Below is a command line example:
+
+.. code-block:: shell
+
+  -netdev user,id=n -netdev user,id=o
+  -netdev user,id=p -netdev user,id=q
+  -device pcie-root-port,id=b
+  -device virtio-net-pci,bus=b,addr=0x0.0x3,netdev=q,sriov-pf=f
+  -device virtio-net-pci,bus=b,addr=0x0.0x2,netdev=p,sriov-pf=f
+  -device virtio-net-pci,bus=b,addr=0x0.0x1,netdev=o,sriov-pf=f
+  -device virtio-net-pci,bus=b,addr=0x0.0x0,netdev=n,id=f
+
+The VFs specify the paired PF with the ``sriov-pf`` property. The PF must be
+added after all VFs. It is the user's responsibility to ensure that VFs have
+function numbers larger than that of the PF, and that the function numbers
+have a consistent stride. Both the PF and VFs are ARI-capable, so a maximum
+of 255 VFs is supported.
+
+You may also need to perform additional steps to activate the SR-IOV feature on
+your guest. For Linux, refer to [1]_.
+
+.. [1] https://docs.kernel.org/PCI/pci-iov-howto.html
diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst
index 7b992722846..a96d1867df1 100644
--- a/docs/system/target-arm.rst
+++ b/docs/system/target-arm.rst
@@ -63,10 +63,6 @@ large amounts of RAM. It also supports 64-bit CPUs.
 Board-specific documentation
 ============================
-Unfortunately many of the Arm boards QEMU supports are currently
-undocumented; you can get a complete list by running
-``qemu-system-aarch64 --machine help``.
-
 .. This table of contents should be kept sorted alphabetically by the
    title text of each file, which isn't the same ordering
@@ -75,6 +71,7 @@ undocumented; you can get a complete list by running
 .. toctree::
    :maxdepth: 1
+   arm/max78000
    arm/integratorcp
    arm/mps2
    arm/musca
@@ -90,26 +87,28 @@ undocumented; you can get a complete list by running
    arm/digic
    arm/cubieboard
    arm/emcraft-sf2
+   arm/exynos
+   arm/fby35
    arm/musicpal
-   arm/gumstix
-   arm/mainstone
    arm/kzm
-   arm/nseries
    arm/nrf
    arm/nuvoton
    arm/imx25-pdk
+   arm/mcimx6ul-evk
+   arm/mcimx7d-sabre
+   arm/imx8mp-evk
    arm/orangepi
-   arm/palm
    arm/raspi
-   arm/xscale
    arm/collie
    arm/sx1
    arm/stellaris
    arm/stm32
    arm/virt
+   arm/vmapple
   arm/xenpvh
   arm/xlnx-versal-virt
   arm/xlnx-zynq
+   arm/xlnx-zcu102
 Emulated CPU architecture support
 =================================
diff --git a/docs/system/target-i386.rst b/docs/system/target-i386.rst
index 1b8a1f248ab..2374391397c 100644
--- a/docs/system/target-i386.rst
+++ b/docs/system/target-i386.rst
@@ -14,8 +14,9 @@ Board-specific documentation
 .. toctree::
    :maxdepth: 1
-   i386/microvm
    i386/pc
+   i386/microvm
+   i386/nitro-enclave
 Architectural features
 ~~~~~~~~~~~~~~~~~~~~~~
@@ -26,14 +27,14 @@ Architectural features
    i386/cpu
    i386/hyperv
    i386/xen
+   i386/xenpvh
    i386/kvm-pv
    i386/sgx
    i386/amd-memory-encryption
+   i386/tdx
 OS requirements
 ~~~~~~~~~~~~~~~
 On x86_64 hosts, the default set of CPU features enabled by the KVM
-accelerator require the host to be running Linux v4.5 or newer. Red Hat
-Enterprise Linux 7 is also supported, since the required
-functionality was backported.
+accelerator require the host to be running Linux v4.5 or newer.
diff --git a/docs/system/target-loongarch.rst b/docs/system/target-loongarch.rst
new file mode 100644
index 00000000000..316c604b919
--- /dev/null
+++ b/docs/system/target-loongarch.rst
@@ -0,0 +1,19 @@
+.. _LoongArch-System-emulator:
+
+LoongArch System emulator
+-------------------------
+
+QEMU can emulate LoongArch 64-bit systems via the
+``qemu-system-loongarch64`` binary.
Only one machine type ``virt`` is
+supported.
+
+When using KVM as the accelerator, QEMU can emulate the la464 CPU model. When
+using the default CPU model with TCG as the accelerator, QEMU will emulate a
+subset of la464 CPU features that should be enough to run distributions
+built for the la464.
+
+Board-specific documentation
+============================
+
+.. toctree::
+   loongarch/virt
diff --git a/docs/system/target-mips.rst b/docs/system/target-mips.rst
index 83239fb9dfd..9028c3b304d 100644
--- a/docs/system/target-mips.rst
+++ b/docs/system/target-mips.rst
@@ -112,5 +112,5 @@ https://mipsdistros.mips.com/LinuxDistro/nanomips/kernels/v4.15.18-432-gb2eb9a8b
 Start system emulation of Malta board with nanoMIPS I7200 CPU::
   qemu-system-mipsel -cpu I7200 -kernel <kernel_image> \
-      -M malta -serial stdio -m <memory_size> -hda <disk_image> \
+      -M malta -serial stdio -m <memory_size> -drive file=<disk_image>,format=raw \
       -append "mem=256m@0x0 rw console=ttyS0 vga=cirrus vesa=0x111 root=/dev/sda"
diff --git a/docs/system/target-riscv.rst b/docs/system/target-riscv.rst
index ba195f1518a..89b2cb732c2 100644
--- a/docs/system/target-riscv.rst
+++ b/docs/system/target-riscv.rst
@@ -66,10 +66,12 @@ undocumented; you can get a complete list by running
 .. toctree::
    :maxdepth: 1
+   riscv/microblaze-v-generic
    riscv/microchip-icicle-kit
    riscv/shakti-c
    riscv/sifive_u
    riscv/virt
+   riscv/xiangshan-kunminghu
 RISC-V CPU firmware
 -------------------
diff --git a/docs/system/targets.rst b/docs/system/targets.rst
index 224fadae71c..38e24188018 100644
--- a/docs/system/targets.rst
+++ b/docs/system/targets.rst
@@ -18,6 +18,7 @@ Contents:
    target-arm
    target-avr
+   target-loongarch
    target-m68k
    target-mips
    target-ppc
diff --git a/docs/system/virtio-net-failover.rst b/docs/system/virtio-net-failover.rst
index 6002dc5d96e..0cc465454cf 100644
--- a/docs/system/virtio-net-failover.rst
+++ b/docs/system/virtio-net-failover.rst
@@ -26,43 +26,48 @@ and standby devices are not plugged into the same PCIe slot.
 Usecase
 -------
-   Virtio-net standby allows easy migration while using a passed-through fast
-   networking device by falling back to a virtio-net device for the duration of
-   the migration. It is like a simple version of a bond, the difference is that it
-   requires no configuration in the guest. When a guest is live-migrated to
-   another host QEMU will unplug the primary device via the PCIe based hotplug
-   handler and traffic will go through the virtio-net device. On the target
-   system the primary device will be automatically plugged back and the
-   net_failover module registers it again as the primary device.
+Virtio-net standby allows easy migration while using a passed-through
+fast networking device by falling back to a virtio-net device for the
+duration of the migration. It is like a simple version of a bond; the
+difference is that it requires no configuration in the guest. When a
+guest is live-migrated to another host, QEMU will unplug the primary
+device via the PCIe based hotplug handler and traffic will go through
+the virtio-net device. On the target system the primary device will be
+automatically plugged back and the net_failover module registers it
+again as the primary device.
 Usage
 -----
-   The primary device can be hotplugged or be part of the startup configuration
+The primary device can be hotplugged or be part of the startup configuration
-   -device virtio-net-pci,netdev=hostnet1,id=net1,mac=52:54:00:6f:55:cc, \
-         bus=root2,failover=on
+.. code-block:: shell
-   With the parameter failover=on the VIRTIO_NET_F_STANDBY feature will be enabled.
+ -device virtio-net-pci,netdev=hostnet1,id=net1,mac=52:54:00:6f:55:cc,bus=root2,failover=on + +With the parameter ``failover=on`` the VIRTIO_NET_F_STANDBY feature will be enabled. + +.. code-block:: shell -device vfio-pci,host=5e:00.2,id=hostdev0,bus=root1,failover_pair_id=net1 - failover_pair_id references the id of the virtio-net standby device. This - is only for pairing the devices within QEMU. The guest kernel module - net_failover will match devices with identical MAC addresses. +``failover_pair_id`` references the id of the virtio-net standby device. +This is only for pairing the devices within QEMU. The guest kernel +module net_failover will match devices with identical MAC addresses. Hotplug ------- - Both primary and standby device can be hotplugged via the QEMU monitor. Note - that if the virtio-net device is plugged first a warning will be issued that it - couldn't find the primary device. +Both primary and standby device can be hotplugged via the QEMU +monitor. Note that if the virtio-net device is plugged first a warning +will be issued that it couldn't find the primary device. Migration --------- - A new migration state wait-unplug was added for this feature. If failover primary - devices are present in the configuration, migration will go into this state. - It will wait until the device unplug is completed in the guest and then move into - active state. On the target system the primary devices will be automatically hotplugged - when the feature bit was negotiated for the virtio-net standby device. +A new migration state wait-unplug was added for this feature. If +failover primary devices are present in the configuration, migration +will go into this state. It will wait until the device unplug is +completed in the guest and then move into active state. On the target +system the primary devices will be automatically hotplugged when the +feature bit was negotiated for the virtio-net standby device. diff --git a/docs/tools/index.rst b/docs/tools/index.rst index 33ad438e86f..1e88ae48cdc 100644 --- a/docs/tools/index.rst +++ b/docs/tools/index.rst @@ -15,5 +15,4 @@ command line utilities and other standalone programs. qemu-nbd qemu-pr-helper qemu-trace-stap - virtfs-proxy-helper qemu-vmsr-helper diff --git a/docs/tools/qemu-img.rst b/docs/tools/qemu-img.rst index 3653adb963e..5e7b85079d9 100644 --- a/docs/tools/qemu-img.rst +++ b/docs/tools/qemu-img.rst @@ -256,7 +256,7 @@ Parameters to snapshot subcommand: .. option:: -l - Lists all snapshots in the given image + Lists all snapshots in the given image (default action) Command description: @@ -419,7 +419,7 @@ Command description: 4 Error on reading data -.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps [--skip-broken-bitmaps]] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE [-F BACKING_FMT]] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME +.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps [--skip-broken-bitmaps]] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-b BACKING_FILE [-F BACKING_FMT]] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME Convert the disk image *FILENAME* or a snapshot *SNAPSHOT_PARAM* to disk image *OUTPUT_FILENAME* using format *OUTPUT_FMT*. 
It can @@ -467,11 +467,11 @@ Command description: ``--skip-broken-bitmaps`` is also specified to copy only the consistent bitmaps. -.. option:: create [--object OBJECTDEF] [-q] [-f FMT] [-b BACKING_FILE [-F BACKING_FMT]] [-u] [-o OPTIONS] FILENAME [SIZE] +.. option:: create [-f FMT] [-o FMT_OPTS] [-b BACKING_FILE [-B BACKING_FMT]] [-u] [-q] [--object OBJDEF] FILE [SIZE] - Create the new disk image *FILENAME* of size *SIZE* and format - *FMT*. Depending on the file format, you can add one or more *OPTIONS* - that enable additional features of this format. + Create the new disk image *FILE* of size *SIZE* and format + *FMT*. Depending on the file format, you can add one or more *FMT_OPTS* + options that enable additional features of this format. If the option *BACKING_FILE* is specified, then the image will record only the differences from *BACKING_FILE*. No size needs to be specified in @@ -479,7 +479,7 @@ Command description: ``commit`` monitor command (or ``qemu-img commit``). If a relative path name is given, the backing file is looked up relative to - the directory containing *FILENAME*. + the directory containing *FILE*. Note that a given backing file will be opened to check that it is valid. Use the ``-u`` option to enable unsafe backing file mode, which means that the @@ -663,11 +663,11 @@ Command description: bitmap support, or 0 if bitmaps are supported but there is nothing to copy. -.. option:: snapshot [--object OBJECTDEF] [--image-opts] [-U] [-q] [-l | -a SNAPSHOT | -c SNAPSHOT | -d SNAPSHOT] FILENAME +.. option:: snapshot [--object OBJECTDEF] [-f FMT | --image-opts] [-U] [-q] [-l | -a SNAPSHOT | -c SNAPSHOT | -d SNAPSHOT] FILENAME List, apply, create or delete snapshots in image *FILENAME*. -.. option:: rebase [--object OBJECTDEF] [--image-opts] [-U] [-q] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-p] [-u] [-c] -b BACKING_FILE [-F BACKING_FMT] FILENAME +.. option:: rebase [--object OBJECTDEF] [--image-opts] [-U] [-q] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-p] [-u] [-c] -b BACKING_FILE [-B BACKING_FMT] FILENAME Changes the backing file of an image. Only the formats ``qcow2`` and ``qed`` support changing the backing file. diff --git a/docs/tools/qemu-nbd.rst b/docs/tools/qemu-nbd.rst index 329f44d9895..f82ea5fd77b 100644 --- a/docs/tools/qemu-nbd.rst +++ b/docs/tools/qemu-nbd.rst @@ -1,3 +1,5 @@ +.. _qemu-nbd: + ===================================== QEMU Disk Network Block Device Server ===================================== @@ -154,6 +156,11 @@ driver options if :option:`--image-opts` is specified. Set the NBD volume export description, as a human-readable string. +.. option:: --handshake-limit=N + + Set the timeout for a client to successfully complete its handshake + to N seconds (default 10), or 0 for no limit. + .. option:: -L, --list Connect as a client and list all details about the exports exposed by diff --git a/docs/tools/qemu-storage-daemon.rst b/docs/tools/qemu-storage-daemon.rst index ea00149a63a..35ab2d78074 100644 --- a/docs/tools/qemu-storage-daemon.rst +++ b/docs/tools/qemu-storage-daemon.rst @@ -1,3 +1,5 @@ +.. _storage-daemon: + =================== QEMU Storage Daemon =================== diff --git a/docs/tools/qemu-vmsr-helper.rst b/docs/tools/qemu-vmsr-helper.rst index 6ec87b49d96..9ce10b9af9c 100644 --- a/docs/tools/qemu-vmsr-helper.rst +++ b/docs/tools/qemu-vmsr-helper.rst @@ -17,8 +17,8 @@ driver to advertise and monitor the power consumption or accumulated energy consumption of different power domains, such as CPU packages, DRAM, and other components when available. 
-However those register are accesible under priviliged access (CAP_SYS_RAWIO). -QEMU can use an external helper to access those priviliged register. +However those registers are accessible under privileged access (CAP_SYS_RAWIO). +QEMU can use an external helper to access those privileged registers. :program:`qemu-vmsr-helper` is that external helper; it creates a listener socket which will accept incoming connections for communication with QEMU. diff --git a/docs/tools/virtfs-proxy-helper.rst b/docs/tools/virtfs-proxy-helper.rst deleted file mode 100644 index bd310ebb07b..00000000000 --- a/docs/tools/virtfs-proxy-helper.rst +++ /dev/null @@ -1,75 +0,0 @@ -QEMU 9p virtfs proxy filesystem helper -====================================== - -Synopsis --------- - -**virtfs-proxy-helper** [*OPTIONS*] - -Description ------------ - -NOTE: The 9p 'proxy' backend is deprecated (since QEMU 8.1) and will be -removed, along with this daemon, in a future version of QEMU! - -Pass-through security model in QEMU 9p server needs root privilege to do -few file operations (like chown, chmod to any mode/uid:gid). There are two -issues in pass-through security model: - -- TOCTTOU vulnerability: Following symbolic links in the server could - provide access to files beyond 9p export path. - -- Running QEMU with root privilege could be a security issue. - -To overcome above issues, following approach is used: A new filesystem -type 'proxy' is introduced. Proxy FS uses chroot + socket combination -for securing the vulnerability known with following symbolic links. -Intention of adding a new filesystem type is to allow qemu to run -in non-root mode, but doing privileged operations using socket IO. - -Proxy helper (a stand alone binary part of qemu) is invoked with -root privileges. Proxy helper chroots into 9p export path and creates -a socket pair or a named socket based on the command line parameter. -QEMU and proxy helper communicate using this socket. QEMU proxy fs -driver sends filesystem request to proxy helper and receives the -response from it. - -The proxy helper is designed so that it can drop root privileges except -for the capabilities needed for doing filesystem operations. - -Options -------- - -The following options are supported: - -.. program:: virtfs-proxy-helper - -.. option:: -h - - Display help and exit - -.. option:: -p, --path PATH - - Path to export for proxy filesystem driver - -.. option:: -f, --fd SOCKET_ID - - Use given file descriptor as socket descriptor for communicating with - qemu proxy fs drier. Usually a helper like libvirt will create - socketpair and pass one of the fds as parameter to this option. - -.. option:: -s, --socket SOCKET_FILE - - Creates named socket file for communicating with qemu proxy fs driver - -.. option:: -u, --uid UID - - uid to give access to named socket file; used in combination with -g. - -.. option:: -g, --gid GID - - gid to give access to named socket file; used in combination with -u. - -.. option:: -n, --nodaemon - - Run as a normal program. By default program will run in daemon mode diff --git a/docs/user/index.rst b/docs/user/index.rst index 782d27cda27..2307580cb97 100644 --- a/docs/user/index.rst +++ b/docs/user/index.rst @@ -5,8 +5,9 @@ User Mode Emulation ------------------- This section of the manual is the overall guide for users using QEMU -for user-mode emulation. In this mode, QEMU can launch -processes compiled for one CPU on another CPU. +for user-mode emulation. 
In this mode, QEMU can launch programs
+compiled for one CPU architecture on the same Operating System (OS)
+but running on a different CPU architecture.
 .. toctree::
    :maxdepth: 2
diff --git a/docs/user/main.rst b/docs/user/main.rst
index e04bc2cb86f..347bdfabf8c 100644
--- a/docs/user/main.rst
+++ b/docs/user/main.rst
@@ -1,3 +1,5 @@
+.. _user-mode:
+
 QEMU User space emulator
 ========================
@@ -15,33 +17,51 @@ Features
 QEMU user space emulation has the following notable features:
-**System call translation:**
-  QEMU includes a generic system call translator. This means that the
-  parameters of the system calls can be converted to fix endianness and
-  32/64-bit mismatches between hosts and targets. IOCTLs can be
-  converted too.
-
-**POSIX signal handling:**
-  QEMU can redirect to the running program all signals coming from the
-  host (such as ``SIGALRM``), as well as synthesize signals from
-  virtual CPU exceptions (for example ``SIGFPE`` when the program
-  executes a division by zero).
-
-  QEMU relies on the host kernel to emulate most signal system calls,
-  for example to emulate the signal mask. On Linux, QEMU supports both
-  normal and real-time signals.
-
-**Threading:**
-  On Linux, QEMU can emulate the ``clone`` syscall and create a real
-  host thread (with a separate virtual CPU) for each emulated thread.
-  Note that not all targets currently emulate atomic operations
-  correctly. x86 and Arm use a global lock in order to preserve their
-  semantics.
+System call translation
+~~~~~~~~~~~~~~~~~~~~~~~
+
+System calls are the principal interface between user-space and the
+kernel. Generally the same system calls exist on all versions of the
+kernel, so QEMU includes a generic system call translator. The
+translator takes care of adjusting endianness and 32/64-bit parameter
+sizes and then calling the equivalent host system call.
+
+QEMU can also adjust device-specific ``ioctl()`` calls in a similar
+fashion.
+
+POSIX signal handling
+~~~~~~~~~~~~~~~~~~~~~
+
+QEMU can redirect to the running program all signals coming from the
+host (such as ``SIGALRM``), as well as synthesize signals from
+virtual CPU exceptions (for example ``SIGFPE`` when the program
+executes a division by zero).
+
+QEMU relies on the host kernel to emulate most signal system calls,
+for example to emulate the signal mask. On Linux, QEMU supports both
+normal and real-time signals.
+
+Threading
+~~~~~~~~~
+
+On Linux, QEMU can emulate the ``clone`` syscall and create a real
+host thread (with a separate virtual CPU) for each emulated thread.
+However, as QEMU relies on the system libc to call ``clone`` on its
+behalf, we limit the accepted flags to those it uses. Specifically,
+this means flags affecting namespaces (e.g. container runtimes) are not
+supported. QEMU user-mode processes can still be run inside containers,
+though.
+
+While QEMU does its best to emulate atomic operations properly,
+differences between the host and guest memory models can cause issues
+for software that makes assumptions about the memory model.
 QEMU was conceived so that ultimately it can emulate itself. Although
 it is not very useful, it is an important test to show the power of the
 emulator.
+.. _linux-user-mode:
+
 Linux User space emulator
 -------------------------
@@ -50,7 +70,7 @@ Command line options
 ::
-   qemu-i386 [-h] [-d] [-L path] [-s size] [-cpu model] [-g port] [-B offset] [-R size] program [arguments...]
+   qemu-i386 [-h] [-d] [-L path] [-s size] [-cpu model] [-g endpoint] [-B offset] [-R size] program [arguments...]
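+
+For example, a dynamically linked guest binary can be started by pointing
+``-L`` at a directory that holds the guest's ELF interpreter and shared
+libraries (the path below is only an illustration, not a fixed location)::
+
+   qemu-i386 -L /usr/i686-linux-gnu ./hello
+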
``-h``
   Print the help
@@ -87,8 +107,18 @@ Debug options:
   Activate logging of the specified items (use '-d help' for a list of log items)
-``-g port``
-   Wait gdb connection to port
+``-g endpoint``
+   Wait for a gdb connection on a port (e.g., ``1234``) or a unix socket (e.g.,
+   ``/tmp/qemu.sock``).
+
+   If a unix socket path contains a single ``%d`` placeholder (e.g.,
+   ``/tmp/qemu-%d.sock``), it is replaced by the emulator PID, which is useful
+   when passing this option via the ``QEMU_GDB`` environment variable to a
+   multi-process application.
+
+   If the endpoint address is followed by ``,suspend=n`` (e.g.,
+   ``1234,suspend=n``), then the emulated program starts without waiting for a
+   connection, which can be established at any later point in time.
``-one-insn-per-tb``
   Run the emulation with one guest instruction per translation block.
@@ -130,10 +160,6 @@ Other binaries
 The binary format is detected automatically.
-- user mode (Cris)
-
-  * ``qemu-cris`` TODO.
-
- user mode (i386)
  * ``qemu-i386`` TODO.
@@ -179,6 +205,8 @@ Other binaries
 * ``qemu-sparc64`` can execute some Sparc64 (Sparc64 CPU, 64 bit ABI) and
   SPARC32PLUS binaries (Sparc64 CPU, 32 bit ABI).
+.. _bsd-user-mode:
+
 BSD User space emulator
 -----------------------
diff --git a/dump/dump-hmp-cmds.c b/dump/dump-hmp-cmds.c
index d9340427c30..21023db6fd8 100644
--- a/dump/dump-hmp-cmds.c
+++ b/dump/dump-hmp-cmds.c
@@ -10,7 +10,7 @@
 #include "monitor/monitor.h"
 #include "qapi/error.h"
 #include "qapi/qapi-commands-dump.h"
-#include "qapi/qmp/qdict.h"
+#include "qobject/qdict.h"
 void hmp_dump_guest_memory(Monitor *mon, const QDict *qdict)
 {
diff --git a/dump/dump.c b/dump/dump.c
index 45e84428aea..15bbcc0c619 100644
--- a/dump/dump.c
+++ b/dump/dump.c
@@ -17,9 +17,9 @@
 #include "qemu/bswap.h"
 #include "exec/target_page.h"
 #include "monitor/monitor.h"
-#include "sysemu/dump.h"
-#include "sysemu/runstate.h"
-#include "sysemu/cpus.h"
+#include "system/dump.h"
+#include "system/runstate.h"
+#include "system/cpus.h"
 #include "qapi/error.h"
 #include "qapi/qapi-commands-dump.h"
 #include "qapi/qapi-events-dump.h"
diff --git a/dump/win_dump.c b/dump/win_dump.c
index 0e4fe692ce0..3162e8bd487 100644
--- a/dump/win_dump.c
+++ b/dump/win_dump.c
@@ -9,7 +9,7 @@
  */
 #include "qemu/osdep.h"
-#include "sysemu/dump.h"
+#include "system/dump.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
 #include "exec/cpu-defs.h"
@@ -476,8 +476,6 @@ void create_win_dump(DumpState *s, Error **errp)
     g_free(saved_ctx);
 out_cr3:
     first_x86_cpu->env.cr[3] = saved_cr3;
-
-    return;
 }
 #else /* !TARGET_X86_64 */
diff --git a/dump/win_dump.h b/dump/win_dump.h
index c9b49f87dc8..9d6cfa47c56 100644
--- a/dump/win_dump.h
+++ b/dump/win_dump.h
@@ -11,7 +11,7 @@
 #ifndef WIN_DUMP_H
 #define WIN_DUMP_H
-#include "sysemu/dump.h"
+#include "system/dump.h"
 /* Check Windows dump availability for the current target */
 bool win_dump_available(Error **errp);
diff --git a/ebpf/ebpf_rss-stub.c b/ebpf/ebpf_rss-stub.c
index 8d7fae2ad92..d0e7f99fb91 100644
--- a/ebpf/ebpf_rss-stub.c
+++ b/ebpf/ebpf_rss-stub.c
@@ -23,19 +23,21 @@ bool ebpf_rss_is_loaded(struct EBPFRSSContext *ctx)
     return false;
 }
-bool ebpf_rss_load(struct EBPFRSSContext *ctx)
+bool ebpf_rss_load(struct EBPFRSSContext *ctx, Error **errp)
 {
     return false;
 }
 bool ebpf_rss_load_fds(struct EBPFRSSContext *ctx, int program_fd,
-                       int config_fd, int toeplitz_fd, int table_fd)
+                       int config_fd, int toeplitz_fd, int table_fd,
+                       Error **errp)
 {
     return false;
 }
 bool ebpf_rss_set_all(struct EBPFRSSContext *ctx, struct EBPFRSSConfig *config,
-
uint16_t *indirections_table, uint8_t *toeplitz_key) + uint16_t *indirections_table, uint8_t *toeplitz_key, + Error **errp) { return false; } diff --git a/ebpf/ebpf_rss.c b/ebpf/ebpf_rss.c index 87f0714910e..e793786c172 100644 --- a/ebpf/ebpf_rss.c +++ b/ebpf/ebpf_rss.c @@ -47,34 +47,37 @@ bool ebpf_rss_is_loaded(struct EBPFRSSContext *ctx) return ctx != NULL && (ctx->obj != NULL || ctx->program_fd != -1); } -static bool ebpf_rss_mmap(struct EBPFRSSContext *ctx) +static bool ebpf_rss_mmap(struct EBPFRSSContext *ctx, Error **errp) { - if (!ebpf_rss_is_loaded(ctx)) { - return false; - } - ctx->mmap_configuration = mmap(NULL, qemu_real_host_page_size(), PROT_READ | PROT_WRITE, MAP_SHARED, ctx->map_configuration, 0); if (ctx->mmap_configuration == MAP_FAILED) { - trace_ebpf_error("eBPF RSS", "can not mmap eBPF configuration array"); + trace_ebpf_rss_mmap_error(ctx, "configuration"); + error_setg(errp, "Unable to map eBPF configuration array"); return false; } ctx->mmap_toeplitz_key = mmap(NULL, qemu_real_host_page_size(), PROT_READ | PROT_WRITE, MAP_SHARED, ctx->map_toeplitz_key, 0); if (ctx->mmap_toeplitz_key == MAP_FAILED) { - trace_ebpf_error("eBPF RSS", "can not mmap eBPF toeplitz key"); + trace_ebpf_rss_mmap_error(ctx, "toeplitz key"); + error_setg(errp, "Unable to map eBPF toeplitz array"); goto toeplitz_fail; } ctx->mmap_indirections_table = mmap(NULL, qemu_real_host_page_size(), PROT_READ | PROT_WRITE, MAP_SHARED, ctx->map_indirections_table, 0); if (ctx->mmap_indirections_table == MAP_FAILED) { - trace_ebpf_error("eBPF RSS", "can not mmap eBPF indirection table"); + trace_ebpf_rss_mmap_error(ctx, "indirections table"); + error_setg(errp, "Unable to map eBPF indirection array"); goto indirection_fail; } + trace_ebpf_rss_mmap(ctx, + ctx->mmap_configuration, + ctx->mmap_toeplitz_key, + ctx->mmap_indirections_table); return true; indirection_fail: @@ -90,10 +93,6 @@ static bool ebpf_rss_mmap(struct EBPFRSSContext *ctx) static void ebpf_rss_munmap(struct EBPFRSSContext *ctx) { - if (!ebpf_rss_is_loaded(ctx)) { - return; - } - munmap(ctx->mmap_indirections_table, qemu_real_host_page_size()); munmap(ctx->mmap_toeplitz_key, qemu_real_host_page_size()); munmap(ctx->mmap_configuration, qemu_real_host_page_size()); @@ -103,7 +102,7 @@ static void ebpf_rss_munmap(struct EBPFRSSContext *ctx) ctx->mmap_indirections_table = NULL; } -bool ebpf_rss_load(struct EBPFRSSContext *ctx) +bool ebpf_rss_load(struct EBPFRSSContext *ctx, Error **errp) { struct rss_bpf *rss_bpf_ctx; @@ -113,14 +112,16 @@ bool ebpf_rss_load(struct EBPFRSSContext *ctx) rss_bpf_ctx = rss_bpf__open(); if (rss_bpf_ctx == NULL) { - trace_ebpf_error("eBPF RSS", "can not open eBPF RSS object"); + trace_ebpf_rss_open_error(ctx); + error_setg(errp, "Unable to open eBPF RSS object"); goto error; } bpf_program__set_type(rss_bpf_ctx->progs.tun_rss_steering_prog, BPF_PROG_TYPE_SOCKET_FILTER); if (rss_bpf__load(rss_bpf_ctx)) { - trace_ebpf_error("eBPF RSS", "can not load RSS program"); + trace_ebpf_rss_load_error(ctx); + error_setg(errp, "Unable to load eBPF program"); goto error; } @@ -134,7 +135,12 @@ bool ebpf_rss_load(struct EBPFRSSContext *ctx) ctx->map_toeplitz_key = bpf_map__fd( rss_bpf_ctx->maps.tap_rss_map_toeplitz_key); - if (!ebpf_rss_mmap(ctx)) { + trace_ebpf_rss_load(ctx, + ctx->program_fd, + ctx->map_configuration, + ctx->map_indirections_table, + ctx->map_toeplitz_key); + if (!ebpf_rss_mmap(ctx, errp)) { goto error; } @@ -151,13 +157,28 @@ bool ebpf_rss_load(struct EBPFRSSContext *ctx) } bool ebpf_rss_load_fds(struct 
EBPFRSSContext *ctx, int program_fd, - int config_fd, int toeplitz_fd, int table_fd) + int config_fd, int toeplitz_fd, int table_fd, + Error **errp) { if (ebpf_rss_is_loaded(ctx)) { + error_setg(errp, "eBPF program is already loaded"); return false; } - if (program_fd < 0 || config_fd < 0 || toeplitz_fd < 0 || table_fd < 0) { + if (program_fd < 0) { + error_setg(errp, "eBPF program FD is not open"); + return false; + } + if (config_fd < 0) { + error_setg(errp, "eBPF config FD is not open"); + return false; + } + if (toeplitz_fd < 0) { + error_setg(errp, "eBPF toeplitz FD is not open"); + return false; + } + if (table_fd < 0) { + error_setg(errp, "eBPF indirection FD is not open"); return false; } @@ -166,7 +187,13 @@ bool ebpf_rss_load_fds(struct EBPFRSSContext *ctx, int program_fd, ctx->map_toeplitz_key = toeplitz_fd; ctx->map_indirections_table = table_fd; - if (!ebpf_rss_mmap(ctx)) { + trace_ebpf_rss_load(ctx, + ctx->program_fd, + ctx->map_configuration, + ctx->map_indirections_table, + ctx->map_toeplitz_key); + + if (!ebpf_rss_mmap(ctx, errp)) { ctx->program_fd = -1; ctx->map_configuration = -1; ctx->map_toeplitz_key = -1; @@ -177,25 +204,22 @@ bool ebpf_rss_load_fds(struct EBPFRSSContext *ctx, int program_fd, return true; } -static bool ebpf_rss_set_config(struct EBPFRSSContext *ctx, +static void ebpf_rss_set_config(struct EBPFRSSContext *ctx, struct EBPFRSSConfig *config) { - if (!ebpf_rss_is_loaded(ctx)) { - return false; - } - memcpy(ctx->mmap_configuration, config, sizeof(*config)); - return true; } static bool ebpf_rss_set_indirections_table(struct EBPFRSSContext *ctx, uint16_t *indirections_table, - size_t len) + size_t len, + Error **errp) { char *cursor = ctx->mmap_indirections_table; - if (!ebpf_rss_is_loaded(ctx) || indirections_table == NULL || - len > VIRTIO_NET_RSS_MAX_TABLE_LEN) { + if (len > VIRTIO_NET_RSS_MAX_TABLE_LEN) { + error_setg(errp, "Indirections table length %zu exceeds limit %d", + len, VIRTIO_NET_RSS_MAX_TABLE_LEN); return false; } @@ -207,43 +231,51 @@ static bool ebpf_rss_set_indirections_table(struct EBPFRSSContext *ctx, return true; } -static bool ebpf_rss_set_toepliz_key(struct EBPFRSSContext *ctx, +static void ebpf_rss_set_toepliz_key(struct EBPFRSSContext *ctx, uint8_t *toeplitz_key) { /* prepare toeplitz key */ uint8_t toe[VIRTIO_NET_RSS_MAX_KEY_SIZE] = {}; - if (!ebpf_rss_is_loaded(ctx) || toeplitz_key == NULL) { - return false; - } memcpy(toe, toeplitz_key, VIRTIO_NET_RSS_MAX_KEY_SIZE); *(uint32_t *)toe = ntohl(*(uint32_t *)toe); memcpy(ctx->mmap_toeplitz_key, toe, VIRTIO_NET_RSS_MAX_KEY_SIZE); - return true; } bool ebpf_rss_set_all(struct EBPFRSSContext *ctx, struct EBPFRSSConfig *config, - uint16_t *indirections_table, uint8_t *toeplitz_key) + uint16_t *indirections_table, uint8_t *toeplitz_key, + Error **errp) { - if (!ebpf_rss_is_loaded(ctx) || config == NULL || - indirections_table == NULL || toeplitz_key == NULL) { + if (!ebpf_rss_is_loaded(ctx)) { + error_setg(errp, "eBPF program is not loaded"); return false; } - - if (!ebpf_rss_set_config(ctx, config)) { + if (config == NULL) { + error_setg(errp, "eBPF config table is NULL"); return false; } - - if (!ebpf_rss_set_indirections_table(ctx, indirections_table, - config->indirections_len)) { + if (indirections_table == NULL) { + error_setg(errp, "eBPF indirections table is NULL"); + return false; + } + if (toeplitz_key == NULL) { + error_setg(errp, "eBPF toeplitz key is NULL"); return false; } - if (!ebpf_rss_set_toepliz_key(ctx, toeplitz_key)) { + ebpf_rss_set_config(ctx, config); + + if 
(!ebpf_rss_set_indirections_table(ctx, indirections_table, + config->indirections_len, + errp)) { return false; } + ebpf_rss_set_toepliz_key(ctx, toeplitz_key); + + trace_ebpf_rss_set_data(ctx, config, indirections_table, toeplitz_key); + return true; } @@ -253,6 +285,8 @@ void ebpf_rss_unload(struct EBPFRSSContext *ctx) return; } + trace_ebpf_rss_unload(ctx); + ebpf_rss_munmap(ctx); if (ctx->obj) { @@ -271,4 +305,4 @@ void ebpf_rss_unload(struct EBPFRSSContext *ctx) ctx->map_indirections_table = -1; } -ebpf_binary_init(EBPF_PROGRAMID_RSS, rss_bpf__elf_bytes) +ebpf_binary_init(EBPF_PROGRAM_ID_RSS, rss_bpf__elf_bytes) diff --git a/ebpf/ebpf_rss.h b/ebpf/ebpf_rss.h index 239242b0d26..86a5787789d 100644 --- a/ebpf/ebpf_rss.h +++ b/ebpf/ebpf_rss.h @@ -14,6 +14,8 @@ #ifndef QEMU_EBPF_RSS_H #define QEMU_EBPF_RSS_H +#include "qapi/error.h" + #define EBPF_RSS_MAX_FDS 4 struct EBPFRSSContext { @@ -41,13 +43,15 @@ void ebpf_rss_init(struct EBPFRSSContext *ctx); bool ebpf_rss_is_loaded(struct EBPFRSSContext *ctx); -bool ebpf_rss_load(struct EBPFRSSContext *ctx); +bool ebpf_rss_load(struct EBPFRSSContext *ctx, Error **errp); bool ebpf_rss_load_fds(struct EBPFRSSContext *ctx, int program_fd, - int config_fd, int toeplitz_fd, int table_fd); + int config_fd, int toeplitz_fd, int table_fd, + Error **errp); bool ebpf_rss_set_all(struct EBPFRSSContext *ctx, struct EBPFRSSConfig *config, - uint16_t *indirections_table, uint8_t *toeplitz_key); + uint16_t *indirections_table, uint8_t *toeplitz_key, + Error **errp); void ebpf_rss_unload(struct EBPFRSSContext *ctx); diff --git a/ebpf/trace-events b/ebpf/trace-events index b3ad1a35f2e..bf3d9b64510 100644 --- a/ebpf/trace-events +++ b/ebpf/trace-events @@ -1,4 +1,10 @@ # See docs/devel/tracing.rst for syntax documentation. 
# ebpf-rss.c -ebpf_error(const char *s1, const char *s2) "error in %s: %s" +ebpf_rss_load(void *ctx, int progfd, int cfgfd, int toepfd, int indirfd) "ctx=%p program-fd=%d config-fd=%d toeplitz-fd=%d indirection-fd=%d" +ebpf_rss_load_error(void *ctx) "ctx=%p" +ebpf_rss_mmap(void *ctx, void *cfgptr, void *toepptr, void *indirptr) "ctx=%p config-ptr=%p toeplitz-ptr=%p indirection-ptr=%p" +ebpf_rss_mmap_error(void *ctx, const char *object) "ctx=%p object=%s" +ebpf_rss_open_error(void *ctx) "ctx=%p" +ebpf_rss_set_data(void *ctx, void *cfgptr, void *toepptr, void *indirptr) "ctx=%p config-ptr=%p toeplitz-ptr=%p indirection-ptr=%p" +ebpf_rss_unload(void *ctx) "rss unload ctx=%p" diff --git a/event-loop-base.c b/event-loop-base.c index d5be4dc6fcf..8ca143bea43 100644 --- a/event-loop-base.c +++ b/event-loop-base.c @@ -15,7 +15,7 @@ #include "qom/object_interfaces.h" #include "qapi/error.h" #include "block/thread-pool.h" -#include "sysemu/event-loop-base.h" +#include "system/event-loop-base.h" typedef struct { const char *name; @@ -73,8 +73,6 @@ static void event_loop_base_set_param(Object *obj, Visitor *v, if (bc->update_params) { bc->update_params(base, errp); } - - return; } static void event_loop_base_complete(UserCreatable *uc, Error **errp) @@ -99,7 +97,8 @@ static bool event_loop_base_can_be_deleted(UserCreatable *uc) return true; } -static void event_loop_base_class_init(ObjectClass *klass, void *class_data) +static void event_loop_base_class_init(ObjectClass *klass, + const void *class_data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass); ucc->complete = event_loop_base_complete; @@ -127,7 +126,7 @@ static const TypeInfo event_loop_base_info = { .class_size = sizeof(EventLoopBaseClass), .class_init = event_loop_base_class_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/fpu/meson.build b/fpu/meson.build index 1a9992ded56..646c76f0c69 100644 --- a/fpu/meson.build +++ b/fpu/meson.build @@ -1 +1 @@ -specific_ss.add(when: 'CONFIG_TCG', if_true: files('softfloat.c')) +common_ss.add(when: 'CONFIG_TCG', if_true: files('softfloat.c')) diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc index a44649f4f4a..5e0438fc0b7 100644 --- a/fpu/softfloat-parts.c.inc +++ b/fpu/softfloat-parts.c.inc @@ -39,65 +39,152 @@ static void partsN(return_nan)(FloatPartsN *a, float_status *s) static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b, float_status *s) { + bool have_snan = false; + FloatPartsN *ret; + int cmp; + if (is_snan(a->cls) || is_snan(b->cls)) { float_raise(float_flag_invalid | float_flag_invalid_snan, s); + have_snan = true; } if (s->default_nan_mode) { parts_default_nan(a, s); - } else { - int cmp = frac_cmp(a, b); - if (cmp == 0) { - cmp = a->sign < b->sign; - } + return a; + } - if (pickNaN(a->cls, b->cls, cmp > 0, s)) { - a = b; + switch (s->float_2nan_prop_rule) { + case float_2nan_prop_s_ab: + if (have_snan) { + ret = is_snan(a->cls) ? a : b; + break; + } + /* fall through */ + case float_2nan_prop_ab: + ret = is_nan(a->cls) ? a : b; + break; + case float_2nan_prop_s_ba: + if (have_snan) { + ret = is_snan(b->cls) ? b : a; + break; } + /* fall through */ + case float_2nan_prop_ba: + ret = is_nan(b->cls) ? 
b : a; + break; + case float_2nan_prop_x87: + /* + * This implements x87 NaN propagation rules: + * SNaN + QNaN => return the QNaN + * two SNaNs => return the one with the larger significand, silenced + * two QNaNs => return the one with the larger significand + * SNaN and a non-NaN => return the SNaN, silenced + * QNaN and a non-NaN => return the QNaN + * + * If we get down to comparing significands and they are the same, + * return the NaN with the positive sign bit (if any). + */ if (is_snan(a->cls)) { - parts_silence_nan(a, s); + if (!is_snan(b->cls)) { + ret = is_qnan(b->cls) ? b : a; + break; + } + } else if (is_qnan(a->cls)) { + if (is_snan(b->cls) || !is_qnan(b->cls)) { + ret = a; + break; + } + } else { + ret = b; + break; + } + cmp = frac_cmp(a, b); + if (cmp == 0) { + cmp = a->sign < b->sign; } + ret = cmp > 0 ? a : b; + break; + default: + g_assert_not_reached(); } - return a; + + if (is_snan(ret->cls)) { + parts_silence_nan(ret, s); + } + return ret; } static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b, FloatPartsN *c, float_status *s, int ab_mask, int abc_mask) { - int which; + bool infzero = (ab_mask == float_cmask_infzero); + bool have_snan = (abc_mask & float_cmask_snan); + FloatPartsN *ret; - if (unlikely(abc_mask & float_cmask_snan)) { + if (unlikely(have_snan)) { float_raise(float_flag_invalid | float_flag_invalid_snan, s); } - which = pickNaNMulAdd(a->cls, b->cls, c->cls, - ab_mask == float_cmask_infzero, s); + if (infzero && + !(s->float_infzeronan_rule & float_infzeronan_suppress_invalid)) { + /* This is (0 * inf) + NaN or (inf * 0) + NaN */ + float_raise(float_flag_invalid | float_flag_invalid_imz, s); + } - if (s->default_nan_mode || which == 3) { + if (s->default_nan_mode) { /* - * Note that this check is after pickNaNMulAdd so that function - * has an opportunity to set the Invalid flag for infzero. + * We guarantee not to require the target to tell us how to + * pick a NaN if we're always returning the default NaN. + * But if we're not in default-NaN mode then the target must + * specify. */ - parts_default_nan(a, s); - return a; + goto default_nan; + } else if (infzero) { + /* + * Inf * 0 + NaN -- some implementations return the + * default NaN here, and some return the input NaN. 
+ */ + switch (s->float_infzeronan_rule & ~float_infzeronan_suppress_invalid) { + case float_infzeronan_dnan_never: + break; + case float_infzeronan_dnan_always: + goto default_nan; + case float_infzeronan_dnan_if_qnan: + if (is_qnan(c->cls)) { + goto default_nan; + } + break; + default: + g_assert_not_reached(); + } + ret = c; + } else { + FloatPartsN *val[R_3NAN_1ST_MASK + 1] = { a, b, c }; + Float3NaNPropRule rule = s->float_3nan_prop_rule; + + assert(rule != float_3nan_prop_none); + if (have_snan && (rule & R_3NAN_SNAN_MASK)) { + /* We have at least one SNaN input and should prefer it */ + do { + ret = val[rule & R_3NAN_1ST_MASK]; + rule >>= R_3NAN_1ST_LENGTH; + } while (!is_snan(ret->cls)); + } else { + do { + ret = val[rule & R_3NAN_1ST_MASK]; + rule >>= R_3NAN_1ST_LENGTH; + } while (!is_nan(ret->cls)); + } } - switch (which) { - case 0: - break; - case 1: - a = b; - break; - case 2: - a = c; - break; - default: - g_assert_not_reached(); - } - if (is_snan(a->cls)) { - parts_silence_nan(a, s); + if (is_snan(ret->cls)) { + parts_silence_nan(ret, s); } + return ret; + + default_nan: + parts_default_nan(a, s); return a; } @@ -108,18 +195,37 @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b, static void partsN(canonicalize)(FloatPartsN *p, float_status *status, const FloatFmt *fmt) { + /* + * It's target-dependent how to handle the case of exponent 0 + * and Integer bit set. Intel calls these "pseudodenormals", + * and treats them as if the integer bit was 0, and never + * produces them on output. This is the default behaviour for QEMU. + * For m68k, the integer bit is considered validly part of the + * input value when the exponent is 0, and may be 0 or 1, + * giving extra range. They may also be generated as outputs. + * (The m68k manual actually calls these values part of the + * normalized number range, not the denormalized number range, + * but that distinction is not important for us, because + * m68k doesn't care about the input_denormal_used status flag.) + * floatx80_pseudo_denormal_valid selects the m68k behaviour, + * which changes both how we canonicalize such a value and + * how we uncanonicalize results. 
+ */ + bool has_pseudo_denormals = fmt->has_explicit_bit && + (status->floatx80_behaviour & floatx80_pseudo_denormal_valid); + if (unlikely(p->exp == 0)) { if (likely(frac_eqz(p))) { p->cls = float_class_zero; } else if (status->flush_inputs_to_zero) { - float_raise(float_flag_input_denormal, status); + float_raise(float_flag_input_denormal_flushed, status); p->cls = float_class_zero; frac_clear(p); } else { int shift = frac_normalize(p); - p->cls = float_class_normal; + p->cls = float_class_denormal; p->exp = fmt->frac_shift - fmt->exp_bias - - shift + !fmt->m68k_denormal; + - shift + !has_pseudo_denormals; } } else if (likely(p->exp < fmt->exp_max) || fmt->arm_althp) { p->cls = float_class_normal; @@ -155,6 +261,9 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s, int exp, flags = 0; switch (s->float_rounding_mode) { + case float_round_nearest_even_max: + overflow_norm = true; + /* fall through */ case float_round_nearest_even: if (N > 64 && frac_lsb == 0) { inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1 @@ -244,20 +353,23 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s, p->frac_lo &= ~round_mask; } frac_shr(p, frac_shift); - } else if (s->flush_to_zero) { - flags |= float_flag_output_denormal; + } else if (s->flush_to_zero && + s->ftz_detection == float_ftz_before_rounding) { + flags |= float_flag_output_denormal_flushed; p->cls = float_class_zero; exp = 0; frac_clear(p); } else { bool is_tiny = s->tininess_before_rounding || exp < 0; + bool has_pseudo_denormals = fmt->has_explicit_bit && + (s->floatx80_behaviour & floatx80_pseudo_denormal_valid); if (!is_tiny) { FloatPartsN discard; is_tiny = !frac_addi(&discard, p, inc); } - frac_shrjam(p, !fmt->m68k_denormal - exp); + frac_shrjam(p, !has_pseudo_denormals - exp); if (p->frac_lo & round_mask) { /* Need to recompute round-to-even/round-to-odd. */ @@ -288,14 +400,22 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s, p->frac_lo &= ~round_mask; } - exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) && !fmt->m68k_denormal; + exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) && !has_pseudo_denormals; frac_shr(p, frac_shift); - if (is_tiny && (flags & float_flag_inexact)) { - flags |= float_flag_underflow; - } - if (exp == 0 && frac_eqz(p)) { - p->cls = float_class_zero; + if (is_tiny) { + if (s->flush_to_zero) { + assert(s->ftz_detection == float_ftz_after_rounding); + flags |= float_flag_output_denormal_flushed; + p->cls = float_class_zero; + exp = 0; + frac_clear(p); + } else if (flags & float_flag_inexact) { + flags |= float_flag_underflow; + } + if (exp == 0 && frac_eqz(p)) { + p->cls = float_class_zero; + } } } p->exp = exp; @@ -305,7 +425,7 @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s, static void partsN(uncanon)(FloatPartsN *p, float_status *s, const FloatFmt *fmt) { - if (likely(p->cls == float_class_normal)) { + if (likely(is_anynorm(p->cls))) { parts_uncanon_normal(p, s, fmt); } else { switch (p->cls) { @@ -343,9 +463,18 @@ static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b, bool b_sign = b->sign ^ subtract; int ab_mask = float_cmask(a->cls) | float_cmask(b->cls); + /* + * For addition and subtraction, we will consume an + * input denormal unless the other input is a NaN. 
+ */ + if ((ab_mask & (float_cmask_denormal | float_cmask_anynan)) == + float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + if (a->sign != b_sign) { /* Subtraction */ - if (likely(ab_mask == float_cmask_normal)) { + if (likely(cmask_is_only_normals(ab_mask))) { if (parts_sub_normal(a, b)) { return a; } @@ -378,7 +507,7 @@ static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b, } } else { /* Addition */ - if (likely(ab_mask == float_cmask_normal)) { + if (likely(cmask_is_only_normals(ab_mask))) { parts_add_normal(a, b); return a; } @@ -398,12 +527,12 @@ static FloatPartsN *partsN(addsub)(FloatPartsN *a, FloatPartsN *b, } if (b->cls == float_class_zero) { - g_assert(a->cls == float_class_normal); + g_assert(is_anynorm(a->cls)); return a; } g_assert(a->cls == float_class_zero); - g_assert(b->cls == float_class_normal); + g_assert(is_anynorm(b->cls)); return_b: b->sign = b_sign; return b; @@ -423,9 +552,13 @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b, int ab_mask = float_cmask(a->cls) | float_cmask(b->cls); bool sign = a->sign ^ b->sign; - if (likely(ab_mask == float_cmask_normal)) { + if (likely(cmask_is_only_normals(ab_mask))) { FloatPartsW tmp; + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + frac_mulw(&tmp, a, b); frac_truncjam(a, &tmp); @@ -451,6 +584,10 @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b, } /* Multiply by 0 or Inf */ + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + if (ab_mask & float_cmask_inf) { a->cls = float_class_inf; a->sign = sign; @@ -476,8 +613,9 @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b, * Requires A and C extracted into a double-sized structure to provide the * extra space for the widening multiply. */ -static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b, - FloatPartsN *c, int flags, float_status *s) +static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b, + FloatPartsN *c, int scale, + int flags, float_status *s) { int ab_mask, abc_mask; FloatPartsW p_widen, c_widen; @@ -505,7 +643,7 @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b, a->sign ^= 1; } - if (unlikely(ab_mask != float_cmask_normal)) { + if (unlikely(!cmask_is_only_normals(ab_mask))) { if (unlikely(ab_mask == float_cmask_infzero)) { float_raise(float_flag_invalid | float_flag_invalid_imz, s); goto d_nan; @@ -520,12 +658,14 @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b, } g_assert(ab_mask & float_cmask_zero); - if (c->cls == float_class_normal) { + if (is_anynorm(c->cls)) { *a = *c; goto return_normal; } if (c->cls == float_class_zero) { - if (a->sign != c->sign) { + if (flags & float_muladd_suppress_add_product_zero) { + a->sign = c->sign; + } else if (a->sign != c->sign) { goto return_sub_zero; } goto return_zero; @@ -566,12 +706,16 @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b, a->exp = p_widen.exp; return_normal: - if (flags & float_muladd_halve_result) { - a->exp -= 1; - } + a->exp += scale; finish_sign: - if (flags & float_muladd_negate_result) { - a->sign ^= 1; + /* + * All result types except for "return the default NaN + * because this is an Invalid Operation" go through here; + * this matches the set of cases where we consumed a + * denormal input. 
+ */ + if (abc_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); } return a; @@ -601,7 +745,10 @@ static FloatPartsN *partsN(div)(FloatPartsN *a, FloatPartsN *b, int ab_mask = float_cmask(a->cls) | float_cmask(b->cls); bool sign = a->sign ^ b->sign; - if (likely(ab_mask == float_cmask_normal)) { + if (likely(cmask_is_only_normals(ab_mask))) { + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } a->sign = sign; a->exp -= b->exp + frac_div(a, b); return a; @@ -622,6 +769,10 @@ static FloatPartsN *partsN(div)(FloatPartsN *a, FloatPartsN *b, return parts_pick_nan(a, b, s); } + if ((ab_mask & float_cmask_denormal) && b->cls != float_class_zero) { + float_raise(float_flag_input_denormal_used, s); + } + a->sign = sign; /* Inf / X */ @@ -659,7 +810,10 @@ static FloatPartsN *partsN(modrem)(FloatPartsN *a, FloatPartsN *b, { int ab_mask = float_cmask(a->cls) | float_cmask(b->cls); - if (likely(ab_mask == float_cmask_normal)) { + if (likely(cmask_is_only_normals(ab_mask))) { + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } frac_modrem(a, b, mod_quot); return a; } @@ -680,6 +834,10 @@ static FloatPartsN *partsN(modrem)(FloatPartsN *a, FloatPartsN *b, return a; } + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + /* N % Inf; 0 % N */ g_assert(b->cls == float_class_inf || a->cls == float_class_zero); return a; @@ -709,6 +867,12 @@ static void partsN(sqrt)(FloatPartsN *a, float_status *status, if (unlikely(a->cls != float_class_normal)) { switch (a->cls) { + case float_class_denormal: + if (!a->sign) { + /* -ve denormal will be InvalidOperation */ + float_raise(float_flag_input_denormal_used, status); + } + break; case float_class_snan: case float_class_qnan: parts_return_nan(a, status); @@ -1039,6 +1203,7 @@ static void partsN(round_to_int)(FloatPartsN *a, FloatRoundMode rmode, case float_class_inf: break; case float_class_normal: + case float_class_denormal: if (parts_round_to_int_normal(a, rmode, scale, fmt->frac_size)) { float_raise(float_flag_inexact, s); } @@ -1083,6 +1248,7 @@ static int64_t partsN(float_to_sint)(FloatPartsN *p, FloatRoundMode rmode, return 0; case float_class_normal: + case float_class_denormal: /* TODO: N - 2 is frac_size for rounding; could use input fmt. */ if (parts_round_to_int_normal(p, rmode, scale, N - 2)) { flags = float_flag_inexact; @@ -1150,6 +1316,7 @@ static uint64_t partsN(float_to_uint)(FloatPartsN *p, FloatRoundMode rmode, return 0; case float_class_normal: + case float_class_denormal: /* TODO: N - 2 is frac_size for rounding; could use input fmt. */ if (parts_round_to_int_normal(p, rmode, scale, N - 2)) { flags = float_flag_inexact; @@ -1213,6 +1380,7 @@ static int64_t partsN(float_to_sint_modulo)(FloatPartsN *p, return 0; case float_class_normal: + case float_class_denormal: /* TODO: N - 2 is frac_size for rounding; could use input fmt. */ if (parts_round_to_int_normal(p, rmode, 0, N - 2)) { flags = float_flag_inexact; @@ -1334,6 +1502,9 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b, if ((flags & (minmax_isnum | minmax_isnumber)) && !(ab_mask & float_cmask_snan) && (ab_mask & ~float_cmask_qnan)) { + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } return is_nan(a->cls) ? 
b : a; } @@ -1358,12 +1529,17 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b, return parts_pick_nan(a, b, s); } + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + a_exp = a->exp; b_exp = b->exp; - if (unlikely(ab_mask != float_cmask_normal)) { + if (unlikely(!cmask_is_only_normals(ab_mask))) { switch (a->cls) { case float_class_normal: + case float_class_denormal: break; case float_class_inf: a_exp = INT16_MAX; @@ -1373,10 +1549,10 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b, break; default: g_assert_not_reached(); - break; } switch (b->cls) { case float_class_normal: + case float_class_denormal: break; case float_class_inf: b_exp = INT16_MAX; @@ -1386,7 +1562,6 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b, break; default: g_assert_not_reached(); - break; } } @@ -1424,9 +1599,13 @@ static FloatRelation partsN(compare)(FloatPartsN *a, FloatPartsN *b, { int ab_mask = float_cmask(a->cls) | float_cmask(b->cls); - if (likely(ab_mask == float_cmask_normal)) { + if (likely(cmask_is_only_normals(ab_mask))) { FloatRelation cmp; + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + if (a->sign != b->sign) { goto a_sign; } @@ -1452,6 +1631,10 @@ static FloatRelation partsN(compare)(FloatPartsN *a, FloatPartsN *b, return float_relation_unordered; } + if (ab_mask & float_cmask_denormal) { + float_raise(float_flag_input_denormal_used, s); + } + if (ab_mask & float_cmask_zero) { if (ab_mask == float_cmask_zero) { return float_relation_equal; @@ -1491,6 +1674,9 @@ static void partsN(scalbn)(FloatPartsN *a, int n, float_status *s) case float_class_zero: case float_class_inf: break; + case float_class_denormal: + float_raise(float_flag_input_denormal_used, s); + /* fall through */ case float_class_normal: a->exp += MIN(MAX(n, -0x10000), 0x10000); break; @@ -1510,6 +1696,12 @@ static void partsN(log2)(FloatPartsN *a, float_status *s, const FloatFmt *fmt) if (unlikely(a->cls != float_class_normal)) { switch (a->cls) { + case float_class_denormal: + if (!a->sign) { + /* -ve denormal will be InvalidOperation */ + float_raise(float_flag_input_denormal_used, s); + } + break; case float_class_snan: case float_class_qnan: parts_return_nan(a, s); @@ -1526,9 +1718,8 @@ static void partsN(log2)(FloatPartsN *a, float_status *s, const FloatFmt *fmt) } return; default: - break; + g_assert_not_reached(); } - g_assert_not_reached(); } if (unlikely(a->sign)) { goto d_nan; diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc index 8f3b97d9bf7..ba4fa08b7be 100644 --- a/fpu/softfloat-specialize.c.inc +++ b/fpu/softfloat-specialize.c.inc @@ -85,11 +85,7 @@ this code that are retained. */ static inline bool no_signaling_nans(float_status *status) { -#if defined(TARGET_XTENSA) return status->no_signaling_nans; -#else - return false; -#endif } /* Define how the architecture discriminates signaling NaNs. @@ -97,17 +93,10 @@ static inline bool no_signaling_nans(float_status *status) * In IEEE 754-1985 this was implementation defined, but in IEEE 754-2008 * the msb must be zero. MIPS is (so far) unique in supporting both the * 2008 revision and backward compatibility with their original choice. - * Thus for MIPS we must make the choice at runtime. 
 */ static inline bool snan_bit_is_one(float_status *status) { -#if defined(TARGET_MIPS) return status->snan_bit_is_one; -#elif defined(TARGET_HPPA) || defined(TARGET_SH4) - return 1; -#else - return 0; -#endif } /*---------------------------------------------------------------------------- @@ -133,35 +122,17 @@ static void parts64_default_nan(FloatParts64 *p, float_status *status) { bool sign = 0; uint64_t frac; + uint8_t dnan_pattern = status->default_nan_pattern; -#if defined(TARGET_SPARC) || defined(TARGET_M68K) - /* !snan_bit_is_one, set all bits */ - frac = (1ULL << DECOMPOSED_BINARY_POINT) - 1; -#elif defined(TARGET_I386) || defined(TARGET_X86_64) \ - || defined(TARGET_MICROBLAZE) - /* !snan_bit_is_one, set sign and msb */ - frac = 1ULL << (DECOMPOSED_BINARY_POINT - 1); - sign = 1; -#elif defined(TARGET_HPPA) - /* snan_bit_is_one, set msb-1. */ - frac = 1ULL << (DECOMPOSED_BINARY_POINT - 2); -#elif defined(TARGET_HEXAGON) - sign = 1; - frac = ~0ULL; -#else + assert(dnan_pattern != 0); + + sign = dnan_pattern >> 7; /* - * This case is true for Alpha, ARM, MIPS, OpenRISC, PPC, RISC-V, - * S390, SH4, TriCore, and Xtensa. Our other supported targets, - * such CRIS, do not have floating-point. + * Place default_nan_pattern [6:0] into bits [62:56], + * and replicate bit [0] down into [55:0] */ - if (snan_bit_is_one(status)) { - /* set all bits other than msb */ - frac = (1ULL << (DECOMPOSED_BINARY_POINT - 1)) - 1; - } else { - /* set msb */ - frac = 1ULL << (DECOMPOSED_BINARY_POINT - 1); - } -#endif + frac = deposit64(0, DECOMPOSED_BINARY_POINT - 7, 7, dnan_pattern); + frac = deposit64(frac, 0, DECOMPOSED_BINARY_POINT - 7, -(dnan_pattern & 1)); *p = (FloatParts64) { .cls = float_class_qnan, @@ -227,17 +198,17 @@ static void parts128_silence_nan(FloatParts128 *p, float_status *status) floatx80 floatx80_default_nan(float_status *status) { floatx80 r; + /* + * Extrapolate from the choices made by parts64_default_nan to fill + * in the floatx80 format. We assume that floatx80's explicit + * integer bit is always set (this is true for i386 and m68k, + * which are the only real users of this format). + */ + FloatParts64 p64; + parts64_default_nan(&p64, status); - /* None of the targets that have snan_bit_is_one use floatx80. */ - assert(!snan_bit_is_one(status)); -#if defined(TARGET_M68K) - r.low = UINT64_C(0xFFFFFFFFFFFFFFFF); - r.high = 0x7FFF; -#else - /* X86 */ - r.low = UINT64_C(0xC000000000000000); - r.high = 0xFFFF; -#endif + r.high = 0x7FFF | (p64.sign << 15); + r.low = (1ULL << DECOMPOSED_BINARY_POINT) | p64.frac; return r; } @@ -245,15 +216,15 @@ floatx80 floatx80_default_nan(float_status *status) | The pattern for a default generated extended double-precision inf. *----------------------------------------------------------------------------*/ -#define floatx80_infinity_high 0x7FFF -#if defined(TARGET_M68K) -#define floatx80_infinity_low UINT64_C(0x0000000000000000) -#else -#define floatx80_infinity_low UINT64_C(0x8000000000000000) -#endif - -const floatx80 floatx80_infinity - = make_floatx80_init(floatx80_infinity_high, floatx80_infinity_low); +floatx80 floatx80_default_inf(bool zSign, float_status *status) +{ + /* + * Whether the Integer bit is set in the default Infinity is + * target dependent. + */ + bool z = status->floatx80_behaviour & floatx80_default_inf_int_bit_is_zero; + return packFloatx80(zSign, 0x7fff, z ? 
0 : (1ULL << 63)); +} /*---------------------------------------------------------------------------- | Returns 1 if the half-precision floating-point value `a' is a quiet @@ -370,331 +341,6 @@ bool float32_is_signaling_nan(float32 a_, float_status *status) } } -/*---------------------------------------------------------------------------- -| Select which NaN to propagate for a two-input operation. -| IEEE754 doesn't specify all the details of this, so the -| algorithm is target-specific. -| The routine is passed various bits of information about the -| two NaNs and should return 0 to select NaN a and 1 for NaN b. -| Note that signalling NaNs are always squashed to quiet NaNs -| by the caller, by calling floatXX_silence_nan() before -| returning them. -| -| aIsLargerSignificand is only valid if both a and b are NaNs -| of some kind, and is true if a has the larger significand, -| or if both a and b have the same significand but a is -| positive but b is negative. It is only needed for the x87 -| tie-break rule. -*----------------------------------------------------------------------------*/ - -static int pickNaN(FloatClass a_cls, FloatClass b_cls, - bool aIsLargerSignificand, float_status *status) -{ -#if defined(TARGET_ARM) || defined(TARGET_MIPS) || defined(TARGET_HPPA) || \ - defined(TARGET_LOONGARCH64) || defined(TARGET_S390X) - /* ARM mandated NaN propagation rules (see FPProcessNaNs()), take - * the first of: - * 1. A if it is signaling - * 2. B if it is signaling - * 3. A (quiet) - * 4. B (quiet) - * A signaling NaN is always quietened before returning it. - */ - /* According to MIPS specifications, if one of the two operands is - * a sNaN, a new qNaN has to be generated. This is done in - * floatXX_silence_nan(). For qNaN inputs the specifications - * says: "When possible, this QNaN result is one of the operand QNaN - * values." In practice it seems that most implementations choose - * the first operand if both operands are qNaN. In short this gives - * the following rules: - * 1. A if it is signaling - * 2. B if it is signaling - * 3. A (quiet) - * 4. B (quiet) - * A signaling NaN is always silenced before returning it. - */ - if (is_snan(a_cls)) { - return 0; - } else if (is_snan(b_cls)) { - return 1; - } else if (is_qnan(a_cls)) { - return 0; - } else { - return 1; - } -#elif defined(TARGET_PPC) || defined(TARGET_M68K) - /* PowerPC propagation rules: - * 1. A if it sNaN or qNaN - * 2. B if it sNaN or qNaN - * A signaling NaN is always silenced before returning it. - */ - /* M68000 FAMILY PROGRAMMER'S REFERENCE MANUAL - * 3.4 FLOATING-POINT INSTRUCTION DETAILS - * If either operand, but not both operands, of an operation is a - * nonsignaling NaN, then that NaN is returned as the result. If both - * operands are nonsignaling NaNs, then the destination operand - * nonsignaling NaN is returned as the result. - * If either operand to an operation is a signaling NaN (SNaN), then the - * SNaN bit is set in the FPSR EXC byte. If the SNaN exception enable bit - * is set in the FPCR ENABLE byte, then the exception is taken and the - * destination is not modified. If the SNaN exception enable bit is not - * set, setting the SNaN bit in the operand to a one converts the SNaN to - * a nonsignaling NaN. The operation then continues as described in the - * preceding paragraph for nonsignaling NaNs. - */ - if (is_nan(a_cls)) { - return 0; - } else { - return 1; - } -#elif defined(TARGET_SPARC) - /* Prefer SNaN over QNaN, order B then A. 
*/ - if (is_snan(b_cls)) { - return 1; - } else if (is_snan(a_cls)) { - return 0; - } else if (is_qnan(b_cls)) { - return 1; - } else { - return 0; - } -#elif defined(TARGET_XTENSA) - /* - * Xtensa has two NaN propagation modes. - * Which one is active is controlled by float_status::use_first_nan. - */ - if (status->use_first_nan) { - if (is_nan(a_cls)) { - return 0; - } else { - return 1; - } - } else { - if (is_nan(b_cls)) { - return 1; - } else { - return 0; - } - } -#else - /* This implements x87 NaN propagation rules: - * SNaN + QNaN => return the QNaN - * two SNaNs => return the one with the larger significand, silenced - * two QNaNs => return the one with the larger significand - * SNaN and a non-NaN => return the SNaN, silenced - * QNaN and a non-NaN => return the QNaN - * - * If we get down to comparing significands and they are the same, - * return the NaN with the positive sign bit (if any). - */ - if (is_snan(a_cls)) { - if (is_snan(b_cls)) { - return aIsLargerSignificand ? 0 : 1; - } - return is_qnan(b_cls) ? 1 : 0; - } else if (is_qnan(a_cls)) { - if (is_snan(b_cls) || !is_qnan(b_cls)) { - return 0; - } else { - return aIsLargerSignificand ? 0 : 1; - } - } else { - return 1; - } -#endif -} - -/*---------------------------------------------------------------------------- -| Select which NaN to propagate for a three-input operation. -| For the moment we assume that no CPU needs the 'larger significand' -| information. -| Return values : 0 : a; 1 : b; 2 : c; 3 : default-NaN -*----------------------------------------------------------------------------*/ -static int pickNaNMulAdd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls, - bool infzero, float_status *status) -{ -#if defined(TARGET_ARM) - /* For ARM, the (inf,zero,qnan) case sets InvalidOp and returns - * the default NaN - */ - if (infzero && is_qnan(c_cls)) { - float_raise(float_flag_invalid | float_flag_invalid_imz, status); - return 3; - } - - /* This looks different from the ARM ARM pseudocode, because the ARM ARM - * puts the operands to a fused mac operation (a*b)+c in the order c,a,b. - */ - if (is_snan(c_cls)) { - return 2; - } else if (is_snan(a_cls)) { - return 0; - } else if (is_snan(b_cls)) { - return 1; - } else if (is_qnan(c_cls)) { - return 2; - } else if (is_qnan(a_cls)) { - return 0; - } else { - return 1; - } -#elif defined(TARGET_MIPS) - if (snan_bit_is_one(status)) { - /* - * For MIPS systems that conform to IEEE754-1985, the (inf,zero,nan) - * case sets InvalidOp and returns the default NaN - */ - if (infzero) { - float_raise(float_flag_invalid | float_flag_invalid_imz, status); - return 3; - } - /* Prefer sNaN over qNaN, in the a, b, c order. */ - if (is_snan(a_cls)) { - return 0; - } else if (is_snan(b_cls)) { - return 1; - } else if (is_snan(c_cls)) { - return 2; - } else if (is_qnan(a_cls)) { - return 0; - } else if (is_qnan(b_cls)) { - return 1; - } else { - return 2; - } - } else { - /* - * For MIPS systems that conform to IEEE754-2008, the (inf,zero,nan) - * case sets InvalidOp and returns the input value 'c' - */ - if (infzero) { - float_raise(float_flag_invalid | float_flag_invalid_imz, status); - return 2; - } - /* Prefer sNaN over qNaN, in the c, a, b order. 
*/ - if (is_snan(c_cls)) { - return 2; - } else if (is_snan(a_cls)) { - return 0; - } else if (is_snan(b_cls)) { - return 1; - } else if (is_qnan(c_cls)) { - return 2; - } else if (is_qnan(a_cls)) { - return 0; - } else { - return 1; - } - } -#elif defined(TARGET_LOONGARCH64) - /* - * For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan) - * case sets InvalidOp and returns the input value 'c' - */ - if (infzero) { - float_raise(float_flag_invalid | float_flag_invalid_imz, status); - return 2; - } - /* Prefer sNaN over qNaN, in the c, a, b order. */ - if (is_snan(c_cls)) { - return 2; - } else if (is_snan(a_cls)) { - return 0; - } else if (is_snan(b_cls)) { - return 1; - } else if (is_qnan(c_cls)) { - return 2; - } else if (is_qnan(a_cls)) { - return 0; - } else { - return 1; - } -#elif defined(TARGET_PPC) - /* For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer - * to return an input NaN if we have one (ie c) rather than generating - * a default NaN - */ - if (infzero) { - float_raise(float_flag_invalid | float_flag_invalid_imz, status); - return 2; - } - - /* If fRA is a NaN return it; otherwise if fRB is a NaN return it; - * otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + frB - */ - if (is_nan(a_cls)) { - return 0; - } else if (is_nan(c_cls)) { - return 2; - } else { - return 1; - } -#elif defined(TARGET_RISCV) - /* For RISC-V, InvalidOp is set when multiplicands are Inf and zero */ - if (infzero) { - float_raise(float_flag_invalid | float_flag_invalid_imz, status); - } - return 3; /* default NaN */ -#elif defined(TARGET_SPARC) - /* For (inf,0,nan) return c. */ - if (infzero) { - float_raise(float_flag_invalid | float_flag_invalid_imz, status); - return 2; - } - /* Prefer SNaN over QNaN, order C, B, A. */ - if (is_snan(c_cls)) { - return 2; - } else if (is_snan(b_cls)) { - return 1; - } else if (is_snan(a_cls)) { - return 0; - } else if (is_qnan(c_cls)) { - return 2; - } else if (is_qnan(b_cls)) { - return 1; - } else { - return 0; - } -#elif defined(TARGET_XTENSA) - /* - * For Xtensa, the (inf,zero,nan) case sets InvalidOp and returns - * an input NaN if we have one (ie c). - */ - if (infzero) { - float_raise(float_flag_invalid | float_flag_invalid_imz, status); - return 2; - } - if (status->use_first_nan) { - if (is_nan(a_cls)) { - return 0; - } else if (is_nan(b_cls)) { - return 1; - } else { - return 2; - } - } else { - if (is_nan(c_cls)) { - return 2; - } else if (is_nan(b_cls)) { - return 1; - } else { - return 0; - } - } -#else - /* A default implementation: prefer a to b to c. - * This is unlikely to actually match any real implementation. - */ - if (is_nan(a_cls)) { - return 0; - } else if (is_nan(b_cls)) { - return 1; - } else { - return 2; - } -#endif -} - /*---------------------------------------------------------------------------- | Returns 1 if the double-precision floating-point value `a' is a quiet | NaN; otherwise returns 0. @@ -798,58 +444,6 @@ floatx80 floatx80_silence_nan(floatx80 a, float_status *status) return a; } -/*---------------------------------------------------------------------------- -| Takes two extended double-precision floating-point values `a' and `b', one -| of which is a NaN, and returns the appropriate NaN result. If either `a' or -| `b' is a signaling NaN, the invalid exception is raised. 
-*----------------------------------------------------------------------------*/ - -floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, float_status *status) -{ - bool aIsLargerSignificand; - FloatClass a_cls, b_cls; - - /* This is not complete, but is good enough for pickNaN. */ - a_cls = (!floatx80_is_any_nan(a) - ? float_class_normal - : floatx80_is_signaling_nan(a, status) - ? float_class_snan - : float_class_qnan); - b_cls = (!floatx80_is_any_nan(b) - ? float_class_normal - : floatx80_is_signaling_nan(b, status) - ? float_class_snan - : float_class_qnan); - - if (is_snan(a_cls) || is_snan(b_cls)) { - float_raise(float_flag_invalid, status); - } - - if (status->default_nan_mode) { - return floatx80_default_nan(status); - } - - if (a.low < b.low) { - aIsLargerSignificand = 0; - } else if (b.low < a.low) { - aIsLargerSignificand = 1; - } else { - aIsLargerSignificand = (a.high < b.high) ? 1 : 0; - } - - if (pickNaN(a_cls, b_cls, aIsLargerSignificand, status)) { - if (is_snan(b_cls)) { - return floatx80_silence_nan(b, status); - } - return b; - } else { - if (is_snan(a_cls)) { - return floatx80_silence_nan(a, status); - } - return a; - } -} - /*---------------------------------------------------------------------------- | Returns 1 if the quadruple-precision floating-point value `a' is a quiet | NaN; otherwise returns 0. diff --git a/fpu/softfloat.c b/fpu/softfloat.c index 027a8e576d3..8094358c2e4 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -79,9 +79,6 @@ this code that are retained. * version 2 or later. See the COPYING file in the top-level directory. */ -/* softfloat (and in particular the code in softfloat-specialize.h) is - * target-dependent and needs the TARGET_* macros. - */ #include "qemu/osdep.h" #include #include "qemu/bitops.h" @@ -132,7 +129,7 @@ this code that are retained. if (unlikely(soft_t ## _is_denormal(*a))) { \ *a = soft_t ## _set_sign(soft_t ## _zero, \ soft_t ## _is_neg(*a)); \ - float_raise(float_flag_input_denormal, s); \ + float_raise(float_flag_input_denormal_flushed, s); \ } \ } @@ -220,11 +217,9 @@ GEN_INPUT_FLUSH3(float64_input_flush3, float64) * the use of hardfloat, since hardfloat relies on the inexact flag being * already set. */ -#if defined(TARGET_PPC) || defined(__FAST_MATH__) # if defined(__FAST_MATH__) # warning disabling hardfloat due to -ffast-math: hardfloat requires an exact \ IEEE implementation -# endif # define QEMU_NO_HARDFLOAT 1 # define QEMU_SOFTFLOAT_ATTR QEMU_FLATTEN #else @@ -404,12 +399,16 @@ float64_gen2(float64 xa, float64 xb, float_status *s, /* * Classify a floating point number. Everything above float_class_qnan * is a NaN so cls >= float_class_qnan is any NaN. + * + * Note that we canonicalize denormals, so most code should treat + * class_normal and class_denormal identically. 
*/ typedef enum __attribute__ ((__packed__)) { float_class_unclassified, float_class_zero, float_class_normal, + float_class_denormal, /* input was a non-squashed denormal */ float_class_inf, float_class_qnan, /* all NaNs from here */ float_class_snan, @@ -420,12 +419,14 @@ typedef enum __attribute__ ((__packed__)) { enum { float_cmask_zero = float_cmask(float_class_zero), float_cmask_normal = float_cmask(float_class_normal), + float_cmask_denormal = float_cmask(float_class_denormal), float_cmask_inf = float_cmask(float_class_inf), float_cmask_qnan = float_cmask(float_class_qnan), float_cmask_snan = float_cmask(float_class_snan), float_cmask_infzero = float_cmask_zero | float_cmask_inf, float_cmask_anynan = float_cmask_qnan | float_cmask_snan, + float_cmask_anynorm = float_cmask_normal | float_cmask_denormal, }; /* Flags for parts_minmax. */ @@ -459,6 +460,20 @@ static inline __attribute__((unused)) bool is_qnan(FloatClass c) return c == float_class_qnan; } +/* + * Return true if the float_cmask has only normals in it + * (including input denormals that were canonicalized) + */ +static inline bool cmask_is_only_normals(int cmask) +{ + return !(cmask & ~float_cmask_anynorm); +} + +static inline bool is_anynorm(FloatClass c) +{ + return float_cmask(c) & float_cmask_anynorm; +} + /* * Structure holding all of the decomposed parts of a float. * The exponent is unbiased and the fraction is normalized. @@ -517,7 +532,8 @@ typedef struct { * round_mask: bits below lsb which must be rounded * The following optional modifiers are available: * arm_althp: handle ARM Alternative Half Precision - * m68k_denormal: explicit integer bit for extended precision may be 1 + * has_explicit_bit: has an explicit integer bit; this affects whether + * the float_status floatx80_behaviour handling applies */ typedef struct { int exp_size; @@ -527,7 +543,7 @@ typedef struct { int frac_size; int frac_shift; bool arm_althp; - bool m68k_denormal; + bool has_explicit_bit; uint64_t round_mask; } FloatFmt; @@ -580,9 +596,7 @@ static const FloatFmt floatx80_params[3] = { [floatx80_precision_d] = { FLOATX80_PARAMS(52) }, [floatx80_precision_x] = { FLOATX80_PARAMS(64), -#ifdef TARGET_M68K - .m68k_denormal = true, -#endif + .has_explicit_bit = true, }, }; @@ -789,15 +803,15 @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b, #define parts_mul(A, B, S) \ PARTS_GENERIC_64_128(mul, A)(A, B, S) -static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b, - FloatParts64 *c, int flags, - float_status *s); -static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b, - FloatParts128 *c, int flags, - float_status *s); +static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b, + FloatParts64 *c, int scale, + int flags, float_status *s); +static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b, + FloatParts128 *c, int scale, + int flags, float_status *s); -#define parts_muladd(A, B, C, Z, S) \ - PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S) +#define parts_muladd_scalbn(A, B, C, Z, Y, S) \ + PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S) static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b, float_status *s); @@ -1717,11 +1731,8 @@ static float64 float64_round_pack_canonical(FloatParts64 *p, return float64_pack_raw(p); } -static float64 float64r32_round_pack_canonical(FloatParts64 *p, - float_status *s) +static float64 float64r32_pack_raw(FloatParts64 *p) { - parts_uncanon(p, s, &float32_params); - /* * In parts_uncanon, we 
placed the fraction for float32 at the lsb. * We need to adjust the fraction higher so that the least N bits are @@ -1729,6 +1740,7 @@ static float64 float64r32_round_pack_canonical(FloatParts64 *p, */ switch (p->cls) { case float_class_normal: + case float_class_denormal: if (unlikely(p->exp == 0)) { /* * The result is denormal for float32, but can be represented @@ -1761,6 +1773,13 @@ static float64 float64r32_round_pack_canonical(FloatParts64 *p, return float64_pack_raw(p); } +static float64 float64r32_round_pack_canonical(FloatParts64 *p, + float_status *s) +{ + parts_uncanon(p, s, &float32_params); + return float64r32_pack_raw(p); +} + static void float128_unpack_canonical(FloatParts128 *p, float128 f, float_status *s) { @@ -1789,7 +1808,7 @@ static bool floatx80_unpack_canonical(FloatParts128 *p, floatx80 f, g_assert_not_reached(); } - if (unlikely(floatx80_invalid_encoding(f))) { + if (unlikely(floatx80_invalid_encoding(f, s))) { float_raise(float_flag_invalid, s); return false; } @@ -1817,6 +1836,7 @@ static floatx80 floatx80_round_pack_canonical(FloatParts128 *p, switch (p->cls) { case float_class_normal: + case float_class_denormal: if (s->floatx80_rounding_precision == floatx80_precision_x) { parts_uncanon_normal(p, s, fmt); frac = p->frac_hi; @@ -1838,7 +1858,8 @@ static floatx80 floatx80_round_pack_canonical(FloatParts128 *p, case float_class_inf: /* x86 and m68k differ in the setting of the integer bit. */ - frac = floatx80_infinity_low; + frac = s->floatx80_behaviour & floatx80_default_inf_int_bit_is_zero ? + 0 : (1ULL << 63); exp = fmt->exp_max; break; @@ -2212,45 +2233,67 @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status) * Fused multiply-add */ -float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c, - int flags, float_status *status) +float16 QEMU_FLATTEN +float16_muladd_scalbn(float16 a, float16 b, float16 c, + int scale, int flags, float_status *status) { FloatParts64 pa, pb, pc, *pr; float16_unpack_canonical(&pa, a, status); float16_unpack_canonical(&pb, b, status); float16_unpack_canonical(&pc, c, status); - pr = parts_muladd(&pa, &pb, &pc, flags, status); + pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status); - return float16_round_pack_canonical(pr, status); + /* Round before applying negate result. */ + parts_uncanon(pr, status, &float16_params); + if ((flags & float_muladd_negate_result) && !is_nan(pr->cls)) { + pr->sign ^= 1; + } + return float16_pack_raw(pr); } -static float32 QEMU_SOFTFLOAT_ATTR -soft_f32_muladd(float32 a, float32 b, float32 c, int flags, - float_status *status) +float16 float16_muladd(float16 a, float16 b, float16 c, + int flags, float_status *status) +{ + return float16_muladd_scalbn(a, b, c, 0, flags, status); +} + +float32 QEMU_SOFTFLOAT_ATTR +float32_muladd_scalbn(float32 a, float32 b, float32 c, + int scale, int flags, float_status *status) { FloatParts64 pa, pb, pc, *pr; float32_unpack_canonical(&pa, a, status); float32_unpack_canonical(&pb, b, status); float32_unpack_canonical(&pc, c, status); - pr = parts_muladd(&pa, &pb, &pc, flags, status); + pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status); - return float32_round_pack_canonical(pr, status); + /* Round before applying negate result. 
*/ + parts_uncanon(pr, status, &float32_params); + if ((flags & float_muladd_negate_result) && !is_nan(pr->cls)) { + pr->sign ^= 1; + } + return float32_pack_raw(pr); } -static float64 QEMU_SOFTFLOAT_ATTR -soft_f64_muladd(float64 a, float64 b, float64 c, int flags, - float_status *status) +float64 QEMU_SOFTFLOAT_ATTR +float64_muladd_scalbn(float64 a, float64 b, float64 c, + int scale, int flags, float_status *status) { FloatParts64 pa, pb, pc, *pr; float64_unpack_canonical(&pa, a, status); float64_unpack_canonical(&pb, b, status); float64_unpack_canonical(&pc, c, status); - pr = parts_muladd(&pa, &pb, &pc, flags, status); + pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status); - return float64_round_pack_canonical(pr, status); + /* Round before applying negate result. */ + parts_uncanon(pr, status, &float64_params); + if ((flags & float_muladd_negate_result) && !is_nan(pr->cls)) { + pr->sign ^= 1; + } + return float64_pack_raw(pr); } static bool force_soft_fma; @@ -2267,7 +2310,7 @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s) if (unlikely(!can_use_fpu(s))) { goto soft; } - if (unlikely(flags & float_muladd_halve_result)) { + if (unlikely(flags & float_muladd_suppress_add_product_zero)) { goto soft; } @@ -2323,7 +2366,7 @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s) return ur.s; soft: - return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s); + return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s); } float64 QEMU_FLATTEN @@ -2338,9 +2381,6 @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s) if (unlikely(!can_use_fpu(s))) { goto soft; } - if (unlikely(flags & float_muladd_halve_result)) { - goto soft; - } float64_input_flush3(&ua.s, &ub.s, &uc.s, s); if (unlikely(!f64_is_zon3(ua, ub, uc))) { @@ -2394,7 +2434,7 @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s) return ur.s; soft: - return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s); + return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s); } float64 float64r32_muladd(float64 a, float64 b, float64 c, @@ -2405,9 +2445,14 @@ float64 float64r32_muladd(float64 a, float64 b, float64 c, float64_unpack_canonical(&pa, a, status); float64_unpack_canonical(&pb, b, status); float64_unpack_canonical(&pc, c, status); - pr = parts_muladd(&pa, &pb, &pc, flags, status); + pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status); - return float64r32_round_pack_canonical(pr, status); + /* Round before applying negate result. */ + parts_uncanon(pr, status, &float32_params); + if ((flags & float_muladd_negate_result) && !is_nan(pr->cls)) { + pr->sign ^= 1; + } + return float64r32_pack_raw(pr); } bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c, @@ -2418,9 +2463,14 @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c, bfloat16_unpack_canonical(&pa, a, status); bfloat16_unpack_canonical(&pb, b, status); bfloat16_unpack_canonical(&pc, c, status); - pr = parts_muladd(&pa, &pb, &pc, flags, status); + pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status); - return bfloat16_round_pack_canonical(pr, status); + /* Round before applying negate result. 
*/ + parts_uncanon(pr, status, &bfloat16_params); + if ((flags & float_muladd_negate_result) && !is_nan(pr->cls)) { + pr->sign ^= 1; + } + return bfloat16_pack_raw(pr); } float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c, @@ -2431,9 +2481,14 @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c, float128_unpack_canonical(&pa, a, status); float128_unpack_canonical(&pb, b, status); float128_unpack_canonical(&pc, c, status); - pr = parts_muladd(&pa, &pb, &pc, flags, status); + pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status); - return float128_round_pack_canonical(pr, status); + /* Round before applying negate result. */ + parts_uncanon(pr, status, &float128_params); + if ((flags & float_muladd_negate_result) && !is_nan(pr->cls)) { + pr->sign ^= 1; + } + return float128_pack_raw(pr); } /* @@ -2692,6 +2747,9 @@ static void parts_float_to_ahp(FloatParts64 *a, float_status *s) float16_params_ahp.frac_size + 1); break; + case float_class_denormal: + float_raise(float_flag_input_denormal_used, s); + break; case float_class_normal: case float_class_zero: break; @@ -2706,6 +2764,9 @@ static void parts64_float_to_float(FloatParts64 *a, float_status *s) if (is_nan(a->cls)) { parts_return_nan(a, s); } + if (a->cls == float_class_denormal) { + float_raise(float_flag_input_denormal_used, s); + } } static void parts128_float_to_float(FloatParts128 *a, float_status *s) @@ -2713,6 +2774,9 @@ static void parts128_float_to_float(FloatParts128 *a, float_status *s) if (is_nan(a->cls)) { parts_return_nan(a, s); } + if (a->cls == float_class_denormal) { + float_raise(float_flag_input_denormal_used, s); + } } #define parts_float_to_float(P, S) \ @@ -2725,12 +2789,21 @@ static void parts_float_to_float_narrow(FloatParts64 *a, FloatParts128 *b, a->sign = b->sign; a->exp = b->exp; - if (a->cls == float_class_normal) { + switch (a->cls) { + case float_class_denormal: + float_raise(float_flag_input_denormal_used, s); + /* fall through */ + case float_class_normal: frac_truncjam(a, b); - } else if (is_nan(a->cls)) { + break; + case float_class_snan: + case float_class_qnan: /* Discard the low bits of the NaN. 
*/ a->frac = b->frac_hi; parts_return_nan(a, s); + break; + default: + break; } } @@ -2745,6 +2818,9 @@ static void parts_float_to_float_widen(FloatParts128 *a, FloatParts64 *b, if (is_nan(a->cls)) { parts_return_nan(a, s); } + if (a->cls == float_class_denormal) { + float_raise(float_flag_input_denormal_used, s); + } } float32 float16_to_float32(float16 a, bool ieee, float_status *s) @@ -3214,6 +3290,7 @@ static Int128 float128_to_int128_scalbn(float128 a, FloatRoundMode rmode, return int128_zero(); case float_class_normal: + case float_class_denormal: if (parts_round_to_int_normal(&p, rmode, scale, 128 - 2)) { flags = float_flag_inexact; } @@ -3641,6 +3718,7 @@ static Int128 float128_to_uint128_scalbn(float128 a, FloatRoundMode rmode, return int128_zero(); case float_class_normal: + case float_class_denormal: if (parts_round_to_int_normal(&p, rmode, scale, 128 - 2)) { flags = float_flag_inexact; if (p.cls == float_class_zero) { @@ -4382,7 +4460,11 @@ float32_hs_compare(float32 xa, float32 xb, float_status *s, bool is_quiet) goto soft; } - float32_input_flush2(&ua.s, &ub.s, s); + if (unlikely(float32_is_denormal(ua.s) || float32_is_denormal(ub.s))) { + /* We may need to set the input_denormal_used flag */ + goto soft; + } + if (isgreaterequal(ua.h, ub.h)) { if (isgreater(ua.h, ub.h)) { return float_relation_greater; @@ -4432,7 +4514,11 @@ float64_hs_compare(float64 xa, float64 xb, float_status *s, bool is_quiet) goto soft; } - float64_input_flush2(&ua.s, &ub.s, s); + if (unlikely(float64_is_denormal(ua.s) || float64_is_denormal(ub.s))) { + /* We may need to set the input_denormal_used flag */ + goto soft; + } + if (isgreaterequal(ua.h, ub.h)) { if (isgreater(ua.h, ub.h)) { return float_relation_greater; @@ -4844,7 +4930,7 @@ float128 float128_silence_nan(float128 a, float_status *status) static bool parts_squash_denormal(FloatParts64 p, float_status *status) { if (p.exp == 0 && p.frac != 0) { - float_raise(float_flag_input_denormal, status); + float_raise(float_flag_input_denormal_flushed, status); return true; } @@ -4920,6 +5006,25 @@ void normalizeFloatx80Subnormal(uint64_t aSig, int32_t *zExpPtr, *zExpPtr = 1 - shiftCount; } +/*---------------------------------------------------------------------------- +| Takes two extended double-precision floating-point values `a' and `b', one +| of which is a NaN, and returns the appropriate NaN result. If either `a' or +| `b' is a signaling NaN, the invalid exception is raised. 
+*----------------------------------------------------------------------------*/ + +floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b, float_status *status) +{ + FloatParts128 pa, pb, *pr; + + if (!floatx80_unpack_canonical(&pa, a, status) || + !floatx80_unpack_canonical(&pb, b, status)) { + return floatx80_default_nan(status); + } + + pr = parts_pick_nan(&pa, &pb, status); + return floatx80_round_pack_canonical(pr, status); +} + /*---------------------------------------------------------------------------- | Takes an abstract floating-point value having sign `zSign', exponent `zExp', | and extended significand formed by the concatenation of `zSig0' and `zSig1', @@ -4994,7 +5099,7 @@ floatx80 roundAndPackFloatx80(FloatX80RoundPrec roundingPrecision, bool zSign, } if ( zExp <= 0 ) { if (status->flush_to_zero) { - float_raise(float_flag_output_denormal, status); + float_raise(float_flag_output_denormal_flushed, status); return packFloatx80(zSign, 0, 0); } isTiny = status->tininess_before_rounding @@ -5068,9 +5173,7 @@ floatx80 roundAndPackFloatx80(FloatX80RoundPrec roundingPrecision, bool zSign, ) { return packFloatx80( zSign, 0x7FFE, ~ roundMask ); } - return packFloatx80(zSign, - floatx80_infinity_high, - floatx80_infinity_low); + return floatx80_default_inf(zSign, status); } if ( zExp <= 0 ) { isTiny = status->tininess_before_rounding @@ -5208,6 +5311,8 @@ float32 float32_exp2(float32 a, float_status *status) float32_unpack_canonical(&xp, a, status); if (unlikely(xp.cls != float_class_normal)) { switch (xp.cls) { + case float_class_denormal: + break; case float_class_snan: case float_class_qnan: parts_return_nan(&xp, status); @@ -5217,9 +5322,8 @@ float32 float32_exp2(float32 a, float_status *status) case float_class_zero: return float32_one; default: - break; + g_assert_not_reached(); } - g_assert_not_reached(); } float_raise(float_flag_inexact, status); @@ -5230,8 +5334,9 @@ float32 float32_exp2(float32 a, float_status *status) float64_unpack_canonical(&rp, float64_one, status); for (i = 0 ; i < 15 ; i++) { + float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status); - rp = *parts_muladd(&tp, &xnp, &rp, 0, status); + rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status); xnp = *parts_mul(&xnp, &xp, status); } diff --git a/fsdev/9p-iov-marshal.c b/fsdev/9p-iov-marshal.c index a1c9beddd2e..0c5a1a0fa21 100644 --- a/fsdev/9p-iov-marshal.c +++ b/fsdev/9p-iov-marshal.c @@ -84,9 +84,12 @@ ssize_t v9fs_iov_vunmarshal(struct iovec *out_sg, int out_num, size_t offset, break; } case 'w': { - uint16_t val, *valp; + uint16_t val = 0, *valp; valp = va_arg(ap, uint16_t *); copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val)); + if (copied <= 0) { + break; + } if (bswap) { *valp = le16_to_cpu(val); } else { @@ -95,9 +98,12 @@ ssize_t v9fs_iov_vunmarshal(struct iovec *out_sg, int out_num, size_t offset, break; } case 'd': { - uint32_t val, *valp; + uint32_t val = 0, *valp; valp = va_arg(ap, uint32_t *); copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val)); + if (copied <= 0) { + break; + } if (bswap) { *valp = le32_to_cpu(val); } else { @@ -106,9 +112,12 @@ ssize_t v9fs_iov_vunmarshal(struct iovec *out_sg, int out_num, size_t offset, break; } case 'q': { - uint64_t val, *valp; + uint64_t val = 0, *valp; valp = va_arg(ap, uint64_t *); copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val)); + if (copied <= 0) { + break; + } if (bswap) { *valp = le64_to_cpu(val); } else { diff --git a/fsdev/9p-marshal.c b/fsdev/9p-marshal.c index f9b0336cd59..3455580703d 
100644 --- a/fsdev/9p-marshal.c +++ b/fsdev/9p-marshal.c @@ -27,8 +27,7 @@ void v9fs_string_free(V9fsString *str) str->size = 0; } -void G_GNUC_PRINTF(2, 3) -v9fs_string_sprintf(V9fsString *str, const char *fmt, ...) +void v9fs_string_sprintf(V9fsString *str, const char *fmt, ...) { va_list ap; diff --git a/fsdev/9p-marshal.h b/fsdev/9p-marshal.h index f1abbe151c3..8995e420678 100644 --- a/fsdev/9p-marshal.h +++ b/fsdev/9p-marshal.h @@ -76,7 +76,8 @@ static inline void v9fs_string_init(V9fsString *str) str->size = 0; } void v9fs_string_free(V9fsString *str); -void v9fs_string_sprintf(V9fsString *str, const char *fmt, ...); +void G_GNUC_PRINTF(2, 3) v9fs_string_sprintf(V9fsString *str, const char *fmt, + ...); void v9fs_string_copy(V9fsString *lhs, V9fsString *rhs); #endif diff --git a/fsdev/file-op-9p.h b/fsdev/file-op-9p.h index 4997677460e..b9dae8c84c2 100644 --- a/fsdev/file-op-9p.h +++ b/fsdev/file-op-9p.h @@ -129,6 +129,8 @@ struct FileOperations { int (*chown)(FsContext *, V9fsPath *, FsCred *); int (*mknod)(FsContext *, V9fsPath *, const char *, FsCred *); int (*utimensat)(FsContext *, V9fsPath *, const struct timespec *); + int (*futimens)(FsContext *ctx, int fid_type, V9fsFidOpenState *fs, + const struct timespec *times); int (*remove)(FsContext *, const char *); int (*symlink)(FsContext *, const char *, V9fsPath *, const char *, FsCred *); @@ -152,6 +154,8 @@ struct FileOperations { int (*fstat)(FsContext *, int, V9fsFidOpenState *, struct stat *); int (*rename)(FsContext *, const char *, const char *); int (*truncate)(FsContext *, V9fsPath *, off_t); + int (*ftruncate)(FsContext *ctx, int fid_type, V9fsFidOpenState *fs, + off_t size); int (*fsync)(FsContext *, int, V9fsFidOpenState *, int); int (*statfs)(FsContext *s, V9fsPath *path, struct statfs *stbuf); ssize_t (*lgetxattr)(FsContext *, V9fsPath *, @@ -164,6 +168,7 @@ struct FileOperations { int (*renameat)(FsContext *ctx, V9fsPath *olddir, const char *old_name, V9fsPath *newdir, const char *new_name); int (*unlinkat)(FsContext *ctx, V9fsPath *dir, const char *name, int flags); + bool (*has_valid_file_handle)(int fid_type, V9fsFidOpenState *fs); }; #endif diff --git a/fsdev/meson.build b/fsdev/meson.build index e20d7255e1e..c751d8cb622 100644 --- a/fsdev/meson.build +++ b/fsdev/meson.build @@ -8,11 +8,3 @@ fsdev_ss.add(when: ['CONFIG_FSDEV_9P'], if_true: files( if host_os in ['linux', 'darwin'] system_ss.add_all(fsdev_ss) endif - -if have_virtfs_proxy_helper - executable('virtfs-proxy-helper', - files('virtfs-proxy-helper.c', '9p-marshal.c', '9p-iov-marshal.c'), - dependencies: [qemuutil, libattr, libcap_ng], - install: true, - install_dir: get_option('libexecdir')) -endif diff --git a/fsdev/qemu-fsdev.c b/fsdev/qemu-fsdev.c index f5c953a7105..57877dad0a7 100644 --- a/fsdev/qemu-fsdev.c +++ b/fsdev/qemu-fsdev.c @@ -89,17 +89,6 @@ static FsDriverTable FsDrivers[] = { NULL }, }, - { - .name = "proxy", - .ops = &proxy_ops, - .opts = (const char * []) { - COMMON_FS_DRIVER_OPTIONS, - "socket", - "sock_fd", - "writeout", - NULL - }, - }, }; static int validate_opt(void *opaque, const char *name, const char *value, @@ -133,14 +122,6 @@ int qemu_fsdev_add(QemuOpts *opts, Error **errp) } if (fsdriver) { - if (strncmp(fsdriver, "proxy", 5) == 0) { - warn_report( - "'-fsdev proxy' and '-virtfs proxy' are deprecated, use " - "'local' instead of 'proxy, or consider deploying virtiofsd " - "as alternative to 9p" - ); - } - for (i = 0; i < ARRAY_SIZE(FsDrivers); i++) { if (strcmp(FsDrivers[i].name, fsdriver) == 0) { break; diff --git 
a/fsdev/qemu-fsdev.h b/fsdev/qemu-fsdev.h index 52a53977701..731f1406a81 100644 --- a/fsdev/qemu-fsdev.h +++ b/fsdev/qemu-fsdev.h @@ -18,5 +18,4 @@ int qemu_fsdev_add(QemuOpts *opts, Error **errp); FsDriverEntry *get_fsdev_fsentry(char *id); extern FileOperations local_ops; extern FileOperations synth_ops; -extern FileOperations proxy_ops; #endif diff --git a/fsdev/virtfs-proxy-helper.c b/fsdev/virtfs-proxy-helper.c deleted file mode 100644 index 144aaf585ad..00000000000 --- a/fsdev/virtfs-proxy-helper.c +++ /dev/null @@ -1,1193 +0,0 @@ -/* - * Helper for QEMU Proxy FS Driver - * Copyright IBM, Corp. 2011 - * - * Authors: - * M. Mohan Kumar - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - */ - -/* - * NOTE: The 9p 'proxy' backend is deprecated (since QEMU 8.1) and will be - * removed in a future version of QEMU! - */ - -#include "qemu/osdep.h" -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef CONFIG_LINUX_MAGIC_H -#include -#endif -#include -#include "qemu/sockets.h" -#include "qemu/xattr.h" -#include "9p-iov-marshal.h" -#include "hw/9pfs/9p-proxy.h" -#include "hw/9pfs/9p-util.h" -#include "fsdev/9p-iov-marshal.h" - -#define PROGNAME "virtfs-proxy-helper" - -#ifndef XFS_SUPER_MAGIC -#define XFS_SUPER_MAGIC 0x58465342 -#endif -#ifndef EXT2_SUPER_MAGIC -#define EXT2_SUPER_MAGIC 0xEF53 -#endif -#ifndef REISERFS_SUPER_MAGIC -#define REISERFS_SUPER_MAGIC 0x52654973 -#endif -#ifndef BTRFS_SUPER_MAGIC -#define BTRFS_SUPER_MAGIC 0x9123683E -#endif - -static const struct option helper_opts[] = { - {"fd", required_argument, NULL, 'f'}, - {"path", required_argument, NULL, 'p'}, - {"nodaemon", no_argument, NULL, 'n'}, - {"socket", required_argument, NULL, 's'}, - {"uid", required_argument, NULL, 'u'}, - {"gid", required_argument, NULL, 'g'}, - {}, -}; - -static bool is_daemon; -static bool get_version; /* IOC getversion IOCTL supported */ -static char *prog_name; - -static void G_GNUC_PRINTF(2, 3) do_log(int loglevel, const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - if (is_daemon) { - vsyslog(LOG_CRIT, format, ap); - } else { - vfprintf(stderr, format, ap); - } - va_end(ap); -} - -static void do_perror(const char *string) -{ - if (is_daemon) { - syslog(LOG_CRIT, "%s:%s", string, strerror(errno)); - } else { - fprintf(stderr, "%s:%s\n", string, strerror(errno)); - } -} - -static int init_capabilities(void) -{ - /* helper needs following capabilities only */ - int cap_list[] = { - CAP_CHOWN, - CAP_DAC_OVERRIDE, - CAP_FOWNER, - CAP_FSETID, - CAP_SETGID, - CAP_MKNOD, - CAP_SETUID, - }; - int i; - - capng_clear(CAPNG_SELECT_BOTH); - for (i = 0; i < ARRAY_SIZE(cap_list); i++) { - if (capng_update(CAPNG_ADD, CAPNG_EFFECTIVE | CAPNG_PERMITTED, - cap_list[i]) < 0) { - do_perror("capng_update"); - return -1; - } - } - if (capng_apply(CAPNG_SELECT_BOTH) < 0) { - do_perror("capng_apply"); - return -1; - } - - /* Prepare effective set for setugid. 
*/ - for (i = 0; i < ARRAY_SIZE(cap_list); i++) { - if (cap_list[i] == CAP_DAC_OVERRIDE) { - continue; - } - - if (capng_update(CAPNG_DROP, CAPNG_EFFECTIVE, - cap_list[i]) < 0) { - do_perror("capng_update"); - return -1; - } - } - return 0; -} - -static int socket_read(int sockfd, void *buff, ssize_t size) -{ - ssize_t retval, total = 0; - - while (size) { - retval = read(sockfd, buff, size); - if (retval == 0) { - return -EIO; - } - if (retval < 0) { - if (errno == EINTR) { - continue; - } - return -errno; - } - size -= retval; - buff += retval; - total += retval; - } - return total; -} - -static int socket_write(int sockfd, void *buff, ssize_t size) -{ - ssize_t retval, total = 0; - - while (size) { - retval = write(sockfd, buff, size); - if (retval < 0) { - if (errno == EINTR) { - continue; - } - return -errno; - } - size -= retval; - buff += retval; - total += retval; - } - return total; -} - -static int read_request(int sockfd, struct iovec *iovec, ProxyHeader *header) -{ - int retval; - - /* - * read the request header. - */ - iovec->iov_len = 0; - retval = socket_read(sockfd, iovec->iov_base, PROXY_HDR_SZ); - if (retval < 0) { - return retval; - } - iovec->iov_len = PROXY_HDR_SZ; - retval = proxy_unmarshal(iovec, 0, "dd", &header->type, &header->size); - if (retval < 0) { - return retval; - } - /* - * We can't process message.size > PROXY_MAX_IO_SZ. - * Treat it as fatal error - */ - if (header->size > PROXY_MAX_IO_SZ) { - return -ENOBUFS; - } - retval = socket_read(sockfd, iovec->iov_base + PROXY_HDR_SZ, header->size); - if (retval < 0) { - return retval; - } - iovec->iov_len += header->size; - return 0; -} - -static int send_fd(int sockfd, int fd) -{ - struct msghdr msg; - struct iovec iov; - int retval, data; - struct cmsghdr *cmsg; - union MsgControl msg_control; - - iov.iov_base = &data; - iov.iov_len = sizeof(data); - - memset(&msg, 0, sizeof(msg)); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - /* No ancillary data on error */ - if (fd < 0) { - /* fd is really negative errno if the request failed */ - data = fd; - } else { - data = V9FS_FD_VALID; - msg.msg_control = &msg_control; - msg.msg_controllen = sizeof(msg_control); - - cmsg = &msg_control.cmsg; - cmsg->cmsg_len = CMSG_LEN(sizeof(fd)); - cmsg->cmsg_level = SOL_SOCKET; - cmsg->cmsg_type = SCM_RIGHTS; - memcpy(CMSG_DATA(cmsg), &fd, sizeof(fd)); - } - - do { - retval = sendmsg(sockfd, &msg, 0); - } while (retval < 0 && errno == EINTR); - if (fd >= 0) { - close(fd); - } - if (retval < 0) { - return retval; - } - return 0; -} - -static int send_status(int sockfd, struct iovec *iovec, int status) -{ - ProxyHeader header; - int retval, msg_size; - - if (status < 0) { - header.type = T_ERROR; - } else { - header.type = T_SUCCESS; - } - header.size = sizeof(status); - /* - * marshal the return status. We don't check error. - * because we are sure we have enough space for the status - */ - msg_size = proxy_marshal(iovec, 0, "ddd", header.type, - header.size, status); - if (msg_size < 0) { - return msg_size; - } - retval = socket_write(sockfd, iovec->iov_base, msg_size); - if (retval < 0) { - return retval; - } - return 0; -} - -/* - * from man 7 capabilities, section - * Effect of User ID Changes on Capabilities: - * If the effective user ID is changed from nonzero to 0, then the permitted - * set is copied to the effective set. If the effective user ID is changed - * from 0 to nonzero, then all capabilities are are cleared from the effective - * set. 
- * - * The setfsuid/setfsgid man pages warn that changing the effective user ID may - * expose the program to unwanted signals, but this is not true anymore: for an - * unprivileged (without CAP_KILL) program to send a signal, the real or - * effective user ID of the sending process must equal the real or saved user - * ID of the target process. Even when dropping privileges, it is enough to - * keep the saved UID to a "privileged" value and virtfs-proxy-helper won't - * be exposed to signals. So just use setresuid/setresgid. - */ -static int setugid(int uid, int gid, int *suid, int *sgid) -{ - int retval; - - *suid = geteuid(); - *sgid = getegid(); - - if (setresgid(-1, gid, *sgid) == -1) { - return -errno; - } - - if (setresuid(-1, uid, *suid) == -1) { - retval = -errno; - goto err_sgid; - } - - if (uid == 0 && gid == 0) { - /* Linux has already copied the permitted set to the effective set. */ - return 0; - } - - /* - * All capabilities have been cleared from the effective set. However - * we still need DAC_OVERRIDE because we don't change supplementary - * group ids, and hence may be subject to DAC rules. init_capabilities - * left the set of capabilities that we want in libcap-ng's state. - */ - if (capng_apply(CAPNG_SELECT_CAPS) < 0) { - retval = -errno; - do_perror("capng_apply"); - goto err_suid; - } - return 0; - -err_suid: - if (setresuid(-1, *suid, *suid) == -1) { - abort(); - } -err_sgid: - if (setresgid(-1, *sgid, *sgid) == -1) { - abort(); - } - return retval; -} - -/* - * This is used to reset the ugid back with the saved values - * There is nothing much we can do checking error values here. - */ -static void resetugid(int suid, int sgid) -{ - if (setresgid(-1, sgid, sgid) == -1) { - abort(); - } - if (setresuid(-1, suid, suid) == -1) { - abort(); - } -} - -/* - * Open regular file or directory. Attempts to open any special file are - * rejected. - * - * returns file descriptor or -1 on error - */ -static int open_regular(const char *pathname, int flags, mode_t mode) -{ - int fd; - - fd = open(pathname, flags, mode); - if (fd < 0) { - return fd; - } - - if (close_if_special_file(fd) < 0) { - return -1; - } - - return fd; -} - -/* - * send response in two parts - * 1) ProxyHeader - * 2) Response or error status - * This function should be called with marshaled response - * send_response constructs header part and error part only. - * send response sends {ProxyHeader,Response} if the request was success - * otherwise sends {ProxyHeader,error status} - */ -static int send_response(int sock, struct iovec *iovec, int size) -{ - int retval; - ProxyHeader header; - - /* - * If response size exceeds available iovec->iov_len, - * we return ENOBUFS - */ - if (size > PROXY_MAX_IO_SZ) { - size = -ENOBUFS; - } - - if (size < 0) { - /* - * In case of error we would not have got the error encoded - * already so encode the error here. 
- */ - header.type = T_ERROR; - header.size = sizeof(size); - proxy_marshal(iovec, PROXY_HDR_SZ, "d", size); - } else { - header.type = T_SUCCESS; - header.size = size; - } - proxy_marshal(iovec, 0, "dd", header.type, header.size); - retval = socket_write(sock, iovec->iov_base, header.size + PROXY_HDR_SZ); - if (retval < 0) { - return retval; - } - return 0; -} - -/* - * gets generation number - * returns -errno on failure and sizeof(generation number) on success - */ -static int do_getversion(struct iovec *iovec, struct iovec *out_iovec) -{ - uint64_t version; - int retval = -ENOTTY; -#ifdef FS_IOC_GETVERSION - int fd; - V9fsString path; -#endif - - - /* no need to issue ioctl */ - if (!get_version) { - version = 0; - retval = proxy_marshal(out_iovec, PROXY_HDR_SZ, "q", version); - return retval; - } -#ifdef FS_IOC_GETVERSION - retval = proxy_unmarshal(iovec, PROXY_HDR_SZ, "s", &path); - if (retval < 0) { - return retval; - } - - fd = open(path.data, O_RDONLY); - if (fd < 0) { - retval = -errno; - goto err_out; - } - if (ioctl(fd, FS_IOC_GETVERSION, &version) < 0) { - retval = -errno; - } else { - retval = proxy_marshal(out_iovec, PROXY_HDR_SZ, "q", version); - } - close(fd); -err_out: - v9fs_string_free(&path); -#endif - return retval; -} - -static int do_getxattr(int type, struct iovec *iovec, struct iovec *out_iovec) -{ - int size = 0, offset, retval; - V9fsString path, name, xattr; - - v9fs_string_init(&xattr); - v9fs_string_init(&path); - retval = proxy_unmarshal(iovec, PROXY_HDR_SZ, "ds", &size, &path); - if (retval < 0) { - return retval; - } - offset = PROXY_HDR_SZ + retval; - - if (size) { - xattr.data = g_malloc(size); - xattr.size = size; - } - switch (type) { - case T_LGETXATTR: - v9fs_string_init(&name); - retval = proxy_unmarshal(iovec, offset, "s", &name); - if (retval > 0) { - retval = lgetxattr(path.data, name.data, xattr.data, size); - if (retval < 0) { - retval = -errno; - } else { - xattr.size = retval; - } - } - v9fs_string_free(&name); - break; - case T_LLISTXATTR: - retval = llistxattr(path.data, xattr.data, size); - if (retval < 0) { - retval = -errno; - } else { - xattr.size = retval; - } - break; - } - if (retval < 0) { - goto err_out; - } - - if (!size) { - proxy_marshal(out_iovec, PROXY_HDR_SZ, "d", retval); - retval = sizeof(retval); - } else { - retval = proxy_marshal(out_iovec, PROXY_HDR_SZ, "s", &xattr); - } -err_out: - v9fs_string_free(&xattr); - v9fs_string_free(&path); - return retval; -} - -static void stat_to_prstat(ProxyStat *pr_stat, struct stat *stat) -{ - memset(pr_stat, 0, sizeof(*pr_stat)); - pr_stat->st_dev = stat->st_dev; - pr_stat->st_ino = stat->st_ino; - pr_stat->st_nlink = stat->st_nlink; - pr_stat->st_mode = stat->st_mode; - pr_stat->st_uid = stat->st_uid; - pr_stat->st_gid = stat->st_gid; - pr_stat->st_rdev = stat->st_rdev; - pr_stat->st_size = stat->st_size; - pr_stat->st_blksize = stat->st_blksize; - pr_stat->st_blocks = stat->st_blocks; - pr_stat->st_atim_sec = stat->st_atim.tv_sec; - pr_stat->st_atim_nsec = stat->st_atim.tv_nsec; - pr_stat->st_mtim_sec = stat->st_mtim.tv_sec; - pr_stat->st_mtim_nsec = stat->st_mtim.tv_nsec; - pr_stat->st_ctim_sec = stat->st_ctim.tv_sec; - pr_stat->st_ctim_nsec = stat->st_ctim.tv_nsec; -} - -static void statfs_to_prstatfs(ProxyStatFS *pr_stfs, struct statfs *stfs) -{ - memset(pr_stfs, 0, sizeof(*pr_stfs)); - pr_stfs->f_type = stfs->f_type; - pr_stfs->f_bsize = stfs->f_bsize; - pr_stfs->f_blocks = stfs->f_blocks; - pr_stfs->f_bfree = stfs->f_bfree; - pr_stfs->f_bavail = stfs->f_bavail; - 
pr_stfs->f_files = stfs->f_files; - pr_stfs->f_ffree = stfs->f_ffree; - pr_stfs->f_fsid[0] = stfs->f_fsid.__val[0]; - pr_stfs->f_fsid[1] = stfs->f_fsid.__val[1]; - pr_stfs->f_namelen = stfs->f_namelen; - pr_stfs->f_frsize = stfs->f_frsize; -} - -/* - * Gets stat/statfs information and packs in out_iovec structure - * on success returns number of bytes packed in out_iovec structure - * otherwise returns -errno - */ -static int do_stat(int type, struct iovec *iovec, struct iovec *out_iovec) -{ - int retval; - V9fsString path; - ProxyStat pr_stat; - ProxyStatFS pr_stfs; - struct stat st_buf; - struct statfs stfs_buf; - - v9fs_string_init(&path); - retval = proxy_unmarshal(iovec, PROXY_HDR_SZ, "s", &path); - if (retval < 0) { - return retval; - } - - switch (type) { - case T_LSTAT: - retval = lstat(path.data, &st_buf); - if (retval < 0) { - retval = -errno; - } else { - stat_to_prstat(&pr_stat, &st_buf); - retval = proxy_marshal(out_iovec, PROXY_HDR_SZ, - "qqqdddqqqqqqqqqq", pr_stat.st_dev, - pr_stat.st_ino, pr_stat.st_nlink, - pr_stat.st_mode, pr_stat.st_uid, - pr_stat.st_gid, pr_stat.st_rdev, - pr_stat.st_size, pr_stat.st_blksize, - pr_stat.st_blocks, - pr_stat.st_atim_sec, pr_stat.st_atim_nsec, - pr_stat.st_mtim_sec, pr_stat.st_mtim_nsec, - pr_stat.st_ctim_sec, pr_stat.st_ctim_nsec); - } - break; - case T_STATFS: - retval = statfs(path.data, &stfs_buf); - if (retval < 0) { - retval = -errno; - } else { - statfs_to_prstatfs(&pr_stfs, &stfs_buf); - retval = proxy_marshal(out_iovec, PROXY_HDR_SZ, - "qqqqqqqqqqq", pr_stfs.f_type, - pr_stfs.f_bsize, pr_stfs.f_blocks, - pr_stfs.f_bfree, pr_stfs.f_bavail, - pr_stfs.f_files, pr_stfs.f_ffree, - pr_stfs.f_fsid[0], pr_stfs.f_fsid[1], - pr_stfs.f_namelen, pr_stfs.f_frsize); - } - break; - } - v9fs_string_free(&path); - return retval; -} - -static int do_readlink(struct iovec *iovec, struct iovec *out_iovec) -{ - char *buffer; - int size, retval; - V9fsString target, path; - - v9fs_string_init(&path); - retval = proxy_unmarshal(iovec, PROXY_HDR_SZ, "sd", &path, &size); - if (retval < 0) { - v9fs_string_free(&path); - return retval; - } - buffer = g_malloc(size); - v9fs_string_init(&target); - retval = readlink(path.data, buffer, size - 1); - if (retval > 0) { - buffer[retval] = '\0'; - v9fs_string_sprintf(&target, "%s", buffer); - retval = proxy_marshal(out_iovec, PROXY_HDR_SZ, "s", &target); - } else { - retval = -errno; - } - g_free(buffer); - v9fs_string_free(&target); - v9fs_string_free(&path); - return retval; -} - -/* - * create other filesystem objects and send 0 on success - * return -errno on error - */ -static int do_create_others(int type, struct iovec *iovec) -{ - dev_t rdev; - int retval = 0; - int offset = PROXY_HDR_SZ; - V9fsString oldpath, path; - int mode, uid, gid, cur_uid, cur_gid; - - v9fs_string_init(&path); - v9fs_string_init(&oldpath); - - retval = proxy_unmarshal(iovec, offset, "dd", &uid, &gid); - if (retval < 0) { - return retval; - } - offset += retval; - retval = setugid(uid, gid, &cur_uid, &cur_gid); - if (retval < 0) { - goto unmarshal_err_out; - } - switch (type) { - case T_MKNOD: - retval = proxy_unmarshal(iovec, offset, "sdq", &path, &mode, &rdev); - if (retval < 0) { - goto err_out; - } - retval = mknod(path.data, mode, rdev); - break; - case T_MKDIR: - retval = proxy_unmarshal(iovec, offset, "sd", &path, &mode); - if (retval < 0) { - goto err_out; - } - retval = g_mkdir(path.data, mode); - break; - case T_SYMLINK: - retval = proxy_unmarshal(iovec, offset, "ss", &oldpath, &path); - if (retval < 0) { - goto err_out; - } 
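do_create_others() and do_create() bracket the actual syscall with setugid()/resetugid(); as the earlier comment explains, keeping the saved IDs at their privileged values lets setresuid()/setresgid() toggle only the effective IDs without exposing the process to unwanted signals. A compressed, Linux-specific sketch of that bracket pattern, with error handling reduced to a bool and illustrative names, not the helper's exact code:

    #define _GNU_SOURCE
    #include <stdbool.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Temporarily act as uid:gid; the saved IDs keep the original creds. */
    static bool switch_creds(uid_t uid, gid_t gid, uid_t *suid, gid_t *sgid)
    {
        *suid = geteuid();
        *sgid = getegid();
        /* change the group first, while we may still have the privilege */
        if (setresgid(-1, gid, *sgid) == -1) {
            return false;
        }
        if (setresuid(-1, uid, *suid) == -1) {
            setresgid(-1, *sgid, *sgid);
            return false;
        }
        return true;
    }

    static void restore_creds(uid_t suid, gid_t sgid)
    {
        /* restore both effective IDs from the saved values */
        setresgid(-1, sgid, sgid);
        setresuid(-1, suid, suid);
    }

The real resetugid() aborts if the restore fails, since carrying on with the wrong credentials would be worse than dying.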
- retval = symlink(oldpath.data, path.data); - break; - } - if (retval < 0) { - retval = -errno; - } - -err_out: - resetugid(cur_uid, cur_gid); -unmarshal_err_out: - v9fs_string_free(&path); - v9fs_string_free(&oldpath); - return retval; -} - -/* - * create a file and send fd on success - * return -errno on error - */ -static int do_create(struct iovec *iovec) -{ - int ret; - V9fsString path; - int flags, mode, uid, gid, cur_uid, cur_gid; - - v9fs_string_init(&path); - ret = proxy_unmarshal(iovec, PROXY_HDR_SZ, "sdddd", - &path, &flags, &mode, &uid, &gid); - if (ret < 0) { - goto unmarshal_err_out; - } - ret = setugid(uid, gid, &cur_uid, &cur_gid); - if (ret < 0) { - goto unmarshal_err_out; - } - ret = open_regular(path.data, flags, mode); - if (ret < 0) { - ret = -errno; - } - - resetugid(cur_uid, cur_gid); -unmarshal_err_out: - v9fs_string_free(&path); - return ret; -} - -/* - * open a file and send fd on success - * return -errno on error - */ -static int do_open(struct iovec *iovec) -{ - int flags, ret; - V9fsString path; - - v9fs_string_init(&path); - ret = proxy_unmarshal(iovec, PROXY_HDR_SZ, "sd", &path, &flags); - if (ret < 0) { - goto err_out; - } - ret = open_regular(path.data, flags, 0); - if (ret < 0) { - ret = -errno; - } -err_out: - v9fs_string_free(&path); - return ret; -} - -/* create unix domain socket and return the descriptor */ -static int proxy_socket(const char *path, uid_t uid, gid_t gid) -{ - int sock, client; - struct sockaddr_un proxy, qemu; - socklen_t size; - - /* requested socket already exists, refuse to start */ - if (!access(path, F_OK)) { - do_log(LOG_CRIT, "socket already exists\n"); - return -1; - } - - if (strlen(path) >= sizeof(proxy.sun_path)) { - do_log(LOG_CRIT, "UNIX domain socket path exceeds %zu characters\n", - sizeof(proxy.sun_path)); - return -1; - } - - sock = socket(AF_UNIX, SOCK_STREAM, 0); - if (sock < 0) { - do_perror("socket"); - return -1; - } - - /* mask other part of mode bits */ - umask(7); - - proxy.sun_family = AF_UNIX; - strcpy(proxy.sun_path, path); - if (bind(sock, (struct sockaddr *)&proxy, - sizeof(struct sockaddr_un)) < 0) { - do_perror("bind"); - goto error; - } - if (chown(proxy.sun_path, uid, gid) < 0) { - do_perror("chown"); - goto error; - } - if (listen(sock, 1) < 0) { - do_perror("listen"); - goto error; - } - - size = sizeof(qemu); - client = accept(sock, (struct sockaddr *)&qemu, &size); - if (client < 0) { - do_perror("accept"); - goto error; - } - close(sock); - return client; - -error: - close(sock); - return -1; -} - -static void usage(void) -{ - fprintf(stderr, "usage: %s\n" - " -p|--path 9p path to export\n" - " {-f|--fd } socket file descriptor to be used\n" - " {-s|--socket socket file used for communication\n" - " \t-u|--uid -g|--gid } - uid:gid combination to give " - " access to this socket\n" - " \tNote: -s & -f can not be used together\n" - " [-n|--nodaemon] Run as a normal program\n", - prog_name); -} - -static int process_reply(int sock, int type, - struct iovec *out_iovec, int retval) -{ - switch (type) { - case T_OPEN: - case T_CREATE: - if (send_fd(sock, retval) < 0) { - return -1; - } - break; - case T_MKNOD: - case T_MKDIR: - case T_SYMLINK: - case T_LINK: - case T_CHMOD: - case T_CHOWN: - case T_TRUNCATE: - case T_UTIME: - case T_RENAME: - case T_REMOVE: - case T_LSETXATTR: - case T_LREMOVEXATTR: - if (send_status(sock, out_iovec, retval) < 0) { - return -1; - } - break; - case T_LSTAT: - case T_STATFS: - case T_READLINK: - case T_LGETXATTR: - case T_LLISTXATTR: - case T_GETVERSION: - if 
(send_response(sock, out_iovec, retval) < 0) { - return -1; - } - break; - default: - return -1; - break; - } - return 0; -} - -static int process_requests(int sock) -{ - int flags; - int size = 0; - int retval = 0; - uint64_t offset; - ProxyHeader header; - int mode, uid, gid; - V9fsString name, value; - struct timespec spec[2]; - V9fsString oldpath, path; - struct iovec in_iovec, out_iovec; - - in_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ); - in_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ; - out_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ); - out_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ; - - while (1) { - /* - * initialize the header type, so that we send - * response to proper request type. - */ - header.type = 0; - retval = read_request(sock, &in_iovec, &header); - if (retval < 0) { - goto err_out; - } - - switch (header.type) { - case T_OPEN: - retval = do_open(&in_iovec); - break; - case T_CREATE: - retval = do_create(&in_iovec); - break; - case T_MKNOD: - case T_MKDIR: - case T_SYMLINK: - retval = do_create_others(header.type, &in_iovec); - break; - case T_LINK: - v9fs_string_init(&path); - v9fs_string_init(&oldpath); - retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ, - "ss", &oldpath, &path); - if (retval > 0) { - retval = link(oldpath.data, path.data); - if (retval < 0) { - retval = -errno; - } - } - v9fs_string_free(&oldpath); - v9fs_string_free(&path); - break; - case T_LSTAT: - case T_STATFS: - retval = do_stat(header.type, &in_iovec, &out_iovec); - break; - case T_READLINK: - retval = do_readlink(&in_iovec, &out_iovec); - break; - case T_CHMOD: - v9fs_string_init(&path); - retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ, - "sd", &path, &mode); - if (retval > 0) { - retval = chmod(path.data, mode); - if (retval < 0) { - retval = -errno; - } - } - v9fs_string_free(&path); - break; - case T_CHOWN: - v9fs_string_init(&path); - retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ, "sdd", &path, - &uid, &gid); - if (retval > 0) { - retval = lchown(path.data, uid, gid); - if (retval < 0) { - retval = -errno; - } - } - v9fs_string_free(&path); - break; - case T_TRUNCATE: - v9fs_string_init(&path); - retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ, "sq", - &path, &offset); - if (retval > 0) { - retval = truncate(path.data, offset); - if (retval < 0) { - retval = -errno; - } - } - v9fs_string_free(&path); - break; - case T_UTIME: - v9fs_string_init(&path); - retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ, "sqqqq", &path, - &spec[0].tv_sec, &spec[0].tv_nsec, - &spec[1].tv_sec, &spec[1].tv_nsec); - if (retval > 0) { - retval = utimensat(AT_FDCWD, path.data, spec, - AT_SYMLINK_NOFOLLOW); - if (retval < 0) { - retval = -errno; - } - } - v9fs_string_free(&path); - break; - case T_RENAME: - v9fs_string_init(&path); - v9fs_string_init(&oldpath); - retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ, - "ss", &oldpath, &path); - if (retval > 0) { - retval = rename(oldpath.data, path.data); - if (retval < 0) { - retval = -errno; - } - } - v9fs_string_free(&oldpath); - v9fs_string_free(&path); - break; - case T_REMOVE: - v9fs_string_init(&path); - retval = proxy_unmarshal(&in_iovec, PROXY_HDR_SZ, "s", &path); - if (retval > 0) { - retval = remove(path.data); - if (retval < 0) { - retval = -errno; - } - } - v9fs_string_free(&path); - break; - case T_LGETXATTR: - case T_LLISTXATTR: - retval = do_getxattr(header.type, &in_iovec, &out_iovec); - break; - case T_LSETXATTR: - v9fs_string_init(&path); - v9fs_string_init(&name); - v9fs_string_init(&value); - retval = 
proxy_unmarshal(&in_iovec, PROXY_HDR_SZ, "sssdd", &path, - &name, &value, &size, &flags); - if (retval > 0) { - retval = lsetxattr(path.data, - name.data, value.data, size, flags); - if (retval < 0) { - retval = -errno; - } - } - v9fs_string_free(&path); - v9fs_string_free(&name); - v9fs_string_free(&value); - break; - case T_LREMOVEXATTR: - v9fs_string_init(&path); - v9fs_string_init(&name); - retval = proxy_unmarshal(&in_iovec, - PROXY_HDR_SZ, "ss", &path, &name); - if (retval > 0) { - retval = lremovexattr(path.data, name.data); - if (retval < 0) { - retval = -errno; - } - } - v9fs_string_free(&path); - v9fs_string_free(&name); - break; - case T_GETVERSION: - retval = do_getversion(&in_iovec, &out_iovec); - break; - default: - goto err_out; - break; - } - - if (process_reply(sock, header.type, &out_iovec, retval) < 0) { - goto err_out; - } - } -err_out: - g_free(in_iovec.iov_base); - g_free(out_iovec.iov_base); - return -1; -} - -int main(int argc, char **argv) -{ - int sock; - uid_t own_u; - gid_t own_g; - char *rpath = NULL; - char *sock_name = NULL; - struct stat stbuf; - int c, option_index; -#ifdef FS_IOC_GETVERSION - int retval; - struct statfs st_fs; -#endif - - fprintf(stderr, "NOTE: The 9p 'proxy' backend is deprecated (since " - "QEMU 8.1) and will be removed in a future version of " - "QEMU!\n"); - - prog_name = g_path_get_basename(argv[0]); - - is_daemon = true; - sock = -1; - own_u = own_g = -1; - while (1) { - option_index = 0; - c = getopt_long(argc, argv, "p:nh?f:s:u:g:", helper_opts, - &option_index); - if (c == -1) { - break; - } - switch (c) { - case 'p': - rpath = g_strdup(optarg); - break; - case 'n': - is_daemon = false; - break; - case 'f': - sock = atoi(optarg); - break; - case 's': - sock_name = g_strdup(optarg); - break; - case 'u': - own_u = atoi(optarg); - break; - case 'g': - own_g = atoi(optarg); - break; - case '?': - case 'h': - default: - usage(); - exit(EXIT_FAILURE); - } - } - - /* Parameter validation */ - if ((sock_name == NULL && sock == -1) || rpath == NULL) { - fprintf(stderr, "socket, socket descriptor or path not specified\n"); - usage(); - return -1; - } - - if (sock_name && sock != -1) { - fprintf(stderr, "both named socket and socket descriptor specified\n"); - usage(); - exit(EXIT_FAILURE); - } - - if (sock_name && (own_u == -1 || own_g == -1)) { - fprintf(stderr, "owner uid:gid not specified, "); - fprintf(stderr, - "owner uid:gid specifies who can access the socket file\n"); - usage(); - exit(EXIT_FAILURE); - } - - if (lstat(rpath, &stbuf) < 0) { - fprintf(stderr, "invalid path \"%s\" specified, %s\n", - rpath, strerror(errno)); - exit(EXIT_FAILURE); - } - - if (!S_ISDIR(stbuf.st_mode)) { - fprintf(stderr, "specified path \"%s\" is not directory\n", rpath); - exit(EXIT_FAILURE); - } - - if (is_daemon) { - if (daemon(0, 0) < 0) { - fprintf(stderr, "daemon call failed\n"); - exit(EXIT_FAILURE); - } - openlog(PROGNAME, LOG_PID, LOG_DAEMON); - } - - do_log(LOG_INFO, "Started\n"); - if (sock_name) { - sock = proxy_socket(sock_name, own_u, own_g); - if (sock < 0) { - goto error; - } - } - - if (chroot(rpath) < 0) { - do_perror("chroot"); - goto error; - } - if (chdir("/") < 0) { - do_perror("chdir"); - goto error; - } - - get_version = false; -#ifdef FS_IOC_GETVERSION - /* check whether underlying FS support IOC_GETVERSION */ - retval = statfs("/", &st_fs); - if (!retval) { - switch (st_fs.f_type) { - case EXT2_SUPER_MAGIC: - case BTRFS_SUPER_MAGIC: - case REISERFS_SUPER_MAGIC: - case XFS_SUPER_MAGIC: - get_version = true; - break; - } - } 
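main() probes the root filesystem's statfs() magic once and, when it is not a known-good type, has do_getversion() report a generation number of 0 without issuing the ioctl, so the common case never pays for a doomed ioctl per request. The same effect can be had per file by simply treating a failed FS_IOC_GETVERSION (e.g. ENOTTY on filesystems without inode generations) as "no version available", as in this hedged sketch with an illustrative function name:

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #ifdef __linux__
    #include <linux/fs.h>       /* FS_IOC_GETVERSION */
    #endif

    /* Read the inode generation number; 0 means "not supported". */
    static uint64_t read_generation(const char *path)
    {
        uint64_t version = 0;
        int fd = open(path, O_RDONLY);

        if (fd < 0) {
            return 0;
        }
    #ifdef FS_IOC_GETVERSION
        if (ioctl(fd, FS_IOC_GETVERSION, &version) < 0) {
            version = 0;        /* unsupported filesystem */
        }
    #endif
        close(fd);
        return version;
    }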
-#endif - - umask(0); - if (init_capabilities() < 0) { - goto error; - } - - process_requests(sock); -error: - g_free(rpath); - g_free(sock_name); - do_log(LOG_INFO, "Done\n"); - closelog(); - return 0; -} diff --git a/gdb-xml/aarch64-core.xml b/gdb-xml/aarch64-core.xml index e1e9dc3f913..b8046510b9a 100644 --- a/gdb-xml/aarch64-core.xml +++ b/gdb-xml/aarch64-core.xml @@ -1,5 +1,5 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/gdb-xml/alpha-core.xml b/gdb-xml/alpha-core.xml new file mode 100644 index 00000000000..c9e12f4ffdd --- /dev/null +++ b/gdb-xml/alpha-core.xml @@ -0,0 +1,136 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/gdb-xml/hexagon-core.xml b/gdb-xml/hexagon-core.xml index e181163cff0..b94378112a0 100644 --- a/gdb-xml/hexagon-core.xml +++ b/gdb-xml/hexagon-core.xml @@ -1,6 +1,6 @@ + + + + + diff --git a/gdb-xml/i386-64bit-linux.xml b/gdb-xml/i386-64bit-linux.xml new file mode 100644 index 00000000000..0f26990d2f7 --- /dev/null +++ b/gdb-xml/i386-64bit-linux.xml @@ -0,0 +1,11 @@ + + + + + + + diff --git a/gdb-xml/sparc64-core.xml b/gdb-xml/sparc64-core.xml new file mode 100644 index 00000000000..375b9bb0cc6 --- /dev/null +++ b/gdb-xml/sparc64-core.xml @@ -0,0 +1,99 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/gdbstub/gdbstub.c b/gdbstub/gdbstub.c index d08568cea0e..dd5fb5667cc 100644 --- a/gdbstub/gdbstub.c +++ b/gdbstub/gdbstub.c @@ -20,7 +20,7 @@ * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . 
* - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #include "qemu/osdep.h" @@ -28,6 +28,7 @@ #include "qemu/cutils.h" #include "qemu/module.h" #include "qemu/error-report.h" +#include "qemu/target-info.h" #include "trace.h" #include "exec/gdbstub.h" #include "gdbstub/commands.h" @@ -41,8 +42,8 @@ #endif #include "hw/core/cpu.h" -#include "sysemu/hw_accel.h" -#include "sysemu/runstate.h" +#include "system/hw_accel.h" +#include "system/runstate.h" #include "exec/replay-core.h" #include "exec/hwaddr.h" @@ -354,7 +355,6 @@ static const char *get_feature_xml(const char *p, const char **newp, GDBProcess *process) { CPUState *cpu = gdb_get_first_cpu_in_process(process); - CPUClass *cc = CPU_GET_CLASS(cpu); GDBRegisterState *r; size_t len; @@ -377,11 +377,11 @@ static const char *get_feature_xml(const char *p, const char **newp, "" "")); - if (cc->gdb_arch_name) { + if (cpu->cc->gdb_arch_name) { g_ptr_array_add( xml, g_markup_printf_escaped("%s", - cc->gdb_arch_name(cpu))); + cpu->cc->gdb_arch_name(cpu))); } for (guint i = 0; i < cpu->gdb_regs->len; i++) { r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i); @@ -520,11 +520,10 @@ GArray *gdb_get_register_list(CPUState *cpu) int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg) { - CPUClass *cc = CPU_GET_CLASS(cpu); GDBRegisterState *r; - if (reg < cc->gdb_num_core_regs) { - return cc->gdb_read_register(cpu, buf, reg); + if (reg < cpu->cc->gdb_num_core_regs) { + return cpu->cc->gdb_read_register(cpu, buf, reg); } for (guint i = 0; i < cpu->gdb_regs->len; i++) { @@ -536,13 +535,12 @@ int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg) return 0; } -static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg) +int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg) { - CPUClass *cc = CPU_GET_CLASS(cpu); GDBRegisterState *r; - if (reg < cc->gdb_num_core_regs) { - return cc->gdb_write_register(cpu, mem_buf, reg); + if (reg < cpu->cc->gdb_num_core_regs) { + return cpu->cc->gdb_write_register(cpu, mem_buf, reg); } for (guint i = 0; i < cpu->gdb_regs->len; i++) { @@ -568,15 +566,30 @@ static void gdb_register_feature(CPUState *cpu, int base_reg, g_array_append_val(cpu->gdb_regs, s); } +static const char *gdb_get_core_xml_file(CPUState *cpu) +{ + CPUClass *cc = cpu->cc; + + /* + * The CPU class can provide the XML filename via a method, + * or as a simple fixed string field. 
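gdb_read_register()/gdb_write_register() above dispatch low register numbers to the CPU class's core handlers and everything higher to the dynamically registered feature blocks, falling back to "zero bytes" for unknown registers. The shape of that dispatch, reduced to a standalone sketch with illustrative struct and field names rather than the QEMU types:

    #include <stddef.h>

    typedef int (*reg_io_fn)(void *cpu, void *buf, int reg);

    struct reg_block {
        int base_reg;       /* first register number owned by this block */
        int num_regs;
        reg_io_fn read;
    };

    static int dispatch_read(void *cpu, void *buf, int reg,
                             reg_io_fn core_read, int num_core_regs,
                             const struct reg_block *blocks, size_t nblocks)
    {
        /* Core registers always occupy the low numbers. */
        if (reg < num_core_regs) {
            return core_read(cpu, buf, reg);
        }
        /* Everything above belongs to a dynamically registered feature. */
        for (size_t i = 0; i < nblocks; i++) {
            const struct reg_block *b = &blocks[i];
            if (reg >= b->base_reg && reg < b->base_reg + b->num_regs) {
                return b->read(cpu, buf, reg - b->base_reg);
            }
        }
        return 0;   /* unknown register: report zero bytes */
    }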
+ */ + if (cc->gdb_get_core_xml_file) { + return cc->gdb_get_core_xml_file(cpu); + } + return cc->gdb_core_xml_file; +} + void gdb_init_cpu(CPUState *cpu) { - CPUClass *cc = CPU_GET_CLASS(cpu); + CPUClass *cc = cpu->cc; const GDBFeature *feature; + const char *xmlfile = gdb_get_core_xml_file(cpu); cpu->gdb_regs = g_array_new(false, false, sizeof(GDBRegisterState)); - if (cc->gdb_core_xml_file) { - feature = gdb_find_static_feature(cc->gdb_core_xml_file); + if (xmlfile) { + feature = gdb_find_static_feature(xmlfile); gdb_register_feature(cpu, 0, cc->gdb_read_register, cc->gdb_write_register, feature); @@ -1331,8 +1344,8 @@ static void handle_read_all_regs(GArray *params, void *user_ctx) len += gdb_read_register(gdbserver_state.g_cpu, gdbserver_state.mem_buf, reg_id); + g_assert(len == gdbserver_state.mem_buf->len); } - g_assert(len == gdbserver_state.mem_buf->len); gdb_memtohex(gdbserver_state.str_buf, gdbserver_state.mem_buf->data, len); gdb_put_strbuf(); @@ -1585,6 +1598,18 @@ static void handle_query_threads(GArray *params, void *user_ctx) gdbserver_state.query_cpu = gdb_next_attached_cpu(gdbserver_state.query_cpu); } +static void handle_query_gdb_server_version(GArray *params, void *user_ctx) +{ +#if defined(CONFIG_USER_ONLY) + g_string_printf(gdbserver_state.str_buf, "name:qemu-%s;version:%s;", + target_name(), QEMU_VERSION); +#else + g_string_printf(gdbserver_state.str_buf, "name:qemu-system-%s;version:%s;", + target_name(), QEMU_VERSION); +#endif + gdb_put_strbuf(); +} + static void handle_query_first_threads(GArray *params, void *user_ctx) { gdbserver_state.query_cpu = gdb_first_attached_cpu(); @@ -1646,11 +1671,8 @@ void gdb_extend_qsupported_features(char *qflags) static void handle_query_supported(GArray *params, void *user_ctx) { - CPUClass *cc; - g_string_printf(gdbserver_state.str_buf, "PacketSize=%x", MAX_PACKET_LENGTH); - cc = CPU_GET_CLASS(first_cpu); - if (cc->gdb_core_xml_file) { + if (gdb_get_core_xml_file(first_cpu)) { g_string_append(gdbserver_state.str_buf, ";qXfer:features:read+"); } @@ -1697,7 +1719,6 @@ static void handle_query_supported(GArray *params, void *user_ctx) static void handle_query_xfer_features(GArray *params, void *user_ctx) { GDBProcess *process; - CPUClass *cc; unsigned long len, total_len, addr; const char *xml; const char *p; @@ -1708,8 +1729,7 @@ static void handle_query_xfer_features(GArray *params, void *user_ctx) } process = gdb_get_cpu_process(gdbserver_state.g_cpu); - cc = CPU_GET_CLASS(gdbserver_state.g_cpu); - if (!cc->gdb_core_xml_file) { + if (!gdb_get_core_xml_file(gdbserver_state.g_cpu)) { gdb_put_packet(""); return; } @@ -1835,6 +1855,10 @@ static const GdbCmdParseEntry gdb_gen_query_table[] = { .handler = handle_query_threads, .cmd = "sThreadInfo", }, + { + .handler = handle_query_gdb_server_version, + .cmd = "GDBServerVersion", + }, { .handler = handle_query_first_threads, .cmd = "fThreadInfo", diff --git a/gdbstub/meson.build b/gdbstub/meson.build index dff741ddd4d..15c666f5752 100644 --- a/gdbstub/meson.build +++ b/gdbstub/meson.build @@ -4,34 +4,18 @@ # types such as hwaddr. 
# -# We need to build the core gdb code via a library to be able to tweak -# cflags so: - -gdb_user_ss = ss.source_set() -gdb_system_ss = ss.source_set() - # We build two versions of gdbstub, one for each mode -gdb_user_ss.add(files('gdbstub.c', 'user.c')) -gdb_system_ss.add(files('gdbstub.c', 'system.c')) - -gdb_user_ss = gdb_user_ss.apply({}) -gdb_system_ss = gdb_system_ss.apply({}) - -libgdb_user = static_library('gdb_user', - gdb_user_ss.sources() + genh, - c_args: '-DCONFIG_USER_ONLY', - build_by_default: false) - -libgdb_system = static_library('gdb_system', - gdb_system_ss.sources() + genh, - build_by_default: false) - -gdb_user = declare_dependency(objects: libgdb_user.extract_all_objects(recursive: false)) -user_ss.add(gdb_user) -gdb_system = declare_dependency(objects: libgdb_system.extract_all_objects(recursive: false)) -system_ss.add(gdb_system) - -common_ss.add(files('syscalls.c')) +user_ss.add(files( + 'gdbstub.c', + 'syscalls.c', + 'user.c' +)) + +system_ss.add(files( + 'gdbstub.c', + 'syscalls.c', + 'system.c' +)) # The user-target is specialised by the guest specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-target.c')) diff --git a/gdbstub/syscalls.c b/gdbstub/syscalls.c index 4e1295b782d..e855df21aba 100644 --- a/gdbstub/syscalls.c +++ b/gdbstub/syscalls.c @@ -7,13 +7,13 @@ * Copyright (c) 2003-2005 Fabrice Bellard * Copyright (c) 2023 Linaro Ltd * - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "semihosting/semihost.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "gdbstub/user.h" #include "gdbstub/syscalls.h" #include "gdbstub/commands.h" diff --git a/gdbstub/system.c b/gdbstub/system.c index 1ad87fe7fdf..5be0d3c58ce 100644 --- a/gdbstub/system.c +++ b/gdbstub/system.c @@ -7,7 +7,7 @@ * Copyright (c) 2003-2005 Fabrice Bellard * Copyright (c) 2022 Linaro Ltd * - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #include "qemu/osdep.h" @@ -19,9 +19,12 @@ #include "gdbstub/commands.h" #include "exec/hwaddr.h" #include "exec/tb-flush.h" -#include "sysemu/cpus.h" -#include "sysemu/runstate.h" -#include "sysemu/replay.h" +#include "accel/accel-ops.h" +#include "accel/accel-cpu-ops.h" +#include "system/cpus.h" +#include "system/runstate.h" +#include "system/replay.h" +#include "system/tcg.h" #include "hw/core/cpu.h" #include "hw/cpu/cluster.h" #include "hw/boards.h" @@ -171,7 +174,9 @@ static void gdb_vm_state_change(void *opaque, bool running, RunState state) } else { trace_gdbstub_hit_break(); } - tb_flush(cpu); + if (tcg_enabled()) { + tb_flush(cpu); + } ret = GDB_SIGNAL_TRAP; break; case RUN_STATE_PAUSED: @@ -239,7 +244,7 @@ static void gdb_monitor_open(Chardev *chr, ChardevBackend *backend, *be_opened = false; } -static void char_gdb_class_init(ObjectClass *oc, void *data) +static void char_gdb_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); @@ -330,26 +335,27 @@ static void create_processes(GDBState *s) gdb_create_default_process(s); } -int gdbserver_start(const char *device) +bool gdbserver_start(const char *device, Error **errp) { Chardev *chr = NULL; Chardev *mon_chr; g_autoptr(GString) cs = g_string_new(device); if (!first_cpu) { - error_report("gdbstub: meaningless to attach gdb to a " - "machine without any CPU."); - return -1; + error_setg(errp, "gdbstub: meaningless to attach gdb to a " + "machine without any CPU."); + return false; } if 
(!gdb_supports_guest_debug()) { - error_report("gdbstub: current accelerator doesn't " - "support guest debugging"); - return -1; + error_setg(errp, "gdbstub: current accelerator doesn't " + "support guest debugging"); + return false; } if (cs->len == 0) { - return -1; + error_setg(errp, "gdbstub: missing connection string"); + return false; } trace_gdbstub_op_start(cs->str); @@ -374,7 +380,8 @@ int gdbserver_start(const char *device) */ chr = qemu_chr_new_noreplay("gdb", cs->str, true, NULL); if (!chr) { - return -1; + error_setg(errp, "gdbstub: couldn't create chardev"); + return false; } } @@ -406,7 +413,7 @@ int gdbserver_start(const char *device) gdbserver_system_state.mon_chr = mon_chr; gdb_syscall_reset(); - return 0; + return true; } static void register_types(void) @@ -450,8 +457,6 @@ static int phy_memory_mode; int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr, uint8_t *buf, int len, bool is_write) { - CPUClass *cc; - if (phy_memory_mode) { if (is_write) { cpu_physical_memory_write(addr, buf, len); @@ -461,9 +466,8 @@ int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr, return 0; } - cc = CPU_GET_CLASS(cpu); - if (cc->memory_rw_debug) { - return cc->memory_rw_debug(cpu, addr, buf, len, is_write); + if (cpu->cc->memory_rw_debug) { + return cpu->cc->memory_rw_debug(cpu, addr, buf, len, is_write); } return cpu_memory_rw_debug(cpu, addr, buf, len, is_write); diff --git a/gdbstub/user-target.c b/gdbstub/user-target.c index b5e01fd8b00..43231e695e8 100644 --- a/gdbstub/user-target.c +++ b/gdbstub/user-target.c @@ -4,7 +4,7 @@ * Copyright (c) 2003-2005 Fabrice Bellard * Copyright (c) 2022 Linaro Ltd * - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #include "qemu/osdep.h" @@ -233,10 +233,8 @@ void gdb_handle_query_offsets(GArray *params, void *user_ctx) static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr, uint8_t *buf, int len, bool is_write) { - CPUClass *cc; - cc = CPU_GET_CLASS(cpu); - if (cc->memory_rw_debug) { - return cc->memory_rw_debug(cpu, addr, buf, len, is_write); + if (cpu->cc->memory_rw_debug) { + return cpu->cc->memory_rw_debug(cpu, addr, buf, len, is_write); } return cpu_memory_rw_debug(cpu, addr, buf, len, is_write); } @@ -317,9 +315,9 @@ void gdb_handle_v_file_open(GArray *params, void *user_ctx) int fd = open(filename, flags, mode); #endif if (fd < 0) { - g_string_printf(gdbserver_state.str_buf, "F-1,%d", errno); + g_string_printf(gdbserver_state.str_buf, "F-1,%x", errno); } else { - g_string_printf(gdbserver_state.str_buf, "F%d", fd); + g_string_printf(gdbserver_state.str_buf, "F%x", fd); } gdb_put_strbuf(); } @@ -329,7 +327,7 @@ void gdb_handle_v_file_close(GArray *params, void *user_ctx) int fd = gdb_get_cmd_param(params, 0)->val_ul; if (close(fd) == -1) { - g_string_printf(gdbserver_state.str_buf, "F-1,%d", errno); + g_string_printf(gdbserver_state.str_buf, "F-1,%x", errno); gdb_put_strbuf(); return; } @@ -352,7 +350,7 @@ void gdb_handle_v_file_pread(GArray *params, void *user_ctx) ssize_t n = pread(fd, buf, bufsiz, offset); if (n < 0) { - g_string_printf(gdbserver_state.str_buf, "F-1,%d", errno); + g_string_printf(gdbserver_state.str_buf, "F-1,%x", errno); gdb_put_strbuf(); return; } @@ -375,7 +373,7 @@ void gdb_handle_v_file_readlink(GArray *params, void *user_ctx) ssize_t n = readlink(filename, buf, BUFSIZ); #endif if (n < 0) { - g_string_printf(gdbserver_state.str_buf, "F-1,%d", errno); + g_string_printf(gdbserver_state.str_buf, "F-1,%x", errno); gdb_put_strbuf(); return; } diff --git 
a/gdbstub/user.c b/gdbstub/user.c index b36033bc7a2..67403e5a252 100644 --- a/gdbstub/user.c +++ b/gdbstub/user.c @@ -6,13 +6,14 @@ * Copyright (c) 2003-2005 Fabrice Bellard * Copyright (c) 2022 Linaro Ltd * - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #include "qemu/osdep.h" #include "qemu/bitops.h" #include "qemu/cutils.h" #include "qemu/sockets.h" +#include "qapi/error.h" #include "exec/hwaddr.h" #include "exec/tb-flush.h" #include "exec/gdbstub.h" @@ -21,6 +22,7 @@ #include "gdbstub/user.h" #include "gdbstub/enums.h" #include "hw/core/cpu.h" +#include "user/signal.h" #include "trace.h" #include "internals.h" @@ -314,33 +316,20 @@ static bool gdb_accept_socket(int gdb_fd) return true; } -static int gdbserver_open_socket(const char *path) +static int gdbserver_open_socket(const char *path, Error **errp) { - struct sockaddr_un sockaddr = {}; - int fd, ret; + g_autoptr(GString) buf = g_string_new(""); + char *pid_placeholder; - fd = socket(AF_UNIX, SOCK_STREAM, 0); - if (fd < 0) { - perror("create socket"); - return -1; + pid_placeholder = strstr(path, "%d"); + if (pid_placeholder != NULL) { + g_string_append_len(buf, path, pid_placeholder - path); + g_string_append_printf(buf, "%d", qemu_get_thread_id()); + g_string_append(buf, pid_placeholder + 2); + path = buf->str; } - sockaddr.sun_family = AF_UNIX; - pstrcpy(sockaddr.sun_path, sizeof(sockaddr.sun_path) - 1, path); - ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)); - if (ret < 0) { - perror("bind socket"); - close(fd); - return -1; - } - ret = listen(fd, 1); - if (ret < 0) { - perror("listen socket"); - close(fd); - return -1; - } - - return fd; + return unix_listen(path, errp); } static bool gdb_accept_tcp(int gdb_fd) @@ -372,14 +361,14 @@ static bool gdb_accept_tcp(int gdb_fd) return true; } -static int gdbserver_open_port(int port) +static int gdbserver_open_port(int port, Error **errp) { struct sockaddr_in sockaddr; int fd, ret; fd = socket(PF_INET, SOCK_STREAM, 0); if (fd < 0) { - perror("socket"); + error_setg_errno(errp, errno, "Failed to create socket"); return -1; } qemu_set_cloexec(fd); @@ -391,13 +380,13 @@ static int gdbserver_open_port(int port) sockaddr.sin_addr.s_addr = 0; ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)); if (ret < 0) { - perror("bind"); + error_setg_errno(errp, errno, "Failed to bind socket"); close(fd); return -1; } ret = listen(fd, 1); if (ret < 0) { - perror("listen"); + error_setg_errno(errp, errno, "Failed to listen to socket"); close(fd); return -1; } @@ -405,31 +394,122 @@ static int gdbserver_open_port(int port) return fd; } -int gdbserver_start(const char *port_or_path) +static bool gdbserver_accept(int port, int gdb_fd, const char *path) { - int port = g_ascii_strtoull(port_or_path, NULL, 10); - int gdb_fd; + bool ret; if (port > 0) { - gdb_fd = gdbserver_open_port(port); + ret = gdb_accept_tcp(gdb_fd); } else { - gdb_fd = gdbserver_open_socket(port_or_path); + ret = gdb_accept_socket(gdb_fd); + if (ret) { + gdbserver_user_state.socket_path = g_strdup(path); + } } - if (gdb_fd < 0) { - return -1; + if (!ret) { + close(gdb_fd); + } + + return ret; +} + +struct { + int port; + int gdb_fd; + char *path; +} gdbserver_args; + +static void do_gdb_handlesig(CPUState *cs, run_on_cpu_data arg) +{ + int sig; + + sig = target_to_host_signal(gdb_handlesig(cs, 0, NULL, NULL, 0)); + if (sig >= 1 && sig < NSIG) { + qemu_kill_thread(gdb_get_cpu_index(cs), sig); + } +} + +static void *gdbserver_accept_thread(void *arg) +{ + if 
(gdbserver_accept(gdbserver_args.port, gdbserver_args.gdb_fd, + gdbserver_args.path)) { + CPUState *cs = first_cpu; + + async_safe_run_on_cpu(cs, do_gdb_handlesig, RUN_ON_CPU_NULL); + qemu_kill_thread(gdb_get_cpu_index(cs), host_interrupt_signal); + } + + g_free(gdbserver_args.path); + gdbserver_args.path = NULL; + + return NULL; +} + +#define USAGE "\nUsage: -g {port|path}[,suspend={y|n}]" + +bool gdbserver_start(const char *args, Error **errp) +{ + g_auto(GStrv) argv = g_strsplit(args, ",", 0); + const char *port_or_path = NULL; + bool suspend = true; + int gdb_fd, port; + GStrv arg; + + for (arg = argv; *arg; arg++) { + g_auto(GStrv) tokens = g_strsplit(*arg, "=", 2); + + if (g_strcmp0(tokens[0], "suspend") == 0) { + if (tokens[1] == NULL) { + error_setg(errp, + "gdbstub: missing \"suspend\" option value" USAGE); + return false; + } else if (!qapi_bool_parse(tokens[0], tokens[1], + &suspend, errp)) { + return false; + } + } else { + if (port_or_path) { + error_setg(errp, "gdbstub: unknown option \"%s\"" USAGE, *arg); + return false; + } + port_or_path = *arg; + } + } + if (!port_or_path) { + error_setg(errp, "gdbstub: port or path not specified" USAGE); + return false; } - if (port > 0 && gdb_accept_tcp(gdb_fd)) { - return 0; - } else if (gdb_accept_socket(gdb_fd)) { - gdbserver_user_state.socket_path = g_strdup(port_or_path); - return 0; + port = g_ascii_strtoull(port_or_path, NULL, 10); + if (port > 0) { + gdb_fd = gdbserver_open_port(port, errp); + } else { + gdb_fd = gdbserver_open_socket(port_or_path, errp); + } + if (gdb_fd < 0) { + return false; } - /* gone wrong */ - close(gdb_fd); - return -1; + if (suspend) { + if (gdbserver_accept(port, gdb_fd, port_or_path)) { + gdb_handlesig(first_cpu, 0, NULL, NULL, 0); + return true; + } else { + error_setg(errp, "gdbstub: failed to accept connection"); + return false; + } + } else { + QemuThread thread; + + gdbserver_args.port = port; + gdbserver_args.gdb_fd = gdb_fd; + gdbserver_args.path = g_strdup(port_or_path); + qemu_thread_create(&thread, "gdb-accept", + &gdbserver_accept_thread, NULL, + QEMU_THREAD_DETACHED); + return true; + } } void gdbserver_fork_start(void) @@ -663,11 +743,8 @@ int gdb_continue_partial(char *newstates) int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr, uint8_t *buf, int len, bool is_write) { - CPUClass *cc; - - cc = CPU_GET_CLASS(cpu); - if (cc->memory_rw_debug) { - return cc->memory_rw_debug(cpu, addr, buf, len, is_write); + if (cpu->cc->memory_rw_debug) { + return cpu->cc->memory_rw_debug(cpu, addr, buf, len, is_write); } return cpu_memory_rw_debug(cpu, addr, buf, len, is_write); } diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx index c59cd6637b9..6142f60e7b1 100644 --- a/hmp-commands-info.hx +++ b/hmp-commands-info.hx @@ -256,20 +256,6 @@ SRST Show dynamic compiler info. ERST -#if defined(CONFIG_TCG) - { - .name = "opcount", - .args_type = "", - .params = "", - .help = "show dynamic compiler opcode counters", - }, -#endif - -SRST - ``info opcount`` - Show dynamic compiler opcode counters -ERST - { .name = "sync-profile", .args_type = "mean:-m,no_coalesce:-n,max:i?", @@ -281,6 +267,18 @@ ERST .cmd = hmp_info_sync_profile, }, + { + .name = "accel", + .args_type = "", + .params = "", + .help = "show accelerator info", + }, + +SRST + ``info accel`` + Show accelerator info. 
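The user-mode gdbserver_start() above now accepts "{port|path}[,suspend={y|n}]" and parses it with g_strsplit() and qapi_bool_parse(). A plain-C sketch of just that option grammar, with no GLib and made-up helper names, showing the suspend flag defaulting to true; it is an illustration of the intended behaviour, not the parser itself:

    #include <stdbool.h>
    #include <string.h>

    /* Parse "port_or_path[,suspend=y|n]" in place; *err set on failure. */
    static bool parse_gdb_arg(char *args, const char **port_or_path,
                              bool *suspend, const char **err)
    {
        *port_or_path = NULL;
        *suspend = true;                 /* default: wait for gdb to attach */
        *err = NULL;

        for (char *tok = strtok(args, ","); tok; tok = strtok(NULL, ",")) {
            if (strncmp(tok, "suspend=", 8) == 0) {
                if (strcmp(tok + 8, "y") == 0) {
                    *suspend = true;
                } else if (strcmp(tok + 8, "n") == 0) {
                    *suspend = false;
                } else {
                    *err = "bad suspend value";
                    return false;
                }
            } else if (*port_or_path == NULL) {
                *port_or_path = tok;
            } else {
                *err = "unknown option";
                return false;
            }
        }
        if (*port_or_path == NULL) {
            *err = "port or path not specified";
            return false;
        }
        return true;
    }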
+ERST + SRST ``info sync-profile [-m|-n]`` [*max*] Show synchronization profiling info, up to *max* entries (default: 10), @@ -475,9 +473,9 @@ ERST { .name = "migrate", - .args_type = "", - .params = "", - .help = "show migration status", + .args_type = "all:-a", + .params = "[-a]", + .help = "show migration status (-a: all, dump all status)", .cmd = hmp_info_migrate, }, diff --git a/hmp-commands.hx b/hmp-commands.hx index 06746f0afc3..d0e4f35a30a 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -1287,6 +1287,9 @@ ERST .name = "netdev_add", .args_type = "netdev:O", .params = "[user|tap|socket|stream|dgram|vde|bridge|hubport|netmap|vhost-user" +#ifdef CONFIG_PASST + "|passt" +#endif #ifdef CONFIG_AF_XDP "|af-xdp" #endif diff --git a/host/include/aarch64/host/atomic128-cas.h b/host/include/aarch64/host/atomic128-cas.h index 58630107bcc..991da4ef543 100644 --- a/host/include/aarch64/host/atomic128-cas.h +++ b/host/include/aarch64/host/atomic128-cas.h @@ -13,7 +13,7 @@ /* Through gcc 10, aarch64 has no support for 128-bit atomics. */ #if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128) -#include "host/include/generic/host/atomic128-cas.h" +#include "host/include/generic/host/atomic128-cas.h.inc" #else static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) { diff --git a/host/include/aarch64/host/atomic128-ldst.h b/host/include/aarch64/host/atomic128-ldst.h.inc similarity index 100% rename from host/include/aarch64/host/atomic128-ldst.h rename to host/include/aarch64/host/atomic128-ldst.h.inc diff --git a/host/include/generic/host/atomic128-cas.h b/host/include/generic/host/atomic128-cas.h.inc similarity index 100% rename from host/include/generic/host/atomic128-cas.h rename to host/include/generic/host/atomic128-cas.h.inc diff --git a/host/include/generic/host/atomic128-ldst.h b/host/include/generic/host/atomic128-ldst.h.inc similarity index 100% rename from host/include/generic/host/atomic128-ldst.h rename to host/include/generic/host/atomic128-ldst.h.inc diff --git a/host/include/i386/host/cpuinfo.h b/host/include/i386/host/cpuinfo.h index 81771733eaa..9541a64da61 100644 --- a/host/include/i386/host/cpuinfo.h +++ b/host/include/i386/host/cpuinfo.h @@ -9,6 +9,7 @@ /* Digested version of */ #define CPUINFO_ALWAYS (1u << 0) /* so cpuinfo is nonzero */ +#define CPUINFO_OSXSAVE (1u << 1) #define CPUINFO_MOVBE (1u << 2) #define CPUINFO_LZCNT (1u << 3) #define CPUINFO_POPCNT (1u << 4) diff --git a/host/include/loongarch64/host/atomic128-ldst.h b/host/include/loongarch64/host/atomic128-ldst.h deleted file mode 100644 index 9a4a8f8b9e3..00000000000 --- a/host/include/loongarch64/host/atomic128-ldst.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * SPDX-License-Identifier: GPL-2.0-or-later - * Load/store for 128-bit atomic operations, LoongArch version. - * - * See docs/devel/atomics.rst for discussion about the guarantees each - * atomic primitive is meant to provide. - */ - -#ifndef LOONGARCH_ATOMIC128_LDST_H -#define LOONGARCH_ATOMIC128_LDST_H - -#include "host/cpuinfo.h" -#include "tcg/debug-assert.h" - -#define HAVE_ATOMIC128_RO likely(cpuinfo & CPUINFO_LSX) -#define HAVE_ATOMIC128_RW HAVE_ATOMIC128_RO - -/* - * As of gcc 13 and clang 16, there is no compiler support for LSX at all. - * Use inline assembly throughout. 
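The per-host cpuinfo headers touched above publish detected CPU features as individual bits in a single unsigned word, with an always-set bit so the word is never zero, letting callers test features with simple masks. A trivial sketch of that convention; the FEAT_* names and the detection stub are placeholders, not the real CPUINFO_* values:

    #include <stdio.h>

    #define FEAT_ALWAYS   (1u << 0)   /* set so the word is never zero */
    #define FEAT_POPCNT   (1u << 1)
    #define FEAT_ATOMIC16 (1u << 2)

    static unsigned cpuinfo;          /* filled once at startup */

    static void cpuinfo_init(void)
    {
        cpuinfo = FEAT_ALWAYS | FEAT_POPCNT;   /* stand-in for detection */
    }

    int main(void)
    {
        cpuinfo_init();
        if (cpuinfo & FEAT_ATOMIC16) {
            puts("16-byte atomics available");
        } else {
            puts("falling back to a cmpxchg path");
        }
        return 0;
    }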
- */ - -static inline Int128 atomic16_read_ro(const Int128 *ptr) -{ - uint64_t l, h; - - tcg_debug_assert(HAVE_ATOMIC128_RO); - asm("vld $vr0, %2, 0\n\t" - "vpickve2gr.d %0, $vr0, 0\n\t" - "vpickve2gr.d %1, $vr0, 1" - : "=r"(l), "=r"(h) : "r"(ptr), "m"(*ptr) : "f0"); - - return int128_make128(l, h); -} - -static inline Int128 atomic16_read_rw(Int128 *ptr) -{ - return atomic16_read_ro(ptr); -} - -static inline void atomic16_set(Int128 *ptr, Int128 val) -{ - uint64_t l = int128_getlo(val), h = int128_gethi(val); - - tcg_debug_assert(HAVE_ATOMIC128_RW); - asm("vinsgr2vr.d $vr0, %1, 0\n\t" - "vinsgr2vr.d $vr0, %2, 1\n\t" - "vst $vr0, %3, 0" - : "=m"(*ptr) : "r"(l), "r"(h), "r"(ptr) : "f0"); -} - -#endif /* LOONGARCH_ATOMIC128_LDST_H */ diff --git a/host/include/loongarch64/host/atomic128-ldst.h.inc b/host/include/loongarch64/host/atomic128-ldst.h.inc new file mode 100644 index 00000000000..754d2143f0c --- /dev/null +++ b/host/include/loongarch64/host/atomic128-ldst.h.inc @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Load/store for 128-bit atomic operations, LoongArch version. + * + * See docs/devel/atomics.rst for discussion about the guarantees each + * atomic primitive is meant to provide. + */ + +#ifndef LOONGARCH_ATOMIC128_LDST_H +#define LOONGARCH_ATOMIC128_LDST_H + +#include "host/cpuinfo.h" +#include "tcg/debug-assert.h" + +#define HAVE_ATOMIC128_RO likely(cpuinfo & CPUINFO_LSX) +#define HAVE_ATOMIC128_RW HAVE_ATOMIC128_RO + +/* + * As of gcc 13 and clang 16, there is no compiler support for LSX at all. + * Use inline assembly throughout. + */ + +static inline Int128 atomic16_read_ro(const Int128 *ptr) +{ + uint64_t l, h; + + tcg_debug_assert(HAVE_ATOMIC128_RO); + asm("vld $vr0, %2, 0\n\t" + "vpickve2gr.d %0, $vr0, 0\n\t" + "vpickve2gr.d %1, $vr0, 1" + : "=r"(l), "=r"(h) : "r"(ptr), "m"(*ptr) : "$f0"); + + return int128_make128(l, h); +} + +static inline Int128 atomic16_read_rw(Int128 *ptr) +{ + return atomic16_read_ro(ptr); +} + +static inline void atomic16_set(Int128 *ptr, Int128 val) +{ + uint64_t l = int128_getlo(val), h = int128_gethi(val); + + tcg_debug_assert(HAVE_ATOMIC128_RW); + asm("vinsgr2vr.d $vr0, %1, 0\n\t" + "vinsgr2vr.d $vr0, %2, 1\n\t" + "vst $vr0, %3, 0" + : "=m"(*ptr) : "r"(l), "r"(h), "r"(ptr) : "$f0"); +} + +#endif /* LOONGARCH_ATOMIC128_LDST_H */ diff --git a/host/include/loongarch64/host/bufferiszero.c.inc b/host/include/loongarch64/host/bufferiszero.c.inc index 69891eac803..bb2598fdc3f 100644 --- a/host/include/loongarch64/host/bufferiszero.c.inc +++ b/host/include/loongarch64/host/bufferiszero.c.inc @@ -61,7 +61,8 @@ static bool buffer_is_zero_lsx(const void *buf, size_t len) "2:" : "=&r"(ret), "+r"(p) : "r"(buf), "r"(e), "r"(l) - : "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "fcc0"); + : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", + "$fcc0"); return ret; } @@ -119,7 +120,8 @@ static bool buffer_is_zero_lasx(const void *buf, size_t len) "3:" : "=&r"(ret), "+r"(p) : "r"(buf), "r"(e), "r"(l) - : "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "fcc0"); + : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", + "$fcc0"); return ret; } diff --git a/host/include/loongarch64/host/load-extract-al16-al8.h.inc b/host/include/loongarch64/host/load-extract-al16-al8.h.inc index d1fb59d8af6..9528521e7d7 100644 --- a/host/include/loongarch64/host/load-extract-al16-al8.h.inc +++ b/host/include/loongarch64/host/load-extract-al16-al8.h.inc @@ -31,7 +31,7 @@ static inline uint64_t 
load_atom_extract_al16_or_al8(void *pv, int s) asm("vld $vr0, %2, 0\n\t" "vpickve2gr.d %0, $vr0, 0\n\t" "vpickve2gr.d %1, $vr0, 1" - : "=r"(l), "=r"(h) : "r"(ptr_align), "m"(*ptr_align) : "f0"); + : "=r"(l), "=r"(h) : "r"(ptr_align), "m"(*ptr_align) : "$f0"); return (l >> shr) | (h << (-shr & 63)); } diff --git a/host/include/riscv/host/cpuinfo.h b/host/include/riscv/host/cpuinfo.h index 2b00660e362..b2b53dbf627 100644 --- a/host/include/riscv/host/cpuinfo.h +++ b/host/include/riscv/host/cpuinfo.h @@ -9,10 +9,13 @@ #define CPUINFO_ALWAYS (1u << 0) /* so cpuinfo is nonzero */ #define CPUINFO_ZBA (1u << 1) #define CPUINFO_ZBB (1u << 2) -#define CPUINFO_ZICOND (1u << 3) +#define CPUINFO_ZBS (1u << 3) +#define CPUINFO_ZICOND (1u << 4) +#define CPUINFO_ZVE64X (1u << 5) /* Initialized with a constructor. */ extern unsigned cpuinfo; +extern unsigned riscv_lg2_vlenb; /* * We cannot rely on constructor ordering, so other constructors must diff --git a/host/include/x86_64/host/atomic128-ldst.h b/host/include/x86_64/host/atomic128-ldst.h deleted file mode 100644 index 8d6f909d3c9..00000000000 --- a/host/include/x86_64/host/atomic128-ldst.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * SPDX-License-Identifier: GPL-2.0-or-later - * Load/store for 128-bit atomic operations, x86_64 version. - * - * Copyright (C) 2023 Linaro, Ltd. - * - * See docs/devel/atomics.rst for discussion about the guarantees each - * atomic primitive is meant to provide. - */ - -#ifndef X86_64_ATOMIC128_LDST_H -#define X86_64_ATOMIC128_LDST_H - -#ifdef CONFIG_INT128_TYPE -#include "host/cpuinfo.h" -#include "tcg/debug-assert.h" -#include - -typedef union { - __m128i v; - __int128_t i; - Int128 s; -} X86Int128Union; - -/* - * Through clang 16, with -mcx16, __atomic_load_n is incorrectly - * expanded to a read-write operation: lock cmpxchg16b. - */ - -#define HAVE_ATOMIC128_RO likely(cpuinfo & CPUINFO_ATOMIC_VMOVDQA) -#define HAVE_ATOMIC128_RW 1 - -static inline Int128 atomic16_read_ro(const Int128 *ptr) -{ - X86Int128Union r; - - tcg_debug_assert(HAVE_ATOMIC128_RO); - asm("vmovdqa %1, %0" : "=x" (r.v) : "m" (*ptr)); - - return r.s; -} - -static inline Int128 atomic16_read_rw(Int128 *ptr) -{ - __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16); - X86Int128Union r; - - if (HAVE_ATOMIC128_RO) { - asm("vmovdqa %1, %0" : "=x" (r.v) : "m" (*ptr_align)); - } else { - r.i = __sync_val_compare_and_swap_16(ptr_align, 0, 0); - } - return r.s; -} - -static inline void atomic16_set(Int128 *ptr, Int128 val) -{ - __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16); - X86Int128Union new = { .s = val }; - - if (HAVE_ATOMIC128_RO) { - asm("vmovdqa %1, %0" : "=m"(*ptr_align) : "x" (new.v)); - } else { - __int128_t old; - do { - old = *ptr_align; - } while (!__sync_bool_compare_and_swap_16(ptr_align, old, new.i)); - } -} -#else -/* Provide QEMU_ERROR stubs. */ -#include "host/include/generic/host/atomic128-ldst.h" -#endif - -#endif /* X86_64_ATOMIC128_LDST_H */ diff --git a/host/include/x86_64/host/atomic128-ldst.h.inc b/host/include/x86_64/host/atomic128-ldst.h.inc new file mode 100644 index 00000000000..4c698e3246f --- /dev/null +++ b/host/include/x86_64/host/atomic128-ldst.h.inc @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Load/store for 128-bit atomic operations, x86_64 version. + * + * Copyright (C) 2023 Linaro, Ltd. + * + * See docs/devel/atomics.rst for discussion about the guarantees each + * atomic primitive is meant to provide. 
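atomic16_read_rw() in the x86_64 header falls back to a 16-byte compare-and-swap when an atomic 16-byte load is not guaranteed: a CAS with old == new == 0 returns the current contents and leaves memory unchanged either way. A minimal sketch of that trick with GCC's __sync builtin (requires -mcx16 on x86-64; not the QEMU wrapper). The cost is that the "load" behaves like a store on the cache line, which is why the header keeps a separate HAVE_ATOMIC128_RO path for read-only mappings:

    #include <stdint.h>

    typedef unsigned __int128 u128;

    /* Atomic 16-byte read via cmpxchg16b: if *ptr is 0 the same 0 is
     * written back, otherwise the CAS fails and returns the old value. */
    static inline u128 atomic_read_16(u128 *ptr)
    {
        return __sync_val_compare_and_swap(ptr, (u128)0, (u128)0);
    }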
+ */ + +#ifndef X86_64_ATOMIC128_LDST_H +#define X86_64_ATOMIC128_LDST_H + +#ifdef CONFIG_INT128_TYPE +#include "host/cpuinfo.h" +#include "tcg/debug-assert.h" +#include + +typedef union { + __m128i v; + __int128_t i; + Int128 s; +} X86Int128Union; + +/* + * Through clang 16, with -mcx16, __atomic_load_n is incorrectly + * expanded to a read-write operation: lock cmpxchg16b. + */ + +#define HAVE_ATOMIC128_RO likely(cpuinfo & CPUINFO_ATOMIC_VMOVDQA) +#define HAVE_ATOMIC128_RW 1 + +static inline Int128 atomic16_read_ro(const Int128 *ptr) +{ + X86Int128Union r; + + tcg_debug_assert(HAVE_ATOMIC128_RO); + asm("vmovdqa %1, %0" : "=x" (r.v) : "m" (*ptr)); + + return r.s; +} + +static inline Int128 atomic16_read_rw(Int128 *ptr) +{ + __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16); + X86Int128Union r; + + if (HAVE_ATOMIC128_RO) { + asm("vmovdqa %1, %0" : "=x" (r.v) : "m" (*ptr_align)); + } else { + r.i = __sync_val_compare_and_swap_16(ptr_align, 0, 0); + } + return r.s; +} + +static inline void atomic16_set(Int128 *ptr, Int128 val) +{ + __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16); + X86Int128Union new = { .s = val }; + + if (HAVE_ATOMIC128_RO) { + asm("vmovdqa %1, %0" : "=m"(*ptr_align) : "x" (new.v)); + } else { + __int128_t old; + do { + old = *ptr_align; + } while (!__sync_bool_compare_and_swap_16(ptr_align, old, new.i)); + } +} +#else +/* Provide QEMU_ERROR stubs. */ +#include "host/include/generic/host/atomic128-ldst.h.inc" +#endif + +#endif /* X86_64_ATOMIC128_LDST_H */ diff --git a/host/include/x86_64/host/load-extract-al16-al8.h.inc b/host/include/x86_64/host/load-extract-al16-al8.h.inc index baa506b7b5b..b837c378684 100644 --- a/host/include/x86_64/host/load-extract-al16-al8.h.inc +++ b/host/include/x86_64/host/load-extract-al16-al8.h.inc @@ -9,7 +9,7 @@ #define X86_64_LOAD_EXTRACT_AL16_AL8_H #ifdef CONFIG_INT128_TYPE -#include "host/atomic128-ldst.h" +#include "host/atomic128-ldst.h.inc" /** * load_atom_extract_al16_or_al8: diff --git a/hw/9pfs/9p-local.c b/hw/9pfs/9p-local.c index 1b1f3b9ec81..31e216227cb 100644 --- a/hw/9pfs/9p-local.c +++ b/hw/9pfs/9p-local.c @@ -766,16 +766,19 @@ static int local_mkdir(FsContext *fs_ctx, V9fsPath *dir_path, return err; } -static int local_fstat(FsContext *fs_ctx, int fid_type, - V9fsFidOpenState *fs, struct stat *stbuf) +static int local_fid_fd(int fid_type, V9fsFidOpenState *fs) { - int err, fd; - if (fid_type == P9_FID_DIR) { - fd = dirfd(fs->dir.stream); + return dirfd(fs->dir.stream); } else { - fd = fs->fd; + return fs->fd; } +} + +static int local_fstat(FsContext *fs_ctx, int fid_type, + V9fsFidOpenState *fs, struct stat *stbuf) +{ + int err, fd = local_fid_fd(fid_type, fs); err = fstat(fd, stbuf); if (err) { @@ -1039,6 +1042,14 @@ static int local_truncate(FsContext *ctx, V9fsPath *fs_path, off_t size) return ret; } +static int local_ftruncate(FsContext *ctx, int fid_type, V9fsFidOpenState *fs, + off_t size) +{ + int fd = local_fid_fd(fid_type, fs); + + return ftruncate(fd, size); +} + static int local_chown(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp) { char *dirpath = g_path_get_dirname(fs_path->data); @@ -1089,6 +1100,14 @@ static int local_utimensat(FsContext *s, V9fsPath *fs_path, return ret; } +static int local_futimens(FsContext *s, int fid_type, V9fsFidOpenState *fs, + const struct timespec *times) +{ + int fd = local_fid_fd(fid_type, fs); + + return qemu_futimens(fd, times); +} + static int local_unlinkat_common(FsContext *ctx, int dirfd, const char *name, int flags) { @@ -1167,13 +1186,7 @@ static int 
local_remove(FsContext *ctx, const char *path) static int local_fsync(FsContext *ctx, int fid_type, V9fsFidOpenState *fs, int datasync) { - int fd; - - if (fid_type == P9_FID_DIR) { - fd = dirfd(fs->dir.stream); - } else { - fd = fs->fd; - } + int fd = local_fid_fd(fid_type, fs); if (datasync) { return qemu_fdatasync(fd); @@ -1538,6 +1551,9 @@ static int local_parse_opts(QemuOpts *opts, FsDriverEntry *fse, Error **errp) "[remap|forbid|warn]\n"); return -1; } + } else { + fse->export_flags &= ~V9FS_FORBID_MULTIDEVS; + fse->export_flags |= V9FS_REMAP_INODES; } if (!path) { @@ -1572,6 +1588,13 @@ static int local_parse_opts(QemuOpts *opts, FsDriverEntry *fse, Error **errp) return 0; } +static bool local_has_valid_file_handle(int fid_type, V9fsFidOpenState *fs) +{ + return + (fid_type == P9_FID_FILE && fs->fd != -1) || + (fid_type == P9_FID_DIR && fs->dir.stream != NULL); +} + FileOperations local_ops = { .parse_opts = local_parse_opts, .init = local_init, @@ -1609,4 +1632,7 @@ FileOperations local_ops = { .name_to_path = local_name_to_path, .renameat = local_renameat, .unlinkat = local_unlinkat, + .has_valid_file_handle = local_has_valid_file_handle, + .ftruncate = local_ftruncate, + .futimens = local_futimens, }; diff --git a/hw/9pfs/9p-proxy.c b/hw/9pfs/9p-proxy.c deleted file mode 100644 index 7aac49ad4ad..00000000000 --- a/hw/9pfs/9p-proxy.c +++ /dev/null @@ -1,1279 +0,0 @@ -/* - * 9p Proxy callback - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * M. Mohan Kumar - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - */ - -/* - * Not so fast! You might want to read the 9p developer docs first: - * https://wiki.qemu.org/Documentation/9p - */ - -/* - * NOTE: The 9p 'proxy' backend is deprecated (since QEMU 8.1) and will be - * removed in a future version of QEMU! - */ - -#include "qemu/osdep.h" -#include -#include -#include "9p.h" -#include "qapi/error.h" -#include "qemu/cutils.h" -#include "qemu/error-report.h" -#include "qemu/option.h" -#include "fsdev/qemu-fsdev.h" -#include "9p-proxy.h" - -typedef struct V9fsProxy { - int sockfd; - QemuMutex mutex; - struct iovec in_iovec; - struct iovec out_iovec; -} V9fsProxy; - -/* - * Return received file descriptor on success in *status. - * errno is also returned on *status (which will be < 0) - * return < 0 on transport error. - */ -static int v9fs_receivefd(int sockfd, int *status) -{ - struct iovec iov; - struct msghdr msg; - struct cmsghdr *cmsg; - int retval, data, fd; - union MsgControl msg_control; - - iov.iov_base = &data; - iov.iov_len = sizeof(data); - - memset(&msg, 0, sizeof(msg)); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - msg.msg_control = &msg_control; - msg.msg_controllen = sizeof(msg_control); - - do { - retval = recvmsg(sockfd, &msg, 0); - } while (retval < 0 && errno == EINTR); - if (retval <= 0) { - return retval; - } - /* - * data is set to V9FS_FD_VALID, if ancillary data is sent. If this - * request doesn't need ancillary data (fd) or an error occurred, - * data is set to negative errno value. - */ - if (data != V9FS_FD_VALID) { - *status = data; - return 0; - } - /* - * File descriptor (fd) is sent in the ancillary data. Check if we - * indeed received it. One of the reasons to fail to receive it is if - * we exceeded the maximum number of file descriptors! 
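v9fs_receivefd() below extracts the descriptor from the SCM_RIGHTS ancillary data of a recvmsg() call. The plumbing, reduced to a standalone receive helper following the cmsg(3) pattern; EINTR handling and the protocol's V9FS_FD_VALID convention are omitted, and the helper name is illustrative:

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Receive one int of normal data plus, optionally, one fd as
     * ancillary data.  Returns the fd, or -1 if none was attached. */
    static int recv_fd(int sockfd, int *data)
    {
        union {
            struct cmsghdr align;
            char buf[CMSG_SPACE(sizeof(int))];
        } ctrl;
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(*data) };
        struct msghdr msg = {
            .msg_iov = &iov,
            .msg_iovlen = 1,
            .msg_control = ctrl.buf,
            .msg_controllen = sizeof(ctrl.buf),
        };
        struct cmsghdr *cmsg;
        int fd = -1;

        if (recvmsg(sockfd, &msg, 0) <= 0) {
            return -1;
        }
        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
            if (cmsg->cmsg_level == SOL_SOCKET &&
                cmsg->cmsg_type == SCM_RIGHTS &&
                cmsg->cmsg_len == CMSG_LEN(sizeof(int))) {
                memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
                break;
            }
        }
        return fd;
    }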
- */ - for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) { - if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)) || - cmsg->cmsg_level != SOL_SOCKET || - cmsg->cmsg_type != SCM_RIGHTS) { - continue; - } - fd = *((int *)CMSG_DATA(cmsg)); - *status = fd; - return 0; - } - *status = -ENFILE; /* Ancillary data sent but not received */ - return 0; -} - -static ssize_t socket_read(int sockfd, void *buff, size_t size) -{ - ssize_t retval, total = 0; - - while (size) { - retval = read(sockfd, buff, size); - if (retval == 0) { - return -EIO; - } - if (retval < 0) { - if (errno == EINTR) { - continue; - } - return -errno; - } - size -= retval; - buff += retval; - total += retval; - } - return total; -} - -/* Converts proxy_statfs to VFS statfs structure */ -static void prstatfs_to_statfs(struct statfs *stfs, ProxyStatFS *prstfs) -{ - memset(stfs, 0, sizeof(*stfs)); - stfs->f_type = prstfs->f_type; - stfs->f_bsize = prstfs->f_bsize; - stfs->f_blocks = prstfs->f_blocks; - stfs->f_bfree = prstfs->f_bfree; - stfs->f_bavail = prstfs->f_bavail; - stfs->f_files = prstfs->f_files; - stfs->f_ffree = prstfs->f_ffree; -#ifdef CONFIG_DARWIN - /* f_namelen and f_frsize do not exist on Darwin */ - stfs->f_fsid.val[0] = prstfs->f_fsid[0] & 0xFFFFFFFFU; - stfs->f_fsid.val[1] = prstfs->f_fsid[1] >> 32 & 0xFFFFFFFFU; -#else - stfs->f_fsid.__val[0] = prstfs->f_fsid[0] & 0xFFFFFFFFU; - stfs->f_fsid.__val[1] = prstfs->f_fsid[1] >> 32 & 0xFFFFFFFFU; - stfs->f_namelen = prstfs->f_namelen; - stfs->f_frsize = prstfs->f_frsize; -#endif -} - -/* Converts proxy_stat structure to VFS stat structure */ -static void prstat_to_stat(struct stat *stbuf, ProxyStat *prstat) -{ - memset(stbuf, 0, sizeof(*stbuf)); - stbuf->st_dev = prstat->st_dev; - stbuf->st_ino = prstat->st_ino; - stbuf->st_nlink = prstat->st_nlink; - stbuf->st_mode = prstat->st_mode; - stbuf->st_uid = prstat->st_uid; - stbuf->st_gid = prstat->st_gid; - stbuf->st_rdev = prstat->st_rdev; - stbuf->st_size = prstat->st_size; - stbuf->st_blksize = prstat->st_blksize; - stbuf->st_blocks = prstat->st_blocks; - stbuf->st_atime = prstat->st_atim_sec; - stbuf->st_mtime = prstat->st_mtim_sec; - stbuf->st_ctime = prstat->st_ctim_sec; -#ifdef CONFIG_DARWIN - stbuf->st_atimespec.tv_sec = prstat->st_atim_sec; - stbuf->st_mtimespec.tv_sec = prstat->st_mtim_sec; - stbuf->st_ctimespec.tv_sec = prstat->st_ctim_sec; - stbuf->st_atimespec.tv_nsec = prstat->st_atim_nsec; - stbuf->st_mtimespec.tv_nsec = prstat->st_mtim_nsec; - stbuf->st_ctimespec.tv_nsec = prstat->st_ctim_nsec; -#else - stbuf->st_atim.tv_sec = prstat->st_atim_sec; - stbuf->st_mtim.tv_sec = prstat->st_mtim_sec; - stbuf->st_ctim.tv_sec = prstat->st_ctim_sec; - stbuf->st_atim.tv_nsec = prstat->st_atim_nsec; - stbuf->st_mtim.tv_nsec = prstat->st_mtim_nsec; - stbuf->st_ctim.tv_nsec = prstat->st_ctim_nsec; -#endif -} - -/* - * Response contains two parts - * {header, data} - * header.type == T_ERROR, data -> -errno - * header.type == T_SUCCESS, data -> response - * size of errno/response is given by header.size - * returns < 0, on transport error. response is - * valid only if status >= 0. 
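socket_read() above insists on filling the whole buffer because the protocol is strictly length-prefixed: a short read would desynchronise header and payload, and EOF mid-record is an error. The usual shape of such a loop, as a standalone sketch:

    #include <errno.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Read exactly `size` bytes; returns size, or -errno (EOF -> -EIO). */
    static ssize_t read_full(int fd, void *buf, size_t size)
    {
        size_t done = 0;

        while (done < size) {
            ssize_t n = read(fd, (char *)buf + done, size - done);
            if (n == 0) {
                return -EIO;            /* peer closed mid-record */
            }
            if (n < 0) {
                if (errno == EINTR) {
                    continue;           /* interrupted by a signal: retry */
                }
                return -errno;
            }
            done += n;
        }
        return done;
    }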
- */ -static int v9fs_receive_response(V9fsProxy *proxy, int type, - int *status, void *response) -{ - int retval; - ProxyHeader header; - struct iovec *reply = &proxy->in_iovec; - - *status = 0; - reply->iov_len = 0; - retval = socket_read(proxy->sockfd, reply->iov_base, PROXY_HDR_SZ); - if (retval < 0) { - return retval; - } - reply->iov_len = PROXY_HDR_SZ; - retval = proxy_unmarshal(reply, 0, "dd", &header.type, &header.size); - assert(retval == 4 * 2); - /* - * if response size > PROXY_MAX_IO_SZ, read the response but ignore it and - * return -ENOBUFS - */ - if (header.size > PROXY_MAX_IO_SZ) { - int count; - while (header.size > 0) { - count = MIN(PROXY_MAX_IO_SZ, header.size); - count = socket_read(proxy->sockfd, reply->iov_base, count); - if (count < 0) { - return count; - } - header.size -= count; - } - *status = -ENOBUFS; - return 0; - } - - retval = socket_read(proxy->sockfd, - reply->iov_base + PROXY_HDR_SZ, header.size); - if (retval < 0) { - return retval; - } - reply->iov_len += header.size; - /* there was an error during processing request */ - if (header.type == T_ERROR) { - int ret; - ret = proxy_unmarshal(reply, PROXY_HDR_SZ, "d", status); - assert(ret == 4); - return 0; - } - - switch (type) { - case T_LSTAT: { - ProxyStat prstat; - retval = proxy_unmarshal(reply, PROXY_HDR_SZ, - "qqqdddqqqqqqqqqq", &prstat.st_dev, - &prstat.st_ino, &prstat.st_nlink, - &prstat.st_mode, &prstat.st_uid, - &prstat.st_gid, &prstat.st_rdev, - &prstat.st_size, &prstat.st_blksize, - &prstat.st_blocks, - &prstat.st_atim_sec, &prstat.st_atim_nsec, - &prstat.st_mtim_sec, &prstat.st_mtim_nsec, - &prstat.st_ctim_sec, &prstat.st_ctim_nsec); - assert(retval == 8 * 3 + 4 * 3 + 8 * 10); - prstat_to_stat(response, &prstat); - break; - } - case T_STATFS: { - ProxyStatFS prstfs; - retval = proxy_unmarshal(reply, PROXY_HDR_SZ, - "qqqqqqqqqqq", &prstfs.f_type, - &prstfs.f_bsize, &prstfs.f_blocks, - &prstfs.f_bfree, &prstfs.f_bavail, - &prstfs.f_files, &prstfs.f_ffree, - &prstfs.f_fsid[0], &prstfs.f_fsid[1], - &prstfs.f_namelen, &prstfs.f_frsize); - assert(retval == 8 * 11); - prstatfs_to_statfs(response, &prstfs); - break; - } - case T_READLINK: { - V9fsString target; - v9fs_string_init(&target); - retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "s", &target); - strcpy(response, target.data); - v9fs_string_free(&target); - break; - } - case T_LGETXATTR: - case T_LLISTXATTR: { - V9fsString xattr; - v9fs_string_init(&xattr); - retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "s", &xattr); - memcpy(response, xattr.data, xattr.size); - v9fs_string_free(&xattr); - break; - } - case T_GETVERSION: - retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "q", response); - assert(retval == 8); - break; - default: - return -1; - } - if (retval < 0) { - *status = retval; - } - return 0; -} - -/* - * return < 0 on transport error. 
- * *status is valid only if return >= 0 - */ -static int v9fs_receive_status(V9fsProxy *proxy, - struct iovec *reply, int *status) -{ - int retval; - ProxyHeader header; - - *status = 0; - reply->iov_len = 0; - retval = socket_read(proxy->sockfd, reply->iov_base, PROXY_HDR_SZ); - if (retval < 0) { - return retval; - } - reply->iov_len = PROXY_HDR_SZ; - retval = proxy_unmarshal(reply, 0, "dd", &header.type, &header.size); - assert(retval == 4 * 2); - retval = socket_read(proxy->sockfd, - reply->iov_base + PROXY_HDR_SZ, header.size); - if (retval < 0) { - return retval; - } - reply->iov_len += header.size; - retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "d", status); - assert(retval == 4); - return 0; -} - -/* - * Proxy->header and proxy->request written to socket by QEMU process. - * This request read by proxy helper process - * returns 0 on success and -errno on error - */ -static int v9fs_request(V9fsProxy *proxy, int type, void *response, ...) -{ - dev_t rdev; - va_list ap; - int size = 0; - int retval = 0; - uint64_t offset; - ProxyHeader header = { 0, 0}; - struct timespec spec[2]; - int flags, mode, uid, gid; - V9fsString *name, *value; - V9fsString *path, *oldpath; - struct iovec *iovec = NULL, *reply = NULL; - - qemu_mutex_lock(&proxy->mutex); - - if (proxy->sockfd == -1) { - retval = -EIO; - goto err_out; - } - iovec = &proxy->out_iovec; - reply = &proxy->in_iovec; - va_start(ap, response); - switch (type) { - case T_OPEN: - path = va_arg(ap, V9fsString *); - flags = va_arg(ap, int); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sd", path, flags); - if (retval > 0) { - header.size = retval; - header.type = T_OPEN; - } - break; - case T_CREATE: - path = va_arg(ap, V9fsString *); - flags = va_arg(ap, int); - mode = va_arg(ap, int); - uid = va_arg(ap, int); - gid = va_arg(ap, int); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sdddd", path, - flags, mode, uid, gid); - if (retval > 0) { - header.size = retval; - header.type = T_CREATE; - } - break; - case T_MKNOD: - path = va_arg(ap, V9fsString *); - mode = va_arg(ap, int); - rdev = va_arg(ap, long int); - uid = va_arg(ap, int); - gid = va_arg(ap, int); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ddsdq", - uid, gid, path, mode, rdev); - if (retval > 0) { - header.size = retval; - header.type = T_MKNOD; - } - break; - case T_MKDIR: - path = va_arg(ap, V9fsString *); - mode = va_arg(ap, int); - uid = va_arg(ap, int); - gid = va_arg(ap, int); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ddsd", - uid, gid, path, mode); - if (retval > 0) { - header.size = retval; - header.type = T_MKDIR; - } - break; - case T_SYMLINK: - oldpath = va_arg(ap, V9fsString *); - path = va_arg(ap, V9fsString *); - uid = va_arg(ap, int); - gid = va_arg(ap, int); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ddss", - uid, gid, oldpath, path); - if (retval > 0) { - header.size = retval; - header.type = T_SYMLINK; - } - break; - case T_LINK: - oldpath = va_arg(ap, V9fsString *); - path = va_arg(ap, V9fsString *); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ss", - oldpath, path); - if (retval > 0) { - header.size = retval; - header.type = T_LINK; - } - break; - case T_LSTAT: - path = va_arg(ap, V9fsString *); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "s", path); - if (retval > 0) { - header.size = retval; - header.type = T_LSTAT; - } - break; - case T_READLINK: - path = va_arg(ap, V9fsString *); - size = va_arg(ap, int); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sd", path, size); - if (retval > 0) { - header.size = retval; - header.type = 
T_READLINK; - } - break; - case T_STATFS: - path = va_arg(ap, V9fsString *); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "s", path); - if (retval > 0) { - header.size = retval; - header.type = T_STATFS; - } - break; - case T_CHMOD: - path = va_arg(ap, V9fsString *); - mode = va_arg(ap, int); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sd", path, mode); - if (retval > 0) { - header.size = retval; - header.type = T_CHMOD; - } - break; - case T_CHOWN: - path = va_arg(ap, V9fsString *); - uid = va_arg(ap, int); - gid = va_arg(ap, int); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sdd", path, uid, gid); - if (retval > 0) { - header.size = retval; - header.type = T_CHOWN; - } - break; - case T_TRUNCATE: - path = va_arg(ap, V9fsString *); - offset = va_arg(ap, uint64_t); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sq", path, offset); - if (retval > 0) { - header.size = retval; - header.type = T_TRUNCATE; - } - break; - case T_UTIME: - path = va_arg(ap, V9fsString *); - spec[0].tv_sec = va_arg(ap, long); - spec[0].tv_nsec = va_arg(ap, long); - spec[1].tv_sec = va_arg(ap, long); - spec[1].tv_nsec = va_arg(ap, long); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sqqqq", path, - spec[0].tv_sec, spec[1].tv_nsec, - spec[1].tv_sec, spec[1].tv_nsec); - if (retval > 0) { - header.size = retval; - header.type = T_UTIME; - } - break; - case T_RENAME: - oldpath = va_arg(ap, V9fsString *); - path = va_arg(ap, V9fsString *); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ss", oldpath, path); - if (retval > 0) { - header.size = retval; - header.type = T_RENAME; - } - break; - case T_REMOVE: - path = va_arg(ap, V9fsString *); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "s", path); - if (retval > 0) { - header.size = retval; - header.type = T_REMOVE; - } - break; - case T_LGETXATTR: - size = va_arg(ap, int); - path = va_arg(ap, V9fsString *); - name = va_arg(ap, V9fsString *); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, - "dss", size, path, name); - if (retval > 0) { - header.size = retval; - header.type = T_LGETXATTR; - } - break; - case T_LLISTXATTR: - size = va_arg(ap, int); - path = va_arg(ap, V9fsString *); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ds", size, path); - if (retval > 0) { - header.size = retval; - header.type = T_LLISTXATTR; - } - break; - case T_LSETXATTR: - path = va_arg(ap, V9fsString *); - name = va_arg(ap, V9fsString *); - value = va_arg(ap, V9fsString *); - size = va_arg(ap, int); - flags = va_arg(ap, int); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sssdd", - path, name, value, size, flags); - if (retval > 0) { - header.size = retval; - header.type = T_LSETXATTR; - } - break; - case T_LREMOVEXATTR: - path = va_arg(ap, V9fsString *); - name = va_arg(ap, V9fsString *); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ss", path, name); - if (retval > 0) { - header.size = retval; - header.type = T_LREMOVEXATTR; - } - break; - case T_GETVERSION: - path = va_arg(ap, V9fsString *); - retval = proxy_marshal(iovec, PROXY_HDR_SZ, "s", path); - if (retval > 0) { - header.size = retval; - header.type = T_GETVERSION; - } - break; - default: - error_report("Invalid type %d", type); - retval = -EINVAL; - break; - } - va_end(ap); - - if (retval < 0) { - goto err_out; - } - - /* marshal the header details */ - retval = proxy_marshal(iovec, 0, "dd", header.type, header.size); - assert(retval == 4 * 2); - header.size += PROXY_HDR_SZ; - - retval = qemu_write_full(proxy->sockfd, iovec->iov_base, header.size); - if (retval != header.size) { - goto close_error; - } - - switch (type) { - 
case T_OPEN: - case T_CREATE: - /* - * A file descriptor is returned as response for - * T_OPEN,T_CREATE on success - */ - if (v9fs_receivefd(proxy->sockfd, &retval) < 0) { - goto close_error; - } - break; - case T_MKNOD: - case T_MKDIR: - case T_SYMLINK: - case T_LINK: - case T_CHMOD: - case T_CHOWN: - case T_RENAME: - case T_TRUNCATE: - case T_UTIME: - case T_REMOVE: - case T_LSETXATTR: - case T_LREMOVEXATTR: - if (v9fs_receive_status(proxy, reply, &retval) < 0) { - goto close_error; - } - break; - case T_LSTAT: - case T_READLINK: - case T_STATFS: - case T_GETVERSION: - if (v9fs_receive_response(proxy, type, &retval, response) < 0) { - goto close_error; - } - break; - case T_LGETXATTR: - case T_LLISTXATTR: - if (!size) { - if (v9fs_receive_status(proxy, reply, &retval) < 0) { - goto close_error; - } - } else { - if (v9fs_receive_response(proxy, type, &retval, response) < 0) { - goto close_error; - } - } - break; - } - -err_out: - qemu_mutex_unlock(&proxy->mutex); - return retval; - -close_error: - close(proxy->sockfd); - proxy->sockfd = -1; - qemu_mutex_unlock(&proxy->mutex); - return -EIO; -} - -static int proxy_lstat(FsContext *fs_ctx, V9fsPath *fs_path, struct stat *stbuf) -{ - int retval; - retval = v9fs_request(fs_ctx->private, T_LSTAT, stbuf, fs_path); - if (retval < 0) { - errno = -retval; - return -1; - } - return retval; -} - -static ssize_t proxy_readlink(FsContext *fs_ctx, V9fsPath *fs_path, - char *buf, size_t bufsz) -{ - int retval; - retval = v9fs_request(fs_ctx->private, T_READLINK, buf, fs_path, bufsz); - if (retval < 0) { - errno = -retval; - return -1; - } - return strlen(buf); -} - -static int proxy_close(FsContext *ctx, V9fsFidOpenState *fs) -{ - return close(fs->fd); -} - -static int proxy_closedir(FsContext *ctx, V9fsFidOpenState *fs) -{ - return closedir(fs->dir.stream); -} - -static int proxy_open(FsContext *ctx, V9fsPath *fs_path, - int flags, V9fsFidOpenState *fs) -{ - fs->fd = v9fs_request(ctx->private, T_OPEN, NULL, fs_path, flags); - if (fs->fd < 0) { - errno = -fs->fd; - fs->fd = -1; - } - return fs->fd; -} - -static int proxy_opendir(FsContext *ctx, - V9fsPath *fs_path, V9fsFidOpenState *fs) -{ - int serrno, fd; - - fs->dir.stream = NULL; - fd = v9fs_request(ctx->private, T_OPEN, NULL, fs_path, O_DIRECTORY); - if (fd < 0) { - errno = -fd; - return -1; - } - fs->dir.stream = fdopendir(fd); - if (!fs->dir.stream) { - serrno = errno; - close(fd); - errno = serrno; - return -1; - } - return 0; -} - -static void proxy_rewinddir(FsContext *ctx, V9fsFidOpenState *fs) -{ - rewinddir(fs->dir.stream); -} - -static off_t proxy_telldir(FsContext *ctx, V9fsFidOpenState *fs) -{ - return telldir(fs->dir.stream); -} - -static struct dirent *proxy_readdir(FsContext *ctx, V9fsFidOpenState *fs) -{ - struct dirent *entry; - entry = readdir(fs->dir.stream); -#ifdef CONFIG_DARWIN - if (!entry) { - return NULL; - } - int td; - td = telldir(fs->dir.stream); - /* If telldir fails, fail the entire readdir call */ - if (td < 0) { - return NULL; - } - entry->d_seekoff = td; -#endif - return entry; -} - -static void proxy_seekdir(FsContext *ctx, V9fsFidOpenState *fs, off_t off) -{ - seekdir(fs->dir.stream, off); -} - -static ssize_t proxy_preadv(FsContext *ctx, V9fsFidOpenState *fs, - const struct iovec *iov, - int iovcnt, off_t offset) -{ - ssize_t ret; -#ifdef CONFIG_PREADV - ret = preadv(fs->fd, iov, iovcnt, offset); -#else - ret = lseek(fs->fd, offset, SEEK_SET); - if (ret >= 0) { - ret = readv(fs->fd, iov, iovcnt); - } -#endif - return ret; -} - -static ssize_t 
proxy_pwritev(FsContext *ctx, V9fsFidOpenState *fs, - const struct iovec *iov, - int iovcnt, off_t offset) -{ - ssize_t ret; - -#ifdef CONFIG_PREADV - ret = pwritev(fs->fd, iov, iovcnt, offset); -#else - ret = lseek(fs->fd, offset, SEEK_SET); - if (ret >= 0) { - ret = writev(fs->fd, iov, iovcnt); - } -#endif -#ifdef CONFIG_SYNC_FILE_RANGE - if (ret > 0 && ctx->export_flags & V9FS_IMMEDIATE_WRITEOUT) { - /* - * Initiate a writeback. This is not a data integrity sync. - * We want to ensure that we don't leave dirty pages in the cache - * after write when writeout=immediate is specified. - */ - sync_file_range(fs->fd, offset, ret, - SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE); - } -#endif - return ret; -} - -static int proxy_chmod(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp) -{ - int retval; - retval = v9fs_request(fs_ctx->private, T_CHMOD, NULL, fs_path, - credp->fc_mode); - if (retval < 0) { - errno = -retval; - } - return retval; -} - -static int proxy_mknod(FsContext *fs_ctx, V9fsPath *dir_path, - const char *name, FsCred *credp) -{ - int retval; - V9fsString fullname; - - v9fs_string_init(&fullname); - v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name); - - retval = v9fs_request(fs_ctx->private, T_MKNOD, NULL, &fullname, - credp->fc_mode, credp->fc_rdev, - credp->fc_uid, credp->fc_gid); - v9fs_string_free(&fullname); - if (retval < 0) { - errno = -retval; - retval = -1; - } - return retval; -} - -static int proxy_mkdir(FsContext *fs_ctx, V9fsPath *dir_path, - const char *name, FsCred *credp) -{ - int retval; - V9fsString fullname; - - v9fs_string_init(&fullname); - v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name); - - retval = v9fs_request(fs_ctx->private, T_MKDIR, NULL, &fullname, - credp->fc_mode, credp->fc_uid, credp->fc_gid); - v9fs_string_free(&fullname); - if (retval < 0) { - errno = -retval; - retval = -1; - } - return retval; -} - -static int proxy_fstat(FsContext *fs_ctx, int fid_type, - V9fsFidOpenState *fs, struct stat *stbuf) -{ - int fd; - - if (fid_type == P9_FID_DIR) { - fd = dirfd(fs->dir.stream); - } else { - fd = fs->fd; - } - return fstat(fd, stbuf); -} - -static int proxy_open2(FsContext *fs_ctx, V9fsPath *dir_path, const char *name, - int flags, FsCred *credp, V9fsFidOpenState *fs) -{ - V9fsString fullname; - - v9fs_string_init(&fullname); - v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name); - - fs->fd = v9fs_request(fs_ctx->private, T_CREATE, NULL, &fullname, flags, - credp->fc_mode, credp->fc_uid, credp->fc_gid); - v9fs_string_free(&fullname); - if (fs->fd < 0) { - errno = -fs->fd; - fs->fd = -1; - } - return fs->fd; -} - -static int proxy_symlink(FsContext *fs_ctx, const char *oldpath, - V9fsPath *dir_path, const char *name, FsCred *credp) -{ - int retval; - V9fsString fullname, target; - - v9fs_string_init(&fullname); - v9fs_string_init(&target); - - v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name); - v9fs_string_sprintf(&target, "%s", oldpath); - - retval = v9fs_request(fs_ctx->private, T_SYMLINK, NULL, &target, &fullname, - credp->fc_uid, credp->fc_gid); - v9fs_string_free(&fullname); - v9fs_string_free(&target); - if (retval < 0) { - errno = -retval; - retval = -1; - } - return retval; -} - -static int proxy_link(FsContext *ctx, V9fsPath *oldpath, - V9fsPath *dirpath, const char *name) -{ - int retval; - V9fsString newpath; - - v9fs_string_init(&newpath); - v9fs_string_sprintf(&newpath, "%s/%s", dirpath->data, name); - - retval = v9fs_request(ctx->private, T_LINK, NULL, oldpath, &newpath); - 
v9fs_string_free(&newpath); - if (retval < 0) { - errno = -retval; - retval = -1; - } - return retval; -} - -static int proxy_truncate(FsContext *ctx, V9fsPath *fs_path, off_t size) -{ - int retval; - - retval = v9fs_request(ctx->private, T_TRUNCATE, NULL, fs_path, size); - if (retval < 0) { - errno = -retval; - return -1; - } - return 0; -} - -static int proxy_rename(FsContext *ctx, const char *oldpath, - const char *newpath) -{ - int retval; - V9fsString oldname, newname; - - v9fs_string_init(&oldname); - v9fs_string_init(&newname); - - v9fs_string_sprintf(&oldname, "%s", oldpath); - v9fs_string_sprintf(&newname, "%s", newpath); - retval = v9fs_request(ctx->private, T_RENAME, NULL, &oldname, &newname); - v9fs_string_free(&oldname); - v9fs_string_free(&newname); - if (retval < 0) { - errno = -retval; - } - return retval; -} - -static int proxy_chown(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp) -{ - int retval; - retval = v9fs_request(fs_ctx->private, T_CHOWN, NULL, fs_path, - credp->fc_uid, credp->fc_gid); - if (retval < 0) { - errno = -retval; - } - return retval; -} - -static int proxy_utimensat(FsContext *s, V9fsPath *fs_path, - const struct timespec *buf) -{ - int retval; - retval = v9fs_request(s->private, T_UTIME, NULL, fs_path, - buf[0].tv_sec, buf[0].tv_nsec, - buf[1].tv_sec, buf[1].tv_nsec); - if (retval < 0) { - errno = -retval; - } - return retval; -} - -static int proxy_remove(FsContext *ctx, const char *path) -{ - int retval; - V9fsString name; - v9fs_string_init(&name); - v9fs_string_sprintf(&name, "%s", path); - retval = v9fs_request(ctx->private, T_REMOVE, NULL, &name); - v9fs_string_free(&name); - if (retval < 0) { - errno = -retval; - } - return retval; -} - -static int proxy_fsync(FsContext *ctx, int fid_type, - V9fsFidOpenState *fs, int datasync) -{ - int fd; - - if (fid_type == P9_FID_DIR) { - fd = dirfd(fs->dir.stream); - } else { - fd = fs->fd; - } - - if (datasync) { - return qemu_fdatasync(fd); - } else { - return fsync(fd); - } -} - -static int proxy_statfs(FsContext *s, V9fsPath *fs_path, struct statfs *stbuf) -{ - int retval; - retval = v9fs_request(s->private, T_STATFS, stbuf, fs_path); - if (retval < 0) { - errno = -retval; - return -1; - } - return retval; -} - -static ssize_t proxy_lgetxattr(FsContext *ctx, V9fsPath *fs_path, - const char *name, void *value, size_t size) -{ - int retval; - V9fsString xname; - - v9fs_string_init(&xname); - v9fs_string_sprintf(&xname, "%s", name); - retval = v9fs_request(ctx->private, T_LGETXATTR, value, size, fs_path, - &xname); - v9fs_string_free(&xname); - if (retval < 0) { - errno = -retval; - } - return retval; -} - -static ssize_t proxy_llistxattr(FsContext *ctx, V9fsPath *fs_path, - void *value, size_t size) -{ - int retval; - retval = v9fs_request(ctx->private, T_LLISTXATTR, value, size, fs_path); - if (retval < 0) { - errno = -retval; - } - return retval; -} - -static int proxy_lsetxattr(FsContext *ctx, V9fsPath *fs_path, const char *name, - void *value, size_t size, int flags) -{ - int retval; - V9fsString xname, xvalue; - - v9fs_string_init(&xname); - v9fs_string_sprintf(&xname, "%s", name); - - v9fs_string_init(&xvalue); - xvalue.size = size; - xvalue.data = g_malloc(size); - memcpy(xvalue.data, value, size); - - retval = v9fs_request(ctx->private, T_LSETXATTR, value, fs_path, &xname, - &xvalue, size, flags); - v9fs_string_free(&xname); - v9fs_string_free(&xvalue); - if (retval < 0) { - errno = -retval; - } - return retval; -} - -static int proxy_lremovexattr(FsContext *ctx, V9fsPath *fs_path, - const char 
*name) -{ - int retval; - V9fsString xname; - - v9fs_string_init(&xname); - v9fs_string_sprintf(&xname, "%s", name); - retval = v9fs_request(ctx->private, T_LREMOVEXATTR, NULL, fs_path, &xname); - v9fs_string_free(&xname); - if (retval < 0) { - errno = -retval; - } - return retval; -} - -static int proxy_name_to_path(FsContext *ctx, V9fsPath *dir_path, - const char *name, V9fsPath *target) -{ - if (dir_path) { - v9fs_path_sprintf(target, "%s/%s", dir_path->data, name); - } else { - v9fs_path_sprintf(target, "%s", name); - } - return 0; -} - -static int proxy_renameat(FsContext *ctx, V9fsPath *olddir, - const char *old_name, V9fsPath *newdir, - const char *new_name) -{ - int ret; - V9fsString old_full_name, new_full_name; - - v9fs_string_init(&old_full_name); - v9fs_string_init(&new_full_name); - - v9fs_string_sprintf(&old_full_name, "%s/%s", olddir->data, old_name); - v9fs_string_sprintf(&new_full_name, "%s/%s", newdir->data, new_name); - - ret = proxy_rename(ctx, old_full_name.data, new_full_name.data); - v9fs_string_free(&old_full_name); - v9fs_string_free(&new_full_name); - return ret; -} - -static int proxy_unlinkat(FsContext *ctx, V9fsPath *dir, - const char *name, int flags) -{ - int ret; - V9fsString fullname; - v9fs_string_init(&fullname); - - v9fs_string_sprintf(&fullname, "%s/%s", dir->data, name); - ret = proxy_remove(ctx, fullname.data); - v9fs_string_free(&fullname); - - return ret; -} - -static int proxy_ioc_getversion(FsContext *fs_ctx, V9fsPath *path, - mode_t st_mode, uint64_t *st_gen) -{ - int err; - - /* Do not try to open special files like device nodes, fifos etc - * we can get fd for regular files and directories only - */ - if (!S_ISREG(st_mode) && !S_ISDIR(st_mode)) { - errno = ENOTTY; - return -1; - } - err = v9fs_request(fs_ctx->private, T_GETVERSION, st_gen, path); - if (err < 0) { - errno = -err; - err = -1; - } - return err; -} - -static int connect_namedsocket(const char *path, Error **errp) -{ - int sockfd; - struct sockaddr_un helper; - - if (strlen(path) >= sizeof(helper.sun_path)) { - error_setg(errp, "socket name too long"); - return -1; - } - sockfd = socket(AF_UNIX, SOCK_STREAM, 0); - if (sockfd < 0) { - error_setg_errno(errp, errno, "failed to create client socket"); - return -1; - } - strcpy(helper.sun_path, path); - helper.sun_family = AF_UNIX; - if (connect(sockfd, (struct sockaddr *)&helper, sizeof(helper)) < 0) { - error_setg_errno(errp, errno, "failed to connect to '%s'", path); - close(sockfd); - return -1; - } - - /* remove the socket for security reasons */ - unlink(path); - return sockfd; -} - -static void error_append_socket_sockfd_hint(Error *const *errp) -{ - error_append_hint(errp, "Either specify socket=/some/path where /some/path" - " points to a listening AF_UNIX socket or sock_fd=fd" - " where fd is a file descriptor to a connected AF_UNIX" - " socket\n"); -} - -static int proxy_parse_opts(QemuOpts *opts, FsDriverEntry *fs, Error **errp) -{ - const char *socket = qemu_opt_get(opts, "socket"); - const char *sock_fd = qemu_opt_get(opts, "sock_fd"); - - if (!socket && !sock_fd) { - error_setg(errp, "both socket and sock_fd properties are missing"); - error_append_socket_sockfd_hint(errp); - return -1; - } - if (socket && sock_fd) { - error_setg(errp, "both socket and sock_fd properties are set"); - error_append_socket_sockfd_hint(errp); - return -1; - } - if (socket) { - fs->path = g_strdup(socket); - fs->export_flags |= V9FS_PROXY_SOCK_NAME; - } else { - fs->path = g_strdup(sock_fd); - fs->export_flags |= V9FS_PROXY_SOCK_FD; - } - 
return 0; -} - -static int proxy_init(FsContext *ctx, Error **errp) -{ - V9fsProxy *proxy = g_new(V9fsProxy, 1); - int sock_id; - - if (ctx->export_flags & V9FS_PROXY_SOCK_NAME) { - sock_id = connect_namedsocket(ctx->fs_root, errp); - } else { - sock_id = atoi(ctx->fs_root); - if (sock_id < 0) { - error_setg(errp, "socket descriptor not initialized"); - } - } - if (sock_id < 0) { - g_free(proxy); - return -1; - } - g_free(ctx->fs_root); - ctx->fs_root = NULL; - - proxy->in_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ); - proxy->in_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ; - proxy->out_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ); - proxy->out_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ; - - ctx->private = proxy; - proxy->sockfd = sock_id; - qemu_mutex_init(&proxy->mutex); - - ctx->export_flags |= V9FS_PATHNAME_FSCONTEXT; - ctx->exops.get_st_gen = proxy_ioc_getversion; - return 0; -} - -static void proxy_cleanup(FsContext *ctx) -{ - V9fsProxy *proxy = ctx->private; - - if (!proxy) { - return; - } - - g_free(proxy->out_iovec.iov_base); - g_free(proxy->in_iovec.iov_base); - if (ctx->export_flags & V9FS_PROXY_SOCK_NAME) { - close(proxy->sockfd); - } - g_free(proxy); -} - -FileOperations proxy_ops = { - .parse_opts = proxy_parse_opts, - .init = proxy_init, - .cleanup = proxy_cleanup, - .lstat = proxy_lstat, - .readlink = proxy_readlink, - .close = proxy_close, - .closedir = proxy_closedir, - .open = proxy_open, - .opendir = proxy_opendir, - .rewinddir = proxy_rewinddir, - .telldir = proxy_telldir, - .readdir = proxy_readdir, - .seekdir = proxy_seekdir, - .preadv = proxy_preadv, - .pwritev = proxy_pwritev, - .chmod = proxy_chmod, - .mknod = proxy_mknod, - .mkdir = proxy_mkdir, - .fstat = proxy_fstat, - .open2 = proxy_open2, - .symlink = proxy_symlink, - .link = proxy_link, - .truncate = proxy_truncate, - .rename = proxy_rename, - .chown = proxy_chown, - .utimensat = proxy_utimensat, - .remove = proxy_remove, - .fsync = proxy_fsync, - .statfs = proxy_statfs, - .lgetxattr = proxy_lgetxattr, - .llistxattr = proxy_llistxattr, - .lsetxattr = proxy_lsetxattr, - .lremovexattr = proxy_lremovexattr, - .name_to_path = proxy_name_to_path, - .renameat = proxy_renameat, - .unlinkat = proxy_unlinkat, -}; diff --git a/hw/9pfs/9p-proxy.h b/hw/9pfs/9p-proxy.h deleted file mode 100644 index 9be4718d3e2..00000000000 --- a/hw/9pfs/9p-proxy.h +++ /dev/null @@ -1,101 +0,0 @@ -/* - * 9p Proxy callback - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * M. Mohan Kumar - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - */ - -/* - * NOTE: The 9p 'proxy' backend is deprecated (since QEMU 8.1) and will be - * removed in a future version of QEMU! - */ - -#ifndef QEMU_9P_PROXY_H -#define QEMU_9P_PROXY_H - -#define PROXY_MAX_IO_SZ (64 * 1024) -#define V9FS_FD_VALID INT_MAX - -/* - * proxy iovec only support one element and - * marsha/unmarshal doesn't do little endian conversion. - */ -#define proxy_unmarshal(in_sg, offset, fmt, args...) \ - v9fs_iov_unmarshal(in_sg, 1, offset, 0, fmt, ##args) -#define proxy_marshal(out_sg, offset, fmt, args...) 
\ - v9fs_iov_marshal(out_sg, 1, offset, 0, fmt, ##args) - -union MsgControl { - struct cmsghdr cmsg; - char control[CMSG_SPACE(sizeof(int))]; -}; - -typedef struct { - uint32_t type; - uint32_t size; -} ProxyHeader; - -#define PROXY_HDR_SZ (sizeof(ProxyHeader)) - -enum { - T_SUCCESS = 0, - T_ERROR, - T_OPEN, - T_CREATE, - T_MKNOD, - T_MKDIR, - T_SYMLINK, - T_LINK, - T_LSTAT, - T_READLINK, - T_STATFS, - T_CHMOD, - T_CHOWN, - T_TRUNCATE, - T_UTIME, - T_RENAME, - T_REMOVE, - T_LGETXATTR, - T_LLISTXATTR, - T_LSETXATTR, - T_LREMOVEXATTR, - T_GETVERSION, -}; - -typedef struct { - uint64_t st_dev; - uint64_t st_ino; - uint64_t st_nlink; - uint32_t st_mode; - uint32_t st_uid; - uint32_t st_gid; - uint64_t st_rdev; - uint64_t st_size; - uint64_t st_blksize; - uint64_t st_blocks; - uint64_t st_atim_sec; - uint64_t st_atim_nsec; - uint64_t st_mtim_sec; - uint64_t st_mtim_nsec; - uint64_t st_ctim_sec; - uint64_t st_ctim_nsec; -} ProxyStat; - -typedef struct { - uint64_t f_type; - uint64_t f_bsize; - uint64_t f_blocks; - uint64_t f_bfree; - uint64_t f_bavail; - uint64_t f_files; - uint64_t f_ffree; - uint64_t f_fsid[2]; - uint64_t f_namelen; - uint64_t f_frsize; -} ProxyStatFS; -#endif diff --git a/hw/9pfs/9p-synth.c b/hw/9pfs/9p-synth.c index 0ac79a500b4..9cd18842242 100644 --- a/hw/9pfs/9p-synth.c +++ b/hw/9pfs/9p-synth.c @@ -24,7 +24,7 @@ #include "qemu/rcu.h" #include "qemu/rcu_queue.h" #include "qemu/cutils.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" /* Root node for synth file system */ static V9fsSynthNode synth_root = { @@ -356,6 +356,13 @@ static int synth_truncate(FsContext *ctx, V9fsPath *path, off_t offset) return -1; } +static int synth_ftruncate(FsContext *ctx, int fid_type, V9fsFidOpenState *fs, + off_t size) +{ + errno = ENOSYS; + return -1; +} + static int synth_chmod(FsContext *fs_ctx, V9fsPath *path, FsCred *credp) { errno = EPERM; @@ -417,6 +424,13 @@ static int synth_utimensat(FsContext *fs_ctx, V9fsPath *path, return 0; } +static int synth_futimens(FsContext *fs_ctx, int fid_type, V9fsFidOpenState *fs, + const struct timespec *buf) +{ + errno = ENOSYS; + return -1; +} + static int synth_remove(FsContext *ctx, const char *path) { errno = EPERM; @@ -615,6 +629,11 @@ static int synth_init(FsContext *ctx, Error **errp) return 0; } +static bool synth_has_valid_file_handle(int fid_type, V9fsFidOpenState *fs) +{ + return false; +} + FileOperations synth_ops = { .init = synth_init, .lstat = synth_lstat, @@ -650,4 +669,7 @@ FileOperations synth_ops = { .name_to_path = synth_name_to_path, .renameat = synth_renameat, .unlinkat = synth_unlinkat, + .has_valid_file_handle = synth_has_valid_file_handle, + .ftruncate = synth_ftruncate, + .futimens = synth_futimens, }; diff --git a/hw/9pfs/9p-util-generic.c b/hw/9pfs/9p-util-generic.c new file mode 100644 index 00000000000..4c1e9c887d0 --- /dev/null +++ b/hw/9pfs/9p-util-generic.c @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "9p-util.h" +#include + +char *qemu_open_flags_tostr(int flags) +{ + int acc = flags & O_ACCMODE; + return g_strconcat( + (acc == O_WRONLY) ? "WRONLY" : (acc == O_RDONLY) ? "RDONLY" : "RDWR", + (flags & O_CREAT) ? "|CREAT" : "", + (flags & O_EXCL) ? "|EXCL" : "", + (flags & O_NOCTTY) ? "|NOCTTY" : "", + (flags & O_TRUNC) ? "|TRUNC" : "", + (flags & O_APPEND) ? "|APPEND" : "", + (flags & O_NONBLOCK) ? "|NONBLOCK" : "", + (flags & O_DSYNC) ? "|DSYNC" : "", + #ifdef O_DIRECT + (flags & O_DIRECT) ? "|DIRECT" : "", + #endif + (flags & O_LARGEFILE) ? 
"|LARGEFILE" : "", + (flags & O_DIRECTORY) ? "|DIRECTORY" : "", + (flags & O_NOFOLLOW) ? "|NOFOLLOW" : "", + #ifdef O_NOATIME + (flags & O_NOATIME) ? "|NOATIME" : "", + #endif + #ifdef O_CLOEXEC + (flags & O_CLOEXEC) ? "|CLOEXEC" : "", + #endif + #ifdef __O_SYNC + (flags & __O_SYNC) ? "|SYNC" : "", + #else + ((flags & O_SYNC) == O_SYNC) ? "|SYNC" : "", + #endif + #ifdef O_PATH + (flags & O_PATH) ? "|PATH" : "", + #endif + #ifdef __O_TMPFILE + (flags & __O_TMPFILE) ? "|TMPFILE" : "", + #elif defined(O_TMPFILE) + ((flags & O_TMPFILE) == O_TMPFILE) ? "|TMPFILE" : "", + #endif + /* O_NDELAY is usually just an alias of O_NONBLOCK */ + #if defined(O_NDELAY) && O_NDELAY != O_NONBLOCK + (flags & O_NDELAY) ? "|NDELAY" : "", + #endif + NULL /* always last (required NULL termination) */ + ); +} diff --git a/hw/9pfs/9p-util.h b/hw/9pfs/9p-util.h index 51c94b01162..a1924fe3f05 100644 --- a/hw/9pfs/9p-util.h +++ b/hw/9pfs/9p-util.h @@ -103,6 +103,7 @@ static inline int errno_to_dotl(int err) { #define qemu_renameat renameat #define qemu_utimensat utimensat #define qemu_unlinkat unlinkat +#define qemu_futimens futimens static inline void close_preserve_errno(int fd) { @@ -177,20 +178,27 @@ static inline int openat_file(int dirfd, const char *name, int flags, return -1; } - if (close_if_special_file(fd) < 0) { - return -1; - } - - serrno = errno; - /* O_NONBLOCK was only needed to open the file. Let's drop it. We don't - * do that with O_PATH since fcntl(F_SETFL) isn't supported, and openat() - * ignored it anyway. - */ + /* Only if O_PATH is not set ... */ if (!(flags & O_PATH_9P_UTIL)) { + /* + * Prevent I/O on special files (device files, etc.) on host side, + * however it is safe and required to allow opening them with O_PATH, + * as this is limited to (required) path based operations only. + */ + if (close_if_special_file(fd) < 0) { + return -1; + } + + serrno = errno; + /* + * O_NONBLOCK was only needed to open the file. Let's drop it. We don't + * do that with O_PATH since fcntl(F_SETFL) isn't supported, and + * openat() ignored it anyway. + */ ret = fcntl(fd, F_SETFL, flags); assert(!ret); + errno = serrno; } - errno = serrno; return fd; } @@ -260,4 +268,10 @@ int pthread_fchdir_np(int fd) __attribute__((weak_import)); #endif int qemu_mknodat(int dirfd, const char *filename, mode_t mode, dev_t dev); +/* + * Returns a newly allocated string presentation of open() flags, intended + * for debugging (tracing) purposes only. + */ +char *qemu_open_flags_tostr(int flags); + #endif diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c index af636cfb2d3..acfa7db4e19 100644 --- a/hw/9pfs/9p.c +++ b/hw/9pfs/9p.c @@ -201,8 +201,7 @@ void v9fs_path_free(V9fsPath *path) } -void G_GNUC_PRINTF(2, 3) -v9fs_path_sprintf(V9fsPath *path, const char *fmt, ...) +void v9fs_path_sprintf(V9fsPath *path, const char *fmt, ...) { va_list ap; @@ -434,16 +433,24 @@ void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu) V9fsFidState *f; GHashTableIter iter; gpointer fid; + int err; + int nclosed = 0; + + /* prevent multiple coroutines running this function simultaniously */ + if (s->reclaiming) { + return; + } + s->reclaiming = true; g_hash_table_iter_init(&iter, s->fids); QSLIST_HEAD(, V9fsFidState) reclaim_list = QSLIST_HEAD_INITIALIZER(reclaim_list); + /* Pick FIDs to be closed, collect them on reclaim_list. */ while (g_hash_table_iter_next(&iter, &fid, (gpointer *) &f)) { /* - * Unlink fids cannot be reclaimed. Check - * for them and skip them. 
Also skip fids + * Unlinked fids cannot be reclaimed, skip those, and also skip fids * currently being operated on. */ if (f->ref || f->flags & FID_NON_RECLAIMABLE) { @@ -493,23 +500,42 @@ void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu) } } /* - * Now close the fid in reclaim list. Free them if they - * are already clunked. + * Close the picked FIDs altogether on a background I/O driver thread. Do + * this all at once to keep latency (i.e. amount of thread hops between main + * thread <-> fs driver background thread) as low as possible. */ + v9fs_co_run_in_worker({ + QSLIST_FOREACH(f, &reclaim_list, reclaim_next) { + err = (f->fid_type == P9_FID_DIR) ? + s->ops->closedir(&s->ctx, &f->fs_reclaim) : + s->ops->close(&s->ctx, &f->fs_reclaim); + + /* 'man 2 close' suggests to ignore close() errors except of EBADF */ + if (unlikely(err && errno == EBADF)) { + /* + * unexpected case as FIDs were picked above by having a valid + * file descriptor + */ + error_report("9pfs: v9fs_reclaim_fd() WARNING: close() failed with EBADF"); + } else { + /* total_open_fd must only be mutated on main thread */ + nclosed++; + } + } + }); + total_open_fd -= nclosed; + /* Free the closed FIDs. */ while (!QSLIST_EMPTY(&reclaim_list)) { f = QSLIST_FIRST(&reclaim_list); QSLIST_REMOVE(&reclaim_list, f, V9fsFidState, reclaim_next); - if (f->fid_type == P9_FID_FILE) { - v9fs_co_close(pdu, &f->fs_reclaim); - } else if (f->fid_type == P9_FID_DIR) { - v9fs_co_closedir(pdu, &f->fs_reclaim); - } /* * Now drop the fid reference, free it * if clunked. */ put_fid(pdu, f); } + + s->reclaiming = false; } /* @@ -1574,6 +1600,11 @@ static void coroutine_fn v9fs_stat(void *opaque) pdu_complete(pdu, err); } +static bool fid_has_valid_file_handle(V9fsState *s, V9fsFidState *fidp) +{ + return s->ops->has_valid_file_handle(fidp->fid_type, &fidp->fs); +} + static void coroutine_fn v9fs_getattr(void *opaque) { int32_t fid; @@ -1596,11 +1627,11 @@ static void coroutine_fn v9fs_getattr(void *opaque) retval = -ENOENT; goto out_nofid; } - /* - * Currently we only support BASIC fields in stat, so there is no - * need to look at request_mask. - */ - retval = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + if (fid_has_valid_file_handle(pdu->s, fidp)) { + retval = v9fs_co_fstat(pdu, fidp, &stbuf); + } else { + retval = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + } if (retval < 0) { goto out; } @@ -1703,7 +1734,11 @@ static void coroutine_fn v9fs_setattr(void *opaque) } else { times[1].tv_nsec = UTIME_OMIT; } - err = v9fs_co_utimensat(pdu, &fidp->path, times); + if (fid_has_valid_file_handle(pdu->s, fidp)) { + err = v9fs_co_futimens(pdu, fidp, times); + } else { + err = v9fs_co_utimensat(pdu, &fidp->path, times); + } if (err < 0) { goto out; } @@ -1728,7 +1763,11 @@ static void coroutine_fn v9fs_setattr(void *opaque) } } if (v9iattr.valid & (P9_ATTR_SIZE)) { - err = v9fs_co_truncate(pdu, &fidp->path, v9iattr.size); + if (fid_has_valid_file_handle(pdu->s, fidp)) { + err = v9fs_co_ftruncate(pdu, fidp, v9iattr.size); + } else { + err = v9fs_co_truncate(pdu, &fidp->path, v9iattr.size); + } if (err < 0) { goto out; } @@ -1772,6 +1811,21 @@ static bool same_stat_id(const struct stat *a, const struct stat *b) return a->st_dev == b->st_dev && a->st_ino == b->st_ino; } +/* + * Returns a (newly allocated) comma-separated string presentation of the + * passed array for logging (tracing) purpose for trace event "v9fs_walk". + * + * It is caller's responsibility to free the returned string. 
+ */ +static char *trace_v9fs_walk_wnames(V9fsString *wnames, size_t nwnames) +{ + g_autofree char **arr = g_malloc0_n(nwnames + 1, sizeof(char *)); + for (size_t i = 0; i < nwnames; ++i) { + arr[i] = wnames[i].data; + } + return g_strjoinv(", ", arr); +} + static void coroutine_fn v9fs_walk(void *opaque) { int name_idx, nwalked; @@ -1785,6 +1839,7 @@ static void coroutine_fn v9fs_walk(void *opaque) size_t offset = 7; int32_t fid, newfid; P9ARRAY_REF(V9fsString) wnames = NULL; + g_autofree char *trace_wnames = NULL; V9fsFidState *fidp; V9fsFidState *newfidp = NULL; V9fsPDU *pdu = opaque; @@ -1798,11 +1853,9 @@ static void coroutine_fn v9fs_walk(void *opaque) } offset += err; - trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames); - if (nwnames > P9_MAXWELEM) { err = -EINVAL; - goto out_nofid; + goto out_nofid_nownames; } if (nwnames) { P9ARRAY_NEW(V9fsString, wnames, nwnames); @@ -1812,15 +1865,23 @@ static void coroutine_fn v9fs_walk(void *opaque) for (i = 0; i < nwnames; i++) { err = pdu_unmarshal(pdu, offset, "s", &wnames[i]); if (err < 0) { - goto out_nofid; + goto out_nofid_nownames; } if (name_is_illegal(wnames[i].data)) { err = -ENOENT; - goto out_nofid; + goto out_nofid_nownames; } offset += err; } + if (trace_event_get_state_backends(TRACE_V9FS_WALK)) { + trace_wnames = trace_v9fs_walk_wnames(wnames, nwnames); + trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames, + trace_wnames); + } + } else { + trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames, ""); } + fidp = get_fid(pdu, fid); if (fidp == NULL) { err = -ENOENT; @@ -1955,7 +2016,11 @@ static void coroutine_fn v9fs_walk(void *opaque) } v9fs_path_free(&dpath); v9fs_path_free(&path); + goto out_pdu_complete; +out_nofid_nownames: + trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames, ""); out_nofid: +out_pdu_complete: pdu_complete(pdu, err); } @@ -1980,6 +2045,7 @@ static void coroutine_fn v9fs_open(void *opaque) V9fsFidState *fidp; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; + g_autofree char *trace_oflags = NULL; if (s->proto_version == V9FS_PROTO_2000L) { err = pdu_unmarshal(pdu, offset, "dd", &fid, &mode); @@ -1991,7 +2057,13 @@ static void coroutine_fn v9fs_open(void *opaque) if (err < 0) { goto out_nofid; } - trace_v9fs_open(pdu->tag, pdu->id, fid, mode); + if (trace_event_get_state_backends(TRACE_V9FS_OPEN)) { + trace_oflags = qemu_open_flags_tostr( + (s->proto_version == V9FS_PROTO_2000L) ? + dotl_to_open_flags(mode) : omode_to_uflags(mode) + ); + trace_v9fs_open(pdu->tag, pdu->id, fid, mode, trace_oflags); + } fidp = get_fid(pdu, fid); if (fidp == NULL) { @@ -2587,6 +2659,11 @@ static void coroutine_fn v9fs_readdir(void *opaque) retval = -EINVAL; goto out_nofid; } + if (fidp->fid_type != P9_FID_DIR) { + warn_report_once("9p: bad client: T_readdir on non-directory stream"); + retval = -ENOTDIR; + goto out; + } if (!fidp->fs.dir.stream) { retval = -EINVAL; goto out; @@ -4284,6 +4361,8 @@ int v9fs_device_realize_common(V9fsState *s, const V9fsTransport *t, s->ctx.fst = &fse->fst; fsdev_throttle_init(s->ctx.fst); + s->reclaiming = false; + rc = 0; out: if (rc) { diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h index a6f59abccbf..65cc45e344f 100644 --- a/hw/9pfs/9p.h +++ b/hw/9pfs/9p.h @@ -280,7 +280,6 @@ struct V9fsFidState { uid_t uid; int ref; bool clunked; - QSIMPLEQ_ENTRY(V9fsFidState) next; QSLIST_ENTRY(V9fsFidState) reclaim_next; }; @@ -363,6 +362,7 @@ struct V9fsState { uint64_t qp_ndevices; /* Amount of entries in qpd_table. 
 */
     uint16_t qp_affix_next;
     uint64_t qp_fullpath_next;
+    bool reclaiming;
 };
 
 /* 9p2000.L open flags */
@@ -456,7 +456,8 @@ static inline uint8_t v9fs_request_cancelled(V9fsPDU *pdu)
 void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu);
 void v9fs_path_init(V9fsPath *path);
 void v9fs_path_free(V9fsPath *path);
-void v9fs_path_sprintf(V9fsPath *path, const char *fmt, ...);
+void G_GNUC_PRINTF(2, 3) v9fs_path_sprintf(V9fsPath *path, const char *fmt,
+                                           ...);
 void v9fs_path_copy(V9fsPath *dst, const V9fsPath *src);
 size_t v9fs_readdir_response_size(V9fsString *name);
 int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath,
diff --git a/hw/9pfs/codir.c b/hw/9pfs/codir.c
index 2068a4779d4..bce7dd96e91 100644
--- a/hw/9pfs/codir.c
+++ b/hw/9pfs/codir.c
@@ -20,6 +20,7 @@
 #include "fsdev/qemu-fsdev.h"
 #include "qemu/thread.h"
 #include "qemu/main-loop.h"
+#include "qemu/error-report.h"
 #include "coth.h"
 #include "9p-xattr.h"
 #include "9p-util.h"
@@ -353,7 +354,11 @@ int coroutine_fn v9fs_co_closedir(V9fsPDU *pdu, V9fsFidOpenState *fs)
                 err = -errno;
             }
         });
-    if (!err) {
+    /* 'man 2 close' suggests to ignore close() errors except of EBADF */
+    if (unlikely(err && errno == EBADF)) {
+        /* unexpected case as we should have checked for a valid file handle */
+        error_report("9pfs: WARNING: v9fs_co_closedir() failed with EBADF");
+    } else {
         total_open_fd--;
     }
     return err;
 }
diff --git a/hw/9pfs/cofile.c b/hw/9pfs/cofile.c
index 71174c3e4ab..6e775c8e412 100644
--- a/hw/9pfs/cofile.c
+++ b/hw/9pfs/cofile.c
@@ -20,6 +20,7 @@
 #include "fsdev/qemu-fsdev.h"
 #include "qemu/thread.h"
 #include "qemu/main-loop.h"
+#include "qemu/error-report.h"
 #include "coth.h"
 
 int coroutine_fn v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t st_mode,
@@ -197,7 +198,11 @@ int coroutine_fn v9fs_co_close(V9fsPDU *pdu, V9fsFidOpenState *fs)
                 err = -errno;
             }
         });
-    if (!err) {
+    /* 'man 2 close' suggests to ignore close() errors except of EBADF */
+    if (unlikely(err && errno == EBADF)) {
+        /* unexpected case as we should have checked for a valid file handle */
+        error_report("9pfs: WARNING: v9fs_co_close() failed with EBADF");
+    } else {
         total_open_fd--;
     }
     return err;
 }
diff --git a/hw/9pfs/cofs.c b/hw/9pfs/cofs.c
index 67e3ae5c5cc..12fa8c9fe9c 100644
--- a/hw/9pfs/cofs.c
+++ b/hw/9pfs/cofs.c
@@ -139,6 +139,25 @@ int coroutine_fn v9fs_co_utimensat(V9fsPDU *pdu, V9fsPath *path,
     return err;
 }
 
+int coroutine_fn v9fs_co_futimens(V9fsPDU *pdu, V9fsFidState *fidp,
+                                  struct timespec times[2])
+{
+    int err;
+    V9fsState *s = pdu->s;
+
+    if (v9fs_request_cancelled(pdu)) {
+        return -EINTR;
+    }
+    v9fs_co_run_in_worker(
+        {
+            err = s->ops->futimens(&s->ctx, fidp->fid_type, &fidp->fs, times);
+            if (err < 0) {
+                err = -errno;
+            }
+        });
+    return err;
+}
+
 int coroutine_fn v9fs_co_chown(V9fsPDU *pdu, V9fsPath *path, uid_t uid,
                                gid_t gid)
 {
@@ -184,6 +203,24 @@ int coroutine_fn v9fs_co_truncate(V9fsPDU *pdu, V9fsPath *path, off_t size)
     return err;
 }
 
+int coroutine_fn v9fs_co_ftruncate(V9fsPDU *pdu, V9fsFidState *fidp, off_t size)
+{
+    int err;
+    V9fsState *s = pdu->s;
+
+    if (v9fs_request_cancelled(pdu)) {
+        return -EINTR;
+    }
+    v9fs_co_run_in_worker(
+        {
+            err = s->ops->ftruncate(&s->ctx, fidp->fid_type, &fidp->fs, size);
+            if (err < 0) {
+                err = -errno;
+            }
+        });
+    return err;
+}
+
 int coroutine_fn v9fs_co_mknod(V9fsPDU *pdu, V9fsFidState *fidp,
                                V9fsString *name, uid_t uid, gid_t gid,
                                dev_t dev, mode_t mode, struct stat *stbuf)
diff --git a/hw/9pfs/coth.h b/hw/9pfs/coth.h
index 2c54249b357..7906fa7782d 100644
--- a/hw/9pfs/coth.h
+++ b/hw/9pfs/coth.h
@@ -71,8 +71,12 @@ int coroutine_fn v9fs_co_statfs(V9fsPDU *, V9fsPath *, struct statfs *);
 int coroutine_fn v9fs_co_lstat(V9fsPDU *, V9fsPath *, struct stat *);
 int coroutine_fn v9fs_co_chmod(V9fsPDU *, V9fsPath *, mode_t);
 int coroutine_fn v9fs_co_utimensat(V9fsPDU *, V9fsPath *, struct timespec [2]);
+int coroutine_fn v9fs_co_futimens(V9fsPDU *pdu, V9fsFidState *fidp,
+                                  struct timespec times[2]);
 int coroutine_fn v9fs_co_chown(V9fsPDU *, V9fsPath *, uid_t, gid_t);
 int coroutine_fn v9fs_co_truncate(V9fsPDU *, V9fsPath *, off_t);
+int coroutine_fn v9fs_co_ftruncate(V9fsPDU *pdu, V9fsFidState *fidp,
+                                   off_t size);
 int coroutine_fn v9fs_co_llistxattr(V9fsPDU *, V9fsPath *, void *, size_t);
 int coroutine_fn v9fs_co_lgetxattr(V9fsPDU *, V9fsPath *, V9fsString *,
                                    void *, size_t);
diff --git a/hw/9pfs/meson.build b/hw/9pfs/meson.build
index f1b62fa8c80..d35d4f44ffa 100644
--- a/hw/9pfs/meson.build
+++ b/hw/9pfs/meson.build
@@ -2,8 +2,8 @@ fs_ss = ss.source_set()
 fs_ss.add(files(
   '9p-local.c',
   '9p-posix-acl.c',
-  '9p-proxy.c',
   '9p-synth.c',
+  '9p-util-generic.c',
   '9p-xattr-user.c',
   '9p-xattr.c',
   '9p.c',
diff --git a/hw/9pfs/trace-events b/hw/9pfs/trace-events
index a12e55c165d..0e0fc37261b 100644
--- a/hw/9pfs/trace-events
+++ b/hw/9pfs/trace-events
@@ -11,9 +11,9 @@ v9fs_stat(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d"
 v9fs_stat_return(uint16_t tag, uint8_t id, int32_t mode, int32_t atime, int32_t mtime, int64_t length) "tag %d id %d stat={mode %d atime %d mtime %d length %"PRId64"}"
 v9fs_getattr(uint16_t tag, uint8_t id, int32_t fid, uint64_t request_mask) "tag %d id %d fid %d request_mask %"PRIu64
 v9fs_getattr_return(uint16_t tag, uint8_t id, uint64_t result_mask, uint32_t mode, uint32_t uid, uint32_t gid) "tag %d id %d getattr={result_mask %"PRId64" mode %u uid %u gid %u}"
-v9fs_walk(uint16_t tag, uint8_t id, int32_t fid, int32_t newfid, uint16_t nwnames) "tag %d id %d fid %d newfid %d nwnames %d"
+v9fs_walk(uint16_t tag, uint8_t id, int32_t fid, int32_t newfid, uint16_t nwnames, const char* wnames) "tag=%d id=%d fid=%d newfid=%d nwnames=%d wnames={%s}"
 v9fs_walk_return(uint16_t tag, uint8_t id, uint16_t nwnames, void* qids) "tag %d id %d nwnames %d qids %p"
-v9fs_open(uint16_t tag, uint8_t id, int32_t fid, int32_t mode) "tag %d id %d fid %d mode %d"
+v9fs_open(uint16_t tag, uint8_t id, int32_t fid, int32_t mode, const char* oflags) "tag=%d id=%d fid=%d mode=%d(%s)"
 v9fs_open_return(uint16_t tag, uint8_t id, uint8_t type, uint32_t version, uint64_t path, int iounit) "tag %u id %u qid={type %u version %u path %"PRIu64"} iounit %d"
 v9fs_lcreate(uint16_t tag, uint8_t id, int32_t dfid, int32_t flags, int32_t mode, uint32_t gid) "tag %d id %d dfid %d flags %d mode %d gid %u"
 v9fs_lcreate_return(uint16_t tag, uint8_t id, uint8_t type, uint32_t version, uint64_t path, int32_t iounit) "tag %u id %u qid={type %u version %u path %"PRIu64"} iounit %d"
diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
index efa41cfd73f..81b91e47c63 100644
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -26,7 +26,7 @@
 #include "hw/virtio/virtio-access.h"
 #include "qemu/iov.h"
 #include "qemu/module.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
 
 static void virtio_9p_push_and_notify(V9fsPDU *pdu)
 {
@@ -243,13 +243,12 @@ static const VMStateDescription vmstate_virtio_9p = {
     },
 };
 
-static Property virtio_9p_properties[] = {
+static const Property virtio_9p_properties[] = {
     DEFINE_PROP_STRING("mount_tag", V9fsVirtioState, state.fsconf.tag),
     DEFINE_PROP_STRING("fsdev",
V9fsVirtioState, state.fsconf.fsdev_id), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_9p_class_init(ObjectClass *klass, void *data) +static void virtio_9p_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/Kconfig b/hw/Kconfig index f7866e76f73..9e6c789ae7e 100644 --- a/hw/Kconfig +++ b/hw/Kconfig @@ -27,7 +27,6 @@ source nvme/Kconfig source nvram/Kconfig source pci-bridge/Kconfig source pci-host/Kconfig -source pcmcia/Kconfig source pci/Kconfig source remote/Kconfig source rtc/Kconfig @@ -38,10 +37,13 @@ source smbios/Kconfig source ssi/Kconfig source timer/Kconfig source tpm/Kconfig +source uefi/Kconfig source ufs/Kconfig source usb/Kconfig source virtio/Kconfig source vfio/Kconfig +source vfio-user/Kconfig +source vmapple/Kconfig source xen/Kconfig source watchdog/Kconfig @@ -50,7 +52,6 @@ source arm/Kconfig source cpu/Kconfig source alpha/Kconfig source avr/Kconfig -source cris/Kconfig source hppa/Kconfig source i386/Kconfig source loongarch/Kconfig diff --git a/hw/acpi/Kconfig b/hw/acpi/Kconfig index e07d3204eb3..1d4e9f0845c 100644 --- a/hw/acpi/Kconfig +++ b/hw/acpi/Kconfig @@ -60,6 +60,11 @@ config ACPI_VMGENID default y depends on PC +config ACPI_VMCLOCK + bool + default y + depends on PC + config ACPI_VIOT bool depends on ACPI diff --git a/hw/acpi/acpi-cpu-hotplug-stub.c b/hw/acpi/acpi-cpu-hotplug-stub.c index c6c61bb9cd3..9872dd55e43 100644 --- a/hw/acpi/acpi-cpu-hotplug-stub.c +++ b/hw/acpi/acpi-cpu-hotplug-stub.c @@ -10,47 +10,39 @@ void acpi_switch_to_modern_cphp(AcpiCpuHotplug *gpe_cpu, CPUHotplugState *cpuhp_state, uint16_t io_port) { - return; } void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner, AcpiCpuHotplug *gpe_cpu, uint16_t base) { - return; } void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner, CPUHotplugState *state, hwaddr base_addr) { - return; } void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list) { - return; } void acpi_cpu_plug_cb(HotplugHandler *hotplug_dev, CPUHotplugState *cpu_st, DeviceState *dev, Error **errp) { - return; } void legacy_acpi_cpu_plug_cb(HotplugHandler *hotplug_dev, AcpiCpuHotplug *g, DeviceState *dev, Error **errp) { - return; } void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st, DeviceState *dev, Error **errp) { - return; } void acpi_cpu_unplug_request_cb(HotplugHandler *hotplug_dev, CPUHotplugState *cpu_st, DeviceState *dev, Error **errp) { - return; } diff --git a/hw/acpi/acpi-mem-hotplug-stub.c b/hw/acpi/acpi-mem-hotplug-stub.c index 73a076a2657..7ad0fdcdf26 100644 --- a/hw/acpi/acpi-mem-hotplug-stub.c +++ b/hw/acpi/acpi-mem-hotplug-stub.c @@ -7,29 +7,24 @@ const VMStateDescription vmstate_memory_hotplug; void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner, MemHotplugState *state, hwaddr io_base) { - return; } void acpi_memory_ospm_status(MemHotplugState *mem_st, ACPIOSTInfoList ***list) { - return; } void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st, DeviceState *dev, Error **errp) { - return; } void acpi_memory_unplug_cb(MemHotplugState *mem_st, DeviceState *dev, Error **errp) { - return; } void acpi_memory_unplug_request_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st, DeviceState *dev, Error **errp) { - return; } diff --git a/hw/acpi/acpi-nvdimm-stub.c b/hw/acpi/acpi-nvdimm-stub.c index 8baff9be6f4..65f491d6535 100644 --- a/hw/acpi/acpi-nvdimm-stub.c +++ b/hw/acpi/acpi-nvdimm-stub.c @@ -4,5 +4,4 @@ void 
nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev) { - return; } diff --git a/hw/acpi/acpi-pci-hotplug-stub.c b/hw/acpi/acpi-pci-hotplug-stub.c index dcee3ad7a1b..d58ea726a82 100644 --- a/hw/acpi/acpi-pci-hotplug-stub.c +++ b/hw/acpi/acpi-pci-hotplug-stub.c @@ -4,43 +4,37 @@ const VMStateDescription vmstate_acpi_pcihp_pci_status; -void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus, +void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, MemoryRegion *address_space_io, uint16_t io_base) { - return; } void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, DeviceState *dev, Error **errp) { - return; } void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { - return; } void acpi_pcihp_device_unplug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, DeviceState *dev, Error **errp) { - return; } void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, DeviceState *dev, Error **errp) { - return; } void acpi_pcihp_reset(AcpiPciHpState *s) { - return; } -bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus) +bool acpi_pcihp_is_hotpluggable_bus(AcpiPciHpState *s, BusState *bus) { return true; } diff --git a/hw/acpi/acpi-stub.c b/hw/acpi/acpi-stub.c index e268ce9b1a9..fd0b62fad9e 100644 --- a/hw/acpi/acpi-stub.c +++ b/hw/acpi/acpi-stub.c @@ -21,7 +21,15 @@ #include "qemu/osdep.h" #include "hw/acpi/acpi.h" +char unsigned *acpi_tables; +size_t acpi_tables_len; + void acpi_table_add(const QemuOpts *opts, Error **errp) { g_assert_not_reached(); } + +bool acpi_builtin(void) +{ + return false; +} diff --git a/hw/acpi/acpi_egm_memory.c b/hw/acpi/acpi_egm_memory.c new file mode 100644 index 00000000000..d59dfd3db1e --- /dev/null +++ b/hw/acpi/acpi_egm_memory.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. 
All rights reserved + */ + +#include "qemu/osdep.h" +#include "hw/acpi/acpi_egm_memory.h" +#include "hw/acpi/aml-build.h" +#include "hw/boards.h" +#include "hw/pci/pci_device.h" +#include "qemu/error-report.h" +#include "hw/arm/virt.h" +#include "include/hw/boards.h" + +typedef struct AcpiEgmMemoryClass { + ObjectClass parent_class; +} AcpiEgmMemoryClass; + +static int gpu_id; + +OBJECT_DEFINE_TYPE_WITH_INTERFACES(AcpiEgmMemory, acpi_egm_memory, + ACPI_EGM_MEMORY, OBJECT, + { TYPE_USER_CREATABLE }, + { NULL }) + +OBJECT_DECLARE_SIMPLE_TYPE(AcpiEgmMemory, ACPI_EGM_MEMORY) + +static void acpi_egm_memory_init(Object *obj) +{ + AcpiEgmMemory *egm = ACPI_EGM_MEMORY(obj); + + egm->node = MAX_NODES; + egm->pci_dev = NULL; +} + +static void acpi_egm_memory_finalize(Object *obj) +{ + AcpiEgmMemory *egm = ACPI_EGM_MEMORY(obj); + + g_free(egm->pci_dev); +} + +static void acpi_egm_memory_set_pci_device(Object *obj, const char *val, + Error **errp) +{ + AcpiEgmMemory *egm = ACPI_EGM_MEMORY(obj); + + egm->pci_dev = g_strdup(val); +} + +static void acpi_egm_memory_set_node(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + AcpiEgmMemory *egm = ACPI_EGM_MEMORY(obj); + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + if (value >= MAX_NODES) { + error_printf("%s: Invalid NUMA node specified\n", + TYPE_ACPI_EGM_MEMORY); + exit(1); + } + + egm->node = value; +} + +static void acpi_egm_memory_class_init(ObjectClass *oc, const void *data) +{ + object_class_property_add_str(oc, "pci-dev", NULL, + acpi_egm_memory_set_pci_device); + object_class_property_add(oc, "node", "int", NULL, + acpi_egm_memory_set_node, NULL, NULL); +} + +static void acpi_dsdt_add_gpu(Aml *dev, int32_t devfn, uint64_t egm_mem_base, + uint64_t egm_mem_size, int egm_mem_pxm) +{ + Aml *dev_gpu = aml_device("GPU%d", gpu_id++); + Aml *pkg = aml_package(3); + Aml *pkg1 = aml_package(2); + Aml *pkg2 = aml_package(2); + Aml *pkg3 = aml_package(2); + Aml *dev_pkg = aml_package(2); + Aml *UUID; + + aml_append(dev_gpu, aml_name_decl("_ADR", aml_int(PCI_SLOT(devfn) << 16))); + + aml_append(pkg1, aml_string("nvidia,egm-base-pa")); + aml_append(pkg1, aml_int(egm_mem_base)); + + aml_append(pkg2, aml_string("nvidia,egm-size")); + aml_append(pkg2, aml_int(egm_mem_size)); + + aml_append(pkg3, aml_string("nvidia,egm-pxm")); + aml_append(pkg3, aml_int(egm_mem_pxm)); + + aml_append(pkg, pkg1); + aml_append(pkg, pkg2); + aml_append(pkg, pkg3); + + UUID = aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301"); + aml_append(dev_pkg, UUID); + aml_append(dev_pkg, pkg); + + aml_append(dev_gpu, aml_name_decl("_DSD", dev_pkg)); + aml_append(dev, dev_gpu); +} + +typedef struct DsdtInfo { + Aml *dev; + int bus; +} DsdtInfo; + +static int build_all_acpi_egm_memory_dsdt(Object *obj, void *opaque) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + AcpiEgmMemory *egm; + DsdtInfo *info = opaque; + PCIDevice *pci_dev; + Object *o; + VirtMachineState *vms = VIRT_MACHINE(qdev_get_machine()); + uint64_t mem_base; + int i; + + if (!object_dynamic_cast(obj, TYPE_ACPI_EGM_MEMORY)) { + return 0; + } + + egm = ACPI_EGM_MEMORY(obj); + if (egm->node >= ms->numa_state->num_nodes) { + error_printf("%s: Specified node %d is invalid.\n", + TYPE_ACPI_EGM_MEMORY, egm->node); + exit(1); + } + + o = object_resolve_path_type(egm->pci_dev, TYPE_PCI_DEVICE, NULL); + if (!o) { + error_printf("%s: Specified device must be a PCI device.\n", + TYPE_ACPI_EGM_MEMORY); + exit(1); + } + + pci_dev = PCI_DEVICE(o); + + if (info->bus != 
pci_bus_num(pci_get_bus(pci_dev))) { + return 0; + } + + mem_base = vms->memmap[VIRT_MEM].base; + for (i = 0; i < ms->numa_state->num_nodes; ++i) { + if (i == egm->node) { + acpi_dsdt_add_gpu(info->dev, pci_dev->devfn, mem_base, + ms->numa_state->nodes[i].node_mem, i); + break; + } + + if (ms->numa_state->nodes[i].node_mem > 0) { + mem_base += ms->numa_state->nodes[i].node_mem; + } + } + + return 0; +} + +void build_acpi_egm_memory_dsdt(Aml *dev, int bus) +{ + DsdtInfo info = {dev, bus}; + + object_child_foreach_recursive(object_get_root(), + build_all_acpi_egm_memory_dsdt, + &info); +} diff --git a/hw/acpi/acpi_generic_initiator.c b/hw/acpi/acpi_generic_initiator.c deleted file mode 100644 index 17b9a052f59..00000000000 --- a/hw/acpi/acpi_generic_initiator.c +++ /dev/null @@ -1,148 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved - */ - -#include "qemu/osdep.h" -#include "hw/acpi/acpi_generic_initiator.h" -#include "hw/acpi/aml-build.h" -#include "hw/boards.h" -#include "hw/pci/pci_device.h" -#include "qemu/error-report.h" - -typedef struct AcpiGenericInitiatorClass { - ObjectClass parent_class; -} AcpiGenericInitiatorClass; - -OBJECT_DEFINE_TYPE_WITH_INTERFACES(AcpiGenericInitiator, acpi_generic_initiator, - ACPI_GENERIC_INITIATOR, OBJECT, - { TYPE_USER_CREATABLE }, - { NULL }) - -OBJECT_DECLARE_SIMPLE_TYPE(AcpiGenericInitiator, ACPI_GENERIC_INITIATOR) - -static void acpi_generic_initiator_init(Object *obj) -{ - AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); - - gi->node = MAX_NODES; - gi->pci_dev = NULL; -} - -static void acpi_generic_initiator_finalize(Object *obj) -{ - AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); - - g_free(gi->pci_dev); -} - -static void acpi_generic_initiator_set_pci_device(Object *obj, const char *val, - Error **errp) -{ - AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); - - gi->pci_dev = g_strdup(val); -} - -static void acpi_generic_initiator_set_node(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) -{ - AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); - MachineState *ms = MACHINE(qdev_get_machine()); - uint32_t value; - - if (!visit_type_uint32(v, name, &value, errp)) { - return; - } - - if (value >= MAX_NODES) { - error_printf("%s: Invalid NUMA node specified\n", - TYPE_ACPI_GENERIC_INITIATOR); - exit(1); - } - - gi->node = value; - ms->numa_state->nodes[gi->node].has_gi = true; -} - -static void acpi_generic_initiator_class_init(ObjectClass *oc, void *data) -{ - object_class_property_add_str(oc, "pci-dev", NULL, - acpi_generic_initiator_set_pci_device); - object_class_property_add(oc, "node", "int", NULL, - acpi_generic_initiator_set_node, NULL, NULL); -} - -/* - * ACPI 6.3: - * Table 5-78 Generic Initiator Affinity Structure - */ -static void -build_srat_generic_pci_initiator_affinity(GArray *table_data, int node, - PCIDeviceHandle *handle) -{ - uint8_t index; - - build_append_int_noprefix(table_data, 5, 1); /* Type */ - build_append_int_noprefix(table_data, 32, 1); /* Length */ - build_append_int_noprefix(table_data, 0, 1); /* Reserved */ - build_append_int_noprefix(table_data, 1, 1); /* Device Handle Type: PCI */ - build_append_int_noprefix(table_data, node, 4); /* Proximity Domain */ - - /* Device Handle - PCI */ - build_append_int_noprefix(table_data, handle->segment, 2); - build_append_int_noprefix(table_data, handle->bdf, 2); - for (index = 0; index < 12; index++) { - build_append_int_noprefix(table_data, 0, 1); - 
} - - build_append_int_noprefix(table_data, GEN_AFFINITY_ENABLED, 4); /* Flags */ - build_append_int_noprefix(table_data, 0, 4); /* Reserved */ -} - -static int build_all_acpi_generic_initiators(Object *obj, void *opaque) -{ - MachineState *ms = MACHINE(qdev_get_machine()); - AcpiGenericInitiator *gi; - GArray *table_data = opaque; - PCIDeviceHandle dev_handle; - PCIDevice *pci_dev; - Object *o; - - if (!object_dynamic_cast(obj, TYPE_ACPI_GENERIC_INITIATOR)) { - return 0; - } - - gi = ACPI_GENERIC_INITIATOR(obj); - if (gi->node >= ms->numa_state->num_nodes) { - error_printf("%s: Specified node %d is invalid.\n", - TYPE_ACPI_GENERIC_INITIATOR, gi->node); - exit(1); - } - - o = object_resolve_path_type(gi->pci_dev, TYPE_PCI_DEVICE, NULL); - if (!o) { - error_printf("%s: Specified device must be a PCI device.\n", - TYPE_ACPI_GENERIC_INITIATOR); - exit(1); - } - - pci_dev = PCI_DEVICE(o); - - dev_handle.segment = 0; - dev_handle.bdf = PCI_BUILD_BDF(pci_bus_num(pci_get_bus(pci_dev)), - pci_dev->devfn); - - build_srat_generic_pci_initiator_affinity(table_data, - gi->node, &dev_handle); - - return 0; -} - -void build_srat_generic_pci_initiator(GArray *table_data) -{ - object_child_foreach_recursive(object_get_root(), - build_all_acpi_generic_initiators, - table_data); -} diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c index 6d4517cfbe3..1e685f982f3 100644 --- a/hw/acpi/aml-build.c +++ b/hw/acpi/aml-build.c @@ -24,7 +24,7 @@ #include "hw/acpi/aml-build.h" #include "qemu/bswap.h" #include "qemu/bitops.h" -#include "sysemu/numa.h" +#include "system/numa.h" #include "hw/boards.h" #include "hw/acpi/tpm.h" #include "hw/pci/pci_host.h" @@ -160,7 +160,7 @@ void crs_replace_with_free_ranges(GPtrArray *ranges, */ static void crs_range_merge(GPtrArray *range) { - GPtrArray *tmp = g_ptr_array_new_with_free_func(crs_range_free); + g_autoptr(GPtrArray) tmp = g_ptr_array_new_with_free_func(crs_range_free); CrsRangeEntry *entry; uint64_t range_base, range_limit; int i; @@ -191,7 +191,6 @@ static void crs_range_merge(GPtrArray *range) entry = g_ptr_array_index(tmp, i); crs_range_insert(range, entry->base, entry->limit); } - g_ptr_array_free(tmp, true); } static void @@ -534,8 +533,7 @@ void aml_append(Aml *parent_ctx, Aml *child) case AML_NO_OPCODE: break; default: - assert(0); - break; + g_assert_not_reached(); } build_append_array(parent_ctx->buf, buf); build_free_array(buf); @@ -1938,6 +1936,89 @@ void build_srat_memory(GArray *table_data, uint64_t base, build_append_int_noprefix(table_data, 0, 8); /* Reserved */ } +/* + * ACPI Spec Revision 6.3 + * Table 5-80 Device Handle - PCI + */ +static void build_append_srat_pci_device_handle(GArray *table_data, + uint16_t segment, + uint8_t bus, uint8_t devfn) +{ + /* PCI segment number */ + build_append_int_noprefix(table_data, segment, 2); + /* PCI Bus Device Function */ + build_append_int_noprefix(table_data, bus, 1); + build_append_int_noprefix(table_data, devfn, 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 12); +} + +static void build_append_srat_acpi_device_handle(GArray *table_data, + const char *hid, + uint32_t uid) +{ + assert(strlen(hid) == 8); + /* Device Handle - ACPI */ + for (int i = 0; i < 8; i++) { + build_append_int_noprefix(table_data, hid[i], 1); + } + build_append_int_noprefix(table_data, uid, 4); + build_append_int_noprefix(table_data, 0, 4); +} + +/* + * ACPI spec, Revision 6.3 + * 5.2.16.6 Generic Initiator Affinity Structure + * With PCI Device Handle. 
+ */ +void build_srat_pci_generic_initiator(GArray *table_data, uint32_t node, + uint16_t segment, uint8_t bus, + uint8_t devfn) +{ + /* Type */ + build_append_int_noprefix(table_data, 5, 1); + /* Length */ + build_append_int_noprefix(table_data, 32, 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 1); + /* Device Handle Type: PCI */ + build_append_int_noprefix(table_data, 1, 1); + /* Proximity Domain */ + build_append_int_noprefix(table_data, node, 4); + /* Device Handle */ + build_append_srat_pci_device_handle(table_data, segment, bus, devfn); + /* Flags - GI Enabled */ + build_append_int_noprefix(table_data, 1, 4); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); +} + +/* + * ACPI spec, Revision 6.5 + * 5.2.16.7 Generic Port Affinity Structure + * With ACPI Device Handle. + */ +void build_srat_acpi_generic_port(GArray *table_data, uint32_t node, + const char *hid, uint32_t uid) +{ + /* Type */ + build_append_int_noprefix(table_data, 6, 1); + /* Length */ + build_append_int_noprefix(table_data, 32, 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 1); + /* Device Handle Type: ACPI */ + build_append_int_noprefix(table_data, 0, 1); + /* Proximity Domain */ + build_append_int_noprefix(table_data, node, 4); + /* Device Handle */ + build_append_srat_acpi_device_handle(table_data, hid, uid); + /* Flags - GP Enabled */ + build_append_int_noprefix(table_data, 1, 4); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); +} + /* * ACPI spec 5.2.17 System Locality Distance Information Table * (Revision 2.0 or later) @@ -1996,7 +2077,7 @@ static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags, void build_spcr(GArray *table_data, BIOSLinker *linker, const AcpiSpcrData *f, const uint8_t rev, - const char *oem_id, const char *oem_table_id) + const char *oem_id, const char *oem_table_id, const char *name) { AcpiTable table = { .sig = "SPCR", .rev = rev, .oem_id = oem_id, .oem_table_id = oem_table_id }; @@ -2042,9 +2123,21 @@ void build_spcr(GArray *table_data, BIOSLinker *linker, build_append_int_noprefix(table_data, f->pci_flags, 4); /* PCI Segment */ build_append_int_noprefix(table_data, f->pci_segment, 1); - /* Reserved */ - build_append_int_noprefix(table_data, 0, 4); - + if (rev < 4) { + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); + } else { + /* UartClkFreq */ + build_append_int_noprefix(table_data, f->uart_clk_freq, 4); + /* PreciseBaudrate */ + build_append_int_noprefix(table_data, f->precise_baudrate, 4); + /* NameSpaceStringLength */ + build_append_int_noprefix(table_data, f->namespace_string_length, 2); + /* NameSpaceStringOffset */ + build_append_int_noprefix(table_data, f->namespace_string_offset, 2); + /* NamespaceString[] */ + g_array_append_vals(table_data, name, f->namespace_string_length); + } acpi_table_end(linker, &table); } /* @@ -2059,12 +2152,25 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms, int64_t socket_id = -1, cluster_id = -1, core_id = -1; uint32_t socket_offset = 0, cluster_offset = 0, core_offset = 0; uint32_t pptt_start = table_data->len; + uint32_t root_offset; int n; AcpiTable table = { .sig = "PPTT", .rev = 2, .oem_id = oem_id, .oem_table_id = oem_table_id }; acpi_table_begin(&table, table_data); + /* + * Build a root node for all the processor nodes. Otherwise when + * building a multi-socket system each socket tree is separated + * and will be hard for the OS like Linux to know whether the + * system is homogeneous. 
+ */ + root_offset = table_data->len - pptt_start; + build_processor_hierarchy_node(table_data, + (1 << 0) | /* Physical package */ + (1 << 4), /* Identical Implementation */ + 0, 0, NULL, 0); + /* * This works with the assumption that cpus[n].props.*_id has been * sorted from top to down levels in mc->possible_cpu_arch_ids(). @@ -2079,8 +2185,9 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms, core_id = -1; socket_offset = table_data->len - pptt_start; build_processor_hierarchy_node(table_data, - (1 << 0), /* Physical package */ - 0, socket_id, NULL, 0); + (1 << 0) | /* Physical package */ + (1 << 4), /* Identical Implementation */ + root_offset, socket_id, NULL, 0); } if (mc->smp_props.clusters_supported && mc->smp_props.has_clusters) { @@ -2090,7 +2197,8 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms, core_id = -1; cluster_offset = table_data->len - pptt_start; build_processor_hierarchy_node(table_data, - (0 << 0), /* Not a physical package */ + (0 << 0) | /* Not a physical package */ + (1 << 4), /* Identical Implementation */ socket_offset, cluster_id, NULL, 0); } } else { @@ -2108,7 +2216,8 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms, core_id = cpus->cpus[n].props.core_id; core_offset = table_data->len - pptt_start; build_processor_hierarchy_node(table_data, - (0 << 0), /* Not a physical package */ + (0 << 0) | /* Not a physical package */ + (1 << 4), /* Identical Implementation */ cluster_offset, core_id, NULL, 0); } diff --git a/hw/acpi/bios-linker-loader.c b/hw/acpi/bios-linker-loader.c index 108061828b1..c9ffe449aac 100644 --- a/hw/acpi/bios-linker-loader.c +++ b/hw/acpi/bios-linker-loader.c @@ -22,8 +22,6 @@ #include "hw/acpi/bios-linker-loader.h" #include "hw/nvram/fw_cfg.h" -#include "qemu/bswap.h" - /* * Linker/loader is a paravirtualized interface that passes commands to guest. 
* The commands can be used to request guest to diff --git a/hw/acpi/core.c b/hw/acpi/core.c index ec5e127d172..58f8964e130 100644 --- a/hw/acpi/core.c +++ b/hw/acpi/core.c @@ -31,7 +31,7 @@ #include "qemu/error-report.h" #include "qemu/module.h" #include "qemu/option.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "trace.h" struct acpi_table_header { @@ -78,6 +78,11 @@ static void acpi_register_config(void) opts_init(acpi_register_config); +bool acpi_builtin(void) +{ + return true; +} + static int acpi_checksum(const uint8_t *data, int len) { int sum, i; diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c index 5cb60ca8bc7..6f1ae79edbf 100644 --- a/hw/acpi/cpu.c +++ b/hw/acpi/cpu.c @@ -5,7 +5,7 @@ #include "qapi/error.h" #include "qapi/qapi-events-acpi.h" #include "trace.h" -#include "sysemu/numa.h" +#include "system/numa.h" #define ACPI_CPU_SELECTOR_OFFSET_WR 0 #define ACPI_CPU_FLAGS_OFFSET_RW 4 @@ -235,8 +235,8 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner, static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev) { - CPUClass *k = CPU_GET_CLASS(dev); - uint64_t cpu_arch_id = k->get_arch_id(CPU(dev)); + CPUState *cpu = CPU(dev); + uint64_t cpu_arch_id = cpu->cc->get_arch_id(cpu); int i; for (i = 0; i < cpu_st->dev_count; i++) { @@ -327,6 +327,7 @@ const VMStateDescription vmstate_cpu_hotplug = { #define CPU_EJECT_METHOD "CEJ0" #define CPU_OST_METHOD "COST" #define CPU_ADDED_LIST "CNEW" +#define CPU_EJ_LIST "CEJL" #define CPU_ENABLED "CPEN" #define CPU_SELECTOR "CSEL" @@ -488,7 +489,6 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, method = aml_method(CPU_SCAN_METHOD, 0, AML_SERIALIZED); { const uint8_t max_cpus_per_pass = 255; - Aml *else_ctx; Aml *while_ctx, *while_ctx2; Aml *has_event = aml_local(0); Aml *dev_chk = aml_int(1); @@ -499,6 +499,8 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, Aml *uid = aml_local(3); Aml *has_job = aml_local(4); Aml *new_cpus = aml_name(CPU_ADDED_LIST); + Aml *ej_cpus = aml_name(CPU_EJ_LIST); + Aml *num_ej_cpus = aml_local(5); aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); @@ -513,6 +515,8 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, */ aml_append(method, aml_name_decl(CPU_ADDED_LIST, aml_package(max_cpus_per_pass))); + aml_append(method, aml_name_decl(CPU_EJ_LIST, + aml_package(max_cpus_per_pass))); aml_append(method, aml_store(zero, uid)); aml_append(method, aml_store(one, has_job)); @@ -527,6 +531,7 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, aml_append(while_ctx2, aml_store(one, has_event)); aml_append(while_ctx2, aml_store(zero, num_added_cpus)); + aml_append(while_ctx2, aml_store(zero, num_ej_cpus)); /* * Scan CPUs, till there are CPUs with events or @@ -559,8 +564,10 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, * if CPU_ADDED_LIST is full, exit inner loop and process * collected CPUs */ - ifctx = aml_if( - aml_equal(num_added_cpus, aml_int(max_cpus_per_pass))); + ifctx = aml_if(aml_lor( + aml_equal(num_added_cpus, aml_int(max_cpus_per_pass)), + aml_equal(num_ej_cpus, aml_int(max_cpus_per_pass)) + )); { aml_append(ifctx, aml_store(one, has_job)); aml_append(ifctx, aml_break()); @@ -577,16 +584,16 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, aml_append(ifctx, aml_store(one, has_event)); } aml_append(while_ctx, ifctx); - else_ctx = aml_else(); + ifctx = 
aml_if(aml_equal(rm_evt, one)); { - aml_append(ifctx, - aml_call2(CPU_NOTIFY_METHOD, uid, eject_req)); - aml_append(ifctx, aml_store(one, rm_evt)); + /* cache CPUs to be removed so they can be notified later */ + aml_append(ifctx, aml_store(uid, + aml_index(ej_cpus, num_ej_cpus))); + aml_append(ifctx, aml_increment(num_ej_cpus)); aml_append(ifctx, aml_store(one, has_event)); } - aml_append(else_ctx, ifctx); - aml_append(while_ctx, else_ctx); + aml_append(while_ctx, ifctx); aml_append(while_ctx, aml_increment(uid)); } aml_append(while_ctx2, while_ctx); @@ -620,6 +627,24 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, aml_append(while_ctx, aml_increment(cpu_idx)); } aml_append(while_ctx2, while_ctx); + + /* + * Notify OSPM about the CPUs being removed and clear the remove flag + */ + aml_append(while_ctx2, aml_store(zero, cpu_idx)); + while_ctx = aml_while(aml_lless(cpu_idx, num_ej_cpus)); + { + aml_append(while_ctx, + aml_store(aml_derefof(aml_index(ej_cpus, cpu_idx)), + uid)); + aml_append(while_ctx, + aml_call2(CPU_NOTIFY_METHOD, uid, eject_req)); + aml_append(while_ctx, aml_store(uid, cpu_selector)); + aml_append(while_ctx, aml_store(one, rm_evt)); + aml_append(while_ctx, aml_increment(cpu_idx)); + } + aml_append(while_ctx2, while_ctx); + /* * If another batch is needed, then it will resume scanning * exactly at -- and not after -- the last CPU that's currently diff --git a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c index 83b8bc5deb8..aa0e1e3efa5 100644 --- a/hw/acpi/cpu_hotplug.c +++ b/hw/acpi/cpu_hotplug.c @@ -62,10 +62,9 @@ static const MemoryRegionOps AcpiCpuHotplug_ops = { static void acpi_set_cpu_present_bit(AcpiCpuHotplug *g, CPUState *cpu, bool *swtchd_to_modern) { - CPUClass *k = CPU_GET_CLASS(cpu); int64_t cpu_id; - cpu_id = k->get_arch_id(cpu); + cpu_id = cpu->cc->get_arch_id(cpu); if ((cpu_id / 8) >= ACPI_GPE_PROC_LEN) { object_property_set_bool(g->device, "cpu-hotplug-legacy", false, &error_abort); diff --git a/hw/acpi/cxl.c b/hw/acpi/cxl.c index 9cd7905ea25..75d5b30bb8b 100644 --- a/hw/acpi/cxl.c +++ b/hw/acpi/cxl.c @@ -22,6 +22,7 @@ #include "hw/pci/pci_bridge.h" #include "hw/pci/pci_host.h" #include "hw/cxl/cxl.h" +#include "hw/cxl/cxl_host.h" #include "hw/mem/memory-device.h" #include "hw/acpi/acpi.h" #include "hw/acpi/aml-build.h" @@ -135,55 +136,52 @@ static void cedt_build_chbs(GArray *table_data, PXBCXLDev *cxl) * Interleave ways encoding in CXL 2.0 ECN: 3, 6, 12 and 16-way memory * interleaving. 
*/ -static void cedt_build_cfmws(GArray *table_data, CXLState *cxls) +static void cedt_build_cfmws(CXLFixedWindow *fw, Aml *cedt) { - GList *it; + GArray *table_data = cedt->buf; + int i; - for (it = cxls->fixed_windows; it; it = it->next) { - CXLFixedWindow *fw = it->data; - int i; - - /* Type */ - build_append_int_noprefix(table_data, 1, 1); + /* Type */ + build_append_int_noprefix(table_data, 1, 1); - /* Reserved */ - build_append_int_noprefix(table_data, 0, 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 1); - /* Record Length */ - build_append_int_noprefix(table_data, 36 + 4 * fw->num_targets, 2); + /* Record Length */ + build_append_int_noprefix(table_data, 36 + 4 * fw->num_targets, 2); - /* Reserved */ - build_append_int_noprefix(table_data, 0, 4); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); - /* Base HPA */ - build_append_int_noprefix(table_data, fw->mr.addr, 8); + /* Base HPA */ + build_append_int_noprefix(table_data, fw->mr.addr, 8); - /* Window Size */ - build_append_int_noprefix(table_data, fw->size, 8); + /* Window Size */ + build_append_int_noprefix(table_data, fw->size, 8); - /* Host Bridge Interleave Ways */ - build_append_int_noprefix(table_data, fw->enc_int_ways, 1); + /* Host Bridge Interleave Ways */ + build_append_int_noprefix(table_data, fw->enc_int_ways, 1); - /* Host Bridge Interleave Arithmetic */ - build_append_int_noprefix(table_data, 0, 1); + /* Host Bridge Interleave Arithmetic */ + build_append_int_noprefix(table_data, 0, 1); - /* Reserved */ - build_append_int_noprefix(table_data, 0, 2); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 2); - /* Host Bridge Interleave Granularity */ - build_append_int_noprefix(table_data, fw->enc_int_gran, 4); + /* Host Bridge Interleave Granularity */ + build_append_int_noprefix(table_data, fw->enc_int_gran, 4); - /* Window Restrictions */ - build_append_int_noprefix(table_data, 0x0f, 2); /* No restrictions */ + /* Window Restrictions */ + build_append_int_noprefix(table_data, 0x0f, 2); - /* QTG ID */ - build_append_int_noprefix(table_data, 0, 2); + /* QTG ID */ + build_append_int_noprefix(table_data, 0, 2); - /* Host Bridge List (list of UIDs - currently bus_nr) */ - for (i = 0; i < fw->num_targets; i++) { - g_assert(fw->target_hbs[i]); - build_append_int_noprefix(table_data, PXB_DEV(fw->target_hbs[i])->bus_nr, 4); - } + /* Host Bridge List (list of UIDs - currently bus_nr) */ + for (i = 0; i < fw->num_targets; i++) { + g_assert(fw->target_hbs[i]); + build_append_int_noprefix(table_data, + PXB_DEV(fw->target_hbs[i])->bus_nr, 4); } } @@ -202,6 +200,7 @@ void cxl_build_cedt(GArray *table_offsets, GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id, CXLState *cxl_state) { + GSList *cfmws_list, *iter; Aml *cedt; AcpiTable table = { .sig = "CEDT", .rev = 1, .oem_id = oem_id, .oem_table_id = oem_table_id }; @@ -213,7 +212,12 @@ void cxl_build_cedt(GArray *table_offsets, GArray *table_data, /* reserve space for CEDT header */ object_child_foreach_recursive(object_get_root(), cxl_foreach_pxb_hb, cedt); - cedt_build_cfmws(cedt->buf, cxl_state); + + cfmws_list = cxl_fmws_get_all_sorted(); + for (iter = cfmws_list; iter; iter = iter->next) { + cedt_build_cfmws(CXL_FMW(iter->data), cedt); + } + g_slist_free(cfmws_list); /* copy AML table into ACPI tables blob and patch header there */ g_array_append_vals(table_data, cedt->buf->data, cedt->buf->len); diff --git a/hw/acpi/erst.c b/hw/acpi/erst.c index b2f1b136301..099cabb7ab7 100644 --- a/hw/acpi/erst.c +++ 
b/hw/acpi/erst.c @@ -12,7 +12,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "hw/qdev-core.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qom/object.h" #include "hw/pci/pci_device.h" #include "qom/object_interfaces.h" @@ -23,8 +23,8 @@ #include "hw/acpi/acpi-defs.h" #include "hw/acpi/aml-build.h" #include "hw/acpi/bios-linker-loader.h" -#include "exec/address-spaces.h" -#include "sysemu/hostmem.h" +#include "system/address-spaces.h" +#include "system/hostmem.h" #include "hw/acpi/erst.h" #include "trace.h" @@ -1011,15 +1011,14 @@ static void erst_reset(DeviceState *dev) trace_acpi_erst_reset_out(le32_to_cpu(s->header->record_count)); } -static Property erst_properties[] = { +static const Property erst_properties[] = { DEFINE_PROP_LINK(ACPI_ERST_MEMDEV_PROP, ERSTDeviceState, hostmem, TYPE_MEMORY_BACKEND, HostMemoryBackend *), DEFINE_PROP_UINT32(ACPI_ERST_RECORD_SIZE_PROP, ERSTDeviceState, default_record_size, ERST_RECORD_SIZE), - DEFINE_PROP_END_OF_LIST(), }; -static void erst_class_init(ObjectClass *klass, void *data) +static void erst_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -1030,7 +1029,7 @@ static void erst_class_init(ObjectClass *klass, void *data) k->device_id = PCI_DEVICE_ID_REDHAT_ACPI_ERST; k->revision = 0x00; k->class_id = PCI_CLASS_OTHERS; - dc->reset = erst_reset; + device_class_set_legacy_reset(dc, erst_reset); dc->vmsd = &erst_vmstate; dc->user_creatable = true; dc->hotpluggable = false; @@ -1045,7 +1044,7 @@ static const TypeInfo erst_type_info = { .parent = TYPE_PCI_DEVICE, .class_init = erst_class_init, .instance_size = sizeof(ERSTDeviceState), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { } } diff --git a/hw/acpi/generic_event_device.c b/hw/acpi/generic_event_device.c index 15b4c3ebbf2..95682b79a2d 100644 --- a/hw/acpi/generic_event_device.c +++ b/hw/acpi/generic_event_device.c @@ -12,20 +12,24 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "hw/acpi/acpi.h" +#include "hw/acpi/pcihp.h" #include "hw/acpi/generic_event_device.h" +#include "hw/pci/pci.h" #include "hw/irq.h" #include "hw/mem/pc-dimm.h" #include "hw/mem/nvdimm.h" +#include "hw/pci/pci_device.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "qemu/error-report.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" static const uint32_t ged_supported_events[] = { ACPI_GED_MEM_HOTPLUG_EVT, ACPI_GED_PWR_DOWN_EVT, ACPI_GED_NVDIMM_HOTPLUG_EVT, ACPI_GED_CPU_HOTPLUG_EVT, + ACPI_GED_PCI_HOTPLUG_EVT, }; /* @@ -121,6 +125,12 @@ void build_ged_aml(Aml *table, const char *name, HotplugHandler *hotplug_dev, aml_notify(aml_name("\\_SB.NVDR"), aml_int(0x80))); break; + case ACPI_GED_PCI_HOTPLUG_EVT: + aml_append(if_ctx, + aml_acquire(aml_name("\\_SB.PCI0.BLCK"), 0xFFFF)); + aml_append(if_ctx, aml_call0("\\_SB.PCI0.PCNT")); + aml_append(if_ctx, aml_release(aml_name("\\_SB.PCI0.BLCK"))); + break; default: /* * Please make sure all the events in ged_supported_events[] @@ -201,9 +211,9 @@ static void ged_regs_write(void *opaque, hwaddr addr, uint64_t data, switch (addr) { case ACPI_GED_REG_SLEEP_CTL: - slp_typ = (data >> 2) & 0x07; - slp_en = (data >> 5) & 0x01; - if (slp_en && slp_typ == 5) { + slp_typ = (data >> ACPI_GED_SLP_TYP_POS) & ACPI_GED_SLP_TYP_MASK; + slp_en = !!(data & ACPI_GED_SLP_EN); + if (slp_en && slp_typ == ACPI_GED_SLP_TYP_S5) { 
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); } return; @@ -227,6 +237,14 @@ static const MemoryRegionOps ged_regs_ops = { }, }; +static void acpi_ged_device_pre_plug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + acpi_pcihp_device_pre_plug_cb(hotplug_dev, dev, errp); + } +} + static void acpi_ged_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { @@ -240,6 +258,8 @@ static void acpi_ged_device_plug_cb(HotplugHandler *hotplug_dev, } } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { acpi_cpu_plug_cb(hotplug_dev, &s->cpuhp_state, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + acpi_pcihp_device_plug_cb(hotplug_dev, &s->pcihp_state, dev, errp); } else { error_setg(errp, "virt: device plug request for unsupported device" " type: %s", object_get_typename(OBJECT(dev))); @@ -256,6 +276,9 @@ static void acpi_ged_unplug_request_cb(HotplugHandler *hotplug_dev, acpi_memory_unplug_request_cb(hotplug_dev, &s->memhp_state, dev, errp); } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { acpi_cpu_unplug_request_cb(hotplug_dev, &s->cpuhp_state, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + acpi_pcihp_device_unplug_request_cb(hotplug_dev, &s->pcihp_state, + dev, errp); } else { error_setg(errp, "acpi: device unplug request for unsupported device" " type: %s", object_get_typename(OBJECT(dev))); @@ -271,6 +294,8 @@ static void acpi_ged_unplug_cb(HotplugHandler *hotplug_dev, acpi_memory_unplug_cb(&s->memhp_state, dev, errp); } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { acpi_cpu_unplug_cb(&s->cpuhp_state, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + acpi_pcihp_device_unplug_cb(hotplug_dev, &s->pcihp_state, dev, errp); } else { error_setg(errp, "acpi: device unplug for unsupported device" " type: %s", object_get_typename(OBJECT(dev))); @@ -299,6 +324,8 @@ static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev) sel = ACPI_GED_NVDIMM_HOTPLUG_EVT; } else if (ev & ACPI_CPU_HOTPLUG_STATUS) { sel = ACPI_GED_CPU_HOTPLUG_EVT; + } else if (ev & ACPI_PCI_HOTPLUG_STATUS) { + sel = ACPI_GED_PCI_HOTPLUG_EVT; } else { /* Unknown event. Return without generating interrupt. */ warn_report("GED: Unsupported event %d. 
No irq injected", ev); @@ -316,9 +343,12 @@ static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev) qemu_irq_pulse(s->irq); } -static Property acpi_ged_properties[] = { +static const Property acpi_ged_properties[] = { DEFINE_PROP_UINT32("ged-event", AcpiGedState, ged_event_bitmap, 0), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, AcpiGedState, + pcihp_state.use_acpi_hotplug_bridge, 0), + DEFINE_PROP_LINK("bus", AcpiGedState, pcihp_state.root, + TYPE_PCI_BUS, PCIBus *), }; static const VMStateDescription vmstate_memhp_state = { @@ -331,6 +361,24 @@ static const VMStateDescription vmstate_memhp_state = { } }; +static bool cpuhp_needed(void *opaque) +{ + MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); + + return mc->has_hotpluggable_cpus; +} + +static const VMStateDescription vmstate_cpuhp_state = { + .name = "acpi-ged/cpuhp", + .version_id = 1, + .minimum_version_id = 1, + .needed = cpuhp_needed, + .fields = (VMStateField[]) { + VMSTATE_CPU_HOTPLUG(cpuhp_state, AcpiGedState), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_ged_state = { .name = "acpi-ged-state", .version_id = 1, @@ -346,7 +394,7 @@ static const VMStateDescription vmstate_ghes = { .version_id = 1, .minimum_version_id = 1, .fields = (const VMStateField[]) { - VMSTATE_UINT64(ghes_addr_le, AcpiGhesState), + VMSTATE_UINT64(hw_error_le, AcpiGhesState), VMSTATE_END_OF_LIST() }, }; @@ -354,7 +402,7 @@ static const VMStateDescription vmstate_ghes = { static bool ghes_needed(void *opaque) { AcpiGedState *s = opaque; - return s->ghes_state.ghes_addr_le; + return s->ghes_state.hw_error_le; } static const VMStateDescription vmstate_ghes_state = { @@ -369,6 +417,25 @@ static const VMStateDescription vmstate_ghes_state = { } }; +static bool pcihp_needed(void *opaque) +{ + AcpiGedState *s = opaque; + return s->pcihp_state.use_acpi_hotplug_bridge; +} + +static const VMStateDescription vmstate_pcihp_state = { + .name = "acpi-ged/pcihp", + .version_id = 1, + .minimum_version_id = 1, + .needed = pcihp_needed, + .fields = (const VMStateField[]) { + VMSTATE_PCI_HOTPLUG(pcihp_state, + AcpiGedState, + NULL, NULL), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_acpi_ged = { .name = "acpi-ged", .version_id = 1, @@ -379,7 +446,9 @@ static const VMStateDescription vmstate_acpi_ged = { }, .subsections = (const VMStateDescription * const []) { &vmstate_memhp_state, + &vmstate_cpuhp_state, &vmstate_ghes_state, + &vmstate_pcihp_state, NULL } }; @@ -388,9 +457,13 @@ static void acpi_ged_realize(DeviceState *dev, Error **errp) { SysBusDevice *sbd = SYS_BUS_DEVICE(dev); AcpiGedState *s = ACPI_GED(dev); + AcpiPciHpState *pcihp_state = &s->pcihp_state; uint32_t ged_events; int i; + if (pcihp_state->use_acpi_hotplug_bridge) { + s->ged_event_bitmap |= ACPI_GED_PCI_HOTPLUG_EVT; + } ged_events = ctpop32(s->ged_event_bitmap); for (i = 0; i < ARRAY_SIZE(ged_supported_events) && ged_events; i++) { @@ -410,6 +483,13 @@ static void acpi_ged_realize(DeviceState *dev, Error **errp) cpu_hotplug_hw_init(&s->container_cpuhp, OBJECT(dev), &s->cpuhp_state, 0); break; + case ACPI_GED_PCI_HOTPLUG_EVT: + memory_region_init(&s->container_pcihp, OBJECT(dev), + ACPI_PCIHP_REGION_NAME, ACPI_PCIHP_SIZE); + sysbus_init_mmio(sbd, &s->container_pcihp); + acpi_pcihp_init(OBJECT(s), &s->pcihp_state, + &s->container_pcihp, 0); + qbus_set_hotplug_handler(BUS(s->pcihp_state.root), OBJECT(dev)); } ged_events--; } @@ -440,31 +520,45 @@ static void acpi_ged_initfn(Object *obj) * 
container for memory hotplug IO and expose it as GED sysbus * MMIO so that boards can map it separately. */ - memory_region_init(&s->container_memhp, OBJECT(dev), "memhp container", - MEMORY_HOTPLUG_IO_LEN); - sysbus_init_mmio(sbd, &s->container_memhp); - acpi_memory_hotplug_init(&s->container_memhp, OBJECT(dev), - &s->memhp_state, 0); + memory_region_init(&s->container_memhp, OBJECT(dev), "memhp container", + MEMORY_HOTPLUG_IO_LEN); + sysbus_init_mmio(sbd, &s->container_memhp); + acpi_memory_hotplug_init(&s->container_memhp, OBJECT(dev), + &s->memhp_state, 0); memory_region_init_io(&ged_st->regs, obj, &ged_regs_ops, ged_st, TYPE_ACPI_GED "-regs", ACPI_GED_REG_COUNT); sysbus_init_mmio(sbd, &ged_st->regs); } -static void acpi_ged_class_init(ObjectClass *class, void *data) +static void ged_reset_hold(Object *obj, ResetType type) +{ + AcpiGedState *s = ACPI_GED(obj); + + if (s->pcihp_state.use_acpi_hotplug_bridge) { + acpi_pcihp_reset(&s->pcihp_state); + } +} + +static void acpi_ged_class_init(ObjectClass *class, const void *data) { DeviceClass *dc = DEVICE_CLASS(class); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(class); AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_CLASS(class); + ResettableClass *rc = RESETTABLE_CLASS(class); + AcpiGedClass *gedc = ACPI_GED_CLASS(class); dc->desc = "ACPI Generic Event Device"; device_class_set_props(dc, acpi_ged_properties); dc->vmsd = &vmstate_acpi_ged; dc->realize = acpi_ged_realize; + hc->pre_plug = acpi_ged_device_pre_plug_cb; hc->plug = acpi_ged_device_plug_cb; hc->unplug_request = acpi_ged_unplug_request_cb; hc->unplug = acpi_ged_unplug_cb; + resettable_class_set_parent_phases(rc, NULL, ged_reset_hold, NULL, + &gedc->parent_phases); adevc->ospm_status = acpi_ged_ospm_status; adevc->send_event = acpi_ged_send_event; @@ -476,7 +570,8 @@ static const TypeInfo acpi_ged_info = { .instance_size = sizeof(AcpiGedState), .instance_init = acpi_ged_initfn, .class_init = acpi_ged_class_init, - .interfaces = (InterfaceInfo[]) { + .class_size = sizeof(AcpiGedClass), + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { TYPE_ACPI_DEVICE_IF }, { } diff --git a/hw/acpi/ghes-stub.c b/hw/acpi/ghes-stub.c index c315de1802d..7cec1812dad 100644 --- a/hw/acpi/ghes-stub.c +++ b/hw/acpi/ghes-stub.c @@ -11,7 +11,7 @@ #include "qemu/osdep.h" #include "hw/acpi/ghes.h" -int acpi_ghes_record_errors(uint8_t source_id, uint64_t physical_address) +int acpi_ghes_memory_errors(uint16_t source_id, uint64_t physical_address) { return -1; } diff --git a/hw/acpi/ghes.c b/hw/acpi/ghes.c index e9511d9b8f7..b85bb48195a 100644 --- a/hw/acpi/ghes.c +++ b/hw/acpi/ghes.c @@ -28,15 +28,12 @@ #include "hw/nvram/fw_cfg.h" #include "qemu/uuid.h" -#define ACPI_GHES_ERRORS_FW_CFG_FILE "etc/hardware_errors" -#define ACPI_GHES_DATA_ADDR_FW_CFG_FILE "etc/hardware_errors_addr" +#define ACPI_HW_ERROR_FW_CFG_FILE "etc/hardware_errors" +#define ACPI_HW_ERROR_ADDR_FW_CFG_FILE "etc/hardware_errors_addr" /* The max size in bytes for one error block */ #define ACPI_GHES_MAX_RAW_DATA_LENGTH (1 * KiB) -/* Now only support ARMv8 SEA notification type error source */ -#define ACPI_GHES_ERROR_SOURCE_COUNT 1 - /* Generic Hardware Error Source version 2 */ #define ACPI_GHES_SOURCE_GENERIC_ERROR_V2 10 @@ -184,51 +181,24 @@ static void acpi_ghes_build_append_mem_cper(GArray *table, build_append_int_noprefix(table, 0, 7); } -static int acpi_ghes_record_mem_error(uint64_t error_block_address, - uint64_t error_physical_addr) +static void +ghes_gen_err_data_uncorrectable_recoverable(GArray *block, + const 
uint8_t *section_type, + int data_length) { - GArray *block; - - /* Memory Error Section Type */ - const uint8_t uefi_cper_mem_sec[] = - UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \ - 0xED, 0x7C, 0x83, 0xB1); - /* invalid fru id: ACPI 4.0: 17.3.2.6.1 Generic Error Data, * Table 17-13 Generic Error Data Entry */ QemuUUID fru_id = {}; - uint32_t data_length; - - block = g_array_new(false, true /* clear */, 1); - - /* This is the length if adding a new generic error data entry*/ - data_length = ACPI_GHES_DATA_LENGTH + ACPI_GHES_MEM_CPER_LENGTH; - /* - * It should not run out of the preallocated memory if adding a new generic - * error data entry - */ - assert((data_length + ACPI_GHES_GESB_SIZE) <= - ACPI_GHES_MAX_RAW_DATA_LENGTH); /* Build the new generic error status block header */ acpi_ghes_generic_error_status(block, ACPI_GEBS_UNCORRECTABLE, 0, 0, data_length, ACPI_CPER_SEV_RECOVERABLE); /* Build this new generic error data entry header */ - acpi_ghes_generic_error_data(block, uefi_cper_mem_sec, + acpi_ghes_generic_error_data(block, section_type, ACPI_CPER_SEV_RECOVERABLE, 0, 0, ACPI_GHES_MEM_CPER_LENGTH, fru_id, 0); - - /* Build the memory section CPER for above new generic error data entry */ - acpi_ghes_build_append_mem_cper(block, error_physical_addr); - - /* Write the generic error data entry into guest memory */ - cpu_physical_memory_write(error_block_address, block->data, block->len); - - g_array_free(block, true); - - return 0; } /* @@ -236,7 +206,7 @@ static int acpi_ghes_record_mem_error(uint64_t error_block_address, * Initialize "etc/hardware_errors" and "etc/hardware_errors_addr" fw_cfg blobs. * See docs/specs/acpi_hest_ghes.rst for blobs format. */ -void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker) +static void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker) { int i, error_status_block_offset; @@ -264,7 +234,7 @@ void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker) ACPI_GHES_MAX_RAW_DATA_LENGTH * ACPI_GHES_ERROR_SOURCE_COUNT); /* Tell guest firmware to place hardware_errors blob into RAM */ - bios_linker_loader_alloc(linker, ACPI_GHES_ERRORS_FW_CFG_FILE, + bios_linker_loader_alloc(linker, ACPI_HW_ERROR_FW_CFG_FILE, hardware_errors, sizeof(uint64_t), false); for (i = 0; i < ACPI_GHES_ERROR_SOURCE_COUNT; i++) { @@ -273,23 +243,31 @@ void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker) * corresponding "Generic Error Status Block" */ bios_linker_loader_add_pointer(linker, - ACPI_GHES_ERRORS_FW_CFG_FILE, sizeof(uint64_t) * i, - sizeof(uint64_t), ACPI_GHES_ERRORS_FW_CFG_FILE, - error_status_block_offset + i * ACPI_GHES_MAX_RAW_DATA_LENGTH); + ACPI_HW_ERROR_FW_CFG_FILE, + sizeof(uint64_t) * i, + sizeof(uint64_t), + ACPI_HW_ERROR_FW_CFG_FILE, + error_status_block_offset + + i * ACPI_GHES_MAX_RAW_DATA_LENGTH); } /* * tell firmware to write hardware_errors GPA into * hardware_errors_addr fw_cfg, once the former has been initialized. 
*/ - bios_linker_loader_write_pointer(linker, ACPI_GHES_DATA_ADDR_FW_CFG_FILE, - 0, sizeof(uint64_t), ACPI_GHES_ERRORS_FW_CFG_FILE, 0); + bios_linker_loader_write_pointer(linker, ACPI_HW_ERROR_ADDR_FW_CFG_FILE, 0, + sizeof(uint64_t), + ACPI_HW_ERROR_FW_CFG_FILE, 0); } /* Build Generic Hardware Error Source version 2 (GHESv2) */ -static void build_ghes_v2(GArray *table_data, int source_id, BIOSLinker *linker) +static void build_ghes_v2(GArray *table_data, + BIOSLinker *linker, + enum AcpiGhesNotifyType notify, + uint16_t source_id) { uint64_t address_offset; + /* * Type: * Generic Hardware Error Source version 2(GHESv2 - Type 10) @@ -316,21 +294,13 @@ static void build_ghes_v2(GArray *table_data, int source_id, BIOSLinker *linker) build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 0x40, 0, 4 /* QWord access */, 0); bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, - address_offset + GAS_ADDR_OFFSET, sizeof(uint64_t), - ACPI_GHES_ERRORS_FW_CFG_FILE, source_id * sizeof(uint64_t)); + address_offset + GAS_ADDR_OFFSET, + sizeof(uint64_t), + ACPI_HW_ERROR_FW_CFG_FILE, + source_id * sizeof(uint64_t)); - switch (source_id) { - case ACPI_HEST_SRC_ID_SEA: - /* - * Notification Structure - * Now only enable ARMv8 SEA notification type - */ - build_ghes_hw_error_notification(table_data, ACPI_GHES_NOTIFY_SEA); - break; - default: - error_report("Not support this error source"); - abort(); - } + /* Notification Structure */ + build_ghes_hw_error_notification(table_data, notify); /* Error Status Block Length */ build_append_int_noprefix(table_data, ACPI_GHES_MAX_RAW_DATA_LENGTH, 4); @@ -344,9 +314,11 @@ static void build_ghes_v2(GArray *table_data, int source_id, BIOSLinker *linker) build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 0x40, 0, 4 /* QWord access */, 0); bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, - address_offset + GAS_ADDR_OFFSET, - sizeof(uint64_t), ACPI_GHES_ERRORS_FW_CFG_FILE, - (ACPI_GHES_ERROR_SOURCE_COUNT + source_id) * sizeof(uint64_t)); + address_offset + GAS_ADDR_OFFSET, + sizeof(uint64_t), + ACPI_HW_ERROR_FW_CFG_FILE, + (ACPI_GHES_ERROR_SOURCE_COUNT + source_id) + * sizeof(uint64_t)); /* * Read Ack Preserve field @@ -359,17 +331,21 @@ static void build_ghes_v2(GArray *table_data, int source_id, BIOSLinker *linker) } /* Build Hardware Error Source Table */ -void acpi_build_hest(GArray *table_data, BIOSLinker *linker, +void acpi_build_hest(GArray *table_data, GArray *hardware_errors, + BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { AcpiTable table = { .sig = "HEST", .rev = 1, .oem_id = oem_id, .oem_table_id = oem_table_id }; + build_ghes_error_table(hardware_errors, linker); + acpi_table_begin(&table, table_data); /* Error Source Count */ build_append_int_noprefix(table_data, ACPI_GHES_ERROR_SOURCE_COUNT, 4); - build_ghes_v2(table_data, ACPI_HEST_SRC_ID_SEA, linker); + build_ghes_v2(table_data, linker, + ACPI_GHES_NOTIFY_SEA, ACPI_HEST_SRC_ID_SEA); acpi_table_end(linker, &table); } @@ -378,70 +354,130 @@ void acpi_ghes_add_fw_cfg(AcpiGhesState *ags, FWCfgState *s, GArray *hardware_error) { /* Create a read-only fw_cfg file for GHES */ - fw_cfg_add_file(s, ACPI_GHES_ERRORS_FW_CFG_FILE, hardware_error->data, + fw_cfg_add_file(s, ACPI_HW_ERROR_FW_CFG_FILE, hardware_error->data, hardware_error->len); /* Create a read-write fw_cfg file for Address */ - fw_cfg_add_file_callback(s, ACPI_GHES_DATA_ADDR_FW_CFG_FILE, NULL, NULL, - NULL, &(ags->ghes_addr_le), sizeof(ags->ghes_addr_le), false); + fw_cfg_add_file_callback(s, 
ACPI_HW_ERROR_ADDR_FW_CFG_FILE, NULL, NULL, + NULL, &(ags->hw_error_le), sizeof(ags->hw_error_le), false); ags->present = true; } -int acpi_ghes_record_errors(uint8_t source_id, uint64_t physical_address) +static void get_hw_error_offsets(uint64_t ghes_addr, + uint64_t *cper_addr, + uint64_t *read_ack_register_addr) +{ + if (!ghes_addr) { + return; + } + + /* + * non-HEST version supports only one source, so no need to change + * the start offset based on the source ID. Also, we can't validate + * the source ID, as it is stored inside the HEST table. + */ + + cpu_physical_memory_read(ghes_addr, cper_addr, + sizeof(*cper_addr)); + + *cper_addr = le64_to_cpu(*cper_addr); + + /* + * As the current version supports only one source, the ack offset is + * just sizeof(uint64_t). + */ + *read_ack_register_addr = ghes_addr + sizeof(uint64_t); +} + +static void ghes_record_cper_errors(const void *cper, size_t len, + uint16_t source_id, Error **errp) { - uint64_t error_block_addr, read_ack_register_addr, read_ack_register = 0; - uint64_t start_addr; - bool ret = -1; + uint64_t cper_addr = 0, read_ack_register_addr = 0, read_ack_register; AcpiGedState *acpi_ged_state; AcpiGhesState *ags; - assert(source_id < ACPI_HEST_SRC_ID_RESERVED); + if (len > ACPI_GHES_MAX_RAW_DATA_LENGTH) { + error_setg(errp, "GHES CPER record is too big: %zd", len); + return; + } acpi_ged_state = ACPI_GED(object_resolve_path_type("", TYPE_ACPI_GED, NULL)); - g_assert(acpi_ged_state); + if (!acpi_ged_state) { + error_setg(errp, "Can't find ACPI_GED object"); + return; + } ags = &acpi_ged_state->ghes_state; - start_addr = le64_to_cpu(ags->ghes_addr_le); + assert(ACPI_GHES_ERROR_SOURCE_COUNT == 1); + get_hw_error_offsets(le64_to_cpu(ags->hw_error_le), + &cper_addr, &read_ack_register_addr); + + if (!cper_addr) { + error_setg(errp, "can not find Generic Error Status Block"); + return; + } + + cpu_physical_memory_read(read_ack_register_addr, + &read_ack_register, sizeof(read_ack_register)); + + /* zero means OSPM does not acknowledge the error */ + if (!read_ack_register) { + error_setg(errp, + "OSPM does not acknowledge previous error," + " so can not record CPER for current error anymore"); + return; + } + + read_ack_register = cpu_to_le64(0); + /* + * Clear the Read Ack Register, OSPM will write 1 to this register when + * it acknowledges the error. 
+ */ + cpu_physical_memory_write(read_ack_register_addr, + &read_ack_register, sizeof(uint64_t)); + + /* Write the generic error data entry into guest memory */ + cpu_physical_memory_write(cper_addr, cper, len); +} - if (physical_address) { +int acpi_ghes_memory_errors(uint16_t source_id, uint64_t physical_address) +{ + /* Memory Error Section Type */ + const uint8_t guid[] = + UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \ + 0xED, 0x7C, 0x83, 0xB1); + Error *errp = NULL; + int data_length; + GArray *block; - if (source_id < ACPI_HEST_SRC_ID_RESERVED) { - start_addr += source_id * sizeof(uint64_t); - } + block = g_array_new(false, true /* clear */, 1); - cpu_physical_memory_read(start_addr, &error_block_addr, - sizeof(error_block_addr)); + data_length = ACPI_GHES_DATA_LENGTH + ACPI_GHES_MEM_CPER_LENGTH; + /* + * It should not run out of the preallocated memory if adding a new generic + * error data entry + */ + assert((data_length + ACPI_GHES_GESB_SIZE) <= + ACPI_GHES_MAX_RAW_DATA_LENGTH); - error_block_addr = le64_to_cpu(error_block_addr); + ghes_gen_err_data_uncorrectable_recoverable(block, guid, data_length); - read_ack_register_addr = start_addr + - ACPI_GHES_ERROR_SOURCE_COUNT * sizeof(uint64_t); + /* Build the memory section CPER for above new generic error data entry */ + acpi_ghes_build_append_mem_cper(block, physical_address); - cpu_physical_memory_read(read_ack_register_addr, - &read_ack_register, sizeof(read_ack_register)); + /* Report the error */ + ghes_record_cper_errors(block->data, block->len, source_id, &errp); - /* zero means OSPM does not acknowledge the error */ - if (!read_ack_register) { - error_report("OSPM does not acknowledge previous error," - " so can not record CPER for current error anymore"); - } else if (error_block_addr) { - read_ack_register = cpu_to_le64(0); - /* - * Clear the Read Ack Register, OSPM will write it to 1 when - * it acknowledges this error. - */ - cpu_physical_memory_write(read_ack_register_addr, - &read_ack_register, sizeof(uint64_t)); + g_array_free(block, true); - ret = acpi_ghes_record_mem_error(error_block_addr, - physical_address); - } else - error_report("can not find Generic Error Status Block"); + if (errp) { + error_report_err(errp); + return -1; } - return ret; + return 0; } bool acpi_ghes_present(void) diff --git a/hw/acpi/hmat.c b/hw/acpi/hmat.c index 9b1662b6b8a..ca7b183d9eb 100644 --- a/hw/acpi/hmat.c +++ b/hw/acpi/hmat.c @@ -26,7 +26,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" -#include "sysemu/numa.h" +#include "system/numa.h" #include "hw/acpi/aml-build.h" #include "hw/acpi/hmat.h" diff --git a/hw/acpi/hmat.h b/hw/acpi/hmat.h index fd989cb6619..362b05e2ddf 100644 --- a/hw/acpi/hmat.h +++ b/hw/acpi/hmat.h @@ -28,7 +28,7 @@ #define HMAT_H #include "hw/acpi/bios-linker-loader.h" -#include "sysemu/numa.h" +#include "system/numa.h" /* * ACPI 6.3: 5.2.27.3 Memory Proximity Domain Attributes Structure, diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c index 02d8546bd36..2b3b493c014 100644 --- a/hw/acpi/ich9.c +++ b/hw/acpi/ich9.c @@ -31,24 +31,16 @@ #include "migration/vmstate.h" #include "qemu/timer.h" #include "hw/core/cpu.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" +#include "system/reset.h" +#include "system/runstate.h" #include "hw/acpi/acpi.h" #include "hw/acpi/ich9_tco.h" +#include "hw/acpi/ich9_timer.h" #include "hw/southbridge/ich9.h" #include "hw/mem/pc-dimm.h" #include "hw/mem/nvdimm.h" -//#define DEBUG - -#ifdef DEBUG -#define ICH9_DEBUG(fmt, ...) 
\ -do { printf("%s "fmt, __func__, ## __VA_ARGS__); } while (0) -#else -#define ICH9_DEBUG(fmt, ...) do { } while (0) -#endif - static void ich9_pm_update_sci_fn(ACPIREGS *regs) { ICH9LPCPMRegs *pm = container_of(regs, ICH9LPCPMRegs, acpi_regs); @@ -108,6 +100,18 @@ static void ich9_smi_writel(void *opaque, hwaddr addr, uint64_t val, } pm->smi_en &= ~pm->smi_en_wmask; pm->smi_en |= (val & pm->smi_en_wmask); + if (pm->swsmi_timer_enabled) { + ich9_pm_update_swsmi_timer(pm, pm->smi_en & + ICH9_PMIO_SMI_EN_SWSMI_EN); + } + if (pm->periodic_timer_enabled) { + ich9_pm_update_periodic_timer(pm, pm->smi_en & + ICH9_PMIO_SMI_EN_PERIODIC_EN); + } + break; + case 4: + pm->smi_sts &= ~pm->smi_sts_wmask; + pm->smi_sts |= (val & pm->smi_sts_wmask); break; } } @@ -122,8 +126,6 @@ static const MemoryRegionOps ich9_smi_ops = { void ich9_pm_iospace_update(ICH9LPCPMRegs *pm, uint32_t pm_io_base) { - ICH9_DEBUG("to 0x%x\n", pm_io_base); - assert((pm_io_base & ICH9_PMIO_MASK) == 0); pm->pm_io_base = pm_io_base; @@ -286,6 +288,8 @@ static void pm_powerdown_req(Notifier *n, void *opaque) void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, qemu_irq sci_irq) { + pm->smi_sts_wmask = 0; + memory_region_init(&pm->io, OBJECT(lpc_pci), "ich9-pm", ICH9_PMIO_SIZE); memory_region_set_enabled(&pm->io, false); memory_region_add_subregion(pci_address_space_io(lpc_pci), @@ -305,14 +309,23 @@ void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, qemu_irq sci_irq) "acpi-smi", 8); memory_region_add_subregion(&pm->io, ICH9_PMIO_SMI_EN, &pm->io_smi); + if (pm->swsmi_timer_enabled) { + ich9_pm_swsmi_timer_init(pm); + } + + if (pm->periodic_timer_enabled) { + ich9_pm_periodic_timer_init(pm); + } + if (pm->enable_tco) { acpi_pm_tco_init(&pm->tco_regs, &pm->io); } if (pm->acpi_pci_hotplug.use_acpi_hotplug_bridge) { + object_property_set_link(OBJECT(lpc_pci), "bus", + OBJECT(pci_get_bus(lpc_pci)), &error_abort); acpi_pcihp_init(OBJECT(lpc_pci), &pm->acpi_pci_hotplug, - pci_get_bus(lpc_pci), pci_address_space_io(lpc_pci), ACPI_PCIHP_ADDR_ICH9); @@ -416,6 +429,10 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm) object_property_add_uint32_ptr(obj, ACPI_PM_PROP_PM_IO_BASE, &pm->pm_io_base, OBJ_PROP_FLAG_READ); + object_property_add_link(obj, "bus", TYPE_PCI_BUS, + (Object **)&pm->acpi_pci_hotplug.root, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); object_property_add(obj, ACPI_PM_PROP_GPE0_BLK, "uint32", ich9_pm_get_gpe0_blk, NULL, NULL, pm); @@ -547,7 +564,7 @@ void ich9_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, bool ich9_pm_is_hotpluggable_bus(HotplugHandler *hotplug_dev, BusState *bus) { ICH9LPCState *lpc = ICH9_LPC_DEVICE(hotplug_dev); - return acpi_pcihp_is_hotpluggbale_bus(&lpc->pm.acpi_pci_hotplug, bus); + return acpi_pcihp_is_hotpluggable_bus(&lpc->pm.acpi_pci_hotplug, bus); } void ich9_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list) diff --git a/hw/acpi/ich9_tco.c b/hw/acpi/ich9_tco.c index 81606219f73..6300db65b72 100644 --- a/hw/acpi/ich9_tco.c +++ b/hw/acpi/ich9_tco.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/watchdog.h" +#include "system/watchdog.h" #include "hw/southbridge/ich9.h" #include "migration/vmstate.h" diff --git a/hw/acpi/ich9_timer.c b/hw/acpi/ich9_timer.c new file mode 100644 index 00000000000..5b1c9101566 --- /dev/null +++ b/hw/acpi/ich9_timer.c @@ -0,0 +1,93 @@ +/* + * QEMU ICH9 Timer emulation + * + * Copyright (c) 2024 Dominic Prinz + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/core/cpu.h" +#include "hw/pci/pci.h" +#include "hw/southbridge/ich9.h" +#include "qemu/timer.h" + +#include "hw/acpi/ich9_timer.h" + +void ich9_pm_update_swsmi_timer(ICH9LPCPMRegs *pm, bool enable) +{ + uint16_t swsmi_rate_sel; + int64_t expire_time; + ICH9LPCState *lpc; + + if (enable) { + lpc = container_of(pm, ICH9LPCState, pm); + swsmi_rate_sel = + (pci_get_word(lpc->d.config + ICH9_LPC_GEN_PMCON_3) & 0xc0) >> 6; + + if (swsmi_rate_sel == 0) { + expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1500000LL; + } else { + expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + 8 * (1 << swsmi_rate_sel) * 1000000LL; + } + + timer_mod(pm->swsmi_timer, expire_time); + } else { + timer_del(pm->swsmi_timer); + } +} + +static void ich9_pm_swsmi_timer_expired(void *opaque) +{ + ICH9LPCPMRegs *pm = opaque; + + pm->smi_sts |= ICH9_PMIO_SMI_STS_SWSMI_STS; + ich9_generate_smi(); + + ich9_pm_update_swsmi_timer(pm, pm->smi_en & ICH9_PMIO_SMI_EN_SWSMI_EN); +} + +void ich9_pm_swsmi_timer_init(ICH9LPCPMRegs *pm) +{ + pm->smi_sts_wmask |= ICH9_PMIO_SMI_STS_SWSMI_STS; + pm->swsmi_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, ich9_pm_swsmi_timer_expired, pm); +} + +void ich9_pm_update_periodic_timer(ICH9LPCPMRegs *pm, bool enable) +{ + uint16_t per_smi_sel; + int64_t expire_time; + ICH9LPCState *lpc; + + if (enable) { + lpc = container_of(pm, ICH9LPCState, pm); + per_smi_sel = pci_get_word(lpc->d.config + ICH9_LPC_GEN_PMCON_1) & 3; + expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + 8 * (1 << (3 - per_smi_sel)) * NANOSECONDS_PER_SECOND; + + timer_mod(pm->periodic_timer, expire_time); + } else { + timer_del(pm->periodic_timer); + } +} + +static void ich9_pm_periodic_timer_expired(void *opaque) +{ + ICH9LPCPMRegs *pm = opaque; + + pm->smi_sts = ICH9_PMIO_SMI_STS_PERIODIC_STS; + ich9_generate_smi(); + + ich9_pm_update_periodic_timer(pm, + pm->smi_en & ICH9_PMIO_SMI_EN_PERIODIC_EN); +} + +void ich9_pm_periodic_timer_init(ICH9LPCPMRegs *pm) +{ + pm->smi_sts_wmask |= ICH9_PMIO_SMI_STS_PERIODIC_STS; + pm->periodic_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, ich9_pm_periodic_timer_expired, pm); +} diff --git a/hw/acpi/ipmi.c b/hw/acpi/ipmi.c index a20e57d465c..39f8f2f6d68 100644 --- a/hw/acpi/ipmi.c +++ b/hw/acpi/ipmi.c @@ -55,7 +55,8 @@ static Aml *aml_ipmi_crs(IPMIFwInfo *info) abort(); } - if (info->interrupt_number) { + /* Should PCI interrupts also be appended? 
*/ + if (info->irq_source == IPMI_ISA_IRQ && info->interrupt_number) { aml_append(crs, aml_irq_no_flags(info->interrupt_number)); } diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build index fa5c07db906..a5db53a422f 100644 --- a/hw/acpi/meson.build +++ b/hw/acpi/meson.build @@ -1,6 +1,6 @@ acpi_ss = ss.source_set() acpi_ss.add(files( - 'acpi_generic_initiator.c', + 'acpi_egm_memory.c', 'acpi_interface.c', 'aml-build.c', 'bios-linker-loader.c', @@ -16,6 +16,7 @@ acpi_ss.add(when: 'CONFIG_ACPI_NVDIMM', if_false: files('acpi-nvdimm-stub.c')) acpi_ss.add(when: 'CONFIG_ACPI_PCI', if_true: files('pci.c')) acpi_ss.add(when: 'CONFIG_ACPI_CXL', if_true: files('cxl.c'), if_false: files('cxl-stub.c')) acpi_ss.add(when: 'CONFIG_ACPI_VMGENID', if_true: files('vmgenid.c')) +acpi_ss.add(when: 'CONFIG_ACPI_VMCLOCK', if_true: files('vmclock.c')) acpi_ss.add(when: 'CONFIG_ACPI_HW_REDUCED', if_true: files('generic_event_device.c')) acpi_ss.add(when: 'CONFIG_ACPI_HMAT', if_true: files('hmat.c')) acpi_ss.add(when: 'CONFIG_ACPI_APEI', if_true: files('ghes.c'), if_false: files('ghes-stub.c')) @@ -24,7 +25,7 @@ acpi_ss.add(when: 'CONFIG_ACPI_PCI_BRIDGE', if_true: files('pci-bridge.c')) acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_true: files('pcihp.c')) acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_false: files('acpi-pci-hotplug-stub.c')) acpi_ss.add(when: 'CONFIG_ACPI_VIOT', if_true: files('viot.c')) -acpi_ss.add(when: 'CONFIG_ACPI_ICH9', if_true: files('ich9.c', 'ich9_tco.c')) +acpi_ss.add(when: 'CONFIG_ACPI_ICH9', if_true: files('ich9.c', 'ich9_tco.c', 'ich9_timer.c')) acpi_ss.add(when: 'CONFIG_ACPI_ERST', if_true: files('erst.c')) acpi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c'), if_false: files('ipmi-stub.c')) acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c')) diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c index 9ba90806f26..732d613ac09 100644 --- a/hw/acpi/nvdimm.c +++ b/hw/acpi/nvdimm.c @@ -535,7 +535,7 @@ nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr) #define NVDIMM_QEMU_RSVD_HANDLE_ROOT 0x10000 -/* Read FIT data, defined in docs/specs/acpi_nvdimm.txt. */ +/* Read FIT data, defined in docs/specs/acpi_nvdimm.rst. */ static void nvdimm_dsm_func_read_fit(NVDIMMState *state, NvdimmDsmIn *in, hwaddr dsm_mem_addr) { diff --git a/hw/acpi/pci-bridge.c b/hw/acpi/pci-bridge.c index 7baa7034a16..394a9194799 100644 --- a/hw/acpi/pci-bridge.c +++ b/hw/acpi/pci-bridge.c @@ -35,3 +35,57 @@ void build_pci_bridge_aml(AcpiDevAmlIf *adev, Aml *scope) } } } + +Aml *build_pci_bridge_edsm(void) +{ + Aml *method, *ifctx; + Aml *zero = aml_int(0); + Aml *func = aml_arg(2); + Aml *ret = aml_local(0); + Aml *aidx = aml_local(1); + Aml *params = aml_arg(4); + + method = aml_method("EDSM", 5, AML_SERIALIZED); + + /* get supported functions */ + ifctx = aml_if(aml_equal(func, zero)); + { + /* 1: have supported functions */ + /* 7: support for function 7 */ + const uint8_t caps = 1 | BIT(7); + build_append_pci_dsm_func0_common(ifctx, ret); + aml_append(ifctx, aml_store(aml_int(caps), aml_index(ret, zero))); + aml_append(ifctx, aml_return(ret)); + } + aml_append(method, ifctx); + + /* handle specific functions requests */ + /* + * PCI Firmware Specification 3.1 + * 4.6.7. _DSM for Naming a PCI or PCI Express Device Under + * Operating Systems + */ + ifctx = aml_if(aml_equal(func, aml_int(7))); + { + Aml *pkg = aml_package(2); + aml_append(pkg, zero); + /* optional, if not impl. 
should return null string */ + aml_append(pkg, aml_string("%s", "")); + aml_append(ifctx, aml_store(pkg, ret)); + + /* + * IASL is fine when initializing Package with computational data, + * however it makes guest unhappy /it fails to process such AML/. + * So use runtime assignment to set acpi-index after initializer + * to make OSPM happy. + */ + aml_append(ifctx, + aml_store(aml_derefof(aml_index(params, aml_int(0))), aidx)); + aml_append(ifctx, aml_store(aidx, aml_index(ret, zero))); + aml_append(ifctx, aml_return(ret)); + } + aml_append(method, ifctx); + + return method; +} + diff --git a/hw/acpi/pci.c b/hw/acpi/pci.c index 20b70dcd815..a6e474af0f7 100644 --- a/hw/acpi/pci.c +++ b/hw/acpi/pci.c @@ -24,8 +24,14 @@ */ #include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qom/object_interfaces.h" +#include "qapi/error.h" +#include "hw/boards.h" #include "hw/acpi/aml-build.h" #include "hw/acpi/pci.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_device.h" #include "hw/pci/pcie_host.h" /* @@ -59,3 +65,337 @@ void build_mcfg(GArray *table_data, BIOSLinker *linker, AcpiMcfgInfo *info, acpi_table_end(linker, &table); } + +typedef struct AcpiGenericInitiator { + /* private */ + Object parent; + + /* public */ + char *pci_dev; + uint32_t node; +} AcpiGenericInitiator; + +typedef struct AcpiGenericInitiatorClass { + ObjectClass parent_class; +} AcpiGenericInitiatorClass; + +#define TYPE_ACPI_GENERIC_INITIATOR "acpi-generic-initiator" + +OBJECT_DEFINE_TYPE_WITH_INTERFACES(AcpiGenericInitiator, acpi_generic_initiator, + ACPI_GENERIC_INITIATOR, OBJECT, + { TYPE_USER_CREATABLE }, + { NULL }) + +OBJECT_DECLARE_SIMPLE_TYPE(AcpiGenericInitiator, ACPI_GENERIC_INITIATOR) + +static void acpi_generic_initiator_init(Object *obj) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + + gi->node = MAX_NODES; + gi->pci_dev = NULL; +} + +static void acpi_generic_initiator_finalize(Object *obj) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + + g_free(gi->pci_dev); +} + +static void acpi_generic_initiator_set_pci_device(Object *obj, const char *val, + Error **errp) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + + gi->pci_dev = g_strdup(val); +} + +static void acpi_generic_initiator_set_node(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + MachineState *ms = MACHINE(qdev_get_machine()); + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + if (value >= MAX_NODES) { + error_printf("%s: Invalid NUMA node specified\n", + TYPE_ACPI_GENERIC_INITIATOR); + exit(1); + } + + gi->node = value; + ms->numa_state->nodes[gi->node].has_gi = true; +} + +static void acpi_generic_initiator_class_init(ObjectClass *oc, const void *data) +{ + object_class_property_add_str(oc, "pci-dev", NULL, + acpi_generic_initiator_set_pci_device); + object_class_property_set_description(oc, "pci-dev", + "PCI device to associate with the node"); + object_class_property_add(oc, "node", "int", NULL, + acpi_generic_initiator_set_node, NULL, NULL); + object_class_property_set_description(oc, "node", + "NUMA node associated with the PCI device"); +} + +static gint memory_device_addr_sort(gconstpointer a, gconstpointer b) +{ + const AcpiGenericInitiator *gi_a = a; + const AcpiGenericInitiator *gi_b = b; + + if (gi_a->node > gi_b->node) { + return 1; + } else if (gi_a->node < gi_b->node) { + return -1; + } + return 0; +} + +static int acpi_generic_initiator_list(Object *obj, 
void *opaque) +{ + GSList **list = opaque; + + if (object_dynamic_cast(obj, TYPE_ACPI_GENERIC_INITIATOR)) { + *list = g_slist_insert_sorted(*list, ACPI_GENERIC_INITIATOR(obj), + memory_device_addr_sort); + } + + object_child_foreach(obj, acpi_generic_initiator_list, opaque); + return 0; +} + +/* + * Identify Generic Initiator objects and link them into the list which is + * returned to the caller. + * + * Note: it is the caller's responsibility to free the list to avoid + * memory leak. + */ +static GSList *acpi_generic_initiator_get_list(void) +{ + GSList *list = NULL; + + object_child_foreach(object_get_root(), + acpi_generic_initiator_list, &list); + return list; +} + +static int build_acpi_generic_initiator(AcpiGenericInitiator *gi, + GArray *table_data) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + int32_t devfn; + uint8_t bus; + Object *o; + + if (gi->node >= ms->numa_state->num_nodes) { + error_printf("%s: Specified node %d is invalid.\n", + TYPE_ACPI_GENERIC_INITIATOR, gi->node); + exit(1); + } + + o = object_resolve_path_type(gi->pci_dev, TYPE_PCI_DEVICE, NULL); + if (!o) { + error_printf("%s: Specified device must be a PCI device.\n", + TYPE_ACPI_GENERIC_INITIATOR); + exit(1); + } + + bus = object_property_get_uint(o, "busnr", &error_fatal); + devfn = object_property_get_uint(o, "addr", &error_fatal); + /* devfn is constrained in PCI to be 8 bit but storage is an int32_t */ + assert(devfn >= 0 && devfn < PCI_DEVFN_MAX); + + build_srat_pci_generic_initiator(table_data, gi->node, 0, bus, devfn); + + return 0; +} + +static void build_all_acpi_generic_initiators(GArray *table_data) +{ + GSList *gi_list, *list = acpi_generic_initiator_get_list(); + AcpiGenericInitiator *gi; + + for (gi_list = list; gi_list; gi_list = gi_list->next) { + gi = gi_list->data; + build_acpi_generic_initiator(gi, table_data); + } + + g_slist_free(list); +} + +typedef struct AcpiGenericPort { + /* private */ + Object parent; + + /* public */ + char *pci_bus; + uint32_t node; +} AcpiGenericPort; + +typedef struct AcpiGenericPortClass { + ObjectClass parent_class; +} AcpiGenericPortClass; + +#define TYPE_ACPI_GENERIC_PORT "acpi-generic-port" + +OBJECT_DEFINE_TYPE_WITH_INTERFACES(AcpiGenericPort, acpi_generic_port, + ACPI_GENERIC_PORT, OBJECT, + { TYPE_USER_CREATABLE }, + { NULL }) + +OBJECT_DECLARE_SIMPLE_TYPE(AcpiGenericPort, ACPI_GENERIC_PORT) + +static void acpi_generic_port_init(Object *obj) +{ + AcpiGenericPort *gp = ACPI_GENERIC_PORT(obj); + + gp->node = MAX_NODES; + gp->pci_bus = NULL; +} + +static void acpi_generic_port_finalize(Object *obj) +{ + AcpiGenericPort *gp = ACPI_GENERIC_PORT(obj); + + g_free(gp->pci_bus); +} + +static void acpi_generic_port_set_pci_bus(Object *obj, const char *val, + Error **errp) +{ + AcpiGenericPort *gp = ACPI_GENERIC_PORT(obj); + + gp->pci_bus = g_strdup(val); +} + +static void acpi_generic_port_set_node(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + AcpiGenericPort *gp = ACPI_GENERIC_PORT(obj); + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + if (value >= MAX_NODES) { + error_printf("%s: Invalid NUMA node specified\n", + TYPE_ACPI_GENERIC_INITIATOR); + exit(1); + } + + gp->node = value; +} + +static void acpi_generic_port_class_init(ObjectClass *oc, const void *data) +{ + object_class_property_add_str(oc, "pci-bus", NULL, + acpi_generic_port_set_pci_bus); + object_class_property_set_description(oc, "pci-bus", + "PCI Bus of the host bridge associated with this GP affinity structure"); + 
object_class_property_add(oc, "node", "int", NULL, + acpi_generic_port_set_node, NULL, NULL); + object_class_property_set_description(oc, "node", + "The NUMA node like ID to index HMAT/SLIT NUMA properties involving GP"); +} + +static int build_acpi_generic_port(Object *obj, void *opaque) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + const char *hid = "ACPI0016"; + GArray *table_data = opaque; + AcpiGenericPort *gp; + uint32_t uid; + Object *o; + + if (!object_dynamic_cast(obj, TYPE_ACPI_GENERIC_PORT)) { + return 0; + } + + gp = ACPI_GENERIC_PORT(obj); + + if (gp->node >= ms->numa_state->num_nodes) { + error_printf("%s: node %d is invalid.\n", + TYPE_ACPI_GENERIC_PORT, gp->node); + exit(1); + } + + o = object_resolve_path_type(gp->pci_bus, TYPE_PXB_CXL_BUS, NULL); + if (!o) { + error_printf("%s: device must be a CXL host bridge.\n", + TYPE_ACPI_GENERIC_PORT); + exit(1); + } + + uid = object_property_get_uint(o, "acpi_uid", &error_fatal); + build_srat_acpi_generic_port(table_data, gp->node, hid, uid); + + return 0; +} + +void build_srat_generic_affinity_structures(GArray *table_data) +{ + build_all_acpi_generic_initiators(table_data); + + object_child_foreach_recursive(object_get_root(), build_acpi_generic_port, + table_data); +} + +Aml *build_pci_host_bridge_osc_method(bool enable_native_pcie_hotplug) +{ + Aml *if_ctx; + Aml *if_ctx2; + Aml *else_ctx; + Aml *method; + Aml *a_cwd1 = aml_name("CDW1"); + Aml *a_ctrl = aml_local(0); + + method = aml_method("_OSC", 4, AML_NOTSERIALIZED); + aml_append(method, aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1")); + + if_ctx = aml_if(aml_equal( + aml_arg(0), aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766"))); + aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2")); + aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3")); + + aml_append(if_ctx, aml_store(aml_name("CDW3"), a_ctrl)); + + /* + * Always allow native PME, AER (no dependencies) + * Allow SHPC (PCI bridges can have SHPC controller) + * Disable PCIe Native Hot-plug if ACPI PCI Hot-plug is enabled. + */ + aml_append(if_ctx, aml_and(a_ctrl, + aml_int(0x1E | (enable_native_pcie_hotplug ? 0x1 : 0x0)), a_ctrl)); + + if_ctx2 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(1)))); + /* Unknown revision */ + aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x08), a_cwd1)); + aml_append(if_ctx, if_ctx2); + + if_ctx2 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), a_ctrl))); + /* Capabilities bits were masked */ + aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x10), a_cwd1)); + aml_append(if_ctx, if_ctx2); + + /* Update DWORD3 in the buffer */ + aml_append(if_ctx, aml_store(a_ctrl, aml_name("CDW3"))); + aml_append(method, if_ctx); + + else_ctx = aml_else(); + /* Unrecognized UUID */ + aml_append(else_ctx, aml_or(a_cwd1, aml_int(4), a_cwd1)); + aml_append(method, else_ctx); + + aml_append(method, aml_return(aml_arg(3))); + return method; +} diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c index 5f79c9016b4..4922bbc778f 100644 --- a/hw/acpi/pcihp.c +++ b/hw/acpi/pcihp.c @@ -3,7 +3,7 @@ * * QEMU supports PCI hotplug via ACPI. This module * implements the interface between QEMU and the ACPI BIOS. - * Interface specification - see docs/specs/acpi_pci_hotplug.txt + * Interface specification - see docs/specs/acpi_pci_hotplug.rst * * Copyright (c) 2013, Red Hat Inc, Michael S. 
Tsirkin (mst@redhat.com) * Copyright (c) 2006 Fabrice Bellard @@ -26,7 +26,8 @@ #include "qemu/osdep.h" #include "hw/acpi/pcihp.h" - +#include "hw/acpi/aml-build.h" +#include "hw/acpi/acpi_aml_interface.h" #include "hw/pci-host/i440fx.h" #include "hw/pci/pci.h" #include "hw/pci/pci_bridge.h" @@ -39,9 +40,9 @@ #include "migration/vmstate.h" #include "qapi/error.h" #include "qom/qom-qobject.h" +#include "qobject/qnum.h" #include "trace.h" -#define ACPI_PCIHP_SIZE 0x0018 #define PCI_UP_BASE 0x0000 #define PCI_DOWN_BASE 0x0004 #define PCI_EJ_BASE 0x0008 @@ -97,10 +98,10 @@ static void *acpi_set_bsel(PCIBus *bus, void *opaque) return info; } -static void acpi_set_pci_info(bool has_bridge_hotplug) +static void acpi_set_pci_info(AcpiPciHpState *s) { static bool bsel_is_set; - Object *host = acpi_get_i386_pci_host(); + bool has_bridge_hotplug = s->use_acpi_hotplug_bridge; PCIBus *bus; BSELInfo info = { .bsel_alloc = ACPI_PCIHP_BSEL_DEFAULT, .has_bridge_hotplug = has_bridge_hotplug }; @@ -110,11 +111,8 @@ static void acpi_set_pci_info(bool has_bridge_hotplug) } bsel_is_set = true; - if (!host) { - return; - } - bus = PCI_HOST_BRIDGE(host)->bus; + bus = s->root; if (bus) { /* Scan all PCI buses. Set property to enable acpi based hotplug. */ pci_for_each_bus_depth_first(bus, acpi_set_bsel, NULL, &info); @@ -264,7 +262,7 @@ static void acpi_pcihp_update(AcpiPciHpState *s) void acpi_pcihp_reset(AcpiPciHpState *s) { - acpi_set_pci_info(s->use_acpi_hotplug_bridge); + acpi_set_pci_info(s); acpi_pcihp_update(s); } @@ -371,7 +369,7 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev, acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS); } -bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus) +bool acpi_pcihp_is_hotpluggable_bus(AcpiPciHpState *s, BusState *bus) { Object *o = OBJECT(bus->parent); @@ -495,13 +493,13 @@ static const MemoryRegionOps acpi_pcihp_io_ops = { }, }; -void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus, +void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, MemoryRegion *io, uint16_t io_base) { s->io_len = ACPI_PCIHP_SIZE; s->io_base = io_base; - s->root = root_bus; + assert(s->root); memory_region_init_io(&s->io, owner, &acpi_pcihp_io_ops, s, "acpi-pci-hotplug", s->io_len); @@ -513,6 +511,425 @@ void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus, OBJ_PROP_FLAG_READ); } +void build_append_pci_dsm_func0_common(Aml *ctx, Aml *retvar) +{ + Aml *UUID, *ifctx1; + uint8_t byte_list[1] = { 0 }; /* nothing supported yet */ + + aml_append(ctx, aml_store(aml_buffer(1, byte_list), retvar)); + /* + * PCI Firmware Specification 3.1 + * 4.6. 
_DSM Definitions for PCI + */ + UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D"); + ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(0), UUID))); + { + /* call is for unsupported UUID, bail out */ + aml_append(ifctx1, aml_return(retvar)); + } + aml_append(ctx, ifctx1); + + ifctx1 = aml_if(aml_lless(aml_arg(1), aml_int(2))); + { + /* call is for unsupported REV, bail out */ + aml_append(ifctx1, aml_return(retvar)); + } + aml_append(ctx, ifctx1); +} + +static Aml *aml_pci_pdsm(void) +{ + Aml *method, *ifctx, *ifctx1; + Aml *ret = aml_local(0); + Aml *caps = aml_local(1); + Aml *acpi_index = aml_local(2); + Aml *zero = aml_int(0); + Aml *one = aml_int(1); + Aml *not_supp = aml_int(0xFFFFFFFF); + Aml *func = aml_arg(2); + Aml *params = aml_arg(4); + Aml *bnum = aml_derefof(aml_index(params, aml_int(0))); + Aml *sunum = aml_derefof(aml_index(params, aml_int(1))); + + method = aml_method("PDSM", 5, AML_SERIALIZED); + + /* get supported functions */ + ifctx = aml_if(aml_equal(func, zero)); + { + build_append_pci_dsm_func0_common(ifctx, ret); + + aml_append(ifctx, aml_store(zero, caps)); + aml_append(ifctx, + aml_store(aml_call2("AIDX", bnum, sunum), acpi_index)); + /* + * advertise function 7 if device has acpi-index + * acpi_index values: + * 0: not present (default value) + * FFFFFFFF: not supported (old QEMU without PIDX reg) + * other: device's acpi-index + */ + ifctx1 = aml_if(aml_lnot( + aml_or(aml_equal(acpi_index, zero), + aml_equal(acpi_index, not_supp), NULL) + )); + { + /* have supported functions */ + aml_append(ifctx1, aml_or(caps, one, caps)); + /* support for function 7 */ + aml_append(ifctx1, + aml_or(caps, aml_shiftleft(one, aml_int(7)), caps)); + } + aml_append(ifctx, ifctx1); + + aml_append(ifctx, aml_store(caps, aml_index(ret, zero))); + aml_append(ifctx, aml_return(ret)); + } + aml_append(method, ifctx); + + /* handle specific functions requests */ + /* + * PCI Firmware Specification 3.1 + * 4.6.7. _DSM for Naming a PCI or PCI Express Device Under + * Operating Systems + */ + ifctx = aml_if(aml_equal(func, aml_int(7))); + { + Aml *pkg = aml_package(2); + + aml_append(ifctx, aml_store(aml_call2("AIDX", bnum, sunum), acpi_index)); + aml_append(ifctx, aml_store(pkg, ret)); + /* + * Windows calls func=7 without checking if it's available, + * as workaround Microsoft has suggested to return invalid for func7 + * Package, so return 2 elements package but only initialize elements + * when acpi_index is supported and leave them uninitialized, which + * leads elements to being Uninitialized ObjectType and should trip + * Windows into discarding result as an unexpected and prevent setting + * bogus 'PCI Label' on the device. + */ + ifctx1 = aml_if(aml_lnot(aml_lor( + aml_equal(acpi_index, zero), aml_equal(acpi_index, not_supp) + ))); + { + aml_append(ifctx1, aml_store(acpi_index, aml_index(ret, zero))); + /* + * optional, if not impl. 
should return null string + */ + aml_append(ifctx1, aml_store(aml_string("%s", ""), + aml_index(ret, one))); + } + aml_append(ifctx, ifctx1); + + aml_append(ifctx, aml_return(ret)); + } + + aml_append(method, ifctx); + return method; +} + +void build_acpi_pci_hotplug(Aml *table, AmlRegionSpace rs, uint64_t pcihp_addr) +{ + Aml *scope; + Aml *field; + Aml *method; + + scope = aml_scope("_SB.PCI0"); + + aml_append(scope, + aml_operation_region("PCST", rs, aml_int(pcihp_addr), 0x08)); + field = aml_field("PCST", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); + aml_append(field, aml_named_field("PCIU", 32)); + aml_append(field, aml_named_field("PCID", 32)); + aml_append(scope, field); + + aml_append(scope, + aml_operation_region("SEJ", rs, + aml_int(pcihp_addr + ACPI_PCIHP_SEJ_BASE), 0x04)); + field = aml_field("SEJ", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); + aml_append(field, aml_named_field("B0EJ", 32)); + aml_append(scope, field); + + aml_append(scope, + aml_operation_region("BNMR", rs, + aml_int(pcihp_addr + ACPI_PCIHP_BNMR_BASE), 0x08)); + field = aml_field("BNMR", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); + aml_append(field, aml_named_field("BNUM", 32)); + aml_append(field, aml_named_field("PIDX", 32)); + aml_append(scope, field); + + aml_append(scope, aml_mutex("BLCK", 0)); + + method = aml_method("PCEJ", 2, AML_NOTSERIALIZED); + aml_append(method, aml_acquire(aml_name("BLCK"), 0xFFFF)); + aml_append(method, aml_store(aml_arg(0), aml_name("BNUM"))); + aml_append(method, + aml_store(aml_shiftleft(aml_int(1), aml_arg(1)), aml_name("B0EJ"))); + aml_append(method, aml_release(aml_name("BLCK"))); + aml_append(method, aml_return(aml_int(0))); + aml_append(scope, method); + + method = aml_method("AIDX", 2, AML_NOTSERIALIZED); + aml_append(method, aml_acquire(aml_name("BLCK"), 0xFFFF)); + aml_append(method, aml_store(aml_arg(0), aml_name("BNUM"))); + aml_append(method, + aml_store(aml_shiftleft(aml_int(1), aml_arg(1)), aml_name("PIDX"))); + aml_append(method, aml_store(aml_name("PIDX"), aml_local(0))); + aml_append(method, aml_release(aml_name("BLCK"))); + aml_append(method, aml_return(aml_local(0))); + aml_append(scope, method); + + aml_append(scope, aml_pci_pdsm()); + + aml_append(table, scope); +} + +/* Reserve PCIHP resources */ +void build_append_pcihp_resources(Aml *scope /* \\_SB.PCI0 */, + uint64_t io_addr, uint64_t io_len) +{ + Aml *dev, *crs; + + dev = aml_device("PHPR"); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06"))); + aml_append(dev, + aml_name_decl("_UID", aml_string("PCI Hotplug resources"))); + /* device present, functioning, decoding, not shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); + crs = aml_resource_template(); + aml_append(crs, aml_io(AML_DECODE16, io_addr, io_addr, 1, io_len)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} + +bool build_append_notification_callback(Aml *parent_scope, const PCIBus *bus) +{ + Aml *method; + PCIBus *sec; + QObject *bsel; + int nr_notifiers = 0; + GQueue *pcnt_bus_list = g_queue_new(); + + QLIST_FOREACH(sec, &bus->child, sibling) { + Aml *br_scope = aml_scope("S%.02X", sec->parent_dev->devfn); + if (pci_bus_is_root(sec)) { + continue; + } + nr_notifiers = nr_notifiers + + build_append_notification_callback(br_scope, sec); + /* + * add new child scope to parent + * and keep track of bus that have PCNT, + * bus list is used later to call children PCNTs from this level PCNT + */ + if (nr_notifiers) { + g_queue_push_tail(pcnt_bus_list, sec); + 
aml_append(parent_scope, br_scope); + } + } + + /* + * Append PCNT method to notify about events on local and child buses. + * ps: hostbridge might not have hotplug (bsel) enabled but might have + * child bridges that do have bsel. + */ + method = aml_method("PCNT", 0, AML_NOTSERIALIZED); + + /* If bus supports hotplug select it and notify about local events */ + bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL); + if (bsel) { + uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel)); + + aml_append(method, aml_store(aml_int(bsel_val), aml_name("BNUM"))); + aml_append(method, aml_call2("DVNT", aml_name("PCIU"), + aml_int(1))); /* Device Check */ + aml_append(method, aml_call2("DVNT", aml_name("PCID"), + aml_int(3))); /* Eject Request */ + nr_notifiers++; + } + + /* Notify about child bus events in any case */ + while ((sec = g_queue_pop_head(pcnt_bus_list))) { + aml_append(method, aml_name("^S%.02X.PCNT", sec->parent_dev->devfn)); + } + + aml_append(parent_scope, method); + qobject_unref(bsel); + g_queue_free(pcnt_bus_list); + return !!nr_notifiers; +} + +static Aml *aml_pci_device_dsm(void) +{ + Aml *method; + + method = aml_method("_DSM", 4, AML_SERIALIZED); + { + Aml *params = aml_local(0); + Aml *pkg = aml_package(2); + aml_append(pkg, aml_int(0)); + aml_append(pkg, aml_int(0)); + aml_append(method, aml_store(pkg, params)); + aml_append(method, + aml_store(aml_name("BSEL"), aml_index(params, aml_int(0)))); + aml_append(method, + aml_store(aml_name("ASUN"), aml_index(params, aml_int(1)))); + aml_append(method, + aml_return(aml_call5("PDSM", aml_arg(0), aml_arg(1), + aml_arg(2), aml_arg(3), params)) + ); + } + return method; +} + +static Aml *aml_pci_static_endpoint_dsm(PCIDevice *pdev) +{ + Aml *method; + + g_assert(pdev->acpi_index != 0); + method = aml_method("_DSM", 4, AML_SERIALIZED); + { + Aml *params = aml_local(0); + Aml *pkg = aml_package(1); + aml_append(pkg, aml_int(pdev->acpi_index)); + aml_append(method, aml_store(pkg, params)); + aml_append(method, + aml_return(aml_call5("EDSM", aml_arg(0), aml_arg(1), + aml_arg(2), aml_arg(3), params)) + ); + } + return method; +} + +static void build_append_pcihp_notify_entry(Aml *method, int slot) +{ + Aml *if_ctx; + int32_t devfn = PCI_DEVFN(slot, 0); + + if_ctx = aml_if(aml_and(aml_arg(0), aml_int(0x1U << slot), NULL)); + aml_append(if_ctx, aml_notify(aml_name("S%.02X", devfn), aml_arg(1))); + aml_append(method, if_ctx); +} + +static bool is_devfn_ignored_generic(const int devfn, const PCIBus *bus) +{ + const PCIDevice *pdev = bus->devices[devfn]; + + if (PCI_FUNC(devfn)) { + if (IS_PCI_BRIDGE(pdev)) { + /* + * Ignore only hotplugged PCI bridges on !0 functions, but + * allow describing cold plugged bridges on all functions + */ + if (DEVICE(pdev)->hotplugged) { + return true; + } + } + } + return false; +} + +static bool is_devfn_ignored_hotplug(const int devfn, const PCIBus *bus) +{ + PCIDevice *pdev = bus->devices[devfn]; + if (pdev) { + return is_devfn_ignored_generic(devfn, bus) || + !DEVICE_GET_CLASS(pdev)->hotpluggable || + /* Cold plugged bridges aren't themselves hot-pluggable */ + (IS_PCI_BRIDGE(pdev) && !DEVICE(pdev)->hotplugged); + } else { /* non populated slots */ + /* + * hotplug is supported only for non-multifunction device + * so generate device description only for function 0 + */ + if (PCI_FUNC(devfn) || + (pci_bus_is_express(bus) && PCI_SLOT(devfn) > 0)) { + return true; + } + } + return false; +} + +void build_append_pcihp_slots(Aml *parent_scope, PCIBus *bus) +{ + int devfn; + Aml 
*dev, *notify_method = NULL, *method; + QObject *bsel = object_property_get_qobject(OBJECT(bus), + ACPI_PCIHP_PROP_BSEL, NULL); + uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel)); + qobject_unref(bsel); + + aml_append(parent_scope, aml_name_decl("BSEL", aml_int(bsel_val))); + notify_method = aml_method("DVNT", 2, AML_NOTSERIALIZED); + + for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { + int slot = PCI_SLOT(devfn); + int adr = slot << 16 | PCI_FUNC(devfn); + + if (is_devfn_ignored_hotplug(devfn, bus)) { + continue; + } + + if (bus->devices[devfn]) { + dev = aml_scope("S%.02X", devfn); + } else { + dev = aml_device("S%.02X", devfn); + aml_append(dev, aml_name_decl("_ADR", aml_int(adr))); + } + + /* + * Can't declare _SUN here for every device as it changes 'slot' + * enumeration order in linux kernel, so use another variable for it + */ + aml_append(dev, aml_name_decl("ASUN", aml_int(slot))); + aml_append(dev, aml_pci_device_dsm()); + + aml_append(dev, aml_name_decl("_SUN", aml_int(slot))); + /* add _EJ0 to make slot hotpluggable */ + method = aml_method("_EJ0", 1, AML_NOTSERIALIZED); + aml_append(method, + aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN")) + ); + aml_append(dev, method); + + build_append_pcihp_notify_entry(notify_method, slot); + + /* device descriptor has been composed, add it into parent context */ + aml_append(parent_scope, dev); + } + aml_append(parent_scope, notify_method); +} + +void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus) +{ + int devfn; + Aml *dev; + + for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { + /* ACPI spec: 1.0b: Table 6-2 _ADR Object Bus Types, PCI type */ + int adr = PCI_SLOT(devfn) << 16 | PCI_FUNC(devfn); + PCIDevice *pdev = bus->devices[devfn]; + + if (!pdev || is_devfn_ignored_generic(devfn, bus)) { + continue; + } + + /* start to compose PCI device descriptor */ + dev = aml_device("S%.02X", devfn); + aml_append(dev, aml_name_decl("_ADR", aml_int(adr))); + + call_dev_aml_func(DEVICE(bus->devices[devfn]), dev); + /* add _DSM if device has acpi-index set */ + if (pdev->acpi_index && + !object_property_get_bool(OBJECT(pdev), "hotpluggable", + &error_abort)) { + aml_append(dev, aml_pci_static_endpoint_dsm(pdev)); + } + + /* device descriptor has been composed, add it into parent context */ + aml_append(parent_scope, dev); + } +} + const VMStateDescription vmstate_acpi_pcihp_pci_status = { .name = "acpi_pcihp_pci_status", .version_id = 1, diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c index debe1adb846..7a18f18dda2 100644 --- a/hw/acpi/piix4.c +++ b/hw/acpi/piix4.c @@ -28,9 +28,9 @@ #include "hw/acpi/acpi.h" #include "hw/acpi/pcihp.h" #include "hw/acpi/piix4.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" -#include "sysemu/xen.h" +#include "system/runstate.h" +#include "system/system.h" +#include "system/xen.h" #include "qapi/error.h" #include "qemu/range.h" #include "hw/acpi/cpu_hotplug.h" @@ -406,7 +406,7 @@ static bool piix4_is_hotpluggable_bus(HotplugHandler *hotplug_dev, BusState *bus) { PIIX4PMState *s = PIIX4_PM(hotplug_dev); - return acpi_pcihp_is_hotpluggbale_bus(&s->acpi_pci_hotplug, bus); + return acpi_pcihp_is_hotpluggable_bus(&s->acpi_pci_hotplug, bus); } static void piix4_pm_machine_ready(Notifier *n, void *opaque) @@ -567,7 +567,8 @@ static void piix4_acpi_system_hot_add_init(MemoryRegion *parent, if (s->acpi_pci_hotplug.use_acpi_hotplug_bridge || s->acpi_pci_hotplug.use_acpi_root_pci_hotplug) { - acpi_pcihp_init(OBJECT(s), &s->acpi_pci_hotplug, bus, parent, + 
object_property_set_link(OBJECT(s), "bus", OBJECT(bus), &error_abort); + acpi_pcihp_init(OBJECT(s), &s->acpi_pci_hotplug, parent, ACPI_PCIHP_ADDR_PIIX4); qbus_set_hotplug_handler(BUS(pci_get_bus(PCI_DEVICE(s))), OBJECT(s)); } @@ -602,7 +603,7 @@ static void piix4_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev) acpi_send_gpe_event(&s->ar, s->irq, ev); } -static Property piix4_pm_properties[] = { +static const Property piix4_pm_properties[] = { DEFINE_PROP_UINT32("smb_io_base", PIIX4PMState, smb_io_base, 0), DEFINE_PROP_UINT8(ACPI_PM_PROP_S3_DISABLED, PIIX4PMState, disable_s3, 0), DEFINE_PROP_UINT8(ACPI_PM_PROP_S4_DISABLED, PIIX4PMState, disable_s4, 0), @@ -611,16 +612,17 @@ static Property piix4_pm_properties[] = { acpi_pci_hotplug.use_acpi_hotplug_bridge, true), DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCI_ROOTHP, PIIX4PMState, acpi_pci_hotplug.use_acpi_root_pci_hotplug, true), + DEFINE_PROP_LINK("bus", PIIX4PMState, acpi_pci_hotplug.root, + TYPE_PCI_BUS, PCIBus *), DEFINE_PROP_BOOL("memory-hotplug-support", PIIX4PMState, acpi_memory_hotplug.is_enabled, true), DEFINE_PROP_BOOL("smm-compat", PIIX4PMState, smm_compat, false), DEFINE_PROP_BOOL("smm-enabled", PIIX4PMState, smm_enabled, false), DEFINE_PROP_BOOL("x-not-migrate-acpi-index", PIIX4PMState, not_migrate_acpi_index, false), - DEFINE_PROP_END_OF_LIST(), }; -static void piix4_pm_class_init(ObjectClass *klass, void *data) +static void piix4_pm_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -633,7 +635,7 @@ static void piix4_pm_class_init(ObjectClass *klass, void *data) k->device_id = PCI_DEVICE_ID_INTEL_82371AB_3; k->revision = 0x03; k->class_id = PCI_CLASS_BRIDGE_OTHER; - dc->reset = piix4_pm_reset; + device_class_set_legacy_reset(dc, piix4_pm_reset); dc->desc = "PM"; dc->vmsd = &vmstate_acpi; device_class_set_props(dc, piix4_pm_properties); @@ -658,7 +660,7 @@ static const TypeInfo piix4_pm_info = { .instance_init = piix4_pm_init, .instance_size = sizeof(PIIX4PMState), .class_init = piix4_pm_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { TYPE_ACPI_DEVICE_IF }, { INTERFACE_CONVENTIONAL_PCI_DEVICE }, diff --git a/hw/acpi/vmclock.c b/hw/acpi/vmclock.c new file mode 100644 index 00000000000..c582c0c1f83 --- /dev/null +++ b/hw/acpi/vmclock.c @@ -0,0 +1,179 @@ +/* + * Virtual Machine Clock Device + * + * Copyright © 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Authors: David Woodhouse + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/i386/e820_memory_layout.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/vmclock.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "system/reset.h" + +#include "standard-headers/linux/vmclock-abi.h" + +void vmclock_build_acpi(VmclockState *vms, GArray *table_data, + BIOSLinker *linker, const char *oem_id) +{ + Aml *ssdt, *dev, *scope, *crs; + AcpiTable table = { .sig = "SSDT", .rev = 1, + .oem_id = oem_id, .oem_table_id = "VMCLOCK" }; + + /* Put VMCLOCK into a separate SSDT table */ + acpi_table_begin(&table, table_data); + ssdt = init_aml_allocator(); + + scope = aml_scope("\\_SB"); + dev = aml_device("VCLK"); + aml_append(dev, aml_name_decl("_HID", aml_string("AMZNC10C"))); + aml_append(dev, aml_name_decl("_CID", aml_string("VMCLOCK"))); + aml_append(dev, aml_name_decl("_DDN", aml_string("VMCLOCK"))); + + /* Simple status method */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xf))); + + crs = aml_resource_template(); + aml_append(crs, aml_qword_memory(AML_POS_DECODE, + AML_MIN_FIXED, AML_MAX_FIXED, + AML_CACHEABLE, AML_READ_ONLY, + 0xffffffffffffffffULL, + vms->physaddr, + vms->physaddr + VMCLOCK_SIZE - 1, + 0, VMCLOCK_SIZE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); + aml_append(ssdt, scope); + + g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len); + acpi_table_end(linker, &table); + free_aml_allocator(); +} + +static void vmclock_update_guest(VmclockState *vms) +{ + uint64_t disruption_marker; + uint32_t seq_count; + + if (!vms->clk) { + return; + } + + seq_count = le32_to_cpu(vms->clk->seq_count) | 1; + vms->clk->seq_count = cpu_to_le32(seq_count); + /* These barriers pair with read barriers in the guest */ + smp_wmb(); + + disruption_marker = le64_to_cpu(vms->clk->disruption_marker); + disruption_marker++; + vms->clk->disruption_marker = cpu_to_le64(disruption_marker); + + /* These barriers pair with read barriers in the guest */ + smp_wmb(); + vms->clk->seq_count = cpu_to_le32(seq_count + 1); +} + +/* + * After restoring an image, we need to update the guest memory to notify + * it of clock disruption. + */ +static int vmclock_post_load(void *opaque, int version_id) +{ + VmclockState *vms = opaque; + + vmclock_update_guest(vms); + return 0; +} + +static const VMStateDescription vmstate_vmclock = { + .name = "vmclock", + .version_id = 1, + .minimum_version_id = 1, + .post_load = vmclock_post_load, + .fields = (const VMStateField[]) { + VMSTATE_UINT64(physaddr, VmclockState), + VMSTATE_END_OF_LIST() + }, +}; + +static void vmclock_handle_reset(void *opaque) +{ + VmclockState *vms = VMCLOCK(opaque); + + if (!memory_region_is_mapped(&vms->clk_page)) { + memory_region_add_subregion_overlap(get_system_memory(), + vms->physaddr, + &vms->clk_page, 0); + } +} + +static void vmclock_realize(DeviceState *dev, Error **errp) +{ + VmclockState *vms = VMCLOCK(dev); + + /* + * Given that this function is executing, there is at least one VMCLOCK + * device. Check if there are several. 
+ */ + if (!find_vmclock_dev()) { + error_setg(errp, "at most one %s device is permitted", TYPE_VMCLOCK); + return; + } + + vms->physaddr = VMCLOCK_ADDR; + + e820_add_entry(vms->physaddr, VMCLOCK_SIZE, E820_RESERVED); + + memory_region_init_ram(&vms->clk_page, OBJECT(dev), "vmclock_page", + VMCLOCK_SIZE, &error_abort); + memory_region_set_enabled(&vms->clk_page, true); + vms->clk = memory_region_get_ram_ptr(&vms->clk_page); + memset(vms->clk, 0, VMCLOCK_SIZE); + + vms->clk->magic = cpu_to_le32(VMCLOCK_MAGIC); + vms->clk->size = cpu_to_le16(VMCLOCK_SIZE); + vms->clk->version = cpu_to_le16(1); + + /* These are all zero and thus default, but be explicit */ + vms->clk->clock_status = VMCLOCK_STATUS_UNKNOWN; + vms->clk->counter_id = VMCLOCK_COUNTER_INVALID; + + qemu_register_reset(vmclock_handle_reset, vms); + + vmclock_update_guest(vms); +} + +static void vmclock_device_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_vmclock; + dc->realize = vmclock_realize; + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo vmclock_device_info = { + .name = TYPE_VMCLOCK, + .parent = TYPE_DEVICE, + .instance_size = sizeof(VmclockState), + .class_init = vmclock_device_class_init, +}; + +static void vmclock_register_types(void) +{ + type_register_static(&vmclock_device_info); +} + +type_init(vmclock_register_types) diff --git a/hw/acpi/vmgenid.c b/hw/acpi/vmgenid.c index e63c8af4c3f..33c35c85dd4 100644 --- a/hw/acpi/vmgenid.c +++ b/hw/acpi/vmgenid.c @@ -20,7 +20,7 @@ #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "migration/vmstate.h" -#include "sysemu/reset.h" +#include "system/reset.h" void vmgenid_build_acpi(VmGenIdState *vms, GArray *table_data, GArray *guid, BIOSLinker *linker, const char *oem_id) @@ -38,7 +38,7 @@ void vmgenid_build_acpi(VmGenIdState *vms, GArray *table_data, GArray *guid, guid_le = qemu_uuid_bswap(vms->guid); /* The GUID is written at a fixed offset into the fw_cfg file * in order to implement the "OVMF SDT Header probe suppressor" - * see docs/specs/vmgenid.txt for more details + * see docs/specs/vmgenid.rst for more details */ g_array_insert_vals(guid, VMGENID_GUID_OFFSET, guid_le.data, ARRAY_SIZE(guid_le.data)); @@ -101,7 +101,7 @@ void vmgenid_build_acpi(VmGenIdState *vms, GArray *table_data, GArray *guid, * < 4GB, but write 64 bits anyway. * The address that is patched in is offset in order to implement * the "OVMF SDT Header probe suppressor" - * see docs/specs/vmgenid.txt for more details. + * see docs/specs/vmgenid.rst for more details. */ bios_linker_loader_write_pointer(linker, VMGENID_ADDR_FW_CFG_FILE, 0, sizeof(uint64_t), @@ -153,7 +153,7 @@ static void vmgenid_update_guest(VmGenIdState *vms) guid_le = qemu_uuid_bswap(vms->guid); /* The GUID is written at a fixed offset into the fw_cfg file * in order to implement the "OVMF SDT Header probe suppressor" - * see docs/specs/vmgenid.txt for more details. + * see docs/specs/vmgenid.rst for more details. 
*/ cpu_physical_memory_write(vmgenid_addr, guid_le.data, sizeof(guid_le.data)); @@ -214,12 +214,11 @@ static void vmgenid_realize(DeviceState *dev, Error **errp) vmgenid_update_guest(vms); } -static Property vmgenid_device_properties[] = { +static const Property vmgenid_device_properties[] = { DEFINE_PROP_UUID(VMGENID_GUID, VmGenIdState, guid), - DEFINE_PROP_END_OF_LIST(), }; -static void vmgenid_device_class_init(ObjectClass *klass, void *data) +static void vmgenid_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/adc/Kconfig b/hw/adc/Kconfig index a825bd3d343..25d2229fb83 100644 --- a/hw/adc/Kconfig +++ b/hw/adc/Kconfig @@ -1,5 +1,2 @@ config STM32F2XX_ADC bool - -config MAX111X - bool diff --git a/hw/adc/aspeed_adc.c b/hw/adc/aspeed_adc.c index 48328ef8919..3e820cae1e3 100644 --- a/hw/adc/aspeed_adc.c +++ b/hw/adc/aspeed_adc.c @@ -286,18 +286,17 @@ static const VMStateDescription vmstate_aspeed_adc_engine = { } }; -static Property aspeed_adc_engine_properties[] = { +static const Property aspeed_adc_engine_properties[] = { DEFINE_PROP_UINT32("engine-id", AspeedADCEngineState, engine_id, 0), DEFINE_PROP_UINT32("nr-channels", AspeedADCEngineState, nr_channels, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_adc_engine_class_init(ObjectClass *klass, void *data) +static void aspeed_adc_engine_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_adc_engine_realize; - dc->reset = aspeed_adc_engine_reset; + device_class_set_legacy_reset(dc, aspeed_adc_engine_reset); device_class_set_props(dc, aspeed_adc_engine_properties); dc->desc = "Aspeed Analog-to-Digital Engine"; dc->vmsd = &vmstate_aspeed_adc_engine; @@ -370,7 +369,7 @@ static void aspeed_adc_realize(DeviceState *dev, Error **errp) } } -static void aspeed_adc_class_init(ObjectClass *klass, void *data) +static void aspeed_adc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedADCClass *aac = ASPEED_ADC_CLASS(klass); @@ -380,7 +379,7 @@ static void aspeed_adc_class_init(ObjectClass *klass, void *data) aac->nr_engines = 1; } -static void aspeed_2600_adc_class_init(ObjectClass *klass, void *data) +static void aspeed_2600_adc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedADCClass *aac = ASPEED_ADC_CLASS(klass); @@ -389,7 +388,7 @@ static void aspeed_2600_adc_class_init(ObjectClass *klass, void *data) aac->nr_engines = 2; } -static void aspeed_1030_adc_class_init(ObjectClass *klass, void *data) +static void aspeed_1030_adc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedADCClass *aac = ASPEED_ADC_CLASS(klass); @@ -398,7 +397,7 @@ static void aspeed_1030_adc_class_init(ObjectClass *klass, void *data) aac->nr_engines = 2; } -static void aspeed_2700_adc_class_init(ObjectClass *klass, void *data) +static void aspeed_2700_adc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedADCClass *aac = ASPEED_ADC_CLASS(klass); diff --git a/hw/adc/max111x.c b/hw/adc/max111x.c deleted file mode 100644 index 957d177e1ce..00000000000 --- a/hw/adc/max111x.c +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Maxim MAX1110/1111 ADC chip emulation. - * - * Copyright (c) 2006 Openedhand Ltd. - * Written by Andrzej Zaborowski - * - * This code is licensed under the GNU GPLv2. 
- * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "qemu/osdep.h" -#include "hw/adc/max111x.h" -#include "hw/irq.h" -#include "migration/vmstate.h" -#include "qemu/module.h" -#include "hw/qdev-properties.h" - -/* Control-byte bitfields */ -#define CB_PD0 (1 << 0) -#define CB_PD1 (1 << 1) -#define CB_SGL (1 << 2) -#define CB_UNI (1 << 3) -#define CB_SEL0 (1 << 4) -#define CB_SEL1 (1 << 5) -#define CB_SEL2 (1 << 6) -#define CB_START (1 << 7) - -#define CHANNEL_NUM(v, b0, b1, b2) \ - ((((v) >> (2 + (b0))) & 4) | \ - (((v) >> (3 + (b1))) & 2) | \ - (((v) >> (4 + (b2))) & 1)) - -static uint32_t max111x_read(MAX111xState *s) -{ - if (!s->tb1) - return 0; - - switch (s->cycle ++) { - case 1: - return s->rb2; - case 2: - return s->rb3; - } - - return 0; -} - -/* Interpret a control-byte */ -static void max111x_write(MAX111xState *s, uint32_t value) -{ - int measure, chan; - - /* Ignore the value if START bit is zero */ - if (!(value & CB_START)) - return; - - s->cycle = 0; - - if (!(value & CB_PD1)) { - s->tb1 = 0; - return; - } - - s->tb1 = value; - - if (s->inputs == 8) - chan = CHANNEL_NUM(value, 1, 0, 2); - else - chan = CHANNEL_NUM(value & ~CB_SEL0, 0, 1, 2); - - if (value & CB_SGL) - measure = s->input[chan] - s->com; - else - measure = s->input[chan] - s->input[chan ^ 1]; - - if (!(value & CB_UNI)) - measure ^= 0x80; - - s->rb2 = (measure >> 2) & 0x3f; - s->rb3 = (measure << 6) & 0xc0; - - /* FIXME: When should the IRQ be lowered? */ - qemu_irq_raise(s->interrupt); -} - -static uint32_t max111x_transfer(SSIPeripheral *dev, uint32_t value) -{ - MAX111xState *s = MAX_111X(dev); - max111x_write(s, value); - return max111x_read(s); -} - -static const VMStateDescription vmstate_max111x = { - .name = "max111x", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_SSI_PERIPHERAL(parent_obj, MAX111xState), - VMSTATE_UINT8(tb1, MAX111xState), - VMSTATE_UINT8(rb2, MAX111xState), - VMSTATE_UINT8(rb3, MAX111xState), - VMSTATE_INT32_EQUAL(inputs, MAX111xState, NULL), - VMSTATE_INT32(com, MAX111xState), - VMSTATE_ARRAY_INT32_UNSAFE(input, MAX111xState, inputs, - vmstate_info_uint8, uint8_t), - VMSTATE_END_OF_LIST() - } -}; - -static void max111x_input_set(void *opaque, int line, int value) -{ - MAX111xState *s = MAX_111X(opaque); - - assert(line >= 0 && line < s->inputs); - s->input[line] = value; -} - -static int max111x_init(SSIPeripheral *d, int inputs) -{ - DeviceState *dev = DEVICE(d); - MAX111xState *s = MAX_111X(dev); - - qdev_init_gpio_out(dev, &s->interrupt, 1); - qdev_init_gpio_in(dev, max111x_input_set, inputs); - - s->inputs = inputs; - - return 0; -} - -static void max1110_realize(SSIPeripheral *dev, Error **errp) -{ - max111x_init(dev, 8); -} - -static void max1111_realize(SSIPeripheral *dev, Error **errp) -{ - max111x_init(dev, 4); -} - -static void max111x_reset(DeviceState *dev) -{ - MAX111xState *s = MAX_111X(dev); - int i; - - for (i = 0; i < s->inputs; i++) { - s->input[i] = s->reset_input[i]; - } - s->com = 0; - s->tb1 = 0; - s->rb2 = 0; - s->rb3 = 0; - s->cycle = 0; -} - -static Property max1110_properties[] = { - /* Reset values for ADC inputs */ - DEFINE_PROP_UINT8("input0", MAX111xState, reset_input[0], 0xf0), - DEFINE_PROP_UINT8("input1", MAX111xState, reset_input[1], 0xe0), - DEFINE_PROP_UINT8("input2", MAX111xState, reset_input[2], 0xd0), - DEFINE_PROP_UINT8("input3", MAX111xState, reset_input[3], 0xc0), - DEFINE_PROP_END_OF_LIST(), -}; 
- -static Property max1111_properties[] = { - /* Reset values for ADC inputs */ - DEFINE_PROP_UINT8("input0", MAX111xState, reset_input[0], 0xf0), - DEFINE_PROP_UINT8("input1", MAX111xState, reset_input[1], 0xe0), - DEFINE_PROP_UINT8("input2", MAX111xState, reset_input[2], 0xd0), - DEFINE_PROP_UINT8("input3", MAX111xState, reset_input[3], 0xc0), - DEFINE_PROP_UINT8("input4", MAX111xState, reset_input[4], 0xb0), - DEFINE_PROP_UINT8("input5", MAX111xState, reset_input[5], 0xa0), - DEFINE_PROP_UINT8("input6", MAX111xState, reset_input[6], 0x90), - DEFINE_PROP_UINT8("input7", MAX111xState, reset_input[7], 0x80), - DEFINE_PROP_END_OF_LIST(), -}; - -static void max111x_class_init(ObjectClass *klass, void *data) -{ - SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); - DeviceClass *dc = DEVICE_CLASS(klass); - - k->transfer = max111x_transfer; - dc->reset = max111x_reset; - dc->vmsd = &vmstate_max111x; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); -} - -static const TypeInfo max111x_info = { - .name = TYPE_MAX_111X, - .parent = TYPE_SSI_PERIPHERAL, - .instance_size = sizeof(MAX111xState), - .class_init = max111x_class_init, - .abstract = true, -}; - -static void max1110_class_init(ObjectClass *klass, void *data) -{ - SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); - DeviceClass *dc = DEVICE_CLASS(klass); - - k->realize = max1110_realize; - device_class_set_props(dc, max1110_properties); -} - -static const TypeInfo max1110_info = { - .name = TYPE_MAX_1110, - .parent = TYPE_MAX_111X, - .class_init = max1110_class_init, -}; - -static void max1111_class_init(ObjectClass *klass, void *data) -{ - SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); - DeviceClass *dc = DEVICE_CLASS(klass); - - k->realize = max1111_realize; - device_class_set_props(dc, max1111_properties); -} - -static const TypeInfo max1111_info = { - .name = TYPE_MAX_1111, - .parent = TYPE_MAX_111X, - .class_init = max1111_class_init, -}; - -static void max111x_register_types(void) -{ - type_register_static(&max111x_info); - type_register_static(&max1110_info); - type_register_static(&max1111_info); -} - -type_init(max111x_register_types) diff --git a/hw/adc/meson.build b/hw/adc/meson.build index a4f85b7d468..7f7acc16196 100644 --- a/hw/adc/meson.build +++ b/hw/adc/meson.build @@ -2,4 +2,3 @@ system_ss.add(when: 'CONFIG_STM32F2XX_ADC', if_true: files('stm32f2xx_adc.c')) system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_adc.c')) system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_adc.c')) system_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq-xadc.c')) -system_ss.add(when: 'CONFIG_MAX111X', if_true: files('max111x.c')) diff --git a/hw/adc/npcm7xx_adc.c b/hw/adc/npcm7xx_adc.c index de8469dae4f..ddb219d4562 100644 --- a/hw/adc/npcm7xx_adc.c +++ b/hw/adc/npcm7xx_adc.c @@ -267,12 +267,11 @@ static const VMStateDescription vmstate_npcm7xx_adc = { }, }; -static Property npcm7xx_timer_properties[] = { +static const Property npcm7xx_timer_properties[] = { DEFINE_PROP_UINT32("iref", NPCM7xxADCState, iref, NPCM7XX_ADC_DEFAULT_IREF), - DEFINE_PROP_END_OF_LIST(), }; -static void npcm7xx_adc_class_init(ObjectClass *klass, void *data) +static void npcm7xx_adc_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/adc/stm32f2xx_adc.c b/hw/adc/stm32f2xx_adc.c index e9df6ea53f3..a490ae640d9 100644 --- a/hw/adc/stm32f2xx_adc.c +++ b/hw/adc/stm32f2xx_adc.c @@ -284,11 +284,11 @@ static void stm32f2xx_adc_init(Object *obj) 
sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); } -static void stm32f2xx_adc_class_init(ObjectClass *klass, void *data) +static void stm32f2xx_adc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = stm32f2xx_adc_reset; + device_class_set_legacy_reset(dc, stm32f2xx_adc_reset); dc->vmsd = &vmstate_stm32f2xx_adc; } diff --git a/hw/adc/zynq-xadc.c b/hw/adc/zynq-xadc.c index 34268319a40..748a51ba78b 100644 --- a/hw/adc/zynq-xadc.c +++ b/hw/adc/zynq-xadc.c @@ -281,12 +281,12 @@ static const VMStateDescription vmstate_zynq_xadc = { } }; -static void zynq_xadc_class_init(ObjectClass *klass, void *data) +static void zynq_xadc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_zynq_xadc; - dc->reset = zynq_xadc_reset; + device_class_set_legacy_reset(dc, zynq_xadc_reset); } static const TypeInfo zynq_xadc_info = { diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c index 52a1fa310b9..19562b59674 100644 --- a/hw/alpha/dp264.c +++ b/hw/alpha/dp264.c @@ -8,6 +8,7 @@ #include "qemu/osdep.h" #include "cpu.h" +#include "exec/target_page.h" #include "elf.h" #include "hw/loader.h" #include "alpha_sys.h" @@ -144,7 +145,7 @@ static void clipper_init(MachineState *machine) } size = load_elf(palcode_filename, NULL, cpu_alpha_superpage_to_phys, NULL, &palcode_entry, NULL, NULL, NULL, - 0, EM_ALPHA, 0, 0); + ELFDATA2LSB, EM_ALPHA, 0, 0); if (size < 0) { error_report("could not load palcode '%s'", palcode_filename); exit(1); @@ -163,7 +164,7 @@ static void clipper_init(MachineState *machine) size = load_elf(kernel_filename, NULL, cpu_alpha_superpage_to_phys, NULL, &kernel_entry, &kernel_low, NULL, NULL, - 0, EM_ALPHA, 0, 0); + ELFDATA2LSB, EM_ALPHA, 0, 0); if (size < 0) { error_report("could not load kernel '%s'", kernel_filename); exit(1); diff --git a/hw/alpha/typhoon.c b/hw/alpha/typhoon.c index e8711ae16a3..4c56f981d71 100644 --- a/hw/alpha/typhoon.c +++ b/hw/alpha/typhoon.c @@ -9,6 +9,7 @@ #include "qemu/osdep.h" #include "qemu/module.h" #include "qemu/units.h" +#include "exec/cpu-interrupt.h" #include "qapi/error.h" #include "hw/pci/pci_host.h" #include "cpu.h" @@ -934,7 +935,7 @@ static const TypeInfo typhoon_pcihost_info = { }; static void typhoon_iommu_memory_region_class_init(ObjectClass *klass, - void *data) + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig index 1ad60da7aa2..bd6ea48158e 100644 --- a/hw/arm/Kconfig +++ b/hw/arm/Kconfig @@ -2,6 +2,7 @@ config ARM_VIRT bool default y depends on ARM + depends on TCG || KVM || HVF imply PCI_DEVICES imply TEST_DEVICES imply VFIO_AMD_XGBE @@ -14,6 +15,7 @@ config ARM_VIRT select ARM_GIC select ACPI select ARM_SMMUV3 + select ARM_SMMUV3_ACCEL select GPIO_KEY select DEVICE_TREE select FW_CFG_DMA @@ -33,16 +35,12 @@ config ARM_VIRT select ACPI_HW_REDUCED select ACPI_APEI select ACPI_VIOT + select ACPI_PCIHP + select ACPI_PCI_BRIDGE select VIRTIO_MEM_SUPPORTED select ACPI_CXL select ACPI_HMAT - -config CHEETAH - bool - default y - depends on TCG && ARM - select OMAP - select TSC210X + select TEGRA241_CMDQV config CUBIEBOARD bool @@ -77,7 +75,7 @@ config HIGHBANK depends on TCG && ARM select A9MPCORE select A15MPCORE - select AHCI + select AHCI_SYSBUS select ARM_TIMER # sp804 select ARM_V7M select PL011 # UART @@ -101,13 +99,11 @@ config INTEGRATOR select PL181 # display select SMC91C111 -config MAINSTONE +config MAX78000FTHR bool default y depends on TCG && ARM - select 
PXA2XX - select PFLASH_CFI01 - select SMC91C111 + select MAX78000_SOC config MPS3R bool @@ -119,7 +115,7 @@ config MUSCA default y depends on TCG && ARM select ARMSSE - select PL011 + select PL011 # UART select PL031 select SPLIT_IRQ select UNIMP @@ -136,7 +132,7 @@ config MUSICPAL select MARVELL_88W8618 select PTIMER select PFLASH_CFI02 - select SERIAL + select SERIAL_MM select WM8750 config NETDUINO2 @@ -157,79 +153,13 @@ config OLIMEX_STM32_H405 depends on TCG && ARM select STM32F405_SOC -config NSERIES - bool - default y - depends on TCG && ARM - select OMAP - select TMP105 # temperature sensor - select BLIZZARD # LCD/TV controller - select ONENAND - select TSC210X # touchscreen/sensors/audio - select TSC2005 # touchscreen/sensors/keypad - select LM832X # GPIO keyboard chip - select TWL92230 # energy-management - select TUSB6010 - config OMAP bool select FRAMEBUFFER select I2C - select ECC - select NAND select PFLASH_CFI01 select SD - select SERIAL - -config PXA2XX - bool - select FRAMEBUFFER - select I2C - select SERIAL - select SD - select SSI - select USB_OHCI_SYSBUS - select PCMCIA - -config GUMSTIX - bool - default y - depends on TCG && ARM - select PFLASH_CFI01 - select SMC91C111 - select PXA2XX - -config TOSA - bool - default y - depends on TCG && ARM - select ZAURUS # scoop - select MICRODRIVE - select PXA2XX - select LED - -config SPITZ - bool - default y - depends on TCG && ARM - select ADS7846 # touch-screen controller - select MAX111X # A/D converter - select WM8750 # audio codec - select MAX7310 # GPIO expander - select ZAURUS # scoop - select NAND # memory - select ECC # Error-correcting for NAND - select MICRODRIVE - select PXA2XX - -config Z2 - bool - default y - depends on TCG && ARM - select PFLASH_CFI01 - select WM8750 - select PL011 # UART - select PXA2XX + select SERIAL_MM config REALVIEW bool @@ -248,7 +178,7 @@ config REALVIEW select WM8750 # audio codec select LSI_SCSI_PCI select PCI - select PL011 # UART + select PL011 # UART select PL031 # RTC select PL041 # audio codec select PL050 # keyboard/mouse @@ -267,7 +197,7 @@ config SBSA_REF depends on TCG && AARCH64 imply PCI_DEVICES select DEVICE_TREE - select AHCI + select AHCI_SYSBUS select ARM_SMMUV3 select GPIO_KEY select PCI_EXPRESS @@ -316,14 +246,15 @@ config STM32VLDISCOVERY config STRONGARM bool - select PXA2XX + select PXA2XX_TIMER + select SSI config COLLIE bool default y depends on TCG && ARM select PFLASH_CFI01 - select ZAURUS # scoop + select ZAURUS_SCOOP select STRONGARM config SX1 @@ -374,7 +305,7 @@ config ZYNQ select PL330 select SDHCI select SSI_M25P80 - select USB_EHCI_SYSBUS + select USB_CHIPIDEA select XILINX # UART select XILINX_AXI select XILINX_SPI @@ -390,7 +321,7 @@ config ARM_V7M config ALLWINNER_A10 bool - select AHCI + select AHCI_SYSBUS select ALLWINNER_A10_PIT select ALLWINNER_A10_PIC select ALLWINNER_A10_CCM @@ -398,8 +329,9 @@ config ALLWINNER_A10 select ALLWINNER_WDT select ALLWINNER_EMAC select ALLWINNER_I2C + select ALLWINNER_A10_SPI select AXP2XX_PMU - select SERIAL + select SERIAL_MM select UNIMP select USB_OHCI_SYSBUS @@ -411,7 +343,7 @@ config ALLWINNER_H3 select ALLWINNER_SUN8I_EMAC select ALLWINNER_I2C select ALLWINNER_WDT - select SERIAL + select SERIAL_MM select ARM_TIMER select ARM_GIC select UNIMP @@ -422,12 +354,12 @@ config ALLWINNER_H3 config ALLWINNER_R40 bool default y if TCG && ARM - select AHCI + select AHCI_SYSBUS select ALLWINNER_SRAMC select ALLWINNER_A10_PIT select ALLWINNER_WDT select AXP2XX_PMU - select SERIAL + select SERIAL_MM select ARM_TIMER select 
ARM_GIC select UNIMP @@ -435,6 +367,15 @@ config ALLWINNER_R40 select USB_EHCI_SYSBUS select SD +config MAX78000_SOC + bool + select ARM_V7M + select MAX78000_ICC + select MAX78000_UART + select MAX78000_GCR + select MAX78000_TRNG + select MAX78000_AES + config RASPI bool default y @@ -466,6 +407,7 @@ config STM32F405_SOC bool select ARM_V7M select OR_IRQ + select STM32_RCC select STM32F4XX_SYSCFG select STM32F4XX_EXTI @@ -490,7 +432,7 @@ config XLNX_ZYNQMP_ARM bool default y if PIXMAN depends on TCG && AARCH64 - select AHCI + select AHCI_SYSBUS select ARM_GIC select CADENCE select CPU_CLUSTER @@ -515,7 +457,7 @@ config XLNX_VERSAL select ARM_GIC select CPU_CLUSTER select DEVICE_TREE - select PL011 + select PL011 # UART select CADENCE select VIRTIO_MMIO select UNIMP @@ -542,12 +484,25 @@ config NPCM7XX select ISL_PMBUS_VR select PL310 # cache controller select PMBUS - select SERIAL + select SERIAL_MM select SSI select UNIMP select PCA954X select USB_OHCI_SYSBUS +config NPCM8XX + bool + default y + depends on TCG && AARCH64 + select ARM_GIC + select SMBUS + select PL310 # cache controller + select NPCM7XX + select SERIAL + select SSI + select UNIMP + + config FSL_IMX25 bool default y @@ -556,6 +511,7 @@ config FSL_IMX25 select IMX select IMX_FEC select IMX_I2C + select USB_CHIPIDEA select WDT_IMX2 select SDHCI @@ -564,7 +520,7 @@ config FSL_IMX31 default y depends on TCG && ARM imply I2C_DEVICES - select SERIAL + select SERIAL_MM select IMX select IMX_I2C select WDT_IMX2 @@ -583,6 +539,8 @@ config FSL_IMX6 select PL310 # cache controller select PCI_EXPRESS_DESIGNWARE select SDHCI + select USB_CHIPIDEA + select OR_IRQ config ASPEED_SOC bool @@ -593,7 +551,8 @@ config ASPEED_SOC select I2C select DPS310 select PCA9552 - select SERIAL + select PCA9554 + select SERIAL_MM select SMBUS_EEPROM select PCA954X select SSI @@ -606,6 +565,7 @@ config ASPEED_SOC select PMBUS select MAX31785 select FSI_APB2OPB_ASPEED + select AT24C config MPS2 bool @@ -639,11 +599,45 @@ config FSL_IMX7 select WDT_IMX2 select PCI_EXPRESS_DESIGNWARE select SDHCI + select OR_IRQ + select UNIMP + select USB_CHIPIDEA + +config FSL_IMX8MP + bool + imply I2C_DEVICES + imply PCI_DEVICES + select ARM_GIC + select FSL_IMX8MP_ANALOG + select FSL_IMX8MP_CCM + select IMX + select IMX_FEC + select IMX_I2C + select OR_IRQ + select PCI_EXPRESS_DESIGNWARE + select PCI_EXPRESS_FSL_IMX8M_PHY + select SDHCI select UNIMP + select USB_DWC3 + select WDT_IMX2 + +config FSL_IMX8MP_EVK + bool + default y + depends on TCG && AARCH64 + select FSL_IMX8MP config ARM_SMMUV3 bool +config ARM_SMMUV3_ACCEL + bool + depends on ARM_SMMUV3 && IOMMUFD + +config TEGRA241_CMDQV + bool + depends on ARM_SMMUV3_ACCEL + config FSL_IMX6UL bool default y @@ -655,6 +649,7 @@ config FSL_IMX6UL select IMX_I2C select WDT_IMX2 select SDHCI + select USB_CHIPIDEA select UNIMP config MICROBIT @@ -681,15 +676,10 @@ config MSF2 bool select ARM_V7M select PTIMER - select SERIAL + select SERIAL_MM select SSI select UNIMP -config ZAURUS - bool - select NAND - select ECC - config ARMSSE bool select ARM_V7M diff --git a/hw/arm/allwinner-a10.c b/hw/arm/allwinner-a10.c index 57d5d80159c..dc910d4177b 100644 --- a/hw/arm/allwinner-a10.c +++ b/hw/arm/allwinner-a10.c @@ -17,12 +17,13 @@ #include "qemu/osdep.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "qemu/module.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/sysbus.h" #include "hw/arm/allwinner-a10.h" #include "hw/misc/unimp.h" -#include "sysemu/sysemu.h" +#include "system/system.h" 
#include "hw/boards.h" #include "hw/usb/hcd-ohci.h" #include "hw/loader.h" @@ -35,6 +36,7 @@ #define AW_A10_PIC_REG_BASE 0x01c20400 #define AW_A10_PIT_REG_BASE 0x01c20c00 #define AW_A10_UART0_REG_BASE 0x01c28000 +#define AW_A10_SPI0_BASE 0x01c05000 #define AW_A10_EMAC_BASE 0x01c0b000 #define AW_A10_EHCI_BASE 0x01c14000 #define AW_A10_OHCI_BASE 0x01c14400 @@ -49,9 +51,8 @@ void allwinner_a10_bootrom_setup(AwA10State *s, BlockBackend *blk) g_autofree uint8_t *buffer = g_new0(uint8_t, rom_size); if (blk_pread(blk, 8 * KiB, rom_size, buffer, 0) < 0) { - error_setg(&error_fatal, "%s: failed to read BlockBackend data", - __func__); - return; + error_report("%s: failed to read BlockBackend data", __func__); + exit(1); } rom_add_blob("allwinner-a10.bootrom", buffer, rom_size, @@ -80,6 +81,8 @@ static void aw_a10_init(Object *obj) object_initialize_child(obj, "i2c0", &s->i2c0, TYPE_AW_I2C); + object_initialize_child(obj, "spi0", &s->spi0, TYPE_AW_A10_SPI); + for (size_t i = 0; i < AW_A10_NUM_USB; i++) { object_initialize_child(obj, "ehci[*]", &s->ehci[i], TYPE_PLATFORM_EHCI); @@ -155,7 +158,7 @@ static void aw_a10_realize(DeviceState *dev, Error **errp) /* FIXME use a qdev chardev prop instead of serial_hd() */ serial_mm_init(get_system_memory(), AW_A10_UART0_REG_BASE, 2, qdev_get_gpio_in(dev, 1), - 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN); + 115200, serial_hd(0), DEVICE_LITTLE_ENDIAN); for (size_t i = 0; i < AW_A10_NUM_USB; i++) { g_autofree char *bus = g_strdup_printf("usb-bus.%zu", i); @@ -195,12 +198,17 @@ static void aw_a10_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c0), 0, AW_A10_I2C0_BASE); sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c0), 0, qdev_get_gpio_in(dev, 7)); + /* SPI */ + sysbus_realize(SYS_BUS_DEVICE(&s->spi0), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi0), 0, AW_A10_SPI0_BASE); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi0), 0, qdev_get_gpio_in(dev, 10)); + /* WDT */ sysbus_realize(SYS_BUS_DEVICE(&s->wdt), &error_fatal); sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->wdt), 0, AW_A10_WDT_BASE, 1); } -static void aw_a10_class_init(ObjectClass *oc, void *data) +static void aw_a10_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/arm/allwinner-h3.c b/hw/arm/allwinner-h3.c index 6870c3fe963..edffc21dd88 100644 --- a/hw/arm/allwinner-h3.c +++ b/hw/arm/allwinner-h3.c @@ -24,11 +24,11 @@ #include "qemu/units.h" #include "hw/qdev-core.h" #include "hw/sysbus.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/misc/unimp.h" #include "hw/usb/hcd-ehci.h" #include "hw/loader.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/arm/allwinner-h3.h" #include "target/arm/cpu-qom.h" #include "target/arm/gtimer.h" @@ -182,9 +182,8 @@ void allwinner_h3_bootrom_setup(AwH3State *s, BlockBackend *blk) g_autofree uint8_t *buffer = g_new0(uint8_t, rom_size); if (blk_pread(blk, 8 * KiB, rom_size, buffer, 0) < 0) { - error_setg(&error_fatal, "%s: failed to read BlockBackend data", - __func__); - return; + error_report("%s: failed to read BlockBackend data", __func__); + exit(1); } rom_add_blob("allwinner-h3.bootrom", buffer, rom_size, @@ -409,19 +408,19 @@ static void allwinner_h3_realize(DeviceState *dev, Error **errp) /* UART0. For future clocktree API: All UARTS are connected to APB2_CLK. 
*/ serial_mm_init(get_system_memory(), s->memmap[AW_H3_DEV_UART0], 2, qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_UART0), - 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN); + 115200, serial_hd(0), DEVICE_LITTLE_ENDIAN); /* UART1 */ serial_mm_init(get_system_memory(), s->memmap[AW_H3_DEV_UART1], 2, qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_UART1), - 115200, serial_hd(1), DEVICE_NATIVE_ENDIAN); + 115200, serial_hd(1), DEVICE_LITTLE_ENDIAN); /* UART2 */ serial_mm_init(get_system_memory(), s->memmap[AW_H3_DEV_UART2], 2, qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_UART2), - 115200, serial_hd(2), DEVICE_NATIVE_ENDIAN); + 115200, serial_hd(2), DEVICE_LITTLE_ENDIAN); /* UART3 */ serial_mm_init(get_system_memory(), s->memmap[AW_H3_DEV_UART3], 2, qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_UART3), - 115200, serial_hd(3), DEVICE_NATIVE_ENDIAN); + 115200, serial_hd(3), DEVICE_LITTLE_ENDIAN); /* DRAMC */ sysbus_realize(SYS_BUS_DEVICE(&s->dramc), &error_fatal); @@ -467,7 +466,7 @@ static void allwinner_h3_realize(DeviceState *dev, Error **errp) } } -static void allwinner_h3_class_init(ObjectClass *oc, void *data) +static void allwinner_h3_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/arm/allwinner-r40.c b/hw/arm/allwinner-r40.c index b8c72021334..c8eda39957c 100644 --- a/hw/arm/allwinner-r40.c +++ b/hw/arm/allwinner-r40.c @@ -20,17 +20,16 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "qemu/bswap.h" #include "qemu/module.h" #include "qemu/units.h" #include "hw/boards.h" #include "hw/qdev-core.h" #include "hw/sysbus.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/misc/unimp.h" #include "hw/usb/hcd-ehci.h" #include "hw/loader.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/arm/allwinner-r40.h" #include "hw/misc/allwinner-r40-dramc.h" #include "target/arm/cpu-qom.h" @@ -231,9 +230,8 @@ bool allwinner_r40_bootrom_setup(AwR40State *s, BlockBackend *blk, int unit) struct boot_file_head *head = (struct boot_file_head *)buffer; if (blk_pread(blk, 8 * KiB, rom_size, buffer, 0) < 0) { - error_setg(&error_fatal, "%s: failed to read BlockBackend data", - __func__); - return false; + error_report("%s: failed to read BlockBackend data", __func__); + exit(1); } /* we only check the magic string here. 
*/ @@ -493,7 +491,7 @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp) serial_mm_init(get_system_memory(), addr, 2, qdev_get_gpio_in(DEVICE(&s->gic), uart_irqs[i]), - 115200, serial_hd(i), DEVICE_NATIVE_ENDIAN); + 115200, serial_hd(i), DEVICE_LITTLE_ENDIAN); } /* I2C */ @@ -540,7 +538,7 @@ static void allwinner_r40_realize(DeviceState *dev, Error **errp) } } -static void allwinner_r40_class_init(ObjectClass *oc, void *data) +static void allwinner_r40_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c index 91502d157a9..50ab7f48105 100644 --- a/hw/arm/armsse.c +++ b/hw/arm/armsse.c @@ -72,12 +72,13 @@ struct ARMSSEInfo { bool has_cpu_pwrctrl; bool has_sse_counter; bool has_tcms; - Property *props; + uint8_t props_count; + const Property *props; const ARMSSEDeviceInfo *devinfo; const bool *irq_is_common; }; -static Property iotkit_properties[] = { +static const Property iotkit_properties[] = { DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION, MemoryRegion *), DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64), @@ -87,10 +88,9 @@ static Property iotkit_properties[] = { DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true), DEFINE_PROP_UINT32("CPU0_MPU_NS", ARMSSE, cpu_mpu_ns[0], 8), DEFINE_PROP_UINT32("CPU0_MPU_S", ARMSSE, cpu_mpu_s[0], 8), - DEFINE_PROP_END_OF_LIST() }; -static Property sse200_properties[] = { +static const Property sse200_properties[] = { DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION, MemoryRegion *), DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64), @@ -104,10 +104,9 @@ static Property sse200_properties[] = { DEFINE_PROP_UINT32("CPU0_MPU_S", ARMSSE, cpu_mpu_s[0], 8), DEFINE_PROP_UINT32("CPU1_MPU_NS", ARMSSE, cpu_mpu_ns[1], 8), DEFINE_PROP_UINT32("CPU1_MPU_S", ARMSSE, cpu_mpu_s[1], 8), - DEFINE_PROP_END_OF_LIST() }; -static Property sse300_properties[] = { +static const Property sse300_properties[] = { DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION, MemoryRegion *), DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64), @@ -117,7 +116,6 @@ static Property sse300_properties[] = { DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true), DEFINE_PROP_UINT32("CPU0_MPU_NS", ARMSSE, cpu_mpu_ns[0], 8), DEFINE_PROP_UINT32("CPU0_MPU_S", ARMSSE, cpu_mpu_s[0], 8), - DEFINE_PROP_END_OF_LIST() }; static const ARMSSEDeviceInfo iotkit_devices[] = { @@ -528,6 +526,7 @@ static const ARMSSEInfo armsse_variants[] = { .has_sse_counter = false, .has_tcms = false, .props = iotkit_properties, + .props_count = ARRAY_SIZE(iotkit_properties), .devinfo = iotkit_devices, .irq_is_common = sse200_irq_is_common, }, @@ -549,6 +548,7 @@ static const ARMSSEInfo armsse_variants[] = { .has_sse_counter = false, .has_tcms = false, .props = sse200_properties, + .props_count = ARRAY_SIZE(sse200_properties), .devinfo = sse200_devices, .irq_is_common = sse200_irq_is_common, }, @@ -570,6 +570,7 @@ static const ARMSSEInfo armsse_variants[] = { .has_sse_counter = true, .has_tcms = true, .props = sse300_properties, + .props_count = ARRAY_SIZE(sse300_properties), .devinfo = sse300_devices, .irq_is_common = sse300_irq_is_common, }, @@ -1690,7 +1691,7 @@ static void armsse_reset(DeviceState *dev) s->nsccfg = 0; } -static void armsse_class_init(ObjectClass *klass, void *data) +static void armsse_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); IDAUInterfaceClass *iic = IDAU_INTERFACE_CLASS(klass); @@ -1699,8 
+1700,8 @@ static void armsse_class_init(ObjectClass *klass, void *data) dc->realize = armsse_realize; dc->vmsd = &armsse_vmstate; - device_class_set_props(dc, info->props); - dc->reset = armsse_reset; + device_class_set_props_n(dc, info->props, info->props_count); + device_class_set_legacy_reset(dc, armsse_reset); iic->check = armsse_idau_check; asc->info = info; } @@ -1712,7 +1713,7 @@ static const TypeInfo armsse_info = { .class_size = sizeof(ARMSSEClass), .instance_init = armsse_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_IDAU_INTERFACE }, { } } @@ -1729,9 +1730,9 @@ static void armsse_register_types(void) .name = armsse_variants[i].name, .parent = TYPE_ARM_SSE, .class_init = armsse_class_init, - .class_data = (void *)&armsse_variants[i], + .class_data = &armsse_variants[i], }; - type_register(&ti); + type_register_static(&ti); } } diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c index 7c68525a9e6..cea3eb49ee5 100644 --- a/hw/arm/armv7m.c +++ b/hw/arm/armv7m.c @@ -16,7 +16,7 @@ #include "hw/qdev-properties.h" #include "hw/qdev-clock.h" #include "elf.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "qemu/log.h" @@ -140,7 +140,7 @@ static MemTxResult v7m_sysreg_ns_write(void *opaque, hwaddr addr, /* S accesses to the alias act like NS accesses to the real region */ attrs.secure = 0; return memory_region_dispatch_write(mr, addr, value, - size_memop(size) | MO_TE, attrs); + size_memop(size) | MO_LE, attrs); } else { /* NS attrs are RAZ/WI for privileged, and BusFault for user */ if (attrs.user) { @@ -160,7 +160,7 @@ static MemTxResult v7m_sysreg_ns_read(void *opaque, hwaddr addr, /* S accesses to the alias act like NS accesses to the real region */ attrs.secure = 0; return memory_region_dispatch_read(mr, addr, data, - size_memop(size) | MO_TE, attrs); + size_memop(size) | MO_LE, attrs); } else { /* NS attrs are RAZ/WI for privileged, and BusFault for user */ if (attrs.user) { @@ -174,7 +174,7 @@ static MemTxResult v7m_sysreg_ns_read(void *opaque, hwaddr addr, static const MemoryRegionOps v7m_sysreg_ns_ops = { .read_with_attrs = v7m_sysreg_ns_read, .write_with_attrs = v7m_sysreg_ns_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; static MemTxResult v7m_systick_write(void *opaque, hwaddr addr, @@ -187,7 +187,7 @@ static MemTxResult v7m_systick_write(void *opaque, hwaddr addr, /* Direct the access to the correct systick */ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); return memory_region_dispatch_write(mr, addr, value, - size_memop(size) | MO_TE, attrs); + size_memop(size) | MO_LE, attrs); } static MemTxResult v7m_systick_read(void *opaque, hwaddr addr, @@ -199,14 +199,14 @@ static MemTxResult v7m_systick_read(void *opaque, hwaddr addr, /* Direct the access to the correct systick */ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); - return memory_region_dispatch_read(mr, addr, data, size_memop(size) | MO_TE, - attrs); + return memory_region_dispatch_read(mr, addr, data, + size_memop(size) | MO_LE, attrs); } static const MemoryRegionOps v7m_systick_ops = { .read_with_attrs = v7m_systick_read, .write_with_attrs = v7m_systick_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; /* @@ -538,7 +538,7 @@ static void armv7m_realize(DeviceState *dev, Error **errp) } } -static Property armv7m_properties[] = { +static const Property 
armv7m_properties[] = { DEFINE_PROP_STRING("cpu-type", ARMv7MState, cpu_type), DEFINE_PROP_LINK("memory", ARMv7MState, board_memory, TYPE_MEMORY_REGION, MemoryRegion *), @@ -552,7 +552,6 @@ static Property armv7m_properties[] = { DEFINE_PROP_BOOL("dsp", ARMv7MState, dsp, true), DEFINE_PROP_UINT32("mpu-ns-regions", ARMv7MState, mpu_ns_regions, UINT_MAX), DEFINE_PROP_UINT32("mpu-s-regions", ARMv7MState, mpu_s_regions, UINT_MAX), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_armv7m = { @@ -566,7 +565,7 @@ static const VMStateDescription vmstate_armv7m = { } }; -static void armv7m_class_init(ObjectClass *klass, void *data) +static void armv7m_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -609,7 +608,7 @@ void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, if (kernel_filename) { image_size = load_elf_as(kernel_filename, NULL, NULL, NULL, &entry, NULL, NULL, - NULL, 0, EM_ARM, 1, 0, as); + NULL, ELFDATA2LSB, EM_ARM, 1, 0, as); if (image_size < 0) { image_size = load_image_targphys_as(kernel_filename, mem_base, mem_size, as); @@ -631,14 +630,13 @@ void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, qemu_register_reset(armv7m_reset, cpu); } -static Property bitband_properties[] = { +static const Property bitband_properties[] = { DEFINE_PROP_UINT32("base", BitBandState, base, 0), DEFINE_PROP_LINK("source-memory", BitBandState, source_memory, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void bitband_class_init(ObjectClass *klass, void *data) +static void bitband_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index fd5603f7aa2..c31bbe77013 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -19,17 +19,19 @@ #include "hw/i2c/i2c_mux_pca954x.h" #include "hw/i2c/smbus_eeprom.h" #include "hw/gpio/pca9552.h" +#include "hw/gpio/pca9554.h" #include "hw/nvram/eeprom_at24c.h" #include "hw/sensor/tmp105.h" #include "hw/misc/led.h" #include "hw/qdev-properties.h" -#include "sysemu/block-backend.h" -#include "sysemu/reset.h" +#include "system/block-backend.h" +#include "system/reset.h" #include "hw/loader.h" #include "qemu/error-report.h" +#include "qemu/datadir.h" #include "qemu/units.h" #include "hw/qdev-clock.h" -#include "sysemu/sysemu.h" +#include "system/system.h" static struct arm_boot_info aspeed_board_binfo = { .board_id = -1, /* device-tree-only board */ @@ -181,14 +183,12 @@ struct AspeedMachineState { #ifdef TARGET_AARCH64 /* AST2700 evb hardware value */ -#define AST2700_EVB_HW_STRAP1 0x000000C0 -#define AST2700_EVB_HW_STRAP2 0x00000003 +/* SCU HW Strap1 */ +#define AST2700_EVB_HW_STRAP1 0x00000800 +/* SCUIO HW Strap1 */ +#define AST2700_EVB_HW_STRAP2 0x00000700 #endif -/* Tacoma hardware value */ -#define TACOMA_BMC_HW_STRAP1 0x00000000 -#define TACOMA_BMC_HW_STRAP2 0x00000040 - /* Rainier hardware value: (QEMU prototype) */ #define RAINIER_BMC_HW_STRAP1 (0x00422016 | SCU_AST2600_HW_STRAP_BOOT_SRC_EMMC) #define RAINIER_BMC_HW_STRAP2 0x80000848 @@ -198,9 +198,12 @@ struct AspeedMachineState { #define FUJI_BMC_HW_STRAP2 0x00000000 /* Bletchley hardware value */ -/* TODO: Leave same as EVB for now. 
*/ -#define BLETCHLEY_BMC_HW_STRAP1 AST2600_EVB_HW_STRAP1 -#define BLETCHLEY_BMC_HW_STRAP2 AST2600_EVB_HW_STRAP2 +#define BLETCHLEY_BMC_HW_STRAP1 0x00002000 +#define BLETCHLEY_BMC_HW_STRAP2 0x00000801 + +/* GB200NVL hardware value */ +#define GB200NVL_BMC_HW_STRAP1 AST2600_EVB_HW_STRAP1 +#define GB200NVL_BMC_HW_STRAP2 AST2600_EVB_HW_STRAP2 /* Qualcomm DC-SCM hardware value */ #define QCOM_DC_SCM_V1_BMC_HW_STRAP1 0x00000000 @@ -307,6 +310,33 @@ static void aspeed_install_boot_rom(AspeedMachineState *bmc, BlockBackend *blk, rom_size, &error_abort); } +#define VBOOTROM_FILE_NAME "ast27x0_bootrom.bin" + +/* + * This function locates the vbootrom image file specified via the command line + * using the -bios option. It loads the specified image into the vbootrom + * memory region and handles errors if the file cannot be found or loaded. + */ +static void aspeed_load_vbootrom(AspeedMachineState *bmc, const char *bios_name, + Error **errp) +{ + g_autofree char *filename = NULL; + AspeedSoCState *soc = bmc->soc; + int ret; + + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (!filename) { + error_setg(errp, "Could not find vbootrom image '%s'", bios_name); + return; + } + + ret = load_image_mr(filename, &soc->vbootrom); + if (ret < 0) { + error_setg(errp, "Failed to load vbootrom image '%s'", bios_name); + return; + } +} + void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype, unsigned int count, int unit0) { @@ -338,10 +368,20 @@ static void sdhci_attach_drive(SDHCIState *sdhci, DriveInfo *dinfo, bool emmc, return; } card = qdev_new(emmc ? TYPE_EMMC : TYPE_SD_CARD); - if (emmc) { + + /* + * Force the boot properties of the eMMC device only when the + * machine is strapped to boot from eMMC. Without these + * settings, the machine would not boot. + * + * This also allows the machine to use an eMMC device without + * boot areas when booting from the flash device (or -kernel) + * Ideally, the device and its properties should be defined on + * the command line. + */ + if (emmc && boot_emmc) { qdev_prop_set_uint64(card, "boot-partition-size", 1 * MiB); - qdev_prop_set_uint8(card, "boot-config", - boot_emmc ? 0x1 << 3 : 0x0); + qdev_prop_set_uint8(card, "boot-config", 0x1 << 3); } qdev_prop_set_drive_err(card, "drive", blk_by_legacy_dinfo(dinfo), &error_fatal); @@ -358,11 +398,11 @@ static void connect_serial_hds_to_uarts(AspeedMachineState *bmc) int uart_chosen = bmc->uart_chosen ? 
bmc->uart_chosen : amc->uart_default; aspeed_soc_uart_set_chr(s, uart_chosen, serial_hd(0)); - for (int i = 1, uart = sc->uarts_base; i < sc->uarts_num; i++, uart++) { + for (int i = 1, uart = sc->uarts_base; i < sc->uarts_num; uart++) { if (uart == uart_chosen) { continue; } - aspeed_soc_uart_set_chr(s, uart, serial_hd(i)); + aspeed_soc_uart_set_chr(s, uart, serial_hd(i++)); } } @@ -372,6 +412,7 @@ static void aspeed_machine_init(MachineState *machine) AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(machine); AspeedSoCClass *sc; int i; + const char *bios_name = NULL; DriveInfo *emmc0 = NULL; bool boot_emmc; @@ -403,6 +444,12 @@ static void aspeed_machine_init(MachineState *machine) OBJECT(get_system_memory()), &error_abort); object_property_set_link(OBJECT(bmc->soc), "dram", OBJECT(machine->ram), &error_abort); + if (amc->sdhci_wp_inverted) { + for (i = 0; i < bmc->soc->sdhci.num_slots; i++) { + object_property_set_bool(OBJECT(&bmc->soc->sdhci.slots[i]), + "wp-inverted", true, &error_abort); + } + } if (machine->kernel_filename) { /* * When booting with a -kernel command line there is no u-boot @@ -422,6 +469,8 @@ static void aspeed_machine_init(MachineState *machine) aspeed_board_init_flashes(&bmc->soc->spi[0], bmc->spi_model ? bmc->spi_model : amc->spi_model, 1, amc->num_cs); + aspeed_board_init_flashes(&bmc->soc->spi[1], + amc->spi2_model, 1, amc->num_cs2); } if (machine->kernel_filename && sc->num_cpus > 1) { @@ -444,14 +493,14 @@ static void aspeed_machine_init(MachineState *machine) amc->i2c_init(bmc); } - for (i = 0; i < bmc->soc->sdhci.num_slots; i++) { + for (i = 0; i < bmc->soc->sdhci.num_slots && defaults_enabled(); i++) { sdhci_attach_drive(&bmc->soc->sdhci.slots[i], drive_get(IF_SD, 0, i), false, false); } boot_emmc = sc->boot_from_emmc(bmc->soc); - if (bmc->soc->emmc.num_slots) { + if (bmc->soc->emmc.num_slots && defaults_enabled()) { emmc0 = drive_get(IF_SD, 0, bmc->soc->sdhci.num_slots); sdhci_attach_drive(&bmc->soc->emmc.slots[0], emmc0, true, boot_emmc); } @@ -468,6 +517,11 @@ static void aspeed_machine_init(MachineState *machine) } } + if (amc->vbootrom) { + bios_name = machine->firmware ?: VBOOTROM_FILE_NAME; + aspeed_load_vbootrom(bmc, bios_name, &error_abort); + } + arm_load_kernel(ARM_CPU(first_cpu), machine, &aspeed_board_binfo); } @@ -597,6 +651,12 @@ static void create_pca9552(AspeedSoCState *soc, int bus_id, int addr) TYPE_PCA9552, addr); } +static I2CSlave *create_pca9554(AspeedSoCState *soc, int bus_id, int addr) +{ + return i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, bus_id), + TYPE_PCA9554, addr); +} + static void sonorapass_bmc_i2c_init(AspeedMachineState *bmc) { AspeedSoCState *soc = bmc->soc; @@ -955,6 +1015,180 @@ static void fuji_bmc_i2c_init(AspeedMachineState *bmc) } #define TYPE_TMP421 "tmp421" +#define TYPE_DS1338 "ds1338" + +/* Catalina hardware value */ +#define CATALINA_BMC_HW_STRAP1 0x00002002 +#define CATALINA_BMC_HW_STRAP2 0x00000800 + +#define CATALINA_BMC_RAM_SIZE ASPEED_RAM_SIZE(2 * GiB) + +static void catalina_bmc_i2c_init(AspeedMachineState *bmc) +{ + /* Reference from v6.16-rc2 aspeed-bmc-facebook-catalina.dts */ + + AspeedSoCState *soc = bmc->soc; + I2CBus *i2c[16] = {}; + I2CSlave *i2c_mux; + + /* busses 0-15 are all used. 
*/ + for (int i = 0; i < ARRAY_SIZE(i2c); i++) { + i2c[i] = aspeed_i2c_get_bus(&soc->i2c, i); + } + + /* &i2c0 */ + /* i2c-mux@71 (PCA9546) on i2c0 */ + i2c_slave_create_simple(i2c[0], TYPE_PCA9546, 0x71); + + /* i2c-mux@72 (PCA9546) on i2c0 */ + i2c_mux = i2c_slave_create_simple(i2c[0], TYPE_PCA9546, 0x72); + + /* i2c0mux1ch1 */ + /* io_expander7 - pca9535@20 */ + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 1), + TYPE_PCA9552, 0x20); + /* eeprom@50 */ + at24c_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 1), 0x50, 8 * KiB); + + /* i2c-mux@73 (PCA9546) on i2c0 */ + i2c_slave_create_simple(i2c[0], TYPE_PCA9546, 0x73); + + /* i2c-mux@75 (PCA9546) on i2c0 */ + i2c_slave_create_simple(i2c[0], TYPE_PCA9546, 0x75); + + /* i2c-mux@76 (PCA9546) on i2c0 */ + i2c_mux = i2c_slave_create_simple(i2c[0], TYPE_PCA9546, 0x76); + + /* i2c0mux4ch1 */ + /* io_expander8 - pca9535@21 */ + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 1), + TYPE_PCA9552, 0x21); + /* eeprom@50 */ + at24c_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 1), 0x50, 8 * KiB); + + /* i2c-mux@77 (PCA9546) on i2c0 */ + i2c_slave_create_simple(i2c[0], TYPE_PCA9546, 0x77); + + + /* &i2c1 */ + /* i2c-mux@70 (PCA9548) on i2c1 */ + i2c_mux = i2c_slave_create_simple(i2c[1], TYPE_PCA9548, 0x70); + /* i2c1mux0ch0 */ + /* ina238@41 - no model */ + /* ina238@42 - no model */ + /* ina238@44 - no model */ + /* i2c1mux0ch1 */ + /* ina238@41 - no model */ + /* ina238@43 - no model */ + /* i2c1mux0ch4 */ + /* ltc4287@42 - no model */ + /* ltc4287@43 - no model */ + + /* i2c1mux0ch5 */ + /* eeprom@54 */ + at24c_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 5), 0x54, 8 * KiB); + /* tpm75@4f */ + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 5), TYPE_TMP75, 0x4f); + + /* i2c1mux0ch6 */ + /* io_expander5 - pca9554@27 */ + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 6), + TYPE_PCA9554, 0x27); + /* io_expander6 - pca9555@25 */ + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 6), + TYPE_PCA9552, 0x25); + /* eeprom@51 */ + at24c_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 6), 0x51, 8 * KiB); + + /* i2c1mux0ch7 */ + /* eeprom@53 */ + at24c_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 7), 0x53, 8 * KiB); + /* temperature-sensor@4b - tmp75 */ + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 7), TYPE_TMP75, 0x4b); + + /* &i2c2 */ + /* io_expander0 - pca9555@20 */ + i2c_slave_create_simple(i2c[2], TYPE_PCA9552, 0x20); + /* io_expander0 - pca9555@21 */ + i2c_slave_create_simple(i2c[2], TYPE_PCA9552, 0x21); + /* io_expander0 - pca9555@27 */ + i2c_slave_create_simple(i2c[2], TYPE_PCA9552, 0x27); + /* eeprom@50 */ + at24c_eeprom_init(i2c[2], 0x50, 8 * KiB); + /* eeprom@51 */ + at24c_eeprom_init(i2c[2], 0x51, 8 * KiB); + + /* &i2c5 */ + /* i2c-mux@70 (PCA9548) on i2c5 */ + i2c_mux = i2c_slave_create_simple(i2c[5], TYPE_PCA9548, 0x70); + /* i2c5mux0ch6 */ + /* eeprom@52 */ + at24c_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 6), 0x52, 8 * KiB); + /* i2c5mux0ch7 */ + /* ina230@40 - no model */ + /* ina230@41 - no model */ + /* ina230@44 - no model */ + /* ina230@45 - no model */ + + /* &i2c6 */ + /* io_expander3 - pca9555@21 */ + i2c_slave_create_simple(i2c[6], TYPE_PCA9552, 0x21); + /* rtc@6f - nct3018y */ + i2c_slave_create_simple(i2c[6], TYPE_DS1338, 0x6f); + + /* &i2c9 */ + /* io_expander4 - pca9555@4f */ + i2c_slave_create_simple(i2c[9], TYPE_PCA9552, 0x4f); + /* temperature-sensor@4b - tpm75 */ + i2c_slave_create_simple(i2c[9], TYPE_TMP75, 0x4b); + /* eeprom@50 */ + at24c_eeprom_init(i2c[9], 0x50, 8 * KiB); + /* eeprom@56 */ + 
at24c_eeprom_init(i2c[9], 0x56, 8 * KiB); + + /* &i2c10 */ + /* temperature-sensor@1f - tpm421 */ + i2c_slave_create_simple(i2c[10], TYPE_TMP421, 0x1f); + /* eeprom@50 */ + at24c_eeprom_init(i2c[10], 0x50, 8 * KiB); + + /* &i2c11 */ + /* ssif-bmc@10 - no model */ + + /* &i2c12 */ + /* eeprom@50 */ + at24c_eeprom_init(i2c[12], 0x50, 8 * KiB); + + /* &i2c13 */ + /* eeprom@50 */ + at24c_eeprom_init(i2c[13], 0x50, 8 * KiB); + /* eeprom@54 */ + at24c_eeprom_init(i2c[13], 0x54, 256); + /* eeprom@55 */ + at24c_eeprom_init(i2c[13], 0x55, 256); + /* eeprom@57 */ + at24c_eeprom_init(i2c[13], 0x57, 256); + + /* &i2c14 */ + /* io_expander9 - pca9555@10 */ + i2c_slave_create_simple(i2c[14], TYPE_PCA9552, 0x10); + /* io_expander10 - pca9555@11 */ + i2c_slave_create_simple(i2c[14], TYPE_PCA9552, 0x11); + /* io_expander11 - pca9555@12 */ + i2c_slave_create_simple(i2c[14], TYPE_PCA9552, 0x12); + /* io_expander12 - pca9555@13 */ + i2c_slave_create_simple(i2c[14], TYPE_PCA9552, 0x13); + /* io_expander13 - pca9555@14 */ + i2c_slave_create_simple(i2c[14], TYPE_PCA9552, 0x14); + /* io_expander14 - pca9555@15 */ + i2c_slave_create_simple(i2c[14], TYPE_PCA9552, 0x15); + + /* &i2c15 */ + /* temperature-sensor@1f - tmp421 */ + i2c_slave_create_simple(i2c[15], TYPE_TMP421, 0x1f); + /* eeprom@52 */ + at24c_eeprom_init(i2c[15], 0x52, 8 * KiB); +} static void bletchley_bmc_i2c_init(AspeedMachineState *bmc) { @@ -1002,6 +1236,45 @@ static void bletchley_bmc_i2c_init(AspeedMachineState *bmc) i2c_slave_create_simple(i2c[12], TYPE_PCA9552, 0x67); } + +static void gb200nvl_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = bmc->soc; + I2CBus *i2c[15] = {}; + DeviceState *dev; + for (int i = 0; i < sizeof(i2c) / sizeof(i2c[0]); i++) { + if ((i == 11) || (i == 12) || (i == 13)) { + continue; + } + i2c[i] = aspeed_i2c_get_bus(&soc->i2c, i); + } + + /* Bus 5 Expander */ + create_pca9554(soc, 4, 0x21); + + /* Mux I2c Expanders */ + i2c_slave_create_simple(i2c[5], "pca9546", 0x71); + i2c_slave_create_simple(i2c[5], "pca9546", 0x72); + i2c_slave_create_simple(i2c[5], "pca9546", 0x73); + i2c_slave_create_simple(i2c[5], "pca9546", 0x75); + i2c_slave_create_simple(i2c[5], "pca9546", 0x76); + i2c_slave_create_simple(i2c[5], "pca9546", 0x77); + + /* Bus 10 */ + dev = DEVICE(create_pca9554(soc, 9, 0x20)); + + /* Set FPGA_READY */ + object_property_set_str(OBJECT(dev), "pin1", "high", &error_fatal); + + create_pca9554(soc, 9, 0x21); + at24c_eeprom_init(i2c[9], 0x50, 64 * KiB); + at24c_eeprom_init(i2c[9], 0x51, 64 * KiB); + + /* Bus 11 */ + at24c_eeprom_init_rom(i2c[10], 0x50, 256, gb200nvl_bmc_fruid, + gb200nvl_bmc_fruid_len); +} + static void fby35_i2c_init(AspeedMachineState *bmc) { AspeedSoCState *soc = bmc->soc; @@ -1213,7 +1486,7 @@ static void aspeed_machine_ast2600_class_emmc_init(ObjectClass *oc) "Set or unset boot from EMMC"); } -static void aspeed_machine_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1229,7 +1502,8 @@ static void aspeed_machine_class_init(ObjectClass *oc, void *data) aspeed_machine_class_props_init(oc); } -static void aspeed_machine_palmetto_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_palmetto_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1241,11 +1515,13 @@ static void aspeed_machine_palmetto_class_init(ObjectClass 
*oc, void *data) amc->spi_model = "mx25l25635f"; amc->num_cs = 1; amc->i2c_init = palmetto_bmc_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 256 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; -static void aspeed_machine_quanta_q71l_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_quanta_q71l_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1257,12 +1533,13 @@ static void aspeed_machine_quanta_q71l_class_init(ObjectClass *oc, void *data) amc->spi_model = "mx25l25635e"; amc->num_cs = 1; amc->i2c_init = quanta_q71l_bmc_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 128 * MiB; aspeed_machine_class_init_cpus_defaults(mc); } static void aspeed_machine_supermicrox11_bmc_class_init(ObjectClass *oc, - void *data) + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1275,12 +1552,13 @@ static void aspeed_machine_supermicrox11_bmc_class_init(ObjectClass *oc, amc->num_cs = 1; amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; amc->i2c_init = palmetto_bmc_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 256 * MiB; aspeed_machine_class_init_cpus_defaults(mc); } static void aspeed_machine_supermicro_x11spi_bmc_class_init(ObjectClass *oc, - void *data) + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1293,11 +1571,13 @@ static void aspeed_machine_supermicro_x11spi_bmc_class_init(ObjectClass *oc, amc->num_cs = 1; amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; amc->i2c_init = palmetto_bmc_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); } -static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1309,11 +1589,13 @@ static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, void *data) amc->spi_model = "mx25l25635f"; amc->num_cs = 1; amc->i2c_init = ast2500_evb_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; -static void aspeed_machine_yosemitev2_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_yosemitev2_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1326,11 +1608,13 @@ static void aspeed_machine_yosemitev2_class_init(ObjectClass *oc, void *data) amc->spi_model = "mx25l25635e"; amc->num_cs = 2; amc->i2c_init = yosemitev2_bmc_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; -static void aspeed_machine_romulus_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_romulus_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1342,11 +1626,13 @@ static void aspeed_machine_romulus_class_init(ObjectClass *oc, void *data) amc->spi_model = "mx66l1g45g"; amc->num_cs = 2; amc->i2c_init = romulus_bmc_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; -static void 
aspeed_machine_tiogapass_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_tiogapass_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1359,11 +1645,13 @@ static void aspeed_machine_tiogapass_class_init(ObjectClass *oc, void *data) amc->spi_model = "mx25l25635e"; amc->num_cs = 2; amc->i2c_init = tiogapass_bmc_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; aspeed_machine_class_init_cpus_defaults(mc); }; -static void aspeed_machine_sonorapass_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_sonorapass_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1375,11 +1663,13 @@ static void aspeed_machine_sonorapass_class_init(ObjectClass *oc, void *data) amc->spi_model = "mx66l1g45g"; amc->num_cs = 2; amc->i2c_init = sonorapass_bmc_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; -static void aspeed_machine_witherspoon_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_witherspoon_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1391,11 +1681,13 @@ static void aspeed_machine_witherspoon_class_init(ObjectClass *oc, void *data) amc->spi_model = "mx66l1g45g"; amc->num_cs = 2; amc->i2c_init = witherspoon_bmc_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; -static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1409,33 +1701,15 @@ static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc, void *data) amc->num_cs = 1; amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON | ASPEED_MAC2_ON | ASPEED_MAC3_ON; + amc->sdhci_wp_inverted = true; amc->i2c_init = ast2600_evb_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; aspeed_machine_class_init_cpus_defaults(mc); aspeed_machine_ast2600_class_emmc_init(oc); }; -static void aspeed_machine_tacoma_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); - - mc->desc = "OpenPOWER Tacoma BMC (Cortex-A7)"; - amc->soc_name = "ast2600-a3"; - amc->hw_strap1 = TACOMA_BMC_HW_STRAP1; - amc->hw_strap2 = TACOMA_BMC_HW_STRAP2; - amc->fmc_model = "mx66l1g45g"; - amc->spi_model = "mx66l1g45g"; - amc->num_cs = 2; - amc->macs_mask = ASPEED_MAC2_ON; - amc->i2c_init = witherspoon_bmc_i2c_init; /* Same board layout */ - mc->default_ram_size = 1 * GiB; - aspeed_machine_class_init_cpus_defaults(mc); - - mc->deprecation_reason = "Please use the similar 'rainier-bmc' machine"; -}; - -static void aspeed_machine_g220a_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_g220a_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1448,11 +1722,13 @@ static void aspeed_machine_g220a_class_init(ObjectClass *oc, void *data) amc->num_cs = 2; amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; amc->i2c_init = g220a_bmc_i2c_init; + mc->auto_create_sdcard = true; 
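
Editorial note, not part of the patch: the Aspeed machine hunks around this point repeat the same two mechanical changes for every board -- the QOM class_init hook now receives its class data as const void *, and boards with SD/eMMC controllers explicitly set the new mc->auto_create_sdcard flag. A minimal sketch of the resulting shape; "example-bmc" and its values are invented purely for illustration and do not correspond to a machine added here:

static void aspeed_machine_example_bmc_class_init(ObjectClass *oc,
                                                  const void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);

    mc->desc = "Example BMC (Cortex-A7)";
    amc->soc_name = "ast2600-a3";
    amc->fmc_model = "mx66l1g45g";
    amc->spi_model = "mx66l1g45g";
    amc->num_cs = 2;
    amc->i2c_init = NULL;            /* optional board-specific I2C wiring */
    mc->auto_create_sdcard = true;   /* new flag set across these boards */
    mc->default_ram_size = 1 * GiB;
    aspeed_machine_class_init_cpus_defaults(mc);
}
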
mc->default_ram_size = 1024 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; -static void aspeed_machine_fp5280g2_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_fp5280g2_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1465,11 +1741,12 @@ static void aspeed_machine_fp5280g2_class_init(ObjectClass *oc, void *data) amc->num_cs = 2; amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; amc->i2c_init = fp5280g2_bmc_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; -static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_rainier_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1483,6 +1760,7 @@ static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data) amc->num_cs = 2; amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON; amc->i2c_init = rainier_bmc_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; aspeed_machine_class_init_cpus_defaults(mc); aspeed_machine_ast2600_class_emmc_init(oc); @@ -1490,7 +1768,7 @@ static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data) #define FUJI_BMC_RAM_SIZE ASPEED_RAM_SIZE(2 * GiB) -static void aspeed_machine_fuji_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_fuji_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1505,13 +1783,15 @@ static void aspeed_machine_fuji_class_init(ObjectClass *oc, void *data) amc->macs_mask = ASPEED_MAC3_ON; amc->i2c_init = fuji_bmc_i2c_init; amc->uart_default = ASPEED_DEV_UART1; + mc->auto_create_sdcard = true; mc->default_ram_size = FUJI_BMC_RAM_SIZE; aspeed_machine_class_init_cpus_defaults(mc); }; #define BLETCHLEY_BMC_RAM_SIZE ASPEED_RAM_SIZE(2 * GiB) -static void aspeed_machine_bletchley_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_bletchley_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1525,16 +1805,63 @@ static void aspeed_machine_bletchley_class_init(ObjectClass *oc, void *data) amc->num_cs = 2; amc->macs_mask = ASPEED_MAC2_ON; amc->i2c_init = bletchley_bmc_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = BLETCHLEY_BMC_RAM_SIZE; aspeed_machine_class_init_cpus_defaults(mc); } -static void fby35_reset(MachineState *state, ShutdownCause reason) +static void aspeed_machine_catalina_class_init(ObjectClass *oc, + const void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Facebook Catalina BMC (Cortex-A7)"; + amc->soc_name = "ast2600-a3"; + amc->hw_strap1 = CATALINA_BMC_HW_STRAP1; + amc->hw_strap2 = CATALINA_BMC_HW_STRAP2; + amc->fmc_model = "w25q01jvq"; + amc->spi_model = NULL; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC2_ON; + amc->i2c_init = catalina_bmc_i2c_init; + mc->auto_create_sdcard = true; + mc->default_ram_size = CATALINA_BMC_RAM_SIZE; + aspeed_machine_class_init_cpus_defaults(mc); + aspeed_machine_ast2600_class_emmc_init(oc); +} + +#define GB200NVL_BMC_RAM_SIZE ASPEED_RAM_SIZE(1 * GiB) + +static void aspeed_machine_gb200nvl_class_init(ObjectClass *oc, + const void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + 
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Nvidia GB200NVL BMC (Cortex-A7)"; + amc->soc_name = "ast2600-a3"; + amc->hw_strap1 = GB200NVL_BMC_HW_STRAP1; + amc->hw_strap2 = GB200NVL_BMC_HW_STRAP2; + amc->fmc_model = "mx66u51235f"; + amc->spi_model = "mx66u51235f"; + amc->num_cs = 2; + + amc->spi2_model = "mx66u51235f"; + amc->num_cs2 = 1; + amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; + amc->i2c_init = gb200nvl_bmc_i2c_init; + mc->default_ram_size = GB200NVL_BMC_RAM_SIZE; + aspeed_machine_class_init_cpus_defaults(mc); + aspeed_machine_ast2600_class_emmc_init(oc); +} + +static void fby35_reset(MachineState *state, ResetType type) { AspeedMachineState *bmc = ASPEED_MACHINE(state); AspeedGPIOState *gpio = &bmc->soc->gpio; - qemu_devices_reset(reason); + qemu_devices_reset(type); /* Board ID: 7 (Class-1, 4 slots) */ object_property_set_bool(OBJECT(gpio), "gpioV4", true, &error_fatal); @@ -1555,7 +1882,7 @@ static void fby35_reset(MachineState *state, ShutdownCause reason) object_property_set_bool(OBJECT(gpio), "gpioB5", false, &error_fatal); } -static void aspeed_machine_fby35_class_init(ObjectClass *oc, void *data) +static void aspeed_machine_fby35_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1566,6 +1893,7 @@ static void aspeed_machine_fby35_class_init(ObjectClass *oc, void *data) amc->num_cs = 2; amc->macs_mask = ASPEED_MAC3_ON; amc->i2c_init = fby35_i2c_init; + mc->auto_create_sdcard = true; /* FIXME: Replace this macro with something more general */ mc->default_ram_size = FUJI_BMC_RAM_SIZE; aspeed_machine_class_init_cpus_defaults(mc); @@ -1594,18 +1922,20 @@ static void aspeed_minibmc_machine_init(MachineState *machine) connect_serial_hds_to_uarts(bmc); qdev_realize(DEVICE(bmc->soc), NULL, &error_abort); - aspeed_board_init_flashes(&bmc->soc->fmc, - bmc->fmc_model ? bmc->fmc_model : amc->fmc_model, - amc->num_cs, - 0); + if (defaults_enabled()) { + aspeed_board_init_flashes(&bmc->soc->fmc, + bmc->fmc_model ? bmc->fmc_model : amc->fmc_model, + amc->num_cs, + 0); - aspeed_board_init_flashes(&bmc->soc->spi[0], - bmc->spi_model ? bmc->spi_model : amc->spi_model, - amc->num_cs, amc->num_cs); + aspeed_board_init_flashes(&bmc->soc->spi[0], + bmc->spi_model ? bmc->spi_model : amc->spi_model, + amc->num_cs, amc->num_cs); - aspeed_board_init_flashes(&bmc->soc->spi[1], - bmc->spi_model ? bmc->spi_model : amc->spi_model, - amc->num_cs, (amc->num_cs * 2)); + aspeed_board_init_flashes(&bmc->soc->spi[1], + bmc->spi_model ? 
bmc->spi_model : amc->spi_model, + amc->num_cs, (amc->num_cs * 2)); + } if (amc->i2c_init) { amc->i2c_init(bmc); @@ -1630,7 +1960,7 @@ static void ast1030_evb_i2c_init(AspeedMachineState *bmc) } static void aspeed_minibmc_machine_ast1030_evb_class_init(ObjectClass *oc, - void *data) + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1642,20 +1972,31 @@ static void aspeed_minibmc_machine_ast1030_evb_class_init(ObjectClass *oc, mc->init = aspeed_minibmc_machine_init; amc->i2c_init = ast1030_evb_i2c_init; mc->default_ram_size = 0; - amc->fmc_model = "sst25vf032b"; - amc->spi_model = "sst25vf032b"; + amc->fmc_model = "w25q80bl"; + amc->spi_model = "w25q256"; amc->num_cs = 2; amc->macs_mask = 0; aspeed_machine_class_init_cpus_defaults(mc); } #ifdef TARGET_AARCH64 -static void aspeed_machine_ast2700_evb_class_init(ObjectClass *oc, void *data) +static void ast2700_evb_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = bmc->soc; + + /* LM75 is compatible with TMP105 driver */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 0), + TYPE_TMP105, 0x4d); +} + +static void aspeed_machine_ast2700a0_evb_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); - mc->desc = "Aspeed AST2700 EVB (Cortex-A35)"; + mc->alias = "ast2700-evb"; + mc->desc = "Aspeed AST2700 A0 EVB (Cortex-A35)"; amc->soc_name = "ast2700-a0"; amc->hw_strap1 = AST2700_EVB_HW_STRAP1; amc->hw_strap2 = AST2700_EVB_HW_STRAP2; @@ -1664,13 +2005,38 @@ static void aspeed_machine_ast2700_evb_class_init(ObjectClass *oc, void *data) amc->num_cs = 2; amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON | ASPEED_MAC2_ON; amc->uart_default = ASPEED_DEV_UART12; + amc->i2c_init = ast2700_evb_i2c_init; + amc->vbootrom = true; + mc->auto_create_sdcard = true; + mc->default_ram_size = 1 * GiB; + aspeed_machine_class_init_cpus_defaults(mc); +} + +static void aspeed_machine_ast2700a1_evb_class_init(ObjectClass *oc, + const void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Aspeed AST2700 A1 EVB (Cortex-A35)"; + amc->soc_name = "ast2700-a1"; + amc->hw_strap1 = AST2700_EVB_HW_STRAP1; + amc->hw_strap2 = AST2700_EVB_HW_STRAP2; + amc->fmc_model = "w25q01jvq"; + amc->spi_model = "w25q512jv"; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON | ASPEED_MAC2_ON; + amc->uart_default = ASPEED_DEV_UART12; + amc->i2c_init = ast2700_evb_i2c_init; + amc->vbootrom = true; + mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; aspeed_machine_class_init_cpus_defaults(mc); } #endif static void aspeed_machine_qcom_dc_scm_v1_class_init(ObjectClass *oc, - void *data) + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1684,12 +2050,13 @@ static void aspeed_machine_qcom_dc_scm_v1_class_init(ObjectClass *oc, amc->num_cs = 2; amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON; amc->i2c_init = qcom_dc_scm_bmc_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; aspeed_machine_class_init_cpus_defaults(mc); }; static void aspeed_machine_qcom_firework_class_init(ObjectClass *oc, - void *data) + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); @@ -1703,6 +2070,7 @@ static void aspeed_machine_qcom_firework_class_init(ObjectClass *oc, amc->num_cs = 2; amc->macs_mask = ASPEED_MAC2_ON | 
ASPEED_MAC3_ON; amc->i2c_init = qcom_dc_scm_firework_i2c_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; aspeed_machine_class_init_cpus_defaults(mc); }; @@ -1744,10 +2112,6 @@ static const TypeInfo aspeed_machine_types[] = { .name = MACHINE_TYPE_NAME("yosemitev2-bmc"), .parent = TYPE_ASPEED_MACHINE, .class_init = aspeed_machine_yosemitev2_class_init, - }, { - .name = MACHINE_TYPE_NAME("tacoma-bmc"), - .parent = TYPE_ASPEED_MACHINE, - .class_init = aspeed_machine_tacoma_class_init, }, { .name = MACHINE_TYPE_NAME("tiogapass-bmc"), .parent = TYPE_ASPEED_MACHINE, @@ -1784,6 +2148,14 @@ static const TypeInfo aspeed_machine_types[] = { .name = MACHINE_TYPE_NAME("bletchley-bmc"), .parent = TYPE_ASPEED_MACHINE, .class_init = aspeed_machine_bletchley_class_init, + }, { + .name = MACHINE_TYPE_NAME("gb200nvl-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_gb200nvl_class_init, + }, { + .name = MACHINE_TYPE_NAME("catalina-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_catalina_class_init, }, { .name = MACHINE_TYPE_NAME("fby35-bmc"), .parent = MACHINE_TYPE_NAME("ast2600-evb"), @@ -1794,9 +2166,13 @@ static const TypeInfo aspeed_machine_types[] = { .class_init = aspeed_minibmc_machine_ast1030_evb_class_init, #ifdef TARGET_AARCH64 }, { - .name = MACHINE_TYPE_NAME("ast2700-evb"), + .name = MACHINE_TYPE_NAME("ast2700a0-evb"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_ast2700a0_evb_class_init, + }, { + .name = MACHINE_TYPE_NAME("ast2700a1-evb"), .parent = TYPE_ASPEED_MACHINE, - .class_init = aspeed_machine_ast2700_evb_class_init, + .class_init = aspeed_machine_ast2700a1_evb_class_init, #endif }, { .name = TYPE_ASPEED_MACHINE, diff --git a/hw/arm/aspeed_ast10x0.c b/hw/arm/aspeed_ast10x0.c index 9f98ad8e87a..e6e1ee63c1c 100644 --- a/hw/arm/aspeed_ast10x0.c +++ b/hw/arm/aspeed_ast10x0.c @@ -11,8 +11,8 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "exec/address-spaces.h" -#include "sysemu/sysemu.h" +#include "system/address-spaces.h" +#include "system/system.h" #include "hw/qdev-clock.h" #include "hw/misc/unimp.h" #include "hw/arm/aspeed_soc.h" @@ -116,7 +116,7 @@ static void aspeed_soc_ast1030_init(Object *obj) char typename[64]; int i; - if (sscanf(sc->name, "%7s", socname) != 1) { + if (sscanf(object_get_typename(obj), "%7s", socname) != 1) { g_assert_not_reached(); } @@ -415,7 +415,7 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) sc->memmap[ASPEED_DEV_JTAG1], 0x20); } -static void aspeed_soc_ast1030_class_init(ObjectClass *klass, void *data) +static void aspeed_soc_ast1030_class_init(ObjectClass *klass, const void *data) { static const char * const valid_cpu_types[] = { ARM_CPU_TYPE_NAME("cortex-m4"), /* TODO cortex-m4f */ @@ -428,7 +428,6 @@ static void aspeed_soc_ast1030_class_init(ObjectClass *klass, void *data) dc->user_creatable = false; dc->realize = aspeed_soc_ast1030_realize; - sc->name = "ast1030-a1"; sc->valid_cpu_types = valid_cpu_types; sc->silicon_rev = AST1030_A1_SILICON_REV; sc->sram_size = 0xc0000; diff --git a/hw/arm/aspeed_ast2400.c b/hw/arm/aspeed_ast2400.c index d1258862075..c7b0f21887b 100644 --- a/hw/arm/aspeed_ast2400.c +++ b/hw/arm/aspeed_ast2400.c @@ -15,12 +15,12 @@ #include "qapi/error.h" #include "hw/misc/unimp.h" #include "hw/arm/aspeed_soc.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "qemu/module.h" #include "qemu/error-report.h" #include "hw/i2c/aspeed_i2c.h" #include "net/net.h" -#include "sysemu/sysemu.h" 
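
Editorial sketch, not part of the patch: two related cleanups run through the Aspeed SoC files here. The "sysemu/..." headers move to their new "system/..." locations, and the per-class name field is dropped in favour of deriving the SoC prefix from the QOM type name itself; per-SoC child device types such as the SDHCI controller are then built from that prefix. Condensed from the aspeed_ast10x0.c and aspeed_ast2400.c hunks, with an abbreviated init function for illustration:

static void example_aspeed_soc_init(Object *obj)
{
    AspeedSoCState *s = ASPEED_SOC(obj);
    char socname[8];
    char typename[64];

    /* "ast2400-a1" -> "ast2400", "ast1030-a1" -> "ast1030", ... */
    if (sscanf(object_get_typename(obj), "%7s", socname) != 1) {
        g_assert_not_reached();
    }

    /* per-SoC child types, e.g. "aspeed.sdhci-ast2400" */
    snprintf(typename, sizeof(typename), "aspeed.sdhci-%s", socname);
    object_initialize_child(obj, "sdc", &s->sdhci, typename);
}
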
+#include "system/system.h" #include "target/arm/cpu-qom.h" #define ASPEED_SOC_IOMEM_SIZE 0x00200000 @@ -151,7 +151,7 @@ static void aspeed_ast2400_soc_init(Object *obj) char socname[8]; char typename[64]; - if (sscanf(sc->name, "%7s", socname) != 1) { + if (sscanf(object_get_typename(obj), "%7s", socname) != 1) { g_assert_not_reached(); } @@ -224,7 +224,8 @@ static void aspeed_ast2400_soc_init(Object *obj) snprintf(typename, sizeof(typename), "aspeed.gpio-%s", socname); object_initialize_child(obj, "gpio", &s->gpio, typename); - object_initialize_child(obj, "sdc", &s->sdhci, TYPE_ASPEED_SDHCI); + snprintf(typename, sizeof(typename), "aspeed.sdhci-%s", socname); + object_initialize_child(obj, "sdc", &s->sdhci, typename); object_property_set_int(OBJECT(&s->sdhci), "num-slots", 2, &error_abort); @@ -501,7 +502,7 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) aspeed_soc_get_irq(s, ASPEED_DEV_HACE)); } -static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data) +static void aspeed_soc_ast2400_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { ARM_CPU_TYPE_NAME("arm926"), @@ -514,7 +515,6 @@ static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data) /* Reason: Uses serial_hds and nd_table in realize() directly */ dc->user_creatable = false; - sc->name = "ast2400-a1"; sc->valid_cpu_types = valid_cpu_types; sc->silicon_rev = AST2400_A1_SILICON_REV; sc->sram_size = 0x8000; @@ -530,7 +530,7 @@ static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data) sc->get_irq = aspeed_soc_ast2400_get_irq; } -static void aspeed_soc_ast2500_class_init(ObjectClass *oc, void *data) +static void aspeed_soc_ast2500_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { ARM_CPU_TYPE_NAME("arm1176"), @@ -543,7 +543,6 @@ static void aspeed_soc_ast2500_class_init(ObjectClass *oc, void *data) /* Reason: Uses serial_hds and nd_table in realize() directly */ dc->user_creatable = false; - sc->name = "ast2500-a1"; sc->valid_cpu_types = valid_cpu_types; sc->silicon_rev = AST2500_A1_SILICON_REV; sc->sram_size = 0x9000; diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c index be3eb70cdd7..d12707f0abe 100644 --- a/hw/arm/aspeed_ast2600.c +++ b/hw/arm/aspeed_ast2600.c @@ -15,7 +15,7 @@ #include "qemu/error-report.h" #include "hw/i2c/aspeed_i2c.h" #include "net/net.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "target/arm/cpu-qom.h" #define ASPEED_SOC_IOMEM_SIZE 0x00200000 @@ -157,7 +157,7 @@ static void aspeed_soc_ast2600_init(Object *obj) char socname[8]; char typename[64]; - if (sscanf(sc->name, "%7s", socname) != 1) { + if (sscanf(object_get_typename(obj), "%7s", socname) != 1) { g_assert_not_reached(); } @@ -236,8 +236,8 @@ static void aspeed_soc_ast2600_init(Object *obj) snprintf(typename, sizeof(typename), "aspeed.gpio-%s-1_8v", socname); object_initialize_child(obj, "gpio_1_8v", &s->gpio_1_8v, typename); - object_initialize_child(obj, "sd-controller", &s->sdhci, - TYPE_ASPEED_SDHCI); + snprintf(typename, sizeof(typename), "aspeed.sdhci-%s", socname); + object_initialize_child(obj, "sd-controller", &s->sdhci, typename); object_property_set_int(OBJECT(&s->sdhci), "num-slots", 2, &error_abort); @@ -247,8 +247,7 @@ static void aspeed_soc_ast2600_init(Object *obj) &s->sdhci.slots[i], TYPE_SYSBUS_SDHCI); } - object_initialize_child(obj, "emmc-controller", &s->emmc, - TYPE_ASPEED_SDHCI); + object_initialize_child(obj, "emmc-controller", &s->emmc, 
typename); object_property_set_int(OBJECT(&s->emmc), "num-slots", 1, &error_abort); @@ -541,7 +540,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio), 0, sc->memmap[ASPEED_DEV_GPIO]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio), 0, + sc->memmap[ASPEED_DEV_GPIO]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0, aspeed_soc_get_irq(s, ASPEED_DEV_GPIO)); @@ -653,7 +653,7 @@ static bool aspeed_soc_ast2600_boot_from_emmc(AspeedSoCState *s) return !!(hw_strap1 & SCU_AST2600_HW_STRAP_BOOT_SRC_EMMC); } -static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data) +static void aspeed_soc_ast2600_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { ARM_CPU_TYPE_NAME("cortex-a7"), @@ -666,7 +666,6 @@ static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data) /* Reason: The Aspeed SoC can only be instantiated from a board */ dc->user_creatable = false; - sc->name = "ast2600-a3"; sc->valid_cpu_types = valid_cpu_types; sc->silicon_rev = AST2600_A3_SILICON_REV; sc->sram_size = 0x16400; diff --git a/hw/arm/aspeed_ast27x0-fc.c b/hw/arm/aspeed_ast27x0-fc.c new file mode 100644 index 00000000000..7087be4288b --- /dev/null +++ b/hw/arm/aspeed_ast27x0-fc.c @@ -0,0 +1,200 @@ +/* + * ASPEED SoC 2700 family + * + * Copyright (C) 2025 ASPEED Technology Inc. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "system/block-backend.h" +#include "system/system.h" +#include "hw/arm/aspeed.h" +#include "hw/boards.h" +#include "hw/qdev-clock.h" +#include "hw/arm/aspeed_soc.h" +#include "hw/loader.h" +#include "hw/arm/boot.h" +#include "hw/block/flash.h" + + +#define TYPE_AST2700A1FC MACHINE_TYPE_NAME("ast2700fc") +OBJECT_DECLARE_SIMPLE_TYPE(Ast2700FCState, AST2700A1FC); + +static struct arm_boot_info ast2700fc_board_info = { + .board_id = -1, /* device-tree-only board */ +}; + +struct Ast2700FCState { + MachineState parent_obj; + + MemoryRegion ca35_memory; + MemoryRegion ca35_dram; + MemoryRegion ssp_memory; + MemoryRegion tsp_memory; + + Clock *ssp_sysclk; + Clock *tsp_sysclk; + + Aspeed27x0SoCState ca35; + Aspeed27x0SSPSoCState ssp; + Aspeed27x0TSPSoCState tsp; + + bool mmio_exec; +}; + +#define AST2700FC_BMC_RAM_SIZE (1 * GiB) +#define AST2700FC_CM4_DRAM_SIZE (32 * MiB) + +#define AST2700FC_HW_STRAP1 0x000000C0 +#define AST2700FC_HW_STRAP2 0x00000003 +#define AST2700FC_FMC_MODEL "w25q01jvq" +#define AST2700FC_SPI_MODEL "w25q512jv" + +static void ast2700fc_ca35_init(MachineState *machine) +{ + Ast2700FCState *s = AST2700A1FC(machine); + AspeedSoCState *soc; + AspeedSoCClass *sc; + + object_initialize_child(OBJECT(s), "ca35", &s->ca35, "ast2700-a1"); + soc = ASPEED_SOC(&s->ca35); + sc = ASPEED_SOC_GET_CLASS(soc); + + memory_region_init(&s->ca35_memory, OBJECT(&s->ca35), "ca35-memory", + UINT64_MAX); + memory_region_add_subregion(get_system_memory(), 0, &s->ca35_memory); + + if (!memory_region_init_ram(&s->ca35_dram, OBJECT(&s->ca35), "ca35-dram", + AST2700FC_BMC_RAM_SIZE, &error_abort)) { + return; + } + if (!object_property_set_link(OBJECT(&s->ca35), "memory", + OBJECT(&s->ca35_memory), + &error_abort)) { + return; + }; + if (!object_property_set_link(OBJECT(&s->ca35), "dram", + OBJECT(&s->ca35_dram), &error_abort)) 
{ + return; + } + if (!object_property_set_int(OBJECT(&s->ca35), "ram-size", + AST2700FC_BMC_RAM_SIZE, &error_abort)) { + return; + } + + for (int i = 0; i < sc->macs_num; i++) { + if (!qemu_configure_nic_device(DEVICE(&soc->ftgmac100[i]), + true, NULL)) { + break; + } + } + if (!object_property_set_int(OBJECT(&s->ca35), "hw-strap1", + AST2700FC_HW_STRAP1, &error_abort)) { + return; + } + if (!object_property_set_int(OBJECT(&s->ca35), "hw-strap2", + AST2700FC_HW_STRAP2, &error_abort)) { + return; + } + aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART12, serial_hd(0)); + if (!qdev_realize(DEVICE(&s->ca35), NULL, &error_abort)) { + return; + } + + /* + * AST2700 EVB has a LM75 temperature sensor on I2C bus 0 at address 0x4d. + */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 0), "tmp105", 0x4d); + + aspeed_board_init_flashes(&soc->fmc, AST2700FC_FMC_MODEL, 2, 0); + aspeed_board_init_flashes(&soc->spi[0], AST2700FC_SPI_MODEL, 1, 2); + + ast2700fc_board_info.ram_size = machine->ram_size; + ast2700fc_board_info.loader_start = sc->memmap[ASPEED_DEV_SDRAM]; + + arm_load_kernel(ARM_CPU(first_cpu), machine, &ast2700fc_board_info); +} + +static void ast2700fc_ssp_init(MachineState *machine) +{ + AspeedSoCState *soc; + Ast2700FCState *s = AST2700A1FC(machine); + s->ssp_sysclk = clock_new(OBJECT(s), "SSP_SYSCLK"); + clock_set_hz(s->ssp_sysclk, 200000000ULL); + + object_initialize_child(OBJECT(s), "ssp", &s->ssp, TYPE_ASPEED27X0SSP_SOC); + memory_region_init(&s->ssp_memory, OBJECT(&s->ssp), "ssp-memory", + UINT64_MAX); + + qdev_connect_clock_in(DEVICE(&s->ssp), "sysclk", s->ssp_sysclk); + if (!object_property_set_link(OBJECT(&s->ssp), "memory", + OBJECT(&s->ssp_memory), &error_abort)) { + return; + } + + soc = ASPEED_SOC(&s->ssp); + aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART4, serial_hd(1)); + if (!qdev_realize(DEVICE(&s->ssp), NULL, &error_abort)) { + return; + } +} + +static void ast2700fc_tsp_init(MachineState *machine) +{ + AspeedSoCState *soc; + Ast2700FCState *s = AST2700A1FC(machine); + s->tsp_sysclk = clock_new(OBJECT(s), "TSP_SYSCLK"); + clock_set_hz(s->tsp_sysclk, 200000000ULL); + + object_initialize_child(OBJECT(s), "tsp", &s->tsp, TYPE_ASPEED27X0TSP_SOC); + memory_region_init(&s->tsp_memory, OBJECT(&s->tsp), "tsp-memory", + UINT64_MAX); + + qdev_connect_clock_in(DEVICE(&s->tsp), "sysclk", s->tsp_sysclk); + if (!object_property_set_link(OBJECT(&s->tsp), "memory", + OBJECT(&s->tsp_memory), &error_abort)) { + return; + } + + soc = ASPEED_SOC(&s->tsp); + aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART7, serial_hd(2)); + if (!qdev_realize(DEVICE(&s->tsp), NULL, &error_abort)) { + return; + } +} + +static void ast2700fc_init(MachineState *machine) +{ + ast2700fc_ca35_init(machine); + ast2700fc_ssp_init(machine); + ast2700fc_tsp_init(machine); +} + +static void ast2700fc_class_init(ObjectClass *oc, const void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->alias = "ast2700fc"; + mc->desc = "ast2700 full core support"; + mc->init = ast2700fc_init; + mc->no_floppy = 1; + mc->no_cdrom = 1; + mc->min_cpus = mc->max_cpus = mc->default_cpus = 6; +} + +static const TypeInfo ast2700fc_types[] = { + { + .name = MACHINE_TYPE_NAME("ast2700fc"), + .parent = TYPE_MACHINE, + .class_init = ast2700fc_class_init, + .instance_size = sizeof(Ast2700FCState), + }, +}; + +DEFINE_TYPES(ast2700fc_types) diff --git a/hw/arm/aspeed_ast27x0-ssp.c b/hw/arm/aspeed_ast27x0-ssp.c new file mode 100644 index 00000000000..80ec5996c1d --- /dev/null +++ b/hw/arm/aspeed_ast27x0-ssp.c @@ -0,0 +1,294 @@ +/* + * ASPEED 
Ast27x0 SSP SoC + * + * Copyright (C) 2025 ASPEED Technology Inc. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-clock.h" +#include "hw/misc/unimp.h" +#include "hw/arm/aspeed_soc.h" + +#define AST2700_SSP_RAM_SIZE (32 * MiB) + +static const hwaddr aspeed_soc_ast27x0ssp_memmap[] = { + [ASPEED_DEV_SRAM] = 0x00000000, + [ASPEED_DEV_INTC] = 0x72100000, + [ASPEED_DEV_SCU] = 0x72C02000, + [ASPEED_DEV_SCUIO] = 0x74C02000, + [ASPEED_DEV_UART0] = 0x74C33000, + [ASPEED_DEV_UART1] = 0x74C33100, + [ASPEED_DEV_UART2] = 0x74C33200, + [ASPEED_DEV_UART3] = 0x74C33300, + [ASPEED_DEV_UART4] = 0x72C1A000, + [ASPEED_DEV_INTCIO] = 0x74C18000, + [ASPEED_DEV_IPC0] = 0x72C1C000, + [ASPEED_DEV_IPC1] = 0x74C39000, + [ASPEED_DEV_UART5] = 0x74C33400, + [ASPEED_DEV_UART6] = 0x74C33500, + [ASPEED_DEV_UART7] = 0x74C33600, + [ASPEED_DEV_UART8] = 0x74C33700, + [ASPEED_DEV_UART9] = 0x74C33800, + [ASPEED_DEV_UART10] = 0x74C33900, + [ASPEED_DEV_UART11] = 0x74C33A00, + [ASPEED_DEV_UART12] = 0x74C33B00, + [ASPEED_DEV_TIMER1] = 0x72C10000, +}; + +static const int aspeed_soc_ast27x0ssp_irqmap[] = { + [ASPEED_DEV_SCU] = 12, + [ASPEED_DEV_UART0] = 164, + [ASPEED_DEV_UART1] = 164, + [ASPEED_DEV_UART2] = 164, + [ASPEED_DEV_UART3] = 164, + [ASPEED_DEV_UART4] = 8, + [ASPEED_DEV_UART5] = 164, + [ASPEED_DEV_UART6] = 164, + [ASPEED_DEV_UART7] = 164, + [ASPEED_DEV_UART8] = 164, + [ASPEED_DEV_UART9] = 164, + [ASPEED_DEV_UART10] = 164, + [ASPEED_DEV_UART11] = 164, + [ASPEED_DEV_UART12] = 164, + [ASPEED_DEV_TIMER1] = 16, +}; + +/* SSPINT 164 */ +static const int ast2700_ssp132_ssp164_intcmap[] = { + [ASPEED_DEV_UART0] = 7, + [ASPEED_DEV_UART1] = 8, + [ASPEED_DEV_UART2] = 9, + [ASPEED_DEV_UART3] = 10, + [ASPEED_DEV_UART5] = 11, + [ASPEED_DEV_UART6] = 12, + [ASPEED_DEV_UART7] = 13, + [ASPEED_DEV_UART8] = 14, + [ASPEED_DEV_UART9] = 15, + [ASPEED_DEV_UART10] = 16, + [ASPEED_DEV_UART11] = 17, + [ASPEED_DEV_UART12] = 18, +}; + +struct nvic_intc_irq_info { + int irq; + int intc_idx; + int orgate_idx; + const int *ptr; +}; + +static struct nvic_intc_irq_info ast2700_ssp_intcmap[] = { + {160, 1, 0, NULL}, + {161, 1, 1, NULL}, + {162, 1, 2, NULL}, + {163, 1, 3, NULL}, + {164, 1, 4, ast2700_ssp132_ssp164_intcmap}, + {165, 1, 5, NULL}, + {166, 1, 6, NULL}, + {167, 1, 7, NULL}, + {168, 1, 8, NULL}, + {169, 1, 9, NULL}, + {128, 0, 1, NULL}, + {129, 0, 2, NULL}, + {130, 0, 3, NULL}, + {131, 0, 4, NULL}, + {132, 0, 5, ast2700_ssp132_ssp164_intcmap}, + {133, 0, 6, NULL}, + {134, 0, 7, NULL}, + {135, 0, 8, NULL}, + {136, 0, 9, NULL}, +}; + +static qemu_irq aspeed_soc_ast27x0ssp_get_irq(AspeedSoCState *s, int dev) +{ + Aspeed27x0SSPSoCState *a = ASPEED27X0SSP_SOC(s); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + + int or_idx; + int idx; + int i; + + for (i = 0; i < ARRAY_SIZE(ast2700_ssp_intcmap); i++) { + if (sc->irqmap[dev] == ast2700_ssp_intcmap[i].irq) { + assert(ast2700_ssp_intcmap[i].ptr); + or_idx = ast2700_ssp_intcmap[i].orgate_idx; + idx = ast2700_ssp_intcmap[i].intc_idx; + return qdev_get_gpio_in(DEVICE(&a->intc[idx].orgates[or_idx]), + ast2700_ssp_intcmap[i].ptr[dev]); + } + } + + return qdev_get_gpio_in(DEVICE(&a->armv7m), sc->irqmap[dev]); +} + +static void aspeed_soc_ast27x0ssp_init(Object *obj) +{ + Aspeed27x0SSPSoCState *a = ASPEED27X0SSP_SOC(obj); + AspeedSoCState *s = ASPEED_SOC(obj); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + int i; + + 
object_initialize_child(obj, "armv7m", &a->armv7m, TYPE_ARMV7M); + object_initialize_child(obj, "scu", &s->scu, TYPE_ASPEED_2700_SCU); + s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); + qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev", sc->silicon_rev); + + for (i = 0; i < sc->uarts_num; i++) { + object_initialize_child(obj, "uart[*]", &s->uart[i], TYPE_SERIAL_MM); + } + + object_initialize_child(obj, "intc0", &a->intc[0], + TYPE_ASPEED_2700SSP_INTC); + object_initialize_child(obj, "intc1", &a->intc[1], + TYPE_ASPEED_2700SSP_INTCIO); + + object_initialize_child(obj, "timerctrl", &s->timerctrl, + TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "ipc0", &a->ipc[0], + TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "ipc1", &a->ipc[1], + TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "scuio", &a->scuio, + TYPE_UNIMPLEMENTED_DEVICE); +} + +static void aspeed_soc_ast27x0ssp_realize(DeviceState *dev_soc, Error **errp) +{ + Aspeed27x0SSPSoCState *a = ASPEED27X0SSP_SOC(dev_soc); + AspeedSoCState *s = ASPEED_SOC(dev_soc); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + DeviceState *armv7m; + g_autofree char *sram_name = NULL; + int i; + + if (!clock_has_source(s->sysclk)) { + error_setg(errp, "sysclk clock must be wired up by the board code"); + return; + } + + /* AST27X0 SSP Core */ + armv7m = DEVICE(&a->armv7m); + qdev_prop_set_uint32(armv7m, "num-irq", 256); + qdev_prop_set_string(armv7m, "cpu-type", aspeed_soc_cpu_type(sc)); + qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); + object_property_set_link(OBJECT(&a->armv7m), "memory", + OBJECT(s->memory), &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&a->armv7m), &error_abort); + + sram_name = g_strdup_printf("aspeed.dram.%d", + CPU(a->armv7m.cpu)->cpu_index); + + if (!memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, + errp)) { + return; + } + memory_region_add_subregion(s->memory, + sc->memmap[ASPEED_DEV_SRAM], + &s->sram); + + /* SCU */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); + + /* INTC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[0]), errp)) { + return; + } + + aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[0]), 0, + sc->memmap[ASPEED_DEV_INTC]); + + /* INTCIO */ + if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[1]), errp)) { + return; + } + + aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[1]), 0, + sc->memmap[ASPEED_DEV_INTCIO]); + + /* irq source orgates -> INTC0 */ + for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[0])->num_inpins; i++) { + qdev_connect_gpio_out(DEVICE(&a->intc[0].orgates[i]), 0, + qdev_get_gpio_in(DEVICE(&a->intc[0]), i)); + } + for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[0])->num_outpins; i++) { + assert(i < ARRAY_SIZE(ast2700_ssp_intcmap)); + sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[0]), i, + qdev_get_gpio_in(DEVICE(&a->armv7m), + ast2700_ssp_intcmap[i].irq)); + } + /* irq source orgates -> INTCIO */ + for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[1])->num_inpins; i++) { + qdev_connect_gpio_out(DEVICE(&a->intc[1].orgates[i]), 0, + qdev_get_gpio_in(DEVICE(&a->intc[1]), i)); + } + /* INTCIO -> INTC */ + for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[1])->num_outpins; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[1]), i, + qdev_get_gpio_in(DEVICE(&a->intc[0].orgates[0]), i)); + } + /* UART */ + if (!aspeed_soc_uart_realize(s, errp)) { + return; + } + + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->timerctrl), + 
"aspeed.timerctrl", + sc->memmap[ASPEED_DEV_TIMER1], 0x200); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->ipc[0]), + "aspeed.ipc0", + sc->memmap[ASPEED_DEV_IPC0], 0x1000); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->ipc[1]), + "aspeed.ipc1", + sc->memmap[ASPEED_DEV_IPC1], 0x1000); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->scuio), + "aspeed.scuio", + sc->memmap[ASPEED_DEV_SCUIO], 0x1000); +} + +static void aspeed_soc_ast27x0ssp_class_init(ObjectClass *klass, const void *data) +{ + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m4"), /* TODO: cortex-m4f */ + NULL + }; + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSoCClass *sc = ASPEED_SOC_CLASS(dc); + + /* Reason: The Aspeed SoC can only be instantiated from a board */ + dc->user_creatable = false; + dc->realize = aspeed_soc_ast27x0ssp_realize; + + sc->valid_cpu_types = valid_cpu_types; + sc->silicon_rev = AST2700_A1_SILICON_REV; + sc->sram_size = AST2700_SSP_RAM_SIZE; + sc->spis_num = 0; + sc->ehcis_num = 0; + sc->wdts_num = 0; + sc->macs_num = 0; + sc->uarts_num = 13; + sc->uarts_base = ASPEED_DEV_UART0; + sc->irqmap = aspeed_soc_ast27x0ssp_irqmap; + sc->memmap = aspeed_soc_ast27x0ssp_memmap; + sc->num_cpus = 1; + sc->get_irq = aspeed_soc_ast27x0ssp_get_irq; +} + +static const TypeInfo aspeed_soc_ast27x0ssp_types[] = { + { + .name = TYPE_ASPEED27X0SSP_SOC, + .parent = TYPE_ASPEED_SOC, + .instance_size = sizeof(Aspeed27x0SSPSoCState), + .instance_init = aspeed_soc_ast27x0ssp_init, + .class_init = aspeed_soc_ast27x0ssp_class_init, + }, +}; + +DEFINE_TYPES(aspeed_soc_ast27x0ssp_types) diff --git a/hw/arm/aspeed_ast27x0-tsp.c b/hw/arm/aspeed_ast27x0-tsp.c new file mode 100644 index 00000000000..4e0efaef07c --- /dev/null +++ b/hw/arm/aspeed_ast27x0-tsp.c @@ -0,0 +1,294 @@ +/* + * ASPEED Ast27x0 TSP SoC + * + * Copyright (C) 2025 ASPEED Technology Inc. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-clock.h" +#include "hw/misc/unimp.h" +#include "hw/arm/aspeed_soc.h" + +#define AST2700_TSP_RAM_SIZE (32 * MiB) + +static const hwaddr aspeed_soc_ast27x0tsp_memmap[] = { + [ASPEED_DEV_SRAM] = 0x00000000, + [ASPEED_DEV_INTC] = 0x72100000, + [ASPEED_DEV_SCU] = 0x72C02000, + [ASPEED_DEV_SCUIO] = 0x74C02000, + [ASPEED_DEV_UART0] = 0x74C33000, + [ASPEED_DEV_UART1] = 0x74C33100, + [ASPEED_DEV_UART2] = 0x74C33200, + [ASPEED_DEV_UART3] = 0x74C33300, + [ASPEED_DEV_UART4] = 0x72C1A000, + [ASPEED_DEV_INTCIO] = 0x74C18000, + [ASPEED_DEV_IPC0] = 0x72C1C000, + [ASPEED_DEV_IPC1] = 0x74C39000, + [ASPEED_DEV_UART5] = 0x74C33400, + [ASPEED_DEV_UART6] = 0x74C33500, + [ASPEED_DEV_UART7] = 0x74C33600, + [ASPEED_DEV_UART8] = 0x74C33700, + [ASPEED_DEV_UART9] = 0x74C33800, + [ASPEED_DEV_UART10] = 0x74C33900, + [ASPEED_DEV_UART11] = 0x74C33A00, + [ASPEED_DEV_UART12] = 0x74C33B00, + [ASPEED_DEV_TIMER1] = 0x72C10000, +}; + +static const int aspeed_soc_ast27x0tsp_irqmap[] = { + [ASPEED_DEV_SCU] = 12, + [ASPEED_DEV_UART0] = 164, + [ASPEED_DEV_UART1] = 164, + [ASPEED_DEV_UART2] = 164, + [ASPEED_DEV_UART3] = 164, + [ASPEED_DEV_UART4] = 8, + [ASPEED_DEV_UART5] = 164, + [ASPEED_DEV_UART6] = 164, + [ASPEED_DEV_UART7] = 164, + [ASPEED_DEV_UART8] = 164, + [ASPEED_DEV_UART9] = 164, + [ASPEED_DEV_UART10] = 164, + [ASPEED_DEV_UART11] = 164, + [ASPEED_DEV_UART12] = 164, + [ASPEED_DEV_TIMER1] = 16, +}; + +/* TSPINT 164 */ +static const int ast2700_tsp132_tsp164_intcmap[] = { + [ASPEED_DEV_UART0] = 7, + [ASPEED_DEV_UART1] = 8, + [ASPEED_DEV_UART2] = 9, + [ASPEED_DEV_UART3] = 10, + [ASPEED_DEV_UART5] = 11, + [ASPEED_DEV_UART6] = 12, + [ASPEED_DEV_UART7] = 13, + [ASPEED_DEV_UART8] = 14, + [ASPEED_DEV_UART9] = 15, + [ASPEED_DEV_UART10] = 16, + [ASPEED_DEV_UART11] = 17, + [ASPEED_DEV_UART12] = 18, +}; + +struct nvic_intc_irq_info { + int irq; + int intc_idx; + int orgate_idx; + const int *ptr; +}; + +static struct nvic_intc_irq_info ast2700_tsp_intcmap[] = { + {160, 1, 0, NULL}, + {161, 1, 1, NULL}, + {162, 1, 2, NULL}, + {163, 1, 3, NULL}, + {164, 1, 4, ast2700_tsp132_tsp164_intcmap}, + {165, 1, 5, NULL}, + {166, 1, 6, NULL}, + {167, 1, 7, NULL}, + {168, 1, 8, NULL}, + {169, 1, 9, NULL}, + {128, 0, 1, NULL}, + {129, 0, 2, NULL}, + {130, 0, 3, NULL}, + {131, 0, 4, NULL}, + {132, 0, 5, ast2700_tsp132_tsp164_intcmap}, + {133, 0, 6, NULL}, + {134, 0, 7, NULL}, + {135, 0, 8, NULL}, + {136, 0, 9, NULL}, +}; + +static qemu_irq aspeed_soc_ast27x0tsp_get_irq(AspeedSoCState *s, int dev) +{ + Aspeed27x0TSPSoCState *a = ASPEED27X0TSP_SOC(s); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + + int or_idx; + int idx; + int i; + + for (i = 0; i < ARRAY_SIZE(ast2700_tsp_intcmap); i++) { + if (sc->irqmap[dev] == ast2700_tsp_intcmap[i].irq) { + assert(ast2700_tsp_intcmap[i].ptr); + or_idx = ast2700_tsp_intcmap[i].orgate_idx; + idx = ast2700_tsp_intcmap[i].intc_idx; + return qdev_get_gpio_in(DEVICE(&a->intc[idx].orgates[or_idx]), + ast2700_tsp_intcmap[i].ptr[dev]); + } + } + + return qdev_get_gpio_in(DEVICE(&a->armv7m), sc->irqmap[dev]); +} + +static void aspeed_soc_ast27x0tsp_init(Object *obj) +{ + Aspeed27x0TSPSoCState *a = ASPEED27X0TSP_SOC(obj); + AspeedSoCState *s = ASPEED_SOC(obj); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + int i; + + object_initialize_child(obj, "armv7m", &a->armv7m, TYPE_ARMV7M); + object_initialize_child(obj, "scu", &s->scu, TYPE_ASPEED_2700_SCU); + s->sysclk = qdev_init_clock_in(DEVICE(s), 
"sysclk", NULL, NULL, 0); + qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev", sc->silicon_rev); + + for (i = 0; i < sc->uarts_num; i++) { + object_initialize_child(obj, "uart[*]", &s->uart[i], TYPE_SERIAL_MM); + } + + object_initialize_child(obj, "intc0", &a->intc[0], + TYPE_ASPEED_2700TSP_INTC); + object_initialize_child(obj, "intc1", &a->intc[1], + TYPE_ASPEED_2700TSP_INTCIO); + + object_initialize_child(obj, "timerctrl", &s->timerctrl, + TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "ipc0", &a->ipc[0], + TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "ipc1", &a->ipc[1], + TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "scuio", &a->scuio, + TYPE_UNIMPLEMENTED_DEVICE); +} + +static void aspeed_soc_ast27x0tsp_realize(DeviceState *dev_soc, Error **errp) +{ + Aspeed27x0TSPSoCState *a = ASPEED27X0TSP_SOC(dev_soc); + AspeedSoCState *s = ASPEED_SOC(dev_soc); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + DeviceState *armv7m; + g_autofree char *sram_name = NULL; + int i; + + if (!clock_has_source(s->sysclk)) { + error_setg(errp, "sysclk clock must be wired up by the board code"); + return; + } + + /* AST27X0 TSP Core */ + armv7m = DEVICE(&a->armv7m); + qdev_prop_set_uint32(armv7m, "num-irq", 256); + qdev_prop_set_string(armv7m, "cpu-type", aspeed_soc_cpu_type(sc)); + qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); + object_property_set_link(OBJECT(&a->armv7m), "memory", + OBJECT(s->memory), &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&a->armv7m), &error_abort); + + sram_name = g_strdup_printf("aspeed.dram.%d", + CPU(a->armv7m.cpu)->cpu_index); + + if (!memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, + errp)) { + return; + } + memory_region_add_subregion(s->memory, + sc->memmap[ASPEED_DEV_SRAM], + &s->sram); + + /* SCU */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); + + /* INTC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[0]), errp)) { + return; + } + + aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[0]), 0, + sc->memmap[ASPEED_DEV_INTC]); + + /* INTCIO */ + if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[1]), errp)) { + return; + } + + aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[1]), 0, + sc->memmap[ASPEED_DEV_INTCIO]); + + /* irq source orgates -> INTC */ + for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[0])->num_inpins; i++) { + qdev_connect_gpio_out(DEVICE(&a->intc[0].orgates[i]), 0, + qdev_get_gpio_in(DEVICE(&a->intc[0]), i)); + } + for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[0])->num_outpins; i++) { + assert(i < ARRAY_SIZE(ast2700_tsp_intcmap)); + sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[0]), i, + qdev_get_gpio_in(DEVICE(&a->armv7m), + ast2700_tsp_intcmap[i].irq)); + } + /* irq source orgates -> INTC */ + for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[1])->num_inpins; i++) { + qdev_connect_gpio_out(DEVICE(&a->intc[1].orgates[i]), 0, + qdev_get_gpio_in(DEVICE(&a->intc[1]), i)); + } + /* INTCIO -> INTC */ + for (i = 0; i < ASPEED_INTC_GET_CLASS(&a->intc[1])->num_outpins; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[1]), i, + qdev_get_gpio_in(DEVICE(&a->intc[0].orgates[0]), i)); + } + /* UART */ + if (!aspeed_soc_uart_realize(s, errp)) { + return; + } + + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->timerctrl), + "aspeed.timerctrl", + sc->memmap[ASPEED_DEV_TIMER1], 0x200); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->ipc[0]), + "aspeed.ipc0", + sc->memmap[ASPEED_DEV_IPC0], 0x1000); + 
aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->ipc[1]), + "aspeed.ipc1", + sc->memmap[ASPEED_DEV_IPC1], 0x1000); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->scuio), + "aspeed.scuio", + sc->memmap[ASPEED_DEV_SCUIO], 0x1000); +} + +static void aspeed_soc_ast27x0tsp_class_init(ObjectClass *klass, const void *data) +{ + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m4"), /* TODO cortex-m4f */ + NULL + }; + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSoCClass *sc = ASPEED_SOC_CLASS(dc); + + /* Reason: The Aspeed SoC can only be instantiated from a board */ + dc->user_creatable = false; + dc->realize = aspeed_soc_ast27x0tsp_realize; + + sc->valid_cpu_types = valid_cpu_types; + sc->silicon_rev = AST2700_A1_SILICON_REV; + sc->sram_size = AST2700_TSP_RAM_SIZE; + sc->spis_num = 0; + sc->ehcis_num = 0; + sc->wdts_num = 0; + sc->macs_num = 0; + sc->uarts_num = 13; + sc->uarts_base = ASPEED_DEV_UART0; + sc->irqmap = aspeed_soc_ast27x0tsp_irqmap; + sc->memmap = aspeed_soc_ast27x0tsp_memmap; + sc->num_cpus = 1; + sc->get_irq = aspeed_soc_ast27x0tsp_get_irq; +} + +static const TypeInfo aspeed_soc_ast27x0tsp_types[] = { + { + .name = TYPE_ASPEED27X0TSP_SOC, + .parent = TYPE_ASPEED_SOC, + .instance_size = sizeof(Aspeed27x0TSPSoCState), + .instance_init = aspeed_soc_ast27x0tsp_init, + .class_init = aspeed_soc_ast27x0tsp_class_init, + }, +}; + +DEFINE_TYPES(aspeed_soc_ast27x0tsp_types) diff --git a/hw/arm/aspeed_ast27x0.c b/hw/arm/aspeed_ast27x0.c index 4257b5e8af8..6aa3841b691 100644 --- a/hw/arm/aspeed_ast27x0.c +++ b/hw/arm/aspeed_ast27x0.c @@ -13,65 +13,115 @@ #include "qapi/error.h" #include "hw/misc/unimp.h" #include "hw/arm/aspeed_soc.h" +#include "hw/arm/bsa.h" #include "qemu/module.h" #include "qemu/error-report.h" #include "hw/i2c/aspeed_i2c.h" #include "net/net.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/intc/arm_gicv3.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qlist.h" #include "qemu/log.h" +#define AST2700_SOC_IO_SIZE 0x00FE0000 +#define AST2700_SOC_IOMEM_SIZE 0x01000000 +#define AST2700_SOC_DPMCU_SIZE 0x00040000 +#define AST2700_SOC_LTPI_SIZE 0x01000000 + static const hwaddr aspeed_soc_ast2700_memmap[] = { - [ASPEED_DEV_SPI_BOOT] = 0x400000000, + [ASPEED_DEV_VBOOTROM] = 0x00000000, + [ASPEED_DEV_IOMEM] = 0x00020000, [ASPEED_DEV_SRAM] = 0x10000000, + [ASPEED_DEV_DPMCU] = 0x11000000, + [ASPEED_DEV_IOMEM0] = 0x12000000, + [ASPEED_DEV_EHCI1] = 0x12061000, + [ASPEED_DEV_EHCI2] = 0x12063000, + [ASPEED_DEV_HACE] = 0x12070000, + [ASPEED_DEV_EMMC] = 0x12090000, + [ASPEED_DEV_INTC] = 0x12100000, + [ASPEED_GIC_DIST] = 0x12200000, + [ASPEED_GIC_REDIST] = 0x12280000, [ASPEED_DEV_SDMC] = 0x12C00000, [ASPEED_DEV_SCU] = 0x12C02000, - [ASPEED_DEV_SCUIO] = 0x14C02000, - [ASPEED_DEV_UART0] = 0X14C33000, - [ASPEED_DEV_UART1] = 0X14C33100, - [ASPEED_DEV_UART2] = 0X14C33200, - [ASPEED_DEV_UART3] = 0X14C33300, - [ASPEED_DEV_UART4] = 0X12C1A000, - [ASPEED_DEV_UART5] = 0X14C33400, - [ASPEED_DEV_UART6] = 0X14C33500, - [ASPEED_DEV_UART7] = 0X14C33600, - [ASPEED_DEV_UART8] = 0X14C33700, - [ASPEED_DEV_UART9] = 0X14C33800, - [ASPEED_DEV_UART10] = 0X14C33900, - [ASPEED_DEV_UART11] = 0X14C33A00, - [ASPEED_DEV_UART12] = 0X14C33B00, - [ASPEED_DEV_WDT] = 0x14C37000, - [ASPEED_DEV_VUART] = 0X14C30000, + [ASPEED_DEV_RTC] = 0x12C0F000, + [ASPEED_DEV_TIMER1] = 0x12C10000, + [ASPEED_DEV_SLI] = 0x12C17000, + [ASPEED_DEV_UART4] = 0x12C1A000, + [ASPEED_DEV_IOMEM1] = 0x14000000, [ASPEED_DEV_FMC] = 0x14000000, [ASPEED_DEV_SPI0] = 0x14010000, 
[ASPEED_DEV_SPI1] = 0x14020000, [ASPEED_DEV_SPI2] = 0x14030000, - [ASPEED_DEV_SDRAM] = 0x400000000, [ASPEED_DEV_MII1] = 0x14040000, [ASPEED_DEV_MII2] = 0x14040008, [ASPEED_DEV_MII3] = 0x14040010, [ASPEED_DEV_ETH1] = 0x14050000, [ASPEED_DEV_ETH2] = 0x14060000, [ASPEED_DEV_ETH3] = 0x14070000, - [ASPEED_DEV_EMMC] = 0x12090000, - [ASPEED_DEV_INTC] = 0x12100000, - [ASPEED_DEV_SLI] = 0x12C17000, - [ASPEED_DEV_SLIIO] = 0x14C1E000, - [ASPEED_GIC_DIST] = 0x12200000, - [ASPEED_GIC_REDIST] = 0x12280000, + [ASPEED_DEV_SDHCI] = 0x14080000, + [ASPEED_DEV_EHCI3] = 0x14121000, + [ASPEED_DEV_EHCI4] = 0x14123000, [ASPEED_DEV_ADC] = 0x14C00000, + [ASPEED_DEV_SCUIO] = 0x14C02000, + [ASPEED_DEV_GPIO] = 0x14C0B000, + [ASPEED_DEV_I2C] = 0x14C0F000, + [ASPEED_DEV_INTCIO] = 0x14C18000, + [ASPEED_DEV_SLIIO] = 0x14C1E000, + [ASPEED_DEV_VUART] = 0x14C30000, + [ASPEED_DEV_UART0] = 0x14C33000, + [ASPEED_DEV_UART1] = 0x14C33100, + [ASPEED_DEV_UART2] = 0x14C33200, + [ASPEED_DEV_UART3] = 0x14C33300, + [ASPEED_DEV_UART5] = 0x14C33400, + [ASPEED_DEV_UART6] = 0x14C33500, + [ASPEED_DEV_UART7] = 0x14C33600, + [ASPEED_DEV_UART8] = 0x14C33700, + [ASPEED_DEV_UART9] = 0x14C33800, + [ASPEED_DEV_UART10] = 0x14C33900, + [ASPEED_DEV_UART11] = 0x14C33A00, + [ASPEED_DEV_UART12] = 0x14C33B00, + [ASPEED_DEV_WDT] = 0x14C37000, + [ASPEED_DEV_SPI_BOOT] = 0x100000000, + [ASPEED_DEV_LTPI] = 0x300000000, + [ASPEED_DEV_SDRAM] = 0x400000000, }; -#define AST2700_MAX_IRQ 288 +#define AST2700_MAX_IRQ 256 /* Shared Peripheral Interrupt values below are offset by -32 from datasheet */ -static const int aspeed_soc_ast2700_irqmap[] = { +static const int aspeed_soc_ast2700a0_irqmap[] = { + [ASPEED_DEV_SDMC] = 0, + [ASPEED_DEV_HACE] = 4, + [ASPEED_DEV_XDMA] = 5, + [ASPEED_DEV_UART4] = 8, + [ASPEED_DEV_SCU] = 12, + [ASPEED_DEV_RTC] = 13, + [ASPEED_DEV_EMMC] = 15, + [ASPEED_DEV_TIMER1] = 16, + [ASPEED_DEV_TIMER2] = 17, + [ASPEED_DEV_TIMER3] = 18, + [ASPEED_DEV_TIMER4] = 19, + [ASPEED_DEV_TIMER5] = 20, + [ASPEED_DEV_TIMER6] = 21, + [ASPEED_DEV_TIMER7] = 22, + [ASPEED_DEV_TIMER8] = 23, + [ASPEED_DEV_DP] = 28, + [ASPEED_DEV_EHCI1] = 33, + [ASPEED_DEV_EHCI2] = 37, + [ASPEED_DEV_LPC] = 128, + [ASPEED_DEV_IBT] = 128, + [ASPEED_DEV_KCS] = 128, + [ASPEED_DEV_ADC] = 130, + [ASPEED_DEV_GPIO] = 130, + [ASPEED_DEV_I2C] = 130, + [ASPEED_DEV_FMC] = 131, + [ASPEED_DEV_WDT] = 131, + [ASPEED_DEV_PWM] = 131, + [ASPEED_DEV_I3C] = 131, [ASPEED_DEV_UART0] = 132, [ASPEED_DEV_UART1] = 132, [ASPEED_DEV_UART2] = 132, [ASPEED_DEV_UART3] = 132, - [ASPEED_DEV_UART4] = 8, [ASPEED_DEV_UART5] = 132, [ASPEED_DEV_UART6] = 132, [ASPEED_DEV_UART7] = 132, @@ -80,15 +130,21 @@ static const int aspeed_soc_ast2700_irqmap[] = { [ASPEED_DEV_UART10] = 132, [ASPEED_DEV_UART11] = 132, [ASPEED_DEV_UART12] = 132, - [ASPEED_DEV_FMC] = 131, + [ASPEED_DEV_ETH1] = 132, + [ASPEED_DEV_ETH2] = 132, + [ASPEED_DEV_ETH3] = 132, + [ASPEED_DEV_PECI] = 133, + [ASPEED_DEV_SDHCI] = 133, +}; + +static const int aspeed_soc_ast2700a1_irqmap[] = { [ASPEED_DEV_SDMC] = 0, - [ASPEED_DEV_SCU] = 12, - [ASPEED_DEV_ADC] = 130, + [ASPEED_DEV_HACE] = 4, [ASPEED_DEV_XDMA] = 5, - [ASPEED_DEV_EMMC] = 15, - [ASPEED_DEV_GPIO] = 11, - [ASPEED_DEV_GPIO_1_8V] = 130, + [ASPEED_DEV_UART4] = 8, + [ASPEED_DEV_SCU] = 12, [ASPEED_DEV_RTC] = 13, + [ASPEED_DEV_EMMC] = 15, [ASPEED_DEV_TIMER1] = 16, [ASPEED_DEV_TIMER2] = 17, [ASPEED_DEV_TIMER3] = 18, @@ -97,37 +153,60 @@ static const int aspeed_soc_ast2700_irqmap[] = { [ASPEED_DEV_TIMER6] = 21, [ASPEED_DEV_TIMER7] = 22, [ASPEED_DEV_TIMER8] = 23, - [ASPEED_DEV_WDT] = 131, - [ASPEED_DEV_PWM] = 131, 
- [ASPEED_DEV_LPC] = 128, - [ASPEED_DEV_IBT] = 128, - [ASPEED_DEV_I2C] = 130, - [ASPEED_DEV_PECI] = 133, - [ASPEED_DEV_ETH1] = 132, - [ASPEED_DEV_ETH2] = 132, - [ASPEED_DEV_ETH3] = 132, - [ASPEED_DEV_HACE] = 4, - [ASPEED_DEV_KCS] = 128, [ASPEED_DEV_DP] = 28, - [ASPEED_DEV_I3C] = 131, + [ASPEED_DEV_EHCI1] = 33, + [ASPEED_DEV_EHCI2] = 37, + [ASPEED_DEV_LPC] = 192, + [ASPEED_DEV_IBT] = 192, + [ASPEED_DEV_KCS] = 192, + [ASPEED_DEV_I2C] = 194, + [ASPEED_DEV_ADC] = 194, + [ASPEED_DEV_GPIO] = 194, + [ASPEED_DEV_FMC] = 195, + [ASPEED_DEV_WDT] = 195, + [ASPEED_DEV_PWM] = 195, + [ASPEED_DEV_I3C] = 195, + [ASPEED_DEV_UART0] = 196, + [ASPEED_DEV_UART1] = 196, + [ASPEED_DEV_UART2] = 196, + [ASPEED_DEV_UART3] = 196, + [ASPEED_DEV_UART5] = 196, + [ASPEED_DEV_UART6] = 196, + [ASPEED_DEV_UART7] = 196, + [ASPEED_DEV_UART8] = 196, + [ASPEED_DEV_UART9] = 196, + [ASPEED_DEV_UART10] = 196, + [ASPEED_DEV_UART11] = 196, + [ASPEED_DEV_UART12] = 196, + [ASPEED_DEV_ETH1] = 196, + [ASPEED_DEV_ETH2] = 196, + [ASPEED_DEV_ETH3] = 196, + [ASPEED_DEV_PECI] = 197, + [ASPEED_DEV_SDHCI] = 197, }; /* GICINT 128 */ -static const int aspeed_soc_ast2700_gic128_intcmap[] = { +/* GICINT 192 */ +static const int ast2700_gic128_gic192_intcmap[] = { [ASPEED_DEV_LPC] = 0, [ASPEED_DEV_IBT] = 2, [ASPEED_DEV_KCS] = 4, }; +/* GICINT 129 */ +/* GICINT 193 */ + /* GICINT 130 */ -static const int aspeed_soc_ast2700_gic130_intcmap[] = { +/* GICINT 194 */ +static const int ast2700_gic130_gic194_intcmap[] = { [ASPEED_DEV_I2C] = 0, [ASPEED_DEV_ADC] = 16, - [ASPEED_DEV_GPIO_1_8V] = 18, + [ASPEED_DEV_GPIO] = 18, }; /* GICINT 131 */ -static const int aspeed_soc_ast2700_gic131_intcmap[] = { +/* GICINT 195 */ +static const int ast2700_gic131_gic195_intcmap[] = { [ASPEED_DEV_I3C] = 0, [ASPEED_DEV_WDT] = 16, [ASPEED_DEV_FMC] = 25, @@ -135,7 +214,8 @@ static const int aspeed_soc_ast2700_gic131_intcmap[] = { }; /* GICINT 132 */ -static const int aspeed_soc_ast2700_gic132_intcmap[] = { +/* GICINT 196 */ +static const int ast2700_gic132_gic196_intcmap[] = { [ASPEED_DEV_ETH1] = 0, [ASPEED_DEV_ETH2] = 1, [ASPEED_DEV_ETH3] = 2, @@ -151,48 +231,95 @@ static const int aspeed_soc_ast2700_gic132_intcmap[] = { [ASPEED_DEV_UART10] = 16, [ASPEED_DEV_UART11] = 17, [ASPEED_DEV_UART12] = 18, + [ASPEED_DEV_EHCI3] = 28, + [ASPEED_DEV_EHCI4] = 29, }; /* GICINT 133 */ -static const int aspeed_soc_ast2700_gic133_intcmap[] = { +/* GICINT 197 */ +static const int ast2700_gic133_gic197_intcmap[] = { + [ASPEED_DEV_SDHCI] = 1, [ASPEED_DEV_PECI] = 4, }; /* GICINT 128 ~ 136 */ +/* GICINT 192 ~ 201 */ struct gic_intc_irq_info { int irq; + int intc_idx; + int orgate_idx; const int *ptr; }; -static const struct gic_intc_irq_info aspeed_soc_ast2700_gic_intcmap[] = { - {128, aspeed_soc_ast2700_gic128_intcmap}, - {129, NULL}, - {130, aspeed_soc_ast2700_gic130_intcmap}, - {131, aspeed_soc_ast2700_gic131_intcmap}, - {132, aspeed_soc_ast2700_gic132_intcmap}, - {133, aspeed_soc_ast2700_gic133_intcmap}, - {134, NULL}, - {135, NULL}, - {136, NULL}, +static const struct gic_intc_irq_info ast2700_gic_intcmap[] = { + {192, 1, 0, ast2700_gic128_gic192_intcmap}, + {193, 1, 1, NULL}, + {194, 1, 2, ast2700_gic130_gic194_intcmap}, + {195, 1, 3, ast2700_gic131_gic195_intcmap}, + {196, 1, 4, ast2700_gic132_gic196_intcmap}, + {197, 1, 5, ast2700_gic133_gic197_intcmap}, + {198, 1, 6, NULL}, + {199, 1, 7, NULL}, + {200, 1, 8, NULL}, + {201, 1, 9, NULL}, + {128, 0, 1, ast2700_gic128_gic192_intcmap}, + {129, 0, 2, NULL}, + {130, 0, 3, ast2700_gic130_gic194_intcmap}, + {131, 0, 4, 
ast2700_gic131_gic195_intcmap}, + {132, 0, 5, ast2700_gic132_gic196_intcmap}, + {133, 0, 6, ast2700_gic133_gic197_intcmap}, + {134, 0, 7, NULL}, + {135, 0, 8, NULL}, + {136, 0, 9, NULL}, }; static qemu_irq aspeed_soc_ast2700_get_irq(AspeedSoCState *s, int dev) { Aspeed27x0SoCState *a = ASPEED27X0_SOC(s); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + int or_idx; + int idx; int i; - for (i = 0; i < ARRAY_SIZE(aspeed_soc_ast2700_gic_intcmap); i++) { - if (sc->irqmap[dev] == aspeed_soc_ast2700_gic_intcmap[i].irq) { - assert(aspeed_soc_ast2700_gic_intcmap[i].ptr); - return qdev_get_gpio_in(DEVICE(&a->intc.orgates[i]), - aspeed_soc_ast2700_gic_intcmap[i].ptr[dev]); + for (i = 0; i < ARRAY_SIZE(ast2700_gic_intcmap); i++) { + if (sc->irqmap[dev] == ast2700_gic_intcmap[i].irq) { + assert(ast2700_gic_intcmap[i].ptr); + or_idx = ast2700_gic_intcmap[i].orgate_idx; + idx = ast2700_gic_intcmap[i].intc_idx; + return qdev_get_gpio_in(DEVICE(&a->intc[idx].orgates[or_idx]), + ast2700_gic_intcmap[i].ptr[dev]); } } return qdev_get_gpio_in(DEVICE(&a->gic), sc->irqmap[dev]); } +static qemu_irq aspeed_soc_ast2700_get_irq_index(AspeedSoCState *s, int dev, + int index) +{ + Aspeed27x0SoCState *a = ASPEED27X0_SOC(s); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + int or_idx; + int idx; + int i; + + for (i = 0; i < ARRAY_SIZE(ast2700_gic_intcmap); i++) { + if (sc->irqmap[dev] == ast2700_gic_intcmap[i].irq) { + assert(ast2700_gic_intcmap[i].ptr); + or_idx = ast2700_gic_intcmap[i].orgate_idx; + idx = ast2700_gic_intcmap[i].intc_idx; + return qdev_get_gpio_in(DEVICE(&a->intc[idx].orgates[or_idx]), + ast2700_gic_intcmap[i].ptr[dev] + index); + } + } + + /* + * Invalid OR gate index, device IRQ should be between 128 to 136 + * and 192 to 201. + */ + g_assert_not_reached(); +} + static uint64_t aspeed_ram_capacity_read(void *opaque, hwaddr addr, unsigned int size) { @@ -219,8 +346,9 @@ static void aspeed_ram_capacity_write(void *opaque, hwaddr addr, uint64_t data, * If writes the data to the address which is beyond the ram size, * it would write the data to the "address % ram_size". */ - result = address_space_write(&s->dram_as, addr % ram_size, - MEMTXATTRS_UNSPECIFIED, &data, 4); + address_space_stl_le(&s->dram_as, addr % ram_size, data, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM write failed, addr:0x%" HWADDR_PRIx @@ -233,9 +361,10 @@ static const MemoryRegionOps aspeed_ram_capacity_ops = { .read = aspeed_ram_capacity_read, .write = aspeed_ram_capacity_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, .valid = { - .min_access_size = 1, - .max_access_size = 8, + .min_access_size = 4, + .max_access_size = 4, }, }; @@ -288,7 +417,7 @@ static void aspeed_soc_ast2700_init(Object *obj) char socname[8]; char typename[64]; - if (sscanf(sc->name, "%7s", socname) != 1) { + if (sscanf(object_get_typename(obj), "%7s", socname) != 1) { g_assert_not_reached(); } @@ -304,14 +433,21 @@ static void aspeed_soc_ast2700_init(Object *obj) sc->silicon_rev); object_property_add_alias(obj, "hw-strap1", OBJECT(&s->scu), "hw-strap1"); - object_property_add_alias(obj, "hw-strap2", OBJECT(&s->scu), - "hw-strap2"); object_property_add_alias(obj, "hw-prot-key", OBJECT(&s->scu), "hw-prot-key"); object_initialize_child(obj, "scuio", &s->scuio, TYPE_ASPEED_2700_SCUIO); qdev_prop_set_uint32(DEVICE(&s->scuio), "silicon-rev", sc->silicon_rev); + /* + * There is one hw-strap1 register in the SCU (CPU DIE) and another + * hw-strap1 register in the SCUIO (IO DIE). 
To reuse the current design + * of hw-strap, hw-strap1 is assigned to the SCU and sets the value in the + * SCU hw-strap1 register, while hw-strap2 is assigned to the SCUIO and + * sets the value in the SCUIO hw-strap1 register. + */ + object_property_add_alias(obj, "hw-strap2", OBJECT(&s->scuio), + "hw-strap1"); snprintf(typename, sizeof(typename), "aspeed.fmc-%s", socname); object_initialize_child(obj, "fmc", &s->fmc, typename); @@ -321,6 +457,11 @@ static void aspeed_soc_ast2700_init(Object *obj) object_initialize_child(obj, "spi[*]", &s->spi[i], typename); } + for (i = 0; i < sc->ehcis_num; i++) { + object_initialize_child(obj, "ehci[*]", &s->ehci[i], + TYPE_PLATFORM_EHCI); + } + snprintf(typename, sizeof(typename), "aspeed.sdmc-%s", socname); object_initialize_child(obj, "sdmc", &s->sdmc, typename); object_property_add_alias(obj, "ram-size", OBJECT(&s->sdmc), @@ -344,10 +485,50 @@ static void aspeed_soc_ast2700_init(Object *obj) object_initialize_child(obj, "sli", &s->sli, TYPE_ASPEED_2700_SLI); object_initialize_child(obj, "sliio", &s->sliio, TYPE_ASPEED_2700_SLIIO); - object_initialize_child(obj, "intc", &a->intc, TYPE_ASPEED_2700_INTC); + object_initialize_child(obj, "intc", &a->intc[0], TYPE_ASPEED_2700_INTC); + object_initialize_child(obj, "intcio", &a->intc[1], + TYPE_ASPEED_2700_INTCIO); snprintf(typename, sizeof(typename), "aspeed.adc-%s", socname); object_initialize_child(obj, "adc", &s->adc, typename); + + snprintf(typename, sizeof(typename), "aspeed.i2c-%s", socname); + object_initialize_child(obj, "i2c", &s->i2c, typename); + + snprintf(typename, sizeof(typename), "aspeed.gpio-%s", socname); + object_initialize_child(obj, "gpio", &s->gpio, typename); + + object_initialize_child(obj, "rtc", &s->rtc, TYPE_ASPEED_RTC); + + snprintf(typename, sizeof(typename), "aspeed.sdhci-%s", socname); + object_initialize_child(obj, "sd-controller", &s->sdhci, typename); + object_property_set_int(OBJECT(&s->sdhci), "num-slots", 1, &error_abort); + + /* Init sd card slot class here so that they're under the correct parent */ + object_initialize_child(obj, "sd-controller.sdhci", + &s->sdhci.slots[0], TYPE_SYSBUS_SDHCI); + + object_initialize_child(obj, "emmc-controller", &s->emmc, typename); + object_property_set_int(OBJECT(&s->emmc), "num-slots", 1, &error_abort); + + object_initialize_child(obj, "emmc-controller.sdhci", &s->emmc.slots[0], + TYPE_SYSBUS_SDHCI); + + snprintf(typename, sizeof(typename), "aspeed.timer-%s", socname); + object_initialize_child(obj, "timerctrl", &s->timerctrl, typename); + + snprintf(typename, sizeof(typename), "aspeed.hace-%s", socname); + object_initialize_child(obj, "hace", &s->hace, typename); + object_initialize_child(obj, "dpmcu", &s->dpmcu, + TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "ltpi", &s->ltpi, + TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "iomem", &s->iomem, + TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "iomem0", &s->iomem0, + TYPE_UNIMPLEMENTED_DEVICE); + object_initialize_child(obj, "iomem1", &s->iomem1, + TYPE_UNIMPLEMENTED_DEVICE); } /* @@ -374,7 +555,7 @@ static bool aspeed_soc_ast2700_gic_realize(DeviceState *dev, Error **errp) gicdev = DEVICE(&a->gic); qdev_prop_set_uint32(gicdev, "revision", 3); qdev_prop_set_uint32(gicdev, "num-cpu", sc->num_cpus); - qdev_prop_set_uint32(gicdev, "num-irq", AST2700_MAX_IRQ); + qdev_prop_set_uint32(gicdev, "num-irq", AST2700_MAX_IRQ + GIC_INTERNAL); redist_region_count = qlist_new(); qlist_append_int(redist_region_count, sc->num_cpus); @@ -383,33 +564,35 @@ 
static bool aspeed_soc_ast2700_gic_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(gicbusdev, errp)) { return false; } - sysbus_mmio_map(gicbusdev, 0, sc->memmap[ASPEED_GIC_DIST]); - sysbus_mmio_map(gicbusdev, 1, sc->memmap[ASPEED_GIC_REDIST]); + + aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->gic), 0, + sc->memmap[ASPEED_GIC_DIST]); + aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->gic), 1, + sc->memmap[ASPEED_GIC_REDIST]); for (i = 0; i < sc->num_cpus; i++) { DeviceState *cpudev = DEVICE(&a->cpu[i]); - int NUM_IRQS = 256, ARCH_GIC_MAINT_IRQ = 9, VIRTUAL_PMU_IRQ = 7; - int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS; + int intidbase = AST2700_MAX_IRQ + i * GIC_INTERNAL; const int timer_irq[] = { - [GTIMER_PHYS] = 14, - [GTIMER_VIRT] = 11, - [GTIMER_HYP] = 10, - [GTIMER_SEC] = 13, + [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ, + [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ, + [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, + [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ, }; int j; for (j = 0; j < ARRAY_SIZE(timer_irq); j++) { qdev_connect_gpio_out(cpudev, j, - qdev_get_gpio_in(gicdev, ppibase + timer_irq[j])); + qdev_get_gpio_in(gicdev, intidbase + timer_irq[j])); } qemu_irq irq = qdev_get_gpio_in(gicdev, - ppibase + ARCH_GIC_MAINT_IRQ); + intidbase + ARCH_GIC_MAINT_IRQ); qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0, irq); qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0, - qdev_get_gpio_in(gicdev, ppibase + VIRTUAL_PMU_IRQ)); + qdev_get_gpio_in(gicdev, intidbase + VIRTUAL_PMU_IRQ)); sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); sysbus_connect_irq(gicbusdev, i + sc->num_cpus, @@ -418,6 +601,10 @@ static bool aspeed_soc_ast2700_gic_realize(DeviceState *dev, Error **errp) qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ)); sysbus_connect_irq(gicbusdev, i + 3 * sc->num_cpus, qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ)); + sysbus_connect_irq(gicbusdev, i + 4 * sc->num_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_NMI)); + sysbus_connect_irq(gicbusdev, i + 5 * sc->num_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VINMI)); } return true; @@ -429,8 +616,10 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) Aspeed27x0SoCState *a = ASPEED27X0_SOC(dev); AspeedSoCState *s = ASPEED_SOC(dev); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - AspeedINTCClass *ic = ASPEED_INTC_GET_CLASS(&a->intc); - g_autofree char *sram_name = NULL; + AspeedINTCClass *ic = ASPEED_INTC_GET_CLASS(&a->intc[0]); + AspeedINTCClass *icio = ASPEED_INTC_GET_CLASS(&a->intc[1]); + g_autofree char *name = NULL; + qemu_irq irq; /* Default boot region (SPI memory or ROMs) */ memory_region_init(&s->spi_boot_container, OBJECT(s), @@ -459,31 +648,64 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) } /* INTC */ - if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc), errp)) { + if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[0]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc), 0, + aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[0]), 0, sc->memmap[ASPEED_DEV_INTC]); - /* GICINT orgates -> INTC -> GIC */ - for (i = 0; i < ic->num_ints; i++) { - qdev_connect_gpio_out(DEVICE(&a->intc.orgates[i]), 0, - qdev_get_gpio_in(DEVICE(&a->intc), i)); - sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc), i, + /* INTCIO */ + if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[1]), errp)) { + return; + } + + aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[1]), 0, + sc->memmap[ASPEED_DEV_INTCIO]); + + /* irq sources -> orgates -> INTC */ + for (i = 0; i < ic->num_inpins; i++) { + 
qdev_connect_gpio_out(DEVICE(&a->intc[0].orgates[i]), 0, + qdev_get_gpio_in(DEVICE(&a->intc[0]), i)); + } + + /* INTC -> GIC192 - GIC201 */ + /* INTC -> GIC128 - GIC136 */ + for (i = 0; i < ic->num_outpins; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[0]), i, qdev_get_gpio_in(DEVICE(&a->gic), - aspeed_soc_ast2700_gic_intcmap[i].irq)); + ast2700_gic_intcmap[i].irq)); + } + + /* irq source -> orgates -> INTCIO */ + for (i = 0; i < icio->num_inpins; i++) { + qdev_connect_gpio_out(DEVICE(&a->intc[1].orgates[i]), 0, + qdev_get_gpio_in(DEVICE(&a->intc[1]), i)); + } + + /* INTCIO -> INTC */ + for (i = 0; i < icio->num_outpins; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[1]), i, + qdev_get_gpio_in(DEVICE(&a->intc[0].orgates[0]), i)); } /* SRAM */ - sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&a->cpu[0])->cpu_index); - if (!memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, - errp)) { + name = g_strdup_printf("aspeed.sram.%d", CPU(&a->cpu[0])->cpu_index); + if (!memory_region_init_ram(&s->sram, OBJECT(s), name, sc->sram_size, + errp)) { return; } memory_region_add_subregion(s->memory, sc->memmap[ASPEED_DEV_SRAM], &s->sram); + /* VBOOTROM */ + if (!memory_region_init_ram(&s->vbootrom, OBJECT(s), "aspeed.vbootrom", + 0x20000, errp)) { + return; + } + memory_region_add_subregion(s->memory, + sc->memmap[ASPEED_DEV_VBOOTROM], &s->vbootrom); + /* SCU */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { return; @@ -536,6 +758,17 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) ASPEED_SMC_GET_CLASS(&s->spi[i])->flash_window_base); } + /* EHCI */ + for (i = 0; i < sc->ehcis_num; i++) { + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->ehci[i]), 0, + sc->memmap[ASPEED_DEV_EHCI1 + i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_EHCI1 + i)); + } + /* * SDMC - SDRAM Memory Controller * The SDMC controller is unlocked at SPL stage. @@ -613,14 +846,119 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_ADC)); - create_unimplemented_device("ast2700.dpmcu", 0x11000000, 0x40000); - create_unimplemented_device("ast2700.iomem0", 0x12000000, 0x01000000); - create_unimplemented_device("ast2700.iomem1", 0x14000000, 0x01000000); - create_unimplemented_device("ast2700.ltpi", 0x30000000, 0x1000000); - create_unimplemented_device("ast2700.io", 0x0, 0x4000000); + /* I2C */ + object_property_set_link(OBJECT(&s->i2c), "dram", OBJECT(s->dram_mr), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); + for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) { + /* + * The AST2700 I2C controller has one source INTC per bus. + * + * For AST2700 A0: + * I2C bus interrupts are connected to the OR gate from bit 0 to bit + * 15, and the OR gate output pin is connected to the input pin of + * GICINT130 of INTC (CPU Die). Then, the output pin is connected to + * the GIC. + * + * For AST2700 A1: + * I2C bus interrupts are connected to the OR gate from bit 0 to bit + * 15, and the OR gate output pin is connected to the input pin of + * GICINT194 of INTCIO (IO Die). Then, the output pin is connected + * to the INTC (CPU Die) input pin, and its output pin is connected + * to the GIC. + * + * I2C bus 0 is connected to the OR gate at bit 0. 
+ * I2C bus 15 is connected to the OR gate at bit 15. + */ + irq = aspeed_soc_ast2700_get_irq_index(s, ASPEED_DEV_I2C, i); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c.busses[i]), 0, irq); + } + + /* GPIO */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio), 0, + sc->memmap[ASPEED_DEV_GPIO]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_GPIO)); + + /* RTC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->rtc), 0, sc->memmap[ASPEED_DEV_RTC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_RTC)); + + /* SDHCI */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdhci), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sdhci), 0, + sc->memmap[ASPEED_DEV_SDHCI]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_SDHCI)); + + /* eMMC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->emmc), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->emmc), 0, + sc->memmap[ASPEED_DEV_EMMC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->emmc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_EMMC)); + + /* Timer */ + object_property_set_link(OBJECT(&s->timerctrl), "scu", OBJECT(&s->scu), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->timerctrl), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->timerctrl), 0, + sc->memmap[ASPEED_DEV_TIMER1]); + for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { + irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); + } + + /* HACE */ + object_property_set_link(OBJECT(&s->hace), "dram", OBJECT(s->dram_mr), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->hace), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->hace), 0, + sc->memmap[ASPEED_DEV_HACE]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_HACE)); + + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->dpmcu), + "aspeed.dpmcu", + sc->memmap[ASPEED_DEV_DPMCU], + AST2700_SOC_DPMCU_SIZE); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->ltpi), + "aspeed.ltpi", + sc->memmap[ASPEED_DEV_LTPI], + AST2700_SOC_LTPI_SIZE); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem), + "aspeed.io", + sc->memmap[ASPEED_DEV_IOMEM], + AST2700_SOC_IO_SIZE); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem0), + "aspeed.iomem0", + sc->memmap[ASPEED_DEV_IOMEM0], + AST2700_SOC_IOMEM_SIZE); + aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem1), + "aspeed.iomem1", + sc->memmap[ASPEED_DEV_IOMEM1], + AST2700_SOC_IOMEM_SIZE); } -static void aspeed_soc_ast2700_class_init(ObjectClass *oc, void *data) +static void aspeed_soc_ast2700a0_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { ARM_CPU_TYPE_NAME("cortex-a35"), @@ -633,17 +971,45 @@ static void aspeed_soc_ast2700_class_init(ObjectClass *oc, void *data) dc->user_creatable = false; dc->realize = aspeed_soc_ast2700_realize; - sc->name = "ast2700-a0"; sc->valid_cpu_types = valid_cpu_types; sc->silicon_rev = AST2700_A0_SILICON_REV; sc->sram_size = 0x20000; sc->spis_num = 3; + sc->ehcis_num = 2; sc->wdts_num = 8; sc->macs_num = 1; sc->uarts_num = 13; sc->num_cpus = 4; sc->uarts_base = ASPEED_DEV_UART0; - sc->irqmap = aspeed_soc_ast2700_irqmap; + sc->irqmap = aspeed_soc_ast2700a0_irqmap; + sc->memmap = aspeed_soc_ast2700_memmap; + sc->get_irq = 
aspeed_soc_ast2700_get_irq; +} + +static void aspeed_soc_ast2700a1_class_init(ObjectClass *oc, const void *data) +{ + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a35"), + NULL + }; + DeviceClass *dc = DEVICE_CLASS(oc); + AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc); + + /* Reason: The Aspeed SoC can only be instantiated from a board */ + dc->user_creatable = false; + dc->realize = aspeed_soc_ast2700_realize; + + sc->valid_cpu_types = valid_cpu_types; + sc->silicon_rev = AST2700_A1_SILICON_REV; + sc->sram_size = 0x20000; + sc->spis_num = 3; + sc->ehcis_num = 4; + sc->wdts_num = 8; + sc->macs_num = 3; + sc->uarts_num = 13; + sc->num_cpus = 4; + sc->uarts_base = ASPEED_DEV_UART0; + sc->irqmap = aspeed_soc_ast2700a1_irqmap; sc->memmap = aspeed_soc_ast2700_memmap; sc->get_irq = aspeed_soc_ast2700_get_irq; } @@ -658,7 +1024,13 @@ static const TypeInfo aspeed_soc_ast27x0_types[] = { .name = "ast2700-a0", .parent = TYPE_ASPEED27X0_SOC, .instance_init = aspeed_soc_ast2700_init, - .class_init = aspeed_soc_ast2700_class_init, + .class_init = aspeed_soc_ast2700a0_class_init, + }, + { + .name = "ast2700-a1", + .parent = TYPE_ASPEED27X0_SOC, + .instance_init = aspeed_soc_ast2700_init, + .class_init = aspeed_soc_ast2700a1_class_init, }, }; diff --git a/hw/arm/aspeed_eeprom.c b/hw/arm/aspeed_eeprom.c index daa3d329d10..8bbbdec8349 100644 --- a/hw/arm/aspeed_eeprom.c +++ b/hw/arm/aspeed_eeprom.c @@ -162,6 +162,25 @@ const uint8_t rainier_bmc_fruid[] = { 0x31, 0x50, 0x46, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, }; +const uint8_t gb200nvl_bmc_fruid[] = { + 0x01, 0x00, 0x00, 0x01, 0x0b, 0x00, 0x00, 0xf3, 0x01, 0x0a, 0x19, 0x1f, + 0x0f, 0xe6, 0xc6, 0x4e, 0x56, 0x49, 0x44, 0x49, 0x41, 0xc5, 0x50, 0x33, + 0x38, 0x30, 0x39, 0xcd, 0x31, 0x35, 0x38, 0x33, 0x33, 0x32, 0x34, 0x38, + 0x30, 0x30, 0x31, 0x35, 0x30, 0xd2, 0x36, 0x39, 0x39, 0x2d, 0x31, 0x33, + 0x38, 0x30, 0x39, 0x2d, 0x30, 0x34, 0x30, 0x34, 0x2d, 0x36, 0x30, 0x30, + 0xc0, 0x01, 0x01, 0xd6, 0x4d, 0x41, 0x43, 0x3a, 0x20, 0x33, 0x43, 0x3a, + 0x36, 0x44, 0x3a, 0x36, 0x36, 0x3a, 0x31, 0x34, 0x3a, 0x43, 0x38, 0x3a, + 0x37, 0x41, 0xc1, 0x3b, 0x01, 0x09, 0x19, 0xc6, 0x4e, 0x56, 0x49, 0x44, + 0x49, 0x41, 0xc9, 0x50, 0x33, 0x38, 0x30, 0x39, 0x2d, 0x42, 0x4d, 0x43, + 0xd2, 0x36, 0x39, 0x39, 0x2d, 0x31, 0x33, 0x38, 0x30, 0x39, 0x2d, 0x30, + 0x34, 0x30, 0x34, 0x2d, 0x36, 0x30, 0x30, 0xc4, 0x41, 0x45, 0x2e, 0x31, + 0xcd, 0x31, 0x35, 0x38, 0x33, 0x33, 0x32, 0x34, 0x38, 0x30, 0x30, 0x31, + 0x35, 0x30, 0xc0, 0xc4, 0x76, 0x30, 0x2e, 0x31, 0xc1, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xb4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + +}; + const size_t tiogapass_bmc_fruid_len = sizeof(tiogapass_bmc_fruid); const size_t fby35_nic_fruid_len = sizeof(fby35_nic_fruid); const size_t fby35_bb_fruid_len = sizeof(fby35_bb_fruid); @@ -169,3 +188,5 @@ const size_t fby35_bmc_fruid_len = sizeof(fby35_bmc_fruid); const size_t yosemitev2_bmc_fruid_len = sizeof(yosemitev2_bmc_fruid); const size_t rainier_bb_fruid_len = sizeof(rainier_bb_fruid); const size_t rainier_bmc_fruid_len = sizeof(rainier_bmc_fruid); +const size_t gb200nvl_bmc_fruid_len = sizeof(gb200nvl_bmc_fruid); + diff --git a/hw/arm/aspeed_eeprom.h b/hw/arm/aspeed_eeprom.h index f08c16ef506..3ed9bc1d9a8 100644 --- a/hw/arm/aspeed_eeprom.h +++ b/hw/arm/aspeed_eeprom.h @@ -26,4 +26,7 @@ extern const size_t rainier_bb_fruid_len; extern const uint8_t rainier_bmc_fruid[]; extern const size_t rainier_bmc_fruid_len; +extern const uint8_t 
gb200nvl_bmc_fruid[]; +extern const size_t gb200nvl_bmc_fruid_len; + #endif diff --git a/hw/arm/aspeed_soc_common.c b/hw/arm/aspeed_soc_common.c index 05551461aea..1c4ac93a0ff 100644 --- a/hw/arm/aspeed_soc_common.c +++ b/hw/arm/aspeed_soc_common.c @@ -15,7 +15,7 @@ #include "hw/qdev-properties.h" #include "hw/misc/unimp.h" #include "hw/arm/aspeed_soc.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" const char *aspeed_soc_cpu_type(AspeedSoCClass *sc) @@ -139,15 +139,14 @@ static bool aspeed_soc_boot_from_emmc(AspeedSoCState *s) return false; } -static Property aspeed_soc_properties[] = { +static const Property aspeed_soc_properties[] = { DEFINE_PROP_LINK("dram", AspeedSoCState, dram_mr, TYPE_MEMORY_REGION, MemoryRegion *), DEFINE_PROP_LINK("memory", AspeedSoCState, memory, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_soc_class_init(ObjectClass *oc, void *data) +static void aspeed_soc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc); diff --git a/hw/arm/b-l475e-iot01a.c b/hw/arm/b-l475e-iot01a.c index 5002a40f06d..34ed2e0851b 100644 --- a/hw/arm/b-l475e-iot01a.c +++ b/hw/arm/b-l475e-iot01a.c @@ -82,7 +82,7 @@ static void bl475e_init(MachineState *machine) sysbus_realize(SYS_BUS_DEVICE(&s->soc), &error_fatal); sc = STM32L4X5_SOC_GET_CLASS(&s->soc); - armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, 0, + armv7m_load_kernel(s->soc.armv7m.cpu, machine->kernel_filename, 0, sc->flash_size); if (object_class_by_name(TYPE_DM163)) { @@ -110,7 +110,7 @@ static void bl475e_init(MachineState *machine) } } -static void bl475e_machine_init(ObjectClass *oc, void *data) +static void bl475e_machine_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); static const char *machine_valid_cpu_types[] = { diff --git a/hw/arm/bananapi_m2u.c b/hw/arm/bananapi_m2u.c index 0a4b6f29b1c..b750a575f72 100644 --- a/hw/arm/bananapi_m2u.c +++ b/hw/arm/bananapi_m2u.c @@ -19,7 +19,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "hw/boards.h" @@ -141,6 +141,7 @@ static void bpim2u_machine_init(MachineClass *mc) mc->valid_cpu_types = valid_cpu_types; mc->default_ram_size = 1 * GiB; mc->default_ram_id = "bpim2u.ram"; + mc->auto_create_sdcard = true; } DEFINE_MACHINE("bpim2u", bpim2u_machine_init) diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c index ac153a96b9a..8a1e72dfab2 100644 --- a/hw/arm/bcm2835_peripherals.c +++ b/hw/arm/bcm2835_peripherals.c @@ -15,7 +15,7 @@ #include "hw/arm/bcm2835_peripherals.h" #include "hw/misc/bcm2835_mbox_defs.h" #include "hw/arm/raspi_platform.h" -#include "sysemu/sysemu.h" +#include "system/system.h" /* Peripheral base address on the VC (GPU) system bus */ #define BCM2835_VC_PERI_BASE 0x7e000000 @@ -520,7 +520,7 @@ void bcm_soc_peripherals_common_realize(DeviceState *dev, Error **errp) create_unimp(s, &s->sdramc, "bcm2835-sdramc", SDRAMC_OFFSET, 0x100); } -static void bcm2835_peripherals_class_init(ObjectClass *oc, void *data) +static void bcm2835_peripherals_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); BCMSocPeripheralBaseClass *bc = BCM_SOC_PERIPHERALS_BASE_CLASS(oc); diff --git a/hw/arm/bcm2836.c b/hw/arm/bcm2836.c index 40a379bc369..cd61ba15054 100644 --- a/hw/arm/bcm2836.c +++ b/hw/arm/bcm2836.c @@ -18,7 +18,7 @@ 
#include "target/arm/cpu-qom.h" #include "target/arm/gtimer.h" -static Property bcm2836_enabled_cores_property = +static const Property bcm2836_enabled_cores_property = DEFINE_PROP_UINT32("enabled-cpus", BCM283XBaseState, enabled_cpus, 0); static void bcm283x_base_init(Object *obj) @@ -163,7 +163,7 @@ static void bcm2836_realize(DeviceState *dev, Error **errp) } } -static void bcm283x_base_class_init(ObjectClass *oc, void *data) +static void bcm283x_base_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -171,7 +171,7 @@ static void bcm283x_base_class_init(ObjectClass *oc, void *data) dc->user_creatable = false; } -static void bcm2835_class_init(ObjectClass *oc, void *data) +static void bcm2835_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); BCM283XBaseClass *bc = BCM283X_BASE_CLASS(oc); @@ -182,7 +182,7 @@ static void bcm2835_class_init(ObjectClass *oc, void *data) dc->realize = bcm2835_realize; }; -static void bcm2836_class_init(ObjectClass *oc, void *data) +static void bcm2836_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); BCM283XBaseClass *bc = BCM283X_BASE_CLASS(oc); @@ -196,7 +196,7 @@ static void bcm2836_class_init(ObjectClass *oc, void *data) }; #ifdef TARGET_AARCH64 -static void bcm2837_class_init(ObjectClass *oc, void *data) +static void bcm2837_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); BCM283XBaseClass *bc = BCM283X_BASE_CLASS(oc); diff --git a/hw/arm/bcm2838.c b/hw/arm/bcm2838.c index ddb7c5f757a..22aa754613c 100644 --- a/hw/arm/bcm2838.c +++ b/hw/arm/bcm2838.c @@ -233,7 +233,7 @@ static void bcm2838_realize(DeviceState *dev, Error **errp) qdev_pass_gpios(DEVICE(&s->gic), DEVICE(&s->peripherals), NULL); } -static void bcm2838_class_init(ObjectClass *oc, void *data) +static void bcm2838_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); BCM283XBaseClass *bc_base = BCM283X_BASE_CLASS(oc); diff --git a/hw/arm/bcm2838_peripherals.c b/hw/arm/bcm2838_peripherals.c index e28bef4a37d..812b5b8480c 100644 --- a/hw/arm/bcm2838_peripherals.c +++ b/hw/arm/bcm2838_peripherals.c @@ -196,7 +196,7 @@ static void bcm2838_peripherals_realize(DeviceState *dev, Error **errp) create_unimp(s_base, &s->asb, "bcm2838-asb", BRDG_OFFSET, 0x24); } -static void bcm2838_peripherals_class_init(ObjectClass *oc, void *data) +static void bcm2838_peripherals_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); BCM2838PeripheralClass *bc = BCM2838_PERIPHERALS_CLASS(oc); diff --git a/hw/arm/boot.c b/hw/arm/boot.c index d480a7da02c..c56dfb6c192 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -14,18 +14,31 @@ #include #include "hw/arm/boot.h" #include "hw/arm/linux-boot-if.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" -#include "sysemu/sysemu.h" -#include "sysemu/numa.h" +#include "cpu.h" +#include "exec/tswap.h" +#include "exec/target_page.h" +#include "system/kvm.h" +#include "system/tcg.h" +#include "system/system.h" +#include "system/memory.h" +#include "system/numa.h" #include "hw/boards.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "hw/loader.h" #include "elf.h" -#include "sysemu/device_tree.h" +#include "system/device_tree.h" #include "qemu/config-file.h" #include "qemu/option.h" #include "qemu/units.h" +#include "qemu/bswap.h" + +#include +#include "qapi/error.h" +#include "qemu/error-report.h" + +#ifdef CONFIG_LINUX +#include +#endif /* Kernel boot protocol is 
specified in the kernel docs * Documentation/arm/Booting and Documentation/arm64/booting.txt @@ -432,13 +445,12 @@ static int fdt_add_memory_node(void *fdt, uint32_t acells, hwaddr mem_base, return ret; } -static void fdt_add_psci_node(void *fdt) +static void fdt_add_psci_node(void *fdt, ARMCPU *armcpu) { uint32_t cpu_suspend_fn; uint32_t cpu_off_fn; uint32_t cpu_on_fn; uint32_t migrate_fn; - ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(0)); const char *psci_method; int64_t psci_conduit; int rc; @@ -511,8 +523,105 @@ static void fdt_add_psci_node(void *fdt) qemu_fdt_setprop_cell(fdt, "/psci", "migrate", migrate_fn); } +static int fdt_add_memory_node_wrapper(void *fdt, uint32_t acells, + hwaddr mem_base, uint32_t scells, + hwaddr mem_len, int numa_node_id) +{ + int rc; + + if (!mem_len) { + return 0; + } + + rc = fdt_add_memory_node(fdt, acells, mem_base, scells, + mem_len, numa_node_id); + if (rc < 0) { + fprintf(stderr, "couldn't add /memory@%"PRIx64" node\n", + mem_base); + return -1; + } + + return 0; +} + +static int offset_compare(const void *a, const void *b) +{ + struct egm_bad_pages_info *page1 = (struct egm_bad_pages_info *) a; + struct egm_bad_pages_info *page2 = (struct egm_bad_pages_info *) b; + + if (page1->offset > page2->offset) { + return 1; + } else if (page1->offset < page2->offset) { + return -1; + } else { + return 0; + } +} + +static int add_memory_regions(struct egm_bad_pages_list *info, void *fdt, + uint32_t acells, hwaddr mem_base, uint32_t scells, + hwaddr mem_len, int numa_node_id) +{ + int index; + hwaddr mem_base_curr, mem_last_curr = mem_len; + + if (!info) { + goto no_retired_pages; + } + + qsort(info->bad_pages, info->count, + sizeof(struct egm_bad_pages_info), &offset_compare); + mem_last_curr = mem_len; + for (index = info->count - 1; index >= 0; index--) { + mem_base_curr = info->bad_pages[index].offset + + info->bad_pages[index].size; + if (fdt_add_memory_node_wrapper(fdt, acells, mem_base_curr + mem_base, + scells, mem_last_curr - mem_base_curr, + numa_node_id)) { + return -1; + } + + mem_last_curr = info->bad_pages[index].offset; + } + +no_retired_pages: + if (fdt_add_memory_node_wrapper(fdt, acells, mem_base, scells, + mem_last_curr, numa_node_id)) { + return -1; + } + + return 0; +} + +static void fetch_retired_pages(MemoryRegion *mr, + struct egm_bad_pages_list **info) +{ + size_t argsz = sizeof(struct egm_bad_pages_list); + int fd; + + *info = g_malloc0(argsz); + + fd = memory_region_get_fd(mr); + +retry: + (*info)->argsz = argsz; + + if (ioctl(fd, EGM_BAD_PAGES_LIST, *info)) { + g_free(*info); + *info = NULL; + return; + } + + if ((*info)->argsz > argsz) { + argsz = (*info)->argsz; + *info = g_realloc(*info, argsz); + goto retry; + } +} + int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, - hwaddr addr_limit, AddressSpace *as, MachineState *ms) + hwaddr addr_limit, AddressSpace *as, MachineState *ms, + ARMCPU *cpu) { void *fdt = NULL; int size, rc, n = 0; @@ -524,7 +633,7 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, if (binfo->dtb_filename) { char *filename; - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, binfo->dtb_filename); + filename = qemu_find_file(QEMU_FILE_TYPE_DTB, binfo->dtb_filename); if (!filename) { fprintf(stderr, "Couldn't open dtb file %s\n", binfo->dtb_filename); goto fail; @@ -599,16 +708,22 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, mem_base = binfo->loader_start; for (i = 0; i < ms->numa_state->num_nodes; i++) { mem_len = ms->numa_state->nodes[i].node_mem; - if (!mem_len) { 
- continue; - } - rc = fdt_add_memory_node(fdt, acells, mem_base, - scells, mem_len, i); - if (rc < 0) { - fprintf(stderr, "couldn't add /memory@%"PRIx64" node\n", - mem_base); - goto fail; + if (ms->numa_state->nodes[i].node_memdev) { + struct egm_bad_pages_list *info = NULL; + fetch_retired_pages(&(ms->numa_state->nodes[i].node_memdev->mr), + &info); + rc = add_memory_regions(info, fdt, acells, mem_base, scells, + mem_len, i); + g_free(info); + + if (rc) { + goto fail; + } + } else { + if (fdt_add_memory_node_wrapper(fdt, acells, mem_base, + scells, mem_len, i)) + goto fail; } mem_base += mem_len; @@ -655,14 +770,12 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, } } - fdt_add_psci_node(fdt); + fdt_add_psci_node(fdt, cpu); if (binfo->modify_dtb) { binfo->modify_dtb(binfo, fdt); } - qemu_fdt_dumpdtb(fdt, size); - /* Put the DTB into the memory map as a ROM image: this will ensure * the DTB is copied again upon reset, even if addr points into RAM. */ @@ -743,7 +856,7 @@ static void do_cpu_reset(void *opaque) } else { if (arm_feature(env, ARM_FEATURE_EL3) && (info->secure_boot || - (info->secure_board_setup && cs == first_cpu))) { + (info->secure_board_setup && cpu == info->primary_cpu))) { /* Start this CPU in Secure SVC */ target_el = 3; } @@ -751,7 +864,7 @@ static void do_cpu_reset(void *opaque) arm_emulate_firmware_reset(cs, target_el); - if (cs == first_cpu) { + if (cpu == info->primary_cpu) { AddressSpace *as = arm_boot_address_space(cpu, info); cpu_set_pc(cs, info->loader_start); @@ -798,24 +911,28 @@ static ssize_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry, Elf64_Ehdr h64; } elf_header; int data_swab = 0; - bool big_endian; - ssize_t ret = -1; + int elf_data_order; + ssize_t ret; Error *err = NULL; load_elf_hdr(info->kernel_filename, &elf_header, &elf_is64, &err); if (err) { + /* + * If the file is not an ELF file we silently return. + * The caller will fall back to try other formats. + */ error_free(err); - return ret; + return -1; } if (elf_is64) { - big_endian = elf_header.h64.e_ident[EI_DATA] == ELFDATA2MSB; - info->endianness = big_endian ? ARM_ENDIANNESS_BE8 - : ARM_ENDIANNESS_LE; + elf_data_order = elf_header.h64.e_ident[EI_DATA]; + info->endianness = elf_data_order == ELFDATA2MSB ? ARM_ENDIANNESS_BE8 + : ARM_ENDIANNESS_LE; } else { - big_endian = elf_header.h32.e_ident[EI_DATA] == ELFDATA2MSB; - if (big_endian) { + elf_data_order = elf_header.h32.e_ident[EI_DATA]; + if (elf_data_order == ELFDATA2MSB) { if (bswap32(elf_header.h32.e_flags) & EF_ARM_BE8) { info->endianness = ARM_ENDIANNESS_BE8; } else { @@ -835,10 +952,12 @@ static ssize_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry, } ret = load_elf_as(info->kernel_filename, NULL, NULL, NULL, - pentry, lowaddr, highaddr, NULL, big_endian, elf_machine, - 1, data_swab, as); + pentry, lowaddr, highaddr, NULL, elf_data_order, + elf_machine, 1, data_swab, as); if (ret <= 0) { /* The header loaded but the image didn't */ + error_report("Couldn't load elf '%s': %s", + info->kernel_filename, load_elf_strerror(ret)); exit(1); } @@ -851,7 +970,7 @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base, hwaddr kernel_load_offset = KERNEL64_LOAD_ADDR; uint64_t kernel_size = 0; uint8_t *buffer; - int size; + ssize_t size; /* On aarch64, it's the bootloader's job to uncompress the kernel. 
*/ size = load_image_gzipped_buffer(filename, LOAD_IMAGE_MAX_GUNZIP_BYTES, @@ -1232,6 +1351,9 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info) info->dtb_filename = ms->dtb; info->dtb_limit = 0; + /* We assume the CPU passed as argument is the primary CPU. */ + info->primary_cpu = cpu; + /* Load the kernel. */ if (!info->kernel_filename || info->firmware_loaded) { arm_setup_firmware_boot(cpu, info); @@ -1281,12 +1403,8 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info) object_property_set_int(cpuobj, "psci-conduit", info->psci_conduit, &error_abort); - /* - * Secondary CPUs start in PSCI powered-down state. Like the - * code in do_cpu_reset(), we assume first_cpu is the primary - * CPU. - */ - if (cs != first_cpu) { + /* Secondary CPUs start in PSCI powered-down state. */ + if (ARM_CPU(cs) != info->primary_cpu) { object_property_set_bool(cpuobj, "start-powered-off", true, &error_abort); } @@ -1321,7 +1439,8 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info) * decided whether to enable PSCI and set the psci-conduit CPU properties. */ if (!info->skip_dtb_autoload && have_dtb(info)) { - if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms) < 0) { + if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, + as, ms, cpu) < 0) { exit(1); } } diff --git a/hw/arm/collie.c b/hw/arm/collie.c index eaa5c52d45a..93bb190f1f9 100644 --- a/hw/arm/collie.c +++ b/hw/arm/collie.c @@ -16,7 +16,7 @@ #include "strongarm.h" #include "hw/arm/boot.h" #include "hw/block/flash.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "qom/object.h" #include "qemu/error-report.h" @@ -69,7 +69,7 @@ static void collie_init(MachineState *machine) arm_load_kernel(cms->sa1110->cpu, machine, &collie_binfo); } -static void collie_machine_class_init(ObjectClass *oc, void *data) +static void collie_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c index b976727eefd..d665d4edd97 100644 --- a/hw/arm/cubieboard.c +++ b/hw/arm/cubieboard.c @@ -122,6 +122,7 @@ static void cubieboard_machine_init(MachineClass *mc) mc->units_per_default_bus = 1; mc->ignore_memory_transaction_failures = true; mc->default_ram_id = "cubieboard.ram"; + mc->auto_create_sdcard = true; } DEFINE_MACHINE("cubieboard", cubieboard_machine_init) diff --git a/hw/arm/digic.c b/hw/arm/digic.c index 6df55479773..d831bc974d2 100644 --- a/hw/arm/digic.c +++ b/hw/arm/digic.c @@ -25,7 +25,7 @@ #include "qemu/module.h" #include "hw/arm/digic.h" #include "hw/qdev-properties.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #define DIGIC4_TIMER_BASE(n) (0xc0210000 + (n) * 0x100) @@ -79,7 +79,7 @@ static void digic_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(sbd, 0, DIGIC_UART_BASE); } -static void digic_class_init(ObjectClass *oc, void *data) +static void digic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/arm/digic_boards.c b/hw/arm/digic_boards.c index 4093af09cb2..466b8b84c0e 100644 --- a/hw/arm/digic_boards.c +++ b/hw/arm/digic_boards.c @@ -31,7 +31,7 @@ #include "hw/arm/digic.h" #include "hw/block/flash.h" #include "hw/loader.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" #include "qemu/units.h" #include "qemu/cutils.h" @@ -80,7 +80,7 @@ static void digic4_board_init(MachineState *machine, DigicBoard *board) static void digic_load_rom(DigicState *s, hwaddr 
addr, hwaddr max_size, const char *filename) { - target_long rom_size; + ssize_t rom_size; if (qtest_enabled()) { /* qtest runs no code so don't attempt a ROM load which diff --git a/hw/arm/exynos4210.c b/hw/arm/exynos4210.c index e3f1de26317..76001ff0dfc 100644 --- a/hw/arm/exynos4210.c +++ b/hw/arm/exynos4210.c @@ -27,8 +27,8 @@ #include "cpu.h" #include "hw/cpu/a9mpcore.h" #include "hw/irq.h" -#include "sysemu/blockdev.h" -#include "sysemu/sysemu.h" +#include "system/blockdev.h" +#include "system/system.h" #include "hw/sysbus.h" #include "hw/arm/boot.h" #include "hw/loader.h" @@ -103,6 +103,8 @@ #define EXYNOS4210_PL330_BASE1_ADDR 0x12690000 #define EXYNOS4210_PL330_BASE2_ADDR 0x12850000 +#define GIC_EXT_IRQS 64 /* FIXME: verify for this SoC */ + enum ExtGicId { EXT_GIC_ID_MDMA_LCD0 = 66, EXT_GIC_ID_PDMA0, @@ -394,7 +396,8 @@ static void exynos4210_init_board_irqs(Exynos4210State *s) } if (irq_id) { qdev_connect_gpio_out(splitter, splitin, - qdev_get_gpio_in(extgicdev, irq_id - 32)); + qdev_get_gpio_in(extgicdev, + irq_id - GIC_INTERNAL)); } } for (; n < EXYNOS4210_MAX_INT_COMBINER_IN_IRQ; n++) { @@ -421,7 +424,8 @@ static void exynos4210_init_board_irqs(Exynos4210State *s) s->irq_table[n] = qdev_get_gpio_in(splitter, 0); qdev_connect_gpio_out(splitter, 0, qdev_get_gpio_in(intcdev, n)); qdev_connect_gpio_out(splitter, 1, - qdev_get_gpio_in(extgicdev, irq_id - 32)); + qdev_get_gpio_in(extgicdev, + irq_id - GIC_INTERNAL)); } else { s->irq_table[n] = qdev_get_gpio_in(intcdev, n); } @@ -458,7 +462,6 @@ static uint64_t exynos4210_chipid_and_omr_read(void *opaque, hwaddr offset, static void exynos4210_chipid_and_omr_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { - return; } static const MemoryRegionOps exynos4210_chipid_and_omr_ops = { @@ -586,6 +589,8 @@ static void exynos4210_realize(DeviceState *socdev, Error **errp) /* Private memory region and Internal GIC */ qdev_prop_set_uint32(DEVICE(&s->a9mpcore), "num-cpu", EXYNOS4210_NCPUS); + qdev_prop_set_uint32(DEVICE(&s->a9mpcore), "num-irq", + GIC_EXT_IRQS + GIC_INTERNAL); busdev = SYS_BUS_DEVICE(&s->a9mpcore); sysbus_realize(busdev, &error_fatal); sysbus_mmio_map(busdev, 0, EXYNOS4210_SMP_PRIVATE_BASE_ADDR); @@ -837,7 +842,7 @@ static void exynos4210_init(Object *obj) TYPE_EXYNOS4210_COMBINER); } -static void exynos4210_class_init(ObjectClass *klass, void *data) +static void exynos4210_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/arm/exynos4_boards.c b/hw/arm/exynos4_boards.c index 2410e2a28e8..73049741312 100644 --- a/hw/arm/exynos4_boards.c +++ b/hw/arm/exynos4_boards.c @@ -28,7 +28,7 @@ #include "hw/sysbus.h" #include "net/net.h" #include "hw/arm/boot.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/arm/exynos4210.h" #include "hw/net/lan9118.h" #include "hw/qdev-properties.h" @@ -154,7 +154,7 @@ static const char * const valid_cpu_types[] = { NULL }; -static void nuri_class_init(ObjectClass *oc, void *data) +static void nuri_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -165,6 +165,7 @@ static void nuri_class_init(ObjectClass *oc, void *data) mc->min_cpus = EXYNOS4210_NCPUS; mc->default_cpus = EXYNOS4210_NCPUS; mc->ignore_memory_transaction_failures = true; + mc->auto_create_sdcard = true; } static const TypeInfo nuri_type = { @@ -173,7 +174,7 @@ static const TypeInfo nuri_type = { .class_init = nuri_class_init, }; -static void smdkc210_class_init(ObjectClass *oc, void *data) 
+static void smdkc210_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -184,6 +185,7 @@ static void smdkc210_class_init(ObjectClass *oc, void *data) mc->min_cpus = EXYNOS4210_NCPUS; mc->default_cpus = EXYNOS4210_NCPUS; mc->ignore_memory_transaction_failures = true; + mc->auto_create_sdcard = true; } static const TypeInfo smdkc210_type = { diff --git a/hw/arm/fby35.c b/hw/arm/fby35.c index c9964bd283f..c14fc2efe9b 100644 --- a/hw/arm/fby35.c +++ b/hw/arm/fby35.c @@ -8,8 +8,8 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" -#include "sysemu/sysemu.h" -#include "sysemu/block-backend.h" +#include "system/system.h" +#include "system/block-backend.h" #include "hw/boards.h" #include "hw/qdev-clock.h" #include "hw/arm/aspeed_soc.h" @@ -77,6 +77,7 @@ static void fby35_bmc_init(Fby35State *s) memory_region_init(&s->bmc_memory, OBJECT(&s->bmc), "bmc-memory", UINT64_MAX); + memory_region_add_subregion(get_system_memory(), 0, &s->bmc_memory); memory_region_init_ram(&s->bmc_dram, OBJECT(&s->bmc), "bmc-dram", FBY35_BMC_RAM_SIZE, &error_abort); @@ -162,7 +163,7 @@ static void fby35_instance_init(Object *obj) FBY35(obj)->mmio_exec = false; } -static void fby35_class_init(ObjectClass *oc, void *data) +static void fby35_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -170,6 +171,7 @@ static void fby35_class_init(ObjectClass *oc, void *data) mc->init = fby35_init; mc->no_floppy = 1; mc->no_cdrom = 1; + mc->auto_create_sdcard = true; mc->min_cpus = mc->max_cpus = mc->default_cpus = 3; object_class_property_add_bool(oc, "execute-in-place", diff --git a/hw/arm/fsl-imx25.c b/hw/arm/fsl-imx25.c index 5ed87edfe4f..7aad6359ea8 100644 --- a/hw/arm/fsl-imx25.c +++ b/hw/arm/fsl-imx25.c @@ -25,7 +25,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "hw/arm/fsl-imx25.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/qdev-properties.h" #include "chardev/char.h" #include "target/arm/cpu-qom.h" @@ -243,8 +243,6 @@ static void fsl_imx25_realize(DeviceState *dev, Error **errp) &error_abort); object_property_set_uint(OBJECT(&s->esdhc[i]), "capareg", IMX25_ESDHC_CAPABILITIES, &error_abort); - object_property_set_uint(OBJECT(&s->esdhc[i]), "vendor", - SDHCI_VENDOR_IMX, &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->esdhc[i]), errp)) { return; } @@ -309,12 +307,11 @@ static void fsl_imx25_realize(DeviceState *dev, Error **errp) &s->iram_alias); } -static Property fsl_imx25_properties[] = { +static const Property fsl_imx25_properties[] = { DEFINE_PROP_UINT32("fec-phy-num", FslIMX25State, phy_num, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void fsl_imx25_class_init(ObjectClass *oc, void *data) +static void fsl_imx25_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/arm/fsl-imx31.c b/hw/arm/fsl-imx31.c index 4b8d9b8e4fe..e9f70ad94b8 100644 --- a/hw/arm/fsl-imx31.c +++ b/hw/arm/fsl-imx31.c @@ -22,8 +22,8 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "hw/arm/fsl-imx31.h" -#include "sysemu/sysemu.h" -#include "exec/address-spaces.h" +#include "system/system.h" +#include "system/address-spaces.h" #include "hw/qdev-properties.h" #include "chardev/char.h" #include "target/arm/cpu-qom.h" @@ -218,7 +218,7 @@ static void fsl_imx31_realize(DeviceState *dev, Error **errp) &s->iram_alias); } -static void fsl_imx31_class_init(ObjectClass *oc, void *data) +static void fsl_imx31_class_init(ObjectClass *oc, const void *data) { DeviceClass 
*dc = DEVICE_CLASS(oc); diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c index 85748cb233e..f3a60022d84 100644 --- a/hw/arm/fsl-imx6.c +++ b/hw/arm/fsl-imx6.c @@ -26,7 +26,7 @@ #include "hw/usb/imx-usb-phy.h" #include "hw/boards.h" #include "hw/qdev-properties.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "chardev/char.h" #include "qemu/error-report.h" #include "qemu/module.h" @@ -106,6 +106,8 @@ static void fsl_imx6_init(Object *obj) object_initialize_child(obj, "eth", &s->eth, TYPE_IMX_ENET); object_initialize_child(obj, "pcie", &s->pcie, TYPE_DESIGNWARE_PCIE_HOST); + object_initialize_child(obj, "pcie4-msi-irq", &s->pcie4_msi_irq, + TYPE_OR_IRQ); } static void fsl_imx6_realize(DeviceState *dev, Error **errp) @@ -115,6 +117,8 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) uint16_t i; qemu_irq irq; unsigned int smp_cpus = ms->smp.cpus; + DeviceState *mpcore = DEVICE(&s->a9mpcore); + DeviceState *gic; if (smp_cpus > FSL_IMX6_NUM_CPUS) { error_setg(errp, "%s: Only %d CPUs are supported (%d requested)", @@ -141,21 +145,21 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) } } - object_property_set_int(OBJECT(&s->a9mpcore), "num-cpu", smp_cpus, - &error_abort); + object_property_set_int(OBJECT(mpcore), "num-cpu", smp_cpus, &error_abort); - object_property_set_int(OBJECT(&s->a9mpcore), "num-irq", + object_property_set_int(OBJECT(mpcore), "num-irq", FSL_IMX6_MAX_IRQ + GIC_INTERNAL, &error_abort); - if (!sysbus_realize(SYS_BUS_DEVICE(&s->a9mpcore), errp)) { + if (!sysbus_realize(SYS_BUS_DEVICE(mpcore), errp)) { return; } - sysbus_mmio_map(SYS_BUS_DEVICE(&s->a9mpcore), 0, FSL_IMX6_A9MPCORE_ADDR); + sysbus_mmio_map(SYS_BUS_DEVICE(mpcore), 0, FSL_IMX6_A9MPCORE_ADDR); + gic = mpcore; for (i = 0; i < smp_cpus; i++) { - sysbus_connect_irq(SYS_BUS_DEVICE(&s->a9mpcore), i, + sysbus_connect_irq(SYS_BUS_DEVICE(gic), i, qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_IRQ)); - sysbus_connect_irq(SYS_BUS_DEVICE(&s->a9mpcore), i + smp_cpus, + sysbus_connect_irq(SYS_BUS_DEVICE(gic), i + smp_cpus, qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_FIQ)); } @@ -193,8 +197,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, serial_table[i].addr); sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a9mpcore), - serial_table[i].irq)); + qdev_get_gpio_in(gic, serial_table[i].irq)); } s->gpt.ccm = IMX_CCM(&s->ccm); @@ -205,8 +208,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt), 0, FSL_IMX6_GPT_ADDR); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt), 0, - qdev_get_gpio_in(DEVICE(&s->a9mpcore), - FSL_IMX6_GPT_IRQ)); + qdev_get_gpio_in(gic, FSL_IMX6_GPT_IRQ)); /* Initialize all EPIT timers */ for (i = 0; i < FSL_IMX6_NUM_EPITS; i++) { @@ -226,8 +228,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->epit[i]), 0, epit_table[i].addr); sysbus_connect_irq(SYS_BUS_DEVICE(&s->epit[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a9mpcore), - epit_table[i].irq)); + qdev_get_gpio_in(gic, epit_table[i].irq)); } /* Initialize all I2C */ @@ -247,8 +248,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, i2c_table[i].addr); sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a9mpcore), - i2c_table[i].irq)); + qdev_get_gpio_in(gic, i2c_table[i].irq)); } /* Initialize all GPIOs */ @@ -305,11 +305,9 @@ static void 
fsl_imx6_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, gpio_table[i].addr); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a9mpcore), - gpio_table[i].irq_low)); + qdev_get_gpio_in(gic, gpio_table[i].irq_low)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1, - qdev_get_gpio_in(DEVICE(&s->a9mpcore), - gpio_table[i].irq_high)); + qdev_get_gpio_in(gic, gpio_table[i].irq_high)); } /* Initialize all SDHC */ @@ -329,15 +327,12 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) &error_abort); object_property_set_uint(OBJECT(&s->esdhc[i]), "capareg", IMX6_ESDHC_CAPABILITIES, &error_abort); - object_property_set_uint(OBJECT(&s->esdhc[i]), "vendor", - SDHCI_VENDOR_IMX, &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->esdhc[i]), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->esdhc[i]), 0, esdhc_table[i].addr); sysbus_connect_irq(SYS_BUS_DEVICE(&s->esdhc[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a9mpcore), - esdhc_table[i].irq)); + qdev_get_gpio_in(gic, esdhc_table[i].irq)); } /* USB */ @@ -358,8 +353,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0, FSL_IMX6_USBOH3_USB_ADDR + i * 0x200); sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a9mpcore), - FSL_IMX6_USBn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6_USBn_IRQ[i])); } /* Initialize all ECSPI */ @@ -382,8 +376,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, spi_table[i].addr); sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a9mpcore), - spi_table[i].irq)); + qdev_get_gpio_in(gic, spi_table[i].irq)); } object_property_set_uint(OBJECT(&s->eth), "phy-num", s->phy_num, @@ -394,11 +387,9 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) } sysbus_mmio_map(SYS_BUS_DEVICE(&s->eth), 0, FSL_IMX6_ENET_ADDR); sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth), 0, - qdev_get_gpio_in(DEVICE(&s->a9mpcore), - FSL_IMX6_ENET_MAC_IRQ)); + qdev_get_gpio_in(gic, FSL_IMX6_ENET_MAC_IRQ)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth), 1, - qdev_get_gpio_in(DEVICE(&s->a9mpcore), - FSL_IMX6_ENET_MAC_1588_IRQ)); + qdev_get_gpio_in(gic, FSL_IMX6_ENET_MAC_1588_IRQ)); /* * SNVS @@ -425,8 +416,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, FSL_IMX6_WDOGn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->wdt[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a9mpcore), - FSL_IMX6_WDOGn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6_WDOGn_IRQ[i])); } /* @@ -435,14 +425,23 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) sysbus_realize(SYS_BUS_DEVICE(&s->pcie), &error_abort); sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcie), 0, FSL_IMX6_PCIe_REG_ADDR); + object_property_set_int(OBJECT(&s->pcie4_msi_irq), "num-lines", 2, + &error_abort); + qdev_realize(DEVICE(&s->pcie4_msi_irq), NULL, &error_abort); + + irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_PCIE4_MSI_IRQ); + qdev_connect_gpio_out(DEVICE(&s->pcie4_msi_irq), 0, irq); + irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_PCIE1_IRQ); sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 0, irq); irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_PCIE2_IRQ); sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 1, irq); irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_PCIE3_IRQ); sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 2, irq); - irq = qdev_get_gpio_in(DEVICE(&s->a9mpcore), 
FSL_IMX6_PCIE4_IRQ); + irq = qdev_get_gpio_in(DEVICE(&s->pcie4_msi_irq), 0); sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 3, irq); + irq = qdev_get_gpio_in(DEVICE(&s->pcie4_msi_irq), 1); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 4, irq); /* * PCIe PHY @@ -481,12 +480,11 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) &s->ocram_alias); } -static Property fsl_imx6_properties[] = { +static const Property fsl_imx6_properties[] = { DEFINE_PROP_UINT32("fec-phy-num", FslIMX6State, phy_num, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void fsl_imx6_class_init(ObjectClass *oc, void *data) +static void fsl_imx6_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c index 19f443570bf..883c7fc534f 100644 --- a/hw/arm/fsl-imx6ul.c +++ b/hw/arm/fsl-imx6ul.c @@ -22,7 +22,7 @@ #include "hw/misc/unimp.h" #include "hw/usb/imx-usb-phy.h" #include "hw/boards.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "target/arm/cpu-qom.h" @@ -157,10 +157,12 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) { MachineState *ms = MACHINE(qdev_get_machine()); FslIMX6ULState *s = FSL_IMX6UL(dev); + DeviceState *mpcore = DEVICE(&s->a7mpcore); int i; char name[NAME_SIZE]; - SysBusDevice *sbd; - DeviceState *d; + DeviceState *gic; + SysBusDevice *gicsbd; + DeviceState *cpu; if (ms->smp.cpus > 1) { error_setg(errp, "%s: Only a single CPU is supported (%d requested)", @@ -173,19 +175,19 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) /* * A7MPCORE */ - object_property_set_int(OBJECT(&s->a7mpcore), "num-cpu", 1, &error_abort); - object_property_set_int(OBJECT(&s->a7mpcore), "num-irq", + object_property_set_int(OBJECT(mpcore), "num-cpu", 1, &error_abort); + object_property_set_int(OBJECT(mpcore), "num-irq", FSL_IMX6UL_MAX_IRQ + GIC_INTERNAL, &error_abort); - sysbus_realize(SYS_BUS_DEVICE(&s->a7mpcore), &error_abort); - sysbus_mmio_map(SYS_BUS_DEVICE(&s->a7mpcore), 0, FSL_IMX6UL_A7MPCORE_ADDR); + sysbus_realize(SYS_BUS_DEVICE(mpcore), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(mpcore), 0, FSL_IMX6UL_A7MPCORE_ADDR); - sbd = SYS_BUS_DEVICE(&s->a7mpcore); - d = DEVICE(&s->cpu); - - sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(d, ARM_CPU_IRQ)); - sysbus_connect_irq(sbd, 1, qdev_get_gpio_in(d, ARM_CPU_FIQ)); - sysbus_connect_irq(sbd, 2, qdev_get_gpio_in(d, ARM_CPU_VIRQ)); - sysbus_connect_irq(sbd, 3, qdev_get_gpio_in(d, ARM_CPU_VFIQ)); + gic = mpcore; + gicsbd = SYS_BUS_DEVICE(gic); + cpu = DEVICE(&s->cpu); + sysbus_connect_irq(gicsbd, 0, qdev_get_gpio_in(cpu, ARM_CPU_IRQ)); + sysbus_connect_irq(gicsbd, 1, qdev_get_gpio_in(cpu, ARM_CPU_FIQ)); + sysbus_connect_irq(gicsbd, 2, qdev_get_gpio_in(cpu, ARM_CPU_VIRQ)); + sysbus_connect_irq(gicsbd, 3, qdev_get_gpio_in(cpu, ARM_CPU_VFIQ)); /* * A7MPCORE DAP @@ -244,8 +246,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) FSL_IMX6UL_GPTn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX6UL_GPTn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6UL_GPTn_IRQ[i])); } /* @@ -269,8 +270,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) FSL_IMX6UL_EPITn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->epit[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX6UL_EPITn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6UL_EPITn_IRQ[i])); } /* @@ -307,12 +307,10 @@ static void fsl_imx6ul_realize(DeviceState 
*dev, Error **errp) FSL_IMX6UL_GPIOn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX6UL_GPIOn_LOW_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6UL_GPIOn_LOW_IRQ[i])); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX6UL_GPIOn_HIGH_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6UL_GPIOn_HIGH_IRQ[i])); } /* @@ -366,8 +364,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) FSL_IMX6UL_SPIn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX6UL_SPIn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6UL_SPIn_IRQ[i])); } /* @@ -392,8 +389,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, FSL_IMX6UL_I2Cn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX6UL_I2Cn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6UL_I2Cn_IRQ[i])); } /* @@ -430,8 +426,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) FSL_IMX6UL_UARTn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX6UL_UARTn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6UL_UARTn_IRQ[i])); } /* @@ -480,12 +475,10 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) FSL_IMX6UL_ENETn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX6UL_ENETn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6UL_ENETn_IRQ[i])); sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 1, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX6UL_ENETn_TIMER_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6UL_ENETn_TIMER_IRQ[i])); } /* @@ -521,8 +514,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0, FSL_IMX6UL_USB02_USBn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX6UL_USBn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6UL_USBn_IRQ[i])); } /* @@ -539,16 +531,13 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) FSL_IMX6UL_USDHC2_IRQ, }; - object_property_set_uint(OBJECT(&s->usdhc[i]), "vendor", - SDHCI_VENDOR_IMX, &error_abort); sysbus_realize(SYS_BUS_DEVICE(&s->usdhc[i]), &error_abort); sysbus_mmio_map(SYS_BUS_DEVICE(&s->usdhc[i]), 0, FSL_IMX6UL_USDHCn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->usdhc[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX6UL_USDHCn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6UL_USDHCn_IRQ[i])); } /* @@ -580,8 +569,7 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, FSL_IMX6UL_WDOGn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->wdt[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX6UL_WDOGn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX6UL_WDOGn_IRQ[i])); } /* @@ -718,17 +706,16 @@ static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) FSL_IMX6UL_OCRAM_ALIAS_ADDR, &s->ocram_alias); } -static Property fsl_imx6ul_properties[] = { +static const Property fsl_imx6ul_properties[] = { DEFINE_PROP_UINT32("fec1-phy-num", FslIMX6ULState, phy_num[0], 0), DEFINE_PROP_UINT32("fec2-phy-num", FslIMX6ULState, phy_num[1], 1), DEFINE_PROP_BOOL("fec1-phy-connected", FslIMX6ULState, phy_connected[0], true), DEFINE_PROP_BOOL("fec2-phy-connected", FslIMX6ULState, phy_connected[1], true), - DEFINE_PROP_END_OF_LIST(), }; -static void 
fsl_imx6ul_class_init(ObjectClass *oc, void *data) +static void fsl_imx6ul_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c index 9f2ef345557..02f7602077f 100644 --- a/hw/arm/fsl-imx7.c +++ b/hw/arm/fsl-imx7.c @@ -23,7 +23,7 @@ #include "hw/arm/fsl-imx7.h" #include "hw/misc/unimp.h" #include "hw/boards.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "target/arm/cpu-qom.h" @@ -150,6 +150,8 @@ static void fsl_imx7_init(Object *obj) * PCIE */ object_initialize_child(obj, "pcie", &s->pcie, TYPE_DESIGNWARE_PCIE_HOST); + object_initialize_child(obj, "pcie4-msi-irq", &s->pcie4_msi_irq, + TYPE_OR_IRQ); /* * USBs @@ -164,7 +166,8 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) { MachineState *ms = MACHINE(qdev_get_machine()); FslIMX7State *s = FSL_IMX7(dev); - Object *o; + DeviceState *mpcore = DEVICE(&s->a7mpcore); + DeviceState *gic; int i; qemu_irq irq; char name[NAME_SIZE]; @@ -180,7 +183,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) * CPUs */ for (i = 0; i < smp_cpus; i++) { - o = OBJECT(&s->cpu[i]); + Object *o = OBJECT(&s->cpu[i]); /* On uniprocessor, the CBAR is set to 0 */ if (smp_cpus > 1) { @@ -203,16 +206,15 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) /* * A7MPCORE */ - object_property_set_int(OBJECT(&s->a7mpcore), "num-cpu", smp_cpus, - &error_abort); - object_property_set_int(OBJECT(&s->a7mpcore), "num-irq", + object_property_set_int(OBJECT(mpcore), "num-cpu", smp_cpus, &error_abort); + object_property_set_int(OBJECT(mpcore), "num-irq", FSL_IMX7_MAX_IRQ + GIC_INTERNAL, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(mpcore), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(mpcore), 0, FSL_IMX7_A7MPCORE_ADDR); - sysbus_realize(SYS_BUS_DEVICE(&s->a7mpcore), &error_abort); - sysbus_mmio_map(SYS_BUS_DEVICE(&s->a7mpcore), 0, FSL_IMX7_A7MPCORE_ADDR); - + gic = mpcore; for (i = 0; i < smp_cpus; i++) { - SysBusDevice *sbd = SYS_BUS_DEVICE(&s->a7mpcore); + SysBusDevice *sbd = SYS_BUS_DEVICE(gic); DeviceState *d = DEVICE(qemu_get_cpu(i)); irq = qdev_get_gpio_in(d, ARM_CPU_IRQ); @@ -253,8 +255,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) sysbus_realize(SYS_BUS_DEVICE(&s->gpt[i]), &error_abort); sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt[i]), 0, FSL_IMX7_GPTn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX7_GPTn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX7_GPTn_IRQ[i])); } /* @@ -296,12 +297,10 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) FSL_IMX7_GPIOn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX7_GPIOn_LOW_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX7_GPIOn_LOW_IRQ[i])); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX7_GPIOn_HIGH_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX7_GPIOn_HIGH_IRQ[i])); } /* @@ -353,8 +352,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, FSL_IMX7_SPIn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX7_SPIn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX7_SPIn_IRQ[i])); } /* @@ -379,8 +377,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, FSL_IMX7_I2Cn_ADDR[i]); 
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX7_I2Cn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX7_I2Cn_IRQ[i])); } /* @@ -414,7 +411,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, FSL_IMX7_UARTn_ADDR[i]); - irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_UARTn_IRQ[i]); + irq = qdev_get_gpio_in(gic, FSL_IMX7_UARTn_IRQ[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, irq); } @@ -452,9 +449,9 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->eth[i]), 0, FSL_IMX7_ENETn_ADDR[i]); - irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_ENET_IRQ(i, 0)); + irq = qdev_get_gpio_in(gic, FSL_IMX7_ENET_IRQ(i, 0)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 0, irq); - irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_ENET_IRQ(i, 3)); + irq = qdev_get_gpio_in(gic, FSL_IMX7_ENET_IRQ(i, 3)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 1, irq); } @@ -474,14 +471,12 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) FSL_IMX7_USDHC3_IRQ, }; - object_property_set_uint(OBJECT(&s->usdhc[i]), "vendor", - SDHCI_VENDOR_IMX, &error_abort); sysbus_realize(SYS_BUS_DEVICE(&s->usdhc[i]), &error_abort); sysbus_mmio_map(SYS_BUS_DEVICE(&s->usdhc[i]), 0, FSL_IMX7_USDHCn_ADDR[i]); - irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_USDHCn_IRQ[i]); + irq = qdev_get_gpio_in(gic, FSL_IMX7_USDHCn_IRQ[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->usdhc[i]), 0, irq); } @@ -520,8 +515,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, FSL_IMX7_WDOGn_ADDR[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->wdt[i]), 0, - qdev_get_gpio_in(DEVICE(&s->a7mpcore), - FSL_IMX7_WDOGn_IRQ[i])); + qdev_get_gpio_in(gic, FSL_IMX7_WDOGn_IRQ[i])); } /* @@ -597,14 +591,23 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) sysbus_realize(SYS_BUS_DEVICE(&s->pcie), &error_abort); sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcie), 0, FSL_IMX7_PCIE_REG_ADDR); - irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTA_IRQ); + object_property_set_int(OBJECT(&s->pcie4_msi_irq), "num-lines", 2, + &error_abort); + qdev_realize(DEVICE(&s->pcie4_msi_irq), NULL, &error_abort); + + irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTD_MSI_IRQ); + qdev_connect_gpio_out(DEVICE(&s->pcie4_msi_irq), 0, irq); + + irq = qdev_get_gpio_in(gic, FSL_IMX7_PCI_INTA_IRQ); sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 0, irq); - irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTB_IRQ); + irq = qdev_get_gpio_in(gic, FSL_IMX7_PCI_INTB_IRQ); sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 1, irq); - irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTC_IRQ); + irq = qdev_get_gpio_in(gic, FSL_IMX7_PCI_INTC_IRQ); sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 2, irq); - irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTD_IRQ); + irq = qdev_get_gpio_in(DEVICE(&s->pcie4_msi_irq), 0); sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 3, irq); + irq = qdev_get_gpio_in(DEVICE(&s->pcie4_msi_irq), 1); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 4, irq); /* * USBs @@ -632,7 +635,7 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0, FSL_IMX7_USBn_ADDR[i]); - irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_USBn_IRQ[i]); + irq = qdev_get_gpio_in(gic, FSL_IMX7_USBn_IRQ[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0, irq); 
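Editorial aside (not part of the patch): in the i.MX6/i.MX7 hunks above, the DesignWare PCIe host model now drives legacy INTD and MSI on separate outputs, while the SoC has a single GIC input for both, so the patch interposes a two-input OR gate (pcie4_msi_irq, TYPE_OR_IRQ). The toy model below, in plain C with hypothetical names, only illustrates why a gate is needed for level-triggered lines: the shared output must stay asserted while either source is asserted, so each source's level has to be remembered rather than letting the last writer win. It is an illustration of the idea, not QEMU code.

#include <stdbool.h>
#include <stdio.h>

struct or_gate {
    bool level[2];   /* last level driven on each input                 */
    bool out;        /* level currently presented to the interrupt ctrl */
};

static void or_gate_set(struct or_gate *g, int line, bool level)
{
    g->level[line] = level;
    g->out = g->level[0] || g->level[1];
    printf("input %d -> %d, output now %d\n", line, level, g->out);
}

int main(void)
{
    struct or_gate g = {0};

    or_gate_set(&g, 0, true);    /* INTD asserted             -> out 1 */
    or_gate_set(&g, 1, true);    /* MSI asserted too          -> out 1 */
    or_gate_set(&g, 0, false);   /* INTD cleared, MSI pending -> out 1 */
    or_gate_set(&g, 1, false);   /* both idle                 -> out 0 */
    return 0;
}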
snprintf(name, NAME_SIZE, "usbmisc%d", i); @@ -736,17 +739,16 @@ static void fsl_imx7_realize(DeviceState *dev, Error **errp) &s->caam); } -static Property fsl_imx7_properties[] = { +static const Property fsl_imx7_properties[] = { DEFINE_PROP_UINT32("fec1-phy-num", FslIMX7State, phy_num[0], 0), DEFINE_PROP_UINT32("fec2-phy-num", FslIMX7State, phy_num[1], 1), DEFINE_PROP_BOOL("fec1-phy-connected", FslIMX7State, phy_connected[0], true), DEFINE_PROP_BOOL("fec2-phy-connected", FslIMX7State, phy_connected[1], true), - DEFINE_PROP_END_OF_LIST(), }; -static void fsl_imx7_class_init(ObjectClass *oc, void *data) +static void fsl_imx7_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/arm/fsl-imx8mp.c b/hw/arm/fsl-imx8mp.c new file mode 100644 index 00000000000..866f4d1d740 --- /dev/null +++ b/hw/arm/fsl-imx8mp.c @@ -0,0 +1,716 @@ +/* + * i.MX 8M Plus SoC Implementation + * + * Based on hw/arm/fsl-imx6.c + * + * Copyright (c) 2024, Bernhard Beschow + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "system/address-spaces.h" +#include "hw/arm/bsa.h" +#include "hw/arm/fsl-imx8mp.h" +#include "hw/intc/arm_gicv3.h" +#include "hw/misc/unimp.h" +#include "hw/boards.h" +#include "system/system.h" +#include "target/arm/cpu-qom.h" +#include "qapi/error.h" +#include "qobject/qlist.h" + +static const struct { + hwaddr addr; + size_t size; + const char *name; +} fsl_imx8mp_memmap[] = { + [FSL_IMX8MP_RAM] = { FSL_IMX8MP_RAM_START, FSL_IMX8MP_RAM_SIZE_MAX, "ram" }, + [FSL_IMX8MP_DDR_PHY_BROADCAST] = { 0x3dc00000, 4 * MiB, "ddr_phy_broadcast" }, + [FSL_IMX8MP_DDR_PERF_MON] = { 0x3d800000, 4 * MiB, "ddr_perf_mon" }, + [FSL_IMX8MP_DDR_CTL] = { 0x3d400000, 4 * MiB, "ddr_ctl" }, + [FSL_IMX8MP_DDR_BLK_CTRL] = { 0x3d000000, 1 * MiB, "ddr_blk_ctrl" }, + [FSL_IMX8MP_DDR_PHY] = { 0x3c000000, 16 * MiB, "ddr_phy" }, + [FSL_IMX8MP_AUDIO_DSP] = { 0x3b000000, 16 * MiB, "audio_dsp" }, + [FSL_IMX8MP_GIC_DIST] = { 0x38800000, 512 * KiB, "gic_dist" }, + [FSL_IMX8MP_GIC_REDIST] = { 0x38880000, 512 * KiB, "gic_redist" }, + [FSL_IMX8MP_NPU] = { 0x38500000, 2 * MiB, "npu" }, + [FSL_IMX8MP_VPU] = { 0x38340000, 2 * MiB, "vpu" }, + [FSL_IMX8MP_VPU_BLK_CTRL] = { 0x38330000, 2 * MiB, "vpu_blk_ctrl" }, + [FSL_IMX8MP_VPU_VC8000E_ENCODER] = { 0x38320000, 2 * MiB, "vpu_vc8000e_encoder" }, + [FSL_IMX8MP_VPU_G2_DECODER] = { 0x38310000, 2 * MiB, "vpu_g2_decoder" }, + [FSL_IMX8MP_VPU_G1_DECODER] = { 0x38300000, 2 * MiB, "vpu_g1_decoder" }, + [FSL_IMX8MP_USB2_GLUE] = { 0x382f0000, 0x100, "usb2_glue" }, + [FSL_IMX8MP_USB2_OTG] = { 0x3820cc00, 0x100, "usb2_otg" }, + [FSL_IMX8MP_USB2_DEV] = { 0x3820c700, 0x500, "usb2_dev" }, + [FSL_IMX8MP_USB2] = { 0x38200000, 0xc700, "usb2" }, + [FSL_IMX8MP_USB1_GLUE] = { 0x381f0000, 0x100, "usb1_glue" }, + [FSL_IMX8MP_USB1_OTG] = { 0x3810cc00, 0x100, "usb1_otg" }, + [FSL_IMX8MP_USB1_DEV] = { 0x3810c700, 0x500, "usb1_dev" }, + [FSL_IMX8MP_USB1] = { 0x38100000, 0xc700, "usb1" }, + [FSL_IMX8MP_GPU2D] = { 0x38008000, 32 * KiB, "gpu2d" }, + [FSL_IMX8MP_GPU3D] = { 0x38000000, 32 * KiB, "gpu3d" }, + [FSL_IMX8MP_QSPI1_RX_BUFFER] = { 0x34000000, 32 * MiB, "qspi1_rx_buffer" }, + [FSL_IMX8MP_PCIE1] = { 0x33800000, 4 * MiB, "pcie1" }, + [FSL_IMX8MP_QSPI1_TX_BUFFER] = { 0x33008000, 32 * KiB, "qspi1_tx_buffer" }, + [FSL_IMX8MP_APBH_DMA] = { 0x33000000, 32 * KiB, "apbh_dma" }, + + /* AIPS-5 Begin */ + [FSL_IMX8MP_MU_3_B] = { 0x30e90000, 64 * KiB, "mu_3_b" }, + [FSL_IMX8MP_MU_3_A] = { 0x30e80000, 64 * KiB, "mu_3_a" }, + [FSL_IMX8MP_MU_2_B] = { 
0x30e70000, 64 * KiB, "mu_2_b" }, + [FSL_IMX8MP_MU_2_A] = { 0x30e60000, 64 * KiB, "mu_2_a" }, + [FSL_IMX8MP_EDMA_CHANNELS] = { 0x30e40000, 128 * KiB, "edma_channels" }, + [FSL_IMX8MP_EDMA_MANAGEMENT_PAGE] = { 0x30e30000, 64 * KiB, "edma_management_page" }, + [FSL_IMX8MP_AUDIO_BLK_CTRL] = { 0x30e20000, 64 * KiB, "audio_blk_ctrl" }, + [FSL_IMX8MP_SDMA2] = { 0x30e10000, 64 * KiB, "sdma2" }, + [FSL_IMX8MP_SDMA3] = { 0x30e00000, 64 * KiB, "sdma3" }, + [FSL_IMX8MP_AIPS5_CONFIGURATION] = { 0x30df0000, 64 * KiB, "aips5_configuration" }, + [FSL_IMX8MP_SPBA2] = { 0x30cf0000, 64 * KiB, "spba2" }, + [FSL_IMX8MP_AUDIO_XCVR_RX] = { 0x30cc0000, 64 * KiB, "audio_xcvr_rx" }, + [FSL_IMX8MP_HDMI_TX_AUDLNK_MSTR] = { 0x30cb0000, 64 * KiB, "hdmi_tx_audlnk_mstr" }, + [FSL_IMX8MP_PDM] = { 0x30ca0000, 64 * KiB, "pdm" }, + [FSL_IMX8MP_ASRC] = { 0x30c90000, 64 * KiB, "asrc" }, + [FSL_IMX8MP_SAI7] = { 0x30c80000, 64 * KiB, "sai7" }, + [FSL_IMX8MP_SAI6] = { 0x30c60000, 64 * KiB, "sai6" }, + [FSL_IMX8MP_SAI5] = { 0x30c50000, 64 * KiB, "sai5" }, + [FSL_IMX8MP_SAI3] = { 0x30c30000, 64 * KiB, "sai3" }, + [FSL_IMX8MP_SAI2] = { 0x30c20000, 64 * KiB, "sai2" }, + [FSL_IMX8MP_SAI1] = { 0x30c10000, 64 * KiB, "sai1" }, + /* AIPS-5 End */ + + /* AIPS-4 Begin */ + [FSL_IMX8MP_HDMI_TX] = { 0x32fc0000, 128 * KiB, "hdmi_tx" }, + [FSL_IMX8MP_TZASC] = { 0x32f80000, 64 * KiB, "tzasc" }, + [FSL_IMX8MP_HSIO_BLK_CTL] = { 0x32f10000, 64 * KiB, "hsio_blk_ctl" }, + [FSL_IMX8MP_PCIE_PHY1] = { 0x32f00000, 64 * KiB, "pcie_phy1" }, + [FSL_IMX8MP_MEDIA_BLK_CTL] = { 0x32ec0000, 64 * KiB, "media_blk_ctl" }, + [FSL_IMX8MP_LCDIF2] = { 0x32e90000, 64 * KiB, "lcdif2" }, + [FSL_IMX8MP_LCDIF1] = { 0x32e80000, 64 * KiB, "lcdif1" }, + [FSL_IMX8MP_MIPI_DSI1] = { 0x32e60000, 64 * KiB, "mipi_dsi1" }, + [FSL_IMX8MP_MIPI_CSI2] = { 0x32e50000, 64 * KiB, "mipi_csi2" }, + [FSL_IMX8MP_MIPI_CSI1] = { 0x32e40000, 64 * KiB, "mipi_csi1" }, + [FSL_IMX8MP_IPS_DEWARP] = { 0x32e30000, 64 * KiB, "ips_dewarp" }, + [FSL_IMX8MP_ISP2] = { 0x32e20000, 64 * KiB, "isp2" }, + [FSL_IMX8MP_ISP1] = { 0x32e10000, 64 * KiB, "isp1" }, + [FSL_IMX8MP_ISI] = { 0x32e00000, 64 * KiB, "isi" }, + [FSL_IMX8MP_AIPS4_CONFIGURATION] = { 0x32df0000, 64 * KiB, "aips4_configuration" }, + /* AIPS-4 End */ + + [FSL_IMX8MP_INTERCONNECT] = { 0x32700000, 1 * MiB, "interconnect" }, + + /* AIPS-3 Begin */ + [FSL_IMX8MP_ENET2_TSN] = { 0x30bf0000, 64 * KiB, "enet2_tsn" }, + [FSL_IMX8MP_ENET1] = { 0x30be0000, 64 * KiB, "enet1" }, + [FSL_IMX8MP_SDMA1] = { 0x30bd0000, 64 * KiB, "sdma1" }, + [FSL_IMX8MP_QSPI] = { 0x30bb0000, 64 * KiB, "qspi" }, + [FSL_IMX8MP_USDHC3] = { 0x30b60000, 64 * KiB, "usdhc3" }, + [FSL_IMX8MP_USDHC2] = { 0x30b50000, 64 * KiB, "usdhc2" }, + [FSL_IMX8MP_USDHC1] = { 0x30b40000, 64 * KiB, "usdhc1" }, + [FSL_IMX8MP_I2C6] = { 0x30ae0000, 64 * KiB, "i2c6" }, + [FSL_IMX8MP_I2C5] = { 0x30ad0000, 64 * KiB, "i2c5" }, + [FSL_IMX8MP_SEMAPHORE_HS] = { 0x30ac0000, 64 * KiB, "semaphore_hs" }, + [FSL_IMX8MP_MU_1_B] = { 0x30ab0000, 64 * KiB, "mu_1_b" }, + [FSL_IMX8MP_MU_1_A] = { 0x30aa0000, 64 * KiB, "mu_1_a" }, + [FSL_IMX8MP_AUD_IRQ_STEER] = { 0x30a80000, 64 * KiB, "aud_irq_steer" }, + [FSL_IMX8MP_UART4] = { 0x30a60000, 64 * KiB, "uart4" }, + [FSL_IMX8MP_I2C4] = { 0x30a50000, 64 * KiB, "i2c4" }, + [FSL_IMX8MP_I2C3] = { 0x30a40000, 64 * KiB, "i2c3" }, + [FSL_IMX8MP_I2C2] = { 0x30a30000, 64 * KiB, "i2c2" }, + [FSL_IMX8MP_I2C1] = { 0x30a20000, 64 * KiB, "i2c1" }, + [FSL_IMX8MP_AIPS3_CONFIGURATION] = { 0x309f0000, 64 * KiB, "aips3_configuration" }, + [FSL_IMX8MP_CAAM] = { 0x30900000, 256 * KiB, "caam" }, + 
[FSL_IMX8MP_SPBA1] = { 0x308f0000, 64 * KiB, "spba1" }, + [FSL_IMX8MP_FLEXCAN2] = { 0x308d0000, 64 * KiB, "flexcan2" }, + [FSL_IMX8MP_FLEXCAN1] = { 0x308c0000, 64 * KiB, "flexcan1" }, + [FSL_IMX8MP_UART2] = { 0x30890000, 64 * KiB, "uart2" }, + [FSL_IMX8MP_UART3] = { 0x30880000, 64 * KiB, "uart3" }, + [FSL_IMX8MP_UART1] = { 0x30860000, 64 * KiB, "uart1" }, + [FSL_IMX8MP_ECSPI3] = { 0x30840000, 64 * KiB, "ecspi3" }, + [FSL_IMX8MP_ECSPI2] = { 0x30830000, 64 * KiB, "ecspi2" }, + [FSL_IMX8MP_ECSPI1] = { 0x30820000, 64 * KiB, "ecspi1" }, + /* AIPS-3 End */ + + /* AIPS-2 Begin */ + [FSL_IMX8MP_QOSC] = { 0x307f0000, 64 * KiB, "qosc" }, + [FSL_IMX8MP_PERFMON2] = { 0x307d0000, 64 * KiB, "perfmon2" }, + [FSL_IMX8MP_PERFMON1] = { 0x307c0000, 64 * KiB, "perfmon1" }, + [FSL_IMX8MP_GPT4] = { 0x30700000, 64 * KiB, "gpt4" }, + [FSL_IMX8MP_GPT5] = { 0x306f0000, 64 * KiB, "gpt5" }, + [FSL_IMX8MP_GPT6] = { 0x306e0000, 64 * KiB, "gpt6" }, + [FSL_IMX8MP_SYSCNT_CTRL] = { 0x306c0000, 64 * KiB, "syscnt_ctrl" }, + [FSL_IMX8MP_SYSCNT_CMP] = { 0x306b0000, 64 * KiB, "syscnt_cmp" }, + [FSL_IMX8MP_SYSCNT_RD] = { 0x306a0000, 64 * KiB, "syscnt_rd" }, + [FSL_IMX8MP_PWM4] = { 0x30690000, 64 * KiB, "pwm4" }, + [FSL_IMX8MP_PWM3] = { 0x30680000, 64 * KiB, "pwm3" }, + [FSL_IMX8MP_PWM2] = { 0x30670000, 64 * KiB, "pwm2" }, + [FSL_IMX8MP_PWM1] = { 0x30660000, 64 * KiB, "pwm1" }, + [FSL_IMX8MP_AIPS2_CONFIGURATION] = { 0x305f0000, 64 * KiB, "aips2_configuration" }, + /* AIPS-2 End */ + + /* AIPS-1 Begin */ + [FSL_IMX8MP_CSU] = { 0x303e0000, 64 * KiB, "csu" }, + [FSL_IMX8MP_RDC] = { 0x303d0000, 64 * KiB, "rdc" }, + [FSL_IMX8MP_SEMAPHORE2] = { 0x303c0000, 64 * KiB, "semaphore2" }, + [FSL_IMX8MP_SEMAPHORE1] = { 0x303b0000, 64 * KiB, "semaphore1" }, + [FSL_IMX8MP_GPC] = { 0x303a0000, 64 * KiB, "gpc" }, + [FSL_IMX8MP_SRC] = { 0x30390000, 64 * KiB, "src" }, + [FSL_IMX8MP_CCM] = { 0x30380000, 64 * KiB, "ccm" }, + [FSL_IMX8MP_SNVS_HP] = { 0x30370000, 64 * KiB, "snvs_hp" }, + [FSL_IMX8MP_ANA_PLL] = { 0x30360000, 64 * KiB, "ana_pll" }, + [FSL_IMX8MP_OCOTP_CTRL] = { 0x30350000, 64 * KiB, "ocotp_ctrl" }, + [FSL_IMX8MP_IOMUXC_GPR] = { 0x30340000, 64 * KiB, "iomuxc_gpr" }, + [FSL_IMX8MP_IOMUXC] = { 0x30330000, 64 * KiB, "iomuxc" }, + [FSL_IMX8MP_GPT3] = { 0x302f0000, 64 * KiB, "gpt3" }, + [FSL_IMX8MP_GPT2] = { 0x302e0000, 64 * KiB, "gpt2" }, + [FSL_IMX8MP_GPT1] = { 0x302d0000, 64 * KiB, "gpt1" }, + [FSL_IMX8MP_WDOG3] = { 0x302a0000, 64 * KiB, "wdog3" }, + [FSL_IMX8MP_WDOG2] = { 0x30290000, 64 * KiB, "wdog2" }, + [FSL_IMX8MP_WDOG1] = { 0x30280000, 64 * KiB, "wdog1" }, + [FSL_IMX8MP_ANA_OSC] = { 0x30270000, 64 * KiB, "ana_osc" }, + [FSL_IMX8MP_ANA_TSENSOR] = { 0x30260000, 64 * KiB, "ana_tsensor" }, + [FSL_IMX8MP_GPIO5] = { 0x30240000, 64 * KiB, "gpio5" }, + [FSL_IMX8MP_GPIO4] = { 0x30230000, 64 * KiB, "gpio4" }, + [FSL_IMX8MP_GPIO3] = { 0x30220000, 64 * KiB, "gpio3" }, + [FSL_IMX8MP_GPIO2] = { 0x30210000, 64 * KiB, "gpio2" }, + [FSL_IMX8MP_GPIO1] = { 0x30200000, 64 * KiB, "gpio1" }, + [FSL_IMX8MP_AIPS1_CONFIGURATION] = { 0x301f0000, 64 * KiB, "aips1_configuration" }, + /* AIPS-1 End */ + + [FSL_IMX8MP_A53_DAP] = { 0x28000000, 16 * MiB, "a53_dap" }, + [FSL_IMX8MP_PCIE1_MEM] = { 0x18000000, 128 * MiB, "pcie1_mem" }, + [FSL_IMX8MP_QSPI_MEM] = { 0x08000000, 256 * MiB, "qspi_mem" }, + [FSL_IMX8MP_OCRAM] = { 0x00900000, 576 * KiB, "ocram" }, + [FSL_IMX8MP_TCM_DTCM] = { 0x00800000, 128 * KiB, "tcm_dtcm" }, + [FSL_IMX8MP_TCM_ITCM] = { 0x007e0000, 128 * KiB, "tcm_itcm" }, + [FSL_IMX8MP_OCRAM_S] = { 0x00180000, 36 * KiB, "ocram_s" }, + [FSL_IMX8MP_CAAM_MEM] 
= { 0x00100000, 32 * KiB, "caam_mem" }, + [FSL_IMX8MP_BOOT_ROM_PROTECTED] = { 0x0003f000, 4 * KiB, "boot_rom_protected" }, + [FSL_IMX8MP_BOOT_ROM] = { 0x00000000, 252 * KiB, "boot_rom" }, +}; + +static void fsl_imx8mp_init(Object *obj) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + FslImx8mpState *s = FSL_IMX8MP(obj); + int i; + + for (i = 0; i < MIN(ms->smp.cpus, FSL_IMX8MP_NUM_CPUS); i++) { + g_autofree char *name = g_strdup_printf("cpu%d", i); + object_initialize_child(obj, name, &s->cpu[i], + ARM_CPU_TYPE_NAME("cortex-a53")); + } + + object_initialize_child(obj, "gic", &s->gic, TYPE_ARM_GICV3); + + object_initialize_child(obj, "ccm", &s->ccm, TYPE_IMX8MP_CCM); + + object_initialize_child(obj, "analog", &s->analog, TYPE_IMX8MP_ANALOG); + + object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS); + + for (i = 0; i < FSL_IMX8MP_NUM_UARTS; i++) { + g_autofree char *name = g_strdup_printf("uart%d", i + 1); + object_initialize_child(obj, name, &s->uart[i], TYPE_IMX_SERIAL); + } + + for (i = 0; i < FSL_IMX8MP_NUM_GPTS; i++) { + g_autofree char *name = g_strdup_printf("gpt%d", i + 1); + object_initialize_child(obj, name, &s->gpt[i], TYPE_IMX8MP_GPT); + } + object_initialize_child(obj, "gpt5-gpt6-irq", &s->gpt5_gpt6_irq, + TYPE_OR_IRQ); + + for (i = 0; i < FSL_IMX8MP_NUM_I2CS; i++) { + g_autofree char *name = g_strdup_printf("i2c%d", i + 1); + object_initialize_child(obj, name, &s->i2c[i], TYPE_IMX_I2C); + } + + for (i = 0; i < FSL_IMX8MP_NUM_GPIOS; i++) { + g_autofree char *name = g_strdup_printf("gpio%d", i + 1); + object_initialize_child(obj, name, &s->gpio[i], TYPE_IMX_GPIO); + } + + for (i = 0; i < FSL_IMX8MP_NUM_USDHCS; i++) { + g_autofree char *name = g_strdup_printf("usdhc%d", i + 1); + object_initialize_child(obj, name, &s->usdhc[i], TYPE_IMX_USDHC); + } + + for (i = 0; i < FSL_IMX8MP_NUM_USBS; i++) { + g_autofree char *name = g_strdup_printf("usb%d", i); + object_initialize_child(obj, name, &s->usb[i], TYPE_USB_DWC3); + } + + for (i = 0; i < FSL_IMX8MP_NUM_ECSPIS; i++) { + g_autofree char *name = g_strdup_printf("spi%d", i + 1); + object_initialize_child(obj, name, &s->spi[i], TYPE_IMX_SPI); + } + + for (i = 0; i < FSL_IMX8MP_NUM_WDTS; i++) { + g_autofree char *name = g_strdup_printf("wdt%d", i); + object_initialize_child(obj, name, &s->wdt[i], TYPE_IMX2_WDT); + } + + object_initialize_child(obj, "eth0", &s->enet, TYPE_IMX_ENET); + + object_initialize_child(obj, "pcie", &s->pcie, TYPE_DESIGNWARE_PCIE_HOST); + object_initialize_child(obj, "pcie_phy", &s->pcie_phy, + TYPE_FSL_IMX8M_PCIE_PHY); +} + +static void fsl_imx8mp_realize(DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + FslImx8mpState *s = FSL_IMX8MP(dev); + DeviceState *gicdev = DEVICE(&s->gic); + int i; + + if (ms->smp.cpus > FSL_IMX8MP_NUM_CPUS) { + error_setg(errp, "%s: Only %d CPUs are supported (%d requested)", + TYPE_FSL_IMX8MP, FSL_IMX8MP_NUM_CPUS, ms->smp.cpus); + return; + } + + /* CPUs */ + for (i = 0; i < ms->smp.cpus; i++) { + /* On uniprocessor, the CBAR is set to 0 */ + if (ms->smp.cpus > 1) { + object_property_set_int(OBJECT(&s->cpu[i]), "reset-cbar", + fsl_imx8mp_memmap[FSL_IMX8MP_GIC_DIST].addr, + &error_abort); + } + + /* + * CNTFID0 base frequency in Hz of system counter + */ + object_property_set_int(OBJECT(&s->cpu[i]), "cntfrq", 8000000, + &error_abort); + + if (i) { + /* + * Secondary CPUs start in powered-down state (and can be + * powered up via the SRC system reset controller) + */ + object_property_set_bool(OBJECT(&s->cpu[i]), "start-powered-off", + 
true, &error_abort); + } + + if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) { + return; + } + } + + /* GIC */ + { + SysBusDevice *gicsbd = SYS_BUS_DEVICE(&s->gic); + QList *redist_region_count; + + qdev_prop_set_uint32(gicdev, "num-cpu", ms->smp.cpus); + qdev_prop_set_uint32(gicdev, "num-irq", + FSL_IMX8MP_NUM_IRQS + GIC_INTERNAL); + redist_region_count = qlist_new(); + qlist_append_int(redist_region_count, ms->smp.cpus); + qdev_prop_set_array(gicdev, "redist-region-count", redist_region_count); + object_property_set_link(OBJECT(&s->gic), "sysmem", + OBJECT(get_system_memory()), &error_fatal); + if (!sysbus_realize(gicsbd, errp)) { + return; + } + sysbus_mmio_map(gicsbd, 0, fsl_imx8mp_memmap[FSL_IMX8MP_GIC_DIST].addr); + sysbus_mmio_map(gicsbd, 1, fsl_imx8mp_memmap[FSL_IMX8MP_GIC_REDIST].addr); + + /* + * Wire the outputs from each CPU's generic timer and the GICv3 + * maintenance interrupt signal to the appropriate GIC PPI inputs, and + * the GIC's IRQ/FIQ interrupt outputs to the CPU's inputs. + */ + for (i = 0; i < ms->smp.cpus; i++) { + DeviceState *cpudev = DEVICE(&s->cpu[i]); + int intidbase = FSL_IMX8MP_NUM_IRQS + i * GIC_INTERNAL; + qemu_irq irq; + + /* + * Mapping from the output timer irq lines from the CPU to the + * GIC PPI inputs. + */ + static const int timer_irqs[] = { + [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ, + [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ, + [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, + [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ, + }; + + for (int j = 0; j < ARRAY_SIZE(timer_irqs); j++) { + irq = qdev_get_gpio_in(gicdev, intidbase + timer_irqs[j]); + qdev_connect_gpio_out(cpudev, j, irq); + } + + irq = qdev_get_gpio_in(gicdev, intidbase + ARCH_GIC_MAINT_IRQ); + qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", + 0, irq); + + irq = qdev_get_gpio_in(gicdev, intidbase + VIRTUAL_PMU_IRQ); + qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0, irq); + + sysbus_connect_irq(gicsbd, i, + qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); + sysbus_connect_irq(gicsbd, i + ms->smp.cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); + sysbus_connect_irq(gicsbd, i + 2 * ms->smp.cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ)); + sysbus_connect_irq(gicsbd, i + 3 * ms->smp.cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ)); + } + } + + /* CCM */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ccm), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccm), 0, + fsl_imx8mp_memmap[FSL_IMX8MP_CCM].addr); + + /* Analog */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->analog), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->analog), 0, + fsl_imx8mp_memmap[FSL_IMX8MP_ANA_PLL].addr); + + /* UARTs */ + for (i = 0; i < FSL_IMX8MP_NUM_UARTS; i++) { + struct { + hwaddr addr; + unsigned int irq; + } serial_table[FSL_IMX8MP_NUM_UARTS] = { + { fsl_imx8mp_memmap[FSL_IMX8MP_UART1].addr, FSL_IMX8MP_UART1_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_UART2].addr, FSL_IMX8MP_UART2_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_UART3].addr, FSL_IMX8MP_UART3_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_UART4].addr, FSL_IMX8MP_UART4_IRQ }, + }; + + qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", serial_hd(i)); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, serial_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, + qdev_get_gpio_in(gicdev, serial_table[i].irq)); + } + + /* GPTs */ + object_property_set_int(OBJECT(&s->gpt5_gpt6_irq), "num-lines", 2, + &error_abort); + if (!qdev_realize(DEVICE(&s->gpt5_gpt6_irq), NULL, errp)) 
{ + return; + } + + qdev_connect_gpio_out(DEVICE(&s->gpt5_gpt6_irq), 0, + qdev_get_gpio_in(gicdev, FSL_IMX8MP_GPT5_GPT6_IRQ)); + + for (i = 0; i < FSL_IMX8MP_NUM_GPTS; i++) { + hwaddr gpt_addrs[FSL_IMX8MP_NUM_GPTS] = { + fsl_imx8mp_memmap[FSL_IMX8MP_GPT1].addr, + fsl_imx8mp_memmap[FSL_IMX8MP_GPT2].addr, + fsl_imx8mp_memmap[FSL_IMX8MP_GPT3].addr, + fsl_imx8mp_memmap[FSL_IMX8MP_GPT4].addr, + fsl_imx8mp_memmap[FSL_IMX8MP_GPT5].addr, + fsl_imx8mp_memmap[FSL_IMX8MP_GPT6].addr, + }; + + s->gpt[i].ccm = IMX_CCM(&s->ccm); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpt[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt[i]), 0, gpt_addrs[i]); + + if (i < FSL_IMX8MP_NUM_GPTS - 2) { + static const unsigned int gpt_irqs[FSL_IMX8MP_NUM_GPTS - 2] = { + FSL_IMX8MP_GPT1_IRQ, + FSL_IMX8MP_GPT2_IRQ, + FSL_IMX8MP_GPT3_IRQ, + FSL_IMX8MP_GPT4_IRQ, + }; + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt[i]), 0, + qdev_get_gpio_in(gicdev, gpt_irqs[i])); + } else { + int irq = i - FSL_IMX8MP_NUM_GPTS + 2; + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt[i]), 0, + qdev_get_gpio_in(DEVICE(&s->gpt5_gpt6_irq), irq)); + } + } + + /* I2Cs */ + for (i = 0; i < FSL_IMX8MP_NUM_I2CS; i++) { + struct { + hwaddr addr; + unsigned int irq; + } i2c_table[FSL_IMX8MP_NUM_I2CS] = { + { fsl_imx8mp_memmap[FSL_IMX8MP_I2C1].addr, FSL_IMX8MP_I2C1_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_I2C2].addr, FSL_IMX8MP_I2C2_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_I2C3].addr, FSL_IMX8MP_I2C3_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_I2C4].addr, FSL_IMX8MP_I2C4_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_I2C5].addr, FSL_IMX8MP_I2C5_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_I2C6].addr, FSL_IMX8MP_I2C6_IRQ }, + }; + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, i2c_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0, + qdev_get_gpio_in(gicdev, i2c_table[i].irq)); + } + + /* GPIOs */ + for (i = 0; i < FSL_IMX8MP_NUM_GPIOS; i++) { + struct { + hwaddr addr; + unsigned int irq_low; + unsigned int irq_high; + } gpio_table[FSL_IMX8MP_NUM_GPIOS] = { + { + fsl_imx8mp_memmap[FSL_IMX8MP_GPIO1].addr, + FSL_IMX8MP_GPIO1_LOW_IRQ, + FSL_IMX8MP_GPIO1_HIGH_IRQ + }, + { + fsl_imx8mp_memmap[FSL_IMX8MP_GPIO2].addr, + FSL_IMX8MP_GPIO2_LOW_IRQ, + FSL_IMX8MP_GPIO2_HIGH_IRQ + }, + { + fsl_imx8mp_memmap[FSL_IMX8MP_GPIO3].addr, + FSL_IMX8MP_GPIO3_LOW_IRQ, + FSL_IMX8MP_GPIO3_HIGH_IRQ + }, + { + fsl_imx8mp_memmap[FSL_IMX8MP_GPIO4].addr, + FSL_IMX8MP_GPIO4_LOW_IRQ, + FSL_IMX8MP_GPIO4_HIGH_IRQ + }, + { + fsl_imx8mp_memmap[FSL_IMX8MP_GPIO5].addr, + FSL_IMX8MP_GPIO5_LOW_IRQ, + FSL_IMX8MP_GPIO5_HIGH_IRQ + }, + }; + + object_property_set_bool(OBJECT(&s->gpio[i]), "has-edge-sel", true, + &error_abort); + object_property_set_bool(OBJECT(&s->gpio[i]), "has-upper-pin-irq", + true, &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, gpio_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0, + qdev_get_gpio_in(gicdev, gpio_table[i].irq_low)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1, + qdev_get_gpio_in(gicdev, gpio_table[i].irq_high)); + } + + /* USDHCs */ + for (i = 0; i < FSL_IMX8MP_NUM_USDHCS; i++) { + struct { + hwaddr addr; + unsigned int irq; + } usdhc_table[FSL_IMX8MP_NUM_USDHCS] = { + { fsl_imx8mp_memmap[FSL_IMX8MP_USDHC1].addr, FSL_IMX8MP_USDHC1_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_USDHC2].addr, FSL_IMX8MP_USDHC2_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_USDHC3].addr, 
FSL_IMX8MP_USDHC3_IRQ }, + }; + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->usdhc[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->usdhc[i]), 0, usdhc_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->usdhc[i]), 0, + qdev_get_gpio_in(gicdev, usdhc_table[i].irq)); + } + + /* USBs */ + for (i = 0; i < FSL_IMX8MP_NUM_USBS; i++) { + struct { + hwaddr addr; + unsigned int irq; + } usb_table[FSL_IMX8MP_NUM_USBS] = { + { fsl_imx8mp_memmap[FSL_IMX8MP_USB1].addr, FSL_IMX8MP_USB1_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_USB2].addr, FSL_IMX8MP_USB2_IRQ }, + }; + + qdev_prop_set_uint32(DEVICE(&s->usb[i].sysbus_xhci), "p2", 1); + qdev_prop_set_uint32(DEVICE(&s->usb[i].sysbus_xhci), "p3", 1); + qdev_prop_set_uint32(DEVICE(&s->usb[i].sysbus_xhci), "slots", 2); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->usb[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0, usb_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i].sysbus_xhci), 0, + qdev_get_gpio_in(gicdev, usb_table[i].irq)); + } + + /* ECSPIs */ + for (i = 0; i < FSL_IMX8MP_NUM_ECSPIS; i++) { + struct { + hwaddr addr; + unsigned int irq; + } spi_table[FSL_IMX8MP_NUM_ECSPIS] = { + { fsl_imx8mp_memmap[FSL_IMX8MP_ECSPI1].addr, FSL_IMX8MP_ECSPI1_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_ECSPI2].addr, FSL_IMX8MP_ECSPI2_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_ECSPI3].addr, FSL_IMX8MP_ECSPI3_IRQ }, + }; + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, spi_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0, + qdev_get_gpio_in(gicdev, spi_table[i].irq)); + } + + /* ENET1 */ + object_property_set_uint(OBJECT(&s->enet), "phy-num", s->phy_num, + &error_abort); + object_property_set_uint(OBJECT(&s->enet), "tx-ring-num", 3, &error_abort); + qemu_configure_nic_device(DEVICE(&s->enet), true, NULL); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->enet), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->enet), 0, + fsl_imx8mp_memmap[FSL_IMX8MP_ENET1].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->enet), 0, + qdev_get_gpio_in(gicdev, FSL_IMX8MP_ENET1_MAC_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->enet), 1, + qdev_get_gpio_in(gicdev, FSL_IMX6_ENET1_MAC_1588_IRQ)); + + /* SNVS */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->snvs), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, + fsl_imx8mp_memmap[FSL_IMX8MP_SNVS_HP].addr); + + /* Watchdogs */ + for (i = 0; i < FSL_IMX8MP_NUM_WDTS; i++) { + struct { + hwaddr addr; + unsigned int irq; + } wdog_table[FSL_IMX8MP_NUM_WDTS] = { + { fsl_imx8mp_memmap[FSL_IMX8MP_WDOG1].addr, FSL_IMX8MP_WDOG1_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_WDOG2].addr, FSL_IMX8MP_WDOG2_IRQ }, + { fsl_imx8mp_memmap[FSL_IMX8MP_WDOG3].addr, FSL_IMX8MP_WDOG3_IRQ }, + }; + + object_property_set_bool(OBJECT(&s->wdt[i]), "pretimeout-support", + true, &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, wdog_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->wdt[i]), 0, + qdev_get_gpio_in(gicdev, wdog_table[i].irq)); + } + + /* PCIe */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->pcie), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcie), 0, + fsl_imx8mp_memmap[FSL_IMX8MP_PCIE1].addr); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 0, + qdev_get_gpio_in(gicdev, FSL_IMX8MP_PCI_INTA_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 1, + qdev_get_gpio_in(gicdev, FSL_IMX8MP_PCI_INTB_IRQ)); + 
sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 2, + qdev_get_gpio_in(gicdev, FSL_IMX8MP_PCI_INTC_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 3, + qdev_get_gpio_in(gicdev, FSL_IMX8MP_PCI_INTD_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 4, + qdev_get_gpio_in(gicdev, FSL_IMX8MP_PCI_MSI_IRQ)); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->pcie_phy), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcie_phy), 0, + fsl_imx8mp_memmap[FSL_IMX8MP_PCIE_PHY1].addr); + + /* On-Chip RAM */ + if (!memory_region_init_ram(&s->ocram, NULL, "imx8mp.ocram", + fsl_imx8mp_memmap[FSL_IMX8MP_OCRAM].size, + errp)) { + return; + } + memory_region_add_subregion(get_system_memory(), + fsl_imx8mp_memmap[FSL_IMX8MP_OCRAM].addr, + &s->ocram); + + /* Unimplemented devices */ + for (i = 0; i < ARRAY_SIZE(fsl_imx8mp_memmap); i++) { + switch (i) { + case FSL_IMX8MP_ANA_PLL: + case FSL_IMX8MP_CCM: + case FSL_IMX8MP_GIC_DIST: + case FSL_IMX8MP_GIC_REDIST: + case FSL_IMX8MP_GPIO1 ... FSL_IMX8MP_GPIO5: + case FSL_IMX8MP_ECSPI1 ... FSL_IMX8MP_ECSPI3: + case FSL_IMX8MP_ENET1: + case FSL_IMX8MP_I2C1 ... FSL_IMX8MP_I2C6: + case FSL_IMX8MP_OCRAM: + case FSL_IMX8MP_PCIE1: + case FSL_IMX8MP_PCIE_PHY1: + case FSL_IMX8MP_RAM: + case FSL_IMX8MP_SNVS_HP: + case FSL_IMX8MP_UART1 ... FSL_IMX8MP_UART4: + case FSL_IMX8MP_USB1 ... FSL_IMX8MP_USB2: + case FSL_IMX8MP_USDHC1 ... FSL_IMX8MP_USDHC3: + case FSL_IMX8MP_WDOG1 ... FSL_IMX8MP_WDOG3: + /* device implemented and treated above */ + break; + + default: + create_unimplemented_device(fsl_imx8mp_memmap[i].name, + fsl_imx8mp_memmap[i].addr, + fsl_imx8mp_memmap[i].size); + break; + } + } +} + +static const Property fsl_imx8mp_properties[] = { + DEFINE_PROP_UINT32("fec1-phy-num", FslImx8mpState, phy_num, 0), + DEFINE_PROP_BOOL("fec1-phy-connected", FslImx8mpState, phy_connected, true), +}; + +static void fsl_imx8mp_class_init(ObjectClass *oc, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, fsl_imx8mp_properties); + dc->realize = fsl_imx8mp_realize; + + dc->desc = "i.MX 8M Plus SoC"; +} + +static const TypeInfo fsl_imx8mp_types[] = { + { + .name = TYPE_FSL_IMX8MP, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(FslImx8mpState), + .instance_init = fsl_imx8mp_init, + .class_init = fsl_imx8mp_class_init, + }, +}; + +DEFINE_TYPES(fsl_imx8mp_types) diff --git a/hw/arm/gumstix.c b/hw/arm/gumstix.c deleted file mode 100644 index 91462691531..00000000000 --- a/hw/arm/gumstix.c +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Gumstix Platforms - * - * Copyright (c) 2007 by Thorsten Zitterell - * - * Code based on spitz platform by Andrzej Zaborowski - * - * This code is licensed under the GNU GPL v2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. 
- */ - -/* - * Example usage: - * - * connex: - * ======= - * create image: - * # dd of=flash bs=1k count=16k if=/dev/zero - * # dd of=flash bs=1k conv=notrunc if=u-boot.bin - * # dd of=flash bs=1k conv=notrunc seek=256 if=rootfs.arm_nofpu.jffs2 - * start it: - * # qemu-system-arm -M connex -pflash flash -monitor null -nographic - * - * verdex: - * ======= - * create image: - * # dd of=flash bs=1k count=32k if=/dev/zero - * # dd of=flash bs=1k conv=notrunc if=u-boot.bin - * # dd of=flash bs=1k conv=notrunc seek=256 if=rootfs.arm_nofpu.jffs2 - * # dd of=flash bs=1k conv=notrunc seek=31744 if=uImage - * start it: - * # qemu-system-arm -M verdex -pflash flash -monitor null -nographic -m 289 - */ - -#include "qemu/osdep.h" -#include "qemu/units.h" -#include "qemu/error-report.h" -#include "hw/arm/pxa.h" -#include "net/net.h" -#include "hw/block/flash.h" -#include "hw/net/smc91c111.h" -#include "hw/boards.h" -#include "exec/address-spaces.h" -#include "sysemu/qtest.h" - -#define CONNEX_FLASH_SIZE (16 * MiB) -#define CONNEX_RAM_SIZE (64 * MiB) - -#define VERDEX_FLASH_SIZE (32 * MiB) -#define VERDEX_RAM_SIZE (256 * MiB) - -#define FLASH_SECTOR_SIZE (128 * KiB) - -static void connex_init(MachineState *machine) -{ - PXA2xxState *cpu; - DriveInfo *dinfo; - - cpu = pxa255_init(CONNEX_RAM_SIZE); - - dinfo = drive_get(IF_PFLASH, 0, 0); - if (!dinfo && !qtest_enabled()) { - error_report("A flash image must be given with the " - "'pflash' parameter"); - exit(1); - } - - /* Numonyx RC28F128J3F75 */ - pflash_cfi01_register(0x00000000, "connext.rom", CONNEX_FLASH_SIZE, - dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, - FLASH_SECTOR_SIZE, 2, 0, 0, 0, 0, 0); - - /* Interrupt line of NIC is connected to GPIO line 36 */ - smc91c111_init(0x04000300, qdev_get_gpio_in(cpu->gpio, 36)); -} - -static void verdex_init(MachineState *machine) -{ - PXA2xxState *cpu; - DriveInfo *dinfo; - - cpu = pxa270_init(VERDEX_RAM_SIZE, machine->cpu_type); - - dinfo = drive_get(IF_PFLASH, 0, 0); - if (!dinfo && !qtest_enabled()) { - error_report("A flash image must be given with the " - "'pflash' parameter"); - exit(1); - } - - /* Micron RC28F256P30TFA */ - pflash_cfi01_register(0x00000000, "verdex.rom", VERDEX_FLASH_SIZE, - dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL, - FLASH_SECTOR_SIZE, 2, 0, 0, 0, 0, 0); - - /* Interrupt line of NIC is connected to GPIO line 99 */ - smc91c111_init(0x04000300, qdev_get_gpio_in(cpu->gpio, 99)); -} - -static void connex_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - - mc->desc = "Gumstix Connex (PXA255)"; - mc->init = connex_init; - mc->ignore_memory_transaction_failures = true; - mc->deprecation_reason = "machine is old and unmaintained"; -} - -static const TypeInfo connex_type = { - .name = MACHINE_TYPE_NAME("connex"), - .parent = TYPE_MACHINE, - .class_init = connex_class_init, -}; - -static void verdex_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - - mc->desc = "Gumstix Verdex Pro XL6P COMs (PXA270)"; - mc->init = verdex_init; - mc->ignore_memory_transaction_failures = true; - mc->deprecation_reason = "machine is old and unmaintained"; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c0"); -} - -static const TypeInfo verdex_type = { - .name = MACHINE_TYPE_NAME("verdex"), - .parent = TYPE_MACHINE, - .class_init = verdex_class_init, -}; - -static void gumstix_machine_init(void) -{ - type_register_static(&connex_type); - type_register_static(&verdex_type); -} - -type_init(gumstix_machine_init) diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c index c71b1a8db32..165c0b741a5 100644 --- a/hw/arm/highbank.c +++ b/hw/arm/highbank.c @@ -25,8 +25,8 @@ #include "hw/arm/boot.h" #include "hw/loader.h" #include "net/net.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/runstate.h" +#include "system/system.h" #include "hw/boards.h" #include "qemu/error-report.h" #include "hw/char/pl011.h" @@ -45,7 +45,7 @@ #define MVBAR_ADDR 0x200 #define BOARD_SETUP_ADDR (MVBAR_ADDR + 8 * sizeof(uint32_t)) -#define NIRQ_GIC 160 +#define GIC_EXT_IRQS 128 /* EnergyCore ECX-1000 & ECX-2000 */ /* Board init. 
*/ @@ -139,13 +139,13 @@ static void highbank_regs_init(Object *obj) sysbus_init_mmio(dev, &s->iomem); } -static void highbank_regs_class_init(ObjectClass *klass, void *data) +static void highbank_regs_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->desc = "Calxeda Highbank registers"; dc->vmsd = &vmstate_highbank_regs; - dc->reset = highbank_regs_reset; + device_class_set_legacy_reset(dc, highbank_regs_reset); } static const TypeInfo highbank_regs_info = { @@ -180,7 +180,7 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id) { DeviceState *dev = NULL; SysBusDevice *busdev; - qemu_irq pic[128]; + qemu_irq pic[GIC_EXT_IRQS]; int n; unsigned int smp_cpus = machine->smp.cpus; qemu_irq cpu_irq[4]; @@ -199,7 +199,7 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id) machine->cpu_type = ARM_CPU_TYPE_NAME("cortex-a15"); break; default: - assert(0); + g_assert_not_reached(); } for (n = 0; n < smp_cpus; n++) { @@ -260,7 +260,7 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id) break; } qdev_prop_set_uint32(dev, "num-cpu", smp_cpus); - qdev_prop_set_uint32(dev, "num-irq", NIRQ_GIC); + qdev_prop_set_uint32(dev, "num-irq", GIC_EXT_IRQS + GIC_INTERNAL); busdev = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(busdev, &error_fatal); sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE); @@ -271,7 +271,7 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id) sysbus_connect_irq(busdev, n + 3 * smp_cpus, cpu_vfiq[n]); } - for (n = 0; n < 128; n++) { + for (n = 0; n < GIC_EXT_IRQS; n++) { pic[n] = qdev_get_gpio_in(dev, n); } @@ -341,7 +341,7 @@ static void midway_init(MachineState *machine) calxeda_init(machine, CALXEDA_MIDWAY); } -static void highbank_class_init(ObjectClass *oc, void *data) +static void highbank_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { ARM_CPU_TYPE_NAME("cortex-a9"), @@ -357,6 +357,7 @@ static void highbank_class_init(ObjectClass *oc, void *data) mc->max_cpus = 4; mc->ignore_memory_transaction_failures = true; mc->default_ram_id = "highbank.dram"; + mc->deprecation_reason = "no known users left for this machine"; } static const TypeInfo highbank_type = { @@ -365,7 +366,7 @@ static const TypeInfo highbank_type = { .class_init = highbank_class_init, }; -static void midway_class_init(ObjectClass *oc, void *data) +static void midway_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { ARM_CPU_TYPE_NAME("cortex-a15"), @@ -381,6 +382,7 @@ static void midway_class_init(ObjectClass *oc, void *data) mc->max_cpus = 4; mc->ignore_memory_transaction_failures = true; mc->default_ram_id = "highbank.dram"; + mc->deprecation_reason = "no known users left for this machine"; } static const TypeInfo midway_type = { diff --git a/hw/arm/imx25_pdk.c b/hw/arm/imx25_pdk.c index 7dfddd49e23..e95ea5e4e18 100644 --- a/hw/arm/imx25_pdk.c +++ b/hw/arm/imx25_pdk.c @@ -30,7 +30,7 @@ #include "hw/arm/boot.h" #include "hw/boards.h" #include "qemu/error-report.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" #include "hw/i2c/i2c.h" #include "qemu/cutils.h" @@ -147,6 +147,7 @@ static void imx25_pdk_machine_init(MachineClass *mc) mc->init = imx25_pdk_init; mc->ignore_memory_transaction_failures = true; mc->default_ram_id = "imx25.ram"; + mc->auto_create_sdcard = true; } DEFINE_MACHINE("imx25-pdk", imx25_pdk_machine_init) diff --git a/hw/arm/imx8mp-evk.c b/hw/arm/imx8mp-evk.c new 
file mode 100644 index 00000000000..b3082fa60d8 --- /dev/null +++ b/hw/arm/imx8mp-evk.c @@ -0,0 +1,103 @@ +/* + * NXP i.MX 8M Plus Evaluation Kit System Emulation + * + * Copyright (c) 2024, Bernhard Beschow + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "system/address-spaces.h" +#include "hw/arm/boot.h" +#include "hw/arm/fsl-imx8mp.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "system/qtest.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include <libfdt.h> + +static void imx8mp_evk_modify_dtb(const struct arm_boot_info *info, void *fdt) +{ + int i, offset; + + /* Temporarily disable following nodes until they are implemented */ + const char *nodes_to_remove[] = { + "nxp,imx8mp-fspi", + }; + + for (i = 0; i < ARRAY_SIZE(nodes_to_remove); i++) { + const char *dev_str = nodes_to_remove[i]; + + offset = fdt_node_offset_by_compatible(fdt, -1, dev_str); + while (offset >= 0) { + fdt_nop_node(fdt, offset); + offset = fdt_node_offset_by_compatible(fdt, offset, dev_str); + } + } + + /* Remove cpu-idle-states property from CPU nodes */ + offset = fdt_node_offset_by_compatible(fdt, -1, "arm,cortex-a53"); + while (offset >= 0) { + fdt_nop_property(fdt, offset, "cpu-idle-states"); + offset = fdt_node_offset_by_compatible(fdt, offset, "arm,cortex-a53"); + } +} + +static void imx8mp_evk_init(MachineState *machine) +{ + static struct arm_boot_info boot_info; + FslImx8mpState *s; + + if (machine->ram_size > FSL_IMX8MP_RAM_SIZE_MAX) { + error_report("RAM size " RAM_ADDR_FMT " above max supported (%08" PRIx64 ")", + machine->ram_size, FSL_IMX8MP_RAM_SIZE_MAX); + exit(1); + } + + boot_info = (struct arm_boot_info) { + .loader_start = FSL_IMX8MP_RAM_START, + .board_id = -1, + .ram_size = machine->ram_size, + .psci_conduit = QEMU_PSCI_CONDUIT_SMC, + .modify_dtb = imx8mp_evk_modify_dtb, + }; + + s = FSL_IMX8MP(object_new(TYPE_FSL_IMX8MP)); + object_property_add_child(OBJECT(machine), "soc", OBJECT(s)); + object_property_set_uint(OBJECT(s), "fec1-phy-num", 1, &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(s), &error_fatal); + + memory_region_add_subregion(get_system_memory(), FSL_IMX8MP_RAM_START, + machine->ram); + + for (int i = 0; i < FSL_IMX8MP_NUM_USDHCS; i++) { + BusState *bus; + DeviceState *carddev; + BlockBackend *blk; + DriveInfo *di = drive_get(IF_SD, i, 0); + + if (!di) { + continue; + } + + blk = blk_by_legacy_dinfo(di); + bus = qdev_get_child_bus(DEVICE(&s->usdhc[i]), "sd-bus"); + carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); + qdev_realize_and_unref(carddev, bus, &error_fatal); + } + + if (!qtest_enabled()) { + arm_load_kernel(&s->cpu[0], machine, &boot_info); + } +} + +static void imx8mp_evk_machine_init(MachineClass *mc) +{ + mc->desc = "NXP i.MX 8M Plus EVK Board"; + mc->init = imx8mp_evk_init; + mc->max_cpus = FSL_IMX8MP_NUM_CPUS; + mc->default_ram_id = "imx8mp-evk.ram"; +} +DEFINE_MACHINE("imx8mp-evk", imx8mp_evk_machine_init) diff --git a/hw/arm/integratorcp.c b/hw/arm/integratorcp.c index feb0dd63df5..b1d8fbd470a 100644 --- a/hw/arm/integratorcp.c +++ b/hw/arm/integratorcp.c @@ -16,9 +16,9 @@ #include "hw/misc/arm_integrator_debug.h" #include "hw/net/smc91c111.h" #include "net/net.h" -#include "exec/address-spaces.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/address-spaces.h" +#include "system/runstate.h" +#include "system/system.h" #include "qemu/log.h" #include "qemu/error-report.h" #include "hw/char/pl011.h" @@ -688,18 
+688,18 @@ static void integratorcp_machine_init(MachineClass *mc) mc->ignore_memory_transaction_failures = true; mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926"); mc->default_ram_id = "integrator.ram"; + mc->auto_create_sdcard = true; machine_add_audiodev_property(mc); } DEFINE_MACHINE("integratorcp", integratorcp_machine_init) -static Property core_properties[] = { +static const Property core_properties[] = { DEFINE_PROP_UINT32("memsz", IntegratorCMState, memsz, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void core_class_init(ObjectClass *klass, void *data) +static void core_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -708,14 +708,14 @@ static void core_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_integratorcm; } -static void icp_pic_class_init(ObjectClass *klass, void *data) +static void icp_pic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_icp_pic; } -static void icp_control_class_init(ObjectClass *klass, void *data) +static void icp_control_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/arm/kzm.c b/hw/arm/kzm.c index 2ccd6f8a768..362c1454099 100644 --- a/hw/arm/kzm.c +++ b/hw/arm/kzm.c @@ -19,12 +19,12 @@ #include "hw/arm/boot.h" #include "hw/boards.h" #include "qemu/error-report.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "net/net.h" #include "hw/net/lan9118.h" -#include "hw/char/serial.h" -#include "sysemu/qtest.h" -#include "sysemu/sysemu.h" +#include "hw/char/serial-mm.h" +#include "system/qtest.h" +#include "system/system.h" #include "qemu/cutils.h" /* Memory map for Kzm Emulation Baseboard: diff --git a/hw/arm/mainstone.c b/hw/arm/mainstone.c deleted file mode 100644 index 3a6c22fddbc..00000000000 --- a/hw/arm/mainstone.c +++ /dev/null @@ -1,175 +0,0 @@ -/* - * PXA270-based Intel Mainstone platforms. - * - * Copyright (c) 2007 by Armin Kuster or - * - * - * Code based on spitz platform by Andrzej Zaborowski - * - * This code is licensed under the GNU GPL v2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ -#include "qemu/osdep.h" -#include "qemu/units.h" -#include "qemu/error-report.h" -#include "qapi/error.h" -#include "hw/arm/pxa.h" -#include "hw/arm/boot.h" -#include "net/net.h" -#include "hw/net/smc91c111.h" -#include "hw/boards.h" -#include "hw/block/flash.h" -#include "hw/sysbus.h" -#include "exec/address-spaces.h" - -/* Device addresses */ -#define MST_FPGA_PHYS 0x08000000 -#define MST_ETH_PHYS 0x10000300 -#define MST_FLASH_0 0x00000000 -#define MST_FLASH_1 0x04000000 - -/* IRQ definitions */ -#define MMC_IRQ 0 -#define USIM_IRQ 1 -#define USBC_IRQ 2 -#define ETHERNET_IRQ 3 -#define AC97_IRQ 4 -#define PEN_IRQ 5 -#define MSINS_IRQ 6 -#define EXBRD_IRQ 7 -#define S0_CD_IRQ 9 -#define S0_STSCHG_IRQ 10 -#define S0_IRQ 11 -#define S1_CD_IRQ 13 -#define S1_STSCHG_IRQ 14 -#define S1_IRQ 15 - -static const struct keymap map[0xE0] = { - [0 ... 
0xDF] = { -1, -1 }, - [0x1e] = {0,0}, /* a */ - [0x30] = {0,1}, /* b */ - [0x2e] = {0,2}, /* c */ - [0x20] = {0,3}, /* d */ - [0x12] = {0,4}, /* e */ - [0x21] = {0,5}, /* f */ - [0x22] = {1,0}, /* g */ - [0x23] = {1,1}, /* h */ - [0x17] = {1,2}, /* i */ - [0x24] = {1,3}, /* j */ - [0x25] = {1,4}, /* k */ - [0x26] = {1,5}, /* l */ - [0x32] = {2,0}, /* m */ - [0x31] = {2,1}, /* n */ - [0x18] = {2,2}, /* o */ - [0x19] = {2,3}, /* p */ - [0x10] = {2,4}, /* q */ - [0x13] = {2,5}, /* r */ - [0x1f] = {3,0}, /* s */ - [0x14] = {3,1}, /* t */ - [0x16] = {3,2}, /* u */ - [0x2f] = {3,3}, /* v */ - [0x11] = {3,4}, /* w */ - [0x2d] = {3,5}, /* x */ - [0x34] = {4,0}, /* . */ - [0x15] = {4,2}, /* y */ - [0x2c] = {4,3}, /* z */ - [0x35] = {4,4}, /* / */ - [0xc7] = {5,0}, /* Home */ - [0x2a] = {5,1}, /* shift */ - /* - * There are two matrix positions which map to space, - * but QEMU can only use one of them for the reverse - * mapping, so simply use the second one. - */ - /* [0x39] = {5,2}, space */ - [0x39] = {5,3}, /* space */ - /* - * Matrix position {5,4} and other keys are missing here. - * TODO: Compare with Linux code and test real hardware. - */ - [0x1c] = {5,4}, /* enter */ - [0x0e] = {5,5}, /* backspace */ - [0xc8] = {6,0}, /* up */ - [0xd0] = {6,1}, /* down */ - [0xcb] = {6,2}, /* left */ - [0xcd] = {6,3}, /* right */ -}; - -enum mainstone_model_e { mainstone }; - -#define MAINSTONE_RAM_SIZE (64 * MiB) -#define MAINSTONE_ROM_SIZE (8 * MiB) -#define MAINSTONE_FLASH_SIZE (32 * MiB) - -static struct arm_boot_info mainstone_binfo = { - .loader_start = PXA2XX_SDRAM_BASE, - .ram_size = MAINSTONE_RAM_SIZE, -}; - -#define FLASH_SECTOR_SIZE (256 * KiB) - -static void mainstone_common_init(MachineState *machine, - enum mainstone_model_e model, int arm_id) -{ - hwaddr mainstone_flash_base[] = { MST_FLASH_0, MST_FLASH_1 }; - PXA2xxState *mpu; - DeviceState *mst_irq; - DriveInfo *dinfo; - int i; - MemoryRegion *rom = g_new(MemoryRegion, 1); - - /* Setup CPU & memory */ - mpu = pxa270_init(mainstone_binfo.ram_size, machine->cpu_type); - memory_region_init_rom(rom, NULL, "mainstone.rom", MAINSTONE_ROM_SIZE, - &error_fatal); - memory_region_add_subregion(get_system_memory(), 0x00000000, rom); - - /* There are two 32MiB flash devices on the board */ - for (i = 0; i < 2; i ++) { - dinfo = drive_get(IF_PFLASH, 0, i); - pflash_cfi01_register(mainstone_flash_base[i], - i ? "mainstone.flash1" : "mainstone.flash0", - MAINSTONE_FLASH_SIZE, - dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL, - FLASH_SECTOR_SIZE, 4, 0, 0, 0, 0, 0); - } - - mst_irq = sysbus_create_simple("mainstone-fpga", MST_FPGA_PHYS, - qdev_get_gpio_in(mpu->gpio, 0)); - - /* setup keypad */ - pxa27x_register_keypad(mpu->kp, map, 0xe0); - - /* MMC/SD host */ - pxa2xx_mmci_handlers(mpu->mmc, NULL, qdev_get_gpio_in(mst_irq, MMC_IRQ)); - - pxa2xx_pcmcia_set_irq_cb(mpu->pcmcia[0], - qdev_get_gpio_in(mst_irq, S0_IRQ), - qdev_get_gpio_in(mst_irq, S0_CD_IRQ)); - pxa2xx_pcmcia_set_irq_cb(mpu->pcmcia[1], - qdev_get_gpio_in(mst_irq, S1_IRQ), - qdev_get_gpio_in(mst_irq, S1_CD_IRQ)); - - smc91c111_init(MST_ETH_PHYS, qdev_get_gpio_in(mst_irq, ETHERNET_IRQ)); - - mainstone_binfo.board_id = arm_id; - arm_load_kernel(mpu->cpu, machine, &mainstone_binfo); -} - -static void mainstone_init(MachineState *machine) -{ - mainstone_common_init(machine, mainstone, 0x196); -} - -static void mainstone2_machine_init(MachineClass *mc) -{ - mc->desc = "Mainstone II (PXA27x)"; - mc->init = mainstone_init; - mc->ignore_memory_transaction_failures = true; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c5"); - mc->deprecation_reason = "machine is old and unmaintained"; -} - -DEFINE_MACHINE("mainstone", mainstone2_machine_init) diff --git a/hw/arm/max78000_soc.c b/hw/arm/max78000_soc.c new file mode 100644 index 00000000000..7f1856f5ba1 --- /dev/null +++ b/hw/arm/max78000_soc.c @@ -0,0 +1,232 @@ +/* + * MAX78000 SOC + * + * Copyright (c) 2025 Jackson Donaldson + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Implementation based on stm32f205 and Max78000 user guide at + * https://www.analog.com/media/en/technical-documentation/user-guides/max78000-user-guide.pdf + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "system/address-spaces.h" +#include "system/system.h" +#include "hw/arm/max78000_soc.h" +#include "hw/qdev-clock.h" +#include "hw/misc/unimp.h" + +static const uint32_t max78000_icc_addr[] = {0x4002a000, 0x4002a800}; +static const uint32_t max78000_uart_addr[] = {0x40042000, 0x40043000, + 0x40044000}; + +static const int max78000_uart_irq[] = {14, 15, 34}; + +static void max78000_soc_initfn(Object *obj) +{ + MAX78000State *s = MAX78000_SOC(obj); + int i; + + object_initialize_child(obj, "armv7m", &s->armv7m, TYPE_ARMV7M); + + object_initialize_child(obj, "gcr", &s->gcr, TYPE_MAX78000_GCR); + + for (i = 0; i < MAX78000_NUM_ICC; i++) { + g_autofree char *name = g_strdup_printf("icc%d", i); + object_initialize_child(obj, name, &s->icc[i], TYPE_MAX78000_ICC); + } + + for (i = 0; i < MAX78000_NUM_UART; i++) { + g_autofree char *name = g_strdup_printf("uart%d", i); + object_initialize_child(obj, name, &s->uart[i], + TYPE_MAX78000_UART); + } + + object_initialize_child(obj, "trng", &s->trng, TYPE_MAX78000_TRNG); + + object_initialize_child(obj, "aes", &s->aes, TYPE_MAX78000_AES); + + s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); +} + +static void max78000_soc_realize(DeviceState *dev_soc, Error **errp) +{ + MAX78000State *s = MAX78000_SOC(dev_soc); + MemoryRegion *system_memory = get_system_memory(); + DeviceState *dev, *gcrdev, *armv7m; + SysBusDevice *busdev; + Error *err = NULL; + int i; + + if (!clock_has_source(s->sysclk)) { + error_setg(errp, "sysclk clock must be wired up by the board code"); + return; + } + + memory_region_init_rom(&s->flash, OBJECT(dev_soc), "MAX78000.flash", + FLASH_SIZE, &err); + if (err != NULL) { + error_propagate(errp, err); + return; + } + + memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, &s->flash); + + 
memory_region_init_ram(&s->sram, NULL, "MAX78000.sram", SRAM_SIZE, + &err); + + gcrdev = DEVICE(&s->gcr); + object_property_set_link(OBJECT(gcrdev), "sram", OBJECT(&s->sram), + &err); + + if (err != NULL) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, &s->sram); + + armv7m = DEVICE(&s->armv7m); + + /* + * The MAX78000 user guide's Interrupt Vector Table section + * suggests that there are 120 IRQs in the text, while only listing + * 104 in table 5-1. Implement the more generous of the two. + * This has not been tested in hardware. + */ + qdev_prop_set_uint32(armv7m, "num-irq", 120); + qdev_prop_set_uint8(armv7m, "num-prio-bits", 3); + qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m4")); + qdev_prop_set_bit(armv7m, "enable-bitband", true); + qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); + object_property_set_link(OBJECT(&s->armv7m), "memory", + OBJECT(system_memory), &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) { + return; + } + + for (i = 0; i < MAX78000_NUM_ICC; i++) { + dev = DEVICE(&(s->icc[i])); + sysbus_realize(SYS_BUS_DEVICE(dev), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, max78000_icc_addr[i]); + } + + for (i = 0; i < MAX78000_NUM_UART; i++) { + g_autofree char *link = g_strdup_printf("uart%d", i); + dev = DEVICE(&(s->uart[i])); + qdev_prop_set_chr(dev, "chardev", serial_hd(i)); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart[i]), errp)) { + return; + } + + object_property_set_link(OBJECT(gcrdev), link, OBJECT(dev), + &err); + + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, max78000_uart_addr[i]); + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(armv7m, + max78000_uart_irq[i])); + } + + dev = DEVICE(&s->trng); + sysbus_realize(SYS_BUS_DEVICE(dev), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x4004d000); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(armv7m, 4)); + + object_property_set_link(OBJECT(gcrdev), "trng", OBJECT(dev), &err); + + dev = DEVICE(&s->aes); + sysbus_realize(SYS_BUS_DEVICE(dev), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x40007400); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(armv7m, 5)); + + object_property_set_link(OBJECT(gcrdev), "aes", OBJECT(dev), &err); + + dev = DEVICE(&s->gcr); + sysbus_realize(SYS_BUS_DEVICE(dev), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x40000000); + + create_unimplemented_device("systemInterface", 0x40000400, 0x400); + create_unimplemented_device("functionControl", 0x40000800, 0x400); + create_unimplemented_device("watchdogTimer0", 0x40003000, 0x400); + create_unimplemented_device("dynamicVoltScale", 0x40003c00, 0x40); + create_unimplemented_device("SIMO", 0x40004400, 0x400); + create_unimplemented_device("trimSystemInit", 0x40005400, 0x400); + create_unimplemented_device("generalCtrlFunc", 0x40005800, 0x400); + create_unimplemented_device("wakeupTimer", 0x40006400, 0x400); + create_unimplemented_device("powerSequencer", 0x40006800, 0x400); + create_unimplemented_device("miscControl", 0x40006c00, 0x400); + + create_unimplemented_device("gpio0", 0x40008000, 0x1000); + create_unimplemented_device("gpio1", 0x40009000, 0x1000); + + create_unimplemented_device("parallelCamInterface", 0x4000e000, 0x1000); + create_unimplemented_device("CRC", 0x4000f000, 0x1000); + + create_unimplemented_device("timer0", 0x40010000, 0x1000); + create_unimplemented_device("timer1", 0x40011000, 0x1000); + create_unimplemented_device("timer2", 0x40012000, 0x1000); + 
create_unimplemented_device("timer3", 0x40013000, 0x1000); + + create_unimplemented_device("i2c0", 0x4001d000, 0x1000); + create_unimplemented_device("i2c1", 0x4001e000, 0x1000); + create_unimplemented_device("i2c2", 0x4001f000, 0x1000); + + create_unimplemented_device("standardDMA", 0x40028000, 0x1000); + create_unimplemented_device("flashController0", 0x40029000, 0x400); + + create_unimplemented_device("adc", 0x40034000, 0x1000); + create_unimplemented_device("pulseTrainEngine", 0x4003c000, 0xa0); + create_unimplemented_device("oneWireMaster", 0x4003d000, 0x1000); + create_unimplemented_device("semaphore", 0x4003e000, 0x1000); + + create_unimplemented_device("spi1", 0x40046000, 0x2000); + create_unimplemented_device("i2s", 0x40060000, 0x1000); + create_unimplemented_device("lowPowerControl", 0x40080000, 0x400); + create_unimplemented_device("gpio2", 0x40080400, 0x200); + create_unimplemented_device("lowPowerWatchdogTimer", 0x40080800, 0x400); + create_unimplemented_device("lowPowerTimer4", 0x40080c00, 0x400); + + create_unimplemented_device("lowPowerTimer5", 0x40081000, 0x400); + create_unimplemented_device("lowPowerUART0", 0x40081400, 0x400); + create_unimplemented_device("lowPowerComparator", 0x40088000, 0x400); + + create_unimplemented_device("spi0", 0x400be000, 0x400); + + /* + * The MAX78000 user guide's base address map lists the CNN TX FIFO as + * beginning at 0x400c0400 and ending at 0x400c0400. Given that CNN_FIFO + * is listed as having data accessible up to offset 0x1000, the user + * guide is likely incorrect. + */ + create_unimplemented_device("cnnTxFIFO", 0x400c0400, 0x2000); + + create_unimplemented_device("cnnGlobalControl", 0x50000000, 0x10000); + create_unimplemented_device("cnnx16quad0", 0x50100000, 0x40000); + create_unimplemented_device("cnnx16quad1", 0x50500000, 0x40000); + create_unimplemented_device("cnnx16quad2", 0x50900000, 0x40000); + create_unimplemented_device("cnnx16quad3", 0x50d00000, 0x40000); + +} + +static void max78000_soc_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = max78000_soc_realize; +} + +static const TypeInfo max78000_soc_info = { + .name = TYPE_MAX78000_SOC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MAX78000State), + .instance_init = max78000_soc_initfn, + .class_init = max78000_soc_class_init, +}; + +static void max78000_soc_types(void) +{ + type_register_static(&max78000_soc_info); +} + +type_init(max78000_soc_types) diff --git a/hw/arm/max78000fthr.c b/hw/arm/max78000fthr.c new file mode 100644 index 00000000000..c4f6b5b1b04 --- /dev/null +++ b/hw/arm/max78000fthr.c @@ -0,0 +1,50 @@ +/* + * MAX78000FTHR Evaluation Board + * + * Copyright (c) 2025 Jackson Donaldson + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-clock.h" +#include "qemu/error-report.h" +#include "hw/arm/max78000_soc.h" +#include "hw/arm/boot.h" + +/* 60MHz is the default, but other clocks can be selected. 
*/ +#define SYSCLK_FRQ 60000000ULL +static void max78000_init(MachineState *machine) +{ + DeviceState *dev; + Clock *sysclk; + + sysclk = clock_new(OBJECT(machine), "SYSCLK"); + clock_set_hz(sysclk, SYSCLK_FRQ); + + dev = qdev_new(TYPE_MAX78000_SOC); + object_property_add_child(OBJECT(machine), "soc", OBJECT(dev)); + qdev_connect_clock_in(dev, "sysclk", sysclk); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + armv7m_load_kernel(ARM_CPU(first_cpu), + machine->kernel_filename, + 0x00000000, FLASH_SIZE); +} + +static void max78000_machine_init(MachineClass *mc) +{ + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m4"), + NULL + }; + + mc->desc = "MAX78000FTHR Board (Cortex-M4 / (Unimplemented) RISC-V)"; + mc->init = max78000_init; + mc->valid_cpu_types = valid_cpu_types; +} + +DEFINE_MACHINE("max78000fthr", max78000_machine_init) diff --git a/hw/arm/mcimx6ul-evk.c b/hw/arm/mcimx6ul-evk.c index 500427e94be..86982cb0772 100644 --- a/hw/arm/mcimx6ul-evk.c +++ b/hw/arm/mcimx6ul-evk.c @@ -17,7 +17,7 @@ #include "hw/boards.h" #include "hw/qdev-properties.h" #include "qemu/error-report.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" static void mcimx6ul_evk_init(MachineState *machine) { @@ -74,5 +74,6 @@ static void mcimx6ul_evk_machine_init(MachineClass *mc) mc->init = mcimx6ul_evk_init; mc->max_cpus = FSL_IMX6UL_NUM_CPUS; mc->default_ram_id = "mcimx6ul-evk.ram"; + mc->auto_create_sdcard = true; } DEFINE_MACHINE("mcimx6ul-evk", mcimx6ul_evk_machine_init) diff --git a/hw/arm/mcimx7d-sabre.c b/hw/arm/mcimx7d-sabre.c index 693a1023b6c..33119610113 100644 --- a/hw/arm/mcimx7d-sabre.c +++ b/hw/arm/mcimx7d-sabre.c @@ -19,7 +19,7 @@ #include "hw/boards.h" #include "hw/qdev-properties.h" #include "qemu/error-report.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" static void mcimx7d_sabre_init(MachineState *machine) { @@ -74,5 +74,6 @@ static void mcimx7d_sabre_machine_init(MachineClass *mc) mc->init = mcimx7d_sabre_init; mc->max_cpus = FSL_IMX7_NUM_CPUS; mc->default_ram_id = "mcimx7d-sabre.ram"; + mc->auto_create_sdcard = true; } DEFINE_MACHINE("mcimx7d-sabre", mcimx7d_sabre_machine_init) diff --git a/hw/arm/meson.build b/hw/arm/meson.build index 0c07ab522f4..55d4106be68 100644 --- a/hw/arm/meson.build +++ b/hw/arm/meson.build @@ -1,81 +1,89 @@ arm_ss = ss.source_set() -arm_ss.add(files('boot.c')) +arm_common_ss = ss.source_set() arm_ss.add(when: 'CONFIG_ARM_VIRT', if_true: files('virt.c')) arm_ss.add(when: 'CONFIG_ACPI', if_true: files('virt-acpi-build.c')) -arm_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic_boards.c')) -arm_ss.add(when: 'CONFIG_EMCRAFT_SF2', if_true: files('msf2-som.c')) -arm_ss.add(when: 'CONFIG_HIGHBANK', if_true: files('highbank.c')) -arm_ss.add(when: 'CONFIG_INTEGRATOR', if_true: files('integratorcp.c')) -arm_ss.add(when: 'CONFIG_MAINSTONE', if_true: files('mainstone.c')) -arm_ss.add(when: 'CONFIG_MICROBIT', if_true: files('microbit.c')) -arm_ss.add(when: 'CONFIG_MPS3R', if_true: files('mps3r.c')) -arm_ss.add(when: 'CONFIG_MUSICPAL', if_true: files('musicpal.c')) -arm_ss.add(when: 'CONFIG_NETDUINOPLUS2', if_true: files('netduinoplus2.c')) -arm_ss.add(when: 'CONFIG_OLIMEX_STM32_H405', if_true: files('olimex-stm32-h405.c')) -arm_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx.c', 'npcm7xx_boards.c')) -arm_ss.add(when: 'CONFIG_NSERIES', if_true: files('nseries.c')) -arm_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview.c')) +arm_common_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic_boards.c')) 
+arm_common_ss.add(when: 'CONFIG_EMCRAFT_SF2', if_true: files('msf2-som.c')) +arm_common_ss.add(when: 'CONFIG_HIGHBANK', if_true: files('highbank.c')) +arm_common_ss.add(when: 'CONFIG_INTEGRATOR', if_true: files('integratorcp.c')) +arm_common_ss.add(when: 'CONFIG_MICROBIT', if_true: files('microbit.c')) +arm_common_ss.add(when: 'CONFIG_MPS3R', if_true: files('mps3r.c')) +arm_common_ss.add(when: 'CONFIG_MUSICPAL', if_true: [files('musicpal.c')]) +arm_common_ss.add(when: 'CONFIG_NETDUINOPLUS2', if_true: files('netduinoplus2.c')) +arm_common_ss.add(when: 'CONFIG_OLIMEX_STM32_H405', if_true: files('olimex-stm32-h405.c')) +arm_common_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx.c', 'npcm7xx_boards.c')) +arm_common_ss.add(when: 'CONFIG_NPCM8XX', if_true: files('npcm8xx.c', 'npcm8xx_boards.c')) +arm_common_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview.c')) arm_ss.add(when: 'CONFIG_SBSA_REF', if_true: files('sbsa-ref.c')) -arm_ss.add(when: 'CONFIG_STELLARIS', if_true: files('stellaris.c')) -arm_ss.add(when: 'CONFIG_STM32VLDISCOVERY', if_true: files('stm32vldiscovery.c')) -arm_ss.add(when: 'CONFIG_ZYNQ', if_true: files('xilinx_zynq.c')) -arm_ss.add(when: 'CONFIG_SABRELITE', if_true: files('sabrelite.c')) +arm_common_ss.add(when: 'CONFIG_STELLARIS', if_true: files('stellaris.c')) +arm_common_ss.add(when: 'CONFIG_STM32VLDISCOVERY', if_true: files('stm32vldiscovery.c')) +arm_common_ss.add(when: 'CONFIG_ZYNQ', if_true: files('xilinx_zynq.c')) +arm_common_ss.add(when: 'CONFIG_SABRELITE', if_true: files('sabrelite.c')) -arm_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m.c')) -arm_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210.c')) -arm_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx.c', 'pxa2xx_gpio.c', 'pxa2xx_pic.c')) -arm_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic.c')) -arm_ss.add(when: 'CONFIG_OMAP', if_true: files('omap1.c')) -arm_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('allwinner-a10.c', 'cubieboard.c')) -arm_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3.c', 'orangepi.c')) -arm_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40.c', 'bananapi_m2u.c')) +arm_common_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m.c')) +arm_common_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210.c')) +arm_common_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic.c')) +arm_common_ss.add(when: 'CONFIG_OMAP', if_true: files('omap1.c')) +arm_common_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('allwinner-a10.c', 'cubieboard.c')) +arm_common_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3.c', 'orangepi.c')) +arm_common_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40.c', 'bananapi_m2u.c')) +arm_common_ss.add(when: 'CONFIG_MAX78000_SOC', if_true: files('max78000_soc.c')) arm_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2836.c', 'raspi.c')) -arm_ss.add(when: ['CONFIG_RASPI', 'TARGET_AARCH64'], if_true: files('bcm2838.c', 'raspi4b.c')) -arm_ss.add(when: 'CONFIG_STM32F100_SOC', if_true: files('stm32f100_soc.c')) -arm_ss.add(when: 'CONFIG_STM32F205_SOC', if_true: files('stm32f205_soc.c')) -arm_ss.add(when: 'CONFIG_STM32F405_SOC', if_true: files('stm32f405_soc.c')) -arm_ss.add(when: 'CONFIG_B_L475E_IOT01A', if_true: files('b-l475e-iot01a.c')) -arm_ss.add(when: 'CONFIG_STM32L4X5_SOC', if_true: files('stm32l4x5_soc.c')) -arm_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp.c', 'xlnx-zcu102.c')) -arm_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: 
files('xlnx-versal.c', 'xlnx-versal-virt.c')) -arm_ss.add(when: 'CONFIG_FSL_IMX25', if_true: files('fsl-imx25.c', 'imx25_pdk.c')) -arm_ss.add(when: 'CONFIG_FSL_IMX31', if_true: files('fsl-imx31.c', 'kzm.c')) -arm_ss.add(when: 'CONFIG_FSL_IMX6', if_true: files('fsl-imx6.c')) +arm_common_ss.add(when: ['CONFIG_RASPI', 'TARGET_AARCH64'], if_true: files('bcm2838.c', 'raspi4b.c')) +arm_common_ss.add(when: 'CONFIG_STM32F100_SOC', if_true: files('stm32f100_soc.c')) +arm_common_ss.add(when: 'CONFIG_STM32F205_SOC', if_true: files('stm32f205_soc.c')) +arm_common_ss.add(when: 'CONFIG_STM32F405_SOC', if_true: files('stm32f405_soc.c')) +arm_common_ss.add(when: 'CONFIG_B_L475E_IOT01A', if_true: files('b-l475e-iot01a.c')) +arm_common_ss.add(when: 'CONFIG_STM32L4X5_SOC', if_true: files('stm32l4x5_soc.c')) +arm_common_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp.c', 'xlnx-zcu102.c')) +arm_common_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal.c', 'xlnx-versal-virt.c')) +arm_common_ss.add(when: 'CONFIG_FSL_IMX25', if_true: files('fsl-imx25.c', 'imx25_pdk.c')) +arm_common_ss.add(when: 'CONFIG_FSL_IMX31', if_true: files('fsl-imx31.c', 'kzm.c')) +arm_common_ss.add(when: 'CONFIG_FSL_IMX6', if_true: files('fsl-imx6.c')) arm_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( 'aspeed.c', 'aspeed_soc_common.c', 'aspeed_ast2400.c', 'aspeed_ast2600.c', + 'aspeed_ast27x0-ssp.c', + 'aspeed_ast27x0-tsp.c', 'aspeed_ast10x0.c', 'aspeed_eeprom.c', 'fby35.c')) -arm_ss.add(when: ['CONFIG_ASPEED_SOC', 'TARGET_AARCH64'], if_true: files('aspeed_ast27x0.c')) -arm_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2.c')) -arm_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2-tz.c')) -arm_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-soc.c')) -arm_ss.add(when: 'CONFIG_MUSCA', if_true: files('musca.c')) -arm_ss.add(when: 'CONFIG_ARMSSE', if_true: files('armsse.c')) -arm_ss.add(when: 'CONFIG_FSL_IMX7', if_true: files('fsl-imx7.c', 'mcimx7d-sabre.c')) +arm_common_ss.add(when: ['CONFIG_ASPEED_SOC', 'TARGET_AARCH64'], if_true: files( + 'aspeed_ast27x0.c', + 'aspeed_ast27x0-fc.c',)) +arm_common_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2.c')) +arm_common_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2-tz.c')) +arm_common_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-soc.c')) +arm_common_ss.add(when: 'CONFIG_MUSCA', if_true: files('musca.c')) +arm_common_ss.add(when: 'CONFIG_ARMSSE', if_true: files('armsse.c')) +arm_common_ss.add(when: 'CONFIG_FSL_IMX7', if_true: files('fsl-imx7.c', 'mcimx7d-sabre.c')) +arm_common_ss.add(when: 'CONFIG_FSL_IMX8MP', if_true: files('fsl-imx8mp.c')) +arm_common_ss.add(when: 'CONFIG_FSL_IMX8MP_EVK', if_true: files('imx8mp-evk.c')) arm_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmuv3.c')) -arm_ss.add(when: 'CONFIG_FSL_IMX6UL', if_true: files('fsl-imx6ul.c', 'mcimx6ul-evk.c')) -arm_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_soc.c')) -arm_ss.add(when: 'CONFIG_XEN', if_true: files('xen_arm.c')) +arm_ss.add(when: 'CONFIG_ARM_SMMUV3_ACCEL', if_true: files('smmuv3-accel.c')) +arm_ss.add(when: 'CONFIG_TEGRA241_CMDQV', if_true: files('tegra241-cmdqv.c')) +arm_common_ss.add(when: 'CONFIG_FSL_IMX6UL', if_true: files('fsl-imx6ul.c', 'mcimx6ul-evk.c')) +arm_common_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_soc.c')) +arm_ss.add(when: 'CONFIG_XEN', if_true: files( + 'xen-stubs.c', + 'xen-pvh.c', +)) -system_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmu-common.c')) -system_ss.add(when: 'CONFIG_CHEETAH', if_true: files('palm.c')) 
-system_ss.add(when: 'CONFIG_COLLIE', if_true: files('collie.c')) -system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4_boards.c')) -system_ss.add(when: 'CONFIG_GUMSTIX', if_true: files('gumstix.c')) -system_ss.add(when: 'CONFIG_NETDUINO2', if_true: files('netduino2.c')) -system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap2.c')) -system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_peripherals.c')) -system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2838_peripherals.c')) -system_ss.add(when: 'CONFIG_SPITZ', if_true: files('spitz.c')) -system_ss.add(when: 'CONFIG_STRONGARM', if_true: files('strongarm.c')) -system_ss.add(when: 'CONFIG_SX1', if_true: files('omap_sx1.c')) -system_ss.add(when: 'CONFIG_TOSA', if_true: files('tosa.c')) -system_ss.add(when: 'CONFIG_VERSATILE', if_true: files('versatilepb.c')) -system_ss.add(when: 'CONFIG_VEXPRESS', if_true: files('vexpress.c')) -system_ss.add(when: 'CONFIG_Z2', if_true: files('z2.c')) +arm_common_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmu-common.c')) +arm_common_ss.add(when: 'CONFIG_COLLIE', if_true: files('collie.c')) +arm_common_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4_boards.c')) +arm_common_ss.add(when: 'CONFIG_MAX78000FTHR', if_true: files('max78000fthr.c')) +arm_common_ss.add(when: 'CONFIG_NETDUINO2', if_true: files('netduino2.c')) +arm_common_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_peripherals.c')) +arm_common_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2838_peripherals.c')) +arm_common_ss.add(when: 'CONFIG_STRONGARM', if_true: files('strongarm.c')) +arm_common_ss.add(when: 'CONFIG_SX1', if_true: files('omap_sx1.c')) +arm_common_ss.add(when: 'CONFIG_VERSATILE', if_true: files('versatilepb.c')) +arm_common_ss.add(when: 'CONFIG_VEXPRESS', if_true: files('vexpress.c')) + +arm_common_ss.add(files('boot.c')) hw_arch += {'arm': arm_ss} +hw_common_arch += {'arm': arm_common_ss} diff --git a/hw/arm/microbit.c b/hw/arm/microbit.c index 50df3620882..525443fdb97 100644 --- a/hw/arm/microbit.c +++ b/hw/arm/microbit.c @@ -12,8 +12,8 @@ #include "qapi/error.h" #include "hw/boards.h" #include "hw/arm/boot.h" -#include "sysemu/sysemu.h" -#include "exec/address-spaces.h" +#include "system/system.h" +#include "system/address-spaces.h" #include "hw/arm/nrf51_soc.h" #include "hw/i2c/microbit_i2c.h" @@ -56,11 +56,11 @@ static void microbit_init(MachineState *machine) memory_region_add_subregion_overlap(&s->nrf51.container, NRF51_TWI_BASE, mr, -1); - armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, + armv7m_load_kernel(s->nrf51.armv7m.cpu, machine->kernel_filename, 0, s->nrf51.flash_size); } -static void microbit_machine_class_init(ObjectClass *oc, void *data) +static void microbit_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c index aec57c0d686..5dd87cc0281 100644 --- a/hw/arm/mps2-tz.c +++ b/hw/arm/mps2-tz.c @@ -48,15 +48,15 @@ #include "qemu/units.h" #include "qemu/cutils.h" #include "qapi/error.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qlist.h" #include "qemu/error-report.h" #include "hw/arm/boot.h" #include "hw/arm/armv7m.h" #include "hw/or-irq.h" #include "hw/boards.h" -#include "exec/address-spaces.h" -#include "sysemu/sysemu.h" -#include "sysemu/reset.h" +#include "system/address-spaces.h" +#include "system/system.h" +#include "system/reset.h" #include "hw/misc/unimp.h" #include "hw/char/cmsdk-apb-uart.h" #include "hw/timer/cmsdk-apb-timer.h" @@ -1211,7 +1211,7 @@ 
static void mps2tz_common_init(MachineState *machine) mms->remap_irq); } - armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, + armv7m_load_kernel(mms->iotkit.armv7m[0].cpu, machine->kernel_filename, 0, boot_ram_size(mms)); } @@ -1254,7 +1254,7 @@ static void mps2_set_remap(Object *obj, const char *value, Error **errp) } } -static void mps2_machine_reset(MachineState *machine, ShutdownCause reason) +static void mps2_machine_reset(MachineState *machine, ResetType type) { MPS2TZMachineState *mms = MPS2TZ_MACHINE(machine); @@ -1264,10 +1264,10 @@ static void mps2_machine_reset(MachineState *machine, ShutdownCause reason) * reset see the correct mapping. */ remap_memory(mms, mms->remap); - qemu_devices_reset(reason); + qemu_devices_reset(type); } -static void mps2tz_class_init(ObjectClass *oc, void *data) +static void mps2tz_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); IDAUInterfaceClass *iic = IDAU_INTERFACE_CLASS(oc); @@ -1304,7 +1304,7 @@ static void mps2tz_set_default_ram_info(MPS2TZMachineClass *mmc) g_assert_not_reached(); } -static void mps2tz_an505_class_init(ObjectClass *oc, void *data) +static void mps2tz_an505_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); @@ -1338,7 +1338,7 @@ static void mps2tz_an505_class_init(ObjectClass *oc, void *data) mps2tz_set_default_ram_info(mmc); } -static void mps2tz_an521_class_init(ObjectClass *oc, void *data) +static void mps2tz_an521_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); @@ -1372,7 +1372,7 @@ static void mps2tz_an521_class_init(ObjectClass *oc, void *data) mps2tz_set_default_ram_info(mmc); } -static void mps3tz_an524_class_init(ObjectClass *oc, void *data) +static void mps3tz_an524_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); @@ -1411,7 +1411,7 @@ static void mps3tz_an524_class_init(ObjectClass *oc, void *data) "are BRAM (default) and QSPI."); } -static void mps3tz_an547_class_init(ObjectClass *oc, void *data) +static void mps3tz_an547_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); @@ -1453,7 +1453,7 @@ static const TypeInfo mps2tz_info = { .instance_size = sizeof(MPS2TZMachineState), .class_size = sizeof(MPS2TZMachineClass), .class_init = mps2tz_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_IDAU_INTERFACE }, { } }, diff --git a/hw/arm/mps2.c b/hw/arm/mps2.c index 50919ee46d7..bd378e360b0 100644 --- a/hw/arm/mps2.c +++ b/hw/arm/mps2.c @@ -33,8 +33,8 @@ #include "hw/arm/armv7m.h" #include "hw/or-irq.h" #include "hw/boards.h" -#include "exec/address-spaces.h" -#include "sysemu/sysemu.h" +#include "system/address-spaces.h" +#include "system/system.h" #include "hw/qdev-properties.h" #include "hw/misc/unimp.h" #include "hw/char/cmsdk-apb-uart.h" @@ -48,7 +48,7 @@ #include "net/net.h" #include "hw/watchdog/cmsdk-apb-watchdog.h" #include "hw/qdev-clock.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qlist.h" #include "qom/object.h" typedef enum MPS2FPGAType { @@ -224,7 +224,11 @@ static void mps2_common_init(MachineState *machine) switch (mmc->fpga_type) { case FPGA_AN385: case FPGA_AN386: + qdev_prop_set_uint32(armv7m, "num-irq", 32); + break; case FPGA_AN500: + /* The AN500 
configures its Cortex-M7 with 16 MPU regions */ + qdev_prop_set_uint32(armv7m, "mpu-ns-regions", 16); qdev_prop_set_uint32(armv7m, "num-irq", 32); break; case FPGA_AN511: @@ -460,11 +464,11 @@ static void mps2_common_init(MachineState *machine) qdev_get_gpio_in(armv7m, mmc->fpga_type == FPGA_AN511 ? 47 : 13)); - armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, + armv7m_load_kernel(mms->armv7m.cpu, machine->kernel_filename, 0, 0x400000); } -static void mps2_class_init(ObjectClass *oc, void *data) +static void mps2_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -474,7 +478,7 @@ static void mps2_class_init(ObjectClass *oc, void *data) mc->default_ram_id = "mps.ram"; } -static void mps2_an385_class_init(ObjectClass *oc, void *data) +static void mps2_an385_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); @@ -493,7 +497,7 @@ static void mps2_an385_class_init(ObjectClass *oc, void *data) mmc->has_block_ram = true; } -static void mps2_an386_class_init(ObjectClass *oc, void *data) +static void mps2_an386_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); @@ -512,7 +516,7 @@ static void mps2_an386_class_init(ObjectClass *oc, void *data) mmc->has_block_ram = true; } -static void mps2_an500_class_init(ObjectClass *oc, void *data) +static void mps2_an500_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); @@ -531,7 +535,7 @@ static void mps2_an500_class_init(ObjectClass *oc, void *data) mmc->has_block_ram = false; } -static void mps2_an511_class_init(ObjectClass *oc, void *data) +static void mps2_an511_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); diff --git a/hw/arm/mps3r.c b/hw/arm/mps3r.c index 4d55a6564c6..48c73acc62e 100644 --- a/hw/arm/mps3r.c +++ b/hw/arm/mps3r.c @@ -27,10 +27,10 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" -#include "qapi/qmp/qlist.h" -#include "exec/address-spaces.h" +#include "qobject/qlist.h" +#include "system/address-spaces.h" #include "cpu.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/boards.h" #include "hw/or-irq.h" #include "hw/qdev-clock.h" @@ -583,14 +583,14 @@ static void mps3r_set_default_ram_info(MPS3RMachineClass *mmc) g_assert_not_reached(); } -static void mps3r_class_init(ObjectClass *oc, void *data) +static void mps3r_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); mc->init = mps3r_common_init; } -static void mps3r_an536_class_init(ObjectClass *oc, void *data) +static void mps3r_an536_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS3RMachineClass *mmc = MPS3R_MACHINE_CLASS(oc); diff --git a/hw/arm/msf2-soc.c b/hw/arm/msf2-soc.c index a94a10adcca..c5e9c7175a4 100644 --- a/hw/arm/msf2-soc.c +++ b/hw/arm/msf2-soc.c @@ -25,12 +25,12 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" -#include "exec/address-spaces.h" -#include "hw/char/serial.h" +#include "system/address-spaces.h" +#include "hw/char/serial-mm.h" #include "hw/arm/msf2-soc.h" #include "hw/misc/unimp.h" #include "hw/qdev-clock.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #define MSF2_TIMER_BASE 0x40004000 #define MSF2_SYSREG_BASE 0x40038000 @@ 
-222,7 +222,7 @@ static void m2sxxx_soc_realize(DeviceState *dev_soc, Error **errp) create_unimplemented_device("usb", 0x40043000, 0x1000); } -static Property m2sxxx_soc_properties[] = { +static const Property m2sxxx_soc_properties[] = { /* * part name specifies the type of SmartFusion2 device variant(this * property is for information purpose only. @@ -234,10 +234,9 @@ static Property m2sxxx_soc_properties[] = { /* default divisors in Libero GUI */ DEFINE_PROP_UINT8("apb0div", MSF2State, apb0div, 2), DEFINE_PROP_UINT8("apb1div", MSF2State, apb1div, 2), - DEFINE_PROP_END_OF_LIST(), }; -static void m2sxxx_soc_class_init(ObjectClass *klass, void *data) +static void m2sxxx_soc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/arm/msf2-som.c b/hw/arm/msf2-som.c index 5c415abe852..29c76c68605 100644 --- a/hw/arm/msf2-som.c +++ b/hw/arm/msf2-som.c @@ -33,7 +33,7 @@ #include "hw/qdev-properties.h" #include "hw/arm/boot.h" #include "hw/qdev-clock.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/arm/msf2-soc.h" #define DDR_BASE_ADDRESS 0xA0000000 @@ -92,7 +92,7 @@ static void emcraft_sf2_s2s010_init(MachineState *machine) cs_line = qdev_get_gpio_in_named(spi_flash, SSI_GPIO_CS, 0); sysbus_connect_irq(SYS_BUS_DEVICE(&soc->spi[0]), 1, cs_line); - armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, + armv7m_load_kernel(soc->armv7m.cpu, machine->kernel_filename, 0, soc->envm_size); } diff --git a/hw/arm/musca.c b/hw/arm/musca.c index e2c9d49af58..250b3b5bf84 100644 --- a/hw/arm/musca.c +++ b/hw/arm/musca.c @@ -22,8 +22,8 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "qapi/error.h" -#include "exec/address-spaces.h" -#include "sysemu/sysemu.h" +#include "system/address-spaces.h" +#include "system/system.h" #include "hw/arm/boot.h" #include "hw/arm/armsse.h" #include "hw/boards.h" @@ -590,11 +590,11 @@ static void musca_init(MachineState *machine) "cfg_sec_resp", 0)); } - armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, + armv7m_load_kernel(mms->sse.armv7m[0].cpu, machine->kernel_filename, 0, 0x2000000); } -static void musca_class_init(ObjectClass *oc, void *data) +static void musca_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); static const char * const valid_cpu_types[] = { @@ -609,7 +609,7 @@ static void musca_class_init(ObjectClass *oc, void *data) mc->init = musca_init; } -static void musca_a_class_init(ObjectClass *oc, void *data) +static void musca_a_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); MuscaMachineClass *mmc = MUSCA_MACHINE_CLASS(oc); @@ -623,7 +623,7 @@ static void musca_a_class_init(ObjectClass *oc, void *data) mmc->num_mpcs = ARRAY_SIZE(a_mpc_info); } -static void musca_b1_class_init(ObjectClass *oc, void *data) +static void musca_b1_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); MuscaMachineClass *mmc = MUSCA_MACHINE_CLASS(oc); diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c index 2020f73a576..329b162eb20 100644 --- a/hw/arm/musicpal.c +++ b/hw/arm/musicpal.c @@ -16,9 +16,9 @@ #include "migration/vmstate.h" #include "hw/arm/boot.h" #include "net/net.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/boards.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "qemu/timer.h" #include "hw/ptimer.h" #include "hw/qdev-properties.h" @@ -29,9 +29,9 @@ #include "hw/irq.h" #include 
"hw/or-irq.h" #include "hw/audio/wm8750.h" -#include "sysemu/block-backend.h" -#include "sysemu/runstate.h" -#include "sysemu/dma.h" +#include "system/block-backend.h" +#include "system/runstate.h" +#include "system/dma.h" #include "ui/pixel_ops.h" #include "qemu/cutils.h" #include "qom/object.h" @@ -286,7 +286,7 @@ static const VMStateDescription musicpal_lcd_vmsd = { } }; -static void musicpal_lcd_class_init(ObjectClass *klass, void *data) +static void musicpal_lcd_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -407,11 +407,11 @@ static const VMStateDescription mv88w8618_pic_vmsd = { } }; -static void mv88w8618_pic_class_init(ObjectClass *klass, void *data) +static void mv88w8618_pic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = mv88w8618_pic_reset; + device_class_set_legacy_reset(dc, mv88w8618_pic_reset); dc->vmsd = &mv88w8618_pic_vmsd; } @@ -601,11 +601,11 @@ static const VMStateDescription mv88w8618_pit_vmsd = { } }; -static void mv88w8618_pit_class_init(ObjectClass *klass, void *data) +static void mv88w8618_pit_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = mv88w8618_pit_reset; + device_class_set_legacy_reset(dc, mv88w8618_pit_reset); dc->vmsd = &mv88w8618_pit_vmsd; } @@ -687,7 +687,7 @@ static const VMStateDescription mv88w8618_flashcfg_vmsd = { } }; -static void mv88w8618_flashcfg_class_init(ObjectClass *klass, void *data) +static void mv88w8618_flashcfg_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1026,11 +1026,11 @@ static const VMStateDescription musicpal_gpio_vmsd = { } }; -static void musicpal_gpio_class_init(ObjectClass *klass, void *data) +static void musicpal_gpio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = musicpal_gpio_reset; + device_class_set_legacy_reset(dc, musicpal_gpio_reset); dc->vmsd = &musicpal_gpio_vmsd; } @@ -1171,7 +1171,7 @@ static const VMStateDescription musicpal_key_vmsd = { } }; -static void musicpal_key_class_init(ObjectClass *klass, void *data) +static void musicpal_key_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1238,7 +1238,7 @@ static void musicpal_init(MachineState *machine) qdev_get_gpio_in(pic, MP_TIMER4_IRQ), NULL); /* Logically OR both UART IRQs together */ - uart_orgate = DEVICE(object_new(TYPE_OR_IRQ)); + uart_orgate = qdev_new(TYPE_OR_IRQ); object_property_set_int(OBJECT(uart_orgate), "num-lines", 2, &error_fatal); qdev_realize_and_unref(uart_orgate, NULL, &error_fatal); qdev_connect_gpio_out(uart_orgate, 0, @@ -1348,7 +1348,7 @@ static void musicpal_machine_init(MachineClass *mc) DEFINE_MACHINE("musicpal", musicpal_machine_init) -static void mv88w8618_wlan_class_init(ObjectClass *klass, void *data) +static void mv88w8618_wlan_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/arm/netduino2.c b/hw/arm/netduino2.c index 8b1a9a24379..df793c77fe1 100644 --- a/hw/arm/netduino2.c +++ b/hw/arm/netduino2.c @@ -48,7 +48,7 @@ static void netduino2_init(MachineState *machine) qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, + armv7m_load_kernel(STM32F205_SOC(dev)->armv7m.cpu, machine->kernel_filename, 0, FLASH_SIZE); } diff --git a/hw/arm/netduinoplus2.c 
b/hw/arm/netduinoplus2.c index bccd1003549..81b6334cf72 100644 --- a/hw/arm/netduinoplus2.c +++ b/hw/arm/netduinoplus2.c @@ -48,7 +48,7 @@ static void netduinoplus2_init(MachineState *machine) qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - armv7m_load_kernel(ARM_CPU(first_cpu), + armv7m_load_kernel(STM32F405_SOC(dev)->armv7m.cpu, machine->kernel_filename, 0, FLASH_SIZE); } diff --git a/hw/arm/npcm7xx.c b/hw/arm/npcm7xx.c index cb7791301b4..ecfae328a96 100644 --- a/hw/arm/npcm7xx.c +++ b/hw/arm/npcm7xx.c @@ -18,15 +18,15 @@ #include "hw/arm/boot.h" #include "hw/arm/npcm7xx.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/loader.h" #include "hw/misc/unimp.h" #include "hw/qdev-clock.h" #include "hw/qdev-properties.h" #include "qapi/error.h" -#include "qemu/bswap.h" +#include "exec/tswap.h" #include "qemu/units.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "target/arm/cpu-qom.h" /* @@ -292,17 +292,21 @@ static const struct { hwaddr regs_addr; int cs_count; const hwaddr *flash_addr; + size_t flash_size; } npcm7xx_fiu[] = { { .name = "fiu0", .regs_addr = 0xfb000000, .cs_count = ARRAY_SIZE(npcm7xx_fiu0_flash_addr), .flash_addr = npcm7xx_fiu0_flash_addr, + .flash_size = 128 * MiB, + }, { .name = "fiu3", .regs_addr = 0xc0000000, .cs_count = ARRAY_SIZE(npcm7xx_fiu3_flash_addr), .flash_addr = npcm7xx_fiu3_flash_addr, + .flash_size = 128 * MiB, }, }; @@ -735,6 +739,8 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp) object_property_set_int(OBJECT(sbd), "cs-count", npcm7xx_fiu[i].cs_count, &error_abort); + object_property_set_int(OBJECT(sbd), "flash-size", + npcm7xx_fiu[i].flash_size, &error_abort); sysbus_realize(sbd, &error_abort); sysbus_mmio_map(sbd, 0, npcm7xx_fiu[i].regs_addr); @@ -810,13 +816,12 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp) create_unimplemented_device("npcm7xx.spix", 0xfb001000, 4 * KiB); } -static Property npcm7xx_properties[] = { +static const Property npcm7xx_properties[] = { DEFINE_PROP_LINK("dram-mr", NPCM7xxState, dram, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void npcm7xx_class_init(ObjectClass *oc, void *data) +static void npcm7xx_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -825,7 +830,7 @@ static void npcm7xx_class_init(ObjectClass *oc, void *data) device_class_set_props(dc, npcm7xx_properties); } -static void npcm730_class_init(ObjectClass *oc, void *data) +static void npcm730_class_init(ObjectClass *oc, const void *data) { NPCM7xxClass *nc = NPCM7XX_CLASS(oc); @@ -834,7 +839,7 @@ static void npcm730_class_init(ObjectClass *oc, void *data) nc->num_cpus = 2; } -static void npcm750_class_init(ObjectClass *oc, void *data) +static void npcm750_class_init(ObjectClass *oc, const void *data) { NPCM7xxClass *nc = NPCM7XX_CLASS(oc); diff --git a/hw/arm/npcm7xx_boards.c b/hw/arm/npcm7xx_boards.c index e229efb4472..465a0e5acec 100644 --- a/hw/arm/npcm7xx_boards.c +++ b/hw/arm/npcm7xx_boards.c @@ -27,9 +27,9 @@ #include "qapi/error.h" #include "qemu/datadir.h" #include "qemu/units.h" -#include "sysemu/blockdev.h" -#include "sysemu/sysemu.h" -#include "sysemu/block-backend.h" +#include "system/blockdev.h" +#include "system/system.h" +#include "system/block-backend.h" #include "qemu/error-report.h" @@ -453,7 +453,7 @@ static void npcm7xx_set_soc_type(NPCM7xxMachineClass *nmc, const char *type) mc->default_cpus = mc->min_cpus = mc->max_cpus = sc->num_cpus; } -static 
void npcm7xx_machine_class_init(ObjectClass *oc, void *data) +static void npcm7xx_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); static const char * const valid_cpu_types[] = { @@ -472,7 +472,7 @@ static void npcm7xx_machine_class_init(ObjectClass *oc, void *data) * Schematics: * https://github.com/Nuvoton-Israel/nuvoton-info/blob/master/npcm7xx-poleg/evaluation-board/board_deliverables/NPCM750x_EB_ver.A1.1_COMPLETE.pdf */ -static void npcm750_evb_machine_class_init(ObjectClass *oc, void *data) +static void npcm750_evb_machine_class_init(ObjectClass *oc, const void *data) { NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc); MachineClass *mc = MACHINE_CLASS(oc); @@ -481,10 +481,11 @@ static void npcm750_evb_machine_class_init(ObjectClass *oc, void *data) mc->desc = "Nuvoton NPCM750 Evaluation Board (Cortex-A9)"; mc->init = npcm750_evb_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; }; -static void gsj_machine_class_init(ObjectClass *oc, void *data) +static void gsj_machine_class_init(ObjectClass *oc, const void *data) { NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc); MachineClass *mc = MACHINE_CLASS(oc); @@ -493,10 +494,11 @@ static void gsj_machine_class_init(ObjectClass *oc, void *data) mc->desc = "Quanta GSJ (Cortex-A9)"; mc->init = quanta_gsj_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; }; -static void gbs_bmc_machine_class_init(ObjectClass *oc, void *data) +static void gbs_bmc_machine_class_init(ObjectClass *oc, const void *data) { NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc); MachineClass *mc = MACHINE_CLASS(oc); @@ -505,10 +507,11 @@ static void gbs_bmc_machine_class_init(ObjectClass *oc, void *data) mc->desc = "Quanta GBS (Cortex-A9)"; mc->init = quanta_gbs_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; } -static void kudo_bmc_machine_class_init(ObjectClass *oc, void *data) +static void kudo_bmc_machine_class_init(ObjectClass *oc, const void *data) { NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc); MachineClass *mc = MACHINE_CLASS(oc); @@ -517,10 +520,11 @@ static void kudo_bmc_machine_class_init(ObjectClass *oc, void *data) mc->desc = "Kudo BMC (Cortex-A9)"; mc->init = kudo_bmc_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; }; -static void mori_bmc_machine_class_init(ObjectClass *oc, void *data) +static void mori_bmc_machine_class_init(ObjectClass *oc, const void *data) { NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc); MachineClass *mc = MACHINE_CLASS(oc); @@ -529,6 +533,7 @@ static void mori_bmc_machine_class_init(ObjectClass *oc, void *data) mc->desc = "Mori BMC (Cortex-A9)"; mc->init = mori_bmc_init; + mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; } diff --git a/hw/arm/npcm8xx.c b/hw/arm/npcm8xx.c new file mode 100644 index 00000000000..a276fea6985 --- /dev/null +++ b/hw/arm/npcm8xx.c @@ -0,0 +1,859 @@ +/* + * Nuvoton NPCM8xx SoC family. + * + * Copyright 2022 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" + +#include "hw/boards.h" +#include "hw/arm/boot.h" +#include "hw/arm/bsa.h" +#include "hw/arm/npcm8xx.h" +#include "hw/char/serial-mm.h" +#include "hw/intc/arm_gic.h" +#include "hw/loader.h" +#include "hw/misc/unimp.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/units.h" +#include "system/system.h" + +/* + * This covers the whole MMIO space. We'll use this to catch any MMIO accesses + * that aren't handled by a device. + */ +#define NPCM8XX_MMIO_BA 0x80000000 +#define NPCM8XX_MMIO_SZ 0x7ffd0000 + +/* OTP fuse array */ +#define NPCM8XX_OTP_BA 0xf0189000 + +/* GIC Distributor */ +#define NPCM8XX_GICD_BA 0xdfff9000 +#define NPCM8XX_GICC_BA 0xdfffa000 + +/* Core system modules. */ +#define NPCM8XX_CPUP_BA 0xf03fe000 +#define NPCM8XX_GCR_BA 0xf0800000 +#define NPCM8XX_CLK_BA 0xf0801000 +#define NPCM8XX_MC_BA 0xf0824000 +#define NPCM8XX_RNG_BA 0xf000b000 + +/* ADC Module */ +#define NPCM8XX_ADC_BA 0xf000c000 + +/* Internal AHB SRAM */ +#define NPCM8XX_RAM3_BA 0xc0008000 +#define NPCM8XX_RAM3_SZ (4 * KiB) + +/* Memory blocks at the end of the address space */ +#define NPCM8XX_RAM2_BA 0xfffb0000 +#define NPCM8XX_RAM2_SZ (256 * KiB) +#define NPCM8XX_ROM_BA 0xffff0100 +#define NPCM8XX_ROM_SZ (64 * KiB) + +/* SDHCI Modules */ +#define NPCM8XX_MMC_BA 0xf0842000 + +/* PCS Module */ +#define NPCM8XX_PCS_BA 0xf0780000 + +/* PSPI Modules */ +#define NPCM8XX_PSPI_BA 0xf0201000 + +/* Run PLL1 at 1600 MHz */ +#define NPCM8XX_PLLCON1_FIXUP_VAL 0x00402101 +/* Run the CPU from PLL1 and UART from PLL2 */ +#define NPCM8XX_CLKSEL_FIXUP_VAL 0x004aaba9 + +/* Clock configuration values to be fixed up when bypassing bootloader */ + +/* + * Interrupt lines going into the GIC. This does not include internal Cortex-A35 + * interrupts. 
+ */ +enum NPCM8xxInterrupt { + NPCM8XX_ADC_IRQ = 0, + NPCM8XX_PECI_IRQ = 6, + NPCM8XX_KCS_HIB_IRQ = 9, + NPCM8XX_GMAC1_IRQ = 14, + NPCM8XX_GMAC2_IRQ, + NPCM8XX_GMAC3_IRQ, + NPCM8XX_GMAC4_IRQ, + NPCM8XX_MMC_IRQ = 26, + NPCM8XX_PSPI_IRQ = 28, + NPCM8XX_TIMER0_IRQ = 32, /* Timer Module 0 */ + NPCM8XX_TIMER1_IRQ, + NPCM8XX_TIMER2_IRQ, + NPCM8XX_TIMER3_IRQ, + NPCM8XX_TIMER4_IRQ, + NPCM8XX_TIMER5_IRQ, /* Timer Module 1 */ + NPCM8XX_TIMER6_IRQ, + NPCM8XX_TIMER7_IRQ, + NPCM8XX_TIMER8_IRQ, + NPCM8XX_TIMER9_IRQ, + NPCM8XX_TIMER10_IRQ, /* Timer Module 2 */ + NPCM8XX_TIMER11_IRQ, + NPCM8XX_TIMER12_IRQ, + NPCM8XX_TIMER13_IRQ, + NPCM8XX_TIMER14_IRQ, + NPCM8XX_WDG0_IRQ = 47, /* Timer Module 0 Watchdog */ + NPCM8XX_WDG1_IRQ, /* Timer Module 1 Watchdog */ + NPCM8XX_WDG2_IRQ, /* Timer Module 2 Watchdog */ + NPCM8XX_EHCI1_IRQ = 61, + NPCM8XX_OHCI1_IRQ, + NPCM8XX_EHCI2_IRQ, + NPCM8XX_OHCI2_IRQ, + NPCM8XX_PWM0_IRQ = 93, /* PWM module 0 */ + NPCM8XX_PWM1_IRQ, /* PWM module 1 */ + NPCM8XX_MFT0_IRQ = 96, /* MFT module 0 */ + NPCM8XX_MFT1_IRQ, /* MFT module 1 */ + NPCM8XX_MFT2_IRQ, /* MFT module 2 */ + NPCM8XX_MFT3_IRQ, /* MFT module 3 */ + NPCM8XX_MFT4_IRQ, /* MFT module 4 */ + NPCM8XX_MFT5_IRQ, /* MFT module 5 */ + NPCM8XX_MFT6_IRQ, /* MFT module 6 */ + NPCM8XX_MFT7_IRQ, /* MFT module 7 */ + NPCM8XX_PCI_MBOX1_IRQ = 105, + NPCM8XX_PCI_MBOX2_IRQ, + NPCM8XX_GPIO0_IRQ = 116, + NPCM8XX_GPIO1_IRQ, + NPCM8XX_GPIO2_IRQ, + NPCM8XX_GPIO3_IRQ, + NPCM8XX_GPIO4_IRQ, + NPCM8XX_GPIO5_IRQ, + NPCM8XX_GPIO6_IRQ, + NPCM8XX_GPIO7_IRQ, + NPCM8XX_SMBUS0_IRQ = 128, + NPCM8XX_SMBUS1_IRQ, + NPCM8XX_SMBUS2_IRQ, + NPCM8XX_SMBUS3_IRQ, + NPCM8XX_SMBUS4_IRQ, + NPCM8XX_SMBUS5_IRQ, + NPCM8XX_SMBUS6_IRQ, + NPCM8XX_SMBUS7_IRQ, + NPCM8XX_SMBUS8_IRQ, + NPCM8XX_SMBUS9_IRQ, + NPCM8XX_SMBUS10_IRQ, + NPCM8XX_SMBUS11_IRQ, + NPCM8XX_SMBUS12_IRQ, + NPCM8XX_SMBUS13_IRQ, + NPCM8XX_SMBUS14_IRQ, + NPCM8XX_SMBUS15_IRQ, + NPCM8XX_SMBUS16_IRQ, + NPCM8XX_SMBUS17_IRQ, + NPCM8XX_SMBUS18_IRQ, + NPCM8XX_SMBUS19_IRQ, + NPCM8XX_SMBUS20_IRQ, + NPCM8XX_SMBUS21_IRQ, + NPCM8XX_SMBUS22_IRQ, + NPCM8XX_SMBUS23_IRQ, + NPCM8XX_SMBUS24_IRQ, + NPCM8XX_SMBUS25_IRQ, + NPCM8XX_SMBUS26_IRQ, + NPCM8XX_UART0_IRQ = 192, + NPCM8XX_UART1_IRQ, + NPCM8XX_UART2_IRQ, + NPCM8XX_UART3_IRQ, + NPCM8XX_UART4_IRQ, + NPCM8XX_UART5_IRQ, + NPCM8XX_UART6_IRQ, +}; + +/* Total number of GIC interrupts, including internal Cortex-A35 interrupts. */ +#define NPCM8XX_NUM_IRQ (288) +#define NPCM8XX_PPI_BASE(cpu) \ + ((NPCM8XX_NUM_IRQ - GIC_INTERNAL) + (cpu) * GIC_INTERNAL) + +/* Register base address for each Timer Module */ +static const hwaddr npcm8xx_tim_addr[] = { + 0xf0008000, + 0xf0009000, + 0xf000a000, +}; + +/* Register base address for each 16550 UART */ +static const hwaddr npcm8xx_uart_addr[] = { + 0xf0000000, + 0xf0001000, + 0xf0002000, + 0xf0003000, + 0xf0004000, + 0xf0005000, + 0xf0006000, +}; + +/* Direct memory-mapped access to SPI0 CS0-1. */ +static const hwaddr npcm8xx_fiu0_flash_addr[] = { + 0x80000000, /* CS0 */ + 0x88000000, /* CS1 */ +}; + +/* Direct memory-mapped access to SPI1 CS0-3. */ +static const hwaddr npcm8xx_fiu1_flash_addr[] = { + 0x90000000, /* CS0 */ + 0x91000000, /* CS1 */ + 0x92000000, /* CS2 */ + 0x93000000, /* CS3 */ +}; + +/* Direct memory-mapped access to SPI3 CS0-3. 
*/ +static const hwaddr npcm8xx_fiu3_flash_addr[] = { + 0xa0000000, /* CS0 */ + 0xa8000000, /* CS1 */ + 0xb0000000, /* CS2 */ + 0xb8000000, /* CS3 */ +}; + +/* Register base address for each PWM Module */ +static const hwaddr npcm8xx_pwm_addr[] = { + 0xf0103000, + 0xf0104000, + 0xf0105000, +}; + +/* Register base address for each MFT Module */ +static const hwaddr npcm8xx_mft_addr[] = { + 0xf0180000, + 0xf0181000, + 0xf0182000, + 0xf0183000, + 0xf0184000, + 0xf0185000, + 0xf0186000, + 0xf0187000, +}; + +/* Direct memory-mapped access to each SMBus Module. */ +static const hwaddr npcm8xx_smbus_addr[] = { + 0xf0080000, + 0xf0081000, + 0xf0082000, + 0xf0083000, + 0xf0084000, + 0xf0085000, + 0xf0086000, + 0xf0087000, + 0xf0088000, + 0xf0089000, + 0xf008a000, + 0xf008b000, + 0xf008c000, + 0xf008d000, + 0xf008e000, + 0xf008f000, + 0xfff00000, + 0xfff01000, + 0xfff02000, + 0xfff03000, + 0xfff04000, + 0xfff05000, + 0xfff06000, + 0xfff07000, + 0xfff08000, + 0xfff09000, + 0xfff0a000, +}; + +/* Register base address for each GMAC Module */ +static const hwaddr npcm8xx_gmac_addr[] = { + 0xf0802000, + 0xf0804000, + 0xf0806000, + 0xf0808000, +}; + +/* Register base address for each USB host EHCI registers */ +static const hwaddr npcm8xx_ehci_addr[] = { + 0xf0828100, + 0xf082a100, +}; + +/* Register base address for each USB host OHCI registers */ +static const hwaddr npcm8xx_ohci_addr[] = { + 0xf0829000, + 0xf082b000, +}; + +static const struct { + hwaddr regs_addr; + uint32_t reset_pu; + uint32_t reset_pd; + uint32_t reset_osrc; + uint32_t reset_odsc; +} npcm8xx_gpio[] = { + { + .regs_addr = 0xf0010000, + .reset_pu = 0x00000300, + .reset_pd = 0x000f0000, + }, { + .regs_addr = 0xf0011000, + .reset_pu = 0xe0fefe01, + .reset_pd = 0x07000000, + }, { + .regs_addr = 0xf0012000, + .reset_pu = 0xc00fffff, + .reset_pd = 0x3ff00000, + }, { + .regs_addr = 0xf0013000, + .reset_pd = 0x00003000, + }, { + .regs_addr = 0xf0014000, + .reset_pu = 0xffff0000, + }, { + .regs_addr = 0xf0015000, + .reset_pu = 0xff8387fe, + .reset_pd = 0x007c0001, + .reset_osrc = 0x08000000, + }, { + .regs_addr = 0xf0016000, + .reset_pu = 0x00000801, + .reset_pd = 0x00000302, + }, { + .regs_addr = 0xf0017000, + .reset_pu = 0x000002ff, + .reset_pd = 0x00000c00, + }, +}; + +static const struct { + const char *name; + hwaddr regs_addr; + int cs_count; + const hwaddr *flash_addr; + size_t flash_size; +} npcm8xx_fiu[] = { + { + .name = "fiu0", + .regs_addr = 0xfb000000, + .cs_count = ARRAY_SIZE(npcm8xx_fiu0_flash_addr), + .flash_addr = npcm8xx_fiu0_flash_addr, + .flash_size = 128 * MiB, + }, + { + .name = "fiu1", + .regs_addr = 0xfb002000, + .cs_count = ARRAY_SIZE(npcm8xx_fiu1_flash_addr), + .flash_addr = npcm8xx_fiu1_flash_addr, + .flash_size = 16 * MiB, + }, { + .name = "fiu3", + .regs_addr = 0xc0000000, + .cs_count = ARRAY_SIZE(npcm8xx_fiu3_flash_addr), + .flash_addr = npcm8xx_fiu3_flash_addr, + .flash_size = 128 * MiB, + }, +}; + +static struct arm_boot_info npcm8xx_binfo = { + .loader_start = NPCM8XX_LOADER_START, + .smp_loader_start = NPCM8XX_SMP_LOADER_START, + .smp_bootreg_addr = NPCM8XX_SMP_BOOTREG_ADDR, + .gic_cpu_if_addr = NPCM8XX_GICC_BA, + .secure_boot = false, + .board_id = -1, + .board_setup_addr = NPCM8XX_BOARD_SETUP_ADDR, + .psci_conduit = QEMU_PSCI_CONDUIT_SMC, +}; + +void npcm8xx_load_kernel(MachineState *machine, NPCM8xxState *soc) +{ + npcm8xx_binfo.ram_size = machine->ram_size; + + arm_load_kernel(&soc->cpu[0], machine, &npcm8xx_binfo); +} + +static void npcm8xx_init_fuses(NPCM8xxState *s) +{ + NPCM8xxClass *nc = 
NPCM8XX_GET_CLASS(s); + uint32_t value; + + /* + * The initial mask of disabled modules indicates the chip derivative (e.g. + * NPCM750 or NPCM730). + */ + value = cpu_to_le32(nc->disabled_modules); + npcm7xx_otp_array_write(&s->fuse_array, &value, NPCM7XX_FUSE_DERIVATIVE, + sizeof(value)); +} + +static void npcm8xx_write_adc_calibration(NPCM8xxState *s) +{ + /* Both ADC and the fuse array must have realized. */ + QEMU_BUILD_BUG_ON(sizeof(s->adc.calibration_r_values) != 4); + npcm7xx_otp_array_write(&s->fuse_array, s->adc.calibration_r_values, + NPCM7XX_FUSE_ADC_CALIB, sizeof(s->adc.calibration_r_values)); +} + +static qemu_irq npcm8xx_irq(NPCM8xxState *s, int n) +{ + return qdev_get_gpio_in(DEVICE(&s->gic), n); +} + +static void npcm8xx_init(Object *obj) +{ + NPCM8xxState *s = NPCM8XX(obj); + int i; + + object_initialize_child(obj, "cpu-cluster", &s->cpu_cluster, + TYPE_CPU_CLUSTER); + for (i = 0; i < NPCM8XX_MAX_NUM_CPUS; i++) { + object_initialize_child(OBJECT(&s->cpu_cluster), "cpu[*]", &s->cpu[i], + ARM_CPU_TYPE_NAME("cortex-a35")); + } + object_initialize_child(obj, "gic", &s->gic, TYPE_ARM_GIC); + object_initialize_child(obj, "gcr", &s->gcr, TYPE_NPCM8XX_GCR); + object_property_add_alias(obj, "power-on-straps", OBJECT(&s->gcr), + "power-on-straps"); + object_initialize_child(obj, "clk", &s->clk, TYPE_NPCM8XX_CLK); + object_initialize_child(obj, "otp", &s->fuse_array, + TYPE_NPCM7XX_FUSE_ARRAY); + object_initialize_child(obj, "mc", &s->mc, TYPE_NPCM7XX_MC); + object_initialize_child(obj, "rng", &s->rng, TYPE_NPCM7XX_RNG); + object_initialize_child(obj, "adc", &s->adc, TYPE_NPCM7XX_ADC); + + for (i = 0; i < ARRAY_SIZE(s->tim); i++) { + object_initialize_child(obj, "tim[*]", &s->tim[i], TYPE_NPCM7XX_TIMER); + } + + for (i = 0; i < ARRAY_SIZE(s->gpio); i++) { + object_initialize_child(obj, "gpio[*]", &s->gpio[i], TYPE_NPCM7XX_GPIO); + } + + + for (i = 0; i < ARRAY_SIZE(s->smbus); i++) { + object_initialize_child(obj, "smbus[*]", &s->smbus[i], + TYPE_NPCM7XX_SMBUS); + DEVICE(&s->smbus[i])->id = g_strdup_printf("smbus[%d]", i); + } + + for (i = 0; i < ARRAY_SIZE(s->ehci); i++) { + object_initialize_child(obj, "ehci[*]", &s->ehci[i], TYPE_NPCM7XX_EHCI); + } + for (i = 0; i < ARRAY_SIZE(s->ohci); i++) { + object_initialize_child(obj, "ohci[*]", &s->ohci[i], TYPE_SYSBUS_OHCI); + } + + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_fiu) != ARRAY_SIZE(s->fiu)); + for (i = 0; i < ARRAY_SIZE(s->fiu); i++) { + object_initialize_child(obj, npcm8xx_fiu[i].name, &s->fiu[i], + TYPE_NPCM7XX_FIU); + } + + for (i = 0; i < ARRAY_SIZE(s->pwm); i++) { + object_initialize_child(obj, "pwm[*]", &s->pwm[i], TYPE_NPCM7XX_PWM); + } + + for (i = 0; i < ARRAY_SIZE(s->mft); i++) { + object_initialize_child(obj, "mft[*]", &s->mft[i], TYPE_NPCM7XX_MFT); + } + + for (i = 0; i < ARRAY_SIZE(s->gmac); i++) { + object_initialize_child(obj, "gmac[*]", &s->gmac[i], TYPE_NPCM_GMAC); + } + object_initialize_child(obj, "pcs", &s->pcs, TYPE_NPCM_PCS); + + object_initialize_child(obj, "mmc", &s->mmc, TYPE_NPCM7XX_SDHCI); + object_initialize_child(obj, "pspi", &s->pspi, TYPE_NPCM_PSPI); +} + +static void npcm8xx_realize(DeviceState *dev, Error **errp) +{ + NPCM8xxState *s = NPCM8XX(dev); + NPCM8xxClass *nc = NPCM8XX_GET_CLASS(s); + int i; + + if (memory_region_size(s->dram) > NPCM8XX_DRAM_SZ) { + error_setg(errp, "%s: NPCM8xx cannot address more than %" PRIu64 + " MiB of DRAM", __func__, NPCM8XX_DRAM_SZ / MiB); + return; + } + + /* CPUs */ + for (i = 0; i < nc->num_cpus; i++) { + object_property_set_int(OBJECT(&s->cpu[i]), "mp-affinity", + 
arm_build_mp_affinity(i, NPCM8XX_MAX_NUM_CPUS), + &error_abort); + object_property_set_bool(OBJECT(&s->cpu[i]), "reset-hivecs", true, + &error_abort); + object_property_set_int(OBJECT(&s->cpu[i]), "core-count", + nc->num_cpus, &error_abort); + + /* Disable security extensions. */ + object_property_set_bool(OBJECT(&s->cpu[i]), "has_el3", false, + &error_abort); + + if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) { + return; + } + } + + /* ARM GIC for Cortex A35. Can only fail if we pass bad parameters here. */ + object_property_set_uint(OBJECT(&s->gic), "num-cpu", nc->num_cpus, errp); + object_property_set_uint(OBJECT(&s->gic), "num-irq", NPCM8XX_NUM_IRQ, errp); + object_property_set_uint(OBJECT(&s->gic), "revision", 2, errp); + object_property_set_bool(OBJECT(&s->gic), "has-security-extensions", true, + errp); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gic), errp)) { + return; + } + for (i = 0; i < nc->num_cpus; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i, + qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + nc->num_cpus, + qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_FIQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + nc->num_cpus * 2, + qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_VIRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + nc->num_cpus * 3, + qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_VFIQ)); + + qdev_connect_gpio_out(DEVICE(&s->cpu[i]), GTIMER_PHYS, + qdev_get_gpio_in(DEVICE(&s->gic), + NPCM8XX_PPI_BASE(i) + ARCH_TIMER_NS_EL1_IRQ)); + qdev_connect_gpio_out(DEVICE(&s->cpu[i]), GTIMER_VIRT, + qdev_get_gpio_in(DEVICE(&s->gic), + NPCM8XX_PPI_BASE(i) + ARCH_TIMER_VIRT_IRQ)); + qdev_connect_gpio_out(DEVICE(&s->cpu[i]), GTIMER_HYP, + qdev_get_gpio_in(DEVICE(&s->gic), + NPCM8XX_PPI_BASE(i) + ARCH_TIMER_NS_EL2_IRQ)); + qdev_connect_gpio_out(DEVICE(&s->cpu[i]), GTIMER_SEC, + qdev_get_gpio_in(DEVICE(&s->gic), + NPCM8XX_PPI_BASE(i) + ARCH_TIMER_S_EL1_IRQ)); + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 0, NPCM8XX_GICD_BA); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 1, NPCM8XX_GICC_BA); + + /* CPU cluster */ + qdev_prop_set_uint32(DEVICE(&s->cpu_cluster), "cluster-id", 0); + qdev_realize(DEVICE(&s->cpu_cluster), NULL, &error_fatal); + + /* System Global Control Registers (GCR). Can fail due to user input. */ + object_property_set_int(OBJECT(&s->gcr), "disabled-modules", + nc->disabled_modules, &error_abort); + object_property_add_const_link(OBJECT(&s->gcr), "dram-mr", OBJECT(s->dram)); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gcr), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gcr), 0, NPCM8XX_GCR_BA); + + /* Clock Control Registers (CLK). Cannot fail. */ + sysbus_realize(SYS_BUS_DEVICE(&s->clk), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->clk), 0, NPCM8XX_CLK_BA); + + /* OTP fuse strap array. Cannot fail. */ + sysbus_realize(SYS_BUS_DEVICE(&s->fuse_array), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->fuse_array), 0, NPCM8XX_OTP_BA); + npcm8xx_init_fuses(s); + + /* Fake Memory Controller (MC). Cannot fail. */ + sysbus_realize(SYS_BUS_DEVICE(&s->mc), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->mc), 0, NPCM8XX_MC_BA); + + /* ADC Modules. Cannot fail. 
*/ + qdev_connect_clock_in(DEVICE(&s->adc), "clock", qdev_get_clock_out( + DEVICE(&s->clk), "adc-clock")); + sysbus_realize(SYS_BUS_DEVICE(&s->adc), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->adc), 0, NPCM8XX_ADC_BA); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, + npcm8xx_irq(s, NPCM8XX_ADC_IRQ)); + npcm8xx_write_adc_calibration(s); + + /* Timer Modules (TIM). Cannot fail. */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_tim_addr) != ARRAY_SIZE(s->tim)); + for (i = 0; i < ARRAY_SIZE(s->tim); i++) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->tim[i]); + int first_irq; + int j; + + /* Connect the timer clock. */ + qdev_connect_clock_in(DEVICE(&s->tim[i]), "clock", qdev_get_clock_out( + DEVICE(&s->clk), "timer-clock")); + + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, npcm8xx_tim_addr[i]); + + first_irq = NPCM8XX_TIMER0_IRQ + i * NPCM7XX_TIMERS_PER_CTRL; + for (j = 0; j < NPCM7XX_TIMERS_PER_CTRL; j++) { + qemu_irq irq = npcm8xx_irq(s, first_irq + j); + sysbus_connect_irq(sbd, j, irq); + } + + /* IRQ for watchdogs */ + sysbus_connect_irq(sbd, NPCM7XX_TIMERS_PER_CTRL, + npcm8xx_irq(s, NPCM8XX_WDG0_IRQ + i)); + /* GPIO that connects clk module with watchdog */ + qdev_connect_gpio_out_named(DEVICE(&s->tim[i]), + NPCM7XX_WATCHDOG_RESET_GPIO_OUT, 0, + qdev_get_gpio_in_named(DEVICE(&s->clk), + NPCM7XX_WATCHDOG_RESET_GPIO_IN, i)); + } + + /* UART0..6 (16550 compatible) */ + for (i = 0; i < ARRAY_SIZE(npcm8xx_uart_addr); i++) { + serial_mm_init(get_system_memory(), npcm8xx_uart_addr[i], 2, + npcm8xx_irq(s, NPCM8XX_UART0_IRQ + i), 115200, + serial_hd(i), DEVICE_LITTLE_ENDIAN); + } + + /* Random Number Generator. Cannot fail. */ + sysbus_realize(SYS_BUS_DEVICE(&s->rng), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->rng), 0, NPCM8XX_RNG_BA); + + /* GPIO modules. Cannot fail. */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_gpio) != ARRAY_SIZE(s->gpio)); + for (i = 0; i < ARRAY_SIZE(s->gpio); i++) { + Object *obj = OBJECT(&s->gpio[i]); + + object_property_set_uint(obj, "reset-pullup", + npcm8xx_gpio[i].reset_pu, &error_abort); + object_property_set_uint(obj, "reset-pulldown", + npcm8xx_gpio[i].reset_pd, &error_abort); + object_property_set_uint(obj, "reset-osrc", + npcm8xx_gpio[i].reset_osrc, &error_abort); + object_property_set_uint(obj, "reset-odsc", + npcm8xx_gpio[i].reset_odsc, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(obj), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(obj), 0, npcm8xx_gpio[i].regs_addr); + sysbus_connect_irq(SYS_BUS_DEVICE(obj), 0, + npcm8xx_irq(s, NPCM8XX_GPIO0_IRQ + i)); + } + + /* SMBus modules. Cannot fail. 
*/ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_smbus_addr) != ARRAY_SIZE(s->smbus)); + for (i = 0; i < ARRAY_SIZE(s->smbus); i++) { + Object *obj = OBJECT(&s->smbus[i]); + + sysbus_realize(SYS_BUS_DEVICE(obj), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(obj), 0, npcm8xx_smbus_addr[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(obj), 0, + npcm8xx_irq(s, NPCM8XX_SMBUS0_IRQ + i)); + } + + /* USB Host */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(s->ohci) != ARRAY_SIZE(s->ehci)); + for (i = 0; i < ARRAY_SIZE(s->ehci); i++) { + object_property_set_bool(OBJECT(&s->ehci[i]), "companion-enable", true, + &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci[i]), 0, npcm8xx_ehci_addr[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, + npcm8xx_irq(s, NPCM8XX_EHCI1_IRQ + 2 * i)); + } + for (i = 0; i < ARRAY_SIZE(s->ohci); i++) { + object_property_set_str(OBJECT(&s->ohci[i]), "masterbus", "usb-bus.0", + &error_abort); + object_property_set_uint(OBJECT(&s->ohci[i]), "num-ports", 1, + &error_abort); + object_property_set_uint(OBJECT(&s->ohci[i]), "firstport", i, + &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->ohci[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ohci[i]), 0, npcm8xx_ohci_addr[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ohci[i]), 0, + npcm8xx_irq(s, NPCM8XX_OHCI1_IRQ + 2 * i)); + } + + /* PWM Modules. Cannot fail. */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_pwm_addr) != ARRAY_SIZE(s->pwm)); + for (i = 0; i < ARRAY_SIZE(s->pwm); i++) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->pwm[i]); + + qdev_connect_clock_in(DEVICE(&s->pwm[i]), "clock", qdev_get_clock_out( + DEVICE(&s->clk), "apb3-clock")); + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, npcm8xx_pwm_addr[i]); + sysbus_connect_irq(sbd, i, npcm8xx_irq(s, NPCM8XX_PWM0_IRQ + i)); + } + + /* MFT Modules. Cannot fail. */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_mft_addr) != ARRAY_SIZE(s->mft)); + for (i = 0; i < ARRAY_SIZE(s->mft); i++) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->mft[i]); + + qdev_connect_clock_in(DEVICE(&s->mft[i]), "clock-in", + qdev_get_clock_out(DEVICE(&s->clk), + "apb4-clock")); + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, npcm8xx_mft_addr[i]); + sysbus_connect_irq(sbd, 0, npcm8xx_irq(s, NPCM8XX_MFT0_IRQ + i)); + } + + /* + * GMAC Modules. Cannot fail. + */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_gmac_addr) != ARRAY_SIZE(s->gmac)); + for (i = 0; i < ARRAY_SIZE(s->gmac); i++) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->gmac[i]); + + /* This is used to make sure that the NIC can create the device */ + qemu_configure_nic_device(DEVICE(sbd), false, NULL); + + /* + * The device exists regardless of whether it's connected to a QEMU + * netdev backend. So always instantiate it even if there is no + * backend. + */ + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, npcm8xx_gmac_addr[i]); + /* + * N.B. The values for the second argument sysbus_connect_irq are + * chosen to match the registration order in npcm7xx_emc_realize. + */ + sysbus_connect_irq(sbd, 0, npcm8xx_irq(s, NPCM8XX_GMAC1_IRQ + i)); + } + /* + * GMAC Physical Coding Sublayer(PCS) Module. Cannot fail. + */ + sysbus_realize(SYS_BUS_DEVICE(&s->pcs), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcs), 0, NPCM8XX_PCS_BA); + + /* + * Flash Interface Unit (FIU). Can fail if incorrect number of chip selects + * specified, but this is a programming error. 
+ */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm8xx_fiu) != ARRAY_SIZE(s->fiu)); + for (i = 0; i < ARRAY_SIZE(s->fiu); i++) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->fiu[i]); + int j; + + object_property_set_int(OBJECT(sbd), "cs-count", + npcm8xx_fiu[i].cs_count, &error_abort); + object_property_set_int(OBJECT(sbd), "flash-size", + npcm8xx_fiu[i].flash_size, &error_abort); + sysbus_realize(sbd, &error_abort); + + sysbus_mmio_map(sbd, 0, npcm8xx_fiu[i].regs_addr); + for (j = 0; j < npcm8xx_fiu[i].cs_count; j++) { + sysbus_mmio_map(sbd, j + 1, npcm8xx_fiu[i].flash_addr[j]); + } + } + + /* RAM2 (SRAM) */ + memory_region_init_ram(&s->sram, OBJECT(dev), "ram2", + NPCM8XX_RAM2_SZ, &error_abort); + memory_region_add_subregion(get_system_memory(), NPCM8XX_RAM2_BA, &s->sram); + + /* RAM3 (SRAM) */ + memory_region_init_ram(&s->ram3, OBJECT(dev), "ram3", + NPCM8XX_RAM3_SZ, &error_abort); + memory_region_add_subregion(get_system_memory(), NPCM8XX_RAM3_BA, &s->ram3); + + /* Internal ROM */ + memory_region_init_rom(&s->irom, OBJECT(dev), "irom", NPCM8XX_ROM_SZ, + &error_abort); + memory_region_add_subregion(get_system_memory(), NPCM8XX_ROM_BA, &s->irom); + + /* SDHCI */ + sysbus_realize(SYS_BUS_DEVICE(&s->mmc), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->mmc), 0, NPCM8XX_MMC_BA); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->mmc), 0, + npcm8xx_irq(s, NPCM8XX_MMC_IRQ)); + + /* PSPI */ + sysbus_realize(SYS_BUS_DEVICE(&s->pspi), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->pspi), 0, NPCM8XX_PSPI_BA); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pspi), 0, + npcm8xx_irq(s, NPCM8XX_PSPI_IRQ)); + + create_unimplemented_device("npcm8xx.shm", 0xc0001000, 4 * KiB); + create_unimplemented_device("npcm8xx.gicextra", 0xdfffa000, 24 * KiB); + create_unimplemented_device("npcm8xx.vdmx", 0xe0800000, 4 * KiB); + create_unimplemented_device("npcm8xx.pcierc", 0xe1000000, 64 * KiB); + create_unimplemented_device("npcm8xx.rootc", 0xe8000000, 128 * MiB); + create_unimplemented_device("npcm8xx.kcs", 0xf0007000, 4 * KiB); + create_unimplemented_device("npcm8xx.gfxi", 0xf000e000, 4 * KiB); + create_unimplemented_device("npcm8xx.fsw", 0xf000f000, 4 * KiB); + create_unimplemented_device("npcm8xx.bt", 0xf0030000, 4 * KiB); + create_unimplemented_device("npcm8xx.espi", 0xf009f000, 4 * KiB); + create_unimplemented_device("npcm8xx.peci", 0xf0100000, 4 * KiB); + create_unimplemented_device("npcm8xx.siox[1]", 0xf0101000, 4 * KiB); + create_unimplemented_device("npcm8xx.siox[2]", 0xf0102000, 4 * KiB); + create_unimplemented_device("npcm8xx.tmps", 0xf0188000, 4 * KiB); + create_unimplemented_device("npcm8xx.viru1", 0xf0204000, 4 * KiB); + create_unimplemented_device("npcm8xx.viru2", 0xf0205000, 4 * KiB); + create_unimplemented_device("npcm8xx.jtm1", 0xf0208000, 4 * KiB); + create_unimplemented_device("npcm8xx.jtm2", 0xf0209000, 4 * KiB); + create_unimplemented_device("npcm8xx.flm0", 0xf0210000, 4 * KiB); + create_unimplemented_device("npcm8xx.flm1", 0xf0211000, 4 * KiB); + create_unimplemented_device("npcm8xx.flm2", 0xf0212000, 4 * KiB); + create_unimplemented_device("npcm8xx.flm3", 0xf0213000, 4 * KiB); + create_unimplemented_device("npcm8xx.ahbpci", 0xf0400000, 1 * MiB); + create_unimplemented_device("npcm8xx.dap", 0xf0500000, 960 * KiB); + create_unimplemented_device("npcm8xx.mcphy", 0xf05f0000, 64 * KiB); + create_unimplemented_device("npcm8xx.tsgen", 0xf07fc000, 8 * KiB); + create_unimplemented_device("npcm8xx.copctl", 0xf080c000, 4 * KiB); + create_unimplemented_device("npcm8xx.tipctl", 0xf080d000, 4 * KiB); + 
create_unimplemented_device("npcm8xx.rst", 0xf080e000, 4 * KiB); + create_unimplemented_device("npcm8xx.vcd", 0xf0810000, 64 * KiB); + create_unimplemented_device("npcm8xx.ece", 0xf0820000, 8 * KiB); + create_unimplemented_device("npcm8xx.vdma", 0xf0822000, 8 * KiB); + create_unimplemented_device("npcm8xx.usbd[0]", 0xf0830000, 4 * KiB); + create_unimplemented_device("npcm8xx.usbd[1]", 0xf0831000, 4 * KiB); + create_unimplemented_device("npcm8xx.usbd[2]", 0xf0832000, 4 * KiB); + create_unimplemented_device("npcm8xx.usbd[3]", 0xf0833000, 4 * KiB); + create_unimplemented_device("npcm8xx.usbd[4]", 0xf0834000, 4 * KiB); + create_unimplemented_device("npcm8xx.usbd[5]", 0xf0835000, 4 * KiB); + create_unimplemented_device("npcm8xx.usbd[6]", 0xf0836000, 4 * KiB); + create_unimplemented_device("npcm8xx.usbd[7]", 0xf0837000, 4 * KiB); + create_unimplemented_device("npcm8xx.usbd[8]", 0xf0838000, 4 * KiB); + create_unimplemented_device("npcm8xx.usbd[9]", 0xf0839000, 4 * KiB); + create_unimplemented_device("npcm8xx.pci_mbox1", 0xf0848000, 64 * KiB); + create_unimplemented_device("npcm8xx.gdma0", 0xf0850000, 4 * KiB); + create_unimplemented_device("npcm8xx.gdma1", 0xf0851000, 4 * KiB); + create_unimplemented_device("npcm8xx.gdma2", 0xf0852000, 4 * KiB); + create_unimplemented_device("npcm8xx.aes", 0xf0858000, 4 * KiB); + create_unimplemented_device("npcm8xx.des", 0xf0859000, 4 * KiB); + create_unimplemented_device("npcm8xx.sha", 0xf085a000, 4 * KiB); + create_unimplemented_device("npcm8xx.pci_mbox2", 0xf0868000, 64 * KiB); + create_unimplemented_device("npcm8xx.i3c0", 0xfff10000, 4 * KiB); + create_unimplemented_device("npcm8xx.i3c1", 0xfff11000, 4 * KiB); + create_unimplemented_device("npcm8xx.i3c2", 0xfff12000, 4 * KiB); + create_unimplemented_device("npcm8xx.i3c3", 0xfff13000, 4 * KiB); + create_unimplemented_device("npcm8xx.i3c4", 0xfff14000, 4 * KiB); + create_unimplemented_device("npcm8xx.i3c5", 0xfff15000, 4 * KiB); + create_unimplemented_device("npcm8xx.spixcs0", 0xf8000000, 16 * MiB); + create_unimplemented_device("npcm8xx.spixcs1", 0xf9000000, 16 * MiB); + create_unimplemented_device("npcm8xx.spix", 0xfb001000, 4 * KiB); + create_unimplemented_device("npcm8xx.vect", 0xffff0000, 256); +} + +static const Property npcm8xx_properties[] = { + DEFINE_PROP_LINK("dram-mr", NPCM8xxState, dram, TYPE_MEMORY_REGION, + MemoryRegion *), +}; + +static void npcm8xx_class_init(ObjectClass *oc, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + NPCM8xxClass *nc = NPCM8XX_CLASS(oc); + + dc->realize = npcm8xx_realize; + dc->user_creatable = false; + nc->disabled_modules = 0x00000000; + nc->num_cpus = NPCM8XX_MAX_NUM_CPUS; + device_class_set_props(dc, npcm8xx_properties); +} + +static const TypeInfo npcm8xx_soc_types[] = { + { + .name = TYPE_NPCM8XX, + .parent = TYPE_DEVICE, + .instance_size = sizeof(NPCM8xxState), + .instance_init = npcm8xx_init, + .class_size = sizeof(NPCM8xxClass), + .class_init = npcm8xx_class_init, + }, +}; + +DEFINE_TYPES(npcm8xx_soc_types); diff --git a/hw/arm/npcm8xx_boards.c b/hw/arm/npcm8xx_boards.c new file mode 100644 index 00000000000..3bf3e1f8f16 --- /dev/null +++ b/hw/arm/npcm8xx_boards.c @@ -0,0 +1,254 @@ +/* + * Machine definitions for boards featuring an NPCM8xx SoC. + * + * Copyright 2021 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" + +#include "chardev/char.h" +#include "hw/boards.h" +#include "hw/arm/npcm8xx.h" +#include "hw/core/cpu.h" +#include "hw/loader.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/datadir.h" +#include "qemu/units.h" + +#define NPCM845_EVB_POWER_ON_STRAPS 0x000017ff + +static const char npcm8xx_default_bootrom[] = "npcm8xx_bootrom.bin"; + +static void npcm8xx_load_bootrom(MachineState *machine, NPCM8xxState *soc) +{ + const char *bios_name = machine->firmware ?: npcm8xx_default_bootrom; + g_autofree char *filename = NULL; + int ret; + + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (!filename) { + error_report("Could not find ROM image '%s'", bios_name); + if (!machine->kernel_filename) { + /* We can't boot without a bootrom or a kernel image. */ + exit(1); + } + return; + } + ret = load_image_mr(filename, machine->ram); + if (ret < 0) { + error_report("Failed to load ROM image '%s'", filename); + exit(1); + } +} + +static void npcm8xx_connect_flash(NPCM7xxFIUState *fiu, int cs_no, + const char *flash_type, DriveInfo *dinfo) +{ + DeviceState *flash; + qemu_irq flash_cs; + + flash = qdev_new(flash_type); + if (dinfo) { + qdev_prop_set_drive(flash, "drive", blk_by_legacy_dinfo(dinfo)); + } + qdev_realize_and_unref(flash, BUS(fiu->spi), &error_fatal); + + flash_cs = qdev_get_gpio_in_named(flash, SSI_GPIO_CS, 0); + qdev_connect_gpio_out_named(DEVICE(fiu), "cs", cs_no, flash_cs); +} + +static void npcm8xx_connect_dram(NPCM8xxState *soc, MemoryRegion *dram) +{ + memory_region_add_subregion(get_system_memory(), NPCM8XX_DRAM_BA, dram); + + object_property_set_link(OBJECT(soc), "dram-mr", OBJECT(dram), + &error_abort); +} + +static NPCM8xxState *npcm8xx_create_soc(MachineState *machine, + uint32_t hw_straps) +{ + NPCM8xxMachineClass *nmc = NPCM8XX_MACHINE_GET_CLASS(machine); + Object *obj; + + obj = object_new_with_props(nmc->soc_type, OBJECT(machine), "soc", + &error_abort, NULL); + object_property_set_uint(obj, "power-on-straps", hw_straps, &error_abort); + + return NPCM8XX(obj); +} + +static I2CBus *npcm8xx_i2c_get_bus(NPCM8xxState *soc, uint32_t num) +{ + g_assert(num < ARRAY_SIZE(soc->smbus)); + return I2C_BUS(qdev_get_child_bus(DEVICE(&soc->smbus[num]), "i2c-bus")); +} + +static void npcm8xx_init_pwm_splitter(NPCM8xxMachine *machine, + NPCM8xxState *soc, const int *fan_counts) +{ + SplitIRQ *splitters = machine->fan_splitter; + + /* + * PWM 0~3 belong to module 0 output 0~3. + * PWM 4~7 belong to module 1 output 0~3. 
+ */ + for (int i = 0; i < NPCM8XX_NR_PWM_MODULES; ++i) { + for (int j = 0; j < NPCM7XX_PWM_PER_MODULE; ++j) { + int splitter_no = i * NPCM7XX_PWM_PER_MODULE + j; + DeviceState *splitter; + + if (fan_counts[splitter_no] < 1) { + continue; + } + object_initialize_child(OBJECT(machine), "fan-splitter[*]", + &splitters[splitter_no], TYPE_SPLIT_IRQ); + splitter = DEVICE(&splitters[splitter_no]); + qdev_prop_set_uint16(splitter, "num-lines", + fan_counts[splitter_no]); + qdev_realize(splitter, NULL, &error_abort); + qdev_connect_gpio_out_named(DEVICE(&soc->pwm[i]), "duty-gpio-out", + j, qdev_get_gpio_in(splitter, 0)); + } + } +} + +static void npcm8xx_connect_pwm_fan(NPCM8xxState *soc, SplitIRQ *splitter, + int fan_no, int output_no) +{ + DeviceState *fan; + int fan_input; + qemu_irq fan_duty_gpio; + + g_assert(fan_no >= 0 && fan_no <= NPCM7XX_MFT_MAX_FAN_INPUT); + /* + * Fan 0~1 belong to module 0 input 0~1. + * Fan 2~3 belong to module 1 input 0~1. + * ... + * Fan 14~15 belong to module 7 input 0~1. + * Fan 16~17 belong to module 0 input 2~3. + * Fan 18~19 belong to module 1 input 2~3. + */ + if (fan_no < 16) { + fan = DEVICE(&soc->mft[fan_no / 2]); + fan_input = fan_no % 2; + } else { + fan = DEVICE(&soc->mft[(fan_no - 16) / 2]); + fan_input = fan_no % 2 + 2; + } + + /* Connect the Fan to PWM module */ + fan_duty_gpio = qdev_get_gpio_in_named(fan, "duty", fan_input); + qdev_connect_gpio_out(DEVICE(splitter), output_no, fan_duty_gpio); +} + +static void npcm845_evb_i2c_init(NPCM8xxState *soc) +{ + /* tmp100 temperature sensor on SVB, tmp105 is compatible */ + i2c_slave_create_simple(npcm8xx_i2c_get_bus(soc, 6), "tmp105", 0x48); +} + +static void npcm845_evb_fan_init(NPCM8xxMachine *machine, NPCM8xxState *soc) +{ + SplitIRQ *splitter = machine->fan_splitter; + static const int fan_counts[] = {2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0}; + + npcm8xx_init_pwm_splitter(machine, soc, fan_counts); + npcm8xx_connect_pwm_fan(soc, &splitter[0], 0x00, 0); + npcm8xx_connect_pwm_fan(soc, &splitter[0], 0x01, 1); + npcm8xx_connect_pwm_fan(soc, &splitter[1], 0x02, 0); + npcm8xx_connect_pwm_fan(soc, &splitter[1], 0x03, 1); + npcm8xx_connect_pwm_fan(soc, &splitter[2], 0x04, 0); + npcm8xx_connect_pwm_fan(soc, &splitter[2], 0x05, 1); + npcm8xx_connect_pwm_fan(soc, &splitter[3], 0x06, 0); + npcm8xx_connect_pwm_fan(soc, &splitter[3], 0x07, 1); + npcm8xx_connect_pwm_fan(soc, &splitter[4], 0x08, 0); + npcm8xx_connect_pwm_fan(soc, &splitter[4], 0x09, 1); + npcm8xx_connect_pwm_fan(soc, &splitter[5], 0x0a, 0); + npcm8xx_connect_pwm_fan(soc, &splitter[5], 0x0b, 1); + npcm8xx_connect_pwm_fan(soc, &splitter[6], 0x0c, 0); + npcm8xx_connect_pwm_fan(soc, &splitter[6], 0x0d, 1); + npcm8xx_connect_pwm_fan(soc, &splitter[7], 0x0e, 0); + npcm8xx_connect_pwm_fan(soc, &splitter[7], 0x0f, 1); +} + +static void npcm845_evb_init(MachineState *machine) +{ + NPCM8xxState *soc; + + soc = npcm8xx_create_soc(machine, NPCM845_EVB_POWER_ON_STRAPS); + npcm8xx_connect_dram(soc, machine->ram); + qdev_realize(DEVICE(soc), NULL, &error_fatal); + + npcm8xx_load_bootrom(machine, soc); + npcm8xx_connect_flash(&soc->fiu[0], 0, "w25q256", drive_get(IF_MTD, 0, 0)); + npcm845_evb_i2c_init(soc); + npcm845_evb_fan_init(NPCM8XX_MACHINE(machine), soc); + npcm8xx_load_kernel(machine, soc); +} + +static void npcm8xx_set_soc_type(NPCM8xxMachineClass *nmc, const char *type) +{ + NPCM8xxClass *sc = NPCM8XX_CLASS(object_class_by_name(type)); + MachineClass *mc = MACHINE_CLASS(nmc); + + nmc->soc_type = type; + mc->default_cpus = mc->min_cpus = mc->max_cpus = 
sc->num_cpus; +} + +static void npcm8xx_machine_class_init(ObjectClass *oc, const void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a35"), + NULL + }; + + mc->no_floppy = 1; + mc->no_cdrom = 1; + mc->no_parallel = 1; + mc->default_ram_id = "ram"; + mc->valid_cpu_types = valid_cpu_types; +} + +static void npcm845_evb_machine_class_init(ObjectClass *oc, const void *data) +{ + NPCM8xxMachineClass *nmc = NPCM8XX_MACHINE_CLASS(oc); + MachineClass *mc = MACHINE_CLASS(oc); + + npcm8xx_set_soc_type(nmc, TYPE_NPCM8XX); + + mc->desc = "Nuvoton NPCM845 Evaluation Board (Cortex-A35)"; + mc->init = npcm845_evb_init; + mc->default_ram_size = 1 * GiB; +}; + +static const TypeInfo npcm8xx_machine_types[] = { + { + .name = TYPE_NPCM8XX_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(NPCM8xxMachine), + .class_size = sizeof(NPCM8xxMachineClass), + .class_init = npcm8xx_machine_class_init, + .abstract = true, + }, { + .name = MACHINE_TYPE_NAME("npcm845-evb"), + .parent = TYPE_NPCM8XX_MACHINE, + .class_init = npcm845_evb_machine_class_init, + }, +}; + +DEFINE_TYPES(npcm8xx_machine_types) diff --git a/hw/arm/nrf51_soc.c b/hw/arm/nrf51_soc.c index ac53441630f..d8cc3214ed6 100644 --- a/hw/arm/nrf51_soc.c +++ b/hw/arm/nrf51_soc.c @@ -76,16 +76,16 @@ static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp) } /* This clock doesn't need migration because it is fixed-frequency */ clock_set_hz(s->sysclk, HCLK_FRQ); - qdev_connect_clock_in(DEVICE(&s->cpu), "cpuclk", s->sysclk); + qdev_connect_clock_in(DEVICE(&s->armv7m), "cpuclk", s->sysclk); /* * This SoC has no systick device, so don't connect refclk. * TODO: model the lack of systick (currently the armv7m object * will always provide one). 
*/ - object_property_set_link(OBJECT(&s->cpu), "memory", OBJECT(&s->container), + object_property_set_link(OBJECT(&s->armv7m), "memory", OBJECT(&s->container), &error_abort); - if (!sysbus_realize(SYS_BUS_DEVICE(&s->cpu), errp)) { + if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) { return; } @@ -104,7 +104,7 @@ static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp) mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->uart), 0); memory_region_add_subregion_overlap(&s->container, NRF51_UART_BASE, mr, 0); sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart), 0, - qdev_get_gpio_in(DEVICE(&s->cpu), + qdev_get_gpio_in(DEVICE(&s->armv7m), BASE_TO_IRQ(NRF51_UART_BASE))); /* RNG */ @@ -115,7 +115,7 @@ static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp) mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->rng), 0); memory_region_add_subregion_overlap(&s->container, NRF51_RNG_BASE, mr, 0); sysbus_connect_irq(SYS_BUS_DEVICE(&s->rng), 0, - qdev_get_gpio_in(DEVICE(&s->cpu), + qdev_get_gpio_in(DEVICE(&s->armv7m), BASE_TO_IRQ(NRF51_RNG_BASE))); /* UICR, FICR, NVMC, FLASH */ @@ -161,7 +161,7 @@ static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->timer[i]), 0, base_addr); sysbus_connect_irq(SYS_BUS_DEVICE(&s->timer[i]), 0, - qdev_get_gpio_in(DEVICE(&s->cpu), + qdev_get_gpio_in(DEVICE(&s->armv7m), BASE_TO_IRQ(base_addr))); } @@ -185,10 +185,10 @@ static void nrf51_soc_init(Object *obj) memory_region_init(&s->container, obj, "nrf51-container", UINT64_MAX); - object_initialize_child(OBJECT(s), "armv6m", &s->cpu, TYPE_ARMV7M); - qdev_prop_set_string(DEVICE(&s->cpu), "cpu-type", + object_initialize_child(OBJECT(s), "armv6m", &s->armv7m, TYPE_ARMV7M); + qdev_prop_set_string(DEVICE(&s->armv7m), "cpu-type", ARM_CPU_TYPE_NAME("cortex-m0")); - qdev_prop_set_uint32(DEVICE(&s->cpu), "num-irq", 32); + qdev_prop_set_uint32(DEVICE(&s->armv7m), "num-irq", 32); object_initialize_child(obj, "uart", &s->uart, TYPE_NRF51_UART); object_property_add_alias(obj, "serial0", OBJECT(&s->uart), "chardev"); @@ -208,16 +208,15 @@ static void nrf51_soc_init(Object *obj) s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); } -static Property nrf51_soc_properties[] = { +static const Property nrf51_soc_properties[] = { DEFINE_PROP_LINK("memory", NRF51State, board_memory, TYPE_MEMORY_REGION, MemoryRegion *), DEFINE_PROP_UINT32("sram-size", NRF51State, sram_size, NRF51822_SRAM_SIZE), DEFINE_PROP_UINT32("flash-size", NRF51State, flash_size, NRF51822_FLASH_SIZE), - DEFINE_PROP_END_OF_LIST(), }; -static void nrf51_soc_class_init(ObjectClass *klass, void *data) +static void nrf51_soc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c deleted file mode 100644 index 35364312c73..00000000000 --- a/hw/arm/nseries.c +++ /dev/null @@ -1,1473 +0,0 @@ -/* - * Nokia N-series internet tablets. - * - * Copyright (C) 2007 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see <http://www.gnu.org/licenses/>. - */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "cpu.h" -#include "chardev/char.h" -#include "qemu/cutils.h" -#include "qemu/bswap.h" -#include "qemu/hw-version.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" -#include "hw/arm/omap.h" -#include "hw/arm/boot.h" -#include "hw/irq.h" -#include "ui/console.h" -#include "hw/boards.h" -#include "hw/i2c/i2c.h" -#include "hw/display/blizzard.h" -#include "hw/input/lm832x.h" -#include "hw/input/tsc2xxx.h" -#include "hw/misc/cbus.h" -#include "hw/sensor/tmp105.h" -#include "hw/qdev-properties.h" -#include "hw/block/flash.h" -#include "hw/hw.h" -#include "hw/loader.h" -#include "hw/sysbus.h" -#include "qemu/log.h" -#include "qemu/error-report.h" - - -/* Nokia N8x0 support */ -struct n800_s { - struct omap_mpu_state_s *mpu; - - struct rfbi_chip_s blizzard; - struct { - void *opaque; - uint32_t (*txrx)(void *opaque, uint32_t value, int len); - uWireSlave *chip; - } ts; - - int keymap[0x80]; - DeviceState *kbd; - - DeviceState *usb; - void *retu; - void *tahvo; - DeviceState *nand; -}; - -/* GPIO pins */ -#define N8X0_TUSB_ENABLE_GPIO 0 -#define N800_MMC2_WP_GPIO 8 -#define N800_UNKNOWN_GPIO0 9 /* out */ -#define N810_MMC2_VIOSD_GPIO 9 -#define N810_HEADSET_AMP_GPIO 10 -#define N800_CAM_TURN_GPIO 12 -#define N810_GPS_RESET_GPIO 12 -#define N800_BLIZZARD_POWERDOWN_GPIO 15 -#define N800_MMC1_WP_GPIO 23 -#define N810_MMC2_VSD_GPIO 23 -#define N8X0_ONENAND_GPIO 26 -#define N810_BLIZZARD_RESET_GPIO 30 -#define N800_UNKNOWN_GPIO2 53 /* out */ -#define N8X0_TUSB_INT_GPIO 58 -#define N8X0_BT_WKUP_GPIO 61 -#define N8X0_STI_GPIO 62 -#define N8X0_CBUS_SEL_GPIO 64 -#define N8X0_CBUS_DAT_GPIO 65 -#define N8X0_CBUS_CLK_GPIO 66 -#define N8X0_WLAN_IRQ_GPIO 87 -#define N8X0_BT_RESET_GPIO 92 -#define N8X0_TEA5761_CS_GPIO 93 -#define N800_UNKNOWN_GPIO 94 -#define N810_TSC_RESET_GPIO 94 -#define N800_CAM_ACT_GPIO 95 -#define N810_GPS_WAKEUP_GPIO 95 -#define N8X0_MMC_CS_GPIO 96 -#define N8X0_WLAN_PWR_GPIO 97 -#define N8X0_BT_HOST_WKUP_GPIO 98 -#define N810_SPEAKER_AMP_GPIO 101 -#define N810_KB_LOCK_GPIO 102 -#define N800_TSC_TS_GPIO 103 -#define N810_TSC_TS_GPIO 106 -#define N8X0_HEADPHONE_GPIO 107 -#define N8X0_RETU_GPIO 108 -#define N800_TSC_KP_IRQ_GPIO 109 -#define N810_KEYBOARD_GPIO 109 -#define N800_BAT_COVER_GPIO 110 -#define N810_SLIDE_GPIO 110 -#define N8X0_TAHVO_GPIO 111 -#define N800_UNKNOWN_GPIO4 112 /* out */ -#define N810_SLEEPX_LED_GPIO 112 -#define N800_TSC_RESET_GPIO 118 /* ? 
*/ -#define N810_AIC33_RESET_GPIO 118 -#define N800_TSC_UNKNOWN_GPIO 119 /* out */ -#define N8X0_TMP105_GPIO 125 - -/* Config */ -#define BT_UART 0 -#define XLDR_LL_UART 1 - -/* Addresses on the I2C bus 0 */ -#define N810_TLV320AIC33_ADDR 0x18 /* Audio CODEC */ -#define N8X0_TCM825x_ADDR 0x29 /* Camera */ -#define N810_LP5521_ADDR 0x32 /* LEDs */ -#define N810_TSL2563_ADDR 0x3d /* Light sensor */ -#define N810_LM8323_ADDR 0x45 /* Keyboard */ -/* Addresses on the I2C bus 1 */ -#define N8X0_TMP105_ADDR 0x48 /* Temperature sensor */ -#define N8X0_MENELAUS_ADDR 0x72 /* Power management */ - -/* Chipselects on GPMC NOR interface */ -#define N8X0_ONENAND_CS 0 -#define N8X0_USB_ASYNC_CS 1 -#define N8X0_USB_SYNC_CS 4 - -#define N8X0_BD_ADDR 0x00, 0x1a, 0x89, 0x9e, 0x3e, 0x81 - -static void n800_mmc_cs_cb(void *opaque, int line, int level) -{ - /* TODO: this seems to actually be connected to the menelaus, to - * which also both MMC slots connect. */ - omap_mmc_enable((struct omap_mmc_s *) opaque, !level); -} - -static void n8x0_gpio_setup(struct n800_s *s) -{ - qdev_connect_gpio_out(s->mpu->gpio, N8X0_MMC_CS_GPIO, - qemu_allocate_irq(n800_mmc_cs_cb, s->mpu->mmc, 0)); - qemu_irq_lower(qdev_get_gpio_in(s->mpu->gpio, N800_BAT_COVER_GPIO)); -} - -#define MAEMO_CAL_HEADER(...) \ - 'C', 'o', 'n', 'F', 0x02, 0x00, 0x04, 0x00, \ - __VA_ARGS__, \ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - -static const uint8_t n8x0_cal_wlan_mac[] = { - MAEMO_CAL_HEADER('w', 'l', 'a', 'n', '-', 'm', 'a', 'c') - 0x1c, 0x00, 0x00, 0x00, 0x47, 0xd6, 0x69, 0xb3, - 0x30, 0x08, 0xa0, 0x83, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, - 0x89, 0x00, 0x00, 0x00, 0x9e, 0x00, 0x00, 0x00, - 0x5d, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, -}; - -static const uint8_t n8x0_cal_bt_id[] = { - MAEMO_CAL_HEADER('b', 't', '-', 'i', 'd', 0, 0, 0) - 0x0a, 0x00, 0x00, 0x00, 0xa3, 0x4b, 0xf6, 0x96, - 0xa8, 0xeb, 0xb2, 0x41, 0x00, 0x00, 0x00, 0x00, - N8X0_BD_ADDR, -}; - -static void n8x0_nand_setup(struct n800_s *s) -{ - char *otp_region; - DriveInfo *dinfo; - - s->nand = qdev_new("onenand"); - qdev_prop_set_uint16(s->nand, "manufacturer_id", NAND_MFR_SAMSUNG); - /* Either 0x40 or 0x48 are OK for the device ID */ - qdev_prop_set_uint16(s->nand, "device_id", 0x48); - qdev_prop_set_uint16(s->nand, "version_id", 0); - qdev_prop_set_int32(s->nand, "shift", 1); - dinfo = drive_get(IF_MTD, 0, 0); - if (dinfo) { - qdev_prop_set_drive_err(s->nand, "drive", blk_by_legacy_dinfo(dinfo), - &error_fatal); - } - sysbus_realize_and_unref(SYS_BUS_DEVICE(s->nand), &error_fatal); - sysbus_connect_irq(SYS_BUS_DEVICE(s->nand), 0, - qdev_get_gpio_in(s->mpu->gpio, N8X0_ONENAND_GPIO)); - omap_gpmc_attach(s->mpu->gpmc, N8X0_ONENAND_CS, - sysbus_mmio_get_region(SYS_BUS_DEVICE(s->nand), 0)); - otp_region = onenand_raw_otp(s->nand); - - memcpy(otp_region + 0x000, n8x0_cal_wlan_mac, sizeof(n8x0_cal_wlan_mac)); - memcpy(otp_region + 0x800, n8x0_cal_bt_id, sizeof(n8x0_cal_bt_id)); - /* XXX: in theory should also update the OOB for both pages */ -} - -static qemu_irq n8x0_system_powerdown; - -static void n8x0_powerdown_req(Notifier *n, void *opaque) -{ - qemu_irq_raise(n8x0_system_powerdown); -} - -static Notifier n8x0_system_powerdown_notifier = { - .notify = n8x0_powerdown_req -}; - -static void n8x0_i2c_setup(struct n800_s *s) -{ - DeviceState *dev; - qemu_irq tmp_irq = qdev_get_gpio_in(s->mpu->gpio, N8X0_TMP105_GPIO); - I2CBus *i2c = omap_i2c_bus(s->mpu->i2c[0]); - - /* Attach a menelaus PM chip */ - dev = 
DEVICE(i2c_slave_create_simple(i2c, "twl92230", N8X0_MENELAUS_ADDR)); - qdev_connect_gpio_out(dev, 3, - qdev_get_gpio_in(s->mpu->ih[0], - OMAP_INT_24XX_SYS_NIRQ)); - - n8x0_system_powerdown = qdev_get_gpio_in(dev, 3); - qemu_register_powerdown_notifier(&n8x0_system_powerdown_notifier); - - /* Attach a TMP105 PM chip (A0 wired to ground) */ - dev = DEVICE(i2c_slave_create_simple(i2c, TYPE_TMP105, N8X0_TMP105_ADDR)); - qdev_connect_gpio_out(dev, 0, tmp_irq); -} - -/* Touchscreen and keypad controller */ -static const MouseTransformInfo n800_pointercal = { - .x = 800, - .y = 480, - .a = { 14560, -68, -3455208, -39, -9621, 35152972, 65536 }, -}; - -static const MouseTransformInfo n810_pointercal = { - .x = 800, - .y = 480, - .a = { 15041, 148, -4731056, 171, -10238, 35933380, 65536 }, -}; - -#define RETU_KEYCODE 61 /* F3 */ - -static void n800_key_event(void *opaque, int keycode) -{ - struct n800_s *s = (struct n800_s *) opaque; - int code = s->keymap[keycode & 0x7f]; - - if (code == -1) { - if ((keycode & 0x7f) == RETU_KEYCODE) { - retu_key_event(s->retu, !(keycode & 0x80)); - } - return; - } - - tsc210x_key_event(s->ts.chip, code, !(keycode & 0x80)); -} - -static const int n800_keys[16] = { - -1, - 72, /* Up */ - 63, /* Home (F5) */ - -1, - 75, /* Left */ - 28, /* Enter */ - 77, /* Right */ - -1, - 1, /* Cycle (ESC) */ - 80, /* Down */ - 62, /* Menu (F4) */ - -1, - 66, /* Zoom- (F8) */ - 64, /* FullScreen (F6) */ - 65, /* Zoom+ (F7) */ - -1, -}; - -static void n800_tsc_kbd_setup(struct n800_s *s) -{ - int i; - - /* XXX: are the three pins inverted inside the chip between the - * tsc and the cpu (N4111)? */ - qemu_irq penirq = NULL; /* NC */ - qemu_irq kbirq = qdev_get_gpio_in(s->mpu->gpio, N800_TSC_KP_IRQ_GPIO); - qemu_irq dav = qdev_get_gpio_in(s->mpu->gpio, N800_TSC_TS_GPIO); - - s->ts.chip = tsc2301_init(penirq, kbirq, dav); - s->ts.opaque = s->ts.chip->opaque; - s->ts.txrx = tsc210x_txrx; - - for (i = 0; i < 0x80; i++) { - s->keymap[i] = -1; - } - for (i = 0; i < 0x10; i++) { - if (n800_keys[i] >= 0) { - s->keymap[n800_keys[i]] = i; - } - } - - qemu_add_kbd_event_handler(n800_key_event, s); - - tsc210x_set_transform(s->ts.chip, &n800_pointercal); -} - -static void n810_tsc_setup(struct n800_s *s) -{ - qemu_irq pintdav = qdev_get_gpio_in(s->mpu->gpio, N810_TSC_TS_GPIO); - - s->ts.opaque = tsc2005_init(pintdav); - s->ts.txrx = tsc2005_txrx; - - tsc2005_set_transform(s->ts.opaque, &n810_pointercal); -} - -/* N810 Keyboard controller */ -static void n810_key_event(void *opaque, int keycode) -{ - struct n800_s *s = (struct n800_s *) opaque; - int code = s->keymap[keycode & 0x7f]; - - if (code == -1) { - if ((keycode & 0x7f) == RETU_KEYCODE) { - retu_key_event(s->retu, !(keycode & 0x80)); - } - return; - } - - lm832x_key_event(s->kbd, code, !(keycode & 0x80)); -} - -#define M 0 - -static const int n810_keys[0x80] = { - [0x01] = 16, /* Q */ - [0x02] = 37, /* K */ - [0x03] = 24, /* O */ - [0x04] = 25, /* P */ - [0x05] = 14, /* Backspace */ - [0x06] = 30, /* A */ - [0x07] = 31, /* S */ - [0x08] = 32, /* D */ - [0x09] = 33, /* F */ - [0x0a] = 34, /* G */ - [0x0b] = 35, /* H */ - [0x0c] = 36, /* J */ - - [0x11] = 17, /* W */ - [0x12] = 62, /* Menu (F4) */ - [0x13] = 38, /* L */ - [0x14] = 40, /* ' (Apostrophe) */ - [0x16] = 44, /* Z */ - [0x17] = 45, /* X */ - [0x18] = 46, /* C */ - [0x19] = 47, /* V */ - [0x1a] = 48, /* B */ - [0x1b] = 49, /* N */ - [0x1c] = 42, /* Shift (Left shift) */ - [0x1f] = 65, /* Zoom+ (F7) */ - - [0x21] = 18, /* E */ - [0x22] = 39, /* ; (Semicolon) */ - [0x23] = 12, /* - 
(Minus) */ - [0x24] = 13, /* = (Equal) */ - [0x2b] = 56, /* Fn (Left Alt) */ - [0x2c] = 50, /* M */ - [0x2f] = 66, /* Zoom- (F8) */ - - [0x31] = 19, /* R */ - [0x32] = 29 | M, /* Right Ctrl */ - [0x34] = 57, /* Space */ - [0x35] = 51, /* , (Comma) */ - [0x37] = 72 | M, /* Up */ - [0x3c] = 82 | M, /* Compose (Insert) */ - [0x3f] = 64, /* FullScreen (F6) */ - - [0x41] = 20, /* T */ - [0x44] = 52, /* . (Dot) */ - [0x46] = 77 | M, /* Right */ - [0x4f] = 63, /* Home (F5) */ - [0x51] = 21, /* Y */ - [0x53] = 80 | M, /* Down */ - [0x55] = 28, /* Enter */ - [0x5f] = 1, /* Cycle (ESC) */ - - [0x61] = 22, /* U */ - [0x64] = 75 | M, /* Left */ - - [0x71] = 23, /* I */ -#if 0 - [0x75] = 28 | M, /* KP Enter (KP Enter) */ -#else - [0x75] = 15, /* KP Enter (Tab) */ -#endif -}; - -#undef M - -static void n810_kbd_setup(struct n800_s *s) -{ - qemu_irq kbd_irq = qdev_get_gpio_in(s->mpu->gpio, N810_KEYBOARD_GPIO); - int i; - - for (i = 0; i < 0x80; i++) { - s->keymap[i] = -1; - } - for (i = 0; i < 0x80; i++) { - if (n810_keys[i] > 0) { - s->keymap[n810_keys[i]] = i; - } - } - - qemu_add_kbd_event_handler(n810_key_event, s); - - /* Attach the LM8322 keyboard to the I2C bus, - * should happen in n8x0_i2c_setup and s->kbd be initialised here. */ - s->kbd = DEVICE(i2c_slave_create_simple(omap_i2c_bus(s->mpu->i2c[0]), - TYPE_LM8323, N810_LM8323_ADDR)); - qdev_connect_gpio_out(s->kbd, 0, kbd_irq); -} - -/* LCD MIPI DBI-C controller (URAL) */ -struct mipid_s { - int resp[4]; - int param[4]; - int p; - int pm; - int cmd; - - int sleep; - int booster; - int te; - int selfcheck; - int partial; - int normal; - int vscr; - int invert; - int onoff; - int gamma; - uint32_t id; -}; - -static void mipid_reset(struct mipid_s *s) -{ - s->pm = 0; - s->cmd = 0; - - s->sleep = 1; - s->booster = 0; - s->selfcheck = - (1 << 7) | /* Register loading OK. */ - (1 << 5) | /* The chip is attached. */ - (1 << 4); /* Display glass still in one piece. */ - s->te = 0; - s->partial = 0; - s->normal = 1; - s->vscr = 0; - s->invert = 0; - s->onoff = 1; - s->gamma = 0; -} - -static uint32_t mipid_txrx(void *opaque, uint32_t cmd, int len) -{ - struct mipid_s *s = (struct mipid_s *) opaque; - uint8_t ret; - - if (len > 9) { - hw_error("%s: FIXME: bad SPI word width %i\n", __func__, len); - } - - if (s->p >= ARRAY_SIZE(s->resp)) { - ret = 0; - } else { - ret = s->resp[s->p++]; - } - if (s->pm-- > 0) { - s->param[s->pm] = cmd; - } else { - s->cmd = cmd; - } - - switch (s->cmd) { - case 0x00: /* NOP */ - break; - - case 0x01: /* SWRESET */ - mipid_reset(s); - break; - - case 0x02: /* BSTROFF */ - s->booster = 0; - break; - case 0x03: /* BSTRON */ - s->booster = 1; - break; - - case 0x04: /* RDDID */ - s->p = 0; - s->resp[0] = (s->id >> 16) & 0xff; - s->resp[1] = (s->id >> 8) & 0xff; - s->resp[2] = (s->id >> 0) & 0xff; - break; - - case 0x06: /* RD_RED */ - case 0x07: /* RD_GREEN */ - /* XXX the bootloader sometimes issues RD_BLUE meaning RDDID so - * for the bootloader one needs to change this. 
*/ - case 0x08: /* RD_BLUE */ - s->p = 0; - /* TODO: return first pixel components */ - s->resp[0] = 0x01; - break; - - case 0x09: /* RDDST */ - s->p = 0; - s->resp[0] = s->booster << 7; - s->resp[1] = (5 << 4) | (s->partial << 2) | - (s->sleep << 1) | s->normal; - s->resp[2] = (s->vscr << 7) | (s->invert << 5) | - (s->onoff << 2) | (s->te << 1) | (s->gamma >> 2); - s->resp[3] = s->gamma << 6; - break; - - case 0x0a: /* RDDPM */ - s->p = 0; - s->resp[0] = (s->onoff << 2) | (s->normal << 3) | (s->sleep << 4) | - (s->partial << 5) | (s->sleep << 6) | (s->booster << 7); - break; - case 0x0b: /* RDDMADCTR */ - s->p = 0; - s->resp[0] = 0; - break; - case 0x0c: /* RDDCOLMOD */ - s->p = 0; - s->resp[0] = 5; /* 65K colours */ - break; - case 0x0d: /* RDDIM */ - s->p = 0; - s->resp[0] = (s->invert << 5) | (s->vscr << 7) | s->gamma; - break; - case 0x0e: /* RDDSM */ - s->p = 0; - s->resp[0] = s->te << 7; - break; - case 0x0f: /* RDDSDR */ - s->p = 0; - s->resp[0] = s->selfcheck; - break; - - case 0x10: /* SLPIN */ - s->sleep = 1; - break; - case 0x11: /* SLPOUT */ - s->sleep = 0; - s->selfcheck ^= 1 << 6; /* POFF self-diagnosis Ok */ - break; - - case 0x12: /* PTLON */ - s->partial = 1; - s->normal = 0; - s->vscr = 0; - break; - case 0x13: /* NORON */ - s->partial = 0; - s->normal = 1; - s->vscr = 0; - break; - - case 0x20: /* INVOFF */ - s->invert = 0; - break; - case 0x21: /* INVON */ - s->invert = 1; - break; - - case 0x22: /* APOFF */ - case 0x23: /* APON */ - goto bad_cmd; - - case 0x25: /* WRCNTR */ - if (s->pm < 0) { - s->pm = 1; - } - goto bad_cmd; - - case 0x26: /* GAMSET */ - if (!s->pm) { - s->gamma = ctz32(s->param[0] & 0xf); - if (s->gamma == 32) { - s->gamma = -1; /* XXX: should this be 0? */ - } - } else if (s->pm < 0) { - s->pm = 1; - } - break; - - case 0x28: /* DISPOFF */ - s->onoff = 0; - break; - case 0x29: /* DISPON */ - s->onoff = 1; - break; - - case 0x2a: /* CASET */ - case 0x2b: /* RASET */ - case 0x2c: /* RAMWR */ - case 0x2d: /* RGBSET */ - case 0x2e: /* RAMRD */ - case 0x30: /* PTLAR */ - case 0x33: /* SCRLAR */ - goto bad_cmd; - - case 0x34: /* TEOFF */ - s->te = 0; - break; - case 0x35: /* TEON */ - if (!s->pm) { - s->te = 1; - } else if (s->pm < 0) { - s->pm = 1; - } - break; - - case 0x36: /* MADCTR */ - goto bad_cmd; - - case 0x37: /* VSCSAD */ - s->partial = 0; - s->normal = 0; - s->vscr = 1; - break; - - case 0x38: /* IDMOFF */ - case 0x39: /* IDMON */ - case 0x3a: /* COLMOD */ - goto bad_cmd; - - case 0xb0: /* CLKINT / DISCTL */ - case 0xb1: /* CLKEXT */ - if (s->pm < 0) { - s->pm = 2; - } - break; - - case 0xb4: /* FRMSEL */ - break; - - case 0xb5: /* FRM8SEL */ - case 0xb6: /* TMPRNG / INIESC */ - case 0xb7: /* TMPHIS / NOP2 */ - case 0xb8: /* TMPREAD / MADCTL */ - case 0xba: /* DISTCTR */ - case 0xbb: /* EPVOL */ - goto bad_cmd; - - case 0xbd: /* Unknown */ - s->p = 0; - s->resp[0] = 0; - s->resp[1] = 1; - break; - - case 0xc2: /* IFMOD */ - if (s->pm < 0) { - s->pm = 2; - } - break; - - case 0xc6: /* PWRCTL */ - case 0xc7: /* PPWRCTL */ - case 0xd0: /* EPWROUT */ - case 0xd1: /* EPWRIN */ - case 0xd4: /* RDEV */ - case 0xd5: /* RDRR */ - goto bad_cmd; - - case 0xda: /* RDID1 */ - s->p = 0; - s->resp[0] = (s->id >> 16) & 0xff; - break; - case 0xdb: /* RDID2 */ - s->p = 0; - s->resp[0] = (s->id >> 8) & 0xff; - break; - case 0xdc: /* RDID3 */ - s->p = 0; - s->resp[0] = (s->id >> 0) & 0xff; - break; - - default: - bad_cmd: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: unknown command 0x%02x\n", __func__, s->cmd); - break; - } - - return ret; -} - -static void 
*mipid_init(void) -{ - struct mipid_s *s = g_malloc0(sizeof(*s)); - - s->id = 0x838f03; - mipid_reset(s); - - return s; -} - -static void n8x0_spi_setup(struct n800_s *s) -{ - void *tsc = s->ts.opaque; - void *mipid = mipid_init(); - - omap_mcspi_attach(s->mpu->mcspi[0], s->ts.txrx, tsc, 0); - omap_mcspi_attach(s->mpu->mcspi[0], mipid_txrx, mipid, 1); -} - -/* This task is normally performed by the bootloader. If we're loading - * a kernel directly, we need to enable the Blizzard ourselves. */ -static void n800_dss_init(struct rfbi_chip_s *chip) -{ - uint8_t *fb_blank; - - chip->write(chip->opaque, 0, 0x2a); /* LCD Width register */ - chip->write(chip->opaque, 1, 0x64); - chip->write(chip->opaque, 0, 0x2c); /* LCD HNDP register */ - chip->write(chip->opaque, 1, 0x1e); - chip->write(chip->opaque, 0, 0x2e); /* LCD Height 0 register */ - chip->write(chip->opaque, 1, 0xe0); - chip->write(chip->opaque, 0, 0x30); /* LCD Height 1 register */ - chip->write(chip->opaque, 1, 0x01); - chip->write(chip->opaque, 0, 0x32); /* LCD VNDP register */ - chip->write(chip->opaque, 1, 0x06); - chip->write(chip->opaque, 0, 0x68); /* Display Mode register */ - chip->write(chip->opaque, 1, 1); /* Enable bit */ - - chip->write(chip->opaque, 0, 0x6c); - chip->write(chip->opaque, 1, 0x00); /* Input X Start Position */ - chip->write(chip->opaque, 1, 0x00); /* Input X Start Position */ - chip->write(chip->opaque, 1, 0x00); /* Input Y Start Position */ - chip->write(chip->opaque, 1, 0x00); /* Input Y Start Position */ - chip->write(chip->opaque, 1, 0x1f); /* Input X End Position */ - chip->write(chip->opaque, 1, 0x03); /* Input X End Position */ - chip->write(chip->opaque, 1, 0xdf); /* Input Y End Position */ - chip->write(chip->opaque, 1, 0x01); /* Input Y End Position */ - chip->write(chip->opaque, 1, 0x00); /* Output X Start Position */ - chip->write(chip->opaque, 1, 0x00); /* Output X Start Position */ - chip->write(chip->opaque, 1, 0x00); /* Output Y Start Position */ - chip->write(chip->opaque, 1, 0x00); /* Output Y Start Position */ - chip->write(chip->opaque, 1, 0x1f); /* Output X End Position */ - chip->write(chip->opaque, 1, 0x03); /* Output X End Position */ - chip->write(chip->opaque, 1, 0xdf); /* Output Y End Position */ - chip->write(chip->opaque, 1, 0x01); /* Output Y End Position */ - chip->write(chip->opaque, 1, 0x01); /* Input Data Format */ - chip->write(chip->opaque, 1, 0x01); /* Data Source Select */ - - fb_blank = memset(g_malloc(800 * 480 * 2), 0xff, 800 * 480 * 2); - /* Display Memory Data Port */ - chip->block(chip->opaque, 1, fb_blank, 800 * 480 * 2, 800); - g_free(fb_blank); -} - -static void n8x0_dss_setup(struct n800_s *s) -{ - s->blizzard.opaque = s1d13745_init(NULL); - s->blizzard.block = s1d13745_write_block; - s->blizzard.write = s1d13745_write; - s->blizzard.read = s1d13745_read; - - omap_rfbi_attach(s->mpu->dss, 0, &s->blizzard); -} - -static void n8x0_cbus_setup(struct n800_s *s) -{ - qemu_irq dat_out = qdev_get_gpio_in(s->mpu->gpio, N8X0_CBUS_DAT_GPIO); - qemu_irq retu_irq = qdev_get_gpio_in(s->mpu->gpio, N8X0_RETU_GPIO); - qemu_irq tahvo_irq = qdev_get_gpio_in(s->mpu->gpio, N8X0_TAHVO_GPIO); - - CBus *cbus = cbus_init(dat_out); - - qdev_connect_gpio_out(s->mpu->gpio, N8X0_CBUS_CLK_GPIO, cbus->clk); - qdev_connect_gpio_out(s->mpu->gpio, N8X0_CBUS_DAT_GPIO, cbus->dat); - qdev_connect_gpio_out(s->mpu->gpio, N8X0_CBUS_SEL_GPIO, cbus->sel); - - cbus_attach(cbus, s->retu = retu_init(retu_irq, 1)); - cbus_attach(cbus, s->tahvo = tahvo_init(tahvo_irq, 1)); -} - -static void 
n8x0_usb_setup(struct n800_s *s) -{ - SysBusDevice *dev; - s->usb = qdev_new("tusb6010"); - dev = SYS_BUS_DEVICE(s->usb); - sysbus_realize_and_unref(dev, &error_fatal); - sysbus_connect_irq(dev, 0, - qdev_get_gpio_in(s->mpu->gpio, N8X0_TUSB_INT_GPIO)); - /* Using the NOR interface */ - omap_gpmc_attach(s->mpu->gpmc, N8X0_USB_ASYNC_CS, - sysbus_mmio_get_region(dev, 0)); - omap_gpmc_attach(s->mpu->gpmc, N8X0_USB_SYNC_CS, - sysbus_mmio_get_region(dev, 1)); - qdev_connect_gpio_out(s->mpu->gpio, N8X0_TUSB_ENABLE_GPIO, - qdev_get_gpio_in(s->usb, 0)); /* tusb_pwr */ -} - -/* Setup done before the main bootloader starts by some early setup code - * - used when we want to run the main bootloader in emulation. This - * isn't documented. */ -static const uint32_t n800_pinout[104] = { - 0x080f00d8, 0x00d40808, 0x03080808, 0x080800d0, - 0x00dc0808, 0x0b0f0f00, 0x080800b4, 0x00c00808, - 0x08080808, 0x180800c4, 0x00b80000, 0x08080808, - 0x080800bc, 0x00cc0808, 0x08081818, 0x18180128, - 0x01241800, 0x18181818, 0x000000f0, 0x01300000, - 0x00001b0b, 0x1b0f0138, 0x00e0181b, 0x1b031b0b, - 0x180f0078, 0x00740018, 0x0f0f0f1a, 0x00000080, - 0x007c0000, 0x00000000, 0x00000088, 0x00840000, - 0x00000000, 0x00000094, 0x00980300, 0x0f180003, - 0x0000008c, 0x00900f0f, 0x0f0f1b00, 0x0f00009c, - 0x01140000, 0x1b1b0f18, 0x0818013c, 0x01400008, - 0x00001818, 0x000b0110, 0x010c1800, 0x0b030b0f, - 0x181800f4, 0x00f81818, 0x00000018, 0x000000fc, - 0x00401808, 0x00000000, 0x0f1b0030, 0x003c0008, - 0x00000000, 0x00000038, 0x00340000, 0x00000000, - 0x1a080070, 0x00641a1a, 0x08080808, 0x08080060, - 0x005c0808, 0x08080808, 0x08080058, 0x00540808, - 0x08080808, 0x0808006c, 0x00680808, 0x08080808, - 0x000000a8, 0x00b00000, 0x08080808, 0x000000a0, - 0x00a40000, 0x00000000, 0x08ff0050, 0x004c0808, - 0xffffffff, 0xffff0048, 0x0044ffff, 0xffffffff, - 0x000000ac, 0x01040800, 0x08080b0f, 0x18180100, - 0x01081818, 0x0b0b1808, 0x1a0300e4, 0x012c0b1a, - 0x02020018, 0x0b000134, 0x011c0800, 0x0b1b1b00, - 0x0f0000c8, 0x00ec181b, 0x000f0f02, 0x00180118, - 0x01200000, 0x0f0b1b1b, 0x0f0200e8, 0x0000020b, -}; - -static void n800_setup_nolo_tags(void *sram_base) -{ - int i; - uint32_t *p = sram_base + 0x8000; - uint32_t *v = sram_base + 0xa000; - - memset(p, 0, 0x3000); - - strcpy((void *) (p + 0), "QEMU N800"); - - strcpy((void *) (p + 8), "F5"); - - stl_p(p + 10, 0x04f70000); - strcpy((void *) (p + 9), "RX-34"); - - /* RAM size in MB? */ - stl_p(p + 12, 0x80); - - /* Pointer to the list of tags */ - stl_p(p + 13, OMAP2_SRAM_BASE + 0x9000); - - /* The NOLO tags start here */ - p = sram_base + 0x9000; -#define ADD_TAG(tag, len) \ - stw_p((uint16_t *) p + 0, tag); \ - stw_p((uint16_t *) p + 1, len); p++; \ - stl_p(p++, OMAP2_SRAM_BASE | (((void *) v - sram_base) & 0xffff)); - - /* OMAP STI console? Pin out settings? */ - ADD_TAG(0x6e01, 414); - for (i = 0; i < ARRAY_SIZE(n800_pinout); i++) { - stl_p(v++, n800_pinout[i]); - } - - /* Kernel memsize? */ - ADD_TAG(0x6e05, 1); - stl_p(v++, 2); - - /* NOLO serial console */ - ADD_TAG(0x6e02, 4); - stl_p(v++, XLDR_LL_UART); /* UART number (1 - 3) */ - -#if 0 - /* CBUS settings (Retu/AVilma) */ - ADD_TAG(0x6e03, 6); - stw_p((uint16_t *) v + 0, 65); /* CBUS GPIO0 */ - stw_p((uint16_t *) v + 1, 66); /* CBUS GPIO1 */ - stw_p((uint16_t *) v + 2, 64); /* CBUS GPIO2 */ - v += 2; -#endif - - /* Nokia ASIC BB5 (Retu/Tahvo) */ - ADD_TAG(0x6e0a, 4); - stw_p((uint16_t *) v + 0, 111); /* "Retu" interrupt GPIO */ - stw_p((uint16_t *) v + 1, 108); /* "Tahvo" interrupt GPIO */ - v++; - - /* LCD console? 
*/ - ADD_TAG(0x6e04, 4); - stw_p((uint16_t *) v + 0, 30); /* ??? */ - stw_p((uint16_t *) v + 1, 24); /* ??? */ - v++; - -#if 0 - /* LCD settings */ - ADD_TAG(0x6e06, 2); - stw_p((uint16_t *) (v++), 15); /* ??? */ -#endif - - /* I^2C (Menelaus) */ - ADD_TAG(0x6e07, 4); - stl_p(v++, 0x00720000); /* ??? */ - - /* Unknown */ - ADD_TAG(0x6e0b, 6); - stw_p((uint16_t *) v + 0, 94); /* ??? */ - stw_p((uint16_t *) v + 1, 23); /* ??? */ - stw_p((uint16_t *) v + 2, 0); /* ??? */ - v += 2; - - /* OMAP gpio switch info */ - ADD_TAG(0x6e0c, 80); - strcpy((void *) v, "bat_cover"); v += 3; - stw_p((uint16_t *) v + 0, 110); /* GPIO num ??? */ - stw_p((uint16_t *) v + 1, 1); /* GPIO num ??? */ - v += 2; - strcpy((void *) v, "cam_act"); v += 3; - stw_p((uint16_t *) v + 0, 95); /* GPIO num ??? */ - stw_p((uint16_t *) v + 1, 32); /* GPIO num ??? */ - v += 2; - strcpy((void *) v, "cam_turn"); v += 3; - stw_p((uint16_t *) v + 0, 12); /* GPIO num ??? */ - stw_p((uint16_t *) v + 1, 33); /* GPIO num ??? */ - v += 2; - strcpy((void *) v, "headphone"); v += 3; - stw_p((uint16_t *) v + 0, 107); /* GPIO num ??? */ - stw_p((uint16_t *) v + 1, 17); /* GPIO num ??? */ - v += 2; - - /* Bluetooth */ - ADD_TAG(0x6e0e, 12); - stl_p(v++, 0x5c623d01); /* ??? */ - stl_p(v++, 0x00000201); /* ??? */ - stl_p(v++, 0x00000000); /* ??? */ - - /* CX3110x WLAN settings */ - ADD_TAG(0x6e0f, 8); - stl_p(v++, 0x00610025); /* ??? */ - stl_p(v++, 0xffff0057); /* ??? */ - - /* MMC host settings */ - ADD_TAG(0x6e10, 12); - stl_p(v++, 0xffff000f); /* ??? */ - stl_p(v++, 0xffffffff); /* ??? */ - stl_p(v++, 0x00000060); /* ??? */ - - /* OneNAND chip select */ - ADD_TAG(0x6e11, 10); - stl_p(v++, 0x00000401); /* ??? */ - stl_p(v++, 0x0002003a); /* ??? */ - stl_p(v++, 0x00000002); /* ??? */ - - /* TEA5761 sensor settings */ - ADD_TAG(0x6e12, 2); - stl_p(v++, 93); /* GPIO num ??? */ - -#if 0 - /* Unknown tag */ - ADD_TAG(6e09, 0); - - /* Kernel UART / console */ - ADD_TAG(6e12, 0); -#endif - - /* End of the list */ - stl_p(p++, 0x00000000); - stl_p(p++, 0x00000000); -} - -/* This task is normally performed by the bootloader. If we're loading - * a kernel directly, we need to set up GPMC mappings ourselves. 
*/ -static void n800_gpmc_init(struct n800_s *s) -{ - uint32_t config7 = - (0xf << 8) | /* MASKADDRESS */ - (1 << 6) | /* CSVALID */ - (4 << 0); /* BASEADDRESS */ - - cpu_physical_memory_write(0x6800a078, /* GPMC_CONFIG7_0 */ - &config7, sizeof(config7)); -} - -/* Setup sequence done by the bootloader */ -static void n8x0_boot_init(void *opaque) -{ - struct n800_s *s = (struct n800_s *) opaque; - uint32_t buf; - - /* PRCM setup */ -#define omap_writel(addr, val) \ - buf = (val); \ - cpu_physical_memory_write(addr, &buf, sizeof(buf)) - - omap_writel(0x48008060, 0x41); /* PRCM_CLKSRC_CTRL */ - omap_writel(0x48008070, 1); /* PRCM_CLKOUT_CTRL */ - omap_writel(0x48008078, 0); /* PRCM_CLKEMUL_CTRL */ - omap_writel(0x48008090, 0); /* PRCM_VOLTSETUP */ - omap_writel(0x48008094, 0); /* PRCM_CLKSSETUP */ - omap_writel(0x48008098, 0); /* PRCM_POLCTRL */ - omap_writel(0x48008140, 2); /* CM_CLKSEL_MPU */ - omap_writel(0x48008148, 0); /* CM_CLKSTCTRL_MPU */ - omap_writel(0x48008158, 1); /* RM_RSTST_MPU */ - omap_writel(0x480081c8, 0x15); /* PM_WKDEP_MPU */ - omap_writel(0x480081d4, 0x1d4); /* PM_EVGENCTRL_MPU */ - omap_writel(0x480081d8, 0); /* PM_EVEGENONTIM_MPU */ - omap_writel(0x480081dc, 0); /* PM_EVEGENOFFTIM_MPU */ - omap_writel(0x480081e0, 0xc); /* PM_PWSTCTRL_MPU */ - omap_writel(0x48008200, 0x047e7ff7); /* CM_FCLKEN1_CORE */ - omap_writel(0x48008204, 0x00000004); /* CM_FCLKEN2_CORE */ - omap_writel(0x48008210, 0x047e7ff1); /* CM_ICLKEN1_CORE */ - omap_writel(0x48008214, 0x00000004); /* CM_ICLKEN2_CORE */ - omap_writel(0x4800821c, 0x00000000); /* CM_ICLKEN4_CORE */ - omap_writel(0x48008230, 0); /* CM_AUTOIDLE1_CORE */ - omap_writel(0x48008234, 0); /* CM_AUTOIDLE2_CORE */ - omap_writel(0x48008238, 7); /* CM_AUTOIDLE3_CORE */ - omap_writel(0x4800823c, 0); /* CM_AUTOIDLE4_CORE */ - omap_writel(0x48008240, 0x04360626); /* CM_CLKSEL1_CORE */ - omap_writel(0x48008244, 0x00000014); /* CM_CLKSEL2_CORE */ - omap_writel(0x48008248, 0); /* CM_CLKSTCTRL_CORE */ - omap_writel(0x48008300, 0x00000000); /* CM_FCLKEN_GFX */ - omap_writel(0x48008310, 0x00000000); /* CM_ICLKEN_GFX */ - omap_writel(0x48008340, 0x00000001); /* CM_CLKSEL_GFX */ - omap_writel(0x48008400, 0x00000004); /* CM_FCLKEN_WKUP */ - omap_writel(0x48008410, 0x00000004); /* CM_ICLKEN_WKUP */ - omap_writel(0x48008440, 0x00000000); /* CM_CLKSEL_WKUP */ - omap_writel(0x48008500, 0x000000cf); /* CM_CLKEN_PLL */ - omap_writel(0x48008530, 0x0000000c); /* CM_AUTOIDLE_PLL */ - omap_writel(0x48008540, /* CM_CLKSEL1_PLL */ - (0x78 << 12) | (6 << 8)); - omap_writel(0x48008544, 2); /* CM_CLKSEL2_PLL */ - - /* GPMC setup */ - n800_gpmc_init(s); - - /* Video setup */ - n800_dss_init(&s->blizzard); - - /* CPU setup */ - s->mpu->cpu->env.GE = 0x5; - - /* If the machine has a slided keyboard, open it */ - if (s->kbd) { - qemu_irq_raise(qdev_get_gpio_in(s->mpu->gpio, N810_SLIDE_GPIO)); - } -} - -#define OMAP_TAG_NOKIA_BT 0x4e01 -#define OMAP_TAG_WLAN_CX3110X 0x4e02 -#define OMAP_TAG_CBUS 0x4e03 -#define OMAP_TAG_EM_ASIC_BB5 0x4e04 - -static const struct omap_gpiosw_info_s { - const char *name; - int line; - int type; -} n800_gpiosw_info[] = { - { - "bat_cover", N800_BAT_COVER_GPIO, - OMAP_GPIOSW_TYPE_COVER | OMAP_GPIOSW_INVERTED, - }, { - "cam_act", N800_CAM_ACT_GPIO, - OMAP_GPIOSW_TYPE_ACTIVITY, - }, { - "cam_turn", N800_CAM_TURN_GPIO, - OMAP_GPIOSW_TYPE_ACTIVITY | OMAP_GPIOSW_INVERTED, - }, { - "headphone", N8X0_HEADPHONE_GPIO, - OMAP_GPIOSW_TYPE_CONNECTION | OMAP_GPIOSW_INVERTED, - }, - { /* end of list */ } -}, n810_gpiosw_info[] = { - { - "gps_reset", 
N810_GPS_RESET_GPIO, - OMAP_GPIOSW_TYPE_ACTIVITY | OMAP_GPIOSW_OUTPUT, - }, { - "gps_wakeup", N810_GPS_WAKEUP_GPIO, - OMAP_GPIOSW_TYPE_ACTIVITY | OMAP_GPIOSW_OUTPUT, - }, { - "headphone", N8X0_HEADPHONE_GPIO, - OMAP_GPIOSW_TYPE_CONNECTION | OMAP_GPIOSW_INVERTED, - }, { - "kb_lock", N810_KB_LOCK_GPIO, - OMAP_GPIOSW_TYPE_COVER | OMAP_GPIOSW_INVERTED, - }, { - "sleepx_led", N810_SLEEPX_LED_GPIO, - OMAP_GPIOSW_TYPE_ACTIVITY | OMAP_GPIOSW_INVERTED | OMAP_GPIOSW_OUTPUT, - }, { - "slide", N810_SLIDE_GPIO, - OMAP_GPIOSW_TYPE_COVER | OMAP_GPIOSW_INVERTED, - }, - { /* end of list */ } -}; - -static const struct omap_partition_info_s { - uint32_t offset; - uint32_t size; - int mask; - const char *name; -} n800_part_info[] = { - { 0x00000000, 0x00020000, 0x3, "bootloader" }, - { 0x00020000, 0x00060000, 0x0, "config" }, - { 0x00080000, 0x00200000, 0x0, "kernel" }, - { 0x00280000, 0x00200000, 0x3, "initfs" }, - { 0x00480000, 0x0fb80000, 0x3, "rootfs" }, - { /* end of list */ } -}, n810_part_info[] = { - { 0x00000000, 0x00020000, 0x3, "bootloader" }, - { 0x00020000, 0x00060000, 0x0, "config" }, - { 0x00080000, 0x00220000, 0x0, "kernel" }, - { 0x002a0000, 0x00400000, 0x0, "initfs" }, - { 0x006a0000, 0x0f960000, 0x0, "rootfs" }, - { /* end of list */ } -}; - -static const uint8_t n8x0_bd_addr[6] = { N8X0_BD_ADDR }; - -static int n8x0_atag_setup(void *p, int model) -{ - uint8_t *b; - uint16_t *w; - uint32_t *l; - const struct omap_gpiosw_info_s *gpiosw; - const struct omap_partition_info_s *partition; - const char *tag; - - w = p; - - stw_p(w++, OMAP_TAG_UART); /* u16 tag */ - stw_p(w++, 4); /* u16 len */ - stw_p(w++, (1 << 2) | (1 << 1) | (1 << 0)); /* uint enabled_uarts */ - w++; - -#if 0 - stw_p(w++, OMAP_TAG_SERIAL_CONSOLE); /* u16 tag */ - stw_p(w++, 4); /* u16 len */ - stw_p(w++, XLDR_LL_UART + 1); /* u8 console_uart */ - stw_p(w++, 115200); /* u32 console_speed */ -#endif - - stw_p(w++, OMAP_TAG_LCD); /* u16 tag */ - stw_p(w++, 36); /* u16 len */ - strcpy((void *) w, "QEMU LCD panel"); /* char panel_name[16] */ - w += 8; - strcpy((void *) w, "blizzard"); /* char ctrl_name[16] */ - w += 8; - stw_p(w++, N810_BLIZZARD_RESET_GPIO); /* TODO: n800 s16 nreset_gpio */ - stw_p(w++, 24); /* u8 data_lines */ - - stw_p(w++, OMAP_TAG_CBUS); /* u16 tag */ - stw_p(w++, 8); /* u16 len */ - stw_p(w++, N8X0_CBUS_CLK_GPIO); /* s16 clk_gpio */ - stw_p(w++, N8X0_CBUS_DAT_GPIO); /* s16 dat_gpio */ - stw_p(w++, N8X0_CBUS_SEL_GPIO); /* s16 sel_gpio */ - w++; - - stw_p(w++, OMAP_TAG_EM_ASIC_BB5); /* u16 tag */ - stw_p(w++, 4); /* u16 len */ - stw_p(w++, N8X0_RETU_GPIO); /* s16 retu_irq_gpio */ - stw_p(w++, N8X0_TAHVO_GPIO); /* s16 tahvo_irq_gpio */ - - gpiosw = (model == 810) ? 
n810_gpiosw_info : n800_gpiosw_info; - for (; gpiosw->name; gpiosw++) { - stw_p(w++, OMAP_TAG_GPIO_SWITCH); /* u16 tag */ - stw_p(w++, 20); /* u16 len */ - strcpy((void *) w, gpiosw->name); /* char name[12] */ - w += 6; - stw_p(w++, gpiosw->line); /* u16 gpio */ - stw_p(w++, gpiosw->type); - stw_p(w++, 0); - stw_p(w++, 0); - } - - stw_p(w++, OMAP_TAG_NOKIA_BT); /* u16 tag */ - stw_p(w++, 12); /* u16 len */ - b = (void *) w; - stb_p(b++, 0x01); /* u8 chip_type (CSR) */ - stb_p(b++, N8X0_BT_WKUP_GPIO); /* u8 bt_wakeup_gpio */ - stb_p(b++, N8X0_BT_HOST_WKUP_GPIO); /* u8 host_wakeup_gpio */ - stb_p(b++, N8X0_BT_RESET_GPIO); /* u8 reset_gpio */ - stb_p(b++, BT_UART + 1); /* u8 bt_uart */ - memcpy(b, &n8x0_bd_addr, 6); /* u8 bd_addr[6] */ - b += 6; - stb_p(b++, 0x02); /* u8 bt_sysclk (38.4) */ - w = (void *) b; - - stw_p(w++, OMAP_TAG_WLAN_CX3110X); /* u16 tag */ - stw_p(w++, 8); /* u16 len */ - stw_p(w++, 0x25); /* u8 chip_type */ - stw_p(w++, N8X0_WLAN_PWR_GPIO); /* s16 power_gpio */ - stw_p(w++, N8X0_WLAN_IRQ_GPIO); /* s16 irq_gpio */ - stw_p(w++, -1); /* s16 spi_cs_gpio */ - - stw_p(w++, OMAP_TAG_MMC); /* u16 tag */ - stw_p(w++, 16); /* u16 len */ - if (model == 810) { - stw_p(w++, 0x23f); /* unsigned flags */ - stw_p(w++, -1); /* s16 power_pin */ - stw_p(w++, -1); /* s16 switch_pin */ - stw_p(w++, -1); /* s16 wp_pin */ - stw_p(w++, 0x240); /* unsigned flags */ - stw_p(w++, 0xc000); /* s16 power_pin */ - stw_p(w++, 0x0248); /* s16 switch_pin */ - stw_p(w++, 0xc000); /* s16 wp_pin */ - } else { - stw_p(w++, 0xf); /* unsigned flags */ - stw_p(w++, -1); /* s16 power_pin */ - stw_p(w++, -1); /* s16 switch_pin */ - stw_p(w++, -1); /* s16 wp_pin */ - stw_p(w++, 0); /* unsigned flags */ - stw_p(w++, 0); /* s16 power_pin */ - stw_p(w++, 0); /* s16 switch_pin */ - stw_p(w++, 0); /* s16 wp_pin */ - } - - stw_p(w++, OMAP_TAG_TEA5761); /* u16 tag */ - stw_p(w++, 4); /* u16 len */ - stw_p(w++, N8X0_TEA5761_CS_GPIO); /* u16 enable_gpio */ - w++; - - partition = (model == 810) ? n810_part_info : n800_part_info; - for (; partition->name; partition++) { - stw_p(w++, OMAP_TAG_PARTITION); /* u16 tag */ - stw_p(w++, 28); /* u16 len */ - strcpy((void *) w, partition->name); /* char name[16] */ - l = (void *) (w + 8); - stl_p(l++, partition->size); /* unsigned int size */ - stl_p(l++, partition->offset); /* unsigned int offset */ - stl_p(l++, partition->mask); /* unsigned int mask_flags */ - w = (void *) l; - } - - stw_p(w++, OMAP_TAG_BOOT_REASON); /* u16 tag */ - stw_p(w++, 12); /* u16 len */ -#if 0 - strcpy((void *) w, "por"); /* char reason_str[12] */ - strcpy((void *) w, "charger"); /* char reason_str[12] */ - strcpy((void *) w, "32wd_to"); /* char reason_str[12] */ - strcpy((void *) w, "sw_rst"); /* char reason_str[12] */ - strcpy((void *) w, "mbus"); /* char reason_str[12] */ - strcpy((void *) w, "unknown"); /* char reason_str[12] */ - strcpy((void *) w, "swdg_to"); /* char reason_str[12] */ - strcpy((void *) w, "sec_vio"); /* char reason_str[12] */ - strcpy((void *) w, "pwr_key"); /* char reason_str[12] */ - strcpy((void *) w, "rtc_alarm"); /* char reason_str[12] */ -#else - strcpy((void *) w, "pwr_key"); /* char reason_str[12] */ -#endif - w += 6; - - tag = (model == 810) ? 
"RX-44" : "RX-34"; - stw_p(w++, OMAP_TAG_VERSION_STR); /* u16 tag */ - stw_p(w++, 24); /* u16 len */ - strcpy((void *) w, "product"); /* char component[12] */ - w += 6; - strcpy((void *) w, tag); /* char version[12] */ - w += 6; - - stw_p(w++, OMAP_TAG_VERSION_STR); /* u16 tag */ - stw_p(w++, 24); /* u16 len */ - strcpy((void *) w, "hw-build"); /* char component[12] */ - w += 6; - strcpy((void *) w, "QEMU "); - pstrcat((void *) w, 12, qemu_hw_version()); /* char version[12] */ - w += 6; - - tag = (model == 810) ? "1.1.10-qemu" : "1.1.6-qemu"; - stw_p(w++, OMAP_TAG_VERSION_STR); /* u16 tag */ - stw_p(w++, 24); /* u16 len */ - strcpy((void *) w, "nolo"); /* char component[12] */ - w += 6; - strcpy((void *) w, tag); /* char version[12] */ - w += 6; - - return (void *) w - p; -} - -static int n800_atag_setup(const struct arm_boot_info *info, void *p) -{ - return n8x0_atag_setup(p, 800); -} - -static int n810_atag_setup(const struct arm_boot_info *info, void *p) -{ - return n8x0_atag_setup(p, 810); -} - -static void n8x0_init(MachineState *machine, - struct arm_boot_info *binfo, int model) -{ - struct n800_s *s = g_malloc0(sizeof(*s)); - MachineClass *mc = MACHINE_GET_CLASS(machine); - - if (machine->ram_size != mc->default_ram_size) { - char *sz = size_to_str(mc->default_ram_size); - error_report("Invalid RAM size, should be %s", sz); - g_free(sz); - exit(EXIT_FAILURE); - } - binfo->ram_size = machine->ram_size; - - memory_region_add_subregion(get_system_memory(), OMAP2_Q2_BASE, - machine->ram); - - s->mpu = omap2420_mpu_init(machine->ram, machine->cpu_type); - - /* Setup peripherals - * - * Believed external peripherals layout in the N810: - * (spi bus 1) - * tsc2005 - * lcd_mipid - * (spi bus 2) - * Conexant cx3110x (WLAN) - * optional: pc2400m (WiMAX) - * (i2c bus 0) - * TLV320AIC33 (audio codec) - * TCM825x (camera by Toshiba) - * lp5521 (clever LEDs) - * tsl2563 (light sensor, hwmon, model 7, rev. 0) - * lm8323 (keypad, manf 00, rev 04) - * (i2c bus 1) - * tmp105 (temperature sensor, hwmon) - * menelaus (pm) - * (somewhere on i2c - maybe N800-only) - * tea5761 (FM tuner) - * (serial 0) - * GPS - * (some serial port) - * csr41814 (Bluetooth) - */ - n8x0_gpio_setup(s); - n8x0_nand_setup(s); - n8x0_i2c_setup(s); - if (model == 800) { - n800_tsc_kbd_setup(s); - } else if (model == 810) { - n810_tsc_setup(s); - n810_kbd_setup(s); - } - n8x0_spi_setup(s); - n8x0_dss_setup(s); - n8x0_cbus_setup(s); - n8x0_usb_setup(s); - - if (machine->kernel_filename) { - /* Or at the linux loader. */ - arm_load_kernel(s->mpu->cpu, machine, binfo); - - qemu_register_reset(n8x0_boot_init, s); - } - - if (option_rom[0].name && - (machine->boot_config.order[0] == 'n' || !machine->kernel_filename)) { - uint8_t *nolo_tags = g_new(uint8_t, 0x10000); - /* No, wait, better start at the ROM. */ - s->mpu->cpu->env.regs[15] = OMAP2_Q2_BASE + 0x400000; - - /* - * This is intended for loading the `secondary.bin' program from - * Nokia images (the NOLO bootloader). The entry point seems - * to be at OMAP2_Q2_BASE + 0x400000. - * - * The `2nd.bin' files contain some kind of earlier boot code and - * for them the entry point needs to be set to OMAP2_SRAM_BASE. - * - * The code above is for loading the `zImage' file from Nokia - * images. 
- */ - if (load_image_targphys(option_rom[0].name, - OMAP2_Q2_BASE + 0x400000, - machine->ram_size - 0x400000) < 0) { - error_report("Failed to load secondary bootloader %s", - option_rom[0].name); - exit(EXIT_FAILURE); - } - - n800_setup_nolo_tags(nolo_tags); - cpu_physical_memory_write(OMAP2_SRAM_BASE, nolo_tags, 0x10000); - g_free(nolo_tags); - } -} - -static struct arm_boot_info n800_binfo = { - .loader_start = OMAP2_Q2_BASE, - .board_id = 0x4f7, - .atag_board = n800_atag_setup, -}; - -static struct arm_boot_info n810_binfo = { - .loader_start = OMAP2_Q2_BASE, - /* 0x60c and 0x6bf (WiMAX Edition) have been assigned but are not - * used by some older versions of the bootloader and 5555 is used - * instead (including versions that shipped with many devices). */ - .board_id = 0x60c, - .atag_board = n810_atag_setup, -}; - -static void n800_init(MachineState *machine) -{ - n8x0_init(machine, &n800_binfo, 800); -} - -static void n810_init(MachineState *machine) -{ - n8x0_init(machine, &n810_binfo, 810); -} - -static void n800_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - - mc->desc = "Nokia N800 tablet aka. RX-34 (OMAP2420)"; - mc->init = n800_init; - mc->default_boot_order = ""; - mc->ignore_memory_transaction_failures = true; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm1136-r2"); - /* Actually two chips of 0x4000000 bytes each */ - mc->default_ram_size = 0x08000000; - mc->default_ram_id = "omap2.dram"; - mc->deprecation_reason = "machine is old and unmaintained"; - - machine_add_audiodev_property(mc); -} - -static const TypeInfo n800_type = { - .name = MACHINE_TYPE_NAME("n800"), - .parent = TYPE_MACHINE, - .class_init = n800_class_init, -}; - -static void n810_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - - mc->desc = "Nokia N810 tablet aka. 
RX-44 (OMAP2420)"; - mc->init = n810_init; - mc->default_boot_order = ""; - mc->ignore_memory_transaction_failures = true; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm1136-r2"); - /* Actually two chips of 0x4000000 bytes each */ - mc->default_ram_size = 0x08000000; - mc->default_ram_id = "omap2.dram"; - mc->deprecation_reason = "machine is old and unmaintained"; - - machine_add_audiodev_property(mc); -} - -static const TypeInfo n810_type = { - .name = MACHINE_TYPE_NAME("n810"), - .parent = TYPE_MACHINE, - .class_init = n810_class_init, -}; - -static void nseries_machine_init(void) -{ - type_register_static(&n800_type); - type_register_static(&n810_type); -} - -type_init(nseries_machine_init) diff --git a/hw/arm/olimex-stm32-h405.c b/hw/arm/olimex-stm32-h405.c index 4ad7b043be0..1f15620f9fd 100644 --- a/hw/arm/olimex-stm32-h405.c +++ b/hw/arm/olimex-stm32-h405.c @@ -51,7 +51,7 @@ static void olimex_stm32_h405_init(MachineState *machine) qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - armv7m_load_kernel(ARM_CPU(first_cpu), + armv7m_load_kernel(STM32F405_SOC(dev)->armv7m.cpu, machine->kernel_filename, 0, FLASH_SIZE); } diff --git a/hw/arm/omap1.c b/hw/arm/omap1.c index 86ee336e599..74458fb7c69 100644 --- a/hw/arm/omap1.c +++ b/hw/arm/omap1.c @@ -23,24 +23,26 @@ #include "qemu/main-loop.h" #include "qapi/error.h" #include "cpu.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/hw.h" #include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/arm/boot.h" #include "hw/arm/omap.h" -#include "sysemu/blockdev.h" -#include "sysemu/sysemu.h" +#include "hw/sd/sd.h" +#include "system/blockdev.h" +#include "system/system.h" #include "hw/arm/soc_dma.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "sysemu/rtc.h" +#include "system/qtest.h" +#include "system/reset.h" +#include "system/runstate.h" +#include "system/rtc.h" #include "qemu/range.h" #include "hw/sysbus.h" #include "qemu/cutils.h" #include "qemu/bcd.h" #include "target/arm/cpu-qom.h" +#include "trace.h" static inline void omap_log_badwidth(const char *funcname, hwaddr addr, int sz) { @@ -142,7 +144,7 @@ static inline void omap_timer_update(struct omap_mpu_timer_s *timer) int64_t expires; if (timer->enable && timer->st && timer->rate) { - timer->val = timer->reset_val; /* Should skip this on clk enable */ + timer->val = timer->reset_val; /* Should skip this on clk enable */ expires = muldiv64((uint64_t) timer->val << (timer->ptv + 1), NANOSECONDS_PER_SECOND, timer->rate); @@ -210,13 +212,13 @@ static uint64_t omap_mpu_timer_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* CNTL_TIMER */ + case 0x00: /* CNTL_TIMER */ return (s->enable << 5) | (s->ptv << 2) | (s->ar << 1) | s->st; - case 0x04: /* LOAD_TIM */ + case 0x04: /* LOAD_TIM */ break; - case 0x08: /* READ_TIM */ + case 0x08: /* READ_TIM */ return omap_timer_read(s); } @@ -235,7 +237,7 @@ static void omap_mpu_timer_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* CNTL_TIMER */ + case 0x00: /* CNTL_TIMER */ omap_timer_sync(s); s->enable = (value >> 5) & 1; s->ptv = (value >> 2) & 7; @@ -244,11 +246,11 @@ static void omap_mpu_timer_write(void *opaque, hwaddr addr, omap_timer_update(s); return; - case 0x04: /* LOAD_TIM */ + case 0x04: /* LOAD_TIM */ s->reset_val = value; return; - case 0x08: /* READ_TIM */ + case 0x08: /* READ_TIM */ OMAP_RO_REG(addr); break; @@ -316,14 +318,14 @@ static uint64_t 
omap_wd_timer_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* CNTL_TIMER */ + case 0x00: /* CNTL_TIMER */ return (s->timer.ptv << 9) | (s->timer.ar << 8) | (s->timer.st << 7) | (s->free << 1); - case 0x04: /* READ_TIMER */ + case 0x04: /* READ_TIMER */ return omap_timer_read(&s->timer); - case 0x08: /* TIMER_MODE */ + case 0x08: /* TIMER_MODE */ return s->mode << 15; } @@ -342,7 +344,7 @@ static void omap_wd_timer_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* CNTL_TIMER */ + case 0x00: /* CNTL_TIMER */ omap_timer_sync(&s->timer); s->timer.ptv = (value >> 9) & 7; s->timer.ar = (value >> 8) & 1; @@ -351,11 +353,11 @@ static void omap_wd_timer_write(void *opaque, hwaddr addr, omap_timer_update(&s->timer); break; - case 0x04: /* LOAD_TIMER */ + case 0x04: /* LOAD_TIMER */ s->timer.reset_val = value & 0xffff; break; - case 0x08: /* TIMER_MODE */ + case 0x08: /* TIMER_MODE */ if (!s->mode && ((value >> 15) & 1)) omap_clk_get(s->timer.clk); s->mode |= (value >> 15) & 1; @@ -440,13 +442,13 @@ static uint64_t omap_os_timer_read(void *opaque, hwaddr addr, } switch (offset) { - case 0x00: /* TVR */ + case 0x00: /* TVR */ return s->timer.reset_val; - case 0x04: /* TCR */ + case 0x04: /* TCR */ return omap_timer_read(&s->timer); - case 0x08: /* CR */ + case 0x08: /* CR */ return (s->timer.ar << 3) | (s->timer.it_ena << 2) | s->timer.st; default: @@ -468,15 +470,15 @@ static void omap_os_timer_write(void *opaque, hwaddr addr, } switch (offset) { - case 0x00: /* TVR */ + case 0x00: /* TVR */ s->timer.reset_val = value & 0x00ffffff; break; - case 0x04: /* TCR */ + case 0x04: /* TCR */ OMAP_RO_REG(addr); break; - case 0x08: /* CR */ + case 0x08: /* CR */ s->timer.ar = (value >> 3) & 1; s->timer.it_ena = (value >> 2) & 1; if (s->timer.st != (value & 1) || (value & 2)) { @@ -541,34 +543,34 @@ static uint64_t omap_ulpd_pm_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x14: /* IT_STATUS */ + case 0x14: /* IT_STATUS */ ret = s->ulpd_pm_regs[addr >> 2]; s->ulpd_pm_regs[addr >> 2] = 0; qemu_irq_lower(qdev_get_gpio_in(s->ih[1], OMAP_INT_GAUGE_32K)); return ret; - case 0x18: /* Reserved */ - case 0x1c: /* Reserved */ - case 0x20: /* Reserved */ - case 0x28: /* Reserved */ - case 0x2c: /* Reserved */ + case 0x18: /* Reserved */ + case 0x1c: /* Reserved */ + case 0x20: /* Reserved */ + case 0x28: /* Reserved */ + case 0x2c: /* Reserved */ OMAP_BAD_REG(addr); /* fall through */ - case 0x00: /* COUNTER_32_LSB */ - case 0x04: /* COUNTER_32_MSB */ - case 0x08: /* COUNTER_HIGH_FREQ_LSB */ - case 0x0c: /* COUNTER_HIGH_FREQ_MSB */ - case 0x10: /* GAUGING_CTRL */ - case 0x24: /* SETUP_ANALOG_CELL3_ULPD1 */ - case 0x30: /* CLOCK_CTRL */ - case 0x34: /* SOFT_REQ */ - case 0x38: /* COUNTER_32_FIQ */ - case 0x3c: /* DPLL_CTRL */ - case 0x40: /* STATUS_REQ */ + case 0x00: /* COUNTER_32_LSB */ + case 0x04: /* COUNTER_32_MSB */ + case 0x08: /* COUNTER_HIGH_FREQ_LSB */ + case 0x0c: /* COUNTER_HIGH_FREQ_MSB */ + case 0x10: /* GAUGING_CTRL */ + case 0x24: /* SETUP_ANALOG_CELL3_ULPD1 */ + case 0x30: /* CLOCK_CTRL */ + case 0x34: /* SOFT_REQ */ + case 0x38: /* COUNTER_32_FIQ */ + case 0x3c: /* DPLL_CTRL */ + case 0x40: /* STATUS_REQ */ /* XXX: check clk::usecount state for every clock */ - case 0x48: /* LOCL_TIME */ - case 0x4c: /* APLL_CTRL */ - case 0x50: /* POWER_CTRL */ + case 0x48: /* LOCL_TIME */ + case 0x4c: /* APLL_CTRL */ + case 0x50: /* POWER_CTRL */ return s->ulpd_pm_regs[addr >> 2]; } @@ -579,22 +581,22 @@ static uint64_t omap_ulpd_pm_read(void *opaque, hwaddr addr, static 
inline void omap_ulpd_clk_update(struct omap_mpu_state_s *s, uint16_t diff, uint16_t value) { - if (diff & (1 << 4)) /* USB_MCLK_EN */ + if (diff & (1 << 4)) /* USB_MCLK_EN */ omap_clk_onoff(omap_findclk(s, "usb_clk0"), (value >> 4) & 1); - if (diff & (1 << 5)) /* DIS_USB_PVCI_CLK */ + if (diff & (1 << 5)) /* DIS_USB_PVCI_CLK */ omap_clk_onoff(omap_findclk(s, "usb_w2fc_ck"), (~value >> 5) & 1); } static inline void omap_ulpd_req_update(struct omap_mpu_state_s *s, uint16_t diff, uint16_t value) { - if (diff & (1 << 0)) /* SOFT_DPLL_REQ */ + if (diff & (1 << 0)) /* SOFT_DPLL_REQ */ omap_clk_canidle(omap_findclk(s, "dpll4"), (~value >> 0) & 1); - if (diff & (1 << 1)) /* SOFT_COM_REQ */ + if (diff & (1 << 1)) /* SOFT_COM_REQ */ omap_clk_canidle(omap_findclk(s, "com_mclk_out"), (~value >> 1) & 1); - if (diff & (1 << 2)) /* SOFT_SDW_REQ */ + if (diff & (1 << 2)) /* SOFT_SDW_REQ */ omap_clk_canidle(omap_findclk(s, "bt_mclk_out"), (~value >> 2) & 1); - if (diff & (1 << 3)) /* SOFT_USB_REQ */ + if (diff & (1 << 3)) /* SOFT_USB_REQ */ omap_clk_canidle(omap_findclk(s, "usb_clk0"), (~value >> 3) & 1); } @@ -613,16 +615,16 @@ static void omap_ulpd_pm_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* COUNTER_32_LSB */ - case 0x04: /* COUNTER_32_MSB */ - case 0x08: /* COUNTER_HIGH_FREQ_LSB */ - case 0x0c: /* COUNTER_HIGH_FREQ_MSB */ - case 0x14: /* IT_STATUS */ - case 0x40: /* STATUS_REQ */ + case 0x00: /* COUNTER_32_LSB */ + case 0x04: /* COUNTER_32_MSB */ + case 0x08: /* COUNTER_HIGH_FREQ_LSB */ + case 0x0c: /* COUNTER_HIGH_FREQ_MSB */ + case 0x14: /* IT_STATUS */ + case 0x40: /* STATUS_REQ */ OMAP_RO_REG(addr); break; - case 0x10: /* GAUGING_CTRL */ + case 0x10: /* GAUGING_CTRL */ /* Bits 0 and 1 seem to be confused in the OMAP 310 TRM */ if ((s->ulpd_pm_regs[addr >> 2] ^ value) & 1) { now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); @@ -636,50 +638,50 @@ static void omap_ulpd_pm_write(void *opaque, hwaddr addr, ticks = muldiv64(now, 32768, NANOSECONDS_PER_SECOND); s->ulpd_pm_regs[0x00 >> 2] = (ticks >> 0) & 0xffff; s->ulpd_pm_regs[0x04 >> 2] = (ticks >> 16) & 0xffff; - if (ticks >> 32) /* OVERFLOW_32K */ + if (ticks >> 32) /* OVERFLOW_32K */ s->ulpd_pm_regs[0x14 >> 2] |= 1 << 2; /* High frequency ticks */ ticks = muldiv64(now, 12000000, NANOSECONDS_PER_SECOND); s->ulpd_pm_regs[0x08 >> 2] = (ticks >> 0) & 0xffff; s->ulpd_pm_regs[0x0c >> 2] = (ticks >> 16) & 0xffff; - if (ticks >> 32) /* OVERFLOW_HI_FREQ */ + if (ticks >> 32) /* OVERFLOW_HI_FREQ */ s->ulpd_pm_regs[0x14 >> 2] |= 1 << 1; - s->ulpd_pm_regs[0x14 >> 2] |= 1 << 0; /* IT_GAUGING */ + s->ulpd_pm_regs[0x14 >> 2] |= 1 << 0; /* IT_GAUGING */ qemu_irq_raise(qdev_get_gpio_in(s->ih[1], OMAP_INT_GAUGE_32K)); } } s->ulpd_pm_regs[addr >> 2] = value; break; - case 0x18: /* Reserved */ - case 0x1c: /* Reserved */ - case 0x20: /* Reserved */ - case 0x28: /* Reserved */ - case 0x2c: /* Reserved */ + case 0x18: /* Reserved */ + case 0x1c: /* Reserved */ + case 0x20: /* Reserved */ + case 0x28: /* Reserved */ + case 0x2c: /* Reserved */ OMAP_BAD_REG(addr); /* fall through */ - case 0x24: /* SETUP_ANALOG_CELL3_ULPD1 */ - case 0x38: /* COUNTER_32_FIQ */ - case 0x48: /* LOCL_TIME */ - case 0x50: /* POWER_CTRL */ + case 0x24: /* SETUP_ANALOG_CELL3_ULPD1 */ + case 0x38: /* COUNTER_32_FIQ */ + case 0x48: /* LOCL_TIME */ + case 0x50: /* POWER_CTRL */ s->ulpd_pm_regs[addr >> 2] = value; break; - case 0x30: /* CLOCK_CTRL */ + case 0x30: /* CLOCK_CTRL */ diff = s->ulpd_pm_regs[addr >> 2] ^ value; s->ulpd_pm_regs[addr >> 2] = value & 0x3f; 
omap_ulpd_clk_update(s, diff, value); break; - case 0x34: /* SOFT_REQ */ + case 0x34: /* SOFT_REQ */ diff = s->ulpd_pm_regs[addr >> 2] ^ value; s->ulpd_pm_regs[addr >> 2] = value & 0x1f; omap_ulpd_req_update(s, diff, value); break; - case 0x3c: /* DPLL_CTRL */ + case 0x3c: /* DPLL_CTRL */ /* XXX: OMAP310 TRM claims bit 3 is PLL_ENABLE, and bit 4 is * omitted altogether, probably a typo. */ /* This register has identical semantics with DPLL(1:3) control @@ -687,11 +689,11 @@ static void omap_ulpd_pm_write(void *opaque, hwaddr addr, diff = s->ulpd_pm_regs[addr >> 2] & value; s->ulpd_pm_regs[addr >> 2] = value & 0x2fff; if (diff & (0x3ff << 2)) { - if (value & (1 << 4)) { /* PLL_ENABLE */ - div = ((value >> 5) & 3) + 1; /* PLL_DIV */ - mult = MIN((value >> 7) & 0x1f, 1); /* PLL_MULT */ + if (value & (1 << 4)) { /* PLL_ENABLE */ + div = ((value >> 5) & 3) + 1; /* PLL_DIV */ + mult = MIN((value >> 7) & 0x1f, 1); /* PLL_MULT */ } else { - div = bypass_div[((value >> 2) & 3)]; /* BYPASS_DIV */ + div = bypass_div[((value >> 2) & 3)]; /* BYPASS_DIV */ mult = 1; } omap_clk_setrate(omap_findclk(s, "dpll4"), div, mult); @@ -706,10 +708,10 @@ static void omap_ulpd_pm_write(void *opaque, hwaddr addr, s->ulpd_pm_regs[addr >> 2] |= 2; break; - case 0x4c: /* APLL_CTRL */ + case 0x4c: /* APLL_CTRL */ diff = s->ulpd_pm_regs[addr >> 2] & value; s->ulpd_pm_regs[addr >> 2] = value & 0xf; - if (diff & (1 << 0)) /* APLL_NDPLL_SWITCH */ + if (diff & (1 << 0)) /* APLL_NDPLL_SWITCH */ omap_clk_reparent(omap_findclk(s, "ck_48m"), omap_findclk(s, (value & (1 << 0)) ? "apll" : "dpll4")); break; @@ -773,43 +775,43 @@ static uint64_t omap_pin_cfg_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* FUNC_MUX_CTRL_0 */ - case 0x04: /* FUNC_MUX_CTRL_1 */ - case 0x08: /* FUNC_MUX_CTRL_2 */ + case 0x00: /* FUNC_MUX_CTRL_0 */ + case 0x04: /* FUNC_MUX_CTRL_1 */ + case 0x08: /* FUNC_MUX_CTRL_2 */ return s->func_mux_ctrl[addr >> 2]; - case 0x0c: /* COMP_MODE_CTRL_0 */ + case 0x0c: /* COMP_MODE_CTRL_0 */ return s->comp_mode_ctrl[0]; - case 0x10: /* FUNC_MUX_CTRL_3 */ - case 0x14: /* FUNC_MUX_CTRL_4 */ - case 0x18: /* FUNC_MUX_CTRL_5 */ - case 0x1c: /* FUNC_MUX_CTRL_6 */ - case 0x20: /* FUNC_MUX_CTRL_7 */ - case 0x24: /* FUNC_MUX_CTRL_8 */ - case 0x28: /* FUNC_MUX_CTRL_9 */ - case 0x2c: /* FUNC_MUX_CTRL_A */ - case 0x30: /* FUNC_MUX_CTRL_B */ - case 0x34: /* FUNC_MUX_CTRL_C */ - case 0x38: /* FUNC_MUX_CTRL_D */ + case 0x10: /* FUNC_MUX_CTRL_3 */ + case 0x14: /* FUNC_MUX_CTRL_4 */ + case 0x18: /* FUNC_MUX_CTRL_5 */ + case 0x1c: /* FUNC_MUX_CTRL_6 */ + case 0x20: /* FUNC_MUX_CTRL_7 */ + case 0x24: /* FUNC_MUX_CTRL_8 */ + case 0x28: /* FUNC_MUX_CTRL_9 */ + case 0x2c: /* FUNC_MUX_CTRL_A */ + case 0x30: /* FUNC_MUX_CTRL_B */ + case 0x34: /* FUNC_MUX_CTRL_C */ + case 0x38: /* FUNC_MUX_CTRL_D */ return s->func_mux_ctrl[(addr >> 2) - 1]; - case 0x40: /* PULL_DWN_CTRL_0 */ - case 0x44: /* PULL_DWN_CTRL_1 */ - case 0x48: /* PULL_DWN_CTRL_2 */ - case 0x4c: /* PULL_DWN_CTRL_3 */ + case 0x40: /* PULL_DWN_CTRL_0 */ + case 0x44: /* PULL_DWN_CTRL_1 */ + case 0x48: /* PULL_DWN_CTRL_2 */ + case 0x4c: /* PULL_DWN_CTRL_3 */ return s->pull_dwn_ctrl[(addr & 0xf) >> 2]; - case 0x50: /* GATE_INH_CTRL_0 */ + case 0x50: /* GATE_INH_CTRL_0 */ return s->gate_inh_ctrl[0]; - case 0x60: /* VOLTAGE_CTRL_0 */ + case 0x60: /* VOLTAGE_CTRL_0 */ return s->voltage_ctrl[0]; - case 0x70: /* TEST_DBG_CTRL_0 */ + case 0x70: /* TEST_DBG_CTRL_0 */ return s->test_dbg_ctrl[0]; - case 0x80: /* MOD_CONF_CTRL_0 */ + case 0x80: /* MOD_CONF_CTRL_0 */ return 
s->mod_conf_ctrl[0]; } @@ -821,10 +823,10 @@ static inline void omap_pin_funcmux0_update(struct omap_mpu_state_s *s, uint32_t diff, uint32_t value) { if (s->compat1509) { - if (diff & (1 << 9)) /* BLUETOOTH */ + if (diff & (1 << 9)) /* BLUETOOTH */ omap_clk_onoff(omap_findclk(s, "bt_mclk_out"), (~value >> 9) & 1); - if (diff & (1 << 7)) /* USB.CLKO */ + if (diff & (1 << 7)) /* USB.CLKO */ omap_clk_onoff(omap_findclk(s, "usb.clko"), (value >> 7) & 1); } @@ -854,23 +856,23 @@ static inline void omap_pin_modconf1_update(struct omap_mpu_state_s *s, omap_findclk(s, ((value >> 31) & 1) ? "ck_48m" : "armper_ck")); } - if (diff & (1 << 30)) /* CONF_MOD_UART2_CLK_MODE_R */ + if (diff & (1 << 30)) /* CONF_MOD_UART2_CLK_MODE_R */ omap_clk_reparent(omap_findclk(s, "uart2_ck"), omap_findclk(s, ((value >> 30) & 1) ? "ck_48m" : "armper_ck")); - if (diff & (1 << 29)) /* CONF_MOD_UART1_CLK_MODE_R */ + if (diff & (1 << 29)) /* CONF_MOD_UART1_CLK_MODE_R */ omap_clk_reparent(omap_findclk(s, "uart1_ck"), omap_findclk(s, ((value >> 29) & 1) ? "ck_48m" : "armper_ck")); - if (diff & (1 << 23)) /* CONF_MOD_MMC_SD_CLK_REQ_R */ + if (diff & (1 << 23)) /* CONF_MOD_MMC_SD_CLK_REQ_R */ omap_clk_reparent(omap_findclk(s, "mmc_ck"), omap_findclk(s, ((value >> 23) & 1) ? "ck_48m" : "armper_ck")); - if (diff & (1 << 12)) /* CONF_MOD_COM_MCLK_12_48_S */ + if (diff & (1 << 12)) /* CONF_MOD_COM_MCLK_12_48_S */ omap_clk_reparent(omap_findclk(s, "com_mclk_out"), omap_findclk(s, ((value >> 12) & 1) ? "ck_48m" : "armper_ck")); - if (diff & (1 << 9)) /* CONF_MOD_USB_HOST_HHC_UHO */ + if (diff & (1 << 9)) /* CONF_MOD_USB_HOST_HHC_UHO */ omap_clk_onoff(omap_findclk(s, "usb_hhc_ck"), (value >> 9) & 1); } @@ -886,63 +888,63 @@ static void omap_pin_cfg_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* FUNC_MUX_CTRL_0 */ + case 0x00: /* FUNC_MUX_CTRL_0 */ diff = s->func_mux_ctrl[addr >> 2] ^ value; s->func_mux_ctrl[addr >> 2] = value; omap_pin_funcmux0_update(s, diff, value); return; - case 0x04: /* FUNC_MUX_CTRL_1 */ + case 0x04: /* FUNC_MUX_CTRL_1 */ diff = s->func_mux_ctrl[addr >> 2] ^ value; s->func_mux_ctrl[addr >> 2] = value; omap_pin_funcmux1_update(s, diff, value); return; - case 0x08: /* FUNC_MUX_CTRL_2 */ + case 0x08: /* FUNC_MUX_CTRL_2 */ s->func_mux_ctrl[addr >> 2] = value; return; - case 0x0c: /* COMP_MODE_CTRL_0 */ + case 0x0c: /* COMP_MODE_CTRL_0 */ s->comp_mode_ctrl[0] = value; s->compat1509 = (value != 0x0000eaef); omap_pin_funcmux0_update(s, ~0, s->func_mux_ctrl[0]); omap_pin_funcmux1_update(s, ~0, s->func_mux_ctrl[1]); return; - case 0x10: /* FUNC_MUX_CTRL_3 */ - case 0x14: /* FUNC_MUX_CTRL_4 */ - case 0x18: /* FUNC_MUX_CTRL_5 */ - case 0x1c: /* FUNC_MUX_CTRL_6 */ - case 0x20: /* FUNC_MUX_CTRL_7 */ - case 0x24: /* FUNC_MUX_CTRL_8 */ - case 0x28: /* FUNC_MUX_CTRL_9 */ - case 0x2c: /* FUNC_MUX_CTRL_A */ - case 0x30: /* FUNC_MUX_CTRL_B */ - case 0x34: /* FUNC_MUX_CTRL_C */ - case 0x38: /* FUNC_MUX_CTRL_D */ + case 0x10: /* FUNC_MUX_CTRL_3 */ + case 0x14: /* FUNC_MUX_CTRL_4 */ + case 0x18: /* FUNC_MUX_CTRL_5 */ + case 0x1c: /* FUNC_MUX_CTRL_6 */ + case 0x20: /* FUNC_MUX_CTRL_7 */ + case 0x24: /* FUNC_MUX_CTRL_8 */ + case 0x28: /* FUNC_MUX_CTRL_9 */ + case 0x2c: /* FUNC_MUX_CTRL_A */ + case 0x30: /* FUNC_MUX_CTRL_B */ + case 0x34: /* FUNC_MUX_CTRL_C */ + case 0x38: /* FUNC_MUX_CTRL_D */ s->func_mux_ctrl[(addr >> 2) - 1] = value; return; - case 0x40: /* PULL_DWN_CTRL_0 */ - case 0x44: /* PULL_DWN_CTRL_1 */ - case 0x48: /* PULL_DWN_CTRL_2 */ - case 0x4c: /* PULL_DWN_CTRL_3 */ + case 0x40: /* PULL_DWN_CTRL_0 
*/ + case 0x44: /* PULL_DWN_CTRL_1 */ + case 0x48: /* PULL_DWN_CTRL_2 */ + case 0x4c: /* PULL_DWN_CTRL_3 */ s->pull_dwn_ctrl[(addr & 0xf) >> 2] = value; return; - case 0x50: /* GATE_INH_CTRL_0 */ + case 0x50: /* GATE_INH_CTRL_0 */ s->gate_inh_ctrl[0] = value; return; - case 0x60: /* VOLTAGE_CTRL_0 */ + case 0x60: /* VOLTAGE_CTRL_0 */ s->voltage_ctrl[0] = value; return; - case 0x70: /* TEST_DBG_CTRL_0 */ + case 0x70: /* TEST_DBG_CTRL_0 */ s->test_dbg_ctrl[0] = value; return; - case 0x80: /* MOD_CONF_CTRL_0 */ + case 0x80: /* MOD_CONF_CTRL_0 */ diff = s->mod_conf_ctrl[0] ^ value; s->mod_conf_ctrl[0] = value; omap_pin_modconf1_update(s, diff, value); @@ -996,17 +998,17 @@ static uint64_t omap_id_read(void *opaque, hwaddr addr, } switch (addr) { - case 0xfffe1800: /* DIE_ID_LSB */ + case 0xfffe1800: /* DIE_ID_LSB */ return 0xc9581f0e; - case 0xfffe1804: /* DIE_ID_MSB */ + case 0xfffe1804: /* DIE_ID_MSB */ return 0xa8858bfa; - case 0xfffe2000: /* PRODUCT_ID_LSB */ + case 0xfffe2000: /* PRODUCT_ID_LSB */ return 0x00aaaafc; - case 0xfffe2004: /* PRODUCT_ID_MSB */ + case 0xfffe2004: /* PRODUCT_ID_MSB */ return 0xcafeb574; - case 0xfffed400: /* JTAG_ID_LSB */ + case 0xfffed400: /* JTAG_ID_LSB */ switch (s->mpu_model) { case omap310: return 0x03310315; @@ -1017,7 +1019,7 @@ static uint64_t omap_id_read(void *opaque, hwaddr addr, } break; - case 0xfffed404: /* JTAG_ID_MSB */ + case 0xfffed404: /* JTAG_ID_MSB */ switch (s->mpu_model) { case omap310: return 0xfb57402f; @@ -1078,22 +1080,22 @@ static uint64_t omap_mpui_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* CTRL */ + case 0x00: /* CTRL */ return s->mpui_ctrl; - case 0x04: /* DEBUG_ADDR */ + case 0x04: /* DEBUG_ADDR */ return 0x01ffffff; - case 0x08: /* DEBUG_DATA */ + case 0x08: /* DEBUG_DATA */ return 0xffffffff; - case 0x0c: /* DEBUG_FLAG */ + case 0x0c: /* DEBUG_FLAG */ return 0x00000800; - case 0x10: /* STATUS */ + case 0x10: /* STATUS */ return 0x00000000; /* Not in OMAP310 */ - case 0x14: /* DSP_STATUS */ - case 0x18: /* DSP_BOOT_CONFIG */ + case 0x14: /* DSP_STATUS */ + case 0x18: /* DSP_BOOT_CONFIG */ return 0x00000000; - case 0x1c: /* DSP_MPUI_CONFIG */ + case 0x1c: /* DSP_MPUI_CONFIG */ return 0x0000ffff; } @@ -1112,20 +1114,20 @@ static void omap_mpui_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* CTRL */ + case 0x00: /* CTRL */ s->mpui_ctrl = value & 0x007fffff; break; - case 0x04: /* DEBUG_ADDR */ - case 0x08: /* DEBUG_DATA */ - case 0x0c: /* DEBUG_FLAG */ - case 0x10: /* STATUS */ + case 0x04: /* DEBUG_ADDR */ + case 0x08: /* DEBUG_DATA */ + case 0x0c: /* DEBUG_FLAG */ + case 0x10: /* STATUS */ /* Not in OMAP310 */ - case 0x14: /* DSP_STATUS */ + case 0x14: /* DSP_STATUS */ OMAP_RO_REG(addr); break; - case 0x18: /* DSP_BOOT_CONFIG */ - case 0x1c: /* DSP_MPUI_CONFIG */ + case 0x18: /* DSP_BOOT_CONFIG */ + case 0x1c: /* DSP_MPUI_CONFIG */ break; default: @@ -1176,19 +1178,19 @@ static uint64_t omap_tipb_bridge_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* TIPB_CNTL */ + case 0x00: /* TIPB_CNTL */ return s->control; - case 0x04: /* TIPB_BUS_ALLOC */ + case 0x04: /* TIPB_BUS_ALLOC */ return s->alloc; - case 0x08: /* MPU_TIPB_CNTL */ + case 0x08: /* MPU_TIPB_CNTL */ return s->buffer; - case 0x0c: /* ENHANCED_TIPB_CNTL */ + case 0x0c: /* ENHANCED_TIPB_CNTL */ return s->enh_control; - case 0x10: /* ADDRESS_DBG */ - case 0x14: /* DATA_DEBUG_LOW */ - case 0x18: /* DATA_DEBUG_HIGH */ + case 0x10: /* ADDRESS_DBG */ + case 0x14: /* DATA_DEBUG_LOW */ + case 0x18: /* DATA_DEBUG_HIGH */ 
return 0xffff; - case 0x1c: /* DEBUG_CNTR_SIG */ + case 0x1c: /* DEBUG_CNTR_SIG */ return 0x00f8; } @@ -1207,27 +1209,27 @@ static void omap_tipb_bridge_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* TIPB_CNTL */ + case 0x00: /* TIPB_CNTL */ s->control = value & 0xffff; break; - case 0x04: /* TIPB_BUS_ALLOC */ + case 0x04: /* TIPB_BUS_ALLOC */ s->alloc = value & 0x003f; break; - case 0x08: /* MPU_TIPB_CNTL */ + case 0x08: /* MPU_TIPB_CNTL */ s->buffer = value & 0x0003; break; - case 0x0c: /* ENHANCED_TIPB_CNTL */ + case 0x0c: /* ENHANCED_TIPB_CNTL */ s->width_intr = !(value & 2); s->enh_control = value & 0x000f; break; - case 0x10: /* ADDRESS_DBG */ - case 0x14: /* DATA_DEBUG_LOW */ - case 0x18: /* DATA_DEBUG_HIGH */ - case 0x1c: /* DEBUG_CNTR_SIG */ + case 0x10: /* ADDRESS_DBG */ + case 0x14: /* DATA_DEBUG_LOW */ + case 0x18: /* DATA_DEBUG_HIGH */ + case 0x1c: /* DEBUG_CNTR_SIG */ OMAP_RO_REG(addr); break; @@ -1278,23 +1280,23 @@ static uint64_t omap_tcmi_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* IMIF_PRIO */ - case 0x04: /* EMIFS_PRIO */ - case 0x08: /* EMIFF_PRIO */ - case 0x0c: /* EMIFS_CONFIG */ - case 0x10: /* EMIFS_CS0_CONFIG */ - case 0x14: /* EMIFS_CS1_CONFIG */ - case 0x18: /* EMIFS_CS2_CONFIG */ - case 0x1c: /* EMIFS_CS3_CONFIG */ - case 0x24: /* EMIFF_MRS */ - case 0x28: /* TIMEOUT1 */ - case 0x2c: /* TIMEOUT2 */ - case 0x30: /* TIMEOUT3 */ - case 0x3c: /* EMIFF_SDRAM_CONFIG_2 */ - case 0x40: /* EMIFS_CFG_DYN_WAIT */ + case 0x00: /* IMIF_PRIO */ + case 0x04: /* EMIFS_PRIO */ + case 0x08: /* EMIFF_PRIO */ + case 0x0c: /* EMIFS_CONFIG */ + case 0x10: /* EMIFS_CS0_CONFIG */ + case 0x14: /* EMIFS_CS1_CONFIG */ + case 0x18: /* EMIFS_CS2_CONFIG */ + case 0x1c: /* EMIFS_CS3_CONFIG */ + case 0x24: /* EMIFF_MRS */ + case 0x28: /* TIMEOUT1 */ + case 0x2c: /* TIMEOUT2 */ + case 0x30: /* TIMEOUT3 */ + case 0x3c: /* EMIFF_SDRAM_CONFIG_2 */ + case 0x40: /* EMIFS_CFG_DYN_WAIT */ return s->tcmi_regs[addr >> 2]; - case 0x20: /* EMIFF_SDRAM_CONFIG */ + case 0x20: /* EMIFF_SDRAM_CONFIG */ ret = s->tcmi_regs[addr >> 2]; s->tcmi_regs[addr >> 2] &= ~1; /* XXX: Clear SLRF on SDRAM access */ /* XXX: We can try using the VGA_DIRTY flag for this */ @@ -1316,23 +1318,23 @@ static void omap_tcmi_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* IMIF_PRIO */ - case 0x04: /* EMIFS_PRIO */ - case 0x08: /* EMIFF_PRIO */ - case 0x10: /* EMIFS_CS0_CONFIG */ - case 0x14: /* EMIFS_CS1_CONFIG */ - case 0x18: /* EMIFS_CS2_CONFIG */ - case 0x1c: /* EMIFS_CS3_CONFIG */ - case 0x20: /* EMIFF_SDRAM_CONFIG */ - case 0x24: /* EMIFF_MRS */ - case 0x28: /* TIMEOUT1 */ - case 0x2c: /* TIMEOUT2 */ - case 0x30: /* TIMEOUT3 */ - case 0x3c: /* EMIFF_SDRAM_CONFIG_2 */ - case 0x40: /* EMIFS_CFG_DYN_WAIT */ + case 0x00: /* IMIF_PRIO */ + case 0x04: /* EMIFS_PRIO */ + case 0x08: /* EMIFF_PRIO */ + case 0x10: /* EMIFS_CS0_CONFIG */ + case 0x14: /* EMIFS_CS1_CONFIG */ + case 0x18: /* EMIFS_CS2_CONFIG */ + case 0x1c: /* EMIFS_CS3_CONFIG */ + case 0x20: /* EMIFF_SDRAM_CONFIG */ + case 0x24: /* EMIFF_MRS */ + case 0x28: /* TIMEOUT1 */ + case 0x2c: /* TIMEOUT2 */ + case 0x30: /* TIMEOUT3 */ + case 0x3c: /* EMIFF_SDRAM_CONFIG_2 */ + case 0x40: /* EMIFS_CFG_DYN_WAIT */ s->tcmi_regs[addr >> 2] = value; break; - case 0x0c: /* EMIFS_CONFIG */ + case 0x0c: /* EMIFS_CONFIG */ s->tcmi_regs[addr >> 2] = (value & 0xf) | (1 << 4); break; @@ -1391,7 +1393,7 @@ static uint64_t omap_dpll_read(void *opaque, hwaddr addr, return omap_badwidth_read16(opaque, addr); } - if (addr == 0x00) /* 
CTL_REG */ + if (addr == 0x00) /* CTL_REG */ return s->mode; OMAP_BAD_REG(addr); @@ -1411,16 +1413,16 @@ static void omap_dpll_write(void *opaque, hwaddr addr, return; } - if (addr == 0x00) { /* CTL_REG */ + if (addr == 0x00) { /* CTL_REG */ /* See omap_ulpd_pm_write() too */ diff = s->mode & value; s->mode = value & 0x2fff; if (diff & (0x3ff << 2)) { - if (value & (1 << 4)) { /* PLL_ENABLE */ - div = ((value >> 5) & 3) + 1; /* PLL_DIV */ - mult = MIN((value >> 7) & 0x1f, 1); /* PLL_MULT */ + if (value & (1 << 4)) { /* PLL_ENABLE */ + div = ((value >> 5) & 3) + 1; /* PLL_DIV */ + mult = MIN((value >> 7) & 0x1f, 1); /* PLL_MULT */ } else { - div = bypass_div[((value >> 2) & 3)]; /* BYPASS_DIV */ + div = bypass_div[((value >> 2) & 3)]; /* BYPASS_DIV */ mult = 1; } omap_clk_setrate(s->dpll, div, mult); @@ -1472,31 +1474,31 @@ static uint64_t omap_clkm_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* ARM_CKCTL */ + case 0x00: /* ARM_CKCTL */ return s->clkm.arm_ckctl; - case 0x04: /* ARM_IDLECT1 */ + case 0x04: /* ARM_IDLECT1 */ return s->clkm.arm_idlect1; - case 0x08: /* ARM_IDLECT2 */ + case 0x08: /* ARM_IDLECT2 */ return s->clkm.arm_idlect2; - case 0x0c: /* ARM_EWUPCT */ + case 0x0c: /* ARM_EWUPCT */ return s->clkm.arm_ewupct; - case 0x10: /* ARM_RSTCT1 */ + case 0x10: /* ARM_RSTCT1 */ return s->clkm.arm_rstct1; - case 0x14: /* ARM_RSTCT2 */ + case 0x14: /* ARM_RSTCT2 */ return s->clkm.arm_rstct2; - case 0x18: /* ARM_SYSST */ + case 0x18: /* ARM_SYSST */ return (s->clkm.clocking_scheme << 11) | s->clkm.cold_start; - case 0x1c: /* ARM_CKOUT1 */ + case 0x1c: /* ARM_CKOUT1 */ return s->clkm.arm_ckout1; - case 0x20: /* ARM_CKOUT2 */ + case 0x20: /* ARM_CKOUT2 */ break; } @@ -1509,7 +1511,7 @@ static inline void omap_clkm_ckctl_update(struct omap_mpu_state_s *s, { omap_clk clk; - if (diff & (1 << 14)) { /* ARM_INTHCK_SEL */ + if (diff & (1 << 14)) { /* ARM_INTHCK_SEL */ if (value & (1 << 14)) /* Reserved */; else { @@ -1517,7 +1519,7 @@ static inline void omap_clkm_ckctl_update(struct omap_mpu_state_s *s, omap_clk_reparent(clk, omap_findclk(s, "tc_ck")); } } - if (diff & (1 << 12)) { /* ARM_TIMXO */ + if (diff & (1 << 12)) { /* ARM_TIMXO */ clk = omap_findclk(s, "armtim_ck"); if (value & (1 << 12)) omap_clk_reparent(clk, omap_findclk(s, "clkin")); @@ -1525,27 +1527,27 @@ static inline void omap_clkm_ckctl_update(struct omap_mpu_state_s *s, omap_clk_reparent(clk, omap_findclk(s, "ck_gen1")); } /* XXX: en_dspck */ - if (diff & (3 << 10)) { /* DSPMMUDIV */ + if (diff & (3 << 10)) { /* DSPMMUDIV */ clk = omap_findclk(s, "dspmmu_ck"); omap_clk_setrate(clk, 1 << ((value >> 10) & 3), 1); } - if (diff & (3 << 8)) { /* TCDIV */ + if (diff & (3 << 8)) { /* TCDIV */ clk = omap_findclk(s, "tc_ck"); omap_clk_setrate(clk, 1 << ((value >> 8) & 3), 1); } - if (diff & (3 << 6)) { /* DSPDIV */ + if (diff & (3 << 6)) { /* DSPDIV */ clk = omap_findclk(s, "dsp_ck"); omap_clk_setrate(clk, 1 << ((value >> 6) & 3), 1); } - if (diff & (3 << 4)) { /* ARMDIV */ + if (diff & (3 << 4)) { /* ARMDIV */ clk = omap_findclk(s, "arm_ck"); omap_clk_setrate(clk, 1 << ((value >> 4) & 3), 1); } - if (diff & (3 << 2)) { /* LCDDIV */ + if (diff & (3 << 2)) { /* LCDDIV */ clk = omap_findclk(s, "lcd_ck"); omap_clk_setrate(clk, 1 << ((value >> 2) & 3), 1); } - if (diff & (3 << 0)) { /* PERDIV */ + if (diff & (3 << 0)) { /* PERDIV */ clk = omap_findclk(s, "armper_ck"); omap_clk_setrate(clk, 1 << ((value >> 0) & 3), 1); } @@ -1564,25 +1566,25 @@ static inline void omap_clkm_idlect1_update(struct omap_mpu_state_s *s, 
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); } -#define SET_CANIDLE(clock, bit) \ - if (diff & (1 << bit)) { \ - clk = omap_findclk(s, clock); \ - omap_clk_canidle(clk, (value >> bit) & 1); \ +#define SET_CANIDLE(clock, bit) \ + if (diff & (1 << bit)) { \ + clk = omap_findclk(s, clock); \ + omap_clk_canidle(clk, (value >> bit) & 1); \ } - SET_CANIDLE("mpuwd_ck", 0) /* IDLWDT_ARM */ - SET_CANIDLE("armxor_ck", 1) /* IDLXORP_ARM */ - SET_CANIDLE("mpuper_ck", 2) /* IDLPER_ARM */ - SET_CANIDLE("lcd_ck", 3) /* IDLLCD_ARM */ - SET_CANIDLE("lb_ck", 4) /* IDLLB_ARM */ - SET_CANIDLE("hsab_ck", 5) /* IDLHSAB_ARM */ - SET_CANIDLE("tipb_ck", 6) /* IDLIF_ARM */ - SET_CANIDLE("dma_ck", 6) /* IDLIF_ARM */ - SET_CANIDLE("tc_ck", 6) /* IDLIF_ARM */ - SET_CANIDLE("dpll1", 7) /* IDLDPLL_ARM */ - SET_CANIDLE("dpll2", 7) /* IDLDPLL_ARM */ - SET_CANIDLE("dpll3", 7) /* IDLDPLL_ARM */ - SET_CANIDLE("mpui_ck", 8) /* IDLAPI_ARM */ - SET_CANIDLE("armtim_ck", 9) /* IDLTIM_ARM */ + SET_CANIDLE("mpuwd_ck", 0) /* IDLWDT_ARM */ + SET_CANIDLE("armxor_ck", 1) /* IDLXORP_ARM */ + SET_CANIDLE("mpuper_ck", 2) /* IDLPER_ARM */ + SET_CANIDLE("lcd_ck", 3) /* IDLLCD_ARM */ + SET_CANIDLE("lb_ck", 4) /* IDLLB_ARM */ + SET_CANIDLE("hsab_ck", 5) /* IDLHSAB_ARM */ + SET_CANIDLE("tipb_ck", 6) /* IDLIF_ARM */ + SET_CANIDLE("dma_ck", 6) /* IDLIF_ARM */ + SET_CANIDLE("tc_ck", 6) /* IDLIF_ARM */ + SET_CANIDLE("dpll1", 7) /* IDLDPLL_ARM */ + SET_CANIDLE("dpll2", 7) /* IDLDPLL_ARM */ + SET_CANIDLE("dpll3", 7) /* IDLDPLL_ARM */ + SET_CANIDLE("mpui_ck", 8) /* IDLAPI_ARM */ + SET_CANIDLE("armtim_ck", 9) /* IDLTIM_ARM */ } static inline void omap_clkm_idlect2_update(struct omap_mpu_state_s *s, @@ -1590,22 +1592,22 @@ static inline void omap_clkm_idlect2_update(struct omap_mpu_state_s *s, { omap_clk clk; -#define SET_ONOFF(clock, bit) \ - if (diff & (1 << bit)) { \ - clk = omap_findclk(s, clock); \ - omap_clk_onoff(clk, (value >> bit) & 1); \ +#define SET_ONOFF(clock, bit) \ + if (diff & (1 << bit)) { \ + clk = omap_findclk(s, clock); \ + omap_clk_onoff(clk, (value >> bit) & 1); \ } - SET_ONOFF("mpuwd_ck", 0) /* EN_WDTCK */ - SET_ONOFF("armxor_ck", 1) /* EN_XORPCK */ - SET_ONOFF("mpuper_ck", 2) /* EN_PERCK */ - SET_ONOFF("lcd_ck", 3) /* EN_LCDCK */ - SET_ONOFF("lb_ck", 4) /* EN_LBCK */ - SET_ONOFF("hsab_ck", 5) /* EN_HSABCK */ - SET_ONOFF("mpui_ck", 6) /* EN_APICK */ - SET_ONOFF("armtim_ck", 7) /* EN_TIMCK */ - SET_CANIDLE("dma_ck", 8) /* DMACK_REQ */ - SET_ONOFF("arm_gpio_ck", 9) /* EN_GPIOCK */ - SET_ONOFF("lbfree_ck", 10) /* EN_LBFREECK */ + SET_ONOFF("mpuwd_ck", 0) /* EN_WDTCK */ + SET_ONOFF("armxor_ck", 1) /* EN_XORPCK */ + SET_ONOFF("mpuper_ck", 2) /* EN_PERCK */ + SET_ONOFF("lcd_ck", 3) /* EN_LCDCK */ + SET_ONOFF("lb_ck", 4) /* EN_LBCK */ + SET_ONOFF("hsab_ck", 5) /* EN_HSABCK */ + SET_ONOFF("mpui_ck", 6) /* EN_APICK */ + SET_ONOFF("armtim_ck", 7) /* EN_TIMCK */ + SET_CANIDLE("dma_ck", 8) /* DMACK_REQ */ + SET_ONOFF("arm_gpio_ck", 9) /* EN_GPIOCK */ + SET_ONOFF("lbfree_ck", 10) /* EN_LBFREECK */ } static inline void omap_clkm_ckout1_update(struct omap_mpu_state_s *s, @@ -1613,7 +1615,7 @@ static inline void omap_clkm_ckout1_update(struct omap_mpu_state_s *s, { omap_clk clk; - if (diff & (3 << 4)) { /* TCLKOUT */ + if (diff & (3 << 4)) { /* TCLKOUT */ clk = omap_findclk(s, "tclk_out"); switch ((value >> 4) & 3) { case 1: @@ -1628,7 +1630,7 @@ static inline void omap_clkm_ckout1_update(struct omap_mpu_state_s *s, omap_clk_onoff(clk, 0); } } - if (diff & (3 << 2)) { /* DCLKOUT */ + if (diff & (3 << 2)) { /* DCLKOUT */ clk = 
omap_findclk(s, "dclk_out"); switch ((value >> 2) & 3) { case 0: @@ -1645,7 +1647,7 @@ static inline void omap_clkm_ckout1_update(struct omap_mpu_state_s *s, break; } } - if (diff & (3 << 0)) { /* ACLKOUT */ + if (diff & (3 << 0)) { /* ACLKOUT */ clk = omap_findclk(s, "aclk_out"); switch ((value >> 0) & 3) { case 1: @@ -1683,66 +1685,66 @@ static void omap_clkm_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x00: /* ARM_CKCTL */ + case 0x00: /* ARM_CKCTL */ diff = s->clkm.arm_ckctl ^ value; s->clkm.arm_ckctl = value & 0x7fff; omap_clkm_ckctl_update(s, diff, value); return; - case 0x04: /* ARM_IDLECT1 */ + case 0x04: /* ARM_IDLECT1 */ diff = s->clkm.arm_idlect1 ^ value; s->clkm.arm_idlect1 = value & 0x0fff; omap_clkm_idlect1_update(s, diff, value); return; - case 0x08: /* ARM_IDLECT2 */ + case 0x08: /* ARM_IDLECT2 */ diff = s->clkm.arm_idlect2 ^ value; s->clkm.arm_idlect2 = value & 0x07ff; omap_clkm_idlect2_update(s, diff, value); return; - case 0x0c: /* ARM_EWUPCT */ + case 0x0c: /* ARM_EWUPCT */ s->clkm.arm_ewupct = value & 0x003f; return; - case 0x10: /* ARM_RSTCT1 */ + case 0x10: /* ARM_RSTCT1 */ diff = s->clkm.arm_rstct1 ^ value; s->clkm.arm_rstct1 = value & 0x0007; if (value & 9) { qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); s->clkm.cold_start = 0xa; } - if (diff & ~value & 4) { /* DSP_RST */ + if (diff & ~value & 4) { /* DSP_RST */ omap_mpui_reset(s); omap_tipb_bridge_reset(s->private_tipb); omap_tipb_bridge_reset(s->public_tipb); } - if (diff & 2) { /* DSP_EN */ + if (diff & 2) { /* DSP_EN */ clk = omap_findclk(s, "dsp_ck"); omap_clk_canidle(clk, (~value >> 1) & 1); } return; - case 0x14: /* ARM_RSTCT2 */ + case 0x14: /* ARM_RSTCT2 */ s->clkm.arm_rstct2 = value & 0x0001; return; - case 0x18: /* ARM_SYSST */ + case 0x18: /* ARM_SYSST */ if ((s->clkm.clocking_scheme ^ (value >> 11)) & 7) { s->clkm.clocking_scheme = (value >> 11) & 7; - printf("%s: clocking scheme set to %s\n", __func__, + trace_omap1_pwl_clocking_scheme( clkschemename[s->clkm.clocking_scheme]); } s->clkm.cold_start &= value & 0x3f; return; - case 0x1c: /* ARM_CKOUT1 */ + case 0x1c: /* ARM_CKOUT1 */ diff = s->clkm.arm_ckout1 ^ value; s->clkm.arm_ckout1 = value & 0x003f; omap_clkm_ckout1_update(s, diff, value); return; - case 0x20: /* ARM_CKOUT2 */ + case 0x20: /* ARM_CKOUT2 */ default: OMAP_BAD_REG(addr); } @@ -1765,16 +1767,16 @@ static uint64_t omap_clkdsp_read(void *opaque, hwaddr addr, } switch (addr) { - case 0x04: /* DSP_IDLECT1 */ + case 0x04: /* DSP_IDLECT1 */ return s->clkm.dsp_idlect1; - case 0x08: /* DSP_IDLECT2 */ + case 0x08: /* DSP_IDLECT2 */ return s->clkm.dsp_idlect2; - case 0x14: /* DSP_RSTCT2 */ + case 0x14: /* DSP_RSTCT2 */ return s->clkm.dsp_rstct2; - case 0x18: /* DSP_SYSST */ + case 0x18: /* DSP_SYSST */ return (s->clkm.clocking_scheme << 11) | s->clkm.cold_start | (cpu->halted << 6); /* Quite useless... 
*/ } @@ -1788,7 +1790,7 @@ static inline void omap_clkdsp_idlect1_update(struct omap_mpu_state_s *s, { omap_clk clk; - SET_CANIDLE("dspxor_ck", 1); /* IDLXORP_DSP */ + SET_CANIDLE("dspxor_ck", 1); /* IDLXORP_DSP */ } static inline void omap_clkdsp_idlect2_update(struct omap_mpu_state_s *s, @@ -1796,7 +1798,7 @@ static inline void omap_clkdsp_idlect2_update(struct omap_mpu_state_s *s, { omap_clk clk; - SET_ONOFF("dspxor_ck", 1); /* EN_XORPCK */ + SET_ONOFF("dspxor_ck", 1); /* EN_XORPCK */ } static void omap_clkdsp_write(void *opaque, hwaddr addr, @@ -1811,23 +1813,23 @@ static void omap_clkdsp_write(void *opaque, hwaddr addr, } switch (addr) { - case 0x04: /* DSP_IDLECT1 */ + case 0x04: /* DSP_IDLECT1 */ diff = s->clkm.dsp_idlect1 ^ value; s->clkm.dsp_idlect1 = value & 0x01f7; omap_clkdsp_idlect1_update(s, diff, value); break; - case 0x08: /* DSP_IDLECT2 */ + case 0x08: /* DSP_IDLECT2 */ s->clkm.dsp_idlect2 = value & 0x0037; diff = s->clkm.dsp_idlect1 ^ value; omap_clkdsp_idlect2_update(s, diff, value); break; - case 0x14: /* DSP_RSTCT2 */ + case 0x14: /* DSP_RSTCT2 */ s->clkm.dsp_rstct2 = value & 0x0001; break; - case 0x18: /* DSP_SYSST */ + case 0x18: /* DSP_SYSST */ s->clkm.cold_start &= value & 0x3f; break; @@ -1926,8 +1928,8 @@ static void omap_mpuio_set(void *opaque, int line, int level) qemu_irq_raise(s->irq); /* TODO: wakeup */ } - if ((s->event & (1 << 0)) && /* SET_GPIO_EVENT_MODE */ - (s->event >> 1) == line) /* PIN_SELECT */ + if ((s->event & (1 << 0)) && /* SET_GPIO_EVENT_MODE */ + (s->event >> 1) == line) /* PIN_SELECT */ s->latch = s->inputs; } } @@ -1957,47 +1959,47 @@ static uint64_t omap_mpuio_read(void *opaque, hwaddr addr, } switch (offset) { - case 0x00: /* INPUT_LATCH */ + case 0x00: /* INPUT_LATCH */ return s->inputs; - case 0x04: /* OUTPUT_REG */ + case 0x04: /* OUTPUT_REG */ return s->outputs; - case 0x08: /* IO_CNTL */ + case 0x08: /* IO_CNTL */ return s->dir; - case 0x10: /* KBR_LATCH */ + case 0x10: /* KBR_LATCH */ return s->row_latch; - case 0x14: /* KBC_REG */ + case 0x14: /* KBC_REG */ return s->cols; - case 0x18: /* GPIO_EVENT_MODE_REG */ + case 0x18: /* GPIO_EVENT_MODE_REG */ return s->event; - case 0x1c: /* GPIO_INT_EDGE_REG */ + case 0x1c: /* GPIO_INT_EDGE_REG */ return s->edge; - case 0x20: /* KBD_INT */ + case 0x20: /* KBD_INT */ return (~s->row_latch & 0x1f) && !s->kbd_mask; - case 0x24: /* GPIO_INT */ + case 0x24: /* GPIO_INT */ ret = s->ints; s->ints &= s->mask; if (ret) qemu_irq_lower(s->irq); return ret; - case 0x28: /* KBD_MASKIT */ + case 0x28: /* KBD_MASKIT */ return s->kbd_mask; - case 0x2c: /* GPIO_MASKIT */ + case 0x2c: /* GPIO_MASKIT */ return s->mask; - case 0x30: /* GPIO_DEBOUNCING_REG */ + case 0x30: /* GPIO_DEBOUNCING_REG */ return s->debounce; - case 0x34: /* GPIO_LATCH_REG */ + case 0x34: /* GPIO_LATCH_REG */ return s->latch; } @@ -2019,7 +2021,7 @@ static void omap_mpuio_write(void *opaque, hwaddr addr, } switch (offset) { - case 0x04: /* OUTPUT_REG */ + case 0x04: /* OUTPUT_REG */ diff = (s->outputs ^ value) & ~s->dir; s->outputs = value; while ((ln = ctz32(diff)) != 32) { @@ -2029,7 +2031,7 @@ static void omap_mpuio_write(void *opaque, hwaddr addr, } break; - case 0x08: /* IO_CNTL */ + case 0x08: /* IO_CNTL */ diff = s->outputs & (s->dir ^ value); s->dir = value; @@ -2041,37 +2043,37 @@ static void omap_mpuio_write(void *opaque, hwaddr addr, } break; - case 0x14: /* KBC_REG */ + case 0x14: /* KBC_REG */ s->cols = value; omap_mpuio_kbd_update(s); break; - case 0x18: /* GPIO_EVENT_MODE_REG */ + case 0x18: /* GPIO_EVENT_MODE_REG */ 
s->event = value & 0x1f; break; - case 0x1c: /* GPIO_INT_EDGE_REG */ + case 0x1c: /* GPIO_INT_EDGE_REG */ s->edge = value; break; - case 0x28: /* KBD_MASKIT */ + case 0x28: /* KBD_MASKIT */ s->kbd_mask = value & 1; omap_mpuio_kbd_update(s); break; - case 0x2c: /* GPIO_MASKIT */ + case 0x2c: /* GPIO_MASKIT */ s->mask = value; break; - case 0x30: /* GPIO_DEBOUNCING_REG */ + case 0x30: /* GPIO_DEBOUNCING_REG */ s->debounce = value & 0x1ff; break; - case 0x00: /* INPUT_LATCH */ - case 0x10: /* KBR_LATCH */ - case 0x20: /* KBD_INT */ - case 0x24: /* GPIO_INT */ - case 0x34: /* GPIO_LATCH_REG */ + case 0x00: /* INPUT_LATCH */ + case 0x10: /* KBR_LATCH */ + case 0x20: /* KBD_INT */ + case 0x24: /* GPIO_INT */ + case 0x34: /* GPIO_LATCH_REG */ OMAP_RO_REG(addr); return; @@ -2170,30 +2172,28 @@ struct omap_uwire_s { uint16_t rxbuf; uint16_t control; uint16_t setup[5]; - - uWireSlave *chip[4]; }; static void omap_uwire_transfer_start(struct omap_uwire_s *s) { - int chipselect = (s->control >> 10) & 3; /* INDEX */ - uWireSlave *slave = s->chip[chipselect]; + int chipselect = (s->control >> 10) & 3; /* INDEX */ - if ((s->control >> 5) & 0x1f) { /* NB_BITS_WR */ - if (s->control & (1 << 12)) /* CS_CMD */ - if (slave && slave->send) - slave->send(slave->opaque, - s->txbuf >> (16 - ((s->control >> 5) & 0x1f))); - s->control &= ~(1 << 14); /* CSRB */ + if ((s->control >> 5) & 0x1f) { /* NB_BITS_WR */ + if (s->control & (1 << 12)) { /* CS_CMD */ + qemu_log_mask(LOG_UNIMP, "uWireSlave TX CS:%d data:0x%04x\n", + chipselect, + s->txbuf >> (16 - ((s->control >> 5) & 0x1f))); + } + s->control &= ~(1 << 14); /* CSRB */ /* TODO: depending on s->setup[4] bits [1:0] assert an IRQ or * a DRQ. When is the level IRQ supposed to be reset? */ } - if ((s->control >> 0) & 0x1f) { /* NB_BITS_RD */ - if (s->control & (1 << 12)) /* CS_CMD */ - if (slave && slave->receive) - s->rxbuf = slave->receive(slave->opaque); - s->control |= 1 << 15; /* RDRB */ + if ((s->control >> 0) & 0x1f) { /* NB_BITS_RD */ + if (s->control & (1 << 12)) { /* CS_CMD */ + qemu_log_mask(LOG_UNIMP, "uWireSlave RX CS:%d\n", chipselect); + } + s->control |= 1 << 15; /* RDRB */ /* TODO: depending on s->setup[4] bits [1:0] assert an IRQ or * a DRQ. When is the level IRQ supposed to be reset? 
*/ } @@ -2209,22 +2209,22 @@ static uint64_t omap_uwire_read(void *opaque, hwaddr addr, unsigned size) } switch (offset) { - case 0x00: /* RDR */ - s->control &= ~(1 << 15); /* RDRB */ + case 0x00: /* RDR */ + s->control &= ~(1 << 15); /* RDRB */ return s->rxbuf; - case 0x04: /* CSR */ + case 0x04: /* CSR */ return s->control; - case 0x08: /* SR1 */ + case 0x08: /* SR1 */ return s->setup[0]; - case 0x0c: /* SR2 */ + case 0x0c: /* SR2 */ return s->setup[1]; - case 0x10: /* SR3 */ + case 0x10: /* SR3 */ return s->setup[2]; - case 0x14: /* SR4 */ + case 0x14: /* SR4 */ return s->setup[3]; - case 0x18: /* SR5 */ + case 0x18: /* SR5 */ return s->setup[4]; } @@ -2244,39 +2244,39 @@ static void omap_uwire_write(void *opaque, hwaddr addr, } switch (offset) { - case 0x00: /* TDR */ - s->txbuf = value; /* TD */ - if ((s->setup[4] & (1 << 2)) && /* AUTO_TX_EN */ - ((s->setup[4] & (1 << 3)) || /* CS_TOGGLE_TX_EN */ - (s->control & (1 << 12)))) { /* CS_CMD */ - s->control |= 1 << 14; /* CSRB */ + case 0x00: /* TDR */ + s->txbuf = value; /* TD */ + if ((s->setup[4] & (1 << 2)) && /* AUTO_TX_EN */ + ((s->setup[4] & (1 << 3)) || /* CS_TOGGLE_TX_EN */ + (s->control & (1 << 12)))) { /* CS_CMD */ + s->control |= 1 << 14; /* CSRB */ omap_uwire_transfer_start(s); } break; - case 0x04: /* CSR */ + case 0x04: /* CSR */ s->control = value & 0x1fff; - if (value & (1 << 13)) /* START */ + if (value & (1 << 13)) /* START */ omap_uwire_transfer_start(s); break; - case 0x08: /* SR1 */ + case 0x08: /* SR1 */ s->setup[0] = value & 0x003f; break; - case 0x0c: /* SR2 */ + case 0x0c: /* SR2 */ s->setup[1] = value & 0x0fc0; break; - case 0x10: /* SR3 */ + case 0x10: /* SR3 */ s->setup[2] = value & 0x0003; break; - case 0x14: /* SR4 */ + case 0x14: /* SR4 */ s->setup[3] = value & 0x0001; break; - case 0x18: /* SR5 */ + case 0x18: /* SR5 */ s->setup[4] = value & 0x000f; break; @@ -2321,17 +2321,6 @@ static struct omap_uwire_s *omap_uwire_init(MemoryRegion *system_memory, return s; } -void omap_uwire_attach(struct omap_uwire_s *s, - uWireSlave *slave, int chipselect) -{ - if (chipselect < 0 || chipselect > 3) { - error_report("%s: Bad chipselect %i", __func__, chipselect); - exit(-1); - } - - s->chip[chipselect] = slave; -} - /* Pseudonoise Pulse-Width Light Modulator */ struct omap_pwl_s { MemoryRegion iomem; @@ -2347,7 +2336,7 @@ static void omap_pwl_update(struct omap_pwl_s *s) if (output != s->output) { s->output = output; - printf("%s: Backlight now at %i/256\n", __func__, output); + trace_omap1_pwl_backlight(output); } } @@ -2361,9 +2350,9 @@ static uint64_t omap_pwl_read(void *opaque, hwaddr addr, unsigned size) } switch (offset) { - case 0x00: /* PWL_LEVEL */ + case 0x00: /* PWL_LEVEL */ return s->level; - case 0x04: /* PWL_CTRL */ + case 0x04: /* PWL_CTRL */ return s->enable; } OMAP_BAD_REG(addr); @@ -2382,11 +2371,11 @@ static void omap_pwl_write(void *opaque, hwaddr addr, } switch (offset) { - case 0x00: /* PWL_LEVEL */ + case 0x00: /* PWL_LEVEL */ s->level = value; omap_pwl_update(s); break; - case 0x04: /* PWL_CTRL */ + case 0x04: /* PWL_CTRL */ s->enable = value & 1; omap_pwl_update(s); break; @@ -2454,11 +2443,11 @@ static uint64_t omap_pwt_read(void *opaque, hwaddr addr, unsigned size) } switch (offset) { - case 0x00: /* FRC */ + case 0x00: /* FRC */ return s->frc; - case 0x04: /* VCR */ + case 0x04: /* VCR */ return s->vrc; - case 0x08: /* GCR */ + case 0x08: /* GCR */ return s->gcr; } OMAP_BAD_REG(addr); @@ -2477,13 +2466,13 @@ static void omap_pwt_write(void *opaque, hwaddr addr, } switch (offset) { - case 
0x00: /* FRC */ + case 0x00: /* FRC */ s->frc = value & 0x3f; break; - case 0x04: /* VRC */ + case 0x04: /* VRC */ if ((value ^ s->vrc) & 1) { - if (value & 1) - printf("%s: %iHz buzz on\n", __func__, (int) + if (value & 1) { + trace_omap1_pwt_buzz( /* 1.5 MHz from a 12-MHz or 13-MHz PWT_CLK */ ((omap_clk_getrate(s->clk) >> 3) / /* Pre-multiplexer divider */ @@ -2499,12 +2488,13 @@ static void omap_pwt_write(void *opaque, hwaddr addr, /* 80/127 divider */ ((value & (1 << 5)) ? 80 : 127) / (107 * 55 * 63 * 127))); - else - printf("%s: silence!\n", __func__); + } else { + trace_omap1_pwt_silence(); + } } s->vrc = value & 0x7f; break; - case 0x08: /* GCR */ + case 0x08: /* GCR */ s->gcr = value & 3; break; default: @@ -2571,8 +2561,9 @@ static void omap_rtc_interrupts_update(struct omap_rtc_s *s) static void omap_rtc_alarm_update(struct omap_rtc_s *s) { s->alarm_ti = mktimegm(&s->alarm_tm); - if (s->alarm_ti == -1) - printf("%s: conversion failed\n", __func__); + if (s->alarm_ti == -1) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: conversion failed\n", __func__); + } } static uint64_t omap_rtc_read(void *opaque, hwaddr addr, unsigned size) @@ -2586,69 +2577,69 @@ static uint64_t omap_rtc_read(void *opaque, hwaddr addr, unsigned size) } switch (offset) { - case 0x00: /* SECONDS_REG */ + case 0x00: /* SECONDS_REG */ return to_bcd(s->current_tm.tm_sec); - case 0x04: /* MINUTES_REG */ + case 0x04: /* MINUTES_REG */ return to_bcd(s->current_tm.tm_min); - case 0x08: /* HOURS_REG */ + case 0x08: /* HOURS_REG */ if (s->pm_am) return ((s->current_tm.tm_hour > 11) << 7) | to_bcd(((s->current_tm.tm_hour - 1) % 12) + 1); else return to_bcd(s->current_tm.tm_hour); - case 0x0c: /* DAYS_REG */ + case 0x0c: /* DAYS_REG */ return to_bcd(s->current_tm.tm_mday); - case 0x10: /* MONTHS_REG */ + case 0x10: /* MONTHS_REG */ return to_bcd(s->current_tm.tm_mon + 1); - case 0x14: /* YEARS_REG */ + case 0x14: /* YEARS_REG */ return to_bcd(s->current_tm.tm_year % 100); - case 0x18: /* WEEK_REG */ + case 0x18: /* WEEK_REG */ return s->current_tm.tm_wday; - case 0x20: /* ALARM_SECONDS_REG */ + case 0x20: /* ALARM_SECONDS_REG */ return to_bcd(s->alarm_tm.tm_sec); - case 0x24: /* ALARM_MINUTES_REG */ + case 0x24: /* ALARM_MINUTES_REG */ return to_bcd(s->alarm_tm.tm_min); - case 0x28: /* ALARM_HOURS_REG */ + case 0x28: /* ALARM_HOURS_REG */ if (s->pm_am) return ((s->alarm_tm.tm_hour > 11) << 7) | to_bcd(((s->alarm_tm.tm_hour - 1) % 12) + 1); else return to_bcd(s->alarm_tm.tm_hour); - case 0x2c: /* ALARM_DAYS_REG */ + case 0x2c: /* ALARM_DAYS_REG */ return to_bcd(s->alarm_tm.tm_mday); - case 0x30: /* ALARM_MONTHS_REG */ + case 0x30: /* ALARM_MONTHS_REG */ return to_bcd(s->alarm_tm.tm_mon + 1); - case 0x34: /* ALARM_YEARS_REG */ + case 0x34: /* ALARM_YEARS_REG */ return to_bcd(s->alarm_tm.tm_year % 100); - case 0x40: /* RTC_CTRL_REG */ + case 0x40: /* RTC_CTRL_REG */ return (s->pm_am << 3) | (s->auto_comp << 2) | (s->round << 1) | s->running; - case 0x44: /* RTC_STATUS_REG */ + case 0x44: /* RTC_STATUS_REG */ i = s->status; s->status &= ~0x3d; return i; - case 0x48: /* RTC_INTERRUPTS_REG */ + case 0x48: /* RTC_INTERRUPTS_REG */ return s->interrupts; - case 0x4c: /* RTC_COMP_LSB_REG */ + case 0x4c: /* RTC_COMP_LSB_REG */ return ((uint16_t) s->comp_reg) & 0xff; - case 0x50: /* RTC_COMP_MSB_REG */ + case 0x50: /* RTC_COMP_MSB_REG */ return ((uint16_t) s->comp_reg) >> 8; } @@ -2670,26 +2661,17 @@ static void omap_rtc_write(void *opaque, hwaddr addr, } switch (offset) { - case 0x00: /* SECONDS_REG */ -#ifdef ALMDEBUG - printf("RTC 
SEC_REG <-- %02x\n", value); -#endif + case 0x00: /* SECONDS_REG */ s->ti -= s->current_tm.tm_sec; s->ti += from_bcd(value); return; - case 0x04: /* MINUTES_REG */ -#ifdef ALMDEBUG - printf("RTC MIN_REG <-- %02x\n", value); -#endif + case 0x04: /* MINUTES_REG */ s->ti -= s->current_tm.tm_min * 60; s->ti += from_bcd(value) * 60; return; - case 0x08: /* HOURS_REG */ -#ifdef ALMDEBUG - printf("RTC HRS_REG <-- %02x\n", value); -#endif + case 0x08: /* HOURS_REG */ s->ti -= s->current_tm.tm_hour * 3600; if (s->pm_am) { s->ti += (from_bcd(value & 0x3f) & 12) * 3600; @@ -2698,18 +2680,12 @@ static void omap_rtc_write(void *opaque, hwaddr addr, s->ti += from_bcd(value & 0x3f) * 3600; return; - case 0x0c: /* DAYS_REG */ -#ifdef ALMDEBUG - printf("RTC DAY_REG <-- %02x\n", value); -#endif + case 0x0c: /* DAYS_REG */ s->ti -= s->current_tm.tm_mday * 86400; s->ti += from_bcd(value) * 86400; return; - case 0x10: /* MONTHS_REG */ -#ifdef ALMDEBUG - printf("RTC MTH_REG <-- %02x\n", value); -#endif + case 0x10: /* MONTHS_REG */ memcpy(&new_tm, &s->current_tm, sizeof(new_tm)); new_tm.tm_mon = from_bcd(value); ti[0] = mktimegm(&s->current_tm); @@ -2725,10 +2701,7 @@ static void omap_rtc_write(void *opaque, hwaddr addr, } return; - case 0x14: /* YEARS_REG */ -#ifdef ALMDEBUG - printf("RTC YRS_REG <-- %02x\n", value); -#endif + case 0x14: /* YEARS_REG */ memcpy(&new_tm, &s->current_tm, sizeof(new_tm)); new_tm.tm_year += from_bcd(value) - (new_tm.tm_year % 100); ti[0] = mktimegm(&s->current_tm); @@ -2744,29 +2717,20 @@ static void omap_rtc_write(void *opaque, hwaddr addr, } return; - case 0x18: /* WEEK_REG */ - return; /* Ignored */ + case 0x18: /* WEEK_REG */ + return; /* Ignored */ - case 0x20: /* ALARM_SECONDS_REG */ -#ifdef ALMDEBUG - printf("ALM SEC_REG <-- %02x\n", value); -#endif + case 0x20: /* ALARM_SECONDS_REG */ s->alarm_tm.tm_sec = from_bcd(value); omap_rtc_alarm_update(s); return; - case 0x24: /* ALARM_MINUTES_REG */ -#ifdef ALMDEBUG - printf("ALM MIN_REG <-- %02x\n", value); -#endif + case 0x24: /* ALARM_MINUTES_REG */ s->alarm_tm.tm_min = from_bcd(value); omap_rtc_alarm_update(s); return; - case 0x28: /* ALARM_HOURS_REG */ -#ifdef ALMDEBUG - printf("ALM HRS_REG <-- %02x\n", value); -#endif + case 0x28: /* ALARM_HOURS_REG */ if (s->pm_am) s->alarm_tm.tm_hour = ((from_bcd(value & 0x3f)) % 12) + @@ -2776,34 +2740,22 @@ static void omap_rtc_write(void *opaque, hwaddr addr, omap_rtc_alarm_update(s); return; - case 0x2c: /* ALARM_DAYS_REG */ -#ifdef ALMDEBUG - printf("ALM DAY_REG <-- %02x\n", value); -#endif + case 0x2c: /* ALARM_DAYS_REG */ s->alarm_tm.tm_mday = from_bcd(value); omap_rtc_alarm_update(s); return; - case 0x30: /* ALARM_MONTHS_REG */ -#ifdef ALMDEBUG - printf("ALM MON_REG <-- %02x\n", value); -#endif + case 0x30: /* ALARM_MONTHS_REG */ s->alarm_tm.tm_mon = from_bcd(value); omap_rtc_alarm_update(s); return; - case 0x34: /* ALARM_YEARS_REG */ -#ifdef ALMDEBUG - printf("ALM YRS_REG <-- %02x\n", value); -#endif + case 0x34: /* ALARM_YEARS_REG */ s->alarm_tm.tm_year = from_bcd(value); omap_rtc_alarm_update(s); return; - case 0x40: /* RTC_CTRL_REG */ -#ifdef ALMDEBUG - printf("RTC CONTROL <-- %02x\n", value); -#endif + case 0x40: /* RTC_CTRL_REG */ s->pm_am = (value >> 3) & 1; s->auto_comp = (value >> 2) & 1; s->round = (value >> 1) & 1; @@ -2812,33 +2764,21 @@ static void omap_rtc_write(void *opaque, hwaddr addr, s->status |= s->running << 1; return; - case 0x44: /* RTC_STATUS_REG */ -#ifdef ALMDEBUG - printf("RTC STATUSL <-- %02x\n", value); -#endif + case 0x44: /* RTC_STATUS_REG */ s->status 
&= ~((value & 0xc0) ^ 0x80); omap_rtc_interrupts_update(s); return; - case 0x48: /* RTC_INTERRUPTS_REG */ -#ifdef ALMDEBUG - printf("RTC INTRS <-- %02x\n", value); -#endif + case 0x48: /* RTC_INTERRUPTS_REG */ s->interrupts = value; return; - case 0x4c: /* RTC_COMP_LSB_REG */ -#ifdef ALMDEBUG - printf("RTC COMPLSB <-- %02x\n", value); -#endif + case 0x4c: /* RTC_COMP_LSB_REG */ s->comp_reg &= 0xff00; s->comp_reg |= 0x00ff & value; return; - case 0x50: /* RTC_COMP_MSB_REG */ -#ifdef ALMDEBUG - printf("RTC COMPMSB <-- %02x\n", value); -#endif + case 0x50: /* RTC_COMP_MSB_REG */ s->comp_reg &= 0x00ff; s->comp_reg |= 0xff00 & (value << 8); return; @@ -2989,12 +2929,12 @@ static void omap_mcbsp_intr_update(struct omap_mcbsp_s *s) { int irq; - switch ((s->spcr[0] >> 4) & 3) { /* RINTM */ + switch ((s->spcr[0] >> 4) & 3) { /* RINTM */ case 0: - irq = (s->spcr[0] >> 1) & 1; /* RRDY */ + irq = (s->spcr[0] >> 1) & 1; /* RRDY */ break; case 3: - irq = (s->spcr[0] >> 3) & 1; /* RSYNCERR */ + irq = (s->spcr[0] >> 3) & 1; /* RSYNCERR */ break; default: irq = 0; @@ -3004,12 +2944,12 @@ static void omap_mcbsp_intr_update(struct omap_mcbsp_s *s) if (irq) qemu_irq_pulse(s->rxirq); - switch ((s->spcr[1] >> 4) & 3) { /* XINTM */ + switch ((s->spcr[1] >> 4) & 3) { /* XINTM */ case 0: - irq = (s->spcr[1] >> 1) & 1; /* XRDY */ + irq = (s->spcr[1] >> 1) & 1; /* XRDY */ break; case 3: - irq = (s->spcr[1] >> 3) & 1; /* XSYNCERR */ + irq = (s->spcr[1] >> 3) & 1; /* XSYNCERR */ break; default: irq = 0; @@ -3022,9 +2962,9 @@ static void omap_mcbsp_intr_update(struct omap_mcbsp_s *s) static void omap_mcbsp_rx_newdata(struct omap_mcbsp_s *s) { - if ((s->spcr[0] >> 1) & 1) /* RRDY */ - s->spcr[0] |= 1 << 2; /* RFULL */ - s->spcr[0] |= 1 << 1; /* RRDY */ + if ((s->spcr[0] >> 1) & 1) /* RRDY */ + s->spcr[0] |= 1 << 2; /* RFULL */ + s->spcr[0] |= 1 << 1; /* RRDY */ qemu_irq_raise(s->rxdrq); omap_mcbsp_intr_update(s); } @@ -3036,8 +2976,9 @@ static void omap_mcbsp_source_tick(void *opaque) if (!s->rx_rate) return; - if (s->rx_req) - printf("%s: Rx FIFO overrun\n", __func__); + if (s->rx_req) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Rx FIFO overrun\n", __func__); + } s->rx_req = s->rx_rate << bps[(s->rcr[0] >> 5) & 7]; @@ -3063,14 +3004,14 @@ static void omap_mcbsp_rx_stop(struct omap_mcbsp_s *s) static void omap_mcbsp_rx_done(struct omap_mcbsp_s *s) { - s->spcr[0] &= ~(1 << 1); /* RRDY */ + s->spcr[0] &= ~(1 << 1); /* RRDY */ qemu_irq_lower(s->rxdrq); omap_mcbsp_intr_update(s); } static void omap_mcbsp_tx_newdata(struct omap_mcbsp_s *s) { - s->spcr[1] |= 1 << 1; /* XRDY */ + s->spcr[1] |= 1 << 1; /* XRDY */ qemu_irq_raise(s->txdrq); omap_mcbsp_intr_update(s); } @@ -3082,8 +3023,9 @@ static void omap_mcbsp_sink_tick(void *opaque) if (!s->tx_rate) return; - if (s->tx_req) - printf("%s: Tx FIFO underrun\n", __func__); + if (s->tx_req) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Tx FIFO underrun\n", __func__); + } s->tx_req = s->tx_rate << bps[(s->xcr[0] >> 5) & 7]; @@ -3104,7 +3046,7 @@ static void omap_mcbsp_tx_start(struct omap_mcbsp_s *s) static void omap_mcbsp_tx_done(struct omap_mcbsp_s *s) { - s->spcr[1] &= ~(1 << 1); /* XRDY */ + s->spcr[1] &= ~(1 << 1); /* XRDY */ qemu_irq_lower(s->txdrq); omap_mcbsp_intr_update(s); if (s->codec && s->codec->cts) @@ -3122,27 +3064,27 @@ static void omap_mcbsp_req_update(struct omap_mcbsp_s *s) { int prev_rx_rate, prev_tx_rate; int rx_rate = 0, tx_rate = 0; - int cpu_rate = 1500000; /* XXX */ + int cpu_rate = 1500000; /* XXX */ /* TODO: check CLKSTP bit */ - if (s->spcr[1] & (1 << 6)) { /* 
GRST */ - if (s->spcr[0] & (1 << 0)) { /* RRST */ - if ((s->srgr[1] & (1 << 13)) && /* CLKSM */ - (s->pcr & (1 << 8))) { /* CLKRM */ - if (~s->pcr & (1 << 7)) /* SCLKME */ + if (s->spcr[1] & (1 << 6)) { /* GRST */ + if (s->spcr[0] & (1 << 0)) { /* RRST */ + if ((s->srgr[1] & (1 << 13)) && /* CLKSM */ + (s->pcr & (1 << 8))) { /* CLKRM */ + if (~s->pcr & (1 << 7)) /* SCLKME */ rx_rate = cpu_rate / - ((s->srgr[0] & 0xff) + 1); /* CLKGDV */ + ((s->srgr[0] & 0xff) + 1); /* CLKGDV */ } else if (s->codec) rx_rate = s->codec->rx_rate; } - if (s->spcr[1] & (1 << 0)) { /* XRST */ - if ((s->srgr[1] & (1 << 13)) && /* CLKSM */ - (s->pcr & (1 << 9))) { /* CLKXM */ - if (~s->pcr & (1 << 7)) /* SCLKME */ + if (s->spcr[1] & (1 << 0)) { /* XRST */ + if ((s->srgr[1] & (1 << 13)) && /* CLKSM */ + (s->pcr & (1 << 9))) { /* CLKXM */ + if (~s->pcr & (1 << 7)) /* SCLKME */ tx_rate = cpu_rate / - ((s->srgr[0] & 0xff) + 1); /* CLKGDV */ + ((s->srgr[0] & 0xff) + 1); /* CLKGDV */ } else if (s->codec) tx_rate = s->codec->tx_rate; @@ -3179,13 +3121,13 @@ static uint64_t omap_mcbsp_read(void *opaque, hwaddr addr, } switch (offset) { - case 0x00: /* DRR2 */ - if (((s->rcr[0] >> 5) & 7) < 3) /* RWDLEN1 */ + case 0x00: /* DRR2 */ + if (((s->rcr[0] >> 5) & 7) < 3) /* RWDLEN1 */ return 0x0000; /* Fall through. */ - case 0x02: /* DRR1 */ + case 0x02: /* DRR1 */ if (s->rx_req < 2) { - printf("%s: Rx FIFO underrun\n", __func__); + qemu_log_mask(LOG_GUEST_ERROR, "%s: Rx FIFO underrun\n", __func__); omap_mcbsp_rx_done(s); } else { s->tx_req -= 2; @@ -3201,63 +3143,63 @@ static uint64_t omap_mcbsp_read(void *opaque, hwaddr addr, } return 0x0000; - case 0x04: /* DXR2 */ - case 0x06: /* DXR1 */ + case 0x04: /* DXR2 */ + case 0x06: /* DXR1 */ return 0x0000; - case 0x08: /* SPCR2 */ + case 0x08: /* SPCR2 */ return s->spcr[1]; - case 0x0a: /* SPCR1 */ + case 0x0a: /* SPCR1 */ return s->spcr[0]; - case 0x0c: /* RCR2 */ + case 0x0c: /* RCR2 */ return s->rcr[1]; - case 0x0e: /* RCR1 */ + case 0x0e: /* RCR1 */ return s->rcr[0]; - case 0x10: /* XCR2 */ + case 0x10: /* XCR2 */ return s->xcr[1]; - case 0x12: /* XCR1 */ + case 0x12: /* XCR1 */ return s->xcr[0]; - case 0x14: /* SRGR2 */ + case 0x14: /* SRGR2 */ return s->srgr[1]; - case 0x16: /* SRGR1 */ + case 0x16: /* SRGR1 */ return s->srgr[0]; - case 0x18: /* MCR2 */ + case 0x18: /* MCR2 */ return s->mcr[1]; - case 0x1a: /* MCR1 */ + case 0x1a: /* MCR1 */ return s->mcr[0]; - case 0x1c: /* RCERA */ + case 0x1c: /* RCERA */ return s->rcer[0]; - case 0x1e: /* RCERB */ + case 0x1e: /* RCERB */ return s->rcer[1]; - case 0x20: /* XCERA */ + case 0x20: /* XCERA */ return s->xcer[0]; - case 0x22: /* XCERB */ + case 0x22: /* XCERB */ return s->xcer[1]; - case 0x24: /* PCR0 */ + case 0x24: /* PCR0 */ return s->pcr; - case 0x26: /* RCERC */ + case 0x26: /* RCERC */ return s->rcer[2]; - case 0x28: /* RCERD */ + case 0x28: /* RCERD */ return s->rcer[3]; - case 0x2a: /* XCERC */ + case 0x2a: /* XCERC */ return s->xcer[2]; - case 0x2c: /* XCERD */ + case 0x2c: /* XCERD */ return s->xcer[3]; - case 0x2e: /* RCERE */ + case 0x2e: /* RCERE */ return s->rcer[4]; - case 0x30: /* RCERF */ + case 0x30: /* RCERF */ return s->rcer[5]; - case 0x32: /* XCERE */ + case 0x32: /* XCERE */ return s->xcer[4]; - case 0x34: /* XCERF */ + case 0x34: /* XCERF */ return s->xcer[5]; - case 0x36: /* RCERG */ + case 0x36: /* RCERG */ return s->rcer[6]; - case 0x38: /* RCERH */ + case 0x38: /* RCERH */ return s->rcer[7]; - case 0x3a: /* XCERG */ + case 0x3a: /* XCERG */ return s->xcer[6]; - case 0x3c: /* XCERH */ + case 0x3c: /* 
XCERH */ return s->xcer[7]; } @@ -3272,16 +3214,16 @@ static void omap_mcbsp_writeh(void *opaque, hwaddr addr, int offset = addr & OMAP_MPUI_REG_MASK; switch (offset) { - case 0x00: /* DRR2 */ - case 0x02: /* DRR1 */ + case 0x00: /* DRR2 */ + case 0x02: /* DRR1 */ OMAP_RO_REG(addr); return; - case 0x04: /* DXR2 */ - if (((s->xcr[0] >> 5) & 7) < 3) /* XWDLEN1 */ + case 0x04: /* DXR2 */ + if (((s->xcr[0] >> 5) & 7) < 3) /* XWDLEN1 */ return; /* Fall through. */ - case 0x06: /* DXR1 */ + case 0x06: /* DXR1 */ if (s->tx_req > 1) { s->tx_req -= 2; if (s->codec && s->codec->cts) { @@ -3290,24 +3232,28 @@ static void omap_mcbsp_writeh(void *opaque, hwaddr addr, } if (s->tx_req < 2) omap_mcbsp_tx_done(s); - } else - printf("%s: Tx FIFO overrun\n", __func__); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Tx FIFO overrun\n", __func__); + } return; - case 0x08: /* SPCR2 */ + case 0x08: /* SPCR2 */ s->spcr[1] &= 0x0002; s->spcr[1] |= 0x03f9 & value; - s->spcr[1] |= 0x0004 & (value << 2); /* XEMPTY := XRST */ - if (~value & 1) /* XRST */ + s->spcr[1] |= 0x0004 & (value << 2); /* XEMPTY := XRST */ + if (~value & 1) /* XRST */ s->spcr[1] &= ~6; omap_mcbsp_req_update(s); return; - case 0x0a: /* SPCR1 */ + case 0x0a: /* SPCR1 */ s->spcr[0] &= 0x0006; s->spcr[0] |= 0xf8f9 & value; - if (value & (1 << 15)) /* DLB */ - printf("%s: Digital Loopback mode enable attempt\n", __func__); - if (~value & 1) { /* RRST */ + if (value & (1 << 15)) { /* DLB */ + qemu_log_mask(LOG_UNIMP, + "%s: Digital Loopback mode enable attempt\n", + __func__); + } + if (~value & 1) { /* RRST */ s->spcr[0] &= ~6; s->rx_req = 0; omap_mcbsp_rx_done(s); @@ -3315,85 +3261,91 @@ static void omap_mcbsp_writeh(void *opaque, hwaddr addr, omap_mcbsp_req_update(s); return; - case 0x0c: /* RCR2 */ + case 0x0c: /* RCR2 */ s->rcr[1] = value & 0xffff; return; - case 0x0e: /* RCR1 */ + case 0x0e: /* RCR1 */ s->rcr[0] = value & 0x7fe0; return; - case 0x10: /* XCR2 */ + case 0x10: /* XCR2 */ s->xcr[1] = value & 0xffff; return; - case 0x12: /* XCR1 */ + case 0x12: /* XCR1 */ s->xcr[0] = value & 0x7fe0; return; - case 0x14: /* SRGR2 */ + case 0x14: /* SRGR2 */ s->srgr[1] = value & 0xffff; omap_mcbsp_req_update(s); return; - case 0x16: /* SRGR1 */ + case 0x16: /* SRGR1 */ s->srgr[0] = value & 0xffff; omap_mcbsp_req_update(s); return; - case 0x18: /* MCR2 */ + case 0x18: /* MCR2 */ s->mcr[1] = value & 0x03e3; - if (value & 3) /* XMCM */ - printf("%s: Tx channel selection mode enable attempt\n", __func__); + if (value & 3) { /* XMCM */ + qemu_log_mask(LOG_UNIMP, + "%s: Tx channel selection mode enable attempt\n", + __func__); + } return; - case 0x1a: /* MCR1 */ + case 0x1a: /* MCR1 */ s->mcr[0] = value & 0x03e1; - if (value & 1) /* RMCM */ - printf("%s: Rx channel selection mode enable attempt\n", __func__); + if (value & 1) { /* RMCM */ + qemu_log_mask(LOG_UNIMP, + "%s: Rx channel selection mode enable attempt\n", + __func__); + } return; - case 0x1c: /* RCERA */ + case 0x1c: /* RCERA */ s->rcer[0] = value & 0xffff; return; - case 0x1e: /* RCERB */ + case 0x1e: /* RCERB */ s->rcer[1] = value & 0xffff; return; - case 0x20: /* XCERA */ + case 0x20: /* XCERA */ s->xcer[0] = value & 0xffff; return; - case 0x22: /* XCERB */ + case 0x22: /* XCERB */ s->xcer[1] = value & 0xffff; return; - case 0x24: /* PCR0 */ + case 0x24: /* PCR0 */ s->pcr = value & 0x7faf; return; - case 0x26: /* RCERC */ + case 0x26: /* RCERC */ s->rcer[2] = value & 0xffff; return; - case 0x28: /* RCERD */ + case 0x28: /* RCERD */ s->rcer[3] = value & 0xffff; return; - case 0x2a: /* XCERC 
*/ + case 0x2a: /* XCERC */ s->xcer[2] = value & 0xffff; return; - case 0x2c: /* XCERD */ + case 0x2c: /* XCERD */ s->xcer[3] = value & 0xffff; return; - case 0x2e: /* RCERE */ + case 0x2e: /* RCERE */ s->rcer[4] = value & 0xffff; return; - case 0x30: /* RCERF */ + case 0x30: /* RCERF */ s->rcer[5] = value & 0xffff; return; - case 0x32: /* XCERE */ + case 0x32: /* XCERE */ s->xcer[4] = value & 0xffff; return; - case 0x34: /* XCERF */ + case 0x34: /* XCERF */ s->xcer[5] = value & 0xffff; return; - case 0x36: /* RCERG */ + case 0x36: /* RCERG */ s->rcer[6] = value & 0xffff; return; - case 0x38: /* RCERH */ + case 0x38: /* RCERH */ s->rcer[7] = value & 0xffff; return; - case 0x3a: /* XCERG */ + case 0x3a: /* XCERG */ s->xcer[6] = value & 0xffff; return; - case 0x3c: /* XCERH */ + case 0x3c: /* XCERH */ s->xcer[7] = value & 0xffff; return; } @@ -3407,8 +3359,8 @@ static void omap_mcbsp_writew(void *opaque, hwaddr addr, struct omap_mcbsp_s *s = opaque; int offset = addr & OMAP_MPUI_REG_MASK; - if (offset == 0x04) { /* DXR */ - if (((s->xcr[0] >> 5) & 7) < 3) /* XWDLEN1 */ + if (offset == 0x04) { /* DXR */ + if (((s->xcr[0] >> 5) & 7) < 3) /* XWDLEN1 */ return; if (s->tx_req > 3) { s->tx_req -= 4; @@ -3424,8 +3376,9 @@ static void omap_mcbsp_writew(void *opaque, hwaddr addr, } if (s->tx_req < 4) omap_mcbsp_tx_done(s); - } else - printf("%s: Tx FIFO overrun\n", __func__); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Tx FIFO overrun\n", __func__); + } return; } @@ -3543,7 +3496,7 @@ static void omap_lpg_tick(void *opaque) timer_mod(s->tm, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + s->on); s->cycle = !s->cycle; - printf("%s: LED is %s\n", __func__, s->cycle ? "on" : "off"); + trace_omap1_lpg_led(s->cycle ? "on" : "off"); } static void omap_lpg_update(struct omap_lpg_s *s) @@ -3551,23 +3504,23 @@ static void omap_lpg_update(struct omap_lpg_s *s) int64_t on, period = 1, ticks = 1000; static const int per[8] = { 1, 2, 4, 8, 12, 16, 20, 24 }; - if (~s->control & (1 << 6)) /* LPGRES */ + if (~s->control & (1 << 6)) /* LPGRES */ on = 0; - else if (s->control & (1 << 7)) /* PERM_ON */ + else if (s->control & (1 << 7)) /* PERM_ON */ on = period; else { - period = muldiv64(ticks, per[s->control & 7], /* PERCTRL */ + period = muldiv64(ticks, per[s->control & 7], /* PERCTRL */ 256 / 32); on = (s->clk && s->power) ? 
muldiv64(ticks, - per[(s->control >> 3) & 7], 256) : 0; /* ONCTRL */ + per[(s->control >> 3) & 7], 256) : 0; /* ONCTRL */ } timer_del(s->tm); - if (on == period && s->on < s->period) - printf("%s: LED is on\n", __func__); - else if (on == 0 && s->on) - printf("%s: LED is off\n", __func__); - else if (on && (on != s->on || period != s->period)) { + if (on == period && s->on < s->period) { + trace_omap1_lpg_led("on"); + } else if (on == 0 && s->on) { + trace_omap1_lpg_led("off"); + } else if (on && (on != s->on || period != s->period)) { s->cycle = 0; s->on = on; s->period = period; @@ -3597,10 +3550,10 @@ static uint64_t omap_lpg_read(void *opaque, hwaddr addr, unsigned size) } switch (offset) { - case 0x00: /* LCR */ + case 0x00: /* LCR */ return s->control; - case 0x04: /* PMR */ + case 0x04: /* PMR */ return s->power; } @@ -3620,14 +3573,14 @@ static void omap_lpg_write(void *opaque, hwaddr addr, } switch (offset) { - case 0x00: /* LCR */ - if (~value & (1 << 6)) /* LPGRES */ + case 0x00: /* LCR */ + if (~value & (1 << 6)) /* LPGRES */ omap_lpg_reset(s); s->control = value & 0xff; omap_lpg_update(s); return; - case 0x04: /* PMR */ + case 0x04: /* PMR */ s->power = value & 0x01; omap_lpg_update(s); return; @@ -3677,7 +3630,7 @@ static uint64_t omap_mpui_io_read(void *opaque, hwaddr addr, return omap_badwidth_read16(opaque, addr); } - if (addr == OMAP_MPUI_BASE) /* CMR */ + if (addr == OMAP_MPUI_BASE) /* CMR */ return 0xfe4d; OMAP_BAD_REG(addr); @@ -3729,7 +3682,6 @@ static void omap1_mpu_reset(void *opaque) omap_uart_reset(mpu->uart[0]); omap_uart_reset(mpu->uart[1]); omap_uart_reset(mpu->uart[2]); - omap_mmc_reset(mpu->mmc); omap_mpuio_reset(mpu->mpuio); omap_uwire_reset(mpu->microwire); omap_pwl_reset(mpu->pwl); @@ -3751,25 +3703,25 @@ static const struct omap_map_s { const char *name; } omap15xx_dsp_mm[] = { /* Strobe 0 */ - { 0xe1010000, 0xfffb0000, 0x800, "UART1 BT" }, /* CS0 */ - { 0xe1010800, 0xfffb0800, 0x800, "UART2 COM" }, /* CS1 */ - { 0xe1011800, 0xfffb1800, 0x800, "McBSP1 audio" }, /* CS3 */ - { 0xe1012000, 0xfffb2000, 0x800, "MCSI2 communication" }, /* CS4 */ - { 0xe1012800, 0xfffb2800, 0x800, "MCSI1 BT u-Law" }, /* CS5 */ - { 0xe1013000, 0xfffb3000, 0x800, "uWire" }, /* CS6 */ - { 0xe1013800, 0xfffb3800, 0x800, "I^2C" }, /* CS7 */ - { 0xe1014000, 0xfffb4000, 0x800, "USB W2FC" }, /* CS8 */ - { 0xe1014800, 0xfffb4800, 0x800, "RTC" }, /* CS9 */ - { 0xe1015000, 0xfffb5000, 0x800, "MPUIO" }, /* CS10 */ - { 0xe1015800, 0xfffb5800, 0x800, "PWL" }, /* CS11 */ - { 0xe1016000, 0xfffb6000, 0x800, "PWT" }, /* CS12 */ - { 0xe1017000, 0xfffb7000, 0x800, "McBSP3" }, /* CS14 */ - { 0xe1017800, 0xfffb7800, 0x800, "MMC" }, /* CS15 */ - { 0xe1019000, 0xfffb9000, 0x800, "32-kHz timer" }, /* CS18 */ - { 0xe1019800, 0xfffb9800, 0x800, "UART3" }, /* CS19 */ - { 0xe101c800, 0xfffbc800, 0x800, "TIPB switches" }, /* CS25 */ + { 0xe1010000, 0xfffb0000, 0x800, "UART1 BT" }, /* CS0 */ + { 0xe1010800, 0xfffb0800, 0x800, "UART2 COM" }, /* CS1 */ + { 0xe1011800, 0xfffb1800, 0x800, "McBSP1 audio" }, /* CS3 */ + { 0xe1012000, 0xfffb2000, 0x800, "MCSI2 communication" }, /* CS4 */ + { 0xe1012800, 0xfffb2800, 0x800, "MCSI1 BT u-Law" }, /* CS5 */ + { 0xe1013000, 0xfffb3000, 0x800, "uWire" }, /* CS6 */ + { 0xe1013800, 0xfffb3800, 0x800, "I^2C" }, /* CS7 */ + { 0xe1014000, 0xfffb4000, 0x800, "USB W2FC" }, /* CS8 */ + { 0xe1014800, 0xfffb4800, 0x800, "RTC" }, /* CS9 */ + { 0xe1015000, 0xfffb5000, 0x800, "MPUIO" }, /* CS10 */ + { 0xe1015800, 0xfffb5800, 0x800, "PWL" }, /* CS11 */ + { 0xe1016000, 0xfffb6000, 0x800, 
"PWT" }, /* CS12 */ + { 0xe1017000, 0xfffb7000, 0x800, "McBSP3" }, /* CS14 */ + { 0xe1017800, 0xfffb7800, 0x800, "MMC" }, /* CS15 */ + { 0xe1019000, 0xfffb9000, 0x800, "32-kHz timer" }, /* CS18 */ + { 0xe1019800, 0xfffb9800, 0x800, "UART3" }, /* CS19 */ + { 0xe101c800, 0xfffbc800, 0x800, "TIPB switches" }, /* CS25 */ /* Strobe 1 */ - { 0xe101e000, 0xfffce000, 0x800, "GPIOs" }, /* CS28 */ + { 0xe101e000, 0xfffce000, 0x800, "GPIOs" }, /* CS28 */ { 0 } }; @@ -3994,11 +3946,25 @@ struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *dram, if (!dinfo && !qtest_enabled()) { warn_report("missing SecureDigital device"); } - s->mmc = omap_mmc_init(0xfffb7800, system_memory, - dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, - qdev_get_gpio_in(s->ih[1], OMAP_INT_OQN), - &s->drq[OMAP_DMA_MMC_TX], - omap_findclk(s, "mmc_ck")); + + s->mmc = qdev_new(TYPE_OMAP_MMC); + sysbus_realize_and_unref(SYS_BUS_DEVICE(s->mmc), &error_fatal); + omap_mmc_set_clk(s->mmc, omap_findclk(s, "mmc_ck")); + + memory_region_add_subregion(system_memory, 0xfffb7800, + sysbus_mmio_get_region(SYS_BUS_DEVICE(s->mmc), 0)); + qdev_connect_gpio_out_named(s->mmc, "dma-tx", 0, s->drq[OMAP_DMA_MMC_TX]); + qdev_connect_gpio_out_named(s->mmc, "dma-rx", 0, s->drq[OMAP_DMA_MMC_RX]); + sysbus_connect_irq(SYS_BUS_DEVICE(s->mmc), 0, + qdev_get_gpio_in(s->ih[1], OMAP_INT_OQN)); + + if (dinfo) { + DeviceState *card = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(card, "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + qdev_realize_and_unref(card, qdev_get_child_bus(s->mmc, "sd-bus"), + &error_fatal); + } s->mpuio = omap_mpuio_init(system_memory, 0xfffb5000, qdev_get_gpio_in(s->ih[1], OMAP_INT_KEYBOARD), @@ -4059,18 +4025,18 @@ struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *dram, 0xfffbd800, omap_findclk(s, "clk32-kHz")); /* Register mappings not currently implemented: - * MCSI2 Comm fffb2000 - fffb27ff (not mapped on OMAP310) - * MCSI1 Bluetooth fffb2800 - fffb2fff (not mapped on OMAP310) - * USB W2FC fffb4000 - fffb47ff - * Camera Interface fffb6800 - fffb6fff - * USB Host fffba000 - fffba7ff - * FAC fffba800 - fffbafff - * HDQ/1-Wire fffbc000 - fffbc7ff - * TIPB switches fffbc800 - fffbcfff - * Mailbox fffcf000 - fffcf7ff - * Local bus IF fffec100 - fffec1ff - * Local bus MMU fffec200 - fffec2ff - * DSP MMU fffed200 - fffed2ff + * MCSI2 Comm fffb2000 - fffb27ff (not mapped on OMAP310) + * MCSI1 Bluetooth fffb2800 - fffb2fff (not mapped on OMAP310) + * USB W2FC fffb4000 - fffb47ff + * Camera Interface fffb6800 - fffb6fff + * USB Host fffba000 - fffba7ff + * FAC fffba800 - fffbafff + * HDQ/1-Wire fffbc000 - fffbc7ff + * TIPB switches fffbc800 - fffbcfff + * Mailbox fffcf000 - fffcf7ff + * Local bus IF fffec100 - fffec1ff + * Local bus MMU fffec200 - fffec2ff + * DSP MMU fffed200 - fffed2ff */ omap_setup_dsp_mapping(system_memory, omap15xx_dsp_mm); diff --git a/hw/arm/omap2.c b/hw/arm/omap2.c deleted file mode 100644 index d9683276c68..00000000000 --- a/hw/arm/omap2.c +++ /dev/null @@ -1,2715 +0,0 @@ -/* - * TI OMAP processors emulation. - * - * Copyright (C) 2007-2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - */ - -#include "qemu/osdep.h" -#include "qemu/error-report.h" -#include "qapi/error.h" -#include "exec/address-spaces.h" -#include "sysemu/blockdev.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "hw/irq.h" -#include "hw/qdev-properties.h" -#include "hw/arm/boot.h" -#include "hw/arm/omap.h" -#include "sysemu/sysemu.h" -#include "qemu/timer.h" -#include "chardev/char-fe.h" -#include "hw/block/flash.h" -#include "hw/arm/soc_dma.h" -#include "hw/sysbus.h" -#include "hw/boards.h" -#include "audio/audio.h" -#include "target/arm/cpu-qom.h" - -/* Enhanced Audio Controller (CODEC only) */ -struct omap_eac_s { - qemu_irq irq; - MemoryRegion iomem; - - uint16_t sysconfig; - uint8_t config[4]; - uint8_t control; - uint8_t address; - uint16_t data; - uint8_t vtol; - uint8_t vtsl; - uint16_t mixer; - uint16_t gain[4]; - uint8_t att; - uint16_t max[7]; - - struct { - qemu_irq txdrq; - qemu_irq rxdrq; - uint32_t (*txrx)(void *opaque, uint32_t, int); - void *opaque; - -#define EAC_BUF_LEN 1024 - uint32_t rxbuf[EAC_BUF_LEN]; - int rxoff; - int rxlen; - int rxavail; - uint32_t txbuf[EAC_BUF_LEN]; - int txlen; - int txavail; - - int enable; - int rate; - - uint16_t config[4]; - - /* These need to be moved to the actual codec */ - QEMUSoundCard card; - SWVoiceIn *in_voice; - SWVoiceOut *out_voice; - int hw_enable; - } codec; - - struct { - uint8_t control; - uint16_t config; - } modem, bt; -}; - -static inline void omap_eac_interrupt_update(struct omap_eac_s *s) -{ - qemu_set_irq(s->irq, (s->codec.config[1] >> 14) & 1); /* AURDI */ -} - -static inline void omap_eac_in_dmarequest_update(struct omap_eac_s *s) -{ - qemu_set_irq(s->codec.rxdrq, (s->codec.rxavail || s->codec.rxlen) && - ((s->codec.config[1] >> 12) & 1)); /* DMAREN */ -} - -static inline void omap_eac_out_dmarequest_update(struct omap_eac_s *s) -{ - qemu_set_irq(s->codec.txdrq, s->codec.txlen < s->codec.txavail && - ((s->codec.config[1] >> 11) & 1)); /* DMAWEN */ -} - -static inline void omap_eac_in_refill(struct omap_eac_s *s) -{ - int left = MIN(EAC_BUF_LEN - s->codec.rxlen, s->codec.rxavail) << 2; - int start = ((s->codec.rxoff + s->codec.rxlen) & (EAC_BUF_LEN - 1)) << 2; - int leftwrap = MIN(left, (EAC_BUF_LEN << 2) - start); - int recv = 1; - uint8_t *buf = (uint8_t *) s->codec.rxbuf + start; - - left -= leftwrap; - start = 0; - while (leftwrap && (recv = AUD_read(s->codec.in_voice, buf + start, - leftwrap)) > 0) { /* Be defensive */ - start += recv; - leftwrap -= recv; - } - if (recv <= 0) - s->codec.rxavail = 0; - else - s->codec.rxavail -= start >> 2; - s->codec.rxlen += start >> 2; - - if (recv > 0 && left > 0) { - start = 0; - while (left && (recv = AUD_read(s->codec.in_voice, - (uint8_t *) s->codec.rxbuf + start, - left)) > 0) { /* Be defensive */ - start += recv; - left -= recv; - } - if (recv <= 0) - s->codec.rxavail = 0; - else - s->codec.rxavail -= start >> 2; - s->codec.rxlen += start >> 2; - } -} - -static inline void omap_eac_out_empty(struct omap_eac_s *s) -{ - int left = s->codec.txlen << 2; - int start = 0; - int sent = 1; - - while (left && (sent = AUD_write(s->codec.out_voice, - (uint8_t *) s->codec.txbuf + 
start, - left)) > 0) { /* Be defensive */ - start += sent; - left -= sent; - } - - if (!sent) { - s->codec.txavail = 0; - omap_eac_out_dmarequest_update(s); - } - - if (start) - s->codec.txlen = 0; -} - -static void omap_eac_in_cb(void *opaque, int avail_b) -{ - struct omap_eac_s *s = opaque; - - s->codec.rxavail = avail_b >> 2; - omap_eac_in_refill(s); - /* TODO: possibly discard current buffer if overrun */ - omap_eac_in_dmarequest_update(s); -} - -static void omap_eac_out_cb(void *opaque, int free_b) -{ - struct omap_eac_s *s = opaque; - - s->codec.txavail = free_b >> 2; - if (s->codec.txlen) - omap_eac_out_empty(s); - else - omap_eac_out_dmarequest_update(s); -} - -static void omap_eac_enable_update(struct omap_eac_s *s) -{ - s->codec.enable = !(s->codec.config[1] & 1) && /* EACPWD */ - (s->codec.config[1] & 2) && /* AUDEN */ - s->codec.hw_enable; -} - -static const int omap_eac_fsint[4] = { - 8000, - 11025, - 22050, - 44100, -}; - -static const int omap_eac_fsint2[8] = { - 8000, - 11025, - 22050, - 44100, - 48000, - 0, 0, 0, -}; - -static const int omap_eac_fsint3[16] = { - 8000, - 11025, - 16000, - 22050, - 24000, - 32000, - 44100, - 48000, - 0, 0, 0, 0, 0, 0, 0, 0, -}; - -static void omap_eac_rate_update(struct omap_eac_s *s) -{ - int fsint[3]; - - fsint[2] = (s->codec.config[3] >> 9) & 0xf; - fsint[1] = (s->codec.config[2] >> 0) & 0x7; - fsint[0] = (s->codec.config[0] >> 6) & 0x3; - if (fsint[2] < 0xf) - s->codec.rate = omap_eac_fsint3[fsint[2]]; - else if (fsint[1] < 0x7) - s->codec.rate = omap_eac_fsint2[fsint[1]]; - else - s->codec.rate = omap_eac_fsint[fsint[0]]; -} - -static void omap_eac_volume_update(struct omap_eac_s *s) -{ - /* TODO */ -} - -static void omap_eac_format_update(struct omap_eac_s *s) -{ - struct audsettings fmt; - - /* The hardware buffers at most one sample */ - if (s->codec.rxlen) - s->codec.rxlen = 1; - - if (s->codec.in_voice) { - AUD_set_active_in(s->codec.in_voice, 0); - AUD_close_in(&s->codec.card, s->codec.in_voice); - s->codec.in_voice = NULL; - } - if (s->codec.out_voice) { - omap_eac_out_empty(s); - AUD_set_active_out(s->codec.out_voice, 0); - AUD_close_out(&s->codec.card, s->codec.out_voice); - s->codec.out_voice = NULL; - s->codec.txavail = 0; - } - /* Discard what couldn't be written */ - s->codec.txlen = 0; - - omap_eac_enable_update(s); - if (!s->codec.enable) - return; - - omap_eac_rate_update(s); - fmt.endianness = ((s->codec.config[0] >> 8) & 1); /* LI_BI */ - fmt.nchannels = ((s->codec.config[0] >> 10) & 1) ? 2 : 1; /* MN_ST */ - fmt.freq = s->codec.rate; - /* TODO: signedness possibly depends on the CODEC hardware - or - * does I2S specify it? */ - /* All register writes are 16 bits so we store 16-bit samples - * in the buffers regardless of AGCFR[B8_16] value. 
*/ - fmt.fmt = AUDIO_FORMAT_U16; - - s->codec.in_voice = AUD_open_in(&s->codec.card, s->codec.in_voice, - "eac.codec.in", s, omap_eac_in_cb, &fmt); - s->codec.out_voice = AUD_open_out(&s->codec.card, s->codec.out_voice, - "eac.codec.out", s, omap_eac_out_cb, &fmt); - - omap_eac_volume_update(s); - - AUD_set_active_in(s->codec.in_voice, 1); - AUD_set_active_out(s->codec.out_voice, 1); -} - -static void omap_eac_reset(struct omap_eac_s *s) -{ - s->sysconfig = 0; - s->config[0] = 0x0c; - s->config[1] = 0x09; - s->config[2] = 0xab; - s->config[3] = 0x03; - s->control = 0x00; - s->address = 0x00; - s->data = 0x0000; - s->vtol = 0x00; - s->vtsl = 0x00; - s->mixer = 0x0000; - s->gain[0] = 0xe7e7; - s->gain[1] = 0x6767; - s->gain[2] = 0x6767; - s->gain[3] = 0x6767; - s->att = 0xce; - s->max[0] = 0; - s->max[1] = 0; - s->max[2] = 0; - s->max[3] = 0; - s->max[4] = 0; - s->max[5] = 0; - s->max[6] = 0; - - s->modem.control = 0x00; - s->modem.config = 0x0000; - s->bt.control = 0x00; - s->bt.config = 0x0000; - s->codec.config[0] = 0x0649; - s->codec.config[1] = 0x0000; - s->codec.config[2] = 0x0007; - s->codec.config[3] = 0x1ffc; - s->codec.rxoff = 0; - s->codec.rxlen = 0; - s->codec.txlen = 0; - s->codec.rxavail = 0; - s->codec.txavail = 0; - - omap_eac_format_update(s); - omap_eac_interrupt_update(s); -} - -static uint64_t omap_eac_read(void *opaque, hwaddr addr, unsigned size) -{ - struct omap_eac_s *s = opaque; - uint32_t ret; - - if (size != 2) { - return omap_badwidth_read16(opaque, addr); - } - - switch (addr) { - case 0x000: /* CPCFR1 */ - return s->config[0]; - case 0x004: /* CPCFR2 */ - return s->config[1]; - case 0x008: /* CPCFR3 */ - return s->config[2]; - case 0x00c: /* CPCFR4 */ - return s->config[3]; - - case 0x010: /* CPTCTL */ - return s->control | ((s->codec.rxavail + s->codec.rxlen > 0) << 7) | - ((s->codec.txlen < s->codec.txavail) << 5); - - case 0x014: /* CPTTADR */ - return s->address; - case 0x018: /* CPTDATL */ - return s->data & 0xff; - case 0x01c: /* CPTDATH */ - return s->data >> 8; - case 0x020: /* CPTVSLL */ - return s->vtol; - case 0x024: /* CPTVSLH */ - return s->vtsl | (3 << 5); /* CRDY1 | CRDY2 */ - case 0x040: /* MPCTR */ - return s->modem.control; - case 0x044: /* MPMCCFR */ - return s->modem.config; - case 0x060: /* BPCTR */ - return s->bt.control; - case 0x064: /* BPMCCFR */ - return s->bt.config; - case 0x080: /* AMSCFR */ - return s->mixer; - case 0x084: /* AMVCTR */ - return s->gain[0]; - case 0x088: /* AM1VCTR */ - return s->gain[1]; - case 0x08c: /* AM2VCTR */ - return s->gain[2]; - case 0x090: /* AM3VCTR */ - return s->gain[3]; - case 0x094: /* ASTCTR */ - return s->att; - case 0x098: /* APD1LCR */ - return s->max[0]; - case 0x09c: /* APD1RCR */ - return s->max[1]; - case 0x0a0: /* APD2LCR */ - return s->max[2]; - case 0x0a4: /* APD2RCR */ - return s->max[3]; - case 0x0a8: /* APD3LCR */ - return s->max[4]; - case 0x0ac: /* APD3RCR */ - return s->max[5]; - case 0x0b0: /* APD4R */ - return s->max[6]; - case 0x0b4: /* ADWR */ - /* This should be write-only? Docs list it as read-only. 
*/ - return 0x0000; - case 0x0b8: /* ADRDR */ - if (likely(s->codec.rxlen > 1)) { - ret = s->codec.rxbuf[s->codec.rxoff ++]; - s->codec.rxlen --; - s->codec.rxoff &= EAC_BUF_LEN - 1; - return ret; - } else if (s->codec.rxlen) { - ret = s->codec.rxbuf[s->codec.rxoff ++]; - s->codec.rxlen --; - s->codec.rxoff &= EAC_BUF_LEN - 1; - if (s->codec.rxavail) - omap_eac_in_refill(s); - omap_eac_in_dmarequest_update(s); - return ret; - } - return 0x0000; - case 0x0bc: /* AGCFR */ - return s->codec.config[0]; - case 0x0c0: /* AGCTR */ - return s->codec.config[1] | ((s->codec.config[1] & 2) << 14); - case 0x0c4: /* AGCFR2 */ - return s->codec.config[2]; - case 0x0c8: /* AGCFR3 */ - return s->codec.config[3]; - case 0x0cc: /* MBPDMACTR */ - case 0x0d0: /* MPDDMARR */ - case 0x0d8: /* MPUDMARR */ - case 0x0e4: /* BPDDMARR */ - case 0x0ec: /* BPUDMARR */ - return 0x0000; - - case 0x100: /* VERSION_NUMBER */ - return 0x0010; - - case 0x104: /* SYSCONFIG */ - return s->sysconfig; - - case 0x108: /* SYSSTATUS */ - return 1 | 0xe; /* RESETDONE | stuff */ - } - - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_eac_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_eac_s *s = opaque; - - if (size != 2) { - omap_badwidth_write16(opaque, addr, value); - return; - } - - switch (addr) { - case 0x098: /* APD1LCR */ - case 0x09c: /* APD1RCR */ - case 0x0a0: /* APD2LCR */ - case 0x0a4: /* APD2RCR */ - case 0x0a8: /* APD3LCR */ - case 0x0ac: /* APD3RCR */ - case 0x0b0: /* APD4R */ - case 0x0b8: /* ADRDR */ - case 0x0d0: /* MPDDMARR */ - case 0x0d8: /* MPUDMARR */ - case 0x0e4: /* BPDDMARR */ - case 0x0ec: /* BPUDMARR */ - case 0x100: /* VERSION_NUMBER */ - case 0x108: /* SYSSTATUS */ - OMAP_RO_REG(addr); - return; - - case 0x000: /* CPCFR1 */ - s->config[0] = value & 0xff; - omap_eac_format_update(s); - break; - case 0x004: /* CPCFR2 */ - s->config[1] = value & 0xff; - omap_eac_format_update(s); - break; - case 0x008: /* CPCFR3 */ - s->config[2] = value & 0xff; - omap_eac_format_update(s); - break; - case 0x00c: /* CPCFR4 */ - s->config[3] = value & 0xff; - omap_eac_format_update(s); - break; - - case 0x010: /* CPTCTL */ - /* Assuming TXF and TXE bits are read-only... 
*/ - s->control = value & 0x5f; - omap_eac_interrupt_update(s); - break; - - case 0x014: /* CPTTADR */ - s->address = value & 0xff; - break; - case 0x018: /* CPTDATL */ - s->data &= 0xff00; - s->data |= value & 0xff; - break; - case 0x01c: /* CPTDATH */ - s->data &= 0x00ff; - s->data |= value << 8; - break; - case 0x020: /* CPTVSLL */ - s->vtol = value & 0xf8; - break; - case 0x024: /* CPTVSLH */ - s->vtsl = value & 0x9f; - break; - case 0x040: /* MPCTR */ - s->modem.control = value & 0x8f; - break; - case 0x044: /* MPMCCFR */ - s->modem.config = value & 0x7fff; - break; - case 0x060: /* BPCTR */ - s->bt.control = value & 0x8f; - break; - case 0x064: /* BPMCCFR */ - s->bt.config = value & 0x7fff; - break; - case 0x080: /* AMSCFR */ - s->mixer = value & 0x0fff; - break; - case 0x084: /* AMVCTR */ - s->gain[0] = value & 0xffff; - break; - case 0x088: /* AM1VCTR */ - s->gain[1] = value & 0xff7f; - break; - case 0x08c: /* AM2VCTR */ - s->gain[2] = value & 0xff7f; - break; - case 0x090: /* AM3VCTR */ - s->gain[3] = value & 0xff7f; - break; - case 0x094: /* ASTCTR */ - s->att = value & 0xff; - break; - - case 0x0b4: /* ADWR */ - s->codec.txbuf[s->codec.txlen ++] = value; - if (unlikely(s->codec.txlen == EAC_BUF_LEN || - s->codec.txlen == s->codec.txavail)) { - if (s->codec.txavail) - omap_eac_out_empty(s); - /* Discard what couldn't be written */ - s->codec.txlen = 0; - } - break; - - case 0x0bc: /* AGCFR */ - s->codec.config[0] = value & 0x07ff; - omap_eac_format_update(s); - break; - case 0x0c0: /* AGCTR */ - s->codec.config[1] = value & 0x780f; - omap_eac_format_update(s); - break; - case 0x0c4: /* AGCFR2 */ - s->codec.config[2] = value & 0x003f; - omap_eac_format_update(s); - break; - case 0x0c8: /* AGCFR3 */ - s->codec.config[3] = value & 0xffff; - omap_eac_format_update(s); - break; - case 0x0cc: /* MBPDMACTR */ - case 0x0d4: /* MPDDMAWR */ - case 0x0e0: /* MPUDMAWR */ - case 0x0e8: /* BPDDMAWR */ - case 0x0f0: /* BPUDMAWR */ - break; - - case 0x104: /* SYSCONFIG */ - if (value & (1 << 1)) /* SOFTRESET */ - omap_eac_reset(s); - s->sysconfig = value & 0x31d; - break; - - default: - OMAP_BAD_REG(addr); - return; - } -} - -static const MemoryRegionOps omap_eac_ops = { - .read = omap_eac_read, - .write = omap_eac_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static struct omap_eac_s *omap_eac_init(struct omap_target_agent_s *ta, - qemu_irq irq, qemu_irq *drq, omap_clk fclk, omap_clk iclk) -{ - struct omap_eac_s *s = g_new0(struct omap_eac_s, 1); - - s->irq = irq; - s->codec.rxdrq = *drq ++; - s->codec.txdrq = *drq; - omap_eac_reset(s); - - if (current_machine->audiodev) { - s->codec.card.name = g_strdup(current_machine->audiodev); - s->codec.card.state = audio_state_by_name(s->codec.card.name, &error_fatal); - } - AUD_register_card("OMAP EAC", &s->codec.card, &error_fatal); - - memory_region_init_io(&s->iomem, NULL, &omap_eac_ops, s, "omap.eac", - omap_l4_region_size(ta, 0)); - omap_l4_attach(ta, 0, &s->iomem); - - return s; -} - -/* STI/XTI (emulation interface) console - reverse engineered only */ -struct omap_sti_s { - qemu_irq irq; - MemoryRegion iomem; - MemoryRegion iomem_fifo; - CharBackend chr; - - uint32_t sysconfig; - uint32_t systest; - uint32_t irqst; - uint32_t irqen; - uint32_t clkcontrol; - uint32_t serial_config; -}; - -#define STI_TRACE_CONSOLE_CHANNEL 239 -#define STI_TRACE_CONTROL_CHANNEL 253 - -static inline void omap_sti_interrupt_update(struct omap_sti_s *s) -{ - qemu_set_irq(s->irq, s->irqst & s->irqen); -} - -static void omap_sti_reset(struct omap_sti_s *s) -{ - 
s->sysconfig = 0; - s->irqst = 0; - s->irqen = 0; - s->clkcontrol = 0; - s->serial_config = 0; - - omap_sti_interrupt_update(s); -} - -static uint64_t omap_sti_read(void *opaque, hwaddr addr, - unsigned size) -{ - struct omap_sti_s *s = opaque; - - if (size != 4) { - return omap_badwidth_read32(opaque, addr); - } - - switch (addr) { - case 0x00: /* STI_REVISION */ - return 0x10; - - case 0x10: /* STI_SYSCONFIG */ - return s->sysconfig; - - case 0x14: /* STI_SYSSTATUS / STI_RX_STATUS / XTI_SYSSTATUS */ - return 0x00; - - case 0x18: /* STI_IRQSTATUS */ - return s->irqst; - - case 0x1c: /* STI_IRQSETEN / STI_IRQCLREN */ - return s->irqen; - - case 0x24: /* STI_ER / STI_DR / XTI_TRACESELECT */ - case 0x28: /* STI_RX_DR / XTI_RXDATA */ - /* TODO */ - return 0; - - case 0x2c: /* STI_CLK_CTRL / XTI_SCLKCRTL */ - return s->clkcontrol; - - case 0x30: /* STI_SERIAL_CFG / XTI_SCONFIG */ - return s->serial_config; - } - - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_sti_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_sti_s *s = opaque; - - if (size != 4) { - omap_badwidth_write32(opaque, addr, value); - return; - } - - switch (addr) { - case 0x00: /* STI_REVISION */ - case 0x14: /* STI_SYSSTATUS / STI_RX_STATUS / XTI_SYSSTATUS */ - OMAP_RO_REG(addr); - return; - - case 0x10: /* STI_SYSCONFIG */ - if (value & (1 << 1)) /* SOFTRESET */ - omap_sti_reset(s); - s->sysconfig = value & 0xfe; - break; - - case 0x18: /* STI_IRQSTATUS */ - s->irqst &= ~value; - omap_sti_interrupt_update(s); - break; - - case 0x1c: /* STI_IRQSETEN / STI_IRQCLREN */ - s->irqen = value & 0xffff; - omap_sti_interrupt_update(s); - break; - - case 0x2c: /* STI_CLK_CTRL / XTI_SCLKCRTL */ - s->clkcontrol = value & 0xff; - break; - - case 0x30: /* STI_SERIAL_CFG / XTI_SCONFIG */ - s->serial_config = value & 0xff; - break; - - case 0x24: /* STI_ER / STI_DR / XTI_TRACESELECT */ - case 0x28: /* STI_RX_DR / XTI_RXDATA */ - /* TODO */ - return; - - default: - OMAP_BAD_REG(addr); - return; - } -} - -static const MemoryRegionOps omap_sti_ops = { - .read = omap_sti_read, - .write = omap_sti_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static uint64_t omap_sti_fifo_read(void *opaque, hwaddr addr, unsigned size) -{ - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_sti_fifo_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_sti_s *s = opaque; - int ch = addr >> 6; - uint8_t byte = value; - - if (size != 1) { - omap_badwidth_write8(opaque, addr, size); - return; - } - - if (ch == STI_TRACE_CONTROL_CHANNEL) { - /* Flush channel value. */ - /* XXX this blocks entire thread. Rewrite to use - * qemu_chr_fe_write and background I/O callbacks */ - qemu_chr_fe_write_all(&s->chr, (const uint8_t *) "\r", 1); - } else if (ch == STI_TRACE_CONSOLE_CHANNEL || 1) { - if (value == 0xc0 || value == 0xc3) { - /* Open channel ch. 
*/ - } else if (value == 0x00) { - qemu_chr_fe_write_all(&s->chr, (const uint8_t *) "\n", 1); - } else { - qemu_chr_fe_write_all(&s->chr, &byte, 1); - } - } -} - -static const MemoryRegionOps omap_sti_fifo_ops = { - .read = omap_sti_fifo_read, - .write = omap_sti_fifo_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static struct omap_sti_s *omap_sti_init(struct omap_target_agent_s *ta, - MemoryRegion *sysmem, - hwaddr channel_base, qemu_irq irq, omap_clk clk, - Chardev *chr) -{ - struct omap_sti_s *s = g_new0(struct omap_sti_s, 1); - - s->irq = irq; - omap_sti_reset(s); - - qemu_chr_fe_init(&s->chr, chr ?: qemu_chr_new("null", "null", NULL), - &error_abort); - - memory_region_init_io(&s->iomem, NULL, &omap_sti_ops, s, "omap.sti", - omap_l4_region_size(ta, 0)); - omap_l4_attach(ta, 0, &s->iomem); - - memory_region_init_io(&s->iomem_fifo, NULL, &omap_sti_fifo_ops, s, - "omap.sti.fifo", 0x10000); - memory_region_add_subregion(sysmem, channel_base, &s->iomem_fifo); - - return s; -} - -/* L4 Interconnect */ -#define L4TA(n) (n) -#define L4TAO(n) ((n) + 39) - -static const struct omap_l4_region_s omap_l4_region[125] = { - [ 1] = { 0x40800, 0x800, 32 }, /* Initiator agent */ - [ 2] = { 0x41000, 0x1000, 32 }, /* Link agent */ - [ 0] = { 0x40000, 0x800, 32 }, /* Address and protection */ - [ 3] = { 0x00000, 0x1000, 32 | 16 | 8 }, /* System Control and Pinout */ - [ 4] = { 0x01000, 0x1000, 32 | 16 | 8 }, /* L4TAO1 */ - [ 5] = { 0x04000, 0x1000, 32 | 16 }, /* 32K Timer */ - [ 6] = { 0x05000, 0x1000, 32 | 16 | 8 }, /* L4TAO2 */ - [ 7] = { 0x08000, 0x800, 32 }, /* PRCM Region A */ - [ 8] = { 0x08800, 0x800, 32 }, /* PRCM Region B */ - [ 9] = { 0x09000, 0x1000, 32 | 16 | 8 }, /* L4TAO */ - [ 10] = { 0x12000, 0x1000, 32 | 16 | 8 }, /* Test (BCM) */ - [ 11] = { 0x13000, 0x1000, 32 | 16 | 8 }, /* L4TA1 */ - [ 12] = { 0x14000, 0x1000, 32 }, /* Test/emulation (TAP) */ - [ 13] = { 0x15000, 0x1000, 32 | 16 | 8 }, /* L4TA2 */ - [ 14] = { 0x18000, 0x1000, 32 | 16 | 8 }, /* GPIO1 */ - [ 16] = { 0x1a000, 0x1000, 32 | 16 | 8 }, /* GPIO2 */ - [ 18] = { 0x1c000, 0x1000, 32 | 16 | 8 }, /* GPIO3 */ - [ 19] = { 0x1e000, 0x1000, 32 | 16 | 8 }, /* GPIO4 */ - [ 15] = { 0x19000, 0x1000, 32 | 16 | 8 }, /* Quad GPIO TOP */ - [ 17] = { 0x1b000, 0x1000, 32 | 16 | 8 }, /* L4TA3 */ - [ 20] = { 0x20000, 0x1000, 32 | 16 | 8 }, /* WD Timer 1 (Secure) */ - [ 22] = { 0x22000, 0x1000, 32 | 16 | 8 }, /* WD Timer 2 (OMAP) */ - [ 21] = { 0x21000, 0x1000, 32 | 16 | 8 }, /* Dual WD timer TOP */ - [ 23] = { 0x23000, 0x1000, 32 | 16 | 8 }, /* L4TA4 */ - [ 24] = { 0x28000, 0x1000, 32 | 16 | 8 }, /* GP Timer 1 */ - [ 25] = { 0x29000, 0x1000, 32 | 16 | 8 }, /* L4TA7 */ - [ 26] = { 0x48000, 0x2000, 32 | 16 | 8 }, /* Emulation (ARM11ETB) */ - [ 27] = { 0x4a000, 0x1000, 32 | 16 | 8 }, /* L4TA9 */ - [ 28] = { 0x50000, 0x400, 32 | 16 | 8 }, /* Display top */ - [ 29] = { 0x50400, 0x400, 32 | 16 | 8 }, /* Display control */ - [ 30] = { 0x50800, 0x400, 32 | 16 | 8 }, /* Display RFBI */ - [ 31] = { 0x50c00, 0x400, 32 | 16 | 8 }, /* Display encoder */ - [ 32] = { 0x51000, 0x1000, 32 | 16 | 8 }, /* L4TA10 */ - [ 33] = { 0x52000, 0x400, 32 | 16 | 8 }, /* Camera top */ - [ 34] = { 0x52400, 0x400, 32 | 16 | 8 }, /* Camera core */ - [ 35] = { 0x52800, 0x400, 32 | 16 | 8 }, /* Camera DMA */ - [ 36] = { 0x52c00, 0x400, 32 | 16 | 8 }, /* Camera MMU */ - [ 37] = { 0x53000, 0x1000, 32 | 16 | 8 }, /* L4TA11 */ - [ 38] = { 0x56000, 0x1000, 32 | 16 | 8 }, /* sDMA */ - [ 39] = { 0x57000, 0x1000, 32 | 16 | 8 }, /* L4TA12 */ - [ 40] = { 0x58000, 0x1000, 32 | 16 | 8 
}, /* SSI top */ - [ 41] = { 0x59000, 0x1000, 32 | 16 | 8 }, /* SSI GDD */ - [ 42] = { 0x5a000, 0x1000, 32 | 16 | 8 }, /* SSI Port1 */ - [ 43] = { 0x5b000, 0x1000, 32 | 16 | 8 }, /* SSI Port2 */ - [ 44] = { 0x5c000, 0x1000, 32 | 16 | 8 }, /* L4TA13 */ - [ 45] = { 0x5e000, 0x1000, 32 | 16 | 8 }, /* USB OTG */ - [ 46] = { 0x5f000, 0x1000, 32 | 16 | 8 }, /* L4TAO4 */ - [ 47] = { 0x60000, 0x1000, 32 | 16 | 8 }, /* Emulation (WIN_TRACER1SDRC) */ - [ 48] = { 0x61000, 0x1000, 32 | 16 | 8 }, /* L4TA14 */ - [ 49] = { 0x62000, 0x1000, 32 | 16 | 8 }, /* Emulation (WIN_TRACER2GPMC) */ - [ 50] = { 0x63000, 0x1000, 32 | 16 | 8 }, /* L4TA15 */ - [ 51] = { 0x64000, 0x1000, 32 | 16 | 8 }, /* Emulation (WIN_TRACER3OCM) */ - [ 52] = { 0x65000, 0x1000, 32 | 16 | 8 }, /* L4TA16 */ - [ 53] = { 0x66000, 0x300, 32 | 16 | 8 }, /* Emulation (WIN_TRACER4L4) */ - [ 54] = { 0x67000, 0x1000, 32 | 16 | 8 }, /* L4TA17 */ - [ 55] = { 0x68000, 0x1000, 32 | 16 | 8 }, /* Emulation (XTI) */ - [ 56] = { 0x69000, 0x1000, 32 | 16 | 8 }, /* L4TA18 */ - [ 57] = { 0x6a000, 0x1000, 16 | 8 }, /* UART1 */ - [ 58] = { 0x6b000, 0x1000, 32 | 16 | 8 }, /* L4TA19 */ - [ 59] = { 0x6c000, 0x1000, 16 | 8 }, /* UART2 */ - [ 60] = { 0x6d000, 0x1000, 32 | 16 | 8 }, /* L4TA20 */ - [ 61] = { 0x6e000, 0x1000, 16 | 8 }, /* UART3 */ - [ 62] = { 0x6f000, 0x1000, 32 | 16 | 8 }, /* L4TA21 */ - [ 63] = { 0x70000, 0x1000, 16 }, /* I2C1 */ - [ 64] = { 0x71000, 0x1000, 32 | 16 | 8 }, /* L4TAO5 */ - [ 65] = { 0x72000, 0x1000, 16 }, /* I2C2 */ - [ 66] = { 0x73000, 0x1000, 32 | 16 | 8 }, /* L4TAO6 */ - [ 67] = { 0x74000, 0x1000, 16 }, /* McBSP1 */ - [ 68] = { 0x75000, 0x1000, 32 | 16 | 8 }, /* L4TAO7 */ - [ 69] = { 0x76000, 0x1000, 16 }, /* McBSP2 */ - [ 70] = { 0x77000, 0x1000, 32 | 16 | 8 }, /* L4TAO8 */ - [ 71] = { 0x24000, 0x1000, 32 | 16 | 8 }, /* WD Timer 3 (DSP) */ - [ 72] = { 0x25000, 0x1000, 32 | 16 | 8 }, /* L4TA5 */ - [ 73] = { 0x26000, 0x1000, 32 | 16 | 8 }, /* WD Timer 4 (IVA) */ - [ 74] = { 0x27000, 0x1000, 32 | 16 | 8 }, /* L4TA6 */ - [ 75] = { 0x2a000, 0x1000, 32 | 16 | 8 }, /* GP Timer 2 */ - [ 76] = { 0x2b000, 0x1000, 32 | 16 | 8 }, /* L4TA8 */ - [ 77] = { 0x78000, 0x1000, 32 | 16 | 8 }, /* GP Timer 3 */ - [ 78] = { 0x79000, 0x1000, 32 | 16 | 8 }, /* L4TA22 */ - [ 79] = { 0x7a000, 0x1000, 32 | 16 | 8 }, /* GP Timer 4 */ - [ 80] = { 0x7b000, 0x1000, 32 | 16 | 8 }, /* L4TA23 */ - [ 81] = { 0x7c000, 0x1000, 32 | 16 | 8 }, /* GP Timer 5 */ - [ 82] = { 0x7d000, 0x1000, 32 | 16 | 8 }, /* L4TA24 */ - [ 83] = { 0x7e000, 0x1000, 32 | 16 | 8 }, /* GP Timer 6 */ - [ 84] = { 0x7f000, 0x1000, 32 | 16 | 8 }, /* L4TA25 */ - [ 85] = { 0x80000, 0x1000, 32 | 16 | 8 }, /* GP Timer 7 */ - [ 86] = { 0x81000, 0x1000, 32 | 16 | 8 }, /* L4TA26 */ - [ 87] = { 0x82000, 0x1000, 32 | 16 | 8 }, /* GP Timer 8 */ - [ 88] = { 0x83000, 0x1000, 32 | 16 | 8 }, /* L4TA27 */ - [ 89] = { 0x84000, 0x1000, 32 | 16 | 8 }, /* GP Timer 9 */ - [ 90] = { 0x85000, 0x1000, 32 | 16 | 8 }, /* L4TA28 */ - [ 91] = { 0x86000, 0x1000, 32 | 16 | 8 }, /* GP Timer 10 */ - [ 92] = { 0x87000, 0x1000, 32 | 16 | 8 }, /* L4TA29 */ - [ 93] = { 0x88000, 0x1000, 32 | 16 | 8 }, /* GP Timer 11 */ - [ 94] = { 0x89000, 0x1000, 32 | 16 | 8 }, /* L4TA30 */ - [ 95] = { 0x8a000, 0x1000, 32 | 16 | 8 }, /* GP Timer 12 */ - [ 96] = { 0x8b000, 0x1000, 32 | 16 | 8 }, /* L4TA31 */ - [ 97] = { 0x90000, 0x1000, 16 }, /* EAC */ - [ 98] = { 0x91000, 0x1000, 32 | 16 | 8 }, /* L4TA32 */ - [ 99] = { 0x92000, 0x1000, 16 }, /* FAC */ - [100] = { 0x93000, 0x1000, 32 | 16 | 8 }, /* L4TA33 */ - [101] = { 0x94000, 0x1000, 32 | 16 | 
8 }, /* IPC (MAILBOX) */ - [102] = { 0x95000, 0x1000, 32 | 16 | 8 }, /* L4TA34 */ - [103] = { 0x98000, 0x1000, 32 | 16 | 8 }, /* SPI1 */ - [104] = { 0x99000, 0x1000, 32 | 16 | 8 }, /* L4TA35 */ - [105] = { 0x9a000, 0x1000, 32 | 16 | 8 }, /* SPI2 */ - [106] = { 0x9b000, 0x1000, 32 | 16 | 8 }, /* L4TA36 */ - [107] = { 0x9c000, 0x1000, 16 | 8 }, /* MMC SDIO */ - [108] = { 0x9d000, 0x1000, 32 | 16 | 8 }, /* L4TAO9 */ - [109] = { 0x9e000, 0x1000, 32 | 16 | 8 }, /* MS_PRO */ - [110] = { 0x9f000, 0x1000, 32 | 16 | 8 }, /* L4TAO10 */ - [111] = { 0xa0000, 0x1000, 32 }, /* RNG */ - [112] = { 0xa1000, 0x1000, 32 | 16 | 8 }, /* L4TAO11 */ - [113] = { 0xa2000, 0x1000, 32 }, /* DES3DES */ - [114] = { 0xa3000, 0x1000, 32 | 16 | 8 }, /* L4TAO12 */ - [115] = { 0xa4000, 0x1000, 32 }, /* SHA1MD5 */ - [116] = { 0xa5000, 0x1000, 32 | 16 | 8 }, /* L4TAO13 */ - [117] = { 0xa6000, 0x1000, 32 }, /* AES */ - [118] = { 0xa7000, 0x1000, 32 | 16 | 8 }, /* L4TA37 */ - [119] = { 0xa8000, 0x2000, 32 }, /* PKA */ - [120] = { 0xaa000, 0x1000, 32 | 16 | 8 }, /* L4TA38 */ - [121] = { 0xb0000, 0x1000, 32 }, /* MG */ - [122] = { 0xb1000, 0x1000, 32 | 16 | 8 }, - [123] = { 0xb2000, 0x1000, 32 }, /* HDQ/1-Wire */ - [124] = { 0xb3000, 0x1000, 32 | 16 | 8 }, /* L4TA39 */ -}; - -static const struct omap_l4_agent_info_s omap_l4_agent_info[54] = { - { 0, 0, 3, 2 }, /* L4IA initiatior agent */ - { L4TAO(1), 3, 2, 1 }, /* Control and pinout module */ - { L4TAO(2), 5, 2, 1 }, /* 32K timer */ - { L4TAO(3), 7, 3, 2 }, /* PRCM */ - { L4TA(1), 10, 2, 1 }, /* BCM */ - { L4TA(2), 12, 2, 1 }, /* Test JTAG */ - { L4TA(3), 14, 6, 3 }, /* Quad GPIO */ - { L4TA(4), 20, 4, 3 }, /* WD timer 1/2 */ - { L4TA(7), 24, 2, 1 }, /* GP timer 1 */ - { L4TA(9), 26, 2, 1 }, /* ATM11 ETB */ - { L4TA(10), 28, 5, 4 }, /* Display subsystem */ - { L4TA(11), 33, 5, 4 }, /* Camera subsystem */ - { L4TA(12), 38, 2, 1 }, /* sDMA */ - { L4TA(13), 40, 5, 4 }, /* SSI */ - { L4TAO(4), 45, 2, 1 }, /* USB */ - { L4TA(14), 47, 2, 1 }, /* Win Tracer1 */ - { L4TA(15), 49, 2, 1 }, /* Win Tracer2 */ - { L4TA(16), 51, 2, 1 }, /* Win Tracer3 */ - { L4TA(17), 53, 2, 1 }, /* Win Tracer4 */ - { L4TA(18), 55, 2, 1 }, /* XTI */ - { L4TA(19), 57, 2, 1 }, /* UART1 */ - { L4TA(20), 59, 2, 1 }, /* UART2 */ - { L4TA(21), 61, 2, 1 }, /* UART3 */ - { L4TAO(5), 63, 2, 1 }, /* I2C1 */ - { L4TAO(6), 65, 2, 1 }, /* I2C2 */ - { L4TAO(7), 67, 2, 1 }, /* McBSP1 */ - { L4TAO(8), 69, 2, 1 }, /* McBSP2 */ - { L4TA(5), 71, 2, 1 }, /* WD Timer 3 (DSP) */ - { L4TA(6), 73, 2, 1 }, /* WD Timer 4 (IVA) */ - { L4TA(8), 75, 2, 1 }, /* GP Timer 2 */ - { L4TA(22), 77, 2, 1 }, /* GP Timer 3 */ - { L4TA(23), 79, 2, 1 }, /* GP Timer 4 */ - { L4TA(24), 81, 2, 1 }, /* GP Timer 5 */ - { L4TA(25), 83, 2, 1 }, /* GP Timer 6 */ - { L4TA(26), 85, 2, 1 }, /* GP Timer 7 */ - { L4TA(27), 87, 2, 1 }, /* GP Timer 8 */ - { L4TA(28), 89, 2, 1 }, /* GP Timer 9 */ - { L4TA(29), 91, 2, 1 }, /* GP Timer 10 */ - { L4TA(30), 93, 2, 1 }, /* GP Timer 11 */ - { L4TA(31), 95, 2, 1 }, /* GP Timer 12 */ - { L4TA(32), 97, 2, 1 }, /* EAC */ - { L4TA(33), 99, 2, 1 }, /* FAC */ - { L4TA(34), 101, 2, 1 }, /* IPC */ - { L4TA(35), 103, 2, 1 }, /* SPI1 */ - { L4TA(36), 105, 2, 1 }, /* SPI2 */ - { L4TAO(9), 107, 2, 1 }, /* MMC SDIO */ - { L4TAO(10), 109, 2, 1 }, - { L4TAO(11), 111, 2, 1 }, /* RNG */ - { L4TAO(12), 113, 2, 1 }, /* DES3DES */ - { L4TAO(13), 115, 2, 1 }, /* SHA1MD5 */ - { L4TA(37), 117, 2, 1 }, /* AES */ - { L4TA(38), 119, 2, 1 }, /* PKA */ - { -1, 121, 2, 1 }, - { L4TA(39), 123, 2, 1 }, /* HDQ/1-Wire */ -}; - -#define omap_l4ta(bus, 
cs) \ - omap_l4ta_get(bus, omap_l4_region, omap_l4_agent_info, L4TA(cs)) -#define omap_l4tao(bus, cs) \ - omap_l4ta_get(bus, omap_l4_region, omap_l4_agent_info, L4TAO(cs)) - -/* Power, Reset, and Clock Management */ -struct omap_prcm_s { - qemu_irq irq[3]; - struct omap_mpu_state_s *mpu; - MemoryRegion iomem0; - MemoryRegion iomem1; - - uint32_t irqst[3]; - uint32_t irqen[3]; - - uint32_t sysconfig; - uint32_t voltctrl; - uint32_t scratch[20]; - - uint32_t clksrc[1]; - uint32_t clkout[1]; - uint32_t clkemul[1]; - uint32_t clkpol[1]; - uint32_t clksel[8]; - uint32_t clken[12]; - uint32_t clkctrl[4]; - uint32_t clkidle[7]; - uint32_t setuptime[2]; - - uint32_t wkup[3]; - uint32_t wken[3]; - uint32_t wkst[3]; - uint32_t rst[4]; - uint32_t rstctrl[1]; - uint32_t power[4]; - uint32_t rsttime_wkup; - - uint32_t ev; - uint32_t evtime[2]; - - int dpll_lock, apll_lock[2]; -}; - -static void omap_prcm_int_update(struct omap_prcm_s *s, int dom) -{ - qemu_set_irq(s->irq[dom], s->irqst[dom] & s->irqen[dom]); - /* XXX or is the mask applied before PRCM_IRQSTATUS_* ? */ -} - -static uint64_t omap_prcm_read(void *opaque, hwaddr addr, - unsigned size) -{ - struct omap_prcm_s *s = opaque; - uint32_t ret; - - if (size != 4) { - return omap_badwidth_read32(opaque, addr); - } - - switch (addr) { - case 0x000: /* PRCM_REVISION */ - return 0x10; - - case 0x010: /* PRCM_SYSCONFIG */ - return s->sysconfig; - - case 0x018: /* PRCM_IRQSTATUS_MPU */ - return s->irqst[0]; - - case 0x01c: /* PRCM_IRQENABLE_MPU */ - return s->irqen[0]; - - case 0x050: /* PRCM_VOLTCTRL */ - return s->voltctrl; - case 0x054: /* PRCM_VOLTST */ - return s->voltctrl & 3; - - case 0x060: /* PRCM_CLKSRC_CTRL */ - return s->clksrc[0]; - case 0x070: /* PRCM_CLKOUT_CTRL */ - return s->clkout[0]; - case 0x078: /* PRCM_CLKEMUL_CTRL */ - return s->clkemul[0]; - case 0x080: /* PRCM_CLKCFG_CTRL */ - case 0x084: /* PRCM_CLKCFG_STATUS */ - return 0; - - case 0x090: /* PRCM_VOLTSETUP */ - return s->setuptime[0]; - - case 0x094: /* PRCM_CLKSSETUP */ - return s->setuptime[1]; - - case 0x098: /* PRCM_POLCTRL */ - return s->clkpol[0]; - - case 0x0b0: /* GENERAL_PURPOSE1 */ - case 0x0b4: /* GENERAL_PURPOSE2 */ - case 0x0b8: /* GENERAL_PURPOSE3 */ - case 0x0bc: /* GENERAL_PURPOSE4 */ - case 0x0c0: /* GENERAL_PURPOSE5 */ - case 0x0c4: /* GENERAL_PURPOSE6 */ - case 0x0c8: /* GENERAL_PURPOSE7 */ - case 0x0cc: /* GENERAL_PURPOSE8 */ - case 0x0d0: /* GENERAL_PURPOSE9 */ - case 0x0d4: /* GENERAL_PURPOSE10 */ - case 0x0d8: /* GENERAL_PURPOSE11 */ - case 0x0dc: /* GENERAL_PURPOSE12 */ - case 0x0e0: /* GENERAL_PURPOSE13 */ - case 0x0e4: /* GENERAL_PURPOSE14 */ - case 0x0e8: /* GENERAL_PURPOSE15 */ - case 0x0ec: /* GENERAL_PURPOSE16 */ - case 0x0f0: /* GENERAL_PURPOSE17 */ - case 0x0f4: /* GENERAL_PURPOSE18 */ - case 0x0f8: /* GENERAL_PURPOSE19 */ - case 0x0fc: /* GENERAL_PURPOSE20 */ - return s->scratch[(addr - 0xb0) >> 2]; - - case 0x140: /* CM_CLKSEL_MPU */ - return s->clksel[0]; - case 0x148: /* CM_CLKSTCTRL_MPU */ - return s->clkctrl[0]; - - case 0x158: /* RM_RSTST_MPU */ - return s->rst[0]; - case 0x1c8: /* PM_WKDEP_MPU */ - return s->wkup[0]; - case 0x1d4: /* PM_EVGENCTRL_MPU */ - return s->ev; - case 0x1d8: /* PM_EVEGENONTIM_MPU */ - return s->evtime[0]; - case 0x1dc: /* PM_EVEGENOFFTIM_MPU */ - return s->evtime[1]; - case 0x1e0: /* PM_PWSTCTRL_MPU */ - return s->power[0]; - case 0x1e4: /* PM_PWSTST_MPU */ - return 0; - - case 0x200: /* CM_FCLKEN1_CORE */ - return s->clken[0]; - case 0x204: /* CM_FCLKEN2_CORE */ - return s->clken[1]; - case 0x210: /* 
CM_ICLKEN1_CORE */ - return s->clken[2]; - case 0x214: /* CM_ICLKEN2_CORE */ - return s->clken[3]; - case 0x21c: /* CM_ICLKEN4_CORE */ - return s->clken[4]; - - case 0x220: /* CM_IDLEST1_CORE */ - /* TODO: check the actual iclk status */ - return 0x7ffffff9; - case 0x224: /* CM_IDLEST2_CORE */ - /* TODO: check the actual iclk status */ - return 0x00000007; - case 0x22c: /* CM_IDLEST4_CORE */ - /* TODO: check the actual iclk status */ - return 0x0000001f; - - case 0x230: /* CM_AUTOIDLE1_CORE */ - return s->clkidle[0]; - case 0x234: /* CM_AUTOIDLE2_CORE */ - return s->clkidle[1]; - case 0x238: /* CM_AUTOIDLE3_CORE */ - return s->clkidle[2]; - case 0x23c: /* CM_AUTOIDLE4_CORE */ - return s->clkidle[3]; - - case 0x240: /* CM_CLKSEL1_CORE */ - return s->clksel[1]; - case 0x244: /* CM_CLKSEL2_CORE */ - return s->clksel[2]; - - case 0x248: /* CM_CLKSTCTRL_CORE */ - return s->clkctrl[1]; - - case 0x2a0: /* PM_WKEN1_CORE */ - return s->wken[0]; - case 0x2a4: /* PM_WKEN2_CORE */ - return s->wken[1]; - - case 0x2b0: /* PM_WKST1_CORE */ - return s->wkst[0]; - case 0x2b4: /* PM_WKST2_CORE */ - return s->wkst[1]; - case 0x2c8: /* PM_WKDEP_CORE */ - return 0x1e; - - case 0x2e0: /* PM_PWSTCTRL_CORE */ - return s->power[1]; - case 0x2e4: /* PM_PWSTST_CORE */ - return 0x000030 | (s->power[1] & 0xfc00); - - case 0x300: /* CM_FCLKEN_GFX */ - return s->clken[5]; - case 0x310: /* CM_ICLKEN_GFX */ - return s->clken[6]; - case 0x320: /* CM_IDLEST_GFX */ - /* TODO: check the actual iclk status */ - return 0x00000001; - case 0x340: /* CM_CLKSEL_GFX */ - return s->clksel[3]; - case 0x348: /* CM_CLKSTCTRL_GFX */ - return s->clkctrl[2]; - case 0x350: /* RM_RSTCTRL_GFX */ - return s->rstctrl[0]; - case 0x358: /* RM_RSTST_GFX */ - return s->rst[1]; - case 0x3c8: /* PM_WKDEP_GFX */ - return s->wkup[1]; - - case 0x3e0: /* PM_PWSTCTRL_GFX */ - return s->power[2]; - case 0x3e4: /* PM_PWSTST_GFX */ - return s->power[2] & 3; - - case 0x400: /* CM_FCLKEN_WKUP */ - return s->clken[7]; - case 0x410: /* CM_ICLKEN_WKUP */ - return s->clken[8]; - case 0x420: /* CM_IDLEST_WKUP */ - /* TODO: check the actual iclk status */ - return 0x0000003f; - case 0x430: /* CM_AUTOIDLE_WKUP */ - return s->clkidle[4]; - case 0x440: /* CM_CLKSEL_WKUP */ - return s->clksel[4]; - case 0x450: /* RM_RSTCTRL_WKUP */ - return 0; - case 0x454: /* RM_RSTTIME_WKUP */ - return s->rsttime_wkup; - case 0x458: /* RM_RSTST_WKUP */ - return s->rst[2]; - case 0x4a0: /* PM_WKEN_WKUP */ - return s->wken[2]; - case 0x4b0: /* PM_WKST_WKUP */ - return s->wkst[2]; - - case 0x500: /* CM_CLKEN_PLL */ - return s->clken[9]; - case 0x520: /* CM_IDLEST_CKGEN */ - ret = 0x0000070 | (s->apll_lock[0] << 9) | (s->apll_lock[1] << 8); - if (!(s->clksel[6] & 3)) - /* Core uses 32-kHz clock */ - ret |= 3 << 0; - else if (!s->dpll_lock) - /* DPLL not locked, core uses ref_clk */ - ret |= 1 << 0; - else - /* Core uses DPLL */ - ret |= 2 << 0; - return ret; - case 0x530: /* CM_AUTOIDLE_PLL */ - return s->clkidle[5]; - case 0x540: /* CM_CLKSEL1_PLL */ - return s->clksel[5]; - case 0x544: /* CM_CLKSEL2_PLL */ - return s->clksel[6]; - - case 0x800: /* CM_FCLKEN_DSP */ - return s->clken[10]; - case 0x810: /* CM_ICLKEN_DSP */ - return s->clken[11]; - case 0x820: /* CM_IDLEST_DSP */ - /* TODO: check the actual iclk status */ - return 0x00000103; - case 0x830: /* CM_AUTOIDLE_DSP */ - return s->clkidle[6]; - case 0x840: /* CM_CLKSEL_DSP */ - return s->clksel[7]; - case 0x848: /* CM_CLKSTCTRL_DSP */ - return s->clkctrl[3]; - case 0x850: /* RM_RSTCTRL_DSP */ - return 0; - case 0x858: /* 
RM_RSTST_DSP */ - return s->rst[3]; - case 0x8c8: /* PM_WKDEP_DSP */ - return s->wkup[2]; - case 0x8e0: /* PM_PWSTCTRL_DSP */ - return s->power[3]; - case 0x8e4: /* PM_PWSTST_DSP */ - return 0x008030 | (s->power[3] & 0x3003); - - case 0x8f0: /* PRCM_IRQSTATUS_DSP */ - return s->irqst[1]; - case 0x8f4: /* PRCM_IRQENABLE_DSP */ - return s->irqen[1]; - - case 0x8f8: /* PRCM_IRQSTATUS_IVA */ - return s->irqst[2]; - case 0x8fc: /* PRCM_IRQENABLE_IVA */ - return s->irqen[2]; - } - - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_prcm_apll_update(struct omap_prcm_s *s) -{ - int mode[2]; - - mode[0] = (s->clken[9] >> 6) & 3; - s->apll_lock[0] = (mode[0] == 3); - mode[1] = (s->clken[9] >> 2) & 3; - s->apll_lock[1] = (mode[1] == 3); - /* TODO: update clocks */ - - if (mode[0] == 1 || mode[0] == 2 || mode[1] == 1 || mode[1] == 2) - fprintf(stderr, "%s: bad EN_54M_PLL or bad EN_96M_PLL\n", - __func__); -} - -static void omap_prcm_dpll_update(struct omap_prcm_s *s) -{ - omap_clk dpll = omap_findclk(s->mpu, "dpll"); - omap_clk dpll_x2 = omap_findclk(s->mpu, "dpll"); - omap_clk core = omap_findclk(s->mpu, "core_clk"); - int mode = (s->clken[9] >> 0) & 3; - int mult, div; - - mult = (s->clksel[5] >> 12) & 0x3ff; - div = (s->clksel[5] >> 8) & 0xf; - if (mult == 0 || mult == 1) - mode = 1; /* Bypass */ - - s->dpll_lock = 0; - switch (mode) { - case 0: - fprintf(stderr, "%s: bad EN_DPLL\n", __func__); - break; - case 1: /* Low-power bypass mode (Default) */ - case 2: /* Fast-relock bypass mode */ - omap_clk_setrate(dpll, 1, 1); - omap_clk_setrate(dpll_x2, 1, 1); - break; - case 3: /* Lock mode */ - s->dpll_lock = 1; /* After 20 FINT cycles (ref_clk / (div + 1)). */ - - omap_clk_setrate(dpll, div + 1, mult); - omap_clk_setrate(dpll_x2, div + 1, mult * 2); - break; - } - - switch ((s->clksel[6] >> 0) & 3) { - case 0: - omap_clk_reparent(core, omap_findclk(s->mpu, "clk32-kHz")); - break; - case 1: - omap_clk_reparent(core, dpll); - break; - case 2: - /* Default */ - omap_clk_reparent(core, dpll_x2); - break; - case 3: - fprintf(stderr, "%s: bad CORE_CLK_SRC\n", __func__); - break; - } -} - -static void omap_prcm_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_prcm_s *s = opaque; - - if (size != 4) { - omap_badwidth_write32(opaque, addr, value); - return; - } - - switch (addr) { - case 0x000: /* PRCM_REVISION */ - case 0x054: /* PRCM_VOLTST */ - case 0x084: /* PRCM_CLKCFG_STATUS */ - case 0x1e4: /* PM_PWSTST_MPU */ - case 0x220: /* CM_IDLEST1_CORE */ - case 0x224: /* CM_IDLEST2_CORE */ - case 0x22c: /* CM_IDLEST4_CORE */ - case 0x2c8: /* PM_WKDEP_CORE */ - case 0x2e4: /* PM_PWSTST_CORE */ - case 0x320: /* CM_IDLEST_GFX */ - case 0x3e4: /* PM_PWSTST_GFX */ - case 0x420: /* CM_IDLEST_WKUP */ - case 0x520: /* CM_IDLEST_CKGEN */ - case 0x820: /* CM_IDLEST_DSP */ - case 0x8e4: /* PM_PWSTST_DSP */ - OMAP_RO_REG(addr); - return; - - case 0x010: /* PRCM_SYSCONFIG */ - s->sysconfig = value & 1; - break; - - case 0x018: /* PRCM_IRQSTATUS_MPU */ - s->irqst[0] &= ~value; - omap_prcm_int_update(s, 0); - break; - case 0x01c: /* PRCM_IRQENABLE_MPU */ - s->irqen[0] = value & 0x3f; - omap_prcm_int_update(s, 0); - break; - - case 0x050: /* PRCM_VOLTCTRL */ - s->voltctrl = value & 0xf1c3; - break; - - case 0x060: /* PRCM_CLKSRC_CTRL */ - s->clksrc[0] = value & 0xdb; - /* TODO update clocks */ - break; - - case 0x070: /* PRCM_CLKOUT_CTRL */ - s->clkout[0] = value & 0xbbbb; - /* TODO update clocks */ - break; - - case 0x078: /* PRCM_CLKEMUL_CTRL */ - s->clkemul[0] = value & 1; - /* TODO 
update clocks */ - break; - - case 0x080: /* PRCM_CLKCFG_CTRL */ - break; - - case 0x090: /* PRCM_VOLTSETUP */ - s->setuptime[0] = value & 0xffff; - break; - case 0x094: /* PRCM_CLKSSETUP */ - s->setuptime[1] = value & 0xffff; - break; - - case 0x098: /* PRCM_POLCTRL */ - s->clkpol[0] = value & 0x701; - break; - - case 0x0b0: /* GENERAL_PURPOSE1 */ - case 0x0b4: /* GENERAL_PURPOSE2 */ - case 0x0b8: /* GENERAL_PURPOSE3 */ - case 0x0bc: /* GENERAL_PURPOSE4 */ - case 0x0c0: /* GENERAL_PURPOSE5 */ - case 0x0c4: /* GENERAL_PURPOSE6 */ - case 0x0c8: /* GENERAL_PURPOSE7 */ - case 0x0cc: /* GENERAL_PURPOSE8 */ - case 0x0d0: /* GENERAL_PURPOSE9 */ - case 0x0d4: /* GENERAL_PURPOSE10 */ - case 0x0d8: /* GENERAL_PURPOSE11 */ - case 0x0dc: /* GENERAL_PURPOSE12 */ - case 0x0e0: /* GENERAL_PURPOSE13 */ - case 0x0e4: /* GENERAL_PURPOSE14 */ - case 0x0e8: /* GENERAL_PURPOSE15 */ - case 0x0ec: /* GENERAL_PURPOSE16 */ - case 0x0f0: /* GENERAL_PURPOSE17 */ - case 0x0f4: /* GENERAL_PURPOSE18 */ - case 0x0f8: /* GENERAL_PURPOSE19 */ - case 0x0fc: /* GENERAL_PURPOSE20 */ - s->scratch[(addr - 0xb0) >> 2] = value; - break; - - case 0x140: /* CM_CLKSEL_MPU */ - s->clksel[0] = value & 0x1f; - /* TODO update clocks */ - break; - case 0x148: /* CM_CLKSTCTRL_MPU */ - s->clkctrl[0] = value & 0x1f; - break; - - case 0x158: /* RM_RSTST_MPU */ - s->rst[0] &= ~value; - break; - case 0x1c8: /* PM_WKDEP_MPU */ - s->wkup[0] = value & 0x15; - break; - - case 0x1d4: /* PM_EVGENCTRL_MPU */ - s->ev = value & 0x1f; - break; - case 0x1d8: /* PM_EVEGENONTIM_MPU */ - s->evtime[0] = value; - break; - case 0x1dc: /* PM_EVEGENOFFTIM_MPU */ - s->evtime[1] = value; - break; - - case 0x1e0: /* PM_PWSTCTRL_MPU */ - s->power[0] = value & 0xc0f; - break; - - case 0x200: /* CM_FCLKEN1_CORE */ - s->clken[0] = value & 0xbfffffff; - /* TODO update clocks */ - /* The EN_EAC bit only gets/puts func_96m_clk. */ - break; - case 0x204: /* CM_FCLKEN2_CORE */ - s->clken[1] = value & 0x00000007; - /* TODO update clocks */ - break; - case 0x210: /* CM_ICLKEN1_CORE */ - s->clken[2] = value & 0xfffffff9; - /* TODO update clocks */ - /* The EN_EAC bit only gets/puts core_l4_iclk. 
*/ - break; - case 0x214: /* CM_ICLKEN2_CORE */ - s->clken[3] = value & 0x00000007; - /* TODO update clocks */ - break; - case 0x21c: /* CM_ICLKEN4_CORE */ - s->clken[4] = value & 0x0000001f; - /* TODO update clocks */ - break; - - case 0x230: /* CM_AUTOIDLE1_CORE */ - s->clkidle[0] = value & 0xfffffff9; - /* TODO update clocks */ - break; - case 0x234: /* CM_AUTOIDLE2_CORE */ - s->clkidle[1] = value & 0x00000007; - /* TODO update clocks */ - break; - case 0x238: /* CM_AUTOIDLE3_CORE */ - s->clkidle[2] = value & 0x00000007; - /* TODO update clocks */ - break; - case 0x23c: /* CM_AUTOIDLE4_CORE */ - s->clkidle[3] = value & 0x0000001f; - /* TODO update clocks */ - break; - - case 0x240: /* CM_CLKSEL1_CORE */ - s->clksel[1] = value & 0x0fffbf7f; - /* TODO update clocks */ - break; - - case 0x244: /* CM_CLKSEL2_CORE */ - s->clksel[2] = value & 0x00fffffc; - /* TODO update clocks */ - break; - - case 0x248: /* CM_CLKSTCTRL_CORE */ - s->clkctrl[1] = value & 0x7; - break; - - case 0x2a0: /* PM_WKEN1_CORE */ - s->wken[0] = value & 0x04667ff8; - break; - case 0x2a4: /* PM_WKEN2_CORE */ - s->wken[1] = value & 0x00000005; - break; - - case 0x2b0: /* PM_WKST1_CORE */ - s->wkst[0] &= ~value; - break; - case 0x2b4: /* PM_WKST2_CORE */ - s->wkst[1] &= ~value; - break; - - case 0x2e0: /* PM_PWSTCTRL_CORE */ - s->power[1] = (value & 0x00fc3f) | (1 << 2); - break; - - case 0x300: /* CM_FCLKEN_GFX */ - s->clken[5] = value & 6; - /* TODO update clocks */ - break; - case 0x310: /* CM_ICLKEN_GFX */ - s->clken[6] = value & 1; - /* TODO update clocks */ - break; - case 0x340: /* CM_CLKSEL_GFX */ - s->clksel[3] = value & 7; - /* TODO update clocks */ - break; - case 0x348: /* CM_CLKSTCTRL_GFX */ - s->clkctrl[2] = value & 1; - break; - case 0x350: /* RM_RSTCTRL_GFX */ - s->rstctrl[0] = value & 1; - /* TODO: reset */ - break; - case 0x358: /* RM_RSTST_GFX */ - s->rst[1] &= ~value; - break; - case 0x3c8: /* PM_WKDEP_GFX */ - s->wkup[1] = value & 0x13; - break; - case 0x3e0: /* PM_PWSTCTRL_GFX */ - s->power[2] = (value & 0x00c0f) | (3 << 2); - break; - - case 0x400: /* CM_FCLKEN_WKUP */ - s->clken[7] = value & 0xd; - /* TODO update clocks */ - break; - case 0x410: /* CM_ICLKEN_WKUP */ - s->clken[8] = value & 0x3f; - /* TODO update clocks */ - break; - case 0x430: /* CM_AUTOIDLE_WKUP */ - s->clkidle[4] = value & 0x0000003f; - /* TODO update clocks */ - break; - case 0x440: /* CM_CLKSEL_WKUP */ - s->clksel[4] = value & 3; - /* TODO update clocks */ - break; - case 0x450: /* RM_RSTCTRL_WKUP */ - /* TODO: reset */ - if (value & 2) - qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); - break; - case 0x454: /* RM_RSTTIME_WKUP */ - s->rsttime_wkup = value & 0x1fff; - break; - case 0x458: /* RM_RSTST_WKUP */ - s->rst[2] &= ~value; - break; - case 0x4a0: /* PM_WKEN_WKUP */ - s->wken[2] = value & 0x00000005; - break; - case 0x4b0: /* PM_WKST_WKUP */ - s->wkst[2] &= ~value; - break; - - case 0x500: /* CM_CLKEN_PLL */ - if (value & 0xffffff30) - fprintf(stderr, "%s: write 0s in CM_CLKEN_PLL for " - "future compatibility\n", __func__); - if ((s->clken[9] ^ value) & 0xcc) { - s->clken[9] &= ~0xcc; - s->clken[9] |= value & 0xcc; - omap_prcm_apll_update(s); - } - if ((s->clken[9] ^ value) & 3) { - s->clken[9] &= ~3; - s->clken[9] |= value & 3; - omap_prcm_dpll_update(s); - } - break; - case 0x530: /* CM_AUTOIDLE_PLL */ - s->clkidle[5] = value & 0x000000cf; - /* TODO update clocks */ - break; - case 0x540: /* CM_CLKSEL1_PLL */ - if (value & 0xfc4000d7) - fprintf(stderr, "%s: write 0s in CM_CLKSEL1_PLL for " - "future 
compatibility\n", __func__); - if ((s->clksel[5] ^ value) & 0x003fff00) { - s->clksel[5] = value & 0x03bfff28; - omap_prcm_dpll_update(s); - } - /* TODO update the other clocks */ - - s->clksel[5] = value & 0x03bfff28; - break; - case 0x544: /* CM_CLKSEL2_PLL */ - if (value & ~3) - fprintf(stderr, "%s: write 0s in CM_CLKSEL2_PLL[31:2] for " - "future compatibility\n", __func__); - if (s->clksel[6] != (value & 3)) { - s->clksel[6] = value & 3; - omap_prcm_dpll_update(s); - } - break; - - case 0x800: /* CM_FCLKEN_DSP */ - s->clken[10] = value & 0x501; - /* TODO update clocks */ - break; - case 0x810: /* CM_ICLKEN_DSP */ - s->clken[11] = value & 0x2; - /* TODO update clocks */ - break; - case 0x830: /* CM_AUTOIDLE_DSP */ - s->clkidle[6] = value & 0x2; - /* TODO update clocks */ - break; - case 0x840: /* CM_CLKSEL_DSP */ - s->clksel[7] = value & 0x3fff; - /* TODO update clocks */ - break; - case 0x848: /* CM_CLKSTCTRL_DSP */ - s->clkctrl[3] = value & 0x101; - break; - case 0x850: /* RM_RSTCTRL_DSP */ - /* TODO: reset */ - break; - case 0x858: /* RM_RSTST_DSP */ - s->rst[3] &= ~value; - break; - case 0x8c8: /* PM_WKDEP_DSP */ - s->wkup[2] = value & 0x13; - break; - case 0x8e0: /* PM_PWSTCTRL_DSP */ - s->power[3] = (value & 0x03017) | (3 << 2); - break; - - case 0x8f0: /* PRCM_IRQSTATUS_DSP */ - s->irqst[1] &= ~value; - omap_prcm_int_update(s, 1); - break; - case 0x8f4: /* PRCM_IRQENABLE_DSP */ - s->irqen[1] = value & 0x7; - omap_prcm_int_update(s, 1); - break; - - case 0x8f8: /* PRCM_IRQSTATUS_IVA */ - s->irqst[2] &= ~value; - omap_prcm_int_update(s, 2); - break; - case 0x8fc: /* PRCM_IRQENABLE_IVA */ - s->irqen[2] = value & 0x7; - omap_prcm_int_update(s, 2); - break; - - default: - OMAP_BAD_REG(addr); - return; - } -} - -static const MemoryRegionOps omap_prcm_ops = { - .read = omap_prcm_read, - .write = omap_prcm_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void omap_prcm_reset(struct omap_prcm_s *s) -{ - s->sysconfig = 0; - s->irqst[0] = 0; - s->irqst[1] = 0; - s->irqst[2] = 0; - s->irqen[0] = 0; - s->irqen[1] = 0; - s->irqen[2] = 0; - s->voltctrl = 0x1040; - s->ev = 0x14; - s->evtime[0] = 0; - s->evtime[1] = 0; - s->clkctrl[0] = 0; - s->clkctrl[1] = 0; - s->clkctrl[2] = 0; - s->clkctrl[3] = 0; - s->clken[1] = 7; - s->clken[3] = 7; - s->clken[4] = 0; - s->clken[5] = 0; - s->clken[6] = 0; - s->clken[7] = 0xc; - s->clken[8] = 0x3e; - s->clken[9] = 0x0d; - s->clken[10] = 0; - s->clken[11] = 0; - s->clkidle[0] = 0; - s->clkidle[2] = 7; - s->clkidle[3] = 0; - s->clkidle[4] = 0; - s->clkidle[5] = 0x0c; - s->clkidle[6] = 0; - s->clksel[0] = 0x01; - s->clksel[1] = 0x02100121; - s->clksel[2] = 0x00000000; - s->clksel[3] = 0x01; - s->clksel[4] = 0; - s->clksel[7] = 0x0121; - s->wkup[0] = 0x15; - s->wkup[1] = 0x13; - s->wkup[2] = 0x13; - s->wken[0] = 0x04667ff8; - s->wken[1] = 0x00000005; - s->wken[2] = 5; - s->wkst[0] = 0; - s->wkst[1] = 0; - s->wkst[2] = 0; - s->power[0] = 0x00c; - s->power[1] = 4; - s->power[2] = 0x0000c; - s->power[3] = 0x14; - s->rstctrl[0] = 1; - s->rst[3] = 1; - omap_prcm_apll_update(s); - omap_prcm_dpll_update(s); -} - -static void omap_prcm_coldreset(struct omap_prcm_s *s) -{ - s->setuptime[0] = 0; - s->setuptime[1] = 0; - memset(&s->scratch, 0, sizeof(s->scratch)); - s->rst[0] = 0x01; - s->rst[1] = 0x00; - s->rst[2] = 0x01; - s->clken[0] = 0; - s->clken[2] = 0; - s->clkidle[1] = 0; - s->clksel[5] = 0; - s->clksel[6] = 2; - s->clksrc[0] = 0x43; - s->clkout[0] = 0x0303; - s->clkemul[0] = 0; - s->clkpol[0] = 0x100; - s->rsttime_wkup = 0x1002; - - omap_prcm_reset(s); 
-} - -static struct omap_prcm_s *omap_prcm_init(struct omap_target_agent_s *ta, - qemu_irq mpu_int, qemu_irq dsp_int, qemu_irq iva_int, - struct omap_mpu_state_s *mpu) -{ - struct omap_prcm_s *s = g_new0(struct omap_prcm_s, 1); - - s->irq[0] = mpu_int; - s->irq[1] = dsp_int; - s->irq[2] = iva_int; - s->mpu = mpu; - omap_prcm_coldreset(s); - - memory_region_init_io(&s->iomem0, NULL, &omap_prcm_ops, s, "omap.pcrm0", - omap_l4_region_size(ta, 0)); - memory_region_init_io(&s->iomem1, NULL, &omap_prcm_ops, s, "omap.pcrm1", - omap_l4_region_size(ta, 1)); - omap_l4_attach(ta, 0, &s->iomem0); - omap_l4_attach(ta, 1, &s->iomem1); - - return s; -} - -/* System and Pinout control */ -struct omap_sysctl_s { - struct omap_mpu_state_s *mpu; - MemoryRegion iomem; - - uint32_t sysconfig; - uint32_t devconfig; - uint32_t psaconfig; - uint32_t padconf[0x45]; - uint8_t obs; - uint32_t msuspendmux[5]; -}; - -static uint32_t omap_sysctl_read8(void *opaque, hwaddr addr) -{ - - struct omap_sysctl_s *s = opaque; - int pad_offset, byte_offset; - int value; - - switch (addr) { - case 0x030 ... 0x140: /* CONTROL_PADCONF - only used in the POP */ - pad_offset = (addr - 0x30) >> 2; - byte_offset = (addr - 0x30) & (4 - 1); - - value = s->padconf[pad_offset]; - value = (value >> (byte_offset * 8)) & 0xff; - - return value; - - default: - break; - } - - OMAP_BAD_REG(addr); - return 0; -} - -static uint32_t omap_sysctl_read(void *opaque, hwaddr addr) -{ - struct omap_sysctl_s *s = opaque; - - switch (addr) { - case 0x000: /* CONTROL_REVISION */ - return 0x20; - - case 0x010: /* CONTROL_SYSCONFIG */ - return s->sysconfig; - - case 0x030 ... 0x140: /* CONTROL_PADCONF - only used in the POP */ - return s->padconf[(addr - 0x30) >> 2]; - - case 0x270: /* CONTROL_DEBOBS */ - return s->obs; - - case 0x274: /* CONTROL_DEVCONF */ - return s->devconfig; - - case 0x28c: /* CONTROL_EMU_SUPPORT */ - return 0; - - case 0x290: /* CONTROL_MSUSPENDMUX_0 */ - return s->msuspendmux[0]; - case 0x294: /* CONTROL_MSUSPENDMUX_1 */ - return s->msuspendmux[1]; - case 0x298: /* CONTROL_MSUSPENDMUX_2 */ - return s->msuspendmux[2]; - case 0x29c: /* CONTROL_MSUSPENDMUX_3 */ - return s->msuspendmux[3]; - case 0x2a0: /* CONTROL_MSUSPENDMUX_4 */ - return s->msuspendmux[4]; - case 0x2a4: /* CONTROL_MSUSPENDMUX_5 */ - return 0; - - case 0x2b8: /* CONTROL_PSA_CTRL */ - return s->psaconfig; - case 0x2bc: /* CONTROL_PSA_CMD */ - case 0x2c0: /* CONTROL_PSA_VALUE */ - return 0; - - case 0x2b0: /* CONTROL_SEC_CTRL */ - return 0x800000f1; - case 0x2d0: /* CONTROL_SEC_EMU */ - return 0x80000015; - case 0x2d4: /* CONTROL_SEC_TAP */ - return 0x8000007f; - case 0x2b4: /* CONTROL_SEC_TEST */ - case 0x2f0: /* CONTROL_SEC_STATUS */ - case 0x2f4: /* CONTROL_SEC_ERR_STATUS */ - /* Secure mode is not present on general-pusrpose device. Outside - * secure mode these values cannot be read or written. */ - return 0; - - case 0x2d8: /* CONTROL_OCM_RAM_PERM */ - return 0xff; - case 0x2dc: /* CONTROL_OCM_PUB_RAM_ADD */ - case 0x2e0: /* CONTROL_EXT_SEC_RAM_START_ADD */ - case 0x2e4: /* CONTROL_EXT_SEC_RAM_STOP_ADD */ - /* No secure mode so no Extended Secure RAM present. 
*/ - return 0; - - case 0x2f8: /* CONTROL_STATUS */ - /* Device Type => General-purpose */ - return 0x0300; - case 0x2fc: /* CONTROL_GENERAL_PURPOSE_STATUS */ - - case 0x300: /* CONTROL_RPUB_KEY_H_0 */ - case 0x304: /* CONTROL_RPUB_KEY_H_1 */ - case 0x308: /* CONTROL_RPUB_KEY_H_2 */ - case 0x30c: /* CONTROL_RPUB_KEY_H_3 */ - return 0xdecafbad; - - case 0x310: /* CONTROL_RAND_KEY_0 */ - case 0x314: /* CONTROL_RAND_KEY_1 */ - case 0x318: /* CONTROL_RAND_KEY_2 */ - case 0x31c: /* CONTROL_RAND_KEY_3 */ - case 0x320: /* CONTROL_CUST_KEY_0 */ - case 0x324: /* CONTROL_CUST_KEY_1 */ - case 0x330: /* CONTROL_TEST_KEY_0 */ - case 0x334: /* CONTROL_TEST_KEY_1 */ - case 0x338: /* CONTROL_TEST_KEY_2 */ - case 0x33c: /* CONTROL_TEST_KEY_3 */ - case 0x340: /* CONTROL_TEST_KEY_4 */ - case 0x344: /* CONTROL_TEST_KEY_5 */ - case 0x348: /* CONTROL_TEST_KEY_6 */ - case 0x34c: /* CONTROL_TEST_KEY_7 */ - case 0x350: /* CONTROL_TEST_KEY_8 */ - case 0x354: /* CONTROL_TEST_KEY_9 */ - /* Can only be accessed in secure mode and when C_FieldAccEnable - * bit is set in CONTROL_SEC_CTRL. - * TODO: otherwise an interconnect access error is generated. */ - return 0; - } - - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_sysctl_write8(void *opaque, hwaddr addr, uint32_t value) -{ - struct omap_sysctl_s *s = opaque; - int pad_offset, byte_offset; - int prev_value; - - switch (addr) { - case 0x030 ... 0x140: /* CONTROL_PADCONF - only used in the POP */ - pad_offset = (addr - 0x30) >> 2; - byte_offset = (addr - 0x30) & (4 - 1); - - prev_value = s->padconf[pad_offset]; - prev_value &= ~(0xff << (byte_offset * 8)); - prev_value |= ((value & 0x1f1f1f1f) << (byte_offset * 8)) & 0x1f1f1f1f; - s->padconf[pad_offset] = prev_value; - break; - - default: - OMAP_BAD_REG(addr); - break; - } -} - -static void omap_sysctl_write(void *opaque, hwaddr addr, uint32_t value) -{ - struct omap_sysctl_s *s = opaque; - - switch (addr) { - case 0x000: /* CONTROL_REVISION */ - case 0x2a4: /* CONTROL_MSUSPENDMUX_5 */ - case 0x2c0: /* CONTROL_PSA_VALUE */ - case 0x2f8: /* CONTROL_STATUS */ - case 0x2fc: /* CONTROL_GENERAL_PURPOSE_STATUS */ - case 0x300: /* CONTROL_RPUB_KEY_H_0 */ - case 0x304: /* CONTROL_RPUB_KEY_H_1 */ - case 0x308: /* CONTROL_RPUB_KEY_H_2 */ - case 0x30c: /* CONTROL_RPUB_KEY_H_3 */ - case 0x310: /* CONTROL_RAND_KEY_0 */ - case 0x314: /* CONTROL_RAND_KEY_1 */ - case 0x318: /* CONTROL_RAND_KEY_2 */ - case 0x31c: /* CONTROL_RAND_KEY_3 */ - case 0x320: /* CONTROL_CUST_KEY_0 */ - case 0x324: /* CONTROL_CUST_KEY_1 */ - case 0x330: /* CONTROL_TEST_KEY_0 */ - case 0x334: /* CONTROL_TEST_KEY_1 */ - case 0x338: /* CONTROL_TEST_KEY_2 */ - case 0x33c: /* CONTROL_TEST_KEY_3 */ - case 0x340: /* CONTROL_TEST_KEY_4 */ - case 0x344: /* CONTROL_TEST_KEY_5 */ - case 0x348: /* CONTROL_TEST_KEY_6 */ - case 0x34c: /* CONTROL_TEST_KEY_7 */ - case 0x350: /* CONTROL_TEST_KEY_8 */ - case 0x354: /* CONTROL_TEST_KEY_9 */ - OMAP_RO_REG(addr); - return; - - case 0x010: /* CONTROL_SYSCONFIG */ - s->sysconfig = value & 0x1e; - break; - - case 0x030 ... 
0x140: /* CONTROL_PADCONF - only used in the POP */ - /* XXX: should check constant bits */ - s->padconf[(addr - 0x30) >> 2] = value & 0x1f1f1f1f; - break; - - case 0x270: /* CONTROL_DEBOBS */ - s->obs = value & 0xff; - break; - - case 0x274: /* CONTROL_DEVCONF */ - s->devconfig = value & 0xffffc7ff; - break; - - case 0x28c: /* CONTROL_EMU_SUPPORT */ - break; - - case 0x290: /* CONTROL_MSUSPENDMUX_0 */ - s->msuspendmux[0] = value & 0x3fffffff; - break; - case 0x294: /* CONTROL_MSUSPENDMUX_1 */ - s->msuspendmux[1] = value & 0x3fffffff; - break; - case 0x298: /* CONTROL_MSUSPENDMUX_2 */ - s->msuspendmux[2] = value & 0x3fffffff; - break; - case 0x29c: /* CONTROL_MSUSPENDMUX_3 */ - s->msuspendmux[3] = value & 0x3fffffff; - break; - case 0x2a0: /* CONTROL_MSUSPENDMUX_4 */ - s->msuspendmux[4] = value & 0x3fffffff; - break; - - case 0x2b8: /* CONTROL_PSA_CTRL */ - s->psaconfig = value & 0x1c; - s->psaconfig |= (value & 0x20) ? 2 : 1; - break; - case 0x2bc: /* CONTROL_PSA_CMD */ - break; - - case 0x2b0: /* CONTROL_SEC_CTRL */ - case 0x2b4: /* CONTROL_SEC_TEST */ - case 0x2d0: /* CONTROL_SEC_EMU */ - case 0x2d4: /* CONTROL_SEC_TAP */ - case 0x2d8: /* CONTROL_OCM_RAM_PERM */ - case 0x2dc: /* CONTROL_OCM_PUB_RAM_ADD */ - case 0x2e0: /* CONTROL_EXT_SEC_RAM_START_ADD */ - case 0x2e4: /* CONTROL_EXT_SEC_RAM_STOP_ADD */ - case 0x2f0: /* CONTROL_SEC_STATUS */ - case 0x2f4: /* CONTROL_SEC_ERR_STATUS */ - break; - - default: - OMAP_BAD_REG(addr); - return; - } -} - -static uint64_t omap_sysctl_readfn(void *opaque, hwaddr addr, - unsigned size) -{ - switch (size) { - case 1: - return omap_sysctl_read8(opaque, addr); - case 2: - return omap_badwidth_read32(opaque, addr); /* TODO */ - case 4: - return omap_sysctl_read(opaque, addr); - default: - g_assert_not_reached(); - } -} - -static void omap_sysctl_writefn(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - switch (size) { - case 1: - omap_sysctl_write8(opaque, addr, value); - break; - case 2: - omap_badwidth_write32(opaque, addr, value); /* TODO */ - break; - case 4: - omap_sysctl_write(opaque, addr, value); - break; - default: - g_assert_not_reached(); - } -} - -static const MemoryRegionOps omap_sysctl_ops = { - .read = omap_sysctl_readfn, - .write = omap_sysctl_writefn, - .valid.min_access_size = 1, - .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void omap_sysctl_reset(struct omap_sysctl_s *s) -{ - /* (power-on reset) */ - s->sysconfig = 0; - s->obs = 0; - s->devconfig = 0x0c000000; - s->msuspendmux[0] = 0x00000000; - s->msuspendmux[1] = 0x00000000; - s->msuspendmux[2] = 0x00000000; - s->msuspendmux[3] = 0x00000000; - s->msuspendmux[4] = 0x00000000; - s->psaconfig = 1; - - s->padconf[0x00] = 0x000f0f0f; - s->padconf[0x01] = 0x00000000; - s->padconf[0x02] = 0x00000000; - s->padconf[0x03] = 0x00000000; - s->padconf[0x04] = 0x00000000; - s->padconf[0x05] = 0x00000000; - s->padconf[0x06] = 0x00000000; - s->padconf[0x07] = 0x00000000; - s->padconf[0x08] = 0x08080800; - s->padconf[0x09] = 0x08080808; - s->padconf[0x0a] = 0x08080808; - s->padconf[0x0b] = 0x08080808; - s->padconf[0x0c] = 0x08080808; - s->padconf[0x0d] = 0x08080800; - s->padconf[0x0e] = 0x08080808; - s->padconf[0x0f] = 0x08080808; - s->padconf[0x10] = 0x18181808; /* | 0x07070700 if SBoot3 */ - s->padconf[0x11] = 0x18181818; /* | 0x07070707 if SBoot3 */ - s->padconf[0x12] = 0x18181818; /* | 0x07070707 if SBoot3 */ - s->padconf[0x13] = 0x18181818; /* | 0x07070707 if SBoot3 */ - s->padconf[0x14] = 0x18181818; /* | 0x00070707 if SBoot3 */ - 
s->padconf[0x15] = 0x18181818; - s->padconf[0x16] = 0x18181818; /* | 0x07000000 if SBoot3 */ - s->padconf[0x17] = 0x1f001f00; - s->padconf[0x18] = 0x1f1f1f1f; - s->padconf[0x19] = 0x00000000; - s->padconf[0x1a] = 0x1f180000; - s->padconf[0x1b] = 0x00001f1f; - s->padconf[0x1c] = 0x1f001f00; - s->padconf[0x1d] = 0x00000000; - s->padconf[0x1e] = 0x00000000; - s->padconf[0x1f] = 0x08000000; - s->padconf[0x20] = 0x08080808; - s->padconf[0x21] = 0x08080808; - s->padconf[0x22] = 0x0f080808; - s->padconf[0x23] = 0x0f0f0f0f; - s->padconf[0x24] = 0x000f0f0f; - s->padconf[0x25] = 0x1f1f1f0f; - s->padconf[0x26] = 0x080f0f1f; - s->padconf[0x27] = 0x070f1808; - s->padconf[0x28] = 0x0f070707; - s->padconf[0x29] = 0x000f0f1f; - s->padconf[0x2a] = 0x0f0f0f1f; - s->padconf[0x2b] = 0x08000000; - s->padconf[0x2c] = 0x0000001f; - s->padconf[0x2d] = 0x0f0f1f00; - s->padconf[0x2e] = 0x1f1f0f0f; - s->padconf[0x2f] = 0x0f1f1f1f; - s->padconf[0x30] = 0x0f0f0f0f; - s->padconf[0x31] = 0x0f1f0f1f; - s->padconf[0x32] = 0x0f0f0f0f; - s->padconf[0x33] = 0x0f1f0f1f; - s->padconf[0x34] = 0x1f1f0f0f; - s->padconf[0x35] = 0x0f0f1f1f; - s->padconf[0x36] = 0x0f0f1f0f; - s->padconf[0x37] = 0x0f0f0f0f; - s->padconf[0x38] = 0x1f18180f; - s->padconf[0x39] = 0x1f1f1f1f; - s->padconf[0x3a] = 0x00001f1f; - s->padconf[0x3b] = 0x00000000; - s->padconf[0x3c] = 0x00000000; - s->padconf[0x3d] = 0x0f0f0f0f; - s->padconf[0x3e] = 0x18000f0f; - s->padconf[0x3f] = 0x00070000; - s->padconf[0x40] = 0x00000707; - s->padconf[0x41] = 0x0f1f0700; - s->padconf[0x42] = 0x1f1f070f; - s->padconf[0x43] = 0x0008081f; - s->padconf[0x44] = 0x00000800; -} - -static struct omap_sysctl_s *omap_sysctl_init(struct omap_target_agent_s *ta, - omap_clk iclk, struct omap_mpu_state_s *mpu) -{ - struct omap_sysctl_s *s = g_new0(struct omap_sysctl_s, 1); - - s->mpu = mpu; - omap_sysctl_reset(s); - - memory_region_init_io(&s->iomem, NULL, &omap_sysctl_ops, s, "omap.sysctl", - omap_l4_region_size(ta, 0)); - omap_l4_attach(ta, 0, &s->iomem); - - return s; -} - -/* General chip reset */ -static void omap2_mpu_reset(void *opaque) -{ - struct omap_mpu_state_s *mpu = opaque; - - omap_dma_reset(mpu->dma); - omap_prcm_reset(mpu->prcm); - omap_sysctl_reset(mpu->sysc); - omap_gp_timer_reset(mpu->gptimer[0]); - omap_gp_timer_reset(mpu->gptimer[1]); - omap_gp_timer_reset(mpu->gptimer[2]); - omap_gp_timer_reset(mpu->gptimer[3]); - omap_gp_timer_reset(mpu->gptimer[4]); - omap_gp_timer_reset(mpu->gptimer[5]); - omap_gp_timer_reset(mpu->gptimer[6]); - omap_gp_timer_reset(mpu->gptimer[7]); - omap_gp_timer_reset(mpu->gptimer[8]); - omap_gp_timer_reset(mpu->gptimer[9]); - omap_gp_timer_reset(mpu->gptimer[10]); - omap_gp_timer_reset(mpu->gptimer[11]); - omap_synctimer_reset(mpu->synctimer); - omap_sdrc_reset(mpu->sdrc); - omap_gpmc_reset(mpu->gpmc); - omap_dss_reset(mpu->dss); - omap_uart_reset(mpu->uart[0]); - omap_uart_reset(mpu->uart[1]); - omap_uart_reset(mpu->uart[2]); - omap_mmc_reset(mpu->mmc); - omap_mcspi_reset(mpu->mcspi[0]); - omap_mcspi_reset(mpu->mcspi[1]); - cpu_reset(CPU(mpu->cpu)); -} - -static int omap2_validate_addr(struct omap_mpu_state_s *s, - hwaddr addr) -{ - return 1; -} - -static const struct dma_irq_map omap2_dma_irq_map[] = { - { 0, OMAP_INT_24XX_SDMA_IRQ0 }, - { 0, OMAP_INT_24XX_SDMA_IRQ1 }, - { 0, OMAP_INT_24XX_SDMA_IRQ2 }, - { 0, OMAP_INT_24XX_SDMA_IRQ3 }, -}; - -struct omap_mpu_state_s *omap2420_mpu_init(MemoryRegion *sdram, - const char *cpu_type) -{ - struct omap_mpu_state_s *s = g_new0(struct omap_mpu_state_s, 1); - qemu_irq dma_irqs[4]; - DriveInfo *dinfo; 
- int i; - SysBusDevice *busdev; - struct omap_target_agent_s *ta; - MemoryRegion *sysmem = get_system_memory(); - - /* Core */ - s->mpu_model = omap2420; - s->cpu = ARM_CPU(cpu_create(cpu_type)); - s->sram_size = OMAP242X_SRAM_SIZE; - - s->wakeup = qemu_allocate_irq(omap_mpu_wakeup, s, 0); - - /* Clocks */ - omap_clk_init(s); - - /* Memory-mapped stuff */ - memory_region_init_ram(&s->sram, NULL, "omap2.sram", s->sram_size, - &error_fatal); - memory_region_add_subregion(sysmem, OMAP2_SRAM_BASE, &s->sram); - - s->l4 = omap_l4_init(sysmem, OMAP2_L4_BASE, 54); - - /* Actually mapped at any 2K boundary in the ARM11 private-peripheral if */ - s->ih[0] = qdev_new("omap2-intc"); - qdev_prop_set_uint8(s->ih[0], "revision", 0x21); - omap_intc_set_fclk(OMAP_INTC(s->ih[0]), omap_findclk(s, "mpu_intc_fclk")); - omap_intc_set_iclk(OMAP_INTC(s->ih[0]), omap_findclk(s, "mpu_intc_iclk")); - busdev = SYS_BUS_DEVICE(s->ih[0]); - sysbus_realize_and_unref(busdev, &error_fatal); - sysbus_connect_irq(busdev, 0, - qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ)); - sysbus_connect_irq(busdev, 1, - qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_FIQ)); - sysbus_mmio_map(busdev, 0, 0x480fe000); - s->prcm = omap_prcm_init(omap_l4tao(s->l4, 3), - qdev_get_gpio_in(s->ih[0], - OMAP_INT_24XX_PRCM_MPU_IRQ), - NULL, NULL, s); - - s->sysc = omap_sysctl_init(omap_l4tao(s->l4, 1), - omap_findclk(s, "omapctrl_iclk"), s); - - for (i = 0; i < 4; i++) { - dma_irqs[i] = qdev_get_gpio_in(s->ih[omap2_dma_irq_map[i].ih], - omap2_dma_irq_map[i].intr); - } - s->dma = omap_dma4_init(0x48056000, dma_irqs, sysmem, s, 256, 32, - omap_findclk(s, "sdma_iclk"), - omap_findclk(s, "sdma_fclk")); - s->port->addr_valid = omap2_validate_addr; - - /* Register SDRAM and SRAM ports for fast DMA transfers. */ - soc_dma_port_add_mem(s->dma, memory_region_get_ram_ptr(sdram), - OMAP2_Q2_BASE, memory_region_size(sdram)); - soc_dma_port_add_mem(s->dma, memory_region_get_ram_ptr(&s->sram), - OMAP2_SRAM_BASE, s->sram_size); - - s->uart[0] = omap2_uart_init(sysmem, omap_l4ta(s->l4, 19), - qdev_get_gpio_in(s->ih[0], - OMAP_INT_24XX_UART1_IRQ), - omap_findclk(s, "uart1_fclk"), - omap_findclk(s, "uart1_iclk"), - s->drq[OMAP24XX_DMA_UART1_TX], - s->drq[OMAP24XX_DMA_UART1_RX], - "uart1", - serial_hd(0)); - s->uart[1] = omap2_uart_init(sysmem, omap_l4ta(s->l4, 20), - qdev_get_gpio_in(s->ih[0], - OMAP_INT_24XX_UART2_IRQ), - omap_findclk(s, "uart2_fclk"), - omap_findclk(s, "uart2_iclk"), - s->drq[OMAP24XX_DMA_UART2_TX], - s->drq[OMAP24XX_DMA_UART2_RX], - "uart2", - serial_hd(0) ? serial_hd(1) : NULL); - s->uart[2] = omap2_uart_init(sysmem, omap_l4ta(s->l4, 21), - qdev_get_gpio_in(s->ih[0], - OMAP_INT_24XX_UART3_IRQ), - omap_findclk(s, "uart3_fclk"), - omap_findclk(s, "uart3_iclk"), - s->drq[OMAP24XX_DMA_UART3_TX], - s->drq[OMAP24XX_DMA_UART3_RX], - "uart3", - serial_hd(0) && serial_hd(1) ? 
serial_hd(2) : NULL); - - s->gptimer[0] = omap_gp_timer_init(omap_l4ta(s->l4, 7), - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER1), - omap_findclk(s, "wu_gpt1_clk"), - omap_findclk(s, "wu_l4_iclk")); - s->gptimer[1] = omap_gp_timer_init(omap_l4ta(s->l4, 8), - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER2), - omap_findclk(s, "core_gpt2_clk"), - omap_findclk(s, "core_l4_iclk")); - s->gptimer[2] = omap_gp_timer_init(omap_l4ta(s->l4, 22), - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER3), - omap_findclk(s, "core_gpt3_clk"), - omap_findclk(s, "core_l4_iclk")); - s->gptimer[3] = omap_gp_timer_init(omap_l4ta(s->l4, 23), - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER4), - omap_findclk(s, "core_gpt4_clk"), - omap_findclk(s, "core_l4_iclk")); - s->gptimer[4] = omap_gp_timer_init(omap_l4ta(s->l4, 24), - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER5), - omap_findclk(s, "core_gpt5_clk"), - omap_findclk(s, "core_l4_iclk")); - s->gptimer[5] = omap_gp_timer_init(omap_l4ta(s->l4, 25), - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER6), - omap_findclk(s, "core_gpt6_clk"), - omap_findclk(s, "core_l4_iclk")); - s->gptimer[6] = omap_gp_timer_init(omap_l4ta(s->l4, 26), - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER7), - omap_findclk(s, "core_gpt7_clk"), - omap_findclk(s, "core_l4_iclk")); - s->gptimer[7] = omap_gp_timer_init(omap_l4ta(s->l4, 27), - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER8), - omap_findclk(s, "core_gpt8_clk"), - omap_findclk(s, "core_l4_iclk")); - s->gptimer[8] = omap_gp_timer_init(omap_l4ta(s->l4, 28), - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER9), - omap_findclk(s, "core_gpt9_clk"), - omap_findclk(s, "core_l4_iclk")); - s->gptimer[9] = omap_gp_timer_init(omap_l4ta(s->l4, 29), - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER10), - omap_findclk(s, "core_gpt10_clk"), - omap_findclk(s, "core_l4_iclk")); - s->gptimer[10] = omap_gp_timer_init(omap_l4ta(s->l4, 30), - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER11), - omap_findclk(s, "core_gpt11_clk"), - omap_findclk(s, "core_l4_iclk")); - s->gptimer[11] = omap_gp_timer_init(omap_l4ta(s->l4, 31), - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER12), - omap_findclk(s, "core_gpt12_clk"), - omap_findclk(s, "core_l4_iclk")); - - omap_tap_init(omap_l4ta(s->l4, 2), s); - - s->synctimer = omap_synctimer_init(omap_l4tao(s->l4, 2), s, - omap_findclk(s, "clk32-kHz"), - omap_findclk(s, "core_l4_iclk")); - - s->i2c[0] = qdev_new("omap_i2c"); - qdev_prop_set_uint8(s->i2c[0], "revision", 0x34); - omap_i2c_set_iclk(OMAP_I2C(s->i2c[0]), omap_findclk(s, "i2c1.iclk")); - omap_i2c_set_fclk(OMAP_I2C(s->i2c[0]), omap_findclk(s, "i2c1.fclk")); - busdev = SYS_BUS_DEVICE(s->i2c[0]); - sysbus_realize_and_unref(busdev, &error_fatal); - sysbus_connect_irq(busdev, 0, - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_I2C1_IRQ)); - sysbus_connect_irq(busdev, 1, s->drq[OMAP24XX_DMA_I2C1_TX]); - sysbus_connect_irq(busdev, 2, s->drq[OMAP24XX_DMA_I2C1_RX]); - sysbus_mmio_map(busdev, 0, omap_l4_region_base(omap_l4tao(s->l4, 5), 0)); - - s->i2c[1] = qdev_new("omap_i2c"); - qdev_prop_set_uint8(s->i2c[1], "revision", 0x34); - omap_i2c_set_iclk(OMAP_I2C(s->i2c[1]), omap_findclk(s, "i2c2.iclk")); - omap_i2c_set_fclk(OMAP_I2C(s->i2c[1]), omap_findclk(s, "i2c2.fclk")); - busdev = SYS_BUS_DEVICE(s->i2c[1]); - sysbus_realize_and_unref(busdev, &error_fatal); - sysbus_connect_irq(busdev, 0, - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_I2C2_IRQ)); - sysbus_connect_irq(busdev, 1, s->drq[OMAP24XX_DMA_I2C2_TX]); - sysbus_connect_irq(busdev, 2, 
s->drq[OMAP24XX_DMA_I2C2_RX]); - sysbus_mmio_map(busdev, 0, omap_l4_region_base(omap_l4tao(s->l4, 6), 0)); - - s->gpio = qdev_new("omap2-gpio"); - qdev_prop_set_int32(s->gpio, "mpu_model", s->mpu_model); - omap2_gpio_set_iclk(OMAP2_GPIO(s->gpio), omap_findclk(s, "gpio_iclk")); - omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 0, omap_findclk(s, "gpio1_dbclk")); - omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 1, omap_findclk(s, "gpio2_dbclk")); - omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 2, omap_findclk(s, "gpio3_dbclk")); - omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 3, omap_findclk(s, "gpio4_dbclk")); - if (s->mpu_model == omap2430) { - omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 4, - omap_findclk(s, "gpio5_dbclk")); - } - busdev = SYS_BUS_DEVICE(s->gpio); - sysbus_realize_and_unref(busdev, &error_fatal); - sysbus_connect_irq(busdev, 0, - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPIO_BANK1)); - sysbus_connect_irq(busdev, 3, - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPIO_BANK2)); - sysbus_connect_irq(busdev, 6, - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPIO_BANK3)); - sysbus_connect_irq(busdev, 9, - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPIO_BANK4)); - if (s->mpu_model == omap2430) { - sysbus_connect_irq(busdev, 12, - qdev_get_gpio_in(s->ih[0], - OMAP_INT_243X_GPIO_BANK5)); - } - ta = omap_l4ta(s->l4, 3); - sysbus_mmio_map(busdev, 0, omap_l4_region_base(ta, 1)); - sysbus_mmio_map(busdev, 1, omap_l4_region_base(ta, 0)); - sysbus_mmio_map(busdev, 2, omap_l4_region_base(ta, 2)); - sysbus_mmio_map(busdev, 3, omap_l4_region_base(ta, 4)); - sysbus_mmio_map(busdev, 4, omap_l4_region_base(ta, 5)); - - s->sdrc = omap_sdrc_init(sysmem, 0x68009000); - s->gpmc = omap_gpmc_init(s, 0x6800a000, - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPMC_IRQ), - s->drq[OMAP24XX_DMA_GPMC]); - - dinfo = drive_get(IF_SD, 0, 0); - if (!dinfo && !qtest_enabled()) { - warn_report("missing SecureDigital device"); - } - s->mmc = omap2_mmc_init(omap_l4tao(s->l4, 9), - dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_MMC_IRQ), - &s->drq[OMAP24XX_DMA_MMC1_TX], - omap_findclk(s, "mmc_fclk"), omap_findclk(s, "mmc_iclk")); - - s->mcspi[0] = omap_mcspi_init(omap_l4ta(s->l4, 35), 4, - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_MCSPI1_IRQ), - &s->drq[OMAP24XX_DMA_SPI1_TX0], - omap_findclk(s, "spi1_fclk"), - omap_findclk(s, "spi1_iclk")); - s->mcspi[1] = omap_mcspi_init(omap_l4ta(s->l4, 36), 2, - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_MCSPI2_IRQ), - &s->drq[OMAP24XX_DMA_SPI2_TX0], - omap_findclk(s, "spi2_fclk"), - omap_findclk(s, "spi2_iclk")); - - s->dss = omap_dss_init(omap_l4ta(s->l4, 10), sysmem, 0x68000800, - /* XXX wire M_IRQ_25, D_L2_IRQ_30 and I_IRQ_13 together */ - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_DSS_IRQ), - s->drq[OMAP24XX_DMA_DSS], - omap_findclk(s, "dss_clk1"), omap_findclk(s, "dss_clk2"), - omap_findclk(s, "dss_54m_clk"), - omap_findclk(s, "dss_l3_iclk"), - omap_findclk(s, "dss_l4_iclk")); - - omap_sti_init(omap_l4ta(s->l4, 18), sysmem, 0x54000000, - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_STI), - omap_findclk(s, "emul_ck"), - serial_hd(0) && serial_hd(1) && serial_hd(2) ? 
- serial_hd(3) : NULL); - - s->eac = omap_eac_init(omap_l4ta(s->l4, 32), - qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_EAC_IRQ), - /* Ten consecutive lines */ - &s->drq[OMAP24XX_DMA_EAC_AC_RD], - omap_findclk(s, "func_96m_clk"), - omap_findclk(s, "core_l4_iclk")); - - /* All register mappings (including those not currently implemented): - * SystemControlMod 48000000 - 48000fff - * SystemControlL4 48001000 - 48001fff - * 32kHz Timer Mod 48004000 - 48004fff - * 32kHz Timer L4 48005000 - 48005fff - * PRCM ModA 48008000 - 480087ff - * PRCM ModB 48008800 - 48008fff - * PRCM L4 48009000 - 48009fff - * TEST-BCM Mod 48012000 - 48012fff - * TEST-BCM L4 48013000 - 48013fff - * TEST-TAP Mod 48014000 - 48014fff - * TEST-TAP L4 48015000 - 48015fff - * GPIO1 Mod 48018000 - 48018fff - * GPIO Top 48019000 - 48019fff - * GPIO2 Mod 4801a000 - 4801afff - * GPIO L4 4801b000 - 4801bfff - * GPIO3 Mod 4801c000 - 4801cfff - * GPIO4 Mod 4801e000 - 4801efff - * WDTIMER1 Mod 48020000 - 48010fff - * WDTIMER Top 48021000 - 48011fff - * WDTIMER2 Mod 48022000 - 48012fff - * WDTIMER L4 48023000 - 48013fff - * WDTIMER3 Mod 48024000 - 48014fff - * WDTIMER3 L4 48025000 - 48015fff - * WDTIMER4 Mod 48026000 - 48016fff - * WDTIMER4 L4 48027000 - 48017fff - * GPTIMER1 Mod 48028000 - 48018fff - * GPTIMER1 L4 48029000 - 48019fff - * GPTIMER2 Mod 4802a000 - 4801afff - * GPTIMER2 L4 4802b000 - 4801bfff - * L4-Config AP 48040000 - 480407ff - * L4-Config IP 48040800 - 48040fff - * L4-Config LA 48041000 - 48041fff - * ARM11ETB Mod 48048000 - 48049fff - * ARM11ETB L4 4804a000 - 4804afff - * DISPLAY Top 48050000 - 480503ff - * DISPLAY DISPC 48050400 - 480507ff - * DISPLAY RFBI 48050800 - 48050bff - * DISPLAY VENC 48050c00 - 48050fff - * DISPLAY L4 48051000 - 48051fff - * CAMERA Top 48052000 - 480523ff - * CAMERA core 48052400 - 480527ff - * CAMERA DMA 48052800 - 48052bff - * CAMERA MMU 48052c00 - 48052fff - * CAMERA L4 48053000 - 48053fff - * SDMA Mod 48056000 - 48056fff - * SDMA L4 48057000 - 48057fff - * SSI Top 48058000 - 48058fff - * SSI GDD 48059000 - 48059fff - * SSI Port1 4805a000 - 4805afff - * SSI Port2 4805b000 - 4805bfff - * SSI L4 4805c000 - 4805cfff - * USB Mod 4805e000 - 480fefff - * USB L4 4805f000 - 480fffff - * WIN_TRACER1 Mod 48060000 - 48060fff - * WIN_TRACER1 L4 48061000 - 48061fff - * WIN_TRACER2 Mod 48062000 - 48062fff - * WIN_TRACER2 L4 48063000 - 48063fff - * WIN_TRACER3 Mod 48064000 - 48064fff - * WIN_TRACER3 L4 48065000 - 48065fff - * WIN_TRACER4 Top 48066000 - 480660ff - * WIN_TRACER4 ETT 48066100 - 480661ff - * WIN_TRACER4 WT 48066200 - 480662ff - * WIN_TRACER4 L4 48067000 - 48067fff - * XTI Mod 48068000 - 48068fff - * XTI L4 48069000 - 48069fff - * UART1 Mod 4806a000 - 4806afff - * UART1 L4 4806b000 - 4806bfff - * UART2 Mod 4806c000 - 4806cfff - * UART2 L4 4806d000 - 4806dfff - * UART3 Mod 4806e000 - 4806efff - * UART3 L4 4806f000 - 4806ffff - * I2C1 Mod 48070000 - 48070fff - * I2C1 L4 48071000 - 48071fff - * I2C2 Mod 48072000 - 48072fff - * I2C2 L4 48073000 - 48073fff - * McBSP1 Mod 48074000 - 48074fff - * McBSP1 L4 48075000 - 48075fff - * McBSP2 Mod 48076000 - 48076fff - * McBSP2 L4 48077000 - 48077fff - * GPTIMER3 Mod 48078000 - 48078fff - * GPTIMER3 L4 48079000 - 48079fff - * GPTIMER4 Mod 4807a000 - 4807afff - * GPTIMER4 L4 4807b000 - 4807bfff - * GPTIMER5 Mod 4807c000 - 4807cfff - * GPTIMER5 L4 4807d000 - 4807dfff - * GPTIMER6 Mod 4807e000 - 4807efff - * GPTIMER6 L4 4807f000 - 4807ffff - * GPTIMER7 Mod 48080000 - 48080fff - * GPTIMER7 L4 48081000 - 48081fff - * GPTIMER8 Mod 48082000 - 48082fff - * GPTIMER8 
L4 48083000 - 48083fff - * GPTIMER9 Mod 48084000 - 48084fff - * GPTIMER9 L4 48085000 - 48085fff - * GPTIMER10 Mod 48086000 - 48086fff - * GPTIMER10 L4 48087000 - 48087fff - * GPTIMER11 Mod 48088000 - 48088fff - * GPTIMER11 L4 48089000 - 48089fff - * GPTIMER12 Mod 4808a000 - 4808afff - * GPTIMER12 L4 4808b000 - 4808bfff - * EAC Mod 48090000 - 48090fff - * EAC L4 48091000 - 48091fff - * FAC Mod 48092000 - 48092fff - * FAC L4 48093000 - 48093fff - * MAILBOX Mod 48094000 - 48094fff - * MAILBOX L4 48095000 - 48095fff - * SPI1 Mod 48098000 - 48098fff - * SPI1 L4 48099000 - 48099fff - * SPI2 Mod 4809a000 - 4809afff - * SPI2 L4 4809b000 - 4809bfff - * MMC/SDIO Mod 4809c000 - 4809cfff - * MMC/SDIO L4 4809d000 - 4809dfff - * MS_PRO Mod 4809e000 - 4809efff - * MS_PRO L4 4809f000 - 4809ffff - * RNG Mod 480a0000 - 480a0fff - * RNG L4 480a1000 - 480a1fff - * DES3DES Mod 480a2000 - 480a2fff - * DES3DES L4 480a3000 - 480a3fff - * SHA1MD5 Mod 480a4000 - 480a4fff - * SHA1MD5 L4 480a5000 - 480a5fff - * AES Mod 480a6000 - 480a6fff - * AES L4 480a7000 - 480a7fff - * PKA Mod 480a8000 - 480a9fff - * PKA L4 480aa000 - 480aafff - * MG Mod 480b0000 - 480b0fff - * MG L4 480b1000 - 480b1fff - * HDQ/1-wire Mod 480b2000 - 480b2fff - * HDQ/1-wire L4 480b3000 - 480b3fff - * MPU interrupt 480fe000 - 480fefff - * STI channel base 54000000 - 5400ffff - * IVA RAM 5c000000 - 5c01ffff - * IVA ROM 5c020000 - 5c027fff - * IMG_BUF_A 5c040000 - 5c040fff - * IMG_BUF_B 5c042000 - 5c042fff - * VLCDS 5c048000 - 5c0487ff - * IMX_COEF 5c049000 - 5c04afff - * IMX_CMD 5c051000 - 5c051fff - * VLCDQ 5c053000 - 5c0533ff - * VLCDH 5c054000 - 5c054fff - * SEQ_CMD 5c055000 - 5c055fff - * IMX_REG 5c056000 - 5c0560ff - * VLCD_REG 5c056100 - 5c0561ff - * SEQ_REG 5c056200 - 5c0562ff - * IMG_BUF_REG 5c056300 - 5c0563ff - * SEQIRQ_REG 5c056400 - 5c0564ff - * OCP_REG 5c060000 - 5c060fff - * SYSC_REG 5c070000 - 5c070fff - * MMU_REG 5d000000 - 5d000fff - * sDMA R 68000400 - 680005ff - * sDMA W 68000600 - 680007ff - * Display Control 68000800 - 680009ff - * DSP subsystem 68000a00 - 68000bff - * MPU subsystem 68000c00 - 68000dff - * IVA subsystem 68001000 - 680011ff - * USB 68001200 - 680013ff - * Camera 68001400 - 680015ff - * VLYNQ (firewall) 68001800 - 68001bff - * VLYNQ 68001e00 - 68001fff - * SSI 68002000 - 680021ff - * L4 68002400 - 680025ff - * DSP (firewall) 68002800 - 68002bff - * DSP subsystem 68002e00 - 68002fff - * IVA (firewall) 68003000 - 680033ff - * IVA 68003600 - 680037ff - * GFX 68003a00 - 68003bff - * CMDWR emulation 68003c00 - 68003dff - * SMS 68004000 - 680041ff - * OCM 68004200 - 680043ff - * GPMC 68004400 - 680045ff - * RAM (firewall) 68005000 - 680053ff - * RAM (err login) 68005400 - 680057ff - * ROM (firewall) 68005800 - 68005bff - * ROM (err login) 68005c00 - 68005fff - * GPMC (firewall) 68006000 - 680063ff - * GPMC (err login) 68006400 - 680067ff - * SMS (err login) 68006c00 - 68006fff - * SMS registers 68008000 - 68008fff - * SDRC registers 68009000 - 68009fff - * GPMC registers 6800a000 6800afff - */ - - qemu_register_reset(omap2_mpu_reset, s); - - return s; -} diff --git a/hw/arm/omap_sx1.c b/hw/arm/omap_sx1.c index 62d7915fb8f..5d4a31b7aed 100644 --- a/hw/arm/omap_sx1.c +++ b/hw/arm/omap_sx1.c @@ -1,7 +1,7 @@ /* omap_sx1.c Support for the Siemens SX1 smartphone emulation. 
* * Copyright (C) 2008 - * Jean-Christophe PLAGNIOL-VILLARD + * Jean-Christophe PLAGNIOL-VILLARD * Copyright (C) 2007 Vladimir Ananiev * * based on PalmOne's (TM) PDAs support (palm.c) @@ -33,8 +33,8 @@ #include "hw/boards.h" #include "hw/arm/boot.h" #include "hw/block/flash.h" -#include "sysemu/qtest.h" -#include "exec/address-spaces.h" +#include "system/qtest.h" +#include "system/address-spaces.h" #include "qemu/cutils.h" #include "qemu/error-report.h" @@ -76,10 +76,6 @@ static uint64_t static_read(void *opaque, hwaddr offset, static void static_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { -#ifdef SPY - printf("%s: value %" PRIx64 " %u bytes written at 0x%x\n", - __func__, value, size, (int)offset); -#endif } static const MemoryRegionOps static_ops = { @@ -206,7 +202,7 @@ static void sx1_init_v2(MachineState *machine) sx1_init(machine, 2); } -static void sx1_machine_v2_class_init(ObjectClass *oc, void *data) +static void sx1_machine_v2_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -216,6 +212,7 @@ static void sx1_machine_v2_class_init(ObjectClass *oc, void *data) mc->default_cpu_type = ARM_CPU_TYPE_NAME("ti925t"); mc->default_ram_size = SDRAM_SIZE; mc->default_ram_id = "omap1.dram"; + mc->auto_create_sdcard = true; } static const TypeInfo sx1_machine_v2_type = { @@ -224,7 +221,7 @@ static const TypeInfo sx1_machine_v2_type = { .class_init = sx1_machine_v2_class_init, }; -static void sx1_machine_v1_class_init(ObjectClass *oc, void *data) +static void sx1_machine_v1_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -234,6 +231,7 @@ static void sx1_machine_v1_class_init(ObjectClass *oc, void *data) mc->default_cpu_type = ARM_CPU_TYPE_NAME("ti925t"); mc->default_ram_size = SDRAM_SIZE; mc->default_ram_id = "omap1.dram"; + mc->auto_create_sdcard = true; } static const TypeInfo sx1_machine_v1_type = { diff --git a/hw/arm/orangepi.c b/hw/arm/orangepi.c index 77e328191d7..e0956880d11 100644 --- a/hw/arm/orangepi.c +++ b/hw/arm/orangepi.c @@ -19,7 +19,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "hw/boards.h" @@ -121,6 +121,7 @@ static void orangepi_machine_init(MachineClass *mc) mc->valid_cpu_types = valid_cpu_types; mc->default_ram_size = 1 * GiB; mc->default_ram_id = "orangepi.ram"; + mc->auto_create_sdcard = true; } DEFINE_MACHINE("orangepi-pc", orangepi_machine_init) diff --git a/hw/arm/palm.c b/hw/arm/palm.c deleted file mode 100644 index e04ac92eb7f..00000000000 --- a/hw/arm/palm.c +++ /dev/null @@ -1,324 +0,0 @@ -/* - * PalmOne's (TM) PDAs. - * - * Copyright (C) 2006-2007 Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see <http://www.gnu.org/licenses/>.
- */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "audio/audio.h" -#include "sysemu/sysemu.h" -#include "sysemu/qtest.h" -#include "ui/console.h" -#include "hw/arm/omap.h" -#include "hw/boards.h" -#include "hw/arm/boot.h" -#include "hw/input/tsc2xxx.h" -#include "hw/irq.h" -#include "hw/loader.h" -#include "qemu/cutils.h" -#include "qom/object.h" -#include "qemu/error-report.h" - - -static uint64_t static_read(void *opaque, hwaddr offset, unsigned size) -{ - uint32_t *val = (uint32_t *)opaque; - uint32_t sizemask = 7 >> size; - - return *val >> ((offset & sizemask) << 3); -} - -static void static_write(void *opaque, hwaddr offset, uint64_t value, - unsigned size) -{ -#ifdef SPY - printf("%s: value %08lx written at " PA_FMT "\n", - __func__, value, offset); -#endif -} - -static const MemoryRegionOps static_ops = { - .read = static_read, - .write = static_write, - .valid.min_access_size = 1, - .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -/* Palm Tunsgten|E support */ - -/* Shared GPIOs */ -#define PALMTE_USBDETECT_GPIO 0 -#define PALMTE_USB_OR_DC_GPIO 1 -#define PALMTE_TSC_GPIO 4 -#define PALMTE_PINTDAV_GPIO 6 -#define PALMTE_MMC_WP_GPIO 8 -#define PALMTE_MMC_POWER_GPIO 9 -#define PALMTE_HDQ_GPIO 11 -#define PALMTE_HEADPHONES_GPIO 14 -#define PALMTE_SPEAKER_GPIO 15 -/* MPU private GPIOs */ -#define PALMTE_DC_GPIO 2 -#define PALMTE_MMC_SWITCH_GPIO 4 -#define PALMTE_MMC1_GPIO 6 -#define PALMTE_MMC2_GPIO 7 -#define PALMTE_MMC3_GPIO 11 - -static MouseTransformInfo palmte_pointercal = { - .x = 320, - .y = 320, - .a = { -5909, 8, 22465308, 104, 7644, -1219972, 65536 }, -}; - -static void palmte_microwire_setup(struct omap_mpu_state_s *cpu) -{ - uWireSlave *tsc; - - tsc = tsc2102_init(qdev_get_gpio_in(cpu->gpio, PALMTE_PINTDAV_GPIO)); - - omap_uwire_attach(cpu->microwire, tsc, 0); - omap_mcbsp_i2s_attach(cpu->mcbsp1, tsc210x_codec(tsc)); - - tsc210x_set_transform(tsc, &palmte_pointercal); -} - -static struct { - int row; - int column; -} palmte_keymap[0x80] = { - [0 ... 0x7f] = { -1, -1 }, - [0x3b] = { 0, 0 }, /* F1 -> Calendar */ - [0x3c] = { 1, 0 }, /* F2 -> Contacts */ - [0x3d] = { 2, 0 }, /* F3 -> Tasks List */ - [0x3e] = { 3, 0 }, /* F4 -> Note Pad */ - [0x01] = { 4, 0 }, /* Esc -> Power */ - [0x4b] = { 0, 1 }, /* Left */ - [0x50] = { 1, 1 }, /* Down */ - [0x48] = { 2, 1 }, /* Up */ - [0x4d] = { 3, 1 }, /* Right */ - [0x4c] = { 4, 1 }, /* Centre */ - [0x39] = { 4, 1 }, /* Spc -> Centre */ -}; - -static void palmte_button_event(void *opaque, int keycode) -{ - struct omap_mpu_state_s *cpu = opaque; - - if (palmte_keymap[keycode & 0x7f].row != -1) - omap_mpuio_key(cpu->mpuio, - palmte_keymap[keycode & 0x7f].row, - palmte_keymap[keycode & 0x7f].column, - !(keycode & 0x80)); -} - -/* - * Encapsulation of some GPIO line behaviour for the Palm board - * - * QEMU interface: - * + unnamed GPIO inputs 0..6: for the various miscellaneous input lines - */ - -#define TYPE_PALM_MISC_GPIO "palm-misc-gpio" -OBJECT_DECLARE_SIMPLE_TYPE(PalmMiscGPIOState, PALM_MISC_GPIO) - -struct PalmMiscGPIOState { - SysBusDevice parent_obj; -}; - -static void palmte_onoff_gpios(void *opaque, int line, int level) -{ - switch (line) { - case 0: - printf("%s: current to MMC/SD card %sabled.\n", - __func__, level ? "dis" : "en"); - break; - case 1: - printf("%s: internal speaker amplifier %s.\n", - __func__, level ? "down" : "on"); - break; - - /* These LCD & Audio output signals have not been identified yet. 
*/ - case 2: - case 3: - case 4: - printf("%s: LCD GPIO%i %s.\n", - __func__, line - 1, level ? "high" : "low"); - break; - case 5: - case 6: - printf("%s: Audio GPIO%i %s.\n", - __func__, line - 4, level ? "high" : "low"); - break; - } -} - -static void palm_misc_gpio_init(Object *obj) -{ - DeviceState *dev = DEVICE(obj); - - qdev_init_gpio_in(dev, palmte_onoff_gpios, 7); -} - -static const TypeInfo palm_misc_gpio_info = { - .name = TYPE_PALM_MISC_GPIO, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PalmMiscGPIOState), - .instance_init = palm_misc_gpio_init, - /* - * No class init required: device has no internal state so does not - * need to set up reset or vmstate, and has no realize method. - */ -}; - -static void palmte_gpio_setup(struct omap_mpu_state_s *cpu) -{ - DeviceState *misc_gpio; - - misc_gpio = sysbus_create_simple(TYPE_PALM_MISC_GPIO, -1, NULL); - - omap_mmc_handlers(cpu->mmc, - qdev_get_gpio_in(cpu->gpio, PALMTE_MMC_WP_GPIO), - qemu_irq_invert(omap_mpuio_in_get(cpu->mpuio) - [PALMTE_MMC_SWITCH_GPIO])); - - qdev_connect_gpio_out(cpu->gpio, PALMTE_MMC_POWER_GPIO, - qdev_get_gpio_in(misc_gpio, 0)); - qdev_connect_gpio_out(cpu->gpio, PALMTE_SPEAKER_GPIO, - qdev_get_gpio_in(misc_gpio, 1)); - qdev_connect_gpio_out(cpu->gpio, 11, qdev_get_gpio_in(misc_gpio, 2)); - qdev_connect_gpio_out(cpu->gpio, 12, qdev_get_gpio_in(misc_gpio, 3)); - qdev_connect_gpio_out(cpu->gpio, 13, qdev_get_gpio_in(misc_gpio, 4)); - omap_mpuio_out_set(cpu->mpuio, 1, qdev_get_gpio_in(misc_gpio, 5)); - omap_mpuio_out_set(cpu->mpuio, 3, qdev_get_gpio_in(misc_gpio, 6)); - - /* Reset some inputs to initial state. */ - qemu_irq_lower(qdev_get_gpio_in(cpu->gpio, PALMTE_USBDETECT_GPIO)); - qemu_irq_lower(qdev_get_gpio_in(cpu->gpio, PALMTE_USB_OR_DC_GPIO)); - qemu_irq_lower(qdev_get_gpio_in(cpu->gpio, 4)); - qemu_irq_lower(qdev_get_gpio_in(cpu->gpio, PALMTE_HEADPHONES_GPIO)); - qemu_irq_lower(omap_mpuio_in_get(cpu->mpuio)[PALMTE_DC_GPIO]); - qemu_irq_raise(omap_mpuio_in_get(cpu->mpuio)[6]); - qemu_irq_raise(omap_mpuio_in_get(cpu->mpuio)[7]); - qemu_irq_raise(omap_mpuio_in_get(cpu->mpuio)[11]); -} - -static struct arm_boot_info palmte_binfo = { - .loader_start = OMAP_EMIFF_BASE, - .ram_size = 0x02000000, - .board_id = 0x331, -}; - -static void palmte_init(MachineState *machine) -{ - MemoryRegion *address_space_mem = get_system_memory(); - struct omap_mpu_state_s *mpu; - int flash_size = 0x00800000; - static uint32_t cs0val = 0xffffffff; - static uint32_t cs1val = 0x0000e1a0; - static uint32_t cs2val = 0x0000e1a0; - static uint32_t cs3val = 0xe1a0e1a0; - int rom_size, rom_loaded = 0; - MachineClass *mc = MACHINE_GET_CLASS(machine); - MemoryRegion *flash = g_new(MemoryRegion, 1); - MemoryRegion *cs = g_new(MemoryRegion, 4); - - if (machine->ram_size != mc->default_ram_size) { - char *sz = size_to_str(mc->default_ram_size); - error_report("Invalid RAM size, should be %s", sz); - g_free(sz); - exit(EXIT_FAILURE); - } - - memory_region_add_subregion(address_space_mem, OMAP_EMIFF_BASE, - machine->ram); - - mpu = omap310_mpu_init(machine->ram, machine->cpu_type); - - /* External Flash (EMIFS) */ - memory_region_init_rom(flash, NULL, "palmte.flash", flash_size, - &error_fatal); - memory_region_add_subregion(address_space_mem, OMAP_CS0_BASE, flash); - - memory_region_init_io(&cs[0], NULL, &static_ops, &cs0val, "palmte-cs0", - OMAP_CS0_SIZE - flash_size); - memory_region_add_subregion(address_space_mem, OMAP_CS0_BASE + flash_size, - &cs[0]); - memory_region_init_io(&cs[1], NULL, &static_ops, &cs1val, "palmte-cs1", 
- OMAP_CS1_SIZE); - memory_region_add_subregion(address_space_mem, OMAP_CS1_BASE, &cs[1]); - memory_region_init_io(&cs[2], NULL, &static_ops, &cs2val, "palmte-cs2", - OMAP_CS2_SIZE); - memory_region_add_subregion(address_space_mem, OMAP_CS2_BASE, &cs[2]); - memory_region_init_io(&cs[3], NULL, &static_ops, &cs3val, "palmte-cs3", - OMAP_CS3_SIZE); - memory_region_add_subregion(address_space_mem, OMAP_CS3_BASE, &cs[3]); - - palmte_microwire_setup(mpu); - - qemu_add_kbd_event_handler(palmte_button_event, mpu); - - palmte_gpio_setup(mpu); - - /* Setup initial (reset) machine state */ - if (nb_option_roms) { - rom_size = get_image_size(option_rom[0].name); - if (rom_size > flash_size) { - fprintf(stderr, "%s: ROM image too big (%x > %x)\n", - __func__, rom_size, flash_size); - rom_size = 0; - } - if (rom_size > 0) { - rom_size = load_image_targphys(option_rom[0].name, OMAP_CS0_BASE, - flash_size); - rom_loaded = 1; - } - if (rom_size < 0) { - fprintf(stderr, "%s: error loading '%s'\n", - __func__, option_rom[0].name); - } - } - - if (!rom_loaded && !machine->kernel_filename && !qtest_enabled()) { - fprintf(stderr, "Kernel or ROM image must be specified\n"); - exit(1); - } - - /* Load the kernel. */ - arm_load_kernel(mpu->cpu, machine, &palmte_binfo); -} - -static void palmte_machine_init(MachineClass *mc) -{ - mc->desc = "Palm Tungsten|E aka. Cheetah PDA (OMAP310)"; - mc->init = palmte_init; - mc->ignore_memory_transaction_failures = true; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("ti925t"); - mc->default_ram_size = 0x02000000; - mc->default_ram_id = "omap1.dram"; - mc->deprecation_reason = "machine is old and unmaintained"; - - machine_add_audiodev_property(mc); -} - -DEFINE_MACHINE("cheetah", palmte_machine_init) - -static void palm_register_types(void) -{ - type_register_static(&palm_misc_gpio_info); -} - -type_init(palm_register_types) diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c deleted file mode 100644 index 6b2e54473b3..00000000000 --- a/hw/arm/pxa2xx.c +++ /dev/null @@ -1,2393 +0,0 @@ -/* - * Intel XScale PXA255/270 processor support. - * - * Copyright (c) 2006 Openedhand Ltd. - * Written by Andrzej Zaborowski - * - * This code is licensed under the GPL. 
- */ - -#include "qemu/osdep.h" -#include "qemu/error-report.h" -#include "qemu/module.h" -#include "qapi/error.h" -#include "exec/address-spaces.h" -#include "cpu.h" -#include "hw/sysbus.h" -#include "migration/vmstate.h" -#include "hw/arm/pxa.h" -#include "sysemu/sysemu.h" -#include "hw/char/serial.h" -#include "hw/i2c/i2c.h" -#include "hw/irq.h" -#include "hw/qdev-properties.h" -#include "hw/qdev-properties-system.h" -#include "hw/ssi/ssi.h" -#include "hw/sd/sd.h" -#include "chardev/char-fe.h" -#include "sysemu/blockdev.h" -#include "sysemu/qtest.h" -#include "sysemu/rtc.h" -#include "qemu/cutils.h" -#include "qemu/log.h" -#include "qom/object.h" -#include "target/arm/cpregs.h" - -static struct { - hwaddr io_base; - int irqn; -} pxa255_serial[] = { - { 0x40100000, PXA2XX_PIC_FFUART }, - { 0x40200000, PXA2XX_PIC_BTUART }, - { 0x40700000, PXA2XX_PIC_STUART }, - { 0x41600000, PXA25X_PIC_HWUART }, - { 0, 0 } -}, pxa270_serial[] = { - { 0x40100000, PXA2XX_PIC_FFUART }, - { 0x40200000, PXA2XX_PIC_BTUART }, - { 0x40700000, PXA2XX_PIC_STUART }, - { 0, 0 } -}; - -typedef struct PXASSPDef { - hwaddr io_base; - int irqn; -} PXASSPDef; - -#if 0 -static PXASSPDef pxa250_ssp[] = { - { 0x41000000, PXA2XX_PIC_SSP }, - { 0, 0 } -}; -#endif - -static PXASSPDef pxa255_ssp[] = { - { 0x41000000, PXA2XX_PIC_SSP }, - { 0x41400000, PXA25X_PIC_NSSP }, - { 0, 0 } -}; - -#if 0 -static PXASSPDef pxa26x_ssp[] = { - { 0x41000000, PXA2XX_PIC_SSP }, - { 0x41400000, PXA25X_PIC_NSSP }, - { 0x41500000, PXA26X_PIC_ASSP }, - { 0, 0 } -}; -#endif - -static PXASSPDef pxa27x_ssp[] = { - { 0x41000000, PXA2XX_PIC_SSP }, - { 0x41700000, PXA27X_PIC_SSP2 }, - { 0x41900000, PXA2XX_PIC_SSP3 }, - { 0, 0 } -}; - -#define PMCR 0x00 /* Power Manager Control register */ -#define PSSR 0x04 /* Power Manager Sleep Status register */ -#define PSPR 0x08 /* Power Manager Scratch-Pad register */ -#define PWER 0x0c /* Power Manager Wake-Up Enable register */ -#define PRER 0x10 /* Power Manager Rising-Edge Detect Enable register */ -#define PFER 0x14 /* Power Manager Falling-Edge Detect Enable register */ -#define PEDR 0x18 /* Power Manager Edge-Detect Status register */ -#define PCFR 0x1c /* Power Manager General Configuration register */ -#define PGSR0 0x20 /* Power Manager GPIO Sleep-State register 0 */ -#define PGSR1 0x24 /* Power Manager GPIO Sleep-State register 1 */ -#define PGSR2 0x28 /* Power Manager GPIO Sleep-State register 2 */ -#define PGSR3 0x2c /* Power Manager GPIO Sleep-State register 3 */ -#define RCSR 0x30 /* Reset Controller Status register */ -#define PSLR 0x34 /* Power Manager Sleep Configuration register */ -#define PTSR 0x38 /* Power Manager Standby Configuration register */ -#define PVCR 0x40 /* Power Manager Voltage Change Control register */ -#define PUCR 0x4c /* Power Manager USIM Card Control/Status register */ -#define PKWR 0x50 /* Power Manager Keyboard Wake-Up Enable register */ -#define PKSR 0x54 /* Power Manager Keyboard Level-Detect Status */ -#define PCMD0 0x80 /* Power Manager I2C Command register File 0 */ -#define PCMD31 0xfc /* Power Manager I2C Command register File 31 */ - -static uint64_t pxa2xx_pm_read(void *opaque, hwaddr addr, - unsigned size) -{ - PXA2xxState *s = (PXA2xxState *) opaque; - - switch (addr) { - case PMCR ... 
PCMD31: - if (addr & 3) - goto fail; - - return s->pm_regs[addr >> 2]; - default: - fail: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad read offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - break; - } - return 0; -} - -static void pxa2xx_pm_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - PXA2xxState *s = (PXA2xxState *) opaque; - - switch (addr) { - case PMCR: - /* Clear the write-one-to-clear bits... */ - s->pm_regs[addr >> 2] &= ~(value & 0x2a); - /* ...and set the plain r/w bits */ - s->pm_regs[addr >> 2] &= ~0x15; - s->pm_regs[addr >> 2] |= value & 0x15; - break; - - case PSSR: /* Read-clean registers */ - case RCSR: - case PKSR: - s->pm_regs[addr >> 2] &= ~value; - break; - - default: /* Read-write registers */ - if (!(addr & 3)) { - s->pm_regs[addr >> 2] = value; - break; - } - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad write offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - break; - } -} - -static const MemoryRegionOps pxa2xx_pm_ops = { - .read = pxa2xx_pm_read, - .write = pxa2xx_pm_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static const VMStateDescription vmstate_pxa2xx_pm = { - .name = "pxa2xx_pm", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_UINT32_ARRAY(pm_regs, PXA2xxState, 0x40), - VMSTATE_END_OF_LIST() - } -}; - -#define CCCR 0x00 /* Core Clock Configuration register */ -#define CKEN 0x04 /* Clock Enable register */ -#define OSCC 0x08 /* Oscillator Configuration register */ -#define CCSR 0x0c /* Core Clock Status register */ - -static uint64_t pxa2xx_cm_read(void *opaque, hwaddr addr, - unsigned size) -{ - PXA2xxState *s = (PXA2xxState *) opaque; - - switch (addr) { - case CCCR: - case CKEN: - case OSCC: - return s->cm_regs[addr >> 2]; - - case CCSR: - return s->cm_regs[CCCR >> 2] | (3 << 28); - - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad read offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - break; - } - return 0; -} - -static void pxa2xx_cm_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - PXA2xxState *s = (PXA2xxState *) opaque; - - switch (addr) { - case CCCR: - case CKEN: - s->cm_regs[addr >> 2] = value; - break; - - case OSCC: - s->cm_regs[addr >> 2] &= ~0x6c; - s->cm_regs[addr >> 2] |= value & 0x6e; - if ((value >> 1) & 1) /* OON */ - s->cm_regs[addr >> 2] |= 1 << 0; /* Oscillator is now stable */ - break; - - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad write offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - break; - } -} - -static const MemoryRegionOps pxa2xx_cm_ops = { - .read = pxa2xx_cm_read, - .write = pxa2xx_cm_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static const VMStateDescription vmstate_pxa2xx_cm = { - .name = "pxa2xx_cm", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_UINT32_ARRAY(cm_regs, PXA2xxState, 4), - VMSTATE_UINT32(clkcfg, PXA2xxState), - VMSTATE_UINT32(pmnc, PXA2xxState), - VMSTATE_END_OF_LIST() - } -}; - -static uint64_t pxa2xx_clkcfg_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - PXA2xxState *s = (PXA2xxState *)ri->opaque; - return s->clkcfg; -} - -static void pxa2xx_clkcfg_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - PXA2xxState *s = (PXA2xxState *)ri->opaque; - s->clkcfg = value & 0xf; - if (value & 2) { - printf("%s: CPU frequency change attempt\n", __func__); - } -} - -static void pxa2xx_pwrmode_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - PXA2xxState *s = (PXA2xxState *)ri->opaque; - static const char *pwrmode[8] = { 
- "Normal", "Idle", "Deep-idle", "Standby", - "Sleep", "reserved (!)", "reserved (!)", "Deep-sleep", - }; - - if (value & 8) { - printf("%s: CPU voltage change attempt\n", __func__); - } - switch (value & 7) { - case 0: - /* Do nothing */ - break; - - case 1: - /* Idle */ - if (!(s->cm_regs[CCCR >> 2] & (1U << 31))) { /* CPDIS */ - cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HALT); - break; - } - /* Fall through. */ - - case 2: - /* Deep-Idle */ - cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HALT); - s->pm_regs[RCSR >> 2] |= 0x8; /* Set GPR */ - goto message; - - case 3: - s->cpu->env.uncached_cpsr = ARM_CPU_MODE_SVC; - s->cpu->env.daif = PSTATE_A | PSTATE_F | PSTATE_I; - s->cpu->env.cp15.sctlr_ns = 0; - s->cpu->env.cp15.cpacr_el1 = 0; - s->cpu->env.cp15.ttbr0_el[1] = 0; - s->cpu->env.cp15.dacr_ns = 0; - s->pm_regs[PSSR >> 2] |= 0x8; /* Set STS */ - s->pm_regs[RCSR >> 2] |= 0x8; /* Set GPR */ - - /* - * The scratch-pad register is almost universally used - * for storing the return address on suspend. For the - * lack of a resuming bootloader, perform a jump - * directly to that address. - */ - memset(s->cpu->env.regs, 0, 4 * 15); - s->cpu->env.regs[15] = s->pm_regs[PSPR >> 2]; - -#if 0 - buffer = 0xe59ff000; /* ldr pc, [pc, #0] */ - cpu_physical_memory_write(0, &buffer, 4); - buffer = s->pm_regs[PSPR >> 2]; - cpu_physical_memory_write(8, &buffer, 4); -#endif - - /* Suspend */ - cpu_interrupt(current_cpu, CPU_INTERRUPT_HALT); - - goto message; - - default: - message: - printf("%s: machine entered %s mode\n", __func__, - pwrmode[value & 7]); - } -} - -static uint64_t pxa2xx_cppmnc_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - PXA2xxState *s = (PXA2xxState *)ri->opaque; - return s->pmnc; -} - -static void pxa2xx_cppmnc_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - PXA2xxState *s = (PXA2xxState *)ri->opaque; - s->pmnc = value; -} - -static uint64_t pxa2xx_cpccnt_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - PXA2xxState *s = (PXA2xxState *)ri->opaque; - if (s->pmnc & 1) { - return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - } else { - return 0; - } -} - -static const ARMCPRegInfo pxa_cp_reginfo[] = { - /* cp14 crm==1: perf registers */ - { .name = "CPPMNC", .cp = 14, .crn = 0, .crm = 1, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_IO, - .readfn = pxa2xx_cppmnc_read, .writefn = pxa2xx_cppmnc_write }, - { .name = "CPCCNT", .cp = 14, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_IO, - .readfn = pxa2xx_cpccnt_read, .writefn = arm_cp_write_ignore }, - { .name = "CPINTEN", .cp = 14, .crn = 4, .crm = 1, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPFLAG", .cp = 14, .crn = 5, .crm = 1, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPEVTSEL", .cp = 14, .crn = 8, .crm = 1, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - /* cp14 crm==2: performance count registers */ - { .name = "CPPMN0", .cp = 14, .crn = 0, .crm = 2, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPPMN1", .cp = 14, .crn = 1, .crm = 2, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPPMN2", .cp = 14, .crn = 2, .crm = 2, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "CPPMN3", .cp = 14, .crn = 2, .crm = 3, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - 
/* cp14 crn==6: CLKCFG */ - { .name = "CLKCFG", .cp = 14, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_IO, - .readfn = pxa2xx_clkcfg_read, .writefn = pxa2xx_clkcfg_write }, - /* cp14 crn==7: PWRMODE */ - { .name = "PWRMODE", .cp = 14, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_IO, - .readfn = arm_cp_read_zero, .writefn = pxa2xx_pwrmode_write }, -}; - -static void pxa2xx_setup_cp14(PXA2xxState *s) -{ - define_arm_cp_regs_with_opaque(s->cpu, pxa_cp_reginfo, s); -} - -#define MDCNFG 0x00 /* SDRAM Configuration register */ -#define MDREFR 0x04 /* SDRAM Refresh Control register */ -#define MSC0 0x08 /* Static Memory Control register 0 */ -#define MSC1 0x0c /* Static Memory Control register 1 */ -#define MSC2 0x10 /* Static Memory Control register 2 */ -#define MECR 0x14 /* Expansion Memory Bus Config register */ -#define SXCNFG 0x1c /* Synchronous Static Memory Config register */ -#define MCMEM0 0x28 /* PC Card Memory Socket 0 Timing register */ -#define MCMEM1 0x2c /* PC Card Memory Socket 1 Timing register */ -#define MCATT0 0x30 /* PC Card Attribute Socket 0 register */ -#define MCATT1 0x34 /* PC Card Attribute Socket 1 register */ -#define MCIO0 0x38 /* PC Card I/O Socket 0 Timing register */ -#define MCIO1 0x3c /* PC Card I/O Socket 1 Timing register */ -#define MDMRS 0x40 /* SDRAM Mode Register Set Config register */ -#define BOOT_DEF 0x44 /* Boot-time Default Configuration register */ -#define ARB_CNTL 0x48 /* Arbiter Control register */ -#define BSCNTR0 0x4c /* Memory Buffer Strength Control register 0 */ -#define BSCNTR1 0x50 /* Memory Buffer Strength Control register 1 */ -#define LCDBSCNTR 0x54 /* LCD Buffer Strength Control register */ -#define MDMRSLP 0x58 /* Low Power SDRAM Mode Set Config register */ -#define BSCNTR2 0x5c /* Memory Buffer Strength Control register 2 */ -#define BSCNTR3 0x60 /* Memory Buffer Strength Control register 3 */ -#define SA1110 0x64 /* SA-1110 Memory Compatibility register */ - -static uint64_t pxa2xx_mm_read(void *opaque, hwaddr addr, - unsigned size) -{ - PXA2xxState *s = (PXA2xxState *) opaque; - - switch (addr) { - case MDCNFG ... SA1110: - if ((addr & 3) == 0) - return s->mm_regs[addr >> 2]; - /* fall through */ - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad read offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - break; - } - return 0; -} - -static void pxa2xx_mm_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - PXA2xxState *s = (PXA2xxState *) opaque; - - switch (addr) { - case MDCNFG ... 
SA1110: - if ((addr & 3) == 0) { - s->mm_regs[addr >> 2] = value; - break; - } - /* fallthrough */ - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad write offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - break; - } -} - -static const MemoryRegionOps pxa2xx_mm_ops = { - .read = pxa2xx_mm_read, - .write = pxa2xx_mm_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static const VMStateDescription vmstate_pxa2xx_mm = { - .name = "pxa2xx_mm", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_UINT32_ARRAY(mm_regs, PXA2xxState, 0x1a), - VMSTATE_END_OF_LIST() - } -}; - -#define TYPE_PXA2XX_SSP "pxa2xx-ssp" -OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxSSPState, PXA2XX_SSP) - -/* Synchronous Serial Ports */ -struct PXA2xxSSPState { - /*< private >*/ - SysBusDevice parent_obj; - /*< public >*/ - - MemoryRegion iomem; - qemu_irq irq; - uint32_t enable; - SSIBus *bus; - - uint32_t sscr[2]; - uint32_t sspsp; - uint32_t ssto; - uint32_t ssitr; - uint32_t sssr; - uint8_t sstsa; - uint8_t ssrsa; - uint8_t ssacd; - - uint32_t rx_fifo[16]; - uint32_t rx_level; - uint32_t rx_start; -}; - -static bool pxa2xx_ssp_vmstate_validate(void *opaque, int version_id) -{ - PXA2xxSSPState *s = opaque; - - return s->rx_start < sizeof(s->rx_fifo); -} - -static const VMStateDescription vmstate_pxa2xx_ssp = { - .name = "pxa2xx-ssp", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_UINT32(enable, PXA2xxSSPState), - VMSTATE_UINT32_ARRAY(sscr, PXA2xxSSPState, 2), - VMSTATE_UINT32(sspsp, PXA2xxSSPState), - VMSTATE_UINT32(ssto, PXA2xxSSPState), - VMSTATE_UINT32(ssitr, PXA2xxSSPState), - VMSTATE_UINT32(sssr, PXA2xxSSPState), - VMSTATE_UINT8(sstsa, PXA2xxSSPState), - VMSTATE_UINT8(ssrsa, PXA2xxSSPState), - VMSTATE_UINT8(ssacd, PXA2xxSSPState), - VMSTATE_UINT32(rx_level, PXA2xxSSPState), - VMSTATE_UINT32(rx_start, PXA2xxSSPState), - VMSTATE_VALIDATE("fifo is 16 bytes", pxa2xx_ssp_vmstate_validate), - VMSTATE_UINT32_ARRAY(rx_fifo, PXA2xxSSPState, 16), - VMSTATE_END_OF_LIST() - } -}; - -#define SSCR0 0x00 /* SSP Control register 0 */ -#define SSCR1 0x04 /* SSP Control register 1 */ -#define SSSR 0x08 /* SSP Status register */ -#define SSITR 0x0c /* SSP Interrupt Test register */ -#define SSDR 0x10 /* SSP Data register */ -#define SSTO 0x28 /* SSP Time-Out register */ -#define SSPSP 0x2c /* SSP Programmable Serial Protocol register */ -#define SSTSA 0x30 /* SSP TX Time Slot Active register */ -#define SSRSA 0x34 /* SSP RX Time Slot Active register */ -#define SSTSS 0x38 /* SSP Time Slot Status register */ -#define SSACD 0x3c /* SSP Audio Clock Divider register */ - -/* Bitfields for above registers */ -#define SSCR0_SPI(x) (((x) & 0x30) == 0x00) -#define SSCR0_SSP(x) (((x) & 0x30) == 0x10) -#define SSCR0_UWIRE(x) (((x) & 0x30) == 0x20) -#define SSCR0_PSP(x) (((x) & 0x30) == 0x30) -#define SSCR0_SSE (1 << 7) -#define SSCR0_RIM (1 << 22) -#define SSCR0_TIM (1 << 23) -#define SSCR0_MOD (1U << 31) -#define SSCR0_DSS(x) (((((x) >> 16) & 0x10) | ((x) & 0xf)) + 1) -#define SSCR1_RIE (1 << 0) -#define SSCR1_TIE (1 << 1) -#define SSCR1_LBM (1 << 2) -#define SSCR1_MWDS (1 << 5) -#define SSCR1_TFT(x) ((((x) >> 6) & 0xf) + 1) -#define SSCR1_RFT(x) ((((x) >> 10) & 0xf) + 1) -#define SSCR1_EFWR (1 << 14) -#define SSCR1_PINTE (1 << 18) -#define SSCR1_TINTE (1 << 19) -#define SSCR1_RSRE (1 << 20) -#define SSCR1_TSRE (1 << 21) -#define SSCR1_EBCEI (1 << 29) -#define SSITR_INT (7 << 5) -#define SSSR_TNF (1 << 2) -#define SSSR_RNE (1 << 3) -#define SSSR_TFS (1 << 5) 
-#define SSSR_RFS (1 << 6) -#define SSSR_ROR (1 << 7) -#define SSSR_PINT (1 << 18) -#define SSSR_TINT (1 << 19) -#define SSSR_EOC (1 << 20) -#define SSSR_TUR (1 << 21) -#define SSSR_BCE (1 << 23) -#define SSSR_RW 0x00bc0080 - -static void pxa2xx_ssp_int_update(PXA2xxSSPState *s) -{ - int level = 0; - - level |= s->ssitr & SSITR_INT; - level |= (s->sssr & SSSR_BCE) && (s->sscr[1] & SSCR1_EBCEI); - level |= (s->sssr & SSSR_TUR) && !(s->sscr[0] & SSCR0_TIM); - level |= (s->sssr & SSSR_EOC) && (s->sssr & (SSSR_TINT | SSSR_PINT)); - level |= (s->sssr & SSSR_TINT) && (s->sscr[1] & SSCR1_TINTE); - level |= (s->sssr & SSSR_PINT) && (s->sscr[1] & SSCR1_PINTE); - level |= (s->sssr & SSSR_ROR) && !(s->sscr[0] & SSCR0_RIM); - level |= (s->sssr & SSSR_RFS) && (s->sscr[1] & SSCR1_RIE); - level |= (s->sssr & SSSR_TFS) && (s->sscr[1] & SSCR1_TIE); - qemu_set_irq(s->irq, !!level); -} - -static void pxa2xx_ssp_fifo_update(PXA2xxSSPState *s) -{ - s->sssr &= ~(0xf << 12); /* Clear RFL */ - s->sssr &= ~(0xf << 8); /* Clear TFL */ - s->sssr &= ~SSSR_TFS; - s->sssr &= ~SSSR_TNF; - if (s->enable) { - s->sssr |= ((s->rx_level - 1) & 0xf) << 12; - if (s->rx_level >= SSCR1_RFT(s->sscr[1])) - s->sssr |= SSSR_RFS; - else - s->sssr &= ~SSSR_RFS; - if (s->rx_level) - s->sssr |= SSSR_RNE; - else - s->sssr &= ~SSSR_RNE; - /* TX FIFO is never filled, so it is always in underrun - condition if SSP is enabled */ - s->sssr |= SSSR_TFS; - s->sssr |= SSSR_TNF; - } - - pxa2xx_ssp_int_update(s); -} - -static uint64_t pxa2xx_ssp_read(void *opaque, hwaddr addr, - unsigned size) -{ - PXA2xxSSPState *s = (PXA2xxSSPState *) opaque; - uint32_t retval; - - switch (addr) { - case SSCR0: - return s->sscr[0]; - case SSCR1: - return s->sscr[1]; - case SSPSP: - return s->sspsp; - case SSTO: - return s->ssto; - case SSITR: - return s->ssitr; - case SSSR: - return s->sssr | s->ssitr; - case SSDR: - if (!s->enable) - return 0xffffffff; - if (s->rx_level < 1) { - printf("%s: SSP Rx Underrun\n", __func__); - return 0xffffffff; - } - s->rx_level --; - retval = s->rx_fifo[s->rx_start ++]; - s->rx_start &= 0xf; - pxa2xx_ssp_fifo_update(s); - return retval; - case SSTSA: - return s->sstsa; - case SSRSA: - return s->ssrsa; - case SSTSS: - return 0; - case SSACD: - return s->ssacd; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad read offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - break; - } - return 0; -} - -static void pxa2xx_ssp_write(void *opaque, hwaddr addr, - uint64_t value64, unsigned size) -{ - PXA2xxSSPState *s = (PXA2xxSSPState *) opaque; - uint32_t value = value64; - - switch (addr) { - case SSCR0: - s->sscr[0] = value & 0xc7ffffff; - s->enable = value & SSCR0_SSE; - if (value & SSCR0_MOD) - printf("%s: Attempt to use network mode\n", __func__); - if (s->enable && SSCR0_DSS(value) < 4) - printf("%s: Wrong data size: %u bits\n", __func__, - SSCR0_DSS(value)); - if (!(value & SSCR0_SSE)) { - s->sssr = 0; - s->ssitr = 0; - s->rx_level = 0; - } - pxa2xx_ssp_fifo_update(s); - break; - - case SSCR1: - s->sscr[1] = value; - if (value & (SSCR1_LBM | SSCR1_EFWR)) - printf("%s: Attempt to use SSP test mode\n", __func__); - pxa2xx_ssp_fifo_update(s); - break; - - case SSPSP: - s->sspsp = value; - break; - - case SSTO: - s->ssto = value; - break; - - case SSITR: - s->ssitr = value & SSITR_INT; - pxa2xx_ssp_int_update(s); - break; - - case SSSR: - s->sssr &= ~(value & SSSR_RW); - pxa2xx_ssp_int_update(s); - break; - - case SSDR: - if (SSCR0_UWIRE(s->sscr[0])) { - if (s->sscr[1] & SSCR1_MWDS) - value &= 0xffff; - else - value &= 0xff; - } else - 
/* Note how 32bits overflow does no harm here */ - value &= (1 << SSCR0_DSS(s->sscr[0])) - 1; - - /* Data goes from here to the Tx FIFO and is shifted out from - * there directly to the slave, no need to buffer it. - */ - if (s->enable) { - uint32_t readval; - readval = ssi_transfer(s->bus, value); - if (s->rx_level < 0x10) { - s->rx_fifo[(s->rx_start + s->rx_level ++) & 0xf] = readval; - } else { - s->sssr |= SSSR_ROR; - } - } - pxa2xx_ssp_fifo_update(s); - break; - - case SSTSA: - s->sstsa = value; - break; - - case SSRSA: - s->ssrsa = value; - break; - - case SSACD: - s->ssacd = value; - break; - - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad write offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - break; - } -} - -static const MemoryRegionOps pxa2xx_ssp_ops = { - .read = pxa2xx_ssp_read, - .write = pxa2xx_ssp_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void pxa2xx_ssp_reset(DeviceState *d) -{ - PXA2xxSSPState *s = PXA2XX_SSP(d); - - s->enable = 0; - s->sscr[0] = s->sscr[1] = 0; - s->sspsp = 0; - s->ssto = 0; - s->ssitr = 0; - s->sssr = 0; - s->sstsa = 0; - s->ssrsa = 0; - s->ssacd = 0; - s->rx_start = s->rx_level = 0; -} - -static void pxa2xx_ssp_init(Object *obj) -{ - DeviceState *dev = DEVICE(obj); - PXA2xxSSPState *s = PXA2XX_SSP(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - sysbus_init_irq(sbd, &s->irq); - - memory_region_init_io(&s->iomem, obj, &pxa2xx_ssp_ops, s, - "pxa2xx-ssp", 0x1000); - sysbus_init_mmio(sbd, &s->iomem); - - s->bus = ssi_create_bus(dev, "ssi"); -} - -/* Real-Time Clock */ -#define RCNR 0x00 /* RTC Counter register */ -#define RTAR 0x04 /* RTC Alarm register */ -#define RTSR 0x08 /* RTC Status register */ -#define RTTR 0x0c /* RTC Timer Trim register */ -#define RDCR 0x10 /* RTC Day Counter register */ -#define RYCR 0x14 /* RTC Year Counter register */ -#define RDAR1 0x18 /* RTC Wristwatch Day Alarm register 1 */ -#define RYAR1 0x1c /* RTC Wristwatch Year Alarm register 1 */ -#define RDAR2 0x20 /* RTC Wristwatch Day Alarm register 2 */ -#define RYAR2 0x24 /* RTC Wristwatch Year Alarm register 2 */ -#define SWCR 0x28 /* RTC Stopwatch Counter register */ -#define SWAR1 0x2c /* RTC Stopwatch Alarm register 1 */ -#define SWAR2 0x30 /* RTC Stopwatch Alarm register 2 */ -#define RTCPICR 0x34 /* RTC Periodic Interrupt Counter register */ -#define PIAR 0x38 /* RTC Periodic Interrupt Alarm register */ - -#define TYPE_PXA2XX_RTC "pxa2xx_rtc" -OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxRTCState, PXA2XX_RTC) - -struct PXA2xxRTCState { - /*< private >*/ - SysBusDevice parent_obj; - /*< public >*/ - - MemoryRegion iomem; - uint32_t rttr; - uint32_t rtsr; - uint32_t rtar; - uint32_t rdar1; - uint32_t rdar2; - uint32_t ryar1; - uint32_t ryar2; - uint32_t swar1; - uint32_t swar2; - uint32_t piar; - uint32_t last_rcnr; - uint32_t last_rdcr; - uint32_t last_rycr; - uint32_t last_swcr; - uint32_t last_rtcpicr; - int64_t last_hz; - int64_t last_sw; - int64_t last_pi; - QEMUTimer *rtc_hz; - QEMUTimer *rtc_rdal1; - QEMUTimer *rtc_rdal2; - QEMUTimer *rtc_swal1; - QEMUTimer *rtc_swal2; - QEMUTimer *rtc_pi; - qemu_irq rtc_irq; -}; - -static inline void pxa2xx_rtc_int_update(PXA2xxRTCState *s) -{ - qemu_set_irq(s->rtc_irq, !!(s->rtsr & 0x2553)); -} - -static void pxa2xx_rtc_hzupdate(PXA2xxRTCState *s) -{ - int64_t rt = qemu_clock_get_ms(rtc_clock); - s->last_rcnr += ((rt - s->last_hz) << 15) / - (1000 * ((s->rttr & 0xffff) + 1)); - s->last_rdcr += ((rt - s->last_hz) << 15) / - (1000 * ((s->rttr & 0xffff) + 1)); - s->last_hz = rt; -} - -static void 
pxa2xx_rtc_swupdate(PXA2xxRTCState *s) -{ - int64_t rt = qemu_clock_get_ms(rtc_clock); - if (s->rtsr & (1 << 12)) - s->last_swcr += (rt - s->last_sw) / 10; - s->last_sw = rt; -} - -static void pxa2xx_rtc_piupdate(PXA2xxRTCState *s) -{ - int64_t rt = qemu_clock_get_ms(rtc_clock); - if (s->rtsr & (1 << 15)) - s->last_swcr += rt - s->last_pi; - s->last_pi = rt; -} - -static inline void pxa2xx_rtc_alarm_update(PXA2xxRTCState *s, - uint32_t rtsr) -{ - if ((rtsr & (1 << 2)) && !(rtsr & (1 << 0))) - timer_mod(s->rtc_hz, s->last_hz + - (((s->rtar - s->last_rcnr) * 1000 * - ((s->rttr & 0xffff) + 1)) >> 15)); - else - timer_del(s->rtc_hz); - - if ((rtsr & (1 << 5)) && !(rtsr & (1 << 4))) - timer_mod(s->rtc_rdal1, s->last_hz + - (((s->rdar1 - s->last_rdcr) * 1000 * - ((s->rttr & 0xffff) + 1)) >> 15)); /* TODO: fixup */ - else - timer_del(s->rtc_rdal1); - - if ((rtsr & (1 << 7)) && !(rtsr & (1 << 6))) - timer_mod(s->rtc_rdal2, s->last_hz + - (((s->rdar2 - s->last_rdcr) * 1000 * - ((s->rttr & 0xffff) + 1)) >> 15)); /* TODO: fixup */ - else - timer_del(s->rtc_rdal2); - - if ((rtsr & 0x1200) == 0x1200 && !(rtsr & (1 << 8))) - timer_mod(s->rtc_swal1, s->last_sw + - (s->swar1 - s->last_swcr) * 10); /* TODO: fixup */ - else - timer_del(s->rtc_swal1); - - if ((rtsr & 0x1800) == 0x1800 && !(rtsr & (1 << 10))) - timer_mod(s->rtc_swal2, s->last_sw + - (s->swar2 - s->last_swcr) * 10); /* TODO: fixup */ - else - timer_del(s->rtc_swal2); - - if ((rtsr & 0xc000) == 0xc000 && !(rtsr & (1 << 13))) - timer_mod(s->rtc_pi, s->last_pi + - (s->piar & 0xffff) - s->last_rtcpicr); - else - timer_del(s->rtc_pi); -} - -static inline void pxa2xx_rtc_hz_tick(void *opaque) -{ - PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; - s->rtsr |= (1 << 0); - pxa2xx_rtc_alarm_update(s, s->rtsr); - pxa2xx_rtc_int_update(s); -} - -static inline void pxa2xx_rtc_rdal1_tick(void *opaque) -{ - PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; - s->rtsr |= (1 << 4); - pxa2xx_rtc_alarm_update(s, s->rtsr); - pxa2xx_rtc_int_update(s); -} - -static inline void pxa2xx_rtc_rdal2_tick(void *opaque) -{ - PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; - s->rtsr |= (1 << 6); - pxa2xx_rtc_alarm_update(s, s->rtsr); - pxa2xx_rtc_int_update(s); -} - -static inline void pxa2xx_rtc_swal1_tick(void *opaque) -{ - PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; - s->rtsr |= (1 << 8); - pxa2xx_rtc_alarm_update(s, s->rtsr); - pxa2xx_rtc_int_update(s); -} - -static inline void pxa2xx_rtc_swal2_tick(void *opaque) -{ - PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; - s->rtsr |= (1 << 10); - pxa2xx_rtc_alarm_update(s, s->rtsr); - pxa2xx_rtc_int_update(s); -} - -static inline void pxa2xx_rtc_pi_tick(void *opaque) -{ - PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; - s->rtsr |= (1 << 13); - pxa2xx_rtc_piupdate(s); - s->last_rtcpicr = 0; - pxa2xx_rtc_alarm_update(s, s->rtsr); - pxa2xx_rtc_int_update(s); -} - -static uint64_t pxa2xx_rtc_read(void *opaque, hwaddr addr, - unsigned size) -{ - PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; - - switch (addr) { - case RTTR: - return s->rttr; - case RTSR: - return s->rtsr; - case RTAR: - return s->rtar; - case RDAR1: - return s->rdar1; - case RDAR2: - return s->rdar2; - case RYAR1: - return s->ryar1; - case RYAR2: - return s->ryar2; - case SWAR1: - return s->swar1; - case SWAR2: - return s->swar2; - case PIAR: - return s->piar; - case RCNR: - return s->last_rcnr + - ((qemu_clock_get_ms(rtc_clock) - s->last_hz) << 15) / - (1000 * ((s->rttr & 0xffff) + 1)); - case RDCR: - return s->last_rdcr + - ((qemu_clock_get_ms(rtc_clock) - 
s->last_hz) << 15) / - (1000 * ((s->rttr & 0xffff) + 1)); - case RYCR: - return s->last_rycr; - case SWCR: - if (s->rtsr & (1 << 12)) - return s->last_swcr + - (qemu_clock_get_ms(rtc_clock) - s->last_sw) / 10; - else - return s->last_swcr; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad read offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - break; - } - return 0; -} - -static void pxa2xx_rtc_write(void *opaque, hwaddr addr, - uint64_t value64, unsigned size) -{ - PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; - uint32_t value = value64; - - switch (addr) { - case RTTR: - if (!(s->rttr & (1U << 31))) { - pxa2xx_rtc_hzupdate(s); - s->rttr = value; - pxa2xx_rtc_alarm_update(s, s->rtsr); - } - break; - - case RTSR: - if ((s->rtsr ^ value) & (1 << 15)) - pxa2xx_rtc_piupdate(s); - - if ((s->rtsr ^ value) & (1 << 12)) - pxa2xx_rtc_swupdate(s); - - if (((s->rtsr ^ value) & 0x4aac) | (value & ~0xdaac)) - pxa2xx_rtc_alarm_update(s, value); - - s->rtsr = (value & 0xdaac) | (s->rtsr & ~(value & ~0xdaac)); - pxa2xx_rtc_int_update(s); - break; - - case RTAR: - s->rtar = value; - pxa2xx_rtc_alarm_update(s, s->rtsr); - break; - - case RDAR1: - s->rdar1 = value; - pxa2xx_rtc_alarm_update(s, s->rtsr); - break; - - case RDAR2: - s->rdar2 = value; - pxa2xx_rtc_alarm_update(s, s->rtsr); - break; - - case RYAR1: - s->ryar1 = value; - pxa2xx_rtc_alarm_update(s, s->rtsr); - break; - - case RYAR2: - s->ryar2 = value; - pxa2xx_rtc_alarm_update(s, s->rtsr); - break; - - case SWAR1: - pxa2xx_rtc_swupdate(s); - s->swar1 = value; - s->last_swcr = 0; - pxa2xx_rtc_alarm_update(s, s->rtsr); - break; - - case SWAR2: - s->swar2 = value; - pxa2xx_rtc_alarm_update(s, s->rtsr); - break; - - case PIAR: - s->piar = value; - pxa2xx_rtc_alarm_update(s, s->rtsr); - break; - - case RCNR: - pxa2xx_rtc_hzupdate(s); - s->last_rcnr = value; - pxa2xx_rtc_alarm_update(s, s->rtsr); - break; - - case RDCR: - pxa2xx_rtc_hzupdate(s); - s->last_rdcr = value; - pxa2xx_rtc_alarm_update(s, s->rtsr); - break; - - case RYCR: - s->last_rycr = value; - break; - - case SWCR: - pxa2xx_rtc_swupdate(s); - s->last_swcr = value; - pxa2xx_rtc_alarm_update(s, s->rtsr); - break; - - case RTCPICR: - pxa2xx_rtc_piupdate(s); - s->last_rtcpicr = value & 0xffff; - pxa2xx_rtc_alarm_update(s, s->rtsr); - break; - - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad write offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - } -} - -static const MemoryRegionOps pxa2xx_rtc_ops = { - .read = pxa2xx_rtc_read, - .write = pxa2xx_rtc_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void pxa2xx_rtc_init(Object *obj) -{ - PXA2xxRTCState *s = PXA2XX_RTC(obj); - SysBusDevice *dev = SYS_BUS_DEVICE(obj); - struct tm tm; - int wom; - - s->rttr = 0x7fff; - s->rtsr = 0; - - qemu_get_timedate(&tm, 0); - wom = ((tm.tm_mday - 1) / 7) + 1; - - s->last_rcnr = (uint32_t) mktimegm(&tm); - s->last_rdcr = (wom << 20) | ((tm.tm_wday + 1) << 17) | - (tm.tm_hour << 12) | (tm.tm_min << 6) | tm.tm_sec; - s->last_rycr = ((tm.tm_year + 1900) << 9) | - ((tm.tm_mon + 1) << 5) | tm.tm_mday; - s->last_swcr = (tm.tm_hour << 19) | - (tm.tm_min << 13) | (tm.tm_sec << 7); - s->last_rtcpicr = 0; - s->last_hz = s->last_sw = s->last_pi = qemu_clock_get_ms(rtc_clock); - - sysbus_init_irq(dev, &s->rtc_irq); - - memory_region_init_io(&s->iomem, obj, &pxa2xx_rtc_ops, s, - "pxa2xx-rtc", 0x10000); - sysbus_init_mmio(dev, &s->iomem); -} - -static void pxa2xx_rtc_realize(DeviceState *dev, Error **errp) -{ - PXA2xxRTCState *s = PXA2XX_RTC(dev); - s->rtc_hz = timer_new_ms(rtc_clock, pxa2xx_rtc_hz_tick, 
s); - s->rtc_rdal1 = timer_new_ms(rtc_clock, pxa2xx_rtc_rdal1_tick, s); - s->rtc_rdal2 = timer_new_ms(rtc_clock, pxa2xx_rtc_rdal2_tick, s); - s->rtc_swal1 = timer_new_ms(rtc_clock, pxa2xx_rtc_swal1_tick, s); - s->rtc_swal2 = timer_new_ms(rtc_clock, pxa2xx_rtc_swal2_tick, s); - s->rtc_pi = timer_new_ms(rtc_clock, pxa2xx_rtc_pi_tick, s); -} - -static int pxa2xx_rtc_pre_save(void *opaque) -{ - PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; - - pxa2xx_rtc_hzupdate(s); - pxa2xx_rtc_piupdate(s); - pxa2xx_rtc_swupdate(s); - - return 0; -} - -static int pxa2xx_rtc_post_load(void *opaque, int version_id) -{ - PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; - - pxa2xx_rtc_alarm_update(s, s->rtsr); - - return 0; -} - -static const VMStateDescription vmstate_pxa2xx_rtc_regs = { - .name = "pxa2xx_rtc", - .version_id = 0, - .minimum_version_id = 0, - .pre_save = pxa2xx_rtc_pre_save, - .post_load = pxa2xx_rtc_post_load, - .fields = (const VMStateField[]) { - VMSTATE_UINT32(rttr, PXA2xxRTCState), - VMSTATE_UINT32(rtsr, PXA2xxRTCState), - VMSTATE_UINT32(rtar, PXA2xxRTCState), - VMSTATE_UINT32(rdar1, PXA2xxRTCState), - VMSTATE_UINT32(rdar2, PXA2xxRTCState), - VMSTATE_UINT32(ryar1, PXA2xxRTCState), - VMSTATE_UINT32(ryar2, PXA2xxRTCState), - VMSTATE_UINT32(swar1, PXA2xxRTCState), - VMSTATE_UINT32(swar2, PXA2xxRTCState), - VMSTATE_UINT32(piar, PXA2xxRTCState), - VMSTATE_UINT32(last_rcnr, PXA2xxRTCState), - VMSTATE_UINT32(last_rdcr, PXA2xxRTCState), - VMSTATE_UINT32(last_rycr, PXA2xxRTCState), - VMSTATE_UINT32(last_swcr, PXA2xxRTCState), - VMSTATE_UINT32(last_rtcpicr, PXA2xxRTCState), - VMSTATE_INT64(last_hz, PXA2xxRTCState), - VMSTATE_INT64(last_sw, PXA2xxRTCState), - VMSTATE_INT64(last_pi, PXA2xxRTCState), - VMSTATE_END_OF_LIST(), - }, -}; - -static void pxa2xx_rtc_sysbus_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->desc = "PXA2xx RTC Controller"; - dc->vmsd = &vmstate_pxa2xx_rtc_regs; - dc->realize = pxa2xx_rtc_realize; -} - -static const TypeInfo pxa2xx_rtc_sysbus_info = { - .name = TYPE_PXA2XX_RTC, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PXA2xxRTCState), - .instance_init = pxa2xx_rtc_init, - .class_init = pxa2xx_rtc_sysbus_class_init, -}; - -/* I2C Interface */ - -#define TYPE_PXA2XX_I2C_SLAVE "pxa2xx-i2c-slave" -OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxI2CSlaveState, PXA2XX_I2C_SLAVE) - -struct PXA2xxI2CSlaveState { - I2CSlave parent_obj; - - PXA2xxI2CState *host; -}; - -struct PXA2xxI2CState { - /*< private >*/ - SysBusDevice parent_obj; - /*< public >*/ - - MemoryRegion iomem; - PXA2xxI2CSlaveState *slave; - I2CBus *bus; - qemu_irq irq; - uint32_t offset; - uint32_t region_size; - - uint16_t control; - uint16_t status; - uint8_t ibmr; - uint8_t data; -}; - -#define IBMR 0x80 /* I2C Bus Monitor register */ -#define IDBR 0x88 /* I2C Data Buffer register */ -#define ICR 0x90 /* I2C Control register */ -#define ISR 0x98 /* I2C Status register */ -#define ISAR 0xa0 /* I2C Slave Address register */ - -static void pxa2xx_i2c_update(PXA2xxI2CState *s) -{ - uint16_t level = 0; - level |= s->status & s->control & (1 << 10); /* BED */ - level |= (s->status & (1 << 7)) && (s->control & (1 << 9)); /* IRF */ - level |= (s->status & (1 << 6)) && (s->control & (1 << 8)); /* ITE */ - level |= s->status & (1 << 9); /* SAD */ - qemu_set_irq(s->irq, !!level); -} - -/* These are only stubs now. 
*/ -static int pxa2xx_i2c_event(I2CSlave *i2c, enum i2c_event event) -{ - PXA2xxI2CSlaveState *slave = PXA2XX_I2C_SLAVE(i2c); - PXA2xxI2CState *s = slave->host; - - switch (event) { - case I2C_START_SEND: - s->status |= (1 << 9); /* set SAD */ - s->status &= ~(1 << 0); /* clear RWM */ - break; - case I2C_START_RECV: - s->status |= (1 << 9); /* set SAD */ - s->status |= 1 << 0; /* set RWM */ - break; - case I2C_FINISH: - s->status |= (1 << 4); /* set SSD */ - break; - case I2C_NACK: - s->status |= 1 << 1; /* set ACKNAK */ - break; - default: - return -1; - } - pxa2xx_i2c_update(s); - - return 0; -} - -static uint8_t pxa2xx_i2c_rx(I2CSlave *i2c) -{ - PXA2xxI2CSlaveState *slave = PXA2XX_I2C_SLAVE(i2c); - PXA2xxI2CState *s = slave->host; - - if ((s->control & (1 << 14)) || !(s->control & (1 << 6))) { - return 0; - } - - if (s->status & (1 << 0)) { /* RWM */ - s->status |= 1 << 6; /* set ITE */ - } - pxa2xx_i2c_update(s); - - return s->data; -} - -static int pxa2xx_i2c_tx(I2CSlave *i2c, uint8_t data) -{ - PXA2xxI2CSlaveState *slave = PXA2XX_I2C_SLAVE(i2c); - PXA2xxI2CState *s = slave->host; - - if ((s->control & (1 << 14)) || !(s->control & (1 << 6))) { - return 1; - } - - if (!(s->status & (1 << 0))) { /* RWM */ - s->status |= 1 << 7; /* set IRF */ - s->data = data; - } - pxa2xx_i2c_update(s); - - return 1; -} - -static uint64_t pxa2xx_i2c_read(void *opaque, hwaddr addr, - unsigned size) -{ - PXA2xxI2CState *s = (PXA2xxI2CState *) opaque; - I2CSlave *slave; - - addr -= s->offset; - switch (addr) { - case ICR: - return s->control; - case ISR: - return s->status | (i2c_bus_busy(s->bus) << 2); - case ISAR: - slave = I2C_SLAVE(s->slave); - return slave->address; - case IDBR: - return s->data; - case IBMR: - if (s->status & (1 << 2)) - s->ibmr ^= 3; /* Fake SCL and SDA pin changes */ - else - s->ibmr = 0; - return s->ibmr; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad read offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - break; - } - return 0; -} - -static void pxa2xx_i2c_write(void *opaque, hwaddr addr, - uint64_t value64, unsigned size) -{ - PXA2xxI2CState *s = (PXA2xxI2CState *) opaque; - uint32_t value = value64; - int ack; - - addr -= s->offset; - switch (addr) { - case ICR: - s->control = value & 0xfff7; - if ((value & (1 << 3)) && (value & (1 << 6))) { /* TB and IUE */ - /* TODO: slave mode */ - if (value & (1 << 0)) { /* START condition */ - if (s->data & 1) - s->status |= 1 << 0; /* set RWM */ - else - s->status &= ~(1 << 0); /* clear RWM */ - ack = !i2c_start_transfer(s->bus, s->data >> 1, s->data & 1); - } else { - if (s->status & (1 << 0)) { /* RWM */ - s->data = i2c_recv(s->bus); - if (value & (1 << 2)) /* ACKNAK */ - i2c_nack(s->bus); - ack = 1; - } else - ack = !i2c_send(s->bus, s->data); - } - - if (value & (1 << 1)) /* STOP condition */ - i2c_end_transfer(s->bus); - - if (ack) { - if (value & (1 << 0)) /* START condition */ - s->status |= 1 << 6; /* set ITE */ - else - if (s->status & (1 << 0)) /* RWM */ - s->status |= 1 << 7; /* set IRF */ - else - s->status |= 1 << 6; /* set ITE */ - s->status &= ~(1 << 1); /* clear ACKNAK */ - } else { - s->status |= 1 << 6; /* set ITE */ - s->status |= 1 << 10; /* set BED */ - s->status |= 1 << 1; /* set ACKNAK */ - } - } - if (!(value & (1 << 3)) && (value & (1 << 6))) /* !TB and IUE */ - if (value & (1 << 4)) /* MA */ - i2c_end_transfer(s->bus); - pxa2xx_i2c_update(s); - break; - - case ISR: - s->status &= ~(value & 0x07f0); - pxa2xx_i2c_update(s); - break; - - case ISAR: - i2c_slave_set_address(I2C_SLAVE(s->slave), value & 0x7f); - 
break; - - case IDBR: - s->data = value & 0xff; - break; - - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad write offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - } -} - -static const MemoryRegionOps pxa2xx_i2c_ops = { - .read = pxa2xx_i2c_read, - .write = pxa2xx_i2c_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static const VMStateDescription vmstate_pxa2xx_i2c_slave = { - .name = "pxa2xx_i2c_slave", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_I2C_SLAVE(parent_obj, PXA2xxI2CSlaveState), - VMSTATE_END_OF_LIST() - } -}; - -static const VMStateDescription vmstate_pxa2xx_i2c = { - .name = "pxa2xx_i2c", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_UINT16(control, PXA2xxI2CState), - VMSTATE_UINT16(status, PXA2xxI2CState), - VMSTATE_UINT8(ibmr, PXA2xxI2CState), - VMSTATE_UINT8(data, PXA2xxI2CState), - VMSTATE_STRUCT_POINTER(slave, PXA2xxI2CState, - vmstate_pxa2xx_i2c_slave, PXA2xxI2CSlaveState), - VMSTATE_END_OF_LIST() - } -}; - -static void pxa2xx_i2c_slave_class_init(ObjectClass *klass, void *data) -{ - I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); - - k->event = pxa2xx_i2c_event; - k->recv = pxa2xx_i2c_rx; - k->send = pxa2xx_i2c_tx; -} - -static const TypeInfo pxa2xx_i2c_slave_info = { - .name = TYPE_PXA2XX_I2C_SLAVE, - .parent = TYPE_I2C_SLAVE, - .instance_size = sizeof(PXA2xxI2CSlaveState), - .class_init = pxa2xx_i2c_slave_class_init, -}; - -PXA2xxI2CState *pxa2xx_i2c_init(hwaddr base, - qemu_irq irq, uint32_t region_size) -{ - DeviceState *dev; - SysBusDevice *i2c_dev; - PXA2xxI2CState *s; - I2CBus *i2cbus; - - dev = qdev_new(TYPE_PXA2XX_I2C); - qdev_prop_set_uint32(dev, "size", region_size + 1); - qdev_prop_set_uint32(dev, "offset", base & region_size); - - /* FIXME: Should the slave device really be on a separate bus? 
*/ - i2cbus = i2c_init_bus(dev, "dummy"); - - i2c_dev = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(i2c_dev, &error_fatal); - sysbus_mmio_map(i2c_dev, 0, base & ~region_size); - sysbus_connect_irq(i2c_dev, 0, irq); - - s = PXA2XX_I2C(i2c_dev); - s->slave = PXA2XX_I2C_SLAVE(i2c_slave_create_simple(i2cbus, - TYPE_PXA2XX_I2C_SLAVE, - 0)); - s->slave->host = s; - - return s; -} - -static void pxa2xx_i2c_initfn(Object *obj) -{ - DeviceState *dev = DEVICE(obj); - PXA2xxI2CState *s = PXA2XX_I2C(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - - s->bus = i2c_init_bus(dev, NULL); - - memory_region_init_io(&s->iomem, obj, &pxa2xx_i2c_ops, s, - "pxa2xx-i2c", s->region_size); - sysbus_init_mmio(sbd, &s->iomem); - sysbus_init_irq(sbd, &s->irq); -} - -I2CBus *pxa2xx_i2c_bus(PXA2xxI2CState *s) -{ - return s->bus; -} - -static Property pxa2xx_i2c_properties[] = { - DEFINE_PROP_UINT32("size", PXA2xxI2CState, region_size, 0x10000), - DEFINE_PROP_UINT32("offset", PXA2xxI2CState, offset, 0), - DEFINE_PROP_END_OF_LIST(), -}; - -static void pxa2xx_i2c_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->desc = "PXA2xx I2C Bus Controller"; - dc->vmsd = &vmstate_pxa2xx_i2c; - device_class_set_props(dc, pxa2xx_i2c_properties); -} - -static const TypeInfo pxa2xx_i2c_info = { - .name = TYPE_PXA2XX_I2C, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PXA2xxI2CState), - .instance_init = pxa2xx_i2c_initfn, - .class_init = pxa2xx_i2c_class_init, -}; - -/* PXA Inter-IC Sound Controller */ -static void pxa2xx_i2s_reset(PXA2xxI2SState *i2s) -{ - i2s->rx_len = 0; - i2s->tx_len = 0; - i2s->fifo_len = 0; - i2s->clk = 0x1a; - i2s->control[0] = 0x00; - i2s->control[1] = 0x00; - i2s->status = 0x00; - i2s->mask = 0x00; -} - -#define SACR_TFTH(val) ((val >> 8) & 0xf) -#define SACR_RFTH(val) ((val >> 12) & 0xf) -#define SACR_DREC(val) (val & (1 << 3)) -#define SACR_DPRL(val) (val & (1 << 4)) - -static inline void pxa2xx_i2s_update(PXA2xxI2SState *i2s) -{ - int rfs, tfs; - rfs = SACR_RFTH(i2s->control[0]) < i2s->rx_len && - !SACR_DREC(i2s->control[1]); - tfs = (i2s->tx_len || i2s->fifo_len < SACR_TFTH(i2s->control[0])) && - i2s->enable && !SACR_DPRL(i2s->control[1]); - - qemu_set_irq(i2s->rx_dma, rfs); - qemu_set_irq(i2s->tx_dma, tfs); - - i2s->status &= 0xe0; - if (i2s->fifo_len < 16 || !i2s->enable) - i2s->status |= 1 << 0; /* TNF */ - if (i2s->rx_len) - i2s->status |= 1 << 1; /* RNE */ - if (i2s->enable) - i2s->status |= 1 << 2; /* BSY */ - if (tfs) - i2s->status |= 1 << 3; /* TFS */ - if (rfs) - i2s->status |= 1 << 4; /* RFS */ - if (!(i2s->tx_len && i2s->enable)) - i2s->status |= i2s->fifo_len << 8; /* TFL */ - i2s->status |= MAX(i2s->rx_len, 0xf) << 12; /* RFL */ - - qemu_set_irq(i2s->irq, i2s->status & i2s->mask); -} - -#define SACR0 0x00 /* Serial Audio Global Control register */ -#define SACR1 0x04 /* Serial Audio I2S/MSB-Justified Control register */ -#define SASR0 0x0c /* Serial Audio Interface and FIFO Status register */ -#define SAIMR 0x14 /* Serial Audio Interrupt Mask register */ -#define SAICR 0x18 /* Serial Audio Interrupt Clear register */ -#define SADIV 0x60 /* Serial Audio Clock Divider register */ -#define SADR 0x80 /* Serial Audio Data register */ - -static uint64_t pxa2xx_i2s_read(void *opaque, hwaddr addr, - unsigned size) -{ - PXA2xxI2SState *s = (PXA2xxI2SState *) opaque; - - switch (addr) { - case SACR0: - return s->control[0]; - case SACR1: - return s->control[1]; - case SASR0: - return s->status; - case SAIMR: - return s->mask; - case SAICR: - 
return 0; - case SADIV: - return s->clk; - case SADR: - if (s->rx_len > 0) { - s->rx_len --; - pxa2xx_i2s_update(s); - return s->codec_in(s->opaque); - } - return 0; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad read offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - break; - } - return 0; -} - -static void pxa2xx_i2s_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - PXA2xxI2SState *s = (PXA2xxI2SState *) opaque; - uint32_t *sample; - - switch (addr) { - case SACR0: - if (value & (1 << 3)) /* RST */ - pxa2xx_i2s_reset(s); - s->control[0] = value & 0xff3d; - if (!s->enable && (value & 1) && s->tx_len) { /* ENB */ - for (sample = s->fifo; s->fifo_len > 0; s->fifo_len --, sample ++) - s->codec_out(s->opaque, *sample); - s->status &= ~(1 << 7); /* I2SOFF */ - } - if (value & (1 << 4)) /* EFWR */ - printf("%s: Attempt to use special function\n", __func__); - s->enable = (value & 9) == 1; /* ENB && !RST*/ - pxa2xx_i2s_update(s); - break; - case SACR1: - s->control[1] = value & 0x0039; - if (value & (1 << 5)) /* ENLBF */ - printf("%s: Attempt to use loopback function\n", __func__); - if (value & (1 << 4)) /* DPRL */ - s->fifo_len = 0; - pxa2xx_i2s_update(s); - break; - case SAIMR: - s->mask = value & 0x0078; - pxa2xx_i2s_update(s); - break; - case SAICR: - s->status &= ~(value & (3 << 5)); - pxa2xx_i2s_update(s); - break; - case SADIV: - s->clk = value & 0x007f; - break; - case SADR: - if (s->tx_len && s->enable) { - s->tx_len --; - pxa2xx_i2s_update(s); - s->codec_out(s->opaque, value); - } else if (s->fifo_len < 16) { - s->fifo[s->fifo_len ++] = value; - pxa2xx_i2s_update(s); - } - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad write offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - } -} - -static const MemoryRegionOps pxa2xx_i2s_ops = { - .read = pxa2xx_i2s_read, - .write = pxa2xx_i2s_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static const VMStateDescription vmstate_pxa2xx_i2s = { - .name = "pxa2xx_i2s", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_UINT32_ARRAY(control, PXA2xxI2SState, 2), - VMSTATE_UINT32(status, PXA2xxI2SState), - VMSTATE_UINT32(mask, PXA2xxI2SState), - VMSTATE_UINT32(clk, PXA2xxI2SState), - VMSTATE_INT32(enable, PXA2xxI2SState), - VMSTATE_INT32(rx_len, PXA2xxI2SState), - VMSTATE_INT32(tx_len, PXA2xxI2SState), - VMSTATE_INT32(fifo_len, PXA2xxI2SState), - VMSTATE_END_OF_LIST() - } -}; - -static void pxa2xx_i2s_data_req(void *opaque, int tx, int rx) -{ - PXA2xxI2SState *s = (PXA2xxI2SState *) opaque; - uint32_t *sample; - - /* Signal FIFO errors */ - if (s->enable && s->tx_len) - s->status |= 1 << 5; /* TUR */ - if (s->enable && s->rx_len) - s->status |= 1 << 6; /* ROR */ - - /* Should be tx - MIN(tx, s->fifo_len) but we don't really need to - * handle the cases where it makes a difference. */ - s->tx_len = tx - s->fifo_len; - s->rx_len = rx; - /* Note that is s->codec_out wasn't set, we wouldn't get called. 
*/ - if (s->enable) - for (sample = s->fifo; s->fifo_len; s->fifo_len --, sample ++) - s->codec_out(s->opaque, *sample); - pxa2xx_i2s_update(s); -} - -static PXA2xxI2SState *pxa2xx_i2s_init(MemoryRegion *sysmem, - hwaddr base, - qemu_irq irq, qemu_irq rx_dma, qemu_irq tx_dma) -{ - PXA2xxI2SState *s = g_new0(PXA2xxI2SState, 1); - - s->irq = irq; - s->rx_dma = rx_dma; - s->tx_dma = tx_dma; - s->data_req = pxa2xx_i2s_data_req; - - pxa2xx_i2s_reset(s); - - memory_region_init_io(&s->iomem, NULL, &pxa2xx_i2s_ops, s, - "pxa2xx-i2s", 0x100000); - memory_region_add_subregion(sysmem, base, &s->iomem); - - vmstate_register(NULL, base, &vmstate_pxa2xx_i2s, s); - - return s; -} - -/* PXA Fast Infra-red Communications Port */ -struct PXA2xxFIrState { - /*< private >*/ - SysBusDevice parent_obj; - /*< public >*/ - - MemoryRegion iomem; - qemu_irq irq; - qemu_irq rx_dma; - qemu_irq tx_dma; - uint32_t enable; - CharBackend chr; - - uint8_t control[3]; - uint8_t status[2]; - - uint32_t rx_len; - uint32_t rx_start; - uint8_t rx_fifo[64]; -}; - -static void pxa2xx_fir_reset(DeviceState *d) -{ - PXA2xxFIrState *s = PXA2XX_FIR(d); - - s->control[0] = 0x00; - s->control[1] = 0x00; - s->control[2] = 0x00; - s->status[0] = 0x00; - s->status[1] = 0x00; - s->enable = 0; -} - -static inline void pxa2xx_fir_update(PXA2xxFIrState *s) -{ - static const int tresh[4] = { 8, 16, 32, 0 }; - int intr = 0; - if ((s->control[0] & (1 << 4)) && /* RXE */ - s->rx_len >= tresh[s->control[2] & 3]) /* TRIG */ - s->status[0] |= 1 << 4; /* RFS */ - else - s->status[0] &= ~(1 << 4); /* RFS */ - if (s->control[0] & (1 << 3)) /* TXE */ - s->status[0] |= 1 << 3; /* TFS */ - else - s->status[0] &= ~(1 << 3); /* TFS */ - if (s->rx_len) - s->status[1] |= 1 << 2; /* RNE */ - else - s->status[1] &= ~(1 << 2); /* RNE */ - if (s->control[0] & (1 << 4)) /* RXE */ - s->status[1] |= 1 << 0; /* RSY */ - else - s->status[1] &= ~(1 << 0); /* RSY */ - - intr |= (s->control[0] & (1 << 5)) && /* RIE */ - (s->status[0] & (1 << 4)); /* RFS */ - intr |= (s->control[0] & (1 << 6)) && /* TIE */ - (s->status[0] & (1 << 3)); /* TFS */ - intr |= (s->control[2] & (1 << 4)) && /* TRAIL */ - (s->status[0] & (1 << 6)); /* EOC */ - intr |= (s->control[0] & (1 << 2)) && /* TUS */ - (s->status[0] & (1 << 1)); /* TUR */ - intr |= s->status[0] & 0x25; /* FRE, RAB, EIF */ - - qemu_set_irq(s->rx_dma, (s->status[0] >> 4) & 1); - qemu_set_irq(s->tx_dma, (s->status[0] >> 3) & 1); - - qemu_set_irq(s->irq, intr && s->enable); -} - -#define ICCR0 0x00 /* FICP Control register 0 */ -#define ICCR1 0x04 /* FICP Control register 1 */ -#define ICCR2 0x08 /* FICP Control register 2 */ -#define ICDR 0x0c /* FICP Data register */ -#define ICSR0 0x14 /* FICP Status register 0 */ -#define ICSR1 0x18 /* FICP Status register 1 */ -#define ICFOR 0x1c /* FICP FIFO Occupancy Status register */ - -static uint64_t pxa2xx_fir_read(void *opaque, hwaddr addr, - unsigned size) -{ - PXA2xxFIrState *s = (PXA2xxFIrState *) opaque; - uint8_t ret; - - switch (addr) { - case ICCR0: - return s->control[0]; - case ICCR1: - return s->control[1]; - case ICCR2: - return s->control[2]; - case ICDR: - s->status[0] &= ~0x01; - s->status[1] &= ~0x72; - if (s->rx_len) { - s->rx_len --; - ret = s->rx_fifo[s->rx_start ++]; - s->rx_start &= 63; - pxa2xx_fir_update(s); - return ret; - } - printf("%s: Rx FIFO underrun.\n", __func__); - break; - case ICSR0: - return s->status[0]; - case ICSR1: - return s->status[1] | (1 << 3); /* TNF */ - case ICFOR: - return s->rx_len; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: 
Bad read offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - break; - } - return 0; -} - -static void pxa2xx_fir_write(void *opaque, hwaddr addr, - uint64_t value64, unsigned size) -{ - PXA2xxFIrState *s = (PXA2xxFIrState *) opaque; - uint32_t value = value64; - uint8_t ch; - - switch (addr) { - case ICCR0: - s->control[0] = value; - if (!(value & (1 << 4))) /* RXE */ - s->rx_len = s->rx_start = 0; - if (!(value & (1 << 3))) { /* TXE */ - /* Nop */ - } - s->enable = value & 1; /* ITR */ - if (!s->enable) - s->status[0] = 0; - pxa2xx_fir_update(s); - break; - case ICCR1: - s->control[1] = value; - break; - case ICCR2: - s->control[2] = value & 0x3f; - pxa2xx_fir_update(s); - break; - case ICDR: - if (s->control[2] & (1 << 2)) { /* TXP */ - ch = value; - } else { - ch = ~value; - } - if (s->enable && (s->control[0] & (1 << 3))) { /* TXE */ - /* XXX this blocks entire thread. Rewrite to use - * qemu_chr_fe_write and background I/O callbacks */ - qemu_chr_fe_write_all(&s->chr, &ch, 1); - } - break; - case ICSR0: - s->status[0] &= ~(value & 0x66); - pxa2xx_fir_update(s); - break; - case ICFOR: - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad write offset 0x%"HWADDR_PRIx"\n", - __func__, addr); - } -} - -static const MemoryRegionOps pxa2xx_fir_ops = { - .read = pxa2xx_fir_read, - .write = pxa2xx_fir_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static int pxa2xx_fir_is_empty(void *opaque) -{ - PXA2xxFIrState *s = (PXA2xxFIrState *) opaque; - return (s->rx_len < 64); -} - -static void pxa2xx_fir_rx(void *opaque, const uint8_t *buf, int size) -{ - PXA2xxFIrState *s = (PXA2xxFIrState *) opaque; - if (!(s->control[0] & (1 << 4))) /* RXE */ - return; - - while (size --) { - s->status[1] |= 1 << 4; /* EOF */ - if (s->rx_len >= 64) { - s->status[1] |= 1 << 6; /* ROR */ - break; - } - - if (s->control[2] & (1 << 3)) /* RXP */ - s->rx_fifo[(s->rx_start + s->rx_len ++) & 63] = *(buf ++); - else - s->rx_fifo[(s->rx_start + s->rx_len ++) & 63] = ~*(buf ++); - } - - pxa2xx_fir_update(s); -} - -static void pxa2xx_fir_event(void *opaque, QEMUChrEvent event) -{ -} - -static void pxa2xx_fir_instance_init(Object *obj) -{ - PXA2xxFIrState *s = PXA2XX_FIR(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - - memory_region_init_io(&s->iomem, obj, &pxa2xx_fir_ops, s, - "pxa2xx-fir", 0x1000); - sysbus_init_mmio(sbd, &s->iomem); - sysbus_init_irq(sbd, &s->irq); - sysbus_init_irq(sbd, &s->rx_dma); - sysbus_init_irq(sbd, &s->tx_dma); -} - -static void pxa2xx_fir_realize(DeviceState *dev, Error **errp) -{ - PXA2xxFIrState *s = PXA2XX_FIR(dev); - - qemu_chr_fe_set_handlers(&s->chr, pxa2xx_fir_is_empty, - pxa2xx_fir_rx, pxa2xx_fir_event, NULL, s, NULL, - true); -} - -static bool pxa2xx_fir_vmstate_validate(void *opaque, int version_id) -{ - PXA2xxFIrState *s = opaque; - - return s->rx_start < ARRAY_SIZE(s->rx_fifo); -} - -static const VMStateDescription pxa2xx_fir_vmsd = { - .name = "pxa2xx-fir", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_UINT32(enable, PXA2xxFIrState), - VMSTATE_UINT8_ARRAY(control, PXA2xxFIrState, 3), - VMSTATE_UINT8_ARRAY(status, PXA2xxFIrState, 2), - VMSTATE_UINT32(rx_len, PXA2xxFIrState), - VMSTATE_UINT32(rx_start, PXA2xxFIrState), - VMSTATE_VALIDATE("fifo is 64 bytes", pxa2xx_fir_vmstate_validate), - VMSTATE_UINT8_ARRAY(rx_fifo, PXA2xxFIrState, 64), - VMSTATE_END_OF_LIST() - } -}; - -static Property pxa2xx_fir_properties[] = { - DEFINE_PROP_CHR("chardev", PXA2xxFIrState, chr), - DEFINE_PROP_END_OF_LIST(), -}; - -static void 
pxa2xx_fir_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->realize = pxa2xx_fir_realize; - dc->vmsd = &pxa2xx_fir_vmsd; - device_class_set_props(dc, pxa2xx_fir_properties); - dc->reset = pxa2xx_fir_reset; -} - -static const TypeInfo pxa2xx_fir_info = { - .name = TYPE_PXA2XX_FIR, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PXA2xxFIrState), - .class_init = pxa2xx_fir_class_init, - .instance_init = pxa2xx_fir_instance_init, -}; - -static PXA2xxFIrState *pxa2xx_fir_init(MemoryRegion *sysmem, - hwaddr base, - qemu_irq irq, qemu_irq rx_dma, - qemu_irq tx_dma, - Chardev *chr) -{ - DeviceState *dev; - SysBusDevice *sbd; - - dev = qdev_new(TYPE_PXA2XX_FIR); - qdev_prop_set_chr(dev, "chardev", chr); - sbd = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(sbd, &error_fatal); - sysbus_mmio_map(sbd, 0, base); - sysbus_connect_irq(sbd, 0, irq); - sysbus_connect_irq(sbd, 1, rx_dma); - sysbus_connect_irq(sbd, 2, tx_dma); - return PXA2XX_FIR(dev); -} - -static void pxa2xx_reset(void *opaque, int line, int level) -{ - PXA2xxState *s = (PXA2xxState *) opaque; - - if (level && (s->pm_regs[PCFR >> 2] & 0x10)) { /* GPR_EN */ - cpu_reset(CPU(s->cpu)); - /* TODO: reset peripherals */ - } -} - -/* Initialise a PXA270 integrated chip (ARM based core). */ -PXA2xxState *pxa270_init(unsigned int sdram_size, const char *cpu_type) -{ - MemoryRegion *address_space = get_system_memory(); - PXA2xxState *s; - int i; - DriveInfo *dinfo; - s = g_new0(PXA2xxState, 1); - - if (strncmp(cpu_type, "pxa27", 5)) { - error_report("Machine requires a PXA27x processor"); - exit(1); - } - - s->cpu = ARM_CPU(cpu_create(cpu_type)); - s->reset = qemu_allocate_irq(pxa2xx_reset, s, 0); - - /* SDRAM & Internal Memory Storage */ - memory_region_init_ram(&s->sdram, NULL, "pxa270.sdram", sdram_size, - &error_fatal); - memory_region_add_subregion(address_space, PXA2XX_SDRAM_BASE, &s->sdram); - memory_region_init_ram(&s->internal, NULL, "pxa270.internal", 0x40000, - &error_fatal); - memory_region_add_subregion(address_space, PXA2XX_INTERNAL_BASE, - &s->internal); - - s->pic = pxa2xx_pic_init(0x40d00000, s->cpu); - - s->dma = pxa27x_dma_init(0x40000000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_DMA)); - - sysbus_create_varargs("pxa27x-timer", 0x40a00000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 0), - qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 1), - qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 2), - qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 3), - qdev_get_gpio_in(s->pic, PXA27X_PIC_OST_4_11), - NULL); - - s->gpio = pxa2xx_gpio_init(0x40e00000, s->cpu, s->pic, 121); - - s->mmc = pxa2xx_mmci_init(address_space, 0x41100000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_MMC), - qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_MMCI), - qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_MMCI)); - dinfo = drive_get(IF_SD, 0, 0); - if (dinfo) { - DeviceState *carddev; - - /* Create and plug in the sd card */ - carddev = qdev_new(TYPE_SD_CARD); - qdev_prop_set_drive_err(carddev, "drive", - blk_by_legacy_dinfo(dinfo), &error_fatal); - qdev_realize_and_unref(carddev, qdev_get_child_bus(DEVICE(s->mmc), - "sd-bus"), - &error_fatal); - } else if (!qtest_enabled()) { - warn_report("missing SecureDigital device"); - } - - for (i = 0; pxa270_serial[i].io_base; i++) { - if (serial_hd(i)) { - serial_mm_init(address_space, pxa270_serial[i].io_base, 2, - qdev_get_gpio_in(s->pic, pxa270_serial[i].irqn), - 14857000 / 16, serial_hd(i), - DEVICE_NATIVE_ENDIAN); - } else { - break; - } - } - if (serial_hd(i)) - s->fir = 
pxa2xx_fir_init(address_space, 0x40800000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_ICP), - qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_ICP), - qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_ICP), - serial_hd(i)); - - s->lcd = pxa2xx_lcdc_init(address_space, 0x44000000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_LCD)); - - s->cm_base = 0x41300000; - s->cm_regs[CCCR >> 2] = 0x02000210; /* 416.0 MHz */ - s->clkcfg = 0x00000009; /* Turbo mode active */ - memory_region_init_io(&s->cm_iomem, NULL, &pxa2xx_cm_ops, s, "pxa2xx-cm", 0x1000); - memory_region_add_subregion(address_space, s->cm_base, &s->cm_iomem); - vmstate_register(NULL, 0, &vmstate_pxa2xx_cm, s); - - pxa2xx_setup_cp14(s); - - s->mm_base = 0x48000000; - s->mm_regs[MDMRS >> 2] = 0x00020002; - s->mm_regs[MDREFR >> 2] = 0x03ca4000; - s->mm_regs[MECR >> 2] = 0x00000001; /* Two PC Card sockets */ - memory_region_init_io(&s->mm_iomem, NULL, &pxa2xx_mm_ops, s, "pxa2xx-mm", 0x1000); - memory_region_add_subregion(address_space, s->mm_base, &s->mm_iomem); - vmstate_register(NULL, 0, &vmstate_pxa2xx_mm, s); - - s->pm_base = 0x40f00000; - memory_region_init_io(&s->pm_iomem, NULL, &pxa2xx_pm_ops, s, "pxa2xx-pm", 0x100); - memory_region_add_subregion(address_space, s->pm_base, &s->pm_iomem); - vmstate_register(NULL, 0, &vmstate_pxa2xx_pm, s); - - for (i = 0; pxa27x_ssp[i].io_base; i ++); - s->ssp = g_new0(SSIBus *, i); - for (i = 0; pxa27x_ssp[i].io_base; i ++) { - DeviceState *dev; - dev = sysbus_create_simple(TYPE_PXA2XX_SSP, pxa27x_ssp[i].io_base, - qdev_get_gpio_in(s->pic, pxa27x_ssp[i].irqn)); - s->ssp[i] = (SSIBus *)qdev_get_child_bus(dev, "ssi"); - } - - sysbus_create_simple("sysbus-ohci", 0x4c000000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_USBH1)); - - s->pcmcia[0] = PXA2XX_PCMCIA(sysbus_create_simple(TYPE_PXA2XX_PCMCIA, - 0x20000000, NULL)); - s->pcmcia[1] = PXA2XX_PCMCIA(sysbus_create_simple(TYPE_PXA2XX_PCMCIA, - 0x30000000, NULL)); - - sysbus_create_simple(TYPE_PXA2XX_RTC, 0x40900000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_RTCALARM)); - - s->i2c[0] = pxa2xx_i2c_init(0x40301600, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2C), 0xffff); - s->i2c[1] = pxa2xx_i2c_init(0x40f00100, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_PWRI2C), 0xff); - - s->i2s = pxa2xx_i2s_init(address_space, 0x40400000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2S), - qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_I2S), - qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_I2S)); - - s->kp = pxa27x_keypad_init(address_space, 0x41500000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_KEYPAD)); - - /* GPIO1 resets the processor */ - /* The handler can be overridden by board-specific code */ - qdev_connect_gpio_out(s->gpio, 1, s->reset); - return s; -} - -/* Initialise a PXA255 integrated chip (ARM based core). 
*/ -PXA2xxState *pxa255_init(unsigned int sdram_size) -{ - MemoryRegion *address_space = get_system_memory(); - PXA2xxState *s; - int i; - DriveInfo *dinfo; - - s = g_new0(PXA2xxState, 1); - - s->cpu = ARM_CPU(cpu_create(ARM_CPU_TYPE_NAME("pxa255"))); - s->reset = qemu_allocate_irq(pxa2xx_reset, s, 0); - - /* SDRAM & Internal Memory Storage */ - memory_region_init_ram(&s->sdram, NULL, "pxa255.sdram", sdram_size, - &error_fatal); - memory_region_add_subregion(address_space, PXA2XX_SDRAM_BASE, &s->sdram); - memory_region_init_ram(&s->internal, NULL, "pxa255.internal", - PXA2XX_INTERNAL_SIZE, &error_fatal); - memory_region_add_subregion(address_space, PXA2XX_INTERNAL_BASE, - &s->internal); - - s->pic = pxa2xx_pic_init(0x40d00000, s->cpu); - - s->dma = pxa255_dma_init(0x40000000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_DMA)); - - sysbus_create_varargs("pxa25x-timer", 0x40a00000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 0), - qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 1), - qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 2), - qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 3), - NULL); - - s->gpio = pxa2xx_gpio_init(0x40e00000, s->cpu, s->pic, 85); - - s->mmc = pxa2xx_mmci_init(address_space, 0x41100000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_MMC), - qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_MMCI), - qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_MMCI)); - dinfo = drive_get(IF_SD, 0, 0); - if (dinfo) { - DeviceState *carddev; - - /* Create and plug in the sd card */ - carddev = qdev_new(TYPE_SD_CARD); - qdev_prop_set_drive_err(carddev, "drive", - blk_by_legacy_dinfo(dinfo), &error_fatal); - qdev_realize_and_unref(carddev, qdev_get_child_bus(DEVICE(s->mmc), - "sd-bus"), - &error_fatal); - } else if (!qtest_enabled()) { - warn_report("missing SecureDigital device"); - } - - for (i = 0; pxa255_serial[i].io_base; i++) { - if (serial_hd(i)) { - serial_mm_init(address_space, pxa255_serial[i].io_base, 2, - qdev_get_gpio_in(s->pic, pxa255_serial[i].irqn), - 14745600 / 16, serial_hd(i), - DEVICE_NATIVE_ENDIAN); - } else { - break; - } - } - if (serial_hd(i)) - s->fir = pxa2xx_fir_init(address_space, 0x40800000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_ICP), - qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_ICP), - qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_ICP), - serial_hd(i)); - - s->lcd = pxa2xx_lcdc_init(address_space, 0x44000000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_LCD)); - - s->cm_base = 0x41300000; - s->cm_regs[CCCR >> 2] = 0x00000121; /* from datasheet */ - s->cm_regs[CKEN >> 2] = 0x00017def; /* from datasheet */ - - s->clkcfg = 0x00000009; /* Turbo mode active */ - memory_region_init_io(&s->cm_iomem, NULL, &pxa2xx_cm_ops, s, "pxa2xx-cm", 0x1000); - memory_region_add_subregion(address_space, s->cm_base, &s->cm_iomem); - vmstate_register(NULL, 0, &vmstate_pxa2xx_cm, s); - - pxa2xx_setup_cp14(s); - - s->mm_base = 0x48000000; - s->mm_regs[MDMRS >> 2] = 0x00020002; - s->mm_regs[MDREFR >> 2] = 0x03ca4000; - s->mm_regs[MECR >> 2] = 0x00000001; /* Two PC Card sockets */ - memory_region_init_io(&s->mm_iomem, NULL, &pxa2xx_mm_ops, s, "pxa2xx-mm", 0x1000); - memory_region_add_subregion(address_space, s->mm_base, &s->mm_iomem); - vmstate_register(NULL, 0, &vmstate_pxa2xx_mm, s); - - s->pm_base = 0x40f00000; - memory_region_init_io(&s->pm_iomem, NULL, &pxa2xx_pm_ops, s, "pxa2xx-pm", 0x100); - memory_region_add_subregion(address_space, s->pm_base, &s->pm_iomem); - vmstate_register(NULL, 0, &vmstate_pxa2xx_pm, s); - - for (i = 0; pxa255_ssp[i].io_base; i ++); - s->ssp = g_new0(SSIBus *, i); - for (i = 0; pxa255_ssp[i].io_base; i ++) { - 
DeviceState *dev; - dev = sysbus_create_simple(TYPE_PXA2XX_SSP, pxa255_ssp[i].io_base, - qdev_get_gpio_in(s->pic, pxa255_ssp[i].irqn)); - s->ssp[i] = (SSIBus *)qdev_get_child_bus(dev, "ssi"); - } - - s->pcmcia[0] = PXA2XX_PCMCIA(sysbus_create_simple(TYPE_PXA2XX_PCMCIA, - 0x20000000, NULL)); - s->pcmcia[1] = PXA2XX_PCMCIA(sysbus_create_simple(TYPE_PXA2XX_PCMCIA, - 0x30000000, NULL)); - - sysbus_create_simple(TYPE_PXA2XX_RTC, 0x40900000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_RTCALARM)); - - s->i2c[0] = pxa2xx_i2c_init(0x40301600, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2C), 0xffff); - s->i2c[1] = pxa2xx_i2c_init(0x40f00100, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_PWRI2C), 0xff); - - s->i2s = pxa2xx_i2s_init(address_space, 0x40400000, - qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2S), - qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_I2S), - qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_I2S)); - - /* GPIO1 resets the processor */ - /* The handler can be overridden by board-specific code */ - qdev_connect_gpio_out(s->gpio, 1, s->reset); - return s; -} - -static void pxa2xx_ssp_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->reset = pxa2xx_ssp_reset; - dc->vmsd = &vmstate_pxa2xx_ssp; -} - -static const TypeInfo pxa2xx_ssp_info = { - .name = TYPE_PXA2XX_SSP, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PXA2xxSSPState), - .instance_init = pxa2xx_ssp_init, - .class_init = pxa2xx_ssp_class_init, -}; - -static void pxa2xx_register_types(void) -{ - type_register_static(&pxa2xx_i2c_slave_info); - type_register_static(&pxa2xx_ssp_info); - type_register_static(&pxa2xx_i2c_info); - type_register_static(&pxa2xx_rtc_sysbus_info); - type_register_static(&pxa2xx_fir_info); -} - -type_init(pxa2xx_register_types) diff --git a/hw/arm/pxa2xx_gpio.c b/hw/arm/pxa2xx_gpio.c deleted file mode 100644 index 41dca036fbb..00000000000 --- a/hw/arm/pxa2xx_gpio.c +++ /dev/null @@ -1,365 +0,0 @@ -/* - * Intel XScale PXA255/270 GPIO controller emulation. - * - * Copyright (c) 2006 Openedhand Ltd. - * Written by Andrzej Zaborowski - * - * This code is licensed under the GPL. - */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "hw/irq.h" -#include "hw/qdev-properties.h" -#include "hw/sysbus.h" -#include "migration/vmstate.h" -#include "hw/arm/pxa.h" -#include "qapi/error.h" -#include "qemu/log.h" -#include "qemu/module.h" -#include "qom/object.h" - -#define PXA2XX_GPIO_BANKS 4 - -#define TYPE_PXA2XX_GPIO "pxa2xx-gpio" -OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxGPIOInfo, PXA2XX_GPIO) - -struct PXA2xxGPIOInfo { - /*< private >*/ - SysBusDevice parent_obj; - /*< public >*/ - - MemoryRegion iomem; - qemu_irq irq0, irq1, irqX; - int lines; - ARMCPU *cpu; - - /* XXX: GNU C vectors are more suitable */ - uint32_t ilevel[PXA2XX_GPIO_BANKS]; - uint32_t olevel[PXA2XX_GPIO_BANKS]; - uint32_t dir[PXA2XX_GPIO_BANKS]; - uint32_t rising[PXA2XX_GPIO_BANKS]; - uint32_t falling[PXA2XX_GPIO_BANKS]; - uint32_t status[PXA2XX_GPIO_BANKS]; - uint32_t gafr[PXA2XX_GPIO_BANKS * 2]; - - uint32_t prev_level[PXA2XX_GPIO_BANKS]; - qemu_irq handler[PXA2XX_GPIO_BANKS * 32]; - qemu_irq read_notify; -}; - -static struct { - enum { - GPIO_NONE, - GPLR, - GPSR, - GPCR, - GPDR, - GRER, - GFER, - GEDR, - GAFR_L, - GAFR_U, - } reg; - int bank; -} pxa2xx_gpio_regs[0x200] = { - [0 ... 
0x1ff] = { GPIO_NONE, 0 }, -#define PXA2XX_REG(reg, a0, a1, a2, a3) \ - [a0] = { reg, 0 }, [a1] = { reg, 1 }, [a2] = { reg, 2 }, [a3] = { reg, 3 }, - - PXA2XX_REG(GPLR, 0x000, 0x004, 0x008, 0x100) - PXA2XX_REG(GPSR, 0x018, 0x01c, 0x020, 0x118) - PXA2XX_REG(GPCR, 0x024, 0x028, 0x02c, 0x124) - PXA2XX_REG(GPDR, 0x00c, 0x010, 0x014, 0x10c) - PXA2XX_REG(GRER, 0x030, 0x034, 0x038, 0x130) - PXA2XX_REG(GFER, 0x03c, 0x040, 0x044, 0x13c) - PXA2XX_REG(GEDR, 0x048, 0x04c, 0x050, 0x148) - PXA2XX_REG(GAFR_L, 0x054, 0x05c, 0x064, 0x06c) - PXA2XX_REG(GAFR_U, 0x058, 0x060, 0x068, 0x070) -}; - -static void pxa2xx_gpio_irq_update(PXA2xxGPIOInfo *s) -{ - if (s->status[0] & (1 << 0)) - qemu_irq_raise(s->irq0); - else - qemu_irq_lower(s->irq0); - - if (s->status[0] & (1 << 1)) - qemu_irq_raise(s->irq1); - else - qemu_irq_lower(s->irq1); - - if ((s->status[0] & ~3) | s->status[1] | s->status[2] | s->status[3]) - qemu_irq_raise(s->irqX); - else - qemu_irq_lower(s->irqX); -} - -/* Bitmap of pins used as standby and sleep wake-up sources. */ -static const int pxa2xx_gpio_wake[PXA2XX_GPIO_BANKS] = { - 0x8003fe1b, 0x002001fc, 0xec080000, 0x0012007f, -}; - -static void pxa2xx_gpio_set(void *opaque, int line, int level) -{ - PXA2xxGPIOInfo *s = (PXA2xxGPIOInfo *) opaque; - CPUState *cpu = CPU(s->cpu); - int bank; - uint32_t mask; - - if (line >= s->lines) { - printf("%s: No GPIO pin %i\n", __func__, line); - return; - } - - bank = line >> 5; - mask = 1U << (line & 31); - - if (level) { - s->status[bank] |= s->rising[bank] & mask & - ~s->ilevel[bank] & ~s->dir[bank]; - s->ilevel[bank] |= mask; - } else { - s->status[bank] |= s->falling[bank] & mask & - s->ilevel[bank] & ~s->dir[bank]; - s->ilevel[bank] &= ~mask; - } - - if (s->status[bank] & mask) - pxa2xx_gpio_irq_update(s); - - /* Wake-up GPIOs */ - if (cpu->halted && (mask & ~s->dir[bank] & pxa2xx_gpio_wake[bank])) { - cpu_interrupt(cpu, CPU_INTERRUPT_EXITTB); - } -} - -static void pxa2xx_gpio_handler_update(PXA2xxGPIOInfo *s) { - uint32_t level, diff; - int i, bit, line; - for (i = 0; i < PXA2XX_GPIO_BANKS; i ++) { - level = s->olevel[i] & s->dir[i]; - - for (diff = s->prev_level[i] ^ level; diff; diff ^= 1 << bit) { - bit = ctz32(diff); - line = bit + 32 * i; - qemu_set_irq(s->handler[line], (level >> bit) & 1); - } - - s->prev_level[i] = level; - } -} - -static uint64_t pxa2xx_gpio_read(void *opaque, hwaddr offset, - unsigned size) -{ - PXA2xxGPIOInfo *s = (PXA2xxGPIOInfo *) opaque; - uint32_t ret; - int bank; - if (offset >= 0x200) - return 0; - - bank = pxa2xx_gpio_regs[offset].bank; - switch (pxa2xx_gpio_regs[offset].reg) { - case GPDR: /* GPIO Pin-Direction registers */ - return s->dir[bank]; - - case GPSR: /* GPIO Pin-Output Set registers */ - qemu_log_mask(LOG_GUEST_ERROR, - "pxa2xx GPIO: read from write only register GPSR\n"); - return 0; - - case GPCR: /* GPIO Pin-Output Clear registers */ - qemu_log_mask(LOG_GUEST_ERROR, - "pxa2xx GPIO: read from write only register GPCR\n"); - return 0; - - case GRER: /* GPIO Rising-Edge Detect Enable registers */ - return s->rising[bank]; - - case GFER: /* GPIO Falling-Edge Detect Enable registers */ - return s->falling[bank]; - - case GAFR_L: /* GPIO Alternate Function registers */ - return s->gafr[bank * 2]; - - case GAFR_U: /* GPIO Alternate Function registers */ - return s->gafr[bank * 2 + 1]; - - case GPLR: /* GPIO Pin-Level registers */ - ret = (s->olevel[bank] & s->dir[bank]) | - (s->ilevel[bank] & ~s->dir[bank]); - qemu_irq_raise(s->read_notify); - return ret; - - case GEDR: /* GPIO Edge Detect Status registers 
*/ - return s->status[bank]; - - default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", - __func__, offset); - } - - return 0; -} - -static void pxa2xx_gpio_write(void *opaque, hwaddr offset, - uint64_t value, unsigned size) -{ - PXA2xxGPIOInfo *s = (PXA2xxGPIOInfo *) opaque; - int bank; - if (offset >= 0x200) - return; - - bank = pxa2xx_gpio_regs[offset].bank; - switch (pxa2xx_gpio_regs[offset].reg) { - case GPDR: /* GPIO Pin-Direction registers */ - s->dir[bank] = value; - pxa2xx_gpio_handler_update(s); - break; - - case GPSR: /* GPIO Pin-Output Set registers */ - s->olevel[bank] |= value; - pxa2xx_gpio_handler_update(s); - break; - - case GPCR: /* GPIO Pin-Output Clear registers */ - s->olevel[bank] &= ~value; - pxa2xx_gpio_handler_update(s); - break; - - case GRER: /* GPIO Rising-Edge Detect Enable registers */ - s->rising[bank] = value; - break; - - case GFER: /* GPIO Falling-Edge Detect Enable registers */ - s->falling[bank] = value; - break; - - case GAFR_L: /* GPIO Alternate Function registers */ - s->gafr[bank * 2] = value; - break; - - case GAFR_U: /* GPIO Alternate Function registers */ - s->gafr[bank * 2 + 1] = value; - break; - - case GEDR: /* GPIO Edge Detect Status registers */ - s->status[bank] &= ~value; - pxa2xx_gpio_irq_update(s); - break; - - default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", - __func__, offset); - } -} - -static const MemoryRegionOps pxa_gpio_ops = { - .read = pxa2xx_gpio_read, - .write = pxa2xx_gpio_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -DeviceState *pxa2xx_gpio_init(hwaddr base, - ARMCPU *cpu, DeviceState *pic, int lines) -{ - DeviceState *dev; - - dev = qdev_new(TYPE_PXA2XX_GPIO); - qdev_prop_set_int32(dev, "lines", lines); - object_property_set_link(OBJECT(dev), "cpu", OBJECT(cpu), &error_abort); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, - qdev_get_gpio_in(pic, PXA2XX_PIC_GPIO_0)); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, - qdev_get_gpio_in(pic, PXA2XX_PIC_GPIO_1)); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 2, - qdev_get_gpio_in(pic, PXA2XX_PIC_GPIO_X)); - - return dev; -} - -static void pxa2xx_gpio_initfn(Object *obj) -{ - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - DeviceState *dev = DEVICE(sbd); - PXA2xxGPIOInfo *s = PXA2XX_GPIO(dev); - - memory_region_init_io(&s->iomem, obj, &pxa_gpio_ops, - s, "pxa2xx-gpio", 0x1000); - sysbus_init_mmio(sbd, &s->iomem); - sysbus_init_irq(sbd, &s->irq0); - sysbus_init_irq(sbd, &s->irq1); - sysbus_init_irq(sbd, &s->irqX); -} - -static void pxa2xx_gpio_realize(DeviceState *dev, Error **errp) -{ - PXA2xxGPIOInfo *s = PXA2XX_GPIO(dev); - - qdev_init_gpio_in(dev, pxa2xx_gpio_set, s->lines); - qdev_init_gpio_out(dev, s->handler, s->lines); -} - -/* - * Registers a callback to notify on GPLR reads. This normally - * shouldn't be needed but it is used for the hack on Spitz machines. 
- */ -void pxa2xx_gpio_read_notifier(DeviceState *dev, qemu_irq handler) -{ - PXA2xxGPIOInfo *s = PXA2XX_GPIO(dev); - - s->read_notify = handler; -} - -static const VMStateDescription vmstate_pxa2xx_gpio_regs = { - .name = "pxa2xx-gpio", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_UINT32_ARRAY(ilevel, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), - VMSTATE_UINT32_ARRAY(olevel, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), - VMSTATE_UINT32_ARRAY(dir, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), - VMSTATE_UINT32_ARRAY(rising, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), - VMSTATE_UINT32_ARRAY(falling, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), - VMSTATE_UINT32_ARRAY(status, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), - VMSTATE_UINT32_ARRAY(gafr, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS * 2), - VMSTATE_UINT32_ARRAY(prev_level, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), - VMSTATE_END_OF_LIST(), - }, -}; - -static Property pxa2xx_gpio_properties[] = { - DEFINE_PROP_INT32("lines", PXA2xxGPIOInfo, lines, 0), - DEFINE_PROP_LINK("cpu", PXA2xxGPIOInfo, cpu, TYPE_ARM_CPU, ARMCPU *), - DEFINE_PROP_END_OF_LIST(), -}; - -static void pxa2xx_gpio_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->desc = "PXA2xx GPIO controller"; - device_class_set_props(dc, pxa2xx_gpio_properties); - dc->vmsd = &vmstate_pxa2xx_gpio_regs; - dc->realize = pxa2xx_gpio_realize; -} - -static const TypeInfo pxa2xx_gpio_info = { - .name = TYPE_PXA2XX_GPIO, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PXA2xxGPIOInfo), - .instance_init = pxa2xx_gpio_initfn, - .class_init = pxa2xx_gpio_class_init, -}; - -static void pxa2xx_gpio_register_types(void) -{ - type_register_static(&pxa2xx_gpio_info); -} - -type_init(pxa2xx_gpio_register_types) diff --git a/hw/arm/pxa2xx_pic.c b/hw/arm/pxa2xx_pic.c deleted file mode 100644 index 34c5555dba9..00000000000 --- a/hw/arm/pxa2xx_pic.c +++ /dev/null @@ -1,359 +0,0 @@ -/* - * Intel XScale PXA Programmable Interrupt Controller. - * - * Copyright (c) 2006 Openedhand Ltd. - * Copyright (c) 2006 Thorsten Zitterell - * Written by Andrzej Zaborowski - * - * This code is licensed under the GPL. 
- */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "qemu/module.h" -#include "qemu/log.h" -#include "cpu.h" -#include "hw/arm/pxa.h" -#include "hw/sysbus.h" -#include "hw/qdev-properties.h" -#include "migration/vmstate.h" -#include "qom/object.h" -#include "target/arm/cpregs.h" - -#define ICIP 0x00 /* Interrupt Controller IRQ Pending register */ -#define ICMR 0x04 /* Interrupt Controller Mask register */ -#define ICLR 0x08 /* Interrupt Controller Level register */ -#define ICFP 0x0c /* Interrupt Controller FIQ Pending register */ -#define ICPR 0x10 /* Interrupt Controller Pending register */ -#define ICCR 0x14 /* Interrupt Controller Control register */ -#define ICHP 0x18 /* Interrupt Controller Highest Priority register */ -#define IPR0 0x1c /* Interrupt Controller Priority register 0 */ -#define IPR31 0x98 /* Interrupt Controller Priority register 31 */ -#define ICIP2 0x9c /* Interrupt Controller IRQ Pending register 2 */ -#define ICMR2 0xa0 /* Interrupt Controller Mask register 2 */ -#define ICLR2 0xa4 /* Interrupt Controller Level register 2 */ -#define ICFP2 0xa8 /* Interrupt Controller FIQ Pending register 2 */ -#define ICPR2 0xac /* Interrupt Controller Pending register 2 */ -#define IPR32 0xb0 /* Interrupt Controller Priority register 32 */ -#define IPR39 0xcc /* Interrupt Controller Priority register 39 */ - -#define PXA2XX_PIC_SRCS 40 - -#define TYPE_PXA2XX_PIC "pxa2xx_pic" -OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxPICState, PXA2XX_PIC) - -struct PXA2xxPICState { - /*< private >*/ - SysBusDevice parent_obj; - /*< public >*/ - - MemoryRegion iomem; - ARMCPU *cpu; - uint32_t int_enabled[2]; - uint32_t int_pending[2]; - uint32_t is_fiq[2]; - uint32_t int_idle; - uint32_t priority[PXA2XX_PIC_SRCS]; -}; - -static void pxa2xx_pic_update(void *opaque) -{ - uint32_t mask[2]; - PXA2xxPICState *s = (PXA2xxPICState *) opaque; - CPUState *cpu = CPU(s->cpu); - - if (cpu->halted) { - mask[0] = s->int_pending[0] & (s->int_enabled[0] | s->int_idle); - mask[1] = s->int_pending[1] & (s->int_enabled[1] | s->int_idle); - if (mask[0] || mask[1]) { - cpu_interrupt(cpu, CPU_INTERRUPT_EXITTB); - } - } - - mask[0] = s->int_pending[0] & s->int_enabled[0]; - mask[1] = s->int_pending[1] & s->int_enabled[1]; - - if ((mask[0] & s->is_fiq[0]) || (mask[1] & s->is_fiq[1])) { - cpu_interrupt(cpu, CPU_INTERRUPT_FIQ); - } else { - cpu_reset_interrupt(cpu, CPU_INTERRUPT_FIQ); - } - - if ((mask[0] & ~s->is_fiq[0]) || (mask[1] & ~s->is_fiq[1])) { - cpu_interrupt(cpu, CPU_INTERRUPT_HARD); - } else { - cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD); - } -} - -/* Note: Here level means state of the signal on a pin, not - * IRQ/FIQ distinction as in PXA Developer Manual. */ -static void pxa2xx_pic_set_irq(void *opaque, int irq, int level) -{ - PXA2xxPICState *s = (PXA2xxPICState *) opaque; - int int_set = (irq >= 32); - irq &= 31; - - if (level) - s->int_pending[int_set] |= 1 << irq; - else - s->int_pending[int_set] &= ~(1 << irq); - - pxa2xx_pic_update(opaque); -} - -static inline uint32_t pxa2xx_pic_highest(PXA2xxPICState *s) { - int i, int_set, irq; - uint32_t bit, mask[2]; - uint32_t ichp = 0x003f003f; /* Both IDs invalid */ - - mask[0] = s->int_pending[0] & s->int_enabled[0]; - mask[1] = s->int_pending[1] & s->int_enabled[1]; - - for (i = PXA2XX_PIC_SRCS - 1; i >= 0; i --) { - irq = s->priority[i] & 0x3f; - if ((s->priority[i] & (1U << 31)) && irq < PXA2XX_PIC_SRCS) { - /* Source peripheral ID is valid. 
*/ - bit = 1 << (irq & 31); - int_set = (irq >= 32); - - if (mask[int_set] & bit & s->is_fiq[int_set]) { - /* FIQ asserted */ - ichp &= 0xffff0000; - ichp |= (1 << 15) | irq; - } - - if (mask[int_set] & bit & ~s->is_fiq[int_set]) { - /* IRQ asserted */ - ichp &= 0x0000ffff; - ichp |= (1U << 31) | (irq << 16); - } - } - } - - return ichp; -} - -static uint64_t pxa2xx_pic_mem_read(void *opaque, hwaddr offset, - unsigned size) -{ - PXA2xxPICState *s = (PXA2xxPICState *) opaque; - - switch (offset) { - case ICIP: /* IRQ Pending register */ - return s->int_pending[0] & ~s->is_fiq[0] & s->int_enabled[0]; - case ICIP2: /* IRQ Pending register 2 */ - return s->int_pending[1] & ~s->is_fiq[1] & s->int_enabled[1]; - case ICMR: /* Mask register */ - return s->int_enabled[0]; - case ICMR2: /* Mask register 2 */ - return s->int_enabled[1]; - case ICLR: /* Level register */ - return s->is_fiq[0]; - case ICLR2: /* Level register 2 */ - return s->is_fiq[1]; - case ICCR: /* Idle mask */ - return (s->int_idle == 0); - case ICFP: /* FIQ Pending register */ - return s->int_pending[0] & s->is_fiq[0] & s->int_enabled[0]; - case ICFP2: /* FIQ Pending register 2 */ - return s->int_pending[1] & s->is_fiq[1] & s->int_enabled[1]; - case ICPR: /* Pending register */ - return s->int_pending[0]; - case ICPR2: /* Pending register 2 */ - return s->int_pending[1]; - case IPR0 ... IPR31: - return s->priority[0 + ((offset - IPR0 ) >> 2)]; - case IPR32 ... IPR39: - return s->priority[32 + ((offset - IPR32) >> 2)]; - case ICHP: /* Highest Priority register */ - return pxa2xx_pic_highest(s); - default: - qemu_log_mask(LOG_GUEST_ERROR, - "pxa2xx_pic_mem_read: bad register offset 0x%" HWADDR_PRIx - "\n", offset); - return 0; - } -} - -static void pxa2xx_pic_mem_write(void *opaque, hwaddr offset, - uint64_t value, unsigned size) -{ - PXA2xxPICState *s = (PXA2xxPICState *) opaque; - - switch (offset) { - case ICMR: /* Mask register */ - s->int_enabled[0] = value; - break; - case ICMR2: /* Mask register 2 */ - s->int_enabled[1] = value; - break; - case ICLR: /* Level register */ - s->is_fiq[0] = value; - break; - case ICLR2: /* Level register 2 */ - s->is_fiq[1] = value; - break; - case ICCR: /* Idle mask */ - s->int_idle = (value & 1) ? 0 : ~0; - break; - case IPR0 ... IPR31: - s->priority[0 + ((offset - IPR0 ) >> 2)] = value & 0x8000003f; - break; - case IPR32 ... IPR39: - s->priority[32 + ((offset - IPR32) >> 2)] = value & 0x8000003f; - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "pxa2xx_pic_mem_write: bad register offset 0x%" - HWADDR_PRIx "\n", offset); - return; - } - pxa2xx_pic_update(opaque); -} - -/* Interrupt Controller Coprocessor Space Register Mapping */ -static const int pxa2xx_cp_reg_map[0x10] = { - [0x0 ... 
0xf] = -1, - [0x0] = ICIP, - [0x1] = ICMR, - [0x2] = ICLR, - [0x3] = ICFP, - [0x4] = ICPR, - [0x5] = ICHP, - [0x6] = ICIP2, - [0x7] = ICMR2, - [0x8] = ICLR2, - [0x9] = ICFP2, - [0xa] = ICPR2, -}; - -static uint64_t pxa2xx_pic_cp_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - int offset = pxa2xx_cp_reg_map[ri->crn]; - return pxa2xx_pic_mem_read(ri->opaque, offset, 4); -} - -static void pxa2xx_pic_cp_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - int offset = pxa2xx_cp_reg_map[ri->crn]; - pxa2xx_pic_mem_write(ri->opaque, offset, value, 4); -} - -#define REGINFO_FOR_PIC_CP(NAME, CRN) \ - { .name = NAME, .cp = 6, .crn = CRN, .crm = 0, .opc1 = 0, .opc2 = 0, \ - .access = PL1_RW, .type = ARM_CP_IO, \ - .readfn = pxa2xx_pic_cp_read, .writefn = pxa2xx_pic_cp_write } - -static const ARMCPRegInfo pxa_pic_cp_reginfo[] = { - REGINFO_FOR_PIC_CP("ICIP", 0), - REGINFO_FOR_PIC_CP("ICMR", 1), - REGINFO_FOR_PIC_CP("ICLR", 2), - REGINFO_FOR_PIC_CP("ICFP", 3), - REGINFO_FOR_PIC_CP("ICPR", 4), - REGINFO_FOR_PIC_CP("ICHP", 5), - REGINFO_FOR_PIC_CP("ICIP2", 6), - REGINFO_FOR_PIC_CP("ICMR2", 7), - REGINFO_FOR_PIC_CP("ICLR2", 8), - REGINFO_FOR_PIC_CP("ICFP2", 9), - REGINFO_FOR_PIC_CP("ICPR2", 0xa), -}; - -static const MemoryRegionOps pxa2xx_pic_ops = { - .read = pxa2xx_pic_mem_read, - .write = pxa2xx_pic_mem_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static int pxa2xx_pic_post_load(void *opaque, int version_id) -{ - pxa2xx_pic_update(opaque); - return 0; -} - -static void pxa2xx_pic_reset_hold(Object *obj, ResetType type) -{ - PXA2xxPICState *s = PXA2XX_PIC(obj); - - s->int_pending[0] = 0; - s->int_pending[1] = 0; - s->int_enabled[0] = 0; - s->int_enabled[1] = 0; - s->is_fiq[0] = 0; - s->is_fiq[1] = 0; -} - -DeviceState *pxa2xx_pic_init(hwaddr base, ARMCPU *cpu) -{ - DeviceState *dev = qdev_new(TYPE_PXA2XX_PIC); - - object_property_set_link(OBJECT(dev), "arm-cpu", - OBJECT(cpu), &error_abort); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); - - return dev; -} - -static void pxa2xx_pic_realize(DeviceState *dev, Error **errp) -{ - PXA2xxPICState *s = PXA2XX_PIC(dev); - - qdev_init_gpio_in(dev, pxa2xx_pic_set_irq, PXA2XX_PIC_SRCS); - - /* Enable IC memory-mapped registers access. */ - memory_region_init_io(&s->iomem, OBJECT(s), &pxa2xx_pic_ops, s, - "pxa2xx-pic", 0x00100000); - sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); - - /* Enable IC coprocessor access. 
*/ - define_arm_cp_regs_with_opaque(s->cpu, pxa_pic_cp_reginfo, s); -} - -static const VMStateDescription vmstate_pxa2xx_pic_regs = { - .name = "pxa2xx_pic", - .version_id = 0, - .minimum_version_id = 0, - .post_load = pxa2xx_pic_post_load, - .fields = (const VMStateField[]) { - VMSTATE_UINT32_ARRAY(int_enabled, PXA2xxPICState, 2), - VMSTATE_UINT32_ARRAY(int_pending, PXA2xxPICState, 2), - VMSTATE_UINT32_ARRAY(is_fiq, PXA2xxPICState, 2), - VMSTATE_UINT32(int_idle, PXA2xxPICState), - VMSTATE_UINT32_ARRAY(priority, PXA2xxPICState, PXA2XX_PIC_SRCS), - VMSTATE_END_OF_LIST(), - }, -}; - -static Property pxa2xx_pic_properties[] = { - DEFINE_PROP_LINK("arm-cpu", PXA2xxPICState, cpu, - TYPE_ARM_CPU, ARMCPU *), - DEFINE_PROP_END_OF_LIST(), -}; - -static void pxa2xx_pic_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - ResettableClass *rc = RESETTABLE_CLASS(klass); - - device_class_set_props(dc, pxa2xx_pic_properties); - dc->realize = pxa2xx_pic_realize; - dc->desc = "PXA2xx PIC"; - dc->vmsd = &vmstate_pxa2xx_pic_regs; - rc->phases.hold = pxa2xx_pic_reset_hold; -} - -static const TypeInfo pxa2xx_pic_info = { - .name = TYPE_PXA2XX_PIC, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PXA2xxPICState), - .class_init = pxa2xx_pic_class_init, -}; - -static void pxa2xx_pic_register_types(void) -{ - type_register_static(&pxa2xx_pic_info); -} - -type_init(pxa2xx_pic_register_types) diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c index a7a662f40db..9d9af63d654 100644 --- a/hw/arm/raspi.c +++ b/hw/arm/raspi.c @@ -337,48 +337,53 @@ static void raspi_machine_class_init(MachineClass *mc, mc->init = raspi_machine_init; }; -static void raspi0_machine_class_init(ObjectClass *oc, void *data) +static void raspi0_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc); + mc->auto_create_sdcard = true; rmc->board_rev = 0x920092; /* Revision 1.2 */ raspi_machine_class_init(mc, rmc->board_rev); }; -static void raspi1ap_machine_class_init(ObjectClass *oc, void *data) +static void raspi1ap_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc); + mc->auto_create_sdcard = true; rmc->board_rev = 0x900021; /* Revision 1.1 */ raspi_machine_class_init(mc, rmc->board_rev); }; -static void raspi2b_machine_class_init(ObjectClass *oc, void *data) +static void raspi2b_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc); + mc->auto_create_sdcard = true; rmc->board_rev = 0xa21041; raspi_machine_class_init(mc, rmc->board_rev); }; #ifdef TARGET_AARCH64 -static void raspi3ap_machine_class_init(ObjectClass *oc, void *data) +static void raspi3ap_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc); + mc->auto_create_sdcard = true; rmc->board_rev = 0x9020e0; /* Revision 1.0 */ raspi_machine_class_init(mc, rmc->board_rev); }; -static void raspi3b_machine_class_init(ObjectClass *oc, void *data) +static void raspi3b_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc); + mc->auto_create_sdcard = true; rmc->board_rev = 0xa02082; raspi_machine_class_init(mc, rmc->board_rev); }; diff --git a/hw/arm/raspi4b.c 
b/hw/arm/raspi4b.c index 85877880fc7..20082d52667 100644 --- a/hw/arm/raspi4b.c +++ b/hw/arm/raspi4b.c @@ -15,7 +15,7 @@ #include "hw/display/bcm2835_fb.h" #include "hw/registerfields.h" #include "qemu/error-report.h" -#include "sysemu/device_tree.h" +#include "system/device_tree.h" #include "hw/boards.h" #include "hw/loader.h" #include "hw/arm/boot.h" @@ -107,7 +107,7 @@ static void raspi4b_machine_init(MachineState *machine) raspi_base_machine_init(machine, &soc->parent_obj); } -static void raspi4b_machine_class_init(ObjectClass *oc, void *data) +static void raspi4b_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); RaspiBaseMachineClass *rmc = RASPI_BASE_MACHINE_CLASS(oc); @@ -118,6 +118,7 @@ static void raspi4b_machine_class_init(ObjectClass *oc, void *data) rmc->board_rev = 0xb03115; /* Revision 1.5, 2 Gb RAM */ #endif raspi_machine_class_common_init(mc, rmc->board_rev); + mc->auto_create_sdcard = true; mc->init = raspi4b_machine_init; } diff --git a/hw/arm/realview.c b/hw/arm/realview.c index b186f965c68..5c9050490b4 100644 --- a/hw/arm/realview.c +++ b/hw/arm/realview.c @@ -19,7 +19,7 @@ #include "hw/pci/pci.h" #include "hw/qdev-core.h" #include "net/net.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/boards.h" #include "hw/i2c/i2c.h" #include "qemu/error-report.h" @@ -35,6 +35,8 @@ #define SMP_BOOT_ADDR 0xe0000000 #define SMP_BOOTREG_ADDR 0x10000030 +#define GIC_EXT_IRQS 64 /* Realview PBX-A9 development board */ + /* Board init. */ static struct arm_boot_info realview_binfo = { @@ -185,7 +187,12 @@ static void realview_init(MachineState *machine, sysbus_mmio_map(SYS_BUS_DEVICE(sysctl), 0, 0x10000000); if (is_mpcore) { - dev = qdev_new(is_pb ? TYPE_A9MPCORE_PRIV : "realview_mpcore"); + if (is_pb) { + dev = qdev_new(TYPE_A9MPCORE_PRIV); + qdev_prop_set_uint32(dev, "num-irq", GIC_EXT_IRQS + GIC_INTERNAL); + } else { + dev = qdev_new("realview_mpcore"); + } qdev_prop_set_uint32(dev, "num-cpu", smp_cpus); busdev = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(busdev, &error_fatal); @@ -201,7 +208,7 @@ static void realview_init(MachineState *machine, /* For now just create the nIRQ GIC, and ignore the others. 
*/ dev = sysbus_create_simple(TYPE_REALVIEW_GIC, gic_addr, cpu_irq[0]); } - for (n = 0; n < 64; n++) { + for (n = 0; n < GIC_EXT_IRQS; n++) { pic[n] = qdev_get_gpio_in(dev, n); } @@ -406,7 +413,7 @@ static void realview_pbx_a9_init(MachineState *machine) realview_init(machine, BOARD_PBX_A9); } -static void realview_eb_class_init(ObjectClass *oc, void *data) +static void realview_eb_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -415,6 +422,7 @@ static void realview_eb_class_init(ObjectClass *oc, void *data) mc->block_default_type = IF_SCSI; mc->ignore_memory_transaction_failures = true; mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926"); + mc->auto_create_sdcard = true; machine_add_audiodev_property(mc); } @@ -425,7 +433,7 @@ static const TypeInfo realview_eb_type = { .class_init = realview_eb_class_init, }; -static void realview_eb_mpcore_class_init(ObjectClass *oc, void *data) +static void realview_eb_mpcore_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -435,6 +443,7 @@ static void realview_eb_mpcore_class_init(ObjectClass *oc, void *data) mc->max_cpus = 4; mc->ignore_memory_transaction_failures = true; mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm11mpcore"); + mc->auto_create_sdcard = true; machine_add_audiodev_property(mc); } @@ -445,7 +454,7 @@ static const TypeInfo realview_eb_mpcore_type = { .class_init = realview_eb_mpcore_class_init, }; -static void realview_pb_a8_class_init(ObjectClass *oc, void *data) +static void realview_pb_a8_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -453,6 +462,7 @@ static void realview_pb_a8_class_init(ObjectClass *oc, void *data) mc->init = realview_pb_a8_init; mc->ignore_memory_transaction_failures = true; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a8"); + mc->auto_create_sdcard = true; machine_add_audiodev_property(mc); } @@ -463,7 +473,7 @@ static const TypeInfo realview_pb_a8_type = { .class_init = realview_pb_a8_class_init, }; -static void realview_pbx_a9_class_init(ObjectClass *oc, void *data) +static void realview_pbx_a9_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -472,6 +482,7 @@ static void realview_pbx_a9_class_init(ObjectClass *oc, void *data) mc->max_cpus = 4; mc->ignore_memory_transaction_failures = true; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a9"); + mc->auto_create_sdcard = true; machine_add_audiodev_property(mc); } diff --git a/hw/arm/sabrelite.c b/hw/arm/sabrelite.c index 56f184b9ae7..df60d47c6fd 100644 --- a/hw/arm/sabrelite.c +++ b/hw/arm/sabrelite.c @@ -17,7 +17,7 @@ #include "hw/boards.h" #include "hw/qdev-properties.h" #include "qemu/error-report.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" static struct arm_boot_info sabrelite_binfo = { /* DDR memory start */ @@ -110,6 +110,7 @@ static void sabrelite_machine_init(MachineClass *mc) mc->max_cpus = FSL_IMX6_NUM_CPUS; mc->ignore_memory_transaction_failures = true; mc->default_ram_id = "sabrelite.ram"; + mc->auto_create_sdcard = true; } DEFINE_MACHINE("sabrelite", sabrelite_machine_init) diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c index ae37a923015..15c1ff4b140 100644 --- a/hw/arm/sbsa-ref.c +++ b/hw/arm/sbsa-ref.c @@ -19,15 +19,16 @@ */ #include "qemu/osdep.h" +#include "qemu/cutils.h" #include "qemu/datadir.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/units.h" -#include "sysemu/device_tree.h" -#include "sysemu/kvm.h" -#include "sysemu/numa.h" 
-#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/device_tree.h" +#include "system/kvm.h" +#include "system/numa.h" +#include "system/runstate.h" +#include "system/system.h" #include "exec/hwaddr.h" #include "kvm_arm.h" #include "hw/arm/boot.h" @@ -48,13 +49,12 @@ #include "hw/char/pl011.h" #include "hw/watchdog/sbsa_gwdt.h" #include "net/net.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qlist.h" #include "qom/object.h" #include "target/arm/cpu-qom.h" #include "target/arm/gtimer.h" -#define RAMLIMIT_GB 8192 -#define RAMLIMIT_BYTES (RAMLIMIT_GB * GiB) +#define RAMLIMIT_BYTES (8 * TiB) #define NUM_IRQS 256 #define NUM_SMMU_IRQS 4 @@ -164,23 +164,20 @@ static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx) static void sbsa_fdt_add_gic_node(SBSAMachineState *sms) { - char *nodename; + const char *intc_nodename = "/intc"; + const char *its_nodename = "/intc/its"; - nodename = g_strdup_printf("/intc"); - qemu_fdt_add_subnode(sms->fdt, nodename); - qemu_fdt_setprop_sized_cells(sms->fdt, nodename, "reg", + qemu_fdt_add_subnode(sms->fdt, intc_nodename); + qemu_fdt_setprop_sized_cells(sms->fdt, intc_nodename, "reg", 2, sbsa_ref_memmap[SBSA_GIC_DIST].base, 2, sbsa_ref_memmap[SBSA_GIC_DIST].size, 2, sbsa_ref_memmap[SBSA_GIC_REDIST].base, 2, sbsa_ref_memmap[SBSA_GIC_REDIST].size); - nodename = g_strdup_printf("/intc/its"); - qemu_fdt_add_subnode(sms->fdt, nodename); - qemu_fdt_setprop_sized_cells(sms->fdt, nodename, "reg", + qemu_fdt_add_subnode(sms->fdt, its_nodename); + qemu_fdt_setprop_sized_cells(sms->fdt, its_nodename, "reg", 2, sbsa_ref_memmap[SBSA_GIC_ITS].base, 2, sbsa_ref_memmap[SBSA_GIC_ITS].size); - - g_free(nodename); } /* @@ -487,6 +484,8 @@ static void create_gic(SBSAMachineState *sms, MemoryRegion *mem) [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ, [GTIMER_HYPVIRT] = ARCH_TIMER_NS_EL2_VIRT_IRQ, + [GTIMER_S_EL2_PHYS] = ARCH_TIMER_S_EL2_IRQ, + [GTIMER_S_EL2_VIRT] = ARCH_TIMER_S_EL2_VIRT_IRQ, }; for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { @@ -621,6 +620,7 @@ static void create_smmu(const SBSAMachineState *sms, PCIBus *bus) dev = qdev_new(TYPE_ARM_SMMUV3); + object_property_set_str(OBJECT(dev), "stage", "nested", &error_abort); object_property_set_link(OBJECT(dev), "primary-bus", OBJECT(bus), &error_abort); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); @@ -675,7 +675,7 @@ static void create_pcie(SBSAMachineState *sms) /* Map IO port space */ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio); - for (i = 0; i < GPEX_NUM_IRQS; i++) { + for (i = 0; i < PCI_NUM_PINS; i++) { sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, qdev_get_gpio_in(sms->gic, irq + i)); gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); @@ -756,7 +756,9 @@ static void sbsa_ref_init(MachineState *machine) sms->smp_cpus = smp_cpus; if (machine->ram_size > sbsa_ref_memmap[SBSA_MEM].size) { - error_report("sbsa-ref: cannot model more than %dGB RAM", RAMLIMIT_GB); + char *size_str = size_to_str(RAMLIMIT_BYTES); + + error_report("sbsa-ref: cannot model more than %s of RAM", size_str); exit(1); } @@ -880,7 +882,7 @@ static void sbsa_ref_instance_init(Object *obj) sbsa_flash_create(sms); } -static void sbsa_ref_class_init(ObjectClass *oc, void *data) +static void sbsa_ref_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); static const char * const valid_cpu_types[] = { diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index d73ad622113..4d6516443e9 100644 --- a/hw/arm/smmu-common.c +++ 
b/hw/arm/smmu-common.c @@ -20,6 +20,7 @@ #include "trace.h" #include "exec/target_page.h" #include "hw/core/cpu.h" +#include "hw/pci/pci_bridge.h" #include "hw/qdev-properties.h" #include "qapi/error.h" #include "qemu/jhash.h" @@ -225,6 +226,27 @@ static gboolean smmu_hash_remove_by_vmid_ipa(gpointer key, gpointer value, ((entry->iova & ~info->mask) == info->iova); } +static gboolean +smmu_hash_remove_by_sid_range(gpointer key, gpointer value, gpointer user_data) +{ + SMMUDevice *sdev = (SMMUDevice *)key; + uint32_t sid = smmu_get_sid(sdev); + SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data; + + if (sid < sid_range->start || sid > sid_range->end) { + return false; + } + trace_smmu_config_cache_inv(sid); + return true; +} + +void smmu_configs_inv_sid_range(SMMUState *s, SMMUSIDRange sid_range) +{ + trace_smmu_configs_inv_sid_range(sid_range.start, sid_range.end); + g_hash_table_foreach_remove(s->configs, smmu_hash_remove_by_sid_range, + &sid_range); +} + void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova, uint8_t tg, uint64_t num_pages, uint8_t ttl) { @@ -298,7 +320,7 @@ void smmu_iotlb_inv_vmid(SMMUState *s, int vmid) g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid); } -inline void smmu_iotlb_inv_vmid_s1(SMMUState *s, int vmid) +void smmu_iotlb_inv_vmid_s1(SMMUState *s, int vmid) { trace_smmu_iotlb_inv_vmid_s1(vmid); g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid_s1, &vmid); @@ -674,7 +696,7 @@ static int smmu_ptw_64_s2(SMMUTransCfg *cfg, /* * combine S1 and S2 TLB entries into a single entry. - * As a result the S1 entry is overriden with combined data. + * As a result the S1 entry is overridden with combined data. */ static void combine_tlb(SMMUTLBEntry *tlbe, SMMUTLBEntry *tlbe_s2, dma_addr_t iova, SMMUTransCfg *cfg) @@ -691,7 +713,6 @@ static void combine_tlb(SMMUTLBEntry *tlbe, SMMUTLBEntry *tlbe_s2, tlbe->entry.iova = iova & ~tlbe->entry.addr_mask; /* parent_perm has s2 perm while perm keeps s1 perm. 
*/ tlbe->parent_perm = tlbe_s2->entry.perm; - return; } /** @@ -826,12 +847,24 @@ SMMUPciBus *smmu_find_smmu_pcibus(SMMUState *s, uint8_t bus_num) return NULL; } -static AddressSpace *smmu_find_add_as(PCIBus *bus, void *opaque, int devfn) +void smmu_init_sdev(SMMUState *s, SMMUDevice *sdev, PCIBus *bus, int devfn) { - SMMUState *s = opaque; - SMMUPciBus *sbus = g_hash_table_lookup(s->smmu_pcibus_by_busptr, bus); - SMMUDevice *sdev; static unsigned int index; + g_autofree char *name = g_strdup_printf("%s-%d-%d", s->mrtypename, devfn, + index++); + sdev->smmu = s; + sdev->bus = bus; + sdev->devfn = devfn; + + memory_region_init_iommu(&sdev->iommu, sizeof(sdev->iommu), + s->mrtypename, OBJECT(s), name, UINT64_MAX); + address_space_init(&sdev->as, MEMORY_REGION(&sdev->iommu), name); + trace_smmu_add_mr(name); +} + +SMMUPciBus *smmu_get_sbus(SMMUState *s, PCIBus *bus) +{ + SMMUPciBus *sbus = g_hash_table_lookup(s->smmu_pcibus_by_busptr, bus); if (!sbus) { sbus = g_malloc0(sizeof(SMMUPciBus) + @@ -840,23 +873,19 @@ static AddressSpace *smmu_find_add_as(PCIBus *bus, void *opaque, int devfn) g_hash_table_insert(s->smmu_pcibus_by_busptr, bus, sbus); } + return sbus; +} + +static AddressSpace *smmu_find_add_as(PCIBus *bus, void *opaque, int devfn) +{ + SMMUState *s = opaque; + SMMUPciBus *sbus = smmu_get_sbus(s, bus); + SMMUDevice *sdev; + sdev = sbus->pbdev[devfn]; if (!sdev) { - char *name = g_strdup_printf("%s-%d-%d", s->mrtypename, devfn, index++); - sdev = sbus->pbdev[devfn] = g_new0(SMMUDevice, 1); - - sdev->smmu = s; - sdev->bus = bus; - sdev->devfn = devfn; - - memory_region_init_iommu(&sdev->iommu, sizeof(sdev->iommu), - s->mrtypename, - OBJECT(s), name, UINT64_MAX); - address_space_init(&sdev->as, - MEMORY_REGION(&sdev->iommu), name); - trace_smmu_add_mr(name); - g_free(name); + smmu_init_sdev(s, sdev, bus, devfn); } return &sdev->as; @@ -905,6 +934,7 @@ static void smmu_base_realize(DeviceState *dev, Error **errp) { SMMUState *s = ARM_SMMU(dev); SMMUBaseClass *sbc = ARM_SMMU_GET_CLASS(dev); + PCIBus *pci_bus = s->primary_bus; Error *local_err = NULL; sbc->parent_realize(dev, &local_err); @@ -917,14 +947,50 @@ static void smmu_base_realize(DeviceState *dev, Error **errp) g_free, g_free); s->smmu_pcibus_by_busptr = g_hash_table_new(NULL, NULL); - if (s->primary_bus) { - pci_setup_iommu(s->primary_bus, &smmu_ops, s); - } else { + if (!pci_bus) { error_setg(errp, "SMMU is not attached to any PCI bus!"); + return; + } + + if (!s->iommu_ops) { + s->iommu_ops = &smmu_ops; + } + /* + * We only allow default PCIe Root Complex(pcie.0) or pxb-pcie based extra + * root complexes to be associated with SMMU. + */ + if (pci_bus_is_express(pci_bus) && pci_bus_is_root(pci_bus) && + object_dynamic_cast(OBJECT(pci_bus)->parent, TYPE_PCI_HOST_BRIDGE)) { + /* + * This condition matches either the default pcie.0, pxb-pcie, or + * pxb-cxl. For both pxb-pcie and pxb-cxl, parent_dev will be set. + * Currently, we don't allow pxb-cxl as it requires further + * verification. Therefore, make sure this is indeed pxb-pcie. 
+ */ + if (pci_bus->parent_dev) { + if (!object_dynamic_cast(OBJECT(pci_bus), TYPE_PXB_PCIE_BUS)) { + goto out_err; + } + } + + if (s->smmu_per_bus) { + pci_setup_iommu_per_bus(pci_bus, s->iommu_ops, s); + } else { + pci_setup_iommu(pci_bus, s->iommu_ops, s); + } + return; } +out_err: + error_setg(errp, "SMMU should be attached to a default PCIe root complex" + "(pcie.0) or a pxb-pcie based root complex"); } -static void smmu_base_reset_hold(Object *obj, ResetType type) +/* + * Make sure the IOMMU is reset in 'exit' phase after + * all outstanding DMA requests have been quiesced during + * the 'enter' or 'hold' reset phases + */ +static void smmu_base_reset_exit(Object *obj, ResetType type) { SMMUState *s = ARM_SMMU(obj); @@ -934,14 +1000,14 @@ static void smmu_base_reset_hold(Object *obj, ResetType type) g_hash_table_remove_all(s->iotlb); } -static Property smmu_dev_properties[] = { +static const Property smmu_dev_properties[] = { DEFINE_PROP_UINT8("bus_num", SMMUState, bus_num, 0), + DEFINE_PROP_BOOL("smmu_per_bus", SMMUState, smmu_per_bus, false), DEFINE_PROP_LINK("primary-bus", SMMUState, primary_bus, TYPE_PCI_BUS, PCIBus *), - DEFINE_PROP_END_OF_LIST(), }; -static void smmu_base_class_init(ObjectClass *klass, void *data) +static void smmu_base_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -950,7 +1016,7 @@ static void smmu_base_class_init(ObjectClass *klass, void *data) device_class_set_props(dc, smmu_dev_properties); device_class_set_parent_realize(dc, smmu_base_realize, &sbc->parent_realize); - rc->phases.hold = smmu_base_reset_hold; + rc->phases.exit = smmu_base_reset_exit; } static const TypeInfo smmu_base_info = { diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h index 843bebb185d..d143d296f34 100644 --- a/hw/arm/smmu-internal.h +++ b/hw/arm/smmu-internal.h @@ -141,9 +141,4 @@ typedef struct SMMUIOTLBPageInvInfo { uint64_t mask; } SMMUIOTLBPageInvInfo; -typedef struct SMMUSIDRange { - uint32_t start; - uint32_t end; -} SMMUSIDRange; - #endif diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c new file mode 100644 index 00000000000..db062670337 --- /dev/null +++ b/hw/arm/smmuv3-accel.c @@ -0,0 +1,869 @@ +/* + * Copyright (c) 2025 Huawei Technologies R & D (UK) Ltd + * Copyright (C) 2025 NVIDIA + * Written by Nicolin Chen, Shameer Kolothum + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include +#include +#include "trace.h" + +#include "hw/arm/smmuv3.h" +#include "hw/iommu.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci-host/gpex.h" +#include "hw/vfio/pci.h" +#include "qemu/units.h" + +#include "smmuv3-accel.h" +#include "smmuv3-internal.h" + +#define SMMU_STE_VALID (1ULL << 0) +#define SMMU_STE_CFG_BYPASS (1ULL << 3) + +#define STE0_V MAKE_64BIT_MASK(0, 1) +#define STE0_CONFIG MAKE_64BIT_MASK(1, 3) +#define STE0_S1FMT MAKE_64BIT_MASK(4, 2) +#define STE0_CTXPTR MAKE_64BIT_MASK(6, 50) +#define STE0_S1CDMAX MAKE_64BIT_MASK(59, 5) +#define STE0_MASK (STE0_S1CDMAX | STE0_CTXPTR | STE0_S1FMT | STE0_CONFIG | \ + STE0_V) + +#define STE1_S1DSS MAKE_64BIT_MASK(0, 2) +#define STE1_S1CIR MAKE_64BIT_MASK(2, 2) +#define STE1_S1COR MAKE_64BIT_MASK(4, 2) +#define STE1_S1CSH MAKE_64BIT_MASK(6, 2) +#define STE1_S1STALLD MAKE_64BIT_MASK(27, 1) +#define STE1_ETS MAKE_64BIT_MASK(28, 2) +#define STE1_MASK (STE1_ETS | STE1_S1STALLD | STE1_S1CSH | STE1_S1COR | \ + STE1_S1CIR | STE1_S1DSS) + +static bool 
+smmuv3_accel_check_hw_compatible(SMMUv3State *s, + struct iommu_hw_info_arm_smmuv3 *info, + Error **errp) +{ + uint32_t val; + + /* + * QEMU SMMUv3 supports both linear and 2-level stream tables. + */ + val = FIELD_EX32(info->idr[0], IDR0, STLEVEL); + if (val != FIELD_EX32(s->idr[0], IDR0, STLEVEL)) { + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, val); + error_setg(errp, "Host SMMUv3 differs in Stream Table format"); + return false; + } + + /* QEMU SMMUv3 supports only little-endian translation table walks */ + val = FIELD_EX32(info->idr[0], IDR0, TTENDIAN); + if (!val && val > FIELD_EX32(s->idr[0], IDR0, TTENDIAN)) { + error_setg(errp, "Host SMMUv3 doesn't support little-endian " + "translation tables"); + return false; + } + + /* QEMU SMMUv3 supports only AArch64 translation table format */ + val = FIELD_EX32(info->idr[0], IDR0, TTF); + if (val < FIELD_EX32(s->idr[0], IDR0, TTF)) { + error_setg(errp, "Host SMMUv3 doesn't support AArch64 translation " + "table format"); + return false; + } + + /* QEMU SMMUv3 supports SIDSIZE 16 */ + val = FIELD_EX32(info->idr[1], IDR1, SIDSIZE); + if (val < FIELD_EX32(s->idr[1], IDR1, SIDSIZE)) { + error_setg(errp, "Host SMMUv3 SIDSIZE not compatible"); + return false; + } + + /* If the user enables PASID support (pasid=on), QEMU sets SSIDSIZE to 16 */ + val = FIELD_EX32(info->idr[1], IDR1, SSIDSIZE); + if (val < FIELD_EX32(s->idr[1], IDR1, SSIDSIZE)) { + error_setg(errp, "Host SMMUv3 SSIDSIZE not compatible"); + return false; + } + + /* User can override QEMU SMMUv3 Range Invalidation support */ + val = FIELD_EX32(info->idr[3], IDR3, RIL); + if (val != FIELD_EX32(s->idr[3], IDR3, RIL)) { + error_setg(errp, "Host SMMUv3 differs in Range Invalidation support"); + return false; + } + + /* + * ToDo: the Linux kernel documentation does not describe OAS as meaningful + * to userspace, but OAS looks like it needs to be compatible for + * accelerator support. Please check. 
+     */
+    val = FIELD_EX32(info->idr[5], IDR5, OAS);
+    if (val < FIELD_EX32(s->idr[5], IDR5, OAS)) {
+        error_setg(errp, "Host SMMUv3 OAS not compatible");
+        return false;
+    }
+
+    val = FIELD_EX32(info->idr[5], IDR5, GRAN4K);
+    if (val != FIELD_EX32(s->idr[5], IDR5, GRAN4K)) {
+        error_setg(errp, "Host SMMUv3 doesn't support 4K translation granule");
+        return false;
+    }
+    val = FIELD_EX32(info->idr[5], IDR5, GRAN16K);
+    if (val != FIELD_EX32(s->idr[5], IDR5, GRAN16K)) {
+        error_setg(errp, "Host SMMUv3 doesn't support 16K translation granule");
+        return false;
+    }
+    val = FIELD_EX32(info->idr[5], IDR5, GRAN64K);
+    if (val != FIELD_EX32(s->idr[5], IDR5, GRAN64K)) {
+        error_setg(errp, "Host SMMUv3 doesn't support 64K translation granule");
+        return false;
+    }
+    return true;
+}
+
+static bool
+smmuv3_accel_hw_compatible(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
+                           Error **errp)
+{
+    struct iommu_hw_info_arm_smmuv3 info;
+    uint32_t data_type = 0;
+    uint64_t caps;
+
+    if (!iommufd_backend_get_device_info(idev->iommufd, idev->devid, &data_type,
+                                         &info, sizeof(info), &caps, NULL,
+                                         errp)) {
+        return false;
+    }
+
+    if (data_type != IOMMU_HW_INFO_TYPE_ARM_SMMUV3) {
+        error_setg(errp, "Wrong data type (%d) for Host SMMUv3 device info",
+                   data_type);
+        return false;
+    }
+
+    if (!smmuv3_accel_check_hw_compatible(s, &info, errp)) {
+        return false;
+    }
+
+    if (!tegra241_cmdqv_hw_compatible(s, idev, errp)) {
+        return false;
+    }
+
+    return true;
+}
+
+static bool
+smmuv3_accel_alloc_vdev(SMMUv3AccelDevice *accel_dev, int sid, Error **errp)
+{
+    SMMUViommu *viommu = accel_dev->viommu;
+    IOMMUFDVdev *vdev;
+    uint32_t vdev_id;
+
+    if (!accel_dev->idev || accel_dev->vdev) {
+        return true;
+    }
+
+    if (!iommufd_backend_alloc_vdev(viommu->iommufd, accel_dev->idev->devid,
+                                    viommu->core.viommu_id, sid,
+                                    &vdev_id, errp)) {
+        return false;
+    }
+    if (!host_iommu_device_iommufd_attach_hwpt(accel_dev->idev,
+                                               viommu->bypass_hwpt_id, errp)) {
+        iommufd_backend_free_id(viommu->iommufd, vdev_id);
+        return false;
+    }
+
+    vdev = g_new(IOMMUFDVdev, 1);
+    vdev->vdev_id = vdev_id;
+    vdev->dev_id = sid;
+    accel_dev->vdev = vdev;
+    return true;
+}
+
+static bool
+smmuv3_accel_dev_uninstall_nested_ste(SMMUv3AccelDevice *accel_dev, bool abort,
+                                      Error **errp)
+{
+    HostIOMMUDeviceIOMMUFD *idev = accel_dev->idev;
+    SMMUS1Hwpt *s1_hwpt = accel_dev->s1_hwpt;
+    uint32_t hwpt_id;
+
+    if (!s1_hwpt || !accel_dev->viommu) {
+        return true;
+    }
+
+    if (abort) {
+        hwpt_id = accel_dev->viommu->abort_hwpt_id;
+    } else {
+        hwpt_id = accel_dev->viommu->bypass_hwpt_id;
+    }
+
+    if (!host_iommu_device_iommufd_attach_hwpt(idev, hwpt_id, errp)) {
+        return false;
+    }
+
+    iommufd_backend_free_id(s1_hwpt->iommufd, s1_hwpt->hwpt_id);
+    accel_dev->s1_hwpt = NULL;
+    g_free(s1_hwpt);
+    return true;
+}
+
+static bool
+smmuv3_accel_dev_install_nested_ste(SMMUv3AccelDevice *accel_dev,
+                                    uint32_t data_type, uint32_t data_len,
+                                    void *data, Error **errp)
+{
+    SMMUViommu *viommu = accel_dev->viommu;
+    SMMUS1Hwpt *s1_hwpt = accel_dev->s1_hwpt;
+    HostIOMMUDeviceIOMMUFD *idev = accel_dev->idev;
+    uint32_t flags = 0;
+
+    if (!idev || !viommu) {
+        error_setg(errp, "Device 0x%x has no associated IOMMU dev or vIOMMU",
+                   smmu_get_sid(&accel_dev->sdev));
+        return false;
+    }
+
+    if (s1_hwpt) {
+        if (!smmuv3_accel_dev_uninstall_nested_ste(accel_dev, true, errp)) {
+            return false;
+        }
+    }
+
+    s1_hwpt = g_new0(SMMUS1Hwpt, 1);
+    s1_hwpt->iommufd = idev->iommufd;
+    if (!iommufd_backend_alloc_hwpt(idev->iommufd, idev->devid,
+                                    viommu->core.viommu_id, flags, data_type,
+                                    data_len, data, &s1_hwpt->hwpt_id, errp)) {
+        return false;
+    }
+
+    if (!host_iommu_device_iommufd_attach_hwpt(idev, s1_hwpt->hwpt_id, errp)) {
+        iommufd_backend_free_id(idev->iommufd, s1_hwpt->hwpt_id);
+        return false;
+    }
+    accel_dev->s1_hwpt = s1_hwpt;
+    return true;
+}
+
+bool
+smmuv3_accel_install_nested_ste(SMMUv3State *s, SMMUDevice *sdev, int sid,
+                                Error **errp)
+{
+    SMMUv3AccelDevice *accel_dev;
+    SMMUEventInfo event = {.type = SMMU_EVT_NONE, .sid = sid,
+                           .inval_ste_allowed = true};
+    struct iommu_hwpt_arm_smmuv3 nested_data = {};
+    uint64_t ste_0, ste_1;
+    uint32_t config;
+    STE ste;
+    int ret;
+
+    if (!s->accel) {
+        return true;
+    }
+
+    accel_dev = container_of(sdev, SMMUv3AccelDevice, sdev);
+    if (!accel_dev->viommu) {
+        return true;
+    }
+
+    if (!smmuv3_accel_alloc_vdev(accel_dev, sid, errp)) {
+        return false;
+    }
+
+    ret = smmu_find_ste(sdev->smmu, sid, &ste, &event);
+    if (ret) {
+        error_setg(errp, "Failed to find STE for Device 0x%x", sid);
+        return true;
+    }
+
+    config = STE_CONFIG(&ste);
+    if (!STE_VALID(&ste) || !STE_CFG_S1_ENABLED(config)) {
+        if (!smmuv3_accel_dev_uninstall_nested_ste(accel_dev,
+                                                   STE_CFG_ABORT(config),
+                                                   errp)) {
+            return false;
+        }
+        smmuv3_flush_config(sdev);
+        return true;
+    }
+
+    ste_0 = (uint64_t)ste.word[0] | (uint64_t)ste.word[1] << 32;
+    ste_1 = (uint64_t)ste.word[2] | (uint64_t)ste.word[3] << 32;
+    nested_data.ste[0] = cpu_to_le64(ste_0 & STE0_MASK);
+    nested_data.ste[1] = cpu_to_le64(ste_1 & STE1_MASK);
+
+    if (!smmuv3_accel_dev_install_nested_ste(accel_dev,
+                                             IOMMU_HWPT_DATA_ARM_SMMUV3,
+                                             sizeof(nested_data),
+                                             &nested_data, errp)) {
+        error_prepend(errp, "Unable to install nested STE=0x%" PRIx64
+                      ":0x%" PRIx64 " for sid=0x%x: ",
+                      (uint64_t)nested_data.ste[1],
+                      (uint64_t)nested_data.ste[0], sid);
+        return false;
+    }
+    trace_smmuv3_accel_install_nested_ste(sid, nested_data.ste[1],
+                                          nested_data.ste[0]);
+    return true;
+}
+
+bool smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
+                                           Error **errp)
+{
+    SMMUv3AccelState *s_accel = s->s_accel;
+    SMMUv3AccelDevice *accel_dev;
+
+    if (!s_accel || !s_accel->viommu) {
+        return true;
+    }
+
+    QLIST_FOREACH(accel_dev, &s_accel->viommu->device_list, next) {
+        uint32_t sid = smmu_get_sid(&accel_dev->sdev);
+
+        if (sid >= range->start && sid <= range->end) {
+            if (!smmuv3_accel_install_nested_ste(s, &accel_dev->sdev,
+                                                 sid, errp)) {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+/*
+ * This issues the invalidation cmd to the host SMMUv3.
+ * Note: sdev can be NULL for certain invalidation commands,
+ * e.g., SMMU_CMD_TLBI_NH_ASID, SMMU_CMD_TLBI_NH_VA etc.
+ */
+bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd, SMMUDevice *sdev,
+                                Error **errp)
+{
+    SMMUv3State *s = ARM_SMMUV3(bs);
+    SMMUv3AccelState *s_accel = s->s_accel;
+    IOMMUFDViommu *viommu_core;
+    uint32_t entry_num = 1;
+
+    if (!s->accel || !s_accel->viommu) {
+        return true;
+    }
+
+    /*
+     * We may end up here for any emulated PCI bridge or root port type device.
+     * However, passing invalidation commands with a sid (e.g. CFGI_CD) to the
+     * host SMMUv3 only matters for vfio-pci endpoint devices. Hence check
+     * whether sdev is valid.
+ */ + if (sdev) { + SMMUv3AccelDevice *accel_dev = container_of(sdev, SMMUv3AccelDevice, + sdev); + if (!accel_dev->vdev) { + return true; + } + } + + viommu_core = &s_accel->viommu->core; + return iommufd_backend_invalidate_cache( + viommu_core->iommufd, viommu_core->viommu_id, + IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3, + sizeof(Cmd), &entry_num, cmd, errp); +} + +static void *smmuv3_accel_nested_event_thread(void *arg) +{ + struct iommu_vevent_arm_smmuv3 *vevent; + struct iommufd_vevent_header *hdr; + ssize_t readsz = sizeof(*hdr) + sizeof(*vevent); + struct pollfd pollfd = {}; + SMMUv3State *s = arg; + SMMUv3AccelState *s_accel = s->s_accel; + SMMUViommu *viommu = s_accel->viommu; + MemTxResult r; + ssize_t bytes; + Evt evt = {}; + void *buf; + int ret; + + if (!viommu->veventq) { + return NULL; + } + + buf = g_malloc0(readsz); + pollfd.events = POLLIN; + pollfd.fd = viommu->veventq->veventq_fd; + + while (1) { + qemu_mutex_lock(&s_accel->event_thread_mutex); + if (s_accel->event_thread_stop) { + qemu_mutex_unlock(&s_accel->event_thread_mutex); + break; + } + qemu_mutex_unlock(&s_accel->event_thread_mutex); + + ret = poll(&pollfd, 1, 100); + if (ret < 0) { + error_report("%s: poll failed: %d", __func__, ret); + goto out_free; + } + + bytes = read(pollfd.fd, buf, readsz); + if (bytes == 0) { + continue; + } + if (bytes < 0) { + error_report("%s: read failed: %d", __func__, ret); + goto out_free; + } + hdr = buf; + vevent = buf + sizeof(*hdr); + if (hdr->flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS) { + error_report("%s: vEVENTQ has lost events", __func__); + goto out_free; + } + + memcpy(&evt, vevent, sizeof(evt)); + r = smmuv3_write_eventq(s, &evt); + if (r != MEMTX_OK) { + smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, + R_GERROR_EVENTQ_ABT_ERR_MASK); + } + } +out_free: + g_free(buf); + close(pollfd.fd); + return NULL; +} + +bool smmuv3_accel_realloc_veventq(SMMUv3State *s, uint32_t log2size, + Error **errp) +{ + SMMUv3AccelState *s_accel = s->s_accel; + IOMMUFDVeventq *veventq; + SMMUViommu *viommu; + uint32_t veventq_id; + uint32_t veventq_fd; + + if (!s_accel || !s_accel->viommu) { + return true; + } + + viommu = s_accel->viommu; + if (viommu->veventq) { + qemu_mutex_lock(&s_accel->event_thread_mutex); + s_accel->event_thread_stop = true; + qemu_mutex_unlock(&s_accel->event_thread_mutex); + qemu_thread_join(&s_accel->event_thread_id); + iommufd_backend_free_id(viommu->iommufd, viommu->veventq->veventq_id); + g_free(viommu->veventq); + } + + if (!iommufd_backend_alloc_veventq(viommu->iommufd, viommu->core.viommu_id, + IOMMU_VEVENTQ_TYPE_ARM_SMMUV3, + 1 << log2size, &veventq_id, &veventq_fd, + errp)) { + return false; + } + + veventq = g_new(IOMMUFDVeventq, 1); + veventq->veventq_id = veventq_id; + veventq->veventq_fd = veventq_fd; + veventq->viommu = &viommu->core; + viommu->veventq = veventq; + + s_accel->event_thread_stop = false; + qemu_thread_create(&s_accel->event_thread_id, "irq/event", + smmuv3_accel_nested_event_thread, s, + QEMU_THREAD_JOINABLE); + return true; +} + +static SMMUv3AccelDevice *smmuv3_accel_get_dev(SMMUState *bs, SMMUPciBus *sbus, + PCIBus *bus, int devfn) +{ + SMMUDevice *sdev = sbus->pbdev[devfn]; + SMMUv3AccelDevice *accel_dev; + + if (sdev) { + return container_of(sdev, SMMUv3AccelDevice, sdev); + } + + accel_dev = g_new0(SMMUv3AccelDevice, 1); + sdev = &accel_dev->sdev; + + sbus->pbdev[devfn] = sdev; + smmu_init_sdev(bs, sdev, bus, devfn); + return accel_dev; +} + +static bool +smmuv3_accel_dev_alloc_viommu(SMMUv3AccelDevice *accel_dev, + HostIOMMUDeviceIOMMUFD *idev, 
Error **errp) +{ + struct iommu_hwpt_arm_smmuv3 bypass_data = { + .ste = { SMMU_STE_CFG_BYPASS | SMMU_STE_VALID, 0x0ULL }, + }; + struct iommu_hwpt_arm_smmuv3 abort_data = { + .ste = { SMMU_STE_VALID, 0x0ULL }, + }; + SMMUDevice *sdev = &accel_dev->sdev; + SMMUState *bs = sdev->smmu; + SMMUv3State *s = ARM_SMMUV3(bs); + SMMUv3AccelState *s_accel = s->s_accel; + uint32_t s2_hwpt_id = idev->hwpt_id; + uint32_t viommu_id = 0; + SMMUViommu *viommu; + + if (s_accel->viommu) { + accel_dev->viommu = s_accel->viommu; + return true; + } + + viommu = g_new0(SMMUViommu, 1); + + if (s->cmdqv) { + if (!iommufd_backend_alloc_viommu( + idev->iommufd, idev->devid, IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV, + s2_hwpt_id, &viommu->cmdqv_data, sizeof(viommu->cmdqv_data), + &viommu_id, errp)) { + error_report("NVIDIA CMDQV is unsupported, falling back to " + "emulated cmdq"); + s->cmdqv = false; + } + } + + if (!viommu_id) { + if (!iommufd_backend_alloc_viommu(idev->iommufd, idev->devid, + IOMMU_VIOMMU_TYPE_ARM_SMMUV3, s2_hwpt_id, NULL, 0, + &viommu_id, errp)) { + return false; + } + } + + viommu->core.viommu_id = viommu_id; + viommu->core.s2_hwpt_id = s2_hwpt_id; + viommu->core.iommufd = idev->iommufd; + + if (!iommufd_backend_alloc_hwpt(idev->iommufd, idev->devid, + viommu->core.viommu_id, 0, + IOMMU_HWPT_DATA_ARM_SMMUV3, + sizeof(abort_data), &abort_data, + &viommu->abort_hwpt_id, errp)) { + goto free_viommu; + } + + if (!iommufd_backend_alloc_hwpt(idev->iommufd, idev->devid, + viommu->core.viommu_id, 0, + IOMMU_HWPT_DATA_ARM_SMMUV3, + sizeof(bypass_data), &bypass_data, + &viommu->bypass_hwpt_id, errp)) { + goto free_abort_hwpt; + } + + viommu->iommufd = idev->iommufd; + + s_accel->viommu = viommu; + accel_dev->viommu = viommu; + return true; + +free_abort_hwpt: + iommufd_backend_free_id(idev->iommufd, viommu->abort_hwpt_id); +free_viommu: + iommufd_backend_free_id(idev->iommufd, viommu->core.viommu_id); + g_free(viommu); + return false; +} + +static bool smmuv3_accel_set_iommu_device(PCIBus *bus, void *opaque, int devfn, + HostIOMMUDevice *hiod, Error **errp) +{ + HostIOMMUDeviceIOMMUFD *idev = HOST_IOMMU_DEVICE_IOMMUFD(hiod); + SMMUState *bs = opaque; + SMMUv3State *s = ARM_SMMUV3(bs); + SMMUv3AccelState *s_accel = s->s_accel; + SMMUPciBus *sbus = smmu_get_sbus(bs, bus); + SMMUv3AccelDevice *accel_dev = smmuv3_accel_get_dev(bs, sbus, bus, devfn); + SMMUDevice *sdev = &accel_dev->sdev; + uint16_t sid = smmu_get_sid(sdev); + + if (!idev) { + return true; + } + + if (accel_dev->idev) { + if (accel_dev->idev != idev) { + error_setg(errp, "Device 0x%x already has an associated IOMMU dev", + sid); + return false; + } + return true; + } + + /* + * Check the host SMMUv3 associated with the dev is compatible with the + * QEMU SMMUv3 accel. 
+     */
+    if (!smmuv3_accel_hw_compatible(s, idev, errp)) {
+        return false;
+    }
+
+    if (!smmuv3_accel_dev_alloc_viommu(accel_dev, idev, errp)) {
+        error_prepend(errp, "Device 0x%x: Unable to alloc viommu: ", sid);
+        return false;
+    }
+
+    accel_dev->idev = idev;
+    QLIST_INSERT_HEAD(&s_accel->viommu->device_list, accel_dev, next);
+    trace_smmuv3_accel_set_iommu_device(devfn, sid);
+    return true;
+}
+
+static void smmuv3_accel_unset_iommu_device(PCIBus *bus, void *opaque,
+                                            int devfn)
+{
+    SMMUState *bs = opaque;
+    SMMUv3State *s = ARM_SMMUV3(bs);
+    SMMUPciBus *sbus = g_hash_table_lookup(bs->smmu_pcibus_by_busptr, bus);
+    SMMUv3AccelDevice *accel_dev;
+    SMMUViommu *viommu;
+    IOMMUFDVdev *vdev;
+    SMMUDevice *sdev;
+    uint16_t sid;
+
+    if (!sbus) {
+        return;
+    }
+
+    sdev = sbus->pbdev[devfn];
+    if (!sdev) {
+        return;
+    }
+
+    sid = smmu_get_sid(sdev);
+    accel_dev = container_of(sdev, SMMUv3AccelDevice, sdev);
+    if (!host_iommu_device_iommufd_attach_hwpt(accel_dev->idev,
+                                               accel_dev->idev->hwpt_id,
+                                               NULL)) {
+        error_report("Unable to attach dev 0x%x to the default HW pagetable",
+                     sid);
+    }
+
+    accel_dev->idev = NULL;
+    QLIST_REMOVE(accel_dev, next);
+    trace_smmuv3_accel_unset_iommu_device(devfn, sid);
+
+    viommu = s->s_accel->viommu;
+    vdev = accel_dev->vdev;
+    if (vdev) {
+        iommufd_backend_free_id(viommu->iommufd, vdev->vdev_id);
+        g_free(vdev);
+        accel_dev->vdev = NULL;
+    }
+
+    if (QLIST_EMPTY(&viommu->device_list)) {
+        iommufd_backend_free_id(viommu->iommufd, viommu->bypass_hwpt_id);
+        iommufd_backend_free_id(viommu->iommufd, viommu->abort_hwpt_id);
+        iommufd_backend_free_id(viommu->iommufd, viommu->core.viommu_id);
+        g_free(viommu);
+        s->s_accel->viommu = NULL;
+    }
+}
+
+static AddressSpace *smmuv3_accel_find_msi_as(PCIBus *bus, void *opaque,
+                                              int devfn)
+{
+    SMMUState *bs = opaque;
+    SMMUPciBus *sbus = smmu_get_sbus(bs, bus);
+    SMMUv3AccelDevice *accel_dev = smmuv3_accel_get_dev(bs, sbus, bus, devfn);
+    SMMUDevice *sdev = &accel_dev->sdev;
+
+    /*
+     * If the assigned vfio-pci dev has S1 translation enabled by the
+     * guest, return the IOMMU address space for MSI translation.
+     * Otherwise, return the system address space.
+     */
+    if (accel_dev->s1_hwpt) {
+        return &sdev->as;
+    } else {
+        return &address_space_memory;
+    }
+}
+
+static bool smmuv3_accel_pdev_allowed(PCIDevice *pdev, bool *vfio_pci)
+{
+    if (object_dynamic_cast(OBJECT(pdev), TYPE_PCI_BRIDGE) ||
+        object_dynamic_cast(OBJECT(pdev), TYPE_PXB_PCIE_DEV) ||
+        object_dynamic_cast(OBJECT(pdev), TYPE_GPEX_ROOT_DEVICE)) {
+        return true;
+    } else if ((object_dynamic_cast(OBJECT(pdev), TYPE_VFIO_PCI))) {
+        *vfio_pci = true;
+        if (object_property_get_link(OBJECT(pdev), "iommufd", NULL)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static AddressSpace *smmuv3_accel_find_add_as(PCIBus *bus, void *opaque,
+                                              int devfn)
+{
+    PCIDevice *pdev = pci_find_device(bus, pci_bus_num(bus), devfn);
+    SMMUState *bs = opaque;
+    SMMUPciBus *sbus = smmu_get_sbus(bs, bus);
+    SMMUv3AccelDevice *accel_dev = smmuv3_accel_get_dev(bs, sbus, bus, devfn);
+    SMMUDevice *sdev = &accel_dev->sdev;
+    bool vfio_pci = false;
+
+    if (pdev && !smmuv3_accel_pdev_allowed(pdev, &vfio_pci)) {
+        if (DEVICE(pdev)->hotplugged) {
+            if (vfio_pci) {
+                warn_report("Hot plugging a vfio-pci device (%s) without "
+                            "iommufd as backend is not supported", pdev->name);
+            } else {
+                warn_report("Hot plugging an emulated device %s with "
+                            "accelerated SMMUv3. This will degrade "
+                            "performance", pdev->name);
+            }
+            /*
+             * In both cases, we return the IOMMU address space.
For hotplugged + * vfio-pci dev without iommufd as backend, it will fail later in + * smmuv3_notify_flag_changed() with "requires iommu MAP notifier" + * error message. + */ + return &sdev->as; + } else { + error_report("Device(%s) not allowed. Only PCIe root complex " + "devices or PCI bridge devices or vfio-pci endpoint " + "devices with iommufd as backend is allowed with " + "arm-smmuv3,accel=on", pdev->name); + exit(1); + } + } + + /* + * We return the system address for vfio-pci devices(with iommufd as + * backend) so that the VFIO core can set up Stage-2 (S2) mappings for + * guest RAM. This is needed because, in the accelerated SMMUv3 case, + * the host SMMUv3 runs in nested (S1 + S2) mode where the guest + * manages its own S1 page tables while the host manages S2. + * + * We are using the global &address_space_memory here, as this will ensure + * same system address space pointer for all devices behind the accelerated + * SMMUv3s in a VM. That way VFIO/iommufd can reuse a single IOAS ID in + * iommufd_cdev_attach(), allowing the Stage-2 page tables to be shared + * within the VM instead of duplicating them for every SMMUv3 instance. + */ + if (vfio_pci) { + return &address_space_memory; + } else { + return &sdev->as; + } +} + +static uint64_t smmuv3_accel_get_viommu_flags(void *opaque) +{ + /* + * We return VIOMMU_FLAG_WANT_NESTING_PARENT to inform VFIO core to create a + * nesting parent which is required for accelerated SMMUv3 support. + * The real HW nested support should be reported from host SMMUv3 and if + * it doesn't, the nesting parent allocation will fail anyway in VFIO core. + */ + uint64_t flags = VIOMMU_FLAG_WANT_NESTING_PARENT; + SMMUState *bs = opaque; + SMMUv3State *s = ARM_SMMUV3(bs); + + if (s->pasid) { + flags |= VIOMMU_FLAG_PASID_SUPPORTED; + } + return flags; +} + +static const PCIIOMMUOps smmuv3_accel_ops = { + .get_address_space = smmuv3_accel_find_add_as, + .get_viommu_flags = smmuv3_accel_get_viommu_flags, + .set_iommu_device = smmuv3_accel_set_iommu_device, + .unset_iommu_device = smmuv3_accel_unset_iommu_device, + .get_msi_address_space = smmuv3_accel_find_msi_as, +}; + +void smmuv3_accel_idr_override(SMMUv3State *s) +{ + if (!s->accel) { + return; + } + + /* By default QEMU SMMUv3 has RIL. Update IDR3 if user has disabled it */ + if (!s->ril) { + s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 0); + } + /* QEMU SMMUv3 has no ATS. Update IDR0 if user has enabled it */ + if (s->ats) { + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ATS, 1); /* ATS */ + } + /* QEMU SMMUv3 has oas set 44. Update IDR5 if user has it set to 48 bits*/ + if (s->oas == 48) { + s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS_48); + } + + /* + * By default QEMU SMMUv3 has no PASID(SSID) support. Update IDR1 if user + * has enabled it. + */ + if (s->pasid) { + s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SSIDSIZE, SMMU_IDR1_SSIDSIZE); + } + + if (s->cmdqv) { + uint32_t val; + MemoryRegionSection section = memory_region_find(get_system_memory(), + GiB, MiB); + size_t pgsize = qemu_ram_pagesize(section.mr->ram_block); + + val = FIELD_EX32(s->idr[1], IDR1, CMDQS); + /* FIXME It needs to check the full RAM space */ + s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, MIN(log2(pgsize) - 4, + val)); + } +} + +/* + * If the guest reboots and devices are configured for S1+S2, Stage1 must + * be switched to bypass. Otherwise, QEMU/UEFI may fail when accessing a + * device, e.g. when UEFI retrieves boot partition information from an + * assigned vfio-pci NVMe device. 
+ */
+void smmuv3_accel_attach_bypass_hwpt(SMMUv3State *s)
+{
+    SMMUv3AccelDevice *accel_dev;
+    SMMUViommu *viommu;
+
+    if (!s->accel || !s->s_accel->viommu) {
+        return;
+    }
+
+    viommu = s->s_accel->viommu;
+    QLIST_FOREACH(accel_dev, &viommu->device_list, next) {
+        if (!accel_dev->vdev) {
+            continue;
+        }
+        if (!host_iommu_device_iommufd_attach_hwpt(accel_dev->idev,
+                                                   viommu->bypass_hwpt_id,
+                                                   NULL)) {
+            error_report("Failed to install bypass hwpt id %u for dev id %u",
+                         viommu->bypass_hwpt_id, accel_dev->idev->devid);
+        }
+    }
+}
+
+void smmuv3_accel_init(SMMUv3State *s)
+{
+    SMMUState *bs = ARM_SMMU(s);
+
+    bs->iommu_ops = &smmuv3_accel_ops;
+    s->s_accel = g_new0(SMMUv3AccelState, 1);
+    qemu_mutex_init(&s->s_accel->event_thread_mutex);
+}
diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
new file mode 100644
index 00000000000..e4680a55c43
--- /dev/null
+++ b/hw/arm/smmuv3-accel.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2025 Huawei Technologies R & D (UK) Ltd
+ * Copyright (C) 2025 NVIDIA
+ * Written by Nicolin Chen, Shameer Kolothum
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_ARM_SMMUV3_ACCEL_H
+#define HW_ARM_SMMUV3_ACCEL_H
+
+#include "hw/arm/smmu-common.h"
+#include "system/iommufd.h"
+#include
+#include CONFIG_DEVICES
+
+#define TYPE_TEGRA241_CMDQV "tegra241-cmdqv"
+#define TEGRA241_CMDQV_VERSION 0x1
+#define TEGRA241_CMDQV_NUM_CMDQ_LOG2 0x1
+#define TEGRA241_CMDQV_NUM_SID_PER_VM_LOG2 0x4
+typedef struct Tegra241CMDQV Tegra241CMDQV;
+
+typedef struct SMMUViommu {
+    IOMMUFDBackend *iommufd;
+    IOMMUFDViommu core;
+    IOMMUFDVeventq *veventq;
+    uint32_t bypass_hwpt_id;
+    uint32_t abort_hwpt_id;
+    struct iommu_viommu_tegra241_cmdqv cmdqv_data;
+    QLIST_HEAD(, SMMUv3AccelDevice) device_list;
+} SMMUViommu;
+
+typedef struct SMMUS1Hwpt {
+    IOMMUFDBackend *iommufd;
+    uint32_t hwpt_id;
+} SMMUS1Hwpt;
+
+typedef struct SMMUv3AccelDevice {
+    SMMUDevice sdev;
+    HostIOMMUDeviceIOMMUFD *idev;
+    SMMUS1Hwpt *s1_hwpt;
+    IOMMUFDVdev *vdev;
+    SMMUViommu *viommu;
+    QLIST_ENTRY(SMMUv3AccelDevice) next;
+} SMMUv3AccelDevice;
+
+typedef struct SMMUv3AccelState {
+    SMMUViommu *viommu;
+    QemuThread event_thread_id;
+    QemuMutex event_thread_mutex;
+    bool event_thread_stop;
+    struct iommu_hw_info_tegra241_cmdqv cmdqv_info;
+    Tegra241CMDQV *cmdqv;
+} SMMUv3AccelState;
+
+#ifdef CONFIG_ARM_SMMUV3_ACCEL
+void smmuv3_accel_init(SMMUv3State *s);
+bool smmuv3_accel_install_nested_ste(SMMUv3State *s, SMMUDevice *sdev, int sid,
+                                     Error **errp);
+bool smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
+                                           Error **errp);
+bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
+                                Error **errp);
+void smmuv3_accel_attach_bypass_hwpt(SMMUv3State *s);
+void smmuv3_accel_idr_override(SMMUv3State *s);
+bool smmuv3_accel_realloc_veventq(SMMUv3State *s, uint32_t log2size,
+                                  Error **errp);
+#else
+static inline void smmuv3_accel_init(SMMUv3State *s)
+{
+}
+static inline bool
+smmuv3_accel_install_nested_ste(SMMUv3State *s, SMMUDevice *sdev, int sid,
+                                Error **errp)
+{
+    return true;
+}
+static inline bool
+smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange *range,
+                                      Error **errp)
+{
+    return true;
+}
+static inline bool
+smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
+                           Error **errp)
+{
+    return true;
+}
+static inline void smmuv3_accel_attach_bypass_hwpt(SMMUv3State *s)
+{
+}
+static inline void smmuv3_accel_idr_override(SMMUv3State *s)
+{
+}
+static inline bool
+smmuv3_accel_realloc_veventq(SMMUv3State *s, uint32_t log2size,
+
Error **errp) +{ + return true; +} +#endif + +#if defined(CONFIG_TEGRA241_CMDQV) +void tegra241_cmdqv_init(SMMUv3State *s); +void tegra241_cmdqv_reset(SMMUv3State *s); +bool tegra241_cmdqv_hw_compatible(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev, + Error **errp); +#else +static inline void tegra241_cmdqv_init(SMMUv3State *s) +{ +} +static inline void tegra241_cmdqv_reset(SMMUv3State *s) +{ +} +static inline bool +tegra241_cmdqv_hw_compatible(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev, + Error **errp) +{ + return true; +} +#endif + +#endif /* HW_ARM_SMMUV3_ACCEL_H */ diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h index b6b7399347f..d3e59532929 100644 --- a/hw/arm/smmuv3-internal.h +++ b/hw/arm/smmuv3-internal.h @@ -81,6 +81,7 @@ REG32(IDR1, 0x4) FIELD(IDR1, ECMDQ, 31, 1) #define SMMU_IDR1_SIDSIZE 16 +#define SMMU_IDR1_SSIDSIZE 16 #define SMMU_CMDQS 19 #define SMMU_EVENTQS 19 @@ -111,7 +112,8 @@ REG32(IDR5, 0x14) FIELD(IDR5, VAX, 10, 2); FIELD(IDR5, STALL_MAX, 16, 16); -#define SMMU_IDR5_OAS 4 +#define SMMU_IDR5_OAS_44 4 +#define SMMU_IDR5_OAS_48 5 REG32(IIDR, 0x18) REG32(AIDR, 0x1c) @@ -547,6 +549,11 @@ typedef struct CD { uint32_t word[16]; } CD; +int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste, SMMUEventInfo *event); +void smmuv3_flush_config(SMMUDevice *sdev); +void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t gerror_mask); +MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt); + /* STE fields */ #define STE_VALID(x) extract32((x)->word[0], 0, 1) diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 39719763897..5fa3cf78bb6 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -20,17 +20,20 @@ #include "qemu/bitops.h" #include "hw/irq.h" #include "hw/sysbus.h" +#include "migration/blocker.h" #include "migration/vmstate.h" #include "hw/qdev-properties.h" #include "hw/qdev-core.h" #include "hw/pci/pci.h" #include "cpu.h" +#include "exec/target_page.h" #include "trace.h" #include "qemu/log.h" #include "qemu/error-report.h" #include "qapi/error.h" #include "hw/arm/smmuv3.h" +#include "smmuv3-accel.h" #include "smmuv3-internal.h" #include "smmu-internal.h" @@ -46,8 +49,7 @@ * @irq: irq type * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR) */ -static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, - uint32_t gerror_mask) +void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t gerror_mask) { bool pulse = false; @@ -141,7 +143,7 @@ static MemTxResult queue_write(SMMUQueue *q, Evt *evt_in) return MEMTX_OK; } -static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt) +MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt) { SMMUQueue *q = &s->eventq; MemTxResult r; @@ -291,12 +293,15 @@ static void smmuv3_init_regs(SMMUv3State *s) s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1); s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2); - s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */ + /* OAS: 44 bits */ + s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS_44); /* 4K, 16K and 64K granule support */ s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1); s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1); s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1); + smmuv3_accel_idr_override(s); + s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS); s->cmdq.prod = 0; s->cmdq.cons = 0; @@ -377,7 +382,7 @@ static int smmu_get_cd(SMMUv3State *s, STE *ste, SMMUTransCfg *cfg, qemu_log_mask(LOG_GUEST_ERROR, "Cannot fetch pte at address=0x%"PRIx64"\n", addr); event->type = SMMU_EVT_F_CD_FETCH; 
- event->u.f_ste_fetch.addr = addr; + event->u.f_cd_fetch.addr = addr; return -EINVAL; } for (i = 0; i < ARRAY_SIZE(buf->word); i++) { @@ -598,7 +603,8 @@ static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg, } } - if (STE_S1CDMAX(ste) != 0) { + /* If pasid enabled, we report SSIDSIZE = 16 */ + if (!FIELD_EX32(s->idr[1], IDR1, SSIDSIZE) && STE_S1CDMAX(ste) != 0) { qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support multiple context descriptors yet\n"); goto bad_ste; @@ -628,8 +634,7 @@ static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg, * Supports linear and 2-level stream table * Return 0 on success, -EINVAL otherwise */ -static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste, - SMMUEventInfo *event) +int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste, SMMUEventInfo *event) { dma_addr_t addr, strtab_base; uint32_t log2size; @@ -898,12 +903,12 @@ static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event) return cfg; } -static void smmuv3_flush_config(SMMUDevice *sdev) +void smmuv3_flush_config(SMMUDevice *sdev) { SMMUv3State *s = sdev->smmu; SMMUState *bc = &s->smmu_state; - trace_smmuv3_config_cache_inv(smmu_get_sid(sdev)); + trace_smmu_config_cache_inv(smmu_get_sid(sdev)); g_hash_table_remove(bc->configs, sdev); } @@ -1277,20 +1282,6 @@ static void smmuv3_range_inval(SMMUState *s, Cmd *cmd, SMMUStage stage) } } -static gboolean -smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data) -{ - SMMUDevice *sdev = (SMMUDevice *)key; - uint32_t sid = smmu_get_sid(sdev); - SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data; - - if (sid < sid_range->start || sid > sid_range->end) { - return false; - } - trace_smmuv3_config_cache_inv(sid); - return true; -} - static int smmuv3_cmdq_consume(SMMUv3State *s) { SMMUState *bs = ARM_SMMU(s); @@ -1342,6 +1333,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) { uint32_t sid = CMD_SID(&cmd); SMMUDevice *sdev = smmu_find_sdev(bs, sid); + Error *local_err = NULL; if (CMD_SSEC(&cmd)) { cmd_error = SMMU_CERROR_ILL; @@ -1353,6 +1345,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) } trace_smmuv3_cmdq_cfgi_ste(sid); + if (!smmuv3_accel_install_nested_ste(s, sdev, sid, &local_err)) { + error_report_err(local_err); + cmd_error = SMMU_CERROR_ILL; + break; + } smmuv3_flush_config(sdev); break; @@ -1362,6 +1359,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) uint32_t sid = CMD_SID(&cmd), mask; uint8_t range = CMD_STE_RANGE(&cmd); SMMUSIDRange sid_range; + Error *local_err = NULL; if (CMD_SSEC(&cmd)) { cmd_error = SMMU_CERROR_ILL; @@ -1373,8 +1371,13 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) sid_range.end = sid_range.start + mask; trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end); - g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste, - &sid_range); + if (!smmuv3_accel_install_nested_ste_range(s, &sid_range, + &local_err)) { + error_report_err(local_err); + cmd_error = SMMU_CERROR_ILL; + break; + } + smmu_configs_inv_sid_range(bs, sid_range); break; } case SMMU_CMD_CFGI_CD: @@ -1382,6 +1385,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) { uint32_t sid = CMD_SID(&cmd); SMMUDevice *sdev = smmu_find_sdev(bs, sid); + Error *local_err = NULL; if (CMD_SSEC(&cmd)) { cmd_error = SMMU_CERROR_ILL; @@ -1394,11 +1398,17 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) trace_smmuv3_cmdq_cfgi_cd(sid); smmuv3_flush_config(sdev); + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, sdev, &local_err)) { + error_report_err(local_err); + cmd_error = SMMU_CERROR_ILL; + break; + } break; } 
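+        /*
+         * The stage-1 TLBI commands handled below are also forwarded to the
+         * host SMMUv3 via smmuv3_accel_issue_inv_cmd() when accel=on, so the
+         * host TLB stays in sync with guest-issued invalidations.
+         */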
case SMMU_CMD_TLBI_NH_ASID: { int asid = CMD_ASID(&cmd); + Error *local_err = NULL; int vmid = -1; if (!STAGE1_SUPPORTED(s)) { @@ -1417,6 +1427,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) trace_smmuv3_cmdq_tlbi_nh_asid(asid); smmu_inv_notifiers_all(&s->smmu_state); smmu_iotlb_inv_asid_vmid(bs, asid, vmid); + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) { + error_report_err(local_err); + cmd_error = SMMU_CERROR_ILL; + break; + } break; } case SMMU_CMD_TLBI_NH_ALL: @@ -1441,18 +1456,36 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) QEMU_FALLTHROUGH; } case SMMU_CMD_TLBI_NSNH_ALL: + { + Error *local_err = NULL; + trace_smmuv3_cmdq_tlbi_nsnh(); smmu_inv_notifiers_all(&s->smmu_state); smmu_iotlb_inv_all(bs); + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) { + error_report_err(local_err); + cmd_error = SMMU_CERROR_ILL; + break; + } break; + } case SMMU_CMD_TLBI_NH_VAA: case SMMU_CMD_TLBI_NH_VA: + { + Error *local_err = NULL; + if (!STAGE1_SUPPORTED(s)) { cmd_error = SMMU_CERROR_ILL; break; } smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1); + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) { + error_report_err(local_err); + cmd_error = SMMU_CERROR_ILL; + break; + } break; + } case SMMU_CMD_TLBI_S12_VMALL: { int vmid = CMD_VMID(&cmd); @@ -1478,13 +1511,28 @@ static int smmuv3_cmdq_consume(SMMUv3State *s) */ smmuv3_range_inval(bs, &cmd, SMMU_STAGE_2); break; + case SMMU_CMD_ATC_INV: + { + SMMUDevice *sdev = smmu_find_sdev(bs, CMD_SID(&cmd)); + Error *local_err = NULL; + + if (!sdev) { + break; + } + + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, sdev, &local_err)) { + error_report_err(local_err); + cmd_error = SMMU_CERROR_ILL; + break; + } + break; + } case SMMU_CMD_TLBI_EL3_ALL: case SMMU_CMD_TLBI_EL3_VA: case SMMU_CMD_TLBI_EL2_ALL: case SMMU_CMD_TLBI_EL2_ASID: case SMMU_CMD_TLBI_EL2_VA: case SMMU_CMD_TLBI_EL2_VAA: - case SMMU_CMD_ATC_INV: case SMMU_CMD_PRI_RESP: case SMMU_CMD_RESUME: case SMMU_CMD_STALL_TERM: @@ -1540,12 +1588,20 @@ static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset, } return MEMTX_OK; case A_EVENTQ_BASE: + { + Error *local_err = NULL; + s->eventq.base = data; s->eventq.log2size = extract64(s->eventq.base, 0, 5); if (s->eventq.log2size > SMMU_EVENTQS) { s->eventq.log2size = SMMU_EVENTQS; } + if (!smmuv3_accel_realloc_veventq(s, s->eventq.log2size, &local_err)) { + error_report_err(local_err); + /* ToDo: Should we return err? */ + } return MEMTX_OK; + } case A_EVENTQ_IRQ_CFG0: s->eventq_irq_cfg0 = data; return MEMTX_OK; @@ -1637,12 +1693,20 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset, s->cmdq.cons = data; return MEMTX_OK; case A_EVENTQ_BASE: /* 64b */ + { + Error *local_err = NULL; + s->eventq.base = deposit64(s->eventq.base, 0, 32, data); s->eventq.log2size = extract64(s->eventq.base, 0, 5); if (s->eventq.log2size > SMMU_EVENTQS) { s->eventq.log2size = SMMU_EVENTQS; } + if (!smmuv3_accel_realloc_veventq(s, s->eventq.log2size, &local_err)) { + error_report_err(local_err); + /* ToDo: Should we return err? 
*/ + } return MEMTX_OK; + } case A_EVENTQ_BASE + 4: s->eventq.base = deposit64(s->eventq.base, 32, 32, data); return MEMTX_OK; @@ -1870,16 +1934,60 @@ static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev) } } -static void smmu_reset_hold(Object *obj, ResetType type) +/* + * Make sure the IOMMU is reset in 'exit' phase after + * all outstanding DMA requests have been quiesced during + * the 'enter' or 'hold' reset phases + */ +static void smmu_reset_exit(Object *obj, ResetType type) { SMMUv3State *s = ARM_SMMUV3(obj); SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s); - if (c->parent_phases.hold) { - c->parent_phases.hold(obj, type); + trace_smmu_reset_exit(); + if (c->parent_phases.exit) { + c->parent_phases.exit(obj, type); } + smmuv3_accel_attach_bypass_hwpt(s); + tegra241_cmdqv_reset(s); +} - smmuv3_init_regs(s); +static bool smmu_validate_property(SMMUv3State *s, Error **errp) +{ +#ifndef CONFIG_ARM_SMMUV3_ACCEL + if (s->accel) { + error_setg(errp, "accel=on support not compiled in"); + return false; + } +#endif + if (s->accel) { + if (s->oas != 44 && s->oas != 48) { + error_setg(errp, "oas can only be set to 44 or 48 bits"); + return false; + } + return true; + } + if (!s->ril) { + error_setg(errp, "ril can only be disabled if accel=on"); + return false; + } + if (s->ats) { + error_setg(errp, "ats can only be enabled if accel=on"); + return false; + } + if (s->oas != 44) { + error_setg(errp, "oas can only be set to 44 bits if accel=off"); + return false; + } + if (s->pasid) { + error_setg(errp, "pasid can only be enabled if accel=on"); + return false; + } + if (s->cmdqv) { + error_setg(errp, "NVIDIA cmdqv can only be enabled if accel=on"); + return false; + } + return true; } static void smmu_realize(DeviceState *d, Error **errp) @@ -1890,6 +1998,19 @@ static void smmu_realize(DeviceState *d, Error **errp) SysBusDevice *dev = SYS_BUS_DEVICE(d); Error *local_err = NULL; + if (!smmu_validate_property(s, errp)) { + return; + } + + if (s->accel) { + smmuv3_accel_init(s); + error_setg(&s->migration_blocker, "Migration not supported with SMMUv3 " + "accelerator mode enabled"); + if (migrate_add_blocker(&s->migration_blocker, errp) < 0) { + return; + } + } + c->parent_realize(d, &local_err); if (local_err) { error_propagate(errp, local_err); @@ -1906,6 +2027,14 @@ static void smmu_realize(DeviceState *d, Error **errp) sysbus_init_mmio(dev, &sys->iomem); smmu_init_irq(s, dev); + + smmuv3_init_regs(s); + /* + * This has to be after the above mmio and irq init as tegra241_cmdqv_init() + * also does a mmio/irq init. If not, the SMMUv3 probe may fail as it may + * read wrong mmio. + */ + tegra241_cmdqv_init(s); } static const VMStateDescription vmstate_smmuv3_queue = { @@ -1976,15 +2105,22 @@ static const VMStateDescription vmstate_smmuv3 = { } }; -static Property smmuv3_properties[] = { +static const Property smmuv3_properties[] = { /* * Stages of translation advertised. 
* "1": Stage 1 * "2": Stage 2 + * "nested": Both stage 1 and stage 2 * Defaults to stage 1 */ DEFINE_PROP_STRING("stage", SMMUv3State, stage), - DEFINE_PROP_END_OF_LIST() + DEFINE_PROP_BOOL("accel", SMMUv3State, accel, false), + /* RIL can be turned off for accel cases */ + DEFINE_PROP_BOOL("ril", SMMUv3State, ril, true), + DEFINE_PROP_BOOL("ats", SMMUv3State, ats, false), + DEFINE_PROP_UINT8("oas", SMMUv3State, oas, 44), + DEFINE_PROP_BOOL("pasid", SMMUv3State, pasid, false), + DEFINE_PROP_BOOL("cmdqv", SMMUv3State, cmdqv, false), }; static void smmuv3_instance_init(Object *obj) @@ -1992,18 +2128,36 @@ static void smmuv3_instance_init(Object *obj) /* Nothing much to do here as of now */ } -static void smmuv3_class_init(ObjectClass *klass, void *data) +static void smmuv3_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); SMMUv3Class *c = ARM_SMMUV3_CLASS(klass); dc->vmsd = &vmstate_smmuv3; - resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL, + resettable_class_set_parent_phases(rc, NULL, NULL, smmu_reset_exit, &c->parent_phases); device_class_set_parent_realize(dc, smmu_realize, &c->parent_realize); device_class_set_props(dc, smmuv3_properties); + dc->hotpluggable = false; + dc->user_creatable = true; + + object_class_property_set_description(klass, + "accel", + "Enable SMMUv3 accelerator support." + "Allows host SMMUv3 to be configured " + "in nested mode for vfio-pci dev assignment"); + object_class_property_set_description(klass, "ril", + "Enable/disable range invalidation support"); + object_class_property_set_description(klass, "ats", + "Enable/disable ATS support. Please ensure host platform has ATS " + "support before enabling this"); + object_class_property_set_description(klass, "oas", + "Specify Output Address Size. Supported values are 44 or 48 bits " + "Defaults to 44 bits"); + object_class_property_set_description(klass, "cmdqv", + "Enable/disable NVIDIA hw cmdq. Supported only if accel=on"); } static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu, @@ -2039,7 +2193,7 @@ static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu, } static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass, - void *data) + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); @@ -2064,8 +2218,8 @@ static const TypeInfo smmuv3_iommu_memory_region_info = { static void smmuv3_register_types(void) { - type_register(&smmuv3_type_info); - type_register(&smmuv3_iommu_memory_region_info); + type_register_static(&smmuv3_type_info); + type_register_static(&smmuv3_iommu_memory_region_info); } type_init(smmuv3_register_types) diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c deleted file mode 100644 index 62cd55ba914..00000000000 --- a/hw/arm/spitz.c +++ /dev/null @@ -1,1284 +0,0 @@ -/* - * PXA270-based Clamshell PDA platforms. - * - * Copyright (c) 2006 Openedhand Ltd. - * Written by Andrzej Zaborowski - * - * This code is licensed under the GNU GPL v2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. 
- */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "hw/arm/pxa.h" -#include "hw/arm/boot.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" -#include "hw/pcmcia.h" -#include "hw/qdev-properties.h" -#include "hw/i2c/i2c.h" -#include "hw/irq.h" -#include "hw/ssi/ssi.h" -#include "hw/block/flash.h" -#include "qemu/timer.h" -#include "qemu/log.h" -#include "hw/arm/sharpsl.h" -#include "ui/console.h" -#include "hw/audio/wm8750.h" -#include "audio/audio.h" -#include "hw/boards.h" -#include "hw/sysbus.h" -#include "hw/adc/max111x.h" -#include "migration/vmstate.h" -#include "exec/address-spaces.h" -#include "qom/object.h" -#include "audio/audio.h" - -enum spitz_model_e { spitz, akita, borzoi, terrier }; - -struct SpitzMachineClass { - MachineClass parent; - enum spitz_model_e model; - int arm_id; -}; - -struct SpitzMachineState { - MachineState parent; - PXA2xxState *mpu; - DeviceState *mux; - DeviceState *lcdtg; - DeviceState *ads7846; - DeviceState *max1111; - DeviceState *scp0; - DeviceState *scp1; - DeviceState *misc_gpio; -}; - -#define TYPE_SPITZ_MACHINE "spitz-common" -OBJECT_DECLARE_TYPE(SpitzMachineState, SpitzMachineClass, SPITZ_MACHINE) - -#define zaurus_printf(format, ...) \ - fprintf(stderr, "%s: " format, __func__, ##__VA_ARGS__) - -/* Spitz Flash */ -#define FLASH_BASE 0x0c000000 -#define FLASH_ECCLPLB 0x00 /* Line parity 7 - 0 bit */ -#define FLASH_ECCLPUB 0x04 /* Line parity 15 - 8 bit */ -#define FLASH_ECCCP 0x08 /* Column parity 5 - 0 bit */ -#define FLASH_ECCCNTR 0x0c /* ECC byte counter */ -#define FLASH_ECCCLRR 0x10 /* Clear ECC */ -#define FLASH_FLASHIO 0x14 /* Flash I/O */ -#define FLASH_FLASHCTL 0x18 /* Flash Control */ - -#define FLASHCTL_CE0 (1 << 0) -#define FLASHCTL_CLE (1 << 1) -#define FLASHCTL_ALE (1 << 2) -#define FLASHCTL_WP (1 << 3) -#define FLASHCTL_CE1 (1 << 4) -#define FLASHCTL_RYBY (1 << 5) -#define FLASHCTL_NCE (FLASHCTL_CE0 | FLASHCTL_CE1) - -#define TYPE_SL_NAND "sl-nand" -OBJECT_DECLARE_SIMPLE_TYPE(SLNANDState, SL_NAND) - -struct SLNANDState { - SysBusDevice parent_obj; - - MemoryRegion iomem; - DeviceState *nand; - uint8_t ctl; - uint8_t manf_id; - uint8_t chip_id; - ECCState ecc; -}; - -static uint64_t sl_read(void *opaque, hwaddr addr, unsigned size) -{ - SLNANDState *s = (SLNANDState *) opaque; - int ryby; - - switch (addr) { -#define BSHR(byte, from, to) ((s->ecc.lp[byte] >> (from - to)) & (1 << to)) - case FLASH_ECCLPLB: - return BSHR(0, 4, 0) | BSHR(0, 5, 2) | BSHR(0, 6, 4) | BSHR(0, 7, 6) | - BSHR(1, 4, 1) | BSHR(1, 5, 3) | BSHR(1, 6, 5) | BSHR(1, 7, 7); - -#define BSHL(byte, from, to) ((s->ecc.lp[byte] << (to - from)) & (1 << to)) - case FLASH_ECCLPUB: - return BSHL(0, 0, 0) | BSHL(0, 1, 2) | BSHL(0, 2, 4) | BSHL(0, 3, 6) | - BSHL(1, 0, 1) | BSHL(1, 1, 3) | BSHL(1, 2, 5) | BSHL(1, 3, 7); - - case FLASH_ECCCP: - return s->ecc.cp; - - case FLASH_ECCCNTR: - return s->ecc.count & 0xff; - - case FLASH_FLASHCTL: - nand_getpins(s->nand, &ryby); - if (ryby) - return s->ctl | FLASHCTL_RYBY; - else - return s->ctl; - - case FLASH_FLASHIO: - if (size == 4) { - return ecc_digest(&s->ecc, nand_getio(s->nand)) | - (ecc_digest(&s->ecc, nand_getio(s->nand)) << 16); - } - return ecc_digest(&s->ecc, nand_getio(s->nand)); - - default: - qemu_log_mask(LOG_GUEST_ERROR, - "sl_read: bad register offset 0x%02" HWADDR_PRIx "\n", - addr); - } - return 0; -} - -static void sl_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - SLNANDState *s = (SLNANDState *) opaque; - - switch (addr) { - case FLASH_ECCCLRR: - /* Value is 
ignored. */ - ecc_reset(&s->ecc); - break; - - case FLASH_FLASHCTL: - s->ctl = value & 0xff & ~FLASHCTL_RYBY; - nand_setpins(s->nand, - s->ctl & FLASHCTL_CLE, - s->ctl & FLASHCTL_ALE, - s->ctl & FLASHCTL_NCE, - s->ctl & FLASHCTL_WP, - 0); - break; - - case FLASH_FLASHIO: - nand_setio(s->nand, ecc_digest(&s->ecc, value & 0xff)); - break; - - default: - qemu_log_mask(LOG_GUEST_ERROR, - "sl_write: bad register offset 0x%02" HWADDR_PRIx "\n", - addr); - } -} - -enum { - FLASH_128M, - FLASH_1024M, -}; - -static const MemoryRegionOps sl_ops = { - .read = sl_read, - .write = sl_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void sl_flash_register(PXA2xxState *cpu, int size) -{ - DeviceState *dev; - - dev = qdev_new(TYPE_SL_NAND); - - qdev_prop_set_uint8(dev, "manf_id", NAND_MFR_SAMSUNG); - if (size == FLASH_128M) - qdev_prop_set_uint8(dev, "chip_id", 0x73); - else if (size == FLASH_1024M) - qdev_prop_set_uint8(dev, "chip_id", 0xf1); - - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, FLASH_BASE); -} - -static void sl_nand_init(Object *obj) -{ - SLNANDState *s = SL_NAND(obj); - SysBusDevice *dev = SYS_BUS_DEVICE(obj); - - s->ctl = 0; - - memory_region_init_io(&s->iomem, obj, &sl_ops, s, "sl", 0x40); - sysbus_init_mmio(dev, &s->iomem); -} - -static void sl_nand_realize(DeviceState *dev, Error **errp) -{ - SLNANDState *s = SL_NAND(dev); - DriveInfo *nand; - - /* FIXME use a qdev drive property instead of drive_get() */ - nand = drive_get(IF_MTD, 0, 0); - s->nand = nand_init(nand ? blk_by_legacy_dinfo(nand) : NULL, - s->manf_id, s->chip_id); -} - -/* Spitz Keyboard */ - -#define SPITZ_KEY_STROBE_NUM 11 -#define SPITZ_KEY_SENSE_NUM 7 - -static const int spitz_gpio_key_sense[SPITZ_KEY_SENSE_NUM] = { - 12, 17, 91, 34, 36, 38, 39 -}; - -static const int spitz_gpio_key_strobe[SPITZ_KEY_STROBE_NUM] = { - 88, 23, 24, 25, 26, 27, 52, 103, 107, 108, 114 -}; - -/* Eighth additional row maps the special keys */ -static int spitz_keymap[SPITZ_KEY_SENSE_NUM + 1][SPITZ_KEY_STROBE_NUM] = { - { 0x1d, 0x02, 0x04, 0x06, 0x07, 0x08, 0x0a, 0x0b, 0x0e, 0x3f, 0x40 }, - { -1 , 0x03, 0x05, 0x13, 0x15, 0x09, 0x17, 0x18, 0x19, 0x41, 0x42 }, - { 0x0f, 0x10, 0x12, 0x14, 0x22, 0x16, 0x24, 0x25, -1 , -1 , -1 }, - { 0x3c, 0x11, 0x1f, 0x21, 0x2f, 0x23, 0x32, 0x26, -1 , 0x36, -1 }, - { 0x3b, 0x1e, 0x20, 0x2e, 0x30, 0x31, 0x34, -1 , 0x1c, 0x2a, -1 }, - { 0x44, 0x2c, 0x2d, 0x0c, 0x39, 0x33, -1 , 0x48, -1 , -1 , 0x38 }, - { 0x37, 0x3d, -1 , 0x45, 0x57, 0x58, 0x4b, 0x50, 0x4d, -1 , -1 }, - { 0x52, 0x43, 0x01, 0x47, 0x49, -1 , -1 , -1 , -1 , -1 , -1 }, -}; - -#define SPITZ_GPIO_AK_INT 13 /* Remote control */ -#define SPITZ_GPIO_SYNC 16 /* Sync button */ -#define SPITZ_GPIO_ON_KEY 95 /* Power button */ -#define SPITZ_GPIO_SWA 97 /* Lid */ -#define SPITZ_GPIO_SWB 96 /* Tablet mode */ - -/* The special buttons are mapped to unused keys */ -static const int spitz_gpiomap[5] = { - SPITZ_GPIO_AK_INT, SPITZ_GPIO_SYNC, SPITZ_GPIO_ON_KEY, - SPITZ_GPIO_SWA, SPITZ_GPIO_SWB, -}; - -#define TYPE_SPITZ_KEYBOARD "spitz-keyboard" -OBJECT_DECLARE_SIMPLE_TYPE(SpitzKeyboardState, SPITZ_KEYBOARD) - -struct SpitzKeyboardState { - SysBusDevice parent_obj; - - qemu_irq sense[SPITZ_KEY_SENSE_NUM]; - qemu_irq gpiomap[5]; - int keymap[0x80]; - uint16_t keyrow[SPITZ_KEY_SENSE_NUM]; - uint16_t strobe_state; - uint16_t sense_state; - - uint16_t pre_map[0x100]; - uint16_t modifiers; - uint16_t imodifiers; - uint8_t fifo[16]; - int fifopos, fifolen; - QEMUTimer *kbdtimer; -}; - -static void 
spitz_keyboard_sense_update(SpitzKeyboardState *s) -{ - int i; - uint16_t strobe, sense = 0; - for (i = 0; i < SPITZ_KEY_SENSE_NUM; i ++) { - strobe = s->keyrow[i] & s->strobe_state; - if (strobe) { - sense |= 1 << i; - if (!(s->sense_state & (1 << i))) - qemu_irq_raise(s->sense[i]); - } else if (s->sense_state & (1 << i)) - qemu_irq_lower(s->sense[i]); - } - - s->sense_state = sense; -} - -static void spitz_keyboard_strobe(void *opaque, int line, int level) -{ - SpitzKeyboardState *s = (SpitzKeyboardState *) opaque; - - if (level) - s->strobe_state |= 1 << line; - else - s->strobe_state &= ~(1 << line); - spitz_keyboard_sense_update(s); -} - -static void spitz_keyboard_keydown(SpitzKeyboardState *s, int keycode) -{ - int spitz_keycode = s->keymap[keycode & 0x7f]; - if (spitz_keycode == -1) - return; - - /* Handle the additional keys */ - if ((spitz_keycode >> 4) == SPITZ_KEY_SENSE_NUM) { - qemu_set_irq(s->gpiomap[spitz_keycode & 0xf], (keycode < 0x80)); - return; - } - - if (keycode & 0x80) - s->keyrow[spitz_keycode >> 4] &= ~(1 << (spitz_keycode & 0xf)); - else - s->keyrow[spitz_keycode >> 4] |= 1 << (spitz_keycode & 0xf); - - spitz_keyboard_sense_update(s); -} - -#define SPITZ_MOD_SHIFT (1 << 7) -#define SPITZ_MOD_CTRL (1 << 8) -#define SPITZ_MOD_FN (1 << 9) - -#define QUEUE_KEY(c) s->fifo[(s->fifopos + s->fifolen ++) & 0xf] = c - -static void spitz_keyboard_handler(void *opaque, int keycode) -{ - SpitzKeyboardState *s = opaque; - uint16_t code; - int mapcode; - switch (keycode) { - case 0x2a: /* Left Shift */ - s->modifiers |= 1; - break; - case 0xaa: - s->modifiers &= ~1; - break; - case 0x36: /* Right Shift */ - s->modifiers |= 2; - break; - case 0xb6: - s->modifiers &= ~2; - break; - case 0x1d: /* Control */ - s->modifiers |= 4; - break; - case 0x9d: - s->modifiers &= ~4; - break; - case 0x38: /* Alt */ - s->modifiers |= 8; - break; - case 0xb8: - s->modifiers &= ~8; - break; - } - - code = s->pre_map[mapcode = ((s->modifiers & 3) ? 
- (keycode | SPITZ_MOD_SHIFT) : - (keycode & ~SPITZ_MOD_SHIFT))]; - - if (code != mapcode) { -#if 0 - if ((code & SPITZ_MOD_SHIFT) && !(s->modifiers & 1)) { - QUEUE_KEY(0x2a | (keycode & 0x80)); - } - if ((code & SPITZ_MOD_CTRL) && !(s->modifiers & 4)) { - QUEUE_KEY(0x1d | (keycode & 0x80)); - } - if ((code & SPITZ_MOD_FN) && !(s->modifiers & 8)) { - QUEUE_KEY(0x38 | (keycode & 0x80)); - } - if ((code & SPITZ_MOD_FN) && (s->modifiers & 1)) { - QUEUE_KEY(0x2a | (~keycode & 0x80)); - } - if ((code & SPITZ_MOD_FN) && (s->modifiers & 2)) { - QUEUE_KEY(0x36 | (~keycode & 0x80)); - } -#else - if (keycode & 0x80) { - if ((s->imodifiers & 1 ) && !(s->modifiers & 1)) - QUEUE_KEY(0x2a | 0x80); - if ((s->imodifiers & 4 ) && !(s->modifiers & 4)) - QUEUE_KEY(0x1d | 0x80); - if ((s->imodifiers & 8 ) && !(s->modifiers & 8)) - QUEUE_KEY(0x38 | 0x80); - if ((s->imodifiers & 0x10) && (s->modifiers & 1)) - QUEUE_KEY(0x2a); - if ((s->imodifiers & 0x20) && (s->modifiers & 2)) - QUEUE_KEY(0x36); - s->imodifiers = 0; - } else { - if ((code & SPITZ_MOD_SHIFT) && - !((s->modifiers | s->imodifiers) & 1)) { - QUEUE_KEY(0x2a); - s->imodifiers |= 1; - } - if ((code & SPITZ_MOD_CTRL) && - !((s->modifiers | s->imodifiers) & 4)) { - QUEUE_KEY(0x1d); - s->imodifiers |= 4; - } - if ((code & SPITZ_MOD_FN) && - !((s->modifiers | s->imodifiers) & 8)) { - QUEUE_KEY(0x38); - s->imodifiers |= 8; - } - if ((code & SPITZ_MOD_FN) && (s->modifiers & 1) && - !(s->imodifiers & 0x10)) { - QUEUE_KEY(0x2a | 0x80); - s->imodifiers |= 0x10; - } - if ((code & SPITZ_MOD_FN) && (s->modifiers & 2) && - !(s->imodifiers & 0x20)) { - QUEUE_KEY(0x36 | 0x80); - s->imodifiers |= 0x20; - } - } -#endif - } - - QUEUE_KEY((code & 0x7f) | (keycode & 0x80)); -} - -static void spitz_keyboard_tick(void *opaque) -{ - SpitzKeyboardState *s = (SpitzKeyboardState *) opaque; - - if (s->fifolen) { - spitz_keyboard_keydown(s, s->fifo[s->fifopos ++]); - s->fifolen --; - if (s->fifopos >= 16) - s->fifopos = 0; - } - - timer_mod(s->kbdtimer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - NANOSECONDS_PER_SECOND / 32); -} - -static void spitz_keyboard_pre_map(SpitzKeyboardState *s) -{ - int i; - for (i = 0; i < 0x100; i ++) - s->pre_map[i] = i; - s->pre_map[0x02 | SPITZ_MOD_SHIFT] = 0x02 | SPITZ_MOD_SHIFT; /* exclam */ - s->pre_map[0x28 | SPITZ_MOD_SHIFT] = 0x03 | SPITZ_MOD_SHIFT; /* quotedbl */ - s->pre_map[0x04 | SPITZ_MOD_SHIFT] = 0x04 | SPITZ_MOD_SHIFT; /* # */ - s->pre_map[0x05 | SPITZ_MOD_SHIFT] = 0x05 | SPITZ_MOD_SHIFT; /* dollar */ - s->pre_map[0x06 | SPITZ_MOD_SHIFT] = 0x06 | SPITZ_MOD_SHIFT; /* percent */ - s->pre_map[0x08 | SPITZ_MOD_SHIFT] = 0x07 | SPITZ_MOD_SHIFT; /* ampersand */ - s->pre_map[0x28] = 0x08 | SPITZ_MOD_SHIFT; /* ' */ - s->pre_map[0x0a | SPITZ_MOD_SHIFT] = 0x09 | SPITZ_MOD_SHIFT; /* ( */ - s->pre_map[0x0b | SPITZ_MOD_SHIFT] = 0x0a | SPITZ_MOD_SHIFT; /* ) */ - s->pre_map[0x29 | SPITZ_MOD_SHIFT] = 0x0b | SPITZ_MOD_SHIFT; /* tilde */ - s->pre_map[0x03 | SPITZ_MOD_SHIFT] = 0x0c | SPITZ_MOD_SHIFT; /* at */ - s->pre_map[0xd3] = 0x0e | SPITZ_MOD_FN; /* Delete */ - s->pre_map[0x3a] = 0x0f | SPITZ_MOD_FN; /* Caps_Lock */ - s->pre_map[0x07 | SPITZ_MOD_SHIFT] = 0x11 | SPITZ_MOD_FN; /* ^ */ - s->pre_map[0x0d] = 0x12 | SPITZ_MOD_FN; /* equal */ - s->pre_map[0x0d | SPITZ_MOD_SHIFT] = 0x13 | SPITZ_MOD_FN; /* plus */ - s->pre_map[0x1a] = 0x14 | SPITZ_MOD_FN; /* [ */ - s->pre_map[0x1b] = 0x15 | SPITZ_MOD_FN; /* ] */ - s->pre_map[0x1a | SPITZ_MOD_SHIFT] = 0x16 | SPITZ_MOD_FN; /* { */ - s->pre_map[0x1b | SPITZ_MOD_SHIFT] = 0x17 | SPITZ_MOD_FN; /* } */ - 
s->pre_map[0x27] = 0x22 | SPITZ_MOD_FN; /* semicolon */ - s->pre_map[0x27 | SPITZ_MOD_SHIFT] = 0x23 | SPITZ_MOD_FN; /* colon */ - s->pre_map[0x09 | SPITZ_MOD_SHIFT] = 0x24 | SPITZ_MOD_FN; /* asterisk */ - s->pre_map[0x2b] = 0x25 | SPITZ_MOD_FN; /* backslash */ - s->pre_map[0x2b | SPITZ_MOD_SHIFT] = 0x26 | SPITZ_MOD_FN; /* bar */ - s->pre_map[0x0c | SPITZ_MOD_SHIFT] = 0x30 | SPITZ_MOD_FN; /* _ */ - s->pre_map[0x33 | SPITZ_MOD_SHIFT] = 0x33 | SPITZ_MOD_FN; /* less */ - s->pre_map[0x35] = 0x33 | SPITZ_MOD_SHIFT; /* slash */ - s->pre_map[0x34 | SPITZ_MOD_SHIFT] = 0x34 | SPITZ_MOD_FN; /* greater */ - s->pre_map[0x35 | SPITZ_MOD_SHIFT] = 0x34 | SPITZ_MOD_SHIFT; /* question */ - s->pre_map[0x49] = 0x48 | SPITZ_MOD_FN; /* Page_Up */ - s->pre_map[0x51] = 0x50 | SPITZ_MOD_FN; /* Page_Down */ - - s->modifiers = 0; - s->imodifiers = 0; - s->fifopos = 0; - s->fifolen = 0; -} - -#undef SPITZ_MOD_SHIFT -#undef SPITZ_MOD_CTRL -#undef SPITZ_MOD_FN - -static int spitz_keyboard_post_load(void *opaque, int version_id) -{ - SpitzKeyboardState *s = (SpitzKeyboardState *) opaque; - - /* Release all pressed keys */ - memset(s->keyrow, 0, sizeof(s->keyrow)); - spitz_keyboard_sense_update(s); - s->modifiers = 0; - s->imodifiers = 0; - s->fifopos = 0; - s->fifolen = 0; - - return 0; -} - -static void spitz_keyboard_register(PXA2xxState *cpu) -{ - int i; - DeviceState *dev; - SpitzKeyboardState *s; - - dev = sysbus_create_simple(TYPE_SPITZ_KEYBOARD, -1, NULL); - s = SPITZ_KEYBOARD(dev); - - for (i = 0; i < SPITZ_KEY_SENSE_NUM; i ++) - qdev_connect_gpio_out(dev, i, qdev_get_gpio_in(cpu->gpio, spitz_gpio_key_sense[i])); - - for (i = 0; i < 5; i ++) - s->gpiomap[i] = qdev_get_gpio_in(cpu->gpio, spitz_gpiomap[i]); - - if (!graphic_rotate) - s->gpiomap[4] = qemu_irq_invert(s->gpiomap[4]); - - for (i = 0; i < 5; i++) - qemu_set_irq(s->gpiomap[i], 0); - - for (i = 0; i < SPITZ_KEY_STROBE_NUM; i ++) - qdev_connect_gpio_out(cpu->gpio, spitz_gpio_key_strobe[i], - qdev_get_gpio_in(dev, i)); - - timer_mod(s->kbdtimer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); - - qemu_add_kbd_event_handler(spitz_keyboard_handler, s); -} - -static void spitz_keyboard_init(Object *obj) -{ - DeviceState *dev = DEVICE(obj); - SpitzKeyboardState *s = SPITZ_KEYBOARD(obj); - int i, j; - - for (i = 0; i < 0x80; i ++) - s->keymap[i] = -1; - for (i = 0; i < SPITZ_KEY_SENSE_NUM + 1; i ++) - for (j = 0; j < SPITZ_KEY_STROBE_NUM; j ++) - if (spitz_keymap[i][j] != -1) - s->keymap[spitz_keymap[i][j]] = (i << 4) | j; - - spitz_keyboard_pre_map(s); - - qdev_init_gpio_in(dev, spitz_keyboard_strobe, SPITZ_KEY_STROBE_NUM); - qdev_init_gpio_out(dev, s->sense, SPITZ_KEY_SENSE_NUM); -} - -static void spitz_keyboard_realize(DeviceState *dev, Error **errp) -{ - SpitzKeyboardState *s = SPITZ_KEYBOARD(dev); - s->kbdtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, spitz_keyboard_tick, s); -} - -/* LCD backlight controller */ - -#define LCDTG_RESCTL 0x00 -#define LCDTG_PHACTRL 0x01 -#define LCDTG_DUTYCTRL 0x02 -#define LCDTG_POWERREG0 0x03 -#define LCDTG_POWERREG1 0x04 -#define LCDTG_GPOR3 0x05 -#define LCDTG_PICTRL 0x06 -#define LCDTG_POLCTRL 0x07 - -#define TYPE_SPITZ_LCDTG "spitz-lcdtg" -OBJECT_DECLARE_SIMPLE_TYPE(SpitzLCDTG, SPITZ_LCDTG) - -struct SpitzLCDTG { - SSIPeripheral ssidev; - uint32_t bl_intensity; - uint32_t bl_power; -}; - -static void spitz_bl_update(SpitzLCDTG *s) -{ - if (s->bl_power && s->bl_intensity) - zaurus_printf("LCD Backlight now at %u/63\n", s->bl_intensity); - else - zaurus_printf("LCD Backlight now off\n"); -} - -static inline void spitz_bl_bit5(void 
*opaque, int line, int level) -{ - SpitzLCDTG *s = opaque; - int prev = s->bl_intensity; - - if (level) - s->bl_intensity &= ~0x20; - else - s->bl_intensity |= 0x20; - - if (s->bl_power && prev != s->bl_intensity) - spitz_bl_update(s); -} - -static inline void spitz_bl_power(void *opaque, int line, int level) -{ - SpitzLCDTG *s = opaque; - s->bl_power = !!level; - spitz_bl_update(s); -} - -static uint32_t spitz_lcdtg_transfer(SSIPeripheral *dev, uint32_t value) -{ - SpitzLCDTG *s = SPITZ_LCDTG(dev); - int addr; - addr = value >> 5; - value &= 0x1f; - - switch (addr) { - case LCDTG_RESCTL: - if (value) - zaurus_printf("LCD in QVGA mode\n"); - else - zaurus_printf("LCD in VGA mode\n"); - break; - - case LCDTG_DUTYCTRL: - s->bl_intensity &= ~0x1f; - s->bl_intensity |= value; - if (s->bl_power) - spitz_bl_update(s); - break; - - case LCDTG_POWERREG0: - /* Set common voltage to M62332FP */ - break; - } - return 0; -} - -static void spitz_lcdtg_realize(SSIPeripheral *ssi, Error **errp) -{ - SpitzLCDTG *s = SPITZ_LCDTG(ssi); - DeviceState *dev = DEVICE(s); - - s->bl_power = 0; - s->bl_intensity = 0x20; - - qdev_init_gpio_in_named(dev, spitz_bl_bit5, "bl_bit5", 1); - qdev_init_gpio_in_named(dev, spitz_bl_power, "bl_power", 1); -} - -/* SSP devices */ - -#define CORGI_SSP_PORT 2 - -#define SPITZ_GPIO_LCDCON_CS 53 -#define SPITZ_GPIO_ADS7846_CS 14 -#define SPITZ_GPIO_MAX1111_CS 20 -#define SPITZ_GPIO_TP_INT 11 - -#define TYPE_CORGI_SSP "corgi-ssp" -OBJECT_DECLARE_SIMPLE_TYPE(CorgiSSPState, CORGI_SSP) - -/* "Demux" the signal based on current chipselect */ -struct CorgiSSPState { - SSIPeripheral ssidev; - SSIBus *bus[3]; - uint32_t enable[3]; -}; - -static uint32_t corgi_ssp_transfer(SSIPeripheral *dev, uint32_t value) -{ - CorgiSSPState *s = CORGI_SSP(dev); - int i; - - for (i = 0; i < 3; i++) { - if (s->enable[i]) { - return ssi_transfer(s->bus[i], value); - } - } - return 0; -} - -static void corgi_ssp_gpio_cs(void *opaque, int line, int level) -{ - CorgiSSPState *s = (CorgiSSPState *)opaque; - assert(line >= 0 && line < 3); - s->enable[line] = !level; -} - -#define MAX1111_BATT_VOLT 1 -#define MAX1111_BATT_TEMP 2 -#define MAX1111_ACIN_VOLT 3 - -#define SPITZ_BATTERY_TEMP 0xe0 /* About 2.9V */ -#define SPITZ_BATTERY_VOLT 0xd0 /* About 4.0V */ -#define SPITZ_CHARGEON_ACIN 0x80 /* About 5.0V */ - -static void corgi_ssp_realize(SSIPeripheral *d, Error **errp) -{ - DeviceState *dev = DEVICE(d); - CorgiSSPState *s = CORGI_SSP(d); - - qdev_init_gpio_in(dev, corgi_ssp_gpio_cs, 3); - s->bus[0] = ssi_create_bus(dev, "ssi0"); - s->bus[1] = ssi_create_bus(dev, "ssi1"); - s->bus[2] = ssi_create_bus(dev, "ssi2"); -} - -static void spitz_ssp_attach(SpitzMachineState *sms) -{ - void *bus; - - sms->mux = ssi_create_peripheral(sms->mpu->ssp[CORGI_SSP_PORT - 1], - TYPE_CORGI_SSP); - - bus = qdev_get_child_bus(sms->mux, "ssi0"); - sms->lcdtg = ssi_create_peripheral(bus, TYPE_SPITZ_LCDTG); - - bus = qdev_get_child_bus(sms->mux, "ssi1"); - sms->ads7846 = ssi_create_peripheral(bus, "ads7846"); - qdev_connect_gpio_out(sms->ads7846, 0, - qdev_get_gpio_in(sms->mpu->gpio, SPITZ_GPIO_TP_INT)); - - bus = qdev_get_child_bus(sms->mux, "ssi2"); - sms->max1111 = qdev_new(TYPE_MAX_1111); - qdev_prop_set_uint8(sms->max1111, "input1" /* BATT_VOLT */, - SPITZ_BATTERY_VOLT); - qdev_prop_set_uint8(sms->max1111, "input2" /* BATT_TEMP */, 0); - qdev_prop_set_uint8(sms->max1111, "input3" /* ACIN_VOLT */, - SPITZ_CHARGEON_ACIN); - ssi_realize_and_unref(sms->max1111, bus, &error_fatal); - - qdev_connect_gpio_out(sms->mpu->gpio, 
SPITZ_GPIO_LCDCON_CS, - qdev_get_gpio_in(sms->mux, 0)); - qdev_connect_gpio_out(sms->mpu->gpio, SPITZ_GPIO_ADS7846_CS, - qdev_get_gpio_in(sms->mux, 1)); - qdev_connect_gpio_out(sms->mpu->gpio, SPITZ_GPIO_MAX1111_CS, - qdev_get_gpio_in(sms->mux, 2)); -} - -/* CF Microdrive */ - -static void spitz_microdrive_attach(PXA2xxState *cpu, int slot) -{ - PCMCIACardState *md; - DriveInfo *dinfo; - - dinfo = drive_get(IF_IDE, 0, 0); - if (!dinfo || dinfo->media_cd) - return; - md = dscm1xxxx_init(dinfo); - pxa2xx_pcmcia_attach(cpu->pcmcia[slot], md); -} - -/* Wm8750 and Max7310 on I2C */ - -#define AKITA_MAX_ADDR 0x18 -#define SPITZ_WM_ADDRL 0x1b -#define SPITZ_WM_ADDRH 0x1a - -#define SPITZ_GPIO_WM 5 - -static void spitz_wm8750_addr(void *opaque, int line, int level) -{ - I2CSlave *wm = (I2CSlave *) opaque; - if (level) - i2c_slave_set_address(wm, SPITZ_WM_ADDRH); - else - i2c_slave_set_address(wm, SPITZ_WM_ADDRL); -} - -static void spitz_i2c_setup(MachineState *machine, PXA2xxState *cpu) -{ - /* Attach the CPU on one end of our I2C bus. */ - I2CBus *bus = pxa2xx_i2c_bus(cpu->i2c[0]); - - /* Attach a WM8750 to the bus */ - I2CSlave *i2c_dev = i2c_slave_new(TYPE_WM8750, 0); - DeviceState *wm = DEVICE(i2c_dev); - - if (machine->audiodev) { - qdev_prop_set_string(wm, "audiodev", machine->audiodev); - } - i2c_slave_realize_and_unref(i2c_dev, bus, &error_abort); - - spitz_wm8750_addr(wm, 0, 0); - qdev_connect_gpio_out(cpu->gpio, SPITZ_GPIO_WM, - qemu_allocate_irq(spitz_wm8750_addr, wm, 0)); - /* .. and to the sound interface. */ - cpu->i2s->opaque = wm; - cpu->i2s->codec_out = wm8750_dac_dat; - cpu->i2s->codec_in = wm8750_adc_dat; - wm8750_data_req_set(wm, cpu->i2s->data_req, cpu->i2s); -} - -static void spitz_akita_i2c_setup(PXA2xxState *cpu) -{ - /* Attach a Max7310 to Akita I2C bus. */ - i2c_slave_create_simple(pxa2xx_i2c_bus(cpu->i2c[0]), "max7310", - AKITA_MAX_ADDR); -} - -/* Other peripherals */ - -/* - * Encapsulation of some miscellaneous GPIO line behaviour for the Spitz boards. - * - * QEMU interface: - * + named GPIO inputs "green-led", "orange-led", "charging", "discharging": - * these currently just print messages that the line has been signalled - * + named GPIO input "adc-temp-on": set to cause the battery-temperature - * value to be passed to the max111x ADC - * + named GPIO output "adc-temp": the ADC value, to be wired up to the max111x - */ -#define TYPE_SPITZ_MISC_GPIO "spitz-misc-gpio" -OBJECT_DECLARE_SIMPLE_TYPE(SpitzMiscGPIOState, SPITZ_MISC_GPIO) - -struct SpitzMiscGPIOState { - SysBusDevice parent_obj; - - qemu_irq adc_value; -}; - -static void spitz_misc_charging(void *opaque, int n, int level) -{ - zaurus_printf("Charging %s.\n", level ? "off" : "on"); -} - -static void spitz_misc_discharging(void *opaque, int n, int level) -{ - zaurus_printf("Discharging %s.\n", level ? "off" : "on"); -} - -static void spitz_misc_green_led(void *opaque, int n, int level) -{ - zaurus_printf("Green LED %s.\n", level ? "off" : "on"); -} - -static void spitz_misc_orange_led(void *opaque, int n, int level) -{ - zaurus_printf("Orange LED %s.\n", level ? "off" : "on"); -} - -static void spitz_misc_adc_temp(void *opaque, int n, int level) -{ - SpitzMiscGPIOState *s = SPITZ_MISC_GPIO(opaque); - int batt_temp = level ? 
SPITZ_BATTERY_TEMP : 0; - - qemu_set_irq(s->adc_value, batt_temp); -} - -static void spitz_misc_gpio_init(Object *obj) -{ - SpitzMiscGPIOState *s = SPITZ_MISC_GPIO(obj); - DeviceState *dev = DEVICE(obj); - - qdev_init_gpio_in_named(dev, spitz_misc_charging, "charging", 1); - qdev_init_gpio_in_named(dev, spitz_misc_discharging, "discharging", 1); - qdev_init_gpio_in_named(dev, spitz_misc_green_led, "green-led", 1); - qdev_init_gpio_in_named(dev, spitz_misc_orange_led, "orange-led", 1); - qdev_init_gpio_in_named(dev, spitz_misc_adc_temp, "adc-temp-on", 1); - - qdev_init_gpio_out_named(dev, &s->adc_value, "adc-temp", 1); -} - -#define SPITZ_SCP_LED_GREEN 1 -#define SPITZ_SCP_JK_B 2 -#define SPITZ_SCP_CHRG_ON 3 -#define SPITZ_SCP_MUTE_L 4 -#define SPITZ_SCP_MUTE_R 5 -#define SPITZ_SCP_CF_POWER 6 -#define SPITZ_SCP_LED_ORANGE 7 -#define SPITZ_SCP_JK_A 8 -#define SPITZ_SCP_ADC_TEMP_ON 9 -#define SPITZ_SCP2_IR_ON 1 -#define SPITZ_SCP2_AKIN_PULLUP 2 -#define SPITZ_SCP2_BACKLIGHT_CONT 7 -#define SPITZ_SCP2_BACKLIGHT_ON 8 -#define SPITZ_SCP2_MIC_BIAS 9 - -static void spitz_scoop_gpio_setup(SpitzMachineState *sms) -{ - DeviceState *miscdev = sysbus_create_simple(TYPE_SPITZ_MISC_GPIO, -1, NULL); - - sms->misc_gpio = miscdev; - - qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_CHRG_ON, - qdev_get_gpio_in_named(miscdev, "charging", 0)); - qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_JK_B, - qdev_get_gpio_in_named(miscdev, "discharging", 0)); - qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_LED_GREEN, - qdev_get_gpio_in_named(miscdev, "green-led", 0)); - qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_LED_ORANGE, - qdev_get_gpio_in_named(miscdev, "orange-led", 0)); - qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_ADC_TEMP_ON, - qdev_get_gpio_in_named(miscdev, "adc-temp-on", 0)); - qdev_connect_gpio_out_named(miscdev, "adc-temp", 0, - qdev_get_gpio_in(sms->max1111, MAX1111_BATT_TEMP)); - - if (sms->scp1) { - qdev_connect_gpio_out(sms->scp1, SPITZ_SCP2_BACKLIGHT_CONT, - qdev_get_gpio_in_named(sms->lcdtg, "bl_bit5", 0)); - qdev_connect_gpio_out(sms->scp1, SPITZ_SCP2_BACKLIGHT_ON, - qdev_get_gpio_in_named(sms->lcdtg, "bl_power", 0)); - } -} - -#define SPITZ_GPIO_HSYNC 22 -#define SPITZ_GPIO_SD_DETECT 9 -#define SPITZ_GPIO_SD_WP 81 -#define SPITZ_GPIO_ON_RESET 89 -#define SPITZ_GPIO_BAT_COVER 90 -#define SPITZ_GPIO_CF1_IRQ 105 -#define SPITZ_GPIO_CF1_CD 94 -#define SPITZ_GPIO_CF2_IRQ 106 -#define SPITZ_GPIO_CF2_CD 93 - -static int spitz_hsync; - -static void spitz_lcd_hsync_handler(void *opaque, int line, int level) -{ - PXA2xxState *cpu = (PXA2xxState *) opaque; - qemu_set_irq(qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_HSYNC), spitz_hsync); - spitz_hsync ^= 1; -} - -static void spitz_reset(void *opaque, int line, int level) -{ - if (level) { - qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); - } -} - -static void spitz_gpio_setup(PXA2xxState *cpu, int slots) -{ - qemu_irq lcd_hsync; - qemu_irq reset; - - /* - * Bad hack: We toggle the LCD hsync GPIO on every GPIO status - * read to satisfy broken guests that poll-wait for hsync. - * Simulating a real hsync event would be less practical and - * wouldn't guarantee that a guest ever exits the loop. 
- */ - spitz_hsync = 0; - lcd_hsync = qemu_allocate_irq(spitz_lcd_hsync_handler, cpu, 0); - pxa2xx_gpio_read_notifier(cpu->gpio, lcd_hsync); - pxa2xx_lcd_vsync_notifier(cpu->lcd, lcd_hsync); - - /* MMC/SD host */ - pxa2xx_mmci_handlers(cpu->mmc, - qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_SD_WP), - qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_SD_DETECT)); - - /* Battery lock always closed */ - qemu_irq_raise(qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_BAT_COVER)); - - /* Handle reset */ - reset = qemu_allocate_irq(spitz_reset, cpu, 0); - qdev_connect_gpio_out(cpu->gpio, SPITZ_GPIO_ON_RESET, reset); - - /* PCMCIA signals: card's IRQ and Card-Detect */ - if (slots >= 1) - pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[0], - qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_CF1_IRQ), - qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_CF1_CD)); - if (slots >= 2) - pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[1], - qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_CF2_IRQ), - qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_CF2_CD)); -} - -/* Board init. */ -#define SPITZ_RAM 0x04000000 -#define SPITZ_ROM 0x00800000 - -static struct arm_boot_info spitz_binfo = { - .loader_start = PXA2XX_SDRAM_BASE, - .ram_size = 0x04000000, -}; - -static void spitz_common_init(MachineState *machine) -{ - SpitzMachineClass *smc = SPITZ_MACHINE_GET_CLASS(machine); - SpitzMachineState *sms = SPITZ_MACHINE(machine); - enum spitz_model_e model = smc->model; - PXA2xxState *mpu; - MemoryRegion *rom = g_new(MemoryRegion, 1); - - /* Setup CPU & memory */ - mpu = pxa270_init(spitz_binfo.ram_size, machine->cpu_type); - sms->mpu = mpu; - - sl_flash_register(mpu, (model == spitz) ? FLASH_128M : FLASH_1024M); - - memory_region_init_rom(rom, NULL, "spitz.rom", SPITZ_ROM, &error_fatal); - memory_region_add_subregion(get_system_memory(), 0, rom); - - /* Setup peripherals */ - spitz_keyboard_register(mpu); - - spitz_ssp_attach(sms); - - sms->scp0 = sysbus_create_simple("scoop", 0x10800000, NULL); - if (model != akita) { - sms->scp1 = sysbus_create_simple("scoop", 0x08800040, NULL); - } else { - sms->scp1 = NULL; - } - - spitz_scoop_gpio_setup(sms); - - spitz_gpio_setup(mpu, (model == akita) ? 1 : 2); - - spitz_i2c_setup(machine, mpu); - - if (model == akita) - spitz_akita_i2c_setup(mpu); - - if (model == terrier) - /* A 6.0 GB microdrive is permanently sitting in CF slot 1. */ - spitz_microdrive_attach(mpu, 1); - else if (model != akita) - /* A 4.0 GB microdrive is permanently sitting in CF slot 0. 
*/ - spitz_microdrive_attach(mpu, 0); - - spitz_binfo.board_id = smc->arm_id; - arm_load_kernel(mpu->cpu, machine, &spitz_binfo); - sl_bootparam_write(SL_PXA_PARAM_BASE); -} - -static void spitz_common_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - - mc->block_default_type = IF_IDE; - mc->ignore_memory_transaction_failures = true; - mc->init = spitz_common_init; - mc->deprecation_reason = "machine is old and unmaintained"; - - machine_add_audiodev_property(mc); -} - -static const TypeInfo spitz_common_info = { - .name = TYPE_SPITZ_MACHINE, - .parent = TYPE_MACHINE, - .abstract = true, - .instance_size = sizeof(SpitzMachineState), - .class_size = sizeof(SpitzMachineClass), - .class_init = spitz_common_class_init, -}; - -static void akitapda_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - SpitzMachineClass *smc = SPITZ_MACHINE_CLASS(oc); - - mc->desc = "Sharp SL-C1000 (Akita) PDA (PXA270)"; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c0"); - smc->model = akita; - smc->arm_id = 0x2e8; -} - -static const TypeInfo akitapda_type = { - .name = MACHINE_TYPE_NAME("akita"), - .parent = TYPE_SPITZ_MACHINE, - .class_init = akitapda_class_init, -}; - -static void spitzpda_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - SpitzMachineClass *smc = SPITZ_MACHINE_CLASS(oc); - - mc->desc = "Sharp SL-C3000 (Spitz) PDA (PXA270)"; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c0"); - smc->model = spitz; - smc->arm_id = 0x2c9; -} - -static const TypeInfo spitzpda_type = { - .name = MACHINE_TYPE_NAME("spitz"), - .parent = TYPE_SPITZ_MACHINE, - .class_init = spitzpda_class_init, -}; - -static void borzoipda_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - SpitzMachineClass *smc = SPITZ_MACHINE_CLASS(oc); - - mc->desc = "Sharp SL-C3100 (Borzoi) PDA (PXA270)"; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c0"); - smc->model = borzoi; - smc->arm_id = 0x33f; -} - -static const TypeInfo borzoipda_type = { - .name = MACHINE_TYPE_NAME("borzoi"), - .parent = TYPE_SPITZ_MACHINE, - .class_init = borzoipda_class_init, -}; - -static void terrierpda_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - SpitzMachineClass *smc = SPITZ_MACHINE_CLASS(oc); - - mc->desc = "Sharp SL-C3200 (Terrier) PDA (PXA270)"; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c5"); - smc->model = terrier; - smc->arm_id = 0x33f; -} - -static const TypeInfo terrierpda_type = { - .name = MACHINE_TYPE_NAME("terrier"), - .parent = TYPE_SPITZ_MACHINE, - .class_init = terrierpda_class_init, -}; - -static void spitz_machine_init(void) -{ - type_register_static(&spitz_common_info); - type_register_static(&akitapda_type); - type_register_static(&spitzpda_type); - type_register_static(&borzoipda_type); - type_register_static(&terrierpda_type); -} - -type_init(spitz_machine_init) - -static bool is_version_0(void *opaque, int version_id) -{ - return version_id == 0; -} - -static const VMStateDescription vmstate_sl_nand_info = { - .name = "sl-nand", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_UINT8(ctl, SLNANDState), - VMSTATE_STRUCT(ecc, SLNANDState, 0, vmstate_ecc_state, ECCState), - VMSTATE_END_OF_LIST(), - }, -}; - -static Property sl_nand_properties[] = { - DEFINE_PROP_UINT8("manf_id", SLNANDState, manf_id, NAND_MFR_SAMSUNG), - DEFINE_PROP_UINT8("chip_id", SLNANDState, chip_id, 0xf1), - DEFINE_PROP_END_OF_LIST(), 
-}; - -static void sl_nand_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->vmsd = &vmstate_sl_nand_info; - device_class_set_props(dc, sl_nand_properties); - dc->realize = sl_nand_realize; - /* Reason: init() method uses drive_get() */ - dc->user_creatable = false; -} - -static const TypeInfo sl_nand_info = { - .name = TYPE_SL_NAND, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(SLNANDState), - .instance_init = sl_nand_init, - .class_init = sl_nand_class_init, -}; - -static const VMStateDescription vmstate_spitz_kbd = { - .name = "spitz-keyboard", - .version_id = 1, - .minimum_version_id = 0, - .post_load = spitz_keyboard_post_load, - .fields = (const VMStateField[]) { - VMSTATE_UINT16(sense_state, SpitzKeyboardState), - VMSTATE_UINT16(strobe_state, SpitzKeyboardState), - VMSTATE_UNUSED_TEST(is_version_0, 5), - VMSTATE_END_OF_LIST(), - }, -}; - -static void spitz_keyboard_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->vmsd = &vmstate_spitz_kbd; - dc->realize = spitz_keyboard_realize; -} - -static const TypeInfo spitz_keyboard_info = { - .name = TYPE_SPITZ_KEYBOARD, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(SpitzKeyboardState), - .instance_init = spitz_keyboard_init, - .class_init = spitz_keyboard_class_init, -}; - -static const VMStateDescription vmstate_corgi_ssp_regs = { - .name = "corgi-ssp", - .version_id = 2, - .minimum_version_id = 2, - .fields = (const VMStateField[]) { - VMSTATE_SSI_PERIPHERAL(ssidev, CorgiSSPState), - VMSTATE_UINT32_ARRAY(enable, CorgiSSPState, 3), - VMSTATE_END_OF_LIST(), - } -}; - -static void corgi_ssp_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); - - k->realize = corgi_ssp_realize; - k->transfer = corgi_ssp_transfer; - dc->vmsd = &vmstate_corgi_ssp_regs; -} - -static const TypeInfo corgi_ssp_info = { - .name = TYPE_CORGI_SSP, - .parent = TYPE_SSI_PERIPHERAL, - .instance_size = sizeof(CorgiSSPState), - .class_init = corgi_ssp_class_init, -}; - -static const VMStateDescription vmstate_spitz_lcdtg_regs = { - .name = "spitz-lcdtg", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_SSI_PERIPHERAL(ssidev, SpitzLCDTG), - VMSTATE_UINT32(bl_intensity, SpitzLCDTG), - VMSTATE_UINT32(bl_power, SpitzLCDTG), - VMSTATE_END_OF_LIST(), - } -}; - -static void spitz_lcdtg_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); - - k->realize = spitz_lcdtg_realize; - k->transfer = spitz_lcdtg_transfer; - dc->vmsd = &vmstate_spitz_lcdtg_regs; -} - -static const TypeInfo spitz_lcdtg_info = { - .name = TYPE_SPITZ_LCDTG, - .parent = TYPE_SSI_PERIPHERAL, - .instance_size = sizeof(SpitzLCDTG), - .class_init = spitz_lcdtg_class_init, -}; - -static const TypeInfo spitz_misc_gpio_info = { - .name = TYPE_SPITZ_MISC_GPIO, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(SpitzMiscGPIOState), - .instance_init = spitz_misc_gpio_init, - /* - * No class_init required: device has no internal state so does not - * need to set up reset or vmstate, and does not have a realize method. 
- */ -}; - -static void spitz_register_types(void) -{ - type_register_static(&corgi_ssp_info); - type_register_static(&spitz_lcdtg_info); - type_register_static(&spitz_keyboard_info); - type_register_static(&sl_nand_info); - type_register_static(&spitz_misc_gpio_info); -} - -type_init(spitz_register_types) diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c index 376746251e6..031ea3a24e7 100644 --- a/hw/arm/stellaris.c +++ b/hw/arm/stellaris.c @@ -8,6 +8,7 @@ */ #include "qemu/osdep.h" +#include "qemu/bitops.h" #include "qapi/error.h" #include "hw/core/split-irq.h" #include "hw/sysbus.h" @@ -19,8 +20,8 @@ #include "net/net.h" #include "hw/boards.h" #include "qemu/log.h" -#include "exec/address-spaces.h" -#include "sysemu/sysemu.h" +#include "system/address-spaces.h" +#include "system/system.h" #include "hw/arm/armv7m.h" #include "hw/char/pl011.h" #include "hw/input/stellaris_gamepad.h" @@ -31,7 +32,7 @@ #include "hw/timer/stellaris-gptm.h" #include "hw/qdev-clock.h" #include "qom/object.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qlist.h" #include "ui/input.h" #define GPIO_A 0 @@ -49,6 +50,31 @@ #define NUM_IRQ_LINES 64 #define NUM_PRIO_BITS 3 +#define NUM_GPIO 7 +#define NUM_UART 4 +#define NUM_GPTM 4 +#define NUM_I2C 2 + +/* + * See Stellaris Data Sheet chapter 5.2.5 "System Control", + * Register 13 .. 17: Device Capabilities 0 .. 4 (DC0 .. DC4). + */ +#define DC1_WDT 3 +#define DC1_HIB 6 +#define DC1_MPU 7 +#define DC1_ADC 16 +#define DC1_PWM 20 +#define DC2_UART(n) (n) +#define DC2_SSI 4 +#define DC2_QEI(n) (8 + n) +#define DC2_I2C(n) (12 + 2 * n) +#define DC2_GPTM(n) (16 + n) +#define DC2_COMP(n) (24 + n) +#define DC4_GPIO(n) (n) +#define DC4_EMAC 28 + +#define DEV_CAP(_dc, _cap) extract32(board->dc##_dc, DC##_dc##_##_cap, 1) + typedef const struct { const char *name; uint32_t did0; @@ -101,7 +127,7 @@ static void ssys_update(ssys_state *s) qemu_set_irq(s->irq, (s->int_status & s->int_mask) != 0); } -static uint32_t pllcfg_sandstorm[16] = { +static const uint32_t pllcfg_sandstorm[16] = { 0x31c0, /* 1 Mhz */ 0x1ae0, /* 1.8432 Mhz */ 0x18c0, /* 2 Mhz */ @@ -120,7 +146,7 @@ static uint32_t pllcfg_sandstorm[16] = { 0x585b /* 8.192 Mhz */ }; -static uint32_t pllcfg_fury[16] = { +static const uint32_t pllcfg_fury[16] = { 0x3200, /* 1 Mhz */ 0x1b20, /* 1.8432 Mhz */ 0x1900, /* 2 Mhz */ @@ -438,7 +464,7 @@ static const VMStateDescription vmstate_stellaris_sys = { } }; -static Property stellaris_sys_properties[] = { +static const Property stellaris_sys_properties[] = { DEFINE_PROP_UINT32("user0", ssys_state, user0, 0), DEFINE_PROP_UINT32("user1", ssys_state, user1, 0), DEFINE_PROP_UINT32("did0", ssys_state, did0, 0), @@ -448,7 +474,6 @@ static Property stellaris_sys_properties[] = { DEFINE_PROP_UINT32("dc2", ssys_state, dc2, 0), DEFINE_PROP_UINT32("dc3", ssys_state, dc3, 0), DEFINE_PROP_UINT32("dc4", ssys_state, dc4, 0), - DEFINE_PROP_END_OF_LIST() }; static void stellaris_sys_instance_init(Object *obj) @@ -965,7 +990,7 @@ static void stellaris_adc_init(Object *obj) } /* Board init. 
*/ -static stellaris_board_info stellaris_boards[] = { +static const stellaris_board_info stellaris_boards[] = { { "LM3S811EVB", 0, 0x0032000e, @@ -990,19 +1015,20 @@ static stellaris_board_info stellaris_boards[] = { static void stellaris_init(MachineState *ms, stellaris_board_info *board) { - static const int uart_irq[] = {5, 6, 33, 34}; - static const int timer_irq[] = {19, 21, 23, 35}; - static const uint32_t gpio_addr[7] = + static const int uart_irq[NUM_UART] = {5, 6, 33, 34}; + static const int timer_irq[NUM_GPTM] = {19, 21, 23, 35}; + static const uint32_t gpio_addr[NUM_GPIO] = { 0x40004000, 0x40005000, 0x40006000, 0x40007000, 0x40024000, 0x40025000, 0x40026000}; - static const int gpio_irq[7] = {0, 1, 2, 3, 4, 30, 31}; + static const int gpio_irq[NUM_GPIO] = {0, 1, 2, 3, 4, 30, 31}; + static const uint32_t i2c_addr[NUM_I2C] = {0x40020000, 0x40021000}; + static const int i2c_irq[NUM_I2C] = {8, 37}; /* Memory map of SoC devices, from * Stellaris LM3S6965 Microcontroller Data Sheet (rev I) * http://www.ti.com/lit/ds/symlink/lm3s6965.pdf * * 40000000 wdtimer - * 40002000 i2c (unimplemented) * 40004000 GPIO * 40005000 GPIO * 40006000 GPIO @@ -1032,13 +1058,13 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) */ Object *soc_container; - DeviceState *gpio_dev[7], *nvic; - qemu_irq gpio_in[7][8]; - qemu_irq gpio_out[7][8]; + DeviceState *gpio_dev[NUM_GPIO], *armv7m, *nvic; + qemu_irq gpio_in[NUM_GPIO][8]; + qemu_irq gpio_out[NUM_GPIO][8]; qemu_irq adc; int sram_size; int flash_size; - I2CBus *i2c; + DeviceState *i2c_dev[NUM_I2C] = { }; DeviceState *dev; DeviceState *ssys_dev; int i; @@ -1053,7 +1079,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) flash_size = (((board->dc0 & 0xffff) + 1) << 1) * 1024; sram_size = ((board->dc0 >> 18) + 1) * 1024; - soc_container = object_new("container"); + soc_container = object_new(TYPE_CONTAINER); object_property_add_child(OBJECT(ms), "soc", soc_container); /* Flash programming is done via the SCU, so pretend it is ROM. 
*/ @@ -1096,25 +1122,26 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) qdev_prop_set_uint32(ssys_dev, "dc4", board->dc4); sysbus_realize_and_unref(SYS_BUS_DEVICE(ssys_dev), &error_fatal); - nvic = qdev_new(TYPE_ARMV7M); - object_property_add_child(soc_container, "v7m", OBJECT(nvic)); - qdev_prop_set_uint32(nvic, "num-irq", NUM_IRQ_LINES); - qdev_prop_set_uint8(nvic, "num-prio-bits", NUM_PRIO_BITS); - qdev_prop_set_string(nvic, "cpu-type", ms->cpu_type); - qdev_prop_set_bit(nvic, "enable-bitband", true); - qdev_connect_clock_in(nvic, "cpuclk", + armv7m = qdev_new(TYPE_ARMV7M); + object_property_add_child(soc_container, "v7m", OBJECT(armv7m)); + qdev_prop_set_uint32(armv7m, "num-irq", NUM_IRQ_LINES); + qdev_prop_set_uint8(armv7m, "num-prio-bits", NUM_PRIO_BITS); + qdev_prop_set_string(armv7m, "cpu-type", ms->cpu_type); + qdev_prop_set_bit(armv7m, "enable-bitband", true); + qdev_connect_clock_in(armv7m, "cpuclk", qdev_get_clock_out(ssys_dev, "SYSCLK")); /* This SoC does not connect the systick reference clock */ - object_property_set_link(OBJECT(nvic), "memory", + object_property_set_link(OBJECT(armv7m), "memory", OBJECT(get_system_memory()), &error_abort); /* This will exit with an error if the user passed us a bad cpu_type */ - sysbus_realize_and_unref(SYS_BUS_DEVICE(nvic), &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(armv7m), &error_fatal); + nvic = armv7m; /* Now we can wire up the IRQ and MMIO of the system registers */ sysbus_mmio_map(SYS_BUS_DEVICE(ssys_dev), 0, 0x400fe000); sysbus_connect_irq(SYS_BUS_DEVICE(ssys_dev), 0, qdev_get_gpio_in(nvic, 28)); - if (board->dc1 & (1 << 16)) { + if (DEV_CAP(1, ADC)) { dev = sysbus_create_varargs(TYPE_STELLARIS_ADC, 0x40038000, qdev_get_gpio_in(nvic, 14), qdev_get_gpio_in(nvic, 15), @@ -1125,8 +1152,8 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) } else { adc = NULL; } - for (i = 0; i < 4; i++) { - if (board->dc2 & (0x10000 << i)) { + for (i = 0; i < NUM_GPTM; i++) { + if (DEV_CAP(2, GPTM(i))) { SysBusDevice *sbd; dev = qdev_new(TYPE_STELLARIS_GPTM); @@ -1143,7 +1170,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) } } - if (board->dc1 & (1 << 3)) { /* watchdog present */ + if (DEV_CAP(1, WDT)) { dev = qdev_new(TYPE_LUMINARY_WATCHDOG); object_property_add_child(soc_container, "wdg", OBJECT(dev)); qdev_connect_clock_in(dev, "WDOGCLK", @@ -1159,8 +1186,8 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) } - for (i = 0; i < 7; i++) { - if (board->dc4 & (1 << i)) { + for (i = 0; i < NUM_GPIO; i++) { + if (DEV_CAP(4, GPIO(i))) { gpio_dev[i] = sysbus_create_simple("pl061_luminary", gpio_addr[i], qdev_get_gpio_in(nvic, gpio_irq[i])); @@ -1171,17 +1198,21 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) } } - if (board->dc2 & (1 << 12)) { - dev = sysbus_create_simple(TYPE_STELLARIS_I2C, 0x40020000, - qdev_get_gpio_in(nvic, 8)); - i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c"); - if (board->peripherals & BP_OLED_I2C) { - i2c_slave_create_simple(i2c, "ssd0303", 0x3d); + for (i = 0; i < NUM_I2C; i++) { + if (DEV_CAP(2, I2C(i))) { + i2c_dev[i] = sysbus_create_simple(TYPE_STELLARIS_I2C, i2c_addr[i], + qdev_get_gpio_in(nvic, + i2c_irq[i])); } } + if (board->peripherals & BP_OLED_I2C) { + I2CBus *bus = (I2CBus *)qdev_get_child_bus(i2c_dev[0], "i2c"); - for (i = 0; i < 4; i++) { - if (board->dc2 & (1 << i)) { + i2c_slave_create_simple(bus, "ssd0303", 0x3d); + } + + for (i = 0; i < NUM_UART; i++) { + if 
(DEV_CAP(2, UART(i))) { SysBusDevice *sbd; dev = qdev_new("pl011_luminary"); @@ -1193,7 +1224,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(nvic, uart_irq[i])); } } - if (board->dc2 & (1 << 4)) { + if (DEV_CAP(2, SSI)) { dev = sysbus_create_simple("pl022", 0x40008000, qdev_get_gpio_in(nvic, 7)); if (board->peripherals & BP_OLED_SSI) { @@ -1302,7 +1333,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) qemu_irq_raise(gpio_out[GPIO_D][0]); } } - if (board->dc4 & (1 << 28)) { + if (DEV_CAP(4, EMAC)) { DeviceState *enet; enet = qdev_new("stellaris_enet"); @@ -1357,8 +1388,6 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) /* Add dummy regions for the devices we don't implement yet, * so guest accesses don't cause unlogged crashes. */ - create_unimplemented_device("i2c-0", 0x40002000, 0x1000); - create_unimplemented_device("i2c-2", 0x40021000, 0x1000); create_unimplemented_device("PWM", 0x40028000, 0x1000); create_unimplemented_device("QEI-0", 0x4002c000, 0x1000); create_unimplemented_device("QEI-1", 0x4002d000, 0x1000); @@ -1366,7 +1395,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) create_unimplemented_device("hibernation", 0x400fc000, 0x1000); create_unimplemented_device("flash-control", 0x400fd000, 0x1000); - armv7m_load_kernel(ARM_CPU(first_cpu), ms->kernel_filename, 0, flash_size); + armv7m_load_kernel(ARMV7M(armv7m)->cpu, ms->kernel_filename, 0, flash_size); } /* FIXME: Figure out how to generate these from stellaris_boards. */ @@ -1380,7 +1409,11 @@ static void lm3s6965evb_init(MachineState *machine) stellaris_init(machine, &stellaris_boards[1]); } -static void lm3s811evb_class_init(ObjectClass *oc, void *data) +/* + * Stellaris LM3S811 Evaluation Board Schematics: + * https://www.ti.com/lit/ug/symlink/spmu030.pdf + */ +static void lm3s811evb_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1396,7 +1429,11 @@ static const TypeInfo lm3s811evb_type = { .class_init = lm3s811evb_class_init, }; -static void lm3s6965evb_class_init(ObjectClass *oc, void *data) +/* + * Stellaris: LM3S6965 Evaluation Board Schematics: + * https://www.ti.com/lit/ug/symlink/spmu029.pdf + */ +static void lm3s6965evb_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1404,6 +1441,7 @@ static void lm3s6965evb_class_init(ObjectClass *oc, void *data) mc->init = lm3s6965evb_init; mc->ignore_memory_transaction_failures = true; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3"); + mc->auto_create_sdcard = true; } static const TypeInfo lm3s6965evb_type = { @@ -1420,7 +1458,7 @@ static void stellaris_machine_init(void) type_init(stellaris_machine_init) -static void stellaris_i2c_class_init(ObjectClass *klass, void *data) +static void stellaris_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -1439,7 +1477,7 @@ static const TypeInfo stellaris_i2c_info = { .class_init = stellaris_i2c_class_init, }; -static void stellaris_adc_class_init(ObjectClass *klass, void *data) +static void stellaris_adc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -1456,7 +1494,7 @@ static const TypeInfo stellaris_adc_info = { .class_init = stellaris_adc_class_init, }; -static void stellaris_sys_class_init(ObjectClass 
*klass, void *data) +static void stellaris_sys_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/arm/stm32f100_soc.c b/hw/arm/stm32f100_soc.c index 808b783515d..0702d51cc3e 100644 --- a/hw/arm/stm32f100_soc.c +++ b/hw/arm/stm32f100_soc.c @@ -27,12 +27,12 @@ #include "qapi/error.h" #include "qemu/module.h" #include "hw/arm/boot.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/arm/stm32f100_soc.h" #include "hw/qdev-properties.h" #include "hw/qdev-clock.h" #include "hw/misc/unimp.h" -#include "sysemu/sysemu.h" +#include "system/system.h" /* stm32f100_soc implementation is derived from stm32f205_soc */ @@ -181,7 +181,7 @@ static void stm32f100_soc_realize(DeviceState *dev_soc, Error **errp) create_unimplemented_device("CRC", 0x40023000, 0x400); } -static void stm32f100_soc_class_init(ObjectClass *klass, void *data) +static void stm32f100_soc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/arm/stm32f205_soc.c b/hw/arm/stm32f205_soc.c index a451e21f59c..229af7fb108 100644 --- a/hw/arm/stm32f205_soc.c +++ b/hw/arm/stm32f205_soc.c @@ -26,11 +26,11 @@ #include "qapi/error.h" #include "qemu/module.h" #include "hw/arm/boot.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/arm/stm32f205_soc.h" #include "hw/qdev-properties.h" #include "hw/qdev-clock.h" -#include "sysemu/sysemu.h" +#include "system/system.h" /* At the moment only Timer 2 to 5 are modelled */ static const uint32_t timer_addr[STM_NUM_TIMERS] = { 0x40000000, 0x40000400, @@ -202,7 +202,7 @@ static void stm32f205_soc_realize(DeviceState *dev_soc, Error **errp) } } -static void stm32f205_soc_class_init(ObjectClass *klass, void *data) +static void stm32f205_soc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/arm/stm32f405_soc.c b/hw/arm/stm32f405_soc.c index 2ad5b79a069..c8684e2b4c5 100644 --- a/hw/arm/stm32f405_soc.c +++ b/hw/arm/stm32f405_soc.c @@ -24,12 +24,13 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "exec/address-spaces.h" -#include "sysemu/sysemu.h" +#include "system/address-spaces.h" +#include "system/system.h" #include "hw/arm/stm32f405_soc.h" #include "hw/qdev-clock.h" #include "hw/misc/unimp.h" +#define RCC_ADDR 0x40023800 #define SYSCFG_ADD 0x40013800 static const uint32_t usart_addr[] = { 0x40011000, 0x40004400, 0x40004800, 0x40004C00, 0x40005000, 0x40011400, @@ -59,6 +60,8 @@ static void stm32f405_soc_initfn(Object *obj) object_initialize_child(obj, "armv7m", &s->armv7m, TYPE_ARMV7M); + object_initialize_child(obj, "rcc", &s->rcc, TYPE_STM32_RCC); + object_initialize_child(obj, "syscfg", &s->syscfg, TYPE_STM32F4XX_SYSCFG); for (i = 0; i < STM_NUM_USARTS; i++) { @@ -160,6 +163,14 @@ static void stm32f405_soc_realize(DeviceState *dev_soc, Error **errp) return; } + /* Reset and clock controller */ + dev = DEVICE(&s->rcc); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->rcc), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, RCC_ADDR); + /* System configuration controller */ dev = DEVICE(&s->syscfg); if (!sysbus_realize(SYS_BUS_DEVICE(&s->syscfg), errp)) { @@ -276,7 +287,6 @@ static void stm32f405_soc_realize(DeviceState *dev_soc, Error **errp) create_unimplemented_device("GPIOH", 0x40021C00, 0x400); create_unimplemented_device("GPIOI", 0x40022000, 0x400); create_unimplemented_device("CRC", 0x40023000, 
0x400); - create_unimplemented_device("RCC", 0x40023800, 0x400); create_unimplemented_device("Flash Int", 0x40023C00, 0x400); create_unimplemented_device("BKPSRAM", 0x40024000, 0x400); create_unimplemented_device("DMA1", 0x40026000, 0x400); @@ -288,7 +298,7 @@ static void stm32f405_soc_realize(DeviceState *dev_soc, Error **errp) create_unimplemented_device("RNG", 0x50060800, 0x400); } -static void stm32f405_soc_class_init(ObjectClass *klass, void *data) +static void stm32f405_soc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/arm/stm32l4x5_soc.c b/hw/arm/stm32l4x5_soc.c index fac83d349c8..64da5559c07 100644 --- a/hw/arm/stm32l4x5_soc.c +++ b/hw/arm/stm32l4x5_soc.c @@ -24,8 +24,8 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" -#include "exec/address-spaces.h" -#include "sysemu/sysemu.h" +#include "system/address-spaces.h" +#include "system/system.h" #include "hw/or-irq.h" #include "hw/arm/stm32l4x5_soc.h" #include "hw/char/stm32l4x5_usart.h" @@ -236,6 +236,8 @@ static void stm32l4x5_soc_realize(DeviceState *dev_soc, Error **errp) /* System configuration controller */ busdev = SYS_BUS_DEVICE(&s->syscfg); + qdev_connect_clock_in(DEVICE(&s->syscfg), "clk", + qdev_get_clock_out(DEVICE(&(s->rcc)), "syscfg-out")); if (!sysbus_realize(busdev, errp)) { return; } @@ -433,7 +435,7 @@ static void stm32l4x5_soc_realize(DeviceState *dev_soc, Error **errp) create_unimplemented_device("QUADSPI", 0xA0001000, 0x400); } -static void stm32l4x5_soc_class_init(ObjectClass *klass, void *data) +static void stm32l4x5_soc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -444,21 +446,21 @@ static void stm32l4x5_soc_class_init(ObjectClass *klass, void *data) /* No vmstate or reset required: device has no internal state */ } -static void stm32l4x5xc_soc_class_init(ObjectClass *oc, void *data) +static void stm32l4x5xc_soc_class_init(ObjectClass *oc, const void *data) { Stm32l4x5SocClass *ssc = STM32L4X5_SOC_CLASS(oc); ssc->flash_size = 256 * KiB; } -static void stm32l4x5xe_soc_class_init(ObjectClass *oc, void *data) +static void stm32l4x5xe_soc_class_init(ObjectClass *oc, const void *data) { Stm32l4x5SocClass *ssc = STM32L4X5_SOC_CLASS(oc); ssc->flash_size = 512 * KiB; } -static void stm32l4x5xg_soc_class_init(ObjectClass *oc, void *data) +static void stm32l4x5xg_soc_class_init(ObjectClass *oc, const void *data) { Stm32l4x5SocClass *ssc = STM32L4X5_SOC_CLASS(oc); diff --git a/hw/arm/stm32vldiscovery.c b/hw/arm/stm32vldiscovery.c index cc419351605..e6c1f5b8d7d 100644 --- a/hw/arm/stm32vldiscovery.c +++ b/hw/arm/stm32vldiscovery.c @@ -51,7 +51,7 @@ static void stm32vldiscovery_init(MachineState *machine) qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - armv7m_load_kernel(ARM_CPU(first_cpu), + armv7m_load_kernel(STM32F100_SOC(dev)->armv7m.cpu, machine->kernel_filename, 0, FLASH_SIZE); } diff --git a/hw/arm/strongarm.c b/hw/arm/strongarm.c index 823b4931b0a..229c98ddd90 100644 --- a/hw/arm/strongarm.c +++ b/hw/arm/strongarm.c @@ -38,8 +38,8 @@ #include "hw/arm/boot.h" #include "chardev/char-fe.h" #include "chardev/char-serial.h" -#include "sysemu/sysemu.h" -#include "sysemu/rtc.h" +#include "system/system.h" +#include "system/rtc.h" #include "hw/ssi/ssi.h" #include "qapi/error.h" #include "qemu/cutils.h" @@ -215,7 +215,7 @@ static const VMStateDescription vmstate_strongarm_pic_regs = { }, }; -static void 
strongarm_pic_class_init(ObjectClass *klass, void *data) +static void strongarm_pic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -448,7 +448,8 @@ static const VMStateDescription vmstate_strongarm_rtc_regs = { }, }; -static void strongarm_rtc_sysbus_class_init(ObjectClass *klass, void *data) +static void strongarm_rtc_sysbus_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -693,7 +694,7 @@ static const VMStateDescription vmstate_strongarm_gpio_regs = { }, }; -static void strongarm_gpio_class_init(ObjectClass *klass, void *data) +static void strongarm_gpio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -865,7 +866,7 @@ static const VMStateDescription vmstate_strongarm_ppc_regs = { }, }; -static void strongarm_ppc_class_init(ObjectClass *klass, void *data) +static void strongarm_ppc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1332,17 +1333,16 @@ static const VMStateDescription vmstate_strongarm_uart_regs = { }, }; -static Property strongarm_uart_properties[] = { +static const Property strongarm_uart_properties[] = { DEFINE_PROP_CHR("chardev", StrongARMUARTState, chr), - DEFINE_PROP_END_OF_LIST(), }; -static void strongarm_uart_class_init(ObjectClass *klass, void *data) +static void strongarm_uart_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->desc = "StrongARM UART controller"; - dc->reset = strongarm_uart_reset; + device_class_set_legacy_reset(dc, strongarm_uart_reset); dc->vmsd = &vmstate_strongarm_uart_regs; device_class_set_props(dc, strongarm_uart_properties); dc->realize = strongarm_uart_realize; @@ -1590,12 +1590,12 @@ static const VMStateDescription vmstate_strongarm_ssp_regs = { }, }; -static void strongarm_ssp_class_init(ObjectClass *klass, void *data) +static void strongarm_ssp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->desc = "StrongARM SSP controller"; - dc->reset = strongarm_ssp_reset; + device_class_set_legacy_reset(dc, strongarm_ssp_reset); dc->vmsd = &vmstate_strongarm_ssp_regs; } diff --git a/hw/arm/strongarm.h b/hw/arm/strongarm.h index 192821f6aab..b11b3a3379f 100644 --- a/hw/arm/strongarm.h +++ b/hw/arm/strongarm.h @@ -1,7 +1,7 @@ #ifndef STRONGARM_H #define STRONGARM_H -#include "exec/memory.h" +#include "system/memory.h" #include "target/arm/cpu-qom.h" #define SA_CS0 0x00000000 diff --git a/hw/arm/tegra241-cmdqv.c b/hw/arm/tegra241-cmdqv.c new file mode 100644 index 00000000000..73294a8b64d --- /dev/null +++ b/hw/arm/tegra241-cmdqv.c @@ -0,0 +1,786 @@ +/* + * Copyright (C) 2021-2024, NVIDIA CORPORATION & AFFILIATES + * NVIDIA Tegra241 CMDQ-Virtualization extension for SMMUv3 + * + * Written by Nicolin Chen + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "trace.h" +#include <poll.h> +#include <linux/iommufd.h> + +#include "hw/arm/smmuv3.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/pci/pci_bus.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "cpu.h" +#include "system/address-spaces.h" +#include "system/memory.h" + +#include "smmuv3-accel.h" +#include "tegra241-cmdqv.h" + +#define TEGRA241_CMDQV(obj) \ + OBJECT_CHECK(Tegra241CMDQV, (obj), TYPE_TEGRA241_CMDQV) + +typedef struct 
Tegra241CMDQV Tegra241CMDQV; +struct Tegra241CMDQV { + SysBusDevice parent_obj; + SMMUv3State *smmu; + SMMUViommu *viommu; + IOMMUFDHWqueue *vcmdq[128]; + IOMMUFDVeventq *veventq; + QemuThread irq_thread_id; + MemoryRegion mmio_cmdqv; + MemoryRegion mmio_vcmdq_page; + MemoryRegion mmio_vintf_page; + qemu_irq irq; + void *vcmdq_page0; + + /* Register Cache */ + uint32_t config; + uint32_t param; + uint32_t status; + uint32_t vi_err_map[2]; + uint32_t vi_int_mask[2]; + uint32_t cmdq_err_map[4]; + uint32_t cmdq_alloc_map[128]; + uint32_t vintf_config; + uint32_t vintf_status; + uint32_t vintf_cmdq_err_map[4]; + uint32_t vcmdq_cons_indx[128]; + uint32_t vcmdq_prod_indx[128]; + uint32_t vcmdq_config[128]; + uint32_t vcmdq_status[128]; + uint32_t vcmdq_gerror[128]; + uint32_t vcmdq_gerrorn[128]; + uint64_t vcmdq_base[128]; + uint64_t vcmdq_cons_indx_base[128]; +}; + +static void cmdqv_init_regs(Tegra241CMDQV *s) +{ + int i; + + s->config = V_CONFIG_RESET; + s->param = + FIELD_DP32(s->param, PARAM, CMDQV_VER, TEGRA241_CMDQV_VERSION); + s->param = FIELD_DP32(s->param, PARAM, CMDQV_NUM_CMDQ_LOG2, + TEGRA241_CMDQV_NUM_CMDQ_LOG2); + s->param = FIELD_DP32(s->param, PARAM, CMDQV_NUM_SID_PER_VM_LOG2, + TEGRA241_CMDQV_NUM_SID_PER_VM_LOG2); + trace_tegra241_cmdqv_init(s->param); + s->status = R_STATUS_CMDQV_ENABLED_MASK; + for (i = 0; i < 2; i++) { + s->vi_err_map[i] = 0; + s->vi_int_mask[i] = 0; + s->cmdq_err_map[i] = 0; + } + s->vintf_config = 0; + s->vintf_status = 0; + for (i = 0; i < 4; i++) { + s->vintf_cmdq_err_map[i] = 0; + } + for (i = 0; i < 128; i++) { + s->cmdq_alloc_map[i] = 0; + s->vcmdq_cons_indx[i] = 0; + s->vcmdq_prod_indx[i] = 0; + s->vcmdq_config[i] = 0; + s->vcmdq_status[i] = 0; + s->vcmdq_gerror[i] = 0; + s->vcmdq_gerrorn[i] = 0; + s->vcmdq_base[i] = 0; + s->vcmdq_cons_indx_base[i] = 0; + } +} + +/* Note that offset aligns down to 0x1000 */ +static uint64_t tegra241_cmdqv_read_vintf(Tegra241CMDQV *s, hwaddr offset) +{ + int i; + + switch (offset) { + case A_VINTF0_CONFIG: + return s->vintf_config; + + case A_VINTF0_STATUS: + return s->vintf_status; + + case A_VINTF0_LVCMDQ_ERR_MAP_0 ... 
A_VINTF0_LVCMDQ_ERR_MAP_3: + i = (offset - A_VINTF0_LVCMDQ_ERR_MAP_0) / 4; + return s->vintf_cmdq_err_map[i]; + } + + qemu_log_mask(LOG_UNIMP, "%s unhandled read access at 0x%" PRIx64 "\n", + __func__, offset); + return 0; +} + +/* Note that offset aligns down to 0x10000 */ +static uint64_t tegra241_cmdqv_read_vcmdq(Tegra241CMDQV *s, hwaddr offset, + int index) +{ + uint32_t *ptr; + + switch (offset) { + case A_VCMDQ0_CONS_INDX: + if (s->vcmdq_page0) { + ptr = + (uint32_t *)(s->vcmdq_page0 + 0x80 * index + offset - 0x10000); + s->vcmdq_cons_indx[index] = *ptr; + } + return s->vcmdq_cons_indx[index]; + + case A_VCMDQ0_PROD_INDX: + if (s->vcmdq_page0) { + ptr = + (uint32_t *)(s->vcmdq_page0 + 0x80 * index + offset - 0x10000); + s->vcmdq_prod_indx[index] = *ptr; + } + return s->vcmdq_prod_indx[index]; + + case A_VCMDQ0_CONFIG: + if (s->vcmdq_page0) { + ptr = + (uint32_t *)(s->vcmdq_page0 + 0x80 * index + offset - 0x10000); + s->vcmdq_config[index] = *ptr; + } + return s->vcmdq_config[index]; + + case A_VCMDQ0_STATUS: + if (s->vcmdq_page0) { + ptr = + (uint32_t *)(s->vcmdq_page0 + 0x80 * index + offset - 0x10000); + s->vcmdq_status[index] = *ptr; + } + return s->vcmdq_status[index]; + + case A_VCMDQ0_GERROR: + if (s->vcmdq_page0) { + ptr = + (uint32_t *)(s->vcmdq_page0 + 0x80 * index + offset - 0x10000); + s->vcmdq_gerror[index] = *ptr; + } + return s->vcmdq_gerror[index]; + + case A_VCMDQ0_GERRORN: + if (s->vcmdq_page0) { + ptr = + (uint32_t *)(s->vcmdq_page0 + 0x80 * index + offset - 0x10000); + s->vcmdq_gerrorn[index] = *ptr; + } + return s->vcmdq_gerrorn[index]; + + case A_VCMDQ0_BASE_L: + return s->vcmdq_base[index]; + + case A_VCMDQ0_BASE_H: + return s->vcmdq_base[index] >> 32; + + case A_VCMDQ0_CONS_INDX_BASE_DRAM_L: + return s->vcmdq_cons_indx_base[index]; + + case A_VCMDQ0_CONS_INDX_BASE_DRAM_H: + return s->vcmdq_cons_indx_base[index] >> 32; + } + + qemu_log_mask(LOG_UNIMP, "%s unhandled read access at 0x%" PRIx64 "\n", + __func__, offset); + return 0; +} + +static void *tegra241_cmdqv_irq_thread(void *arg) +{ + struct iommu_vevent_tegra241_cmdqv *vevent; + struct iommufd_vevent_header *hdr; + struct pollfd pollfd = {}; + Tegra241CMDQV *s = arg; + ssize_t readsz = sizeof(*hdr) + sizeof(*vevent); + ssize_t bytes; + int i, ret; + void *buf; + + if (!s->viommu || !s->veventq) { + return NULL; + } + buf = g_malloc0(readsz); + pollfd.events = POLLIN; + pollfd.fd = s->veventq->veventq_fd; + + while (1) { + ret = poll(&pollfd, 1, -1); + if (ret < 0) { + error_report("%s: poll failed: %d", __func__, ret); + return NULL; + } + + bytes = read(pollfd.fd, buf, readsz); + if (bytes <= 0) { + error_report("%s: read failed: %d", __func__, ret); + return NULL; + } + hdr = buf; + vevent = buf + sizeof(*hdr); + if (hdr->flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS) { + error_report("%s: vEVENTQ has lost events", __func__); + goto out_free; + } + + if (vevent->lvcmdq_err_map[0] || vevent->lvcmdq_err_map[1]) { + s->vintf_cmdq_err_map[0] = vevent->lvcmdq_err_map[0] & 0xffffffff; + s->vintf_cmdq_err_map[1] = + (vevent->lvcmdq_err_map[0] >> 32) & 0xffffffff; + s->vintf_cmdq_err_map[2] = vevent->lvcmdq_err_map[1] & 0xffffffff; + s->vintf_cmdq_err_map[3] = + (vevent->lvcmdq_err_map[1] >> 32) & 0xffffffff; + for (i = 0; i < 4; i++) { + s->cmdq_err_map[i] = s->vintf_cmdq_err_map[i]; + } + s->vi_err_map[0] |= 0x1; + qemu_irq_pulse(s->irq); + trace_tegra241_cmdqv_err_map( + s->vintf_cmdq_err_map[3], s->vintf_cmdq_err_map[2], + s->vintf_cmdq_err_map[1], s->vintf_cmdq_err_map[0]); + } + } +out_free: + g_free(buf); + 
return NULL; +} + +static int tegra241_cmdqv_init_vcmdq_page0(Tegra241CMDQV *s) +{ + SMMUv3State *smmu = s->smmu; + SMMUv3AccelState *s_accel = smmu->s_accel; + SMMUViommu *viommu; + char *name; + + if (!s_accel->viommu) { + return 0; + } + + viommu = s_accel->viommu; + if (!s->viommu) { + IOMMUFDVeventq *veventq; + uint32_t veventq_id; + uint32_t veventq_fd; + + s->viommu = viommu; + if (!iommufd_backend_alloc_veventq(viommu->iommufd, + viommu->core.viommu_id, + IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV, + 1 << 16, &veventq_id, + &veventq_fd, NULL)) { + error_report( + "failed to allocate CMDQV veventq, errors will be ignored"); + } else { + veventq = g_new(IOMMUFDVeventq, 1); + veventq->veventq_id = veventq_id; + veventq->veventq_fd = veventq_fd; + veventq->viommu = &viommu->core; + s->veventq = veventq; + + qemu_thread_create(&s->irq_thread_id, "irq/cmdqv", + tegra241_cmdqv_irq_thread, s, + QEMU_THREAD_JOINABLE); + } + } + + if (!iommufd_backend_viommu_mmap(viommu->iommufd, viommu->core.viommu_id, + VCMDQ_REG_PAGE_SIZE, + viommu->cmdqv_data.out_vintf_mmap_offset, + &s->vcmdq_page0)) { + error_report("failed to mmap VCMDQ PAGE0"); + s->vcmdq_page0 = NULL; + return -EIO; + } + + name = g_strdup_printf("%s vcmdq", memory_region_name(&s->mmio_cmdqv)); + memory_region_init_ram_device_ptr(&s->mmio_vcmdq_page, + memory_region_owner(&s->mmio_cmdqv), name, + 0x10000, s->vcmdq_page0); + memory_region_add_subregion_overlap(&s->mmio_cmdqv, 0x10000, + &s->mmio_vcmdq_page, 1); + g_free(name); + + name = g_strdup_printf("%s vintf", memory_region_name(&s->mmio_cmdqv)); + memory_region_init_ram_device_ptr(&s->mmio_vintf_page, + memory_region_owner(&s->mmio_cmdqv), name, + 0x10000, s->vcmdq_page0); + memory_region_add_subregion_overlap(&s->mmio_cmdqv, 0x30000, + &s->mmio_vintf_page, 1); + g_free(name); + + return 0; +} + +static uint64_t tegra241_cmdqv_read(void *opaque, hwaddr offset, unsigned size) +{ + Tegra241CMDQV *s = (Tegra241CMDQV *)opaque; + int index; + + if (!s->vcmdq_page0) { + tegra241_cmdqv_init_vcmdq_page0(s); + } + + if (offset > 0x50000) { + qemu_log_mask(LOG_UNIMP, + "%s offset 0x%" PRIx64 " off limit (0x50000)\n", __func__, + offset); + return 0; + } + + /* Fallback to cached register values */ + switch (offset) { + case A_CONFIG: + return s->config; + + case A_PARAM: + return s->param; + + case A_STATUS: + return s->status; + + case A_VI_ERR_MAP ... A_VI_ERR_MAP_1: + return s->vi_err_map[(offset - A_VI_ERR_MAP) / 4]; + + case A_VI_INT_MASK ... A_VI_INT_MASK_1: + return s->vi_int_mask[(offset - A_VI_INT_MASK) / 4]; + + case A_CMDQ_ERR_MAP ... A_CMDQ_ERR_MAP_3: + return s->cmdq_err_map[(offset - A_CMDQ_ERR_MAP) / 4]; + + case A_CMDQ_ALLOC_MAP_0 ... A_CMDQ_ALLOC_MAP_127: + return s->cmdq_alloc_map[(offset - A_CMDQ_ALLOC_MAP_0) / 4]; + + case A_VINTF0_CONFIG ... A_VINTF0_LVCMDQ_ERR_MAP_3: + return tegra241_cmdqv_read_vintf(s, offset); + + case A_VI_VCMDQ0_CONS_INDX ... A_VI_VCMDQ127_GERRORN: + offset -= 0x20000; + QEMU_FALLTHROUGH; + case A_VCMDQ0_CONS_INDX ... A_VCMDQ127_GERRORN: + /* + * Align offset down to 0x10000 while extracting the index: + * VCMDQ0_CONS_INDX (0x10000) => 0x10000, 0 + * VCMDQ1_CONS_INDX (0x10080) => 0x10000, 1 + * VCMDQ2_CONS_INDX (0x10100) => 0x10000, 2 + * ... + * VCMDQ127_CONS_INDX (0x13f80) => 0x10000, 127 + */ + index = (offset - 0x10000) / 0x80; + return tegra241_cmdqv_read_vcmdq(s, offset - 0x80 * index, index); + + case A_VI_VCMDQ0_BASE_L ... A_VI_VCMDQ127_CONS_INDX_BASE_DRAM_H: + offset -= 0x20000; + QEMU_FALLTHROUGH; + case A_VCMDQ0_BASE_L ... 
A_VCMDQ127_CONS_INDX_BASE_DRAM_H: + /* + * Align offset down to 0x20000 while extracting the index: + * VCMDQ0_BASE_L (0x20000) => 0x20000, 0 + * VCMDQ1_BASE_L (0x20080) => 0x20000, 1 + * VCMDQ2_BASE_L (0x20100) => 0x20000, 2 + * ... + * VCMDQ127_BASE_L (0x23f80) => 0x20000, 127 + */ + index = (offset - 0x20000) / 0x80; + return tegra241_cmdqv_read_vcmdq(s, offset - 0x80 * index, index); + } + + qemu_log_mask(LOG_UNIMP, "%s unhandled read access at 0x%" PRIx64 "\n", + __func__, offset); + return 0; +} + +/* Note that offset aligns down to 0x1000 */ +static void tegra241_cmdqv_write_vintf(Tegra241CMDQV *s, hwaddr offset, + uint64_t value, unsigned size) +{ + switch (offset) { + case A_VINTF0_CONFIG: + /* Strip off HYP_OWN setting from guest kernel */ + value &= ~R_VINTF0_CONFIG_HYP_OWN_MASK; + + s->vintf_config = value; + if (value & R_VINTF0_CONFIG_ENABLE_MASK) { + s->vintf_status |= R_VINTF0_STATUS_ENABLE_OK_MASK; + } else { + s->vintf_status &= ~R_VINTF0_STATUS_ENABLE_OK_MASK; + } + break; + + default: + qemu_log_mask(LOG_UNIMP, "%s unhandled write access at 0x%" PRIx64 "\n", + __func__, offset); + return; + } +} + +static int tegra241_cmdqv_setup_vcmdq(Tegra241CMDQV *s, int index) +{ + SMMUv3State *smmu = s->smmu; + SMMUv3AccelState *s_accel = smmu->s_accel; + uint64_t base_mask = (uint64_t)R_VCMDQ0_BASE_L_ADDR_MASK | + (uint64_t)R_VCMDQ0_BASE_H_ADDR_MASK << 32; + uint64_t addr = s->vcmdq_base[index] & base_mask; + uint64_t shift = s->vcmdq_base[index] & R_VCMDQ0_BASE_L_LOG2SIZE_MASK; + uint64_t size = 1 << (shift + 4); + IOMMUFDHWqueue *vcmdq = s->vcmdq[index]; + SMMUViommu *viommu; + IOMMUFDHWqueue *hw_queue; + uint32_t hw_queue_id; + + if (!s_accel->viommu) { + return -ENODEV; + } + if (!size) { + return -EINVAL; + } + if (!cpu_physical_memory_is_ram(addr)) { + return -EINVAL; + } + if (vcmdq) { + iommufd_backend_free_id(s_accel->viommu->iommufd, vcmdq->hw_queue_id); + s->vcmdq[index] = NULL; + g_free(vcmdq); + } + + viommu = s_accel->viommu; + if (!s->viommu) { + IOMMUFDVeventq *veventq; + uint32_t veventq_id; + uint32_t veventq_fd; + + s->viommu = viommu; + if (!iommufd_backend_alloc_veventq(viommu->iommufd, + viommu->core.viommu_id, + IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV, + 1 << 16, &veventq_id, + &veventq_fd, NULL)) { + error_report( + "failed to allocate CMDQV veventq, errors will be ignored"); + } else { + veventq = g_new(IOMMUFDVeventq, 1); + veventq->veventq_id = veventq_id; + veventq->veventq_fd = veventq_fd; + veventq->viommu = &viommu->core; + s->veventq = veventq; + + qemu_thread_create(&s->irq_thread_id, "irq/cmdqv", + tegra241_cmdqv_irq_thread, s, + QEMU_THREAD_JOINABLE); + } + } + + if (!iommufd_backend_alloc_hw_queue(viommu->iommufd, viommu->core.viommu_id, + IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV, + index, addr, size, &hw_queue_id, + NULL)) { + error_report("failed to allocate VCMDQ%d, viommu_id=%d", index, + viommu->core.viommu_id); + return -ENODEV; + } + hw_queue = g_new(IOMMUFDHWqueue, 1); + hw_queue->hw_queue_id = hw_queue_id; + hw_queue->viommu = &viommu->core; + + s->vcmdq[index] = hw_queue; + + return 0; +} + +/* Note that offset aligns down to 0x10000 */ +static void tegra241_cmdqv_write_vcmdq(Tegra241CMDQV *s, hwaddr offset, + int index, uint64_t value, unsigned size) +{ + uint32_t *ptr; + + switch (offset) { + case A_VCMDQ0_CONS_INDX: + if (s->vcmdq_page0) { + ptr = + (uint32_t *)(s->vcmdq_page0 + 0x80 * index + offset - 0x10000); + *ptr = value; + } + s->vcmdq_cons_indx[index] = value; + return; + + case A_VCMDQ0_PROD_INDX: + if (s->vcmdq_page0) { + ptr = + 
(uint32_t *)(s->vcmdq_page0 + 0x80 * index + offset - 0x10000); + *ptr = value; + } + s->vcmdq_prod_indx[index] = value; + return; + + case A_VCMDQ0_CONFIG: + if (s->vcmdq_page0) { + ptr = + (uint32_t *)(s->vcmdq_page0 + 0x80 * index + offset - 0x10000); + *ptr = value; + } else { + if (value & R_VCMDQ0_CONFIG_CMDQ_EN_MASK) { + s->vcmdq_status[index] |= R_VCMDQ0_STATUS_CMDQ_EN_OK_MASK; + } else { + s->vcmdq_status[index] &= ~R_VCMDQ0_STATUS_CMDQ_EN_OK_MASK; + } + } + s->vcmdq_config[index] = value; + return; + + case A_VCMDQ0_GERRORN: + if (s->vcmdq_page0) { + ptr = + (uint32_t *)(s->vcmdq_page0 + 0x80 * index + offset - 0x10000); + *ptr = value; + } + s->vcmdq_gerrorn[index] = value; + return; + + case A_VCMDQ0_BASE_L: + if (size == 8) { + s->vcmdq_base[index] = value; + } else if (size == 4) { + s->vcmdq_base[index] &= 0xffffffff00000000; + s->vcmdq_base[index] |= value & 0xffffffff; + } + tegra241_cmdqv_setup_vcmdq(s, index); + break; + + case A_VCMDQ0_BASE_H: + s->vcmdq_base[index] &= (uint64_t)0xffffffff; + s->vcmdq_base[index] |= (uint64_t)value << 32; + tegra241_cmdqv_setup_vcmdq(s, index); + break; + + case A_VCMDQ0_CONS_INDX_BASE_DRAM_L: + if (size == 8) { + s->vcmdq_cons_indx_base[index] = value; + } else if (size == 4) { + s->vcmdq_cons_indx_base[index] &= 0xffffffff00000000; + s->vcmdq_cons_indx_base[index] |= value & 0xffffffff; + } + break; + + case A_VCMDQ0_CONS_INDX_BASE_DRAM_H: + s->vcmdq_cons_indx_base[index] &= (uint64_t)0xffffffff; + s->vcmdq_cons_indx_base[index] |= (uint64_t)value << 32; + break; + + default: + qemu_log_mask(LOG_UNIMP, "%s unhandled write access at 0x%" PRIx64 "\n", + __func__, offset); + return; + } +} + +static void tegra241_cmdqv_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + Tegra241CMDQV *s = (Tegra241CMDQV *)opaque; + int index; + + if (!s->vcmdq_page0) { + tegra241_cmdqv_init_vcmdq_page0(s); + } + + if (offset > 0x50000) { + qemu_log_mask(LOG_UNIMP, + "%s offset 0x%" PRIx64 " off limit (0x50000)\n", __func__, + offset); + return; + } + + switch (offset) { + case A_CONFIG: + s->config = value; + if (value & R_CONFIG_CMDQV_EN_MASK) { + s->status |= R_STATUS_CMDQV_ENABLED_MASK; + } else { + s->status &= ~R_STATUS_CMDQV_ENABLED_MASK; + } + break; + + case A_VI_INT_MASK ... A_VI_INT_MASK_1: + s->vi_int_mask[(offset - A_VI_INT_MASK) / 4] = value; + break; + + case A_CMDQ_ALLOC_MAP_0 ... A_CMDQ_ALLOC_MAP_127: + s->cmdq_alloc_map[(offset - A_CMDQ_ALLOC_MAP_0) / 4] = value; + break; + + case A_VINTF0_CONFIG ... A_VINTF0_LVCMDQ_ERR_MAP_3: + tegra241_cmdqv_write_vintf(s, offset, value, size); + break; + + case A_VI_VCMDQ0_CONS_INDX ... A_VI_VCMDQ127_GERRORN: + offset -= 0x20000; + QEMU_FALLTHROUGH; + case A_VCMDQ0_CONS_INDX ... A_VCMDQ127_GERRORN: + /* + * Align offset down to 0x10000 while extracting the index: + * VCMDQ0_CONS_INDX (0x10000) => 0x10000, 0 + * VCMDQ1_CONS_INDX (0x10080) => 0x10000, 1 + * VCMDQ2_CONS_INDX (0x10100) => 0x10000, 2 + * ... + * VCMDQ127_CONS_INDX (0x13f80) => 0x10000, 127 + */ + index = (offset - 0x10000) / 0x80; + tegra241_cmdqv_write_vcmdq(s, offset - 0x80 * index, index, value, + size); + break; + + case A_VI_VCMDQ0_BASE_L ... A_VI_VCMDQ127_CONS_INDX_BASE_DRAM_H: + offset -= 0x20000; + QEMU_FALLTHROUGH; + case A_VCMDQ0_BASE_L ... A_VCMDQ127_CONS_INDX_BASE_DRAM_H: + /* + * Align offset down to 0x20000 while extracting the index: + * VCMDQ0_BASE_L (0x20000) => 0x20000, 0 + * VCMDQ1_BASE_L (0x20080) => 0x20000, 1 + * VCMDQ2_BASE_L (0x20100) => 0x20000, 2 + * ... 
+ * VCMDQ127_BASE_L (0x23f80) => 0x20000, 127 + */ + index = (offset - 0x20000) / 0x80; + tegra241_cmdqv_write_vcmdq(s, offset - 0x80 * index, index, value, + size); + break; + + default: + qemu_log_mask(LOG_UNIMP, "%s unhandled write access at 0x%" PRIx64 "\n", + __func__, offset); + return; + } +} + +static const MemoryRegionOps mmio_cmdqv_ops = { + .read = tegra241_cmdqv_read, + .write = tegra241_cmdqv_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_cmdqv = { + .name = "cmdqv", + .version_id = 1, + .minimum_version_id = 1, + .fields = + (VMStateField[]){ + VMSTATE_UINT32(status, Tegra241CMDQV), + VMSTATE_UINT32_ARRAY(vi_err_map, Tegra241CMDQV, 2), + VMSTATE_END_OF_LIST(), + }, +}; + +void tegra241_cmdqv_init(SMMUv3State *s) +{ + SMMUv3AccelState *s_accel = s->s_accel; + SMMUState *bs = ARM_SMMU(s); + SysBusDevice *sbd = &bs->dev; + Tegra241CMDQV *cmdqv; + + if (!s_accel || !s->cmdqv) { + return; + } + + cmdqv = g_new0(Tegra241CMDQV, 1); + memory_region_init_io(&cmdqv->mmio_cmdqv, OBJECT(s), &mmio_cmdqv_ops, cmdqv, + TYPE_TEGRA241_CMDQV, 0x50000); + sysbus_init_mmio(sbd, &cmdqv->mmio_cmdqv); + sysbus_init_irq(sbd, &cmdqv->irq); + cmdqv->smmu = s; + s_accel->cmdqv = cmdqv; +} + +bool tegra241_cmdqv_hw_compatible(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev, + Error **errp) +{ + SMMUv3AccelState *s_accel = s->s_accel; + uint32_t data_type = IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV; + uint64_t caps; + + if (!s_accel || !s->cmdqv) { + return true; + } + + if (!iommufd_backend_get_device_info(idev->iommufd, idev->devid, + &data_type, &s_accel->cmdqv_info, + sizeof(s_accel->cmdqv_info), &caps, + NULL, errp)) { + error_setg(errp, "Failed to get Host CMDQV device info"); + return false; + } + if (data_type != IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV) { + error_setg(errp, "Wrong data type (%d) for Host CMDQV device info", + data_type); + return false; + } + if (s_accel->cmdqv_info.version != TEGRA241_CMDQV_VERSION) { + error_setg(errp, "Wrong version (%d) for Host CMDQV device info", + s_accel->cmdqv_info.version); + return false; + } + if (s_accel->cmdqv_info.log2vcmdqs != TEGRA241_CMDQV_NUM_CMDQ_LOG2) { + error_setg(errp, "Wrong num of cmdqs (%d) for Host CMDQV device info", + s_accel->cmdqv_info.version); + return false; + } + if (s_accel->cmdqv_info.log2vsids != TEGRA241_CMDQV_NUM_SID_PER_VM_LOG2) { + error_setg(errp, "Wrong num of SID per VM (%d) for Host CMDQV " + "device info", s_accel->cmdqv_info.version); + return false; + } + return true; +} + +void tegra241_cmdqv_reset(SMMUv3State *s) +{ + SMMUv3AccelState *s_accel = s->s_accel; + Tegra241CMDQV *t_cmdqv; + int i; + + if (!s->cmdqv || !s_accel || !s_accel->cmdqv) { + return; + } + + t_cmdqv = s_accel->cmdqv; + + for (i = 127; i >= 0; i--) { + if (t_cmdqv->vcmdq[i]) { + iommufd_backend_free_id(t_cmdqv->viommu->iommufd, + t_cmdqv->vcmdq[i]->hw_queue_id); + g_free(t_cmdqv->vcmdq[i]); + t_cmdqv->vcmdq[i] = NULL; + } + } + cmdqv_init_regs(t_cmdqv); +} + +static void cmdqv_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Tegra241 CMDQ-Virtualization"; + dc->vmsd = &vmstate_cmdqv; +} + +static const TypeInfo types[] = { + { + .name = TYPE_TEGRA241_CMDQV, + .parent = TYPE_ARM_SMMUV3, + .class_init = cmdqv_class_init, + } +}; +DEFINE_TYPES(types) diff --git a/hw/arm/tegra241-cmdqv.h b/hw/arm/tegra241-cmdqv.h new file mode 100644 index 00000000000..59f7881d72e --- /dev/null +++ b/hw/arm/tegra241-cmdqv.h @@ -0,0 +1,276 @@ +/* + * Copyright (C) 2021-2024, 
NVIDIA CORPORATION & AFFILIATES + * NVIDIA Tegra241 CMDQ-Virtualization extension for SMMUv3 + * + * Written by Nicolin Chen + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_TEGRA241_CMDQV_H +#define HW_TEGRA241_CMDQV_H + +#include "hw/registerfields.h" + +/* MMIO Registers */ +REG32(CONFIG, 0x0) +FIELD(CONFIG, CMDQV_EN, 0, 1) +FIELD(CONFIG, CMDQV_PER_CMD_OFFSET, 1, 3) +FIELD(CONFIG, CMDQ_MAX_CLK_BATCH, 4, 8) +FIELD(CONFIG, CMDQ_MAX_CMD_BATCH, 12, 8) +FIELD(CONFIG, CONS_DRAM_EN, 20, 1) + +#define V_CONFIG_RESET 0x00020403 + +REG32(PARAM, 0x4) +FIELD(PARAM, CMDQV_VER, 0, 4) +FIELD(PARAM, CMDQV_NUM_CMDQ_LOG2, 4, 4) +FIELD(PARAM, CMDQV_NUM_VM_LOG2, 8, 4) +FIELD(PARAM, CMDQV_NUM_SID_PER_VM_LOG2, 12, 4) + +#define V_PARAM_RESET 0x00004011 + +REG32(STATUS, 0x8) +FIELD(STATUS, CMDQV_ENABLED, 0, 1) + +#define A_VI_ERR_MAP 0x14 +#define A_VI_ERR_MAP_1 0x18 +#define V_VI_ERR_MAP_NO_ERROR (0) +#define V_VI_ERR_MAP_ERROR (1) + +#define A_VI_INT_MASK 0x1c +#define A_VI_INT_MASK_1 0x20 +#define V_VI_INT_MASK_NOT_MASKED (0) +#define V_VI_INT_MASK_MASKED (1) + +#define A_CMDQ_ERR_MAP 0x24 +#define A_CMDQ_ERR_MAP_1 0x28 +#define A_CMDQ_ERR_MAP_2 0x2c +#define A_CMDQ_ERR_MAP_3 0x30 + +/* i = [0, 127] */ +#define A_CMDQ_ALLOC_MAP_(i) \ + REG32(CMDQ_ALLOC_MAP_##i, 0x200 + i * 4) \ + FIELD(CMDQ_ALLOC_MAP_##i, ALLOC, 0, 1) \ + FIELD(CMDQ_ALLOC_MAP_##i, LVCMDQ, 1, 7) \ + FIELD(CMDQ_ALLOC_MAP_##i, VIRT_INTF_INDX, 15, 6) + +A_CMDQ_ALLOC_MAP_(0) +/* Omitting 1~126 as not being directly called */ +A_CMDQ_ALLOC_MAP_(127) + +/* i = [0, 0] */ +#define A_VINTFi_CONFIG(i) \ + REG32(VINTF##i##_CONFIG, 0x1000 + i * 0x100) \ + FIELD(VINTF##i##_CONFIG, ENABLE, 0, 1) \ + FIELD(VINTF##i##_CONFIG, VMID, 1, 16) \ + FIELD(VINTF##i##_CONFIG, HYP_OWN, 17, 1) + +A_VINTFi_CONFIG(0) + +#define A_VINTFi_STATUS(i) \ + REG32(VINTF##i##_STATUS, 0x1004 + i * 0x100) \ + FIELD(VINTF##i##_STATUS, ENABLE_OK, 0, 1) \ + FIELD(VINTF##i##_STATUS, STATUS, 1, 3) \ + FIELD(VINTF##i##_STATUS, VI_NUM_LVCMDQ, 16, 8) + + A_VINTFi_STATUS(0) + +#define V_VINTF_STATUS_NO_ERROR (0 << 1) +#define V_VINTF_STATUS_VCMDQ_EROR (1 << 1) + +/* i = [0, 0], j = [0, 3] */ +#define A_VINTFi_LVCMDQ_ERR_MAP_(i, j) \ + REG32(VINTF##i##_LVCMDQ_ERR_MAP_##j, 0x10c0 + j * 4 + i * 0x100) \ + FIELD(VINTF##i##_LVCMDQ_ERR_MAP_##j, LVCMDQ_ERR_MAP, 0, 32) + + A_VINTFi_LVCMDQ_ERR_MAP_(0, 0) + /* Omitting [0][1~2] as not being directly called */ + A_VINTFi_LVCMDQ_ERR_MAP_(0, 3) + +/* VCMDQ registers -- starting from 0x10000 with size 64KB * 2 (0x20000) */ +#define VCMDQ_REG_OFFSET 0x10000 +#define VCMDQ_REG_PAGE_SIZE 0x10000 + +#define A_VCMDQi_CONS_INDX(i) \ + REG32(VCMDQ##i##_CONS_INDX, 0x10000 + i * 0x80) \ + FIELD(VCMDQ##i##_CONS_INDX, RD, 0, 20) \ + FIELD(VCMDQ##i##_CONS_INDX, ERR, 24, 7) + + A_VCMDQi_CONS_INDX(0) + /* Omitting [1~126] as not being directly called */ + A_VCMDQi_CONS_INDX(127) + +#define V_VCMDQ_CONS_INDX_ERR_CERROR_NONE 0 +#define V_VCMDQ_CONS_INDX_ERR_CERROR_ILL_OPCODE 1 +#define V_VCMDQ_CONS_INDX_ERR_CERROR_ABT 2 +#define V_VCMDQ_CONS_INDX_ERR_CERROR_ATC_INV_SYNC 3 +#define V_VCMDQ_CONS_INDX_ERR_CERROR_ILL_ACCESS 4 + +#define A_VCMDQi_PROD_INDX(i) \ + REG32(VCMDQ##i##_PROD_INDX, 0x10000 + 0x4 + i * 0x80) \ + FIELD(VCMDQ##i##_PROD_INDX, WR, 0, 20) + + A_VCMDQi_PROD_INDX(0) + /* Omitting [1~126] as not being directly called */ + A_VCMDQi_PROD_INDX(127) + +#define A_VCMDQi_CONFIG(i) \ + REG32(VCMDQ##i##_CONFIG, 0x10000 + 0x8 + i * 0x80) \ + FIELD(VCMDQ##i##_CONFIG, CMDQ_EN, 0, 1) + + A_VCMDQi_CONFIG(0) + /* Omitting [1~126] as not being 
directly called */ + A_VCMDQi_CONFIG(127) + +#define A_VCMDQi_STATUS(i) \ + REG32(VCMDQ##i##_STATUS, 0x10000 + 0xc + i * 0x80) \ + FIELD(VCMDQ##i##_STATUS, CMDQ_EN_OK, 0, 1) + + A_VCMDQi_STATUS(0) + /* Omitting [1~126] as not being directly called */ + A_VCMDQi_STATUS(127) + +#define A_VCMDQi_GERROR(i) \ + REG32(VCMDQ##i##_GERROR, 0x10000 + 0x10 + i * 0x80) \ + FIELD(VCMDQ##i##_GERROR, CMDQ_ERR, 0, 1) \ + FIELD(VCMDQ##i##_GERROR, CONS_DRAM_WR_ABT_ERR, 1, 1) \ + FIELD(VCMDQ##i##_GERROR, CMDQ_INIT_ERR, 2, 1) + + A_VCMDQi_GERROR(0) + /* Omitting [1~126] as not being directly called */ + A_VCMDQi_GERROR(127) + +#define A_VCMDQi_GERRORN(i) \ + REG32(VCMDQ##i##_GERRORN, 0x10000 + 0x14 + i * 0x80) \ + FIELD(VCMDQ##i##_GERRORN, CMDQ_ERR, 0, 1) \ + FIELD(VCMDQ##i##_GERRORN, CONS_DRAM_WR_ABT_ERR, 1, 1) \ + FIELD(VCMDQ##i##_GERRORN, CMDQ_INIT_ERR, 2, 1) + + A_VCMDQi_GERRORN(0) + /* Omitting [1~126] as not being directly called */ + A_VCMDQi_GERRORN(127) + +#define A_VCMDQi_BASE_L(i) \ + REG32(VCMDQ##i##_BASE_L, 0x20000 + i * 0x80) \ + FIELD(VCMDQ##i##_BASE_L, LOG2SIZE, 0, 5) \ + FIELD(VCMDQ##i##_BASE_L, ADDR, 5, 27) + + A_VCMDQi_BASE_L(0) + /* Omitting [1~126] as not being directly called */ + A_VCMDQi_BASE_L(127) + +#define A_VCMDQi_BASE_H(i) \ + REG32(VCMDQ##i##_BASE_H, 0x20000 + 0x4 + i * 0x80) \ + FIELD(VCMDQ##i##_BASE_H, ADDR, 0, 16) + + A_VCMDQi_BASE_H(0) + /* Omitting [1~126] as not being directly called */ + A_VCMDQi_BASE_H(127) + +#define A_VCMDQi_CONS_INDX_BASE_DRAM_L(i) \ + REG32(VCMDQ##i##_CONS_INDX_BASE_DRAM_L, 0x20000 + 0x8 + i * 0x80) \ + FIELD(VCMDQ##i##_CONS_INDX_BASE_DRAM_L, ADDR, 0, 32) + + A_VCMDQi_CONS_INDX_BASE_DRAM_L(0) + /* Omitting [1~126] as not being directly called */ + A_VCMDQi_CONS_INDX_BASE_DRAM_L(127) + +#define A_VCMDQi_CONS_INDX_BASE_DRAM_H(i) \ + REG32(VCMDQ##i##_CONS_INDX_BASE_DRAM_H, 0x20000 + 0xc + i * 0x80) \ + FIELD(VCMDQ##i##_CONS_INDX_BASE_DRAM_H, ADDR, 0, 16) + + A_VCMDQi_CONS_INDX_BASE_DRAM_H(0) + /* Omitting [1~126] as not being directly called */ + A_VCMDQi_CONS_INDX_BASE_DRAM_H(127) + +/* + * VINTF VI_VCMDQ registers -- starting from 0x30000 with size 64KB * 2 + * (0x20000) + */ +#define A_VI_VCMDQi_CONS_INDX(i) \ + REG32(VI_VCMDQ##i##_CONS_INDX, 0x30000 + i * 0x80) \ + FIELD(VI_VCMDQ##i##_CONS_INDX, RD, 0, 20) \ + FIELD(VI_VCMDQ##i##_CONS_INDX, ERR, 24, 7) + + A_VI_VCMDQi_CONS_INDX(0) + /* Omitting [1~126] as not being directly called */ + A_VI_VCMDQi_CONS_INDX(127) + +#define A_VI_VCMDQi_PROD_INDX(i) \ + REG32(VI_VCMDQ##i##_PROD_INDX, 0x30000 + 0x4 + i * 0x80) \ + FIELD(VI_VCMDQ##i##_PROD_INDX, WR, 0, 20) + + A_VI_VCMDQi_PROD_INDX(0) + /* Omitting [1~126] as not being directly called */ + A_VI_VCMDQi_PROD_INDX(127) + +#define A_VI_VCMDQi_CONFIG(i) \ + REG32(VI_VCMDQ##i##_CONFIG, 0x30000 + 0x8 + i * 0x80) \ + FIELD(VI_VCMDQ##i##_CONFIG, CMDQ_EN, 0, 1) + + A_VI_VCMDQi_CONFIG(0) + /* Omitting [1~126] as not being directly called */ + A_VI_VCMDQi_CONFIG(127) + +#define A_VI_VCMDQi_STATUS(i) \ + REG32(VI_VCMDQ##i##_STATUS, 0x30000 + 0xc + i * 0x80) \ + FIELD(VI_VCMDQ##i##_STATUS, CMDQ_EN_OK, 0, 1) + + A_VI_VCMDQi_STATUS(0) + /* Omitting [1~126] as not being directly called */ + A_VI_VCMDQi_STATUS(127) + +#define A_VI_VCMDQi_GERROR(i) \ + REG32(VI_VCMDQ##i##_GERROR, 0x30000 + 0x10 + i * 0x80) \ + FIELD(VI_VCMDQ##i##_GERROR, CMDQ_ERR, 0, 1) \ + FIELD(VI_VCMDQ##i##_GERROR, CONS_DRAM_WR_ABT_ERR, 1, 1) \ + FIELD(VI_VCMDQ##i##_GERROR, CMDQ_INIT_ERR, 2, 1) + + A_VI_VCMDQi_GERROR(0) + /* Omitting [1~126] as not being directly called */ + A_VI_VCMDQi_GERROR(127) + 
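The REG32()/FIELD() pairs above expand, via hw/registerfields.h, into A_<NAME> offset constants and R_<NAME>_<FIELD>_SHIFT/LENGTH/MASK constants. A minimal sketch of how the emulation side is expected to consume them, assuming the stock FIELD_EX32() helper and the 0x80-byte per-queue stride used throughout this header (illustrative only, not part of the patch):

    /* Decode a guest access to one of the 128 per-queue register frames. */
    int index = (offset - A_VCMDQ0_CONS_INDX) / 0x80;   /* which VCMDQ, 0..127 */
    hwaddr reg = offset - 0x80 * index;                 /* alias back to the VCMDQ0_* offsets */
    bool en = FIELD_EX32(s->vcmdq_config[index], VCMDQ0_CONFIG, CMDQ_EN);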
+#define A_VI_VCMDQi_GERRORN(i) \ + REG32(VI_VCMDQ##i##_GERRORN, 0x30000 + 0x14 + i * 0x80) \ + FIELD(VI_VCMDQ##i##_GERRORN, CMDQ_ERR, 0, 1) \ + FIELD(VI_VCMDQ##i##_GERRORN, CONS_DRAM_WR_ABT_ERR, 1, 1) \ + FIELD(VI_VCMDQ##i##_GERRORN, CMDQ_INIT_ERR, 2, 1) + + A_VI_VCMDQi_GERRORN(0) + /* Omitting [1~126] as not being directly called */ + A_VI_VCMDQi_GERRORN(127) + +#define A_VI_VCMDQi_BASE_L(i) \ + REG32(VI_VCMDQ##i##_BASE_L, 0x40000 + i * 0x80) \ + FIELD(VI_VCMDQ##i##_BASE_L, LOG2SIZE, 0, 5) \ + FIELD(VI_VCMDQ##i##_BASE_L, ADDR, 5, 27) + + A_VI_VCMDQi_BASE_L(0) + /* Omitting [1~126] as not being directly called */ + A_VI_VCMDQi_BASE_L(127) + +#define A_VI_VCMDQi_BASE_H(i) \ + REG32(VI_VCMDQ##i##_BASE_H, 0x40000 + 0x4 + i * 0x80) \ + FIELD(VI_VCMDQ##i##_BASE_H, ADDR, 0, 16) + + A_VI_VCMDQi_BASE_H(0) + /* Omitting [1~126] as not being directly called */ + A_VI_VCMDQi_BASE_H(127) + +#define A_VI_VCMDQi_CONS_INDX_BASE_DRAM_L(i) \ + REG32(VI_VCMDQ##i##_CONS_INDX_BASE_DRAM_L, 0x40000 + 0x8 + i * 0x80) \ + FIELD(VI_VCMDQ##i##_CONS_INDX_BASE_DRAM_L, ADDR, 0, 32) + + A_VI_VCMDQi_CONS_INDX_BASE_DRAM_L(0) + /* Omitting [1~126] as not being directly called */ + A_VI_VCMDQi_CONS_INDX_BASE_DRAM_L(127) + +#define A_VI_VCMDQi_CONS_INDX_BASE_DRAM_H(i) \ + REG32(VI_VCMDQ##i##_CONS_INDX_BASE_DRAM_H, 0x40000 + 0xc + i * 0x80) \ + FIELD(VI_VCMDQ##i##_CONS_INDX_BASE_DRAM_H, ADDR, 0, 16) + + A_VI_VCMDQi_CONS_INDX_BASE_DRAM_H(0) + /* Omitting [1~126] as not being directly called */ + A_VI_VCMDQi_CONS_INDX_BASE_DRAM_H(127) +#endif /* HW_TEGRA241_CMDQV_H */ diff --git a/hw/arm/tosa.c b/hw/arm/tosa.c deleted file mode 100644 index 5891f6064f9..00000000000 --- a/hw/arm/tosa.c +++ /dev/null @@ -1,327 +0,0 @@ -/* vim:set shiftwidth=4 ts=4 et: */ -/* - * PXA255 Sharp Zaurus SL-6000 PDA platform - * - * Copyright (c) 2008 Dmitry Baryshkov - * - * Code based on spitz platform by Andrzej Zaborowski - * This code is licensed under the GNU GPL v2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. 
- */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "sysemu/runstate.h" -#include "hw/arm/pxa.h" -#include "hw/arm/boot.h" -#include "hw/arm/sharpsl.h" -#include "hw/pcmcia.h" -#include "hw/boards.h" -#include "hw/display/tc6393xb.h" -#include "hw/i2c/i2c.h" -#include "hw/irq.h" -#include "hw/ssi/ssi.h" -#include "hw/sysbus.h" -#include "hw/misc/led.h" -#include "exec/address-spaces.h" -#include "qom/object.h" - -#define TOSA_RAM 0x04000000 -#define TOSA_ROM 0x00800000 - -#define TOSA_GPIO_USB_IN (5) -#define TOSA_GPIO_nSD_DETECT (9) -#define TOSA_GPIO_ON_RESET (19) -#define TOSA_GPIO_CF_IRQ (21) /* CF slot0 Ready */ -#define TOSA_GPIO_CF_CD (13) -#define TOSA_GPIO_TC6393XB_INT (15) -#define TOSA_GPIO_JC_CF_IRQ (36) /* CF slot1 Ready */ - -#define TOSA_SCOOP_GPIO_BASE 1 -#define TOSA_GPIO_IR_POWERDWN (TOSA_SCOOP_GPIO_BASE + 2) -#define TOSA_GPIO_SD_WP (TOSA_SCOOP_GPIO_BASE + 3) -#define TOSA_GPIO_PWR_ON (TOSA_SCOOP_GPIO_BASE + 4) - -#define TOSA_SCOOP_JC_GPIO_BASE 1 -#define TOSA_GPIO_BT_LED (TOSA_SCOOP_JC_GPIO_BASE + 0) -#define TOSA_GPIO_NOTE_LED (TOSA_SCOOP_JC_GPIO_BASE + 1) -#define TOSA_GPIO_CHRG_ERR_LED (TOSA_SCOOP_JC_GPIO_BASE + 2) -#define TOSA_GPIO_TC6393XB_L3V_ON (TOSA_SCOOP_JC_GPIO_BASE + 5) -#define TOSA_GPIO_WLAN_LED (TOSA_SCOOP_JC_GPIO_BASE + 7) - -#define DAC_BASE 0x4e -#define DAC_CH1 0 -#define DAC_CH2 1 - -static void tosa_microdrive_attach(PXA2xxState *cpu) -{ - PCMCIACardState *md; - DriveInfo *dinfo; - - dinfo = drive_get(IF_IDE, 0, 0); - if (!dinfo || dinfo->media_cd) - return; - md = dscm1xxxx_init(dinfo); - pxa2xx_pcmcia_attach(cpu->pcmcia[0], md); -} - -/* - * Encapsulation of some GPIO line behaviour for the Tosa board - * - * QEMU interface: - * + named GPIO inputs "leds[0..3]": assert to light LEDs - * + named GPIO input "reset": when asserted, resets the system - */ - -#define TYPE_TOSA_MISC_GPIO "tosa-misc-gpio" -OBJECT_DECLARE_SIMPLE_TYPE(TosaMiscGPIOState, TOSA_MISC_GPIO) - -struct TosaMiscGPIOState { - SysBusDevice parent_obj; -}; - -static void tosa_reset(void *opaque, int line, int level) -{ - if (level) { - qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); - } -} - -static void tosa_misc_gpio_init(Object *obj) -{ - DeviceState *dev = DEVICE(obj); - - qdev_init_gpio_in_named(dev, tosa_reset, "reset", 1); -} - -static void tosa_gpio_setup(PXA2xxState *cpu, - DeviceState *scp0, - DeviceState *scp1, - TC6393xbState *tmio) -{ - DeviceState *misc_gpio; - LEDState *led[4]; - - misc_gpio = sysbus_create_simple(TYPE_TOSA_MISC_GPIO, -1, NULL); - - /* MMC/SD host */ - pxa2xx_mmci_handlers(cpu->mmc, - qdev_get_gpio_in(scp0, TOSA_GPIO_SD_WP), - qemu_irq_invert(qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_nSD_DETECT))); - - /* Handle reset */ - qdev_connect_gpio_out(cpu->gpio, TOSA_GPIO_ON_RESET, - qdev_get_gpio_in_named(misc_gpio, "reset", 0)); - - /* PCMCIA signals: card's IRQ and Card-Detect */ - pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[0], - qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_CF_IRQ), - qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_CF_CD)); - - pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[1], - qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_JC_CF_IRQ), - NULL); - - led[0] = led_create_simple(OBJECT(misc_gpio), GPIO_POLARITY_ACTIVE_HIGH, - LED_COLOR_BLUE, "bluetooth"); - led[1] = led_create_simple(OBJECT(misc_gpio), GPIO_POLARITY_ACTIVE_HIGH, - LED_COLOR_GREEN, "note"); - led[2] = led_create_simple(OBJECT(misc_gpio), GPIO_POLARITY_ACTIVE_HIGH, - LED_COLOR_AMBER, "charger-error"); - led[3] = led_create_simple(OBJECT(misc_gpio), GPIO_POLARITY_ACTIVE_HIGH, - LED_COLOR_GREEN, 
"wlan"); - - qdev_connect_gpio_out(scp1, TOSA_GPIO_BT_LED, - qdev_get_gpio_in(DEVICE(led[0]), 0)); - qdev_connect_gpio_out(scp1, TOSA_GPIO_NOTE_LED, - qdev_get_gpio_in(DEVICE(led[1]), 0)); - qdev_connect_gpio_out(scp1, TOSA_GPIO_CHRG_ERR_LED, - qdev_get_gpio_in(DEVICE(led[2]), 0)); - qdev_connect_gpio_out(scp1, TOSA_GPIO_WLAN_LED, - qdev_get_gpio_in(DEVICE(led[3]), 0)); - - qdev_connect_gpio_out(scp1, TOSA_GPIO_TC6393XB_L3V_ON, tc6393xb_l3v_get(tmio)); - - /* UDC Vbus */ - qemu_irq_raise(qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_USB_IN)); -} - -static uint32_t tosa_ssp_tansfer(SSIPeripheral *dev, uint32_t value) -{ - fprintf(stderr, "TG: %u %02x\n", value >> 5, value & 0x1f); - return 0; -} - -static void tosa_ssp_realize(SSIPeripheral *dev, Error **errp) -{ - /* Nothing to do. */ -} - -#define TYPE_TOSA_DAC "tosa_dac" -OBJECT_DECLARE_SIMPLE_TYPE(TosaDACState, TOSA_DAC) - -struct TosaDACState { - I2CSlave parent_obj; - - int len; - char buf[3]; -}; - -static int tosa_dac_send(I2CSlave *i2c, uint8_t data) -{ - TosaDACState *s = TOSA_DAC(i2c); - - s->buf[s->len] = data; - if (s->len ++ > 2) { -#ifdef VERBOSE - fprintf(stderr, "%s: message too long (%i bytes)\n", __func__, s->len); -#endif - return 1; - } - - if (s->len == 2) { - fprintf(stderr, "dac: channel %d value 0x%02x\n", - s->buf[0], s->buf[1]); - } - - return 0; -} - -static int tosa_dac_event(I2CSlave *i2c, enum i2c_event event) -{ - TosaDACState *s = TOSA_DAC(i2c); - - s->len = 0; - switch (event) { - case I2C_START_SEND: - break; - case I2C_START_RECV: - printf("%s: recv not supported!!!\n", __func__); - break; - case I2C_FINISH: -#ifdef VERBOSE - if (s->len < 2) - printf("%s: message too short (%i bytes)\n", __func__, s->len); - if (s->len > 2) - printf("%s: message too long\n", __func__); -#endif - break; - default: - break; - } - - return 0; -} - -static uint8_t tosa_dac_recv(I2CSlave *s) -{ - printf("%s: recv not supported!!!\n", __func__); - return 0xff; -} - -static void tosa_tg_init(PXA2xxState *cpu) -{ - I2CBus *bus = pxa2xx_i2c_bus(cpu->i2c[0]); - i2c_slave_create_simple(bus, TYPE_TOSA_DAC, DAC_BASE); - ssi_create_peripheral(cpu->ssp[1], "tosa-ssp"); -} - - -static struct arm_boot_info tosa_binfo = { - .loader_start = PXA2XX_SDRAM_BASE, - .ram_size = 0x04000000, -}; - -static void tosa_init(MachineState *machine) -{ - MemoryRegion *address_space_mem = get_system_memory(); - MemoryRegion *rom = g_new(MemoryRegion, 1); - PXA2xxState *mpu; - TC6393xbState *tmio; - DeviceState *scp0, *scp1; - - mpu = pxa255_init(tosa_binfo.ram_size); - - memory_region_init_rom(rom, NULL, "tosa.rom", TOSA_ROM, &error_fatal); - memory_region_add_subregion(address_space_mem, 0, rom); - - tmio = tc6393xb_init(address_space_mem, 0x10000000, - qdev_get_gpio_in(mpu->gpio, TOSA_GPIO_TC6393XB_INT)); - - scp0 = sysbus_create_simple("scoop", 0x08800000, NULL); - scp1 = sysbus_create_simple("scoop", 0x14800040, NULL); - - tosa_gpio_setup(mpu, scp0, scp1, tmio); - - tosa_microdrive_attach(mpu); - - tosa_tg_init(mpu); - - tosa_binfo.board_id = 0x208; - arm_load_kernel(mpu->cpu, machine, &tosa_binfo); - sl_bootparam_write(SL_PXA_PARAM_BASE); -} - -static void tosapda_machine_init(MachineClass *mc) -{ - mc->desc = "Sharp SL-6000 (Tosa) PDA (PXA255)"; - mc->init = tosa_init; - mc->block_default_type = IF_IDE; - mc->ignore_memory_transaction_failures = true; - mc->deprecation_reason = "machine is old and unmaintained"; -} - -DEFINE_MACHINE("tosa", tosapda_machine_init) - -static void tosa_dac_class_init(ObjectClass *klass, void *data) -{ - I2CSlaveClass *k = 
I2C_SLAVE_CLASS(klass); - - k->event = tosa_dac_event; - k->recv = tosa_dac_recv; - k->send = tosa_dac_send; -} - -static const TypeInfo tosa_dac_info = { - .name = TYPE_TOSA_DAC, - .parent = TYPE_I2C_SLAVE, - .instance_size = sizeof(TosaDACState), - .class_init = tosa_dac_class_init, -}; - -static void tosa_ssp_class_init(ObjectClass *klass, void *data) -{ - SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); - - k->realize = tosa_ssp_realize; - k->transfer = tosa_ssp_tansfer; -} - -static const TypeInfo tosa_ssp_info = { - .name = "tosa-ssp", - .parent = TYPE_SSI_PERIPHERAL, - .instance_size = sizeof(SSIPeripheral), - .class_init = tosa_ssp_class_init, -}; - -static const TypeInfo tosa_misc_gpio_info = { - .name = TYPE_TOSA_MISC_GPIO, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(TosaMiscGPIOState), - .instance_init = tosa_misc_gpio_init, - /* - * No class init required: device has no internal state so does not - * need to set up reset or vmstate, and has no realize method. - */ -}; - -static void tosa_register_types(void) -{ - type_register_static(&tosa_dac_info); - type_register_static(&tosa_ssp_info); - type_register_static(&tosa_misc_gpio_info); -} - -type_init(tosa_register_types) diff --git a/hw/arm/trace-events b/hw/arm/trace-events index be6c8f720bc..ea1feeefe1f 100644 --- a/hw/arm/trace-events +++ b/hw/arm/trace-events @@ -1,7 +1,16 @@ # See docs/devel/tracing.rst for syntax documentation. +# omap1.c +omap1_pwl_clocking_scheme(const char *scheme) "omap1 CLKM: clocking scheme set to %s" +omap1_pwl_backlight(int output) "omap1 PWL: backlight now at %d/256" +omap1_pwt_buzz(int freq) "omap1 PWT: %dHz buzz on" +omap1_pwt_silence(void) "omap1 PWT: buzzer silenced" +omap1_lpg_led(const char *onoff) "omap1 LPG: LED is %s" + # virt-acpi-build.c virt_acpi_setup(void) "No fw cfg or ACPI disabled. Bailing out." 
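Each entry in this file is turned by tracetool into a trace_<name>() helper with the matching signature, so the CMDQV events added below are emitted from C roughly as (illustrative):

    trace_virt_acpi_dsdt_cmdqv(identifier, base, irq);   /* from virt-acpi-build.c */
    trace_tegra241_cmdqv_init(param);                    /* from the CMDQV model */

They can then be enabled at runtime with QEMU's -trace option, e.g. -trace 'tegra241_cmdqv_*'.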
+virt_acpi_dsdt_cmdqv(int smmu_id, uint64_t base, uint32_t irq) "DSDT: add cmdqv node for (id=%d), base=%" PRIx64 ", irq=%d" +virt_acpi_iort_smmuv3(int smmu_id, uint64_t base, uint32_t irq_min, uint32_t irq_max) "IORT: add smmuv3 node (id=%d), base=%" PRIx64 ", irq=%d-%d" # smmu-common.c smmu_add_mr(const char *name) "%s" @@ -15,6 +24,8 @@ smmu_iotlb_inv_asid_vmid(int asid, int vmid) "IOTLB invalidate asid=%d vmid=%d" smmu_iotlb_inv_vmid(int vmid) "IOTLB invalidate vmid=%d" smmu_iotlb_inv_vmid_s1(int vmid) "IOTLB invalidate vmid=%d" smmu_iotlb_inv_iova(int asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64 +smmu_configs_inv_sid_range(uint32_t start, uint32_t end) "Config cache INV SID range from 0x%x to 0x%x" +smmu_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x" smmu_inv_notifiers_mr(const char *name) "iommu mr=%s" smmu_iotlb_lookup_hit(int asid, int vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" smmu_iotlb_lookup_miss(int asid, int vmid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d vmid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" @@ -52,10 +63,19 @@ smmuv3_cmdq_tlbi_nh(int vmid) "vmid=%d" smmuv3_cmdq_tlbi_nsnh(void) "" smmuv3_cmdq_tlbi_nh_asid(int asid) "asid=%d" smmuv3_cmdq_tlbi_s12_vmid(int vmid) "vmid=%d" -smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x" smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s" smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s" smmuv3_inv_notifiers_iova(const char *name, int asid, int vmid, uint64_t iova, uint8_t tg, uint64_t num_pages, int stage) "iommu mr=%s asid=%d vmid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" stage=%d" +smmu_reset_exit(void) "" + +#smmuv3-accel.c +smmuv3_accel_set_iommu_device(int devfn, uint32_t sid) "devfn=0x%x (sid=0x%x)" +smmuv3_accel_unset_iommu_device(int devfn, uint32_t sid) "devfn=0x%x (sid=0x%x)" +smmuv3_accel_install_nested_ste(uint32_t sid, uint64_t ste_1, uint64_t ste_0) "sid=%d ste=%"PRIx64":%"PRIx64 + +# tegra241-cmdqv +tegra241_cmdqv_init(uint32_t param) "hw info received. param: 0x%04X" +tegra241_cmdqv_err_map(uint32_t map3, uint32_t map2, uint32_t map1, uint32_t map0) "hw irq received. 
error (hex) maps: %04X:%04X:%04X:%04X" # strongarm.c strongarm_uart_update_parameters(const char *label, int speed, char parity, int data_bits, int stop_bits) "%s speed=%d parity=%c data=%d stop=%d" @@ -68,10 +88,5 @@ z2_aer915_send_too_long(int8_t msg) "message too long (%i bytes)" z2_aer915_send(uint8_t reg, uint8_t value) "reg %d value 0x%02x" z2_aer915_event(int8_t event, int8_t len) "i2c event =0x%x len=%d bytes" -# xen_arm.c -xen_create_virtio_mmio_devices(int i, int irq, uint64_t base) "Created virtio-mmio device %d: irq %d base 0x%"PRIx64 -xen_init_ram(uint64_t machine_ram_size) "Initialized xen ram with size 0x%"PRIx64 -xen_enable_tpm(uint64_t addr) "Connected tpmdev at address 0x%"PRIx64 - # bcm2838.c bcm2838_gic_set_irq(int irq, int level) "gic irq:%d lvl:%d" diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c index d48235453e4..5cf1a70d10d 100644 --- a/hw/arm/versatilepb.c +++ b/hw/arm/versatilepb.c @@ -14,7 +14,7 @@ #include "hw/arm/boot.h" #include "hw/net/smc91c111.h" #include "net/net.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/pci/pci.h" #include "hw/i2c/i2c.h" #include "hw/i2c/arm_sbcon_i2c.h" @@ -27,6 +27,7 @@ #include "qom/object.h" #include "audio/audio.h" #include "target/arm/cpu-qom.h" +#include "qemu/log.h" #define VERSATILE_FLASH_ADDR 0x34000000 #define VERSATILE_FLASH_SIZE (64 * 1024 * 1024) @@ -110,7 +111,8 @@ static uint64_t vpb_sic_read(void *opaque, hwaddr offset, case 8: /* PICENABLE */ return s->pic_enable; default: - printf ("vpb_sic_read: Bad register offset 0x%x\n", (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, + "vpb_sic_read: Bad register offset 0x%x\n", (int)offset); return 0; } } @@ -144,7 +146,8 @@ static void vpb_sic_write(void *opaque, hwaddr offset, vpb_sic_update_pic(s); break; default: - printf ("vpb_sic_write: Bad register offset 0x%x\n", (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, + "vpb_sic_write: Bad register offset 0x%x\n", (int)offset); return; } vpb_sic_update(s); @@ -409,7 +412,7 @@ static void vab_init(MachineState *machine) versatile_init(machine, 0x25e); } -static void versatilepb_class_init(ObjectClass *oc, void *data) +static void versatilepb_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -419,6 +422,7 @@ static void versatilepb_class_init(ObjectClass *oc, void *data) mc->ignore_memory_transaction_failures = true; mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926"); mc->default_ram_id = "versatile.ram"; + mc->auto_create_sdcard = true; machine_add_audiodev_property(mc); } @@ -429,7 +433,7 @@ static const TypeInfo versatilepb_type = { .class_init = versatilepb_class_init, }; -static void versatileab_class_init(ObjectClass *oc, void *data) +static void versatileab_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -439,6 +443,7 @@ static void versatileab_class_init(ObjectClass *oc, void *data) mc->ignore_memory_transaction_failures = true; mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926"); mc->default_ram_id = "versatile.ram"; + mc->auto_create_sdcard = true; machine_add_audiodev_property(mc); } @@ -457,7 +462,7 @@ static void versatile_machine_init(void) type_init(versatile_machine_init) -static void vpb_sic_class_init(ObjectClass *klass, void *data) +static void vpb_sic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c index de815d84cc6..35f8d05ea17 100644 --- a/hw/arm/vexpress.c +++ b/hw/arm/vexpress.c @@ -30,11 +30,11 @@ 
#include "hw/net/lan9118.h" #include "hw/i2c/i2c.h" #include "net/net.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/boards.h" #include "hw/loader.h" #include "hw/block/flash.h" -#include "sysemu/device_tree.h" +#include "system/device_tree.h" #include "qemu/error-report.h" #include #include "hw/char/pl011.h" @@ -42,7 +42,7 @@ #include "hw/cpu/a15mpcore.h" #include "hw/i2c/arm_sbcon_i2c.h" #include "hw/sd/sd.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qlist.h" #include "qom/object.h" #include "audio/audio.h" #include "target/arm/cpu-qom.h" @@ -51,6 +51,8 @@ #define VEXPRESS_FLASH_SIZE (64 * 1024 * 1024) #define VEXPRESS_FLASH_SECT_SIZE (256 * 1024) +#define GIC_EXT_IRQS 64 /* Versatile Express A9 development board */ + /* Number of virtio transports to create (0..8; limited by * number of available IRQ lines). */ @@ -241,6 +243,7 @@ static void init_cpus(MachineState *ms, const char *cpu_type, */ dev = qdev_new(privdev); qdev_prop_set_uint32(dev, "num-cpu", smp_cpus); + qdev_prop_set_uint32(dev, "num-irq", GIC_EXT_IRQS + GIC_INTERNAL); busdev = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(busdev, &error_fatal); sysbus_mmio_map(busdev, 0, periphbase); @@ -251,7 +254,7 @@ static void init_cpus(MachineState *ms, const char *cpu_type, * external interrupts starting from 32 (because there * are internal interrupts 0..31). */ - for (n = 0; n < 64; n++) { + for (n = 0; n < GIC_EXT_IRQS; n++) { pic[n] = qdev_get_gpio_in(dev, n); } @@ -543,7 +546,7 @@ static void vexpress_common_init(MachineState *machine) VexpressMachineClass *vmc = VEXPRESS_MACHINE_GET_CLASS(machine); VEDBoardInfo *daughterboard = vmc->daughterboard; DeviceState *dev, *sysctl, *pl041; - qemu_irq pic[64]; + qemu_irq pic[GIC_EXT_IRQS]; uint32_t sys_id; DriveInfo *dinfo; PFlashCFI01 *pflash0; @@ -774,7 +777,7 @@ static void vexpress_a9_instance_init(Object *obj) vms->virt = false; } -static void vexpress_class_init(ObjectClass *oc, void *data) +static void vexpress_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -792,7 +795,7 @@ static void vexpress_class_init(ObjectClass *oc, void *data) "Security Extensions (TrustZone)"); } -static void vexpress_a9_class_init(ObjectClass *oc, void *data) +static void vexpress_a9_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { ARM_CPU_TYPE_NAME("cortex-a9"), @@ -803,11 +806,12 @@ static void vexpress_a9_class_init(ObjectClass *oc, void *data) mc->desc = "ARM Versatile Express for Cortex-A9"; mc->valid_cpu_types = valid_cpu_types; + mc->auto_create_sdcard = true; vmc->daughterboard = &a9_daughterboard; } -static void vexpress_a15_class_init(ObjectClass *oc, void *data) +static void vexpress_a15_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { ARM_CPU_TYPE_NAME("cortex-a15"), @@ -818,6 +822,7 @@ static void vexpress_a15_class_init(ObjectClass *oc, void *data) mc->desc = "ARM Versatile Express for Cortex-A15"; mc->valid_cpu_types = valid_cpu_types; + mc->auto_create_sdcard = true; vmc->daughterboard = &a15_daughterboard; diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index e10cad86dd7..05266af3ea0 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -34,15 +34,19 @@ #include "hw/core/cpu.h" #include "hw/acpi/acpi-defs.h" #include "hw/acpi/acpi.h" +#include "hw/acpi/pcihp.h" #include "hw/nvram/fw_cfg_acpi.h" #include "hw/acpi/bios-linker-loader.h" #include "hw/acpi/aml-build.h" #include 
"hw/acpi/utils.h" #include "hw/acpi/pci.h" +#include "hw/acpi/cxl.h" #include "hw/acpi/memory_hotplug.h" #include "hw/acpi/generic_event_device.h" #include "hw/acpi/tpm.h" #include "hw/acpi/hmat.h" +#include "hw/arm/smmuv3.h" +#include "hw/cxl/cxl.h" #include "hw/pci/pcie_host.h" #include "hw/pci/pci.h" #include "hw/pci/pci_bus.h" @@ -51,13 +55,12 @@ #include "hw/intc/arm_gicv3_its_common.h" #include "hw/mem/nvdimm.h" #include "hw/platform-bus.h" -#include "sysemu/numa.h" -#include "sysemu/reset.h" -#include "sysemu/tpm.h" +#include "system/numa.h" +#include "system/reset.h" +#include "system/tpm.h" #include "migration/vmstate.h" #include "hw/acpi/ghes.h" #include "hw/acpi/viot.h" -#include "hw/acpi/acpi_generic_initiator.h" #include "hw/virtio/virtio-acpi.h" #include "target/arm/multiprocessing.h" @@ -120,23 +123,67 @@ static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap) aml_append(scope, dev); } +static void build_acpi0017(Aml *table) +{ + Aml *dev, *scope, *method; + + scope = aml_scope("_SB"); + dev = aml_device("CXLM"); + aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0017"))); + + method = aml_method("_STA", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_int(0x0B))); + aml_append(dev, method); + build_cxl_dsm_method(dev); + + aml_append(scope, dev); + aml_append(table, scope); +} + static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap, uint32_t irq, VirtMachineState *vms) { int ecam_id = VIRT_ECAM_ID(vms->highmem_ecam); + bool cxl_present = false; + PCIBus *bus = vms->bus; + bool acpi_pcihp = false; + + if (vms->acpi_dev) { + acpi_pcihp = object_property_get_bool(OBJECT(vms->acpi_dev), + ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, + NULL); + } + struct GPEXConfig cfg = { .mmio32 = memmap[VIRT_PCIE_MMIO], .pio = memmap[VIRT_PCIE_PIO], .ecam = memmap[ecam_id], .irq = irq, .bus = vms->bus, + .pci_native_hotplug = !acpi_pcihp, }; + /* + * Accel SMMU requires RMRs for MSI 1-1 mapping, which + * require _DSM for preserving PCI Boot Configurations + */ + if (vms->pci_preserve_config) { + cfg.preserve_config = true; + } + if (vms->highmem_mmio) { cfg.mmio64 = memmap[VIRT_HIGH_PCIE_MMIO]; } acpi_dsdt_add_gpex(scope, &cfg); + QLIST_FOREACH(bus, &vms->bus->child, sibling) { + if (pci_bus_is_cxl(bus)) { + cxl_present = true; + } + } + if (cxl_present) { + build_acpi0017(scope); + } } static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap, @@ -154,10 +201,10 @@ static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap, aml_append(dev, aml_name_decl("_CRS", crs)); Aml *aei = aml_resource_template(); - /* Pin 3 for power button */ - const uint32_t pin_list[1] = {3}; + + const uint32_t pin = GPIO_PIN_POWER_BUTTON; aml_append(aei, aml_gpio_int(AML_CONSUMER, AML_EDGE, AML_ACTIVE_HIGH, - AML_EXCLUSIVE, AML_PULL_UP, 0, pin_list, 1, + AML_EXCLUSIVE, AML_PULL_UP, 0, &pin, 1, "GPO0", NULL, 0)); aml_append(dev, aml_name_decl("_AEI", aei)); @@ -217,7 +264,8 @@ static void acpi_dsdt_add_tpm(Aml *scope, VirtMachineState *vms) * Note that @id_count gets internally subtracted by one, following the spec. 
*/ static void build_iort_id_mapping(GArray *table_data, uint32_t input_base, - uint32_t id_count, uint32_t out_ref) + uint32_t id_count, uint32_t out_ref, + uint32_t flags) { build_append_int_noprefix(table_data, input_base, 4); /* Input base */ /* Number of IDs - The number of IDs in the range minus one */ @@ -225,7 +273,7 @@ static void build_iort_id_mapping(GArray *table_data, uint32_t input_base, build_append_int_noprefix(table_data, input_base, 4); /* Output base */ build_append_int_noprefix(table_data, out_ref, 4); /* Output Reference */ /* Flags */ - build_append_int_noprefix(table_data, 0 /* Single mapping (disabled) */, 4); + build_append_int_noprefix(table_data, flags, 4); } struct AcpiIortIdMapping { @@ -267,41 +315,123 @@ static int iort_idmap_compare(gconstpointer a, gconstpointer b) return idmap_a->input_base - idmap_b->input_base; } +typedef struct AcpiIortSMMUv3Dev { + Object *obj; + int irq; + hwaddr base; + GArray *rc_smmu_idmaps; + /* Offset of the SMMUv3 IORT Node relative to the start of the IORT */ + size_t offset; + bool accel; + bool ats; +} AcpiIortSMMUv3Dev; + /* - * Input Output Remapping Table (IORT) - * Conforms to "IO Remapping Table System Software on ARM Platforms", - * Document number: ARM DEN 0049E.b, Feb 2021 + * Populate the struct AcpiIortSMMUv3Dev for the legacy SMMUv3 and + * return the total number of associated idmaps. */ -static void -build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) +static int populate_smmuv3_legacy_dev(GArray *sdev_blob) { - int i, nb_nodes, rc_mapping_count; - size_t node_size, smmu_offset = 0; - AcpiIortIdMapping *idmap; - uint32_t id = 0; - GArray *smmu_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); - GArray *its_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); + VirtMachineState *vms = VIRT_MACHINE(qdev_get_machine()); + AcpiIortSMMUv3Dev sdev; - AcpiTable table = { .sig = "IORT", .rev = 3, .oem_id = vms->oem_id, - .oem_table_id = vms->oem_table_id }; - /* Table 2 The IORT */ - acpi_table_begin(&table, table_data); + sdev.rc_smmu_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); + object_child_foreach_recursive(object_get_root(), iort_host_bridges, + sdev.rc_smmu_idmaps); + /* + * There can be only one legacy SMMUv3("iommu=smmuv3") as it is a machine + * wide one. Since it may cover multiple PCIe RCs(based on "bypass_iommu" + * property), may have multiple SMMUv3 idmaps. Sort it by input_base. 
+ */ + g_array_sort(sdev.rc_smmu_idmaps, iort_idmap_compare); - if (vms->iommu == VIRT_IOMMU_SMMUV3) { - AcpiIortIdMapping next_range = {0}; + sdev.base = vms->memmap[VIRT_SMMU].base; + sdev.irq = vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE; + g_array_append_val(sdev_blob, sdev); + return sdev.rc_smmu_idmaps->len; +} - object_child_foreach_recursive(object_get_root(), - iort_host_bridges, smmu_idmaps); +static int smmuv3_dev_idmap_compare(gconstpointer a, gconstpointer b) +{ + AcpiIortSMMUv3Dev *sdev_a = (AcpiIortSMMUv3Dev *)a; + AcpiIortSMMUv3Dev *sdev_b = (AcpiIortSMMUv3Dev *)b; + AcpiIortIdMapping *map_a = &g_array_index(sdev_a->rc_smmu_idmaps, + AcpiIortIdMapping, 0); + AcpiIortIdMapping *map_b = &g_array_index(sdev_b->rc_smmu_idmaps, + AcpiIortIdMapping, 0); + return map_a->input_base - map_b->input_base; +} - - /* Sort the smmu idmap by input_base */ - g_array_sort(smmu_idmaps, iort_idmap_compare); +static int iort_smmuv3_devices(Object *obj, void *opaque) +{ + VirtMachineState *vms = VIRT_MACHINE(qdev_get_machine()); + GArray *sdev_blob = opaque; + AcpiIortIdMapping idmap; + PlatformBusDevice *pbus; + AcpiIortSMMUv3Dev sdev; + int min_bus, max_bus; + SysBusDevice *sbdev; + PCIBus *bus; + + if (!object_dynamic_cast(obj, TYPE_ARM_SMMUV3)) { + return 0; + } + bus = PCI_BUS(object_property_get_link(obj, "primary-bus", &error_abort)); + sdev.accel = object_property_get_bool(obj, "accel", &error_abort); + sdev.ats = object_property_get_bool(obj, "ats", &error_abort); + pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev); + sbdev = SYS_BUS_DEVICE(obj); + sdev.base = platform_bus_get_mmio_addr(pbus, sbdev, 0); + sdev.base += vms->memmap[VIRT_PLATFORM_BUS].base; + sdev.irq = platform_bus_get_irqn(pbus, sbdev, 0); + sdev.irq += vms->irqmap[VIRT_PLATFORM_BUS]; + sdev.irq += ARM_SPI_BASE; + sdev.obj = obj; + + pci_bus_range(bus, &min_bus, &max_bus); + sdev.rc_smmu_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); + idmap.input_base = min_bus << 8; + idmap.id_count = (max_bus - min_bus + 1) << 8; + g_array_append_val(sdev.rc_smmu_idmaps, idmap); + g_array_append_val(sdev_blob, sdev); + return 0; +} + +/* + * Populate the struct AcpiIortSMMUv3Dev for all SMMUv3 devices and + * return the total number of idmaps. + */ +static int populate_smmuv3_dev(GArray *sdev_blob) +{ + object_child_foreach_recursive(object_get_root(), + iort_smmuv3_devices, sdev_blob); + /* Sort the smmuv3 devices (if any) by smmu idmap input_base */ + g_array_sort(sdev_blob, smmuv3_dev_idmap_compare); + /* + * Since each SMMUv3 dev is associated with a specific host bridge, + * the total number of idmaps equals the total number of smmuv3 devices. + */ + return sdev_blob->len; +} + +/* Compute ID ranges (RIDs) from RC that are directed to the ITS Group node */ +static void create_rc_its_idmaps(GArray *its_idmaps, GArray *smmuv3_devs) +{ + AcpiIortIdMapping *idmap; + AcpiIortIdMapping next_range = {0}; + AcpiIortSMMUv3Dev *sdev; + + for (int i = 0; i < smmuv3_devs->len; i++) { + sdev = &g_array_index(smmuv3_devs, AcpiIortSMMUv3Dev, i); /* - * Split the whole RIDs by mapping from RC to SMMU, - * build the ID mapping from RC to ITS directly. + * Based on the RID ranges that are directed to the SMMU, determine the + * bypassed RID ranges, i.e., the ones that are directed to the ITS + * Group node and do not pass through the SMMU, by subtracting the + * SMMU-bound ranges from the full RID range (0x0000–0xFFFF).
*/ - for (i = 0; i < smmu_idmaps->len; i++) { - idmap = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i); + for (int j = 0; j < sdev->rc_smmu_idmaps->len; j++) { + idmap = &g_array_index(sdev->rc_smmu_idmaps, AcpiIortIdMapping, j); if (next_range.input_base < idmap->input_base) { next_range.id_count = idmap->input_base - next_range.input_base; @@ -310,18 +440,133 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) next_range.input_base = idmap->input_base + idmap->id_count; } + } + /* + * Append the last RC -> ITS ID mapping. + * + * RIDs are 16-bit, according to the PCI Express 2.0 Base Specification, rev + * 0.9, section 2.2.6.2, "Transaction Descriptor - Transaction ID Field", + * hence the end of the range is 0x10000. + */ + if (next_range.input_base < 0x10000) { + next_range.id_count = 0x10000 - next_range.input_base; + g_array_append_val(its_idmaps, next_range); + } +} - /* Append the last RC -> ITS ID mapping */ - if (next_range.input_base < 0x10000) { - next_range.id_count = 0x10000 - next_range.input_base; - g_array_append_val(its_idmaps, next_range); +static void +build_iort_rmr_nodes(GArray *table_data, GArray *smmuv3_devices, uint32_t *id) +{ + AcpiIortSMMUv3Dev *sdev; + AcpiIortIdMapping *idmap; + int i; + + for (i = 0; i < smmuv3_devices->len; i++) { + sdev = &g_array_index(smmuv3_devices, AcpiIortSMMUv3Dev, i); + idmap = &g_array_index(sdev->rc_smmu_idmaps, AcpiIortIdMapping, 0); + int bdf = idmap->input_base; + + if (!sdev->accel) { + continue; } - nb_nodes = 3; /* RC, ITS, SMMUv3 */ - rc_mapping_count = smmu_idmaps->len + its_idmaps->len; + /* Table 18 Reserved Memory Range Node */ + build_append_int_noprefix(table_data, 6 /* RMR */, 1); /* Type */ + /* Length */ + build_append_int_noprefix(table_data, 28 + ID_MAPPING_ENTRY_SIZE + 20, + 2); + build_append_int_noprefix(table_data, 3, 1); /* Revision */ + build_append_int_noprefix(table_data, *id, 4); /* Identifier */ + /* Number of ID mappings */ + build_append_int_noprefix(table_data, 1, 4); + /* Reference to ID Array */ + build_append_int_noprefix(table_data, 28, 4); + + /* RMR specific data */ + + /* Flags */ + build_append_int_noprefix(table_data, 0 /* Disallow remapping */, 4); + /* Number of Memory Range Descriptors */ + build_append_int_noprefix(table_data, 1 , 4); + /* Reference to Memory Range Descriptors */ + build_append_int_noprefix(table_data, 28 + ID_MAPPING_ENTRY_SIZE, 4); + build_iort_id_mapping(table_data, bdf, idmap->id_count, sdev->offset, + 1); + + /* Table 19 Memory Range Descriptor */ + + /* Physical Range offset */ + build_append_int_noprefix(table_data, 0x8000000, 8); + /* Physical Range length */ + build_append_int_noprefix(table_data, 0x100000, 8); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + *id += 1; + } +} + +/* + * Input Output Remapping Table (IORT) + * Conforms to "IO Remapping Table System Software on ARM Platforms", + * Document number: ARM DEN 0049E.b, Feb 2021 + */ +static void +build_iort(GArray *table_data, AcpiBuildTables *tables, VirtMachineState *vms) +{ + GArray *smmuv3_devs = tables->smmuv3_devs; + BIOSLinker *linker = tables->linker; + int i, nb_nodes, rc_mapping_count; + AcpiIortSMMUv3Dev *sdev; + size_t node_size; + bool ats_needed = false; + int num_smmus = 0; + uint32_t id = 0; + int rc_smmu_idmaps_len = 0; + GArray *rc_its_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); + + AcpiTable table = { .sig = "IORT", .rev = 5, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + /* Table 2 The IORT */ + 
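As a worked example of the node-count logic that follows: with two user-created SMMUv3 devices, one of them with accel=on, and an ITS present, nb_nodes comes out as 2 (SMMUv3) + 1 (RC) + 1 (ITS) + 1 (RMR for the accel SMMU) = 5, and rc_mapping_count is the two RC->SMMUv3 idmaps plus however many bypassed RC->ITS ranges create_rc_its_idmaps() produces; with no SMMU and no ITS this degenerates to a single RC node with no output mappings.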
acpi_table_begin(&table, table_data); + + if (vms->legacy_smmuv3_present) { + rc_smmu_idmaps_len = populate_smmuv3_legacy_dev(smmuv3_devs); + } else { + rc_smmu_idmaps_len = smmuv3_devs->len; + } + + num_smmus = smmuv3_devs->len; + if (num_smmus) { + nb_nodes = num_smmus + 1; /* RC and SMMUv3 */ + rc_mapping_count = rc_smmu_idmaps_len; + + if (vms->its) { + /* + * Knowing the ID ranges from the RC to the SMMU, it's possible to + * determine the ID ranges from RC that go directly to ITS. + */ + create_rc_its_idmaps(rc_its_idmaps, smmuv3_devs); + + nb_nodes++; /* ITS */ + rc_mapping_count += rc_its_idmaps->len; + } + /* Calculate RMR nodes required. One per SMMUv3 with accelerated mode */ + for (i = 0; i < num_smmus; i++) { + sdev = &g_array_index(smmuv3_devs, AcpiIortSMMUv3Dev, i); + if (sdev->ats) { + ats_needed = true; + } + if (sdev->accel) { + nb_nodes++; + } + } } else { - nb_nodes = 2; /* RC, ITS */ - rc_mapping_count = 1; + if (vms->its) { + nb_nodes = 2; /* RC and ITS */ + rc_mapping_count = 1; /* Direct map to ITS */ + } else { + nb_nodes = 1; /* RC only */ + rc_mapping_count = 0; /* No output mapping */ + } } /* Number of IORT Nodes */ build_append_int_noprefix(table_data, nb_nodes, 4); @@ -330,33 +575,47 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) build_append_int_noprefix(table_data, IORT_NODE_OFFSET, 4); build_append_int_noprefix(table_data, 0, 4); /* Reserved */ - /* Table 12 ITS Group Format */ - build_append_int_noprefix(table_data, 0 /* ITS Group */, 1); /* Type */ - node_size = 20 /* fixed header size */ + 4 /* 1 GIC ITS Identifier */; - build_append_int_noprefix(table_data, node_size, 2); /* Length */ - build_append_int_noprefix(table_data, 1, 1); /* Revision */ - build_append_int_noprefix(table_data, id++, 4); /* Identifier */ - build_append_int_noprefix(table_data, 0, 4); /* Number of ID mappings */ - build_append_int_noprefix(table_data, 0, 4); /* Reference to ID Array */ - build_append_int_noprefix(table_data, 1, 4); /* Number of ITSs */ - /* GIC ITS Identifier Array */ - build_append_int_noprefix(table_data, 0 /* MADT translation_id */, 4); - - if (vms->iommu == VIRT_IOMMU_SMMUV3) { - int irq = vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE; + if (vms->its) { + /* Table 12 ITS Group Format */ + build_append_int_noprefix(table_data, 0 /* ITS Group */, 1); /* Type */ + node_size = 20 /* fixed header size */ + 4 /* 1 GIC ITS Identifier */; + build_append_int_noprefix(table_data, node_size, 2); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Revision */ + build_append_int_noprefix(table_data, id++, 4); /* Identifier */ + build_append_int_noprefix(table_data, 0, 4); /* Number of ID mappings */ + build_append_int_noprefix(table_data, 0, 4); /* Reference to ID Array */ + build_append_int_noprefix(table_data, 1, 4); /* Number of ITSs */ + /* GIC ITS Identifier Array */ + build_append_int_noprefix(table_data, 0 /* MADT translation_id */, 4); + } - smmu_offset = table_data->len - table.table_offset; + for (i = 0; i < num_smmus; i++) { + sdev = &g_array_index(smmuv3_devs, AcpiIortSMMUv3Dev, i); + int smmu_mapping_count, offset_to_id_array; + int irq = sdev->irq; + + if (vms->its) { + smmu_mapping_count = 1; /* ITS Group node */ + offset_to_id_array = SMMU_V3_ENTRY_SIZE; /* Just after the header */ + } else { + smmu_mapping_count = 0; /* No ID mappings */ + offset_to_id_array = 0; /* No ID mappings array */ + } + sdev->offset = table_data->len - table.table_offset; + trace_virt_acpi_iort_smmuv3(id, sdev->base, irq, irq + 3); /* Table 9 
SMMUv3 Format */ build_append_int_noprefix(table_data, 4 /* SMMUv3 */, 1); /* Type */ - node_size = SMMU_V3_ENTRY_SIZE + ID_MAPPING_ENTRY_SIZE; + node_size = SMMU_V3_ENTRY_SIZE + + (ID_MAPPING_ENTRY_SIZE * smmu_mapping_count); build_append_int_noprefix(table_data, node_size, 2); /* Length */ build_append_int_noprefix(table_data, 4, 1); /* Revision */ build_append_int_noprefix(table_data, id++, 4); /* Identifier */ - build_append_int_noprefix(table_data, 1, 4); /* Number of ID mappings */ + /* Number of ID mappings */ + build_append_int_noprefix(table_data, smmu_mapping_count, 4); /* Reference to ID Array */ - build_append_int_noprefix(table_data, SMMU_V3_ENTRY_SIZE, 4); + build_append_int_noprefix(table_data, offset_to_id_array, 4); /* Base address */ - build_append_int_noprefix(table_data, vms->memmap[VIRT_SMMU].base, 8); + build_append_int_noprefix(table_data, sdev->base, 8); /* Flags */ build_append_int_noprefix(table_data, 1 /* COHACC Override */, 4); build_append_int_noprefix(table_data, 0, 4); /* Reserved */ @@ -370,9 +629,11 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) build_append_int_noprefix(table_data, 0, 4); /* Proximity domain */ /* DeviceID mapping index (ignored since interrupts are GSIV based) */ build_append_int_noprefix(table_data, 0, 4); - - /* output IORT node is the ITS group node (the first node) */ - build_iort_id_mapping(table_data, 0, 0x10000, IORT_NODE_OFFSET); + /* Array of ID mappings */ + if (smmu_mapping_count) { + /* Output IORT node is the ITS Group node (the first node). */ + build_iort_id_mapping(table_data, 0, 0x10000, IORT_NODE_OFFSET, 0); + } } /* Table 17 Root Complex Node */ @@ -394,8 +655,8 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) build_append_int_noprefix(table_data, 0, 2); /* Reserved */ /* Table 15 Memory Access Flags */ build_append_int_noprefix(table_data, 0x3 /* CCA = CPM = DACS = 1 */, 1); - - build_append_int_noprefix(table_data, 0, 4); /* ATS Attribute */ + /* ATS Attribute */ + build_append_int_noprefix(table_data, (ats_needed ? 1 : 0), 4); /* MCFG pci_segment */ build_append_int_noprefix(table_data, 0, 4); /* PCI Segment number */ @@ -405,32 +666,57 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) build_append_int_noprefix(table_data, 0, 3); /* Reserved */ /* Output Reference */ - if (vms->iommu == VIRT_IOMMU_SMMUV3) { + if (num_smmus) { AcpiIortIdMapping *range; - /* translated RIDs connect to SMMUv3 node: RC -> SMMUv3 -> ITS */ - for (i = 0; i < smmu_idmaps->len; i++) { - range = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i); - /* output IORT node is the smmuv3 node */ - build_iort_id_mapping(table_data, range->input_base, - range->id_count, smmu_offset); + for (i = 0; i < num_smmus; i++) { + sdev = &g_array_index(smmuv3_devs, AcpiIortSMMUv3Dev, i); + + /* + * Map RIDs (input) from RC to SMMUv3 nodes: RC -> SMMUv3. + * + * N.B.: The mapping from SMMUv3 to ITS Group node (SMMUv3 -> ITS) + * is defined in the SMMUv3 table, where all SMMUv3 IDs are mapped + * to the ITS Group node, if ITS is available. + */ + for (int j = 0; j < sdev->rc_smmu_idmaps->len; j++) { + range = &g_array_index(sdev->rc_smmu_idmaps, + AcpiIortIdMapping, j); + /* Output IORT node is the SMMUv3 node. 
*/ + build_iort_id_mapping(table_data, range->input_base, + range->id_count, sdev->offset, 0); + } } - /* bypassed RIDs connect to ITS group node directly: RC -> ITS */ - for (i = 0; i < its_idmaps->len; i++) { - range = &g_array_index(its_idmaps, AcpiIortIdMapping, i); - /* output IORT node is the ITS group node (the first node) */ - build_iort_id_mapping(table_data, range->input_base, - range->id_count, IORT_NODE_OFFSET); + if (vms->its) { + /* + * Map bypassed (don't go through the SMMU) RIDs (input) to + * ITS Group node directly: RC -> ITS. + */ + for (i = 0; i < rc_its_idmaps->len; i++) { + range = &g_array_index(rc_its_idmaps, AcpiIortIdMapping, i); + /* Output IORT node is the ITS Group node (the first node). */ + build_iort_id_mapping(table_data, range->input_base, + range->id_count, IORT_NODE_OFFSET, 0); + } } } else { - /* output IORT node is the ITS group node (the first node) */ - build_iort_id_mapping(table_data, 0, 0x10000, IORT_NODE_OFFSET); + /* + * Map all RIDs (input) to ITS Group node directly, since there is no + * SMMU: RC -> ITS. + * Output IORT node is the ITS Group node (the first node). + */ + build_iort_id_mapping(table_data, 0, 0x10000, IORT_NODE_OFFSET, 0); } + build_iort_rmr_nodes(table_data, smmuv3_devs, &id); acpi_table_end(linker, &table); - g_array_free(smmu_idmaps, true); - g_array_free(its_idmaps, true); + g_array_free(rc_its_idmaps, true); + for (i = 0; i < num_smmus; i++) { + sdev = &g_array_index(smmuv3_devs, AcpiIortSMMUv3Dev, i); + g_array_free(sdev->rc_smmu_idmaps, true); + } + g_array_free(smmuv3_devs, true); } /* @@ -464,8 +750,12 @@ spcr_setup(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) .pci_flags = 0, .pci_segment = 0, }; - - build_spcr(table_data, linker, &serial, 2, vms->oem_id, vms->oem_table_id); + /* + * Passing NULL as the SPCR Table for Revision 2 doesn't support + * NameSpaceString. + */ + build_spcr(table_data, linker, &serial, 2, vms->oem_id, vms->oem_table_id, + NULL); } /* @@ -511,7 +801,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) } } - build_srat_generic_pci_initiator(table_data); + build_srat_generic_affinity_structures(table_data); if (ms->nvdimms_state->is_enabled) { nvdimm_build_srat(table_data); @@ -534,15 +824,12 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) static void build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { - VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); /* * Table 5-117 Flag Definitions * set only "Timer interrupt Mode" and assume "Timer Interrupt * polarity" bit as '0: Interrupt is Active high' */ - uint32_t irqflags = vmc->claim_edge_triggered_timers ? 
- 1 : /* Interrupt is Edge triggered */ - 0; /* Interrupt is Level triggered */ + const uint32_t irqflags = 0; /* Interrupt is Level triggered */ AcpiTable table = { .sig = "GTDT", .rev = 3, .oem_id = vms->oem_id, .oem_table_id = vms->oem_table_id }; @@ -667,7 +954,6 @@ static void build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { int i; - VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); const MemMapEntry *memmap = vms->memmap; AcpiTable table = { .sig = "APIC", .rev = 4, .oem_id = vms->oem_id, .oem_table_id = vms->oem_table_id }; @@ -738,7 +1024,7 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) memmap[VIRT_HIGH_GIC_REDIST2].size); } - if (its_class_name() && !vmc->no_its) { + if (vms->its) { /* * ACPI spec, Revision 6.0 Errata A * (original 6.0 definition has invalid Length) @@ -800,17 +1086,75 @@ static void build_fadt_rev6(GArray *table_data, BIOSLinker *linker, build_fadt(table_data, linker, &fadt, vms->oem_id, vms->oem_table_id); } +#define SMMU_CMDQV_IO_LEN 0x50000 + +static int acpi_dsdt_add_cmdqv(Aml *scope, GArray *smmuv3_devs) +{ + VirtMachineState *vms = VIRT_MACHINE(qdev_get_machine()); + int i; + + for (i = 0; i < smmuv3_devs->len; i++) { + uint32_t identifier = i; + AcpiIortSMMUv3Dev *sdev; + PlatformBusDevice *pbus; + Aml *dev, *crs, *addr; + SysBusDevice *sbdev; + uint32_t irq; + hwaddr base; + + sdev = &g_array_index(smmuv3_devs, AcpiIortSMMUv3Dev, i); + if (!object_property_get_bool(sdev->obj, "cmdqv", &error_abort)) { + continue; + } + + sbdev = SYS_BUS_DEVICE(sdev->obj); + pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev); + base = platform_bus_get_mmio_addr(pbus, sbdev, 1); + base += vms->memmap[VIRT_PLATFORM_BUS].base; + irq = platform_bus_get_irqn(pbus, sbdev, NUM_SMMU_IRQS); + irq += vms->irqmap[VIRT_PLATFORM_BUS]; + irq += ARM_SPI_BASE; + + dev = aml_device("CV%.02u", identifier); + aml_append(dev, aml_name_decl("_HID", aml_string("NVDA200C"))); + if (vms->its) { + identifier++; + } + aml_append(dev, aml_name_decl("_UID", aml_int(identifier))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + + crs = aml_resource_template(); + addr = aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_CACHEABLE, AML_READ_WRITE, 0x0, base, + base + SMMU_CMDQV_IO_LEN - 0x1, 0x0, + SMMU_CMDQV_IO_LEN); + aml_append(crs, addr); + aml_append(crs, aml_interrupt(AML_CONSUMER, AML_EDGE, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &irq, 1)); + aml_append(dev, aml_name_decl("_CRS", crs)); + + aml_append(scope, dev); + + trace_virt_acpi_dsdt_cmdqv(identifier, base, irq); + } + + return 0; +} + /* DSDT */ static void -build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) +build_dsdt(GArray *table_data, AcpiBuildTables *tables, VirtMachineState *vms) { VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + GArray *smmuv3_devs = tables->smmuv3_devs; + BIOSLinker *linker = tables->linker; Aml *scope, *dsdt; MachineState *ms = MACHINE(vms); const MemMapEntry *memmap = vms->memmap; const int *irqmap = vms->irqmap; AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = vms->oem_id, .oem_table_id = vms->oem_table_id }; + Aml *pci0_scope; acpi_table_begin(&table, table_data); dsdt = init_aml_allocator(); @@ -862,8 +1206,37 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) acpi_dsdt_add_tpm(scope, vms); #endif + acpi_dsdt_add_cmdqv(scope, smmuv3_devs); + aml_append(dsdt, scope); + pci0_scope = aml_scope("\\_SB.PCI0"); + + aml_append(pci0_scope, build_pci_bridge_edsm()); + 
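To make the _UID assignment in acpi_dsdt_add_cmdqv() above concrete: with an ITS present and two SMMUv3 devices that have cmdqv enabled, the DSDT should get CV00 with _UID 1 and CV01 with _UID 2, matching the Identifiers those SMMUv3 nodes receive in build_iort() (where the ITS Group node takes identifier 0); the post-increment value is also what trace_virt_acpi_dsdt_cmdqv() logs.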
build_append_pci_bus_devices(pci0_scope, vms->bus); + if (object_property_find(OBJECT(vms->bus), ACPI_PCIHP_PROP_BSEL)) { + build_append_pcihp_slots(pci0_scope, vms->bus); + } + + if (vms->acpi_dev) { + bool acpi_pcihp; + + acpi_pcihp = object_property_get_bool(OBJECT(vms->acpi_dev), + ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, + NULL); + + if (acpi_pcihp) { + build_acpi_pci_hotplug(dsdt, AML_SYSTEM_MEMORY, + memmap[VIRT_ACPI_PCIHP].base); + build_append_pcihp_resources(pci0_scope, + memmap[VIRT_ACPI_PCIHP].base, + memmap[VIRT_ACPI_PCIHP].size); + + build_append_notification_callback(pci0_scope, vms->bus); + } + } + aml_append(dsdt, pci0_scope); + /* copy AML table into ACPI tables blob */ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); @@ -890,6 +1263,16 @@ static void acpi_align_size(GArray *blob, unsigned align) g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); } +/* Prepare structures that will be shared between ACPI tables */ +static void virt_acpi_prebuild(VirtMachineState *vms, AcpiBuildTables *tables) +{ + if (!vms->legacy_smmuv3_present) { + tables->smmuv3_devs = g_array_new(false, true, + sizeof(AcpiIortSMMUv3Dev)); + populate_smmuv3_dev(tables->smmuv3_devs); + } +} + static void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) { @@ -899,6 +1282,7 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) GArray *tables_blob = tables->table_data; MachineState *ms = MACHINE(vms); + virt_acpi_prebuild(vms, tables); table_offsets = g_array_new(false, true /* clear */, sizeof(uint32_t)); @@ -908,7 +1292,7 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) /* DSDT is pointed to by FADT */ dsdt = tables_blob->len; - build_dsdt(tables_blob, tables->linker, vms); + build_dsdt(tables_blob, tables, vms); /* FADT MADT PPTT GTDT MCFG SPCR DBG2 pointed to by RSDT */ acpi_add_table(table_offsets, tables_blob); @@ -937,16 +1321,18 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) } acpi_add_table(table_offsets, tables_blob); - spcr_setup(tables_blob, tables->linker, vms); + + if (ms->acpi_spcr_enabled) { + spcr_setup(tables_blob, tables->linker, vms); + } acpi_add_table(table_offsets, tables_blob); build_dbg2(tables_blob, tables->linker, vms); if (vms->ras) { - build_ghes_error_table(tables->hardware_errors, tables->linker); acpi_add_table(table_offsets, tables_blob); - acpi_build_hest(tables_blob, tables->linker, vms->oem_id, - vms->oem_table_id); + acpi_build_hest(tables_blob, tables->hardware_errors, tables->linker, + vms->oem_id, vms->oem_table_id); } if (ms->numa_state->num_nodes > 0) { @@ -965,16 +1351,19 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) } } + if (vms->cxl_devices_state.is_enabled) { + cxl_build_cedt(table_offsets, tables_blob, tables->linker, + vms->oem_id, vms->oem_table_id, &vms->cxl_devices_state); + } + if (ms->nvdimms_state->is_enabled) { nvdimm_build_acpi(table_offsets, tables_blob, tables->linker, ms->nvdimms_state, ms->ram_slots, vms->oem_id, vms->oem_table_id); } - if (its_class_name() && !vmc->no_its) { - acpi_add_table(table_offsets, tables_blob); - build_iort(tables_blob, tables->linker, vms); - } + acpi_add_table(table_offsets, tables_blob); + build_iort(tables_blob, tables, vms); #ifdef CONFIG_TPM if (tpm_get_version(tpm_find()) == TPM_VERSION_2_0) { diff --git a/hw/arm/virt.c b/hw/arm/virt.c index 719e83e6a1e..418ed77debf 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -42,20 +42,23 @@ #include "hw/vfio/vfio-amd-xgbe.h" #include 
"hw/display/ramfb.h" #include "net/net.h" -#include "sysemu/device_tree.h" -#include "sysemu/numa.h" -#include "sysemu/runstate.h" -#include "sysemu/tpm.h" -#include "sysemu/tcg.h" -#include "sysemu/kvm.h" -#include "sysemu/hvf.h" -#include "sysemu/qtest.h" +#include "system/device_tree.h" +#include "system/numa.h" +#include "system/runstate.h" +#include "system/tpm.h" +#include "system/tcg.h" +#include "system/kvm.h" +#include "system/hvf.h" +#include "system/qtest.h" #include "hw/loader.h" #include "qapi/error.h" #include "qemu/bitops.h" +#include "qemu/cutils.h" #include "qemu/error-report.h" #include "qemu/module.h" +#include "hw/pci/pci_bus.h" #include "hw/pci-host/gpex.h" +#include "hw/pci-bridge/pci_expander_bridge.h" #include "hw/virtio/virtio-pci.h" #include "hw/core/sysbus-fdt.h" #include "hw/platform-bus.h" @@ -66,13 +69,15 @@ #include "hw/intc/arm_gicv3_its_common.h" #include "hw/irq.h" #include "kvm_arm.h" +#include "hvf_arm.h" #include "hw/firmware/smbios.h" #include "qapi/visitor.h" #include "qapi/qapi-visit-common.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qlist.h" #include "standard-headers/linux/input.h" #include "hw/arm/smmuv3.h" #include "hw/acpi/acpi.h" +#include "hw/acpi/pcihp.h" #include "target/arm/cpu-qom.h" #include "target/arm/internals.h" #include "target/arm/multiprocessing.h" @@ -80,9 +85,12 @@ #include "hw/mem/pc-dimm.h" #include "hw/mem/nvdimm.h" #include "hw/acpi/generic_event_device.h" +#include "hw/uefi/var-service-api.h" #include "hw/virtio/virtio-md-pci.h" #include "hw/virtio/virtio-iommu.h" #include "hw/char/pl011.h" +#include "hw/cxl/cxl.h" +#include "hw/cxl/cxl_host.h" #include "qemu/guest-random.h" static GlobalProperty arm_virt_compat[] = { @@ -104,7 +112,7 @@ static void arm_virt_compat_set(MachineClass *mc) #define DEFINE_VIRT_MACHINE_IMPL(latest, ...) \ static void MACHINE_VER_SYM(class_init, virt, __VA_ARGS__)( \ ObjectClass *oc, \ - void *data) \ + const void *data) \ { \ MachineClass *mc = MACHINE_CLASS(oc); \ arm_virt_compat_set(mc); \ @@ -143,6 +151,10 @@ static void arm_virt_compat_set(MachineClass *mc) #define LEGACY_RAMLIMIT_GB 255 #define LEGACY_RAMLIMIT_BYTES (LEGACY_RAMLIMIT_GB * GiB) +/* MMIO region size for SMMUv3 */ +#define SMMU_IO_LEN 0x20000 +#define SMMU_CMDQV_IO_LEN 0x50000 + /* Addresses and sizes of our components. * 0..128MB is space for a flash device so we can run bootrom code such as UEFI. * 128MB..256MB is used for miscellaneous device I/O. 
@@ -174,12 +186,13 @@ static const MemMapEntry base_memmap[] = { [VIRT_FW_CFG] = { 0x09020000, 0x00000018 }, [VIRT_GPIO] = { 0x09030000, 0x00001000 }, [VIRT_UART1] = { 0x09040000, 0x00001000 }, - [VIRT_SMMU] = { 0x09050000, 0x00020000 }, + [VIRT_SMMU] = { 0x09050000, SMMU_IO_LEN }, [VIRT_PCDIMM_ACPI] = { 0x09070000, MEMORY_HOTPLUG_IO_LEN }, [VIRT_ACPI_GED] = { 0x09080000, ACPI_GED_EVT_SEL_LEN }, [VIRT_NVDIMM_ACPI] = { 0x09090000, NVDIMM_ACPI_IO_LEN}, [VIRT_PVTIME] = { 0x090a0000, 0x00010000 }, [VIRT_SECURE_GPIO] = { 0x090b0000, 0x00001000 }, + [VIRT_ACPI_PCIHP] = { 0x090c0000, ACPI_PCIHP_SIZE }, [VIRT_MMIO] = { 0x0a000000, 0x00000200 }, /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */ [VIRT_PLATFORM_BUS] = { 0x0c000000, 0x02000000 }, @@ -191,6 +204,10 @@ static const MemMapEntry base_memmap[] = { [VIRT_MEM] = { GiB, LEGACY_RAMLIMIT_BYTES }, }; +/* Update the docs for highmem-mmio-size when changing this default */ +#define DEFAULT_HIGH_PCIE_MMIO_SIZE_GB 512 +#define DEFAULT_HIGH_PCIE_MMIO_SIZE (DEFAULT_HIGH_PCIE_MMIO_SIZE_GB * GiB) + /* * Highmem IO Regions: This memory map is floating, located after the RAM. * Each MemMapEntry base (GPA) will be dynamically computed, depending on the @@ -206,13 +223,18 @@ static const MemMapEntry base_memmap[] = { * PA space for one specific region is always reserved, even if the region * has been disabled or doesn't fit into the PA space. However, the PA space * for the region won't be reserved in these circumstances with compact layout. + * + * Note that the highmem-mmio-size property will update the high PCIE MMIO size + * field in this array. */ static MemMapEntry extended_memmap[] = { /* Additional 64 MB redist region (can contain up to 512 redistributors) */ [VIRT_HIGH_GIC_REDIST2] = { 0x0, 64 * MiB }, + [VIRT_CXL_HOST] = { 0x0, 64 * KiB * 16 }, /* 16 UID */ [VIRT_HIGH_PCIE_ECAM] = { 0x0, 256 * MiB }, /* Second PCIe window */ - [VIRT_HIGH_PCIE_MMIO] = { 0x0, 512 * GiB }, + [VIRT_HIGH_PCIE_MMIO] = { 0x0, DEFAULT_HIGH_PCIE_MMIO_SIZE }, + /* Any CXL Fixed memory windows come here */ }; static const int a15irqmap[] = { @@ -360,14 +382,9 @@ static void fdt_add_timer_nodes(const VirtMachineState *vms) * the correct information. 
*/ ARMCPU *armcpu; - VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI; MachineState *ms = MACHINE(vms); - if (vmc->claim_edge_triggered_timers) { - irqflags = GIC_FDT_IRQ_FLAGS_EDGE_LO_HI; - } - if (vms->gic_version == VIRT_GIC_VERSION_2) { irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START, GIC_FDT_IRQ_PPI_CPU_WIDTH, @@ -676,8 +693,10 @@ static inline DeviceState *create_acpi_ged(VirtMachineState *vms) { DeviceState *dev; MachineState *ms = MACHINE(vms); + SysBusDevice *sbdev; int irq = vms->irqmap[VIRT_ACPI_GED]; uint32_t event = ACPI_GED_PWR_DOWN_EVT; + bool acpi_pcihp; if (ms->ram_slots) { event |= ACPI_GED_MEM_HOTPLUG_EVT; @@ -689,32 +708,44 @@ static inline DeviceState *create_acpi_ged(VirtMachineState *vms) dev = qdev_new(TYPE_ACPI_GED); qdev_prop_set_uint32(dev, "ged-event", event); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + object_property_set_link(OBJECT(dev), "bus", OBJECT(vms->bus), &error_abort); + sbdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sbdev, &error_fatal); + + sysbus_mmio_map_name(sbdev, TYPE_ACPI_GED, vms->memmap[VIRT_ACPI_GED].base); + sysbus_mmio_map_name(sbdev, ACPI_MEMHP_REGION_NAME, + vms->memmap[VIRT_PCDIMM_ACPI].base); - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_ACPI_GED].base); - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, vms->memmap[VIRT_PCDIMM_ACPI].base); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(vms->gic, irq)); + acpi_pcihp = object_property_get_bool(OBJECT(dev), + ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, NULL); + + if (acpi_pcihp) { + int pcihp_region_index; + + pcihp_region_index = sysbus_mmio_map_name(sbdev, ACPI_PCIHP_REGION_NAME, + vms->memmap[VIRT_ACPI_PCIHP].base); + assert(pcihp_region_index >= 0); + } + + sysbus_connect_irq(sbdev, 0, qdev_get_gpio_in(vms->gic, irq)); return dev; } static void create_its(VirtMachineState *vms) { - const char *itsclass = its_class_name(); DeviceState *dev; - if (!strcmp(itsclass, "arm-gicv3-its")) { - if (!vms->tcg_its) { - itsclass = NULL; - } - } - - if (!itsclass) { - /* Do nothing if not supported */ + assert(vms->its); + if (!kvm_irqchip_in_kernel() && !vms->tcg_its) { + /* + * Do nothing if ITS is neither supported by the host nor emulated by + * the machine. 
+ */ return; } - dev = qdev_new(itsclass); + dev = qdev_new(its_class_name()); object_property_set_link(OBJECT(dev), "parent-gicv3", OBJECT(vms->gic), &error_abort); @@ -790,6 +821,13 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem) default: g_assert_not_reached(); } + + if (kvm_enabled() && vms->virt && + (revision != 3 || !kvm_irqchip_in_kernel())) { + error_report("KVM EL2 is only supported with in-kernel GICv3"); + exit(1); + } + vms->gic = qdev_new(gictype); qdev_prop_set_uint32(vms->gic, "revision", revision); qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus); @@ -826,6 +864,9 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem) OBJECT(mem), &error_fatal); qdev_prop_set_bit(vms->gic, "has-lpi", true); } + } else if (vms->virt) { + qdev_prop_set_uint32(vms->gic, "maintenance-interrupt-id", + ARCH_GIC_MAINT_IRQ); } } else { if (!kvm_irqchip_in_kernel()) { @@ -872,6 +913,8 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem) [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ, [GTIMER_HYPVIRT] = ARCH_TIMER_NS_EL2_VIRT_IRQ, + [GTIMER_S_EL2_PHYS] = ARCH_TIMER_S_EL2_IRQ, + [GTIMER_S_EL2_VIRT] = ARCH_TIMER_S_EL2_VIRT_IRQ, }; for (unsigned irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { @@ -1004,7 +1047,7 @@ static void virt_powerdown_req(Notifier *n, void *opaque) if (s->acpi_dev) { acpi_send_event(s->acpi_dev, ACPI_POWER_DOWN_STATUS); } else { - /* use gpio Pin 3 for power button event */ + /* use gpio Pin for power button event */ qemu_set_irq(qdev_get_gpio_in(gpio_key_dev, 0), 1); } } @@ -1013,7 +1056,8 @@ static void create_gpio_keys(char *fdt, DeviceState *pl061_dev, uint32_t phandle) { gpio_key_dev = sysbus_create_simple("gpio-key", -1, - qdev_get_gpio_in(pl061_dev, 3)); + qdev_get_gpio_in(pl061_dev, + GPIO_PIN_POWER_BUTTON)); qemu_fdt_add_subnode(fdt, "/gpio-keys"); qemu_fdt_setprop_string(fdt, "/gpio-keys", "compatible", "gpio-keys"); @@ -1024,7 +1068,7 @@ static void create_gpio_keys(char *fdt, DeviceState *pl061_dev, qemu_fdt_setprop_cell(fdt, "/gpio-keys/poweroff", "linux,code", KEY_POWER); qemu_fdt_setprop_cells(fdt, "/gpio-keys/poweroff", - "gpios", phandle, 3, 0); + "gpios", phandle, GPIO_PIN_POWER_BUTTON, 0); } #define SECURE_GPIO_POWEROFF 0 @@ -1404,33 +1448,43 @@ static void create_pcie_irq_map(const MachineState *ms, 0x7 /* PCI irq */); } -static void create_smmu(const VirtMachineState *vms, - PCIBus *bus) +static void create_cmdqv(const VirtMachineState *vms, hwaddr smmu_base, + DeviceState *smmu_dev) { - char *node; - const char compat[] = "arm,smmu-v3"; - int irq = vms->irqmap[VIRT_SMMU]; - int i; - hwaddr base = vms->memmap[VIRT_SMMU].base; - hwaddr size = vms->memmap[VIRT_SMMU].size; - const char irq_names[] = "eventq\0priq\0cmdq-sync\0gerror"; - DeviceState *dev; + PlatformBusDevice *pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev); + SysBusDevice *sbdev = SYS_BUS_DEVICE(smmu_dev); + int irq = platform_bus_get_irqn(pbus, sbdev, NUM_SMMU_IRQS); + hwaddr base = platform_bus_get_mmio_addr(pbus, sbdev, 1); + const char compat[] = "tegra241,cmdqv"; MachineState *ms = MACHINE(vms); + char *node, *smmu_node; - if (vms->iommu != VIRT_IOMMU_SMMUV3 || !vms->iommu_phandle) { - return; - } + base += vms->memmap[VIRT_PLATFORM_BUS].base; + irq += vms->irqmap[VIRT_PLATFORM_BUS]; - dev = qdev_new(TYPE_ARM_SMMUV3); + node = g_strdup_printf("/cmdqv@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, node); + qemu_fdt_setprop(ms->fdt, node, "compatible", compat, sizeof(compat)); + 
qemu_fdt_setprop_sized_cells(ms->fdt, node, "reg", 2, base, 2, + SMMU_CMDQV_IO_LEN); - object_property_set_link(OBJECT(dev), "primary-bus", OBJECT(bus), - &error_abort); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); - for (i = 0; i < NUM_SMMU_IRQS; i++) { - sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, - qdev_get_gpio_in(vms->gic, irq + i)); - } + qemu_fdt_setprop_cells(ms->fdt, node, "interrupts", GIC_FDT_IRQ_TYPE_SPI, + irq, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI); + + smmu_node = g_strdup_printf("/smmuv3@%" PRIx64, smmu_base); + qemu_fdt_setprop_phandle(ms->fdt, node, "smmu", smmu_node); + g_free(smmu_node); + + g_free(node); +} + +static void create_smmuv3_dt_bindings(const VirtMachineState *vms, hwaddr base, + hwaddr size, int irq) +{ + char *node; + const char compat[] = "arm,smmu-v3"; + const char irq_names[] = "eventq\0priq\0cmdq-sync\0gerror"; + MachineState *ms = MACHINE(vms); node = g_strdup_printf("/smmuv3@%" PRIx64, base); qemu_fdt_add_subnode(ms->fdt, node); @@ -1447,13 +1501,74 @@ static void create_smmu(const VirtMachineState *vms, sizeof(irq_names)); qemu_fdt_setprop(ms->fdt, node, "dma-coherent", NULL, 0); - qemu_fdt_setprop_cell(ms->fdt, node, "#iommu-cells", 1); - qemu_fdt_setprop_cell(ms->fdt, node, "phandle", vms->iommu_phandle); g_free(node); } +static void create_smmuv3_dev_dtb(VirtMachineState *vms, DeviceState *dev, + PCIBus *bus, Error **errp) +{ + bool has_cmdqv = object_property_get_bool(OBJECT(dev), "cmdqv", NULL); + PlatformBusDevice *pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev); + SysBusDevice *sbdev = SYS_BUS_DEVICE(dev); + int irq = platform_bus_get_irqn(pbus, sbdev, 0); + hwaddr base = platform_bus_get_mmio_addr(pbus, sbdev, 0); + MachineState *ms = MACHINE(vms); + + if (!(vms->bootinfo.firmware_loaded && virt_is_acpi_enabled(vms))) { + if (object_property_get_bool(OBJECT(dev), "accel", &error_abort)) { + error_setg(errp, "SMMUv3 with accel=on not supported for DT"); + return; + } + if (strcmp("pcie.0", bus->qbus.name)) { + warn_report("SMMUv3 device only supported with pcie.0 for DT"); + return; + } + } + base += vms->memmap[VIRT_PLATFORM_BUS].base; + irq += vms->irqmap[VIRT_PLATFORM_BUS]; + + vms->iommu_phandle = qemu_fdt_alloc_phandle(ms->fdt); + create_smmuv3_dt_bindings(vms, base, SMMU_IO_LEN, irq); + qemu_fdt_setprop_cells(ms->fdt, vms->pciehb_nodename, "iommu-map", + 0x0, vms->iommu_phandle, 0x0, 0x10000); + + if (has_cmdqv) { + create_cmdqv(vms, base, dev); + } +} + +static void create_smmu(const VirtMachineState *vms, + PCIBus *bus) +{ + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + int irq = vms->irqmap[VIRT_SMMU]; + int i; + hwaddr base = vms->memmap[VIRT_SMMU].base; + hwaddr size = vms->memmap[VIRT_SMMU].size; + DeviceState *dev; + + if (vms->iommu != VIRT_IOMMU_SMMUV3 || !vms->iommu_phandle) { + return; + } + + dev = qdev_new(TYPE_ARM_SMMUV3); + + if (!vmc->no_nested_smmu) { + object_property_set_str(OBJECT(dev), "stage", "nested", &error_fatal); + } + object_property_set_link(OBJECT(dev), "primary-bus", OBJECT(bus), + &error_abort); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + for (i = 0; i < NUM_SMMU_IRQS; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, + qdev_get_gpio_in(vms->gic, irq + i)); + } + create_smmuv3_dt_bindings(vms, base, size, irq); +} + static void create_virtio_iommu_dt_bindings(VirtMachineState *vms) { const char compat[] = "virtio,pci-iommu\0pci1af4,1057"; @@ -1475,9 +1590,12 @@ static 
void create_virtio_iommu_dt_bindings(VirtMachineState *vms) qemu_fdt_setprop_cell(ms->fdt, node, "phandle", vms->iommu_phandle); g_free(node); - qemu_fdt_setprop_cells(ms->fdt, vms->pciehb_nodename, "iommu-map", - 0x0, vms->iommu_phandle, 0x0, bdf, - bdf + 1, vms->iommu_phandle, bdf + 1, 0xffff - bdf); + if (!vms->default_bus_bypass_iommu) { + qemu_fdt_setprop_cells(ms->fdt, vms->pciehb_nodename, "iommu-map", + 0x0, vms->iommu_phandle, 0x0, bdf, + bdf + 1, vms->iommu_phandle, bdf + 1, + 0xffff - bdf); + } } static void create_pcie(VirtMachineState *vms) @@ -1541,7 +1659,7 @@ static void create_pcie(VirtMachineState *vms) /* Map IO port space */ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio); - for (i = 0; i < GPEX_NUM_IRQS; i++) { + for (i = 0; i < PCI_NUM_PINS; i++) { sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, qdev_get_gpio_in(vms->gic, irq + i)); gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); @@ -1600,8 +1718,11 @@ static void create_pcie(VirtMachineState *vms) switch (vms->iommu) { case VIRT_IOMMU_SMMUV3: create_smmu(vms, vms->bus); - qemu_fdt_setprop_cells(ms->fdt, nodename, "iommu-map", - 0x0, vms->iommu_phandle, 0x0, 0x10000); + if (!vms->default_bus_bypass_iommu) { + qemu_fdt_setprop_cells(ms->fdt, nodename, "iommu-map", + 0x0, vms->iommu_phandle, 0x0, 0x10000); + } + vms->legacy_smmuv3_present = true; break; default: g_assert_not_reached(); @@ -1609,6 +1730,17 @@ static void create_pcie(VirtMachineState *vms) } } +static void create_cxl_host_reg_region(VirtMachineState *vms) +{ + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *mr = &vms->cxl_devices_state.host_mr; + + memory_region_init(mr, OBJECT(vms), "cxl_host_reg", + vms->memmap[VIRT_CXL_HOST].size); + memory_region_add_subregion(sysmem, vms->memmap[VIRT_CXL_HOST].base, mr); + vms->highmem_cxl = true; +} + static void create_platform_bus(VirtMachineState *vms) { DeviceState *dev; @@ -1687,7 +1819,6 @@ static void virt_build_smbios(VirtMachineState *vms) { MachineClass *mc = MACHINE_GET_CLASS(vms); MachineState *ms = MACHINE(vms); - VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); uint8_t *smbios_tables, *smbios_anchor; size_t smbios_tables_len, smbios_anchor_len; struct smbios_phys_mem_area mem_array; @@ -1697,8 +1828,7 @@ static void virt_build_smbios(VirtMachineState *vms) product = "KVM Virtual Machine"; } - smbios_set_defaults("QEMU", product, - vmc->smbios_old_sys_ver ? "1.0" : mc->name); + smbios_set_defaults("QEMU", product, mc->name); /* build the array of physical mem area from base_memmap */ mem_array.address = vms->memmap[VIRT_MEM].base; @@ -1727,6 +1857,12 @@ void virt_machine_done(Notifier *notifier, void *data) struct arm_boot_info *info = &vms->bootinfo; AddressSpace *as = arm_boot_address_space(cpu, info); + cxl_hook_up_pxb_registers(vms->bus, &vms->cxl_devices_state, + &error_fatal); + + if (vms->cxl_devices_state.is_enabled) { + cxl_fmws_link_targets(&error_fatal); + } /* * If the user provided a dtb, we assume the dynamic sysbus nodes * already are integrated there. 
This corresponds to a use case where @@ -1740,11 +1876,12 @@ void virt_machine_done(Notifier *notifier, void *data) vms->memmap[VIRT_PLATFORM_BUS].size, vms->irqmap[VIRT_PLATFORM_BUS]); } - if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms) < 0) { + if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms, cpu) < 0) { exit(1); } - fw_cfg_add_extra_pci_roots(vms->bus, vms->fw_cfg); + pci_bus_add_fw_cfg_extra_pci_roots(vms->fw_cfg, vms->bus, + &error_abort); virt_acpi_setup(vms); virt_build_smbios(vms); @@ -1752,24 +1889,18 @@ void virt_machine_done(Notifier *notifier, void *data) static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx) { - uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER; - VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + uint8_t clustersz; - if (!vmc->disallow_affinity_adjustment) { - /* Adjust MPIDR like 64-bit KVM hosts, which incorporate the - * GIC's target-list limitations. 32-bit KVM hosts currently - * always create clusters of 4 CPUs, but that is expected to - * change when they gain support for gicv3. When KVM is enabled - * it will override the changes we make here, therefore our - * purposes are to make TCG consistent (with 64-bit KVM hosts) - * and to improve SGI efficiency. - */ - if (vms->gic_version == VIRT_GIC_VERSION_2) { - clustersz = GIC_TARGETLIST_BITS; - } else { - clustersz = GICV3_TARGETLIST_BITS; - } + /* + * Adjust MPIDR to make TCG consistent (with 64-bit KVM hosts) + * and to improve SGI efficiency. + */ + if (vms->gic_version == VIRT_GIC_VERSION_2) { + clustersz = GIC_TARGETLIST_BITS; + } else { + clustersz = GICV3_TARGETLIST_BITS; } + return arm_build_mp_affinity(idx, clustersz); } @@ -1778,6 +1909,7 @@ static inline bool *virt_get_high_memmap_enabled(VirtMachineState *vms, { bool *enabled_array[] = { &vms->highmem_redists, + &vms->highmem_cxl, &vms->highmem_ecam, &vms->highmem_mmio, }; @@ -1885,6 +2017,9 @@ static void virt_set_memmap(VirtMachineState *vms, int pa_bits) if (device_memory_size > 0) { machine_memory_devices_init(ms, device_memory_base, device_memory_size); } + vms->highest_gpa = cxl_fmws_set_memmap(ROUND_UP(vms->highest_gpa + 1, + 256 * MiB), + BIT_ULL(pa_bits)) - 1; } static VirtGICType finalize_gic_version_do(const char *accel_name, @@ -2019,10 +2154,11 @@ static void finalize_gic_version(VirtMachineState *vms) } /* - * virt_cpu_post_init() must be called after the CPUs have - * been realized and the GIC has been created. + * virt_post_cpus_gic_realized() must be called after the CPUs and + * the GIC have both been realized. */ -static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem) +static void virt_post_cpus_gic_realized(VirtMachineState *vms, + MemoryRegion *sysmem) { int max_cpus = MACHINE(vms)->smp.max_cpus; bool aarch64, pmu, steal_time; @@ -2055,6 +2191,10 @@ static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem) memory_region_init_ram(pvtime, NULL, "pvtime", pvtime_size, NULL); memory_region_add_subregion(sysmem, pvtime_reg_base, pvtime); } + if (!aarch64 && vms->virt) { + error_report("KVM does not support EL2 on an AArch32 vCPU"); + exit(1); + } CPU_FOREACH(cpu) { if (pmu) { @@ -2106,7 +2246,8 @@ static void machvirt_init(MachineState *machine) /* * In accelerated mode, the memory map is computed earlier in kvm_type() - * to create a VM with the right number of IPA bits. + * for Linux, or hvf_get_physical_address_range() for macOS to create a + * VM with the right number of IPA bits. 
*/ if (!vms->memmap) { Object *cpuobj; @@ -2192,21 +2333,27 @@ static void machvirt_init(MachineState *machine) exit(1); } - if (vms->secure && (kvm_enabled() || hvf_enabled())) { + if (vms->secure && !tcg_enabled() && !qtest_enabled()) { error_report("mach-virt: %s does not support providing " "Security extensions (TrustZone) to the guest CPU", current_accel_name()); exit(1); } - if (vms->virt && (kvm_enabled() || hvf_enabled())) { + if (vms->virt && kvm_enabled() && !kvm_arm_el2_supported()) { + error_report("mach-virt: host kernel KVM does not support providing " + "Virtualization extensions to the guest CPU"); + exit(1); + } + + if (vms->virt && !kvm_enabled() && !tcg_enabled() && !qtest_enabled()) { error_report("mach-virt: %s does not support providing " "Virtualization extensions to the guest CPU", current_accel_name()); exit(1); } - if (vms->mte && (kvm_enabled() || hvf_enabled())) { + if (vms->mte && hvf_enabled()) { error_report("mach-virt: %s does not support providing " "MTE to the guest CPU", current_accel_name()); @@ -2254,10 +2401,6 @@ static void machvirt_init(MachineState *machine) object_property_set_bool(cpuobj, "kvm-steal-time", false, NULL); } - if (vmc->no_pmu && object_property_find(cpuobj, "pmu")) { - object_property_set_bool(cpuobj, "pmu", false, NULL); - } - if (vmc->no_tcg_lpa2 && object_property_find(cpuobj, "lpa2")) { object_property_set_bool(cpuobj, "lpa2", false, NULL); } @@ -2276,39 +2419,51 @@ static void machvirt_init(MachineState *machine) } if (vms->mte) { - /* Create the memory region only once, but link to all cpus. */ - if (!tag_sysmem) { - /* - * The property exists only if MemTag is supported. - * If it is, we must allocate the ram to back that up. - */ - if (!object_property_find(cpuobj, "tag-memory")) { - error_report("MTE requested, but not supported " - "by the guest CPU"); - exit(1); + if (tcg_enabled()) { + /* Create the memory region only once, but link to all cpus. */ + if (!tag_sysmem) { + /* + * The property exists only if MemTag is supported. + * If it is, we must allocate the ram to back that up. + */ + if (!object_property_find(cpuobj, "tag-memory")) { + error_report("MTE requested, but not supported " + "by the guest CPU"); + exit(1); + } + + tag_sysmem = g_new(MemoryRegion, 1); + memory_region_init(tag_sysmem, OBJECT(machine), + "tag-memory", UINT64_MAX / 32); + + if (vms->secure) { + secure_tag_sysmem = g_new(MemoryRegion, 1); + memory_region_init(secure_tag_sysmem, OBJECT(machine), + "secure-tag-memory", + UINT64_MAX / 32); + + /* As with ram, secure-tag takes precedence over tag. */ + memory_region_add_subregion_overlap(secure_tag_sysmem, + 0, tag_sysmem, -1); + } } - tag_sysmem = g_new(MemoryRegion, 1); - memory_region_init(tag_sysmem, OBJECT(machine), - "tag-memory", UINT64_MAX / 32); - + object_property_set_link(cpuobj, "tag-memory", + OBJECT(tag_sysmem), &error_abort); if (vms->secure) { - secure_tag_sysmem = g_new(MemoryRegion, 1); - memory_region_init(secure_tag_sysmem, OBJECT(machine), - "secure-tag-memory", UINT64_MAX / 32); - - /* As with ram, secure-tag takes precedence over tag. 
*/ - memory_region_add_subregion_overlap(secure_tag_sysmem, 0, - tag_sysmem, -1); + object_property_set_link(cpuobj, "secure-tag-memory", + OBJECT(secure_tag_sysmem), + &error_abort); } - } - - object_property_set_link(cpuobj, "tag-memory", OBJECT(tag_sysmem), - &error_abort); - if (vms->secure) { - object_property_set_link(cpuobj, "secure-tag-memory", - OBJECT(secure_tag_sysmem), - &error_abort); + } else if (kvm_enabled()) { + if (!kvm_arm_mte_supported()) { + error_report("MTE requested, but not supported by KVM"); + exit(1); + } + kvm_arm_enable_mte(cpuobj, &error_abort); + } else { + error_report("MTE requested, but not supported "); + exit(1); } } @@ -2326,11 +2481,13 @@ static void machvirt_init(MachineState *machine) memory_region_add_subregion(sysmem, vms->memmap[VIRT_MEM].base, machine->ram); + cxl_fmws_update_mmio(); + virt_flash_fdt(vms, sysmem, secure_sysmem ?: sysmem); create_gic(vms, sysmem); - virt_cpu_post_init(vms, sysmem); + virt_post_cpus_gic_realized(vms, sysmem); fdt_add_pmu_nodes(vms); @@ -2381,6 +2538,7 @@ static void machvirt_init(MachineState *machine) create_rtc(vms); create_pcie(vms); + create_cxl_host_reg_region(vms); if (has_ged && aarch64 && firmware_loaded && virt_is_acpi_enabled(vms)) { vms->acpi_dev = create_acpi_ged(vms); @@ -2530,6 +2688,40 @@ static void virt_set_highmem_mmio(Object *obj, bool value, Error **errp) vms->highmem_mmio = value; } +static void virt_get_highmem_mmio_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + uint64_t size = extended_memmap[VIRT_HIGH_PCIE_MMIO].size; + + visit_type_size(v, name, &size, errp); +} + +static void virt_set_highmem_mmio_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + uint64_t size; + + if (!visit_type_size(v, name, &size, errp)) { + return; + } + + if (!is_power_of_2(size)) { + error_setg(errp, "highmem-mmio-size is not a power of 2"); + return; + } + + if (size < DEFAULT_HIGH_PCIE_MMIO_SIZE) { + char *sz = size_to_str(DEFAULT_HIGH_PCIE_MMIO_SIZE); + error_setg(errp, "highmem-mmio-size cannot be set to a lower value " + "than the default (%s)", sz); + g_free(sz); + return; + } + + extended_memmap[VIRT_HIGH_PCIE_MMIO].size = size; +} static bool virt_get_its(Object *obj, Error **errp) { @@ -2876,6 +3068,16 @@ static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, qlist_append_str(reserved_regions, resv_prop_str); qdev_prop_set_array(dev, "reserved-regions", reserved_regions); g_free(resv_prop_str); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_ARM_SMMUV3)) { + if (vms->legacy_smmuv3_present || vms->iommu == VIRT_IOMMU_VIRTIO) { + error_setg(errp, "virt machine already has %s set. " + "Doesn't support incompatible iommus", + (vms->legacy_smmuv3_present) ? 
+ "iommu=smmuv3" : "virtio-iommu"); + } else if (vms->iommu == VIRT_IOMMU_NONE) { + /* The new SMMUv3 device is specific to the PCI bus */ + object_property_set_bool(OBJECT(dev), "smmu_per_bus", true, NULL); + } } } @@ -2899,6 +3101,39 @@ static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev, virtio_md_pci_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp); } + if (object_dynamic_cast(OBJECT(dev), TYPE_ARM_SMMUV3)) { + if (!vms->legacy_smmuv3_present && vms->platform_bus_dev) { + PCIBus *bus; + + bus = PCI_BUS(object_property_get_link(OBJECT(dev), "primary-bus", + &error_abort)); + if (pci_bus_bypass_iommu(bus)) { + error_setg(errp, "Bypass option cannot be set for SMMUv3 " + "associated PCIe RC"); + return; + } + + if (object_property_get_bool(OBJECT(dev), "accel", &error_abort)) { + char *stage; + + stage = object_property_get_str(OBJECT(dev), "stage", + &error_fatal); + /* If no stage specified, SMMUv3 will default to stage 1 */ + if (*stage && strcmp("1", stage)) { + error_setg(errp, "Only stage1 is supported for SMMUV3 with " + "accel=on"); + return; + } + } + + create_smmuv3_dev_dtb(vms, dev, bus, errp); + if (object_property_get_bool(OBJECT(dev), "accel", &error_abort) && + !vms->pci_preserve_config) { + vms->pci_preserve_config = true; + } + } + } + if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { PCIDevice *pdev = PCI_DEVICE(dev); @@ -3026,7 +3261,40 @@ static int virt_kvm_type(MachineState *ms, const char *type_str) return fixed_ipa ? 0 : requested_pa_size; } -static void virt_machine_class_init(ObjectClass *oc, void *data) +static int virt_hvf_get_physical_address_range(MachineState *ms) +{ + VirtMachineState *vms = VIRT_MACHINE(ms); + + int default_ipa_size = hvf_arm_get_default_ipa_bit_size(); + int max_ipa_size = hvf_arm_get_max_ipa_bit_size(); + + /* We freeze the memory map to compute the highest gpa */ + virt_set_memmap(vms, max_ipa_size); + + int requested_ipa_size = 64 - clz64(vms->highest_gpa); + + /* + * If we're <= the default IPA size just use the default. + * If we're above the default but below the maximum, round up to + * the maximum. hvf_arm_get_max_ipa_bit_size() conveniently only + * returns values that are valid ARM PARange values. 
+ */ + if (requested_ipa_size <= default_ipa_size) { + requested_ipa_size = default_ipa_size; + } else if (requested_ipa_size <= max_ipa_size) { + requested_ipa_size = max_ipa_size; + } else { + error_report("-m and ,maxmem option values " + "require an IPA range (%d bits) larger than " + "the one supported by the host (%d bits)", + requested_ipa_size, max_ipa_size); + return -1; + } + + return requested_ipa_size; +} + +static void virt_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); @@ -3067,6 +3335,8 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_AMD_XGBE); machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_PLATFORM); + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_UEFI_VARS_SYSBUS); + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_ARM_SMMUV3); #ifdef CONFIG_TPM machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS); #endif @@ -3085,6 +3355,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) mc->valid_cpu_types = valid_cpu_types; mc->get_default_cpu_node_id = virt_get_default_cpu_node_id; mc->kvm_type = virt_kvm_type; + mc->hvf_get_physical_address_range = virt_hvf_get_physical_address_range; assert(!mc->get_hotplug_handler); mc->get_hotplug_handler = virt_machine_get_hotplug_handler; hc->pre_plug = virt_machine_device_pre_plug_cb; @@ -3153,6 +3424,14 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) "Set on/off to enable/disable high " "memory region for PCI MMIO"); + object_class_property_add(oc, "highmem-mmio-size", "size", + virt_get_highmem_mmio_size, + virt_set_highmem_mmio_size, + NULL, NULL); + object_class_property_set_description(oc, "highmem-mmio-size", + "Set the high memory region size " + "for PCI MMIO"); + object_class_property_add_str(oc, "gic-version", virt_get_gic_version, virt_set_gic_version); object_class_property_set_description(oc, "gic-version", @@ -3240,22 +3519,14 @@ static void virt_instance_init(Object *obj) vms->highmem_compact = !vmc->no_highmem_compact; vms->gic_version = VIRT_GIC_VERSION_NOSEL; - vms->highmem_ecam = !vmc->no_highmem_ecam; + vms->highmem_ecam = true; vms->highmem_mmio = true; vms->highmem_redists = true; - if (vmc->no_its) { - vms->its = false; - } else { - /* Default allows ITS instantiation */ - vms->its = true; - - if (vmc->no_tcg_its) { - vms->tcg_its = false; - } else { - vms->tcg_its = true; - } - } + /* Default allows ITS instantiation */ + vms->its = true; + /* Allow ITS emulation if the machine version supports it */ + vms->tcg_its = !vmc->no_tcg_its; /* Default disallows iommu instantiation */ vms->iommu = VIRT_IOMMU_NONE; @@ -3278,6 +3549,7 @@ static void virt_instance_init(Object *obj) vms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); vms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); + cxl_machine_init(obj, &vms->cxl_devices_state); } static const TypeInfo virt_machine_info = { @@ -3288,7 +3560,7 @@ static const TypeInfo virt_machine_info = { .class_size = sizeof(VirtMachineClass), .class_init = virt_machine_class_init, .instance_init = virt_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } }, @@ -3300,10 +3572,35 @@ static void machvirt_machine_init(void) } type_init(machvirt_machine_init); +static void virt_machine_10_1_options(MachineClass *mc) +{ +} 
+DEFINE_VIRT_MACHINE_AS_LATEST(10, 1) + +static void virt_machine_10_0_options(MachineClass *mc) +{ + virt_machine_10_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_10_0, hw_compat_10_0_len); +} +DEFINE_VIRT_MACHINE(10, 0) + +static void virt_machine_9_2_options(MachineClass *mc) +{ + virt_machine_10_0_options(mc); + compat_props_add(mc->compat_props, hw_compat_9_2, hw_compat_9_2_len); +} +DEFINE_VIRT_MACHINE(9, 2) + static void virt_machine_9_1_options(MachineClass *mc) { + VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); + + virt_machine_9_2_options(mc); + compat_props_add(mc->compat_props, hw_compat_9_1, hw_compat_9_1_len); + /* 9.1 and earlier have only a stage-1 SMMU, not a nested s1+2 one */ + vmc->no_nested_smmu = true; } -DEFINE_VIRT_MACHINE_AS_LATEST(9, 1) +DEFINE_VIRT_MACHINE(9, 1) static void virt_machine_9_0_options(MachineClass *mc) { @@ -3450,99 +3747,3 @@ static void virt_machine_4_1_options(MachineClass *mc) mc->auto_enable_numa_with_memhp = false; } DEFINE_VIRT_MACHINE(4, 1) - -static void virt_machine_4_0_options(MachineClass *mc) -{ - virt_machine_4_1_options(mc); - compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); -} -DEFINE_VIRT_MACHINE(4, 0) - -static void virt_machine_3_1_options(MachineClass *mc) -{ - virt_machine_4_0_options(mc); - compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len); -} -DEFINE_VIRT_MACHINE(3, 1) - -static void virt_machine_3_0_options(MachineClass *mc) -{ - virt_machine_3_1_options(mc); - compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len); -} -DEFINE_VIRT_MACHINE(3, 0) - -static void virt_machine_2_12_options(MachineClass *mc) -{ - VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); - - virt_machine_3_0_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len); - vmc->no_highmem_ecam = true; - mc->max_cpus = 255; -} -DEFINE_VIRT_MACHINE(2, 12) - -static void virt_machine_2_11_options(MachineClass *mc) -{ - VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); - - virt_machine_2_12_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len); - vmc->smbios_old_sys_ver = true; -} -DEFINE_VIRT_MACHINE(2, 11) - -static void virt_machine_2_10_options(MachineClass *mc) -{ - virt_machine_2_11_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len); - /* before 2.11 we never faulted accesses to bad addresses */ - mc->ignore_memory_transaction_failures = true; -} -DEFINE_VIRT_MACHINE(2, 10) - -static void virt_machine_2_9_options(MachineClass *mc) -{ - virt_machine_2_10_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len); -} -DEFINE_VIRT_MACHINE(2, 9) - -static void virt_machine_2_8_options(MachineClass *mc) -{ - VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); - - virt_machine_2_9_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len); - /* For 2.8 and earlier we falsely claimed in the DT that - * our timers were edge-triggered, not level-triggered. 
- */ - vmc->claim_edge_triggered_timers = true; -} -DEFINE_VIRT_MACHINE(2, 8) - -static void virt_machine_2_7_options(MachineClass *mc) -{ - VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); - - virt_machine_2_8_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len); - /* ITS was introduced with 2.8 */ - vmc->no_its = true; - /* Stick with 1K pages for migration compatibility */ - mc->minimum_page_bits = 0; -} -DEFINE_VIRT_MACHINE(2, 7) - -static void virt_machine_2_6_options(MachineClass *mc) -{ - VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); - - virt_machine_2_7_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len); - vmc->disallow_affinity_adjustment = true; - /* Disable PMU for 2.6 as PMU support was first introduced in 2.7 */ - vmc->no_pmu = true; -} -DEFINE_VIRT_MACHINE(2, 6) diff --git a/hw/arm/xen-pvh.c b/hw/arm/xen-pvh.c new file mode 100644 index 00000000000..1a9eeb01c8e --- /dev/null +++ b/hw/arm/xen-pvh.c @@ -0,0 +1,105 @@ +/* + * QEMU ARM Xen PVH Machine + * + * SPDX-License-Identifier: MIT + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/qapi-commands-migration.h" +#include "hw/boards.h" +#include "system/system.h" +#include "hw/xen/xen-pvh-common.h" + +#define TYPE_XEN_ARM MACHINE_TYPE_NAME("xenpvh") + +/* + * VIRTIO_MMIO_DEV_SIZE is imported from tools/libs/light/libxl_arm.c under Xen + * repository. + * + * Origin: git://xenbits.xen.org/xen.git 2128143c114c + */ +#define VIRTIO_MMIO_DEV_SIZE 0x200 + +#define NR_VIRTIO_MMIO_DEVICES \ + (GUEST_VIRTIO_MMIO_SPI_LAST - GUEST_VIRTIO_MMIO_SPI_FIRST) + +static void xen_arm_instance_init(Object *obj) +{ + XenPVHMachineState *s = XEN_PVH_MACHINE(obj); + + /* Default values. */ + s->cfg.ram_low = (MemMapEntry) { GUEST_RAM0_BASE, GUEST_RAM0_SIZE }; + s->cfg.ram_high = (MemMapEntry) { GUEST_RAM1_BASE, GUEST_RAM1_SIZE }; + + s->cfg.virtio_mmio_num = NR_VIRTIO_MMIO_DEVICES; + s->cfg.virtio_mmio_irq_base = GUEST_VIRTIO_MMIO_SPI_FIRST; + s->cfg.virtio_mmio = (MemMapEntry) { GUEST_VIRTIO_MMIO_BASE, + VIRTIO_MMIO_DEV_SIZE }; +} + +static void xen_pvh_set_pci_intx_irq(void *opaque, int intx_irq, int level) +{ + XenPVHMachineState *s = XEN_PVH_MACHINE(opaque); + int irq = s->cfg.pci_intx_irq_base + intx_irq; + + if (xendevicemodel_set_irq_level(xen_dmod, xen_domid, irq, level)) { + error_report("xendevicemodel_set_pci_intx_level failed"); + } +} + +static void xen_arm_machine_class_init(ObjectClass *oc, const void *data) +{ + XenPVHMachineClass *xpc = XEN_PVH_MACHINE_CLASS(oc); + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Xen PVH ARM machine"; + + /* + * mc->max_cpus holds the MAX value allowed in the -smp command-line opts. + * + * 1. If users don't pass any -smp option: + * ms->smp.cpus will default to 1. + * ms->smp.max_cpus will default to 1. + * + * 2. If users pass -smp X: + * ms->smp.cpus will be set to X. + * ms->smp.max_cpus will also be set to X. + * + * 3. If users pass -smp X,maxcpus=Y: + * ms->smp.cpus will be set to X. + * ms->smp.max_cpus will be set to Y. + * + * In scenarios 2 and 3, if X or Y are set to something larger than + * mc->max_cpus, QEMU will bail out with an error message. + */ + mc->max_cpus = GUEST_MAX_VCPUS; + + /* Xen/ARM does not use buffered IOREQs. */ + xpc->handle_bufioreq = HVM_IOREQSRV_BUFIOREQ_OFF; + + /* PCI INTX delivery. */ + xpc->set_pci_intx_irq = xen_pvh_set_pci_intx_irq; + + /* List of supported features known to work on PVH ARM. 
*/ + xpc->has_pci = true; + xpc->has_tpm = true; + xpc->has_virtio_mmio = true; + + xen_pvh_class_setup_common_props(xpc); +} + +static const TypeInfo xen_arm_machine_type = { + .name = TYPE_XEN_ARM, + .parent = TYPE_XEN_PVH_MACHINE, + .class_init = xen_arm_machine_class_init, + .instance_size = sizeof(XenPVHMachineState), + .instance_init = xen_arm_instance_init, +}; + +static void xen_arm_machine_register_types(void) +{ + type_register_static(&xen_arm_machine_type); +} + +type_init(xen_arm_machine_register_types) diff --git a/hw/arm/xen-stubs.c b/hw/arm/xen-stubs.c new file mode 100644 index 00000000000..6a830435533 --- /dev/null +++ b/hw/arm/xen-stubs.c @@ -0,0 +1,30 @@ +/* + * Stubs for unimplemented Xen functions for ARM. + * + * SPDX-License-Identifier: MIT + */ + +#include "qemu/osdep.h" +#include "qapi/qapi-commands-migration.h" +#include "system/xen.h" +#include "hw/hw.h" +#include "hw/xen/xen-hvm-common.h" +#include "hw/xen/arch_hvm.h" + +void arch_handle_ioreq(XenIOState *state, ioreq_t *req) +{ + hw_error("Invalid ioreq type 0x%x\n", req->type); +} + +void arch_xen_set_memory(XenIOState *state, MemoryRegionSection *section, + bool add) +{ +} + +void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) +{ +} + +void qmp_xen_set_global_dirty_log(bool enable, Error **errp) +{ +} diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c deleted file mode 100644 index 6fad829edea..00000000000 --- a/hw/arm/xen_arm.c +++ /dev/null @@ -1,267 +0,0 @@ -/* - * QEMU ARM Xen PVH Machine - * - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -#include "qemu/osdep.h" -#include "qemu/error-report.h" -#include "qapi/qapi-commands-migration.h" -#include "qapi/visitor.h" -#include "hw/boards.h" -#include "hw/irq.h" -#include "hw/sysbus.h" -#include "sysemu/block-backend.h" -#include "sysemu/tpm_backend.h" -#include "sysemu/sysemu.h" -#include "hw/xen/xen-hvm-common.h" -#include "sysemu/tpm.h" -#include "hw/xen/arch_hvm.h" -#include "trace.h" - -#define TYPE_XEN_ARM MACHINE_TYPE_NAME("xenpvh") -OBJECT_DECLARE_SIMPLE_TYPE(XenArmState, XEN_ARM) - -static const MemoryListener xen_memory_listener = { - .region_add = xen_region_add, - .region_del = xen_region_del, - .log_start = NULL, - .log_stop = NULL, - .log_sync = NULL, - .log_global_start = NULL, - .log_global_stop = NULL, - .priority = MEMORY_LISTENER_PRIORITY_ACCEL, -}; - -struct XenArmState { - /*< private >*/ - MachineState parent; - - XenIOState *state; - - struct { - uint64_t tpm_base_addr; - } cfg; -}; - -static MemoryRegion ram_lo, ram_hi; - -/* - * VIRTIO_MMIO_DEV_SIZE is imported from tools/libs/light/libxl_arm.c under Xen - * repository. - * - * Origin: git://xenbits.xen.org/xen.git 2128143c114c - */ -#define VIRTIO_MMIO_DEV_SIZE 0x200 - -#define NR_VIRTIO_MMIO_DEVICES \ - (GUEST_VIRTIO_MMIO_SPI_LAST - GUEST_VIRTIO_MMIO_SPI_FIRST) - -static void xen_set_irq(void *opaque, int irq, int level) -{ - if (xendevicemodel_set_irq_level(xen_dmod, xen_domid, irq, level)) { - error_report("xendevicemodel_set_irq_level failed"); - } -} - -static void xen_create_virtio_mmio_devices(XenArmState *xam) -{ - int i; - - for (i = 0; i < NR_VIRTIO_MMIO_DEVICES; i++) { - hwaddr base = GUEST_VIRTIO_MMIO_BASE + i * VIRTIO_MMIO_DEV_SIZE; - qemu_irq irq = qemu_allocate_irq(xen_set_irq, NULL, - GUEST_VIRTIO_MMIO_SPI_FIRST + i); - - sysbus_create_simple("virtio-mmio", base, irq); - - trace_xen_create_virtio_mmio_devices(i, - GUEST_VIRTIO_MMIO_SPI_FIRST + i, - base); - } -} - -static void xen_init_ram(MachineState *machine) -{ - MemoryRegion *sysmem = get_system_memory(); - ram_addr_t block_len, ram_size[GUEST_RAM_BANKS]; - - trace_xen_init_ram(machine->ram_size); - if (machine->ram_size <= GUEST_RAM0_SIZE) { - ram_size[0] = machine->ram_size; - ram_size[1] = 0; - block_len = GUEST_RAM0_BASE + ram_size[0]; - } else { - ram_size[0] = GUEST_RAM0_SIZE; - ram_size[1] = machine->ram_size - GUEST_RAM0_SIZE; - block_len = GUEST_RAM1_BASE + ram_size[1]; - } - - memory_region_init_ram(&xen_memory, NULL, "xen.ram", block_len, - &error_fatal); - - memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo", &xen_memory, - GUEST_RAM0_BASE, ram_size[0]); - memory_region_add_subregion(sysmem, GUEST_RAM0_BASE, &ram_lo); - if (ram_size[1] > 0) { - memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi", &xen_memory, - GUEST_RAM1_BASE, ram_size[1]); - memory_region_add_subregion(sysmem, GUEST_RAM1_BASE, &ram_hi); - } - - /* Setup support for grants. 
*/ - memory_region_init_ram(&xen_grants, NULL, "xen.grants", block_len, - &error_fatal); - memory_region_add_subregion(sysmem, XEN_GRANT_ADDR_OFF, &xen_grants); -} - -void arch_handle_ioreq(XenIOState *state, ioreq_t *req) -{ - hw_error("Invalid ioreq type 0x%x\n", req->type); - - return; -} - -void arch_xen_set_memory(XenIOState *state, MemoryRegionSection *section, - bool add) -{ -} - -void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) -{ -} - -void qmp_xen_set_global_dirty_log(bool enable, Error **errp) -{ -} - -#ifdef CONFIG_TPM -static void xen_enable_tpm(XenArmState *xam) -{ - Error *errp = NULL; - DeviceState *dev; - SysBusDevice *busdev; - - TPMBackend *be = qemu_find_tpm_be("tpm0"); - if (be == NULL) { - error_report("Couldn't find tmp0 backend"); - return; - } - dev = qdev_new(TYPE_TPM_TIS_SYSBUS); - object_property_set_link(OBJECT(dev), "tpmdev", OBJECT(be), &errp); - object_property_set_str(OBJECT(dev), "tpmdev", be->id, &errp); - busdev = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(busdev, &error_fatal); - sysbus_mmio_map(busdev, 0, xam->cfg.tpm_base_addr); - - trace_xen_enable_tpm(xam->cfg.tpm_base_addr); -} -#endif - -static void xen_arm_init(MachineState *machine) -{ - XenArmState *xam = XEN_ARM(machine); - - xam->state = g_new0(XenIOState, 1); - - if (machine->ram_size == 0) { - warn_report("%s non-zero ram size not specified. QEMU machine started" - " without IOREQ (no emulated devices including virtio)", - MACHINE_CLASS(object_get_class(OBJECT(machine)))->desc); - return; - } - - xen_init_ram(machine); - - xen_register_ioreq(xam->state, machine->smp.cpus, &xen_memory_listener); - - xen_create_virtio_mmio_devices(xam); - -#ifdef CONFIG_TPM - if (xam->cfg.tpm_base_addr) { - xen_enable_tpm(xam); - } else { - warn_report("tpm-base-addr is not provided. 
TPM will not be enabled"); - } -#endif -} - -#ifdef CONFIG_TPM -static void xen_arm_get_tpm_base_addr(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) -{ - XenArmState *xam = XEN_ARM(obj); - uint64_t value = xam->cfg.tpm_base_addr; - - visit_type_uint64(v, name, &value, errp); -} - -static void xen_arm_set_tpm_base_addr(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) -{ - XenArmState *xam = XEN_ARM(obj); - uint64_t value; - - if (!visit_type_uint64(v, name, &value, errp)) { - return; - } - - xam->cfg.tpm_base_addr = value; -} -#endif - -static void xen_arm_machine_class_init(ObjectClass *oc, void *data) -{ - - MachineClass *mc = MACHINE_CLASS(oc); - mc->desc = "Xen Para-virtualized PC"; - mc->init = xen_arm_init; - mc->max_cpus = 1; - mc->default_machine_opts = "accel=xen"; - /* Set explicitly here to make sure that real ram_size is passed */ - mc->default_ram_size = 0; - -#ifdef CONFIG_TPM - object_class_property_add(oc, "tpm-base-addr", "uint64_t", - xen_arm_get_tpm_base_addr, - xen_arm_set_tpm_base_addr, - NULL, NULL); - object_class_property_set_description(oc, "tpm-base-addr", - "Set Base address for TPM device."); - - machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS); -#endif -} - -static const TypeInfo xen_arm_machine_type = { - .name = TYPE_XEN_ARM, - .parent = TYPE_MACHINE, - .class_init = xen_arm_machine_class_init, - .instance_size = sizeof(XenArmState), -}; - -static void xen_arm_machine_register_types(void) -{ - type_register_static(&xen_arm_machine_type); -} - -type_init(xen_arm_machine_register_types) diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c index 3c56b9abe1c..0372cd0ac46 100644 --- a/hw/arm/xilinx_zynq.c +++ b/hw/arm/xilinx_zynq.c @@ -21,7 +21,7 @@ #include "hw/sysbus.h" #include "hw/arm/boot.h" #include "net/net.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/boards.h" #include "hw/block/flash.h" #include "hw/loader.h" @@ -34,7 +34,8 @@ #include "hw/net/cadence_gem.h" #include "hw/cpu/a9mpcore.h" #include "hw/qdev-clock.h" -#include "sysemu/reset.h" +#include "hw/misc/unimp.h" +#include "system/reset.h" #include "qom/object.h" #include "exec/tswap.h" #include "target/arm/cpu-qom.h" @@ -53,11 +54,11 @@ OBJECT_DECLARE_SIMPLE_TYPE(ZynqMachineState, ZYNQ_MACHINE) #define FLASH_SIZE (64 * 1024 * 1024) #define FLASH_SECTOR_SIZE (128 * 1024) -#define IRQ_OFFSET 32 /* pic interrupts start from index 32 */ - #define MPCORE_PERIPHBASE 0xF8F00000 #define ZYNQ_BOARD_MIDR 0x413FC090 +#define GIC_EXT_IRQS 64 /* Zynq 7000 SoC */ + static const int dma_irqs[8] = { 46, 47, 48, 49, 72, 73, 74, 75 }; @@ -206,7 +207,7 @@ static void zynq_init(MachineState *machine) MemoryRegion *ocm_ram = g_new(MemoryRegion, 1); DeviceState *dev, *slcr; SysBusDevice *busdev; - qemu_irq pic[64]; + qemu_irq pic[GIC_EXT_IRQS]; int n; unsigned int smp_cpus = machine->smp.cpus; @@ -219,14 +220,6 @@ static void zynq_init(MachineState *machine) for (n = 0; n < smp_cpus; n++) { Object *cpuobj = object_new(machine->cpu_type); - /* - * By default A9 CPUs have EL3 enabled. This board does not currently - * support EL3 so the CPU EL3 property is disabled before realization. 
- */ - if (object_property_find(cpuobj, "has_el3")) { - object_property_set_bool(cpuobj, "has_el3", false, &error_fatal); - } - object_property_set_int(cpuobj, "midr", ZYNQ_BOARD_MIDR, &error_fatal); object_property_set_int(cpuobj, "reset-cbar", MPCORE_PERIPHBASE, @@ -270,6 +263,7 @@ static void zynq_init(MachineState *machine) dev = qdev_new(TYPE_A9MPCORE_PRIV); qdev_prop_set_uint32(dev, "num-cpu", smp_cpus); + qdev_prop_set_uint32(dev, "num-irq", GIC_EXT_IRQS + GIC_INTERNAL); busdev = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(busdev, &error_fatal); sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE); @@ -284,16 +278,16 @@ static void zynq_init(MachineState *machine) qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); } - for (n = 0; n < 64; n++) { + for (n = 0; n < GIC_EXT_IRQS; n++) { pic[n] = qdev_get_gpio_in(dev, n); } - n = zynq_init_spi_flashes(0xE0006000, pic[58 - IRQ_OFFSET], false, 0); - n = zynq_init_spi_flashes(0xE0007000, pic[81 - IRQ_OFFSET], false, n); - n = zynq_init_spi_flashes(0xE000D000, pic[51 - IRQ_OFFSET], true, n); + n = zynq_init_spi_flashes(0xE0006000, pic[58 - GIC_INTERNAL], false, 0); + n = zynq_init_spi_flashes(0xE0007000, pic[81 - GIC_INTERNAL], false, n); + n = zynq_init_spi_flashes(0xE000D000, pic[51 - GIC_INTERNAL], true, n); - sysbus_create_simple(TYPE_CHIPIDEA, 0xE0002000, pic[53 - IRQ_OFFSET]); - sysbus_create_simple(TYPE_CHIPIDEA, 0xE0003000, pic[76 - IRQ_OFFSET]); + sysbus_create_simple(TYPE_CHIPIDEA, 0xE0002000, pic[53 - GIC_INTERNAL]); + sysbus_create_simple(TYPE_CHIPIDEA, 0xE0003000, pic[76 - GIC_INTERNAL]); dev = qdev_new(TYPE_CADENCE_UART); busdev = SYS_BUS_DEVICE(dev); @@ -302,7 +296,7 @@ static void zynq_init(MachineState *machine) qdev_get_clock_out(slcr, "uart0_ref_clk")); sysbus_realize_and_unref(busdev, &error_fatal); sysbus_mmio_map(busdev, 0, 0xE0000000); - sysbus_connect_irq(busdev, 0, pic[59 - IRQ_OFFSET]); + sysbus_connect_irq(busdev, 0, pic[59 - GIC_INTERNAL]); dev = qdev_new(TYPE_CADENCE_UART); busdev = SYS_BUS_DEVICE(dev); qdev_prop_set_chr(dev, "chardev", serial_hd(1)); @@ -310,15 +304,15 @@ static void zynq_init(MachineState *machine) qdev_get_clock_out(slcr, "uart1_ref_clk")); sysbus_realize_and_unref(busdev, &error_fatal); sysbus_mmio_map(busdev, 0, 0xE0001000); - sysbus_connect_irq(busdev, 0, pic[82 - IRQ_OFFSET]); + sysbus_connect_irq(busdev, 0, pic[82 - GIC_INTERNAL]); sysbus_create_varargs("cadence_ttc", 0xF8001000, - pic[42-IRQ_OFFSET], pic[43-IRQ_OFFSET], pic[44-IRQ_OFFSET], NULL); + pic[42-GIC_INTERNAL], pic[43-GIC_INTERNAL], pic[44-GIC_INTERNAL], NULL); sysbus_create_varargs("cadence_ttc", 0xF8002000, - pic[69-IRQ_OFFSET], pic[70-IRQ_OFFSET], pic[71-IRQ_OFFSET], NULL); + pic[69-GIC_INTERNAL], pic[70-GIC_INTERNAL], pic[71-GIC_INTERNAL], NULL); - gem_init(0xE000B000, pic[54 - IRQ_OFFSET]); - gem_init(0xE000C000, pic[77 - IRQ_OFFSET]); + gem_init(0xE000B000, pic[54 - GIC_INTERNAL]); + gem_init(0xE000C000, pic[77 - GIC_INTERNAL]); for (n = 0; n < 2; n++) { int hci_irq = n ? 79 : 56; @@ -337,7 +331,7 @@ static void zynq_init(MachineState *machine) qdev_prop_set_uint64(dev, "capareg", ZYNQ_SDHCI_CAPABILITIES); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, hci_addr); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[hci_irq - IRQ_OFFSET]); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[hci_irq - GIC_INTERNAL]); di = drive_get(IF_SD, 0, n); blk = di ? 
blk_by_legacy_dinfo(di) : NULL; @@ -350,7 +344,7 @@ static void zynq_init(MachineState *machine) dev = qdev_new(TYPE_ZYNQ_XADC); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xF8007100); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[39-IRQ_OFFSET]); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[39-GIC_INTERNAL]); dev = qdev_new("pl330"); object_property_set_link(OBJECT(dev), "memory", @@ -370,17 +364,86 @@ static void zynq_init(MachineState *machine) busdev = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(busdev, &error_fatal); sysbus_mmio_map(busdev, 0, 0xF8003000); - sysbus_connect_irq(busdev, 0, pic[45-IRQ_OFFSET]); /* abort irq line */ + sysbus_connect_irq(busdev, 0, pic[45-GIC_INTERNAL]); /* abort irq line */ for (n = 0; n < ARRAY_SIZE(dma_irqs); ++n) { /* event irqs */ - sysbus_connect_irq(busdev, n + 1, pic[dma_irqs[n] - IRQ_OFFSET]); + sysbus_connect_irq(busdev, n + 1, pic[dma_irqs[n] - GIC_INTERNAL]); } dev = qdev_new("xlnx.ps7-dev-cfg"); busdev = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(busdev, &error_fatal); - sysbus_connect_irq(busdev, 0, pic[40 - IRQ_OFFSET]); + sysbus_connect_irq(busdev, 0, pic[40 - GIC_INTERNAL]); sysbus_mmio_map(busdev, 0, 0xF8007000); + /* + * Refer to the ug585-Zynq-7000-TRM manual B.3 (Module Summary) and + * the zynq-7000.dtsi. Add placeholders for unimplemented devices. + */ + create_unimplemented_device("zynq.i2c0", 0xE0004000, 4 * KiB); + create_unimplemented_device("zynq.i2c1", 0xE0005000, 4 * KiB); + create_unimplemented_device("zynq.can0", 0xE0008000, 4 * KiB); + create_unimplemented_device("zynq.can1", 0xE0009000, 4 * KiB); + create_unimplemented_device("zynq.gpio", 0xE000A000, 4 * KiB); + create_unimplemented_device("zynq.smcc", 0xE000E000, 4 * KiB); + + /* Direct Memory Access Controller, PL330, Non-Secure Mode */ + create_unimplemented_device("zynq.dma_ns", 0xF8004000, 4 * KiB); + + /* System Watchdog Timer Registers */ + create_unimplemented_device("zynq.swdt", 0xF8005000, 4 * KiB); + + /* DDR memory controller */ + create_unimplemented_device("zynq.ddrc", 0xF8006000, 4 * KiB); + + /* AXI_HP Interface (AFI) */ + create_unimplemented_device("zynq.axi_hp0", 0xF8008000, 0x28); + create_unimplemented_device("zynq.axi_hp1", 0xF8009000, 0x28); + create_unimplemented_device("zynq.axi_hp2", 0xF800A000, 0x28); + create_unimplemented_device("zynq.axi_hp3", 0xF800B000, 0x28); + + create_unimplemented_device("zynq.efuse", 0xF800d000, 0x20); + + /* Embedded Trace Buffer */ + create_unimplemented_device("zynq.etb", 0xF8801000, 4 * KiB); + + /* Cross Trigger Interface, ETB and TPIU */ + create_unimplemented_device("zynq.cti_etb_tpiu", 0xF8802000, 4 * KiB); + + /* Trace Port Interface Unit */ + create_unimplemented_device("zynq.tpiu", 0xF8803000, 4 * KiB); + + /* CoreSight Trace Funnel */ + create_unimplemented_device("zynq.funnel", 0xF8804000, 4 * KiB); + + /* Instrumentation Trace Macrocell */ + create_unimplemented_device("zynq.itm", 0xF8805000, 4 * KiB); + + /* Cross Trigger Interface, FTM */ + create_unimplemented_device("zynq.cti_ftm", 0xF8809000, 4 * KiB); + + /* Fabric Trace Macrocell */ + create_unimplemented_device("zynq.ftm", 0xF880B000, 4 * KiB); + + /* Cortex A9 Performance Monitoring Unit, CPU */ + create_unimplemented_device("cortex-a9.pmu0", 0xF8891000, 4 * KiB); + create_unimplemented_device("cortex-a9.pmu1", 0xF8893000, 4 * KiB); + + /* Cross Trigger Interface, CPU */ + create_unimplemented_device("zynq.cpu_cti0", 0xF8898000, 4 * KiB); + 
create_unimplemented_device("zynq.cpu_cti1", 0xF8899000, 4 * KiB); + + /* CoreSight PTM-A9, CPU */ + create_unimplemented_device("cortex-a9.ptm0", 0xF889c000, 4 * KiB); + create_unimplemented_device("cortex-a9.ptm1", 0xF889d000, 4 * KiB); + + /* AMBA NIC301 TrustZone */ + create_unimplemented_device("zynq.trustZone", 0xF8900000, 0x20); + + /* AMBA Network Interconnect Advanced Quality of Service (QoS-301) */ + create_unimplemented_device("zynq.qos301_cpu", 0xF8946000, 0x130); + create_unimplemented_device("zynq.qos301_dmac", 0xF8947000, 0x130); + create_unimplemented_device("zynq.qos301_iou", 0xF8948000, 0x130); + zynq_binfo.ram_size = machine->ram_size; zynq_binfo.board_id = 0xd32; zynq_binfo.loader_start = 0; @@ -390,7 +453,7 @@ static void zynq_init(MachineState *machine) arm_load_kernel(zynq_machine->cpu[0], machine, &zynq_binfo); } -static void zynq_machine_class_init(ObjectClass *oc, void *data) +static void zynq_machine_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { ARM_CPU_TYPE_NAME("cortex-a9"), @@ -398,10 +461,9 @@ static void zynq_machine_class_init(ObjectClass *oc, void *data) }; MachineClass *mc = MACHINE_CLASS(oc); ObjectProperty *prop; - mc->desc = "Xilinx Zynq Platform Baseboard for Cortex-A9"; + mc->desc = "Xilinx Zynq 7000 Platform Baseboard for Cortex-A9"; mc->init = zynq_init; mc->max_cpus = ZYNQ_MAX_CPUS; - mc->no_sdcard = 1; mc->ignore_memory_transaction_failures = true; mc->valid_cpu_types = valid_cpu_types; mc->default_ram_id = "zynq.ext_ram"; diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c index 962f98fee2e..adadbb72902 100644 --- a/hw/arm/xlnx-versal-virt.c +++ b/hw/arm/xlnx-versal-virt.c @@ -12,7 +12,7 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "qapi/error.h" -#include "sysemu/device_tree.h" +#include "system/device_tree.h" #include "hw/block/flash.h" #include "hw/boards.h" #include "hw/sysbus.h" @@ -761,9 +761,9 @@ static void versal_virt_init(MachineState *machine) if (!flash_klass || object_class_is_abstract(flash_klass) || !object_class_dynamic_cast(flash_klass, TYPE_M25P80)) { - error_setg(&error_fatal, "'%s' is either abstract or" + error_report("'%s' is either abstract or" " not a subtype of m25p80", s->ospi_model); - return; + exit(1); } } @@ -808,7 +808,7 @@ static void versal_virt_machine_finalize(Object *obj) g_free(s->ospi_model); } -static void versal_virt_machine_class_init(ObjectClass *oc, void *data) +static void versal_virt_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -818,6 +818,7 @@ static void versal_virt_machine_class_init(ObjectClass *oc, void *data) mc->max_cpus = XLNX_VERSAL_NR_ACPUS + XLNX_VERSAL_NR_RCPUS; mc->default_cpus = XLNX_VERSAL_NR_ACPUS + XLNX_VERSAL_NR_RCPUS; mc->no_cdrom = true; + mc->auto_create_sdcard = true; mc->default_ram_id = "ddr"; object_class_property_add_str(oc, "ospi-flash", versal_get_ospi_model, versal_set_ospi_model); diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c index 50cb0606cbf..a42b9e7140b 100644 --- a/hw/arm/xlnx-versal.c +++ b/hw/arm/xlnx-versal.c @@ -12,14 +12,12 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qlist.h" #include "qemu/module.h" #include "hw/sysbus.h" #include "net/net.h" -#include "sysemu/sysemu.h" -#include "sysemu/kvm.h" +#include "system/system.h" #include "hw/arm/boot.h" -#include "kvm_arm.h" #include "hw/misc/unimp.h" #include "hw/arm/xlnx-versal.h" #include 
"qemu/log.h" @@ -258,14 +256,23 @@ static void versal_create_gems(Versal *s, qemu_irq *pic) char *name = g_strdup_printf("gem%d", i); DeviceState *dev; MemoryRegion *mr; + OrIRQState *or_irq; object_initialize_child(OBJECT(s), name, &s->lpd.iou.gem[i], TYPE_CADENCE_GEM); + or_irq = &s->lpd.iou.gem_irq_orgate[i]; + object_initialize_child(OBJECT(s), "gem-irq-orgate[*]", + or_irq, TYPE_OR_IRQ); dev = DEVICE(&s->lpd.iou.gem[i]); qemu_configure_nic_device(dev, true, NULL); object_property_set_int(OBJECT(dev), "phy-addr", 23, &error_abort); object_property_set_int(OBJECT(dev), "num-priority-queues", 2, &error_abort); + object_property_set_int(OBJECT(or_irq), + "num-lines", 2, &error_fatal); + qdev_realize(DEVICE(or_irq), NULL, &error_fatal); + qdev_connect_gpio_out(DEVICE(or_irq), 0, pic[irqs[i]]); + object_property_set_link(OBJECT(dev), "dma", OBJECT(&s->mr_ps), &error_abort); sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); @@ -273,7 +280,8 @@ static void versal_create_gems(Versal *s, qemu_irq *pic) mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); memory_region_add_subregion(&s->mr_ps, addrs[i], mr); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irqs[i]]); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(DEVICE(or_irq), 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, qdev_get_gpio_in(DEVICE(or_irq), 1)); g_free(name); } } @@ -958,17 +966,16 @@ static void versal_init(Object *obj) "mr-rpu-ps-alias", &s->mr_ps, 0, UINT64_MAX); } -static Property versal_properties[] = { +static const Property versal_properties[] = { DEFINE_PROP_LINK("ddr", Versal, cfg.mr_ddr, TYPE_MEMORY_REGION, MemoryRegion *), DEFINE_PROP_LINK("canbus0", Versal, lpd.iou.canbus[0], TYPE_CAN_BUS, CanBusState *), DEFINE_PROP_LINK("canbus1", Versal, lpd.iou.canbus[1], TYPE_CAN_BUS, CanBusState *), - DEFINE_PROP_END_OF_LIST() }; -static void versal_class_init(ObjectClass *klass, void *data) +static void versal_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c index 4667cb333ca..14b6641a713 100644 --- a/hw/arm/xlnx-zcu102.c +++ b/hw/arm/xlnx-zcu102.c @@ -22,7 +22,7 @@ #include "hw/boards.h" #include "qemu/error-report.h" #include "qemu/log.h" -#include "sysemu/device_tree.h" +#include "system/device_tree.h" #include "qom/object.h" #include "net/can_emu.h" #include "audio/audio.h" @@ -267,7 +267,7 @@ static void xlnx_zcu102_machine_instance_init(Object *obj) 0); } -static void xlnx_zcu102_machine_class_init(ObjectClass *oc, void *data) +static void xlnx_zcu102_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -280,6 +280,7 @@ static void xlnx_zcu102_machine_class_init(ObjectClass *oc, void *data) mc->max_cpus = XLNX_ZYNQMP_NUM_APU_CPUS + XLNX_ZYNQMP_NUM_RPU_CPUS; mc->default_cpus = XLNX_ZYNQMP_NUM_APU_CPUS; mc->default_ram_id = "ddr-ram"; + mc->auto_create_sdcard = true; machine_add_audiodev_property(mc); object_class_property_add_bool(oc, "secure", zcu102_get_secure, diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c index afeb3f88f81..ec96a46eec3 100644 --- a/hw/arm/xlnx-zynqmp.c +++ b/hw/arm/xlnx-zynqmp.c @@ -22,9 +22,7 @@ #include "hw/intc/arm_gic_common.h" #include "hw/misc/unimp.h" #include "hw/boards.h" -#include "sysemu/kvm.h" -#include "sysemu/sysemu.h" -#include "kvm_arm.h" +#include "system/system.h" #include "target/arm/cpu-qom.h" #include "target/arm/gtimer.h" @@ -394,6 +392,8 @@ static void xlnx_zynqmp_init(Object *obj) for (i = 0; i < 
XLNX_ZYNQMP_NUM_GEMS; i++) { object_initialize_child(obj, "gem[*]", &s->gem[i], TYPE_CADENCE_GEM); + object_initialize_child(obj, "gem-irq-orgate[*]", + &s->gem_irq_orgate[i], TYPE_OR_IRQ); } for (i = 0; i < XLNX_ZYNQMP_NUM_UARTS; i++) { @@ -625,12 +625,19 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) &error_abort); object_property_set_int(OBJECT(&s->gem[i]), "num-priority-queues", 2, &error_abort); + object_property_set_int(OBJECT(&s->gem_irq_orgate[i]), + "num-lines", 2, &error_fatal); + qdev_realize(DEVICE(&s->gem_irq_orgate[i]), NULL, &error_fatal); + qdev_connect_gpio_out(DEVICE(&s->gem_irq_orgate[i]), 0, gic_spi[gem_intr[i]]); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gem[i]), errp)) { return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->gem[i]), 0, gem_addr[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gem[i]), 0, - gic_spi[gem_intr[i]]); + qdev_get_gpio_in(DEVICE(&s->gem_irq_orgate[i]), 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gem[i]), 1, + qdev_get_gpio_in(DEVICE(&s->gem_irq_orgate[i]), 1)); } for (i = 0; i < XLNX_ZYNQMP_NUM_UARTS; i++) { @@ -680,16 +687,10 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) * - SDIO Specification Version 3.0 * - eMMC Specification Version 4.51 */ - if (!object_property_set_uint(sdhci, "sd-spec-version", 3, errp)) { - return; - } - if (!object_property_set_uint(sdhci, "capareg", SDHCI_CAPABILITIES, - errp)) { - return; - } - if (!object_property_set_uint(sdhci, "uhs", UHS_I, errp)) { - return; - } + object_property_set_uint(sdhci, "sd-spec-version", 3, &error_abort); + object_property_set_uint(sdhci, "capareg", SDHCI_CAPABILITIES, + &error_abort); + object_property_set_uint(sdhci, "uhs", UHS_I, &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(sdhci), errp)) { return; } @@ -754,14 +755,10 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) xlnx_zynqmp_create_unimp_mmio(s); for (i = 0; i < XLNX_ZYNQMP_NUM_GDMA_CH; i++) { - if (!object_property_set_uint(OBJECT(&s->gdma[i]), "bus-width", 128, - errp)) { - return; - } - if (!object_property_set_link(OBJECT(&s->gdma[i]), "dma", - OBJECT(system_memory), errp)) { - return; - } + object_property_set_uint(OBJECT(&s->gdma[i]), "bus-width", 128, + &error_abort); + object_property_set_link(OBJECT(&s->gdma[i]), "dma", + OBJECT(system_memory), &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->gdma[i]), errp)) { return; } @@ -802,10 +799,8 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi_dma), 0, qdev_get_gpio_in(DEVICE(&s->qspi_irq_orgate), 0)); - if (!object_property_set_link(OBJECT(&s->qspi), "stream-connected-dma", - OBJECT(&s->qspi_dma), errp)) { - return; - } + object_property_set_link(OBJECT(&s->qspi), "stream-connected-dma", + OBJECT(&s->qspi_dma), &error_abort); if (!sysbus_realize(SYS_BUS_DEVICE(&s->qspi), errp)) { return; } @@ -824,10 +819,8 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) } for (i = 0; i < XLNX_ZYNQMP_NUM_USB; i++) { - if (!object_property_set_link(OBJECT(&s->usb[i].sysbus_xhci), "dma", - OBJECT(system_memory), errp)) { - return; - } + object_property_set_link(OBJECT(&s->usb[i].sysbus_xhci), "dma", + OBJECT(system_memory), &error_abort); qdev_prop_set_uint32(DEVICE(&s->usb[i].sysbus_xhci), "intrs", 4); qdev_prop_set_uint32(DEVICE(&s->usb[i].sysbus_xhci), "slots", 2); @@ -848,7 +841,7 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) } } -static Property xlnx_zynqmp_props[] = { +static const Property xlnx_zynqmp_props[] = { 
DEFINE_PROP_STRING("boot-cpu", XlnxZynqMPState, boot_cpu), DEFINE_PROP_BOOL("secure", XlnxZynqMPState, secure, false), DEFINE_PROP_BOOL("virtualization", XlnxZynqMPState, virt, false), @@ -858,10 +851,9 @@ static Property xlnx_zynqmp_props[] = { CanBusState *), DEFINE_PROP_LINK("canbus1", XlnxZynqMPState, canbus[1], TYPE_CAN_BUS, CanBusState *), - DEFINE_PROP_END_OF_LIST() }; -static void xlnx_zynqmp_class_init(ObjectClass *oc, void *data) +static void xlnx_zynqmp_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/arm/z2.c b/hw/arm/z2.c deleted file mode 100644 index fc5672e7ab2..00000000000 --- a/hw/arm/z2.c +++ /dev/null @@ -1,355 +0,0 @@ -/* - * PXA270-based Zipit Z2 device - * - * Copyright (c) 2011 by Vasily Khoruzhick - * - * Code is based on mainstone platform. - * - * This code is licensed under the GNU GPL v2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "qemu/osdep.h" -#include "qemu/units.h" -#include "hw/arm/pxa.h" -#include "hw/arm/boot.h" -#include "hw/i2c/i2c.h" -#include "hw/irq.h" -#include "hw/ssi/ssi.h" -#include "migration/vmstate.h" -#include "hw/boards.h" -#include "hw/block/flash.h" -#include "ui/console.h" -#include "hw/audio/wm8750.h" -#include "audio/audio.h" -#include "exec/address-spaces.h" -#include "qom/object.h" -#include "qapi/error.h" -#include "trace.h" - -static const struct keymap map[0x100] = { - [0 ... 0xff] = { -1, -1 }, - [0x3b] = {0, 0}, /* Option = F1 */ - [0xc8] = {0, 1}, /* Up */ - [0xd0] = {0, 2}, /* Down */ - [0xcb] = {0, 3}, /* Left */ - [0xcd] = {0, 4}, /* Right */ - [0xcf] = {0, 5}, /* End */ - [0x0d] = {0, 6}, /* KPPLUS */ - [0xc7] = {1, 0}, /* Home */ - [0x10] = {1, 1}, /* Q */ - [0x17] = {1, 2}, /* I */ - [0x22] = {1, 3}, /* G */ - [0x2d] = {1, 4}, /* X */ - [0x1c] = {1, 5}, /* Enter */ - [0x0c] = {1, 6}, /* KPMINUS */ - [0xc9] = {2, 0}, /* PageUp */ - [0x11] = {2, 1}, /* W */ - [0x18] = {2, 2}, /* O */ - [0x23] = {2, 3}, /* H */ - [0x2e] = {2, 4}, /* C */ - [0x38] = {2, 5}, /* LeftAlt */ - [0xd1] = {3, 0}, /* PageDown */ - [0x12] = {3, 1}, /* E */ - [0x19] = {3, 2}, /* P */ - [0x24] = {3, 3}, /* J */ - [0x2f] = {3, 4}, /* V */ - [0x2a] = {3, 5}, /* LeftShift */ - [0x01] = {4, 0}, /* Esc */ - [0x13] = {4, 1}, /* R */ - [0x1e] = {4, 2}, /* A */ - [0x25] = {4, 3}, /* K */ - [0x30] = {4, 4}, /* B */ - [0x1d] = {4, 5}, /* LeftCtrl */ - [0x0f] = {5, 0}, /* Tab */ - [0x14] = {5, 1}, /* T */ - [0x1f] = {5, 2}, /* S */ - [0x26] = {5, 3}, /* L */ - [0x31] = {5, 4}, /* N */ - [0x39] = {5, 5}, /* Space */ - [0x3c] = {6, 0}, /* Stop = F2 */ - [0x15] = {6, 1}, /* Y */ - [0x20] = {6, 2}, /* D */ - [0x0e] = {6, 3}, /* Backspace */ - [0x32] = {6, 4}, /* M */ - [0x33] = {6, 5}, /* Comma */ - [0x3d] = {7, 0}, /* Play = F3 */ - [0x16] = {7, 1}, /* U */ - [0x21] = {7, 2}, /* F */ - [0x2c] = {7, 3}, /* Z */ - [0x27] = {7, 4}, /* Semicolon */ - [0x34] = {7, 5}, /* Dot */ -}; - -#define Z2_RAM_SIZE 0x02000000 -#define Z2_FLASH_BASE 0x00000000 -#define Z2_FLASH_SIZE 0x00800000 - -static struct arm_boot_info z2_binfo = { - .loader_start = PXA2XX_SDRAM_BASE, - .ram_size = Z2_RAM_SIZE, -}; - -#define Z2_GPIO_SD_DETECT 96 -#define Z2_GPIO_AC_IN 0 -#define Z2_GPIO_KEY_ON 1 -#define Z2_GPIO_LCD_CS 88 - -struct ZipitLCD { - SSIPeripheral ssidev; - int32_t selected; - int32_t enabled; - uint8_t buf[3]; - uint32_t cur_reg; - int pos; -}; - -#define TYPE_ZIPIT_LCD "zipit-lcd" -OBJECT_DECLARE_SIMPLE_TYPE(ZipitLCD, 
ZIPIT_LCD) - -static uint32_t zipit_lcd_transfer(SSIPeripheral *dev, uint32_t value) -{ - ZipitLCD *z = ZIPIT_LCD(dev); - uint16_t val; - - trace_z2_lcd_reg_update(z->cur_reg, z->buf[0], z->buf[1], z->buf[2], value); - if (z->selected) { - z->buf[z->pos] = value & 0xff; - z->pos++; - } - if (z->pos == 3) { - switch (z->buf[0]) { - case 0x74: - z->cur_reg = z->buf[2]; - break; - case 0x76: - val = z->buf[1] << 8 | z->buf[2]; - if (z->cur_reg == 0x22 && val == 0x0000) { - z->enabled = 1; - trace_z2_lcd_enable_disable_result("enabled"); - } else if (z->cur_reg == 0x10 && val == 0x0000) { - z->enabled = 0; - trace_z2_lcd_enable_disable_result("disabled"); - } - break; - default: - break; - } - z->pos = 0; - } - return 0; -} - -static void z2_lcd_cs(void *opaque, int line, int level) -{ - ZipitLCD *z2_lcd = opaque; - z2_lcd->selected = !level; -} - -static void zipit_lcd_realize(SSIPeripheral *dev, Error **errp) -{ - ZipitLCD *z = ZIPIT_LCD(dev); - z->selected = 0; - z->enabled = 0; - z->pos = 0; -} - -static const VMStateDescription vmstate_zipit_lcd_state = { - .name = "zipit-lcd", - .version_id = 2, - .minimum_version_id = 2, - .fields = (const VMStateField[]) { - VMSTATE_SSI_PERIPHERAL(ssidev, ZipitLCD), - VMSTATE_INT32(selected, ZipitLCD), - VMSTATE_INT32(enabled, ZipitLCD), - VMSTATE_BUFFER(buf, ZipitLCD), - VMSTATE_UINT32(cur_reg, ZipitLCD), - VMSTATE_INT32(pos, ZipitLCD), - VMSTATE_END_OF_LIST(), - } -}; - -static void zipit_lcd_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); - - k->realize = zipit_lcd_realize; - k->transfer = zipit_lcd_transfer; - dc->vmsd = &vmstate_zipit_lcd_state; -} - -static const TypeInfo zipit_lcd_info = { - .name = TYPE_ZIPIT_LCD, - .parent = TYPE_SSI_PERIPHERAL, - .instance_size = sizeof(ZipitLCD), - .class_init = zipit_lcd_class_init, -}; - -#define TYPE_AER915 "aer915" -OBJECT_DECLARE_SIMPLE_TYPE(AER915State, AER915) - -struct AER915State { - I2CSlave parent_obj; - - int len; - uint8_t buf[3]; -}; - -static int aer915_send(I2CSlave *i2c, uint8_t data) -{ - AER915State *s = AER915(i2c); - - s->buf[s->len] = data; - if (s->len++ > 2) { - trace_z2_aer915_send_too_long(s->len); - return 1; - } - - if (s->len == 2) { - trace_z2_aer915_send(s->buf[0], s->buf[1]); - } - - return 0; -} - -static int aer915_event(I2CSlave *i2c, enum i2c_event event) -{ - AER915State *s = AER915(i2c); - - trace_z2_aer915_event(s->len, event); - switch (event) { - case I2C_START_SEND: - s->len = 0; - break; - case I2C_START_RECV: - break; - case I2C_FINISH: - break; - default: - break; - } - - return 0; -} - -static uint8_t aer915_recv(I2CSlave *slave) -{ - AER915State *s = AER915(slave); - int retval = 0x00; - - switch (s->buf[0]) { - /* Return hardcoded battery voltage, - * 0xf0 means ~4.1V - */ - case 0x02: - retval = 0xf0; - break; - /* Return 0x00 for other regs, - * we don't know what they are for, - * anyway they return 0x00 on real hardware. 
- */ - default: - break; - } - - return retval; -} - -static const VMStateDescription vmstate_aer915_state = { - .name = "aer915", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_INT32(len, AER915State), - VMSTATE_BUFFER(buf, AER915State), - VMSTATE_END_OF_LIST(), - } -}; - -static void aer915_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); - - k->event = aer915_event; - k->recv = aer915_recv; - k->send = aer915_send; - dc->vmsd = &vmstate_aer915_state; -} - -static const TypeInfo aer915_info = { - .name = TYPE_AER915, - .parent = TYPE_I2C_SLAVE, - .instance_size = sizeof(AER915State), - .class_init = aer915_class_init, -}; - -#define FLASH_SECTOR_SIZE (64 * KiB) - -static void z2_init(MachineState *machine) -{ - PXA2xxState *mpu; - DriveInfo *dinfo; - void *z2_lcd; - I2CBus *bus; - DeviceState *wm; - I2CSlave *i2c_dev; - - /* Setup CPU & memory */ - mpu = pxa270_init(z2_binfo.ram_size, machine->cpu_type); - - dinfo = drive_get(IF_PFLASH, 0, 0); - pflash_cfi01_register(Z2_FLASH_BASE, "z2.flash0", Z2_FLASH_SIZE, - dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, - FLASH_SECTOR_SIZE, 4, 0, 0, 0, 0, 0); - - /* setup keypad */ - pxa27x_register_keypad(mpu->kp, map, 0x100); - - /* MMC/SD host */ - pxa2xx_mmci_handlers(mpu->mmc, - NULL, - qdev_get_gpio_in(mpu->gpio, Z2_GPIO_SD_DETECT)); - - type_register_static(&zipit_lcd_info); - type_register_static(&aer915_info); - z2_lcd = ssi_create_peripheral(mpu->ssp[1], TYPE_ZIPIT_LCD); - bus = pxa2xx_i2c_bus(mpu->i2c[0]); - - i2c_slave_create_simple(bus, TYPE_AER915, 0x55); - - i2c_dev = i2c_slave_new(TYPE_WM8750, 0x1b); - wm = DEVICE(i2c_dev); - - if (machine->audiodev) { - qdev_prop_set_string(wm, "audiodev", machine->audiodev); - } - i2c_slave_realize_and_unref(i2c_dev, bus, &error_abort); - - mpu->i2s->opaque = wm; - mpu->i2s->codec_out = wm8750_dac_dat; - mpu->i2s->codec_in = wm8750_adc_dat; - wm8750_data_req_set(wm, mpu->i2s->data_req, mpu->i2s); - - qdev_connect_gpio_out(mpu->gpio, Z2_GPIO_LCD_CS, - qemu_allocate_irq(z2_lcd_cs, z2_lcd, 0)); - - z2_binfo.board_id = 0x6dd; - arm_load_kernel(mpu->cpu, machine, &z2_binfo); -} - -static void z2_machine_init(MachineClass *mc) -{ - mc->desc = "Zipit Z2 (PXA27x)"; - mc->init = z2_init; - mc->ignore_memory_transaction_failures = true; - mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c5"); - mc->deprecation_reason = "machine is old and unmaintained"; - - machine_add_audiodev_property(mc); -} - -DEFINE_MACHINE("z2", z2_machine_init) diff --git a/hw/audio/ac97.c b/hw/audio/ac97.c index 3f0053f94de..eb7a847080d 100644 --- a/hw/audio/ac97.c +++ b/hw/audio/ac97.c @@ -24,7 +24,7 @@ #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "qemu/module.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qom/object.h" #include "ac97.h" @@ -886,7 +886,7 @@ static void nabm_writel(void *opaque, uint32_t addr, uint32_t val) static int write_audio(AC97LinkState *s, AC97BusMasterRegs *r, int max, int *stop) { - uint8_t tmpbuf[4096]; + QEMU_UNINITIALIZED uint8_t tmpbuf[4096]; uint32_t addr = r->bd.addr; uint32_t temp = r->picb << 1; uint32_t written = 0; @@ -959,7 +959,7 @@ static void write_bup(AC97LinkState *s, int elapsed) static int read_audio(AC97LinkState *s, AC97BusMasterRegs *r, int max, int *stop) { - uint8_t tmpbuf[4096]; + QEMU_UNINITIALIZED uint8_t tmpbuf[4096]; uint32_t addr = r->bd.addr; uint32_t temp = r->picb << 1; uint32_t nread = 0; @@ -1324,12 +1324,11 
@@ static void ac97_exit(PCIDevice *dev) AUD_remove_card(&s->card); } -static Property ac97_properties[] = { +static const Property ac97_properties[] = { DEFINE_AUDIO_PROPERTIES(AC97LinkState, card), - DEFINE_PROP_END_OF_LIST(), }; -static void ac97_class_init(ObjectClass *klass, void *data) +static void ac97_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -1344,7 +1343,7 @@ static void ac97_class_init(ObjectClass *klass, void *data) dc->desc = "Intel 82801AA AC97 Audio"; dc->vmsd = &vmstate_ac97; device_class_set_props(dc, ac97_properties); - dc->reset = ac97_on_reset; + device_class_set_legacy_reset(dc, ac97_on_reset); } static const TypeInfo ac97_info = { @@ -1352,7 +1351,7 @@ static const TypeInfo ac97_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(AC97LinkState), .class_init = ac97_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/audio/adlib.c b/hw/audio/adlib.c index bd73806d83a..1f29a7e319d 100644 --- a/hw/audio/adlib.c +++ b/hw/audio/adlib.c @@ -297,14 +297,13 @@ static void adlib_realizefn (DeviceState *dev, Error **errp) portio_list_add (&s->port_list, isa_address_space_io(&s->parent_obj), 0); } -static Property adlib_properties[] = { +static const Property adlib_properties[] = { DEFINE_AUDIO_PROPERTIES(AdlibState, card), DEFINE_PROP_UINT32 ("iobase", AdlibState, port, 0x220), DEFINE_PROP_UINT32 ("freq", AdlibState, freq, 44100), - DEFINE_PROP_END_OF_LIST (), }; -static void adlib_class_initfn (ObjectClass *klass, void *data) +static void adlib_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS (klass); diff --git a/hw/audio/asc.c b/hw/audio/asc.c index 805416372c2..edd42d6d91c 100644 --- a/hw/audio/asc.c +++ b/hw/audio/asc.c @@ -12,6 +12,7 @@ #include "qemu/osdep.h" #include "qemu/timer.h" +#include "qapi/error.h" #include "hw/sysbus.h" #include "hw/irq.h" #include "audio/audio.h" @@ -406,7 +407,6 @@ static void asc_fifo_write(void *opaque, hwaddr addr, uint64_t value, } else { fs->fifo[addr] = value; } - return; } static const MemoryRegionOps asc_fifo_ops = { @@ -654,11 +654,17 @@ static void asc_realize(DeviceState *dev, Error **errp) s->voice = AUD_open_out(&s->card, s->voice, "asc.out", s, asc_out_cb, &as); + if (!s->voice) { + AUD_remove_card(&s->card); + error_setg(errp, "Initializing audio stream failed"); + return; + } + s->shift = 1; s->samples = AUD_get_buffer_size_out(s->voice) >> s->shift; s->mixbuf = g_malloc0(s->samples << s->shift); - s->silentbuf = g_malloc0(s->samples << s->shift); + s->silentbuf = g_malloc(s->samples << s->shift); memset(s->silentbuf, 0x80, s->samples << s->shift); /* Add easc registers if required */ @@ -695,13 +701,12 @@ static void asc_init(Object *obj) sysbus_init_mmio(sbd, &s->asc); } -static Property asc_properties[] = { +static const Property asc_properties[] = { DEFINE_AUDIO_PROPERTIES(ASCState, card), DEFINE_PROP_UINT8("asctype", ASCState, type, ASC_TYPE_ASC), - DEFINE_PROP_END_OF_LIST(), }; -static void asc_class_init(ObjectClass *oc, void *data) +static void asc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); ResettableClass *rc = RESETTABLE_CLASS(oc); diff --git a/hw/audio/cs4231.c b/hw/audio/cs4231.c index 967caa7fcbd..97cceb44d86 100644 --- a/hw/audio/cs4231.c +++ b/hw/audio/cs4231.c @@ -160,11 +160,11 @@ static void cs4231_init(Object *obj) 
sysbus_init_irq(dev, &s->irq); } -static void cs4231_class_init(ObjectClass *klass, void *data) +static void cs4231_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = cs_reset; + device_class_set_legacy_reset(dc, cs_reset); dc->vmsd = &vmstate_cs4231; } diff --git a/hw/audio/cs4231a.c b/hw/audio/cs4231a.c index 9ef57f042d1..6dfff202ff9 100644 --- a/hw/audio/cs4231a.c +++ b/hw/audio/cs4231a.c @@ -528,7 +528,7 @@ static int cs_write_audio (CSState *s, int nchan, int dma_pos, int dma_len, int len) { int temp, net; - uint8_t tmpbuf[4096]; + QEMU_UNINITIALIZED uint8_t tmpbuf[4096]; IsaDmaClass *k = ISADMA_GET_CLASS(s->isa_dma); temp = len; @@ -547,7 +547,7 @@ static int cs_write_audio (CSState *s, int nchan, int dma_pos, copied = k->read_memory(s->isa_dma, nchan, tmpbuf, dma_pos, to_copy); if (s->tab) { int i; - int16_t linbuf[4096]; + QEMU_UNINITIALIZED int16_t linbuf[4096]; for (i = 0; i < copied; ++i) linbuf[i] = s->tab[tmpbuf[i]]; @@ -682,6 +682,10 @@ static void cs4231a_realizefn (DeviceState *dev, Error **errp) return; } + if (s->irq >= ISA_NUM_IRQS) { + error_setg(errp, "Invalid IRQ %d (max %d)", s->irq, ISA_NUM_IRQS - 1); + return; + } s->pic = isa_bus_get_irq(bus, s->irq); k = ISADMA_GET_CLASS(s->isa_dma); k->register_channel(s->isa_dma, s->dma, cs_dma_read, s); @@ -689,20 +693,19 @@ static void cs4231a_realizefn (DeviceState *dev, Error **errp) isa_register_ioport (d, &s->ioports, s->port); } -static Property cs4231a_properties[] = { +static const Property cs4231a_properties[] = { DEFINE_AUDIO_PROPERTIES(CSState, card), DEFINE_PROP_UINT32 ("iobase", CSState, port, 0x534), DEFINE_PROP_UINT32 ("irq", CSState, irq, 9), DEFINE_PROP_UINT32 ("dma", CSState, dma, 3), - DEFINE_PROP_END_OF_LIST (), }; -static void cs4231a_class_initfn (ObjectClass *klass, void *data) +static void cs4231a_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS (klass); dc->realize = cs4231a_realizefn; - dc->reset = cs4231a_reset; + device_class_set_legacy_reset(dc, cs4231a_reset); set_bit(DEVICE_CATEGORY_SOUND, dc->categories); dc->desc = "Crystal Semiconductor CS4231A"; dc->vmsd = &vmstate_cs4231a; diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c index 4ab61d3b9da..a6a32a6348b 100644 --- a/hw/audio/es1370.c +++ b/hw/audio/es1370.c @@ -32,7 +32,7 @@ #include "migration/vmstate.h" #include "qemu/cutils.h" #include "qemu/module.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qom/object.h" #include "trace.h" @@ -604,7 +604,7 @@ static uint64_t es1370_read(void *opaque, hwaddr addr, unsigned size) static void es1370_transfer_audio (ES1370State *s, struct chan *d, int loop_sel, int max, bool *irq) { - uint8_t tmpbuf[4096]; + QEMU_UNINITIALIZED uint8_t tmpbuf[4096]; size_t to_transfer; uint32_t addr = d->frame_addr; int sc = d->scount & 0xffff; @@ -868,12 +868,11 @@ static void es1370_exit(PCIDevice *dev) AUD_remove_card(&s->card); } -static Property es1370_properties[] = { +static const Property es1370_properties[] = { DEFINE_AUDIO_PROPERTIES(ES1370State, card), - DEFINE_PROP_END_OF_LIST(), }; -static void es1370_class_init (ObjectClass *klass, void *data) +static void es1370_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS (klass); PCIDeviceClass *k = PCI_DEVICE_CLASS (klass); @@ -888,7 +887,7 @@ static void es1370_class_init (ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_SOUND, dc->categories); dc->desc = "ENSONIQ AudioPCI ES1370"; dc->vmsd = &vmstate_es1370; - dc->reset = 
es1370_on_reset; + device_class_set_legacy_reset(dc, es1370_on_reset); device_class_set_props(dc, es1370_properties); } @@ -897,7 +896,7 @@ static const TypeInfo es1370_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof (ES1370State), .class_init = es1370_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/audio/gus.c b/hw/audio/gus.c index 4beb3fd74e2..c36df0240fe 100644 --- a/hw/audio/gus.c +++ b/hw/audio/gus.c @@ -183,7 +183,7 @@ static int GUS_read_DMA (void *opaque, int nchan, int dma_pos, int dma_len) { GUSState *s = opaque; IsaDmaClass *k = ISADMA_GET_CLASS(s->isa_dma); - char tmpbuf[4096]; + QEMU_UNINITIALIZED char tmpbuf[4096]; int pos = dma_pos, mode, left = dma_len - dma_pos; ldebug ("read DMA %#x %d\n", dma_pos, dma_len); @@ -290,16 +290,15 @@ static void gus_realizefn (DeviceState *dev, Error **errp) AUD_set_active_out (s->voice, 1); } -static Property gus_properties[] = { +static const Property gus_properties[] = { DEFINE_AUDIO_PROPERTIES(GUSState, card), DEFINE_PROP_UINT32 ("freq", GUSState, freq, 44100), DEFINE_PROP_UINT32 ("iobase", GUSState, port, 0x240), DEFINE_PROP_UINT32 ("irq", GUSState, emu.gusirq, 7), DEFINE_PROP_UINT32 ("dma", GUSState, emu.gusdma, 3), - DEFINE_PROP_END_OF_LIST (), }; -static void gus_class_initfn (ObjectClass *klass, void *data) +static void gus_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS (klass); diff --git a/hw/audio/hda-codec.c b/hw/audio/hda-codec.c index b22e486fda9..66edad280f2 100644 --- a/hw/audio/hda-codec.c +++ b/hw/audio/hda-codec.c @@ -487,8 +487,7 @@ static void hda_audio_setup(HDAAudioStream *st) if (st->output) { if (use_timer) { cb = hda_audio_output_cb; - st->buft = timer_new_ns(QEMU_CLOCK_VIRTUAL, - hda_audio_output_timer, st); + timer_del(st->buft); } else { cb = hda_audio_compat_output_cb; } @@ -497,8 +496,7 @@ static void hda_audio_setup(HDAAudioStream *st) } else { if (use_timer) { cb = hda_audio_input_cb; - st->buft = timer_new_ns(QEMU_CLOCK_VIRTUAL, - hda_audio_input_timer, st); + timer_del(st->buft); } else { cb = hda_audio_compat_input_cb; } @@ -726,8 +724,12 @@ static void hda_audio_init(HDACodecDevice *hda, st->gain_right = QEMU_HDA_AMP_STEPS; st->compat_bpos = sizeof(st->compat_buf); st->output = true; + st->buft = timer_new_ns(QEMU_CLOCK_VIRTUAL, + hda_audio_output_timer, st); } else { st->output = false; + st->buft = timer_new_ns(QEMU_CLOCK_VIRTUAL, + hda_audio_input_timer, st); } st->format = AC_FMT_TYPE_PCM | AC_FMT_BITS_16 | (1 << AC_FMT_CHAN_SHIFT); @@ -750,9 +752,7 @@ static void hda_audio_exit(HDACodecDevice *hda) if (st->node == NULL) { continue; } - if (a->use_timer) { - timer_del(st->buft); - } + timer_free(st->buft); if (st->output) { AUD_close_out(&a->card, st->voice.out); } else { @@ -857,12 +857,11 @@ static const VMStateDescription vmstate_hda_audio = { } }; -static Property hda_audio_properties[] = { +static const Property hda_audio_properties[] = { DEFINE_AUDIO_PROPERTIES(HDAAudioState, card), DEFINE_PROP_UINT32("debug", HDAAudioState, debug, 0), DEFINE_PROP_BOOL("mixer", HDAAudioState, mixer, true), DEFINE_PROP_BOOL("use-timer", HDAAudioState, use_timer, true), - DEFINE_PROP_END_OF_LIST(), }; static void hda_audio_init_output(HDACodecDevice *hda, Error **errp) @@ -901,7 +900,7 @@ static void hda_audio_init_micro(HDACodecDevice *hda, Error **errp) hda_audio_init(hda, desc, errp); } -static void hda_audio_base_class_init(ObjectClass *klass, void *data) 
+static void hda_audio_base_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); HDACodecDeviceClass *k = HDA_CODEC_DEVICE_CLASS(klass); @@ -910,7 +909,7 @@ static void hda_audio_base_class_init(ObjectClass *klass, void *data) k->command = hda_audio_command; k->stream = hda_audio_stream; set_bit(DEVICE_CATEGORY_SOUND, dc->categories); - dc->reset = hda_audio_reset; + device_class_set_legacy_reset(dc, hda_audio_reset); dc->vmsd = &vmstate_hda_audio; device_class_set_props(dc, hda_audio_properties); } @@ -923,7 +922,7 @@ static const TypeInfo hda_audio_info = { .abstract = true, }; -static void hda_audio_output_class_init(ObjectClass *klass, void *data) +static void hda_audio_output_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); HDACodecDeviceClass *k = HDA_CODEC_DEVICE_CLASS(klass); @@ -938,7 +937,7 @@ static const TypeInfo hda_audio_output_info = { .class_init = hda_audio_output_class_init, }; -static void hda_audio_duplex_class_init(ObjectClass *klass, void *data) +static void hda_audio_duplex_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); HDACodecDeviceClass *k = HDA_CODEC_DEVICE_CLASS(klass); @@ -953,7 +952,7 @@ static const TypeInfo hda_audio_duplex_info = { .class_init = hda_audio_duplex_class_init, }; -static void hda_audio_micro_class_init(ObjectClass *klass, void *data) +static void hda_audio_micro_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); HDACodecDeviceClass *k = HDA_CODEC_DEVICE_CLASS(klass); diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c index 9c54e60b718..b256c8ccea1 100644 --- a/hw/audio/intel-hda.c +++ b/hw/audio/intel-hda.c @@ -30,16 +30,15 @@ #include "intel-hda.h" #include "migration/vmstate.h" #include "intel-hda-defs.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qapi/error.h" #include "qom/object.h" /* --------------------------------------------------------------------- */ /* hda bus */ -static Property hda_props[] = { +static const Property hda_props[] = { DEFINE_PROP_UINT32("cad", HDACodecDevice, cad, -1), - DEFINE_PROP_END_OF_LIST() }; static const TypeInfo hda_codec_bus_info = { @@ -1215,14 +1214,13 @@ static const VMStateDescription vmstate_intel_hda = { } }; -static Property intel_hda_properties[] = { +static const Property intel_hda_properties[] = { DEFINE_PROP_UINT32("debug", IntelHDAState, debug, 0), DEFINE_PROP_ON_OFF_AUTO("msi", IntelHDAState, msi, ON_OFF_AUTO_AUTO), DEFINE_PROP_BOOL("old_msi_addr", IntelHDAState, old_msi_addr, false), - DEFINE_PROP_END_OF_LIST(), }; -static void intel_hda_class_init(ObjectClass *klass, void *data) +static void intel_hda_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -1231,12 +1229,12 @@ static void intel_hda_class_init(ObjectClass *klass, void *data) k->exit = intel_hda_exit; k->vendor_id = PCI_VENDOR_ID_INTEL; k->class_id = PCI_CLASS_MULTIMEDIA_HD_AUDIO; - dc->reset = intel_hda_reset; + device_class_set_legacy_reset(dc, intel_hda_reset); dc->vmsd = &vmstate_intel_hda; device_class_set_props(dc, intel_hda_properties); } -static void intel_hda_class_init_ich6(ObjectClass *klass, void *data) +static void intel_hda_class_init_ich6(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -1247,7 +1245,7 @@ static void intel_hda_class_init_ich6(ObjectClass *klass, void *data) 
dc->desc = "Intel HD Audio Controller (ich6)"; } -static void intel_hda_class_init_ich9(ObjectClass *klass, void *data) +static void intel_hda_class_init_ich9(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -1264,7 +1262,7 @@ static const TypeInfo intel_hda_info = { .instance_size = sizeof(IntelHDAState), .class_init = intel_hda_class_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -1282,7 +1280,7 @@ static const TypeInfo intel_hda_info_ich9 = { .class_init = intel_hda_class_init_ich9, }; -static void hda_codec_device_class_init(ObjectClass *klass, void *data) +static void hda_codec_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->realize = hda_codec_dev_realize; diff --git a/hw/audio/marvell_88w8618.c b/hw/audio/marvell_88w8618.c index cc285444bce..c5c79d083a4 100644 --- a/hw/audio/marvell_88w8618.c +++ b/hw/audio/marvell_88w8618.c @@ -66,7 +66,7 @@ static void mv88w8618_audio_callback(void *opaque, int free_out, int free_in) { mv88w8618_audio_state *s = opaque; int16_t *codec_buffer; - int8_t buf[4096]; + QEMU_UNINITIALIZED int8_t buf[4096]; int8_t *mem_buffer; int pos, block_size; @@ -287,12 +287,12 @@ static const VMStateDescription mv88w8618_audio_vmsd = { } }; -static void mv88w8618_audio_class_init(ObjectClass *klass, void *data) +static void mv88w8618_audio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = mv88w8618_audio_realize; - dc->reset = mv88w8618_audio_reset; + device_class_set_legacy_reset(dc, mv88w8618_audio_reset); dc->vmsd = &mv88w8618_audio_vmsd; dc->user_creatable = false; } diff --git a/hw/audio/pcspk.c b/hw/audio/pcspk.c index a4b89f17682..a419161b5b1 100644 --- a/hw/audio/pcspk.c +++ b/hw/audio/pcspk.c @@ -215,14 +215,13 @@ static const VMStateDescription vmstate_spk = { } }; -static Property pcspk_properties[] = { +static const Property pcspk_properties[] = { DEFINE_AUDIO_PROPERTIES(PCSpkState, card), DEFINE_PROP_UINT32("iobase", PCSpkState, iobase, 0x61), DEFINE_PROP_BOOL("migrate", PCSpkState, migrate, true), - DEFINE_PROP_END_OF_LIST(), }; -static void pcspk_class_initfn(ObjectClass *klass, void *data) +static void pcspk_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/audio/pl041.c b/hw/audio/pl041.c index b435208c242..5d9d6c1178b 100644 --- a/hw/audio/pl041.c +++ b/hw/audio/pl041.c @@ -625,21 +625,20 @@ static const VMStateDescription vmstate_pl041 = { } }; -static Property pl041_device_properties[] = { +static const Property pl041_device_properties[] = { DEFINE_AUDIO_PROPERTIES(PL041State, codec.card), /* Non-compact FIFO depth property */ DEFINE_PROP_UINT32("nc_fifo_depth", PL041State, fifo_depth, DEFAULT_FIFO_DEPTH), - DEFINE_PROP_END_OF_LIST(), }; -static void pl041_device_class_init(ObjectClass *klass, void *data) +static void pl041_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = pl041_realize; set_bit(DEVICE_CATEGORY_SOUND, dc->categories); - dc->reset = pl041_device_reset; + device_class_set_legacy_reset(dc, pl041_device_reset); dc->vmsd = &vmstate_pl041; device_class_set_props(dc, pl041_device_properties); } diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c index fd76e78d180..bac64118fef 100644 --- a/hw/audio/sb16.c +++ b/hw/audio/sb16.c @@ -1181,7 
+1181,7 @@ static int write_audio (SB16State *s, int nchan, int dma_pos, IsaDma *isa_dma = nchan == s->dma ? s->isa_dma : s->isa_hdma; IsaDmaClass *k = ISADMA_GET_CLASS(isa_dma); int temp, net; - uint8_t tmpbuf[4096]; + QEMU_UNINITIALIZED uint8_t tmpbuf[4096]; temp = len; net = 0; @@ -1440,17 +1440,16 @@ static void sb16_realizefn (DeviceState *dev, Error **errp) s->can_write = 1; } -static Property sb16_properties[] = { +static const Property sb16_properties[] = { DEFINE_AUDIO_PROPERTIES(SB16State, card), DEFINE_PROP_UINT32 ("version", SB16State, ver, 0x0405), /* 4.5 */ DEFINE_PROP_UINT32 ("iobase", SB16State, port, 0x220), DEFINE_PROP_UINT32 ("irq", SB16State, irq, 5), DEFINE_PROP_UINT32 ("dma", SB16State, dma, 1), DEFINE_PROP_UINT32 ("dma16", SB16State, hdma, 5), - DEFINE_PROP_END_OF_LIST (), }; -static void sb16_class_initfn (ObjectClass *klass, void *data) +static void sb16_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS (klass); diff --git a/hw/audio/soundhw.c b/hw/audio/soundhw.c index b387b0ef7d5..d18fd9fa051 100644 --- a/hw/audio/soundhw.c +++ b/hw/audio/soundhw.c @@ -88,7 +88,8 @@ void select_soundhw(const char *name, const char *audiodev) struct soundhw *c; if (selected) { - error_setg(&error_fatal, "only one -soundhw option is allowed"); + error_report("only one -soundhw option is allowed"); + exit(1); } for (c = soundhw; c->name; ++c) { diff --git a/hw/audio/trace-events b/hw/audio/trace-events index b1870ff224b..b8ef5727678 100644 --- a/hw/audio/trace-events +++ b/hw/audio/trace-events @@ -41,7 +41,6 @@ asc_update_irq(int irq, int a, int b) "set IRQ to %d (A: 0x%x B: 0x%x)" #virtio-snd.c virtio_snd_get_config(void *vdev, uint32_t jacks, uint32_t streams, uint32_t chmaps) "snd %p: get_config jacks=%"PRIu32" streams=%"PRIu32" chmaps=%"PRIu32"" -virtio_snd_set_config(void *vdev, uint32_t jacks, uint32_t new_jacks, uint32_t streams, uint32_t new_streams, uint32_t chmaps, uint32_t new_chmaps) "snd %p: set_config jacks from %"PRIu32"->%"PRIu32", streams from %"PRIu32"->%"PRIu32", chmaps from %"PRIu32"->%"PRIu32 virtio_snd_get_features(void *vdev, uint64_t features) "snd %p: get_features 0x%"PRIx64 virtio_snd_vm_state_running(void) "vm state running" virtio_snd_vm_state_stopped(void) "vm state stopped" diff --git a/hw/audio/via-ac97.c b/hw/audio/via-ac97.c index 4c127a1def6..d5231e1cf22 100644 --- a/hw/audio/via-ac97.c +++ b/hw/audio/via-ac97.c @@ -175,7 +175,7 @@ static void out_cb(void *opaque, int avail) ViaAC97SGDChannel *c = &s->aur; int temp, to_copy, copied; bool stop = false; - uint8_t tmpbuf[4096]; + QEMU_UNINITIALIZED uint8_t tmpbuf[4096]; if (c->stat & STAT_PAUSED) { return; @@ -459,12 +459,11 @@ static void via_ac97_exit(PCIDevice *dev) AUD_remove_card(&s->card); } -static Property via_ac97_properties[] = { +static const Property via_ac97_properties[] = { DEFINE_AUDIO_PROPERTIES(ViaAC97State, card), - DEFINE_PROP_END_OF_LIST(), }; -static void via_ac97_class_init(ObjectClass *klass, void *data) +static void via_ac97_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -478,7 +477,7 @@ static void via_ac97_class_init(ObjectClass *klass, void *data) device_class_set_props(dc, via_ac97_properties); set_bit(DEVICE_CATEGORY_SOUND, dc->categories); dc->desc = "VIA AC97"; - dc->reset = via_ac97_reset; + device_class_set_legacy_reset(dc, via_ac97_reset); /* Reason: Part of a south bridge chip */ dc->user_creatable = false; } @@ -488,7 +487,7 @@ static const 
TypeInfo via_ac97_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(ViaAC97State), .class_init = via_ac97_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -502,7 +501,7 @@ static void via_mc97_realize(PCIDevice *pci_dev, Error **errp) pci_set_long(pci_dev->config + PCI_INTERRUPT_PIN, 0x03); } -static void via_mc97_class_init(ObjectClass *klass, void *data) +static void via_mc97_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -523,7 +522,7 @@ static const TypeInfo via_mc97_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIDevice), .class_init = via_mc97_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/audio/virtio-snd-pci.c b/hw/audio/virtio-snd-pci.c index ab58c6410ed..9eb00073920 100644 --- a/hw/audio/virtio-snd-pci.c +++ b/hw/audio/virtio-snd-pci.c @@ -27,11 +27,10 @@ struct VirtIOSoundPCI { VirtIOSound vdev; }; -static Property virtio_snd_pci_properties[] = { +static const Property virtio_snd_pci_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2), - DEFINE_PROP_END_OF_LIST(), }; static void virtio_snd_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -43,7 +42,7 @@ static void virtio_snd_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void virtio_snd_pci_class_init(ObjectClass *klass, void *data) +static void virtio_snd_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass); diff --git a/hw/audio/virtio-snd.c b/hw/audio/virtio-snd.c index e5196aa4bb5..eca3319e59f 100644 --- a/hw/audio/virtio-snd.c +++ b/hw/audio/virtio-snd.c @@ -20,8 +20,7 @@ #include "qemu/log.h" #include "qemu/error-report.h" #include "qemu/lockable.h" -#include "exec/tswap.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "trace.h" #include "qapi/error.h" #include "hw/audio/virtio-snd.h" @@ -78,7 +77,7 @@ static const VMStateDescription vmstate_virtio_snd = { }, }; -static Property virtio_snd_properties[] = { +static const Property virtio_snd_properties[] = { DEFINE_AUDIO_PROPERTIES(VirtIOSound, card), DEFINE_PROP_UINT32("jacks", VirtIOSound, snd_conf.jacks, VIRTIO_SOUND_JACK_DEFAULT), @@ -86,7 +85,6 @@ static Property virtio_snd_properties[] = { VIRTIO_SOUND_STREAM_DEFAULT), DEFINE_PROP_UINT32("chmaps", VirtIOSound, snd_conf.chmaps, VIRTIO_SOUND_CHMAP_DEFAULT), - DEFINE_PROP_END_OF_LIST(), }; static void @@ -107,29 +105,6 @@ virtio_snd_get_config(VirtIODevice *vdev, uint8_t *config) } -static void -virtio_snd_set_config(VirtIODevice *vdev, const uint8_t *config) -{ - VirtIOSound *s = VIRTIO_SND(vdev); - const virtio_snd_config *sndconfig = - (const virtio_snd_config *)config; - - - trace_virtio_snd_set_config(vdev, - s->snd_conf.jacks, - sndconfig->jacks, - s->snd_conf.streams, - sndconfig->streams, - s->snd_conf.chmaps, - sndconfig->chmaps); - - memcpy(&s->snd_conf, sndconfig, sizeof(virtio_snd_config)); - le32_to_cpus(&s->snd_conf.jacks); - le32_to_cpus(&s->snd_conf.streams); - le32_to_cpus(&s->snd_conf.chmaps); - -} - static void virtio_snd_pcm_buffer_free(VirtIOSoundPCMBuffer *buffer) { @@ -282,12 +257,12 @@ uint32_t 
virtio_snd_set_pcm_params(VirtIOSound *s, error_report("Number of channels is not supported."); return cpu_to_le32(VIRTIO_SND_S_NOT_SUPP); } - if (BIT(params->format) > sizeof(supported_formats) || + if (params->format >= sizeof(supported_formats) * BITS_PER_BYTE || !(supported_formats & BIT(params->format))) { error_report("Stream format is not supported."); return cpu_to_le32(VIRTIO_SND_S_NOT_SUPP); } - if (BIT(params->rate) > sizeof(supported_rates) || + if (params->rate >= sizeof(supported_rates) * BITS_PER_BYTE || !(supported_rates & BIT(params->rate))) { error_report("Stream rate is not supported."); return cpu_to_le32(VIRTIO_SND_S_NOT_SUPP); @@ -1386,7 +1361,7 @@ static void virtio_snd_reset(VirtIODevice *vdev) } } -static void virtio_snd_class_init(ObjectClass *klass, void *data) +static void virtio_snd_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); @@ -1400,7 +1375,6 @@ static void virtio_snd_class_init(ObjectClass *klass, void *data) vdc->realize = virtio_snd_realize; vdc->unrealize = virtio_snd_unrealize; vdc->get_config = virtio_snd_get_config; - vdc->set_config = virtio_snd_set_config; vdc->get_features = get_features; vdc->reset = virtio_snd_reset; vdc->legacy_features = 0; diff --git a/hw/audio/wm8750.c b/hw/audio/wm8750.c index ec2c4e13743..2846b55fe2a 100644 --- a/hw/audio/wm8750.c +++ b/hw/audio/wm8750.c @@ -706,12 +706,11 @@ void wm8750_set_bclk_in(void *opaque, int new_hz) wm8750_clk_update(s, 1); } -static Property wm8750_properties[] = { +static const Property wm8750_properties[] = { DEFINE_AUDIO_PROPERTIES(WM8750State, card), - DEFINE_PROP_END_OF_LIST(), }; -static void wm8750_class_init(ObjectClass *klass, void *data) +static void wm8750_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); diff --git a/hw/avr/arduino.c b/hw/avr/arduino.c index 48ef478346e..e166ca18e11 100644 --- a/hw/avr/arduino.c +++ b/hw/avr/arduino.c @@ -56,7 +56,7 @@ static void arduino_machine_init(MachineState *machine) } } -static void arduino_machine_class_init(ObjectClass *oc, void *data) +static void arduino_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -69,7 +69,7 @@ static void arduino_machine_class_init(ObjectClass *oc, void *data) mc->no_parallel = 1; } -static void arduino_duemilanove_class_init(ObjectClass *oc, void *data) +static void arduino_duemilanove_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); ArduinoMachineClass *amc = ARDUINO_MACHINE_CLASS(oc); @@ -84,7 +84,7 @@ static void arduino_duemilanove_class_init(ObjectClass *oc, void *data) amc->xtal_hz = 16 * 1000 * 1000; }; -static void arduino_uno_class_init(ObjectClass *oc, void *data) +static void arduino_uno_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); ArduinoMachineClass *amc = ARDUINO_MACHINE_CLASS(oc); @@ -99,7 +99,7 @@ static void arduino_uno_class_init(ObjectClass *oc, void *data) amc->xtal_hz = 16 * 1000 * 1000; }; -static void arduino_mega_class_init(ObjectClass *oc, void *data) +static void arduino_mega_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); ArduinoMachineClass *amc = ARDUINO_MACHINE_CLASS(oc); @@ -114,7 +114,7 @@ static void arduino_mega_class_init(ObjectClass *oc, void *data) amc->xtal_hz = 16 * 1000 * 1000; }; -static void arduino_mega2560_class_init(ObjectClass *oc, 
void *data) +static void arduino_mega2560_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); ArduinoMachineClass *amc = ARDUINO_MACHINE_CLASS(oc); diff --git a/hw/avr/atmega.c b/hw/avr/atmega.c index 31c8992d750..95b6da5e343 100644 --- a/hw/avr/atmega.c +++ b/hw/avr/atmega.c @@ -12,13 +12,15 @@ #include "qemu/module.h" #include "qemu/units.h" #include "qapi/error.h" -#include "exec/memory.h" -#include "exec/address-spaces.h" -#include "sysemu/sysemu.h" +#include "exec/target_page.h" +#include "system/memory.h" +#include "system/address-spaces.h" +#include "system/system.h" #include "hw/qdev-properties.h" #include "hw/sysbus.h" #include "qom/object.h" #include "hw/misc/unimp.h" +#include "migration/vmstate.h" #include "atmega.h" enum AtmegaPeripheral { @@ -224,8 +226,6 @@ static void atmega_realize(DeviceState *dev, Error **errp) char *devname; size_t i; - assert(mc->io_size <= 0x200); - if (!s->xtal_freq_hz) { error_setg(errp, "\"xtal-frequency-hz\" property must be provided."); return; @@ -240,11 +240,37 @@ static void atmega_realize(DeviceState *dev, Error **errp) qdev_realize(DEVICE(&s->cpu), NULL, &error_abort); cpudev = DEVICE(&s->cpu); - /* SRAM */ - memory_region_init_ram(&s->sram, OBJECT(dev), "sram", mc->sram_size, - &error_abort); - memory_region_add_subregion(get_system_memory(), - OFFSET_DATA + mc->io_size, &s->sram); + /* + * SRAM + * + * Softmmu is not able mix i/o and ram on the same page. + * Therefore in all cases, the first page exclusively contains i/o. + * + * If the MCU's i/o region matches the page size, then we can simply + * allocate all ram starting at the second page. Otherwise, we must + * allocate some ram as i/o to complete the first page. + */ + assert(mc->io_size == 0x100 || mc->io_size == 0x200); + if (mc->io_size >= TARGET_PAGE_SIZE) { + memory_region_init_ram(&s->sram, OBJECT(dev), "sram", mc->sram_size, + &error_abort); + memory_region_add_subregion(get_system_memory(), + OFFSET_DATA + mc->io_size, &s->sram); + } else { + int sram_io_size = TARGET_PAGE_SIZE - mc->io_size; + void *sram_io_mem = g_malloc0(sram_io_size); + + memory_region_init_ram_device_ptr(&s->sram_io, OBJECT(dev), "sram-as-io", + sram_io_size, sram_io_mem); + memory_region_add_subregion(get_system_memory(), + OFFSET_DATA + mc->io_size, &s->sram_io); + vmstate_register_ram(&s->sram_io, dev); + + memory_region_init_ram(&s->sram, OBJECT(dev), "sram", + mc->sram_size - sram_io_size, &error_abort); + memory_region_add_subregion(get_system_memory(), + OFFSET_DATA + TARGET_PAGE_SIZE, &s->sram); + } /* Flash */ memory_region_init_rom(&s->flash, OBJECT(dev), @@ -355,13 +381,12 @@ static void atmega_realize(DeviceState *dev, Error **errp) create_unimplemented_device("avr-eeprom", OFFSET_DATA + 0x03f, 3); } -static Property atmega_props[] = { +static const Property atmega_props[] = { DEFINE_PROP_UINT64("xtal-frequency-hz", AtmegaMcuState, xtal_freq_hz, 0), - DEFINE_PROP_END_OF_LIST() }; -static void atmega_class_init(ObjectClass *oc, void *data) +static void atmega_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -371,7 +396,7 @@ static void atmega_class_init(ObjectClass *oc, void *data) dc->user_creatable = false; } -static void atmega168_class_init(ObjectClass *oc, void *data) +static void atmega168_class_init(ObjectClass *oc, const void *data) { AtmegaMcuClass *amc = ATMEGA_MCU_CLASS(oc); @@ -386,7 +411,7 @@ static void atmega168_class_init(ObjectClass *oc, void *data) amc->dev = dev168_328; }; -static void 
atmega328_class_init(ObjectClass *oc, void *data) +static void atmega328_class_init(ObjectClass *oc, const void *data) { AtmegaMcuClass *amc = ATMEGA_MCU_CLASS(oc); @@ -401,7 +426,7 @@ static void atmega328_class_init(ObjectClass *oc, void *data) amc->dev = dev168_328; }; -static void atmega1280_class_init(ObjectClass *oc, void *data) +static void atmega1280_class_init(ObjectClass *oc, const void *data) { AtmegaMcuClass *amc = ATMEGA_MCU_CLASS(oc); @@ -416,7 +441,7 @@ static void atmega1280_class_init(ObjectClass *oc, void *data) amc->dev = dev1280_2560; }; -static void atmega2560_class_init(ObjectClass *oc, void *data) +static void atmega2560_class_init(ObjectClass *oc, const void *data) { AtmegaMcuClass *amc = ATMEGA_MCU_CLASS(oc); diff --git a/hw/avr/atmega.h b/hw/avr/atmega.h index a99ee15c7e1..9ac46782312 100644 --- a/hw/avr/atmega.h +++ b/hw/avr/atmega.h @@ -41,6 +41,7 @@ struct AtmegaMcuState { MemoryRegion flash; MemoryRegion eeprom; MemoryRegion sram; + MemoryRegion sram_io; DeviceState *io; AVRMaskState pwr[POWER_MAX]; AVRUsartState usart[USART_MAX]; diff --git a/hw/avr/boot.c b/hw/avr/boot.c index 617f3a144c8..e5a29c7218e 100644 --- a/hw/avr/boot.c +++ b/hw/avr/boot.c @@ -71,11 +71,9 @@ bool avr_load_firmware(AVRCPU *cpu, MachineState *ms, return false; } - bytes_loaded = load_elf_ram_sym(filename, - NULL, NULL, NULL, - &entry, NULL, NULL, - &e_flags, 0, EM_AVR, 0, 0, - NULL, true, NULL); + bytes_loaded = load_elf_as(filename, NULL, NULL, NULL, + &entry, NULL, NULL, + &e_flags, ELFDATA2LSB, EM_AVR, 0, 0, NULL); if (bytes_loaded >= 0) { /* If ELF file is provided, determine CPU type reading ELF e_flags. */ const char *elf_cpu = avr_elf_e_flags_to_cpu_type(e_flags); diff --git a/hw/block/Kconfig b/hw/block/Kconfig index 9e8f28f9824..737dbcdb3e9 100644 --- a/hw/block/Kconfig +++ b/hw/block/Kconfig @@ -13,24 +13,12 @@ config FDC_SYSBUS config SSI_M25P80 bool -config NAND - bool - config PFLASH_CFI01 bool config PFLASH_CFI02 bool -config ECC - bool - -config ONENAND - bool - -config TC58128 - bool - config VIRTIO_BLK bool default y diff --git a/hw/block/block.c b/hw/block/block.c index 3ceca7dce69..2e10611d95a 100644 --- a/hw/block/block.c +++ b/hw/block/block.c @@ -9,9 +9,10 @@ #include "qemu/osdep.h" #include "block/block_int-common.h" -#include "sysemu/blockdev.h" -#include "sysemu/block-backend.h" +#include "system/blockdev.h" +#include "system/block-backend.h" #include "hw/block/block.h" +#include "migration/cpr.h" #include "qapi/error.h" #include "qapi/qapi-types-block.h" @@ -66,6 +67,10 @@ bool blk_check_size_and_read_all(BlockBackend *blk, DeviceState *dev, int ret; g_autofree char *dev_id = NULL; + if (cpr_is_incoming()) { + return true; + } + blk_len = blk_getlength(blk); if (blk_len < 0) { error_setg_errno(errp, -blk_len, diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c index 98501e6885e..48c2e315f31 100644 --- a/hw/block/dataplane/xen-block.c +++ b/hw/block/dataplane/xen-block.c @@ -27,8 +27,8 @@ #include "hw/xen/xen.h" #include "hw/block/xen_blkif.h" #include "hw/xen/interface/io/ring.h" -#include "sysemu/block-backend.h" -#include "sysemu/iothread.h" +#include "system/block-backend.h" +#include "system/iothread.h" #include "xen-block.h" typedef struct XenBlockRequest { diff --git a/hw/block/dataplane/xen-block.h b/hw/block/dataplane/xen-block.h index 7b8e9df09f7..eb70327db38 100644 --- a/hw/block/dataplane/xen-block.h +++ b/hw/block/dataplane/xen-block.h @@ -10,7 +10,7 @@ #include "hw/block/block.h" #include "hw/xen/xen-bus.h" -#include 
"sysemu/iothread.h" +#include "system/iothread.h" typedef struct XenBlockDataPlane XenBlockDataPlane; diff --git a/hw/block/ecc.c b/hw/block/ecc.c deleted file mode 100644 index ed889a4184f..00000000000 --- a/hw/block/ecc.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Calculate Error-correcting Codes. Used by NAND Flash controllers - * (not by NAND chips). - * - * Copyright (c) 2006 Openedhand Ltd. - * Written by Andrzej Zaborowski - * - * This code is licensed under the GNU GPL v2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "qemu/osdep.h" -#include "migration/vmstate.h" -#include "hw/block/flash.h" - -/* - * Pre-calculated 256-way 1 byte column parity. Table borrowed from Linux. - */ -static const uint8_t nand_ecc_precalc_table[] = { - 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, - 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00, - 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, - 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65, - 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, - 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66, - 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, - 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03, - 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, - 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69, - 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, - 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c, - 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, - 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f, - 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, - 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a, - 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, - 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a, - 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, - 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f, - 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, - 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c, - 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, - 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69, - 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, - 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03, - 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, - 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66, - 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, - 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65, - 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, - 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00, -}; - -/* Update ECC parity count. */ -uint8_t ecc_digest(ECCState *s, uint8_t sample) -{ - uint8_t idx = nand_ecc_precalc_table[sample]; - - s->cp ^= idx & 0x3f; - if (idx & 0x40) { - s->lp[0] ^= ~s->count; - s->lp[1] ^= s->count; - } - s->count ++; - - return sample; -} - -/* Reinitialise the counters. 
*/ -void ecc_reset(ECCState *s) -{ - s->lp[0] = 0x0000; - s->lp[1] = 0x0000; - s->cp = 0x00; - s->count = 0; -} - -/* Save/restore */ -const VMStateDescription vmstate_ecc_state = { - .name = "ecc-state", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_UINT8(cp, ECCState), - VMSTATE_UINT16_ARRAY(lp, ECCState, 2), - VMSTATE_UINT16(count, ECCState), - VMSTATE_END_OF_LIST(), - }, -}; diff --git a/hw/block/fdc-isa.c b/hw/block/fdc-isa.c index e43dc532af8..6d1790e0e61 100644 --- a/hw/block/fdc-isa.c +++ b/hw/block/fdc-isa.c @@ -39,10 +39,10 @@ #include "hw/qdev-properties-system.h" #include "migration/vmstate.h" #include "hw/block/block.h" -#include "sysemu/block-backend.h" -#include "sysemu/blockdev.h" -#include "sysemu/sysemu.h" -#include "exec/ioport.h" +#include "system/block-backend.h" +#include "system/blockdev.h" +#include "system/system.h" +#include "system/ioport.h" #include "qemu/log.h" #include "qemu/main-loop.h" #include "qemu/module.h" @@ -147,6 +147,8 @@ static void isa_fdc_get_drive_max_chs(FloppyDriveType type, uint8_t *maxc, *maxs = fdf->last_sect; } } + /* fd_formats must contain at least one entry per FloppyDriveType */ + assert(*maxc); (*maxc)--; } @@ -281,7 +283,7 @@ static const VMStateDescription vmstate_isa_fdc = { } }; -static Property isa_fdc_properties[] = { +static const Property isa_fdc_properties[] = { DEFINE_PROP_UINT32("iobase", FDCtrlISABus, iobase, 0x3f0), DEFINE_PROP_UINT32("irq", FDCtrlISABus, irq, 6), DEFINE_PROP_UINT32("dma", FDCtrlISABus, dma, 2), @@ -294,10 +296,9 @@ static Property isa_fdc_properties[] = { DEFINE_PROP_SIGNED("fallback", FDCtrlISABus, state.fallback, FLOPPY_DRIVE_TYPE_288, qdev_prop_fdc_drive_type, FloppyDriveType), - DEFINE_PROP_END_OF_LIST(), }; -static void isabus_fdc_class_init(ObjectClass *klass, void *data) +static void isabus_fdc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); @@ -305,7 +306,7 @@ static void isabus_fdc_class_init(ObjectClass *klass, void *data) dc->desc = "virtual floppy controller"; dc->realize = isabus_fdc_realize; dc->fw_name = "fdc"; - dc->reset = fdctrl_external_reset_isa; + device_class_set_legacy_reset(dc, fdctrl_external_reset_isa); dc->vmsd = &vmstate_isa_fdc; adevc->build_dev_aml = build_fdc_aml; device_class_set_props(dc, isa_fdc_properties); @@ -330,7 +331,7 @@ static const TypeInfo isa_fdc_info = { .instance_size = sizeof(FDCtrlISABus), .class_init = isabus_fdc_class_init, .instance_init = isabus_fdc_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_ACPI_DEV_AML_IF }, { }, }, diff --git a/hw/block/fdc-sysbus.c b/hw/block/fdc-sysbus.c index 035bc089752..956860ab298 100644 --- a/hw/block/fdc-sysbus.c +++ b/hw/block/fdc-sysbus.c @@ -26,7 +26,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qom/object.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/sysbus.h" #include "hw/block/fdc.h" #include "migration/vmstate.h" @@ -176,12 +176,12 @@ static const VMStateDescription vmstate_sysbus_fdc = { } }; -static void sysbus_fdc_common_class_init(ObjectClass *klass, void *data) +static void sysbus_fdc_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = sysbus_fdc_realize; - dc->reset = fdctrl_external_reset_sysbus; + device_class_set_legacy_reset(dc, fdctrl_external_reset_sysbus); dc->vmsd = &vmstate_sysbus_fdc; 
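The audio and floppy-controller hunks in this stretch repeat two mechanical conversions that run through the whole series: qdev property arrays become static const and drop the DEFINE_PROP_END_OF_LIST() terminator, and reset handlers are registered with device_class_set_legacy_reset() instead of assigning dc->reset directly. A minimal sketch of both on a hypothetical FooState device (the type, property and reset names are illustrative, not from the patch):

typedef struct FooState {
    DeviceState parent_obj;
    uint32_t iobase;
} FooState;

static void foo_reset(DeviceState *dev)
{
    /* legacy-style whole-device reset */
}

static const Property foo_properties[] = {
    DEFINE_PROP_UINT32("iobase", FooState, iobase, 0x220),
    /* no DEFINE_PROP_END_OF_LIST(): device_class_set_props() is now a
       macro that passes ARRAY_SIZE() of the array alongside it */
};

static void foo_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, foo_properties);
    /* registers foo_reset through the Resettable interface */
    device_class_set_legacy_reset(dc, foo_reset);
}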
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } @@ -196,7 +196,7 @@ static const TypeInfo sysbus_fdc_common_typeinfo = { .class_size = sizeof(FDCtrlSysBusClass), }; -static Property sysbus_fdc_properties[] = { +static const Property sysbus_fdc_properties[] = { DEFINE_PROP_SIGNED("fdtypeA", FDCtrlSysBus, state.qdev_for_drives[0].type, FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type, FloppyDriveType), @@ -206,10 +206,9 @@ static Property sysbus_fdc_properties[] = { DEFINE_PROP_SIGNED("fallback", FDCtrlSysBus, state.fallback, FLOPPY_DRIVE_TYPE_144, qdev_prop_fdc_drive_type, FloppyDriveType), - DEFINE_PROP_END_OF_LIST(), }; -static void sysbus_fdc_class_init(ObjectClass *klass, void *data) +static void sysbus_fdc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -223,17 +222,16 @@ static const TypeInfo sysbus_fdc_typeinfo = { .class_init = sysbus_fdc_class_init, }; -static Property sun4m_fdc_properties[] = { +static const Property sun4m_fdc_properties[] = { DEFINE_PROP_SIGNED("fdtype", FDCtrlSysBus, state.qdev_for_drives[0].type, FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type, FloppyDriveType), DEFINE_PROP_SIGNED("fallback", FDCtrlSysBus, state.fallback, FLOPPY_DRIVE_TYPE_144, qdev_prop_fdc_drive_type, FloppyDriveType), - DEFINE_PROP_END_OF_LIST(), }; -static void sun4m_fdc_class_init(ObjectClass *klass, void *data) +static void sun4m_fdc_class_init(ObjectClass *klass, const void *data) { FDCtrlSysBusClass *sbdc = SYSBUS_FDC_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/block/fdc.c b/hw/block/fdc.c index 6dd94e98bc3..d0f08c7be54 100644 --- a/hw/block/fdc.c +++ b/hw/block/fdc.c @@ -39,9 +39,9 @@ #include "hw/qdev-properties-system.h" #include "migration/vmstate.h" #include "hw/block/block.h" -#include "sysemu/block-backend.h" -#include "sysemu/blockdev.h" -#include "sysemu/sysemu.h" +#include "system/block-backend.h" +#include "system/blockdev.h" +#include "system/system.h" #include "qemu/log.h" #include "qemu/main-loop.h" #include "qemu/module.h" @@ -454,13 +454,12 @@ struct FloppyDrive { FloppyDriveType type; }; -static Property floppy_drive_properties[] = { +static const Property floppy_drive_properties[] = { DEFINE_PROP_UINT32("unit", FloppyDrive, unit, -1), DEFINE_BLOCK_PROPERTIES(FloppyDrive, conf), DEFINE_PROP_SIGNED("drive-type", FloppyDrive, type, FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type, FloppyDriveType), - DEFINE_PROP_END_OF_LIST(), }; static void floppy_drive_realize(DeviceState *qdev, Error **errp) @@ -554,7 +553,7 @@ static void floppy_drive_realize(DeviceState *qdev, Error **errp) fd_revalidate(drive); } -static void floppy_drive_class_init(ObjectClass *klass, void *data) +static void floppy_drive_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->realize = floppy_drive_realize; diff --git a/hw/block/hd-geometry.c b/hw/block/hd-geometry.c index 2b0af4430f0..db221901cf4 100644 --- a/hw/block/hd-geometry.c +++ b/hw/block/hd-geometry.c @@ -31,9 +31,8 @@ */ #include "qemu/osdep.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qapi/qapi-types-block.h" -#include "qemu/bswap.h" #include "hw/block/block.h" #include "trace.h" diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c index 0b94af3653f..a5336d92ff9 100644 --- a/hw/block/m25p80.c +++ b/hw/block/m25p80.c @@ -23,7 +23,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "hw/block/block.h" #include 
"hw/block/flash.h" #include "hw/qdev-properties.h" @@ -61,7 +61,8 @@ typedef struct FlashPartInfo { */ uint8_t id[SPI_NOR_MAX_ID_LEN]; uint8_t id_len; - /* there is confusion between manufacturers as to what a sector is. In this + /* + * there is confusion between manufacturers as to what a sector is. In this * device model, a "sector" is the size that is erased by the ERASE_SECTOR * command (opcode 0xd8). */ @@ -168,7 +169,7 @@ typedef struct FlashPartInfo { /* * Spansion read mode command length in bytes, * the mode is currently not supported. -*/ + */ #define SPANSION_CONTINUOUS_READ_MODE_CMD_LEN 1 #define WINBOND_CONTINUOUS_READ_MODE_CMD_LEN 1 @@ -189,7 +190,8 @@ static const FlashPartInfo known_devices[] = { { INFO("at45db081d", 0x1f2500, 0, 64 << 10, 16, ER_4K) }, - /* Atmel EEPROMS - it is assumed, that don't care bit in command + /* + * Atmel EEPROMS - it is assumed, that don't care bit in command * is set to 0. Block protection is not supported. */ { INFO("at25128a-nonjedec", 0x0, 0, 1, 131072, EEPROM) }, @@ -266,7 +268,8 @@ static const FlashPartInfo known_devices[] = { { INFO("n25q512ax3", 0x20ba20, 0x1000, 64 << 10, 1024, ER_4K) }, { INFO("mt25ql512ab", 0x20ba20, 0x1044, 64 << 10, 1024, ER_4K | ER_32K) }, { INFO_STACKED("mt35xu01g", 0x2c5b1b, 0x104100, 128 << 10, 1024, - ER_4K | ER_32K, 2) }, + ER_4K | ER_32K, 2), + .sfdp_read = m25p80_sfdp_mt35xu01g }, { INFO_STACKED("mt35xu02gbba", 0x2c5b1c, 0x104100, 128 << 10, 2048, ER_4K | ER_32K, 4), .sfdp_read = m25p80_sfdp_mt35xu02g }, @@ -274,10 +277,13 @@ static const FlashPartInfo known_devices[] = { { INFO_STACKED("n25q00a", 0x20bb21, 0x1000, 64 << 10, 2048, ER_4K, 4) }, { INFO_STACKED("mt25ql01g", 0x20ba21, 0x1040, 64 << 10, 2048, ER_4K, 2) }, { INFO_STACKED("mt25qu01g", 0x20bb21, 0x1040, 64 << 10, 2048, ER_4K, 2) }, - { INFO_STACKED("mt25ql02g", 0x20ba22, 0x1040, 64 << 10, 4096, ER_4K | ER_32K, 2) }, - { INFO_STACKED("mt25qu02g", 0x20bb22, 0x1040, 64 << 10, 4096, ER_4K | ER_32K, 2) }, + { INFO_STACKED("mt25ql02g", 0x20ba22, 0x1040, 64 << 10, 4096, + ER_4K | ER_32K, 2) }, + { INFO_STACKED("mt25qu02g", 0x20bb22, 0x1040, 64 << 10, 4096, + ER_4K | ER_32K, 2) }, - /* Spansion -- single (large) sector size only, at least + /* + * Spansion -- single (large) sector size only, at least * for the chips listed here (without boot sectors). */ { INFO("s25sl032p", 0x010215, 0x4d00, 64 << 10, 64, ER_4K) }, @@ -350,7 +356,8 @@ static const FlashPartInfo known_devices[] = { { INFO("w25x64", 0xef3017, 0, 64 << 10, 128, ER_4K) }, { INFO("w25q64", 0xef4017, 0, 64 << 10, 128, ER_4K) }, { INFO("w25q80", 0xef5014, 0, 64 << 10, 16, ER_4K) }, - { INFO("w25q80bl", 0xef4014, 0, 64 << 10, 16, ER_4K) }, + { INFO("w25q80bl", 0xef4014, 0, 64 << 10, 16, ER_4K), + .sfdp_read = m25p80_sfdp_w25q80bl }, { INFO("w25q256", 0xef4019, 0, 64 << 10, 512, ER_4K), .sfdp_read = m25p80_sfdp_w25q256 }, { INFO("w25q512jv", 0xef4020, 0, 64 << 10, 1024, ER_4K), @@ -424,6 +431,11 @@ typedef enum { RDCR_EQIO = 0x35, RSTQIO = 0xf5, + /* + * Winbond: 0x31 - write status register 2 + */ + WRSR2 = 0x31, + RNVCR = 0xB5, WNVCR = 0xB1, @@ -516,7 +528,7 @@ struct Flash { struct M25P80Class { SSIPeripheralClass parent_class; - FlashPartInfo *pi; + const FlashPartInfo *pi; }; OBJECT_DECLARE_TYPE(Flash, M25P80Class, M25P80) @@ -548,7 +560,8 @@ static void blk_sync_complete(void *opaque, int ret) qemu_iovec_destroy(iov); g_free(iov); - /* do nothing. Masters do not directly interact with the backing store, + /* + * do nothing. 
Masters do not directly interact with the backing store, * only the working copy so no mutexing required. */ } @@ -814,6 +827,15 @@ static void complete_collecting_data(Flash *s) s->write_enable = false; } break; + case WRSR2: + switch (get_man(s)) { + case MAN_WINBOND: + s->quad_enable = !!(s->data[0] & 0x02); + break; + default: + break; + } + break; case BRWR: case EXTEND_ADDR_WRITE: s->ear = s->data[0]; @@ -1273,7 +1295,31 @@ static void decode_new_cmd(Flash *s, uint32_t value) } s->pos = 0; break; + case WRSR2: + /* + * If WP# is low and status_register_write_disabled is high, + * status register writes are disabled. + * This is also called "hardware protected mode" (HPM). All other + * combinations of the two states are called "software protected mode" + * (SPM), and status register writes are permitted. + */ + if ((s->wp_level == 0 && s->status_register_write_disabled) + || !s->write_enable) { + qemu_log_mask(LOG_GUEST_ERROR, + "M25P80: Status register 2 write is disabled!\n"); + break; + } + switch (get_man(s)) { + case MAN_WINBOND: + s->needed_bytes = 1; + s->state = STATE_COLLECTING_DATA; + s->pos = 0; + break; + default: + break; + } + break; case WRDI: s->write_enable = false; if (get_man(s) == MAN_SST) { @@ -1674,7 +1720,7 @@ static int m25p80_pre_save(void *opaque) return 0; } -static Property m25p80_properties[] = { +static const Property m25p80_properties[] = { /* This is default value for Micron flash */ DEFINE_PROP_BOOL("write-enable", Flash, write_enable, false), DEFINE_PROP_UINT32("nonvolatile-cfg", Flash, nonvolatile_cfg, 0x8FFF), @@ -1683,7 +1729,6 @@ static Property m25p80_properties[] = { DEFINE_PROP_UINT8("spansion-cr3nv", Flash, spansion_cr3nv, 0x2), DEFINE_PROP_UINT8("spansion-cr4nv", Flash, spansion_cr4nv, 0x10), DEFINE_PROP_DRIVE("drive", Flash, blk), - DEFINE_PROP_END_OF_LIST(), }; static int m25p80_pre_load(void *opaque) @@ -1812,7 +1857,7 @@ static const VMStateDescription vmstate_m25p80 = { } }; -static void m25p80_class_init(ObjectClass *klass, void *data) +static void m25p80_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); @@ -1824,8 +1869,10 @@ static void m25p80_class_init(ObjectClass *klass, void *data) k->cs_polarity = SSI_CS_LOW; dc->vmsd = &vmstate_m25p80; device_class_set_props(dc, m25p80_properties); - dc->reset = m25p80_reset; + device_class_set_legacy_reset(dc, m25p80_reset); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); mc->pi = data; + dc->desc = "Serial Flash"; } static const TypeInfo m25p80_info = { @@ -1842,13 +1889,13 @@ static void m25p80_register_types(void) type_register_static(&m25p80_info); for (i = 0; i < ARRAY_SIZE(known_devices); ++i) { - TypeInfo ti = { + const TypeInfo ti = { .name = known_devices[i].part_name, .parent = TYPE_M25P80, .class_init = m25p80_class_init, - .class_data = (void *)&known_devices[i], + .class_data = &known_devices[i], }; - type_register(&ti); + type_register_static(&ti); } } diff --git a/hw/block/m25p80_sfdp.c b/hw/block/m25p80_sfdp.c index 6ee2cfaf119..a03a291a09b 100644 --- a/hw/block/m25p80_sfdp.c +++ b/hw/block/m25p80_sfdp.c @@ -57,6 +57,43 @@ static const uint8_t sfdp_n25q256a[] = { }; define_sfdp_read(n25q256a); +static const uint8_t sfdp_mt35xu01g[] = { + 0x53, 0x46, 0x44, 0x50, 0x06, 0x01, 0x01, 0xff, + 0x00, 0x06, 0x01, 0x10, 0x30, 0x00, 0x00, 0xff, + 0x84, 0x00, 0x01, 0x02, 0x80, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0x8a, 0xff, 0xff, 0xff, 0xff, 0x3f, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0x00, 0x00, 0x0c, 0x20, 0x11, 0xd8, + 0x0f, 0x52, 0x00, 0x00, 0x24, 0x5a, 0x99, 0x00, + 0x8b, 0x8e, 0x03, 0xe1, 0xac, 0x01, 0x27, 0x38, + 0x7a, 0x75, 0x7a, 0x75, 0xfb, 0xbd, 0xd5, 0x5c, + 0x00, 0x00, 0x70, 0xff, 0x81, 0xb0, 0x38, 0x36, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x43, 0x0e, 0xff, 0xff, 0x21, 0xdc, 0x5c, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; + +define_sfdp_read(mt35xu01g); + static const uint8_t sfdp_mt35xu02g[] = { 0x53, 0x46, 0x44, 0x50, 0x06, 0x01, 0x01, 0xff, 0x00, 0x06, 0x01, 0x10, 0x30, 0x00, 0x00, 0xff, @@ -367,6 +404,42 @@ static const uint8_t sfdp_w25q01jvq[] = { }; define_sfdp_read(w25q01jvq); +static const uint8_t sfdp_w25q80bl[] = { + 0x53, 0x46, 0x44, 0x50, 0x05, 0x01, 0x00, 0xff, + 0x00, 0x05, 0x01, 0x10, 0x80, 0x00, 0x00, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xe5, 0x20, 0xf1, 0xff, 0xff, 0xff, 0x7f, 0x00, + 0x44, 0xeb, 0x08, 0x6b, 0x08, 0x3b, 0x42, 0xbb, + 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0x00, 0x00, 0x0c, 0x20, 0x0f, 0x52, + 0x10, 0xd8, 0x00, 0x00, 0x23, 0x02, 0xa6, 0x00, + 0x81, 0x6c, 0x14, 0xa7, 0xed, 0x61, 0x76, 0x33, + 0x7a, 0x75, 0x7a, 0x75, 0xf7, 0xa2, 0xd5, 0x5c, + 0x00, 0xf7, 0x1d, 0xff, 0xe9, 0x30, 0xc0, 0x80, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +define_sfdp_read(w25q80bl); + /* * Integrated Silicon Solution (ISSI) */ diff --git a/hw/block/m25p80_sfdp.h b/hw/block/m25p80_sfdp.h index 1733b569508..35785686a0e 100644 --- a/hw/block/m25p80_sfdp.h +++ b/hw/block/m25p80_sfdp.h @@ -16,6 +16,7 @@ #define M25P80_SFDP_MAX_SIZE (1 << 24) 
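/*
 * A minimal, self-contained sketch (not taken from this patch) of how the
 * per-part SFDP tables above are consumed: each table is wrapped in a
 * generated m25p80_sfdp_<part>() accessor via define_sfdp_read(), the
 * accessor is stored in FlashPartInfo.sfdp_read, and the flash model calls
 * it once per byte as the guest streams data out of the Read SFDP command.
 * The demo_* names below are invented for illustration, and the wrap-around
 * masking assumes power-of-two table sizes, which is how the real tables
 * appear to be laid out.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

/* A toy 16-byte table; the real ones are 256+ bytes of JEDEC SFDP data. */
static const uint8_t demo_sfdp_table[16] = {
    0x53, 0x46, 0x44, 0x50,     /* "SFDP" signature */
    0x06, 0x01, 0x01, 0xff,
    0x00, 0x06, 0x01, 0x10,
    0x30, 0x00, 0x00, 0xff,
};

/* Plays the role of a generated m25p80_sfdp_<part>() accessor. */
static uint8_t demo_sfdp_read(uint32_t addr)
{
    return demo_sfdp_table[addr & (DEMO_ARRAY_SIZE(demo_sfdp_table) - 1)];
}

int main(void)
{
    /* Address 16 wraps back to byte 0, so both reads return 0x53. */
    printf("0x%02x 0x%02x\n", demo_sfdp_read(0), demo_sfdp_read(16));
    return 0;
}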
uint8_t m25p80_sfdp_n25q256a(uint32_t addr); +uint8_t m25p80_sfdp_mt35xu01g(uint32_t addr); uint8_t m25p80_sfdp_mt35xu02g(uint32_t addr); uint8_t m25p80_sfdp_mx25l25635e(uint32_t addr); @@ -24,7 +25,7 @@ uint8_t m25p80_sfdp_mx66l1g45g(uint32_t addr); uint8_t m25p80_sfdp_w25q256(uint32_t addr); uint8_t m25p80_sfdp_w25q512jv(uint32_t addr); - +uint8_t m25p80_sfdp_w25q80bl(uint32_t addr); uint8_t m25p80_sfdp_w25q01jvq(uint32_t addr); uint8_t m25p80_sfdp_is25wp256(uint32_t addr); diff --git a/hw/block/meson.build b/hw/block/meson.build index 8aa4dc3893c..43ed296cf47 100644 --- a/hw/block/meson.build +++ b/hw/block/meson.build @@ -3,21 +3,19 @@ system_ss.add(files( 'cdrom.c', 'hd-geometry.c' )) -system_ss.add(when: 'CONFIG_ECC', if_true: files('ecc.c')) system_ss.add(when: 'CONFIG_FDC', if_true: files('fdc.c')) system_ss.add(when: 'CONFIG_FDC_ISA', if_true: files('fdc-isa.c')) system_ss.add(when: 'CONFIG_FDC_SYSBUS', if_true: files('fdc-sysbus.c')) -system_ss.add(when: 'CONFIG_NAND', if_true: files('nand.c')) -system_ss.add(when: 'CONFIG_ONENAND', if_true: files('onenand.c')) system_ss.add(when: 'CONFIG_PFLASH_CFI01', if_true: files('pflash_cfi01.c')) system_ss.add(when: 'CONFIG_PFLASH_CFI02', if_true: files('pflash_cfi02.c')) system_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80.c')) system_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80_sfdp.c')) system_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c')) system_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-block.c')) -system_ss.add(when: 'CONFIG_TC58128', if_true: files('tc58128.c')) -specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c', 'virtio-blk-common.c')) -specific_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk.c', 'virtio-blk-common.c')) +specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c')) +system_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk-common.c')) +specific_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk.c')) +system_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('virtio-blk-common.c')) subdir('dataplane') diff --git a/hw/block/nand.c b/hw/block/nand.c deleted file mode 100644 index e2433c25bdc..00000000000 --- a/hw/block/nand.c +++ /dev/null @@ -1,836 +0,0 @@ -/* - * Flash NAND memory emulation. Based on "16M x 8 Bit NAND Flash - * Memory" datasheet for the KM29U128AT / K9F2808U0A chips from - * Samsung Electronic. - * - * Copyright (c) 2006 Openedhand Ltd. - * Written by Andrzej Zaborowski - * - * Support for additional features based on "MT29F2G16ABCWP 2Gx16" - * datasheet from Micron Technology and "NAND02G-B2C" datasheet - * from ST Microelectronics. - * - * This code is licensed under the GNU GPL v2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. 
- */ - -#ifndef NAND_IO - -#include "qemu/osdep.h" -#include "hw/hw.h" -#include "hw/qdev-properties.h" -#include "hw/qdev-properties-system.h" -#include "hw/block/flash.h" -#include "sysemu/block-backend.h" -#include "migration/vmstate.h" -#include "qapi/error.h" -#include "qemu/error-report.h" -#include "qemu/module.h" -#include "qom/object.h" - -# define NAND_CMD_READ0 0x00 -# define NAND_CMD_READ1 0x01 -# define NAND_CMD_READ2 0x50 -# define NAND_CMD_LPREAD2 0x30 -# define NAND_CMD_NOSERIALREAD2 0x35 -# define NAND_CMD_RANDOMREAD1 0x05 -# define NAND_CMD_RANDOMREAD2 0xe0 -# define NAND_CMD_READID 0x90 -# define NAND_CMD_RESET 0xff -# define NAND_CMD_PAGEPROGRAM1 0x80 -# define NAND_CMD_PAGEPROGRAM2 0x10 -# define NAND_CMD_CACHEPROGRAM2 0x15 -# define NAND_CMD_BLOCKERASE1 0x60 -# define NAND_CMD_BLOCKERASE2 0xd0 -# define NAND_CMD_READSTATUS 0x70 -# define NAND_CMD_COPYBACKPRG1 0x85 - -# define NAND_IOSTATUS_ERROR (1 << 0) -# define NAND_IOSTATUS_PLANE0 (1 << 1) -# define NAND_IOSTATUS_PLANE1 (1 << 2) -# define NAND_IOSTATUS_PLANE2 (1 << 3) -# define NAND_IOSTATUS_PLANE3 (1 << 4) -# define NAND_IOSTATUS_READY (1 << 6) -# define NAND_IOSTATUS_UNPROTCT (1 << 7) - -# define MAX_PAGE 0x800 -# define MAX_OOB 0x40 - -typedef struct NANDFlashState NANDFlashState; -struct NANDFlashState { - DeviceState parent_obj; - - uint8_t manf_id, chip_id; - uint8_t buswidth; /* in BYTES */ - int size, pages; - int page_shift, oob_shift, erase_shift, addr_shift; - uint8_t *storage; - BlockBackend *blk; - int mem_oob; - - uint8_t cle, ale, ce, wp, gnd; - - uint8_t io[MAX_PAGE + MAX_OOB + 0x400]; - uint8_t *ioaddr; - int iolen; - - uint32_t cmd; - uint64_t addr; - int addrlen; - int status; - int offset; - - void (*blk_write)(NANDFlashState *s); - void (*blk_erase)(NANDFlashState *s); - /* - * Returns %true when block containing (@addr + @offset) is - * successfully loaded, otherwise %false. 
- */ - bool (*blk_load)(NANDFlashState *s, uint64_t addr, unsigned offset); - - uint32_t ioaddr_vmstate; -}; - -#define TYPE_NAND "nand" - -OBJECT_DECLARE_SIMPLE_TYPE(NANDFlashState, NAND) - -static void mem_and(uint8_t *dest, const uint8_t *src, size_t n) -{ - /* Like memcpy() but we logical-AND the data into the destination */ - int i; - for (i = 0; i < n; i++) { - dest[i] &= src[i]; - } -} - -# define NAND_NO_AUTOINCR 0x00000001 -# define NAND_BUSWIDTH_16 0x00000002 -# define NAND_NO_PADDING 0x00000004 -# define NAND_CACHEPRG 0x00000008 -# define NAND_COPYBACK 0x00000010 -# define NAND_IS_AND 0x00000020 -# define NAND_4PAGE_ARRAY 0x00000040 -# define NAND_NO_READRDY 0x00000100 -# define NAND_SAMSUNG_LP (NAND_NO_PADDING | NAND_COPYBACK) - -# define NAND_IO - -# define PAGE(addr) ((addr) >> ADDR_SHIFT) -# define PAGE_START(page) (PAGE(page) * (NAND_PAGE_SIZE + OOB_SIZE)) -# define PAGE_MASK ((1 << ADDR_SHIFT) - 1) -# define OOB_SHIFT (PAGE_SHIFT - 5) -# define OOB_SIZE (1 << OOB_SHIFT) -# define SECTOR(addr) ((addr) >> (9 + ADDR_SHIFT - PAGE_SHIFT)) -# define SECTOR_OFFSET(addr) ((addr) & ((511 >> PAGE_SHIFT) << 8)) - -# define NAND_PAGE_SIZE 256 -# define PAGE_SHIFT 8 -# define PAGE_SECTORS 1 -# define ADDR_SHIFT 8 -# include "nand.c" -# define NAND_PAGE_SIZE 512 -# define PAGE_SHIFT 9 -# define PAGE_SECTORS 1 -# define ADDR_SHIFT 8 -# include "nand.c" -# define NAND_PAGE_SIZE 2048 -# define PAGE_SHIFT 11 -# define PAGE_SECTORS 4 -# define ADDR_SHIFT 16 -# include "nand.c" - -/* Information based on Linux drivers/mtd/nand/raw/nand_ids.c */ -static const struct { - int size; - int width; - int page_shift; - int erase_shift; - uint32_t options; -} nand_flash_ids[0x100] = { - [0 ... 0xff] = { 0 }, - - [0x6b] = { 4, 8, 9, 4, 0 }, - [0xe3] = { 4, 8, 9, 4, 0 }, - [0xe5] = { 4, 8, 9, 4, 0 }, - [0xd6] = { 8, 8, 9, 4, 0 }, - [0xe6] = { 8, 8, 9, 4, 0 }, - - [0x33] = { 16, 8, 9, 5, 0 }, - [0x73] = { 16, 8, 9, 5, 0 }, - [0x43] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x53] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 }, - - [0x35] = { 32, 8, 9, 5, 0 }, - [0x75] = { 32, 8, 9, 5, 0 }, - [0x45] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x55] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 }, - - [0x36] = { 64, 8, 9, 5, 0 }, - [0x76] = { 64, 8, 9, 5, 0 }, - [0x46] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x56] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 }, - - [0x78] = { 128, 8, 9, 5, 0 }, - [0x39] = { 128, 8, 9, 5, 0 }, - [0x79] = { 128, 8, 9, 5, 0 }, - [0x72] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x49] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x74] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, - [0x59] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, - - [0x71] = { 256, 8, 9, 5, 0 }, - - /* - * These are the new chips with large page size. 
The pagesize and the - * erasesize is determined from the extended id bytes - */ -# define LP_OPTIONS (NAND_SAMSUNG_LP | NAND_NO_READRDY | NAND_NO_AUTOINCR) -# define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) - - /* 512 Megabit */ - [0xa2] = { 64, 8, 0, 0, LP_OPTIONS }, - [0xf2] = { 64, 8, 0, 0, LP_OPTIONS }, - [0xb2] = { 64, 16, 0, 0, LP_OPTIONS16 }, - [0xc2] = { 64, 16, 0, 0, LP_OPTIONS16 }, - - /* 1 Gigabit */ - [0xa1] = { 128, 8, 0, 0, LP_OPTIONS }, - [0xf1] = { 128, 8, 0, 0, LP_OPTIONS }, - [0xb1] = { 128, 16, 0, 0, LP_OPTIONS16 }, - [0xc1] = { 128, 16, 0, 0, LP_OPTIONS16 }, - - /* 2 Gigabit */ - [0xaa] = { 256, 8, 0, 0, LP_OPTIONS }, - [0xda] = { 256, 8, 0, 0, LP_OPTIONS }, - [0xba] = { 256, 16, 0, 0, LP_OPTIONS16 }, - [0xca] = { 256, 16, 0, 0, LP_OPTIONS16 }, - - /* 4 Gigabit */ - [0xac] = { 512, 8, 0, 0, LP_OPTIONS }, - [0xdc] = { 512, 8, 0, 0, LP_OPTIONS }, - [0xbc] = { 512, 16, 0, 0, LP_OPTIONS16 }, - [0xcc] = { 512, 16, 0, 0, LP_OPTIONS16 }, - - /* 8 Gigabit */ - [0xa3] = { 1024, 8, 0, 0, LP_OPTIONS }, - [0xd3] = { 1024, 8, 0, 0, LP_OPTIONS }, - [0xb3] = { 1024, 16, 0, 0, LP_OPTIONS16 }, - [0xc3] = { 1024, 16, 0, 0, LP_OPTIONS16 }, - - /* 16 Gigabit */ - [0xa5] = { 2048, 8, 0, 0, LP_OPTIONS }, - [0xd5] = { 2048, 8, 0, 0, LP_OPTIONS }, - [0xb5] = { 2048, 16, 0, 0, LP_OPTIONS16 }, - [0xc5] = { 2048, 16, 0, 0, LP_OPTIONS16 }, -}; - -static void nand_reset(DeviceState *dev) -{ - NANDFlashState *s = NAND(dev); - s->cmd = NAND_CMD_READ0; - s->addr = 0; - s->addrlen = 0; - s->iolen = 0; - s->offset = 0; - s->status &= NAND_IOSTATUS_UNPROTCT; - s->status |= NAND_IOSTATUS_READY; -} - -static inline void nand_pushio_byte(NANDFlashState *s, uint8_t value) -{ - s->ioaddr[s->iolen++] = value; - for (value = s->buswidth; --value;) { - s->ioaddr[s->iolen++] = 0; - } -} - -/* - * nand_load_block: Load block containing (s->addr + @offset). - * Returns length of data available at @offset in this block. - */ -static unsigned nand_load_block(NANDFlashState *s, unsigned offset) -{ - unsigned iolen; - - if (!s->blk_load(s, s->addr, offset)) { - return 0; - } - - iolen = (1 << s->page_shift); - if (s->gnd) { - iolen += 1 << s->oob_shift; - } - assert(offset <= iolen); - iolen -= offset; - - return iolen; -} - -static void nand_command(NANDFlashState *s) -{ - switch (s->cmd) { - case NAND_CMD_READ0: - s->iolen = 0; - break; - - case NAND_CMD_READID: - s->ioaddr = s->io; - s->iolen = 0; - nand_pushio_byte(s, s->manf_id); - nand_pushio_byte(s, s->chip_id); - nand_pushio_byte(s, 'Q'); /* Don't-care byte (often 0xa5) */ - if (nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) { - /* Page Size, Block Size, Spare Size; bit 6 indicates - * 8 vs 16 bit width NAND. - */ - nand_pushio_byte(s, (s->buswidth == 2) ? 0x55 : 0x15); - } else { - nand_pushio_byte(s, 0xc0); /* Multi-plane */ - } - break; - - case NAND_CMD_RANDOMREAD2: - case NAND_CMD_NOSERIALREAD2: - if (!(nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP)) - break; - s->iolen = nand_load_block(s, s->addr & ((1 << s->addr_shift) - 1)); - break; - - case NAND_CMD_RESET: - nand_reset(DEVICE(s)); - break; - - case NAND_CMD_PAGEPROGRAM1: - s->ioaddr = s->io; - s->iolen = 0; - break; - - case NAND_CMD_PAGEPROGRAM2: - if (s->wp) { - s->blk_write(s); - } - break; - - case NAND_CMD_BLOCKERASE1: - break; - - case NAND_CMD_BLOCKERASE2: - s->addr &= (1ull << s->addrlen * 8) - 1; - s->addr <<= nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP ? 
- 16 : 8; - - if (s->wp) { - s->blk_erase(s); - } - break; - - case NAND_CMD_READSTATUS: - s->ioaddr = s->io; - s->iolen = 0; - nand_pushio_byte(s, s->status); - break; - - default: - printf("%s: Unknown NAND command 0x%02x\n", __func__, s->cmd); - } -} - -static int nand_pre_save(void *opaque) -{ - NANDFlashState *s = NAND(opaque); - - s->ioaddr_vmstate = s->ioaddr - s->io; - - return 0; -} - -static int nand_post_load(void *opaque, int version_id) -{ - NANDFlashState *s = NAND(opaque); - - if (s->ioaddr_vmstate > sizeof(s->io)) { - return -EINVAL; - } - s->ioaddr = s->io + s->ioaddr_vmstate; - - return 0; -} - -static const VMStateDescription vmstate_nand = { - .name = "nand", - .version_id = 1, - .minimum_version_id = 1, - .pre_save = nand_pre_save, - .post_load = nand_post_load, - .fields = (const VMStateField[]) { - VMSTATE_UINT8(cle, NANDFlashState), - VMSTATE_UINT8(ale, NANDFlashState), - VMSTATE_UINT8(ce, NANDFlashState), - VMSTATE_UINT8(wp, NANDFlashState), - VMSTATE_UINT8(gnd, NANDFlashState), - VMSTATE_BUFFER(io, NANDFlashState), - VMSTATE_UINT32(ioaddr_vmstate, NANDFlashState), - VMSTATE_INT32(iolen, NANDFlashState), - VMSTATE_UINT32(cmd, NANDFlashState), - VMSTATE_UINT64(addr, NANDFlashState), - VMSTATE_INT32(addrlen, NANDFlashState), - VMSTATE_INT32(status, NANDFlashState), - VMSTATE_INT32(offset, NANDFlashState), - /* XXX: do we want to save s->storage too? */ - VMSTATE_END_OF_LIST() - } -}; - -static void nand_realize(DeviceState *dev, Error **errp) -{ - int pagesize; - NANDFlashState *s = NAND(dev); - int ret; - - - s->buswidth = nand_flash_ids[s->chip_id].width >> 3; - s->size = nand_flash_ids[s->chip_id].size << 20; - if (nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) { - s->page_shift = 11; - s->erase_shift = 6; - } else { - s->page_shift = nand_flash_ids[s->chip_id].page_shift; - s->erase_shift = nand_flash_ids[s->chip_id].erase_shift; - } - - switch (1 << s->page_shift) { - case 256: - nand_init_256(s); - break; - case 512: - nand_init_512(s); - break; - case 2048: - nand_init_2048(s); - break; - default: - error_setg(errp, "Unsupported NAND block size %#x", - 1 << s->page_shift); - return; - } - - pagesize = 1 << s->oob_shift; - s->mem_oob = 1; - if (s->blk) { - if (!blk_supports_write_perm(s->blk)) { - error_setg(errp, "Can't use a read-only drive"); - return; - } - ret = blk_set_perm(s->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, - BLK_PERM_ALL, errp); - if (ret < 0) { - return; - } - if (blk_getlength(s->blk) >= - (s->pages << s->page_shift) + (s->pages << s->oob_shift)) { - pagesize = 0; - s->mem_oob = 0; - } - } else { - pagesize += 1 << s->page_shift; - } - if (pagesize) { - s->storage = (uint8_t *) memset(g_malloc(s->pages * pagesize), - 0xff, s->pages * pagesize); - } - /* Give s->ioaddr a sane value in case we save state before it is used. 
*/ - s->ioaddr = s->io; -} - -static Property nand_properties[] = { - DEFINE_PROP_UINT8("manufacturer_id", NANDFlashState, manf_id, 0), - DEFINE_PROP_UINT8("chip_id", NANDFlashState, chip_id, 0), - DEFINE_PROP_DRIVE("drive", NANDFlashState, blk), - DEFINE_PROP_END_OF_LIST(), -}; - -static void nand_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->realize = nand_realize; - dc->reset = nand_reset; - dc->vmsd = &vmstate_nand; - device_class_set_props(dc, nand_properties); - set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); -} - -static const TypeInfo nand_info = { - .name = TYPE_NAND, - .parent = TYPE_DEVICE, - .instance_size = sizeof(NANDFlashState), - .class_init = nand_class_init, -}; - -static void nand_register_types(void) -{ - type_register_static(&nand_info); -} - -/* - * Chip inputs are CLE, ALE, CE, WP, GND and eight I/O pins. Chip - * outputs are R/B and eight I/O pins. - * - * CE, WP and R/B are active low. - */ -void nand_setpins(DeviceState *dev, uint8_t cle, uint8_t ale, - uint8_t ce, uint8_t wp, uint8_t gnd) -{ - NANDFlashState *s = NAND(dev); - - s->cle = cle; - s->ale = ale; - s->ce = ce; - s->wp = wp; - s->gnd = gnd; - if (wp) { - s->status |= NAND_IOSTATUS_UNPROTCT; - } else { - s->status &= ~NAND_IOSTATUS_UNPROTCT; - } -} - -void nand_getpins(DeviceState *dev, int *rb) -{ - *rb = 1; -} - -void nand_setio(DeviceState *dev, uint32_t value) -{ - int i; - NANDFlashState *s = NAND(dev); - - if (!s->ce && s->cle) { - if (nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) { - if (s->cmd == NAND_CMD_READ0 && value == NAND_CMD_LPREAD2) - return; - if (value == NAND_CMD_RANDOMREAD1) { - s->addr &= ~((1 << s->addr_shift) - 1); - s->addrlen = 0; - return; - } - } - if (value == NAND_CMD_READ0) { - s->offset = 0; - } else if (value == NAND_CMD_READ1) { - s->offset = 0x100; - value = NAND_CMD_READ0; - } else if (value == NAND_CMD_READ2) { - s->offset = 1 << s->page_shift; - value = NAND_CMD_READ0; - } - - s->cmd = value; - - if (s->cmd == NAND_CMD_READSTATUS || - s->cmd == NAND_CMD_PAGEPROGRAM2 || - s->cmd == NAND_CMD_BLOCKERASE1 || - s->cmd == NAND_CMD_BLOCKERASE2 || - s->cmd == NAND_CMD_NOSERIALREAD2 || - s->cmd == NAND_CMD_RANDOMREAD2 || - s->cmd == NAND_CMD_RESET) { - nand_command(s); - } - - if (s->cmd != NAND_CMD_RANDOMREAD2) { - s->addrlen = 0; - } - } - - if (s->ale) { - unsigned int shift = s->addrlen * 8; - uint64_t mask = ~(0xffull << shift); - uint64_t v = (uint64_t)value << shift; - - s->addr = (s->addr & mask) | v; - s->addrlen ++; - - switch (s->addrlen) { - case 1: - if (s->cmd == NAND_CMD_READID) { - nand_command(s); - } - break; - case 2: /* fix cache address as a byte address */ - s->addr <<= (s->buswidth - 1); - break; - case 3: - if (!(nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) && - (s->cmd == NAND_CMD_READ0 || - s->cmd == NAND_CMD_PAGEPROGRAM1)) { - nand_command(s); - } - break; - case 4: - if ((nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) && - nand_flash_ids[s->chip_id].size < 256 && /* 1Gb or less */ - (s->cmd == NAND_CMD_READ0 || - s->cmd == NAND_CMD_PAGEPROGRAM1)) { - nand_command(s); - } - break; - case 5: - if ((nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) && - nand_flash_ids[s->chip_id].size >= 256 && /* 2Gb or more */ - (s->cmd == NAND_CMD_READ0 || - s->cmd == NAND_CMD_PAGEPROGRAM1)) { - nand_command(s); - } - break; - default: - break; - } - } - - if (!s->cle && !s->ale && s->cmd == NAND_CMD_PAGEPROGRAM1) { - if (s->iolen < (1 << s->page_shift) + (1 << s->oob_shift)) { - for (i = 
s->buswidth; i--; value >>= 8) { - s->io[s->iolen ++] = (uint8_t) (value & 0xff); - } - } - } else if (!s->cle && !s->ale && s->cmd == NAND_CMD_COPYBACKPRG1) { - if ((s->addr & ((1 << s->addr_shift) - 1)) < - (1 << s->page_shift) + (1 << s->oob_shift)) { - for (i = s->buswidth; i--; s->addr++, value >>= 8) { - s->io[s->iolen + (s->addr & ((1 << s->addr_shift) - 1))] = - (uint8_t) (value & 0xff); - } - } - } -} - -uint32_t nand_getio(DeviceState *dev) -{ - int offset; - uint32_t x = 0; - NANDFlashState *s = NAND(dev); - - /* Allow sequential reading */ - if (!s->iolen && s->cmd == NAND_CMD_READ0) { - offset = (int) (s->addr & ((1 << s->addr_shift) - 1)) + s->offset; - s->offset = 0; - s->iolen = nand_load_block(s, offset); - } - - if (s->ce || s->iolen <= 0) { - return 0; - } - - for (offset = s->buswidth; offset--;) { - x |= s->ioaddr[offset] << (offset << 3); - } - /* after receiving READ STATUS command all subsequent reads will - * return the status register value until another command is issued - */ - if (s->cmd != NAND_CMD_READSTATUS) { - s->addr += s->buswidth; - s->ioaddr += s->buswidth; - s->iolen -= s->buswidth; - } - return x; -} - -uint32_t nand_getbuswidth(DeviceState *dev) -{ - NANDFlashState *s = (NANDFlashState *) dev; - return s->buswidth << 3; -} - -DeviceState *nand_init(BlockBackend *blk, int manf_id, int chip_id) -{ - DeviceState *dev; - - if (nand_flash_ids[chip_id].size == 0) { - hw_error("%s: Unsupported NAND chip ID.\n", __func__); - } - dev = qdev_new(TYPE_NAND); - qdev_prop_set_uint8(dev, "manufacturer_id", manf_id); - qdev_prop_set_uint8(dev, "chip_id", chip_id); - if (blk) { - qdev_prop_set_drive_err(dev, "drive", blk, &error_fatal); - } - - qdev_realize(dev, NULL, &error_fatal); - return dev; -} - -type_init(nand_register_types) - -#else - -/* Program a single page */ -static void glue(nand_blk_write_, NAND_PAGE_SIZE)(NANDFlashState *s) -{ - uint64_t off, page, sector, soff; - uint8_t iobuf[(PAGE_SECTORS + 2) * 0x200]; - if (PAGE(s->addr) >= s->pages) - return; - - if (!s->blk) { - mem_and(s->storage + PAGE_START(s->addr) + (s->addr & PAGE_MASK) + - s->offset, s->io, s->iolen); - } else if (s->mem_oob) { - sector = SECTOR(s->addr); - off = (s->addr & PAGE_MASK) + s->offset; - soff = SECTOR_OFFSET(s->addr); - if (blk_pread(s->blk, sector << BDRV_SECTOR_BITS, - PAGE_SECTORS << BDRV_SECTOR_BITS, iobuf, 0) < 0) { - printf("%s: read error in sector %" PRIu64 "\n", __func__, sector); - return; - } - - mem_and(iobuf + (soff | off), s->io, MIN(s->iolen, NAND_PAGE_SIZE - off)); - if (off + s->iolen > NAND_PAGE_SIZE) { - page = PAGE(s->addr); - mem_and(s->storage + (page << OOB_SHIFT), s->io + NAND_PAGE_SIZE - off, - MIN(OOB_SIZE, off + s->iolen - NAND_PAGE_SIZE)); - } - - if (blk_pwrite(s->blk, sector << BDRV_SECTOR_BITS, - PAGE_SECTORS << BDRV_SECTOR_BITS, iobuf, 0) < 0) { - printf("%s: write error in sector %" PRIu64 "\n", __func__, sector); - } - } else { - off = PAGE_START(s->addr) + (s->addr & PAGE_MASK) + s->offset; - sector = off >> 9; - soff = off & 0x1ff; - if (blk_pread(s->blk, sector << BDRV_SECTOR_BITS, - (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS, iobuf, 0) < 0) { - printf("%s: read error in sector %" PRIu64 "\n", __func__, sector); - return; - } - - mem_and(iobuf + soff, s->io, s->iolen); - - if (blk_pwrite(s->blk, sector << BDRV_SECTOR_BITS, - (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS, iobuf, 0) < 0) { - printf("%s: write error in sector %" PRIu64 "\n", __func__, sector); - } - } - s->offset = 0; -} - -/* Erase a single block */ -static void glue(nand_blk_erase_, 
NAND_PAGE_SIZE)(NANDFlashState *s) -{ - uint64_t i, page, addr; - uint8_t iobuf[0x200] = { [0 ... 0x1ff] = 0xff, }; - addr = s->addr & ~((1 << (ADDR_SHIFT + s->erase_shift)) - 1); - - if (PAGE(addr) >= s->pages) { - return; - } - - if (!s->blk) { - memset(s->storage + PAGE_START(addr), - 0xff, (NAND_PAGE_SIZE + OOB_SIZE) << s->erase_shift); - } else if (s->mem_oob) { - memset(s->storage + (PAGE(addr) << OOB_SHIFT), - 0xff, OOB_SIZE << s->erase_shift); - i = SECTOR(addr); - page = SECTOR(addr + (1 << (ADDR_SHIFT + s->erase_shift))); - for (; i < page; i ++) - if (blk_pwrite(s->blk, i << BDRV_SECTOR_BITS, - BDRV_SECTOR_SIZE, iobuf, 0) < 0) { - printf("%s: write error in sector %" PRIu64 "\n", __func__, i); - } - } else { - addr = PAGE_START(addr); - page = addr >> 9; - if (blk_pread(s->blk, page << BDRV_SECTOR_BITS, - BDRV_SECTOR_SIZE, iobuf, 0) < 0) { - printf("%s: read error in sector %" PRIu64 "\n", __func__, page); - } - memset(iobuf + (addr & 0x1ff), 0xff, (~addr & 0x1ff) + 1); - if (blk_pwrite(s->blk, page << BDRV_SECTOR_BITS, - BDRV_SECTOR_SIZE, iobuf, 0) < 0) { - printf("%s: write error in sector %" PRIu64 "\n", __func__, page); - } - - memset(iobuf, 0xff, 0x200); - i = (addr & ~0x1ff) + 0x200; - for (addr += ((NAND_PAGE_SIZE + OOB_SIZE) << s->erase_shift) - 0x200; - i < addr; i += 0x200) { - if (blk_pwrite(s->blk, i, BDRV_SECTOR_SIZE, iobuf, 0) < 0) { - printf("%s: write error in sector %" PRIu64 "\n", - __func__, i >> 9); - } - } - - page = i >> 9; - if (blk_pread(s->blk, page << BDRV_SECTOR_BITS, - BDRV_SECTOR_SIZE, iobuf, 0) < 0) { - printf("%s: read error in sector %" PRIu64 "\n", __func__, page); - } - memset(iobuf, 0xff, ((addr - 1) & 0x1ff) + 1); - if (blk_pwrite(s->blk, page << BDRV_SECTOR_BITS, - BDRV_SECTOR_SIZE, iobuf, 0) < 0) { - printf("%s: write error in sector %" PRIu64 "\n", __func__, page); - } - } -} - -static bool glue(nand_blk_load_, NAND_PAGE_SIZE)(NANDFlashState *s, - uint64_t addr, unsigned offset) -{ - if (PAGE(addr) >= s->pages) { - return false; - } - - if (offset > NAND_PAGE_SIZE + OOB_SIZE) { - return false; - } - - if (s->blk) { - if (s->mem_oob) { - if (blk_pread(s->blk, SECTOR(addr) << BDRV_SECTOR_BITS, - PAGE_SECTORS << BDRV_SECTOR_BITS, s->io, 0) < 0) { - printf("%s: read error in sector %" PRIu64 "\n", - __func__, SECTOR(addr)); - } - memcpy(s->io + SECTOR_OFFSET(s->addr) + NAND_PAGE_SIZE, - s->storage + (PAGE(s->addr) << OOB_SHIFT), - OOB_SIZE); - s->ioaddr = s->io + SECTOR_OFFSET(s->addr) + offset; - } else { - if (blk_pread(s->blk, PAGE_START(addr), - (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS, s->io, 0) - < 0) { - printf("%s: read error in sector %" PRIu64 "\n", - __func__, PAGE_START(addr) >> 9); - } - s->ioaddr = s->io + (PAGE_START(addr) & 0x1ff) + offset; - } - } else { - memcpy(s->io, s->storage + PAGE_START(s->addr) + - offset, NAND_PAGE_SIZE + OOB_SIZE - offset); - s->ioaddr = s->io; - } - - return true; -} - -static void glue(nand_init_, NAND_PAGE_SIZE)(NANDFlashState *s) -{ - s->oob_shift = PAGE_SHIFT - 5; - s->pages = s->size >> PAGE_SHIFT; - s->addr_shift = ADDR_SHIFT; - - s->blk_erase = glue(nand_blk_erase_, NAND_PAGE_SIZE); - s->blk_write = glue(nand_blk_write_, NAND_PAGE_SIZE); - s->blk_load = glue(nand_blk_load_, NAND_PAGE_SIZE); -} - -# undef NAND_PAGE_SIZE -# undef PAGE_SHIFT -# undef PAGE_SECTORS -# undef ADDR_SHIFT -#endif /* NAND_IO */ diff --git a/hw/block/onenand.c b/hw/block/onenand.c deleted file mode 100644 index d8a6944027a..00000000000 --- a/hw/block/onenand.c +++ /dev/null @@ -1,872 +0,0 @@ -/* - * OneNAND flash memories 
emulation. - * - * Copyright (C) 2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "hw/hw.h" -#include "hw/block/flash.h" -#include "hw/irq.h" -#include "hw/qdev-properties.h" -#include "hw/qdev-properties-system.h" -#include "sysemu/block-backend.h" -#include "exec/memory.h" -#include "hw/sysbus.h" -#include "migration/vmstate.h" -#include "qemu/error-report.h" -#include "qemu/log.h" -#include "qemu/module.h" -#include "qom/object.h" - -/* 11 for 2kB-page OneNAND ("2nd generation") and 10 for 1kB-page chips */ -#define PAGE_SHIFT 11 - -/* Fixed */ -#define BLOCK_SHIFT (PAGE_SHIFT + 6) - -#define TYPE_ONE_NAND "onenand" -OBJECT_DECLARE_SIMPLE_TYPE(OneNANDState, ONE_NAND) - -struct OneNANDState { - SysBusDevice parent_obj; - - struct { - uint16_t man; - uint16_t dev; - uint16_t ver; - } id; - int shift; - hwaddr base; - qemu_irq intr; - qemu_irq rdy; - BlockBackend *blk; - BlockBackend *blk_cur; - uint8_t *image; - uint8_t *otp; - uint8_t *current; - MemoryRegion ram; - MemoryRegion mapped_ram; - uint8_t current_direction; - uint8_t *boot[2]; - uint8_t *data[2][2]; - MemoryRegion iomem; - MemoryRegion container; - int cycle; - int otpmode; - - uint16_t addr[8]; - uint16_t unladdr[8]; - int bufaddr; - int count; - uint16_t command; - uint16_t config[2]; - uint16_t status; - uint16_t intstatus; - uint16_t wpstatus; - - ECCState ecc; - - int density_mask; - int secs; - int secs_cur; - int blocks; - uint8_t *blockwp; -}; - -enum { - ONEN_BUF_BLOCK = 0, - ONEN_BUF_BLOCK2 = 1, - ONEN_BUF_DEST_BLOCK = 2, - ONEN_BUF_DEST_PAGE = 3, - ONEN_BUF_PAGE = 7, -}; - -enum { - ONEN_ERR_CMD = 1 << 10, - ONEN_ERR_ERASE = 1 << 11, - ONEN_ERR_PROG = 1 << 12, - ONEN_ERR_LOAD = 1 << 13, -}; - -enum { - ONEN_INT_RESET = 1 << 4, - ONEN_INT_ERASE = 1 << 5, - ONEN_INT_PROG = 1 << 6, - ONEN_INT_LOAD = 1 << 7, - ONEN_INT = 1 << 15, -}; - -enum { - ONEN_LOCK_LOCKTIGHTEN = 1 << 0, - ONEN_LOCK_LOCKED = 1 << 1, - ONEN_LOCK_UNLOCKED = 1 << 2, -}; - -static void onenand_mem_setup(OneNANDState *s) -{ - /* XXX: We should use IO_MEM_ROMD but we broke it earlier... - * Both 0x0000 ... 0x01ff and 0x8000 ... 0x800f can be used to - * write boot commands. Also take note of the BWPS bit. 
*/ - memory_region_init(&s->container, OBJECT(s), "onenand", - 0x10000 << s->shift); - memory_region_add_subregion(&s->container, 0, &s->iomem); - memory_region_init_alias(&s->mapped_ram, OBJECT(s), "onenand-mapped-ram", - &s->ram, 0x0200 << s->shift, - 0xbe00 << s->shift); - memory_region_add_subregion_overlap(&s->container, - 0x0200 << s->shift, - &s->mapped_ram, - 1); -} - -static void onenand_intr_update(OneNANDState *s) -{ - qemu_set_irq(s->intr, ((s->intstatus >> 15) ^ (~s->config[0] >> 6)) & 1); -} - -static int onenand_pre_save(void *opaque) -{ - OneNANDState *s = opaque; - if (s->current == s->otp) { - s->current_direction = 1; - } else if (s->current == s->image) { - s->current_direction = 2; - } else { - s->current_direction = 0; - } - - return 0; -} - -static int onenand_post_load(void *opaque, int version_id) -{ - OneNANDState *s = opaque; - switch (s->current_direction) { - case 0: - break; - case 1: - s->current = s->otp; - break; - case 2: - s->current = s->image; - break; - default: - return -1; - } - onenand_intr_update(s); - return 0; -} - -static const VMStateDescription vmstate_onenand = { - .name = "onenand", - .version_id = 1, - .minimum_version_id = 1, - .pre_save = onenand_pre_save, - .post_load = onenand_post_load, - .fields = (const VMStateField[]) { - VMSTATE_UINT8(current_direction, OneNANDState), - VMSTATE_INT32(cycle, OneNANDState), - VMSTATE_INT32(otpmode, OneNANDState), - VMSTATE_UINT16_ARRAY(addr, OneNANDState, 8), - VMSTATE_UINT16_ARRAY(unladdr, OneNANDState, 8), - VMSTATE_INT32(bufaddr, OneNANDState), - VMSTATE_INT32(count, OneNANDState), - VMSTATE_UINT16(command, OneNANDState), - VMSTATE_UINT16_ARRAY(config, OneNANDState, 2), - VMSTATE_UINT16(status, OneNANDState), - VMSTATE_UINT16(intstatus, OneNANDState), - VMSTATE_UINT16(wpstatus, OneNANDState), - VMSTATE_INT32(secs_cur, OneNANDState), - VMSTATE_PARTIAL_VBUFFER(blockwp, OneNANDState, blocks), - VMSTATE_UINT8(ecc.cp, OneNANDState), - VMSTATE_UINT16_ARRAY(ecc.lp, OneNANDState, 2), - VMSTATE_UINT16(ecc.count, OneNANDState), - VMSTATE_BUFFER_POINTER_UNSAFE(otp, OneNANDState, 0, - ((64 + 2) << PAGE_SHIFT)), - VMSTATE_END_OF_LIST() - } -}; - -/* Hot reset (Reset OneNAND command) or warm reset (RP pin low) */ -static void onenand_reset(OneNANDState *s, int cold) -{ - memset(&s->addr, 0, sizeof(s->addr)); - s->command = 0; - s->count = 1; - s->bufaddr = 0; - s->config[0] = 0x40c0; - s->config[1] = 0x0000; - onenand_intr_update(s); - qemu_irq_raise(s->rdy); - s->status = 0x0000; - s->intstatus = cold ? 
0x8080 : 0x8010; - s->unladdr[0] = 0; - s->unladdr[1] = 0; - s->wpstatus = 0x0002; - s->cycle = 0; - s->otpmode = 0; - s->blk_cur = s->blk; - s->current = s->image; - s->secs_cur = s->secs; - - if (cold) { - /* Lock the whole flash */ - memset(s->blockwp, ONEN_LOCK_LOCKED, s->blocks); - - if (s->blk_cur && blk_pread(s->blk_cur, 0, 8 << BDRV_SECTOR_BITS, - s->boot[0], 0) < 0) { - hw_error("%s: Loading the BootRAM failed.\n", __func__); - } - } -} - -static void onenand_system_reset(DeviceState *dev) -{ - OneNANDState *s = ONE_NAND(dev); - - onenand_reset(s, 1); -} - -static inline int onenand_load_main(OneNANDState *s, int sec, int secn, - void *dest) -{ - assert(UINT32_MAX >> BDRV_SECTOR_BITS > sec); - assert(UINT32_MAX >> BDRV_SECTOR_BITS > secn); - if (s->blk_cur) { - return blk_pread(s->blk_cur, sec << BDRV_SECTOR_BITS, - secn << BDRV_SECTOR_BITS, dest, 0) < 0; - } else if (sec + secn > s->secs_cur) { - return 1; - } - - memcpy(dest, s->current + (sec << 9), secn << 9); - - return 0; -} - -static inline int onenand_prog_main(OneNANDState *s, int sec, int secn, - void *src) -{ - int result = 0; - - if (secn > 0) { - uint32_t size = secn << BDRV_SECTOR_BITS; - uint32_t offset = sec << BDRV_SECTOR_BITS; - assert(UINT32_MAX >> BDRV_SECTOR_BITS > sec); - assert(UINT32_MAX >> BDRV_SECTOR_BITS > secn); - const uint8_t *sp = (const uint8_t *)src; - uint8_t *dp = 0; - if (s->blk_cur) { - dp = g_malloc(size); - if (!dp || blk_pread(s->blk_cur, offset, size, dp, 0) < 0) { - result = 1; - } - } else { - if (sec + secn > s->secs_cur) { - result = 1; - } else { - dp = (uint8_t *)s->current + offset; - } - } - if (!result) { - uint32_t i; - for (i = 0; i < size; i++) { - dp[i] &= sp[i]; - } - if (s->blk_cur) { - result = blk_pwrite(s->blk_cur, offset, size, dp, 0) < 0; - } - } - if (dp && s->blk_cur) { - g_free(dp); - } - } - - return result; -} - -static inline int onenand_load_spare(OneNANDState *s, int sec, int secn, - void *dest) -{ - uint8_t buf[512]; - - if (s->blk_cur) { - uint32_t offset = (s->secs_cur + (sec >> 5)) << BDRV_SECTOR_BITS; - if (blk_pread(s->blk_cur, offset, BDRV_SECTOR_SIZE, buf, 0) < 0) { - return 1; - } - memcpy(dest, buf + ((sec & 31) << 4), secn << 4); - } else if (sec + secn > s->secs_cur) { - return 1; - } else { - memcpy(dest, s->current + (s->secs_cur << 9) + (sec << 4), secn << 4); - } - - return 0; -} - -static inline int onenand_prog_spare(OneNANDState *s, int sec, int secn, - void *src) -{ - int result = 0; - if (secn > 0) { - const uint8_t *sp = (const uint8_t *)src; - uint8_t *dp = 0, *dpp = 0; - uint32_t offset = (s->secs_cur + (sec >> 5)) << BDRV_SECTOR_BITS; - assert(UINT32_MAX >> BDRV_SECTOR_BITS > s->secs_cur + (sec >> 5)); - if (s->blk_cur) { - dp = g_malloc(512); - if (!dp - || blk_pread(s->blk_cur, offset, BDRV_SECTOR_SIZE, dp, 0) < 0) { - result = 1; - } else { - dpp = dp + ((sec & 31) << 4); - } - } else { - if (sec + secn > s->secs_cur) { - result = 1; - } else { - dpp = s->current + (s->secs_cur << 9) + (sec << 4); - } - } - if (!result) { - uint32_t i; - for (i = 0; i < (secn << 4); i++) { - dpp[i] &= sp[i]; - } - if (s->blk_cur) { - result = blk_pwrite(s->blk_cur, offset, BDRV_SECTOR_SIZE, dp, - 0) < 0; - } - } - g_free(dp); - } - return result; -} - -static inline int onenand_erase(OneNANDState *s, int sec, int num) -{ - uint8_t *blankbuf, *tmpbuf; - - blankbuf = g_malloc(512); - tmpbuf = g_malloc(512); - memset(blankbuf, 0xff, 512); - for (; num > 0; num--, sec++) { - if (s->blk_cur) { - int erasesec = s->secs_cur + (sec >> 5); - if 
(blk_pwrite(s->blk_cur, sec << BDRV_SECTOR_BITS, - BDRV_SECTOR_SIZE, blankbuf, 0) < 0) { - goto fail; - } - if (blk_pread(s->blk_cur, erasesec << BDRV_SECTOR_BITS, - BDRV_SECTOR_SIZE, tmpbuf, 0) < 0) { - goto fail; - } - memcpy(tmpbuf + ((sec & 31) << 4), blankbuf, 1 << 4); - if (blk_pwrite(s->blk_cur, erasesec << BDRV_SECTOR_BITS, - BDRV_SECTOR_SIZE, tmpbuf, 0) < 0) { - goto fail; - } - } else { - if (sec + 1 > s->secs_cur) { - goto fail; - } - memcpy(s->current + (sec << 9), blankbuf, 512); - memcpy(s->current + (s->secs_cur << 9) + (sec << 4), - blankbuf, 1 << 4); - } - } - - g_free(tmpbuf); - g_free(blankbuf); - return 0; - -fail: - g_free(tmpbuf); - g_free(blankbuf); - return 1; -} - -static void onenand_command(OneNANDState *s) -{ - int b; - int sec; - void *buf; -#define SETADDR(block, page) \ - sec = (s->addr[page] & 3) + \ - ((((s->addr[page] >> 2) & 0x3f) + \ - (((s->addr[block] & 0xfff) | \ - (s->addr[block] >> 15 ? s->density_mask : 0)) \ - << 6)) \ - << (PAGE_SHIFT - 9)); -#define SETBUF_M() \ - buf = (s->bufaddr & 8) ? s->data[(s->bufaddr >> 2) & 1][0] : s->boot[0]; \ - buf += (s->bufaddr & 3) << 9; -#define SETBUF_S() \ - buf = (s->bufaddr & 8) ? \ - s->data[(s->bufaddr >> 2) & 1][1] : s->boot[1]; \ - buf += (s->bufaddr & 3) << 4; - - switch (s->command) { - case 0x00: /* Load single/multiple sector data unit into buffer */ - SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) - - SETBUF_M() - if (onenand_load_main(s, sec, s->count, buf)) - s->status |= ONEN_ERR_CMD | ONEN_ERR_LOAD; - -#if 0 - SETBUF_S() - if (onenand_load_spare(s, sec, s->count, buf)) - s->status |= ONEN_ERR_CMD | ONEN_ERR_LOAD; -#endif - - /* TODO: if (s->bufaddr & 3) + s->count was > 4 (2k-pages) - * or if (s->bufaddr & 1) + s->count was > 2 (1k-pages) - * then we need two split the read/write into two chunks. - */ - s->intstatus |= ONEN_INT | ONEN_INT_LOAD; - break; - case 0x13: /* Load single/multiple spare sector into buffer */ - SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) - - SETBUF_S() - if (onenand_load_spare(s, sec, s->count, buf)) - s->status |= ONEN_ERR_CMD | ONEN_ERR_LOAD; - - /* TODO: if (s->bufaddr & 3) + s->count was > 4 (2k-pages) - * or if (s->bufaddr & 1) + s->count was > 2 (1k-pages) - * then we need two split the read/write into two chunks. - */ - s->intstatus |= ONEN_INT | ONEN_INT_LOAD; - break; - case 0x80: /* Program single/multiple sector data unit from buffer */ - SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) - - SETBUF_M() - if (onenand_prog_main(s, sec, s->count, buf)) - s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG; - -#if 0 - SETBUF_S() - if (onenand_prog_spare(s, sec, s->count, buf)) - s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG; -#endif - - /* TODO: if (s->bufaddr & 3) + s->count was > 4 (2k-pages) - * or if (s->bufaddr & 1) + s->count was > 2 (1k-pages) - * then we need two split the read/write into two chunks. - */ - s->intstatus |= ONEN_INT | ONEN_INT_PROG; - break; - case 0x1a: /* Program single/multiple spare area sector from buffer */ - SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) - - SETBUF_S() - if (onenand_prog_spare(s, sec, s->count, buf)) - s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG; - - /* TODO: if (s->bufaddr & 3) + s->count was > 4 (2k-pages) - * or if (s->bufaddr & 1) + s->count was > 2 (1k-pages) - * then we need two split the read/write into two chunks. 
- */ - s->intstatus |= ONEN_INT | ONEN_INT_PROG; - break; - case 0x1b: /* Copy-back program */ - SETBUF_S() - - SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) - if (onenand_load_main(s, sec, s->count, buf)) - s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG; - - SETADDR(ONEN_BUF_DEST_BLOCK, ONEN_BUF_DEST_PAGE) - if (onenand_prog_main(s, sec, s->count, buf)) - s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG; - - /* TODO: spare areas */ - - s->intstatus |= ONEN_INT | ONEN_INT_PROG; - break; - - case 0x23: /* Unlock NAND array block(s) */ - s->intstatus |= ONEN_INT; - - /* XXX the previous (?) area should be locked automatically */ - for (b = s->unladdr[0]; b <= s->unladdr[1]; b ++) { - if (b >= s->blocks) { - s->status |= ONEN_ERR_CMD; - break; - } - if (s->blockwp[b] == ONEN_LOCK_LOCKTIGHTEN) - break; - - s->wpstatus = s->blockwp[b] = ONEN_LOCK_UNLOCKED; - } - break; - case 0x27: /* Unlock All NAND array blocks */ - s->intstatus |= ONEN_INT; - - for (b = 0; b < s->blocks; b ++) { - if (s->blockwp[b] == ONEN_LOCK_LOCKTIGHTEN) - break; - - s->wpstatus = s->blockwp[b] = ONEN_LOCK_UNLOCKED; - } - break; - - case 0x2a: /* Lock NAND array block(s) */ - s->intstatus |= ONEN_INT; - - for (b = s->unladdr[0]; b <= s->unladdr[1]; b ++) { - if (b >= s->blocks) { - s->status |= ONEN_ERR_CMD; - break; - } - if (s->blockwp[b] == ONEN_LOCK_LOCKTIGHTEN) - break; - - s->wpstatus = s->blockwp[b] = ONEN_LOCK_LOCKED; - } - break; - case 0x2c: /* Lock-tight NAND array block(s) */ - s->intstatus |= ONEN_INT; - - for (b = s->unladdr[0]; b <= s->unladdr[1]; b ++) { - if (b >= s->blocks) { - s->status |= ONEN_ERR_CMD; - break; - } - if (s->blockwp[b] == ONEN_LOCK_UNLOCKED) - continue; - - s->wpstatus = s->blockwp[b] = ONEN_LOCK_LOCKTIGHTEN; - } - break; - - case 0x71: /* Erase-Verify-Read */ - s->intstatus |= ONEN_INT; - break; - case 0x95: /* Multi-block erase */ - qemu_irq_pulse(s->intr); - /* Fall through. */ - case 0x94: /* Block erase */ - sec = ((s->addr[ONEN_BUF_BLOCK] & 0xfff) | - (s->addr[ONEN_BUF_BLOCK] >> 15 ? s->density_mask : 0)) - << (BLOCK_SHIFT - 9); - if (onenand_erase(s, sec, 1 << (BLOCK_SHIFT - 9))) - s->status |= ONEN_ERR_CMD | ONEN_ERR_ERASE; - - s->intstatus |= ONEN_INT | ONEN_INT_ERASE; - break; - case 0xb0: /* Erase suspend */ - break; - case 0x30: /* Erase resume */ - s->intstatus |= ONEN_INT | ONEN_INT_ERASE; - break; - - case 0xf0: /* Reset NAND Flash core */ - onenand_reset(s, 0); - break; - case 0xf3: /* Reset OneNAND */ - onenand_reset(s, 0); - break; - - case 0x65: /* OTP Access */ - s->intstatus |= ONEN_INT; - s->blk_cur = NULL; - s->current = s->otp; - s->secs_cur = 1 << (BLOCK_SHIFT - 9); - s->addr[ONEN_BUF_BLOCK] = 0; - s->otpmode = 1; - break; - - default: - s->status |= ONEN_ERR_CMD; - s->intstatus |= ONEN_INT; - qemu_log_mask(LOG_GUEST_ERROR, "unknown OneNAND command %x\n", - s->command); - } - - onenand_intr_update(s); -} - -static uint64_t onenand_read(void *opaque, hwaddr addr, - unsigned size) -{ - OneNANDState *s = (OneNANDState *) opaque; - int offset = addr >> s->shift; - - switch (offset) { - case 0x0000 ... 0xbffe: - return lduw_le_p(s->boot[0] + addr); - - case 0xf000: /* Manufacturer ID */ - return s->id.man; - case 0xf001: /* Device ID */ - return s->id.dev; - case 0xf002: /* Version ID */ - return s->id.ver; - /* TODO: get the following values from a real chip! 
*/ - case 0xf003: /* Data Buffer size */ - return 1 << PAGE_SHIFT; - case 0xf004: /* Boot Buffer size */ - return 0x200; - case 0xf005: /* Amount of buffers */ - return 1 | (2 << 8); - case 0xf006: /* Technology */ - return 0; - - case 0xf100 ... 0xf107: /* Start addresses */ - return s->addr[offset - 0xf100]; - - case 0xf200: /* Start buffer */ - return (s->bufaddr << 8) | ((s->count - 1) & (1 << (PAGE_SHIFT - 10))); - - case 0xf220: /* Command */ - return s->command; - case 0xf221: /* System Configuration 1 */ - return s->config[0] & 0xffe0; - case 0xf222: /* System Configuration 2 */ - return s->config[1]; - - case 0xf240: /* Controller Status */ - return s->status; - case 0xf241: /* Interrupt */ - return s->intstatus; - case 0xf24c: /* Unlock Start Block Address */ - return s->unladdr[0]; - case 0xf24d: /* Unlock End Block Address */ - return s->unladdr[1]; - case 0xf24e: /* Write Protection Status */ - return s->wpstatus; - - case 0xff00: /* ECC Status */ - return 0x00; - case 0xff01: /* ECC Result of main area data */ - case 0xff02: /* ECC Result of spare area data */ - case 0xff03: /* ECC Result of main area data */ - case 0xff04: /* ECC Result of spare area data */ - qemu_log_mask(LOG_UNIMP, - "onenand: ECC result registers unimplemented\n"); - return 0x0000; - } - - qemu_log_mask(LOG_GUEST_ERROR, "read of unknown OneNAND register 0x%x\n", - offset); - return 0; -} - -static void onenand_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - OneNANDState *s = (OneNANDState *) opaque; - int offset = addr >> s->shift; - int sec; - - switch (offset) { - case 0x0000 ... 0x01ff: - case 0x8000 ... 0x800f: - if (s->cycle) { - s->cycle = 0; - - if (value == 0x0000) { - SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) - onenand_load_main(s, sec, - 1 << (PAGE_SHIFT - 9), s->data[0][0]); - s->addr[ONEN_BUF_PAGE] += 4; - s->addr[ONEN_BUF_PAGE] &= 0xff; - } - break; - } - - switch (value) { - case 0x00f0: /* Reset OneNAND */ - onenand_reset(s, 0); - break; - - case 0x00e0: /* Load Data into Buffer */ - s->cycle = 1; - break; - - case 0x0090: /* Read Identification Data */ - memset(s->boot[0], 0, 3 << s->shift); - s->boot[0][0 << s->shift] = s->id.man & 0xff; - s->boot[0][1 << s->shift] = s->id.dev & 0xff; - s->boot[0][2 << s->shift] = s->wpstatus & 0xff; - break; - - default: - qemu_log_mask(LOG_GUEST_ERROR, - "unknown OneNAND boot command %" PRIx64 "\n", - value); - } - break; - - case 0xf100 ... 0xf107: /* Start addresses */ - s->addr[offset - 0xf100] = value; - break; - - case 0xf200: /* Start buffer */ - s->bufaddr = (value >> 8) & 0xf; - if (PAGE_SHIFT == 11) - s->count = (value & 3) ?: 4; - else if (PAGE_SHIFT == 10) - s->count = (value & 1) ?: 2; - break; - - case 0xf220: /* Command */ - if (s->intstatus & (1 << 15)) - break; - s->command = value; - onenand_command(s); - break; - case 0xf221: /* System Configuration 1 */ - s->config[0] = value; - onenand_intr_update(s); - qemu_set_irq(s->rdy, (s->config[0] >> 7) & 1); - break; - case 0xf222: /* System Configuration 2 */ - s->config[1] = value; - break; - - case 0xf241: /* Interrupt */ - s->intstatus &= value; - if ((1 << 15) & ~s->intstatus) - s->status &= ~(ONEN_ERR_CMD | ONEN_ERR_ERASE | - ONEN_ERR_PROG | ONEN_ERR_LOAD); - onenand_intr_update(s); - break; - case 0xf24c: /* Unlock Start Block Address */ - s->unladdr[0] = value & (s->blocks - 1); - /* For some reason we have to set the end address to by default - * be same as start because the software forgets to write anything - * in there. 
*/ - s->unladdr[1] = value & (s->blocks - 1); - break; - case 0xf24d: /* Unlock End Block Address */ - s->unladdr[1] = value & (s->blocks - 1); - break; - - default: - qemu_log_mask(LOG_GUEST_ERROR, - "write to unknown OneNAND register 0x%x\n", - offset); - } -} - -static const MemoryRegionOps onenand_ops = { - .read = onenand_read, - .write = onenand_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void onenand_realize(DeviceState *dev, Error **errp) -{ - SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - OneNANDState *s = ONE_NAND(dev); - uint32_t size = 1 << (24 + ((s->id.dev >> 4) & 7)); - void *ram; - Error *local_err = NULL; - - s->base = (hwaddr)-1; - s->rdy = NULL; - s->blocks = size >> BLOCK_SHIFT; - s->secs = size >> 9; - s->blockwp = g_malloc(s->blocks); - s->density_mask = (s->id.dev & 0x08) - ? (1 << (6 + ((s->id.dev >> 4) & 7))) : 0; - memory_region_init_io(&s->iomem, OBJECT(s), &onenand_ops, s, "onenand", - 0x10000 << s->shift); - if (!s->blk) { - s->image = memset(g_malloc(size + (size >> 5)), - 0xff, size + (size >> 5)); - } else { - if (!blk_supports_write_perm(s->blk)) { - error_setg(errp, "Can't use a read-only drive"); - return; - } - blk_set_perm(s->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, - BLK_PERM_ALL, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - s->blk_cur = s->blk; - } - s->otp = memset(g_malloc((64 + 2) << PAGE_SHIFT), - 0xff, (64 + 2) << PAGE_SHIFT); - memory_region_init_ram_nomigrate(&s->ram, OBJECT(s), "onenand.ram", - 0xc000 << s->shift, &error_fatal); - vmstate_register_ram_global(&s->ram); - ram = memory_region_get_ram_ptr(&s->ram); - s->boot[0] = ram + (0x0000 << s->shift); - s->boot[1] = ram + (0x8000 << s->shift); - s->data[0][0] = ram + ((0x0200 + (0 << (PAGE_SHIFT - 1))) << s->shift); - s->data[0][1] = ram + ((0x8010 + (0 << (PAGE_SHIFT - 6))) << s->shift); - s->data[1][0] = ram + ((0x0200 + (1 << (PAGE_SHIFT - 1))) << s->shift); - s->data[1][1] = ram + ((0x8010 + (1 << (PAGE_SHIFT - 6))) << s->shift); - onenand_mem_setup(s); - sysbus_init_irq(sbd, &s->intr); - sysbus_init_mmio(sbd, &s->container); - vmstate_register(VMSTATE_IF(dev), - ((s->shift & 0x7f) << 24) - | ((s->id.man & 0xff) << 16) - | ((s->id.dev & 0xff) << 8) - | (s->id.ver & 0xff), - &vmstate_onenand, s); -} - -static Property onenand_properties[] = { - DEFINE_PROP_UINT16("manufacturer_id", OneNANDState, id.man, 0), - DEFINE_PROP_UINT16("device_id", OneNANDState, id.dev, 0), - DEFINE_PROP_UINT16("version_id", OneNANDState, id.ver, 0), - DEFINE_PROP_INT32("shift", OneNANDState, shift, 0), - DEFINE_PROP_DRIVE("drive", OneNANDState, blk), - DEFINE_PROP_END_OF_LIST(), -}; - -static void onenand_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->realize = onenand_realize; - dc->reset = onenand_system_reset; - device_class_set_props(dc, onenand_properties); -} - -static const TypeInfo onenand_info = { - .name = TYPE_ONE_NAND, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(OneNANDState), - .class_init = onenand_class_init, -}; - -static void onenand_register_types(void) -{ - type_register_static(&onenand_info); -} - -void *onenand_raw_otp(DeviceState *onenand_device) -{ - OneNANDState *s = ONE_NAND(onenand_device); - - return s->otp; -} - -type_init(onenand_register_types) diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c index c8f1cf5a872..168101d8dfe 100644 --- a/hw/block/pflash_cfi01.c +++ b/hw/block/pflash_cfi01.c @@ -41,18 +41,17 @@ #include "hw/block/flash.h" #include 
"hw/qdev-properties.h" #include "hw/qdev-properties-system.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/bitops.h" #include "qemu/host-utils.h" #include "qemu/log.h" -#include "qemu/module.h" #include "qemu/option.h" #include "hw/sysbus.h" #include "migration/vmstate.h" -#include "sysemu/blockdev.h" -#include "sysemu/runstate.h" +#include "system/blockdev.h" +#include "system/runstate.h" #include "trace.h" #define PFLASH_BE 0 @@ -614,6 +613,7 @@ static void pflash_write(PFlashCFI01 *pfl, hwaddr offset, if (!pfl->counter) { trace_pflash_write(pfl->name, "block write finished"); pfl->wcycle++; + break; } pfl->counter--; @@ -895,7 +895,7 @@ static void pflash_cfi01_system_reset(DeviceState *dev) pfl->blk_offset = -1; } -static Property pflash_cfi01_properties[] = { +static const Property pflash_cfi01_properties[] = { DEFINE_PROP_DRIVE("drive", PFlashCFI01, blk), /* num-blocks is the number of blocks actually visible to the guest, * ie the total size of the device divided by the sector length. @@ -932,34 +932,29 @@ static Property pflash_cfi01_properties[] = { DEFINE_PROP_STRING("name", PFlashCFI01, name), DEFINE_PROP_BOOL("old-multiple-chip-handling", PFlashCFI01, old_multiple_chip_handling, false), - DEFINE_PROP_END_OF_LIST(), }; -static void pflash_cfi01_class_init(ObjectClass *klass, void *data) +static void pflash_cfi01_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = pflash_cfi01_system_reset; + device_class_set_legacy_reset(dc, pflash_cfi01_system_reset); dc->realize = pflash_cfi01_realize; device_class_set_props(dc, pflash_cfi01_properties); dc->vmsd = &vmstate_pflash; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } - -static const TypeInfo pflash_cfi01_info = { - .name = TYPE_PFLASH_CFI01, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PFlashCFI01), - .class_init = pflash_cfi01_class_init, +static const TypeInfo pflash_cfi01_types[] = { + { + .name = TYPE_PFLASH_CFI01, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PFlashCFI01), + .class_init = pflash_cfi01_class_init, + }, }; -static void pflash_cfi01_register_types(void) -{ - type_register_static(&pflash_cfi01_info); -} - -type_init(pflash_cfi01_register_types) +DEFINE_TYPES(pflash_cfi01_types) PFlashCFI01 *pflash_cfi01_register(hwaddr base, const char *name, diff --git a/hw/block/pflash_cfi02.c b/hw/block/pflash_cfi02.c index 2314142373f..3244b699b98 100644 --- a/hw/block/pflash_cfi02.c +++ b/hw/block/pflash_cfi02.c @@ -41,7 +41,7 @@ #include "qemu/error-report.h" #include "qemu/bitmap.h" #include "qemu/timer.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/host-utils.h" #include "qemu/module.h" #include "hw/sysbus.h" @@ -937,7 +937,7 @@ static void pflash_cfi02_reset(DeviceState *dev) pflash_reset_state_machine(pfl); } -static Property pflash_cfi02_properties[] = { +static const Property pflash_cfi02_properties[] = { DEFINE_PROP_DRIVE("drive", PFlashCFI02, blk), DEFINE_PROP_UINT32("num-blocks", PFlashCFI02, uniform_nb_blocs, 0), DEFINE_PROP_UINT32("sector-length", PFlashCFI02, uniform_sector_len, 0), @@ -959,7 +959,6 @@ static Property pflash_cfi02_properties[] = { DEFINE_PROP_UINT16("unlock-addr0", PFlashCFI02, unlock_addr0, 0), DEFINE_PROP_UINT16("unlock-addr1", PFlashCFI02, unlock_addr1, 0), DEFINE_PROP_STRING("name", PFlashCFI02, name), - DEFINE_PROP_END_OF_LIST(), }; static void 
pflash_cfi02_unrealize(DeviceState *dev) @@ -969,12 +968,12 @@ static void pflash_cfi02_unrealize(DeviceState *dev) g_free(pfl->sector_erase_map); } -static void pflash_cfi02_class_init(ObjectClass *klass, void *data) +static void pflash_cfi02_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = pflash_cfi02_realize; - dc->reset = pflash_cfi02_reset; + device_class_set_legacy_reset(dc, pflash_cfi02_reset); dc->unrealize = pflash_cfi02_unrealize; device_class_set_props(dc, pflash_cfi02_properties); set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); diff --git a/hw/block/swim.c b/hw/block/swim.c index 44761c11cbc..ad047362f88 100644 --- a/hw/block/swim.c +++ b/hw/block/swim.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" #include "qapi/error.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "hw/sysbus.h" #include "migration/vmstate.h" #include "hw/block/block.h" @@ -166,10 +166,9 @@ static const BlockDevOps swim_block_ops = { .change_media_cb = swim_change_cb, }; -static Property swim_drive_properties[] = { +static const Property swim_drive_properties[] = { DEFINE_PROP_INT32("unit", SWIMDrive, unit, -1), DEFINE_BLOCK_PROPERTIES(SWIMDrive, conf), - DEFINE_PROP_END_OF_LIST(), }; static void swim_drive_realize(DeviceState *qdev, Error **errp) @@ -254,7 +253,7 @@ static void swim_drive_realize(DeviceState *qdev, Error **errp) blk_set_dev_ops(drive->blk, &swim_block_ops, drive); } -static void swim_drive_class_init(ObjectClass *klass, void *data) +static void swim_drive_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->realize = swim_drive_realize; @@ -551,12 +550,12 @@ static const VMStateDescription vmstate_sysbus_swim = { } }; -static void sysbus_swim_class_init(ObjectClass *oc, void *data) +static void sysbus_swim_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = sysbus_swim_realize; - dc->reset = sysbus_swim_reset; + device_class_set_legacy_reset(dc, sysbus_swim_reset); dc->vmsd = &vmstate_sysbus_swim; } diff --git a/hw/block/tc58128.c b/hw/block/tc58128.c deleted file mode 100644 index 0984e37417b..00000000000 --- a/hw/block/tc58128.c +++ /dev/null @@ -1,211 +0,0 @@ -/* - * TC58128 NAND EEPROM emulation - * - * Copyright (c) 2005 Samuel Tardieu - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- * - * SPDX-License-Identifier: MIT - */ -#include "qemu/osdep.h" -#include "qemu/units.h" -#include "hw/sh4/sh.h" -#include "hw/loader.h" -#include "sysemu/qtest.h" -#include "qemu/error-report.h" - -#define CE1 0x0100 -#define CE2 0x0200 -#define RE 0x0400 -#define WE 0x0800 -#define ALE 0x1000 -#define CLE 0x2000 -#define RDY1 0x4000 -#define RDY2 0x8000 -#define RDY(n) ((n) == 0 ? RDY1 : RDY2) - -typedef enum { WAIT, READ1, READ2, READ3 } state_t; - -typedef struct { - uint8_t *flash_contents; - state_t state; - uint32_t address; - uint8_t address_cycle; -} tc58128_dev; - -static tc58128_dev tc58128_devs[2]; - -#define FLASH_SIZE (16 * MiB) - -static void init_dev(tc58128_dev * dev, const char *filename) -{ - int ret, blocks; - - dev->state = WAIT; - dev->flash_contents = g_malloc(FLASH_SIZE); - memset(dev->flash_contents, 0xff, FLASH_SIZE); - if (filename) { - /* Load flash image skipping the first block */ - ret = load_image_size(filename, dev->flash_contents + 528 * 32, - FLASH_SIZE - 528 * 32); - if (ret < 0) { - if (!qtest_enabled()) { - error_report("Could not load flash image %s", filename); - exit(1); - } - } else { - /* Build first block with number of blocks */ - blocks = DIV_ROUND_UP(ret, 528 * 32); - dev->flash_contents[0] = blocks & 0xff; - dev->flash_contents[1] = (blocks >> 8) & 0xff; - dev->flash_contents[2] = (blocks >> 16) & 0xff; - dev->flash_contents[3] = (blocks >> 24) & 0xff; - fprintf(stderr, "loaded %d bytes for %s into flash\n", ret, - filename); - } - } -} - -static void handle_command(tc58128_dev * dev, uint8_t command) -{ - switch (command) { - case 0xff: - fprintf(stderr, "reset flash device\n"); - dev->state = WAIT; - break; - case 0x00: - fprintf(stderr, "read mode 1\n"); - dev->state = READ1; - dev->address_cycle = 0; - break; - case 0x01: - fprintf(stderr, "read mode 2\n"); - dev->state = READ2; - dev->address_cycle = 0; - break; - case 0x50: - fprintf(stderr, "read mode 3\n"); - dev->state = READ3; - dev->address_cycle = 0; - break; - default: - fprintf(stderr, "unknown flash command 0x%02x\n", command); - abort(); - } -} - -static void handle_address(tc58128_dev * dev, uint8_t data) -{ - switch (dev->state) { - case READ1: - case READ2: - case READ3: - switch (dev->address_cycle) { - case 0: - dev->address = data; - if (dev->state == READ2) - dev->address |= 0x100; - else if (dev->state == READ3) - dev->address |= 0x200; - break; - case 1: - dev->address += data * 528 * 0x100; - break; - case 2: - dev->address += data * 528; - fprintf(stderr, "address pointer in flash: 0x%08x\n", - dev->address); - break; - default: - /* Invalid data */ - abort(); - } - dev->address_cycle++; - break; - default: - abort(); - } -} - -static uint8_t handle_read(tc58128_dev * dev) -{ -#if 0 - if (dev->address % 0x100000 == 0) - fprintf(stderr, "reading flash at address 0x%08x\n", dev->address); -#endif - return dev->flash_contents[dev->address++]; -} - -/* We never mark the device as busy, so interrupts cannot be triggered - XXXXX */ - -static int tc58128_cb(uint16_t porta, uint16_t portb, - uint16_t * periph_pdtra, uint16_t * periph_portadir, - uint16_t * periph_pdtrb, uint16_t * periph_portbdir) -{ - int dev; - - if ((porta & CE1) == 0) - dev = 0; - else if ((porta & CE2) == 0) - dev = 1; - else - return 0; /* No device selected */ - - if ((porta & RE) && (porta & WE)) { - /* Nothing to do, assert ready and return to input state */ - *periph_portadir &= 0xff00; - *periph_portadir |= RDY(dev); - *periph_pdtra |= RDY(dev); - return 1; - } - - if (porta & CLE) { - /* 
Command */ - assert((porta & WE) == 0); - handle_command(&tc58128_devs[dev], porta & 0x00ff); - } else if (porta & ALE) { - assert((porta & WE) == 0); - handle_address(&tc58128_devs[dev], porta & 0x00ff); - } else if ((porta & RE) == 0) { - *periph_portadir |= 0x00ff; - *periph_pdtra &= 0xff00; - *periph_pdtra |= handle_read(&tc58128_devs[dev]); - } else { - abort(); - } - return 1; -} - -static sh7750_io_device tc58128 = { - RE | WE, /* Port A triggers */ - 0, /* Port B triggers */ - tc58128_cb /* Callback */ -}; - -int tc58128_init(struct SH7750State *s, const char *zone1, const char *zone2) -{ - if (!qtest_enabled()) { - warn_report_once("The TC58128 flash device is deprecated"); - } - init_dev(&tc58128_devs[0], zone1); - init_dev(&tc58128_devs[1], zone2); - return sh7750_register_io_device(s, &tc58128); -} diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c index 5b7f46bbb07..c0cc5f69428 100644 --- a/hw/block/vhost-user-blk.c +++ b/hw/block/vhost-user-blk.c @@ -29,8 +29,8 @@ #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" -#include "sysemu/sysemu.h" -#include "sysemu/runstate.h" +#include "system/system.h" +#include "system/runstate.h" static const int user_feature_bits[] = { VIRTIO_BLK_F_SIZE_MAX, @@ -90,27 +90,39 @@ static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config) s->blkcfg.wce = blkcfg->wce; } +static int vhost_user_blk_sync_config(DeviceState *dev, Error **errp) +{ + int ret; + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserBlk *s = VHOST_USER_BLK(vdev); + + ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg, + vdev->config_len, errp); + if (ret < 0) { + return ret; + } + + memcpy(vdev->config, &s->blkcfg, vdev->config_len); + virtio_notify_config(vdev); + + return 0; +} + static int vhost_user_blk_handle_config_change(struct vhost_dev *dev) { int ret; - VirtIODevice *vdev = dev->vdev; - VHostUserBlk *s = VHOST_USER_BLK(dev->vdev); Error *local_err = NULL; if (!dev->started) { return 0; } - ret = vhost_dev_get_config(dev, (uint8_t *)&s->blkcfg, - vdev->config_len, &local_err); + ret = vhost_user_blk_sync_config(DEVICE(dev->vdev), &local_err); if (ret < 0) { error_report_err(local_err); return ret; } - memcpy(dev->vdev->config, &s->blkcfg, vdev->config_len); - virtio_notify_config(dev->vdev); - return 0; } @@ -192,34 +204,39 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp) return ret; } -static void vhost_user_blk_stop(VirtIODevice *vdev) +static int vhost_user_blk_stop(VirtIODevice *vdev) { VHostUserBlk *s = VHOST_USER_BLK(vdev); BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); int ret; + bool force_stop = false; if (!s->started_vu) { - return; + return 0; } s->started_vu = false; if (!k->set_guest_notifiers) { - return; + return 0; } - vhost_dev_stop(&s->dev, vdev, true); + force_stop = s->skip_get_vring_base_on_force_shutdown && + qemu_force_shutdown_requested(); - ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false); - if (ret < 0) { + ret = force_stop ? 
vhost_dev_force_stop(&s->dev, vdev, true) : + vhost_dev_stop(&s->dev, vdev, true); + + if (k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false) < 0) { error_report("vhost guest notifier cleanup failed: %d", ret); - return; + return -1; } vhost_dev_disable_notifiers(&s->dev, vdev); + return ret; } -static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status) +static int vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status) { VHostUserBlk *s = VHOST_USER_BLK(vdev); bool should_start = virtio_device_should_start(vdev, status); @@ -227,11 +244,11 @@ static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status) int ret; if (!s->connected) { - return; + return -1; } if (vhost_dev_is_started(&s->dev) == should_start) { - return; + return 0; } if (should_start) { @@ -241,9 +258,12 @@ static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status) qemu_chr_fe_disconnect(&s->chardev); } } else { - vhost_user_blk_stop(vdev); + ret = vhost_user_blk_stop(vdev); + if (ret < 0) { + return ret; + } } - + return 0; } static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev, @@ -558,7 +578,7 @@ static const VMStateDescription vmstate_vhost_user_blk = { }, }; -static Property vhost_user_blk_properties[] = { +static const Property vhost_user_blk_properties[] = { DEFINE_PROP_CHR("chardev", VHostUserBlk, chardev), DEFINE_PROP_UINT16("num-queues", VHostUserBlk, num_queues, VHOST_USER_BLK_AUTO_NUM_QUEUES), @@ -569,16 +589,18 @@ static Property vhost_user_blk_properties[] = { VIRTIO_BLK_F_DISCARD, true), DEFINE_PROP_BIT64("write-zeroes", VHostUserBlk, parent_obj.host_features, VIRTIO_BLK_F_WRITE_ZEROES, true), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_BOOL("skip-get-vring-base-on-force-shutdown", VHostUserBlk, + skip_get_vring_base_on_force_shutdown, false), }; -static void vhost_user_blk_class_init(ObjectClass *klass, void *data) +static void vhost_user_blk_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); device_class_set_props(dc, vhost_user_blk_properties); dc->vmsd = &vmstate_vhost_user_blk; + dc->sync_config = vhost_user_blk_sync_config; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); vdc->realize = vhost_user_blk_device_realize; vdc->unrealize = vhost_user_blk_device_unrealize; diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index 73bdfd6122a..9bab2716c14 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -22,10 +22,10 @@ #include "trace.h" #include "hw/block/block.h" #include "hw/qdev-properties.h" -#include "sysemu/blockdev.h" -#include "sysemu/block-ram-registrar.h" -#include "sysemu/sysemu.h" -#include "sysemu/runstate.h" +#include "system/blockdev.h" +#include "system/block-ram-registrar.h" +#include "system/system.h" +#include "system/runstate.h" #include "hw/virtio/virtio-blk.h" #include "scsi/constants.h" #ifdef __linux__ @@ -33,6 +33,7 @@ #endif #include "hw/virtio/virtio-bus.h" #include "migration/qemu-file-types.h" +#include "hw/virtio/iothread-vq-mapping.h" #include "hw/virtio/virtio-access.h" #include "hw/virtio/virtio-blk-common.h" #include "qemu/coroutine.h" @@ -50,12 +51,7 @@ static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq, req->mr_next = NULL; } -static void virtio_blk_free_request(VirtIOBlockReq *req) -{ - g_free(req); -} - -static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) +void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) { VirtIOBlock *s = 
req->dev; VirtIODevice *vdev = VIRTIO_DEVICE(s); @@ -93,7 +89,7 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, if (acct_failed) { block_acct_failed(blk_get_stats(s->blk), &req->acct); } - virtio_blk_free_request(req); + g_free(req); } blk_error_action(s->blk, action, is_read, error); @@ -136,7 +132,7 @@ static void virtio_blk_rw_complete(void *opaque, int ret) virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); block_acct_done(blk_get_stats(s->blk), &req->acct); - virtio_blk_free_request(req); + g_free(req); } } @@ -151,7 +147,7 @@ static void virtio_blk_flush_complete(void *opaque, int ret) virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); block_acct_done(blk_get_stats(s->blk), &req->acct); - virtio_blk_free_request(req); + g_free(req); } static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) @@ -169,7 +165,7 @@ static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) if (is_write_zeroes) { block_acct_done(blk_get_stats(s->blk), &req->acct); } - virtio_blk_free_request(req); + g_free(req); } static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq) @@ -214,7 +210,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req) fail: virtio_blk_req_complete(req, status); - virtio_blk_free_request(req); + g_free(req); } static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb, @@ -612,7 +608,7 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret) out: virtio_blk_req_complete(req, err_status); - virtio_blk_free_request(req); + g_free(req); g_free(data->zone_report_data.zones); g_free(data); } @@ -661,7 +657,7 @@ static void virtio_blk_handle_zone_report(VirtIOBlockReq *req, return; out: virtio_blk_req_complete(req, err_status); - virtio_blk_free_request(req); + g_free(req); } static void virtio_blk_zone_mgmt_complete(void *opaque, int ret) @@ -677,7 +673,7 @@ static void virtio_blk_zone_mgmt_complete(void *opaque, int ret) } virtio_blk_req_complete(req, err_status); - virtio_blk_free_request(req); + g_free(req); } static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) @@ -700,7 +696,7 @@ static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) } else { if (bs->bl.zone_size > capacity - offset) { /* The zoned device allows the last smaller zone. */ - len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1); + len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1ull); } else { len = bs->bl.zone_size; } @@ -719,7 +715,7 @@ static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) return 0; out: virtio_blk_req_complete(req, err_status); - virtio_blk_free_request(req); + g_free(req); return err_status; } @@ -750,7 +746,7 @@ static void virtio_blk_zone_append_complete(void *opaque, int ret) out: virtio_blk_req_complete(req, err_status); - virtio_blk_free_request(req); + g_free(req); g_free(data); } @@ -788,7 +784,7 @@ static int virtio_blk_handle_zone_append(VirtIOBlockReq *req, out: virtio_blk_req_complete(req, err_status); - virtio_blk_free_request(req); + g_free(req); return err_status; } @@ -855,7 +851,7 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR); block_acct_invalid(blk_get_stats(s->blk), is_write ? 
BLOCK_ACCT_WRITE : BLOCK_ACCT_READ); - virtio_blk_free_request(req); + g_free(req); return 0; } @@ -911,7 +907,7 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) VIRTIO_BLK_ID_BYTES)); iov_from_buf(in_iov, in_num, 0, serial, size); virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); - virtio_blk_free_request(req); + g_free(req); break; } case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT: @@ -943,7 +939,7 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) if (unlikely(!(type & VIRTIO_BLK_T_OUT) || out_len > sizeof(dwz_hdr))) { virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); - virtio_blk_free_request(req); + g_free(req); return 0; } @@ -960,14 +956,24 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) is_write_zeroes); if (err_status != VIRTIO_BLK_S_OK) { virtio_blk_req_complete(req, err_status); - virtio_blk_free_request(req); + g_free(req); } break; } default: - virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); - virtio_blk_free_request(req); + { + /* + * Give subclasses a chance to handle unknown requests. This way the + * class lookup is not in the hot path. + */ + VirtIOBlkClass *vbk = VIRTIO_BLK_GET_CLASS(s); + if (!vbk->handle_unknown_request || + !vbk->handle_unknown_request(req, mrb, type)) { + virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); + g_free(req); + } + } } return 0; } @@ -988,7 +994,7 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) while ((req = virtio_blk_get_request(s, vq))) { if (virtio_blk_handle_request(req, &mrb)) { virtqueue_detach_element(req->vq, &req->elem, 0); - virtio_blk_free_request(req); + g_free(req); break; } } @@ -1038,7 +1044,7 @@ static void virtio_blk_dma_restart_bh(void *opaque) while (req) { next = req->next; virtqueue_detach_element(req->vq, &req->elem, 0); - virtio_blk_free_request(req); + g_free(req); req = next; } break; @@ -1060,7 +1066,7 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running, VirtIOBlock *s = opaque; uint16_t num_queues = s->conf.num_queues; g_autofree VirtIOBlockReq **vq_rq = NULL; - VirtIOBlockReq *rq; + VirtIOBlockReq *rq = NULL; if (!running) { return; @@ -1121,7 +1127,7 @@ static void virtio_blk_reset(VirtIODevice *vdev) /* No other threads can access req->vq here */ virtqueue_detach_element(req->vq, &req->elem, 0); - virtio_blk_free_request(req); + g_free(req); } } @@ -1264,7 +1270,7 @@ static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features, return features; } -static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status) +static int virtio_blk_set_status(VirtIODevice *vdev, uint8_t status) { VirtIOBlock *s = VIRTIO_BLK(vdev); @@ -1273,7 +1279,7 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status) } if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) { - return; + return 0; } /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send @@ -1296,6 +1302,7 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status) virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_WCE)); } + return 0; } static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f) @@ -1418,128 +1425,6 @@ static const BlockDevOps virtio_block_ops = { .drained_end = virtio_blk_drained_end, }; -static bool -validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list, - uint16_t num_queues, Error **errp) -{ - g_autofree unsigned long *vqs = bitmap_new(num_queues); - g_autoptr(GHashTable) iothreads = - g_hash_table_new(g_str_hash, g_str_equal); - - for 
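The new default case above defers unknown request types to a VirtIOBlkClass hook so derived device types can extend the command set without putting a class lookup on the hot path. A minimal sketch of how a hypothetical subclass might wire the hook up; the handler signature is inferred from the call site, while the DEMO_* names and the VIRTIO_BLK_CLASS() accessor are assumptions for illustration:

static bool demo_blk_handle_unknown_request(VirtIOBlockReq *req,
                                            MultiReqBuffer *mrb,
                                            uint32_t type)
{
    /* Claim only the request types this subclass understands. */
    if (type != DEMO_BLK_T_VENDOR) {   /* assumed vendor-specific opcode */
        return false;                  /* base class replies VIRTIO_BLK_S_UNSUPP */
    }
    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    g_free(req);
    return true;                       /* request fully handled */
}

static void demo_blk_class_init(ObjectClass *klass, const void *data)
{
    VirtIOBlkClass *vbk = VIRTIO_BLK_CLASS(klass);   /* assumed cast macro */

    vbk->handle_unknown_request = demo_blk_handle_unknown_request;
}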
(IOThreadVirtQueueMappingList *node = list; node; node = node->next) { - const char *name = node->value->iothread; - uint16List *vq; - - if (!iothread_by_id(name)) { - error_setg(errp, "IOThread \"%s\" object does not exist", name); - return false; - } - - if (!g_hash_table_add(iothreads, (gpointer)name)) { - error_setg(errp, - "duplicate IOThread name \"%s\" in iothread-vq-mapping", - name); - return false; - } - - if (node != list) { - if (!!node->value->vqs != !!list->value->vqs) { - error_setg(errp, "either all items in iothread-vq-mapping " - "must have vqs or none of them must have it"); - return false; - } - } - - for (vq = node->value->vqs; vq; vq = vq->next) { - if (vq->value >= num_queues) { - error_setg(errp, "vq index %u for IOThread \"%s\" must be " - "less than num_queues %u in iothread-vq-mapping", - vq->value, name, num_queues); - return false; - } - - if (test_and_set_bit(vq->value, vqs)) { - error_setg(errp, "cannot assign vq %u to IOThread \"%s\" " - "because it is already assigned", vq->value, name); - return false; - } - } - } - - if (list->value->vqs) { - for (uint16_t i = 0; i < num_queues; i++) { - if (!test_bit(i, vqs)) { - error_setg(errp, - "missing vq %u IOThread assignment in iothread-vq-mapping", - i); - return false; - } - } - } - - return true; -} - -/** - * apply_iothread_vq_mapping: - * @iothread_vq_mapping_list: The mapping of virtqueues to IOThreads. - * @vq_aio_context: The array of AioContext pointers to fill in. - * @num_queues: The length of @vq_aio_context. - * @errp: If an error occurs, a pointer to the area to store the error. - * - * Fill in the AioContext for each virtqueue in the @vq_aio_context array given - * the iothread-vq-mapping parameter in @iothread_vq_mapping_list. - * - * Returns: %true on success, %false on failure. - **/ -static bool apply_iothread_vq_mapping( - IOThreadVirtQueueMappingList *iothread_vq_mapping_list, - AioContext **vq_aio_context, - uint16_t num_queues, - Error **errp) -{ - IOThreadVirtQueueMappingList *node; - size_t num_iothreads = 0; - size_t cur_iothread = 0; - - if (!validate_iothread_vq_mapping_list(iothread_vq_mapping_list, - num_queues, errp)) { - return false; - } - - for (node = iothread_vq_mapping_list; node; node = node->next) { - num_iothreads++; - } - - for (node = iothread_vq_mapping_list; node; node = node->next) { - IOThread *iothread = iothread_by_id(node->value->iothread); - AioContext *ctx = iothread_get_aio_context(iothread); - - /* Released in virtio_blk_vq_aio_context_cleanup() */ - object_ref(OBJECT(iothread)); - - if (node->value->vqs) { - uint16List *vq; - - /* Explicit vq:IOThread assignment */ - for (vq = node->value->vqs; vq; vq = vq->next) { - assert(vq->value < num_queues); - vq_aio_context[vq->value] = ctx; - } - } else { - /* Round-robin vq:IOThread assignment */ - for (unsigned i = cur_iothread; i < num_queues; - i += num_iothreads) { - vq_aio_context[i] = ctx; - } - } - - cur_iothread++; - } - - return true; -} - /* Context: BQL held */ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp) { @@ -1567,21 +1452,12 @@ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp) error_setg(errp, "ioeventfd is required for iothread"); return false; } - - /* - * If ioeventfd is (re-)enabled while the guest is running there could - * be block jobs that can conflict. 
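The validation and mapping helpers removed here are reused through the shared iothread_vq_mapping_apply()/iothread_vq_mapping_cleanup() calls further down. Their round-robin branch is a plain strided walk over the queue indices, so queue i lands on iothread (i % num_iothreads); a standalone sketch with example counts:

#include <stdio.h>

int main(void)
{
    const unsigned num_queues = 8, num_iothreads = 3;   /* example values */

    for (unsigned t = 0; t < num_iothreads; t++) {
        /* Same stride the removed helper used for implicit assignment. */
        for (unsigned q = t; q < num_queues; q += num_iothreads) {
            printf("vq %u -> iothread %u\n", q, t);
        }
    }
    return 0;
}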
- */ - if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) { - error_prepend(errp, "cannot start virtio-blk ioeventfd: "); - return false; - } } s->vq_aio_context = g_new(AioContext *, conf->num_queues); if (conf->iothread_vq_mapping_list) { - if (!apply_iothread_vq_mapping(conf->iothread_vq_mapping_list, + if (!iothread_vq_mapping_apply(conf->iothread_vq_mapping_list, s->vq_aio_context, conf->num_queues, errp)) { @@ -1615,12 +1491,7 @@ static void virtio_blk_vq_aio_context_cleanup(VirtIOBlock *s) assert(!s->ioeventfd_started); if (conf->iothread_vq_mapping_list) { - IOThreadVirtQueueMappingList *node; - - for (node = conf->iothread_vq_mapping_list; node; node = node->next) { - IOThread *iothread = iothread_by_id(node->value->iothread); - object_unref(OBJECT(iothread)); - } + iothread_vq_mapping_cleanup(conf->iothread_vq_mapping_list); } if (conf->iothread) { @@ -1932,7 +1803,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) * called after ->start_ioeventfd() has already set blk's AioContext. */ s->change = - qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s); + qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, NULL, s); blk_ram_registrar_init(&s->blk_ram_registrar, s->blk); blk_set_dev_ops(s->blk, &virtio_block_ops, s); @@ -1985,7 +1856,7 @@ static const VMStateDescription vmstate_virtio_blk = { }, }; -static Property virtio_blk_properties[] = { +static const Property virtio_blk_properties[] = { DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf), DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf), DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf), @@ -2014,10 +1885,9 @@ static Property virtio_blk_properties[] = { conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS), DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock, conf.x_enable_wce_if_config_wce, true), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_blk_class_init(ObjectClass *klass, void *data) +static void virtio_blk_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); @@ -2044,6 +1914,7 @@ static const TypeInfo virtio_blk_info = { .instance_size = sizeof(VirtIOBlock), .instance_init = virtio_blk_instance_init, .class_init = virtio_blk_class_init, + .class_size = sizeof(VirtIOBlkClass), }; static void virtio_register_types(void) diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c index aed1d5c330b..74de897c798 100644 --- a/hw/block/xen-block.c +++ b/hw/block/xen-block.c @@ -16,16 +16,16 @@ #include "qapi/qapi-visit-block-core.h" #include "qapi/qobject-input-visitor.h" #include "qapi/visitor.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "qom/object_interfaces.h" #include "hw/block/xen_blkif.h" #include "hw/qdev-properties.h" #include "hw/xen/xen-block.h" #include "hw/xen/xen-backend.h" -#include "sysemu/blockdev.h" -#include "sysemu/block-backend.h" -#include "sysemu/iothread.h" +#include "system/blockdev.h" +#include "system/block-backend.h" +#include "system/iothread.h" #include "dataplane/xen-block.h" #include "hw/xen/interface/io/xs_wire.h" #include "trace.h" @@ -239,7 +239,8 @@ static void xen_block_connect(XenDevice *xendev, Error **errp) return; } - if (xen_device_frontend_scanf(xendev, "protocol", "%ms", &str) != 1) { + str = xen_device_frontend_read(xendev, "protocol"); + if (!str) { /* x86 defaults to the 32-bit protocol even for 64-bit guests. 
*/ if (object_dynamic_cast(OBJECT(qdev_get_machine()), "x86-machine")) { protocol = BLKIF_PROTOCOL_X86_32; @@ -407,6 +408,8 @@ static void xen_block_realize(XenDevice *xendev, Error **errp) } xen_device_backend_printf(xendev, "info", "%u", blockdev->info); + xen_device_backend_printf(xendev, "mode", + (blockdev->info & VDISK_READONLY) ? "r" : "w"); xen_device_frontend_printf(xendev, "virtual-device", "%lu", vdev->number); @@ -485,7 +488,7 @@ static char *disk_to_vbd_name(unsigned int disk) static void xen_block_get_vdev(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; XenBlockVdev *vdev = object_field_prop_ptr(obj, prop); char *str; @@ -545,7 +548,7 @@ static int vbd_name_to_disk(const char *name, const char **endp, static void xen_block_set_vdev(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; XenBlockVdev *vdev = object_field_prop_ptr(obj, prop); char *str, *p; const char *end; @@ -659,14 +662,14 @@ static void xen_block_set_vdev(Object *obj, Visitor *v, const char *name, * * https://xenbits.xen.org/docs/unstable/man/xen-vbd-interface.7.html */ -const PropertyInfo xen_block_prop_vdev = { - .name = "str", - .description = "Virtual Disk specifier: d*p*/xvd*/hd*/sd*", +static const PropertyInfo xen_block_prop_vdev = { + .type = "str", + .description = "Virtual Disk specifier (d*p*/xvd*/hd*/sd*)", .get = xen_block_get_vdev, .set = xen_block_set_vdev, }; -static Property xen_block_props[] = { +static const Property xen_block_props[] = { DEFINE_PROP("vdev", XenBlockDevice, props.vdev, xen_block_prop_vdev, XenBlockVdev), DEFINE_BLOCK_PROPERTIES(XenBlockDevice, props.conf), @@ -674,10 +677,9 @@ static Property xen_block_props[] = { props.max_ring_page_order, 4), DEFINE_PROP_LINK("iothread", XenBlockDevice, props.iothread, TYPE_IOTHREAD, IOThread *), - DEFINE_PROP_END_OF_LIST() }; -static void xen_block_class_init(ObjectClass *class, void *data) +static void xen_block_class_init(ObjectClass *class, const void *data) { DeviceClass *dev_class = DEVICE_CLASS(class); XenDeviceClass *xendev_class = XEN_DEVICE_CLASS(class); @@ -722,7 +724,7 @@ static void xen_disk_realize(XenBlockDevice *blockdev, Error **errp) blockdev->info = blk_supports_write_perm(conf->blk) ? 
0 : VDISK_READONLY; } -static void xen_disk_class_init(ObjectClass *class, void *data) +static void xen_disk_class_init(ObjectClass *class, const void *data) { DeviceClass *dev_class = DEVICE_CLASS(class); XenBlockDeviceClass *blockdev_class = XEN_BLOCK_DEVICE_CLASS(class); @@ -769,7 +771,7 @@ static void xen_cdrom_realize(XenBlockDevice *blockdev, Error **errp) blockdev->info = VDISK_READONLY | VDISK_CDROM; } -static void xen_cdrom_class_init(ObjectClass *class, void *data) +static void xen_cdrom_class_init(ObjectClass *class, const void *data) { DeviceClass *dev_class = DEVICE_CLASS(class); XenBlockDeviceClass *blockdev_class = XEN_BLOCK_DEVICE_CLASS(class); diff --git a/hw/char/Kconfig b/hw/char/Kconfig index 4fd74ea8788..020c0a84bb6 100644 --- a/hw/char/Kconfig +++ b/hw/char/Kconfig @@ -11,6 +11,12 @@ config PARALLEL config PL011 bool + # The PL011 has both a Rust and a C implementation + select PL011_C if !HAVE_RUST + select X_PL011_RUST if HAVE_RUST + +config PL011_C + bool config SERIAL bool @@ -21,6 +27,10 @@ config SERIAL_ISA depends on ISA_BUS select SERIAL +config SERIAL_MM + bool + select SERIAL + config SERIAL_PCI bool default y if PCI_DEVICES @@ -38,6 +48,9 @@ config VIRTIO_SERIAL default y depends on VIRTIO +config MAX78000_UART + bool + config STM32F2XX_USART bool @@ -62,6 +75,9 @@ config RENESAS_SCI config AVR_USART bool +config DIVA_GSP + bool + config MCHP_PFSOC_MMUART bool select SERIAL @@ -74,3 +90,8 @@ config GOLDFISH_TTY config SHAKTI_UART bool + +config IP_OCTAL_232 + bool + default y + depends on IPACK diff --git a/hw/char/avr_usart.c b/hw/char/avr_usart.c index 5bcf9db0b78..fae15217e9f 100644 --- a/hw/char/avr_usart.c +++ b/hw/char/avr_usart.c @@ -86,7 +86,7 @@ static void update_char_mask(AVRUsartState *usart) usart->char_mask = 0b11111111; break; default: - assert(0); + g_assert_not_reached(); } } @@ -259,9 +259,8 @@ static const MemoryRegionOps avr_usart_ops = { .impl = {.min_access_size = 1, .max_access_size = 1} }; -static Property avr_usart_properties[] = { +static const Property avr_usart_properties[] = { DEFINE_PROP_CHR("chardev", AVRUsartState, chr), - DEFINE_PROP_END_OF_LIST(), }; static void avr_usart_pr(void *opaque, int irq, int level) @@ -296,11 +295,11 @@ static void avr_usart_realize(DeviceState *dev, Error **errp) avr_usart_reset(dev); } -static void avr_usart_class_init(ObjectClass *klass, void *data) +static void avr_usart_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = avr_usart_reset; + device_class_set_legacy_reset(dc, avr_usart_reset); device_class_set_props(dc, avr_usart_properties); dc->realize = avr_usart_realize; } diff --git a/hw/char/bcm2835_aux.c b/hw/char/bcm2835_aux.c index fca2f27a553..2b397f2ff39 100644 --- a/hw/char/bcm2835_aux.c +++ b/hw/char/bcm2835_aux.c @@ -98,7 +98,7 @@ static uint64_t bcm2835_aux_read(void *opaque, hwaddr offset, unsigned size) * interrupts are active, besides that this cannot occur. At * present, we choose to prioritise the rx interrupt, since * the tx fifo is always empty. 
*/ - if (s->read_count != 0) { + if ((s->iir & RX_INT) && s->read_count != 0) { res |= 0x4; } else { res |= 0x2; @@ -221,7 +221,7 @@ static int bcm2835_aux_can_receive(void *opaque) { BCM2835AuxState *s = opaque; - return s->read_count < BCM2835_AUX_RX_FIFO_LEN; + return BCM2835_AUX_RX_FIFO_LEN - s->read_count; } static void bcm2835_aux_put_fifo(void *opaque, uint8_t value) @@ -243,7 +243,9 @@ static void bcm2835_aux_put_fifo(void *opaque, uint8_t value) static void bcm2835_aux_receive(void *opaque, const uint8_t *buf, int size) { - bcm2835_aux_put_fifo(opaque, *buf); + for (int i = 0; i < size; i++) { + bcm2835_aux_put_fifo(opaque, buf[i]); + } } static const MemoryRegionOps bcm2835_aux_ops = { @@ -290,12 +292,11 @@ static void bcm2835_aux_realize(DeviceState *dev, Error **errp) bcm2835_aux_receive, NULL, NULL, s, NULL, true); } -static Property bcm2835_aux_props[] = { +static const Property bcm2835_aux_props[] = { DEFINE_PROP_CHR("chardev", BCM2835AuxState, chr), - DEFINE_PROP_END_OF_LIST(), }; -static void bcm2835_aux_class_init(ObjectClass *oc, void *data) +static void bcm2835_aux_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c index 77d9a2a221f..0dfa356b6d0 100644 --- a/hw/char/cadence_uart.c +++ b/hw/char/cadence_uart.c @@ -617,12 +617,11 @@ static const VMStateDescription vmstate_cadence_uart = { }, }; -static Property cadence_uart_properties[] = { +static const Property cadence_uart_properties[] = { DEFINE_PROP_CHR("chardev", CadenceUARTState, chr), - DEFINE_PROP_END_OF_LIST(), }; -static void cadence_uart_class_init(ObjectClass *klass, void *data) +static void cadence_uart_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/char/cmsdk-apb-uart.c b/hw/char/cmsdk-apb-uart.c index d07cca1bd42..32090f3516f 100644 --- a/hw/char/cmsdk-apb-uart.c +++ b/hw/char/cmsdk-apb-uart.c @@ -377,19 +377,18 @@ static const VMStateDescription cmsdk_apb_uart_vmstate = { } }; -static Property cmsdk_apb_uart_properties[] = { +static const Property cmsdk_apb_uart_properties[] = { DEFINE_PROP_CHR("chardev", CMSDKAPBUART, chr), DEFINE_PROP_UINT32("pclk-frq", CMSDKAPBUART, pclk_frq, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void cmsdk_apb_uart_class_init(ObjectClass *klass, void *data) +static void cmsdk_apb_uart_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = cmsdk_apb_uart_realize; dc->vmsd = &cmsdk_apb_uart_vmstate; - dc->reset = cmsdk_apb_uart_reset; + device_class_set_legacy_reset(dc, cmsdk_apb_uart_reset); device_class_set_props(dc, cmsdk_apb_uart_properties); } diff --git a/hw/char/debugcon.c b/hw/char/debugcon.c index fdb04fee093..bf44aaf9e4a 100644 --- a/hw/char/debugcon.c +++ b/hw/char/debugcon.c @@ -114,14 +114,13 @@ static void debugcon_isa_realizefn(DeviceState *dev, Error **errp) isa->iobase, &s->io); } -static Property debugcon_isa_properties[] = { +static const Property debugcon_isa_properties[] = { DEFINE_PROP_UINT32("iobase", ISADebugconState, iobase, 0xe9), DEFINE_PROP_CHR("chardev", ISADebugconState, state.chr), DEFINE_PROP_UINT32("readback", ISADebugconState, state.readback, 0xe9), - DEFINE_PROP_END_OF_LIST(), }; -static void debugcon_isa_class_initfn(ObjectClass *klass, void *data) +static void debugcon_isa_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/char/digic-uart.c 
b/hw/char/digic-uart.c index ef2d7627262..0f6af51bb7b 100644 --- a/hw/char/digic-uart.c +++ b/hw/char/digic-uart.c @@ -172,17 +172,16 @@ static const VMStateDescription vmstate_digic_uart = { } }; -static Property digic_uart_properties[] = { +static const Property digic_uart_properties[] = { DEFINE_PROP_CHR("chardev", DigicUartState, chr), - DEFINE_PROP_END_OF_LIST(), }; -static void digic_uart_class_init(ObjectClass *klass, void *data) +static void digic_uart_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = digic_uart_realize; - dc->reset = digic_uart_reset; + device_class_set_legacy_reset(dc, digic_uart_reset); dc->vmsd = &vmstate_digic_uart; device_class_set_props(dc, digic_uart_properties); } diff --git a/hw/char/diva-gsp.c b/hw/char/diva-gsp.c new file mode 100644 index 00000000000..e1f0713cb79 --- /dev/null +++ b/hw/char/diva-gsp.c @@ -0,0 +1,295 @@ +/* + * HP Diva GSP controller + * + * The Diva PCI boards are Remote Management cards for PA-RISC machines. + * They come with built-in 16550A multi UARTs for serial consoles + * and a mailbox-like memory area for hardware auto-reboot functionality. + * GSP stands for "Guardian Service Processor". Later products were marketed + * "Management Processor" (MP). + * + * Diva cards are multifunctional cards. The first part, the aux port, + * is on physical machines not useable but we still try to mimic it here. + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Copyright (c) 2025 Helge Deller + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/char/serial.h" +#include "hw/irq.h" +#include "hw/pci/pci_device.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" + +#define PCI_DEVICE_ID_HP_DIVA 0x1048 +/* various DIVA GSP cards: */ +#define PCI_DEVICE_ID_HP_DIVA_TOSCA1 0x1049 +#define PCI_DEVICE_ID_HP_DIVA_TOSCA2 0x104A +#define PCI_DEVICE_ID_HP_DIVA_MAESTRO 0x104B +#define PCI_DEVICE_ID_HP_REO_IOC 0x10f1 +#define PCI_DEVICE_ID_HP_DIVA_HALFDOME 0x1223 +#define PCI_DEVICE_ID_HP_DIVA_KEYSTONE 0x1226 +#define PCI_DEVICE_ID_HP_DIVA_POWERBAR 0x1227 +#define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282 +#define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290 +#define PCI_DEVICE_ID_HP_DIVA_RMP3 0x1301 +#define PCI_DEVICE_ID_HP_DIVA_HURRICANE 0x132a + + +#define PCI_SERIAL_MAX_PORTS 4 + +typedef struct PCIDivaSerialState { + PCIDevice dev; + MemoryRegion membar; /* for serial ports */ + MemoryRegion mailboxbar; /* for hardware mailbox */ + uint32_t subvendor; + uint32_t ports; + char *name[PCI_SERIAL_MAX_PORTS]; + SerialState state[PCI_SERIAL_MAX_PORTS]; + uint32_t level[PCI_SERIAL_MAX_PORTS]; + qemu_irq *irqs; + bool disable; +} PCIDivaSerialState; + +static void diva_pci_exit(PCIDevice *dev) +{ + PCIDivaSerialState *pci = DO_UPCAST(PCIDivaSerialState, dev, dev); + SerialState *s; + int i; + + for (i = 0; i < pci->ports; i++) { + s = pci->state + i; + qdev_unrealize(DEVICE(s)); + memory_region_del_subregion(&pci->membar, &s->io); + g_free(pci->name[i]); + } + qemu_free_irqs(pci->irqs, pci->ports); +} + +static void multi_serial_irq_mux(void *opaque, int n, int level) +{ + PCIDivaSerialState *pci = opaque; + int i, pending = 0; + + pci->level[n] = level; + for (i = 0; i < pci->ports; i++) { + if (pci->level[i]) { + pending = 1; + } + } + pci_set_irq(&pci->dev, pending); +} + +struct diva_info { + unsigned int nports:4; /* number of serial ports */ + unsigned int omask:12; /* offset mask: BIT(1) -> offset 8 */ +}; + +static struct diva_info 
diva_get_diva_info(PCIDeviceClass *pc) +{ + switch (pc->subsystem_id) { + case PCI_DEVICE_ID_HP_DIVA_POWERBAR: + case PCI_DEVICE_ID_HP_DIVA_HURRICANE: + return (struct diva_info) { .nports = 1, + .omask = BIT(0) }; + case PCI_DEVICE_ID_HP_DIVA_TOSCA2: + return (struct diva_info) { .nports = 2, + .omask = BIT(0) | BIT(1) }; + case PCI_DEVICE_ID_HP_DIVA_TOSCA1: + case PCI_DEVICE_ID_HP_DIVA_HALFDOME: + case PCI_DEVICE_ID_HP_DIVA_KEYSTONE: + return (struct diva_info) { .nports = 3, + .omask = BIT(0) | BIT(1) | BIT(2) }; + case PCI_DEVICE_ID_HP_DIVA_EVEREST: /* e.g. in rp3410 */ + return (struct diva_info) { .nports = 3, + .omask = BIT(0) | BIT(2) | BIT(7) }; + case PCI_DEVICE_ID_HP_DIVA_MAESTRO: + return (struct diva_info) { .nports = 4, + .omask = BIT(0) | BIT(1) | BIT(2) | BIT(7) }; + } + g_assert_not_reached(); +} + + +static void diva_pci_realize(PCIDevice *dev, Error **errp) +{ + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); + PCIDivaSerialState *pci = DO_UPCAST(PCIDivaSerialState, dev, dev); + SerialState *s; + struct diva_info di = diva_get_diva_info(pc); + size_t i, offset = 0; + size_t portmask = di.omask; + + pci->dev.config[PCI_CLASS_PROG] = 2; /* 16550 compatible */ + pci->dev.config[PCI_INTERRUPT_PIN] = 1; + memory_region_init(&pci->membar, OBJECT(pci), "serial_ports", 4096); + pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &pci->membar); + pci->irqs = qemu_allocate_irqs(multi_serial_irq_mux, pci, di.nports); + + for (i = 0; i < di.nports; i++) { + s = pci->state + i; + if (!qdev_realize(DEVICE(s), NULL, errp)) { + diva_pci_exit(dev); + return; + } + s->irq = pci->irqs[i]; + pci->name[i] = g_strdup_printf("uart #%zu", i + 1); + memory_region_init_io(&s->io, OBJECT(pci), &serial_io_ops, s, + pci->name[i], 8); + + /* calculate offset of given port based on bitmask */ + while ((portmask & BIT(0)) == 0) { + offset += 8; + portmask >>= 1; + } + memory_region_add_subregion(&pci->membar, offset, &s->io); + offset += 8; + portmask >>= 1; + pci->ports++; + } + + /* mailbox bar */ + memory_region_init(&pci->mailboxbar, OBJECT(pci), "mailbox", 128 * KiB); + pci_register_bar(&pci->dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_PREFETCH, &pci->mailboxbar); +} + +static const VMStateDescription vmstate_pci_diva = { + .name = "pci-diva-serial", + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, PCIDivaSerialState), + VMSTATE_STRUCT_ARRAY(state, PCIDivaSerialState, PCI_SERIAL_MAX_PORTS, + 0, vmstate_serial, SerialState), + VMSTATE_UINT32_ARRAY(level, PCIDivaSerialState, PCI_SERIAL_MAX_PORTS), + VMSTATE_BOOL(disable, PCIDivaSerialState), + VMSTATE_END_OF_LIST() + } +}; + +static const Property diva_serial_properties[] = { + DEFINE_PROP_BOOL("disable", PCIDivaSerialState, disable, false), + DEFINE_PROP_CHR("chardev1", PCIDivaSerialState, state[0].chr), + DEFINE_PROP_CHR("chardev2", PCIDivaSerialState, state[1].chr), + DEFINE_PROP_CHR("chardev3", PCIDivaSerialState, state[2].chr), + DEFINE_PROP_CHR("chardev4", PCIDivaSerialState, state[3].chr), + DEFINE_PROP_UINT32("subvendor", PCIDivaSerialState, subvendor, + PCI_DEVICE_ID_HP_DIVA_TOSCA1), +}; + +static void diva_serial_class_initfn(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass); + pc->realize = diva_pci_realize; + pc->exit = diva_pci_exit; + pc->vendor_id = PCI_VENDOR_ID_HP; + pc->device_id = PCI_DEVICE_ID_HP_DIVA; + pc->subsystem_vendor_id = PCI_VENDOR_ID_HP; + 
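diva_pci_realize() places each UART inside the 4 KiB serial_ports BAR by walking the omask bitmap: every set bit n maps one 8-byte 16550 window at byte offset n * 8, and cleared bits only advance the offset. A standalone sketch of that walk, using the Everest mask BIT(0) | BIT(2) | BIT(7) from the table above (prints offsets 0x00, 0x10 and 0x38):

#include <stdio.h>

int main(void)
{
    unsigned portmask = (1u << 0) | (1u << 2) | (1u << 7);  /* Everest */
    unsigned offset = 0;

    while (portmask) {
        while ((portmask & 1) == 0) {   /* cleared bit: skip this window */
            offset += 8;
            portmask >>= 1;
        }
        printf("UART window at offset 0x%02x\n", offset);
        offset += 8;
        portmask >>= 1;
    }
    return 0;
}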
pc->subsystem_id = PCI_DEVICE_ID_HP_DIVA_TOSCA1; + pc->revision = 3; + pc->class_id = PCI_CLASS_COMMUNICATION_SERIAL; + dc->vmsd = &vmstate_pci_diva; + device_class_set_props(dc, diva_serial_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static void diva_serial_init(Object *o) +{ + PCIDevice *dev = PCI_DEVICE(o); + PCIDivaSerialState *pms = DO_UPCAST(PCIDivaSerialState, dev, dev); + struct diva_info di = diva_get_diva_info(PCI_DEVICE_GET_CLASS(dev)); + size_t i; + + for (i = 0; i < di.nports; i++) { + object_initialize_child(o, "serial[*]", &pms->state[i], TYPE_SERIAL); + } +} + + +/* Diva-aux is the driver for portion 0 of the multifunction PCI device */ + +struct DivaAuxState { + PCIDevice dev; + MemoryRegion mem; + qemu_irq irq; +}; + +#define TYPE_DIVA_AUX "diva-aux" +OBJECT_DECLARE_SIMPLE_TYPE(DivaAuxState, DIVA_AUX) + +static void diva_aux_realize(PCIDevice *dev, Error **errp) +{ + DivaAuxState *pci = DO_UPCAST(DivaAuxState, dev, dev); + + pci->dev.config[PCI_CLASS_PROG] = 0x02; + pci->dev.config[PCI_INTERRUPT_PIN] = 0x01; + pci->irq = pci_allocate_irq(&pci->dev); + + memory_region_init(&pci->mem, OBJECT(pci), "mem", 16); + pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &pci->mem); +} + +static void diva_aux_exit(PCIDevice *dev) +{ + DivaAuxState *pci = DO_UPCAST(DivaAuxState, dev, dev); + qemu_free_irq(pci->irq); +} + +static void diva_aux_class_initfn(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass); + pc->realize = diva_aux_realize; + pc->exit = diva_aux_exit; + pc->vendor_id = PCI_VENDOR_ID_HP; + pc->device_id = PCI_DEVICE_ID_HP_DIVA_AUX; + pc->subsystem_vendor_id = PCI_VENDOR_ID_HP; + pc->subsystem_id = 0x1291; + pc->revision = 1; + pc->class_id = PCI_CLASS_COMMUNICATION_MULTISERIAL; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->user_creatable = false; +} + +static void diva_aux_init(Object *o) +{ +} + +static const TypeInfo diva_aux_info = { + .name = TYPE_DIVA_AUX, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(DivaAuxState), + .instance_init = diva_aux_init, + .class_init = diva_aux_class_initfn, + .interfaces = (const InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + + + +static const TypeInfo diva_serial_pci_info = { + .name = "diva-gsp", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDivaSerialState), + .instance_init = diva_serial_init, + .class_init = diva_serial_class_initfn, + .interfaces = (const InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void diva_pci_register_type(void) +{ + type_register_static(&diva_serial_pci_info); + type_register_static(&diva_aux_info); +} + +type_init(diva_pci_register_type) diff --git a/hw/char/escc.c b/hw/char/escc.c index d450d70eda1..afe4ca483e7 100644 --- a/hw/char/escc.c +++ b/hw/char/escc.c @@ -287,6 +287,7 @@ static void escc_reset_chn(ESCCChannelState *s) s->rxint = s->txint = 0; s->rxint_under_svc = s->txint_under_svc = 0; s->e0_mode = s->led_mode = s->caps_lock_mode = s->num_lock_mode = 0; + s->sunmouse_dx = s->sunmouse_dy = s->sunmouse_buttons = 0; clear_queue(s); } @@ -952,53 +953,96 @@ static void handle_kbd_command(ESCCChannelState *s, int val) } } -static void sunmouse_event(void *opaque, - int dx, int dy, int dz, int buttons_state) +static void sunmouse_handle_event(DeviceState *dev, QemuConsole *src, + InputEvent *evt) { - ESCCChannelState *s = opaque; - int ch; + ESCCChannelState *s = (ESCCChannelState *)dev; + 
InputMoveEvent *move; + InputBtnEvent *btn; + static const int bmap[INPUT_BUTTON__MAX] = { + [INPUT_BUTTON_LEFT] = 0x4, + [INPUT_BUTTON_MIDDLE] = 0x2, + [INPUT_BUTTON_RIGHT] = 0x1, + }; + + switch (evt->type) { + case INPUT_EVENT_KIND_REL: + move = evt->u.rel.data; + if (move->axis == INPUT_AXIS_X) { + s->sunmouse_dx += move->value; + } else if (move->axis == INPUT_AXIS_Y) { + s->sunmouse_dy -= move->value; + } + break; - trace_escc_sunmouse_event(dx, dy, buttons_state); - ch = 0x80 | 0x7; /* protocol start byte, no buttons pressed */ + case INPUT_EVENT_KIND_BTN: + btn = evt->u.btn.data; + if (bmap[btn->button]) { + if (btn->down) { + s->sunmouse_buttons |= bmap[btn->button]; + } else { + s->sunmouse_buttons &= ~bmap[btn->button]; + } + /* Indicate we have a supported button event */ + s->sunmouse_buttons |= 0x80; + } + break; - if (buttons_state & MOUSE_EVENT_LBUTTON) { - ch ^= 0x4; - } - if (buttons_state & MOUSE_EVENT_MBUTTON) { - ch ^= 0x2; + default: + /* keep gcc happy */ + break; } - if (buttons_state & MOUSE_EVENT_RBUTTON) { - ch ^= 0x1; +} + +static void sunmouse_sync(DeviceState *dev) +{ + ESCCChannelState *s = (ESCCChannelState *)dev; + int ch; + + if (s->sunmouse_dx == 0 && s->sunmouse_dy == 0 && + (s->sunmouse_buttons & 0x80) == 0) { + /* Nothing to do after button event filter */ + return; } + /* Clear our button event flag */ + s->sunmouse_buttons &= ~0x80; + trace_escc_sunmouse_event(s->sunmouse_dx, s->sunmouse_dy, + s->sunmouse_buttons); + ch = 0x80 | 0x7; /* protocol start byte, no buttons pressed */ + ch ^= s->sunmouse_buttons; put_queue(s, ch); - ch = dx; - + ch = s->sunmouse_dx; if (ch > 127) { ch = 127; } else if (ch < -127) { ch = -127; } - put_queue(s, ch & 0xff); + s->sunmouse_dx -= ch; - ch = -dy; - + ch = s->sunmouse_dy; if (ch > 127) { ch = 127; } else if (ch < -127) { ch = -127; } - put_queue(s, ch & 0xff); + s->sunmouse_dy -= ch; /* MSC protocol specifies two extra motion bytes */ - put_queue(s, 0); put_queue(s, 0); } +static const QemuInputHandler sunmouse_handler = { + .name = "QEMU Sun Mouse", + .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL, + .event = sunmouse_handle_event, + .sync = sunmouse_sync, +}; + static void escc_init1(Object *obj) { ESCCState *s = ESCC(obj); @@ -1036,8 +1080,8 @@ static void escc_realize(DeviceState *dev, Error **errp) } if (s->chn[0].type == escc_mouse) { - qemu_add_mouse_event_handler(sunmouse_event, &s->chn[0], 0, - "QEMU Sun Mouse"); + s->chn[0].hs = qemu_input_handler_register((DeviceState *)(&s->chn[0]), + &sunmouse_handler); } if (s->chn[1].type == escc_kbd) { s->chn[1].hs = qemu_input_handler_register((DeviceState *)(&s->chn[1]), @@ -1045,7 +1089,7 @@ static void escc_realize(DeviceState *dev, Error **errp) } } -static Property escc_properties[] = { +static const Property escc_properties[] = { DEFINE_PROP_UINT32("frequency", ESCCState, frequency, 0), DEFINE_PROP_UINT32("it_shift", ESCCState, it_shift, 0), DEFINE_PROP_BOOL("bit_swap", ESCCState, bit_swap, false), @@ -1055,14 +1099,13 @@ static Property escc_properties[] = { DEFINE_PROP_CHR("chrB", ESCCState, chn[0].chr), DEFINE_PROP_CHR("chrA", ESCCState, chn[1].chr), DEFINE_PROP_STRING("chnA-sunkbd-layout", ESCCState, chn[1].sunkbd_layout), - DEFINE_PROP_END_OF_LIST(), }; -static void escc_class_init(ObjectClass *klass, void *data) +static void escc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = escc_reset; + device_class_set_legacy_reset(dc, escc_reset); dc->realize = escc_realize; dc->vmsd = &vmstate_escc; 
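sunmouse_sync() flushes the accumulated state as a classic 5-byte Mouse Systems (MSC) packet: a start byte of 0x80 | 0x7 whose pressed-button bits are cleared by the XOR, a signed dx and dy each clamped to ±127 (the event handler already negates the Y axis, and any residue beyond the clamp stays queued for the next packet), and two zero "extra motion" bytes. A standalone sketch of just the packet layout; the helper name and example values are illustrative:

#include <stdint.h>
#include <stdio.h>

static int clamp7(int v)
{
    return v > 127 ? 127 : (v < -127 ? -127 : v);
}

static void msc_encode(int dx, int dy, unsigned buttons, uint8_t pkt[5])
{
    pkt[0] = (0x80 | 0x7) ^ buttons;   /* buttons: 0x4=left 0x2=middle 0x1=right */
    pkt[1] = (uint8_t)clamp7(dx);
    pkt[2] = (uint8_t)clamp7(dy);      /* dy already flipped so "up" is positive */
    pkt[3] = 0;                        /* two extra motion bytes required */
    pkt[4] = 0;                        /* by the MSC protocol             */
}

int main(void)
{
    uint8_t pkt[5];

    msc_encode(10, -3, 0x4, pkt);      /* small move with left button held */
    for (int i = 0; i < 5; i++) {
        printf("%02x ", pkt[i]);
    }
    printf("\n");
    return 0;
}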
device_class_set_props(dc, escc_properties); diff --git a/hw/char/etraxfs_ser.c b/hw/char/etraxfs_ser.c deleted file mode 100644 index 8d6422dae41..00000000000 --- a/hw/char/etraxfs_ser.c +++ /dev/null @@ -1,267 +0,0 @@ -/* - * QEMU ETRAX System Emulator - * - * Copyright (c) 2007 Edgar E. Iglesias, Axis Communications AB. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "qemu/osdep.h" -#include "hw/irq.h" -#include "hw/qdev-properties.h" -#include "hw/qdev-properties-system.h" -#include "hw/sysbus.h" -#include "chardev/char-fe.h" -#include "qemu/log.h" -#include "qemu/module.h" -#include "qom/object.h" - -#define D(x) - -#define RW_TR_CTRL (0x00 / 4) -#define RW_TR_DMA_EN (0x04 / 4) -#define RW_REC_CTRL (0x08 / 4) -#define RW_DOUT (0x1c / 4) -#define RS_STAT_DIN (0x20 / 4) -#define R_STAT_DIN (0x24 / 4) -#define RW_INTR_MASK (0x2c / 4) -#define RW_ACK_INTR (0x30 / 4) -#define R_INTR (0x34 / 4) -#define R_MASKED_INTR (0x38 / 4) -#define R_MAX (0x3c / 4) - -#define STAT_DAV 16 -#define STAT_TR_IDLE 22 -#define STAT_TR_RDY 24 - -#define TYPE_ETRAX_FS_SERIAL "etraxfs-serial" -typedef struct ETRAXSerial ETRAXSerial; -DECLARE_INSTANCE_CHECKER(ETRAXSerial, ETRAX_SERIAL, - TYPE_ETRAX_FS_SERIAL) - -struct ETRAXSerial { - SysBusDevice parent_obj; - - MemoryRegion mmio; - CharBackend chr; - qemu_irq irq; - - int pending_tx; - - uint8_t rx_fifo[16]; - unsigned int rx_fifo_pos; - unsigned int rx_fifo_len; - - /* Control registers. 
*/ - uint32_t regs[R_MAX]; -}; - -static void ser_update_irq(ETRAXSerial *s) -{ - - if (s->rx_fifo_len) { - s->regs[R_INTR] |= 8; - } else { - s->regs[R_INTR] &= ~8; - } - - s->regs[R_MASKED_INTR] = s->regs[R_INTR] & s->regs[RW_INTR_MASK]; - qemu_set_irq(s->irq, !!s->regs[R_MASKED_INTR]); -} - -static uint64_t -ser_read(void *opaque, hwaddr addr, unsigned int size) -{ - ETRAXSerial *s = opaque; - uint32_t r = 0; - - addr >>= 2; - switch (addr) - { - case R_STAT_DIN: - r = s->rx_fifo[(s->rx_fifo_pos - s->rx_fifo_len) & 15]; - if (s->rx_fifo_len) { - r |= 1 << STAT_DAV; - } - r |= 1 << STAT_TR_RDY; - r |= 1 << STAT_TR_IDLE; - break; - case RS_STAT_DIN: - r = s->rx_fifo[(s->rx_fifo_pos - s->rx_fifo_len) & 15]; - if (s->rx_fifo_len) { - r |= 1 << STAT_DAV; - s->rx_fifo_len--; - } - r |= 1 << STAT_TR_RDY; - r |= 1 << STAT_TR_IDLE; - break; - default: - r = s->regs[addr]; - D(qemu_log("%s " HWADDR_FMT_plx "=%x\n", __func__, addr, r)); - break; - } - return r; -} - -static void -ser_write(void *opaque, hwaddr addr, - uint64_t val64, unsigned int size) -{ - ETRAXSerial *s = opaque; - uint32_t value = val64; - unsigned char ch = val64; - - D(qemu_log("%s " HWADDR_FMT_plx "=%x\n", __func__, addr, value)); - addr >>= 2; - switch (addr) - { - case RW_DOUT: - /* XXX this blocks entire thread. Rewrite to use - * qemu_chr_fe_write and background I/O callbacks */ - qemu_chr_fe_write_all(&s->chr, &ch, 1); - s->regs[R_INTR] |= 3; - s->pending_tx = 1; - s->regs[addr] = value; - break; - case RW_ACK_INTR: - if (s->pending_tx) { - value &= ~1; - s->pending_tx = 0; - D(qemu_log("fixedup value=%x r_intr=%x\n", - value, s->regs[R_INTR])); - } - s->regs[addr] = value; - s->regs[R_INTR] &= ~value; - D(printf("r_intr=%x\n", s->regs[R_INTR])); - break; - default: - s->regs[addr] = value; - break; - } - ser_update_irq(s); -} - -static const MemoryRegionOps ser_ops = { - .read = ser_read, - .write = ser_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4 - } -}; - -static Property etraxfs_ser_properties[] = { - DEFINE_PROP_CHR("chardev", ETRAXSerial, chr), - DEFINE_PROP_END_OF_LIST(), -}; - -static void serial_receive(void *opaque, const uint8_t *buf, int size) -{ - ETRAXSerial *s = opaque; - int i; - - /* Got a byte. */ - if (s->rx_fifo_len >= 16) { - D(qemu_log("WARNING: UART dropped char.\n")); - return; - } - - for (i = 0; i < size; i++) { - s->rx_fifo[s->rx_fifo_pos] = buf[i]; - s->rx_fifo_pos++; - s->rx_fifo_pos &= 15; - s->rx_fifo_len++; - } - - ser_update_irq(s); -} - -static int serial_can_receive(void *opaque) -{ - ETRAXSerial *s = opaque; - - /* Is the receiver enabled? */ - if (!(s->regs[RW_REC_CTRL] & (1 << 3))) { - return 0; - } - - return sizeof(s->rx_fifo) - s->rx_fifo_len; -} - -static void serial_event(void *opaque, QEMUChrEvent event) -{ - -} - -static void etraxfs_ser_reset(DeviceState *d) -{ - ETRAXSerial *s = ETRAX_SERIAL(d); - - /* transmitter begins ready and idle. 
*/ - s->regs[RS_STAT_DIN] |= (1 << STAT_TR_RDY); - s->regs[RS_STAT_DIN] |= (1 << STAT_TR_IDLE); - - s->regs[RW_REC_CTRL] = 0x10000; - -} - -static void etraxfs_ser_init(Object *obj) -{ - ETRAXSerial *s = ETRAX_SERIAL(obj); - SysBusDevice *dev = SYS_BUS_DEVICE(obj); - - sysbus_init_irq(dev, &s->irq); - memory_region_init_io(&s->mmio, obj, &ser_ops, s, - "etraxfs-serial", R_MAX * 4); - sysbus_init_mmio(dev, &s->mmio); -} - -static void etraxfs_ser_realize(DeviceState *dev, Error **errp) -{ - ETRAXSerial *s = ETRAX_SERIAL(dev); - - qemu_chr_fe_set_handlers(&s->chr, - serial_can_receive, serial_receive, - serial_event, NULL, s, NULL, true); -} - -static void etraxfs_ser_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->reset = etraxfs_ser_reset; - device_class_set_props(dc, etraxfs_ser_properties); - dc->realize = etraxfs_ser_realize; -} - -static const TypeInfo etraxfs_ser_info = { - .name = TYPE_ETRAX_FS_SERIAL, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(ETRAXSerial), - .instance_init = etraxfs_ser_init, - .class_init = etraxfs_ser_class_init, -}; - -static void etraxfs_serial_register_types(void) -{ - type_register_static(&etraxfs_ser_info); -} - -type_init(etraxfs_serial_register_types) diff --git a/hw/char/exynos4210_uart.c b/hw/char/exynos4210_uart.c index 8cdd42e54fd..6521b4cedd9 100644 --- a/hw/char/exynos4210_uart.c +++ b/hw/char/exynos4210_uart.c @@ -704,20 +704,19 @@ static void exynos4210_uart_realize(DeviceState *dev, Error **errp) NULL, s, NULL, true); } -static Property exynos4210_uart_properties[] = { +static const Property exynos4210_uart_properties[] = { DEFINE_PROP_CHR("chardev", Exynos4210UartState, chr), DEFINE_PROP_UINT32("channel", Exynos4210UartState, channel, 0), DEFINE_PROP_UINT32("rx-size", Exynos4210UartState, rx.size, 16), DEFINE_PROP_UINT32("tx-size", Exynos4210UartState, tx.size, 16), - DEFINE_PROP_END_OF_LIST(), }; -static void exynos4210_uart_class_init(ObjectClass *klass, void *data) +static void exynos4210_uart_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = exynos4210_uart_realize; - dc->reset = exynos4210_uart_reset; + device_class_set_legacy_reset(dc, exynos4210_uart_reset); device_class_set_props(dc, exynos4210_uart_properties); dc->vmsd = &vmstate_exynos4210_uart; } diff --git a/hw/char/goldfish_tty.c b/hw/char/goldfish_tty.c index c2e1f6537f7..a37408adae5 100644 --- a/hw/char/goldfish_tty.c +++ b/hw/char/goldfish_tty.c @@ -15,8 +15,8 @@ #include "chardev/char-fe.h" #include "qemu/log.h" #include "trace.h" -#include "exec/address-spaces.h" -#include "sysemu/dma.h" +#include "system/address-spaces.h" +#include "system/dma.h" #include "hw/char/goldfish_tty.h" #define GOLDFISH_TTY_VERSION 1 @@ -241,9 +241,8 @@ static const VMStateDescription vmstate_goldfish_tty = { } }; -static Property goldfish_tty_properties[] = { +static const Property goldfish_tty_properties[] = { DEFINE_PROP_CHR("chardev", GoldfishTTYState, chr), - DEFINE_PROP_END_OF_LIST(), }; static void goldfish_tty_instance_init(Object *obj) @@ -257,12 +256,12 @@ static void goldfish_tty_instance_init(Object *obj) sysbus_init_irq(dev, &s->irq); } -static void goldfish_tty_class_init(ObjectClass *oc, void *data) +static void goldfish_tty_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); device_class_set_props(dc, goldfish_tty_properties); - dc->reset = goldfish_tty_reset; + device_class_set_legacy_reset(dc, goldfish_tty_reset); dc->realize = 
goldfish_tty_realize; dc->unrealize = goldfish_tty_unrealize; dc->vmsd = &vmstate_goldfish_tty; diff --git a/hw/char/grlib_apbuart.c b/hw/char/grlib_apbuart.c index 515b65bc070..81c26e33899 100644 --- a/hw/char/grlib_apbuart.c +++ b/hw/char/grlib_apbuart.c @@ -277,17 +277,16 @@ static void grlib_apbuart_reset(DeviceState *d) uart->current = 0; } -static Property grlib_apbuart_properties[] = { +static const Property grlib_apbuart_properties[] = { DEFINE_PROP_CHR("chrdev", UART, chr), - DEFINE_PROP_END_OF_LIST(), }; -static void grlib_apbuart_class_init(ObjectClass *klass, void *data) +static void grlib_apbuart_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = grlib_apbuart_realize; - dc->reset = grlib_apbuart_reset; + device_class_set_legacy_reset(dc, grlib_apbuart_reset); device_class_set_props(dc, grlib_apbuart_properties); } diff --git a/hw/char/ibex_uart.c b/hw/char/ibex_uart.c index 63aae6dc2c7..d6f0d18c777 100644 --- a/hw/char/ibex_uart.c +++ b/hw/char/ibex_uart.c @@ -508,9 +508,8 @@ static const VMStateDescription vmstate_ibex_uart = { } }; -static Property ibex_uart_properties[] = { +static const Property ibex_uart_properties[] = { DEFINE_PROP_CHR("chardev", IbexUartState, chr), - DEFINE_PROP_END_OF_LIST(), }; static void ibex_uart_init(Object *obj) @@ -543,11 +542,11 @@ static void ibex_uart_realize(DeviceState *dev, Error **errp) s, NULL, true); } -static void ibex_uart_class_init(ObjectClass *klass, void *data) +static void ibex_uart_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = ibex_uart_reset; + device_class_set_legacy_reset(dc, ibex_uart_reset); dc->realize = ibex_uart_realize; dc->vmsd = &vmstate_ibex_uart; device_class_set_props(dc, ibex_uart_properties); diff --git a/hw/char/imx_serial.c b/hw/char/imx_serial.c index ba37be6faab..509b0141d05 100644 --- a/hw/char/imx_serial.c +++ b/hw/char/imx_serial.c @@ -27,6 +27,7 @@ #include "qemu/log.h" #include "qemu/module.h" #include "qemu/fifo32.h" +#include "trace.h" #ifndef DEBUG_IMX_UART #define DEBUG_IMX_UART 0 @@ -159,6 +160,7 @@ static void imx_serial_reset(IMXSerialState *s) s->ucr3 = 0x700; s->ubmr = 0; s->ubrc = 4; + s->ufcr = BIT(11) | BIT(0); fifo32_reset(&s->rx_fifo); timer_del(&s->ageing_timer); @@ -184,10 +186,10 @@ static uint64_t imx_serial_read(void *opaque, hwaddr offset, unsigned size) { IMXSerialState *s = (IMXSerialState *)opaque; + Chardev *chr = qemu_chr_fe_get_driver(&s->chr); uint32_t c, rx_used; uint8_t rxtl = s->ufcr & TL_MASK; - - DPRINTF("read(offset=0x%" HWADDR_PRIx ")\n", offset); + uint64_t value; switch (offset >> 2) { case 0x0: /* URXD */ @@ -208,49 +210,67 @@ static uint64_t imx_serial_read(void *opaque, hwaddr offset, imx_serial_rx_fifo_ageing_timer_restart(s); qemu_chr_fe_accept_input(&s->chr); } - return c; + value = c; + break; case 0x20: /* UCR1 */ - return s->ucr1; + value = s->ucr1; + break; case 0x21: /* UCR2 */ - return s->ucr2; + value = s->ucr2; + break; case 0x25: /* USR1 */ - return s->usr1; + value = s->usr1; + break; case 0x26: /* USR2 */ - return s->usr2; + value = s->usr2; + break; case 0x2A: /* BRM Modulator */ - return s->ubmr; + value = s->ubmr; + break; case 0x2B: /* Baud Rate Count */ - return s->ubrc; + value = s->ubrc; + break; case 0x2d: /* Test register */ - return s->uts1; + value = s->uts1; + break; case 0x24: /* UFCR */ - return s->ufcr; + value = s->ufcr; + break; case 0x2c: - return s->onems; + value = s->onems; + break; case 0x22: /* UCR3 */ - return 
s->ucr3; + value = s->ucr3; + break; case 0x23: /* UCR4 */ - return s->ucr4; + value = s->ucr4; + break; case 0x29: /* BRM Incremental */ - return 0x0; /* TODO */ + value = 0x0; /* TODO */ + break; default: qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" HWADDR_PRIx "\n", TYPE_IMX_SERIAL, __func__, offset); - return 0; + value = 0; + break; } + + trace_imx_serial_read(chr ? chr->label : "NODEV", offset, value); + + return value; } static void imx_serial_write(void *opaque, hwaddr offset, @@ -260,8 +280,7 @@ static void imx_serial_write(void *opaque, hwaddr offset, Chardev *chr = qemu_chr_fe_get_driver(&s->chr); unsigned char ch; - DPRINTF("write(offset=0x%" HWADDR_PRIx ", value = 0x%x) to %s\n", - offset, (unsigned int)value, chr ? chr->label : "NODEV"); + trace_imx_serial_write(chr ? chr->label : "NODEV", offset, value); switch (offset >> 2) { case 0x10: /* UTXD */ @@ -367,27 +386,30 @@ static void imx_serial_write(void *opaque, hwaddr offset, static int imx_can_receive(void *opaque) { IMXSerialState *s = (IMXSerialState *)opaque; - return s->ucr2 & UCR2_RXEN && fifo32_num_used(&s->rx_fifo) < FIFO_SIZE; + + return s->ucr2 & UCR2_RXEN ? fifo32_num_free(&s->rx_fifo) : 0; } static void imx_put_data(void *opaque, uint32_t value) { IMXSerialState *s = (IMXSerialState *)opaque; + Chardev *chr = qemu_chr_fe_get_driver(&s->chr); uint8_t rxtl = s->ufcr & TL_MASK; - DPRINTF("received char\n"); + trace_imx_serial_put_data(chr ? chr->label : "NODEV", value); + imx_serial_rx_fifo_push(s, value); if (fifo32_num_used(&s->rx_fifo) >= rxtl) { s->usr1 |= USR1_RRDY; } - - imx_serial_rx_fifo_ageing_timer_restart(s); - s->usr2 |= USR2_RDR; s->uts1 &= ~UTS1_RXEMPTY; if (value & URXD_BRK) { s->usr2 |= USR2_BRCD; } + + imx_serial_rx_fifo_ageing_timer_restart(s); + imx_update(s); } @@ -396,7 +418,10 @@ static void imx_receive(void *opaque, const uint8_t *buf, int size) IMXSerialState *s = (IMXSerialState *)opaque; s->usr2 |= USR2_WAKE; - imx_put_data(opaque, *buf); + + for (int i = 0; i < size; i++) { + imx_put_data(opaque, buf[i]); + } } static void imx_event(void *opaque, QEMUChrEvent event) @@ -438,18 +463,17 @@ static void imx_serial_init(Object *obj) sysbus_init_irq(sbd, &s->irq); } -static Property imx_serial_properties[] = { +static const Property imx_serial_properties[] = { DEFINE_PROP_CHR("chardev", IMXSerialState, chr), - DEFINE_PROP_END_OF_LIST(), }; -static void imx_serial_class_init(ObjectClass *klass, void *data) +static void imx_serial_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = imx_serial_realize; dc->vmsd = &vmstate_imx_serial; - dc->reset = imx_serial_reset_at_boot; + device_class_set_legacy_reset(dc, imx_serial_reset_at_boot); set_bit(DEVICE_CATEGORY_INPUT, dc->categories); dc->desc = "i.MX series UART"; device_class_set_props(dc, imx_serial_properties); diff --git a/hw/char/ipoctal232.c b/hw/char/ipoctal232.c index 64be5226d4b..752c6c818ab 100644 --- a/hw/char/ipoctal232.c +++ b/hw/char/ipoctal232.c @@ -184,9 +184,9 @@ static void update_irq(IPOctalState *dev, unsigned block) unsigned intno = block / 2; if ((blk0->isr & blk0->imr) || (blk1->isr & blk1->imr)) { - qemu_irq_raise(idev->irq[intno]); + qemu_irq_raise(&idev->irq[intno]); } else { - qemu_irq_lower(idev->irq[intno]); + qemu_irq_lower(&idev->irq[intno]); } } @@ -558,7 +558,7 @@ static void ipoctal_realize(DeviceState *dev, Error **errp) } } -static Property ipoctal_properties[] = { +static const Property ipoctal_properties[] = { DEFINE_PROP_CHR("chardev0", 
IPOctalState, ch[0].dev), DEFINE_PROP_CHR("chardev1", IPOctalState, ch[1].dev), DEFINE_PROP_CHR("chardev2", IPOctalState, ch[2].dev), @@ -567,10 +567,9 @@ static Property ipoctal_properties[] = { DEFINE_PROP_CHR("chardev5", IPOctalState, ch[5].dev), DEFINE_PROP_CHR("chardev6", IPOctalState, ch[6].dev), DEFINE_PROP_CHR("chardev7", IPOctalState, ch[7].dev), - DEFINE_PROP_END_OF_LIST(), }; -static void ipoctal_class_init(ObjectClass *klass, void *data) +static void ipoctal_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); IPackDeviceClass *ic = IPACK_DEVICE_CLASS(klass); diff --git a/hw/char/max78000_uart.c b/hw/char/max78000_uart.c new file mode 100644 index 00000000000..19506d52ef9 --- /dev/null +++ b/hw/char/max78000_uart.c @@ -0,0 +1,285 @@ +/* + * MAX78000 UART + * + * Copyright (c) 2025 Jackson Donaldson + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/char/max78000_uart.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "migration/vmstate.h" +#include "trace.h" + + +static int max78000_uart_can_receive(void *opaque) +{ + Max78000UartState *s = opaque; + if (!(s->ctrl & UART_BCLKEN)) { + return 0; + } + return fifo8_num_free(&s->rx_fifo); +} + +static void max78000_update_irq(Max78000UartState *s) +{ + int interrupt_level; + + interrupt_level = s->int_fl & s->int_en; + qemu_set_irq(s->irq, interrupt_level); +} + +static void max78000_uart_receive(void *opaque, const uint8_t *buf, int size) +{ + Max78000UartState *s = opaque; + + assert(size <= fifo8_num_free(&s->rx_fifo)); + + fifo8_push_all(&s->rx_fifo, buf, size); + + uint32_t rx_threshold = s->ctrl & 0xf; + + if (fifo8_num_used(&s->rx_fifo) >= rx_threshold) { + s->int_fl |= UART_RX_THD; + } + + max78000_update_irq(s); +} + +static void max78000_uart_reset_hold(Object *obj, ResetType type) +{ + Max78000UartState *s = MAX78000_UART(obj); + + s->ctrl = 0; + s->status = UART_TX_EM | UART_RX_EM; + s->int_en = 0; + s->int_fl = 0; + s->osr = 0; + s->txpeek = 0; + s->pnr = UART_RTS; + s->fifo = 0; + s->dma = 0; + s->wken = 0; + s->wkfl = 0; + fifo8_reset(&s->rx_fifo); +} + +static uint64_t max78000_uart_read(void *opaque, hwaddr addr, + unsigned int size) +{ + Max78000UartState *s = opaque; + uint64_t retvalue = 0; + switch (addr) { + case UART_CTRL: + retvalue = s->ctrl; + break; + case UART_STATUS: + retvalue = (fifo8_num_used(&s->rx_fifo) << UART_RX_LVL) | + UART_TX_EM | + (fifo8_is_empty(&s->rx_fifo) ? 
UART_RX_EM : 0); + break; + case UART_INT_EN: + retvalue = s->int_en; + break; + case UART_INT_FL: + retvalue = s->int_fl; + break; + case UART_CLKDIV: + retvalue = s->clkdiv; + break; + case UART_OSR: + retvalue = s->osr; + break; + case UART_TXPEEK: + if (!fifo8_is_empty(&s->rx_fifo)) { + retvalue = fifo8_peek(&s->rx_fifo); + } + break; + case UART_PNR: + retvalue = s->pnr; + break; + case UART_FIFO: + if (!fifo8_is_empty(&s->rx_fifo)) { + retvalue = fifo8_pop(&s->rx_fifo); + max78000_update_irq(s); + } + break; + case UART_DMA: + /* DMA not implemented */ + retvalue = s->dma; + break; + case UART_WKEN: + retvalue = s->wken; + break; + case UART_WKFL: + retvalue = s->wkfl; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + break; + } + + return retvalue; +} + +static void max78000_uart_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + Max78000UartState *s = opaque; + + uint32_t value = val64; + uint8_t data; + + switch (addr) { + case UART_CTRL: + if (value & UART_FLUSH_RX) { + fifo8_reset(&s->rx_fifo); + } + if (value & UART_BCLKEN) { + value = value | UART_BCLKRDY; + } + s->ctrl = value & ~(UART_FLUSH_RX | UART_FLUSH_TX); + + /* + * Software can manage UART flow control manually by setting hfc_en + * in UART_CTRL. This would require emulating uart at a lower level, + * and is currently unimplemented. + */ + + return; + case UART_STATUS: + /* UART_STATUS is read only */ + return; + case UART_INT_EN: + s->int_en = value; + return; + case UART_INT_FL: + s->int_fl = s->int_fl & ~(value); + max78000_update_irq(s); + return; + case UART_CLKDIV: + s->clkdiv = value; + return; + case UART_OSR: + s->osr = value; + return; + case UART_PNR: + s->pnr = value; + return; + case UART_FIFO: + data = value & 0xff; + /* + * XXX this blocks entire thread. 
Rewrite to use + * qemu_chr_fe_write and background I/O callbacks + */ + qemu_chr_fe_write_all(&s->chr, &data, 1); + + /* TX is always empty */ + s->int_fl |= UART_TX_HE; + max78000_update_irq(s); + + return; + case UART_DMA: + /* DMA not implemented */ + s->dma = value; + return; + case UART_WKEN: + s->wken = value; + return; + case UART_WKFL: + s->wkfl = value; + return; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" + HWADDR_PRIx "\n", __func__, addr); + } +} + +static const MemoryRegionOps max78000_uart_ops = { + .read = max78000_uart_read, + .write = max78000_uart_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static const Property max78000_uart_properties[] = { + DEFINE_PROP_CHR("chardev", Max78000UartState, chr), +}; + +static const VMStateDescription max78000_uart_vmstate = { + .name = TYPE_MAX78000_UART, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ctrl, Max78000UartState), + VMSTATE_UINT32(status, Max78000UartState), + VMSTATE_UINT32(int_en, Max78000UartState), + VMSTATE_UINT32(int_fl, Max78000UartState), + VMSTATE_UINT32(clkdiv, Max78000UartState), + VMSTATE_UINT32(osr, Max78000UartState), + VMSTATE_UINT32(txpeek, Max78000UartState), + VMSTATE_UINT32(pnr, Max78000UartState), + VMSTATE_UINT32(fifo, Max78000UartState), + VMSTATE_UINT32(dma, Max78000UartState), + VMSTATE_UINT32(wken, Max78000UartState), + VMSTATE_UINT32(wkfl, Max78000UartState), + VMSTATE_FIFO8(rx_fifo, Max78000UartState), + VMSTATE_END_OF_LIST() + } +}; + +static void max78000_uart_init(Object *obj) +{ + Max78000UartState *s = MAX78000_UART(obj); + fifo8_create(&s->rx_fifo, 8); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + memory_region_init_io(&s->mmio, obj, &max78000_uart_ops, s, + TYPE_MAX78000_UART, 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static void max78000_uart_realize(DeviceState *dev, Error **errp) +{ + Max78000UartState *s = MAX78000_UART(dev); + + qemu_chr_fe_set_handlers(&s->chr, max78000_uart_can_receive, + max78000_uart_receive, NULL, NULL, + s, NULL, true); +} + +static void max78000_uart_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->phases.hold = max78000_uart_reset_hold; + + device_class_set_props(dc, max78000_uart_properties); + dc->realize = max78000_uart_realize; + + dc->vmsd = &max78000_uart_vmstate; +} + +static const TypeInfo max78000_uart_info = { + .name = TYPE_MAX78000_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Max78000UartState), + .instance_init = max78000_uart_init, + .class_init = max78000_uart_class_init, +}; + +static void max78000_uart_register_types(void) +{ + type_register_static(&max78000_uart_info); +} + +type_init(max78000_uart_register_types) diff --git a/hw/char/mcf_uart.c b/hw/char/mcf_uart.c index f9cbc9bdc42..87bfcbebdc5 100644 --- a/hw/char/mcf_uart.c +++ b/hw/char/mcf_uart.c @@ -17,6 +17,8 @@ #include "chardev/char-fe.h" #include "qom/object.h" +#define FIFO_DEPTH 4 + struct mcf_uart_state { SysBusDevice parent_obj; @@ -27,7 +29,7 @@ struct mcf_uart_state { uint8_t imr; uint8_t bg1; uint8_t bg2; - uint8_t fifo[4]; + uint8_t fifo[FIFO_DEPTH]; uint8_t tb; int current_mr; int fifo_len; @@ -247,14 +249,16 @@ static void mcf_uart_reset(DeviceState *dev) static void mcf_uart_push_byte(mcf_uart_state *s, uint8_t data) { /* Break events overwrite the last byte if the fifo is full. 
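For reference, a minimal sketch (not part of the patch) of the bounded receive-FIFO pattern mcf_uart uses after this change: can_receive() advertises the free space, receive() consumes every byte it was handed, and a push into a full FIFO overwrites the newest byte, which is how break events are folded in. FIFO_DEPTH and the overwrite rule follow the patch; the surrounding names are assumptions of the example.

#include <stdbool.h>
#include <stdint.h>

#define FIFO_DEPTH 4

struct uart_fifo {
    uint8_t data[FIFO_DEPTH];
    int len;
};

static int uart_can_receive(const struct uart_fifo *f, bool rx_enabled)
{
    /* Tell the chardev frontend how many bytes it may hand us at once. */
    return rx_enabled ? FIFO_DEPTH - f->len : 0;
}

static void uart_push(struct uart_fifo *f, uint8_t b)
{
    if (f->len == FIFO_DEPTH) {
        f->len--;            /* full: overwrite the last byte */
    }
    f->data[f->len++] = b;
}

static void uart_receive(struct uart_fifo *f, const uint8_t *buf, int size)
{
    /* The frontend never passes more than uart_can_receive() reported. */
    for (int i = 0; i < size; i++) {
        uart_push(f, buf[i]);
    }
}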
*/ - if (s->fifo_len == 4) + if (s->fifo_len == FIFO_DEPTH) { s->fifo_len--; + } s->fifo[s->fifo_len] = data; s->fifo_len++; s->sr |= MCF_UART_RxRDY; - if (s->fifo_len == 4) + if (s->fifo_len == FIFO_DEPTH) { s->sr |= MCF_UART_FFULL; + } mcf_uart_update(s); } @@ -277,14 +281,16 @@ static int mcf_uart_can_receive(void *opaque) { mcf_uart_state *s = (mcf_uart_state *)opaque; - return s->rx_enabled && (s->sr & MCF_UART_FFULL) == 0; + return s->rx_enabled ? FIFO_DEPTH - s->fifo_len : 0; } static void mcf_uart_receive(void *opaque, const uint8_t *buf, int size) { mcf_uart_state *s = (mcf_uart_state *)opaque; - mcf_uart_push_byte(s, buf[0]); + for (int i = 0; i < size; i++) { + mcf_uart_push_byte(s, buf[i]); + } } static const MemoryRegionOps mcf_uart_ops = { @@ -312,17 +318,16 @@ static void mcf_uart_realize(DeviceState *dev, Error **errp) mcf_uart_event, NULL, s, NULL, true); } -static Property mcf_uart_properties[] = { +static const Property mcf_uart_properties[] = { DEFINE_PROP_CHR("chardev", mcf_uart_state, chr), - DEFINE_PROP_END_OF_LIST(), }; -static void mcf_uart_class_init(ObjectClass *oc, void *data) +static void mcf_uart_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = mcf_uart_realize; - dc->reset = mcf_uart_reset; + device_class_set_legacy_reset(dc, mcf_uart_reset); device_class_set_props(dc, mcf_uart_properties); set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } diff --git a/hw/char/mchp_pfsoc_mmuart.c b/hw/char/mchp_pfsoc_mmuart.c index e7908bbfb5d..6149f9d2047 100644 --- a/hw/char/mchp_pfsoc_mmuart.c +++ b/hw/char/mchp_pfsoc_mmuart.c @@ -121,12 +121,12 @@ static const VMStateDescription mchp_pfsoc_mmuart_vmstate = { } }; -static void mchp_pfsoc_mmuart_class_init(ObjectClass *oc, void *data) +static void mchp_pfsoc_mmuart_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = mchp_pfsoc_mmuart_realize; - dc->reset = mchp_pfsoc_mmuart_reset; + device_class_set_legacy_reset(dc, mchp_pfsoc_mmuart_reset); dc->vmsd = &mchp_pfsoc_mmuart_vmstate; set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } diff --git a/hw/char/meson.build b/hw/char/meson.build index e5b13b69580..a9e1dc26c0f 100644 --- a/hw/char/meson.build +++ b/hw/char/meson.build @@ -1,30 +1,32 @@ system_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_uart.c')) system_ss.add(when: 'CONFIG_CMSDK_APB_UART', if_true: files('cmsdk-apb-uart.c')) system_ss.add(when: 'CONFIG_ESCC', if_true: files('escc.c')) -system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_ser.c')) system_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_apbuart.c')) system_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_uart.c')) system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_serial.c')) -system_ss.add(when: 'CONFIG_IPACK', if_true: files('ipoctal232.c')) +system_ss.add(when: 'CONFIG_IP_OCTAL_232', if_true: files('ipoctal232.c')) system_ss.add(when: 'CONFIG_ISA_BUS', if_true: files('parallel-isa.c')) system_ss.add(when: 'CONFIG_ISA_DEBUG', if_true: files('debugcon.c')) system_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_uart.c')) system_ss.add(when: 'CONFIG_PARALLEL', if_true: files('parallel.c')) -system_ss.add(when: 'CONFIG_PL011', if_true: files('pl011.c')) +system_ss.add(when: 'CONFIG_PL011_C', if_true: files('pl011.c')) system_ss.add(when: 'CONFIG_SCLPCONSOLE', if_true: files('sclpconsole.c', 'sclpconsole-lm.c')) system_ss.add(when: 'CONFIG_SERIAL', if_true: files('serial.c')) system_ss.add(when: 'CONFIG_SERIAL_ISA', if_true: 
files('serial-isa.c')) +system_ss.add(when: 'CONFIG_SERIAL_MM', if_true: files('serial-mm.c')) system_ss.add(when: 'CONFIG_SERIAL_PCI', if_true: files('serial-pci.c')) system_ss.add(when: 'CONFIG_SERIAL_PCI_MULTI', if_true: files('serial-pci-multi.c')) system_ss.add(when: 'CONFIG_SHAKTI_UART', if_true: files('shakti_uart.c')) system_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-console.c')) system_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen_console.c')) system_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_uartlite.c')) +system_ss.add(when: 'CONFIG_DIVA_GSP', if_true: files('diva-gsp.c')) system_ss.add(when: 'CONFIG_AVR_USART', if_true: files('avr_usart.c')) system_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_uart.c')) system_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic-uart.c')) system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_uart.c')) +system_ss.add(when: 'CONFIG_MAX78000_UART', if_true: files('max78000_uart.c')) system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_uart.c')) system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_aux.c')) system_ss.add(when: 'CONFIG_RENESAS_SCI', if_true: files('renesas_sci.c')) diff --git a/hw/char/nrf51_uart.c b/hw/char/nrf51_uart.c index c2cd6bb5e71..41d423446f3 100644 --- a/hw/char/nrf51_uart.c +++ b/hw/char/nrf51_uart.c @@ -304,16 +304,15 @@ static const VMStateDescription nrf51_uart_vmstate = { } }; -static Property nrf51_uart_properties[] = { +static const Property nrf51_uart_properties[] = { DEFINE_PROP_CHR("chardev", NRF51UARTState, chr), - DEFINE_PROP_END_OF_LIST(), }; -static void nrf51_uart_class_init(ObjectClass *klass, void *data) +static void nrf51_uart_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = nrf51_uart_reset; + device_class_set_legacy_reset(dc, nrf51_uart_reset); dc->realize = nrf51_uart_realize; device_class_set_props(dc, nrf51_uart_properties); dc->vmsd = &nrf51_uart_vmstate; diff --git a/hw/char/omap_uart.c b/hw/char/omap_uart.c index c2ef4c137e1..8cbf6ce803e 100644 --- a/hw/char/omap_uart.c +++ b/hw/char/omap_uart.c @@ -20,15 +20,14 @@ #include "qemu/osdep.h" #include "chardev/char.h" #include "hw/arm/omap.h" -#include "hw/char/serial.h" -#include "exec/address-spaces.h" +#include "hw/char/serial-mm.h" +#include "system/address-spaces.h" /* UARTs */ struct omap_uart_s { MemoryRegion iomem; hwaddr base; SerialMM *serial; /* TODO */ - struct omap_target_agent_s *ta; omap_clk fclk; qemu_irq irq; @@ -36,8 +35,6 @@ struct omap_uart_s { uint8_t syscontrol; uint8_t wkup; uint8_t cfps; - uint8_t mdr[2]; - uint8_t scr; uint8_t clksel; }; @@ -66,113 +63,3 @@ struct omap_uart_s *omap_uart_init(hwaddr base, DEVICE_NATIVE_ENDIAN); return s; } - -static uint64_t omap_uart_read(void *opaque, hwaddr addr, unsigned size) -{ - struct omap_uart_s *s = opaque; - - if (size == 4) { - return omap_badwidth_read8(opaque, addr); - } - - switch (addr) { - case 0x20: /* MDR1 */ - return s->mdr[0]; - case 0x24: /* MDR2 */ - return s->mdr[1]; - case 0x40: /* SCR */ - return s->scr; - case 0x44: /* SSR */ - return 0x0; - case 0x48: /* EBLR (OMAP2) */ - return s->eblr; - case 0x4C: /* OSC_12M_SEL (OMAP1) */ - return s->clksel; - case 0x50: /* MVR */ - return 0x30; - case 0x54: /* SYSC (OMAP2) */ - return s->syscontrol; - case 0x58: /* SYSS (OMAP2) */ - return 1; - case 0x5c: /* WER (OMAP2) */ - return s->wkup; - case 0x60: /* CFPS (OMAP2) */ - return s->cfps; - } - - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_uart_write(void 
*opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_uart_s *s = opaque; - - if (size == 4) { - omap_badwidth_write8(opaque, addr, value); - return; - } - - switch (addr) { - case 0x20: /* MDR1 */ - s->mdr[0] = value & 0x7f; - break; - case 0x24: /* MDR2 */ - s->mdr[1] = value & 0xff; - break; - case 0x40: /* SCR */ - s->scr = value & 0xff; - break; - case 0x48: /* EBLR (OMAP2) */ - s->eblr = value & 0xff; - break; - case 0x4C: /* OSC_12M_SEL (OMAP1) */ - s->clksel = value & 1; - break; - case 0x44: /* SSR */ - case 0x50: /* MVR */ - case 0x58: /* SYSS (OMAP2) */ - OMAP_RO_REG(addr); - break; - case 0x54: /* SYSC (OMAP2) */ - s->syscontrol = value & 0x1d; - if (value & 2) { - omap_uart_reset(s); - } - break; - case 0x5c: /* WER (OMAP2) */ - s->wkup = value & 0x7f; - break; - case 0x60: /* CFPS (OMAP2) */ - s->cfps = value & 0xff; - break; - default: - OMAP_BAD_REG(addr); - } -} - -static const MemoryRegionOps omap_uart_ops = { - .read = omap_uart_read, - .write = omap_uart_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -struct omap_uart_s *omap2_uart_init(MemoryRegion *sysmem, - struct omap_target_agent_s *ta, - qemu_irq irq, omap_clk fclk, omap_clk iclk, - qemu_irq txdma, qemu_irq rxdma, - const char *label, Chardev *chr) -{ - hwaddr base = omap_l4_attach(ta, 0, NULL); - struct omap_uart_s *s = omap_uart_init(base, irq, - fclk, iclk, txdma, rxdma, label, chr); - - memory_region_init_io(&s->iomem, NULL, &omap_uart_ops, s, "omap.uart", 0x100); - - s->ta = ta; - - memory_region_add_subregion(sysmem, base + 0x20, &s->iomem); - - return s; -} diff --git a/hw/char/parallel-isa.c b/hw/char/parallel-isa.c index a5ce6ee13a4..b6dfb6cc31a 100644 --- a/hw/char/parallel-isa.c +++ b/hw/char/parallel-isa.c @@ -10,7 +10,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/isa/isa.h" #include "hw/qdev-properties.h" #include "hw/char/parallel-isa.h" diff --git a/hw/char/parallel.c b/hw/char/parallel.c index c394635ada2..8732e4e9f96 100644 --- a/hw/char/parallel.c +++ b/hw/char/parallel.c @@ -33,8 +33,8 @@ #include "migration/vmstate.h" #include "hw/char/parallel-isa.h" #include "hw/char/parallel.h" -#include "sysemu/reset.h" -#include "sysemu/sysemu.h" +#include "system/reset.h" +#include "system/system.h" #include "trace.h" #include "qom/object.h" @@ -603,15 +603,14 @@ bool parallel_mm_init(MemoryRegion *address_space, return true; } -static Property parallel_isa_properties[] = { +static const Property parallel_isa_properties[] = { DEFINE_PROP_UINT32("index", ISAParallelState, index, -1), DEFINE_PROP_UINT32("iobase", ISAParallelState, iobase, -1), DEFINE_PROP_UINT32("irq", ISAParallelState, isairq, 7), DEFINE_PROP_CHR("chardev", ISAParallelState, state.chr), - DEFINE_PROP_END_OF_LIST(), }; -static void parallel_isa_class_initfn(ObjectClass *klass, void *data) +static void parallel_isa_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); @@ -628,7 +627,7 @@ static const TypeInfo parallel_isa_info = { .parent = TYPE_ISA_DEVICE, .instance_size = sizeof(ISAParallelState), .class_init = parallel_isa_class_initfn, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_ACPI_DEV_AML_IF }, { }, }, diff --git a/hw/char/pl011.c b/hw/char/pl011.c index f8078aa216d..01335d9437d 100644 --- a/hw/char/pl011.c +++ b/hw/char/pl011.c @@ -85,13 +85,16 @@ DeviceState *pl011_create(hwaddr addr, qemu_irq irq, Chardev *chr) #define 
CR_OUT1 (1 << 12) #define CR_RTS (1 << 11) #define CR_DTR (1 << 10) +#define CR_RXE (1 << 9) +#define CR_TXE (1 << 8) #define CR_LBE (1 << 7) +#define CR_UARTEN (1 << 0) /* Integer Baud Rate Divider, UARTIBRD */ -#define IBRD_MASK 0x3f +#define IBRD_MASK 0xffff /* Fractional Baud Rate Divider, UARTFBRD */ -#define FBRD_MASK 0xffff +#define FBRD_MASK 0x3f static const unsigned char pl011_id_arm[8] = { 0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1 }; @@ -138,6 +141,11 @@ static void pl011_update(PL011State *s) } } +static bool pl011_loopback_enabled(PL011State *s) +{ + return !!(s->cr & CR_LBE); +} + static bool pl011_is_fifo_enabled(PL011State *s) { return (s->lcr & LCR_FEN) != 0; @@ -149,41 +157,127 @@ static inline unsigned pl011_get_fifo_depth(PL011State *s) return pl011_is_fifo_enabled(s) ? PL011_FIFO_DEPTH : 1; } -static inline void pl011_reset_fifo(PL011State *s) +static inline void pl011_reset_rx_fifo(PL011State *s) { s->read_count = 0; s->read_pos = 0; /* Reset FIFO flags */ - s->flags &= ~(PL011_FLAG_RXFF | PL011_FLAG_TXFF); - s->flags |= PL011_FLAG_RXFE | PL011_FLAG_TXFE; + s->flags &= ~PL011_FLAG_RXFF; + s->flags |= PL011_FLAG_RXFE; +} + +static inline void pl011_reset_tx_fifo(PL011State *s) +{ + /* Reset FIFO flags */ + s->flags &= ~PL011_FLAG_TXFF; + s->flags |= PL011_FLAG_TXFE; +} + +static void pl011_fifo_rx_put(void *opaque, uint32_t value) +{ + PL011State *s = (PL011State *)opaque; + int slot; + unsigned pipe_depth; + + pipe_depth = pl011_get_fifo_depth(s); + slot = (s->read_pos + s->read_count) & (pipe_depth - 1); + s->read_fifo[slot] = value; + s->read_count++; + s->flags &= ~PL011_FLAG_RXFE; + trace_pl011_fifo_rx_put(value, s->read_count, pipe_depth); + if (s->read_count == pipe_depth) { + trace_pl011_fifo_rx_full(); + s->flags |= PL011_FLAG_RXFF; + } + if (s->read_count == s->read_trigger) { + s->int_level |= INT_RX; + pl011_update(s); + } +} + +static void pl011_loopback_tx(PL011State *s, uint32_t value) +{ + if (!pl011_loopback_enabled(s)) { + return; + } + + /* + * Caveat: + * + * In real hardware, TX loopback happens at the serial-bit level + * and then reassembled by the RX logics back into bytes and placed + * into the RX fifo. That is, loopback happens after TX fifo. + * + * Because the real hardware TX fifo is time-drained at the frame + * rate governed by the configured serial format, some loopback + * bytes in TX fifo may still be able to get into the RX fifo + * that could be full at times while being drained at software + * pace. + * + * In such scenario, the RX draining pace is the major factor + * deciding which loopback bytes get into the RX fifo, unless + * hardware flow-control is enabled. + * + * For simplicity, the above described is not emulated. + */ + pl011_fifo_rx_put(s, value); +} + +static void pl011_write_txdata(PL011State *s, uint8_t data) +{ + if (!(s->cr & CR_UARTEN)) { + qemu_log_mask(LOG_GUEST_ERROR, + "PL011 data written to disabled UART\n"); + } + if (!(s->cr & CR_TXE)) { + qemu_log_mask(LOG_GUEST_ERROR, + "PL011 data written to disabled TX UART\n"); + } + + /* + * XXX this blocks entire thread. 
Rewrite to use + * qemu_chr_fe_write and background I/O callbacks + */ + qemu_chr_fe_write_all(&s->chr, &data, 1); + pl011_loopback_tx(s, data); + s->int_level |= INT_TX; + pl011_update(s); +} + +static uint32_t pl011_read_rxdata(PL011State *s) +{ + uint32_t c; + unsigned fifo_depth = pl011_get_fifo_depth(s); + + s->flags &= ~PL011_FLAG_RXFF; + c = s->read_fifo[s->read_pos]; + if (s->read_count > 0) { + s->read_count--; + s->read_pos = (s->read_pos + 1) & (fifo_depth - 1); + } + if (s->read_count == 0) { + s->flags |= PL011_FLAG_RXFE; + } + if (s->read_count == s->read_trigger - 1) { + s->int_level &= ~INT_RX; + } + trace_pl011_read_fifo(s->read_count, fifo_depth); + s->rsr = c >> 8; + pl011_update(s); + qemu_chr_fe_accept_input(&s->chr); + return c; } static uint64_t pl011_read(void *opaque, hwaddr offset, unsigned size) { PL011State *s = (PL011State *)opaque; - uint32_t c; uint64_t r; switch (offset >> 2) { case 0: /* UARTDR */ - s->flags &= ~PL011_FLAG_RXFF; - c = s->read_fifo[s->read_pos]; - if (s->read_count > 0) { - s->read_count--; - s->read_pos = (s->read_pos + 1) & (pl011_get_fifo_depth(s) - 1); - } - if (s->read_count == 0) { - s->flags |= PL011_FLAG_RXFE; - } - if (s->read_count == s->read_trigger - 1) - s->int_level &= ~ INT_RX; - trace_pl011_read_fifo(s->read_count); - s->rsr = c >> 8; - pl011_update(s); - qemu_chr_fe_accept_input(&s->chr); - r = c; + r = pl011_read_rxdata(s); break; case 1: /* UARTRSR */ r = s->rsr; @@ -268,11 +362,6 @@ static void pl011_trace_baudrate_change(const PL011State *s) s->ibrd, s->fbrd); } -static bool pl011_loopback_enabled(PL011State *s) -{ - return !!(s->cr & CR_LBE); -} - static void pl011_loopback_mdmctrl(PL011State *s) { uint32_t cr, fr, il; @@ -314,36 +403,6 @@ static void pl011_loopback_mdmctrl(PL011State *s) pl011_update(s); } -static void pl011_put_fifo(void *opaque, uint32_t value); - -static void pl011_loopback_tx(PL011State *s, uint32_t value) -{ - if (!pl011_loopback_enabled(s)) { - return; - } - - /* - * Caveat: - * - * In real hardware, TX loopback happens at the serial-bit level - * and then reassembled by the RX logics back into bytes and placed - * into the RX fifo. That is, loopback happens after TX fifo. - * - * Because the real hardware TX fifo is time-drained at the frame - * rate governed by the configured serial format, some loopback - * bytes in TX fifo may still be able to get into the RX fifo - * that could be full at times while being drained at software - * pace. - * - * In such scenario, the RX draining pace is the major factor - * deciding which loopback bytes get into the RX fifo, unless - * hardware flow-control is enabled. - * - * For simplicity, the above described is not emulated. - */ - pl011_put_fifo(s, value); -} - static void pl011_loopback_break(PL011State *s, int brk_enable) { if (brk_enable) { @@ -361,14 +420,8 @@ static void pl011_write(void *opaque, hwaddr offset, switch (offset >> 2) { case 0: /* UARTDR */ - /* ??? Check if transmitter is enabled. */ ch = value; - /* XXX this blocks entire thread. 
Rewrite to use - * qemu_chr_fe_write and background I/O callbacks */ - qemu_chr_fe_write_all(&s->chr, &ch, 1); - pl011_loopback_tx(s, ch); - s->int_level |= INT_TX; - pl011_update(s); + pl011_write_txdata(s, ch); break; case 1: /* UARTRSR/UARTECR */ s->rsr = 0; @@ -390,7 +443,8 @@ static void pl011_write(void *opaque, hwaddr offset, case 11: /* UARTLCR_H */ /* Reset the FIFO state on FIFO enable or disable */ if ((s->lcr ^ value) & LCR_FEN) { - pl011_reset_fifo(s); + pl011_reset_rx_fifo(s); + pl011_reset_tx_fifo(s); } if ((s->lcr ^ value) & LCR_BRK) { int break_enable = value & LCR_BRK; @@ -433,37 +487,26 @@ static void pl011_write(void *opaque, hwaddr offset, static int pl011_can_receive(void *opaque) { PL011State *s = (PL011State *)opaque; - int r; - - r = s->read_count < pl011_get_fifo_depth(s); - trace_pl011_can_receive(s->lcr, s->read_count, r); - return r; -} + unsigned fifo_depth = pl011_get_fifo_depth(s); + unsigned fifo_available = fifo_depth - s->read_count; -static void pl011_put_fifo(void *opaque, uint32_t value) -{ - PL011State *s = (PL011State *)opaque; - int slot; - unsigned pipe_depth; + /* + * In theory we should check the UART and RX enable bits here and + * return 0 if they are not set (so the guest can't receive data + * until you have enabled the UART). In practice we suspect there + * is at least some guest code out there which has been tested only + * on QEMU and which never bothers to enable the UART because we + * historically never enforced that. So we effectively keep the + * UART continuously enabled regardless of the enable bits. + */ - pipe_depth = pl011_get_fifo_depth(s); - slot = (s->read_pos + s->read_count) & (pipe_depth - 1); - s->read_fifo[slot] = value; - s->read_count++; - s->flags &= ~PL011_FLAG_RXFE; - trace_pl011_put_fifo(value, s->read_count); - if (s->read_count == pipe_depth) { - trace_pl011_put_fifo_full(); - s->flags |= PL011_FLAG_RXFF; - } - if (s->read_count == s->read_trigger) { - s->int_level |= INT_RX; - pl011_update(s); - } + trace_pl011_can_receive(s->lcr, s->read_count, fifo_depth, fifo_available); + return fifo_available; } static void pl011_receive(void *opaque, const uint8_t *buf, int size) { + trace_pl011_receive(size); /* * In loopback mode, the RX input signal is internally disconnected * from the entire receiving logics; thus, all inputs are ignored, @@ -473,13 +516,15 @@ static void pl011_receive(void *opaque, const uint8_t *buf, int size) return; } - pl011_put_fifo(opaque, *buf); + for (int i = 0; i < size; i++) { + pl011_fifo_rx_put(opaque, buf[i]); + } } static void pl011_event(void *opaque, QEMUChrEvent event) { if (event == CHR_EVENT_BREAK && !pl011_loopback_enabled(opaque)) { - pl011_put_fifo(opaque, DR_BE); + pl011_fifo_rx_put(opaque, DR_BE); } } @@ -549,7 +594,7 @@ static const VMStateDescription vmstate_pl011 = { .minimum_version_id = 2, .post_load = pl011_post_load, .fields = (const VMStateField[]) { - VMSTATE_UINT32(readbuff, PL011State), + VMSTATE_UNUSED(sizeof(uint32_t)), VMSTATE_UINT32(flags, PL011State), VMSTATE_UINT32(lcr, PL011State), VMSTATE_UINT32(rsr, PL011State), @@ -573,10 +618,9 @@ static const VMStateDescription vmstate_pl011 = { } }; -static Property pl011_properties[] = { +static const Property pl011_properties[] = { DEFINE_PROP_CHR("chardev", PL011State, chr), DEFINE_PROP_BOOL("migrate-clk", PL011State, migrate_clk, true), - DEFINE_PROP_END_OF_LIST(), }; static void pl011_init(Object *obj) @@ -621,15 +665,16 @@ static void pl011_reset(DeviceState *dev) s->ifl = 0x12; s->cr = 0x300; s->flags = 0; - 
pl011_reset_fifo(s); + pl011_reset_rx_fifo(s); + pl011_reset_tx_fifo(s); } -static void pl011_class_init(ObjectClass *oc, void *data) +static void pl011_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = pl011_realize; - dc->reset = pl011_reset; + device_class_set_legacy_reset(dc, pl011_reset); dc->vmsd = &vmstate_pl011; device_class_set_props(dc, pl011_properties); } diff --git a/hw/char/renesas_sci.c b/hw/char/renesas_sci.c index 5cb733545c4..b9d0ed1c89b 100644 --- a/hw/char/renesas_sci.c +++ b/hw/char/renesas_sci.c @@ -319,19 +319,18 @@ static const VMStateDescription vmstate_rsci = { } }; -static Property rsci_properties[] = { +static const Property rsci_properties[] = { DEFINE_PROP_UINT64("input-freq", RSCIState, input_freq, 0), DEFINE_PROP_CHR("chardev", RSCIState, chr), - DEFINE_PROP_END_OF_LIST(), }; -static void rsci_class_init(ObjectClass *klass, void *data) +static void rsci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = rsci_realize; dc->vmsd = &vmstate_rsci; - dc->reset = rsci_reset; + device_class_set_legacy_reset(dc, rsci_reset); device_class_set_props(dc, rsci_properties); } diff --git a/hw/char/riscv_htif.c b/hw/char/riscv_htif.c index 9bef60def1b..a78ea9b01c0 100644 --- a/hw/char/riscv_htif.c +++ b/hw/char/riscv_htif.c @@ -24,23 +24,14 @@ #include "qapi/error.h" #include "qemu/log.h" #include "hw/char/riscv_htif.h" -#include "hw/char/serial.h" #include "chardev/char.h" #include "chardev/char-fe.h" #include "qemu/timer.h" #include "qemu/error-report.h" -#include "exec/address-spaces.h" -#include "exec/tswap.h" -#include "sysemu/dma.h" -#include "sysemu/runstate.h" - -#define RISCV_DEBUG_HTIF 0 -#define HTIF_DEBUG(fmt, ...) \ - do { \ - if (RISCV_DEBUG_HTIF) { \ - qemu_log_mask(LOG_TRACE, "%s: " fmt "\n", __func__, ##__VA_ARGS__);\ - } \ - } while (0) +#include "system/address-spaces.h" +#include "system/dma.h" +#include "system/runstate.h" +#include "trace.h" #define HTIF_DEV_SHIFT 56 #define HTIF_CMD_SHIFT 48 @@ -160,8 +151,7 @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written) uint64_t payload = val_written & 0xFFFFFFFFFFFFULL; int resp = 0; - HTIF_DEBUG("mtohost write: device: %d cmd: %d what: %02" PRIx64 - " -payload: %016" PRIx64 "\n", device, cmd, payload & 0xFF, payload); + trace_htif_uart_write_to_host(device, cmd, payload); /* * Currently, there is a fixed mapping of devices: @@ -213,12 +203,16 @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written) } else { uint64_t syscall[8]; cpu_physical_memory_read(payload, syscall, sizeof(syscall)); - if (tswap64(syscall[0]) == PK_SYS_WRITE && - tswap64(syscall[1]) == HTIF_DEV_CONSOLE && - tswap64(syscall[3]) == HTIF_CONSOLE_CMD_PUTC) { + if (le64_to_cpu(syscall[0]) == PK_SYS_WRITE && + le64_to_cpu(syscall[1]) == HTIF_DEV_CONSOLE && + le64_to_cpu(syscall[3]) == HTIF_CONSOLE_CMD_PUTC) { uint8_t ch; - cpu_physical_memory_read(tswap64(syscall[2]), &ch, 1); - qemu_chr_fe_write(&s->chr, &ch, 1); + cpu_physical_memory_read(le64_to_cpu(syscall[2]), &ch, 1); + /* + * XXX this blocks entire thread. 
Rewrite to use + * qemu_chr_fe_write and background I/O callbacks + */ + qemu_chr_fe_write_all(&s->chr, &ch, 1); resp = 0x100 | (uint8_t)payload; } else { qemu_log_mask(LOG_UNIMP, @@ -237,15 +231,18 @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written) return; } else if (cmd == HTIF_CONSOLE_CMD_PUTC) { uint8_t ch = (uint8_t)payload; - qemu_chr_fe_write(&s->chr, &ch, 1); + /* + * XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks + */ + qemu_chr_fe_write_all(&s->chr, &ch, 1); resp = 0x100 | (uint8_t)payload; } else { qemu_log("HTIF device %d: unknown command\n", device); } } else { qemu_log("HTIF unknown device or command\n"); - HTIF_DEBUG("device: %d cmd: %d what: %02" PRIx64 - " payload: %016" PRIx64, device, cmd, payload & 0xFF, payload); + trace_htif_uart_unknown_device_command(device, cmd, payload); } /* * Latest bbl does not set fromhost to 0 if there is a value in tohost. @@ -317,6 +314,11 @@ static void htif_mm_write(void *opaque, hwaddr addr, static const MemoryRegionOps htif_mm_ops = { .read = htif_mm_read, .write = htif_mm_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, }; HTIFState *htif_mm_init(MemoryRegion *address_space, Chardev *chr, diff --git a/hw/char/sclpconsole-lm.c b/hw/char/sclpconsole-lm.c index 7719f438f68..3e40d5e4342 100644 --- a/hw/char/sclpconsole-lm.c +++ b/hw/char/sclpconsole-lm.c @@ -214,7 +214,7 @@ static int process_mdb(SCLPEvent *event, MDBO *mdbo) { int rc; int len; - uint8_t buffer[SIZE_BUFFER]; + QEMU_UNINITIALIZED uint8_t buffer[SIZE_BUFFER]; len = be16_to_cpu(mdbo->length); len -= sizeof(mdbo->length) + sizeof(mdbo->type) @@ -333,20 +333,19 @@ static void console_reset(DeviceState *dev) scon->write_errors = 0; } -static Property console_properties[] = { +static const Property console_properties[] = { DEFINE_PROP_CHR("chardev", SCLPConsoleLM, chr), DEFINE_PROP_UINT32("write_errors", SCLPConsoleLM, write_errors, 0), DEFINE_PROP_BOOL("echo", SCLPConsoleLM, echo, true), - DEFINE_PROP_END_OF_LIST(), }; -static void console_class_init(ObjectClass *klass, void *data) +static void console_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SCLPEventClass *ec = SCLP_EVENT_CLASS(klass); device_class_set_props(dc, console_properties); - dc->reset = console_reset; + device_class_set_legacy_reset(dc, console_reset); dc->vmsd = &vmstate_sclplmconsole; ec->init = console_init; ec->get_send_mask = send_mask; diff --git a/hw/char/sclpconsole.c b/hw/char/sclpconsole.c index 5d630b04bb9..95e3045178e 100644 --- a/hw/char/sclpconsole.c +++ b/hw/char/sclpconsole.c @@ -251,18 +251,17 @@ static void console_reset(DeviceState *dev) scon->notify = false; } -static Property console_properties[] = { +static const Property console_properties[] = { DEFINE_PROP_CHR("chardev", SCLPConsole, chr), - DEFINE_PROP_END_OF_LIST(), }; -static void console_class_init(ObjectClass *klass, void *data) +static void console_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SCLPEventClass *ec = SCLP_EVENT_CLASS(klass); device_class_set_props(dc, console_properties); - dc->reset = console_reset; + device_class_set_legacy_reset(dc, console_reset); dc->vmsd = &vmstate_sclpconsole; ec->init = console_init; ec->get_send_mask = send_mask; diff --git a/hw/char/serial-isa.c b/hw/char/serial-isa.c index 329b352b9a8..0ea59a3d5c2 100644 --- a/hw/char/serial-isa.c +++ b/hw/char/serial-isa.c @@ -26,9 
+26,10 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/module.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/acpi/acpi_aml_interface.h" #include "hw/char/serial.h" +#include "hw/char/serial-isa.h" #include "hw/isa/isa.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" @@ -112,14 +113,13 @@ static const VMStateDescription vmstate_isa_serial = { } }; -static Property serial_isa_properties[] = { +static const Property serial_isa_properties[] = { DEFINE_PROP_UINT32("index", ISASerialState, index, -1), DEFINE_PROP_UINT32("iobase", ISASerialState, iobase, -1), DEFINE_PROP_UINT32("irq", ISASerialState, isairq, -1), - DEFINE_PROP_END_OF_LIST(), }; -static void serial_isa_class_initfn(ObjectClass *klass, void *data) +static void serial_isa_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); @@ -146,7 +146,7 @@ static const TypeInfo serial_isa_info = { .instance_size = sizeof(ISASerialState), .instance_init = serial_isa_initfn, .class_init = serial_isa_class_initfn, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_ACPI_DEV_AML_IF }, { }, }, diff --git a/hw/char/serial-mm.c b/hw/char/serial-mm.c new file mode 100644 index 00000000000..13aba780ec5 --- /dev/null +++ b/hw/char/serial-mm.c @@ -0,0 +1,156 @@ +/* + * QEMU 16550A UART emulation + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2008 Citrix Systems, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/char/serial-mm.h" +#include "exec/cpu-common.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" + +static uint64_t serial_mm_read(void *opaque, hwaddr addr, unsigned size) +{ + SerialMM *s = SERIAL_MM(opaque); + return serial_io_ops.read(&s->serial, addr >> s->regshift, 1); +} + +static void serial_mm_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + SerialMM *s = SERIAL_MM(opaque); + value &= 255; + serial_io_ops.write(&s->serial, addr >> s->regshift, value, 1); +} + +static const MemoryRegionOps serial_mm_ops[3] = { + [DEVICE_NATIVE_ENDIAN] = { + .read = serial_mm_read, + .write = serial_mm_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.max_access_size = 8, + .impl.max_access_size = 8, + }, + [DEVICE_LITTLE_ENDIAN] = { + .read = serial_mm_read, + .write = serial_mm_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.max_access_size = 8, + .impl.max_access_size = 8, + }, + [DEVICE_BIG_ENDIAN] = { + .read = serial_mm_read, + .write = serial_mm_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid.max_access_size = 8, + .impl.max_access_size = 8, + }, +}; + +static void serial_mm_realize(DeviceState *dev, Error **errp) +{ + SerialMM *smm = SERIAL_MM(dev); + SerialState *s = &smm->serial; + + if (!qdev_realize(DEVICE(s), NULL, errp)) { + return; + } + + memory_region_init_io(&s->io, OBJECT(dev), + &serial_mm_ops[smm->endianness], smm, "serial", + 8 << smm->regshift); + sysbus_init_mmio(SYS_BUS_DEVICE(smm), &s->io); + sysbus_init_irq(SYS_BUS_DEVICE(smm), &smm->serial.irq); +} + +static const VMStateDescription vmstate_serial_mm = { + .name = "serial", + .version_id = 3, + .minimum_version_id = 2, + .fields = (const VMStateField[]) { + VMSTATE_STRUCT(serial, SerialMM, 0, vmstate_serial, SerialState), + VMSTATE_END_OF_LIST() + } +}; + +SerialMM *serial_mm_init(MemoryRegion *address_space, + hwaddr base, int regshift, + qemu_irq irq, int baudbase, + Chardev *chr, enum device_endian end) +{ + SerialMM *smm = SERIAL_MM(qdev_new(TYPE_SERIAL_MM)); + MemoryRegion *mr; + + qdev_prop_set_uint8(DEVICE(smm), "regshift", regshift); + qdev_prop_set_uint32(DEVICE(smm), "baudbase", baudbase); + qdev_prop_set_chr(DEVICE(smm), "chardev", chr); + qdev_set_legacy_instance_id(DEVICE(smm), base, 2); + qdev_prop_set_uint8(DEVICE(smm), "endianness", end); + sysbus_realize_and_unref(SYS_BUS_DEVICE(smm), &error_fatal); + + sysbus_connect_irq(SYS_BUS_DEVICE(smm), 0, irq); + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(smm), 0); + memory_region_add_subregion(address_space, base, mr); + + return smm; +} + +static void serial_mm_instance_init(Object *o) +{ + SerialMM *smm = SERIAL_MM(o); + + object_initialize_child(o, "serial", &smm->serial, TYPE_SERIAL); + + qdev_alias_all_properties(DEVICE(&smm->serial), o); +} + +static const Property serial_mm_properties[] = { + /* + * Set the spacing between adjacent memory-mapped UART registers. + * Each register will be at (1 << regshift) bytes after the previous one. 
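A short sketch (not part of the patch) of what the "regshift" property in serial-mm below does: it spaces the eight 16550 registers (1 << regshift) bytes apart, so the MMIO handlers recover the register index with a right shift and the whole bank occupies 8 << regshift bytes. The helper name is illustrative only.

#include <assert.h>

static unsigned serial_reg_index(unsigned long offset, unsigned regshift)
{
    unsigned index = offset >> regshift;   /* undo the register spacing */
    assert(index < 8);                     /* a 16550 exposes 8 registers */
    return index;
}

/* Example: with regshift = 2 the registers sit 4 bytes apart, the bank
 * spans 32 bytes, and serial_reg_index(0x0c, 2) == 3 (the LCR). */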
+ */ + DEFINE_PROP_UINT8("regshift", SerialMM, regshift, 0), + DEFINE_PROP_UINT8("endianness", SerialMM, endianness, DEVICE_NATIVE_ENDIAN), +}; + +static void serial_mm_class_init(ObjectClass *oc, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, serial_mm_properties); + dc->realize = serial_mm_realize; + dc->vmsd = &vmstate_serial_mm; +} + +static const TypeInfo types[] = { + { + .name = TYPE_SERIAL_MM, + .parent = TYPE_SYS_BUS_DEVICE, + .class_init = serial_mm_class_init, + .instance_init = serial_mm_instance_init, + .instance_size = sizeof(SerialMM), + }, +}; + +DEFINE_TYPES(types) diff --git a/hw/char/serial-pci-multi.c b/hw/char/serial-pci-multi.c index 28b275709af..13df272691a 100644 --- a/hw/char/serial-pci-multi.c +++ b/hw/char/serial-pci-multi.c @@ -45,8 +45,7 @@ typedef struct PCIMultiSerialState { char *name[PCI_SERIAL_MAX_PORTS]; SerialState state[PCI_SERIAL_MAX_PORTS]; uint32_t level[PCI_SERIAL_MAX_PORTS]; - qemu_irq *irqs; - uint8_t prog_if; + IRQState irqs[PCI_SERIAL_MAX_PORTS]; } PCIMultiSerialState; static void multi_serial_pci_exit(PCIDevice *dev) @@ -61,7 +60,6 @@ static void multi_serial_pci_exit(PCIDevice *dev) memory_region_del_subregion(&pci->iobar, &s->io); g_free(pci->name[i]); } - qemu_free_irqs(pci->irqs, pci->ports); } static void multi_serial_irq_mux(void *opaque, int n, int level) @@ -98,11 +96,10 @@ static void multi_serial_pci_realize(PCIDevice *dev, Error **errp) SerialState *s; size_t i, nports = multi_serial_get_port_count(pc); - pci->dev.config[PCI_CLASS_PROG] = pci->prog_if; - pci->dev.config[PCI_INTERRUPT_PIN] = 0x01; + pci->dev.config[PCI_CLASS_PROG] = 2; /* 16550 compatible */ + pci->dev.config[PCI_INTERRUPT_PIN] = 1; memory_region_init(&pci->iobar, OBJECT(pci), "multiserial", 8 * nports); pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->iobar); - pci->irqs = qemu_allocate_irqs(multi_serial_irq_mux, pci, nports); for (i = 0; i < nports; i++) { s = pci->state + i; @@ -110,7 +107,7 @@ static void multi_serial_pci_realize(PCIDevice *dev, Error **errp) multi_serial_pci_exit(dev); return; } - s->irq = pci->irqs[i]; + s->irq = &pci->irqs[i]; pci->name[i] = g_strdup_printf("uart #%zu", i + 1); memory_region_init_io(&s->io, OBJECT(pci), &serial_io_ops, s, pci->name[i], 8); @@ -132,23 +129,20 @@ static const VMStateDescription vmstate_pci_multi_serial = { } }; -static Property multi_2x_serial_pci_properties[] = { +static const Property multi_2x_serial_pci_properties[] = { DEFINE_PROP_CHR("chardev1", PCIMultiSerialState, state[0].chr), DEFINE_PROP_CHR("chardev2", PCIMultiSerialState, state[1].chr), - DEFINE_PROP_UINT8("prog_if", PCIMultiSerialState, prog_if, 0x02), - DEFINE_PROP_END_OF_LIST(), }; -static Property multi_4x_serial_pci_properties[] = { +static const Property multi_4x_serial_pci_properties[] = { DEFINE_PROP_CHR("chardev1", PCIMultiSerialState, state[0].chr), DEFINE_PROP_CHR("chardev2", PCIMultiSerialState, state[1].chr), DEFINE_PROP_CHR("chardev3", PCIMultiSerialState, state[2].chr), DEFINE_PROP_CHR("chardev4", PCIMultiSerialState, state[3].chr), - DEFINE_PROP_UINT8("prog_if", PCIMultiSerialState, prog_if, 0x02), - DEFINE_PROP_END_OF_LIST(), }; -static void multi_2x_serial_pci_class_initfn(ObjectClass *klass, void *data) +static void multi_2x_serial_pci_class_initfn(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass); @@ -163,7 +157,8 @@ static void multi_2x_serial_pci_class_initfn(ObjectClass *klass, void *data) 
set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } -static void multi_4x_serial_pci_class_initfn(ObjectClass *klass, void *data) +static void multi_4x_serial_pci_class_initfn(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass); @@ -185,6 +180,7 @@ static void multi_serial_init(Object *o) size_t i, nports = multi_serial_get_port_count(PCI_DEVICE_GET_CLASS(dev)); for (i = 0; i < nports; i++) { + qemu_init_irq(&pms->irqs[i], multi_serial_irq_mux, pms, i); object_initialize_child(o, "serial[*]", &pms->state[i], TYPE_SERIAL); } } @@ -195,7 +191,7 @@ static const TypeInfo multi_2x_serial_pci_info = { .instance_size = sizeof(PCIMultiSerialState), .instance_init = multi_serial_init, .class_init = multi_2x_serial_pci_class_initfn, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -207,7 +203,7 @@ static const TypeInfo multi_4x_serial_pci_info = { .instance_size = sizeof(PCIMultiSerialState), .instance_init = multi_serial_init, .class_init = multi_4x_serial_pci_class_initfn, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/char/serial-pci.c b/hw/char/serial-pci.c index f8a1a94d0c2..46efabc4cbe 100644 --- a/hw/char/serial-pci.c +++ b/hw/char/serial-pci.c @@ -38,7 +38,6 @@ struct PCISerialState { PCIDevice dev; SerialState state; - uint8_t prog_if; }; #define TYPE_PCI_SERIAL "pci-serial" @@ -53,8 +52,8 @@ static void serial_pci_realize(PCIDevice *dev, Error **errp) return; } - pci->dev.config[PCI_CLASS_PROG] = pci->prog_if; - pci->dev.config[PCI_INTERRUPT_PIN] = 0x01; + pci->dev.config[PCI_CLASS_PROG] = 2; /* 16550 compatible */ + pci->dev.config[PCI_INTERRUPT_PIN] = 1; s->irq = pci_allocate_irq(&pci->dev); memory_region_init_io(&s->io, OBJECT(pci), &serial_io_ops, s, "serial", 8); @@ -81,12 +80,7 @@ static const VMStateDescription vmstate_pci_serial = { } }; -static Property serial_pci_properties[] = { - DEFINE_PROP_UINT8("prog_if", PCISerialState, prog_if, 0x02), - DEFINE_PROP_END_OF_LIST(), -}; - -static void serial_pci_class_initfn(ObjectClass *klass, void *data) +static void serial_pci_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass); @@ -97,7 +91,6 @@ static void serial_pci_class_initfn(ObjectClass *klass, void *data) pc->revision = 1; pc->class_id = PCI_CLASS_COMMUNICATION_SERIAL; dc->vmsd = &vmstate_pci_serial; - device_class_set_props(dc, serial_pci_properties); set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } @@ -116,7 +109,7 @@ static const TypeInfo serial_pci_info = { .instance_size = sizeof(PCISerialState), .instance_init = serial_pci_init, .class_init = serial_pci_class_initfn, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/char/serial.c b/hw/char/serial.c index d8b2db50829..03fec3fe75b 100644 --- a/hw/char/serial.c +++ b/hw/char/serial.c @@ -31,8 +31,8 @@ #include "chardev/char-serial.h" #include "qapi/error.h" #include "qemu/timer.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" +#include "system/reset.h" +#include "system/runstate.h" #include "qemu/error-report.h" #include "trace.h" #include "hw/qdev-properties.h" @@ -951,13 +951,6 @@ static void serial_unrealize(DeviceState *dev) qemu_unregister_reset(serial_reset, s); } -/* Change the main reference 
oscillator frequency. */ -void serial_set_frequency(SerialState *s, uint32_t frequency) -{ - s->baudbase = frequency; - serial_update_parameters(s); -} - const MemoryRegionOps serial_io_ops = { .read = serial_ioport_read, .write = serial_ioport_write, @@ -971,14 +964,13 @@ const MemoryRegionOps serial_io_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -static Property serial_properties[] = { +static const Property serial_properties[] = { DEFINE_PROP_CHR("chardev", SerialState, chr), DEFINE_PROP_UINT32("baudbase", SerialState, baudbase, 115200), DEFINE_PROP_BOOL("wakeup", SerialState, wakeup, false), - DEFINE_PROP_END_OF_LIST(), }; -static void serial_class_init(ObjectClass *klass, void* data) +static void serial_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -996,135 +988,9 @@ static const TypeInfo serial_info = { .class_init = serial_class_init, }; -/* Memory mapped interface */ -static uint64_t serial_mm_read(void *opaque, hwaddr addr, - unsigned size) -{ - SerialMM *s = SERIAL_MM(opaque); - return serial_ioport_read(&s->serial, addr >> s->regshift, 1); -} - -static void serial_mm_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - SerialMM *s = SERIAL_MM(opaque); - value &= 255; - serial_ioport_write(&s->serial, addr >> s->regshift, value, 1); -} - -static const MemoryRegionOps serial_mm_ops[3] = { - [DEVICE_NATIVE_ENDIAN] = { - .read = serial_mm_read, - .write = serial_mm_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid.max_access_size = 8, - .impl.max_access_size = 8, - }, - [DEVICE_LITTLE_ENDIAN] = { - .read = serial_mm_read, - .write = serial_mm_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .valid.max_access_size = 8, - .impl.max_access_size = 8, - }, - [DEVICE_BIG_ENDIAN] = { - .read = serial_mm_read, - .write = serial_mm_write, - .endianness = DEVICE_BIG_ENDIAN, - .valid.max_access_size = 8, - .impl.max_access_size = 8, - }, -}; - -static void serial_mm_realize(DeviceState *dev, Error **errp) -{ - SerialMM *smm = SERIAL_MM(dev); - SerialState *s = &smm->serial; - - if (!qdev_realize(DEVICE(s), NULL, errp)) { - return; - } - - memory_region_init_io(&s->io, OBJECT(dev), - &serial_mm_ops[smm->endianness], smm, "serial", - 8 << smm->regshift); - sysbus_init_mmio(SYS_BUS_DEVICE(smm), &s->io); - sysbus_init_irq(SYS_BUS_DEVICE(smm), &smm->serial.irq); -} - -static const VMStateDescription vmstate_serial_mm = { - .name = "serial", - .version_id = 3, - .minimum_version_id = 2, - .fields = (const VMStateField[]) { - VMSTATE_STRUCT(serial, SerialMM, 0, vmstate_serial, SerialState), - VMSTATE_END_OF_LIST() - } -}; - -SerialMM *serial_mm_init(MemoryRegion *address_space, - hwaddr base, int regshift, - qemu_irq irq, int baudbase, - Chardev *chr, enum device_endian end) -{ - SerialMM *smm = SERIAL_MM(qdev_new(TYPE_SERIAL_MM)); - MemoryRegion *mr; - - qdev_prop_set_uint8(DEVICE(smm), "regshift", regshift); - qdev_prop_set_uint32(DEVICE(smm), "baudbase", baudbase); - qdev_prop_set_chr(DEVICE(smm), "chardev", chr); - qdev_set_legacy_instance_id(DEVICE(smm), base, 2); - qdev_prop_set_uint8(DEVICE(smm), "endianness", end); - sysbus_realize_and_unref(SYS_BUS_DEVICE(smm), &error_fatal); - - sysbus_connect_irq(SYS_BUS_DEVICE(smm), 0, irq); - mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(smm), 0); - memory_region_add_subregion(address_space, base, mr); - - return smm; -} - -static void serial_mm_instance_init(Object *o) -{ - SerialMM *smm = SERIAL_MM(o); - - object_initialize_child(o, "serial", &smm->serial, TYPE_SERIAL); - - 
qdev_alias_all_properties(DEVICE(&smm->serial), o); -} - -static Property serial_mm_properties[] = { - /* - * Set the spacing between adjacent memory-mapped UART registers. - * Each register will be at (1 << regshift) bytes after the - * previous one. - */ - DEFINE_PROP_UINT8("regshift", SerialMM, regshift, 0), - DEFINE_PROP_UINT8("endianness", SerialMM, endianness, DEVICE_NATIVE_ENDIAN), - DEFINE_PROP_END_OF_LIST(), -}; - -static void serial_mm_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - device_class_set_props(dc, serial_mm_properties); - dc->realize = serial_mm_realize; - dc->vmsd = &vmstate_serial_mm; -} - -static const TypeInfo serial_mm_info = { - .name = TYPE_SERIAL_MM, - .parent = TYPE_SYS_BUS_DEVICE, - .class_init = serial_mm_class_init, - .instance_init = serial_mm_instance_init, - .instance_size = sizeof(SerialMM), -}; - static void serial_register_types(void) { type_register_static(&serial_info); - type_register_static(&serial_mm_info); } type_init(serial_register_types) diff --git a/hw/char/sh_serial.c b/hw/char/sh_serial.c index 355886ee3a1..30447fa018a 100644 --- a/hw/char/sh_serial.c +++ b/hw/char/sh_serial.c @@ -78,10 +78,6 @@ struct SHSerialState { qemu_irq bri; }; -typedef struct {} SHSerialStateClass; - -OBJECT_DEFINE_TYPE(SHSerialState, sh_serial, SH_SERIAL, SYS_BUS_DEVICE) - static void sh_serial_clear_fifo(SHSerialState *s) { memset(s->rx_fifo, 0, SH_RX_FIFO_LENGTH); @@ -320,7 +316,7 @@ static uint64_t sh_serial_read(void *opaque, hwaddr offs, static int sh_serial_can_receive(SHSerialState *s) { - return s->scr & (1 << 4); + return s->scr & (1 << 4) ? SH_RX_FIFO_LENGTH - s->rx_head : 0; } static void sh_serial_receive_break(SHSerialState *s) @@ -353,22 +349,20 @@ static void sh_serial_receive1(void *opaque, const uint8_t *buf, int size) if (s->feat & SH_SERIAL_FEAT_SCIF) { int i; for (i = 0; i < size; i++) { - if (s->rx_cnt < SH_RX_FIFO_LENGTH) { - s->rx_fifo[s->rx_head++] = buf[i]; - if (s->rx_head == SH_RX_FIFO_LENGTH) { - s->rx_head = 0; - } - s->rx_cnt++; - if (s->rx_cnt >= s->rtrg) { - s->flags |= SH_SERIAL_FLAG_RDF; - if (s->scr & (1 << 6) && s->rxi) { - timer_del(&s->fifo_timeout_timer); - qemu_set_irq(s->rxi, 1); - } - } else { - timer_mod(&s->fifo_timeout_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 15 * s->etu); + s->rx_fifo[s->rx_head++] = buf[i]; + if (s->rx_head == SH_RX_FIFO_LENGTH) { + s->rx_head = 0; + } + s->rx_cnt++; + if (s->rx_cnt >= s->rtrg) { + s->flags |= SH_SERIAL_FLAG_RDF; + if (s->scr & (1 << 6) && s->rxi) { + timer_del(&s->fifo_timeout_timer); + qemu_set_irq(s->rxi, 1); } + } else { + timer_mod(&s->fifo_timeout_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 15 * s->etu); } } } else { @@ -436,30 +430,37 @@ static void sh_serial_realize(DeviceState *d, Error **errp) s->etu = NANOSECONDS_PER_SECOND / 9600; } -static void sh_serial_finalize(Object *obj) +static void sh_serial_unrealize(DeviceState *dev) { - SHSerialState *s = SH_SERIAL(obj); + SHSerialState *s = SH_SERIAL(dev); timer_del(&s->fifo_timeout_timer); } -static void sh_serial_init(Object *obj) -{ -} - -static Property sh_serial_properties[] = { +static const Property sh_serial_properties[] = { DEFINE_PROP_CHR("chardev", SHSerialState, chr), DEFINE_PROP_UINT8("features", SHSerialState, feat, 0), - DEFINE_PROP_END_OF_LIST() }; -static void sh_serial_class_init(ObjectClass *oc, void *data) +static void sh_serial_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); device_class_set_props(dc, 
sh_serial_properties); dc->realize = sh_serial_realize; - dc->reset = sh_serial_reset; + dc->unrealize = sh_serial_unrealize; + device_class_set_legacy_reset(dc, sh_serial_reset); /* Reason: part of SuperH CPU/SoC, needs to be wired up */ dc->user_creatable = false; } + +static const TypeInfo sh_serial_types[] = { + { + .name = TYPE_SH_SERIAL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SHSerialState), + .class_init = sh_serial_class_init, + }, +}; + +DEFINE_TYPES(sh_serial_types) diff --git a/hw/char/shakti_uart.c b/hw/char/shakti_uart.c index 98b142c7df8..6e216edb0fc 100644 --- a/hw/char/shakti_uart.c +++ b/hw/char/shakti_uart.c @@ -157,15 +157,14 @@ static void shakti_uart_instance_init(Object *obj) sysbus_init_mmio(SYS_BUS_DEVICE(obj), &sus->mmio); } -static Property shakti_uart_properties[] = { +static const Property shakti_uart_properties[] = { DEFINE_PROP_CHR("chardev", ShaktiUartState, chr), - DEFINE_PROP_END_OF_LIST(), }; -static void shakti_uart_class_init(ObjectClass *klass, void *data) +static void shakti_uart_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = shakti_uart_reset; + device_class_set_legacy_reset(dc, shakti_uart_reset); dc->realize = shakti_uart_realize; device_class_set_props(dc, shakti_uart_properties); set_bit(DEVICE_CATEGORY_INPUT, dc->categories); diff --git a/hw/char/sifive_uart.c b/hw/char/sifive_uart.c index 7fc6787f690..9bc697a67b5 100644 --- a/hw/char/sifive_uart.c +++ b/hw/char/sifive_uart.c @@ -26,6 +26,8 @@ #include "hw/char/sifive_uart.h" #include "hw/qdev-properties-system.h" +#define TX_INTERRUPT_TRIGGER_DELAY_NS 100 + /* * Not yet implemented: * @@ -64,6 +66,74 @@ static void sifive_uart_update_irq(SiFiveUARTState *s) } } +static gboolean sifive_uart_xmit(void *do_not_use, GIOCondition cond, + void *opaque) +{ + SiFiveUARTState *s = opaque; + int ret; + const uint8_t *characters; + uint32_t numptr = 0; + + /* instant drain the fifo when there's no back-end */ + if (!qemu_chr_fe_backend_connected(&s->chr)) { + fifo8_reset(&s->tx_fifo); + return G_SOURCE_REMOVE; + } + + if (fifo8_is_empty(&s->tx_fifo)) { + return G_SOURCE_REMOVE; + } + + /* Don't pop the FIFO in case the write fails */ + characters = fifo8_peek_bufptr(&s->tx_fifo, + fifo8_num_used(&s->tx_fifo), &numptr); + ret = qemu_chr_fe_write(&s->chr, characters, numptr); + + if (ret >= 0) { + /* We wrote the data, actually pop the fifo */ + fifo8_pop_bufptr(&s->tx_fifo, ret, NULL); + } + + if (!fifo8_is_empty(&s->tx_fifo)) { + guint r = qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP, + sifive_uart_xmit, s); + if (!r) { + fifo8_reset(&s->tx_fifo); + return G_SOURCE_REMOVE; + } + } + + /* Clear the TX Full bit */ + if (!fifo8_is_full(&s->tx_fifo)) { + s->txfifo &= ~SIFIVE_UART_TXFIFO_FULL; + } + + sifive_uart_update_irq(s); + return G_SOURCE_REMOVE; +} + +static void sifive_uart_write_tx_fifo(SiFiveUARTState *s, const uint8_t *buf, + int size) +{ + uint64_t current_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + if (size > fifo8_num_free(&s->tx_fifo)) { + size = fifo8_num_free(&s->tx_fifo); + qemu_log_mask(LOG_GUEST_ERROR, "sifive_uart: TX FIFO overflow"); + } + + fifo8_push_all(&s->tx_fifo, buf, size); + + if (fifo8_is_full(&s->tx_fifo)) { + s->txfifo |= SIFIVE_UART_TXFIFO_FULL; + } + + if (!timer_pending(s->fifo_trigger_handle)) { + timer_mod(s->fifo_trigger_handle, current_time + + TX_INTERRUPT_TRIGGER_DELAY_NS); + } +} + static uint64_t sifive_uart_read(void *opaque, hwaddr addr, unsigned int size) { @@ -82,7 +152,7 @@ 
sifive_uart_read(void *opaque, hwaddr addr, unsigned int size) return 0x80000000; case SIFIVE_UART_TXFIFO: - return 0; /* Should check tx fifo */ + return s->txfifo; case SIFIVE_UART_IE: return s->ie; case SIFIVE_UART_IP: @@ -106,12 +176,11 @@ sifive_uart_write(void *opaque, hwaddr addr, { SiFiveUARTState *s = opaque; uint32_t value = val64; - unsigned char ch = value; + uint8_t ch = value; switch (addr) { case SIFIVE_UART_TXFIFO: - qemu_chr_fe_write(&s->chr, &ch, 1); - sifive_uart_update_irq(s); + sifive_uart_write_tx_fifo(s, &ch, 1); return; case SIFIVE_UART_IE: s->ie = val64; @@ -131,6 +200,13 @@ sifive_uart_write(void *opaque, hwaddr addr, __func__, (int)addr, (int)value); } +static void fifo_trigger_update(void *opaque) +{ + SiFiveUARTState *s = opaque; + + sifive_uart_xmit(NULL, G_IO_OUT, s); +} + static const MemoryRegionOps sifive_uart_ops = { .read = sifive_uart_read, .write = sifive_uart_write, @@ -177,9 +253,25 @@ static int sifive_uart_be_change(void *opaque) return 0; } -static Property sifive_uart_properties[] = { +static void sifive_uart_reset_enter(Object *obj, ResetType type) +{ + SiFiveUARTState *s = SIFIVE_UART(obj); + + s->txfifo = 0; + s->ie = 0; + s->ip = 0; + s->txctrl = 0; + s->rxctrl = 0; + s->div = 0; + + s->rx_fifo_len = 0; + + memset(s->rx_fifo, 0, SIFIVE_UART_RX_FIFO_SIZE); + fifo8_reset(&s->tx_fifo); +} + +static const Property sifive_uart_properties[] = { DEFINE_PROP_CHR("chardev", SiFiveUARTState, chr), - DEFINE_PROP_END_OF_LIST(), }; static void sifive_uart_init(Object *obj) @@ -197,21 +289,24 @@ static void sifive_uart_realize(DeviceState *dev, Error **errp) { SiFiveUARTState *s = SIFIVE_UART(dev); - qemu_chr_fe_set_handlers(&s->chr, sifive_uart_can_rx, sifive_uart_rx, - sifive_uart_event, sifive_uart_be_change, s, - NULL, true); + fifo8_create(&s->tx_fifo, SIFIVE_UART_TX_FIFO_SIZE); + + s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL, + fifo_trigger_update, s); + + if (qemu_chr_fe_backend_connected(&s->chr)) { + qemu_chr_fe_set_handlers(&s->chr, sifive_uart_can_rx, sifive_uart_rx, + sifive_uart_event, sifive_uart_be_change, s, + NULL, true); + } } -static void sifive_uart_reset_enter(Object *obj, ResetType type) +static void sifive_uart_unrealize(DeviceState *dev) { - SiFiveUARTState *s = SIFIVE_UART(obj); - s->ie = 0; - s->ip = 0; - s->txctrl = 0; - s->rxctrl = 0; - s->div = 0; - s->rx_fifo_len = 0; + SiFiveUARTState *s = SIFIVE_UART(dev); + + fifo8_destroy(&s->tx_fifo); } static void sifive_uart_reset_hold(Object *obj, ResetType type) @@ -222,8 +317,8 @@ static void sifive_uart_reset_hold(Object *obj, ResetType type) static const VMStateDescription vmstate_sifive_uart = { .name = TYPE_SIFIVE_UART, - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(rx_fifo, SiFiveUARTState, SIFIVE_UART_RX_FIFO_SIZE), @@ -233,17 +328,21 @@ static const VMStateDescription vmstate_sifive_uart = { VMSTATE_UINT32(txctrl, SiFiveUARTState), VMSTATE_UINT32(rxctrl, SiFiveUARTState), VMSTATE_UINT32(div, SiFiveUARTState), + VMSTATE_UINT32(txfifo, SiFiveUARTState), + VMSTATE_FIFO8(tx_fifo, SiFiveUARTState), + VMSTATE_TIMER_PTR(fifo_trigger_handle, SiFiveUARTState), VMSTATE_END_OF_LIST() }, }; -static void sifive_uart_class_init(ObjectClass *oc, void *data) +static void sifive_uart_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); ResettableClass *rc = RESETTABLE_CLASS(oc); dc->realize = sifive_uart_realize; + dc->unrealize = 
sifive_uart_unrealize; dc->vmsd = &vmstate_sifive_uart; rc->phases.enter = sifive_uart_reset_enter; rc->phases.hold = sifive_uart_reset_hold; diff --git a/hw/char/spapr_vty.c b/hw/char/spapr_vty.c index 3e23d9cbab3..fc8ea604f8d 100644 --- a/hw/char/spapr_vty.c +++ b/hw/char/spapr_vty.c @@ -163,10 +163,9 @@ void spapr_vty_create(SpaprVioBus *bus, Chardev *chardev) qdev_realize_and_unref(dev, &bus->bus, &error_fatal); } -static Property spapr_vty_properties[] = { +static const Property spapr_vty_properties[] = { DEFINE_SPAPR_PROPERTIES(SpaprVioVty, sdev), DEFINE_PROP_CHR("chardev", SpaprVioVty, chardev), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_spapr_vty = { @@ -183,7 +182,7 @@ static const VMStateDescription vmstate_spapr_vty = { }, }; -static void spapr_vty_class_init(ObjectClass *klass, void *data) +static void spapr_vty_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass); diff --git a/hw/char/stm32f2xx_usart.c b/hw/char/stm32f2xx_usart.c index 8753afeb2b8..45c30643a74 100644 --- a/hw/char/stm32f2xx_usart.c +++ b/hw/char/stm32f2xx_usart.c @@ -30,17 +30,7 @@ #include "qemu/log.h" #include "qemu/module.h" -#ifndef STM_USART_ERR_DEBUG -#define STM_USART_ERR_DEBUG 0 -#endif - -#define DB_PRINT_L(lvl, fmt, args...) do { \ - if (STM_USART_ERR_DEBUG >= lvl) { \ - qemu_log("%s: " fmt, __func__, ## args); \ - } \ -} while (0) - -#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args) +#include "trace.h" static int stm32f2xx_usart_can_receive(void *opaque) { @@ -67,10 +57,11 @@ static void stm32f2xx_update_irq(STM32F2XXUsartState *s) static void stm32f2xx_usart_receive(void *opaque, const uint8_t *buf, int size) { STM32F2XXUsartState *s = opaque; + DeviceState *d = DEVICE(s); if (!(s->usart_cr1 & USART_CR1_UE && s->usart_cr1 & USART_CR1_RE)) { /* USART not enabled - drop the chars */ - DB_PRINT("Dropping the chars\n"); + trace_stm32f2xx_usart_drop(d->id); return; } @@ -79,7 +70,7 @@ static void stm32f2xx_usart_receive(void *opaque, const uint8_t *buf, int size) stm32f2xx_update_irq(s); - DB_PRINT("Receiving: %c\n", s->usart_dr); + trace_stm32f2xx_usart_receive(d->id, *buf); } static void stm32f2xx_usart_reset(DeviceState *dev) @@ -101,49 +92,55 @@ static uint64_t stm32f2xx_usart_read(void *opaque, hwaddr addr, unsigned int size) { STM32F2XXUsartState *s = opaque; - uint64_t retvalue; - - DB_PRINT("Read 0x%"HWADDR_PRIx"\n", addr); + DeviceState *d = DEVICE(s); + uint64_t retvalue = 0; switch (addr) { case USART_SR: retvalue = s->usart_sr; qemu_chr_fe_accept_input(&s->chr); - return retvalue; + break; case USART_DR: - DB_PRINT("Value: 0x%" PRIx32 ", %c\n", s->usart_dr, (char) s->usart_dr); retvalue = s->usart_dr & 0x3FF; s->usart_sr &= ~USART_SR_RXNE; qemu_chr_fe_accept_input(&s->chr); stm32f2xx_update_irq(s); - return retvalue; + break; case USART_BRR: - return s->usart_brr; + retvalue = s->usart_brr; + break; case USART_CR1: - return s->usart_cr1; + retvalue = s->usart_cr1; + break; case USART_CR2: - return s->usart_cr2; + retvalue = s->usart_cr2; + break; case USART_CR3: - return s->usart_cr3; + retvalue = s->usart_cr3; + break; case USART_GTPR: - return s->usart_gtpr; + retvalue = s->usart_gtpr; + break; default: qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); return 0; } - return 0; + trace_stm32f2xx_usart_read(d->id, size, addr, retvalue); + + return retvalue; } static void stm32f2xx_usart_write(void *opaque, hwaddr addr, uint64_t 
val64, unsigned int size) { STM32F2XXUsartState *s = opaque; + DeviceState *d = DEVICE(s); uint32_t value = val64; unsigned char ch; - DB_PRINT("Write 0x%" PRIx32 ", 0x%"HWADDR_PRIx"\n", value, addr); + trace_stm32f2xx_usart_write(d->id, size, addr, val64); switch (addr) { case USART_SR: @@ -199,9 +196,8 @@ static const MemoryRegionOps stm32f2xx_usart_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -static Property stm32f2xx_usart_properties[] = { +static const Property stm32f2xx_usart_properties[] = { DEFINE_PROP_CHR("chardev", STM32F2XXUsartState, chr), - DEFINE_PROP_END_OF_LIST(), }; static void stm32f2xx_usart_init(Object *obj) @@ -224,11 +220,11 @@ static void stm32f2xx_usart_realize(DeviceState *dev, Error **errp) s, NULL, true); } -static void stm32f2xx_usart_class_init(ObjectClass *klass, void *data) +static void stm32f2xx_usart_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = stm32f2xx_usart_reset; + device_class_set_legacy_reset(dc, stm32f2xx_usart_reset); device_class_set_props(dc, stm32f2xx_usart_properties); dc->realize = stm32f2xx_usart_realize; } diff --git a/hw/char/stm32l4x5_usart.c b/hw/char/stm32l4x5_usart.c index fc5dcac0c45..afbe4bab29d 100644 --- a/hw/char/stm32l4x5_usart.c +++ b/hw/char/stm32l4x5_usart.c @@ -154,6 +154,21 @@ REG32(RDR, 0x24) REG32(TDR, 0x28) FIELD(TDR, TDR, 0, 9) +static void stm32l4x5_update_isr(Stm32l4x5UsartBaseState *s) +{ + if (s->cr1 & R_CR1_TE_MASK) { + s->isr |= R_ISR_TEACK_MASK; + } else { + s->isr &= ~R_ISR_TEACK_MASK; + } + + if (s->cr1 & R_CR1_RE_MASK) { + s->isr |= R_ISR_REACK_MASK; + } else { + s->isr &= ~R_ISR_REACK_MASK; + } +} + static void stm32l4x5_update_irq(Stm32l4x5UsartBaseState *s) { if (((s->isr & R_ISR_WUF_MASK) && (s->cr3 & R_CR3_WUFIE_MASK)) || @@ -456,6 +471,7 @@ static void stm32l4x5_usart_base_write(void *opaque, hwaddr addr, case A_CR1: s->cr1 = value; stm32l4x5_update_params(s); + stm32l4x5_update_isr(s); stm32l4x5_update_irq(s); return; case A_CR2: @@ -518,9 +534,8 @@ static const MemoryRegionOps stm32l4x5_usart_base_ops = { }, }; -static Property stm32l4x5_usart_base_properties[] = { +static const Property stm32l4x5_usart_base_properties[] = { DEFINE_PROP_CHR("chardev", Stm32l4x5UsartBaseState, chr), - DEFINE_PROP_END_OF_LIST(), }; static void stm32l4x5_usart_base_init(Object *obj) @@ -579,7 +594,8 @@ static void stm32l4x5_usart_base_realize(DeviceState *dev, Error **errp) s, NULL, true); } -static void stm32l4x5_usart_base_class_init(ObjectClass *klass, void *data) +static void stm32l4x5_usart_base_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -590,21 +606,21 @@ static void stm32l4x5_usart_base_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_stm32l4x5_usart_base; } -static void stm32l4x5_usart_class_init(ObjectClass *oc, void *data) +static void stm32l4x5_usart_class_init(ObjectClass *oc, const void *data) { Stm32l4x5UsartBaseClass *subc = STM32L4X5_USART_BASE_CLASS(oc); subc->type = STM32L4x5_USART; } -static void stm32l4x5_uart_class_init(ObjectClass *oc, void *data) +static void stm32l4x5_uart_class_init(ObjectClass *oc, const void *data) { Stm32l4x5UsartBaseClass *subc = STM32L4X5_USART_BASE_CLASS(oc); subc->type = STM32L4x5_UART; } -static void stm32l4x5_lpuart_class_init(ObjectClass *oc, void *data) +static void stm32l4x5_lpuart_class_init(ObjectClass *oc, const void *data) { Stm32l4x5UsartBaseClass *subc = STM32L4X5_USART_BASE_CLASS(oc); diff 
--git a/hw/char/terminal3270.c b/hw/char/terminal3270.c index 82e85fac2e6..d950c172921 100644 --- a/hw/char/terminal3270.c +++ b/hw/char/terminal3270.c @@ -283,9 +283,8 @@ static int write_payload_3270(EmulatedCcw3270Device *dev, uint8_t cmd) return (retval <= 0) ? 0 : get_cds(t)->count; } -static Property terminal_properties[] = { +static const Property terminal_properties[] = { DEFINE_PROP_CHR("chardev", Terminal3270, chr), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription terminal3270_vmstate = { @@ -293,7 +292,7 @@ static const VMStateDescription terminal3270_vmstate = { .unmigratable = 1, }; -static void terminal_class_init(ObjectClass *klass, void *data) +static void terminal_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); EmulatedCcw3270Class *ck = EMULATED_CCW_3270_CLASS(klass); diff --git a/hw/char/trace-events b/hw/char/trace-events index 8875758076c..05a33036c12 100644 --- a/hw/char/trace-events +++ b/hw/char/trace-events @@ -52,15 +52,21 @@ escc_sunkbd_event_out(int ch) "Translated keycode 0x%2.2x" escc_kbd_command(int val) "Command %d" escc_sunmouse_event(int dx, int dy, int buttons_state) "dx=%d dy=%d buttons=0x%01x" +# imx_serial.c +imx_serial_read(const char *chrname, uint64_t addr, uint64_t value) "%s:[0x%03" PRIu64 "] -> 0x%08" PRIx64 +imx_serial_write(const char *chrname, uint64_t addr, uint64_t value) "%s:[0x%03" PRIu64 "] <- 0x%08" PRIx64 +imx_serial_put_data(const char *chrname, uint32_t value) "%s: 0x%" PRIx32 + # pl011.c pl011_irq_state(int level) "irq state %d" pl011_read(uint32_t addr, uint32_t value, const char *regname) "addr 0x%03x value 0x%08x reg %s" -pl011_read_fifo(int read_count) "FIFO read, read_count now %d" +pl011_read_fifo(unsigned rx_fifo_used, size_t rx_fifo_depth) "RX FIFO read, used %u/%zu" pl011_write(uint32_t addr, uint32_t value, const char *regname) "addr 0x%03x value 0x%08x reg %s" -pl011_can_receive(uint32_t lcr, int read_count, int r) "LCR 0x%08x read_count %d returning %d" -pl011_put_fifo(uint32_t c, int read_count) "new char 0x%x read_count now %d" -pl011_put_fifo_full(void) "FIFO now full, RXFF set" +pl011_can_receive(uint32_t lcr, unsigned rx_fifo_used, size_t rx_fifo_depth, unsigned rx_fifo_available) "LCR 0x%02x, RX FIFO used %u/%zu, can_receive %u chars" +pl011_fifo_rx_put(uint32_t c, unsigned read_count, size_t rx_fifo_depth) "RX FIFO push char [0x%02x] %d/%zu depth used" +pl011_fifo_rx_full(void) "RX FIFO now full, RXFF set" pl011_baudrate_change(unsigned int baudrate, uint64_t clock, uint32_t ibrd, uint32_t fbrd) "new baudrate %u (clk: %" PRIu64 "hz, ibrd: %" PRIu32 ", fbrd: %" PRIu32 ")" +pl011_receive(int size) "recv %d chars" # cmsdk-apb-uart.c cmsdk_apb_uart_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB UART read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" @@ -125,3 +131,13 @@ xen_console_unrealize(unsigned int idx) "idx %u" xen_console_realize(unsigned int idx, const char *chrdev) "idx %u chrdev %s" xen_console_device_create(unsigned int idx) "idx %u" xen_console_device_destroy(unsigned int idx) "idx %u" + +# stm32f2xx_usart.c +stm32f2xx_usart_read(char *id, unsigned size, uint64_t ofs, uint64_t val) " %s size %d ofs 0x%02" PRIx64 " -> 0x%02" PRIx64 +stm32f2xx_usart_write(char *id, unsigned size, uint64_t ofs, uint64_t val) "%s size %d ofs 0x%02" PRIx64 " <- 0x%02" PRIx64 +stm32f2xx_usart_drop(char *id) " %s dropping the chars" +stm32f2xx_usart_receive(char *id, uint8_t chr) " %s receiving '%c'" + +# riscv_htif.c +htif_uart_write_to_host(uint8_t 
device, uint8_t cmd, uint64_t payload) "device: %u cmd: %02u payload: %016" PRIx64 +htif_uart_unknown_device_command(uint8_t device, uint8_t cmd, uint64_t payload) "device: %u cmd: %02u payload: %016" PRIx64 diff --git a/hw/char/virtio-console.c b/hw/char/virtio-console.c index dbe0b28e601..0932a3572b7 100644 --- a/hw/char/virtio-console.c +++ b/hw/char/virtio-console.c @@ -261,7 +261,7 @@ static void virtconsole_unrealize(DeviceState *dev) } } -static void virtconsole_class_init(ObjectClass *klass, void *data) +static void virtconsole_class_init(ObjectClass *klass, const void *data) { VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_CLASS(klass); @@ -274,12 +274,11 @@ static const TypeInfo virtconsole_info = { .class_init = virtconsole_class_init, }; -static Property virtserialport_properties[] = { +static const Property virtserialport_properties[] = { DEFINE_PROP_CHR("chardev", VirtConsole, chr), - DEFINE_PROP_END_OF_LIST(), }; -static void virtserialport_class_init(ObjectClass *klass, void *data) +static void virtserialport_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_CLASS(klass); diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c index 2094d213cdf..673c50f0be0 100644 --- a/hw/char/virtio-serial-bus.c +++ b/hw/char/virtio-serial-bus.c @@ -622,7 +622,7 @@ static void guest_reset(VirtIOSerial *vser) } } -static void set_status(VirtIODevice *vdev, uint8_t status) +static int set_status(VirtIODevice *vdev, uint8_t status) { VirtIOSerial *vser; VirtIOSerialPort *port; @@ -650,6 +650,7 @@ static void set_status(VirtIODevice *vdev, uint8_t status) vsc->enable_backend(port, vdev->vm_running); } } + return 0; } static void vser_reset(VirtIODevice *vdev) @@ -835,13 +836,12 @@ static int virtio_serial_load_device(VirtIODevice *vdev, QEMUFile *f, static void virtser_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent); -static Property virtser_props[] = { +static const Property virtser_props[] = { DEFINE_PROP_UINT32("nr", VirtIOSerialPort, id, VIRTIO_CONSOLE_BAD_ID), DEFINE_PROP_STRING("name", VirtIOSerialPort, name), - DEFINE_PROP_END_OF_LIST() }; -static void virtser_bus_class_init(ObjectClass *klass, void *data) +static void virtser_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); k->print_dev = virtser_bus_dev_print; @@ -1093,7 +1093,7 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp) QLIST_INSERT_HEAD(&vserdevices.devices, vser, next); } -static void virtio_serial_port_class_init(ObjectClass *klass, void *data) +static void virtio_serial_port_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); @@ -1153,15 +1153,14 @@ static const VMStateDescription vmstate_virtio_console = { }, }; -static Property virtio_serial_properties[] = { +static const Property virtio_serial_properties[] = { DEFINE_PROP_UINT32("max_ports", VirtIOSerial, serial.max_virtserial_ports, 31), DEFINE_PROP_BIT64("emergency-write", VirtIOSerial, host_features, VIRTIO_CONSOLE_F_EMERG_WRITE, true), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_serial_class_init(ObjectClass *klass, void *data) +static void virtio_serial_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); @@ -1190,7 +1189,7 @@ static const TypeInfo virtio_device_info = { .parent = TYPE_VIRTIO_DEVICE, .instance_size = sizeof(VirtIOSerial), .class_init = 
virtio_serial_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } } diff --git a/hw/char/xen_console.c b/hw/char/xen_console.c index 683c92aca1c..9c34a554bfa 100644 --- a/hw/char/xen_console.c +++ b/hw/char/xen_console.c @@ -25,7 +25,7 @@ #include #include "qapi/error.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "chardev/char-fe.h" #include "hw/xen/xen-backend.h" #include "hw/xen/xen-bus-helper.h" @@ -367,28 +367,28 @@ static char *xen_console_get_name(XenDevice *xendev, Error **errp) if (con->dev == -1) { XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); - char fe_path[XENSTORE_ABS_PATH_MAX + 1]; int idx = (xen_mode == XEN_EMULATE) ? 0 : 1; + Error *local_err = NULL; char *value; /* Theoretically we could go up to INT_MAX here but that's overkill */ while (idx < 100) { if (!idx) { - snprintf(fe_path, sizeof(fe_path), - "/local/domain/%u/console", xendev->frontend_id); + value = xs_node_read(xenbus->xsh, XBT_NULL, NULL, &local_err, + "/local/domain/%u/console", + xendev->frontend_id); } else { - snprintf(fe_path, sizeof(fe_path), - "/local/domain/%u/device/console/%u", - xendev->frontend_id, idx); + value = xs_node_read(xenbus->xsh, XBT_NULL, NULL, &local_err, + "/local/domain/%u/device/console/%u", + xendev->frontend_id, idx); } - value = qemu_xen_xs_read(xenbus->xsh, XBT_NULL, fe_path, NULL); if (!value) { if (errno == ENOENT) { con->dev = idx; + error_free(local_err); goto found; } - error_setg(errp, "cannot read %s: %s", fe_path, - strerror(errno)); + error_propagate(errp, local_err); return NULL; } free(value); @@ -487,13 +487,12 @@ static char *xen_console_get_frontend_path(XenDevice *xendev, Error **errp) } -static Property xen_console_properties[] = { +static const Property xen_console_properties[] = { DEFINE_PROP_CHR("chardev", XenConsole, chr), DEFINE_PROP_INT32("idx", XenConsole, dev, -1), - DEFINE_PROP_END_OF_LIST(), }; -static void xen_console_class_init(ObjectClass *class, void *data) +static void xen_console_class_init(ObjectClass *class, const void *data) { DeviceClass *dev_class = DEVICE_CLASS(class); XenDeviceClass *xendev_class = XEN_DEVICE_CLASS(class); @@ -551,7 +550,8 @@ static void xen_console_device_create(XenBackendInstance *backend, goto fail; } - if (xs_node_scanf(xsh, XBT_NULL, fe, "type", errp, "%ms", &type) != 1) { + type = xs_node_read(xsh, XBT_NULL, NULL, errp, "%s/%s", fe, "type"); + if (!type) { error_prepend(errp, "failed to read console device type: "); goto fail; } @@ -569,7 +569,8 @@ static void xen_console_device_create(XenBackendInstance *backend, snprintf(label, sizeof(label), "xencons%ld", number); - if (xs_node_scanf(xsh, XBT_NULL, fe, "output", NULL, "%ms", &output) == 1) { + output = xs_node_read(xsh, XBT_NULL, NULL, errp, "%s/%s", fe, "output"); + if (output) { /* * FIXME: sure we want to support implicit * muxed monitors here? @@ -580,19 +581,27 @@ static void xen_console_device_create(XenBackendInstance *backend, output); goto fail; } - } else if (number) { - cd = serial_hd(number); - if (!cd) { - error_prepend(errp, "console: No serial device #%ld found: ", - number); - goto fail; - } + } else if (errno != ENOENT) { + error_prepend(errp, "console: No valid chardev found: "); + goto fail; } else { - /* No 'output' node on primary console: use null. 
*/ - cd = qemu_chr_new(label, "null", NULL); - if (!cd) { - error_setg(errp, "console: failed to create null device"); - goto fail; + error_free(*errp); + *errp = NULL; + + if (number) { + cd = serial_hd(number); + if (!cd) { + error_setg(errp, "console: No serial device #%ld found", + number); + goto fail; + } + } else { + /* No 'output' node on primary console: use null. */ + cd = qemu_chr_new(label, "null", NULL); + if (!cd) { + error_setg(errp, "console: failed to create null device"); + goto fail; + } } } diff --git a/hw/char/xilinx_uartlite.c b/hw/char/xilinx_uartlite.c index 180bb97202c..8008171eea9 100644 --- a/hw/char/xilinx_uartlite.c +++ b/hw/char/xilinx_uartlite.c @@ -24,6 +24,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" +#include "qapi/error.h" #include "hw/char/xilinx_uartlite.h" #include "hw/irq.h" #include "hw/qdev-properties.h" @@ -57,6 +58,7 @@ struct XilinxUARTLite { SysBusDevice parent_obj; + EndianMode model_endianness; MemoryRegion mmio; CharBackend chr; qemu_irq irq; @@ -166,19 +168,22 @@ uart_write(void *opaque, hwaddr addr, uart_update_irq(s); } -static const MemoryRegionOps uart_ops = { - .read = uart_read, - .write = uart_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 1, - .max_access_size = 4 - } +static const MemoryRegionOps uart_ops[2] = { + [0 ... 1] = { + .read = uart_read, + .write = uart_write, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, + }, + [0].endianness = DEVICE_LITTLE_ENDIAN, + [1].endianness = DEVICE_BIG_ENDIAN, }; -static Property xilinx_uartlite_properties[] = { +static const Property xilinx_uartlite_properties[] = { + DEFINE_PROP_ENDIAN_NODEFAULT("endianness", XilinxUARTLite, model_endianness), DEFINE_PROP_CHR("chardev", XilinxUARTLite, chr), - DEFINE_PROP_END_OF_LIST(), }; static void uart_rx(void *opaque, const uint8_t *buf, int size) @@ -215,6 +220,15 @@ static void xilinx_uartlite_realize(DeviceState *dev, Error **errp) { XilinxUARTLite *s = XILINX_UARTLITE(dev); + if (s->model_endianness == ENDIAN_MODE_UNSPECIFIED) { + error_setg(errp, TYPE_XILINX_UARTLITE " property 'endianness'" + " must be set to 'big' or 'little'"); + return; + } + + memory_region_init_io(&s->mmio, OBJECT(dev), + &uart_ops[s->model_endianness == ENDIAN_MODE_BIG], + s, "xlnx.xps-uartlite", R_MAX * 4); qemu_chr_fe_set_handlers(&s->chr, uart_can_rx, uart_rx, uart_event, NULL, s, NULL, true); } @@ -224,17 +238,14 @@ static void xilinx_uartlite_init(Object *obj) XilinxUARTLite *s = XILINX_UARTLITE(obj); sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); - - memory_region_init_io(&s->mmio, obj, &uart_ops, s, - "xlnx.xps-uartlite", R_MAX * 4); sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); } -static void xilinx_uartlite_class_init(ObjectClass *klass, void *data) +static void xilinx_uartlite_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = xilinx_uartlite_reset; + device_class_set_legacy_reset(dc, xilinx_uartlite_reset); dc->realize = xilinx_uartlite_realize; device_class_set_props(dc, xilinx_uartlite_properties); } diff --git a/hw/core/Kconfig b/hw/core/Kconfig index 24411f59306..d1bdf765ee8 100644 --- a/hw/core/Kconfig +++ b/hw/core/Kconfig @@ -34,3 +34,7 @@ config REGISTER config SPLIT_IRQ bool + +config EIF + bool + depends on LIBCBOR && GNUTLS diff --git a/hw/core/bus.c b/hw/core/bus.c index b9d89495cdf..bddfc22d388 100644 --- a/hw/core/bus.c +++ b/hw/core/bus.c @@ -232,7 +232,7 @@ static char *default_bus_get_fw_dev_path(DeviceState *dev) return 
g_strdup(object_get_typename(OBJECT(dev))); } -static void bus_class_init(ObjectClass *class, void *data) +static void bus_class_init(ObjectClass *class, const void *data) { BusClass *bc = BUS_CLASS(class); ResettableClass *rc = RESETTABLE_CLASS(class); @@ -260,7 +260,7 @@ static const TypeInfo bus_info = { .instance_init = qbus_initfn, .instance_finalize = qbus_finalize, .class_init = bus_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_RESETTABLE_INTERFACE }, { } }, diff --git a/hw/core/clock.c b/hw/core/clock.c index e212865307b..9c906761e19 100644 --- a/hw/core/clock.c +++ b/hw/core/clock.c @@ -13,6 +13,8 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" +#include "qapi/visitor.h" +#include "system/qtest.h" #include "hw/clock.h" #include "trace.h" @@ -42,16 +44,12 @@ Clock *clock_new(Object *parent, const char *name) void clock_set_callback(Clock *clk, ClockCallback *cb, void *opaque, unsigned int events) { + assert(OBJECT(clk)->parent); clk->callback = cb; clk->callback_opaque = opaque; clk->callback_events = events; } -void clock_clear_callback(Clock *clk) -{ - clock_set_callback(clk, NULL, NULL, 0); -} - bool clock_set(Clock *clk, uint64_t period) { if (clk->period == period) { @@ -158,6 +156,25 @@ bool clock_set_mul_div(Clock *clk, uint32_t multiplier, uint32_t divider) return true; } +static void clock_period_prop_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Clock *clk = CLOCK(obj); + uint64_t period = clock_get(clk); + visit_type_uint64(v, name, &period, errp); +} + +static void clock_unparent(Object *obj) +{ + /* + * Callback are registered by the parent, which might die anytime after + * it's unparented the children. Avoid having a callback to a deleted + * object in case the clock is still referenced somewhere else (eg: by + * a clock output). 
+ */ + clock_set_callback(CLOCK(obj), NULL, NULL, 0); +} + static void clock_initfn(Object *obj) { Clock *clk = CLOCK(obj); @@ -166,6 +183,11 @@ static void clock_initfn(Object *obj) clk->divider = 1; QLIST_INIT(&clk->children); + + if (qtest_enabled()) { + object_property_add(obj, "qtest-clock-period", "uint64", + clock_period_prop_get, NULL, NULL, NULL); + } } static void clock_finalizefn(Object *obj) @@ -184,11 +206,17 @@ static void clock_finalizefn(Object *obj) g_free(clk->canonical_path); } +static void clock_class_init(ObjectClass *klass, const void *data) +{ + klass->unparent = clock_unparent; +} + static const TypeInfo clock_info = { .name = TYPE_CLOCK, .parent = TYPE_OBJECT, .instance_size = sizeof(Clock), .instance_init = clock_initfn, + .class_init = clock_class_init, .instance_finalize = clock_finalizefn, }; diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c index 7982ecd39a5..39e674aca21 100644 --- a/hw/core/cpu-common.c +++ b/hw/core/cpu-common.c @@ -21,12 +21,16 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "hw/core/cpu.h" -#include "sysemu/hw_accel.h" +#include "system/hw_accel.h" #include "qemu/log.h" #include "qemu/main-loop.h" +#include "qemu/lockcnt.h" +#include "qemu/error-report.h" +#include "qemu/qemu-print.h" +#include "qemu/target-info.h" #include "exec/log.h" #include "exec/gdbstub.h" -#include "sysemu/tcg.h" +#include "system/tcg.h" #include "hw/boards.h" #include "hw/qdev-properties.h" #include "trace.h" @@ -39,9 +43,7 @@ CPUState *cpu_by_arch_id(int64_t id) CPUState *cpu; CPU_FOREACH(cpu) { - CPUClass *cc = CPU_GET_CLASS(cpu); - - if (cc->get_arch_id(cpu) == id) { + if (cpu->cc->get_arch_id(cpu) == id) { return cpu; } } @@ -100,11 +102,9 @@ static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg) void cpu_dump_state(CPUState *cpu, FILE *f, int flags) { - CPUClass *cc = CPU_GET_CLASS(cpu); - - if (cc->dump_state) { + if (cpu->cc->dump_state) { cpu_synchronize_state(cpu); - cc->dump_state(cpu, f, flags); + cpu->cc->dump_state(cpu, f, flags); } } @@ -118,11 +118,10 @@ void cpu_reset(CPUState *cpu) static void cpu_common_reset_hold(Object *obj, ResetType type) { CPUState *cpu = CPU(obj); - CPUClass *cc = CPU_GET_CLASS(cpu); if (qemu_loglevel_mask(CPU_LOG_RESET)) { qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index); - log_cpu_state(cpu, cc->reset_dump_flags); + log_cpu_state(cpu, cpu->cc->reset_dump_flags); } cpu->interrupt_request = 0; @@ -138,11 +137,6 @@ static void cpu_common_reset_hold(Object *obj, ResetType type) cpu_exec_reset_hold(cpu); } -static bool cpu_common_has_work(CPUState *cs) -{ - return false; -} - ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model) { ObjectClass *oc; @@ -161,6 +155,21 @@ ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model) return NULL; } +char *cpu_model_from_type(const char *typename) +{ + g_autofree char *suffix = g_strdup_printf("-%s", target_cpu_type()); + + if (!object_class_by_name(typename)) { + return NULL; + } + + if (g_str_has_suffix(typename, suffix)) { + return g_strndup(typename, strlen(typename) - strlen(suffix)); + } + + return g_strdup(typename); +} + static void cpu_common_parse_features(const char *typename, char *features, Error **errp) { @@ -192,6 +201,49 @@ static void cpu_common_parse_features(const char *typename, char *features, } } +const char *parse_cpu_option(const char *cpu_option) +{ + ObjectClass *oc; + CPUClass *cc; + gchar **model_pieces; + const char *cpu_type; + + model_pieces = g_strsplit(cpu_option, ",", 2); 
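+    /*
+     * model_pieces[0] holds the CPU model name and model_pieces[1] the
+     * optional ",feature,..." suffix: resolve the model to a CPU class
+     * first, then hand the feature string to the class' parse_features().
+     */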
+ if (!model_pieces[0]) { + error_report("-cpu option cannot be empty"); + exit(1); + } + + oc = cpu_class_by_name(target_cpu_type(), model_pieces[0]); + if (oc == NULL) { + error_report("unable to find CPU model '%s'", model_pieces[0]); + g_strfreev(model_pieces); + exit(EXIT_FAILURE); + } + + cpu_type = object_class_get_name(oc); + cc = CPU_CLASS(oc); + cc->parse_features(cpu_type, model_pieces[1], &error_fatal); + g_strfreev(model_pieces); + return cpu_type; +} + +bool cpu_exec_realizefn(CPUState *cpu, Error **errp) +{ + if (!accel_cpu_common_realize(cpu, errp)) { + return false; + } + + gdb_init_cpu(cpu); + + /* Wait until cpu initialization complete before exposing cpu. */ + cpu_list_add(cpu); + + cpu_vmstate_register(cpu); + + return true; +} + static void cpu_common_realizefn(DeviceState *dev, Error **errp) { CPUState *cpu = CPU(dev); @@ -233,18 +285,34 @@ static void cpu_common_unrealizefn(DeviceState *dev) cpu_exec_unrealizefn(cpu); } +void cpu_exec_unrealizefn(CPUState *cpu) +{ + cpu_vmstate_unregister(cpu); + + cpu_list_remove(cpu); + /* + * Now that the vCPU has been removed from the RCU list, we can call + * accel_cpu_common_unrealize, which may free fields using call_rcu. + */ + accel_cpu_common_unrealize(cpu); +} + static void cpu_common_initfn(Object *obj) { CPUState *cpu = CPU(obj); - gdb_init_cpu(cpu); + cpu_exec_class_post_init(CPU_GET_CLASS(obj)); + + /* cache the cpu class for the hotpath */ + cpu->cc = CPU_GET_CLASS(cpu); + cpu->cpu_index = UNASSIGNED_CPU_INDEX; cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX; + cpu->as = NULL; + cpu->num_ases = 0; /* user-mode doesn't have configurable SMP topology */ /* the default value is changed by qemu_init_vcpu() for system-mode */ - cpu->nr_cores = 1; cpu->nr_threads = 1; - cpu->cflags_next_tb = -1; /* allocate storage for thread info, initialise condition variables */ cpu->thread = g_new0(QemuThread, 1); @@ -298,7 +366,7 @@ static int64_t cpu_common_get_arch_id(CPUState *cpu) return cpu->cpu_index; } -static void cpu_common_class_init(ObjectClass *klass, void *data) +static void cpu_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -306,7 +374,6 @@ static void cpu_common_class_init(ObjectClass *klass, void *data) k->parse_features = cpu_common_parse_features; k->get_arch_id = cpu_common_get_arch_id; - k->has_work = cpu_common_has_work; k->gdb_read_register = cpu_common_gdb_read_register; k->gdb_write_register = cpu_common_gdb_write_register; set_bit(DEVICE_CATEGORY_CPU, dc->categories); @@ -338,3 +405,32 @@ static void cpu_register_types(void) } type_init(cpu_register_types) + +static void cpu_list_entry(gpointer data, gpointer user_data) +{ + CPUClass *cc = CPU_CLASS(OBJECT_CLASS(data)); + const char *typename = object_class_get_name(OBJECT_CLASS(data)); + g_autofree char *model = cpu_model_from_type(typename); + + if (cc->deprecation_note) { + qemu_printf(" %s (deprecated)\n", model); + } else { + qemu_printf(" %s\n", model); + } +} + +void list_cpus(void) +{ + CPUClass *cc = CPU_CLASS(object_class_by_name(target_cpu_type())); + + if (cc->list_cpus) { + cc->list_cpus(); + } else { + GSList *list; + + list = object_class_get_list_sorted(TYPE_CPU, false); + qemu_printf("Available CPUs:\n"); + g_slist_foreach(list, cpu_list_entry, NULL); + g_slist_free(list); + } +} diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c deleted file mode 100644 index 2a9a2a4eb54..00000000000 --- a/hw/core/cpu-sysemu.c +++ /dev/null @@ -1,144 +0,0 
@@ -/* - * QEMU CPU model (system emulation specific) - * - * Copyright (c) 2012-2014 SUSE LINUX Products GmbH - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see - * - */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "exec/tswap.h" -#include "hw/core/sysemu-cpu-ops.h" - -bool cpu_paging_enabled(const CPUState *cpu) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - - if (cc->sysemu_ops->get_paging_enabled) { - return cc->sysemu_ops->get_paging_enabled(cpu); - } - - return false; -} - -bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, - Error **errp) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - - if (cc->sysemu_ops->get_memory_mapping) { - return cc->sysemu_ops->get_memory_mapping(cpu, list, errp); - } - - error_setg(errp, "Obtaining memory mappings is unsupported on this CPU."); - return false; -} - -hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, - MemTxAttrs *attrs) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - - if (cc->sysemu_ops->get_phys_page_attrs_debug) { - return cc->sysemu_ops->get_phys_page_attrs_debug(cpu, addr, attrs); - } - /* Fallback for CPUs which don't implement the _attrs_ hook */ - *attrs = MEMTXATTRS_UNSPECIFIED; - return cc->sysemu_ops->get_phys_page_debug(cpu, addr); -} - -hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr) -{ - MemTxAttrs attrs = {}; - - return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs); -} - -int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs) -{ - int ret = 0; - - if (cpu->cc->sysemu_ops->asidx_from_attrs) { - ret = cpu->cc->sysemu_ops->asidx_from_attrs(cpu, attrs); - assert(ret < cpu->num_ases && ret >= 0); - } - return ret; -} - -int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, - void *opaque) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - - if (!cc->sysemu_ops->write_elf32_qemunote) { - return 0; - } - return (*cc->sysemu_ops->write_elf32_qemunote)(f, cpu, opaque); -} - -int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, - int cpuid, void *opaque) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - - if (!cc->sysemu_ops->write_elf32_note) { - return -1; - } - return (*cc->sysemu_ops->write_elf32_note)(f, cpu, cpuid, opaque); -} - -int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, - void *opaque) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - - if (!cc->sysemu_ops->write_elf64_qemunote) { - return 0; - } - return (*cc->sysemu_ops->write_elf64_qemunote)(f, cpu, opaque); -} - -int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, - int cpuid, void *opaque) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - - if (!cc->sysemu_ops->write_elf64_note) { - return -1; - } - return (*cc->sysemu_ops->write_elf64_note)(f, cpu, cpuid, opaque); -} - -bool cpu_virtio_is_big_endian(CPUState *cpu) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - - if (cc->sysemu_ops->virtio_is_big_endian) { - return cc->sysemu_ops->virtio_is_big_endian(cpu); - } - return target_words_bigendian(); -} - -GuestPanicInformation 
*cpu_get_crash_info(CPUState *cpu) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - GuestPanicInformation *res = NULL; - - if (cc->sysemu_ops->get_crash_info) { - res = cc->sysemu_ops->get_crash_info(cpu); - } - return res; -} diff --git a/hw/core/cpu-system.c b/hw/core/cpu-system.c new file mode 100644 index 00000000000..a975405d3a0 --- /dev/null +++ b/hw/core/cpu-system.c @@ -0,0 +1,305 @@ +/* + * QEMU CPU model (system specific) + * + * Copyright (c) 2012-2014 SUSE LINUX Products GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "system/address-spaces.h" +#include "exec/cputlb.h" +#include "system/memory.h" +#include "exec/tb-flush.h" +#include "qemu/target-info.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "hw/core/sysemu-cpu-ops.h" +#include "migration/vmstate.h" +#include "system/tcg.h" + +bool cpu_has_work(CPUState *cpu) +{ + return cpu->cc->sysemu_ops->has_work(cpu); +} + +bool cpu_paging_enabled(const CPUState *cpu) +{ + if (cpu->cc->sysemu_ops->get_paging_enabled) { + return cpu->cc->sysemu_ops->get_paging_enabled(cpu); + } + + return false; +} + +bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, + Error **errp) +{ + if (cpu->cc->sysemu_ops->get_memory_mapping) { + return cpu->cc->sysemu_ops->get_memory_mapping(cpu, list, errp); + } + + error_setg(errp, "Obtaining memory mappings is unsupported on this CPU."); + return false; +} + +hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, + MemTxAttrs *attrs) +{ + hwaddr paddr; + + if (cpu->cc->sysemu_ops->get_phys_page_attrs_debug) { + paddr = cpu->cc->sysemu_ops->get_phys_page_attrs_debug(cpu, addr, + attrs); + } else { + /* Fallback for CPUs which don't implement the _attrs_ hook */ + *attrs = MEMTXATTRS_UNSPECIFIED; + paddr = cpu->cc->sysemu_ops->get_phys_page_debug(cpu, addr); + } + /* Indicate that this is a debug access. 
*/ + attrs->debug = 1; + return paddr; +} + +hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr) +{ + MemTxAttrs attrs = {}; + + return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs); +} + +int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs) +{ + int ret = 0; + + if (cpu->cc->sysemu_ops->asidx_from_attrs) { + ret = cpu->cc->sysemu_ops->asidx_from_attrs(cpu, attrs); + assert(ret < cpu->num_ases && ret >= 0); + } + return ret; +} + +int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, + void *opaque) +{ + if (!cpu->cc->sysemu_ops->write_elf32_qemunote) { + return 0; + } + return (*cpu->cc->sysemu_ops->write_elf32_qemunote)(f, cpu, opaque); +} + +int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, + int cpuid, void *opaque) +{ + if (!cpu->cc->sysemu_ops->write_elf32_note) { + return -1; + } + return (*cpu->cc->sysemu_ops->write_elf32_note)(f, cpu, cpuid, opaque); +} + +int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, + void *opaque) +{ + if (!cpu->cc->sysemu_ops->write_elf64_qemunote) { + return 0; + } + return (*cpu->cc->sysemu_ops->write_elf64_qemunote)(f, cpu, opaque); +} + +int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, + int cpuid, void *opaque) +{ + if (!cpu->cc->sysemu_ops->write_elf64_note) { + return -1; + } + return (*cpu->cc->sysemu_ops->write_elf64_note)(f, cpu, cpuid, opaque); +} + +bool cpu_virtio_is_big_endian(CPUState *cpu) +{ + if (cpu->cc->sysemu_ops->virtio_is_big_endian) { + return cpu->cc->sysemu_ops->virtio_is_big_endian(cpu); + } + return target_big_endian(); +} + +GuestPanicInformation *cpu_get_crash_info(CPUState *cpu) +{ + GuestPanicInformation *res = NULL; + + if (cpu->cc->sysemu_ops->get_crash_info) { + res = cpu->cc->sysemu_ops->get_crash_info(cpu); + } + return res; +} + +static const Property cpu_system_props[] = { + /* + * Create a memory property for system CPU object, so users can + * wire up its memory. The default if no link is set up is to use + * the system address space. + */ + DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION, + MemoryRegion *), +}; + +static bool cpu_get_start_powered_off(Object *obj, Error **errp) +{ + CPUState *cpu = CPU(obj); + return cpu->start_powered_off; +} + +static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp) +{ + CPUState *cpu = CPU(obj); + cpu->start_powered_off = value; +} + +void cpu_class_init_props(DeviceClass *dc) +{ + ObjectClass *oc = OBJECT_CLASS(dc); + + /* + * We can't use DEFINE_PROP_BOOL in the Property array for this + * property, because we want this to be settable after realize. + */ + object_class_property_add_bool(oc, "start-powered-off", + cpu_get_start_powered_off, + cpu_set_start_powered_off); + + device_class_set_props(dc, cpu_system_props); +} + +void cpu_exec_class_post_init(CPUClass *cc) +{ + /* Check mandatory SysemuCPUOps handlers */ + g_assert(cc->sysemu_ops->has_work); +} + +void cpu_exec_initfn(CPUState *cpu) +{ + cpu->memory = get_system_memory(); + object_ref(OBJECT(cpu->memory)); +} + +static int cpu_common_post_load(void *opaque, int version_id) +{ + if (tcg_enabled()) { + CPUState *cpu = opaque; + + /* + * 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the + * version_id is increased. + */ + cpu->interrupt_request &= ~0x01; + + tlb_flush(cpu); + + /* + * loadvm has just updated the content of RAM, bypassing the + * usual mechanisms that ensure we flush TBs for writes to + * memory we've translated code from. So we must flush all TBs, + * which will now be stale. 
+ */ + tb_flush(cpu); + } + + return 0; +} + +static int cpu_common_pre_load(void *opaque) +{ + CPUState *cpu = opaque; + + cpu->exception_index = -1; + + return 0; +} + +static bool cpu_common_exception_index_needed(void *opaque) +{ + CPUState *cpu = opaque; + + return tcg_enabled() && cpu->exception_index != -1; +} + +static const VMStateDescription vmstate_cpu_common_exception_index = { + .name = "cpu_common/exception_index", + .version_id = 1, + .minimum_version_id = 1, + .needed = cpu_common_exception_index_needed, + .fields = (const VMStateField[]) { + VMSTATE_INT32(exception_index, CPUState), + VMSTATE_END_OF_LIST() + } +}; + +static bool cpu_common_crash_occurred_needed(void *opaque) +{ + CPUState *cpu = opaque; + + return cpu->crash_occurred; +} + +static const VMStateDescription vmstate_cpu_common_crash_occurred = { + .name = "cpu_common/crash_occurred", + .version_id = 1, + .minimum_version_id = 1, + .needed = cpu_common_crash_occurred_needed, + .fields = (const VMStateField[]) { + VMSTATE_BOOL(crash_occurred, CPUState), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_cpu_common = { + .name = "cpu_common", + .version_id = 1, + .minimum_version_id = 1, + .pre_load = cpu_common_pre_load, + .post_load = cpu_common_post_load, + .fields = (const VMStateField[]) { + VMSTATE_UINT32(halted, CPUState), + VMSTATE_UINT32(interrupt_request, CPUState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * const []) { + &vmstate_cpu_common_exception_index, + &vmstate_cpu_common_crash_occurred, + NULL + } +}; + +void cpu_vmstate_register(CPUState *cpu) +{ + if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { + vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu); + } + if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) { + vmstate_register(NULL, cpu->cpu_index, + cpu->cc->sysemu_ops->legacy_vmsd, cpu); + } +} + +void cpu_vmstate_unregister(CPUState *cpu) +{ + if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) { + vmstate_unregister(NULL, cpu->cc->sysemu_ops->legacy_vmsd, cpu); + } + if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { + vmstate_unregister(NULL, &vmstate_cpu_common, cpu); + } +} diff --git a/hw/core/cpu-user.c b/hw/core/cpu-user.c new file mode 100644 index 00000000000..7176791851b --- /dev/null +++ b/hw/core/cpu-user.c @@ -0,0 +1,49 @@ +/* + * QEMU CPU model (user specific) + * + * Copyright (c) Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "hw/core/cpu.h" +#include "migration/vmstate.h" + +static const Property cpu_user_props[] = { + /* + * Create a property for the user-only object, so users can + * adjust prctl(PR_SET_UNALIGN) from the command-line. + * Has no effect if the target does not support the feature. 
+ */ + DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState, + prctl_unalign_sigbus, false), +}; + +void cpu_class_init_props(DeviceClass *dc) +{ + device_class_set_props(dc, cpu_user_props); +} + +void cpu_exec_class_post_init(CPUClass *cc) +{ + /* nothing to do */ +} + +void cpu_exec_initfn(CPUState *cpu) +{ + /* nothing to do */ +} + +void cpu_vmstate_register(CPUState *cpu) +{ + assert(qdev_get_vmsd(DEVICE(cpu)) == NULL || + qdev_get_vmsd(DEVICE(cpu))->unmigratable); +} + +void cpu_vmstate_unregister(CPUState *cpu) +{ + /* nothing to do */ +} diff --git a/hw/core/eif.c b/hw/core/eif.c new file mode 100644 index 00000000000..513caec6b49 --- /dev/null +++ b/hw/core/eif.c @@ -0,0 +1,709 @@ +/* + * EIF (Enclave Image Format) related helpers + * + * Copyright (c) 2024 Dorjoy Chowdhury + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/bswap.h" +#include "qapi/error.h" +#include "crypto/hash.h" +#include "crypto/x509-utils.h" +#include <zlib.h> /* for crc32 */ +#include <cbor.h> + +#include "hw/core/eif.h" + +#define MAX_SECTIONS 32 + +/* members are ordered according to field order in .eif file */ +typedef struct EifHeader { + uint8_t magic[4]; /* must be .eif in ascii i.e., [46, 101, 105, 102] */ + uint16_t version; + uint16_t flags; + uint64_t default_memory; + uint64_t default_cpus; + uint16_t reserved; + uint16_t section_cnt; + uint64_t section_offsets[MAX_SECTIONS]; + uint64_t section_sizes[MAX_SECTIONS]; + uint32_t unused; + uint32_t eif_crc32; +} QEMU_PACKED EifHeader; + +/* members are ordered according to field order in .eif file */ +typedef struct EifSectionHeader { + /* + * 0 = invalid, 1 = kernel, 2 = cmdline, 3 = ramdisk, 4 = signature, + * 5 = metadata + */ + uint16_t section_type; + uint16_t flags; + uint64_t section_size; +} QEMU_PACKED EifSectionHeader; + +enum EifSectionTypes { + EIF_SECTION_INVALID = 0, + EIF_SECTION_KERNEL = 1, + EIF_SECTION_CMDLINE = 2, + EIF_SECTION_RAMDISK = 3, + EIF_SECTION_SIGNATURE = 4, + EIF_SECTION_METADATA = 5, + EIF_SECTION_MAX = 6, +}; + +static const char *section_type_to_string(uint16_t type) +{ + const char *str; + switch (type) { + case EIF_SECTION_INVALID: + str = "invalid"; + break; + case EIF_SECTION_KERNEL: + str = "kernel"; + break; + case EIF_SECTION_CMDLINE: + str = "cmdline"; + break; + case EIF_SECTION_RAMDISK: + str = "ramdisk"; + break; + case EIF_SECTION_SIGNATURE: + str = "signature"; + break; + case EIF_SECTION_METADATA: + str = "metadata"; + break; + default: + str = "unknown"; + break; + } + + return str; +} + +static bool read_eif_header(FILE *f, EifHeader *header, uint32_t *crc, + Error **errp) +{ + size_t got; + size_t header_size = sizeof(*header); + + got = fread(header, 1, header_size, f); + if (got != header_size) { + error_setg(errp, "Failed to read EIF header"); + return false; + } + + if (memcmp(header->magic, ".eif", 4) != 0) { + error_setg(errp, "Invalid EIF image. 
Magic mismatch."); + return false; + } + + /* Exclude header->eif_crc32 field from CRC calculation */ + *crc = crc32(*crc, (uint8_t *)header, header_size - 4); + + header->version = be16_to_cpu(header->version); + header->flags = be16_to_cpu(header->flags); + header->default_memory = be64_to_cpu(header->default_memory); + header->default_cpus = be64_to_cpu(header->default_cpus); + header->reserved = be16_to_cpu(header->reserved); + header->section_cnt = be16_to_cpu(header->section_cnt); + + for (int i = 0; i < MAX_SECTIONS; ++i) { + header->section_offsets[i] = be64_to_cpu(header->section_offsets[i]); + } + + for (int i = 0; i < MAX_SECTIONS; ++i) { + header->section_sizes[i] = be64_to_cpu(header->section_sizes[i]); + if (header->section_sizes[i] > SSIZE_MAX) { + error_setg(errp, "Invalid EIF image. Section size out of bounds"); + return false; + } + } + + header->unused = be32_to_cpu(header->unused); + header->eif_crc32 = be32_to_cpu(header->eif_crc32); + return true; +} + +static bool read_eif_section_header(FILE *f, EifSectionHeader *section_header, + uint32_t *crc, Error **errp) +{ + size_t got; + size_t section_header_size = sizeof(*section_header); + + got = fread(section_header, 1, section_header_size, f); + if (got != section_header_size) { + error_setg(errp, "Failed to read EIF section header"); + return false; + } + + *crc = crc32(*crc, (uint8_t *)section_header, section_header_size); + + section_header->section_type = be16_to_cpu(section_header->section_type); + section_header->flags = be16_to_cpu(section_header->flags); + section_header->section_size = be64_to_cpu(section_header->section_size); + return true; +} + +/* + * Upon success, the caller is responsible for unlinking and freeing *tmp_path. + */ +static bool get_tmp_file(const char *template, char **tmp_path, Error **errp) +{ + int tmp_fd; + + *tmp_path = NULL; + tmp_fd = g_file_open_tmp(template, tmp_path, NULL); + if (tmp_fd < 0 || *tmp_path == NULL) { + error_setg(errp, "Failed to create temporary file for template %s", + template); + return false; + } + + close(tmp_fd); + return true; +} + +static void safe_fclose(FILE *f) +{ + if (f) { + fclose(f); + } +} + +static void safe_unlink(char *f) +{ + if (f) { + unlink(f); + } +} + +/* + * Upon success, the caller is reponsible for unlinking and freeing *kernel_path + */ +static bool read_eif_kernel(FILE *f, uint64_t size, char **kernel_path, + QCryptoHash *hash0, QCryptoHash *hash1, + uint32_t *crc, Error **errp) +{ + size_t got; + FILE *tmp_file = NULL; + uint8_t *kernel = g_try_malloc(size); + if (!kernel) { + error_setg(errp, "Out of memory reading kernel section"); + goto cleanup; + } + + *kernel_path = NULL; + if (!get_tmp_file("eif-kernel-XXXXXX", kernel_path, errp)) { + goto cleanup; + } + + tmp_file = fopen(*kernel_path, "wb"); + if (tmp_file == NULL) { + error_setg_errno(errp, errno, "Failed to open temporary file %s", + *kernel_path); + goto cleanup; + } + + got = fread(kernel, 1, size, f); + if ((uint64_t) got != size) { + error_setg(errp, "Failed to read EIF kernel section data"); + goto cleanup; + } + + got = fwrite(kernel, 1, size, tmp_file); + if ((uint64_t) got != size) { + error_setg(errp, "Failed to write EIF kernel section data to temporary" + " file"); + goto cleanup; + } + + *crc = crc32(*crc, kernel, size); + if (qcrypto_hash_update(hash0, (char *)kernel, size, errp) != 0 || + qcrypto_hash_update(hash1, (char *)kernel, size, errp) != 0) { + goto cleanup; + } + g_free(kernel); + fclose(tmp_file); + + return true; + + cleanup: + safe_fclose(tmp_file); 
+ + safe_unlink(*kernel_path); + g_free(*kernel_path); + *kernel_path = NULL; + + g_free(kernel); + return false; +} + +static bool read_eif_cmdline(FILE *f, uint64_t size, char *cmdline, + QCryptoHash *hash0, QCryptoHash *hash1, + uint32_t *crc, Error **errp) +{ + size_t got = fread(cmdline, 1, size, f); + if ((uint64_t) got != size) { + error_setg(errp, "Failed to read EIF cmdline section data"); + return false; + } + + *crc = crc32(*crc, (uint8_t *)cmdline, size); + if (qcrypto_hash_update(hash0, cmdline, size, errp) != 0 || + qcrypto_hash_update(hash1, cmdline, size, errp) != 0) { + return false; + } + return true; +} + +static bool read_eif_ramdisk(FILE *eif, FILE *initrd, uint64_t size, + QCryptoHash *hash0, QCryptoHash *h, uint32_t *crc, + Error **errp) +{ + size_t got; + bool ret = false; + uint8_t *ramdisk = g_try_malloc(size); + if (!ramdisk) { + error_setg(errp, "Out of memory reading initrd section"); + goto cleanup; + } + + got = fread(ramdisk, 1, size, eif); + if ((uint64_t) got != size) { + error_setg(errp, "Failed to read EIF ramdisk section data"); + goto cleanup; + } + + got = fwrite(ramdisk, 1, size, initrd); + if ((uint64_t) got != size) { + error_setg(errp, "Failed to write EIF ramdisk data to temporary file"); + goto cleanup; + } + + *crc = crc32(*crc, ramdisk, size); + if (qcrypto_hash_update(hash0, (char *)ramdisk, size, errp) != 0 || + qcrypto_hash_update(h, (char *)ramdisk, size, errp) != 0) { + goto cleanup; + } + ret = true; + + cleanup: + g_free(ramdisk); + return ret; +} + +static bool get_signature_fingerprint_sha384(FILE *eif, uint64_t size, + uint8_t *sha384, + uint32_t *crc, + Error **errp) +{ + size_t got; + g_autofree uint8_t *sig = NULL; + g_autofree uint8_t *cert = NULL; + cbor_item_t *item = NULL; + cbor_item_t *pcr0 = NULL; + size_t len; + size_t hash_len = QCRYPTO_HASH_DIGEST_LEN_SHA384; + struct cbor_pair *pair; + struct cbor_load_result result; + bool ret = false; + + sig = g_try_malloc(size); + if (!sig) { + error_setg(errp, "Out of memory reading signature section"); + goto cleanup; + } + + got = fread(sig, 1, size, eif); + if ((uint64_t) got != size) { + error_setg(errp, "Failed to read EIF signature section data"); + goto cleanup; + } + + *crc = crc32(*crc, sig, size); + + item = cbor_load(sig, size, &result); + if (!item || result.error.code != CBOR_ERR_NONE) { + error_setg(errp, "Failed to load signature section data as CBOR"); + goto cleanup; + } + if (!cbor_isa_array(item) || cbor_array_size(item) < 1) { + error_setg(errp, "Invalid signature CBOR"); + goto cleanup; + } + pcr0 = cbor_array_get(item, 0); + if (!pcr0) { + error_setg(errp, "Failed to get PCR0 signature"); + goto cleanup; + } + if (!cbor_isa_map(pcr0) || cbor_map_size(pcr0) != 2) { + error_setg(errp, "Invalid signature CBOR"); + goto cleanup; + } + pair = cbor_map_handle(pcr0); + if (!cbor_isa_string(pair->key) || cbor_string_length(pair->key) != 19 || + memcmp(cbor_string_handle(pair->key), "signing_certificate", 19) != 0) { + error_setg(errp, "Invalid signautre CBOR"); + goto cleanup; + } + if (!cbor_isa_array(pair->value)) { + error_setg(errp, "Invalid signature CBOR"); + goto cleanup; + } + len = cbor_array_size(pair->value); + if (len == 0) { + error_setg(errp, "Invalid signature CBOR"); + goto cleanup; + } + cert = g_try_malloc(len); + if (!cert) { + error_setg(errp, "Out of memory reading signature section"); + goto cleanup; + } + + for (int i = 0; i < len; ++i) { + cbor_item_t *tmp = cbor_array_get(pair->value, i); + if (!tmp) { + error_setg(errp, "Invalid signature 
CBOR"); + goto cleanup; + } + if (!cbor_isa_uint(tmp) || cbor_int_get_width(tmp) != CBOR_INT_8) { + cbor_decref(&tmp); + error_setg(errp, "Invalid signature CBOR"); + goto cleanup; + } + cert[i] = cbor_get_uint8(tmp); + cbor_decref(&tmp); + } + + if (qcrypto_get_x509_cert_fingerprint(cert, len, QCRYPTO_HASH_ALGO_SHA384, + sha384, &hash_len, errp)) { + goto cleanup; + } + + ret = true; + + cleanup: + if (pcr0) { + cbor_decref(&pcr0); + } + if (item) { + cbor_decref(&item); + } + return ret; +} + +/* Expects file to have offset 0 before this function is called */ +static long get_file_size(FILE *f, Error **errp) +{ + long size; + + if (fseek(f, 0, SEEK_END) != 0) { + error_setg_errno(errp, errno, "Failed to seek to the end of file"); + return -1; + } + + size = ftell(f); + if (size == -1) { + error_setg_errno(errp, errno, "Failed to get offset"); + return -1; + } + + if (fseek(f, 0, SEEK_SET) != 0) { + error_setg_errno(errp, errno, "Failed to seek back to the start"); + return -1; + } + + return size; +} + +static bool get_SHA384_hash(QCryptoHash *h, uint8_t *hash, Error **errp) +{ + size_t hash_len = QCRYPTO_HASH_DIGEST_LEN_SHA384; + return qcrypto_hash_finalize_bytes(h, &hash, &hash_len, errp) == 0; +} + +/* + * Upon success, the caller is reponsible for unlinking and freeing + * *kernel_path, *initrd_path and freeing *cmdline. + */ +bool read_eif_file(const char *eif_path, const char *machine_initrd, + char **kernel_path, char **initrd_path, char **cmdline, + uint8_t *image_hash, uint8_t *bootstrap_hash, + uint8_t *app_hash, uint8_t *fingerprint_hash, + bool *signature_found, Error **errp) +{ + FILE *f = NULL; + FILE *machine_initrd_f = NULL; + FILE *initrd_path_f = NULL; + long machine_initrd_size; + uint32_t crc = 0; + EifHeader eif_header; + bool seen_sections[EIF_SECTION_MAX] = {false}; + /* kernel + ramdisks + cmdline SHA384 hash */ + g_autoptr(QCryptoHash) hash0 = NULL; + /* kernel + boot ramdisk + cmdline SHA384 hash */ + g_autoptr(QCryptoHash) hash1 = NULL; + /* application ramdisk(s) SHA384 hash */ + g_autoptr(QCryptoHash) hash2 = NULL; + + *signature_found = false; + *kernel_path = *initrd_path = *cmdline = NULL; + + hash0 = qcrypto_hash_new(QCRYPTO_HASH_ALGO_SHA384, errp); + if (!hash0) { + goto cleanup; + } + hash1 = qcrypto_hash_new(QCRYPTO_HASH_ALGO_SHA384, errp); + if (!hash1) { + goto cleanup; + } + hash2 = qcrypto_hash_new(QCRYPTO_HASH_ALGO_SHA384, errp); + if (!hash2) { + goto cleanup; + } + + f = fopen(eif_path, "rb"); + if (f == NULL) { + error_setg_errno(errp, errno, "Failed to open %s", eif_path); + goto cleanup; + } + + if (!read_eif_header(f, &eif_header, &crc, errp)) { + goto cleanup; + } + + if (eif_header.version < 4) { + error_setg(errp, "Expected EIF version 4 or greater"); + goto cleanup; + } + + if (eif_header.flags != 0) { + error_setg(errp, "Expected EIF flags to be 0"); + goto cleanup; + } + + if (eif_header.section_cnt > MAX_SECTIONS) { + error_setg(errp, "EIF header section count must not be greater than " + "%d but found %d", MAX_SECTIONS, eif_header.section_cnt); + goto cleanup; + } + + for (int i = 0; i < eif_header.section_cnt; ++i) { + EifSectionHeader hdr; + uint16_t section_type; + + if (eif_header.section_offsets[i] > OFF_MAX) { + error_setg(errp, "Invalid EIF image. 
Section offset out of bounds"); + goto cleanup; + } + if (fseek(f, eif_header.section_offsets[i], SEEK_SET) != 0) { + error_setg_errno(errp, errno, "Failed to offset to %" PRIu64 " in EIF file", + eif_header.section_offsets[i]); + goto cleanup; + } + + if (!read_eif_section_header(f, &hdr, &crc, errp)) { + goto cleanup; + } + + if (hdr.flags != 0) { + error_setg(errp, "Expected EIF section header flags to be 0"); + goto cleanup; + } + + if (eif_header.section_sizes[i] != hdr.section_size) { + error_setg(errp, "EIF section size mismatch between header and " + "section header: header %" PRIu64 ", section header %" PRIu64, + eif_header.section_sizes[i], + hdr.section_size); + goto cleanup; + } + + section_type = hdr.section_type; + + switch (section_type) { + case EIF_SECTION_KERNEL: + if (seen_sections[EIF_SECTION_KERNEL]) { + error_setg(errp, "Invalid EIF image. More than 1 kernel " + "section"); + goto cleanup; + } + + if (!read_eif_kernel(f, hdr.section_size, kernel_path, hash0, + hash1, &crc, errp)) { + goto cleanup; + } + + break; + case EIF_SECTION_CMDLINE: + { + uint64_t size; + if (seen_sections[EIF_SECTION_CMDLINE]) { + error_setg(errp, "Invalid EIF image. More than 1 cmdline " + "section"); + goto cleanup; + } + size = hdr.section_size; + *cmdline = g_try_malloc(size + 1); + if (!*cmdline) { + error_setg(errp, "Out of memory reading command line section"); + goto cleanup; + } + if (!read_eif_cmdline(f, size, *cmdline, hash0, hash1, &crc, + errp)) { + goto cleanup; + } + (*cmdline)[size] = '\0'; + + break; + } + case EIF_SECTION_RAMDISK: + { + QCryptoHash *h = hash2; + if (!seen_sections[EIF_SECTION_RAMDISK]) { + /* + * If this is the first time we are seeing a ramdisk section, + * we need to: + * 1) hash it into bootstrap (hash1) instead of app (hash2) + * along with image (hash0) + * 2) create the initrd temporary file. + */ + h = hash1; + if (!get_tmp_file("eif-initrd-XXXXXX", initrd_path, errp)) { + goto cleanup; + } + initrd_path_f = fopen(*initrd_path, "wb"); + if (initrd_path_f == NULL) { + error_setg_errno(errp, errno, "Failed to open file %s", + *initrd_path); + goto cleanup; + } + } + + if (!read_eif_ramdisk(f, initrd_path_f, hdr.section_size, hash0, h, + &crc, errp)) { + goto cleanup; + } + + break; + } + case EIF_SECTION_SIGNATURE: + *signature_found = true; + if (!get_signature_fingerprint_sha384(f, hdr.section_size, + fingerprint_hash, &crc, + errp)) { + goto cleanup; + } + break; + default: + /* other sections including invalid or unknown sections */ + { + uint8_t *buf; + size_t got; + uint64_t size = hdr.section_size; + buf = g_try_malloc(size); + if (!buf) { + error_setg(errp, "Out of memory reading unknown section"); + goto cleanup; + } + got = fread(buf, 1, size, f); + if ((uint64_t) got != size) { + g_free(buf); + error_setg(errp, "Failed to read EIF %s section data", + section_type_to_string(section_type)); + goto cleanup; + } + crc = crc32(crc, buf, size); + g_free(buf); + break; + } + } + + if (section_type < EIF_SECTION_MAX) { + seen_sections[section_type] = true; + } + } + + if (!seen_sections[EIF_SECTION_KERNEL]) { + error_setg(errp, "Invalid EIF image. No kernel section."); + goto cleanup; + } + if (!seen_sections[EIF_SECTION_CMDLINE]) { + error_setg(errp, "Invalid EIF image. No cmdline section."); + goto cleanup; + } + if (!seen_sections[EIF_SECTION_RAMDISK]) { + error_setg(errp, "Invalid EIF image. No ramdisk section."); + goto cleanup; + } + + if (eif_header.eif_crc32 != crc) { + error_setg(errp, "CRC mismatch. 
Expected %u but header has %u.", + crc, eif_header.eif_crc32); + goto cleanup; + } + + /* + * Let's append the initrd file from "-initrd" option if any. Although + * we pass the crc pointer to read_eif_ramdisk, it is not useful anymore. + * We have already done the crc mismatch check above this code. + */ + if (machine_initrd) { + machine_initrd_f = fopen(machine_initrd, "rb"); + if (machine_initrd_f == NULL) { + error_setg_errno(errp, errno, "Failed to open initrd file %s", + machine_initrd); + goto cleanup; + } + + machine_initrd_size = get_file_size(machine_initrd_f, errp); + if (machine_initrd_size == -1) { + goto cleanup; + } + + if (!read_eif_ramdisk(machine_initrd_f, initrd_path_f, + machine_initrd_size, hash0, hash2, &crc, errp)) { + goto cleanup; + } + } + + if (!get_SHA384_hash(hash0, image_hash, errp)) { + goto cleanup; + } + if (!get_SHA384_hash(hash1, bootstrap_hash, errp)) { + goto cleanup; + } + if (!get_SHA384_hash(hash2, app_hash, errp)) { + goto cleanup; + } + + fclose(f); + fclose(initrd_path_f); + safe_fclose(machine_initrd_f); + return true; + + cleanup: + safe_fclose(f); + safe_fclose(initrd_path_f); + safe_fclose(machine_initrd_f); + + safe_unlink(*kernel_path); + g_free(*kernel_path); + *kernel_path = NULL; + + safe_unlink(*initrd_path); + g_free(*initrd_path); + *initrd_path = NULL; + + g_free(*cmdline); + *cmdline = NULL; + + return false; +} diff --git a/hw/core/eif.h b/hw/core/eif.h new file mode 100644 index 00000000000..fed3cb55140 --- /dev/null +++ b/hw/core/eif.h @@ -0,0 +1,22 @@ +/* + * EIF (Enclave Image Format) related helpers + * + * Copyright (c) 2024 Dorjoy Chowdhury + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#ifndef HW_CORE_EIF_H +#define HW_CORE_EIF_H + +bool read_eif_file(const char *eif_path, const char *machine_initrd, + char **kernel_path, char **initrd_path, + char **kernel_cmdline, uint8_t *image_sha384, + uint8_t *bootstrap_sha384, uint8_t *app_sha384, + uint8_t *fingerprint_sha384, bool *signature_found, + Error **errp); + +#endif + diff --git a/hw/core/generic-loader.c b/hw/core/generic-loader.c index ea8628b8926..e72bbde2a23 100644 --- a/hw/core/generic-loader.c +++ b/hw/core/generic-loader.c @@ -31,9 +31,8 @@ */ #include "qemu/osdep.h" -#include "exec/tswap.h" -#include "sysemu/dma.h" -#include "sysemu/reset.h" +#include "system/dma.h" +#include "system/reset.h" #include "hw/boards.h" #include "hw/loader.h" #include "hw/qdev-properties.h" @@ -48,11 +47,8 @@ static void generic_loader_reset(void *opaque) GenericLoaderState *s = GENERIC_LOADER(opaque); if (s->set_pc) { - CPUClass *cc = CPU_GET_CLASS(s->cpu); cpu_reset(s->cpu); - if (cc) { - cc->set_pc(s->cpu, s->addr); - } + cpu_set_pc(s->cpu, s->addr); } if (s->data_len) { @@ -66,7 +62,6 @@ static void generic_loader_realize(DeviceState *dev, Error **errp) { GenericLoaderState *s = GENERIC_LOADER(dev); hwaddr entry; - int big_endian; ssize_t size = 0; s->set_pc = false; @@ -134,14 +129,12 @@ static void generic_loader_realize(DeviceState *dev, Error **errp) s->cpu = first_cpu; } - big_endian = target_words_bigendian(); - if (s->file) { AddressSpace *as = s->cpu ? 
s->cpu->as : NULL; if (!s->force_raw) { size = load_elf_as(s->file, NULL, NULL, NULL, &entry, NULL, NULL, - NULL, big_endian, 0, 0, 0, as); + NULL, ELFDATANONE, 0, 0, 0, as); if (size < 0) { size = load_uimage_as(s->file, &entry, NULL, NULL, NULL, NULL, @@ -179,7 +172,7 @@ static void generic_loader_unrealize(DeviceState *dev) qemu_unregister_reset(generic_loader_reset, dev); } -static Property generic_loader_props[] = { +static const Property generic_loader_props[] = { DEFINE_PROP_UINT64("addr", GenericLoaderState, addr, 0), DEFINE_PROP_UINT64("data", GenericLoaderState, data, 0), DEFINE_PROP_UINT8("data-len", GenericLoaderState, data_len, 0), @@ -187,10 +180,9 @@ static Property generic_loader_props[] = { DEFINE_PROP_UINT32("cpu-num", GenericLoaderState, cpu_num, CPU_NONE), DEFINE_PROP_BOOL("force-raw", GenericLoaderState, force_raw, false), DEFINE_PROP_STRING("file", GenericLoaderState, file), - DEFINE_PROP_END_OF_LIST(), }; -static void generic_loader_class_init(ObjectClass *klass, void *data) +static void generic_loader_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/core/gpio.c b/hw/core/gpio.c index 80d07a6ec99..6e32a8eec61 100644 --- a/hw/core/gpio.c +++ b/hw/core/gpio.c @@ -121,8 +121,7 @@ void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n, name ? name : "unnamed-gpio-out", n); if (input_pin && !OBJECT(input_pin)->parent) { /* We need a name for object_property_set_link to work */ - object_property_add_child(container_get(qdev_get_machine(), - "/unattached"), + object_property_add_child(machine_get_container("unattached"), "non-qdev-gpio[*]", OBJECT(input_pin)); } object_property_set_link(OBJECT(dev), propname, diff --git a/hw/core/guest-loader.c b/hw/core/guest-loader.c index 391c875a297..3db89d7a2e1 100644 --- a/hw/core/guest-loader.c +++ b/hw/core/guest-loader.c @@ -26,13 +26,13 @@ #include "qemu/osdep.h" #include "hw/core/cpu.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/loader.h" #include "hw/qdev-properties.h" #include "qapi/error.h" #include "qemu/module.h" #include "guest-loader.h" -#include "sysemu/device_tree.h" +#include "system/device_tree.h" #include "hw/boards.h" /* @@ -111,15 +111,14 @@ static void guest_loader_realize(DeviceState *dev, Error **errp) loader_insert_platform_data(s, size, errp); } -static Property guest_loader_props[] = { +static const Property guest_loader_props[] = { DEFINE_PROP_UINT64("addr", GuestLoaderState, addr, 0), DEFINE_PROP_STRING("kernel", GuestLoaderState, kernel), DEFINE_PROP_STRING("bootargs", GuestLoaderState, args), DEFINE_PROP_STRING("initrd", GuestLoaderState, initrd), - DEFINE_PROP_END_OF_LIST(), }; -static void guest_loader_class_init(ObjectClass *klass, void *data) +static void guest_loader_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/core/irq.c b/hw/core/irq.c index 3f14e2dda74..6dd8d47bd6e 100644 --- a/hw/core/irq.c +++ b/hw/core/irq.c @@ -26,16 +26,6 @@ #include "hw/irq.h" #include "qom/object.h" -OBJECT_DECLARE_SIMPLE_TYPE(IRQState, IRQ) - -struct IRQState { - Object parent_obj; - - qemu_irq_handler handler; - void *opaque; - int n; -}; - void qemu_set_irq(qemu_irq irq, int level) { if (!irq) @@ -44,6 +34,29 @@ void qemu_set_irq(qemu_irq irq, int level) irq->handler(irq->opaque, irq->n, level); } +static void init_irq_fields(IRQState *irq, qemu_irq_handler handler, + void *opaque, int n) +{ + irq->handler = handler; + irq->opaque = opaque; + irq->n = n; +} + +void 
qemu_init_irq(IRQState *irq, qemu_irq_handler handler, void *opaque, + int n) +{ + object_initialize(irq, sizeof(*irq), TYPE_IRQ); + init_irq_fields(irq, handler, opaque, n); +} + +void qemu_init_irqs(IRQState irq[], size_t count, + qemu_irq_handler handler, void *opaque) +{ + for (size_t i = 0; i < count; i++) { + qemu_init_irq(&irq[i], handler, opaque, i); + } +} + qemu_irq *qemu_extend_irqs(qemu_irq *old, int n_old, qemu_irq_handler handler, void *opaque, int n) { @@ -67,13 +80,8 @@ qemu_irq *qemu_allocate_irqs(qemu_irq_handler handler, void *opaque, int n) qemu_irq qemu_allocate_irq(qemu_irq_handler handler, void *opaque, int n) { - IRQState *irq; - - irq = IRQ(object_new(TYPE_IRQ)); - irq->handler = handler; - irq->opaque = opaque; - irq->n = n; - + IRQState *irq = IRQ(object_new(TYPE_IRQ)); + init_irq_fields(irq, handler, opaque, n); + return irq; } diff --git a/hw/core/loader-fit.c b/hw/core/loader-fit.c index 7ccc9d5fbc5..2dea485ae06 100644 --- a/hw/core/loader-fit.c +++ b/hw/core/loader-fit.c @@ -20,20 +20,20 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/units.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/loader.h" #include "hw/loader-fit.h" #include "qemu/cutils.h" #include "qemu/error-report.h" -#include "sysemu/device_tree.h" +#include "system/device_tree.h" #include <libfdt.h> #include <zlib.h> #define FIT_LOADER_MAX_PATH (128) -static const void *fit_load_image_alloc(const void *itb, const char *name, - int *poff, size_t *psz, Error **errp) +static void *fit_load_image_alloc(const void *itb, const char *name, + int *poff, size_t *psz, Error **errp) { const void *data; const char *comp; @@ -80,11 +80,11 @@ static const void *fit_load_image_alloc(const void *itb, const char *name, return NULL; } - data = g_realloc(uncomp_data, uncomp_len); + uncomp_data = g_realloc(uncomp_data, uncomp_len); if (psz) { *psz = uncomp_len; } - return data; + return uncomp_data; } error_setg(errp, "unknown compression '%s'", comp); @@ -177,13 +177,12 @@ static int fit_load_kernel(const struct fit_loader *ldr, const void *itb, static int fit_load_fdt(const struct fit_loader *ldr, const void *itb, int cfg, void *opaque, const void *match_data, - hwaddr kernel_end, Error **errp) + hwaddr kernel_end, void **pfdt, Error **errp) { ERRP_GUARD(); Error *err = NULL; const char *name; - const void *data; - const void *load_data; + void *data; hwaddr load_addr; int img_off; size_t sz; @@ -194,7 +193,7 @@ static int fit_load_fdt(const struct fit_loader *ldr, const void *itb, return 0; } - load_data = data = fit_load_image_alloc(itb, name, &img_off, &sz, errp); + data = fit_load_image_alloc(itb, name, &img_off, &sz, errp); if (!data) { error_prepend(errp, "unable to load FDT image from FIT: "); return -EINVAL; @@ -211,19 +210,23 @@ static int fit_load_fdt(const struct fit_loader *ldr, const void *itb, } if (ldr->fdt_filter) { - load_data = ldr->fdt_filter(opaque, data, match_data, &load_addr); + void *filtered_data; + + filtered_data = ldr->fdt_filter(opaque, data, match_data, &load_addr); + if (filtered_data != data) { + g_free(data); + data = filtered_data; + } } load_addr = ldr->addr_to_phys(opaque, load_addr); - sz = fdt_totalsize(load_data); - rom_add_blob_fixed(name, load_data, sz, load_addr); + sz = fdt_totalsize(data); + rom_add_blob_fixed(name, data, sz, load_addr); - ret = 0; + *pfdt = data; + return 0; out: g_free((void *) data); - if (data != load_data) { - g_free((void *) load_data); - } return ret; } @@ -259,7 +262,8 @@ static bool fit_cfg_compatible(const void *itb, int cfg, 
const char *compat) return ret; } -int load_fit(const struct fit_loader *ldr, const char *filename, void *opaque) +int load_fit(const struct fit_loader *ldr, const char *filename, + void **pfdt, void *opaque) { Error *err = NULL; const struct fit_loader_match *match; @@ -323,7 +327,7 @@ int load_fit(const struct fit_loader *ldr, const char *filename, void *opaque) goto out; } - ret = fit_load_fdt(ldr, itb, cfg_off, opaque, match_data, kernel_end, + ret = fit_load_fdt(ldr, itb, cfg_off, opaque, match_data, kernel_end, pfdt, &err); if (ret) { error_report_err(err); diff --git a/hw/core/loader.c b/hw/core/loader.c index 31593a11717..e7056ba4bd3 100644 --- a/hw/core/loader.c +++ b/hw/core/loader.c @@ -51,17 +51,18 @@ #include "trace.h" #include "hw/hw.h" #include "disas/disas.h" +#include "migration/cpr.h" #include "migration/vmstate.h" #include "monitor/monitor.h" -#include "sysemu/reset.h" -#include "sysemu/sysemu.h" +#include "system/reset.h" +#include "system/system.h" #include "uboot_image.h" #include "hw/loader.h" #include "hw/nvram/fw_cfg.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/boards.h" #include "qemu/cutils.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "tcg/debuginfo.h" #include <zlib.h> @@ -144,7 +145,7 @@ ssize_t load_image_mr(const char *filename, MemoryRegion *mr) { ssize_t size; - if (!memory_access_is_direct(mr, false)) { + if (!memory_access_is_direct(mr, false, MEMTXATTRS_UNSPECIFIED)) { /* Can only load an image into RAM or ROM */ return -1; } @@ -225,7 +226,7 @@ static void bswap_ahdr(struct exec *e) ssize_t load_aout(const char *filename, hwaddr addr, int max_sz, - int bswap_needed, hwaddr target_page_size) + bool big_endian, hwaddr target_page_size) { int fd; ssize_t size, ret; @@ -240,7 +241,7 @@ ssize_t load_aout(const char *filename, hwaddr addr, int max_sz, if (size < 0) goto fail; - if (bswap_needed) { + if (big_endian != HOST_BIG_ENDIAN) { bswap_ahdr(&e); } @@ -409,11 +410,11 @@ ssize_t load_elf(const char *filename, uint64_t (*elf_note_fn)(void *, void *, bool), uint64_t (*translate_fn)(void *, uint64_t), void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, + uint64_t *highaddr, uint32_t *pflags, int elf_data_order, int elf_machine, int clear_lsb, int data_swab) { return load_elf_as(filename, elf_note_fn, translate_fn, translate_opaque, - pentry, lowaddr, highaddr, pflags, big_endian, + pentry, lowaddr, highaddr, pflags, elf_data_order, elf_machine, clear_lsb, data_swab, NULL); } @@ -422,29 +423,15 @@ ssize_t load_elf_as(const char *filename, uint64_t (*elf_note_fn)(void *, void *, bool), uint64_t (*translate_fn)(void *, uint64_t), void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, + uint64_t *highaddr, uint32_t *pflags, int elf_data_order, int elf_machine, int clear_lsb, int data_swab, AddressSpace *as) -{ - return load_elf_ram(filename, elf_note_fn, translate_fn, translate_opaque, - pentry, lowaddr, highaddr, pflags, big_endian, - elf_machine, clear_lsb, data_swab, as, true); -} - -/* return < 0 if error, otherwise the number of bytes loaded in memory */ -ssize_t load_elf_ram(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, - uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags, - int big_endian, int elf_machine, int clear_lsb, - int data_swab, AddressSpace *as, bool load_rom) 
{ return load_elf_ram_sym(filename, elf_note_fn, translate_fn, translate_opaque, - pentry, lowaddr, highaddr, pflags, big_endian, + pentry, lowaddr, highaddr, pflags, elf_data_order, elf_machine, clear_lsb, data_swab, as, - load_rom, NULL); + true, NULL); } /* return < 0 if error, otherwise the number of bytes loaded in memory */ @@ -453,11 +440,12 @@ ssize_t load_elf_ram_sym(const char *filename, uint64_t (*translate_fn)(void *, uint64_t), void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, uint64_t *highaddr, - uint32_t *pflags, int big_endian, int elf_machine, + uint32_t *pflags, int elf_data_order, int elf_machine, int clear_lsb, int data_swab, AddressSpace *as, bool load_rom, symbol_fn_t sym_cb) { - int fd, data_order, target_data_order, must_swab; + const int host_data_order = HOST_BIG_ENDIAN ? ELFDATA2MSB : ELFDATA2LSB; + int fd, must_swab; ssize_t ret = ELF_LOAD_FAILED; uint8_t e_ident[EI_NIDENT]; @@ -475,23 +463,14 @@ ssize_t load_elf_ram_sym(const char *filename, ret = ELF_LOAD_NOT_ELF; goto fail; } -#if HOST_BIG_ENDIAN - data_order = ELFDATA2MSB; -#else - data_order = ELFDATA2LSB; -#endif - must_swab = data_order != e_ident[EI_DATA]; - if (big_endian) { - target_data_order = ELFDATA2MSB; - } else { - target_data_order = ELFDATA2LSB; - } - if (target_data_order != e_ident[EI_DATA]) { + if (elf_data_order != ELFDATANONE && elf_data_order != e_ident[EI_DATA]) { ret = ELF_LOAD_WRONG_ENDIAN; goto fail; } + must_swab = host_data_order != e_ident[EI_DATA]; + lseek(fd, 0, SEEK_SET); if (e_ident[EI_CLASS] == ELFCLASS64) { ret = load_elf64(filename, fd, elf_note_fn, @@ -886,11 +865,11 @@ struct linux_efi_zboot_header { * * If the image is not a Linux EFI zboot image, do nothing and return success. */ -ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size) +ssize_t unpack_efi_zboot_image(uint8_t **buffer, ssize_t *size) { const struct linux_efi_zboot_header *header; uint8_t *data = NULL; - int ploff, plsize; + ssize_t ploff, plsize; ssize_t bytes; /* ignore if this is too small to be a EFI zboot image */ @@ -1051,7 +1030,9 @@ static void *rom_set_mr(Rom *rom, Object *owner, const char *name, bool ro) vmstate_register_ram_global(rom->mr); data = memory_region_get_ram_ptr(rom->mr); - memcpy(data, rom->data, rom->datasize); + if (!cpr_is_incoming()) { + memcpy(data, rom->data, rom->datasize); + } return data; } @@ -1352,20 +1333,6 @@ void rom_set_fw(FWCfgState *f) fw_cfg = f; } -void rom_set_order_override(int order) -{ - if (!fw_cfg) - return; - fw_cfg_set_order_override(fw_cfg, order); -} - -void rom_reset_order_override(void) -{ - if (!fw_cfg) - return; - fw_cfg_reset_order_override(fw_cfg); -} - void rom_transaction_begin(void) { Rom *rom; @@ -1429,7 +1396,7 @@ typedef struct RomSec { * work, but this way saves a little work later by avoiding * dealing with "gaps" of 0 length. 
*/ -static gint sort_secs(gconstpointer a, gconstpointer b) +static gint sort_secs(gconstpointer a, gconstpointer b, gpointer d) { RomSec *ra = (RomSec *) a; RomSec *rb = (RomSec *) b; @@ -1482,7 +1449,7 @@ RomGap rom_find_largest_gap_between(hwaddr base, size_t size) /* sentinel */ secs = add_romsec_to_list(secs, base + size, 1); - secs = g_list_sort(secs, sort_secs); + secs = g_list_sort_with_data(secs, sort_secs, NULL); for (it = g_list_first(secs); it; it = g_list_next(it)) { cand = (RomSec *) it->data; diff --git a/hw/core/machine-hmp-cmds.c b/hw/core/machine-hmp-cmds.c index 8701f00cc7c..3a612e2232d 100644 --- a/hw/core/machine-hmp-cmds.c +++ b/hw/core/machine-hmp-cmds.c @@ -18,11 +18,12 @@ #include "monitor/monitor.h" #include "qapi/error.h" #include "qapi/qapi-builtin-visit.h" +#include "qapi/qapi-commands-accelerator.h" #include "qapi/qapi-commands-machine.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/string-output-visitor.h" #include "qemu/error-report.h" -#include "sysemu/numa.h" +#include "system/numa.h" #include "hw/boards.h" void hmp_info_cpus(Monitor *mon, const QDict *qdict) @@ -32,6 +33,7 @@ void hmp_info_cpus(Monitor *mon, const QDict *qdict) cpu_list = qmp_query_cpus_fast(NULL); for (cpu = cpu_list; cpu; cpu = cpu->next) { + g_autofree char *cpu_model = cpu_model_from_type(cpu->value->qom_type); int active = ' '; if (cpu->value->cpu_index == monitor_get_cpu_index(mon)) { @@ -40,7 +42,8 @@ void hmp_info_cpus(Monitor *mon, const QDict *qdict) monitor_printf(mon, "%c CPU #%" PRId64 ":", active, cpu->value->cpu_index); - monitor_printf(mon, " thread_id=%" PRId64 "\n", cpu->value->thread_id); + monitor_printf(mon, " thread_id=%" PRId64 " model=%s\n", + cpu->value->thread_id, cpu_model); } qapi_free_CpuInfoFastList(cpu_list); diff --git a/hw/core/machine-qmp-cmds.c b/hw/core/machine-qmp-cmds.c index 130217da8f9..6aca1a626e6 100644 --- a/hw/core/machine-qmp-cmds.c +++ b/hw/core/machine-qmp-cmds.c @@ -14,17 +14,20 @@ #include "hw/mem/memory-device.h" #include "qapi/error.h" #include "qapi/qapi-builtin-visit.h" +#include "qapi/qapi-commands-accelerator.h" #include "qapi/qapi-commands-machine.h" -#include "qapi/qmp/qobject.h" +#include "qobject/qobject.h" #include "qapi/qobject-input-visitor.h" #include "qapi/type-helpers.h" #include "qemu/uuid.h" +#include "qemu/target-info-qapi.h" #include "qom/qom-qobject.h" -#include "sysemu/hostmem.h" -#include "sysemu/hw_accel.h" -#include "sysemu/numa.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/hostmem.h" +#include "system/hw_accel.h" +#include "system/numa.h" +#include "system/runstate.h" +#include "system/system.h" +#include "hw/s390x/storage-keys.h" /* * fast means: we NEVER interrupt vCPU threads to retrieve @@ -35,8 +38,7 @@ CpuInfoFastList *qmp_query_cpus_fast(Error **errp) MachineState *ms = MACHINE(qdev_get_machine()); MachineClass *mc = MACHINE_GET_CLASS(ms); CpuInfoFastList *head = NULL, **tail = &head; - SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, target_name(), - -1, &error_abort); + SysEmuTarget target = target_arch(); CPUState *cpu; CPU_FOREACH(cpu) { @@ -45,6 +47,7 @@ CpuInfoFastList *qmp_query_cpus_fast(Error **errp) value->cpu_index = cpu->cpu_index; value->qom_path = object_get_canonical_path(OBJECT(cpu)); value->thread_id = cpu->thread_id; + value->qom_type = g_strdup(object_get_typename(OBJECT(cpu))); if (mc->cpu_index_to_instance_props) { CpuInstanceProperties *props; @@ -72,6 +75,7 @@ MachineInfoList *qmp_query_machines(bool 
has_compat_props, bool compat_props, for (el = machines; el; el = el->next) { MachineClass *mc = el->data; + const char *default_cpu_type = machine_class_default_cpu_type(mc); MachineInfo *info; info = g_malloc0(sizeof(*info)); @@ -90,8 +94,8 @@ MachineInfoList *qmp_query_machines(bool has_compat_props, bool compat_props, info->numa_mem_supported = mc->numa_mem_supported; info->deprecated = !!mc->deprecation_reason; info->acpi = !!object_class_property_find(OBJECT_CLASS(mc), "acpi"); - if (mc->default_cpu_type) { - info->default_cpu_type = g_strdup(mc->default_cpu_type); + if (default_cpu_type) { + info->default_cpu_type = g_strdup(default_cpu_type); } if (mc->default_ram_id) { info->default_ram_id = g_strdup(mc->default_ram_id); @@ -132,12 +136,11 @@ CurrentMachineParams *qmp_query_current_machine(Error **errp) return params; } -TargetInfo *qmp_query_target(Error **errp) +QemuTargetInfo *qmp_query_target(Error **errp) { - TargetInfo *info = g_malloc0(sizeof(*info)); + QemuTargetInfo *info = g_malloc0(sizeof(*info)); - info->arch = qapi_enum_parse(&SysEmuTarget_lookup, target_name(), -1, - &error_abort); + info->arch = target_arch(); return info; } @@ -406,3 +409,16 @@ GuidInfo *qmp_query_vm_generation_id(Error **errp) info->guid = qemu_uuid_unparse_strdup(&vms->guid); return info; } + +void qmp_dump_skeys(const char *filename, Error **errp) +{ + ObjectClass *mc = object_get_class(qdev_get_machine()); + ObjectClass *oc = object_class_dynamic_cast(mc, TYPE_DUMP_SKEYS_INTERFACE); + + if (!oc) { + error_setg(errp, "Storage keys information not available" + " for this architecture"); + return; + } + DUMP_SKEYS_INTERFACE_CLASS(oc)->qmp_dump_skeys(filename, errp); +} diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c index 5d8d7edcbd3..0be0ac044c2 100644 --- a/hw/core/machine-smp.c +++ b/hw/core/machine-smp.c @@ -261,6 +261,82 @@ void machine_parse_smp_config(MachineState *ms, } } +static bool machine_check_topo_support(MachineState *ms, + CpuTopologyLevel topo, + Error **errp) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + + if ((topo == CPU_TOPOLOGY_LEVEL_MODULE && !mc->smp_props.modules_supported) || + (topo == CPU_TOPOLOGY_LEVEL_CLUSTER && !mc->smp_props.clusters_supported) || + (topo == CPU_TOPOLOGY_LEVEL_DIE && !mc->smp_props.dies_supported) || + (topo == CPU_TOPOLOGY_LEVEL_BOOK && !mc->smp_props.books_supported) || + (topo == CPU_TOPOLOGY_LEVEL_DRAWER && !mc->smp_props.drawers_supported)) { + error_setg(errp, + "Invalid topology level: %s. " + "The topology level is not supported by this machine", + CpuTopologyLevel_str(topo)); + return false; + } + + return true; +} + +bool machine_parse_smp_cache(MachineState *ms, + const SmpCachePropertiesList *caches, + Error **errp) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const SmpCachePropertiesList *node; + DECLARE_BITMAP(caches_bitmap, CACHE_LEVEL_AND_TYPE__MAX); + + bitmap_zero(caches_bitmap, CACHE_LEVEL_AND_TYPE__MAX); + for (node = caches; node; node = node->next) { + /* Prohibit users from repeating settings. */ + if (test_bit(node->value->cache, caches_bitmap)) { + error_setg(errp, + "Invalid cache properties: %s. 
" + "The cache properties are duplicated", + CacheLevelAndType_str(node->value->cache)); + return false; + } + + machine_set_cache_topo_level(ms, node->value->cache, + node->value->topology); + set_bit(node->value->cache, caches_bitmap); + } + + for (int i = 0; i < CACHE_LEVEL_AND_TYPE__MAX; i++) { + const SmpCacheProperties *props = &ms->smp_cache.props[i]; + + /* + * Reject non "default" topology level if the cache isn't + * supported by the machine. + */ + if (props->topology != CPU_TOPOLOGY_LEVEL_DEFAULT && + !mc->smp_props.cache_supported[props->cache]) { + error_setg(errp, + "%s cache topology not supported by this machine", + CacheLevelAndType_str(props->cache)); + return false; + } + + if (props->topology == CPU_TOPOLOGY_LEVEL_THREAD) { + error_setg(errp, + "%s level cache not supported by this machine", + CpuTopologyLevel_str(props->topology)); + return false; + } + + if (!machine_check_topo_support(ms, props->topology, errp)) { + return false; + } + } + + mc->smp_props.has_caches = true; + return true; +} + unsigned int machine_topo_get_cores_per_socket(const MachineState *ms) { return ms->smp.cores * ms->smp.modules * ms->smp.clusters * ms->smp.dies; @@ -270,3 +346,63 @@ unsigned int machine_topo_get_threads_per_socket(const MachineState *ms) { return ms->smp.threads * machine_topo_get_cores_per_socket(ms); } + +CpuTopologyLevel machine_get_cache_topo_level(const MachineState *ms, + CacheLevelAndType cache) +{ + return ms->smp_cache.props[cache].topology; +} + +void machine_set_cache_topo_level(MachineState *ms, CacheLevelAndType cache, + CpuTopologyLevel level) +{ + ms->smp_cache.props[cache].topology = level; +} + +/* + * When both cache1 and cache2 are configured with specific topology levels + * (not default level), is cache1's topology level higher than cache2? + */ +static bool smp_cache_topo_cmp(const SmpCache *smp_cache, + CacheLevelAndType cache1, + CacheLevelAndType cache2) +{ + /* + * Before comparing, the "default" topology level should be replaced + * with the specific level. + */ + assert(smp_cache->props[cache1].topology != CPU_TOPOLOGY_LEVEL_DEFAULT); + + return smp_cache->props[cache1].topology > smp_cache->props[cache2].topology; +} + +/* + * Currently, we have no way to expose the arch-specific default cache model + * because the cache model is sometimes related to the CPU model (e.g., i386). + * + * We can only check the correctness of the cache topology after the arch loads + * the user-configured cache model from MachineState and consumes the special + * "default" level by replacing it with the specific level. + */ +bool machine_check_smp_cache(const MachineState *ms, Error **errp) +{ + if (smp_cache_topo_cmp(&ms->smp_cache, CACHE_LEVEL_AND_TYPE_L1D, + CACHE_LEVEL_AND_TYPE_L2) || + smp_cache_topo_cmp(&ms->smp_cache, CACHE_LEVEL_AND_TYPE_L1I, + CACHE_LEVEL_AND_TYPE_L2)) { + error_setg(errp, + "Invalid smp cache topology. " + "L2 cache topology level shouldn't be lower than L1 cache"); + return false; + } + + if (smp_cache_topo_cmp(&ms->smp_cache, CACHE_LEVEL_AND_TYPE_L2, + CACHE_LEVEL_AND_TYPE_L3)) { + error_setg(errp, + "Invalid smp cache topology. 
" + "L3 cache topology level shouldn't be lower than L2 cache"); + return false; + } + + return true; +} diff --git a/hw/core/machine.c b/hw/core/machine.c index 27dcda02483..bd47527479a 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -11,36 +11,64 @@ */ #include "qemu/osdep.h" +#include "qemu/units.h" #include "qemu/accel.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "hw/boards.h" #include "hw/loader.h" +#include "qemu/error-report.h" #include "qapi/error.h" #include "qapi/qapi-visit-machine.h" +#include "qapi/qapi-commands-machine.h" #include "qemu/madvise.h" #include "qom/object_interfaces.h" -#include "sysemu/cpus.h" -#include "sysemu/sysemu.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "sysemu/xen.h" -#include "sysemu/qtest.h" +#include "system/cpus.h" +#include "system/system.h" +#include "system/reset.h" +#include "system/runstate.h" +#include "system/xen.h" +#include "system/qtest.h" #include "hw/pci/pci_bridge.h" #include "hw/mem/nvdimm.h" #include "migration/global_state.h" -#include "exec/confidential-guest-support.h" +#include "system/confidential-guest-support.h" #include "hw/virtio/virtio-pci.h" #include "hw/virtio/virtio-net.h" #include "hw/virtio/virtio-iommu.h" #include "audio/audio.h" +GlobalProperty hw_compat_10_0[] = { + { "scsi-hd", "dpofua", "off" }, + { "vfio-pci", "x-migration-load-config-after-iter", "off" }, + { "ramfb", "use-legacy-x86-rom", "true"}, + { "vfio-pci-nohotplug", "use-legacy-x86-rom", "true" }, +}; +const size_t hw_compat_10_0_len = G_N_ELEMENTS(hw_compat_10_0); + +GlobalProperty hw_compat_9_2[] = { + { "arm-cpu", "backcompat-pauth-default-use-qarma5", "true"}, + { "virtio-balloon-pci", "vectors", "0" }, + { "virtio-balloon-pci-transitional", "vectors", "0" }, + { "virtio-balloon-pci-non-transitional", "vectors", "0" }, + { "virtio-mem-pci", "vectors", "0" }, + { "migration", "multifd-clean-tls-termination", "false" }, + { "migration", "send-switchover-start", "off"}, + { "vfio-pci", "x-migration-multifd-transfer", "off" }, +}; +const size_t hw_compat_9_2_len = G_N_ELEMENTS(hw_compat_9_2); + +GlobalProperty hw_compat_9_1[] = { + { TYPE_PCI_DEVICE, "x-pcie-ext-tag", "false" }, +}; +const size_t hw_compat_9_1_len = G_N_ELEMENTS(hw_compat_9_1); + GlobalProperty hw_compat_9_0[] = { - {"arm-cpu", "backcompat-cntfrq", "true" }, + { "arm-cpu", "backcompat-cntfrq", "true" }, { "scsi-hd", "migrate-emulated-scsi-request", "false" }, { "scsi-cd", "migrate-emulated-scsi-request", "false" }, - {"vfio-pci", "skip-vsc-check", "false" }, + { "vfio-pci", "skip-vsc-check", "false" }, { "virtio-pci", "x-pcie-pm-no-soft-reset", "off" }, - {"sd-card", "spec_version", "2" }, + { "sd-card", "spec_version", "2" }, }; const size_t hw_compat_9_0_len = G_N_ELEMENTS(hw_compat_9_0); @@ -260,51 +288,6 @@ GlobalProperty hw_compat_2_6[] = { }; const size_t hw_compat_2_6_len = G_N_ELEMENTS(hw_compat_2_6); -GlobalProperty hw_compat_2_5[] = { - { "isa-fdc", "fallback", "144" }, - { "pvscsi", "x-old-pci-configuration", "on" }, - { "pvscsi", "x-disable-pcie", "on" }, - { "vmxnet3", "x-old-msi-offsets", "on" }, - { "vmxnet3", "x-disable-pcie", "on" }, -}; -const size_t hw_compat_2_5_len = G_N_ELEMENTS(hw_compat_2_5); - -GlobalProperty hw_compat_2_4[] = { - { "e1000", "extra_mac_registers", "off" }, - { "virtio-pci", "x-disable-pcie", "on" }, - { "virtio-pci", "migrate-extra", "off" }, - { "fw_cfg_mem", "dma_enabled", "off" }, - { "fw_cfg_io", "dma_enabled", "off" } -}; -const size_t hw_compat_2_4_len = G_N_ELEMENTS(hw_compat_2_4); - 
-GlobalProperty hw_compat_2_3[] = { - { "virtio-blk-pci", "any_layout", "off" }, - { "virtio-balloon-pci", "any_layout", "off" }, - { "virtio-serial-pci", "any_layout", "off" }, - { "virtio-9p-pci", "any_layout", "off" }, - { "virtio-rng-pci", "any_layout", "off" }, - { TYPE_PCI_DEVICE, "x-pcie-lnksta-dllla", "off" }, - { "migration", "send-configuration", "off" }, - { "migration", "send-section-footer", "off" }, - { "migration", "store-global-state", "off" }, -}; -const size_t hw_compat_2_3_len = G_N_ELEMENTS(hw_compat_2_3); - -GlobalProperty hw_compat_2_2[] = {}; -const size_t hw_compat_2_2_len = G_N_ELEMENTS(hw_compat_2_2); - -GlobalProperty hw_compat_2_1[] = { - { "intel-hda", "old_msi_addr", "on" }, - { "VGA", "qemu-extended-regs", "off" }, - { "secondary-vga", "qemu-extended-regs", "off" }, - { "virtio-scsi-pci", "any_layout", "off" }, - { "usb-mouse", "usb_version", "1" }, - { "usb-kbd", "usb_version", "1" }, - { "virtio-pci", "virtio-pci-bus-master-bug-migration", "on" }, -}; -const size_t hw_compat_2_1_len = G_N_ELEMENTS(hw_compat_2_1); - MachineState *current_machine; static char *machine_get_kernel(Object *obj, Error **errp) @@ -322,6 +305,21 @@ static void machine_set_kernel(Object *obj, const char *value, Error **errp) ms->kernel_filename = g_strdup(value); } +static char *machine_get_shim(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return g_strdup(ms->shim_filename); +} + +static void machine_set_shim(Object *obj, const char *value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + g_free(ms->shim_filename); + ms->shim_filename = g_strdup(value); +} + static char *machine_get_initrd(Object *obj, Error **errp) { MachineState *ms = MACHINE(obj); @@ -457,6 +455,22 @@ static void machine_set_mem_merge(Object *obj, bool value, Error **errp) ms->mem_merge = value; } +#ifdef CONFIG_POSIX +static bool machine_get_aux_ram_share(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return ms->aux_ram_share; +} + +static void machine_set_aux_ram_share(Object *obj, bool value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + ms->aux_ram_share = value; +} +#endif + static bool machine_get_usb(Object *obj, Error **errp) { MachineState *ms = MACHINE(obj); @@ -566,6 +580,20 @@ static void machine_set_nvdimm(Object *obj, bool value, Error **errp) ms->nvdimms_state->is_enabled = value; } +static bool machine_get_spcr(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return ms->acpi_spcr_enabled; +} + +static void machine_set_spcr(Object *obj, bool value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + ms->acpi_spcr_enabled = value; +} + static bool machine_get_hmat(Object *obj, Error **errp) { MachineState *ms = MACHINE(obj); @@ -618,11 +646,19 @@ static void machine_set_mem(Object *obj, Visitor *v, const char *name, mem->size = mc->fixup_ram_size(mem->size); } if ((ram_addr_t)mem->size != mem->size) { - error_setg(errp, "ram size too large"); + error_setg(errp, "ram size %llu exceeds permitted maximum %llu", + (unsigned long long)mem->size, + (unsigned long long)RAM_ADDR_MAX); goto out_free; } if (mem->has_max_size) { + if ((ram_addr_t)mem->max_size != mem->max_size) { + error_setg(errp, "ram size %llu exceeds permitted maximum %llu", + (unsigned long long)mem->max_size, + (unsigned long long)RAM_ADDR_MAX); + goto out_free; + } if (mem->max_size < mem->size) { error_setg(errp, "invalid value of maxmem: " "maximum memory size (0x%" PRIx64 ") must be at least " @@ -929,6 +965,40 @@ static void machine_set_smp(Object 
*obj, Visitor *v, const char *name, machine_parse_smp_config(ms, config, errp); } +static void machine_get_smp_cache(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MachineState *ms = MACHINE(obj); + SmpCache *cache = &ms->smp_cache; + SmpCachePropertiesList *head = NULL; + SmpCachePropertiesList **tail = &head; + + for (int i = 0; i < CACHE_LEVEL_AND_TYPE__MAX; i++) { + SmpCacheProperties *node = g_new(SmpCacheProperties, 1); + + node->cache = cache->props[i].cache; + node->topology = cache->props[i].topology; + QAPI_LIST_APPEND(tail, node); + } + + visit_type_SmpCachePropertiesList(v, name, &head, errp); + qapi_free_SmpCachePropertiesList(head); +} + +static void machine_set_smp_cache(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MachineState *ms = MACHINE(obj); + SmpCachePropertiesList *caches; + + if (!visit_type_SmpCachePropertiesList(v, name, &caches, errp)) { + return; + } + + machine_parse_smp_cache(ms, caches, errp); + qapi_free_SmpCachePropertiesList(caches); +} + static void machine_get_boot(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { @@ -998,7 +1068,40 @@ void machine_add_audiodev_property(MachineClass *mc) "Audiodev to use for default machine devices"); } -static void machine_class_init(ObjectClass *oc, void *data) +static bool create_default_memdev(MachineState *ms, const char *path, + Error **errp) +{ + Object *obj; + MachineClass *mc = MACHINE_GET_CLASS(ms); + bool r = false; + + obj = object_new(path ? TYPE_MEMORY_BACKEND_FILE : TYPE_MEMORY_BACKEND_RAM); + if (path) { + if (!object_property_set_str(obj, "mem-path", path, errp)) { + goto out; + } + } + if (!object_property_set_int(obj, "size", ms->ram_size, errp)) { + goto out; + } + object_property_add_child(object_get_objects_root(), mc->default_ram_id, + obj); + /* Ensure backend's memory region name is equal to mc->default_ram_id */ + if (!object_property_set_bool(obj, "x-use-canonical-path-for-ramblock-id", + false, errp)) { + goto out; + } + if (!user_creatable_complete(USER_CREATABLE(obj), errp)) { + goto out; + } + r = object_property_set_link(OBJECT(ms), "memory-backend", obj, errp); + +out: + object_unref(obj); + return r; +} + +static void machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1017,11 +1120,18 @@ static void machine_class_init(ObjectClass *oc, void *data) */ mc->numa_mem_align_shift = 23; + mc->create_default_memdev = create_default_memdev; + object_class_property_add_str(oc, "kernel", machine_get_kernel, machine_set_kernel); object_class_property_set_description(oc, "kernel", "Linux kernel image file"); + object_class_property_add_str(oc, "shim", + machine_get_shim, machine_set_shim); + object_class_property_set_description(oc, "shim", + "shim.efi file"); + object_class_property_add_str(oc, "initrd", machine_get_initrd, machine_set_initrd); object_class_property_set_description(oc, "initrd", @@ -1054,6 +1164,11 @@ static void machine_class_init(ObjectClass *oc, void *data) object_class_property_set_description(oc, "smp", "CPU topology"); + object_class_property_add(oc, "smp-cache", "SmpCachePropertiesWrapper", + machine_get_smp_cache, machine_set_smp_cache, NULL, NULL); + object_class_property_set_description(oc, "smp-cache", + "Cache properties list for SMP machine"); + object_class_property_add(oc, "phandle-start", "int", machine_get_phandle_start, machine_set_phandle_start, NULL, NULL); @@ -1075,6 +1190,12 @@ static void machine_class_init(ObjectClass *oc, void 
*data) object_class_property_set_description(oc, "mem-merge", "Enable/disable memory merge support"); +#ifdef CONFIG_POSIX + object_class_property_add_bool(oc, "aux-ram-share", + machine_get_aux_ram_share, + machine_set_aux_ram_share); +#endif + object_class_property_add_bool(oc, "usb", machine_get_usb, machine_set_usb); object_class_property_set_description(oc, "usb", @@ -1123,7 +1244,7 @@ static void machine_class_init(ObjectClass *oc, void *data) "Memory size configuration"); } -static void machine_class_base_init(ObjectClass *oc, void *data) +static void machine_class_base_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); mc->max_cpus = mc->max_cpus ?: 1; @@ -1144,9 +1265,6 @@ static void machine_initfn(Object *obj) MachineState *ms = MACHINE(obj); MachineClass *mc = MACHINE_GET_CLASS(obj); - container_get(obj, "/peripheral"); - container_get(obj, "/peripheral-anon"); - ms->dump_guest_core = true; ms->mem_merge = (QEMU_MADV_MERGEABLE != QEMU_MADV_INVALID); ms->enable_graphics = true; @@ -1180,6 +1298,14 @@ static void machine_initfn(Object *obj) "Table (HMAT)"); } + /* SPCR */ + ms->acpi_spcr_enabled = true; + object_property_add_bool(obj, "spcr", machine_get_spcr, machine_set_spcr); + object_property_set_description(obj, "spcr", + "Set on/off to enable/disable " + "ACPI Serial Port Console Redirection " + "Table (spcr)"); + /* default to mc->default_cpus */ ms->smp.cpus = mc->default_cpus; ms->smp.max_cpus = mc->default_cpus; @@ -1192,6 +1318,11 @@ static void machine_initfn(Object *obj) ms->smp.cores = 1; ms->smp.threads = 1; + for (int i = 0; i < CACHE_LEVEL_AND_TYPE__MAX; i++) { + ms->smp_cache.props[i].cache = (CacheLevelAndType)i; + ms->smp_cache.props[i].topology = CPU_TOPOLOGY_LEVEL_DEFAULT; + } + machine_copy_boot_config(ms, &(BootConfiguration){ 0 }); } @@ -1410,38 +1541,6 @@ MemoryRegion *machine_consume_memdev(MachineState *machine, return ret; } -static bool create_default_memdev(MachineState *ms, const char *path, Error **errp) -{ - Object *obj; - MachineClass *mc = MACHINE_GET_CLASS(ms); - bool r = false; - - obj = object_new(path ? 
TYPE_MEMORY_BACKEND_FILE : TYPE_MEMORY_BACKEND_RAM); - if (path) { - if (!object_property_set_str(obj, "mem-path", path, errp)) { - goto out; - } - } - if (!object_property_set_int(obj, "size", ms->ram_size, errp)) { - goto out; - } - object_property_add_child(object_get_objects_root(), mc->default_ram_id, - obj); - /* Ensure backend's memory region name is equal to mc->default_ram_id */ - if (!object_property_set_bool(obj, "x-use-canonical-path-for-ramblock-id", - false, errp)) { - goto out; - } - if (!user_creatable_complete(USER_CREATABLE(obj), errp)) { - goto out; - } - r = object_property_set_link(OBJECT(ms), "memory-backend", obj, errp); - -out: - object_unref(obj); - return r; -} - const char *machine_class_default_cpu_type(MachineClass *mc) { if (mc->valid_cpu_types && !mc->valid_cpu_types[1]) { @@ -1545,7 +1644,9 @@ void machine_run_board_init(MachineState *machine, const char *mem_path, Error * machine_class->default_ram_id); return; } - if (!create_default_memdev(current_machine, mem_path, errp)) { + + if (!machine_class->create_default_memdev(current_machine, mem_path, + errp)) { return; } } @@ -1610,6 +1711,22 @@ void qemu_remove_machine_init_done_notifier(Notifier *notify) notifier_remove(notify); } +static void handle_machine_dumpdtb(MachineState *ms) +{ + if (!ms->dumpdtb) { + return; + } +#ifdef CONFIG_FDT + qmp_dumpdtb(ms->dumpdtb, &error_fatal); + exit(0); +#else + error_report("This machine doesn't have an FDT"); + error_printf("(this machine type definitely doesn't use FDT, and " + "this QEMU doesn't have FDT support compiled in)\n"); + exit(1); +#endif +} + void qdev_machine_creation_done(void) { cpu_synchronize_all_post_init(); @@ -1639,6 +1756,12 @@ void qdev_machine_creation_done(void) notifier_list_notify(&machine_init_done_notifiers, NULL); + /* + * If the user used -machine dumpdtb=file.dtb to request that we + * dump the DTB to a file, do it now, and exit. 
+ */ + handle_machine_dumpdtb(current_machine); + if (rom_check_and_register_reset() != 0) { exit(1); } diff --git a/hw/core/meson.build b/hw/core/meson.build index a3d9bab9f42..b5a545a0edd 100644 --- a/hw/core/meson.build +++ b/hw/core/meson.build @@ -24,9 +24,10 @@ system_ss.add(when: 'CONFIG_REGISTER', if_true: files('register.c')) system_ss.add(when: 'CONFIG_SPLIT_IRQ', if_true: files('split-irq.c')) system_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('stream.c')) system_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('sysbus-fdt.c')) +system_ss.add(when: 'CONFIG_EIF', if_true: [files('eif.c'), zlib, libcbor, gnutls]) system_ss.add(files( - 'cpu-sysemu.c', + 'cpu-system.c', 'fw-path-provider.c', 'gpio.c', 'hotplug.c', @@ -45,3 +46,7 @@ system_ss.add(files( 'vm-change-state-handler.c', 'clock-vmstate.c', )) +user_ss.add(files( + 'cpu-user.c', + 'qdev-user.c', +)) diff --git a/hw/core/null-machine.c b/hw/core/null-machine.c index f586a4bef54..a6e477a2d88 100644 --- a/hw/core/null-machine.c +++ b/hw/core/null-machine.c @@ -14,7 +14,7 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "hw/boards.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/core/cpu.h" static void machine_none_init(MachineState *mch) @@ -53,7 +53,6 @@ static void machine_none_machine_init(MachineClass *mc) mc->no_parallel = 1; mc->no_floppy = 1; mc->no_cdrom = 1; - mc->no_sdcard = 1; } DEFINE_MACHINE("none", machine_none_machine_init) diff --git a/hw/core/numa.c b/hw/core/numa.c index f8ce332cfe9..218576f7455 100644 --- a/hw/core/numa.c +++ b/hw/core/numa.c @@ -24,15 +24,15 @@ #include "qemu/osdep.h" #include "qemu/units.h" -#include "sysemu/hostmem.h" -#include "sysemu/numa.h" +#include "system/hostmem.h" +#include "system/numa.h" #include "exec/cpu-common.h" #include "exec/ramlist.h" #include "qemu/error-report.h" #include "qapi/error.h" #include "qapi/opts-visitor.h" #include "qapi/qapi-visit-machine.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" #include "hw/core/cpu.h" #include "hw/mem/pc-dimm.h" #include "hw/boards.h" @@ -249,7 +249,7 @@ void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node, lb_data.initiator = node->initiator; lb_data.target = node->target; - if (node->data_type <= HMATLB_DATA_TYPE_WRITE_LATENCY) { + if (node->data_type <= HMAT_LB_DATA_TYPE_WRITE_LATENCY) { /* Input latency data */ if (!node->has_latency) { @@ -313,7 +313,7 @@ void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node, numa_info[node->target].lb_info_provided |= BIT(0); } lb_data.data = node->latency; - } else if (node->data_type >= HMATLB_DATA_TYPE_ACCESS_BANDWIDTH) { + } else if (node->data_type >= HMAT_LB_DATA_TYPE_ACCESS_BANDWIDTH) { /* Input bandwidth data */ if (!node->has_bandwidth) { error_setg(errp, "Missing 'bandwidth' option"); @@ -380,7 +380,7 @@ void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node, } lb_data.data = node->bandwidth; } else { - assert(0); + g_assert_not_reached(); } g_array_append_val(hmat_lb->list, lb_data); diff --git a/hw/core/or-irq.c b/hw/core/or-irq.c index 13907df0266..3942c709939 100644 --- a/hw/core/or-irq.c +++ b/hw/core/or-irq.c @@ -115,16 +115,15 @@ static const VMStateDescription vmstate_or_irq = { }, }; -static Property or_irq_properties[] = { +static const Property or_irq_properties[] = { DEFINE_PROP_UINT16("num-lines", OrIRQState, num_lines, 1), - DEFINE_PROP_END_OF_LIST(), }; -static void or_irq_class_init(ObjectClass *klass, void *data) +static void 
or_irq_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = or_irq_reset; + device_class_set_legacy_reset(dc, or_irq_reset); device_class_set_props(dc, or_irq_properties); dc->realize = or_irq_realize; dc->vmsd = &vmstate_or_irq; diff --git a/hw/core/platform-bus.c b/hw/core/platform-bus.c index b8487b26b67..6950063de4f 100644 --- a/hw/core/platform-bus.c +++ b/hw/core/platform-bus.c @@ -145,9 +145,12 @@ static void platform_bus_map_mmio(PlatformBusDevice *pbus, SysBusDevice *sbdev, * the target device's memory region */ for (off = 0; off < pbus->mmio_size; off += alignment) { - if (!memory_region_find(&pbus->mmio, off, size).mr) { + MemoryRegion *mr = memory_region_find(&pbus->mmio, off, size).mr; + if (!mr) { found_region = true; break; + } else { + memory_region_unref(mr); } } @@ -201,13 +204,12 @@ static void platform_bus_realize(DeviceState *dev, Error **errp) plaform_bus_refresh_irqs(pbus); } -static Property platform_bus_properties[] = { +static const Property platform_bus_properties[] = { DEFINE_PROP_UINT32("num_irqs", PlatformBusDevice, num_irqs, 0), DEFINE_PROP_UINT32("mmio_size", PlatformBusDevice, mmio_size, 0), - DEFINE_PROP_END_OF_LIST() }; -static void platform_bus_class_init(ObjectClass *klass, void *data) +static void platform_bus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/core/ptimer.c b/hw/core/ptimer.c index b1517592c6b..0aeb10fb53e 100644 --- a/hw/core/ptimer.c +++ b/hw/core/ptimer.c @@ -11,8 +11,8 @@ #include "migration/vmstate.h" #include "qemu/host-utils.h" #include "exec/replay-core.h" -#include "sysemu/cpu-timers.h" -#include "sysemu/qtest.h" +#include "exec/icount.h" +#include "system/qtest.h" #include "block/aio.h" #include "hw/clock.h" @@ -83,7 +83,7 @@ static void ptimer_reload(ptimer_state *s, int delta_adjust) delta = s->delta = s->limit; } - if (s->period == 0) { + if (s->period == 0 && s->period_frac == 0) { if (!qtest_enabled()) { fprintf(stderr, "Timer with period zero, disabling\n"); } @@ -309,7 +309,7 @@ void ptimer_run(ptimer_state *s, int oneshot) assert(s->in_transaction); - if (was_disabled && s->period == 0) { + if (was_disabled && s->period == 0 && s->period_frac == 0) { if (!qtest_enabled()) { fprintf(stderr, "Timer with period zero, disabling\n"); } diff --git a/hw/core/qdev-clock.c b/hw/core/qdev-clock.c index 82799577f3e..dacafa4e036 100644 --- a/hw/core/qdev-clock.c +++ b/hw/core/qdev-clock.c @@ -22,7 +22,7 @@ * Add a new clock in a device */ static NamedClockList *qdev_init_clocklist(DeviceState *dev, const char *name, - bool output, Clock *clk) + bool alias, bool output, Clock *clk) { NamedClockList *ncl; @@ -38,39 +38,8 @@ static NamedClockList *qdev_init_clocklist(DeviceState *dev, const char *name, */ ncl = g_new0(NamedClockList, 1); ncl->name = g_strdup(name); + ncl->alias = alias; ncl->output = output; - ncl->alias = (clk != NULL); - - /* - * Trying to create a clock whose name clashes with some other - * clock or property is a bug in the caller and we will abort(). - */ - if (clk == NULL) { - clk = CLOCK(object_new(TYPE_CLOCK)); - object_property_add_child(OBJECT(dev), name, OBJECT(clk)); - if (output) { - /* - * Remove object_new()'s initial reference. - * Note that for inputs, the reference created by object_new() - * will be deleted in qdev_finalize_clocklist(). 
- */ - object_unref(OBJECT(clk)); - } - } else { - object_property_add_link(OBJECT(dev), name, - object_get_typename(OBJECT(clk)), - (Object **) &ncl->clock, - NULL, OBJ_PROP_LINK_STRONG); - /* - * Since the link property has the OBJ_PROP_LINK_STRONG flag, the clk - * object reference count gets decremented on property deletion. - * However object_property_add_link does not increment it since it - * doesn't know the linked object. Increment it here to ensure the - * aliased clock stays alive during this device life-time. - */ - object_ref(OBJECT(clk)); - } - ncl->clock = clk; QLIST_INSERT_HEAD(&dev->clocks, ncl, node); @@ -84,14 +53,11 @@ void qdev_finalize_clocklist(DeviceState *dev) QLIST_FOREACH_SAFE(ncl, &dev->clocks, node, ncl_next) { QLIST_REMOVE(ncl, node); - if (!ncl->output && !ncl->alias) { + if (!ncl->alias) { /* * We kept a reference on the input clock to ensure it lives up to - * this point so we can safely remove the callback. - * It avoids having a callback to a deleted object if ncl->clock - * is still referenced somewhere else (eg: by a clock output). + * this point; it is used by the monitor to show the frequency. */ - clock_clear_callback(ncl->clock); object_unref(OBJECT(ncl->clock)); } g_free(ncl->name); @@ -101,29 +67,25 @@ void qdev_finalize_clocklist(DeviceState *dev) Clock *qdev_init_clock_out(DeviceState *dev, const char *name) { - NamedClockList *ncl; - - assert(name); - - ncl = qdev_init_clocklist(dev, name, true, NULL); + Clock *clk = CLOCK(object_new(TYPE_CLOCK)); + object_property_add_child(OBJECT(dev), name, OBJECT(clk)); - return ncl->clock; + qdev_init_clocklist(dev, name, false, true, clk); + return clk; } Clock *qdev_init_clock_in(DeviceState *dev, const char *name, ClockCallback *callback, void *opaque, unsigned int events) { - NamedClockList *ncl; - - assert(name); - - ncl = qdev_init_clocklist(dev, name, false, NULL); + Clock *clk = CLOCK(object_new(TYPE_CLOCK)); + object_property_add_child(OBJECT(dev), name, OBJECT(clk)); + qdev_init_clocklist(dev, name, false, false, clk); if (callback) { - clock_set_callback(ncl->clock, callback, opaque, events); + clock_set_callback(clk, callback, opaque, events); } - return ncl->clock; + return clk; } void qdev_init_clocks(DeviceState *dev, const ClockPortInitArray clocks) @@ -194,15 +156,25 @@ Clock *qdev_get_clock_out(DeviceState *dev, const char *name) Clock *qdev_alias_clock(DeviceState *dev, const char *name, DeviceState *alias_dev, const char *alias_name) { - NamedClockList *ncl; - - assert(name && alias_name); + NamedClockList *ncl = qdev_get_clocklist(dev, name); + Clock *clk = ncl->clock; - ncl = qdev_get_clocklist(dev, name); + ncl = qdev_init_clocklist(alias_dev, alias_name, true, ncl->output, clk); - qdev_init_clocklist(alias_dev, alias_name, ncl->output, ncl->clock); + object_property_add_link(OBJECT(alias_dev), alias_name, + TYPE_CLOCK, + (Object **) &ncl->clock, + NULL, OBJ_PROP_LINK_STRONG); + /* + * Since the link property has the OBJ_PROP_LINK_STRONG flag, the clk + * object reference count gets decremented on property deletion. + * However object_property_add_link does not increment it since it + * doesn't know the linked object. Increment it here to ensure the + * aliased clock stays alive during this device life-time. 
+ */ + object_ref(OBJECT(clk)); - return ncl->clock; + return clk; } void qdev_connect_clock_in(DeviceState *dev, const char *name, Clock *source) diff --git a/hw/core/qdev-hotplug.c b/hw/core/qdev-hotplug.c index d495d0e9c70..ff176dc1bb3 100644 --- a/hw/core/qdev-hotplug.c +++ b/hw/core/qdev-hotplug.c @@ -12,6 +12,7 @@ #include "qemu/osdep.h" #include "hw/qdev-core.h" #include "hw/boards.h" +#include "qapi/error.h" HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev) { @@ -30,12 +31,48 @@ HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev) return NULL; } -bool qdev_hotplug_allowed(DeviceState *dev, Error **errp) +static bool qdev_hotplug_unplug_allowed_common(DeviceState *dev, BusState *bus, + Error **errp) +{ + DeviceClass *dc = DEVICE_GET_CLASS(dev); + + if (!dc->hotpluggable) { + error_setg(errp, "Device '%s' does not support hotplugging", + object_get_typename(OBJECT(dev))); + return false; + } + + if (bus) { + if (!qbus_is_hotpluggable(bus)) { + error_setg(errp, "Bus '%s' does not support hotplugging", + bus->name); + return false; + } + } else { + if (!qdev_get_machine_hotplug_handler(dev)) { + /* + * No bus, no machine hotplug handler --> device is not hotpluggable + */ + error_setg(errp, + "Device '%s' can not be hotplugged on this machine", + object_get_typename(OBJECT(dev))); + return false; + } + } + + return true; +} + +bool qdev_hotplug_allowed(DeviceState *dev, BusState *bus, Error **errp) { MachineState *machine; MachineClass *mc; Object *m_obj = qdev_get_machine(); + if (!qdev_hotplug_unplug_allowed_common(dev, bus, errp)) { + return false; + } + if (object_dynamic_cast(m_obj, TYPE_MACHINE)) { machine = MACHINE(m_obj); mc = MACHINE_GET_CLASS(machine); @@ -47,6 +84,12 @@ bool qdev_hotplug_allowed(DeviceState *dev, Error **errp) return true; } +bool qdev_hotunplug_allowed(DeviceState *dev, Error **errp) +{ + return !qdev_unplug_blocked(dev, errp) && + qdev_hotplug_unplug_allowed_common(dev, dev->parent_bus, errp); +} + HotplugHandler *qdev_get_bus_hotplug_handler(DeviceState *dev) { if (dev->parent_bus) { diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c index f13350b4fb0..1f810b7ddf2 100644 --- a/hw/core/qdev-properties-system.c +++ b/hw/core/qdev-properties-system.c @@ -29,8 +29,8 @@ #include "audio/audio.h" #include "chardev/char-fe.h" -#include "sysemu/block-backend.h" -#include "sysemu/blockdev.h" +#include "system/block-backend.h" +#include "system/blockdev.h" #include "net/net.h" #include "hw/pci/pci.h" #include "hw/pci/pcie.h" @@ -58,13 +58,39 @@ static bool check_prop_still_unset(Object *obj, const char *name, return false; } +bool qdev_prop_sanitize_s390x_loadparm(uint8_t *loadparm, const char *str, + Error **errp) +{ + int i, len; + + len = strlen(str); + if (len > 8) { + error_setg(errp, "'loadparm' can only contain up to 8 characters"); + return false; + } + + for (i = 0; i < len; i++) { + uint8_t c = qemu_toupper(str[i]); /* mimic HMC */ + + if (qemu_isalnum(c) || c == '.' 
|| c == ' ') { + loadparm[i] = c; + } else { + error_setg(errp, + "invalid character in 'loadparm': '%c' (ASCII 0x%02x)", + c, c); + return false; + } + } + + return true; +} /* --- drive --- */ static void get_drive(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; void **ptr = object_field_prop_ptr(obj, prop); const char *value; char *p; @@ -90,7 +116,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name, void *opaque, bool iothread, Error **errp) { DeviceState *dev = DEVICE(obj); - Property *prop = opaque; + const Property *prop = opaque; void **ptr = object_field_prop_ptr(obj, prop); char *str; BlockBackend *blk; @@ -119,6 +145,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name, if (ctx != bdrv_get_aio_context(bs)) { error_setg(errp, "Different aio context is not supported for new " "node"); + return; } blk_replace_bs(blk, bs, errp); @@ -199,7 +226,7 @@ static void set_drive_iothread(Object *obj, Visitor *v, const char *name, static void release_drive(Object *obj, const char *name, void *opaque) { DeviceState *dev = DEVICE(obj); - Property *prop = opaque; + const Property *prop = opaque; BlockBackend **ptr = object_field_prop_ptr(obj, prop); if (*ptr) { @@ -209,7 +236,7 @@ static void release_drive(Object *obj, const char *name, void *opaque) } const PropertyInfo qdev_prop_drive = { - .name = "str", + .type = "str", .description = "Node name or ID of a block device to use as a backend", .realized_set_allowed = true, .get = get_drive, @@ -218,7 +245,7 @@ const PropertyInfo qdev_prop_drive = { }; const PropertyInfo qdev_prop_drive_iothread = { - .name = "str", + .type = "str", .description = "Node name or ID of a block device to use as a backend", .realized_set_allowed = true, .get = get_drive, @@ -243,7 +270,7 @@ static void set_chr(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { ERRP_GUARD(); - Property *prop = opaque; + const Property *prop = opaque; CharBackend *be = object_field_prop_ptr(obj, prop); Chardev *s; char *str; @@ -279,14 +306,14 @@ static void set_chr(Object *obj, Visitor *v, const char *name, void *opaque, static void release_chr(Object *obj, const char *name, void *opaque) { - Property *prop = opaque; + const Property *prop = opaque; CharBackend *be = object_field_prop_ptr(obj, prop); qemu_chr_fe_deinit(be, false); } const PropertyInfo qdev_prop_chr = { - .name = "str", + .type = "str", .description = "ID of a chardev to use as a backend", .get = get_chr, .set = set_chr, @@ -303,7 +330,7 @@ const PropertyInfo qdev_prop_chr = { static void get_mac(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; MACAddr *mac = object_field_prop_ptr(obj, prop); char buffer[2 * 6 + 5 + 1]; char *p = buffer; @@ -318,7 +345,7 @@ static void get_mac(Object *obj, Visitor *v, const char *name, void *opaque, static void set_mac(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; MACAddr *mac = object_field_prop_ptr(obj, prop); int i, pos; char *str; @@ -360,7 +387,7 @@ static void set_mac(Object *obj, Visitor *v, const char *name, void *opaque, } const PropertyInfo qdev_prop_macaddr = { - .name = "str", + .type = "str", .description = "Ethernet 6-byte MAC Address, example: 52:54:00:12:34:56", .get = get_mac, .set = set_mac, @@ -380,7 +407,7 @@ void 
qdev_prop_set_macaddr(DeviceState *dev, const char *name, static void get_netdev(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; NICPeers *peers_ptr = object_field_prop_ptr(obj, prop); char *p = g_strdup(peers_ptr->ncs[0] ? peers_ptr->ncs[0]->name : ""); @@ -391,7 +418,7 @@ static void get_netdev(Object *obj, Visitor *v, const char *name, static void set_netdev(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; NICPeers *peers_ptr = object_field_prop_ptr(obj, prop); NetClientState **ncs = peers_ptr->ncs; NetClientState *peers[MAX_QUEUE_NUM]; @@ -448,7 +475,7 @@ static void set_netdev(Object *obj, Visitor *v, const char *name, } const PropertyInfo qdev_prop_netdev = { - .name = "str", + .type = "str", .description = "ID of a netdev to use as a backend", .get = get_netdev, .set = set_netdev, @@ -459,7 +486,7 @@ const PropertyInfo qdev_prop_netdev = { static void get_audiodev(Object *obj, Visitor *v, const char* name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; QEMUSoundCard *card = object_field_prop_ptr(obj, prop); char *p = g_strdup(audio_get_id(card)); @@ -470,7 +497,7 @@ static void get_audiodev(Object *obj, Visitor *v, const char* name, static void set_audiodev(Object *obj, Visitor *v, const char* name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; QEMUSoundCard *card = object_field_prop_ptr(obj, prop); AudioState *state; g_autofree char *str = NULL; @@ -486,7 +513,7 @@ static void set_audiodev(Object *obj, Visitor *v, const char* name, } const PropertyInfo qdev_prop_audiodev = { - .name = "str", + .type = "str", .description = "ID of an audiodev to use as a backend", /* release done on shutdown */ .get = get_audiodev, @@ -552,7 +579,7 @@ static void qdev_propinfo_set_losttickpolicy(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; int *ptr = object_field_prop_ptr(obj, prop); int value; @@ -576,7 +603,8 @@ static void qdev_propinfo_set_losttickpolicy(Object *obj, Visitor *v, QEMU_BUILD_BUG_ON(sizeof(LostTickPolicy) != sizeof(int)); const PropertyInfo qdev_prop_losttickpolicy = { - .name = "LostTickPolicy", + .type = "LostTickPolicy", + .description = "Policy for handling lost ticks (discard/delay/slew)", .enum_table = &LostTickPolicy_lookup, .get = qdev_propinfo_get_enum, .set = qdev_propinfo_set_losttickpolicy, @@ -588,25 +616,21 @@ const PropertyInfo qdev_prop_losttickpolicy = { static void set_blocksize(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - DeviceState *dev = DEVICE(obj); - Property *prop = opaque; + const Property *prop = opaque; uint32_t *ptr = object_field_prop_ptr(obj, prop); uint64_t value; - Error *local_err = NULL; if (!visit_type_size(v, name, &value, errp)) { return; } - check_block_size(dev->id ? 
: "", name, value, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!check_block_size(name, value, errp)) { return; } *ptr = value; } const PropertyInfo qdev_prop_blocksize = { - .name = "size", + .type = "size", .description = "A power of two between " MIN_BLOCK_SIZE_STR " and " MAX_BLOCK_SIZE_STR, .get = qdev_propinfo_get_size32, @@ -619,9 +643,8 @@ const PropertyInfo qdev_prop_blocksize = { QEMU_BUILD_BUG_ON(sizeof(BlockdevOnError) != sizeof(int)); const PropertyInfo qdev_prop_blockdev_on_error = { - .name = "BlockdevOnError", - .description = "Error handling policy, " - "report/ignore/enospc/stop/auto", + .type = "BlockdevOnError", + .description = "Error handling policy (report/ignore/enospc/stop/auto)", .enum_table = &BlockdevOnError_lookup, .get = qdev_propinfo_get_enum, .set = qdev_propinfo_set_enum, @@ -633,9 +656,9 @@ const PropertyInfo qdev_prop_blockdev_on_error = { QEMU_BUILD_BUG_ON(sizeof(BiosAtaTranslation) != sizeof(int)); const PropertyInfo qdev_prop_bios_chs_trans = { - .name = "BiosAtaTranslation", - .description = "Logical CHS translation algorithm, " - "auto/none/lba/large/rechs", + .type = "BiosAtaTranslation", + .description = "Logical CHS translation algorithm " + " (auto/none/lba/large/rechs)", .enum_table = &BiosAtaTranslation_lookup, .get = qdev_propinfo_get_enum, .set = qdev_propinfo_set_enum, @@ -645,9 +668,8 @@ const PropertyInfo qdev_prop_bios_chs_trans = { /* --- FDC default drive types */ const PropertyInfo qdev_prop_fdc_drive_type = { - .name = "FdcDriveType", - .description = "FDC drive type, " - "144/288/120/none/auto", + .type = "FloppyDriveType", + .description = "Floppy drive type (144/288/120/none/auto)", .enum_table = &FloppyDriveType_lookup, .get = qdev_propinfo_get_enum, .set = qdev_propinfo_set_enum, @@ -657,9 +679,9 @@ const PropertyInfo qdev_prop_fdc_drive_type = { /* --- MultiFDCompression --- */ const PropertyInfo qdev_prop_multifd_compression = { - .name = "MultiFDCompression", - .description = "multifd_compression values, " - "none/zlib/zstd/qpl/uadk", + .type = "MultiFDCompression", + .description = "multifd_compression values" + " (none/zlib/zstd/qpl/uadk/qatzip)", .enum_table = &MultiFDCompression_lookup, .get = qdev_propinfo_get_enum, .set = qdev_propinfo_set_enum, @@ -671,9 +693,8 @@ const PropertyInfo qdev_prop_multifd_compression = { QEMU_BUILD_BUG_ON(sizeof(MigMode) != sizeof(int)); const PropertyInfo qdev_prop_mig_mode = { - .name = "MigMode", - .description = "mig_mode values, " - "normal,cpr-reboot", + .type = "MigMode", + .description = "Migration mode (normal/cpr-reboot)", .enum_table = &MigMode_lookup, .get = qdev_propinfo_get_enum, .set = qdev_propinfo_set_enum, @@ -685,9 +706,8 @@ const PropertyInfo qdev_prop_mig_mode = { QEMU_BUILD_BUG_ON(sizeof(GranuleMode) != sizeof(int)); const PropertyInfo qdev_prop_granule_mode = { - .name = "GranuleMode", - .description = "granule_mode values, " - "4k, 8k, 16k, 64k, host", + .type = "GranuleMode", + .description = "Granule page size (4k/8k/16k/64k/host)", .enum_table = &GranuleMode_lookup, .get = qdev_propinfo_get_enum, .set = qdev_propinfo_set_enum, @@ -695,9 +715,8 @@ const PropertyInfo qdev_prop_granule_mode = { }; const PropertyInfo qdev_prop_zero_page_detection = { - .name = "ZeroPageDetection", - .description = "zero_page_detection values, " - "none,legacy,multifd", + .type = "ZeroPageDetection", + .description = "Zero page detection (none/legacy/multifd)", .enum_table = &ZeroPageDetection_lookup, .get = qdev_propinfo_get_enum, .set = 
qdev_propinfo_set_enum, @@ -715,7 +734,7 @@ const PropertyInfo qdev_prop_zero_page_detection = { static void get_reserved_region(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; ReservedRegion *rr = object_field_prop_ptr(obj, prop); char buffer[64]; char *p = buffer; @@ -731,7 +750,7 @@ static void get_reserved_region(Object *obj, Visitor *v, const char *name, static void set_reserved_region(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; ReservedRegion *rr = object_field_prop_ptr(obj, prop); const char *endptr; uint64_t lob, upb; @@ -775,11 +794,10 @@ static void set_reserved_region(Object *obj, Visitor *v, const char *name, error_setg(errp, "reserved region fields must be separated with ':'"); out: g_free(str); - return; } const PropertyInfo qdev_prop_reserved_region = { - .name = "reserved_region", + .type = "str", .description = "Reserved Region, example: 0xFEE00000:0xFEEFFFFF:0", .get = get_reserved_region, .set = set_reserved_region, @@ -793,43 +811,61 @@ const PropertyInfo qdev_prop_reserved_region = { static void set_pci_devfn(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; + g_autofree GenericAlternate *alt; int32_t value, *ptr = object_field_prop_ptr(obj, prop); unsigned int slot, fn, n; - char *str; + g_autofree char *str = NULL; + + if (!visit_start_alternate(v, name, &alt, sizeof(*alt), errp)) { + return; + } + + switch (alt->type) { + case QTYPE_QSTRING: + if (!visit_type_str(v, name, &str, errp)) { + goto out; + } + + if (sscanf(str, "%x.%x%n", &slot, &fn, &n) != 2) { + fn = 0; + if (sscanf(str, "%x%n", &slot, &n) != 1) { + goto invalid; + } + } + if (str[n] != '\0' || fn > 7 || slot > 31) { + goto invalid; + } + *ptr = slot << 3 | fn; + break; - if (!visit_type_str(v, name, &str, NULL)) { + case QTYPE_QNUM: if (!visit_type_int32(v, name, &value, errp)) { - return; + goto out; } if (value < -1 || value > 255) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", "a value between -1 and 255"); - return; + goto out; } *ptr = value; - return; - } + break; - if (sscanf(str, "%x.%x%n", &slot, &fn, &n) != 2) { - fn = 0; - if (sscanf(str, "%x%n", &slot, &n) != 1) { - goto invalid; - } - } - if (str[n] != '\0' || fn > 7 || slot > 31) { - goto invalid; + default: + error_setg(errp, "Invalid parameter type for '%s', expected int or str", + name ? 
name : "null"); + goto out; } - *ptr = slot << 3 | fn; - g_free(str); - return; + + goto out; invalid: error_set_from_qdev_prop_error(errp, EINVAL, obj, name, str); - g_free(str); +out: + visit_end_alternate(v, (void **) &alt); } -static int print_pci_devfn(Object *obj, Property *prop, char *dest, +static int print_pci_devfn(Object *obj, const Property *prop, char *dest, size_t len) { int32_t *ptr = object_field_prop_ptr(obj, prop); @@ -842,7 +878,7 @@ static int print_pci_devfn(Object *obj, Property *prop, char *dest, } const PropertyInfo qdev_prop_pci_devfn = { - .name = "int32", + .type = "str", .description = "Slot and optional function number, example: 06.0 or 06", .print = print_pci_devfn, .get = qdev_propinfo_get_int32, @@ -855,7 +891,7 @@ const PropertyInfo qdev_prop_pci_devfn = { static void get_pci_host_devaddr(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; PCIHostDeviceAddress *addr = object_field_prop_ptr(obj, prop); char buffer[] = "ffff:ff:ff.f"; char *p = buffer; @@ -881,7 +917,7 @@ static void get_pci_host_devaddr(Object *obj, Visitor *v, const char *name, static void set_pci_host_devaddr(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; PCIHostDeviceAddress *addr = object_field_prop_ptr(obj, prop); char *str, *p; char *e; @@ -948,8 +984,8 @@ static void set_pci_host_devaddr(Object *obj, Visitor *v, const char *name, } const PropertyInfo qdev_prop_pci_host_devaddr = { - .name = "str", - .description = "Address (bus/device/function) of " + .type = "str", + .description = "Address (bus:device.function) of " "the host device, example: 04:10.0", .get = get_pci_host_devaddr, .set = set_pci_host_devaddr, @@ -958,7 +994,7 @@ const PropertyInfo qdev_prop_pci_host_devaddr = { /* --- OffAutoPCIBAR off/auto/bar0/bar1/bar2/bar3/bar4/bar5 --- */ const PropertyInfo qdev_prop_off_auto_pcibar = { - .name = "OffAutoPCIBAR", + .type = "OffAutoPCIBAR", .description = "off/auto/bar0/bar1/bar2/bar3/bar4/bar5", .enum_table = &OffAutoPCIBAR_lookup, .get = qdev_propinfo_get_enum, @@ -971,7 +1007,7 @@ const PropertyInfo qdev_prop_off_auto_pcibar = { static void get_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; PCIExpLinkSpeed *p = object_field_prop_ptr(obj, prop); int speed; @@ -1005,7 +1041,7 @@ static void get_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, static void set_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; PCIExpLinkSpeed *p = object_field_prop_ptr(obj, prop); int speed; @@ -1040,7 +1076,7 @@ static void set_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, } const PropertyInfo qdev_prop_pcie_link_speed = { - .name = "PCIELinkSpeed", + .type = "PCIELinkSpeed", .description = "2_5/5/8/16/32/64", .enum_table = &PCIELinkSpeed_lookup, .get = get_prop_pcielinkspeed, @@ -1053,7 +1089,7 @@ const PropertyInfo qdev_prop_pcie_link_speed = { static void get_prop_pcielinkwidth(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; PCIExpLinkWidth *p = object_field_prop_ptr(obj, prop); int width; @@ -1090,7 +1126,7 @@ static void get_prop_pcielinkwidth(Object *obj, Visitor *v, const char *name, static void 
set_prop_pcielinkwidth(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; PCIExpLinkWidth *p = object_field_prop_ptr(obj, prop); int width; @@ -1128,7 +1164,7 @@ static void set_prop_pcielinkwidth(Object *obj, Visitor *v, const char *name, } const PropertyInfo qdev_prop_pcie_link_width = { - .name = "PCIELinkWidth", + .type = "PCIELinkWidth", .description = "1/2/4/8/12/16/32", .enum_table = &PCIELinkWidth_lookup, .get = get_prop_pcielinkwidth, @@ -1141,7 +1177,7 @@ const PropertyInfo qdev_prop_pcie_link_width = { static void get_uuid(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; QemuUUID *uuid = object_field_prop_ptr(obj, prop); char buffer[UUID_STR_LEN]; char *p = buffer; @@ -1156,7 +1192,7 @@ static void get_uuid(Object *obj, Visitor *v, const char *name, void *opaque, static void set_uuid(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; QemuUUID *uuid = object_field_prop_ptr(obj, prop); char *str; @@ -1178,7 +1214,7 @@ static void set_default_uuid_auto(ObjectProperty *op, const Property *prop) } const PropertyInfo qdev_prop_uuid = { - .name = "str", + .type = "str", .description = "UUID (aka GUID) or \"" UUID_VALUE_AUTO "\" for random value (default)", .get = get_uuid, @@ -1188,12 +1224,12 @@ const PropertyInfo qdev_prop_uuid = { /* --- s390 cpu entitlement policy --- */ -QEMU_BUILD_BUG_ON(sizeof(CpuS390Entitlement) != sizeof(int)); +QEMU_BUILD_BUG_ON(sizeof(S390CpuEntitlement) != sizeof(int)); const PropertyInfo qdev_prop_cpus390entitlement = { - .name = "CpuS390Entitlement", - .description = "low/medium (default)/high", - .enum_table = &CpuS390Entitlement_lookup, + .type = "S390CpuEntitlement", + .description = "auto/low/medium/high (default medium)", + .enum_table = &S390CpuEntitlement_lookup, .get = qdev_propinfo_get_enum, .set = qdev_propinfo_set_enum, .set_default_value = qdev_propinfo_set_default_value_enum, @@ -1236,10 +1272,74 @@ static void release_iothread_vq_mapping_list(Object *obj, } const PropertyInfo qdev_prop_iothread_vq_mapping_list = { - .name = "IOThreadVirtQueueMappingList", + .type = "IOThreadVirtQueueMappingList", .description = "IOThread virtqueue mapping list [{\"iothread\":\"\", " "\"vqs\":[1,2,3,...]},...]", .get = get_iothread_vq_mapping_list, .set = set_iothread_vq_mapping_list, .release = release_iothread_vq_mapping_list, }; + +/* --- Endian modes */ + +const PropertyInfo qdev_prop_endian_mode = { + .type = "EndianMode", + .description = "Endian mode, big/little/unspecified", + .enum_table = &EndianMode_lookup, + .get = qdev_propinfo_get_enum, + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + +const PropertyInfo qdev_prop_vmapple_virtio_blk_variant = { + .type = "VMAppleVirtioBlkVariant", + .description = "unspecified/root/aux", + .enum_table = &VMAppleVirtioBlkVariant_lookup, + .get = qdev_propinfo_get_enum, + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + +/* --- VirtIOGPUOutputList --- */ + +static void get_virtio_gpu_output_list(Object *obj, Visitor *v, + const char *name, void *opaque, Error **errp) +{ + VirtIOGPUOutputList **prop_ptr = + object_field_prop_ptr(obj, opaque); + + visit_type_VirtIOGPUOutputList(v, name, prop_ptr, errp); +} + +static void set_virtio_gpu_output_list(Object *obj, Visitor *v, + 
const char *name, void *opaque, Error **errp) +{ + VirtIOGPUOutputList **prop_ptr = + object_field_prop_ptr(obj, opaque); + VirtIOGPUOutputList *list; + + if (!visit_type_VirtIOGPUOutputList(v, name, &list, errp)) { + return; + } + + qapi_free_VirtIOGPUOutputList(*prop_ptr); + *prop_ptr = list; +} + +static void release_virtio_gpu_output_list(Object *obj, + const char *name, void *opaque) +{ + VirtIOGPUOutputList **prop_ptr = + object_field_prop_ptr(obj, opaque); + + qapi_free_VirtIOGPUOutputList(*prop_ptr); + *prop_ptr = NULL; +} + +const PropertyInfo qdev_prop_virtio_gpu_output_list = { + .type = "VirtIOGPUOutputList", + .description = "VirtIO GPU output list [{\"name\":\"\"},...]", + .get = get_virtio_gpu_output_list, + .set = set_virtio_gpu_output_list, + .release = release_virtio_gpu_output_list, +}; diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c index 86a583574dd..b7e8a89ba5c 100644 --- a/hw/core/qdev-properties.c +++ b/hw/core/qdev-properties.c @@ -2,7 +2,8 @@ #include "hw/qdev-properties.h" #include "qapi/error.h" #include "qapi/qapi-types-misc.h" -#include "qapi/qmp/qlist.h" +#include "qapi/qapi-visit-common.h" +#include "qobject/qlist.h" #include "qemu/ctype.h" #include "qemu/error-report.h" #include "qapi/visitor.h" @@ -51,7 +52,7 @@ void qdev_prop_allow_set_link_before_realize(const Object *obj, } } -void *object_field_prop_ptr(Object *obj, Property *prop) +void *object_field_prop_ptr(Object *obj, const Property *prop) { void *ptr = obj; ptr += prop->offset; @@ -61,7 +62,7 @@ void *object_field_prop_ptr(Object *obj, Property *prop) static void field_prop_get(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; return prop->info->get(obj, v, name, opaque, errp); } @@ -78,7 +79,7 @@ static ObjectPropertyAccessor *field_prop_getter(const PropertyInfo *info) static void field_prop_set(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; if (!qdev_prop_allow_set(obj, name, prop->info, errp)) { return; @@ -100,7 +101,7 @@ static ObjectPropertyAccessor *field_prop_setter(const PropertyInfo *info) void qdev_propinfo_get_enum(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; int *ptr = object_field_prop_ptr(obj, prop); visit_type_enum(v, name, ptr, prop->info->enum_table, errp); @@ -109,7 +110,7 @@ void qdev_propinfo_get_enum(Object *obj, Visitor *v, const char *name, void qdev_propinfo_set_enum(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; int *ptr = object_field_prop_ptr(obj, prop); visit_type_enum(v, name, ptr, prop->info->enum_table, errp); @@ -122,22 +123,15 @@ void qdev_propinfo_set_default_value_enum(ObjectProperty *op, qapi_enum_lookup(prop->info->enum_table, prop->defval.i)); } -const PropertyInfo qdev_prop_enum = { - .name = "enum", - .get = qdev_propinfo_get_enum, - .set = qdev_propinfo_set_enum, - .set_default_value = qdev_propinfo_set_default_value_enum, -}; - /* Bit */ -static uint32_t qdev_get_prop_mask(Property *prop) +static uint32_t qdev_get_prop_mask(const Property *prop) { assert(prop->info == &qdev_prop_bit); return 0x1 << prop->bitnr; } -static void bit_prop_set(Object *obj, Property *props, bool val) +static void bit_prop_set(Object *obj, const Property *props, bool val) { uint32_t *p = object_field_prop_ptr(obj, props); 
uint32_t mask = qdev_get_prop_mask(props); @@ -151,7 +145,7 @@ static void bit_prop_set(Object *obj, Property *props, bool val) static void prop_get_bit(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint32_t *p = object_field_prop_ptr(obj, prop); bool value = (*p & qdev_get_prop_mask(prop)) != 0; @@ -161,7 +155,7 @@ static void prop_get_bit(Object *obj, Visitor *v, const char *name, static void prop_set_bit(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; bool value; if (!visit_type_bool(v, name, &value, errp)) { @@ -176,7 +170,7 @@ static void set_default_value_bool(ObjectProperty *op, const Property *prop) } const PropertyInfo qdev_prop_bit = { - .name = "bool", + .type = "bool", .description = "on/off", .get = prop_get_bit, .set = prop_set_bit, @@ -185,13 +179,14 @@ const PropertyInfo qdev_prop_bit = { /* Bit64 */ -static uint64_t qdev_get_prop_mask64(Property *prop) +static uint64_t qdev_get_prop_mask64(const Property *prop) { - assert(prop->info == &qdev_prop_bit64); + assert(prop->info == &qdev_prop_bit64 || + prop->info == &qdev_prop_on_off_auto_bit64); return 0x1ull << prop->bitnr; } -static void bit64_prop_set(Object *obj, Property *props, bool val) +static void bit64_prop_set(Object *obj, const Property *props, bool val) { uint64_t *p = object_field_prop_ptr(obj, props); uint64_t mask = qdev_get_prop_mask64(props); @@ -205,7 +200,7 @@ static void bit64_prop_set(Object *obj, Property *props, bool val) static void prop_get_bit64(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint64_t *p = object_field_prop_ptr(obj, prop); bool value = (*p & qdev_get_prop_mask64(prop)) != 0; @@ -215,7 +210,7 @@ static void prop_get_bit64(Object *obj, Visitor *v, const char *name, static void prop_set_bit64(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; bool value; if (!visit_type_bool(v, name, &value, errp)) { @@ -225,19 +220,82 @@ static void prop_set_bit64(Object *obj, Visitor *v, const char *name, } const PropertyInfo qdev_prop_bit64 = { - .name = "bool", + .type = "bool", .description = "on/off", .get = prop_get_bit64, .set = prop_set_bit64, .set_default_value = set_default_value_bool, }; +static void prop_get_on_off_auto_bit64(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + OnOffAutoBit64 *p = object_field_prop_ptr(obj, prop); + OnOffAuto value; + uint64_t mask = qdev_get_prop_mask64(prop); + + if (p->auto_bits & mask) { + value = ON_OFF_AUTO_AUTO; + } else if (p->on_bits & mask) { + value = ON_OFF_AUTO_ON; + } else { + value = ON_OFF_AUTO_OFF; + } + + visit_type_OnOffAuto(v, name, &value, errp); +} + +static void prop_set_on_off_auto_bit64(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + OnOffAutoBit64 *p = object_field_prop_ptr(obj, prop); + OnOffAuto value; + uint64_t mask = qdev_get_prop_mask64(prop); + + if (!visit_type_OnOffAuto(v, name, &value, errp)) { + return; + } + + switch (value) { + case ON_OFF_AUTO_AUTO: + p->on_bits &= ~mask; + p->auto_bits |= mask; + break; + + case ON_OFF_AUTO_ON: + p->on_bits |= mask; + p->auto_bits &= ~mask; + break; + + case ON_OFF_AUTO_OFF: + p->on_bits &= ~mask; + p->auto_bits &= ~mask; + break; + + case 
ON_OFF_AUTO__MAX: + g_assert_not_reached(); + } +} + +const PropertyInfo qdev_prop_on_off_auto_bit64 = { + .type = "OnOffAuto", + .description = "on/off/auto", + .enum_table = &OnOffAuto_lookup, + .get = prop_get_on_off_auto_bit64, + .set = prop_set_on_off_auto_bit64, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + /* --- bool --- */ static void get_bool(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; bool *ptr = object_field_prop_ptr(obj, prop); visit_type_bool(v, name, ptr, errp); @@ -246,14 +304,15 @@ static void get_bool(Object *obj, Visitor *v, const char *name, void *opaque, static void set_bool(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; bool *ptr = object_field_prop_ptr(obj, prop); visit_type_bool(v, name, ptr, errp); } const PropertyInfo qdev_prop_bool = { - .name = "bool", + .type = "bool", + .description = "on/off", .get = get_bool, .set = set_bool, .set_default_value = set_default_value_bool, @@ -264,7 +323,7 @@ const PropertyInfo qdev_prop_bool = { static void get_uint8(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint8_t *ptr = object_field_prop_ptr(obj, prop); visit_type_uint8(v, name, ptr, errp); @@ -273,7 +332,7 @@ static void get_uint8(Object *obj, Visitor *v, const char *name, void *opaque, static void set_uint8(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint8_t *ptr = object_field_prop_ptr(obj, prop); visit_type_uint8(v, name, ptr, errp); @@ -292,7 +351,7 @@ void qdev_propinfo_set_default_value_uint(ObjectProperty *op, } const PropertyInfo qdev_prop_uint8 = { - .name = "uint8", + .type = "uint8", .get = get_uint8, .set = set_uint8, .set_default_value = qdev_propinfo_set_default_value_uint, @@ -303,7 +362,7 @@ const PropertyInfo qdev_prop_uint8 = { static void get_uint16(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint16_t *ptr = object_field_prop_ptr(obj, prop); visit_type_uint16(v, name, ptr, errp); @@ -312,14 +371,14 @@ static void get_uint16(Object *obj, Visitor *v, const char *name, static void set_uint16(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint16_t *ptr = object_field_prop_ptr(obj, prop); visit_type_uint16(v, name, ptr, errp); } const PropertyInfo qdev_prop_uint16 = { - .name = "uint16", + .type = "uint16", .get = get_uint16, .set = set_uint16, .set_default_value = qdev_propinfo_set_default_value_uint, @@ -330,7 +389,7 @@ const PropertyInfo qdev_prop_uint16 = { static void get_uint32(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint32_t *ptr = object_field_prop_ptr(obj, prop); visit_type_uint32(v, name, ptr, errp); @@ -339,7 +398,7 @@ static void get_uint32(Object *obj, Visitor *v, const char *name, static void set_uint32(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint32_t *ptr = object_field_prop_ptr(obj, prop); visit_type_uint32(v, name, ptr, errp); @@ -348,7 +407,7 @@ static void set_uint32(Object *obj, Visitor *v, const char *name, void 
qdev_propinfo_get_int32(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; int32_t *ptr = object_field_prop_ptr(obj, prop); visit_type_int32(v, name, ptr, errp); @@ -357,21 +416,21 @@ void qdev_propinfo_get_int32(Object *obj, Visitor *v, const char *name, static void set_int32(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; int32_t *ptr = object_field_prop_ptr(obj, prop); visit_type_int32(v, name, ptr, errp); } const PropertyInfo qdev_prop_uint32 = { - .name = "uint32", + .type = "uint32", .get = get_uint32, .set = set_uint32, .set_default_value = qdev_propinfo_set_default_value_uint, }; const PropertyInfo qdev_prop_int32 = { - .name = "int32", + .type = "int32", .get = qdev_propinfo_get_int32, .set = set_int32, .set_default_value = qdev_propinfo_set_default_value_int, @@ -382,7 +441,7 @@ const PropertyInfo qdev_prop_int32 = { static void get_uint64(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint64_t *ptr = object_field_prop_ptr(obj, prop); visit_type_uint64(v, name, ptr, errp); @@ -391,7 +450,7 @@ static void get_uint64(Object *obj, Visitor *v, const char *name, static void set_uint64(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint64_t *ptr = object_field_prop_ptr(obj, prop); visit_type_uint64(v, name, ptr, errp); @@ -400,7 +459,7 @@ static void set_uint64(Object *obj, Visitor *v, const char *name, static void get_int64(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; int64_t *ptr = object_field_prop_ptr(obj, prop); visit_type_int64(v, name, ptr, errp); @@ -409,21 +468,21 @@ static void get_int64(Object *obj, Visitor *v, const char *name, static void set_int64(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; int64_t *ptr = object_field_prop_ptr(obj, prop); visit_type_int64(v, name, ptr, errp); } const PropertyInfo qdev_prop_uint64 = { - .name = "uint64", + .type = "uint64", .get = get_uint64, .set = set_uint64, .set_default_value = qdev_propinfo_set_default_value_uint, }; const PropertyInfo qdev_prop_int64 = { - .name = "int64", + .type = "int64", .get = get_int64, .set = set_int64, .set_default_value = qdev_propinfo_set_default_value_int, @@ -432,7 +491,7 @@ const PropertyInfo qdev_prop_int64 = { static void set_uint64_checkmask(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint64_t *ptr = object_field_prop_ptr(obj, prop); visit_type_uint64(v, name, ptr, errp); @@ -443,23 +502,60 @@ static void set_uint64_checkmask(Object *obj, Visitor *v, const char *name, } const PropertyInfo qdev_prop_uint64_checkmask = { - .name = "uint64", + .type = "uint64", .get = get_uint64, .set = set_uint64_checkmask, }; +/* --- pointer-size integer --- */ + +static void get_usize(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + const Property *prop = opaque; + +#if HOST_LONG_BITS == 32 + uint32_t *ptr = object_field_prop_ptr(obj, prop); + visit_type_uint32(v, name, ptr, errp); +#else + uint64_t *ptr = object_field_prop_ptr(obj, prop); + visit_type_uint64(v, name, ptr, errp); +#endif +} + 
+static void set_usize(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + const Property *prop = opaque; + +#if HOST_LONG_BITS == 32 + uint32_t *ptr = object_field_prop_ptr(obj, prop); + visit_type_uint32(v, name, ptr, errp); +#else + uint64_t *ptr = object_field_prop_ptr(obj, prop); + visit_type_uint64(v, name, ptr, errp); +#endif +} + +const PropertyInfo qdev_prop_usize = { + .type = "usize", + .get = get_usize, + .set = set_usize, + .set_default_value = qdev_propinfo_set_default_value_uint, +}; + /* --- string --- */ static void release_string(Object *obj, const char *name, void *opaque) { - Property *prop = opaque; + const Property *prop = opaque; g_free(*(char **)object_field_prop_ptr(obj, prop)); } static void get_string(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; char **ptr = object_field_prop_ptr(obj, prop); if (!*ptr) { @@ -473,7 +569,7 @@ static void get_string(Object *obj, Visitor *v, const char *name, static void set_string(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; char **ptr = object_field_prop_ptr(obj, prop); char *str; @@ -485,7 +581,7 @@ static void set_string(Object *obj, Visitor *v, const char *name, } const PropertyInfo qdev_prop_string = { - .name = "str", + .type = "str", .release = release_string, .get = get_string, .set = set_string, @@ -494,7 +590,7 @@ const PropertyInfo qdev_prop_string = { /* --- on/off/auto --- */ const PropertyInfo qdev_prop_on_off_auto = { - .name = "OnOffAuto", + .type = "OnOffAuto", .description = "on/off/auto", .enum_table = &OnOffAuto_lookup, .get = qdev_propinfo_get_enum, @@ -507,7 +603,7 @@ const PropertyInfo qdev_prop_on_off_auto = { void qdev_propinfo_get_size32(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint32_t *ptr = object_field_prop_ptr(obj, prop); uint64_t value = *ptr; @@ -517,7 +613,7 @@ void qdev_propinfo_get_size32(Object *obj, Visitor *v, const char *name, static void set_size32(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint32_t *ptr = object_field_prop_ptr(obj, prop); uint64_t value; @@ -537,7 +633,7 @@ static void set_size32(Object *obj, Visitor *v, const char *name, void *opaque, } const PropertyInfo qdev_prop_size32 = { - .name = "size", + .type = "size", .get = qdev_propinfo_get_size32, .set = set_size32, .set_default_value = qdev_propinfo_set_default_value_uint, @@ -557,7 +653,7 @@ struct ArrayElementList { * specific element of the array. Arrays are backed by an uint32_t length field * and an element array. @elem points at an element in this element array. 
*/ -static Property array_elem_prop(Object *obj, Property *parent_prop, +static Property array_elem_prop(Object *obj, const Property *parent_prop, const char *name, char *elem) { return (Property) { @@ -582,7 +678,7 @@ static Property array_elem_prop(Object *obj, Property *parent_prop, */ static void release_prop_array(Object *obj, const char *name, void *opaque) { - Property *prop = opaque; + const Property *prop = opaque; uint32_t *alenptr = object_field_prop_ptr(obj, prop); void **arrayptr = (void *)obj + prop->arrayoffset; char *elem = *arrayptr; @@ -609,7 +705,7 @@ static void set_prop_array(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { ERRP_GUARD(); - Property *prop = opaque; + const Property *prop = opaque; uint32_t *alenptr = object_field_prop_ptr(obj, prop); void **arrayptr = (void *)obj + prop->arrayoffset; ArrayElementList *list, *elem, *next; @@ -685,7 +781,7 @@ static void get_prop_array(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { ERRP_GUARD(); - Property *prop = opaque; + const Property *prop = opaque; uint32_t *alenptr = object_field_prop_ptr(obj, prop); void **arrayptr = (void *)obj + prop->arrayoffset; char *elemptr = *arrayptr; @@ -740,7 +836,7 @@ static void default_prop_array(ObjectProperty *op, const Property *prop) } const PropertyInfo qdev_prop_array = { - .name = "list", + .type = "list", .get = get_prop_array, .set = set_prop_array, .release = release_prop_array, @@ -749,29 +845,26 @@ const PropertyInfo qdev_prop_array = { /* --- public helpers --- */ -static Property *qdev_prop_walk(Property *props, const char *name) +static const Property *qdev_prop_walk(DeviceClass *cls, const char *name) { - if (!props) { - return NULL; - } - while (props->name) { - if (strcmp(props->name, name) == 0) { - return props; + for (int i = 0, n = cls->props_count_; i < n; ++i) { + const Property *prop = &cls->props_[i]; + if (strcmp(prop->name, name) == 0) { + return prop; } - props++; } return NULL; } -static Property *qdev_prop_find(DeviceState *dev, const char *name) +static const Property *qdev_prop_find(DeviceState *dev, const char *name) { ObjectClass *class; - Property *prop; + const Property *prop; /* device properties */ class = object_get_class(OBJECT(dev)); do { - prop = qdev_prop_walk(DEVICE_CLASS(class)->props_, name); + prop = qdev_prop_walk(DEVICE_CLASS(class), name); if (prop) { return prop; } @@ -840,7 +933,7 @@ void qdev_prop_set_string(DeviceState *dev, const char *name, const char *value) void qdev_prop_set_enum(DeviceState *dev, const char *name, int value) { - Property *prop; + const Property *prop; prop = qdev_prop_find(dev, name); object_property_set_str(OBJECT(dev), name, @@ -931,7 +1024,7 @@ void qdev_prop_set_globals(DeviceState *dev) static void get_size(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint64_t *ptr = object_field_prop_ptr(obj, prop); visit_type_size(v, name, ptr, errp); @@ -940,14 +1033,14 @@ static void get_size(Object *obj, Visitor *v, const char *name, void *opaque, static void set_size(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint64_t *ptr = object_field_prop_ptr(obj, prop); visit_type_size(v, name, ptr, errp); } const PropertyInfo qdev_prop_size = { - .name = "size", + .type = "size", .get = get_size, .set = set_size, .set_default_value = qdev_propinfo_set_default_value_uint, @@ -956,7 +1049,7 @@ const 
PropertyInfo qdev_prop_size = { /* --- object link property --- */ static ObjectProperty *create_link_property(ObjectClass *oc, const char *name, - Property *prop) + const Property *prop) { return object_class_property_add_link(oc, name, prop->link_type, prop->offset, @@ -965,22 +1058,22 @@ static ObjectProperty *create_link_property(ObjectClass *oc, const char *name, } const PropertyInfo qdev_prop_link = { - .name = "link", + .type = "link", .create = create_link_property, }; -void qdev_property_add_static(DeviceState *dev, Property *prop) +void qdev_property_add_static(DeviceState *dev, const Property *prop) { Object *obj = OBJECT(dev); ObjectProperty *op; assert(!prop->info->create); - op = object_property_add(obj, prop->name, prop->info->name, + op = object_property_add(obj, prop->name, prop->info->type, field_prop_getter(prop->info), field_prop_setter(prop->info), prop->info->release, - prop); + (Property *)prop); object_property_set_description(obj, prop->name, prop->info->description); @@ -994,7 +1087,7 @@ void qdev_property_add_static(DeviceState *dev, Property *prop) } static void qdev_class_add_property(DeviceClass *klass, const char *name, - Property *prop) + const Property *prop) { ObjectClass *oc = OBJECT_CLASS(klass); ObjectProperty *op; @@ -1003,11 +1096,11 @@ static void qdev_class_add_property(DeviceClass *klass, const char *name, op = prop->info->create(oc, name, prop); } else { op = object_class_property_add(oc, - name, prop->info->name, + name, prop->info->type, field_prop_getter(prop->info), field_prop_setter(prop->info), prop->info->release, - prop); + (Property *)prop); } if (prop->set_default) { prop->info->set_default_value(op, prop); @@ -1023,7 +1116,7 @@ static void qdev_get_legacy_property(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; char buffer[1024]; char *ptr = buffer; @@ -1046,7 +1139,7 @@ static void qdev_get_legacy_property(Object *obj, Visitor *v, * Do not use this in new code! QOM Properties added through this interface * will be given names in the "legacy" namespace. */ -static void qdev_class_add_legacy_property(DeviceClass *dc, Property *prop) +static void qdev_class_add_legacy_property(DeviceClass *dc, const Property *prop) { g_autofree char *name = NULL; @@ -1058,15 +1151,21 @@ static void qdev_class_add_legacy_property(DeviceClass *dc, Property *prop) name = g_strdup_printf("legacy-%s", prop->name); object_class_property_add(OBJECT_CLASS(dc), name, "str", prop->info->print ? qdev_get_legacy_property : prop->info->get, - NULL, NULL, prop); + NULL, NULL, (Property *)prop); } -void device_class_set_props(DeviceClass *dc, Property *props) +void device_class_set_props_n(DeviceClass *dc, const Property *props, size_t n) { - Property *prop; + /* We used a hole in DeviceClass because that's still a lot. */ + assert(n <= UINT16_MAX); + assert(n != 0); dc->props_ = props; - for (prop = props; prop && prop->name; prop++) { + dc->props_count_ = n; + + for (size_t i = 0; i < n; ++i) { + const Property *prop = &props[i]; + assert(prop->name); qdev_class_add_legacy_property(dc, prop); qdev_class_add_property(dc, prop->name, prop); } diff --git a/hw/core/qdev-user.c b/hw/core/qdev-user.c new file mode 100644 index 00000000000..3d421d8f4e5 --- /dev/null +++ b/hw/core/qdev-user.c @@ -0,0 +1,19 @@ +/* + * QDev helpers specific to user emulation. + * + * Copyright 2025 Linaro, Ltd. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "qom/object.h" +#include "hw/qdev-core.h" + +void qdev_create_fake_machine(void) +{ + Object *fake_machine_obj; + + fake_machine_obj = object_property_add_new_container(object_get_root(), + "machine"); + object_property_add_new_container(fake_machine_obj, "unattached"); +} diff --git a/hw/core/qdev.c b/hw/core/qdev.c index f3a996f57de..f6002261768 100644 --- a/hw/core/qdev.c +++ b/hw/core/qdev.c @@ -28,7 +28,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qapi/qapi-events-qdev.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/visitor.h" #include "qemu/error-report.h" #include "qemu/option.h" @@ -146,31 +146,16 @@ bool qdev_set_parent_bus(DeviceState *dev, BusState *bus, Error **errp) DeviceState *qdev_new(const char *name) { - ObjectClass *oc = object_class_by_name(name); -#ifdef CONFIG_MODULES - if (!oc) { - int rv = module_load_qom(name, &error_fatal); - if (rv > 0) { - oc = object_class_by_name(name); - } else { - error_report("could not find a module for type '%s'", name); - exit(1); - } - } -#endif - if (!oc) { - error_report("unknown type '%s'", name); - abort(); - } return DEVICE(object_new(name)); } DeviceState *qdev_try_new(const char *name) { - if (!module_object_class_by_name(name)) { + ObjectClass *oc = module_object_class_by_name(name); + if (!oc) { return NULL; } - return DEVICE(object_new(name)); + return DEVICE(object_new_with_class(oc)); } static QTAILQ_HEAD(, DeviceListener) device_listeners @@ -491,8 +476,7 @@ static void device_set_realized(Object *obj, bool value, Error **errp) if (!obj->parent) { gchar *name = g_strdup_printf("device[%d]", unattached_count++); - object_property_add_child(container_get(qdev_get_machine(), - "/unattached"), + object_property_add_child(machine_get_container("unattached"), name, obj); unattached_parent = true; g_free(name); @@ -706,11 +690,10 @@ static void device_finalize(Object *obj) dev->canonical_path = NULL; } - qobject_unref(dev->opts); g_free(dev->id); } -static void device_class_base_init(ObjectClass *class, void *data) +static void device_class_base_init(ObjectClass *class, const void *data) { DeviceClass *klass = DEVICE_CLASS(class); @@ -718,6 +701,7 @@ static void device_class_base_init(ObjectClass *class, void *data) * so do not propagate them to the subclasses. */ klass->props_ = NULL; + klass->props_count_ = 0; } static void device_unparent(Object *obj) @@ -747,58 +731,7 @@ device_vmstate_if_get_id(VMStateIf *obj) return qdev_get_dev_path(dev); } -/** - * device_phases_reset: - * Transition reset method for devices to allow moving - * smoothly from legacy reset method to multi-phases - */ -static void device_phases_reset(DeviceState *dev) -{ - ResettableClass *rc = RESETTABLE_GET_CLASS(dev); - - if (rc->phases.enter) { - rc->phases.enter(OBJECT(dev), RESET_TYPE_COLD); - } - if (rc->phases.hold) { - rc->phases.hold(OBJECT(dev), RESET_TYPE_COLD); - } - if (rc->phases.exit) { - rc->phases.exit(OBJECT(dev), RESET_TYPE_COLD); - } -} - -static void device_transitional_reset(Object *obj) -{ - DeviceClass *dc = DEVICE_GET_CLASS(obj); - - /* - * This will call either @device_phases_reset (for multi-phases transitioned - * devices) or a device's specific method for not-yet transitioned devices. - * In both case, it does not reset children. 
- */ - if (dc->reset) { - dc->reset(DEVICE(obj)); - } -} - -/** - * device_get_transitional_reset: - * check if the device's class is ready for multi-phase - */ -static ResettableTrFunction device_get_transitional_reset(Object *obj) -{ - DeviceClass *dc = DEVICE_GET_CLASS(obj); - if (dc->reset != device_phases_reset) { - /* - * dc->reset has been overridden by a subclass, - * the device is not ready for multi phase yet. - */ - return device_transitional_reset; - } - return NULL; -} - -static void device_class_init(ObjectClass *class, void *data) +static void device_class_init(ObjectClass *class, const void *data) { DeviceClass *dc = DEVICE_CLASS(class); VMStateIfClass *vc = VMSTATE_IF_CLASS(class); @@ -819,20 +752,12 @@ static void device_class_init(ObjectClass *class, void *data) rc->child_foreach = device_reset_child_foreach; /* - * @device_phases_reset is put as the default reset method below, allowing - * to do the multi-phase transition from base classes to leaf classes. It - * allows a legacy-reset Device class to extend a multi-phases-reset - * Device class for the following reason: - * + If a base class B has been moved to multi-phase, then it does not - * override this default reset method and may have defined phase methods. - * + A child class C (extending class B) which uses - * device_class_set_parent_reset() (or similar means) to override the - * reset method will still work as expected. @device_phases_reset function - * will be registered as the parent reset method and effectively call - * parent reset phases. + * A NULL legacy_reset implies a three-phase reset device. Devices can + * only be reset using three-phase aware mechanisms, but we still support + * for transitional purposes leaf classes which set the old legacy_reset + * method via device_class_set_legacy_reset(). */ - dc->reset = device_phases_reset; - rc->get_transitional_function = device_get_transitional_reset; + dc->legacy_reset = NULL; object_class_property_add_bool(class, "realized", device_get_realized, device_set_realized); @@ -844,12 +769,30 @@ static void device_class_init(ObjectClass *class, void *data) offsetof(DeviceState, parent_bus), NULL, 0); } -void device_class_set_parent_reset(DeviceClass *dc, - DeviceReset dev_reset, - DeviceReset *parent_reset) +static void do_legacy_reset(Object *obj, ResetType type) { - *parent_reset = dc->reset; - dc->reset = dev_reset; + DeviceClass *dc = DEVICE_GET_CLASS(obj); + + dc->legacy_reset(DEVICE(obj)); +} + +void device_class_set_legacy_reset(DeviceClass *dc, DeviceReset dev_reset) +{ + /* + * A legacy DeviceClass::reset has identical semantics to the + * three-phase "hold" method, with no "enter" or "exit" + * behaviour. Classes that use this legacy function must be leaf + * classes that do not chain up to their parent class reset. + * There is no mechanism for resetting a device that does not + * use the three-phase APIs, so the only place which calls + * the legacy_reset hook is do_legacy_reset(). + */ + ResettableClass *rc = RESETTABLE_CLASS(dc); + + rc->phases.enter = NULL; + rc->phases.hold = do_legacy_reset; + rc->phases.exit = NULL; + dc->legacy_reset = dev_reset; } void device_class_set_parent_realize(DeviceClass *dc, @@ -873,12 +816,28 @@ Object *qdev_get_machine(void) static Object *dev; if (dev == NULL) { - dev = container_get(object_get_root(), "/machine"); + dev = object_resolve_path_component(object_get_root(), "machine"); + /* + * Any call to this function before machine is created is treated + * as a programming error as of now. 
+ */ + assert(dev); } return dev; } +Object *machine_get_container(const char *name) +{ + Object *container, *machine; + + machine = qdev_get_machine(); + container = object_resolve_path_component(machine, name); + assert(object_dynamic_cast(container, TYPE_CONTAINER)); + + return container; +} + char *qdev_get_human_name(DeviceState *dev) { g_assert(dev != NULL); @@ -911,7 +870,7 @@ static const TypeInfo device_type_info = { .class_init = device_class_init, .abstract = true, .class_size = sizeof(DeviceClass), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_VMSTATE_IF }, { TYPE_RESETTABLE_INTERFACE }, { } diff --git a/hw/core/register.c b/hw/core/register.c index 95b0150c0aa..8f63d9f227c 100644 --- a/hw/core/register.c +++ b/hw/core/register.c @@ -319,7 +319,7 @@ void register_finalize_block(RegisterInfoArray *r_array) g_free(r_array); } -static void register_class_init(ObjectClass *oc, void *data) +static void register_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/core/reset.c b/hw/core/reset.c index 58dfc8db3dc..65f82fa43d9 100644 --- a/hw/core/reset.c +++ b/hw/core/reset.c @@ -24,7 +24,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "hw/resettable.h" #include "hw/core/resetcontainer.h" @@ -84,7 +84,7 @@ static void legacy_reset_finalize(Object *obj) { } -static void legacy_reset_class_init(ObjectClass *klass, void *data) +static void legacy_reset_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -170,11 +170,8 @@ void qemu_unregister_resettable(Object *obj) resettable_container_remove(get_root_reset_container(), obj); } -void qemu_devices_reset(ShutdownCause reason) +void qemu_devices_reset(ResetType type) { - ResetType type = (reason == SHUTDOWN_CAUSE_SNAPSHOT_LOAD) ? - RESET_TYPE_SNAPSHOT_LOAD : RESET_TYPE_COLD; - /* Reset the simulation */ resettable_reset(OBJECT(get_root_reset_container()), type); } diff --git a/hw/core/resetcontainer.c b/hw/core/resetcontainer.c index e4ece68e83a..5ff17002e75 100644 --- a/hw/core/resetcontainer.c +++ b/hw/core/resetcontainer.c @@ -68,7 +68,8 @@ static void resettable_container_finalize(Object *obj) { } -static void resettable_container_class_init(ObjectClass *klass, void *data) +static void resettable_container_class_init(ObjectClass *klass, + const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/core/resettable.c b/hw/core/resettable.c index 6dd3e3dc487..5cdb4a4f8d3 100644 --- a/hw/core/resettable.c +++ b/hw/core/resettable.c @@ -93,20 +93,6 @@ static void resettable_child_foreach(ResettableClass *rc, Object *obj, } } -/** - * resettable_get_tr_func: - * helper to fetch transitional reset callback if any. 
- */ -static ResettableTrFunction resettable_get_tr_func(ResettableClass *rc, - Object *obj) -{ - ResettableTrFunction tr_func = NULL; - if (rc->get_transitional_function) { - tr_func = rc->get_transitional_function(obj); - } - return tr_func; -} - static void resettable_phase_enter(Object *obj, void *opaque, ResetType type) { ResettableClass *rc = RESETTABLE_GET_CLASS(obj); @@ -146,7 +132,7 @@ static void resettable_phase_enter(Object *obj, void *opaque, ResetType type) if (action_needed) { trace_resettable_phase_enter_exec(obj, obj_typename, type, !!rc->phases.enter); - if (rc->phases.enter && !resettable_get_tr_func(rc, obj)) { + if (rc->phases.enter) { rc->phases.enter(obj, type); } s->hold_phase_pending = true; @@ -171,12 +157,8 @@ static void resettable_phase_hold(Object *obj, void *opaque, ResetType type) /* exec hold phase */ if (s->hold_phase_pending) { s->hold_phase_pending = false; - ResettableTrFunction tr_func = resettable_get_tr_func(rc, obj); trace_resettable_phase_hold_exec(obj, obj_typename, !!rc->phases.hold); - if (tr_func) { - trace_resettable_transitional_function(obj, obj_typename); - tr_func(obj); - } else if (rc->phases.hold) { + if (rc->phases.hold) { rc->phases.hold(obj, type); } } @@ -199,7 +181,7 @@ static void resettable_phase_exit(Object *obj, void *opaque, ResetType type) assert(s->count > 0); if (--s->count == 0) { trace_resettable_phase_exit_exec(obj, obj_typename, !!rc->phases.exit); - if (rc->phases.exit && !resettable_get_tr_func(rc, obj)) { + if (rc->phases.exit) { rc->phases.exit(obj, type); } } diff --git a/hw/core/split-irq.c b/hw/core/split-irq.c index 3b90af2e8f9..f8b48750c5b 100644 --- a/hw/core/split-irq.c +++ b/hw/core/split-irq.c @@ -59,12 +59,11 @@ static void split_irq_realize(DeviceState *dev, Error **errp) qdev_init_gpio_out(dev, s->out_irq, s->num_lines); } -static Property split_irq_properties[] = { +static const Property split_irq_properties[] = { DEFINE_PROP_UINT16("num-lines", SplitIRQ, num_lines, 1), - DEFINE_PROP_END_OF_LIST(), }; -static void split_irq_class_init(ObjectClass *klass, void *data) +static void split_irq_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/core/sysbus-fdt.c b/hw/core/sysbus-fdt.c index eebcd28f9a3..e80776080be 100644 --- a/hw/core/sysbus-fdt.c +++ b/hw/core/sysbus-fdt.c @@ -29,13 +29,16 @@ #endif #include "hw/core/sysbus-fdt.h" #include "qemu/error-report.h" -#include "sysemu/device_tree.h" -#include "sysemu/tpm.h" +#include "system/device_tree.h" +#include "system/tpm.h" +#include "hw/arm/smmuv3.h" #include "hw/platform-bus.h" #include "hw/vfio/vfio-platform.h" #include "hw/vfio/vfio-calxeda-xgmac.h" #include "hw/vfio/vfio-amd-xgbe.h" +#include "hw/vfio/vfio-region.h" #include "hw/display/ramfb.h" +#include "hw/uefi/var-service-api.h" #include "hw/arm/fdt.h" /* @@ -471,6 +474,28 @@ static int add_tpm_tis_fdt_node(SysBusDevice *sbdev, void *opaque) } #endif +static int add_uefi_vars_node(SysBusDevice *sbdev, void *opaque) +{ + PlatformBusFDTData *data = opaque; + PlatformBusDevice *pbus = data->pbus; + const char *parent_node = data->pbus_node_name; + void *fdt = data->fdt; + uint64_t mmio_base; + char *nodename; + + mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, 0); + nodename = g_strdup_printf("%s/%s@%" PRIx64, parent_node, + UEFI_VARS_FDT_NODE, mmio_base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, + "compatible", UEFI_VARS_FDT_COMPAT); + qemu_fdt_setprop_sized_cells(fdt, nodename, "reg", + 1, mmio_base, + 
1, UEFI_VARS_REGS_SIZE); + g_free(nodename); + return 0; +} + static int no_fdt_node(SysBusDevice *sbdev, void *opaque) { return 0; @@ -494,7 +519,10 @@ static const BindingEntry bindings[] = { #ifdef CONFIG_TPM TYPE_BINDING(TYPE_TPM_TIS_SYSBUS, add_tpm_tis_fdt_node), #endif + /* No generic DT support for smmuv3 dev. Support added for arm virt only */ + TYPE_BINDING(TYPE_ARM_SMMUV3, no_fdt_node), TYPE_BINDING(TYPE_RAMFB_DEVICE, no_fdt_node), + TYPE_BINDING(TYPE_UEFI_VARS_SYSBUS, add_uefi_vars_node), TYPE_BINDING("", NULL), /* last element */ }; diff --git a/hw/core/sysbus.c b/hw/core/sysbus.c index ad34fb73446..ec69e877a2c 100644 --- a/hw/core/sysbus.c +++ b/hw/core/sysbus.c @@ -19,10 +19,9 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qemu/module.h" #include "hw/sysbus.h" #include "monitor/monitor.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" static void sysbus_dev_print(Monitor *mon, DeviceState *dev, int indent); static char *sysbus_get_fw_dev_path(DeviceState *dev); @@ -65,14 +64,14 @@ void foreach_dynamic_sysbus_device(FindSysbusDeviceFunc *func, void *opaque) }; /* Loop through all sysbus devices that were spawned outside the machine */ - container = container_get(qdev_get_machine(), "/peripheral"); + container = machine_get_container("peripheral"); find_sysbus_device(container, &find); - container = container_get(qdev_get_machine(), "/peripheral-anon"); + container = machine_get_container("peripheral-anon"); find_sysbus_device(container, &find); } -static void system_bus_class_init(ObjectClass *klass, void *data) +static void system_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); @@ -80,13 +79,6 @@ static void system_bus_class_init(ObjectClass *klass, void *data) k->get_fw_dev_path = sysbus_get_fw_dev_path; } -static const TypeInfo system_bus_info = { - .name = TYPE_SYSTEM_BUS, - .parent = TYPE_BUS, - .instance_size = sizeof(BusState), - .class_init = system_bus_class_init, -}; - /* Check whether an IRQ source exists */ bool sysbus_has_irq(SysBusDevice *dev, int n) { @@ -154,19 +146,20 @@ static void sysbus_mmio_map_common(SysBusDevice *dev, int n, hwaddr addr, } } -void sysbus_mmio_unmap(SysBusDevice *dev, int n) +void sysbus_mmio_map(SysBusDevice *dev, int n, hwaddr addr) { - assert(n >= 0 && n < dev->num_mmio); - - if (dev->mmio[n].addr != (hwaddr)-1) { - memory_region_del_subregion(get_system_memory(), dev->mmio[n].memory); - dev->mmio[n].addr = (hwaddr)-1; - } + sysbus_mmio_map_common(dev, n, addr, false, 0); } -void sysbus_mmio_map(SysBusDevice *dev, int n, hwaddr addr) +int sysbus_mmio_map_name(SysBusDevice *dev, const char *name, hwaddr addr) { - sysbus_mmio_map_common(dev, n, addr, false, 0); + for (int i = 0; i < dev->num_mmio; i++) { + if (!strcmp(dev->mmio[i].memory->name, name)) { + sysbus_mmio_map(dev, i, addr); + return i; + } + } + return -1; } void sysbus_mmio_map_overlap(SysBusDevice *dev, int n, hwaddr addr, @@ -298,7 +291,7 @@ static char *sysbus_get_fw_dev_path(DeviceState *dev) return g_strdup(qdev_fw_name(dev)); } -static void sysbus_device_class_init(ObjectClass *klass, void *data) +static void sysbus_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->realize = sysbus_device_realize; @@ -316,15 +309,6 @@ static void sysbus_device_class_init(ObjectClass *klass, void *data) k->user_creatable = false; } -static const TypeInfo sysbus_device_type_info = { - .name = TYPE_SYS_BUS_DEVICE, - .parent = TYPE_DEVICE, - .instance_size = 
sizeof(SysBusDevice), - .abstract = true, - .class_size = sizeof(SysBusDeviceClass), - .class_init = sysbus_device_class_init, -}; - static BusState *main_system_bus; static void main_system_bus_create(void) @@ -333,8 +317,8 @@ static void main_system_bus_create(void) * assign main_system_bus before qbus_init() * in order to make "if (bus != sysbus_get_default())" work */ - main_system_bus = g_malloc0(system_bus_info.instance_size); - qbus_init(main_system_bus, system_bus_info.instance_size, + main_system_bus = g_new0(BusState, 1); + qbus_init(main_system_bus, sizeof(BusState), TYPE_SYSTEM_BUS, NULL, "main-system-bus"); OBJECT(main_system_bus)->free = g_free; } @@ -347,10 +331,36 @@ BusState *sysbus_get_default(void) return main_system_bus; } -static void sysbus_register_types(void) +static void dynamic_sysbus_device_class_init(ObjectClass *klass, + const void *data) { - type_register_static(&system_bus_info); - type_register_static(&sysbus_device_type_info); + DeviceClass *k = DEVICE_CLASS(klass); + + k->user_creatable = true; + k->hotpluggable = false; } -type_init(sysbus_register_types) +static const TypeInfo sysbus_types[] = { + { + .name = TYPE_SYSTEM_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(BusState), + .class_init = system_bus_class_init, + }, + { + .name = TYPE_SYS_BUS_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SysBusDevice), + .abstract = true, + .class_size = sizeof(SysBusDeviceClass), + .class_init = sysbus_device_class_init, + }, + { + .name = TYPE_DYNAMIC_SYS_BUS_DEVICE, + .parent = TYPE_SYS_BUS_DEVICE, + .class_init = dynamic_sysbus_device_class_init, + .abstract = true, + } +}; + +DEFINE_TYPES(sysbus_types) diff --git a/hw/core/uboot_image.h b/hw/core/uboot_image.h index 18ac293359d..e4dcfb08f0c 100644 --- a/hw/core/uboot_image.h +++ b/hw/core/uboot_image.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * (C) Copyright 2008 Semihalf * diff --git a/hw/core/vm-change-state-handler.c b/hw/core/vm-change-state-handler.c index 8e2639224e7..99c642b5587 100644 --- a/hw/core/vm-change-state-handler.c +++ b/hw/core/vm-change-state-handler.c @@ -17,7 +17,7 @@ #include "qemu/osdep.h" #include "hw/qdev-core.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" static int qdev_get_dev_tree_depth(DeviceState *dev) { @@ -40,6 +40,7 @@ static int qdev_get_dev_tree_depth(DeviceState *dev) * qdev_add_vm_change_state_handler: * @dev: the device that owns this handler * @cb: the callback function to be invoked + * @cb_ret: the callback function with return value to be invoked * @opaque: user data passed to the callback function * * This function works like qemu_add_vm_change_state_handler() except callbacks @@ -50,25 +51,30 @@ static int qdev_get_dev_tree_depth(DeviceState *dev) * controller's callback is invoked before the children on its bus when the VM * starts running. The order is reversed when the VM stops running. * + * Note that the parameter `cb` and `cb_ret` are mutually exclusive. + * * Returns: an entry to be freed with qemu_del_vm_change_state_handler() */ VMChangeStateEntry *qdev_add_vm_change_state_handler(DeviceState *dev, VMChangeStateHandler *cb, + VMChangeStateHandlerWithRet *cb_ret, void *opaque) { - return qdev_add_vm_change_state_handler_full(dev, cb, NULL, opaque); + assert(!cb || !cb_ret); + return qdev_add_vm_change_state_handler_full(dev, cb, NULL, cb_ret, opaque); } /* * Exactly like qdev_add_vm_change_state_handler() but passes a prepare_cb - * argument too. 
+ * and the cb_ret arguments too. */ VMChangeStateEntry *qdev_add_vm_change_state_handler_full( - DeviceState *dev, VMChangeStateHandler *cb, - VMChangeStateHandler *prepare_cb, void *opaque) + DeviceState *dev, VMChangeStateHandler *cb, VMChangeStateHandler *prepare_cb, + VMChangeStateHandlerWithRet *cb_ret, void *opaque) { int depth = qdev_get_dev_tree_depth(dev); - return qemu_add_vm_change_state_handler_prio_full(cb, prepare_cb, opaque, - depth); + assert(!cb || !cb_ret); + return qemu_add_vm_change_state_handler_prio_full(cb, prepare_cb, cb_ret, + opaque, depth); } diff --git a/hw/cpu/a15mpcore.c b/hw/cpu/a15mpcore.c index 967d8d3dd50..bd36dd94d4a 100644 --- a/hw/cpu/a15mpcore.c +++ b/hw/cpu/a15mpcore.c @@ -24,7 +24,7 @@ #include "hw/cpu/a15mpcore.h" #include "hw/irq.h" #include "hw/qdev-properties.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "kvm_arm.h" #include "target/arm/gtimer.h" @@ -58,6 +58,11 @@ static void a15mp_priv_realize(DeviceState *dev, Error **errp) bool has_el2 = false; Object *cpuobj; + if (s->num_irq < 32 || s->num_irq > 256) { + error_setg(errp, "Property 'num-irq' must be between 32 and 256"); + return; + } + gicdev = DEVICE(&s->gic); qdev_prop_set_uint32(gicdev, "num-cpu", s->num_cpu); qdev_prop_set_uint32(gicdev, "num-irq", s->num_irq); @@ -144,19 +149,19 @@ static void a15mp_priv_realize(DeviceState *dev, Error **errp) } } -static Property a15mp_priv_properties[] = { +static const Property a15mp_priv_properties[] = { DEFINE_PROP_UINT32("num-cpu", A15MPPrivState, num_cpu, 1), - /* The Cortex-A15MP may have anything from 0 to 224 external interrupt - * IRQ lines (with another 32 internal). We default to 128+32, which - * is the number provided by the Cortex-A15MP test chip in the - * Versatile Express A15 development board. - * Other boards may differ and should set this property appropriately. + /* + * The Cortex-A15MP may have anything from 0 to 224 external interrupt + * lines, plus always 32 internal IRQs. This property sets the total + * of internal + external, so the valid range is from 32 to 256. + * The board model must set this to whatever the configuration + * used for the CPU on that board or SoC is. 
*/ - DEFINE_PROP_UINT32("num-irq", A15MPPrivState, num_irq, 160), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_UINT32("num-irq", A15MPPrivState, num_irq, 0), }; -static void a15mp_priv_class_init(ObjectClass *klass, void *data) +static void a15mp_priv_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -165,17 +170,14 @@ static void a15mp_priv_class_init(ObjectClass *klass, void *data) /* We currently have no saveable state */ } -static const TypeInfo a15mp_priv_info = { - .name = TYPE_A15MPCORE_PRIV, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(A15MPPrivState), - .instance_init = a15mp_priv_initfn, - .class_init = a15mp_priv_class_init, +static const TypeInfo a15mp_types[] = { + { + .name = TYPE_A15MPCORE_PRIV, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(A15MPPrivState), + .instance_init = a15mp_priv_initfn, + .class_init = a15mp_priv_class_init, + }, }; -static void a15mp_register_types(void) -{ - type_register_static(&a15mp_priv_info); -} - -type_init(a15mp_register_types) +DEFINE_TYPES(a15mp_types) diff --git a/hw/cpu/a9mpcore.c b/hw/cpu/a9mpcore.c index c30ef72c669..64bebbd19cb 100644 --- a/hw/cpu/a9mpcore.c +++ b/hw/cpu/a9mpcore.c @@ -56,6 +56,11 @@ static void a9mp_priv_realize(DeviceState *dev, Error **errp) CPUState *cpu0; Object *cpuobj; + if (s->num_irq < 32 || s->num_irq > 256) { + error_setg(errp, "Property 'num-irq' must be between 32 and 256"); + return; + } + cpu0 = qemu_get_cpu(0); cpuobj = OBJECT(cpu0); if (strcmp(object_get_typename(cpuobj), ARM_CPU_TYPE_NAME("cortex-a9"))) { @@ -158,19 +163,19 @@ static void a9mp_priv_realize(DeviceState *dev, Error **errp) } } -static Property a9mp_priv_properties[] = { +static const Property a9mp_priv_properties[] = { DEFINE_PROP_UINT32("num-cpu", A9MPPrivState, num_cpu, 1), - /* The Cortex-A9MP may have anything from 0 to 224 external interrupt - * IRQ lines (with another 32 internal). We default to 64+32, which - * is the number provided by the Cortex-A9MP test chip in the - * Realview PBX-A9 and Versatile Express A9 development boards. - * Other boards may differ and should set this property appropriately. + /* + * The Cortex-A9MP may have anything from 0 to 224 external interrupt + * lines, plus always 32 internal IRQs. This property sets the total + * of internal + external, so the valid range is from 32 to 256. + * The board model must set this to whatever the configuration + * used for the CPU on that board or SoC is. 
*/ - DEFINE_PROP_UINT32("num-irq", A9MPPrivState, num_irq, 96), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_UINT32("num-irq", A9MPPrivState, num_irq, 0), }; -static void a9mp_priv_class_init(ObjectClass *klass, void *data) +static void a9mp_priv_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -178,17 +183,14 @@ static void a9mp_priv_class_init(ObjectClass *klass, void *data) device_class_set_props(dc, a9mp_priv_properties); } -static const TypeInfo a9mp_priv_info = { - .name = TYPE_A9MPCORE_PRIV, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(A9MPPrivState), - .instance_init = a9mp_priv_initfn, - .class_init = a9mp_priv_class_init, +static const TypeInfo a9mp_types[] = { + { + .name = TYPE_A9MPCORE_PRIV, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(A9MPPrivState), + .instance_init = a9mp_priv_initfn, + .class_init = a9mp_priv_class_init, + }, }; -static void a9mp_register_types(void) -{ - type_register_static(&a9mp_priv_info); -} - -type_init(a9mp_register_types) +DEFINE_TYPES(a9mp_types) diff --git a/hw/cpu/arm11mpcore.c b/hw/cpu/arm11mpcore.c index 89c4e35143a..01772e7f77b 100644 --- a/hw/cpu/arm11mpcore.c +++ b/hw/cpu/arm11mpcore.c @@ -131,7 +131,7 @@ static void mpcore_priv_initfn(Object *obj) object_initialize_child(obj, "wdtimer", &s->wdtimer, TYPE_ARM_MPTIMER); } -static Property mpcore_priv_properties[] = { +static const Property mpcore_priv_properties[] = { DEFINE_PROP_UINT32("num-cpu", ARM11MPCorePriveState, num_cpu, 1), /* The ARM11 MPCORE TRM says the on-chip controller may have * anything from 0 to 224 external interrupt IRQ lines (with another @@ -142,10 +142,9 @@ static Property mpcore_priv_properties[] = { * has more IRQ lines than the kernel expects. */ DEFINE_PROP_UINT32("num-irq", ARM11MPCorePriveState, num_irq, 64), - DEFINE_PROP_END_OF_LIST(), }; -static void mpcore_priv_class_init(ObjectClass *klass, void *data) +static void mpcore_priv_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -153,17 +152,14 @@ static void mpcore_priv_class_init(ObjectClass *klass, void *data) device_class_set_props(dc, mpcore_priv_properties); } -static const TypeInfo mpcore_priv_info = { - .name = TYPE_ARM11MPCORE_PRIV, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(ARM11MPCorePriveState), - .instance_init = mpcore_priv_initfn, - .class_init = mpcore_priv_class_init, +static const TypeInfo arm11mp_types[] = { + { + .name = TYPE_ARM11MPCORE_PRIV, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ARM11MPCorePriveState), + .instance_init = mpcore_priv_initfn, + .class_init = mpcore_priv_class_init, + }, }; -static void arm11mpcore_register_types(void) -{ - type_register_static(&mpcore_priv_info); -} - -type_init(arm11mpcore_register_types) +DEFINE_TYPES(arm11mp_types) diff --git a/hw/cpu/cluster.c b/hw/cpu/cluster.c index 61289a840d4..ef3b3d1e940 100644 --- a/hw/cpu/cluster.c +++ b/hw/cpu/cluster.c @@ -25,9 +25,8 @@ #include "hw/qdev-properties.h" #include "qapi/error.h" -static Property cpu_cluster_properties[] = { +static const Property cpu_cluster_properties[] = { DEFINE_PROP_UINT32("cluster-id", CPUClusterState, cluster_id, 0), - DEFINE_PROP_END_OF_LIST() }; typedef struct CallbackData { @@ -73,7 +72,7 @@ static void cpu_cluster_realize(DeviceState *dev, Error **errp) assert(cbdata.cpu_count > 0); } -static void cpu_cluster_class_init(ObjectClass *klass, void *data) +static void cpu_cluster_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc 
= DEVICE_CLASS(klass); diff --git a/hw/cpu/core.c b/hw/cpu/core.c index 495a5c30ffe..5cb2e9a7f54 100644 --- a/hw/cpu/core.c +++ b/hw/cpu/core.c @@ -77,7 +77,7 @@ static void cpu_core_instance_init(Object *obj) } } -static void cpu_core_class_init(ObjectClass *oc, void *data) +static void cpu_core_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/cpu/realview_mpcore.c b/hw/cpu/realview_mpcore.c index 72c792eef1a..099b71a9ef2 100644 --- a/hw/cpu/realview_mpcore.c +++ b/hw/cpu/realview_mpcore.c @@ -14,7 +14,6 @@ #include "hw/cpu/arm11mpcore.h" #include "hw/intc/realview_gic.h" #include "hw/irq.h" -#include "hw/qdev-properties.h" #include "qom/object.h" #define TYPE_REALVIEW_MPCORE_RIRQ "realview_mpcore" @@ -68,7 +67,6 @@ static void realview_mpcore_realize(DeviceState *dev, Error **errp) int n; int i; - qdev_prop_set_uint32(priv, "num-cpu", s->num_cpu); if (!sysbus_realize(SYS_BUS_DEVICE(&s->priv), errp)) { return; } @@ -100,6 +98,7 @@ static void mpcore_rirq_init(Object *obj) int i; object_initialize_child(obj, "a11priv", &s->priv, TYPE_ARM11MPCORE_PRIV); + object_property_add_alias(obj, "num-cpu", OBJECT(&s->priv), "num-cpu"); privbusdev = SYS_BUS_DEVICE(&s->priv); sysbus_init_mmio(sbd, sysbus_mmio_get_region(privbusdev, 0)); @@ -108,30 +107,21 @@ static void mpcore_rirq_init(Object *obj) } } -static Property mpcore_rirq_properties[] = { - DEFINE_PROP_UINT32("num-cpu", mpcore_rirq_state, num_cpu, 1), - DEFINE_PROP_END_OF_LIST(), -}; - -static void mpcore_rirq_class_init(ObjectClass *klass, void *data) +static void mpcore_rirq_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = realview_mpcore_realize; - device_class_set_props(dc, mpcore_rirq_properties); } -static const TypeInfo mpcore_rirq_info = { - .name = TYPE_REALVIEW_MPCORE_RIRQ, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(mpcore_rirq_state), - .instance_init = mpcore_rirq_init, - .class_init = mpcore_rirq_class_init, +static const TypeInfo realview_mpcore_types[] = { + { + .name = TYPE_REALVIEW_MPCORE_RIRQ, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(mpcore_rirq_state), + .instance_init = mpcore_rirq_init, + .class_init = mpcore_rirq_class_init, + }, }; -static void realview_mpcore_register_types(void) -{ - type_register_static(&mpcore_rirq_info); -} - -type_init(realview_mpcore_register_types) +DEFINE_TYPES(realview_mpcore_types) diff --git a/hw/cris/Kconfig b/hw/cris/Kconfig deleted file mode 100644 index 26c7eef7437..00000000000 --- a/hw/cris/Kconfig +++ /dev/null @@ -1,11 +0,0 @@ -config AXIS - bool - default y - depends on CRIS - select ETRAXFS - select PFLASH_CFI02 - select NAND - -config ETRAXFS - bool - select PTIMER diff --git a/hw/cris/axis_dev88.c b/hw/cris/axis_dev88.c deleted file mode 100644 index 55566349210..00000000000 --- a/hw/cris/axis_dev88.c +++ /dev/null @@ -1,351 +0,0 @@ -/* - * QEMU model for the AXIS devboard 88. - * - * Copyright (c) 2009 Edgar E. Iglesias, Axis Communications AB. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "qemu/osdep.h" -#include "qemu/units.h" -#include "qapi/error.h" -#include "cpu.h" -#include "hw/sysbus.h" -#include "net/net.h" -#include "hw/block/flash.h" -#include "hw/boards.h" -#include "hw/cris/etraxfs.h" -#include "hw/loader.h" -#include "elf.h" -#include "boot.h" -#include "sysemu/qtest.h" -#include "sysemu/sysemu.h" - -#define D(x) -#define DNAND(x) - -struct nand_state_t -{ - DeviceState *nand; - MemoryRegion iomem; - unsigned int rdy:1; - unsigned int ale:1; - unsigned int cle:1; - unsigned int ce:1; -}; - -static struct nand_state_t nand_state; -static uint64_t nand_read(void *opaque, hwaddr addr, unsigned size) -{ - struct nand_state_t *s = opaque; - uint32_t r; - int rdy; - - r = nand_getio(s->nand); - nand_getpins(s->nand, &rdy); - s->rdy = rdy; - - DNAND(printf("%s addr=%x r=%x\n", __func__, addr, r)); - return r; -} - -static void -nand_write(void *opaque, hwaddr addr, uint64_t value, - unsigned size) -{ - struct nand_state_t *s = opaque; - int rdy; - - DNAND(printf("%s addr=%x v=%x\n", __func__, addr, (unsigned)value)); - nand_setpins(s->nand, s->cle, s->ale, s->ce, 1, 0); - nand_setio(s->nand, value); - nand_getpins(s->nand, &rdy); - s->rdy = rdy; -} - -static const MemoryRegionOps nand_ops = { - .read = nand_read, - .write = nand_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -struct tempsensor_t -{ - unsigned int shiftreg; - unsigned int count; - enum { - ST_OUT, ST_IN, ST_Z - } state; - - uint16_t regs[3]; -}; - -static void tempsensor_clkedge(struct tempsensor_t *s, - unsigned int clk, unsigned int data_in) -{ - D(printf("%s clk=%d state=%d sr=%x\n", __func__, - clk, s->state, s->shiftreg)); - if (s->count == 0) { - s->count = 16; - s->state = ST_OUT; - } - switch (s->state) { - case ST_OUT: - /* Output reg is clocked at negedge. */ - if (!clk) { - s->count--; - s->shiftreg <<= 1; - if (s->count == 0) { - s->shiftreg = 0; - s->state = ST_IN; - s->count = 16; - } - } - break; - case ST_Z: - if (clk) { - s->count--; - if (s->count == 0) { - s->shiftreg = 0; - s->state = ST_OUT; - s->count = 16; - } - } - break; - case ST_IN: - /* Indata is sampled at posedge. */ - if (clk) { - s->count--; - s->shiftreg <<= 1; - s->shiftreg |= data_in & 1; - if (s->count == 0) { - D(printf("%s cfgreg=%x\n", __func__, s->shiftreg)); - s->regs[0] = s->shiftreg; - s->state = ST_OUT; - s->count = 16; - - if ((s->regs[0] & 0xff) == 0) { - /* 25 degrees celsius. 
*/ - s->shiftreg = 0x0b9f; - } else if ((s->regs[0] & 0xff) == 0xff) { - /* Sensor ID, 0x8100 LM70. */ - s->shiftreg = 0x8100; - } else - printf("Invalid tempsens state %x\n", s->regs[0]); - } - } - break; - } -} - - -#define RW_PA_DOUT 0x00 -#define R_PA_DIN 0x01 -#define RW_PA_OE 0x02 -#define RW_PD_DOUT 0x10 -#define R_PD_DIN 0x11 -#define RW_PD_OE 0x12 - -static struct gpio_state_t -{ - MemoryRegion iomem; - struct nand_state_t *nand; - struct tempsensor_t tempsensor; - uint32_t regs[0x5c / 4]; -} gpio_state; - -static uint64_t gpio_read(void *opaque, hwaddr addr, unsigned size) -{ - struct gpio_state_t *s = opaque; - uint32_t r = 0; - - addr >>= 2; - switch (addr) - { - case R_PA_DIN: - r = s->regs[RW_PA_DOUT] & s->regs[RW_PA_OE]; - - /* Encode pins from the nand. */ - r |= s->nand->rdy << 7; - break; - case R_PD_DIN: - r = s->regs[RW_PD_DOUT] & s->regs[RW_PD_OE]; - - /* Encode temp sensor pins. */ - r |= (!!(s->tempsensor.shiftreg & 0x10000)) << 4; - break; - - default: - r = s->regs[addr]; - break; - } - return r; - D(printf("%s %x=%x\n", __func__, addr, r)); -} - -static void gpio_write(void *opaque, hwaddr addr, uint64_t value, - unsigned size) -{ - struct gpio_state_t *s = opaque; - D(printf("%s %x=%x\n", __func__, addr, (unsigned)value)); - - addr >>= 2; - switch (addr) - { - case RW_PA_DOUT: - /* Decode nand pins. */ - s->nand->ale = !!(value & (1 << 6)); - s->nand->cle = !!(value & (1 << 5)); - s->nand->ce = !!(value & (1 << 4)); - - s->regs[addr] = value; - break; - - case RW_PD_DOUT: - /* Temp sensor clk. */ - if ((s->regs[addr] ^ value) & 2) - tempsensor_clkedge(&s->tempsensor, !!(value & 2), - !!(value & 16)); - s->regs[addr] = value; - break; - - default: - s->regs[addr] = value; - break; - } -} - -static const MemoryRegionOps gpio_ops = { - .read = gpio_read, - .write = gpio_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4, - }, -}; - -#define INTMEM_SIZE (128 * KiB) - -static struct cris_load_info li; - -static -void axisdev88_init(MachineState *machine) -{ - const char *kernel_filename = machine->kernel_filename; - const char *kernel_cmdline = machine->kernel_cmdline; - CRISCPU *cpu; - DeviceState *dev; - SysBusDevice *s; - DriveInfo *nand; - qemu_irq irq[30], nmi[2]; - void *etraxfs_dmac; - struct etraxfs_dma_client *dma_eth; - int i; - MemoryRegion *address_space_mem = get_system_memory(); - MemoryRegion *phys_intmem = g_new(MemoryRegion, 1); - - /* init CPUs */ - cpu = CRIS_CPU(cpu_create(machine->cpu_type)); - - memory_region_add_subregion(address_space_mem, 0x40000000, machine->ram); - - /* The ETRAX-FS has 128Kb on chip ram, the docs refer to it as the - internal memory. */ - memory_region_init_ram(phys_intmem, NULL, "axisdev88.chipram", - INTMEM_SIZE, &error_fatal); - memory_region_add_subregion(address_space_mem, 0x38000000, phys_intmem); - - /* Attach a NAND flash to CS1. */ - nand = drive_get(IF_MTD, 0, 0); - nand_state.nand = nand_init(nand ? 
blk_by_legacy_dinfo(nand) : NULL, - NAND_MFR_STMICRO, 0x39); - memory_region_init_io(&nand_state.iomem, NULL, &nand_ops, &nand_state, - "nand", 0x05000000); - memory_region_add_subregion(address_space_mem, 0x10000000, - &nand_state.iomem); - - gpio_state.nand = &nand_state; - memory_region_init_io(&gpio_state.iomem, NULL, &gpio_ops, &gpio_state, - "gpio", 0x5c); - memory_region_add_subregion(address_space_mem, 0x3001a000, - &gpio_state.iomem); - - - dev = qdev_new("etraxfs-pic"); - s = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(s, &error_fatal); - sysbus_mmio_map(s, 0, 0x3001c000); - sysbus_connect_irq(s, 0, qdev_get_gpio_in(DEVICE(cpu), CRIS_CPU_IRQ)); - sysbus_connect_irq(s, 1, qdev_get_gpio_in(DEVICE(cpu), CRIS_CPU_NMI)); - for (i = 0; i < 30; i++) { - irq[i] = qdev_get_gpio_in(dev, i); - } - nmi[0] = qdev_get_gpio_in(dev, 30); - nmi[1] = qdev_get_gpio_in(dev, 31); - - etraxfs_dmac = etraxfs_dmac_init(0x30000000, 10); - for (i = 0; i < 10; i++) { - /* On ETRAX, odd numbered channels are inputs. */ - etraxfs_dmac_connect(etraxfs_dmac, i, irq + 7 + i, i & 1); - } - - /* Add the two ethernet blocks. */ - dma_eth = g_malloc0(sizeof dma_eth[0] * 4); /* Allocate 4 channels. */ - - etraxfs_eth_init(0x30034000, 1, &dma_eth[0], &dma_eth[1]); - /* The DMA Connector block is missing, hardwire things for now. */ - etraxfs_dmac_connect_client(etraxfs_dmac, 0, &dma_eth[0]); - etraxfs_dmac_connect_client(etraxfs_dmac, 1, &dma_eth[1]); - - if (qemu_find_nic_info("etraxfs-eth", true, "fseth")) { - etraxfs_eth_init(0x30036000, 2, &dma_eth[2], &dma_eth[3]); - etraxfs_dmac_connect_client(etraxfs_dmac, 6, &dma_eth[2]); - etraxfs_dmac_connect_client(etraxfs_dmac, 7, &dma_eth[3]); - } - - /* 2 timers. */ - sysbus_create_varargs("etraxfs-timer", 0x3001e000, irq[0x1b], nmi[1], NULL); - sysbus_create_varargs("etraxfs-timer", 0x3005e000, irq[0x1b], nmi[1], NULL); - - for (i = 0; i < 4; i++) { - etraxfs_ser_create(0x30026000 + i * 0x2000, irq[0x14 + i], serial_hd(i)); - } - - if (kernel_filename) { - li.image_filename = kernel_filename; - li.cmdline = kernel_cmdline; - li.ram_size = machine->ram_size; - cris_load_image(cpu, &li); - } else if (!qtest_enabled()) { - fprintf(stderr, "Kernel image must be specified\n"); - exit(1); - } -} - -static void axisdev88_machine_init(MachineClass *mc) -{ - mc->desc = "AXIS devboard 88"; - mc->init = axisdev88_init; - mc->is_default = true; - mc->default_cpu_type = CRIS_CPU_TYPE_NAME("crisv32"); - mc->default_ram_id = "axisdev88.ram"; -} - -DEFINE_MACHINE("axis-dev88", axisdev88_machine_init) diff --git a/hw/cris/boot.c b/hw/cris/boot.c deleted file mode 100644 index 9fa09cfd83c..00000000000 --- a/hw/cris/boot.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * CRIS image loading. - * - * Copyright (c) 2010 Edgar E. Iglesias, Axis Communications AB. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "hw/loader.h" -#include "elf.h" -#include "boot.h" -#include "qemu/cutils.h" -#include "sysemu/reset.h" - -static void main_cpu_reset(void *opaque) -{ - CRISCPU *cpu = opaque; - CPUCRISState *env = &cpu->env; - struct cris_load_info *li; - - li = env->load_info; - - cpu_reset(CPU(cpu)); - - if (!li) { - /* nothing more to do. */ - return; - } - - env->pc = li->entry; - - if (li->image_filename) { - env->regs[8] = 0x56902387; /* RAM boot magic. */ - env->regs[9] = 0x40004000 + li->image_size; - } - - if (li->cmdline) { - /* Let the kernel know we are modifying the cmdline. */ - env->regs[10] = 0x87109563; - env->regs[11] = 0x40000000; - } -} - -static uint64_t translate_kernel_address(void *opaque, uint64_t addr) -{ - return addr - 0x80000000LL; -} - -void cris_load_image(CRISCPU *cpu, struct cris_load_info *li) -{ - CPUCRISState *env = &cpu->env; - uint64_t entry; - int kcmdline_len; - int image_size; - - env->load_info = li; - /* Boots a kernel elf binary, os/linux-2.6/vmlinux from the axis - devboard SDK. */ - image_size = load_elf(li->image_filename, NULL, - translate_kernel_address, NULL, - &entry, NULL, NULL, NULL, 0, EM_CRIS, 0, 0); - li->entry = entry; - if (image_size < 0) { - /* Takes a kimage from the axis devboard SDK. 
*/ - image_size = load_image_targphys(li->image_filename, 0x40004000, - li->ram_size); - li->entry = 0x40004000; - } - - if (image_size < 0) { - fprintf(stderr, "qemu: could not load kernel '%s'\n", - li->image_filename); - exit(1); - } - - if (li->cmdline && (kcmdline_len = strlen(li->cmdline))) { - if (kcmdline_len > 256) { - fprintf(stderr, "Too long CRIS kernel cmdline (max 256)\n"); - exit(1); - } - pstrcpy_targphys("cmdline", 0x40000000, 256, li->cmdline); - } - qemu_register_reset(main_cpu_reset, cpu); -} diff --git a/hw/cris/boot.h b/hw/cris/boot.h deleted file mode 100644 index 9f1e0e340c9..00000000000 --- a/hw/cris/boot.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef HW_CRIS_BOOT_H -#define HW_CRIS_BOOT_H - -struct cris_load_info -{ - const char *image_filename; - const char *cmdline; - int image_size; - ram_addr_t ram_size; - - hwaddr entry; -}; - -void cris_load_image(CRISCPU *cpu, struct cris_load_info *li); - -#endif diff --git a/hw/cris/meson.build b/hw/cris/meson.build deleted file mode 100644 index dc808a4e0f1..00000000000 --- a/hw/cris/meson.build +++ /dev/null @@ -1,5 +0,0 @@ -cris_ss = ss.source_set() -cris_ss.add(files('boot.c')) -cris_ss.add(when: 'CONFIG_AXIS', if_true: files('axis_dev88.c')) - -hw_arch += {'cris': cris_ss} diff --git a/hw/cxl/cxl-component-utils.c b/hw/cxl/cxl-component-utils.c index cd116c04012..473895948b3 100644 --- a/hw/cxl/cxl-component-utils.c +++ b/hw/cxl/cxl-component-utils.c @@ -243,8 +243,13 @@ static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk, ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1); ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 0); - ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 0); - ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 16_WAY, 0); + if (type == CXL2_TYPE3_DEVICE) { + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 1); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 16_WAY, 1); + } else { + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 0); + ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 16_WAY, 0); + } ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, UIO, 0); ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, UIO_DECODER_COUNT, 0); diff --git a/hw/cxl/cxl-device-utils.c b/hw/cxl/cxl-device-utils.c index 035d034f6dd..e150d744576 100644 --- a/hw/cxl/cxl-device-utils.c +++ b/hw/cxl/cxl-device-utils.c @@ -95,11 +95,15 @@ static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size) } if (offset == A_CXL_DEV_MAILBOX_STS) { uint64_t status_reg = cxl_dstate->mbox_reg_state64[offset / size]; - if (cci->bg.complete_pct) { - status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, BG_OP, - 0); - cxl_dstate->mbox_reg_state64[offset / size] = status_reg; - } + int bgop; + + qemu_mutex_lock(&cci->bg.lock); + bgop = !(cci->bg.complete_pct == 100 || cci->bg.aborted); + + status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, BG_OP, + bgop); + cxl_dstate->mbox_reg_state64[offset / size] = status_reg; + qemu_mutex_unlock(&cci->bg.lock); } return cxl_dstate->mbox_reg_state64[offset / size]; default: @@ -352,10 +356,8 @@ static void device_reg_init_common(CXLDeviceState *cxl_dstate) } } -static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate) +static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate, int msi_n) { - const uint8_t msi_n = 9; - /* 2048 payload size */ ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP, 
PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT); @@ -382,7 +384,7 @@ static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) cxl_dstate->memdev_status = memdev_status_reg; } -void cxl_device_register_init_t3(CXLType3Dev *ct3d) +void cxl_device_register_init_t3(CXLType3Dev *ct3d, int msi_n) { CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate; uint64_t *cap_h = cxl_dstate->caps_reg_state64; @@ -398,7 +400,7 @@ void cxl_device_register_init_t3(CXLType3Dev *ct3d) device_reg_init_common(cxl_dstate); cxl_device_cap_init(cxl_dstate, MAILBOX, 2, CXL_DEV_MAILBOX_VERSION); - mailbox_reg_init_common(cxl_dstate); + mailbox_reg_init_common(cxl_dstate, msi_n); cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, CXL_MEM_DEV_STATUS_VERSION); @@ -408,7 +410,7 @@ void cxl_device_register_init_t3(CXLType3Dev *ct3d) CXL_MAILBOX_MAX_PAYLOAD_SIZE); } -void cxl_device_register_init_swcci(CSWMBCCIDev *sw) +void cxl_device_register_init_swcci(CSWMBCCIDev *sw, int msi_n) { CXLDeviceState *cxl_dstate = &sw->cxl_dstate; uint64_t *cap_h = cxl_dstate->caps_reg_state64; @@ -423,7 +425,7 @@ void cxl_device_register_init_swcci(CSWMBCCIDev *sw) device_reg_init_common(cxl_dstate); cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1); - mailbox_reg_init_common(cxl_dstate); + mailbox_reg_init_common(cxl_dstate, msi_n); cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1); memdev_reg_init_common(cxl_dstate); diff --git a/hw/cxl/cxl-events.c b/hw/cxl/cxl-events.c index 12dee2e4675..7583dd9162f 100644 --- a/hw/cxl/cxl-events.c +++ b/hw/cxl/cxl-events.c @@ -8,8 +8,6 @@ */ #include "qemu/osdep.h" - -#include "qemu/bswap.h" #include "qemu/error-report.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" @@ -260,3 +258,41 @@ void cxl_event_irq_assert(CXLType3Dev *ct3d) } } } + +void cxl_create_dc_event_records_for_extents(CXLType3Dev *ct3d, + CXLDCEventType type, + CXLDCExtentRaw extents[], + uint32_t ext_count) +{ + CXLEventDynamicCapacity event_rec = {}; + int i; + + cxl_assign_event_header(&event_rec.hdr, + &dynamic_capacity_uuid, + (1 << CXL_EVENT_TYPE_INFO), + sizeof(event_rec), + cxl_device_get_timestamp(&ct3d->cxl_dstate)); + event_rec.type = type; + event_rec.validity_flags = 1; + event_rec.host_id = 0; + event_rec.updated_region_id = 0; + event_rec.extents_avail = CXL_NUM_EXTENTS_SUPPORTED - + ct3d->dc.total_extent_count; + + for (i = 0; i < ext_count; i++) { + memcpy(&event_rec.dynamic_capacity_extent, + &extents[i], + sizeof(CXLDCExtentRaw)); + event_rec.flags = 0; + if (i < ext_count - 1) { + /* Set "More" flag */ + event_rec.flags |= BIT(0); + } + + if (cxl_event_insert(&ct3d->cxl_dstate, + CXL_EVENT_TYPE_DYNAMIC_CAP, + (CXLEventRecordRaw *)&event_rec)) { + cxl_event_irq_assert(ct3d); + } + } +} diff --git a/hw/cxl/cxl-host-stubs.c b/hw/cxl/cxl-host-stubs.c index cae4afcdde2..c015baac813 100644 --- a/hw/cxl/cxl-host-stubs.c +++ b/hw/cxl/cxl-host-stubs.c @@ -8,8 +8,13 @@ #include "hw/cxl/cxl.h" #include "hw/cxl/cxl_host.h" -void cxl_fmws_link_targets(CXLState *stat, Error **errp) {}; +void cxl_fmws_link_targets(Error **errp) {}; void cxl_machine_init(Object *obj, CXLState *state) {}; void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp) {}; +hwaddr cxl_fmws_set_memmap(hwaddr base, hwaddr max_addr) +{ + return base; +}; +void cxl_fmws_update_mmio(void) {}; const MemoryRegionOps cfmws_ops; diff --git a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c index e9f2543c43c..5c2ce25a19c 100644 --- a/hw/cxl/cxl-host.c +++ b/hw/cxl/cxl-host.c @@ -10,7 +10,7 @@ #include "qemu/bitmap.h" #include "qemu/error-report.h" 
#include "qapi/error.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" #include "hw/boards.h" #include "qapi/qapi-visit-machine.h" @@ -22,15 +22,17 @@ #include "hw/pci/pcie_port.h" #include "hw/pci-bridge/pci_expander_bridge.h" -static void cxl_fixed_memory_window_config(CXLState *cxl_state, - CXLFixedMemoryWindowOptions *object, - Error **errp) +static void cxl_fixed_memory_window_config(CXLFixedMemoryWindowOptions *object, + int index, Error **errp) { ERRP_GUARD(); - g_autofree CXLFixedWindow *fw = g_malloc0(sizeof(*fw)); + DeviceState *dev = qdev_new(TYPE_CXL_FMW); + CXLFixedWindow *fw = CXL_FMW(dev); strList *target; int i; + fw->index = index; + for (target = object->targets; target; target = target->next) { fw->num_targets++; } @@ -65,37 +67,39 @@ static void cxl_fixed_memory_window_config(CXLState *cxl_state, fw->targets[i] = g_strdup(target->value); } - cxl_state->fixed_windows = g_list_append(cxl_state->fixed_windows, - g_steal_pointer(&fw)); - - return; + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), errp); } -void cxl_fmws_link_targets(CXLState *cxl_state, Error **errp) +static int cxl_fmws_link(Object *obj, void *opaque) { - if (cxl_state && cxl_state->fixed_windows) { - GList *it; - - for (it = cxl_state->fixed_windows; it; it = it->next) { - CXLFixedWindow *fw = it->data; - int i; - - for (i = 0; i < fw->num_targets; i++) { - Object *o; - bool ambig; - - o = object_resolve_path_type(fw->targets[i], - TYPE_PXB_CXL_DEV, - &ambig); - if (!o) { - error_setg(errp, "Could not resolve CXLFM target %s", - fw->targets[i]); - return; - } - fw->target_hbs[i] = PXB_CXL_DEV(o); - } + struct CXLFixedWindow *fw; + int i; + + if (!object_dynamic_cast(obj, TYPE_CXL_FMW)) { + return 0; + } + fw = CXL_FMW(obj); + + for (i = 0; i < fw->num_targets; i++) { + Object *o; + bool ambig; + + o = object_resolve_path_type(fw->targets[i], TYPE_PXB_CXL_DEV, + &ambig); + if (!o) { + error_setg(&error_fatal, "Could not resolve CXLFM target %s", + fw->targets[i]); + return 1; } + fw->target_hbs[i] = PXB_CXL_DEV(o); } + return 0; +} + +void cxl_fmws_link_targets(Error **errp) +{ + /* Order doesn't matter for this, so no need to build list */ + object_child_foreach_recursive(object_get_root(), cxl_fmws_link, NULL); } static bool cxl_hdm_find_target(uint32_t *cache_mem, hwaddr addr, @@ -327,14 +331,15 @@ static void machine_set_cfmw(Object *obj, Visitor *v, const char *name, CXLState *state = opaque; CXLFixedMemoryWindowOptionsList *cfmw_list = NULL; CXLFixedMemoryWindowOptionsList *it; + int index; visit_type_CXLFixedMemoryWindowOptionsList(v, name, &cfmw_list, errp); if (!cfmw_list) { return; } - for (it = cfmw_list; it; it = it->next) { - cxl_fixed_memory_window_config(state, it->value, errp); + for (it = cfmw_list, index = 0; it; it = it->next, index++) { + cxl_fixed_memory_window_config(it->value, index, errp); } state->cfmw_list = cfmw_list; } @@ -372,3 +377,110 @@ void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp) } } } + +static int cxl_fmws_find(Object *obj, void *opaque) +{ + GSList **list = opaque; + + if (!object_dynamic_cast(obj, TYPE_CXL_FMW)) { + return 0; + } + *list = g_slist_prepend(*list, obj); + + return 0; +} + +static GSList *cxl_fmws_get_all(void) +{ + GSList *list = NULL; + + object_child_foreach_recursive(object_get_root(), cxl_fmws_find, &list); + + return list; +} + +static gint cfmws_cmp(gconstpointer a, gconstpointer b, gpointer d) +{ + const struct CXLFixedWindow *ap = a; + const struct CXLFixedWindow *bp = b; + + return ap->index > bp->index; +} + 
+GSList *cxl_fmws_get_all_sorted(void) +{ + return g_slist_sort_with_data(cxl_fmws_get_all(), cfmws_cmp, NULL); +} + +static int cxl_fmws_mmio_map(Object *obj, void *opaque) +{ + struct CXLFixedWindow *fw; + + if (!object_dynamic_cast(obj, TYPE_CXL_FMW)) { + return 0; + } + fw = CXL_FMW(obj); + sysbus_mmio_map(SYS_BUS_DEVICE(fw), 0, fw->base); + + return 0; +} + +void cxl_fmws_update_mmio(void) +{ + /* Ordering is not required for this */ + object_child_foreach_recursive(object_get_root(), cxl_fmws_mmio_map, NULL); +} + +hwaddr cxl_fmws_set_memmap(hwaddr base, hwaddr max_addr) +{ + GSList *cfmws_list, *iter; + CXLFixedWindow *fw; + + cfmws_list = cxl_fmws_get_all_sorted(); + for (iter = cfmws_list; iter; iter = iter->next) { + fw = CXL_FMW(iter->data); + if (base + fw->size <= max_addr) { + fw->base = base; + base += fw->size; + } + } + g_slist_free(cfmws_list); + + return base; +} + +static void cxl_fmw_realize(DeviceState *dev, Error **errp) +{ + CXLFixedWindow *fw = CXL_FMW(dev); + + memory_region_init_io(&fw->mr, OBJECT(dev), &cfmws_ops, fw, + "cxl-fixed-memory-region", fw->size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &fw->mr); +} + +/* + * Note: Fixed memory windows represent fixed address decoders on the host and + * as such have no dynamic state to reset or migrate + */ +static void cxl_fmw_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "CXL Fixed Memory Window"; + dc->realize = cxl_fmw_realize; + /* Reason - created by machines as tightly coupled to machine memory map */ + dc->user_creatable = false; +} + +static const TypeInfo cxl_fmw_info = { + .name = TYPE_CXL_FMW, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CXLFixedWindow), + .class_init = cxl_fmw_class_init, +}; + +static void cxl_host_register_types(void) +{ + type_register_static(&cxl_fmw_info); +} +type_init(cxl_host_register_types) diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c index 3ebbd32e102..68c7cc98912 100644 --- a/hw/cxl/cxl-mailbox-utils.c +++ b/hw/cxl/cxl-mailbox-utils.c @@ -7,6 +7,8 @@ * COPYING file in the top-level directory. */ +#include + #include "qemu/osdep.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" @@ -16,16 +18,22 @@ #include "hw/pci/pci.h" #include "hw/pci-bridge/cxl_upstream_port.h" #include "qemu/cutils.h" +#include "qemu/host-utils.h" #include "qemu/log.h" #include "qemu/units.h" #include "qemu/uuid.h" -#include "sysemu/hostmem.h" +#include "system/hostmem.h" #include "qemu/range.h" +#include "qapi/qapi-types-cxl.h" #define CXL_CAPACITY_MULTIPLIER (256 * MiB) #define CXL_DC_EVENT_LOG_SIZE 8 -#define CXL_NUM_EXTENTS_SUPPORTED 512 #define CXL_NUM_TAGS_SUPPORTED 0 +#define CXL_ALERTS_LIFE_USED_WARN_THRESH (1 << 0) +#define CXL_ALERTS_OVER_TEMP_WARN_THRESH (1 << 1) +#define CXL_ALERTS_UNDER_TEMP_WARN_THRESH (1 << 2) +#define CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH (1 << 3) +#define CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH (1 << 4) /* * How to add a new command, example. The command set FOO, with cmd BAR. 
@@ -56,6 +64,9 @@ enum { INFOSTAT = 0x00, #define IS_IDENTIFY 0x1 #define BACKGROUND_OPERATION_STATUS 0x2 + #define GET_RESPONSE_MSG_LIMIT 0x3 + #define SET_RESPONSE_MSG_LIMIT 0x4 + #define BACKGROUND_OPERATION_ABORT 0x5 EVENTS = 0x01, #define GET_RECORDS 0x0 #define CLEAR_RECORDS 0x1 @@ -81,9 +92,13 @@ enum { #define GET_PARTITION_INFO 0x0 #define GET_LSA 0x2 #define SET_LSA 0x3 + HEALTH_INFO_ALERTS = 0x42, + #define GET_ALERT_CONFIG 0x1 + #define SET_ALERT_CONFIG 0x2 SANITIZE = 0x44, #define OVERWRITE 0x0 #define SECURE_ERASE 0x1 + #define MEDIA_OPERATIONS 0x2 PERSISTENT_MEM = 0x45, #define GET_SECURITY_STATE 0x0 MEDIA_AND_POISON = 0x43, @@ -103,6 +118,13 @@ enum { #define GET_PHYSICAL_PORT_STATE 0x1 TUNNEL = 0x53, #define MANAGEMENT_COMMAND 0x0 + FMAPI_DCD_MGMT = 0x56, + #define GET_DCD_INFO 0x0 + #define GET_HOST_DC_REGION_CONFIG 0x1 + #define SET_DC_REGION_CONFIG 0x2 + #define GET_DC_REGION_EXTENT_LIST 0x3 + #define INITIATE_DC_ADD 0x4 + #define INITIATE_DC_RELEASE 0x5 }; /* CCI Message Format CXL r3.1 Figure 7-19 */ @@ -151,6 +173,9 @@ static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd, in = (void *)payload_in; out = (void *)payload_out; + if (len_in < sizeof(*in)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } /* Enough room for minimum sized message - no payload */ if (in->size < sizeof(in->ccimessage)) { return CXL_MBOX_INVALID_PAYLOAD_LENGTH; @@ -266,6 +291,12 @@ static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd, CXLClearEventPayload *pl; pl = (CXLClearEventPayload *)payload_in; + + if (len_in < sizeof(*pl) || + len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + *len_out = 0; return cxl_event_clear_records(cxlds, pl); } @@ -374,7 +405,7 @@ static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd, uint16_t pcie_subsys_vid; uint16_t pcie_subsys_id; uint64_t sn; - uint8_t max_message_size; + uint8_t max_message_size; uint8_t component_type; } QEMU_PACKED *is_identify; QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18); @@ -403,12 +434,58 @@ static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd, is_identify->component_type = 0x3; /* Type 3 */ } - /* TODO: Allow this to vary across different CCIs */ - is_identify->max_message_size = 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */ + is_identify->max_message_size = (uint8_t)log2(cci->payload_max); *len_out = sizeof(*is_identify); return CXL_MBOX_SUCCESS; } +/* CXL r3.1 section 8.2.9.1.3: Get Response Message Limit (Opcode 0003h) */ +static CXLRetCode cmd_get_response_msg_limit(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + struct { + uint8_t rsp_limit; + } QEMU_PACKED *get_rsp_msg_limit = (void *)payload_out; + QEMU_BUILD_BUG_ON(sizeof(*get_rsp_msg_limit) != 1); + + get_rsp_msg_limit->rsp_limit = (uint8_t)log2(cci->payload_max); + + *len_out = sizeof(*get_rsp_msg_limit); + return CXL_MBOX_SUCCESS; +} + +/* CXL r3.1 section 8.2.9.1.4: Set Response Message Limit (Opcode 0004h) */ +static CXLRetCode cmd_set_response_msg_limit(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + struct { + uint8_t rsp_limit; + } QEMU_PACKED *in = (void *)payload_in; + QEMU_BUILD_BUG_ON(sizeof(*in) != 1); + struct { + uint8_t rsp_limit; + } QEMU_PACKED *out = (void *)payload_out; + QEMU_BUILD_BUG_ON(sizeof(*out) != 1); + + if (in->rsp_limit < 8 || in->rsp_limit > 10) { + return 
CXL_MBOX_INVALID_INPUT; + } + + cci->payload_max = 1 << in->rsp_limit; + out->rsp_limit = in->rsp_limit; + + *len_out = sizeof(*out); + return CXL_MBOX_SUCCESS; +} + static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d, void *private) { @@ -521,6 +598,9 @@ static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd, in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in; out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out; + if (len_in < sizeof(*in)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } /* Check if what was requested can fit */ if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) { return CXL_MBOX_INVALID_INPUT; @@ -624,6 +704,41 @@ static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } +/* + * CXL r3.1 Section 8.2.9.1.5: + * Request Abort Background Operation (Opcode 0005h) + */ +static CXLRetCode cmd_infostat_bg_op_abort(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + int bg_set = cci->bg.opcode >> 8; + int bg_cmd = cci->bg.opcode & 0xff; + const struct cxl_cmd *bg_c = &cci->cxl_cmd_set[bg_set][bg_cmd]; + + if (!(bg_c->effect & CXL_MBOX_BACKGROUND_OPERATION_ABORT)) { + return CXL_MBOX_REQUEST_ABORT_NOTSUP; + } + + qemu_mutex_lock(&cci->bg.lock); + if (cci->bg.runtime) { + /* operation is near complete, let it finish */ + if (cci->bg.complete_pct < 85) { + timer_del(cci->bg.timer); + cci->bg.ret_code = CXL_MBOX_ABORTED; + cci->bg.starttime = 0; + cci->bg.runtime = 0; + cci->bg.aborted = true; + } + } + qemu_mutex_unlock(&cci->bg.lock); + + return CXL_MBOX_SUCCESS; +} + #define CXL_FW_SLOTS 2 #define CXL_FW_SIZE 0x02000000 /* 32 mb */ @@ -649,9 +764,9 @@ static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd, } QEMU_PACKED *fw_info; QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50); - if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) || - (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER) || - (ct3d->dc.total_capacity < CXL_CAPACITY_MULTIPLIER)) { + if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) || + !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) || + !QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) { return CXL_MBOX_INTERNAL_ERROR; } @@ -699,6 +814,10 @@ static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd, } QEMU_PACKED *fw_transfer = (void *)payload_in; size_t offset, length; + if (len < sizeof(*fw_transfer)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) { /* * At this point there aren't any on-going transfers @@ -927,24 +1046,28 @@ static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd, get_log = (void *)payload_in; + if (get_log->length > cci->payload_max) { + return CXL_MBOX_INVALID_INPUT; + } + + if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) { + return CXL_MBOX_INVALID_LOG; + } + /* * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) * The device shall return Invalid Input if the Offset or Length * fields attempt to access beyond the size of the log as reported by Get - * Supported Logs. + * Supported Log. * - * The CEL buffer is large enough to fit all commands in the emulation, so - * the only possible failure would be if the mailbox itself isn't big - * enough. 
+ * Only valid for there to be one entry per opcode, but the length + offset + * may still be greater than that if the inputs are not valid and so access + * beyond the end of cci->cel_log. */ - if (get_log->offset + get_log->length > cci->payload_max) { + if ((uint64_t)get_log->offset + get_log->length >= sizeof(cci->cel_log)) { return CXL_MBOX_INVALID_INPUT; } - if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) { - return CXL_MBOX_INVALID_LOG; - } - /* Store off everything to local variables so we can wipe out the payload */ *len_out = get_log->length; @@ -1133,10 +1256,8 @@ static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd, (struct CXLSupportedFeatureEntry) { .uuid = ecs_uuid, .feat_index = index, - .get_feat_size = CXL_ECS_NUM_MEDIA_FRUS * - sizeof(CXLMemECSReadAttrs), - .set_feat_size = CXL_ECS_NUM_MEDIA_FRUS * - sizeof(CXLMemECSWriteAttrs), + .get_feat_size = sizeof(CXLMemECSReadAttrs), + .set_feat_size = sizeof(CXLMemECSWriteAttrs), .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE, .get_feat_version = CXL_ECS_GET_FEATURE_VERSION, .set_feat_version = CXL_ECS_SET_FEATURE_VERSION, @@ -1204,13 +1325,10 @@ static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd, (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset, bytes_to_copy); } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) { - if (get_feature->offset >= CXL_ECS_NUM_MEDIA_FRUS * - sizeof(CXLMemECSReadAttrs)) { + if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) { return CXL_MBOX_INVALID_INPUT; } - bytes_to_copy = CXL_ECS_NUM_MEDIA_FRUS * - sizeof(CXLMemECSReadAttrs) - - get_feature->offset; + bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset; bytes_to_copy = MIN(bytes_to_copy, get_feature->count); memcpy(payload_out, (uint8_t *)&ct3d->ecs_attrs + get_feature->offset, @@ -1243,6 +1361,9 @@ static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd, CXLType3Dev *ct3d; uint16_t count; + if (len_in < sizeof(*hdr)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { return CXL_MBOX_UNSUPPORTED; @@ -1270,6 +1391,10 @@ static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd, set_feat_info->data_offset = hdr->offset; bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader); + if (bytes_to_copy == 0) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) { if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) { return CXL_MBOX_UNSUPPORTED; @@ -1277,6 +1402,11 @@ static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd, ps_set_feature = (void *)payload_in; ps_write_attrs = &ps_set_feature->feat_data; + + if ((uint32_t)hdr->offset + bytes_to_copy > + sizeof(ct3d->patrol_scrub_wr_attrs)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset, ps_write_attrs, bytes_to_copy); @@ -1299,18 +1429,22 @@ static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd, ecs_set_feature = (void *)payload_in; ecs_write_attrs = ecs_set_feature->feat_data; - memcpy((uint8_t *)ct3d->ecs_wr_attrs + hdr->offset, + + if ((uint32_t)hdr->offset + bytes_to_copy > + sizeof(ct3d->ecs_wr_attrs)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset, ecs_write_attrs, bytes_to_copy); set_feat_info->data_size += bytes_to_copy; if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER || data_transfer_flag == 
CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) { + ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap; for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) { - ct3d->ecs_attrs[count].ecs_log_cap = - ct3d->ecs_wr_attrs[count].ecs_log_cap; - ct3d->ecs_attrs[count].ecs_config = - ct3d->ecs_wr_attrs[count].ecs_config & 0x1F; + ct3d->ecs_attrs.fru_attrs[count].ecs_config = + ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F; } } } else { @@ -1324,7 +1458,7 @@ static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd, if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) { memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size); } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) { - memset(ct3d->ecs_wr_attrs, 0, set_feat_info->data_size); + memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size); } set_feat_info->data_transfer_flag = 0; set_feat_info->data_saved_across_reset = false; @@ -1445,7 +1579,7 @@ static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd, } QEMU_PACKED *get_lsa; CXLType3Dev *ct3d = CXL_TYPE3(cci->d); CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); - uint32_t offset, length; + uint64_t offset, length; get_lsa = (void *)payload_in; offset = get_lsa->offset; @@ -1479,8 +1613,8 @@ static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd, const size_t hdr_len = offsetof(struct set_lsa_pl, data); *len_out = 0; - if (!len_in) { - return CXL_MBOX_SUCCESS; + if (len_in < hdr_len) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; } if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) { @@ -1492,6 +1626,97 @@ static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } +/* CXL r3.2 Section 8.2.10.9.3.2 Get Alert Configuration (Opcode 4201h) */ +static CXLRetCode cmd_get_alert_config(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + CXLAlertConfig *out = (CXLAlertConfig *)payload_out; + + memcpy(out, &ct3d->alert_config, sizeof(ct3d->alert_config)); + *len_out = sizeof(ct3d->alert_config); + + return CXL_MBOX_SUCCESS; +} + +/* CXL r3.2 Section 8.2.10.9.3.3 Set Alert Configuration (Opcode 4202h) */ +static CXLRetCode cmd_set_alert_config(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + CXLAlertConfig *alert_config = &ct3d->alert_config; + struct { + uint8_t valid_alert_actions; + uint8_t enable_alert_actions; + uint8_t life_used_warn_thresh; + uint8_t rsvd; + uint16_t over_temp_warn_thresh; + uint16_t under_temp_warn_thresh; + uint16_t cor_vmem_err_warn_thresh; + uint16_t cor_pmem_err_warn_thresh; + } QEMU_PACKED *in = (void *)payload_in; + + if (in->valid_alert_actions & CXL_ALERTS_LIFE_USED_WARN_THRESH) { + /* + * CXL r3.2 Table 8-149 The life used warning threshold shall be + * less than the life used critical alert value. + */ + if (in->life_used_warn_thresh >= + alert_config->life_used_crit_alert_thresh) { + return CXL_MBOX_INVALID_INPUT; + } + alert_config->life_used_warn_thresh = in->life_used_warn_thresh; + alert_config->enable_alerts |= CXL_ALERTS_LIFE_USED_WARN_THRESH; + } + + if (in->valid_alert_actions & CXL_ALERTS_OVER_TEMP_WARN_THRESH) { + /* + * CXL r3.2 Table 8-149 The Device Over-Temperature Warning Threshold + * shall be less than the Device Over-Temperature Critical + * Alert Threshold.
+ */ + if (in->over_temp_warn_thresh >= + alert_config->over_temp_crit_alert_thresh) { + return CXL_MBOX_INVALID_INPUT; + } + alert_config->over_temp_warn_thresh = in->over_temp_warn_thresh; + alert_config->enable_alerts |= CXL_ALERTS_OVER_TEMP_WARN_THRESH; + } + + if (in->valid_alert_actions & CXL_ALERTS_UNDER_TEMP_WARN_THRESH) { + /* + * CXL r3.2 Table 8-149 The Device Under-Temperature Warning Threshold + * shall be higher than the Device Under-Temperature Critical + * Alert Threshold. + */ + if (in->under_temp_warn_thresh <= + alert_config->under_temp_crit_alert_thresh) { + return CXL_MBOX_INVALID_INPUT; + } + alert_config->under_temp_warn_thresh = in->under_temp_warn_thresh; + alert_config->enable_alerts |= CXL_ALERTS_UNDER_TEMP_WARN_THRESH; + } + + if (in->valid_alert_actions & CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH) { + alert_config->cor_vmem_err_warn_thresh = in->cor_vmem_err_warn_thresh; + alert_config->enable_alerts |= CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH; + } + + if (in->valid_alert_actions & CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH) { + alert_config->cor_pmem_err_warn_thresh = in->cor_pmem_err_warn_thresh; + alert_config->enable_alerts |= CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH; + } + return CXL_MBOX_SUCCESS; +} + /* Perform the actual device zeroing */ static void __do_sanitization(CXLType3Dev *ct3d) { @@ -1522,34 +1747,10 @@ static void __do_sanitization(CXLType3Dev *ct3d) cxl_discard_all_event_records(&ct3d->cxl_dstate); } -/* - * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h) - * - * Once the Sanitize command has started successfully, the device shall be - * placed in the media disabled state. If the command fails or is interrupted - * by a reset or power failure, it shall remain in the media disabled state - * until a successful Sanitize command has been completed. During this state: - * - * 1. Memory writes to the device will have no effect, and all memory reads - * will return random values (no user data returned, even for locations that - * the failed Sanitize operation didn’t sanitize yet). - * - * 2. Mailbox commands shall still be processed in the disabled state, except - * that commands that access Sanitized areas shall fail with the Media Disabled - * error code. - */ -static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd, - uint8_t *payload_in, - size_t len_in, - uint8_t *payload_out, - size_t *len_out, - CXLCCI *cci) +static int get_sanitize_duration(uint64_t total_mem) { - CXLType3Dev *ct3d = CXL_TYPE3(cci->d); - uint64_t total_mem; /* in Mb */ - int secs; + int secs = 0; - total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20; if (total_mem <= 512) { secs = 4; } else if (total_mem <= 1024) { @@ -1578,6 +1779,39 @@ static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd, secs = 240 * 60; /* max 4 hrs */ } + return secs; +} + +/* + * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h) + * + * Once the Sanitize command has started successfully, the device shall be + * placed in the media disabled state. If the command fails or is interrupted + * by a reset or power failure, it shall remain in the media disabled state + * until a successful Sanitize command has been completed. During this state: + * + * 1. Memory writes to the device will have no effect, and all memory reads + * will return random values (no user data returned, even for locations that + * the failed Sanitize operation didn’t sanitize yet). + * + * 2.
Mailbox commands shall still be processed in the disabled state, except + * that commands that access Sanitized areas shall fail with the Media Disabled + * error code. + */ +static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + uint64_t total_mem; /* in Mb */ + int secs; + + total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20; + secs = get_sanitize_duration(total_mem); + /* EBUSY other bg cmds as of now */ cci->bg.runtime = secs * 1000UL; *len_out = 0; @@ -1588,64 +1822,382 @@ static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd, return CXL_MBOX_BG_STARTED; } -static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd, - uint8_t *payload_in, - size_t len_in, - uint8_t *payload_out, - size_t *len_out, - CXLCCI *cci) +struct dpa_range_list_entry { + uint64_t starting_dpa; + uint64_t length; +} QEMU_PACKED; + +struct CXLSanitizeInfo { + uint32_t dpa_range_count; + uint8_t fill_value; + struct dpa_range_list_entry dpa_range_list[]; +} QEMU_PACKED; + +static uint64_t get_vmr_size(CXLType3Dev *ct3d, MemoryRegion **vmr) { - uint32_t *state = (uint32_t *)payload_out; + MemoryRegion *mr; + if (ct3d->hostvmem) { + mr = host_memory_backend_get_memory(ct3d->hostvmem); + if (vmr) { + *vmr = mr; + } + return memory_region_size(mr); + } + return 0; +} - *state = 0; - *len_out = 4; - return CXL_MBOX_SUCCESS; +static uint64_t get_pmr_size(CXLType3Dev *ct3d, MemoryRegion **pmr) +{ + MemoryRegion *mr; + if (ct3d->hostpmem) { + mr = host_memory_backend_get_memory(ct3d->hostpmem); + if (pmr) { + *pmr = mr; + } + return memory_region_size(mr); + } + return 0; } -/* - * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h) - * - * This is very inefficient, but good enough for now! - * Also the payload will always fit, so no need to handle the MORE flag and - * make this stateful. We may want to allow longer poison lists to aid - * testing that kernel functionality. 
- */ -static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd, - uint8_t *payload_in, - size_t len_in, - uint8_t *payload_out, - size_t *len_out, - CXLCCI *cci) +static uint64_t get_dc_size(CXLType3Dev *ct3d, MemoryRegion **dc_mr) { - struct get_poison_list_pl { - uint64_t pa; - uint64_t length; - } QEMU_PACKED; + MemoryRegion *mr; + if (ct3d->dc.host_dc) { + mr = host_memory_backend_get_memory(ct3d->dc.host_dc); + if (dc_mr) { + *dc_mr = mr; + } + return memory_region_size(mr); + } + return 0; +} - struct get_poison_list_out_pl { - uint8_t flags; - uint8_t rsvd1; - uint64_t overflow_timestamp; - uint16_t count; - uint8_t rsvd2[0x14]; - struct { - uint64_t addr; - uint32_t length; - uint32_t resv; - } QEMU_PACKED records[]; - } QEMU_PACKED; +static int validate_dpa_addr(CXLType3Dev *ct3d, uint64_t dpa_addr, + size_t length) +{ + uint64_t vmr_size, pmr_size, dc_size; - struct get_poison_list_pl *in = (void *)payload_in; - struct get_poison_list_out_pl *out = (void *)payload_out; - CXLType3Dev *ct3d = CXL_TYPE3(cci->d); - uint16_t record_count = 0, i = 0; - uint64_t query_start, query_length; - CXLPoisonList *poison_list = &ct3d->poison_list; - CXLPoison *ent; - uint16_t out_pl_len; + if ((dpa_addr % CXL_CACHE_LINE_SIZE) || + (length % CXL_CACHE_LINE_SIZE) || + (length <= 0)) { + return -EINVAL; + } - query_start = ldq_le_p(&in->pa); - /* 64 byte alignment required */ + vmr_size = get_vmr_size(ct3d, NULL); + pmr_size = get_pmr_size(ct3d, NULL); + dc_size = get_dc_size(ct3d, NULL); + + if (dpa_addr + length > vmr_size + pmr_size + dc_size) { + return -EINVAL; + } + + if (dpa_addr > vmr_size + pmr_size) { + if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) { + return -ENODEV; + } + } + + return 0; +} + +static int sanitize_range(CXLType3Dev *ct3d, uint64_t dpa_addr, size_t length, + uint8_t fill_value) +{ + + uint64_t vmr_size, pmr_size; + AddressSpace *as = NULL; + MemTxAttrs mem_attrs = {}; + + vmr_size = get_vmr_size(ct3d, NULL); + pmr_size = get_pmr_size(ct3d, NULL); + + if (dpa_addr < vmr_size) { + as = &ct3d->hostvmem_as; + } else if (dpa_addr < vmr_size + pmr_size) { + as = &ct3d->hostpmem_as; + } else { + if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) { + return -ENODEV; + } + as = &ct3d->dc.host_dc_as; + } + + return address_space_set(as, dpa_addr, fill_value, length, mem_attrs); +} + +/* Perform the actual device zeroing */ +static void __do_sanitize(CXLType3Dev *ct3d) +{ + struct CXLSanitizeInfo *san_info = ct3d->media_op_sanitize; + int dpa_range_count = san_info->dpa_range_count; + int rc = 0; + int i; + + for (i = 0; i < dpa_range_count; i++) { + rc = sanitize_range(ct3d, san_info->dpa_range_list[i].starting_dpa, + san_info->dpa_range_list[i].length, + san_info->fill_value); + if (rc) { + goto exit; + } + } +exit: + g_free(ct3d->media_op_sanitize); + ct3d->media_op_sanitize = NULL; + return; +} + +enum { + MEDIA_OP_CLASS_GENERAL = 0x0, + #define MEDIA_OP_GEN_SUBC_DISCOVERY 0x0 + MEDIA_OP_CLASS_SANITIZE = 0x1, + #define MEDIA_OP_SAN_SUBC_SANITIZE 0x0 + #define MEDIA_OP_SAN_SUBC_ZERO 0x1 +}; + +struct media_op_supported_list_entry { + uint8_t media_op_class; + uint8_t media_op_subclass; +}; + +struct media_op_discovery_out_pl { + uint64_t dpa_range_granularity; + uint16_t total_supported_operations; + uint16_t num_of_supported_operations; + struct media_op_supported_list_entry entry[]; +} QEMU_PACKED; + +static const struct media_op_supported_list_entry media_op_matrix[] = { + { MEDIA_OP_CLASS_GENERAL, MEDIA_OP_GEN_SUBC_DISCOVERY }, + { 
MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_SANITIZE }, + { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_ZERO }, +}; + +static CXLRetCode media_operations_discovery(uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out) +{ + struct { + uint8_t media_operation_class; + uint8_t media_operation_subclass; + uint8_t rsvd[2]; + uint32_t dpa_range_count; + struct { + uint16_t start_index; + uint16_t num_ops; + } discovery_osa; + } QEMU_PACKED *media_op_in_disc_pl = (void *)payload_in; + struct media_op_discovery_out_pl *media_out_pl = + (struct media_op_discovery_out_pl *)payload_out; + int num_ops, start_index, i; + int count = 0; + + if (len_in < sizeof(*media_op_in_disc_pl)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + + num_ops = media_op_in_disc_pl->discovery_osa.num_ops; + start_index = media_op_in_disc_pl->discovery_osa.start_index; + + /* + * As per spec CXL r3.2 8.2.10.9.5.3 dpa_range_count should be zero and + * start index should not exceed the total number of entries for discovery + * sub class command. + */ + if (media_op_in_disc_pl->dpa_range_count || + start_index > ARRAY_SIZE(media_op_matrix)) { + return CXL_MBOX_INVALID_INPUT; + } + + media_out_pl->dpa_range_granularity = CXL_CACHE_LINE_SIZE; + media_out_pl->total_supported_operations = + ARRAY_SIZE(media_op_matrix); + if (num_ops > 0) { + for (i = start_index; i < start_index + num_ops; i++) { + media_out_pl->entry[count].media_op_class = + media_op_matrix[i].media_op_class; + media_out_pl->entry[count].media_op_subclass = + media_op_matrix[i].media_op_subclass; + count++; + if (count == num_ops) { + break; + } + } + } + + media_out_pl->num_of_supported_operations = count; + *len_out = sizeof(*media_out_pl) + count * sizeof(*media_out_pl->entry); + return CXL_MBOX_SUCCESS; +} + +static CXLRetCode media_operations_sanitize(CXLType3Dev *ct3d, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + uint8_t fill_value, + CXLCCI *cci) +{ + struct media_operations_sanitize { + uint8_t media_operation_class; + uint8_t media_operation_subclass; + uint8_t rsvd[2]; + uint32_t dpa_range_count; + struct dpa_range_list_entry dpa_range_list[]; + } QEMU_PACKED *media_op_in_sanitize_pl = (void *)payload_in; + uint32_t dpa_range_count = media_op_in_sanitize_pl->dpa_range_count; + uint64_t total_mem = 0; + size_t dpa_range_list_size; + int secs = 0, i; + + if (dpa_range_count == 0) { + return CXL_MBOX_SUCCESS; + } + + dpa_range_list_size = dpa_range_count * sizeof(struct dpa_range_list_entry); + if (len_in < (sizeof(*media_op_in_sanitize_pl) + dpa_range_list_size)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + + for (i = 0; i < dpa_range_count; i++) { + uint64_t start_dpa = + media_op_in_sanitize_pl->dpa_range_list[i].starting_dpa; + uint64_t length = media_op_in_sanitize_pl->dpa_range_list[i].length; + + if (validate_dpa_addr(ct3d, start_dpa, length)) { + return CXL_MBOX_INVALID_INPUT; + } + total_mem += length; + } + ct3d->media_op_sanitize = g_malloc0(sizeof(struct CXLSanitizeInfo) + + dpa_range_list_size); + + ct3d->media_op_sanitize->dpa_range_count = dpa_range_count; + ct3d->media_op_sanitize->fill_value = fill_value; + memcpy(ct3d->media_op_sanitize->dpa_range_list, + media_op_in_sanitize_pl->dpa_range_list, + dpa_range_list_size); + secs = get_sanitize_duration(total_mem >> 20); + + /* EBUSY other bg cmds as of now */ + cci->bg.runtime = secs * 1000UL; + *len_out = 0; + /* + * media op sanitize is targeted so no need to disable media or + * clear event logs + */ + return 
CXL_MBOX_BG_STARTED; +} + +static CXLRetCode cmd_media_operations(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + struct { + uint8_t media_operation_class; + uint8_t media_operation_subclass; + uint8_t rsvd[2]; + uint32_t dpa_range_count; + } QEMU_PACKED *media_op_in_common_pl = (void *)payload_in; + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + uint8_t media_op_cl = 0; + uint8_t media_op_subclass = 0; + + if (len_in < sizeof(*media_op_in_common_pl)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + + media_op_cl = media_op_in_common_pl->media_operation_class; + media_op_subclass = media_op_in_common_pl->media_operation_subclass; + + switch (media_op_cl) { + case MEDIA_OP_CLASS_GENERAL: + if (media_op_subclass != MEDIA_OP_GEN_SUBC_DISCOVERY) { + return CXL_MBOX_UNSUPPORTED; + } + + return media_operations_discovery(payload_in, len_in, payload_out, + len_out); + case MEDIA_OP_CLASS_SANITIZE: + switch (media_op_subclass) { + case MEDIA_OP_SAN_SUBC_SANITIZE: + return media_operations_sanitize(ct3d, payload_in, len_in, + payload_out, len_out, 0xF, + cci); + case MEDIA_OP_SAN_SUBC_ZERO: + return media_operations_sanitize(ct3d, payload_in, len_in, + payload_out, len_out, 0, + cci); + default: + return CXL_MBOX_UNSUPPORTED; + } + default: + return CXL_MBOX_UNSUPPORTED; + } +} + +static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + uint32_t *state = (uint32_t *)payload_out; + + *state = 0; + *len_out = 4; + return CXL_MBOX_SUCCESS; +} + +/* + * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h) + * + * This is very inefficient, but good enough for now! + * Also the payload will always fit, so no need to handle the MORE flag and + * make this stateful. We may want to allow longer poison lists to aid + * testing that kernel functionality. 
+ */ +static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + struct get_poison_list_pl { + uint64_t pa; + uint64_t length; + } QEMU_PACKED; + + struct get_poison_list_out_pl { + uint8_t flags; + uint8_t rsvd1; + uint64_t overflow_timestamp; + uint16_t count; + uint8_t rsvd2[0x14]; + struct { + uint64_t addr; + uint32_t length; + uint32_t resv; + } QEMU_PACKED records[]; + } QEMU_PACKED; + + struct get_poison_list_pl *in = (void *)payload_in; + struct get_poison_list_out_pl *out = (void *)payload_out; + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + uint16_t record_count = 0, i = 0; + uint64_t query_start, query_length; + CXLPoisonList *poison_list = &ct3d->poison_list; + CXLPoison *ent; + uint16_t out_pl_len; + + query_start = ldq_le_p(&in->pa); + /* 64 byte alignment required */ if (query_start & 0x3f) { return CXL_MBOX_INVALID_INPUT; } @@ -2076,7 +2628,7 @@ static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd, start = ROUND_DOWN(ent->start, 64ull); stop = ROUND_DOWN(ent->start, 64ull) + ent->length; - stq_le_p(&out->records[i].addr, start | (ent->type & 0x7)); + stq_le_p(&out->records[i].addr, start); stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE); i++; @@ -2206,7 +2758,7 @@ static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd, uint16_t out_pl_len, size; CXLDCExtent *ent; - if (start_extent_id > ct3d->dc.total_extent_count) { + if (start_extent_id > ct3d->dc.nr_extents_accepted) { return CXL_MBOX_INVALID_INPUT; } @@ -2217,7 +2769,7 @@ static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd, out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); stl_le_p(&out->count, record_count); - stl_le_p(&out->total_extents, ct3d->dc.total_extent_count); + stl_le_p(&out->total_extents, ct3d->dc.nr_extents_accepted); stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq); if (record_count > 0) { @@ -2233,6 +2785,7 @@ static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd, stw_le_p(&out_rec->shared_seq, ent->shared_seq); record_done++; + out_rec++; if (record_done == record_count) { break; } @@ -2338,16 +2891,20 @@ void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list, QTAILQ_INSERT_TAIL(list, group, node); } -void cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list) +uint32_t cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list) { CXLDCExtent *ent, *ent_next; CXLDCExtentGroup *group = QTAILQ_FIRST(list); + uint32_t extents_deleted = 0; QTAILQ_REMOVE(list, group, node); QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) { cxl_remove_extent_from_extent_list(&group->list, ent); + extents_deleted++; } g_free(group); + + return extents_deleted; } /* @@ -2466,15 +3023,25 @@ static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd, CXLUpdateDCExtentListInPl *in = (void *)payload_in; CXLType3Dev *ct3d = CXL_TYPE3(cci->d); CXLDCExtentList *extent_list = &ct3d->dc.extents; - uint32_t i; + uint32_t i, num; uint64_t dpa, len; CXLRetCode ret; + if (len_in < sizeof(*in)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + if (in->num_entries_updated == 0) { - cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending); + num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending); + ct3d->dc.total_extent_count -= num; return CXL_MBOX_SUCCESS; } + if (len_in < + sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) { + 
return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + /* Adding extents causes exceeding device's extent tracking ability. */ if (in->num_entries_updated + ct3d->dc.total_extent_count > CXL_NUM_EXTENTS_SUPPORTED) { @@ -2497,10 +3064,12 @@ static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd, cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0); ct3d->dc.total_extent_count += 1; + ct3d->dc.nr_extents_accepted += 1; ct3_set_region_block_backed(ct3d, dpa, len); } /* Remove the first extent group in the pending list */ - cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending); + num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending); + ct3d->dc.total_extent_count -= num; return CXL_MBOX_SUCCESS; } @@ -2606,7 +3175,7 @@ static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d, } *updated_list_size = 0; } else { - *updated_list_size = ct3d->dc.total_extent_count + cnt_delta; + *updated_list_size = ct3d->dc.nr_extents_accepted + cnt_delta; } return ret; @@ -2629,10 +3198,19 @@ static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd, uint32_t updated_list_size; CXLRetCode ret; + if (len_in < sizeof(*in)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + if (in->num_entries_updated == 0) { return CXL_MBOX_INVALID_INPUT; } + if (len_in < + sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + ret = cxl_detect_malformed_extent_list(ct3d, in); if (ret != CXL_MBOX_SUCCESS) { return ret; @@ -2659,12 +3237,498 @@ static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd, ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len); cxl_remove_extent_from_extent_list(&updated_list, ent); } - ct3d->dc.total_extent_count = updated_list_size; + ct3d->dc.total_extent_count += (updated_list_size - + ct3d->dc.nr_extents_accepted); + + ct3d->dc.nr_extents_accepted = updated_list_size; + + return CXL_MBOX_SUCCESS; +} + +/* CXL r3.2 section 7.6.7.6.1: Get DCD Info (Opcode 5600h) */ +static CXLRetCode cmd_fm_get_dcd_info(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + struct { + uint8_t num_hosts; + uint8_t num_regions_supported; + uint8_t rsvd1[2]; + uint16_t supported_add_sel_policy_bitmask; + uint8_t rsvd2[2]; + uint16_t supported_removal_policy_bitmask; + uint8_t sanitize_on_release_bitmask; + uint8_t rsvd3; + uint64_t total_dynamic_capacity; + uint64_t region_blk_size_bitmasks[8]; + } QEMU_PACKED *out = (void *)payload_out; + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + CXLDCRegion *region; + int i; + + out->num_hosts = 1; + out->num_regions_supported = ct3d->dc.num_regions; + stw_le_p(&out->supported_add_sel_policy_bitmask, + BIT(CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE)); + stw_le_p(&out->supported_removal_policy_bitmask, + BIT(CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE)); + out->sanitize_on_release_bitmask = 0; + + stq_le_p(&out->total_dynamic_capacity, + ct3d->dc.total_capacity / CXL_CAPACITY_MULTIPLIER); + + for (i = 0; i < ct3d->dc.num_regions; i++) { + region = &ct3d->dc.regions[i]; + memcpy(&out->region_blk_size_bitmasks[i], + ®ion->supported_blk_size_bitmask, + sizeof(out->region_blk_size_bitmasks[i])); + } + + *len_out = sizeof(*out); + return CXL_MBOX_SUCCESS; +} + +static void build_dsmas_flags(uint8_t *flags, CXLDCRegion *region) +{ + *flags = 0; + + if (region->nonvolatile) { + *flags |= BIT(CXL_DSMAS_FLAGS_NONVOLATILE); + } + if (region->sharable) { + *flags |= BIT(CXL_DSMAS_FLAGS_SHARABLE); 
+ } + if (region->hw_managed_coherency) { + *flags |= BIT(CXL_DSMAS_FLAGS_HW_MANAGED_COHERENCY); + } + if (region->ic_specific_dc_management) { + *flags |= BIT(CXL_DSMAS_FLAGS_IC_SPECIFIC_DC_MANAGEMENT); + } + if (region->rdonly) { + *flags |= BIT(CXL_DSMAS_FLAGS_RDONLY); + } +} + +/* + * CXL r3.2 section 7.6.7.6.2: + * Get Host DC Region Configuration (Opcode 5601h) + */ +static CXLRetCode cmd_fm_get_host_dc_region_config(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + struct { + uint16_t host_id; + uint8_t region_cnt; + uint8_t start_rid; + } QEMU_PACKED *in = (void *)payload_in; + struct { + uint16_t host_id; + uint8_t num_regions; + uint8_t regions_returned; + struct { + uint64_t base; + uint64_t decode_len; + uint64_t region_len; + uint64_t block_size; + uint8_t flags; + uint8_t rsvd1[3]; + uint8_t sanitize; + uint8_t rsvd2[3]; + } QEMU_PACKED records[]; + } QEMU_PACKED *out = (void *)payload_out; + struct { + uint32_t num_extents_supported; + uint32_t num_extents_available; + uint32_t num_tags_supported; + uint32_t num_tags_available; + } QEMU_PACKED *extra_out; + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + uint16_t record_count, out_pl_len, i; + + if (in->start_rid >= ct3d->dc.num_regions) { + return CXL_MBOX_INVALID_INPUT; + } + record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt); + + out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); + extra_out = (void *)out + out_pl_len; + out_pl_len += sizeof(*extra_out); + + assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE); + + stw_le_p(&out->host_id, 0); + out->num_regions = ct3d->dc.num_regions; + out->regions_returned = record_count; + + for (i = 0; i < record_count; i++) { + stq_le_p(&out->records[i].base, + ct3d->dc.regions[in->start_rid + i].base); + stq_le_p(&out->records[i].decode_len, + ct3d->dc.regions[in->start_rid + i].decode_len / + CXL_CAPACITY_MULTIPLIER); + stq_le_p(&out->records[i].region_len, + ct3d->dc.regions[in->start_rid + i].len); + stq_le_p(&out->records[i].block_size, + ct3d->dc.regions[in->start_rid + i].block_size); + build_dsmas_flags(&out->records[i].flags, + &ct3d->dc.regions[in->start_rid + i]); + /* Sanitize is bit 0 of flags. */ + out->records[i].sanitize = + ct3d->dc.regions[in->start_rid + i].flags & BIT(0); + } + + stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED); + stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED - + ct3d->dc.total_extent_count); + stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED); + stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED); + + *len_out = out_pl_len; + return CXL_MBOX_SUCCESS; +} + +/* CXL r3.2 section 7.6.7.6.3: Set Host DC Region Configuration (Opcode 5602) */ +static CXLRetCode cmd_fm_set_dc_region_config(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + struct { + uint8_t reg_id; + uint8_t rsvd[3]; + uint64_t block_sz; + uint8_t flags; + uint8_t rsvd2[3]; + } QEMU_PACKED *in = (void *)payload_in; + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + CXLEventDynamicCapacity dcEvent = {}; + CXLDCRegion *region = &ct3d->dc.regions[in->reg_id]; + + /* + * CXL r3.2 7.6.7.6.3: Set DC Region Configuration + * This command shall fail with Unsupported when the Sanitize on Release + * field does not match the region’s configuration... and the device + * does not support reconfiguration of the Sanitize on Release setting. 
+ * + * Currently not reconfigurable, so always fail if sanitize bit (bit 0) + * doesn't match. + */ + if ((in->flags & 0x1) != (region->flags & 0x1)) { + return CXL_MBOX_UNSUPPORTED; + } + + if (in->reg_id >= DCD_MAX_NUM_REGION) { + return CXL_MBOX_UNSUPPORTED; + } + + /* Check that no extents are in the region being reconfigured */ + if (!bitmap_empty(region->blk_bitmap, region->len / region->block_size)) { + return CXL_MBOX_UNSUPPORTED; + } + + /* Check that new block size is supported */ + if (!is_power_of_2(in->block_sz) || + !(in->block_sz & region->supported_blk_size_bitmask)) { + return CXL_MBOX_INVALID_INPUT; + } + /* Return success if new block size == current block size */ + if (in->block_sz == region->block_size) { + return CXL_MBOX_SUCCESS; + } + + /* Free bitmap and create new one for new block size. */ + qemu_mutex_lock(®ion->bitmap_lock); + g_free(region->blk_bitmap); + region->blk_bitmap = bitmap_new(region->len / in->block_sz); + qemu_mutex_unlock(®ion->bitmap_lock); + region->block_size = in->block_sz; + + /* Create event record and insert into event log */ + cxl_assign_event_header(&dcEvent.hdr, + &dynamic_capacity_uuid, + (1 << CXL_EVENT_TYPE_INFO), + sizeof(dcEvent), + cxl_device_get_timestamp(&ct3d->cxl_dstate)); + dcEvent.type = DC_EVENT_REGION_CONFIG_UPDATED; + dcEvent.validity_flags = 1; + dcEvent.host_id = 0; + dcEvent.updated_region_id = in->reg_id; + + if (cxl_event_insert(&ct3d->cxl_dstate, + CXL_EVENT_TYPE_DYNAMIC_CAP, + (CXLEventRecordRaw *)&dcEvent)) { + cxl_event_irq_assert(ct3d); + } return CXL_MBOX_SUCCESS; } +/* CXL r3.2 section 7.6.7.6.4: Get DC Region Extent Lists (Opcode 5603h) */ +static CXLRetCode cmd_fm_get_dc_region_extent_list(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + struct { + uint16_t host_id; + uint8_t rsvd[2]; + uint32_t extent_cnt; + uint32_t start_extent_id; + } QEMU_PACKED *in = (void *)payload_in; + struct { + uint16_t host_id; + uint8_t rsvd[2]; + uint32_t start_extent_id; + uint32_t extents_returned; + uint32_t total_extents; + uint32_t list_generation_num; + uint8_t rsvd2[4]; + CXLDCExtentRaw records[]; + } QEMU_PACKED *out = (void *)payload_out; + QEMU_BUILD_BUG_ON(sizeof(*in) != 0xc); + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + CXLDCExtent *ent; + CXLDCExtentRaw *out_rec; + uint16_t record_count = 0, record_done = 0, i = 0; + uint16_t out_pl_len, max_size; + + if (in->host_id != 0) { + return CXL_MBOX_INVALID_INPUT; + } + + if (in->start_extent_id > ct3d->dc.nr_extents_accepted) { + return CXL_MBOX_INVALID_INPUT; + } + + record_count = MIN(in->extent_cnt, + ct3d->dc.nr_extents_accepted - in->start_extent_id); + max_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out); + record_count = MIN(record_count, max_size / sizeof(out->records[0])); + out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]); + + stw_le_p(&out->host_id, in->host_id); + stl_le_p(&out->start_extent_id, in->start_extent_id); + stl_le_p(&out->extents_returned, record_count); + stl_le_p(&out->total_extents, ct3d->dc.nr_extents_accepted); + stl_le_p(&out->list_generation_num, ct3d->dc.ext_list_gen_seq); + + if (record_count > 0) { + QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) { + if (i++ < in->start_extent_id) { + continue; + } + out_rec = &out->records[record_done]; + stq_le_p(&out_rec->start_dpa, ent->start_dpa); + stq_le_p(&out_rec->len, ent->len); + memcpy(&out_rec->tag, ent->tag, 0x10); + stw_le_p(&out_rec->shared_seq, ent->shared_seq); + + record_done++; + if 
(record_done == record_count) { + break; + } + } + } + + *len_out = out_pl_len; + return CXL_MBOX_SUCCESS; +} + +/* + * Helper function to convert CXLDCExtentRaw to CXLUpdateDCExtentListInPl + * in order to reuse cxl_detect_malformed_extent_list() function which accepts + * CXLUpdateDCExtentListInPl as a parameter. + */ +static void convert_raw_extents(CXLDCExtentRaw raw_extents[], + CXLUpdateDCExtentListInPl *extent_list, + int count) +{ + int i; + + extent_list->num_entries_updated = count; + + for (i = 0; i < count; i++) { + extent_list->updated_entries[i].start_dpa = raw_extents[i].start_dpa; + extent_list->updated_entries[i].len = raw_extents[i].len; + } +} + +/* CXL r3.2 Section 7.6.7.6.5: Initiate Dynamic Capacity Add (Opcode 5604h) */ +static CXLRetCode cmd_fm_initiate_dc_add(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + struct { + uint16_t host_id; + uint8_t selection_policy; + uint8_t reg_num; + uint64_t length; + uint8_t tag[0x10]; + uint32_t ext_count; + CXLDCExtentRaw extents[]; + } QEMU_PACKED *in = (void *)payload_in; + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + int i, rc; + + switch (in->selection_policy) { + case CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE: { + /* Adding extents exceeds device's extent tracking ability. */ + if (in->ext_count + ct3d->dc.total_extent_count > + CXL_NUM_EXTENTS_SUPPORTED) { + return CXL_MBOX_RESOURCES_EXHAUSTED; + } + + g_autofree CXLUpdateDCExtentListInPl *list = + g_malloc0(sizeof(*list) + + in->ext_count * sizeof(*list->updated_entries)); + + convert_raw_extents(in->extents, list, in->ext_count); + rc = cxl_detect_malformed_extent_list(ct3d, list); + + for (i = 0; i < in->ext_count; i++) { + CXLDCExtentRaw *ext = &in->extents[i]; + + /* Check requested extents do not overlap with pending ones. */ + if (cxl_extent_groups_overlaps_dpa_range(&ct3d->dc.extents_pending, + ext->start_dpa, + ext->len)) { + return CXL_MBOX_INVALID_EXTENT_LIST; + } + /* Check requested extents do not overlap with existing ones. 
*/ + if (cxl_extents_overlaps_dpa_range(&ct3d->dc.extents, + ext->start_dpa, + ext->len)) { + return CXL_MBOX_INVALID_EXTENT_LIST; + } + } + + if (rc) { + return rc; + } + + CXLDCExtentGroup *group = NULL; + for (i = 0; i < in->ext_count; i++) { + CXLDCExtentRaw *ext = &in->extents[i]; + + group = cxl_insert_extent_to_extent_group(group, ext->start_dpa, + ext->len, ext->tag, + ext->shared_seq); + } + + cxl_extent_group_list_insert_tail(&ct3d->dc.extents_pending, group); + ct3d->dc.total_extent_count += in->ext_count; + cxl_create_dc_event_records_for_extents(ct3d, + DC_EVENT_ADD_CAPACITY, + in->extents, + in->ext_count); + + return CXL_MBOX_SUCCESS; + } + default: { + qemu_log_mask(LOG_UNIMP, + "CXL extent selection policy not supported.\n"); + return CXL_MBOX_INVALID_INPUT; + } + } +} + +#define CXL_EXTENT_REMOVAL_POLICY_MASK 0x0F +#define CXL_FORCED_REMOVAL_MASK (1 << 4) +/* + * CXL r3.2 Section 7.6.7.6.6: + * Initiate Dynamic Capacity Release (Opcode 5605h) + */ +static CXLRetCode cmd_fm_initiate_dc_release(const struct cxl_cmd *cmd, + uint8_t *payload_in, + size_t len_in, + uint8_t *payload_out, + size_t *len_out, + CXLCCI *cci) +{ + struct { + uint16_t host_id; + uint8_t flags; + uint8_t reg_num; + uint64_t length; + uint8_t tag[0x10]; + uint32_t ext_count; + CXLDCExtentRaw extents[]; + } QEMU_PACKED *in = (void *)payload_in; + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + int i, rc; + + switch (in->flags & CXL_EXTENT_REMOVAL_POLICY_MASK) { + case CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE: { + CXLDCExtentList updated_list; + uint32_t updated_list_size; + g_autofree CXLUpdateDCExtentListInPl *list = + g_malloc0(sizeof(*list) + + in->ext_count * sizeof(*list->updated_entries)); + + convert_raw_extents(in->extents, list, in->ext_count); + rc = cxl_detect_malformed_extent_list(ct3d, list); + if (rc) { + return rc; + } + + /* + * Fail with Invalid PA if an extent is pending and Forced Removal + * flag not set. + */ + if (!(in->flags & CXL_FORCED_REMOVAL_MASK)) { + for (i = 0; i < in->ext_count; i++) { + CXLDCExtentRaw ext = in->extents[i]; + /* + * Check requested extents don't overlap with pending + * extents. 
+ */ + if (cxl_extent_groups_overlaps_dpa_range( + &ct3d->dc.extents_pending, + ext.start_dpa, + ext.len)) { + return CXL_MBOX_INVALID_PA; + } + } + } + + rc = cxl_dc_extent_release_dry_run(ct3d, + list, + &updated_list, + &updated_list_size); + if (rc) { + return rc; + } + cxl_create_dc_event_records_for_extents(ct3d, + DC_EVENT_RELEASE_CAPACITY, + in->extents, + in->ext_count); + return CXL_MBOX_SUCCESS; + } + default: { + qemu_log_mask(LOG_UNIMP, + "CXL extent removal policy not supported.\n"); + return CXL_MBOX_INVALID_INPUT; + } + } +} + static const struct cxl_cmd cxl_cmd_set[256][256] = { + [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT", + cmd_infostat_bg_op_abort, 0, 0 }, [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS", cmd_events_get_records, 1, 0 }, [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS", @@ -2677,9 +3741,11 @@ static const struct cxl_cmd cxl_cmd_set[256][256] = { [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO", cmd_firmware_update_get_info, 0, 0 }, [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER", - cmd_firmware_update_transfer, ~0, CXL_MBOX_BACKGROUND_OPERATION }, + cmd_firmware_update_transfer, ~0, + CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT }, [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE", - cmd_firmware_update_activate, 2, CXL_MBOX_BACKGROUND_OPERATION }, + cmd_firmware_update_activate, 2, + CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT }, [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, @@ -2705,9 +3771,20 @@ static const struct cxl_cmd cxl_cmd_set[256][256] = { [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 }, [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa, ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE }, + [HEALTH_INFO_ALERTS][GET_ALERT_CONFIG] = { + "HEALTH_INFO_ALERTS_GET_ALERT_CONFIG", + cmd_get_alert_config, 0, 0 }, + [HEALTH_INFO_ALERTS][SET_ALERT_CONFIG] = { + "HEALTH_INFO_ALERTS_SET_ALERT_CONFIG", + cmd_set_alert_config, 12, CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0, (CXL_MBOX_IMMEDIATE_DATA_CHANGE | CXL_MBOX_SECURITY_STATE_CHANGE | + CXL_MBOX_BACKGROUND_OPERATION | + CXL_MBOX_BACKGROUND_OPERATION_ABORT)}, + [SANITIZE][MEDIA_OPERATIONS] = { "MEDIA_OPERATIONS", cmd_media_operations, + ~0, + (CXL_MBOX_IMMEDIATE_DATA_CHANGE | CXL_MBOX_BACKGROUND_OPERATION)}, [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE", cmd_get_security_state, 0, 0 }, @@ -2721,7 +3798,8 @@ static const struct cxl_cmd cxl_cmd_set[256][256] = { "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES", cmd_media_get_scan_media_capabilities, 16, 0 }, [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA", - cmd_media_scan_media, 17, CXL_MBOX_BACKGROUND_OPERATION }, + cmd_media_scan_media, 17, + (CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT)}, [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = { "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS", cmd_media_get_scan_media_results, 0, 0 }, @@ -2745,6 +3823,8 @@ static const struct cxl_cmd cxl_cmd_set_sw[256][256] = { [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 }, [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS", cmd_infostat_bg_op_sts, 0, 0 }, + [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT", + cmd_infostat_bg_op_abort, 0, 0 }, 
[TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 }, [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE }, @@ -2759,6 +3839,36 @@ static const struct cxl_cmd cxl_cmd_set_sw[256][256] = { cmd_tunnel_management_cmd, ~0, 0 }, }; +static const struct cxl_cmd cxl_cmd_set_fm_dcd[256][256] = { + [FMAPI_DCD_MGMT][GET_DCD_INFO] = { "GET_DCD_INFO", + cmd_fm_get_dcd_info, 0, 0 }, + [FMAPI_DCD_MGMT][GET_HOST_DC_REGION_CONFIG] = { "GET_HOST_DC_REGION_CONFIG", + cmd_fm_get_host_dc_region_config, 4, 0 }, + [FMAPI_DCD_MGMT][SET_DC_REGION_CONFIG] = { "SET_DC_REGION_CONFIG", + cmd_fm_set_dc_region_config, 16, + (CXL_MBOX_CONFIG_CHANGE_COLD_RESET | + CXL_MBOX_CONFIG_CHANGE_CONV_RESET | + CXL_MBOX_CONFIG_CHANGE_CXL_RESET | + CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | + CXL_MBOX_IMMEDIATE_DATA_CHANGE) }, + [FMAPI_DCD_MGMT][GET_DC_REGION_EXTENT_LIST] = { "GET_DC_REGION_EXTENT_LIST", + cmd_fm_get_dc_region_extent_list, 12, 0 }, + [FMAPI_DCD_MGMT][INITIATE_DC_ADD] = { "INIT_DC_ADD", + cmd_fm_initiate_dc_add, ~0, + (CXL_MBOX_CONFIG_CHANGE_COLD_RESET | + CXL_MBOX_CONFIG_CHANGE_CONV_RESET | + CXL_MBOX_CONFIG_CHANGE_CXL_RESET | + CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | + CXL_MBOX_IMMEDIATE_DATA_CHANGE) }, + [FMAPI_DCD_MGMT][INITIATE_DC_RELEASE] = { "INIT_DC_RELEASE", + cmd_fm_initiate_dc_release, ~0, + (CXL_MBOX_CONFIG_CHANGE_COLD_RESET | + CXL_MBOX_CONFIG_CHANGE_CONV_RESET | + CXL_MBOX_CONFIG_CHANGE_CXL_RESET | + CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | + CXL_MBOX_IMMEDIATE_DATA_CHANGE) }, +}; + /* * While the command is executing in the background, the device should * update the percentage complete in the Background Command Status Register @@ -2831,6 +3941,7 @@ int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd, cci->bg.opcode = (set << 8) | cmd; cci->bg.complete_pct = 0; + cci->bg.aborted = false; cci->bg.ret_code = 0; now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); @@ -2844,10 +3955,12 @@ int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd, static void bg_timercb(void *opaque) { CXLCCI *cci = opaque; - uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); - uint64_t total_time = cci->bg.starttime + cci->bg.runtime; + uint64_t now, total_time; - assert(cci->bg.runtime > 0); + qemu_mutex_lock(&cci->bg.lock); + + now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + total_time = cci->bg.starttime + cci->bg.runtime; if (now >= total_time) { /* we are done */ uint16_t ret = CXL_MBOX_SUCCESS; @@ -2866,6 +3979,12 @@ static void bg_timercb(void *opaque) cxl_dev_enable_media(&ct3d->cxl_dstate); } break; + case 0x4402: /* Media Operations sanitize */ + { + CXLType3Dev *ct3d = CXL_TYPE3(cci->d); + __do_sanitize(ct3d); + } + break; case 0x4304: /* scan media */ { CXLType3Dev *ct3d = CXL_TYPE3(cci->d); @@ -2879,7 +3998,8 @@ static void bg_timercb(void *opaque) } } else { /* estimate only */ - cci->bg.complete_pct = 100 * now / total_time; + cci->bg.complete_pct = + 100 * (now - cci->bg.starttime) / cci->bg.runtime; timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); } @@ -2899,6 +4019,8 @@ static void bg_timercb(void *opaque) msi_notify(pdev, cxl_dstate->mbox_msi_n); } } + + qemu_mutex_unlock(&cci->bg.lock); } static void cxl_rebuild_cel(CXLCCI *cci) @@ -2927,12 +4049,21 @@ void cxl_init_cci(CXLCCI *cci, size_t payload_max) cci->bg.complete_pct = 0; cci->bg.starttime = 0; cci->bg.runtime = 0; + cci->bg.aborted = false; cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, bg_timercb, cci); + qemu_mutex_init(&cci->bg.lock); memset(&cci->fw, 0, sizeof(cci->fw)); 
cci->fw.active_slot = 1; cci->fw.slot[cci->fw.active_slot - 1] = true; + cci->initialized = true; +} + +void cxl_destroy_cci(CXLCCI *cci) +{ + qemu_mutex_destroy(&cci->bg.lock); + cci->initialized = false; } static void cxl_copy_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmds)[256]) @@ -2996,6 +4127,10 @@ void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf, static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = { [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0}, + [INFOSTAT][GET_RESPONSE_MSG_LIMIT] = { "GET_RESPONSE_MSG_LIMIT", + cmd_get_response_msg_limit, 0, 0 }, + [INFOSTAT][SET_RESPONSE_MSG_LIMIT] = { "SET_RESPONSE_MSG_LIMIT", + cmd_set_response_msg_limit, 1, 0 }, [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0, 0 }, [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 }, @@ -3008,7 +4143,12 @@ void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d, DeviceState *intf, size_t payload_max) { + CXLType3Dev *ct3d = CXL_TYPE3(d); + cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp); + if (ct3d->dc.num_regions) { + cxl_copy_cci_commands(cci, cxl_cmd_set_fm_dcd); + } cci->d = d; cci->intf = intf; cxl_init_cci(cci, payload_max); diff --git a/hw/cxl/switch-mailbox-cci.c b/hw/cxl/switch-mailbox-cci.c index ba399c62407..223f2204331 100644 --- a/hw/cxl/switch-mailbox-cci.c +++ b/hw/cxl/switch-mailbox-cci.c @@ -17,10 +17,12 @@ #include "hw/qdev-properties.h" #include "hw/cxl/cxl.h" +#define CXL_SWCCI_MSIX_MBOX 3 + static void cswmbcci_reset(DeviceState *dev) { CSWMBCCIDev *cswmb = CXL_SWITCH_MAILBOX_CCI(dev); - cxl_device_register_init_swcci(cswmb); + cxl_device_register_init_swcci(cswmb, CXL_SWCCI_MSIX_MBOX); } static void cswbcci_realize(PCIDevice *pci_dev, Error **errp) @@ -65,13 +67,12 @@ static void cswmbcci_exit(PCIDevice *pci_dev) /* Nothing to do here yet */ } -static Property cxl_switch_cci_props[] = { +static const Property cxl_switch_cci_props[] = { DEFINE_PROP_LINK("target", CSWMBCCIDev, target, TYPE_CXL_USP, PCIDevice *), - DEFINE_PROP_END_OF_LIST(), }; -static void cswmbcci_class_init(ObjectClass *oc, void *data) +static void cswmbcci_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); @@ -89,7 +90,7 @@ static void cswmbcci_class_init(ObjectClass *oc, void *data) pc->device_id = 0xa123; pc->revision = 0; dc->desc = "CXL Switch Mailbox CCI"; - dc->reset = cswmbcci_reset; + device_class_set_legacy_reset(dc, cswmbcci_reset); device_class_set_props(dc, cxl_switch_cci_props); } @@ -98,7 +99,7 @@ static const TypeInfo cswmbcci_info = { .parent = TYPE_PCI_DEVICE, .class_init = cswmbcci_class_init, .instance_size = sizeof(CSWMBCCIDev), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { } }, diff --git a/hw/display/Kconfig b/hw/display/Kconfig index a4552c8ed78..1e95ab28ef4 100644 --- a/hw/display/Kconfig +++ b/hw/display/Kconfig @@ -66,9 +66,6 @@ config BOCHS_DISPLAY select VGA select EDID -config BLIZZARD - bool - config FRAMEBUFFER bool @@ -76,7 +73,7 @@ config SM501 bool select I2C select DDC - select SERIAL + select SERIAL_MM select USB_OHCI_SYSBUS config TCX @@ -143,3 +140,12 @@ config XLNX_DISPLAYPORT config DM163 bool + +config MAC_PVG_MMIO + bool + depends on MAC_PVG && AARCH64 + +config MAC_PVG_PCI + bool + depends on MAC_PVG && PCI + default y if PCI_DEVICES diff --git a/hw/display/apple-gfx-mmio.m 
b/hw/display/apple-gfx-mmio.m new file mode 100644 index 00000000000..b0b6e2993eb --- /dev/null +++ b/hw/display/apple-gfx-mmio.m @@ -0,0 +1,285 @@ +/* + * QEMU Apple ParavirtualizedGraphics.framework device, MMIO (arm64) variant + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * ParavirtualizedGraphics.framework is a set of libraries that macOS provides + * which implements 3d graphics passthrough to the host as well as a + * proprietary guest communication channel to drive it. This device model + * implements support to drive that library from within QEMU as an MMIO-based + * system device for macOS on arm64 VMs. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "block/aio-wait.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "apple-gfx.h" +#include "trace.h" + +#import + +OBJECT_DECLARE_SIMPLE_TYPE(AppleGFXMMIOState, APPLE_GFX_MMIO) + +/* + * ParavirtualizedGraphics.Framework only ships header files for the PCI + * variant which does not include IOSFC descriptors and host devices. We add + * their definitions here so that we can also work with the ARM version. + */ +typedef bool(^IOSFCRaiseInterrupt)(uint32_t vector); +typedef bool(^IOSFCUnmapMemory)(void *, void *, void *, void *, void *, void *); +typedef bool(^IOSFCMapMemory)(uint64_t phys, uint64_t len, bool ro, void **va, + void *, void *); + +@interface PGDeviceDescriptor (IOSurfaceMapper) +@property (readwrite, nonatomic) bool usingIOSurfaceMapper; +@end + +@interface PGIOSurfaceHostDeviceDescriptor : NSObject +-(PGIOSurfaceHostDeviceDescriptor *)init; +@property (readwrite, nonatomic, copy, nullable) IOSFCMapMemory mapMemory; +@property (readwrite, nonatomic, copy, nullable) IOSFCUnmapMemory unmapMemory; +@property (readwrite, nonatomic, copy, nullable) IOSFCRaiseInterrupt raiseInterrupt; +@end + +@interface PGIOSurfaceHostDevice : NSObject +-(instancetype)initWithDescriptor:(PGIOSurfaceHostDeviceDescriptor *)desc; +-(uint32_t)mmioReadAtOffset:(size_t)offset; +-(void)mmioWriteAtOffset:(size_t)offset value:(uint32_t)value; +@end + +struct AppleGFXMapSurfaceMemoryJob; +struct AppleGFXMMIOState { + SysBusDevice parent_obj; + + AppleGFXState common; + + qemu_irq irq_gfx; + qemu_irq irq_iosfc; + MemoryRegion iomem_iosfc; + PGIOSurfaceHostDevice *pgiosfc; +}; + +typedef struct AppleGFXMMIOJob { + AppleGFXMMIOState *state; + uint64_t offset; + uint64_t value; + bool completed; +} AppleGFXMMIOJob; + +static void iosfc_do_read(void *opaque) +{ + AppleGFXMMIOJob *job = opaque; + job->value = [job->state->pgiosfc mmioReadAtOffset:job->offset]; + qatomic_set(&job->completed, true); + aio_wait_kick(); +} + +static uint64_t iosfc_read(void *opaque, hwaddr offset, unsigned size) +{ + AppleGFXMMIOJob job = { + .state = opaque, + .offset = offset, + .completed = false, + }; + dispatch_queue_t queue = + dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); + + dispatch_async_f(queue, &job, iosfc_do_read); + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); + + trace_apple_gfx_mmio_iosfc_read(offset, job.value); + return job.value; +} + +static void iosfc_do_write(void *opaque) +{ + AppleGFXMMIOJob *job = opaque; + [job->state->pgiosfc mmioWriteAtOffset:job->offset value:job->value]; + qatomic_set(&job->completed, true); + aio_wait_kick(); +} + +static void iosfc_write(void *opaque, hwaddr offset, uint64_t val, + unsigned size) +{ + AppleGFXMMIOJob job = { + .state = opaque, + .offset = offset, + .value = val, + .completed = 
false, + }; + dispatch_queue_t queue = + dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); + + dispatch_async_f(queue, &job, iosfc_do_write); + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); + + trace_apple_gfx_mmio_iosfc_write(offset, val); +} + +static const MemoryRegionOps apple_iosfc_ops = { + .read = iosfc_read, + .write = iosfc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 8, + }, +}; + +static void raise_irq_bh(void *opaque) +{ + qemu_irq *irq = opaque; + + qemu_irq_pulse(*irq); +} + +static void *apple_gfx_mmio_map_surface_memory(uint64_t guest_physical_address, + uint64_t length, bool read_only) +{ + void *mem; + MemoryRegion *region = NULL; + + RCU_READ_LOCK_GUARD(); + mem = apple_gfx_host_ptr_for_gpa_range(guest_physical_address, + length, read_only, ®ion); + if (mem) { + memory_region_ref(region); + } + return mem; +} + +static bool apple_gfx_mmio_unmap_surface_memory(void *ptr) +{ + MemoryRegion *region; + ram_addr_t offset = 0; + + RCU_READ_LOCK_GUARD(); + region = memory_region_from_host(ptr, &offset); + if (!region) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: memory at %p to be unmapped not found.\n", + __func__, ptr); + return false; + } + + trace_apple_gfx_iosfc_unmap_memory_region(ptr, region); + memory_region_unref(region); + return true; +} + +static PGIOSurfaceHostDevice *apple_gfx_prepare_iosurface_host_device( + AppleGFXMMIOState *s) +{ + PGIOSurfaceHostDeviceDescriptor *iosfc_desc = + [PGIOSurfaceHostDeviceDescriptor new]; + PGIOSurfaceHostDevice *iosfc_host_dev; + + iosfc_desc.mapMemory = + ^bool(uint64_t phys, uint64_t len, bool ro, void **va, void *e, void *f) { + *va = apple_gfx_mmio_map_surface_memory(phys, len, ro); + + trace_apple_gfx_iosfc_map_memory(phys, len, ro, va, e, f, *va); + + return *va != NULL; + }; + + iosfc_desc.unmapMemory = + ^bool(void *va, void *b, void *c, void *d, void *e, void *f) { + return apple_gfx_mmio_unmap_surface_memory(va); + }; + + iosfc_desc.raiseInterrupt = ^bool(uint32_t vector) { + trace_apple_gfx_iosfc_raise_irq(vector); + aio_bh_schedule_oneshot(qemu_get_aio_context(), + raise_irq_bh, &s->irq_iosfc); + return true; + }; + + iosfc_host_dev = + [[PGIOSurfaceHostDevice alloc] initWithDescriptor:iosfc_desc]; + [iosfc_desc release]; + return iosfc_host_dev; +} + +static void apple_gfx_mmio_realize(DeviceState *dev, Error **errp) +{ + @autoreleasepool { + AppleGFXMMIOState *s = APPLE_GFX_MMIO(dev); + PGDeviceDescriptor *desc = [PGDeviceDescriptor new]; + + desc.raiseInterrupt = ^(uint32_t vector) { + trace_apple_gfx_raise_irq(vector); + aio_bh_schedule_oneshot(qemu_get_aio_context(), + raise_irq_bh, &s->irq_gfx); + }; + + desc.usingIOSurfaceMapper = true; + s->pgiosfc = apple_gfx_prepare_iosurface_host_device(s); + + if (!apple_gfx_common_realize(&s->common, dev, desc, errp)) { + [s->pgiosfc release]; + s->pgiosfc = nil; + } + + [desc release]; + desc = nil; + } +} + +static void apple_gfx_mmio_init(Object *obj) +{ + AppleGFXMMIOState *s = APPLE_GFX_MMIO(obj); + + apple_gfx_common_init(obj, &s->common, TYPE_APPLE_GFX_MMIO); + + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->common.iomem_gfx); + memory_region_init_io(&s->iomem_iosfc, obj, &apple_iosfc_ops, s, + TYPE_APPLE_GFX_MMIO, 0x10000); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem_iosfc); + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq_gfx); + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq_iosfc); +} + +static void apple_gfx_mmio_reset(Object *obj, 
ResetType type) +{ + AppleGFXMMIOState *s = APPLE_GFX_MMIO(obj); + [s->common.pgdev reset]; +} + +static const Property apple_gfx_mmio_properties[] = { + DEFINE_PROP_ARRAY("display-modes", AppleGFXMMIOState, + common.num_display_modes, common.display_modes, + qdev_prop_apple_gfx_display_mode, AppleGFXDisplayMode), +}; + +static void apple_gfx_mmio_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->phases.hold = apple_gfx_mmio_reset; + dc->hotpluggable = false; + dc->realize = apple_gfx_mmio_realize; + + device_class_set_props(dc, apple_gfx_mmio_properties); +} + +static const TypeInfo apple_gfx_mmio_types[] = { + { + .name = TYPE_APPLE_GFX_MMIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AppleGFXMMIOState), + .class_init = apple_gfx_mmio_class_init, + .instance_init = apple_gfx_mmio_init, + } +}; +DEFINE_TYPES(apple_gfx_mmio_types) diff --git a/hw/display/apple-gfx-pci.m b/hw/display/apple-gfx-pci.m new file mode 100644 index 00000000000..b0694f4cb85 --- /dev/null +++ b/hw/display/apple-gfx-pci.m @@ -0,0 +1,157 @@ +/* + * QEMU Apple ParavirtualizedGraphics.framework device, PCI variant + * + * Copyright © 2023-2024 Phil Dennis-Jordan + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * ParavirtualizedGraphics.framework is a set of libraries that macOS provides + * which implements 3d graphics passthrough to the host as well as a + * proprietary guest communication channel to drive it. This device model + * implements support to drive that library from within QEMU as a PCI device + * aimed primarily at x86-64 macOS VMs. + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci_device.h" +#include "hw/pci/msi.h" +#include "apple-gfx.h" +#include "trace.h" + +#import + +OBJECT_DECLARE_SIMPLE_TYPE(AppleGFXPCIState, APPLE_GFX_PCI) + +struct AppleGFXPCIState { + PCIDevice parent_obj; + + AppleGFXState common; +}; + +static const char *apple_gfx_pci_option_rom_path = NULL; + +static void apple_gfx_init_option_rom_path(void) +{ + NSURL *option_rom_url = PGCopyOptionROMURL(); + const char *option_rom_path = option_rom_url.fileSystemRepresentation; + apple_gfx_pci_option_rom_path = g_strdup(option_rom_path); + [option_rom_url release]; +} + +static void apple_gfx_pci_init(Object *obj) +{ + AppleGFXPCIState *s = APPLE_GFX_PCI(obj); + + if (!apple_gfx_pci_option_rom_path) { + /* + * The following is done on device not class init to avoid running + * ObjC code before fork() in -daemonize mode. 
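+     * (Instance init only runs once a device is actually created, which in
+     * -daemonize mode happens after the fork; class init may already be
+     * triggered earlier during start-up.)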
+ */ + PCIDeviceClass *pci = PCI_DEVICE_CLASS(object_get_class(obj)); + apple_gfx_init_option_rom_path(); + pci->romfile = apple_gfx_pci_option_rom_path; + } + + apple_gfx_common_init(obj, &s->common, TYPE_APPLE_GFX_PCI); +} + +typedef struct AppleGFXPCIInterruptJob { + PCIDevice *device; + uint32_t vector; +} AppleGFXPCIInterruptJob; + +static void apple_gfx_pci_raise_interrupt(void *opaque) +{ + AppleGFXPCIInterruptJob *job = opaque; + + if (msi_enabled(job->device)) { + msi_notify(job->device, job->vector); + } + g_free(job); +} + +static void apple_gfx_pci_interrupt(PCIDevice *dev, uint32_t vector) +{ + AppleGFXPCIInterruptJob *job; + + trace_apple_gfx_raise_irq(vector); + job = g_malloc0(sizeof(*job)); + job->device = dev; + job->vector = vector; + aio_bh_schedule_oneshot(qemu_get_aio_context(), + apple_gfx_pci_raise_interrupt, job); +} + +static void apple_gfx_pci_realize(PCIDevice *dev, Error **errp) +{ + AppleGFXPCIState *s = APPLE_GFX_PCI(dev); + int ret; + + pci_register_bar(dev, PG_PCI_BAR_MMIO, + PCI_BASE_ADDRESS_SPACE_MEMORY, &s->common.iomem_gfx); + + ret = msi_init(dev, 0x0 /* config offset; 0 = find space */, + PG_PCI_MAX_MSI_VECTORS, true /* msi64bit */, + false /* msi_per_vector_mask */, errp); + if (ret != 0) { + return; + } + + @autoreleasepool { + PGDeviceDescriptor *desc = [PGDeviceDescriptor new]; + desc.raiseInterrupt = ^(uint32_t vector) { + apple_gfx_pci_interrupt(dev, vector); + }; + + apple_gfx_common_realize(&s->common, DEVICE(dev), desc, errp); + [desc release]; + desc = nil; + } +} + +static void apple_gfx_pci_reset(Object *obj, ResetType type) +{ + AppleGFXPCIState *s = APPLE_GFX_PCI(obj); + [s->common.pgdev reset]; +} + +static const Property apple_gfx_pci_properties[] = { + DEFINE_PROP_ARRAY("display-modes", AppleGFXPCIState, + common.num_display_modes, common.display_modes, + qdev_prop_apple_gfx_display_mode, AppleGFXDisplayMode), +}; + +static void apple_gfx_pci_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *pci = PCI_DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->phases.hold = apple_gfx_pci_reset; + dc->desc = "macOS Paravirtualized Graphics PCI Display Controller"; + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + + pci->vendor_id = PG_PCI_VENDOR_ID; + pci->device_id = PG_PCI_DEVICE_ID; + pci->class_id = PCI_CLASS_DISPLAY_OTHER; + pci->realize = apple_gfx_pci_realize; + + device_class_set_props(dc, apple_gfx_pci_properties); +} + +static const TypeInfo apple_gfx_pci_types[] = { + { + .name = TYPE_APPLE_GFX_PCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(AppleGFXPCIState), + .class_init = apple_gfx_pci_class_init, + .instance_init = apple_gfx_pci_init, + .interfaces = (const InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { }, + }, + } +}; +DEFINE_TYPES(apple_gfx_pci_types) + diff --git a/hw/display/apple-gfx.h b/hw/display/apple-gfx.h new file mode 100644 index 00000000000..a8b1d1efc09 --- /dev/null +++ b/hw/display/apple-gfx.h @@ -0,0 +1,74 @@ +/* + * Data structures and functions shared between variants of the macOS + * ParavirtualizedGraphics.framework based apple-gfx display adapter. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef QEMU_APPLE_GFX_H +#define QEMU_APPLE_GFX_H + +#include "qemu/queue.h" +#include "system/memory.h" +#include "hw/qdev-properties.h" +#include "ui/surface.h" + +#define TYPE_APPLE_GFX_MMIO "apple-gfx-mmio" +#define TYPE_APPLE_GFX_PCI "apple-gfx-pci" + +@class PGDeviceDescriptor; +@protocol PGDevice; +@protocol PGDisplay; +@protocol MTLDevice; +@protocol MTLTexture; +@protocol MTLCommandQueue; + +typedef QTAILQ_HEAD(, PGTask_s) PGTaskList; + +typedef struct AppleGFXDisplayMode { + uint16_t width_px; + uint16_t height_px; + uint16_t refresh_rate_hz; +} AppleGFXDisplayMode; + +typedef struct AppleGFXState { + /* Initialised on init/realize() */ + MemoryRegion iomem_gfx; + id pgdev; + id pgdisp; + QemuConsole *con; + id mtl; + id mtl_queue; + AppleGFXDisplayMode *display_modes; + uint32_t num_display_modes; + + /* List `tasks` is protected by task_mutex */ + QemuMutex task_mutex; + PGTaskList tasks; + + /* Mutable state (BQL protected) */ + QEMUCursor *cursor; + DisplaySurface *surface; + id texture; + int8_t pending_frames; /* # guest frames in the rendering pipeline */ + bool gfx_update_requested; /* QEMU display system wants a new frame */ + bool new_frame_ready; /* Guest has rendered a frame, ready to be used */ + bool using_managed_texture_storage; + uint32_t rendering_frame_width; + uint32_t rendering_frame_height; + + /* Mutable state (atomic) */ + bool cursor_show; +} AppleGFXState; + +void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name); +bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, + PGDeviceDescriptor *desc, Error **errp); +void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical, + uint64_t length, bool read_only, + MemoryRegion **mapping_in_region); + +extern const PropertyInfo qdev_prop_apple_gfx_display_mode; + +#endif + diff --git a/hw/display/apple-gfx.m b/hw/display/apple-gfx.m new file mode 100644 index 00000000000..174d56ae05b --- /dev/null +++ b/hw/display/apple-gfx.m @@ -0,0 +1,880 @@ +/* + * QEMU Apple ParavirtualizedGraphics.framework device + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * ParavirtualizedGraphics.framework is a set of libraries that macOS provides + * which implements 3d graphics passthrough to the host as well as a + * proprietary guest communication channel to drive it. This device model + * implements support to drive that library from within QEMU. + */ + +#include "qemu/osdep.h" +#include "qemu/lockable.h" +#include "qemu/cutils.h" +#include "qemu/log.h" +#include "qapi/visitor.h" +#include "qapi/error.h" +#include "block/aio-wait.h" +#include "system/address-spaces.h" +#include "system/dma.h" +#include "migration/blocker.h" +#include "ui/console.h" +#include "apple-gfx.h" +#include "trace.h" + +#include +#include +#include + +#import + +static const AppleGFXDisplayMode apple_gfx_default_modes[] = { + { 1920, 1080, 60 }, + { 1440, 1080, 60 }, + { 1280, 1024, 60 }, +}; + +static Error *apple_gfx_mig_blocker; +static uint32_t next_pgdisplay_serial_num = 1; + +static dispatch_queue_t get_background_queue(void) +{ + return dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); +} + +/* ------ PGTask and task operations: new/destroy/map/unmap ------ */ + +/* + * This implements the type declared in + * which is opaque from the framework's point of view. 
It is used in callbacks + * in the form of its typedef PGTask_t, which also already exists in the + * framework headers. + * + * A "task" in PVG terminology represents a host-virtual contiguous address + * range which is reserved in a large chunk on task creation. The mapMemory + * callback then requests ranges of guest system memory (identified by their + * GPA) to be mapped into subranges of this reserved address space. + * This type of operation isn't well-supported by QEMU's memory subsystem, + * but it is fortunately trivial to achieve with Darwin's mach_vm_remap() call, + * which allows us to refer to the same backing memory via multiple virtual + * address ranges. The Mach VM APIs are therefore used throughout for managing + * task memory. + */ +struct PGTask_s { + QTAILQ_ENTRY(PGTask_s) node; + AppleGFXState *s; + mach_vm_address_t address; + uint64_t len; + /* + * All unique MemoryRegions for which a mapping has been created in this + * task, and on which we have thus called memory_region_ref(). There are + * usually very few regions of system RAM in total, so we expect this array + * to be very short. Therefore, no need for sorting or fancy search + * algorithms, linear search will do. + * Protected by AppleGFXState's task_mutex. + */ + GPtrArray *mapped_regions; +}; + +static PGTask_t *apple_gfx_new_task(AppleGFXState *s, uint64_t len) +{ + mach_vm_address_t task_mem; + PGTask_t *task; + kern_return_t r; + + r = mach_vm_allocate(mach_task_self(), &task_mem, len, VM_FLAGS_ANYWHERE); + if (r != KERN_SUCCESS) { + return NULL; + } + + task = g_new0(PGTask_t, 1); + task->s = s; + task->address = task_mem; + task->len = len; + task->mapped_regions = g_ptr_array_sized_new(2 /* Usually enough */); + + QEMU_LOCK_GUARD(&s->task_mutex); + QTAILQ_INSERT_TAIL(&s->tasks, task, node); + + return task; +} + +static void apple_gfx_destroy_task(AppleGFXState *s, PGTask_t *task) +{ + GPtrArray *regions = task->mapped_regions; + MemoryRegion *region; + size_t i; + + for (i = 0; i < regions->len; ++i) { + region = g_ptr_array_index(regions, i); + memory_region_unref(region); + } + g_ptr_array_unref(regions); + + mach_vm_deallocate(mach_task_self(), task->address, task->len); + + QEMU_LOCK_GUARD(&s->task_mutex); + QTAILQ_REMOVE(&s->tasks, task, node); + g_free(task); +} + +void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical, + uint64_t length, bool read_only, + MemoryRegion **mapping_in_region) +{ + MemoryRegion *ram_region; + char *host_ptr; + hwaddr ram_region_offset = 0; + hwaddr ram_region_length = length; + + ram_region = address_space_translate(&address_space_memory, + guest_physical, + &ram_region_offset, + &ram_region_length, !read_only, + MEMTXATTRS_UNSPECIFIED); + + if (!ram_region || ram_region_length < length || + !memory_access_is_direct(ram_region, !read_only, + MEMTXATTRS_UNSPECIFIED)) { + return NULL; + } + + host_ptr = memory_region_get_ram_ptr(ram_region); + if (!host_ptr) { + return NULL; + } + host_ptr += ram_region_offset; + *mapping_in_region = ram_region; + return host_ptr; +} + +static bool apple_gfx_task_map_memory(AppleGFXState *s, PGTask_t *task, + uint64_t virtual_offset, + PGPhysicalMemoryRange_t *ranges, + uint32_t range_count, bool read_only) +{ + kern_return_t r; + void *source_ptr; + mach_vm_address_t target; + vm_prot_t cur_protection, max_protection; + bool success = true; + MemoryRegion *region; + + RCU_READ_LOCK_GUARD(); + QEMU_LOCK_GUARD(&s->task_mutex); + + trace_apple_gfx_map_memory(task, range_count, virtual_offset, read_only); + for (int i = 0; i < 
range_count; i++) { + PGPhysicalMemoryRange_t *range = &ranges[i]; + + target = task->address + virtual_offset; + virtual_offset += range->physicalLength; + + trace_apple_gfx_map_memory_range(i, range->physicalAddress, + range->physicalLength); + + region = NULL; + source_ptr = apple_gfx_host_ptr_for_gpa_range(range->physicalAddress, + range->physicalLength, + read_only, ®ion); + if (!source_ptr) { + success = false; + continue; + } + + if (!g_ptr_array_find(task->mapped_regions, region, NULL)) { + g_ptr_array_add(task->mapped_regions, region); + memory_region_ref(region); + } + + cur_protection = 0; + max_protection = 0; + /* Map guest RAM at range->physicalAddress into PG task memory range */ + r = mach_vm_remap(mach_task_self(), + &target, range->physicalLength, vm_page_size - 1, + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, + mach_task_self(), (mach_vm_address_t)source_ptr, + false /* shared mapping, no copy */, + &cur_protection, &max_protection, + VM_INHERIT_COPY); + trace_apple_gfx_remap(r, source_ptr, target); + g_assert(r == KERN_SUCCESS); + } + + return success; +} + +static void apple_gfx_task_unmap_memory(AppleGFXState *s, PGTask_t *task, + uint64_t virtual_offset, uint64_t length) +{ + kern_return_t r; + mach_vm_address_t range_address; + + trace_apple_gfx_unmap_memory(task, virtual_offset, length); + + /* + * Replace task memory range with fresh 0 pages, undoing the mapping + * from guest RAM. + */ + range_address = task->address + virtual_offset; + r = mach_vm_allocate(mach_task_self(), &range_address, length, + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE); + g_assert(r == KERN_SUCCESS); +} + +/* ------ Rendering and frame management ------ */ + +static void apple_gfx_render_frame_completed_bh(void *opaque); + +static void apple_gfx_render_new_frame(AppleGFXState *s) +{ + bool managed_texture = s->using_managed_texture_storage; + uint32_t width = surface_width(s->surface); + uint32_t height = surface_height(s->surface); + MTLRegion region = MTLRegionMake2D(0, 0, width, height); + id command_buffer = [s->mtl_queue commandBuffer]; + id texture = s->texture; + + assert(bql_locked()); + [texture retain]; + [command_buffer retain]; + + s->rendering_frame_width = width; + s->rendering_frame_height = height; + + dispatch_async(get_background_queue(), ^{ + /* + * This is not safe to call from the BQL/BH due to PVG-internal locks + * causing deadlocks. + */ + bool r = [s->pgdisp encodeCurrentFrameToCommandBuffer:command_buffer + texture:texture + region:region]; + if (!r) { + [texture release]; + [command_buffer release]; + qemu_log_mask(LOG_GUEST_ERROR, + "%s: encodeCurrentFrameToCommandBuffer:texture:region: " + "failed\n", __func__); + bql_lock(); + --s->pending_frames; + if (s->pending_frames > 0) { + apple_gfx_render_new_frame(s); + } + bql_unlock(); + return; + } + + if (managed_texture) { + /* "Managed" textures exist in both VRAM and RAM and must be synced. */ + id blit = [command_buffer blitCommandEncoder]; + [blit synchronizeResource:texture]; + [blit endEncoding]; + } + [texture release]; + [command_buffer addCompletedHandler: + ^(id cb) + { + aio_bh_schedule_oneshot(qemu_get_aio_context(), + apple_gfx_render_frame_completed_bh, s); + }]; + [command_buffer commit]; + [command_buffer release]; + }); +} + +static void copy_mtl_texture_to_surface_mem(id texture, void *vram) +{ + /* + * TODO: Skip this entirely on a pure Metal or headless/guest-only + * rendering path, else use a blit command encoder? Needs careful + * (double?) buffering design. 
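+ *
+ * For now the frame is copied synchronously via getBytes: into the
+ * DisplaySurface's backing memory, 4 bytes per pixel (BGRA).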
+ */ + size_t width = texture.width, height = texture.height; + MTLRegion region = MTLRegionMake2D(0, 0, width, height); + [texture getBytes:vram + bytesPerRow:(width * 4) + bytesPerImage:(width * height * 4) + fromRegion:region + mipmapLevel:0 + slice:0]; +} + +static void apple_gfx_render_frame_completed_bh(void *opaque) +{ + AppleGFXState *s = opaque; + + @autoreleasepool { + --s->pending_frames; + assert(s->pending_frames >= 0); + + /* Only update display if mode hasn't changed since we started rendering. */ + if (s->rendering_frame_width == surface_width(s->surface) && + s->rendering_frame_height == surface_height(s->surface)) { + copy_mtl_texture_to_surface_mem(s->texture, surface_data(s->surface)); + if (s->gfx_update_requested) { + s->gfx_update_requested = false; + dpy_gfx_update_full(s->con); + graphic_hw_update_done(s->con); + s->new_frame_ready = false; + } else { + s->new_frame_ready = true; + } + } + if (s->pending_frames > 0) { + apple_gfx_render_new_frame(s); + } + } +} + +static void apple_gfx_fb_update_display(void *opaque) +{ + AppleGFXState *s = opaque; + + assert(bql_locked()); + if (s->new_frame_ready) { + dpy_gfx_update_full(s->con); + s->new_frame_ready = false; + graphic_hw_update_done(s->con); + } else if (s->pending_frames > 0) { + s->gfx_update_requested = true; + } else { + graphic_hw_update_done(s->con); + } +} + +static const GraphicHwOps apple_gfx_fb_ops = { + .gfx_update = apple_gfx_fb_update_display, + .gfx_update_async = true, +}; + +/* ------ Mouse cursor and display mode setting ------ */ + +static void set_mode(AppleGFXState *s, uint32_t width, uint32_t height) +{ + MTLTextureDescriptor *textureDescriptor; + + if (s->surface && + width == surface_width(s->surface) && + height == surface_height(s->surface)) { + return; + } + + [s->texture release]; + + s->surface = qemu_create_displaysurface(width, height); + + @autoreleasepool { + textureDescriptor = + [MTLTextureDescriptor + texture2DDescriptorWithPixelFormat:MTLPixelFormatBGRA8Unorm + width:width + height:height + mipmapped:NO]; + textureDescriptor.usage = s->pgdisp.minimumTextureUsage; + s->texture = [s->mtl newTextureWithDescriptor:textureDescriptor]; + s->using_managed_texture_storage = + (s->texture.storageMode == MTLStorageModeManaged); + } + + dpy_gfx_replace_surface(s->con, s->surface); +} + +static void update_cursor(AppleGFXState *s) +{ + assert(bql_locked()); + dpy_mouse_set(s->con, s->pgdisp.cursorPosition.x, + s->pgdisp.cursorPosition.y, qatomic_read(&s->cursor_show)); +} + +static void update_cursor_bh(void *opaque) +{ + AppleGFXState *s = opaque; + update_cursor(s); +} + +typedef struct AppleGFXSetCursorGlyphJob { + AppleGFXState *s; + NSBitmapImageRep *glyph; + PGDisplayCoord_t hotspot; +} AppleGFXSetCursorGlyphJob; + +static void set_cursor_glyph(void *opaque) +{ + AppleGFXSetCursorGlyphJob *job = opaque; + AppleGFXState *s = job->s; + NSBitmapImageRep *glyph = job->glyph; + uint32_t bpp = glyph.bitsPerPixel; + size_t width = glyph.pixelsWide; + size_t height = glyph.pixelsHigh; + size_t padding_bytes_per_row = glyph.bytesPerRow - width * 4; + const uint8_t* px_data = glyph.bitmapData; + + trace_apple_gfx_cursor_set(bpp, width, height); + + if (s->cursor) { + cursor_unref(s->cursor); + s->cursor = NULL; + } + + if (bpp == 32) { /* Shouldn't be anything else, but just to be safe... 
*/ + s->cursor = cursor_alloc(width, height); + s->cursor->hot_x = job->hotspot.x; + s->cursor->hot_y = job->hotspot.y; + + uint32_t *dest_px = s->cursor->data; + + for (size_t y = 0; y < height; ++y) { + for (size_t x = 0; x < width; ++x) { + /* + * NSBitmapImageRep's red & blue channels are swapped + * compared to QEMUCursor's. + */ + *dest_px = + (px_data[0] << 16u) | + (px_data[1] << 8u) | + (px_data[2] << 0u) | + (px_data[3] << 24u); + ++dest_px; + px_data += 4; + } + px_data += padding_bytes_per_row; + } + dpy_cursor_define(s->con, s->cursor); + update_cursor(s); + } + [glyph release]; + + g_free(job); +} + +/* ------ DMA (device reading system memory) ------ */ + +typedef struct AppleGFXReadMemoryJob { + QemuEvent event; + hwaddr physical_address; + uint64_t length; + void *dst; + bool success; +} AppleGFXReadMemoryJob; + +static void apple_gfx_do_read_memory(void *opaque) +{ + AppleGFXReadMemoryJob *job = opaque; + MemTxResult r; + + r = dma_memory_read(&address_space_memory, job->physical_address, + job->dst, job->length, MEMTXATTRS_UNSPECIFIED); + job->success = (r == MEMTX_OK); + + qemu_event_set(&job->event); +} + +static bool apple_gfx_read_memory(AppleGFXState *s, hwaddr physical_address, + uint64_t length, void *dst) +{ + AppleGFXReadMemoryJob job = { + .physical_address = physical_address, .length = length, .dst = dst + }; + + trace_apple_gfx_read_memory(physical_address, length, dst); + + /* Performing DMA requires BQL, so do it in a BH. */ + qemu_event_init(&job.event, 0); + aio_bh_schedule_oneshot(qemu_get_aio_context(), + apple_gfx_do_read_memory, &job); + qemu_event_wait(&job.event); + qemu_event_destroy(&job.event); + return job.success; +} + +/* ------ Memory-mapped device I/O operations ------ */ + +typedef struct AppleGFXIOJob { + AppleGFXState *state; + uint64_t offset; + uint64_t value; + bool completed; +} AppleGFXIOJob; + +static void apple_gfx_do_read(void *opaque) +{ + AppleGFXIOJob *job = opaque; + job->value = [job->state->pgdev mmioReadAtOffset:job->offset]; + qatomic_set(&job->completed, true); + aio_wait_kick(); +} + +static uint64_t apple_gfx_read(void *opaque, hwaddr offset, unsigned size) +{ + AppleGFXIOJob job = { + .state = opaque, + .offset = offset, + .completed = false, + }; + dispatch_queue_t queue = get_background_queue(); + + dispatch_async_f(queue, &job, apple_gfx_do_read); + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); + + trace_apple_gfx_read(offset, job.value); + return job.value; +} + +static void apple_gfx_do_write(void *opaque) +{ + AppleGFXIOJob *job = opaque; + [job->state->pgdev mmioWriteAtOffset:job->offset value:job->value]; + qatomic_set(&job->completed, true); + aio_wait_kick(); +} + +static void apple_gfx_write(void *opaque, hwaddr offset, uint64_t val, + unsigned size) +{ + /* + * The methods mmioReadAtOffset: and especially mmioWriteAtOffset: can + * trigger synchronous operations on other dispatch queues, which in turn + * may call back out on one or more of the callback blocks. For this reason, + * and as we are holding the BQL, we invoke the I/O methods on a pool + * thread and handle AIO tasks while we wait. Any work in the callbacks + * requiring the BQL will in turn schedule BHs which this thread will + * process while waiting. 
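+ *
+ * The IOSurface mapper MMIO region in apple-gfx-mmio.m (iosfc_read and
+ * iosfc_write) uses the same dispatch-to-background-queue plus
+ * AIO_WAIT_WHILE pattern.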
+ */ + AppleGFXIOJob job = { + .state = opaque, + .offset = offset, + .value = val, + .completed = false, + }; + dispatch_queue_t queue = get_background_queue(); + + dispatch_async_f(queue, &job, apple_gfx_do_write); + AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed)); + + trace_apple_gfx_write(offset, val); +} + +static const MemoryRegionOps apple_gfx_ops = { + .read = apple_gfx_read, + .write = apple_gfx_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static size_t apple_gfx_get_default_mmio_range_size(void) +{ + size_t mmio_range_size; + @autoreleasepool { + PGDeviceDescriptor *desc = [PGDeviceDescriptor new]; + mmio_range_size = desc.mmioLength; + [desc release]; + } + return mmio_range_size; +} + +/* ------ Initialisation and startup ------ */ + +void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name) +{ + size_t mmio_range_size = apple_gfx_get_default_mmio_range_size(); + + trace_apple_gfx_common_init(obj_name, mmio_range_size); + memory_region_init_io(&s->iomem_gfx, obj, &apple_gfx_ops, s, obj_name, + mmio_range_size); + + /* TODO: PVG framework supports serialising device state: integrate it! */ +} + +static void apple_gfx_register_task_mapping_handlers(AppleGFXState *s, + PGDeviceDescriptor *desc) +{ + desc.createTask = ^(uint64_t vmSize, void * _Nullable * _Nonnull baseAddress) { + PGTask_t *task = apple_gfx_new_task(s, vmSize); + *baseAddress = (void *)task->address; + trace_apple_gfx_create_task(vmSize, *baseAddress); + return task; + }; + + desc.destroyTask = ^(PGTask_t * _Nonnull task) { + trace_apple_gfx_destroy_task(task, task->mapped_regions->len); + + apple_gfx_destroy_task(s, task); + }; + + desc.mapMemory = ^bool(PGTask_t * _Nonnull task, uint32_t range_count, + uint64_t virtual_offset, bool read_only, + PGPhysicalMemoryRange_t * _Nonnull ranges) { + return apple_gfx_task_map_memory(s, task, virtual_offset, + ranges, range_count, read_only); + }; + + desc.unmapMemory = ^bool(PGTask_t * _Nonnull task, uint64_t virtual_offset, + uint64_t length) { + apple_gfx_task_unmap_memory(s, task, virtual_offset, length); + return true; + }; + + desc.readMemory = ^bool(uint64_t physical_address, uint64_t length, + void * _Nonnull dst) { + return apple_gfx_read_memory(s, physical_address, length, dst); + }; +} + +static void new_frame_handler_bh(void *opaque) +{ + AppleGFXState *s = opaque; + + /* Drop frames if guest gets too far ahead. 
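+ * At most two frames are kept in flight: one currently being rendered and
+ * one queued. Further guest frames are dropped here; the queued frame is
+ * kicked off from apple_gfx_render_frame_completed_bh() once the current
+ * one finishes.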
*/ + if (s->pending_frames >= 2) { + return; + } + ++s->pending_frames; + if (s->pending_frames > 1) { + return; + } + + @autoreleasepool { + apple_gfx_render_new_frame(s); + } +} + +static PGDisplayDescriptor *apple_gfx_prepare_display_descriptor(AppleGFXState *s) +{ + PGDisplayDescriptor *disp_desc = [PGDisplayDescriptor new]; + + disp_desc.name = @"QEMU display"; + disp_desc.sizeInMillimeters = NSMakeSize(400., 300.); /* A 20" display */ + disp_desc.queue = dispatch_get_main_queue(); + disp_desc.newFrameEventHandler = ^(void) { + trace_apple_gfx_new_frame(); + aio_bh_schedule_oneshot(qemu_get_aio_context(), new_frame_handler_bh, s); + }; + disp_desc.modeChangeHandler = ^(PGDisplayCoord_t sizeInPixels, + OSType pixelFormat) { + trace_apple_gfx_mode_change(sizeInPixels.x, sizeInPixels.y); + + BQL_LOCK_GUARD(); + set_mode(s, sizeInPixels.x, sizeInPixels.y); + }; + disp_desc.cursorGlyphHandler = ^(NSBitmapImageRep *glyph, + PGDisplayCoord_t hotspot) { + AppleGFXSetCursorGlyphJob *job = g_malloc0(sizeof(*job)); + job->s = s; + job->glyph = glyph; + job->hotspot = hotspot; + [glyph retain]; + aio_bh_schedule_oneshot(qemu_get_aio_context(), + set_cursor_glyph, job); + }; + disp_desc.cursorShowHandler = ^(BOOL show) { + trace_apple_gfx_cursor_show(show); + qatomic_set(&s->cursor_show, show); + aio_bh_schedule_oneshot(qemu_get_aio_context(), + update_cursor_bh, s); + }; + disp_desc.cursorMoveHandler = ^(void) { + trace_apple_gfx_cursor_move(); + aio_bh_schedule_oneshot(qemu_get_aio_context(), + update_cursor_bh, s); + }; + + return disp_desc; +} + +static NSArray *apple_gfx_create_display_mode_array( + const AppleGFXDisplayMode display_modes[], uint32_t display_mode_count) +{ + PGDisplayMode *mode_obj; + NSMutableArray *mode_array = + [[NSMutableArray alloc] initWithCapacity:display_mode_count]; + + for (unsigned i = 0; i < display_mode_count; i++) { + const AppleGFXDisplayMode *mode = &display_modes[i]; + trace_apple_gfx_display_mode(i, mode->width_px, mode->height_px); + PGDisplayCoord_t mode_size = { mode->width_px, mode->height_px }; + + mode_obj = + [[PGDisplayMode alloc] initWithSizeInPixels:mode_size + refreshRateInHz:mode->refresh_rate_hz]; + [mode_array addObject:mode_obj]; + [mode_obj release]; + } + + return mode_array; +} + +static id copy_suitable_metal_device(void) +{ + id dev = nil; + NSArray> *devs = MTLCopyAllDevices(); + + /* Prefer a unified memory GPU. Failing that, pick a non-removable GPU. 
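+ * If neither is available, fall back to MTLCreateSystemDefaultDevice().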
*/ + for (size_t i = 0; i < devs.count; ++i) { + if (devs[i].hasUnifiedMemory) { + dev = devs[i]; + break; + } + if (!devs[i].removable) { + dev = devs[i]; + } + } + + if (dev != nil) { + [dev retain]; + } else { + dev = MTLCreateSystemDefaultDevice(); + } + [devs release]; + + return dev; +} + +bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev, + PGDeviceDescriptor *desc, Error **errp) +{ + PGDisplayDescriptor *disp_desc; + const AppleGFXDisplayMode *display_modes = apple_gfx_default_modes; + uint32_t num_display_modes = ARRAY_SIZE(apple_gfx_default_modes); + NSArray *mode_array; + + if (apple_gfx_mig_blocker == NULL) { + error_setg(&apple_gfx_mig_blocker, + "Migration state blocked by apple-gfx display device"); + if (migrate_add_blocker(&apple_gfx_mig_blocker, errp) < 0) { + return false; + } + } + + qemu_mutex_init(&s->task_mutex); + QTAILQ_INIT(&s->tasks); + s->mtl = copy_suitable_metal_device(); + s->mtl_queue = [s->mtl newCommandQueue]; + + desc.device = s->mtl; + + apple_gfx_register_task_mapping_handlers(s, desc); + + s->cursor_show = true; + + s->pgdev = PGNewDeviceWithDescriptor(desc); + + disp_desc = apple_gfx_prepare_display_descriptor(s); + /* + * Although the framework does, this integration currently does not support + * multiple virtual displays connected to a single PV graphics device. + * It is however possible to create + * more than one instance of the device, each with one display. The macOS + * guest will ignore these displays if they share the same serial number, + * so ensure each instance gets a unique one. + */ + s->pgdisp = [s->pgdev newDisplayWithDescriptor:disp_desc + port:0 + serialNum:next_pgdisplay_serial_num++]; + [disp_desc release]; + + if (s->display_modes != NULL && s->num_display_modes > 0) { + trace_apple_gfx_common_realize_modes_property(s->num_display_modes); + display_modes = s->display_modes; + num_display_modes = s->num_display_modes; + } + s->pgdisp.modeList = mode_array = + apple_gfx_create_display_mode_array(display_modes, num_display_modes); + [mode_array release]; + + s->con = graphic_console_init(dev, 0, &apple_gfx_fb_ops, s); + return true; +} + +/* ------ Display mode list device property ------ */ + +static void apple_gfx_get_display_mode(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop); + /* 3 uint16s (max 5 digits) + 2 separator characters + nul. 
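+ * Worst case, e.g. "65535x65535@65535", is 3 * 5 digits + 2 separators
+ * + NUL terminator = 18 bytes, i.e. exactly 5 * 3 + 2 + 1.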
*/ + char buffer[5 * 3 + 2 + 1]; + char *pos = buffer; + + int rc = snprintf(buffer, sizeof(buffer), + "%"PRIu16"x%"PRIu16"@%"PRIu16, + mode->width_px, mode->height_px, + mode->refresh_rate_hz); + assert(rc < sizeof(buffer)); + + visit_type_str(v, name, &pos, errp); +} + +static void apple_gfx_set_display_mode(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop); + const char *endptr; + g_autofree char *str = NULL; + int ret; + int val; + + if (!visit_type_str(v, name, &str, errp)) { + return; + } + + endptr = str; + + ret = qemu_strtoi(endptr, &endptr, 10, &val); + if (ret || val > UINT16_MAX || val <= 0) { + error_setg(errp, "width in '%s' must be a decimal integer number" + " of pixels in the range 1..65535", name); + return; + } + mode->width_px = val; + if (*endptr != 'x') { + goto separator_error; + } + + ret = qemu_strtoi(endptr + 1, &endptr, 10, &val); + if (ret || val > UINT16_MAX || val <= 0) { + error_setg(errp, "height in '%s' must be a decimal integer number" + " of pixels in the range 1..65535", name); + return; + } + mode->height_px = val; + if (*endptr != '@') { + goto separator_error; + } + + ret = qemu_strtoi(endptr + 1, &endptr, 10, &val); + if (ret || val > UINT16_MAX || val <= 0) { + error_setg(errp, "refresh rate in '%s'" + " must be a positive decimal integer (Hertz)", name); + return; + } + mode->refresh_rate_hz = val; + return; + +separator_error: + error_setg(errp, + "Each display mode takes the format 'x@'"); +} + +const PropertyInfo qdev_prop_apple_gfx_display_mode = { + .type = "display_mode", + .description = + "Display mode in pixels and Hertz, as x@ " + "Example: 3840x2160@60", + .get = apple_gfx_get_display_mode, + .set = apple_gfx_set_display_mode, +}; diff --git a/hw/display/artist.c b/hw/display/artist.c index d9134532fb7..3c884c92437 100644 --- a/hw/display/artist.c +++ b/hw/display/artist.c @@ -12,6 +12,7 @@ #include "qemu/log.h" #include "qemu/module.h" #include "qemu/units.h" +#include "qemu/bswap.h" #include "qapi/error.h" #include "hw/sysbus.h" #include "hw/loader.h" @@ -48,6 +49,7 @@ struct ARTISTState { struct vram_buffer vram_buffer[16]; + bool disable; uint16_t width; uint16_t height; uint16_t depth; @@ -1211,8 +1213,8 @@ static uint64_t artist_reg_read(void *opaque, hwaddr addr, unsigned size) break; case 0x380004: - /* 0x02000000 Buserror */ - val = 0x6dc20006; + /* magic number detected by SeaBIOS-hppa */ + val = s->disable ? 
0 : 0x6dc20006; break; default: @@ -1432,7 +1434,7 @@ static int vmstate_artist_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_artist = { .name = "artist", - .version_id = 2, + .version_id = 3, .minimum_version_id = 2, .post_load = vmstate_artist_post_load, .fields = (const VMStateField[]) { @@ -1470,28 +1472,29 @@ static const VMStateDescription vmstate_artist = { VMSTATE_UINT32(font_write1, ARTISTState), VMSTATE_UINT32(font_write2, ARTISTState), VMSTATE_UINT32(font_write_pos_y, ARTISTState), + VMSTATE_BOOL(disable, ARTISTState), VMSTATE_END_OF_LIST() } }; -static Property artist_properties[] = { +static const Property artist_properties[] = { DEFINE_PROP_UINT16("width", ARTISTState, width, 1280), DEFINE_PROP_UINT16("height", ARTISTState, height, 1024), DEFINE_PROP_UINT16("depth", ARTISTState, depth, 8), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_BOOL("disable", ARTISTState, disable, false), }; static void artist_reset(DeviceState *qdev) { } -static void artist_class_init(ObjectClass *klass, void *data) +static void artist_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = artist_realizefn; dc->vmsd = &vmstate_artist; - dc->reset = artist_reset; + device_class_set_legacy_reset(dc, artist_reset); device_class_set_props(dc, artist_properties); } diff --git a/hw/display/ati.c b/hw/display/ati.c index b1f94f5b767..f7c0006a879 100644 --- a/hw/display/ati.c +++ b/hw/display/ati.c @@ -22,6 +22,7 @@ #include "vga-access.h" #include "hw/qdev-properties.h" #include "vga_regs.h" +#include "qemu/bswap.h" #include "qemu/log.h" #include "qemu/module.h" #include "qemu/error-report.h" @@ -1039,7 +1040,7 @@ static void ati_vga_exit(PCIDevice *dev) graphic_console_close(s->vga.con); } -static Property ati_vga_properties[] = { +static const Property ati_vga_properties[] = { DEFINE_PROP_UINT32("vgamem_mb", ATIVGAState, vga.vram_size_mb, 16), DEFINE_PROP_STRING("model", ATIVGAState, model), DEFINE_PROP_UINT16("x-device-id", ATIVGAState, dev_id, @@ -1047,15 +1048,14 @@ static Property ati_vga_properties[] = { DEFINE_PROP_BOOL("guest_hwcursor", ATIVGAState, cursor_guest_mode, false), /* this is a debug option, prefer PROP_UINT over PROP_BIT for simplicity */ DEFINE_PROP_UINT8("x-pixman", ATIVGAState, use_pixman, DEFAULT_X_PIXMAN), - DEFINE_PROP_END_OF_LIST() }; -static void ati_vga_class_init(ObjectClass *klass, void *data) +static void ati_vga_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - dc->reset = ati_vga_reset; + device_class_set_legacy_reset(dc, ati_vga_reset); device_class_set_props(dc, ati_vga_properties); dc->hotpluggable = false; set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); @@ -1080,7 +1080,7 @@ static const TypeInfo ati_vga_info = { .instance_size = sizeof(ATIVGAState), .class_init = ati_vga_class_init, .instance_init = ati_vga_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/display/bcm2835_fb.c b/hw/display/bcm2835_fb.c index 650db3da82c..820e67ac8bb 100644 --- a/hw/display/bcm2835_fb.c +++ b/hw/display/bcm2835_fb.c @@ -429,7 +429,7 @@ static void bcm2835_fb_realize(DeviceState *dev, Error **errp) qemu_console_resize(s->con, s->config.xres, s->config.yres); } -static Property bcm2835_fb_props[] = { +static const Property bcm2835_fb_props[] = { DEFINE_PROP_UINT32("vcram-base", BCM2835FBState, vcram_base, 0),/*required*/ 
DEFINE_PROP_UINT32("vcram-size", BCM2835FBState, vcram_size, DEFAULT_VCRAM_SIZE), @@ -440,16 +440,15 @@ static Property bcm2835_fb_props[] = { initial_config.pixo, 1), /* 1=RGB, 0=BGR */ DEFINE_PROP_UINT32("alpha", BCM2835FBState, initial_config.alpha, 2), /* alpha ignored */ - DEFINE_PROP_END_OF_LIST() }; -static void bcm2835_fb_class_init(ObjectClass *klass, void *data) +static void bcm2835_fb_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, bcm2835_fb_props); dc->realize = bcm2835_fb_realize; - dc->reset = bcm2835_fb_reset; + device_class_set_legacy_reset(dc, bcm2835_fb_reset); dc->vmsd = &vmstate_bcm2835_fb; } diff --git a/hw/display/blizzard.c b/hw/display/blizzard.c deleted file mode 100644 index 030abbe7d4d..00000000000 --- a/hw/display/blizzard.c +++ /dev/null @@ -1,1026 +0,0 @@ -/* - * Epson S1D13744/S1D13745 (Blizzard/Hailstorm/Tornado) LCD/TV controller. - * - * Copyright (C) 2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - */ - -#include "qemu/osdep.h" -#include "qemu/bitops.h" -#include "ui/console.h" -#include "hw/display/blizzard.h" -#include "ui/pixel_ops.h" - -typedef void (*blizzard_fn_t)(uint8_t *, const uint8_t *, unsigned int); - -typedef struct { - uint8_t reg; - uint32_t addr; - int swallow; - - int pll; - int pll_range; - int pll_ctrl; - uint8_t pll_mode; - uint8_t clksel; - int memenable; - int memrefresh; - uint8_t timing[3]; - int priority; - - uint8_t lcd_config; - int x; - int y; - int skipx; - int skipy; - uint8_t hndp; - uint8_t vndp; - uint8_t hsync; - uint8_t vsync; - uint8_t pclk; - uint8_t u; - uint8_t v; - uint8_t yrc[2]; - int ix[2]; - int iy[2]; - int ox[2]; - int oy[2]; - - int enable; - int blank; - int bpp; - int invalidate; - int mx[2]; - int my[2]; - uint8_t mode; - uint8_t effect; - uint8_t iformat; - uint8_t source; - QemuConsole *con; - blizzard_fn_t *line_fn_tab[2]; - void *fb; - - uint8_t hssi_config[3]; - uint8_t tv_config; - uint8_t tv_timing[4]; - uint8_t vbi; - uint8_t tv_x; - uint8_t tv_y; - uint8_t tv_test; - uint8_t tv_filter_config; - uint8_t tv_filter_idx; - uint8_t tv_filter_coeff[0x20]; - uint8_t border_r; - uint8_t border_g; - uint8_t border_b; - uint8_t gamma_config; - uint8_t gamma_idx; - uint8_t gamma_lut[0x100]; - uint8_t matrix_ena; - uint8_t matrix_coeff[0x12]; - uint8_t matrix_r; - uint8_t matrix_g; - uint8_t matrix_b; - uint8_t pm; - uint8_t status; - uint8_t rgbgpio_dir; - uint8_t rgbgpio; - uint8_t gpio_dir; - uint8_t gpio; - uint8_t gpio_edge[2]; - uint8_t gpio_irq; - uint8_t gpio_pdown; - - struct { - int x; - int y; - int dx; - int dy; - int len; - int buflen; - void *buf; - void *data; - uint16_t *ptr; - int angle; - int pitch; - blizzard_fn_t line_fn; - } data; -} BlizzardState; - -/* Bytes(!) 
per pixel */ -static const int blizzard_iformat_bpp[0x10] = { - 0, - 2, /* RGB 5:6:5*/ - 3, /* RGB 6:6:6 mode 1 */ - 3, /* RGB 8:8:8 mode 1 */ - 0, 0, - 4, /* RGB 6:6:6 mode 2 */ - 4, /* RGB 8:8:8 mode 2 */ - 0, /* YUV 4:2:2 */ - 0, /* YUV 4:2:0 */ - 0, 0, 0, 0, 0, 0, -}; - -static void blizzard_window(BlizzardState *s) -{ - DisplaySurface *surface = qemu_console_surface(s->con); - uint8_t *src, *dst; - int bypp[2]; - int bypl[3]; - int y; - blizzard_fn_t fn = s->data.line_fn; - - if (!fn) - return; - if (s->mx[0] > s->data.x) - s->mx[0] = s->data.x; - if (s->my[0] > s->data.y) - s->my[0] = s->data.y; - if (s->mx[1] < s->data.x + s->data.dx) - s->mx[1] = s->data.x + s->data.dx; - if (s->my[1] < s->data.y + s->data.dy) - s->my[1] = s->data.y + s->data.dy; - - bypp[0] = s->bpp; - bypp[1] = surface_bytes_per_pixel(surface); - bypl[0] = bypp[0] * s->data.pitch; - bypl[1] = bypp[1] * s->x; - bypl[2] = bypp[0] * s->data.dx; - - src = s->data.data; - dst = s->fb + bypl[1] * s->data.y + bypp[1] * s->data.x; - for (y = s->data.dy; y > 0; y --, src += bypl[0], dst += bypl[1]) - fn(dst, src, bypl[2]); -} - -static int blizzard_transfer_setup(BlizzardState *s) -{ - if (s->source > 3 || !s->bpp || - s->ix[1] < s->ix[0] || s->iy[1] < s->iy[0]) - return 0; - - s->data.angle = s->effect & 3; - s->data.line_fn = s->line_fn_tab[!!s->data.angle][s->iformat]; - s->data.x = s->ix[0]; - s->data.y = s->iy[0]; - s->data.dx = s->ix[1] - s->ix[0] + 1; - s->data.dy = s->iy[1] - s->iy[0] + 1; - s->data.len = s->bpp * s->data.dx * s->data.dy; - s->data.pitch = s->data.dx; - if (s->data.len > s->data.buflen) { - s->data.buf = g_realloc(s->data.buf, s->data.len); - s->data.buflen = s->data.len; - } - s->data.ptr = s->data.buf; - s->data.data = s->data.buf; - s->data.len /= 2; - return 1; -} - -static void blizzard_reset(BlizzardState *s) -{ - s->reg = 0; - s->swallow = 0; - - s->pll = 9; - s->pll_range = 1; - s->pll_ctrl = 0x14; - s->pll_mode = 0x32; - s->clksel = 0x00; - s->memenable = 0; - s->memrefresh = 0x25c; - s->timing[0] = 0x3f; - s->timing[1] = 0x13; - s->timing[2] = 0x21; - s->priority = 0; - - s->lcd_config = 0x74; - s->x = 8; - s->y = 1; - s->skipx = 0; - s->skipy = 0; - s->hndp = 3; - s->vndp = 2; - s->hsync = 1; - s->vsync = 1; - s->pclk = 0x80; - - s->ix[0] = 0; - s->ix[1] = 0; - s->iy[0] = 0; - s->iy[1] = 0; - s->ox[0] = 0; - s->ox[1] = 0; - s->oy[0] = 0; - s->oy[1] = 0; - - s->yrc[0] = 0x00; - s->yrc[1] = 0x30; - s->u = 0; - s->v = 0; - - s->iformat = 3; - s->source = 0; - s->bpp = blizzard_iformat_bpp[s->iformat]; - - s->hssi_config[0] = 0x00; - s->hssi_config[1] = 0x00; - s->hssi_config[2] = 0x01; - s->tv_config = 0x00; - s->tv_timing[0] = 0x00; - s->tv_timing[1] = 0x00; - s->tv_timing[2] = 0x00; - s->tv_timing[3] = 0x00; - s->vbi = 0x10; - s->tv_x = 0x14; - s->tv_y = 0x03; - s->tv_test = 0x00; - s->tv_filter_config = 0x80; - s->tv_filter_idx = 0x00; - s->border_r = 0x10; - s->border_g = 0x80; - s->border_b = 0x80; - s->gamma_config = 0x00; - s->gamma_idx = 0x00; - s->matrix_ena = 0x00; - memset(&s->matrix_coeff, 0, sizeof(s->matrix_coeff)); - s->matrix_r = 0x00; - s->matrix_g = 0x00; - s->matrix_b = 0x00; - s->pm = 0x02; - s->status = 0x00; - s->rgbgpio_dir = 0x00; - s->gpio_dir = 0x00; - s->gpio_edge[0] = 0x00; - s->gpio_edge[1] = 0x00; - s->gpio_irq = 0x00; - s->gpio_pdown = 0xff; -} - -static inline void blizzard_invalidate_display(void *opaque) { - BlizzardState *s = (BlizzardState *) opaque; - - s->invalidate = 1; -} - -static uint16_t blizzard_reg_read(void *opaque, uint8_t reg) -{ - 
BlizzardState *s = (BlizzardState *) opaque; - - switch (reg) { - case 0x00: /* Revision Code */ - return 0xa5; - - case 0x02: /* Configuration Readback */ - return 0x83; /* Macrovision OK, CNF[2:0] = 3 */ - - case 0x04: /* PLL M-Divider */ - return (s->pll - 1) | (1 << 7); - case 0x06: /* PLL Lock Range Control */ - return s->pll_range; - case 0x08: /* PLL Lock Synthesis Control 0 */ - return s->pll_ctrl & 0xff; - case 0x0a: /* PLL Lock Synthesis Control 1 */ - return s->pll_ctrl >> 8; - case 0x0c: /* PLL Mode Control 0 */ - return s->pll_mode; - - case 0x0e: /* Clock-Source Select */ - return s->clksel; - - case 0x10: /* Memory Controller Activate */ - case 0x14: /* Memory Controller Bank 0 Status Flag */ - return s->memenable; - - case 0x18: /* Auto-Refresh Interval Setting 0 */ - return s->memrefresh & 0xff; - case 0x1a: /* Auto-Refresh Interval Setting 1 */ - return s->memrefresh >> 8; - - case 0x1c: /* Power-On Sequence Timing Control */ - return s->timing[0]; - case 0x1e: /* Timing Control 0 */ - return s->timing[1]; - case 0x20: /* Timing Control 1 */ - return s->timing[2]; - - case 0x24: /* Arbitration Priority Control */ - return s->priority; - - case 0x28: /* LCD Panel Configuration */ - return s->lcd_config; - - case 0x2a: /* LCD Horizontal Display Width */ - return s->x >> 3; - case 0x2c: /* LCD Horizontal Non-display Period */ - return s->hndp; - case 0x2e: /* LCD Vertical Display Height 0 */ - return s->y & 0xff; - case 0x30: /* LCD Vertical Display Height 1 */ - return s->y >> 8; - case 0x32: /* LCD Vertical Non-display Period */ - return s->vndp; - case 0x34: /* LCD HS Pulse-width */ - return s->hsync; - case 0x36: /* LCd HS Pulse Start Position */ - return s->skipx >> 3; - case 0x38: /* LCD VS Pulse-width */ - return s->vsync; - case 0x3a: /* LCD VS Pulse Start Position */ - return s->skipy; - - case 0x3c: /* PCLK Polarity */ - return s->pclk; - - case 0x3e: /* High-speed Serial Interface Tx Configuration Port 0 */ - return s->hssi_config[0]; - case 0x40: /* High-speed Serial Interface Tx Configuration Port 1 */ - return s->hssi_config[1]; - case 0x42: /* High-speed Serial Interface Tx Mode */ - return s->hssi_config[2]; - case 0x44: /* TV Display Configuration */ - return s->tv_config; - case 0x46 ... 
0x4c: /* TV Vertical Blanking Interval Data bits */ - return s->tv_timing[(reg - 0x46) >> 1]; - case 0x4e: /* VBI: Closed Caption / XDS Control / Status */ - return s->vbi; - case 0x50: /* TV Horizontal Start Position */ - return s->tv_x; - case 0x52: /* TV Vertical Start Position */ - return s->tv_y; - case 0x54: /* TV Test Pattern Setting */ - return s->tv_test; - case 0x56: /* TV Filter Setting */ - return s->tv_filter_config; - case 0x58: /* TV Filter Coefficient Index */ - return s->tv_filter_idx; - case 0x5a: /* TV Filter Coefficient Data */ - if (s->tv_filter_idx < 0x20) - return s->tv_filter_coeff[s->tv_filter_idx ++]; - return 0; - - case 0x60: /* Input YUV/RGB Translate Mode 0 */ - return s->yrc[0]; - case 0x62: /* Input YUV/RGB Translate Mode 1 */ - return s->yrc[1]; - case 0x64: /* U Data Fix */ - return s->u; - case 0x66: /* V Data Fix */ - return s->v; - - case 0x68: /* Display Mode */ - return s->mode; - - case 0x6a: /* Special Effects */ - return s->effect; - - case 0x6c: /* Input Window X Start Position 0 */ - return s->ix[0] & 0xff; - case 0x6e: /* Input Window X Start Position 1 */ - return s->ix[0] >> 3; - case 0x70: /* Input Window Y Start Position 0 */ - return s->ix[0] & 0xff; - case 0x72: /* Input Window Y Start Position 1 */ - return s->ix[0] >> 3; - case 0x74: /* Input Window X End Position 0 */ - return s->ix[1] & 0xff; - case 0x76: /* Input Window X End Position 1 */ - return s->ix[1] >> 3; - case 0x78: /* Input Window Y End Position 0 */ - return s->ix[1] & 0xff; - case 0x7a: /* Input Window Y End Position 1 */ - return s->ix[1] >> 3; - case 0x7c: /* Output Window X Start Position 0 */ - return s->ox[0] & 0xff; - case 0x7e: /* Output Window X Start Position 1 */ - return s->ox[0] >> 3; - case 0x80: /* Output Window Y Start Position 0 */ - return s->oy[0] & 0xff; - case 0x82: /* Output Window Y Start Position 1 */ - return s->oy[0] >> 3; - case 0x84: /* Output Window X End Position 0 */ - return s->ox[1] & 0xff; - case 0x86: /* Output Window X End Position 1 */ - return s->ox[1] >> 3; - case 0x88: /* Output Window Y End Position 0 */ - return s->oy[1] & 0xff; - case 0x8a: /* Output Window Y End Position 1 */ - return s->oy[1] >> 3; - - case 0x8c: /* Input Data Format */ - return s->iformat; - case 0x8e: /* Data Source Select */ - return s->source; - case 0x90: /* Display Memory Data Port */ - return 0; - - case 0xa8: /* Border Color 0 */ - return s->border_r; - case 0xaa: /* Border Color 1 */ - return s->border_g; - case 0xac: /* Border Color 2 */ - return s->border_b; - - case 0xb4: /* Gamma Correction Enable */ - return s->gamma_config; - case 0xb6: /* Gamma Correction Table Index */ - return s->gamma_idx; - case 0xb8: /* Gamma Correction Table Data */ - return s->gamma_lut[s->gamma_idx ++]; - - case 0xba: /* 3x3 Matrix Enable */ - return s->matrix_ena; - case 0xbc ... 
0xde: /* Coefficient Registers */ - return s->matrix_coeff[(reg - 0xbc) >> 1]; - case 0xe0: /* 3x3 Matrix Red Offset */ - return s->matrix_r; - case 0xe2: /* 3x3 Matrix Green Offset */ - return s->matrix_g; - case 0xe4: /* 3x3 Matrix Blue Offset */ - return s->matrix_b; - - case 0xe6: /* Power-save */ - return s->pm; - case 0xe8: /* Non-display Period Control / Status */ - return s->status | (1 << 5); - case 0xea: /* RGB Interface Control */ - return s->rgbgpio_dir; - case 0xec: /* RGB Interface Status */ - return s->rgbgpio; - case 0xee: /* General-purpose IO Pins Configuration */ - return s->gpio_dir; - case 0xf0: /* General-purpose IO Pins Status / Control */ - return s->gpio; - case 0xf2: /* GPIO Positive Edge Interrupt Trigger */ - return s->gpio_edge[0]; - case 0xf4: /* GPIO Negative Edge Interrupt Trigger */ - return s->gpio_edge[1]; - case 0xf6: /* GPIO Interrupt Status */ - return s->gpio_irq; - case 0xf8: /* GPIO Pull-down Control */ - return s->gpio_pdown; - - default: - fprintf(stderr, "%s: unknown register %02x\n", __func__, reg); - return 0; - } -} - -static void blizzard_reg_write(void *opaque, uint8_t reg, uint16_t value) -{ - BlizzardState *s = (BlizzardState *) opaque; - - switch (reg) { - case 0x04: /* PLL M-Divider */ - s->pll = (value & 0x3f) + 1; - break; - case 0x06: /* PLL Lock Range Control */ - s->pll_range = value & 3; - break; - case 0x08: /* PLL Lock Synthesis Control 0 */ - s->pll_ctrl &= 0xf00; - s->pll_ctrl |= (value << 0) & 0x0ff; - break; - case 0x0a: /* PLL Lock Synthesis Control 1 */ - s->pll_ctrl &= 0x0ff; - s->pll_ctrl |= (value << 8) & 0xf00; - break; - case 0x0c: /* PLL Mode Control 0 */ - s->pll_mode = value & 0x77; - if ((value & 3) == 0 || (value & 3) == 3) - fprintf(stderr, "%s: wrong PLL Control bits (%i)\n", - __func__, value & 3); - break; - - case 0x0e: /* Clock-Source Select */ - s->clksel = value & 0xff; - break; - - case 0x10: /* Memory Controller Activate */ - s->memenable = value & 1; - break; - case 0x14: /* Memory Controller Bank 0 Status Flag */ - break; - - case 0x18: /* Auto-Refresh Interval Setting 0 */ - s->memrefresh &= 0xf00; - s->memrefresh |= (value << 0) & 0x0ff; - break; - case 0x1a: /* Auto-Refresh Interval Setting 1 */ - s->memrefresh &= 0x0ff; - s->memrefresh |= (value << 8) & 0xf00; - break; - - case 0x1c: /* Power-On Sequence Timing Control */ - s->timing[0] = value & 0x7f; - break; - case 0x1e: /* Timing Control 0 */ - s->timing[1] = value & 0x17; - break; - case 0x20: /* Timing Control 1 */ - s->timing[2] = value & 0x35; - break; - - case 0x24: /* Arbitration Priority Control */ - s->priority = value & 1; - break; - - case 0x28: /* LCD Panel Configuration */ - s->lcd_config = value & 0xff; - if (value & (1 << 7)) - fprintf(stderr, "%s: data swap not supported!\n", __func__); - break; - - case 0x2a: /* LCD Horizontal Display Width */ - s->x = value << 3; - break; - case 0x2c: /* LCD Horizontal Non-display Period */ - s->hndp = value & 0xff; - break; - case 0x2e: /* LCD Vertical Display Height 0 */ - s->y &= 0x300; - s->y |= (value << 0) & 0x0ff; - break; - case 0x30: /* LCD Vertical Display Height 1 */ - s->y &= 0x0ff; - s->y |= (value << 8) & 0x300; - break; - case 0x32: /* LCD Vertical Non-display Period */ - s->vndp = value & 0xff; - break; - case 0x34: /* LCD HS Pulse-width */ - s->hsync = value & 0xff; - break; - case 0x36: /* LCD HS Pulse Start Position */ - s->skipx = value & 0xff; - break; - case 0x38: /* LCD VS Pulse-width */ - s->vsync = value & 0xbf; - break; - case 0x3a: /* LCD VS Pulse Start Position */ - 
s->skipy = value & 0xff; - break; - - case 0x3c: /* PCLK Polarity */ - s->pclk = value & 0x82; - /* Affects calculation of s->hndp, s->hsync and s->skipx. */ - break; - - case 0x3e: /* High-speed Serial Interface Tx Configuration Port 0 */ - s->hssi_config[0] = value; - break; - case 0x40: /* High-speed Serial Interface Tx Configuration Port 1 */ - s->hssi_config[1] = value; - if (((value >> 4) & 3) == 3) - fprintf(stderr, "%s: Illegal active-data-links value\n", - __func__); - break; - case 0x42: /* High-speed Serial Interface Tx Mode */ - s->hssi_config[2] = value & 0xbd; - break; - - case 0x44: /* TV Display Configuration */ - s->tv_config = value & 0xfe; - break; - case 0x46 ... 0x4c: /* TV Vertical Blanking Interval Data bits 0 */ - s->tv_timing[(reg - 0x46) >> 1] = value; - break; - case 0x4e: /* VBI: Closed Caption / XDS Control / Status */ - s->vbi = value; - break; - case 0x50: /* TV Horizontal Start Position */ - s->tv_x = value; - break; - case 0x52: /* TV Vertical Start Position */ - s->tv_y = value & 0x7f; - break; - case 0x54: /* TV Test Pattern Setting */ - s->tv_test = value; - break; - case 0x56: /* TV Filter Setting */ - s->tv_filter_config = value & 0xbf; - break; - case 0x58: /* TV Filter Coefficient Index */ - s->tv_filter_idx = value & 0x1f; - break; - case 0x5a: /* TV Filter Coefficient Data */ - if (s->tv_filter_idx < 0x20) - s->tv_filter_coeff[s->tv_filter_idx ++] = value; - break; - - case 0x60: /* Input YUV/RGB Translate Mode 0 */ - s->yrc[0] = value & 0xb0; - break; - case 0x62: /* Input YUV/RGB Translate Mode 1 */ - s->yrc[1] = value & 0x30; - break; - case 0x64: /* U Data Fix */ - s->u = value & 0xff; - break; - case 0x66: /* V Data Fix */ - s->v = value & 0xff; - break; - - case 0x68: /* Display Mode */ - if ((s->mode ^ value) & 3) - s->invalidate = 1; - s->mode = value & 0xb7; - s->enable = value & 1; - s->blank = (value >> 1) & 1; - if (value & (1 << 4)) - fprintf(stderr, "%s: Macrovision enable attempt!\n", __func__); - break; - - case 0x6a: /* Special Effects */ - s->effect = value & 0xfb; - break; - - case 0x6c: /* Input Window X Start Position 0 */ - s->ix[0] &= 0x300; - s->ix[0] |= (value << 0) & 0x0ff; - break; - case 0x6e: /* Input Window X Start Position 1 */ - s->ix[0] &= 0x0ff; - s->ix[0] |= (value << 8) & 0x300; - break; - case 0x70: /* Input Window Y Start Position 0 */ - s->iy[0] &= 0x300; - s->iy[0] |= (value << 0) & 0x0ff; - break; - case 0x72: /* Input Window Y Start Position 1 */ - s->iy[0] &= 0x0ff; - s->iy[0] |= (value << 8) & 0x300; - break; - case 0x74: /* Input Window X End Position 0 */ - s->ix[1] &= 0x300; - s->ix[1] |= (value << 0) & 0x0ff; - break; - case 0x76: /* Input Window X End Position 1 */ - s->ix[1] &= 0x0ff; - s->ix[1] |= (value << 8) & 0x300; - break; - case 0x78: /* Input Window Y End Position 0 */ - s->iy[1] &= 0x300; - s->iy[1] |= (value << 0) & 0x0ff; - break; - case 0x7a: /* Input Window Y End Position 1 */ - s->iy[1] &= 0x0ff; - s->iy[1] |= (value << 8) & 0x300; - break; - case 0x7c: /* Output Window X Start Position 0 */ - s->ox[0] &= 0x300; - s->ox[0] |= (value << 0) & 0x0ff; - break; - case 0x7e: /* Output Window X Start Position 1 */ - s->ox[0] &= 0x0ff; - s->ox[0] |= (value << 8) & 0x300; - break; - case 0x80: /* Output Window Y Start Position 0 */ - s->oy[0] &= 0x300; - s->oy[0] |= (value << 0) & 0x0ff; - break; - case 0x82: /* Output Window Y Start Position 1 */ - s->oy[0] &= 0x0ff; - s->oy[0] |= (value << 8) & 0x300; - break; - case 0x84: /* Output Window X End Position 0 */ - s->ox[1] &= 0x300; - s->ox[1] 
|= (value << 0) & 0x0ff; - break; - case 0x86: /* Output Window X End Position 1 */ - s->ox[1] &= 0x0ff; - s->ox[1] |= (value << 8) & 0x300; - break; - case 0x88: /* Output Window Y End Position 0 */ - s->oy[1] &= 0x300; - s->oy[1] |= (value << 0) & 0x0ff; - break; - case 0x8a: /* Output Window Y End Position 1 */ - s->oy[1] &= 0x0ff; - s->oy[1] |= (value << 8) & 0x300; - break; - - case 0x8c: /* Input Data Format */ - s->iformat = value & 0xf; - s->bpp = blizzard_iformat_bpp[s->iformat]; - if (!s->bpp) - fprintf(stderr, "%s: Illegal or unsupported input format %x\n", - __func__, s->iformat); - break; - case 0x8e: /* Data Source Select */ - s->source = value & 7; - /* Currently all windows will be "destructive overlays". */ - if ((!(s->effect & (1 << 3)) && (s->ix[0] != s->ox[0] || - s->iy[0] != s->oy[0] || - s->ix[1] != s->ox[1] || - s->iy[1] != s->oy[1])) || - !((s->ix[1] - s->ix[0]) & (s->iy[1] - s->iy[0]) & - (s->ox[1] - s->ox[0]) & (s->oy[1] - s->oy[0]) & 1)) - fprintf(stderr, "%s: Illegal input/output window positions\n", - __func__); - - blizzard_transfer_setup(s); - break; - - case 0x90: /* Display Memory Data Port */ - if (!s->data.len && !blizzard_transfer_setup(s)) - break; - - *s->data.ptr ++ = value; - if (-- s->data.len == 0) - blizzard_window(s); - break; - - case 0xa8: /* Border Color 0 */ - s->border_r = value; - break; - case 0xaa: /* Border Color 1 */ - s->border_g = value; - break; - case 0xac: /* Border Color 2 */ - s->border_b = value; - break; - - case 0xb4: /* Gamma Correction Enable */ - s->gamma_config = value & 0x87; - break; - case 0xb6: /* Gamma Correction Table Index */ - s->gamma_idx = value; - break; - case 0xb8: /* Gamma Correction Table Data */ - s->gamma_lut[s->gamma_idx ++] = value; - break; - - case 0xba: /* 3x3 Matrix Enable */ - s->matrix_ena = value & 1; - break; - case 0xbc ... 0xde: /* Coefficient Registers */ - s->matrix_coeff[(reg - 0xbc) >> 1] = value & ((reg & 2) ? 
0x80 : 0xff); - break; - case 0xe0: /* 3x3 Matrix Red Offset */ - s->matrix_r = value; - break; - case 0xe2: /* 3x3 Matrix Green Offset */ - s->matrix_g = value; - break; - case 0xe4: /* 3x3 Matrix Blue Offset */ - s->matrix_b = value; - break; - - case 0xe6: /* Power-save */ - s->pm = value & 0x83; - if (value & s->mode & 1) - fprintf(stderr, "%s: The display must be disabled before entering " - "Standby Mode\n", __func__); - break; - case 0xe8: /* Non-display Period Control / Status */ - s->status = value & 0x1b; - break; - case 0xea: /* RGB Interface Control */ - s->rgbgpio_dir = value & 0x8f; - break; - case 0xec: /* RGB Interface Status */ - s->rgbgpio = value & 0xcf; - break; - case 0xee: /* General-purpose IO Pins Configuration */ - s->gpio_dir = value; - break; - case 0xf0: /* General-purpose IO Pins Status / Control */ - s->gpio = value; - break; - case 0xf2: /* GPIO Positive Edge Interrupt Trigger */ - s->gpio_edge[0] = value; - break; - case 0xf4: /* GPIO Negative Edge Interrupt Trigger */ - s->gpio_edge[1] = value; - break; - case 0xf6: /* GPIO Interrupt Status */ - s->gpio_irq &= value; - break; - case 0xf8: /* GPIO Pull-down Control */ - s->gpio_pdown = value; - break; - - default: - fprintf(stderr, "%s: unknown register %02x\n", __func__, reg); - break; - } -} - -uint16_t s1d13745_read(void *opaque, int dc) -{ - BlizzardState *s = (BlizzardState *) opaque; - uint16_t value = blizzard_reg_read(s, s->reg); - - if (s->swallow -- > 0) - return 0; - if (dc) - s->reg ++; - - return value; -} - -void s1d13745_write(void *opaque, int dc, uint16_t value) -{ - BlizzardState *s = (BlizzardState *) opaque; - - if (s->swallow -- > 0) - return; - if (dc) { - blizzard_reg_write(s, s->reg, value); - - if (s->reg != 0x90 && s->reg != 0x5a && s->reg != 0xb8) - s->reg += 2; - } else - s->reg = value & 0xff; -} - -void s1d13745_write_block(void *opaque, int dc, - void *buf, size_t len, int pitch) -{ - BlizzardState *s = (BlizzardState *) opaque; - - while (len > 0) { - if (s->reg == 0x90 && dc && - (s->data.len || blizzard_transfer_setup(s)) && - len >= (s->data.len << 1)) { - len -= s->data.len << 1; - s->data.len = 0; - s->data.data = buf; - if (pitch) - s->data.pitch = pitch; - blizzard_window(s); - s->data.data = s->data.buf; - continue; - } - - s1d13745_write(opaque, dc, *(uint16_t *) buf); - len -= 2; - buf += 2; - } -} - -static void blizzard_update_display(void *opaque) -{ - BlizzardState *s = (BlizzardState *) opaque; - DisplaySurface *surface = qemu_console_surface(s->con); - int y, bypp, bypl, bwidth; - uint8_t *src, *dst; - - if (!s->enable) - return; - - if (s->x != surface_width(surface) || s->y != surface_height(surface)) { - s->invalidate = 1; - qemu_console_resize(s->con, s->x, s->y); - surface = qemu_console_surface(s->con); - } - - if (s->invalidate) { - s->invalidate = 0; - - if (s->blank) { - bypp = surface_bytes_per_pixel(surface); - memset(surface_data(surface), 0, bypp * s->x * s->y); - return; - } - - s->mx[0] = 0; - s->mx[1] = s->x; - s->my[0] = 0; - s->my[1] = s->y; - } - - if (s->mx[1] <= s->mx[0]) - return; - - bypp = surface_bytes_per_pixel(surface); - bypl = bypp * s->x; - bwidth = bypp * (s->mx[1] - s->mx[0]); - y = s->my[0]; - src = s->fb + bypl * y + bypp * s->mx[0]; - dst = surface_data(surface) + bypl * y + bypp * s->mx[0]; - for (; y < s->my[1]; y ++, src += bypl, dst += bypl) - memcpy(dst, src, bwidth); - - dpy_gfx_update(s->con, s->mx[0], s->my[0], - s->mx[1] - s->mx[0], y - s->my[0]); - - s->mx[0] = s->x; - s->mx[1] = 0; - s->my[0] = s->y; - s->my[1] = 0; 
-} - -static void blizzard_draw_line16_32(uint32_t *dest, - const uint16_t *src, unsigned int width) -{ - uint16_t data; - unsigned int r, g, b; - const uint16_t *end = (const void *) src + width; - while (src < end) { - data = *src ++; - b = extract16(data, 0, 5) << 3; - g = extract16(data, 5, 6) << 2; - r = extract16(data, 11, 5) << 3; - *dest++ = rgb_to_pixel32(r, g, b); - } -} - -static void blizzard_draw_line24mode1_32(uint32_t *dest, - const uint8_t *src, unsigned int width) -{ - /* TODO: check if SDL 24-bit planes are not in the same format and - * if so, use memcpy */ - unsigned int r[2], g[2], b[2]; - const uint8_t *end = src + width; - while (src < end) { - g[0] = *src ++; - r[0] = *src ++; - r[1] = *src ++; - b[0] = *src ++; - *dest++ = rgb_to_pixel32(r[0], g[0], b[0]); - b[1] = *src ++; - g[1] = *src ++; - *dest++ = rgb_to_pixel32(r[1], g[1], b[1]); - } -} - -static void blizzard_draw_line24mode2_32(uint32_t *dest, - const uint8_t *src, unsigned int width) -{ - unsigned int r, g, b; - const uint8_t *end = src + width; - while (src < end) { - r = *src ++; - src ++; - b = *src ++; - g = *src ++; - *dest++ = rgb_to_pixel32(r, g, b); - } -} - -/* No rotation */ -static blizzard_fn_t blizzard_draw_fn_32[0x10] = { - NULL, - /* RGB 5:6:5*/ - (blizzard_fn_t) blizzard_draw_line16_32, - /* RGB 6:6:6 mode 1 */ - (blizzard_fn_t) blizzard_draw_line24mode1_32, - /* RGB 8:8:8 mode 1 */ - (blizzard_fn_t) blizzard_draw_line24mode1_32, - NULL, NULL, - /* RGB 6:6:6 mode 2 */ - (blizzard_fn_t) blizzard_draw_line24mode2_32, - /* RGB 8:8:8 mode 2 */ - (blizzard_fn_t) blizzard_draw_line24mode2_32, - /* YUV 4:2:2 */ - NULL, - /* YUV 4:2:0 */ - NULL, - NULL, NULL, NULL, NULL, NULL, NULL, -}; - -/* 90deg, 180deg and 270deg rotation */ -static blizzard_fn_t blizzard_draw_fn_r_32[0x10] = { - /* TODO */ - [0 ... 
0xf] = NULL, -}; - -static const GraphicHwOps blizzard_ops = { - .invalidate = blizzard_invalidate_display, - .gfx_update = blizzard_update_display, -}; - -void *s1d13745_init(qemu_irq gpio_int) -{ - BlizzardState *s = g_malloc0(sizeof(*s)); - DisplaySurface *surface; - - s->fb = g_malloc(0x180000); - - s->con = graphic_console_init(NULL, 0, &blizzard_ops, s); - surface = qemu_console_surface(s->con); - - assert(surface_bits_per_pixel(surface) == 32); - - s->line_fn_tab[0] = blizzard_draw_fn_32; - s->line_fn_tab[1] = blizzard_draw_fn_r_32; - - blizzard_reset(s); - - return s; -} diff --git a/hw/display/bochs-display.c b/hw/display/bochs-display.c index 3b1d922b6ea..ad2821c9745 100644 --- a/hw/display/bochs-display.c +++ b/hw/display/bochs-display.c @@ -345,14 +345,13 @@ static void bochs_display_exit(PCIDevice *dev) graphic_console_close(s->con); } -static Property bochs_display_properties[] = { +static const Property bochs_display_properties[] = { DEFINE_PROP_SIZE("vgamem", BochsDisplayState, vgamem, 16 * MiB), DEFINE_PROP_BOOL("edid", BochsDisplayState, enable_edid, true), DEFINE_EDID_PROPERTIES(BochsDisplayState, edid_info), - DEFINE_PROP_END_OF_LIST(), }; -static void bochs_display_class_init(ObjectClass *klass, void *data) +static void bochs_display_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -375,7 +374,7 @@ static const TypeInfo bochs_display_type_info = { .instance_size = sizeof(BochsDisplayState), .instance_init = bochs_display_init, .class_init = bochs_display_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, diff --git a/hw/display/cg3.c b/hw/display/cg3.c index b271faaa484..daeef152174 100644 --- a/hw/display/cg3.c +++ b/hw/display/cg3.c @@ -361,20 +361,19 @@ static void cg3_reset(DeviceState *d) qemu_irq_lower(s->irq); } -static Property cg3_properties[] = { +static const Property cg3_properties[] = { DEFINE_PROP_UINT32("vram-size", CG3State, vram_size, -1), DEFINE_PROP_UINT16("width", CG3State, width, -1), DEFINE_PROP_UINT16("height", CG3State, height, -1), DEFINE_PROP_UINT16("depth", CG3State, depth, -1), - DEFINE_PROP_END_OF_LIST(), }; -static void cg3_class_init(ObjectClass *klass, void *data) +static void cg3_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = cg3_realizefn; - dc->reset = cg3_reset; + device_class_set_legacy_reset(dc, cg3_reset); dc->vmsd = &vmstate_cg3; device_class_set_props(dc, cg3_properties); } diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c index 150883a9716..ef08694626d 100644 --- a/hw/display/cirrus_vga.c +++ b/hw/display/cirrus_vga.c @@ -36,7 +36,7 @@ #include "qemu/module.h" #include "qemu/units.h" #include "qemu/log.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "qapi/error.h" #include "trace.h" #include "hw/pci/pci_device.h" @@ -2982,17 +2982,16 @@ static void pci_cirrus_vga_realize(PCIDevice *dev, Error **errp) } } -static Property pci_vga_cirrus_properties[] = { +static const Property pci_vga_cirrus_properties[] = { DEFINE_PROP_UINT32("vgamem_mb", struct PCICirrusVGAState, cirrus_vga.vga.vram_size_mb, 4), DEFINE_PROP_BOOL("blitter", struct PCICirrusVGAState, cirrus_vga.enable_blitter, true), DEFINE_PROP_BOOL("global-vmstate", struct PCICirrusVGAState, cirrus_vga.vga.global_vmstate, false), - DEFINE_PROP_END_OF_LIST(), }; -static void 
cirrus_vga_class_init(ObjectClass *klass, void *data) +static void cirrus_vga_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -3014,7 +3013,7 @@ static const TypeInfo cirrus_vga_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCICirrusVGAState), .class_init = cirrus_vga_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/display/cirrus_vga_isa.c b/hw/display/cirrus_vga_isa.c index 84be51670ed..4b55c48eff8 100644 --- a/hw/display/cirrus_vga_isa.c +++ b/hw/display/cirrus_vga_isa.c @@ -69,15 +69,14 @@ static void isa_cirrus_vga_realizefn(DeviceState *dev, Error **errp) /* FIXME not qdev yet */ } -static Property isa_cirrus_vga_properties[] = { +static const Property isa_cirrus_vga_properties[] = { DEFINE_PROP_UINT32("vgamem_mb", struct ISACirrusVGAState, cirrus_vga.vga.vram_size_mb, 4), DEFINE_PROP_BOOL("blitter", struct ISACirrusVGAState, cirrus_vga.enable_blitter, true), - DEFINE_PROP_END_OF_LIST(), }; -static void isa_cirrus_vga_class_init(ObjectClass *klass, void *data) +static void isa_cirrus_vga_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/display/dm163.c b/hw/display/dm163.c index f92aee371d9..f8340d8275e 100644 --- a/hw/display/dm163.c +++ b/hw/display/dm163.c @@ -271,7 +271,7 @@ static uint32_t *update_display_of_row(DM163State *s, uint32_t *dest, unsigned row) { for (unsigned _ = 0; _ < LED_SQUARE_SIZE; _++) { - for (int x = 0; x < RGB_MATRIX_NUM_COLS * LED_SQUARE_SIZE; x++) { + for (int x = RGB_MATRIX_NUM_COLS * LED_SQUARE_SIZE - 1; x >= 0; x--) { /* UI layer guarantees that there's 32 bits per pixel (Mar 2024) */ *dest++ = s->buffer[s->buffer_idx_of_row[row]][x / LED_SQUARE_SIZE]; } @@ -325,12 +325,12 @@ static void dm163_realize(DeviceState *dev, Error **errp) RGB_MATRIX_NUM_ROWS * LED_SQUARE_SIZE); } -static void dm163_class_init(ObjectClass *klass, void *data) +static void dm163_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); - dc->desc = "DM163"; + dc->desc = "DM163 8x3-channel constant current LED driver"; dc->vmsd = &vmstate_dm163; dc->realize = dm163_realize; rc->phases.hold = dm163_reset_hold; diff --git a/hw/display/dpcd.c b/hw/display/dpcd.c index aab1b1a2d7f..a157dc64e76 100644 --- a/hw/display/dpcd.c +++ b/hw/display/dpcd.c @@ -141,11 +141,11 @@ static const VMStateDescription vmstate_dpcd = { } }; -static void dpcd_class_init(ObjectClass *oc, void *data) +static void dpcd_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); - dc->reset = dpcd_reset; + device_class_set_legacy_reset(dc, dpcd_reset); dc->vmsd = &vmstate_dpcd; } diff --git a/hw/display/edid-region.c b/hw/display/edid-region.c index 675429dc18b..f1596fba9a9 100644 --- a/hw/display/edid-region.c +++ b/hw/display/edid-region.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/display/edid.h" static uint64_t edid_region_read(void *ptr, hwaddr addr, unsigned size) diff --git a/hw/display/exynos4210_fimd.c b/hw/display/exynos4210_fimd.c index 5712558e13d..c61e0280a7c 100644 --- a/hw/display/exynos4210_fimd.c +++ b/hw/display/exynos4210_fimd.c @@ -1925,10 +1925,9 @@ static const GraphicHwOps exynos4210_fimd_ops = { .gfx_update = exynos4210_fimd_update, }; -static Property 
exynos4210_fimd_properties[] = { +static const Property exynos4210_fimd_properties[] = { DEFINE_PROP_LINK("framebuffer-memory", Exynos4210fimdState, fbmem, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; static void exynos4210_fimd_init(Object *obj) @@ -1959,12 +1958,12 @@ static void exynos4210_fimd_realize(DeviceState *dev, Error **errp) s->console = graphic_console_init(dev, 0, &exynos4210_fimd_ops, s); } -static void exynos4210_fimd_class_init(ObjectClass *klass, void *data) +static void exynos4210_fimd_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &exynos4210_fimd_vmstate; - dc->reset = exynos4210_fimd_reset; + device_class_set_legacy_reset(dc, exynos4210_fimd_reset); dc->realize = exynos4210_fimd_realize; device_class_set_props(dc, exynos4210_fimd_properties); } diff --git a/hw/display/framebuffer.c b/hw/display/framebuffer.c index 4485aa335bb..b4296e8a33e 100644 --- a/hw/display/framebuffer.c +++ b/hw/display/framebuffer.c @@ -95,9 +95,9 @@ void framebuffer_update_display( } first = -1; - addr += i * src_width; - src += i * src_width; - dest += i * dest_row_pitch; + addr += (uint64_t)i * src_width; + src += (uint64_t)i * src_width; + dest += (uint64_t)i * dest_row_pitch; snap = memory_region_snapshot_and_clear_dirty(mem, addr, src_width * rows, DIRTY_MEMORY_VGA); diff --git a/hw/display/framebuffer.h b/hw/display/framebuffer.h index 38fa0dcec61..29a828ce7a3 100644 --- a/hw/display/framebuffer.h +++ b/hw/display/framebuffer.h @@ -1,7 +1,7 @@ #ifndef QEMU_FRAMEBUFFER_H #define QEMU_FRAMEBUFFER_H -#include "exec/memory.h" +#include "system/memory.h" /* Framebuffer device helper routines. */ diff --git a/hw/display/g364fb.c b/hw/display/g364fb.c index e08ec3f8de4..a6ddc21d3e4 100644 --- a/hw/display/g364fb.c +++ b/hw/display/g364fb.c @@ -512,9 +512,8 @@ static void g364fb_sysbus_reset(DeviceState *d) g364fb_reset(&s->g364); } -static Property g364fb_sysbus_properties[] = { +static const Property g364fb_sysbus_properties[] = { DEFINE_PROP_UINT32("vram_size", G364SysBusState, g364.vram_size, 8 * MiB), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_g364fb_sysbus = { @@ -527,14 +526,14 @@ static const VMStateDescription vmstate_g364fb_sysbus = { } }; -static void g364fb_sysbus_class_init(ObjectClass *klass, void *data) +static void g364fb_sysbus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = g364fb_sysbus_realize; set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); dc->desc = "G364 framebuffer"; - dc->reset = g364fb_sysbus_reset; + device_class_set_legacy_reset(dc, g364fb_sysbus_reset); dc->vmsd = &vmstate_g364fb_sysbus; device_class_set_props(dc, g364fb_sysbus_properties); } diff --git a/hw/display/i2c-ddc.c b/hw/display/i2c-ddc.c index 3f9d1e1f6fe..2adfc1a1472 100644 --- a/hw/display/i2c-ddc.c +++ b/hw/display/i2c-ddc.c @@ -95,17 +95,16 @@ static const VMStateDescription vmstate_i2c_ddc = { } }; -static Property i2c_ddc_properties[] = { +static const Property i2c_ddc_properties[] = { DEFINE_EDID_PROPERTIES(I2CDDCState, edid_info), - DEFINE_PROP_END_OF_LIST(), }; -static void i2c_ddc_class_init(ObjectClass *oc, void *data) +static void i2c_ddc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); I2CSlaveClass *isc = I2C_SLAVE_CLASS(oc); - dc->reset = i2c_ddc_reset; + device_class_set_legacy_reset(dc, i2c_ddc_reset); dc->vmsd = &vmstate_i2c_ddc; device_class_set_props(dc, i2c_ddc_properties); isc->event = 
i2c_ddc_event; diff --git a/hw/display/jazz_led.c b/hw/display/jazz_led.c index 534f15dcfd4..90e82b58be2 100644 --- a/hw/display/jazz_led.c +++ b/hw/display/jazz_led.c @@ -294,13 +294,13 @@ static void jazz_led_reset(DeviceState *d) qemu_console_resize(s->con, 60, 80); } -static void jazz_led_class_init(ObjectClass *klass, void *data) +static void jazz_led_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->desc = "Jazz LED display", dc->vmsd = &vmstate_jazz_led; - dc->reset = jazz_led_reset; + device_class_set_legacy_reset(dc, jazz_led_reset); dc->realize = jazz_led_realize; } diff --git a/hw/display/macfb.c b/hw/display/macfb.c index 1ace341a0ff..574d667173c 100644 --- a/hw/display/macfb.c +++ b/hw/display/macfb.c @@ -383,7 +383,6 @@ static void macfb_sense_write(MacfbState *s, uint32_t val) s->regs[DAFB_MODE_SENSE >> 2] = val; trace_macfb_sense_write(val); - return; } static void macfb_update_mode(MacfbState *s) @@ -758,13 +757,12 @@ static void macfb_nubus_reset(DeviceState *d) macfb_reset(&s->macfb); } -static Property macfb_sysbus_properties[] = { +static const Property macfb_sysbus_properties[] = { DEFINE_PROP_UINT32("width", MacfbSysBusState, macfb.width, 640), DEFINE_PROP_UINT32("height", MacfbSysBusState, macfb.height, 480), DEFINE_PROP_UINT8("depth", MacfbSysBusState, macfb.depth, 8), DEFINE_PROP_UINT8("display", MacfbSysBusState, macfb.type, MACFB_DISPLAY_VGA), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_macfb_sysbus = { @@ -777,13 +775,12 @@ static const VMStateDescription vmstate_macfb_sysbus = { } }; -static Property macfb_nubus_properties[] = { +static const Property macfb_nubus_properties[] = { DEFINE_PROP_UINT32("width", MacfbNubusState, macfb.width, 640), DEFINE_PROP_UINT32("height", MacfbNubusState, macfb.height, 480), DEFINE_PROP_UINT8("depth", MacfbNubusState, macfb.depth, 8), DEFINE_PROP_UINT8("display", MacfbNubusState, macfb.type, MACFB_DISPLAY_VGA), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_macfb_nubus = { @@ -796,18 +793,18 @@ static const VMStateDescription vmstate_macfb_nubus = { } }; -static void macfb_sysbus_class_init(ObjectClass *klass, void *data) +static void macfb_sysbus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = macfb_sysbus_realize; dc->desc = "SysBus Macintosh framebuffer"; - dc->reset = macfb_sysbus_reset; + device_class_set_legacy_reset(dc, macfb_sysbus_reset); dc->vmsd = &vmstate_macfb_sysbus; device_class_set_props(dc, macfb_sysbus_properties); } -static void macfb_nubus_class_init(ObjectClass *klass, void *data) +static void macfb_nubus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); MacfbNubusDeviceClass *ndc = NUBUS_MACFB_CLASS(klass); @@ -817,7 +814,7 @@ static void macfb_nubus_class_init(ObjectClass *klass, void *data) device_class_set_parent_unrealize(dc, macfb_nubus_unrealize, &ndc->parent_unrealize); dc->desc = "Nubus Macintosh framebuffer"; - dc->reset = macfb_nubus_reset; + device_class_set_legacy_reset(dc, macfb_nubus_reset); dc->vmsd = &vmstate_macfb_nubus; set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); device_class_set_props(dc, macfb_nubus_properties); diff --git a/hw/display/meson.build b/hw/display/meson.build index 7db05eace97..90e6c041bdb 100644 --- a/hw/display/meson.build +++ b/hw/display/meson.build @@ -22,13 +22,9 @@ system_ss.add(when: 'CONFIG_VGA_MMIO', if_true: files('vga-mmio.c')) system_ss.add(when: 
'CONFIG_VMWARE_VGA', if_true: files('vmware_vga.c')) system_ss.add(when: 'CONFIG_BOCHS_DISPLAY', if_true: files('bochs-display.c')) -system_ss.add(when: 'CONFIG_BLIZZARD', if_true: files('blizzard.c')) system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_fimd.c')) system_ss.add(when: 'CONFIG_FRAMEBUFFER', if_true: files('framebuffer.c')) -system_ss.add(when: 'CONFIG_ZAURUS', if_true: files('tc6393xb.c')) -system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_dss.c')) -system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_lcd.c')) system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_fb.c')) system_ss.add(when: 'CONFIG_SM501', if_true: files('sm501.c')) system_ss.add(when: 'CONFIG_TCX', if_true: files('tcx.c')) @@ -65,6 +61,8 @@ system_ss.add(when: 'CONFIG_ARTIST', if_true: files('artist.c')) system_ss.add(when: 'CONFIG_ATI_VGA', if_true: [files('ati.c', 'ati_2d.c', 'ati_dbg.c'), pixman]) +system_ss.add(when: [pvg, 'CONFIG_MAC_PVG_PCI'], if_true: [files('apple-gfx.m', 'apple-gfx-pci.m')]) +system_ss.add(when: [pvg, 'CONFIG_MAC_PVG_MMIO'], if_true: [files('apple-gfx.m', 'apple-gfx-mmio.m')]) if config_all_devices.has_key('CONFIG_VIRTIO_GPU') virtio_gpu_ss = ss.source_set() diff --git a/hw/display/next-fb.c b/hw/display/next-fb.c index 8446ff3c00e..ec81b766a7e 100644 --- a/hw/display/next-fb.c +++ b/hw/display/next-fb.c @@ -119,7 +119,7 @@ static void nextfb_realize(DeviceState *dev, Error **errp) qemu_console_resize(s->con, s->cols, s->rows); } -static void nextfb_class_init(ObjectClass *oc, void *data) +static void nextfb_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/display/omap_dss.c b/hw/display/omap_dss.c deleted file mode 100644 index f33fc7606d3..00000000000 --- a/hw/display/omap_dss.c +++ /dev/null @@ -1,1093 +0,0 @@ -/* - * OMAP2 Display Subsystem. - * - * Copyright (C) 2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . 
- */ - -#include "qemu/osdep.h" -#include "qemu/log.h" -#include "hw/hw.h" -#include "hw/irq.h" -#include "ui/console.h" -#include "hw/arm/omap.h" - -struct omap_dss_s { - qemu_irq irq; - qemu_irq drq; - DisplayState *state; - MemoryRegion iomem_diss1, iomem_disc1, iomem_rfbi1, iomem_venc1, iomem_im3; - - int autoidle; - int control; - int enable; - - struct omap_dss_panel_s { - int enable; - int nx; - int ny; - - int x; - int y; - } dig, lcd; - - struct { - uint32_t idlemode; - uint32_t irqst; - uint32_t irqen; - uint32_t control; - uint32_t config; - uint32_t capable; - uint32_t timing[4]; - int line; - uint32_t bg[2]; - uint32_t trans[2]; - - struct omap_dss_plane_s { - int enable; - int bpp; - int posx; - int posy; - int nx; - int ny; - - hwaddr addr[3]; - - uint32_t attr; - uint32_t tresh; - int rowinc; - int colinc; - int wininc; - } l[3]; - - int invalidate; - uint16_t palette[256]; - } dispc; - - struct { - int idlemode; - uint32_t control; - int enable; - int pixels; - int busy; - int skiplines; - uint16_t rxbuf; - uint32_t config[2]; - uint32_t time[4]; - uint32_t data[6]; - uint16_t vsync; - uint16_t hsync; - struct rfbi_chip_s *chip[2]; - } rfbi; -}; - -static void omap_dispc_interrupt_update(struct omap_dss_s *s) -{ - qemu_set_irq(s->irq, s->dispc.irqst & s->dispc.irqen); -} - -static void omap_rfbi_reset(struct omap_dss_s *s) -{ - s->rfbi.idlemode = 0; - s->rfbi.control = 2; - s->rfbi.enable = 0; - s->rfbi.pixels = 0; - s->rfbi.skiplines = 0; - s->rfbi.busy = 0; - s->rfbi.config[0] = 0x00310000; - s->rfbi.config[1] = 0x00310000; - s->rfbi.time[0] = 0; - s->rfbi.time[1] = 0; - s->rfbi.time[2] = 0; - s->rfbi.time[3] = 0; - s->rfbi.data[0] = 0; - s->rfbi.data[1] = 0; - s->rfbi.data[2] = 0; - s->rfbi.data[3] = 0; - s->rfbi.data[4] = 0; - s->rfbi.data[5] = 0; - s->rfbi.vsync = 0; - s->rfbi.hsync = 0; -} - -void omap_dss_reset(struct omap_dss_s *s) -{ - s->autoidle = 0; - s->control = 0; - s->enable = 0; - - s->dig.enable = 0; - s->dig.nx = 1; - s->dig.ny = 1; - - s->lcd.enable = 0; - s->lcd.nx = 1; - s->lcd.ny = 1; - - s->dispc.idlemode = 0; - s->dispc.irqst = 0; - s->dispc.irqen = 0; - s->dispc.control = 0; - s->dispc.config = 0; - s->dispc.capable = 0x161; - s->dispc.timing[0] = 0; - s->dispc.timing[1] = 0; - s->dispc.timing[2] = 0; - s->dispc.timing[3] = 0; - s->dispc.line = 0; - s->dispc.bg[0] = 0; - s->dispc.bg[1] = 0; - s->dispc.trans[0] = 0; - s->dispc.trans[1] = 0; - - s->dispc.l[0].enable = 0; - s->dispc.l[0].bpp = 0; - s->dispc.l[0].addr[0] = 0; - s->dispc.l[0].addr[1] = 0; - s->dispc.l[0].addr[2] = 0; - s->dispc.l[0].posx = 0; - s->dispc.l[0].posy = 0; - s->dispc.l[0].nx = 1; - s->dispc.l[0].ny = 1; - s->dispc.l[0].attr = 0; - s->dispc.l[0].tresh = 0; - s->dispc.l[0].rowinc = 1; - s->dispc.l[0].colinc = 1; - s->dispc.l[0].wininc = 0; - - omap_rfbi_reset(s); - omap_dispc_interrupt_update(s); -} - -static uint64_t omap_diss_read(void *opaque, hwaddr addr, - unsigned size) -{ - struct omap_dss_s *s = opaque; - - if (size != 4) { - return omap_badwidth_read32(opaque, addr); - } - - switch (addr) { - case 0x00: /* DSS_REVISIONNUMBER */ - return 0x20; - - case 0x10: /* DSS_SYSCONFIG */ - return s->autoidle; - - case 0x14: /* DSS_SYSSTATUS */ - return 1; /* RESETDONE */ - - case 0x40: /* DSS_CONTROL */ - return s->control; - - case 0x50: /* DSS_PSA_LCD_REG_1 */ - case 0x54: /* DSS_PSA_LCD_REG_2 */ - case 0x58: /* DSS_PSA_VIDEO_REG */ - /* TODO: fake some values when appropriate s->control bits are set */ - return 0; - - case 0x5c: /* DSS_STATUS */ - return 1 + (s->control & 
1); - - default: - break; - } - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_diss_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_dss_s *s = opaque; - - if (size != 4) { - omap_badwidth_write32(opaque, addr, value); - return; - } - - switch (addr) { - case 0x00: /* DSS_REVISIONNUMBER */ - case 0x14: /* DSS_SYSSTATUS */ - case 0x50: /* DSS_PSA_LCD_REG_1 */ - case 0x54: /* DSS_PSA_LCD_REG_2 */ - case 0x58: /* DSS_PSA_VIDEO_REG */ - case 0x5c: /* DSS_STATUS */ - OMAP_RO_REG(addr); - break; - - case 0x10: /* DSS_SYSCONFIG */ - if (value & 2) /* SOFTRESET */ - omap_dss_reset(s); - s->autoidle = value & 1; - break; - - case 0x40: /* DSS_CONTROL */ - s->control = value & 0x3dd; - break; - - default: - OMAP_BAD_REG(addr); - } -} - -static const MemoryRegionOps omap_diss_ops = { - .read = omap_diss_read, - .write = omap_diss_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static uint64_t omap_disc_read(void *opaque, hwaddr addr, - unsigned size) -{ - struct omap_dss_s *s = opaque; - - if (size != 4) { - return omap_badwidth_read32(opaque, addr); - } - - switch (addr) { - case 0x000: /* DISPC_REVISION */ - return 0x20; - - case 0x010: /* DISPC_SYSCONFIG */ - return s->dispc.idlemode; - - case 0x014: /* DISPC_SYSSTATUS */ - return 1; /* RESETDONE */ - - case 0x018: /* DISPC_IRQSTATUS */ - return s->dispc.irqst; - - case 0x01c: /* DISPC_IRQENABLE */ - return s->dispc.irqen; - - case 0x040: /* DISPC_CONTROL */ - return s->dispc.control; - - case 0x044: /* DISPC_CONFIG */ - return s->dispc.config; - - case 0x048: /* DISPC_CAPABLE */ - return s->dispc.capable; - - case 0x04c: /* DISPC_DEFAULT_COLOR0 */ - return s->dispc.bg[0]; - case 0x050: /* DISPC_DEFAULT_COLOR1 */ - return s->dispc.bg[1]; - case 0x054: /* DISPC_TRANS_COLOR0 */ - return s->dispc.trans[0]; - case 0x058: /* DISPC_TRANS_COLOR1 */ - return s->dispc.trans[1]; - - case 0x05c: /* DISPC_LINE_STATUS */ - return 0x7ff; - case 0x060: /* DISPC_LINE_NUMBER */ - return s->dispc.line; - - case 0x064: /* DISPC_TIMING_H */ - return s->dispc.timing[0]; - case 0x068: /* DISPC_TIMING_V */ - return s->dispc.timing[1]; - case 0x06c: /* DISPC_POL_FREQ */ - return s->dispc.timing[2]; - case 0x070: /* DISPC_DIVISOR */ - return s->dispc.timing[3]; - - case 0x078: /* DISPC_SIZE_DIG */ - return ((s->dig.ny - 1) << 16) | (s->dig.nx - 1); - case 0x07c: /* DISPC_SIZE_LCD */ - return ((s->lcd.ny - 1) << 16) | (s->lcd.nx - 1); - - case 0x080: /* DISPC_GFX_BA0 */ - return s->dispc.l[0].addr[0]; - case 0x084: /* DISPC_GFX_BA1 */ - return s->dispc.l[0].addr[1]; - case 0x088: /* DISPC_GFX_POSITION */ - return (s->dispc.l[0].posy << 16) | s->dispc.l[0].posx; - case 0x08c: /* DISPC_GFX_SIZE */ - return ((s->dispc.l[0].ny - 1) << 16) | (s->dispc.l[0].nx - 1); - case 0x0a0: /* DISPC_GFX_ATTRIBUTES */ - return s->dispc.l[0].attr; - case 0x0a4: /* DISPC_GFX_FIFO_TRESHOLD */ - return s->dispc.l[0].tresh; - case 0x0a8: /* DISPC_GFX_FIFO_SIZE_STATUS */ - return 256; - case 0x0ac: /* DISPC_GFX_ROW_INC */ - return s->dispc.l[0].rowinc; - case 0x0b0: /* DISPC_GFX_PIXEL_INC */ - return s->dispc.l[0].colinc; - case 0x0b4: /* DISPC_GFX_WINDOW_SKIP */ - return s->dispc.l[0].wininc; - case 0x0b8: /* DISPC_GFX_TABLE_BA */ - return s->dispc.l[0].addr[2]; - - case 0x0bc: /* DISPC_VID1_BA0 */ - case 0x0c0: /* DISPC_VID1_BA1 */ - case 0x0c4: /* DISPC_VID1_POSITION */ - case 0x0c8: /* DISPC_VID1_SIZE */ - case 0x0cc: /* DISPC_VID1_ATTRIBUTES */ - case 0x0d0: /* DISPC_VID1_FIFO_TRESHOLD */ - case 0x0d4: /* DISPC_VID1_FIFO_SIZE_STATUS */ - case 
0x0d8: /* DISPC_VID1_ROW_INC */ - case 0x0dc: /* DISPC_VID1_PIXEL_INC */ - case 0x0e0: /* DISPC_VID1_FIR */ - case 0x0e4: /* DISPC_VID1_PICTURE_SIZE */ - case 0x0e8: /* DISPC_VID1_ACCU0 */ - case 0x0ec: /* DISPC_VID1_ACCU1 */ - case 0x0f0 ... 0x140: /* DISPC_VID1_FIR_COEF, DISPC_VID1_CONV_COEF */ - case 0x14c: /* DISPC_VID2_BA0 */ - case 0x150: /* DISPC_VID2_BA1 */ - case 0x154: /* DISPC_VID2_POSITION */ - case 0x158: /* DISPC_VID2_SIZE */ - case 0x15c: /* DISPC_VID2_ATTRIBUTES */ - case 0x160: /* DISPC_VID2_FIFO_TRESHOLD */ - case 0x164: /* DISPC_VID2_FIFO_SIZE_STATUS */ - case 0x168: /* DISPC_VID2_ROW_INC */ - case 0x16c: /* DISPC_VID2_PIXEL_INC */ - case 0x170: /* DISPC_VID2_FIR */ - case 0x174: /* DISPC_VID2_PICTURE_SIZE */ - case 0x178: /* DISPC_VID2_ACCU0 */ - case 0x17c: /* DISPC_VID2_ACCU1 */ - case 0x180 ... 0x1d0: /* DISPC_VID2_FIR_COEF, DISPC_VID2_CONV_COEF */ - case 0x1d4: /* DISPC_DATA_CYCLE1 */ - case 0x1d8: /* DISPC_DATA_CYCLE2 */ - case 0x1dc: /* DISPC_DATA_CYCLE3 */ - return 0; - - default: - break; - } - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_disc_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_dss_s *s = opaque; - - if (size != 4) { - omap_badwidth_write32(opaque, addr, value); - return; - } - - switch (addr) { - case 0x010: /* DISPC_SYSCONFIG */ - if (value & 2) /* SOFTRESET */ - omap_dss_reset(s); - s->dispc.idlemode = value & 0x301b; - break; - - case 0x018: /* DISPC_IRQSTATUS */ - s->dispc.irqst &= ~value; - omap_dispc_interrupt_update(s); - break; - - case 0x01c: /* DISPC_IRQENABLE */ - s->dispc.irqen = value & 0xffff; - omap_dispc_interrupt_update(s); - break; - - case 0x040: /* DISPC_CONTROL */ - s->dispc.control = value & 0x07ff9fff; - s->dig.enable = (value >> 1) & 1; - s->lcd.enable = (value >> 0) & 1; - if (value & (1 << 12)) /* OVERLAY_OPTIMIZATION */ - if (!((s->dispc.l[1].attr | s->dispc.l[2].attr) & 1)) { - fprintf(stderr, "%s: Overlay Optimization when no overlay " - "region effectively exists leads to " - "unpredictable behaviour!\n", __func__); - } - if (value & (1 << 6)) { /* GODIGITAL */ - /* XXX: Shadowed fields are: - * s->dispc.config - * s->dispc.capable - * s->dispc.bg[0] - * s->dispc.bg[1] - * s->dispc.trans[0] - * s->dispc.trans[1] - * s->dispc.line - * s->dispc.timing[0] - * s->dispc.timing[1] - * s->dispc.timing[2] - * s->dispc.timing[3] - * s->lcd.nx - * s->lcd.ny - * s->dig.nx - * s->dig.ny - * s->dispc.l[0].addr[0] - * s->dispc.l[0].addr[1] - * s->dispc.l[0].addr[2] - * s->dispc.l[0].posx - * s->dispc.l[0].posy - * s->dispc.l[0].nx - * s->dispc.l[0].ny - * s->dispc.l[0].tresh - * s->dispc.l[0].rowinc - * s->dispc.l[0].colinc - * s->dispc.l[0].wininc - * All they need to be loaded here from their shadow registers. - */ - } - if (value & (1 << 5)) { /* GOLCD */ - /* XXX: Likewise for LCD here. 
*/ - } - s->dispc.invalidate = 1; - break; - - case 0x044: /* DISPC_CONFIG */ - s->dispc.config = value & 0x3fff; - /* XXX: - * bits 2:1 (LOADMODE) reset to 0 after set to 1 and palette loaded - * bits 2:1 (LOADMODE) reset to 2 after set to 3 and palette loaded - */ - s->dispc.invalidate = 1; - break; - - case 0x048: /* DISPC_CAPABLE */ - s->dispc.capable = value & 0x3ff; - break; - - case 0x04c: /* DISPC_DEFAULT_COLOR0 */ - s->dispc.bg[0] = value & 0xffffff; - s->dispc.invalidate = 1; - break; - case 0x050: /* DISPC_DEFAULT_COLOR1 */ - s->dispc.bg[1] = value & 0xffffff; - s->dispc.invalidate = 1; - break; - case 0x054: /* DISPC_TRANS_COLOR0 */ - s->dispc.trans[0] = value & 0xffffff; - s->dispc.invalidate = 1; - break; - case 0x058: /* DISPC_TRANS_COLOR1 */ - s->dispc.trans[1] = value & 0xffffff; - s->dispc.invalidate = 1; - break; - - case 0x060: /* DISPC_LINE_NUMBER */ - s->dispc.line = value & 0x7ff; - break; - - case 0x064: /* DISPC_TIMING_H */ - s->dispc.timing[0] = value & 0x0ff0ff3f; - break; - case 0x068: /* DISPC_TIMING_V */ - s->dispc.timing[1] = value & 0x0ff0ff3f; - break; - case 0x06c: /* DISPC_POL_FREQ */ - s->dispc.timing[2] = value & 0x0003ffff; - break; - case 0x070: /* DISPC_DIVISOR */ - s->dispc.timing[3] = value & 0x00ff00ff; - break; - - case 0x078: /* DISPC_SIZE_DIG */ - s->dig.nx = ((value >> 0) & 0x7ff) + 1; /* PPL */ - s->dig.ny = ((value >> 16) & 0x7ff) + 1; /* LPP */ - s->dispc.invalidate = 1; - break; - case 0x07c: /* DISPC_SIZE_LCD */ - s->lcd.nx = ((value >> 0) & 0x7ff) + 1; /* PPL */ - s->lcd.ny = ((value >> 16) & 0x7ff) + 1; /* LPP */ - s->dispc.invalidate = 1; - break; - case 0x080: /* DISPC_GFX_BA0 */ - s->dispc.l[0].addr[0] = (hwaddr) value; - s->dispc.invalidate = 1; - break; - case 0x084: /* DISPC_GFX_BA1 */ - s->dispc.l[0].addr[1] = (hwaddr) value; - s->dispc.invalidate = 1; - break; - case 0x088: /* DISPC_GFX_POSITION */ - s->dispc.l[0].posx = ((value >> 0) & 0x7ff); /* GFXPOSX */ - s->dispc.l[0].posy = ((value >> 16) & 0x7ff); /* GFXPOSY */ - s->dispc.invalidate = 1; - break; - case 0x08c: /* DISPC_GFX_SIZE */ - s->dispc.l[0].nx = ((value >> 0) & 0x7ff) + 1; /* GFXSIZEX */ - s->dispc.l[0].ny = ((value >> 16) & 0x7ff) + 1; /* GFXSIZEY */ - s->dispc.invalidate = 1; - break; - case 0x0a0: /* DISPC_GFX_ATTRIBUTES */ - s->dispc.l[0].attr = value & 0x7ff; - if (value & (3 << 9)) - fprintf(stderr, "%s: Big-endian pixel format not supported\n", - __func__); - s->dispc.l[0].enable = value & 1; - s->dispc.l[0].bpp = (value >> 1) & 0xf; - s->dispc.invalidate = 1; - break; - case 0x0a4: /* DISPC_GFX_FIFO_TRESHOLD */ - s->dispc.l[0].tresh = value & 0x01ff01ff; - break; - case 0x0ac: /* DISPC_GFX_ROW_INC */ - s->dispc.l[0].rowinc = value; - s->dispc.invalidate = 1; - break; - case 0x0b0: /* DISPC_GFX_PIXEL_INC */ - s->dispc.l[0].colinc = value; - s->dispc.invalidate = 1; - break; - case 0x0b4: /* DISPC_GFX_WINDOW_SKIP */ - s->dispc.l[0].wininc = value; - break; - case 0x0b8: /* DISPC_GFX_TABLE_BA */ - s->dispc.l[0].addr[2] = (hwaddr) value; - s->dispc.invalidate = 1; - break; - - case 0x0bc: /* DISPC_VID1_BA0 */ - case 0x0c0: /* DISPC_VID1_BA1 */ - case 0x0c4: /* DISPC_VID1_POSITION */ - case 0x0c8: /* DISPC_VID1_SIZE */ - case 0x0cc: /* DISPC_VID1_ATTRIBUTES */ - case 0x0d0: /* DISPC_VID1_FIFO_TRESHOLD */ - case 0x0d8: /* DISPC_VID1_ROW_INC */ - case 0x0dc: /* DISPC_VID1_PIXEL_INC */ - case 0x0e0: /* DISPC_VID1_FIR */ - case 0x0e4: /* DISPC_VID1_PICTURE_SIZE */ - case 0x0e8: /* DISPC_VID1_ACCU0 */ - case 0x0ec: /* DISPC_VID1_ACCU1 */ - case 0x0f0 ... 
0x140: /* DISPC_VID1_FIR_COEF, DISPC_VID1_CONV_COEF */ - case 0x14c: /* DISPC_VID2_BA0 */ - case 0x150: /* DISPC_VID2_BA1 */ - case 0x154: /* DISPC_VID2_POSITION */ - case 0x158: /* DISPC_VID2_SIZE */ - case 0x15c: /* DISPC_VID2_ATTRIBUTES */ - case 0x160: /* DISPC_VID2_FIFO_TRESHOLD */ - case 0x168: /* DISPC_VID2_ROW_INC */ - case 0x16c: /* DISPC_VID2_PIXEL_INC */ - case 0x170: /* DISPC_VID2_FIR */ - case 0x174: /* DISPC_VID2_PICTURE_SIZE */ - case 0x178: /* DISPC_VID2_ACCU0 */ - case 0x17c: /* DISPC_VID2_ACCU1 */ - case 0x180 ... 0x1d0: /* DISPC_VID2_FIR_COEF, DISPC_VID2_CONV_COEF */ - case 0x1d4: /* DISPC_DATA_CYCLE1 */ - case 0x1d8: /* DISPC_DATA_CYCLE2 */ - case 0x1dc: /* DISPC_DATA_CYCLE3 */ - break; - - default: - OMAP_BAD_REG(addr); - } -} - -static const MemoryRegionOps omap_disc_ops = { - .read = omap_disc_read, - .write = omap_disc_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void omap_rfbi_transfer_stop(struct omap_dss_s *s) -{ - if (!s->rfbi.busy) - return; - - /* TODO: in non-Bypass mode we probably need to just deassert the DRQ. */ - - s->rfbi.busy = 0; -} - -static void omap_rfbi_transfer_start(struct omap_dss_s *s) -{ - void *data; - hwaddr len; - hwaddr data_addr; - int pitch; - static void *bounce_buffer; - static hwaddr bounce_len; - - if (!s->rfbi.enable || s->rfbi.busy) - return; - - if (s->rfbi.control & (1 << 1)) { /* BYPASS */ - /* TODO: in non-Bypass mode we probably need to just assert the - * DRQ and wait for DMA to write the pixels. */ - qemu_log_mask(LOG_UNIMP, "%s: Bypass mode unimplemented\n", __func__); - return; - } - - if (!(s->dispc.control & (1 << 11))) /* RFBIMODE */ - return; - /* TODO: check that LCD output is enabled in DISPC. */ - - s->rfbi.busy = 1; - - len = s->rfbi.pixels * 2; - - data_addr = s->dispc.l[0].addr[0]; - data = cpu_physical_memory_map(data_addr, &len, false); - if (data && len != s->rfbi.pixels * 2) { - cpu_physical_memory_unmap(data, len, 0, 0); - data = NULL; - len = s->rfbi.pixels * 2; - } - if (!data) { - if (len > bounce_len) { - bounce_buffer = g_realloc(bounce_buffer, len); - } - data = bounce_buffer; - cpu_physical_memory_read(data_addr, data, len); - } - - /* TODO bpp */ - s->rfbi.pixels = 0; - - /* TODO: negative values */ - pitch = s->dispc.l[0].nx + (s->dispc.l[0].rowinc - 1) / 2; - - if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) - s->rfbi.chip[0]->block(s->rfbi.chip[0]->opaque, 1, data, len, pitch); - if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) - s->rfbi.chip[1]->block(s->rfbi.chip[1]->opaque, 1, data, len, pitch); - - if (data != bounce_buffer) { - cpu_physical_memory_unmap(data, len, 0, len); - } - - omap_rfbi_transfer_stop(s); - - /* TODO */ - s->dispc.irqst |= 1; /* FRAMEDONE */ - omap_dispc_interrupt_update(s); -} - -static uint64_t omap_rfbi_read(void *opaque, hwaddr addr, unsigned size) -{ - struct omap_dss_s *s = opaque; - - if (size != 4) { - return omap_badwidth_read32(opaque, addr); - } - - switch (addr) { - case 0x00: /* RFBI_REVISION */ - return 0x10; - - case 0x10: /* RFBI_SYSCONFIG */ - return s->rfbi.idlemode; - - case 0x14: /* RFBI_SYSSTATUS */ - return 1 | (s->rfbi.busy << 8); /* RESETDONE */ - - case 0x40: /* RFBI_CONTROL */ - return s->rfbi.control; - - case 0x44: /* RFBI_PIXELCNT */ - return s->rfbi.pixels; - - case 0x48: /* RFBI_LINE_NUMBER */ - return s->rfbi.skiplines; - - case 0x58: /* RFBI_READ */ - case 0x5c: /* RFBI_STATUS */ - return s->rfbi.rxbuf; - - case 0x60: /* RFBI_CONFIG0 */ - return s->rfbi.config[0]; - case 0x64: /* RFBI_ONOFF_TIME0 */ - return 
s->rfbi.time[0]; - case 0x68: /* RFBI_CYCLE_TIME0 */ - return s->rfbi.time[1]; - case 0x6c: /* RFBI_DATA_CYCLE1_0 */ - return s->rfbi.data[0]; - case 0x70: /* RFBI_DATA_CYCLE2_0 */ - return s->rfbi.data[1]; - case 0x74: /* RFBI_DATA_CYCLE3_0 */ - return s->rfbi.data[2]; - - case 0x78: /* RFBI_CONFIG1 */ - return s->rfbi.config[1]; - case 0x7c: /* RFBI_ONOFF_TIME1 */ - return s->rfbi.time[2]; - case 0x80: /* RFBI_CYCLE_TIME1 */ - return s->rfbi.time[3]; - case 0x84: /* RFBI_DATA_CYCLE1_1 */ - return s->rfbi.data[3]; - case 0x88: /* RFBI_DATA_CYCLE2_1 */ - return s->rfbi.data[4]; - case 0x8c: /* RFBI_DATA_CYCLE3_1 */ - return s->rfbi.data[5]; - - case 0x90: /* RFBI_VSYNC_WIDTH */ - return s->rfbi.vsync; - case 0x94: /* RFBI_HSYNC_WIDTH */ - return s->rfbi.hsync; - } - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_rfbi_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_dss_s *s = opaque; - - if (size != 4) { - omap_badwidth_write32(opaque, addr, value); - return; - } - - switch (addr) { - case 0x10: /* RFBI_SYSCONFIG */ - if (value & 2) /* SOFTRESET */ - omap_rfbi_reset(s); - s->rfbi.idlemode = value & 0x19; - break; - - case 0x40: /* RFBI_CONTROL */ - s->rfbi.control = value & 0xf; - s->rfbi.enable = value & 1; - if (value & (1 << 4) && /* ITE */ - !(s->rfbi.config[0] & s->rfbi.config[1] & 0xc)) - omap_rfbi_transfer_start(s); - break; - - case 0x44: /* RFBI_PIXELCNT */ - s->rfbi.pixels = value; - break; - - case 0x48: /* RFBI_LINE_NUMBER */ - s->rfbi.skiplines = value & 0x7ff; - break; - - case 0x4c: /* RFBI_CMD */ - if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) - s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 0, value & 0xffff); - if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) - s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 0, value & 0xffff); - break; - case 0x50: /* RFBI_PARAM */ - if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) - s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 1, value & 0xffff); - if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) - s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 1, value & 0xffff); - break; - case 0x54: /* RFBI_DATA */ - /* TODO: take into account the format set up in s->rfbi.config[?] and - * s->rfbi.data[?], but special-case the most usual scenario so that - * speed doesn't suffer. 
*/ - if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) { - s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 1, value & 0xffff); - s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 1, value >> 16); - } - if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) { - s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 1, value & 0xffff); - s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 1, value >> 16); - } - if (!-- s->rfbi.pixels) - omap_rfbi_transfer_stop(s); - break; - case 0x58: /* RFBI_READ */ - if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) - s->rfbi.rxbuf = s->rfbi.chip[0]->read(s->rfbi.chip[0]->opaque, 1); - else if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) - s->rfbi.rxbuf = s->rfbi.chip[1]->read(s->rfbi.chip[1]->opaque, 1); - if (!-- s->rfbi.pixels) - omap_rfbi_transfer_stop(s); - break; - - case 0x5c: /* RFBI_STATUS */ - if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) - s->rfbi.rxbuf = s->rfbi.chip[0]->read(s->rfbi.chip[0]->opaque, 0); - else if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) - s->rfbi.rxbuf = s->rfbi.chip[1]->read(s->rfbi.chip[1]->opaque, 0); - if (!-- s->rfbi.pixels) - omap_rfbi_transfer_stop(s); - break; - - case 0x60: /* RFBI_CONFIG0 */ - s->rfbi.config[0] = value & 0x003f1fff; - break; - - case 0x64: /* RFBI_ONOFF_TIME0 */ - s->rfbi.time[0] = value & 0x3fffffff; - break; - case 0x68: /* RFBI_CYCLE_TIME0 */ - s->rfbi.time[1] = value & 0x0fffffff; - break; - case 0x6c: /* RFBI_DATA_CYCLE1_0 */ - s->rfbi.data[0] = value & 0x0f1f0f1f; - break; - case 0x70: /* RFBI_DATA_CYCLE2_0 */ - s->rfbi.data[1] = value & 0x0f1f0f1f; - break; - case 0x74: /* RFBI_DATA_CYCLE3_0 */ - s->rfbi.data[2] = value & 0x0f1f0f1f; - break; - case 0x78: /* RFBI_CONFIG1 */ - s->rfbi.config[1] = value & 0x003f1fff; - break; - - case 0x7c: /* RFBI_ONOFF_TIME1 */ - s->rfbi.time[2] = value & 0x3fffffff; - break; - case 0x80: /* RFBI_CYCLE_TIME1 */ - s->rfbi.time[3] = value & 0x0fffffff; - break; - case 0x84: /* RFBI_DATA_CYCLE1_1 */ - s->rfbi.data[3] = value & 0x0f1f0f1f; - break; - case 0x88: /* RFBI_DATA_CYCLE2_1 */ - s->rfbi.data[4] = value & 0x0f1f0f1f; - break; - case 0x8c: /* RFBI_DATA_CYCLE3_1 */ - s->rfbi.data[5] = value & 0x0f1f0f1f; - break; - - case 0x90: /* RFBI_VSYNC_WIDTH */ - s->rfbi.vsync = value & 0xffff; - break; - case 0x94: /* RFBI_HSYNC_WIDTH */ - s->rfbi.hsync = value & 0xffff; - break; - - default: - OMAP_BAD_REG(addr); - } -} - -static const MemoryRegionOps omap_rfbi_ops = { - .read = omap_rfbi_read, - .write = omap_rfbi_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static uint64_t omap_venc_read(void *opaque, hwaddr addr, - unsigned size) -{ - if (size != 4) { - return omap_badwidth_read32(opaque, addr); - } - - switch (addr) { - case 0x00: /* REV_ID */ - case 0x04: /* STATUS */ - case 0x08: /* F_CONTROL */ - case 0x10: /* VIDOUT_CTRL */ - case 0x14: /* SYNC_CTRL */ - case 0x1c: /* LLEN */ - case 0x20: /* FLENS */ - case 0x24: /* HFLTR_CTRL */ - case 0x28: /* CC_CARR_WSS_CARR */ - case 0x2c: /* C_PHASE */ - case 0x30: /* GAIN_U */ - case 0x34: /* GAIN_V */ - case 0x38: /* GAIN_Y */ - case 0x3c: /* BLACK_LEVEL */ - case 0x40: /* BLANK_LEVEL */ - case 0x44: /* X_COLOR */ - case 0x48: /* M_CONTROL */ - case 0x4c: /* BSTAMP_WSS_DATA */ - case 0x50: /* S_CARR */ - case 0x54: /* LINE21 */ - case 0x58: /* LN_SEL */ - case 0x5c: /* L21__WC_CTL */ - case 0x60: /* HTRIGGER_VTRIGGER */ - case 0x64: /* SAVID__EAVID */ - case 0x68: /* FLEN__FAL */ - case 0x6c: /* LAL__PHASE_RESET */ - case 0x70: /* HS_INT_START_STOP_X */ - case 0x74: /* HS_EXT_START_STOP_X */ - case 
0x78: /* VS_INT_START_X */ - case 0x7c: /* VS_INT_STOP_X__VS_INT_START_Y */ - case 0x80: /* VS_INT_STOP_Y__VS_INT_START_X */ - case 0x84: /* VS_EXT_STOP_X__VS_EXT_START_Y */ - case 0x88: /* VS_EXT_STOP_Y */ - case 0x90: /* AVID_START_STOP_X */ - case 0x94: /* AVID_START_STOP_Y */ - case 0xa0: /* FID_INT_START_X__FID_INT_START_Y */ - case 0xa4: /* FID_INT_OFFSET_Y__FID_EXT_START_X */ - case 0xa8: /* FID_EXT_START_Y__FID_EXT_OFFSET_Y */ - case 0xb0: /* TVDETGP_INT_START_STOP_X */ - case 0xb4: /* TVDETGP_INT_START_STOP_Y */ - case 0xb8: /* GEN_CTRL */ - case 0xc4: /* DAC_TST__DAC_A */ - case 0xc8: /* DAC_B__DAC_C */ - return 0; - - default: - break; - } - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_venc_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - if (size != 4) { - omap_badwidth_write32(opaque, addr, size); - return; - } - - switch (addr) { - case 0x08: /* F_CONTROL */ - case 0x10: /* VIDOUT_CTRL */ - case 0x14: /* SYNC_CTRL */ - case 0x1c: /* LLEN */ - case 0x20: /* FLENS */ - case 0x24: /* HFLTR_CTRL */ - case 0x28: /* CC_CARR_WSS_CARR */ - case 0x2c: /* C_PHASE */ - case 0x30: /* GAIN_U */ - case 0x34: /* GAIN_V */ - case 0x38: /* GAIN_Y */ - case 0x3c: /* BLACK_LEVEL */ - case 0x40: /* BLANK_LEVEL */ - case 0x44: /* X_COLOR */ - case 0x48: /* M_CONTROL */ - case 0x4c: /* BSTAMP_WSS_DATA */ - case 0x50: /* S_CARR */ - case 0x54: /* LINE21 */ - case 0x58: /* LN_SEL */ - case 0x5c: /* L21__WC_CTL */ - case 0x60: /* HTRIGGER_VTRIGGER */ - case 0x64: /* SAVID__EAVID */ - case 0x68: /* FLEN__FAL */ - case 0x6c: /* LAL__PHASE_RESET */ - case 0x70: /* HS_INT_START_STOP_X */ - case 0x74: /* HS_EXT_START_STOP_X */ - case 0x78: /* VS_INT_START_X */ - case 0x7c: /* VS_INT_STOP_X__VS_INT_START_Y */ - case 0x80: /* VS_INT_STOP_Y__VS_INT_START_X */ - case 0x84: /* VS_EXT_STOP_X__VS_EXT_START_Y */ - case 0x88: /* VS_EXT_STOP_Y */ - case 0x90: /* AVID_START_STOP_X */ - case 0x94: /* AVID_START_STOP_Y */ - case 0xa0: /* FID_INT_START_X__FID_INT_START_Y */ - case 0xa4: /* FID_INT_OFFSET_Y__FID_EXT_START_X */ - case 0xa8: /* FID_EXT_START_Y__FID_EXT_OFFSET_Y */ - case 0xb0: /* TVDETGP_INT_START_STOP_X */ - case 0xb4: /* TVDETGP_INT_START_STOP_Y */ - case 0xb8: /* GEN_CTRL */ - case 0xc4: /* DAC_TST__DAC_A */ - case 0xc8: /* DAC_B__DAC_C */ - break; - - default: - OMAP_BAD_REG(addr); - } -} - -static const MemoryRegionOps omap_venc_ops = { - .read = omap_venc_read, - .write = omap_venc_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static uint64_t omap_im3_read(void *opaque, hwaddr addr, - unsigned size) -{ - if (size != 4) { - return omap_badwidth_read32(opaque, addr); - } - - switch (addr) { - case 0x0a8: /* SBIMERRLOGA */ - case 0x0b0: /* SBIMERRLOG */ - case 0x190: /* SBIMSTATE */ - case 0x198: /* SBTMSTATE_L */ - case 0x19c: /* SBTMSTATE_H */ - case 0x1a8: /* SBIMCONFIG_L */ - case 0x1ac: /* SBIMCONFIG_H */ - case 0x1f8: /* SBID_L */ - case 0x1fc: /* SBID_H */ - return 0; - - default: - break; - } - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_im3_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - if (size != 4) { - omap_badwidth_write32(opaque, addr, value); - return; - } - - switch (addr) { - case 0x0b0: /* SBIMERRLOG */ - case 0x190: /* SBIMSTATE */ - case 0x198: /* SBTMSTATE_L */ - case 0x19c: /* SBTMSTATE_H */ - case 0x1a8: /* SBIMCONFIG_L */ - case 0x1ac: /* SBIMCONFIG_H */ - break; - - default: - OMAP_BAD_REG(addr); - } -} - -static const MemoryRegionOps omap_im3_ops = { - .read = omap_im3_read, - .write = omap_im3_write, - 
.endianness = DEVICE_NATIVE_ENDIAN, -}; - -struct omap_dss_s *omap_dss_init(struct omap_target_agent_s *ta, - MemoryRegion *sysmem, - hwaddr l3_base, - qemu_irq irq, qemu_irq drq, - omap_clk fck1, omap_clk fck2, omap_clk ck54m, - omap_clk ick1, omap_clk ick2) -{ - struct omap_dss_s *s = g_new0(struct omap_dss_s, 1); - - s->irq = irq; - s->drq = drq; - omap_dss_reset(s); - - memory_region_init_io(&s->iomem_diss1, NULL, &omap_diss_ops, s, "omap.diss1", - omap_l4_region_size(ta, 0)); - memory_region_init_io(&s->iomem_disc1, NULL, &omap_disc_ops, s, "omap.disc1", - omap_l4_region_size(ta, 1)); - memory_region_init_io(&s->iomem_rfbi1, NULL, &omap_rfbi_ops, s, "omap.rfbi1", - omap_l4_region_size(ta, 2)); - memory_region_init_io(&s->iomem_venc1, NULL, &omap_venc_ops, s, "omap.venc1", - omap_l4_region_size(ta, 3)); - memory_region_init_io(&s->iomem_im3, NULL, &omap_im3_ops, s, - "omap.im3", 0x1000); - - omap_l4_attach(ta, 0, &s->iomem_diss1); - omap_l4_attach(ta, 1, &s->iomem_disc1); - omap_l4_attach(ta, 2, &s->iomem_rfbi1); - omap_l4_attach(ta, 3, &s->iomem_venc1); - memory_region_add_subregion(sysmem, l3_base, &s->iomem_im3); - -#if 0 - s->state = graphic_console_init(omap_update_display, - omap_invalidate_display, omap_screen_dump, s); -#endif - - return s; -} - -void omap_rfbi_attach(struct omap_dss_s *s, int cs, struct rfbi_chip_s *chip) -{ - if (cs < 0 || cs > 1) - hw_error("%s: wrong CS %i\n", __func__, cs); - s->rfbi.chip[cs] = chip; -} diff --git a/hw/display/pl110.c b/hw/display/pl110.c index 7f145bbdbab..09c3c59e0ed 100644 --- a/hw/display/pl110.c +++ b/hw/display/pl110.c @@ -535,10 +535,9 @@ static const GraphicHwOps pl110_gfx_ops = { .gfx_update = pl110_update_display, }; -static Property pl110_properties[] = { +static const Property pl110_properties[] = { DEFINE_PROP_LINK("framebuffer-memory", PL110State, fbmem, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; static void pl110_realize(DeviceState *dev, Error **errp) @@ -581,7 +580,7 @@ static void pl111_init(Object *obj) s->version = VERSION_PL111; } -static void pl110_class_init(ObjectClass *klass, void *data) +static void pl110_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/display/pxa2xx_lcd.c b/hw/display/pxa2xx_lcd.c deleted file mode 100644 index a9d0d981a08..00000000000 --- a/hw/display/pxa2xx_lcd.c +++ /dev/null @@ -1,1451 +0,0 @@ -/* - * Intel XScale PXA255/270 LCDC emulation. - * - * Copyright (c) 2006 Openedhand Ltd. - * Written by Andrzej Zaborowski - * - * This code is licensed under the GPLv2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "qemu/osdep.h" -#include "qemu/log.h" -#include "hw/irq.h" -#include "migration/vmstate.h" -#include "ui/console.h" -#include "hw/arm/pxa.h" -#include "ui/pixel_ops.h" -#include "hw/boards.h" -/* FIXME: For graphic_rotate. Should probably be done in common code. 
*/ -#include "sysemu/sysemu.h" -#include "framebuffer.h" - -struct DMAChannel { - uint32_t branch; - uint8_t up; - uint8_t palette[1024]; - uint8_t pbuffer[1024]; - void (*redraw)(PXA2xxLCDState *s, hwaddr addr, - int *miny, int *maxy); - - uint32_t descriptor; - uint32_t source; - uint32_t id; - uint32_t command; -}; - -struct PXA2xxLCDState { - MemoryRegion *sysmem; - MemoryRegion iomem; - MemoryRegionSection fbsection; - qemu_irq irq; - int irqlevel; - - int invalidated; - QemuConsole *con; - int dest_width; - int xres, yres; - int pal_for; - int transp; - enum { - pxa_lcdc_2bpp = 1, - pxa_lcdc_4bpp = 2, - pxa_lcdc_8bpp = 3, - pxa_lcdc_16bpp = 4, - pxa_lcdc_18bpp = 5, - pxa_lcdc_18pbpp = 6, - pxa_lcdc_19bpp = 7, - pxa_lcdc_19pbpp = 8, - pxa_lcdc_24bpp = 9, - pxa_lcdc_25bpp = 10, - } bpp; - - uint32_t control[6]; - uint32_t status[2]; - uint32_t ovl1c[2]; - uint32_t ovl2c[2]; - uint32_t ccr; - uint32_t cmdcr; - uint32_t trgbr; - uint32_t tcr; - uint32_t liidr; - uint8_t bscntr; - - struct DMAChannel dma_ch[7]; - - qemu_irq vsync_cb; - int orientation; -}; - -typedef struct QEMU_PACKED { - uint32_t fdaddr; - uint32_t fsaddr; - uint32_t fidr; - uint32_t ldcmd; -} PXAFrameDescriptor; - -#define LCCR0 0x000 /* LCD Controller Control register 0 */ -#define LCCR1 0x004 /* LCD Controller Control register 1 */ -#define LCCR2 0x008 /* LCD Controller Control register 2 */ -#define LCCR3 0x00c /* LCD Controller Control register 3 */ -#define LCCR4 0x010 /* LCD Controller Control register 4 */ -#define LCCR5 0x014 /* LCD Controller Control register 5 */ - -#define FBR0 0x020 /* DMA Channel 0 Frame Branch register */ -#define FBR1 0x024 /* DMA Channel 1 Frame Branch register */ -#define FBR2 0x028 /* DMA Channel 2 Frame Branch register */ -#define FBR3 0x02c /* DMA Channel 3 Frame Branch register */ -#define FBR4 0x030 /* DMA Channel 4 Frame Branch register */ -#define FBR5 0x110 /* DMA Channel 5 Frame Branch register */ -#define FBR6 0x114 /* DMA Channel 6 Frame Branch register */ - -#define LCSR1 0x034 /* LCD Controller Status register 1 */ -#define LCSR0 0x038 /* LCD Controller Status register 0 */ -#define LIIDR 0x03c /* LCD Controller Interrupt ID register */ - -#define TRGBR 0x040 /* TMED RGB Seed register */ -#define TCR 0x044 /* TMED Control register */ - -#define OVL1C1 0x050 /* Overlay 1 Control register 1 */ -#define OVL1C2 0x060 /* Overlay 1 Control register 2 */ -#define OVL2C1 0x070 /* Overlay 2 Control register 1 */ -#define OVL2C2 0x080 /* Overlay 2 Control register 2 */ -#define CCR 0x090 /* Cursor Control register */ - -#define CMDCR 0x100 /* Command Control register */ -#define PRSR 0x104 /* Panel Read Status register */ - -#define PXA_LCDDMA_CHANS 7 -#define DMA_FDADR 0x00 /* Frame Descriptor Address register */ -#define DMA_FSADR 0x04 /* Frame Source Address register */ -#define DMA_FIDR 0x08 /* Frame ID register */ -#define DMA_LDCMD 0x0c /* Command register */ - -/* LCD Buffer Strength Control register */ -#define BSCNTR 0x04000054 - -/* Bitfield masks */ -#define LCCR0_ENB (1 << 0) -#define LCCR0_CMS (1 << 1) -#define LCCR0_SDS (1 << 2) -#define LCCR0_LDM (1 << 3) -#define LCCR0_SOFM0 (1 << 4) -#define LCCR0_IUM (1 << 5) -#define LCCR0_EOFM0 (1 << 6) -#define LCCR0_PAS (1 << 7) -#define LCCR0_DPD (1 << 9) -#define LCCR0_DIS (1 << 10) -#define LCCR0_QDM (1 << 11) -#define LCCR0_PDD (0xff << 12) -#define LCCR0_BSM0 (1 << 20) -#define LCCR0_OUM (1 << 21) -#define LCCR0_LCDT (1 << 22) -#define LCCR0_RDSTM (1 << 23) -#define LCCR0_CMDIM (1 << 24) -#define LCCR0_OUC (1 << 25) 
-#define LCCR0_LDDALT (1 << 26) -#define LCCR1_PPL(x) ((x) & 0x3ff) -#define LCCR2_LPP(x) ((x) & 0x3ff) -#define LCCR3_API (15 << 16) -#define LCCR3_BPP(x) ((((x) >> 24) & 7) | (((x) >> 26) & 8)) -#define LCCR3_PDFOR(x) (((x) >> 30) & 3) -#define LCCR4_K1(x) (((x) >> 0) & 7) -#define LCCR4_K2(x) (((x) >> 3) & 7) -#define LCCR4_K3(x) (((x) >> 6) & 7) -#define LCCR4_PALFOR(x) (((x) >> 15) & 3) -#define LCCR5_SOFM(ch) (1 << (ch - 1)) -#define LCCR5_EOFM(ch) (1 << (ch + 7)) -#define LCCR5_BSM(ch) (1 << (ch + 15)) -#define LCCR5_IUM(ch) (1 << (ch + 23)) -#define OVLC1_EN (1 << 31) -#define CCR_CEN (1 << 31) -#define FBR_BRA (1 << 0) -#define FBR_BINT (1 << 1) -#define FBR_SRCADDR (0xfffffff << 4) -#define LCSR0_LDD (1 << 0) -#define LCSR0_SOF0 (1 << 1) -#define LCSR0_BER (1 << 2) -#define LCSR0_ABC (1 << 3) -#define LCSR0_IU0 (1 << 4) -#define LCSR0_IU1 (1 << 5) -#define LCSR0_OU (1 << 6) -#define LCSR0_QD (1 << 7) -#define LCSR0_EOF0 (1 << 8) -#define LCSR0_BS0 (1 << 9) -#define LCSR0_SINT (1 << 10) -#define LCSR0_RDST (1 << 11) -#define LCSR0_CMDINT (1 << 12) -#define LCSR0_BERCH(x) (((x) & 7) << 28) -#define LCSR1_SOF(ch) (1 << (ch - 1)) -#define LCSR1_EOF(ch) (1 << (ch + 7)) -#define LCSR1_BS(ch) (1 << (ch + 15)) -#define LCSR1_IU(ch) (1 << (ch + 23)) -#define LDCMD_LENGTH(x) ((x) & 0x001ffffc) -#define LDCMD_EOFINT (1 << 21) -#define LDCMD_SOFINT (1 << 22) -#define LDCMD_PAL (1 << 26) - -/* Size of a pixel in the QEMU UI output surface, in bytes */ -#define DEST_PIXEL_WIDTH 4 - -/* Line drawing code to handle the various possible guest pixel formats */ - -# define SKIP_PIXEL(to) do { to += deststep; } while (0) -# define COPY_PIXEL(to, from) \ - do { \ - *(uint32_t *) to = from; \ - SKIP_PIXEL(to); \ - } while (0) - -#if HOST_BIG_ENDIAN -# define SWAP_WORDS 1 -#endif - -#define FN_2(x) FN(x + 1) FN(x) -#define FN_4(x) FN_2(x + 2) FN_2(x) - -static void pxa2xx_draw_line2(void *opaque, uint8_t *dest, const uint8_t *src, - int width, int deststep) -{ - uint32_t *palette = opaque; - uint32_t data; - while (width > 0) { - data = *(uint32_t *) src; -#define FN(x) COPY_PIXEL(dest, palette[(data >> ((x) * 2)) & 3]); -#ifdef SWAP_WORDS - FN_4(12) - FN_4(8) - FN_4(4) - FN_4(0) -#else - FN_4(0) - FN_4(4) - FN_4(8) - FN_4(12) -#endif -#undef FN - width -= 16; - src += 4; - } -} - -static void pxa2xx_draw_line4(void *opaque, uint8_t *dest, const uint8_t *src, - int width, int deststep) -{ - uint32_t *palette = opaque; - uint32_t data; - while (width > 0) { - data = *(uint32_t *) src; -#define FN(x) COPY_PIXEL(dest, palette[(data >> ((x) * 4)) & 0xf]); -#ifdef SWAP_WORDS - FN_2(6) - FN_2(4) - FN_2(2) - FN_2(0) -#else - FN_2(0) - FN_2(2) - FN_2(4) - FN_2(6) -#endif -#undef FN - width -= 8; - src += 4; - } -} - -static void pxa2xx_draw_line8(void *opaque, uint8_t *dest, const uint8_t *src, - int width, int deststep) -{ - uint32_t *palette = opaque; - uint32_t data; - while (width > 0) { - data = *(uint32_t *) src; -#define FN(x) COPY_PIXEL(dest, palette[(data >> (x)) & 0xff]); -#ifdef SWAP_WORDS - FN(24) - FN(16) - FN(8) - FN(0) -#else - FN(0) - FN(8) - FN(16) - FN(24) -#endif -#undef FN - width -= 4; - src += 4; - } -} - -static void pxa2xx_draw_line16(void *opaque, uint8_t *dest, const uint8_t *src, - int width, int deststep) -{ - uint32_t data; - unsigned int r, g, b; - while (width > 0) { - data = *(uint32_t *) src; -#ifdef SWAP_WORDS - data = bswap32(data); -#endif - b = (data & 0x1f) << 3; - data >>= 5; - g = (data & 0x3f) << 2; - data >>= 6; - r = (data & 0x1f) << 3; - data >>= 5; - 
COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - b = (data & 0x1f) << 3; - data >>= 5; - g = (data & 0x3f) << 2; - data >>= 6; - r = (data & 0x1f) << 3; - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - width -= 2; - src += 4; - } -} - -static void pxa2xx_draw_line16t(void *opaque, uint8_t *dest, const uint8_t *src, - int width, int deststep) -{ - uint32_t data; - unsigned int r, g, b; - while (width > 0) { - data = *(uint32_t *) src; -#ifdef SWAP_WORDS - data = bswap32(data); -#endif - b = (data & 0x1f) << 3; - data >>= 5; - g = (data & 0x1f) << 3; - data >>= 5; - r = (data & 0x1f) << 3; - data >>= 5; - if (data & 1) { - SKIP_PIXEL(dest); - } else { - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - } - data >>= 1; - b = (data & 0x1f) << 3; - data >>= 5; - g = (data & 0x1f) << 3; - data >>= 5; - r = (data & 0x1f) << 3; - data >>= 5; - if (data & 1) { - SKIP_PIXEL(dest); - } else { - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - } - width -= 2; - src += 4; - } -} - -static void pxa2xx_draw_line18(void *opaque, uint8_t *dest, const uint8_t *src, - int width, int deststep) -{ - uint32_t data; - unsigned int r, g, b; - while (width > 0) { - data = *(uint32_t *) src; -#ifdef SWAP_WORDS - data = bswap32(data); -#endif - b = (data & 0x3f) << 2; - data >>= 6; - g = (data & 0x3f) << 2; - data >>= 6; - r = (data & 0x3f) << 2; - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - width -= 1; - src += 4; - } -} - -/* The wicked packed format */ -static void pxa2xx_draw_line18p(void *opaque, uint8_t *dest, const uint8_t *src, - int width, int deststep) -{ - uint32_t data[3]; - unsigned int r, g, b; - while (width > 0) { - data[0] = *(uint32_t *) src; - src += 4; - data[1] = *(uint32_t *) src; - src += 4; - data[2] = *(uint32_t *) src; - src += 4; -#ifdef SWAP_WORDS - data[0] = bswap32(data[0]); - data[1] = bswap32(data[1]); - data[2] = bswap32(data[2]); -#endif - b = (data[0] & 0x3f) << 2; - data[0] >>= 6; - g = (data[0] & 0x3f) << 2; - data[0] >>= 6; - r = (data[0] & 0x3f) << 2; - data[0] >>= 12; - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - b = (data[0] & 0x3f) << 2; - data[0] >>= 6; - g = ((data[1] & 0xf) << 4) | (data[0] << 2); - data[1] >>= 4; - r = (data[1] & 0x3f) << 2; - data[1] >>= 12; - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - b = (data[1] & 0x3f) << 2; - data[1] >>= 6; - g = (data[1] & 0x3f) << 2; - data[1] >>= 6; - r = ((data[2] & 0x3) << 6) | (data[1] << 2); - data[2] >>= 8; - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - b = (data[2] & 0x3f) << 2; - data[2] >>= 6; - g = (data[2] & 0x3f) << 2; - data[2] >>= 6; - r = data[2] << 2; - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - width -= 4; - } -} - -static void pxa2xx_draw_line19(void *opaque, uint8_t *dest, const uint8_t *src, - int width, int deststep) -{ - uint32_t data; - unsigned int r, g, b; - while (width > 0) { - data = *(uint32_t *) src; -#ifdef SWAP_WORDS - data = bswap32(data); -#endif - b = (data & 0x3f) << 2; - data >>= 6; - g = (data & 0x3f) << 2; - data >>= 6; - r = (data & 0x3f) << 2; - data >>= 6; - if (data & 1) { - SKIP_PIXEL(dest); - } else { - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - } - width -= 1; - src += 4; - } -} - -/* The wicked packed format */ -static void pxa2xx_draw_line19p(void *opaque, uint8_t *dest, const uint8_t *src, - int width, int deststep) -{ - uint32_t data[3]; - unsigned int r, g, b; - while (width > 0) { - data[0] = *(uint32_t *) src; - src += 4; - data[1] = *(uint32_t *) src; - src += 4; - data[2] = *(uint32_t *) src; - src += 4; -# ifdef SWAP_WORDS - data[0] = bswap32(data[0]); - data[1] = bswap32(data[1]); 
- data[2] = bswap32(data[2]); -# endif - b = (data[0] & 0x3f) << 2; - data[0] >>= 6; - g = (data[0] & 0x3f) << 2; - data[0] >>= 6; - r = (data[0] & 0x3f) << 2; - data[0] >>= 6; - if (data[0] & 1) { - SKIP_PIXEL(dest); - } else { - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - } - data[0] >>= 6; - b = (data[0] & 0x3f) << 2; - data[0] >>= 6; - g = ((data[1] & 0xf) << 4) | (data[0] << 2); - data[1] >>= 4; - r = (data[1] & 0x3f) << 2; - data[1] >>= 6; - if (data[1] & 1) { - SKIP_PIXEL(dest); - } else { - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - } - data[1] >>= 6; - b = (data[1] & 0x3f) << 2; - data[1] >>= 6; - g = (data[1] & 0x3f) << 2; - data[1] >>= 6; - r = ((data[2] & 0x3) << 6) | (data[1] << 2); - data[2] >>= 2; - if (data[2] & 1) { - SKIP_PIXEL(dest); - } else { - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - } - data[2] >>= 6; - b = (data[2] & 0x3f) << 2; - data[2] >>= 6; - g = (data[2] & 0x3f) << 2; - data[2] >>= 6; - r = data[2] << 2; - data[2] >>= 6; - if (data[2] & 1) { - SKIP_PIXEL(dest); - } else { - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - } - width -= 4; - } -} - -static void pxa2xx_draw_line24(void *opaque, uint8_t *dest, const uint8_t *src, - int width, int deststep) -{ - uint32_t data; - unsigned int r, g, b; - while (width > 0) { - data = *(uint32_t *) src; -#ifdef SWAP_WORDS - data = bswap32(data); -#endif - b = data & 0xff; - data >>= 8; - g = data & 0xff; - data >>= 8; - r = data & 0xff; - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - width -= 1; - src += 4; - } -} - -static void pxa2xx_draw_line24t(void *opaque, uint8_t *dest, const uint8_t *src, - int width, int deststep) -{ - uint32_t data; - unsigned int r, g, b; - while (width > 0) { - data = *(uint32_t *) src; -#ifdef SWAP_WORDS - data = bswap32(data); -#endif - b = (data & 0x7f) << 1; - data >>= 7; - g = data & 0xff; - data >>= 8; - r = data & 0xff; - data >>= 8; - if (data & 1) { - SKIP_PIXEL(dest); - } else { - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - } - width -= 1; - src += 4; - } -} - -static void pxa2xx_draw_line25(void *opaque, uint8_t *dest, const uint8_t *src, - int width, int deststep) -{ - uint32_t data; - unsigned int r, g, b; - while (width > 0) { - data = *(uint32_t *) src; -#ifdef SWAP_WORDS - data = bswap32(data); -#endif - b = data & 0xff; - data >>= 8; - g = data & 0xff; - data >>= 8; - r = data & 0xff; - data >>= 8; - if (data & 1) { - SKIP_PIXEL(dest); - } else { - COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); - } - width -= 1; - src += 4; - } -} - -/* Overlay planes disabled, no transparency */ -static drawfn pxa2xx_draw_fn_32[16] = { - [0 ... 0xf] = NULL, - [pxa_lcdc_2bpp] = pxa2xx_draw_line2, - [pxa_lcdc_4bpp] = pxa2xx_draw_line4, - [pxa_lcdc_8bpp] = pxa2xx_draw_line8, - [pxa_lcdc_16bpp] = pxa2xx_draw_line16, - [pxa_lcdc_18bpp] = pxa2xx_draw_line18, - [pxa_lcdc_18pbpp] = pxa2xx_draw_line18p, - [pxa_lcdc_24bpp] = pxa2xx_draw_line24, -}; - -/* Overlay planes enabled, transparency used */ -static drawfn pxa2xx_draw_fn_32t[16] = { - [0 ... 
0xf] = NULL, - [pxa_lcdc_4bpp] = pxa2xx_draw_line4, - [pxa_lcdc_8bpp] = pxa2xx_draw_line8, - [pxa_lcdc_16bpp] = pxa2xx_draw_line16t, - [pxa_lcdc_19bpp] = pxa2xx_draw_line19, - [pxa_lcdc_19pbpp] = pxa2xx_draw_line19p, - [pxa_lcdc_24bpp] = pxa2xx_draw_line24t, - [pxa_lcdc_25bpp] = pxa2xx_draw_line25, -}; - -#undef COPY_PIXEL -#undef SKIP_PIXEL - -#ifdef SWAP_WORDS -# undef SWAP_WORDS -#endif - -/* Route internal interrupt lines to the global IC */ -static void pxa2xx_lcdc_int_update(PXA2xxLCDState *s) -{ - int level = 0; - level |= (s->status[0] & LCSR0_LDD) && !(s->control[0] & LCCR0_LDM); - level |= (s->status[0] & LCSR0_SOF0) && !(s->control[0] & LCCR0_SOFM0); - level |= (s->status[0] & LCSR0_IU0) && !(s->control[0] & LCCR0_IUM); - level |= (s->status[0] & LCSR0_IU1) && !(s->control[5] & LCCR5_IUM(1)); - level |= (s->status[0] & LCSR0_OU) && !(s->control[0] & LCCR0_OUM); - level |= (s->status[0] & LCSR0_QD) && !(s->control[0] & LCCR0_QDM); - level |= (s->status[0] & LCSR0_EOF0) && !(s->control[0] & LCCR0_EOFM0); - level |= (s->status[0] & LCSR0_BS0) && !(s->control[0] & LCCR0_BSM0); - level |= (s->status[0] & LCSR0_RDST) && !(s->control[0] & LCCR0_RDSTM); - level |= (s->status[0] & LCSR0_CMDINT) && !(s->control[0] & LCCR0_CMDIM); - level |= (s->status[1] & ~s->control[5]); - - qemu_set_irq(s->irq, !!level); - s->irqlevel = level; -} - -/* Set Branch Status interrupt high and poke associated registers */ -static inline void pxa2xx_dma_bs_set(PXA2xxLCDState *s, int ch) -{ - int unmasked; - if (ch == 0) { - s->status[0] |= LCSR0_BS0; - unmasked = !(s->control[0] & LCCR0_BSM0); - } else { - s->status[1] |= LCSR1_BS(ch); - unmasked = !(s->control[5] & LCCR5_BSM(ch)); - } - - if (unmasked) { - if (s->irqlevel) - s->status[0] |= LCSR0_SINT; - else - s->liidr = s->dma_ch[ch].id; - } -} - -/* Set Start Of Frame Status interrupt high and poke associated registers */ -static inline void pxa2xx_dma_sof_set(PXA2xxLCDState *s, int ch) -{ - int unmasked; - if (!(s->dma_ch[ch].command & LDCMD_SOFINT)) - return; - - if (ch == 0) { - s->status[0] |= LCSR0_SOF0; - unmasked = !(s->control[0] & LCCR0_SOFM0); - } else { - s->status[1] |= LCSR1_SOF(ch); - unmasked = !(s->control[5] & LCCR5_SOFM(ch)); - } - - if (unmasked) { - if (s->irqlevel) - s->status[0] |= LCSR0_SINT; - else - s->liidr = s->dma_ch[ch].id; - } -} - -/* Set End Of Frame Status interrupt high and poke associated registers */ -static inline void pxa2xx_dma_eof_set(PXA2xxLCDState *s, int ch) -{ - int unmasked; - if (!(s->dma_ch[ch].command & LDCMD_EOFINT)) - return; - - if (ch == 0) { - s->status[0] |= LCSR0_EOF0; - unmasked = !(s->control[0] & LCCR0_EOFM0); - } else { - s->status[1] |= LCSR1_EOF(ch); - unmasked = !(s->control[5] & LCCR5_EOFM(ch)); - } - - if (unmasked) { - if (s->irqlevel) - s->status[0] |= LCSR0_SINT; - else - s->liidr = s->dma_ch[ch].id; - } -} - -/* Set Bus Error Status interrupt high and poke associated registers */ -static inline void pxa2xx_dma_ber_set(PXA2xxLCDState *s, int ch) -{ - s->status[0] |= LCSR0_BERCH(ch) | LCSR0_BER; - if (s->irqlevel) - s->status[0] |= LCSR0_SINT; - else - s->liidr = s->dma_ch[ch].id; -} - -/* Load new Frame Descriptors from DMA */ -static void pxa2xx_descriptor_load(PXA2xxLCDState *s) -{ - PXAFrameDescriptor desc; - hwaddr descptr; - int i; - - for (i = 0; i < PXA_LCDDMA_CHANS; i ++) { - s->dma_ch[i].source = 0; - - if (!s->dma_ch[i].up) - continue; - - if (s->dma_ch[i].branch & FBR_BRA) { - descptr = s->dma_ch[i].branch & FBR_SRCADDR; - if (s->dma_ch[i].branch & FBR_BINT) - 
pxa2xx_dma_bs_set(s, i); - s->dma_ch[i].branch &= ~FBR_BRA; - } else - descptr = s->dma_ch[i].descriptor; - - if (!((descptr >= PXA2XX_SDRAM_BASE && descptr + - sizeof(desc) <= PXA2XX_SDRAM_BASE + current_machine->ram_size) || - (descptr >= PXA2XX_INTERNAL_BASE && descptr + sizeof(desc) <= - PXA2XX_INTERNAL_BASE + PXA2XX_INTERNAL_SIZE))) { - continue; - } - - cpu_physical_memory_read(descptr, &desc, sizeof(desc)); - s->dma_ch[i].descriptor = le32_to_cpu(desc.fdaddr); - s->dma_ch[i].source = le32_to_cpu(desc.fsaddr); - s->dma_ch[i].id = le32_to_cpu(desc.fidr); - s->dma_ch[i].command = le32_to_cpu(desc.ldcmd); - } -} - -static uint64_t pxa2xx_lcdc_read(void *opaque, hwaddr offset, - unsigned size) -{ - PXA2xxLCDState *s = (PXA2xxLCDState *) opaque; - int ch; - - switch (offset) { - case LCCR0: - return s->control[0]; - case LCCR1: - return s->control[1]; - case LCCR2: - return s->control[2]; - case LCCR3: - return s->control[3]; - case LCCR4: - return s->control[4]; - case LCCR5: - return s->control[5]; - - case OVL1C1: - return s->ovl1c[0]; - case OVL1C2: - return s->ovl1c[1]; - case OVL2C1: - return s->ovl2c[0]; - case OVL2C2: - return s->ovl2c[1]; - - case CCR: - return s->ccr; - - case CMDCR: - return s->cmdcr; - - case TRGBR: - return s->trgbr; - case TCR: - return s->tcr; - - case 0x200 ... 0x1000: /* DMA per-channel registers */ - ch = (offset - 0x200) >> 4; - if (!(ch >= 0 && ch < PXA_LCDDMA_CHANS)) - goto fail; - - switch (offset & 0xf) { - case DMA_FDADR: - return s->dma_ch[ch].descriptor; - case DMA_FSADR: - return s->dma_ch[ch].source; - case DMA_FIDR: - return s->dma_ch[ch].id; - case DMA_LDCMD: - return s->dma_ch[ch].command; - default: - goto fail; - } - - case FBR0: - return s->dma_ch[0].branch; - case FBR1: - return s->dma_ch[1].branch; - case FBR2: - return s->dma_ch[2].branch; - case FBR3: - return s->dma_ch[3].branch; - case FBR4: - return s->dma_ch[4].branch; - case FBR5: - return s->dma_ch[5].branch; - case FBR6: - return s->dma_ch[6].branch; - - case BSCNTR: - return s->bscntr; - - case PRSR: - return 0; - - case LCSR0: - return s->status[0]; - case LCSR1: - return s->status[1]; - case LIIDR: - return s->liidr; - - default: - fail: - qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", - __func__, offset); - } - - return 0; -} - -static void pxa2xx_lcdc_write(void *opaque, hwaddr offset, - uint64_t value, unsigned size) -{ - PXA2xxLCDState *s = (PXA2xxLCDState *) opaque; - int ch; - - switch (offset) { - case LCCR0: - /* ACK Quick Disable done */ - if ((s->control[0] & LCCR0_ENB) && !(value & LCCR0_ENB)) - s->status[0] |= LCSR0_QD; - - if (!(s->control[0] & LCCR0_LCDT) && (value & LCCR0_LCDT)) { - qemu_log_mask(LOG_UNIMP, - "%s: internal frame buffer unsupported\n", __func__); - } - if ((s->control[3] & LCCR3_API) && - (value & LCCR0_ENB) && !(value & LCCR0_LCDT)) - s->status[0] |= LCSR0_ABC; - - s->control[0] = value & 0x07ffffff; - pxa2xx_lcdc_int_update(s); - - s->dma_ch[0].up = !!(value & LCCR0_ENB); - s->dma_ch[1].up = (s->ovl1c[0] & OVLC1_EN) || (value & LCCR0_SDS); - break; - - case LCCR1: - s->control[1] = value; - break; - - case LCCR2: - s->control[2] = value; - break; - - case LCCR3: - s->control[3] = value & 0xefffffff; - s->bpp = LCCR3_BPP(value); - break; - - case LCCR4: - s->control[4] = value & 0x83ff81ff; - break; - - case LCCR5: - s->control[5] = value & 0x3f3f3f3f; - break; - - case OVL1C1: - if (!(s->ovl1c[0] & OVLC1_EN) && (value & OVLC1_EN)) { - qemu_log_mask(LOG_UNIMP, "%s: Overlay 1 not supported\n", __func__); - } - s->ovl1c[0] 
= value & 0x80ffffff; - s->dma_ch[1].up = (value & OVLC1_EN) || (s->control[0] & LCCR0_SDS); - break; - - case OVL1C2: - s->ovl1c[1] = value & 0x000fffff; - break; - - case OVL2C1: - if (!(s->ovl2c[0] & OVLC1_EN) && (value & OVLC1_EN)) { - qemu_log_mask(LOG_UNIMP, "%s: Overlay 2 not supported\n", __func__); - } - s->ovl2c[0] = value & 0x80ffffff; - s->dma_ch[2].up = !!(value & OVLC1_EN); - s->dma_ch[3].up = !!(value & OVLC1_EN); - s->dma_ch[4].up = !!(value & OVLC1_EN); - break; - - case OVL2C2: - s->ovl2c[1] = value & 0x007fffff; - break; - - case CCR: - if (!(s->ccr & CCR_CEN) && (value & CCR_CEN)) { - qemu_log_mask(LOG_UNIMP, - "%s: Hardware cursor unimplemented\n", __func__); - } - s->ccr = value & 0x81ffffe7; - s->dma_ch[5].up = !!(value & CCR_CEN); - break; - - case CMDCR: - s->cmdcr = value & 0xff; - break; - - case TRGBR: - s->trgbr = value & 0x00ffffff; - break; - - case TCR: - s->tcr = value & 0x7fff; - break; - - case 0x200 ... 0x1000: /* DMA per-channel registers */ - ch = (offset - 0x200) >> 4; - if (!(ch >= 0 && ch < PXA_LCDDMA_CHANS)) - goto fail; - - switch (offset & 0xf) { - case DMA_FDADR: - s->dma_ch[ch].descriptor = value & 0xfffffff0; - break; - - default: - goto fail; - } - break; - - case FBR0: - s->dma_ch[0].branch = value & 0xfffffff3; - break; - case FBR1: - s->dma_ch[1].branch = value & 0xfffffff3; - break; - case FBR2: - s->dma_ch[2].branch = value & 0xfffffff3; - break; - case FBR3: - s->dma_ch[3].branch = value & 0xfffffff3; - break; - case FBR4: - s->dma_ch[4].branch = value & 0xfffffff3; - break; - case FBR5: - s->dma_ch[5].branch = value & 0xfffffff3; - break; - case FBR6: - s->dma_ch[6].branch = value & 0xfffffff3; - break; - - case BSCNTR: - s->bscntr = value & 0xf; - break; - - case PRSR: - break; - - case LCSR0: - s->status[0] &= ~(value & 0xfff); - if (value & LCSR0_BER) - s->status[0] &= ~LCSR0_BERCH(7); - break; - - case LCSR1: - s->status[1] &= ~(value & 0x3e3f3f); - break; - - default: - fail: - qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", - __func__, offset); - } -} - -static const MemoryRegionOps pxa2xx_lcdc_ops = { - .read = pxa2xx_lcdc_read, - .write = pxa2xx_lcdc_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -/* Load new palette for a given DMA channel, convert to internal format */ -static void pxa2xx_palette_parse(PXA2xxLCDState *s, int ch, int bpp) -{ - DisplaySurface *surface = qemu_console_surface(s->con); - int i, n, format, r, g, b, alpha; - uint32_t *dest; - uint8_t *src; - s->pal_for = LCCR4_PALFOR(s->control[4]); - format = s->pal_for; - - switch (bpp) { - case pxa_lcdc_2bpp: - n = 4; - break; - case pxa_lcdc_4bpp: - n = 16; - break; - case pxa_lcdc_8bpp: - n = 256; - break; - default: - return; - } - - src = (uint8_t *) s->dma_ch[ch].pbuffer; - dest = (uint32_t *) s->dma_ch[ch].palette; - alpha = r = g = b = 0; - - for (i = 0; i < n; i ++) { - switch (format) { - case 0: /* 16 bpp, no transparency */ - alpha = 0; - if (s->control[0] & LCCR0_CMS) { - r = g = b = *(uint16_t *) src & 0xff; - } - else { - r = (*(uint16_t *) src & 0xf800) >> 8; - g = (*(uint16_t *) src & 0x07e0) >> 3; - b = (*(uint16_t *) src & 0x001f) << 3; - } - src += 2; - break; - case 1: /* 16 bpp plus transparency */ - alpha = *(uint32_t *) src & (1 << 24); - if (s->control[0] & LCCR0_CMS) - r = g = b = *(uint32_t *) src & 0xff; - else { - r = (*(uint32_t *) src & 0xf80000) >> 16; - g = (*(uint32_t *) src & 0x00fc00) >> 8; - b = (*(uint32_t *) src & 0x0000f8); - } - src += 4; - break; - case 2: /* 18 bpp plus transparency */ - alpha = 
*(uint32_t *) src & (1 << 24); - if (s->control[0] & LCCR0_CMS) - r = g = b = *(uint32_t *) src & 0xff; - else { - r = (*(uint32_t *) src & 0xfc0000) >> 16; - g = (*(uint32_t *) src & 0x00fc00) >> 8; - b = (*(uint32_t *) src & 0x0000fc); - } - src += 4; - break; - case 3: /* 24 bpp plus transparency */ - alpha = *(uint32_t *) src & (1 << 24); - if (s->control[0] & LCCR0_CMS) - r = g = b = *(uint32_t *) src & 0xff; - else { - r = (*(uint32_t *) src & 0xff0000) >> 16; - g = (*(uint32_t *) src & 0x00ff00) >> 8; - b = (*(uint32_t *) src & 0x0000ff); - } - src += 4; - break; - } - switch (surface_bits_per_pixel(surface)) { - case 8: - *dest = rgb_to_pixel8(r, g, b) | alpha; - break; - case 15: - *dest = rgb_to_pixel15(r, g, b) | alpha; - break; - case 16: - *dest = rgb_to_pixel16(r, g, b) | alpha; - break; - case 24: - *dest = rgb_to_pixel24(r, g, b) | alpha; - break; - case 32: - *dest = rgb_to_pixel32(r, g, b) | alpha; - break; - } - dest ++; - } -} - -static inline drawfn pxa2xx_drawfn(PXA2xxLCDState *s) -{ - if (s->transp) { - return pxa2xx_draw_fn_32t[s->bpp]; - } else { - return pxa2xx_draw_fn_32[s->bpp]; - } -} - -static void pxa2xx_lcdc_dma0_redraw_rot0(PXA2xxLCDState *s, - hwaddr addr, int *miny, int *maxy) -{ - DisplaySurface *surface = qemu_console_surface(s->con); - int src_width, dest_width; - drawfn fn = pxa2xx_drawfn(s); - if (!fn) - return; - - src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */ - if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp) - src_width *= 3; - else if (s->bpp > pxa_lcdc_16bpp) - src_width *= 4; - else if (s->bpp > pxa_lcdc_8bpp) - src_width *= 2; - - dest_width = s->xres * DEST_PIXEL_WIDTH; - *miny = 0; - if (s->invalidated) { - framebuffer_update_memory_section(&s->fbsection, s->sysmem, - addr, s->yres, src_width); - } - framebuffer_update_display(surface, &s->fbsection, s->xres, s->yres, - src_width, dest_width, DEST_PIXEL_WIDTH, - s->invalidated, - fn, s->dma_ch[0].palette, miny, maxy); -} - -static void pxa2xx_lcdc_dma0_redraw_rot90(PXA2xxLCDState *s, - hwaddr addr, int *miny, int *maxy) -{ - DisplaySurface *surface = qemu_console_surface(s->con); - int src_width, dest_width; - drawfn fn = pxa2xx_drawfn(s); - if (!fn) - return; - - src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */ - if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp) - src_width *= 3; - else if (s->bpp > pxa_lcdc_16bpp) - src_width *= 4; - else if (s->bpp > pxa_lcdc_8bpp) - src_width *= 2; - - dest_width = s->yres * DEST_PIXEL_WIDTH; - *miny = 0; - if (s->invalidated) { - framebuffer_update_memory_section(&s->fbsection, s->sysmem, - addr, s->yres, src_width); - } - framebuffer_update_display(surface, &s->fbsection, s->xres, s->yres, - src_width, DEST_PIXEL_WIDTH, -dest_width, - s->invalidated, - fn, s->dma_ch[0].palette, - miny, maxy); -} - -static void pxa2xx_lcdc_dma0_redraw_rot180(PXA2xxLCDState *s, - hwaddr addr, int *miny, int *maxy) -{ - DisplaySurface *surface = qemu_console_surface(s->con); - int src_width, dest_width; - drawfn fn = pxa2xx_drawfn(s); - if (!fn) { - return; - } - - src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */ - if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp) { - src_width *= 3; - } else if (s->bpp > pxa_lcdc_16bpp) { - src_width *= 4; - } else if (s->bpp > pxa_lcdc_8bpp) { - src_width *= 2; - } - - dest_width = s->xres * DEST_PIXEL_WIDTH; - *miny = 0; - if (s->invalidated) { - framebuffer_update_memory_section(&s->fbsection, s->sysmem, - addr, s->yres, src_width); - } - 
framebuffer_update_display(surface, &s->fbsection, s->xres, s->yres, - src_width, -dest_width, -DEST_PIXEL_WIDTH, - s->invalidated, - fn, s->dma_ch[0].palette, miny, maxy); -} - -static void pxa2xx_lcdc_dma0_redraw_rot270(PXA2xxLCDState *s, - hwaddr addr, int *miny, int *maxy) -{ - DisplaySurface *surface = qemu_console_surface(s->con); - int src_width, dest_width; - drawfn fn = pxa2xx_drawfn(s); - if (!fn) { - return; - } - - src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */ - if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp) { - src_width *= 3; - } else if (s->bpp > pxa_lcdc_16bpp) { - src_width *= 4; - } else if (s->bpp > pxa_lcdc_8bpp) { - src_width *= 2; - } - - dest_width = s->yres * DEST_PIXEL_WIDTH; - *miny = 0; - if (s->invalidated) { - framebuffer_update_memory_section(&s->fbsection, s->sysmem, - addr, s->yres, src_width); - } - framebuffer_update_display(surface, &s->fbsection, s->xres, s->yres, - src_width, -DEST_PIXEL_WIDTH, dest_width, - s->invalidated, - fn, s->dma_ch[0].palette, - miny, maxy); -} - -static void pxa2xx_lcdc_resize(PXA2xxLCDState *s) -{ - int width, height; - if (!(s->control[0] & LCCR0_ENB)) - return; - - width = LCCR1_PPL(s->control[1]) + 1; - height = LCCR2_LPP(s->control[2]) + 1; - - if (width != s->xres || height != s->yres) { - if (s->orientation == 90 || s->orientation == 270) { - qemu_console_resize(s->con, height, width); - } else { - qemu_console_resize(s->con, width, height); - } - s->invalidated = 1; - s->xres = width; - s->yres = height; - } -} - -static void pxa2xx_update_display(void *opaque) -{ - PXA2xxLCDState *s = (PXA2xxLCDState *) opaque; - hwaddr fbptr; - int miny, maxy; - int ch; - if (!(s->control[0] & LCCR0_ENB)) - return; - - pxa2xx_descriptor_load(s); - - pxa2xx_lcdc_resize(s); - miny = s->yres; - maxy = 0; - s->transp = s->dma_ch[2].up || s->dma_ch[3].up; - /* Note: With overlay planes the order depends on LCCR0 bit 25. 
*/ - for (ch = 0; ch < PXA_LCDDMA_CHANS; ch ++) - if (s->dma_ch[ch].up) { - if (!s->dma_ch[ch].source) { - pxa2xx_dma_ber_set(s, ch); - continue; - } - fbptr = s->dma_ch[ch].source; - if (!((fbptr >= PXA2XX_SDRAM_BASE && - fbptr <= PXA2XX_SDRAM_BASE + current_machine->ram_size) || - (fbptr >= PXA2XX_INTERNAL_BASE && - fbptr <= PXA2XX_INTERNAL_BASE + PXA2XX_INTERNAL_SIZE))) { - pxa2xx_dma_ber_set(s, ch); - continue; - } - - if (s->dma_ch[ch].command & LDCMD_PAL) { - cpu_physical_memory_read(fbptr, s->dma_ch[ch].pbuffer, - MAX(LDCMD_LENGTH(s->dma_ch[ch].command), - sizeof(s->dma_ch[ch].pbuffer))); - pxa2xx_palette_parse(s, ch, s->bpp); - } else { - /* Do we need to reparse palette */ - if (LCCR4_PALFOR(s->control[4]) != s->pal_for) - pxa2xx_palette_parse(s, ch, s->bpp); - - /* ACK frame start */ - pxa2xx_dma_sof_set(s, ch); - - s->dma_ch[ch].redraw(s, fbptr, &miny, &maxy); - s->invalidated = 0; - - /* ACK frame completed */ - pxa2xx_dma_eof_set(s, ch); - } - } - - if (s->control[0] & LCCR0_DIS) { - /* ACK last frame completed */ - s->control[0] &= ~LCCR0_ENB; - s->status[0] |= LCSR0_LDD; - } - - if (miny >= 0) { - switch (s->orientation) { - case 0: - dpy_gfx_update(s->con, 0, miny, s->xres, maxy - miny + 1); - break; - case 90: - dpy_gfx_update(s->con, miny, 0, maxy - miny + 1, s->xres); - break; - case 180: - maxy = s->yres - maxy - 1; - miny = s->yres - miny - 1; - dpy_gfx_update(s->con, 0, maxy, s->xres, miny - maxy + 1); - break; - case 270: - maxy = s->yres - maxy - 1; - miny = s->yres - miny - 1; - dpy_gfx_update(s->con, maxy, 0, miny - maxy + 1, s->xres); - break; - } - } - pxa2xx_lcdc_int_update(s); - - qemu_irq_raise(s->vsync_cb); -} - -static void pxa2xx_invalidate_display(void *opaque) -{ - PXA2xxLCDState *s = (PXA2xxLCDState *) opaque; - s->invalidated = 1; -} - -static void pxa2xx_lcdc_orientation(void *opaque, int angle) -{ - PXA2xxLCDState *s = (PXA2xxLCDState *) opaque; - - switch (angle) { - case 0: - s->dma_ch[0].redraw = pxa2xx_lcdc_dma0_redraw_rot0; - break; - case 90: - s->dma_ch[0].redraw = pxa2xx_lcdc_dma0_redraw_rot90; - break; - case 180: - s->dma_ch[0].redraw = pxa2xx_lcdc_dma0_redraw_rot180; - break; - case 270: - s->dma_ch[0].redraw = pxa2xx_lcdc_dma0_redraw_rot270; - break; - } - - s->orientation = angle; - s->xres = s->yres = -1; - pxa2xx_lcdc_resize(s); -} - -static const VMStateDescription vmstate_dma_channel = { - .name = "dma_channel", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_UINT32(branch, struct DMAChannel), - VMSTATE_UINT8(up, struct DMAChannel), - VMSTATE_BUFFER(pbuffer, struct DMAChannel), - VMSTATE_UINT32(descriptor, struct DMAChannel), - VMSTATE_UINT32(source, struct DMAChannel), - VMSTATE_UINT32(id, struct DMAChannel), - VMSTATE_UINT32(command, struct DMAChannel), - VMSTATE_END_OF_LIST() - } -}; - -static int pxa2xx_lcdc_post_load(void *opaque, int version_id) -{ - PXA2xxLCDState *s = opaque; - - s->bpp = LCCR3_BPP(s->control[3]); - s->xres = s->yres = s->pal_for = -1; - - return 0; -} - -static const VMStateDescription vmstate_pxa2xx_lcdc = { - .name = "pxa2xx_lcdc", - .version_id = 0, - .minimum_version_id = 0, - .post_load = pxa2xx_lcdc_post_load, - .fields = (const VMStateField[]) { - VMSTATE_INT32(irqlevel, PXA2xxLCDState), - VMSTATE_INT32(transp, PXA2xxLCDState), - VMSTATE_UINT32_ARRAY(control, PXA2xxLCDState, 6), - VMSTATE_UINT32_ARRAY(status, PXA2xxLCDState, 2), - VMSTATE_UINT32_ARRAY(ovl1c, PXA2xxLCDState, 2), - VMSTATE_UINT32_ARRAY(ovl2c, PXA2xxLCDState, 2), - VMSTATE_UINT32(ccr, 
PXA2xxLCDState), - VMSTATE_UINT32(cmdcr, PXA2xxLCDState), - VMSTATE_UINT32(trgbr, PXA2xxLCDState), - VMSTATE_UINT32(tcr, PXA2xxLCDState), - VMSTATE_UINT32(liidr, PXA2xxLCDState), - VMSTATE_UINT8(bscntr, PXA2xxLCDState), - VMSTATE_STRUCT_ARRAY(dma_ch, PXA2xxLCDState, 7, 0, - vmstate_dma_channel, struct DMAChannel), - VMSTATE_END_OF_LIST() - } -}; - -static const GraphicHwOps pxa2xx_ops = { - .invalidate = pxa2xx_invalidate_display, - .gfx_update = pxa2xx_update_display, -}; - -PXA2xxLCDState *pxa2xx_lcdc_init(MemoryRegion *sysmem, - hwaddr base, qemu_irq irq) -{ - PXA2xxLCDState *s; - - s = g_new0(PXA2xxLCDState, 1); - s->invalidated = 1; - s->irq = irq; - s->sysmem = sysmem; - - pxa2xx_lcdc_orientation(s, graphic_rotate); - - memory_region_init_io(&s->iomem, NULL, &pxa2xx_lcdc_ops, s, - "pxa2xx-lcd-controller", 0x00100000); - memory_region_add_subregion(sysmem, base, &s->iomem); - - s->con = graphic_console_init(NULL, 0, &pxa2xx_ops, s); - - vmstate_register(NULL, 0, &vmstate_pxa2xx_lcdc, s); - - return s; -} - -void pxa2xx_lcd_vsync_notifier(PXA2xxLCDState *s, qemu_irq handler) -{ - s->vsync_cb = handler; -} diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c index 335d01edde7..c6a9ac1da10 100644 --- a/hw/display/qxl-render.c +++ b/hw/display/qxl-render.c @@ -21,7 +21,7 @@ #include "qemu/osdep.h" #include "qxl.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "trace.h" static void qxl_blit(PCIQXLDevice *qxl, QXLRect *rect) @@ -222,6 +222,7 @@ static void qxl_unpack_chunks(void *dest, size_t size, PCIQXLDevice *qxl, uint32_t max_chunks = 32; size_t offset = 0; size_t bytes; + QXLPHYSICAL next_chunk_phys = 0; for (;;) { bytes = MIN(size - offset, chunk->data_size); @@ -230,7 +231,15 @@ static void qxl_unpack_chunks(void *dest, size_t size, PCIQXLDevice *qxl, if (offset == size) { return; } - chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id, + next_chunk_phys = chunk->next_chunk; + /* first time, only get the next chunk's data size */ + chunk = qxl_phys2virt(qxl, next_chunk_phys, group_id, + sizeof(QXLDataChunk)); + if (!chunk) { + return; + } + /* second time, check data size and get data */ + chunk = qxl_phys2virt(qxl, next_chunk_phys, group_id, sizeof(QXLDataChunk) + chunk->data_size); if (!chunk) { return; diff --git a/hw/display/qxl.c b/hw/display/qxl.c index 7178dec85d9..18f482ca7f7 100644 --- a/hw/display/qxl.c +++ b/hw/display/qxl.c @@ -29,7 +29,8 @@ #include "qemu/main-loop.h" #include "qemu/module.h" #include "hw/qdev-properties.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" +#include "migration/cpr.h" #include "migration/vmstate.h" #include "trace.h" @@ -50,7 +51,7 @@ #undef ALIGN #define ALIGN(a, b) (((a) + ((b) - 1)) & ~((b) - 1)) -#define PIXEL_SIZE 0.2936875 //1280x1024 is 14.8" x 11.9" +#define PIXEL_SIZE 0.2936875 /* 1280x1024 is 14.8" x 11.9" */ #define QXL_MODE(_x, _y, _b, _o) \ { .x_res = _x, \ @@ -333,6 +334,10 @@ static void init_qxl_rom(PCIQXLDevice *d) uint32_t fb; int i, n; + if (cpr_is_incoming()) { + goto skip_init; + } + memset(rom, 0, d->rom_size); rom->magic = cpu_to_le32(QXL_ROM_MAGIC); @@ -390,6 +395,7 @@ static void init_qxl_rom(PCIQXLDevice *d) sizeof(rom->client_monitors_config)); } +skip_init: d->shadow_rom = *rom; d->rom = rom; d->modes = modes; @@ -403,6 +409,9 @@ static void init_qxl_ram(PCIQXLDevice *d) buf = d->vga.vram_ptr; d->ram = (QXLRam *)(buf + le32_to_cpu(d->shadow_rom.ram_header_offset)); + if (cpr_is_incoming()) { + return; + } d->ram->magic = cpu_to_le32(QXL_RAM_MAGIC); 
d->ram->int_pending = cpu_to_le32(0); d->ram->int_mask = cpu_to_le32(0); @@ -539,6 +548,10 @@ static void interface_set_compression_level(QXLInstance *sin, int level) trace_qxl_interface_set_compression_level(qxl->id, level); qxl->shadow_rom.compression_level = cpu_to_le32(level); + if (cpr_is_incoming()) { + assert(qxl->rom->compression_level == cpu_to_le32(level)); + return; + } qxl->rom->compression_level = cpu_to_le32(level); qxl_rom_set_dirty(qxl); } @@ -997,7 +1010,8 @@ static void interface_set_client_capabilities(QXLInstance *sin, } if (runstate_check(RUN_STATE_INMIGRATE) || - runstate_check(RUN_STATE_POSTMIGRATE)) { + runstate_check(RUN_STATE_POSTMIGRATE) || + cpr_is_incoming()) { return; } @@ -1200,6 +1214,10 @@ static void qxl_reset_state(PCIQXLDevice *d) { QXLRom *rom = d->rom; + if (cpr_is_incoming()) { + return; + } + qxl_check_state(d); d->shadow_rom.update_id = cpu_to_le32(0); *rom = d->shadow_rom; @@ -1301,8 +1319,8 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta, }; uint64_t guest_start; uint64_t guest_end; - int pci_region; - pcibus_t pci_start; + int pci_region = -1; + pcibus_t pci_start = PCI_BAR_UNMAPPED; pcibus_t pci_end; MemoryRegion *mr; intptr_t virt_start; @@ -1370,8 +1388,11 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta, memslot.virt_start = virt_start + (guest_start - pci_start); memslot.virt_end = virt_start + (guest_end - pci_start); memslot.addr_delta = memslot.virt_start - delta; - memslot.generation = d->rom->slot_generation = 0; - qxl_rom_set_dirty(d); + if (!cpr_is_incoming()) { + d->rom->slot_generation = 0; + qxl_rom_set_dirty(d); + } + memslot.generation = d->rom->slot_generation; qemu_spice_add_memslot(&d->ssd, &memslot, async); d->guest_slots[slot_id].mr = mr; @@ -2458,7 +2479,7 @@ static const VMStateDescription qxl_vmstate = { } }; -static Property qxl_properties[] = { +static const Property qxl_properties[] = { DEFINE_PROP_UINT32("ram_size", PCIQXLDevice, vga.vram_size, 64 * MiB), DEFINE_PROP_UINT64("vram_size", PCIQXLDevice, vram32_size, 64 * MiB), DEFINE_PROP_UINT32("revision", PCIQXLDevice, revision, @@ -2475,10 +2496,9 @@ static Property qxl_properties[] = { DEFINE_PROP_UINT32("xres", PCIQXLDevice, xres, 0), DEFINE_PROP_UINT32("yres", PCIQXLDevice, yres, 0), DEFINE_PROP_BOOL("global-vmstate", PCIQXLDevice, vga.global_vmstate, false), - DEFINE_PROP_END_OF_LIST(), }; -static void qxl_pci_class_init(ObjectClass *klass, void *data) +static void qxl_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -2486,7 +2506,7 @@ static void qxl_pci_class_init(ObjectClass *klass, void *data) k->vendor_id = REDHAT_PCI_VENDOR_ID; k->device_id = QXL_DEVICE_ID_STABLE; set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); - dc->reset = qxl_reset_handler; + device_class_set_legacy_reset(dc, qxl_reset_handler); dc->vmsd = &qxl_vmstate; device_class_set_props(dc, qxl_properties); } @@ -2497,13 +2517,13 @@ static const TypeInfo qxl_pci_type_info = { .instance_size = sizeof(PCIQXLDevice), .abstract = true, .class_init = qxl_pci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -static void qxl_primary_class_init(ObjectClass *klass, void *data) +static void qxl_primary_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -2523,7 +2543,7 @@ static 
const TypeInfo qxl_primary_info = { module_obj("qxl-vga"); module_kconfig(QXL); -static void qxl_secondary_class_init(ObjectClass *klass, void *data) +static void qxl_secondary_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); diff --git a/hw/display/ramfb-standalone.c b/hw/display/ramfb-standalone.c index 20eab34ff41..72b2071aed0 100644 --- a/hw/display/ramfb-standalone.c +++ b/hw/display/ramfb-standalone.c @@ -17,6 +17,7 @@ struct RAMFBStandaloneState { QemuConsole *con; RAMFBState *state; bool migrate; + bool use_legacy_x86_rom; }; static void display_update_wrapper(void *dev) @@ -39,7 +40,7 @@ static void ramfb_realizefn(DeviceState *dev, Error **errp) RAMFBStandaloneState *ramfb = RAMFB(dev); ramfb->con = graphic_console_init(dev, 0, &wrapper_ops, dev); - ramfb->state = ramfb_setup(errp); + ramfb->state = ramfb_setup(ramfb->use_legacy_x86_rom, errp); } static bool migrate_needed(void *opaque) @@ -60,12 +61,13 @@ static const VMStateDescription ramfb_dev_vmstate = { } }; -static Property ramfb_properties[] = { +static const Property ramfb_properties[] = { DEFINE_PROP_BOOL("x-migrate", RAMFBStandaloneState, migrate, true), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_BOOL("use-legacy-x86-rom", RAMFBStandaloneState, + use_legacy_x86_rom, false), }; -static void ramfb_class_initfn(ObjectClass *klass, void *data) +static void ramfb_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -73,13 +75,12 @@ static void ramfb_class_initfn(ObjectClass *klass, void *data) dc->vmsd = &ramfb_dev_vmstate; dc->realize = ramfb_realizefn; dc->desc = "ram framebuffer standalone device"; - dc->user_creatable = true; device_class_set_props(dc, ramfb_properties); } static const TypeInfo ramfb_info = { .name = TYPE_RAMFB_DEVICE, - .parent = TYPE_SYS_BUS_DEVICE, + .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE, .instance_size = sizeof(RAMFBStandaloneState), .class_init = ramfb_class_initfn, }; diff --git a/hw/display/ramfb-stubs.c b/hw/display/ramfb-stubs.c index cf64733b10c..b83551357bb 100644 --- a/hw/display/ramfb-stubs.c +++ b/hw/display/ramfb-stubs.c @@ -8,7 +8,7 @@ void ramfb_display_update(QemuConsole *con, RAMFBState *s) { } -RAMFBState *ramfb_setup(Error **errp) +RAMFBState *ramfb_setup(bool romfile, Error **errp) { error_setg(errp, "ramfb support not available"); return NULL; diff --git a/hw/display/ramfb.c b/hw/display/ramfb.c index 6086baf7a98..9a17d97d076 100644 --- a/hw/display/ramfb.c +++ b/hw/display/ramfb.c @@ -17,7 +17,7 @@ #include "hw/display/ramfb.h" #include "hw/display/bochs-vbe.h" /* for limits */ #include "ui/console.h" -#include "sysemu/reset.h" +#include "system/reset.h" struct QEMU_PACKED RAMFBCfg { uint64_t addr; @@ -135,7 +135,7 @@ const VMStateDescription ramfb_vmstate = { } }; -RAMFBState *ramfb_setup(Error **errp) +RAMFBState *ramfb_setup(bool romfile, Error **errp) { FWCfgState *fw_cfg = fw_cfg_find(); RAMFBState *s; @@ -147,7 +147,9 @@ RAMFBState *ramfb_setup(Error **errp) s = g_new0(RAMFBState, 1); - rom_add_vga("vgabios-ramfb.bin"); + if (romfile) { + rom_add_vga("vgabios-ramfb.bin"); + } fw_cfg_add_file_callback(fw_cfg, "etc/ramfb", NULL, ramfb_fw_cfg_write, s, &s->cfg, sizeof(s->cfg), false); diff --git a/hw/display/sii9022.c b/hw/display/sii9022.c index 60c3f785498..d00d3e9fc54 100644 --- a/hw/display/sii9022.c +++ b/hw/display/sii9022.c @@ -167,7 +167,7 @@ static void sii9022_realize(DeviceState *dev, Error **errp) i2c_slave_create_simple(bus, TYPE_I2CDDC, 
0x50); } -static void sii9022_class_init(ObjectClass *klass, void *data) +static void sii9022_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); @@ -175,7 +175,7 @@ static void sii9022_class_init(ObjectClass *klass, void *data) k->event = sii9022_event; k->recv = sii9022_rx; k->send = sii9022_tx; - dc->reset = sii9022_reset; + device_class_set_legacy_reset(dc, sii9022_reset); dc->realize = sii9022_realize; dc->vmsd = &vmstate_sii9022; } diff --git a/hw/display/sm501.c b/hw/display/sm501.c index 26dc8170d89..bc091b3c9fb 100644 --- a/hw/display/sm501.c +++ b/hw/display/sm501.c @@ -26,10 +26,11 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "qemu/log.h" #include "qemu/module.h" #include "hw/usb/hcd-ohci.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "ui/console.h" #include "hw/sysbus.h" #include "migration/vmstate.h" @@ -2054,11 +2055,10 @@ static void sm501_realize_sysbus(DeviceState *dev, Error **errp) /* TODO : chain irq to IRL */ } -static Property sm501_sysbus_properties[] = { +static const Property sm501_sysbus_properties[] = { DEFINE_PROP_UINT32("vram-size", SM501SysBusState, vram_size, 0), /* this a debug option, prefer PROP_UINT over PROP_BIT for simplicity */ DEFINE_PROP_UINT8("x-pixman", SM501SysBusState, state.use_pixman, DEFAULT_X_PIXMAN), - DEFINE_PROP_END_OF_LIST(), }; static void sm501_reset_sysbus(DeviceState *dev) @@ -2078,7 +2078,7 @@ static const VMStateDescription vmstate_sm501_sysbus = { } }; -static void sm501_sysbus_class_init(ObjectClass *klass, void *data) +static void sm501_sysbus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -2086,7 +2086,7 @@ static void sm501_sysbus_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); dc->desc = "SM501 Multimedia Companion"; device_class_set_props(dc, sm501_sysbus_properties); - dc->reset = sm501_reset_sysbus; + device_class_set_legacy_reset(dc, sm501_reset_sysbus); dc->vmsd = &vmstate_sm501_sysbus; } @@ -2143,10 +2143,9 @@ static void sm501_realize_pci(PCIDevice *dev, Error **errp) &s->state.mmio_region); } -static Property sm501_pci_properties[] = { +static const Property sm501_pci_properties[] = { DEFINE_PROP_UINT32("vram-size", SM501PCIState, vram_size, 64 * MiB), DEFINE_PROP_UINT8("x-pixman", SM501PCIState, state.use_pixman, DEFAULT_X_PIXMAN), - DEFINE_PROP_END_OF_LIST(), }; static void sm501_reset_pci(DeviceState *dev) @@ -2169,7 +2168,7 @@ static const VMStateDescription vmstate_sm501_pci = { } }; -static void sm501_pci_class_init(ObjectClass *klass, void *data) +static void sm501_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -2181,7 +2180,7 @@ static void sm501_pci_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); dc->desc = "SM501 Display Controller"; device_class_set_props(dc, sm501_pci_properties); - dc->reset = sm501_reset_pci; + device_class_set_legacy_reset(dc, sm501_reset_pci); dc->hotpluggable = false; dc->vmsd = &vmstate_sm501_pci; } @@ -2198,7 +2197,7 @@ static const TypeInfo sm501_pci_info = { .instance_size = sizeof(SM501PCIState), .class_init = sm501_pci_class_init, .instance_init = sm501_pci_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, 
{ }, }, diff --git a/hw/display/ssd0303.c b/hw/display/ssd0303.c index e292cff44ea..87781438cd5 100644 --- a/hw/display/ssd0303.c +++ b/hw/display/ssd0303.c @@ -311,7 +311,7 @@ static void ssd0303_realize(DeviceState *dev, Error **errp) qemu_console_resize(s->con, 96 * MAGNIFY, 16 * MAGNIFY); } -static void ssd0303_class_init(ObjectClass *klass, void *data) +static void ssd0303_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); diff --git a/hw/display/ssd0323.c b/hw/display/ssd0323.c index 96cf0dc662b..af5ff4fecdc 100644 --- a/hw/display/ssd0323.c +++ b/hw/display/ssd0323.c @@ -361,7 +361,7 @@ static void ssd0323_realize(SSIPeripheral *d, Error **errp) qdev_init_gpio_in(dev, ssd0323_cd, 1); } -static void ssd0323_class_init(ObjectClass *klass, void *data) +static void ssd0323_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); diff --git a/hw/display/tc6393xb.c b/hw/display/tc6393xb.c deleted file mode 100644 index c7beba453b0..00000000000 --- a/hw/display/tc6393xb.c +++ /dev/null @@ -1,568 +0,0 @@ -/* - * Toshiba TC6393XB I/O Controller. - * Found in Sharp Zaurus SL-6000 (tosa) or some - * Toshiba e-Series PDAs. - * - * Most features are currently unsupported!!! - * - * This code is licensed under the GNU GPL v2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "qemu/host-utils.h" -#include "hw/irq.h" -#include "hw/display/tc6393xb.h" -#include "exec/memory.h" -#include "hw/block/flash.h" -#include "ui/console.h" -#include "ui/pixel_ops.h" -#include "sysemu/blockdev.h" - -#define IRQ_TC6393_NAND 0 -#define IRQ_TC6393_MMC 1 -#define IRQ_TC6393_OHCI 2 -#define IRQ_TC6393_SERIAL 3 -#define IRQ_TC6393_FB 4 - -#define TC6393XB_NR_IRQS 8 - -#define TC6393XB_GPIOS 16 - -#define SCR_REVID 0x08 /* b Revision ID */ -#define SCR_ISR 0x50 /* b Interrupt Status */ -#define SCR_IMR 0x52 /* b Interrupt Mask */ -#define SCR_IRR 0x54 /* b Interrupt Routing */ -#define SCR_GPER 0x60 /* w GP Enable */ -#define SCR_GPI_SR(i) (0x64 + (i)) /* b3 GPI Status */ -#define SCR_GPI_IMR(i) (0x68 + (i)) /* b3 GPI INT Mask */ -#define SCR_GPI_EDER(i) (0x6c + (i)) /* b3 GPI Edge Detect Enable */ -#define SCR_GPI_LIR(i) (0x70 + (i)) /* b3 GPI Level Invert */ -#define SCR_GPO_DSR(i) (0x78 + (i)) /* b3 GPO Data Set */ -#define SCR_GPO_DOECR(i) (0x7c + (i)) /* b3 GPO Data OE Control */ -#define SCR_GP_IARCR(i) (0x80 + (i)) /* b3 GP Internal Active Register Control */ -#define SCR_GP_IARLCR(i) (0x84 + (i)) /* b3 GP INTERNAL Active Register Level Control */ -#define SCR_GPI_BCR(i) (0x88 + (i)) /* b3 GPI Buffer Control */ -#define SCR_GPA_IARCR 0x8c /* w GPa Internal Active Register Control */ -#define SCR_GPA_IARLCR 0x90 /* w GPa Internal Active Register Level Control */ -#define SCR_GPA_BCR 0x94 /* w GPa Buffer Control */ -#define SCR_CCR 0x98 /* w Clock Control */ -#define SCR_PLL2CR 0x9a /* w PLL2 Control */ -#define SCR_PLL1CR 0x9c /* l PLL1 Control */ -#define SCR_DIARCR 0xa0 /* b Device Internal Active Register Control */ -#define SCR_DBOCR 0xa1 /* b Device Buffer Off Control */ -#define SCR_FER 0xe0 /* b Function Enable */ -#define SCR_MCR 0xe4 /* w Mode Control */ -#define SCR_CONFIG 0xfc /* b Configuration Control */ -#define SCR_DEBUG 0xff /* b Debug */ - -#define NAND_CFG_COMMAND 0x04 /* w 
Command */ -#define NAND_CFG_BASE 0x10 /* l Control Base Address */ -#define NAND_CFG_INTP 0x3d /* b Interrupt Pin */ -#define NAND_CFG_INTE 0x48 /* b Int Enable */ -#define NAND_CFG_EC 0x4a /* b Event Control */ -#define NAND_CFG_ICC 0x4c /* b Internal Clock Control */ -#define NAND_CFG_ECCC 0x5b /* b ECC Control */ -#define NAND_CFG_NFTC 0x60 /* b NAND Flash Transaction Control */ -#define NAND_CFG_NFM 0x61 /* b NAND Flash Monitor */ -#define NAND_CFG_NFPSC 0x62 /* b NAND Flash Power Supply Control */ -#define NAND_CFG_NFDC 0x63 /* b NAND Flash Detect Control */ - -#define NAND_DATA 0x00 /* l Data */ -#define NAND_MODE 0x04 /* b Mode */ -#define NAND_STATUS 0x05 /* b Status */ -#define NAND_ISR 0x06 /* b Interrupt Status */ -#define NAND_IMR 0x07 /* b Interrupt Mask */ - -#define NAND_MODE_WP 0x80 -#define NAND_MODE_CE 0x10 -#define NAND_MODE_ALE 0x02 -#define NAND_MODE_CLE 0x01 -#define NAND_MODE_ECC_MASK 0x60 -#define NAND_MODE_ECC_EN 0x20 -#define NAND_MODE_ECC_READ 0x40 -#define NAND_MODE_ECC_RST 0x60 - -struct TC6393xbState { - MemoryRegion iomem; - qemu_irq irq; - qemu_irq *sub_irqs; - struct { - uint8_t ISR; - uint8_t IMR; - uint8_t IRR; - uint16_t GPER; - uint8_t GPI_SR[3]; - uint8_t GPI_IMR[3]; - uint8_t GPI_EDER[3]; - uint8_t GPI_LIR[3]; - uint8_t GP_IARCR[3]; - uint8_t GP_IARLCR[3]; - uint8_t GPI_BCR[3]; - uint16_t GPA_IARCR; - uint16_t GPA_IARLCR; - uint16_t CCR; - uint16_t PLL2CR; - uint32_t PLL1CR; - uint8_t DIARCR; - uint8_t DBOCR; - uint8_t FER; - uint16_t MCR; - uint8_t CONFIG; - uint8_t DEBUG; - } scr; - uint32_t gpio_dir; - uint32_t gpio_level; - uint32_t prev_level; - qemu_irq handler[TC6393XB_GPIOS]; - qemu_irq *gpio_in; - - struct { - uint8_t mode; - uint8_t isr; - uint8_t imr; - } nand; - int nand_enable; - uint32_t nand_phys; - DeviceState *flash; - ECCState ecc; - - QemuConsole *con; - MemoryRegion vram; - uint16_t *vram_ptr; - uint32_t scr_width, scr_height; /* in pixels */ - qemu_irq l3v; - unsigned blank : 1, - blanked : 1; -}; - -static void tc6393xb_gpio_set(void *opaque, int line, int level) -{ -// TC6393xbState *s = opaque; - - if (line > TC6393XB_GPIOS) { - printf("%s: No GPIO pin %i\n", __func__, line); - return; - } - - // FIXME: how does the chip reflect the GPIO input level change? 
-} - -static void tc6393xb_gpio_handler_update(TC6393xbState *s) -{ - uint32_t level, diff; - int bit; - - level = s->gpio_level & s->gpio_dir; - level &= MAKE_64BIT_MASK(0, TC6393XB_GPIOS); - - for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) { - bit = ctz32(diff); - qemu_set_irq(s->handler[bit], (level >> bit) & 1); - } - - s->prev_level = level; -} - -qemu_irq tc6393xb_l3v_get(TC6393xbState *s) -{ - return s->l3v; -} - -static void tc6393xb_l3v(void *opaque, int line, int level) -{ - TC6393xbState *s = opaque; - s->blank = !level; - fprintf(stderr, "L3V: %d\n", level); -} - -static void tc6393xb_sub_irq(void *opaque, int line, int level) { - TC6393xbState *s = opaque; - uint8_t isr = s->scr.ISR; - if (level) - isr |= 1 << line; - else - isr &= ~(1 << line); - s->scr.ISR = isr; - qemu_set_irq(s->irq, isr & s->scr.IMR); -} - -#define SCR_REG_B(N) \ - case SCR_ ##N: return s->scr.N -#define SCR_REG_W(N) \ - case SCR_ ##N: return s->scr.N; \ - case SCR_ ##N + 1: return s->scr.N >> 8; -#define SCR_REG_L(N) \ - case SCR_ ##N: return s->scr.N; \ - case SCR_ ##N + 1: return s->scr.N >> 8; \ - case SCR_ ##N + 2: return s->scr.N >> 16; \ - case SCR_ ##N + 3: return s->scr.N >> 24; -#define SCR_REG_A(N) \ - case SCR_ ##N(0): return s->scr.N[0]; \ - case SCR_ ##N(1): return s->scr.N[1]; \ - case SCR_ ##N(2): return s->scr.N[2] - -static uint32_t tc6393xb_scr_readb(TC6393xbState *s, hwaddr addr) -{ - switch (addr) { - case SCR_REVID: - return 3; - case SCR_REVID+1: - return 0; - SCR_REG_B(ISR); - SCR_REG_B(IMR); - SCR_REG_B(IRR); - SCR_REG_W(GPER); - SCR_REG_A(GPI_SR); - SCR_REG_A(GPI_IMR); - SCR_REG_A(GPI_EDER); - SCR_REG_A(GPI_LIR); - case SCR_GPO_DSR(0): - case SCR_GPO_DSR(1): - case SCR_GPO_DSR(2): - return (s->gpio_level >> ((addr - SCR_GPO_DSR(0)) * 8)) & 0xff; - case SCR_GPO_DOECR(0): - case SCR_GPO_DOECR(1): - case SCR_GPO_DOECR(2): - return (s->gpio_dir >> ((addr - SCR_GPO_DOECR(0)) * 8)) & 0xff; - SCR_REG_A(GP_IARCR); - SCR_REG_A(GP_IARLCR); - SCR_REG_A(GPI_BCR); - SCR_REG_W(GPA_IARCR); - SCR_REG_W(GPA_IARLCR); - SCR_REG_W(CCR); - SCR_REG_W(PLL2CR); - SCR_REG_L(PLL1CR); - SCR_REG_B(DIARCR); - SCR_REG_B(DBOCR); - SCR_REG_B(FER); - SCR_REG_W(MCR); - SCR_REG_B(CONFIG); - SCR_REG_B(DEBUG); - } - fprintf(stderr, "tc6393xb_scr: unhandled read at %08x\n", (uint32_t) addr); - return 0; -} -#undef SCR_REG_B -#undef SCR_REG_W -#undef SCR_REG_L -#undef SCR_REG_A - -#define SCR_REG_B(N) \ - case SCR_ ##N: s->scr.N = value; return; -#define SCR_REG_W(N) \ - case SCR_ ##N: s->scr.N = (s->scr.N & ~0xff) | (value & 0xff); return; \ - case SCR_ ##N + 1: s->scr.N = (s->scr.N & 0xff) | (value << 8); return -#define SCR_REG_L(N) \ - case SCR_ ##N: s->scr.N = (s->scr.N & ~0xff) | (value & 0xff); return; \ - case SCR_ ##N + 1: s->scr.N = (s->scr.N & ~(0xff << 8)) | (value & (0xff << 8)); return; \ - case SCR_ ##N + 2: s->scr.N = (s->scr.N & ~(0xff << 16)) | (value & (0xff << 16)); return; \ - case SCR_ ##N + 3: s->scr.N = (s->scr.N & ~(0xff << 24)) | (value & (0xff << 24)); return; -#define SCR_REG_A(N) \ - case SCR_ ##N(0): s->scr.N[0] = value; return; \ - case SCR_ ##N(1): s->scr.N[1] = value; return; \ - case SCR_ ##N(2): s->scr.N[2] = value; return - -static void tc6393xb_scr_writeb(TC6393xbState *s, hwaddr addr, uint32_t value) -{ - switch (addr) { - SCR_REG_B(ISR); - SCR_REG_B(IMR); - SCR_REG_B(IRR); - SCR_REG_W(GPER); - SCR_REG_A(GPI_SR); - SCR_REG_A(GPI_IMR); - SCR_REG_A(GPI_EDER); - SCR_REG_A(GPI_LIR); - case SCR_GPO_DSR(0): - case SCR_GPO_DSR(1): - case SCR_GPO_DSR(2): - s->gpio_level = 
(s->gpio_level & ~(0xff << ((addr - SCR_GPO_DSR(0))*8))) | ((value & 0xff) << ((addr - SCR_GPO_DSR(0))*8)); - tc6393xb_gpio_handler_update(s); - return; - case SCR_GPO_DOECR(0): - case SCR_GPO_DOECR(1): - case SCR_GPO_DOECR(2): - s->gpio_dir = (s->gpio_dir & ~(0xff << ((addr - SCR_GPO_DOECR(0))*8))) | ((value & 0xff) << ((addr - SCR_GPO_DOECR(0))*8)); - tc6393xb_gpio_handler_update(s); - return; - SCR_REG_A(GP_IARCR); - SCR_REG_A(GP_IARLCR); - SCR_REG_A(GPI_BCR); - SCR_REG_W(GPA_IARCR); - SCR_REG_W(GPA_IARLCR); - SCR_REG_W(CCR); - SCR_REG_W(PLL2CR); - SCR_REG_L(PLL1CR); - SCR_REG_B(DIARCR); - SCR_REG_B(DBOCR); - SCR_REG_B(FER); - SCR_REG_W(MCR); - SCR_REG_B(CONFIG); - SCR_REG_B(DEBUG); - } - fprintf(stderr, "tc6393xb_scr: unhandled write at %08x: %02x\n", - (uint32_t) addr, value & 0xff); -} -#undef SCR_REG_B -#undef SCR_REG_W -#undef SCR_REG_L -#undef SCR_REG_A - -static void tc6393xb_nand_irq(TC6393xbState *s) { - qemu_set_irq(s->sub_irqs[IRQ_TC6393_NAND], - (s->nand.imr & 0x80) && (s->nand.imr & s->nand.isr)); -} - -static uint32_t tc6393xb_nand_cfg_readb(TC6393xbState *s, hwaddr addr) { - switch (addr) { - case NAND_CFG_COMMAND: - return s->nand_enable ? 2 : 0; - case NAND_CFG_BASE: - case NAND_CFG_BASE + 1: - case NAND_CFG_BASE + 2: - case NAND_CFG_BASE + 3: - return s->nand_phys >> (addr - NAND_CFG_BASE); - } - fprintf(stderr, "tc6393xb_nand_cfg: unhandled read at %08x\n", (uint32_t) addr); - return 0; -} -static void tc6393xb_nand_cfg_writeb(TC6393xbState *s, hwaddr addr, uint32_t value) { - switch (addr) { - case NAND_CFG_COMMAND: - s->nand_enable = (value & 0x2); - return; - case NAND_CFG_BASE: - case NAND_CFG_BASE + 1: - case NAND_CFG_BASE + 2: - case NAND_CFG_BASE + 3: - s->nand_phys &= ~(0xff << ((addr - NAND_CFG_BASE) * 8)); - s->nand_phys |= (value & 0xff) << ((addr - NAND_CFG_BASE) * 8); - return; - } - fprintf(stderr, "tc6393xb_nand_cfg: unhandled write at %08x: %02x\n", - (uint32_t) addr, value & 0xff); -} - -static uint32_t tc6393xb_nand_readb(TC6393xbState *s, hwaddr addr) { - switch (addr) { - case NAND_DATA + 0: - case NAND_DATA + 1: - case NAND_DATA + 2: - case NAND_DATA + 3: - return nand_getio(s->flash); - case NAND_MODE: - return s->nand.mode; - case NAND_STATUS: - return 0x14; - case NAND_ISR: - return s->nand.isr; - case NAND_IMR: - return s->nand.imr; - } - fprintf(stderr, "tc6393xb_nand: unhandled read at %08x\n", (uint32_t) addr); - return 0; -} -static void tc6393xb_nand_writeb(TC6393xbState *s, hwaddr addr, uint32_t value) { -// fprintf(stderr, "tc6393xb_nand: write at %08x: %02x\n", -// (uint32_t) addr, value & 0xff); - switch (addr) { - case NAND_DATA + 0: - case NAND_DATA + 1: - case NAND_DATA + 2: - case NAND_DATA + 3: - nand_setio(s->flash, value); - s->nand.isr |= 1; - tc6393xb_nand_irq(s); - return; - case NAND_MODE: - s->nand.mode = value; - nand_setpins(s->flash, - value & NAND_MODE_CLE, - value & NAND_MODE_ALE, - !(value & NAND_MODE_CE), - value & NAND_MODE_WP, - 0); // FIXME: gnd - switch (value & NAND_MODE_ECC_MASK) { - case NAND_MODE_ECC_RST: - ecc_reset(&s->ecc); - break; - case NAND_MODE_ECC_READ: - // FIXME - break; - case NAND_MODE_ECC_EN: - ecc_reset(&s->ecc); - } - return; - case NAND_ISR: - s->nand.isr = value; - tc6393xb_nand_irq(s); - return; - case NAND_IMR: - s->nand.imr = value; - tc6393xb_nand_irq(s); - return; - } - fprintf(stderr, "tc6393xb_nand: unhandled write at %08x: %02x\n", - (uint32_t) addr, value & 0xff); -} - -static void tc6393xb_draw_graphic(TC6393xbState *s, int full_update) -{ - DisplaySurface *surface = 
qemu_console_surface(s->con); - int i; - uint16_t *data_buffer; - uint8_t *data_display; - - data_buffer = s->vram_ptr; - data_display = surface_data(surface); - for (i = 0; i < s->scr_height; i++) { - int j; - for (j = 0; j < s->scr_width; j++, data_display += 4, data_buffer++) { - uint16_t color = *data_buffer; - uint32_t dest_color = rgb_to_pixel32( - ((color & 0xf800) * 0x108) >> 11, - ((color & 0x7e0) * 0x41) >> 9, - ((color & 0x1f) * 0x21) >> 2 - ); - *(uint32_t *)data_display = dest_color; - } - } - dpy_gfx_update_full(s->con); -} - -static void tc6393xb_draw_blank(TC6393xbState *s, int full_update) -{ - DisplaySurface *surface = qemu_console_surface(s->con); - int i, w; - uint8_t *d; - - if (!full_update) - return; - - w = s->scr_width * surface_bytes_per_pixel(surface); - d = surface_data(surface); - for(i = 0; i < s->scr_height; i++) { - memset(d, 0, w); - d += surface_stride(surface); - } - - dpy_gfx_update_full(s->con); -} - -static void tc6393xb_update_display(void *opaque) -{ - TC6393xbState *s = opaque; - DisplaySurface *surface = qemu_console_surface(s->con); - int full_update; - - if (s->scr_width == 0 || s->scr_height == 0) - return; - - full_update = 0; - if (s->blanked != s->blank) { - s->blanked = s->blank; - full_update = 1; - } - if (s->scr_width != surface_width(surface) || - s->scr_height != surface_height(surface)) { - qemu_console_resize(s->con, s->scr_width, s->scr_height); - full_update = 1; - } - if (s->blanked) - tc6393xb_draw_blank(s, full_update); - else - tc6393xb_draw_graphic(s, full_update); -} - - -static uint64_t tc6393xb_readb(void *opaque, hwaddr addr, - unsigned size) -{ - TC6393xbState *s = opaque; - - switch (addr >> 8) { - case 0: - return tc6393xb_scr_readb(s, addr & 0xff); - case 1: - return tc6393xb_nand_cfg_readb(s, addr & 0xff); - }; - - if ((addr &~0xff) == s->nand_phys && s->nand_enable) { -// return tc6393xb_nand_readb(s, addr & 0xff); - uint8_t d = tc6393xb_nand_readb(s, addr & 0xff); -// fprintf(stderr, "tc6393xb_nand: read at %08x: %02hhx\n", (uint32_t) addr, d); - return d; - } - -// fprintf(stderr, "tc6393xb: unhandled read at %08x\n", (uint32_t) addr); - return 0; -} - -static void tc6393xb_writeb(void *opaque, hwaddr addr, - uint64_t value, unsigned size) { - TC6393xbState *s = opaque; - - switch (addr >> 8) { - case 0: - tc6393xb_scr_writeb(s, addr & 0xff, value); - return; - case 1: - tc6393xb_nand_cfg_writeb(s, addr & 0xff, value); - return; - }; - - if ((addr &~0xff) == s->nand_phys && s->nand_enable) - tc6393xb_nand_writeb(s, addr & 0xff, value); - else - fprintf(stderr, "tc6393xb: unhandled write at %08x: %02x\n", - (uint32_t) addr, (int)value & 0xff); -} - -static const GraphicHwOps tc6393xb_gfx_ops = { - .gfx_update = tc6393xb_update_display, -}; - -TC6393xbState *tc6393xb_init(MemoryRegion *sysmem, uint32_t base, qemu_irq irq) -{ - TC6393xbState *s; - DriveInfo *nand; - static const MemoryRegionOps tc6393xb_ops = { - .read = tc6393xb_readb, - .write = tc6393xb_writeb, - .endianness = DEVICE_NATIVE_ENDIAN, - .impl = { - .min_access_size = 1, - .max_access_size = 1, - }, - }; - - s = g_new0(TC6393xbState, 1); - s->irq = irq; - s->gpio_in = qemu_allocate_irqs(tc6393xb_gpio_set, s, TC6393XB_GPIOS); - - s->l3v = qemu_allocate_irq(tc6393xb_l3v, s, 0); - s->blanked = 1; - - s->sub_irqs = qemu_allocate_irqs(tc6393xb_sub_irq, s, TC6393XB_NR_IRQS); - - nand = drive_get(IF_MTD, 0, 0); - s->flash = nand_init(nand ? 
blk_by_legacy_dinfo(nand) : NULL, - NAND_MFR_TOSHIBA, 0x76); - - memory_region_init_io(&s->iomem, NULL, &tc6393xb_ops, s, "tc6393xb", 0x10000); - memory_region_add_subregion(sysmem, base, &s->iomem); - - memory_region_init_ram(&s->vram, NULL, "tc6393xb.vram", 0x100000, - &error_fatal); - s->vram_ptr = memory_region_get_ram_ptr(&s->vram); - memory_region_add_subregion(sysmem, base + 0x100000, &s->vram); - s->scr_width = 480; - s->scr_height = 640; - s->con = graphic_console_init(NULL, 0, &tc6393xb_gfx_ops, s); - - return s; -} diff --git a/hw/display/tcx.c b/hw/display/tcx.c index 99507e76388..4853c5e1424 100644 --- a/hw/display/tcx.c +++ b/hw/display/tcx.c @@ -729,7 +729,6 @@ static uint64_t tcx_dummy_readl(void *opaque, hwaddr addr, static void tcx_dummy_writel(void *opaque, hwaddr addr, uint64_t val, unsigned size) { - return; } static const MemoryRegionOps tcx_dummy_ops = { @@ -879,20 +878,19 @@ static void tcx_realizefn(DeviceState *dev, Error **errp) qemu_console_resize(s->con, s->width, s->height); } -static Property tcx_properties[] = { +static const Property tcx_properties[] = { DEFINE_PROP_UINT32("vram_size", TCXState, vram_size, -1), DEFINE_PROP_UINT16("width", TCXState, width, -1), DEFINE_PROP_UINT16("height", TCXState, height, -1), DEFINE_PROP_UINT16("depth", TCXState, depth, -1), - DEFINE_PROP_END_OF_LIST(), }; -static void tcx_class_init(ObjectClass *klass, void *data) +static void tcx_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = tcx_realizefn; - dc->reset = tcx_reset; + device_class_set_legacy_reset(dc, tcx_reset); dc->vmsd = &vmstate_tcx; device_class_set_props(dc, tcx_properties); } diff --git a/hw/display/trace-events b/hw/display/trace-events index 781f8a33203..52786e6e184 100644 --- a/hw/display/trace-events +++ b/hw/display/trace-events @@ -53,6 +53,9 @@ virtio_gpu_cmd_ctx_submit(uint32_t ctx, uint32_t size) "ctx 0x%x, size %d" virtio_gpu_update_cursor(uint32_t scanout, uint32_t x, uint32_t y, const char *type, uint32_t res) "scanout %d, x %d, y %d, %s, res 0x%x" virtio_gpu_fence_ctrl(uint64_t fence, uint32_t type) "fence 0x%" PRIx64 ", type 0x%x" virtio_gpu_fence_resp(uint64_t fence) "fence 0x%" PRIx64 +virtio_gpu_inc_inflight_fences(uint32_t inflight) "in-flight+ %u" +virtio_gpu_dec_inflight_fences(uint32_t inflight) "in-flight- %u" +virtio_gpu_cmd_suspended(uint32_t cmd) "cmd 0x%x" # qxl.c disable qxl_io_write_vga(int qid, const char *mode, uint32_t addr, uint32_t val) "%d %s addr=%u val=%u" @@ -191,3 +194,33 @@ dm163_bits_ppi(unsigned dest_width) "dest_width : %u" dm163_leds(int led, uint32_t value) "led %d: 0x%x" dm163_channels(int channel, uint8_t value) "channel %d: 0x%x" dm163_refresh_rate(uint32_t rr) "refresh rate %d" + +# apple-gfx.m +apple_gfx_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 +apple_gfx_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 +apple_gfx_create_task(uint32_t vm_size, void *va) "vm_size=0x%x base_addr=%p" +apple_gfx_destroy_task(void *task, unsigned int num_mapped_regions) "task=%p, task->mapped_regions->len=%u" +apple_gfx_map_memory(void *task, uint32_t range_count, uint64_t virtual_offset, uint32_t read_only) "task=%p range_count=0x%x virtual_offset=0x%"PRIx64" read_only=%d" +apple_gfx_map_memory_range(uint32_t i, uint64_t phys_addr, uint64_t phys_len) "[%d] phys_addr=0x%"PRIx64" phys_len=0x%"PRIx64 +apple_gfx_remap(uint64_t retval, void *source_ptr, uint64_t target) "retval=%"PRId64" source=%p target=0x%"PRIx64 
+apple_gfx_unmap_memory(void *task, uint64_t virtual_offset, uint64_t length) "task=%p virtual_offset=0x%"PRIx64" length=0x%"PRIx64 +apple_gfx_read_memory(uint64_t phys_address, uint64_t length, void *dst) "phys_addr=0x%"PRIx64" length=0x%"PRIx64" dest=%p" +apple_gfx_raise_irq(uint32_t vector) "vector=0x%x" +apple_gfx_new_frame(void) "" +apple_gfx_mode_change(uint64_t x, uint64_t y) "x=%"PRId64" y=%"PRId64 +apple_gfx_cursor_set(uint32_t bpp, uint64_t width, uint64_t height) "bpp=%d width=%"PRId64" height=0x%"PRId64 +apple_gfx_cursor_show(uint32_t show) "show=%d" +apple_gfx_cursor_move(void) "" +apple_gfx_common_init(const char *device_name, size_t mmio_size) "device: %s; MMIO size: %zu bytes" +apple_gfx_common_realize_modes_property(uint32_t num_modes) "using %u modes supplied by 'display-modes' device property" +apple_gfx_display_mode(uint32_t mode_idx, uint16_t width_px, uint16_t height_px) "mode %2"PRIu32": %4"PRIu16"x%4"PRIu16 + +# apple-gfx-mmio.m +apple_gfx_mmio_iosfc_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 +apple_gfx_mmio_iosfc_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 +apple_gfx_iosfc_map_memory(uint64_t phys, uint64_t len, uint32_t ro, void *va, void *e, void *f, void* va_result) "phys=0x%"PRIx64" len=0x%"PRIx64" ro=%d va=%p e=%p f=%p -> *va=%p" +apple_gfx_iosfc_map_memory_new_region(size_t i, void *region, uint64_t start, uint64_t end) "index=%zu, region=%p, 0x%"PRIx64"-0x%"PRIx64 +apple_gfx_iosfc_unmap_memory(void *a, void *b, void *c, void *d, void *e, void *f) "a=%p b=%p c=%p d=%p e=%p f=%p" +apple_gfx_iosfc_unmap_memory_region(void* mem, void *region) "unmapping @ %p from memory region %p" +apple_gfx_iosfc_raise_irq(uint32_t vector) "vector=0x%x" + diff --git a/hw/display/vga-isa.c b/hw/display/vga-isa.c index c096ec93e52..3618913b3b7 100644 --- a/hw/display/vga-isa.c +++ b/hw/display/vga-isa.c @@ -88,17 +88,16 @@ static void vga_isa_realizefn(DeviceState *dev, Error **errp) rom_add_vga(VGABIOS_FILENAME); } -static Property vga_isa_properties[] = { +static const Property vga_isa_properties[] = { DEFINE_PROP_UINT32("vgamem_mb", ISAVGAState, state.vram_size_mb, 8), - DEFINE_PROP_END_OF_LIST(), }; -static void vga_isa_class_initfn(ObjectClass *klass, void *data) +static void vga_isa_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = vga_isa_realizefn; - dc->reset = vga_isa_reset; + device_class_set_legacy_reset(dc, vga_isa_reset); dc->vmsd = &vmstate_vga_common; device_class_set_props(dc, vga_isa_properties); set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); diff --git a/hw/display/vga-mmio.c b/hw/display/vga-mmio.c index cd2c46776dc..33263856b76 100644 --- a/hw/display/vga-mmio.c +++ b/hw/display/vga-mmio.c @@ -111,18 +111,17 @@ static void vga_mmio_realizefn(DeviceState *dev, Error **errp) s->vga.con = graphic_console_init(dev, 0, s->vga.hw_ops, &s->vga); } -static Property vga_mmio_properties[] = { +static const Property vga_mmio_properties[] = { DEFINE_PROP_UINT8("it_shift", VGAMmioState, it_shift, 0), DEFINE_PROP_UINT32("vgamem_mb", VGAMmioState, vga.vram_size_mb, 8), - DEFINE_PROP_END_OF_LIST(), }; -static void vga_mmio_class_initfn(ObjectClass *klass, void *data) +static void vga_mmio_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = vga_mmio_realizefn; - dc->reset = vga_mmio_reset; + device_class_set_legacy_reset(dc, vga_mmio_reset); dc->vmsd = &vmstate_vga_common; device_class_set_props(dc, 
vga_mmio_properties); set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); diff --git a/hw/display/vga-pci.c b/hw/display/vga-pci.c index 2d8adce5da6..b81f7fd2d0f 100644 --- a/hw/display/vga-pci.c +++ b/hw/display/vga-pci.c @@ -330,7 +330,7 @@ static void pci_secondary_vga_reset(DeviceState *dev) vga_common_reset(&d->vga); } -static Property vga_pci_properties[] = { +static const Property vga_pci_properties[] = { DEFINE_PROP_UINT32("vgamem_mb", PCIVGAState, vga.vram_size_mb, 16), DEFINE_PROP_BIT("mmio", PCIVGAState, flags, PCI_VGA_FLAG_ENABLE_MMIO, true), DEFINE_PROP_BIT("qemu-extended-regs", @@ -339,20 +339,18 @@ static Property vga_pci_properties[] = { PCIVGAState, flags, PCI_VGA_FLAG_ENABLE_EDID, true), DEFINE_EDID_PROPERTIES(PCIVGAState, edid_info), DEFINE_PROP_BOOL("global-vmstate", PCIVGAState, vga.global_vmstate, false), - DEFINE_PROP_END_OF_LIST(), }; -static Property secondary_pci_properties[] = { +static const Property secondary_pci_properties[] = { DEFINE_PROP_UINT32("vgamem_mb", PCIVGAState, vga.vram_size_mb, 16), DEFINE_PROP_BIT("qemu-extended-regs", PCIVGAState, flags, PCI_VGA_FLAG_ENABLE_QEXT, true), DEFINE_PROP_BIT("edid", PCIVGAState, flags, PCI_VGA_FLAG_ENABLE_EDID, true), DEFINE_EDID_PROPERTIES(PCIVGAState, edid_info), - DEFINE_PROP_END_OF_LIST(), }; -static void vga_pci_class_init(ObjectClass *klass, void *data) +static void vga_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -371,14 +369,14 @@ static const TypeInfo vga_pci_type_info = { .instance_size = sizeof(PCIVGAState), .abstract = true, .class_init = vga_pci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { TYPE_ACPI_DEV_AML_IF }, { }, }, }; -static void vga_class_init(ObjectClass *klass, void *data) +static void vga_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -394,7 +392,7 @@ static void vga_class_init(ObjectClass *klass, void *data) vga_get_big_endian_fb, vga_set_big_endian_fb); } -static void secondary_class_init(ObjectClass *klass, void *data) +static void secondary_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -403,7 +401,7 @@ static void secondary_class_init(ObjectClass *klass, void *data) k->exit = pci_secondary_vga_exit; k->class_id = PCI_CLASS_DISPLAY_OTHER; device_class_set_props(dc, secondary_pci_properties); - dc->reset = pci_secondary_vga_reset; + device_class_set_legacy_reset(dc, pci_secondary_vga_reset); } static const TypeInfo vga_info = { diff --git a/hw/display/vga.c b/hw/display/vga.c index 892fedc8dce..90b89cf4044 100644 --- a/hw/display/vga.c +++ b/hw/display/vga.c @@ -24,9 +24,9 @@ #include "qemu/osdep.h" #include "qemu/units.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "qapi/error.h" -#include "exec/tswap.h" +#include "qemu/target-info.h" #include "hw/display/vga.h" #include "hw/i386/x86.h" #include "hw/pci/pci.h" @@ -1873,7 +1873,6 @@ void vga_common_reset(VGACommonState *s) s->cursor_start = 0; s->cursor_end = 0; s->cursor_offset = 0; - s->big_endian_fb = s->default_endian_fb; memset(s->invalidated_y_table, '\0', sizeof(s->invalidated_y_table)); memset(s->last_palette, '\0', sizeof(s->last_palette)); memset(s->last_ch_attr, '\0', sizeof(s->last_ch_attr)); @@ -2265,7 +2264,8 @@ bool vga_common_init(VGACommonState *s, 
Object *obj, Error **errp) * into a device attribute set by the machine/platform to remove * all target endian dependencies from this file. */ - s->default_endian_fb = target_words_bigendian(); + s->default_endian_fb = target_big_endian(); + s->big_endian_fb = s->default_endian_fb; vga_dirty_log_start(s); diff --git a/hw/display/vga_int.h b/hw/display/vga_int.h index f77c1c11457..747b5cc6cf8 100644 --- a/hw/display/vga_int.h +++ b/hw/display/vga_int.h @@ -26,8 +26,8 @@ #define HW_VGA_INT_H #include "ui/console.h" -#include "exec/ioport.h" -#include "exec/memory.h" +#include "system/ioport.h" +#include "system/memory.h" #include "hw/display/bochs-vbe.h" #include "hw/acpi/acpi_aml_interface.h" diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c index 63c64ddde64..9fc6bbcd2ce 100644 --- a/hw/display/vhost-user-gpu.c +++ b/hw/display/vhost-user-gpu.c @@ -18,6 +18,7 @@ #include "chardev/char-fe.h" #include "qapi/error.h" #include "migration/blocker.h" +#include "standard-headers/drm/drm_fourcc.h" typedef enum VhostUserGpuRequest { VHOST_USER_GPU_NONE = 0, @@ -249,7 +250,9 @@ vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg) case VHOST_USER_GPU_DMABUF_SCANOUT: { VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout; int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr); - uint64_t modifier = 0; + uint32_t offset = 0; + uint32_t stride = m->fd_stride; + uint64_t modifier = DRM_FORMAT_MOD_INVALID; QemuDmaBuf *dmabuf; if (m->scanout_id >= g->parent_obj.conf.max_outputs) { @@ -282,10 +285,10 @@ vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg) } dmabuf = qemu_dmabuf_new(m->width, m->height, - m->fd_stride, 0, 0, + &offset, &stride, 0, 0, m->fd_width, m->fd_height, m->fd_drm_fourcc, modifier, - fd, false, m->fd_flags & + &fd, 1, false, m->fd_flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP); dpy_gl_scanout_dmabuf(con, dmabuf); @@ -390,7 +393,7 @@ vhost_user_gpu_chr_read(void *opaque) } msg->request = request; - msg->flags = size; + msg->flags = flags; msg->size = size; if (request == VHOST_USER_GPU_CURSOR_UPDATE || @@ -513,7 +516,7 @@ vhost_user_gpu_set_config(VirtIODevice *vdev, } } -static void +static int vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val) { VhostUserGPU *g = VHOST_USER_GPU(vdev); @@ -522,18 +525,24 @@ vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val) if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) { if (!vhost_user_gpu_do_set_socket(g, &err)) { error_report_err(err); - return; + return 0; } vhost_user_backend_start(g->vhost); } else { + int ret; + /* unblock any wait and stop processing */ if (g->vhost_gpu_fd != -1) { vhost_user_gpu_update_blocked(g, true); qemu_chr_fe_deinit(&g->vhost_chr, true); g->vhost_gpu_fd = -1; } - vhost_user_backend_stop(g->vhost); + ret = vhost_user_backend_stop(g->vhost); + if (ret < 0) { + return ret; + } } + return 0; } static bool @@ -631,6 +640,14 @@ vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp) error_report("EDID requested but the backend doesn't support it."); g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_EDID_ENABLED); } + if (virtio_has_feature(g->vhost->dev.features, + VIRTIO_GPU_F_RESOURCE_UUID)) { + g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_RESOURCE_UUID_ENABLED; + } + if (virtio_has_feature(g->vhost->dev.features, + VIRTIO_GPU_F_RESOURCE_UUID)) { + g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_RESOURCE_UUID_ENABLED; + } if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) { return; @@ -642,16 +659,15 @@ 
vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp) static struct vhost_dev *vhost_user_gpu_get_vhost(VirtIODevice *vdev) { VhostUserGPU *g = VHOST_USER_GPU(vdev); - return &g->vhost->dev; + return g->vhost ? &g->vhost->dev : NULL; } -static Property vhost_user_gpu_properties[] = { +static const Property vhost_user_gpu_properties[] = { VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf), - DEFINE_PROP_END_OF_LIST(), }; static void -vhost_user_gpu_class_init(ObjectClass *klass, void *data) +vhost_user_gpu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/display/virtio-gpu-base.c b/hw/display/virtio-gpu-base.c index 4fc7ef8896c..7269477a1c8 100644 --- a/hw/display/virtio-gpu-base.c +++ b/hw/display/virtio-gpu-base.c @@ -19,6 +19,7 @@ #include "qemu/error-report.h" #include "hw/display/edid.h" #include "trace.h" +#include "qapi/qapi-types-virtio.h" void virtio_gpu_base_reset(VirtIOGPUBase *g) @@ -56,6 +57,8 @@ void virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout, struct virtio_gpu_resp_edid *edid) { + size_t output_idx; + VirtIOGPUOutputList *node; qemu_edid_info info = { .width_mm = g->req_state[scanout].width_mm, .height_mm = g->req_state[scanout].height_mm, @@ -64,6 +67,14 @@ virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout, .refresh_rate = g->req_state[scanout].refresh_rate, }; + for (output_idx = 0, node = g->conf.outputs; + output_idx <= scanout && node; output_idx++, node = node->next) { + if (output_idx == scanout && node->value && node->value->name) { + info.name = node->value->name; + break; + } + } + edid->size = cpu_to_le32(sizeof(edid->edid)); qemu_edid_generate(edid->edid, sizeof(edid->edid), &info); } @@ -110,7 +121,6 @@ static void virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) /* send event to guest */ virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY); - return; } static void @@ -173,6 +183,8 @@ virtio_gpu_base_device_realize(DeviceState *qdev, VirtIOHandleOutput cursor_cb, Error **errp) { + size_t output_idx; + VirtIOGPUOutputList *node; VirtIODevice *vdev = VIRTIO_DEVICE(qdev); VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev); int i; @@ -182,6 +194,20 @@ virtio_gpu_base_device_realize(DeviceState *qdev, return false; } + for (output_idx = 0, node = g->conf.outputs; + node; output_idx++, node = node->next) { + if (output_idx == g->conf.max_outputs) { + error_setg(errp, "invalid outputs > %d", g->conf.max_outputs); + return false; + } + if (node->value && node->value->name && + strlen(node->value->name) > EDID_NAME_MAX_LENGTH) { + error_setg(errp, "invalid output name '%s' > %d", + node->value->name, EDID_NAME_MAX_LENGTH); + return false; + } + } + if (virtio_gpu_virgl_enabled(g->conf)) { error_setg(&g->migration_blocker, "virgl is not yet migratable"); if (migrate_add_blocker(&g->migration_blocker, errp) < 0) { @@ -235,6 +261,9 @@ virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features, if (virtio_gpu_context_init_enabled(g->conf)) { features |= (1 << VIRTIO_GPU_F_CONTEXT_INIT); } + if (virtio_gpu_resource_uuid_enabled(g->conf)) { + features |= (1 << VIRTIO_GPU_F_RESOURCE_UUID); + } return features; } @@ -260,7 +289,7 @@ virtio_gpu_base_device_unrealize(DeviceState *qdev) } static void -virtio_gpu_base_class_init(ObjectClass *klass, void *data) +virtio_gpu_base_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); 
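The virtio-gpu-base.c hunks above let each scanout carry an optional name taken from the device's 'outputs' list: realize() now rejects more list entries than max_outputs and any name longer than EDID_NAME_MAX_LENGTH, and virtio_gpu_base_generate_edid() walks the same list to pick the name for the scanout being queried. The standalone sketch below shows that indexed walk and the length check over a QAPI-style singly linked list; the Output/OutputList structs, the 12-character placeholder limit and the main() harness are illustrative assumptions, only the lookup/validation pattern mirrors the patch.

/*
 * Standalone sketch, not part of the patch: indexed lookup and validation
 * over a QAPI-style output list.  OutputList/Output and
 * FAKE_EDID_NAME_MAX_LENGTH are stand-ins for the QAPI-generated
 * VirtIOGPUOutputList and the real EDID_NAME_MAX_LENGTH.
 */
#include <stdio.h>
#include <string.h>

#define FAKE_EDID_NAME_MAX_LENGTH 12      /* placeholder for the real limit */

typedef struct Output {
    char *name;                           /* NULL when the output is unnamed */
} Output;

typedef struct OutputList {
    struct OutputList *next;
    Output *value;
} OutputList;

/* EDID generation: return the name of the scanout-th entry, or NULL. */
static const char *output_name(const OutputList *head, unsigned scanout)
{
    unsigned idx = 0;

    for (const OutputList *node = head; idx <= scanout && node;
         idx++, node = node->next) {
        if (idx == scanout && node->value && node->value->name) {
            return node->value->name;
        }
    }
    return NULL;
}

/* Realize-time check: every supplied name must fit into the EDID field. */
static int validate_names(const OutputList *head)
{
    for (const OutputList *node = head; node; node = node->next) {
        if (node->value && node->value->name &&
            strlen(node->value->name) > FAKE_EDID_NAME_MAX_LENGTH) {
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    Output o0 = { .name = (char *)"Left panel" };
    Output o1 = { .name = NULL };
    OutputList n1 = { .next = NULL, .value = &o1 };
    OutputList n0 = { .next = &n1, .value = &o0 };
    const char *name = output_name(&n0, 1);

    printf("scanout 0 -> %s\n", output_name(&n0, 0));
    printf("scanout 1 -> %s\n", name ? name : "(unnamed, default EDID)");
    printf("names valid: %s\n", validate_names(&n0) == 0 ? "yes" : "no");
    return 0;
}

Resolving the name at EDID-generation time keeps the configured list as the single owner of the strings, so nothing has to be duplicated per scanout.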
diff --git a/hw/display/virtio-gpu-gl.c b/hw/display/virtio-gpu-gl.c index 952820a4256..c06a078fb36 100644 --- a/hw/display/virtio-gpu-gl.c +++ b/hw/display/virtio-gpu-gl.c @@ -16,7 +16,7 @@ #include "qemu/module.h" #include "qemu/error-report.h" #include "qapi/error.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-gpu.h" #include "hw/virtio/virtio-gpu-bswap.h" @@ -29,9 +29,14 @@ static void virtio_gpu_gl_update_cursor_data(VirtIOGPU *g, struct virtio_gpu_scanout *s, uint32_t resource_id) { + VirtIOGPUGL *gl = VIRTIO_GPU_GL(g); uint32_t width, height; uint32_t pixels, *data; + if (gl->renderer_state != RS_INITED) { + return; + } + data = virgl_renderer_get_cursor_data(resource_id, &width, &height); if (!data) { return; @@ -65,13 +70,22 @@ static void virtio_gpu_gl_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) return; } - if (!gl->renderer_inited) { - virtio_gpu_virgl_init(g); - gl->renderer_inited = true; - } - if (gl->renderer_reset) { - gl->renderer_reset = false; + switch (gl->renderer_state) { + case RS_RESET: virtio_gpu_virgl_reset(g); + /* fallthrough */ + case RS_START: + if (virtio_gpu_virgl_init(g)) { + gl->renderer_state = RS_INIT_FAILED; + return; + } + + gl->renderer_state = RS_INITED; + break; + case RS_INIT_FAILED: + return; + case RS_INITED: + break; } cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command)); @@ -98,14 +112,15 @@ static void virtio_gpu_gl_reset(VirtIODevice *vdev) * GL functions must be called with the associated GL context in main * thread, and when the renderer is unblocked. */ - if (gl->renderer_inited && !gl->renderer_reset) { + if (gl->renderer_state == RS_INITED) { virtio_gpu_virgl_reset_scanout(g); - gl->renderer_reset = true; + gl->renderer_state = RS_RESET; } } static void virtio_gpu_gl_device_realize(DeviceState *qdev, Error **errp) { + ERRP_GUARD(); VirtIOGPU *g = VIRTIO_GPU(qdev); #if HOST_BIG_ENDIAN @@ -119,24 +134,55 @@ static void virtio_gpu_gl_device_realize(DeviceState *qdev, Error **errp) } if (!display_opengl) { - error_setg(errp, "opengl is not available"); + error_setg(errp, + "The display backend does not have OpenGL support enabled"); + error_append_hint(errp, + "It can be enabled with '-display BACKEND,gl=on' " + "where BACKEND is the name of the display backend " + "to use.\n"); return; } g->parent_obj.conf.flags |= (1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED); - VIRTIO_GPU_BASE(g)->virtio_config.num_capsets = - virtio_gpu_virgl_get_num_capsets(g); + g->capset_ids = virtio_gpu_virgl_get_capsets(g); + VIRTIO_GPU_BASE(g)->virtio_config.num_capsets = g->capset_ids->len; + +#if VIRGL_VERSION_MAJOR >= 1 + g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED; +#endif virtio_gpu_device_realize(qdev, errp); } -static Property virtio_gpu_gl_properties[] = { +static const Property virtio_gpu_gl_properties[] = { DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags, VIRTIO_GPU_FLAG_STATS_ENABLED, false), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_BIT("venus", VirtIOGPU, parent_obj.conf.flags, + VIRTIO_GPU_FLAG_VENUS_ENABLED, false), }; -static void virtio_gpu_gl_class_init(ObjectClass *klass, void *data) +static void virtio_gpu_gl_device_unrealize(DeviceState *qdev) +{ + VirtIOGPU *g = VIRTIO_GPU(qdev); + VirtIOGPUGL *gl = VIRTIO_GPU_GL(qdev); + + if (gl->renderer_state >= RS_INITED) { +#if VIRGL_VERSION_MAJOR >= 1 + qemu_bh_delete(gl->cmdq_resume_bh); +#endif + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { + timer_free(gl->print_stats); + } + 
timer_free(gl->fence_poll); + virgl_renderer_cleanup(NULL); + } + + gl->renderer_state = RS_START; + + g_array_unref(g->capset_ids); +} + +static void virtio_gpu_gl_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); @@ -149,6 +195,7 @@ static void virtio_gpu_gl_class_init(ObjectClass *klass, void *data) vgc->update_cursor_data = virtio_gpu_gl_update_cursor_data; vdc->realize = virtio_gpu_gl_device_realize; + vdc->unrealize = virtio_gpu_gl_device_unrealize; vdc->reset = virtio_gpu_gl_reset; device_class_set_props(dc, virtio_gpu_gl_properties); } diff --git a/hw/display/virtio-gpu-pci-rutabaga.c b/hw/display/virtio-gpu-pci-rutabaga.c index abbb898c65d..5fdff37f2c1 100644 --- a/hw/display/virtio-gpu-pci-rutabaga.c +++ b/hw/display/virtio-gpu-pci-rutabaga.c @@ -34,7 +34,7 @@ static const TypeInfo virtio_gpu_rutabaga_pci_info[] = { .parent = TYPE_VIRTIO_GPU_PCI_BASE, .instance_size = sizeof(VirtIOGPURutabagaPCI), .instance_init = virtio_gpu_rutabaga_initfn, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, } diff --git a/hw/display/virtio-gpu-pci.c b/hw/display/virtio-gpu-pci.c index da6a99f0380..c0d71b6254c 100644 --- a/hw/display/virtio-gpu-pci.c +++ b/hw/display/virtio-gpu-pci.c @@ -21,9 +21,8 @@ #include "hw/virtio/virtio-gpu-pci.h" #include "qom/object.h" -static Property virtio_gpu_pci_base_properties[] = { +static const Property virtio_gpu_pci_base_properties[] = { DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy), - DEFINE_PROP_END_OF_LIST(), }; static void virtio_gpu_pci_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -58,7 +57,7 @@ static void virtio_gpu_pci_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp) } } -static void virtio_gpu_pci_base_class_init(ObjectClass *klass, void *data) +static void virtio_gpu_pci_base_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/display/virtio-gpu-rutabaga.c b/hw/display/virtio-gpu-rutabaga.c index 17bf701a216..ed5ae52acbe 100644 --- a/hw/display/virtio-gpu-rutabaga.c +++ b/hw/display/virtio-gpu-rutabaga.c @@ -1096,7 +1096,7 @@ static void virtio_gpu_rutabaga_realize(DeviceState *qdev, Error **errp) virtio_gpu_device_realize(qdev, errp); } -static Property virtio_gpu_rutabaga_properties[] = { +static const Property virtio_gpu_rutabaga_properties[] = { DEFINE_PROP_BIT64("gfxstream-vulkan", VirtIOGPURutabaga, capset_mask, RUTABAGA_CAPSET_GFXSTREAM_VULKAN, false), DEFINE_PROP_BIT64("cross-domain", VirtIOGPURutabaga, capset_mask, @@ -1108,10 +1108,9 @@ static Property virtio_gpu_rutabaga_properties[] = { DEFINE_PROP_STRING("wayland-socket-path", VirtIOGPURutabaga, wayland_socket_path), DEFINE_PROP_STRING("wsi", VirtIOGPURutabaga, wsi), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_gpu_rutabaga_class_init(ObjectClass *klass, void *data) +static void virtio_gpu_rutabaga_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/display/virtio-gpu-udmabuf.c b/hw/display/virtio-gpu-udmabuf.c index c02ec6d37dc..d804f321aa3 100644 --- a/hw/display/virtio-gpu-udmabuf.c +++ b/hw/display/virtio-gpu-udmabuf.c @@ -19,12 +19,13 @@ #include "hw/virtio/virtio-gpu.h" #include "hw/virtio/virtio-gpu-pixman.h" #include "trace.h" -#include "exec/ramblock.h" -#include "sysemu/hostmem.h" +#include 
"system/ramblock.h" +#include "system/hostmem.h" #include #include #include "qemu/memfd.h" #include "standard-headers/linux/udmabuf.h" +#include "standard-headers/drm/drm_fourcc.h" static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res) { @@ -176,16 +177,19 @@ static VGPUDMABuf struct virtio_gpu_rect *r) { VGPUDMABuf *dmabuf; + uint32_t offset = 0; if (res->dmabuf_fd < 0) { return NULL; } dmabuf = g_new0(VGPUDMABuf, 1); - dmabuf->buf = qemu_dmabuf_new(r->width, r->height, fb->stride, + dmabuf->buf = qemu_dmabuf_new(r->width, r->height, + &offset, &fb->stride, r->x, r->y, fb->width, fb->height, qemu_pixman_to_drm_format(fb->format), - 0, res->dmabuf_fd, true, false); + DRM_FORMAT_MOD_INVALID, &res->dmabuf_fd, + 1, true, false); dmabuf->scanout_id = scanout_id; QTAILQ_INSERT_HEAD(&g->dmabuf.bufs, dmabuf, next); diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c index 9f34d0e6619..94ddc01f91c 100644 --- a/hw/display/virtio-gpu-virgl.c +++ b/hw/display/virtio-gpu-virgl.c @@ -17,11 +17,31 @@ #include "trace.h" #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-gpu.h" +#include "hw/virtio/virtio-gpu-bswap.h" +#include "hw/virtio/virtio-gpu-pixman.h" #include "ui/egl-helpers.h" #include +struct virtio_gpu_virgl_resource { + struct virtio_gpu_simple_resource base; + MemoryRegion *mr; +}; + +static struct virtio_gpu_virgl_resource * +virtio_gpu_virgl_find_resource(VirtIOGPU *g, uint32_t resource_id) +{ + struct virtio_gpu_simple_resource *res; + + res = virtio_gpu_find_resource(g, resource_id); + if (!res) { + return NULL; + } + + return container_of(res, struct virtio_gpu_virgl_resource, base); +} + #if VIRGL_RENDERER_CALLBACKS_VERSION >= 4 static void * virgl_get_egl_display(G_GNUC_UNUSED void *cookie) @@ -30,16 +50,179 @@ virgl_get_egl_display(G_GNUC_UNUSED void *cookie) } #endif +#if VIRGL_VERSION_MAJOR >= 1 +struct virtio_gpu_virgl_hostmem_region { + MemoryRegion mr; + struct VirtIOGPU *g; + bool finish_unmapping; +}; + +static struct virtio_gpu_virgl_hostmem_region * +to_hostmem_region(MemoryRegion *mr) +{ + return container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr); +} + +static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque) +{ + VirtIOGPU *g = opaque; + + virtio_gpu_process_cmdq(g); +} + +static void virtio_gpu_virgl_hostmem_region_free(void *obj) +{ + MemoryRegion *mr = MEMORY_REGION(obj); + struct virtio_gpu_virgl_hostmem_region *vmr; + VirtIOGPUBase *b; + VirtIOGPUGL *gl; + + vmr = to_hostmem_region(mr); + vmr->finish_unmapping = true; + + b = VIRTIO_GPU_BASE(vmr->g); + b->renderer_blocked--; + + /* + * memory_region_unref() is executed from RCU thread context, while + * virglrenderer works only on the main-loop thread that's holding GL + * context. 
+ */ + gl = VIRTIO_GPU_GL(vmr->g); + qemu_bh_schedule(gl->cmdq_resume_bh); +} + +static int +virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g, + struct virtio_gpu_virgl_resource *res, + uint64_t offset) +{ + struct virtio_gpu_virgl_hostmem_region *vmr; + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); + MemoryRegion *mr; + uint64_t size; + void *data; + int ret; + + if (!virtio_gpu_hostmem_enabled(b->conf)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__); + return -EOPNOTSUPP; + } + + ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size); + if (ret) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n", + __func__, strerror(-ret)); + return ret; + } + + vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1); + vmr->g = g; + + mr = &vmr->mr; + memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data); + memory_region_add_subregion(&b->hostmem, offset, mr); + memory_region_set_enabled(mr, true); + + /* + * MR could outlive the resource if MR's reference is held outside of + * virtio-gpu. In order to prevent unmapping resource while MR is alive, + * and thus, making the data pointer invalid, we will block virtio-gpu + * command processing until MR is fully unreferenced and freed. + */ + OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free; + + res->mr = mr; + + return 0; +} + +static int +virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g, + struct virtio_gpu_virgl_resource *res, + bool *cmd_suspended) +{ + struct virtio_gpu_virgl_hostmem_region *vmr; + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); + MemoryRegion *mr = res->mr; + int ret; + + if (!mr) { + return 0; + } + + vmr = to_hostmem_region(res->mr); + + /* + * Perform async unmapping in 3 steps: + * + * 1. Begin async unmapping with memory_region_del_subregion() + * and suspend/block cmd processing. + * 2. Wait for res->mr to be freed and cmd processing resumed + * asynchronously by virtio_gpu_virgl_hostmem_region_free(). + * 3. Finish the unmapping with final virgl_renderer_resource_unmap(). 
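+     *
+     * Until the free callback has run, the command stays suspended in the
+     * command queue; virtio_gpu_virgl_resume_cmdq_bh() then re-processes it,
+     * and step 3 executes on the second pass through this function.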
+ */ + if (vmr->finish_unmapping) { + res->mr = NULL; + g_free(vmr); + + ret = virgl_renderer_resource_unmap(res->base.resource_id); + if (ret) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: failed to unmap virgl resource: %s\n", + __func__, strerror(-ret)); + return ret; + } + } else { + *cmd_suspended = true; + + /* render will be unblocked once MR is freed */ + b->renderer_blocked++; + + /* memory region owns self res->mr object and frees it by itself */ + memory_region_set_enabled(mr, false); + memory_region_del_subregion(&b->hostmem, mr); + object_unparent(OBJECT(mr)); + } + + return 0; +} +#endif + static void virgl_cmd_create_resource_2d(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_resource_create_2d c2d; struct virgl_renderer_resource_create_args args; + struct virtio_gpu_virgl_resource *res; VIRTIO_GPU_FILL_CMD(c2d); trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format, c2d.width, c2d.height); + if (c2d.resource_id == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = virtio_gpu_virgl_find_resource(g, c2d.resource_id); + if (res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", + __func__, c2d.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = g_new0(struct virtio_gpu_virgl_resource, 1); + res->base.width = c2d.width; + res->base.height = c2d.height; + res->base.format = c2d.format; + res->base.resource_id = c2d.resource_id; + res->base.dmabuf_fd = -1; + QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next); + args.handle = c2d.resource_id; args.target = 2; args.format = c2d.format; @@ -59,11 +242,35 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g, { struct virtio_gpu_resource_create_3d c3d; struct virgl_renderer_resource_create_args args; + struct virtio_gpu_virgl_resource *res; VIRTIO_GPU_FILL_CMD(c3d); trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format, c3d.width, c3d.height, c3d.depth); + if (c3d.resource_id == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = virtio_gpu_virgl_find_resource(g, c3d.resource_id); + if (res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", + __func__, c3d.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = g_new0(struct virtio_gpu_virgl_resource, 1); + res->base.width = c3d.width; + res->base.height = c3d.height; + res->base.format = c3d.format; + res->base.resource_id = c3d.resource_id; + res->base.dmabuf_fd = -1; + QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next); + args.handle = c3d.resource_id; args.target = c3d.target; args.format = c3d.format; @@ -79,15 +286,35 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g, } static void virgl_cmd_resource_unref(VirtIOGPU *g, - struct virtio_gpu_ctrl_command *cmd) + struct virtio_gpu_ctrl_command *cmd, + bool *cmd_suspended) { struct virtio_gpu_resource_unref unref; + struct virtio_gpu_virgl_resource *res; struct iovec *res_iovs = NULL; int num_iovs = 0; VIRTIO_GPU_FILL_CMD(unref); trace_virtio_gpu_cmd_res_unref(unref.resource_id); + res = virtio_gpu_virgl_find_resource(g, unref.resource_id); + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n", + __func__, unref.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + +#if 
VIRGL_VERSION_MAJOR >= 1 + if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) { + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + if (*cmd_suspended) { + return; + } +#endif + virgl_renderer_resource_detach_iov(unref.resource_id, &res_iovs, &num_iovs); @@ -95,6 +322,10 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g, virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs); } virgl_renderer_resource_unref(unref.resource_id); + + QTAILQ_REMOVE(&g->reslist, &res->base, next); + + g_free(res); } static void virgl_cmd_context_create(VirtIOGPU *g, @@ -106,8 +337,24 @@ static void virgl_cmd_context_create(VirtIOGPU *g, trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id, cc.debug_name); - virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen, - cc.debug_name); + if (cc.context_init) { + if (!virtio_gpu_context_init_enabled(g->parent_obj.conf)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: context_init disabled", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + +#if VIRGL_VERSION_MAJOR >= 1 + virgl_renderer_context_create_with_flags(cc.hdr.ctx_id, + cc.context_init, + cc.nlen, + cc.debug_name); + return; +#endif + } + + virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen, cc.debug_name); } static void virgl_cmd_context_destroy(VirtIOGPU *g, @@ -171,7 +418,7 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g, struct virgl_renderer_resource_info info; void *d3d_tex2d = NULL; -#ifdef HAVE_VIRGL_D3D_INFO_EXT +#if VIRGL_VERSION_MAJOR >= 1 struct virgl_renderer_resource_info_ext ext; memset(&ext, 0, sizeof(ext)); ret = virgl_renderer_resource_get_info_ext(ss.resource_id, &ext); @@ -375,19 +622,13 @@ static void virgl_cmd_get_capset_info(VirtIOGPU *g, VIRTIO_GPU_FILL_CMD(info); memset(&resp, 0, sizeof(resp)); - if (info.capset_index == 0) { - resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL; - virgl_renderer_get_cap_set(resp.capset_id, - &resp.capset_max_version, - &resp.capset_max_size); - } else if (info.capset_index == 1) { - resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2; + + if (info.capset_index < g->capset_ids->len) { + resp.capset_id = g_array_index(g->capset_ids, uint32_t, + info.capset_index); virgl_renderer_get_cap_set(resp.capset_id, &resp.capset_max_version, &resp.capset_max_size); - } else { - resp.capset_max_version = 0; - resp.capset_max_size = 0; } resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO; virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp)); @@ -417,9 +658,221 @@ static void virgl_cmd_get_capset(VirtIOGPU *g, g_free(resp); } +#if VIRGL_VERSION_MAJOR >= 1 +static void virgl_cmd_resource_create_blob(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virgl_renderer_resource_create_blob_args virgl_args = { 0 }; + g_autofree struct virtio_gpu_virgl_resource *res = NULL; + struct virtio_gpu_resource_create_blob cblob; + struct virgl_renderer_resource_info info; + int ret; + + if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) { + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + VIRTIO_GPU_FILL_CMD(cblob); + virtio_gpu_create_blob_bswap(&cblob); + trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size); + + if (cblob.resource_id == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = virtio_gpu_virgl_find_resource(g, cblob.resource_id); + if (res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", + __func__, cblob.resource_id); + cmd->error = 
VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = g_new0(struct virtio_gpu_virgl_resource, 1); + res->base.resource_id = cblob.resource_id; + res->base.blob_size = cblob.size; + res->base.dmabuf_fd = -1; + + if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) { + ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), + cmd, &res->base.addrs, + &res->base.iov, &res->base.iov_cnt); + if (!ret) { + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + } + + virgl_args.res_handle = cblob.resource_id; + virgl_args.ctx_id = cblob.hdr.ctx_id; + virgl_args.blob_mem = cblob.blob_mem; + virgl_args.blob_id = cblob.blob_id; + virgl_args.blob_flags = cblob.blob_flags; + virgl_args.size = cblob.size; + virgl_args.iovecs = res->base.iov; + virgl_args.num_iovs = res->base.iov_cnt; + + ret = virgl_renderer_resource_create_blob(&virgl_args); + if (ret) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n", + __func__, strerror(-ret)); + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + virtio_gpu_cleanup_mapping(g, &res->base); + return; + } + + ret = virgl_renderer_resource_get_info(cblob.resource_id, &info); + if (ret) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: resource does not have info %d: %s\n", + __func__, cblob.resource_id, strerror(-ret)); + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + virtio_gpu_cleanup_mapping(g, &res->base); + virgl_renderer_resource_unref(cblob.resource_id); + return; + } + + res->base.dmabuf_fd = info.fd; + + QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next); + res = NULL; +} + +static void virgl_cmd_resource_map_blob(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resource_map_blob mblob; + struct virtio_gpu_virgl_resource *res; + struct virtio_gpu_resp_map_info resp; + int ret; + + VIRTIO_GPU_FILL_CMD(mblob); + virtio_gpu_map_blob_bswap(&mblob); + + res = virtio_gpu_virgl_find_resource(g, mblob.resource_id); + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n", + __func__, mblob.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset); + if (ret) { + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + + memset(&resp, 0, sizeof(resp)); + resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO; + virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info); + virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp)); +} + +static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd, + bool *cmd_suspended) +{ + struct virtio_gpu_resource_unmap_blob ublob; + struct virtio_gpu_virgl_resource *res; + int ret; + + VIRTIO_GPU_FILL_CMD(ublob); + virtio_gpu_unmap_blob_bswap(&ublob); + + res = virtio_gpu_virgl_find_resource(g, ublob.resource_id); + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n", + __func__, ublob.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + ret = virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended); + if (ret) { + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } +} + +static void virgl_cmd_set_scanout_blob(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_framebuffer fb = { 0 }; + struct virtio_gpu_virgl_resource *res; + struct virtio_gpu_set_scanout_blob ss; + + VIRTIO_GPU_FILL_CMD(ss); + virtio_gpu_scanout_blob_bswap(&ss); + trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id, + ss.r.width, ss.r.height, ss.r.x, + 
ss.r.y); + + if (ss.scanout_id >= g->parent_obj.conf.max_outputs) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", + __func__, ss.scanout_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; + return; + } + + if (ss.resource_id == 0) { + virtio_gpu_disable_scanout(g, ss.scanout_id); + return; + } + + if (ss.width < 16 || + ss.height < 16 || + ss.r.x + ss.r.width > ss.width || + ss.r.y + ss.r.height > ss.height) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for" + " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n", + __func__, ss.scanout_id, ss.resource_id, + ss.r.x, ss.r.y, ss.r.width, ss.r.height, + ss.width, ss.height); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + res = virtio_gpu_virgl_find_resource(g, ss.resource_id); + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n", + __func__, ss.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + if (res->base.dmabuf_fd < 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource not backed by dmabuf %d\n", + __func__, ss.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + + if (!virtio_gpu_scanout_blob_to_fb(&fb, &ss, res->base.blob_size)) { + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + g->parent_obj.enable = 1; + if (virtio_gpu_update_dmabuf(g, ss.scanout_id, &res->base, &fb, &ss.r)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to update dmabuf\n", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + virtio_gpu_update_scanout(g, ss.scanout_id, &res->base, &fb, &ss.r); +} +#endif + void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { + bool cmd_suspended = false; + VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr); virgl_renderer_force_ctx_0(); @@ -461,7 +914,7 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, virgl_cmd_resource_flush(g, cmd); break; case VIRTIO_GPU_CMD_RESOURCE_UNREF: - virgl_cmd_resource_unref(g, cmd); + virgl_cmd_resource_unref(g, cmd, &cmd_suspended); break; case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE: /* TODO add security */ @@ -483,12 +936,26 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, case VIRTIO_GPU_CMD_GET_EDID: virtio_gpu_get_edid(g, cmd); break; +#if VIRGL_VERSION_MAJOR >= 1 + case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: + virgl_cmd_resource_create_blob(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB: + virgl_cmd_resource_map_blob(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB: + virgl_cmd_resource_unmap_blob(g, cmd, &cmd_suspended); + break; + case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB: + virgl_cmd_set_scanout_blob(g, cmd); + break; +#endif default: cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; break; } - if (cmd->finished) { + if (cmd_suspended || cmd->finished) { return; } if (cmd->error) { @@ -503,6 +970,15 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, } trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type); +#if VIRGL_VERSION_MAJOR >= 1 + if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX) { + virgl_renderer_context_create_fence(cmd->cmd_hdr.ctx_id, + VIRGL_RENDERER_FENCE_FLAG_MERGEABLE, + cmd->cmd_hdr.ring_idx, + cmd->cmd_hdr.fence_id); + return; + } +#endif virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type); } @@ -516,6 +992,11 @@ static void virgl_write_fence(void *opaque, uint32_t fence) * the guest can end up emitting fences out of order * so we should check all fenced cmds not just the first one. 
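     * For example, if the queue holds commands with fence IDs 7, 5 and 9 and
     * fence 7 signals, both the 7 and the 5 entries are retired by this loop,
     * while the 9 entry stays queued until its own fence completes.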
*/ +#if VIRGL_VERSION_MAJOR >= 1 + if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX) { + continue; + } +#endif if (cmd->cmd_hdr.fence_id > fence) { continue; } @@ -525,10 +1006,33 @@ static void virgl_write_fence(void *opaque, uint32_t fence) g_free(cmd); g->inflight--; if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { - fprintf(stderr, "inflight: %3d (-)\r", g->inflight); + trace_virtio_gpu_dec_inflight_fences(g->inflight); + } + } +} + +#if VIRGL_VERSION_MAJOR >= 1 +static void virgl_write_context_fence(void *opaque, uint32_t ctx_id, + uint32_t ring_idx, uint64_t fence_id) { + VirtIOGPU *g = opaque; + struct virtio_gpu_ctrl_command *cmd, *tmp; + + QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) { + if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX && + cmd->cmd_hdr.ctx_id == ctx_id && cmd->cmd_hdr.ring_idx == ring_idx && + cmd->cmd_hdr.fence_id <= fence_id) { + trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id); + virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA); + QTAILQ_REMOVE(&g->fenceq, cmd, next); + g_free(cmd); + g->inflight--; + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { + trace_virtio_gpu_dec_inflight_fences(g->inflight); + } } } } +#endif static virgl_renderer_gl_context virgl_create_context(void *opaque, int scanout_idx, @@ -564,16 +1068,24 @@ static int virgl_make_context_current(void *opaque, int scanout_idx, } static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = { +#if VIRGL_VERSION_MAJOR >= 1 + .version = 3, +#else .version = 1, +#endif .write_fence = virgl_write_fence, .create_gl_context = virgl_create_context, .destroy_gl_context = virgl_destroy_context, .make_current = virgl_make_context_current, +#if VIRGL_VERSION_MAJOR >= 1 + .write_context_fence = virgl_write_context_fence, +#endif }; static void virtio_gpu_print_stats(void *opaque) { VirtIOGPU *g = opaque; + VirtIOGPUGL *gl = VIRTIO_GPU_GL(g); if (g->stats.requests) { fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n", @@ -588,17 +1100,18 @@ static void virtio_gpu_print_stats(void *opaque) } else { fprintf(stderr, "stats: idle\r"); } - timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000); + timer_mod(gl->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000); } static void virtio_gpu_fence_poll(void *opaque) { VirtIOGPU *g = opaque; + VirtIOGPUGL *gl = VIRTIO_GPU_GL(g); virgl_renderer_poll(); virtio_gpu_process_cmdq(g); if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) { - timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10); + timer_mod(gl->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10); } } @@ -626,6 +1139,7 @@ int virtio_gpu_virgl_init(VirtIOGPU *g) { int ret; uint32_t flags = 0; + VirtIOGPUGL *gl = VIRTIO_GPU_GL(g); #if VIRGL_RENDERER_CALLBACKS_VERSION >= 4 if (qemu_egl_display) { @@ -638,6 +1152,11 @@ int virtio_gpu_virgl_init(VirtIOGPU *g) flags |= VIRGL_RENDERER_D3D11_SHARE_TEXTURE; } #endif +#if VIRGL_VERSION_MAJOR >= 1 + if (virtio_gpu_venus_enabled(g->parent_obj.conf)) { + flags |= VIRGL_RENDERER_VENUS | VIRGL_RENDERER_RENDER_SERVER; + } +#endif ret = virgl_renderer_init(g, flags, &virtio_gpu_3d_cbs); if (ret != 0) { @@ -645,23 +1164,55 @@ int virtio_gpu_virgl_init(VirtIOGPU *g) return ret; } - g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL, - virtio_gpu_fence_poll, g); + gl->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL, + virtio_gpu_fence_poll, g); if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { - g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL, - virtio_gpu_print_stats, g); - 
timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000); + gl->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL, + virtio_gpu_print_stats, g); + timer_mod(gl->print_stats, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000); } + +#if VIRGL_VERSION_MAJOR >= 1 + gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(), + virtio_gpu_virgl_resume_cmdq_bh, + g); +#endif + return 0; } -int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g) +static void virtio_gpu_virgl_add_capset(GArray *capset_ids, uint32_t capset_id) +{ + g_array_append_val(capset_ids, capset_id); +} + +GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g) { - uint32_t capset2_max_ver, capset2_max_size; + uint32_t capset_max_ver, capset_max_size; + GArray *capset_ids; + + capset_ids = g_array_new(false, false, sizeof(uint32_t)); + + /* VIRGL is always supported. */ + virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL); + virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2, - &capset2_max_ver, - &capset2_max_size); + &capset_max_ver, + &capset_max_size); + if (capset_max_ver) { + virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL2); + } + + if (virtio_gpu_venus_enabled(g->parent_obj.conf)) { + virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VENUS, + &capset_max_ver, + &capset_max_size); + if (capset_max_size) { + virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VENUS); + } + } - return capset2_max_ver ? 2 : 1; + return capset_ids; } diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 3281842bfe1..0a1a625b0ea 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -14,12 +14,12 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qemu/iov.h" -#include "sysemu/cpus.h" +#include "system/cpus.h" #include "ui/console.h" #include "ui/rect.h" #include "trace.h" -#include "sysemu/dma.h" -#include "sysemu/sysemu.h" +#include "system/dma.h" +#include "system/system.h" #include "hw/virtio/virtio.h" #include "migration/qemu-file-types.h" #include "hw/virtio/virtio-gpu.h" @@ -28,6 +28,7 @@ #include "hw/virtio/virtio-bus.h" #include "hw/qdev-properties.h" #include "qemu/log.h" +#include "qemu/memfd.h" #include "qemu/module.h" #include "qapi/error.h" #include "qemu/error-report.h" @@ -238,16 +239,6 @@ static uint32_t calc_image_hostmem(pixman_format_code_t pformat, return height * stride; } -#ifdef WIN32 -static void -win32_pixman_image_destroy(pixman_image_t *image, void *data) -{ - HANDLE handle = data; - - qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn); -} -#endif - static void virtio_gpu_resource_create_2d(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { @@ -294,28 +285,20 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g, res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height); if (res->hostmem + g->hostmem < g->conf_max_hostmem) { - void *bits = NULL; -#ifdef WIN32 - bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn); - if (!bits) { + if (!qemu_pixman_image_new_shareable( + &res->image, + &res->share_handle, + "virtio-gpu res", + pformat, + c2d.width, + c2d.height, + c2d.height ? res->hostmem / c2d.height : 0, + &error_warn)) { goto end; } -#endif - res->image = pixman_image_create_bits( - pformat, - c2d.width, - c2d.height, - bits, c2d.height ? 
res->hostmem / c2d.height : 0); -#ifdef WIN32 - if (res->image) { - pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle); - } -#endif } -#ifdef WIN32 end: -#endif if (!res->image) { qemu_log_mask(LOG_GUEST_ERROR, "%s: resource creation failed %d %d %d\n", @@ -379,7 +362,7 @@ static void virtio_gpu_resource_create_blob(VirtIOGPU *g, QTAILQ_INSERT_HEAD(&g->reslist, res, next); } -static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id) +void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id) { struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id]; struct virtio_gpu_simple_resource *res; @@ -596,11 +579,11 @@ static void virtio_unref_resource(pixman_image_t *image, void *data) pixman_image_unref(data); } -static void virtio_gpu_update_scanout(VirtIOGPU *g, - uint32_t scanout_id, - struct virtio_gpu_simple_resource *res, - struct virtio_gpu_framebuffer *fb, - struct virtio_gpu_rect *r) +void virtio_gpu_update_scanout(VirtIOGPU *g, + uint32_t scanout_id, + struct virtio_gpu_simple_resource *res, + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_rect *r) { struct virtio_gpu_simple_resource *ores; struct virtio_gpu_scanout *scanout; @@ -686,9 +669,7 @@ static bool virtio_gpu_do_set_scanout(VirtIOGPU *g, /* realloc the surface ptr */ scanout->ds = qemu_create_displaysurface_pixman(rect); -#ifdef WIN32 - qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, fb->offset); -#endif + qemu_displaysurface_set_share_handle(scanout->ds, res->share_handle, fb->offset); pixman_image_unref(rect); dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con, @@ -740,13 +721,47 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g, &fb, res, &ss.r, &cmd->error); } +bool virtio_gpu_scanout_blob_to_fb(struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_set_scanout_blob *ss, + uint64_t blob_size) +{ + uint64_t fbend; + + fb->format = virtio_gpu_get_pixman_format(ss->format); + if (!fb->format) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: host couldn't handle guest format %d\n", + __func__, ss->format); + return false; + } + + fb->bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb->format), 8); + fb->width = ss->width; + fb->height = ss->height; + fb->stride = ss->strides[0]; + fb->offset = ss->offsets[0] + ss->r.x * fb->bytes_pp + ss->r.y * fb->stride; + + fbend = fb->offset; + fbend += (uint64_t) fb->stride * ss->r.height; + + if (fbend > blob_size) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: fb end out of range\n", + __func__); + return false; + } + + return true; +} + + + static void virtio_gpu_set_scanout_blob(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_simple_resource *res; struct virtio_gpu_framebuffer fb = { 0 }; struct virtio_gpu_set_scanout_blob ss; - uint64_t fbend; VIRTIO_GPU_FILL_CMD(ss); virtio_gpu_scanout_blob_bswap(&ss); @@ -772,28 +787,7 @@ static void virtio_gpu_set_scanout_blob(VirtIOGPU *g, return; } - fb.format = virtio_gpu_get_pixman_format(ss.format); - if (!fb.format) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: host couldn't handle guest format %d\n", - __func__, ss.format); - cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; - return; - } - - fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8); - fb.width = ss.width; - fb.height = ss.height; - fb.stride = ss.strides[0]; - fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride; - - fbend = fb.offset; - fbend += fb.stride * (ss.r.height - 1); - fbend += fb.bytes_pp * ss.r.width; - if (fbend > res->blob_size) { - 
qemu_log_mask(LOG_GUEST_ERROR, - "%s: fb end out of range\n", - __func__); + if (!virtio_gpu_scanout_blob_to_fb(&fb, &ss, res->blob_size)) { cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; return; } @@ -1053,6 +1047,12 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g) /* process command */ vgc->process_cmd(g, cmd); + /* command suspended */ + if (!cmd->finished && !(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) { + trace_virtio_gpu_cmd_suspended(cmd->cmd_hdr.type); + break; + } + QTAILQ_REMOVE(&g->cmdq, cmd, next); if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { g->stats.requests++; @@ -1065,7 +1065,7 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g) if (g->stats.max_inflight < g->inflight) { g->stats.max_inflight = g->inflight; } - fprintf(stderr, "inflight: %3d (+)\r", g->inflight); + trace_virtio_gpu_inc_inflight_fences(g->inflight); } } else { g_free(cmd); @@ -1085,7 +1085,7 @@ static void virtio_gpu_process_fenceq(VirtIOGPU *g) g_free(cmd); g->inflight--; if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { - fprintf(stderr, "inflight: %3d (-)\r", g->inflight); + trace_virtio_gpu_dec_inflight_fences(g->inflight); } } } @@ -1284,7 +1284,6 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, VirtIOGPU *g = opaque; struct virtio_gpu_simple_resource *res; uint32_t resource_id, pformat; - void *bits = NULL; int i; g->hostmem = 0; @@ -1311,24 +1310,17 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, } res->hostmem = calc_image_hostmem(pformat, res->width, res->height); -#ifdef WIN32 - bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn); - if (!bits) { - g_free(res); - return -EINVAL; - } -#endif - res->image = pixman_image_create_bits( - pformat, - res->width, res->height, - bits, res->height ? res->hostmem / res->height : 0); - if (!res->image) { + if (!qemu_pixman_image_new_shareable(&res->image, + &res->share_handle, + "virtio-gpu res", + pformat, + res->width, + res->height, + res->height ? 
res->hostmem / res->height : 0, + &error_warn)) { g_free(res); return -EINVAL; } -#ifdef WIN32 - pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle); -#endif res->addrs = g_new(uint64_t, res->iov_cnt); res->iov = g_new(struct iovec, res->iov_cnt); @@ -1461,9 +1453,7 @@ static int virtio_gpu_post_load(void *opaque, int version_id) return -EINVAL; } scanout->ds = qemu_create_displaysurface_pixman(res->image); -#ifdef WIN32 - qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0); -#endif + qemu_displaysurface_set_share_handle(scanout->ds, res->share_handle, 0); dpy_gfx_replace_surface(scanout->con, scanout->ds); } @@ -1484,15 +1474,35 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) if (virtio_gpu_blob_enabled(g->parent_obj.conf)) { if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) && + !virtio_gpu_virgl_enabled(g->parent_obj.conf) && !virtio_gpu_have_udmabuf()) { error_setg(errp, "need rutabaga or udmabuf for blob resources"); return; } +#ifdef VIRGL_VERSION_MAJOR + #if VIRGL_VERSION_MAJOR < 1 if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) { - error_setg(errp, "blobs and virgl are not compatible (yet)"); + error_setg(errp, "old virglrenderer, blob resources unsupported"); return; } + #endif +#endif + } + + if (virtio_gpu_venus_enabled(g->parent_obj.conf)) { +#ifdef VIRGL_VERSION_MAJOR + #if VIRGL_VERSION_MAJOR >= 1 + if (!virtio_gpu_blob_enabled(g->parent_obj.conf) || + !virtio_gpu_hostmem_enabled(g->parent_obj.conf)) { + error_setg(errp, "venus requires enabled blob and hostmem options"); + return; + } + #else + error_setg(errp, "old virglrenderer, venus unsupported"); + return; + #endif +#endif } if (!virtio_gpu_base_device_realize(qdev, @@ -1664,7 +1674,7 @@ static const VMStateDescription vmstate_virtio_gpu = { .post_load = virtio_gpu_post_load, }; -static Property virtio_gpu_properties[] = { +static const Property virtio_gpu_properties[] = { VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf), DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem, 256 * MiB), @@ -1672,10 +1682,9 @@ static Property virtio_gpu_properties[] = { VIRTIO_GPU_FLAG_BLOB_ENABLED, false), DEFINE_PROP_SIZE("hostmem", VirtIOGPU, parent_obj.conf.hostmem, 0), DEFINE_PROP_UINT8("x-scanout-vmstate-version", VirtIOGPU, scanout_vmstate_version, 2), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_gpu_class_init(ObjectClass *klass, void *data) +static void virtio_gpu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c index 276f315108b..40e60f70fcd 100644 --- a/hw/display/virtio-vga.c +++ b/hw/display/virtio-vga.c @@ -209,12 +209,11 @@ static void virtio_vga_set_big_endian_fb(Object *obj, bool value, Error **errp) d->vga.big_endian_fb = value; } -static Property virtio_vga_base_properties[] = { +static const Property virtio_vga_base_properties[] = { DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_vga_base_class_init(ObjectClass *klass, void *data) +static void virtio_vga_base_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c index 3db3ff98f76..bc1a8ed4665 100644 --- a/hw/display/vmware_vga.c +++ b/hw/display/vmware_vga.c @@ -618,7 +618,7 @@ static void vmsvga_fifo_run(struct vmsvga_state_s 
*s) uint32_t cmd, colour; int args, len, maxloop = 1024; int x, y, dx, dy, width, height; - struct vmsvga_cursor_definition_s cursor; + QEMU_UNINITIALIZED struct vmsvga_cursor_definition_s cursor; uint32_t cmd_start; len = vmsvga_fifo_length(s); @@ -1332,15 +1332,14 @@ static void pci_vmsvga_realize(PCIDevice *dev, Error **errp) &s->chip.fifo_ram); } -static Property vga_vmware_properties[] = { +static const Property vga_vmware_properties[] = { DEFINE_PROP_UINT32("vgamem_mb", struct pci_vmsvga_state_s, chip.vga.vram_size_mb, 16), DEFINE_PROP_BOOL("global-vmstate", struct pci_vmsvga_state_s, chip.vga.global_vmstate, false), - DEFINE_PROP_END_OF_LIST(), }; -static void vmsvga_class_init(ObjectClass *klass, void *data) +static void vmsvga_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -1352,7 +1351,7 @@ static void vmsvga_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_DISPLAY_VGA; k->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE; k->subsystem_id = SVGA_PCI_DEVICE_ID; - dc->reset = vmsvga_reset; + device_class_set_legacy_reset(dc, vmsvga_reset); dc->vmsd = &vmstate_vmware_vga; device_class_set_props(dc, vga_vmware_properties); dc->hotpluggable = false; @@ -1364,7 +1363,7 @@ static const TypeInfo vmsvga_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(struct pci_vmsvga_state_s), .class_init = vmsvga_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c index 314d378a1b3..22822fecea3 100644 --- a/hw/display/xenfb.c +++ b/hw/display/xenfb.c @@ -29,7 +29,7 @@ #include "ui/input.h" #include "ui/console.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/xen/xen-legacy-backend.h" #include "hw/xen/interface/io/fbif.h" diff --git a/hw/display/xlnx_dp.c b/hw/display/xlnx_dp.c index c42fc388dc7..7c980ee6423 100644 --- a/hw/display/xlnx_dp.c +++ b/hw/display/xlnx_dp.c @@ -1387,18 +1387,17 @@ static void xlnx_dp_reset(DeviceState *dev) xlnx_dp_update_irq(s); } -static Property xlnx_dp_device_properties[] = { +static const Property xlnx_dp_device_properties[] = { DEFINE_AUDIO_PROPERTIES(XlnxDPState, aud_card), - DEFINE_PROP_END_OF_LIST(), }; -static void xlnx_dp_class_init(ObjectClass *oc, void *data) +static void xlnx_dp_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = xlnx_dp_realize; dc->vmsd = &vmstate_dp; - dc->reset = xlnx_dp_reset; + device_class_set_legacy_reset(dc, xlnx_dp_reset); device_class_set_props(dc, xlnx_dp_device_properties); } diff --git a/hw/dma/bcm2835_dma.c b/hw/dma/bcm2835_dma.c index 9bda45072b6..a2771ddcb52 100644 --- a/hw/dma/bcm2835_dma.c +++ b/hw/dma/bcm2835_dma.c @@ -385,12 +385,12 @@ static void bcm2835_dma_realize(DeviceState *dev, Error **errp) bcm2835_dma_reset(dev); } -static void bcm2835_dma_class_init(ObjectClass *klass, void *data) +static void bcm2835_dma_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = bcm2835_dma_realize; - dc->reset = bcm2835_dma_reset; + device_class_set_legacy_reset(dc, bcm2835_dma_reset); dc->vmsd = &vmstate_bcm2835_dma; } diff --git a/hw/dma/etraxfs_dma.c b/hw/dma/etraxfs_dma.c deleted file mode 100644 index 9c0003de51d..00000000000 --- a/hw/dma/etraxfs_dma.c +++ /dev/null @@ -1,781 +0,0 @@ -/* - * QEMU ETRAX DMA Controller. - * - * Copyright (c) 2008 Edgar E. 
Iglesias, Axis Communications AB. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "qemu/osdep.h" -#include "hw/hw.h" -#include "hw/irq.h" -#include "qemu/main-loop.h" -#include "sysemu/runstate.h" -#include "exec/address-spaces.h" -#include "exec/memory.h" - -#include "hw/cris/etraxfs_dma.h" - -#define D(x) - -#define RW_DATA (0x0 / 4) -#define RW_SAVED_DATA (0x58 / 4) -#define RW_SAVED_DATA_BUF (0x5c / 4) -#define RW_GROUP (0x60 / 4) -#define RW_GROUP_DOWN (0x7c / 4) -#define RW_CMD (0x80 / 4) -#define RW_CFG (0x84 / 4) -#define RW_STAT (0x88 / 4) -#define RW_INTR_MASK (0x8c / 4) -#define RW_ACK_INTR (0x90 / 4) -#define R_INTR (0x94 / 4) -#define R_MASKED_INTR (0x98 / 4) -#define RW_STREAM_CMD (0x9c / 4) - -#define DMA_REG_MAX (0x100 / 4) - -/* descriptors */ - -// ------------------------------------------------------------ dma_descr_group -typedef struct dma_descr_group { - uint32_t next; - unsigned eol : 1; - unsigned tol : 1; - unsigned bol : 1; - unsigned : 1; - unsigned intr : 1; - unsigned : 2; - unsigned en : 1; - unsigned : 7; - unsigned dis : 1; - unsigned md : 16; - struct dma_descr_group *up; - union { - struct dma_descr_context *context; - struct dma_descr_group *group; - } down; -} dma_descr_group; - -// ---------------------------------------------------------- dma_descr_context -typedef struct dma_descr_context { - uint32_t next; - unsigned eol : 1; - unsigned : 3; - unsigned intr : 1; - unsigned : 1; - unsigned store_mode : 1; - unsigned en : 1; - unsigned : 7; - unsigned dis : 1; - unsigned md0 : 16; - unsigned md1; - unsigned md2; - unsigned md3; - unsigned md4; - uint32_t saved_data; - uint32_t saved_data_buf; -} dma_descr_context; - -// ------------------------------------------------------------- dma_descr_data -typedef struct dma_descr_data { - uint32_t next; - uint32_t buf; - unsigned eol : 1; - unsigned : 2; - unsigned out_eop : 1; - unsigned intr : 1; - unsigned wait : 1; - unsigned : 2; - unsigned : 3; - unsigned in_eop : 1; - unsigned : 4; - unsigned md : 16; - uint32_t after; -} dma_descr_data; - -/* Constants */ -enum { - regk_dma_ack_pkt = 0x00000100, - regk_dma_anytime = 0x00000001, - regk_dma_array = 0x00000008, - regk_dma_burst = 0x00000020, - regk_dma_client = 0x00000002, - regk_dma_copy_next = 0x00000010, - regk_dma_copy_up = 0x00000020, - regk_dma_data_at_eol = 0x00000001, - regk_dma_dis_c = 0x00000010, - regk_dma_dis_g = 0x00000020, - regk_dma_idle = 0x00000001, - regk_dma_intern = 0x00000004, - 
regk_dma_load_c = 0x00000200, - regk_dma_load_c_n = 0x00000280, - regk_dma_load_c_next = 0x00000240, - regk_dma_load_d = 0x00000140, - regk_dma_load_g = 0x00000300, - regk_dma_load_g_down = 0x000003c0, - regk_dma_load_g_next = 0x00000340, - regk_dma_load_g_up = 0x00000380, - regk_dma_next_en = 0x00000010, - regk_dma_next_pkt = 0x00000010, - regk_dma_no = 0x00000000, - regk_dma_only_at_wait = 0x00000000, - regk_dma_restore = 0x00000020, - regk_dma_rst = 0x00000001, - regk_dma_running = 0x00000004, - regk_dma_rw_cfg_default = 0x00000000, - regk_dma_rw_cmd_default = 0x00000000, - regk_dma_rw_intr_mask_default = 0x00000000, - regk_dma_rw_stat_default = 0x00000101, - regk_dma_rw_stream_cmd_default = 0x00000000, - regk_dma_save_down = 0x00000020, - regk_dma_save_up = 0x00000020, - regk_dma_set_reg = 0x00000050, - regk_dma_set_w_size1 = 0x00000190, - regk_dma_set_w_size2 = 0x000001a0, - regk_dma_set_w_size4 = 0x000001c0, - regk_dma_stopped = 0x00000002, - regk_dma_store_c = 0x00000002, - regk_dma_store_descr = 0x00000000, - regk_dma_store_g = 0x00000004, - regk_dma_store_md = 0x00000001, - regk_dma_sw = 0x00000008, - regk_dma_update_down = 0x00000020, - regk_dma_yes = 0x00000001 -}; - -enum dma_ch_state -{ - RST = 1, - STOPPED = 2, - RUNNING = 4 -}; - -struct fs_dma_channel -{ - qemu_irq irq; - struct etraxfs_dma_client *client; - - /* Internal status. */ - int stream_cmd_src; - enum dma_ch_state state; - - unsigned int input : 1; - unsigned int eol : 1; - - struct dma_descr_group current_g; - struct dma_descr_context current_c; - struct dma_descr_data current_d; - - /* Control registers. */ - uint32_t regs[DMA_REG_MAX]; -}; - -struct fs_dma_ctrl -{ - MemoryRegion mmio; - int nr_channels; - struct fs_dma_channel *channels; - - QEMUBH *bh; -}; - -static void DMA_run(void *opaque); -static int channel_out_run(struct fs_dma_ctrl *ctrl, int c); - -static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg) -{ - return ctrl->channels[c].regs[reg]; -} - -static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c) -{ - return channel_reg(ctrl, c, RW_CFG) & 2; -} - -static inline int channel_en(struct fs_dma_ctrl *ctrl, int c) -{ - return (channel_reg(ctrl, c, RW_CFG) & 1) - && ctrl->channels[c].client; -} - -static inline int fs_channel(hwaddr addr) -{ - /* Every channel has a 0x2000 ctrl register map. */ - return addr >> 13; -} - -#ifdef USE_THIS_DEAD_CODE -static void channel_load_g(struct fs_dma_ctrl *ctrl, int c) -{ - hwaddr addr = channel_reg(ctrl, c, RW_GROUP); - - /* Load and decode. FIXME: handle endianness. */ - cpu_physical_memory_read(addr, &ctrl->channels[c].current_g, - sizeof(ctrl->channels[c].current_g)); -} - -static void dump_c(int ch, struct dma_descr_context *c) -{ - printf("%s ch=%d\n", __func__, ch); - printf("next=%x\n", c->next); - printf("saved_data=%x\n", c->saved_data); - printf("saved_data_buf=%x\n", c->saved_data_buf); - printf("eol=%x\n", (uint32_t) c->eol); -} - -static void dump_d(int ch, struct dma_descr_data *d) -{ - printf("%s ch=%d\n", __func__, ch); - printf("next=%x\n", d->next); - printf("buf=%x\n", d->buf); - printf("after=%x\n", d->after); - printf("intr=%x\n", (uint32_t) d->intr); - printf("out_eop=%x\n", (uint32_t) d->out_eop); - printf("in_eop=%x\n", (uint32_t) d->in_eop); - printf("eol=%x\n", (uint32_t) d->eol); -} -#endif - -static void channel_load_c(struct fs_dma_ctrl *ctrl, int c) -{ - hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN); - - /* Load and decode. FIXME: handle endianness. 
*/ - cpu_physical_memory_read(addr, &ctrl->channels[c].current_c, - sizeof(ctrl->channels[c].current_c)); - - D(dump_c(c, &ctrl->channels[c].current_c)); - /* I guess this should update the current pos. */ - ctrl->channels[c].regs[RW_SAVED_DATA] = - (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data; - ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = - (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf; -} - -static void channel_load_d(struct fs_dma_ctrl *ctrl, int c) -{ - hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA); - - /* Load and decode. FIXME: handle endianness. */ - D(printf("%s ch=%d addr=" HWADDR_FMT_plx "\n", __func__, c, addr)); - cpu_physical_memory_read(addr, &ctrl->channels[c].current_d, - sizeof(ctrl->channels[c].current_d)); - - D(dump_d(c, &ctrl->channels[c].current_d)); - ctrl->channels[c].regs[RW_DATA] = addr; -} - -static void channel_store_c(struct fs_dma_ctrl *ctrl, int c) -{ - hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN); - - /* Encode and store. FIXME: handle endianness. */ - D(printf("%s ch=%d addr=" HWADDR_FMT_plx "\n", __func__, c, addr)); - D(dump_d(c, &ctrl->channels[c].current_d)); - cpu_physical_memory_write(addr, &ctrl->channels[c].current_c, - sizeof(ctrl->channels[c].current_c)); -} - -static void channel_store_d(struct fs_dma_ctrl *ctrl, int c) -{ - hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA); - - /* Encode and store. FIXME: handle endianness. */ - D(printf("%s ch=%d addr=" HWADDR_FMT_plx "\n", __func__, c, addr)); - cpu_physical_memory_write(addr, &ctrl->channels[c].current_d, - sizeof(ctrl->channels[c].current_d)); -} - -static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c) -{ - /* FIXME: */ -} - -static inline void channel_start(struct fs_dma_ctrl *ctrl, int c) -{ - if (ctrl->channels[c].client) - { - ctrl->channels[c].eol = 0; - ctrl->channels[c].state = RUNNING; - if (!ctrl->channels[c].input) - channel_out_run(ctrl, c); - } else - printf("WARNING: starting DMA ch %d with no client\n", c); - - qemu_bh_schedule_idle(ctrl->bh); -} - -static void channel_continue(struct fs_dma_ctrl *ctrl, int c) -{ - if (!channel_en(ctrl, c) - || channel_stopped(ctrl, c) - || ctrl->channels[c].state != RUNNING - /* Only reload the current data descriptor if it has eol set. */ - || !ctrl->channels[c].current_d.eol) { - D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n", - c, ctrl->channels[c].state, - channel_stopped(ctrl, c), - channel_en(ctrl,c), - ctrl->channels[c].eol)); - D(dump_d(c, &ctrl->channels[c].current_d)); - return; - } - - /* Reload the current descriptor. */ - channel_load_d(ctrl, c); - - /* If the current descriptor cleared the eol flag and we had already - reached eol state, do the continue. 
*/ - if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) { - D(printf("continue %d ok %x\n", c, - ctrl->channels[c].current_d.next)); - ctrl->channels[c].regs[RW_SAVED_DATA] = - (uint32_t)(unsigned long)ctrl->channels[c].current_d.next; - channel_load_d(ctrl, c); - ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = - (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf; - - channel_start(ctrl, c); - } - ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = - (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf; -} - -static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v) -{ - unsigned int cmd = v & ((1 << 10) - 1); - - D(printf("%s ch=%d cmd=%x\n", - __func__, c, cmd)); - if (cmd & regk_dma_load_d) { - channel_load_d(ctrl, c); - if (cmd & regk_dma_burst) - channel_start(ctrl, c); - } - - if (cmd & regk_dma_load_c) { - channel_load_c(ctrl, c); - } -} - -static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c) -{ - D(printf("%s %d\n", __func__, c)); - ctrl->channels[c].regs[R_INTR] &= - ~(ctrl->channels[c].regs[RW_ACK_INTR]); - - ctrl->channels[c].regs[R_MASKED_INTR] = - ctrl->channels[c].regs[R_INTR] - & ctrl->channels[c].regs[RW_INTR_MASK]; - - D(printf("%s: chan=%d masked_intr=%x\n", __func__, - c, - ctrl->channels[c].regs[R_MASKED_INTR])); - - qemu_set_irq(ctrl->channels[c].irq, - !!ctrl->channels[c].regs[R_MASKED_INTR]); -} - -static int channel_out_run(struct fs_dma_ctrl *ctrl, int c) -{ - uint32_t len; - uint32_t saved_data_buf; - unsigned char buf[2 * 1024]; - - struct dma_context_metadata meta; - bool send_context = true; - - if (ctrl->channels[c].eol) - return 0; - - do { - bool out_eop; - D(printf("ch=%d buf=%x after=%x\n", - c, - (uint32_t)ctrl->channels[c].current_d.buf, - (uint32_t)ctrl->channels[c].current_d.after)); - - if (send_context) { - if (ctrl->channels[c].client->client.metadata_push) { - meta.metadata = ctrl->channels[c].current_d.md; - ctrl->channels[c].client->client.metadata_push( - ctrl->channels[c].client->client.opaque, - &meta); - } - send_context = false; - } - - channel_load_d(ctrl, c); - saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF); - len = (uint32_t)(unsigned long) - ctrl->channels[c].current_d.after; - len -= saved_data_buf; - - if (len > sizeof buf) - len = sizeof buf; - cpu_physical_memory_read (saved_data_buf, buf, len); - - out_eop = ((saved_data_buf + len) == - ctrl->channels[c].current_d.after) && - ctrl->channels[c].current_d.out_eop; - - D(printf("channel %d pushes %x %u bytes eop=%u\n", c, - saved_data_buf, len, out_eop)); - - if (ctrl->channels[c].client->client.push) { - if (len > 0) { - ctrl->channels[c].client->client.push( - ctrl->channels[c].client->client.opaque, - buf, len, out_eop); - } - } else { - printf("WARNING: DMA ch%d dataloss," - " no attached client.\n", c); - } - - saved_data_buf += len; - - if (saved_data_buf == (uint32_t)(unsigned long) - ctrl->channels[c].current_d.after) { - /* Done. Step to next. */ - if (ctrl->channels[c].current_d.out_eop) { - send_context = true; - } - if (ctrl->channels[c].current_d.intr) { - /* data intr. */ - D(printf("signal intr %d eol=%d\n", - len, ctrl->channels[c].current_d.eol)); - ctrl->channels[c].regs[R_INTR] |= (1 << 2); - channel_update_irq(ctrl, c); - } - channel_store_d(ctrl, c); - if (ctrl->channels[c].current_d.eol) { - D(printf("channel %d EOL\n", c)); - ctrl->channels[c].eol = 1; - - /* Mark the context as disabled. 
*/ - ctrl->channels[c].current_c.dis = 1; - channel_store_c(ctrl, c); - - channel_stop(ctrl, c); - } else { - ctrl->channels[c].regs[RW_SAVED_DATA] = - (uint32_t)(unsigned long)ctrl-> - channels[c].current_d.next; - /* Load new descriptor. */ - channel_load_d(ctrl, c); - saved_data_buf = (uint32_t)(unsigned long) - ctrl->channels[c].current_d.buf; - } - - ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = - saved_data_buf; - D(dump_d(c, &ctrl->channels[c].current_d)); - } - ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf; - } while (!ctrl->channels[c].eol); - return 1; -} - -static int channel_in_process(struct fs_dma_ctrl *ctrl, int c, - unsigned char *buf, int buflen, int eop) -{ - uint32_t len; - uint32_t saved_data_buf; - - if (ctrl->channels[c].eol == 1) - return 0; - - channel_load_d(ctrl, c); - saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF); - len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after; - len -= saved_data_buf; - - if (len > buflen) - len = buflen; - - cpu_physical_memory_write (saved_data_buf, buf, len); - saved_data_buf += len; - - if (saved_data_buf == - (uint32_t)(unsigned long)ctrl->channels[c].current_d.after - || eop) { - uint32_t r_intr = ctrl->channels[c].regs[R_INTR]; - - D(printf("in dscr end len=%d\n", - ctrl->channels[c].current_d.after - - ctrl->channels[c].current_d.buf)); - ctrl->channels[c].current_d.after = saved_data_buf; - - /* Done. Step to next. */ - if (ctrl->channels[c].current_d.intr) { - /* TODO: signal eop to the client. */ - /* data intr. */ - ctrl->channels[c].regs[R_INTR] |= 3; - } - if (eop) { - ctrl->channels[c].current_d.in_eop = 1; - ctrl->channels[c].regs[R_INTR] |= 8; - } - if (r_intr != ctrl->channels[c].regs[R_INTR]) - channel_update_irq(ctrl, c); - - channel_store_d(ctrl, c); - D(dump_d(c, &ctrl->channels[c].current_d)); - - if (ctrl->channels[c].current_d.eol) { - D(printf("channel %d EOL\n", c)); - ctrl->channels[c].eol = 1; - - /* Mark the context as disabled. */ - ctrl->channels[c].current_c.dis = 1; - channel_store_c(ctrl, c); - - channel_stop(ctrl, c); - } else { - ctrl->channels[c].regs[RW_SAVED_DATA] = - (uint32_t)(unsigned long)ctrl-> - channels[c].current_d.next; - /* Load new descriptor. */ - channel_load_d(ctrl, c); - saved_data_buf = (uint32_t)(unsigned long) - ctrl->channels[c].current_d.buf; - } - } - - ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf; - return len; -} - -static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c) -{ - if (ctrl->channels[c].client->client.pull) { - ctrl->channels[c].client->client.pull( - ctrl->channels[c].client->client.opaque); - return 1; - } else - return 0; -} - -static uint32_t dma_rinvalid (void *opaque, hwaddr addr) -{ - hw_error("Unsupported short raccess. reg=" HWADDR_FMT_plx "\n", addr); - return 0; -} - -static uint64_t -dma_read(void *opaque, hwaddr addr, unsigned int size) -{ - struct fs_dma_ctrl *ctrl = opaque; - int c; - uint32_t r = 0; - - if (size != 4) { - dma_rinvalid(opaque, addr); - } - - /* Make addr relative to this channel and bounded to nr regs. */ - c = fs_channel(addr); - addr &= 0xff; - addr >>= 2; - switch (addr) - { - case RW_STAT: - r = ctrl->channels[c].state & 7; - r |= ctrl->channels[c].eol << 5; - r |= ctrl->channels[c].stream_cmd_src << 8; - break; - - default: - r = ctrl->channels[c].regs[addr]; - D(printf("%s c=%d addr=" HWADDR_FMT_plx "\n", - __func__, c, addr)); - break; - } - return r; -} - -static void -dma_winvalid (void *opaque, hwaddr addr, uint32_t value) -{ - hw_error("Unsupported short waccess. 
reg=" HWADDR_FMT_plx "\n", addr); -} - -static void -dma_update_state(struct fs_dma_ctrl *ctrl, int c) -{ - if (ctrl->channels[c].regs[RW_CFG] & 2) - ctrl->channels[c].state = STOPPED; - if (!(ctrl->channels[c].regs[RW_CFG] & 1)) - ctrl->channels[c].state = RST; -} - -static void -dma_write(void *opaque, hwaddr addr, - uint64_t val64, unsigned int size) -{ - struct fs_dma_ctrl *ctrl = opaque; - uint32_t value = val64; - int c; - - if (size != 4) { - dma_winvalid(opaque, addr, value); - } - - /* Make addr relative to this channel and bounded to nr regs. */ - c = fs_channel(addr); - addr &= 0xff; - addr >>= 2; - switch (addr) - { - case RW_DATA: - ctrl->channels[c].regs[addr] = value; - break; - - case RW_CFG: - ctrl->channels[c].regs[addr] = value; - dma_update_state(ctrl, c); - break; - case RW_CMD: - /* continue. */ - if (value & ~1) - printf("Invalid store to ch=%d RW_CMD %x\n", - c, value); - ctrl->channels[c].regs[addr] = value; - channel_continue(ctrl, c); - break; - - case RW_SAVED_DATA: - case RW_SAVED_DATA_BUF: - case RW_GROUP: - case RW_GROUP_DOWN: - ctrl->channels[c].regs[addr] = value; - break; - - case RW_ACK_INTR: - case RW_INTR_MASK: - ctrl->channels[c].regs[addr] = value; - channel_update_irq(ctrl, c); - if (addr == RW_ACK_INTR) - ctrl->channels[c].regs[RW_ACK_INTR] = 0; - break; - - case RW_STREAM_CMD: - if (value & ~1023) - printf("Invalid store to ch=%d " - "RW_STREAMCMD %x\n", - c, value); - ctrl->channels[c].regs[addr] = value; - D(printf("stream_cmd ch=%d\n", c)); - channel_stream_cmd(ctrl, c, value); - break; - - default: - D(printf("%s c=%d " HWADDR_FMT_plx "\n", - __func__, c, addr)); - break; - } -} - -static const MemoryRegionOps dma_ops = { - .read = dma_read, - .write = dma_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 1, - .max_access_size = 4 - } -}; - -static int etraxfs_dmac_run(void *opaque) -{ - struct fs_dma_ctrl *ctrl = opaque; - int i; - int p = 0; - - for (i = 0; - i < ctrl->nr_channels; - i++) - { - if (ctrl->channels[i].state == RUNNING) - { - if (ctrl->channels[i].input) { - p += channel_in_run(ctrl, i); - } else { - p += channel_out_run(ctrl, i); - } - } - } - return p; -} - -int etraxfs_dmac_input(struct etraxfs_dma_client *client, - void *buf, int len, int eop) -{ - return channel_in_process(client->ctrl, client->channel, - buf, len, eop); -} - -/* Connect an IRQ line with a channel. 
*/ -void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input) -{ - struct fs_dma_ctrl *ctrl = opaque; - ctrl->channels[c].irq = *line; - ctrl->channels[c].input = input; -} - -void etraxfs_dmac_connect_client(void *opaque, int c, - struct etraxfs_dma_client *cl) -{ - struct fs_dma_ctrl *ctrl = opaque; - cl->ctrl = ctrl; - cl->channel = c; - ctrl->channels[c].client = cl; -} - - -static void DMA_run(void *opaque) -{ - struct fs_dma_ctrl *etraxfs_dmac = opaque; - int p = 1; - - if (runstate_is_running()) - p = etraxfs_dmac_run(etraxfs_dmac); - - if (p) - qemu_bh_schedule_idle(etraxfs_dmac->bh); -} - -void *etraxfs_dmac_init(hwaddr base, int nr_channels) -{ - struct fs_dma_ctrl *ctrl = NULL; - - ctrl = g_malloc0(sizeof *ctrl); - - ctrl->bh = qemu_bh_new(DMA_run, ctrl); - - ctrl->nr_channels = nr_channels; - ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels); - - memory_region_init_io(&ctrl->mmio, NULL, &dma_ops, ctrl, "etraxfs-dma", - nr_channels * 0x2000); - memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio); - - return ctrl; -} diff --git a/hw/dma/i82374.c b/hw/dma/i82374.c index e72aa2e1cef..e226eda6d15 100644 --- a/hw/dma/i82374.c +++ b/hw/dma/i82374.c @@ -139,18 +139,19 @@ static void i82374_realize(DeviceState *dev, Error **errp) memset(s->commands, 0, sizeof(s->commands)); } -static Property i82374_properties[] = { +static const Property i82374_properties[] = { DEFINE_PROP_UINT32("iobase", I82374State, iobase, 0x400), - DEFINE_PROP_END_OF_LIST() }; -static void i82374_class_init(ObjectClass *klass, void *data) +static void i82374_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = i82374_realize; dc->vmsd = &vmstate_i82374; device_class_set_props(dc, i82374_properties); + dc->desc = "Intel 82374 DMA controller"; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); } static const TypeInfo i82374_info = { diff --git a/hw/dma/i8257.c b/hw/dma/i8257.c index 24a54ca272d..2463952ada2 100644 --- a/hw/dma/i8257.c +++ b/hw/dma/i8257.c @@ -585,21 +585,20 @@ static void i8257_realize(DeviceState *dev, Error **errp) d->dma_bh = qemu_bh_new(i8257_dma_run, d); } -static Property i8257_properties[] = { +static const Property i8257_properties[] = { DEFINE_PROP_INT32("base", I8257State, base, 0x00), DEFINE_PROP_INT32("page-base", I8257State, page_base, 0x80), DEFINE_PROP_INT32("pageh-base", I8257State, pageh_base, 0x480), DEFINE_PROP_INT32("dshift", I8257State, dshift, 0), - DEFINE_PROP_END_OF_LIST() }; -static void i8257_class_init(ObjectClass *klass, void *data) +static void i8257_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); IsaDmaClass *idc = ISADMA_CLASS(klass); dc->realize = i8257_realize; - dc->reset = i8257_reset; + device_class_set_legacy_reset(dc, i8257_reset); dc->vmsd = &vmstate_i8257; device_class_set_props(dc, i8257_properties); @@ -619,7 +618,7 @@ static const TypeInfo i8257_info = { .parent = TYPE_ISA_DEVICE, .instance_size = sizeof(I8257State), .class_init = i8257_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_ISADMA }, { } } diff --git a/hw/dma/meson.build b/hw/dma/meson.build index a96c1be2c8d..cc7810beb84 100644 --- a/hw/dma/meson.build +++ b/hw/dma/meson.build @@ -5,12 +5,10 @@ system_ss.add(when: 'CONFIG_I82374', if_true: files('i82374.c')) system_ss.add(when: 'CONFIG_I8257', if_true: files('i8257.c')) system_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('xilinx_axidma.c')) 
system_ss.add(when: 'CONFIG_ZYNQ_DEVCFG', if_true: files('xlnx-zynq-devcfg.c')) -system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_dma.c')) system_ss.add(when: 'CONFIG_STP2000', if_true: files('sparc32_dma.c')) system_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx_dpdma.c')) system_ss.add(when: 'CONFIG_XLNX_ZDMA', if_true: files('xlnx-zdma.c')) system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_dma.c', 'soc_dma.c')) -system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_dma.c')) system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_dma.c')) system_ss.add(when: 'CONFIG_SIFIVE_PDMA', if_true: files('sifive_pdma.c')) system_ss.add(when: 'CONFIG_XLNX_CSU_DMA', if_true: files('xlnx_csu_dma.c')) diff --git a/hw/dma/omap_dma.c b/hw/dma/omap_dma.c index 77797a67b52..101f91f4a33 100644 --- a/hw/dma/omap_dma.c +++ b/hw/dma/omap_dma.c @@ -131,9 +131,9 @@ struct omap_dma_s { #define LAST_FRAME_INTR (1 << 4) #define END_BLOCK_INTR (1 << 5) #define SYNC (1 << 6) -#define END_PKT_INTR (1 << 7) -#define TRANS_ERR_INTR (1 << 8) -#define MISALIGN_INTR (1 << 11) +#define END_PKT_INTR (1 << 7) +#define TRANS_ERR_INTR (1 << 8) +#define MISALIGN_INTR (1 << 11) static inline void omap_dma_interrupts_update(struct omap_dma_s *s) { @@ -526,12 +526,12 @@ static void omap_dma_transfer_setup(struct soc_dma_ch_s *dma) /* Check all the conditions that terminate the transfer starting * with those that can occur the soonest. */ -#define INTR_CHECK(cond, id, nelements) \ - if (cond) { \ - elements[id] = nelements; \ - if (elements[id] < min_elems) \ - min_elems = elements[id]; \ - } else \ +#define INTR_CHECK(cond, id, nelements) \ + if (cond) { \ + elements[id] = nelements; \ + if (elements[id] < min_elems) \ + min_elems = elements[id]; \ + } else \ elements[id] = INT_MAX; /* Elements */ @@ -686,10 +686,7 @@ void omap_dma_reset(struct soc_dma_s *dma) struct omap_dma_s *s = dma->opaque; soc_dma_reset(s->dma); - if (s->model < omap_dma_4) - s->gcr = 0x0004; - else - s->gcr = 0x00010010; + s->gcr = 0x0004; s->ocp = 0x00000000; memset(&s->irqstat, 0, sizeof(s->irqstat)); memset(&s->irqen, 0, sizeof(s->irqen)); @@ -697,8 +694,7 @@ void omap_dma_reset(struct soc_dma_s *dma) s->lcd_ch.condition = 0; s->lcd_ch.interrupts = 0; s->lcd_ch.dual = 0; - if (s->model < omap_dma_4) - omap_dma_enable_3_1_mapping(s); + omap_dma_enable_3_1_mapping(s); for (i = 0; i < s->chans; i ++) { s->ch[i].suspend = 0; s->ch[i].prefetch = 0; @@ -721,10 +717,7 @@ void omap_dma_reset(struct soc_dma_s *dma) s->ch[i].repeat = 0; s->ch[i].auto_init = 0; s->ch[i].link_enabled = 0; - if (s->model < omap_dma_4) - s->ch[i].interrupts = 0x0003; - else - s->ch[i].interrupts = 0x0000; + s->ch[i].interrupts = 0x0003; s->ch[i].status = 0; s->ch[i].cstatus = 0; s->ch[i].active = 0; @@ -747,7 +740,7 @@ static int omap_dma_ch_reg_read(struct omap_dma_s *s, struct omap_dma_channel_s *ch, int reg, uint16_t *value) { switch (reg) { - case 0x00: /* SYS_DMA_CSDP_CH0 */ + case 0x00: /* SYS_DMA_CSDP_CH0 */ *value = (ch->burst[1] << 14) | (ch->pack[1] << 13) | (ch->port[1] << 9) | @@ -757,9 +750,9 @@ static int omap_dma_ch_reg_read(struct omap_dma_s *s, (ch->data_type >> 1); break; - case 0x02: /* SYS_DMA_CCR_CH0 */ + case 0x02: /* SYS_DMA_CCR_CH0 */ if (s->model <= omap_dma_3_1) - *value = 0 << 10; /* FIFO_FLUSH reads as 0 */ + *value = 0 << 10; /* FIFO_FLUSH reads as 0 */ else *value = ch->omap_3_1_compatible_disable << 10; *value |= (ch->mode[1] << 14) | @@ -772,11 +765,11 @@ static int omap_dma_ch_reg_read(struct omap_dma_s *s, 
(ch->fs << 5) | ch->sync; break; - case 0x04: /* SYS_DMA_CICR_CH0 */ + case 0x04: /* SYS_DMA_CICR_CH0 */ *value = ch->interrupts; break; - case 0x06: /* SYS_DMA_CSR_CH0 */ + case 0x06: /* SYS_DMA_CSR_CH0 */ *value = ch->status; ch->status &= SYNC; if (!ch->omap_3_1_compatible_disable && ch->sibling) { @@ -786,77 +779,77 @@ static int omap_dma_ch_reg_read(struct omap_dma_s *s, qemu_irq_lower(ch->irq); break; - case 0x08: /* SYS_DMA_CSSA_L_CH0 */ + case 0x08: /* SYS_DMA_CSSA_L_CH0 */ *value = ch->addr[0] & 0x0000ffff; break; - case 0x0a: /* SYS_DMA_CSSA_U_CH0 */ + case 0x0a: /* SYS_DMA_CSSA_U_CH0 */ *value = ch->addr[0] >> 16; break; - case 0x0c: /* SYS_DMA_CDSA_L_CH0 */ + case 0x0c: /* SYS_DMA_CDSA_L_CH0 */ *value = ch->addr[1] & 0x0000ffff; break; - case 0x0e: /* SYS_DMA_CDSA_U_CH0 */ + case 0x0e: /* SYS_DMA_CDSA_U_CH0 */ *value = ch->addr[1] >> 16; break; - case 0x10: /* SYS_DMA_CEN_CH0 */ + case 0x10: /* SYS_DMA_CEN_CH0 */ *value = ch->elements; break; - case 0x12: /* SYS_DMA_CFN_CH0 */ + case 0x12: /* SYS_DMA_CFN_CH0 */ *value = ch->frames; break; - case 0x14: /* SYS_DMA_CFI_CH0 */ + case 0x14: /* SYS_DMA_CFI_CH0 */ *value = ch->frame_index[0]; break; - case 0x16: /* SYS_DMA_CEI_CH0 */ + case 0x16: /* SYS_DMA_CEI_CH0 */ *value = ch->element_index[0]; break; - case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */ + case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */ if (ch->omap_3_1_compatible_disable) - *value = ch->active_set.src & 0xffff; /* CSAC */ + *value = ch->active_set.src & 0xffff; /* CSAC */ else *value = ch->cpc; break; - case 0x1a: /* DMA_CDAC */ - *value = ch->active_set.dest & 0xffff; /* CDAC */ + case 0x1a: /* DMA_CDAC */ + *value = ch->active_set.dest & 0xffff; /* CDAC */ break; - case 0x1c: /* DMA_CDEI */ + case 0x1c: /* DMA_CDEI */ *value = ch->element_index[1]; break; - case 0x1e: /* DMA_CDFI */ + case 0x1e: /* DMA_CDFI */ *value = ch->frame_index[1]; break; - case 0x20: /* DMA_COLOR_L */ + case 0x20: /* DMA_COLOR_L */ *value = ch->color & 0xffff; break; - case 0x22: /* DMA_COLOR_U */ + case 0x22: /* DMA_COLOR_U */ *value = ch->color >> 16; break; - case 0x24: /* DMA_CCR2 */ + case 0x24: /* DMA_CCR2 */ *value = (ch->bs << 2) | (ch->transparent_copy << 1) | ch->constant_fill; break; - case 0x28: /* DMA_CLNK_CTRL */ + case 0x28: /* DMA_CLNK_CTRL */ *value = (ch->link_enabled << 15) | (ch->link_next_ch & 0xf); break; - case 0x2a: /* DMA_LCH_CTRL */ + case 0x2a: /* DMA_LCH_CTRL */ *value = (ch->interleave_disabled << 15) | ch->type; break; @@ -871,7 +864,7 @@ static int omap_dma_ch_reg_write(struct omap_dma_s *s, struct omap_dma_channel_s *ch, int reg, uint16_t value) { switch (reg) { - case 0x00: /* SYS_DMA_CSDP_CH0 */ + case 0x00: /* SYS_DMA_CSDP_CH0 */ ch->burst[1] = (value & 0xc000) >> 14; ch->pack[1] = (value & 0x2000) >> 13; ch->port[1] = (enum omap_dma_port) ((value & 0x1e00) >> 9); @@ -894,7 +887,7 @@ static int omap_dma_ch_reg_write(struct omap_dma_s *s, } break; - case 0x02: /* SYS_DMA_CCR_CH0 */ + case 0x02: /* SYS_DMA_CCR_CH0 */ ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14); ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12); ch->end_prog = (value & 0x0800) >> 11; @@ -916,88 +909,88 @@ static int omap_dma_ch_reg_write(struct omap_dma_s *s, break; - case 0x04: /* SYS_DMA_CICR_CH0 */ + case 0x04: /* SYS_DMA_CICR_CH0 */ ch->interrupts = value & 0x3f; break; - case 0x06: /* SYS_DMA_CSR_CH0 */ + case 0x06: /* SYS_DMA_CSR_CH0 */ OMAP_RO_REG((hwaddr) reg); break; - case 0x08: /* SYS_DMA_CSSA_L_CH0 */ + case 0x08: /* SYS_DMA_CSSA_L_CH0 */ ch->addr[0] &= 
0xffff0000; ch->addr[0] |= value; break; - case 0x0a: /* SYS_DMA_CSSA_U_CH0 */ + case 0x0a: /* SYS_DMA_CSSA_U_CH0 */ ch->addr[0] &= 0x0000ffff; ch->addr[0] |= (uint32_t) value << 16; break; - case 0x0c: /* SYS_DMA_CDSA_L_CH0 */ + case 0x0c: /* SYS_DMA_CDSA_L_CH0 */ ch->addr[1] &= 0xffff0000; ch->addr[1] |= value; break; - case 0x0e: /* SYS_DMA_CDSA_U_CH0 */ + case 0x0e: /* SYS_DMA_CDSA_U_CH0 */ ch->addr[1] &= 0x0000ffff; ch->addr[1] |= (uint32_t) value << 16; break; - case 0x10: /* SYS_DMA_CEN_CH0 */ + case 0x10: /* SYS_DMA_CEN_CH0 */ ch->elements = value; break; - case 0x12: /* SYS_DMA_CFN_CH0 */ + case 0x12: /* SYS_DMA_CFN_CH0 */ ch->frames = value; break; - case 0x14: /* SYS_DMA_CFI_CH0 */ + case 0x14: /* SYS_DMA_CFI_CH0 */ ch->frame_index[0] = (int16_t) value; break; - case 0x16: /* SYS_DMA_CEI_CH0 */ + case 0x16: /* SYS_DMA_CEI_CH0 */ ch->element_index[0] = (int16_t) value; break; - case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */ + case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */ OMAP_RO_REG((hwaddr) reg); break; - case 0x1c: /* DMA_CDEI */ + case 0x1c: /* DMA_CDEI */ ch->element_index[1] = (int16_t) value; break; - case 0x1e: /* DMA_CDFI */ + case 0x1e: /* DMA_CDFI */ ch->frame_index[1] = (int16_t) value; break; - case 0x20: /* DMA_COLOR_L */ + case 0x20: /* DMA_COLOR_L */ ch->color &= 0xffff0000; ch->color |= value; break; - case 0x22: /* DMA_COLOR_U */ + case 0x22: /* DMA_COLOR_U */ ch->color &= 0xffff; ch->color |= (uint32_t)value << 16; break; - case 0x24: /* DMA_CCR2 */ + case 0x24: /* DMA_CCR2 */ ch->bs = (value >> 2) & 0x1; ch->transparent_copy = (value >> 1) & 0x1; ch->constant_fill = value & 0x1; break; - case 0x28: /* DMA_CLNK_CTRL */ + case 0x28: /* DMA_CLNK_CTRL */ ch->link_enabled = (value >> 15) & 0x1; - if (value & (1 << 14)) { /* Stop_Lnk */ + if (value & (1 << 14)) { /* Stop_Lnk */ ch->link_enabled = 0; omap_dma_disable_channel(s, ch); } ch->link_next_ch = value & 0x1f; break; - case 0x2a: /* DMA_LCH_CTRL */ + case 0x2a: /* DMA_LCH_CTRL */ ch->interleave_disabled = (value >> 15) & 0x1; ch->type = value & 0xf; break; @@ -1012,7 +1005,7 @@ static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset, uint16_t value) { switch (offset) { - case 0xbc0: /* DMA_LCD_CSDP */ + case 0xbc0: /* DMA_LCD_CSDP */ s->brust_f2 = (value >> 14) & 0x3; s->pack_f2 = (value >> 13) & 0x1; s->data_type_f2 = (1 << ((value >> 11) & 0x3)); @@ -1021,7 +1014,7 @@ static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset, s->data_type_f1 = (1 << ((value >> 0) & 0x3)); break; - case 0xbc2: /* DMA_LCD_CCR */ + case 0xbc2: /* DMA_LCD_CCR */ s->mode_f2 = (value >> 14) & 0x3; s->mode_f1 = (value >> 12) & 0x3; s->end_prog = (value >> 11) & 0x1; @@ -1033,7 +1026,7 @@ static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset, s->bs = (value >> 4) & 0x1; break; - case 0xbc4: /* DMA_LCD_CTRL */ + case 0xbc4: /* DMA_LCD_CTRL */ s->dst = (value >> 8) & 0x1; s->src = ((value >> 6) & 0x3) << 1; s->condition = 0; @@ -1042,91 +1035,91 @@ static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset, s->dual = value & 1; break; - case 0xbc8: /* TOP_B1_L */ + case 0xbc8: /* TOP_B1_L */ s->src_f1_top &= 0xffff0000; s->src_f1_top |= 0x0000ffff & value; break; - case 0xbca: /* TOP_B1_U */ + case 0xbca: /* TOP_B1_U */ s->src_f1_top &= 0x0000ffff; s->src_f1_top |= (uint32_t)value << 16; break; - case 0xbcc: /* BOT_B1_L */ + case 0xbcc: /* BOT_B1_L */ s->src_f1_bottom &= 0xffff0000; s->src_f1_bottom |= 0x0000ffff & value; break; - case 0xbce: /* 
BOT_B1_U */ + case 0xbce: /* BOT_B1_U */ s->src_f1_bottom &= 0x0000ffff; s->src_f1_bottom |= (uint32_t) value << 16; break; - case 0xbd0: /* TOP_B2_L */ + case 0xbd0: /* TOP_B2_L */ s->src_f2_top &= 0xffff0000; s->src_f2_top |= 0x0000ffff & value; break; - case 0xbd2: /* TOP_B2_U */ + case 0xbd2: /* TOP_B2_U */ s->src_f2_top &= 0x0000ffff; s->src_f2_top |= (uint32_t) value << 16; break; - case 0xbd4: /* BOT_B2_L */ + case 0xbd4: /* BOT_B2_L */ s->src_f2_bottom &= 0xffff0000; s->src_f2_bottom |= 0x0000ffff & value; break; - case 0xbd6: /* BOT_B2_U */ + case 0xbd6: /* BOT_B2_U */ s->src_f2_bottom &= 0x0000ffff; s->src_f2_bottom |= (uint32_t) value << 16; break; - case 0xbd8: /* DMA_LCD_SRC_EI_B1 */ + case 0xbd8: /* DMA_LCD_SRC_EI_B1 */ s->element_index_f1 = value; break; - case 0xbda: /* DMA_LCD_SRC_FI_B1_L */ + case 0xbda: /* DMA_LCD_SRC_FI_B1_L */ s->frame_index_f1 &= 0xffff0000; s->frame_index_f1 |= 0x0000ffff & value; break; - case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */ + case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */ s->frame_index_f1 &= 0x0000ffff; s->frame_index_f1 |= (uint32_t) value << 16; break; - case 0xbdc: /* DMA_LCD_SRC_EI_B2 */ + case 0xbdc: /* DMA_LCD_SRC_EI_B2 */ s->element_index_f2 = value; break; - case 0xbde: /* DMA_LCD_SRC_FI_B2_L */ + case 0xbde: /* DMA_LCD_SRC_FI_B2_L */ s->frame_index_f2 &= 0xffff0000; s->frame_index_f2 |= 0x0000ffff & value; break; - case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */ + case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */ s->frame_index_f2 &= 0x0000ffff; s->frame_index_f2 |= (uint32_t) value << 16; break; - case 0xbe0: /* DMA_LCD_SRC_EN_B1 */ + case 0xbe0: /* DMA_LCD_SRC_EN_B1 */ s->elements_f1 = value; break; - case 0xbe4: /* DMA_LCD_SRC_FN_B1 */ + case 0xbe4: /* DMA_LCD_SRC_FN_B1 */ s->frames_f1 = value; break; - case 0xbe2: /* DMA_LCD_SRC_EN_B2 */ + case 0xbe2: /* DMA_LCD_SRC_EN_B2 */ s->elements_f2 = value; break; - case 0xbe6: /* DMA_LCD_SRC_FN_B2 */ + case 0xbe6: /* DMA_LCD_SRC_FN_B2 */ s->frames_f2 = value; break; - case 0xbea: /* DMA_LCD_LCH_CTRL */ + case 0xbea: /* DMA_LCD_LCH_CTRL */ s->lch_type = value & 0xf; break; @@ -1140,7 +1133,7 @@ static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset, uint16_t *ret) { switch (offset) { - case 0xbc0: /* DMA_LCD_CSDP */ + case 0xbc0: /* DMA_LCD_CSDP */ *ret = (s->brust_f2 << 14) | (s->pack_f2 << 13) | ((s->data_type_f2 >> 1) << 11) | @@ -1149,7 +1142,7 @@ static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset, ((s->data_type_f1 >> 1) << 0); break; - case 0xbc2: /* DMA_LCD_CCR */ + case 0xbc2: /* DMA_LCD_CCR */ *ret = (s->mode_f2 << 14) | (s->mode_f1 << 12) | (s->end_prog << 11) | @@ -1161,7 +1154,7 @@ static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset, (s->bs << 4); break; - case 0xbc4: /* DMA_LCD_CTRL */ + case 0xbc4: /* DMA_LCD_CTRL */ qemu_irq_lower(s->irq); *ret = (s->dst << 8) | ((s->src & 0x6) << 5) | @@ -1170,79 +1163,79 @@ static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset, s->dual; break; - case 0xbc8: /* TOP_B1_L */ + case 0xbc8: /* TOP_B1_L */ *ret = s->src_f1_top & 0xffff; break; - case 0xbca: /* TOP_B1_U */ + case 0xbca: /* TOP_B1_U */ *ret = s->src_f1_top >> 16; break; - case 0xbcc: /* BOT_B1_L */ + case 0xbcc: /* BOT_B1_L */ *ret = s->src_f1_bottom & 0xffff; break; - case 0xbce: /* BOT_B1_U */ + case 0xbce: /* BOT_B1_U */ *ret = s->src_f1_bottom >> 16; break; - case 0xbd0: /* TOP_B2_L */ + case 0xbd0: /* TOP_B2_L */ *ret = s->src_f2_top & 0xffff; break; - case 0xbd2: /* TOP_B2_U */ + case 0xbd2: /* TOP_B2_U */ 
*ret = s->src_f2_top >> 16; break; - case 0xbd4: /* BOT_B2_L */ + case 0xbd4: /* BOT_B2_L */ *ret = s->src_f2_bottom & 0xffff; break; - case 0xbd6: /* BOT_B2_U */ + case 0xbd6: /* BOT_B2_U */ *ret = s->src_f2_bottom >> 16; break; - case 0xbd8: /* DMA_LCD_SRC_EI_B1 */ + case 0xbd8: /* DMA_LCD_SRC_EI_B1 */ *ret = s->element_index_f1; break; - case 0xbda: /* DMA_LCD_SRC_FI_B1_L */ + case 0xbda: /* DMA_LCD_SRC_FI_B1_L */ *ret = s->frame_index_f1 & 0xffff; break; - case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */ + case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */ *ret = s->frame_index_f1 >> 16; break; - case 0xbdc: /* DMA_LCD_SRC_EI_B2 */ + case 0xbdc: /* DMA_LCD_SRC_EI_B2 */ *ret = s->element_index_f2; break; - case 0xbde: /* DMA_LCD_SRC_FI_B2_L */ + case 0xbde: /* DMA_LCD_SRC_FI_B2_L */ *ret = s->frame_index_f2 & 0xffff; break; - case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */ + case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */ *ret = s->frame_index_f2 >> 16; break; - case 0xbe0: /* DMA_LCD_SRC_EN_B1 */ + case 0xbe0: /* DMA_LCD_SRC_EN_B1 */ *ret = s->elements_f1; break; - case 0xbe4: /* DMA_LCD_SRC_FN_B1 */ + case 0xbe4: /* DMA_LCD_SRC_FN_B1 */ *ret = s->frames_f1; break; - case 0xbe2: /* DMA_LCD_SRC_EN_B2 */ + case 0xbe2: /* DMA_LCD_SRC_EN_B2 */ *ret = s->elements_f2; break; - case 0xbe6: /* DMA_LCD_SRC_FN_B2 */ + case 0xbe6: /* DMA_LCD_SRC_FN_B2 */ *ret = s->frames_f2; break; - case 0xbea: /* DMA_LCD_LCH_CTRL */ + case 0xbea: /* DMA_LCD_LCH_CTRL */ *ret = s->lch_type; break; @@ -1256,7 +1249,7 @@ static int omap_dma_3_1_lcd_write(struct omap_dma_lcd_channel_s *s, int offset, uint16_t value) { switch (offset) { - case 0x300: /* SYS_DMA_LCD_CTRL */ + case 0x300: /* SYS_DMA_LCD_CTRL */ s->src = (value & 0x40) ? imif : emiff; s->condition = 0; /* Assume no bus errors and thus no BUS_ERROR irq bits. 
*/ @@ -1264,42 +1257,42 @@ static int omap_dma_3_1_lcd_write(struct omap_dma_lcd_channel_s *s, int offset, s->dual = value & 1; break; - case 0x302: /* SYS_DMA_LCD_TOP_F1_L */ + case 0x302: /* SYS_DMA_LCD_TOP_F1_L */ s->src_f1_top &= 0xffff0000; s->src_f1_top |= 0x0000ffff & value; break; - case 0x304: /* SYS_DMA_LCD_TOP_F1_U */ + case 0x304: /* SYS_DMA_LCD_TOP_F1_U */ s->src_f1_top &= 0x0000ffff; s->src_f1_top |= (uint32_t)value << 16; break; - case 0x306: /* SYS_DMA_LCD_BOT_F1_L */ + case 0x306: /* SYS_DMA_LCD_BOT_F1_L */ s->src_f1_bottom &= 0xffff0000; s->src_f1_bottom |= 0x0000ffff & value; break; - case 0x308: /* SYS_DMA_LCD_BOT_F1_U */ + case 0x308: /* SYS_DMA_LCD_BOT_F1_U */ s->src_f1_bottom &= 0x0000ffff; s->src_f1_bottom |= (uint32_t)value << 16; break; - case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */ + case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */ s->src_f2_top &= 0xffff0000; s->src_f2_top |= 0x0000ffff & value; break; - case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */ + case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */ s->src_f2_top &= 0x0000ffff; s->src_f2_top |= (uint32_t)value << 16; break; - case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */ + case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */ s->src_f2_bottom &= 0xffff0000; s->src_f2_bottom |= 0x0000ffff & value; break; - case 0x310: /* SYS_DMA_LCD_BOT_F2_U */ + case 0x310: /* SYS_DMA_LCD_BOT_F2_U */ s->src_f2_bottom &= 0x0000ffff; s->src_f2_bottom |= (uint32_t)value << 16; break; @@ -1316,7 +1309,7 @@ static int omap_dma_3_1_lcd_read(struct omap_dma_lcd_channel_s *s, int offset, int i; switch (offset) { - case 0x300: /* SYS_DMA_LCD_CTRL */ + case 0x300: /* SYS_DMA_LCD_CTRL */ i = s->condition; s->condition = 0; qemu_irq_lower(s->irq); @@ -1324,35 +1317,35 @@ static int omap_dma_3_1_lcd_read(struct omap_dma_lcd_channel_s *s, int offset, (s->interrupts << 1) | s->dual; break; - case 0x302: /* SYS_DMA_LCD_TOP_F1_L */ + case 0x302: /* SYS_DMA_LCD_TOP_F1_L */ *ret = s->src_f1_top & 0xffff; break; - case 0x304: /* SYS_DMA_LCD_TOP_F1_U */ + case 0x304: /* SYS_DMA_LCD_TOP_F1_U */ *ret = s->src_f1_top >> 16; break; - case 0x306: /* SYS_DMA_LCD_BOT_F1_L */ + case 0x306: /* SYS_DMA_LCD_BOT_F1_L */ *ret = s->src_f1_bottom & 0xffff; break; - case 0x308: /* SYS_DMA_LCD_BOT_F1_U */ + case 0x308: /* SYS_DMA_LCD_BOT_F1_U */ *ret = s->src_f1_bottom >> 16; break; - case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */ + case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */ *ret = s->src_f2_top & 0xffff; break; - case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */ + case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */ *ret = s->src_f2_top >> 16; break; - case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */ + case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */ *ret = s->src_f2_bottom & 0xffff; break; - case 0x310: /* SYS_DMA_LCD_BOT_F2_U */ + case 0x310: /* SYS_DMA_LCD_BOT_F2_U */ *ret = s->src_f2_bottom >> 16; break; @@ -1365,18 +1358,18 @@ static int omap_dma_3_1_lcd_read(struct omap_dma_lcd_channel_s *s, int offset, static int omap_dma_sys_write(struct omap_dma_s *s, int offset, uint16_t value) { switch (offset) { - case 0x400: /* SYS_DMA_GCR */ + case 0x400: /* SYS_DMA_GCR */ s->gcr = value; break; - case 0x404: /* DMA_GSCR */ + case 0x404: /* DMA_GSCR */ if (value & 0x8) omap_dma_disable_3_1_mapping(s); else omap_dma_enable_3_1_mapping(s); break; - case 0x408: /* DMA_GRST */ + case 0x408: /* DMA_GRST */ if (value & 0x1) omap_dma_reset(s->dma); break; @@ -1391,57 +1384,57 @@ static int omap_dma_sys_read(struct omap_dma_s *s, int offset, uint16_t *ret) { switch (offset) { - case 0x400: /* SYS_DMA_GCR */ + case 0x400: /* SYS_DMA_GCR */ *ret = s->gcr; break; - case 0x404: /* DMA_GSCR 
*/ + case 0x404: /* DMA_GSCR */ *ret = s->omap_3_1_mapping_disabled << 3; break; - case 0x408: /* DMA_GRST */ + case 0x408: /* DMA_GRST */ *ret = 0; break; - case 0x442: /* DMA_HW_ID */ - case 0x444: /* DMA_PCh2_ID */ - case 0x446: /* DMA_PCh0_ID */ - case 0x448: /* DMA_PCh1_ID */ - case 0x44a: /* DMA_PChG_ID */ - case 0x44c: /* DMA_PChD_ID */ + case 0x442: /* DMA_HW_ID */ + case 0x444: /* DMA_PCh2_ID */ + case 0x446: /* DMA_PCh0_ID */ + case 0x448: /* DMA_PCh1_ID */ + case 0x44a: /* DMA_PChG_ID */ + case 0x44c: /* DMA_PChD_ID */ *ret = 1; break; - case 0x44e: /* DMA_CAPS_0_U */ + case 0x44e: /* DMA_CAPS_0_U */ *ret = (s->caps[0] >> 16) & 0xffff; break; - case 0x450: /* DMA_CAPS_0_L */ + case 0x450: /* DMA_CAPS_0_L */ *ret = (s->caps[0] >> 0) & 0xffff; break; - case 0x452: /* DMA_CAPS_1_U */ + case 0x452: /* DMA_CAPS_1_U */ *ret = (s->caps[1] >> 16) & 0xffff; break; - case 0x454: /* DMA_CAPS_1_L */ + case 0x454: /* DMA_CAPS_1_L */ *ret = (s->caps[1] >> 0) & 0xffff; break; - case 0x456: /* DMA_CAPS_2 */ + case 0x456: /* DMA_CAPS_2 */ *ret = s->caps[2]; break; - case 0x458: /* DMA_CAPS_3 */ + case 0x458: /* DMA_CAPS_3 */ *ret = s->caps[3]; break; - case 0x45a: /* DMA_CAPS_4 */ + case 0x45a: /* DMA_CAPS_4 */ *ret = s->caps[4]; break; - case 0x460: /* DMA_PCh2_SR */ - case 0x480: /* DMA_PCh0_SR */ - case 0x482: /* DMA_PCh1_SR */ - case 0x4c0: /* DMA_PChD_SR_0 */ + case 0x460: /* DMA_PCh2_SR */ + case 0x480: /* DMA_PCh0_SR */ + case 0x482: /* DMA_PCh1_SR */ + case 0x4c0: /* DMA_PChD_SR_0 */ qemu_log_mask(LOG_UNIMP, "%s: Physical Channel Status Registers not implemented\n", __func__); @@ -1587,41 +1580,40 @@ static void omap_dma_setcaps(struct omap_dma_s *s) case omap_dma_3_1: break; case omap_dma_3_2: - case omap_dma_4: /* XXX Only available for sDMA */ s->caps[0] = - (1 << 19) | /* Constant Fill Capability */ - (1 << 18); /* Transparent BLT Capability */ + (1 << 19) | /* Constant Fill Capability */ + (1 << 18); /* Transparent BLT Capability */ s->caps[1] = - (1 << 1); /* 1-bit palettized capability (DMA 3.2 only) */ + (1 << 1); /* 1-bit palettized capability (DMA 3.2 only) */ s->caps[2] = - (1 << 8) | /* SEPARATE_SRC_AND_DST_INDEX_CPBLTY */ - (1 << 7) | /* DST_DOUBLE_INDEX_ADRS_CPBLTY */ - (1 << 6) | /* DST_SINGLE_INDEX_ADRS_CPBLTY */ - (1 << 5) | /* DST_POST_INCRMNT_ADRS_CPBLTY */ - (1 << 4) | /* DST_CONST_ADRS_CPBLTY */ - (1 << 3) | /* SRC_DOUBLE_INDEX_ADRS_CPBLTY */ - (1 << 2) | /* SRC_SINGLE_INDEX_ADRS_CPBLTY */ - (1 << 1) | /* SRC_POST_INCRMNT_ADRS_CPBLTY */ - (1 << 0); /* SRC_CONST_ADRS_CPBLTY */ + (1 << 8) | /* SEPARATE_SRC_AND_DST_INDEX_CPBLTY */ + (1 << 7) | /* DST_DOUBLE_INDEX_ADRS_CPBLTY */ + (1 << 6) | /* DST_SINGLE_INDEX_ADRS_CPBLTY */ + (1 << 5) | /* DST_POST_INCRMNT_ADRS_CPBLTY */ + (1 << 4) | /* DST_CONST_ADRS_CPBLTY */ + (1 << 3) | /* SRC_DOUBLE_INDEX_ADRS_CPBLTY */ + (1 << 2) | /* SRC_SINGLE_INDEX_ADRS_CPBLTY */ + (1 << 1) | /* SRC_POST_INCRMNT_ADRS_CPBLTY */ + (1 << 0); /* SRC_CONST_ADRS_CPBLTY */ s->caps[3] = - (1 << 6) | /* BLOCK_SYNCHR_CPBLTY (DMA 4 only) */ - (1 << 7) | /* PKT_SYNCHR_CPBLTY (DMA 4 only) */ - (1 << 5) | /* CHANNEL_CHAINING_CPBLTY */ - (1 << 4) | /* LCh_INTERLEAVE_CPBLTY */ - (1 << 3) | /* AUTOINIT_REPEAT_CPBLTY (DMA 3.2 only) */ - (1 << 2) | /* AUTOINIT_ENDPROG_CPBLTY (DMA 3.2 only) */ - (1 << 1) | /* FRAME_SYNCHR_CPBLTY */ - (1 << 0); /* ELMNT_SYNCHR_CPBLTY */ + (1 << 6) | /* BLOCK_SYNCHR_CPBLTY (DMA 4 only) */ + (1 << 7) | /* PKT_SYNCHR_CPBLTY (DMA 4 only) */ + (1 << 5) | /* CHANNEL_CHAINING_CPBLTY */ + (1 << 4) | /* LCh_INTERLEAVE_CPBLTY */ + (1 << 3) 
| /* AUTOINIT_REPEAT_CPBLTY (DMA 3.2 only) */ + (1 << 2) | /* AUTOINIT_ENDPROG_CPBLTY (DMA 3.2 only) */ + (1 << 1) | /* FRAME_SYNCHR_CPBLTY */ + (1 << 0); /* ELMNT_SYNCHR_CPBLTY */ s->caps[4] = - (1 << 7) | /* PKT_INTERRUPT_CPBLTY (DMA 4 only) */ - (1 << 6) | /* SYNC_STATUS_CPBLTY */ - (1 << 5) | /* BLOCK_INTERRUPT_CPBLTY */ - (1 << 4) | /* LAST_FRAME_INTERRUPT_CPBLTY */ - (1 << 3) | /* FRAME_INTERRUPT_CPBLTY */ - (1 << 2) | /* HALF_FRAME_INTERRUPT_CPBLTY */ - (1 << 1) | /* EVENT_DROP_INTERRUPT_CPBLTY */ - (1 << 0); /* TIMEOUT_INTERRUPT_CPBLTY (DMA 3.2 only) */ + (1 << 7) | /* PKT_INTERRUPT_CPBLTY (DMA 4 only) */ + (1 << 6) | /* SYNC_STATUS_CPBLTY */ + (1 << 5) | /* BLOCK_INTERRUPT_CPBLTY */ + (1 << 4) | /* LAST_FRAME_INTERRUPT_CPBLTY */ + (1 << 3) | /* FRAME_INTERRUPT_CPBLTY */ + (1 << 2) | /* HALF_FRAME_INTERRUPT_CPBLTY */ + (1 << 1) | /* EVENT_DROP_INTERRUPT_CPBLTY */ + (1 << 0); /* TIMEOUT_INTERRUPT_CPBLTY (DMA 3.2 only) */ break; } } @@ -1678,443 +1670,6 @@ struct soc_dma_s *omap_dma_init(hwaddr base, qemu_irq *irqs, return s->dma; } -static void omap_dma_interrupts_4_update(struct omap_dma_s *s) -{ - struct omap_dma_channel_s *ch = s->ch; - uint32_t bmp, bit; - - for (bmp = 0, bit = 1; bit; ch ++, bit <<= 1) - if (ch->status) { - bmp |= bit; - ch->cstatus |= ch->status; - ch->status = 0; - } - if ((s->irqstat[0] |= s->irqen[0] & bmp)) - qemu_irq_raise(s->irq[0]); - if ((s->irqstat[1] |= s->irqen[1] & bmp)) - qemu_irq_raise(s->irq[1]); - if ((s->irqstat[2] |= s->irqen[2] & bmp)) - qemu_irq_raise(s->irq[2]); - if ((s->irqstat[3] |= s->irqen[3] & bmp)) - qemu_irq_raise(s->irq[3]); -} - -static uint64_t omap_dma4_read(void *opaque, hwaddr addr, - unsigned size) -{ - struct omap_dma_s *s = opaque; - int irqn = 0, chnum; - struct omap_dma_channel_s *ch; - - if (size == 1) { - return omap_badwidth_read16(opaque, addr); - } - - switch (addr) { - case 0x00: /* DMA4_REVISION */ - return 0x40; - - case 0x14: /* DMA4_IRQSTATUS_L3 */ - irqn ++; - /* fall through */ - case 0x10: /* DMA4_IRQSTATUS_L2 */ - irqn ++; - /* fall through */ - case 0x0c: /* DMA4_IRQSTATUS_L1 */ - irqn ++; - /* fall through */ - case 0x08: /* DMA4_IRQSTATUS_L0 */ - return s->irqstat[irqn]; - - case 0x24: /* DMA4_IRQENABLE_L3 */ - irqn ++; - /* fall through */ - case 0x20: /* DMA4_IRQENABLE_L2 */ - irqn ++; - /* fall through */ - case 0x1c: /* DMA4_IRQENABLE_L1 */ - irqn ++; - /* fall through */ - case 0x18: /* DMA4_IRQENABLE_L0 */ - return s->irqen[irqn]; - - case 0x28: /* DMA4_SYSSTATUS */ - return 1; /* RESETDONE */ - - case 0x2c: /* DMA4_OCP_SYSCONFIG */ - return s->ocp; - - case 0x64: /* DMA4_CAPS_0 */ - return s->caps[0]; - case 0x6c: /* DMA4_CAPS_2 */ - return s->caps[2]; - case 0x70: /* DMA4_CAPS_3 */ - return s->caps[3]; - case 0x74: /* DMA4_CAPS_4 */ - return s->caps[4]; - - case 0x78: /* DMA4_GCR */ - return s->gcr; - - case 0x80 ... 
0xfff: - addr -= 0x80; - chnum = addr / 0x60; - ch = s->ch + chnum; - addr -= chnum * 0x60; - break; - - default: - OMAP_BAD_REG(addr); - return 0; - } - - /* Per-channel registers */ - switch (addr) { - case 0x00: /* DMA4_CCR */ - return (ch->buf_disable << 25) | - (ch->src_sync << 24) | - (ch->prefetch << 23) | - ((ch->sync & 0x60) << 14) | - (ch->bs << 18) | - (ch->transparent_copy << 17) | - (ch->constant_fill << 16) | - (ch->mode[1] << 14) | - (ch->mode[0] << 12) | - (0 << 10) | (0 << 9) | - (ch->suspend << 8) | - (ch->enable << 7) | - (ch->priority << 6) | - (ch->fs << 5) | (ch->sync & 0x1f); - - case 0x04: /* DMA4_CLNK_CTRL */ - return (ch->link_enabled << 15) | ch->link_next_ch; - - case 0x08: /* DMA4_CICR */ - return ch->interrupts; - - case 0x0c: /* DMA4_CSR */ - return ch->cstatus; - - case 0x10: /* DMA4_CSDP */ - return (ch->endian[0] << 21) | - (ch->endian_lock[0] << 20) | - (ch->endian[1] << 19) | - (ch->endian_lock[1] << 18) | - (ch->write_mode << 16) | - (ch->burst[1] << 14) | - (ch->pack[1] << 13) | - (ch->translate[1] << 9) | - (ch->burst[0] << 7) | - (ch->pack[0] << 6) | - (ch->translate[0] << 2) | - (ch->data_type >> 1); - - case 0x14: /* DMA4_CEN */ - return ch->elements; - - case 0x18: /* DMA4_CFN */ - return ch->frames; - - case 0x1c: /* DMA4_CSSA */ - return ch->addr[0]; - - case 0x20: /* DMA4_CDSA */ - return ch->addr[1]; - - case 0x24: /* DMA4_CSEI */ - return ch->element_index[0]; - - case 0x28: /* DMA4_CSFI */ - return ch->frame_index[0]; - - case 0x2c: /* DMA4_CDEI */ - return ch->element_index[1]; - - case 0x30: /* DMA4_CDFI */ - return ch->frame_index[1]; - - case 0x34: /* DMA4_CSAC */ - return ch->active_set.src & 0xffff; - - case 0x38: /* DMA4_CDAC */ - return ch->active_set.dest & 0xffff; - - case 0x3c: /* DMA4_CCEN */ - return ch->active_set.element; - - case 0x40: /* DMA4_CCFN */ - return ch->active_set.frame; - - case 0x44: /* DMA4_COLOR */ - /* XXX only in sDMA */ - return ch->color; - - default: - OMAP_BAD_REG(addr); - return 0; - } -} - -static void omap_dma4_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_dma_s *s = opaque; - int chnum, irqn = 0; - struct omap_dma_channel_s *ch; - - if (size == 1) { - omap_badwidth_write16(opaque, addr, value); - return; - } - - switch (addr) { - case 0x14: /* DMA4_IRQSTATUS_L3 */ - irqn ++; - /* fall through */ - case 0x10: /* DMA4_IRQSTATUS_L2 */ - irqn ++; - /* fall through */ - case 0x0c: /* DMA4_IRQSTATUS_L1 */ - irqn ++; - /* fall through */ - case 0x08: /* DMA4_IRQSTATUS_L0 */ - s->irqstat[irqn] &= ~value; - if (!s->irqstat[irqn]) - qemu_irq_lower(s->irq[irqn]); - return; - - case 0x24: /* DMA4_IRQENABLE_L3 */ - irqn ++; - /* fall through */ - case 0x20: /* DMA4_IRQENABLE_L2 */ - irqn ++; - /* fall through */ - case 0x1c: /* DMA4_IRQENABLE_L1 */ - irqn ++; - /* fall through */ - case 0x18: /* DMA4_IRQENABLE_L0 */ - s->irqen[irqn] = value; - return; - - case 0x2c: /* DMA4_OCP_SYSCONFIG */ - if (value & 2) /* SOFTRESET */ - omap_dma_reset(s->dma); - s->ocp = value & 0x3321; - if (((s->ocp >> 12) & 3) == 3) { /* MIDLEMODE */ - qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA power mode\n", - __func__); - } - return; - - case 0x78: /* DMA4_GCR */ - s->gcr = value & 0x00ff00ff; - if ((value & 0xff) == 0x00) { /* MAX_CHANNEL_FIFO_DEPTH */ - qemu_log_mask(LOG_GUEST_ERROR, "%s: wrong FIFO depth in GCR\n", - __func__); - } - return; - - case 0x80 ... 
0xfff: - addr -= 0x80; - chnum = addr / 0x60; - ch = s->ch + chnum; - addr -= chnum * 0x60; - break; - - case 0x00: /* DMA4_REVISION */ - case 0x28: /* DMA4_SYSSTATUS */ - case 0x64: /* DMA4_CAPS_0 */ - case 0x6c: /* DMA4_CAPS_2 */ - case 0x70: /* DMA4_CAPS_3 */ - case 0x74: /* DMA4_CAPS_4 */ - OMAP_RO_REG(addr); - return; - - default: - OMAP_BAD_REG(addr); - return; - } - - /* Per-channel registers */ - switch (addr) { - case 0x00: /* DMA4_CCR */ - ch->buf_disable = (value >> 25) & 1; - ch->src_sync = (value >> 24) & 1; /* XXX For CamDMA must be 1 */ - if (ch->buf_disable && !ch->src_sync) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Buffering disable is not allowed in " - "destination synchronised mode\n", __func__); - } - ch->prefetch = (value >> 23) & 1; - ch->bs = (value >> 18) & 1; - ch->transparent_copy = (value >> 17) & 1; - ch->constant_fill = (value >> 16) & 1; - ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14); - ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12); - ch->suspend = (value & 0x0100) >> 8; - ch->priority = (value & 0x0040) >> 6; - ch->fs = (value & 0x0020) >> 5; - if (ch->fs && ch->bs && ch->mode[0] && ch->mode[1]) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: For a packet transfer at least one port " - "must be constant-addressed\n", __func__); - } - ch->sync = (value & 0x001f) | ((value >> 14) & 0x0060); - /* XXX must be 0x01 for CamDMA */ - - if (value & 0x0080) - omap_dma_enable_channel(s, ch); - else - omap_dma_disable_channel(s, ch); - - break; - - case 0x04: /* DMA4_CLNK_CTRL */ - ch->link_enabled = (value >> 15) & 0x1; - ch->link_next_ch = value & 0x1f; - break; - - case 0x08: /* DMA4_CICR */ - ch->interrupts = value & 0x09be; - break; - - case 0x0c: /* DMA4_CSR */ - ch->cstatus &= ~value; - break; - - case 0x10: /* DMA4_CSDP */ - ch->endian[0] =(value >> 21) & 1; - ch->endian_lock[0] =(value >> 20) & 1; - ch->endian[1] =(value >> 19) & 1; - ch->endian_lock[1] =(value >> 18) & 1; - if (ch->endian[0] != ch->endian[1]) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: DMA endianness conversion enable attempt\n", - __func__); - } - ch->write_mode = (value >> 16) & 3; - ch->burst[1] = (value & 0xc000) >> 14; - ch->pack[1] = (value & 0x2000) >> 13; - ch->translate[1] = (value & 0x1e00) >> 9; - ch->burst[0] = (value & 0x0180) >> 7; - ch->pack[0] = (value & 0x0040) >> 6; - ch->translate[0] = (value & 0x003c) >> 2; - if (ch->translate[0] | ch->translate[1]) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: bad MReqAddressTranslate sideband signal\n", - __func__); - } - ch->data_type = 1 << (value & 3); - if ((value & 3) == 3) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: bad data_type for DMA channel\n", __func__); - ch->data_type >>= 1; - } - break; - - case 0x14: /* DMA4_CEN */ - ch->set_update = 1; - ch->elements = value & 0xffffff; - break; - - case 0x18: /* DMA4_CFN */ - ch->frames = value & 0xffff; - ch->set_update = 1; - break; - - case 0x1c: /* DMA4_CSSA */ - ch->addr[0] = (hwaddr) (uint32_t) value; - ch->set_update = 1; - break; - - case 0x20: /* DMA4_CDSA */ - ch->addr[1] = (hwaddr) (uint32_t) value; - ch->set_update = 1; - break; - - case 0x24: /* DMA4_CSEI */ - ch->element_index[0] = (int16_t) value; - ch->set_update = 1; - break; - - case 0x28: /* DMA4_CSFI */ - ch->frame_index[0] = (int32_t) value; - ch->set_update = 1; - break; - - case 0x2c: /* DMA4_CDEI */ - ch->element_index[1] = (int16_t) value; - ch->set_update = 1; - break; - - case 0x30: /* DMA4_CDFI */ - ch->frame_index[1] = (int32_t) value; - ch->set_update = 1; - break; - - case 0x44: /* 
DMA4_COLOR */ - /* XXX only in sDMA */ - ch->color = value; - break; - - case 0x34: /* DMA4_CSAC */ - case 0x38: /* DMA4_CDAC */ - case 0x3c: /* DMA4_CCEN */ - case 0x40: /* DMA4_CCFN */ - OMAP_RO_REG(addr); - break; - - default: - OMAP_BAD_REG(addr); - } -} - -static const MemoryRegionOps omap_dma4_ops = { - .read = omap_dma4_read, - .write = omap_dma4_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -struct soc_dma_s *omap_dma4_init(hwaddr base, qemu_irq *irqs, - MemoryRegion *sysmem, - struct omap_mpu_state_s *mpu, int fifo, - int chans, omap_clk iclk, omap_clk fclk) -{ - int i; - struct omap_dma_s *s = g_new0(struct omap_dma_s, 1); - - s->model = omap_dma_4; - s->chans = chans; - s->mpu = mpu; - s->clk = fclk; - - s->dma = soc_dma_init(s->chans); - s->dma->freq = omap_clk_getrate(fclk); - s->dma->transfer_fn = omap_dma_transfer_generic; - s->dma->setup_fn = omap_dma_transfer_setup; - s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 64); - s->dma->opaque = s; - for (i = 0; i < s->chans; i ++) { - s->ch[i].dma = &s->dma->ch[i]; - s->dma->ch[i].opaque = &s->ch[i]; - } - - memcpy(&s->irq, irqs, sizeof(s->irq)); - s->intr_update = omap_dma_interrupts_4_update; - - omap_dma_setcaps(s); - omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0)); - omap_dma_reset(s->dma); - omap_dma_clk_update(s, 0, !!s->dma->freq); - - memory_region_init_io(&s->iomem, NULL, &omap_dma4_ops, s, "omap.dma4", 0x1000); - memory_region_add_subregion(sysmem, base, &s->iomem); - - mpu->drq = s->dma->drq; - - return s->dma; -} - struct omap_dma_lcd_channel_s *omap_dma_get_lcdch(struct soc_dma_s *dma) { struct omap_dma_s *s = dma->opaque; diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c index 1e49c22e933..277d9343223 100644 --- a/hw/dma/pl080.c +++ b/hw/dma/pl080.c @@ -408,20 +408,19 @@ static void pl081_init(Object *obj) s->nchannels = 2; } -static Property pl080_properties[] = { +static const Property pl080_properties[] = { DEFINE_PROP_LINK("downstream", PL080State, downstream, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void pl080_class_init(ObjectClass *oc, void *data) +static void pl080_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->vmsd = &vmstate_pl080; dc->realize = pl080_realize; device_class_set_props(dc, pl080_properties); - dc->reset = pl080_reset; + device_class_set_legacy_reset(dc, pl080_reset); } static const TypeInfo pl080_info = { diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c index 5f89295af3a..a570bb08ec4 100644 --- a/hw/dma/pl330.c +++ b/hw/dma/pl330.c @@ -22,7 +22,7 @@ #include "migration/vmstate.h" #include "qapi/error.h" #include "qemu/timer.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qemu/log.h" #include "qemu/module.h" #include "trace.h" @@ -1646,7 +1646,7 @@ static void pl330_realize(DeviceState *dev, Error **errp) pl330_fifo_init(&s->fifo, s->data_width / 4 * s->data_buffer_dep); } -static Property pl330_properties[] = { +static const Property pl330_properties[] = { /* CR0 */ DEFINE_PROP_UINT32("num_chnls", PL330State, num_chnls, 8), DEFINE_PROP_UINT8("num_periph_req", PL330State, num_periph_req, 4), @@ -1669,16 +1669,14 @@ static Property pl330_properties[] = { DEFINE_PROP_LINK("memory", PL330State, mem_mr, TYPE_MEMORY_REGION, MemoryRegion *), - - DEFINE_PROP_END_OF_LIST(), }; -static void pl330_class_init(ObjectClass *klass, void *data) +static void pl330_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = pl330_realize; - dc->reset 
= pl330_reset; + device_class_set_legacy_reset(dc, pl330_reset); device_class_set_props(dc, pl330_properties); dc->vmsd = &vmstate_pl330; } diff --git a/hw/dma/pxa2xx_dma.c b/hw/dma/pxa2xx_dma.c deleted file mode 100644 index 9f62f0b633b..00000000000 --- a/hw/dma/pxa2xx_dma.c +++ /dev/null @@ -1,591 +0,0 @@ -/* - * Intel XScale PXA255/270 DMA controller. - * - * Copyright (c) 2006 Openedhand Ltd. - * Copyright (c) 2006 Thorsten Zitterell - * Written by Andrzej Zaborowski - * - * This code is licensed under the GPL. - */ - -#include "qemu/osdep.h" -#include "qemu/log.h" -#include "hw/hw.h" -#include "hw/irq.h" -#include "hw/qdev-properties.h" -#include "hw/arm/pxa.h" -#include "hw/sysbus.h" -#include "migration/vmstate.h" -#include "qapi/error.h" -#include "qemu/module.h" -#include "qom/object.h" - -#define PXA255_DMA_NUM_CHANNELS 16 -#define PXA27X_DMA_NUM_CHANNELS 32 - -#define PXA2XX_DMA_NUM_REQUESTS 75 - -typedef struct { - uint32_t descr; - uint32_t src; - uint32_t dest; - uint32_t cmd; - uint32_t state; - int request; -} PXA2xxDMAChannel; - -#define TYPE_PXA2XX_DMA "pxa2xx-dma" -OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxDMAState, PXA2XX_DMA) - -struct PXA2xxDMAState { - SysBusDevice parent_obj; - - MemoryRegion iomem; - qemu_irq irq; - - uint32_t stopintr; - uint32_t eorintr; - uint32_t rasintr; - uint32_t startintr; - uint32_t endintr; - - uint32_t align; - uint32_t pio; - - int channels; - PXA2xxDMAChannel *chan; - - uint8_t req[PXA2XX_DMA_NUM_REQUESTS]; - - /* Flag to avoid recursive DMA invocations. */ - int running; -}; - -#define DCSR0 0x0000 /* DMA Control / Status register for Channel 0 */ -#define DCSR31 0x007c /* DMA Control / Status register for Channel 31 */ -#define DALGN 0x00a0 /* DMA Alignment register */ -#define DPCSR 0x00a4 /* DMA Programmed I/O Control Status register */ -#define DRQSR0 0x00e0 /* DMA DREQ<0> Status register */ -#define DRQSR1 0x00e4 /* DMA DREQ<1> Status register */ -#define DRQSR2 0x00e8 /* DMA DREQ<2> Status register */ -#define DINT 0x00f0 /* DMA Interrupt register */ -#define DRCMR0 0x0100 /* Request to Channel Map register 0 */ -#define DRCMR63 0x01fc /* Request to Channel Map register 63 */ -#define D_CH0 0x0200 /* Channel 0 Descriptor start */ -#define DRCMR64 0x1100 /* Request to Channel Map register 64 */ -#define DRCMR74 0x1128 /* Request to Channel Map register 74 */ - -/* Per-channel register */ -#define DDADR 0x00 -#define DSADR 0x01 -#define DTADR 0x02 -#define DCMD 0x03 - -/* Bit-field masks */ -#define DRCMR_CHLNUM 0x1f -#define DRCMR_MAPVLD (1 << 7) -#define DDADR_STOP (1 << 0) -#define DDADR_BREN (1 << 1) -#define DCMD_LEN 0x1fff -#define DCMD_WIDTH(x) (1 << ((((x) >> 14) & 3) - 1)) -#define DCMD_SIZE(x) (4 << (((x) >> 16) & 3)) -#define DCMD_FLYBYT (1 << 19) -#define DCMD_FLYBYS (1 << 20) -#define DCMD_ENDIRQEN (1 << 21) -#define DCMD_STARTIRQEN (1 << 22) -#define DCMD_CMPEN (1 << 25) -#define DCMD_FLOWTRG (1 << 28) -#define DCMD_FLOWSRC (1 << 29) -#define DCMD_INCTRGADDR (1 << 30) -#define DCMD_INCSRCADDR (1 << 31) -#define DCSR_BUSERRINTR (1 << 0) -#define DCSR_STARTINTR (1 << 1) -#define DCSR_ENDINTR (1 << 2) -#define DCSR_STOPINTR (1 << 3) -#define DCSR_RASINTR (1 << 4) -#define DCSR_REQPEND (1 << 8) -#define DCSR_EORINT (1 << 9) -#define DCSR_CMPST (1 << 10) -#define DCSR_MASKRUN (1 << 22) -#define DCSR_RASIRQEN (1 << 23) -#define DCSR_CLRCMPST (1 << 24) -#define DCSR_SETCMPST (1 << 25) -#define DCSR_EORSTOPEN (1 << 26) -#define DCSR_EORJMPEN (1 << 27) -#define DCSR_EORIRQEN (1 << 28) -#define DCSR_STOPIRQEN (1 << 29) -#define 
DCSR_NODESCFETCH (1 << 30) -#define DCSR_RUN (1 << 31) - -static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch) -{ - if (ch >= 0) { - if ((s->chan[ch].state & DCSR_STOPIRQEN) && - (s->chan[ch].state & DCSR_STOPINTR)) - s->stopintr |= 1 << ch; - else - s->stopintr &= ~(1 << ch); - - if ((s->chan[ch].state & DCSR_EORIRQEN) && - (s->chan[ch].state & DCSR_EORINT)) - s->eorintr |= 1 << ch; - else - s->eorintr &= ~(1 << ch); - - if ((s->chan[ch].state & DCSR_RASIRQEN) && - (s->chan[ch].state & DCSR_RASINTR)) - s->rasintr |= 1 << ch; - else - s->rasintr &= ~(1 << ch); - - if (s->chan[ch].state & DCSR_STARTINTR) - s->startintr |= 1 << ch; - else - s->startintr &= ~(1 << ch); - - if (s->chan[ch].state & DCSR_ENDINTR) - s->endintr |= 1 << ch; - else - s->endintr &= ~(1 << ch); - } - - if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr) - qemu_irq_raise(s->irq); - else - qemu_irq_lower(s->irq); -} - -static inline void pxa2xx_dma_descriptor_fetch( - PXA2xxDMAState *s, int ch) -{ - uint32_t desc[4]; - hwaddr daddr = s->chan[ch].descr & ~0xf; - if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST)) - daddr += 32; - - cpu_physical_memory_read(daddr, desc, 16); - s->chan[ch].descr = desc[DDADR]; - s->chan[ch].src = desc[DSADR]; - s->chan[ch].dest = desc[DTADR]; - s->chan[ch].cmd = desc[DCMD]; - - if (s->chan[ch].cmd & DCMD_FLOWSRC) - s->chan[ch].src &= ~3; - if (s->chan[ch].cmd & DCMD_FLOWTRG) - s->chan[ch].dest &= ~3; - - if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT)) - printf("%s: unsupported mode in channel %i\n", __func__, ch); - - if (s->chan[ch].cmd & DCMD_STARTIRQEN) - s->chan[ch].state |= DCSR_STARTINTR; -} - -static void pxa2xx_dma_run(PXA2xxDMAState *s) -{ - int c, srcinc, destinc; - uint32_t n, size; - uint32_t width; - uint32_t length; - uint8_t buffer[32]; - PXA2xxDMAChannel *ch; - - if (s->running ++) - return; - - while (s->running) { - s->running = 1; - for (c = 0; c < s->channels; c ++) { - ch = &s->chan[c]; - - while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) { - /* Test for pending requests */ - if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request) - break; - - length = ch->cmd & DCMD_LEN; - size = DCMD_SIZE(ch->cmd); - width = DCMD_WIDTH(ch->cmd); - - srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0; - destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0; - - while (length) { - size = MIN(length, size); - - for (n = 0; n < size; n += width) { - cpu_physical_memory_read(ch->src, buffer + n, width); - ch->src += srcinc; - } - - for (n = 0; n < size; n += width) { - cpu_physical_memory_write(ch->dest, buffer + n, width); - ch->dest += destinc; - } - - length -= size; - - if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && - !ch->request) { - ch->state |= DCSR_EORINT; - if (ch->state & DCSR_EORSTOPEN) - ch->state |= DCSR_STOPINTR; - if ((ch->state & DCSR_EORJMPEN) && - !(ch->state & DCSR_NODESCFETCH)) - pxa2xx_dma_descriptor_fetch(s, c); - break; - } - } - - ch->cmd = (ch->cmd & ~DCMD_LEN) | length; - - /* Is the transfer complete now? 
*/ - if (!length) { - if (ch->cmd & DCMD_ENDIRQEN) - ch->state |= DCSR_ENDINTR; - - if ((ch->state & DCSR_NODESCFETCH) || - (ch->descr & DDADR_STOP) || - (ch->state & DCSR_EORSTOPEN)) { - ch->state |= DCSR_STOPINTR; - ch->state &= ~DCSR_RUN; - - break; - } - - ch->state |= DCSR_STOPINTR; - break; - } - } - } - - s->running --; - } -} - -static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset, - unsigned size) -{ - PXA2xxDMAState *s = (PXA2xxDMAState *) opaque; - unsigned int channel; - - if (size != 4) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n", - __func__, size); - return 5; - } - - switch (offset) { - case DRCMR64 ... DRCMR74: - offset -= DRCMR64 - DRCMR0 - (64 << 2); - /* Fall through */ - case DRCMR0 ... DRCMR63: - channel = (offset - DRCMR0) >> 2; - return s->req[channel]; - - case DRQSR0: - case DRQSR1: - case DRQSR2: - return 0; - - case DCSR0 ... DCSR31: - channel = offset >> 2; - if (s->chan[channel].request) - return s->chan[channel].state | DCSR_REQPEND; - return s->chan[channel].state; - - case DINT: - return s->stopintr | s->eorintr | s->rasintr | - s->startintr | s->endintr; - - case DALGN: - return s->align; - - case DPCSR: - return s->pio; - } - - if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) { - channel = (offset - D_CH0) >> 4; - switch ((offset & 0x0f) >> 2) { - case DDADR: - return s->chan[channel].descr; - case DSADR: - return s->chan[channel].src; - case DTADR: - return s->chan[channel].dest; - case DCMD: - return s->chan[channel].cmd; - } - } - qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", - __func__, offset); - return 7; -} - -static void pxa2xx_dma_write(void *opaque, hwaddr offset, - uint64_t value, unsigned size) -{ - PXA2xxDMAState *s = (PXA2xxDMAState *) opaque; - unsigned int channel; - - if (size != 4) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n", - __func__, size); - return; - } - - switch (offset) { - case DRCMR64 ... DRCMR74: - offset -= DRCMR64 - DRCMR0 - (64 << 2); - /* Fall through */ - case DRCMR0 ... DRCMR63: - channel = (offset - DRCMR0) >> 2; - - if (value & DRCMR_MAPVLD) - if ((value & DRCMR_CHLNUM) > s->channels) - hw_error("%s: Bad DMA channel %i\n", - __func__, (unsigned)value & DRCMR_CHLNUM); - - s->req[channel] = value; - break; - - case DRQSR0: - case DRQSR1: - case DRQSR2: - /* Nothing to do */ - break; - - case DCSR0 ... DCSR31: - channel = offset >> 2; - s->chan[channel].state &= 0x0000071f & ~(value & - (DCSR_EORINT | DCSR_ENDINTR | - DCSR_STARTINTR | DCSR_BUSERRINTR)); - s->chan[channel].state |= value & 0xfc800000; - - if (s->chan[channel].state & DCSR_STOPIRQEN) - s->chan[channel].state &= ~DCSR_STOPINTR; - - if (value & DCSR_NODESCFETCH) { - /* No-descriptor-fetch mode */ - if (value & DCSR_RUN) { - s->chan[channel].state &= ~DCSR_STOPINTR; - pxa2xx_dma_run(s); - } - } else { - /* Descriptor-fetch mode */ - if (value & DCSR_RUN) { - s->chan[channel].state &= ~DCSR_STOPINTR; - pxa2xx_dma_descriptor_fetch(s, channel); - pxa2xx_dma_run(s); - } - } - - /* Shouldn't matter as our DMA is synchronous. 
*/ - if (!(value & (DCSR_RUN | DCSR_MASKRUN))) - s->chan[channel].state |= DCSR_STOPINTR; - - if (value & DCSR_CLRCMPST) - s->chan[channel].state &= ~DCSR_CMPST; - if (value & DCSR_SETCMPST) - s->chan[channel].state |= DCSR_CMPST; - - pxa2xx_dma_update(s, channel); - break; - - case DALGN: - s->align = value; - break; - - case DPCSR: - s->pio = value & 0x80000001; - break; - - default: - if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) { - channel = (offset - D_CH0) >> 4; - switch ((offset & 0x0f) >> 2) { - case DDADR: - s->chan[channel].descr = value; - break; - case DSADR: - s->chan[channel].src = value; - break; - case DTADR: - s->chan[channel].dest = value; - break; - case DCMD: - s->chan[channel].cmd = value; - break; - default: - goto fail; - } - - break; - } - fail: - qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", - __func__, offset); - } -} - -static const MemoryRegionOps pxa2xx_dma_ops = { - .read = pxa2xx_dma_read, - .write = pxa2xx_dma_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void pxa2xx_dma_request(void *opaque, int req_num, int on) -{ - PXA2xxDMAState *s = opaque; - int ch; - if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS) - hw_error("%s: Bad DMA request %i\n", __func__, req_num); - - if (!(s->req[req_num] & DRCMR_MAPVLD)) - return; - ch = s->req[req_num] & DRCMR_CHLNUM; - - if (!s->chan[ch].request && on) - s->chan[ch].state |= DCSR_RASINTR; - else - s->chan[ch].state &= ~DCSR_RASINTR; - if (s->chan[ch].request && !on) - s->chan[ch].state |= DCSR_EORINT; - - s->chan[ch].request = on; - if (on) { - pxa2xx_dma_run(s); - pxa2xx_dma_update(s, ch); - } -} - -static void pxa2xx_dma_init(Object *obj) -{ - DeviceState *dev = DEVICE(obj); - PXA2xxDMAState *s = PXA2XX_DMA(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - - memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS); - - qdev_init_gpio_in(dev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS); - - memory_region_init_io(&s->iomem, obj, &pxa2xx_dma_ops, s, - "pxa2xx.dma", 0x00010000); - sysbus_init_mmio(sbd, &s->iomem); - sysbus_init_irq(sbd, &s->irq); -} - -static void pxa2xx_dma_realize(DeviceState *dev, Error **errp) -{ - PXA2xxDMAState *s = PXA2XX_DMA(dev); - int i; - - if (s->channels <= 0) { - error_setg(errp, "channels value invalid"); - return; - } - - s->chan = g_new0(PXA2xxDMAChannel, s->channels); - - for (i = 0; i < s->channels; i ++) - s->chan[i].state = DCSR_STOPINTR; -} - -DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq) -{ - DeviceState *dev; - - dev = qdev_new("pxa2xx-dma"); - qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq); - - return dev; -} - -DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq) -{ - DeviceState *dev; - - dev = qdev_new("pxa2xx-dma"); - qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq); - - return dev; -} - -static bool is_version_0(void *opaque, int version_id) -{ - return version_id == 0; -} - -static const VMStateDescription vmstate_pxa2xx_dma_chan = { - .name = "pxa2xx_dma_chan", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_UINT32(descr, PXA2xxDMAChannel), - VMSTATE_UINT32(src, PXA2xxDMAChannel), - VMSTATE_UINT32(dest, 
PXA2xxDMAChannel), - VMSTATE_UINT32(cmd, PXA2xxDMAChannel), - VMSTATE_UINT32(state, PXA2xxDMAChannel), - VMSTATE_INT32(request, PXA2xxDMAChannel), - VMSTATE_END_OF_LIST(), - }, -}; - -static const VMStateDescription vmstate_pxa2xx_dma = { - .name = "pxa2xx_dma", - .version_id = 1, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_UNUSED_TEST(is_version_0, 4), - VMSTATE_UINT32(stopintr, PXA2xxDMAState), - VMSTATE_UINT32(eorintr, PXA2xxDMAState), - VMSTATE_UINT32(rasintr, PXA2xxDMAState), - VMSTATE_UINT32(startintr, PXA2xxDMAState), - VMSTATE_UINT32(endintr, PXA2xxDMAState), - VMSTATE_UINT32(align, PXA2xxDMAState), - VMSTATE_UINT32(pio, PXA2xxDMAState), - VMSTATE_BUFFER(req, PXA2xxDMAState), - VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels, - vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel), - VMSTATE_END_OF_LIST(), - }, -}; - -static Property pxa2xx_dma_properties[] = { - DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1), - DEFINE_PROP_END_OF_LIST(), -}; - -static void pxa2xx_dma_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->desc = "PXA2xx DMA controller"; - dc->vmsd = &vmstate_pxa2xx_dma; - device_class_set_props(dc, pxa2xx_dma_properties); - dc->realize = pxa2xx_dma_realize; -} - -static const TypeInfo pxa2xx_dma_info = { - .name = TYPE_PXA2XX_DMA, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PXA2xxDMAState), - .instance_init = pxa2xx_dma_init, - .class_init = pxa2xx_dma_class_init, -}; - -static void pxa2xx_dma_register_types(void) -{ - type_register_static(&pxa2xx_dma_info); -} - -type_init(pxa2xx_dma_register_types) diff --git a/hw/dma/rc4030.c b/hw/dma/rc4030.c index 915284194fe..b6ed1d46433 100644 --- a/hw/dma/rc4030.c +++ b/hw/dma/rc4030.c @@ -32,7 +32,7 @@ #include "qemu/timer.h" #include "qemu/log.h" #include "qemu/module.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "trace.h" #include "qom/object.h" @@ -701,13 +701,13 @@ static void rc4030_unrealize(DeviceState *dev) object_unparent(OBJECT(&s->dma_mr)); } -static void rc4030_class_init(ObjectClass *klass, void *class_data) +static void rc4030_class_init(ObjectClass *klass, const void *class_data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = rc4030_realize; dc->unrealize = rc4030_unrealize; - dc->reset = rc4030_reset; + device_class_set_legacy_reset(dc, rc4030_reset); dc->vmsd = &vmstate_rc4030; } @@ -720,7 +720,7 @@ static const TypeInfo rc4030_info = { }; static void rc4030_iommu_memory_region_class_init(ObjectClass *klass, - void *data) + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); diff --git a/hw/dma/sifive_pdma.c b/hw/dma/sifive_pdma.c index 1dd88f3479d..48de3a24785 100644 --- a/hw/dma/sifive_pdma.c +++ b/hw/dma/sifive_pdma.c @@ -28,7 +28,7 @@ #include "hw/qdev-properties.h" #include "hw/sysbus.h" #include "migration/vmstate.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/dma/sifive_pdma.h" #define DMA_CONTROL 0x000 @@ -152,7 +152,6 @@ static void sifive_pdma_run(SiFivePDMAState *s, int ch) error: s->chan[ch].state = DMA_CHAN_STATE_ERROR; s->chan[ch].control |= CONTROL_ERR; - return; } static inline void sifive_pdma_update_irq(SiFivePDMAState *s, int ch) @@ -465,7 +464,7 @@ static void sifive_pdma_realize(DeviceState *dev, Error **errp) } } -static void sifive_pdma_class_init(ObjectClass *klass, void *data) +static void sifive_pdma_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = 
DEVICE_CLASS(klass); diff --git a/hw/dma/sparc32_dma.c b/hw/dma/sparc32_dma.c index 80196419427..60c23b69e5c 100644 --- a/hw/dma/sparc32_dma.c +++ b/hw/dma/sparc32_dma.c @@ -32,7 +32,7 @@ #include "hw/sparc/sun4m_iommu.h" #include "hw/sysbus.h" #include "migration/vmstate.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qapi/error.h" #include "qemu/module.h" #include "trace.h" @@ -274,11 +274,11 @@ static void sparc32_dma_device_init(Object *obj) qdev_init_gpio_out(dev, s->gpio, 2); } -static void sparc32_dma_device_class_init(ObjectClass *klass, void *data) +static void sparc32_dma_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = sparc32_dma_device_reset; + device_class_set_legacy_reset(dc, sparc32_dma_device_reset); dc->vmsd = &vmstate_sparc32_dma_device; } @@ -316,7 +316,8 @@ static void sparc32_espdma_device_realize(DeviceState *dev, Error **errp) sysbus_realize(SYS_BUS_DEVICE(sysbus), &error_fatal); } -static void sparc32_espdma_device_class_init(ObjectClass *klass, void *data) +static void sparc32_espdma_device_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -351,7 +352,8 @@ static void sparc32_ledma_device_realize(DeviceState *dev, Error **errp) sysbus_realize(SYS_BUS_DEVICE(lance), &error_fatal); } -static void sparc32_ledma_device_class_init(ObjectClass *klass, void *data) +static void sparc32_ledma_device_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -426,7 +428,7 @@ static void sparc32_dma_init(Object *obj) TYPE_SPARC32_LEDMA_DEVICE); } -static void sparc32_dma_class_init(ObjectClass *klass, void *data) +static void sparc32_dma_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c index c9cfc3169b8..2020399fd59 100644 --- a/hw/dma/xilinx_axidma.c +++ b/hw/dma/xilinx_axidma.c @@ -33,7 +33,7 @@ #include "qemu/log.h" #include "qemu/module.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/stream.h" #include "qom/object.h" #include "trace.h" @@ -611,7 +611,7 @@ static void xilinx_axidma_init(Object *obj) sysbus_init_mmio(sbd, &s->iomem); } -static Property axidma_properties[] = { +static const Property axidma_properties[] = { DEFINE_PROP_UINT32("freqhz", XilinxAXIDMA, freqhz, 50000000), DEFINE_PROP_LINK("axistream-connected", XilinxAXIDMA, tx_data_dev, TYPE_STREAM_SINK, StreamSink *), @@ -619,15 +619,14 @@ static Property axidma_properties[] = { tx_control_dev, TYPE_STREAM_SINK, StreamSink *), DEFINE_PROP_LINK("dma", XilinxAXIDMA, dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void axidma_class_init(ObjectClass *klass, void *data) +static void axidma_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->realize = xilinx_axidma_realize, - dc->reset = xilinx_axidma_reset; + dc->realize = xilinx_axidma_realize; + device_class_set_legacy_reset(dc, xilinx_axidma_reset); device_class_set_props(dc, axidma_properties); } @@ -640,7 +639,8 @@ static StreamSinkClass xilinx_axidma_control_stream_class = { .push = xilinx_axidma_control_stream_push, }; -static void xilinx_axidma_stream_class_init(ObjectClass *klass, void *data) +static void xilinx_axidma_stream_class_init(ObjectClass *klass, + const void *data) { StreamSinkClass *ssc = STREAM_SINK_CLASS(klass); @@ -662,7 +662,7 @@ static const TypeInfo xilinx_axidma_data_stream_info = { 
.instance_size = sizeof(XilinxAXIDMAStreamSink), .class_init = xilinx_axidma_stream_class_init, .class_data = &xilinx_axidma_data_stream_class, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_STREAM_SINK }, { } } @@ -674,7 +674,7 @@ static const TypeInfo xilinx_axidma_control_stream_info = { .instance_size = sizeof(XilinxAXIDMAStreamSink), .class_init = xilinx_axidma_stream_class_init, .class_data = &xilinx_axidma_control_stream_class, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_STREAM_SINK }, { } } diff --git a/hw/dma/xlnx-zdma.c b/hw/dma/xlnx-zdma.c index 670c9568669..0c075e7d0d1 100644 --- a/hw/dma/xlnx-zdma.c +++ b/hw/dma/xlnx-zdma.c @@ -810,18 +810,17 @@ static const VMStateDescription vmstate_zdma = { } }; -static Property zdma_props[] = { +static const Property zdma_props[] = { DEFINE_PROP_UINT32("bus-width", XlnxZDMA, cfg.bus_width, 64), DEFINE_PROP_LINK("dma", XlnxZDMA, dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void zdma_class_init(ObjectClass *klass, void *data) +static void zdma_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = zdma_reset; + device_class_set_legacy_reset(dc, zdma_reset); dc->realize = zdma_realize; device_class_set_props(dc, zdma_props); dc->vmsd = &vmstate_zdma; diff --git a/hw/dma/xlnx-zynq-devcfg.c b/hw/dma/xlnx-zynq-devcfg.c index e901f68ff34..26845713ee5 100644 --- a/hw/dma/xlnx-zynq-devcfg.c +++ b/hw/dma/xlnx-zynq-devcfg.c @@ -29,7 +29,7 @@ #include "hw/irq.h" #include "migration/vmstate.h" #include "qemu/bitops.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qemu/log.h" #include "qemu/module.h" @@ -380,11 +380,11 @@ static void xlnx_zynq_devcfg_init(Object *obj) sysbus_init_mmio(sbd, &s->iomem); } -static void xlnx_zynq_devcfg_class_init(ObjectClass *klass, void *data) +static void xlnx_zynq_devcfg_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = xlnx_zynq_devcfg_reset; + device_class_set_legacy_reset(dc, xlnx_zynq_devcfg_reset); dc->vmsd = &vmstate_xlnx_zynq_devcfg; } diff --git a/hw/dma/xlnx_csu_dma.c b/hw/dma/xlnx_csu_dma.c index ae307482f22..d8c7da1a501 100644 --- a/hw/dma/xlnx_csu_dma.c +++ b/hw/dma/xlnx_csu_dma.c @@ -25,7 +25,7 @@ #include "hw/qdev-properties.h" #include "hw/sysbus.h" #include "migration/vmstate.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/ptimer.h" #include "hw/stream.h" #include "hw/register.h" @@ -287,7 +287,7 @@ static uint32_t xlnx_csu_dma_advance(XlnxCSUDMA *s, uint32_t len) static void xlnx_csu_dma_src_notify(void *opaque) { XlnxCSUDMA *s = XLNX_CSU_DMA(opaque); - unsigned char buf[4 * 1024]; + QEMU_UNINITIALIZED unsigned char buf[4 * 1024]; size_t rlen = 0; ptimer_transaction_begin(s->src_timer); @@ -691,7 +691,7 @@ static const VMStateDescription vmstate_xlnx_csu_dma = { } }; -static Property xlnx_csu_dma_properties[] = { +static const Property xlnx_csu_dma_properties[] = { /* * Ref PG021, Stream Data Width: * Data width in bits of the AXI S2MM AXI4-Stream Data bus. 
@@ -710,16 +710,15 @@ static Property xlnx_csu_dma_properties[] = { TYPE_STREAM_SINK, StreamSink *), DEFINE_PROP_LINK("dma", XlnxCSUDMA, dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void xlnx_csu_dma_class_init(ObjectClass *klass, void *data) +static void xlnx_csu_dma_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); StreamSinkClass *ssc = STREAM_SINK_CLASS(klass); XlnxCSUDMAClass *xcdc = XLNX_CSU_DMA_CLASS(klass); - dc->reset = xlnx_csu_dma_reset; + device_class_set_legacy_reset(dc, xlnx_csu_dma_reset); dc->realize = xlnx_csu_dma_realize; dc->vmsd = &vmstate_xlnx_csu_dma; device_class_set_props(dc, xlnx_csu_dma_properties); @@ -745,7 +744,7 @@ static const TypeInfo xlnx_csu_dma_info = { .class_init = xlnx_csu_dma_class_init, .class_size = sizeof(XlnxCSUDMAClass), .instance_init = xlnx_csu_dma_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_STREAM_SINK }, { } } diff --git a/hw/dma/xlnx_dpdma.c b/hw/dma/xlnx_dpdma.c index a685bd28bb8..3d88ccc8da9 100644 --- a/hw/dma/xlnx_dpdma.c +++ b/hw/dma/xlnx_dpdma.c @@ -593,12 +593,12 @@ static void xlnx_dpdma_reset(DeviceState *dev) } } -static void xlnx_dpdma_class_init(ObjectClass *oc, void *data) +static void xlnx_dpdma_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->vmsd = &vmstate_xlnx_dpdma; - dc->reset = xlnx_dpdma_reset; + device_class_set_legacy_reset(dc, xlnx_dpdma_reset); } static const TypeInfo xlnx_dpdma_info = { diff --git a/hw/fsi/aspeed_apb2opb.c b/hw/fsi/aspeed_apb2opb.c index ea50718b6a2..172ba16b0c0 100644 --- a/hw/fsi/aspeed_apb2opb.c +++ b/hw/fsi/aspeed_apb2opb.c @@ -320,13 +320,13 @@ static void fsi_aspeed_apb2opb_reset(DeviceState *dev) memcpy(s->regs, aspeed_apb2opb_reset, ASPEED_APB2OPB_NR_REGS); } -static void fsi_aspeed_apb2opb_class_init(ObjectClass *klass, void *data) +static void fsi_aspeed_apb2opb_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->desc = "ASPEED APB2OPB Bridge"; dc->realize = fsi_aspeed_apb2opb_realize; - dc->reset = fsi_aspeed_apb2opb_reset; + device_class_set_legacy_reset(dc, fsi_aspeed_apb2opb_reset); } static const TypeInfo aspeed_apb2opb_info = { diff --git a/hw/fsi/cfam.c b/hw/fsi/cfam.c index c62f0f78dee..e2145c5934b 100644 --- a/hw/fsi/cfam.c +++ b/hw/fsi/cfam.c @@ -145,7 +145,7 @@ static void fsi_cfam_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(&cfam->lbus.mr, 0, &fsi_dev->iomem); } -static void fsi_cfam_class_init(ObjectClass *klass, void *data) +static void fsi_cfam_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->bus_type = TYPE_FSI_BUS; diff --git a/hw/fsi/fsi-master.c b/hw/fsi/fsi-master.c index a5f0598c98e..083a5507abc 100644 --- a/hw/fsi/fsi-master.c +++ b/hw/fsi/fsi-master.c @@ -144,14 +144,14 @@ static void fsi_master_reset(DeviceState *dev) s->regs[FSI_MVER] = 0xe0050101; } -static void fsi_master_class_init(ObjectClass *klass, void *data) +static void fsi_master_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->bus_type = TYPE_OP_BUS; dc->desc = "FSI Master"; dc->realize = fsi_master_realize; - dc->reset = fsi_master_reset; + device_class_set_legacy_reset(dc, fsi_master_reset); } static const TypeInfo fsi_master_info = { diff --git a/hw/fsi/fsi.c b/hw/fsi/fsi.c index 9a5f4e616f1..6c52d5e745e 100644 --- a/hw/fsi/fsi.c +++ b/hw/fsi/fsi.c @@ -76,13 +76,13 @@ static void 
fsi_slave_init(Object *o) s, TYPE_FSI_SLAVE, 0x400); } -static void fsi_slave_class_init(ObjectClass *klass, void *data) +static void fsi_slave_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->bus_type = TYPE_FSI_BUS; dc->desc = "FSI Slave"; - dc->reset = fsi_slave_reset; + device_class_set_legacy_reset(dc, fsi_slave_reset); } static const TypeInfo fsi_slave_info = { diff --git a/hw/fsi/lbus.c b/hw/fsi/lbus.c index 20495f42fd9..8ec7f5fd780 100644 --- a/hw/fsi/lbus.c +++ b/hw/fsi/lbus.c @@ -91,13 +91,13 @@ static void fsi_scratchpad_reset(DeviceState *dev) memset(s->regs, 0, sizeof(s->regs)); } -static void fsi_scratchpad_class_init(ObjectClass *klass, void *data) +static void fsi_scratchpad_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->bus_type = TYPE_FSI_LBUS; dc->realize = fsi_scratchpad_realize; - dc->reset = fsi_scratchpad_reset; + device_class_set_legacy_reset(dc, fsi_scratchpad_reset); } static const TypeInfo fsi_scratchpad_info = { diff --git a/hw/gpio/Kconfig b/hw/gpio/Kconfig index 19c97cc823f..a209294c20c 100644 --- a/hw/gpio/Kconfig +++ b/hw/gpio/Kconfig @@ -1,7 +1,3 @@ -config MAX7310 - bool - depends on I2C - config PL061 bool @@ -20,6 +16,17 @@ config SIFIVE_GPIO config STM32L4X5_GPIO bool +config PCA9552 + bool + depends on I2C + +config PCA9554 + bool + depends on I2C + config PCF8574 bool depends on I2C + +config ZAURUS_SCOOP + bool diff --git a/hw/gpio/aspeed_gpio.c b/hw/gpio/aspeed_gpio.c index 6474bb8de5b..609a556908f 100644 --- a/hw/gpio/aspeed_gpio.c +++ b/hw/gpio/aspeed_gpio.c @@ -227,6 +227,38 @@ REG32(GPIO_INDEX_REG, 0x2AC) FIELD(GPIO_INDEX_REG, COMMAND_SRC_1, 21, 1) FIELD(GPIO_INDEX_REG, INPUT_MASK, 20, 1) +/* AST2700 GPIO Register Address Offsets */ +REG32(GPIO_2700_DEBOUNCE_TIME_1, 0x000) +REG32(GPIO_2700_DEBOUNCE_TIME_2, 0x004) +REG32(GPIO_2700_DEBOUNCE_TIME_3, 0x008) +REG32(GPIO_2700_INT_STATUS_1, 0x100) +REG32(GPIO_2700_INT_STATUS_2, 0x104) +REG32(GPIO_2700_INT_STATUS_3, 0x108) +REG32(GPIO_2700_INT_STATUS_4, 0x10C) +REG32(GPIO_2700_INT_STATUS_5, 0x110) +REG32(GPIO_2700_INT_STATUS_6, 0x114) +REG32(GPIO_2700_INT_STATUS_7, 0x118) +/* GPIOA0 - GPIOAA7 Control Register */ +REG32(GPIO_A0_CONTROL, 0x180) + SHARED_FIELD(GPIO_CONTROL_OUT_DATA, 0, 1) + SHARED_FIELD(GPIO_CONTROL_DIRECTION, 1, 1) + SHARED_FIELD(GPIO_CONTROL_INT_ENABLE, 2, 1) + SHARED_FIELD(GPIO_CONTROL_INT_SENS_0, 3, 1) + SHARED_FIELD(GPIO_CONTROL_INT_SENS_1, 4, 1) + SHARED_FIELD(GPIO_CONTROL_INT_SENS_2, 5, 1) + SHARED_FIELD(GPIO_CONTROL_RESET_TOLERANCE, 6, 1) + SHARED_FIELD(GPIO_CONTROL_DEBOUNCE_1, 7, 1) + SHARED_FIELD(GPIO_CONTROL_DEBOUNCE_2, 8, 1) + SHARED_FIELD(GPIO_CONTROL_INPUT_MASK, 9, 1) + SHARED_FIELD(GPIO_CONTROL_BLINK_COUNTER_1, 10, 1) + SHARED_FIELD(GPIO_CONTROL_BLINK_COUNTER_2, 11, 1) + SHARED_FIELD(GPIO_CONTROL_INT_STATUS, 12, 1) + SHARED_FIELD(GPIO_CONTROL_IN_DATA, 13, 1) + SHARED_FIELD(GPIO_CONTROL_RESERVED, 14, 18) +REG32(GPIO_AA7_CONTROL, 0x4DC) +#define GPIO_2700_MEM_SIZE 0x4E0 +#define GPIO_2700_REG_ARRAY_SIZE (GPIO_2700_MEM_SIZE >> 2) + static int aspeed_evaluate_irq(GPIOSets *regs, int gpio_prev_high, int gpio) { uint32_t falling_edge = 0, rising_edge = 0; @@ -281,7 +313,7 @@ static void aspeed_gpio_update(AspeedGPIOState *s, GPIOSets *regs, diff &= mode_mask; if (diff) { for (gpio = 0; gpio < ASPEED_GPIOS_PER_SET; gpio++) { - uint32_t mask = 1 << gpio; + uint32_t mask = 1U << gpio; /* If the gpio needs to be updated... 
*/ if (!(diff & mask)) { @@ -340,7 +372,8 @@ static void aspeed_gpio_set_pin_level(AspeedGPIOState *s, uint32_t set_idx, value &= ~pin_mask; } - aspeed_gpio_update(s, &s->sets[set_idx], value, ~s->sets[set_idx].direction); + aspeed_gpio_update(s, &s->sets[set_idx], value, + ~s->sets[set_idx].direction); } /* @@ -629,7 +662,6 @@ static uint64_t aspeed_gpio_read(void *opaque, hwaddr offset, uint32_t size) static void aspeed_gpio_write_index_mode(void *opaque, hwaddr offset, uint64_t data, uint32_t size) { - AspeedGPIOState *s = ASPEED_GPIO(opaque); AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); const GPIOSetProperties *props; @@ -641,7 +673,7 @@ static void aspeed_gpio_write_index_mode(void *opaque, hwaddr offset, uint32_t pin_idx = reg_idx_number % ASPEED_GPIOS_PER_SET; uint32_t group_idx = pin_idx / GPIOS_PER_GROUP; uint32_t reg_value = 0; - uint32_t cleared; + uint32_t pending = 0; set = &s->sets[set_idx]; props = &agc->props[set_idx]; @@ -703,16 +735,23 @@ static void aspeed_gpio_write_index_mode(void *opaque, hwaddr offset, FIELD_EX32(data, GPIO_INDEX_REG, INT_SENS_2)); set->int_sens_2 = update_value_control_source(set, set->int_sens_2, reg_value); - /* set interrupt status */ - reg_value = set->int_status; - reg_value = deposit32(reg_value, pin_idx, 1, - FIELD_EX32(data, GPIO_INDEX_REG, INT_STATUS)); - cleared = ctpop32(reg_value & set->int_status); - if (s->pending && cleared) { - assert(s->pending >= cleared); - s->pending -= cleared; + /* interrupt status */ + if (FIELD_EX32(data, GPIO_INDEX_REG, INT_STATUS)) { + /* pending is either 1 or 0 for a 1-bit field */ + pending = extract32(set->int_status, pin_idx, 1); + + assert(s->pending >= pending); + + /* No change to s->pending if pending is 0 */ + s->pending -= pending; + + /* + * The write acknowledged the interrupt regardless of whether it + * was pending or not. The post-condition is that it mustn't be + * pending. Unconditionally clear the status bit. 
+ */ + set->int_status = deposit32(set->int_status, pin_idx, 1, 0); } - set->int_status &= ~reg_value; break; case gpio_reg_idx_debounce: reg_value = set->debounce_1; @@ -761,7 +800,6 @@ static void aspeed_gpio_write_index_mode(void *opaque, hwaddr offset, return; } aspeed_gpio_update(s, set, set->data_value, UINT32_MAX); - return; } static void aspeed_gpio_write(void *opaque, hwaddr offset, uint64_t data, @@ -889,7 +927,6 @@ static void aspeed_gpio_write(void *opaque, hwaddr offset, uint64_t data, return; } aspeed_gpio_update(s, set, set->data_value, UINT32_MAX); - return; } static int get_set_idx(AspeedGPIOState *s, const char *group, int *group_idx) @@ -963,7 +1000,314 @@ static void aspeed_gpio_set_pin(Object *obj, Visitor *v, const char *name, aspeed_gpio_set_pin_level(s, set_idx, pin, level); } -/****************** Setup functions ******************/ +static uint64_t aspeed_gpio_2700_read_control_reg(AspeedGPIOState *s, + uint32_t pin) +{ + AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); + GPIOSets *set; + uint64_t value = 0; + uint32_t set_idx; + uint32_t pin_idx; + + set_idx = pin / ASPEED_GPIOS_PER_SET; + pin_idx = pin % ASPEED_GPIOS_PER_SET; + + if (set_idx >= agc->nr_gpio_sets) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: set index: %d, out of bounds\n", + __func__, set_idx); + return 0; + } + + set = &s->sets[set_idx]; + value = SHARED_FIELD_DP32(value, GPIO_CONTROL_OUT_DATA, + extract32(set->data_read, pin_idx, 1)); + value = SHARED_FIELD_DP32(value, GPIO_CONTROL_DIRECTION, + extract32(set->direction, pin_idx, 1)); + value = SHARED_FIELD_DP32(value, GPIO_CONTROL_INT_ENABLE, + extract32(set->int_enable, pin_idx, 1)); + value = SHARED_FIELD_DP32(value, GPIO_CONTROL_INT_SENS_0, + extract32(set->int_sens_0, pin_idx, 1)); + value = SHARED_FIELD_DP32(value, GPIO_CONTROL_INT_SENS_1, + extract32(set->int_sens_1, pin_idx, 1)); + value = SHARED_FIELD_DP32(value, GPIO_CONTROL_INT_SENS_2, + extract32(set->int_sens_2, pin_idx, 1)); + value = SHARED_FIELD_DP32(value, GPIO_CONTROL_RESET_TOLERANCE, + extract32(set->reset_tol, pin_idx, 1)); + value = SHARED_FIELD_DP32(value, GPIO_CONTROL_DEBOUNCE_1, + extract32(set->debounce_1, pin_idx, 1)); + value = SHARED_FIELD_DP32(value, GPIO_CONTROL_DEBOUNCE_2, + extract32(set->debounce_2, pin_idx, 1)); + value = SHARED_FIELD_DP32(value, GPIO_CONTROL_INPUT_MASK, + extract32(set->input_mask, pin_idx, 1)); + value = SHARED_FIELD_DP32(value, GPIO_CONTROL_INT_STATUS, + extract32(set->int_status, pin_idx, 1)); + value = SHARED_FIELD_DP32(value, GPIO_CONTROL_IN_DATA, + extract32(set->data_value, pin_idx, 1)); + return value; +} + +static void aspeed_gpio_2700_write_control_reg(AspeedGPIOState *s, + uint32_t pin, uint64_t data) +{ + AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); + const GPIOSetProperties *props; + GPIOSets *set; + uint32_t set_idx; + uint32_t pin_idx; + uint32_t group_value = 0; + uint32_t pending = 0; + + set_idx = pin / ASPEED_GPIOS_PER_SET; + pin_idx = pin % ASPEED_GPIOS_PER_SET; + + if (set_idx >= agc->nr_gpio_sets) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: set index: %d, out of bounds\n", + __func__, set_idx); + return; + } + + set = &s->sets[set_idx]; + props = &agc->props[set_idx]; + + /* direction */ + group_value = set->direction; + group_value = deposit32(group_value, pin_idx, 1, + SHARED_FIELD_EX32(data, GPIO_CONTROL_DIRECTION)); + /* + * where data is the value attempted to be written to the pin: + * pin type | input mask | output mask | expected value + * ------------------------------------------------------------ + * bidirectional | 
1 | 1 | data + * input only | 1 | 0 | 0 + * output only | 0 | 1 | 1 + * no pin | 0 | 0 | 0 + * + * which is captured by: + * data = ( data | ~input) & output; + */ + group_value = (group_value | ~props->input) & props->output; + set->direction = update_value_control_source(set, set->direction, + group_value); + + /* out data */ + group_value = set->data_read; + group_value = deposit32(group_value, pin_idx, 1, + SHARED_FIELD_EX32(data, GPIO_CONTROL_OUT_DATA)); + group_value &= props->output; + group_value = update_value_control_source(set, set->data_read, + group_value); + set->data_read = group_value; + + /* interrupt enable */ + group_value = set->int_enable; + group_value = deposit32(group_value, pin_idx, 1, + SHARED_FIELD_EX32(data, GPIO_CONTROL_INT_ENABLE)); + set->int_enable = update_value_control_source(set, set->int_enable, + group_value); + + /* interrupt sensitivity type 0 */ + group_value = set->int_sens_0; + group_value = deposit32(group_value, pin_idx, 1, + SHARED_FIELD_EX32(data, GPIO_CONTROL_INT_SENS_0)); + set->int_sens_0 = update_value_control_source(set, set->int_sens_0, + group_value); + + /* interrupt sensitivity type 1 */ + group_value = set->int_sens_1; + group_value = deposit32(group_value, pin_idx, 1, + SHARED_FIELD_EX32(data, GPIO_CONTROL_INT_SENS_1)); + set->int_sens_1 = update_value_control_source(set, set->int_sens_1, + group_value); + + /* interrupt sensitivity type 2 */ + group_value = set->int_sens_2; + group_value = deposit32(group_value, pin_idx, 1, + SHARED_FIELD_EX32(data, GPIO_CONTROL_INT_SENS_2)); + set->int_sens_2 = update_value_control_source(set, set->int_sens_2, + group_value); + + /* reset tolerance enable */ + group_value = set->reset_tol; + group_value = deposit32(group_value, pin_idx, 1, + SHARED_FIELD_EX32(data, GPIO_CONTROL_RESET_TOLERANCE)); + set->reset_tol = update_value_control_source(set, set->reset_tol, + group_value); + + /* debounce 1 */ + group_value = set->debounce_1; + group_value = deposit32(group_value, pin_idx, 1, + SHARED_FIELD_EX32(data, GPIO_CONTROL_DEBOUNCE_1)); + set->debounce_1 = update_value_control_source(set, set->debounce_1, + group_value); + + /* debounce 2 */ + group_value = set->debounce_2; + group_value = deposit32(group_value, pin_idx, 1, + SHARED_FIELD_EX32(data, GPIO_CONTROL_DEBOUNCE_2)); + set->debounce_2 = update_value_control_source(set, set->debounce_2, + group_value); + + /* input mask */ + group_value = set->input_mask; + group_value = deposit32(group_value, pin_idx, 1, + SHARED_FIELD_EX32(data, GPIO_CONTROL_INPUT_MASK)); + /* + * feeds into interrupt generation + * 0: read from data value reg will be updated + * 1: read from data value reg will not be updated + */ + set->input_mask = group_value & props->input; + + /* blink counter 1 */ + /* blink counter 2 */ + /* unimplement */ + + /* interrupt status */ + if (SHARED_FIELD_EX32(data, GPIO_CONTROL_INT_STATUS)) { + /* pending is either 1 or 0 for a 1-bit field */ + pending = extract32(set->int_status, pin_idx, 1); + + assert(s->pending >= pending); + + /* No change to s->pending if pending is 0 */ + s->pending -= pending; + + /* + * The write acknowledged the interrupt regardless of whether it + * was pending or not. The post-condition is that it mustn't be + * pending. Unconditionally clear the status bit. 
+ */ + set->int_status = deposit32(set->int_status, pin_idx, 1, 0); + } + + aspeed_gpio_update(s, set, set->data_value, UINT32_MAX); +} + +static uint64_t aspeed_gpio_2700_read(void *opaque, hwaddr offset, + uint32_t size) +{ + AspeedGPIOState *s = ASPEED_GPIO(opaque); + AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); + GPIOSets *set; + uint64_t value; + uint64_t reg; + uint32_t pin; + uint32_t idx; + + reg = offset >> 2; + + if (reg >= agc->reg_table_count) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: offset 0x%" PRIx64 " out of bounds\n", + __func__, offset); + return 0; + } + + switch (reg) { + case R_GPIO_2700_DEBOUNCE_TIME_1 ... R_GPIO_2700_DEBOUNCE_TIME_3: + idx = reg - R_GPIO_2700_DEBOUNCE_TIME_1; + + if (idx >= ASPEED_GPIO_NR_DEBOUNCE_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: debounce index: %d, out of bounds\n", + __func__, idx); + return 0; + } + + value = (uint64_t) s->debounce_regs[idx]; + break; + case R_GPIO_2700_INT_STATUS_1 ... R_GPIO_2700_INT_STATUS_7: + idx = reg - R_GPIO_2700_INT_STATUS_1; + + if (idx >= agc->nr_gpio_sets) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: interrupt status index: %d, out of bounds\n", + __func__, idx); + return 0; + } + + set = &s->sets[idx]; + value = (uint64_t) set->int_status; + break; + case R_GPIO_A0_CONTROL ... R_GPIO_AA7_CONTROL: + pin = reg - R_GPIO_A0_CONTROL; + + if (pin >= agc->nr_gpio_pins) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid pin number: %d\n", + __func__, pin); + return 0; + } + + value = aspeed_gpio_2700_read_control_reg(s, pin); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: no getter for offset 0x%" + PRIx64"\n", __func__, offset); + return 0; + } + + trace_aspeed_gpio_read(offset, value); + return value; +} + +static void aspeed_gpio_2700_write(void *opaque, hwaddr offset, + uint64_t data, uint32_t size) +{ + AspeedGPIOState *s = ASPEED_GPIO(opaque); + AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); + uint64_t reg; + uint32_t pin; + uint32_t idx; + + trace_aspeed_gpio_write(offset, data); + + reg = offset >> 2; + + if (reg >= agc->reg_table_count) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: offset 0x%" PRIx64 " out of bounds\n", + __func__, offset); + return; + } + + switch (reg) { + case R_GPIO_2700_DEBOUNCE_TIME_1 ... R_GPIO_2700_DEBOUNCE_TIME_3: + idx = reg - R_GPIO_2700_DEBOUNCE_TIME_1; + + if (idx >= ASPEED_GPIO_NR_DEBOUNCE_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: debounce index: %d out of bounds\n", + __func__, idx); + return; + } + + s->debounce_regs[idx] = (uint32_t) data; + break; + case R_GPIO_A0_CONTROL ... 
R_GPIO_AA7_CONTROL: + pin = reg - R_GPIO_A0_CONTROL; + + if (pin >= agc->nr_gpio_pins) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid pin number: %d\n", + __func__, pin); + return; + } + + if (SHARED_FIELD_EX32(data, GPIO_CONTROL_RESERVED)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid reserved data: 0x%" + PRIx64"\n", __func__, data); + return; + } + + aspeed_gpio_2700_write_control_reg(s, pin, data); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: no setter for offset 0x%" + PRIx64"\n", __func__, offset); + break; + } +} + +/* Setup functions */ static const GPIOSetProperties ast2400_set_props[ASPEED_GPIO_MAX_NR_SETS] = { [0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} }, [1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} }, @@ -1009,6 +1353,16 @@ static GPIOSetProperties ast1030_set_props[ASPEED_GPIO_MAX_NR_SETS] = { [5] = {0x000000ff, 0x00000000, {"U"} }, }; +static GPIOSetProperties ast2700_set_props[ASPEED_GPIO_MAX_NR_SETS] = { + [0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} }, + [1] = {0x0fffffff, 0x0fffffff, {"E", "F", "G", "H"} }, + [2] = {0xffffffff, 0xffffffff, {"I", "J", "K", "L"} }, + [3] = {0xffffffff, 0xffffffff, {"M", "N", "O", "P"} }, + [4] = {0xffffffff, 0xffffffff, {"Q", "R", "S", "T"} }, + [5] = {0xffffffff, 0xffffffff, {"U", "V", "W", "X"} }, + [6] = {0x00ffffff, 0x00ffffff, {"Y", "Z", "AA"} }, +}; + static const MemoryRegionOps aspeed_gpio_ops = { .read = aspeed_gpio_read, .write = aspeed_gpio_write, @@ -1017,6 +1371,14 @@ static const MemoryRegionOps aspeed_gpio_ops = { .valid.max_access_size = 4, }; +static const MemoryRegionOps aspeed_gpio_2700_ops = { + .read = aspeed_gpio_2700_read, + .write = aspeed_gpio_2700_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + static void aspeed_gpio_reset(DeviceState *dev) { AspeedGPIOState *s = ASPEED_GPIO(dev); @@ -1046,8 +1408,8 @@ static void aspeed_gpio_realize(DeviceState *dev, Error **errp) } } - memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_gpio_ops, s, - TYPE_ASPEED_GPIO, 0x800); + memory_region_init_io(&s->iomem, OBJECT(s), agc->reg_ops, s, + TYPE_ASPEED_GPIO, agc->mem_size); sysbus_init_mmio(sbd, &s->iomem); } @@ -1111,17 +1473,17 @@ static const VMStateDescription vmstate_aspeed_gpio = { } }; -static void aspeed_gpio_class_init(ObjectClass *klass, void *data) +static void aspeed_gpio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_gpio_realize; - dc->reset = aspeed_gpio_reset; + device_class_set_legacy_reset(dc, aspeed_gpio_reset); dc->desc = "Aspeed GPIO Controller"; dc->vmsd = &vmstate_aspeed_gpio; } -static void aspeed_gpio_ast2400_class_init(ObjectClass *klass, void *data) +static void aspeed_gpio_ast2400_class_init(ObjectClass *klass, const void *data) { AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass); @@ -1130,9 +1492,11 @@ static void aspeed_gpio_ast2400_class_init(ObjectClass *klass, void *data) agc->nr_gpio_sets = 7; agc->reg_table = aspeed_3_3v_gpios; agc->reg_table_count = GPIO_3_3V_REG_ARRAY_SIZE; + agc->mem_size = 0x1000; + agc->reg_ops = &aspeed_gpio_ops; } -static void aspeed_gpio_2500_class_init(ObjectClass *klass, void *data) +static void aspeed_gpio_2500_class_init(ObjectClass *klass, const void *data) { AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass); @@ -1141,9 +1505,12 @@ static void aspeed_gpio_2500_class_init(ObjectClass *klass, void *data) agc->nr_gpio_sets = 8; agc->reg_table = aspeed_3_3v_gpios; agc->reg_table_count = 
GPIO_3_3V_REG_ARRAY_SIZE; + agc->mem_size = 0x1000; + agc->reg_ops = &aspeed_gpio_ops; } -static void aspeed_gpio_ast2600_3_3v_class_init(ObjectClass *klass, void *data) +static void aspeed_gpio_ast2600_3_3v_class_init(ObjectClass *klass, + const void *data) { AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass); @@ -1152,9 +1519,12 @@ static void aspeed_gpio_ast2600_3_3v_class_init(ObjectClass *klass, void *data) agc->nr_gpio_sets = 7; agc->reg_table = aspeed_3_3v_gpios; agc->reg_table_count = GPIO_3_3V_REG_ARRAY_SIZE; + agc->mem_size = 0x800; + agc->reg_ops = &aspeed_gpio_ops; } -static void aspeed_gpio_ast2600_1_8v_class_init(ObjectClass *klass, void *data) +static void aspeed_gpio_ast2600_1_8v_class_init(ObjectClass *klass, + const void *data) { AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass); @@ -1163,9 +1533,11 @@ static void aspeed_gpio_ast2600_1_8v_class_init(ObjectClass *klass, void *data) agc->nr_gpio_sets = 2; agc->reg_table = aspeed_1_8v_gpios; agc->reg_table_count = GPIO_1_8V_REG_ARRAY_SIZE; + agc->mem_size = 0x800; + agc->reg_ops = &aspeed_gpio_ops; } -static void aspeed_gpio_1030_class_init(ObjectClass *klass, void *data) +static void aspeed_gpio_1030_class_init(ObjectClass *klass, const void *data) { AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass); @@ -1174,6 +1546,20 @@ static void aspeed_gpio_1030_class_init(ObjectClass *klass, void *data) agc->nr_gpio_sets = 6; agc->reg_table = aspeed_3_3v_gpios; agc->reg_table_count = GPIO_3_3V_REG_ARRAY_SIZE; + agc->mem_size = 0x1000; + agc->reg_ops = &aspeed_gpio_ops; +} + +static void aspeed_gpio_2700_class_init(ObjectClass *klass, const void *data) +{ + AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass); + + agc->props = ast2700_set_props; + agc->nr_gpio_pins = 216; + agc->nr_gpio_sets = 7; + agc->reg_table_count = GPIO_2700_REG_ARRAY_SIZE; + agc->mem_size = 0x1000; + agc->reg_ops = &aspeed_gpio_2700_ops; } static const TypeInfo aspeed_gpio_info = { @@ -1220,6 +1606,13 @@ static const TypeInfo aspeed_gpio_ast1030_info = { .instance_init = aspeed_gpio_init, }; +static const TypeInfo aspeed_gpio_ast2700_info = { + .name = TYPE_ASPEED_GPIO "-ast2700", + .parent = TYPE_ASPEED_GPIO, + .class_init = aspeed_gpio_2700_class_init, + .instance_init = aspeed_gpio_init, +}; + static void aspeed_gpio_register_types(void) { type_register_static(&aspeed_gpio_info); @@ -1228,6 +1621,7 @@ static void aspeed_gpio_register_types(void) type_register_static(&aspeed_gpio_ast2600_3_3v_info); type_register_static(&aspeed_gpio_ast2600_1_8v_info); type_register_static(&aspeed_gpio_ast1030_info); + type_register_static(&aspeed_gpio_ast2700_info); } type_init(aspeed_gpio_register_types); diff --git a/hw/gpio/bcm2835_gpio.c b/hw/gpio/bcm2835_gpio.c index 6bd50bb0b69..dfb5d5cb578 100644 --- a/hw/gpio/bcm2835_gpio.c +++ b/hw/gpio/bcm2835_gpio.c @@ -319,13 +319,13 @@ static void bcm2835_gpio_realize(DeviceState *dev, Error **errp) s->sdbus_sdhost = SD_BUS(obj); } -static void bcm2835_gpio_class_init(ObjectClass *klass, void *data) +static void bcm2835_gpio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_bcm2835_gpio; dc->realize = &bcm2835_gpio_realize; - dc->reset = &bcm2835_gpio_reset; + device_class_set_legacy_reset(dc, bcm2835_gpio_reset); } static const TypeInfo bcm2835_gpio_info = { diff --git a/hw/gpio/bcm2838_gpio.c b/hw/gpio/bcm2838_gpio.c index 2ddf62f6959..1069e7811bb 100644 --- a/hw/gpio/bcm2838_gpio.c +++ b/hw/gpio/bcm2838_gpio.c @@ -293,7 +293,6 @@ static void bcm2838_gpio_write(void *opaque, 
hwaddr offset, uint64_t value, qemu_log_mask(LOG_GUEST_ERROR, "%s: %s: bad offset %"HWADDR_PRIx"\n", TYPE_BCM2838_GPIO, __func__, offset); } - return; } static void bcm2838_gpio_reset(DeviceState *dev) @@ -365,13 +364,13 @@ static void bcm2838_gpio_realize(DeviceState *dev, Error **errp) s->sdbus_sdhost = SD_BUS(obj); } -static void bcm2838_gpio_class_init(ObjectClass *klass, void *data) +static void bcm2838_gpio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_bcm2838_gpio; dc->realize = &bcm2838_gpio_realize; - dc->reset = &bcm2838_gpio_reset; + device_class_set_legacy_reset(dc, bcm2838_gpio_reset); } static const TypeInfo bcm2838_gpio_info = { diff --git a/hw/gpio/gpio_key.c b/hw/gpio/gpio_key.c index 61bb5870589..40c028bed9e 100644 --- a/hw/gpio/gpio_key.c +++ b/hw/gpio/gpio_key.c @@ -85,13 +85,13 @@ static void gpio_key_realize(DeviceState *dev, Error **errp) s->timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, gpio_key_timer_expired, s); } -static void gpio_key_class_init(ObjectClass *klass, void *data) +static void gpio_key_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = gpio_key_realize; dc->vmsd = &vmstate_gpio_key; - dc->reset = &gpio_key_reset; + device_class_set_legacy_reset(dc, gpio_key_reset); } static const TypeInfo gpio_key_info = { diff --git a/hw/gpio/gpio_pwr.c b/hw/gpio/gpio_pwr.c index dbaf1c70c88..2d14f8b344c 100644 --- a/hw/gpio/gpio_pwr.c +++ b/hw/gpio/gpio_pwr.c @@ -24,7 +24,7 @@ #include "qemu/osdep.h" #include "hw/sysbus.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #define TYPE_GPIOPWR "gpio-pwr" OBJECT_DECLARE_SIMPLE_TYPE(GPIO_PWR_State, GPIOPWR) diff --git a/hw/gpio/imx_gpio.c b/hw/gpio/imx_gpio.c index e53b00d951d..450ece45482 100644 --- a/hw/gpio/imx_gpio.c +++ b/hw/gpio/imx_gpio.c @@ -24,6 +24,7 @@ #include "migration/vmstate.h" #include "qemu/log.h" #include "qemu/module.h" +#include "trace.h" #ifndef DEBUG_IMX_GPIO #define DEBUG_IMX_GPIO 0 @@ -34,14 +35,6 @@ typedef enum IMXGPIOLevel { IMX_GPIO_LEVEL_HIGH = 1, } IMXGPIOLevel; -#define DPRINTF(fmt, args...) \ - do { \ - if (DEBUG_IMX_GPIO) { \ - fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_GPIO, \ - __func__, ##args); \ - } \ - } while (0) - static const char *imx_gpio_reg_name(uint32_t reg) { switch (reg) { @@ -79,7 +72,7 @@ static void imx_gpio_update_int(IMXGPIOState *s) static void imx_gpio_set_int_line(IMXGPIOState *s, int line, IMXGPIOLevel level) { /* if this signal isn't configured as an input signal, nothing to do */ - if (!extract32(s->gdir, line, 1)) { + if (extract32(s->gdir, line, 1)) { return; } @@ -111,6 +104,8 @@ static void imx_gpio_set(void *opaque, int line, int level) IMXGPIOState *s = IMX_GPIO(opaque); IMXGPIOLevel imx_level = level ? 
IMX_GPIO_LEVEL_HIGH : IMX_GPIO_LEVEL_LOW; + trace_imx_gpio_set(DEVICE(s)->canonical_path, line, imx_level); + imx_gpio_set_int_line(s, line, imx_level); /* this is an input signal, so set PSR */ @@ -200,7 +195,8 @@ static uint64_t imx_gpio_read(void *opaque, hwaddr offset, unsigned size) break; } - DPRINTF("(%s) = 0x%" PRIx32 "\n", imx_gpio_reg_name(offset), reg_value); + trace_imx_gpio_read(DEVICE(s)->canonical_path, imx_gpio_reg_name(offset), + reg_value); return reg_value; } @@ -210,8 +206,8 @@ static void imx_gpio_write(void *opaque, hwaddr offset, uint64_t value, { IMXGPIOState *s = IMX_GPIO(opaque); - DPRINTF("(%s, value = 0x%" PRIx32 ")\n", imx_gpio_reg_name(offset), - (uint32_t)value); + trace_imx_gpio_write(DEVICE(s)->canonical_path, imx_gpio_reg_name(offset), + value); switch (offset) { case DR_ADDR: @@ -261,8 +257,6 @@ static void imx_gpio_write(void *opaque, hwaddr offset, uint64_t value, HWADDR_PRIx "\n", TYPE_IMX_GPIO, __func__, offset); break; } - - return; } static const MemoryRegionOps imx_gpio_ops = { @@ -290,11 +284,10 @@ static const VMStateDescription vmstate_imx_gpio = { } }; -static Property imx_gpio_properties[] = { +static const Property imx_gpio_properties[] = { DEFINE_PROP_BOOL("has-edge-sel", IMXGPIOState, has_edge_sel, true), DEFINE_PROP_BOOL("has-upper-pin-irq", IMXGPIOState, has_upper_pin_irq, false), - DEFINE_PROP_END_OF_LIST(), }; static void imx_gpio_reset(DeviceState *dev) @@ -328,12 +321,12 @@ static void imx_gpio_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); } -static void imx_gpio_class_init(ObjectClass *klass, void *data) +static void imx_gpio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = imx_gpio_realize; - dc->reset = imx_gpio_reset; + device_class_set_legacy_reset(dc, imx_gpio_reset); device_class_set_props(dc, imx_gpio_properties); dc->vmsd = &vmstate_imx_gpio; dc->desc = "i.MX GPIO controller"; diff --git a/hw/gpio/max7310.c b/hw/gpio/max7310.c deleted file mode 100644 index 86315714fbd..00000000000 --- a/hw/gpio/max7310.c +++ /dev/null @@ -1,217 +0,0 @@ -/* - * MAX7310 8-port GPIO expansion chip. - * - * Copyright (c) 2006 Openedhand Ltd. - * Written by Andrzej Zaborowski - * - * This file is licensed under GNU GPL. 
- */ - -#include "qemu/osdep.h" -#include "hw/i2c/i2c.h" -#include "hw/irq.h" -#include "migration/vmstate.h" -#include "qemu/log.h" -#include "qemu/module.h" -#include "qom/object.h" - -#define TYPE_MAX7310 "max7310" -OBJECT_DECLARE_SIMPLE_TYPE(MAX7310State, MAX7310) - -struct MAX7310State { - I2CSlave parent_obj; - - int i2c_command_byte; - int len; - - uint8_t level; - uint8_t direction; - uint8_t polarity; - uint8_t status; - uint8_t command; - qemu_irq handler[8]; - qemu_irq *gpio_in; -}; - -static void max7310_reset(DeviceState *dev) -{ - MAX7310State *s = MAX7310(dev); - - s->level &= s->direction; - s->direction = 0xff; - s->polarity = 0xf0; - s->status = 0x01; - s->command = 0x00; -} - -static uint8_t max7310_rx(I2CSlave *i2c) -{ - MAX7310State *s = MAX7310(i2c); - - switch (s->command) { - case 0x00: /* Input port */ - return s->level ^ s->polarity; - - case 0x01: /* Output port */ - return s->level & ~s->direction; - - case 0x02: /* Polarity inversion */ - return s->polarity; - - case 0x03: /* Configuration */ - return s->direction; - - case 0x04: /* Timeout */ - return s->status; - - case 0xff: /* Reserved */ - return 0xff; - - default: - qemu_log_mask(LOG_UNIMP, "%s: Unsupported register 0x02%" PRIx8 "\n", - __func__, s->command); - break; - } - return 0xff; -} - -static int max7310_tx(I2CSlave *i2c, uint8_t data) -{ - MAX7310State *s = MAX7310(i2c); - uint8_t diff; - int line; - - if (s->len ++ > 1) { -#ifdef VERBOSE - printf("%s: message too long (%i bytes)\n", __func__, s->len); -#endif - return 1; - } - - if (s->i2c_command_byte) { - s->command = data; - s->i2c_command_byte = 0; - return 0; - } - - switch (s->command) { - case 0x01: /* Output port */ - for (diff = (data ^ s->level) & ~s->direction; diff; - diff &= ~(1 << line)) { - line = ctz32(diff); - if (s->handler[line]) - qemu_set_irq(s->handler[line], (data >> line) & 1); - } - s->level = (s->level & s->direction) | (data & ~s->direction); - break; - - case 0x02: /* Polarity inversion */ - s->polarity = data; - break; - - case 0x03: /* Configuration */ - s->level &= ~(s->direction ^ data); - s->direction = data; - break; - - case 0x04: /* Timeout */ - s->status = data; - break; - - case 0x00: /* Input port - ignore writes */ - break; - default: - qemu_log_mask(LOG_UNIMP, "%s: Unsupported register 0x02%" PRIx8 "\n", - __func__, s->command); - return 1; - } - - return 0; -} - -static int max7310_event(I2CSlave *i2c, enum i2c_event event) -{ - MAX7310State *s = MAX7310(i2c); - s->len = 0; - - switch (event) { - case I2C_START_SEND: - s->i2c_command_byte = 1; - break; - case I2C_FINISH: -#ifdef VERBOSE - if (s->len == 1) - printf("%s: message too short (%i bytes)\n", __func__, s->len); -#endif - break; - default: - break; - } - - return 0; -} - -static const VMStateDescription vmstate_max7310 = { - .name = "max7310", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_INT32(i2c_command_byte, MAX7310State), - VMSTATE_INT32(len, MAX7310State), - VMSTATE_UINT8(level, MAX7310State), - VMSTATE_UINT8(direction, MAX7310State), - VMSTATE_UINT8(polarity, MAX7310State), - VMSTATE_UINT8(status, MAX7310State), - VMSTATE_UINT8(command, MAX7310State), - VMSTATE_I2C_SLAVE(parent_obj, MAX7310State), - VMSTATE_END_OF_LIST() - } -}; - -static void max7310_gpio_set(void *opaque, int line, int level) -{ - MAX7310State *s = (MAX7310State *) opaque; - assert(line >= 0 && line < ARRAY_SIZE(s->handler)); - - if (level) - s->level |= s->direction & (1 << line); - else - s->level &= ~(s->direction & (1 << 
line)); -} - -/* MAX7310 is SMBus-compatible (can be used with only SMBus protocols), - * but also accepts sequences that are not SMBus so return an I2C device. */ -static void max7310_realize(DeviceState *dev, Error **errp) -{ - MAX7310State *s = MAX7310(dev); - - qdev_init_gpio_in(dev, max7310_gpio_set, ARRAY_SIZE(s->handler)); - qdev_init_gpio_out(dev, s->handler, ARRAY_SIZE(s->handler)); -} - -static void max7310_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); - - dc->realize = max7310_realize; - k->event = max7310_event; - k->recv = max7310_rx; - k->send = max7310_tx; - dc->reset = max7310_reset; - dc->vmsd = &vmstate_max7310; -} - -static const TypeInfo max7310_info = { - .name = TYPE_MAX7310, - .parent = TYPE_I2C_SLAVE, - .instance_size = sizeof(MAX7310State), - .class_init = max7310_class_init, -}; - -static void max7310_register_types(void) -{ - type_register_static(&max7310_info); -} - -type_init(max7310_register_types) diff --git a/hw/gpio/meson.build b/hw/gpio/meson.build index a7495d196ae..74840619c01 100644 --- a/hw/gpio/meson.build +++ b/hw/gpio/meson.build @@ -1,11 +1,10 @@ system_ss.add(when: 'CONFIG_GPIO_KEY', if_true: files('gpio_key.c')) system_ss.add(when: 'CONFIG_GPIO_MPC8XXX', if_true: files('mpc8xxx.c')) system_ss.add(when: 'CONFIG_GPIO_PWR', if_true: files('gpio_pwr.c')) -system_ss.add(when: 'CONFIG_MAX7310', if_true: files('max7310.c')) system_ss.add(when: 'CONFIG_PCA9552', if_true: files('pca9552.c')) system_ss.add(when: 'CONFIG_PCA9554', if_true: files('pca9554.c')) system_ss.add(when: 'CONFIG_PL061', if_true: files('pl061.c')) -system_ss.add(when: 'CONFIG_ZAURUS', if_true: files('zaurus.c')) +system_ss.add(when: 'CONFIG_ZAURUS_SCOOP', if_true: files('zaurus.c')) system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_gpio.c')) system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_gpio.c')) diff --git a/hw/gpio/mpc8xxx.c b/hw/gpio/mpc8xxx.c index 0b3f9e516da..257497af584 100644 --- a/hw/gpio/mpc8xxx.c +++ b/hw/gpio/mpc8xxx.c @@ -23,7 +23,6 @@ #include "hw/irq.h" #include "hw/sysbus.h" #include "migration/vmstate.h" -#include "qemu/module.h" #include "qom/object.h" #define TYPE_MPC8XXX_GPIO "mpc8xxx_gpio" @@ -200,25 +199,22 @@ static void mpc8xxx_gpio_initfn(Object *obj) qdev_init_gpio_out(dev, s->out, 32); } -static void mpc8xxx_gpio_class_init(ObjectClass *klass, void *data) +static void mpc8xxx_gpio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_mpc8xxx_gpio; - dc->reset = mpc8xxx_gpio_reset; + device_class_set_legacy_reset(dc, mpc8xxx_gpio_reset); } -static const TypeInfo mpc8xxx_gpio_info = { - .name = TYPE_MPC8XXX_GPIO, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(MPC8XXXGPIOState), - .instance_init = mpc8xxx_gpio_initfn, - .class_init = mpc8xxx_gpio_class_init, +static const TypeInfo mpc8xxx_gpio_types[] = { + { + .name = TYPE_MPC8XXX_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MPC8XXXGPIOState), + .instance_init = mpc8xxx_gpio_initfn, + .class_init = mpc8xxx_gpio_class_init, + }, }; -static void mpc8xxx_gpio_register_types(void) -{ - type_register_static(&mpc8xxx_gpio_info); -} - -type_init(mpc8xxx_gpio_register_types) +DEFINE_TYPES(mpc8xxx_gpio_types) diff --git a/hw/gpio/npcm7xx_gpio.c b/hw/gpio/npcm7xx_gpio.c index ba19b9ebad3..66f8256a7a7 100644 --- a/hw/gpio/npcm7xx_gpio.c +++ b/hw/gpio/npcm7xx_gpio.c @@ -220,8 +220,6 @@ static void npcm7xx_gpio_regs_write(void 
*opaque, hwaddr addr, uint64_t v, return; } - diff = s->regs[reg] ^ value; - switch (reg) { case NPCM7XX_GPIO_TLOCK1: case NPCM7XX_GPIO_TLOCK2: @@ -242,6 +240,7 @@ static void npcm7xx_gpio_regs_write(void *opaque, hwaddr addr, uint64_t v, case NPCM7XX_GPIO_PU: case NPCM7XX_GPIO_PD: case NPCM7XX_GPIO_IEM: + diff = s->regs[reg] ^ value; s->regs[reg] = value; npcm7xx_gpio_update_pins(s, diff); break; @@ -386,7 +385,7 @@ static const VMStateDescription vmstate_npcm7xx_gpio = { }, }; -static Property npcm7xx_gpio_properties[] = { +static const Property npcm7xx_gpio_properties[] = { /* Bit n set => pin n has pullup enabled by default. */ DEFINE_PROP_UINT32("reset-pullup", NPCM7xxGPIOState, reset_pu, 0), /* Bit n set => pin n has pulldown enabled by default. */ @@ -395,10 +394,9 @@ static Property npcm7xx_gpio_properties[] = { DEFINE_PROP_UINT32("reset-osrc", NPCM7xxGPIOState, reset_osrc, 0), /* Bit n set => pin n has high drive strength by default. */ DEFINE_PROP_UINT32("reset-odsc", NPCM7xxGPIOState, reset_odsc, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void npcm7xx_gpio_class_init(ObjectClass *klass, void *data) +static void npcm7xx_gpio_class_init(ObjectClass *klass, const void *data) { ResettableClass *reset = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/gpio/nrf51_gpio.c b/hw/gpio/nrf51_gpio.c index ffc7dff7964..d94c0c47da2 100644 --- a/hw/gpio/nrf51_gpio.c +++ b/hw/gpio/nrf51_gpio.c @@ -40,7 +40,6 @@ static bool is_connected(uint32_t config, uint32_t level) break; default: g_assert_not_reached(); - break; } return state; @@ -305,12 +304,12 @@ static void nrf51_gpio_init(Object *obj) qdev_init_gpio_out_named(DEVICE(s), &s->detect, "detect", 1); } -static void nrf51_gpio_class_init(ObjectClass *klass, void *data) +static void nrf51_gpio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_nrf51_gpio; - dc->reset = nrf51_gpio_reset; + device_class_set_legacy_reset(dc, nrf51_gpio_reset); dc->desc = "nRF51 GPIO"; } diff --git a/hw/gpio/omap_gpio.c b/hw/gpio/omap_gpio.c index a3341d70f16..f27806b774a 100644 --- a/hw/gpio/omap_gpio.c +++ b/hw/gpio/omap_gpio.c @@ -80,25 +80,25 @@ static uint64_t omap_gpio_read(void *opaque, hwaddr addr, } switch (offset) { - case 0x00: /* DATA_INPUT */ + case 0x00: /* DATA_INPUT */ return s->inputs & s->pins; - case 0x04: /* DATA_OUTPUT */ + case 0x04: /* DATA_OUTPUT */ return s->outputs; - case 0x08: /* DIRECTION_CONTROL */ + case 0x08: /* DIRECTION_CONTROL */ return s->dir; - case 0x0c: /* INTERRUPT_CONTROL */ + case 0x0c: /* INTERRUPT_CONTROL */ return s->edge; - case 0x10: /* INTERRUPT_MASK */ + case 0x10: /* INTERRUPT_MASK */ return s->mask; - case 0x14: /* INTERRUPT_STATUS */ + case 0x14: /* INTERRUPT_STATUS */ return s->ints; - case 0x18: /* PIN_CONTROL (not in OMAP310) */ + case 0x18: /* PIN_CONTROL (not in OMAP310) */ OMAP_BAD_REG(addr); return s->pins; } @@ -121,11 +121,11 @@ static void omap_gpio_write(void *opaque, hwaddr addr, } switch (offset) { - case 0x00: /* DATA_INPUT */ + case 0x00: /* DATA_INPUT */ OMAP_RO_REG(addr); return; - case 0x04: /* DATA_OUTPUT */ + case 0x04: /* DATA_OUTPUT */ diff = (s->outputs ^ value) & ~s->dir; s->outputs = value; while ((ln = ctz32(diff)) != 32) { @@ -135,7 +135,7 @@ static void omap_gpio_write(void *opaque, hwaddr addr, } break; - case 0x08: /* DIRECTION_CONTROL */ + case 0x08: /* DIRECTION_CONTROL */ diff = s->outputs & (s->dir ^ value); s->dir = value; @@ -147,21 +147,21 @@ static void omap_gpio_write(void *opaque, 
hwaddr addr, } break; - case 0x0c: /* INTERRUPT_CONTROL */ + case 0x0c: /* INTERRUPT_CONTROL */ s->edge = value; break; - case 0x10: /* INTERRUPT_MASK */ + case 0x10: /* INTERRUPT_MASK */ s->mask = value; break; - case 0x14: /* INTERRUPT_STATUS */ + case 0x14: /* INTERRUPT_STATUS */ s->ints &= ~value; if (!s->ints) qemu_irq_lower(s->irq); break; - case 0x18: /* PIN_CONTROL (not in OMAP310 TRM) */ + case 0x18: /* PIN_CONTROL (not in OMAP310 TRM) */ OMAP_BAD_REG(addr); s->pins = value; break; @@ -190,408 +190,6 @@ static void omap_gpio_reset(struct omap_gpio_s *s) s->pins = ~0; } -struct omap2_gpio_s { - qemu_irq irq[2]; - qemu_irq wkup; - qemu_irq *handler; - MemoryRegion iomem; - - uint8_t revision; - uint8_t config[2]; - uint32_t inputs; - uint32_t outputs; - uint32_t dir; - uint32_t level[2]; - uint32_t edge[2]; - uint32_t mask[2]; - uint32_t wumask; - uint32_t ints[2]; - uint32_t debounce; - uint8_t delay; -}; - -struct Omap2GpioState { - SysBusDevice parent_obj; - - MemoryRegion iomem; - int mpu_model; - void *iclk; - void *fclk[6]; - int modulecount; - struct omap2_gpio_s *modules; - qemu_irq *handler; - int autoidle; - int gpo; -}; - -/* General-Purpose Interface of OMAP2/3 */ -static inline void omap2_gpio_module_int_update(struct omap2_gpio_s *s, - int line) -{ - qemu_set_irq(s->irq[line], s->ints[line] & s->mask[line]); -} - -static void omap2_gpio_module_wake(struct omap2_gpio_s *s, int line) -{ - if (!(s->config[0] & (1 << 2))) /* ENAWAKEUP */ - return; - if (!(s->config[0] & (3 << 3))) /* Force Idle */ - return; - if (!(s->wumask & (1 << line))) - return; - - qemu_irq_raise(s->wkup); -} - -static inline void omap2_gpio_module_out_update(struct omap2_gpio_s *s, - uint32_t diff) -{ - int ln; - - s->outputs ^= diff; - diff &= ~s->dir; - while ((ln = ctz32(diff)) != 32) { - qemu_set_irq(s->handler[ln], (s->outputs >> ln) & 1); - diff &= ~(1 << ln); - } -} - -static void omap2_gpio_module_level_update(struct omap2_gpio_s *s, int line) -{ - s->ints[line] |= s->dir & - ((s->inputs & s->level[1]) | (~s->inputs & s->level[0])); - omap2_gpio_module_int_update(s, line); -} - -static inline void omap2_gpio_module_int(struct omap2_gpio_s *s, int line) -{ - s->ints[0] |= 1 << line; - omap2_gpio_module_int_update(s, 0); - s->ints[1] |= 1 << line; - omap2_gpio_module_int_update(s, 1); - omap2_gpio_module_wake(s, line); -} - -static void omap2_gpio_set(void *opaque, int line, int level) -{ - Omap2GpioState *p = opaque; - struct omap2_gpio_s *s = &p->modules[line >> 5]; - - line &= 31; - if (level) { - if (s->dir & (1 << line) & ((~s->inputs & s->edge[0]) | s->level[1])) - omap2_gpio_module_int(s, line); - s->inputs |= 1 << line; - } else { - if (s->dir & (1 << line) & ((s->inputs & s->edge[1]) | s->level[0])) - omap2_gpio_module_int(s, line); - s->inputs &= ~(1 << line); - } -} - -static void omap2_gpio_module_reset(struct omap2_gpio_s *s) -{ - s->config[0] = 0; - s->config[1] = 2; - s->ints[0] = 0; - s->ints[1] = 0; - s->mask[0] = 0; - s->mask[1] = 0; - s->wumask = 0; - s->dir = ~0; - s->level[0] = 0; - s->level[1] = 0; - s->edge[0] = 0; - s->edge[1] = 0; - s->debounce = 0; - s->delay = 0; -} - -static uint32_t omap2_gpio_module_read(void *opaque, hwaddr addr) -{ - struct omap2_gpio_s *s = opaque; - - switch (addr) { - case 0x00: /* GPIO_REVISION */ - return s->revision; - - case 0x10: /* GPIO_SYSCONFIG */ - return s->config[0]; - - case 0x14: /* GPIO_SYSSTATUS */ - return 0x01; - - case 0x18: /* GPIO_IRQSTATUS1 */ - return s->ints[0]; - - case 0x1c: /* GPIO_IRQENABLE1 */ - case 0x60: /* 
GPIO_CLEARIRQENABLE1 */ - case 0x64: /* GPIO_SETIRQENABLE1 */ - return s->mask[0]; - - case 0x20: /* GPIO_WAKEUPENABLE */ - case 0x80: /* GPIO_CLEARWKUENA */ - case 0x84: /* GPIO_SETWKUENA */ - return s->wumask; - - case 0x28: /* GPIO_IRQSTATUS2 */ - return s->ints[1]; - - case 0x2c: /* GPIO_IRQENABLE2 */ - case 0x70: /* GPIO_CLEARIRQENABLE2 */ - case 0x74: /* GPIO_SETIREQNEABLE2 */ - return s->mask[1]; - - case 0x30: /* GPIO_CTRL */ - return s->config[1]; - - case 0x34: /* GPIO_OE */ - return s->dir; - - case 0x38: /* GPIO_DATAIN */ - return s->inputs; - - case 0x3c: /* GPIO_DATAOUT */ - case 0x90: /* GPIO_CLEARDATAOUT */ - case 0x94: /* GPIO_SETDATAOUT */ - return s->outputs; - - case 0x40: /* GPIO_LEVELDETECT0 */ - return s->level[0]; - - case 0x44: /* GPIO_LEVELDETECT1 */ - return s->level[1]; - - case 0x48: /* GPIO_RISINGDETECT */ - return s->edge[0]; - - case 0x4c: /* GPIO_FALLINGDETECT */ - return s->edge[1]; - - case 0x50: /* GPIO_DEBOUNCENABLE */ - return s->debounce; - - case 0x54: /* GPIO_DEBOUNCINGTIME */ - return s->delay; - } - - OMAP_BAD_REG(addr); - return 0; -} - -static void omap2_gpio_module_write(void *opaque, hwaddr addr, - uint32_t value) -{ - struct omap2_gpio_s *s = opaque; - uint32_t diff; - int ln; - - switch (addr) { - case 0x00: /* GPIO_REVISION */ - case 0x14: /* GPIO_SYSSTATUS */ - case 0x38: /* GPIO_DATAIN */ - OMAP_RO_REG(addr); - break; - - case 0x10: /* GPIO_SYSCONFIG */ - if (((value >> 3) & 3) == 3) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Illegal IDLEMODE value: 3\n", __func__); - } - if (value & 2) - omap2_gpio_module_reset(s); - s->config[0] = value & 0x1d; - break; - - case 0x18: /* GPIO_IRQSTATUS1 */ - if (s->ints[0] & value) { - s->ints[0] &= ~value; - omap2_gpio_module_level_update(s, 0); - } - break; - - case 0x1c: /* GPIO_IRQENABLE1 */ - s->mask[0] = value; - omap2_gpio_module_int_update(s, 0); - break; - - case 0x20: /* GPIO_WAKEUPENABLE */ - s->wumask = value; - break; - - case 0x28: /* GPIO_IRQSTATUS2 */ - if (s->ints[1] & value) { - s->ints[1] &= ~value; - omap2_gpio_module_level_update(s, 1); - } - break; - - case 0x2c: /* GPIO_IRQENABLE2 */ - s->mask[1] = value; - omap2_gpio_module_int_update(s, 1); - break; - - case 0x30: /* GPIO_CTRL */ - s->config[1] = value & 7; - break; - - case 0x34: /* GPIO_OE */ - diff = s->outputs & (s->dir ^ value); - s->dir = value; - - value = s->outputs & ~s->dir; - while ((ln = ctz32(diff)) != 32) { - diff &= ~(1 << ln); - qemu_set_irq(s->handler[ln], (value >> ln) & 1); - } - - omap2_gpio_module_level_update(s, 0); - omap2_gpio_module_level_update(s, 1); - break; - - case 0x3c: /* GPIO_DATAOUT */ - omap2_gpio_module_out_update(s, s->outputs ^ value); - break; - - case 0x40: /* GPIO_LEVELDETECT0 */ - s->level[0] = value; - omap2_gpio_module_level_update(s, 0); - omap2_gpio_module_level_update(s, 1); - break; - - case 0x44: /* GPIO_LEVELDETECT1 */ - s->level[1] = value; - omap2_gpio_module_level_update(s, 0); - omap2_gpio_module_level_update(s, 1); - break; - - case 0x48: /* GPIO_RISINGDETECT */ - s->edge[0] = value; - break; - - case 0x4c: /* GPIO_FALLINGDETECT */ - s->edge[1] = value; - break; - - case 0x50: /* GPIO_DEBOUNCENABLE */ - s->debounce = value; - break; - - case 0x54: /* GPIO_DEBOUNCINGTIME */ - s->delay = value; - break; - - case 0x60: /* GPIO_CLEARIRQENABLE1 */ - s->mask[0] &= ~value; - omap2_gpio_module_int_update(s, 0); - break; - - case 0x64: /* GPIO_SETIRQENABLE1 */ - s->mask[0] |= value; - omap2_gpio_module_int_update(s, 0); - break; - - case 0x70: /* GPIO_CLEARIRQENABLE2 */ - s->mask[1] 
&= ~value; - omap2_gpio_module_int_update(s, 1); - break; - - case 0x74: /* GPIO_SETIREQNEABLE2 */ - s->mask[1] |= value; - omap2_gpio_module_int_update(s, 1); - break; - - case 0x80: /* GPIO_CLEARWKUENA */ - s->wumask &= ~value; - break; - - case 0x84: /* GPIO_SETWKUENA */ - s->wumask |= value; - break; - - case 0x90: /* GPIO_CLEARDATAOUT */ - omap2_gpio_module_out_update(s, s->outputs & value); - break; - - case 0x94: /* GPIO_SETDATAOUT */ - omap2_gpio_module_out_update(s, ~s->outputs & value); - break; - - default: - OMAP_BAD_REG(addr); - return; - } -} - -static uint64_t omap2_gpio_module_readp(void *opaque, hwaddr addr, - unsigned size) -{ - return omap2_gpio_module_read(opaque, addr & ~3) >> ((addr & 3) << 3); -} - -static void omap2_gpio_module_writep(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - uint32_t cur = 0; - uint32_t mask = 0xffff; - - if (size == 4) { - omap2_gpio_module_write(opaque, addr, value); - return; - } - - switch (addr & ~3) { - case 0x00: /* GPIO_REVISION */ - case 0x14: /* GPIO_SYSSTATUS */ - case 0x38: /* GPIO_DATAIN */ - OMAP_RO_REG(addr); - break; - - case 0x10: /* GPIO_SYSCONFIG */ - case 0x1c: /* GPIO_IRQENABLE1 */ - case 0x20: /* GPIO_WAKEUPENABLE */ - case 0x2c: /* GPIO_IRQENABLE2 */ - case 0x30: /* GPIO_CTRL */ - case 0x34: /* GPIO_OE */ - case 0x3c: /* GPIO_DATAOUT */ - case 0x40: /* GPIO_LEVELDETECT0 */ - case 0x44: /* GPIO_LEVELDETECT1 */ - case 0x48: /* GPIO_RISINGDETECT */ - case 0x4c: /* GPIO_FALLINGDETECT */ - case 0x50: /* GPIO_DEBOUNCENABLE */ - case 0x54: /* GPIO_DEBOUNCINGTIME */ - cur = omap2_gpio_module_read(opaque, addr & ~3) & - ~(mask << ((addr & 3) << 3)); - - /* Fall through. */ - case 0x18: /* GPIO_IRQSTATUS1 */ - case 0x28: /* GPIO_IRQSTATUS2 */ - case 0x60: /* GPIO_CLEARIRQENABLE1 */ - case 0x64: /* GPIO_SETIRQENABLE1 */ - case 0x70: /* GPIO_CLEARIRQENABLE2 */ - case 0x74: /* GPIO_SETIREQNEABLE2 */ - case 0x80: /* GPIO_CLEARWKUENA */ - case 0x84: /* GPIO_SETWKUENA */ - case 0x90: /* GPIO_CLEARDATAOUT */ - case 0x94: /* GPIO_SETDATAOUT */ - value <<= (addr & 3) << 3; - omap2_gpio_module_write(opaque, addr, cur | value); - break; - - default: - OMAP_BAD_REG(addr); - return; - } -} - -static const MemoryRegionOps omap2_gpio_module_ops = { - .read = omap2_gpio_module_readp, - .write = omap2_gpio_module_writep, - .valid.min_access_size = 1, - .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - static void omap_gpif_reset(DeviceState *dev) { Omap1GpioState *s = OMAP1_GPIO(dev); @@ -599,81 +197,6 @@ static void omap_gpif_reset(DeviceState *dev) omap_gpio_reset(&s->omap1); } -static void omap2_gpif_reset(DeviceState *dev) -{ - Omap2GpioState *s = OMAP2_GPIO(dev); - int i; - - for (i = 0; i < s->modulecount; i++) { - omap2_gpio_module_reset(&s->modules[i]); - } - s->autoidle = 0; - s->gpo = 0; -} - -static uint64_t omap2_gpif_top_read(void *opaque, hwaddr addr, unsigned size) -{ - Omap2GpioState *s = opaque; - - switch (addr) { - case 0x00: /* IPGENERICOCPSPL_REVISION */ - return 0x18; - - case 0x10: /* IPGENERICOCPSPL_SYSCONFIG */ - return s->autoidle; - - case 0x14: /* IPGENERICOCPSPL_SYSSTATUS */ - return 0x01; - - case 0x18: /* IPGENERICOCPSPL_IRQSTATUS */ - return 0x00; - - case 0x40: /* IPGENERICOCPSPL_GPO */ - return s->gpo; - - case 0x50: /* IPGENERICOCPSPL_GPI */ - return 0x00; - } - - OMAP_BAD_REG(addr); - return 0; -} - -static void omap2_gpif_top_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - Omap2GpioState *s = opaque; - - switch (addr) { - case 0x00: /* 
IPGENERICOCPSPL_REVISION */ - case 0x14: /* IPGENERICOCPSPL_SYSSTATUS */ - case 0x18: /* IPGENERICOCPSPL_IRQSTATUS */ - case 0x50: /* IPGENERICOCPSPL_GPI */ - OMAP_RO_REG(addr); - break; - - case 0x10: /* IPGENERICOCPSPL_SYSCONFIG */ - if (value & (1 << 1)) /* SOFTRESET */ - omap2_gpif_reset(DEVICE(s)); - s->autoidle = value & 1; - break; - - case 0x40: /* IPGENERICOCPSPL_GPO */ - s->gpo = value & 1; - break; - - default: - OMAP_BAD_REG(addr); - return; - } -} - -static const MemoryRegionOps omap2_gpif_top_ops = { - .read = omap2_gpif_top_read, - .write = omap2_gpif_top_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - static void omap_gpio_init(Object *obj) { DeviceState *dev = DEVICE(obj); @@ -697,67 +220,21 @@ static void omap_gpio_realize(DeviceState *dev, Error **errp) } } -static void omap2_gpio_realize(DeviceState *dev, Error **errp) -{ - Omap2GpioState *s = OMAP2_GPIO(dev); - SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - int i; - - if (!s->iclk) { - error_setg(errp, "omap2-gpio: iclk not connected"); - return; - } - - s->modulecount = s->mpu_model < omap2430 ? 4 - : s->mpu_model < omap3430 ? 5 - : 6; - - if (s->mpu_model < omap3430) { - memory_region_init_io(&s->iomem, OBJECT(dev), &omap2_gpif_top_ops, s, - "omap2.gpio", 0x1000); - sysbus_init_mmio(sbd, &s->iomem); - } - - s->modules = g_new0(struct omap2_gpio_s, s->modulecount); - s->handler = g_new0(qemu_irq, s->modulecount * 32); - qdev_init_gpio_in(dev, omap2_gpio_set, s->modulecount * 32); - qdev_init_gpio_out(dev, s->handler, s->modulecount * 32); - - for (i = 0; i < s->modulecount; i++) { - struct omap2_gpio_s *m = &s->modules[i]; - - if (!s->fclk[i]) { - error_setg(errp, "omap2-gpio: fclk%d not connected", i); - return; - } - - m->revision = (s->mpu_model < omap3430) ? 0x18 : 0x25; - m->handler = &s->handler[i * 32]; - sysbus_init_irq(sbd, &m->irq[0]); /* mpu irq */ - sysbus_init_irq(sbd, &m->irq[1]); /* dsp irq */ - sysbus_init_irq(sbd, &m->wkup); - memory_region_init_io(&m->iomem, OBJECT(dev), &omap2_gpio_module_ops, m, - "omap.gpio-module", 0x1000); - sysbus_init_mmio(sbd, &m->iomem); - } -} - void omap_gpio_set_clk(Omap1GpioState *gpio, omap_clk clk) { gpio->clk = clk; } -static Property omap_gpio_properties[] = { +static const Property omap_gpio_properties[] = { DEFINE_PROP_INT32("mpu_model", Omap1GpioState, mpu_model, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void omap_gpio_class_init(ObjectClass *klass, void *data) +static void omap_gpio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = omap_gpio_realize; - dc->reset = omap_gpif_reset; + device_class_set_legacy_reset(dc, omap_gpif_reset); device_class_set_props(dc, omap_gpio_properties); /* Reason: pointer property "clk" */ dc->user_creatable = false; @@ -771,44 +248,9 @@ static const TypeInfo omap_gpio_info = { .class_init = omap_gpio_class_init, }; -void omap2_gpio_set_iclk(Omap2GpioState *gpio, omap_clk clk) -{ - gpio->iclk = clk; -} - -void omap2_gpio_set_fclk(Omap2GpioState *gpio, uint8_t i, omap_clk clk) -{ - assert(i <= 5); - gpio->fclk[i] = clk; -} - -static Property omap2_gpio_properties[] = { - DEFINE_PROP_INT32("mpu_model", Omap2GpioState, mpu_model, 0), - DEFINE_PROP_END_OF_LIST(), -}; - -static void omap2_gpio_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->realize = omap2_gpio_realize; - dc->reset = omap2_gpif_reset; - device_class_set_props(dc, omap2_gpio_properties); - /* Reason: pointer properties "iclk", "fclk0", ..., "fclk5" */ - dc->user_creatable 
= false; -} - -static const TypeInfo omap2_gpio_info = { - .name = TYPE_OMAP2_GPIO, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(Omap2GpioState), - .class_init = omap2_gpio_class_init, -}; - static void omap_gpio_register_types(void) { type_register_static(&omap_gpio_info); - type_register_static(&omap2_gpio_info); } type_init(omap_gpio_register_types) diff --git a/hw/gpio/pca9552.c b/hw/gpio/pca9552.c index 27d4db06809..1e10238b2e0 100644 --- a/hw/gpio/pca9552.c +++ b/hw/gpio/pca9552.c @@ -76,7 +76,7 @@ static void pca955x_display_pins_status(PCA955xState *s, return; } if (trace_event_get_state_backends(TRACE_PCA955X_GPIO_STATUS)) { - char *buf = g_newa(char, k->pin_count + 1); + char buf[PCA955X_PIN_COUNT_MAX + 1]; for (i = 0; i < k->pin_count; i++) { if (extract32(pins_status, i, 1)) { @@ -428,12 +428,11 @@ static void pca955x_realize(DeviceState *dev, Error **errp) qdev_init_gpio_in(dev, pca955x_gpio_in_handler, k->pin_count); } -static Property pca955x_properties[] = { +static const Property pca955x_properties[] = { DEFINE_PROP_STRING("description", PCA955xState, description), - DEFINE_PROP_END_OF_LIST(), }; -static void pca955x_class_init(ObjectClass *klass, void *data) +static void pca955x_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); @@ -455,12 +454,12 @@ static const TypeInfo pca955x_info = { .abstract = true, }; -static void pca9552_class_init(ObjectClass *oc, void *data) +static void pca9552_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PCA955xClass *pc = PCA955X_CLASS(oc); - dc->reset = pca9552_reset; + device_class_set_legacy_reset(dc, pca9552_reset); dc->vmsd = &pca9552_vmstate; pc->max_reg = PCA9552_LS3; pc->pin_count = 16; diff --git a/hw/gpio/pca9554.c b/hw/gpio/pca9554.c index 7d10a64ba7c..de3f883aee9 100644 --- a/hw/gpio/pca9554.c +++ b/hw/gpio/pca9554.c @@ -118,11 +118,8 @@ static void pca9554_write(PCA9554State *s, uint8_t reg, uint8_t data) static uint8_t pca9554_recv(I2CSlave *i2c) { PCA9554State *s = PCA9554(i2c); - uint8_t ret; - ret = pca9554_read(s, s->pointer & 0x3); - - return ret; + return pca9554_read(s, s->pointer & 0x3); } static int pca9554_send(I2CSlave *i2c, uint8_t data) @@ -291,12 +288,11 @@ static void pca9554_realize(DeviceState *dev, Error **errp) qdev_init_gpio_in(dev, pca9554_gpio_in_handler, PCA9554_PIN_COUNT); } -static Property pca9554_properties[] = { +static const Property pca9554_properties[] = { DEFINE_PROP_STRING("description", PCA9554State, description), - DEFINE_PROP_END_OF_LIST(), }; -static void pca9554_class_init(ObjectClass *klass, void *data) +static void pca9554_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); @@ -305,7 +301,7 @@ static void pca9554_class_init(ObjectClass *klass, void *data) k->recv = pca9554_recv; k->send = pca9554_send; dc->realize = pca9554_realize; - dc->reset = pca9554_reset; + device_class_set_legacy_reset(dc, pca9554_reset); dc->vmsd = &pca9554_vmstate; device_class_set_props(dc, pca9554_properties); } diff --git a/hw/gpio/pcf8574.c b/hw/gpio/pcf8574.c index d37909e2ada..274b44bb616 100644 --- a/hw/gpio/pcf8574.c +++ b/hw/gpio/pcf8574.c @@ -138,7 +138,7 @@ static void pcf8574_realize(DeviceState *dev, Error **errp) qdev_init_gpio_out_named(dev, &s->intrq, "nINT", 1); } -static void pcf8574_class_init(ObjectClass *klass, void *data) +static void pcf8574_class_init(ObjectClass *klass, const 
void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); @@ -146,7 +146,7 @@ static void pcf8574_class_init(ObjectClass *klass, void *data) k->recv = pcf8574_rx; k->send = pcf8574_tx; dc->realize = pcf8574_realize; - dc->reset = pcf8574_reset; + device_class_set_legacy_reset(dc, pcf8574_reset); dc->vmsd = &vmstate_pcf8574; } diff --git a/hw/gpio/pl061.c b/hw/gpio/pl061.c index d5838b8e98d..1acca3f2f80 100644 --- a/hw/gpio/pl061.c +++ b/hw/gpio/pl061.c @@ -443,7 +443,6 @@ static void pl061_write(void *opaque, hwaddr offset, return; } pl061_update(s); - return; } static void pl061_enter_reset(Object *obj, ResetType type) @@ -562,13 +561,12 @@ static void pl061_realize(DeviceState *dev, Error **errp) } } -static Property pl061_props[] = { +static const Property pl061_props[] = { DEFINE_PROP_UINT32("pullups", PL061State, pullups, 0xff), DEFINE_PROP_UINT32("pulldowns", PL061State, pulldowns, 0x0), - DEFINE_PROP_END_OF_LIST() }; -static void pl061_class_init(ObjectClass *klass, void *data) +static void pl061_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/gpio/sifive_gpio.c b/hw/gpio/sifive_gpio.c index 995a43c7958..5831647b4d0 100644 --- a/hw/gpio/sifive_gpio.c +++ b/hw/gpio/sifive_gpio.c @@ -349,9 +349,8 @@ static const VMStateDescription vmstate_sifive_gpio = { } }; -static Property sifive_gpio_properties[] = { +static const Property sifive_gpio_properties[] = { DEFINE_PROP_UINT32("ngpio", SIFIVEGPIOState, ngpio, SIFIVE_GPIO_PINS), - DEFINE_PROP_END_OF_LIST(), }; static void sifive_gpio_realize(DeviceState *dev, Error **errp) @@ -371,14 +370,14 @@ static void sifive_gpio_realize(DeviceState *dev, Error **errp) qdev_init_gpio_out(DEVICE(s), s->output, s->ngpio); } -static void sifive_gpio_class_init(ObjectClass *klass, void *data) +static void sifive_gpio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, sifive_gpio_properties); dc->vmsd = &vmstate_sifive_gpio; dc->realize = sifive_gpio_realize; - dc->reset = sifive_gpio_reset; + device_class_set_legacy_reset(dc, sifive_gpio_reset); dc->desc = "SiFive GPIO"; } diff --git a/hw/gpio/stm32l4x5_gpio.c b/hw/gpio/stm32l4x5_gpio.c index 30d8d6cba41..414ce830390 100644 --- a/hw/gpio/stm32l4x5_gpio.c +++ b/hw/gpio/stm32l4x5_gpio.c @@ -447,15 +447,14 @@ static const VMStateDescription vmstate_stm32l4x5_gpio = { } }; -static Property stm32l4x5_gpio_properties[] = { +static const Property stm32l4x5_gpio_properties[] = { DEFINE_PROP_STRING("name", Stm32l4x5GpioState, name), DEFINE_PROP_UINT32("mode-reset", Stm32l4x5GpioState, moder_reset, 0), DEFINE_PROP_UINT32("ospeed-reset", Stm32l4x5GpioState, ospeedr_reset, 0), DEFINE_PROP_UINT32("pupd-reset", Stm32l4x5GpioState, pupdr_reset, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void stm32l4x5_gpio_class_init(ObjectClass *klass, void *data) +static void stm32l4x5_gpio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/gpio/trace-events b/hw/gpio/trace-events index b91cc7e9a45..cea896b28f6 100644 --- a/hw/gpio/trace-events +++ b/hw/gpio/trace-events @@ -1,5 +1,10 @@ # See docs/devel/tracing.rst for syntax documentation. 
+# imx_gpio.c +imx_gpio_read(const char *id, const char *reg, uint32_t value) "%s:[%s] -> 0x%" PRIx32 +imx_gpio_write(const char *id, const char *reg, uint32_t value) "%s:[%s] <- 0x%" PRIx32 +imx_gpio_set(const char *id, int line, int level) "%s:[%d] <- %d" + # npcm7xx_gpio.c npcm7xx_gpio_read(const char *id, uint64_t offset, uint64_t value) " %s offset: 0x%04" PRIx64 " value 0x%08" PRIx64 npcm7xx_gpio_write(const char *id, uint64_t offset, uint64_t value) "%s offset: 0x%04" PRIx64 " value 0x%08" PRIx64 diff --git a/hw/gpio/zaurus.c b/hw/gpio/zaurus.c index 7342440b958..b8d27f59738 100644 --- a/hw/gpio/zaurus.c +++ b/hw/gpio/zaurus.c @@ -243,7 +243,7 @@ static const VMStateDescription vmstate_scoop_regs = { }, }; -static void scoop_sysbus_class_init(ObjectClass *klass, void *data) +static void scoop_sysbus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/hppa/Kconfig b/hw/hppa/Kconfig index d4d457f4ab4..cab21045de9 100644 --- a/hw/hppa/Kconfig +++ b/hw/hppa/Kconfig @@ -9,8 +9,9 @@ config HPPA_B160L select ASTRO select DINO select LASI - select SERIAL + select SERIAL_MM select SERIAL_PCI + select DIVA_GSP select ISA_BUS select I8259 select IDE_CMD646 diff --git a/hw/hppa/hppa_hardware.h b/hw/hppa/hppa_hardware.h index a9be7bb8518..21c777cba65 100644 --- a/hw/hppa/hppa_hardware.h +++ b/hw/hppa/hppa_hardware.h @@ -6,6 +6,11 @@ #define FIRMWARE_START 0xf0000000 #define FIRMWARE_END 0xf0800000 +#define FIRMWARE_HIGH 0xfffffff0 /* upper 32-bits of 64-bit firmware address */ + +#define RAM_MAP_HIGH 0x0100000000 /* memory above 3.75 GB is mapped here */ + +#define MEM_PDC_ENTRY 0x4800 /* PDC entry address */ #define DEVICE_HPA_LEN 0x00100000 @@ -18,6 +23,7 @@ #define LASI_UART_HPA 0xffd05000 #define LASI_SCSI_HPA 0xffd06000 #define LASI_LAN_HPA 0xffd07000 +#define LASI_RTC_HPA 0xffd09000 #define LASI_LPT_HPA 0xffd02000 #define LASI_AUDIO_HPA 0xffd04000 #define LASI_PS2KBD_HPA 0xffd08000 @@ -27,16 +33,23 @@ #define CPU_HPA 0xfffb0000 #define MEMORY_HPA 0xfffff000 -#define PCI_HPA DINO_HPA /* PCI bus */ #define IDE_HPA 0xf9000000 /* Boot disc controller */ +#define ASTRO_HPA 0xfed00000 +#define ELROY0_HPA 0xfed30000 +#define ELROY2_HPA 0xfed32000 +#define ELROY8_HPA 0xfed38000 +#define ELROYc_HPA 0xfed3c000 +#define ASTRO_MEMORY_HPA 0xfed10200 + +#define SCSI_HPA 0xf1040000 /* emulated SCSI, needs to be in f region */ /* offsets to DINO HPA: */ #define DINO_PCI_ADDR 0x064 #define DINO_CONFIG_DATA 0x068 #define DINO_IO_DATA 0x06c -#define PORT_PCI_CMD (PCI_HPA + DINO_PCI_ADDR) -#define PORT_PCI_DATA (PCI_HPA + DINO_CONFIG_DATA) +#define PORT_PCI_CMD hppa_port_pci_cmd +#define PORT_PCI_DATA hppa_port_pci_data #define FW_CFG_IO_BASE 0xfffa0000 @@ -46,7 +59,24 @@ #define HPPA_MAX_CPUS 16 /* max. number of SMP CPUs */ #define CPU_CLOCK_MHZ 250 /* emulate a 250 MHz CPU */ +#define CR_PSW_DEFAULT 6 /* used by SeaBIOS & QEMU for default PSW */ #define CPU_HPA_CR_REG 7 /* store CPU HPA in cr7 (SeaBIOS internal) */ #define PIM_STORAGE_SIZE 600 /* storage size of pdc_pim_toc_struct (64bit) */ +#define ASTRO_BUS_MODULE 0x0a /* C3700: 0x0a, others maybe 0 ? 
*/ + +/* ASTRO Memory and I/O regions */ +#define ASTRO_BASE_HPA 0xfffed00000 +#define ELROY0_BASE_HPA 0xfffed30000 /* ELROY0_HPA */ + +#define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */ + +#define LMMIO_DIRECT0_BASE 0x300 +#define LMMIO_DIRECT0_MASK 0x308 +#define LMMIO_DIRECT0_ROUTE 0x310 + +/* space register hashing */ +#define HPPA64_DIAG_SPHASH_ENABLE 0x200 /* DIAG_SPHASH_ENAB (bit 54) */ +#define HPPA64_PDC_CACHE_RET_SPID_VAL 0xfe0 /* PDC return value on 64-bit CPU */ + #endif diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c index 5d0a8739deb..dacedc5409c 100644 --- a/hw/hppa/machine.c +++ b/hw/hppa/machine.c @@ -11,13 +11,14 @@ #include "elf.h" #include "hw/loader.h" #include "qemu/error-report.h" -#include "sysemu/reset.h" -#include "sysemu/sysemu.h" -#include "sysemu/qtest.h" -#include "sysemu/runstate.h" +#include "exec/target_page.h" +#include "system/reset.h" +#include "system/system.h" +#include "system/qtest.h" +#include "system/runstate.h" #include "hw/rtc/mc146818rtc.h" #include "hw/timer/i8254.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/char/parallel.h" #include "hw/intc/i8259.h" #include "hw/input/lasips2.h" @@ -240,7 +241,7 @@ static FWCfgState *create_fw_cfg(MachineState *ms, PCIBus *pci_bus, g_memdup2(qemu_version, sizeof(qemu_version)), sizeof(qemu_version)); - fw_cfg_add_extra_pci_roots(pci_bus, fw_cfg); + pci_bus_add_fw_cfg_extra_pci_roots(fw_cfg, pci_bus, &error_abort); return fw_cfg; } @@ -283,16 +284,13 @@ static TranslateFn *machine_HP_common_init_cpus(MachineState *machine) cpu[i] = HPPA_CPU(cpu_create(machine->cpu_type)); } - /* - * For now, treat address layout as if PSW_W is clear. - * TODO: create a proper hppa64 board model and load elf64 firmware. - */ + /* Initialize memory */ if (hppa_is_pa20(&cpu[0]->env)) { translate = translate_pa20; - ram_max = 0xf0000000; /* 3.75 GB (limited by 32-bit firmware) */ + ram_max = 256 * GiB; /* like HP rp8440 */ } else { translate = translate_pa10; - ram_max = 0xf0000000; /* 3.75 GB (32-bit CPU) */ + ram_max = FIRMWARE_START; /* 3.75 GB (32-bit CPU) */ } soft_power_reg = translate(NULL, HPA_POWER_BUTTON); @@ -320,7 +318,22 @@ static TranslateFn *machine_HP_common_init_cpus(MachineState *machine) info_report("Max RAM size limited to %" PRIu64 " MB", ram_max / MiB); machine->ram_size = ram_max; } - memory_region_add_subregion_overlap(addr_space, 0, machine->ram, -1); + if (machine->ram_size <= FIRMWARE_START) { + /* contiguous memory up to 3.75 GB RAM */ + memory_region_add_subregion_overlap(addr_space, 0, machine->ram, -1); + } else { + /* non-contiguous: Memory above 3.75 GB is mapped at RAM_MAP_HIGH */ + MemoryRegion *mem_region; + mem_region = g_new(MemoryRegion, 2); + memory_region_init_alias(&mem_region[0], &addr_space->parent_obj, + "LowMem", machine->ram, 0, FIRMWARE_START); + memory_region_init_alias(&mem_region[1], &addr_space->parent_obj, + "HighMem", machine->ram, FIRMWARE_START, + machine->ram_size - FIRMWARE_START); + memory_region_add_subregion_overlap(addr_space, 0, &mem_region[0], -1); + memory_region_add_subregion_overlap(addr_space, RAM_MAP_HIGH, + &mem_region[1], -1); + } return translate; } @@ -344,7 +357,6 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, uint64_t kernel_entry = 0, kernel_low, kernel_high; MemoryRegion *addr_space = get_system_memory(); MemoryRegion *rom_region; - unsigned int smp_cpus = machine->smp.cpus; SysBusDevice *s; /* SCSI disk setup. 
*/ @@ -355,12 +367,15 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, /* Graphics setup. */ if (machine->enable_graphics && vga_interface_type != VGA_NONE) { - vga_interface_created = true; dev = qdev_new("artist"); s = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(s, &error_fatal); - sysbus_mmio_map(s, 0, translate(NULL, LASI_GFX_HPA)); - sysbus_mmio_map(s, 1, translate(NULL, ARTIST_FB_ADDR)); + bool disabled = object_property_get_bool(OBJECT(dev), "disable", NULL); + if (!disabled) { + sysbus_realize_and_unref(s, &error_fatal); + vga_interface_created = true; + sysbus_mmio_map(s, 0, translate(NULL, LASI_GFX_HPA)); + sysbus_mmio_map(s, 1, translate(NULL, ARTIST_FB_ADDR)); + } } /* Network setup. */ @@ -372,26 +387,17 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, pci_init_nic_devices(pci_bus, mc->default_nic); - /* BMC board: HP Powerbar SP2 Diva (with console only) */ - pci_dev = pci_new(-1, "pci-serial"); - if (!lasi_dev) { - /* bind default keyboard/serial to Diva card */ - qdev_prop_set_chr(DEVICE(pci_dev), "chardev", serial_hd(0)); - } - qdev_prop_set_uint8(DEVICE(pci_dev), "prog_if", 0); - pci_realize_and_unref(pci_dev, pci_bus, &error_fatal); - pci_config_set_vendor_id(pci_dev->config, PCI_VENDOR_ID_HP); - pci_config_set_device_id(pci_dev->config, 0x1048); - pci_set_word(&pci_dev->config[PCI_SUBSYSTEM_VENDOR_ID], PCI_VENDOR_ID_HP); - pci_set_word(&pci_dev->config[PCI_SUBSYSTEM_ID], 0x1227); /* Powerbar */ - - /* create a second serial PCI card when running Astro */ - if (serial_hd(1) && !lasi_dev) { - pci_dev = pci_new(-1, "pci-serial-4x"); - qdev_prop_set_chr(DEVICE(pci_dev), "chardev1", serial_hd(1)); - qdev_prop_set_chr(DEVICE(pci_dev), "chardev2", serial_hd(2)); - qdev_prop_set_chr(DEVICE(pci_dev), "chardev3", serial_hd(3)); - qdev_prop_set_chr(DEVICE(pci_dev), "chardev4", serial_hd(4)); + /* BMC board: HP Diva GSP */ + dev = qdev_new("diva-gsp"); + if (!object_property_get_bool(OBJECT(dev), "disable", NULL)) { + pci_dev = pci_new_multifunction(PCI_DEVFN(2, 0), "diva-gsp"); + if (!lasi_dev) { + /* bind default keyboard/serial to Diva card */ + qdev_prop_set_chr(DEVICE(pci_dev), "chardev1", serial_hd(0)); + qdev_prop_set_chr(DEVICE(pci_dev), "chardev2", serial_hd(1)); + qdev_prop_set_chr(DEVICE(pci_dev), "chardev3", serial_hd(2)); + qdev_prop_set_chr(DEVICE(pci_dev), "chardev4", serial_hd(3)); + } pci_realize_and_unref(pci_dev, pci_bus, &error_fatal); } @@ -429,7 +435,7 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, size = load_elf(firmware_filename, NULL, translate, NULL, &firmware_entry, &firmware_low, &firmware_high, NULL, - true, EM_PARISC, 0, 0); + ELFDATA2MSB, EM_PARISC, 0, 0); if (size < 0) { error_report("could not load firmware '%s'", firmware_filename); @@ -456,7 +462,7 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, if (kernel_filename) { size = load_elf(kernel_filename, NULL, linux_kernel_virt_to_phys, NULL, &kernel_entry, &kernel_low, &kernel_high, NULL, - true, EM_PARISC, 0, 0); + ELFDATA2MSB, EM_PARISC, 0, 0); kernel_entry = linux_kernel_virt_to_phys(NULL, kernel_entry); @@ -470,8 +476,8 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, kernel_low, kernel_high, kernel_entry, size / KiB); if (kernel_cmdline) { - cpu[0]->env.gr[24] = 0x4000; - pstrcpy_targphys("cmdline", cpu[0]->env.gr[24], + cpu[0]->env.cmdline_or_bootorder = 0x4000; + pstrcpy_targphys("cmdline", 
cpu[0]->env.cmdline_or_bootorder, TARGET_PAGE_SIZE, kernel_cmdline); } @@ -501,32 +507,22 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, } load_image_targphys(initrd_filename, initrd_base, initrd_size); - cpu[0]->env.gr[23] = initrd_base; - cpu[0]->env.gr[22] = initrd_base + initrd_size; + cpu[0]->env.initrd_base = initrd_base; + cpu[0]->env.initrd_end = initrd_base + initrd_size; } } if (!kernel_entry) { /* When booting via firmware, tell firmware if we want interactive - * mode (kernel_entry=1), and to boot from CD (gr[24]='d') - * or hard disc * (gr[24]='c'). + * mode (kernel_entry=1), and to boot from CD (cmdline_or_bootorder='d') + * or hard disc (cmdline_or_bootorder='c'). */ kernel_entry = machine->boot_config.has_menu ? machine->boot_config.menu : 0; - cpu[0]->env.gr[24] = machine->boot_config.order[0]; + cpu[0]->env.cmdline_or_bootorder = machine->boot_config.order[0]; } - /* We jump to the firmware entry routine and pass the - * various parameters in registers. After firmware initialization, - * firmware will start the Linux kernel with ramdisk and cmdline. - */ - cpu[0]->env.gr[26] = machine->ram_size; - cpu[0]->env.gr[25] = kernel_entry; - - /* tell firmware how many SMP CPUs to present in inventory table */ - cpu[0]->env.gr[21] = smp_cpus; - - /* tell firmware fw_cfg port */ - cpu[0]->env.gr[19] = FW_CFG_IO_BASE; + /* Keep initial kernel_entry for first boot */ + cpu[0]->env.kernel_entry = kernel_entry; } /* @@ -642,12 +638,12 @@ static void machine_HP_C3700_init(MachineState *machine) machine_HP_common_init_tail(machine, pci_bus, translate); } -static void hppa_machine_reset(MachineState *ms, ShutdownCause reason) +static void hppa_machine_reset(MachineState *ms, ResetType type) { unsigned int smp_cpus = ms->smp.cpus; int i; - qemu_devices_reset(reason); + qemu_devices_reset(type); /* Start all CPUs at the firmware entry point. * Monarch CPU will initialize firmware, secondary CPUs @@ -655,26 +651,27 @@ static void hppa_machine_reset(MachineState *ms, ShutdownCause reason) for (i = 0; i < smp_cpus; i++) { CPUState *cs = CPU(cpu[i]); + /* reset CPU */ + resettable_reset(OBJECT(cs), RESET_TYPE_COLD); + cpu_set_pc(cs, firmware_entry); cpu[i]->env.psw = PSW_Q; cpu[i]->env.gr[5] = CPU_HPA + i * 0x1000; - - cs->exception_index = -1; - cs->halted = 0; - } - - /* already initialized by machine_hppa_init()? */ - if (cpu[0]->env.gr[26] == ms->ram_size) { - return; } cpu[0]->env.gr[26] = ms->ram_size; - cpu[0]->env.gr[25] = 0; /* no firmware boot menu */ - cpu[0]->env.gr[24] = 'c'; - /* gr22/gr23 unused, no initrd while reboot. 
*/ + cpu[0]->env.gr[25] = cpu[0]->env.kernel_entry; + cpu[0]->env.gr[24] = cpu[0]->env.cmdline_or_bootorder; + cpu[0]->env.gr[23] = cpu[0]->env.initrd_base; + cpu[0]->env.gr[22] = cpu[0]->env.initrd_end; cpu[0]->env.gr[21] = smp_cpus; - /* tell firmware fw_cfg port */ cpu[0]->env.gr[19] = FW_CFG_IO_BASE; + + /* reset static fields to avoid starting Linux kernel & initrd on reboot */ + cpu[0]->env.kernel_entry = 0; + cpu[0]->env.initrd_base = 0; + cpu[0]->env.initrd_end = 0; + cpu[0]->env.cmdline_or_bootorder = 'c'; } static void hppa_nmi(NMIState *n, int cpu_index, Error **errp) @@ -686,7 +683,7 @@ static void hppa_nmi(NMIState *n, int cpu_index, Error **errp) } } -static void HP_B160L_machine_init_class_init(ObjectClass *oc, void *data) +static void HP_B160L_machine_init_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { TYPE_HPPA_CPU, @@ -716,13 +713,13 @@ static const TypeInfo HP_B160L_machine_init_typeinfo = { .name = MACHINE_TYPE_NAME("B160L"), .parent = TYPE_MACHINE, .class_init = HP_B160L_machine_init_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_NMI }, { } }, }; -static void HP_C3700_machine_init_class_init(ObjectClass *oc, void *data) +static void HP_C3700_machine_init_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { TYPE_HPPA64_CPU, @@ -752,7 +749,7 @@ static const TypeInfo HP_C3700_machine_init_typeinfo = { .name = MACHINE_TYPE_NAME("C3700"), .parent = TYPE_MACHINE, .class_init = HP_C3700_machine_init_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_NMI }, { } }, diff --git a/hw/hyperv/hv-balloon-our_range_memslots.h b/hw/hyperv/hv-balloon-our_range_memslots.h index df3b686bc7c..b1f19d77daf 100644 --- a/hw/hyperv/hv-balloon-our_range_memslots.h +++ b/hw/hyperv/hv-balloon-our_range_memslots.h @@ -11,7 +11,7 @@ #define HW_HYPERV_HV_BALLOON_OUR_RANGE_MEMSLOTS_H -#include "exec/memory.h" +#include "system/memory.h" #include "qom/object.h" #include "hv-balloon-page_range_tree.h" diff --git a/hw/hyperv/hv-balloon.c b/hw/hyperv/hv-balloon.c index 3a9ef076910..6dbcb2d9a29 100644 --- a/hw/hyperv/hv-balloon.c +++ b/hw/hyperv/hv-balloon.c @@ -10,9 +10,9 @@ #include "qemu/osdep.h" #include "hv-balloon-internal.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "exec/cpu-common.h" -#include "exec/ramblock.h" +#include "system/ramblock.h" #include "hw/boards.h" #include "hw/hyperv/dynmem-proto.h" #include "hw/hyperv/hv-balloon.h" @@ -26,15 +26,15 @@ #include "qapi/qapi-commands-machine.h" #include "qapi/qapi-events-machine.h" #include "qapi/qapi-types-machine.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/visitor.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "qemu/units.h" #include "qemu/timer.h" -#include "sysemu/balloon.h" -#include "sysemu/hostmem.h" -#include "sysemu/reset.h" +#include "system/balloon.h" +#include "system/hostmem.h" +#include "system/reset.h" #include "hv-balloon-our_range_memslots.h" #include "hv-balloon-page_range_tree.h" #include "trace.h" @@ -67,10 +67,6 @@ * these requests */ -struct HvBalloonClass { - VMBusDeviceClass parent_class; -} HvBalloonClass; - typedef enum State { /* not a real state */ S_NO_CHANGE = 0, @@ -162,8 +158,9 @@ typedef struct HvBalloon { MemoryRegion *mr; } HvBalloon; -OBJECT_DEFINE_TYPE_WITH_INTERFACES(HvBalloon, hv_balloon, HV_BALLOON, VMBUS_DEVICE, \ - { TYPE_MEMORY_DEVICE }, 
{ }) +OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(HvBalloon, hv_balloon, \ + HV_BALLOON, VMBUS_DEVICE, \ + { TYPE_MEMORY_DEVICE }, { }) #define HV_BALLOON_SET_STATE(hvb, news) \ do { \ @@ -1733,7 +1730,7 @@ static void hv_balloon_finalize(Object *obj) hv_balloon_unrealize_finalize_common(balloon); } -static Property hv_balloon_properties[] = { +static const Property hv_balloon_properties[] = { DEFINE_PROP_BOOL("status-report", HvBalloon, status_report.enabled, false), @@ -1741,11 +1738,9 @@ static Property hv_balloon_properties[] = { DEFINE_PROP_LINK(HV_BALLOON_MEMDEV_PROP, HvBalloon, hostmem, TYPE_MEMORY_BACKEND, HostMemoryBackend *), DEFINE_PROP_UINT64(HV_BALLOON_ADDR_PROP, HvBalloon, addr, 0), - - DEFINE_PROP_END_OF_LIST(), }; -static void hv_balloon_class_init(ObjectClass *klass, void *data) +static void hv_balloon_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VMBusDeviceClass *vdc = VMBUS_DEVICE_CLASS(klass); diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c index 483dcca3083..e4d0688dbfa 100644 --- a/hw/hyperv/hyperv.c +++ b/hw/hyperv/hyperv.c @@ -11,9 +11,11 @@ #include "qemu/main-loop.h" #include "qemu/module.h" #include "qapi/error.h" -#include "exec/address-spaces.h" -#include "exec/memory.h" -#include "sysemu/kvm.h" +#include "system/address-spaces.h" +#include "system/memory.h" +#include "exec/target_page.h" +#include "linux/kvm.h" +#include "system/kvm.h" #include "qemu/bitops.h" #include "qemu/error-report.h" #include "qemu/lockable.h" @@ -23,8 +25,7 @@ #include "hw/hyperv/hyperv.h" #include "qom/object.h" #include "target/i386/kvm/hyperv-proto.h" -#include "target/i386/cpu.h" -#include "exec/cpu-all.h" +#include "exec/target_page.h" struct SynICState { DeviceState parent_obj; @@ -133,12 +134,12 @@ static void synic_reset(DeviceState *dev) assert(QLIST_EMPTY(&synic->sint_routes)); } -static void synic_class_init(ObjectClass *klass, void *data) +static void synic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = synic_realize; - dc->reset = synic_reset; + device_class_set_legacy_reset(dc, synic_reset); dc->user_creatable = false; } diff --git a/hw/hyperv/hyperv_testdev.c b/hw/hyperv/hyperv_testdev.c index 9a56ddf83fe..2d4a63693b8 100644 --- a/hw/hyperv/hyperv_testdev.c +++ b/hw/hyperv/hyperv_testdev.c @@ -88,8 +88,7 @@ static TestSintRoute *sint_route_find(HypervTestDev *dev, return sint_route; } } - assert(false); - return NULL; + g_assert_not_reached(); } static void sint_route_destroy(HypervTestDev *dev, @@ -187,7 +186,7 @@ static void msg_conn_destroy(HypervTestDev *dev, uint8_t conn_id) return; } } - assert(false); + g_assert_not_reached(); } static void evt_conn_handler(EventNotifier *notifier) @@ -237,7 +236,7 @@ static void evt_conn_destroy(HypervTestDev *dev, uint8_t conn_id) return; } } - assert(false); + g_assert_not_reached(); } static uint64_t hv_test_dev_read(void *opaque, hwaddr addr, unsigned size) @@ -304,7 +303,7 @@ static void hv_test_dev_realizefn(DeviceState *d, Error **errp) memory_region_add_subregion(io, 0x3000, &dev->sint_control); } -static void hv_test_dev_class_init(ObjectClass *klass, void *data) +static void hv_test_dev_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/hyperv/meson.build b/hw/hyperv/meson.build index d3d2668c71a..d1cf781f049 100644 --- a/hw/hyperv/meson.build +++ b/hw/hyperv/meson.build @@ -1,5 +1,6 @@ -specific_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c')) 
-specific_ss.add(when: 'CONFIG_HYPERV_TESTDEV', if_true: files('hyperv_testdev.c')) -specific_ss.add(when: 'CONFIG_VMBUS', if_true: files('vmbus.c')) -specific_ss.add(when: 'CONFIG_SYNDBG', if_true: files('syndbg.c')) -specific_ss.add(when: 'CONFIG_HV_BALLOON', if_true: files('hv-balloon.c', 'hv-balloon-page_range_tree.c', 'hv-balloon-our_range_memslots.c'), if_false: files('hv-balloon-stub.c')) +system_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c')) +system_ss.add(when: 'CONFIG_HYPERV_TESTDEV', if_true: files('hyperv_testdev.c')) +system_ss.add(when: 'CONFIG_VMBUS', if_true: files('vmbus.c')) +system_ss.add(when: 'CONFIG_SYNDBG', if_true: files('syndbg.c')) +system_ss.add(when: 'CONFIG_HV_BALLOON', if_true: files('hv-balloon.c', 'hv-balloon-page_range_tree.c', 'hv-balloon-our_range_memslots.c')) +system_ss.add(when: 'CONFIG_HV_BALLOON', if_false: files('hv-balloon-stub.c')) diff --git a/hw/hyperv/syndbg.c b/hw/hyperv/syndbg.c index 065e12fb1ed..ac7e15f6f1d 100644 --- a/hw/hyperv/syndbg.c +++ b/hw/hyperv/syndbg.c @@ -10,11 +10,12 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/sockets.h" +#include "qemu/units.h" #include "qapi/error.h" #include "migration/vmstate.h" #include "hw/qdev-properties.h" #include "hw/loader.h" -#include "cpu.h" +#include "exec/target_page.h" #include "hw/hyperv/hyperv.h" #include "hw/hyperv/vmbus-bridge.h" #include "hw/hyperv/hyperv-proto.h" @@ -183,12 +184,15 @@ static bool create_udp_pkt(HvSynDbg *syndbg, void *pkt, uint32_t pkt_len, return true; } +#define MSG_BUFSZ (4 * KiB) + static uint16_t handle_recv_msg(HvSynDbg *syndbg, uint64_t outgpa, uint32_t count, bool is_raw, uint32_t options, uint64_t timeout, uint32_t *retrieved_count) { uint16_t ret; - uint8_t data_buf[TARGET_PAGE_SIZE - UDP_PKT_HEADER_SIZE]; + g_assert(MSG_BUFSZ >= qemu_target_page_size()); + QEMU_UNINITIALIZED uint8_t data_buf[MSG_BUFSZ]; hwaddr out_len; void *out_data; ssize_t recv_byte_count; @@ -201,7 +205,7 @@ static uint16_t handle_recv_msg(HvSynDbg *syndbg, uint64_t outgpa, recv_byte_count = 0; } else { recv_byte_count = recv(syndbg->socket, data_buf, - MIN(sizeof(data_buf), count), MSG_WAITALL); + MIN(MSG_BUFSZ, count), MSG_WAITALL); if (recv_byte_count == -1) { return HV_STATUS_INVALID_PARAMETER; } @@ -366,14 +370,13 @@ static const VMStateDescription vmstate_hv_syndbg = { .unmigratable = 1, }; -static Property hv_syndbg_properties[] = { +static const Property hv_syndbg_properties[] = { DEFINE_PROP_STRING("host_ip", HvSynDbg, host_ip), DEFINE_PROP_UINT16("host_port", HvSynDbg, host_port, 50000), DEFINE_PROP_BOOL("use_hcalls", HvSynDbg, use_hcalls, false), - DEFINE_PROP_END_OF_LIST(), }; -static void hv_syndbg_class_init(ObjectClass *klass, void *data) +static void hv_syndbg_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c index 490d805d298..961406cdd6a 100644 --- a/hw/hyperv/vmbus.c +++ b/hw/hyperv/vmbus.c @@ -10,6 +10,7 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" +#include "exec/target_page.h" #include "qapi/error.h" #include "migration/vmstate.h" #include "hw/qdev-properties.h" @@ -18,7 +19,7 @@ #include "hw/hyperv/vmbus.h" #include "hw/hyperv/vmbus-bridge.h" #include "hw/sysbus.h" -#include "cpu.h" +#include "exec/target_page.h" #include "trace.h" enum { @@ -1874,7 +1875,7 @@ static void send_create_gpadl(VMBus *vmbus) } } - assert(false); + g_assert_not_reached(); } static bool complete_create_gpadl(VMBus 
*vmbus) @@ -1889,8 +1890,7 @@ static bool complete_create_gpadl(VMBus *vmbus) } } - assert(false); - return false; + g_assert_not_reached(); } static void handle_gpadl_teardown(VMBus *vmbus, @@ -1931,7 +1931,7 @@ static void send_teardown_gpadl(VMBus *vmbus) } } - assert(false); + g_assert_not_reached(); } static bool complete_teardown_gpadl(VMBus *vmbus) @@ -1946,8 +1946,7 @@ static bool complete_teardown_gpadl(VMBus *vmbus) } } - assert(false); - return false; + g_assert_not_reached(); } static void handle_open_channel(VMBus *vmbus, vmbus_message_open_channel *msg, @@ -1996,7 +1995,7 @@ static void send_open_channel(VMBus *vmbus) } } - assert(false); + g_assert_not_reached(); } static bool complete_open_channel(VMBus *vmbus) @@ -2020,8 +2019,7 @@ static bool complete_open_channel(VMBus *vmbus) } } - assert(false); - return false; + g_assert_not_reached(); } static void vdev_reset_on_close(VMBusDevice *vdev) @@ -2076,7 +2074,6 @@ static void send_unload(VMBus *vmbus) qemu_mutex_unlock(&vmbus->rx_queue_lock); post_msg(vmbus, &msg, sizeof(msg)); - return; } static bool complete_unload(VMBus *vmbus) @@ -2349,20 +2346,19 @@ static void vmbus_dev_unrealize(DeviceState *dev) free_channels(vdev); } -static Property vmbus_dev_props[] = { +static const Property vmbus_dev_props[] = { DEFINE_PROP_UUID("instanceid", VMBusDevice, instanceid), - DEFINE_PROP_END_OF_LIST() }; -static void vmbus_dev_class_init(ObjectClass *klass, void *data) +static void vmbus_dev_class_init(ObjectClass *klass, const void *data) { DeviceClass *kdev = DEVICE_CLASS(klass); device_class_set_props(kdev, vmbus_dev_props); kdev->bus_type = TYPE_VMBUS; kdev->realize = vmbus_dev_realize; kdev->unrealize = vmbus_dev_unrealize; - kdev->reset = vmbus_dev_reset; + device_class_set_legacy_reset(kdev, vmbus_dev_reset); } static void vmbus_dev_instance_init(Object *obj) @@ -2473,7 +2469,7 @@ static char *vmbus_get_fw_dev_path(DeviceState *dev) return g_strdup_printf("%s@%s", qdev_fw_name(dev), uuid); } -static void vmbus_class_init(ObjectClass *klass, void *data) +static void vmbus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -2656,12 +2652,11 @@ static const VMStateDescription vmstate_vmbus_bridge = { }, }; -static Property vmbus_bridge_props[] = { +static const Property vmbus_bridge_props[] = { DEFINE_PROP_UINT8("irq", VMBusBridge, irq, 7), - DEFINE_PROP_END_OF_LIST() }; -static void vmbus_bridge_class_init(ObjectClass *klass, void *data) +static void vmbus_bridge_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); SysBusDeviceClass *sk = SYS_BUS_DEVICE_CLASS(klass); diff --git a/hw/i2c/allwinner-i2c.c b/hw/i2c/allwinner-i2c.c index 16f1d6d40e7..fe887e1c6a9 100644 --- a/hw/i2c/allwinner-i2c.c +++ b/hw/i2c/allwinner-i2c.c @@ -407,7 +407,7 @@ static const MemoryRegionOps allwinner_i2c_ops = { .write = allwinner_i2c_write, .valid.min_access_size = 1, .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; static const VMStateDescription allwinner_i2c_vmstate = { @@ -438,7 +438,7 @@ static void allwinner_i2c_realize(DeviceState *dev, Error **errp) s->bus = i2c_init_bus(dev, "i2c"); } -static void allwinner_i2c_class_init(ObjectClass *klass, void *data) +static void allwinner_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/i2c/aspeed_i2c.c 
b/hw/i2c/aspeed_i2c.c index b52a99896c5..83fb906bdc7 100644 --- a/hw/i2c/aspeed_i2c.c +++ b/hw/i2c/aspeed_i2c.c @@ -114,7 +114,10 @@ static uint64_t aspeed_i2c_bus_old_read(AspeedI2CBus *bus, hwaddr offset, if (!aic->has_dma) { qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA support\n", __func__); value = -1; + break; } + + value = extract64(bus->dma_dram_offset, 0, 32); break; case A_I2CD_DMA_LEN: if (!aic->has_dma) { @@ -137,6 +140,7 @@ static uint64_t aspeed_i2c_bus_old_read(AspeedI2CBus *bus, hwaddr offset, static uint64_t aspeed_i2c_bus_new_read(AspeedI2CBus *bus, hwaddr offset, unsigned size) { + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller); uint64_t value = bus->regs[offset / sizeof(*bus->regs)]; switch (offset) { @@ -150,9 +154,7 @@ static uint64_t aspeed_i2c_bus_new_read(AspeedI2CBus *bus, hwaddr offset, case A_I2CM_DMA_TX_ADDR: case A_I2CM_DMA_RX_ADDR: case A_I2CM_DMA_LEN_STS: - case A_I2CC_DMA_ADDR: case A_I2CC_DMA_LEN: - case A_I2CS_DEV_ADDR: case A_I2CS_DMA_RX_ADDR: case A_I2CS_DMA_LEN: @@ -161,11 +163,24 @@ static uint64_t aspeed_i2c_bus_new_read(AspeedI2CBus *bus, hwaddr offset, case A_I2CS_DMA_LEN_STS: /* Value is already set, don't do anything. */ break; + case A_I2CC_DMA_ADDR: + value = extract64(bus->dma_dram_offset, 0, 32); + break; case A_I2CS_INTR_STS: break; case A_I2CM_CMD: value = SHARED_FIELD_DP32(value, BUS_BUSY_STS, i2c_bus_busy(bus->bus)); break; + case A_I2CM_DMA_TX_ADDR_HI: + case A_I2CM_DMA_RX_ADDR_HI: + case A_I2CS_DMA_TX_ADDR_HI: + case A_I2CS_DMA_RX_ADDR_HI: + if (!aic->has_dma64) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA 64 bits support\n", + __func__); + value = -1; + } + break; default: qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); @@ -210,18 +225,18 @@ static int aspeed_i2c_dma_read(AspeedI2CBus *bus, uint8_t *data) { MemTxResult result; AspeedI2CState *s = bus->controller; - uint32_t reg_dma_addr = aspeed_i2c_bus_dma_addr_offset(bus); uint32_t reg_dma_len = aspeed_i2c_bus_dma_len_offset(bus); - result = address_space_read(&s->dram_as, bus->regs[reg_dma_addr], + result = address_space_read(&s->dram_as, bus->dma_dram_offset, MEMTXATTRS_UNSPECIFIED, data, 1); if (result != MEMTX_OK) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM read failed @%08x\n", - __func__, bus->regs[reg_dma_addr]); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: DRAM read failed @%" PRIx64 "\n", + __func__, bus->dma_dram_offset); return -1; } - bus->regs[reg_dma_addr]++; + bus->dma_dram_offset++; bus->regs[reg_dma_len]--; return 0; } @@ -291,7 +306,6 @@ static void aspeed_i2c_bus_recv(AspeedI2CBus *bus) uint32_t reg_pool_ctrl = aspeed_i2c_bus_pool_ctrl_offset(bus); uint32_t reg_byte_buf = aspeed_i2c_bus_byte_buf_offset(bus); uint32_t reg_dma_len = aspeed_i2c_bus_dma_len_offset(bus); - uint32_t reg_dma_addr = aspeed_i2c_bus_dma_addr_offset(bus); int pool_rx_count = SHARED_ARRAY_FIELD_EX32(bus->regs, reg_pool_ctrl, RX_SIZE) + 1; @@ -323,14 +337,17 @@ static void aspeed_i2c_bus_recv(AspeedI2CBus *bus) data = i2c_recv(bus->bus); trace_aspeed_i2c_bus_recv("DMA", bus->regs[reg_dma_len], bus->regs[reg_dma_len], data); - result = address_space_write(&s->dram_as, bus->regs[reg_dma_addr], + + result = address_space_write(&s->dram_as, bus->dma_dram_offset, MEMTXATTRS_UNSPECIFIED, &data, 1); if (result != MEMTX_OK) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM write failed @%08x\n", - __func__, bus->regs[reg_dma_addr]); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: DRAM write failed @%" PRIx64 "\n", + __func__, bus->dma_dram_offset); return; } - 
bus->regs[reg_dma_addr]++; + + bus->dma_dram_offset++; bus->regs[reg_dma_len]--; /* In new mode, keep track of how many bytes we RXed */ if (aspeed_i2c_is_new_mode(bus->controller)) { @@ -636,14 +653,18 @@ static void aspeed_i2c_bus_new_write(AspeedI2CBus *bus, hwaddr offset, case A_I2CM_DMA_TX_ADDR: bus->regs[R_I2CM_DMA_TX_ADDR] = FIELD_EX32(value, I2CM_DMA_TX_ADDR, ADDR); - bus->regs[R_I2CC_DMA_ADDR] = FIELD_EX32(value, I2CM_DMA_TX_ADDR, ADDR); + bus->dma_dram_offset = + deposit64(bus->dma_dram_offset, 0, 32, + FIELD_EX32(value, I2CM_DMA_TX_ADDR, ADDR)); bus->regs[R_I2CC_DMA_LEN] = ARRAY_FIELD_EX32(bus->regs, I2CM_DMA_LEN, TX_BUF_LEN) + 1; break; case A_I2CM_DMA_RX_ADDR: bus->regs[R_I2CM_DMA_RX_ADDR] = FIELD_EX32(value, I2CM_DMA_RX_ADDR, ADDR); - bus->regs[R_I2CC_DMA_ADDR] = FIELD_EX32(value, I2CM_DMA_RX_ADDR, ADDR); + bus->dma_dram_offset = + deposit64(bus->dma_dram_offset, 0, 32, + FIELD_EX32(value, I2CM_DMA_RX_ADDR, ADDR)); bus->regs[R_I2CC_DMA_LEN] = ARRAY_FIELD_EX32(bus->regs, I2CM_DMA_LEN, RX_BUF_LEN) + 1; break; @@ -721,6 +742,56 @@ static void aspeed_i2c_bus_new_write(AspeedI2CBus *bus, hwaddr offset, qemu_log_mask(LOG_UNIMP, "%s: Slave mode DMA TX is not implemented\n", __func__); break; + + /* + * The AST2700 supports a maximum DRAM size of 8 GB. + * The DRAM offset range is from 0x0_0000_0000 to + * 0x1_FFFF_FFFF, so bits [33:0] are enough to hold + * the DRAM offset. + * Therefore, save bits [1:0] of the high Tx/Rx buffer + * address as dma_dram_offset bits [33:32]. + */ + case A_I2CM_DMA_TX_ADDR_HI: + if (!aic->has_dma64) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA 64 bits support\n", + __func__); + break; + } + bus->regs[R_I2CM_DMA_TX_ADDR_HI] = FIELD_EX32(value, + I2CM_DMA_TX_ADDR_HI, + ADDR_HI); + bus->dma_dram_offset = deposit64(bus->dma_dram_offset, 32, 32, + extract32(value, 0, 2)); + break; + case A_I2CM_DMA_RX_ADDR_HI: + if (!aic->has_dma64) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA 64 bits support\n", + __func__); + break; + } + bus->regs[R_I2CM_DMA_RX_ADDR_HI] = FIELD_EX32(value, + I2CM_DMA_RX_ADDR_HI, + ADDR_HI); + bus->dma_dram_offset = deposit64(bus->dma_dram_offset, 32, 32, + extract32(value, 0, 2)); + break; + case A_I2CS_DMA_TX_ADDR_HI: + qemu_log_mask(LOG_UNIMP, + "%s: Slave mode DMA TX Addr high is not implemented\n", + __func__); + break; + case A_I2CS_DMA_RX_ADDR_HI: + if (!aic->has_dma64) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA 64 bits support\n", + __func__); + break; + } + bus->regs[R_I2CS_DMA_RX_ADDR_HI] = FIELD_EX32(value, + I2CS_DMA_RX_ADDR_HI, + ADDR_HI); + bus->dma_dram_offset = deposit64(bus->dma_dram_offset, 32, 32, + extract32(value, 0, 2)); + break; default: qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); @@ -811,7 +882,8 @@ static void aspeed_i2c_bus_old_write(AspeedI2CBus *bus, hwaddr offset, break; } - bus->regs[R_I2CD_DMA_ADDR] = value & 0x3ffffffc; + bus->dma_dram_offset = deposit64(bus->dma_dram_offset, 0, 32, + value & 0x3ffffffc); break; case A_I2CD_DMA_LEN: @@ -941,12 +1013,49 @@ static const MemoryRegionOps aspeed_i2c_share_pool_ops = { }, }; +static uint64_t aspeed_i2c_bus_pool_read(void *opaque, hwaddr offset, + unsigned size) +{ + AspeedI2CBus *s = opaque; + uint64_t ret = 0; + int i; + + for (i = 0; i < size; i++) { + ret |= (uint64_t) s->pool[offset + i] << (8 * i); + } + + return ret; +} + +static void aspeed_i2c_bus_pool_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + AspeedI2CBus *s = opaque; + int i; + + for (i = 0; i
< size; i++) { + s->pool[offset + i] = (value >> (8 * i)) & 0xFF; + } +} + +static const MemoryRegionOps aspeed_i2c_bus_pool_ops = { + .read = aspeed_i2c_bus_pool_read, + .write = aspeed_i2c_bus_pool_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + static const VMStateDescription aspeed_i2c_bus_vmstate = { .name = TYPE_ASPEED_I2C, - .version_id = 5, - .minimum_version_id = 5, + .version_id = 6, + .minimum_version_id = 6, .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedI2CBus, ASPEED_I2C_NEW_NUM_REG), + VMSTATE_UINT8_ARRAY(pool, AspeedI2CBus, ASPEED_I2C_BUS_POOL_SIZE), + VMSTATE_UINT64(dma_dram_offset, AspeedI2CBus), VMSTATE_END_OF_LIST() } }; @@ -996,7 +1105,21 @@ static void aspeed_i2c_instance_init(Object *obj) * 0x140 ... 0x17F: Device 5 * 0x180 ... 0x1BF: Device 6 * 0x1C0 ... 0x1FF: Device 7 - * 0x200 ... 0x2FF: Buffer Pool (AST2500 unused in linux driver) + * 0x200 ... 0x20F: Device 1 buffer (AST2500 unused in linux driver) + * 0x210 ... 0x21F: Device 2 buffer + * 0x220 ... 0x22F: Device 3 buffer + * 0x230 ... 0x23F: Device 4 buffer + * 0x240 ... 0x24F: Device 5 buffer + * 0x250 ... 0x25F: Device 6 buffer + * 0x260 ... 0x26F: Device 7 buffer + * 0x270 ... 0x27F: Device 8 buffer + * 0x280 ... 0x28F: Device 9 buffer + * 0x290 ... 0x29F: Device 10 buffer + * 0x2A0 ... 0x2AF: Device 11 buffer + * 0x2B0 ... 0x2BF: Device 12 buffer + * 0x2C0 ... 0x2CF: Device 13 buffer + * 0x2D0 ... 0x2DF: Device 14 buffer + * 0x2E0 ... 0x2FF: Reserved * 0x300 ... 0x33F: Device 8 * 0x340 ... 0x37F: Device 9 * 0x380 ... 0x3BF: Device 10 @@ -1005,6 +1128,76 @@ static void aspeed_i2c_instance_init(Object *obj) * 0x440 ... 0x47F: Device 13 * 0x480 ... 0x4BF: Device 14 * 0x800 ... 0xFFF: Buffer Pool (AST2400 unused in linux driver) + * + * Address Definitions (AST2600 and AST1030) + * 0x000 ... 0x07F: Global Register + * 0x080 ... 0x0FF: Device 1 + * 0x100 ... 0x17F: Device 2 + * 0x180 ... 0x1FF: Device 3 + * 0x200 ... 0x27F: Device 4 + * 0x280 ... 0x2FF: Device 5 + * 0x300 ... 0x37F: Device 6 + * 0x380 ... 0x3FF: Device 7 + * 0x400 ... 0x47F: Device 8 + * 0x480 ... 0x4FF: Device 9 + * 0x500 ... 0x57F: Device 10 + * 0x580 ... 0x5FF: Device 11 + * 0x600 ... 0x67F: Device 12 + * 0x680 ... 0x6FF: Device 13 + * 0x700 ... 0x77F: Device 14 + * 0x780 ... 0x7FF: Device 15 (15 and 16 unused in AST1030) + * 0x800 ... 0x87F: Device 16 + * 0xC00 ... 0xC1F: Device 1 buffer + * 0xC20 ... 0xC3F: Device 2 buffer + * 0xC40 ... 0xC5F: Device 3 buffer + * 0xC60 ... 0xC7F: Device 4 buffer + * 0xC80 ... 0xC9F: Device 5 buffer + * 0xCA0 ... 0xCBF: Device 6 buffer + * 0xCC0 ... 0xCDF: Device 7 buffer + * 0xCE0 ... 0xCFF: Device 8 buffer + * 0xD00 ... 0xD1F: Device 9 buffer + * 0xD20 ... 0xD3F: Device 10 buffer + * 0xD40 ... 0xD5F: Device 11 buffer + * 0xD60 ... 0xD7F: Device 12 buffer + * 0xD80 ... 0xD9F: Device 13 buffer + * 0xDA0 ... 0xDBF: Device 14 buffer + * 0xDC0 ... 0xDDF: Device 15 buffer (15 and 16 unused in AST1030) + * 0xDE0 ... 0xDFF: Device 16 buffer + * + * Address Definitions (AST2700) + * 0x000 ... 0x0FF: Global Register + * 0x100 ... 0x17F: Device 0 + * 0x1A0 ... 0x1BF: Device 0 buffer + * 0x200 ... 0x27F: Device 1 + * 0x2A0 ... 0x2BF: Device 1 buffer + * 0x300 ... 0x37F: Device 2 + * 0x3A0 ... 0x3BF: Device 2 buffer + * 0x400 ... 0x47F: Device 3 + * 0x4A0 ... 0x4BF: Device 3 buffer + * 0x500 ... 0x57F: Device 4 + * 0x5A0 ... 0x5BF: Device 4 buffer + * 0x600 ... 0x67F: Device 5 + * 0x6A0 ... 0x6BF: Device 5 buffer + * 0x700 ... 
0x77F: Device 6 + * 0x7A0 ... 0x7BF: Device 6 buffer + * 0x800 ... 0x87F: Device 7 + * 0x8A0 ... 0x8BF: Device 7 buffer + * 0x900 ... 0x97F: Device 8 + * 0x9A0 ... 0x9BF: Device 8 buffer + * 0xA00 ... 0xA7F: Device 9 + * 0xAA0 ... 0xABF: Device 9 buffer + * 0xB00 ... 0xB7F: Device 10 + * 0xBA0 ... 0xBBF: Device 10 buffer + * 0xC00 ... 0xC7F: Device 11 + * 0xCA0 ... 0xCBF: Device 11 buffer + * 0xD00 ... 0xD7F: Device 12 + * 0xDA0 ... 0xDBF: Device 12 buffer + * 0xE00 ... 0xE7F: Device 13 + * 0xEA0 ... 0xEBF: Device 13 buffer + * 0xF00 ... 0xF7F: Device 14 + * 0xFA0 ... 0xFBF: Device 14 buffer + * 0x1000 ... 0x107F: Device 15 + * 0x10A0 ... 0x10BF: Device 15 buffer */ static void aspeed_i2c_realize(DeviceState *dev, Error **errp) { @@ -1012,6 +1205,8 @@ static void aspeed_i2c_realize(DeviceState *dev, Error **errp) SysBusDevice *sbd = SYS_BUS_DEVICE(dev); AspeedI2CState *s = ASPEED_I2C(dev); AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(s); + uint32_t reg_offset = aic->reg_size + aic->reg_gap_size; + uint32_t pool_offset = aic->pool_size + aic->pool_gap_size; sysbus_init_irq(sbd, &s->irq); memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_i2c_ctrl_ops, s, @@ -1034,14 +1229,23 @@ static void aspeed_i2c_realize(DeviceState *dev, Error **errp) return; } - memory_region_add_subregion(&s->iomem, aic->reg_size * (i + offset), + memory_region_add_subregion(&s->iomem, reg_offset * (i + offset), &s->busses[i].mr); } - memory_region_init_io(&s->pool_iomem, OBJECT(s), - &aspeed_i2c_share_pool_ops, s, - "aspeed.i2c-share-pool", aic->pool_size); - memory_region_add_subregion(&s->iomem, aic->pool_base, &s->pool_iomem); + if (aic->has_share_pool) { + memory_region_init_io(&s->pool_iomem, OBJECT(s), + &aspeed_i2c_share_pool_ops, s, + "aspeed.i2c-share-pool", aic->pool_size); + memory_region_add_subregion(&s->iomem, aic->pool_base, + &s->pool_iomem); + } else { + for (i = 0; i < aic->num_busses; i++) { + memory_region_add_subregion(&s->iomem, + aic->pool_base + (pool_offset * i), + &s->busses[i].mr_pool); + } + } if (aic->has_dma) { if (!s->dram_mr) { @@ -1054,18 +1258,17 @@ static void aspeed_i2c_realize(DeviceState *dev, Error **errp) } } -static Property aspeed_i2c_properties[] = { +static const Property aspeed_i2c_properties[] = { DEFINE_PROP_LINK("dram", AspeedI2CState, dram_mr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_i2c_class_init(ObjectClass *klass, void *data) +static void aspeed_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &aspeed_i2c_vmstate; - dc->reset = aspeed_i2c_reset; + device_class_set_legacy_reset(dc, aspeed_i2c_reset); device_class_set_props(dc, aspeed_i2c_properties); dc->realize = aspeed_i2c_realize; dc->desc = "Aspeed I2C Controller"; @@ -1092,8 +1295,9 @@ static int aspeed_i2c_bus_new_slave_event(AspeedI2CBus *bus, return -1; } ARRAY_FIELD_DP32(bus->regs, I2CS_DMA_LEN_STS, RX_LEN, 0); - bus->regs[R_I2CC_DMA_ADDR] = - ARRAY_FIELD_EX32(bus->regs, I2CS_DMA_RX_ADDR, ADDR); + bus->dma_dram_offset = + deposit64(bus->dma_dram_offset, 0, 32, + ARRAY_FIELD_EX32(bus->regs, I2CS_DMA_RX_ADDR, ADDR)); bus->regs[R_I2CC_DMA_LEN] = ARRAY_FIELD_EX32(bus->regs, I2CS_DMA_LEN, RX_BUF_LEN) + 1; i2c_ack(bus->bus); @@ -1159,10 +1363,10 @@ static int aspeed_i2c_bus_slave_event(I2CSlave *slave, enum i2c_event event) static void aspeed_i2c_bus_new_slave_send_async(AspeedI2CBus *bus, uint8_t data) { assert(address_space_write(&bus->controller->dram_as, - bus->regs[R_I2CC_DMA_ADDR], + bus->dma_dram_offset, 
MEMTXATTRS_UNSPECIFIED, &data, 1) == MEMTX_OK); - bus->regs[R_I2CC_DMA_ADDR]++; + bus->dma_dram_offset++; bus->regs[R_I2CC_DMA_LEN]--; ARRAY_FIELD_DP32(bus->regs, I2CS_DMA_LEN_STS, RX_LEN, ARRAY_FIELD_EX32(bus->regs, I2CS_DMA_LEN_STS, RX_LEN) + 1); @@ -1186,7 +1390,8 @@ static void aspeed_i2c_bus_slave_send_async(I2CSlave *slave, uint8_t data) aspeed_i2c_bus_raise_interrupt(bus); } -static void aspeed_i2c_bus_slave_class_init(ObjectClass *klass, void *data) +static void aspeed_i2c_bus_slave_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); @@ -1217,6 +1422,7 @@ static void aspeed_i2c_bus_realize(DeviceState *dev, Error **errp) AspeedI2CBus *s = ASPEED_I2C_BUS(dev); AspeedI2CClass *aic; g_autofree char *name = g_strdup_printf(TYPE_ASPEED_I2C_BUS ".%d", s->id); + g_autofree char *pool_name = g_strdup_printf("%s.pool", name); if (!s->controller) { error_setg(errp, TYPE_ASPEED_I2C_BUS ": 'controller' link not set"); @@ -1234,22 +1440,25 @@ static void aspeed_i2c_bus_realize(DeviceState *dev, Error **errp) memory_region_init_io(&s->mr, OBJECT(s), &aspeed_i2c_bus_ops, s, name, aic->reg_size); sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mr); + + memory_region_init_io(&s->mr_pool, OBJECT(s), &aspeed_i2c_bus_pool_ops, + s, pool_name, aic->pool_size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mr_pool); } -static Property aspeed_i2c_bus_properties[] = { +static const Property aspeed_i2c_bus_properties[] = { DEFINE_PROP_UINT8("bus-id", AspeedI2CBus, id, 0), DEFINE_PROP_LINK("controller", AspeedI2CBus, controller, TYPE_ASPEED_I2C, AspeedI2CState *), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_i2c_bus_class_init(ObjectClass *klass, void *data) +static void aspeed_i2c_bus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->desc = "Aspeed I2C Bus"; dc->realize = aspeed_i2c_bus_realize; - dc->reset = aspeed_i2c_bus_reset; + device_class_set_legacy_reset(dc, aspeed_i2c_bus_reset); device_class_set_props(dc, aspeed_i2c_bus_properties); } @@ -1275,7 +1484,7 @@ static uint8_t *aspeed_2400_i2c_bus_pool_base(AspeedI2CBus *bus) return &pool_page[ARRAY_FIELD_EX32(bus->regs, I2CD_POOL_CTRL, OFFSET)]; } -static void aspeed_2400_i2c_class_init(ObjectClass *klass, void *data) +static void aspeed_2400_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass); @@ -1286,6 +1495,7 @@ static void aspeed_2400_i2c_class_init(ObjectClass *klass, void *data) aic->reg_size = 0x40; aic->gap = 7; aic->bus_get_irq = aspeed_2400_i2c_bus_get_irq; + aic->has_share_pool = true; aic->pool_size = 0x800; aic->pool_base = 0x800; aic->bus_pool_base = aspeed_2400_i2c_bus_pool_base; @@ -1305,10 +1515,10 @@ static qemu_irq aspeed_2500_i2c_bus_get_irq(AspeedI2CBus *bus) static uint8_t *aspeed_2500_i2c_bus_pool_base(AspeedI2CBus *bus) { - return &bus->controller->share_pool[bus->id * 0x10]; + return bus->pool; } -static void aspeed_2500_i2c_class_init(ObjectClass *klass, void *data) +static void aspeed_2500_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass); @@ -1319,7 +1529,7 @@ static void aspeed_2500_i2c_class_init(ObjectClass *klass, void *data) aic->reg_size = 0x40; aic->gap = 7; aic->bus_get_irq = aspeed_2500_i2c_bus_get_irq; - aic->pool_size = 0x100; + aic->pool_size = 0x10; aic->pool_base = 0x200; aic->bus_pool_base = 
aspeed_2500_i2c_bus_pool_base; aic->check_sram = true; @@ -1338,12 +1548,7 @@ static qemu_irq aspeed_2600_i2c_bus_get_irq(AspeedI2CBus *bus) return bus->irq; } -static uint8_t *aspeed_2600_i2c_bus_pool_base(AspeedI2CBus *bus) -{ - return &bus->controller->share_pool[bus->id * 0x20]; -} - -static void aspeed_2600_i2c_class_init(ObjectClass *klass, void *data) +static void aspeed_2600_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass); @@ -1354,9 +1559,9 @@ static void aspeed_2600_i2c_class_init(ObjectClass *klass, void *data) aic->reg_size = 0x80; aic->gap = -1; /* no gap */ aic->bus_get_irq = aspeed_2600_i2c_bus_get_irq; - aic->pool_size = 0x200; + aic->pool_size = 0x20; aic->pool_base = 0xC00; - aic->bus_pool_base = aspeed_2600_i2c_bus_pool_base; + aic->bus_pool_base = aspeed_2500_i2c_bus_pool_base; aic->has_dma = true; aic->mem_size = 0x1000; } @@ -1367,7 +1572,7 @@ static const TypeInfo aspeed_2600_i2c_info = { .class_init = aspeed_2600_i2c_class_init, }; -static void aspeed_1030_i2c_class_init(ObjectClass *klass, void *data) +static void aspeed_1030_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass); @@ -1378,9 +1583,9 @@ static void aspeed_1030_i2c_class_init(ObjectClass *klass, void *data) aic->reg_size = 0x80; aic->gap = -1; /* no gap */ aic->bus_get_irq = aspeed_2600_i2c_bus_get_irq; - aic->pool_size = 0x200; + aic->pool_size = 0x20; aic->pool_base = 0xC00; - aic->bus_pool_base = aspeed_2600_i2c_bus_pool_base; + aic->bus_pool_base = aspeed_2500_i2c_bus_pool_base; aic->has_dma = true; aic->mem_size = 0x10000; } @@ -1391,6 +1596,33 @@ static const TypeInfo aspeed_1030_i2c_info = { .class_init = aspeed_1030_i2c_class_init, }; +static void aspeed_2700_i2c_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass); + + dc->desc = "ASPEED 2700 I2C Controller"; + + aic->num_busses = 16; + aic->reg_size = 0x80; + aic->reg_gap_size = 0x80; + aic->gap = -1; /* no gap */ + aic->bus_get_irq = aspeed_2600_i2c_bus_get_irq; + aic->pool_size = 0x20; + aic->pool_gap_size = 0xe0; + aic->pool_base = 0x1a0; + aic->bus_pool_base = aspeed_2500_i2c_bus_pool_base; + aic->has_dma = true; + aic->mem_size = 0x2000; + aic->has_dma64 = true; +} + +static const TypeInfo aspeed_2700_i2c_info = { + .name = TYPE_ASPEED_2700_I2C, + .parent = TYPE_ASPEED_I2C, + .class_init = aspeed_2700_i2c_class_init, +}; + static void aspeed_i2c_register_types(void) { type_register_static(&aspeed_i2c_bus_info); @@ -1400,6 +1632,7 @@ static void aspeed_i2c_register_types(void) type_register_static(&aspeed_2500_i2c_info); type_register_static(&aspeed_2600_i2c_info); type_register_static(&aspeed_1030_i2c_info); + type_register_static(&aspeed_2700_i2c_info); } type_init(aspeed_i2c_register_types) diff --git a/hw/i2c/bcm2835_i2c.c b/hw/i2c/bcm2835_i2c.c index 20ec46eeabc..be11cca2a93 100644 --- a/hw/i2c/bcm2835_i2c.c +++ b/hw/i2c/bcm2835_i2c.c @@ -258,11 +258,11 @@ static const VMStateDescription vmstate_bcm2835_i2c = { } }; -static void bcm2835_i2c_class_init(ObjectClass *klass, void *data) +static void bcm2835_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = bcm2835_i2c_reset; + device_class_set_legacy_reset(dc, bcm2835_i2c_reset); dc->realize = bcm2835_i2c_realize; dc->vmsd = &vmstate_bcm2835_i2c; } diff --git 
a/hw/i2c/bitbang_i2c.c b/hw/i2c/bitbang_i2c.c index de5f5aacf54..e020f314e2a 100644 --- a/hw/i2c/bitbang_i2c.c +++ b/hw/i2c/bitbang_i2c.c @@ -222,7 +222,7 @@ static void gpio_i2c_init(Object *obj) qdev_init_gpio_out(dev, &s->out, 1); } -static void gpio_i2c_class_init(ObjectClass *klass, void *data) +static void gpio_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/i2c/core.c b/hw/i2c/core.c index 4cf30b2c863..4b6345b5889 100644 --- a/hw/i2c/core.c +++ b/hw/i2c/core.c @@ -18,9 +18,8 @@ #define I2C_BROADCAST 0x00 -static Property i2c_props[] = { +static const Property i2c_props[] = { DEFINE_PROP_UINT8("address", struct I2CSlave, address, 0), - DEFINE_PROP_END_OF_LIST(), }; static const TypeInfo i2c_bus_info = { @@ -402,7 +401,7 @@ static bool i2c_slave_match(I2CSlave *candidate, uint8_t address, return false; } -static void i2c_slave_class_init(ObjectClass *klass, void *data) +static void i2c_slave_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); diff --git a/hw/i2c/exynos4210_i2c.c b/hw/i2c/exynos4210_i2c.c index 9445424d5fd..9d0c1cdaa8a 100644 --- a/hw/i2c/exynos4210_i2c.c +++ b/hw/i2c/exynos4210_i2c.c @@ -309,12 +309,12 @@ static void exynos4210_i2c_init(Object *obj) s->bus = i2c_init_bus(dev, "i2c"); } -static void exynos4210_i2c_class_init(ObjectClass *klass, void *data) +static void exynos4210_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &exynos4210_i2c_vmstate; - dc->reset = exynos4210_i2c_reset; + device_class_set_legacy_reset(dc, exynos4210_i2c_reset); } static const TypeInfo exynos4210_i2c_type_info = { diff --git a/hw/i2c/i2c_mux_pca954x.c b/hw/i2c/i2c_mux_pca954x.c index db5db956a6b..a8ef640cd25 100644 --- a/hw/i2c/i2c_mux_pca954x.c +++ b/hw/i2c/i2c_mux_pca954x.c @@ -172,13 +172,13 @@ I2CBus *pca954x_i2c_get_bus(I2CSlave *mux, uint8_t channel) return pca954x->bus[channel]; } -static void pca9546_class_init(ObjectClass *klass, void *data) +static void pca9546_class_init(ObjectClass *klass, const void *data) { Pca954xClass *s = PCA954X_CLASS(klass); s->nchans = PCA9546_CHANNEL_COUNT; } -static void pca9548_class_init(ObjectClass *klass, void *data) +static void pca9548_class_init(ObjectClass *klass, const void *data) { Pca954xClass *s = PCA954X_CLASS(klass); s->nchans = PCA9548_CHANNEL_COUNT; @@ -211,12 +211,11 @@ static void pca954x_init(Object *obj) } } -static Property pca954x_props[] = { +static const Property pca954x_props[] = { DEFINE_PROP_STRING("name", Pca954xState, name), - DEFINE_PROP_END_OF_LIST() }; -static void pca954x_class_init(ObjectClass *klass, void *data) +static void pca954x_class_init(ObjectClass *klass, const void *data) { I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/i2c/imx_i2c.c b/hw/i2c/imx_i2c.c index a25676f0254..d26177c85df 100644 --- a/hw/i2c/imx_i2c.c +++ b/hw/i2c/imx_i2c.c @@ -25,18 +25,7 @@ #include "hw/i2c/i2c.h" #include "qemu/log.h" #include "qemu/module.h" - -#ifndef DEBUG_IMX_I2C -#define DEBUG_IMX_I2C 0 -#endif - -#define DPRINTF(fmt, args...) 
\ - do { \ - if (DEBUG_IMX_I2C) { \ - fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_I2C, \ - __func__, ##args); \ - } \ - } while (0) +#include "trace.h" static const char *imx_i2c_get_regname(unsigned offset) { @@ -90,13 +79,12 @@ static void imx_i2c_reset(DeviceState *dev) static inline void imx_i2c_raise_interrupt(IMXI2CState *s) { - /* - * raise an interrupt if the device is enabled and it is configured - * to generate some interrupts. - */ - if (imx_i2c_is_enabled(s) && imx_i2c_interrupt_is_enabled(s)) { + if (imx_i2c_is_enabled(s)) { s->i2sr |= I2SR_IIF; - qemu_irq_raise(s->irq); + + if (imx_i2c_interrupt_is_enabled(s)) { + qemu_irq_raise(s->irq); + } } } @@ -152,8 +140,8 @@ static uint64_t imx_i2c_read(void *opaque, hwaddr offset, break; } - DPRINTF("read %s [0x%" HWADDR_PRIx "] -> 0x%02x\n", - imx_i2c_get_regname(offset), offset, value); + trace_imx_i2c_read(DEVICE(s)->canonical_path, imx_i2c_get_regname(offset), + offset, value); return (uint64_t)value; } @@ -163,8 +151,8 @@ static void imx_i2c_write(void *opaque, hwaddr offset, { IMXI2CState *s = IMX_I2C(opaque); - DPRINTF("write %s [0x%" HWADDR_PRIx "] <- 0x%02x\n", - imx_i2c_get_regname(offset), offset, (int)value); + trace_imx_i2c_write(DEVICE(s)->canonical_path, imx_i2c_get_regname(offset), + offset, value); value &= 0xff; @@ -308,12 +296,12 @@ static void imx_i2c_realize(DeviceState *dev, Error **errp) s->bus = i2c_init_bus(dev, NULL); } -static void imx_i2c_class_init(ObjectClass *klass, void *data) +static void imx_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &imx_i2c_vmstate; - dc->reset = imx_i2c_reset; + device_class_set_legacy_reset(dc, imx_i2c_reset); dc->realize = imx_i2c_realize; dc->desc = "i.MX I2C Controller"; } diff --git a/hw/i2c/microbit_i2c.c b/hw/i2c/microbit_i2c.c index 24d36d15b09..2291d6370e2 100644 --- a/hw/i2c/microbit_i2c.c +++ b/hw/i2c/microbit_i2c.c @@ -105,12 +105,12 @@ static void microbit_i2c_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->iomem); } -static void microbit_i2c_class_init(ObjectClass *klass, void *data) +static void microbit_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &microbit_i2c_vmstate; - dc->reset = microbit_i2c_reset; + device_class_set_legacy_reset(dc, microbit_i2c_reset); dc->realize = microbit_i2c_realize; dc->desc = "Microbit I2C controller"; } diff --git a/hw/i2c/mpc_i2c.c b/hw/i2c/mpc_i2c.c index 06d4ce7d68d..25f91b7bc82 100644 --- a/hw/i2c/mpc_i2c.c +++ b/hw/i2c/mpc_i2c.c @@ -20,10 +20,10 @@ #include "qemu/osdep.h" #include "hw/i2c/i2c.h" #include "hw/irq.h" -#include "qemu/module.h" #include "hw/sysbus.h" #include "migration/vmstate.h" #include "qom/object.h" +#include "trace.h" /* #define DEBUG_I2C */ @@ -224,8 +224,8 @@ static uint64_t mpc_i2c_read(void *opaque, hwaddr addr, unsigned size) break; } - DPRINTF("%s: addr " HWADDR_FMT_plx " %02" PRIx32 "\n", __func__, - addr, value); + trace_mpc_i2c_read(addr, value); + return (uint64_t)value; } @@ -234,8 +234,8 @@ static void mpc_i2c_write(void *opaque, hwaddr addr, { MPCI2CState *s = opaque; - DPRINTF("%s: addr " HWADDR_FMT_plx " val %08" PRIx64 "\n", __func__, - addr, value); + trace_mpc_i2c_write(addr, value); + switch (addr) { case MPC_I2C_ADR: s->adr = value & CADR_MASK; @@ -334,26 +334,23 @@ static void mpc_i2c_realize(DeviceState *dev, Error **errp) i2c->bus = i2c_init_bus(dev, "i2c"); } -static void mpc_i2c_class_init(ObjectClass *klass, void *data) +static void
mpc_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &mpc_i2c_vmstate ; - dc->reset = mpc_i2c_reset; + device_class_set_legacy_reset(dc, mpc_i2c_reset); dc->realize = mpc_i2c_realize; dc->desc = "MPC I2C Controller"; } -static const TypeInfo mpc_i2c_type_info = { - .name = TYPE_MPC_I2C, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(MPCI2CState), - .class_init = mpc_i2c_class_init, +static const TypeInfo mpc_i2c_types[] = { + { + .name = TYPE_MPC_I2C, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MPCI2CState), + .class_init = mpc_i2c_class_init, + }, }; -static void mpc_i2c_register_types(void) -{ - type_register_static(&mpc_i2c_type_info); -} - -type_init(mpc_i2c_register_types) +DEFINE_TYPES(mpc_i2c_types) diff --git a/hw/i2c/npcm7xx_smbus.c b/hw/i2c/npcm7xx_smbus.c index 22d68fc67dd..179852a4fd9 100644 --- a/hw/i2c/npcm7xx_smbus.c +++ b/hw/i2c/npcm7xx_smbus.c @@ -1075,7 +1075,7 @@ static const VMStateDescription vmstate_npcm7xx_smbus = { }, }; -static void npcm7xx_smbus_class_init(ObjectClass *klass, void *data) +static void npcm7xx_smbus_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/i2c/omap_i2c.c b/hw/i2c/omap_i2c.c index e5d205dda5a..751bf748fd9 100644 --- a/hw/i2c/omap_i2c.c +++ b/hw/i2c/omap_i2c.c @@ -55,16 +55,16 @@ struct OMAPI2CState { uint16_t test; }; -#define OMAP2_INTR_REV 0x34 -#define OMAP2_GC_REV 0x34 +#define OMAP2_INTR_REV 0x34 +#define OMAP2_GC_REV 0x34 static void omap_i2c_interrupts_update(OMAPI2CState *s) { qemu_set_irq(s->irq, s->stat & s->mask); - if ((s->dma >> 15) & 1) /* RDMA_EN */ - qemu_set_irq(s->drq[0], (s->stat >> 3) & 1); /* RRDY */ - if ((s->dma >> 7) & 1) /* XDMA_EN */ - qemu_set_irq(s->drq[1], (s->stat >> 4) & 1); /* XRDY */ + if ((s->dma >> 15) & 1) /* RDMA_EN */ + qemu_set_irq(s->drq[0], (s->stat >> 3) & 1); /* RRDY */ + if ((s->dma >> 7) & 1) /* XDMA_EN */ + qemu_set_irq(s->drq[1], (s->stat >> 4) & 1); /* XRDY */ } static void omap_i2c_fifo_run(OMAPI2CState *s) @@ -74,25 +74,25 @@ static void omap_i2c_fifo_run(OMAPI2CState *s) if (!i2c_bus_busy(s->bus)) return; - if ((s->control >> 2) & 1) { /* RM */ - if ((s->control >> 1) & 1) { /* STP */ + if ((s->control >> 2) & 1) { /* RM */ + if ((s->control >> 1) & 1) { /* STP */ i2c_end_transfer(s->bus); - s->control &= ~(1 << 1); /* STP */ + s->control &= ~(1 << 1); /* STP */ s->count_cur = s->count; s->txlen = 0; - } else if ((s->control >> 9) & 1) { /* TRX */ + } else if ((s->control >> 9) & 1) { /* TRX */ while (ack && s->txlen) ack = (i2c_send(s->bus, (s->fifo >> ((-- s->txlen) << 3)) & 0xff) >= 0); - s->stat |= 1 << 4; /* XRDY */ + s->stat |= 1 << 4; /* XRDY */ } else { while (s->rxlen < 4) s->fifo |= i2c_recv(s->bus) << ((s->rxlen ++) << 3); - s->stat |= 1 << 3; /* RRDY */ + s->stat |= 1 << 3; /* RRDY */ } } else { - if ((s->control >> 9) & 1) { /* TRX */ + if ((s->control >> 9) & 1) { /* TRX */ while (ack && s->count_cur && s->txlen) { ack = (i2c_send(s->bus, (s->fifo >> ((-- s->txlen) << 3)) & @@ -100,12 +100,12 @@ static void omap_i2c_fifo_run(OMAPI2CState *s) s->count_cur --; } if (ack && s->count_cur) - s->stat |= 1 << 4; /* XRDY */ + s->stat |= 1 << 4; /* XRDY */ else - s->stat &= ~(1 << 4); /* XRDY */ + s->stat &= ~(1 << 4); /* XRDY */ if (!s->count_cur) { - s->stat |= 1 << 2; /* ARDY */ - s->control &= ~(1 << 10); /* MST */ + s->stat |= 1 << 2; /* ARDY */ + s->control &= ~(1 << 10); /* MST */ } } else { while 
(s->count_cur && s->rxlen < 4) { @@ -113,26 +113,26 @@ static void omap_i2c_fifo_run(OMAPI2CState *s) s->count_cur --; } if (s->rxlen) - s->stat |= 1 << 3; /* RRDY */ + s->stat |= 1 << 3; /* RRDY */ else - s->stat &= ~(1 << 3); /* RRDY */ + s->stat &= ~(1 << 3); /* RRDY */ } if (!s->count_cur) { - if ((s->control >> 1) & 1) { /* STP */ + if ((s->control >> 1) & 1) { /* STP */ i2c_end_transfer(s->bus); - s->control &= ~(1 << 1); /* STP */ + s->control &= ~(1 << 1); /* STP */ s->count_cur = s->count; s->txlen = 0; } else { - s->stat |= 1 << 2; /* ARDY */ - s->control &= ~(1 << 10); /* MST */ + s->stat |= 1 << 2; /* ARDY */ + s->control &= ~(1 << 10); /* MST */ } } } - s->stat |= (!ack) << 1; /* NACK */ + s->stat |= (!ack) << 1; /* NACK */ if (!ack) - s->control &= ~(1 << 1); /* STP */ + s->control &= ~(1 << 1); /* STP */ } static void omap_i2c_reset(DeviceState *dev) @@ -163,16 +163,16 @@ static uint32_t omap_i2c_read(void *opaque, hwaddr addr) uint16_t ret; switch (offset) { - case 0x00: /* I2C_REV */ - return s->revision; /* REV */ + case 0x00: /* I2C_REV */ + return s->revision; /* REV */ - case 0x04: /* I2C_IE */ + case 0x04: /* I2C_IE */ return s->mask; - case 0x08: /* I2C_STAT */ + case 0x08: /* I2C_STAT */ return s->stat | (i2c_bus_busy(s->bus) << 12); - case 0x0c: /* I2C_IV */ + case 0x0c: /* I2C_IV */ if (s->revision >= OMAP2_INTR_REV) break; ret = ctz32(s->stat & s->mask); @@ -185,18 +185,18 @@ static uint32_t omap_i2c_read(void *opaque, hwaddr addr) omap_i2c_interrupts_update(s); return ret; - case 0x10: /* I2C_SYSS */ - return (s->control >> 15) & 1; /* I2C_EN */ + case 0x10: /* I2C_SYSS */ + return (s->control >> 15) & 1; /* I2C_EN */ - case 0x14: /* I2C_BUF */ + case 0x14: /* I2C_BUF */ return s->dma; - case 0x18: /* I2C_CNT */ - return s->count_cur; /* DCOUNT */ + case 0x18: /* I2C_CNT */ + return s->count_cur; /* DCOUNT */ - case 0x1c: /* I2C_DATA */ + case 0x1c: /* I2C_DATA */ ret = 0; - if (s->control & (1 << 14)) { /* BE */ + if (s->control & (1 << 14)) { /* BE */ ret |= ((s->fifo >> 0) & 0xff) << 8; ret |= ((s->fifo >> 8) & 0xff) << 0; } else { @@ -204,7 +204,7 @@ static uint32_t omap_i2c_read(void *opaque, hwaddr addr) ret |= ((s->fifo >> 0) & 0xff) << 0; } if (s->rxlen == 1) { - s->stat |= 1 << 15; /* SBD */ + s->stat |= 1 << 15; /* SBD */ s->rxlen = 0; } else if (s->rxlen > 1) { if (s->rxlen > 2) @@ -214,41 +214,41 @@ static uint32_t omap_i2c_read(void *opaque, hwaddr addr) /* XXX: remote access (qualifier) error - what's that? 
*/ } if (!s->rxlen) { - s->stat &= ~(1 << 3); /* RRDY */ - if (((s->control >> 10) & 1) && /* MST */ - ((~s->control >> 9) & 1)) { /* TRX */ - s->stat |= 1 << 2; /* ARDY */ - s->control &= ~(1 << 10); /* MST */ + s->stat &= ~(1 << 3); /* RRDY */ + if (((s->control >> 10) & 1) && /* MST */ + ((~s->control >> 9) & 1)) { /* TRX */ + s->stat |= 1 << 2; /* ARDY */ + s->control &= ~(1 << 10); /* MST */ } } - s->stat &= ~(1 << 11); /* ROVR */ + s->stat &= ~(1 << 11); /* ROVR */ omap_i2c_fifo_run(s); omap_i2c_interrupts_update(s); return ret; - case 0x20: /* I2C_SYSC */ + case 0x20: /* I2C_SYSC */ return 0; - case 0x24: /* I2C_CON */ + case 0x24: /* I2C_CON */ return s->control; - case 0x28: /* I2C_OA */ + case 0x28: /* I2C_OA */ return s->addr[0]; - case 0x2c: /* I2C_SA */ + case 0x2c: /* I2C_SA */ return s->addr[1]; - case 0x30: /* I2C_PSC */ + case 0x30: /* I2C_PSC */ return s->divider; - case 0x34: /* I2C_SCLL */ + case 0x34: /* I2C_SCLL */ return s->times[0]; - case 0x38: /* I2C_SCLH */ + case 0x38: /* I2C_SCLH */ return s->times[1]; - case 0x3c: /* I2C_SYSTEST */ - if (s->test & (1 << 15)) { /* ST_EN */ + case 0x3c: /* I2C_SYSTEST */ + if (s->test & (1 << 15)) { /* ST_EN */ s->test ^= 0xa; return s->test; } else @@ -267,17 +267,17 @@ static void omap_i2c_write(void *opaque, hwaddr addr, int nack; switch (offset) { - case 0x00: /* I2C_REV */ - case 0x0c: /* I2C_IV */ - case 0x10: /* I2C_SYSS */ + case 0x00: /* I2C_REV */ + case 0x0c: /* I2C_IV */ + case 0x10: /* I2C_SYSS */ OMAP_RO_REG(addr); return; - case 0x04: /* I2C_IE */ + case 0x04: /* I2C_IE */ s->mask = value & (s->revision < OMAP2_GC_REV ? 0x1f : 0x3f); break; - case 0x08: /* I2C_STAT */ + case 0x08: /* I2C_STAT */ if (s->revision < OMAP2_INTR_REV) { OMAP_RO_REG(addr); return; @@ -288,40 +288,40 @@ static void omap_i2c_write(void *opaque, hwaddr addr, omap_i2c_interrupts_update(s); break; - case 0x14: /* I2C_BUF */ + case 0x14: /* I2C_BUF */ s->dma = value & 0x8080; - if (value & (1 << 15)) /* RDMA_EN */ - s->mask &= ~(1 << 3); /* RRDY_IE */ - if (value & (1 << 7)) /* XDMA_EN */ - s->mask &= ~(1 << 4); /* XRDY_IE */ + if (value & (1 << 15)) /* RDMA_EN */ + s->mask &= ~(1 << 3); /* RRDY_IE */ + if (value & (1 << 7)) /* XDMA_EN */ + s->mask &= ~(1 << 4); /* XRDY_IE */ break; - case 0x18: /* I2C_CNT */ - s->count = value; /* DCOUNT */ + case 0x18: /* I2C_CNT */ + s->count = value; /* DCOUNT */ break; - case 0x1c: /* I2C_DATA */ + case 0x1c: /* I2C_DATA */ if (s->txlen > 2) { /* XXX: remote access (qualifier) error - what's that? 
*/ break; } s->fifo <<= 16; s->txlen += 2; - if (s->control & (1 << 14)) { /* BE */ + if (s->control & (1 << 14)) { /* BE */ s->fifo |= ((value >> 8) & 0xff) << 8; s->fifo |= ((value >> 0) & 0xff) << 0; } else { s->fifo |= ((value >> 0) & 0xff) << 8; s->fifo |= ((value >> 8) & 0xff) << 0; } - s->stat &= ~(1 << 10); /* XUDF */ + s->stat &= ~(1 << 10); /* XUDF */ if (s->txlen > 2) - s->stat &= ~(1 << 4); /* XRDY */ + s->stat &= ~(1 << 4); /* XRDY */ omap_i2c_fifo_run(s); omap_i2c_interrupts_update(s); break; - case 0x20: /* I2C_SYSC */ + case 0x20: /* I2C_SYSC */ if (s->revision < OMAP2_INTR_REV) { OMAP_BAD_REG(addr); return; @@ -332,9 +332,9 @@ static void omap_i2c_write(void *opaque, hwaddr addr, } break; - case 0x24: /* I2C_CON */ + case 0x24: /* I2C_CON */ s->control = value & 0xcf87; - if (~value & (1 << 15)) { /* I2C_EN */ + if (~value & (1 << 15)) { /* I2C_EN */ if (s->revision < OMAP2_INTR_REV) { omap_i2c_reset(DEVICE(s)); } @@ -351,14 +351,14 @@ static void omap_i2c_write(void *opaque, hwaddr addr, __func__); break; } - if ((value & (1 << 15)) && value & (1 << 0)) { /* STT */ - nack = !!i2c_start_transfer(s->bus, s->addr[1], /* SA */ - (~value >> 9) & 1); /* TRX */ - s->stat |= nack << 1; /* NACK */ - s->control &= ~(1 << 0); /* STT */ + if ((value & (1 << 15)) && value & (1 << 0)) { /* STT */ + nack = !!i2c_start_transfer(s->bus, s->addr[1], /* SA */ + (~value >> 9) & 1); /* TRX */ + s->stat |= nack << 1; /* NACK */ + s->control &= ~(1 << 0); /* STT */ s->fifo = 0; if (nack) - s->control &= ~(1 << 1); /* STP */ + s->control &= ~(1 << 1); /* STP */ else { s->count_cur = s->count; omap_i2c_fifo_run(s); @@ -367,34 +367,34 @@ static void omap_i2c_write(void *opaque, hwaddr addr, } break; - case 0x28: /* I2C_OA */ + case 0x28: /* I2C_OA */ s->addr[0] = value & 0x3ff; break; - case 0x2c: /* I2C_SA */ + case 0x2c: /* I2C_SA */ s->addr[1] = value & 0x3ff; break; - case 0x30: /* I2C_PSC */ + case 0x30: /* I2C_PSC */ s->divider = value; break; - case 0x34: /* I2C_SCLL */ + case 0x34: /* I2C_SCLL */ s->times[0] = value; break; - case 0x38: /* I2C_SCLH */ + case 0x38: /* I2C_SCLH */ s->times[1] = value; break; - case 0x3c: /* I2C_SYSTEST */ + case 0x3c: /* I2C_SYSTEST */ s->test = value & 0xf80f; - if (value & (1 << 11)) /* SBB */ + if (value & (1 << 11)) /* SBB */ if (s->revision >= OMAP2_INTR_REV) { s->stat |= 0x3f; omap_i2c_interrupts_update(s); } - if (value & (1 << 15)) { /* ST_EN */ + if (value & (1 << 15)) { /* ST_EN */ qemu_log_mask(LOG_UNIMP, "%s: System Test not supported\n", __func__); } @@ -413,7 +413,7 @@ static void omap_i2c_writeb(void *opaque, hwaddr addr, int offset = addr & OMAP_MPUI_REG_MASK; switch (offset) { - case 0x1c: /* I2C_DATA */ + case 0x1c: /* I2C_DATA */ if (s->txlen > 2) { /* XXX: remote access (qualifier) error - what's that? 
*/ break; @@ -421,9 +421,9 @@ static void omap_i2c_writeb(void *opaque, hwaddr addr, s->fifo <<= 8; s->txlen += 1; s->fifo |= value & 0xff; - s->stat &= ~(1 << 10); /* XUDF */ + s->stat &= ~(1 << 10); /* XUDF */ if (s->txlen > 2) - s->stat &= ~(1 << 4); /* XRDY */ + s->stat &= ~(1 << 4); /* XRDY */ omap_i2c_fifo_run(s); omap_i2c_interrupts_update(s); break; @@ -511,17 +511,16 @@ void omap_i2c_set_fclk(OMAPI2CState *i2c, omap_clk clk) i2c->fclk = clk; } -static Property omap_i2c_properties[] = { +static const Property omap_i2c_properties[] = { DEFINE_PROP_UINT8("revision", OMAPI2CState, revision, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void omap_i2c_class_init(ObjectClass *klass, void *data) +static void omap_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, omap_i2c_properties); - dc->reset = omap_i2c_reset; + device_class_set_legacy_reset(dc, omap_i2c_reset); /* Reason: pointer properties "iclk", "fclk" */ dc->user_creatable = false; dc->realize = omap_i2c_realize; diff --git a/hw/i2c/pm_smbus.c b/hw/i2c/pm_smbus.c index 3eed8110b95..4e685fd26e7 100644 --- a/hw/i2c/pm_smbus.c +++ b/hw/i2c/pm_smbus.c @@ -205,7 +205,6 @@ static void smb_transaction(PMSMBus *s) error: s->smb_stat |= STS_DEV_ERR; - return; } static void smb_transaction_start(PMSMBus *s) diff --git a/hw/i2c/pmbus_device.c b/hw/i2c/pmbus_device.c index ba1d2fd7160..853dc4b4342 100644 --- a/hw/i2c/pmbus_device.c +++ b/hw/i2c/pmbus_device.c @@ -1902,7 +1902,7 @@ static void pmbus_device_finalize(Object *obj) g_free(pmdev->pages); } -static void pmbus_device_class_init(ObjectClass *klass, void *data) +static void pmbus_device_class_init(ObjectClass *klass, const void *data) { SMBusDeviceClass *k = SMBUS_DEVICE_CLASS(klass); diff --git a/hw/i2c/ppc4xx_i2c.c b/hw/i2c/ppc4xx_i2c.c index 75d50f15158..09d4c49d657 100644 --- a/hw/i2c/ppc4xx_i2c.c +++ b/hw/i2c/ppc4xx_i2c.c @@ -354,11 +354,11 @@ static void ppc4xx_i2c_init(Object *o) bitbang_i2c_init(&s->bitbang, s->bus); } -static void ppc4xx_i2c_class_init(ObjectClass *klass, void *data) +static void ppc4xx_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = ppc4xx_i2c_reset; + device_class_set_legacy_reset(dc, ppc4xx_i2c_reset); } static const TypeInfo ppc4xx_i2c_type_info = { diff --git a/hw/i2c/smbus_eeprom.c b/hw/i2c/smbus_eeprom.c index c42236bb139..0a1088fbb0a 100644 --- a/hw/i2c/smbus_eeprom.c +++ b/hw/i2c/smbus_eeprom.c @@ -137,13 +137,13 @@ static void smbus_eeprom_realize(DeviceState *dev, Error **errp) } } -static void smbus_eeprom_class_initfn(ObjectClass *klass, void *data) +static void smbus_eeprom_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SMBusDeviceClass *sc = SMBUS_DEVICE_CLASS(klass); dc->realize = smbus_eeprom_realize; - dc->reset = smbus_eeprom_reset; + device_class_set_legacy_reset(dc, smbus_eeprom_reset); sc->receive_byte = eeprom_receive_byte; sc->write_data = eeprom_write_data; dc->vmsd = &vmstate_smbus_eeprom; @@ -151,19 +151,16 @@ static void smbus_eeprom_class_initfn(ObjectClass *klass, void *data) dc->user_creatable = false; } -static const TypeInfo smbus_eeprom_info = { - .name = TYPE_SMBUS_EEPROM, - .parent = TYPE_SMBUS_DEVICE, - .instance_size = sizeof(SMBusEEPROMDevice), - .class_init = smbus_eeprom_class_initfn, +static const TypeInfo smbus_eeprom_types[] = { + { + .name = TYPE_SMBUS_EEPROM, + .parent = TYPE_SMBUS_DEVICE, + .instance_size = sizeof(SMBusEEPROMDevice), + 
.class_init = smbus_eeprom_class_initfn, + }, }; -static void smbus_eeprom_register_types(void) -{ - type_register_static(&smbus_eeprom_info); -} - -type_init(smbus_eeprom_register_types) +DEFINE_TYPES(smbus_eeprom_types) void smbus_eeprom_init_one(I2CBus *smbus, uint8_t address, uint8_t *eeprom_buf) { diff --git a/hw/i2c/smbus_ich9.c b/hw/i2c/smbus_ich9.c index 208f263ac5b..956c9b59bbc 100644 --- a/hw/i2c/smbus_ich9.c +++ b/hw/i2c/smbus_ich9.c @@ -118,7 +118,7 @@ static void build_ich9_smb_aml(AcpiDevAmlIf *adev, Aml *scope) qbus_build_aml(bus, scope); } -static void ich9_smb_class_init(ObjectClass *klass, void *data) +static void ich9_smb_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -145,7 +145,7 @@ static const TypeInfo ich9_smb_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(ICH9SMBState), .class_init = ich9_smb_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { TYPE_ACPI_DEV_AML_IF }, { }, diff --git a/hw/i2c/smbus_slave.c b/hw/i2c/smbus_slave.c index 9f9afc25a40..cfb61c879e0 100644 --- a/hw/i2c/smbus_slave.c +++ b/hw/i2c/smbus_slave.c @@ -201,7 +201,7 @@ static int smbus_i2c_send(I2CSlave *s, uint8_t data) return 0; } -static void smbus_device_class_init(ObjectClass *klass, void *data) +static void smbus_device_class_init(ObjectClass *klass, const void *data) { I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); diff --git a/hw/i2c/trace-events b/hw/i2c/trace-events index 6900e06edad..1ad0e95c0e6 100644 --- a/hw/i2c/trace-events +++ b/hw/i2c/trace-events @@ -35,6 +35,11 @@ aspeed_i2c_bus_write(uint32_t busid, uint64_t offset, unsigned size, uint64_t va aspeed_i2c_bus_send(const char *mode, int i, int count, uint8_t byte) "%s send %d/%d 0x%02x" aspeed_i2c_bus_recv(const char *mode, int i, int count, uint8_t byte) "%s recv %d/%d 0x%02x" +# mpc_i2c.c + +mpc_i2c_read(uint64_t addr, uint32_t value) "[0x%" PRIx64 "] -> 0x%02" PRIx32 +mpc_i2c_write(uint64_t addr, uint32_t value) "[0x%" PRIx64 "] <- 0x%02" PRIx32 + # npcm7xx_smbus.c npcm7xx_smbus_read(const char *id, uint64_t offset, uint64_t value, unsigned size) "%s offset: 0x%04" PRIx64 " value: 0x%02" PRIx64 " size: %u" @@ -51,3 +56,8 @@ npcm7xx_smbus_recv_fifo(const char *id, uint8_t received, uint8_t expected) "%s pca954x_write_bytes(uint8_t value) "PCA954X write data: 0x%02x" pca954x_read_data(uint8_t value) "PCA954X read data: 0x%02x" + +# imx_i2c.c + +imx_i2c_read(const char *id, const char *reg, uint64_t ofs, uint64_t value) "%s:[%s (0x%" PRIx64 ")] -> 0x%02" PRIx64 +imx_i2c_write(const char *id, const char *reg, uint64_t ofs, uint64_t value) "%s:[%s (0x%" PRIx64 ")] <- 0x%02" PRIx64 diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig index f4a33b6c082..3a0e2b8ebbb 100644 --- a/hw/i386/Kconfig +++ b/hw/i386/Kconfig @@ -4,12 +4,17 @@ config X86_FW_OVMF config SEV bool select X86_FW_OVMF - depends on KVM + depends on KVM && X86_64 config SGX bool depends on KVM +config TDX + bool + select X86_FW_OVMF + depends on KVM && X86_64 + config PC bool imply APPLESMC @@ -26,6 +31,7 @@ config PC imply QXL imply SEV imply SGX + imply TDX imply TEST_DEVICES imply TPM_CRB imply TPM_TIS_ISA @@ -43,6 +49,7 @@ config PC select SERIAL_ISA select ACPI_PCI select ACPI_VMGENID + select ACPI_VMCLOCK select VIRTIO_PMEM_SUPPORTED select VIRTIO_MEM_SUPPORTED select HV_BALLOON_SUPPORTED @@ -124,11 +131,22 @@ config MICROVM select I8259 select MC146818RTC select VIRTIO_MMIO + select 
ACPI_PCI select ACPI_HW_REDUCED select PCI_EXPRESS_GENERIC_BRIDGE select USB_XHCI_SYSBUS select I8254 +config NITRO_ENCLAVE + default y + depends on I386 && FDT # for MICROVM + depends on LIBCBOR && GNUTLS # for EIF and VIRTIO_NSM + depends on VHOST_USER # for VHOST_USER_VSOCK + select EIF + select MICROVM + select VHOST_USER_VSOCK + select VIRTIO_NSM + config X86_IOMMU bool depends on PC diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index 5d4bd2b7106..423c4959fe8 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -22,7 +22,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qapi/qmp/qnum.h" +#include "qobject/qnum.h" #include "acpi-build.h" #include "acpi-common.h" #include "qemu/bitmap.h" @@ -40,18 +40,19 @@ #include "hw/acpi/acpi_aml_interface.h" #include "hw/input/i8042.h" #include "hw/acpi/memory_hotplug.h" -#include "sysemu/tpm.h" +#include "system/tpm.h" #include "hw/acpi/tpm.h" #include "hw/acpi/vmgenid.h" +#include "hw/acpi/vmclock.h" #include "hw/acpi/erst.h" #include "hw/acpi/piix4.h" -#include "sysemu/tpm_backend.h" +#include "system/tpm_backend.h" #include "hw/rtc/mc146818rtc_regs.h" #include "migration/vmstate.h" #include "hw/mem/memory-device.h" #include "hw/mem/nvdimm.h" -#include "sysemu/numa.h" -#include "sysemu/reset.h" +#include "system/numa.h" +#include "system/reset.h" #include "hw/hyperv/vmbus-bridge.h" /* Supported chipsets: */ @@ -68,7 +69,6 @@ #include "hw/acpi/utils.h" #include "hw/acpi/pci.h" #include "hw/acpi/cxl.h" -#include "hw/acpi/acpi_generic_initiator.h" #include "qom/qom-qobject.h" #include "hw/i386/amd_iommu.h" @@ -139,7 +139,7 @@ static void init_common_fadt_data(MachineState *ms, Object *o, /* * "ICH9-LPC" or "PIIX4_PM" has "smm-compat" property to keep the old * behavior for compatibility irrelevant to smm_enabled, which doesn't - * comforms to ACPI spec. + * conform to the ACPI spec. */ bool smm_enabled = object_property_get_bool(o, "smm-compat", NULL) ? true : x86_machine_is_smm_enabled(x86ms); @@ -338,506 +338,45 @@ build_facs(GArray *table_data) g_array_append_vals(table_data, reserved, 40); /* Reserved */ } -Aml *aml_pci_device_dsm(void) -{ - Aml *method; - - method = aml_method("_DSM", 4, AML_SERIALIZED); - { - Aml *params = aml_local(0); - Aml *pkg = aml_package(2); - aml_append(pkg, aml_int(0)); - aml_append(pkg, aml_int(0)); - aml_append(method, aml_store(pkg, params)); - aml_append(method, - aml_store(aml_name("BSEL"), aml_index(params, aml_int(0)))); - aml_append(method, - aml_store(aml_name("ASUN"), aml_index(params, aml_int(1)))); - aml_append(method, - aml_return(aml_call5("PDSM", aml_arg(0), aml_arg(1), - aml_arg(2), aml_arg(3), params)) - ); - } - return method; -} - -static void build_append_pci_dsm_func0_common(Aml *ctx, Aml *retvar) -{ - Aml *UUID, *ifctx1; - uint8_t byte_list[1] = { 0 }; /* nothing supported yet */ - - aml_append(ctx, aml_store(aml_buffer(1, byte_list), retvar)); - /* - * PCI Firmware Specification 3.1 - * 4.6. 
_DSM Definitions for PCI - */ - UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D"); - ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(0), UUID))); - { - /* call is for unsupported UUID, bail out */ - aml_append(ifctx1, aml_return(retvar)); - } - aml_append(ctx, ifctx1); - - ifctx1 = aml_if(aml_lless(aml_arg(1), aml_int(2))); - { - /* call is for unsupported REV, bail out */ - aml_append(ifctx1, aml_return(retvar)); - } - aml_append(ctx, ifctx1); -} - -static Aml *aml_pci_edsm(void) -{ - Aml *method, *ifctx; - Aml *zero = aml_int(0); - Aml *func = aml_arg(2); - Aml *ret = aml_local(0); - Aml *aidx = aml_local(1); - Aml *params = aml_arg(4); - - method = aml_method("EDSM", 5, AML_SERIALIZED); - - /* get supported functions */ - ifctx = aml_if(aml_equal(func, zero)); - { - /* 1: have supported functions */ - /* 7: support for function 7 */ - const uint8_t caps = 1 | BIT(7); - build_append_pci_dsm_func0_common(ifctx, ret); - aml_append(ifctx, aml_store(aml_int(caps), aml_index(ret, zero))); - aml_append(ifctx, aml_return(ret)); - } - aml_append(method, ifctx); - - /* handle specific functions requests */ - /* - * PCI Firmware Specification 3.1 - * 4.6.7. _DSM for Naming a PCI or PCI Express Device Under - * Operating Systems - */ - ifctx = aml_if(aml_equal(func, aml_int(7))); - { - Aml *pkg = aml_package(2); - aml_append(pkg, zero); - /* optional, if not impl. should return null string */ - aml_append(pkg, aml_string("%s", "")); - aml_append(ifctx, aml_store(pkg, ret)); - - /* - * IASL is fine when initializing Package with computational data, - * however it makes guest unhappy /it fails to process such AML/. - * So use runtime assignment to set acpi-index after initializer - * to make OSPM happy. - */ - aml_append(ifctx, - aml_store(aml_derefof(aml_index(params, aml_int(0))), aidx)); - aml_append(ifctx, aml_store(aidx, aml_index(ret, zero))); - aml_append(ifctx, aml_return(ret)); - } - aml_append(method, ifctx); - - return method; -} - -static Aml *aml_pci_static_endpoint_dsm(PCIDevice *pdev) -{ - Aml *method; - - g_assert(pdev->acpi_index != 0); - method = aml_method("_DSM", 4, AML_SERIALIZED); - { - Aml *params = aml_local(0); - Aml *pkg = aml_package(1); - aml_append(pkg, aml_int(pdev->acpi_index)); - aml_append(method, aml_store(pkg, params)); - aml_append(method, - aml_return(aml_call5("EDSM", aml_arg(0), aml_arg(1), - aml_arg(2), aml_arg(3), params)) - ); - } - return method; -} - -static void build_append_pcihp_notify_entry(Aml *method, int slot) -{ - Aml *if_ctx; - int32_t devfn = PCI_DEVFN(slot, 0); - - if_ctx = aml_if(aml_and(aml_arg(0), aml_int(0x1U << slot), NULL)); - aml_append(if_ctx, aml_notify(aml_name("S%.02X", devfn), aml_arg(1))); - aml_append(method, if_ctx); -} - -static bool is_devfn_ignored_generic(const int devfn, const PCIBus *bus) -{ - const PCIDevice *pdev = bus->devices[devfn]; - - if (PCI_FUNC(devfn)) { - if (IS_PCI_BRIDGE(pdev)) { - /* - * Ignore only hotplugged PCI bridges on !0 functions, but - * allow describing cold plugged bridges on all functions - */ - if (DEVICE(pdev)->hotplugged) { - return true; - } - } - } - return false; -} - -static bool is_devfn_ignored_hotplug(const int devfn, const PCIBus *bus) -{ - PCIDevice *pdev = bus->devices[devfn]; - if (pdev) { - return is_devfn_ignored_generic(devfn, bus) || - !DEVICE_GET_CLASS(pdev)->hotpluggable || - /* Cold plugged bridges aren't themselves hot-pluggable */ - (IS_PCI_BRIDGE(pdev) && !DEVICE(pdev)->hotplugged); - } else { /* non populated slots */ - /* - * hotplug is supported only for 
non-multifunction device - * so generate device description only for function 0 - */ - if (PCI_FUNC(devfn) || - (pci_bus_is_express(bus) && PCI_SLOT(devfn) > 0)) { - return true; - } - } - return false; -} - -void build_append_pcihp_slots(Aml *parent_scope, PCIBus *bus) -{ - int devfn; - Aml *dev, *notify_method = NULL, *method; - QObject *bsel = object_property_get_qobject(OBJECT(bus), - ACPI_PCIHP_PROP_BSEL, NULL); - uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel)); - qobject_unref(bsel); - - aml_append(parent_scope, aml_name_decl("BSEL", aml_int(bsel_val))); - notify_method = aml_method("DVNT", 2, AML_NOTSERIALIZED); - - for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { - int slot = PCI_SLOT(devfn); - int adr = slot << 16 | PCI_FUNC(devfn); - - if (is_devfn_ignored_hotplug(devfn, bus)) { - continue; - } - - if (bus->devices[devfn]) { - dev = aml_scope("S%.02X", devfn); - } else { - dev = aml_device("S%.02X", devfn); - aml_append(dev, aml_name_decl("_ADR", aml_int(adr))); - } - - /* - * Can't declare _SUN here for every device as it changes 'slot' - * enumeration order in linux kernel, so use another variable for it - */ - aml_append(dev, aml_name_decl("ASUN", aml_int(slot))); - aml_append(dev, aml_pci_device_dsm()); - - aml_append(dev, aml_name_decl("_SUN", aml_int(slot))); - /* add _EJ0 to make slot hotpluggable */ - method = aml_method("_EJ0", 1, AML_NOTSERIALIZED); - aml_append(method, - aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN")) - ); - aml_append(dev, method); - - build_append_pcihp_notify_entry(notify_method, slot); - - /* device descriptor has been composed, add it into parent context */ - aml_append(parent_scope, dev); - } - aml_append(parent_scope, notify_method); -} - -void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus) -{ - int devfn; - Aml *dev; - - for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { - /* ACPI spec: 1.0b: Table 6-2 _ADR Object Bus Types, PCI type */ - int adr = PCI_SLOT(devfn) << 16 | PCI_FUNC(devfn); - PCIDevice *pdev = bus->devices[devfn]; - - if (!pdev || is_devfn_ignored_generic(devfn, bus)) { - continue; - } - - /* start to compose PCI device descriptor */ - dev = aml_device("S%.02X", devfn); - aml_append(dev, aml_name_decl("_ADR", aml_int(adr))); - - call_dev_aml_func(DEVICE(bus->devices[devfn]), dev); - /* add _DSM if device has acpi-index set */ - if (pdev->acpi_index && - !object_property_get_bool(OBJECT(pdev), "hotpluggable", - &error_abort)) { - aml_append(dev, aml_pci_static_endpoint_dsm(pdev)); - } - - /* device descriptor has been composed, add it into parent context */ - aml_append(parent_scope, dev); - } -} - -static bool build_append_notfication_callback(Aml *parent_scope, - const PCIBus *bus) -{ - Aml *method; - PCIBus *sec; - QObject *bsel; - int nr_notifiers = 0; - GQueue *pcnt_bus_list = g_queue_new(); - - QLIST_FOREACH(sec, &bus->child, sibling) { - Aml *br_scope = aml_scope("S%.02X", sec->parent_dev->devfn); - if (pci_bus_is_root(sec)) { - continue; - } - nr_notifiers = nr_notifiers + - build_append_notfication_callback(br_scope, sec); - /* - * add new child scope to parent - * and keep track of bus that have PCNT, - * bus list is used later to call children PCNTs from this level PCNT - */ - if (nr_notifiers) { - g_queue_push_tail(pcnt_bus_list, sec); - aml_append(parent_scope, br_scope); - } - } - - /* - * Append PCNT method to notify about events on local and child buses. - * ps: hostbridge might not have hotplug (bsel) enabled but might have - * child bridges that do have bsel. 
- */ - method = aml_method("PCNT", 0, AML_NOTSERIALIZED); - - /* If bus supports hotplug select it and notify about local events */ - bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL); - if (bsel) { - uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel)); - - aml_append(method, aml_store(aml_int(bsel_val), aml_name("BNUM"))); - aml_append(method, aml_call2("DVNT", aml_name("PCIU"), - aml_int(1))); /* Device Check */ - aml_append(method, aml_call2("DVNT", aml_name("PCID"), - aml_int(3))); /* Eject Request */ - nr_notifiers++; - } - - /* Notify about child bus events in any case */ - while ((sec = g_queue_pop_head(pcnt_bus_list))) { - aml_append(method, aml_name("^S%.02X.PCNT", sec->parent_dev->devfn)); - } - - aml_append(parent_scope, method); - qobject_unref(bsel); - g_queue_free(pcnt_bus_list); - return !!nr_notifiers; -} - -static Aml *aml_pci_pdsm(void) -{ - Aml *method, *ifctx, *ifctx1; - Aml *ret = aml_local(0); - Aml *caps = aml_local(1); - Aml *acpi_index = aml_local(2); - Aml *zero = aml_int(0); - Aml *one = aml_int(1); - Aml *func = aml_arg(2); - Aml *params = aml_arg(4); - Aml *bnum = aml_derefof(aml_index(params, aml_int(0))); - Aml *sunum = aml_derefof(aml_index(params, aml_int(1))); - - method = aml_method("PDSM", 5, AML_SERIALIZED); - - /* get supported functions */ - ifctx = aml_if(aml_equal(func, zero)); - { - build_append_pci_dsm_func0_common(ifctx, ret); - - aml_append(ifctx, aml_store(zero, caps)); - aml_append(ifctx, - aml_store(aml_call2("AIDX", bnum, sunum), acpi_index)); - /* - * advertise function 7 if device has acpi-index - * acpi_index values: - * 0: not present (default value) - * FFFFFFFF: not supported (old QEMU without PIDX reg) - * other: device's acpi-index - */ - ifctx1 = aml_if(aml_lnot( - aml_or(aml_equal(acpi_index, zero), - aml_equal(acpi_index, aml_int(0xFFFFFFFF)), NULL) - )); - { - /* have supported functions */ - aml_append(ifctx1, aml_or(caps, one, caps)); - /* support for function 7 */ - aml_append(ifctx1, - aml_or(caps, aml_shiftleft(one, aml_int(7)), caps)); - } - aml_append(ifctx, ifctx1); - - aml_append(ifctx, aml_store(caps, aml_index(ret, zero))); - aml_append(ifctx, aml_return(ret)); - } - aml_append(method, ifctx); - - /* handle specific functions requests */ - /* - * PCI Firmware Specification 3.1 - * 4.6.7. _DSM for Naming a PCI or PCI Express Device Under - * Operating Systems - */ - ifctx = aml_if(aml_equal(func, aml_int(7))); - { - Aml *pkg = aml_package(2); - - aml_append(pkg, zero); - /* - * optional, if not impl. 
should return null string - */ - aml_append(pkg, aml_string("%s", "")); - aml_append(ifctx, aml_store(pkg, ret)); - - aml_append(ifctx, aml_store(aml_call2("AIDX", bnum, sunum), acpi_index)); - /* - * update acpi-index to actual value - */ - aml_append(ifctx, aml_store(acpi_index, aml_index(ret, zero))); - aml_append(ifctx, aml_return(ret)); - } - - aml_append(method, ifctx); - return method; -} - -/** - * build_prt_entry: - * @link_name: link name for PCI route entry - * - * build AML package containing a PCI route entry for @link_name - */ -static Aml *build_prt_entry(const char *link_name) -{ - Aml *a_zero = aml_int(0); - Aml *pkg = aml_package(4); - aml_append(pkg, a_zero); - aml_append(pkg, a_zero); - aml_append(pkg, aml_name("%s", link_name)); - aml_append(pkg, a_zero); - return pkg; -} - /* - * initialize_route - Initialize the interrupt routing rule - * through a specific LINK: - * if (lnk_idx == idx) - * route using link 'link_name' - */ -static Aml *initialize_route(Aml *route, const char *link_name, - Aml *lnk_idx, int idx) -{ - Aml *if_ctx = aml_if(aml_equal(lnk_idx, aml_int(idx))); - Aml *pkg = build_prt_entry(link_name); - - aml_append(if_ctx, aml_store(pkg, route)); - - return if_ctx; -} - -/* - * build_prt - Define interrupt rounting rules + * build_prt - Define interrupt routing rules * * Returns an array of 128 routes, one for each device, * based on device location. * The main goal is to equally distribute the interrupts * over the 4 existing ACPI links (works only for i440fx). - * The hash function is (slot + pin) & 3 -> "LNK[D|A|B|C]". + * The hash function is: (slot + pin) & 3 -> "LNK[D|A|B|C]". * */ static Aml *build_prt(bool is_pci0_prt) { - Aml *method, *while_ctx, *pin, *res; + const int nroutes = 128; + Aml *rt_pkg, *method; + int pin; method = aml_method("_PRT", 0, AML_NOTSERIALIZED); - res = aml_local(0); - pin = aml_local(1); - aml_append(method, aml_store(aml_package(128), res)); - aml_append(method, aml_store(aml_int(0), pin)); - - /* while (pin < 128) */ - while_ctx = aml_while(aml_lless(pin, aml_int(128))); - { - Aml *slot = aml_local(2); - Aml *lnk_idx = aml_local(3); - Aml *route = aml_local(4); - - /* slot = pin >> 2 */ - aml_append(while_ctx, - aml_store(aml_shiftright(pin, aml_int(2), NULL), slot)); - /* lnk_idx = (slot + pin) & 3 */ - aml_append(while_ctx, - aml_store(aml_and(aml_add(pin, slot, NULL), aml_int(3), NULL), - lnk_idx)); - - /* route[2] = "LNK[D|A|B|C]", selection based on pin % 3 */ - aml_append(while_ctx, initialize_route(route, "LNKD", lnk_idx, 0)); - if (is_pci0_prt) { - Aml *if_device_1, *if_pin_4, *else_pin_4; - - /* device 1 is the power-management device, needs SCI */ - if_device_1 = aml_if(aml_equal(lnk_idx, aml_int(1))); - { - if_pin_4 = aml_if(aml_equal(pin, aml_int(4))); - { - aml_append(if_pin_4, - aml_store(build_prt_entry("LNKS"), route)); - } - aml_append(if_device_1, if_pin_4); - else_pin_4 = aml_else(); - { - aml_append(else_pin_4, - aml_store(build_prt_entry("LNKA"), route)); - } - aml_append(if_device_1, else_pin_4); - } - aml_append(while_ctx, if_device_1); + assert(nroutes < 256); + rt_pkg = aml_package(nroutes); + + for (pin = 0; pin < nroutes; pin++) { + Aml *pkg = aml_package(4); + int slot = pin >> 2; + + aml_append(pkg, aml_int((slot << 16) | 0xFFFF)); + aml_append(pkg, aml_int(pin & 3)); + /* device 1 is the power-management device, needs SCI */ + if (is_pci0_prt && pin == 4) { + aml_append(pkg, aml_name("%s", "LNKS")); } else { - aml_append(while_ctx, initialize_route(route, "LNKA", lnk_idx, 1)); + static 
const char link_name[][5] = {"LNKD", "LNKA", "LNKB", "LNKC"}; + int hash = (slot + pin) & 3; + aml_append(pkg, aml_name("%s", link_name[hash])); } - aml_append(while_ctx, initialize_route(route, "LNKB", lnk_idx, 2)); - aml_append(while_ctx, initialize_route(route, "LNKC", lnk_idx, 3)); - - /* route[0] = 0x[slot]FFFF */ - aml_append(while_ctx, - aml_store(aml_or(aml_shiftleft(slot, aml_int(16)), aml_int(0xFFFF), - NULL), - aml_index(route, aml_int(0)))); - /* route[1] = pin & 3 */ - aml_append(while_ctx, - aml_store(aml_and(pin, aml_int(3), NULL), - aml_index(route, aml_int(1)))); - /* res[pin] = route */ - aml_append(while_ctx, aml_store(route, aml_index(res, pin))); - /* pin++ */ - aml_append(while_ctx, aml_increment(pin)); + aml_append(pkg, aml_int(0)); + aml_append(rt_pkg, pkg); } - aml_append(method, while_ctx); - /* return res*/ - aml_append(method, aml_return(res)); + + aml_append(method, aml_return(rt_pkg)); return method; } @@ -1289,112 +828,6 @@ static Aml *build_q35_dram_controller(const AcpiMcfgInfo *mcfg) return dev; } -static void build_x86_acpi_pci_hotplug(Aml *table, uint64_t pcihp_addr) -{ - Aml *scope; - Aml *field; - Aml *method; - - scope = aml_scope("_SB.PCI0"); - - aml_append(scope, - aml_operation_region("PCST", AML_SYSTEM_IO, aml_int(pcihp_addr), 0x08)); - field = aml_field("PCST", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); - aml_append(field, aml_named_field("PCIU", 32)); - aml_append(field, aml_named_field("PCID", 32)); - aml_append(scope, field); - - aml_append(scope, - aml_operation_region("SEJ", AML_SYSTEM_IO, - aml_int(pcihp_addr + ACPI_PCIHP_SEJ_BASE), 0x04)); - field = aml_field("SEJ", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); - aml_append(field, aml_named_field("B0EJ", 32)); - aml_append(scope, field); - - aml_append(scope, - aml_operation_region("BNMR", AML_SYSTEM_IO, - aml_int(pcihp_addr + ACPI_PCIHP_BNMR_BASE), 0x08)); - field = aml_field("BNMR", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); - aml_append(field, aml_named_field("BNUM", 32)); - aml_append(field, aml_named_field("PIDX", 32)); - aml_append(scope, field); - - aml_append(scope, aml_mutex("BLCK", 0)); - - method = aml_method("PCEJ", 2, AML_NOTSERIALIZED); - aml_append(method, aml_acquire(aml_name("BLCK"), 0xFFFF)); - aml_append(method, aml_store(aml_arg(0), aml_name("BNUM"))); - aml_append(method, - aml_store(aml_shiftleft(aml_int(1), aml_arg(1)), aml_name("B0EJ"))); - aml_append(method, aml_release(aml_name("BLCK"))); - aml_append(method, aml_return(aml_int(0))); - aml_append(scope, method); - - method = aml_method("AIDX", 2, AML_NOTSERIALIZED); - aml_append(method, aml_acquire(aml_name("BLCK"), 0xFFFF)); - aml_append(method, aml_store(aml_arg(0), aml_name("BNUM"))); - aml_append(method, - aml_store(aml_shiftleft(aml_int(1), aml_arg(1)), aml_name("PIDX"))); - aml_append(method, aml_store(aml_name("PIDX"), aml_local(0))); - aml_append(method, aml_release(aml_name("BLCK"))); - aml_append(method, aml_return(aml_local(0))); - aml_append(scope, method); - - aml_append(scope, aml_pci_pdsm()); - - aml_append(table, scope); -} - -static Aml *build_q35_osc_method(bool enable_native_pcie_hotplug) -{ - Aml *if_ctx; - Aml *if_ctx2; - Aml *else_ctx; - Aml *method; - Aml *a_cwd1 = aml_name("CDW1"); - Aml *a_ctrl = aml_local(0); - - method = aml_method("_OSC", 4, AML_NOTSERIALIZED); - aml_append(method, aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1")); - - if_ctx = aml_if(aml_equal( - aml_arg(0), aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766"))); - aml_append(if_ctx, 
aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2")); - aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3")); - - aml_append(if_ctx, aml_store(aml_name("CDW3"), a_ctrl)); - - /* - * Always allow native PME, AER (no dependencies) - * Allow SHPC (PCI bridges can have SHPC controller) - * Disable PCIe Native Hot-plug if ACPI PCI Hot-plug is enabled. - */ - aml_append(if_ctx, aml_and(a_ctrl, - aml_int(0x1E | (enable_native_pcie_hotplug ? 0x1 : 0x0)), a_ctrl)); - - if_ctx2 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(1)))); - /* Unknown revision */ - aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x08), a_cwd1)); - aml_append(if_ctx, if_ctx2); - - if_ctx2 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), a_ctrl))); - /* Capabilities bits were masked */ - aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x10), a_cwd1)); - aml_append(if_ctx, if_ctx2); - - /* Update DWORD3 in the buffer */ - aml_append(if_ctx, aml_store(a_ctrl, aml_name("CDW3"))); - aml_append(method, if_ctx); - - else_ctx = aml_else(); - /* Unrecognized UUID */ - aml_append(else_ctx, aml_or(a_cwd1, aml_int(4), a_cwd1)); - aml_append(method, else_ctx); - - aml_append(method, aml_return(aml_arg(3))); - return method; -} - static void build_acpi0017(Aml *table) { Aml *dev, *scope, *method; @@ -1451,12 +884,12 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, dev = aml_device("PCI0"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid))); - aml_append(dev, aml_pci_edsm()); + aml_append(dev, build_pci_bridge_edsm()); aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); if (pm->pcihp_bridge_en || pm->pcihp_root_en) { - build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base); + build_acpi_pci_hotplug(dsdt, AML_SYSTEM_IO, pm->pcihp_io_base); } build_piix4_pci0_int(dsdt); } else if (q35) { @@ -1465,8 +898,8 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08"))); aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid))); - aml_append(dev, build_q35_osc_method(!pm->pcihp_bridge_en)); - aml_append(dev, aml_pci_edsm()); + aml_append(dev, build_pci_host_bridge_osc_method(!pm->pcihp_bridge_en)); + aml_append(dev, build_pci_bridge_edsm()); aml_append(sb_scope, dev); if (mcfg_valid) { aml_append(sb_scope, build_q35_dram_controller(&mcfg)); @@ -1500,7 +933,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(dsdt, sb_scope); if (pm->pcihp_bridge_en) { - build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base); + build_acpi_pci_hotplug(dsdt, AML_SYSTEM_IO, pm->pcihp_io_base); } build_q35_pci0_int(dsdt); } @@ -1552,6 +985,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, QLIST_FOREACH(bus, &bus->child, sibling) { uint8_t bus_num = pci_bus_num(bus); uint8_t numa_node = pci_bus_numa_node(bus); + uint32_t uid; /* look only for expander root buses */ if (!pci_bus_is_root(bus)) { @@ -1562,6 +996,8 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, root_bus_limit = bus_num - 1; } + uid = object_property_get_uint(OBJECT(bus), "acpi_uid", + &error_fatal); scope = aml_scope("\\_SB"); if (pci_bus_is_cxl(bus)) { @@ -1569,7 +1005,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, } else { dev = aml_device("PC%.02X", bus_num); } - aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); + aml_append(dev, aml_name_decl("_UID", aml_int(uid))); aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num))); if 
(pci_bus_is_cxl(bus)) { struct Aml *aml_pkg = aml_package(2); @@ -1584,7 +1020,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03"))); /* Expander bridges do not have ACPI PCI Hot-plug enabled */ - aml_append(dev, build_q35_osc_method(true)); + aml_append(dev, build_pci_host_bridge_osc_method(true)); } else { aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); } @@ -1713,19 +1149,8 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, /* reserve PCIHP resources */ if (pm->pcihp_io_len && (pm->pcihp_bridge_en || pm->pcihp_root_en)) { - dev = aml_device("PHPR"); - aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06"))); - aml_append(dev, - aml_name_decl("_UID", aml_string("PCI Hotplug resources"))); - /* device present, functioning, decoding, not shown in UI */ - aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); - crs = aml_resource_template(); - aml_append(crs, - aml_io(AML_DECODE16, pm->pcihp_io_base, pm->pcihp_io_base, 1, - pm->pcihp_io_len) - ); - aml_append(dev, aml_name_decl("_CRS", crs)); - aml_append(scope, dev); + build_append_pcihp_resources(scope, + pm->pcihp_io_base, pm->pcihp_io_len); } aml_append(dsdt, scope); @@ -1832,7 +1257,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, PCIBus *b = PCI_HOST_BRIDGE(pci_host)->bus; scope = aml_scope("\\_SB.PCI0"); - has_pcnt = build_append_notfication_callback(scope, b); + has_pcnt = build_append_notification_callback(scope, b); if (has_pcnt) { aml_append(dsdt, scope); } @@ -2047,7 +1472,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) build_srat_memory(table_data, 0, 0, 0, MEM_AFFINITY_NOFLAGS); } - build_srat_generic_pci_initiator(table_data); + build_srat_generic_affinity_structures(table_data); /* * Entry is required for Windows to enable memory hotplug in OS @@ -2392,12 +1817,12 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, build_append_int_noprefix(table_data, ivhd_blob->len + 24, 2); /* DeviceID */ build_append_int_noprefix(table_data, - object_property_get_int(OBJECT(&s->pci), "addr", + object_property_get_int(OBJECT(s->pci), "addr", &error_abort), 2); /* Capability offset */ - build_append_int_noprefix(table_data, s->pci.capab_offset, 2); + build_append_int_noprefix(table_data, s->pci->capab_offset, 2); /* IOMMU base address */ - build_append_int_noprefix(table_data, s->mmio.addr, 8); + build_append_int_noprefix(table_data, s->mr_mmio.addr, 8); /* PCI Segment Group */ build_append_int_noprefix(table_data, 0, 2); /* IOMMU info */ @@ -2427,12 +1852,12 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, build_append_int_noprefix(table_data, ivhd_blob->len + 40, 2); /* DeviceID */ build_append_int_noprefix(table_data, - object_property_get_int(OBJECT(&s->pci), "addr", + object_property_get_int(OBJECT(s->pci), "addr", &error_abort), 2); /* Capability offset */ - build_append_int_noprefix(table_data, s->pci.capab_offset, 2); + build_append_int_noprefix(table_data, s->pci->capab_offset, 2); /* IOMMU base address */ - build_append_int_noprefix(table_data, s->mmio.addr, 8); + build_append_int_noprefix(table_data, s->mr_mmio.addr, 8); /* PCI Segment Group */ build_append_int_noprefix(table_data, 0, 2); /* IOMMU info */ @@ -2505,7 +1930,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) uint8_t *u; GArray *tables_blob = tables->table_data; AcpiSlicOem slic_oem = { .id = NULL, .table_id = NULL }; - Object *vmgenid_dev; + Object *vmgenid_dev, 
*vmclock_dev; char *oem_id; char *oem_table_id; @@ -2578,6 +2003,13 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) tables->vmgenid, tables->linker, x86ms->oem_id); } + vmclock_dev = find_vmclock_dev(); + if (vmclock_dev) { + acpi_add_table(table_offsets, tables_blob); + vmclock_build_acpi(VMCLOCK(vmclock_dev), tables_blob, tables->linker, + x86ms->oem_id); + } + if (misc.has_hpet) { acpi_add_table(table_offsets, tables_blob); build_hpet(tables_blob, tables->linker, x86ms->oem_id, diff --git a/hw/i386/acpi-build.h b/hw/i386/acpi-build.h index 0dce155c8cc..8ba3c33e483 100644 --- a/hw/i386/acpi-build.h +++ b/hw/i386/acpi-build.h @@ -5,10 +5,6 @@ extern const struct AcpiGenericAddress x86_nvdimm_acpi_dsmio; -/* PCI Hot-plug registers bases. See docs/spec/acpi_pci_hotplug.txt */ -#define ACPI_PCIHP_SEJ_BASE 0x8 -#define ACPI_PCIHP_BNMR_BASE 0x10 - void acpi_setup(void); Object *acpi_get_i386_pci_host(void); diff --git a/hw/i386/acpi-common.c b/hw/i386/acpi-common.c index 0cc2919bb85..7bd08067a73 100644 --- a/hw/i386/acpi-common.c +++ b/hw/i386/acpi-common.c @@ -23,7 +23,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/acpi/acpi.h" #include "hw/acpi/aml-build.h" #include "hw/acpi/utils.h" diff --git a/hw/i386/acpi-microvm.c b/hw/i386/acpi-microvm.c index 279da6b4aa2..bc6571778c3 100644 --- a/hw/i386/acpi-microvm.c +++ b/hw/i386/acpi-microvm.c @@ -24,7 +24,7 @@ #include "qemu/cutils.h" #include "qapi/error.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/acpi/acpi.h" #include "hw/acpi/acpi_aml_interface.h" #include "hw/acpi/aml-build.h" diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c index 87643d28917..26be69bec8a 100644 --- a/hw/i386/amd_iommu.c +++ b/hw/i386/amd_iommu.c @@ -32,6 +32,7 @@ #include "trace.h" #include "hw/i386/apic-msidef.h" #include "hw/qdev-properties.h" +#include "kvm/kvm_i386.h" /* used AMD-Vi MMIO registers */ const char *amdvi_mmio_low[] = { @@ -60,8 +61,9 @@ struct AMDVIAddressSpace { uint8_t bus_num; /* bus number */ uint8_t devfn; /* device function */ AMDVIState *iommu_state; /* AMDVI - one per machine */ - MemoryRegion root; /* AMDVI Root memory map region */ + MemoryRegion root; /* AMDVI Root memory map region */ IOMMUMemoryRegion iommu; /* Device's address translation region */ + MemoryRegion iommu_nodma; /* Alias of shared nodma memory region */ MemoryRegion iommu_ir; /* Device's interrupt remapping region */ AddressSpace as; /* device's corresponding address space */ }; @@ -121,8 +123,13 @@ static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val) uint16_t romask = lduw_le_p(&s->romask[addr]); uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]); uint16_t oldval = lduw_le_p(&s->mmior[addr]); + + uint16_t oldval_preserved = oldval & (romask | w1cmask); + uint16_t newval_write = val & ~romask; + uint16_t newval_w1c_set = val & w1cmask; + stw_le_p(&s->mmior[addr], - ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask)); + (oldval_preserved | newval_write) & ~newval_w1c_set); } static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val) @@ -130,23 +137,33 @@ static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val) uint32_t romask = ldl_le_p(&s->romask[addr]); uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]); uint32_t oldval = ldl_le_p(&s->mmior[addr]); + + uint32_t oldval_preserved = oldval & (romask | w1cmask); + uint32_t newval_write = val & ~romask; + uint32_t newval_w1c_set = val & w1cmask; + stl_le_p(&s->mmior[addr], - 
((oldval & romask) | (val & ~romask)) & ~(val & w1cmask)); + (oldval_preserved | newval_write) & ~newval_w1c_set); } static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val) { uint64_t romask = ldq_le_p(&s->romask[addr]); uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]); - uint32_t oldval = ldq_le_p(&s->mmior[addr]); + uint64_t oldval = ldq_le_p(&s->mmior[addr]); + + uint64_t oldval_preserved = oldval & (romask | w1cmask); + uint64_t newval_write = val & ~romask; + uint64_t newval_w1c_set = val & w1cmask; + stq_le_p(&s->mmior[addr], - ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask)); + (oldval_preserved | newval_write) & ~newval_w1c_set); } -/* OR a 64-bit register with a 64-bit value */ +/* AND a 64-bit register with a 64-bit value */ static bool amdvi_test_mask(AMDVIState *s, hwaddr addr, uint64_t val) { - return amdvi_readq(s, addr) | val; + return amdvi_readq(s, addr) & val; } /* OR a 64-bit register with a 64-bit value storing result in the register */ @@ -165,29 +182,41 @@ static void amdvi_generate_msi_interrupt(AMDVIState *s) { MSIMessage msg = {}; MemTxAttrs attrs = { - .requester_id = pci_requester_id(&s->pci.dev) + .requester_id = pci_requester_id(&s->pci->dev) }; - if (msi_enabled(&s->pci.dev)) { - msg = msi_get_message(&s->pci.dev, 0); + if (msi_enabled(&s->pci->dev)) { + msg = msi_get_message(&s->pci->dev, 0); address_space_stl_le(&address_space_memory, msg.address, msg.data, attrs, NULL); } } +static uint32_t get_next_eventlog_entry(AMDVIState *s) +{ + uint32_t evtlog_size = s->evtlog_len * AMDVI_EVENT_LEN; + return (s->evtlog_tail + AMDVI_EVENT_LEN) % evtlog_size; +} + static void amdvi_log_event(AMDVIState *s, uint64_t *evt) { + uint32_t evtlog_tail_next; + /* event logging not enabled */ if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF)) { return; } + evtlog_tail_next = get_next_eventlog_entry(s); + /* event log buffer full */ - if (s->evtlog_tail >= s->evtlog_len) { - amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF); - /* generate interrupt */ - amdvi_generate_msi_interrupt(s); + if (evtlog_tail_next == s->evtlog_head) { + /* generate overflow interrupt */ + if (s->evtlog_intr) { + amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF); + amdvi_generate_msi_interrupt(s); + } return; } @@ -196,9 +225,13 @@ static void amdvi_log_event(AMDVIState *s, uint64_t *evt) trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail); } - s->evtlog_tail += AMDVI_EVENT_LEN; - amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT); - amdvi_generate_msi_interrupt(s); + s->evtlog_tail = evtlog_tail_next; + amdvi_writeq_raw(s, AMDVI_MMIO_EVENT_TAIL, s->evtlog_tail); + + if (s->evtlog_intr) { + amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVENT_INT); + amdvi_generate_msi_interrupt(s); + } } static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start, @@ -237,7 +270,7 @@ static void amdvi_page_fault(AMDVIState *s, uint16_t devid, info |= AMDVI_EVENT_IOPF_I | AMDVI_EVENT_IOPF; amdvi_encode_event(evt, devid, addr, info); amdvi_log_event(s, evt); - pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS, + pci_word_test_and_set_mask(s->pci->dev.config + PCI_STATUS, PCI_STATUS_SIG_TARGET_ABORT); } /* @@ -254,7 +287,7 @@ static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid, amdvi_encode_event(evt, devid, devtab, info); amdvi_log_event(s, evt); - pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS, + pci_word_test_and_set_mask(s->pci->dev.config + PCI_STATUS, 
PCI_STATUS_SIG_TARGET_ABORT); } /* log an event trying to access command buffer @@ -267,7 +300,7 @@ static void amdvi_log_command_error(AMDVIState *s, hwaddr addr) amdvi_encode_event(evt, 0, addr, info); amdvi_log_event(s, evt); - pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS, + pci_word_test_and_set_mask(s->pci->dev.config + PCI_STATUS, PCI_STATUS_SIG_TARGET_ABORT); } /* log an illegal command event @@ -308,7 +341,7 @@ static void amdvi_log_pagetab_error(AMDVIState *s, uint16_t devid, info |= AMDVI_EVENT_PAGE_TAB_HW_ERROR; amdvi_encode_event(evt, devid, addr, info); amdvi_log_event(s, evt); - pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS, + pci_word_test_and_set_mask(s->pci->dev.config + PCI_STATUS, PCI_STATUS_SIG_TARGET_ABORT); } @@ -430,6 +463,12 @@ static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd) trace_amdvi_ppr_exec(); } +static void amdvi_intremap_inval_notify_all(AMDVIState *s, bool global, + uint32_t index, uint32_t mask) +{ + x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask); +} + static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd) { if (extract64(cmd[0], 0, 60) || cmd[1]) { @@ -437,6 +476,9 @@ static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd) s->cmdbuf + s->cmdbuf_head); } + /* Notify global invalidation */ + amdvi_intremap_inval_notify_all(s, true, 0, 0); + amdvi_iotlb_reset(s); trace_amdvi_all_inval(); } @@ -485,6 +527,9 @@ static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd) return; } + /* Notify global invalidation */ + amdvi_intremap_inval_notify_all(s, true, 0, 0); + trace_amdvi_intr_inval(); } @@ -494,7 +539,7 @@ static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd) static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd) { - uint16_t devid = extract64(cmd[0], 0, 16); + uint16_t devid = cpu_to_le16(extract64(cmd[0], 0, 16)); if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) || extract64(cmd[1], 6, 6)) { amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4), @@ -507,7 +552,7 @@ static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd) &devid); } else { amdvi_iotlb_remove_page(s, cpu_to_le64(extract64(cmd[1], 12, 52)) << 12, - cpu_to_le16(extract64(cmd[1], 0, 16))); + devid); } trace_amdvi_iotlb_inval(); } @@ -578,18 +623,31 @@ static void amdvi_cmdbuf_run(AMDVIState *s) } } -static void amdvi_mmio_trace(hwaddr addr, unsigned size) +static inline uint8_t amdvi_mmio_get_index(hwaddr addr) { uint8_t index = (addr & ~0x2000) / 8; if ((addr & 0x2000)) { /* high table */ index = index >= AMDVI_MMIO_REGS_HIGH ? AMDVI_MMIO_REGS_HIGH : index; - trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07); } else { index = index >= AMDVI_MMIO_REGS_LOW ? 
AMDVI_MMIO_REGS_LOW : index; - trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07); } + + return index; +} + +static void amdvi_mmio_trace_read(hwaddr addr, unsigned size) +{ + uint8_t index = amdvi_mmio_get_index(addr); + trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07); +} + +static void amdvi_mmio_trace_write(hwaddr addr, unsigned size, uint64_t val) +{ + uint8_t index = amdvi_mmio_get_index(addr); + trace_amdvi_mmio_write(amdvi_mmio_low[index], addr, size, val, + addr & ~0x07); } static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size) @@ -609,7 +667,7 @@ static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size) } else if (size == 8) { val = amdvi_readq(s, addr); } - amdvi_mmio_trace(addr, size); + amdvi_mmio_trace_read(addr, size); return val; } @@ -619,7 +677,6 @@ static void amdvi_handle_control_write(AMDVIState *s) unsigned long control = amdvi_readq(s, AMDVI_MMIO_CONTROL); s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN); - s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN); s->evtlog_enabled = s->enabled && !!(control & AMDVI_MMIO_CONTROL_EVENTLOGEN); @@ -651,8 +708,8 @@ static inline void amdvi_handle_devtab_write(AMDVIState *s) uint64_t val = amdvi_readq(s, AMDVI_MMIO_DEVICE_TABLE); s->devtab = (val & AMDVI_MMIO_DEVTAB_BASE_MASK); - /* set device table length */ - s->devtab_len = ((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1 * + /* set device table length (i.e. number of entries table can hold) */ + s->devtab_len = (((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1) * (AMDVI_MMIO_DEVTAB_SIZE_UNIT / AMDVI_MMIO_DEVTAB_ENTRY_SIZE)); } @@ -690,9 +747,19 @@ static inline void amdvi_handle_excllim_write(AMDVIState *s) static inline void amdvi_handle_evtbase_write(AMDVIState *s) { uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE); + + if (amdvi_readq(s, AMDVI_MMIO_STATUS) & AMDVI_MMIO_STATUS_EVENT_INT) + /* Do not reset if eventlog interrupt bit is set*/ + return; + s->evtlog = val & AMDVI_MMIO_EVTLOG_BASE_MASK; s->evtlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_EVTLOG_SIZE_BYTE) & AMDVI_MMIO_EVTLOG_SIZE_MASK); + + /* clear tail and head pointer to 0 when event base is updated */ + s->evtlog_tail = s->evtlog_head = 0; + amdvi_writeq_raw(s, AMDVI_MMIO_EVENT_HEAD, s->evtlog_head); + amdvi_writeq_raw(s, AMDVI_MMIO_EVENT_TAIL, s->evtlog_tail); } static inline void amdvi_handle_evttail_write(AMDVIState *s) @@ -756,7 +823,7 @@ static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val, return; } - amdvi_mmio_trace(addr, size); + amdvi_mmio_trace_write(addr, size, val); switch (addr & ~0x07) { case AMDVI_MMIO_CONTROL: amdvi_mmio_reg_write(s, size, val, addr); @@ -821,6 +888,9 @@ static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val, amdvi_mmio_reg_write(s, size, val, addr); amdvi_handle_pprtail_write(s); break; + case AMDVI_MMIO_STATUS: + amdvi_mmio_reg_write(s, size, val, addr); + break; } } @@ -834,9 +904,10 @@ static inline uint64_t amdvi_get_perms(uint64_t entry) static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid, uint64_t *dte) { - if ((dte[0] & AMDVI_DTE_LOWER_QUAD_RESERVED) - || (dte[1] & AMDVI_DTE_MIDDLE_QUAD_RESERVED) - || (dte[2] & AMDVI_DTE_UPPER_QUAD_RESERVED) || dte[3]) { + if ((dte[0] & AMDVI_DTE_QUAD0_RESERVED) || + (dte[1] & AMDVI_DTE_QUAD1_RESERVED) || + (dte[2] & AMDVI_DTE_QUAD2_RESERVED) || + (dte[3] & AMDVI_DTE_QUAD3_RESERVED)) { amdvi_log_illegaldevtab_error(s, devid, s->devtab + devid * AMDVI_DEVTAB_ENTRY_SIZE, 0); @@ -1295,15 +1366,15 @@ static int 
amdvi_int_remap_msi(AMDVIState *iommu, ret = -AMDVI_IR_ERR; break; case AMDVI_IOAPIC_INT_TYPE_NMI: - pass = dte[3] & AMDVI_DEV_NMI_PASS_MASK; + pass = dte[2] & AMDVI_DEV_NMI_PASS_MASK; trace_amdvi_ir_delivery_mode("nmi"); break; case AMDVI_IOAPIC_INT_TYPE_INIT: - pass = dte[3] & AMDVI_DEV_INT_PASS_MASK; + pass = dte[2] & AMDVI_DEV_INT_PASS_MASK; trace_amdvi_ir_delivery_mode("init"); break; case AMDVI_IOAPIC_INT_TYPE_EINT: - pass = dte[3] & AMDVI_DEV_EINT_PASS_MASK; + pass = dte[2] & AMDVI_DEV_EINT_PASS_MASK; trace_amdvi_ir_delivery_mode("eint"); break; default: @@ -1436,13 +1507,13 @@ static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) * Memory region relationships looks like (Address range shows * only lower 32 bits to make it short in length...): * - * |-----------------+-------------------+----------| - * | Name | Address range | Priority | - * |-----------------+-------------------+----------+ - * | amdvi_root | 00000000-ffffffff | 0 | - * | amdvi_iommu | 00000000-ffffffff | 1 | - * | amdvi_iommu_ir | fee00000-feefffff | 64 | - * |-----------------+-------------------+----------| + * |--------------------+-------------------+----------| + * | Name | Address range | Priority | + * |--------------------+-------------------+----------+ + * | amdvi-root | 00000000-ffffffff | 0 | + * | amdvi-iommu_nodma | 00000000-ffffffff | 0 | + * | amdvi-iommu_ir | fee00000-feefffff | 1 | + * |--------------------+-------------------+----------| */ memory_region_init_iommu(&amdvi_dev_as->iommu, sizeof(amdvi_dev_as->iommu), @@ -1452,16 +1523,27 @@ static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) memory_region_init(&amdvi_dev_as->root, OBJECT(s), "amdvi_root", UINT64_MAX); address_space_init(&amdvi_dev_as->as, &amdvi_dev_as->root, name); - memory_region_init_io(&amdvi_dev_as->iommu_ir, OBJECT(s), - &amdvi_ir_ops, s, "amd_iommu_ir", - AMDVI_INT_ADDR_SIZE); - memory_region_add_subregion_overlap(&amdvi_dev_as->root, - AMDVI_INT_ADDR_FIRST, - &amdvi_dev_as->iommu_ir, - 64); memory_region_add_subregion_overlap(&amdvi_dev_as->root, 0, MEMORY_REGION(&amdvi_dev_as->iommu), - 1); + 0); + + /* Build the DMA Disabled alias to shared memory */ + memory_region_init_alias(&amdvi_dev_as->iommu_nodma, OBJECT(s), + "amdvi-sys", &s->mr_sys, 0, + memory_region_size(&s->mr_sys)); + memory_region_add_subregion_overlap(&amdvi_dev_as->root, 0, + &amdvi_dev_as->iommu_nodma, + 0); + /* Build the Interrupt Remapping alias to shared memory */ + memory_region_init_alias(&amdvi_dev_as->iommu_ir, OBJECT(s), + "amdvi-ir", &s->mr_ir, 0, + memory_region_size(&s->mr_ir)); + memory_region_add_subregion_overlap(MEMORY_REGION(&amdvi_dev_as->iommu), + AMDVI_INT_ADDR_FIRST, + &amdvi_dev_as->iommu_ir, 1); + + memory_region_set_enabled(&amdvi_dev_as->iommu_nodma, false); + memory_region_set_enabled(MEMORY_REGION(&amdvi_dev_as->iommu), true); } return &iommu_as[devfn]->as; } @@ -1516,7 +1598,6 @@ static void amdvi_init(AMDVIState *s) s->excl_allow = false; s->mmio_enabled = false; s->enabled = false; - s->ats_enabled = false; s->cmdbuf_enabled = false; /* reset MMIO */ @@ -1560,9 +1641,9 @@ static void amdvi_pci_realize(PCIDevice *pdev, Error **errp) /* reset AMDVI specific capabilities, all r/o */ pci_set_long(pdev->config + s->capab_offset, AMDVI_CAPAB_FEATURES); pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_BAR_LOW, - AMDVI_BASE_ADDR & ~(0xffff0000)); + AMDVI_BASE_ADDR & MAKE_64BIT_MASK(14, 18)); pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH, - 
(AMDVI_BASE_ADDR & ~(0xffff)) >> 16); + AMDVI_BASE_ADDR >> 32); pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_RANGE, 0xff000000); pci_set_long(pdev->config + s->capab_offset + AMDVI_CAPAB_MISC, 0); @@ -1574,41 +1655,138 @@ static void amdvi_sysbus_reset(DeviceState *dev) { AMDVIState *s = AMD_IOMMU_DEVICE(dev); - msi_reset(&s->pci.dev); + msi_reset(&s->pci->dev); amdvi_init(s); } +static const VMStateDescription vmstate_amdvi_sysbus_migratable = { + .name = "amd-iommu", + .version_id = 1, + .minimum_version_id = 1, + .priority = MIG_PRI_IOMMU, + .fields = (VMStateField[]) { + /* Updated in amdvi_handle_control_write() */ + VMSTATE_BOOL(enabled, AMDVIState), + VMSTATE_BOOL(ga_enabled, AMDVIState), + /* bool ats_enabled is obsolete */ + VMSTATE_UNUSED(1), /* was ats_enabled */ + VMSTATE_BOOL(cmdbuf_enabled, AMDVIState), + VMSTATE_BOOL(completion_wait_intr, AMDVIState), + VMSTATE_BOOL(evtlog_enabled, AMDVIState), + VMSTATE_BOOL(evtlog_intr, AMDVIState), + /* Updated in amdvi_handle_devtab_write() */ + VMSTATE_UINT64(devtab, AMDVIState), + VMSTATE_UINT64(devtab_len, AMDVIState), + /* Updated in amdvi_handle_cmdbase_write() */ + VMSTATE_UINT64(cmdbuf, AMDVIState), + VMSTATE_UINT64(cmdbuf_len, AMDVIState), + /* Updated in amdvi_handle_cmdhead_write() */ + VMSTATE_UINT32(cmdbuf_head, AMDVIState), + /* Updated in amdvi_handle_cmdtail_write() */ + VMSTATE_UINT32(cmdbuf_tail, AMDVIState), + /* Updated in amdvi_handle_evtbase_write() */ + VMSTATE_UINT64(evtlog, AMDVIState), + VMSTATE_UINT32(evtlog_len, AMDVIState), + /* Updated in amdvi_handle_evthead_write() */ + VMSTATE_UINT32(evtlog_head, AMDVIState), + /* Updated in amdvi_handle_evttail_write() */ + VMSTATE_UINT32(evtlog_tail, AMDVIState), + /* Updated in amdvi_handle_pprbase_write() */ + VMSTATE_UINT64(ppr_log, AMDVIState), + VMSTATE_UINT32(pprlog_len, AMDVIState), + /* Updated in amdvi_handle_pprhead_write() */ + VMSTATE_UINT32(pprlog_head, AMDVIState), + /* Updated in amdvi_handle_tailhead_write() */ + VMSTATE_UINT32(pprlog_tail, AMDVIState), + /* MMIO registers */ + VMSTATE_UINT8_ARRAY(mmior, AMDVIState, AMDVI_MMIO_SIZE), + VMSTATE_UINT8_ARRAY(romask, AMDVIState, AMDVI_MMIO_SIZE), + VMSTATE_UINT8_ARRAY(w1cmask, AMDVIState, AMDVI_MMIO_SIZE), + VMSTATE_END_OF_LIST() + } +}; + static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) { + DeviceClass *dc = (DeviceClass *) object_get_class(OBJECT(dev)); AMDVIState *s = AMD_IOMMU_DEVICE(dev); MachineState *ms = MACHINE(qdev_get_machine()); PCMachineState *pcms = PC_MACHINE(ms); X86MachineState *x86ms = X86_MACHINE(ms); PCIBus *bus = pcms->pcibus; + if (s->pci_id) { + PCIDevice *pdev = NULL; + int ret = pci_qdev_find_device(s->pci_id, &pdev); + + if (ret) { + error_report("Cannot find PCI device '%s'", s->pci_id); + return; + } + + if (!object_dynamic_cast(OBJECT(pdev), TYPE_AMD_IOMMU_PCI)) { + error_report("Device '%s' must be an AMDVI-PCI device type", s->pci_id); + return; + } + + s->pci = AMD_IOMMU_PCI(pdev); + dc->vmsd = &vmstate_amdvi_sysbus_migratable; + } else { + s->pci = AMD_IOMMU_PCI(object_new(TYPE_AMD_IOMMU_PCI)); + /* This device should take care of IOMMU PCI properties */ + if (!qdev_realize(DEVICE(s->pci), &bus->qbus, errp)) { + return; + } + } + s->iotlb = g_hash_table_new_full(amdvi_uint64_hash, amdvi_uint64_equal, g_free, g_free); - /* This device should take care of IOMMU PCI properties */ - if (!qdev_realize(DEVICE(&s->pci), &bus->qbus, errp)) { - return; - } + /* set up MMIO */ + memory_region_init_io(&s->mr_mmio, OBJECT(s), &mmio_mem_ops, s, + "amdvi-mmio", 
AMDVI_MMIO_SIZE); + memory_region_add_subregion(get_system_memory(), AMDVI_BASE_ADDR, + &s->mr_mmio); + + /* Create the share memory regions by all devices */ + memory_region_init(&s->mr_sys, OBJECT(s), "amdvi-sys", UINT64_MAX); + + /* set up the DMA disabled memory region */ + memory_region_init_alias(&s->mr_nodma, OBJECT(s), + "amdvi-nodma", get_system_memory(), 0, + memory_region_size(get_system_memory())); + memory_region_add_subregion_overlap(&s->mr_sys, 0, + &s->mr_nodma, 0); + + /* set up the Interrupt Remapping memory region */ + memory_region_init_io(&s->mr_ir, OBJECT(s), &amdvi_ir_ops, + s, "amdvi-ir", AMDVI_INT_ADDR_SIZE); + memory_region_add_subregion_overlap(&s->mr_sys, AMDVI_INT_ADDR_FIRST, + &s->mr_ir, 1); /* Pseudo address space under root PCI bus. */ x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID); - /* set up MMIO */ - memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio", - AMDVI_MMIO_SIZE); - memory_region_add_subregion(get_system_memory(), AMDVI_BASE_ADDR, - &s->mmio); + if (kvm_enabled() && x86ms->apic_id_limit > 255 && !s->xtsup) { + error_report("AMD IOMMU with x2APIC configuration requires xtsup=on"); + exit(EXIT_FAILURE); + } + + if (s->xtsup) { + if (kvm_irqchip_is_split() && !kvm_enable_x2apic()) { + error_report("AMD IOMMU xtsup=on requires x2APIC support on " + "the KVM side"); + exit(EXIT_FAILURE); + } + } + pci_setup_iommu(bus, &amdvi_iommu_ops, s); amdvi_init(s); } -static Property amdvi_properties[] = { +static const Property amdvi_properties[] = { DEFINE_PROP_BOOL("xtsup", AMDVIState, xtsup, false), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_STRING("pci-id", AMDVIState, pci_id), }; static const VMStateDescription vmstate_amdvi_sysbus = { @@ -1616,25 +1794,16 @@ static const VMStateDescription vmstate_amdvi_sysbus = { .unmigratable = 1 }; -static void amdvi_sysbus_instance_init(Object *klass) -{ - AMDVIState *s = AMD_IOMMU_DEVICE(klass); - - object_initialize(&s->pci, sizeof(s->pci), TYPE_AMD_IOMMU_PCI); -} - -static void amdvi_sysbus_class_init(ObjectClass *klass, void *data) +static void amdvi_sysbus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); X86IOMMUClass *dc_class = X86_IOMMU_DEVICE_CLASS(klass); - dc->reset = amdvi_sysbus_reset; + device_class_set_legacy_reset(dc, amdvi_sysbus_reset); dc->vmsd = &vmstate_amdvi_sysbus; dc->hotpluggable = false; dc_class->realize = amdvi_sysbus_realize; dc_class->int_remap = amdvi_int_remap; - /* Supported by the pc-q35-* machine types */ - dc->user_creatable = true; set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device"; device_class_set_props(dc, amdvi_properties); @@ -1644,16 +1813,16 @@ static const TypeInfo amdvi_sysbus = { .name = TYPE_AMD_IOMMU_DEVICE, .parent = TYPE_X86_IOMMU_DEVICE, .instance_size = sizeof(AMDVIState), - .instance_init = amdvi_sysbus_instance_init, .class_init = amdvi_sysbus_class_init }; -static void amdvi_pci_class_init(ObjectClass *klass, void *data) +static void amdvi_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->vendor_id = PCI_VENDOR_ID_AMD; + k->device_id = 0x1419; k->class_id = 0x0806; k->realize = amdvi_pci_realize; @@ -1666,13 +1835,14 @@ static const TypeInfo amdvi_pci = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(AMDVIPCIState), .class_init = amdvi_pci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { 
INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -static void amdvi_iommu_memory_region_class_init(ObjectClass *klass, void *data) +static void amdvi_iommu_memory_region_class_init(ObjectClass *klass, + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h index 73619fe9eaa..2476296c490 100644 --- a/hw/i386/amd_iommu.h +++ b/hw/i386/amd_iommu.h @@ -25,6 +25,8 @@ #include "hw/i386/x86-iommu.h" #include "qom/object.h" +#define GENMASK64(h, l) (((~0ULL) >> (63 - (h) + (l))) << (l)) + /* Capability registers */ #define AMDVI_CAPAB_BAR_LOW 0x04 #define AMDVI_CAPAB_BAR_HIGH 0x08 @@ -66,34 +68,34 @@ #define AMDVI_MMIO_SIZE 0x4000 -#define AMDVI_MMIO_DEVTAB_SIZE_MASK ((1ULL << 12) - 1) -#define AMDVI_MMIO_DEVTAB_BASE_MASK (((1ULL << 52) - 1) & ~ \ - AMDVI_MMIO_DEVTAB_SIZE_MASK) +#define AMDVI_MMIO_DEVTAB_SIZE_MASK GENMASK64(8, 0) +#define AMDVI_MMIO_DEVTAB_BASE_MASK GENMASK64(51, 12) + #define AMDVI_MMIO_DEVTAB_ENTRY_SIZE 32 #define AMDVI_MMIO_DEVTAB_SIZE_UNIT 4096 /* some of this are similar but just for readability */ #define AMDVI_MMIO_CMDBUF_SIZE_BYTE (AMDVI_MMIO_COMMAND_BASE + 7) #define AMDVI_MMIO_CMDBUF_SIZE_MASK 0x0f -#define AMDVI_MMIO_CMDBUF_BASE_MASK AMDVI_MMIO_DEVTAB_BASE_MASK -#define AMDVI_MMIO_CMDBUF_HEAD_MASK (((1ULL << 19) - 1) & ~0x0f) -#define AMDVI_MMIO_CMDBUF_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK +#define AMDVI_MMIO_CMDBUF_BASE_MASK GENMASK64(51, 12) +#define AMDVI_MMIO_CMDBUF_HEAD_MASK GENMASK64(18, 4) +#define AMDVI_MMIO_CMDBUF_TAIL_MASK GENMASK64(18, 4) #define AMDVI_MMIO_EVTLOG_SIZE_BYTE (AMDVI_MMIO_EVENT_BASE + 7) -#define AMDVI_MMIO_EVTLOG_SIZE_MASK AMDVI_MMIO_CMDBUF_SIZE_MASK -#define AMDVI_MMIO_EVTLOG_BASE_MASK AMDVI_MMIO_CMDBUF_BASE_MASK -#define AMDVI_MMIO_EVTLOG_HEAD_MASK (((1ULL << 19) - 1) & ~0x0f) -#define AMDVI_MMIO_EVTLOG_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK +#define AMDVI_MMIO_EVTLOG_SIZE_MASK 0x0f +#define AMDVI_MMIO_EVTLOG_BASE_MASK GENMASK64(51, 12) +#define AMDVI_MMIO_EVTLOG_HEAD_MASK GENMASK64(18, 4) +#define AMDVI_MMIO_EVTLOG_TAIL_MASK GENMASK64(18, 4) -#define AMDVI_MMIO_PPRLOG_SIZE_BYTE (AMDVI_MMIO_EVENT_BASE + 7) -#define AMDVI_MMIO_PPRLOG_HEAD_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK -#define AMDVI_MMIO_PPRLOG_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK -#define AMDVI_MMIO_PPRLOG_BASE_MASK AMDVI_MMIO_EVTLOG_BASE_MASK -#define AMDVI_MMIO_PPRLOG_SIZE_MASK AMDVI_MMIO_EVTLOG_SIZE_MASK +#define AMDVI_MMIO_PPRLOG_SIZE_BYTE (AMDVI_MMIO_PPR_BASE + 7) +#define AMDVI_MMIO_PPRLOG_SIZE_MASK 0x0f +#define AMDVI_MMIO_PPRLOG_BASE_MASK GENMASK64(51, 12) +#define AMDVI_MMIO_PPRLOG_HEAD_MASK GENMASK64(18, 4) +#define AMDVI_MMIO_PPRLOG_TAIL_MASK GENMASK64(18, 4) #define AMDVI_MMIO_EXCL_ENABLED_MASK (1ULL << 0) #define AMDVI_MMIO_EXCL_ALLOW_MASK (1ULL << 1) -#define AMDVI_MMIO_EXCL_LIMIT_MASK AMDVI_MMIO_DEVTAB_BASE_MASK +#define AMDVI_MMIO_EXCL_LIMIT_MASK GENMASK64(51, 12) #define AMDVI_MMIO_EXCL_LIMIT_LOW 0xfff /* mmio control register flags */ @@ -109,6 +111,7 @@ #define AMDVI_MMIO_STATUS_CMDBUF_RUN (1 << 4) #define AMDVI_MMIO_STATUS_EVT_RUN (1 << 3) #define AMDVI_MMIO_STATUS_COMP_INT (1 << 2) +#define AMDVI_MMIO_STATUS_EVENT_INT (1 << 1) #define AMDVI_MMIO_STATUS_EVT_OVF (1 << 0) #define AMDVI_CMDBUF_ID_BYTE 0x07 @@ -130,14 +133,14 @@ #define AMDVI_DEV_TRANSLATION_VALID (1ULL << 1) #define AMDVI_DEV_MODE_MASK 0x7 #define AMDVI_DEV_MODE_RSHIFT 9 -#define AMDVI_DEV_PT_ROOT_MASK 0xffffffffff000 +#define AMDVI_DEV_PT_ROOT_MASK GENMASK64(51, 12) #define AMDVI_DEV_PT_ROOT_RSHIFT 12 #define 
AMDVI_DEV_PERM_SHIFT 61 #define AMDVI_DEV_PERM_READ (1ULL << 61) #define AMDVI_DEV_PERM_WRITE (1ULL << 62) /* Device table entry bits 64:127 */ -#define AMDVI_DEV_DOMID_ID_MASK ((1ULL << 16) - 1) +#define AMDVI_DEV_DOMID_ID_MASK GENMASK64(15, 0) /* Event codes and flags, as stored in the info field */ #define AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY (0x1U << 12) @@ -162,9 +165,10 @@ #define AMDVI_FEATURE_PC (1ULL << 9) /* Perf counters */ /* reserved DTE bits */ -#define AMDVI_DTE_LOWER_QUAD_RESERVED 0x80300000000000fc -#define AMDVI_DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100 -#define AMDVI_DTE_UPPER_QUAD_RESERVED 0x08f0000000000000 +#define AMDVI_DTE_QUAD0_RESERVED (GENMASK64(6, 2) | GENMASK64(63, 63)) +#define AMDVI_DTE_QUAD1_RESERVED 0 +#define AMDVI_DTE_QUAD2_RESERVED GENMASK64(53, 52) +#define AMDVI_DTE_QUAD3_RESERVED (GENMASK64(14, 0) | GENMASK64(53, 48)) /* AMDVI paging mode */ #define AMDVI_GATS_MODE (2ULL << 12) @@ -187,23 +191,19 @@ AMDVI_CAPAB_FLAG_HTTUNNEL | AMDVI_CAPAB_EFR_SUP) /* AMDVI default address */ -#define AMDVI_BASE_ADDR 0xfed80000 +#define AMDVI_BASE_ADDR 0xfed80000ULL /* page management constants */ #define AMDVI_PAGE_SHIFT 12 #define AMDVI_PAGE_SIZE (1ULL << AMDVI_PAGE_SHIFT) #define AMDVI_PAGE_SHIFT_4K 12 -#define AMDVI_PAGE_MASK_4K (~((1ULL << AMDVI_PAGE_SHIFT_4K) - 1)) - -#define AMDVI_MAX_VA_ADDR (48UL << 5) -#define AMDVI_MAX_PH_ADDR (40UL << 8) -#define AMDVI_MAX_GVA_ADDR (48UL << 15) +#define AMDVI_PAGE_MASK_4K GENMASK64(63, 12) -/* Completion Wait data size */ -#define AMDVI_COMPLETION_DATA_SIZE 8 +#define AMDVI_MAX_GVA_ADDR (2UL << 5) +#define AMDVI_MAX_PH_ADDR (40UL << 8) +#define AMDVI_MAX_VA_ADDR (48UL << 15) -#define AMDVI_COMMAND_SIZE 16 /* Completion Wait data size */ #define AMDVI_COMPLETION_DATA_SIZE 8 @@ -228,7 +228,7 @@ #define AMDVI_IR_INTCTL_PASS 1 #define AMDVI_IR_INTCTL_REMAP 2 -#define AMDVI_IR_PHYS_ADDR_MASK (((1ULL << 45) - 1) << 6) +#define AMDVI_IR_PHYS_ADDR_MASK GENMASK64(51, 6) /* MSI data 10:0 bits (section 2.2.5.1 Fig 14) */ #define AMDVI_IRTE_OFFSET 0x7ff @@ -315,20 +315,20 @@ struct AMDVIPCIState { struct AMDVIState { X86IOMMUState iommu; /* IOMMU bus device */ - AMDVIPCIState pci; /* IOMMU PCI device */ + AMDVIPCIState *pci; /* IOMMU PCI device */ + char *pci_id; /* ID of AMDVI-PCI device, if user created */ uint32_t version; uint64_t mmio_addr; bool enabled; /* IOMMU enabled */ - bool ats_enabled; /* address translation enabled */ bool cmdbuf_enabled; /* command buffer enabled */ bool evtlog_enabled; /* event log enabled */ bool excl_enabled; hwaddr devtab; /* base address device table */ - size_t devtab_len; /* device table length */ + uint64_t devtab_len; /* device table length */ hwaddr cmdbuf; /* command buffer base address */ uint64_t cmdbuf_len; /* command buffer length */ @@ -353,7 +353,10 @@ struct AMDVIState { uint32_t pprlog_head; /* ppr log head */ uint32_t pprlog_tail; /* ppr log tail */ - MemoryRegion mmio; /* MMIO region */ + MemoryRegion mr_mmio; /* MMIO region */ + MemoryRegion mr_sys; + MemoryRegion mr_nodma; + MemoryRegion mr_ir; uint8_t mmior[AMDVI_MMIO_SIZE]; /* read/write MMIO */ uint8_t w1cmask[AMDVI_MMIO_SIZE]; /* read/write 1 clear mask */ uint8_t romask[AMDVI_MMIO_SIZE]; /* MMIO read/only mask */ diff --git a/hw/i386/fw_cfg.c b/hw/i386/fw_cfg.c index 0e4494627c2..5c0bcd5f8a9 100644 --- a/hw/i386/fw_cfg.c +++ b/hw/i386/fw_cfg.c @@ -13,7 +13,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/numa.h" +#include "system/numa.h" #include "hw/acpi/acpi.h" #include "hw/acpi/aml-build.h" #include "hw/firmware/smbios.h" @@ 
-26,7 +26,9 @@ #include CONFIG_DEVICES #include "target/i386/cpu.h" -struct hpet_fw_config hpet_cfg = {.count = UINT8_MAX}; +#if !defined(CONFIG_HPET) +struct hpet_fw_config hpet_fw_cfg = {.count = UINT8_MAX}; +#endif const char *fw_cfg_arch_key_name(uint16_t key) { @@ -143,13 +145,13 @@ FWCfgState *fw_cfg_arch_create(MachineState *ms, */ fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, apic_id_limit); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, ms->ram_size); -#ifdef CONFIG_ACPI - fw_cfg_add_bytes(fw_cfg, FW_CFG_ACPI_TABLES, - acpi_tables, acpi_tables_len); -#endif + if (acpi_builtin()) { + fw_cfg_add_bytes(fw_cfg, FW_CFG_ACPI_TABLES, + acpi_tables, acpi_tables_len); + } fw_cfg_add_i32(fw_cfg, FW_CFG_IRQ0_OVERRIDE, 1); - fw_cfg_add_bytes(fw_cfg, FW_CFG_HPET, &hpet_cfg, sizeof(hpet_cfg)); + fw_cfg_add_bytes(fw_cfg, FW_CFG_HPET, &hpet_fw_cfg, sizeof(hpet_fw_cfg)); /* allocate memory for the NUMA channel: one (64bit) word for the number * of nodes, one word for each VCPU->node and one word for each node to * hold the amount of memory. diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 16d2885fcc0..83c5e444131 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -32,9 +32,9 @@ #include "hw/i386/apic-msidef.h" #include "hw/i386/x86-iommu.h" #include "hw/pci-host/q35.h" -#include "sysemu/kvm.h" -#include "sysemu/dma.h" -#include "sysemu/sysemu.h" +#include "system/kvm.h" +#include "system/dma.h" +#include "system/system.h" #include "hw/i386/apic_internal.h" #include "kvm/kvm_i386.h" #include "migration/vmstate.h" @@ -48,7 +48,10 @@ /* pe operations */ #define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT) -#define VTD_PE_GET_LEVEL(pe) (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW)) +#define VTD_PE_GET_FL_LEVEL(pe) \ + (4 + (((pe)->val[2] >> 2) & VTD_SM_PASID_ENTRY_FLPM)) +#define VTD_PE_GET_SL_LEVEL(pe) \ + (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW)) /* * PCI bus number (or SID) is not reliable since the device is usaully @@ -67,6 +70,11 @@ struct vtd_hiod_key { uint8_t devfn; }; +struct vtd_as_raw_key { + uint16_t sid; + uint32_t pasid; +}; + struct vtd_iotlb_key { uint64_t gfn; uint32_t pasid; @@ -284,15 +292,15 @@ static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value, } /* The shift of an addr for a certain level of paging structure */ -static inline uint32_t vtd_slpt_level_shift(uint32_t level) +static inline uint32_t vtd_pt_level_shift(uint32_t level) { assert(level != 0); - return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS; + return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_LEVEL_BITS; } -static inline uint64_t vtd_slpt_level_page_mask(uint32_t level) +static inline uint64_t vtd_pt_level_page_mask(uint32_t level) { - return ~((1ULL << vtd_slpt_level_shift(level)) - 1); + return ~((1ULL << vtd_pt_level_shift(level)) - 1); } static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value, @@ -302,9 +310,43 @@ static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value, VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data; uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask; uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K; - return (entry->domain_id == info->domain_id) && - (((entry->gfn & info->mask) == gfn) || - (entry->gfn == gfn_tlb)); + + if (entry->domain_id != info->domain_id) { + return false; + } + + /* + * According to spec, IOTLB entries caching first-stage (PGTT=001b) or + * nested (PGTT=011b) mapping associated with specified domain-id are + * invalidated. 
Nested isn't supported yet, so only need to check 001b. + */ + if (entry->pgtt == VTD_SM_PASID_ENTRY_FLT) { + return true; + } + + return (entry->gfn & info->mask) == gfn || entry->gfn == gfn_tlb; +} + +static gboolean vtd_hash_remove_by_page_piotlb(gpointer key, gpointer value, + gpointer user_data) +{ + VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value; + VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data; + uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask; + uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K; + + /* + * According to spec, PASID-based-IOTLB Invalidation in page granularity + * doesn't invalidate IOTLB entries caching second-stage (PGTT=010b) + * or pass-through (PGTT=100b) mappings. Nested isn't supported yet, + * so only need to check first-stage (PGTT=001b) mappings. + */ + if (entry->pgtt != VTD_SM_PASID_ENTRY_FLT) { + return false; + } + + return entry->domain_id == info->domain_id && entry->pasid == info->pasid && + ((entry->gfn & info->mask) == gfn || entry->gfn == gfn_tlb); } /* Reset all the gen of VTDAddressSpace to zero and set the gen of @@ -349,7 +391,7 @@ static void vtd_reset_caches(IntelIOMMUState *s) static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level) { - return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K; + return (addr & vtd_pt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K; } /* Must be called with IOMMU lock held */ @@ -360,7 +402,7 @@ static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id, VTDIOTLBEntry *entry; unsigned level; - for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) { + for (level = VTD_PT_LEVEL; level < VTD_PML4_LEVEL; level++) { key.gfn = vtd_get_iotlb_gfn(addr, level); key.level = level; key.sid = source_id; @@ -377,15 +419,15 @@ static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id, /* Must be with IOMMU lock held */ static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id, - uint16_t domain_id, hwaddr addr, uint64_t slpte, + uint16_t domain_id, hwaddr addr, uint64_t pte, uint8_t access_flags, uint32_t level, - uint32_t pasid) + uint32_t pasid, uint8_t pgtt) { VTDIOTLBEntry *entry = g_malloc(sizeof(*entry)); struct vtd_iotlb_key *key = g_malloc(sizeof(*key)); uint64_t gfn = vtd_get_iotlb_gfn(addr, level); - trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id); + trace_vtd_iotlb_page_update(source_id, addr, pte, domain_id); if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) { trace_vtd_iotlb_reset("iotlb exceeds size limit"); vtd_reset_iotlb_locked(s); @@ -393,10 +435,11 @@ static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id, entry->gfn = gfn; entry->domain_id = domain_id; - entry->slpte = slpte; + entry->pte = pte; entry->access_flags = access_flags; - entry->mask = vtd_slpt_level_page_mask(level); + entry->mask = vtd_pt_level_page_mask(level); entry->pasid = pasid; + entry->pgtt = pgtt; key->gfn = gfn; key->sid = source_id; @@ -710,32 +753,32 @@ static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce) return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR; } -static inline uint64_t vtd_get_slpte_addr(uint64_t slpte, uint8_t aw) +static inline uint64_t vtd_get_pte_addr(uint64_t pte, uint8_t aw) { - return slpte & VTD_SL_PT_BASE_ADDR_MASK(aw); + return pte & VTD_PT_BASE_ADDR_MASK(aw); } /* Whether the pte indicates the address of the page frame */ -static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level) +static inline bool vtd_is_last_pte(uint64_t pte, uint32_t 
level) { - return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK); + return level == VTD_PT_LEVEL || (pte & VTD_PT_PAGE_SIZE_MASK); } -/* Get the content of a spte located in @base_addr[@index] */ -static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index) +/* Get the content of a pte located in @base_addr[@index] */ +static uint64_t vtd_get_pte(dma_addr_t base_addr, uint32_t index) { - uint64_t slpte; + uint64_t pte; - assert(index < VTD_SL_PT_ENTRY_NR); + assert(index < VTD_PT_ENTRY_NR); if (dma_memory_read(&address_space_memory, - base_addr + index * sizeof(slpte), - &slpte, sizeof(slpte), MEMTXATTRS_UNSPECIFIED)) { - slpte = (uint64_t)-1; - return slpte; + base_addr + index * sizeof(pte), + &pte, sizeof(pte), MEMTXATTRS_UNSPECIFIED)) { + pte = (uint64_t)-1; + return pte; } - slpte = le64_to_cpu(slpte); - return slpte; + pte = le64_to_cpu(pte); + return pte; } /* Given an iova and the level of paging structure, return the offset @@ -743,36 +786,39 @@ static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index) */ static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level) { - return (iova >> vtd_slpt_level_shift(level)) & - ((1ULL << VTD_SL_LEVEL_BITS) - 1); + return (iova >> vtd_pt_level_shift(level)) & + ((1ULL << VTD_LEVEL_BITS) - 1); } /* Check Capability Register to see if the @level of page-table is supported */ -static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level) +static inline bool vtd_is_sl_level_supported(IntelIOMMUState *s, uint32_t level) { return VTD_CAP_SAGAW_MASK & s->cap & (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT)); } +static inline bool vtd_is_fl_level_supported(IntelIOMMUState *s, uint32_t level) +{ + return level == VTD_PML4_LEVEL; +} + /* Return true if check passed, otherwise false */ -static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu, - VTDPASIDEntry *pe) +static inline bool vtd_pe_type_check(IntelIOMMUState *s, VTDPASIDEntry *pe) { switch (VTD_PE_GET_TYPE(pe)) { case VTD_SM_PASID_ENTRY_FLT: + return !!(s->ecap & VTD_ECAP_FLTS); case VTD_SM_PASID_ENTRY_SLT: + return !!(s->ecap & VTD_ECAP_SLTS); case VTD_SM_PASID_ENTRY_NESTED: - break; + /* Not support NESTED page table type yet */ + return false; case VTD_SM_PASID_ENTRY_PT: - if (!x86_iommu->pt_supported) { - return false; - } - break; + return !!(s->ecap & VTD_ECAP_PT); default: /* Unknown type */ return false; } - return true; } static inline bool vtd_pdire_present(VTDPASIDDirEntry *pdire) @@ -796,7 +842,7 @@ static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base, addr = pasid_dir_base + index * entry_size; if (dma_memory_read(&address_space_memory, addr, pdire, entry_size, MEMTXATTRS_UNSPECIFIED)) { - return -VTD_FR_PASID_TABLE_INV; + return -VTD_FR_PASID_DIR_ACCESS_ERR; } pdire->val = le64_to_cpu(pdire->val); @@ -814,28 +860,35 @@ static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s, dma_addr_t addr, VTDPASIDEntry *pe) { + uint8_t pgtt; uint32_t index; dma_addr_t entry_size; - X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); index = VTD_PASID_TABLE_INDEX(pasid); entry_size = VTD_PASID_ENTRY_SIZE; addr = addr + index * entry_size; if (dma_memory_read(&address_space_memory, addr, pe, entry_size, MEMTXATTRS_UNSPECIFIED)) { - return -VTD_FR_PASID_TABLE_INV; + return -VTD_FR_PASID_TABLE_ACCESS_ERR; } for (size_t i = 0; i < ARRAY_SIZE(pe->val); i++) { pe->val[i] = le64_to_cpu(pe->val[i]); } /* Do translation type check */ - if (!vtd_pe_type_check(x86_iommu, pe)) { - return -VTD_FR_PASID_TABLE_INV; + if 
(!vtd_pe_type_check(s, pe)) { + return -VTD_FR_PASID_TABLE_ENTRY_INV; } - if (!vtd_is_level_supported(s, VTD_PE_GET_LEVEL(pe))) { - return -VTD_FR_PASID_TABLE_INV; + pgtt = VTD_PE_GET_TYPE(pe); + if (pgtt == VTD_SM_PASID_ENTRY_SLT && + !vtd_is_sl_level_supported(s, VTD_PE_GET_SL_LEVEL(pe))) { + return -VTD_FR_PASID_TABLE_ENTRY_INV; + } + + if (pgtt == VTD_SM_PASID_ENTRY_FLT && + !vtd_is_fl_level_supported(s, VTD_PE_GET_FL_LEVEL(pe))) { + return -VTD_FR_PASID_TABLE_ENTRY_INV; } return 0; @@ -876,7 +929,7 @@ static int vtd_get_pe_from_pasid_table(IntelIOMMUState *s, } if (!vtd_pdire_present(&pdire)) { - return -VTD_FR_PASID_TABLE_INV; + return -VTD_FR_PASID_DIR_ENTRY_P; } ret = vtd_get_pe_from_pdire(s, pasid, &pdire, pe); @@ -885,7 +938,7 @@ static int vtd_get_pe_from_pasid_table(IntelIOMMUState *s, } if (!vtd_pe_present(pe)) { - return -VTD_FR_PASID_TABLE_INV; + return -VTD_FR_PASID_ENTRY_P; } return 0; @@ -938,7 +991,7 @@ static int vtd_ce_get_pasid_fpd(IntelIOMMUState *s, } if (!vtd_pdire_present(&pdire)) { - return -VTD_FR_PASID_TABLE_INV; + return -VTD_FR_PASID_DIR_ENTRY_P; } /* @@ -973,7 +1026,11 @@ static uint32_t vtd_get_iova_level(IntelIOMMUState *s, if (s->root_scalable) { vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid); - return VTD_PE_GET_LEVEL(&pe); + if (s->flts) { + return VTD_PE_GET_FL_LEVEL(&pe); + } else { + return VTD_PE_GET_SL_LEVEL(&pe); + } } return vtd_ce_get_level(ce); @@ -1041,9 +1098,9 @@ static inline uint64_t vtd_iova_limit(IntelIOMMUState *s, } /* Return true if IOVA passes range check, otherwise false. */ -static inline bool vtd_iova_range_check(IntelIOMMUState *s, - uint64_t iova, VTDContextEntry *ce, - uint8_t aw, uint32_t pasid) +static inline bool vtd_iova_sl_range_check(IntelIOMMUState *s, + uint64_t iova, VTDContextEntry *ce, + uint8_t aw, uint32_t pasid) { /* * Check if @iova is above 2^X-1, where X is the minimum of MGAW @@ -1060,7 +1117,11 @@ static dma_addr_t vtd_get_iova_pgtbl_base(IntelIOMMUState *s, if (s->root_scalable) { vtd_ce_get_rid2pasid_entry(s, ce, &pe, pasid); - return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR; + if (s->flts) { + return pe.val[2] & VTD_SM_PASID_ENTRY_FLPTPTR; + } else { + return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR; + } } return vtd_ce_get_slpt_base(ce); @@ -1084,17 +1145,17 @@ static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level) /* * We should have caught a guest-mis-programmed level earlier, - * via vtd_is_level_supported. + * via vtd_is_sl_level_supported. */ assert(level < VTD_SPTE_RSVD_LEN); /* - * Zero level doesn't exist. The smallest level is VTD_SL_PT_LEVEL=1 and - * checked by vtd_is_last_slpte(). + * Zero level doesn't exist. The smallest level is VTD_PT_LEVEL=1 and + * checked by vtd_is_last_pte(). 
*/ assert(level); - if ((level == VTD_SL_PD_LEVEL || level == VTD_SL_PDP_LEVEL) && - (slpte & VTD_SL_PT_PAGE_SIZE_MASK)) { + if ((level == VTD_PD_LEVEL || level == VTD_PDP_LEVEL) && + (slpte & VTD_PT_PAGE_SIZE_MASK)) { /* large page */ rsvd_mask = vtd_spte_rsvd_large[level]; } else { @@ -1118,9 +1179,8 @@ static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce, uint32_t offset; uint64_t slpte; uint64_t access_right_check; - uint64_t xlat, size; - if (!vtd_iova_range_check(s, iova, ce, aw_bits, pasid)) { + if (!vtd_iova_sl_range_check(s, iova, ce, aw_bits, pasid)) { error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 "," "pasid=0x%" PRIx32 ")", __func__, iova, pasid); return -VTD_FR_ADDR_BEYOND_MGAW; @@ -1131,7 +1191,7 @@ static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce, while (true) { offset = vtd_iova_level_offset(iova, level); - slpte = vtd_get_slpte(addr, offset); + slpte = vtd_get_pte(addr, offset); if (slpte == (uint64_t)-1) { error_report_once("%s: detected read error on DMAR slpte " @@ -1162,37 +1222,16 @@ static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce, return -VTD_FR_PAGING_ENTRY_RSVD; } - if (vtd_is_last_slpte(slpte, level)) { + if (vtd_is_last_pte(slpte, level)) { *slptep = slpte; *slpte_level = level; break; } - addr = vtd_get_slpte_addr(slpte, aw_bits); + addr = vtd_get_pte_addr(slpte, aw_bits); level--; } - xlat = vtd_get_slpte_addr(*slptep, aw_bits); - size = ~vtd_slpt_level_page_mask(level) + 1; - - /* - * From VT-d spec 3.14: Untranslated requests and translation - * requests that result in an address in the interrupt range will be - * blocked with condition code LGN.4 or SGN.8. - */ - if ((xlat > VTD_INTERRUPT_ADDR_LAST || - xlat + size - 1 < VTD_INTERRUPT_ADDR_FIRST)) { - return 0; - } else { - error_report_once("%s: xlat address is in interrupt range " - "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", " - "slpte=0x%" PRIx64 ", write=%d, " - "xlat=0x%" PRIx64 ", size=0x%" PRIx64 ", " - "pasid=0x%" PRIx32 ")", - __func__, iova, level, slpte, is_write, - xlat, size, pasid); - return s->scalable_mode ? -VTD_FR_SM_INTERRUPT_ADDR : - -VTD_FR_INTERRUPT_ADDR; - } + return 0; } typedef int (*vtd_page_walk_hook)(const IOMMUTLBEvent *event, void *private); @@ -1323,14 +1362,14 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t start, trace_vtd_page_walk_level(addr, level, start, end); - subpage_size = 1ULL << vtd_slpt_level_shift(level); - subpage_mask = vtd_slpt_level_page_mask(level); + subpage_size = 1ULL << vtd_pt_level_shift(level); + subpage_mask = vtd_pt_level_page_mask(level); while (iova < end) { iova_next = (iova & subpage_mask) + subpage_size; offset = vtd_iova_level_offset(iova, level); - slpte = vtd_get_slpte(addr, offset); + slpte = vtd_get_pte(addr, offset); if (slpte == (uint64_t)-1) { trace_vtd_page_walk_skip_read(iova, iova_next); @@ -1353,12 +1392,12 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t start, */ entry_valid = read_cur | write_cur; - if (!vtd_is_last_slpte(slpte, level) && entry_valid) { + if (!vtd_is_last_pte(slpte, level) && entry_valid) { /* * This is a valid PDE (or even bigger than PDE). We need * to walk one further level. 
*/ - ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, info->aw), + ret = vtd_page_walk_level(vtd_get_pte_addr(slpte, info->aw), iova, MIN(iova_next, end), level - 1, read_cur, write_cur, info); } else { @@ -1375,7 +1414,7 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t start, event.entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur); event.entry.addr_mask = ~subpage_mask; /* NOTE: this is only meaningful if entry_valid == true */ - event.entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw); + event.entry.translated_addr = vtd_get_pte_addr(slpte, info->aw); event.type = event.entry.perm ? IOMMU_NOTIFIER_MAP : IOMMU_NOTIFIER_UNMAP; ret = vtd_page_walk_one(&event, info); @@ -1409,11 +1448,11 @@ static int vtd_page_walk(IntelIOMMUState *s, VTDContextEntry *ce, dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce, pasid); uint32_t level = vtd_get_iova_level(s, ce, pasid); - if (!vtd_iova_range_check(s, start, ce, info->aw, pasid)) { + if (!vtd_iova_sl_range_check(s, start, ce, info->aw, pasid)) { return -VTD_FR_ADDR_BEYOND_MGAW; } - if (!vtd_iova_range_check(s, end, ce, info->aw, pasid)) { + if (!vtd_iova_sl_range_check(s, end, ce, info->aw, pasid)) { /* Fix end so that it reaches the maximum */ end = vtd_iova_limit(s, ce, info->aw, pasid); } @@ -1528,7 +1567,7 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num, /* Check if the programming of context-entry is valid */ if (!s->root_scalable && - !vtd_is_level_supported(s, vtd_ce_get_level(ce))) { + !vtd_is_sl_level_supported(s, vtd_ce_get_level(ce))) { error_report_once("%s: invalid context entry: hi=%"PRIx64 ", lo=%"PRIx64" (level %d not supported)", __func__, ce->hi, ce->lo, @@ -1689,8 +1728,6 @@ static bool vtd_as_pt_enabled(VTDAddressSpace *as) static bool vtd_switch_address_space(VTDAddressSpace *as) { bool use_iommu, pt; - /* Whether we need to take the BQL on our own */ - bool take_bql = !bql_locked(); assert(as); @@ -1707,9 +1744,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as) * from vtd_pt_enable_fast_path(). However the memory APIs need * it. We'd better make sure we have had it already, or, take it. 
*/ - if (take_bql) { - bql_lock(); - } + BQL_LOCK_GUARD(); /* Turn off first then on the other */ if (use_iommu) { @@ -1762,10 +1797,6 @@ static bool vtd_switch_address_space(VTDAddressSpace *as) memory_region_set_enabled(&as->iommu_ir_fault, false); } - if (take_bql) { - bql_unlock(); - } - return use_iommu; } @@ -1795,8 +1826,20 @@ static const bool vtd_qualified_faults[] = { [VTD_FR_ROOT_ENTRY_RSVD] = false, [VTD_FR_PAGING_ENTRY_RSVD] = true, [VTD_FR_CONTEXT_ENTRY_TT] = true, - [VTD_FR_PASID_TABLE_INV] = false, + [VTD_FR_PASID_DIR_ACCESS_ERR] = false, + [VTD_FR_PASID_DIR_ENTRY_P] = true, + [VTD_FR_PASID_TABLE_ACCESS_ERR] = false, + [VTD_FR_PASID_ENTRY_P] = true, + [VTD_FR_PASID_TABLE_ENTRY_INV] = true, + [VTD_FR_FS_PAGING_ENTRY_INV] = true, + [VTD_FR_FS_PAGING_ENTRY_P] = true, + [VTD_FR_FS_PAGING_ENTRY_RSVD] = true, + [VTD_FR_PASID_ENTRY_FSPTPTR_INV] = true, + [VTD_FR_FS_NON_CANONICAL] = true, + [VTD_FR_FS_PAGING_ENTRY_US] = true, + [VTD_FR_SM_WRITE] = true, [VTD_FR_SM_INTERRUPT_ADDR] = true, + [VTD_FR_FS_BIT_UPDATE_FAILED] = true, [VTD_FR_MAX] = false, }; @@ -1814,29 +1857,32 @@ static inline bool vtd_is_interrupt_addr(hwaddr addr) return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST; } -static gboolean vtd_find_as_by_sid(gpointer key, gpointer value, - gpointer user_data) +static gboolean vtd_find_as_by_sid_and_pasid(gpointer key, gpointer value, + gpointer user_data) { struct vtd_as_key *as_key = (struct vtd_as_key *)key; - uint16_t target_sid = *(uint16_t *)user_data; + struct vtd_as_raw_key *target = (struct vtd_as_raw_key *)user_data; uint16_t sid = PCI_BUILD_BDF(pci_bus_num(as_key->bus), as_key->devfn); - return sid == target_sid; + + return (as_key->pasid == target->pasid) && (sid == target->sid); } -static VTDAddressSpace *vtd_get_as_by_sid(IntelIOMMUState *s, uint16_t sid) +static VTDAddressSpace *vtd_get_as_by_sid_and_pasid(IntelIOMMUState *s, + uint16_t sid, + uint32_t pasid) { - uint8_t bus_num = PCI_BUS_NUM(sid); - VTDAddressSpace *vtd_as = s->vtd_as_cache[bus_num]; - - if (vtd_as && - (sid == PCI_BUILD_BDF(pci_bus_num(vtd_as->bus), vtd_as->devfn))) { - return vtd_as; - } + struct vtd_as_raw_key key = { + .sid = sid, + .pasid = pasid + }; - vtd_as = g_hash_table_find(s->vtd_address_spaces, vtd_find_as_by_sid, &sid); - s->vtd_as_cache[bus_num] = vtd_as; + return g_hash_table_find(s->vtd_address_spaces, + vtd_find_as_by_sid_and_pasid, &key); +} - return vtd_as; +static VTDAddressSpace *vtd_get_as_by_sid(IntelIOMMUState *s, uint16_t sid) +{ + return vtd_get_as_by_sid_and_pasid(s, sid, PCI_NO_PASID); } static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id) @@ -1858,6 +1904,156 @@ static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id) trace_vtd_pt_enable_fast_path(source_id, success); } +/* + * Rsvd field masks for fpte: + * vtd_fpte_rsvd 4k pages + * vtd_fpte_rsvd_large large pages + * + * We support only 4-level page tables. + */ +#define VTD_FPTE_RSVD_LEN 5 +static uint64_t vtd_fpte_rsvd[VTD_FPTE_RSVD_LEN]; +static uint64_t vtd_fpte_rsvd_large[VTD_FPTE_RSVD_LEN]; + +static bool vtd_flpte_nonzero_rsvd(uint64_t flpte, uint32_t level) +{ + uint64_t rsvd_mask; + + /* + * We should have caught a guest-mis-programmed level earlier, + * via vtd_is_fl_level_supported. + */ + assert(level < VTD_FPTE_RSVD_LEN); + /* + * Zero level doesn't exist. The smallest level is VTD_PT_LEVEL=1 and + * checked by vtd_is_last_pte(). 
+ */ + assert(level); + + if ((level == VTD_PD_LEVEL || level == VTD_PDP_LEVEL) && + (flpte & VTD_PT_PAGE_SIZE_MASK)) { + /* large page */ + rsvd_mask = vtd_fpte_rsvd_large[level]; + } else { + rsvd_mask = vtd_fpte_rsvd[level]; + } + + return flpte & rsvd_mask; +} + +static inline bool vtd_flpte_present(uint64_t flpte) +{ + return !!(flpte & VTD_FL_P); +} + +/* Return true if IOVA is canonical, otherwise false. */ +static bool vtd_iova_fl_check_canonical(IntelIOMMUState *s, uint64_t iova, + VTDContextEntry *ce, uint32_t pasid) +{ + uint64_t iova_limit = vtd_iova_limit(s, ce, s->aw_bits, pasid); + uint64_t upper_bits_mask = ~(iova_limit - 1); + uint64_t upper_bits = iova & upper_bits_mask; + bool msb = ((iova & (iova_limit >> 1)) != 0); + + if (msb) { + return upper_bits == upper_bits_mask; + } else { + return !upper_bits; + } +} + +static MemTxResult vtd_set_flag_in_pte(dma_addr_t base_addr, uint32_t index, + uint64_t pte, uint64_t flag) +{ + if (pte & flag) { + return MEMTX_OK; + } + pte |= flag; + pte = cpu_to_le64(pte); + return dma_memory_write(&address_space_memory, + base_addr + index * sizeof(pte), + &pte, sizeof(pte), + MEMTXATTRS_UNSPECIFIED); +} + +/* + * Given the @iova, get relevant @flptep. @flpte_level will be the last level + * of the translation, can be used for deciding the size of large page. + */ +static int vtd_iova_to_flpte(IntelIOMMUState *s, VTDContextEntry *ce, + uint64_t iova, bool is_write, + uint64_t *flptep, uint32_t *flpte_level, + bool *reads, bool *writes, uint8_t aw_bits, + uint32_t pasid) +{ + dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce, pasid); + uint32_t offset; + uint64_t flpte, flag_ad = VTD_FL_A; + *flpte_level = vtd_get_iova_level(s, ce, pasid); + + if (!vtd_iova_fl_check_canonical(s, iova, ce, pasid)) { + error_report_once("%s: detected non canonical IOVA (iova=0x%" PRIx64 "," + "pasid=0x%" PRIx32 ")", __func__, iova, pasid); + return -VTD_FR_FS_NON_CANONICAL; + } + + while (true) { + offset = vtd_iova_level_offset(iova, *flpte_level); + flpte = vtd_get_pte(addr, offset); + + if (flpte == (uint64_t)-1) { + if (*flpte_level == vtd_get_iova_level(s, ce, pasid)) { + /* Invalid programming of pasid-entry */ + return -VTD_FR_PASID_ENTRY_FSPTPTR_INV; + } else { + return -VTD_FR_FS_PAGING_ENTRY_INV; + } + } + + if (!vtd_flpte_present(flpte)) { + *reads = false; + *writes = false; + return -VTD_FR_FS_PAGING_ENTRY_P; + } + + /* No emulated device supports supervisor privilege request yet */ + if (!(flpte & VTD_FL_US)) { + *reads = false; + *writes = false; + return -VTD_FR_FS_PAGING_ENTRY_US; + } + + *reads = true; + *writes = (*writes) && (flpte & VTD_FL_RW); + if (is_write && !(flpte & VTD_FL_RW)) { + return -VTD_FR_SM_WRITE; + } + if (vtd_flpte_nonzero_rsvd(flpte, *flpte_level)) { + error_report_once("%s: detected flpte reserved non-zero " + "iova=0x%" PRIx64 ", level=0x%" PRIx32 + "flpte=0x%" PRIx64 ", pasid=0x%" PRIX32 ")", + __func__, iova, *flpte_level, flpte, pasid); + return -VTD_FR_FS_PAGING_ENTRY_RSVD; + } + + if (vtd_is_last_pte(flpte, *flpte_level) && is_write) { + flag_ad |= VTD_FL_D; + } + + if (vtd_set_flag_in_pte(addr, offset, flpte, flag_ad) != MEMTX_OK) { + return -VTD_FR_FS_BIT_UPDATE_FAILED; + } + + if (vtd_is_last_pte(flpte, *flpte_level)) { + *flptep = flpte; + return 0; + } + + addr = vtd_get_pte_addr(flpte, aw_bits); + (*flpte_level)--; + } +} + static void vtd_report_fault(IntelIOMMUState *s, int err, bool is_fpd_set, uint16_t source_id, @@ -1894,16 +2090,18 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus 
*bus, VTDContextEntry ce; uint8_t bus_num = pci_bus_num(bus); VTDContextCacheEntry *cc_entry; - uint64_t slpte, page_mask; - uint32_t level, pasid = vtd_as->pasid; + uint64_t pte, page_mask; + uint32_t level = UINT32_MAX; + uint32_t pasid = vtd_as->pasid; uint16_t source_id = PCI_BUILD_BDF(bus_num, devfn); int ret_fr; bool is_fpd_set = false; bool reads = true; bool writes = true; - uint8_t access_flags; + uint8_t access_flags, pgtt; bool rid2pasid = (pasid == PCI_NO_PASID) && s->root_scalable; VTDIOTLBEntry *iotlb_entry; + uint64_t xlat, size; /* * We have standalone memory region for interrupt addresses, we @@ -1915,13 +2113,13 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, cc_entry = &vtd_as->context_cache_entry; - /* Try to fetch slpte form IOTLB, we don't need RID2PASID logic */ + /* Try to fetch pte from IOTLB, we don't need RID2PASID logic */ if (!rid2pasid) { iotlb_entry = vtd_lookup_iotlb(s, source_id, pasid, addr); if (iotlb_entry) { - trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte, + trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->pte, iotlb_entry->domain_id); - slpte = iotlb_entry->slpte; + pte = iotlb_entry->pte; access_flags = iotlb_entry->access_flags; page_mask = iotlb_entry->mask; goto out; @@ -1993,44 +2191,79 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, return true; } - /* Try to fetch slpte form IOTLB for RID2PASID slow path */ + /* Try to fetch pte from IOTLB for RID2PASID slow path */ if (rid2pasid) { iotlb_entry = vtd_lookup_iotlb(s, source_id, pasid, addr); if (iotlb_entry) { - trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte, + trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->pte, iotlb_entry->domain_id); - slpte = iotlb_entry->slpte; + pte = iotlb_entry->pte; access_flags = iotlb_entry->access_flags; page_mask = iotlb_entry->mask; goto out; } } - ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &slpte, &level, - &reads, &writes, s->aw_bits, pasid); + if (s->flts && s->root_scalable) { + ret_fr = vtd_iova_to_flpte(s, &ce, addr, is_write, &pte, &level, + &reads, &writes, s->aw_bits, pasid); + pgtt = VTD_SM_PASID_ENTRY_FLT; + } else { + ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &pte, &level, + &reads, &writes, s->aw_bits, pasid); + pgtt = VTD_SM_PASID_ENTRY_SLT; + } + if (!ret_fr) { + xlat = vtd_get_pte_addr(pte, s->aw_bits); + size = ~vtd_pt_level_page_mask(level) + 1; + + /* + * Per VT-d spec 4.1 section 3.15: Untranslated requests and translation + * requests that result in an address in the interrupt range will be + * blocked with condition code LGN.4 or SGN.8. + */ + if ((xlat <= VTD_INTERRUPT_ADDR_LAST && + xlat + size - 1 >= VTD_INTERRUPT_ADDR_FIRST)) { + error_report_once("%s: xlat address is in interrupt range " + "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", " + "pte=0x%" PRIx64 ", write=%d, " + "xlat=0x%" PRIx64 ", size=0x%" PRIx64 ", " + "pasid=0x%" PRIx32 ")", + __func__, addr, level, pte, is_write, + xlat, size, pasid); + ret_fr = s->scalable_mode ? 
-VTD_FR_SM_INTERRUPT_ADDR : + -VTD_FR_INTERRUPT_ADDR; + } + } + if (ret_fr) { vtd_report_fault(s, -ret_fr, is_fpd_set, source_id, addr, is_write, pasid != PCI_NO_PASID, pasid); goto error; } - page_mask = vtd_slpt_level_page_mask(level); + page_mask = vtd_pt_level_page_mask(level); access_flags = IOMMU_ACCESS_FLAG(reads, writes); vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce, pasid), - addr, slpte, access_flags, level, pasid); + addr, pte, access_flags, level, pasid, pgtt); out: vtd_iommu_unlock(s); entry->iova = addr & page_mask; - entry->translated_addr = vtd_get_slpte_addr(slpte, s->aw_bits) & page_mask; + entry->translated_addr = vtd_get_pte_addr(pte, s->aw_bits) & page_mask; entry->addr_mask = ~page_mask; - entry->perm = access_flags; + entry->perm = (is_write ? access_flags : (access_flags & (~IOMMU_WO))); return true; error: vtd_iommu_unlock(s); entry->iova = 0; entry->translated_addr = 0; - entry->addr_mask = 0; + /* + * Set the mask for ATS (the range must be present even when the + * translation fails : PCIe rev 5 10.2.3.5) + */ + entry->addr_mask = (level != UINT32_MAX) ? + (~vtd_pt_level_page_mask(level)) : (~VTD_PAGE_MASK_4K); entry->perm = IOMMU_NONE; return false; } @@ -2215,8 +2448,13 @@ static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id) } } +/* + * There is no pasid field in iotlb invalidation descriptor, so PCI_NO_PASID + * is passed as parameter. Piotlb invalidation supports pasid, pasid in its + * descriptor is passed which should not be PCI_NO_PASID. + */ static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s, - uint16_t domain_id, hwaddr addr, + uint16_t domain_id, hwaddr addr, uint8_t am, uint32_t pasid) { VTDAddressSpace *vtd_as; @@ -2225,19 +2463,37 @@ static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s, hwaddr size = (1 << am) * VTD_PAGE_SIZE; QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) { - if (pasid != PCI_NO_PASID && pasid != vtd_as->pasid) { - continue; - } ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), vtd_as->devfn, &ce); if (!ret && domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) { + uint32_t rid2pasid = PCI_NO_PASID; + + if (s->root_scalable) { + rid2pasid = VTD_CE_GET_RID2PASID(&ce); + } + + /* + * In legacy mode, vtd_as->pasid == pasid is always true. + * In scalable mode, for vtd address space backing a PCI + * device without pasid, needs to compare pasid with + * rid2pasid of this device. + */ + if (!(vtd_as->pasid == pasid || + (vtd_as->pasid == PCI_NO_PASID && pasid == rid2pasid))) { + continue; + } + if (vtd_as_has_map_notifier(vtd_as)) { /* - * As long as we have MAP notifications registered in - * any of our IOMMU notifiers, we need to sync the - * shadow page table. + * When stage-1 translation is off, as long as we have MAP + * notifications registered in any of our IOMMU notifiers, + * we need to sync the shadow page table. Otherwise VFIO + * device attaches to nested page table instead of shadow + * page table, so no need to sync. 
*/ - vtd_sync_shadow_page_table_range(vtd_as, &ce, addr, size); + if (!s->flts || !s->root_scalable) { + vtd_sync_shadow_page_table_range(vtd_as, &ce, addr, size); + } } else { /* * For UNMAP-only notifiers, we don't need to walk the @@ -2252,6 +2508,7 @@ static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s, .translated_addr = 0, .addr_mask = size - 1, .perm = IOMMU_NONE, + .pasid = vtd_as->pasid, }, }; memory_region_notify_iommu(&vtd_as->iommu, 0, event); @@ -2532,22 +2789,57 @@ static bool vtd_get_inv_desc(IntelIOMMUState *s, return true; } +static bool vtd_inv_desc_reserved_check(IntelIOMMUState *s, + VTDInvDesc *inv_desc, + uint64_t mask[4], bool dw, + const char *func_name, + const char *desc_type) +{ + if (s->iq_dw) { + if (inv_desc->val[0] & mask[0] || inv_desc->val[1] & mask[1] || + inv_desc->val[2] & mask[2] || inv_desc->val[3] & mask[3]) { + error_report("%s: invalid %s desc val[3]: 0x%"PRIx64 + " val[2]: 0x%"PRIx64" val[1]=0x%"PRIx64 + " val[0]=0x%"PRIx64" (reserved nonzero)", + func_name, desc_type, inv_desc->val[3], + inv_desc->val[2], inv_desc->val[1], + inv_desc->val[0]); + return false; + } + } else { + if (dw) { + error_report("%s: 256-bit %s desc in 128-bit invalidation queue", + func_name, desc_type); + return false; + } + + if (inv_desc->lo & mask[0] || inv_desc->hi & mask[1]) { + error_report("%s: invalid %s desc: hi=%"PRIx64", lo=%"PRIx64 + " (reserved nonzero)", func_name, desc_type, + inv_desc->hi, inv_desc->lo); + return false; + } + } + + return true; +} + static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { - if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) || - (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) { - error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64 - " (reserved nonzero)", __func__, inv_desc->hi, - inv_desc->lo); + uint64_t mask[4] = {VTD_INV_DESC_WAIT_RSVD_LO, VTD_INV_DESC_WAIT_RSVD_HI, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; + bool ret = true; + + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "wait")) { return false; } + if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) { /* Status Write */ uint32_t status_data = (uint32_t)(inv_desc->lo >> VTD_INV_DESC_WAIT_DATA_SHIFT); - assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF)); - /* FIXME: need to be masked with HAW? 
*/ dma_addr_t status_addr = inv_desc->hi; trace_vtd_inv_desc_wait_sw(status_addr, status_data); @@ -2556,31 +2848,36 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) &status_data, sizeof(status_data), MEMTXATTRS_UNSPECIFIED)) { trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo); - return false; + ret = false; } - } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) { + } + + if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) { /* Interrupt flag */ vtd_generate_completion_event(s); - } else { + } + + if (!(inv_desc->lo & (VTD_INV_DESC_WAIT_IF | VTD_INV_DESC_WAIT_SW))) { error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64 " (unknown type)", __func__, inv_desc->hi, inv_desc->lo); return false; } - return true; + return ret; } static bool vtd_process_context_cache_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { uint16_t sid, fmask; + uint64_t mask[4] = {VTD_INV_DESC_CC_RSVD, VTD_INV_DESC_ALL_ONE, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; - if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) { - error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64 - " (reserved nonzero)", __func__, inv_desc->hi, - inv_desc->lo); + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "cc inv")) { return false; } + switch (inv_desc->lo & VTD_INV_DESC_CC_G) { case VTD_INV_DESC_CC_DOMAIN: trace_vtd_inv_desc_cc_domain( @@ -2610,12 +2907,11 @@ static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) uint16_t domain_id; uint8_t am; hwaddr addr; + uint64_t mask[4] = {VTD_INV_DESC_IOTLB_RSVD_LO, VTD_INV_DESC_IOTLB_RSVD_HI, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; - if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) || - (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) { - error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64 - ", lo=0x%"PRIx64" (reserved bits unzero)", - __func__, inv_desc->hi, inv_desc->lo); + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "iotlb inv")) { return false; } @@ -2653,9 +2949,117 @@ static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) return true; } +static gboolean vtd_hash_remove_by_pasid(gpointer key, gpointer value, + gpointer user_data) +{ + VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value; + VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data; + + return ((entry->domain_id == info->domain_id) && + (entry->pasid == info->pasid)); +} + +static void vtd_piotlb_pasid_invalidate(IntelIOMMUState *s, + uint16_t domain_id, uint32_t pasid) +{ + VTDIOTLBPageInvInfo info; + VTDAddressSpace *vtd_as; + VTDContextEntry ce; + + info.domain_id = domain_id; + info.pasid = pasid; + + vtd_iommu_lock(s); + g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_pasid, + &info); + vtd_iommu_unlock(s); + + QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { + if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), + vtd_as->devfn, &ce) && + domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) { + uint32_t rid2pasid = VTD_CE_GET_RID2PASID(&ce); + + if ((vtd_as->pasid != PCI_NO_PASID || pasid != rid2pasid) && + vtd_as->pasid != pasid) { + continue; + } + + if (!s->flts || !vtd_as_has_map_notifier(vtd_as)) { + vtd_address_space_sync(vtd_as); + } + } + } +} + +static void vtd_piotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id, + uint32_t pasid, hwaddr addr, uint8_t am) +{ + VTDIOTLBPageInvInfo info; + + info.domain_id = domain_id; + info.pasid = pasid; + info.addr = addr; + info.mask = ~((1 << am) - 1); + + 
vtd_iommu_lock(s); + g_hash_table_foreach_remove(s->iotlb, + vtd_hash_remove_by_page_piotlb, &info); + vtd_iommu_unlock(s); + + vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am, pasid); +} + +static bool vtd_process_piotlb_desc(IntelIOMMUState *s, + VTDInvDesc *inv_desc) +{ + uint16_t domain_id; + uint32_t pasid; + hwaddr addr; + uint8_t am; + uint64_t mask[4] = {VTD_INV_DESC_PIOTLB_RSVD_VAL0, + VTD_INV_DESC_PIOTLB_RSVD_VAL1, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; + + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, true, + __func__, "piotlb inv")) { + return false; + } + + domain_id = VTD_INV_DESC_PIOTLB_DID(inv_desc->val[0]); + pasid = VTD_INV_DESC_PIOTLB_PASID(inv_desc->val[0]); + switch (inv_desc->val[0] & VTD_INV_DESC_PIOTLB_G) { + case VTD_INV_DESC_PIOTLB_ALL_IN_PASID: + vtd_piotlb_pasid_invalidate(s, domain_id, pasid); + break; + + case VTD_INV_DESC_PIOTLB_PSI_IN_PASID: + am = VTD_INV_DESC_PIOTLB_AM(inv_desc->val[1]); + addr = (hwaddr) VTD_INV_DESC_PIOTLB_ADDR(inv_desc->val[1]); + vtd_piotlb_page_invalidate(s, domain_id, pasid, addr, am); + break; + + default: + error_report_once("%s: invalid piotlb inv desc: hi=0x%"PRIx64 + ", lo=0x%"PRIx64" (type mismatch: 0x%llx)", + __func__, inv_desc->val[1], inv_desc->val[0], + inv_desc->val[0] & VTD_INV_DESC_IOTLB_G); + return false; + } + return true; +} + static bool vtd_process_inv_iec_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { + uint64_t mask[4] = {VTD_INV_DESC_IEC_RSVD, VTD_INV_DESC_ALL_ONE, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; + + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "iec inv")) { + return false; + } + trace_vtd_inv_desc_iec(inv_desc->iec.granularity, inv_desc->iec.index, inv_desc->iec.index_mask); @@ -2695,9 +3099,53 @@ static void do_invalidate_device_tlb(VTDAddressSpace *vtd_dev_as, event.entry.iova = addr; event.entry.perm = IOMMU_NONE; event.entry.translated_addr = 0; + event.entry.pasid = vtd_dev_as->pasid; memory_region_notify_iommu(&vtd_dev_as->iommu, 0, event); } +static bool vtd_process_device_piotlb_desc(IntelIOMMUState *s, + VTDInvDesc *inv_desc) +{ + uint16_t sid; + VTDAddressSpace *vtd_dev_as; + bool size; + bool global; + hwaddr addr; + uint32_t pasid; + uint64_t mask[4] = {VTD_INV_DESC_PASID_DEVICE_IOTLB_RSVD_VAL0, + VTD_INV_DESC_PASID_DEVICE_IOTLB_RSVD_VAL1, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; + + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, true, + __func__, "device piotlb inv")) { + return false; + } + + global = VTD_INV_DESC_PASID_DEVICE_IOTLB_GLOBAL(inv_desc->hi); + size = VTD_INV_DESC_PASID_DEVICE_IOTLB_SIZE(inv_desc->hi); + addr = VTD_INV_DESC_PASID_DEVICE_IOTLB_ADDR(inv_desc->hi); + sid = VTD_INV_DESC_PASID_DEVICE_IOTLB_SID(inv_desc->lo); + if (global) { + QLIST_FOREACH(vtd_dev_as, &s->vtd_as_with_notifiers, next) { + if ((vtd_dev_as->pasid != PCI_NO_PASID) && + (PCI_BUILD_BDF(pci_bus_num(vtd_dev_as->bus), + vtd_dev_as->devfn) == sid)) { + do_invalidate_device_tlb(vtd_dev_as, size, addr); + } + } + } else { + pasid = VTD_INV_DESC_PASID_DEVICE_IOTLB_PASID(inv_desc->lo); + vtd_dev_as = vtd_get_as_by_sid_and_pasid(s, sid, pasid); + if (!vtd_dev_as) { + return true; + } + + do_invalidate_device_tlb(vtd_dev_as, size, addr); + } + + return true; +} + static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { @@ -2705,19 +3153,19 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s, hwaddr addr; uint16_t sid; bool size; + uint64_t mask[4] = {VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO, + 
VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; + + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "dev-iotlb inv")) { + return false; + } addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi); sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo); size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi); - if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) || - (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) { - error_report_once("%s: invalid dev-iotlb inv desc: hi=%"PRIx64 - ", lo=%"PRIx64" (reserved nonzero)", __func__, - inv_desc->hi, inv_desc->lo); - return false; - } - /* * Using sid is OK since the guest should have finished the * initialization of both the bus and device. @@ -2744,7 +3192,7 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s) return false; } - desc_type = inv_desc.lo & VTD_INV_DESC_TYPE; + desc_type = VTD_INV_DESC_TYPE(inv_desc.lo); /* FIXME: should update at first or at last? */ s->iq_last_desc_type = desc_type; @@ -2763,15 +3211,11 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s) } break; - /* - * TODO: the entity of below two cases will be implemented in future series. - * To make guest (which integrates scalable mode support patch set in - * iommu driver) work, just return true is enough so far. - */ - case VTD_INV_DESC_PC: - break; - case VTD_INV_DESC_PIOTLB: + trace_vtd_inv_desc("p-iotlb", inv_desc.val[1], inv_desc.val[0]); + if (!vtd_process_piotlb_desc(s, &inv_desc)) { + return false; + } break; case VTD_INV_DESC_WAIT: @@ -2788,6 +3232,13 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s) } break; + case VTD_INV_DESC_DEV_PIOTLB: + trace_vtd_inv_desc("device-piotlb", inv_desc.hi, inv_desc.lo); + if (!vtd_process_device_piotlb_desc(s, &inv_desc)) { + return false; + } + break; + case VTD_INV_DESC_DEVICE: trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo); if (!vtd_process_device_iotlb_desc(s, &inv_desc)) { @@ -2795,6 +3246,16 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s) } break; + /* + * TODO: the entity of below two cases will be implemented in future series. + * To make guest (which integrates scalable mode support patch set in + * iommu driver) work, just return true is enough so far. + */ + case VTD_INV_DESC_PC: + if (s->scalable_mode) { + break; + } + /* fallthrough */ default: error_report_once("%s: invalid inv desc: hi=%"PRIx64", lo=%"PRIx64 " (unknown type)", __func__, inv_desc.hi, @@ -2847,6 +3308,7 @@ static void vtd_handle_iqt_write(IntelIOMMUState *s) if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) { error_report_once("%s: RSV bit is set: val=0x%"PRIx64, __func__, val); + vtd_handle_inv_queue_error(s); return; } s->iq_tail = VTD_IQT_QT(s->iq_dw, val); @@ -3220,6 +3682,7 @@ static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr, IOMMUTLBEntry iotlb = { /* We'll fill in the rest later. 
*/ .target_as = &address_space_memory, + .pasid = vtd_as->pasid, }; bool success; @@ -3359,7 +3822,7 @@ static const MemoryRegionOps vtd_mem_ops = { }, }; -static Property vtd_properties[] = { +static const Property vtd_properties[] = { DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0), DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim, ON_OFF_AUTO_AUTO), @@ -3368,11 +3831,13 @@ static Property vtd_properties[] = { VTD_HOST_ADDRESS_WIDTH), DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE), DEFINE_PROP_BOOL("x-scalable-mode", IntelIOMMUState, scalable_mode, FALSE), + DEFINE_PROP_BOOL("x-flts", IntelIOMMUState, flts, FALSE), DEFINE_PROP_BOOL("snoop-control", IntelIOMMUState, snoop_control, false), DEFINE_PROP_BOOL("x-pasid-mode", IntelIOMMUState, pasid, false), DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true), DEFINE_PROP_BOOL("dma-translation", IntelIOMMUState, dma_translation, true), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_BOOL("stale-tm", IntelIOMMUState, stale_tm, false), + DEFINE_PROP_BOOL("fs1gp", IntelIOMMUState, fs1gp, true), }; /* Read IRTE entry with specific index */ @@ -3751,9 +4216,30 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, VTDAddressSpace *vtd_dev_as; char name[128]; + vtd_iommu_lock(s); vtd_dev_as = g_hash_table_lookup(s->vtd_address_spaces, &key); + vtd_iommu_unlock(s); + if (!vtd_dev_as) { - struct vtd_as_key *new_key = g_malloc(sizeof(*new_key)); + struct vtd_as_key *new_key; + /* Slow path */ + + /* + * memory_region_add_subregion_overlap requires the bql, + * make sure we own it. + */ + BQL_LOCK_GUARD(); + vtd_iommu_lock(s); + + /* Check again as we released the lock for a moment */ + vtd_dev_as = g_hash_table_lookup(s->vtd_address_spaces, &key); + if (vtd_dev_as) { + vtd_iommu_unlock(s); + return vtd_dev_as; + } + + /* Still nothing, allocate a new address space */ + new_key = g_malloc(sizeof(*new_key)); new_key->bus = bus; new_key->devfn = devfn; @@ -3844,6 +4330,8 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, vtd_switch_address_space(vtd_dev_as); g_hash_table_insert(s->vtd_address_spaces, new_key, vtd_dev_as); + + vtd_iommu_unlock(s); } return vtd_dev_as; } @@ -3869,7 +4357,13 @@ static bool vtd_check_hiod(IntelIOMMUState *s, HostIOMMUDevice *hiod, return false; } - return true; + if (!s->flts) { + /* All checks requested by VTD stage-2 translation pass */ + return true; + } + + error_setg(errp, "host device is uncompatible with stage-1 translation"); + return false; } static bool vtd_dev_set_iommu_device(PCIBus *bus, void *opaque, int devfn, @@ -4047,8 +4541,6 @@ static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn), PCI_FUNC(vtd_as->devfn)); } - - return; } static void vtd_cap_init(IntelIOMMUState *s) @@ -4092,7 +4584,12 @@ static void vtd_cap_init(IntelIOMMUState *s) } /* TODO: read cap/ecap from host to decide which cap to be exposed. 
*/ - if (s->scalable_mode) { + if (s->flts) { + s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_FLTS; + if (s->fs1gp) { + s->cap |= VTD_CAP_FS1GP; + } + } else if (s->scalable_mode) { s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_SRS | VTD_ECAP_SLTS; } @@ -4101,7 +4598,7 @@ static void vtd_cap_init(IntelIOMMUState *s) } if (s->pasid) { - s->ecap |= VTD_ECAP_PASID; + s->ecap |= VTD_ECAP_PASID | VTD_ECAP_PSS; } } @@ -4138,15 +4635,27 @@ static void vtd_init(IntelIOMMUState *s) */ vtd_spte_rsvd[0] = ~0ULL; vtd_spte_rsvd[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits, - x86_iommu->dt_supported); + x86_iommu->dt_supported && s->stale_tm); vtd_spte_rsvd[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits); vtd_spte_rsvd[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits); vtd_spte_rsvd[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits); vtd_spte_rsvd_large[2] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits, - x86_iommu->dt_supported); + x86_iommu->dt_supported && s->stale_tm); vtd_spte_rsvd_large[3] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits, - x86_iommu->dt_supported); + x86_iommu->dt_supported && s->stale_tm); + + /* + * Rsvd field masks for fpte + */ + vtd_fpte_rsvd[0] = ~0ULL; + vtd_fpte_rsvd[1] = VTD_FPTE_PAGE_L1_RSVD_MASK(s->aw_bits); + vtd_fpte_rsvd[2] = VTD_FPTE_PAGE_L2_RSVD_MASK(s->aw_bits); + vtd_fpte_rsvd[3] = VTD_FPTE_PAGE_L3_RSVD_MASK(s->aw_bits); + vtd_fpte_rsvd[4] = VTD_FPTE_PAGE_L4_RSVD_MASK(s->aw_bits); + + vtd_fpte_rsvd_large[2] = VTD_FPTE_LPAGE_L2_RSVD_MASK(s->aw_bits); + vtd_fpte_rsvd_large[3] = VTD_FPTE_LPAGE_L3_RSVD_MASK(s->aw_bits); if (s->scalable_mode || s->snoop_control) { vtd_spte_rsvd[1] &= ~VTD_SPTE_SNP; @@ -4212,10 +4721,11 @@ static void vtd_init(IntelIOMMUState *s) /* Should not reset address_spaces when reset because devices will still use * the address space they got at first (won't ask the bus again). 
*/ -static void vtd_reset(DeviceState *dev) +static void vtd_reset_exit(Object *obj, ResetType type) { - IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); + IntelIOMMUState *s = INTEL_IOMMU_DEVICE(obj); + trace_vtd_reset_exit(); vtd_init(s); vtd_address_space_refresh_all(s); } @@ -4231,10 +4741,118 @@ static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) return &vtd_as->as; } +static IOMMUTLBEntry vtd_iommu_ats_do_translate(IOMMUMemoryRegion *iommu, + hwaddr addr, + IOMMUAccessFlags flags) +{ + IOMMUTLBEntry entry; + VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); + + if (vtd_is_interrupt_addr(addr)) { + vtd_report_ir_illegal_access(vtd_as, addr, flags & IOMMU_WO); + entry.target_as = &address_space_memory; + entry.iova = 0; + entry.translated_addr = 0; + entry.addr_mask = ~VTD_PAGE_MASK_4K; + entry.perm = IOMMU_NONE; + entry.pasid = PCI_NO_PASID; + } else { + entry = vtd_iommu_translate(iommu, addr, flags, 0); + } + + return entry; +} + +static ssize_t vtd_ats_request_translation(PCIBus *bus, void *opaque, + int devfn, uint32_t pasid, + bool priv_req, bool exec_req, + hwaddr addr, size_t length, + bool no_write, IOMMUTLBEntry *result, + size_t result_length, + uint32_t *err_count) +{ + IntelIOMMUState *s = opaque; + VTDAddressSpace *vtd_as; + IOMMUAccessFlags flags = IOMMU_ACCESS_FLAG_FULL(true, !no_write, exec_req, + priv_req, false, false); + ssize_t res_index = 0; + hwaddr target_address = addr + length; + IOMMUTLBEntry entry; + + vtd_as = vtd_find_add_as(s, bus, devfn, pasid); + *err_count = 0; + + while ((addr < target_address) && (res_index < result_length)) { + entry = vtd_iommu_ats_do_translate(&vtd_as->iommu, addr, flags); + entry.perm &= ~IOMMU_GLOBAL; /* Spec 4.1.2: Global Mapping never set */ + + if ((entry.perm & flags) != flags) { + *err_count += 1; /* Less than expected */ + } + + result[res_index] = entry; + res_index += 1; + addr = (addr & (~entry.addr_mask)) + (entry.addr_mask + 1); + } + + /* Buffer too small */ + if (addr < target_address) { + return -ENOMEM; + } + + return res_index; +} + +static void vtd_init_iotlb_notifier(PCIBus *bus, void *opaque, int devfn, + IOMMUNotifier *n, IOMMUNotify fn, + void *user_opaque) +{ + n->opaque = user_opaque; + iommu_notifier_init(n, fn, IOMMU_NOTIFIER_DEVIOTLB_EVENTS, 0, + HWADDR_MAX, 0); +} + +static void vtd_get_iotlb_info(void *opaque, uint8_t *addr_width, + uint32_t *min_page_size) +{ + IntelIOMMUState *s = opaque; + + *addr_width = s->aw_bits; + *min_page_size = VTD_PAGE_SIZE; +} + +static void vtd_register_iotlb_notifier(PCIBus *bus, void *opaque, + int devfn, uint32_t pasid, + IOMMUNotifier *n) +{ + IntelIOMMUState *s = opaque; + VTDAddressSpace *vtd_as; + + vtd_as = vtd_find_add_as(s, bus, devfn, pasid); + memory_region_register_iommu_notifier(MEMORY_REGION(&vtd_as->iommu), n, + &error_fatal); +} + +static void vtd_unregister_iotlb_notifier(PCIBus *bus, void *opaque, + int devfn, uint32_t pasid, + IOMMUNotifier *n) +{ + IntelIOMMUState *s = opaque; + VTDAddressSpace *vtd_as; + + vtd_as = vtd_find_add_as(s, bus, devfn, pasid); + memory_region_unregister_iommu_notifier(MEMORY_REGION(&vtd_as->iommu), n); +} + static PCIIOMMUOps vtd_iommu_ops = { .get_address_space = vtd_host_dma_iommu, .set_iommu_device = vtd_dev_set_iommu_device, .unset_iommu_device = vtd_dev_unset_iommu_device, + .get_iotlb_info = vtd_get_iotlb_info, + .init_iotlb_notifier = vtd_init_iotlb_notifier, + .register_iotlb_notifier = vtd_register_iotlb_notifier, + .unregister_iotlb_notifier = 
vtd_unregister_iotlb_notifier, + .ats_request_translation = vtd_ats_request_translation, }; static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) @@ -4259,14 +4877,26 @@ static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) } } - /* Currently only address widths supported are 39 and 48 bits */ - if ((s->aw_bits != VTD_HOST_AW_39BIT) && - (s->aw_bits != VTD_HOST_AW_48BIT)) { - error_setg(errp, "Supported values for aw-bits are: %d, %d", + if (!s->scalable_mode && s->flts) { + error_setg(errp, "x-flts is only available in scalable mode"); + return false; + } + + if (!s->flts && s->aw_bits != VTD_HOST_AW_39BIT && + s->aw_bits != VTD_HOST_AW_48BIT) { + error_setg(errp, "%s: supported values for aw-bits are: %d, %d", + s->scalable_mode ? "Scalable mode(flts=off)" : "Legacy mode", VTD_HOST_AW_39BIT, VTD_HOST_AW_48BIT); return false; } + if (s->flts && s->aw_bits != VTD_HOST_AW_48BIT) { + error_setg(errp, + "Scalable mode(flts=on): supported value for aw-bits is: %d", + VTD_HOST_AW_48BIT); + return false; + } + if (s->scalable_mode && !s->dma_drain) { error_setg(errp, "Need to set dma_drain for scalable mode"); return false; @@ -4363,19 +4993,22 @@ static void vtd_realize(DeviceState *dev, Error **errp) qemu_add_machine_init_done_notifier(&vtd_machine_done_notify); } -static void vtd_class_init(ObjectClass *klass, void *data) +static void vtd_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); X86IOMMUClass *x86_class = X86_IOMMU_DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); - dc->reset = vtd_reset; + /* + * Use 'exit' reset phase to make sure all DMA requests + * have been quiesced during 'enter' or 'hold' phase + */ + rc->phases.exit = vtd_reset_exit; dc->vmsd = &vtd_vmstate; device_class_set_props(dc, vtd_properties); dc->hotpluggable = false; x86_class->realize = vtd_realize; x86_class->int_remap = vtd_int_remap; - /* Supported by the pc-q35-* machine types */ - dc->user_creatable = true; set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->desc = "Intel IOMMU (VT-d) DMA Remapping device"; } @@ -4388,7 +5021,7 @@ static const TypeInfo vtd_info = { }; static void vtd_iommu_memory_region_class_init(ObjectClass *klass, - void *data) + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h index 5f32c369434..360e937989d 100644 --- a/hw/i386/intel_iommu_internal.h +++ b/hw/i386/intel_iommu_internal.h @@ -192,9 +192,11 @@ #define VTD_ECAP_SC (1ULL << 7) #define VTD_ECAP_MHMV (15ULL << 20) #define VTD_ECAP_SRS (1ULL << 31) +#define VTD_ECAP_PSS (7ULL << 35) /* limit: MemTxAttrs::pid */ #define VTD_ECAP_PASID (1ULL << 40) #define VTD_ECAP_SMTS (1ULL << 43) #define VTD_ECAP_SLTS (1ULL << 46) +#define VTD_ECAP_FLTS (1ULL << 47) /* CAP_REG */ /* (offset >> 4) << 24 */ @@ -211,6 +213,7 @@ #define VTD_CAP_SLLPS ((1ULL << 34) | (1ULL << 35)) #define VTD_CAP_DRAIN_WRITE (1ULL << 54) #define VTD_CAP_DRAIN_READ (1ULL << 55) +#define VTD_CAP_FS1GP (1ULL << 56) #define VTD_CAP_DRAIN (VTD_CAP_DRAIN_READ | VTD_CAP_DRAIN_WRITE) #define VTD_CAP_CM (1ULL << 7) #define VTD_PASID_ID_SHIFT 20 @@ -311,10 +314,28 @@ typedef enum VTDFaultReason { * request while disabled */ VTD_FR_IR_SID_ERR = 0x26, /* Invalid Source-ID */ - VTD_FR_PASID_TABLE_INV = 0x58, /*Invalid PASID table entry */ + /* PASID directory entry access failure */ + VTD_FR_PASID_DIR_ACCESS_ERR = 0x50, + /* The Present(P) field of pasid directory entry is 0 */ + 
VTD_FR_PASID_DIR_ENTRY_P = 0x51, + VTD_FR_PASID_TABLE_ACCESS_ERR = 0x58, /* PASID table entry access failure */ + /* The Present(P) field of pasid table entry is 0 */ + VTD_FR_PASID_ENTRY_P = 0x59, + VTD_FR_PASID_TABLE_ENTRY_INV = 0x5b, /*Invalid PASID table entry */ + + /* Fail to access a first-level paging entry (not FS_PML4E) */ + VTD_FR_FS_PAGING_ENTRY_INV = 0x70, + VTD_FR_FS_PAGING_ENTRY_P = 0x71, + /* Non-zero reserved field in present first-stage paging entry */ + VTD_FR_FS_PAGING_ENTRY_RSVD = 0x72, + VTD_FR_PASID_ENTRY_FSPTPTR_INV = 0x73, /* Invalid FSPTPTR in PASID entry */ + VTD_FR_FS_NON_CANONICAL = 0x80, /* SNG.1 : Address for FS not canonical.*/ + VTD_FR_FS_PAGING_ENTRY_US = 0x81, /* Privilege violation */ + VTD_FR_SM_WRITE = 0x85, /* No write permission */ /* Output address in the interrupt address range for scalable mode */ VTD_FR_SM_INTERRUPT_ADDR = 0x87, + VTD_FR_FS_BIT_UPDATE_FAILED = 0x91, /* SFS.10 */ VTD_FR_MAX, /* Guard */ } VTDFaultReason; @@ -356,7 +377,9 @@ union VTDInvDesc { typedef union VTDInvDesc VTDInvDesc; /* Masks for struct VTDInvDesc */ -#define VTD_INV_DESC_TYPE 0xf +#define VTD_INV_DESC_ALL_ONE -1ULL +#define VTD_INV_DESC_TYPE(val) ((((val) >> 5) & 0x70ULL) | \ + ((val) & 0xfULL)) #define VTD_INV_DESC_CC 0x1 /* Context-cache Invalidate Desc */ #define VTD_INV_DESC_IOTLB 0x2 #define VTD_INV_DESC_DEVICE 0x3 @@ -365,6 +388,7 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_WAIT 0x5 /* Invalidation Wait Descriptor */ #define VTD_INV_DESC_PIOTLB 0x6 /* PASID-IOTLB Invalidate Desc */ #define VTD_INV_DESC_PC 0x7 /* PASID-cache Invalidate Desc */ +#define VTD_INV_DESC_DEV_PIOTLB 0x8 /* PASID-based-DIOTLB inv_desc*/ #define VTD_INV_DESC_NONE 0 /* Not an Invalidate Descriptor */ /* Masks for Invalidation Wait Descriptor*/ @@ -372,7 +396,7 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_WAIT_IF (1ULL << 4) #define VTD_INV_DESC_WAIT_FN (1ULL << 6) #define VTD_INV_DESC_WAIT_DATA_SHIFT 32 -#define VTD_INV_DESC_WAIT_RSVD_LO 0Xffffff80ULL +#define VTD_INV_DESC_WAIT_RSVD_LO 0Xfffff180ULL #define VTD_INV_DESC_WAIT_RSVD_HI 3ULL /* Masks for Context-cache Invalidation Descriptor */ @@ -383,7 +407,7 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_CC_DID(val) (((val) >> 16) & VTD_DOMAIN_ID_MASK) #define VTD_INV_DESC_CC_SID(val) (((val) >> 32) & 0xffffUL) #define VTD_INV_DESC_CC_FM(val) (((val) >> 48) & 3UL) -#define VTD_INV_DESC_CC_RSVD 0xfffc00000000ffc0ULL +#define VTD_INV_DESC_CC_RSVD 0xfffc00000000f1c0ULL /* Masks for IOTLB Invalidate Descriptor */ #define VTD_INV_DESC_IOTLB_G (3ULL << 4) @@ -393,26 +417,34 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_IOTLB_DID(val) (((val) >> 16) & VTD_DOMAIN_ID_MASK) #define VTD_INV_DESC_IOTLB_ADDR(val) ((val) & ~0xfffULL) #define VTD_INV_DESC_IOTLB_AM(val) ((val) & 0x3fULL) -#define VTD_INV_DESC_IOTLB_RSVD_LO 0xffffffff0000ff00ULL +#define VTD_INV_DESC_IOTLB_RSVD_LO 0xffffffff0000f100ULL #define VTD_INV_DESC_IOTLB_RSVD_HI 0xf80ULL -#define VTD_INV_DESC_IOTLB_PASID_PASID (2ULL << 4) -#define VTD_INV_DESC_IOTLB_PASID_PAGE (3ULL << 4) -#define VTD_INV_DESC_IOTLB_PASID(val) (((val) >> 32) & VTD_PASID_ID_MASK) -#define VTD_INV_DESC_IOTLB_PASID_RSVD_LO 0xfff00000000001c0ULL -#define VTD_INV_DESC_IOTLB_PASID_RSVD_HI 0xf80ULL /* Mask for Device IOTLB Invalidate Descriptor */ #define VTD_INV_DESC_DEVICE_IOTLB_ADDR(val) ((val) & 0xfffffffffffff000ULL) #define VTD_INV_DESC_DEVICE_IOTLB_SIZE(val) ((val) & 0x1) #define VTD_INV_DESC_DEVICE_IOTLB_SID(val) (((val) >> 32) & 0xFFFFULL) #define 
VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI 0xffeULL -#define VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO 0xffff0000ffe0fff8 +#define VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO 0xffff0000ffe0f1f0 + +/* Masks for Interrupt Entry Invalidate Descriptor */ +#define VTD_INV_DESC_IEC_RSVD 0xffff000007fff1e0ULL + +/* Masks for PASID based Device IOTLB Invalidate Descriptor */ +#define VTD_INV_DESC_PASID_DEVICE_IOTLB_ADDR(val) ((val) & \ + 0xfffffffffffff000ULL) +#define VTD_INV_DESC_PASID_DEVICE_IOTLB_SIZE(val) ((val >> 11) & 0x1) +#define VTD_INV_DESC_PASID_DEVICE_IOTLB_GLOBAL(val) ((val) & 0x1) +#define VTD_INV_DESC_PASID_DEVICE_IOTLB_SID(val) (((val) >> 16) & 0xffffULL) +#define VTD_INV_DESC_PASID_DEVICE_IOTLB_PASID(val) ((val >> 32) & 0xfffffULL) +#define VTD_INV_DESC_PASID_DEVICE_IOTLB_RSVD_VAL0 0xfff000000000f000ULL +#define VTD_INV_DESC_PASID_DEVICE_IOTLB_RSVD_VAL1 0x7feULL /* Rsvd field masks for spte */ #define VTD_SPTE_SNP 0x800ULL -#define VTD_SPTE_PAGE_L1_RSVD_MASK(aw, dt_supported) \ - dt_supported ? \ +#define VTD_SPTE_PAGE_L1_RSVD_MASK(aw, stale_tm) \ + stale_tm ? \ (0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \ (0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) #define VTD_SPTE_PAGE_L2_RSVD_MASK(aw) \ @@ -422,15 +454,43 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_SPTE_PAGE_L4_RSVD_MASK(aw) \ (0x880ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) -#define VTD_SPTE_LPAGE_L2_RSVD_MASK(aw, dt_supported) \ - dt_supported ? \ +#define VTD_SPTE_LPAGE_L2_RSVD_MASK(aw, stale_tm) \ + stale_tm ? \ (0x1ff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \ (0x1ff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) -#define VTD_SPTE_LPAGE_L3_RSVD_MASK(aw, dt_supported) \ - dt_supported ? \ +#define VTD_SPTE_LPAGE_L3_RSVD_MASK(aw, stale_tm) \ + stale_tm ? \ (0x3ffff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \ (0x3ffff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) +/* Rsvd field masks for fpte */ +#define VTD_FS_UPPER_IGNORED 0xfff0000000000000ULL +#define VTD_FPTE_PAGE_L1_RSVD_MASK(aw) \ + (~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED)) +#define VTD_FPTE_PAGE_L2_RSVD_MASK(aw) \ + (~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED)) +#define VTD_FPTE_PAGE_L3_RSVD_MASK(aw) \ + (~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED)) +#define VTD_FPTE_PAGE_L4_RSVD_MASK(aw) \ + (0x80ULL | ~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED)) + +#define VTD_FPTE_LPAGE_L2_RSVD_MASK(aw) \ + (0x1fe000ULL | ~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED)) +#define VTD_FPTE_LPAGE_L3_RSVD_MASK(aw) \ + (0x3fffe000ULL | ~(VTD_HAW_MASK(aw) | VTD_FS_UPPER_IGNORED)) + +/* Masks for PIOTLB Invalidate Descriptor */ +#define VTD_INV_DESC_PIOTLB_G (3ULL << 4) +#define VTD_INV_DESC_PIOTLB_ALL_IN_PASID (2ULL << 4) +#define VTD_INV_DESC_PIOTLB_PSI_IN_PASID (3ULL << 4) +#define VTD_INV_DESC_PIOTLB_DID(val) (((val) >> 16) & VTD_DOMAIN_ID_MASK) +#define VTD_INV_DESC_PIOTLB_PASID(val) (((val) >> 32) & 0xfffffULL) +#define VTD_INV_DESC_PIOTLB_AM(val) ((val) & 0x3fULL) +#define VTD_INV_DESC_PIOTLB_IH(val) (((val) >> 6) & 0x1) +#define VTD_INV_DESC_PIOTLB_ADDR(val) ((val) & ~0xfffULL) +#define VTD_INV_DESC_PIOTLB_RSVD_VAL0 0xfff000000000f1c0ULL +#define VTD_INV_DESC_PIOTLB_RSVD_VAL1 0xf80ULL + /* Information about page-selective IOTLB invalidate */ struct VTDIOTLBPageInvInfo { uint16_t domain_id; @@ -514,27 +574,38 @@ typedef struct VTDRootEntry VTDRootEntry; #define VTD_SM_PASID_ENTRY_AW 7ULL /* Adjusted guest-address-width */ #define VTD_SM_PASID_ENTRY_DID(val) ((val) & VTD_DOMAIN_ID_MASK) +#define VTD_SM_PASID_ENTRY_FLPM 3ULL +#define 
VTD_SM_PASID_ENTRY_FLPTPTR (~0xfffULL) + +/* First Level Paging Structure */ +/* Masks for First Level Paging Entry */ +#define VTD_FL_P 1ULL +#define VTD_FL_RW (1ULL << 1) +#define VTD_FL_US (1ULL << 2) +#define VTD_FL_A (1ULL << 5) +#define VTD_FL_D (1ULL << 6) + /* Second Level Page Translation Pointer*/ #define VTD_SM_PASID_ENTRY_SLPTPTR (~0xfffULL) -/* Paging Structure common */ -#define VTD_SL_PT_PAGE_SIZE_MASK (1ULL << 7) -/* Bits to decide the offset for each level */ -#define VTD_SL_LEVEL_BITS 9 - /* Second Level Paging Structure */ -#define VTD_SL_PML4_LEVEL 4 -#define VTD_SL_PDP_LEVEL 3 -#define VTD_SL_PD_LEVEL 2 -#define VTD_SL_PT_LEVEL 1 -#define VTD_SL_PT_ENTRY_NR 512 - /* Masks for Second Level Paging Entry */ #define VTD_SL_RW_MASK 3ULL #define VTD_SL_R 1ULL #define VTD_SL_W (1ULL << 1) -#define VTD_SL_PT_BASE_ADDR_MASK(aw) (~(VTD_PAGE_SIZE - 1) & VTD_HAW_MASK(aw)) #define VTD_SL_IGN_COM 0xbff0000000000000ULL #define VTD_SL_TM (1ULL << 62) +/* Common for both First Level and Second Level */ +#define VTD_PML4_LEVEL 4 +#define VTD_PDP_LEVEL 3 +#define VTD_PD_LEVEL 2 +#define VTD_PT_LEVEL 1 +#define VTD_PT_ENTRY_NR 512 +#define VTD_PT_PAGE_SIZE_MASK (1ULL << 7) +#define VTD_PT_BASE_ADDR_MASK(aw) (~(VTD_PAGE_SIZE - 1) & VTD_HAW_MASK(aw)) + +/* Bits to decide the offset for each level */ +#define VTD_LEVEL_BITS 9 + #endif diff --git a/hw/i386/kvm/apic.c b/hw/i386/kvm/apic.c index a72c28e8a7d..1be9bfe36e9 100644 --- a/hw/i386/kvm/apic.c +++ b/hw/i386/kvm/apic.c @@ -14,9 +14,10 @@ #include "qemu/module.h" #include "hw/i386/apic_internal.h" #include "hw/pci/msi.h" -#include "sysemu/hw_accel.h" -#include "sysemu/kvm.h" +#include "system/hw_accel.h" +#include "system/kvm.h" #include "kvm/kvm_i386.h" +#include "kvm/tdx.h" static inline void kvm_apic_set_reg(struct kvm_lapic_state *kapic, int reg_id, uint32_t val) @@ -141,6 +142,10 @@ static void kvm_apic_put(CPUState *cs, run_on_cpu_data data) struct kvm_lapic_state kapic; int ret; + if (is_tdx_vm()) { + return; + } + kvm_put_apicbase(s->cpu, s->apicbase); kvm_put_apic_state(s, &kapic); @@ -214,7 +219,7 @@ static void kvm_apic_mem_write(void *opaque, hwaddr addr, static const MemoryRegionOps kvm_apic_io_ops = { .read = kvm_apic_mem_read, .write = kvm_apic_mem_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; static void kvm_apic_reset(APICCommonState *s) @@ -240,7 +245,7 @@ static void kvm_apic_unrealize(DeviceState *dev) { } -static void kvm_apic_class_init(ObjectClass *klass, void *data) +static void kvm_apic_class_init(ObjectClass *klass, const void *data) { APICCommonClass *k = APIC_COMMON_CLASS(klass); diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c index 40aa9a32c32..f56382717f7 100644 --- a/hw/i386/kvm/clock.c +++ b/hw/i386/kvm/clock.c @@ -16,9 +16,9 @@ #include "qemu/osdep.h" #include "qemu/host-utils.h" #include "qemu/module.h" -#include "sysemu/kvm.h" -#include "sysemu/runstate.h" -#include "sysemu/hw_accel.h" +#include "system/kvm.h" +#include "system/runstate.h" +#include "system/hw_accel.h" #include "kvm/kvm_i386.h" #include "migration/vmstate.h" #include "hw/sysbus.h" @@ -27,7 +27,6 @@ #include "qapi/error.h" #include -#include "standard-headers/asm-x86/kvm_para.h" #include "qom/object.h" #define TYPE_KVM_CLOCK "kvmclock" @@ -305,13 +304,12 @@ static const VMStateDescription kvmclock_vmsd = { } }; -static Property kvmclock_properties[] = { +static const Property kvmclock_properties[] = { DEFINE_PROP_BOOL("x-mach-use-reliable-get-clock", KVMClockState, 
mach_use_reliable_get_clock, true), - DEFINE_PROP_END_OF_LIST(), }; -static void kvmclock_class_init(ObjectClass *klass, void *data) +static void kvmclock_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -334,8 +332,8 @@ void kvmclock_create(bool create_always) assert(kvm_enabled()); if (create_always || - cpu->env.features[FEAT_KVM] & ((1ULL << KVM_FEATURE_CLOCKSOURCE) | - (1ULL << KVM_FEATURE_CLOCKSOURCE2))) { + cpu->env.features[FEAT_KVM] & (CPUID_KVM_CLOCK | + CPUID_KVM_CLOCK2)) { sysbus_create_simple(TYPE_KVM_CLOCK, -1, NULL); } } diff --git a/hw/i386/kvm/i8254.c b/hw/i386/kvm/i8254.c index e49b9c4b565..14b78f30a83 100644 --- a/hw/i386/kvm/i8254.c +++ b/hw/i386/kvm/i8254.c @@ -29,11 +29,11 @@ #include "qapi/error.h" #include "qemu/module.h" #include "qemu/timer.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "hw/timer/i8254.h" #include "hw/timer/i8254_internal.h" #include "hw/qdev-properties-system.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "target/i386/kvm/kvm_i386.h" #include "qom/object.h" @@ -287,13 +287,12 @@ static void kvm_pit_realizefn(DeviceState *dev, Error **errp) kpc->parent_realize(dev, errp); } -static Property kvm_pit_properties[] = { +static const Property kvm_pit_properties[] = { DEFINE_PROP_LOSTTICKPOLICY("lost_tick_policy", KVMPITState, lost_tick_policy, LOST_TICK_POLICY_DELAY), - DEFINE_PROP_END_OF_LIST(), }; -static void kvm_pit_class_init(ObjectClass *klass, void *data) +static void kvm_pit_class_init(ObjectClass *klass, const void *data) { KVMPITClass *kpc = KVM_PIT_CLASS(klass); PITCommonClass *k = PIT_COMMON_CLASS(klass); @@ -303,7 +302,7 @@ static void kvm_pit_class_init(ObjectClass *klass, void *data) &kpc->parent_realize); k->set_channel_gate = kvm_pit_set_gate; k->get_channel_info = kvm_pit_get_channel_info; - dc->reset = kvm_pit_reset; + device_class_set_legacy_reset(dc, kvm_pit_reset); device_class_set_props(dc, kvm_pit_properties); } diff --git a/hw/i386/kvm/i8259.c b/hw/i386/kvm/i8259.c index 3ca0e1ff036..8a72d6e4dd3 100644 --- a/hw/i386/kvm/i8259.c +++ b/hw/i386/kvm/i8259.c @@ -16,7 +16,7 @@ #include "qemu/module.h" #include "hw/intc/kvm_irqcount.h" #include "hw/irq.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "qom/object.h" #define TYPE_KVM_I8259 "kvm-i8259" @@ -139,13 +139,13 @@ qemu_irq *kvm_i8259_init(ISABus *bus) return qemu_allocate_irqs(kvm_pic_set_irq, NULL, ISA_NUM_IRQS); } -static void kvm_i8259_class_init(ObjectClass *klass, void *data) +static void kvm_i8259_class_init(ObjectClass *klass, const void *data) { KVMPICClass *kpc = KVM_PIC_CLASS(klass); PICCommonClass *k = PIC_COMMON_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = kvm_pic_reset; + device_class_set_legacy_reset(dc, kvm_pic_reset); device_class_set_parent_realize(dc, kvm_pic_realize, &kpc->parent_realize); k->pre_save = kvm_pic_get; k->post_load = kvm_pic_put; diff --git a/hw/i386/kvm/ioapic.c b/hw/i386/kvm/ioapic.c index b96fe84eed3..693ee978a12 100644 --- a/hw/i386/kvm/ioapic.c +++ b/hw/i386/kvm/ioapic.c @@ -15,7 +15,7 @@ #include "hw/qdev-properties.h" #include "hw/intc/ioapic_internal.h" #include "hw/intc/kvm_irqcount.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "kvm/kvm_i386.h" /* PC Utility function */ @@ -133,12 +133,11 @@ static void kvm_ioapic_realize(DeviceState *dev, Error **errp) qdev_init_gpio_in(dev, kvm_ioapic_set_irq, IOAPIC_NUM_PINS); } -static Property kvm_ioapic_properties[] = { +static const Property 
kvm_ioapic_properties[] = { DEFINE_PROP_UINT32("gsi_base", KVMIOAPICState, kvm_gsi_base, 0), - DEFINE_PROP_END_OF_LIST() }; -static void kvm_ioapic_class_init(ObjectClass *klass, void *data) +static void kvm_ioapic_class_init(ObjectClass *klass, const void *data) { IOAPICCommonClass *k = IOAPIC_COMMON_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -146,7 +145,7 @@ static void kvm_ioapic_class_init(ObjectClass *klass, void *data) k->realize = kvm_ioapic_realize; k->pre_save = kvm_ioapic_get; k->post_load = kvm_ioapic_put; - dc->reset = kvm_ioapic_reset; + device_class_set_legacy_reset(dc, kvm_ioapic_reset); device_class_set_props(dc, kvm_ioapic_properties); } diff --git a/hw/i386/kvm/xen-stubs.c b/hw/i386/kvm/xen-stubs.c index d03131e6864..ce73119ee7a 100644 --- a/hw/i386/kvm/xen-stubs.c +++ b/hw/i386/kvm/xen-stubs.c @@ -12,7 +12,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qapi/qapi-commands-misc-target.h" #include "xen_evtchn.h" #include "xen_primary_console.h" @@ -38,15 +37,3 @@ void xen_primary_console_create(void) void xen_primary_console_set_be_port(uint16_t port) { } -#ifdef TARGET_I386 -EvtchnInfoList *qmp_xen_event_list(Error **errp) -{ - error_setg(errp, "Xen event channel emulation not enabled"); - return NULL; -} - -void qmp_xen_event_inject(uint32_t port, Error **errp) -{ - error_setg(errp, "Xen event channel emulation not enabled"); -} -#endif diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c index 07bd0c9ab80..dd566c49679 100644 --- a/hw/i386/kvm/xen_evtchn.c +++ b/hw/i386/kvm/xen_evtchn.c @@ -19,11 +19,11 @@ #include "monitor/monitor.h" #include "monitor/hmp.h" #include "qapi/error.h" -#include "qapi/qapi-commands-misc-target.h" -#include "qapi/qmp/qdict.h" +#include "qapi/qapi-commands-misc-i386.h" +#include "qobject/qdict.h" #include "qom/object.h" #include "exec/target_page.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "migration/vmstate.h" #include "trace.h" @@ -41,8 +41,8 @@ #include "xen_overlay.h" #include "xen_xenstore.h" -#include "sysemu/kvm.h" -#include "sysemu/kvm_xen.h" +#include "system/kvm.h" +#include "system/kvm_xen.h" #include #include @@ -140,6 +140,8 @@ struct XenEvtchnState { uint64_t callback_param; bool evtchn_in_kernel; + bool setting_callback_gsi; + int extern_gsi_level; uint32_t callback_gsi; QEMUBH *gsi_bh; @@ -269,7 +271,7 @@ static const VMStateDescription xen_evtchn_vmstate = { } }; -static void xen_evtchn_class_init(ObjectClass *klass, void *data) +static void xen_evtchn_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -431,9 +433,22 @@ void xen_evtchn_set_callback_level(int level) } if (s->callback_gsi && s->callback_gsi < s->nr_callback_gsis) { - qemu_set_irq(s->callback_gsis[s->callback_gsi], level); - if (level) { - /* Ensure the vCPU polls for deassertion */ + /* + * Ugly, but since we hold the BQL we can set this flag so that + * xen_evtchn_set_gsi() can tell the difference between this code + * setting the GSI, and an external device (PCI INTx) doing so. + */ + s->setting_callback_gsi = true; + /* Do not deassert the line if an external device is asserting it. */ + qemu_set_irq(s->callback_gsis[s->callback_gsi], + level || s->extern_gsi_level); + s->setting_callback_gsi = false; + + /* + * If the callback GSI is the only one asserted, ensure the status + * is polled for deassertion in kvm_arch_post_run(). 
+ */ + if (level && !s->extern_gsi_level) { kvm_xen_set_callback_asserted(); } } @@ -1596,7 +1611,7 @@ static int allocate_pirq(XenEvtchnState *s, int type, int gsi) return pirq; } -bool xen_evtchn_set_gsi(int gsi, int level) +bool xen_evtchn_set_gsi(int gsi, int *level) { XenEvtchnState *s = xen_evtchn_singleton; int pirq; @@ -1608,16 +1623,35 @@ bool xen_evtchn_set_gsi(int gsi, int level) } /* - * Check that that it *isn't* the event channel GSI, and thus - * that we are not recursing and it's safe to take s->port_lock. - * - * Locking aside, it's perfectly sane to bail out early for that - * special case, as it would make no sense for the event channel - * GSI to be routed back to event channels, when the delivery - * method is to raise the GSI... that recursion wouldn't *just* - * be a locking issue. + * For the callback_gsi we need to implement a logical OR of the event + * channel GSI and the external input (e.g. from PCI INTx), because + * QEMU itself doesn't support shared level interrupts via demux or + * resamplers. */ if (gsi && gsi == s->callback_gsi) { + /* Remember the external state of the GSI pin (e.g. from PCI INTx) */ + if (!s->setting_callback_gsi) { + s->extern_gsi_level = *level; + + /* + * Don't allow the external device to deassert the line if the + * eveht channel GSI should still be asserted. + */ + if (!s->extern_gsi_level) { + struct vcpu_info *vi = kvm_xen_get_vcpu_info_hva(0); + if (vi && vi->evtchn_upcall_pending) { + /* Need to poll for deassertion */ + kvm_xen_set_callback_asserted(); + *level = 1; + } + } + } + + /* + * The event channel GSI cannot be routed to PIRQ, as that would make + * no sense. It could also deadlock on s->port_lock, if we proceed. + * So bail out now. + */ return false; } @@ -1628,7 +1662,7 @@ bool xen_evtchn_set_gsi(int gsi, int level) return false; } - if (level) { + if (*level) { int port = s->pirq[pirq].port; s->pirq_gsi_set |= (1U << gsi); diff --git a/hw/i386/kvm/xen_evtchn.h b/hw/i386/kvm/xen_evtchn.h index b740acfc0d7..0521ebc0922 100644 --- a/hw/i386/kvm/xen_evtchn.h +++ b/hw/i386/kvm/xen_evtchn.h @@ -23,7 +23,7 @@ void xen_evtchn_set_callback_level(int level); int xen_evtchn_set_port(uint16_t port); -bool xen_evtchn_set_gsi(int gsi, int level); +bool xen_evtchn_set_gsi(int gsi, int *level); void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector, uint64_t addr, uint32_t data, bool is_masked); void xen_evtchn_remove_pci_device(PCIDevice *dev); diff --git a/hw/i386/kvm/xen_gnttab.c b/hw/i386/kvm/xen_gnttab.c index 245e4b15db7..4b9e272c5eb 100644 --- a/hw/i386/kvm/xen_gnttab.c +++ b/hw/i386/kvm/xen_gnttab.c @@ -17,7 +17,7 @@ #include "qapi/error.h" #include "qom/object.h" #include "exec/target_page.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "migration/vmstate.h" #include "hw/sysbus.h" @@ -27,8 +27,8 @@ #include "xen_gnttab.h" #include "xen_primary_console.h" -#include "sysemu/kvm.h" -#include "sysemu/kvm_xen.h" +#include "system/kvm.h" +#include "system/kvm_xen.h" #include "hw/xen/interface/memory.h" #include "hw/xen/interface/grant_table.h" @@ -135,7 +135,7 @@ static const VMStateDescription xen_gnttab_vmstate = { } }; -static void xen_gnttab_class_init(ObjectClass *klass, void *data) +static void xen_gnttab_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/i386/kvm/xen_overlay.c b/hw/i386/kvm/xen_overlay.c index c68e78ac5ce..3cb73619371 100644 --- a/hw/i386/kvm/xen_overlay.c +++ b/hw/i386/kvm/xen_overlay.c @@ 
-16,15 +16,15 @@ #include "qapi/error.h" #include "qom/object.h" #include "exec/target_page.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "migration/vmstate.h" #include "hw/sysbus.h" #include "hw/xen/xen.h" #include "xen_overlay.h" -#include "sysemu/kvm.h" -#include "sysemu/kvm_xen.h" +#include "system/kvm.h" +#include "system/kvm_xen.h" #include #include "hw/xen/interface/memory.h" @@ -151,11 +151,11 @@ static void xen_overlay_reset(DeviceState *dev) kvm_xen_soft_reset(); } -static void xen_overlay_class_init(ObjectClass *klass, void *data) +static void xen_overlay_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = xen_overlay_reset; + device_class_set_legacy_reset(dc, xen_overlay_reset); dc->realize = xen_overlay_realize; dc->vmsd = &xen_overlay_vmstate; } diff --git a/hw/i386/kvm/xen_primary_console.c b/hw/i386/kvm/xen_primary_console.c index abe79f565be..6e9d6417c3e 100644 --- a/hw/i386/kvm/xen_primary_console.c +++ b/hw/i386/kvm/xen_primary_console.c @@ -20,8 +20,8 @@ #include "xen_overlay.h" #include "xen_primary_console.h" -#include "sysemu/kvm.h" -#include "sysemu/kvm_xen.h" +#include "system/kvm.h" +#include "system/kvm_xen.h" #include "trace.h" @@ -67,7 +67,7 @@ static void xen_primary_console_realize(DeviceState *dev, Error **errp) xen_primary_console_singleton = s; } -static void xen_primary_console_class_init(ObjectClass *klass, void *data) +static void xen_primary_console_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c index 1a9bc342b88..42955cccd90 100644 --- a/hw/i386/kvm/xen_xenstore.c +++ b/hw/i386/kvm/xen_xenstore.c @@ -28,8 +28,8 @@ #include "xen_primary_console.h" #include "xen_xenstore.h" -#include "sysemu/kvm.h" -#include "sysemu/kvm_xen.h" +#include "system/kvm.h" +#include "system/kvm_xen.h" #include "trace.h" @@ -209,7 +209,6 @@ static int xen_xenstore_post_load(void *opaque, int ver) { XenXenstoreState *s = opaque; GByteArray *save; - int ret; /* * As qemu/dom0, rebind to the guest's port. 
The Windows drivers may @@ -231,8 +230,7 @@ static int xen_xenstore_post_load(void *opaque, int ver) s->impl_state = NULL; s->impl_state_size = 0; - ret = xs_impl_deserialize(s->impl, save, xen_domid, fire_watch_cb, s); - return ret; + return xs_impl_deserialize(s->impl, save, xen_domid, fire_watch_cb, s); } static const VMStateDescription xen_xenstore_vmstate = { @@ -261,7 +259,7 @@ static const VMStateDescription xen_xenstore_vmstate = { } }; -static void xen_xenstore_class_init(ObjectClass *klass, void *data) +static void xen_xenstore_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -532,6 +530,10 @@ static void xs_read(XenXenstoreState *s, unsigned int req_id, return; } + if (!len) { + return; + } + memcpy(&rsp_data[rsp->len], data->data, len); rsp->len += len; } diff --git a/hw/i386/meson.build b/hw/i386/meson.build index 03aad10df7a..7896f348cff 100644 --- a/hw/i386/meson.build +++ b/hw/i386/meson.build @@ -15,6 +15,7 @@ i386_ss.add(when: 'CONFIG_AMD_IOMMU', if_true: files('amd_iommu.c'), if_false: files('amd_iommu-stub.c')) i386_ss.add(when: 'CONFIG_I440FX', if_true: files('pc_piix.c')) i386_ss.add(when: 'CONFIG_MICROVM', if_true: files('x86-common.c', 'microvm.c', 'acpi-microvm.c', 'microvm-dt.c')) +i386_ss.add(when: 'CONFIG_NITRO_ENCLAVE', if_true: files('nitro_enclave.c')) i386_ss.add(when: 'CONFIG_Q35', if_true: files('pc_q35.c')) i386_ss.add(when: 'CONFIG_VMMOUSE', if_true: files('vmmouse.c')) i386_ss.add(when: 'CONFIG_VMPORT', if_true: files('vmport.c')) @@ -31,6 +32,7 @@ i386_ss.add(when: 'CONFIG_PC', if_true: files( 'port92.c')) i386_ss.add(when: 'CONFIG_X86_FW_OVMF', if_true: files('pc_sysfw_ovmf.c'), if_false: files('pc_sysfw_ovmf-stubs.c')) +i386_ss.add(when: 'CONFIG_TDX', if_true: files('tdvf.c', 'tdvf-hob.c')) subdir('kvm') subdir('xen') diff --git a/hw/i386/microvm-dt.c b/hw/i386/microvm-dt.c index b3049e4f9f2..cb27dfd732e 100644 --- a/hw/i386/microvm-dt.c +++ b/hw/i386/microvm-dt.c @@ -33,8 +33,8 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" #include "qapi/error.h" -#include "sysemu/device_tree.h" -#include "hw/char/serial.h" +#include "system/device_tree.h" +#include "hw/char/serial-isa.h" #include "hw/i386/fw_cfg.h" #include "hw/rtc/mc146818rtc.h" #include "hw/sysbus.h" diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c index 40edcee7af2..94d22a232ac 100644 --- a/hw/i386/microvm.c +++ b/hw/i386/microvm.c @@ -22,11 +22,11 @@ #include "qapi/error.h" #include "qapi/visitor.h" #include "qapi/qapi-visit-common.h" -#include "sysemu/sysemu.h" -#include "sysemu/cpus.h" -#include "sysemu/numa.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" +#include "system/system.h" +#include "system/cpus.h" +#include "system/numa.h" +#include "system/reset.h" +#include "system/runstate.h" #include "acpi-microvm.h" #include "microvm-dt.h" @@ -39,7 +39,7 @@ #include "hw/intc/i8259.h" #include "hw/timer/i8254.h" #include "hw/rtc/mc146818rtc.h" -#include "hw/char/serial.h" +#include "hw/char/serial-isa.h" #include "hw/display/ramfb.h" #include "hw/i386/topology.h" #include "hw/i386/e820_memory_layout.h" @@ -49,6 +49,7 @@ #include "hw/acpi/generic_event_device.h" #include "hw/pci-host/gpex.h" #include "hw/usb/xhci.h" +#include "hw/vfio/types.h" #include "elf.h" #include "kvm/kvm_i386.h" @@ -139,7 +140,7 @@ static void create_gpex(MicrovmMachineState *mms) mms->gpex.mmio64.base, mmio64_alias); } - for (i = 0; i < GPEX_NUM_IRQS; i++) { + for (i = 0; i < PCI_NUM_PINS; i++) { sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, 
x86ms->gsi[mms->gpex.irq + i]); } @@ -283,6 +284,7 @@ static void microvm_devices_init(MicrovmMachineState *mms) static void microvm_memory_init(MicrovmMachineState *mms) { + MicrovmMachineClass *mmc = MICROVM_MACHINE_GET_CLASS(mms); MachineState *machine = MACHINE(mms); X86MachineState *x86ms = X86_MACHINE(mms); MemoryRegion *ram_below_4g, *ram_above_4g; @@ -328,7 +330,7 @@ static void microvm_memory_init(MicrovmMachineState *mms) rom_set_fw(fw_cfg); if (machine->kernel_filename != NULL) { - x86_load_linux(x86ms, fw_cfg, 0, true); + mmc->x86_load_linux(x86ms, fw_cfg, 0, true); } if (mms->option_roms) { @@ -450,11 +452,44 @@ static HotplugHandler *microvm_get_hotplug_handler(MachineState *machine, return NULL; } +static void microvm_machine_done(Notifier *notifier, void *data) +{ + MicrovmMachineState *mms = container_of(notifier, MicrovmMachineState, + machine_done); + X86MachineState *x86ms = X86_MACHINE(mms); + + acpi_setup_microvm(mms); + dt_setup_microvm(mms); + fw_cfg_add_e820(x86ms->fw_cfg); +} + +static void microvm_powerdown_req(Notifier *notifier, void *data) +{ + MicrovmMachineState *mms = container_of(notifier, MicrovmMachineState, + powerdown_req); + X86MachineState *x86ms = X86_MACHINE(mms); + + if (x86ms->acpi_dev) { + Object *obj = OBJECT(x86ms->acpi_dev); + AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(obj); + adevc->send_event(ACPI_DEVICE_IF(x86ms->acpi_dev), + ACPI_POWER_DOWN_STATUS); + } +} + static void microvm_machine_state_init(MachineState *machine) { MicrovmMachineState *mms = MICROVM_MACHINE(machine); X86MachineState *x86ms = X86_MACHINE(machine); + /* State */ + mms->kernel_cmdline_fixed = false; + + mms->machine_done.notify = microvm_machine_done; + qemu_add_machine_init_done_notifier(&mms->machine_done); + mms->powerdown_req.notify = microvm_powerdown_req; + qemu_register_powerdown_notifier(&mms->powerdown_req); + microvm_memory_init(mms); x86_cpus_init(x86ms, CPU_VERSION_LATEST); @@ -462,7 +497,7 @@ static void microvm_machine_state_init(MachineState *machine) microvm_devices_init(mms); } -static void microvm_machine_reset(MachineState *machine, ShutdownCause reason) +static void microvm_machine_reset(MachineState *machine, ResetType type) { MicrovmMachineState *mms = MICROVM_MACHINE(machine); CPUState *cs; @@ -475,7 +510,7 @@ static void microvm_machine_reset(MachineState *machine, ShutdownCause reason) mms->kernel_cmdline_fixed = true; } - qemu_devices_reset(reason); + qemu_devices_reset(type); CPU_FOREACH(cs) { cpu = X86_CPU(cs); @@ -580,31 +615,6 @@ static void microvm_machine_set_auto_kernel_cmdline(Object *obj, bool value, mms->auto_kernel_cmdline = value; } -static void microvm_machine_done(Notifier *notifier, void *data) -{ - MicrovmMachineState *mms = container_of(notifier, MicrovmMachineState, - machine_done); - X86MachineState *x86ms = X86_MACHINE(mms); - - acpi_setup_microvm(mms); - dt_setup_microvm(mms); - fw_cfg_add_e820(x86ms->fw_cfg); -} - -static void microvm_powerdown_req(Notifier *notifier, void *data) -{ - MicrovmMachineState *mms = container_of(notifier, MicrovmMachineState, - powerdown_req); - X86MachineState *x86ms = X86_MACHINE(mms); - - if (x86ms->acpi_dev) { - Object *obj = OBJECT(x86ms->acpi_dev); - AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(obj); - adevc->send_event(ACPI_DEVICE_IF(x86ms->acpi_dev), - ACPI_POWER_DOWN_STATUS); - } -} - static void microvm_machine_initfn(Object *obj) { MicrovmMachineState *mms = MICROVM_MACHINE(obj); @@ -616,14 +626,6 @@ static void microvm_machine_initfn(Object *obj) mms->isa_serial = 
true; mms->option_roms = true; mms->auto_kernel_cmdline = true; - - /* State */ - mms->kernel_cmdline_fixed = false; - - mms->machine_done.notify = microvm_machine_done; - qemu_add_machine_init_done_notifier(&mms->machine_done); - mms->powerdown_req.notify = microvm_powerdown_req; - qemu_register_powerdown_notifier(&mms->powerdown_req); } GlobalProperty microvm_properties[] = { @@ -632,14 +634,19 @@ GlobalProperty microvm_properties[] = { * so reserving io space is not going to work. Turn it off. */ { "pcie-root-port", "io-reserve", "0" }, + { TYPE_RAMFB_DEVICE, "use-legacy-x86-rom", "true" }, + { TYPE_VFIO_PCI_NOHOTPLUG, "use-legacy-x86-rom", "true" }, }; -static void microvm_class_init(ObjectClass *oc, void *data) +static void microvm_class_init(ObjectClass *oc, const void *data) { X86MachineClass *x86mc = X86_MACHINE_CLASS(oc); + MicrovmMachineClass *mmc = MICROVM_MACHINE_CLASS(oc); MachineClass *mc = MACHINE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); + mmc->x86_load_linux = x86_load_linux; + mc->init = microvm_machine_state_init; mc->family = "microvm_i386"; @@ -722,7 +729,7 @@ static const TypeInfo microvm_machine_info = { .instance_init = microvm_machine_initfn, .class_size = sizeof(MicrovmMachineClass), .class_init = microvm_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } }, diff --git a/hw/i386/monitor.c b/hw/i386/monitor.c index 1ebd3564bf2..79df96562f6 100644 --- a/hw/i386/monitor.c +++ b/hw/i386/monitor.c @@ -24,9 +24,9 @@ #include "qemu/osdep.h" #include "monitor/monitor.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/error.h" -#include "qapi/qapi-commands-misc-target.h" +#include "qapi/qapi-commands-misc-i386.h" #include "hw/i386/x86.h" #include "hw/rtc/mc146818rtc.h" diff --git a/hw/i386/multiboot.c b/hw/i386/multiboot.c index 3332712ab35..6e6b96bc345 100644 --- a/hw/i386/multiboot.c +++ b/hw/i386/multiboot.c @@ -29,7 +29,8 @@ #include "multiboot.h" #include "hw/loader.h" #include "elf.h" -#include "sysemu/sysemu.h" +#include "exec/target_page.h" +#include "system/system.h" #include "qemu/error-report.h" /* Show multiboot debug output */ @@ -133,9 +134,9 @@ static void mb_add_mod(MultibootState *s, p = (char *)s->mb_buf + s->offset_mbinfo + MB_MOD_SIZE * s->mb_mods_count; - stl_p(p + MB_MOD_START, start); - stl_p(p + MB_MOD_END, end); - stl_p(p + MB_MOD_CMDLINE, cmdline_phys); + stl_le_p(p + MB_MOD_START, start); + stl_le_p(p + MB_MOD_END, end); + stl_le_p(p + MB_MOD_CMDLINE, cmdline_phys); mb_debug("mod%02d: "HWADDR_FMT_plx" - "HWADDR_FMT_plx, s->mb_mods_count, start, end); @@ -168,9 +169,9 @@ int load_multiboot(X86MachineState *x86ms, /* Ok, let's see if it is a multiboot image. The header is 12x32bit long, so the latest entry may be 8192 - 48. 
*/ for (i = 0; i < (8192 - 48); i += 4) { - if (ldl_p(header+i) == 0x1BADB002) { - uint32_t checksum = ldl_p(header+i+8); - flags = ldl_p(header+i+4); + if (ldl_le_p(header + i) == 0x1BADB002) { + uint32_t checksum = ldl_le_p(header + i + 8); + flags = ldl_le_p(header + i + 4); checksum += flags; checksum += (uint32_t)0x1BADB002; if (!checksum) { @@ -202,8 +203,8 @@ int load_multiboot(X86MachineState *x86ms, } kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry, - &elf_low, &elf_high, NULL, 0, I386_ELF_MACHINE, - 0, 0); + &elf_low, &elf_high, NULL, + ELFDATA2LSB, I386_ELF_MACHINE, 0, 0); if (kernel_size < 0) { error_report("Error while loading elf kernel"); exit(1); @@ -223,11 +224,11 @@ int load_multiboot(X86MachineState *x86ms, mb_kernel_size, (size_t)mh_entry_addr); } else { /* Valid if mh_flags sets MULTIBOOT_HEADER_HAS_ADDR. */ - uint32_t mh_header_addr = ldl_p(header+i+12); - uint32_t mh_load_end_addr = ldl_p(header+i+20); - uint32_t mh_bss_end_addr = ldl_p(header+i+24); + uint32_t mh_header_addr = ldl_le_p(header + i + 12); + uint32_t mh_load_end_addr = ldl_le_p(header + i + 20); + uint32_t mh_bss_end_addr = ldl_le_p(header + i + 24); - mh_load_addr = ldl_p(header+i+16); + mh_load_addr = ldl_le_p(header + i + 16); if (mh_header_addr < mh_load_addr) { error_report("invalid load_addr address"); exit(1); @@ -239,7 +240,7 @@ int load_multiboot(X86MachineState *x86ms, uint32_t mb_kernel_text_offset = i - (mh_header_addr - mh_load_addr); uint32_t mb_load_size = 0; - mh_entry_addr = ldl_p(header+i+28); + mh_entry_addr = ldl_le_p(header + i + 28); if (mh_load_end_addr) { if (mh_load_end_addr < mh_load_addr) { @@ -364,22 +365,21 @@ int load_multiboot(X86MachineState *x86ms, /* Commandline support */ kcmdline = g_strdup_printf("%s %s", kernel_filename, kernel_cmdline); - stl_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline)); - - stl_p(bootinfo + MBI_BOOTLOADER, mb_add_bootloader(&mbs, bootloader_name)); - - stl_p(bootinfo + MBI_MODS_ADDR, mbs.mb_buf_phys + mbs.offset_mbinfo); - stl_p(bootinfo + MBI_MODS_COUNT, mbs.mb_mods_count); /* mods_count */ + stl_le_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline)); + stl_le_p(bootinfo + MBI_BOOTLOADER, mb_add_bootloader(&mbs, + bootloader_name)); + stl_le_p(bootinfo + MBI_MODS_ADDR, mbs.mb_buf_phys + mbs.offset_mbinfo); + stl_le_p(bootinfo + MBI_MODS_COUNT, mbs.mb_mods_count); /* mods_count */ /* the kernel is where we want it to be now */ - stl_p(bootinfo + MBI_FLAGS, MULTIBOOT_FLAGS_MEMORY + stl_le_p(bootinfo + MBI_FLAGS, MULTIBOOT_FLAGS_MEMORY | MULTIBOOT_FLAGS_BOOT_DEVICE | MULTIBOOT_FLAGS_CMDLINE | MULTIBOOT_FLAGS_MODULES | MULTIBOOT_FLAGS_MMAP | MULTIBOOT_FLAGS_BOOTLOADER); - stl_p(bootinfo + MBI_BOOT_DEVICE, 0x8000ffff); /* XXX: use the -boot switch? */ - stl_p(bootinfo + MBI_MMAP_ADDR, ADDR_E820_MAP); + stl_le_p(bootinfo + MBI_BOOT_DEVICE, 0x8000ffff); /* XXX: use the -boot switch? */ + stl_le_p(bootinfo + MBI_MMAP_ADDR, ADDR_E820_MAP); mb_debug("multiboot: entry_addr = %#x", mh_entry_addr); mb_debug(" mb_buf_phys = "HWADDR_FMT_plx, mbs.mb_buf_phys); diff --git a/hw/i386/nitro_enclave.c b/hw/i386/nitro_enclave.c new file mode 100644 index 00000000000..5ee50f3b858 --- /dev/null +++ b/hw/i386/nitro_enclave.c @@ -0,0 +1,353 @@ +/* + * AWS nitro-enclave machine + * + * Copyright (c) 2024 Dorjoy Chowdhury + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qom/object_interfaces.h" + +#include "chardev/char.h" +#include "hw/sysbus.h" +#include "hw/core/eif.h" +#include "hw/i386/x86.h" +#include "hw/i386/microvm.h" +#include "hw/i386/nitro_enclave.h" +#include "hw/virtio/virtio-mmio.h" +#include "hw/virtio/virtio-nsm.h" +#include "hw/virtio/vhost-user-vsock.h" +#include "system/hostmem.h" + +static BusState *find_free_virtio_mmio_bus(void) +{ + BusChild *kid; + BusState *bus = sysbus_get_default(); + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MMIO)) { + VirtIOMMIOProxy *mmio = VIRTIO_MMIO(OBJECT(dev)); + VirtioBusState *mmio_virtio_bus = &mmio->bus; + BusState *mmio_bus = &mmio_virtio_bus->parent_obj; + if (QTAILQ_EMPTY(&mmio_bus->children)) { + return mmio_bus; + } + } + } + + return NULL; +} + +static void vhost_user_vsock_init(NitroEnclaveMachineState *nems) +{ + DeviceState *dev = qdev_new(TYPE_VHOST_USER_VSOCK); + VHostUserVSock *vsock = VHOST_USER_VSOCK(dev); + BusState *bus; + + if (!nems->vsock) { + error_report("A valid chardev id for vhost-user-vsock device must be " + "provided using the 'vsock' machine option"); + exit(1); + } + + bus = find_free_virtio_mmio_bus(); + if (!bus) { + error_report("Failed to find bus for vhost-user-vsock device"); + exit(1); + } + + Chardev *chardev = qemu_chr_find(nems->vsock); + if (!chardev) { + error_report("Failed to find chardev with id %s", nems->vsock); + exit(1); + } + + vsock->conf.chardev.chr = chardev; + + qdev_realize_and_unref(dev, bus, &error_fatal); +} + +static void virtio_nsm_init(NitroEnclaveMachineState *nems) +{ + DeviceState *dev = qdev_new(TYPE_VIRTIO_NSM); + VirtIONSM *vnsm = VIRTIO_NSM(dev); + BusState *bus = find_free_virtio_mmio_bus(); + + if (!bus) { + error_report("Failed to find bus for virtio-nsm device."); + exit(1); + } + + qdev_prop_set_string(dev, "module-id", nems->id); + + qdev_realize_and_unref(dev, bus, &error_fatal); + nems->vnsm = vnsm; +} + +static void nitro_enclave_devices_init(NitroEnclaveMachineState *nems) +{ + vhost_user_vsock_init(nems); + virtio_nsm_init(nems); +} + +static void nitro_enclave_machine_state_init(MachineState *machine) +{ + NitroEnclaveMachineClass *ne_class = + NITRO_ENCLAVE_MACHINE_GET_CLASS(machine); + NitroEnclaveMachineState *ne_state = NITRO_ENCLAVE_MACHINE(machine); + + ne_class->parent_init(machine); + nitro_enclave_devices_init(ne_state); +} + +static void nitro_enclave_machine_reset(MachineState *machine, ResetType type) +{ + NitroEnclaveMachineClass *ne_class = + NITRO_ENCLAVE_MACHINE_GET_CLASS(machine); + NitroEnclaveMachineState *ne_state = NITRO_ENCLAVE_MACHINE(machine); + + ne_class->parent_reset(machine, type); + + memset(ne_state->vnsm->pcrs, 0, sizeof(ne_state->vnsm->pcrs)); + + /* PCR0 */ + ne_state->vnsm->extend_pcr(ne_state->vnsm, 0, ne_state->image_hash, + QCRYPTO_HASH_DIGEST_LEN_SHA384); + /* PCR1 */ + ne_state->vnsm->extend_pcr(ne_state->vnsm, 1, ne_state->bootstrap_hash, + QCRYPTO_HASH_DIGEST_LEN_SHA384); + /* PCR2 */ + ne_state->vnsm->extend_pcr(ne_state->vnsm, 2, ne_state->app_hash, + QCRYPTO_HASH_DIGEST_LEN_SHA384); + /* PCR3 */ + if (ne_state->parent_role) { + ne_state->vnsm->extend_pcr(ne_state->vnsm, 3, + (uint8_t *) ne_state->parent_role, + strlen(ne_state->parent_role)); + } + /* PCR4 */ + if (ne_state->parent_id) { + ne_state->vnsm->extend_pcr(ne_state->vnsm, 4, + (uint8_t *) ne_state->parent_id, + 
strlen(ne_state->parent_id)); + } + /* PCR8 */ + if (ne_state->signature_found) { + ne_state->vnsm->extend_pcr(ne_state->vnsm, 8, + ne_state->fingerprint_hash, + QCRYPTO_HASH_DIGEST_LEN_SHA384); + } + + /* First 16 PCRs are locked from boot and reserved for nitro enclave */ + for (int i = 0; i < 16; ++i) { + ne_state->vnsm->lock_pcr(ne_state->vnsm, i); + } +} + +static void nitro_enclave_machine_initfn(Object *obj) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + X86MachineState *x86ms = X86_MACHINE(obj); + NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj); + + nems->id = g_strdup("i-234-enc5678"); + + /* AWS nitro enclaves have PCIE and ACPI disabled */ + mms->pcie = ON_OFF_AUTO_OFF; + x86ms->acpi = ON_OFF_AUTO_OFF; +} + +static void x86_load_eif(X86MachineState *x86ms, FWCfgState *fw_cfg, + int acpi_data_size, bool pvh_enabled) +{ + Error *err = NULL; + char *eif_kernel, *eif_initrd, *eif_cmdline; + MachineState *machine = MACHINE(x86ms); + NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(x86ms); + + if (!read_eif_file(machine->kernel_filename, machine->initrd_filename, + &eif_kernel, &eif_initrd, &eif_cmdline, + nems->image_hash, nems->bootstrap_hash, + nems->app_hash, nems->fingerprint_hash, + &(nems->signature_found), &err)) { + error_report_err(err); + exit(1); + } + + g_free(machine->kernel_filename); + machine->kernel_filename = eif_kernel; + g_free(machine->initrd_filename); + machine->initrd_filename = eif_initrd; + + /* + * If kernel cmdline argument was provided, let's concatenate it to the + * extracted EIF kernel cmdline. + */ + if (machine->kernel_cmdline != NULL) { + char *cmd = g_strdup_printf("%s %s", eif_cmdline, + machine->kernel_cmdline); + g_free(eif_cmdline); + g_free(machine->kernel_cmdline); + machine->kernel_cmdline = cmd; + } else { + machine->kernel_cmdline = eif_cmdline; + } + + x86_load_linux(x86ms, fw_cfg, 0, true); + + unlink(machine->kernel_filename); + unlink(machine->initrd_filename); +} + +static bool create_memfd_backend(MachineState *ms, const char *path, + Error **errp) +{ + Object *obj; + MachineClass *mc = MACHINE_GET_CLASS(ms); + bool r = false; + + obj = object_new(TYPE_MEMORY_BACKEND_MEMFD); + if (!object_property_set_int(obj, "size", ms->ram_size, errp)) { + goto out; + } + object_property_add_child(object_get_objects_root(), mc->default_ram_id, + obj); + + if (!user_creatable_complete(USER_CREATABLE(obj), errp)) { + goto out; + } + r = object_property_set_link(OBJECT(ms), "memory-backend", obj, errp); + +out: + object_unref(obj); + return r; +} + +static char *nitro_enclave_get_vsock_chardev_id(Object *obj, Error **errp) +{ + NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj); + + return g_strdup(nems->vsock); +} + +static void nitro_enclave_set_vsock_chardev_id(Object *obj, const char *value, + Error **errp) +{ + NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj); + + g_free(nems->vsock); + nems->vsock = g_strdup(value); +} + +static char *nitro_enclave_get_id(Object *obj, Error **errp) +{ + NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj); + + return g_strdup(nems->id); +} + +static void nitro_enclave_set_id(Object *obj, const char *value, + Error **errp) +{ + NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj); + + g_free(nems->id); + nems->id = g_strdup(value); +} + +static char *nitro_enclave_get_parent_role(Object *obj, Error **errp) +{ + NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj); + + return g_strdup(nems->parent_role); +} + +static void 
nitro_enclave_set_parent_role(Object *obj, const char *value, + Error **errp) +{ + NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj); + + g_free(nems->parent_role); + nems->parent_role = g_strdup(value); +} + +static char *nitro_enclave_get_parent_id(Object *obj, Error **errp) +{ + NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj); + + return g_strdup(nems->parent_id); +} + +static void nitro_enclave_set_parent_id(Object *obj, const char *value, + Error **errp) +{ + NitroEnclaveMachineState *nems = NITRO_ENCLAVE_MACHINE(obj); + + g_free(nems->parent_id); + nems->parent_id = g_strdup(value); +} + +static void nitro_enclave_class_init(ObjectClass *oc, const void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + MicrovmMachineClass *mmc = MICROVM_MACHINE_CLASS(oc); + NitroEnclaveMachineClass *nemc = NITRO_ENCLAVE_MACHINE_CLASS(oc); + + mmc->x86_load_linux = x86_load_eif; + + mc->family = "nitro_enclave_i386"; + mc->desc = "AWS Nitro Enclave"; + + nemc->parent_init = mc->init; + mc->init = nitro_enclave_machine_state_init; + + nemc->parent_reset = mc->reset; + mc->reset = nitro_enclave_machine_reset; + + mc->create_default_memdev = create_memfd_backend; + + object_class_property_add_str(oc, NITRO_ENCLAVE_VSOCK_CHARDEV_ID, + nitro_enclave_get_vsock_chardev_id, + nitro_enclave_set_vsock_chardev_id); + object_class_property_set_description(oc, NITRO_ENCLAVE_VSOCK_CHARDEV_ID, + "Set chardev id for vhost-user-vsock " + "device"); + + object_class_property_add_str(oc, NITRO_ENCLAVE_ID, nitro_enclave_get_id, + nitro_enclave_set_id); + object_class_property_set_description(oc, NITRO_ENCLAVE_ID, + "Set enclave identifier"); + + object_class_property_add_str(oc, NITRO_ENCLAVE_PARENT_ROLE, + nitro_enclave_get_parent_role, + nitro_enclave_set_parent_role); + object_class_property_set_description(oc, NITRO_ENCLAVE_PARENT_ROLE, + "Set parent instance IAM role ARN"); + + object_class_property_add_str(oc, NITRO_ENCLAVE_PARENT_ID, + nitro_enclave_get_parent_id, + nitro_enclave_set_parent_id); + object_class_property_set_description(oc, NITRO_ENCLAVE_PARENT_ID, + "Set parent instance identifier"); +} + +static const TypeInfo nitro_enclave_machine_info = { + .name = TYPE_NITRO_ENCLAVE_MACHINE, + .parent = TYPE_MICROVM_MACHINE, + .instance_size = sizeof(NitroEnclaveMachineState), + .instance_init = nitro_enclave_machine_initfn, + .class_size = sizeof(NitroEnclaveMachineClass), + .class_init = nitro_enclave_class_init, +}; + +static void nitro_enclave_machine_init(void) +{ + type_register_static(&nitro_enclave_machine_info); +} +type_init(nitro_enclave_machine_init); diff --git a/hw/i386/pc.c b/hw/i386/pc.c index c74931d577a..2f58e73d334 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -24,13 +24,14 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "exec/target_page.h" #include "hw/i386/pc.h" -#include "hw/char/serial.h" +#include "hw/char/serial-isa.h" #include "hw/char/parallel.h" #include "hw/hyperv/hv-balloon.h" #include "hw/i386/fw_cfg.h" #include "hw/i386/vmport.h" -#include "sysemu/cpus.h" +#include "system/cpus.h" #include "hw/ide/ide-bus.h" #include "hw/timer/hpet.h" #include "hw/loader.h" @@ -39,12 +40,13 @@ #include "hw/timer/i8254.h" #include "hw/input/i8042.h" #include "hw/audio/pcspk.h" -#include "sysemu/sysemu.h" -#include "sysemu/xen.h" -#include "sysemu/reset.h" +#include "system/system.h" +#include "system/xen.h" +#include "system/reset.h" #include "kvm/kvm_i386.h" +#include "kvm/tdx.h" #include "hw/xen/xen.h" -#include "qapi/qmp/qlist.h" +#include 
"qobject/qlist.h" #include "qemu/error-report.h" #include "hw/acpi/cpu_hotplug.h" #include "acpi-build.h" @@ -79,6 +81,23 @@ { "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\ { "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, }, +GlobalProperty pc_compat_10_0[] = { + { TYPE_X86_CPU, "x-consistent-cache", "false" }, + { TYPE_X86_CPU, "x-vendor-cpuid-only-v2", "false" }, +}; +const size_t pc_compat_10_0_len = G_N_ELEMENTS(pc_compat_10_0); + +GlobalProperty pc_compat_9_2[] = {}; +const size_t pc_compat_9_2_len = G_N_ELEMENTS(pc_compat_9_2); + +GlobalProperty pc_compat_9_1[] = { + { "ICH9-LPC", "x-smi-swsmi-timer", "off" }, + { "ICH9-LPC", "x-smi-periodic-timer", "off" }, + { TYPE_INTEL_IOMMU_DEVICE, "stale-tm", "on" }, + { TYPE_INTEL_IOMMU_DEVICE, "aw-bits", "39" }, +}; +const size_t pc_compat_9_1_len = G_N_ELEMENTS(pc_compat_9_1); + GlobalProperty pc_compat_9_0[] = { { TYPE_X86_CPU, "x-amd-topoext-features-only", "false" }, { TYPE_X86_CPU, "x-l1-cache-per-thread", "false" }, @@ -244,28 +263,6 @@ GlobalProperty pc_compat_2_6[] = { }; const size_t pc_compat_2_6_len = G_N_ELEMENTS(pc_compat_2_6); -GlobalProperty pc_compat_2_5[] = {}; -const size_t pc_compat_2_5_len = G_N_ELEMENTS(pc_compat_2_5); - -GlobalProperty pc_compat_2_4[] = { - PC_CPU_MODEL_IDS("2.4.0") - { "Haswell-" TYPE_X86_CPU, "abm", "off" }, - { "Haswell-noTSX-" TYPE_X86_CPU, "abm", "off" }, - { "Broadwell-" TYPE_X86_CPU, "abm", "off" }, - { "Broadwell-noTSX-" TYPE_X86_CPU, "abm", "off" }, - { "host" "-" TYPE_X86_CPU, "host-cache-info", "on" }, - { TYPE_X86_CPU, "check", "off" }, - { "qemu64" "-" TYPE_X86_CPU, "sse4a", "on" }, - { "qemu64" "-" TYPE_X86_CPU, "abm", "on" }, - { "qemu64" "-" TYPE_X86_CPU, "popcnt", "on" }, - { "qemu32" "-" TYPE_X86_CPU, "popcnt", "on" }, - { "Opteron_G2" "-" TYPE_X86_CPU, "rdtscp", "on" }, - { "Opteron_G3" "-" TYPE_X86_CPU, "rdtscp", "on" }, - { "Opteron_G4" "-" TYPE_X86_CPU, "rdtscp", "on" }, - { "Opteron_G5" "-" TYPE_X86_CPU, "rdtscp", "on", } -}; -const size_t pc_compat_2_4_len = G_N_ELEMENTS(pc_compat_2_4); - /* * @PC_FW_DATA: * Size of the chunk of memory at the top of RAM for the BIOS ACPI tables @@ -453,7 +450,7 @@ static int check_fdc(Object *obj, void *opaque) } static const char * const fdc_container_path[] = { - "/unattached", "/peripheral", "/peripheral-anon" + "unattached", "peripheral", "peripheral-anon" }; /* @@ -467,7 +464,7 @@ static ISADevice *pc_find_fdc0(void) CheckFdcState state = { 0 }; for (i = 0; i < ARRAY_SIZE(fdc_container_path); i++) { - container = container_get(qdev_get_machine(), fdc_container_path[i]); + container = machine_get_container(fdc_container_path[i]); object_child_foreach(container, check_fdc, &state); } @@ -615,13 +612,14 @@ void pc_machine_done(Notifier *notifier, void *data) &error_fatal); if (pcms->cxl_devices_state.is_enabled) { - cxl_fmws_link_targets(&pcms->cxl_devices_state, &error_fatal); + cxl_fmws_link_targets(&error_fatal); } /* set the number of CPUs */ x86_rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus); - fw_cfg_add_extra_pci_roots(pcms->pcibus, x86ms->fw_cfg); + pci_bus_add_fw_cfg_extra_pci_roots(x86ms->fw_cfg, pcms->pcibus, + &error_abort); acpi_setup(); if (x86ms->fw_cfg) { @@ -723,20 +721,28 @@ static uint64_t pc_get_cxl_range_start(PCMachineState *pcms) return cxl_base; } -static uint64_t pc_get_cxl_range_end(PCMachineState *pcms) +static int cxl_get_fmw_end(Object *obj, void *opaque) { - uint64_t start = pc_get_cxl_range_start(pcms) + MiB; + struct CXLFixedWindow *fw; + uint64_t *start = opaque; - 
if (pcms->cxl_devices_state.fixed_windows) { - GList *it; - - start = ROUND_UP(start, 256 * MiB); - for (it = pcms->cxl_devices_state.fixed_windows; it; it = it->next) { - CXLFixedWindow *fw = it->data; - start += fw->size; - } + if (!object_dynamic_cast(obj, TYPE_CXL_FMW)) { + return 0; } + fw = CXL_FMW(obj); + *start += fw->size; + + return 0; +} + +static uint64_t pc_get_cxl_range_end(PCMachineState *pcms) +{ + uint64_t start = pc_get_cxl_range_start(pcms) + MiB; + + /* Ordering doesn't matter so no need to build a sorted list */ + object_child_foreach_recursive(object_get_root(), cxl_get_fmw_end, + &start); return start; } @@ -938,43 +944,31 @@ void pc_memory_init(PCMachineState *pcms, cxl_base = pc_get_cxl_range_start(pcms); memory_region_init(mr, OBJECT(machine), "cxl_host_reg", cxl_size); memory_region_add_subregion(system_memory, cxl_base, mr); - cxl_resv_end = cxl_base + cxl_size; - if (pcms->cxl_devices_state.fixed_windows) { - hwaddr cxl_fmw_base; - GList *it; - - cxl_fmw_base = ROUND_UP(cxl_base + cxl_size, 256 * MiB); - for (it = pcms->cxl_devices_state.fixed_windows; it; it = it->next) { - CXLFixedWindow *fw = it->data; - - fw->base = cxl_fmw_base; - memory_region_init_io(&fw->mr, OBJECT(machine), &cfmws_ops, fw, - "cxl-fixed-memory-region", fw->size); - memory_region_add_subregion(system_memory, fw->base, &fw->mr); - cxl_fmw_base += fw->size; - cxl_resv_end = cxl_fmw_base; - } - } + cxl_base = ROUND_UP(cxl_base + cxl_size, 256 * MiB); + cxl_resv_end = cxl_fmws_set_memmap(cxl_base, maxphysaddr); + cxl_fmws_update_mmio(); } /* Initialize PC system firmware */ pc_system_firmware_init(pcms, rom_memory); - option_rom_mr = g_malloc(sizeof(*option_rom_mr)); - if (machine_require_guest_memfd(machine)) { - memory_region_init_ram_guest_memfd(option_rom_mr, NULL, "pc.rom", - PC_ROM_SIZE, &error_fatal); - } else { - memory_region_init_ram(option_rom_mr, NULL, "pc.rom", PC_ROM_SIZE, - &error_fatal); - if (pcmc->pci_enabled) { - memory_region_set_readonly(option_rom_mr, true); + if (!is_tdx_vm()) { + option_rom_mr = g_malloc(sizeof(*option_rom_mr)); + if (machine_require_guest_memfd(machine)) { + memory_region_init_ram_guest_memfd(option_rom_mr, NULL, "pc.rom", + PC_ROM_SIZE, &error_fatal); + } else { + memory_region_init_ram(option_rom_mr, NULL, "pc.rom", PC_ROM_SIZE, + &error_fatal); + if (pcmc->pci_enabled) { + memory_region_set_readonly(option_rom_mr, true); + } } + memory_region_add_subregion_overlap(rom_memory, + PC_ROM_MIN_VGA, + option_rom_mr, + 1); } - memory_region_add_subregion_overlap(rom_memory, - PC_ROM_MIN_VGA, - option_rom_mr, - 1); fw_cfg = fw_cfg_arch_create(machine, x86ms->boot_cpus, x86ms->apic_id_limit); @@ -983,14 +977,13 @@ void pc_memory_init(PCMachineState *pcms, if (machine->device_memory) { uint64_t *val = g_malloc(sizeof(*val)); - uint64_t res_mem_end = machine->device_memory->base; - - if (!pcmc->broken_reserved_end) { - res_mem_end += memory_region_size(&machine->device_memory->mr); - } + uint64_t res_mem_end; if (pcms->cxl_devices_state.is_enabled) { res_mem_end = cxl_resv_end; + } else { + res_mem_end = machine->device_memory->base + + memory_region_size(&machine->device_memory->mr); } *val = cpu_to_le64(ROUND_UP(res_mem_end, 1 * GiB)); fw_cfg_add_file(fw_cfg, "etc/reserved-memory-end", val, sizeof(*val)); @@ -1028,9 +1021,7 @@ uint64_t pc_pci_hole64_start(void) hole64_start = pc_get_cxl_range_end(pcms); } else if (pcmc->has_reserved_memory && (ms->ram_size < ms->maxram_size)) { pc_get_device_memory_range(pcms, &hole64_start, &size); - if 
(!pcmc->broken_reserved_end) { - hole64_start += size; - } + hole64_start += size; } else { hole64_start = pc_above_4g_end(pcms); } @@ -1042,7 +1033,6 @@ DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus) { DeviceState *dev = NULL; - rom_set_order_override(FW_CFG_ORDER_OVERRIDE_VGA); if (pci_bus) { PCIDevice *pcidev = pci_vga_init(pci_bus); dev = pcidev ? &pcidev->qdev : NULL; @@ -1050,14 +1040,14 @@ DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus) ISADevice *isadev = isa_vga_init(isa_bus); dev = isadev ? DEVICE(isadev) : NULL; } - rom_reset_order_override(); + return dev; } static const MemoryRegionOps ioport80_io_ops = { .write = ioport80_write, .read = ioport80_read, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .impl = { .min_access_size = 1, .max_access_size = 1, @@ -1067,7 +1057,7 @@ static const MemoryRegionOps ioport80_io_ops = { static const MemoryRegionOps ioportF0_io_ops = { .write = ioportF0_write, .read = ioportF0_read, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .impl = { .min_access_size = 1, .max_access_size = 1, @@ -1075,7 +1065,7 @@ static const MemoryRegionOps ioportF0_io_ops = { }; static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl, - bool create_i8042, bool no_vmport) + bool create_i8042, bool no_vmport, Error **errp) { int i; DriveInfo *fd[MAX_FD]; @@ -1100,6 +1090,10 @@ static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl, } if (!create_i8042) { + if (!no_vmport) { + error_setg(errp, + "vmport requires the i8042 controller to be enabled"); + } return; } @@ -1217,9 +1211,17 @@ void pc_basic_device_init(struct PCMachineState *pcms, isa_realize_and_unref(pcms->pcspk, isa_bus, &error_fatal); } + if (pcms->vmport == ON_OFF_AUTO_AUTO) { + pcms->vmport = (xen_enabled() || !pcms->i8042_enabled) + ? 
ON_OFF_AUTO_OFF : ON_OFF_AUTO_ON; + } + /* Super I/O */ pc_superio_init(isa_bus, create_fdctrl, pcms->i8042_enabled, - pcms->vmport != ON_OFF_AUTO_ON); + pcms->vmport != ON_OFF_AUTO_ON, &error_fatal); + + pcms->machine_done.notify = pc_machine_done; + qemu_add_machine_init_done_notifier(&pcms->machine_done); } void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus) @@ -1228,16 +1230,14 @@ void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus) bool default_is_ne2k = g_str_equal(mc->default_nic, TYPE_ISA_NE2000); NICInfo *nd; - rom_set_order_override(FW_CFG_ORDER_OVERRIDE_NIC); - while ((nd = qemu_find_nic_info(TYPE_ISA_NE2000, default_is_ne2k, NULL))) { pc_init_ne2k_isa(isa_bus, nd, &error_fatal); } /* Anything remaining should be a PCI NIC */ - pci_init_nic_devices(pci_bus, mc->default_nic); - - rom_reset_order_override(); + if (pci_bus) { + pci_init_nic_devices(pci_bus, mc->default_nic); + } } void pc_i8259_create(ISABus *isa_bus, qemu_irq *i8259_irqs) @@ -1678,7 +1678,7 @@ static void pc_machine_initfn(Object *obj) pcms->sata_enabled = true; pcms->i8042_enabled = true; pcms->max_fw_size = 8 * MiB; -#ifdef CONFIG_HPET +#if defined(CONFIG_HPET) pcms->hpet_enabled = true; #endif pcms->fd_bootchk = true; @@ -1691,17 +1691,14 @@ static void pc_machine_initfn(Object *obj) if (pcmc->pci_enabled) { cxl_machine_init(obj, &pcms->cxl_devices_state); } - - pcms->machine_done.notify = pc_machine_done; - qemu_add_machine_init_done_notifier(&pcms->machine_done); } -static void pc_machine_reset(MachineState *machine, ShutdownCause reason) +static void pc_machine_reset(MachineState *machine, ResetType type) { CPUState *cs; X86CPU *cpu; - qemu_devices_reset(reason); + qemu_devices_reset(type); /* Reset APIC after devices have been reset to cancel * any changes that qemu_devices_reset() might have done. 
@@ -1716,7 +1713,7 @@ static void pc_machine_reset(MachineState *machine, ShutdownCause reason) static void pc_machine_wakeup(MachineState *machine) { cpu_synchronize_all_states(); - pc_machine_reset(machine, SHUTDOWN_CAUSE_NONE); + pc_machine_reset(machine, RESET_TYPE_WAKEUP); cpu_synchronize_all_post_reset(); } @@ -1739,7 +1736,7 @@ static bool pc_hotplug_allowed(MachineState *ms, DeviceState *dev, Error **errp) return true; } -static void pc_machine_class_init(ObjectClass *oc, void *data) +static void pc_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); X86MachineClass *x86mc = X86_MACHINE_CLASS(oc); @@ -1775,6 +1772,10 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) mc->nvdimm_supported = true; mc->smp_props.dies_supported = true; mc->smp_props.modules_supported = true; + mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L1D] = true; + mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L1I] = true; + mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L2] = true; + mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L3] = true; mc->default_ram_id = "pc.ram"; pcmc->default_smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_AUTO; @@ -1807,6 +1808,8 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) object_class_property_add_bool(oc, PC_MACHINE_I8042, pc_machine_get_i8042, pc_machine_set_i8042); + object_class_property_set_description(oc, PC_MACHINE_I8042, + "Enable/disable Intel 8042 PS/2 controller emulation"); object_class_property_add_bool(oc, "default-bus-bypass-iommu", pc_machine_get_default_bus_bypass_iommu, @@ -1827,6 +1830,18 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) object_class_property_add_bool(oc, "fd-bootchk", pc_machine_get_fd_bootchk, pc_machine_set_fd_bootchk); + +#if defined(CONFIG_IGVM) + object_class_property_add_link(oc, "igvm-cfg", + TYPE_IGVM_CFG, + offsetof(X86MachineState, igvm), + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + object_class_property_set_description(oc, "igvm-cfg", + "Set IGVM configuration"); +#endif + + } static const TypeInfo pc_machine_info = { @@ -1837,7 +1852,7 @@ static const TypeInfo pc_machine_info = { .instance_init = pc_machine_initfn, .class_size = sizeof(PCMachineClass), .class_init = pc_machine_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } }, diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index d9e69243b4a..c03324281bd 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -43,15 +43,16 @@ #include "hw/ide/isa.h" #include "hw/ide/pci.h" #include "hw/irq.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "hw/i386/kvm/clock.h" #include "hw/sysbus.h" #include "hw/i2c/smbus_eeprom.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/acpi/acpi.h" +#include "hw/vfio/types.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "sysemu/xen.h" +#include "system/xen.h" #ifdef CONFIG_XEN #include #include "hw/xen/xen_pt.h" @@ -61,10 +62,11 @@ #include "hw/xen/xen.h" #include "migration/global_state.h" #include "migration/misc.h" -#include "sysemu/runstate.h" -#include "sysemu/numa.h" +#include "system/runstate.h" +#include "system/numa.h" #include "hw/hyperv/vmbus-bridge.h" #include "hw/mem/nvdimm.h" +#include "hw/uefi/var-service-api.h" #include "hw/i386/acpi-build.h" #include "target/i386/cpu.h" @@ -76,6 +78,13 @@ static const int ide_iobase2[MAX_IDE_BUS] = { 0x3f6, 0x376 }; static const int ide_irq[MAX_IDE_BUS] = { 14, 15 
}; #endif +static GlobalProperty pc_piix_compat_defaults[] = { + { TYPE_RAMFB_DEVICE, "use-legacy-x86-rom", "true" }, + { TYPE_VFIO_PCI_NOHOTPLUG, "use-legacy-x86-rom", "true" }, +}; +static const size_t pc_piix_compat_defaults_len = + G_N_ELEMENTS(pc_piix_compat_defaults); + /* * Return the global irq number corresponding to a given device irq * pin. We could also use the bus number to have a more precise mapping. @@ -284,6 +293,8 @@ static void pc_init1(MachineState *machine, const char *pci_type) pcms->idebus[0] = qdev_get_child_bus(dev, "ide.0"); pcms->idebus[1] = qdev_get_child_bus(dev, "ide.1"); } else { + uint32_t irq; + isa_bus = isa_bus_new(NULL, system_memory, system_io, &error_abort); isa_bus_register_input_irqs(isa_bus, x86ms->gsi); @@ -291,6 +302,9 @@ static void pc_init1(MachineState *machine, const char *pci_type) x86ms->rtc = isa_new(TYPE_MC146818_RTC); qdev_prop_set_int32(DEVICE(x86ms->rtc), "base_year", 2000); isa_realize_and_unref(x86ms->rtc, isa_bus, &error_fatal); + irq = object_property_get_uint(OBJECT(x86ms->rtc), "irq", + &error_fatal); + isa_connect_gpio_out(ISA_DEVICE(x86ms->rtc), 0, irq); i8257_dma_init(OBJECT(machine), isa_bus, 0); pcms->hpet_enabled = false; @@ -310,11 +324,6 @@ static void pc_init1(MachineState *machine, const char *pci_type) pc_vga_init(isa_bus, pcmc->pci_enabled ? pcms->pcibus : NULL); - assert(pcms->vmport != ON_OFF_AUTO__MAX); - if (pcms->vmport == ON_OFF_AUTO_AUTO) { - pcms->vmport = xen_enabled() ? ON_OFF_AUTO_OFF : ON_OFF_AUTO_ON; - } - /* init basic PC hardware */ pc_basic_device_init(pcms, isa_bus, x86ms->gsi, x86ms->rtc, !MACHINE_CLASS(pcmc)->no_floppy, 0x4); @@ -365,6 +374,16 @@ static void pc_init1(MachineState *machine, const char *pci_type) x86_nvdimm_acpi_dsmio, x86ms->fw_cfg, OBJECT(pcms)); } + +#if defined(CONFIG_IGVM) + /* Apply guest state from IGVM if supplied */ + if (x86ms->igvm) { + if (IGVM_CFG_GET_CLASS(x86ms->igvm) + ->process(x86ms->igvm, machine->cgs, false, &error_fatal) < 0) { + g_assert_not_reached(); + } + } +#endif } typedef enum PCSouthBridgeOption { @@ -451,7 +470,10 @@ static void pc_i440fx_init(MachineState *machine) } #define DEFINE_I440FX_MACHINE(major, minor) \ - DEFINE_PC_VER_MACHINE(pc_i440fx, "pc-i440fx", pc_i440fx_init, major, minor); + DEFINE_PC_VER_MACHINE(pc_i440fx, "pc-i440fx", pc_i440fx_init, false, NULL, major, minor); + +#define DEFINE_I440FX_MACHINE_AS_LATEST(major, minor) \ + DEFINE_PC_VER_MACHINE(pc_i440fx, "pc-i440fx", pc_i440fx_init, true, "pc", major, minor); static void pc_i440fx_machine_options(MachineClass *m) { @@ -470,6 +492,7 @@ static void pc_i440fx_machine_options(MachineClass *m) m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE); + machine_class_allow_dynamic_sysbus_dev(m, TYPE_UEFI_VARS_X64); object_class_property_add_enum(oc, "x-south-bridge", "PCSouthBridgeOption", &PCSouthBridgeOption_lookup, @@ -477,13 +500,40 @@ static void pc_i440fx_machine_options(MachineClass *m) pc_set_south_bridge); object_class_property_set_description(oc, "x-south-bridge", "Use a different south bridge than PIIX3"); + compat_props_add(m->compat_props, + pc_piix_compat_defaults, pc_piix_compat_defaults_len); } -static void pc_i440fx_machine_9_1_options(MachineClass *m) +static void pc_i440fx_machine_10_1_options(MachineClass *m) { pc_i440fx_machine_options(m); - m->alias = "pc"; - m->is_default = true; +} + +DEFINE_I440FX_MACHINE_AS_LATEST(10, 1); + +static void 
pc_i440fx_machine_10_0_options(MachineClass *m) +{ + pc_i440fx_machine_10_1_options(m); + compat_props_add(m->compat_props, hw_compat_10_0, hw_compat_10_0_len); + compat_props_add(m->compat_props, pc_compat_10_0, pc_compat_10_0_len); +} + +DEFINE_I440FX_MACHINE(10, 0); + +static void pc_i440fx_machine_9_2_options(MachineClass *m) +{ + pc_i440fx_machine_10_0_options(m); + compat_props_add(m->compat_props, hw_compat_9_2, hw_compat_9_2_len); + compat_props_add(m->compat_props, pc_compat_9_2, pc_compat_9_2_len); +} + +DEFINE_I440FX_MACHINE(9, 2); + +static void pc_i440fx_machine_9_1_options(MachineClass *m) +{ + pc_i440fx_machine_9_2_options(m); + compat_props_add(m->compat_props, hw_compat_9_1, hw_compat_9_1_len); + compat_props_add(m->compat_props, pc_compat_9_1, pc_compat_9_1_len); } DEFINE_I440FX_MACHINE(9, 1); @@ -493,8 +543,6 @@ static void pc_i440fx_machine_9_0_options(MachineClass *m) PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_i440fx_machine_9_1_options(m); - m->alias = NULL; - m->is_default = false; m->smbios_memory_device_size = 16 * GiB; compat_props_add(m->compat_props, hw_compat_9_0, hw_compat_9_0_len); @@ -755,32 +803,6 @@ static void pc_i440fx_machine_2_6_options(MachineClass *m) DEFINE_I440FX_MACHINE(2, 6); -static void pc_i440fx_machine_2_5_options(MachineClass *m) -{ - X86MachineClass *x86mc = X86_MACHINE_CLASS(m); - - pc_i440fx_machine_2_6_options(m); - x86mc->save_tsc_khz = false; - m->legacy_fw_cfg_order = 1; - compat_props_add(m->compat_props, hw_compat_2_5, hw_compat_2_5_len); - compat_props_add(m->compat_props, pc_compat_2_5, pc_compat_2_5_len); -} - -DEFINE_I440FX_MACHINE(2, 5); - -static void pc_i440fx_machine_2_4_options(MachineClass *m) -{ - PCMachineClass *pcmc = PC_MACHINE_CLASS(m); - - pc_i440fx_machine_2_5_options(m); - m->hw_version = "2.4.0"; - pcmc->broken_reserved_end = true; - compat_props_add(m->compat_props, hw_compat_2_4, hw_compat_2_4_len); - compat_props_add(m->compat_props, pc_compat_2_4, pc_compat_2_4_len); -} - -DEFINE_I440FX_MACHINE(2, 4); - #ifdef CONFIG_ISAPC static void isapc_machine_options(MachineClass *m) { diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 9d108b194e4..b309b2b378d 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -35,8 +35,8 @@ #include "hw/loader.h" #include "hw/i2c/smbus_eeprom.h" #include "hw/rtc/mc146818rtc.h" -#include "sysemu/tcg.h" -#include "sysemu/kvm.h" +#include "system/tcg.h" +#include "system/kvm.h" #include "hw/i386/kvm/clock.h" #include "hw/pci-host/q35.h" #include "hw/pci/pcie_port.h" @@ -45,6 +45,7 @@ #include "hw/i386/pc.h" #include "hw/i386/amd_iommu.h" #include "hw/i386/intel_iommu.h" +#include "hw/vfio/types.h" #include "hw/virtio/virtio-iommu.h" #include "hw/display/ramfb.h" #include "hw/ide/pci.h" @@ -55,9 +56,10 @@ #include "hw/usb/hcd-uhci.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "sysemu/numa.h" +#include "system/numa.h" #include "hw/hyperv/vmbus-bridge.h" #include "hw/mem/nvdimm.h" +#include "hw/uefi/var-service-api.h" #include "hw/i386/acpi-build.h" #include "target/i386/cpu.h" @@ -66,6 +68,8 @@ static GlobalProperty pc_q35_compat_defaults[] = { { TYPE_VIRTIO_IOMMU_PCI, "aw-bits", "39" }, + { TYPE_RAMFB_DEVICE, "use-legacy-x86-rom", "true" }, + { TYPE_VFIO_PCI_NOHOTPLUG, "use-legacy-x86-rom", "true" }, }; static const size_t pc_q35_compat_defaults_len = G_N_ELEMENTS(pc_q35_compat_defaults); @@ -276,11 +280,6 @@ static void pc_q35_init(MachineState *machine) x86_register_ferr_irq(x86ms->gsi[13]); } - assert(pcms->vmport != ON_OFF_AUTO__MAX); - if 
(pcms->vmport == ON_OFF_AUTO_AUTO) { - pcms->vmport = ON_OFF_AUTO_ON; - } - /* init basic PC hardware */ pc_basic_device_init(pcms, isa_bus, x86ms->gsi, x86ms->rtc, !mc->no_floppy, 0xff0104); @@ -329,13 +328,26 @@ static void pc_q35_init(MachineState *machine) x86_nvdimm_acpi_dsmio, x86ms->fw_cfg, OBJECT(pcms)); } + +#if defined(CONFIG_IGVM) + /* Apply guest state from IGVM if supplied */ + if (x86ms->igvm) { + if (IGVM_CFG_GET_CLASS(x86ms->igvm) + ->process(x86ms->igvm, machine->cgs, false, &error_fatal) < 0) { + g_assert_not_reached(); + } + } +#endif } #define DEFINE_Q35_MACHINE(major, minor) \ - DEFINE_PC_VER_MACHINE(pc_q35, "pc-q35", pc_q35_init, major, minor); + DEFINE_PC_VER_MACHINE(pc_q35, "pc-q35", pc_q35_init, false, NULL, major, minor); + +#define DEFINE_Q35_MACHINE_AS_LATEST(major, minor) \ + DEFINE_PC_VER_MACHINE(pc_q35, "pc-q35", pc_q35_init, false, "q35", major, minor); #define DEFINE_Q35_MACHINE_BUGFIX(major, minor, micro) \ - DEFINE_PC_VER_MACHINE(pc_q35, "pc-q35", pc_q35_init, major, minor, micro); + DEFINE_PC_VER_MACHINE(pc_q35, "pc-q35", pc_q35_init, false, NULL, major, minor, micro); static void pc_q35_machine_options(MachineClass *m) { @@ -357,14 +369,41 @@ static void pc_q35_machine_options(MachineClass *m) machine_class_allow_dynamic_sysbus_dev(m, TYPE_INTEL_IOMMU_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE); + machine_class_allow_dynamic_sysbus_dev(m, TYPE_UEFI_VARS_X64); compat_props_add(m->compat_props, pc_q35_compat_defaults, pc_q35_compat_defaults_len); } -static void pc_q35_machine_9_1_options(MachineClass *m) +static void pc_q35_machine_10_1_options(MachineClass *m) { pc_q35_machine_options(m); - m->alias = "q35"; +} + +DEFINE_Q35_MACHINE_AS_LATEST(10, 1); + +static void pc_q35_machine_10_0_options(MachineClass *m) +{ + pc_q35_machine_10_1_options(m); + compat_props_add(m->compat_props, hw_compat_10_0, hw_compat_10_0_len); + compat_props_add(m->compat_props, pc_compat_10_0, pc_compat_10_0_len); +} + +DEFINE_Q35_MACHINE(10, 0); + +static void pc_q35_machine_9_2_options(MachineClass *m) +{ + pc_q35_machine_10_0_options(m); + compat_props_add(m->compat_props, hw_compat_9_2, hw_compat_9_2_len); + compat_props_add(m->compat_props, pc_compat_9_2, pc_compat_9_2_len); +} + +DEFINE_Q35_MACHINE(9, 2); + +static void pc_q35_machine_9_1_options(MachineClass *m) +{ + pc_q35_machine_9_2_options(m); + compat_props_add(m->compat_props, hw_compat_9_1, hw_compat_9_1_len); + compat_props_add(m->compat_props, pc_compat_9_1, pc_compat_9_1_len); } DEFINE_Q35_MACHINE(9, 1); @@ -373,7 +412,6 @@ static void pc_q35_machine_9_0_options(MachineClass *m) { PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_q35_machine_9_1_options(m); - m->alias = NULL; m->smbios_memory_device_size = 16 * GiB; compat_props_add(m->compat_props, hw_compat_9_0, hw_compat_9_0_len); compat_props_add(m->compat_props, pc_compat_9_0, pc_compat_9_0_len); @@ -647,29 +685,3 @@ static void pc_q35_machine_2_6_options(MachineClass *m) } DEFINE_Q35_MACHINE(2, 6); - -static void pc_q35_machine_2_5_options(MachineClass *m) -{ - X86MachineClass *x86mc = X86_MACHINE_CLASS(m); - - pc_q35_machine_2_6_options(m); - x86mc->save_tsc_khz = false; - m->legacy_fw_cfg_order = 1; - compat_props_add(m->compat_props, hw_compat_2_5, hw_compat_2_5_len); - compat_props_add(m->compat_props, pc_compat_2_5, pc_compat_2_5_len); -} - -DEFINE_Q35_MACHINE(2, 5); - -static void pc_q35_machine_2_4_options(MachineClass *m) -{ - PCMachineClass *pcmc = 
PC_MACHINE_CLASS(m); - - pc_q35_machine_2_5_options(m); - m->hw_version = "2.4.0"; - pcmc->broken_reserved_end = true; - compat_props_add(m->compat_props, hw_compat_2_4, hw_compat_2_4_len); - compat_props_add(m->compat_props, pc_compat_2_4, pc_compat_2_4_len); -} - -DEFINE_Q35_MACHINE(2, 4); diff --git a/hw/i386/pc_sysfw.c b/hw/i386/pc_sysfw.c index ef80281d28b..1a12b635ad9 100644 --- a/hw/i386/pc_sysfw.c +++ b/hw/i386/pc_sysfw.c @@ -25,7 +25,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/error-report.h" #include "qemu/option.h" #include "qemu/units.h" @@ -35,8 +35,9 @@ #include "hw/loader.h" #include "hw/qdev-properties.h" #include "hw/block/flash.h" -#include "sysemu/kvm.h" -#include "sev.h" +#include "system/kvm.h" +#include "target/i386/sev.h" +#include "kvm/tdx.h" #define FLASH_SECTOR_SIZE 4096 @@ -219,7 +220,13 @@ void pc_system_firmware_init(PCMachineState *pcms, BlockBackend *pflash_blk[ARRAY_SIZE(pcms->flash)]; if (!pcmc->pci_enabled) { - x86_bios_rom_init(X86_MACHINE(pcms), "bios.bin", rom_memory, true); + /* + * If an IGVM file is specified then the firmware must be provided + * in the IGVM file. + */ + if (!X86_MACHINE(pcms)->igvm) { + x86_bios_rom_init(X86_MACHINE(pcms), "bios.bin", rom_memory, true); + } return; } @@ -239,8 +246,13 @@ void pc_system_firmware_init(PCMachineState *pcms, } if (!pflash_blk[0]) { - /* Machine property pflash0 not set, use ROM mode */ - x86_bios_rom_init(X86_MACHINE(pcms), "bios.bin", rom_memory, false); + /* + * Machine property pflash0 not set, use ROM mode unless using IGVM, + * in which case the firmware must be provided by the IGVM file. + */ + if (!X86_MACHINE(pcms)->igvm) { + x86_bios_rom_init(X86_MACHINE(pcms), "bios.bin", rom_memory, false); + } } else { if (kvm_enabled() && !kvm_readonly_mem_enabled()) { /* @@ -256,6 +268,20 @@ void pc_system_firmware_init(PCMachineState *pcms, } pc_system_flash_cleanup_unused(pcms); + + /* + * The user should not have specified any pflash devices when using IGVM + * to configure the guest. 
+ */ + if (X86_MACHINE(pcms)->igvm) { + for (i = 0; i < ARRAY_SIZE(pcms->flash); i++) { + if (pcms->flash[i]) { + error_report("pflash devices cannot be configured when " + "using IGVM"); + exit(1); + } + } + } } void x86_firmware_configure(hwaddr gpa, void *ptr, int size) @@ -280,5 +306,11 @@ void x86_firmware_configure(hwaddr gpa, void *ptr, int size) } sev_encrypt_flash(gpa, ptr, size, &error_fatal); + } else if (is_tdx_vm()) { + ret = tdx_parse_tdvf(ptr, size); + if (ret) { + error_report("failed to parse TDVF for TDX VM"); + exit(1); + } } } diff --git a/hw/i386/pc_sysfw_ovmf.c b/hw/i386/pc_sysfw_ovmf.c index 07a4c267faa..da947c3ca41 100644 --- a/hw/i386/pc_sysfw_ovmf.c +++ b/hw/i386/pc_sysfw_ovmf.c @@ -26,6 +26,7 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "hw/i386/pc.h" +#include "exec/target_page.h" #include "cpu.h" #define OVMF_TABLE_FOOTER_GUID "96b582de-1fb2-45f7-baea-a366c55a082d" diff --git a/hw/i386/port92.c b/hw/i386/port92.c index b25157f6e4b..39b6f3178fe 100644 --- a/hw/i386/port92.c +++ b/hw/i386/port92.c @@ -7,7 +7,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "migration/vmstate.h" #include "hw/irq.h" #include "hw/isa/isa.h" @@ -97,12 +97,12 @@ static void port92_realizefn(DeviceState *dev, Error **errp) isa_register_ioport(isadev, &s->io, 0x92); } -static void port92_class_initfn(ObjectClass *klass, void *data) +static void port92_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = port92_realizefn; - dc->reset = port92_reset; + device_class_set_legacy_reset(dc, port92_reset); dc->vmsd = &vmstate_port92_isa; /* * Reason: unlike ordinary ISA devices, this one needs additional diff --git a/hw/i386/sgx-epc.c b/hw/i386/sgx-epc.c index d664829d354..2b3b2823b5f 100644 --- a/hw/i386/sgx-epc.c +++ b/hw/i386/sgx-epc.c @@ -17,14 +17,13 @@ #include "qapi/error.h" #include "qapi/visitor.h" #include "target/i386/cpu.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" -static Property sgx_epc_properties[] = { +static const Property sgx_epc_properties[] = { DEFINE_PROP_UINT64(SGX_EPC_ADDR_PROP, SGXEPCDevice, addr, 0), DEFINE_PROP_UINT32(SGX_EPC_NUMA_NODE_PROP, SGXEPCDevice, node, 0), DEFINE_PROP_LINK(SGX_EPC_MEMDEV_PROP, SGXEPCDevice, hostmem, TYPE_MEMORY_BACKEND_EPC, HostMemoryBackendEpc *), - DEFINE_PROP_END_OF_LIST(), }; static void sgx_epc_get_size(Object *obj, Visitor *v, const char *name, @@ -148,7 +147,7 @@ static void sgx_epc_md_fill_device_info(const MemoryDeviceState *md, info->type = MEMORY_DEVICE_INFO_KIND_SGX_EPC; } -static void sgx_epc_class_init(ObjectClass *oc, void *data) +static void sgx_epc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc); @@ -174,7 +173,7 @@ static const TypeInfo sgx_epc_info = { .instance_init = sgx_epc_init, .class_init = sgx_epc_class_init, .class_size = sizeof(DeviceClass), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_MEMORY_DEVICE }, { } }, diff --git a/hw/i386/sgx-stub.c b/hw/i386/sgx-stub.c index 38ff75e9f37..d295e54d239 100644 --- a/hw/i386/sgx-stub.c +++ b/hw/i386/sgx-stub.c @@ -3,20 +3,20 @@ #include "monitor/hmp-target.h" #include "hw/i386/pc.h" #include "hw/i386/sgx-epc.h" +#include "qapi/qapi-commands-misc-i386.h" #include "qapi/error.h" -#include "qapi/qapi-commands-misc-target.h" void sgx_epc_build_srat(GArray *table_data) { } -SGXInfo *qmp_query_sgx(Error 
**errp) +SgxInfo *qmp_query_sgx(Error **errp) { error_setg(errp, "SGX support is not compiled in"); return NULL; } -SGXInfo *qmp_query_sgx_capabilities(Error **errp) +SgxInfo *qmp_query_sgx_capabilities(Error **errp) { error_setg(errp, "SGX support is not compiled in"); return NULL; diff --git a/hw/i386/sgx.c b/hw/i386/sgx.c index 4900dd414a1..e2801546ad6 100644 --- a/hw/i386/sgx.c +++ b/hw/i386/sgx.c @@ -19,10 +19,10 @@ #include "monitor/hmp-target.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "qapi/qapi-commands-misc-target.h" -#include "exec/address-spaces.h" -#include "sysemu/hw_accel.h" -#include "sysemu/reset.h" +#include "qapi/qapi-commands-misc-i386.h" +#include "system/address-spaces.h" +#include "system/hw_accel.h" +#include "system/reset.h" #include #include "hw/acpi/aml-build.h" @@ -84,10 +84,10 @@ static uint64_t sgx_calc_section_metric(uint64_t low, uint64_t high) ((high & MAKE_64BIT_MASK(0, 20)) << 32); } -static SGXEPCSectionList *sgx_calc_host_epc_sections(void) +static SgxEpcSectionList *sgx_calc_host_epc_sections(void) { - SGXEPCSectionList *head = NULL, **tail = &head; - SGXEPCSection *section; + SgxEpcSectionList *head = NULL, **tail = &head; + SgxEpcSection *section; uint32_t i, type; uint32_t eax, ebx, ecx, edx; uint32_t j = 0; @@ -104,7 +104,7 @@ static SGXEPCSectionList *sgx_calc_host_epc_sections(void) break; } - section = g_new0(SGXEPCSection, 1); + section = g_new0(SgxEpcSection, 1); section->node = j++; section->size = sgx_calc_section_metric(ecx, edx); QAPI_LIST_APPEND(tail, section); @@ -153,9 +153,9 @@ static void sgx_epc_reset(void *opaque) } } -SGXInfo *qmp_query_sgx_capabilities(Error **errp) +SgxInfo *qmp_query_sgx_capabilities(Error **errp) { - SGXInfo *info = NULL; + SgxInfo *info = NULL; uint32_t eax, ebx, ecx, edx; Error *local_err = NULL; @@ -166,7 +166,7 @@ SGXInfo *qmp_query_sgx_capabilities(Error **errp) return NULL; } - info = g_new0(SGXInfo, 1); + info = g_new0(SgxInfo, 1); host_cpuid(0x7, 0, &eax, &ebx, &ecx, &edx); info->sgx = ebx & (1U << 2) ? 
true : false; @@ -183,17 +183,17 @@ SGXInfo *qmp_query_sgx_capabilities(Error **errp) return info; } -static SGXEPCSectionList *sgx_get_epc_sections_list(void) +static SgxEpcSectionList *sgx_get_epc_sections_list(void) { GSList *device_list = sgx_epc_get_device_list(); - SGXEPCSectionList *head = NULL, **tail = &head; - SGXEPCSection *section; + SgxEpcSectionList *head = NULL, **tail = &head; + SgxEpcSection *section; for (; device_list; device_list = device_list->next) { DeviceState *dev = device_list->data; Object *obj = OBJECT(dev); - section = g_new0(SGXEPCSection, 1); + section = g_new0(SgxEpcSection, 1); section->node = object_property_get_uint(obj, SGX_EPC_NUMA_NODE_PROP, &error_abort); section->size = object_property_get_uint(obj, SGX_EPC_SIZE_PROP, @@ -205,9 +205,9 @@ static SGXEPCSectionList *sgx_get_epc_sections_list(void) return head; } -SGXInfo *qmp_query_sgx(Error **errp) +SgxInfo *qmp_query_sgx(Error **errp) { - SGXInfo *info = NULL; + SgxInfo *info = NULL; X86MachineState *x86ms; PCMachineState *pcms = (PCMachineState *)object_dynamic_cast(qdev_get_machine(), @@ -223,7 +223,7 @@ SGXInfo *qmp_query_sgx(Error **errp) return NULL; } - info = g_new0(SGXInfo, 1); + info = g_new0(SgxInfo, 1); info->sgx = true; info->sgx1 = true; @@ -237,8 +237,8 @@ SGXInfo *qmp_query_sgx(Error **errp) void hmp_info_sgx(Monitor *mon, const QDict *qdict) { Error *err = NULL; - SGXEPCSectionList *section_list, *section; - g_autoptr(SGXInfo) info = qmp_query_sgx(&err); + SgxEpcSectionList *section_list, *section; + g_autoptr(SgxInfo) info = qmp_query_sgx(&err); uint64_t size = 0; if (err) { diff --git a/hw/i386/tdvf-hob.c b/hw/i386/tdvf-hob.c new file mode 100644 index 00000000000..782b3d15787 --- /dev/null +++ b/hw/i386/tdvf-hob.c @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2025 Intel Corporation + * Author: Isaku Yamahata + * + * Xiaoyao Li + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "standard-headers/uefi/uefi.h" +#include "hw/pci/pcie_host.h" +#include "tdvf-hob.h" + +typedef struct TdvfHob { + hwaddr hob_addr; + void *ptr; + int size; + + /* working area */ + void *current; + void *end; +} TdvfHob; + +static uint64_t tdvf_current_guest_addr(const TdvfHob *hob) +{ + return hob->hob_addr + (hob->current - hob->ptr); +} + +static void tdvf_align(TdvfHob *hob, size_t align) +{ + hob->current = QEMU_ALIGN_PTR_UP(hob->current, align); +} + +static void *tdvf_get_area(TdvfHob *hob, uint64_t size) +{ + void *ret; + + if (hob->current + size > hob->end) { + error_report("TD_HOB overrun, size = 0x%" PRIx64, size); + exit(1); + } + + ret = hob->current; + hob->current += size; + tdvf_align(hob, 8); + return ret; +} + +static void tdvf_hob_add_memory_resources(TdxGuest *tdx, TdvfHob *hob) +{ + EFI_HOB_RESOURCE_DESCRIPTOR *region; + EFI_RESOURCE_ATTRIBUTE_TYPE attr; + EFI_RESOURCE_TYPE resource_type; + + TdxRamEntry *e; + int i; + + for (i = 0; i < tdx->nr_ram_entries; i++) { + e = &tdx->ram_entries[i]; + + if (e->type == TDX_RAM_UNACCEPTED) { + resource_type = EFI_RESOURCE_MEMORY_UNACCEPTED; + attr = EFI_RESOURCE_ATTRIBUTE_TDVF_UNACCEPTED; + } else if (e->type == TDX_RAM_ADDED) { + resource_type = EFI_RESOURCE_SYSTEM_MEMORY; + attr = EFI_RESOURCE_ATTRIBUTE_TDVF_PRIVATE; + } else { + error_report("unknown TDX_RAM_ENTRY type %d", e->type); + exit(1); + } + + region = tdvf_get_area(hob, sizeof(*region)); + *region = (EFI_HOB_RESOURCE_DESCRIPTOR) { + .Header = { + .HobType = EFI_HOB_TYPE_RESOURCE_DESCRIPTOR, + .HobLength = 
cpu_to_le16(sizeof(*region)), + .Reserved = cpu_to_le32(0), + }, + .Owner = EFI_HOB_OWNER_ZERO, + .ResourceType = cpu_to_le32(resource_type), + .ResourceAttribute = cpu_to_le32(attr), + .PhysicalStart = cpu_to_le64(e->address), + .ResourceLength = cpu_to_le64(e->length), + }; + } +} + +void tdvf_hob_create(TdxGuest *tdx, TdxFirmwareEntry *td_hob) +{ + TdvfHob hob = { + .hob_addr = td_hob->address, + .size = td_hob->size, + .ptr = td_hob->mem_ptr, + + .current = td_hob->mem_ptr, + .end = td_hob->mem_ptr + td_hob->size, + }; + + EFI_HOB_GENERIC_HEADER *last_hob; + EFI_HOB_HANDOFF_INFO_TABLE *hit; + + /* Note, Efi{Free}Memory{Bottom,Top} are ignored, leave 'em zeroed. */ + hit = tdvf_get_area(&hob, sizeof(*hit)); + *hit = (EFI_HOB_HANDOFF_INFO_TABLE) { + .Header = { + .HobType = EFI_HOB_TYPE_HANDOFF, + .HobLength = cpu_to_le16(sizeof(*hit)), + .Reserved = cpu_to_le32(0), + }, + .Version = cpu_to_le32(EFI_HOB_HANDOFF_TABLE_VERSION), + .BootMode = cpu_to_le32(0), + .EfiMemoryTop = cpu_to_le64(0), + .EfiMemoryBottom = cpu_to_le64(0), + .EfiFreeMemoryTop = cpu_to_le64(0), + .EfiFreeMemoryBottom = cpu_to_le64(0), + .EfiEndOfHobList = cpu_to_le64(0), /* initialized later */ + }; + + tdvf_hob_add_memory_resources(tdx, &hob); + + last_hob = tdvf_get_area(&hob, sizeof(*last_hob)); + *last_hob = (EFI_HOB_GENERIC_HEADER) { + .HobType = EFI_HOB_TYPE_END_OF_HOB_LIST, + .HobLength = cpu_to_le16(sizeof(*last_hob)), + .Reserved = cpu_to_le32(0), + }; + hit->EfiEndOfHobList = tdvf_current_guest_addr(&hob); +} diff --git a/hw/i386/tdvf-hob.h b/hw/i386/tdvf-hob.h new file mode 100644 index 00000000000..4fc6a3740a5 --- /dev/null +++ b/hw/i386/tdvf-hob.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef HW_I386_TD_HOB_H +#define HW_I386_TD_HOB_H + +#include "hw/i386/tdvf.h" +#include "target/i386/kvm/tdx.h" + +void tdvf_hob_create(TdxGuest *tdx, TdxFirmwareEntry *td_hob); + +#define EFI_RESOURCE_ATTRIBUTE_TDVF_PRIVATE \ + (EFI_RESOURCE_ATTRIBUTE_PRESENT | \ + EFI_RESOURCE_ATTRIBUTE_INITIALIZED | \ + EFI_RESOURCE_ATTRIBUTE_TESTED) + +#define EFI_RESOURCE_ATTRIBUTE_TDVF_UNACCEPTED \ + (EFI_RESOURCE_ATTRIBUTE_PRESENT | \ + EFI_RESOURCE_ATTRIBUTE_INITIALIZED | \ + EFI_RESOURCE_ATTRIBUTE_TESTED) + +#define EFI_RESOURCE_ATTRIBUTE_TDVF_MMIO \ + (EFI_RESOURCE_ATTRIBUTE_PRESENT | \ + EFI_RESOURCE_ATTRIBUTE_INITIALIZED | \ + EFI_RESOURCE_ATTRIBUTE_UNCACHEABLE) + +#endif diff --git a/hw/i386/tdvf.c b/hw/i386/tdvf.c new file mode 100644 index 00000000000..645d9d1294b --- /dev/null +++ b/hw/i386/tdvf.c @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2025 Intel Corporation + * Author: Isaku Yamahata + * + * Xiaoyao Li + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" + +#include "hw/i386/pc.h" +#include "hw/i386/tdvf.h" +#include "system/kvm.h" + +#define TDX_METADATA_OFFSET_GUID "e47a6535-984a-4798-865e-4685a7bf8ec2" +#define TDX_METADATA_VERSION 1 +#define TDVF_SIGNATURE 0x46564454 /* TDVF as little endian */ +#define TDVF_ALIGNMENT 4096 + +/* + * the raw structs read from TDVF keeps the name convention in + * TDVF Design Guide spec. 
+ */ +typedef struct { + uint32_t DataOffset; + uint32_t RawDataSize; + uint64_t MemoryAddress; + uint64_t MemoryDataSize; + uint32_t Type; + uint32_t Attributes; +} TdvfSectionEntry; + +typedef struct { + uint32_t Signature; + uint32_t Length; + uint32_t Version; + uint32_t NumberOfSectionEntries; + TdvfSectionEntry SectionEntries[]; +} TdvfMetadata; + +struct tdx_metadata_offset { + uint32_t offset; +}; + +static TdvfMetadata *tdvf_get_metadata(void *flash_ptr, int size) +{ + TdvfMetadata *metadata; + uint32_t offset = 0; + uint8_t *data; + + if ((uint32_t) size != size) { + return NULL; + } + + if (pc_system_ovmf_table_find(TDX_METADATA_OFFSET_GUID, &data, NULL)) { + offset = size - le32_to_cpu(((struct tdx_metadata_offset *)data)->offset); + + if (offset + sizeof(*metadata) > size) { + return NULL; + } + } else { + error_report("Cannot find TDX_METADATA_OFFSET_GUID"); + return NULL; + } + + metadata = flash_ptr + offset; + + /* Finally, verify the signature to determine if this is a TDVF image. */ + metadata->Signature = le32_to_cpu(metadata->Signature); + if (metadata->Signature != TDVF_SIGNATURE) { + error_report("Invalid TDVF signature in metadata!"); + return NULL; + } + + /* Sanity check that the TDVF doesn't overlap its own metadata. */ + metadata->Length = le32_to_cpu(metadata->Length); + if (offset + metadata->Length > size) { + return NULL; + } + + /* Only version 1 is supported/defined. */ + metadata->Version = le32_to_cpu(metadata->Version); + if (metadata->Version != TDX_METADATA_VERSION) { + return NULL; + } + + return metadata; +} + +static int tdvf_parse_and_check_section_entry(const TdvfSectionEntry *src, + TdxFirmwareEntry *entry) +{ + entry->data_offset = le32_to_cpu(src->DataOffset); + entry->data_len = le32_to_cpu(src->RawDataSize); + entry->address = le64_to_cpu(src->MemoryAddress); + entry->size = le64_to_cpu(src->MemoryDataSize); + entry->type = le32_to_cpu(src->Type); + entry->attributes = le32_to_cpu(src->Attributes); + + /* sanity check */ + if (entry->size < entry->data_len) { + error_report("Broken metadata RawDataSize 0x%x MemoryDataSize 0x%"PRIx64, + entry->data_len, entry->size); + return -1; + } + if (!QEMU_IS_ALIGNED(entry->address, TDVF_ALIGNMENT)) { + error_report("MemoryAddress 0x%"PRIx64" not page aligned", entry->address); + return -1; + } + if (!QEMU_IS_ALIGNED(entry->size, TDVF_ALIGNMENT)) { + error_report("MemoryDataSize 0x%"PRIx64" not page aligned", entry->size); + return -1; + } + + switch (entry->type) { + case TDVF_SECTION_TYPE_BFV: + case TDVF_SECTION_TYPE_CFV: + /* The sections that must be copied from firmware image to TD memory */ + if (entry->data_len == 0) { + error_report("%d section with RawDataSize == 0", entry->type); + return -1; + } + break; + case TDVF_SECTION_TYPE_TD_HOB: + case TDVF_SECTION_TYPE_TEMP_MEM: + /* The sections that no need to be copied from firmware image */ + if (entry->data_len != 0) { + error_report("%d section with RawDataSize 0x%x != 0", + entry->type, entry->data_len); + return -1; + } + break; + default: + error_report("TDVF contains unsupported section type %d", entry->type); + return -1; + } + + return 0; +} + +int tdvf_parse_metadata(TdxFirmware *fw, void *flash_ptr, int size) +{ + g_autofree TdvfSectionEntry *sections = NULL; + TdvfMetadata *metadata; + ssize_t entries_size; + int i; + + metadata = tdvf_get_metadata(flash_ptr, size); + if (!metadata) { + return -EINVAL; + } + + /* load and parse metadata entries */ + fw->nr_entries = le32_to_cpu(metadata->NumberOfSectionEntries); + if (fw->nr_entries < 
2) { + error_report("Invalid number of fw entries (%u) in TDVF Metadata", + fw->nr_entries); + return -EINVAL; + } + + entries_size = fw->nr_entries * sizeof(TdvfSectionEntry); + if (metadata->Length != sizeof(*metadata) + entries_size) { + error_report("TDVF metadata len (0x%x) mismatch, expected (0x%x)", + metadata->Length, + (uint32_t)(sizeof(*metadata) + entries_size)); + return -EINVAL; + } + + fw->entries = g_new(TdxFirmwareEntry, fw->nr_entries); + sections = g_new(TdvfSectionEntry, fw->nr_entries); + + memcpy(sections, (void *)metadata + sizeof(*metadata), entries_size); + + for (i = 0; i < fw->nr_entries; i++) { + if (tdvf_parse_and_check_section_entry(&sections[i], &fw->entries[i])) { + goto err; + } + } + + fw->mem_ptr = flash_ptr; + return 0; + +err: + fw->entries = 0; + g_free(fw->entries); + return -EINVAL; +} diff --git a/hw/i386/trace-events b/hw/i386/trace-events index 53c02d7ac85..ac9e1a10aa4 100644 --- a/hw/i386/trace-events +++ b/hw/i386/trace-events @@ -68,6 +68,7 @@ vtd_frr_new(int index, uint64_t hi, uint64_t lo) "index %d high 0x%"PRIx64" low vtd_warn_invalid_qi_tail(uint16_t tail) "tail 0x%"PRIx16 vtd_warn_ir_vector(uint16_t sid, int index, int vec, int target) "sid 0x%"PRIx16" index %d vec %d (should be: %d)" vtd_warn_ir_trigger(uint16_t sid, int index, int trig, int target) "sid 0x%"PRIx16" index %d trigger %d (should be: %d)" +vtd_reset_exit(void) "" # amd_iommu.c amdvi_evntlog_fail(uint64_t addr, uint32_t head) "error: fail to write at addr 0x%"PRIx64" + offset 0x%"PRIx32 diff --git a/hw/i386/vapic.c b/hw/i386/vapic.c index f5b1db7e5fc..0c1c92c4793 100644 --- a/hw/i386/vapic.c +++ b/hw/i386/vapic.c @@ -11,12 +11,13 @@ #include "qemu/osdep.h" #include "qemu/module.h" -#include "sysemu/sysemu.h" -#include "sysemu/cpus.h" -#include "sysemu/hw_accel.h" -#include "sysemu/kvm.h" -#include "sysemu/runstate.h" -#include "exec/address-spaces.h" +#include "exec/target_page.h" +#include "system/system.h" +#include "system/cpus.h" +#include "system/hw_accel.h" +#include "system/kvm.h" +#include "system/runstate.h" +#include "system/address-spaces.h" #include "hw/i386/apic_internal.h" #include "hw/sysbus.h" #include "hw/boards.h" @@ -718,7 +719,7 @@ static uint64_t vapic_read(void *opaque, hwaddr addr, unsigned size) static const MemoryRegionOps vapic_ops = { .write = vapic_write, .read = vapic_read, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; static void vapic_realize(DeviceState *dev, Error **errp) @@ -846,11 +847,11 @@ static const VMStateDescription vmstate_vapic = { } }; -static void vapic_class_init(ObjectClass *klass, void *data) +static void vapic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = vapic_reset; + device_class_set_legacy_reset(dc, vapic_reset); dc->vmsd = &vmstate_vapic; dc->realize = vapic_realize; } diff --git a/hw/i386/vmmouse.c b/hw/i386/vmmouse.c index a8d014d09a8..3896159b055 100644 --- a/hw/i386/vmmouse.c +++ b/hw/i386/vmmouse.c @@ -317,17 +317,16 @@ static void vmmouse_realizefn(DeviceState *dev, Error **errp) vmport_register(VMPORT_CMD_VMMOUSE_DATA, vmmouse_ioport_read, s); } -static Property vmmouse_properties[] = { +static const Property vmmouse_properties[] = { DEFINE_PROP_LINK("i8042", VMMouseState, i8042, TYPE_I8042, ISAKBDState *), - DEFINE_PROP_END_OF_LIST(), }; -static void vmmouse_class_initfn(ObjectClass *klass, void *data) +static void vmmouse_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = 
vmmouse_realizefn; - dc->reset = vmmouse_reset; + device_class_set_legacy_reset(dc, vmmouse_reset); dc->vmsd = &vmstate_vmmouse; device_class_set_props(dc, vmmouse_properties); set_bit(DEVICE_CATEGORY_INPUT, dc->categories); diff --git a/hw/i386/vmport.c b/hw/i386/vmport.c index 7cc75dbc6da..6d93457c526 100644 --- a/hw/i386/vmport.c +++ b/hw/i386/vmport.c @@ -33,9 +33,9 @@ #include "hw/i386/vmport.h" #include "hw/qdev-properties.h" #include "hw/boards.h" -#include "sysemu/sysemu.h" -#include "sysemu/hw_accel.h" -#include "sysemu/qtest.h" +#include "system/system.h" +#include "system/hw_accel.h" +#include "system/qtest.h" #include "qemu/log.h" #include "trace.h" #include "qom/object.h" @@ -252,7 +252,7 @@ static void vmport_realizefn(DeviceState *dev, Error **errp) } } -static Property vmport_properties[] = { +static const Property vmport_properties[] = { /* Used to enforce compatibility for migration */ DEFINE_PROP_BIT("x-read-set-eax", VMPortState, compat_flags, VMPORT_COMPAT_READ_SET_EAX_BIT, true), @@ -284,11 +284,9 @@ static Property vmport_properties[] = { * 5 - ACE 1.x (Deprecated) */ DEFINE_PROP_UINT8("vmware-vmx-type", VMPortState, vmware_vmx_type, 2), - - DEFINE_PROP_END_OF_LIST(), }; -static void vmport_class_initfn(ObjectClass *klass, void *data) +static void vmport_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/i386/x86-common.c b/hw/i386/x86-common.c index c0c66a0eb52..b1b5f11e739 100644 --- a/hw/i386/x86-common.c +++ b/hw/i386/x86-common.c @@ -26,9 +26,9 @@ #include "qemu/units.h" #include "qemu/datadir.h" #include "qapi/error.h" -#include "sysemu/numa.h" -#include "sysemu/sysemu.h" -#include "sysemu/xen.h" +#include "system/numa.h" +#include "system/system.h" +#include "system/xen.h" #include "trace.h" #include "hw/i386/x86.h" @@ -44,6 +44,7 @@ #include "standard-headers/asm-x86/bootparam.h" #include CONFIG_DEVICES #include "kvm/kvm_i386.h" +#include "kvm/tdx.h" #ifdef CONFIG_XEN_EMU #include "hw/xen/xen.h" @@ -248,9 +249,7 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev, CPUX86State *env = &cpu->env; MachineState *ms = MACHINE(hotplug_dev); X86MachineState *x86ms = X86_MACHINE(hotplug_dev); - unsigned int smp_cores = ms->smp.cores; - unsigned int smp_threads = ms->smp.threads; - X86CPUTopoInfo topo_info; + X86CPUTopoInfo *topo_info = &env->topo_info; if (!object_dynamic_cast(OBJECT(cpu), ms->cpu_type)) { error_setg(errp, "Invalid CPU type, expected cpu type: '%s'", @@ -269,16 +268,14 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev, } } - init_topo_info(&topo_info, x86ms); + init_topo_info(topo_info, x86ms); if (ms->smp.modules > 1) { - env->nr_modules = ms->smp.modules; - set_bit(CPU_TOPO_LEVEL_MODULE, env->avail_cpu_topo); + set_bit(CPU_TOPOLOGY_LEVEL_MODULE, env->avail_cpu_topo); } if (ms->smp.dies > 1) { - env->nr_dies = ms->smp.dies; - set_bit(CPU_TOPO_LEVEL_DIE, env->avail_cpu_topo); + set_bit(CPU_TOPOLOGY_LEVEL_DIE, env->avail_cpu_topo); } /* @@ -329,17 +326,17 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev, if (cpu->core_id < 0) { error_setg(errp, "CPU core-id is not set"); return; - } else if (cpu->core_id > (smp_cores - 1)) { + } else if (cpu->core_id > (ms->smp.cores - 1)) { error_setg(errp, "Invalid CPU core-id: %u must be in range 0:%u", - cpu->core_id, smp_cores - 1); + cpu->core_id, ms->smp.cores - 1); return; } if (cpu->thread_id < 0) { error_setg(errp, "CPU thread-id is not set"); return; - } else if (cpu->thread_id > (smp_threads - 1)) { + } else if (cpu->thread_id > 
(ms->smp.threads - 1)) { error_setg(errp, "Invalid CPU thread-id: %u must be in range 0:%u", - cpu->thread_id, smp_threads - 1); + cpu->thread_id, ms->smp.threads - 1); return; } @@ -348,12 +345,12 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev, topo_ids.module_id = cpu->module_id; topo_ids.core_id = cpu->core_id; topo_ids.smt_id = cpu->thread_id; - cpu->apic_id = x86_apicid_from_topo_ids(&topo_info, &topo_ids); + cpu->apic_id = x86_apicid_from_topo_ids(topo_info, &topo_ids); } cpu_slot = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, &idx); if (!cpu_slot) { - x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids); + x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids); error_setg(errp, "Invalid CPU [socket: %u, die: %u, module: %u, core: %u, thread: %u]" @@ -376,7 +373,7 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev, /* TODO: move socket_id/core_id/thread_id checks into x86_cpu_realizefn() * once -smp refactoring is complete and there will be CPU private * CPUState::nr_cores and CPUState::nr_threads fields instead of globals */ - x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids); + x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids); if (cpu->socket_id != -1 && cpu->socket_id != topo_ids.pkg_id) { error_setg(errp, "property socket-id: %u doesn't match set apic-id:" " 0x%x (socket-id: %u)", cpu->socket_id, cpu->apic_id, @@ -450,8 +447,27 @@ static long get_file_size(FILE *f) void gsi_handler(void *opaque, int n, int level) { GSIState *s = opaque; + bool bypass_ioapic = false; trace_x86_gsi_interrupt(n, level); + +#ifdef CONFIG_XEN_EMU + /* + * Xen delivers the GSI to the Legacy PIC (not that Legacy PIC + * routing actually works properly under Xen). And then to + * *either* the PIRQ handling or the I/OAPIC depending on whether + * the former wants it. + * + * Additionally, this hook allows the Xen event channel GSI to + * work around QEMU's lack of support for shared level interrupts, + * by keeping track of the externally driven state of the pin and + * implementing a logical OR with the state of the evtchn GSI. + */ + if (xen_mode == XEN_EMULATE) { + bypass_ioapic = xen_evtchn_set_gsi(n, &level); + } +#endif + switch (n) { case 0 ... ISA_NUM_IRQS - 1: if (s->i8259_irq[n]) { @@ -460,18 +476,9 @@ void gsi_handler(void *opaque, int n, int level) } /* fall through */ case ISA_NUM_IRQS ... IOAPIC_NUM_PINS - 1: -#ifdef CONFIG_XEN_EMU - /* - * Xen delivers the GSI to the Legacy PIC (not that Legacy PIC - * routing actually works properly under Xen). And then to - * *either* the PIRQ handling or the I/OAPIC depending on - * whether the former wants it. - */ - if (xen_mode == XEN_EMULATE && xen_evtchn_set_gsi(n, level)) { - break; + if (!bypass_ioapic) { + qemu_set_irq(s->ioapic_irq[n], level); } -#endif - qemu_set_irq(s->ioapic_irq[n], level); break; case IO_APIC_SECONDARY_IRQBASE ... 
IO_APIC_SECONDARY_IRQBASE + IOAPIC_NUM_PINS - 1: @@ -586,7 +593,7 @@ static bool load_elfboot(const char *kernel_filename, uint64_t elf_low, elf_high; int kernel_size; - if (ldl_p(header) != 0x464c457f) { + if (ldl_le_p(header) != 0x464c457f) { return false; /* no elfboot */ } @@ -602,8 +609,8 @@ static bool load_elfboot(const char *kernel_filename, uint64_t elf_note_type = XEN_ELFNOTE_PHYS32_ENTRY; kernel_size = load_elf(kernel_filename, read_pvh_start_addr, NULL, &elf_note_type, &elf_entry, - &elf_low, &elf_high, NULL, 0, I386_ELF_MACHINE, - 0, 0); + &elf_low, &elf_high, NULL, + ELFDATA2LSB, I386_ELF_MACHINE, 0, 0); if (kernel_size < 0) { error_report("Error while loading elf kernel"); @@ -665,9 +672,12 @@ void x86_load_linux(X86MachineState *x86ms, exit(1); } - /* kernel protocol version */ - if (ldl_p(header + 0x202) == 0x53726448) { - protocol = lduw_p(header + 0x206); + /* + * kernel protocol version. + * Please see https://www.kernel.org/doc/Documentation/x86/boot.txt + */ + if (ldl_le_p(header + 0x202) == 0x53726448) /* Magic signature "HdrS" */ { + protocol = lduw_le_p(header + 0x206); } else { /* * This could be a multiboot kernel. If it is, let's stop treating it @@ -694,9 +704,11 @@ void x86_load_linux(X86MachineState *x86ms, strlen(kernel_cmdline) + 1); fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline); + setup = g_memdup2(header, sizeof(header)); + fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, sizeof(header)); fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, - header, sizeof(header)); + setup, sizeof(header)); /* load initrd */ if (initrd_filename) { @@ -759,7 +771,7 @@ void x86_load_linux(X86MachineState *x86ms, /* highest address for loading the initrd */ if (protocol >= 0x20c && - lduw_p(header + 0x236) & XLF_CAN_BE_LOADED_ABOVE_4G) { + lduw_le_p(header + 0x236) & XLF_CAN_BE_LOADED_ABOVE_4G) { /* * Linux has supported initrd up to 4 GB for a very long time (2007, * long before XLF_CAN_BE_LOADED_ABOVE_4G which was added in 2013), @@ -778,7 +790,7 @@ void x86_load_linux(X86MachineState *x86ms, */ initrd_max = UINT32_MAX; } else if (protocol >= 0x203) { - initrd_max = ldl_p(header + 0x22c); + initrd_max = ldl_le_p(header + 0x22c); } else { initrd_max = 0x37ffffff; } @@ -794,10 +806,10 @@ void x86_load_linux(X86MachineState *x86ms, sev_load_ctx.cmdline_size = strlen(kernel_cmdline) + 1; if (protocol >= 0x202) { - stl_p(header + 0x228, cmdline_addr); + stl_le_p(header + 0x228, cmdline_addr); } else { - stw_p(header + 0x20, 0xA33F); - stw_p(header + 0x22, cmdline_addr - real_addr); + stw_le_p(header + 0x20, 0xA33F); + stw_le_p(header + 0x22, cmdline_addr - real_addr); } /* handle vga= parameter */ @@ -821,7 +833,7 @@ void x86_load_linux(X86MachineState *x86ms, exit(1); } } - stw_p(header + 0x1fa, video_mode); + stw_le_p(header + 0x1fa, video_mode); } /* loader type */ @@ -836,7 +848,7 @@ void x86_load_linux(X86MachineState *x86ms, /* heap */ if (protocol >= 0x201) { header[0x211] |= 0x80; /* CAN_USE_HEAP */ - stw_p(header + 0x224, cmdline_addr - real_addr - 0x200); + stw_le_p(header + 0x224, cmdline_addr - real_addr - 0x200); } /* load initrd */ @@ -876,8 +888,8 @@ void x86_load_linux(X86MachineState *x86ms, sev_load_ctx.initrd_data = initrd_data; sev_load_ctx.initrd_size = initrd_size; - stl_p(header + 0x218, initrd_addr); - stl_p(header + 0x21c, initrd_size); + stl_le_p(header + 0x218, initrd_addr); + stl_le_p(header + 0x21c, initrd_size); } /* load kernel and setup */ @@ -890,7 +902,6 @@ void x86_load_linux(X86MachineState *x86ms, fprintf(stderr, "qemu: invalid kernel 
header\n"); exit(1); } - kernel_size -= setup_size; setup = g_malloc(setup_size); kernel = g_malloc(kernel_size); @@ -899,6 +910,7 @@ void x86_load_linux(X86MachineState *x86ms, fprintf(stderr, "fread() failed\n"); exit(1); } + fseek(f, 0, SEEK_SET); if (fread(kernel, 1, kernel_size, f) != kernel_size) { fprintf(stderr, "fread() failed\n"); exit(1); @@ -923,7 +935,7 @@ void x86_load_linux(X86MachineState *x86ms, kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size; kernel = g_realloc(kernel, kernel_size); - stq_p(header + 0x250, prot_addr + setup_data_offset); + stq_le_p(header + 0x250, prot_addr + setup_data_offset); setup_data = (struct setup_data *)(kernel + setup_data_offset); setup_data->next = 0; @@ -940,15 +952,16 @@ void x86_load_linux(X86MachineState *x86ms, * kernel on the other side of the fw_cfg interface matches the hash of the * file the user passed in. */ - if (!sev_enabled()) { + if (!sev_enabled() && protocol > 0) { memcpy(setup, header, MIN(sizeof(header), setup_size)); } fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr); - fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); - fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size); - sev_load_ctx.kernel_data = (char *)kernel; - sev_load_ctx.kernel_size = kernel_size; + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size - setup_size); + fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, + kernel + setup_size, kernel_size - setup_size); + sev_load_ctx.kernel_data = (char *)kernel + setup_size; + sev_load_ctx.kernel_size = kernel_size - setup_size; fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size); @@ -956,6 +969,25 @@ void x86_load_linux(X86MachineState *x86ms, sev_load_ctx.setup_data = (char *)setup; sev_load_ctx.setup_size = setup_size; + /* kernel without setup header patches */ + fw_cfg_add_file(fw_cfg, "etc/boot/kernel", kernel, kernel_size); + + if (machine->shim_filename) { + GMappedFile *mapped_file; + GError *gerr = NULL; + + mapped_file = g_mapped_file_new(machine->shim_filename, false, &gerr); + if (!mapped_file) { + fprintf(stderr, "qemu: error reading shim %s: %s\n", + machine->shim_filename, gerr->message); + exit(1); + } + + fw_cfg_add_file(fw_cfg, "etc/boot/shim", + g_mapped_file_get_contents(mapped_file), + g_mapped_file_get_length(mapped_file)); + } + if (sev_enabled()) { sev_add_kernel_loader_hashes(&sev_load_ctx, &error_fatal); } @@ -1004,11 +1036,14 @@ void x86_bios_rom_init(X86MachineState *x86ms, const char *default_firmware, if (machine_require_guest_memfd(MACHINE(x86ms))) { memory_region_init_ram_guest_memfd(&x86ms->bios, NULL, "pc.bios", bios_size, &error_fatal); + if (is_tdx_vm()) { + tdx_set_tdvf_region(&x86ms->bios); + } } else { memory_region_init_ram(&x86ms->bios, NULL, "pc.bios", bios_size, &error_fatal); } - if (sev_enabled()) { + if (sev_enabled() || is_tdx_vm()) { /* * The concept of a "reset" simply doesn't exist for * confidential computing guests, we have to destroy and diff --git a/hw/i386/x86-cpu.c b/hw/i386/x86-cpu.c index ab2920522d1..c876e6709e0 100644 --- a/hw/i386/x86-cpu.c +++ b/hw/i386/x86-cpu.c @@ -21,15 +21,15 @@ * THE SOFTWARE. 
*/ #include "qemu/osdep.h" -#include "sysemu/whpx.h" -#include "sysemu/cpu-timers.h" +#include "system/whpx.h" +#include "system/cpu-timers.h" #include "trace.h" #include "hw/i386/x86.h" #include "target/i386/cpu.h" #include "hw/intc/i8259.h" #include "hw/irq.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" /* TSC handling */ uint64_t cpu_get_tsc(CPUX86State *env) diff --git a/hw/i386/x86-iommu.c b/hw/i386/x86-iommu.c index 60af8962253..d34a6849f4a 100644 --- a/hw/i386/x86-iommu.c +++ b/hw/i386/x86-iommu.c @@ -25,7 +25,7 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "trace.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" void x86_iommu_iec_register_notifier(X86IOMMUState *iommu, iec_notify_fn fn, void *data) @@ -125,15 +125,14 @@ static void x86_iommu_realize(DeviceState *dev, Error **errp) } } -static Property x86_iommu_properties[] = { +static const Property x86_iommu_properties[] = { DEFINE_PROP_ON_OFF_AUTO("intremap", X86IOMMUState, intr_supported, ON_OFF_AUTO_AUTO), DEFINE_PROP_BOOL("device-iotlb", X86IOMMUState, dt_supported, false), DEFINE_PROP_BOOL("pt", X86IOMMUState, pt_supported, true), - DEFINE_PROP_END_OF_LIST(), }; -static void x86_iommu_class_init(ObjectClass *klass, void *data) +static void x86_iommu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = x86_iommu_realize; @@ -147,7 +146,7 @@ bool x86_iommu_ir_supported(X86IOMMUState *s) static const TypeInfo x86_iommu_info = { .name = TYPE_X86_IOMMU_DEVICE, - .parent = TYPE_SYS_BUS_DEVICE, + .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE, .instance_size = sizeof(X86IOMMUState), .class_init = x86_iommu_class_init, .class_size = sizeof(X86IOMMUClass), diff --git a/hw/i386/x86.c b/hw/i386/x86.c index 01fc5e65627..f80533df1c5 100644 --- a/hw/i386/x86.c +++ b/hw/i386/x86.c @@ -27,8 +27,8 @@ #include "qapi/qapi-visit-common.h" #include "qapi/qapi-visit-machine.h" #include "qapi/visitor.h" -#include "sysemu/qtest.h" -#include "sysemu/numa.h" +#include "system/qtest.h" +#include "system/numa.h" #include "trace.h" #include "hw/acpi/aml-build.h" @@ -372,7 +372,7 @@ static void x86_machine_initfn(Object *obj) x86ms->above_4g_mem_start = 4 * GiB; } -static void x86_machine_class_init(ObjectClass *oc, void *data) +static void x86_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); X86MachineClass *x86mc = X86_MACHINE_CLASS(oc); @@ -382,7 +382,6 @@ static void x86_machine_class_init(ObjectClass *oc, void *data) mc->get_default_cpu_node_id = x86_get_default_cpu_node_id; mc->possible_cpu_arch_ids = x86_possible_cpu_arch_ids; mc->kvm_type = x86_kvm_type; - x86mc->save_tsc_khz = true; x86mc->fwcfg_dma_enabled = true; nc->nmi_monitor_handler = x86_nmi; @@ -450,7 +449,7 @@ static const TypeInfo x86_machine_info = { .instance_init = x86_machine_initfn, .class_size = sizeof(X86MachineClass), .class_init = x86_machine_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_NMI }, { } }, diff --git a/hw/i386/xen/meson.build b/hw/i386/xen/meson.build index 3f0df8bc075..c73c62b8e39 100644 --- a/hw/i386/xen/meson.build +++ b/hw/i386/xen/meson.build @@ -4,6 +4,7 @@ i386_ss.add(when: 'CONFIG_XEN', if_true: files( )) i386_ss.add(when: ['CONFIG_XEN', xen], if_true: files( 'xen-hvm.c', + 'xen-pvh.c', )) i386_ss.add(when: 'CONFIG_XEN_BUS', if_true: files( diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c index 4f6446600c5..ceb2242aa71 100644 --- a/hw/i386/xen/xen-hvm.c +++ b/hw/i386/xen/xen-hvm.c 
@@ -10,10 +10,12 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "qemu/error-report.h" #include "qapi/error.h" #include "qapi/qapi-commands-migration.h" #include "trace.h" +#include "hw/hw.h" #include "hw/i386/pc.h" #include "hw/irq.h" #include "hw/i386/apic-msidef.h" @@ -24,6 +26,10 @@ #include "hw/xen/arch_hvm.h" #include #include "exec/target_page.h" +#include "target/i386/cpu.h" +#include "system/runstate.h" +#include "system/xen-mapcache.h" +#include "system/xen.h" static MemoryRegion ram_640k, ram_lo, ram_hi; static MemoryRegion *framebuffer; @@ -614,7 +620,9 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory) state = g_new0(XenIOState, 1); - xen_register_ioreq(state, max_cpus, &xen_memory_listener); + xen_register_ioreq(state, max_cpus, + HVM_IOREQSRV_BUFIOREQ_ATOMIC, + &xen_memory_listener); xen_is_stubdomain = xen_check_stubdomain(state->xenstore); @@ -750,6 +758,4 @@ void arch_handle_ioreq(XenIOState *state, ioreq_t *req) default: hw_error("Invalid ioreq type 0x%x\n", req->type); } - - return; } diff --git a/hw/i386/xen/xen-pvh.c b/hw/i386/xen/xen-pvh.c new file mode 100644 index 00000000000..067f73e9773 --- /dev/null +++ b/hw/i386/xen/xen-pvh.c @@ -0,0 +1,125 @@ +/* + * QEMU Xen PVH x86 Machine + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * Written by Edgar E. Iglesias + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "hw/boards.h" +#include "system/system.h" +#include "hw/xen/arch_hvm.h" +#include +#include "hw/xen/xen-pvh-common.h" +#include "target/i386/cpu.h" + +#define TYPE_XEN_PVH_X86 MACHINE_TYPE_NAME("xenpvh") +OBJECT_DECLARE_SIMPLE_TYPE(XenPVHx86State, XEN_PVH_X86) + +struct XenPVHx86State { + /*< private >*/ + XenPVHMachineState parent; + + DeviceState **cpu; +}; + +static DeviceState *xen_pvh_cpu_new(MachineState *ms, + int64_t apic_id) +{ + Object *cpu = object_new(ms->cpu_type); + + object_property_add_child(OBJECT(ms), "cpu[*]", cpu); + object_property_set_uint(cpu, "apic-id", apic_id, &error_fatal); + qdev_realize(DEVICE(cpu), NULL, &error_fatal); + object_unref(cpu); + + return DEVICE(cpu); +} + +static void xen_pvh_init(MachineState *ms) +{ + XenPVHx86State *xp = XEN_PVH_X86(ms); + int i; + + /* Create dummy cores. This will indirectly create the APIC MSI window. */ + xp->cpu = g_malloc(sizeof xp->cpu[0] * ms->smp.max_cpus); + for (i = 0; i < ms->smp.max_cpus; i++) { + xp->cpu[i] = xen_pvh_cpu_new(ms, i); + } +} + +static void xen_pvh_instance_init(Object *obj) +{ + XenPVHMachineState *s = XEN_PVH_MACHINE(obj); + + /* Default values. */ + s->cfg.ram_low = (MemMapEntry) { 0x0, 0x80000000U }; + s->cfg.ram_high = (MemMapEntry) { 0xC000000000ULL, 0x4000000000ULL }; + s->cfg.pci_intx_irq_base = 16; +} + +/* + * Deliver INTX interrupts to Xen guest. + */ +static void xen_pvh_set_pci_intx_irq(void *opaque, int irq, int level) +{ + /* + * Since QEMU emulates all of the swizziling + * We don't want Xen to do any additional swizzling in + * xen_set_pci_intx_level() so we always set device to 0. 
+ */ + if (xen_set_pci_intx_level(xen_domid, 0, 0, 0, irq, level)) { + error_report("xendevicemodel_set_pci_intx_level failed"); + } +} + +static void xen_pvh_machine_class_init(ObjectClass *oc, const void *data) +{ + XenPVHMachineClass *xpc = XEN_PVH_MACHINE_CLASS(oc); + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Xen PVH x86 machine"; + mc->default_cpu_type = TARGET_DEFAULT_CPU_TYPE; + + /* mc->max_cpus holds the MAX value allowed in the -smp cmd-line opts. */ + mc->max_cpus = HVM_MAX_VCPUS; + + /* We have an implementation specific init to create CPU objects. */ + xpc->init = xen_pvh_init; + + /* Enable buffered IOREQs. */ + xpc->handle_bufioreq = HVM_IOREQSRV_BUFIOREQ_ATOMIC; + + /* + * PCI INTX routing. + * + * We describe the mapping between the 4 INTX interrupt and GSIs + * using xen_set_pci_link_route(). xen_pvh_set_pci_intx_irq is + * used to deliver the interrupt. + */ + xpc->set_pci_intx_irq = xen_pvh_set_pci_intx_irq; + xpc->set_pci_link_route = xen_set_pci_link_route; + + /* List of supported features known to work on PVH x86. */ + xpc->has_pci = true; + + xen_pvh_class_setup_common_props(xpc); +} + +static const TypeInfo xen_pvh_x86_machine_type = { + .name = TYPE_XEN_PVH_X86, + .parent = TYPE_XEN_PVH_MACHINE, + .class_init = xen_pvh_machine_class_init, + .instance_init = xen_pvh_instance_init, + .instance_size = sizeof(XenPVHx86State), +}; + +static void xen_pvh_machine_register_types(void) +{ + type_register_static(&xen_pvh_x86_machine_type); +} + +type_init(xen_pvh_machine_register_types) diff --git a/hw/i386/xen/xen_apic.c b/hw/i386/xen/xen_apic.c index 101e16a7662..f30398fa4ae 100644 --- a/hw/i386/xen/xen_apic.c +++ b/hw/i386/xen/xen_apic.c @@ -36,7 +36,7 @@ static void xen_apic_mem_write(void *opaque, hwaddr addr, static const MemoryRegionOps xen_apic_io_ops = { .read = xen_apic_mem_read, .write = xen_apic_mem_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; static void xen_apic_realize(DeviceState *dev, Error **errp) @@ -76,7 +76,7 @@ static void xen_send_msi(MSIMessage *msi) xen_hvm_inject_msi(msi->address, msi->data); } -static void xen_apic_class_init(ObjectClass *klass, void *data) +static void xen_apic_class_init(ObjectClass *klass, const void *data) { APICCommonClass *k = APIC_COMMON_CLASS(klass); diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c index 708488af32d..c8b852be0cc 100644 --- a/hw/i386/xen/xen_platform.c +++ b/hw/i386/xen/xen_platform.c @@ -30,8 +30,8 @@ #include "migration/vmstate.h" #include "net/net.h" #include "trace.h" -#include "sysemu/xen.h" -#include "sysemu/block-backend.h" +#include "system/xen.h" +#include "system/block-backend.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "qom/object.h" @@ -514,7 +514,7 @@ static void platform_mmio_write(void *opaque, hwaddr addr, static const MemoryRegionOps platform_mmio_handler = { .read = &platform_mmio_read, .write = &platform_mmio_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; static void platform_mmio_setup(PCIXenPlatformState *d) @@ -581,7 +581,7 @@ static void platform_reset(DeviceState *dev) platform_fixed_ioport_reset(s); } -static void xen_platform_class_init(ObjectClass *klass, void *data) +static void xen_platform_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -595,7 +595,7 @@ static void xen_platform_class_init(ObjectClass *klass, void *data) k->revision = 1; 
set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->desc = "XEN platform pci device"; - dc->reset = platform_reset; + device_class_set_legacy_reset(dc, platform_reset); dc->vmsd = &vmstate_xen_platform; } @@ -604,7 +604,7 @@ static const TypeInfo xen_platform_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIXenPlatformState), .class_init = xen_platform_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/i386/xen/xen_pvdevice.c b/hw/i386/xen/xen_pvdevice.c index ed621531d81..87a974ae5a0 100644 --- a/hw/i386/xen/xen_pvdevice.c +++ b/hw/i386/xen/xen_pvdevice.c @@ -115,15 +115,14 @@ static void xen_pv_realize(PCIDevice *pci_dev, Error **errp) &d->mmio); } -static Property xen_pv_props[] = { +static const Property xen_pv_props[] = { DEFINE_PROP_UINT16("vendor-id", XenPVDevice, vendor_id, PCI_VENDOR_ID_XEN), DEFINE_PROP_UINT16("device-id", XenPVDevice, device_id, 0xffff), DEFINE_PROP_UINT8("revision", XenPVDevice, revision, 0x01), DEFINE_PROP_UINT32("size", XenPVDevice, size, 0x400000), - DEFINE_PROP_END_OF_LIST() }; -static void xen_pv_class_init(ObjectClass *klass, void *data) +static void xen_pv_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -140,7 +139,7 @@ static const TypeInfo xen_pv_type_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(XenPVDevice), .class_init = xen_pv_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/ide/Kconfig b/hw/ide/Kconfig index 6dfc5a21292..b55507b836e 100644 --- a/hw/ide/Kconfig +++ b/hw/ide/Kconfig @@ -43,12 +43,6 @@ config IDE_VIA bool select IDE_PCI -config MICRODRIVE - bool - select IDE_BUS - select IDE_DEV - depends on PCMCIA - config AHCI bool select IDE_BUS @@ -60,6 +54,10 @@ config AHCI_ICH9 depends on PCI select AHCI +config AHCI_SYSBUS + bool + select AHCI + config IDE_SII3112 bool select IDE_PCI diff --git a/hw/ide/ahci-allwinner.c b/hw/ide/ahci-allwinner.c index 9620de8ce84..bc7a116a89a 100644 --- a/hw/ide/ahci-allwinner.c +++ b/hw/ide/ahci-allwinner.c @@ -18,7 +18,7 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" #include "qemu/module.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "migration/vmstate.h" #include "hw/ide/ahci-sysbus.h" @@ -103,7 +103,7 @@ static const VMStateDescription vmstate_allwinner_ahci = { } }; -static void allwinner_ahci_class_init(ObjectClass *klass, void *data) +static void allwinner_ahci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/ide/ahci-internal.h b/hw/ide/ahci-internal.h index 7e63ea23102..a318f36811c 100644 --- a/hw/ide/ahci-internal.h +++ b/hw/ide/ahci-internal.h @@ -25,7 +25,6 @@ #define HW_IDE_AHCI_INTERNAL_H #include "hw/ide/ahci.h" -#include "hw/pci/pci_device.h" #include "ide-internal.h" #define AHCI_MEM_BAR_SIZE 0x1000 diff --git a/hw/ide/ahci-sysbus.c b/hw/ide/ahci-sysbus.c new file mode 100644 index 00000000000..210818d0479 --- /dev/null +++ b/hw/ide/ahci-sysbus.c @@ -0,0 +1,90 @@ +/* + * QEMU AHCI Emulation (MMIO-mapped devices) + * + * Copyright (c) 2010 qiaochong@loongson.cn + * Copyright (c) 2010 Roland Elek + * Copyright (c) 2010 Sebastian Herbszt + * Copyright (c) 2010 Alexander Graf + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser 
General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "system/address-spaces.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" + +#include "hw/ide/ahci-sysbus.h" +#include "ahci-internal.h" + +static const VMStateDescription vmstate_sysbus_ahci = { + .name = "sysbus-ahci", + .fields = (const VMStateField[]) { + VMSTATE_AHCI(ahci, SysbusAHCIState), + VMSTATE_END_OF_LIST() + }, +}; + +static void sysbus_ahci_reset(DeviceState *dev) +{ + SysbusAHCIState *s = SYSBUS_AHCI(dev); + + ahci_reset(&s->ahci); +} + +static void sysbus_ahci_init(Object *obj) +{ + SysbusAHCIState *s = SYSBUS_AHCI(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + ahci_init(&s->ahci, DEVICE(obj)); + + sysbus_init_mmio(sbd, &s->ahci.mem); + sysbus_init_irq(sbd, &s->ahci.irq); +} + +static void sysbus_ahci_realize(DeviceState *dev, Error **errp) +{ + SysbusAHCIState *s = SYSBUS_AHCI(dev); + + ahci_realize(&s->ahci, dev, &address_space_memory); +} + +static const Property sysbus_ahci_properties[] = { + DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, ahci.ports, 1), +}; + +static void sysbus_ahci_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sysbus_ahci_realize; + dc->vmsd = &vmstate_sysbus_ahci; + device_class_set_props(dc, sysbus_ahci_properties); + device_class_set_legacy_reset(dc, sysbus_ahci_reset); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo sysbus_ahci_types[] = { + { + .name = TYPE_SYSBUS_AHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SysbusAHCIState), + .instance_init = sysbus_ahci_init, + .class_init = sysbus_ahci_class_init, + }, +}; + +DEFINE_TYPES(sysbus_ahci_types) diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c index bfefad2965d..1303c21cb70 100644 --- a/hw/ide/ahci.c +++ b/hw/ide/ahci.c @@ -23,20 +23,13 @@ #include "qemu/osdep.h" #include "hw/irq.h" -#include "hw/pci/msi.h" -#include "hw/pci/pci.h" -#include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "qemu/error-report.h" #include "qemu/log.h" #include "qemu/main-loop.h" -#include "qemu/module.h" -#include "sysemu/block-backend.h" -#include "sysemu/dma.h" -#include "hw/ide/pci.h" -#include "hw/ide/ahci-pci.h" -#include "hw/ide/ahci-sysbus.h" +#include "system/block-backend.h" +#include "system/dma.h" #include "ahci-internal.h" #include "ide-internal.h" @@ -179,34 +172,6 @@ static uint32_t ahci_port_read(AHCIState *s, int port, int offset) return val; } -static void ahci_irq_raise(AHCIState *s) -{ - DeviceState *dev_state = s->container; - PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state), - TYPE_PCI_DEVICE); - - trace_ahci_irq_raise(s); - - if (pci_dev && msi_enabled(pci_dev)) { - msi_notify(pci_dev, 0); - } else { - qemu_irq_raise(s->irq); - } -} - -static void ahci_irq_lower(AHCIState *s) -{ - DeviceState *dev_state = s->container; - PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state), - TYPE_PCI_DEVICE); - - trace_ahci_irq_lower(s); - - if 
(!pci_dev || !msi_enabled(pci_dev)) { - qemu_irq_lower(s->irq); - } -} - static void ahci_check_irq(AHCIState *s) { int i; @@ -222,9 +187,11 @@ static void ahci_check_irq(AHCIState *s) trace_ahci_check_irq(s, old_irq, s->control_regs.irqstatus); if (s->control_regs.irqstatus && (s->control_regs.ghc & HOST_CTL_IRQ_EN)) { - ahci_irq_raise(s); + trace_ahci_irq_raise(s); + qemu_irq_raise(s->irq); } else { - ahci_irq_lower(s); + trace_ahci_irq_lower(s); + qemu_irq_lower(s->irq); } } @@ -948,7 +915,6 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist, uint64_t sum = 0; int off_idx = -1; int64_t off_pos = -1; - int tbl_entry_size; IDEBus *bus = &ad->port; BusState *qbus = BUS(bus); @@ -976,6 +942,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist, /* Get entries in the PRDT, init a qemu sglist accordingly */ if (prdtl > 0) { AHCI_SG *tbl = (AHCI_SG *)prdt; + int tbl_entry_size = 0; + sum = 0; for (i = 0; i < prdtl; i++) { tbl_entry_size = prdt_tbl_entry_size(&tbl[i]); @@ -1607,7 +1575,6 @@ static const IDEDMAOps ahci_dma_ops = { void ahci_init(AHCIState *s, DeviceState *qdev) { - s->container = qdev; /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */ memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s, "ahci", AHCI_MEM_BAR_SIZE); @@ -1833,70 +1800,6 @@ const VMStateDescription vmstate_ahci = { }, }; -static const VMStateDescription vmstate_sysbus_ahci = { - .name = "sysbus-ahci", - .fields = (const VMStateField[]) { - VMSTATE_AHCI(ahci, SysbusAHCIState), - VMSTATE_END_OF_LIST() - }, -}; - -static void sysbus_ahci_reset(DeviceState *dev) -{ - SysbusAHCIState *s = SYSBUS_AHCI(dev); - - ahci_reset(&s->ahci); -} - -static void sysbus_ahci_init(Object *obj) -{ - SysbusAHCIState *s = SYSBUS_AHCI(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - - ahci_init(&s->ahci, DEVICE(obj)); - - sysbus_init_mmio(sbd, &s->ahci.mem); - sysbus_init_irq(sbd, &s->ahci.irq); -} - -static void sysbus_ahci_realize(DeviceState *dev, Error **errp) -{ - SysbusAHCIState *s = SYSBUS_AHCI(dev); - - ahci_realize(&s->ahci, dev, &address_space_memory); -} - -static Property sysbus_ahci_properties[] = { - DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, ahci.ports, 1), - DEFINE_PROP_END_OF_LIST(), -}; - -static void sysbus_ahci_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->realize = sysbus_ahci_realize; - dc->vmsd = &vmstate_sysbus_ahci; - device_class_set_props(dc, sysbus_ahci_properties); - dc->reset = sysbus_ahci_reset; - set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); -} - -static const TypeInfo sysbus_ahci_info = { - .name = TYPE_SYSBUS_AHCI, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(SysbusAHCIState), - .instance_init = sysbus_ahci_init, - .class_init = sysbus_ahci_class_init, -}; - -static void sysbus_ahci_register_types(void) -{ - type_register_static(&sysbus_ahci_info); -} - -type_init(sysbus_ahci_register_types) - void ahci_ide_create_devs(AHCIState *ahci, DriveInfo **hd) { int i; diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c index fcb6cca1573..a42b7485218 100644 --- a/hw/ide/atapi.c +++ b/hw/ide/atapi.c @@ -26,7 +26,7 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" #include "hw/scsi/scsi.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "scsi/constants.h" #include "ide-internal.h" #include "trace.h" @@ -265,7 +265,7 @@ void ide_atapi_cmd_reply_end(IDEState *s) byte_count_limit--; size = byte_count_limit; } - s->lcyl = size; + s->lcyl = size & 
0xff; s->hcyl = size >> 8; s->elementary_transfer_size = size; /* we cannot transmit more than one sector at a time */ diff --git a/hw/ide/cf.c b/hw/ide/cf.c index 2a425cb0f23..f87cd413b68 100644 --- a/hw/ide/cf.c +++ b/hw/ide/cf.c @@ -24,15 +24,14 @@ static void ide_cf_realize(IDEDevice *dev, Error **errp) ide_dev_initfn(dev, IDE_CFATA, errp); } -static Property ide_cf_properties[] = { +static const Property ide_cf_properties[] = { DEFINE_IDE_DEV_PROPERTIES(), DEFINE_BLOCK_CHS_PROPERTIES(IDEDrive, dev.conf), DEFINE_PROP_BIOS_CHS_TRANS("bios-chs-trans", IDEDrive, dev.chs_trans, BIOS_ATA_TRANSLATION_AUTO), - DEFINE_PROP_END_OF_LIST(), }; -static void ide_cf_class_init(ObjectClass *klass, void *data) +static void ide_cf_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); IDEDeviceClass *k = IDE_DEVICE_CLASS(klass); diff --git a/hw/ide/cmd646.c b/hw/ide/cmd646.c index 8cebd1b63d3..2a59516a9dd 100644 --- a/hw/ide/cmd646.c +++ b/hw/ide/cmd646.c @@ -29,8 +29,8 @@ #include "migration/vmstate.h" #include "qemu/module.h" #include "hw/isa/isa.h" -#include "sysemu/dma.h" -#include "sysemu/reset.h" +#include "system/dma.h" +#include "system/reset.h" #include "hw/ide/pci.h" #include "ide-internal.h" @@ -313,17 +313,16 @@ static void pci_cmd646_ide_exitfn(PCIDevice *dev) } } -static Property cmd646_ide_properties[] = { +static const Property cmd646_ide_properties[] = { DEFINE_PROP_UINT32("secondary", PCIIDEState, secondary, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void cmd646_ide_class_init(ObjectClass *klass, void *data) +static void cmd646_ide_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - dc->reset = cmd646_reset; + device_class_set_legacy_reset(dc, cmd646_reset); dc->vmsd = &vmstate_ide_pci; k->realize = pci_cmd646_ide_realize; k->exit = pci_cmd646_ide_exitfn; diff --git a/hw/ide/core.c b/hw/ide/core.c index 08d92184554..b14983ec54f 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -32,15 +32,15 @@ #include "qemu/timer.h" #include "qemu/hw-version.h" #include "qemu/memalign.h" -#include "sysemu/sysemu.h" -#include "sysemu/blockdev.h" -#include "sysemu/dma.h" +#include "system/system.h" +#include "system/blockdev.h" +#include "system/dma.h" #include "hw/block/block.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qapi/error.h" #include "qemu/cutils.h" -#include "sysemu/replay.h" -#include "sysemu/runstate.h" +#include "system/replay.h" +#include "system/runstate.h" #include "ide-internal.h" #include "trace.h" @@ -968,8 +968,7 @@ static void ide_dma_cb(void *opaque, int ret) BDRV_SECTOR_SIZE, ide_dma_cb, s); break; case IDE_DMA_TRIM: - s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk), - &s->sg, offset, BDRV_SECTOR_SIZE, + s->bus->dma->aiocb = dma_blk_io(&s->sg, offset, BDRV_SECTOR_SIZE, ide_issue_trim, s, ide_dma_cb, s, DMA_DIRECTION_TO_DEVICE); break; diff --git a/hw/ide/ich.c b/hw/ide/ich.c index 9b909c87f33..4cade0d1219 100644 --- a/hw/ide/ich.c +++ b/hw/ide/ich.c @@ -61,13 +61,12 @@ */ #include "qemu/osdep.h" -#include "hw/irq.h" #include "hw/pci/msi.h" #include "hw/pci/pci.h" #include "migration/vmstate.h" #include "qemu/module.h" #include "hw/isa/isa.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/ide/pci.h" #include "hw/ide/ahci-pci.h" #include "ahci-internal.h" @@ -91,6 +90,19 @@ static const VMStateDescription vmstate_ich9_ahci = { }, }; +static void pci_ich9_ahci_update_irq(void *opaque, int irq_num, 
int level) +{ + PCIDevice *pci_dev = opaque; + + if (msi_enabled(pci_dev)) { + if (level) { + msi_notify(pci_dev, 0); + } + } else { + pci_set_irq(pci_dev, level); + } +} + static void pci_ich9_reset(DeviceState *dev) { AHCIPCIState *d = ICH9_AHCI(dev); @@ -102,7 +114,9 @@ static void pci_ich9_ahci_init(Object *obj) { AHCIPCIState *d = ICH9_AHCI(obj); + qemu_init_irq(&d->irq, pci_ich9_ahci_update_irq, d, 0); ahci_init(&d->ahci, DEVICE(obj)); + d->ahci.irq = &d->irq; } static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp) @@ -125,8 +139,6 @@ static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp) /* XXX Software should program this register */ dev->config[0x90] = 1 << 6; /* Address Map Register - AHCI mode */ - d->ahci.irq = pci_allocate_irq(dev); - pci_register_bar(dev, ICH9_IDP_BAR, PCI_BASE_ADDRESS_SPACE_IO, &d->ahci.idp); pci_register_bar(dev, ICH9_MEM_BAR, PCI_BASE_ADDRESS_SPACE_MEMORY, @@ -161,10 +173,9 @@ static void pci_ich9_uninit(PCIDevice *dev) msi_uninit(dev); ahci_uninit(&d->ahci); - qemu_free_irq(d->ahci.irq); } -static void ich_ahci_class_init(ObjectClass *klass, void *data) +static void ich_ahci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -176,7 +187,7 @@ static void ich_ahci_class_init(ObjectClass *klass, void *data) k->revision = 0x02; k->class_id = PCI_CLASS_STORAGE_SATA; dc->vmsd = &vmstate_ich9_ahci; - dc->reset = pci_ich9_reset; + device_class_set_legacy_reset(dc, pci_ich9_reset); set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } @@ -186,7 +197,7 @@ static const TypeInfo ich_ahci_info = { .instance_size = sizeof(AHCIPCIState), .instance_init = pci_ich9_ahci_init, .class_init = ich_ahci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/ide/ide-bus.c b/hw/ide/ide-bus.c index 37d003dd9ad..b24e4d17b48 100644 --- a/hw/ide/ide-bus.c +++ b/hw/ide/ide-bus.c @@ -21,15 +21,15 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/module.h" -#include "sysemu/block-backend.h" -#include "sysemu/blockdev.h" -#include "sysemu/runstate.h" +#include "system/block-backend.h" +#include "system/blockdev.h" +#include "system/runstate.h" #include "ide-internal.h" static char *idebus_get_fw_dev_path(DeviceState *dev); static void idebus_unrealize(BusState *qdev); -static void ide_bus_class_init(ObjectClass *klass, void *data) +static void ide_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); diff --git a/hw/ide/ide-dev.c b/hw/ide/ide-dev.c index 03f79677988..5d478588c61 100644 --- a/hw/ide/ide-dev.c +++ b/hw/ide/ide-dev.c @@ -23,16 +23,15 @@ #include "qemu/error-report.h" #include "qemu/module.h" #include "hw/ide/ide-dev.h" -#include "sysemu/block-backend.h" -#include "sysemu/blockdev.h" -#include "sysemu/sysemu.h" +#include "system/block-backend.h" +#include "system/blockdev.h" +#include "system/system.h" #include "qapi/visitor.h" #include "ide-internal.h" -static Property ide_props[] = { +static const Property ide_props[] = { DEFINE_PROP_UINT32("unit", IDEDevice, unit, -1), DEFINE_PROP_BOOL("win2k-install-hack", IDEDevice, win2k_install_hack, false), - DEFINE_PROP_END_OF_LIST(), }; static void ide_qdev_realize(DeviceState *qdev, Error **errp) @@ -191,16 +190,15 @@ static void ide_cd_realize(IDEDevice *dev, Error **errp) ide_dev_initfn(dev, IDE_CD, errp); } -static Property ide_hd_properties[] = { +static const Property 
ide_hd_properties[] = { DEFINE_IDE_DEV_PROPERTIES(), DEFINE_BLOCK_CHS_PROPERTIES(IDEDrive, dev.conf), DEFINE_PROP_BIOS_CHS_TRANS("bios-chs-trans", IDEDrive, dev.chs_trans, BIOS_ATA_TRANSLATION_AUTO), DEFINE_PROP_UINT16("rotation_rate", IDEDrive, dev.rotation_rate, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void ide_hd_class_init(ObjectClass *klass, void *data) +static void ide_hd_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); IDEDeviceClass *k = IDE_DEVICE_CLASS(klass); @@ -218,12 +216,11 @@ static const TypeInfo ide_hd_info = { .class_init = ide_hd_class_init, }; -static Property ide_cd_properties[] = { +static const Property ide_cd_properties[] = { DEFINE_IDE_DEV_PROPERTIES(), - DEFINE_PROP_END_OF_LIST(), }; -static void ide_cd_class_init(ObjectClass *klass, void *data) +static void ide_cd_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); IDEDeviceClass *k = IDE_DEVICE_CLASS(klass); @@ -241,7 +238,7 @@ static const TypeInfo ide_cd_info = { .class_init = ide_cd_class_init, }; -static void ide_device_class_init(ObjectClass *klass, void *data) +static void ide_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->realize = ide_qdev_realize; diff --git a/hw/ide/isa.c b/hw/ide/isa.c index 934c45887cc..5f418413c18 100644 --- a/hw/ide/isa.c +++ b/hw/ide/isa.c @@ -29,7 +29,7 @@ #include "migration/vmstate.h" #include "qapi/error.h" #include "qemu/module.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/ide/isa.h" #include "qom/object.h" @@ -101,20 +101,19 @@ ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int irqnum, return isadev; } -static Property isa_ide_properties[] = { +static const Property isa_ide_properties[] = { DEFINE_PROP_UINT32("iobase", ISAIDEState, iobase, 0x1f0), DEFINE_PROP_UINT32("iobase2", ISAIDEState, iobase2, 0x3f6), DEFINE_PROP_UINT32("irq", ISAIDEState, irqnum, 14), - DEFINE_PROP_END_OF_LIST(), }; -static void isa_ide_class_initfn(ObjectClass *klass, void *data) +static void isa_ide_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = isa_ide_realizefn; dc->fw_name = "ide"; - dc->reset = isa_ide_reset; + device_class_set_legacy_reset(dc, isa_ide_reset); device_class_set_props(dc, isa_ide_properties); set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } diff --git a/hw/ide/macio.c b/hw/ide/macio.c index e84bf2c9f65..c23bf32d2b1 100644 --- a/hw/ide/macio.c +++ b/hw/ide/macio.c @@ -30,8 +30,8 @@ #include "migration/vmstate.h" #include "qemu/module.h" #include "hw/misc/macio/macio.h" -#include "sysemu/block-backend.h" -#include "sysemu/dma.h" +#include "system/block-backend.h" +#include "system/dma.h" #include "ide-internal.h" @@ -119,9 +119,6 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret) return; done: - dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len, - io->dir, io->dma_len); - if (ret < 0) { block_acct_failed(blk_get_stats(s->blk), &s->acct); } else { @@ -190,8 +187,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret) pmac_ide_transfer_cb, io); break; case IDE_DMA_TRIM: - s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk), &s->sg, - offset, 0x1, ide_issue_trim, s, + s->bus->dma->aiocb = dma_blk_io(&s->sg, offset, 0x1, ide_issue_trim, s, pmac_ide_transfer_cb, io, DMA_DIRECTION_TO_DEVICE); break; @@ -202,9 +198,6 @@ static void pmac_ide_transfer_cb(void *opaque, int ret) return; done: - 
dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len, - io->dir, io->dma_len); - if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) { if (ret < 0) { block_acct_failed(blk_get_stats(s->blk), &s->acct); @@ -465,18 +458,17 @@ static void macio_ide_initfn(Object *obj) qdev_prop_allow_set_link_before_realize, 0); } -static Property macio_ide_properties[] = { +static const Property macio_ide_properties[] = { DEFINE_PROP_UINT32("channel", MACIOIDEState, channel, 0), DEFINE_PROP_UINT32("addr", MACIOIDEState, addr, -1), - DEFINE_PROP_END_OF_LIST(), }; -static void macio_ide_class_init(ObjectClass *oc, void *data) +static void macio_ide_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = macio_ide_realizefn; - dc->reset = macio_ide_reset; + device_class_set_legacy_reset(dc, macio_ide_reset); device_class_set_props(dc, macio_ide_properties); dc->vmsd = &vmstate_pmac; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); diff --git a/hw/ide/meson.build b/hw/ide/meson.build index d09705cac03..ddd70660400 100644 --- a/hw/ide/meson.build +++ b/hw/ide/meson.build @@ -1,5 +1,6 @@ system_ss.add(when: 'CONFIG_AHCI', if_true: files('ahci.c')) system_ss.add(when: 'CONFIG_AHCI_ICH9', if_true: files('ich.c')) +system_ss.add(when: 'CONFIG_AHCI_SYSBUS', if_true: files('ahci-sysbus.c')) system_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('ahci-allwinner.c')) system_ss.add(when: 'CONFIG_IDE_BUS', if_true: files('ide-bus.c')) system_ss.add(when: 'CONFIG_IDE_CF', if_true: files('cf.c')) @@ -13,4 +14,3 @@ system_ss.add(when: 'CONFIG_IDE_PCI', if_true: files('pci.c')) system_ss.add(when: 'CONFIG_IDE_PIIX', if_true: files('piix.c', 'ioport.c')) system_ss.add(when: 'CONFIG_IDE_SII3112', if_true: files('sii3112.c')) system_ss.add(when: 'CONFIG_IDE_VIA', if_true: files('via.c')) -system_ss.add(when: 'CONFIG_MICRODRIVE', if_true: files('microdrive.c')) diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c deleted file mode 100644 index 3bb152b5d33..00000000000 --- a/hw/ide/microdrive.c +++ /dev/null @@ -1,644 +0,0 @@ -/* - * QEMU IDE Emulation: microdrive (CF / PCMCIA) - * - * Copyright (c) 2003 Fabrice Bellard - * Copyright (c) 2006 Openedhand Ltd. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -#include "qemu/osdep.h" -#include "hw/pcmcia.h" -#include "migration/vmstate.h" -#include "qapi/error.h" -#include "qemu/module.h" -#include "sysemu/dma.h" -#include "hw/irq.h" - -#include "qom/object.h" -#include "ide-internal.h" - -#define TYPE_MICRODRIVE "microdrive" -OBJECT_DECLARE_SIMPLE_TYPE(MicroDriveState, MICRODRIVE) - -/***********************************************************/ -/* CF-ATA Microdrive */ - -#define METADATA_SIZE 0x20 - -/* DSCM-1XXXX Microdrive hard disk with CF+ II / PCMCIA interface. */ - -struct MicroDriveState { - /*< private >*/ - PCMCIACardState parent_obj; - /*< public >*/ - - IDEBus bus; - uint32_t attr_base; - uint32_t io_base; - - /* Card state */ - uint8_t opt; - uint8_t stat; - uint8_t pins; - - uint8_t ctrl; - uint16_t io; - uint8_t cycle; -}; - -/* Register bitfields */ -enum md_opt { - OPT_MODE_MMAP = 0, - OPT_MODE_IOMAP16 = 1, - OPT_MODE_IOMAP1 = 2, - OPT_MODE_IOMAP2 = 3, - OPT_MODE = 0x3f, - OPT_LEVIREQ = 0x40, - OPT_SRESET = 0x80, -}; -enum md_cstat { - STAT_INT = 0x02, - STAT_PWRDWN = 0x04, - STAT_XE = 0x10, - STAT_IOIS8 = 0x20, - STAT_SIGCHG = 0x40, - STAT_CHANGED = 0x80, -}; -enum md_pins { - PINS_MRDY = 0x02, - PINS_CRDY = 0x20, -}; -enum md_ctrl { - CTRL_IEN = 0x02, - CTRL_SRST = 0x04, -}; - -static inline void md_interrupt_update(MicroDriveState *s) -{ - PCMCIACardState *card = PCMCIA_CARD(s); - - if (card->slot == NULL) { - return; - } - - qemu_set_irq(card->slot->irq, - !(s->stat & STAT_INT) && /* Inverted */ - !(s->ctrl & (CTRL_IEN | CTRL_SRST)) && - !(s->opt & OPT_SRESET)); -} - -static void md_set_irq(void *opaque, int irq, int level) -{ - MicroDriveState *s = opaque; - - if (level) { - s->stat |= STAT_INT; - } else { - s->stat &= ~STAT_INT; - } - - md_interrupt_update(s); -} - -static void md_reset(DeviceState *dev) -{ - MicroDriveState *s = MICRODRIVE(dev); - - s->opt = OPT_MODE_MMAP; - s->stat = 0; - s->pins = 0; - s->cycle = 0; - s->ctrl = 0; - ide_bus_reset(&s->bus); -} - -static uint8_t md_attr_read(PCMCIACardState *card, uint32_t at) -{ - MicroDriveState *s = MICRODRIVE(card); - PCMCIACardClass *pcc = PCMCIA_CARD_GET_CLASS(card); - - if (at < s->attr_base) { - if (at < pcc->cis_len) { - return pcc->cis[at]; - } else { - return 0x00; - } - } - - at -= s->attr_base; - - switch (at) { - case 0x00: /* Configuration Option Register */ - return s->opt; - case 0x02: /* Card Configuration Status Register */ - if (s->ctrl & CTRL_IEN) { - return s->stat & ~STAT_INT; - } else { - return s->stat; - } - case 0x04: /* Pin Replacement Register */ - return (s->pins & PINS_CRDY) | 0x0c; - case 0x06: /* Socket and Copy Register */ - return 0x00; -#ifdef VERBOSE - default: - printf("%s: Bad attribute space register %02x\n", __func__, at); -#endif - } - - return 0; -} - -static void md_attr_write(PCMCIACardState *card, uint32_t at, uint8_t value) -{ - MicroDriveState *s = MICRODRIVE(card); - - at -= s->attr_base; - - switch (at) { - case 0x00: /* Configuration Option Register */ - s->opt = value & 0xcf; - if (value & OPT_SRESET) { - device_cold_reset(DEVICE(s)); - } - md_interrupt_update(s); - break; - case 0x02: /* Card Configuration Status Register */ - if ((s->stat ^ value) & STAT_PWRDWN) { - s->pins |= PINS_CRDY; - } - s->stat &= 0x82; - s->stat |= value & 0x74; - md_interrupt_update(s); - /* Word 170 in Identify Device must be equal to STAT_XE */ - break; - case 0x04: /* Pin Replacement Register */ - s->pins &= PINS_CRDY; - s->pins |= value & PINS_MRDY; - break; - case 0x06: /* Socket and Copy Register */ - break; - default: - 
printf("%s: Bad attribute space register %02x\n", __func__, at); - } -} - -static uint16_t md_common_read(PCMCIACardState *card, uint32_t at) -{ - MicroDriveState *s = MICRODRIVE(card); - IDEState *ifs; - uint16_t ret; - at -= s->io_base; - - switch (s->opt & OPT_MODE) { - case OPT_MODE_MMAP: - if ((at & ~0x3ff) == 0x400) { - at = 0; - } - break; - case OPT_MODE_IOMAP16: - at &= 0xf; - break; - case OPT_MODE_IOMAP1: - if ((at & ~0xf) == 0x3f0) { - at -= 0x3e8; - } else if ((at & ~0xf) == 0x1f0) { - at -= 0x1f0; - } - break; - case OPT_MODE_IOMAP2: - if ((at & ~0xf) == 0x370) { - at -= 0x368; - } else if ((at & ~0xf) == 0x170) { - at -= 0x170; - } - } - - switch (at) { - case 0x0: /* Even RD Data */ - case 0x8: - return ide_data_readw(&s->bus, 0); - - /* TODO: 8-bit accesses */ - if (s->cycle) { - ret = s->io >> 8; - } else { - s->io = ide_data_readw(&s->bus, 0); - ret = s->io & 0xff; - } - s->cycle = !s->cycle; - return ret; - case 0x9: /* Odd RD Data */ - return s->io >> 8; - case 0xd: /* Error */ - return ide_ioport_read(&s->bus, 0x1); - case 0xe: /* Alternate Status */ - ifs = ide_bus_active_if(&s->bus); - if (ifs->blk) { - return ifs->status; - } else { - return 0; - } - case 0xf: /* Device Address */ - ifs = ide_bus_active_if(&s->bus); - return 0xc2 | ((~ifs->select << 2) & 0x3c); - default: - return ide_ioport_read(&s->bus, at); - } - - return 0; -} - -static void md_common_write(PCMCIACardState *card, uint32_t at, uint16_t value) -{ - MicroDriveState *s = MICRODRIVE(card); - at -= s->io_base; - - switch (s->opt & OPT_MODE) { - case OPT_MODE_MMAP: - if ((at & ~0x3ff) == 0x400) { - at = 0; - } - break; - case OPT_MODE_IOMAP16: - at &= 0xf; - break; - case OPT_MODE_IOMAP1: - if ((at & ~0xf) == 0x3f0) { - at -= 0x3e8; - } else if ((at & ~0xf) == 0x1f0) { - at -= 0x1f0; - } - break; - case OPT_MODE_IOMAP2: - if ((at & ~0xf) == 0x370) { - at -= 0x368; - } else if ((at & ~0xf) == 0x170) { - at -= 0x170; - } - } - - switch (at) { - case 0x0: /* Even WR Data */ - case 0x8: - ide_data_writew(&s->bus, 0, value); - break; - - /* TODO: 8-bit accesses */ - if (s->cycle) { - ide_data_writew(&s->bus, 0, s->io | (value << 8)); - } else { - s->io = value & 0xff; - } - s->cycle = !s->cycle; - break; - case 0x9: - s->io = value & 0xff; - s->cycle = !s->cycle; - break; - case 0xd: /* Features */ - ide_ioport_write(&s->bus, 0x1, value); - break; - case 0xe: /* Device Control */ - s->ctrl = value; - if (value & CTRL_SRST) { - device_cold_reset(DEVICE(s)); - } - md_interrupt_update(s); - break; - default: - if (s->stat & STAT_PWRDWN) { - s->pins |= PINS_CRDY; - s->stat &= ~STAT_PWRDWN; - } - ide_ioport_write(&s->bus, at, value); - } -} - -static const VMStateDescription vmstate_microdrive = { - .name = "microdrive", - .version_id = 3, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_UINT8(opt, MicroDriveState), - VMSTATE_UINT8(stat, MicroDriveState), - VMSTATE_UINT8(pins, MicroDriveState), - VMSTATE_UINT8(ctrl, MicroDriveState), - VMSTATE_UINT16(io, MicroDriveState), - VMSTATE_UINT8(cycle, MicroDriveState), - VMSTATE_IDE_BUS(bus, MicroDriveState), - VMSTATE_IDE_DRIVES(bus.ifs, MicroDriveState), - VMSTATE_END_OF_LIST() - } -}; - -static const uint8_t dscm1xxxx_cis[0x14a] = { - [0x000] = CISTPL_DEVICE, /* 5V Device Information */ - [0x002] = 0x03, /* Tuple length = 4 bytes */ - [0x004] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ - [0x006] = 0x01, /* Size = 2K bytes */ - [0x008] = CISTPL_ENDMARK, - - [0x00a] = CISTPL_DEVICE_OC, /* Additional Device Information */ - [0x00c] 
= 0x04, /* Tuple length = 4 byest */ - [0x00e] = 0x03, /* Conditions: Ext = 0, Vcc 3.3V, MWAIT = 1 */ - [0x010] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ - [0x012] = 0x01, /* Size = 2K bytes */ - [0x014] = CISTPL_ENDMARK, - - [0x016] = CISTPL_JEDEC_C, /* JEDEC ID */ - [0x018] = 0x02, /* Tuple length = 2 bytes */ - [0x01a] = 0xdf, /* PC Card ATA with no Vpp required */ - [0x01c] = 0x01, - - [0x01e] = CISTPL_MANFID, /* Manufacture ID */ - [0x020] = 0x04, /* Tuple length = 4 bytes */ - [0x022] = 0xa4, /* TPLMID_MANF = 00a4 (IBM) */ - [0x024] = 0x00, - [0x026] = 0x00, /* PLMID_CARD = 0000 */ - [0x028] = 0x00, - - [0x02a] = CISTPL_VERS_1, /* Level 1 Version */ - [0x02c] = 0x12, /* Tuple length = 23 bytes */ - [0x02e] = 0x04, /* Major Version = JEIDA 4.2 / PCMCIA 2.1 */ - [0x030] = 0x01, /* Minor Version = 1 */ - [0x032] = 'I', - [0x034] = 'B', - [0x036] = 'M', - [0x038] = 0x00, - [0x03a] = 'm', - [0x03c] = 'i', - [0x03e] = 'c', - [0x040] = 'r', - [0x042] = 'o', - [0x044] = 'd', - [0x046] = 'r', - [0x048] = 'i', - [0x04a] = 'v', - [0x04c] = 'e', - [0x04e] = 0x00, - [0x050] = CISTPL_ENDMARK, - - [0x052] = CISTPL_FUNCID, /* Function ID */ - [0x054] = 0x02, /* Tuple length = 2 bytes */ - [0x056] = 0x04, /* TPLFID_FUNCTION = Fixed Disk */ - [0x058] = 0x01, /* TPLFID_SYSINIT: POST = 1, ROM = 0 */ - - [0x05a] = CISTPL_FUNCE, /* Function Extension */ - [0x05c] = 0x02, /* Tuple length = 2 bytes */ - [0x05e] = 0x01, /* TPLFE_TYPE = Disk Device Interface */ - [0x060] = 0x01, /* TPLFE_DATA = PC Card ATA Interface */ - - [0x062] = CISTPL_FUNCE, /* Function Extension */ - [0x064] = 0x03, /* Tuple length = 3 bytes */ - [0x066] = 0x02, /* TPLFE_TYPE = Basic PC Card ATA Interface */ - [0x068] = 0x08, /* TPLFE_DATA: Rotating, Unique, Single */ - [0x06a] = 0x0f, /* TPLFE_DATA: Sleep, Standby, Idle, Auto */ - - [0x06c] = CISTPL_CONFIG, /* Configuration */ - [0x06e] = 0x05, /* Tuple length = 5 bytes */ - [0x070] = 0x01, /* TPCC_RASZ = 2 bytes, TPCC_RMSZ = 1 byte */ - [0x072] = 0x07, /* TPCC_LAST = 7 */ - [0x074] = 0x00, /* TPCC_RADR = 0200 */ - [0x076] = 0x02, - [0x078] = 0x0f, /* TPCC_RMSK = 200, 202, 204, 206 */ - - [0x07a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x07c] = 0x0b, /* Tuple length = 11 bytes */ - [0x07e] = 0xc0, /* TPCE_INDX = Memory Mode, Default, Iface */ - [0x080] = 0xc0, /* TPCE_IF = Memory, no BVDs, no WP, READY */ - [0x082] = 0xa1, /* TPCE_FS = Vcc only, no I/O, Memory, Misc */ - [0x084] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ - [0x086] = 0x55, /* NomV: 5.0 V */ - [0x088] = 0x4d, /* MinV: 4.5 V */ - [0x08a] = 0x5d, /* MaxV: 5.5 V */ - [0x08c] = 0x4e, /* Peakl: 450 mA */ - [0x08e] = 0x08, /* TPCE_MS = 1 window, 1 byte, Host address */ - [0x090] = 0x00, /* Window descriptor: Window length = 0 */ - [0x092] = 0x20, /* TPCE_MI: support power down mode, RW */ - - [0x094] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x096] = 0x06, /* Tuple length = 6 bytes */ - [0x098] = 0x00, /* TPCE_INDX = Memory Mode, no Default */ - [0x09a] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ - [0x09c] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ - [0x09e] = 0xb5, /* NomV: 3.3 V */ - [0x0a0] = 0x1e, - [0x0a2] = 0x3e, /* Peakl: 350 mA */ - - [0x0a4] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x0a6] = 0x0d, /* Tuple length = 13 bytes */ - [0x0a8] = 0xc1, /* TPCE_INDX = I/O and Memory Mode, Default */ - [0x0aa] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ - [0x0ac] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc 
*/ - [0x0ae] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ - [0x0b0] = 0x55, /* NomV: 5.0 V */ - [0x0b2] = 0x4d, /* MinV: 4.5 V */ - [0x0b4] = 0x5d, /* MaxV: 5.5 V */ - [0x0b6] = 0x4e, /* Peakl: 450 mA */ - [0x0b8] = 0x64, /* TPCE_IO = 16-byte boundary, 16/8 accesses */ - [0x0ba] = 0xf0, /* TPCE_IR = MASK, Level, Pulse, Share */ - [0x0bc] = 0xff, /* IRQ0..IRQ7 supported */ - [0x0be] = 0xff, /* IRQ8..IRQ15 supported */ - [0x0c0] = 0x20, /* TPCE_MI = support power down mode */ - - [0x0c2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x0c4] = 0x06, /* Tuple length = 6 bytes */ - [0x0c6] = 0x01, /* TPCE_INDX = I/O and Memory Mode */ - [0x0c8] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ - [0x0ca] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ - [0x0cc] = 0xb5, /* NomV: 3.3 V */ - [0x0ce] = 0x1e, - [0x0d0] = 0x3e, /* Peakl: 350 mA */ - - [0x0d2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x0d4] = 0x12, /* Tuple length = 18 bytes */ - [0x0d6] = 0xc2, /* TPCE_INDX = I/O Primary Mode */ - [0x0d8] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ - [0x0da] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ - [0x0dc] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ - [0x0de] = 0x55, /* NomV: 5.0 V */ - [0x0e0] = 0x4d, /* MinV: 4.5 V */ - [0x0e2] = 0x5d, /* MaxV: 5.5 V */ - [0x0e4] = 0x4e, /* Peakl: 450 mA */ - [0x0e6] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ - [0x0e8] = 0x61, /* Range: 2 fields, 2 bytes addr, 1 byte len */ - [0x0ea] = 0xf0, /* Field 1 address = 0x01f0 */ - [0x0ec] = 0x01, - [0x0ee] = 0x07, /* Address block length = 8 */ - [0x0f0] = 0xf6, /* Field 2 address = 0x03f6 */ - [0x0f2] = 0x03, - [0x0f4] = 0x01, /* Address block length = 2 */ - [0x0f6] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */ - [0x0f8] = 0x20, /* TPCE_MI = support power down mode */ - - [0x0fa] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x0fc] = 0x06, /* Tuple length = 6 bytes */ - [0x0fe] = 0x02, /* TPCE_INDX = I/O Primary Mode, no Default */ - [0x100] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ - [0x102] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ - [0x104] = 0xb5, /* NomV: 3.3 V */ - [0x106] = 0x1e, - [0x108] = 0x3e, /* Peakl: 350 mA */ - - [0x10a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x10c] = 0x12, /* Tuple length = 18 bytes */ - [0x10e] = 0xc3, /* TPCE_INDX = I/O Secondary Mode, Default */ - [0x110] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ - [0x112] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ - [0x114] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ - [0x116] = 0x55, /* NomV: 5.0 V */ - [0x118] = 0x4d, /* MinV: 4.5 V */ - [0x11a] = 0x5d, /* MaxV: 5.5 V */ - [0x11c] = 0x4e, /* Peakl: 450 mA */ - [0x11e] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ - [0x120] = 0x61, /* Range: 2 fields, 2 byte addr, 1 byte len */ - [0x122] = 0x70, /* Field 1 address = 0x0170 */ - [0x124] = 0x01, - [0x126] = 0x07, /* Address block length = 8 */ - [0x128] = 0x76, /* Field 2 address = 0x0376 */ - [0x12a] = 0x03, - [0x12c] = 0x01, /* Address block length = 2 */ - [0x12e] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */ - [0x130] = 0x20, /* TPCE_MI = support power down mode */ - - [0x132] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ - [0x134] = 0x06, /* Tuple length = 6 bytes */ - [0x136] = 0x03, /* TPCE_INDX = I/O Secondary Mode */ - [0x138] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ - [0x13a] = 0x21, /* NomV = 1, MinV = 0, 
MaxV = 0, Peakl = 1 */ - [0x13c] = 0xb5, /* NomV: 3.3 V */ - [0x13e] = 0x1e, - [0x140] = 0x3e, /* Peakl: 350 mA */ - - [0x142] = CISTPL_NO_LINK, /* No Link */ - [0x144] = 0x00, /* Tuple length = 0 bytes */ - - [0x146] = CISTPL_END, /* Tuple End */ -}; - -#define TYPE_DSCM1XXXX "dscm1xxxx" - -static int dscm1xxxx_attach(PCMCIACardState *card) -{ - MicroDriveState *md = MICRODRIVE(card); - PCMCIACardClass *pcc = PCMCIA_CARD_GET_CLASS(card); - - md->attr_base = pcc->cis[0x74] | (pcc->cis[0x76] << 8); - md->io_base = 0x0; - - device_cold_reset(DEVICE(md)); - md_interrupt_update(md); - - return 0; -} - -static int dscm1xxxx_detach(PCMCIACardState *card) -{ - MicroDriveState *md = MICRODRIVE(card); - - device_cold_reset(DEVICE(md)); - return 0; -} - -PCMCIACardState *dscm1xxxx_init(DriveInfo *dinfo) -{ - MicroDriveState *md; - - md = MICRODRIVE(object_new(TYPE_DSCM1XXXX)); - qdev_realize(DEVICE(md), NULL, &error_fatal); - - if (dinfo != NULL) { - ide_bus_create_drive(&md->bus, 0, dinfo); - } - md->bus.ifs[0].drive_kind = IDE_CFATA; - md->bus.ifs[0].mdata_size = METADATA_SIZE; - md->bus.ifs[0].mdata_storage = g_malloc0(METADATA_SIZE); - - return PCMCIA_CARD(md); -} - -static void dscm1xxxx_class_init(ObjectClass *oc, void *data) -{ - PCMCIACardClass *pcc = PCMCIA_CARD_CLASS(oc); - DeviceClass *dc = DEVICE_CLASS(oc); - - pcc->cis = dscm1xxxx_cis; - pcc->cis_len = sizeof(dscm1xxxx_cis); - - pcc->attach = dscm1xxxx_attach; - pcc->detach = dscm1xxxx_detach; - /* Reason: Needs to be wired-up in code, see dscm1xxxx_init() */ - dc->user_creatable = false; -} - -static const TypeInfo dscm1xxxx_type_info = { - .name = TYPE_DSCM1XXXX, - .parent = TYPE_MICRODRIVE, - .class_init = dscm1xxxx_class_init, -}; - -static void microdrive_realize(DeviceState *dev, Error **errp) -{ - MicroDriveState *md = MICRODRIVE(dev); - - ide_bus_init_output_irq(&md->bus, qemu_allocate_irq(md_set_irq, md, 0)); -} - -static void microdrive_init(Object *obj) -{ - MicroDriveState *md = MICRODRIVE(obj); - - ide_bus_init(&md->bus, sizeof(md->bus), DEVICE(obj), 0, 1); -} - -static void microdrive_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - PCMCIACardClass *pcc = PCMCIA_CARD_CLASS(oc); - - pcc->attr_read = md_attr_read; - pcc->attr_write = md_attr_write; - pcc->common_read = md_common_read; - pcc->common_write = md_common_write; - pcc->io_read = md_common_read; - pcc->io_write = md_common_write; - - dc->realize = microdrive_realize; - dc->reset = md_reset; - dc->vmsd = &vmstate_microdrive; -} - -static const TypeInfo microdrive_type_info = { - .name = TYPE_MICRODRIVE, - .parent = TYPE_PCMCIA_CARD, - .instance_size = sizeof(MicroDriveState), - .instance_init = microdrive_init, - .abstract = true, - .class_init = microdrive_class_init, -}; - -static void microdrive_register_types(void) -{ - type_register_static(&microdrive_type_info); - type_register_static(&dscm1xxxx_type_info); -} - -type_init(microdrive_register_types) diff --git a/hw/ide/mmio.c b/hw/ide/mmio.c index 87362813056..699874db785 100644 --- a/hw/ide/mmio.c +++ b/hw/ide/mmio.c @@ -27,7 +27,7 @@ #include "hw/sysbus.h" #include "migration/vmstate.h" #include "qemu/module.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/ide/mmio.h" #include "hw/qdev-properties.h" @@ -141,17 +141,16 @@ static void mmio_ide_initfn(Object *obj) sysbus_init_irq(d, &s->irq); } -static Property mmio_ide_properties[] = { +static const Property mmio_ide_properties[] = { DEFINE_PROP_UINT32("shift", MMIOIDEState, shift, 0), - DEFINE_PROP_END_OF_LIST()
}; -static void mmio_ide_class_init(ObjectClass *oc, void *data) +static void mmio_ide_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = mmio_ide_realizefn; - dc->reset = mmio_ide_reset; + device_class_set_legacy_reset(dc, mmio_ide_reset); device_class_set_props(dc, mmio_ide_properties); dc->vmsd = &vmstate_ide_mmio; } diff --git a/hw/ide/pci.c b/hw/ide/pci.c index 4675d079a17..1e50bb9e483 100644 --- a/hw/ide/pci.c +++ b/hw/ide/pci.c @@ -27,7 +27,7 @@ #include "hw/irq.h" #include "hw/pci/pci.h" #include "migration/vmstate.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "hw/ide/pci.h" @@ -237,7 +237,7 @@ static int32_t bmdma_prepare_buf(const IDEDMA *dma, int32_t limit) /* end of table (with a fail safe of one page) */ if (bm->cur_prd_last || (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) { - return s->sg.size; + break; } pci_dma_read(pci_dev, bm->cur_addr, &prd, 8); bm->cur_addr += 8; @@ -266,10 +266,7 @@ static int32_t bmdma_prepare_buf(const IDEDMA *dma, int32_t limit) s->io_buffer_size += l; } } - - qemu_sglist_destroy(&s->sg); - s->io_buffer_size = 0; - return -1; + return s->sg.size; } /* return 0 if buffer completed */ @@ -628,7 +625,7 @@ static const TypeInfo pci_ide_type_info = { .instance_size = sizeof(PCIIDEState), .instance_init = pci_ide_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/ide/piix.c b/hw/ide/piix.c index 80efc633d3c..a0f2709c697 100644 --- a/hw/ide/piix.c +++ b/hw/ide/piix.c @@ -178,12 +178,12 @@ static void pci_piix_ide_exitfn(PCIDevice *dev) } /* NOTE: for the PIIX3, the IRQs and IOports are hardcoded */ -static void piix3_ide_class_init(ObjectClass *klass, void *data) +static void piix3_ide_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - dc->reset = piix_ide_reset; + device_class_set_legacy_reset(dc, piix_ide_reset); dc->vmsd = &vmstate_ide_pci; k->realize = pci_piix_ide_realize; k->exit = pci_piix_ide_exitfn; @@ -201,12 +201,12 @@ static const TypeInfo piix3_ide_info = { }; /* NOTE: for the PIIX4, the IRQs and IOports are hardcoded */ -static void piix4_ide_class_init(ObjectClass *klass, void *data) +static void piix4_ide_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - dc->reset = piix_ide_reset; + device_class_set_legacy_reset(dc, piix_ide_reset); dc->vmsd = &vmstate_ide_pci; k->realize = pci_piix_ide_realize; k->exit = pci_piix_ide_exitfn; diff --git a/hw/ide/sii3112.c b/hw/ide/sii3112.c index af17384ff20..9b28c691fd7 100644 --- a/hw/ide/sii3112.c +++ b/hw/ide/sii3112.c @@ -290,7 +290,7 @@ static void sii3112_pci_realize(PCIDevice *dev, Error **errp) } } -static void sii3112_pci_class_init(ObjectClass *klass, void *data) +static void sii3112_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *pd = PCI_DEVICE_CLASS(klass); @@ -300,7 +300,7 @@ static void sii3112_pci_class_init(ObjectClass *klass, void *data) pd->class_id = PCI_CLASS_STORAGE_RAID; pd->revision = 1; pd->realize = sii3112_pci_realize; - dc->reset = sii3112_reset; + device_class_set_legacy_reset(dc, sii3112_reset); dc->desc = "SiI3112A SATA controller"; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } diff --git a/hw/ide/via.c 
b/hw/ide/via.c index a32f56b0e79..dedc2674c00 100644 --- a/hw/ide/via.c +++ b/hw/ide/via.c @@ -29,7 +29,7 @@ #include "migration/vmstate.h" #include "qemu/module.h" #include "qemu/range.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/isa/vt82c686.h" #include "hw/ide/pci.h" #include "hw/irq.h" @@ -245,12 +245,12 @@ static void via_ide_exitfn(PCIDevice *dev) } } -static void via_ide_class_init(ObjectClass *klass, void *data) +static void via_ide_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - dc->reset = via_ide_reset; + device_class_set_legacy_reset(dc, via_ide_reset); dc->vmsd = &vmstate_ide_pci; /* Reason: only works as function of VIA southbridge */ dc->user_creatable = false; diff --git a/hw/input/Kconfig b/hw/input/Kconfig index f86e98c8293..a116cb82dff 100644 --- a/hw/input/Kconfig +++ b/hw/input/Kconfig @@ -1,13 +1,6 @@ config ADB bool -config ADS7846 - bool - -config LM832X - bool - depends on I2C - config PCKBD bool select PS2 @@ -23,9 +16,6 @@ config PS2 config STELLARIS_GAMEPAD bool -config TSC2005 - bool - config VIRTIO_INPUT bool default y @@ -41,8 +31,5 @@ config VHOST_USER_INPUT default y depends on VIRTIO_INPUT && VHOST_USER -config TSC210X - bool - config LASIPS2 select PS2 diff --git a/hw/input/adb-kbd.c b/hw/input/adb-kbd.c index 758fa6d2676..507557deecc 100644 --- a/hw/input/adb-kbd.c +++ b/hw/input/adb-kbd.c @@ -375,7 +375,7 @@ static void adb_kbd_initfn(Object *obj) d->devaddr = ADB_DEVID_KEYBOARD; } -static void adb_kbd_class_init(ObjectClass *oc, void *data) +static void adb_kbd_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); ADBDeviceClass *adc = ADB_DEVICE_CLASS(oc); @@ -387,7 +387,7 @@ static void adb_kbd_class_init(ObjectClass *oc, void *data) adc->devreq = adb_kbd_request; adc->devhasdata = adb_kbd_has_data; - dc->reset = adb_kbd_reset; + device_class_set_legacy_reset(dc, adb_kbd_reset); dc->vmsd = &vmstate_adb_kbd; } diff --git a/hw/input/adb-mouse.c b/hw/input/adb-mouse.c index 144a0ccce71..373ef3f953c 100644 --- a/hw/input/adb-mouse.c +++ b/hw/input/adb-mouse.c @@ -38,6 +38,7 @@ struct MouseState { ADBDevice parent_obj; /*< private >*/ + QemuInputHandlerState *hs; int buttons_state, last_buttons_state; int dx, dy, dz; }; @@ -51,17 +52,57 @@ struct ADBMouseClass { DeviceRealize parent_realize; }; -static void adb_mouse_event(void *opaque, - int dx1, int dy1, int dz1, int buttons_state) +#define ADB_MOUSE_BUTTON_LEFT 0x01 +#define ADB_MOUSE_BUTTON_RIGHT 0x02 + +static void adb_mouse_handle_event(DeviceState *dev, QemuConsole *src, + InputEvent *evt) { - MouseState *s = opaque; + MouseState *s = (MouseState *)dev; + InputMoveEvent *move; + InputBtnEvent *btn; + static const int bmap[INPUT_BUTTON__MAX] = { + [INPUT_BUTTON_LEFT] = ADB_MOUSE_BUTTON_LEFT, + [INPUT_BUTTON_RIGHT] = ADB_MOUSE_BUTTON_RIGHT, + }; + + switch (evt->type) { + case INPUT_EVENT_KIND_REL: + move = evt->u.rel.data; + if (move->axis == INPUT_AXIS_X) { + s->dx += move->value; + } else if (move->axis == INPUT_AXIS_Y) { + s->dy += move->value; + } + break; + + case INPUT_EVENT_KIND_BTN: + btn = evt->u.btn.data; + if (bmap[btn->button]) { + if (btn->down) { + s->buttons_state |= bmap[btn->button]; + } else { + s->buttons_state &= ~bmap[btn->button]; + } + } + break; - s->dx += dx1; - s->dy += dy1; - s->dz += dz1; - s->buttons_state = buttons_state; + default: + /* keep gcc happy */ + break; + } } +static const QemuInputHandler adb_mouse_handler = { + .name = 
"QEMU ADB Mouse", + .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL, + .event = adb_mouse_handle_event, + /* + * We do not need the .sync handler because unlike e.g. PS/2 where async + * mouse events are sent over the serial port, an ADB mouse is constantly + * polled by the host via the adb_mouse_poll() callback. + */ +}; static int adb_mouse_poll(ADBDevice *d, uint8_t *obuf) { @@ -94,10 +135,10 @@ static int adb_mouse_poll(ADBDevice *d, uint8_t *obuf) dx &= 0x7f; dy &= 0x7f; - if (!(s->buttons_state & MOUSE_EVENT_LBUTTON)) { + if (!(s->buttons_state & ADB_MOUSE_BUTTON_LEFT)) { dy |= 0x80; } - if (!(s->buttons_state & MOUSE_EVENT_RBUTTON)) { + if (!(s->buttons_state & ADB_MOUSE_BUTTON_RIGHT)) { dx |= 0x80; } @@ -236,7 +277,7 @@ static void adb_mouse_realizefn(DeviceState *dev, Error **errp) amc->parent_realize(dev, errp); - qemu_add_mouse_event_handler(adb_mouse_event, s, 0, "QEMU ADB Mouse"); + s->hs = qemu_input_handler_register(dev, &adb_mouse_handler); } static void adb_mouse_initfn(Object *obj) @@ -246,7 +287,7 @@ static void adb_mouse_initfn(Object *obj) d->devaddr = ADB_DEVID_MOUSE; } -static void adb_mouse_class_init(ObjectClass *oc, void *data) +static void adb_mouse_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); ADBDeviceClass *adc = ADB_DEVICE_CLASS(oc); @@ -258,7 +299,7 @@ static void adb_mouse_class_init(ObjectClass *oc, void *data) adc->devreq = adb_mouse_request; adc->devhasdata = adb_mouse_has_data; - dc->reset = adb_mouse_reset; + device_class_set_legacy_reset(dc, adb_mouse_reset); dc->vmsd = &vmstate_adb_mouse; } diff --git a/hw/input/adb.c b/hw/input/adb.c index aff7130fd0f..bcb11edca35 100644 --- a/hw/input/adb.c +++ b/hw/input/adb.c @@ -259,7 +259,7 @@ static void adb_bus_unrealize(BusState *qbus) vmstate_unregister(NULL, &vmstate_adb_bus, adb_bus); } -static void adb_bus_class_init(ObjectClass *klass, void *data) +static void adb_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -299,7 +299,7 @@ static void adb_device_realizefn(DeviceState *dev, Error **errp) bus->devices[bus->nb_devices++] = d; } -static void adb_device_class_init(ObjectClass *oc, void *data) +static void adb_device_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/input/ads7846.c b/hw/input/ads7846.c deleted file mode 100644 index cde38922165..00000000000 --- a/hw/input/ads7846.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * TI ADS7846 / TSC2046 chip emulation. - * - * Copyright (c) 2006 Openedhand Ltd. - * Written by Andrzej Zaborowski - * - * This code is licensed under the GNU GPL v2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. 
- */ - -#include "qemu/osdep.h" -#include "hw/irq.h" -#include "hw/ssi/ssi.h" -#include "migration/vmstate.h" -#include "qemu/module.h" -#include "ui/console.h" -#include "qom/object.h" - -struct ADS7846State { - SSIPeripheral ssidev; - qemu_irq interrupt; - - int input[8]; - int pressure; - int noise; - - int cycle; - int output; -}; - -#define TYPE_ADS7846 "ads7846" -OBJECT_DECLARE_SIMPLE_TYPE(ADS7846State, ADS7846) - -/* Control-byte bitfields */ -#define CB_PD0 (1 << 0) -#define CB_PD1 (1 << 1) -#define CB_SER (1 << 2) -#define CB_MODE (1 << 3) -#define CB_A0 (1 << 4) -#define CB_A1 (1 << 5) -#define CB_A2 (1 << 6) -#define CB_START (1 << 7) - -#define X_AXIS_DMAX 3470 -#define X_AXIS_MIN 290 -#define Y_AXIS_DMAX 3450 -#define Y_AXIS_MIN 200 - -#define ADS_VBAT 2000 -#define ADS_VAUX 2000 -#define ADS_TEMP0 2000 -#define ADS_TEMP1 3000 -#define ADS_XPOS(x, y) (X_AXIS_MIN + ((X_AXIS_DMAX * (x)) >> 15)) -#define ADS_YPOS(x, y) (Y_AXIS_MIN + ((Y_AXIS_DMAX * (y)) >> 15)) -#define ADS_Z1POS(x, y) 600 -#define ADS_Z2POS(x, y) (600 + 6000 / ADS_XPOS(x, y)) - -static void ads7846_int_update(ADS7846State *s) -{ - if (s->interrupt) - qemu_set_irq(s->interrupt, s->pressure == 0); -} - -static uint32_t ads7846_transfer(SSIPeripheral *dev, uint32_t value) -{ - ADS7846State *s = ADS7846(dev); - - switch (s->cycle ++) { - case 0: - if (!(value & CB_START)) { - s->cycle = 0; - break; - } - - s->output = s->input[(value >> 4) & 7]; - - /* Imitate the ADC noise, some drivers expect this. */ - s->noise = (s->noise + 3) & 7; - switch ((value >> 4) & 7) { - case 1: s->output += s->noise ^ 2; break; - case 3: s->output += s->noise ^ 0; break; - case 4: s->output += s->noise ^ 7; break; - case 5: s->output += s->noise ^ 5; break; - } - - if (value & CB_MODE) - s->output >>= 4; /* 8 bits instead of 12 */ - - break; - case 1: - s->cycle = 0; - break; - } - return s->output; -} - -static void ads7846_ts_event(void *opaque, - int x, int y, int z, int buttons_state) -{ - ADS7846State *s = opaque; - - if (buttons_state) { - x = 0x7fff - x; - s->input[1] = ADS_XPOS(x, y); - s->input[3] = ADS_Z1POS(x, y); - s->input[4] = ADS_Z2POS(x, y); - s->input[5] = ADS_YPOS(x, y); - } - - if (s->pressure == !buttons_state) { - s->pressure = !!buttons_state; - - ads7846_int_update(s); - } -} - -static int ads7856_post_load(void *opaque, int version_id) -{ - ADS7846State *s = opaque; - - s->pressure = 0; - ads7846_int_update(s); - return 0; -} - -static const VMStateDescription vmstate_ads7846 = { - .name = "ads7846", - .version_id = 1, - .minimum_version_id = 1, - .post_load = ads7856_post_load, - .fields = (const VMStateField[]) { - VMSTATE_SSI_PERIPHERAL(ssidev, ADS7846State), - VMSTATE_INT32_ARRAY(input, ADS7846State, 8), - VMSTATE_INT32(noise, ADS7846State), - VMSTATE_INT32(cycle, ADS7846State), - VMSTATE_INT32(output, ADS7846State), - VMSTATE_END_OF_LIST() - } -}; - -static void ads7846_realize(SSIPeripheral *d, Error **errp) -{ - DeviceState *dev = DEVICE(d); - ADS7846State *s = ADS7846(d); - - qdev_init_gpio_out(dev, &s->interrupt, 1); - - s->input[0] = ADS_TEMP0; /* TEMP0 */ - s->input[2] = ADS_VBAT; /* VBAT */ - s->input[6] = ADS_VAUX; /* VAUX */ - s->input[7] = ADS_TEMP1; /* TEMP1 */ - - /* We want absolute coordinates */ - qemu_add_mouse_event_handler(ads7846_ts_event, s, 1, - "QEMU ADS7846-driven Touchscreen"); - - ads7846_int_update(s); - - vmstate_register_any(NULL, &vmstate_ads7846, s); -} - -static void ads7846_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - 
SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); - - k->realize = ads7846_realize; - k->transfer = ads7846_transfer; - set_bit(DEVICE_CATEGORY_INPUT, dc->categories); -} - -static const TypeInfo ads7846_info = { - .name = TYPE_ADS7846, - .parent = TYPE_SSI_PERIPHERAL, - .instance_size = sizeof(ADS7846State), - .class_init = ads7846_class_init, -}; - -static void ads7846_register_types(void) -{ - type_register_static(&ads7846_info); -} - -type_init(ads7846_register_types) diff --git a/hw/input/lasips2.c b/hw/input/lasips2.c index d9f8c36778d..de625723c78 100644 --- a/hw/input/lasips2.c +++ b/hw/input/lasips2.c @@ -29,7 +29,7 @@ #include "hw/input/lasips2.h" #include "exec/hwaddr.h" #include "trace.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "migration/vmstate.h" #include "hw/irq.h" #include "qapi/error.h" @@ -306,7 +306,7 @@ static void lasips2_init(Object *obj) "lasips2-port-input-irq", 2); } -static void lasips2_class_init(ObjectClass *klass, void *data) +static void lasips2_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -347,7 +347,7 @@ static void lasips2_port_init(Object *obj) "ps2-input-irq", 1); } -static void lasips2_port_class_init(ObjectClass *klass, void *data) +static void lasips2_port_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -397,7 +397,7 @@ static void lasips2_kbd_port_init(Object *obj) lp->lasips2 = container_of(s, LASIPS2State, kbd_port); } -static void lasips2_kbd_port_class_init(ObjectClass *klass, void *data) +static void lasips2_kbd_port_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); LASIPS2PortDeviceClass *lpdc = LASIPS2_PORT_CLASS(klass); @@ -447,7 +447,7 @@ static void lasips2_mouse_port_init(Object *obj) lp->lasips2 = container_of(s, LASIPS2State, mouse_port); } -static void lasips2_mouse_port_class_init(ObjectClass *klass, void *data) +static void lasips2_mouse_port_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); LASIPS2PortDeviceClass *lpdc = LASIPS2_PORT_CLASS(klass); diff --git a/hw/input/lm832x.c b/hw/input/lm832x.c deleted file mode 100644 index 59e5567afd5..00000000000 --- a/hw/input/lm832x.c +++ /dev/null @@ -1,528 +0,0 @@ -/* - * National Semiconductor LM8322/8323 GPIO keyboard & PWM chips. - * - * Copyright (C) 2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . 
- */ - -#include "qemu/osdep.h" -#include "hw/input/lm832x.h" -#include "hw/i2c/i2c.h" -#include "hw/irq.h" -#include "migration/vmstate.h" -#include "qemu/module.h" -#include "qemu/timer.h" -#include "ui/console.h" -#include "qom/object.h" - -OBJECT_DECLARE_SIMPLE_TYPE(LM823KbdState, LM8323) - -struct LM823KbdState { - I2CSlave parent_obj; - - uint8_t i2c_dir; - uint8_t i2c_cycle; - uint8_t reg; - - qemu_irq nirq; - uint16_t model; - - struct { - qemu_irq out[2]; - int in[2][2]; - } mux; - - uint8_t config; - uint8_t status; - uint8_t acttime; - uint8_t error; - uint8_t clock; - - struct { - uint16_t pull; - uint16_t mask; - uint16_t dir; - uint16_t level; - qemu_irq out[16]; - } gpio; - - struct { - uint8_t dbnctime; - uint8_t size; - uint8_t start; - uint8_t len; - uint8_t fifo[16]; - } kbd; - - struct { - uint16_t file[256]; - uint8_t faddr; - uint8_t addr[3]; - QEMUTimer *tm[3]; - } pwm; -}; - -#define INT_KEYPAD (1 << 0) -#define INT_ERROR (1 << 3) -#define INT_NOINIT (1 << 4) -#define INT_PWMEND(n) (1 << (5 + n)) - -#define ERR_BADPAR (1 << 0) -#define ERR_CMDUNK (1 << 1) -#define ERR_KEYOVR (1 << 2) -#define ERR_FIFOOVR (1 << 6) - -static void lm_kbd_irq_update(LM823KbdState *s) -{ - qemu_set_irq(s->nirq, !s->status); -} - -static void lm_kbd_gpio_update(LM823KbdState *s) -{ -} - -static void lm_kbd_reset(DeviceState *dev) -{ - LM823KbdState *s = LM8323(dev); - - s->config = 0x80; - s->status = INT_NOINIT; - s->acttime = 125; - s->kbd.dbnctime = 3; - s->kbd.size = 0x33; - s->clock = 0x08; - - lm_kbd_irq_update(s); - lm_kbd_gpio_update(s); -} - -static void lm_kbd_error(LM823KbdState *s, int err) -{ - s->error |= err; - s->status |= INT_ERROR; - lm_kbd_irq_update(s); -} - -static void lm_kbd_pwm_tick(LM823KbdState *s, int line) -{ -} - -static void lm_kbd_pwm_start(LM823KbdState *s, int line) -{ - lm_kbd_pwm_tick(s, line); -} - -static void lm_kbd_pwm0_tick(void *opaque) -{ - lm_kbd_pwm_tick(opaque, 0); -} -static void lm_kbd_pwm1_tick(void *opaque) -{ - lm_kbd_pwm_tick(opaque, 1); -} -static void lm_kbd_pwm2_tick(void *opaque) -{ - lm_kbd_pwm_tick(opaque, 2); -} - -enum { - LM832x_CMD_READ_ID = 0x80, /* Read chip ID. */ - LM832x_CMD_WRITE_CFG = 0x81, /* Set configuration item. */ - LM832x_CMD_READ_INT = 0x82, /* Get interrupt status. */ - LM832x_CMD_RESET = 0x83, /* Reset, same as external one */ - LM823x_CMD_WRITE_PULL_DOWN = 0x84, /* Select GPIO pull-up/down. */ - LM832x_CMD_WRITE_PORT_SEL = 0x85, /* Select GPIO in/out. */ - LM832x_CMD_WRITE_PORT_STATE = 0x86, /* Set GPIO pull-up/down. */ - LM832x_CMD_READ_PORT_SEL = 0x87, /* Get GPIO in/out. */ - LM832x_CMD_READ_PORT_STATE = 0x88, /* Get GPIO pull-up/down. */ - LM832x_CMD_READ_FIFO = 0x89, /* Read byte from FIFO. */ - LM832x_CMD_RPT_READ_FIFO = 0x8a, /* Read FIFO (no increment). */ - LM832x_CMD_SET_ACTIVE = 0x8b, /* Set active time. */ - LM832x_CMD_READ_ERROR = 0x8c, /* Get error status. */ - LM832x_CMD_READ_ROTATOR = 0x8e, /* Read rotator status. */ - LM832x_CMD_SET_DEBOUNCE = 0x8f, /* Set debouncing time. */ - LM832x_CMD_SET_KEY_SIZE = 0x90, /* Set keypad size. */ - LM832x_CMD_READ_KEY_SIZE = 0x91, /* Get keypad size. */ - LM832x_CMD_READ_CFG = 0x92, /* Get configuration item. */ - LM832x_CMD_WRITE_CLOCK = 0x93, /* Set clock config. */ - LM832x_CMD_READ_CLOCK = 0x94, /* Get clock config. */ - LM832x_CMD_PWM_WRITE = 0x95, /* Write PWM script. */ - LM832x_CMD_PWM_START = 0x96, /* Start PWM engine. */ - LM832x_CMD_PWM_STOP = 0x97, /* Stop PWM engine. */ - LM832x_GENERAL_ERROR = 0xff, /* There was one error. 
- Previously was represented by -1 - This is not a command */ -}; - -#define LM832x_MAX_KPX 8 -#define LM832x_MAX_KPY 12 - -static uint8_t lm_kbd_read(LM823KbdState *s, int reg, int byte) -{ - int ret; - - switch (reg) { - case LM832x_CMD_READ_ID: - ret = 0x0400; - break; - - case LM832x_CMD_READ_INT: - ret = s->status; - if (!(s->status & INT_NOINIT)) { - s->status = 0; - lm_kbd_irq_update(s); - } - break; - - case LM832x_CMD_READ_PORT_SEL: - ret = s->gpio.dir; - break; - case LM832x_CMD_READ_PORT_STATE: - ret = s->gpio.mask; - break; - - case LM832x_CMD_READ_FIFO: - if (s->kbd.len <= 1) - return 0x00; - - /* Example response from the two commands after a INT_KEYPAD - * interrupt caused by the key 0x3c being pressed: - * RPT_READ_FIFO: 55 bc 00 4e ff 0a 50 08 00 29 d9 08 01 c9 01 - * READ_FIFO: bc 00 00 4e ff 0a 50 08 00 29 d9 08 01 c9 01 - * RPT_READ_FIFO: bc 00 00 4e ff 0a 50 08 00 29 d9 08 01 c9 01 - * - * 55 is the code of the key release event serviced in the previous - * interrupt handling. - * - * TODO: find out whether the FIFO is advanced a single character - * before reading every byte or the whole size of the FIFO at the - * last LM832x_CMD_READ_FIFO. This affects LM832x_CMD_RPT_READ_FIFO - * output in cases where there are more than one event in the FIFO. - * Assume 0xbc and 0x3c events are in the FIFO: - * RPT_READ_FIFO: 55 bc 3c 00 4e ff 0a 50 08 00 29 d9 08 01 c9 - * READ_FIFO: bc 3c 00 00 4e ff 0a 50 08 00 29 d9 08 01 c9 - * Does RPT_READ_FIFO now return 0xbc and 0x3c or only 0x3c? - */ - s->kbd.start ++; - s->kbd.start &= sizeof(s->kbd.fifo) - 1; - s->kbd.len --; - - return s->kbd.fifo[s->kbd.start]; - case LM832x_CMD_RPT_READ_FIFO: - if (byte >= s->kbd.len) - return 0x00; - - return s->kbd.fifo[(s->kbd.start + byte) & (sizeof(s->kbd.fifo) - 1)]; - - case LM832x_CMD_READ_ERROR: - return s->error; - - case LM832x_CMD_READ_ROTATOR: - return 0; - - case LM832x_CMD_READ_KEY_SIZE: - return s->kbd.size; - - case LM832x_CMD_READ_CFG: - return s->config & 0xf; - - case LM832x_CMD_READ_CLOCK: - return (s->clock & 0xfc) | 2; - - default: - lm_kbd_error(s, ERR_CMDUNK); - fprintf(stderr, "%s: unknown command %02x\n", __func__, reg); - return 0x00; - } - - return ret >> (byte << 3); -} - -static void lm_kbd_write(LM823KbdState *s, int reg, int byte, uint8_t value) -{ - switch (reg) { - case LM832x_CMD_WRITE_CFG: - s->config = value; - /* This must be done whenever s->mux.in is updated (never). */ - if ((s->config >> 1) & 1) /* MUX1EN */ - qemu_set_irq(s->mux.out[0], s->mux.in[0][(s->config >> 0) & 1]); - if ((s->config >> 3) & 1) /* MUX2EN */ - qemu_set_irq(s->mux.out[0], s->mux.in[0][(s->config >> 2) & 1]); - /* TODO: check that this is issued only following the chip reset - * and not in the middle of operation and that it is followed by - * the GPIO ports re-resablishing through WRITE_PORT_SEL and - * WRITE_PORT_STATE (using a timer perhaps) and otherwise output - * warnings. 
*/ - s->status = 0; - lm_kbd_irq_update(s); - s->kbd.len = 0; - s->kbd.start = 0; - s->reg = LM832x_GENERAL_ERROR; - break; - - case LM832x_CMD_RESET: - if (value == 0xaa) - lm_kbd_reset(DEVICE(s)); - else - lm_kbd_error(s, ERR_BADPAR); - s->reg = LM832x_GENERAL_ERROR; - break; - - case LM823x_CMD_WRITE_PULL_DOWN: - if (!byte) - s->gpio.pull = value; - else { - s->gpio.pull |= value << 8; - lm_kbd_gpio_update(s); - s->reg = LM832x_GENERAL_ERROR; - } - break; - case LM832x_CMD_WRITE_PORT_SEL: - if (!byte) - s->gpio.dir = value; - else { - s->gpio.dir |= value << 8; - lm_kbd_gpio_update(s); - s->reg = LM832x_GENERAL_ERROR; - } - break; - case LM832x_CMD_WRITE_PORT_STATE: - if (!byte) - s->gpio.mask = value; - else { - s->gpio.mask |= value << 8; - lm_kbd_gpio_update(s); - s->reg = LM832x_GENERAL_ERROR; - } - break; - - case LM832x_CMD_SET_ACTIVE: - s->acttime = value; - s->reg = LM832x_GENERAL_ERROR; - break; - - case LM832x_CMD_SET_DEBOUNCE: - s->kbd.dbnctime = value; - s->reg = LM832x_GENERAL_ERROR; - if (!value) - lm_kbd_error(s, ERR_BADPAR); - break; - - case LM832x_CMD_SET_KEY_SIZE: - s->kbd.size = value; - s->reg = LM832x_GENERAL_ERROR; - if ( - (value & 0xf) < 3 || (value & 0xf) > LM832x_MAX_KPY || - (value >> 4) < 3 || (value >> 4) > LM832x_MAX_KPX) - lm_kbd_error(s, ERR_BADPAR); - break; - - case LM832x_CMD_WRITE_CLOCK: - s->clock = value; - s->reg = LM832x_GENERAL_ERROR; - if ((value & 3) && (value & 3) != 3) { - lm_kbd_error(s, ERR_BADPAR); - fprintf(stderr, "%s: invalid clock setting in RCPWM\n", - __func__); - } - /* TODO: Validate that the command is only issued once */ - break; - - case LM832x_CMD_PWM_WRITE: - if (byte == 0) { - if (!(value & 3) || (value >> 2) > 59) { - lm_kbd_error(s, ERR_BADPAR); - s->reg = LM832x_GENERAL_ERROR; - break; - } - - s->pwm.faddr = value; - s->pwm.file[s->pwm.faddr] = 0; - } else if (byte == 1) { - s->pwm.file[s->pwm.faddr] |= value << 8; - } else if (byte == 2) { - s->pwm.file[s->pwm.faddr] |= value << 0; - s->reg = LM832x_GENERAL_ERROR; - } - break; - case LM832x_CMD_PWM_START: - s->reg = LM832x_GENERAL_ERROR; - if (!(value & 3) || (value >> 2) > 59) { - lm_kbd_error(s, ERR_BADPAR); - break; - } - - s->pwm.addr[(value & 3) - 1] = value >> 2; - lm_kbd_pwm_start(s, (value & 3) - 1); - break; - case LM832x_CMD_PWM_STOP: - s->reg = LM832x_GENERAL_ERROR; - if (!(value & 3)) { - lm_kbd_error(s, ERR_BADPAR); - break; - } - - timer_del(s->pwm.tm[(value & 3) - 1]); - break; - - case LM832x_GENERAL_ERROR: - lm_kbd_error(s, ERR_BADPAR); - break; - default: - lm_kbd_error(s, ERR_CMDUNK); - fprintf(stderr, "%s: unknown command %02x\n", __func__, reg); - break; - } -} - -static int lm_i2c_event(I2CSlave *i2c, enum i2c_event event) -{ - LM823KbdState *s = LM8323(i2c); - - switch (event) { - case I2C_START_RECV: - case I2C_START_SEND: - s->i2c_cycle = 0; - s->i2c_dir = (event == I2C_START_SEND); - break; - - default: - break; - } - - return 0; -} - -static uint8_t lm_i2c_rx(I2CSlave *i2c) -{ - LM823KbdState *s = LM8323(i2c); - - return lm_kbd_read(s, s->reg, s->i2c_cycle ++); -} - -static int lm_i2c_tx(I2CSlave *i2c, uint8_t data) -{ - LM823KbdState *s = LM8323(i2c); - - if (!s->i2c_cycle) - s->reg = data; - else - lm_kbd_write(s, s->reg, s->i2c_cycle - 1, data); - s->i2c_cycle ++; - - return 0; -} - -static int lm_kbd_post_load(void *opaque, int version_id) -{ - LM823KbdState *s = opaque; - - lm_kbd_irq_update(s); - lm_kbd_gpio_update(s); - - return 0; -} - -static const VMStateDescription vmstate_lm_kbd = { - .name = "LM8323", - .version_id = 0, - 
.minimum_version_id = 0, - .post_load = lm_kbd_post_load, - .fields = (const VMStateField[]) { - VMSTATE_I2C_SLAVE(parent_obj, LM823KbdState), - VMSTATE_UINT8(i2c_dir, LM823KbdState), - VMSTATE_UINT8(i2c_cycle, LM823KbdState), - VMSTATE_UINT8(reg, LM823KbdState), - VMSTATE_UINT8(config, LM823KbdState), - VMSTATE_UINT8(status, LM823KbdState), - VMSTATE_UINT8(acttime, LM823KbdState), - VMSTATE_UINT8(error, LM823KbdState), - VMSTATE_UINT8(clock, LM823KbdState), - VMSTATE_UINT16(gpio.pull, LM823KbdState), - VMSTATE_UINT16(gpio.mask, LM823KbdState), - VMSTATE_UINT16(gpio.dir, LM823KbdState), - VMSTATE_UINT16(gpio.level, LM823KbdState), - VMSTATE_UINT8(kbd.dbnctime, LM823KbdState), - VMSTATE_UINT8(kbd.size, LM823KbdState), - VMSTATE_UINT8(kbd.start, LM823KbdState), - VMSTATE_UINT8(kbd.len, LM823KbdState), - VMSTATE_BUFFER(kbd.fifo, LM823KbdState), - VMSTATE_UINT16_ARRAY(pwm.file, LM823KbdState, 256), - VMSTATE_UINT8(pwm.faddr, LM823KbdState), - VMSTATE_BUFFER(pwm.addr, LM823KbdState), - VMSTATE_TIMER_PTR_ARRAY(pwm.tm, LM823KbdState, 3), - VMSTATE_END_OF_LIST() - } -}; - - -static void lm8323_realize(DeviceState *dev, Error **errp) -{ - LM823KbdState *s = LM8323(dev); - - s->model = 0x8323; - s->pwm.tm[0] = timer_new_ns(QEMU_CLOCK_VIRTUAL, lm_kbd_pwm0_tick, s); - s->pwm.tm[1] = timer_new_ns(QEMU_CLOCK_VIRTUAL, lm_kbd_pwm1_tick, s); - s->pwm.tm[2] = timer_new_ns(QEMU_CLOCK_VIRTUAL, lm_kbd_pwm2_tick, s); - qdev_init_gpio_out(dev, &s->nirq, 1); -} - -void lm832x_key_event(DeviceState *dev, int key, int state) -{ - LM823KbdState *s = LM8323(dev); - - if ((s->status & INT_ERROR) && (s->error & ERR_FIFOOVR)) - return; - - if (s->kbd.len >= sizeof(s->kbd.fifo)) { - lm_kbd_error(s, ERR_FIFOOVR); - return; - } - - s->kbd.fifo[(s->kbd.start + s->kbd.len ++) & (sizeof(s->kbd.fifo) - 1)] = - key | (state << 7); - - /* We never set ERR_KEYOVR because we support multiple keys fine. 
*/ - s->status |= INT_KEYPAD; - lm_kbd_irq_update(s); -} - -static void lm8323_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); - - dc->reset = lm_kbd_reset; - dc->realize = lm8323_realize; - k->event = lm_i2c_event; - k->recv = lm_i2c_rx; - k->send = lm_i2c_tx; - dc->vmsd = &vmstate_lm_kbd; -} - -static const TypeInfo lm8323_info = { - .name = TYPE_LM8323, - .parent = TYPE_I2C_SLAVE, - .instance_size = sizeof(LM823KbdState), - .class_init = lm8323_class_init, -}; - -static void lm832x_register_types(void) -{ - type_register_static(&lm8323_info); -} - -type_init(lm832x_register_types) diff --git a/hw/input/meson.build b/hw/input/meson.build index 3cc8ab85f0c..90a214962c7 100644 --- a/hw/input/meson.build +++ b/hw/input/meson.build @@ -1,17 +1,12 @@ system_ss.add(files('hid.c')) system_ss.add(when: 'CONFIG_ADB', if_true: files('adb.c', 'adb-mouse.c', 'adb-kbd.c')) -system_ss.add(when: 'CONFIG_ADS7846', if_true: files('ads7846.c')) -system_ss.add(when: 'CONFIG_LM832X', if_true: files('lm832x.c')) system_ss.add(when: 'CONFIG_PCKBD', if_true: files('pckbd.c')) system_ss.add(when: 'CONFIG_PL050', if_true: files('pl050.c')) system_ss.add(when: 'CONFIG_PS2', if_true: files('ps2.c')) system_ss.add(when: 'CONFIG_STELLARIS_GAMEPAD', if_true: files('stellaris_gamepad.c')) -system_ss.add(when: 'CONFIG_TSC2005', if_true: files('tsc2005.c')) system_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input.c')) system_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input-hid.c')) system_ss.add(when: 'CONFIG_VIRTIO_INPUT_HOST', if_true: files('virtio-input-host.c')) -system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_keypad.c')) -system_ss.add(when: 'CONFIG_TSC210X', if_true: files('tsc210x.c')) system_ss.add(when: 'CONFIG_LASIPS2', if_true: files('lasips2.c')) diff --git a/hw/input/pckbd.c b/hw/input/pckbd.c index 74f10b640fd..71f5f976e9c 100644 --- a/hw/input/pckbd.c +++ b/hw/input/pckbd.c @@ -34,8 +34,8 @@ #include "hw/irq.h" #include "hw/input/i8042.h" #include "hw/qdev-properties.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" +#include "system/reset.h" +#include "system/runstate.h" #include "trace.h" @@ -735,10 +735,9 @@ static void i8042_mmio_init(Object *obj) "ps2-mouse-input-irq", 1); } -static Property i8042_mmio_properties[] = { +static const Property i8042_mmio_properties[] = { DEFINE_PROP_UINT64("mask", MMIOKBDState, kbd.mask, UINT64_MAX), DEFINE_PROP_UINT32("size", MMIOKBDState, size, -1), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_kbd_mmio = { @@ -751,12 +750,12 @@ static const VMStateDescription vmstate_kbd_mmio = { } }; -static void i8042_mmio_class_init(ObjectClass *klass, void *data) +static void i8042_mmio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = i8042_mmio_realize; - dc->reset = i8042_mmio_reset; + device_class_set_legacy_reset(dc, i8042_mmio_reset); dc->vmsd = &vmstate_kbd_mmio; device_class_set_props(dc, i8042_mmio_properties); set_bit(DEVICE_CATEGORY_INPUT, dc->categories); @@ -933,21 +932,20 @@ static void i8042_build_aml(AcpiDevAmlIf *adev, Aml *scope) aml_append(scope, mou); } -static Property i8042_properties[] = { +static const Property i8042_properties[] = { DEFINE_PROP_BOOL("extended-state", ISAKBDState, kbd.extended_state, true), DEFINE_PROP_BOOL("kbd-throttle", ISAKBDState, kbd_throttle, false), DEFINE_PROP_UINT8("kbd-irq", ISAKBDState, kbd_irq, 1), 
DEFINE_PROP_UINT8("mouse-irq", ISAKBDState, mouse_irq, 12), - DEFINE_PROP_END_OF_LIST(), }; -static void i8042_class_initfn(ObjectClass *klass, void *data) +static void i8042_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); device_class_set_props(dc, i8042_properties); - dc->reset = i8042_reset; + device_class_set_legacy_reset(dc, i8042_reset); dc->realize = i8042_realizefn; dc->vmsd = &vmstate_kbd_isa; adevc->build_dev_aml = i8042_build_aml; @@ -960,7 +958,7 @@ static const TypeInfo i8042_info = { .instance_size = sizeof(ISAKBDState), .instance_init = i8042_initfn, .class_init = i8042_class_initfn, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_ACPI_DEV_AML_IF }, { }, }, diff --git a/hw/input/pl050.c b/hw/input/pl050.c index 6519e260ed5..c5f4a3fa843 100644 --- a/hw/input/pl050.c +++ b/hw/input/pl050.c @@ -203,7 +203,7 @@ static void pl050_mouse_init(Object *obj) object_initialize_child(obj, "mouse", &s->mouse, TYPE_PS2_MOUSE_DEVICE); } -static void pl050_kbd_class_init(ObjectClass *oc, void *data) +static void pl050_kbd_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PL050DeviceClass *pdc = PL050_CLASS(oc); @@ -220,7 +220,7 @@ static const TypeInfo pl050_kbd_info = { .class_init = pl050_kbd_class_init, }; -static void pl050_mouse_class_init(ObjectClass *oc, void *data) +static void pl050_mouse_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PL050DeviceClass *pdc = PL050_CLASS(oc); @@ -249,7 +249,7 @@ static void pl050_init(Object *obj) qdev_init_gpio_in_named(DEVICE(obj), pl050_set_irq, "ps2-input-irq", 1); } -static void pl050_class_init(ObjectClass *oc, void *data) +static void pl050_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/input/ps2.c b/hw/input/ps2.c index d6f834443dd..7f7b1fce2e5 100644 --- a/hw/input/ps2.c +++ b/hw/input/ps2.c @@ -30,8 +30,8 @@ #include "migration/vmstate.h" #include "ui/console.h" #include "ui/input.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" +#include "system/reset.h" +#include "system/runstate.h" #include "qapi/error.h" #include "trace.h" @@ -1254,7 +1254,7 @@ static void ps2_mouse_realize(DeviceState *dev, Error **errp) qemu_input_handler_register(dev, &ps2_mouse_handler); } -static void ps2_kbd_class_init(ObjectClass *klass, void *data) +static void ps2_kbd_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -1273,7 +1273,7 @@ static const TypeInfo ps2_kbd_info = { .class_init = ps2_kbd_class_init }; -static void ps2_mouse_class_init(ObjectClass *klass, void *data) +static void ps2_mouse_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -1299,7 +1299,7 @@ static void ps2_init(Object *obj) qdev_init_gpio_out(DEVICE(obj), &s->irq, 1); } -static void ps2_class_init(ObjectClass *klass, void *data) +static void ps2_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/input/pxa2xx_keypad.c b/hw/input/pxa2xx_keypad.c deleted file mode 100644 index 3858648d9f6..00000000000 --- a/hw/input/pxa2xx_keypad.c +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Intel PXA27X Keypad Controller emulation. 
- * - * Copyright (c) 2007 MontaVista Software, Inc - * Written by Armin Kuster - * or - * - * This code is licensed under the GPLv2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "qemu/osdep.h" -#include "qemu/log.h" -#include "hw/irq.h" -#include "migration/vmstate.h" -#include "hw/arm/pxa.h" -#include "ui/console.h" - -/* - * Keypad - */ -#define KPC 0x00 /* Keypad Interface Control register */ -#define KPDK 0x08 /* Keypad Interface Direct Key register */ -#define KPREC 0x10 /* Keypad Interface Rotary Encoder register */ -#define KPMK 0x18 /* Keypad Interface Matrix Key register */ -#define KPAS 0x20 /* Keypad Interface Automatic Scan register */ -#define KPASMKP0 0x28 /* Keypad Interface Automatic Scan Multiple - Key Presser register 0 */ -#define KPASMKP1 0x30 /* Keypad Interface Automatic Scan Multiple - Key Presser register 1 */ -#define KPASMKP2 0x38 /* Keypad Interface Automatic Scan Multiple - Key Presser register 2 */ -#define KPASMKP3 0x40 /* Keypad Interface Automatic Scan Multiple - Key Presser register 3 */ -#define KPKDI 0x48 /* Keypad Interface Key Debounce Interval - register */ - -/* Keypad defines */ -#define KPC_AS (0x1 << 30) /* Automatic Scan bit */ -#define KPC_ASACT (0x1 << 29) /* Automatic Scan on Activity */ -#define KPC_MI (0x1 << 22) /* Matrix interrupt bit */ -#define KPC_IMKP (0x1 << 21) /* Ignore Multiple Key Press */ -#define KPC_MS7 (0x1 << 20) /* Matrix scan line 7 */ -#define KPC_MS6 (0x1 << 19) /* Matrix scan line 6 */ -#define KPC_MS5 (0x1 << 18) /* Matrix scan line 5 */ -#define KPC_MS4 (0x1 << 17) /* Matrix scan line 4 */ -#define KPC_MS3 (0x1 << 16) /* Matrix scan line 3 */ -#define KPC_MS2 (0x1 << 15) /* Matrix scan line 2 */ -#define KPC_MS1 (0x1 << 14) /* Matrix scan line 1 */ -#define KPC_MS0 (0x1 << 13) /* Matrix scan line 0 */ -#define KPC_ME (0x1 << 12) /* Matrix Keypad Enable */ -#define KPC_MIE (0x1 << 11) /* Matrix Interrupt Enable */ -#define KPC_DK_DEB_SEL (0x1 << 9) /* Direct Keypad Debounce Select */ -#define KPC_DI (0x1 << 5) /* Direct key interrupt bit */ -#define KPC_RE_ZERO_DEB (0x1 << 4) /* Rotary Encoder Zero Debounce */ -#define KPC_REE1 (0x1 << 3) /* Rotary Encoder1 Enable */ -#define KPC_REE0 (0x1 << 2) /* Rotary Encoder0 Enable */ -#define KPC_DE (0x1 << 1) /* Direct Keypad Enable */ -#define KPC_DIE (0x1 << 0) /* Direct Keypad interrupt Enable */ - -#define KPDK_DKP (0x1 << 31) -#define KPDK_DK7 (0x1 << 7) -#define KPDK_DK6 (0x1 << 6) -#define KPDK_DK5 (0x1 << 5) -#define KPDK_DK4 (0x1 << 4) -#define KPDK_DK3 (0x1 << 3) -#define KPDK_DK2 (0x1 << 2) -#define KPDK_DK1 (0x1 << 1) -#define KPDK_DK0 (0x1 << 0) - -#define KPREC_OF1 (0x1 << 31) -#define KPREC_UF1 (0x1 << 30) -#define KPREC_OF0 (0x1 << 15) -#define KPREC_UF0 (0x1 << 14) - -#define KPMK_MKP (0x1 << 31) -#define KPAS_SO (0x1 << 31) -#define KPASMKPx_SO (0x1 << 31) - - -#define KPASMKPx_MKC(row, col) (1 << (row + 16 * (col % 2))) - -#define PXAKBD_MAXROW 8 -#define PXAKBD_MAXCOL 8 - -struct PXA2xxKeyPadState { - MemoryRegion iomem; - qemu_irq irq; - const struct keymap *map; - int pressed_cnt; - int alt_code; - - uint32_t kpc; - uint32_t kpdk; - uint32_t kprec; - uint32_t kpmk; - uint32_t kpas; - uint32_t kpasmkp[4]; - uint32_t kpkdi; -}; - -static void pxa27x_keypad_find_pressed_key(PXA2xxKeyPadState *kp, int *row, int *col) -{ - int i; - for (i = 0; i < 4; i++) - { - *col = i * 2; - for (*row = 0; *row < 8; (*row)++) { - if (kp->kpasmkp[i] & (1 << *row)) - 
return; - } - *col = i * 2 + 1; - for (*row = 0; *row < 8; (*row)++) { - if (kp->kpasmkp[i] & (1 << (*row + 16))) - return; - } - } -} - -static void pxa27x_keyboard_event (PXA2xxKeyPadState *kp, int keycode) -{ - int row, col, rel, assert_irq = 0; - uint32_t val; - - if (keycode == 0xe0) { - kp->alt_code = 1; - return; - } - - if(!(kp->kpc & KPC_ME)) /* skip if not enabled */ - return; - - rel = (keycode & 0x80) ? 1 : 0; /* key release from qemu */ - keycode &= ~0x80; /* strip qemu key release bit */ - if (kp->alt_code) { - keycode |= 0x80; - kp->alt_code = 0; - } - - row = kp->map[keycode].row; - col = kp->map[keycode].column; - if (row == -1 || col == -1) { - return; - } - - val = KPASMKPx_MKC(row, col); - if (rel) { - if (kp->kpasmkp[col / 2] & val) { - kp->kpasmkp[col / 2] &= ~val; - kp->pressed_cnt--; - assert_irq = 1; - } - } else { - if (!(kp->kpasmkp[col / 2] & val)) { - kp->kpasmkp[col / 2] |= val; - kp->pressed_cnt++; - assert_irq = 1; - } - } - kp->kpas = ((kp->pressed_cnt & 0x1f) << 26) | (0xf << 4) | 0xf; - if (kp->pressed_cnt == 1) { - kp->kpas &= ~((0xf << 4) | 0xf); - if (rel) { - pxa27x_keypad_find_pressed_key(kp, &row, &col); - } - kp->kpas |= ((row & 0xf) << 4) | (col & 0xf); - } - - if (!(kp->kpc & (KPC_AS | KPC_ASACT))) - assert_irq = 0; - - if (assert_irq && (kp->kpc & KPC_MIE)) { - kp->kpc |= KPC_MI; - qemu_irq_raise(kp->irq); - } -} - -static uint64_t pxa2xx_keypad_read(void *opaque, hwaddr offset, - unsigned size) -{ - PXA2xxKeyPadState *s = (PXA2xxKeyPadState *) opaque; - uint32_t tmp; - - switch (offset) { - case KPC: - tmp = s->kpc; - if(tmp & KPC_MI) - s->kpc &= ~(KPC_MI); - if(tmp & KPC_DI) - s->kpc &= ~(KPC_DI); - qemu_irq_lower(s->irq); - return tmp; - case KPDK: - return s->kpdk; - case KPREC: - tmp = s->kprec; - if(tmp & KPREC_OF1) - s->kprec &= ~(KPREC_OF1); - if(tmp & KPREC_UF1) - s->kprec &= ~(KPREC_UF1); - if(tmp & KPREC_OF0) - s->kprec &= ~(KPREC_OF0); - if(tmp & KPREC_UF0) - s->kprec &= ~(KPREC_UF0); - return tmp; - case KPMK: - tmp = s->kpmk; - if(tmp & KPMK_MKP) - s->kpmk &= ~(KPMK_MKP); - return tmp; - case KPAS: - return s->kpas; - case KPASMKP0: - return s->kpasmkp[0]; - case KPASMKP1: - return s->kpasmkp[1]; - case KPASMKP2: - return s->kpasmkp[2]; - case KPASMKP3: - return s->kpasmkp[3]; - case KPKDI: - return s->kpkdi; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad read offset 0x%"HWADDR_PRIx"\n", - __func__, offset); - } - - return 0; -} - -static void pxa2xx_keypad_write(void *opaque, hwaddr offset, - uint64_t value, unsigned size) -{ - PXA2xxKeyPadState *s = (PXA2xxKeyPadState *) opaque; - - switch (offset) { - case KPC: - s->kpc = value; - if (s->kpc & KPC_AS) { - s->kpc &= ~(KPC_AS); - } - break; - case KPDK: - s->kpdk = value; - break; - case KPREC: - s->kprec = value; - break; - case KPMK: - s->kpmk = value; - break; - case KPAS: - s->kpas = value; - break; - case KPASMKP0: - s->kpasmkp[0] = value; - break; - case KPASMKP1: - s->kpasmkp[1] = value; - break; - case KPASMKP2: - s->kpasmkp[2] = value; - break; - case KPASMKP3: - s->kpasmkp[3] = value; - break; - case KPKDI: - s->kpkdi = value; - break; - - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Bad write offset 0x%"HWADDR_PRIx"\n", - __func__, offset); - } -} - -static const MemoryRegionOps pxa2xx_keypad_ops = { - .read = pxa2xx_keypad_read, - .write = pxa2xx_keypad_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static const VMStateDescription vmstate_pxa2xx_keypad = { - .name = "pxa2xx_keypad", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const 
VMStateField[]) { - VMSTATE_UINT32(kpc, PXA2xxKeyPadState), - VMSTATE_UINT32(kpdk, PXA2xxKeyPadState), - VMSTATE_UINT32(kprec, PXA2xxKeyPadState), - VMSTATE_UINT32(kpmk, PXA2xxKeyPadState), - VMSTATE_UINT32(kpas, PXA2xxKeyPadState), - VMSTATE_UINT32_ARRAY(kpasmkp, PXA2xxKeyPadState, 4), - VMSTATE_UINT32(kpkdi, PXA2xxKeyPadState), - VMSTATE_END_OF_LIST() - } -}; - -PXA2xxKeyPadState *pxa27x_keypad_init(MemoryRegion *sysmem, - hwaddr base, - qemu_irq irq) -{ - PXA2xxKeyPadState *s; - - s = g_new0(PXA2xxKeyPadState, 1); - s->irq = irq; - - memory_region_init_io(&s->iomem, NULL, &pxa2xx_keypad_ops, s, - "pxa2xx-keypad", 0x00100000); - memory_region_add_subregion(sysmem, base, &s->iomem); - - vmstate_register(NULL, 0, &vmstate_pxa2xx_keypad, s); - - return s; -} - -void pxa27x_register_keypad(PXA2xxKeyPadState *kp, - const struct keymap *map, int size) -{ - if(!map || size < 0x80) { - fprintf(stderr, "%s - No PXA keypad map defined\n", __func__); - exit(-1); - } - - kp->map = map; - qemu_add_kbd_event_handler((QEMUPutKBDEvent *) pxa27x_keyboard_event, kp); -} diff --git a/hw/input/stellaris_gamepad.c b/hw/input/stellaris_gamepad.c index 17ee42b9fce..fec1161c9c1 100644 --- a/hw/input/stellaris_gamepad.c +++ b/hw/input/stellaris_gamepad.c @@ -77,13 +77,12 @@ static void stellaris_gamepad_reset_enter(Object *obj, ResetType type) memset(s->pressed, 0, s->num_buttons * sizeof(uint8_t)); } -static Property stellaris_gamepad_properties[] = { +static const Property stellaris_gamepad_properties[] = { DEFINE_PROP_ARRAY("keycodes", StellarisGamepad, num_buttons, keycodes, qdev_prop_uint32, uint32_t), - DEFINE_PROP_END_OF_LIST(), }; -static void stellaris_gamepad_class_init(ObjectClass *klass, void *data) +static void stellaris_gamepad_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/input/trace-events b/hw/input/trace-events index 29001a827d9..1484625565b 100644 --- a/hw/input/trace-events +++ b/hw/input/trace-events @@ -46,9 +46,6 @@ ps2_mouse_reset(void *opaque) "%p" hid_kbd_queue_full(void) "queue full" hid_kbd_queue_empty(void) "queue empty" -# tsc2005.c -tsc2005_sense(const char *state) "touchscreen sense %s" - # virtio-input.c virtio_input_queue_full(void) "queue full" diff --git a/hw/input/tsc2005.c b/hw/input/tsc2005.c deleted file mode 100644 index 54a15d24410..00000000000 --- a/hw/input/tsc2005.c +++ /dev/null @@ -1,571 +0,0 @@ -/* - * TI TSC2005 emulator. - * - * Copyright (c) 2006 Andrzej Zaborowski - * Copyright (C) 2008 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - */ - -#include "qemu/osdep.h" -#include "qemu/log.h" -#include "qemu/timer.h" -#include "sysemu/reset.h" -#include "ui/console.h" -#include "hw/input/tsc2xxx.h" -#include "hw/irq.h" -#include "migration/vmstate.h" -#include "trace.h" - -#define TSC_CUT_RESOLUTION(value, p) ((value) >> (16 - (p ? 
12 : 10))) - -typedef struct { - qemu_irq pint; /* Combination of the nPENIRQ and DAV signals */ - QEMUTimer *timer; - uint16_t model; - - int32_t x, y; - bool pressure; - - uint8_t reg, state; - bool irq, command; - uint16_t data, dav; - - bool busy; - bool enabled; - bool host_mode; - int8_t function; - int8_t nextfunction; - bool precision; - bool nextprecision; - uint16_t filter; - uint8_t pin_func; - uint16_t timing[2]; - uint8_t noise; - bool reset; - bool pdst; - bool pnd0; - uint16_t temp_thr[2]; - uint16_t aux_thr[2]; - - int32_t tr[8]; -} TSC2005State; - -enum { - TSC_MODE_XYZ_SCAN = 0x0, - TSC_MODE_XY_SCAN, - TSC_MODE_X, - TSC_MODE_Y, - TSC_MODE_Z, - TSC_MODE_AUX, - TSC_MODE_TEMP1, - TSC_MODE_TEMP2, - TSC_MODE_AUX_SCAN, - TSC_MODE_X_TEST, - TSC_MODE_Y_TEST, - TSC_MODE_TS_TEST, - TSC_MODE_RESERVED, - TSC_MODE_XX_DRV, - TSC_MODE_YY_DRV, - TSC_MODE_YX_DRV, -}; - -static const uint16_t mode_regs[16] = { - 0xf000, /* X, Y, Z scan */ - 0xc000, /* X, Y scan */ - 0x8000, /* X */ - 0x4000, /* Y */ - 0x3000, /* Z */ - 0x0800, /* AUX */ - 0x0400, /* TEMP1 */ - 0x0200, /* TEMP2 */ - 0x0800, /* AUX scan */ - 0x0040, /* X test */ - 0x0020, /* Y test */ - 0x0080, /* Short-circuit test */ - 0x0000, /* Reserved */ - 0x0000, /* X+, X- drivers */ - 0x0000, /* Y+, Y- drivers */ - 0x0000, /* Y+, X- drivers */ -}; - -#define X_TRANSFORM(s) \ - ((s->y * s->tr[0] - s->x * s->tr[1]) / s->tr[2] + s->tr[3]) -#define Y_TRANSFORM(s) \ - ((s->y * s->tr[4] - s->x * s->tr[5]) / s->tr[6] + s->tr[7]) -#define Z1_TRANSFORM(s) \ - ((400 - ((s)->x >> 7) + ((s)->pressure << 10)) << 4) -#define Z2_TRANSFORM(s) \ - ((4000 + ((s)->y >> 7) - ((s)->pressure << 10)) << 4) - -#define AUX_VAL (700 << 4) /* +/- 3 at 12-bit */ -#define TEMP1_VAL (1264 << 4) /* +/- 5 at 12-bit */ -#define TEMP2_VAL (1531 << 4) /* +/- 5 at 12-bit */ - -static uint16_t tsc2005_read(TSC2005State *s, int reg) -{ - uint16_t ret; - - switch (reg) { - case 0x0: /* X */ - s->dav &= ~mode_regs[TSC_MODE_X]; - return TSC_CUT_RESOLUTION(X_TRANSFORM(s), s->precision) + - (s->noise & 3); - case 0x1: /* Y */ - s->dav &= ~mode_regs[TSC_MODE_Y]; - s->noise++; - return TSC_CUT_RESOLUTION(Y_TRANSFORM(s), s->precision) ^ - (s->noise & 3); - case 0x2: /* Z1 */ - s->dav &= 0xdfff; - return TSC_CUT_RESOLUTION(Z1_TRANSFORM(s), s->precision) - - (s->noise & 3); - case 0x3: /* Z2 */ - s->dav &= 0xefff; - return TSC_CUT_RESOLUTION(Z2_TRANSFORM(s), s->precision) | - (s->noise & 3); - - case 0x4: /* AUX */ - s->dav &= ~mode_regs[TSC_MODE_AUX]; - return TSC_CUT_RESOLUTION(AUX_VAL, s->precision); - - case 0x5: /* TEMP1 */ - s->dav &= ~mode_regs[TSC_MODE_TEMP1]; - return TSC_CUT_RESOLUTION(TEMP1_VAL, s->precision) - - (s->noise & 5); - case 0x6: /* TEMP2 */ - s->dav &= 0xdfff; - s->dav &= ~mode_regs[TSC_MODE_TEMP2]; - return TSC_CUT_RESOLUTION(TEMP2_VAL, s->precision) ^ - (s->noise & 3); - - case 0x7: /* Status */ - ret = s->dav | (s->reset << 7) | (s->pdst << 2) | 0x0; - s->dav &= ~(mode_regs[TSC_MODE_X_TEST] | mode_regs[TSC_MODE_Y_TEST] | - mode_regs[TSC_MODE_TS_TEST]); - s->reset = true; - return ret; - - case 0x8: /* AUX high threshold */ - return s->aux_thr[1]; - case 0x9: /* AUX low threshold */ - return s->aux_thr[0]; - - case 0xa: /* TEMP high threshold */ - return s->temp_thr[1]; - case 0xb: /* TEMP low threshold */ - return s->temp_thr[0]; - - case 0xc: /* CFR0 */ - return (s->pressure << 15) | ((!s->busy) << 14) | - (s->nextprecision << 13) | s->timing[0]; - case 0xd: /* CFR1 */ - return s->timing[1]; - case 0xe: /* CFR2 */ - return (s->pin_func << 14) | 
s->filter; - - case 0xf: /* Function select status */ - return s->function >= 0 ? 1 << s->function : 0; - } - - /* Never gets here */ - return 0xffff; -} - -static void tsc2005_write(TSC2005State *s, int reg, uint16_t data) -{ - switch (reg) { - case 0x8: /* AUX high threshold */ - s->aux_thr[1] = data; - break; - case 0x9: /* AUX low threshold */ - s->aux_thr[0] = data; - break; - - case 0xa: /* TEMP high threshold */ - s->temp_thr[1] = data; - break; - case 0xb: /* TEMP low threshold */ - s->temp_thr[0] = data; - break; - - case 0xc: /* CFR0 */ - s->host_mode = (data >> 15) != 0; - if (s->enabled != !(data & 0x4000)) { - s->enabled = !(data & 0x4000); - trace_tsc2005_sense(s->enabled ? "enabled" : "disabled"); - if (s->busy && !s->enabled) { - timer_del(s->timer); - } - s->busy = s->busy && s->enabled; - } - s->nextprecision = (data >> 13) & 1; - s->timing[0] = data & 0x1fff; - if ((s->timing[0] >> 11) == 3) { - qemu_log_mask(LOG_GUEST_ERROR, - "tsc2005_write: illegal conversion clock setting\n"); - } - break; - case 0xd: /* CFR1 */ - s->timing[1] = data & 0xf07; - break; - case 0xe: /* CFR2 */ - s->pin_func = (data >> 14) & 3; - s->filter = data & 0x3fff; - break; - - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: write into read-only register 0x%x\n", - __func__, reg); - } -} - -/* This handles most of the chip's logic. */ -static void tsc2005_pin_update(TSC2005State *s) -{ - int64_t expires; - bool pin_state; - - switch (s->pin_func) { - case 0: - pin_state = !s->pressure && !!s->dav; - break; - case 1: - case 3: - default: - pin_state = !s->dav; - break; - case 2: - pin_state = !s->pressure; - } - - if (pin_state != s->irq) { - s->irq = pin_state; - qemu_set_irq(s->pint, s->irq); - } - - switch (s->nextfunction) { - case TSC_MODE_XYZ_SCAN: - case TSC_MODE_XY_SCAN: - if (!s->host_mode && s->dav) { - s->enabled = false; - } - if (!s->pressure) { - return; - } - /* Fall through */ - case TSC_MODE_AUX_SCAN: - break; - - case TSC_MODE_X: - case TSC_MODE_Y: - case TSC_MODE_Z: - if (!s->pressure) { - return; - } - /* Fall through */ - case TSC_MODE_AUX: - case TSC_MODE_TEMP1: - case TSC_MODE_TEMP2: - case TSC_MODE_X_TEST: - case TSC_MODE_Y_TEST: - case TSC_MODE_TS_TEST: - if (s->dav) { - s->enabled = false; - } - break; - - case TSC_MODE_RESERVED: - case TSC_MODE_XX_DRV: - case TSC_MODE_YY_DRV: - case TSC_MODE_YX_DRV: - default: - return; - } - - if (!s->enabled || s->busy) { - return; - } - - s->busy = true; - s->precision = s->nextprecision; - s->function = s->nextfunction; - s->pdst = !s->pnd0; /* Synchronised on internal clock */ - expires = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - (NANOSECONDS_PER_SECOND >> 7); - timer_mod(s->timer, expires); -} - -static void tsc2005_reset(TSC2005State *s) -{ - s->state = 0; - s->pin_func = 0; - s->enabled = false; - s->busy = false; - s->nextprecision = false; - s->nextfunction = 0; - s->timing[0] = 0; - s->timing[1] = 0; - s->irq = false; - s->dav = 0; - s->reset = false; - s->pdst = true; - s->pnd0 = false; - s->function = -1; - s->temp_thr[0] = 0x000; - s->temp_thr[1] = 0xfff; - s->aux_thr[0] = 0x000; - s->aux_thr[1] = 0xfff; - - tsc2005_pin_update(s); -} - -static uint8_t tsc2005_txrx_word(void *opaque, uint8_t value) -{ - TSC2005State *s = opaque; - uint32_t ret = 0; - - switch (s->state++) { - case 0: - if (value & 0x80) { - /* Command */ - if (value & (1 << 1)) - tsc2005_reset(s); - else { - s->nextfunction = (value >> 3) & 0xf; - s->nextprecision = (value >> 2) & 1; - if (s->enabled != !(value & 1)) { - s->enabled = !(value & 1); - 
trace_tsc2005_sense(s->enabled ? "enabled" : "disabled"); - if (s->busy && !s->enabled) { - timer_del(s->timer); - } - s->busy = s->busy && s->enabled; - } - tsc2005_pin_update(s); - } - - s->state = 0; - } else if (value) { - /* Data transfer */ - s->reg = (value >> 3) & 0xf; - s->pnd0 = (value >> 1) & 1; - s->command = value & 1; - - if (s->command) { - /* Read */ - s->data = tsc2005_read(s, s->reg); - tsc2005_pin_update(s); - } else - s->data = 0; - } else - s->state = 0; - break; - - case 1: - if (s->command) { - ret = (s->data >> 8) & 0xff; - } else { - s->data |= value << 8; - } - break; - - case 2: - if (s->command) - ret = s->data & 0xff; - else { - s->data |= value; - tsc2005_write(s, s->reg, s->data); - tsc2005_pin_update(s); - } - - s->state = 0; - break; - } - - return ret; -} - -uint32_t tsc2005_txrx(void *opaque, uint32_t value, int len) -{ - uint32_t ret = 0; - - len &= ~7; - while (len > 0) { - len -= 8; - ret |= tsc2005_txrx_word(opaque, (value >> len) & 0xff) << len; - } - - return ret; -} - -static void tsc2005_timer_tick(void *opaque) -{ - TSC2005State *s = opaque; - unsigned int function = s->function; - - assert(function < ARRAY_SIZE(mode_regs)); - - /* Timer ticked -- a set of conversions has been finished. */ - - if (!s->busy) { - return; - } - - s->busy = false; - s->dav |= mode_regs[function]; - s->function = -1; - tsc2005_pin_update(s); -} - -static void tsc2005_touchscreen_event(void *opaque, - int x, int y, int z, int buttons_state) -{ - TSC2005State *s = opaque; - int p = s->pressure; - - if (buttons_state) { - s->x = x; - s->y = y; - } - s->pressure = !!buttons_state; - - /* - * Note: We would get better responsiveness in the guest by - * signaling TS events immediately, but for now we simulate - * the first conversion delay for sake of correctness. 
- */ - if (p != s->pressure) { - tsc2005_pin_update(s); - } -} - -static int tsc2005_post_load(void *opaque, int version_id) -{ - TSC2005State *s = (TSC2005State *) opaque; - - s->busy = timer_pending(s->timer); - tsc2005_pin_update(s); - - return 0; -} - -static const VMStateDescription vmstate_tsc2005 = { - .name = "tsc2005", - .version_id = 2, - .minimum_version_id = 2, - .post_load = tsc2005_post_load, - .fields = (const VMStateField []) { - VMSTATE_BOOL(pressure, TSC2005State), - VMSTATE_BOOL(irq, TSC2005State), - VMSTATE_BOOL(command, TSC2005State), - VMSTATE_BOOL(enabled, TSC2005State), - VMSTATE_BOOL(host_mode, TSC2005State), - VMSTATE_BOOL(reset, TSC2005State), - VMSTATE_BOOL(pdst, TSC2005State), - VMSTATE_BOOL(pnd0, TSC2005State), - VMSTATE_BOOL(precision, TSC2005State), - VMSTATE_BOOL(nextprecision, TSC2005State), - VMSTATE_UINT8(reg, TSC2005State), - VMSTATE_UINT8(state, TSC2005State), - VMSTATE_UINT16(data, TSC2005State), - VMSTATE_UINT16(dav, TSC2005State), - VMSTATE_UINT16(filter, TSC2005State), - VMSTATE_INT8(nextfunction, TSC2005State), - VMSTATE_INT8(function, TSC2005State), - VMSTATE_INT32(x, TSC2005State), - VMSTATE_INT32(y, TSC2005State), - VMSTATE_TIMER_PTR(timer, TSC2005State), - VMSTATE_UINT8(pin_func, TSC2005State), - VMSTATE_UINT16_ARRAY(timing, TSC2005State, 2), - VMSTATE_UINT8(noise, TSC2005State), - VMSTATE_UINT16_ARRAY(temp_thr, TSC2005State, 2), - VMSTATE_UINT16_ARRAY(aux_thr, TSC2005State, 2), - VMSTATE_INT32_ARRAY(tr, TSC2005State, 8), - VMSTATE_END_OF_LIST() - } -}; - -void *tsc2005_init(qemu_irq pintdav) -{ - TSC2005State *s; - - s = g_new0(TSC2005State, 1); - s->x = 400; - s->y = 240; - s->pressure = false; - s->precision = s->nextprecision = false; - s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc2005_timer_tick, s); - s->pint = pintdav; - s->model = 0x2005; - - s->tr[0] = 0; - s->tr[1] = 1; - s->tr[2] = 1; - s->tr[3] = 0; - s->tr[4] = 1; - s->tr[5] = 0; - s->tr[6] = 1; - s->tr[7] = 0; - - tsc2005_reset(s); - - qemu_add_mouse_event_handler(tsc2005_touchscreen_event, s, 1, - "QEMU TSC2005-driven Touchscreen"); - - qemu_register_reset((void *) tsc2005_reset, s); - vmstate_register(NULL, 0, &vmstate_tsc2005, s); - - return s; -} - -/* - * Use tslib generated calibration data to generate ADC input values - * from the touchscreen. Assuming 12-bit precision was used during - * tslib calibration. - */ -void tsc2005_set_transform(void *opaque, const MouseTransformInfo *info) -{ - TSC2005State *s = (TSC2005State *) opaque; - - /* This version assumes touchscreen X & Y axis are parallel or - * perpendicular to LCD's X & Y axis in some way. */ - if (abs(info->a[0]) > abs(info->a[1])) { - s->tr[0] = 0; - s->tr[1] = -info->a[6] * info->x; - s->tr[2] = info->a[0]; - s->tr[3] = -info->a[2] / info->a[0]; - s->tr[4] = info->a[6] * info->y; - s->tr[5] = 0; - s->tr[6] = info->a[4]; - s->tr[7] = -info->a[5] / info->a[4]; - } else { - s->tr[0] = info->a[6] * info->y; - s->tr[1] = 0; - s->tr[2] = info->a[1]; - s->tr[3] = -info->a[2] / info->a[1]; - s->tr[4] = 0; - s->tr[5] = -info->a[6] * info->x; - s->tr[6] = info->a[3]; - s->tr[7] = -info->a[5] / info->a[3]; - } - - s->tr[0] >>= 11; - s->tr[1] >>= 11; - s->tr[3] <<= 4; - s->tr[4] >>= 11; - s->tr[5] >>= 11; - s->tr[7] <<= 4; -} diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c deleted file mode 100644 index c4e32c7a42f..00000000000 --- a/hw/input/tsc210x.c +++ /dev/null @@ -1,1241 +0,0 @@ -/* - * TI TSC2102 (touchscreen/sensors/audio controller) emulator. - * TI TSC2301 (touchscreen/sensors/keypad). 
- * - * Copyright (c) 2006 Andrzej Zaborowski - * Copyright (C) 2008 Nokia Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - */ - -#include "qemu/osdep.h" -#include "hw/hw.h" -#include "audio/audio.h" -#include "qemu/timer.h" -#include "qemu/log.h" -#include "sysemu/reset.h" -#include "ui/console.h" -#include "hw/arm/omap.h" /* For I2SCodec */ -#include "hw/boards.h" /* for current_machine */ -#include "hw/input/tsc2xxx.h" -#include "hw/irq.h" -#include "migration/vmstate.h" -#include "qapi/error.h" - -#define TSC_DATA_REGISTERS_PAGE 0x0 -#define TSC_CONTROL_REGISTERS_PAGE 0x1 -#define TSC_AUDIO_REGISTERS_PAGE 0x2 - -#define TSC_VERBOSE - -#define TSC_CUT_RESOLUTION(value, p) ((value) >> (16 - resolution[p])) - -typedef struct { - qemu_irq pint; - qemu_irq kbint; - qemu_irq davint; - QEMUTimer *timer; - QEMUSoundCard card; - uWireSlave chip; - I2SCodec codec; - uint8_t in_fifo[16384]; - uint8_t out_fifo[16384]; - uint16_t model; - - int32_t x, y; - bool pressure; - - uint8_t page, offset; - uint16_t dav; - - bool state; - bool irq; - bool command; - bool busy; - bool enabled; - bool host_mode; - uint8_t function, nextfunction; - uint8_t precision, nextprecision; - uint8_t filter; - uint8_t pin_func; - uint8_t ref; - uint8_t timing; - uint8_t noise; - - uint16_t audio_ctrl1; - uint16_t audio_ctrl2; - uint16_t audio_ctrl3; - uint16_t pll[3]; - uint16_t volume; - int64_t volume_change; - bool softstep; - uint16_t dac_power; - int64_t powerdown; - uint16_t filter_data[0x14]; - - const char *name; - SWVoiceIn *adc_voice[1]; - SWVoiceOut *dac_voice[1]; - int i2s_rx_rate; - int i2s_tx_rate; - - int tr[8]; - - struct { - uint16_t down; - uint16_t mask; - int scan; - int debounce; - int mode; - int intr; - } kb; - int64_t now; /* Time at migration */ -} TSC210xState; - -static const int resolution[4] = { 12, 8, 10, 12 }; - -#define TSC_MODE_NO_SCAN 0x0 -#define TSC_MODE_XY_SCAN 0x1 -#define TSC_MODE_XYZ_SCAN 0x2 -#define TSC_MODE_X 0x3 -#define TSC_MODE_Y 0x4 -#define TSC_MODE_Z 0x5 -#define TSC_MODE_BAT1 0x6 -#define TSC_MODE_BAT2 0x7 -#define TSC_MODE_AUX 0x8 -#define TSC_MODE_AUX_SCAN 0x9 -#define TSC_MODE_TEMP1 0xa -#define TSC_MODE_PORT_SCAN 0xb -#define TSC_MODE_TEMP2 0xc -#define TSC_MODE_XX_DRV 0xd -#define TSC_MODE_YY_DRV 0xe -#define TSC_MODE_YX_DRV 0xf - -static const uint16_t mode_regs[16] = { - 0x0000, /* No scan */ - 0x0600, /* X, Y scan */ - 0x0780, /* X, Y, Z scan */ - 0x0400, /* X */ - 0x0200, /* Y */ - 0x0180, /* Z */ - 0x0040, /* BAT1 */ - 0x0030, /* BAT2 */ - 0x0010, /* AUX */ - 0x0010, /* AUX scan */ - 0x0004, /* TEMP1 */ - 0x0070, /* Port scan */ - 0x0002, /* TEMP2 */ - 0x0000, /* X+, X- drivers */ - 0x0000, /* Y+, Y- drivers */ - 0x0000, /* Y+, X- drivers */ -}; - -#define X_TRANSFORM(s) \ - ((s->y * s->tr[0] - s->x * s->tr[1]) / s->tr[2] + s->tr[3]) -#define Y_TRANSFORM(s) \ - ((s->y * s->tr[4] - s->x * s->tr[5]) / s->tr[6] + s->tr[7]) -#define Z1_TRANSFORM(s) \ - ((400 - ((s)->x >> 7) + 
((s)->pressure << 10)) << 4) -#define Z2_TRANSFORM(s) \ - ((4000 + ((s)->y >> 7) - ((s)->pressure << 10)) << 4) - -#define BAT1_VAL 0x8660 -#define BAT2_VAL 0x0000 -#define AUX1_VAL 0x35c0 -#define AUX2_VAL 0xffff -#define TEMP1_VAL 0x8c70 -#define TEMP2_VAL 0xa5b0 - -#define TSC_POWEROFF_DELAY 50 -#define TSC_SOFTSTEP_DELAY 50 - -static void tsc210x_reset(TSC210xState *s) -{ - s->state = false; - s->pin_func = 2; - s->enabled = false; - s->busy = false; - s->nextfunction = 0; - s->ref = 0; - s->timing = 0; - s->irq = false; - s->dav = 0; - - s->audio_ctrl1 = 0x0000; - s->audio_ctrl2 = 0x4410; - s->audio_ctrl3 = 0x0000; - s->pll[0] = 0x1004; - s->pll[1] = 0x0000; - s->pll[2] = 0x1fff; - s->volume = 0xffff; - s->dac_power = 0x8540; - s->softstep = true; - s->volume_change = 0; - s->powerdown = 0; - s->filter_data[0x00] = 0x6be3; - s->filter_data[0x01] = 0x9666; - s->filter_data[0x02] = 0x675d; - s->filter_data[0x03] = 0x6be3; - s->filter_data[0x04] = 0x9666; - s->filter_data[0x05] = 0x675d; - s->filter_data[0x06] = 0x7d83; - s->filter_data[0x07] = 0x84ee; - s->filter_data[0x08] = 0x7d83; - s->filter_data[0x09] = 0x84ee; - s->filter_data[0x0a] = 0x6be3; - s->filter_data[0x0b] = 0x9666; - s->filter_data[0x0c] = 0x675d; - s->filter_data[0x0d] = 0x6be3; - s->filter_data[0x0e] = 0x9666; - s->filter_data[0x0f] = 0x675d; - s->filter_data[0x10] = 0x7d83; - s->filter_data[0x11] = 0x84ee; - s->filter_data[0x12] = 0x7d83; - s->filter_data[0x13] = 0x84ee; - - s->i2s_tx_rate = 0; - s->i2s_rx_rate = 0; - - s->kb.scan = 1; - s->kb.debounce = 0; - s->kb.mask = 0x0000; - s->kb.mode = 3; - s->kb.intr = 0; - - qemu_set_irq(s->pint, !s->irq); - qemu_set_irq(s->davint, !s->dav); - qemu_irq_raise(s->kbint); -} - -typedef struct { - int rate; - int dsor; - int fsref; -} TSC210xRateInfo; - -/* { rate, dsor, fsref } */ -static const TSC210xRateInfo tsc2102_rates[] = { - /* Fsref / 6.0 */ - { 7350, 63, 1 }, - { 8000, 63, 0 }, - /* Fsref / 6.0 */ - { 7350, 54, 1 }, - { 8000, 54, 0 }, - /* Fsref / 5.0 */ - { 8820, 45, 1 }, - { 9600, 45, 0 }, - /* Fsref / 4.0 */ - { 11025, 36, 1 }, - { 12000, 36, 0 }, - /* Fsref / 3.0 */ - { 14700, 27, 1 }, - { 16000, 27, 0 }, - /* Fsref / 2.0 */ - { 22050, 18, 1 }, - { 24000, 18, 0 }, - /* Fsref / 1.5 */ - { 29400, 9, 1 }, - { 32000, 9, 0 }, - /* Fsref */ - { 44100, 0, 1 }, - { 48000, 0, 0 }, - - { 0, 0, 0 }, -}; - -static inline void tsc210x_out_flush(TSC210xState *s, int len) -{ - uint8_t *data = s->codec.out.fifo + s->codec.out.start; - uint8_t *end = data + len; - - while (data < end) - data += AUD_write(s->dac_voice[0], data, end - data) ?: (end - data); - - s->codec.out.len -= len; - if (s->codec.out.len) - memmove(s->codec.out.fifo, end, s->codec.out.len); - s->codec.out.start = 0; -} - -static void tsc210x_audio_out_cb(TSC210xState *s, int free_b) -{ - if (s->codec.out.len >= free_b) { - tsc210x_out_flush(s, free_b); - return; - } - - s->codec.out.size = MIN(free_b, 16384); - qemu_irq_raise(s->codec.tx_start); -} - -static void tsc2102_audio_rate_update(TSC210xState *s) -{ - const TSC210xRateInfo *rate; - - s->codec.tx_rate = 0; - s->codec.rx_rate = 0; - if (s->dac_power & (1 << 15)) /* PWDNC */ - return; - - for (rate = tsc2102_rates; rate->rate; rate ++) - if (rate->dsor == (s->audio_ctrl1 & 0x3f) && /* DACFS */ - rate->fsref == ((s->audio_ctrl3 >> 13) & 1))/* REFFS */ - break; - if (!rate->rate) { - printf("%s: unknown sampling rate configured\n", __func__); - return; - } - - s->codec.tx_rate = rate->rate; -} - -static void tsc2102_audio_output_update(TSC210xState *s) -{ - 
int enable; - struct audsettings fmt; - - if (s->dac_voice[0]) { - tsc210x_out_flush(s, s->codec.out.len); - s->codec.out.size = 0; - AUD_set_active_out(s->dac_voice[0], 0); - AUD_close_out(&s->card, s->dac_voice[0]); - s->dac_voice[0] = NULL; - } - s->codec.cts = 0; - - enable = - (~s->dac_power & (1 << 15)) && /* PWDNC */ - (~s->dac_power & (1 << 10)); /* DAPWDN */ - if (!enable || !s->codec.tx_rate) - return; - - /* Force our own sampling rate even in slave DAC mode */ - fmt.endianness = 0; - fmt.nchannels = 2; - fmt.freq = s->codec.tx_rate; - fmt.fmt = AUDIO_FORMAT_S16; - - s->dac_voice[0] = AUD_open_out(&s->card, s->dac_voice[0], - "tsc2102.sink", s, (void *) tsc210x_audio_out_cb, &fmt); - if (s->dac_voice[0]) { - s->codec.cts = 1; - AUD_set_active_out(s->dac_voice[0], 1); - } -} - -static uint16_t tsc2102_data_register_read(TSC210xState *s, int reg) -{ - switch (reg) { - case 0x00: /* X */ - s->dav &= 0xfbff; - return TSC_CUT_RESOLUTION(X_TRANSFORM(s), s->precision) + - (s->noise & 3); - - case 0x01: /* Y */ - s->noise ++; - s->dav &= 0xfdff; - return TSC_CUT_RESOLUTION(Y_TRANSFORM(s), s->precision) ^ - (s->noise & 3); - - case 0x02: /* Z1 */ - s->dav &= 0xfeff; - return TSC_CUT_RESOLUTION(Z1_TRANSFORM(s), s->precision) - - (s->noise & 3); - - case 0x03: /* Z2 */ - s->dav &= 0xff7f; - return TSC_CUT_RESOLUTION(Z2_TRANSFORM(s), s->precision) | - (s->noise & 3); - - case 0x04: /* KPData */ - if ((s->model & 0xff00) == 0x2300) { - if (s->kb.intr && (s->kb.mode & 2)) { - s->kb.intr = 0; - qemu_irq_raise(s->kbint); - } - return s->kb.down; - } - - return 0xffff; - - case 0x05: /* BAT1 */ - s->dav &= 0xffbf; - return TSC_CUT_RESOLUTION(BAT1_VAL, s->precision) + - (s->noise & 6); - - case 0x06: /* BAT2 */ - s->dav &= 0xffdf; - return TSC_CUT_RESOLUTION(BAT2_VAL, s->precision); - - case 0x07: /* AUX1 */ - s->dav &= 0xffef; - return TSC_CUT_RESOLUTION(AUX1_VAL, s->precision); - - case 0x08: /* AUX2 */ - s->dav &= 0xfff7; - return 0xffff; - - case 0x09: /* TEMP1 */ - s->dav &= 0xfffb; - return TSC_CUT_RESOLUTION(TEMP1_VAL, s->precision) - - (s->noise & 5); - - case 0x0a: /* TEMP2 */ - s->dav &= 0xfffd; - return TSC_CUT_RESOLUTION(TEMP2_VAL, s->precision) ^ - (s->noise & 3); - - case 0x0b: /* DAC */ - s->dav &= 0xfffe; - return 0xffff; - - default: -#ifdef TSC_VERBOSE - fprintf(stderr, "tsc2102_data_register_read: " - "no such register: 0x%02x\n", reg); -#endif - return 0xffff; - } -} - -static uint16_t tsc2102_control_register_read( - TSC210xState *s, int reg) -{ - switch (reg) { - case 0x00: /* TSC ADC */ - return (s->pressure << 15) | ((!s->busy) << 14) | - (s->nextfunction << 10) | (s->nextprecision << 8) | s->filter; - - case 0x01: /* Status / Keypad Control */ - if ((s->model & 0xff00) == 0x2100) - return (s->pin_func << 14) | ((!s->enabled) << 13) | - (s->host_mode << 12) | ((!!s->dav) << 11) | s->dav; - else - return (s->kb.intr << 15) | ((s->kb.scan || !s->kb.down) << 14) | - (s->kb.debounce << 11); - - case 0x02: /* DAC Control */ - if ((s->model & 0xff00) == 0x2300) - return s->dac_power & 0x8000; - else - goto bad_reg; - - case 0x03: /* Reference */ - return s->ref; - - case 0x04: /* Reset */ - return 0xffff; - - case 0x05: /* Configuration */ - return s->timing; - - case 0x06: /* Secondary configuration */ - if ((s->model & 0xff00) == 0x2100) - goto bad_reg; - return ((!s->dav) << 15) | ((s->kb.mode & 1) << 14) | s->pll[2]; - - case 0x10: /* Keypad Mask */ - if ((s->model & 0xff00) == 0x2100) - goto bad_reg; - return s->kb.mask; - - default: - bad_reg: -#ifdef TSC_VERBOSE - 
fprintf(stderr, "tsc2102_control_register_read: " - "no such register: 0x%02x\n", reg); -#endif - return 0xffff; - } -} - -static uint16_t tsc2102_audio_register_read(TSC210xState *s, int reg) -{ - int l_ch, r_ch; - uint16_t val; - - switch (reg) { - case 0x00: /* Audio Control 1 */ - return s->audio_ctrl1; - - case 0x01: - return 0xff00; - - case 0x02: /* DAC Volume Control */ - return s->volume; - - case 0x03: - return 0x8b00; - - case 0x04: /* Audio Control 2 */ - l_ch = 1; - r_ch = 1; - if (s->softstep && !(s->dac_power & (1 << 10))) { - l_ch = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) > - s->volume_change + TSC_SOFTSTEP_DELAY); - r_ch = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) > - s->volume_change + TSC_SOFTSTEP_DELAY); - } - - return s->audio_ctrl2 | (l_ch << 3) | (r_ch << 2); - - case 0x05: /* Stereo DAC Power Control */ - return 0x2aa0 | s->dac_power | - (((s->dac_power & (1 << 10)) && - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) > - s->powerdown + TSC_POWEROFF_DELAY)) << 6); - - case 0x06: /* Audio Control 3 */ - val = s->audio_ctrl3 | 0x0001; - s->audio_ctrl3 &= 0xff3f; - return val; - - case 0x07: /* LCH_BASS_BOOST_N0 */ - case 0x08: /* LCH_BASS_BOOST_N1 */ - case 0x09: /* LCH_BASS_BOOST_N2 */ - case 0x0a: /* LCH_BASS_BOOST_N3 */ - case 0x0b: /* LCH_BASS_BOOST_N4 */ - case 0x0c: /* LCH_BASS_BOOST_N5 */ - case 0x0d: /* LCH_BASS_BOOST_D1 */ - case 0x0e: /* LCH_BASS_BOOST_D2 */ - case 0x0f: /* LCH_BASS_BOOST_D4 */ - case 0x10: /* LCH_BASS_BOOST_D5 */ - case 0x11: /* RCH_BASS_BOOST_N0 */ - case 0x12: /* RCH_BASS_BOOST_N1 */ - case 0x13: /* RCH_BASS_BOOST_N2 */ - case 0x14: /* RCH_BASS_BOOST_N3 */ - case 0x15: /* RCH_BASS_BOOST_N4 */ - case 0x16: /* RCH_BASS_BOOST_N5 */ - case 0x17: /* RCH_BASS_BOOST_D1 */ - case 0x18: /* RCH_BASS_BOOST_D2 */ - case 0x19: /* RCH_BASS_BOOST_D4 */ - case 0x1a: /* RCH_BASS_BOOST_D5 */ - return s->filter_data[reg - 0x07]; - - case 0x1b: /* PLL Programmability 1 */ - return s->pll[0]; - - case 0x1c: /* PLL Programmability 2 */ - return s->pll[1]; - - case 0x1d: /* Audio Control 4 */ - return (!s->softstep) << 14; - - default: -#ifdef TSC_VERBOSE - fprintf(stderr, "tsc2102_audio_register_read: " - "no such register: 0x%02x\n", reg); -#endif - return 0xffff; - } -} - -static void tsc2102_data_register_write( - TSC210xState *s, int reg, uint16_t value) -{ - switch (reg) { - case 0x00: /* X */ - case 0x01: /* Y */ - case 0x02: /* Z1 */ - case 0x03: /* Z2 */ - case 0x05: /* BAT1 */ - case 0x06: /* BAT2 */ - case 0x07: /* AUX1 */ - case 0x08: /* AUX2 */ - case 0x09: /* TEMP1 */ - case 0x0a: /* TEMP2 */ - return; - - default: - qemu_log_mask(LOG_GUEST_ERROR, "tsc2102_data_register_write: " - "no such register: 0x%02x\n", reg); - } -} - -static void tsc2102_control_register_write( - TSC210xState *s, int reg, uint16_t value) -{ - switch (reg) { - case 0x00: /* TSC ADC */ - s->host_mode = value >> 15; - s->enabled = !(value & 0x4000); - if (s->busy && !s->enabled) - timer_del(s->timer); - s->busy = s->busy && s->enabled; - s->nextfunction = (value >> 10) & 0xf; - s->nextprecision = (value >> 8) & 3; - s->filter = value & 0xff; - return; - - case 0x01: /* Status / Keypad Control */ - if ((s->model & 0xff00) == 0x2100) - s->pin_func = value >> 14; - else { - s->kb.scan = (value >> 14) & 1; - s->kb.debounce = (value >> 11) & 7; - if (s->kb.intr && s->kb.scan) { - s->kb.intr = 0; - qemu_irq_raise(s->kbint); - } - } - return; - - case 0x02: /* DAC Control */ - if ((s->model & 0xff00) == 0x2300) { - s->dac_power &= 0x7fff; - s->dac_power |= 0x8000 & value; - } else - goto 
bad_reg; - break; - - case 0x03: /* Reference */ - s->ref = value & 0x1f; - return; - - case 0x04: /* Reset */ - if (value == 0xbb00) { - if (s->busy) - timer_del(s->timer); - tsc210x_reset(s); -#ifdef TSC_VERBOSE - } else { - fprintf(stderr, "tsc2102_control_register_write: " - "wrong value written into RESET\n"); -#endif - } - return; - - case 0x05: /* Configuration */ - s->timing = value & 0x3f; -#ifdef TSC_VERBOSE - if (value & ~0x3f) - fprintf(stderr, "tsc2102_control_register_write: " - "wrong value written into CONFIG\n"); -#endif - return; - - case 0x06: /* Secondary configuration */ - if ((s->model & 0xff00) == 0x2100) - goto bad_reg; - s->kb.mode = value >> 14; - s->pll[2] = value & 0x3ffff; - return; - - case 0x10: /* Keypad Mask */ - if ((s->model & 0xff00) == 0x2100) - goto bad_reg; - s->kb.mask = value; - return; - - default: - bad_reg: - qemu_log_mask(LOG_GUEST_ERROR, "tsc2102_control_register_write: " - "no such register: 0x%02x\n", reg); - } -} - -static void tsc2102_audio_register_write( - TSC210xState *s, int reg, uint16_t value) -{ - switch (reg) { - case 0x00: /* Audio Control 1 */ - s->audio_ctrl1 = value & 0x0f3f; -#ifdef TSC_VERBOSE - if ((value & ~0x0f3f) || ((value & 7) != ((value >> 3) & 7))) - fprintf(stderr, "tsc2102_audio_register_write: " - "wrong value written into Audio 1\n"); -#endif - tsc2102_audio_rate_update(s); - tsc2102_audio_output_update(s); - return; - - case 0x01: -#ifdef TSC_VERBOSE - if (value != 0xff00) - fprintf(stderr, "tsc2102_audio_register_write: " - "wrong value written into reg 0x01\n"); -#endif - return; - - case 0x02: /* DAC Volume Control */ - s->volume = value; - s->volume_change = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - return; - - case 0x03: -#ifdef TSC_VERBOSE - if (value != 0x8b00) - fprintf(stderr, "tsc2102_audio_register_write: " - "wrong value written into reg 0x03\n"); -#endif - return; - - case 0x04: /* Audio Control 2 */ - s->audio_ctrl2 = value & 0xf7f2; -#ifdef TSC_VERBOSE - if (value & ~0xf7fd) - fprintf(stderr, "tsc2102_audio_register_write: " - "wrong value written into Audio 2\n"); -#endif - return; - - case 0x05: /* Stereo DAC Power Control */ - if ((value & ~s->dac_power) & (1 << 10)) - s->powerdown = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - - s->dac_power = value & 0x9543; -#ifdef TSC_VERBOSE - if ((value & ~0x9543) != 0x2aa0) - fprintf(stderr, "tsc2102_audio_register_write: " - "wrong value written into Power\n"); -#endif - tsc2102_audio_rate_update(s); - tsc2102_audio_output_update(s); - return; - - case 0x06: /* Audio Control 3 */ - s->audio_ctrl3 &= 0x00c0; - s->audio_ctrl3 |= value & 0xf800; -#ifdef TSC_VERBOSE - if (value & ~0xf8c7) - fprintf(stderr, "tsc2102_audio_register_write: " - "wrong value written into Audio 3\n"); -#endif - tsc2102_audio_output_update(s); - return; - - case 0x07: /* LCH_BASS_BOOST_N0 */ - case 0x08: /* LCH_BASS_BOOST_N1 */ - case 0x09: /* LCH_BASS_BOOST_N2 */ - case 0x0a: /* LCH_BASS_BOOST_N3 */ - case 0x0b: /* LCH_BASS_BOOST_N4 */ - case 0x0c: /* LCH_BASS_BOOST_N5 */ - case 0x0d: /* LCH_BASS_BOOST_D1 */ - case 0x0e: /* LCH_BASS_BOOST_D2 */ - case 0x0f: /* LCH_BASS_BOOST_D4 */ - case 0x10: /* LCH_BASS_BOOST_D5 */ - case 0x11: /* RCH_BASS_BOOST_N0 */ - case 0x12: /* RCH_BASS_BOOST_N1 */ - case 0x13: /* RCH_BASS_BOOST_N2 */ - case 0x14: /* RCH_BASS_BOOST_N3 */ - case 0x15: /* RCH_BASS_BOOST_N4 */ - case 0x16: /* RCH_BASS_BOOST_N5 */ - case 0x17: /* RCH_BASS_BOOST_D1 */ - case 0x18: /* RCH_BASS_BOOST_D2 */ - case 0x19: /* RCH_BASS_BOOST_D4 */ - case 0x1a: /* RCH_BASS_BOOST_D5 */ - 
s->filter_data[reg - 0x07] = value; - return; - - case 0x1b: /* PLL Programmability 1 */ - s->pll[0] = value & 0xfffc; -#ifdef TSC_VERBOSE - if (value & ~0xfffc) - fprintf(stderr, "tsc2102_audio_register_write: " - "wrong value written into PLL 1\n"); -#endif - return; - - case 0x1c: /* PLL Programmability 2 */ - s->pll[1] = value & 0xfffc; -#ifdef TSC_VERBOSE - if (value & ~0xfffc) - fprintf(stderr, "tsc2102_audio_register_write: " - "wrong value written into PLL 2\n"); -#endif - return; - - case 0x1d: /* Audio Control 4 */ - s->softstep = !(value & 0x4000); -#ifdef TSC_VERBOSE - if (value & ~0x4000) - fprintf(stderr, "tsc2102_audio_register_write: " - "wrong value written into Audio 4\n"); -#endif - return; - - default: - qemu_log_mask(LOG_GUEST_ERROR, "tsc2102_audio_register_write: " - "no such register: 0x%02x\n", reg); - } -} - -/* This handles most of the chip logic. */ -static void tsc210x_pin_update(TSC210xState *s) -{ - int64_t expires; - bool pin_state; - - switch (s->pin_func) { - case 0: - pin_state = s->pressure; - break; - case 1: - pin_state = !!s->dav; - break; - case 2: - default: - pin_state = s->pressure && !s->dav; - } - - if (!s->enabled) - pin_state = false; - - if (pin_state != s->irq) { - s->irq = pin_state; - qemu_set_irq(s->pint, !s->irq); - } - - switch (s->nextfunction) { - case TSC_MODE_XY_SCAN: - case TSC_MODE_XYZ_SCAN: - if (!s->pressure) - return; - break; - - case TSC_MODE_X: - case TSC_MODE_Y: - case TSC_MODE_Z: - if (!s->pressure) - return; - /* Fall through */ - case TSC_MODE_BAT1: - case TSC_MODE_BAT2: - case TSC_MODE_AUX: - case TSC_MODE_TEMP1: - case TSC_MODE_TEMP2: - if (s->dav) - s->enabled = false; - break; - - case TSC_MODE_AUX_SCAN: - case TSC_MODE_PORT_SCAN: - break; - - case TSC_MODE_NO_SCAN: - case TSC_MODE_XX_DRV: - case TSC_MODE_YY_DRV: - case TSC_MODE_YX_DRV: - default: - return; - } - - if (!s->enabled || s->busy || s->dav) - return; - - s->busy = true; - s->precision = s->nextprecision; - s->function = s->nextfunction; - expires = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - (NANOSECONDS_PER_SECOND >> 10); - timer_mod(s->timer, expires); -} - -static uint16_t tsc210x_read(TSC210xState *s) -{ - uint16_t ret = 0x0000; - - if (!s->command) - fprintf(stderr, "tsc210x_read: SPI underrun!\n"); - - switch (s->page) { - case TSC_DATA_REGISTERS_PAGE: - ret = tsc2102_data_register_read(s, s->offset); - if (!s->dav) - qemu_irq_raise(s->davint); - break; - case TSC_CONTROL_REGISTERS_PAGE: - ret = tsc2102_control_register_read(s, s->offset); - break; - case TSC_AUDIO_REGISTERS_PAGE: - ret = tsc2102_audio_register_read(s, s->offset); - break; - default: - hw_error("tsc210x_read: wrong memory page\n"); - } - - tsc210x_pin_update(s); - - /* Allow sequential reads. */ - s->offset ++; - s->state = false; - return ret; -} - -static void tsc210x_write(TSC210xState *s, uint16_t value) -{ - /* - * This is a two-state state machine for reading - * command and data every second time. 
- */ - if (!s->state) { - s->command = (value >> 15) != 0; - s->page = (value >> 11) & 0x0f; - s->offset = (value >> 5) & 0x3f; - s->state = true; - } else { - if (s->command) - fprintf(stderr, "tsc210x_write: SPI overrun!\n"); - else - switch (s->page) { - case TSC_DATA_REGISTERS_PAGE: - tsc2102_data_register_write(s, s->offset, value); - break; - case TSC_CONTROL_REGISTERS_PAGE: - tsc2102_control_register_write(s, s->offset, value); - break; - case TSC_AUDIO_REGISTERS_PAGE: - tsc2102_audio_register_write(s, s->offset, value); - break; - default: - hw_error("tsc210x_write: wrong memory page\n"); - } - - tsc210x_pin_update(s); - s->state = false; - } -} - -uint32_t tsc210x_txrx(void *opaque, uint32_t value, int len) -{ - TSC210xState *s = opaque; - uint32_t ret = 0; - - if (len != 16) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: bad SPI word width %i\n", __func__, len); - return 0; - } - - /* TODO: sequential reads etc - how do we make sure the host doesn't - * unintentionally read out a conversion result from a register while - * transmitting the command word of the next command? */ - if (!value || (s->state && s->command)) - ret = tsc210x_read(s); - if (value || (s->state && !s->command)) - tsc210x_write(s, value); - - return ret; -} - -static void tsc210x_timer_tick(void *opaque) -{ - TSC210xState *s = opaque; - - /* Timer ticked -- a set of conversions has been finished. */ - - if (!s->busy) - return; - - s->busy = false; - s->dav |= mode_regs[s->function]; - tsc210x_pin_update(s); - qemu_irq_lower(s->davint); -} - -static void tsc210x_touchscreen_event(void *opaque, - int x, int y, int z, int buttons_state) -{ - TSC210xState *s = opaque; - int p = s->pressure; - - if (buttons_state) { - s->x = x; - s->y = y; - } - s->pressure = !!buttons_state; - - /* - * Note: We would get better responsiveness in the guest by - * signaling TS events immediately, but for now we simulate - * the first conversion delay for sake of correctness. 
- */ - if (p != s->pressure) - tsc210x_pin_update(s); -} - -static void tsc210x_i2s_swallow(TSC210xState *s) -{ - if (s->dac_voice[0]) - tsc210x_out_flush(s, s->codec.out.len); - else - s->codec.out.len = 0; -} - -static void tsc210x_i2s_set_rate(TSC210xState *s, int in, int out) -{ - s->i2s_tx_rate = out; - s->i2s_rx_rate = in; -} - -static int tsc210x_pre_save(void *opaque) -{ - TSC210xState *s = (TSC210xState *) opaque; - s->now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - - return 0; -} - -static int tsc210x_post_load(void *opaque, int version_id) -{ - TSC210xState *s = (TSC210xState *) opaque; - int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - - if (s->function >= ARRAY_SIZE(mode_regs)) { - return -EINVAL; - } - if (s->nextfunction >= ARRAY_SIZE(mode_regs)) { - return -EINVAL; - } - if (s->precision >= ARRAY_SIZE(resolution)) { - return -EINVAL; - } - if (s->nextprecision >= ARRAY_SIZE(resolution)) { - return -EINVAL; - } - - s->volume_change -= s->now; - s->volume_change += now; - s->powerdown -= s->now; - s->powerdown += now; - - s->busy = timer_pending(s->timer); - qemu_set_irq(s->pint, !s->irq); - qemu_set_irq(s->davint, !s->dav); - - return 0; -} - -static const VMStateField vmstatefields_tsc210x[] = { - VMSTATE_BOOL(enabled, TSC210xState), - VMSTATE_BOOL(host_mode, TSC210xState), - VMSTATE_BOOL(irq, TSC210xState), - VMSTATE_BOOL(command, TSC210xState), - VMSTATE_BOOL(pressure, TSC210xState), - VMSTATE_BOOL(softstep, TSC210xState), - VMSTATE_BOOL(state, TSC210xState), - VMSTATE_UINT16(dav, TSC210xState), - VMSTATE_INT32(x, TSC210xState), - VMSTATE_INT32(y, TSC210xState), - VMSTATE_UINT8(offset, TSC210xState), - VMSTATE_UINT8(page, TSC210xState), - VMSTATE_UINT8(filter, TSC210xState), - VMSTATE_UINT8(pin_func, TSC210xState), - VMSTATE_UINT8(ref, TSC210xState), - VMSTATE_UINT8(timing, TSC210xState), - VMSTATE_UINT8(noise, TSC210xState), - VMSTATE_UINT8(function, TSC210xState), - VMSTATE_UINT8(nextfunction, TSC210xState), - VMSTATE_UINT8(precision, TSC210xState), - VMSTATE_UINT8(nextprecision, TSC210xState), - VMSTATE_UINT16(audio_ctrl1, TSC210xState), - VMSTATE_UINT16(audio_ctrl2, TSC210xState), - VMSTATE_UINT16(audio_ctrl3, TSC210xState), - VMSTATE_UINT16_ARRAY(pll, TSC210xState, 3), - VMSTATE_UINT16(volume, TSC210xState), - VMSTATE_UINT16(dac_power, TSC210xState), - VMSTATE_INT64(volume_change, TSC210xState), - VMSTATE_INT64(powerdown, TSC210xState), - VMSTATE_INT64(now, TSC210xState), - VMSTATE_UINT16_ARRAY(filter_data, TSC210xState, 0x14), - VMSTATE_TIMER_PTR(timer, TSC210xState), - VMSTATE_END_OF_LIST() -}; - -static const VMStateDescription vmstate_tsc2102 = { - .name = "tsc2102", - .version_id = 1, - .minimum_version_id = 1, - .pre_save = tsc210x_pre_save, - .post_load = tsc210x_post_load, - .fields = vmstatefields_tsc210x, -}; - -static const VMStateDescription vmstate_tsc2301 = { - .name = "tsc2301", - .version_id = 1, - .minimum_version_id = 1, - .pre_save = tsc210x_pre_save, - .post_load = tsc210x_post_load, - .fields = vmstatefields_tsc210x, -}; - -static void tsc210x_init(TSC210xState *s, - const char *name, - const VMStateDescription *vmsd) -{ - s->tr[0] = 0; - s->tr[1] = 1; - s->tr[2] = 1; - s->tr[3] = 0; - s->tr[4] = 1; - s->tr[5] = 0; - s->tr[6] = 1; - s->tr[7] = 0; - - s->chip.opaque = s; - s->chip.send = (void *) tsc210x_write; - s->chip.receive = (void *) tsc210x_read; - - s->codec.opaque = s; - s->codec.tx_swallow = (void *) tsc210x_i2s_swallow; - s->codec.set_rate = (void *) tsc210x_i2s_set_rate; - s->codec.in.fifo = s->in_fifo; - s->codec.out.fifo = 
s->out_fifo; - - tsc210x_reset(s); - - qemu_add_mouse_event_handler(tsc210x_touchscreen_event, s, 1, name); - - if (current_machine->audiodev) { - s->card.name = g_strdup(current_machine->audiodev); - s->card.state = audio_state_by_name(s->card.name, &error_fatal); - } - AUD_register_card(s->name, &s->card, &error_fatal); - - qemu_register_reset((void *) tsc210x_reset, s); - vmstate_register(NULL, 0, vmsd, s); -} - -uWireSlave *tsc2102_init(qemu_irq pint) -{ - TSC210xState *s; - - s = g_new0(TSC210xState, 1); - s->x = 160; - s->y = 160; - s->pressure = 0; - s->precision = s->nextprecision = 0; - s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc210x_timer_tick, s); - s->pint = pint; - s->model = 0x2102; - s->name = "tsc2102"; - - tsc210x_init(s, "QEMU TSC2102-driven Touchscreen", &vmstate_tsc2102); - - return &s->chip; -} - -uWireSlave *tsc2301_init(qemu_irq penirq, qemu_irq kbirq, qemu_irq dav) -{ - TSC210xState *s; - - s = g_new0(TSC210xState, 1); - s->x = 400; - s->y = 240; - s->pressure = 0; - s->precision = s->nextprecision = 0; - s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc210x_timer_tick, s); - s->pint = penirq; - s->kbint = kbirq; - s->davint = dav; - s->model = 0x2301; - s->name = "tsc2301"; - - tsc210x_init(s, "QEMU TSC2301-driven Touchscreen", &vmstate_tsc2301); - - return &s->chip; -} - -I2SCodec *tsc210x_codec(uWireSlave *chip) -{ - TSC210xState *s = (TSC210xState *) chip->opaque; - - return &s->codec; -} - -/* - * Use tslib generated calibration data to generate ADC input values - * from the touchscreen. Assuming 12-bit precision was used during - * tslib calibration. - */ -void tsc210x_set_transform(uWireSlave *chip, const MouseTransformInfo *info) -{ - TSC210xState *s = (TSC210xState *) chip->opaque; -#if 0 - int64_t ltr[8]; - - ltr[0] = (int64_t) info->a[1] * info->y; - ltr[1] = (int64_t) info->a[4] * info->x; - ltr[2] = (int64_t) info->a[1] * info->a[3] - - (int64_t) info->a[4] * info->a[0]; - ltr[3] = (int64_t) info->a[2] * info->a[4] - - (int64_t) info->a[5] * info->a[1]; - ltr[4] = (int64_t) info->a[0] * info->y; - ltr[5] = (int64_t) info->a[3] * info->x; - ltr[6] = (int64_t) info->a[4] * info->a[0] - - (int64_t) info->a[1] * info->a[3]; - ltr[7] = (int64_t) info->a[2] * info->a[3] - - (int64_t) info->a[5] * info->a[0]; - - /* Avoid integer overflow */ - s->tr[0] = ltr[0] >> 11; - s->tr[1] = ltr[1] >> 11; - s->tr[2] = muldiv64(ltr[2], 1, info->a[6]); - s->tr[3] = muldiv64(ltr[3], 1 << 4, ltr[2]); - s->tr[4] = ltr[4] >> 11; - s->tr[5] = ltr[5] >> 11; - s->tr[6] = muldiv64(ltr[6], 1, info->a[6]); - s->tr[7] = muldiv64(ltr[7], 1 << 4, ltr[6]); -#else - - /* This version assumes touchscreen X & Y axis are parallel or - * perpendicular to LCD's X & Y axis in some way. 
*/ - if (abs(info->a[0]) > abs(info->a[1])) { - s->tr[0] = 0; - s->tr[1] = -info->a[6] * info->x; - s->tr[2] = info->a[0]; - s->tr[3] = -info->a[2] / info->a[0]; - s->tr[4] = info->a[6] * info->y; - s->tr[5] = 0; - s->tr[6] = info->a[4]; - s->tr[7] = -info->a[5] / info->a[4]; - } else { - s->tr[0] = info->a[6] * info->y; - s->tr[1] = 0; - s->tr[2] = info->a[1]; - s->tr[3] = -info->a[2] / info->a[1]; - s->tr[4] = 0; - s->tr[5] = -info->a[6] * info->x; - s->tr[6] = info->a[3]; - s->tr[7] = -info->a[5] / info->a[3]; - } - - s->tr[0] >>= 11; - s->tr[1] >>= 11; - s->tr[3] <<= 4; - s->tr[4] >>= 11; - s->tr[5] >>= 11; - s->tr[7] <<= 4; -#endif -} - -void tsc210x_key_event(uWireSlave *chip, int key, int down) -{ - TSC210xState *s = (TSC210xState *) chip->opaque; - - if (down) - s->kb.down |= 1 << key; - else - s->kb.down &= ~(1 << key); - - if (down && (s->kb.down & ~s->kb.mask) && !s->kb.intr) { - s->kb.intr = 1; - qemu_irq_lower(s->kbint); - } else if (s->kb.intr && !(s->kb.down & ~s->kb.mask) && - !(s->kb.mode & 1)) { - s->kb.intr = 0; - qemu_irq_raise(s->kbint); - } -} diff --git a/hw/input/virtio-input-hid.c b/hw/input/virtio-input-hid.c index 45e4d4c75d9..d986c3c16e3 100644 --- a/hw/input/virtio-input-hid.c +++ b/hw/input/virtio-input-hid.c @@ -237,13 +237,12 @@ static void virtio_input_hid_handle_status(VirtIOInput *vinput, } } -static Property virtio_input_hid_properties[] = { +static const Property virtio_input_hid_properties[] = { DEFINE_PROP_STRING("display", VirtIOInputHID, display), DEFINE_PROP_UINT32("head", VirtIOInputHID, head, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_input_hid_class_init(ObjectClass *klass, void *data) +static void virtio_input_hid_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOInputClass *vic = VIRTIO_INPUT_CLASS(klass); @@ -380,12 +379,11 @@ static struct virtio_input_config virtio_mouse_config_v2[] = { { /* end of list */ }, }; -static Property virtio_mouse_properties[] = { +static const Property virtio_mouse_properties[] = { DEFINE_PROP_BOOL("wheel-axis", VirtIOInputHID, wheel_axis, true), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_mouse_class_init(ObjectClass *klass, void *data) +static void virtio_mouse_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -505,12 +503,11 @@ static struct virtio_input_config virtio_tablet_config_v2[] = { { /* end of list */ }, }; -static Property virtio_tablet_properties[] = { +static const Property virtio_tablet_properties[] = { DEFINE_PROP_BOOL("wheel-axis", VirtIOInputHID, wheel_axis, true), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_tablet_class_init(ObjectClass *klass, void *data) +static void virtio_tablet_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/input/virtio-input-host.c b/hw/input/virtio-input-host.c index fea7139382a..bbfee9d3b9f 100644 --- a/hw/input/virtio-input-host.c +++ b/hw/input/virtio-input-host.c @@ -178,7 +178,6 @@ static void virtio_input_host_realize(DeviceState *dev, Error **errp) err_close: close(vih->fd); vih->fd = -1; - return; } static void virtio_input_host_unrealize(DeviceState *dev) @@ -221,12 +220,11 @@ static const VMStateDescription vmstate_virtio_input_host = { .unmigratable = 1, }; -static Property virtio_input_host_properties[] = { +static const Property virtio_input_host_properties[] = { DEFINE_PROP_STRING("evdev", VirtIOInputHost, evdev), - DEFINE_PROP_END_OF_LIST(), }; -static void 
virtio_input_host_class_init(ObjectClass *klass, void *data) +static void virtio_input_host_class_init(ObjectClass *klass, const void *data) { VirtIOInputClass *vic = VIRTIO_INPUT_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/input/virtio-input.c b/hw/input/virtio-input.c index 3bcdae41b2f..a3f554f2110 100644 --- a/hw/input/virtio-input.c +++ b/hw/input/virtio-input.c @@ -189,7 +189,7 @@ static uint64_t virtio_input_get_features(VirtIODevice *vdev, uint64_t f, return f; } -static void virtio_input_set_status(VirtIODevice *vdev, uint8_t val) +static int virtio_input_set_status(VirtIODevice *vdev, uint8_t val) { VirtIOInputClass *vic = VIRTIO_INPUT_GET_CLASS(vdev); VirtIOInput *vinput = VIRTIO_INPUT(vdev); @@ -202,6 +202,7 @@ static void virtio_input_set_status(VirtIODevice *vdev, uint8_t val) } } } + return 0; } static void virtio_input_reset(VirtIODevice *vdev) @@ -300,12 +301,11 @@ static const VMStateDescription vmstate_virtio_input = { .post_load = virtio_input_post_load, }; -static Property virtio_input_properties[] = { +static const Property virtio_input_properties[] = { DEFINE_PROP_STRING("serial", VirtIOInput, serial), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_input_class_init(ObjectClass *klass, void *data) +static void virtio_input_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/intc/Kconfig b/hw/intc/Kconfig index 58b6d3a7100..7547528f2c2 100644 --- a/hw/intc/Kconfig +++ b/hw/intc/Kconfig @@ -23,13 +23,13 @@ config APIC config ARM_GIC bool - select ARM_GICV3_TCG if TCG + select ARM_GICV3 if TCG select ARM_GIC_KVM if KVM select MSI_NONBROKEN -config ARM_GICV3_TCG +config ARM_GICV3 bool - depends on ARM_GIC && TCG + depends on ARM_GIC config ARM_GIC_KVM bool @@ -87,8 +87,16 @@ config GOLDFISH_PIC config M68K_IRQC bool +config LOONGSON_IPI_COMMON + bool + config LOONGSON_IPI bool + select LOONGSON_IPI_COMMON + +config LOONGARCH_IPI + bool + select LOONGSON_IPI_COMMON config LOONGARCH_PCH_PIC bool diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c index cea559c39dd..04097341557 100644 --- a/hw/intc/allwinner-a10-pic.c +++ b/hw/intc/allwinner-a10-pic.c @@ -135,7 +135,7 @@ static void aw_a10_pic_write(void *opaque, hwaddr offset, uint64_t value, static const MemoryRegionOps aw_a10_pic_ops = { .read = aw_a10_pic_read, .write = aw_a10_pic_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; static const VMStateDescription vmstate_aw_a10_pic = { @@ -187,11 +187,11 @@ static void aw_a10_pic_reset(DeviceState *d) } } -static void aw_a10_pic_class_init(ObjectClass *klass, void *data) +static void aw_a10_pic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = aw_a10_pic_reset; + device_class_set_legacy_reset(dc, aw_a10_pic_reset); dc->desc = "allwinner a10 pic"; dc->vmsd = &vmstate_aw_a10_pic; } diff --git a/hw/intc/apic.c b/hw/intc/apic.c index 4186c57b34c..bcb103560c7 100644 --- a/hw/intc/apic.c +++ b/hw/intc/apic.c @@ -26,7 +26,7 @@ #include "hw/intc/kvm_irqcount.h" #include "hw/pci/msi.h" #include "qemu/host-utils.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "trace.h" #include "hw/i386/apic-msidef.h" #include "qapi/error.h" @@ -350,9 +350,8 @@ static int apic_set_base(APICCommonState *s, uint64_t val) return -1; } - s->apicbase = (val & 0xfffff000) | + s->apicbase = (val & MSR_IA32_APICBASE_BASE) | (s->apicbase & 
(MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE)); - /* if disabled, cannot be enabled again */ if (!(val & MSR_IA32_APICBASE_ENABLE)) { s->apicbase &= ~MSR_IA32_APICBASE_ENABLE; cpu_clear_apic_feature(&s->cpu->env); @@ -1177,7 +1176,7 @@ static void apic_unrealize(DeviceState *dev) local_apics[s->initial_apic_id] = NULL; } -static void apic_class_init(ObjectClass *klass, void *data) +static void apic_class_init(ObjectClass *klass, const void *data) { APICCommonClass *k = APIC_COMMON_CLASS(klass); diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c index c13cdd79943..37a7a7019d3 100644 --- a/hw/intc/apic_common.c +++ b/hw/intc/apic_common.c @@ -28,7 +28,7 @@ #include "hw/intc/kvm_irqcount.h" #include "trace.h" #include "hw/boards.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "hw/qdev-properties.h" #include "hw/sysbus.h" #include "migration/vmstate.h" @@ -408,13 +408,12 @@ static const VMStateDescription vmstate_apic_common = { } }; -static Property apic_properties_common[] = { +static const Property apic_properties_common[] = { DEFINE_PROP_UINT8("version", APICCommonState, version, 0x14), DEFINE_PROP_BIT("vapic", APICCommonState, vapic_control, VAPIC_ENABLE_BIT, true), DEFINE_PROP_BOOL("legacy-instance-id", APICCommonState, legacy_instance_id, false), - DEFINE_PROP_END_OF_LIST(), }; static void apic_common_get_id(Object *obj, Visitor *v, const char *name, @@ -467,11 +466,11 @@ static void apic_common_initfn(Object *obj) apic_common_set_id, NULL, NULL); } -static void apic_common_class_init(ObjectClass *klass, void *data) +static void apic_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = apic_reset_common; + device_class_set_legacy_reset(dc, apic_reset_common); device_class_set_props(dc, apic_properties_common); dc->realize = apic_common_realize; dc->unrealize = apic_common_unrealize; diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c index 806832439b4..899f1333633 100644 --- a/hw/intc/arm_gic.c +++ b/hw/intc/arm_gic.c @@ -27,8 +27,8 @@ #include "qemu/log.h" #include "qemu/module.h" #include "trace.h" -#include "sysemu/kvm.h" -#include "sysemu/qtest.h" +#include "system/kvm.h" +#include "system/qtest.h" /* #define DEBUG_GIC */ @@ -59,7 +59,7 @@ static const uint8_t gic_id_gicv2[] = { static inline int gic_get_current_cpu(GICState *s) { if (!qtest_enabled() && s->num_cpu > 1) { - return current_cpu->cpu_index; + return current_cpu->cpu_index - s->first_cpu_index; } return 0; } @@ -1263,9 +1263,14 @@ static void gic_dist_writeb(void *opaque, hwaddr offset, trace_gic_enable_irq(irq + i); } GIC_DIST_SET_ENABLED(irq + i, cm); - /* If a raised level triggered IRQ enabled then mark - is as pending. */ - if (GIC_DIST_TEST_LEVEL(irq + i, mask) + /* + * If a raised level triggered IRQ enabled then mark + * it as pending on 11MPCore. For other GIC revisions we + * handle the "level triggered and line asserted" check + * at the other end in gic_test_pending(). 
+ */ + if (s->revision == REV_11MPCORE + && GIC_DIST_TEST_LEVEL(irq + i, mask) && !GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) { DPRINTF("Set %d pending mask %x\n", irq + i, mask); GIC_DIST_SET_PENDING(irq + i, mask); @@ -2157,7 +2162,7 @@ static void arm_gic_realize(DeviceState *dev, Error **errp) } -static void arm_gic_class_init(ObjectClass *klass, void *data) +static void arm_gic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ARMGICClass *agc = ARM_GIC_CLASS(klass); diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c index 53fb2c4e2d3..ed5be056452 100644 --- a/hw/intc/arm_gic_common.c +++ b/hw/intc/arm_gic_common.c @@ -26,7 +26,7 @@ #include "hw/arm/linux-boot-if.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" static int gic_pre_save(void *opaque) { @@ -348,8 +348,9 @@ static void arm_gic_common_linux_init(ARMLinuxBootIf *obj, } } -static Property arm_gic_common_properties[] = { +static const Property arm_gic_common_properties[] = { DEFINE_PROP_UINT32("num-cpu", GICState, num_cpu, 1), + DEFINE_PROP_UINT32("first-cpu-index", GICState, first_cpu_index, 0), DEFINE_PROP_UINT32("num-irq", GICState, num_irq, 32), /* Revision can be 1 or 2 for GIC architecture specification * versions 1 or 2, or 0 to indicate the legacy 11MPCore GIC. @@ -360,10 +361,9 @@ static Property arm_gic_common_properties[] = { /* True if the GIC should implement the virtualization extensions */ DEFINE_PROP_BOOL("has-virtualization-extensions", GICState, virt_extn, 0), DEFINE_PROP_UINT32("num-priority-bits", GICState, n_prio_bits, 8), - DEFINE_PROP_END_OF_LIST(), }; -static void arm_gic_common_class_init(ObjectClass *klass, void *data) +static void arm_gic_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -383,7 +383,7 @@ static const TypeInfo arm_gic_common_type = { .class_size = sizeof(ARMGICCommonClass), .class_init = arm_gic_common_class_init, .abstract = true, - .interfaces = (InterfaceInfo []) { + .interfaces = (const InterfaceInfo[]) { { TYPE_ARM_LINUX_BOOT_IF }, { }, }, diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c index 53defee7d59..1e9232f47c8 100644 --- a/hw/intc/arm_gic_kvm.c +++ b/hw/intc/arm_gic_kvm.c @@ -23,7 +23,7 @@ #include "qapi/error.h" #include "qemu/module.h" #include "migration/blocker.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "kvm_arm.h" #include "gic_internal.h" #include "vgic_common.h" @@ -547,17 +547,10 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp) KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true, &error_abort); } - } else if (kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) { + } else { error_setg_errno(errp, -ret, "error creating in-kernel VGIC"); error_append_hint(errp, "Perhaps the host CPU does not support GICv2?\n"); - } else if (ret != -ENODEV && ret != -ENOTSUP) { - /* - * Very ancient kernel without KVM_CAP_DEVICE_CTRL: assume that - * ENODEV or ENOTSUP mean "can't create GICv2 with KVM_CREATE_DEVICE", - * and that we will get a GICv2 via KVM_CREATE_IRQCHIP. 
- */ - error_setg_errno(errp, -ret, "error creating in-kernel VGIC"); return; } @@ -591,7 +584,7 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp) } } -static void kvm_arm_gic_class_init(ObjectClass *klass, void *data) +static void kvm_arm_gic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/intc/arm_gicv2m.c b/hw/intc/arm_gicv2m.c index d564b857eba..cef06882216 100644 --- a/hw/intc/arm_gicv2m.c +++ b/hw/intc/arm_gicv2m.c @@ -31,7 +31,7 @@ #include "hw/irq.h" #include "hw/pci/msi.h" #include "hw/qdev-properties.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "qemu/log.h" #include "qemu/module.h" #include "qom/object.h" @@ -170,13 +170,12 @@ static void gicv2m_init(Object *obj) sysbus_init_mmio(sbd, &s->iomem); } -static Property gicv2m_properties[] = { +static const Property gicv2m_properties[] = { DEFINE_PROP_UINT32("base-spi", ARMGICv2mState, base_spi, 0), DEFINE_PROP_UINT32("num-spi", ARMGICv2mState, num_spi, 64), - DEFINE_PROP_END_OF_LIST(), }; -static void gicv2m_class_init(ObjectClass *klass, void *data) +static void gicv2m_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/arm_gicv3.c b/hw/intc/arm_gicv3.c index 58e18fff54f..6059ce926a6 100644 --- a/hw/intc/arm_gicv3.c +++ b/hw/intc/arm_gicv3.c @@ -452,7 +452,7 @@ static void arm_gic_realize(DeviceState *dev, Error **errp) gicv3_init_cpuif(s); } -static void arm_gicv3_class_init(ObjectClass *klass, void *data) +static void arm_gicv3_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass); diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c index bd50a1b0795..e438d8c042d 100644 --- a/hw/intc/arm_gicv3_common.c +++ b/hw/intc/arm_gicv3_common.c @@ -31,7 +31,7 @@ #include "migration/vmstate.h" #include "gicv3_internal.h" #include "hw/arm/linux-boot-if.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs) @@ -605,13 +605,14 @@ static void arm_gic_common_linux_init(ARMLinuxBootIf *obj, } } -static Property arm_gicv3_common_properties[] = { +static const Property arm_gicv3_common_properties[] = { DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1), DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32), DEFINE_PROP_UINT32("revision", GICv3State, revision, 3), DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0), DEFINE_PROP_BOOL("has-nmi", GICv3State, nmi_support, 0), DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0), + DEFINE_PROP_UINT32("maintenance-interrupt-id", GICv3State, maint_irq, 0), /* * Compatibility property: force 8 bits of physical priority, even * if the CPU being emulated should have fewer. 
@@ -621,10 +622,9 @@ static Property arm_gicv3_common_properties[] = { redist_region_count, qdev_prop_uint32, uint32_t), DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void arm_gicv3_common_class_init(ObjectClass *klass, void *data) +static void arm_gicv3_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -645,7 +645,7 @@ static const TypeInfo arm_gicv3_common_type = { .class_init = arm_gicv3_common_class_init, .instance_finalize = arm_gicv3_finalize, .abstract = true, - .interfaces = (InterfaceInfo []) { + .interfaces = (const InterfaceInfo[]) { { TYPE_ARM_LINUX_BOOT_IF }, { }, }, diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c index bdb13b00e98..4b4cf091570 100644 --- a/hw/intc/arm_gicv3_cpuif.c +++ b/hw/intc/arm_gicv3_cpuif.c @@ -22,8 +22,9 @@ #include "cpu.h" #include "target/arm/cpregs.h" #include "target/arm/cpu-features.h" -#include "sysemu/tcg.h" -#include "sysemu/qtest.h" +#include "target/arm/internals.h" +#include "system/tcg.h" +#include "system/qtest.h" /* * Special case return value from hppvi_index(); must be larger than @@ -583,7 +584,6 @@ static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, } gicv3_cpuif_virt_irq_fiq_update(cs); - return; } static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -781,7 +781,7 @@ static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp) if (nmi) { cs->ich_apr[grp][regno] |= ICV_AP1R_EL1_NMI; } else { - cs->ich_apr[grp][regno] |= (1 << regbit); + cs->ich_apr[grp][regno] |= (1U << regbit); } } @@ -793,7 +793,7 @@ static void icv_activate_vlpi(GICv3CPUState *cs) int regno = aprbit / 32; int regbit = aprbit % 32; - cs->ich_apr[cs->hppvlpi.grp][regno] |= (1 << regbit); + cs->ich_apr[cs->hppvlpi.grp][regno] |= (1U << regbit); gicv3_redist_vlpi_pending(cs, cs->hppvlpi.irq, 0); } @@ -1170,7 +1170,7 @@ static void icc_activate_irq(GICv3CPUState *cs, int irq) if (nmi) { cs->icc_apr[cs->hppi.grp][regno] |= ICC_AP1R_EL1_NMI; } else { - cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit); + cs->icc_apr[cs->hppi.grp][regno] |= (1U << regbit); } if (irq < GIC_INTERNAL) { @@ -2291,7 +2291,7 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env, r = CP_ACCESS_TRAP_EL3; break; case 3: - if (!is_a64(env) && !arm_is_el3_or_mon(env)) { + if (!arm_is_el3_or_mon(env)) { r = CP_ACCESS_TRAP_EL3; } break; @@ -2300,9 +2300,6 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env, } } - if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) { - r = CP_ACCESS_TRAP; - } return r; } @@ -2356,7 +2353,7 @@ static CPAccessResult gicv3_fiq_access(CPUARMState *env, r = CP_ACCESS_TRAP_EL3; break; case 3: - if (!is_a64(env) && !arm_is_el3_or_mon(env)) { + if (!arm_is_el3_or_mon(env)) { r = CP_ACCESS_TRAP_EL3; } break; @@ -2365,9 +2362,6 @@ static CPAccessResult gicv3_fiq_access(CPUARMState *env, } } - if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) { - r = CP_ACCESS_TRAP; - } return r; } @@ -2395,7 +2389,7 @@ static CPAccessResult gicv3_irq_access(CPUARMState *env, r = CP_ACCESS_TRAP_EL3; break; case 3: - if (!is_a64(env) && !arm_is_el3_or_mon(env)) { + if (!arm_is_el3_or_mon(env)) { r = CP_ACCESS_TRAP_EL3; } break; @@ -2404,9 +2398,6 @@ static CPAccessResult gicv3_irq_access(CPUARMState *env, } } - if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) { - r = CP_ACCESS_TRAP; - } return r; } diff --git a/hw/intc/arm_gicv3_its.c 
b/hw/intc/arm_gicv3_its.c index bf31158470e..577b4454057 100644 --- a/hw/intc/arm_gicv3_its.c +++ b/hw/intc/arm_gicv3_its.c @@ -465,7 +465,7 @@ static ItsCmdResult lookup_vte(GICv3ITSState *s, const char *who, static ItsCmdResult process_its_cmd_phys(GICv3ITSState *s, const ITEntry *ite, int irqlevel) { - CTEntry cte; + CTEntry cte = {}; ItsCmdResult cmdres; cmdres = lookup_cte(s, __func__, ite->icid, &cte); @@ -479,7 +479,7 @@ static ItsCmdResult process_its_cmd_phys(GICv3ITSState *s, const ITEntry *ite, static ItsCmdResult process_its_cmd_virt(GICv3ITSState *s, const ITEntry *ite, int irqlevel) { - VTEntry vte; + VTEntry vte = {}; ItsCmdResult cmdres; cmdres = lookup_vte(s, __func__, ite->vpeid, &vte); @@ -514,8 +514,8 @@ static ItsCmdResult process_its_cmd_virt(GICv3ITSState *s, const ITEntry *ite, static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid, uint32_t eventid, ItsCmdType cmd) { - DTEntry dte; - ITEntry ite; + DTEntry dte = {}; + ITEntry ite = {}; ItsCmdResult cmdres; int irqlevel; @@ -583,8 +583,8 @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt, uint32_t pIntid = 0; uint64_t num_eventids; uint16_t icid = 0; - DTEntry dte; - ITEntry ite; + DTEntry dte = {}; + ITEntry ite = {}; devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT; eventid = cmdpkt[1] & EVENTID_MASK; @@ -651,8 +651,8 @@ static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt, { uint32_t devid, eventid, vintid, doorbell, vpeid; uint32_t num_eventids; - DTEntry dte; - ITEntry ite; + DTEntry dte = {}; + ITEntry ite = {}; if (!its_feature_virtual(s)) { return CMD_CONTINUE; @@ -761,7 +761,7 @@ static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte) static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt) { uint16_t icid; - CTEntry cte; + CTEntry cte = {}; icid = cmdpkt[2] & ICID_MASK; cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK; @@ -822,7 +822,7 @@ static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte) static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt) { uint32_t devid; - DTEntry dte; + DTEntry dte = {}; devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT; dte.size = cmdpkt[1] & SIZE_MASK; @@ -886,9 +886,9 @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt) { uint32_t devid, eventid; uint16_t new_icid; - DTEntry dte; - CTEntry old_cte, new_cte; - ITEntry old_ite; + DTEntry dte = {}; + CTEntry old_cte = {}, new_cte = {}; + ITEntry old_ite = {}; ItsCmdResult cmdres; devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID); @@ -965,7 +965,7 @@ static bool update_vte(GICv3ITSState *s, uint32_t vpeid, const VTEntry *vte) static ItsCmdResult process_vmapp(GICv3ITSState *s, const uint64_t *cmdpkt) { - VTEntry vte; + VTEntry vte = {}; uint32_t vpeid; if (!its_feature_virtual(s)) { @@ -1030,7 +1030,7 @@ static void vmovp_callback(gpointer data, gpointer opaque) */ GICv3ITSState *s = data; VmovpCallbackData *cbdata = opaque; - VTEntry vte; + VTEntry vte = {}; ItsCmdResult cmdres; cmdres = lookup_vte(s, __func__, cbdata->vpeid, &vte); @@ -1085,9 +1085,9 @@ static ItsCmdResult process_vmovi(GICv3ITSState *s, const uint64_t *cmdpkt) { uint32_t devid, eventid, vpeid, doorbell; bool doorbell_valid; - DTEntry dte; - ITEntry ite; - VTEntry old_vte, new_vte; + DTEntry dte = {}; + ITEntry ite = {}; + VTEntry old_vte = {}, new_vte = {}; ItsCmdResult cmdres; if (!its_feature_virtual(s)) { @@ -1186,10 +1186,10 @@ static ItsCmdResult process_vinvall(GICv3ITSState *s, const uint64_t 
*cmdpkt) static ItsCmdResult process_inv(GICv3ITSState *s, const uint64_t *cmdpkt) { uint32_t devid, eventid; - ITEntry ite; - DTEntry dte; - CTEntry cte; - VTEntry vte; + ITEntry ite = {}; + DTEntry dte = {}; + CTEntry cte = {}; + VTEntry vte = {}; ItsCmdResult cmdres; devid = FIELD_EX64(cmdpkt[0], INV_0, DEVICEID); @@ -2002,13 +2002,12 @@ static void gicv3_its_post_load(GICv3ITSState *s) } } -static Property gicv3_its_props[] = { +static const Property gicv3_its_props[] = { DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3", GICv3State *), - DEFINE_PROP_END_OF_LIST(), }; -static void gicv3_its_class_init(ObjectClass *klass, void *data) +static void gicv3_its_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c index 0b97362cd21..e946e3fb87b 100644 --- a/hw/intc/arm_gicv3_its_common.c +++ b/hw/intc/arm_gicv3_its_common.c @@ -24,7 +24,7 @@ #include "hw/intc/arm_gicv3_its_common.h" #include "qemu/log.h" #include "qemu/module.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" static int gicv3_its_pre_save(void *opaque) { @@ -135,7 +135,7 @@ static void gicv3_its_common_reset_hold(Object *obj, ResetType type) memset(&s->baser, 0, sizeof(s->baser)); } -static void gicv3_its_common_class_init(ObjectClass *klass, void *data) +static void gicv3_its_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c index 35539c099fc..9812d508597 100644 --- a/hw/intc/arm_gicv3_its_kvm.c +++ b/hw/intc/arm_gicv3_its_kvm.c @@ -24,8 +24,8 @@ #include "qemu/error-report.h" #include "hw/intc/arm_gicv3_its_common.h" #include "hw/qdev-properties.h" -#include "sysemu/runstate.h" -#include "sysemu/kvm.h" +#include "system/runstate.h" +#include "system/kvm.h" #include "kvm_arm.h" #include "migration/blocker.h" #include "qom/object.h" @@ -234,13 +234,12 @@ static void kvm_arm_its_reset_hold(Object *obj, ResetType type) } } -static Property kvm_arm_its_props[] = { +static const Property kvm_arm_its_props[] = { DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "kvm-arm-gicv3", GICv3State *), - DEFINE_PROP_END_OF_LIST(), }; -static void kvm_arm_its_class_init(ObjectClass *klass, void *data) +static void kvm_arm_its_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c index 9ea6b8e2189..6166283cd1a 100644 --- a/hw/intc/arm_gicv3_kvm.c +++ b/hw/intc/arm_gicv3_kvm.c @@ -22,10 +22,11 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "hw/intc/arm_gicv3_common.h" +#include "hw/arm/virt.h" #include "qemu/error-report.h" #include "qemu/module.h" -#include "sysemu/kvm.h" -#include "sysemu/runstate.h" +#include "system/kvm.h" +#include "system/runstate.h" #include "kvm_arm.h" #include "gicv3_internal.h" #include "vgic_common.h" @@ -294,7 +295,7 @@ static void kvm_dist_putbmp(GICv3State *s, uint32_t offset, * the 1 bits. 
*/ if (clroffset != 0) { - reg = 0; + reg = ~0; kvm_gicd_access(s, clroffset, ®, true); clroffset += 4; } @@ -386,8 +387,6 @@ static void kvm_arm_gicv3_put(GICv3State *s) reg = c->level; kvm_gic_line_level_access(s, 0, ncpu, ®, true); - reg = ~0; - kvm_gicr_access(s, GICR_ICPENDR0, ncpu, ®, true); reg = c->gicr_ipendr0; kvm_gicr_access(s, GICR_ISPENDR0, ncpu, ®, true); @@ -444,7 +443,7 @@ static void kvm_arm_gicv3_put(GICv3State *s) kvm_gic_put_line_level_bmp(s, s->level); /* s->pending bitmap -> GICD_ISPENDRn */ - kvm_dist_putbmp(s, GICD_ISPENDR, GICD_ICPENDR, s->pending); + kvm_dist_putbmp(s, GICD_ISPENDR, 0, s->pending); /* s->active bitmap -> GICD_ISACTIVERn */ kvm_dist_putbmp(s, GICD_ISACTIVER, GICD_ICACTIVER, s->active); @@ -825,6 +824,34 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) return; } + if (s->maint_irq) { + Error *kvm_nv_migration_blocker = NULL; + int ret; + + error_setg(&kvm_nv_migration_blocker, + "Live migration disabled because KVM nested virt is enabled"); + if (migrate_add_blocker(&kvm_nv_migration_blocker, errp)) { + error_free(kvm_nv_migration_blocker); + return; + } + + ret = kvm_device_check_attr(s->dev_fd, + KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ, 0); + if (!ret) { + error_setg_errno(errp, errno, + "VGICv3 setting maintenance IRQ is not " + "supported by this host kernel"); + return; + } + + ret = kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ, 0, + &s->maint_irq, true, errp); + if (ret) { + error_setg_errno(errp, errno, "Failed to set VGIC maintenance IRQ"); + return; + } + } + multiple_redist_region_allowed = kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION); @@ -893,7 +920,7 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) } } -static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data) +static void kvm_arm_gicv3_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index 404a445138a..7c78961040e 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -18,11 +18,11 @@ #include "hw/intc/armv7m_nvic.h" #include "hw/irq.h" #include "hw/qdev-properties.h" -#include "sysemu/tcg.h" -#include "sysemu/runstate.h" +#include "system/tcg.h" +#include "system/runstate.h" #include "target/arm/cpu.h" #include "target/arm/cpu-features.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/memop.h" #include "qemu/log.h" #include "qemu/module.h" @@ -988,6 +988,7 @@ static void nvic_nmi_trigger(void *opaque, int n, int level) static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) { ARMCPU *cpu = s->cpu; + ARMISARegisters *isar = &cpu->isar; uint32_t val; switch (offset) { @@ -1263,74 +1264,74 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_pfr0; + return GET_IDREG(isar, ID_PFR0); case 0xd44: /* PFR1. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_pfr1; + return GET_IDREG(isar, ID_PFR1); case 0xd48: /* DFR0. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_dfr0; + return GET_IDREG(isar, ID_DFR0); case 0xd4c: /* AFR0. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->id_afr0; + return GET_IDREG(isar, ID_AFR0); case 0xd50: /* MMFR0. 
*/ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_mmfr0; + return GET_IDREG(isar, ID_MMFR0); case 0xd54: /* MMFR1. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_mmfr1; + return GET_IDREG(isar, ID_MMFR1); case 0xd58: /* MMFR2. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_mmfr2; + return GET_IDREG(isar, ID_MMFR2); case 0xd5c: /* MMFR3. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_mmfr3; + return GET_IDREG(isar, ID_MMFR3); case 0xd60: /* ISAR0. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar0; + return GET_IDREG(&cpu->isar, ID_ISAR0); case 0xd64: /* ISAR1. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar1; + return GET_IDREG(&cpu->isar, ID_ISAR1); case 0xd68: /* ISAR2. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar2; + return GET_IDREG(&cpu->isar, ID_ISAR2); case 0xd6c: /* ISAR3. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar3; + return GET_IDREG(&cpu->isar, ID_ISAR3); case 0xd70: /* ISAR4. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar4; + return GET_IDREG(&cpu->isar, ID_ISAR4); case 0xd74: /* ISAR5. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar5; + return GET_IDREG(&cpu->isar, ID_ISAR5); case 0xd78: /* CLIDR */ - return cpu->clidr; + return GET_IDREG(&cpu->isar, CLIDR); case 0xd7c: /* CTR */ return cpu->ctr; case 0xd80: /* CSSIDR */ @@ -2569,7 +2570,7 @@ static const VMStateDescription vmstate_nvic = { } }; -static Property props_nvic[] = { +static const Property props_nvic[] = { /* Number of external IRQ lines (so excluding the 16 internal exceptions) */ DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64), /* @@ -2577,7 +2578,6 @@ static Property props_nvic[] = { * to use a reasonable default. 
*/ DEFINE_PROP_UINT8("num-prio-bits", NVICState, num_prio_bits, 0), - DEFINE_PROP_END_OF_LIST() }; static void armv7m_nvic_reset(DeviceState *dev) @@ -2731,13 +2731,13 @@ static void armv7m_nvic_instance_init(Object *obj) qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1); } -static void armv7m_nvic_class_init(ObjectClass *klass, void *data) +static void armv7m_nvic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_nvic; device_class_set_props(dc, props_nvic); - dc->reset = armv7m_nvic_reset; + device_class_set_legacy_reset(dc, armv7m_nvic_reset); dc->realize = armv7m_nvic_realize; } diff --git a/hw/intc/aspeed_intc.c b/hw/intc/aspeed_intc.c index 7515558baba..5cd786dee6c 100644 --- a/hw/intc/aspeed_intc.c +++ b/hw/intc/aspeed_intc.c @@ -14,72 +14,291 @@ #include "hw/registerfields.h" #include "qapi/error.h" -/* INTC Registers */ -REG32(GICINT128_EN, 0x1000) -REG32(GICINT128_STATUS, 0x1004) -REG32(GICINT129_EN, 0x1100) -REG32(GICINT129_STATUS, 0x1104) -REG32(GICINT130_EN, 0x1200) -REG32(GICINT130_STATUS, 0x1204) -REG32(GICINT131_EN, 0x1300) -REG32(GICINT131_STATUS, 0x1304) -REG32(GICINT132_EN, 0x1400) -REG32(GICINT132_STATUS, 0x1404) -REG32(GICINT133_EN, 0x1500) -REG32(GICINT133_STATUS, 0x1504) -REG32(GICINT134_EN, 0x1600) -REG32(GICINT134_STATUS, 0x1604) -REG32(GICINT135_EN, 0x1700) -REG32(GICINT135_STATUS, 0x1704) -REG32(GICINT136_EN, 0x1800) -REG32(GICINT136_STATUS, 0x1804) - -#define GICINT_STATUS_BASE R_GICINT128_STATUS - -static void aspeed_intc_update(AspeedINTCState *s, int irq, int level) +/* + * INTC Registers + * + * values below are offset by - 0x1000 from datasheet + * because its memory region is start at 0x1000 + * + */ +REG32(GICINT128_EN, 0x000) +REG32(GICINT128_STATUS, 0x004) +REG32(GICINT129_EN, 0x100) +REG32(GICINT129_STATUS, 0x104) +REG32(GICINT130_EN, 0x200) +REG32(GICINT130_STATUS, 0x204) +REG32(GICINT131_EN, 0x300) +REG32(GICINT131_STATUS, 0x304) +REG32(GICINT132_EN, 0x400) +REG32(GICINT132_STATUS, 0x404) +REG32(GICINT133_EN, 0x500) +REG32(GICINT133_STATUS, 0x504) +REG32(GICINT134_EN, 0x600) +REG32(GICINT134_STATUS, 0x604) +REG32(GICINT135_EN, 0x700) +REG32(GICINT135_STATUS, 0x704) +REG32(GICINT136_EN, 0x800) +REG32(GICINT136_STATUS, 0x804) +REG32(GICINT192_201_EN, 0xB00) +REG32(GICINT192_201_STATUS, 0xB04) + +/* + * INTCIO Registers + * + * values below are offset by - 0x100 from datasheet + * because its memory region is start at 0x100 + * + */ +REG32(GICINT192_EN, 0x00) +REG32(GICINT192_STATUS, 0x04) +REG32(GICINT193_EN, 0x10) +REG32(GICINT193_STATUS, 0x14) +REG32(GICINT194_EN, 0x20) +REG32(GICINT194_STATUS, 0x24) +REG32(GICINT195_EN, 0x30) +REG32(GICINT195_STATUS, 0x34) +REG32(GICINT196_EN, 0x40) +REG32(GICINT196_STATUS, 0x44) +REG32(GICINT197_EN, 0x50) +REG32(GICINT197_STATUS, 0x54) + +/* + * SSP INTC Registers + */ +REG32(SSPINT128_EN, 0x2000) +REG32(SSPINT128_STATUS, 0x2004) +REG32(SSPINT129_EN, 0x2100) +REG32(SSPINT129_STATUS, 0x2104) +REG32(SSPINT130_EN, 0x2200) +REG32(SSPINT130_STATUS, 0x2204) +REG32(SSPINT131_EN, 0x2300) +REG32(SSPINT131_STATUS, 0x2304) +REG32(SSPINT132_EN, 0x2400) +REG32(SSPINT132_STATUS, 0x2404) +REG32(SSPINT133_EN, 0x2500) +REG32(SSPINT133_STATUS, 0x2504) +REG32(SSPINT134_EN, 0x2600) +REG32(SSPINT134_STATUS, 0x2604) +REG32(SSPINT135_EN, 0x2700) +REG32(SSPINT135_STATUS, 0x2704) +REG32(SSPINT136_EN, 0x2800) +REG32(SSPINT136_STATUS, 0x2804) +REG32(SSPINT137_EN, 0x2900) +REG32(SSPINT137_STATUS, 0x2904) +REG32(SSPINT138_EN, 0x2A00) +REG32(SSPINT138_STATUS, 0x2A04) 
+REG32(SSPINT160_169_EN, 0x2B00) +REG32(SSPINT160_169_STATUS, 0x2B04) + +/* + * SSP INTCIO Registers + */ +REG32(SSPINT160_EN, 0x180) +REG32(SSPINT160_STATUS, 0x184) +REG32(SSPINT161_EN, 0x190) +REG32(SSPINT161_STATUS, 0x194) +REG32(SSPINT162_EN, 0x1A0) +REG32(SSPINT162_STATUS, 0x1A4) +REG32(SSPINT163_EN, 0x1B0) +REG32(SSPINT163_STATUS, 0x1B4) +REG32(SSPINT164_EN, 0x1C0) +REG32(SSPINT164_STATUS, 0x1C4) +REG32(SSPINT165_EN, 0x1D0) +REG32(SSPINT165_STATUS, 0x1D4) + +/* + * TSP INTC Registers + */ +REG32(TSPINT128_EN, 0x3000) +REG32(TSPINT128_STATUS, 0x3004) +REG32(TSPINT129_EN, 0x3100) +REG32(TSPINT129_STATUS, 0x3104) +REG32(TSPINT130_EN, 0x3200) +REG32(TSPINT130_STATUS, 0x3204) +REG32(TSPINT131_EN, 0x3300) +REG32(TSPINT131_STATUS, 0x3304) +REG32(TSPINT132_EN, 0x3400) +REG32(TSPINT132_STATUS, 0x3404) +REG32(TSPINT133_EN, 0x3500) +REG32(TSPINT133_STATUS, 0x3504) +REG32(TSPINT134_EN, 0x3600) +REG32(TSPINT134_STATUS, 0x3604) +REG32(TSPINT135_EN, 0x3700) +REG32(TSPINT135_STATUS, 0x3704) +REG32(TSPINT136_EN, 0x3800) +REG32(TSPINT136_STATUS, 0x3804) +REG32(TSPINT137_EN, 0x3900) +REG32(TSPINT137_STATUS, 0x3904) +REG32(TSPINT138_EN, 0x3A00) +REG32(TSPINT138_STATUS, 0x3A04) +REG32(TSPINT160_169_EN, 0x3B00) +REG32(TSPINT160_169_STATUS, 0x3B04) + +/* + * TSP INTCIO Registers + */ + +REG32(TSPINT160_EN, 0x200) +REG32(TSPINT160_STATUS, 0x204) +REG32(TSPINT161_EN, 0x210) +REG32(TSPINT161_STATUS, 0x214) +REG32(TSPINT162_EN, 0x220) +REG32(TSPINT162_STATUS, 0x224) +REG32(TSPINT163_EN, 0x230) +REG32(TSPINT163_STATUS, 0x234) +REG32(TSPINT164_EN, 0x240) +REG32(TSPINT164_STATUS, 0x244) +REG32(TSPINT165_EN, 0x250) +REG32(TSPINT165_STATUS, 0x254) + +static const AspeedINTCIRQ *aspeed_intc_get_irq(AspeedINTCClass *aic, + uint32_t reg) +{ + int i; + + for (i = 0; i < aic->irq_table_count; i++) { + if (aic->irq_table[i].enable_reg == reg || + aic->irq_table[i].status_reg == reg) { + return &aic->irq_table[i]; + } + } + + /* + * Invalid reg. + */ + g_assert_not_reached(); +} + +/* + * Update the state of an interrupt controller pin by setting + * the specified output pin to the given level. + * The input pin index should be between 0 and the number of input pins. + * The output pin index should be between 0 and the number of output pins. + */ +static void aspeed_intc_update(AspeedINTCState *s, int inpin_idx, + int outpin_idx, int level) { AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s); + const char *name = object_get_typename(OBJECT(s)); - if (irq >= aic->num_ints) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid interrupt number: %d\n", - __func__, irq); - return; + assert((outpin_idx < aic->num_outpins) && (inpin_idx < aic->num_inpins)); + + trace_aspeed_intc_update_irq(name, inpin_idx, outpin_idx, level); + qemu_set_irq(s->output_pins[outpin_idx], level); +} + +static void aspeed_intc_set_irq_handler(AspeedINTCState *s, + const AspeedINTCIRQ *intc_irq, + uint32_t select) +{ + const char *name = object_get_typename(OBJECT(s)); + uint32_t status_reg; + int outpin_idx; + int inpin_idx; + + status_reg = intc_irq->status_reg; + outpin_idx = intc_irq->outpin_idx; + inpin_idx = intc_irq->inpin_idx; + + if ((s->mask[inpin_idx] & select) || (s->regs[status_reg] & select)) { + /* + * a. mask is not 0 means in ISR mode + * sources interrupt routine are executing. + * b. status register value is not 0 means previous + * source interrupt does not be executed, yet. + * + * save source interrupt to pending variable. 
+ */ + s->pending[inpin_idx] |= select; + trace_aspeed_intc_pending_irq(name, inpin_idx, s->pending[inpin_idx]); + } else { + /* + * notify firmware which source interrupt are coming + * by setting status register + */ + s->regs[status_reg] = select; + trace_aspeed_intc_trigger_irq(name, inpin_idx, outpin_idx, + s->regs[status_reg]); + aspeed_intc_update(s, inpin_idx, outpin_idx, 1); } +} + +static void aspeed_intc_set_irq_handler_multi_outpins(AspeedINTCState *s, + const AspeedINTCIRQ *intc_irq, uint32_t select) +{ + const char *name = object_get_typename(OBJECT(s)); + uint32_t status_reg; + int num_outpins; + int outpin_idx; + int inpin_idx; + int i; + + num_outpins = intc_irq->num_outpins; + status_reg = intc_irq->status_reg; + outpin_idx = intc_irq->outpin_idx; + inpin_idx = intc_irq->inpin_idx; - trace_aspeed_intc_update_irq(irq, level); - qemu_set_irq(s->output_pins[irq], level); + for (i = 0; i < num_outpins; i++) { + if (select & BIT(i)) { + if (s->mask[inpin_idx] & BIT(i) || + s->regs[status_reg] & BIT(i)) { + /* + * a. mask bit is not 0 means in ISR mode sources interrupt + * routine are executing. + * b. status bit is not 0 means previous source interrupt + * does not be executed, yet. + * + * save source interrupt to pending bit. + */ + s->pending[inpin_idx] |= BIT(i); + trace_aspeed_intc_pending_irq(name, inpin_idx, + s->pending[inpin_idx]); + } else { + /* + * notify firmware which source interrupt are coming + * by setting status bit + */ + s->regs[status_reg] |= BIT(i); + trace_aspeed_intc_trigger_irq(name, inpin_idx, outpin_idx + i, + s->regs[status_reg]); + aspeed_intc_update(s, inpin_idx, outpin_idx + i, 1); + } + } + } } /* - * The address of GICINT128 to GICINT136 are from 0x1000 to 0x1804. - * Utilize "address & 0x0f00" to get the irq and irq output pin index - * The value of irq should be 0 to num_ints. - * The irq 0 indicates GICINT128, irq 1 indicates GICINT129 and so on. + * GICINT192_201 maps 1:10 to input IRQ 0 and output IRQs 0 to 9. + * GICINT128 to GICINT136 map 1:1 to input IRQs 1 to 9 and output + * IRQs 10 to 18. The value of input IRQ should be between 0 and + * the number of input pins. 
*/ static void aspeed_intc_set_irq(void *opaque, int irq, int level) { AspeedINTCState *s = (AspeedINTCState *)opaque; AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s); - uint32_t status_addr = GICINT_STATUS_BASE + ((0x100 * irq) >> 2); + const char *name = object_get_typename(OBJECT(s)); + const AspeedINTCIRQ *intc_irq; uint32_t select = 0; uint32_t enable; + int num_outpins; + int inpin_idx; int i; - if (irq >= aic->num_ints) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid interrupt number: %d\n", - __func__, irq); - return; - } + assert(irq < aic->num_inpins); - trace_aspeed_intc_set_irq(irq, level); - enable = s->enable[irq]; + intc_irq = &aic->irq_table[irq]; + num_outpins = intc_irq->num_outpins; + inpin_idx = intc_irq->inpin_idx; + trace_aspeed_intc_set_irq(name, inpin_idx, level); + enable = s->enable[inpin_idx]; if (!level) { return; } for (i = 0; i < aic->num_lines; i++) { - if (s->orgates[irq].levels[i]) { + if (s->orgates[inpin_idx].levels[i]) { if (enable & BIT(i)) { select |= BIT(i); } @@ -90,45 +309,190 @@ static void aspeed_intc_set_irq(void *opaque, int irq, int level) return; } - trace_aspeed_intc_select(select); + trace_aspeed_intc_select(name, select); + if (num_outpins > 1) { + aspeed_intc_set_irq_handler_multi_outpins(s, intc_irq, select); + } else { + aspeed_intc_set_irq_handler(s, intc_irq, select); + } +} - if (s->mask[irq] || s->regs[status_addr]) { - /* - * a. mask is not 0 means in ISR mode - * sources interrupt routine are executing. - * b. status register value is not 0 means previous - * source interrupt does not be executed, yet. - * - * save source interrupt to pending variable. - */ - s->pending[irq] |= select; - trace_aspeed_intc_pending_irq(irq, s->pending[irq]); +static void aspeed_intc_enable_handler(AspeedINTCState *s, hwaddr offset, + uint64_t data) +{ + AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s); + const char *name = object_get_typename(OBJECT(s)); + const AspeedINTCIRQ *intc_irq; + uint32_t reg = offset >> 2; + uint32_t old_enable; + uint32_t change; + int inpin_idx; + + intc_irq = aspeed_intc_get_irq(aic, reg); + inpin_idx = intc_irq->inpin_idx; + + assert(inpin_idx < aic->num_inpins); + + /* + * The enable registers are used to enable source interrupts. + * They also handle masking and unmasking of source interrupts + * during the execution of the source ISR. 
+ */ + + /* disable all source interrupt */ + if (!data && !s->enable[inpin_idx]) { + s->regs[reg] = data; + return; + } + + old_enable = s->enable[inpin_idx]; + s->enable[inpin_idx] |= data; + + /* enable new source interrupt */ + if (old_enable != s->enable[inpin_idx]) { + trace_aspeed_intc_enable(name, s->enable[inpin_idx]); + s->regs[reg] = data; + return; + } + + /* mask and unmask source interrupt */ + change = s->regs[reg] ^ data; + if (change & data) { + s->mask[inpin_idx] &= ~change; + trace_aspeed_intc_unmask(name, change, s->mask[inpin_idx]); } else { - /* - * notify firmware which source interrupt are coming - * by setting status register - */ - s->regs[status_addr] = select; - trace_aspeed_intc_trigger_irq(irq, s->regs[status_addr]); - aspeed_intc_update(s, irq, 1); + s->mask[inpin_idx] |= change; + trace_aspeed_intc_mask(name, change, s->mask[inpin_idx]); + } + + s->regs[reg] = data; +} + +static void aspeed_intc_status_handler(AspeedINTCState *s, hwaddr offset, + uint64_t data) +{ + AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s); + const char *name = object_get_typename(OBJECT(s)); + const AspeedINTCIRQ *intc_irq; + uint32_t reg = offset >> 2; + int outpin_idx; + int inpin_idx; + + if (!data) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid data 0\n", __func__); + return; + } + + intc_irq = aspeed_intc_get_irq(aic, reg); + outpin_idx = intc_irq->outpin_idx; + inpin_idx = intc_irq->inpin_idx; + + assert(inpin_idx < aic->num_inpins); + + /* clear status */ + s->regs[reg] &= ~data; + + /* + * These status registers are used for notify sources ISR are executed. + * If one source ISR is executed, it will clear one bit. + * If it clear all bits, it means to initialize this register status + * rather than sources ISR are executed. + */ + if (data == 0xffffffff) { + return; + } + + /* All source ISR execution are done */ + if (!s->regs[reg]) { + trace_aspeed_intc_all_isr_done(name, inpin_idx); + if (s->pending[inpin_idx]) { + /* + * handle pending source interrupt + * notify firmware which source interrupt are pending + * by setting status register + */ + s->regs[reg] = s->pending[inpin_idx]; + s->pending[inpin_idx] = 0; + trace_aspeed_intc_trigger_irq(name, inpin_idx, outpin_idx, + s->regs[reg]); + aspeed_intc_update(s, inpin_idx, outpin_idx, 1); + } else { + /* clear irq */ + trace_aspeed_intc_clear_irq(name, inpin_idx, outpin_idx, 0); + aspeed_intc_update(s, inpin_idx, outpin_idx, 0); + } + } +} + +static void aspeed_intc_status_handler_multi_outpins(AspeedINTCState *s, + hwaddr offset, uint64_t data) +{ + const char *name = object_get_typename(OBJECT(s)); + AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s); + const AspeedINTCIRQ *intc_irq; + uint32_t reg = offset >> 2; + int num_outpins; + int outpin_idx; + int inpin_idx; + int i; + + if (!data) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid data 0\n", __func__); + return; + } + + intc_irq = aspeed_intc_get_irq(aic, reg); + num_outpins = intc_irq->num_outpins; + outpin_idx = intc_irq->outpin_idx; + inpin_idx = intc_irq->inpin_idx; + assert(inpin_idx < aic->num_inpins); + + /* clear status */ + s->regs[reg] &= ~data; + + /* + * The status registers are used for notify sources ISR are executed. + * If one source ISR is executed, it will clear one bit. + * If it clear all bits, it means to initialize this register status + * rather than sources ISR are executed. 
+ */ + if (data == 0xffffffff) { + return; + } + + for (i = 0; i < num_outpins; i++) { + /* All source ISR executions are done from a specific bit */ + if (data & BIT(i)) { + trace_aspeed_intc_all_isr_done_bit(name, inpin_idx, i); + if (s->pending[inpin_idx] & BIT(i)) { + /* + * Handle pending source interrupt. + * Notify firmware which source interrupt is pending + * by setting the status bit. + */ + s->regs[reg] |= BIT(i); + s->pending[inpin_idx] &= ~BIT(i); + trace_aspeed_intc_trigger_irq(name, inpin_idx, outpin_idx + i, + s->regs[reg]); + aspeed_intc_update(s, inpin_idx, outpin_idx + i, 1); + } else { + /* clear irq for the specific bit */ + trace_aspeed_intc_clear_irq(name, inpin_idx, outpin_idx + i, 0); + aspeed_intc_update(s, inpin_idx, outpin_idx + i, 0); + } + } } } static uint64_t aspeed_intc_read(void *opaque, hwaddr offset, unsigned int size) { AspeedINTCState *s = ASPEED_INTC(opaque); - uint32_t addr = offset >> 2; + const char *name = object_get_typename(OBJECT(s)); + uint32_t reg = offset >> 2; uint32_t value = 0; - if (addr >= ASPEED_INTC_NR_REGS) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n", - __func__, offset); - return 0; - } - - value = s->regs[addr]; - trace_aspeed_intc_read(offset, size, value); + value = s->regs[reg]; + trace_aspeed_intc_read(name, offset, size, value); return value; } @@ -137,22 +501,12 @@ static void aspeed_intc_write(void *opaque, hwaddr offset, uint64_t data, unsigned size) { AspeedINTCState *s = ASPEED_INTC(opaque); - AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s); - uint32_t addr = offset >> 2; - uint32_t old_enable; - uint32_t change; - uint32_t irq; + const char *name = object_get_typename(OBJECT(s)); + uint32_t reg = offset >> 2; - if (addr >= ASPEED_INTC_NR_REGS) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", - __func__, offset); - return; - } - - trace_aspeed_intc_write(offset, size, data); + trace_aspeed_intc_write(name, offset, size, data); - switch (addr) { + switch (reg) { case R_GICINT128_EN: case R_GICINT129_EN: case R_GICINT130_EN: @@ -162,45 +516,8 @@ static void aspeed_intc_write(void *opaque, hwaddr offset, uint64_t data, case R_GICINT134_EN: case R_GICINT135_EN: case R_GICINT136_EN: - irq = (offset & 0x0f00) >> 8; - - if (irq >= aic->num_ints) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid interrupt number: %d\n", - __func__, irq); - return; - } - - /* - * These registers are used for enable sources interrupt and - * mask and unmask source interrupt while executing source ISR. 
- */ - - /* disable all source interrupt */ - if (!data && !s->enable[irq]) { - s->regs[addr] = data; - return; - } - - old_enable = s->enable[irq]; - s->enable[irq] |= data; - - /* enable new source interrupt */ - if (old_enable != s->enable[irq]) { - trace_aspeed_intc_enable(s->enable[irq]); - s->regs[addr] = data; - return; - } - - /* mask and unmask source interrupt */ - change = s->regs[addr] ^ data; - if (change & data) { - s->mask[irq] &= ~change; - trace_aspeed_intc_unmask(change, s->mask[irq]); - } else { - s->mask[irq] |= change; - trace_aspeed_intc_mask(change, s->mask[irq]); - } - s->regs[addr] = data; + case R_GICINT192_201_EN: + aspeed_intc_enable_handler(s, offset, data); break; case R_GICINT128_STATUS: case R_GICINT129_STATUS: @@ -211,59 +528,271 @@ static void aspeed_intc_write(void *opaque, hwaddr offset, uint64_t data, case R_GICINT134_STATUS: case R_GICINT135_STATUS: case R_GICINT136_STATUS: - irq = (offset & 0x0f00) >> 8; + aspeed_intc_status_handler(s, offset, data); + break; + case R_GICINT192_201_STATUS: + aspeed_intc_status_handler_multi_outpins(s, offset, data); + break; + default: + s->regs[reg] = data; + break; + } +} - if (irq >= aic->num_ints) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid interrupt number: %d\n", - __func__, irq); - return; - } +static void aspeed_ssp_intc_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) +{ + AspeedINTCState *s = ASPEED_INTC(opaque); + const char *name = object_get_typename(OBJECT(s)); + uint32_t reg = offset >> 2; - /* clear status */ - s->regs[addr] &= ~data; + trace_aspeed_intc_write(name, offset, size, data); - /* - * These status registers are used for notify sources ISR are executed. - * If one source ISR is executed, it will clear one bit. - * If it clear all bits, it means to initialize this register status - * rather than sources ISR are executed. 
- */ - if (data == 0xffffffff) { - return; - } + switch (reg) { + case R_SSPINT128_EN: + case R_SSPINT129_EN: + case R_SSPINT130_EN: + case R_SSPINT131_EN: + case R_SSPINT132_EN: + case R_SSPINT133_EN: + case R_SSPINT134_EN: + case R_SSPINT135_EN: + case R_SSPINT136_EN: + case R_SSPINT160_169_EN: + aspeed_intc_enable_handler(s, offset, data); + break; + case R_SSPINT128_STATUS: + case R_SSPINT129_STATUS: + case R_SSPINT130_STATUS: + case R_SSPINT131_STATUS: + case R_SSPINT132_STATUS: + case R_SSPINT133_STATUS: + case R_SSPINT134_STATUS: + case R_SSPINT135_STATUS: + case R_SSPINT136_STATUS: + aspeed_intc_status_handler(s, offset, data); + break; + case R_SSPINT160_169_STATUS: + aspeed_intc_status_handler_multi_outpins(s, offset, data); + break; + default: + s->regs[reg] = data; + break; + } +} - /* All source ISR execution are done */ - if (!s->regs[addr]) { - trace_aspeed_intc_all_isr_done(irq); - if (s->pending[irq]) { - /* - * handle pending source interrupt - * notify firmware which source interrupt are pending - * by setting status register - */ - s->regs[addr] = s->pending[irq]; - s->pending[irq] = 0; - trace_aspeed_intc_trigger_irq(irq, s->regs[addr]); - aspeed_intc_update(s, irq, 1); - } else { - /* clear irq */ - trace_aspeed_intc_clear_irq(irq, 0); - aspeed_intc_update(s, irq, 0); - } - } +static void aspeed_tsp_intc_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) +{ + AspeedINTCState *s = ASPEED_INTC(opaque); + const char *name = object_get_typename(OBJECT(s)); + uint32_t reg = offset >> 2; + + trace_aspeed_intc_write(name, offset, size, data); + + switch (reg) { + case R_TSPINT128_EN: + case R_TSPINT129_EN: + case R_TSPINT130_EN: + case R_TSPINT131_EN: + case R_TSPINT132_EN: + case R_TSPINT133_EN: + case R_TSPINT134_EN: + case R_TSPINT135_EN: + case R_TSPINT136_EN: + case R_TSPINT160_169_EN: + aspeed_intc_enable_handler(s, offset, data); + break; + case R_TSPINT128_STATUS: + case R_TSPINT129_STATUS: + case R_TSPINT130_STATUS: + case R_TSPINT131_STATUS: + case R_TSPINT132_STATUS: + case R_TSPINT133_STATUS: + case R_TSPINT134_STATUS: + case R_TSPINT135_STATUS: + case R_TSPINT136_STATUS: + aspeed_intc_status_handler(s, offset, data); + break; + case R_TSPINT160_169_STATUS: + aspeed_intc_status_handler_multi_outpins(s, offset, data); break; default: - s->regs[addr] = data; + s->regs[reg] = data; break; } +} - return; +static uint64_t aspeed_intcio_read(void *opaque, hwaddr offset, + unsigned int size) +{ + AspeedINTCState *s = ASPEED_INTC(opaque); + const char *name = object_get_typename(OBJECT(s)); + uint32_t reg = offset >> 2; + uint32_t value = 0; + + value = s->regs[reg]; + trace_aspeed_intc_read(name, offset, size, value); + + return value; +} + +static void aspeed_intcio_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) +{ + AspeedINTCState *s = ASPEED_INTC(opaque); + const char *name = object_get_typename(OBJECT(s)); + uint32_t reg = offset >> 2; + + trace_aspeed_intc_write(name, offset, size, data); + + switch (reg) { + case R_GICINT192_EN: + case R_GICINT193_EN: + case R_GICINT194_EN: + case R_GICINT195_EN: + case R_GICINT196_EN: + case R_GICINT197_EN: + aspeed_intc_enable_handler(s, offset, data); + break; + case R_GICINT192_STATUS: + case R_GICINT193_STATUS: + case R_GICINT194_STATUS: + case R_GICINT195_STATUS: + case R_GICINT196_STATUS: + case R_GICINT197_STATUS: + aspeed_intc_status_handler(s, offset, data); + break; + default: + s->regs[reg] = data; + break; + } +} + +static void aspeed_ssp_intcio_write(void *opaque, hwaddr offset, 
uint64_t data, + unsigned size) +{ + AspeedINTCState *s = ASPEED_INTC(opaque); + const char *name = object_get_typename(OBJECT(s)); + uint32_t reg = offset >> 2; + + trace_aspeed_intc_write(name, offset, size, data); + + switch (reg) { + case R_SSPINT160_EN: + case R_SSPINT161_EN: + case R_SSPINT162_EN: + case R_SSPINT163_EN: + case R_SSPINT164_EN: + case R_SSPINT165_EN: + aspeed_intc_enable_handler(s, offset, data); + break; + case R_SSPINT160_STATUS: + case R_SSPINT161_STATUS: + case R_SSPINT162_STATUS: + case R_SSPINT163_STATUS: + case R_SSPINT164_STATUS: + case R_SSPINT165_STATUS: + aspeed_intc_status_handler(s, offset, data); + break; + default: + s->regs[reg] = data; + break; + } +} + +static void aspeed_tsp_intcio_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) +{ + AspeedINTCState *s = ASPEED_INTC(opaque); + const char *name = object_get_typename(OBJECT(s)); + uint32_t reg = offset >> 2; + + trace_aspeed_intc_write(name, offset, size, data); + + switch (reg) { + case R_TSPINT160_EN: + case R_TSPINT161_EN: + case R_TSPINT162_EN: + case R_TSPINT163_EN: + case R_TSPINT164_EN: + case R_TSPINT165_EN: + aspeed_intc_enable_handler(s, offset, data); + break; + case R_TSPINT160_STATUS: + case R_TSPINT161_STATUS: + case R_TSPINT162_STATUS: + case R_TSPINT163_STATUS: + case R_TSPINT164_STATUS: + case R_TSPINT165_STATUS: + aspeed_intc_status_handler(s, offset, data); + break; + default: + s->regs[reg] = data; + break; + } } static const MemoryRegionOps aspeed_intc_ops = { .read = aspeed_intc_read, .write = aspeed_intc_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static const MemoryRegionOps aspeed_intcio_ops = { + .read = aspeed_intcio_read, + .write = aspeed_intcio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static const MemoryRegionOps aspeed_ssp_intc_ops = { + .read = aspeed_intc_read, + .write = aspeed_ssp_intc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static const MemoryRegionOps aspeed_ssp_intcio_ops = { + .read = aspeed_intcio_read, + .write = aspeed_ssp_intcio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static const MemoryRegionOps aspeed_tsp_intc_ops = { + .read = aspeed_intc_read, + .write = aspeed_tsp_intc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static const MemoryRegionOps aspeed_tsp_intcio_ops = { + .read = aspeed_intcio_read, + .write = aspeed_tsp_intcio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -276,8 +805,8 @@ static void aspeed_intc_instance_init(Object *obj) AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s); int i; - assert(aic->num_ints <= ASPEED_INTC_NR_INTS); - for (i = 0; i < aic->num_ints; i++) { + assert(aic->num_inpins <= ASPEED_INTC_MAX_INPINS); + for (i = 0; i < aic->num_inpins; i++) { object_initialize_child(obj, "intc-orgates[*]", &s->orgates[i], TYPE_OR_IRQ); object_property_set_int(OBJECT(&s->orgates[i]), "num-lines", @@ -288,8 +817,9 @@ static void aspeed_intc_instance_init(Object *obj) static void aspeed_intc_reset(DeviceState *dev) { AspeedINTCState *s 
= ASPEED_INTC(dev); + AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s); - memset(s->regs, 0, sizeof(s->regs)); + memset(s->regs, 0, aic->nr_regs << 2); memset(s->enable, 0, sizeof(s->enable)); memset(s->mask, 0, sizeof(s->mask)); memset(s->pending, 0, sizeof(s->pending)); @@ -302,28 +832,51 @@ static void aspeed_intc_realize(DeviceState *dev, Error **errp) AspeedINTCClass *aic = ASPEED_INTC_GET_CLASS(s); int i; - memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_intc_ops, s, - TYPE_ASPEED_INTC ".regs", ASPEED_INTC_NR_REGS << 2); + memory_region_init(&s->iomem_container, OBJECT(s), + TYPE_ASPEED_INTC ".container", aic->mem_size); + + sysbus_init_mmio(sbd, &s->iomem_container); + + s->regs = g_new(uint32_t, aic->nr_regs); + memory_region_init_io(&s->iomem, OBJECT(s), aic->reg_ops, s, + TYPE_ASPEED_INTC ".regs", aic->nr_regs << 2); - sysbus_init_mmio(sbd, &s->iomem); - qdev_init_gpio_in(dev, aspeed_intc_set_irq, aic->num_ints); + memory_region_add_subregion(&s->iomem_container, aic->reg_offset, + &s->iomem); - for (i = 0; i < aic->num_ints; i++) { + qdev_init_gpio_in(dev, aspeed_intc_set_irq, aic->num_inpins); + + for (i = 0; i < aic->num_inpins; i++) { if (!qdev_realize(DEVICE(&s->orgates[i]), NULL, errp)) { return; } + } + + for (i = 0; i < aic->num_outpins; i++) { sysbus_init_irq(sbd, &s->output_pins[i]); } } -static void aspeed_intc_class_init(ObjectClass *klass, void *data) +static void aspeed_intc_unrealize(DeviceState *dev) +{ + AspeedINTCState *s = ASPEED_INTC(dev); + + g_free(s->regs); + s->regs = NULL; +} + +static void aspeed_intc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); dc->desc = "ASPEED INTC Controller"; dc->realize = aspeed_intc_realize; - dc->reset = aspeed_intc_reset; + dc->unrealize = aspeed_intc_unrealize; + device_class_set_legacy_reset(dc, aspeed_intc_reset); dc->vmsd = NULL; + + aic->reg_ops = &aspeed_intc_ops; } static const TypeInfo aspeed_intc_info = { @@ -336,14 +889,33 @@ static const TypeInfo aspeed_intc_info = { .abstract = true, }; -static void aspeed_2700_intc_class_init(ObjectClass *klass, void *data) +static AspeedINTCIRQ aspeed_2700_intc_irqs[ASPEED_INTC_MAX_INPINS] = { + {0, 0, 10, R_GICINT192_201_EN, R_GICINT192_201_STATUS}, + {1, 10, 1, R_GICINT128_EN, R_GICINT128_STATUS}, + {2, 11, 1, R_GICINT129_EN, R_GICINT129_STATUS}, + {3, 12, 1, R_GICINT130_EN, R_GICINT130_STATUS}, + {4, 13, 1, R_GICINT131_EN, R_GICINT131_STATUS}, + {5, 14, 1, R_GICINT132_EN, R_GICINT132_STATUS}, + {6, 15, 1, R_GICINT133_EN, R_GICINT133_STATUS}, + {7, 16, 1, R_GICINT134_EN, R_GICINT134_STATUS}, + {8, 17, 1, R_GICINT135_EN, R_GICINT135_STATUS}, + {9, 18, 1, R_GICINT136_EN, R_GICINT136_STATUS}, +}; + +static void aspeed_2700_intc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); dc->desc = "ASPEED 2700 INTC Controller"; aic->num_lines = 32; - aic->num_ints = 9; + aic->num_inpins = 10; + aic->num_outpins = 19; + aic->mem_size = 0x4000; + aic->nr_regs = 0xB08 >> 2; + aic->reg_offset = 0x1000; + aic->irq_table = aspeed_2700_intc_irqs; + aic->irq_table_count = ARRAY_SIZE(aspeed_2700_intc_irqs); } static const TypeInfo aspeed_2700_intc_info = { @@ -352,10 +924,185 @@ static const TypeInfo aspeed_2700_intc_info = { .class_init = aspeed_2700_intc_class_init, }; +static AspeedINTCIRQ aspeed_2700_intcio_irqs[ASPEED_INTC_MAX_INPINS] = { + {0, 0, 1, R_GICINT192_EN, R_GICINT192_STATUS}, + {1, 1, 1, 
R_GICINT193_EN, R_GICINT193_STATUS}, + {2, 2, 1, R_GICINT194_EN, R_GICINT194_STATUS}, + {3, 3, 1, R_GICINT195_EN, R_GICINT195_STATUS}, + {4, 4, 1, R_GICINT196_EN, R_GICINT196_STATUS}, + {5, 5, 1, R_GICINT197_EN, R_GICINT197_STATUS}, +}; + +static void aspeed_2700_intcio_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); + + dc->desc = "ASPEED 2700 INTC IO Controller"; + aic->num_lines = 32; + aic->num_inpins = 6; + aic->num_outpins = 6; + aic->mem_size = 0x400; + aic->nr_regs = 0x58 >> 2; + aic->reg_offset = 0x100; + aic->reg_ops = &aspeed_intcio_ops; + aic->irq_table = aspeed_2700_intcio_irqs; + aic->irq_table_count = ARRAY_SIZE(aspeed_2700_intcio_irqs); +} + +static const TypeInfo aspeed_2700_intcio_info = { + .name = TYPE_ASPEED_2700_INTCIO, + .parent = TYPE_ASPEED_INTC, + .class_init = aspeed_2700_intcio_class_init, +}; + +static AspeedINTCIRQ aspeed_2700ssp_intc_irqs[ASPEED_INTC_MAX_INPINS] = { + {0, 0, 10, R_SSPINT160_169_EN, R_SSPINT160_169_STATUS}, + {1, 10, 1, R_SSPINT128_EN, R_SSPINT128_STATUS}, + {2, 11, 1, R_SSPINT129_EN, R_SSPINT129_STATUS}, + {3, 12, 1, R_SSPINT130_EN, R_SSPINT130_STATUS}, + {4, 13, 1, R_SSPINT131_EN, R_SSPINT131_STATUS}, + {5, 14, 1, R_SSPINT132_EN, R_SSPINT132_STATUS}, + {6, 15, 1, R_SSPINT133_EN, R_SSPINT133_STATUS}, + {7, 16, 1, R_SSPINT134_EN, R_SSPINT134_STATUS}, + {8, 17, 1, R_SSPINT135_EN, R_SSPINT135_STATUS}, + {9, 18, 1, R_SSPINT136_EN, R_SSPINT136_STATUS}, +}; + +static void aspeed_2700ssp_intc_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); + + dc->desc = "ASPEED 2700 SSP INTC Controller"; + aic->num_lines = 32; + aic->num_inpins = 10; + aic->num_outpins = 19; + aic->mem_size = 0x4000; + aic->nr_regs = 0x2B08 >> 2; + aic->reg_offset = 0x0; + aic->reg_ops = &aspeed_ssp_intc_ops; + aic->irq_table = aspeed_2700ssp_intc_irqs; + aic->irq_table_count = ARRAY_SIZE(aspeed_2700ssp_intc_irqs); +} + +static const TypeInfo aspeed_2700ssp_intc_info = { + .name = TYPE_ASPEED_2700SSP_INTC, + .parent = TYPE_ASPEED_INTC, + .class_init = aspeed_2700ssp_intc_class_init, +}; + +static AspeedINTCIRQ aspeed_2700ssp_intcio_irqs[ASPEED_INTC_MAX_INPINS] = { + {0, 0, 1, R_SSPINT160_EN, R_SSPINT160_STATUS}, + {1, 1, 1, R_SSPINT161_EN, R_SSPINT161_STATUS}, + {2, 2, 1, R_SSPINT162_EN, R_SSPINT162_STATUS}, + {3, 3, 1, R_SSPINT163_EN, R_SSPINT163_STATUS}, + {4, 4, 1, R_SSPINT164_EN, R_SSPINT164_STATUS}, + {5, 5, 1, R_SSPINT165_EN, R_SSPINT165_STATUS}, +}; + +static void aspeed_2700ssp_intcio_class_init(ObjectClass *klass, + const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); + + dc->desc = "ASPEED 2700 SSP INTC IO Controller"; + aic->num_lines = 32; + aic->num_inpins = 6; + aic->num_outpins = 6; + aic->mem_size = 0x400; + aic->nr_regs = 0x1d8 >> 2; + aic->reg_offset = 0; + aic->reg_ops = &aspeed_ssp_intcio_ops; + aic->irq_table = aspeed_2700ssp_intcio_irqs; + aic->irq_table_count = ARRAY_SIZE(aspeed_2700ssp_intcio_irqs); +} + +static const TypeInfo aspeed_2700ssp_intcio_info = { + .name = TYPE_ASPEED_2700SSP_INTCIO, + .parent = TYPE_ASPEED_INTC, + .class_init = aspeed_2700ssp_intcio_class_init, +}; + +static AspeedINTCIRQ aspeed_2700tsp_intc_irqs[ASPEED_INTC_MAX_INPINS] = { + {0, 0, 10, R_TSPINT160_169_EN, R_TSPINT160_169_STATUS}, + {1, 10, 1, R_TSPINT128_EN, R_TSPINT128_STATUS}, + {2, 11, 1, R_TSPINT129_EN, R_TSPINT129_STATUS}, + 
{3, 12, 1, R_TSPINT130_EN, R_TSPINT130_STATUS}, + {4, 13, 1, R_TSPINT131_EN, R_TSPINT131_STATUS}, + {5, 14, 1, R_TSPINT132_EN, R_TSPINT132_STATUS}, + {6, 15, 1, R_TSPINT133_EN, R_TSPINT133_STATUS}, + {7, 16, 1, R_TSPINT134_EN, R_TSPINT134_STATUS}, + {8, 17, 1, R_TSPINT135_EN, R_TSPINT135_STATUS}, + {9, 18, 1, R_TSPINT136_EN, R_TSPINT136_STATUS}, +}; + +static void aspeed_2700tsp_intc_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); + + dc->desc = "ASPEED 2700 TSP INTC Controller"; + aic->num_lines = 32; + aic->num_inpins = 10; + aic->num_outpins = 19; + aic->mem_size = 0x4000; + aic->nr_regs = 0x3B08 >> 2; + aic->reg_offset = 0; + aic->reg_ops = &aspeed_tsp_intc_ops; + aic->irq_table = aspeed_2700tsp_intc_irqs; + aic->irq_table_count = ARRAY_SIZE(aspeed_2700tsp_intc_irqs); +} + +static const TypeInfo aspeed_2700tsp_intc_info = { + .name = TYPE_ASPEED_2700TSP_INTC, + .parent = TYPE_ASPEED_INTC, + .class_init = aspeed_2700tsp_intc_class_init, +}; + +static AspeedINTCIRQ aspeed_2700tsp_intcio_irqs[ASPEED_INTC_MAX_INPINS] = { + {0, 0, 1, R_TSPINT160_EN, R_TSPINT160_STATUS}, + {1, 1, 1, R_TSPINT161_EN, R_TSPINT161_STATUS}, + {2, 2, 1, R_TSPINT162_EN, R_TSPINT162_STATUS}, + {3, 3, 1, R_TSPINT163_EN, R_TSPINT163_STATUS}, + {4, 4, 1, R_TSPINT164_EN, R_TSPINT164_STATUS}, + {5, 5, 1, R_TSPINT165_EN, R_TSPINT165_STATUS}, +}; + +static void aspeed_2700tsp_intcio_class_init(ObjectClass *klass, + const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); + + dc->desc = "ASPEED 2700 TSP INTC IO Controller"; + aic->num_lines = 32; + aic->num_inpins = 6; + aic->num_outpins = 6; + aic->mem_size = 0x400; + aic->nr_regs = 0x258 >> 2; + aic->reg_offset = 0x0; + aic->reg_ops = &aspeed_tsp_intcio_ops; + aic->irq_table = aspeed_2700tsp_intcio_irqs; + aic->irq_table_count = ARRAY_SIZE(aspeed_2700tsp_intcio_irqs); +} + +static const TypeInfo aspeed_2700tsp_intcio_info = { + .name = TYPE_ASPEED_2700TSP_INTCIO, + .parent = TYPE_ASPEED_INTC, + .class_init = aspeed_2700tsp_intcio_class_init, +}; + static void aspeed_intc_register_types(void) { type_register_static(&aspeed_intc_info); type_register_static(&aspeed_2700_intc_info); + type_register_static(&aspeed_2700_intcio_info); + type_register_static(&aspeed_2700ssp_intc_info); + type_register_static(&aspeed_2700ssp_intcio_info); + type_register_static(&aspeed_2700tsp_intc_info); + type_register_static(&aspeed_2700tsp_intcio_info); } type_init(aspeed_intc_register_types); diff --git a/hw/intc/aspeed_vic.c b/hw/intc/aspeed_vic.c index ba1d953c2cf..7120088454c 100644 --- a/hw/intc/aspeed_vic.c +++ b/hw/intc/aspeed_vic.c @@ -339,11 +339,11 @@ static const VMStateDescription vmstate_aspeed_vic = { } }; -static void aspeed_vic_class_init(ObjectClass *klass, void *data) +static void aspeed_vic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_vic_realize; - dc->reset = aspeed_vic_reset; + device_class_set_legacy_reset(dc, aspeed_vic_reset); dc->desc = "ASPEED Interrupt Controller (New)"; dc->vmsd = &vmstate_aspeed_vic; } diff --git a/hw/intc/bcm2835_ic.c b/hw/intc/bcm2835_ic.c index 2c2e2b1822c..55e0a5a503b 100644 --- a/hw/intc/bcm2835_ic.c +++ b/hw/intc/bcm2835_ic.c @@ -219,11 +219,11 @@ static const VMStateDescription vmstate_bcm2835_ic = { } }; -static void bcm2835_ic_class_init(ObjectClass *klass, void *data) +static void bcm2835_ic_class_init(ObjectClass *klass, 
const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = bcm2835_ic_reset; + device_class_set_legacy_reset(dc, bcm2835_ic_reset); dc->vmsd = &vmstate_bcm2835_ic; } diff --git a/hw/intc/bcm2836_control.c b/hw/intc/bcm2836_control.c index 81faf032b0e..1c028536696 100644 --- a/hw/intc/bcm2836_control.c +++ b/hw/intc/bcm2836_control.c @@ -384,11 +384,11 @@ static const VMStateDescription vmstate_bcm2836_control = { } }; -static void bcm2836_control_class_init(ObjectClass *klass, void *data) +static void bcm2836_control_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = bcm2836_control_reset; + device_class_set_legacy_reset(dc, bcm2836_control_reset); dc->vmsd = &vmstate_bcm2836_control; } diff --git a/hw/intc/etraxfs_pic.c b/hw/intc/etraxfs_pic.c deleted file mode 100644 index bd37d1cca04..00000000000 --- a/hw/intc/etraxfs_pic.c +++ /dev/null @@ -1,172 +0,0 @@ -/* - * QEMU ETRAX Interrupt Controller. - * - * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "qemu/osdep.h" -#include "hw/sysbus.h" -#include "qemu/module.h" -#include "hw/irq.h" -#include "hw/qdev-properties.h" -#include "qom/object.h" - -#define D(x) - -#define R_RW_MASK 0 -#define R_R_VECT 1 -#define R_R_MASKED_VECT 2 -#define R_R_NMI 3 -#define R_R_GURU 4 -#define R_MAX 5 - -#define TYPE_ETRAX_FS_PIC "etraxfs-pic" -DECLARE_INSTANCE_CHECKER(struct etrax_pic, ETRAX_FS_PIC, - TYPE_ETRAX_FS_PIC) - -struct etrax_pic -{ - SysBusDevice parent_obj; - - MemoryRegion mmio; - qemu_irq parent_irq; - qemu_irq parent_nmi; - uint32_t regs[R_MAX]; -}; - -static void pic_update(struct etrax_pic *fs) -{ - uint32_t vector = 0; - int i; - - fs->regs[R_R_MASKED_VECT] = fs->regs[R_R_VECT] & fs->regs[R_RW_MASK]; - - /* The ETRAX interrupt controller signals interrupts to the core - through an interrupt request wire and an irq vector bus. If - multiple interrupts are simultaneously active it chooses vector - 0x30 and lets the sw choose the priorities. */ - if (fs->regs[R_R_MASKED_VECT]) { - uint32_t mv = fs->regs[R_R_MASKED_VECT]; - for (i = 0; i < 31; i++) { - if (mv & 1) { - vector = 0x31 + i; - /* Check for multiple interrupts. 
*/ - if (mv > 1) - vector = 0x30; - break; - } - mv >>= 1; - } - } - - qemu_set_irq(fs->parent_irq, vector); -} - -static uint64_t -pic_read(void *opaque, hwaddr addr, unsigned int size) -{ - struct etrax_pic *fs = opaque; - uint32_t rval; - - rval = fs->regs[addr >> 2]; - D(printf("%s %x=%x\n", __func__, addr, rval)); - return rval; -} - -static void pic_write(void *opaque, hwaddr addr, - uint64_t value, unsigned int size) -{ - struct etrax_pic *fs = opaque; - D(printf("%s addr=%x val=%x\n", __func__, addr, value)); - - if (addr == R_RW_MASK) { - fs->regs[R_RW_MASK] = value; - pic_update(fs); - } -} - -static const MemoryRegionOps pic_ops = { - .read = pic_read, - .write = pic_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4 - } -}; - -static void nmi_handler(void *opaque, int irq, int level) -{ - struct etrax_pic *fs = (void *)opaque; - uint32_t mask; - - mask = 1 << irq; - if (level) - fs->regs[R_R_NMI] |= mask; - else - fs->regs[R_R_NMI] &= ~mask; - - qemu_set_irq(fs->parent_nmi, !!fs->regs[R_R_NMI]); -} - -static void irq_handler(void *opaque, int irq, int level) -{ - struct etrax_pic *fs = (void *)opaque; - - if (irq >= 30) { - nmi_handler(opaque, irq, level); - return; - } - - irq -= 1; - fs->regs[R_R_VECT] &= ~(1 << irq); - fs->regs[R_R_VECT] |= (!!level << irq); - pic_update(fs); -} - -static void etraxfs_pic_init(Object *obj) -{ - DeviceState *dev = DEVICE(obj); - struct etrax_pic *s = ETRAX_FS_PIC(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - - qdev_init_gpio_in(dev, irq_handler, 32); - sysbus_init_irq(sbd, &s->parent_irq); - sysbus_init_irq(sbd, &s->parent_nmi); - - memory_region_init_io(&s->mmio, obj, &pic_ops, s, - "etraxfs-pic", R_MAX * 4); - sysbus_init_mmio(sbd, &s->mmio); -} - -static const TypeInfo etraxfs_pic_info = { - .name = TYPE_ETRAX_FS_PIC, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(struct etrax_pic), - .instance_init = etraxfs_pic_init, -}; - -static void etraxfs_pic_register_types(void) -{ - type_register_static(&etraxfs_pic_info); -} - -type_init(etraxfs_pic_register_types) diff --git a/hw/intc/exynos4210_combiner.c b/hw/intc/exynos4210_combiner.c index f0d310a0ebc..ebbe23436f4 100644 --- a/hw/intc/exynos4210_combiner.c +++ b/hw/intc/exynos4210_combiner.c @@ -325,16 +325,15 @@ static void exynos4210_combiner_init(Object *obj) sysbus_init_mmio(sbd, &s->iomem); } -static Property exynos4210_combiner_properties[] = { +static const Property exynos4210_combiner_properties[] = { DEFINE_PROP_UINT32("external", Exynos4210CombinerState, external, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void exynos4210_combiner_class_init(ObjectClass *klass, void *data) +static void exynos4210_combiner_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = exynos4210_combiner_reset; + device_class_set_legacy_reset(dc, exynos4210_combiner_reset); device_class_set_props(dc, exynos4210_combiner_properties); dc->vmsd = &vmstate_exynos4210_combiner; } diff --git a/hw/intc/exynos4210_gic.c b/hw/intc/exynos4210_gic.c index fcca85c6c69..7e2d79d00cd 100644 --- a/hw/intc/exynos4210_gic.c +++ b/hw/intc/exynos4210_gic.c @@ -111,12 +111,11 @@ static void exynos4210_gic_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->dist_container); } -static Property exynos4210_gic_properties[] = { +static const Property exynos4210_gic_properties[] = { DEFINE_PROP_UINT32("num-cpu", Exynos4210GicState, num_cpu, 1), - DEFINE_PROP_END_OF_LIST(), }; -static void 
exynos4210_gic_class_init(ObjectClass *klass, void *data) +static void exynos4210_gic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/goldfish_pic.c b/hw/intc/goldfish_pic.c index 6cc1c69d267..2359861785d 100644 --- a/hw/intc/goldfish_pic.c +++ b/hw/intc/goldfish_pic.c @@ -181,17 +181,16 @@ static void goldfish_pic_instance_init(Object *obj) qdev_init_gpio_in(DEVICE(obj), goldfish_irq_request, GOLDFISH_PIC_IRQ_NB); } -static Property goldfish_pic_properties[] = { +static const Property goldfish_pic_properties[] = { DEFINE_PROP_UINT8("index", GoldfishPICState, idx, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void goldfish_pic_class_init(ObjectClass *oc, void *data) +static void goldfish_pic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(oc); - dc->reset = goldfish_pic_reset; + device_class_set_legacy_reset(dc, goldfish_pic_reset); dc->realize = goldfish_pic_realize; dc->vmsd = &vmstate_goldfish_pic; ic->get_statistics = goldfish_pic_get_statistics; @@ -205,7 +204,7 @@ static const TypeInfo goldfish_pic_info = { .class_init = goldfish_pic_class_init, .instance_init = goldfish_pic_instance_init, .instance_size = sizeof(GoldfishPICState), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_INTERRUPT_STATS_PROVIDER }, { } }, diff --git a/hw/intc/grlib_irqmp.c b/hw/intc/grlib_irqmp.c index c6c51a349cc..e0f26466ba3 100644 --- a/hw/intc/grlib_irqmp.c +++ b/hw/intc/grlib_irqmp.c @@ -376,17 +376,16 @@ static void grlib_irqmp_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(dev), &irqmp->iomem); } -static Property grlib_irqmp_properties[] = { +static const Property grlib_irqmp_properties[] = { DEFINE_PROP_UINT32("ncpus", IRQMP, ncpus, 1), - DEFINE_PROP_END_OF_LIST(), }; -static void grlib_irqmp_class_init(ObjectClass *klass, void *data) +static void grlib_irqmp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = grlib_irqmp_realize; - dc->reset = grlib_irqmp_reset; + device_class_set_legacy_reset(dc, grlib_irqmp_reset); device_class_set_props(dc, grlib_irqmp_properties); } diff --git a/hw/intc/heathrow_pic.c b/hw/intc/heathrow_pic.c index c2946ba1ad5..447e8c25d8d 100644 --- a/hw/intc/heathrow_pic.c +++ b/hw/intc/heathrow_pic.c @@ -184,11 +184,11 @@ static void heathrow_init(Object *obj) sysbus_init_mmio(sbd, &s->mem); } -static void heathrow_class_init(ObjectClass *oc, void *data) +static void heathrow_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); - dc->reset = heathrow_reset; + device_class_set_legacy_reset(dc, heathrow_reset); dc->vmsd = &vmstate_heathrow; set_bit(DEVICE_CATEGORY_MISC, dc->categories); } diff --git a/hw/intc/i8259.c b/hw/intc/i8259.c index bbae2d87f4b..b6f96bf208c 100644 --- a/hw/intc/i8259.c +++ b/hw/intc/i8259.c @@ -32,10 +32,7 @@ #include "trace.h" #include "qom/object.h" -/* debug PIC */ -//#define DEBUG_PIC - -//#define DEBUG_IRQ_LATENCY +/*#define DEBUG_IRQ_LATENCY*/ #define TYPE_I8259 "isa-i8259" typedef struct PICClass PICClass; @@ -436,13 +433,13 @@ qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq_in) return irq_set; } -static void i8259_class_init(ObjectClass *klass, void *data) +static void i8259_class_init(ObjectClass *klass, const void *data) { PICClass *k = PIC_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_parent_realize(dc, 
pic_realize, &k->parent_realize); - dc->reset = pic_reset; + device_class_set_legacy_reset(dc, pic_reset); } static const TypeInfo i8259_info = { diff --git a/hw/intc/i8259_common.c b/hw/intc/i8259_common.c index d9558e39404..602e44c8eaf 100644 --- a/hw/intc/i8259_common.c +++ b/hw/intc/i8259_common.c @@ -193,15 +193,14 @@ static const VMStateDescription vmstate_pic_common = { } }; -static Property pic_properties_common[] = { +static const Property pic_properties_common[] = { DEFINE_PROP_UINT32("iobase", PICCommonState, iobase, -1), DEFINE_PROP_UINT32("elcr_addr", PICCommonState, elcr_addr, -1), DEFINE_PROP_UINT8("elcr_mask", PICCommonState, elcr_mask, -1), DEFINE_PROP_BIT("master", PICCommonState, master, 0, false), - DEFINE_PROP_END_OF_LIST(), }; -static void pic_common_class_init(ObjectClass *klass, void *data) +static void pic_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass); @@ -227,7 +226,7 @@ static const TypeInfo pic_common_type = { .class_size = sizeof(PICCommonClass), .class_init = pic_common_class_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_INTERRUPT_STATS_PROVIDER }, { } }, diff --git a/hw/intc/imx_avic.c b/hw/intc/imx_avic.c index aedc708bed4..09c3bfac0b6 100644 --- a/hw/intc/imx_avic.c +++ b/hw/intc/imx_avic.c @@ -341,12 +341,12 @@ static void imx_avic_init(Object *obj) } -static void imx_avic_class_init(ObjectClass *klass, void *data) +static void imx_avic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_imx_avic; - dc->reset = imx_avic_reset; + device_class_set_legacy_reset(dc, imx_avic_reset); dc->desc = "i.MX Advanced Vector Interrupt Controller"; } diff --git a/hw/intc/imx_gpcv2.c b/hw/intc/imx_gpcv2.c index af45e5194c4..58d286c1cfc 100644 --- a/hw/intc/imx_gpcv2.c +++ b/hw/intc/imx_gpcv2.c @@ -102,11 +102,11 @@ static const VMStateDescription vmstate_imx_gpcv2 = { }, }; -static void imx_gpcv2_class_init(ObjectClass *klass, void *data) +static void imx_gpcv2_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = imx_gpcv2_reset; + device_class_set_legacy_reset(dc, imx_gpcv2_reset); dc->vmsd = &vmstate_imx_gpcv2; dc->desc = "i.MX GPCv2 Module"; } diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c index 716ffc8bbbd..133bef852d1 100644 --- a/hw/intc/ioapic.c +++ b/hw/intc/ioapic.c @@ -30,8 +30,8 @@ #include "hw/intc/ioapic_internal.h" #include "hw/pci/msi.h" #include "hw/qdev-properties.h" -#include "sysemu/kvm.h" -#include "sysemu/sysemu.h" +#include "system/kvm.h" +#include "system/system.h" #include "hw/i386/apic-msidef.h" #include "hw/i386/x86-iommu.h" #include "trace.h" @@ -476,12 +476,11 @@ static void ioapic_unrealize(DeviceState *dev) timer_free(s->delayed_ioapic_service_timer); } -static Property ioapic_properties[] = { +static const Property ioapic_properties[] = { DEFINE_PROP_UINT8("version", IOAPICCommonState, version, IOAPIC_VER_DEF), - DEFINE_PROP_END_OF_LIST(), }; -static void ioapic_class_init(ObjectClass *klass, void *data) +static void ioapic_class_init(ObjectClass *klass, const void *data) { IOAPICCommonClass *k = IOAPIC_COMMON_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -493,7 +492,7 @@ static void ioapic_class_init(ObjectClass *klass, void *data) * migration, otherwise first 24 gsi routes will be invalid. 
*/ k->post_load = ioapic_update_kvm_routes; - dc->reset = ioapic_reset_common; + device_class_set_legacy_reset(dc, ioapic_reset_common); device_class_set_props(dc, ioapic_properties); } diff --git a/hw/intc/ioapic_common.c b/hw/intc/ioapic_common.c index 769896353a4..fce3486e519 100644 --- a/hw/intc/ioapic_common.c +++ b/hw/intc/ioapic_common.c @@ -197,7 +197,7 @@ static const VMStateDescription vmstate_ioapic_common = { } }; -static void ioapic_common_class_init(ObjectClass *klass, void *data) +static void ioapic_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass); @@ -215,7 +215,7 @@ static const TypeInfo ioapic_common_type = { .class_size = sizeof(IOAPICCommonClass), .class_init = ioapic_common_class_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_INTERRUPT_STATS_PROVIDER }, { } }, diff --git a/hw/intc/ioapic_internal.h b/hw/intc/ioapic_internal.h index 37b8565539b..51205767f44 100644 --- a/hw/intc/ioapic_internal.h +++ b/hw/intc/ioapic_internal.h @@ -22,7 +22,7 @@ #ifndef HW_INTC_IOAPIC_INTERNAL_H #define HW_INTC_IOAPIC_INTERNAL_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/intc/ioapic.h" #include "hw/sysbus.h" #include "qemu/notify.h" diff --git a/hw/intc/loongarch_extioi.c b/hw/intc/loongarch_extioi.c index 1e8e0114dc1..3e9c88d1d9c 100644 --- a/hw/intc/loongarch_extioi.c +++ b/hw/intc/loongarch_extioi.c @@ -10,16 +10,31 @@ #include "qemu/log.h" #include "qapi/error.h" #include "hw/irq.h" -#include "hw/sysbus.h" #include "hw/loongarch/virt.h" -#include "hw/qdev-properties.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" +#include "system/kvm.h" #include "hw/intc/loongarch_extioi.h" -#include "migration/vmstate.h" #include "trace.h" +static int extioi_get_index_from_archid(LoongArchExtIOICommonState *s, + uint64_t arch_id) +{ + int i; + + for (i = 0; i < s->num_cpu; i++) { + if (s->cpu[i].arch_id == arch_id) { + break; + } + } -static void extioi_update_irq(LoongArchExtIOI *s, int irq, int level) + if ((i < s->num_cpu) && s->cpu[i].cpu) { + return i; + } + + return -1; +} + +static void extioi_update_irq(LoongArchExtIOICommonState *s, int irq, int level) { int ipnum, cpu, found, irq_index, irq_mask; @@ -54,17 +69,12 @@ static void extioi_update_irq(LoongArchExtIOI *s, int irq, int level) static void extioi_setirq(void *opaque, int irq, int level) { - LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); + LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque); trace_loongarch_extioi_setirq(irq, level); if (level) { - /* - * s->isr should be used in vmstate structure, - * but it not support 'unsigned long', - * so we have to switch it. 
- */ - set_bit(irq, (unsigned long *)s->isr); + set_bit32(irq, s->isr); } else { - clear_bit(irq, (unsigned long *)s->isr); + clear_bit32(irq, s->isr); } extioi_update_irq(s, irq, level); } @@ -72,7 +82,7 @@ static void extioi_setirq(void *opaque, int irq, int level) static MemTxResult extioi_readw(void *opaque, hwaddr addr, uint64_t *data, unsigned size, MemTxAttrs attrs) { - LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); + LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque); unsigned long offset = addr & 0xffff; uint32_t index, cpu; @@ -111,7 +121,7 @@ static MemTxResult extioi_readw(void *opaque, hwaddr addr, uint64_t *data, return MEMTX_OK; } -static inline void extioi_enable_irq(LoongArchExtIOI *s, int index,\ +static inline void extioi_enable_irq(LoongArchExtIOICommonState *s, int index,\ uint32_t mask, int level) { uint32_t val; @@ -130,10 +140,10 @@ static inline void extioi_enable_irq(LoongArchExtIOI *s, int index,\ } } -static inline void extioi_update_sw_coremap(LoongArchExtIOI *s, int irq, - uint64_t val, bool notify) +static inline void extioi_update_sw_coremap(LoongArchExtIOICommonState *s, + int irq, uint64_t val, bool notify) { - int i, cpu; + int i, cpu, cpuid; /* * loongarch only support little endian, @@ -142,19 +152,24 @@ static inline void extioi_update_sw_coremap(LoongArchExtIOI *s, int irq, val = cpu_to_le64(val); for (i = 0; i < 4; i++) { - cpu = val & 0xff; + cpuid = val & 0xff; val = val >> 8; if (!(s->status & BIT(EXTIOI_ENABLE_CPU_ENCODE))) { - cpu = ctz32(cpu); - cpu = (cpu >= 4) ? 0 : cpu; + cpuid = ctz32(cpuid); + cpuid = (cpuid >= 4) ? 0 : cpuid; + } + + cpu = extioi_get_index_from_archid(s, cpuid); + if (cpu < 0) { + continue; } if (s->sw_coremap[irq + i] == cpu) { continue; } - if (notify && test_bit(irq + i, (unsigned long *)s->isr)) { + if (notify && test_bit32(irq + i, s->isr)) { /* * lower irq at old cpu and raise irq at new cpu */ @@ -167,8 +182,8 @@ static inline void extioi_update_sw_coremap(LoongArchExtIOI *s, int irq, } } -static inline void extioi_update_sw_ipmap(LoongArchExtIOI *s, int index, - uint64_t val) +static inline void extioi_update_sw_ipmap(LoongArchExtIOICommonState *s, + int index, uint64_t val) { int i; uint8_t ipnum; @@ -191,7 +206,7 @@ static MemTxResult extioi_writew(void *opaque, hwaddr addr, uint64_t val, unsigned size, MemTxAttrs attrs) { - LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); + LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque); int cpu, index, old_data, irq; uint32_t offset; @@ -271,7 +286,7 @@ static const MemoryRegionOps extioi_ops = { static MemTxResult extioi_virt_readw(void *opaque, hwaddr addr, uint64_t *data, unsigned size, MemTxAttrs attrs) { - LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); + LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque); switch (addr) { case EXTIOI_VIRT_FEATURES: @@ -291,7 +306,7 @@ static MemTxResult extioi_virt_writew(void *opaque, hwaddr addr, uint64_t val, unsigned size, MemTxAttrs attrs) { - LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); + LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque); switch (addr) { case EXTIOI_VIRT_FEATURES: @@ -325,65 +340,74 @@ static const MemoryRegionOps extioi_virt_ops = { static void loongarch_extioi_realize(DeviceState *dev, Error **errp) { - LoongArchExtIOI *s = LOONGARCH_EXTIOI(dev); + LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(dev); + LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_GET_CLASS(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - int i, pin; + Error *local_err = NULL; + int 
i; - if (s->num_cpu == 0) { - error_setg(errp, "num-cpu must be at least 1"); + lec->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); return; } - for (i = 0; i < EXTIOI_IRQS; i++) { - sysbus_init_irq(sbd, &s->irq[i]); - } - - qdev_init_gpio_in(dev, extioi_setirq, EXTIOI_IRQS); - memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops, - s, "extioi_system_mem", 0x900); - sysbus_init_mmio(sbd, &s->extioi_system_mem); - if (s->features & BIT(EXTIOI_HAS_VIRT_EXTENSION)) { - memory_region_init_io(&s->virt_extend, OBJECT(s), &extioi_virt_ops, - s, "extioi_virt", EXTIOI_VIRT_SIZE); - sysbus_init_mmio(sbd, &s->virt_extend); s->features |= EXTIOI_VIRT_HAS_FEATURES; } else { s->status |= BIT(EXTIOI_ENABLE); } - s->cpu = g_new0(ExtIOICore, s->num_cpu); - if (s->cpu == NULL) { - error_setg(errp, "Memory allocation for ExtIOICore faile"); - return; - } + if (kvm_irqchip_in_kernel()) { + kvm_extioi_realize(dev, errp); + } else { + for (i = 0; i < EXTIOI_IRQS; i++) { + sysbus_init_irq(sbd, &s->irq[i]); + } - for (i = 0; i < s->num_cpu; i++) { - for (pin = 0; pin < LS3A_INTC_IP; pin++) { - qdev_init_gpio_out(dev, &s->cpu[i].parent_irq[pin], 1); + qdev_init_gpio_in(dev, extioi_setirq, EXTIOI_IRQS); + memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops, + s, "extioi_system_mem", 0x900); + sysbus_init_mmio(sbd, &s->extioi_system_mem); + if (s->features & BIT(EXTIOI_HAS_VIRT_EXTENSION)) { + memory_region_init_io(&s->virt_extend, OBJECT(s), &extioi_virt_ops, + s, "extioi_virt", EXTIOI_VIRT_SIZE); + sysbus_init_mmio(sbd, &s->virt_extend); } } } -static void loongarch_extioi_finalize(Object *obj) +static void loongarch_extioi_reset_hold(Object *obj, ResetType type) { - LoongArchExtIOI *s = LOONGARCH_EXTIOI(obj); + LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_GET_CLASS(obj); + + if (lec->parent_phases.hold) { + lec->parent_phases.hold(obj, type); + } - g_free(s->cpu); + if (kvm_irqchip_in_kernel()) { + kvm_extioi_put(obj, 0); + } } -static void loongarch_extioi_reset(DeviceState *d) +static int vmstate_extioi_pre_save(void *opaque) { - LoongArchExtIOI *s = LOONGARCH_EXTIOI(d); + if (kvm_irqchip_in_kernel()) { + return kvm_extioi_get(opaque); + } - s->status = 0; + return 0; } static int vmstate_extioi_post_load(void *opaque, int version_id) { - LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); + LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(opaque); int i, start_irq; + if (kvm_irqchip_in_kernel()) { + return kvm_extioi_put(opaque, version_id); + } + for (i = 0; i < (EXTIOI_IRQS / 4); i++) { start_irq = i * 4; extioi_update_sw_coremap(s, start_irq, s->coremap[i], false); @@ -396,66 +420,29 @@ static int vmstate_extioi_post_load(void *opaque, int version_id) return 0; } -static const VMStateDescription vmstate_extioi_core = { - .name = "extioi-core", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_UINT32_ARRAY(coreisr, ExtIOICore, EXTIOI_IRQS_GROUP_COUNT), - VMSTATE_END_OF_LIST() - } -}; - -static const VMStateDescription vmstate_loongarch_extioi = { - .name = TYPE_LOONGARCH_EXTIOI, - .version_id = 3, - .minimum_version_id = 3, - .post_load = vmstate_extioi_post_load, - .fields = (const VMStateField[]) { - VMSTATE_UINT32_ARRAY(bounce, LoongArchExtIOI, EXTIOI_IRQS_GROUP_COUNT), - VMSTATE_UINT32_ARRAY(nodetype, LoongArchExtIOI, - EXTIOI_IRQS_NODETYPE_COUNT / 2), - VMSTATE_UINT32_ARRAY(enable, LoongArchExtIOI, EXTIOI_IRQS / 32), - VMSTATE_UINT32_ARRAY(isr, LoongArchExtIOI, EXTIOI_IRQS / 32), - 
VMSTATE_UINT32_ARRAY(ipmap, LoongArchExtIOI, EXTIOI_IRQS_IPMAP_SIZE / 4), - VMSTATE_UINT32_ARRAY(coremap, LoongArchExtIOI, EXTIOI_IRQS / 4), - - VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongArchExtIOI, num_cpu, - vmstate_extioi_core, ExtIOICore), - VMSTATE_UINT32(features, LoongArchExtIOI), - VMSTATE_UINT32(status, LoongArchExtIOI), - VMSTATE_END_OF_LIST() - } -}; - -static Property extioi_properties[] = { - DEFINE_PROP_UINT32("num-cpu", LoongArchExtIOI, num_cpu, 1), - DEFINE_PROP_BIT("has-virtualization-extension", LoongArchExtIOI, features, - EXTIOI_HAS_VIRT_EXTENSION, 0), - DEFINE_PROP_END_OF_LIST(), -}; - -static void loongarch_extioi_class_init(ObjectClass *klass, void *data) +static void loongarch_extioi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - - dc->realize = loongarch_extioi_realize; - dc->reset = loongarch_extioi_reset; - device_class_set_props(dc, extioi_properties); - dc->vmsd = &vmstate_loongarch_extioi; + LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_CLASS(klass); + LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + device_class_set_parent_realize(dc, loongarch_extioi_realize, + &lec->parent_realize); + resettable_class_set_parent_phases(rc, NULL, loongarch_extioi_reset_hold, + NULL, &lec->parent_phases); + lecc->pre_save = vmstate_extioi_pre_save; + lecc->post_load = vmstate_extioi_post_load; } -static const TypeInfo loongarch_extioi_info = { - .name = TYPE_LOONGARCH_EXTIOI, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(struct LoongArchExtIOI), - .class_init = loongarch_extioi_class_init, - .instance_finalize = loongarch_extioi_finalize, +static const TypeInfo loongarch_extioi_types[] = { + { + .name = TYPE_LOONGARCH_EXTIOI, + .parent = TYPE_LOONGARCH_EXTIOI_COMMON, + .instance_size = sizeof(LoongArchExtIOIState), + .class_size = sizeof(LoongArchExtIOIClass), + .class_init = loongarch_extioi_class_init, + } }; -static void loongarch_extioi_register_types(void) -{ - type_register_static(&loongarch_extioi_info); -} - -type_init(loongarch_extioi_register_types) +DEFINE_TYPES(loongarch_extioi_types) diff --git a/hw/intc/loongarch_extioi_common.c b/hw/intc/loongarch_extioi_common.c new file mode 100644 index 00000000000..ba03383ed19 --- /dev/null +++ b/hw/intc/loongarch_extioi_common.c @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Loongson extioi interrupt controller emulation + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/intc/loongarch_extioi_common.h" +#include "migration/vmstate.h" +#include "target/loongarch/cpu.h" + +static ExtIOICore *loongarch_extioi_get_cpu(LoongArchExtIOICommonState *s, + DeviceState *dev) +{ + CPUClass *k = CPU_GET_CLASS(dev); + uint64_t arch_id = k->get_arch_id(CPU(dev)); + int i; + + for (i = 0; i < s->num_cpu; i++) { + if (s->cpu[i].arch_id == arch_id) { + return &s->cpu[i]; + } + } + + return NULL; +} + +static void loongarch_extioi_cpu_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(hotplug_dev); + Object *obj = OBJECT(dev); + ExtIOICore *core; + int pin, index; + + if (!object_dynamic_cast(obj, TYPE_LOONGARCH_CPU)) { + warn_report("LoongArch extioi: Invalid %s device type", + object_get_typename(obj)); + return; + } + + core = 
loongarch_extioi_get_cpu(s, dev); + if (!core) { + return; + } + + core->cpu = CPU(dev); + index = core - s->cpu; + + /* + * connect extioi irq to the cpu irq + * cpu_pin[LS3A_INTC_IP + 2 : 2] <= intc_pin[LS3A_INTC_IP : 0] + */ + for (pin = 0; pin < LS3A_INTC_IP; pin++) { + qdev_connect_gpio_out(DEVICE(s), index * LS3A_INTC_IP + pin, + qdev_get_gpio_in(dev, pin + 2)); + } +} + +static void loongarch_extioi_cpu_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(hotplug_dev); + Object *obj = OBJECT(dev); + ExtIOICore *core; + + if (!object_dynamic_cast(obj, TYPE_LOONGARCH_CPU)) { + warn_report("LoongArch extioi: Invalid %s device type", + object_get_typename(obj)); + return; + } + + core = loongarch_extioi_get_cpu(s, dev); + if (!core) { + return; + } + + core->cpu = NULL; +} + +static void loongarch_extioi_common_realize(DeviceState *dev, Error **errp) +{ + LoongArchExtIOICommonState *s = (LoongArchExtIOICommonState *)dev; + MachineState *machine = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(machine); + const CPUArchIdList *id_list; + int i, pin; + + assert(mc->possible_cpu_arch_ids); + id_list = mc->possible_cpu_arch_ids(machine); + s->num_cpu = id_list->len; + s->cpu = g_new0(ExtIOICore, s->num_cpu); + if (s->cpu == NULL) { + error_setg(errp, "Memory allocation for ExtIOICore faile"); + return; + } + + for (i = 0; i < s->num_cpu; i++) { + s->cpu[i].arch_id = id_list->cpus[i].arch_id; + s->cpu[i].cpu = CPU(id_list->cpus[i].cpu); + + for (pin = 0; pin < LS3A_INTC_IP; pin++) { + qdev_init_gpio_out(dev, &s->cpu[i].parent_irq[pin], 1); + } + } +} + +static void loongarch_extioi_common_unrealize(DeviceState *dev) +{ + LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(dev); + + g_free(s->cpu); +} + +static void loongarch_extioi_common_reset_hold(Object *obj, ResetType type) +{ + LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_GET_CLASS(obj); + LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(obj); + ExtIOICore *core; + int i; + + if (lecc->parent_phases.hold) { + lecc->parent_phases.hold(obj, type); + } + + /* Clear HW registers for the board */ + memset(s->nodetype, 0, sizeof(s->nodetype)); + memset(s->bounce, 0, sizeof(s->bounce)); + memset(s->isr, 0, sizeof(s->isr)); + memset(s->enable, 0, sizeof(s->enable)); + memset(s->ipmap, 0, sizeof(s->ipmap)); + memset(s->coremap, 0, sizeof(s->coremap)); + memset(s->sw_pending, 0, sizeof(s->sw_pending)); + memset(s->sw_ipmap, 0, sizeof(s->sw_ipmap)); + memset(s->sw_coremap, 0, sizeof(s->sw_coremap)); + + for (i = 0; i < s->num_cpu; i++) { + core = s->cpu + i; + /* EXTIOI with targeted CPU available however not present */ + if (!core->cpu) { + continue; + } + + /* Clear HW registers for CPUs */ + memset(core->coreisr, 0, sizeof(core->coreisr)); + memset(core->sw_isr, 0, sizeof(core->sw_isr)); + } + + s->status = 0; +} + +static int loongarch_extioi_common_pre_save(void *opaque) +{ + LoongArchExtIOICommonState *s = (LoongArchExtIOICommonState *)opaque; + LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_GET_CLASS(s); + + if (lecc->pre_save) { + return lecc->pre_save(s); + } + + return 0; +} + +static int loongarch_extioi_common_post_load(void *opaque, int version_id) +{ + LoongArchExtIOICommonState *s = (LoongArchExtIOICommonState *)opaque; + LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_GET_CLASS(s); + + if (lecc->post_load) { + return lecc->post_load(s, version_id); + } + + return 0; +} + +static const 
VMStateDescription vmstate_extioi_core = { + .name = "extioi-core", + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT32_ARRAY(coreisr, ExtIOICore, EXTIOI_IRQS_GROUP_COUNT), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_loongarch_extioi = { + .name = "loongarch.extioi", + .version_id = 3, + .minimum_version_id = 3, + .pre_save = loongarch_extioi_common_pre_save, + .post_load = loongarch_extioi_common_post_load, + .fields = (const VMStateField[]) { + VMSTATE_UINT32_ARRAY(bounce, LoongArchExtIOICommonState, + EXTIOI_IRQS_GROUP_COUNT), + VMSTATE_UINT32_ARRAY(nodetype, LoongArchExtIOICommonState, + EXTIOI_IRQS_NODETYPE_COUNT / 2), + VMSTATE_UINT32_ARRAY(enable, LoongArchExtIOICommonState, + EXTIOI_IRQS / 32), + VMSTATE_UINT32_ARRAY(isr, LoongArchExtIOICommonState, + EXTIOI_IRQS / 32), + VMSTATE_UINT32_ARRAY(ipmap, LoongArchExtIOICommonState, + EXTIOI_IRQS_IPMAP_SIZE / 4), + VMSTATE_UINT32_ARRAY(coremap, LoongArchExtIOICommonState, + EXTIOI_IRQS / 4), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongArchExtIOICommonState, + num_cpu, vmstate_extioi_core, ExtIOICore), + VMSTATE_UINT32(features, LoongArchExtIOICommonState), + VMSTATE_UINT32(status, LoongArchExtIOICommonState), + VMSTATE_END_OF_LIST() + } +}; + +static const Property extioi_properties[] = { + DEFINE_PROP_BIT("has-virtualization-extension", LoongArchExtIOICommonState, + features, EXTIOI_HAS_VIRT_EXTENSION, 0), +}; + +static void loongarch_extioi_common_class_init(ObjectClass *klass, + const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_CLASS(klass); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + device_class_set_parent_realize(dc, loongarch_extioi_common_realize, + &lecc->parent_realize); + device_class_set_parent_unrealize(dc, loongarch_extioi_common_unrealize, + &lecc->parent_unrealize); + resettable_class_set_parent_phases(rc, NULL, + loongarch_extioi_common_reset_hold, + NULL, &lecc->parent_phases); + device_class_set_props(dc, extioi_properties); + dc->vmsd = &vmstate_loongarch_extioi; + hc->plug = loongarch_extioi_cpu_plug; + hc->unplug = loongarch_extioi_cpu_unplug; +} + +static const TypeInfo loongarch_extioi_common_types[] = { + { + .name = TYPE_LOONGARCH_EXTIOI_COMMON, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(LoongArchExtIOICommonState), + .class_size = sizeof(LoongArchExtIOICommonClass), + .class_init = loongarch_extioi_common_class_init, + .interfaces = (const InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + }, + .abstract = true, + } +}; + +DEFINE_TYPES(loongarch_extioi_common_types) diff --git a/hw/intc/loongarch_extioi_kvm.c b/hw/intc/loongarch_extioi_kvm.c new file mode 100644 index 00000000000..aa2e8c753fb --- /dev/null +++ b/hw/intc/loongarch_extioi_kvm.c @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch EXTIOI interrupt kvm support + * + * Copyright (C) 2025 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "hw/intc/loongarch_extioi.h" +#include "linux/kvm.h" +#include "qapi/error.h" +#include "system/kvm.h" + +static void kvm_extioi_access_reg(int fd, uint64_t addr, void *val, bool write) +{ + kvm_device_access(fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS, + addr, val, write, &error_abort); +} + +static void kvm_extioi_access_sw_state(int fd, uint64_t addr, + void *val, bool write) +{ + kvm_device_access(fd, 
KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS, + addr, val, write, &error_abort); +} + +static void kvm_extioi_access_sw_status(void *opaque, bool write) +{ + LoongArchExtIOICommonState *lecs = LOONGARCH_EXTIOI_COMMON(opaque); + LoongArchExtIOIState *les = LOONGARCH_EXTIOI(opaque); + int addr; + + addr = KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE; + kvm_extioi_access_sw_state(les->dev_fd, addr, &lecs->status, write); +} + +static void kvm_extioi_access_regs(void *opaque, bool write) +{ + LoongArchExtIOICommonState *lecs = LOONGARCH_EXTIOI_COMMON(opaque); + LoongArchExtIOIState *les = LOONGARCH_EXTIOI(opaque); + int fd = les->dev_fd; + int addr, offset, cpu; + + for (addr = EXTIOI_NODETYPE_START; addr < EXTIOI_NODETYPE_END; addr += 4) { + offset = (addr - EXTIOI_NODETYPE_START) / 4; + kvm_extioi_access_reg(fd, addr, &lecs->nodetype[offset], write); + } + + for (addr = EXTIOI_IPMAP_START; addr < EXTIOI_IPMAP_END; addr += 4) { + offset = (addr - EXTIOI_IPMAP_START) / 4; + kvm_extioi_access_reg(fd, addr, &lecs->ipmap[offset], write); + } + + for (addr = EXTIOI_ENABLE_START; addr < EXTIOI_ENABLE_END; addr += 4) { + offset = (addr - EXTIOI_ENABLE_START) / 4; + kvm_extioi_access_reg(fd, addr, &lecs->enable[offset], write); + } + + for (addr = EXTIOI_BOUNCE_START; addr < EXTIOI_BOUNCE_END; addr += 4) { + offset = (addr - EXTIOI_BOUNCE_START) / 4; + kvm_extioi_access_reg(fd, addr, &lecs->bounce[offset], write); + } + + for (addr = EXTIOI_ISR_START; addr < EXTIOI_ISR_END; addr += 4) { + offset = (addr - EXTIOI_ISR_START) / 4; + kvm_extioi_access_reg(fd, addr, &lecs->isr[offset], write); + } + + for (addr = EXTIOI_COREMAP_START; addr < EXTIOI_COREMAP_END; addr += 4) { + offset = (addr - EXTIOI_COREMAP_START) / 4; + kvm_extioi_access_reg(fd, addr, &lecs->coremap[offset], write); + } + + for (cpu = 0; cpu < lecs->num_cpu; cpu++) { + for (addr = EXTIOI_COREISR_START; + addr < EXTIOI_COREISR_END; addr += 4) { + offset = (addr - EXTIOI_COREISR_START) / 4; + kvm_extioi_access_reg(fd, (cpu << 16) | addr, + &lecs->cpu[cpu].coreisr[offset], write); + } + } +} + +int kvm_extioi_get(void *opaque) +{ + kvm_extioi_access_regs(opaque, false); + kvm_extioi_access_sw_status(opaque, false); + return 0; +} + +int kvm_extioi_put(void *opaque, int version_id) +{ + LoongArchExtIOIState *les = LOONGARCH_EXTIOI(opaque); + int fd = les->dev_fd; + + if (fd == 0) { + return 0; + } + + kvm_extioi_access_regs(opaque, true); + kvm_extioi_access_sw_status(opaque, true); + kvm_device_access(fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL, + KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED, + NULL, true, &error_abort); + return 0; +} + +void kvm_extioi_realize(DeviceState *dev, Error **errp) +{ + LoongArchExtIOICommonState *lecs = LOONGARCH_EXTIOI_COMMON(dev); + LoongArchExtIOIState *les = LOONGARCH_EXTIOI(dev); + int ret; + + ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_LOONGARCH_EIOINTC, false); + if (ret < 0) { + fprintf(stderr, "create KVM_LOONGARCH_EIOINTC failed: %s\n", + strerror(-ret)); + abort(); + } + + les->dev_fd = ret; + ret = kvm_device_access(les->dev_fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL, + KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU, + &lecs->num_cpu, true, NULL); + if (ret < 0) { + fprintf(stderr, "KVM_LOONGARCH_EXTIOI_INIT_NUM_CPU failed: %s\n", + strerror(-ret)); + abort(); + } + + ret = kvm_device_access(les->dev_fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL, + KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE, + &lecs->features, true, NULL); + if (ret < 0) { + fprintf(stderr, "KVM_LOONGARCH_EXTIOI_INIT_FEATURE failed: %s\n", + strerror(-ret)); + 
abort(); + } +} diff --git a/hw/intc/loongarch_ipi.c b/hw/intc/loongarch_ipi.c new file mode 100644 index 00000000000..fc8005c9444 --- /dev/null +++ b/hw/intc/loongarch_ipi.c @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch IPI interrupt support + * + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "hw/boards.h" +#include "qapi/error.h" +#include "hw/intc/loongarch_ipi.h" +#include "hw/qdev-properties.h" +#include "system/kvm.h" +#include "target/loongarch/cpu.h" + +static AddressSpace *get_iocsr_as(CPUState *cpu) +{ + return LOONGARCH_CPU(cpu)->env.address_space_iocsr; +} + +static int loongarch_ipi_cmp(const void *a, const void *b) +{ + IPICore *ipi_a = (IPICore *)a; + IPICore *ipi_b = (IPICore *)b; + + return ipi_a->arch_id - ipi_b->arch_id; +} + +static int loongarch_cpu_by_arch_id(LoongsonIPICommonState *lics, + int64_t arch_id, int *index, CPUState **pcs) +{ + IPICore ipi, *found; + + ipi.arch_id = arch_id; + found = bsearch(&ipi, lics->cpu, lics->num_cpu, sizeof(IPICore), + loongarch_ipi_cmp); + if (found && found->cpu) { + if (index) { + *index = found - lics->cpu; + } + + if (pcs) { + *pcs = found->cpu; + } + + return MEMTX_OK; + } + + return MEMTX_ERROR; +} + +static IPICore *loongarch_ipi_get_cpu(LoongsonIPICommonState *lics, + DeviceState *dev) +{ + CPUClass *k = CPU_GET_CLASS(dev); + uint64_t arch_id = k->get_arch_id(CPU(dev)); + int i; + + for (i = 0; i < lics->num_cpu; i++) { + if (lics->cpu[i].arch_id == arch_id) { + return &lics->cpu[i]; + } + } + + return NULL; +} + +static void loongarch_ipi_realize(DeviceState *dev, Error **errp) +{ + LoongsonIPICommonState *lics = LOONGSON_IPI_COMMON(dev); + LoongarchIPIClass *lic = LOONGARCH_IPI_GET_CLASS(dev); + MachineState *machine = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(machine); + const CPUArchIdList *id_list; + Error *local_err = NULL; + int i; + + lic->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + assert(mc->possible_cpu_arch_ids); + id_list = mc->possible_cpu_arch_ids(machine); + lics->num_cpu = id_list->len; + lics->cpu = g_new0(IPICore, lics->num_cpu); + for (i = 0; i < lics->num_cpu; i++) { + lics->cpu[i].arch_id = id_list->cpus[i].arch_id; + lics->cpu[i].cpu = CPU(id_list->cpus[i].cpu); + lics->cpu[i].ipi = lics; + qdev_init_gpio_out(dev, &lics->cpu[i].irq, 1); + } + + if (kvm_irqchip_in_kernel()) { + kvm_ipi_realize(dev, errp); + } +} + +static void loongarch_ipi_reset_hold(Object *obj, ResetType type) +{ + int i; + LoongarchIPIClass *lic = LOONGARCH_IPI_GET_CLASS(obj); + LoongsonIPICommonState *lics = LOONGSON_IPI_COMMON(obj); + IPICore *core; + + if (lic->parent_phases.hold) { + lic->parent_phases.hold(obj, type); + } + + for (i = 0; i < lics->num_cpu; i++) { + core = lics->cpu + i; + /* IPI with targeted CPU available however not present */ + if (!core->cpu) { + continue; + } + + core->status = 0; + core->en = 0; + core->set = 0; + core->clear = 0; + memset(core->buf, 0, sizeof(core->buf)); + } + + if (kvm_irqchip_in_kernel()) { + kvm_ipi_put(obj, 0); + } +} + +static void loongarch_ipi_cpu_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + LoongsonIPICommonState *lics = LOONGSON_IPI_COMMON(hotplug_dev); + Object *obj = OBJECT(dev); + IPICore *core; + int index; + + if (!object_dynamic_cast(obj, TYPE_LOONGARCH_CPU)) { + warn_report("LoongArch extioi: Invalid %s device type", + 
object_get_typename(obj)); + return; + } + + core = loongarch_ipi_get_cpu(lics, dev); + if (!core) { + return; + } + + core->cpu = CPU(dev); + index = core - lics->cpu; + + /* connect ipi irq to cpu irq */ + qdev_connect_gpio_out(DEVICE(lics), index, qdev_get_gpio_in(dev, IRQ_IPI)); +} + +static void loongarch_ipi_cpu_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + LoongsonIPICommonState *lics = LOONGSON_IPI_COMMON(hotplug_dev); + Object *obj = OBJECT(dev); + IPICore *core; + + if (!object_dynamic_cast(obj, TYPE_LOONGARCH_CPU)) { + warn_report("LoongArch extioi: Invalid %s device type", + object_get_typename(obj)); + return; + } + + core = loongarch_ipi_get_cpu(lics, dev); + if (!core) { + return; + } + + core->cpu = NULL; +} + +static int loongarch_ipi_pre_save(void *opaque) +{ + if (kvm_irqchip_in_kernel()) { + return kvm_ipi_get(opaque); + } + + return 0; +} + +static int loongarch_ipi_post_load(void *opaque, int version_id) +{ + if (kvm_irqchip_in_kernel()) { + return kvm_ipi_put(opaque, version_id); + } + + return 0; +} + +static void loongarch_ipi_class_init(ObjectClass *klass, const void *data) +{ + LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_CLASS(klass); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + LoongarchIPIClass *lic = LOONGARCH_IPI_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_parent_realize(dc, loongarch_ipi_realize, + &lic->parent_realize); + resettable_class_set_parent_phases(rc, NULL, loongarch_ipi_reset_hold, + NULL, &lic->parent_phases); + licc->get_iocsr_as = get_iocsr_as; + licc->cpu_by_arch_id = loongarch_cpu_by_arch_id; + hc->plug = loongarch_ipi_cpu_plug; + hc->unplug = loongarch_ipi_cpu_unplug; + licc->pre_save = loongarch_ipi_pre_save; + licc->post_load = loongarch_ipi_post_load; +} + +static const TypeInfo loongarch_ipi_types[] = { + { + .name = TYPE_LOONGARCH_IPI, + .parent = TYPE_LOONGSON_IPI_COMMON, + .instance_size = sizeof(LoongarchIPIState), + .class_size = sizeof(LoongarchIPIClass), + .class_init = loongarch_ipi_class_init, + .interfaces = (const InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + }, + } +}; + +DEFINE_TYPES(loongarch_ipi_types) diff --git a/hw/intc/loongarch_ipi_kvm.c b/hw/intc/loongarch_ipi_kvm.c new file mode 100644 index 00000000000..dd4c367abf9 --- /dev/null +++ b/hw/intc/loongarch_ipi_kvm.c @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch IPI interrupt KVM support + * + * Copyright (C) 2025 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/intc/loongarch_ipi.h" +#include "system/kvm.h" +#include "target/loongarch/cpu.h" + +static void kvm_ipi_access_reg(int fd, uint64_t addr, uint32_t *val, bool write) +{ + kvm_device_access(fd, KVM_DEV_LOONGARCH_IPI_GRP_REGS, + addr, val, write, &error_abort); +} + +static void kvm_ipi_access_regs(void *opaque, bool write) +{ + LoongsonIPICommonState *ipi = (LoongsonIPICommonState *)opaque; + LoongarchIPIState *lis = LOONGARCH_IPI(opaque); + IPICore *core; + uint64_t attr; + int i, cpu_index, fd = lis->dev_fd; + + if (fd == 0) { + return; + } + + for (i = 0; i < ipi->num_cpu; i++) { + core = &ipi->cpu[i]; + if (core->cpu == NULL) { + continue; + } + cpu_index = i; + + attr = (cpu_index << 16) | CORE_STATUS_OFF; + kvm_ipi_access_reg(fd, attr, &core->status, write); + + attr = (cpu_index << 16) | CORE_EN_OFF; + kvm_ipi_access_reg(fd, attr, &core->en, write); + + attr = 
(cpu_index << 16) | CORE_SET_OFF; + kvm_ipi_access_reg(fd, attr, &core->set, write); + + attr = (cpu_index << 16) | CORE_CLEAR_OFF; + kvm_ipi_access_reg(fd, attr, &core->clear, write); + + attr = (cpu_index << 16) | CORE_BUF_20; + kvm_ipi_access_reg(fd, attr, &core->buf[0], write); + + attr = (cpu_index << 16) | CORE_BUF_28; + kvm_ipi_access_reg(fd, attr, &core->buf[2], write); + + attr = (cpu_index << 16) | CORE_BUF_30; + kvm_ipi_access_reg(fd, attr, &core->buf[4], write); + + attr = (cpu_index << 16) | CORE_BUF_38; + kvm_ipi_access_reg(fd, attr, &core->buf[6], write); + } +} + +int kvm_ipi_get(void *opaque) +{ + kvm_ipi_access_regs(opaque, false); + return 0; +} + +int kvm_ipi_put(void *opaque, int version_id) +{ + kvm_ipi_access_regs(opaque, true); + return 0; +} + +void kvm_ipi_realize(DeviceState *dev, Error **errp) +{ + LoongarchIPIState *lis = LOONGARCH_IPI(dev); + int ret; + + ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_LOONGARCH_IPI, false); + if (ret < 0) { + fprintf(stderr, "IPI KVM_CREATE_DEVICE failed: %s\n", + strerror(-ret)); + abort(); + } + + lis->dev_fd = ret; +} diff --git a/hw/intc/loongarch_pch_msi.c b/hw/intc/loongarch_pch_msi.c index ecf3ed0267e..f6d163158da 100644 --- a/hw/intc/loongarch_pch_msi.c +++ b/hw/intc/loongarch_pch_msi.c @@ -13,6 +13,7 @@ #include "hw/pci/msi.h" #include "hw/misc/unimp.h" #include "migration/vmstate.h" +#include "system/kvm.h" #include "trace.h" static uint64_t loongarch_msi_mem_read(void *opaque, hwaddr addr, unsigned size) @@ -26,6 +27,15 @@ static void loongarch_msi_mem_write(void *opaque, hwaddr addr, LoongArchPCHMSI *s = (LoongArchPCHMSI *)opaque; int irq_num; + if (kvm_irqchip_in_kernel()) { + MSIMessage msg; + + msg.address = addr; + msg.data = val; + kvm_irqchip_send_msi(kvm_state, msg); + return; + } + /* * vector number is irq number from upper extioi intc * need subtract irq base to get msi vector offset @@ -42,13 +52,6 @@ static const MemoryRegionOps loongarch_pch_msi_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -static void pch_msi_irq_handler(void *opaque, int irq, int level) -{ - LoongArchPCHMSI *s = LOONGARCH_PCH_MSI(opaque); - - qemu_set_irq(s->pch_msi_irq[irq], level); -} - static void loongarch_pch_msi_realize(DeviceState *dev, Error **errp) { LoongArchPCHMSI *s = LOONGARCH_PCH_MSI(dev); @@ -59,9 +62,7 @@ static void loongarch_pch_msi_realize(DeviceState *dev, Error **errp) } s->pch_msi_irq = g_new(qemu_irq, s->irq_num); - qdev_init_gpio_out(dev, s->pch_msi_irq, s->irq_num); - qdev_init_gpio_in(dev, pch_msi_irq_handler, s->irq_num); } static void loongarch_pch_msi_unrealize(DeviceState *dev) @@ -83,13 +84,12 @@ static void loongarch_pch_msi_init(Object *obj) } -static Property loongarch_msi_properties[] = { +static const Property loongarch_msi_properties[] = { DEFINE_PROP_UINT32("msi_irq_base", LoongArchPCHMSI, irq_base, 0), DEFINE_PROP_UINT32("msi_irq_num", LoongArchPCHMSI, irq_num, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void loongarch_pch_msi_class_init(ObjectClass *klass, void *data) +static void loongarch_pch_msi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/loongarch_pch_pic.c b/hw/intc/loongarch_pch_pic.c index 2d5e65abfff..c4b242dbf41 100644 --- a/hw/intc/loongarch_pch_pic.c +++ b/hw/intc/loongarch_pch_pic.c @@ -7,17 +7,15 @@ #include "qemu/osdep.h" #include "qemu/bitops.h" -#include "hw/sysbus.h" -#include "hw/loongarch/virt.h" -#include "hw/pci-host/ls7a.h" +#include "qemu/log.h" #include "hw/irq.h" #include "hw/intc/loongarch_pch_pic.h" 
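kvm_ipi_access_regs() above reads or writes the in-kernel IPI state one register at a time, packing the vCPU index and the per-core register offset into a single KVM device attribute address. A minimal sketch of that packing follows; it is not part of the patch, and the demo_* offsets are placeholders for the real CORE_*_OFF constants from the IPI header.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative register offsets; the real values live in the IPI header. */
    enum { DEMO_CORE_STATUS_OFF = 0x00, DEMO_CORE_EN_OFF = 0x04 };

    /* Compose an attribute address the way kvm_ipi_access_regs() does:
     * vCPU index in bits 16 and up, register offset in the low 16 bits. */
    static uint64_t demo_ipi_attr(int cpu_index, uint32_t reg_off)
    {
        return ((uint64_t)cpu_index << 16) | reg_off;
    }

    int main(void)
    {
        uint64_t attr = demo_ipi_attr(3, DEMO_CORE_EN_OFF);

        printf("attr=0x%llx cpu=%llu off=0x%llx\n",
               (unsigned long long)attr,
               (unsigned long long)(attr >> 16),
               (unsigned long long)(attr & 0xffff));
        return 0;
    }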
-#include "hw/qdev-properties.h" -#include "migration/vmstate.h" +#include "system/kvm.h" #include "trace.h" #include "qapi/error.h" -static void pch_pic_update_irq(LoongArchPCHPIC *s, uint64_t mask, int level) +static void pch_pic_update_irq(LoongArchPICCommonState *s, uint64_t mask, + int level) { uint64_t val; int irq; @@ -45,12 +43,17 @@ static void pch_pic_update_irq(LoongArchPCHPIC *s, uint64_t mask, int level) static void pch_pic_irq_handler(void *opaque, int irq, int level) { - LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); + LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque); uint64_t mask = 1ULL << irq; assert(irq < s->irq_num); trace_loongarch_pch_pic_irq_handler(irq, level); + if (kvm_irqchip_in_kernel()) { + kvm_set_irq(kvm_state, irq, !!level); + return; + } + if (s->intedge & mask) { /* Edge triggered */ if (level) { @@ -75,389 +78,266 @@ static void pch_pic_irq_handler(void *opaque, int irq, int level) pch_pic_update_irq(s, mask, level); } -static uint64_t loongarch_pch_pic_low_readw(void *opaque, hwaddr addr, - unsigned size) +static uint64_t pch_pic_read(void *opaque, hwaddr addr, uint64_t field_mask) { - LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); + LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque); uint64_t val = 0; - uint32_t offset = addr & 0xfff; + uint32_t offset; - switch (offset) { - case PCH_PIC_INT_ID_LO: - val = PCH_PIC_INT_ID_VAL; + offset = addr & 7; + addr -= offset; + switch (addr) { + case PCH_PIC_INT_ID: + val = cpu_to_le64(s->id.data); break; - case PCH_PIC_INT_ID_HI: - /* - * With 7A1000 manual - * bit 0-15 pch irqchip version - * bit 16-31 irq number supported with pch irqchip - */ - val = deposit32(PCH_PIC_INT_ID_VER, 16, 16, s->irq_num - 1); + case PCH_PIC_INT_MASK: + val = s->int_mask; break; - case PCH_PIC_INT_MASK_LO: - val = (uint32_t)s->int_mask; + case PCH_PIC_INT_EDGE: + val = s->intedge; break; - case PCH_PIC_INT_MASK_HI: - val = s->int_mask >> 32; + case PCH_PIC_HTMSI_EN: + val = s->htmsi_en; break; - case PCH_PIC_INT_EDGE_LO: - val = (uint32_t)s->intedge; + case PCH_PIC_AUTO_CTRL0: + case PCH_PIC_AUTO_CTRL1: + /* PCH PIC connect to EXTIOI always, discard auto_ctrl access */ break; - case PCH_PIC_INT_EDGE_HI: - val = s->intedge >> 32; + case PCH_PIC_INT_STATUS: + val = s->intisr & (~s->int_mask); break; - case PCH_PIC_HTMSI_EN_LO: - val = (uint32_t)s->htmsi_en; + case PCH_PIC_INT_POL: + val = s->int_polarity; break; - case PCH_PIC_HTMSI_EN_HI: - val = s->htmsi_en >> 32; + case PCH_PIC_HTMSI_VEC ... PCH_PIC_HTMSI_VEC_END: + val = *(uint64_t *)(s->htmsi_vector + addr - PCH_PIC_HTMSI_VEC); break; - case PCH_PIC_AUTO_CTRL0_LO: - case PCH_PIC_AUTO_CTRL0_HI: - case PCH_PIC_AUTO_CTRL1_LO: - case PCH_PIC_AUTO_CTRL1_HI: + case PCH_PIC_ROUTE_ENTRY ... PCH_PIC_ROUTE_ENTRY_END: + val = *(uint64_t *)(s->route_entry + addr - PCH_PIC_ROUTE_ENTRY); break; default: + qemu_log_mask(LOG_GUEST_ERROR, + "pch_pic_read: Bad address 0x%"PRIx64"\n", addr); break; } - trace_loongarch_pch_pic_low_readw(size, addr, val); - return val; + return (val >> (offset * 8)) & field_mask; } -static uint64_t get_writew_val(uint64_t value, uint32_t target, bool hi) +static void pch_pic_write(void *opaque, hwaddr addr, uint64_t value, + uint64_t field_mask) { - uint64_t mask = 0xffffffff00000000; - uint64_t data = target; - - return hi ? 
(value & ~mask) | (data << 32) : (value & mask) | data; -} - -static void loongarch_pch_pic_low_writew(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); - uint32_t offset, old_valid, data = (uint32_t)value; - uint64_t old, int_mask; - offset = addr & 0xfff; - - trace_loongarch_pch_pic_low_writew(size, addr, data); - - switch (offset) { - case PCH_PIC_INT_MASK_LO: - old = s->int_mask; - s->int_mask = get_writew_val(old, data, 0); - old_valid = (uint32_t)old; - if (old_valid & ~data) { - pch_pic_update_irq(s, (old_valid & ~data), 1); - } - if (~old_valid & data) { - pch_pic_update_irq(s, (~old_valid & data), 0); - } - break; - case PCH_PIC_INT_MASK_HI: + LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque); + uint32_t offset; + uint64_t old, mask, data, *ptemp; + + offset = addr & 7; + addr -= offset; + mask = field_mask << (offset * 8); + data = (value & field_mask) << (offset * 8); + switch (addr) { + case PCH_PIC_INT_MASK: old = s->int_mask; - s->int_mask = get_writew_val(old, data, 1); - old_valid = (uint32_t)(old >> 32); - int_mask = old_valid & ~data; - if (int_mask) { - pch_pic_update_irq(s, int_mask << 32, 1); + s->int_mask = (old & ~mask) | data; + if (old & ~data) { + pch_pic_update_irq(s, old & ~data, 1); } - int_mask = ~old_valid & data; - if (int_mask) { - pch_pic_update_irq(s, int_mask << 32, 0); + + if (~old & data) { + pch_pic_update_irq(s, ~old & data, 0); } break; - case PCH_PIC_INT_EDGE_LO: - s->intedge = get_writew_val(s->intedge, data, 0); - break; - case PCH_PIC_INT_EDGE_HI: - s->intedge = get_writew_val(s->intedge, data, 1); + case PCH_PIC_INT_EDGE: + s->intedge = (s->intedge & ~mask) | data; break; - case PCH_PIC_INT_CLEAR_LO: + case PCH_PIC_INT_CLEAR: if (s->intedge & data) { - s->intirr &= (~data); + s->intirr &= ~data; pch_pic_update_irq(s, data, 0); - s->intisr &= (~data); + s->intisr &= ~data; } break; - case PCH_PIC_INT_CLEAR_HI: - value <<= 32; - if (s->intedge & value) { - s->intirr &= (~value); - pch_pic_update_irq(s, value, 0); - s->intisr &= (~value); - } + case PCH_PIC_HTMSI_EN: + s->htmsi_en = (s->htmsi_en & ~mask) | data; break; - case PCH_PIC_HTMSI_EN_LO: - s->htmsi_en = get_writew_val(s->htmsi_en, data, 0); + case PCH_PIC_AUTO_CTRL0: + case PCH_PIC_AUTO_CTRL1: + /* Discard auto_ctrl access */ break; - case PCH_PIC_HTMSI_EN_HI: - s->htmsi_en = get_writew_val(s->htmsi_en, data, 1); + case PCH_PIC_INT_POL: + s->int_polarity = (s->int_polarity & ~mask) | data; break; - case PCH_PIC_AUTO_CTRL0_LO: - case PCH_PIC_AUTO_CTRL0_HI: - case PCH_PIC_AUTO_CTRL1_LO: - case PCH_PIC_AUTO_CTRL1_HI: + case PCH_PIC_HTMSI_VEC ... PCH_PIC_HTMSI_VEC_END: + ptemp = (uint64_t *)(s->htmsi_vector + addr - PCH_PIC_HTMSI_VEC); + *ptemp = (*ptemp & ~mask) | data; + break; + case PCH_PIC_ROUTE_ENTRY ... 
PCH_PIC_ROUTE_ENTRY_END: + ptemp = (uint64_t *)(s->route_entry + addr - PCH_PIC_ROUTE_ENTRY); + *ptemp = (*ptemp & ~mask) | data; break; default: + qemu_log_mask(LOG_GUEST_ERROR, + "pch_pic_write: Bad address 0x%"PRIx64"\n", addr); break; } } -static uint64_t loongarch_pch_pic_high_readw(void *opaque, hwaddr addr, - unsigned size) +static uint64_t loongarch_pch_pic_read(void *opaque, hwaddr addr, + unsigned size) { - LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); uint64_t val = 0; - uint32_t offset = addr & 0xfff; - switch (offset) { - case STATUS_LO_START: - val = (uint32_t)(s->intisr & (~s->int_mask)); + switch (size) { + case 1: + val = pch_pic_read(opaque, addr, UCHAR_MAX); break; - case STATUS_HI_START: - val = (s->intisr & (~s->int_mask)) >> 32; + case 2: + val = pch_pic_read(opaque, addr, USHRT_MAX); break; - case POL_LO_START: - val = (uint32_t)s->int_polarity; + case 4: + val = pch_pic_read(opaque, addr, UINT_MAX); break; - case POL_HI_START: - val = s->int_polarity >> 32; + case 8: + val = pch_pic_read(opaque, addr, UINT64_MAX); break; default: + qemu_log_mask(LOG_GUEST_ERROR, + "loongarch_pch_pic_read: Bad size %d\n", size); break; } - trace_loongarch_pch_pic_high_readw(size, addr, val); + trace_loongarch_pch_pic_read(size, addr, val); return val; } -static void loongarch_pch_pic_high_writew(void *opaque, hwaddr addr, - uint64_t value, unsigned size) +static void loongarch_pch_pic_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) { - LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); - uint32_t offset, data = (uint32_t)value; - offset = addr & 0xfff; - - trace_loongarch_pch_pic_high_writew(size, addr, data); + trace_loongarch_pch_pic_write(size, addr, value); - switch (offset) { - case STATUS_LO_START: - s->intisr = get_writew_val(s->intisr, data, 0); + switch (size) { + case 1: + pch_pic_write(opaque, addr, value, UCHAR_MAX); break; - case STATUS_HI_START: - s->intisr = get_writew_val(s->intisr, data, 1); + case 2: + pch_pic_write(opaque, addr, value, USHRT_MAX); break; - case POL_LO_START: - s->int_polarity = get_writew_val(s->int_polarity, data, 0); break; - case POL_HI_START: - s->int_polarity = get_writew_val(s->int_polarity, data, 1); - break; - default: - break; - } -} - -static uint64_t loongarch_pch_pic_readb(void *opaque, hwaddr addr, - unsigned size) -{ - LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); - uint64_t val = 0; - uint32_t offset = (addr & 0xfff) + PCH_PIC_ROUTE_ENTRY_OFFSET; - int64_t offset_tmp; - - switch (offset) { - case PCH_PIC_HTMSI_VEC_OFFSET ... PCH_PIC_HTMSI_VEC_END: - offset_tmp = offset - PCH_PIC_HTMSI_VEC_OFFSET; - if (offset_tmp >= 0 && offset_tmp < 64) { - val = s->htmsi_vector[offset_tmp]; - } - break; - case PCH_PIC_ROUTE_ENTRY_OFFSET ... PCH_PIC_ROUTE_ENTRY_END: - offset_tmp = offset - PCH_PIC_ROUTE_ENTRY_OFFSET; - if (offset_tmp >= 0 && offset_tmp < 64) { - val = s->route_entry[offset_tmp]; - } - break; - default: - break; - } - - trace_loongarch_pch_pic_readb(size, addr, val); - return val; -} - -static void loongarch_pch_pic_writeb(void *opaque, hwaddr addr, - uint64_t data, unsigned size) -{ - LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(opaque); - int32_t offset_tmp; - uint32_t offset = (addr & 0xfff) + PCH_PIC_ROUTE_ENTRY_OFFSET; - - trace_loongarch_pch_pic_writeb(size, addr, data); - - switch (offset) { - case PCH_PIC_HTMSI_VEC_OFFSET ... 
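The rewritten pch_pic_read()/pch_pic_write() model each register as a single 64-bit word and emulate narrower guest accesses by shifting and masking with field_mask. The fragment below is a self-contained illustration of that read-modify-write arithmetic on a plain uint64_t, with no QEMU dependencies; it is a sketch, not the device code.

    #include <stdio.h>
    #include <stdint.h>

    /* Read the addressed byte lanes of a 64-bit register, the way
     * pch_pic_read() narrows a wide register with a field mask. */
    static uint64_t demo_reg_read(uint64_t reg, unsigned addr, uint64_t field_mask)
    {
        unsigned offset = addr & 7;          /* byte lane inside the register */

        return (reg >> (offset * 8)) & field_mask;
    }

    /* Update only the addressed byte lanes, leaving the rest untouched,
     * mirroring pch_pic_write(). */
    static uint64_t demo_reg_write(uint64_t reg, unsigned addr, uint64_t value,
                                   uint64_t field_mask)
    {
        unsigned offset = addr & 7;
        uint64_t mask = field_mask << (offset * 8);
        uint64_t data = (value & field_mask) << (offset * 8);

        return (reg & ~mask) | data;
    }

    int main(void)
    {
        uint64_t int_mask = 0x00000000ffffffffULL;

        /* 2-byte read at byte offset 4 sees the low half of the upper word. */
        printf("read  -> 0x%llx\n",
               (unsigned long long)demo_reg_read(int_mask, 4, 0xffff));

        /* 1-byte write at offset 0 only touches the lowest lane. */
        int_mask = demo_reg_write(int_mask, 0, 0xa5, 0xff);
        printf("write -> 0x%llx\n", (unsigned long long)int_mask);
        return 0;
    }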
PCH_PIC_HTMSI_VEC_END: - offset_tmp = offset - PCH_PIC_HTMSI_VEC_OFFSET; - if (offset_tmp >= 0 && offset_tmp < 64) { - s->htmsi_vector[offset_tmp] = (uint8_t)(data & 0xff); - } + case 4: + pch_pic_write(opaque, addr, value, UINT_MAX); break; - case PCH_PIC_ROUTE_ENTRY_OFFSET ... PCH_PIC_ROUTE_ENTRY_END: - offset_tmp = offset - PCH_PIC_ROUTE_ENTRY_OFFSET; - if (offset_tmp >= 0 && offset_tmp < 64) { - s->route_entry[offset_tmp] = (uint8_t)(data & 0xff); - } + case 8: + pch_pic_write(opaque, addr, value, UINT64_MAX); break; default: + qemu_log_mask(LOG_GUEST_ERROR, + "loongarch_pch_pic_write: Bad size %d\n", size); break; } } -static const MemoryRegionOps loongarch_pch_pic_reg32_low_ops = { - .read = loongarch_pch_pic_low_readw, - .write = loongarch_pch_pic_low_writew, - .valid = { - .min_access_size = 4, - .max_access_size = 8, - }, - .impl = { - .min_access_size = 4, - .max_access_size = 4, - }, - .endianness = DEVICE_LITTLE_ENDIAN, -}; - -static const MemoryRegionOps loongarch_pch_pic_reg32_high_ops = { - .read = loongarch_pch_pic_high_readw, - .write = loongarch_pch_pic_high_writew, - .valid = { - .min_access_size = 4, - .max_access_size = 8, - }, - .impl = { - .min_access_size = 4, - .max_access_size = 4, - }, - .endianness = DEVICE_LITTLE_ENDIAN, -}; - -static const MemoryRegionOps loongarch_pch_pic_reg8_ops = { - .read = loongarch_pch_pic_readb, - .write = loongarch_pch_pic_writeb, +static const MemoryRegionOps loongarch_pch_pic_ops = { + .read = loongarch_pch_pic_read, + .write = loongarch_pch_pic_write, .valid = { .min_access_size = 1, - .max_access_size = 1, + .max_access_size = 8, + /* + * PCH PIC device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. 
+ */ + .unaligned = false, }, .impl = { .min_access_size = 1, - .max_access_size = 1, + .max_access_size = 8, }, .endianness = DEVICE_LITTLE_ENDIAN, }; -static void loongarch_pch_pic_reset(DeviceState *d) +static void loongarch_pic_reset_hold(Object *obj, ResetType type) { - LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(d); - int i; - - s->int_mask = -1; - s->htmsi_en = 0x0; - s->intedge = 0x0; - s->intclr = 0x0; - s->auto_crtl0 = 0x0; - s->auto_crtl1 = 0x0; - for (i = 0; i < 64; i++) { - s->route_entry[i] = 0x1; - s->htmsi_vector[i] = 0x0; + LoongarchPICClass *lpc = LOONGARCH_PIC_GET_CLASS(obj); + + if (lpc->parent_phases.hold) { + lpc->parent_phases.hold(obj, type); + } + + if (kvm_irqchip_in_kernel()) { + kvm_pic_put(obj, 0); } - s->intirr = 0x0; - s->intisr = 0x0; - s->last_intirr = 0x0; - s->int_polarity = 0x0; } -static void loongarch_pch_pic_realize(DeviceState *dev, Error **errp) +static void loongarch_pic_realize(DeviceState *dev, Error **errp) { - LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(dev); - - if (!s->irq_num || s->irq_num > VIRT_PCH_PIC_IRQ_NUM) { - error_setg(errp, "Invalid 'pic_irq_num'"); + LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(dev); + LoongarchPICClass *lpc = LOONGARCH_PIC_GET_CLASS(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + Error *local_err = NULL; + + lpc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); return; } qdev_init_gpio_out(dev, s->parent_irq, s->irq_num); qdev_init_gpio_in(dev, pch_pic_irq_handler, s->irq_num); + + if (kvm_irqchip_in_kernel()) { + kvm_pic_realize(dev, errp); + } else { + memory_region_init_io(&s->iomem, OBJECT(dev), + &loongarch_pch_pic_ops, + s, TYPE_LOONGARCH_PIC, VIRT_PCH_REG_SIZE); + sysbus_init_mmio(sbd, &s->iomem); + } } -static void loongarch_pch_pic_init(Object *obj) +static int loongarch_pic_pre_save(LoongArchPICCommonState *opaque) { - LoongArchPCHPIC *s = LOONGARCH_PCH_PIC(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - - memory_region_init_io(&s->iomem32_low, obj, - &loongarch_pch_pic_reg32_low_ops, - s, PCH_PIC_NAME(.reg32_part1), 0x100); - memory_region_init_io(&s->iomem8, obj, &loongarch_pch_pic_reg8_ops, - s, PCH_PIC_NAME(.reg8), 0x2a0); - memory_region_init_io(&s->iomem32_high, obj, - &loongarch_pch_pic_reg32_high_ops, - s, PCH_PIC_NAME(.reg32_part2), 0xc60); - sysbus_init_mmio(sbd, &s->iomem32_low); - sysbus_init_mmio(sbd, &s->iomem8); - sysbus_init_mmio(sbd, &s->iomem32_high); + if (kvm_irqchip_in_kernel()) { + return kvm_pic_get(opaque); + } + return 0; } -static Property loongarch_pch_pic_properties[] = { - DEFINE_PROP_UINT32("pch_pic_irq_num", LoongArchPCHPIC, irq_num, 0), - DEFINE_PROP_END_OF_LIST(), -}; - -static const VMStateDescription vmstate_loongarch_pch_pic = { - .name = TYPE_LOONGARCH_PCH_PIC, - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_UINT64(int_mask, LoongArchPCHPIC), - VMSTATE_UINT64(htmsi_en, LoongArchPCHPIC), - VMSTATE_UINT64(intedge, LoongArchPCHPIC), - VMSTATE_UINT64(intclr, LoongArchPCHPIC), - VMSTATE_UINT64(auto_crtl0, LoongArchPCHPIC), - VMSTATE_UINT64(auto_crtl1, LoongArchPCHPIC), - VMSTATE_UINT8_ARRAY(route_entry, LoongArchPCHPIC, 64), - VMSTATE_UINT8_ARRAY(htmsi_vector, LoongArchPCHPIC, 64), - VMSTATE_UINT64(last_intirr, LoongArchPCHPIC), - VMSTATE_UINT64(intirr, LoongArchPCHPIC), - VMSTATE_UINT64(intisr, LoongArchPCHPIC), - VMSTATE_UINT64(int_polarity, LoongArchPCHPIC), - VMSTATE_END_OF_LIST() +static int loongarch_pic_post_load(LoongArchPICCommonState *opaque, + int version_id) +{ + if 
(kvm_irqchip_in_kernel()) { + return kvm_pic_put(opaque, version_id); } -}; -static void loongarch_pch_pic_class_init(ObjectClass *klass, void *data) + return 0; +} + +static void loongarch_pic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - - dc->realize = loongarch_pch_pic_realize; - dc->reset = loongarch_pch_pic_reset; - dc->vmsd = &vmstate_loongarch_pch_pic; - device_class_set_props(dc, loongarch_pch_pic_properties); + LoongarchPICClass *lpc = LOONGARCH_PIC_CLASS(klass); + LoongArchPICCommonClass *lpcc = LOONGARCH_PIC_COMMON_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + resettable_class_set_parent_phases(rc, NULL, loongarch_pic_reset_hold, + NULL, &lpc->parent_phases); + device_class_set_parent_realize(dc, loongarch_pic_realize, + &lpc->parent_realize); + lpcc->pre_save = loongarch_pic_pre_save; + lpcc->post_load = loongarch_pic_post_load; } -static const TypeInfo loongarch_pch_pic_info = { - .name = TYPE_LOONGARCH_PCH_PIC, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(LoongArchPCHPIC), - .instance_init = loongarch_pch_pic_init, - .class_init = loongarch_pch_pic_class_init, +static const TypeInfo loongarch_pic_types[] = { + { + .name = TYPE_LOONGARCH_PIC, + .parent = TYPE_LOONGARCH_PIC_COMMON, + .instance_size = sizeof(LoongarchPICState), + .class_size = sizeof(LoongarchPICClass), + .class_init = loongarch_pic_class_init, + } }; -static void loongarch_pch_pic_register_types(void) -{ - type_register_static(&loongarch_pch_pic_info); -} - -type_init(loongarch_pch_pic_register_types) +DEFINE_TYPES(loongarch_pic_types) diff --git a/hw/intc/loongarch_pic_common.c b/hw/intc/loongarch_pic_common.c new file mode 100644 index 00000000000..de170501cf2 --- /dev/null +++ b/hw/intc/loongarch_pic_common.c @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU Loongson 7A1000 I/O interrupt controller. 
+ * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/intc/loongarch_pic_common.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" + +static int loongarch_pic_pre_save(void *opaque) +{ + LoongArchPICCommonState *s = (LoongArchPICCommonState *)opaque; + LoongArchPICCommonClass *lpcc = LOONGARCH_PIC_COMMON_GET_CLASS(s); + + if (lpcc->pre_save) { + return lpcc->pre_save(s); + } + + return 0; +} + +static int loongarch_pic_post_load(void *opaque, int version_id) +{ + LoongArchPICCommonState *s = (LoongArchPICCommonState *)opaque; + LoongArchPICCommonClass *lpcc = LOONGARCH_PIC_COMMON_GET_CLASS(s); + + if (lpcc->post_load) { + return lpcc->post_load(s, version_id); + } + + return 0; +} + +static void loongarch_pic_common_realize(DeviceState *dev, Error **errp) +{ + LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(dev); + + if (!s->irq_num || s->irq_num > VIRT_PCH_PIC_IRQ_NUM) { + error_setg(errp, "Invalid 'pic_irq_num'"); + return; + } +} + +static void loongarch_pic_common_reset_hold(Object *obj, ResetType type) +{ + LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(obj); + int i; + + /* + * With Loongson 7A1000 user manual + * Chapter 5.2 "Description of Interrupt-related Registers" + * + * Interrupt controller identification register 1 + * Bit 24-31 Interrupt Controller ID + * Interrupt controller identification register 2 + * Bit 0-7 Interrupt Controller version number + * Bit 16-23 The number of interrupt sources supported + */ + s->id.desc.id = PCH_PIC_INT_ID_VAL; + s->id.desc.version = PCH_PIC_INT_ID_VER; + s->id.desc.irq_num = s->irq_num - 1; + s->int_mask = UINT64_MAX; + s->htmsi_en = 0x0; + s->intedge = 0x0; + s->intclr = 0x0; + s->auto_crtl0 = 0x0; + s->auto_crtl1 = 0x0; + for (i = 0; i < 64; i++) { + s->route_entry[i] = 0x1; + s->htmsi_vector[i] = 0x0; + } + s->intirr = 0x0; + s->intisr = 0x0; + s->last_intirr = 0x0; + s->int_polarity = 0x0; +} + +static const Property loongarch_pic_common_properties[] = { + DEFINE_PROP_UINT32("pch_pic_irq_num", LoongArchPICCommonState, irq_num, 0), +}; + +static const VMStateDescription vmstate_loongarch_pic_common = { + .name = "loongarch_pch_pic", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = loongarch_pic_pre_save, + .post_load = loongarch_pic_post_load, + .fields = (const VMStateField[]) { + VMSTATE_UINT64(int_mask, LoongArchPICCommonState), + VMSTATE_UINT64(htmsi_en, LoongArchPICCommonState), + VMSTATE_UINT64(intedge, LoongArchPICCommonState), + VMSTATE_UINT64(intclr, LoongArchPICCommonState), + VMSTATE_UINT64(auto_crtl0, LoongArchPICCommonState), + VMSTATE_UINT64(auto_crtl1, LoongArchPICCommonState), + VMSTATE_UINT8_ARRAY(route_entry, LoongArchPICCommonState, 64), + VMSTATE_UINT8_ARRAY(htmsi_vector, LoongArchPICCommonState, 64), + VMSTATE_UINT64(last_intirr, LoongArchPICCommonState), + VMSTATE_UINT64(intirr, LoongArchPICCommonState), + VMSTATE_UINT64(intisr, LoongArchPICCommonState), + VMSTATE_UINT64(int_polarity, LoongArchPICCommonState), + VMSTATE_END_OF_LIST() + } +}; + +static void loongarch_pic_common_class_init(ObjectClass *klass, + const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + LoongArchPICCommonClass *lpcc = LOONGARCH_PIC_COMMON_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + device_class_set_parent_realize(dc, loongarch_pic_common_realize, + &lpcc->parent_realize); + resettable_class_set_parent_phases(rc, NULL, + loongarch_pic_common_reset_hold, + NULL, &lpcc->parent_phases); + 
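loongarch_pic_common_reset_hold() below seeds the identification register from the bit layout quoted from the 7A1000 manual: controller ID in bits 24-31 of the first word, version in bits 0-7 and supported source count in bits 16-23 of the second. The sketch that follows composes such a value with plain shifts; the concrete ID and version numbers are placeholders, not the real PCH_PIC_INT_ID_VAL/PCH_PIC_INT_ID_VER values.

    #include <stdio.h>
    #include <stdint.h>

    /* Pack the two identification words described in the reset comment.
     * The numeric arguments below are illustrative only. */
    static uint64_t demo_pic_id(uint8_t ctrl_id, uint8_t version, uint8_t irq_num)
    {
        uint32_t word0 = (uint32_t)ctrl_id << 24;                 /* bits 24-31 */
        uint32_t word1 = version | ((uint32_t)(irq_num - 1) << 16);

        return ((uint64_t)word1 << 32) | word0;
    }

    int main(void)
    {
        printf("id register = 0x%016llx\n",
               (unsigned long long)demo_pic_id(0x07, 0x01, 64));
        return 0;
    }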
device_class_set_props(dc, loongarch_pic_common_properties); + dc->vmsd = &vmstate_loongarch_pic_common; +} + +static const TypeInfo loongarch_pic_common_types[] = { + { + .name = TYPE_LOONGARCH_PIC_COMMON, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(LoongArchPICCommonState), + .class_size = sizeof(LoongArchPICCommonClass), + .class_init = loongarch_pic_common_class_init, + .abstract = true, + } +}; + +DEFINE_TYPES(loongarch_pic_common_types) diff --git a/hw/intc/loongarch_pic_kvm.c b/hw/intc/loongarch_pic_kvm.c new file mode 100644 index 00000000000..dd504ec6a6f --- /dev/null +++ b/hw/intc/loongarch_pic_kvm.c @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch kvm pch pic interrupt support + * + * Copyright (C) 2025 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/intc/loongarch_pch_pic.h" +#include "hw/loongarch/virt.h" +#include "hw/pci-host/ls7a.h" +#include "system/kvm.h" + +static void kvm_pch_pic_access_reg(int fd, uint64_t addr, void *val, bool write) +{ + kvm_device_access(fd, KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS, + addr, val, write, &error_abort); +} + +static void kvm_pch_pic_access(void *opaque, bool write) +{ + LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque); + LoongarchPICState *lps = LOONGARCH_PIC(opaque); + int fd = lps->dev_fd; + int addr, offset; + + if (fd == 0) { + return; + } + + kvm_pch_pic_access_reg(fd, PCH_PIC_INT_MASK, &s->int_mask, write); + kvm_pch_pic_access_reg(fd, PCH_PIC_HTMSI_EN, &s->htmsi_en, write); + kvm_pch_pic_access_reg(fd, PCH_PIC_INT_EDGE, &s->intedge, write); + kvm_pch_pic_access_reg(fd, PCH_PIC_AUTO_CTRL0, &s->auto_crtl0, write); + kvm_pch_pic_access_reg(fd, PCH_PIC_AUTO_CTRL1, &s->auto_crtl1, write); + + for (addr = PCH_PIC_ROUTE_ENTRY; + addr < PCH_PIC_ROUTE_ENTRY_END; addr++) { + offset = addr - PCH_PIC_ROUTE_ENTRY; + kvm_pch_pic_access_reg(fd, addr, &s->route_entry[offset], write); + } + + for (addr = PCH_PIC_HTMSI_VEC; addr < PCH_PIC_HTMSI_VEC_END; addr++) { + offset = addr - PCH_PIC_HTMSI_VEC; + kvm_pch_pic_access_reg(fd, addr, &s->htmsi_vector[offset], write); + } + + kvm_pch_pic_access_reg(fd, PCH_PIC_INT_REQUEST, &s->intirr, write); + kvm_pch_pic_access_reg(fd, PCH_PIC_INT_STATUS, &s->intisr, write); + kvm_pch_pic_access_reg(fd, PCH_PIC_INT_POL, &s->int_polarity, write); +} + +int kvm_pic_get(void *opaque) +{ + kvm_pch_pic_access(opaque, false); + return 0; +} + +int kvm_pic_put(void *opaque, int version_id) +{ + kvm_pch_pic_access(opaque, true); + return 0; +} + +void kvm_pic_realize(DeviceState *dev, Error **errp) +{ + LoongarchPICState *lps = LOONGARCH_PIC(dev); + uint64_t pch_pic_base = VIRT_PCH_REG_BASE; + int ret; + + ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_LOONGARCH_PCHPIC, false); + if (ret < 0) { + fprintf(stderr, "Create KVM_LOONGARCH_PCHPIC failed: %s\n", + strerror(-ret)); + abort(); + } + + lps->dev_fd = ret; + ret = kvm_device_access(lps->dev_fd, KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL, + KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT, + &pch_pic_base, true, NULL); + if (ret < 0) { + fprintf(stderr, "KVM_LOONGARCH_PCH_PIC_INIT failed: %s\n", + strerror(-ret)); + abort(); + } +} diff --git a/hw/intc/loongson_ipi.c b/hw/intc/loongson_ipi.c index 682cec96f3f..fbc73e8b001 100644 --- a/hw/intc/loongson_ipi.c +++ b/hw/intc/loongson_ipi.c @@ -6,225 +6,41 @@ */ #include "qemu/osdep.h" -#include "hw/boards.h" -#include "hw/sysbus.h" #include "hw/intc/loongson_ipi.h" -#include "hw/irq.h" #include 
"hw/qdev-properties.h" #include "qapi/error.h" -#include "qemu/log.h" -#include "exec/address-spaces.h" -#include "exec/memory.h" -#include "migration/vmstate.h" -#ifdef TARGET_LOONGARCH64 -#include "target/loongarch/cpu.h" -#endif -#ifdef TARGET_MIPS #include "target/mips/cpu.h" -#endif -#include "trace.h" -static MemTxResult loongson_ipi_core_readl(void *opaque, hwaddr addr, - uint64_t *data, - unsigned size, MemTxAttrs attrs) +static AddressSpace *get_iocsr_as(CPUState *cpu) { - IPICore *s = opaque; - uint64_t ret = 0; - int index = 0; - - addr &= 0xff; - switch (addr) { - case CORE_STATUS_OFF: - ret = s->status; - break; - case CORE_EN_OFF: - ret = s->en; - break; - case CORE_SET_OFF: - ret = 0; - break; - case CORE_CLEAR_OFF: - ret = 0; - break; - case CORE_BUF_20 ... CORE_BUF_38 + 4: - index = (addr - CORE_BUF_20) >> 2; - ret = s->buf[index]; - break; - default: - qemu_log_mask(LOG_UNIMP, "invalid read: %x", (uint32_t)addr); - break; - } - - trace_loongson_ipi_read(size, (uint64_t)addr, ret); - *data = ret; - return MEMTX_OK; -} - -static MemTxResult loongson_ipi_iocsr_readl(void *opaque, hwaddr addr, - uint64_t *data, - unsigned size, MemTxAttrs attrs) -{ - LoongsonIPI *ipi = opaque; - IPICore *s; - - if (attrs.requester_id >= ipi->num_cpu) { - return MEMTX_DECODE_ERROR; - } - - s = &ipi->cpu[attrs.requester_id]; - return loongson_ipi_core_readl(s, addr, data, size, attrs); -} - -static AddressSpace *get_cpu_iocsr_as(CPUState *cpu) -{ -#ifdef TARGET_LOONGARCH64 - return LOONGARCH_CPU(cpu)->env.address_space_iocsr; -#endif -#ifdef TARGET_MIPS if (ase_lcsr_available(&MIPS_CPU(cpu)->env)) { return &MIPS_CPU(cpu)->env.iocsr.as; } -#endif - return NULL; -} - -static MemTxResult send_ipi_data(CPUState *cpu, uint64_t val, hwaddr addr, - MemTxAttrs attrs) -{ - int i, mask = 0, data = 0; - AddressSpace *iocsr_as = get_cpu_iocsr_as(cpu); - - if (!iocsr_as) { - return MEMTX_DECODE_ERROR; - } - /* - * bit 27-30 is mask for byte writing, - * if the mask is 0, we need not to do anything. 
- */ - if ((val >> 27) & 0xf) { - data = address_space_ldl_le(iocsr_as, addr, attrs, NULL); - for (i = 0; i < 4; i++) { - /* get mask for byte writing */ - if (val & (0x1 << (27 + i))) { - mask |= 0xff << (i * 8); - } - } - } - - data &= mask; - data |= (val >> 32) & ~mask; - address_space_stl_le(iocsr_as, addr, data, attrs, NULL); - - return MEMTX_OK; + return NULL; } -static MemTxResult mail_send(uint64_t val, MemTxAttrs attrs) +static int loongson_cpu_by_arch_id(LoongsonIPICommonState *lics, + int64_t arch_id, int *index, CPUState **pcs) { - uint32_t cpuid; - hwaddr addr; CPUState *cs; - cpuid = extract32(val, 16, 10); - cs = cpu_by_arch_id(cpuid); + cs = cpu_by_arch_id(arch_id); if (cs == NULL) { - return MEMTX_DECODE_ERROR; + return MEMTX_ERROR; } - /* override requester_id */ - addr = SMP_IPI_MAILBOX + CORE_BUF_20 + (val & 0x1c); - attrs.requester_id = cs->cpu_index; - return send_ipi_data(cs, val, addr, attrs); -} - -static MemTxResult any_send(uint64_t val, MemTxAttrs attrs) -{ - uint32_t cpuid; - hwaddr addr; - CPUState *cs; - - cpuid = extract32(val, 16, 10); - cs = cpu_by_arch_id(cpuid); - if (cs == NULL) { - return MEMTX_DECODE_ERROR; + if (index) { + *index = cs->cpu_index; } - /* override requester_id */ - addr = val & 0xffff; - attrs.requester_id = cs->cpu_index; - return send_ipi_data(cs, val, addr, attrs); -} - -static MemTxResult loongson_ipi_core_writel(void *opaque, hwaddr addr, - uint64_t val, unsigned size, - MemTxAttrs attrs) -{ - IPICore *s = opaque; - LoongsonIPI *ipi = s->ipi; - int index = 0; - uint32_t cpuid; - uint8_t vector; - CPUState *cs; - - addr &= 0xff; - trace_loongson_ipi_write(size, (uint64_t)addr, val); - switch (addr) { - case CORE_STATUS_OFF: - qemu_log_mask(LOG_GUEST_ERROR, "can not be written"); - break; - case CORE_EN_OFF: - s->en = val; - break; - case CORE_SET_OFF: - s->status |= val; - if (s->status != 0 && (s->status & s->en) != 0) { - qemu_irq_raise(s->irq); - } - break; - case CORE_CLEAR_OFF: - s->status &= ~val; - if (s->status == 0 && s->en != 0) { - qemu_irq_lower(s->irq); - } - break; - case CORE_BUF_20 ... 
CORE_BUF_38 + 4: - index = (addr - CORE_BUF_20) >> 2; - s->buf[index] = val; - break; - case IOCSR_IPI_SEND: - cpuid = extract32(val, 16, 10); - /* IPI status vector */ - vector = extract8(val, 0, 5); - cs = cpu_by_arch_id(cpuid); - if (cs == NULL || cs->cpu_index >= ipi->num_cpu) { - return MEMTX_DECODE_ERROR; - } - loongson_ipi_core_writel(&ipi->cpu[cs->cpu_index], CORE_SET_OFF, - BIT(vector), 4, attrs); - break; - default: - qemu_log_mask(LOG_UNIMP, "invalid write: %x", (uint32_t)addr); - break; + if (pcs) { + *pcs = cs; } return MEMTX_OK; } -static MemTxResult loongson_ipi_iocsr_writel(void *opaque, hwaddr addr, - uint64_t val, unsigned size, - MemTxAttrs attrs) -{ - LoongsonIPI *ipi = opaque; - IPICore *s; - - if (attrs.requester_id >= ipi->num_cpu) { - return MEMTX_DECODE_ERROR; - } - - s = &ipi->cpu[attrs.requester_id]; - return loongson_ipi_core_writel(s, addr, val, size, attrs); -} - static const MemoryRegionOps loongson_ipi_core_ops = { .read_with_attrs = loongson_ipi_core_readl, .write_with_attrs = loongson_ipi_core_writel, @@ -235,141 +51,77 @@ static const MemoryRegionOps loongson_ipi_core_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -static const MemoryRegionOps loongson_ipi_iocsr_ops = { - .read_with_attrs = loongson_ipi_iocsr_readl, - .write_with_attrs = loongson_ipi_iocsr_writel, - .impl.min_access_size = 4, - .impl.max_access_size = 4, - .valid.min_access_size = 4, - .valid.max_access_size = 8, - .endianness = DEVICE_LITTLE_ENDIAN, -}; - -/* mail send and any send only support writeq */ -static MemTxResult loongson_ipi_writeq(void *opaque, hwaddr addr, uint64_t val, - unsigned size, MemTxAttrs attrs) -{ - MemTxResult ret = MEMTX_OK; - - addr &= 0xfff; - switch (addr) { - case MAIL_SEND_OFFSET: - ret = mail_send(val, attrs); - break; - case ANY_SEND_OFFSET: - ret = any_send(val, attrs); - break; - default: - break; - } - - return ret; -} - -static const MemoryRegionOps loongson_ipi64_ops = { - .write_with_attrs = loongson_ipi_writeq, - .impl.min_access_size = 8, - .impl.max_access_size = 8, - .valid.min_access_size = 8, - .valid.max_access_size = 8, - .endianness = DEVICE_LITTLE_ENDIAN, -}; - static void loongson_ipi_realize(DeviceState *dev, Error **errp) { - LoongsonIPI *s = LOONGSON_IPI(dev); + LoongsonIPICommonState *sc = LOONGSON_IPI_COMMON(dev); + LoongsonIPIState *s = LOONGSON_IPI(dev); + LoongsonIPIClass *lic = LOONGSON_IPI_GET_CLASS(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + Error *local_err = NULL; int i; - if (s->num_cpu == 0) { - error_setg(errp, "num-cpu must be at least 1"); + lic->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); return; } - memory_region_init_io(&s->ipi_iocsr_mem, OBJECT(dev), - &loongson_ipi_iocsr_ops, - s, "loongson_ipi_iocsr", 0x48); - - /* loongson_ipi_iocsr performs re-entrant IO through ipi_send */ - s->ipi_iocsr_mem.disable_reentrancy_guard = true; - - sysbus_init_mmio(sbd, &s->ipi_iocsr_mem); - - memory_region_init_io(&s->ipi64_iocsr_mem, OBJECT(dev), - &loongson_ipi64_ops, - s, "loongson_ipi64_iocsr", 0x118); - sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem); - - s->cpu = g_new0(IPICore, s->num_cpu); - if (s->cpu == NULL) { - error_setg(errp, "Memory allocation for IPICore faile"); + if (sc->num_cpu == 0) { + error_setg(errp, "num-cpu must be at least 1"); return; } - for (i = 0; i < s->num_cpu; i++) { - s->cpu[i].ipi = s; - s->cpu[i].ipi_mmio_mem = g_new0(MemoryRegion, 1); + sc->cpu = g_new0(IPICore, sc->num_cpu); + for (i = 0; i < sc->num_cpu; i++) { + sc->cpu[i].ipi = sc; + 
qdev_init_gpio_out(dev, &sc->cpu[i].irq, 1); + } + + s->ipi_mmio_mem = g_new0(MemoryRegion, sc->num_cpu); + for (i = 0; i < sc->num_cpu; i++) { g_autofree char *name = g_strdup_printf("loongson_ipi_cpu%d_mmio", i); - memory_region_init_io(s->cpu[i].ipi_mmio_mem, OBJECT(dev), - &loongson_ipi_core_ops, &s->cpu[i], name, 0x48); - sysbus_init_mmio(sbd, s->cpu[i].ipi_mmio_mem); - qdev_init_gpio_out(dev, &s->cpu[i].irq, 1); + memory_region_init_io(&s->ipi_mmio_mem[i], OBJECT(dev), + &loongson_ipi_core_ops, &sc->cpu[i], name, 0x48); + sysbus_init_mmio(sbd, &s->ipi_mmio_mem[i]); } } static void loongson_ipi_unrealize(DeviceState *dev) { - LoongsonIPI *s = LOONGSON_IPI(dev); - - g_free(s->cpu); -} + LoongsonIPIState *s = LOONGSON_IPI(dev); + LoongsonIPIClass *k = LOONGSON_IPI_GET_CLASS(dev); -static const VMStateDescription vmstate_ipi_core = { - .name = "ipi-single", - .version_id = 2, - .minimum_version_id = 2, - .fields = (const VMStateField[]) { - VMSTATE_UINT32(status, IPICore), - VMSTATE_UINT32(en, IPICore), - VMSTATE_UINT32(set, IPICore), - VMSTATE_UINT32(clear, IPICore), - VMSTATE_UINT32_ARRAY(buf, IPICore, IPI_MBX_NUM * 2), - VMSTATE_END_OF_LIST() - } -}; + g_free(s->ipi_mmio_mem); -static const VMStateDescription vmstate_loongson_ipi = { - .name = TYPE_LOONGSON_IPI, - .version_id = 2, - .minimum_version_id = 2, - .fields = (const VMStateField[]) { - VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongsonIPI, num_cpu, - vmstate_ipi_core, IPICore), - VMSTATE_END_OF_LIST() - } -}; + k->parent_unrealize(dev); +} -static Property ipi_properties[] = { - DEFINE_PROP_UINT32("num-cpu", LoongsonIPI, num_cpu, 1), - DEFINE_PROP_END_OF_LIST(), +static const Property loongson_ipi_properties[] = { + DEFINE_PROP_UINT32("num-cpu", LoongsonIPICommonState, num_cpu, 1), }; -static void loongson_ipi_class_init(ObjectClass *klass, void *data) +static void loongson_ipi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - - dc->realize = loongson_ipi_realize; - dc->unrealize = loongson_ipi_unrealize; - device_class_set_props(dc, ipi_properties); - dc->vmsd = &vmstate_loongson_ipi; + LoongsonIPIClass *lic = LOONGSON_IPI_CLASS(klass); + LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_CLASS(klass); + + device_class_set_parent_realize(dc, loongson_ipi_realize, + &lic->parent_realize); + device_class_set_parent_unrealize(dc, loongson_ipi_unrealize, + &lic->parent_unrealize); + device_class_set_props(dc, loongson_ipi_properties); + licc->get_iocsr_as = get_iocsr_as; + licc->cpu_by_arch_id = loongson_cpu_by_arch_id; } static const TypeInfo loongson_ipi_types[] = { { .name = TYPE_LOONGSON_IPI, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(LoongsonIPI), + .parent = TYPE_LOONGSON_IPI_COMMON, + .instance_size = sizeof(LoongsonIPIState), + .class_size = sizeof(LoongsonIPIClass), .class_init = loongson_ipi_class_init, } }; diff --git a/hw/intc/loongson_ipi_common.c b/hw/intc/loongson_ipi_common.c new file mode 100644 index 00000000000..8cd78d48589 --- /dev/null +++ b/hw/intc/loongson_ipi_common.c @@ -0,0 +1,362 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Loongson IPI interrupt common support + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/intc/loongson_ipi_common.h" +#include "hw/irq.h" +#include "qemu/log.h" +#include "migration/vmstate.h" +#include "system/kvm.h" +#include "trace.h" + +MemTxResult loongson_ipi_core_readl(void *opaque, hwaddr addr, uint64_t *data, + 
unsigned size, MemTxAttrs attrs) +{ + IPICore *s = opaque; + uint64_t ret = 0; + int index = 0; + + addr &= 0xff; + switch (addr) { + case CORE_STATUS_OFF: + ret = s->status; + break; + case CORE_EN_OFF: + ret = s->en; + break; + case CORE_SET_OFF: + ret = 0; + break; + case CORE_CLEAR_OFF: + ret = 0; + break; + case CORE_BUF_20 ... CORE_BUF_38 + 4: + index = (addr - CORE_BUF_20) >> 2; + ret = s->buf[index]; + break; + default: + qemu_log_mask(LOG_UNIMP, "invalid read: %x", (uint32_t)addr); + break; + } + + trace_loongson_ipi_read(size, (uint64_t)addr, ret); + *data = ret; + + return MEMTX_OK; +} + +static MemTxResult loongson_ipi_iocsr_readl(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + LoongsonIPICommonState *ipi = opaque; + IPICore *s; + + if (attrs.requester_id >= ipi->num_cpu) { + return MEMTX_DECODE_ERROR; + } + + s = &ipi->cpu[attrs.requester_id]; + return loongson_ipi_core_readl(s, addr, data, size, attrs); +} + +static MemTxResult send_ipi_data(LoongsonIPICommonState *ipi, CPUState *cpu, + uint64_t val, hwaddr addr, MemTxAttrs attrs) +{ + LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(ipi); + int i, mask = 0, data = 0; + AddressSpace *iocsr_as = licc->get_iocsr_as(cpu); + + if (!iocsr_as) { + return MEMTX_DECODE_ERROR; + } + + /* + * bit 27-30 is mask for byte writing, + * if the mask is 0, we need not to do anything. + */ + if ((val >> 27) & 0xf) { + data = address_space_ldl_le(iocsr_as, addr, attrs, NULL); + for (i = 0; i < 4; i++) { + /* get mask for byte writing */ + if (val & (0x1 << (27 + i))) { + mask |= 0xff << (i * 8); + } + } + } + + data &= mask; + data |= (val >> 32) & ~mask; + address_space_stl_le(iocsr_as, addr, data, attrs, NULL); + + return MEMTX_OK; +} + +static MemTxResult mail_send(LoongsonIPICommonState *ipi, + uint64_t val, MemTxAttrs attrs) +{ + LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(ipi); + uint32_t cpuid; + hwaddr addr; + CPUState *cs; + int cpu, ret; + + cpuid = extract32(val, 16, 10); + ret = licc->cpu_by_arch_id(ipi, cpuid, &cpu, &cs); + if (ret != MEMTX_OK) { + return MEMTX_DECODE_ERROR; + } + + /* override requester_id */ + addr = SMP_IPI_MAILBOX + CORE_BUF_20 + (val & 0x1c); + attrs.requester_id = cpu; + return send_ipi_data(ipi, cs, val, addr, attrs); +} + +static MemTxResult any_send(LoongsonIPICommonState *ipi, + uint64_t val, MemTxAttrs attrs) +{ + LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(ipi); + uint32_t cpuid; + hwaddr addr; + CPUState *cs; + int cpu, ret; + + cpuid = extract32(val, 16, 10); + ret = licc->cpu_by_arch_id(ipi, cpuid, &cpu, &cs); + if (ret != MEMTX_OK) { + return MEMTX_DECODE_ERROR; + } + + /* override requester_id */ + addr = val & 0xffff; + attrs.requester_id = cpu; + return send_ipi_data(ipi, cs, val, addr, attrs); +} + +MemTxResult loongson_ipi_core_writel(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs) +{ + IPICore *s = opaque; + LoongsonIPICommonState *ipi = s->ipi; + LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(ipi); + int index = 0; + uint32_t cpuid; + uint8_t vector; + CPUState *cs; + int cpu, ret; + + addr &= 0xff; + trace_loongson_ipi_write(size, (uint64_t)addr, val); + switch (addr) { + case CORE_STATUS_OFF: + qemu_log_mask(LOG_GUEST_ERROR, "can not be written"); + break; + case CORE_EN_OFF: + s->en = val; + break; + case CORE_SET_OFF: + s->status |= val; + if (s->status != 0 && (s->status & s->en) != 0) { + qemu_irq_raise(s->irq); + } + break; + case CORE_CLEAR_OFF: + s->status &= 
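send_ipi_data() in this hunk merges the payload into the destination word under control of bits 27-30 of the request: as the code reads, each set bit preserves the corresponding destination byte, and the remaining bytes are taken from the upper 32 bits of the request (when none are set, the old word is not even read). The following self-contained rendering of just that merge step is illustrative, not patch code.

    #include <stdio.h>
    #include <stdint.h>

    /* Mirror of the send_ipi_data() merge: bits 27-30 of the request select
     * destination bytes to keep; the new contents come from bits 63:32. */
    static uint32_t demo_merge_ipi_data(uint64_t val, uint32_t old)
    {
        uint32_t mask = 0, data = 0;
        int i;

        if ((val >> 27) & 0xf) {
            data = old;
            for (i = 0; i < 4; i++) {
                if (val & (1u << (27 + i))) {
                    mask |= 0xffu << (i * 8);
                }
            }
        }

        data &= mask;
        data |= (uint32_t)(val >> 32) & ~mask;
        return data;
    }

    int main(void)
    {
        /* Keep byte 0 of the old word, replace the other three. */
        uint64_t val = ((uint64_t)0x11223344 << 32) | (0x1u << 27);

        printf("merged = 0x%08x\n", demo_merge_ipi_data(val, 0xdeadbeef));
        return 0;
    }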
~val; + if (s->status == 0 && s->en != 0) { + qemu_irq_lower(s->irq); + } + break; + case CORE_BUF_20 ... CORE_BUF_38 + 4: + index = (addr - CORE_BUF_20) >> 2; + s->buf[index] = val; + break; + case IOCSR_IPI_SEND: + cpuid = extract32(val, 16, 10); + /* IPI status vector */ + vector = extract8(val, 0, 5); + ret = licc->cpu_by_arch_id(ipi, cpuid, &cpu, &cs); + if (ret != MEMTX_OK || cpu >= ipi->num_cpu) { + return MEMTX_DECODE_ERROR; + } + loongson_ipi_core_writel(&ipi->cpu[cpu], CORE_SET_OFF, + BIT(vector), 4, attrs); + break; + default: + qemu_log_mask(LOG_UNIMP, "invalid write: %x", (uint32_t)addr); + break; + } + + return MEMTX_OK; +} + +static MemTxResult loongson_ipi_iocsr_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size, + MemTxAttrs attrs) +{ + LoongsonIPICommonState *ipi = opaque; + IPICore *s; + + if (attrs.requester_id >= ipi->num_cpu) { + return MEMTX_DECODE_ERROR; + } + + s = &ipi->cpu[attrs.requester_id]; + return loongson_ipi_core_writel(s, addr, val, size, attrs); +} + +static const MemoryRegionOps loongson_ipi_iocsr_ops = { + .read_with_attrs = loongson_ipi_iocsr_readl, + .write_with_attrs = loongson_ipi_iocsr_writel, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .valid.min_access_size = 4, + .valid.max_access_size = 8, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +/* mail send and any send only support writeq */ +static MemTxResult loongson_ipi_writeq(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs) +{ + LoongsonIPICommonState *ipi = opaque; + MemTxResult ret = MEMTX_OK; + + addr &= 0xfff; + switch (addr) { + case MAIL_SEND_OFFSET: + ret = mail_send(ipi, val, attrs); + break; + case ANY_SEND_OFFSET: + ret = any_send(ipi, val, attrs); + break; + default: + break; + } + + return ret; +} + +static const MemoryRegionOps loongson_ipi64_ops = { + .write_with_attrs = loongson_ipi_writeq, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void loongson_ipi_common_realize(DeviceState *dev, Error **errp) +{ + LoongsonIPICommonState *s = LOONGSON_IPI_COMMON(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + if (kvm_irqchip_in_kernel()) { + return; + } + + memory_region_init_io(&s->ipi_iocsr_mem, OBJECT(dev), + &loongson_ipi_iocsr_ops, + s, "loongson_ipi_iocsr", 0x48); + + /* loongson_ipi_iocsr performs re-entrant IO through ipi_send */ + s->ipi_iocsr_mem.disable_reentrancy_guard = true; + + sysbus_init_mmio(sbd, &s->ipi_iocsr_mem); + + memory_region_init_io(&s->ipi64_iocsr_mem, OBJECT(dev), + &loongson_ipi64_ops, + s, "loongson_ipi64_iocsr", 0x118); + sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem); +} + +static void loongson_ipi_common_unrealize(DeviceState *dev) +{ + LoongsonIPICommonState *s = LOONGSON_IPI_COMMON(dev); + + g_free(s->cpu); +} + +static int loongson_ipi_common_pre_save(void *opaque) +{ + IPICore *ipicore = (IPICore *)opaque; + LoongsonIPICommonState *s = ipicore->ipi; + LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(s); + + if (licc->pre_save) { + return licc->pre_save(s); + } + + return 0; +} + +static int loongson_ipi_common_post_load(void *opaque, int version_id) +{ + IPICore *ipicore = (IPICore *)opaque; + LoongsonIPICommonState *s = ipicore->ipi; + LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_GET_CLASS(s); + + if (licc->post_load) { + return licc->post_load(s, version_id); + } + + return 0; +} + +static const VMStateDescription vmstate_ipi_core = { + .name = "ipi-single", + 
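The IOCSR_IPI_SEND path in loongson_ipi_core_writel() above decodes the request word into a target CPU and an interrupt vector before ORing the corresponding bit into that core's SET register. Below is a small decode-only sketch of the same field layout (10-bit cpuid at bit 16, 5-bit vector at bit 0); it is illustrative and uses no QEMU helpers.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t val = (5u << 16) | 3;              /* target cpu 5, vector 3 */
        uint32_t cpuid  = (val >> 16) & 0x3ff;      /* extract32(val, 16, 10) */
        uint8_t  vector = val & 0x1f;               /* extract8(val, 0, 5)    */
        uint32_t set    = 1u << vector;             /* bit written to SET     */

        printf("cpu=%u vector=%u set=0x%x\n", cpuid, (unsigned)vector, set);
        return 0;
    }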
.version_id = 2, + .minimum_version_id = 2, + .pre_save = loongson_ipi_common_pre_save, + .post_load = loongson_ipi_common_post_load, + .fields = (const VMStateField[]) { + VMSTATE_UINT32(status, IPICore), + VMSTATE_UINT32(en, IPICore), + VMSTATE_UINT32(set, IPICore), + VMSTATE_UINT32(clear, IPICore), + VMSTATE_UINT32_ARRAY(buf, IPICore, IPI_MBX_NUM * 2), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_loongson_ipi_common = { + .name = "loongson_ipi", + .version_id = 2, + .minimum_version_id = 2, + .fields = (const VMStateField[]) { + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongsonIPICommonState, + num_cpu, vmstate_ipi_core, + IPICore), + VMSTATE_END_OF_LIST() + } +}; + +static void loongson_ipi_common_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + LoongsonIPICommonClass *licc = LOONGSON_IPI_COMMON_CLASS(klass); + + device_class_set_parent_realize(dc, loongson_ipi_common_realize, + &licc->parent_realize); + device_class_set_parent_unrealize(dc, loongson_ipi_common_unrealize, + &licc->parent_unrealize); + dc->vmsd = &vmstate_loongson_ipi_common; +} + +static const TypeInfo loongarch_ipi_common_types[] = { + { + .name = TYPE_LOONGSON_IPI_COMMON, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(LoongsonIPICommonState), + .class_size = sizeof(LoongsonIPICommonClass), + .class_init = loongson_ipi_common_class_init, + .abstract = true, + } +}; + +DEFINE_TYPES(loongarch_ipi_common_types) diff --git a/hw/intc/m68k_irqc.c b/hw/intc/m68k_irqc.c index cf3beefcfe1..2532322618a 100644 --- a/hw/intc/m68k_irqc.c +++ b/hw/intc/m68k_irqc.c @@ -85,13 +85,12 @@ static const VMStateDescription vmstate_m68k_irqc = { } }; -static Property m68k_irqc_properties[] = { +static const Property m68k_irqc_properties[] = { DEFINE_PROP_LINK("m68k-cpu", M68KIRQCState, cpu, TYPE_M68K_CPU, ArchCPU *), - DEFINE_PROP_END_OF_LIST(), }; -static void m68k_irqc_class_init(ObjectClass *oc, void *data) +static void m68k_irqc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); NMIClass *nc = NMI_CLASS(oc); @@ -99,7 +98,7 @@ static void m68k_irqc_class_init(ObjectClass *oc, void *data) device_class_set_props(dc, m68k_irqc_properties); nc->nmi_monitor_handler = m68k_nmi; - dc->reset = m68k_irqc_reset; + device_class_set_legacy_reset(dc, m68k_irqc_reset); dc->vmsd = &vmstate_m68k_irqc; ic->get_statistics = m68k_irqc_get_statistics; ic->print_info = m68k_irqc_print_info; @@ -111,7 +110,7 @@ static const TypeInfo m68k_irqc_type_info = { .instance_size = sizeof(M68KIRQCState), .instance_init = m68k_irqc_instance_init, .class_init = m68k_irqc_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_NMI }, { TYPE_INTERRUPT_STATS_PROVIDER }, { } diff --git a/hw/intc/meson.build b/hw/intc/meson.build index afd1aa51ee9..3137521a4ad 100644 --- a/hw/intc/meson.build +++ b/hw/intc/meson.build @@ -6,7 +6,7 @@ system_ss.add(when: 'CONFIG_ARM_GIC', if_true: files( 'arm_gicv3_common.c', 'arm_gicv3_its_common.c', )) -system_ss.add(when: 'CONFIG_ARM_GICV3_TCG', if_true: files( +system_ss.add(when: 'CONFIG_ARM_GICV3', if_true: files( 'arm_gicv3.c', 'arm_gicv3_dist.c', 'arm_gicv3_its.c', @@ -15,7 +15,6 @@ system_ss.add(when: 'CONFIG_ARM_GICV3_TCG', if_true: files( system_ss.add(when: 'CONFIG_ALLWINNER_A10_PIC', if_true: files('allwinner-a10-pic.c')) system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_vic.c')) system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_intc.c')) 
-system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_pic.c')) system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_gic.c', 'exynos4210_combiner.c')) system_ss.add(when: 'CONFIG_GOLDFISH_PIC', if_true: files('goldfish_pic.c')) system_ss.add(when: 'CONFIG_HEATHROW_PIC', if_true: files('heathrow_pic.c')) @@ -40,7 +39,7 @@ endif specific_ss.add(when: 'CONFIG_APIC', if_true: files('apic.c', 'apic_common.c')) specific_ss.add(when: 'CONFIG_ARM_GIC', if_true: files('arm_gicv3_cpuif_common.c')) -specific_ss.add(when: 'CONFIG_ARM_GICV3_TCG', if_true: files('arm_gicv3_cpuif.c')) +specific_ss.add(when: 'CONFIG_ARM_GICV3', if_true: files('arm_gicv3_cpuif.c')) specific_ss.add(when: 'CONFIG_ARM_GIC_KVM', if_true: files('arm_gic_kvm.c')) specific_ss.add(when: ['CONFIG_ARM_GIC_KVM', 'TARGET_AARCH64'], if_true: files('arm_gicv3_kvm.c', 'arm_gicv3_its_kvm.c')) specific_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_nvic.c')) @@ -69,7 +68,15 @@ specific_ss.add(when: 'CONFIG_XIVE', if_true: files('xive.c')) specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XIVE'], if_true: files('spapr_xive_kvm.c')) specific_ss.add(when: 'CONFIG_M68K_IRQC', if_true: files('m68k_irqc.c')) +specific_ss.add(when: 'CONFIG_LOONGSON_IPI_COMMON', if_true: files('loongson_ipi_common.c')) specific_ss.add(when: 'CONFIG_LOONGSON_IPI', if_true: files('loongson_ipi.c')) -specific_ss.add(when: 'CONFIG_LOONGARCH_PCH_PIC', if_true: files('loongarch_pch_pic.c')) +specific_ss.add(when: 'CONFIG_LOONGARCH_IPI', if_true: files('loongarch_ipi.c')) +specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_LOONGARCH_IPI'], + if_true: files('loongarch_ipi_kvm.c')) +specific_ss.add(when: 'CONFIG_LOONGARCH_PCH_PIC', if_true: files('loongarch_pch_pic.c', 'loongarch_pic_common.c')) +specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_LOONGARCH_PCH_PIC'], + if_true: files('loongarch_pic_kvm.c')) specific_ss.add(when: 'CONFIG_LOONGARCH_PCH_MSI', if_true: files('loongarch_pch_msi.c')) -specific_ss.add(when: 'CONFIG_LOONGARCH_EXTIOI', if_true: files('loongarch_extioi.c')) +specific_ss.add(when: 'CONFIG_LOONGARCH_EXTIOI', if_true: files('loongarch_extioi.c', 'loongarch_extioi_common.c')) +specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_LOONGARCH_EXTIOI'], + if_true: files('loongarch_extioi_kvm.c')) diff --git a/hw/intc/mips_gic.c b/hw/intc/mips_gic.c index 77ba7348a34..0c50ba41f62 100644 --- a/hw/intc/mips_gic.c +++ b/hw/intc/mips_gic.c @@ -14,9 +14,9 @@ #include "qemu/module.h" #include "qapi/error.h" #include "hw/sysbus.h" -#include "exec/memory.h" -#include "sysemu/kvm.h" -#include "sysemu/reset.h" +#include "system/memory.h" +#include "system/kvm.h" +#include "system/reset.h" #include "kvm_mips.h" #include "hw/intc/mips_gic.h" #include "hw/irq.h" @@ -255,7 +255,6 @@ static void gic_write_vp(MIPSGICState *gic, uint32_t vp_index, hwaddr addr, return; bad_offset: qemu_log_mask(LOG_GUEST_ERROR, "Wrong GIC offset at 0x%" PRIx64 "\n", addr); - return; } static void gic_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) @@ -438,13 +437,12 @@ static void mips_gic_realize(DeviceState *dev, Error **errp) } } -static Property mips_gic_properties[] = { +static const Property mips_gic_properties[] = { DEFINE_PROP_UINT32("num-vp", MIPSGICState, num_vps, 1), DEFINE_PROP_UINT32("num-irq", MIPSGICState, num_irq, 256), - DEFINE_PROP_END_OF_LIST(), }; -static void mips_gic_class_init(ObjectClass *klass, void *data) +static void mips_gic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/omap_intc.c 
b/hw/intc/omap_intc.c index 435c47600fc..c61158bddd3 100644 --- a/hw/intc/omap_intc.c +++ b/hw/intc/omap_intc.c @@ -50,8 +50,6 @@ struct OMAPIntcState { int level_only; uint32_t size; - uint8_t revision; - /* state */ uint32_t new_agr[2]; int sir_intr[2]; @@ -104,8 +102,8 @@ static inline void omap_inth_update(OMAPIntcState *s, int is_fiq) } } -#define INT_FALLING_EDGE 0 -#define INT_LOW_LEVEL 1 +#define INT_FALLING_EDGE 0 +#define INT_LOW_LEVEL 1 static void omap_set_intr(void *opaque, int irq, int req) { @@ -133,26 +131,6 @@ static void omap_set_intr(void *opaque, int irq, int req) } } -/* Simplified version with no edge detection */ -static void omap_set_intr_noedge(void *opaque, int irq, int req) -{ - OMAPIntcState *ih = opaque; - uint32_t rise; - - struct omap_intr_handler_bank_s *bank = &ih->bank[irq >> 5]; - int n = irq & 31; - - if (req) { - rise = ~bank->inputs & (1 << n); - if (rise) { - bank->irqs |= bank->inputs |= rise; - omap_inth_update(ih, 0); - omap_inth_update(ih, 1); - } - } else - bank->irqs = (bank->inputs &= ~(1 << n)) | bank->swi; -} - static uint64_t omap_inth_read(void *opaque, hwaddr addr, unsigned size) { @@ -164,13 +142,13 @@ static uint64_t omap_inth_read(void *opaque, hwaddr addr, offset &= 0xff; switch (offset) { - case 0x00: /* ITR */ + case 0x00: /* ITR */ return bank->irqs; - case 0x04: /* MIR */ + case 0x04: /* MIR */ return bank->mask; - case 0x10: /* SIR_IRQ_CODE */ + case 0x10: /* SIR_IRQ_CODE */ case 0x14: /* SIR_FIQ_CODE */ if (bank_no != 0) break; @@ -181,49 +159,49 @@ static uint64_t omap_inth_read(void *opaque, hwaddr addr, bank->irqs &= ~(1 << i); return line_no; - case 0x18: /* CONTROL_REG */ + case 0x18: /* CONTROL_REG */ if (bank_no != 0) break; return 0; - case 0x1c: /* ILR0 */ - case 0x20: /* ILR1 */ - case 0x24: /* ILR2 */ - case 0x28: /* ILR3 */ - case 0x2c: /* ILR4 */ - case 0x30: /* ILR5 */ - case 0x34: /* ILR6 */ - case 0x38: /* ILR7 */ - case 0x3c: /* ILR8 */ - case 0x40: /* ILR9 */ - case 0x44: /* ILR10 */ - case 0x48: /* ILR11 */ - case 0x4c: /* ILR12 */ - case 0x50: /* ILR13 */ - case 0x54: /* ILR14 */ - case 0x58: /* ILR15 */ - case 0x5c: /* ILR16 */ - case 0x60: /* ILR17 */ - case 0x64: /* ILR18 */ - case 0x68: /* ILR19 */ - case 0x6c: /* ILR20 */ - case 0x70: /* ILR21 */ - case 0x74: /* ILR22 */ - case 0x78: /* ILR23 */ - case 0x7c: /* ILR24 */ - case 0x80: /* ILR25 */ - case 0x84: /* ILR26 */ - case 0x88: /* ILR27 */ - case 0x8c: /* ILR28 */ - case 0x90: /* ILR29 */ - case 0x94: /* ILR30 */ - case 0x98: /* ILR31 */ + case 0x1c: /* ILR0 */ + case 0x20: /* ILR1 */ + case 0x24: /* ILR2 */ + case 0x28: /* ILR3 */ + case 0x2c: /* ILR4 */ + case 0x30: /* ILR5 */ + case 0x34: /* ILR6 */ + case 0x38: /* ILR7 */ + case 0x3c: /* ILR8 */ + case 0x40: /* ILR9 */ + case 0x44: /* ILR10 */ + case 0x48: /* ILR11 */ + case 0x4c: /* ILR12 */ + case 0x50: /* ILR13 */ + case 0x54: /* ILR14 */ + case 0x58: /* ILR15 */ + case 0x5c: /* ILR16 */ + case 0x60: /* ILR17 */ + case 0x64: /* ILR18 */ + case 0x68: /* ILR19 */ + case 0x6c: /* ILR20 */ + case 0x70: /* ILR21 */ + case 0x74: /* ILR22 */ + case 0x78: /* ILR23 */ + case 0x7c: /* ILR24 */ + case 0x80: /* ILR25 */ + case 0x84: /* ILR26 */ + case 0x88: /* ILR27 */ + case 0x8c: /* ILR28 */ + case 0x90: /* ILR29 */ + case 0x94: /* ILR30 */ + case 0x98: /* ILR31 */ i = (offset - 0x1c) >> 2; return (bank->priority[i] << 2) | (((bank->sens_edge >> i) & 1) << 1) | ((bank->fiq >> i) & 1); - case 0x9c: /* ISR */ + case 0x9c: /* ISR */ return 0x00000000; } @@ -241,24 +219,24 @@ static void omap_inth_write(void 
*opaque, hwaddr addr, offset &= 0xff; switch (offset) { - case 0x00: /* ITR */ + case 0x00: /* ITR */ /* Important: ignore the clearing if the IRQ is level-triggered and the input bit is 1 */ bank->irqs &= value | (bank->inputs & bank->sens_edge); return; - case 0x04: /* MIR */ + case 0x04: /* MIR */ bank->mask = value; omap_inth_update(s, 0); omap_inth_update(s, 1); return; - case 0x10: /* SIR_IRQ_CODE */ - case 0x14: /* SIR_FIQ_CODE */ + case 0x10: /* SIR_IRQ_CODE */ + case 0x14: /* SIR_FIQ_CODE */ OMAP_RO_REG(addr); break; - case 0x18: /* CONTROL_REG */ + case 0x18: /* CONTROL_REG */ if (bank_no != 0) break; if (value & 2) { @@ -273,38 +251,38 @@ static void omap_inth_write(void *opaque, hwaddr addr, } return; - case 0x1c: /* ILR0 */ - case 0x20: /* ILR1 */ - case 0x24: /* ILR2 */ - case 0x28: /* ILR3 */ - case 0x2c: /* ILR4 */ - case 0x30: /* ILR5 */ - case 0x34: /* ILR6 */ - case 0x38: /* ILR7 */ - case 0x3c: /* ILR8 */ - case 0x40: /* ILR9 */ - case 0x44: /* ILR10 */ - case 0x48: /* ILR11 */ - case 0x4c: /* ILR12 */ - case 0x50: /* ILR13 */ - case 0x54: /* ILR14 */ - case 0x58: /* ILR15 */ - case 0x5c: /* ILR16 */ - case 0x60: /* ILR17 */ - case 0x64: /* ILR18 */ - case 0x68: /* ILR19 */ - case 0x6c: /* ILR20 */ - case 0x70: /* ILR21 */ - case 0x74: /* ILR22 */ - case 0x78: /* ILR23 */ - case 0x7c: /* ILR24 */ - case 0x80: /* ILR25 */ - case 0x84: /* ILR26 */ - case 0x88: /* ILR27 */ - case 0x8c: /* ILR28 */ - case 0x90: /* ILR29 */ - case 0x94: /* ILR30 */ - case 0x98: /* ILR31 */ + case 0x1c: /* ILR0 */ + case 0x20: /* ILR1 */ + case 0x24: /* ILR2 */ + case 0x28: /* ILR3 */ + case 0x2c: /* ILR4 */ + case 0x30: /* ILR5 */ + case 0x34: /* ILR6 */ + case 0x38: /* ILR7 */ + case 0x3c: /* ILR8 */ + case 0x40: /* ILR9 */ + case 0x44: /* ILR10 */ + case 0x48: /* ILR11 */ + case 0x4c: /* ILR12 */ + case 0x50: /* ILR13 */ + case 0x54: /* ILR14 */ + case 0x58: /* ILR15 */ + case 0x5c: /* ILR16 */ + case 0x60: /* ILR17 */ + case 0x64: /* ILR18 */ + case 0x68: /* ILR19 */ + case 0x6c: /* ILR20 */ + case 0x70: /* ILR21 */ + case 0x74: /* ILR22 */ + case 0x78: /* ILR23 */ + case 0x7c: /* ILR24 */ + case 0x80: /* ILR25 */ + case 0x84: /* ILR26 */ + case 0x88: /* ILR27 */ + case 0x8c: /* ILR28 */ + case 0x90: /* ILR29 */ + case 0x94: /* ILR30 */ + case 0x98: /* ILR31 */ i = (offset - 0x1c) >> 2; bank->priority[i] = (value >> 2) & 0x1f; bank->sens_edge &= ~(1 << i); @@ -313,7 +291,7 @@ static void omap_inth_write(void *opaque, hwaddr addr, bank->fiq |= (value & 1) << i; return; - case 0x9c: /* ISR */ + case 0x9c: /* ISR */ for (i = 0; i < 32; i ++) if (value & (1 << i)) { omap_set_intr(s, 32 * bank_no + i, 1); @@ -397,16 +375,15 @@ void omap_intc_set_fclk(OMAPIntcState *intc, omap_clk clk) intc->fclk = clk; } -static Property omap_intc_properties[] = { +static const Property omap_intc_properties[] = { DEFINE_PROP_UINT32("size", OMAPIntcState, size, 0x100), - DEFINE_PROP_END_OF_LIST(), }; -static void omap_intc_class_init(ObjectClass *klass, void *data) +static void omap_intc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = omap_inth_reset; + device_class_set_legacy_reset(dc, omap_inth_reset); device_class_set_props(dc, omap_intc_properties); /* Reason: pointer property "clk" */ dc->user_creatable = false; @@ -414,277 +391,16 @@ static void omap_intc_class_init(ObjectClass *klass, void *data) } static const TypeInfo omap_intc_info = { - .name = "omap-intc", - .parent = TYPE_OMAP_INTC, - .instance_init = omap_intc_init, - .class_init = 
omap_intc_class_init, -}; - -static uint64_t omap2_inth_read(void *opaque, hwaddr addr, - unsigned size) -{ - OMAPIntcState *s = opaque; - int offset = addr; - int bank_no, line_no; - struct omap_intr_handler_bank_s *bank = NULL; - - if ((offset & 0xf80) == 0x80) { - bank_no = (offset & 0x60) >> 5; - if (bank_no < s->nbanks) { - offset &= ~0x60; - bank = &s->bank[bank_no]; - } else { - OMAP_BAD_REG(addr); - return 0; - } - } - - switch (offset) { - case 0x00: /* INTC_REVISION */ - return s->revision; - - case 0x10: /* INTC_SYSCONFIG */ - return (s->autoidle >> 2) & 1; - - case 0x14: /* INTC_SYSSTATUS */ - return 1; /* RESETDONE */ - - case 0x40: /* INTC_SIR_IRQ */ - return s->sir_intr[0]; - - case 0x44: /* INTC_SIR_FIQ */ - return s->sir_intr[1]; - - case 0x48: /* INTC_CONTROL */ - return (!s->mask) << 2; /* GLOBALMASK */ - - case 0x4c: /* INTC_PROTECTION */ - return 0; - - case 0x50: /* INTC_IDLE */ - return s->autoidle & 3; - - /* Per-bank registers */ - case 0x80: /* INTC_ITR */ - return bank->inputs; - - case 0x84: /* INTC_MIR */ - return bank->mask; - - case 0x88: /* INTC_MIR_CLEAR */ - case 0x8c: /* INTC_MIR_SET */ - return 0; - - case 0x90: /* INTC_ISR_SET */ - return bank->swi; - - case 0x94: /* INTC_ISR_CLEAR */ - return 0; - - case 0x98: /* INTC_PENDING_IRQ */ - return bank->irqs & ~bank->mask & ~bank->fiq; - - case 0x9c: /* INTC_PENDING_FIQ */ - return bank->irqs & ~bank->mask & bank->fiq; - - /* Per-line registers */ - case 0x100 ... 0x300: /* INTC_ILR */ - bank_no = (offset - 0x100) >> 7; - if (bank_no > s->nbanks) - break; - bank = &s->bank[bank_no]; - line_no = (offset & 0x7f) >> 2; - return (bank->priority[line_no] << 2) | - ((bank->fiq >> line_no) & 1); - } - OMAP_BAD_REG(addr); - return 0; -} - -static void omap2_inth_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - OMAPIntcState *s = opaque; - int offset = addr; - int bank_no, line_no; - struct omap_intr_handler_bank_s *bank = NULL; - - if ((offset & 0xf80) == 0x80) { - bank_no = (offset & 0x60) >> 5; - if (bank_no < s->nbanks) { - offset &= ~0x60; - bank = &s->bank[bank_no]; - } else { - OMAP_BAD_REG(addr); - return; - } - } - - switch (offset) { - case 0x10: /* INTC_SYSCONFIG */ - s->autoidle &= 4; - s->autoidle |= (value & 1) << 2; - if (value & 2) { /* SOFTRESET */ - omap_inth_reset(DEVICE(s)); - } - return; - - case 0x48: /* INTC_CONTROL */ - s->mask = (value & 4) ? 0 : ~0; /* GLOBALMASK */ - if (value & 2) { /* NEWFIQAGR */ - qemu_set_irq(s->parent_intr[1], 0); - s->new_agr[1] = ~0; - omap_inth_update(s, 1); - } - if (value & 1) { /* NEWIRQAGR */ - qemu_set_irq(s->parent_intr[0], 0); - s->new_agr[0] = ~0; - omap_inth_update(s, 0); - } - return; - - case 0x4c: /* INTC_PROTECTION */ - /* TODO: Make a bitmap (or sizeof(char)map) of access privileges - * for every register, see Chapter 3 and 4 for privileged mode. 
*/ - if (value & 1) - fprintf(stderr, "%s: protection mode enable attempt\n", - __func__); - return; - - case 0x50: /* INTC_IDLE */ - s->autoidle &= ~3; - s->autoidle |= value & 3; - return; - - /* Per-bank registers */ - case 0x84: /* INTC_MIR */ - bank->mask = value; - omap_inth_update(s, 0); - omap_inth_update(s, 1); - return; - - case 0x88: /* INTC_MIR_CLEAR */ - bank->mask &= ~value; - omap_inth_update(s, 0); - omap_inth_update(s, 1); - return; - - case 0x8c: /* INTC_MIR_SET */ - bank->mask |= value; - return; - - case 0x90: /* INTC_ISR_SET */ - bank->irqs |= bank->swi |= value; - omap_inth_update(s, 0); - omap_inth_update(s, 1); - return; - - case 0x94: /* INTC_ISR_CLEAR */ - bank->swi &= ~value; - bank->irqs = bank->swi & bank->inputs; - return; - - /* Per-line registers */ - case 0x100 ... 0x300: /* INTC_ILR */ - bank_no = (offset - 0x100) >> 7; - if (bank_no > s->nbanks) - break; - bank = &s->bank[bank_no]; - line_no = (offset & 0x7f) >> 2; - bank->priority[line_no] = (value >> 2) & 0x3f; - bank->fiq &= ~(1 << line_no); - bank->fiq |= (value & 1) << line_no; - return; - - case 0x00: /* INTC_REVISION */ - case 0x14: /* INTC_SYSSTATUS */ - case 0x40: /* INTC_SIR_IRQ */ - case 0x44: /* INTC_SIR_FIQ */ - case 0x80: /* INTC_ITR */ - case 0x98: /* INTC_PENDING_IRQ */ - case 0x9c: /* INTC_PENDING_FIQ */ - OMAP_RO_REG(addr); - return; - } - OMAP_BAD_REG(addr); -} - -static const MemoryRegionOps omap2_inth_mem_ops = { - .read = omap2_inth_read, - .write = omap2_inth_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4, - }, -}; - -static void omap2_intc_init(Object *obj) -{ - DeviceState *dev = DEVICE(obj); - OMAPIntcState *s = OMAP_INTC(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - - s->level_only = 1; - s->nbanks = 3; - sysbus_init_irq(sbd, &s->parent_intr[0]); - sysbus_init_irq(sbd, &s->parent_intr[1]); - qdev_init_gpio_in(dev, omap_set_intr_noedge, s->nbanks * 32); - memory_region_init_io(&s->mmio, obj, &omap2_inth_mem_ops, s, - "omap2-intc", 0x1000); - sysbus_init_mmio(sbd, &s->mmio); -} - -static void omap2_intc_realize(DeviceState *dev, Error **errp) -{ - OMAPIntcState *s = OMAP_INTC(dev); - - if (!s->iclk) { - error_setg(errp, "omap2-intc: iclk not connected"); - return; - } - if (!s->fclk) { - error_setg(errp, "omap2-intc: fclk not connected"); - return; - } -} - -static Property omap2_intc_properties[] = { - DEFINE_PROP_UINT8("revision", OMAPIntcState, - revision, 0x21), - DEFINE_PROP_END_OF_LIST(), -}; - -static void omap2_intc_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->reset = omap_inth_reset; - device_class_set_props(dc, omap2_intc_properties); - /* Reason: pointer property "iclk", "fclk" */ - dc->user_creatable = false; - dc->realize = omap2_intc_realize; -} - -static const TypeInfo omap2_intc_info = { - .name = "omap2-intc", - .parent = TYPE_OMAP_INTC, - .instance_init = omap2_intc_init, - .class_init = omap2_intc_class_init, -}; - -static const TypeInfo omap_intc_type_info = { .name = TYPE_OMAP_INTC, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(OMAPIntcState), - .abstract = true, + .instance_init = omap_intc_init, + .class_init = omap_intc_class_init, }; static void omap_intc_register_types(void) { - type_register_static(&omap_intc_type_info); type_register_static(&omap_intc_info); - type_register_static(&omap2_intc_info); } type_init(omap_intc_register_types) diff --git a/hw/intc/ompic.c b/hw/intc/ompic.c index 99032ea7f73..047c367478d 100644 --- 
a/hw/intc/ompic.c +++ b/hw/intc/ompic.c @@ -13,7 +13,7 @@ #include "hw/qdev-properties.h" #include "hw/sysbus.h" #include "migration/vmstate.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qom/object.h" #define TYPE_OR1K_OMPIC "or1k-ompic" @@ -128,9 +128,8 @@ static void or1k_ompic_realize(DeviceState *dev, Error **errp) } } -static Property or1k_ompic_properties[] = { +static const Property or1k_ompic_properties[] = { DEFINE_PROP_UINT32("num-cpus", OR1KOMPICState, num_cpus, 1), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_or1k_ompic_cpu = { @@ -156,7 +155,7 @@ static const VMStateDescription vmstate_or1k_ompic = { } }; -static void or1k_ompic_class_init(ObjectClass *klass, void *data) +static void or1k_ompic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c index 9792a112240..87733eb7c30 100644 --- a/hw/intc/openpic.c +++ b/hw/intc/openpic.c @@ -41,7 +41,6 @@ #include "hw/pci/msi.h" #include "qapi/error.h" #include "qemu/bitops.h" -#include "qapi/qmp/qerror.h" #include "qemu/module.h" #include "qemu/timer.h" #include "qemu/error-report.h" @@ -1032,13 +1031,14 @@ static void openpic_cpu_write_internal(void *opaque, hwaddr addr, s_IRQ = IRQ_get_next(opp, &dst->servicing); /* Check queued interrupts. */ n_IRQ = IRQ_get_next(opp, &dst->raised); - src = &opp->src[n_IRQ]; - if (n_IRQ != -1 && - (s_IRQ == -1 || - IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) { - DPRINTF("Raise OpenPIC INT output cpu %d irq %d", - idx, n_IRQ); - qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]); + if (n_IRQ != -1) { + src = &opp->src[n_IRQ]; + if (s_IRQ == -1 || + IVPR_PRIORITY(src->ivpr) > dst->servicing.priority) { + DPRINTF("Raise OpenPIC INT output cpu %d irq %d", + idx, n_IRQ); + qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]); + } } break; default: @@ -1535,9 +1535,7 @@ static void openpic_realize(DeviceState *dev, Error **errp) }; if (opp->nb_cpus > MAX_CPU) { - error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, - TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus, - (uint64_t)0, (uint64_t)MAX_CPU); + error_setg(errp, "property 'nb_cpus' can be at most %d", MAX_CPU); return; } @@ -1608,19 +1606,18 @@ static void openpic_realize(DeviceState *dev, Error **errp) qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq); } -static Property openpic_properties[] = { +static const Property openpic_properties[] = { DEFINE_PROP_UINT32("model", OpenPICState, model, OPENPIC_MODEL_FSL_MPIC_20), DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1), - DEFINE_PROP_END_OF_LIST(), }; -static void openpic_class_init(ObjectClass *oc, void *data) +static void openpic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = openpic_realize; device_class_set_props(dc, openpic_properties); - dc->reset = openpic_reset; + device_class_set_legacy_reset(dc, openpic_reset); dc->vmsd = &vmstate_openpic; set_bit(DEVICE_CATEGORY_MISC, dc->categories); } diff --git a/hw/intc/openpic_kvm.c b/hw/intc/openpic_kvm.c index 557dd0c2bf5..673ea9ca055 100644 --- a/hw/intc/openpic_kvm.c +++ b/hw/intc/openpic_kvm.c @@ -30,7 +30,7 @@ #include "hw/pci/msi.h" #include "hw/qdev-properties.h" #include "hw/sysbus.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "qemu/log.h" #include "qemu/module.h" #include "qom/object.h" @@ -262,19 +262,18 @@ int kvm_openpic_connect_vcpu(DeviceState *d, CPUState *cs) kvm_arch_vcpu_id(cs)); } -static Property 
kvm_openpic_properties[] = { +static const Property kvm_openpic_properties[] = { DEFINE_PROP_UINT32("model", KVMOpenPICState, model, OPENPIC_MODEL_FSL_MPIC_20), - DEFINE_PROP_END_OF_LIST(), }; -static void kvm_openpic_class_init(ObjectClass *oc, void *data) +static void kvm_openpic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = kvm_openpic_realize; device_class_set_props(dc, kvm_openpic_properties); - dc->reset = kvm_openpic_reset; + device_class_set_legacy_reset(dc, kvm_openpic_reset); set_bit(DEVICE_CATEGORY_MISC, dc->categories); } diff --git a/hw/intc/pl190.c b/hw/intc/pl190.c index d79e5d8076f..838c21c4a02 100644 --- a/hw/intc/pl190.c +++ b/hw/intc/pl190.c @@ -273,11 +273,11 @@ static const VMStateDescription vmstate_pl190 = { } }; -static void pl190_class_init(ObjectClass *klass, void *data) +static void pl190_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = pl190_reset; + device_class_set_legacy_reset(dc, pl190_reset); dc->vmsd = &vmstate_pl190; } diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c index 5bacbce6a46..c2ca40b8be8 100644 --- a/hw/intc/pnv_xive.c +++ b/hw/intc/pnv_xive.c @@ -1,10 +1,9 @@ /* * QEMU PowerPC XIVE interrupt controller model * - * Copyright (c) 2017-2019, IBM Corporation. + * Copyright (c) 2017-2024, IBM Corporation. * - * This code is licensed under the GPL version 2 or later. See the - * COPYING file in the top-level directory. + * SPDX-License-Identifier: GPL-2.0-or-later */ #include "qemu/osdep.h" @@ -12,9 +11,9 @@ #include "qemu/module.h" #include "qapi/error.h" #include "target/ppc/cpu.h" -#include "sysemu/cpus.h" -#include "sysemu/dma.h" -#include "sysemu/reset.h" +#include "system/cpus.h" +#include "system/dma.h" +#include "system/reset.h" #include "hw/ppc/fdt.h" #include "hw/ppc/pnv.h" #include "hw/ppc/pnv_chip.h" @@ -471,14 +470,13 @@ static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu) return xive->regs[reg >> 3] & PPC_BIT(bit); } -static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool cam_ignore, uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match) +static bool pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) { PnvXive *xive = PNV_XIVE(xptr); PnvChip *chip = xive->chip; - int count = 0; int i, j; for (i = 0; i < chip->nr_cores; i++) { @@ -500,7 +498,8 @@ static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, * Check the thread context CAM lines and record matches. */ ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, - nvt_idx, cam_ignore, logic_serv); + nvt_idx, cam_ignore, + logic_serv); /* * Save the context and follow on to catch duplicates, that we * don't support yet. 
@@ -510,17 +509,18 @@ static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a " "thread context NVT %x/%x\n", nvt_blk, nvt_idx); - return -1; + match->count++; + continue; } match->ring = ring; match->tctx = tctx; - count++; + match->count++; } } } - return count; + return !!match->count; } static uint32_t pnv_xive_presenter_get_config(XivePresenter *xptr) @@ -2059,17 +2059,16 @@ static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt, return 0; } -static Property pnv_xive_properties[] = { +static const Property pnv_xive_properties[] = { DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0), DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0), DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0), DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0), /* The PnvChip id identifies the XIVE interrupt controller. */ DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *), - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_xive_class_init(ObjectClass *klass, void *data) +static void pnv_xive_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); @@ -2107,7 +2106,7 @@ static const TypeInfo pnv_xive_info = { .instance_size = sizeof(PnvXive), .class_init = pnv_xive_class_init, .class_size = sizeof(PnvXiveClass), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c index 78609105a84..e019cad5c14 100644 --- a/hw/intc/pnv_xive2.c +++ b/hw/intc/pnv_xive2.c @@ -1,18 +1,17 @@ /* * QEMU PowerPC XIVE2 interrupt controller model (POWER10) * - * Copyright (c) 2019-2022, IBM Corporation. + * Copyright (c) 2019-2024, IBM Corporation. * - * This code is licensed under the GPL version 2 or later. See the - * COPYING file in the top-level directory. + * SPDX-License-Identifier: GPL-2.0-or-later */ #include "qemu/osdep.h" #include "qemu/log.h" #include "qapi/error.h" #include "target/ppc/cpu.h" -#include "sysemu/cpus.h" -#include "sysemu/dma.h" +#include "system/cpus.h" +#include "system/dma.h" #include "hw/ppc/fdt.h" #include "hw/ppc/pnv.h" #include "hw/ppc/pnv_chip.h" @@ -24,8 +23,8 @@ #include "hw/ppc/xive2_regs.h" #include "hw/ppc/ppc.h" #include "hw/qdev-properties.h" -#include "sysemu/reset.h" -#include "sysemu/qtest.h" +#include "system/reset.h" +#include "system/qtest.h" #include <libfdt.h> @@ -102,12 +101,10 @@ static uint32_t pnv_xive2_block_id(PnvXive2 *xive) } /* - * Remote access to controllers. HW uses MMIOs. For now, a simple scan - * of the chips is good enough. - * - * TODO: Block scope support + * Remote access to INT controllers. HW uses MMIOs(?). For now, a simple + * scan of all the chips' INT controllers is good enough. 
*/ -static PnvXive2 *pnv_xive2_get_remote(uint8_t blk) +static PnvXive2 *pnv_xive2_get_remote(uint32_t vsd_type, hwaddr fwd_addr) { PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); int i; @@ -116,10 +113,23 @@ static PnvXive2 *pnv_xive2_get_remote(uint8_t blk) Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); PnvXive2 *xive = &chip10->xive; - if (pnv_xive2_block_id(xive) == blk) { + /* + * Is this the XIVE matching the forwarded VSD address is for this + * VSD type + */ + if ((vsd_type == VST_ESB && fwd_addr == xive->esb_base) || + (vsd_type == VST_END && fwd_addr == xive->end_base) || + ((vsd_type == VST_NVP || + vsd_type == VST_NVG) && fwd_addr == xive->nvpg_base) || + (vsd_type == VST_NVC && fwd_addr == xive->nvc_base)) { return xive; } } + + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: >>>>> %s vsd_type %u fwd_addr 0x%"HWADDR_PRIx + " NOT FOUND\n", + __func__, vsd_type, fwd_addr); return NULL; } @@ -252,8 +262,7 @@ static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk, /* Remote VST access */ if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) { - xive = pnv_xive2_get_remote(blk); - + xive = pnv_xive2_get_remote(type, (vsd & VSD_ADDRESS_MASK)); return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0; } @@ -490,6 +499,23 @@ static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx, word_number); } +static int pnv_xive2_get_nvgc(Xive2Router *xrtr, bool crowd, + uint8_t blk, uint32_t idx, + Xive2Nvgc *nvgc) +{ + return pnv_xive2_vst_read(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG, + blk, idx, nvgc); +} + +static int pnv_xive2_write_nvgc(Xive2Router *xrtr, bool crowd, + uint8_t blk, uint32_t idx, + Xive2Nvgc *nvgc) +{ + return pnv_xive2_vst_write(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG, + blk, idx, nvgc, + XIVE_VST_WORD_ALL); +} + static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type, uint32_t *table_type) { switch (nxc_type) { @@ -579,20 +605,28 @@ static uint32_t pnv_xive2_get_config(Xive2Router *xrtr) { PnvXive2 *xive = PNV_XIVE2(xrtr); uint32_t cfg = 0; + uint64_t reg = xive->cq_regs[CQ_XIVE_CFG >> 3]; - if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) { + if (reg & CQ_XIVE_CFG_GEN1_TIMA_OS) { cfg |= XIVE2_GEN1_TIMA_OS; } - if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) { + if (reg & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) { cfg |= XIVE2_VP_SAVE_RESTORE; } - if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE, - xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) { + if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE, reg) == + CQ_XIVE_CFG_THREADID_8BITS) { cfg |= XIVE2_THREADID_8BITS; } + if (reg & CQ_XIVE_CFG_EN_VP_GRP_PRIORITY) { + cfg |= XIVE2_EN_VP_GRP_PRIORITY; + } + + cfg = SETFIELD(XIVE2_VP_INT_PRIO, cfg, + GETFIELD(CQ_XIVE_CFG_VP_INT_PRIO, reg)); + return cfg; } @@ -606,24 +640,28 @@ static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu) return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit); } -static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool cam_ignore, uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match) +static bool pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) { PnvXive2 *xive = PNV_XIVE2(xptr); PnvChip *chip = xive->chip; - int count = 0; int i, j; bool gen1_tima_os = xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS; + static int next_start_core; + static int next_start_thread; 
+ int start_core = next_start_core; + int start_thread = next_start_thread; for (i = 0; i < chip->nr_cores; i++) { - PnvCore *pc = chip->cores[i]; + PnvCore *pc = chip->cores[(i + start_core) % chip->nr_cores]; CPUCore *cc = CPU_CORE(pc); for (j = 0; j < cc->nr_threads; j++) { - PowerPCCPU *cpu = pc->threads[j]; + /* Start search for match with different thread each call */ + PowerPCCPU *cpu = pc->threads[(j + start_thread) % cc->nr_threads]; XiveTCTX *tctx; int ring; @@ -639,30 +677,53 @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, logic_serv); } else { ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk, - nvt_idx, cam_ignore, - logic_serv); + nvt_idx, crowd, cam_ignore, + logic_serv); } - /* - * Save the context and follow on to catch duplicates, - * that we don't support yet. - */ if (ring != -1) { - if (match->tctx) { + /* + * For VP-specific match, finding more than one is a + * problem. For group notification, it's possible. + */ + if (!cam_ignore && match->tctx) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a " "thread context NVT %x/%x\n", nvt_blk, nvt_idx); - return false; + /* Should set a FIR if we ever model it */ + match->count++; + continue; + } + /* + * For a group notification, we need to know if the + * match is precluded first by checking the current + * thread priority. If the interrupt can be delivered, + * we always notify the first match (for now). + */ + if (cam_ignore && + xive2_tm_irq_precluded(tctx, ring, priority)) { + match->precluded = true; + } else { + if (!match->tctx) { + match->ring = ring; + match->tctx = tctx; + + next_start_thread = j + start_thread + 1; + if (next_start_thread >= cc->nr_threads) { + next_start_thread = 0; + next_start_core = i + start_core + 1; + if (next_start_core >= chip->nr_cores) { + next_start_core = 0; + } + } + } + match->count++; } - - match->ring = ring; - match->tctx = tctx; - count++; } } } - return count; + return !!match->count; } static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr) @@ -676,6 +737,47 @@ static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr) return cfg; } +static int pnv_xive2_broadcast(XivePresenter *xptr, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool ignore, uint8_t priority) +{ + PnvXive2 *xive = PNV_XIVE2(xptr); + PnvChip *chip = xive->chip; + int i, j; + bool gen1_tima_os = + xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS; + + for (i = 0; i < chip->nr_cores; i++) { + PnvCore *pc = chip->cores[i]; + CPUCore *cc = CPU_CORE(pc); + + for (j = 0; j < cc->nr_threads; j++) { + PowerPCCPU *cpu = pc->threads[j]; + XiveTCTX *tctx; + int ring; + + if (!pnv_xive2_is_cpu_enabled(xive, cpu)) { + continue; + } + + tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); + + if (gen1_tima_os) { + ring = xive_presenter_tctx_match(xptr, tctx, 0, nvt_blk, + nvt_idx, ignore, 0); + } else { + ring = xive2_presenter_tctx_match(xptr, tctx, 0, nvt_blk, + nvt_idx, crowd, ignore, 0); + } + + if (ring != -1) { + xive2_tm_set_lsmfb(tctx, ring, priority); + } + } + } + return 0; +} + static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr) { return pnv_xive2_block_id(PNV_XIVE2(xrtr)); @@ -1103,7 +1205,8 @@ static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset, case CQ_FIRMASK_OR: /* FIR error reporting */ break; default: - xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset); + xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); return; } @@ -1234,7 +1337,6 @@ static uint64_t 
pnv_xive2_ic_vc_read(void *opaque, hwaddr offset, case VC_ENDC_WATCH2_SPEC: case VC_ENDC_WATCH3_SPEC: watch_engine = (offset - VC_ENDC_WATCH0_SPEC) >> 6; - xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT); pnv_xive2_endc_cache_watch_release(xive, watch_engine); val = xive->vc_regs[reg]; break; @@ -1245,10 +1347,11 @@ static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset, case VC_ENDC_WATCH3_DATA0: /* * Load DATA registers from cache with data requested by the - * SPEC register + * SPEC register. Clear gen_flipped bit in word 1. */ watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6; pnv_xive2_end_cache_load(xive, watch_engine); + xive->vc_regs[reg] &= ~(uint64_t)END2_W1_GEN_FLIPPED; val = xive->vc_regs[reg]; break; @@ -1316,7 +1419,14 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, /* * ESB cache updates (not modeled) */ - /* case VC_ESBC_FLUSH_CTRL: */ + case VC_ESBC_FLUSH_CTRL: + if (val & VC_ESBC_FLUSH_CTRL_WANT_CACHE_DISABLE) { + xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx + " value 0x%"PRIx64" bit[2] poll_want_cache_disable", + offset, val); + return; + } + break; case VC_ESBC_FLUSH_POLL: xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID; /* ESB update */ @@ -1332,7 +1442,14 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, /* * EAS cache updates (not modeled) */ - /* case VC_EASC_FLUSH_CTRL: */ + case VC_EASC_FLUSH_CTRL: + if (val & VC_EASC_FLUSH_CTRL_WANT_CACHE_DISABLE) { + xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx + " value 0x%"PRIx64" bit[2] poll_want_cache_disable", + offset, val); + return; + } + break; case VC_EASC_FLUSH_POLL: xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID; /* EAS update */ @@ -1371,7 +1488,14 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, break; - /* case VC_ENDC_FLUSH_CTRL: */ + case VC_ENDC_FLUSH_CTRL: + if (val & VC_ENDC_FLUSH_CTRL_WANT_CACHE_DISABLE) { + xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx + " value 0x%"PRIx64" bit[2] poll_want_cache_disable", + offset, val); + return; + } + break; case VC_ENDC_FLUSH_POLL: xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID; break; @@ -1400,7 +1524,8 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, break; default: - xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "VC: invalid write @0x%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); return; } @@ -1591,7 +1716,14 @@ static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset, pnv_xive2_nxc_update(xive, watch_engine); break; - /* case PC_NXC_FLUSH_CTRL: */ + case PC_NXC_FLUSH_CTRL: + if (val & PC_NXC_FLUSH_CTRL_WANT_CACHE_DISABLE) { + xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx + " value 0x%"PRIx64" bit[2] poll_want_cache_disable", + offset, val); + return; + } + break; case PC_NXC_FLUSH_POLL: xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID; break; @@ -1608,7 +1740,8 @@ static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset, break; default: - xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "PC: invalid write @0x%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); return; } @@ -1695,7 +1828,8 @@ static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset, xive->tctxt_regs[reg] = val; break; default: - xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "TCTXT: invalid write @0x%"HWADDR_PRIx + " data 0x%"PRIx64, 
offset, val); return; } } @@ -1766,7 +1900,8 @@ static void pnv_xive2_xscom_write(void *opaque, hwaddr offset, pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size); break; default: - xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx + " value 0x%"PRIx64, offset, val); } } @@ -1834,7 +1969,8 @@ static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset, break; default: - xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx + " value 0x%"PRIx64, offset, val); } } @@ -1876,7 +2012,8 @@ static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset, { PnvXive2 *xive = PNV_XIVE2(opaque); - xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); } static const MemoryRegionOps pnv_xive2_ic_lsi_ops = { @@ -1979,7 +2116,8 @@ static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset, inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_CI; break; default: - xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); return; } @@ -2132,21 +2270,40 @@ static const MemoryRegionOps pnv_xive2_tm_ops = { }, }; -static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset, +static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr addr, unsigned size) { PnvXive2 *xive = PNV_XIVE2(opaque); + XivePresenter *xptr = XIVE_PRESENTER(xive); + uint32_t page = addr >> xive->nvpg_shift; + uint16_t op = addr & 0xFFF; + uint8_t blk = pnv_xive2_block_id(xive); - xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset); - return -1; + if (size != 2) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc load size %d\n", + size); + return -1; + } + + return xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, 1); } -static void pnv_xive2_nvc_write(void *opaque, hwaddr offset, +static void pnv_xive2_nvc_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { PnvXive2 *xive = PNV_XIVE2(opaque); + XivePresenter *xptr = XIVE_PRESENTER(xive); + uint32_t page = addr >> xive->nvc_shift; + uint16_t op = addr & 0xFFF; + uint8_t blk = pnv_xive2_block_id(xive); + + if (size != 1) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc write size %d\n", + size); + return; + } - xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset); + (void)xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, val); } static const MemoryRegionOps pnv_xive2_nvc_ops = { @@ -2154,30 +2311,63 @@ static const MemoryRegionOps pnv_xive2_nvc_ops = { .write = pnv_xive2_nvc_write, .endianness = DEVICE_BIG_ENDIAN, .valid = { - .min_access_size = 8, + .min_access_size = 1, .max_access_size = 8, }, .impl = { - .min_access_size = 8, + .min_access_size = 1, .max_access_size = 8, }, }; -static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset, +static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr addr, unsigned size) { PnvXive2 *xive = PNV_XIVE2(opaque); + XivePresenter *xptr = XIVE_PRESENTER(xive); + uint32_t page = addr >> xive->nvpg_shift; + uint16_t op = addr & 0xFFF; + uint32_t index = page >> 1; + uint8_t blk = pnv_xive2_block_id(xive); - xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset); - return -1; + if (size != 2) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg load size %d\n", + size); + return -1; + } + + if (page % 2) { + /* odd page - NVG */ + return 
xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, 1); + } else { + /* even page - NVP */ + return xive2_presenter_nvp_backlog_op(xptr, blk, index, op); + } } -static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset, +static void pnv_xive2_nvpg_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { PnvXive2 *xive = PNV_XIVE2(opaque); + XivePresenter *xptr = XIVE_PRESENTER(xive); + uint32_t page = addr >> xive->nvpg_shift; + uint16_t op = addr & 0xFFF; + uint32_t index = page >> 1; + uint8_t blk = pnv_xive2_block_id(xive); + + if (size != 1) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg write size %d\n", + size); + return; + } - xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset); + if (page % 2) { + /* odd page - NVG */ + (void)xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, val); + } else { + /* even page - NVP */ + (void)xive2_presenter_nvp_backlog_op(xptr, blk, index, op); + } } static const MemoryRegionOps pnv_xive2_nvpg_ops = { @@ -2185,11 +2375,11 @@ static const MemoryRegionOps pnv_xive2_nvpg_ops = { .write = pnv_xive2_nvpg_write, .endianness = DEVICE_BIG_ENDIAN, .valid = { - .min_access_size = 8, + .min_access_size = 1, .max_access_size = 8, }, .impl = { - .min_access_size = 8, + .min_access_size = 1, .max_access_size = 8, }, }; @@ -2337,7 +2527,7 @@ static void pnv_xive2_realize(DeviceState *dev, Error **errp) qemu_register_reset(pnv_xive2_reset, dev); } -static Property pnv_xive2_properties[] = { +static const Property pnv_xive2_properties[] = { DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0), DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0), DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0), @@ -2349,7 +2539,6 @@ static Property pnv_xive2_properties[] = { DEFINE_PROP_UINT64("config", PnvXive2, config, PNV_XIVE2_CONFIGURATION), DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *), - DEFINE_PROP_END_OF_LIST(), }; static void pnv_xive2_instance_init(Object *obj) @@ -2384,7 +2573,7 @@ static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt, return 0; } -static void pnv_xive2_class_init(ObjectClass *klass, void *data) +static void pnv_xive2_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); @@ -2407,6 +2596,8 @@ static void pnv_xive2_class_init(ObjectClass *klass, void *data) xrc->write_end = pnv_xive2_write_end; xrc->get_nvp = pnv_xive2_get_nvp; xrc->write_nvp = pnv_xive2_write_nvp; + xrc->get_nvgc = pnv_xive2_get_nvgc; + xrc->write_nvgc = pnv_xive2_write_nvgc; xrc->get_config = pnv_xive2_get_config; xrc->get_block_id = pnv_xive2_get_block_id; @@ -2414,6 +2605,7 @@ static void pnv_xive2_class_init(ObjectClass *klass, void *data) xpc->match_nvt = pnv_xive2_match_nvt; xpc->get_config = pnv_xive2_presenter_get_config; + xpc->broadcast = pnv_xive2_broadcast; }; static const TypeInfo pnv_xive2_info = { @@ -2423,7 +2615,7 @@ static const TypeInfo pnv_xive2_info = { .instance_size = sizeof(PnvXive2), .class_init = pnv_xive2_class_init, .class_size = sizeof(PnvXive2Class), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } @@ -2497,8 +2689,9 @@ void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf) Xive2Eas eas; Xive2End end; Xive2Nvp nvp; + Xive2Nvgc nvgc; int i; - uint64_t xive_nvp_per_subpage; + uint64_t entries_per_subpage; g_string_append_printf(buf, "XIVE[%x] Source %08x .. 
%08x\n", blk, srcno0, srcno0 + nr_esbs - 1); @@ -2530,10 +2723,28 @@ void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf) g_string_append_printf(buf, "XIVE[%x] #%d NVPT %08x .. %08x\n", chip_id, blk, 0, XIVE2_NVP_COUNT - 1); - xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP); - for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) { + entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP); + for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) { while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) { xive2_nvp_pic_print_info(&nvp, i++, buf); } } + + g_string_append_printf(buf, "XIVE[%x] #%d NVGT %08x .. %08x\n", + chip_id, blk, 0, XIVE2_NVP_COUNT - 1); + entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVG); + for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) { + while (!xive2_router_get_nvgc(xrtr, false, blk, i, &nvgc)) { + xive2_nvgc_pic_print_info(&nvgc, i++, buf); + } + } + + g_string_append_printf(buf, "XIVE[%x] #%d NVCT %08x .. %08x\n", + chip_id, blk, 0, XIVE2_NVP_COUNT - 1); + entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVC); + for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) { + while (!xive2_router_get_nvgc(xrtr, true, blk, i, &nvgc)) { + xive2_nvgc_pic_print_info(&nvgc, i++, buf); + } + } } diff --git a/hw/intc/pnv_xive2_regs.h b/hw/intc/pnv_xive2_regs.h index e8b87b3d2c1..d53300f709b 100644 --- a/hw/intc/pnv_xive2_regs.h +++ b/hw/intc/pnv_xive2_regs.h @@ -66,6 +66,7 @@ #define CQ_XIVE_CFG_GEN1_TIMA_HYP_BLK0 PPC_BIT(26) /* 0 if bit[25]=0 */ #define CQ_XIVE_CFG_GEN1_TIMA_CROWD_DIS PPC_BIT(27) /* 0 if bit[25]=0 */ #define CQ_XIVE_CFG_GEN1_END_ESX PPC_BIT(28) +#define CQ_XIVE_CFG_EN_VP_GRP_PRIORITY PPC_BIT(32) /* 0 if bit[25]=1 */ #define CQ_XIVE_CFG_EN_VP_SAVE_RESTORE PPC_BIT(38) /* 0 if bit[25]=1 */ #define CQ_XIVE_CFG_EN_VP_SAVE_REST_STRICT PPC_BIT(39) /* 0 if bit[25]=1 */ diff --git a/hw/intc/ppc-uic.c b/hw/intc/ppc-uic.c index 9a67f7f6511..bc4dc90ade1 100644 --- a/hw/intc/ppc-uic.c +++ b/hw/intc/ppc-uic.c @@ -259,10 +259,9 @@ static void ppc_uic_realize(DeviceState *dev, Error **errp) qdev_init_gpio_in(dev, ppcuic_set_irq, UIC_MAX_IRQ); } -static Property ppc_uic_properties[] = { +static const Property ppc_uic_properties[] = { DEFINE_PROP_UINT32("dcr-base", PPCUIC, dcr_base, 0xc0), DEFINE_PROP_BOOL("use-vectors", PPCUIC, use_vectors, true), - DEFINE_PROP_END_OF_LIST() }; static const VMStateDescription ppc_uic_vmstate = { @@ -282,11 +281,11 @@ static const VMStateDescription ppc_uic_vmstate = { }, }; -static void ppc_uic_class_init(ObjectClass *klass, void *data) +static void ppc_uic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = ppc_uic_reset; + device_class_set_legacy_reset(dc, ppc_uic_reset); dc->realize = ppc_uic_realize; dc->vmsd = &ppc_uic_vmstate; device_class_set_props(dc, ppc_uic_properties); diff --git a/hw/intc/realview_gic.c b/hw/intc/realview_gic.c index 9b12116b2af..63e25c2a781 100644 --- a/hw/intc/realview_gic.c +++ b/hw/intc/realview_gic.c @@ -63,7 +63,7 @@ static void realview_gic_init(Object *obj) qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", 1); } -static void realview_gic_class_init(ObjectClass *oc, void *data) +static void realview_gic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c index e9f0536b1c6..4623cfa0293 100644 --- a/hw/intc/riscv_aclint.c +++ b/hw/intc/riscv_aclint.c @@ -28,6 +28,7 @@ #include "qemu/module.h" 
#include "hw/sysbus.h" #include "target/riscv/cpu.h" +#include "target/riscv/time_helper.h" #include "hw/qdev-properties.h" #include "hw/intc/riscv_aclint.h" #include "qemu/timer.h" @@ -240,6 +241,10 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr, riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), mtimer->hartid_base + i, mtimer->timecmp[i]); + riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP); + riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp, + env->htimedelta, MIP_VSTIP); + } return; } @@ -262,7 +267,7 @@ static const MemoryRegionOps riscv_aclint_mtimer_ops = { } }; -static Property riscv_aclint_mtimer_properties[] = { +static const Property riscv_aclint_mtimer_properties[] = { DEFINE_PROP_UINT32("hartid-base", RISCVAclintMTimerState, hartid_base, 0), DEFINE_PROP_UINT32("num-harts", RISCVAclintMTimerState, num_harts, 1), @@ -274,7 +279,6 @@ static Property riscv_aclint_mtimer_properties[] = { aperture_size, RISCV_ACLINT_DEFAULT_MTIMER_SIZE), DEFINE_PROP_UINT32("timebase-freq", RISCVAclintMTimerState, timebase_freq, 0), - DEFINE_PROP_END_OF_LIST(), }; static void riscv_aclint_mtimer_realize(DeviceState *dev, Error **errp) @@ -329,7 +333,7 @@ static const VMStateDescription vmstate_riscv_mtimer = { } }; -static void riscv_aclint_mtimer_class_init(ObjectClass *klass, void *data) +static void riscv_aclint_mtimer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = riscv_aclint_mtimer_realize; @@ -462,11 +466,10 @@ static const MemoryRegionOps riscv_aclint_swi_ops = { } }; -static Property riscv_aclint_swi_properties[] = { +static const Property riscv_aclint_swi_properties[] = { DEFINE_PROP_UINT32("hartid-base", RISCVAclintSwiState, hartid_base, 0), DEFINE_PROP_UINT32("num-harts", RISCVAclintSwiState, num_harts, 1), DEFINE_PROP_UINT32("sswi", RISCVAclintSwiState, sswi, false), - DEFINE_PROP_END_OF_LIST(), }; static void riscv_aclint_swi_realize(DeviceState *dev, Error **errp) @@ -511,7 +514,7 @@ static void riscv_aclint_swi_reset_enter(Object *obj, ResetType type) } } -static void riscv_aclint_swi_class_init(ObjectClass *klass, void *data) +static void riscv_aclint_swi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = riscv_aclint_swi_realize; diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c index 32edd6d07bb..a1d9fa50855 100644 --- a/hw/intc/riscv_aplic.c +++ b/hw/intc/riscv_aplic.c @@ -22,7 +22,7 @@ #include "qemu/module.h" #include "qemu/error-report.h" #include "qemu/bswap.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/sysbus.h" #include "hw/pci/msi.h" #include "hw/boards.h" @@ -30,8 +30,9 @@ #include "hw/intc/riscv_aplic.h" #include "hw/irq.h" #include "target/riscv/cpu.h" -#include "sysemu/sysemu.h" -#include "sysemu/kvm.h" +#include "system/system.h" +#include "system/kvm.h" +#include "system/tcg.h" #include "kvm/kvm_riscv.h" #include "migration/vmstate.h" @@ -154,36 +155,76 @@ * KVM AIA only supports APLIC MSI, fallback to QEMU emulation if we want to use * APLIC Wired. 
*/ -static bool is_kvm_aia(bool msimode) +bool riscv_is_kvm_aia_aplic_imsic(bool msimode) { return kvm_irqchip_in_kernel() && msimode; } +bool riscv_use_emulated_aplic(bool msimode) +{ +#ifdef CONFIG_KVM + if (tcg_enabled()) { + return true; + } + + if (!riscv_is_kvm_aia_aplic_imsic(msimode)) { + return true; + } + + return kvm_kernel_irqchip_split(); +#else + return true; +#endif +} + +void riscv_aplic_set_kvm_msicfgaddr(RISCVAPLICState *aplic, hwaddr addr) +{ +#ifdef CONFIG_KVM + if (riscv_use_emulated_aplic(aplic->msimode)) { + addr >>= APLIC_xMSICFGADDR_PPN_SHIFT; + aplic->kvm_msicfgaddr = extract64(addr, 0, 32); + aplic->kvm_msicfgaddrH = extract64(addr, 32, 32) & + APLIC_xMSICFGADDRH_VALID_MASK; + } +#endif +} + +static bool riscv_aplic_irq_rectified_val(RISCVAPLICState *aplic, + uint32_t irq) +{ + uint32_t sourcecfg, sm, raw_input, irq_inverted; + + if (!irq || aplic->num_irqs <= irq) { + return false; + } + + sourcecfg = aplic->sourcecfg[irq]; + if (sourcecfg & APLIC_SOURCECFG_D) { + return false; + } + + sm = sourcecfg & APLIC_SOURCECFG_SM_MASK; + if (sm == APLIC_SOURCECFG_SM_INACTIVE) { + return false; + } + + raw_input = (aplic->state[irq] & APLIC_ISTATE_INPUT) ? 1 : 0; + irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW || + sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0; + + return !!(raw_input ^ irq_inverted); +} + static uint32_t riscv_aplic_read_input_word(RISCVAPLICState *aplic, uint32_t word) { - uint32_t i, irq, sourcecfg, sm, raw_input, irq_inverted, ret = 0; + uint32_t i, irq, rectified_val, ret = 0; for (i = 0; i < 32; i++) { irq = word * 32 + i; - if (!irq || aplic->num_irqs <= irq) { - continue; - } - - sourcecfg = aplic->sourcecfg[irq]; - if (sourcecfg & APLIC_SOURCECFG_D) { - continue; - } - sm = sourcecfg & APLIC_SOURCECFG_SM_MASK; - if (sm == APLIC_SOURCECFG_SM_INACTIVE) { - continue; - } - - raw_input = (aplic->state[irq] & APLIC_ISTATE_INPUT) ? 1 : 0; - irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW || - sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 
1 : 0; - ret |= (raw_input ^ irq_inverted) << i; + rectified_val = riscv_aplic_irq_rectified_val(aplic, irq); + ret |= rectified_val << i; } return ret; @@ -237,9 +278,12 @@ static void riscv_aplic_set_pending(RISCVAPLICState *aplic, if ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) || (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)) { - if (!aplic->msimode || (aplic->msimode && !pending)) { + if (!aplic->msimode) { return; } + if (aplic->msimode && !pending) { + goto noskip_write_pending; + } if ((aplic->state[irq] & APLIC_ISTATE_INPUT) && (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)) { return; @@ -250,6 +294,7 @@ static void riscv_aplic_set_pending(RISCVAPLICState *aplic, } } +noskip_write_pending: riscv_aplic_set_pending_raw(aplic, irq, pending); } @@ -348,21 +393,29 @@ static void riscv_aplic_msi_send(RISCVAPLICState *aplic, uint32_t lhxs, lhxw, hhxs, hhxw, group_idx, msicfgaddr, msicfgaddrH; aplic_m = aplic; - while (aplic_m && !aplic_m->mmode) { - aplic_m = aplic_m->parent; - } - if (!aplic_m) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: m-level APLIC not found\n", - __func__); - return; + + if (!aplic->kvm_splitmode) { + while (aplic_m && !aplic_m->mmode) { + aplic_m = aplic_m->parent; + } + if (!aplic_m) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: m-level APLIC not found\n", + __func__); + return; + } } - if (aplic->mmode) { - msicfgaddr = aplic_m->mmsicfgaddr; - msicfgaddrH = aplic_m->mmsicfgaddrH; + if (aplic->kvm_splitmode) { + msicfgaddr = aplic->kvm_msicfgaddr; + msicfgaddrH = ((uint64_t)aplic->kvm_msicfgaddrH << 32); } else { - msicfgaddr = aplic_m->smsicfgaddr; - msicfgaddrH = aplic_m->smsicfgaddrH; + if (aplic->mmode) { + msicfgaddr = aplic_m->mmsicfgaddr; + msicfgaddrH = aplic_m->mmsicfgaddrH; + } else { + msicfgaddr = aplic_m->smsicfgaddr; + msicfgaddrH = aplic_m->smsicfgaddrH; + } } lhxs = (msicfgaddrH >> APLIC_xMSICFGADDRH_LHXS_SHIFT) & @@ -375,7 +428,6 @@ static void riscv_aplic_msi_send(RISCVAPLICState *aplic, APLIC_xMSICFGADDRH_HHXW_MASK; group_idx = hart_idx >> lhxw; - hart_idx &= APLIC_xMSICFGADDR_PPN_LHX_MASK(lhxw); addr = msicfgaddr; addr |= ((uint64_t)(msicfgaddrH & APLIC_xMSICFGADDRH_BAPPN_MASK)) << 32; @@ -576,7 +628,7 @@ static void riscv_aplic_request(void *opaque, int irq, int level) static uint64_t riscv_aplic_read(void *opaque, hwaddr addr, unsigned size) { - uint32_t irq, word, idc; + uint32_t irq, word, idc, sm; RISCVAPLICState *aplic = opaque; /* Reads must be 4 byte words */ @@ -644,6 +696,10 @@ static uint64_t riscv_aplic_read(void *opaque, hwaddr addr, unsigned size) } else if ((APLIC_TARGET_BASE <= addr) && (addr < (APLIC_TARGET_BASE + (aplic->num_irqs - 1) * 4))) { irq = ((addr - APLIC_TARGET_BASE) >> 2) + 1; + sm = aplic->sourcecfg[irq] & APLIC_SOURCECFG_SM_MASK; + if (sm == APLIC_SOURCECFG_SM_INACTIVE) { + return 0; + } return aplic->target[irq]; } else if (!aplic->msimode && (APLIC_IDC_BASE <= addr) && (addr < (APLIC_IDC_BASE + aplic->num_harts * APLIC_IDC_SIZE))) { @@ -702,6 +758,10 @@ static void riscv_aplic_write(void *opaque, hwaddr addr, uint64_t value, (aplic->sourcecfg[irq] == 0)) { riscv_aplic_set_pending_raw(aplic, irq, false); riscv_aplic_set_enabled_raw(aplic, irq, false); + } else { + if (riscv_aplic_irq_rectified_val(aplic, irq)) { + riscv_aplic_set_pending_raw(aplic, irq, true); + } } } else if (aplic->mmode && aplic->msimode && (addr == APLIC_MMSICFGADDR)) { @@ -838,7 +898,27 @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp) uint32_t i; RISCVAPLICState *aplic = RISCV_APLIC(dev); - if (!is_kvm_aia(aplic->msimode)) { + if 
(riscv_use_emulated_aplic(aplic->msimode)) { + /* Create output IRQ lines for non-MSI mode */ + if (!aplic->msimode) { + /* Claim the CPU interrupt to be triggered by this APLIC */ + for (i = 0; i < aplic->num_harts; i++) { + RISCVCPU *cpu; + + cpu = RISCV_CPU(cpu_by_arch_id(aplic->hartid_base + i)); + if (riscv_cpu_claim_interrupts(cpu, + (aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) { + error_report("%s already claimed", + (aplic->mmode) ? "MEIP" : "SEIP"); + exit(1); + } + } + + aplic->external_irqs = g_malloc(sizeof(qemu_irq) * + aplic->num_harts); + qdev_init_gpio_out(dev, aplic->external_irqs, aplic->num_harts); + } + aplic->bitfield_words = (aplic->num_irqs + 31) >> 5; aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs); aplic->state = g_new0(uint32_t, aplic->num_irqs); @@ -855,6 +935,10 @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp) memory_region_init_io(&aplic->mmio, OBJECT(dev), &riscv_aplic_ops, aplic, TYPE_RISCV_APLIC, aplic->aperture_size); sysbus_init_mmio(SYS_BUS_DEVICE(dev), &aplic->mmio); + + if (kvm_enabled()) { + aplic->kvm_splitmode = true; + } } /* @@ -862,34 +946,17 @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp) * have IRQ lines delegated by their parent APLIC. */ if (!aplic->parent) { - if (kvm_enabled() && is_kvm_aia(aplic->msimode)) { + if (kvm_enabled() && !riscv_use_emulated_aplic(aplic->msimode)) { qdev_init_gpio_in(dev, riscv_kvm_aplic_request, aplic->num_irqs); } else { qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs); } } - /* Create output IRQ lines for non-MSI mode */ - if (!aplic->msimode) { - aplic->external_irqs = g_malloc(sizeof(qemu_irq) * aplic->num_harts); - qdev_init_gpio_out(dev, aplic->external_irqs, aplic->num_harts); - - /* Claim the CPU interrupt to be triggered by this APLIC */ - for (i = 0; i < aplic->num_harts; i++) { - RISCVCPU *cpu = RISCV_CPU(cpu_by_arch_id(aplic->hartid_base + i)); - if (riscv_cpu_claim_interrupts(cpu, - (aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) { - error_report("%s already claimed", - (aplic->mmode) ? 
"MEIP" : "SEIP"); - exit(1); - } - } - } - msi_nonbroken = true; } -static Property riscv_aplic_properties[] = { +static const Property riscv_aplic_properties[] = { DEFINE_PROP_UINT32("aperture-size", RISCVAPLICState, aperture_size, 0), DEFINE_PROP_UINT32("hartid-base", RISCVAPLICState, hartid_base, 0), DEFINE_PROP_UINT32("num-harts", RISCVAPLICState, num_harts, 0), @@ -897,13 +964,20 @@ static Property riscv_aplic_properties[] = { DEFINE_PROP_UINT32("num-irqs", RISCVAPLICState, num_irqs, 0), DEFINE_PROP_BOOL("msimode", RISCVAPLICState, msimode, 0), DEFINE_PROP_BOOL("mmode", RISCVAPLICState, mmode, 0), - DEFINE_PROP_END_OF_LIST(), }; +static bool riscv_aplic_state_needed(void *opaque) +{ + RISCVAPLICState *aplic = opaque; + + return riscv_use_emulated_aplic(aplic->msimode); +} + static const VMStateDescription vmstate_riscv_aplic = { .name = "riscv_aplic", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 3, + .minimum_version_id = 3, + .needed = riscv_aplic_state_needed, .fields = (const VMStateField[]) { VMSTATE_UINT32(domaincfg, RISCVAPLICState), VMSTATE_UINT32(mmsicfgaddr, RISCVAPLICState), @@ -911,6 +985,8 @@ static const VMStateDescription vmstate_riscv_aplic = { VMSTATE_UINT32(smsicfgaddr, RISCVAPLICState), VMSTATE_UINT32(smsicfgaddrH, RISCVAPLICState), VMSTATE_UINT32(genmsi, RISCVAPLICState), + VMSTATE_UINT32(kvm_msicfgaddr, RISCVAPLICState), + VMSTATE_UINT32(kvm_msicfgaddrH, RISCVAPLICState), VMSTATE_VARRAY_UINT32(sourcecfg, RISCVAPLICState, num_irqs, 0, vmstate_info_uint32, uint32_t), @@ -933,7 +1009,7 @@ static const VMStateDescription vmstate_riscv_aplic = { } }; -static void riscv_aplic_class_init(ObjectClass *klass, void *data) +static void riscv_aplic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1006,17 +1082,17 @@ DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size, sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - if (!is_kvm_aia(msimode)) { + if (riscv_use_emulated_aplic(msimode)) { sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); - } - if (!msimode) { - for (i = 0; i < num_harts; i++) { - CPUState *cpu = cpu_by_arch_id(hartid_base + i); + if (!msimode) { + for (i = 0; i < num_harts; i++) { + CPUState *cpu = cpu_by_arch_id(hartid_base + i); - qdev_connect_gpio_out_named(dev, NULL, i, - qdev_get_gpio_in(DEVICE(cpu), + qdev_connect_gpio_out_named(dev, NULL, i, + qdev_get_gpio_in(DEVICE(cpu), (mmode) ? IRQ_M_EXT : IRQ_S_EXT)); + } } } diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c index b90f0d731df..6174e1a05d4 100644 --- a/hw/intc/riscv_imsic.c +++ b/hw/intc/riscv_imsic.c @@ -22,7 +22,7 @@ #include "qemu/module.h" #include "qemu/error-report.h" #include "qemu/bswap.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/sysbus.h" #include "hw/pci/msi.h" #include "hw/boards.h" @@ -31,8 +31,8 @@ #include "hw/irq.h" #include "target/riscv/cpu.h" #include "target/riscv/cpu_bits.h" -#include "sysemu/sysemu.h" -#include "sysemu/kvm.h" +#include "system/system.h" +#include "system/kvm.h" #include "migration/vmstate.h" #define IMSIC_MMIO_PAGE_LE 0x00 @@ -55,7 +55,7 @@ static uint32_t riscv_imsic_topei(RISCVIMSICState *imsic, uint32_t page) (imsic->eithreshold[page] <= imsic->num_irqs)) ? 
imsic->eithreshold[page] : imsic->num_irqs; for (i = 1; i < max_irq; i++) { - if ((imsic->eistate[base + i] & IMSIC_EISTATE_ENPEND) == + if ((qatomic_read(&imsic->eistate[base + i]) & IMSIC_EISTATE_ENPEND) == IMSIC_EISTATE_ENPEND) { return (i << IMSIC_TOPEI_IID_SHIFT) | i; } @@ -66,10 +66,24 @@ static uint32_t riscv_imsic_topei(RISCVIMSICState *imsic, uint32_t page) static void riscv_imsic_update(RISCVIMSICState *imsic, uint32_t page) { + uint32_t base = page * imsic->num_irqs; + + /* + * Lower the interrupt line if necessary, then evaluate the current + * IMSIC state. + * This sequence ensures that any race between evaluating the eistate and + * updating the interrupt line will not result in an incorrectly + * deactivated connected CPU IRQ line. + * If multiple interrupts are pending, this sequence functions identically + * to qemu_irq_pulse. + */ + + if (qatomic_fetch_and(&imsic->eistate[base], ~IMSIC_EISTATE_ENPEND)) { + qemu_irq_lower(imsic->external_irqs[page]); + } if (imsic->eidelivery[page] && riscv_imsic_topei(imsic, page)) { qemu_irq_raise(imsic->external_irqs[page]); - } else { - qemu_irq_lower(imsic->external_irqs[page]); + qatomic_or(&imsic->eistate[base], IMSIC_EISTATE_ENPEND); } } @@ -125,12 +139,11 @@ static int riscv_imsic_topei_rmw(RISCVIMSICState *imsic, uint32_t page, topei >>= IMSIC_TOPEI_IID_SHIFT; base = page * imsic->num_irqs; if (topei) { - imsic->eistate[base + topei] &= ~IMSIC_EISTATE_PENDING; + qatomic_and(&imsic->eistate[base + topei], ~IMSIC_EISTATE_PENDING); } - - riscv_imsic_update(imsic, page); } + riscv_imsic_update(imsic, page); return 0; } @@ -139,7 +152,7 @@ static int riscv_imsic_eix_rmw(RISCVIMSICState *imsic, uint32_t num, bool pend, target_ulong *val, target_ulong new_val, target_ulong wr_mask) { - uint32_t i, base; + uint32_t i, base, prev; target_ulong mask; uint32_t state = (pend) ? IMSIC_EISTATE_PENDING : IMSIC_EISTATE_ENABLED; @@ -157,10 +170,6 @@ static int riscv_imsic_eix_rmw(RISCVIMSICState *imsic, if (val) { *val = 0; - for (i = 0; i < xlen; i++) { - mask = (target_ulong)1 << i; - *val |= (imsic->eistate[base + i] & state) ? mask : 0; - } } for (i = 0; i < xlen; i++) { @@ -172,10 +181,15 @@ static int riscv_imsic_eix_rmw(RISCVIMSICState *imsic, mask = (target_ulong)1 << i; if (wr_mask & mask) { if (new_val & mask) { - imsic->eistate[base + i] |= state; + prev = qatomic_fetch_or(&imsic->eistate[base + i], state); } else { - imsic->eistate[base + i] &= ~state; + prev = qatomic_fetch_and(&imsic->eistate[base + i], ~state); } + } else { + prev = qatomic_read(&imsic->eistate[base + i]); + } + if (val && (prev & state)) { + *val |= mask; } } @@ -302,14 +316,14 @@ static void riscv_imsic_write(void *opaque, hwaddr addr, uint64_t value, page = addr >> IMSIC_MMIO_PAGE_SHIFT; if ((addr & (IMSIC_MMIO_PAGE_SZ - 1)) == IMSIC_MMIO_PAGE_LE) { if (value && (value < imsic->num_irqs)) { - imsic->eistate[(page * imsic->num_irqs) + value] |= - IMSIC_EISTATE_PENDING; + qatomic_or(&imsic->eistate[(page * imsic->num_irqs) + value], + IMSIC_EISTATE_PENDING); + + /* Update CPU external interrupt status */ + riscv_imsic_update(imsic, page); } } - /* Update CPU external interrupt status */ - riscv_imsic_update(imsic, page); - return; err: @@ -335,7 +349,19 @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp) CPUState *cpu = cpu_by_arch_id(imsic->hartid); CPURISCVState *env = cpu ? cpu_env(cpu) : NULL; + /* Claim the CPU interrupt to be triggered by this IMSIC */ + if (riscv_cpu_claim_interrupts(rcpu, + (imsic->mmode) ? 
MIP_MEIP : MIP_SEIP) < 0) { + error_setg(errp, "%s already claimed", + (imsic->mmode) ? "MEIP" : "SEIP"); + return; + } + if (!kvm_irqchip_in_kernel()) { + /* Create output IRQ lines */ + imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages); + qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages); + imsic->num_eistate = imsic->num_pages * imsic->num_irqs; imsic->eidelivery = g_new0(uint32_t, imsic->num_pages); imsic->eithreshold = g_new0(uint32_t, imsic->num_pages); @@ -347,18 +373,6 @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp) IMSIC_MMIO_SIZE(imsic->num_pages)); sysbus_init_mmio(SYS_BUS_DEVICE(dev), &imsic->mmio); - /* Claim the CPU interrupt to be triggered by this IMSIC */ - if (riscv_cpu_claim_interrupts(rcpu, - (imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) { - error_setg(errp, "%s already claimed", - (imsic->mmode) ? "MEIP" : "SEIP"); - return; - } - - /* Create output IRQ lines */ - imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages); - qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages); - /* Force select AIA feature and setup CSR read-modify-write callback */ if (env) { if (!imsic->mmode) { @@ -367,25 +381,33 @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp) } else { rcpu->cfg.ext_smaia = true; } - riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S, - riscv_imsic_rmw, imsic); + + if (!kvm_irqchip_in_kernel()) { + riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S, + riscv_imsic_rmw, imsic); + } } msi_nonbroken = true; } -static Property riscv_imsic_properties[] = { +static const Property riscv_imsic_properties[] = { DEFINE_PROP_BOOL("mmode", RISCVIMSICState, mmode, 0), DEFINE_PROP_UINT32("hartid", RISCVIMSICState, hartid, 0), DEFINE_PROP_UINT32("num-pages", RISCVIMSICState, num_pages, 0), DEFINE_PROP_UINT32("num-irqs", RISCVIMSICState, num_irqs, 0), - DEFINE_PROP_END_OF_LIST(), }; +static bool riscv_imsic_state_needed(void *opaque) +{ + return !kvm_irqchip_in_kernel(); +} + static const VMStateDescription vmstate_riscv_imsic = { .name = "riscv_imsic", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, + .needed = riscv_imsic_state_needed, .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState, num_pages, 0, @@ -400,7 +422,7 @@ static const VMStateDescription vmstate_riscv_imsic = { } }; -static void riscv_imsic_class_init(ObjectClass *klass, void *data) +static void riscv_imsic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -451,15 +473,17 @@ DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode, sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); - for (i = 0; i < num_pages; i++) { - if (!i) { - qdev_connect_gpio_out_named(dev, NULL, i, - qdev_get_gpio_in(DEVICE(cpu), + if (!kvm_irqchip_in_kernel()) { + for (i = 0; i < num_pages; i++) { + if (!i) { + qdev_connect_gpio_out_named(dev, NULL, i, + qdev_get_gpio_in(DEVICE(cpu), (mmode) ? 
IRQ_M_EXT : IRQ_S_EXT)); - } else { - qdev_connect_gpio_out_named(dev, NULL, i, - qdev_get_gpio_in(DEVICE(cpu), + } else { + qdev_connect_gpio_out_named(dev, NULL, i, + qdev_get_gpio_in(DEVICE(cpu), IRQ_LOCAL_MAX + i - 1)); + } } } diff --git a/hw/intc/rx_icu.c b/hw/intc/rx_icu.c index b2d4338f612..f8615527b75 100644 --- a/hw/intc/rx_icu.c +++ b/hw/intc/rx_icu.c @@ -361,15 +361,14 @@ static const VMStateDescription vmstate_rxicu = { } }; -static Property rxicu_properties[] = { +static const Property rxicu_properties[] = { DEFINE_PROP_ARRAY("ipr-map", RXICUState, nr_irqs, map, qdev_prop_uint8, uint8_t), DEFINE_PROP_ARRAY("trigger-level", RXICUState, nr_sense, init_sense, qdev_prop_uint8, uint8_t), - DEFINE_PROP_END_OF_LIST(), }; -static void rxicu_class_init(ObjectClass *klass, void *data) +static void rxicu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c index a91a4a47e82..8f4c9fd52e8 100644 --- a/hw/intc/s390_flic.c +++ b/hw/intc/s390_flic.c @@ -445,19 +445,18 @@ static void qemu_s390_flic_instance_init(Object *obj) } } -static Property qemu_s390_flic_properties[] = { +static const Property qemu_s390_flic_properties[] = { DEFINE_PROP_BOOL("migrate-all-state", QEMUS390FLICState, migrate_all_state, true), - DEFINE_PROP_END_OF_LIST(), }; -static void qemu_s390_flic_class_init(ObjectClass *oc, void *data) +static void qemu_s390_flic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc); device_class_set_props(dc, qemu_s390_flic_properties); - dc->reset = qemu_s390_flic_reset; + device_class_set_legacy_reset(dc, qemu_s390_flic_reset); dc->vmsd = &qemu_s390_flic_vmstate; fsc->register_io_adapter = qemu_s390_register_io_adapter; fsc->io_adapter_map = qemu_s390_io_adapter_map; @@ -471,33 +470,17 @@ static void qemu_s390_flic_class_init(ObjectClass *oc, void *data) fsc->inject_crw_mchk = qemu_s390_inject_crw_mchk; } -static Property s390_flic_common_properties[] = { - DEFINE_PROP_UINT32("adapter_routes_max_batch", S390FLICState, - adapter_routes_max_batch, ADAPTER_ROUTES_MAX_GSI), - DEFINE_PROP_BOOL("migration-enabled", S390FLICState, - migration_enabled, true), - DEFINE_PROP_END_OF_LIST(), -}; - static void s390_flic_common_realize(DeviceState *dev, Error **errp) { S390FLICState *fs = S390_FLIC_COMMON(dev); - uint32_t max_batch = fs->adapter_routes_max_batch; - - if (max_batch > ADAPTER_ROUTES_MAX_GSI) { - error_setg(errp, "flic property adapter_routes_max_batch too big" - " (%d > %d)", max_batch, ADAPTER_ROUTES_MAX_GSI); - return; - } fs->ais_supported = s390_has_feat(S390_FEAT_ADAPTER_INT_SUPPRESSION); } -static void s390_flic_class_init(ObjectClass *oc, void *data) +static void s390_flic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); - device_class_set_props(dc, s390_flic_common_properties); dc->realize = s390_flic_common_realize; } @@ -526,18 +509,10 @@ static void qemu_s390_flic_register_types(void) type_init(qemu_s390_flic_register_types) -static bool adapter_info_so_needed(void *opaque) -{ - S390FLICState *fs = s390_get_flic(); - - return fs->migration_enabled; -} - const VMStateDescription vmstate_adapter_info_so = { .name = "s390_adapter_info/summary_offset", .version_id = 1, .minimum_version_id = 1, - .needed = adapter_info_so_needed, .fields = (const VMStateField[]) { VMSTATE_UINT32(summary_offset, AdapterInfo), VMSTATE_END_OF_LIST() diff --git a/hw/intc/s390_flic_kvm.c 
b/hw/intc/s390_flic_kvm.c index 330f08dfdc2..f833a3996a6 100644 --- a/hw/intc/s390_flic_kvm.c +++ b/hw/intc/s390_flic_kvm.c @@ -16,7 +16,7 @@ #include "qemu/error-report.h" #include "qemu/module.h" #include "qapi/error.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "hw/s390x/s390_flic.h" #include "hw/s390x/adapter.h" #include "hw/s390x/css.h" @@ -670,7 +670,7 @@ static void kvm_s390_flic_reset(DeviceState *dev) flic_enable_pfault(flic); } -static void kvm_s390_flic_class_init(ObjectClass *oc, void *data) +static void kvm_s390_flic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc); @@ -679,7 +679,7 @@ static void kvm_s390_flic_class_init(ObjectClass *oc, void *data) device_class_set_parent_realize(dc, kvm_s390_flic_realize, &kfsc->parent_realize); dc->vmsd = &kvm_s390_flic_vmstate; - dc->reset = kvm_s390_flic_reset; + device_class_set_legacy_reset(dc, kvm_s390_flic_reset); fsc->register_io_adapter = kvm_s390_register_io_adapter; fsc->io_adapter_map = kvm_s390_io_adapter_map; fsc->add_adapter_routes = kvm_s390_add_adapter_routes; diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c index e559f118052..3160b216fdc 100644 --- a/hw/intc/sifive_plic.c +++ b/hw/intc/sifive_plic.c @@ -30,7 +30,7 @@ #include "target/riscv/cpu.h" #include "migration/vmstate.h" #include "hw/irq.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" static bool addr_between(uint32_t addr, uint32_t base, uint32_t num) { @@ -189,8 +189,13 @@ static void sifive_plic_write(void *opaque, hwaddr addr, uint64_t value, if (addr_between(addr, plic->priority_base, plic->num_sources << 2)) { uint32_t irq = (addr - plic->priority_base) >> 2; - - if (((plic->num_priorities + 1) & plic->num_priorities) == 0) { + if (irq == 0) { + /* IRQ 0 source priority is reserved */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid source priority write 0x%" + HWADDR_PRIx "\n", __func__, addr); + return; + } else if (((plic->num_priorities + 1) & plic->num_priorities) == 0) { /* * if "num_priorities + 1" is power-of-2, make each register bit of * interrupt priority WARL (Write-Any-Read-Legal). 
Just filter @@ -349,8 +354,10 @@ static void sifive_plic_irq_request(void *opaque, int irq, int level) { SiFivePLICState *s = opaque; - sifive_plic_set_pending(s, irq, level > 0); - sifive_plic_update(s); + if (level > 0) { + sifive_plic_set_pending(s, irq, true); + sifive_plic_update(s); + } } static void sifive_plic_realize(DeviceState *dev, Error **errp) @@ -423,7 +430,7 @@ static const VMStateDescription vmstate_sifive_plic = { } }; -static Property sifive_plic_properties[] = { +static const Property sifive_plic_properties[] = { DEFINE_PROP_STRING("hart-config", SiFivePLICState, hart_config), DEFINE_PROP_UINT32("hartid-base", SiFivePLICState, hartid_base, 0), /* number of interrupt sources including interrupt source 0 */ @@ -437,14 +444,13 @@ static Property sifive_plic_properties[] = { DEFINE_PROP_UINT32("context-base", SiFivePLICState, context_base, 0), DEFINE_PROP_UINT32("context-stride", SiFivePLICState, context_stride, 0), DEFINE_PROP_UINT32("aperture-size", SiFivePLICState, aperture_size, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void sifive_plic_class_init(ObjectClass *klass, void *data) +static void sifive_plic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = sifive_plic_reset; + device_class_set_legacy_reset(dc, sifive_plic_reset); device_class_set_props(dc, sifive_plic_properties); dc->realize = sifive_plic_realize; dc->vmsd = &vmstate_sifive_plic; diff --git a/hw/intc/slavio_intctl.c b/hw/intc/slavio_intctl.c index d6e49d29aad..00b80bb177c 100644 --- a/hw/intc/slavio_intctl.c +++ b/hw/intc/slavio_intctl.c @@ -441,12 +441,12 @@ static void slavio_intctl_init(Object *obj) } } -static void slavio_intctl_class_init(ObjectClass *klass, void *data) +static void slavio_intctl_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass); - dc->reset = slavio_intctl_reset; + device_class_set_legacy_reset(dc, slavio_intctl_reset); dc->vmsd = &vmstate_intctl; #ifdef DEBUG_IRQ_COUNT ic->get_statistics = slavio_intctl_get_statistics; @@ -460,7 +460,7 @@ static const TypeInfo slavio_intctl_info = { .instance_size = sizeof(SLAVIO_INTCTLState), .instance_init = slavio_intctl_init, .class_init = slavio_intctl_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_INTERRUPT_STATS_PROVIDER }, { } }, diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 283a6b8fd24..e393f5dcdcc 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -1,10 +1,9 @@ /* * QEMU PowerPC sPAPR XIVE interrupt controller model * - * Copyright (c) 2017-2018, IBM Corporation. + * Copyright (c) 2017-2024, IBM Corporation. * - * This code is licensed under the GPL version 2 or later. See the - * COPYING file in the top-level directory. 
+ * SPDX-License-Identifier: GPL-2.0-or-later */ #include "qemu/osdep.h" @@ -13,8 +12,8 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "target/ppc/cpu.h" -#include "sysemu/cpus.h" -#include "sysemu/reset.h" +#include "system/cpus.h" +#include "system/reset.h" #include "migration/vmstate.h" #include "hw/ppc/fdt.h" #include "hw/ppc/spapr.h" @@ -429,13 +428,13 @@ static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, g_assert_not_reached(); } -static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool cam_ignore, uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match) +static bool spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, + uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) { CPUState *cs; - int count = 0; CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); @@ -463,16 +462,17 @@ static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format, if (match->tctx) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread " "context NVT %x/%x\n", nvt_blk, nvt_idx); - return -1; + match->count++; + continue; } match->ring = ring; match->tctx = tctx; - count++; + match->count++; } } - return count; + return !!match->count; } static uint32_t spapr_xive_presenter_get_config(XivePresenter *xptr) @@ -627,13 +627,12 @@ static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn) xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID); } -static Property spapr_xive_properties[] = { +static const Property spapr_xive_properties[] = { DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0), DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0), DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE), DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE), DEFINE_PROP_UINT8("hv-prio", SpaprXive, hv_prio, 7), - DEFINE_PROP_END_OF_LIST(), }; static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc, @@ -810,7 +809,7 @@ static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr) return spapr_xive_in_kernel(SPAPR_XIVE(xptr)); } -static void spapr_xive_class_init(ObjectClass *klass, void *data) +static void spapr_xive_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); @@ -857,7 +856,7 @@ static const TypeInfo spapr_xive_info = { .instance_size = sizeof(SpaprXive), .class_init = spapr_xive_class_init, .class_size = sizeof(SpaprXiveClass), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_SPAPR_INTC }, { } }, diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c index 5789062379c..26d30b41c15 100644 --- a/hw/intc/spapr_xive_kvm.c +++ b/hw/intc/spapr_xive_kvm.c @@ -12,9 +12,9 @@ #include "qemu/error-report.h" #include "qapi/error.h" #include "target/ppc/cpu.h" -#include "sysemu/cpus.h" -#include "sysemu/kvm.h" -#include "sysemu/runstate.h" +#include "system/cpus.h" +#include "system/kvm.h" +#include "system/runstate.h" #include "hw/ppc/spapr.h" #include "hw/ppc/spapr_cpu_core.h" #include "hw/ppc/spapr_xive.h" @@ -720,7 +720,7 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers, { SpaprXive *xive = SPAPR_XIVE(intc); XiveSource *xsrc = &xive->source; - size_t esb_len = xive_source_esb_len(xsrc); + uint64_t esb_len = xive_source_esb_len(xsrc); size_t tima_len = 4ull << TM_SHIFT; CPUState *cs; int fd; @@ -824,7 +824,7 @@ void 
kvmppc_xive_disconnect(SpaprInterruptController *intc) { SpaprXive *xive = SPAPR_XIVE(intc); XiveSource *xsrc; - size_t esb_len; + uint64_t esb_len; assert(xive->fd != -1); diff --git a/hw/intc/trace-events b/hw/intc/trace-events index 3dcf1471983..018c609ca5e 100644 --- a/hw/intc/trace-events +++ b/hw/intc/trace-events @@ -80,18 +80,19 @@ aspeed_vic_update_irq(int flags) "Raising IRQ: %d" aspeed_vic_read(uint64_t offset, unsigned size, uint32_t value) "From 0x%" PRIx64 " of size %u: 0x%" PRIx32 aspeed_vic_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32 # aspeed_intc.c -aspeed_intc_read(uint64_t offset, unsigned size, uint32_t value) "From 0x%" PRIx64 " of size %u: 0x%" PRIx32 -aspeed_intc_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32 -aspeed_intc_set_irq(int irq, int level) "Set IRQ %d: %d" -aspeed_intc_clear_irq(int irq, int level) "Clear IRQ %d: %d" -aspeed_intc_update_irq(int irq, int level) "Update IRQ: %d: %d" -aspeed_intc_pending_irq(int irq, uint32_t value) "Pending IRQ: %d: 0x%x" -aspeed_intc_trigger_irq(int irq, uint32_t value) "Trigger IRQ: %d: 0x%x" -aspeed_intc_all_isr_done(int irq) "All source ISR execution are done: %d" -aspeed_intc_enable(uint32_t value) "Enable: 0x%x" -aspeed_intc_select(uint32_t value) "Select: 0x%x" -aspeed_intc_mask(uint32_t change, uint32_t value) "Mask: 0x%x: 0x%x" -aspeed_intc_unmask(uint32_t change, uint32_t value) "UnMask: 0x%x: 0x%x" +aspeed_intc_read(const char *s, uint64_t offset, unsigned size, uint32_t value) "%s: From 0x%" PRIx64 " of size %u: 0x%" PRIx32 +aspeed_intc_write(const char *s, uint64_t offset, unsigned size, uint32_t data) "%s: To 0x%" PRIx64 " of size %u: 0x%" PRIx32 +aspeed_intc_set_irq(const char *s, int inpin_idx, int level) "%s: Set IRQ %d: %d" +aspeed_intc_clear_irq(const char *s, int inpin_idx, int outpin_idx, int level) "%s: Clear IRQ %d-%d: %d" +aspeed_intc_update_irq(const char *s, int inpin_idx, int outpin_idx, int level) "%s: Update IRQ: %d-%d: %d" +aspeed_intc_pending_irq(const char *s, int inpin_idx, uint32_t value) "%s: Pending IRQ: %d: 0x%x" +aspeed_intc_trigger_irq(const char *s, int inpin_idx, int outpin_idx, uint32_t value) "%s: Trigger IRQ: %d-%d: 0x%x" +aspeed_intc_all_isr_done(const char *s, int inpin_idx) "%s: All source ISR execution are done: %d" +aspeed_intc_enable(const char *s, uint32_t value) "%s: Enable: 0x%x" +aspeed_intc_select(const char *s, uint32_t value) "%s: Select: 0x%x" +aspeed_intc_mask(const char *s, uint32_t change, uint32_t value) "%s: Mask: 0x%x: 0x%x" +aspeed_intc_unmask(const char *s, uint32_t change, uint32_t value) "%s: UnMask: 0x%x: 0x%x" +aspeed_intc_all_isr_done_bit(const char *s, int inpin_idx, int bit) "%s: All source ISR execution are done from specific bit: %d-%d" # arm_gic.c gic_enable_irq(int irq) "irq %d enabled" @@ -273,18 +274,28 @@ kvm_xive_cpu_connect(uint32_t id) "connect CPU%d to KVM device" kvm_xive_source_reset(uint32_t srcno) "IRQ 0x%x" # xive.c -xive_tctx_accept(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x ACK" -xive_tctx_notify(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x raise !" 
-xive_tctx_set_cppr(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x new CPPR=0x%02x NSR=0x%02x" +xive_tctx_accept(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x ACK" +xive_tctx_notify(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x raise !" +xive_tctx_set_cppr(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x new CPPR=0x%02x NSR=0x%02x" xive_source_esb_read(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64 xive_source_esb_write(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64 +xive_source_notify(uint32_t srcno) "Processing notification for queued IRQ 0x%x" +xive_source_blocked(uint32_t srcno) "No action needed for IRQ 0x%x currently" xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "END 0x%02x/0x%04x -> enqueue 0x%08x" xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x" xive_tctx_tm_write(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64 xive_tctx_tm_read(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64 -xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring) "found NVT 0x%x/0x%x ring=0x%x" +xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring, uint8_t group_level) "found NVT 0x%x/0x%x ring=0x%x group_level=%d" xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x%"PRIx64 +# xive2.c +xive_nvp_backlog_op(uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint8_t rc) "NVP 0x%x/0x%x operation=%d priority=%d rc=%d" +xive_nvgc_backlog_op(bool c, uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint32_t rc) "NVGC crowd=%d 0x%x/0x%x operation=%d priority=%d rc=%d" +xive_redistribute(uint32_t index, uint8_t ring, uint8_t end_blk, uint32_t end_idx) "Redistribute from target=%d ring=0x%x NVP 0x%x/0x%x" +xive_end_enqueue(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "Queue event for END 0x%x/0x%x data=0x%x" +xive_escalate_end(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t esc_data) "Escalate from END 0x%x/0x%x to END 0x%x/0x%x data=0x%x" +xive_escalate_esb(uint8_t end_blk, uint32_t end_idx, uint32_t lisn) "Escalate from END 0x%x/0x%x to LISN=0x%x" + # pnv_xive.c pnv_xive_ic_hw_trigger(uint64_t addr, uint64_t val) "@0x%"PRIx64" val=0x%"PRIx64 @@ -309,12 +320,8 @@ loongson_ipi_read(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x loongson_ipi_write(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%"PRIx64 # loongarch_pch_pic.c loongarch_pch_pic_irq_handler(int irq, int level) "irq %d level %d" -loongarch_pch_pic_low_readw(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 -loongarch_pch_pic_low_writew(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 -loongarch_pch_pic_high_readw(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" 
PRIx64 -loongarch_pch_pic_high_writew(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 -loongarch_pch_pic_readb(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 -loongarch_pch_pic_writeb(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 +loongarch_pch_pic_read(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 +loongarch_pch_pic_write(unsigned size, uint64_t addr, uint64_t val) "size: %u addr: 0x%"PRIx64 "val: 0x%" PRIx64 # loongarch_pch_msi.c loongarch_msi_set_irq(int irq_num) "set msi irq %d" diff --git a/hw/intc/xics.c b/hw/intc/xics.c index 6f4d5271ea0..d9a199e8834 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -37,8 +37,8 @@ #include "migration/vmstate.h" #include "hw/intc/intc.h" #include "hw/irq.h" -#include "sysemu/kvm.h" -#include "sysemu/reset.h" +#include "system/kvm.h" +#include "system/reset.h" #include "target/ppc/cpu.h" void icp_pic_print_info(ICPState *icp, GString *buf) @@ -335,22 +335,6 @@ static void icp_realize(DeviceState *dev, Error **errp) return; } } - /* - * The way that pre_2_10_icp is handling is really, really hacky. - * We used to have here this call: - * - * vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp); - * - * But we were doing: - * pre_2_10_vmstate_register_dummy_icp() - * this vmstate_register() - * pre_2_10_vmstate_unregister_dummy_icp() - * - * So for a short amount of time we had to vmstate entries with - * the same name. This fixes it. - */ - vmstate_replace_hack_for_ppc(NULL, icp->cs->cpu_index, - &vmstate_icp_server, icp); } static void icp_unrealize(DeviceState *dev) @@ -360,14 +344,13 @@ static void icp_unrealize(DeviceState *dev) vmstate_unregister(NULL, &vmstate_icp_server, icp); } -static Property icp_properties[] = { +static const Property icp_properties[] = { DEFINE_PROP_LINK(ICP_PROP_XICS, ICPState, xics, TYPE_XICS_FABRIC, XICSFabric *), DEFINE_PROP_LINK(ICP_PROP_CPU, ICPState, cs, TYPE_CPU, CPUState *), - DEFINE_PROP_END_OF_LIST(), }; -static void icp_class_init(ObjectClass *klass, void *data) +static void icp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -692,14 +675,13 @@ static const VMStateDescription vmstate_ics = { }, }; -static Property ics_properties[] = { +static const Property ics_properties[] = { DEFINE_PROP_UINT32("nr-irqs", ICSState, nr_irqs, 0), DEFINE_PROP_LINK(ICS_PROP_XICS, ICSState, xics, TYPE_XICS_FABRIC, XICSFabric *), - DEFINE_PROP_END_OF_LIST(), }; -static void ics_class_init(ObjectClass *klass, void *data) +static void ics_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c index 9719d98a179..ee72969f5f1 100644 --- a/hw/intc/xics_kvm.c +++ b/hw/intc/xics_kvm.c @@ -28,7 +28,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "trace.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "hw/ppc/spapr.h" #include "hw/ppc/spapr_cpu_core.h" #include "hw/ppc/xics.h" diff --git a/hw/intc/xics_pnv.c b/hw/intc/xics_pnv.c index 753c067f172..ff602d9a346 100644 --- a/hw/intc/xics_pnv.c +++ b/hw/intc/xics_pnv.c @@ -176,7 +176,7 @@ static void pnv_icp_realize(DeviceState *dev, Error **errp) icp, "icp-thread", 0x1000); } -static void pnv_icp_class_init(ObjectClass *klass, void *data) +static void pnv_icp_class_init(ObjectClass *klass, const void *data) { 
DeviceClass *dc = DEVICE_CLASS(klass); ICPStateClass *icpc = ICP_CLASS(klass); diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c index a0d97bdefed..7663596a4a4 100644 --- a/hw/intc/xics_spapr.c +++ b/hw/intc/xics_spapr.c @@ -436,7 +436,7 @@ static void xics_spapr_deactivate(SpaprInterruptController *intc) } } -static void ics_spapr_class_init(ObjectClass *klass, void *data) +static void ics_spapr_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ICSStateClass *isc = ICS_CLASS(klass); @@ -461,7 +461,7 @@ static const TypeInfo ics_spapr_info = { .name = TYPE_ICS_SPAPR, .parent = TYPE_ICS, .class_init = ics_spapr_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_SPAPR_INTC }, { } }, diff --git a/hw/intc/xilinx_intc.c b/hw/intc/xilinx_intc.c index 6e5012e66eb..5257ad54b17 100644 --- a/hw/intc/xilinx_intc.c +++ b/hw/intc/xilinx_intc.c @@ -3,6 +3,9 @@ * * Copyright (c) 2009 Edgar E. Iglesias. * + * https://docs.amd.com/v/u/en-US/xps_intc + * DS572: LogiCORE IP XPS Interrupt Controller (v2.01a) + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights @@ -23,10 +26,12 @@ */ #include "qemu/osdep.h" +#include "qapi/error.h" #include "hw/sysbus.h" #include "qemu/module.h" #include "hw/irq.h" #include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "qom/object.h" #define D(x) @@ -49,6 +54,7 @@ struct XpsIntc { SysBusDevice parent_obj; + EndianMode model_endianness; MemoryRegion mmio; qemu_irq parent_irq; @@ -140,14 +146,28 @@ static void pic_write(void *opaque, hwaddr addr, update_irq(p); } -static const MemoryRegionOps pic_ops = { - .read = pic_read, - .write = pic_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4 - } +static const MemoryRegionOps pic_ops[2] = { + [0 ... 1] = { + .read = pic_read, + .write = pic_write, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .valid = { + /* + * All XPS INTC registers are accessed through the PLB interface. + * The base address for these registers is provided by the + * configuration parameter, C_BASEADDR. Each register is 32 bits + * although some bits may be unused and is accessed on a 4-byte + * boundary offset from the base address. 
+ */ + .min_access_size = 4, + .max_access_size = 4, + }, + }, + [0].endianness = DEVICE_LITTLE_ENDIAN, + [1].endianness = DEVICE_BIG_ENDIAN, }; static void irq_handler(void *opaque, int irq, int level) @@ -170,21 +190,35 @@ static void xilinx_intc_init(Object *obj) qdev_init_gpio_in(DEVICE(obj), irq_handler, 32); sysbus_init_irq(SYS_BUS_DEVICE(obj), &p->parent_irq); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &p->mmio); +} - memory_region_init_io(&p->mmio, obj, &pic_ops, p, "xlnx.xps-intc", +static void xilinx_intc_realize(DeviceState *dev, Error **errp) +{ + XpsIntc *p = XILINX_INTC(dev); + + if (p->model_endianness == ENDIAN_MODE_UNSPECIFIED) { + error_setg(errp, TYPE_XILINX_INTC " property 'endianness'" + " must be set to 'big' or 'little'"); + return; + } + + memory_region_init_io(&p->mmio, OBJECT(dev), + &pic_ops[p->model_endianness == ENDIAN_MODE_BIG], + p, "xlnx.xps-intc", R_MAX * 4); - sysbus_init_mmio(SYS_BUS_DEVICE(obj), &p->mmio); } -static Property xilinx_intc_properties[] = { +static const Property xilinx_intc_properties[] = { + DEFINE_PROP_ENDIAN_NODEFAULT("endianness", XpsIntc, model_endianness), DEFINE_PROP_UINT32("kind-of-intr", XpsIntc, c_kind_of_intr, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void xilinx_intc_class_init(ObjectClass *klass, void *data) +static void xilinx_intc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = xilinx_intc_realize; device_class_set_props(dc, xilinx_intc_properties); } diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 5a02dd8e02e..e0ffcf89ebf 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -3,8 +3,7 @@ * * Copyright (c) 2017-2018, IBM Corporation. * - * This code is licensed under the GPL version 2 or later. See the - * COPYING file in the top-level directory. + * SPDX-License-Identifier: GPL-2.0-or-later */ #include "qemu/osdep.h" @@ -12,9 +11,9 @@ #include "qemu/module.h" #include "qapi/error.h" #include "target/ppc/cpu.h" -#include "sysemu/cpus.h" -#include "sysemu/dma.h" -#include "sysemu/reset.h" +#include "system/cpus.h" +#include "system/dma.h" +#include "system/reset.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "hw/irq.h" @@ -26,29 +25,59 @@ /* * XIVE Thread Interrupt Management context */ - -/* - * Convert an Interrupt Pending Buffer (IPB) register to a Pending - * Interrupt Priority Register (PIPR), which contains the priority of - * the most favored pending notification. - */ -static uint8_t ipb_to_pipr(uint8_t ibp) +bool xive_ring_valid(XiveTCTX *tctx, uint8_t ring) { - return ibp ? 
clz32((uint32_t)ibp << 24) : 0xff; + uint8_t cur_ring; + + for (cur_ring = ring; cur_ring <= TM_QW3_HV_PHYS; + cur_ring += XIVE_TM_RING_SIZE) { + if (!(tctx->regs[cur_ring + TM_WORD2] & 0x80)) { + return false; + } + } + return true; } -static uint8_t exception_mask(uint8_t ring) +bool xive_nsr_indicates_exception(uint8_t ring, uint8_t nsr) { switch (ring) { case TM_QW1_OS: - return TM_QW1_NSR_EO; + return !!(nsr & TM_QW1_NSR_EO); + case TM_QW2_HV_POOL: case TM_QW3_HV_PHYS: - return TM_QW3_NSR_HE; + return !!(nsr & TM_QW3_NSR_HE); default: g_assert_not_reached(); } } +bool xive_nsr_indicates_group_exception(uint8_t ring, uint8_t nsr) +{ + if ((nsr & TM_NSR_GRP_LVL) > 0) { + g_assert(xive_nsr_indicates_exception(ring, nsr)); + return true; + } + return false; +} + +uint8_t xive_nsr_exception_ring(uint8_t ring, uint8_t nsr) +{ + /* NSR determines if pool/phys ring is for phys or pool interrupt */ + if ((ring == TM_QW3_HV_PHYS) || (ring == TM_QW2_HV_POOL)) { + uint8_t he = (nsr & TM_QW3_NSR_HE) >> 6; + + if (he == TM_QW3_NSR_HE_PHYS) { + return TM_QW3_HV_PHYS; + } else if (he == TM_QW3_NSR_HE_POOL) { + return TM_QW2_HV_POOL; + } else { + /* Don't support LSI mode */ + g_assert_not_reached(); + } + } + return ring; +} + static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring) { switch (ring) { @@ -64,92 +93,177 @@ static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring) } } -static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) +/* + * interrupt is accepted on the presentation ring, for PHYS ring the NSR + * directs it to the PHYS or POOL rings. + */ +uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t sig_ring) { - uint8_t *regs = &tctx->regs[ring]; - uint8_t nsr = regs[TM_NSR]; - uint8_t mask = exception_mask(ring); + uint8_t *sig_regs = &tctx->regs[sig_ring]; + uint8_t nsr = sig_regs[TM_NSR]; - qemu_irq_lower(xive_tctx_output(tctx, ring)); + g_assert(sig_ring == TM_QW1_OS || sig_ring == TM_QW3_HV_PHYS); - if (regs[TM_NSR] & mask) { - uint8_t cppr = regs[TM_PIPR]; + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0); - regs[TM_CPPR] = cppr; + if (xive_nsr_indicates_exception(sig_ring, nsr)) { + uint8_t cppr = sig_regs[TM_PIPR]; + uint8_t ring; + uint8_t *regs; - /* Reset the pending buffer bit */ - regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); - regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); + ring = xive_nsr_exception_ring(sig_ring, nsr); + regs = &tctx->regs[ring]; - /* Drop Exception bit */ - regs[TM_NSR] &= ~mask; + sig_regs[TM_CPPR] = cppr; + + /* + * If the interrupt was for a specific VP, reset the pending + * buffer bit, otherwise clear the logical server indicator + */ + if (!xive_nsr_indicates_group_exception(sig_ring, nsr)) { + regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); + } + + /* Clear the exception from NSR */ + sig_regs[TM_NSR] = 0; + qemu_irq_lower(xive_tctx_output(tctx, sig_ring)); trace_xive_tctx_accept(tctx->cs->cpu_index, ring, - regs[TM_IPB], regs[TM_PIPR], - regs[TM_CPPR], regs[TM_NSR]); + regs[TM_IPB], sig_regs[TM_PIPR], + sig_regs[TM_CPPR], sig_regs[TM_NSR]); } - return (nsr << 8) | regs[TM_CPPR]; + return ((uint64_t)nsr << 8) | sig_regs[TM_CPPR]; } -static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring) +/* Change PIPR and calculate NSR and irq based on PIPR, CPPR, group */ +void xive_tctx_pipr_set(XiveTCTX *tctx, uint8_t ring, uint8_t pipr, + uint8_t group_level) { + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); uint8_t *regs = 
&tctx->regs[ring]; - if (regs[TM_PIPR] < regs[TM_CPPR]) { + g_assert(!xive_nsr_indicates_group_exception(ring, sig_regs[TM_NSR])); + + sig_regs[TM_PIPR] = pipr; + + if (pipr < sig_regs[TM_CPPR]) { switch (ring) { case TM_QW1_OS: - regs[TM_NSR] |= TM_QW1_NSR_EO; + sig_regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F); + break; + case TM_QW2_HV_POOL: + sig_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F); break; case TM_QW3_HV_PHYS: - regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6); + sig_regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F); break; default: g_assert_not_reached(); } trace_xive_tctx_notify(tctx->cs->cpu_index, ring, - regs[TM_IPB], regs[TM_PIPR], - regs[TM_CPPR], regs[TM_NSR]); + regs[TM_IPB], pipr, + sig_regs[TM_CPPR], sig_regs[TM_NSR]); qemu_irq_raise(xive_tctx_output(tctx, ring)); + } else { + sig_regs[TM_NSR] = 0; + qemu_irq_lower(xive_tctx_output(tctx, ring)); } } -void xive_tctx_reset_os_signal(XiveTCTX *tctx) +void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring) { /* - * Lower the External interrupt. Used when pulling an OS - * context. It is necessary to avoid catching it in the hypervisor - * context. It should be raised again when re-pushing the OS - * context. + * Lower the External interrupt. Used when pulling a context. It is + * necessary to avoid catching it in the higher privilege context. It + * should be raised again when re-pushing the lower privilege context. */ - qemu_irq_lower(xive_tctx_output(tctx, TM_QW1_OS)); + qemu_irq_lower(xive_tctx_output(tctx, ring)); } static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) { - uint8_t *regs = &tctx->regs[ring]; + uint8_t *sig_regs = &tctx->regs[ring]; + uint8_t pipr_min; + uint8_t ring_min; + + g_assert(ring == TM_QW1_OS || ring == TM_QW3_HV_PHYS); + + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0); + /* XXX: should show pool IPB for PHYS ring */ trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, - regs[TM_IPB], regs[TM_PIPR], - cppr, regs[TM_NSR]); + sig_regs[TM_IPB], sig_regs[TM_PIPR], + cppr, sig_regs[TM_NSR]); if (cppr > XIVE_PRIORITY_MAX) { cppr = 0xff; } - tctx->regs[ring + TM_CPPR] = cppr; + sig_regs[TM_CPPR] = cppr; + + /* + * Recompute the PIPR based on local pending interrupts. The PHYS + * ring must take the minimum of both the PHYS and POOL PIPR values. + */ + pipr_min = xive_ipb_to_pipr(sig_regs[TM_IPB]); + ring_min = ring; + + /* PHYS updates also depend on POOL values */ + if (ring == TM_QW3_HV_PHYS) { + uint8_t *pool_regs = &tctx->regs[TM_QW2_HV_POOL]; + + /* POOL values only matter if POOL ctx is valid */ + if (pool_regs[TM_WORD2] & 0x80) { + uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]); + + /* + * Determine highest priority interrupt and + * remember which ring has it. 
+ */ + if (pool_pipr < pipr_min) { + pipr_min = pool_pipr; + ring_min = TM_QW2_HV_POOL; + } + } + } + + /* CPPR has changed, this may present or preclude a pending exception */ + xive_tctx_pipr_set(tctx, ring_min, pipr_min, 0); +} + +static void xive_tctx_pipr_recompute_from_ipb(XiveTCTX *tctx, uint8_t ring) +{ + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + uint8_t *regs = &tctx->regs[ring]; + + /* Does not support a presented group interrupt */ + g_assert(!xive_nsr_indicates_group_exception(ring, sig_regs[TM_NSR])); - /* CPPR has changed, check if we need to raise a pending exception */ - xive_tctx_notify(tctx, ring); + xive_tctx_pipr_set(tctx, ring, xive_ipb_to_pipr(regs[TM_IPB]), 0); } -void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb) +void xive_tctx_pipr_present(XiveTCTX *tctx, uint8_t ring, uint8_t priority, + uint8_t group_level) { + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); uint8_t *regs = &tctx->regs[ring]; + uint8_t pipr = xive_priority_to_pipr(priority); - regs[TM_IPB] |= ipb; - regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); - xive_tctx_notify(tctx, ring); + if (group_level == 0) { + regs[TM_IPB] |= xive_priority_to_ipb(priority); + if (pipr >= sig_regs[TM_PIPR]) { + /* VP interrupts can come here with lower priority than PIPR */ + return; + } + } + g_assert(pipr <= xive_ipb_to_pipr(regs[TM_IPB])); + g_assert(pipr < sig_regs[TM_PIPR]); + xive_tctx_pipr_set(tctx, ring, pipr, group_level); } /* @@ -168,17 +282,81 @@ static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx, return xive_tctx_accept(tctx, TM_QW3_HV_PHYS); } +static void xive_pool_cam_decode(uint32_t cam, uint8_t *nvt_blk, + uint32_t *nvt_idx, bool *vp) +{ + if (nvt_blk) { + *nvt_blk = xive_nvt_blk(cam); + } + if (nvt_idx) { + *nvt_idx = xive_nvt_idx(cam); + } + if (vp) { + *vp = !!(cam & TM_QW2W2_VP); + } +} + +static uint32_t xive_tctx_get_pool_cam(XiveTCTX *tctx, uint8_t *nvt_blk, + uint32_t *nvt_idx, bool *vp) +{ + uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); + uint32_t cam = be32_to_cpu(qw2w2); + + xive_pool_cam_decode(cam, nvt_blk, nvt_idx, vp); + return qw2w2; +} + +static void xive_tctx_set_pool_cam(XiveTCTX *tctx, uint32_t qw2w2) +{ + memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4); +} + static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, unsigned size) { - uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); uint32_t qw2w2; + uint32_t qw2w2_new; + uint8_t nvt_blk; + uint32_t nvt_idx; + bool vp; + + qw2w2 = xive_tctx_get_pool_cam(tctx, &nvt_blk, &nvt_idx, &vp); + + if (!vp) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pull invalid POOL NVT %x/%x !?\n", + nvt_blk, nvt_idx); + } + + /* Invalidate CAM line */ + qw2w2_new = xive_set_field32(TM_QW2W2_VP, qw2w2, 0); + xive_tctx_set_pool_cam(tctx, qw2w2_new); + + xive_tctx_reset_signal(tctx, TM_QW1_OS); + xive_tctx_reset_signal(tctx, TM_QW2_HV_POOL); + /* Re-check phys for interrupts if pool was disabled */ + xive_tctx_pipr_recompute_from_ipb(tctx, TM_QW3_HV_PHYS); - qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0); - memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4); return qw2w2; } +static uint64_t xive_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + uint8_t qw3b8 = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; + uint8_t qw3b8_new; + + qw3b8 = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; + if (!(qw3b8 & TM_QW3B8_VT)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid PHYS thread!?\n"); 
+ } + qw3b8_new = qw3b8 & ~TM_QW3B8_VT; + tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8_new; + + xive_tctx_reset_signal(tctx, TM_QW1_OS); + xive_tctx_reset_signal(tctx, TM_QW3_HV_PHYS); + return qw3b8; +} + static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size) { @@ -206,15 +384,15 @@ static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx, static const uint8_t xive_tm_hw_view[] = { 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ - 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ - 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ + 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 3, /* QW-1 OS */ + 0, 0, 3, 3, 0, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */ }; static const uint8_t xive_tm_hv_view[] = { 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ - 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ - 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ + 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 3, /* QW-1 OS */ + 0, 0, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */ }; @@ -277,7 +455,7 @@ static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, */ if (size < 4 || !mask || ring_offset == TM_QW0_USER) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%" - HWADDR_PRIx"\n", offset); + HWADDR_PRIx" size %d\n", offset, size); return; } @@ -308,7 +486,7 @@ static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size) */ if (size < 4 || !mask || ring_offset == TM_QW0_USER) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%" - HWADDR_PRIx"\n", offset); + HWADDR_PRIx" size %d\n", offset, size); return -1; } @@ -341,14 +519,38 @@ static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); } +static void xive_tctx_set_lgs(XiveTCTX *tctx, uint8_t ring, uint8_t lgs) +{ + uint8_t *regs = &tctx->regs[ring]; + + regs[TM_LGS] = lgs; +} + +static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive_tctx_set_lgs(tctx, TM_QW1_OS, value & 0xff); +} + +static void xive_tm_set_pool_lgs(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive_tctx_set_lgs(tctx, TM_QW2_HV_POOL, value & 0xff); +} + /* - * Adjust the IPB to allow a CPU to process event queues of other + * Adjust the PIPR to allow a CPU to process event queues of other * priorities during one physical interrupt cycle. */ static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size) { - xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff)); + uint8_t ring = TM_QW1_OS; + uint8_t *regs = &tctx->regs[ring]; + + /* XXX: how should this work exactly? 
*/ + regs[TM_IPB] |= xive_priority_to_ipb(value & 0xff); + xive_tctx_pipr_recompute_from_ipb(tctx, ring); } static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk, @@ -392,7 +594,7 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo); if (!vo) { - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVT %x/%x !?\n", + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pull invalid OS NVT %x/%x !?\n", nvt_blk, nvt_idx); } @@ -400,11 +602,11 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0); xive_tctx_set_os_cam(tctx, qw1w2_new); - xive_tctx_reset_os_signal(tctx); + xive_tctx_reset_signal(tctx, TM_QW1_OS); return qw1w2; } -static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx, +static void xive_tctx_restore_nvp(XiveRouter *xrtr, XiveTCTX *tctx, uint8_t nvt_blk, uint32_t nvt_idx) { XiveNVT nvt; @@ -426,16 +628,10 @@ static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx, /* Reset the NVT value */ nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0); xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); + + uint8_t *regs = &tctx->regs[TM_QW1_OS]; + regs[TM_IPB] |= ipb; } - /* - * Always call xive_tctx_ipb_update(). Even if there were no - * escalation triggered, there could be a pending interrupt which - * was saved when the context was pulled and that we need to take - * into account by recalculating the PIPR (which is not - * saved/restored). - * It will also raise the External interrupt signal if needed. - */ - xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb); } /* @@ -457,7 +653,17 @@ static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, /* Check the interrupt pending bits */ if (vo) { - xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx); + xive_tctx_restore_nvp(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx); + + /* + * Always call xive_tctx_recompute_from_ipb(). Even if there were no + * escalation triggered, there could be a pending interrupt which + * was saved when the context was pulled and that we need to take + * into account by recalculating the PIPR (which is not + * saved/restored). + * It will also raise the External interrupt signal if needed. 
+ */ + xive_tctx_pipr_recompute_from_ipb(tctx, TM_QW1_OS); /* fxb */ } } @@ -476,6 +682,8 @@ typedef struct XiveTmOp { uint8_t page_offset; uint32_t op_offset; unsigned size; + bool hw_ok; + bool sw_ok; void (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size); @@ -488,20 +696,34 @@ static const XiveTmOp xive_tm_operations[] = { * MMIOs below 2K : raw values and special operations without side * effects */ - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL }, - { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll }, + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, true, true, + xive_tm_set_os_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, true, true, + xive_tm_push_os_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, true, true, + xive_tm_set_hv_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, false, true, + xive_tm_vt_push, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, true, true, + NULL, xive_tm_vt_poll }, /* MMIOs above 2K : special operations with side effects */ - { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg }, - { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx }, + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, true, false, + NULL, xive_tm_ack_os_reg }, + { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, true, false, + xive_tm_set_os_pending, NULL }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, true, false, + NULL, xive_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, true, false, + NULL, xive_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, true, false, + NULL, xive_tm_ack_hv_reg }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, true, false, + NULL, xive_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, true, false, + NULL, xive_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, true, false, + NULL, xive_tm_pull_phys_ctx }, }; static const XiveTmOp xive2_tm_operations[] = { @@ -509,20 +731,58 @@ static const XiveTmOp xive2_tm_operations[] = { * MMIOs below 2K : raw values and special operations without side * effects */ - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL }, - { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll }, + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, true, true, + xive2_tm_set_os_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, true, true, + xive2_tm_push_os_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, true, true, + xive2_tm_push_os_ctx, NULL }, + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, true, true, + xive_tm_set_os_lgs, NULL }, + { XIVE_TM_HV_PAGE, 
TM_QW2_HV_POOL + TM_WORD2, 4, true, true, + xive2_tm_push_pool_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW2_HV_POOL + TM_WORD2, 8, true, true, + xive2_tm_push_pool_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW2_HV_POOL + TM_LGS, 1, true, true, + xive_tm_set_pool_lgs, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, true, true, + xive2_tm_set_hv_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, false, true, + xive2_tm_push_phys_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, true, true, + NULL, xive_tm_vt_poll }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, true, true, + xive2_tm_set_hv_target, NULL }, /* MMIOs above 2K : special operations with side effects */ - { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg }, - { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive2_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive2_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx }, + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, true, false, + NULL, xive_tm_ack_os_reg }, + { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, true, false, + xive2_tm_set_os_pending, NULL }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, true, false, + NULL, xive2_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, true, false, + NULL, xive2_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, true, false, + NULL, xive2_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, true, false, + NULL, xive_tm_ack_hv_reg }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, true, false, + NULL, xive2_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, true, false, + NULL, xive2_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, true, false, + NULL, xive2_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, true, false, + xive2_tm_pull_os_ctx_ol, NULL }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, true, false, + NULL, xive2_tm_pull_phys_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, true, false, + NULL, xive2_tm_pull_phys_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, true, false, + xive2_tm_pull_phys_ctx_ol, NULL }, + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_EL, 1, true, false, + xive2_tm_ack_os_el, NULL }, }; static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset, @@ -564,21 +824,31 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size) { const XiveTmOp *xto; + uint8_t ring = offset & TM_RING_OFFSET; + bool is_valid = xive_ring_valid(tctx, ring); + bool hw_owned = is_valid; trace_xive_tctx_tm_write(tctx->cs->cpu_index, offset, size, value); - /* - * TODO: check V bit in Q[0-3]W2 - */ - /* * First, check for special operations in the 2K region */ + xto = xive_tm_find_op(tctx->xptr, offset, size, true); + if (xto) { + if (hw_owned && !xto->hw_ok) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to HW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } + if (!hw_owned && !xto->sw_ok) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to SW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } + } + if (offset & TM_SPECIAL_OP) { - xto = xive_tm_find_op(tctx->xptr, offset, size, true); if (!xto) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: 
invalid write access at TIMA " - "@%"HWADDR_PRIx"\n", offset); + "@%"HWADDR_PRIx" size %d\n", offset, size); } else { xto->write_handler(xptr, tctx, offset, value, size); } @@ -588,7 +858,6 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, /* * Then, for special operations in the region below 2K. */ - xto = xive_tm_find_op(tctx->xptr, offset, size, true); if (xto) { xto->write_handler(xptr, tctx, offset, value, size); return; @@ -597,6 +866,11 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, /* * Finish with raw access to the register values */ + if (hw_owned) { + /* Store context operations are dangerous when context is valid */ + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to HW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } xive_tm_raw_write(tctx, offset, value, size); } @@ -604,20 +878,30 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, unsigned size) { const XiveTmOp *xto; + uint8_t ring = offset & TM_RING_OFFSET; + bool is_valid = xive_ring_valid(tctx, ring); + bool hw_owned = is_valid; uint64_t ret; - /* - * TODO: check V bit in Q[0-3]W2 - */ + xto = xive_tm_find_op(tctx->xptr, offset, size, false); + if (xto) { + if (hw_owned && !xto->hw_ok) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined read to HW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } + if (!hw_owned && !xto->sw_ok) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined read to SW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } + } /* * First, check for special operations in the 2K region */ if (offset & TM_SPECIAL_OP) { - xto = xive_tm_find_op(tctx->xptr, offset, size, false); if (!xto) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA" - "@%"HWADDR_PRIx"\n", offset); + "@%"HWADDR_PRIx" size %d\n", offset, size); return -1; } ret = xto->read_handler(xptr, tctx, offset, size); @@ -627,7 +911,6 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, /* * Then, for special operations in the region below 2K. */ - xto = xive_tm_find_op(tctx->xptr, offset, size, false); if (xto) { ret = xto->read_handler(xptr, tctx, offset, size); goto out; @@ -718,15 +1001,19 @@ void xive_tctx_reset(XiveTCTX *tctx) tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF; tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF; tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF; + if (!(xive_presenter_get_config(tctx->xptr) & + XIVE_PRESENTER_GEN1_TIMA_OS)) { + tctx->regs[TM_QW1_OS + TM_OGEN] = 2; + } /* * Initialize PIPR to 0xFF to avoid phantom interrupts when the * CPPR is first set. 
*/ tctx->regs[TM_QW1_OS + TM_PIPR] = - ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); + xive_ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] = - ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); + xive_ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); } static void xive_tctx_realize(DeviceState *dev, Error **errp) @@ -810,14 +1097,13 @@ static const VMStateDescription vmstate_xive_tctx = { }, }; -static Property xive_tctx_properties[] = { +static const Property xive_tctx_properties[] = { DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *), DEFINE_PROP_LINK("presenter", XiveTCTX, xptr, TYPE_XIVE_PRESENTER, XivePresenter *), - DEFINE_PROP_END_OF_LIST(), }; -static void xive_tctx_class_init(ObjectClass *klass, void *data) +static void xive_tctx_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1078,6 +1364,7 @@ static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size) /* Forward the source event notification for routing */ if (ret) { + trace_xive_source_notify(srcno); xive_source_notify(xsrc, srcno); } break; @@ -1173,6 +1460,8 @@ static void xive_source_esb_write(void *opaque, hwaddr addr, /* Forward the source event notification for routing */ if (notify) { xive_source_notify(xsrc, srcno); + } else { + trace_xive_source_blocked(srcno); } } @@ -1242,7 +1531,7 @@ static void xive_source_reset(void *dev) static void xive_source_realize(DeviceState *dev, Error **errp) { XiveSource *xsrc = XIVE_SOURCE(dev); - size_t esb_len = xive_source_esb_len(xsrc); + uint64_t esb_len = xive_source_esb_len(xsrc); assert(xsrc->xive); @@ -1286,7 +1575,7 @@ static const VMStateDescription vmstate_xive_source = { * The default XIVE interrupt source setting for the ESB MMIOs is two * 64k pages without Store EOI, to be in sync with KVM. */ -static Property xive_source_properties[] = { +static const Property xive_source_properties[] = { DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0), DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0), DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE), @@ -1297,10 +1586,9 @@ static Property xive_source_properties[] = { DEFINE_PROP_UINT8("reset-pq", XiveSource, reset_pq, XIVE_ESB_OFF), DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER, XiveNotifier *), - DEFINE_PROP_END_OF_LIST(), }; -static void xive_source_class_init(ObjectClass *klass, void *data) +static void xive_source_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1543,6 +1831,75 @@ static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f)); } +uint32_t xive_get_vpgroup_size(uint32_t nvp_index) +{ + /* + * Group size is a power of 2. The position of the first 0 + * (starting with the least significant bits) in the NVP index + * gives the size of the group. 
+ */ + int first_zero = cto32(nvp_index); + if (first_zero >= 31) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group index 0x%08x", + nvp_index); + return 0; + } + + return 1U << (first_zero + 1); +} + +uint8_t xive_get_group_level(bool crowd, bool ignore, + uint32_t nvp_blk, uint32_t nvp_index) +{ + int first_zero; + uint8_t level; + + if (!ignore) { + g_assert(!crowd); + return 0; + } + + first_zero = cto32(nvp_index); + if (first_zero >= 31) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group index 0x%08x", + nvp_index); + return 0; + } + + level = (first_zero + 1) & 0b1111; + if (crowd) { + uint32_t blk; + + /* crowd level is bit position of first 0 from the right in nvp_blk */ + first_zero = cto32(nvp_blk); + if (first_zero >= 31) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd block 0x%08x", + nvp_blk); + return 0; + } + blk = first_zero + 1; + + /* + * Supported crowd sizes are 2^1, 2^2, and 2^4. 2^3 is not supported. + * HW will encode level 4 as the value 3. See xive2_pgofnext(). + */ + switch (blk) { + case 1: + case 2: + break; + case 4: + blk = 3; + break; + default: + g_assert_not_reached(); + } + + /* Crowd level bits reside in upper 2 bits of the 6 bit group level */ + level |= blk << 4; + } + return level; +} + /* * The thread context register words are in big-endian format. */ @@ -1609,44 +1966,41 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, /* * This is our simple Xive Presenter Engine model. It is merged in the * Router as it does not require an extra object. - * - * It receives notification requests sent by the IVRE to find one - * matching NVT (or more) dispatched on the processor threads. In case - * of a single NVT notification, the process is abbreviated and the - * thread is signaled if a match is found. In case of a logical server - * notification (bits ignored at the end of the NVT identifier), the - * IVPE and IVRE select a winning thread using different filters. This - * involves 2 or 3 exchanges on the PowerBus that the model does not - * support. - * - * The parameters represent what is sent on the PowerBus */ -bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, +bool xive_presenter_match(XiveFabric *xfb, uint8_t format, uint8_t nvt_blk, uint32_t nvt_idx, - bool cam_ignore, uint8_t priority, - uint32_t logic_serv) + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) { XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb); - XiveTCTXMatch match = { .tctx = NULL, .ring = 0 }; - int count; + + memset(match, 0, sizeof(*match)); /* - * Ask the machine to scan the interrupt controllers for a match + * Ask the machine to scan the interrupt controllers for a match. + * + * For VP-specific notification, we expect at most one match and + * one call to the presenters is all we need (abbreviated notify + * sequence documented by the architecture). + * + * For VP-group notification, match_nvt() is the equivalent of the + * "histogram" and "poll" commands sent to the power bus to the + * presenters. 'count' could be more than one, but we always + * select the first match for now. 'precluded' tells if (at least) + * one thread matches but can't take the interrupt now because + * it's running at a more favored priority. We return the + * information to the router so that it can take appropriate + * actions (backlog, escalation, broadcast, etc...) 
+ * + * If we were to implement a better way of dispatching the + * interrupt in case of multiple matches (instead of the first + * match), we would need a heuristic to elect a thread (for + * example, the hardware keeps track of an 'age' in the TIMA) and + * a new command to the presenters (the equivalent of the "assign" + * power bus command in the documented full notify sequence. */ - count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore, - priority, logic_serv, &match); - if (count < 0) { - return false; - } - - /* handle CPU exception delivery */ - if (count) { - trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring); - xive_tctx_ipb_update(match.tctx, match.ring, - xive_priority_to_ipb(priority)); - } - - return !!count; + return xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore, + priority, logic_serv, match); } /* @@ -1683,7 +2037,7 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas) uint8_t nvt_blk; uint32_t nvt_idx; XiveNVT nvt; - bool found; + XiveTCTXMatch match; uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w); uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w); @@ -1763,14 +2117,16 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas) return; } - found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx, - xive_get_field32(END_W7_F0_IGNORE, end.w7), - priority, - xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7)); - /* TODO: Auto EOI. */ - - if (found) { + /* we don't support VP-group notification on P9, so precluded is not used */ + if (xive_presenter_match(xrtr->xfb, format, nvt_blk, nvt_idx, + false /* crowd */, + xive_get_field32(END_W7_F0_IGNORE, end.w7), + priority, + xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7), + &match)) { + trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, 0); + xive_tctx_pipr_present(match.tctx, match.ring, priority, 0); return; } @@ -1885,13 +2241,12 @@ void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) xive_router_end_notify_handler(xrtr, &eas); } -static Property xive_router_properties[] = { +static const Property xive_router_properties[] = { DEFINE_PROP_LINK("xive-fabric", XiveRouter, xfb, TYPE_XIVE_FABRIC, XiveFabric *), - DEFINE_PROP_END_OF_LIST(), }; -static void xive_router_class_init(ObjectClass *klass, void *data) +static void xive_router_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); @@ -1914,7 +2269,7 @@ static const TypeInfo xive_router_info = { .instance_size = sizeof(XiveRouter), .class_size = sizeof(XiveRouterClass), .class_init = xive_router_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_XIVE_NOTIFIER }, { TYPE_XIVE_PRESENTER }, { } @@ -2053,15 +2408,14 @@ static void xive_end_source_realize(DeviceState *dev, Error **errp) (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends); } -static Property xive_end_source_properties[] = { +static const Property xive_end_source_properties[] = { DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0), DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K), DEFINE_PROP_LINK("xive", XiveENDSource, xrtr, TYPE_XIVE_ROUTER, XiveRouter *), - DEFINE_PROP_END_OF_LIST(), }; -static void xive_end_source_class_init(ObjectClass *klass, void *data) +static void xive_end_source_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c index 1f150685bf3..ee5fa261784 100644 
--- a/hw/intc/xive2.c +++ b/hw/intc/xive2.c @@ -1,10 +1,9 @@ /* * QEMU PowerPC XIVE2 interrupt controller model (POWER10) * - * Copyright (c) 2019-2022, IBM Corporation.. + * Copyright (c) 2019-2024, IBM Corporation.. * - * This code is licensed under the GPL version 2 or later. See the - * COPYING file in the top-level directory. + * SPDX-License-Identifier: GPL-2.0-or-later */ #include "qemu/osdep.h" @@ -12,12 +11,20 @@ #include "qemu/module.h" #include "qapi/error.h" #include "target/ppc/cpu.h" -#include "sysemu/cpus.h" -#include "sysemu/dma.h" +#include "system/cpus.h" +#include "system/dma.h" #include "hw/qdev-properties.h" #include "hw/ppc/xive.h" #include "hw/ppc/xive2.h" #include "hw/ppc/xive2_regs.h" +#include "trace.h" + +static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, + uint32_t end_idx, uint32_t end_data, + bool redistribute); + +static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring, + uint8_t *nvp_blk, uint32_t *nvp_idx); uint32_t xive2_router_get_config(Xive2Router *xrtr) { @@ -26,6 +33,155 @@ uint32_t xive2_router_get_config(Xive2Router *xrtr) return xrc->get_config(xrtr); } +static int xive2_router_get_block_id(Xive2Router *xrtr) +{ + Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); + + return xrc->get_block_id(xrtr); +} + +static uint64_t xive2_nvp_reporting_addr(Xive2Nvp *nvp) +{ + uint64_t cache_addr; + + cache_addr = xive_get_field32(NVP2_W6_REPORTING_LINE, nvp->w6) << 24 | + xive_get_field32(NVP2_W7_REPORTING_LINE, nvp->w7); + cache_addr <<= 8; /* aligned on a cache line pair */ + return cache_addr; +} + +static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority) +{ + uint32_t val = 0; + uint8_t *ptr, i; + + if (priority > 7) { + return 0; + } + + /* + * The per-priority backlog counters are 24-bit and the structure + * is stored in big endian. NVGC is 32-bytes long, so 24-bytes from + * w2, which fits 8 priorities * 24-bits per priority. + */ + ptr = (uint8_t *)&nvgc->w2 + priority * 3; + for (i = 0; i < 3; i++, ptr++) { + val = (val << 8) + *ptr; + } + return val; +} + +static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority, + uint32_t val) +{ + uint8_t *ptr, i; + uint32_t shift; + + if (priority > 7) { + return; + } + + if (val > 0xFFFFFF) { + val = 0xFFFFFF; + } + /* + * The per-priority backlog counters are 24-bit and the structure + * is stored in big endian + */ + ptr = (uint8_t *)&nvgc->w2 + priority * 3; + for (i = 0; i < 3; i++, ptr++) { + shift = 8 * (2 - i); + *ptr = (val >> shift) & 0xFF; + } +} + +uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr, + bool crowd, + uint8_t blk, uint32_t idx, + uint16_t offset, uint16_t val) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset); + uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset); + Xive2Nvgc nvgc; + uint32_t count, old_count; + + if (xive2_router_get_nvgc(xrtr, crowd, blk, idx, &nvgc)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No %s %x/%x\n", + crowd ? 
"NVC" : "NVG", blk, idx); + return -1; + } + if (!xive2_nvgc_is_valid(&nvgc)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", blk, idx); + return -1; + } + + old_count = xive2_nvgc_get_backlog(&nvgc, priority); + count = old_count; + /* + * op: + * 0b00 => increment + * 0b01 => decrement + * 0b1- => read + */ + if (op == 0b00 || op == 0b01) { + if (op == 0b00) { + count += val; + } else { + if (count > val) { + count -= val; + } else { + count = 0; + } + } + xive2_nvgc_set_backlog(&nvgc, priority, count); + xive2_router_write_nvgc(xrtr, crowd, blk, idx, &nvgc); + } + trace_xive_nvgc_backlog_op(crowd, blk, idx, op, priority, old_count); + return old_count; +} + +uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr, + uint8_t blk, uint32_t idx, + uint16_t offset) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset); + uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset); + Xive2Nvp nvp; + uint8_t ipb, old_ipb, rc; + + if (xive2_router_get_nvp(xrtr, blk, idx, &nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", blk, idx); + return -1; + } + if (!xive2_nvp_is_valid(&nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVP %x/%x\n", blk, idx); + return -1; + } + + old_ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2); + ipb = old_ipb; + /* + * op: + * 0b00 => set priority bit + * 0b01 => reset priority bit + * 0b1- => read + */ + if (op == 0b00 || op == 0b01) { + if (op == 0b00) { + ipb |= xive_priority_to_ipb(priority); + } else { + ipb &= ~xive_priority_to_ipb(priority); + } + nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); + xive2_router_write_nvp(xrtr, blk, idx, &nvp, 2); + } + rc = !!(old_ipb & xive_priority_to_ipb(priority)); + trace_xive_nvp_backlog_op(blk, idx, op, priority, rc); + return rc; +} + void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf) { if (!xive2_eas_is_valid(eas)) { @@ -39,12 +195,27 @@ void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf) (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w)); } +#define XIVE2_QSIZE_CHUNK_CL 128 +#define XIVE2_QSIZE_CHUNK_4k 4096 +/* Calculate max number of queue entries for an END */ +static uint32_t xive2_end_get_qentries(Xive2End *end) +{ + uint32_t w3 = end->w3; + uint32_t qsize = xive_get_field32(END2_W3_QSIZE, w3); + if (xive_get_field32(END2_W3_CL, w3)) { + g_assert(qsize <= 4); + return (XIVE2_QSIZE_CHUNK_CL << qsize) / sizeof(uint32_t); + } else { + g_assert(qsize <= 12); + return (XIVE2_QSIZE_CHUNK_4k << qsize) / sizeof(uint32_t); + } +} + void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width, GString *buf) { uint64_t qaddr_base = xive2_end_qaddr(end); - uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3); uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); - uint32_t qentries = 1 << (qsize + 10); + uint32_t qentries = xive2_end_get_qentries(end); int i; /* @@ -74,11 +245,10 @@ void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf) uint64_t qaddr_base = xive2_end_qaddr(end); uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1); - uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3); - uint32_t qentries = 1 << (qsize + 10); + uint32_t qentries = xive2_end_get_qentries(end); - uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6); - uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6); + uint32_t nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6); + uint32_t 
nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6); uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7); uint8_t pq; @@ -107,7 +277,7 @@ void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf) xive2_end_is_firmware2(end) ? 'F' : '-', xive2_end_is_ignore(end) ? 'i' : '-', xive2_end_is_crowd(end) ? 'c' : '-', - priority, nvp_blk, nvp_idx); + priority, nvx_blk, nvx_idx); if (qaddr_base) { g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d", @@ -144,14 +314,20 @@ void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf) { uint8_t eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5); uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5); + uint64_t cache_line = xive2_nvp_reporting_addr(nvp); if (!xive2_nvp_is_valid(nvp)) { return; } - g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x", + g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x PGoFirst:%02x", nvp_idx, eq_blk, eq_idx, - xive_get_field32(NVP2_W2_IPB, nvp->w2)); + xive_get_field32(NVP2_W2_IPB, nvp->w2), + xive_get_field32(NVP2_W0_PGOFIRST, nvp->w0)); + if (cache_line) { + g_string_append_printf(buf, " reporting CL:%016"PRIx64, cache_line); + } + /* * When the NVP is HW controlled, more fields are updated */ @@ -166,16 +342,32 @@ void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf) g_string_append_c(buf, '\n'); } +void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, GString *buf) +{ + uint8_t i; + + if (!xive2_nvgc_is_valid(nvgc)) { + return; + } + + g_string_append_printf(buf, " %08x PGoNext:%02x bklog: ", nvgc_idx, + xive_get_field32(NVGC2_W0_PGONEXT, nvgc->w0)); + for (i = 0; i <= XIVE_PRIORITY_MAX; i++) { + g_string_append_printf(buf, "[%d]=0x%x ", + i, xive2_nvgc_get_backlog(nvgc, i)); + } + g_string_append_printf(buf, "\n"); +} + static void xive2_end_enqueue(Xive2End *end, uint32_t data) { uint64_t qaddr_base = xive2_end_qaddr(end); - uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3); uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1); uint64_t qaddr = qaddr_base + (qindex << 2); uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); - uint32_t qentries = 1 << (qsize + 10); + uint32_t qentries = xive2_end_get_qentries(end); if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) { @@ -189,12 +381,121 @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data) qgen ^= 1; end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen); - /* TODO(PowerNV): reset GF bit on a cache watch operation */ - end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen); + /* Set gen flipped to 1, it gets reset on a cache watch operation */ + end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, 1); } end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex); } +static void xive2_pgofnext(uint8_t *nvgc_blk, uint32_t *nvgc_idx, + uint8_t next_level) +{ + uint32_t mask, next_idx; + uint8_t next_blk; + + /* + * Adjust the block and index of a VP for the next group/crowd + * size (PGofFirst/PGofNext field in the NVP and NVGC structures). + * + * The 6-bit group level is split into a 2-bit crowd and 4-bit + * group levels. Encoding is similar. However, we don't support + * crowd size of 8. So a crowd level of 0b11 is bumped to a crowd + * size of 16. 
+ */ + next_blk = NVx_CROWD_LVL(next_level); + if (next_blk == 3) { + next_blk = 4; + } + mask = (1 << next_blk) - 1; + *nvgc_blk &= ~mask; + *nvgc_blk |= mask >> 1; + + next_idx = NVx_GROUP_LVL(next_level); + mask = (1 << next_idx) - 1; + *nvgc_idx &= ~mask; + *nvgc_idx |= mask >> 1; +} + +/* + * Scan the group chain and return the highest priority and group + * level of pending group interrupts. + */ +static uint8_t xive2_presenter_backlog_scan(XivePresenter *xptr, + uint8_t nvx_blk, uint32_t nvx_idx, + uint8_t first_group, + uint8_t *out_level) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint32_t nvgc_idx; + uint32_t current_level, count; + uint8_t nvgc_blk, prio; + Xive2Nvgc nvgc; + + for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) { + current_level = first_group & 0x3F; + nvgc_blk = nvx_blk; + nvgc_idx = nvx_idx; + + while (current_level) { + xive2_pgofnext(&nvgc_blk, &nvgc_idx, current_level); + + if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(current_level), + nvgc_blk, nvgc_idx, &nvgc)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n", + nvgc_blk, nvgc_idx); + return 0xFF; + } + if (!xive2_nvgc_is_valid(&nvgc)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n", + nvgc_blk, nvgc_idx); + return 0xFF; + } + + count = xive2_nvgc_get_backlog(&nvgc, prio); + if (count) { + *out_level = current_level; + return prio; + } + current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0x3F; + } + } + return 0xFF; +} + +static void xive2_presenter_backlog_decr(XivePresenter *xptr, + uint8_t nvx_blk, uint32_t nvx_idx, + uint8_t group_prio, + uint8_t group_level) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint32_t nvgc_idx, count; + uint8_t nvgc_blk; + Xive2Nvgc nvgc; + + nvgc_blk = nvx_blk; + nvgc_idx = nvx_idx; + xive2_pgofnext(&nvgc_blk, &nvgc_idx, group_level); + + if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(group_level), + nvgc_blk, nvgc_idx, &nvgc)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n", + nvgc_blk, nvgc_idx); + return; + } + if (!xive2_nvgc_is_valid(&nvgc)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n", + nvgc_blk, nvgc_idx); + return; + } + count = xive2_nvgc_get_backlog(&nvgc, group_prio); + if (!count) { + return; + } + xive2_nvgc_set_backlog(&nvgc, group_prio, count - 1); + xive2_router_write_nvgc(xrtr, NVx_CROWD_LVL(group_level), + nvgc_blk, nvgc_idx, &nvgc); +} + /* * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode * @@ -210,13 +511,15 @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data) * the NVP by changing the H bit while the context is enabled */ -static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, - uint8_t nvp_blk, uint32_t nvp_idx) +static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx, + uint8_t ring, + uint8_t nvp_blk, uint32_t nvp_idx) { CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; uint32_t pir = env->spr_cb[SPR_PIR].default_value; Xive2Nvp nvp; - uint8_t *regs = &tctx->regs[TM_QW1_OS]; + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + uint8_t *regs = &tctx->regs[ring]; if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", @@ -251,8 +554,27 @@ static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, } nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]); - nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]); - nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]); + + if ((nvp.w0 & NVP2_W0_P) || ring != TM_QW2_HV_POOL) { + /* + * Non-pool 
+ contexts always save CPPR (ignore p bit). XXX: Clarify + * whether that is the correct behaviour. + */ + nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, sig_regs[TM_CPPR]); + } + if (nvp.w0 & NVP2_W0_L) { + /* + * Typically not used. If LSMFB is restored with 0, it will + * force a backlog rescan + */ + nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]); + } + if (nvp.w0 & NVP2_W0_G) { + nvp.w2 = xive_set_field32(NVP2_W2_LGS, nvp.w2, regs[TM_LGS]); + } + if (nvp.w0 & NVP2_W0_T) { + nvp.w2 = xive_set_field32(NVP2_W2_T, nvp.w2, regs[TM_T]); + } xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0); @@ -261,52 +583,320 @@ static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1); } -static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk, - uint32_t *nvp_idx, bool *vo, bool *ho) +/* POOL cam is the same as OS cam encoding */ +static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk, + uint32_t *nvp_idx, bool *valid, bool *hw) { *nvp_blk = xive2_nvp_blk(cam); *nvp_idx = xive2_nvp_idx(cam); - *vo = !!(cam & TM2_QW1W2_VO); - *ho = !!(cam & TM2_QW1W2_HO); + *valid = !!(cam & TM2_W2_VALID); + *hw = !!(cam & TM2_W2_HW); } -uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, - hwaddr offset, unsigned size) +/* + * Encode the HW CAM line with 7bit or 8bit thread id. The thread id + * width and block id width is configurable at the IC level. + * + * chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit) + * chipid << 24 | 0000 0000 0000 0001 threadid (8Bit) + */ +static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) { Xive2Router *xrtr = XIVE2_ROUTER(xptr); - uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); - uint32_t qw1w2_new; - uint32_t cam = be32_to_cpu(qw1w2); + CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; + uint32_t pir = env->spr_cb[SPR_PIR].default_value; + uint8_t blk = xive2_router_get_block_id(xrtr); + uint8_t tid_shift = + xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7; + uint8_t tid_mask = (1 << tid_shift) - 1; + + return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask)); +} + +static void xive2_redistribute(Xive2Router *xrtr, XiveTCTX *tctx, uint8_t ring) +{ + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + uint8_t nsr = sig_regs[TM_NSR]; + uint8_t pipr = sig_regs[TM_PIPR]; + uint8_t crowd = NVx_CROWD_LVL(nsr); + uint8_t group = NVx_GROUP_LVL(nsr); + uint8_t nvgc_blk, end_blk, nvp_blk; + uint32_t nvgc_idx, end_idx, nvp_idx; + Xive2Nvgc nvgc; + uint8_t prio_limit; + uint32_t cfg; + + /* redistribution is only for group/crowd interrupts */ + if (!xive_nsr_indicates_group_exception(ring, nsr)) { + return; + } + + /* Don't check return code since ring is expected to be invalidated */ + xive2_tctx_get_nvp_indexes(tctx, ring, &nvp_blk, &nvp_idx); + + trace_xive_redistribute(tctx->cs->cpu_index, ring, nvp_blk, nvp_idx); + /* convert crowd/group to blk/idx */ + if (group > 0) { + nvgc_idx = (nvp_idx & (0xffffffff << group)) | + ((1 << (group - 1)) - 1); + } else { + nvgc_idx = nvp_idx; + } + + if (crowd > 0) { + crowd = (crowd == 3) ?
4 : crowd; + nvgc_blk = (nvp_blk & (0xffffffff << crowd)) | + ((1 << (crowd - 1)) - 1); + } else { + nvgc_blk = nvp_blk; + } + + /* Use blk/idx to retrieve the NVGC */ + if (xive2_router_get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, &nvgc)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no %s %x/%x\n", + crowd ? "NVC" : "NVG", nvgc_blk, nvgc_idx); + return; + } + + /* retrieve the END blk/idx from the NVGC */ + end_blk = xive_get_field32(NVGC2_W1_END_BLK, nvgc.w1); + end_idx = xive_get_field32(NVGC2_W1_END_IDX, nvgc.w1); + + /* determine number of priorities being used */ + cfg = xive2_router_get_config(xrtr); + if (cfg & XIVE2_EN_VP_GRP_PRIORITY) { + prio_limit = 1 << GETFIELD(NVGC2_W1_PSIZE, nvgc.w1); + } else { + prio_limit = 1 << GETFIELD(XIVE2_VP_INT_PRIO, cfg); + } + + /* add priority offset to end index */ + end_idx += pipr % prio_limit; + + /* trigger the group END */ + xive2_router_end_notify(xrtr, end_blk, end_idx, 0, true); + + /* clear interrupt indication for the context */ + sig_regs[TM_NSR] = 0; + sig_regs[TM_PIPR] = sig_regs[TM_CPPR]; + xive_tctx_reset_signal(tctx, ring); +} + +static void xive2_tctx_process_pending(XiveTCTX *tctx, uint8_t sig_ring); + +static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size, uint8_t ring) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint32_t target_ringw2 = xive_tctx_word2(&tctx->regs[ring]); + uint32_t cam = be32_to_cpu(target_ringw2); uint8_t nvp_blk; uint32_t nvp_idx; - bool vo; + uint8_t cur_ring; + bool valid; bool do_save; + uint8_t nsr; - xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_save); + xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save); - if (!vo) { + if (xive2_tctx_get_nvp_indexes(tctx, ring, &nvp_blk, &nvp_idx)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n", nvp_blk, nvp_idx); } - /* Invalidate CAM line */ - qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0); - memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4); + /* Invalidate CAM line of requested ring and all lower rings */ + for (cur_ring = TM_QW0_USER; cur_ring <= ring; + cur_ring += XIVE_TM_RING_SIZE) { + uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]); + uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0); + bool is_valid = !!(xive_get_field32(TM2_QW1W2_VO, ringw2)); + uint8_t *sig_regs; + + memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4); + + /* Skip the rest for USER or invalid contexts */ + if ((cur_ring == TM_QW0_USER) || !is_valid) { + continue; + } + + /* Active group/crowd interrupts need to be redistributed */ + sig_regs = xive_tctx_signal_regs(tctx, ring); + nsr = sig_regs[TM_NSR]; + if (xive_nsr_indicates_group_exception(cur_ring, nsr)) { + /* Ensure ring matches NSR (for HV NSR POOL vs PHYS rings) */ + if (cur_ring == xive_nsr_exception_ring(cur_ring, nsr)) { + xive2_redistribute(xrtr, tctx, cur_ring); + } + } + + /* + * Lower external interrupt line of requested ring and below except for + * USER, which doesn't exist. 
+ */ + if (xive_nsr_indicates_exception(cur_ring, nsr)) { + if (cur_ring == xive_nsr_exception_ring(cur_ring, nsr)) { + xive_tctx_reset_signal(tctx, cur_ring); + } + } + } + + if (ring == TM_QW2_HV_POOL) { + /* Re-check phys for interrupts if pool was disabled */ + nsr = tctx->regs[TM_QW3_HV_PHYS + TM_NSR]; + if (xive_nsr_indicates_exception(TM_QW3_HV_PHYS, nsr)) { + /* Ring must be PHYS because POOL would have been redistributed */ + g_assert(xive_nsr_exception_ring(TM_QW3_HV_PHYS, nsr) == + TM_QW3_HV_PHYS); + } else { + xive2_tctx_process_pending(tctx, TM_QW3_HV_PHYS); + } + } if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) { - xive2_tctx_save_os_ctx(xrtr, tctx, nvp_blk, nvp_idx); + xive2_tctx_save_ctx(xrtr, tctx, ring, nvp_blk, nvp_idx); } - xive_tctx_reset_os_signal(tctx); - return qw1w2; + return target_ringw2; +} + +uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS); } -static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, - uint8_t nvp_blk, uint32_t nvp_idx, - Xive2Nvp *nvp) +uint64_t xive2_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW2_HV_POOL); +} + +uint64_t xive2_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW3_HV_PHYS); +} + +#define REPORT_LINE_GEN1_SIZE 16 + +static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data, + uint8_t size) +{ + uint8_t *regs = tctx->regs; + + g_assert(size == REPORT_LINE_GEN1_SIZE); + memset(data, 0, size); + /* + * See xive architecture for description of what is saved. It is + * hand-picked information to fit in 16 bytes. 
+ */ + data[0x0] = regs[TM_QW3_HV_PHYS + TM_NSR]; + data[0x1] = regs[TM_QW3_HV_PHYS + TM_CPPR]; + data[0x2] = regs[TM_QW3_HV_PHYS + TM_IPB]; + data[0x3] = regs[TM_QW2_HV_POOL + TM_IPB]; + data[0x4] = regs[TM_QW1_OS + TM_ACK_CNT]; + data[0x5] = regs[TM_QW3_HV_PHYS + TM_LGS]; + data[0x6] = 0xFF; + data[0x7] = regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x80; + data[0x7] |= (regs[TM_QW2_HV_POOL + TM_WORD2] & 0x80) >> 1; + data[0x7] |= (regs[TM_QW1_OS + TM_WORD2] & 0x80) >> 2; + data[0x7] |= (regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x3); + data[0x8] = regs[TM_QW1_OS + TM_NSR]; + data[0x9] = regs[TM_QW1_OS + TM_CPPR]; + data[0xA] = regs[TM_QW1_OS + TM_IPB]; + data[0xB] = regs[TM_QW1_OS + TM_LGS]; + if (regs[TM_QW0_USER + TM_WORD2] & 0x80) { + /* + * Logical server extension, except VU bit replaced by EB bit + * from NSR + */ + data[0xC] = regs[TM_QW0_USER + TM_WORD2]; + data[0xC] &= ~0x80; + data[0xC] |= regs[TM_QW0_USER + TM_NSR] & 0x80; + data[0xD] = regs[TM_QW0_USER + TM_WORD2 + 1]; + data[0xE] = regs[TM_QW0_USER + TM_WORD2 + 2]; + data[0xF] = regs[TM_QW0_USER + TM_WORD2 + 3]; + } +} + +static void xive2_tm_pull_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, + unsigned size, uint8_t ring) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint32_t hw_cam, nvp_idx, xive2_cfg, reserved; + uint8_t nvp_blk; + Xive2Nvp nvp; + uint64_t phys_addr; + MemTxResult result; + + hw_cam = xive2_tctx_hw_cam_line(xptr, tctx); + nvp_blk = xive2_nvp_blk(hw_cam); + nvp_idx = xive2_nvp_idx(hw_cam); + + if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + if (!xive2_nvp_is_valid(&nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + xive2_cfg = xive2_router_get_config(xrtr); + + phys_addr = xive2_nvp_reporting_addr(&nvp) + 0x80; /* odd line */ + if (xive2_cfg & XIVE2_GEN1_TIMA_OS) { + uint8_t pull_ctxt[REPORT_LINE_GEN1_SIZE]; + + xive2_tm_report_line_gen1(tctx, pull_ctxt, REPORT_LINE_GEN1_SIZE); + result = dma_memory_write(&address_space_memory, phys_addr, + pull_ctxt, REPORT_LINE_GEN1_SIZE, + MEMTXATTRS_UNSPECIFIED); + assert(result == MEMTX_OK); + } else { + result = dma_memory_write(&address_space_memory, phys_addr, + &tctx->regs, sizeof(tctx->regs), + MEMTXATTRS_UNSPECIFIED); + assert(result == MEMTX_OK); + reserved = 0xFFFFFFFF; + result = dma_memory_write(&address_space_memory, phys_addr + 12, + &reserved, sizeof(reserved), + MEMTXATTRS_UNSPECIFIED); + assert(result == MEMTX_OK); + } + + /* the rest is similar to pull context to registers */ + xive2_tm_pull_ctx(xptr, tctx, offset, size, ring); +} + +void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW1_OS); +} + + +void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS); +} + +static uint8_t xive2_tctx_restore_ctx(Xive2Router *xrtr, XiveTCTX *tctx, + uint8_t ring, + uint8_t nvp_blk, uint32_t nvp_idx, + Xive2Nvp *nvp) { CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; uint32_t pir = env->spr_cb[SPR_PIR].default_value; + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + uint8_t *regs = &tctx->regs[ring]; uint8_t cppr; if (!xive2_nvp_is_hw(nvp)) { @@ -319,8 +909,10 @@ static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, 
XiveTCTX *tctx, nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0); xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2); - tctx->regs[TM_QW1_OS + TM_CPPR] = cppr; - /* we don't model LSMFB */ + sig_regs[TM_CPPR] = cppr; + regs[TM_LSMFB] = xive_get_field32(NVP2_W2_LSMFB, nvp->w2); + regs[TM_LGS] = xive_get_field32(NVP2_W2_LGS, nvp->w2); + regs[TM_T] = xive_get_field32(NVP2_W2_T, nvp->w2); nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1); nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1); @@ -329,9 +921,18 @@ static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, /* * Checkout privilege: 0:OS, 1:Pool, 2:Hard * - * TODO: we only support OS push/pull + * TODO: we don't support hard push/pull */ - nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0); + switch (ring) { + case TM_QW1_OS: + nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0); + break; + case TM_QW2_HV_POOL: + nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 1); + break; + default: + g_assert_not_reached(); + } xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1); @@ -339,12 +940,15 @@ static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, return cppr; } -static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, +/* Restore TIMA VP context from NVP backlog */ +static void xive2_tctx_restore_nvp(Xive2Router *xrtr, XiveTCTX *tctx, + uint8_t ring, uint8_t nvp_blk, uint32_t nvp_idx, bool do_restore) { - Xive2Nvp nvp; + uint8_t *regs = &tctx->regs[ring]; uint8_t ipb; + Xive2Nvp nvp; /* * Grab the associated thread interrupt context registers in the @@ -363,9 +967,8 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, } /* Automatically restore thread context registers */ - if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && - do_restore) { - xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp); + if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_restore) { + xive2_tctx_restore_ctx(xrtr, tctx, ring, nvp_blk, nvp_idx, &nvp); } ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2); @@ -373,42 +976,429 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0); xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); } - /* - * Always call xive_tctx_ipb_update(). Even if there were no - * escalation triggered, there could be a pending interrupt which - * was saved when the context was pulled and that we need to take - * into account by recalculating the PIPR (which is not - * saved/restored). - * It will also raise the External interrupt signal if needed. 
- */ - xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb); + /* IPB bits in the backlog are merged with the TIMA IPB bits */ + regs[TM_IPB] |= ipb; } /* - * Updating the OS CAM line can trigger a resend of interrupt + * Updating the ring CAM line can trigger a resend of interrupt */ -void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, - hwaddr offset, uint64_t value, unsigned size) +static void xive2_tm_push_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size, + uint8_t ring) { - uint32_t cam = value; - uint32_t qw1w2 = cpu_to_be32(cam); + uint32_t cam; + uint32_t w2; + uint64_t dw1; uint8_t nvp_blk; uint32_t nvp_idx; - bool vo; + bool v; bool do_restore; - xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore); + if (xive_ring_valid(tctx, ring)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Attempt to push VP to enabled" + " ring 0x%02x\n", ring); + return; + } /* First update the thead context */ - memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); + switch (size) { + case 1: + tctx->regs[ring + TM_WORD2] = value & 0xff; + cam = xive2_tctx_hw_cam_line(xptr, tctx); + cam |= ((value & 0xc0) << 24); /* V and H bits */ + break; + case 4: + cam = value; + w2 = cpu_to_be32(cam); + memcpy(&tctx->regs[ring + TM_WORD2], &w2, 4); + break; + case 8: + cam = value >> 32; + dw1 = cpu_to_be64(value); + memcpy(&tctx->regs[ring + TM_WORD2], &dw1, 8); + break; + default: + g_assert_not_reached(); + } + + xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &v, &do_restore); /* Check the interrupt pending bits */ - if (vo) { - xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx, - do_restore); + if (v) { + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint8_t cur_ring; + + xive2_tctx_restore_nvp(xrtr, tctx, ring, + nvp_blk, nvp_idx, do_restore); + + for (cur_ring = TM_QW1_OS; cur_ring <= ring; + cur_ring += XIVE_TM_RING_SIZE) { + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, cur_ring); + uint8_t nsr = sig_regs[TM_NSR]; + + if (!xive_ring_valid(tctx, cur_ring)) { + continue; + } + + if (cur_ring == TM_QW2_HV_POOL) { + if (xive_nsr_indicates_exception(cur_ring, nsr)) { + g_assert(xive_nsr_exception_ring(cur_ring, nsr) == + TM_QW3_HV_PHYS); + xive2_redistribute(xrtr, tctx, + xive_nsr_exception_ring(ring, nsr)); + } + xive2_tctx_process_pending(tctx, TM_QW3_HV_PHYS); + break; + } + xive2_tctx_process_pending(tctx, cur_ring); + } } } +void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tm_push_ctx(xptr, tctx, offset, value, size, TM_QW1_OS); +} + +void xive2_tm_push_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tm_push_ctx(xptr, tctx, offset, value, size, TM_QW2_HV_POOL); +} + +void xive2_tm_push_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tm_push_ctx(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS); +} + +/* returns -1 if ring is invalid, but still populates block and index */ +static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring, + uint8_t *nvp_blk, uint32_t *nvp_idx) +{ + uint32_t w2; + uint32_t cam = 0; + int rc = 0; + + w2 = xive_tctx_word2(&tctx->regs[ring]); + switch (ring) { + case TM_QW1_OS: + if (!(be32_to_cpu(w2) & TM2_QW1W2_VO)) { + rc = -1; + } + cam = xive_get_field32(TM2_QW1W2_OS_CAM, w2); + break; + case TM_QW2_HV_POOL: + if (!(be32_to_cpu(w2) & TM2_QW2W2_VP)) { + rc = -1; + } + cam = xive_get_field32(TM2_QW2W2_POOL_CAM, w2); + break; + case 
TM_QW3_HV_PHYS: + if (!(be32_to_cpu(w2) & TM2_QW3W2_VT)) { + rc = -1; + } + cam = xive2_tctx_hw_cam_line(tctx->xptr, tctx); + break; + default: + rc = -1; + } + *nvp_blk = xive2_nvp_blk(cam); + *nvp_idx = xive2_nvp_idx(cam); + return rc; +} + +static void xive2_tctx_accept_el(XivePresenter *xptr, XiveTCTX *tctx, + uint8_t ring, uint8_t cl_ring) +{ + uint64_t rd; + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint32_t nvp_idx, xive2_cfg; + uint8_t nvp_blk; + Xive2Nvp nvp; + uint64_t phys_addr; + uint8_t OGen = 0; + + xive2_tctx_get_nvp_indexes(tctx, cl_ring, &nvp_blk, &nvp_idx); + + if (xive2_router_get_nvp(xrtr, (uint8_t)nvp_blk, nvp_idx, &nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + if (!xive2_nvp_is_valid(&nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + + rd = xive_tctx_accept(tctx, ring); + + if (ring == TM_QW1_OS) { + OGen = tctx->regs[ring + TM_OGEN]; + } + xive2_cfg = xive2_router_get_config(xrtr); + phys_addr = xive2_nvp_reporting_addr(&nvp); + uint8_t report_data[REPORT_LINE_GEN1_SIZE]; + memset(report_data, 0xff, sizeof(report_data)); + if ((OGen == 1) || (xive2_cfg & XIVE2_GEN1_TIMA_OS)) { + report_data[8] = (rd >> 8) & 0xff; + report_data[9] = rd & 0xff; + } else { + report_data[0] = (rd >> 8) & 0xff; + report_data[1] = rd & 0xff; + } + cpu_physical_memory_write(phys_addr, report_data, REPORT_LINE_GEN1_SIZE); +} + +void xive2_tm_ack_os_el(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tctx_accept_el(xptr, tctx, TM_QW1_OS, TM_QW1_OS); +} + +/* Re-calculate and present pending interrupts */ +static void xive2_tctx_process_pending(XiveTCTX *tctx, uint8_t sig_ring) +{ + uint8_t *sig_regs = &tctx->regs[sig_ring]; + Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr); + uint8_t backlog_prio; + uint8_t first_group; + uint8_t group_level; + uint8_t pipr_min; + uint8_t lsmfb_min; + uint8_t ring_min; + uint8_t cppr = sig_regs[TM_CPPR]; + bool group_enabled; + Xive2Nvp nvp; + int rc; + + g_assert(sig_ring == TM_QW3_HV_PHYS || sig_ring == TM_QW1_OS); + g_assert(sig_regs[TM_WORD2] & 0x80); + g_assert(!xive_nsr_indicates_group_exception(sig_ring, sig_regs[TM_NSR])); + + /* + * Recompute the PIPR based on local pending interrupts. It will + * be adjusted below if needed in case of pending group interrupts. + */ +again: + pipr_min = xive_ipb_to_pipr(sig_regs[TM_IPB]); + group_enabled = !!sig_regs[TM_LGS]; + lsmfb_min = group_enabled ? sig_regs[TM_LSMFB] : 0xff; + ring_min = sig_ring; + group_level = 0; + + /* PHYS updates also depend on POOL values */ + if (sig_ring == TM_QW3_HV_PHYS) { + uint8_t *pool_regs = &tctx->regs[TM_QW2_HV_POOL]; + + /* POOL values only matter if POOL ctx is valid */ + if (pool_regs[TM_WORD2] & 0x80) { + uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]); + uint8_t pool_lsmfb = pool_regs[TM_LSMFB]; + + /* + * Determine highest priority interrupt and + * remember which ring has it. 
+ */ + if (pool_pipr < pipr_min) { + pipr_min = pool_pipr; + if (pool_pipr < lsmfb_min) { + ring_min = TM_QW2_HV_POOL; + } + } + + /* Values needed for group priority calculation */ + if (pool_regs[TM_LGS] && (pool_lsmfb < lsmfb_min)) { + group_enabled = true; + lsmfb_min = pool_lsmfb; + if (lsmfb_min < pipr_min) { + ring_min = TM_QW2_HV_POOL; + } + } + } + } + + if (group_enabled && + lsmfb_min < cppr && + lsmfb_min < pipr_min) { + + uint8_t nvp_blk; + uint32_t nvp_idx; + + /* + * Thread has seen a group interrupt with a higher priority + * than the new cppr or pending local interrupt. Check the + * backlog + */ + rc = xive2_tctx_get_nvp_indexes(tctx, ring_min, &nvp_blk, &nvp_idx); + if (rc) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid " + "context\n"); + return; + } + + if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + if (!xive2_nvp_is_valid(&nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0); + if (!first_group) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + backlog_prio = xive2_presenter_backlog_scan(tctx->xptr, + nvp_blk, nvp_idx, + first_group, &group_level); + tctx->regs[ring_min + TM_LSMFB] = backlog_prio; + if (backlog_prio != lsmfb_min) { + /* + * If the group backlog scan finds a less favored or no interrupt, + * then re-do the processing which may turn up a more favored + * interrupt from IPB or the other pool. Backlog should not + * find a priority < LSMFB. + */ + g_assert(backlog_prio >= lsmfb_min); + goto again; + } + + xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx, + backlog_prio, group_level); + pipr_min = backlog_prio; + } + + if (pipr_min > cppr) { + pipr_min = cppr; + } + xive_tctx_pipr_set(tctx, ring_min, pipr_min, group_level); +} + +/* NOTE: CPPR only exists for TM_QW1_OS and TM_QW3_HV_PHYS */ +static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t sig_ring, uint8_t cppr) +{ + uint8_t *sig_regs = &tctx->regs[sig_ring]; + Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr); + uint8_t old_cppr; + uint8_t nsr = sig_regs[TM_NSR]; + + g_assert(sig_ring == TM_QW1_OS || sig_ring == TM_QW3_HV_PHYS); + + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0); + + /* XXX: should show pool IPB for PHYS ring */ + trace_xive_tctx_set_cppr(tctx->cs->cpu_index, sig_ring, + sig_regs[TM_IPB], sig_regs[TM_PIPR], + cppr, nsr); + + if (cppr > XIVE_PRIORITY_MAX) { + cppr = 0xff; + } + + old_cppr = sig_regs[TM_CPPR]; + sig_regs[TM_CPPR] = cppr; + + /* Handle increased CPPR priority (lower value) */ + if (cppr < old_cppr) { + if (cppr <= sig_regs[TM_PIPR]) { + /* CPPR lowered below PIPR, must un-present interrupt */ + if (xive_nsr_indicates_exception(sig_ring, nsr)) { + if (xive_nsr_indicates_group_exception(sig_ring, nsr)) { + /* redistribute precluded active grp interrupt */ + xive2_redistribute(xrtr, tctx, + xive_nsr_exception_ring(sig_ring, nsr)); + return; + } + } + + /* interrupt is VP directed, pending in IPB */ + xive_tctx_pipr_set(tctx, sig_ring, cppr, 0); + return; + } else { + /* CPPR was lowered, but still above PIPR. No action needed. 
*/ + return; + } + } + + /* CPPR didn't change, nothing needs to be done */ + if (cppr == old_cppr) { + return; + } + + /* CPPR priority decreased (higher value) */ + if (!xive_nsr_indicates_exception(sig_ring, nsr)) { + xive2_tctx_process_pending(tctx, sig_ring); + } +} + +void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff); +} + +void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); +} + +/* + * Adjust the IPB to allow a CPU to process event queues of other + * priorities during one physical interrupt cycle. + */ +void xive2_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint8_t ring = TM_QW1_OS; + uint8_t *regs = &tctx->regs[ring]; + uint8_t priority = value & 0xff; + + /* + * XXX: should this simply set a bit in IPB and wait for it to be picked + * up next cycle, or is it supposed to present it now? We implement the + * latter here. + */ + regs[TM_IPB] |= xive_priority_to_ipb(priority); + if (xive_ipb_to_pipr(regs[TM_IPB]) >= regs[TM_PIPR]) { + return; + } + if (xive_nsr_indicates_group_exception(ring, regs[TM_NSR])) { + xive2_redistribute(xrtr, tctx, ring); + } + + xive_tctx_pipr_present(tctx, ring, priority, 0); +} + +static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target) +{ + uint8_t *regs = &tctx->regs[ring]; + + regs[TM_T] = target; +} + +void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tctx_set_target(tctx, TM_QW3_HV_PHYS, value & 0xff); +} + /* * XIVE Router (aka. Virtualization Controller or IVRE) */ @@ -471,31 +1461,63 @@ int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number); } -static int xive2_router_get_block_id(Xive2Router *xrtr) +int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd, + uint8_t nvgc_blk, uint32_t nvgc_idx, + Xive2Nvgc *nvgc) { Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); - return xrc->get_block_id(xrtr); + return xrc->get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc); } -/* - * Encode the HW CAM line with 7bit or 8bit thread id. The thread id - * width and block id width is configurable at the IC level. - * - * chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit) - * chipid << 24 | 0000 0000 0000 0001 threadid (8Bit) - */ -static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) +int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd, + uint8_t nvgc_blk, uint32_t nvgc_idx, + Xive2Nvgc *nvgc) { - Xive2Router *xrtr = XIVE2_ROUTER(xptr); - CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; - uint32_t pir = env->spr_cb[SPR_PIR].default_value; - uint8_t blk = xive2_router_get_block_id(xrtr); - uint8_t tid_shift = - xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 
8 : 7; - uint8_t tid_mask = (1 << tid_shift) - 1; + Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); - return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask)); + return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc); +} + +static bool xive2_vp_match_mask(uint32_t cam1, uint32_t cam2, + uint32_t vp_mask) +{ + return (cam1 & vp_mask) == (cam2 & vp_mask); +} + +static uint8_t xive2_get_vp_block_mask(uint32_t nvt_blk, bool crowd) +{ + uint8_t block_mask = 0b1111; + + /* 3 supported crowd sizes: 2, 4, 16 */ + if (crowd) { + uint32_t size = xive_get_vpgroup_size(nvt_blk); + + if (size != 2 && size != 4 && size != 16) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd size of %d", + size); + return block_mask; + } + block_mask &= ~(size - 1); + } + return block_mask; +} + +static uint32_t xive2_get_vp_index_mask(uint32_t nvt_index, bool cam_ignore) +{ + uint32_t index_mask = 0xFFFFFF; /* 24 bits */ + + if (cam_ignore) { + uint32_t size = xive_get_vpgroup_size(nvt_index); + + if (size < 2) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group size of %d", + size); + return index_mask; + } + index_mask &= ~(size - 1); + } + return index_mask; } /* @@ -504,7 +1526,8 @@ static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, uint8_t format, uint8_t nvt_blk, uint32_t nvt_idx, - bool cam_ignore, uint32_t logic_serv) + bool crowd, bool cam_ignore, + uint32_t logic_serv) { uint32_t cam = xive2_nvp_cam_line(nvt_blk, nvt_idx); uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]); @@ -512,44 +1535,51 @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); - /* - * TODO (PowerNV): ignore mode. The low order bits of the NVT - * identifier are ignored in the "CAM" match. 
- */ + uint32_t index_mask, vp_mask; + uint8_t block_mask; if (format == 0) { - if (cam_ignore == true) { - /* - * F=0 & i=1: Logical server notification (bits ignored at - * the end of the NVT identifier) - */ - qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n", - nvt_blk, nvt_idx); - return -1; - } + /* + * i=0: Specific NVT notification + * i=1: VP-group notification (bits ignored at the end of the + * NVT identifier) + */ + block_mask = xive2_get_vp_block_mask(nvt_blk, crowd); + index_mask = xive2_get_vp_index_mask(nvt_idx, cam_ignore); + vp_mask = xive2_nvp_cam_line(block_mask, index_mask); - /* F=0 & i=0: Specific NVT notification */ + /* For VP-group notifications, threads with LGS=0 are excluded */ /* PHYS ring */ if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) && - cam == xive2_tctx_hw_cam_line(xptr, tctx)) { + !(cam_ignore && tctx->regs[TM_QW3_HV_PHYS + TM_LGS] == 0) && + xive2_vp_match_mask(cam, + xive2_tctx_hw_cam_line(xptr, tctx), + vp_mask)) { return TM_QW3_HV_PHYS; } /* HV POOL ring */ if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) && - cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) { + !(cam_ignore && tctx->regs[TM_QW2_HV_POOL + TM_LGS] == 0) && + xive2_vp_match_mask(cam, + xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2), + vp_mask)) { return TM_QW2_HV_POOL; } /* OS ring */ if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) && - cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) { + !(cam_ignore && tctx->regs[TM_QW1_OS + TM_LGS] == 0) && + xive2_vp_match_mask(cam, + xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2), + vp_mask)) { return TM_QW1_OS; } } else { /* F=1 : User level Event-Based Branch (EBB) notification */ + /* FIXME: what if cam_ignore and LGS = 0 ? */ /* USER ring */ if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) && (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) && @@ -561,6 +1591,35 @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, return -1; } +bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) +{ + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + + /* + * The xive2_presenter_tctx_match() above tells if there's a match + * but for VP-group notification, we still need to look at the + * priority to know if the thread can take the interrupt now or if + * it is precluded. + */ + if (priority < sig_regs[TM_PIPR]) { + return false; + } + return true; +} + +void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority) +{ + uint8_t *regs = &tctx->regs[ring]; + + /* + * Called by the router during a VP-group notification when the + * thread matches but can't take the interrupt because it's + * already running at a more favored priority. It then stores the + * new interrupt priority in the LSMFB field. + */ + regs[TM_LSMFB] = priority; +} + static void xive2_router_realize(DeviceState *dev, Error **errp) { Xive2Router *xrtr = XIVE2_ROUTER(dev); @@ -595,15 +1654,16 @@ static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk, * message has the same parameters than in the function below. 
*/ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, - uint32_t end_idx, uint32_t end_data) + uint32_t end_idx, uint32_t end_data, + bool redistribute) { Xive2End end; uint8_t priority; uint8_t format; - bool found; - Xive2Nvp nvp; - uint8_t nvp_blk; - uint32_t nvp_idx; + XiveTCTXMatch match; + bool crowd, cam_ignore; + uint8_t nvx_blk; + uint32_t nvx_idx; /* END cache lookup */ if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) { @@ -618,7 +1678,14 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, return; } - if (xive2_end_is_enqueue(&end)) { + if (xive2_end_is_crowd(&end) && !xive2_end_is_ignore(&end)) { + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: invalid END, 'crowd' bit requires 'ignore' bit\n"); + return; + } + + if (!redistribute && xive2_end_is_enqueue(&end)) { + trace_xive_end_enqueue(end_blk, end_idx, end_data); xive2_end_enqueue(&end, end_data); /* Enqueuing event data modifies the EQ toggle and index */ xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1); @@ -662,40 +1729,39 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, /* * Follows IVPE notification */ - nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6); - nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6); - - /* NVP cache lookup */ - if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n", - nvp_blk, nvp_idx); - return; - } - - if (!xive2_nvp_is_valid(&nvp)) { - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n", - nvp_blk, nvp_idx); - return; - } - - found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx, - xive2_end_is_ignore(&end), - priority, - xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7)); + nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6); + nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6); + crowd = xive2_end_is_crowd(&end); + cam_ignore = xive2_end_is_ignore(&end); /* TODO: Auto EOI. */ + if (xive_presenter_match(xrtr->xfb, format, nvx_blk, nvx_idx, + crowd, cam_ignore, priority, + xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7), + &match)) { + XiveTCTX *tctx = match.tctx; + uint8_t ring = match.ring; + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + uint8_t nsr = sig_regs[TM_NSR]; + uint8_t group_level; + + if (priority < sig_regs[TM_PIPR] && + xive_nsr_indicates_group_exception(ring, nsr)) { + xive2_redistribute(xrtr, tctx, xive_nsr_exception_ring(ring, nsr)); + } - if (found) { + group_level = xive_get_group_level(crowd, cam_ignore, nvx_blk, nvx_idx); + trace_xive_presenter_notify(nvx_blk, nvx_idx, ring, group_level); + xive_tctx_pipr_present(tctx, ring, priority, group_level); return; } /* * If no matching NVP is dispatched on a HW thread : * - specific VP: update the NVP structure if backlog is activated - * - logical server : forward request to IVPE (not supported) + * - VP-group: update the backlog counter for that priority in the NVG */ if (xive2_end_is_backlog(&end)) { - uint8_t ipb; if (format == 1) { qemu_log_mask(LOG_GUEST_ERROR, @@ -704,19 +1770,77 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, return; } - /* - * Record the IPB in the associated NVP structure for later - * use. The presenter will resend the interrupt when the vCPU - * is dispatched again on a HW thread. 
- */ - ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) | - xive_priority_to_ipb(priority); - nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); - xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); + if (!cam_ignore) { + uint8_t ipb; + Xive2Nvp nvp; - /* - * On HW, follows a "Broadcast Backlog" to IVPEs - */ + /* NVP cache lookup */ + if (xive2_router_get_nvp(xrtr, nvx_blk, nvx_idx, &nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n", + nvx_blk, nvx_idx); + return; + } + + if (!xive2_nvp_is_valid(&nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n", + nvx_blk, nvx_idx); + return; + } + + /* + * Record the IPB in the associated NVP structure for later + * use. The presenter will resend the interrupt when the vCPU + * is dispatched again on a HW thread. + */ + ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) | + xive_priority_to_ipb(priority); + nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); + xive2_router_write_nvp(xrtr, nvx_blk, nvx_idx, &nvp, 2); + } else { + Xive2Nvgc nvgc; + uint32_t backlog; + + /* + * For groups and crowds, the per-priority backlog + * counters are stored in the NVG/NVC structures + */ + if (xive2_router_get_nvgc(xrtr, crowd, + nvx_blk, nvx_idx, &nvgc)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no %s %x/%x\n", + crowd ? "NVC" : "NVG", nvx_blk, nvx_idx); + return; + } + + if (!xive2_nvgc_is_valid(&nvgc)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n", + nvx_blk, nvx_idx); + return; + } + + /* + * Increment the backlog counter for that priority. + * We only call broadcast the first time the counter is + * incremented. broadcast will set the LSMFB field of the TIMA of + * relevant threads so that they know an interrupt is pending. + */ + backlog = xive2_nvgc_get_backlog(&nvgc, priority) + 1; + xive2_nvgc_set_backlog(&nvgc, priority, backlog); + xive2_router_write_nvgc(xrtr, crowd, nvx_blk, nvx_idx, &nvgc); + + if (backlog == 1) { + XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb); + xfc->broadcast(xrtr->xfb, nvx_blk, nvx_idx, + crowd, cam_ignore, priority); + + if (!xive2_end_is_precluded_escalation(&end)) { + /* + * The interrupt will be picked up when the + * matching thread lowers its priority level + */ + return; + } + } + } } do_escalation: @@ -740,18 +1864,41 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, } } - /* - * The END trigger becomes an Escalation trigger - */ - xive2_router_end_notify(xrtr, - xive_get_field32(END2_W4_END_BLOCK, end.w4), - xive_get_field32(END2_W4_ESC_END_INDEX, end.w4), - xive_get_field32(END2_W5_ESC_END_DATA, end.w5)); + if (xive2_end_is_escalate_end(&end)) { + /* + * Perform END Adaptive escalation processing + * The END trigger becomes an Escalation trigger + */ + uint8_t esc_blk = xive_get_field32(END2_W4_END_BLOCK, end.w4); + uint32_t esc_idx = xive_get_field32(END2_W4_ESC_END_INDEX, end.w4); + uint32_t esc_data = xive_get_field32(END2_W5_ESC_END_DATA, end.w5); + trace_xive_escalate_end(end_blk, end_idx, esc_blk, esc_idx, esc_data); + xive2_router_end_notify(xrtr, esc_blk, esc_idx, esc_data, false); + } /* end END adaptive escalation */ + + else { + uint32_t lisn; /* Logical Interrupt Source Number */ + + /* + * Perform ESB escalation processing + * E[N] == 1 --> N + * Req[Block] <- E[ESB_Block] + * Req[Index] <- E[ESB_Index] + * Req[Offset] <- 0x000 + * Execute Req command + */ + lisn = XIVE_EAS(xive_get_field32(END2_W4_END_BLOCK, end.w4), + xive_get_field32(END2_W4_ESC_END_INDEX, end.w4)); + + trace_xive_escalate_esb(end_blk, end_idx, lisn); + 
xive2_notify(xrtr, lisn, true /* pq_checked */); + } + + return; } -void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) +void xive2_notify(Xive2Router *xrtr , uint32_t lisn, bool pq_checked) { - Xive2Router *xrtr = XIVE2_ROUTER(xn); uint8_t eas_blk = XIVE_EAS_BLOCK(lisn); uint32_t eas_idx = XIVE_EAS_INDEX(lisn); Xive2Eas eas; @@ -794,22 +1941,39 @@ void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) return; } + /* TODO: add support for EAS resume */ + if (xive2_eas_is_resume(&eas)) { + qemu_log_mask(LOG_UNIMP, + "XIVE: EAS resume processing unimplemented - LISN %x\n", + lisn); + return; + } + /* * The event trigger becomes an END trigger */ xive2_router_end_notify(xrtr, - xive_get_field64(EAS2_END_BLOCK, eas.w), - xive_get_field64(EAS2_END_INDEX, eas.w), - xive_get_field64(EAS2_END_DATA, eas.w)); + xive_get_field64(EAS2_END_BLOCK, eas.w), + xive_get_field64(EAS2_END_INDEX, eas.w), + xive_get_field64(EAS2_END_DATA, eas.w), + false); + return; +} + +void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xn); + + xive2_notify(xrtr, lisn, pq_checked); + return; } -static Property xive2_router_properties[] = { +static const Property xive2_router_properties[] = { DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb, TYPE_XIVE_FABRIC, XiveFabric *), - DEFINE_PROP_END_OF_LIST(), }; -static void xive2_router_class_init(ObjectClass *klass, void *data) +static void xive2_router_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); @@ -828,7 +1992,7 @@ static const TypeInfo xive2_router_info = { .instance_size = sizeof(Xive2Router), .class_size = sizeof(Xive2RouterClass), .class_init = xive2_router_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_XIVE_NOTIFIER }, { TYPE_XIVE_PRESENTER }, { } @@ -1017,15 +2181,14 @@ static void xive2_end_source_realize(DeviceState *dev, Error **errp) (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends); } -static Property xive2_end_source_properties[] = { +static const Property xive2_end_source_properties[] = { DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0), DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K), DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER, Xive2Router *), - DEFINE_PROP_END_OF_LIST(), }; -static void xive2_end_source_class_init(ObjectClass *klass, void *data) +static void xive2_end_source_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/xlnx-pmu-iomod-intc.c b/hw/intc/xlnx-pmu-iomod-intc.c index 12bd1a3fff3..9200585e325 100644 --- a/hw/intc/xlnx-pmu-iomod-intc.c +++ b/hw/intc/xlnx-pmu-iomod-intc.c @@ -474,11 +474,10 @@ static const MemoryRegionOps xlnx_pmu_io_intc_ops = { }, }; -static Property xlnx_pmu_io_intc_properties[] = { +static const Property xlnx_pmu_io_intc_properties[] = { DEFINE_PROP_UINT32("intc-intr-size", XlnxPMUIOIntc, cfg.intr_size, 0), DEFINE_PROP_UINT32("intc-level-edge", XlnxPMUIOIntc, cfg.level_edge, 0), DEFINE_PROP_UINT32("intc-positive", XlnxPMUIOIntc, cfg.positive, 0), - DEFINE_PROP_END_OF_LIST(), }; static void xlnx_pmu_io_intc_realize(DeviceState *dev, Error **errp) @@ -532,11 +531,11 @@ static const VMStateDescription vmstate_xlnx_pmu_io_intc = { } }; -static void xlnx_pmu_io_intc_class_init(ObjectClass *klass, void *data) +static void xlnx_pmu_io_intc_class_init(ObjectClass *klass, const 
void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = xlnx_pmu_io_intc_reset; + device_class_set_legacy_reset(dc, xlnx_pmu_io_intc_reset); dc->realize = xlnx_pmu_io_intc_realize; dc->vmsd = &vmstate_xlnx_pmu_io_intc; device_class_set_props(dc, xlnx_pmu_io_intc_properties); diff --git a/hw/intc/xlnx-zynqmp-ipi.c b/hw/intc/xlnx-zynqmp-ipi.c index 509ee799cc2..610cd0e3164 100644 --- a/hw/intc/xlnx-zynqmp-ipi.c +++ b/hw/intc/xlnx-zynqmp-ipi.c @@ -355,11 +355,11 @@ static const VMStateDescription vmstate_zynqmp_pmu_ipi = { } }; -static void xlnx_zynqmp_ipi_class_init(ObjectClass *klass, void *data) +static void xlnx_zynqmp_ipi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = xlnx_zynqmp_ipi_reset; + device_class_set_legacy_reset(dc, xlnx_zynqmp_ipi_reset); dc->realize = xlnx_zynqmp_ipi_realize; dc->vmsd = &vmstate_zynqmp_pmu_ipi; } diff --git a/hw/ipack/Kconfig b/hw/ipack/Kconfig index f8da24a62be..28d668727c8 100644 --- a/hw/ipack/Kconfig +++ b/hw/ipack/Kconfig @@ -1,4 +1,8 @@ config IPACK bool + +config TPCI200 + bool + select IPACK default y if PCI_DEVICES depends on PCI diff --git a/hw/ipack/ipack.c b/hw/ipack/ipack.c index c39dbb481f2..ab602bff734 100644 --- a/hw/ipack/ipack.c +++ b/hw/ipack/ipack.c @@ -55,30 +55,26 @@ static void ipack_device_realize(DeviceState *dev, Error **errp) } bus->free_slot = idev->slot + 1; - idev->irq = qemu_allocate_irqs(bus->set_irq, idev, 2); + qemu_init_irqs(idev->irq, ARRAY_SIZE(idev->irq), bus->set_irq, idev); k->realize(dev, errp); } static void ipack_device_unrealize(DeviceState *dev) { - IPackDevice *idev = IPACK_DEVICE(dev); IPackDeviceClass *k = IPACK_DEVICE_GET_CLASS(dev); if (k->unrealize) { k->unrealize(dev); return; } - - qemu_free_irqs(idev->irq, 2); } -static Property ipack_device_props[] = { +static const Property ipack_device_props[] = { DEFINE_PROP_INT32("slot", IPackDevice, slot, -1), - DEFINE_PROP_END_OF_LIST() }; -static void ipack_device_class_init(ObjectClass *klass, void *data) +static void ipack_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); diff --git a/hw/ipack/meson.build b/hw/ipack/meson.build index 26567f1068e..e4805228926 100644 --- a/hw/ipack/meson.build +++ b/hw/ipack/meson.build @@ -1 +1,2 @@ -system_ss.add(when: 'CONFIG_IPACK', if_true: files('ipack.c', 'tpci200.c')) +system_ss.add(when: 'CONFIG_IPACK', if_true: files('ipack.c')) +system_ss.add(when: 'CONFIG_TPCI200', if_true: files('tpci200.c')) diff --git a/hw/ipack/tpci200.c b/hw/ipack/tpci200.c index 88eef4b8308..40b30517c75 100644 --- a/hw/ipack/tpci200.c +++ b/hw/ipack/tpci200.c @@ -275,11 +275,11 @@ static void tpci200_write_las0(void *opaque, hwaddr addr, uint64_t val, if (ip != NULL) { if (val & STATUS_INT(i, 0)) { DPRINTF("Clear IP %c INT0# status\n", 'A' + i); - qemu_irq_lower(ip->irq[0]); + qemu_irq_lower(&ip->irq[0]); } if (val & STATUS_INT(i, 1)) { DPRINTF("Clear IP %c INT1# status\n", 'A' + i); - qemu_irq_lower(ip->irq[1]); + qemu_irq_lower(&ip->irq[1]); } } @@ -344,7 +344,7 @@ static uint64_t tpci200_read_las1(void *opaque, hwaddr addr, unsigned size) bool int_set = s->status & STATUS_INT(ip_n, intno); bool int_edge_sensitive = s->ctrl[ip_n] & CTRL_INT_EDGE(intno); if (int_set && !int_edge_sensitive) { - qemu_irq_lower(ip->irq[intno]); + qemu_irq_lower(&ip->irq[intno]); } } @@ -629,7 +629,7 @@ static const VMStateDescription vmstate_tpci200 = { } }; -static void tpci200_class_init(ObjectClass *klass, void *data) +static void 
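/*
 * Illustrative sketch, not part of the patch: the ipack hunks replace the
 * heap-allocated qemu_irq pair (qemu_allocate_irqs()/qemu_free_irqs()) with
 * IRQ objects embedded in the device state, initialised in place with
 * qemu_init_irqs(); callers then take the address of an element, hence
 * qemu_irq_lower(&ip->irq[n]).  Presumably IPackDevice.irq became an
 * IRQState array in the header (not shown in these hunks); the struct below
 * is an invented stand-in, not the real definition.
 */
typedef struct ToyIPackDevice {
    IRQState irq[2];            /* embedded: nothing to free on unrealize */
} ToyIPackDevice;

static void toy_ipack_realize(ToyIPackDevice *idev, qemu_irq_handler handler,
                              void *opaque)
{
    /* Initialise both embedded IRQ lines in one call. */
    qemu_init_irqs(idev->irq, ARRAY_SIZE(idev->irq), handler, opaque);
}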
tpci200_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -650,7 +650,7 @@ static const TypeInfo tpci200_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(TPCI200State), .class_init = tpci200_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/ipmi/ipmi.c b/hw/ipmi/ipmi.c index bbb07b151e9..b91e487e1b8 100644 --- a/hw/ipmi/ipmi.c +++ b/hw/ipmi/ipmi.c @@ -26,7 +26,7 @@ #include "hw/ipmi/ipmi.h" #include "hw/qdev-properties.h" #include "qom/object_interfaces.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "qapi/error.h" #include "qemu/module.h" #include "hw/nmi.h" @@ -78,7 +78,7 @@ static int ipmi_do_hw_op(IPMIInterface *s, enum ipmi_op op, int checkonly) } } -static void ipmi_interface_class_init(ObjectClass *class, void *data) +static void ipmi_interface_class_init(ObjectClass *class, const void *data) { IPMIInterfaceClass *ik = IPMI_INTERFACE_CLASS(class); @@ -108,12 +108,11 @@ void ipmi_bmc_find_and_link(Object *obj, Object **bmc) OBJ_PROP_LINK_STRONG); } -static Property ipmi_bmc_properties[] = { +static const Property ipmi_bmc_properties[] = { DEFINE_PROP_UINT8("slave_addr", IPMIBmc, slave_addr, 0x20), - DEFINE_PROP_END_OF_LIST(), }; -static void bmc_class_init(ObjectClass *oc, void *data) +static void bmc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/ipmi/ipmi_bmc_extern.c b/hw/ipmi/ipmi_bmc_extern.c index 29c5af3cc36..9f1ba7b2f87 100644 --- a/hw/ipmi/ipmi_bmc_extern.c +++ b/hw/ipmi/ipmi_bmc_extern.c @@ -142,7 +142,6 @@ static void continue_send(IPMIBmcExtern *ibe) qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 4000000000ULL); } } - return; } static void extern_timeout(void *opaque) @@ -214,7 +213,7 @@ static void ipmi_bmc_extern_handle_command(IPMIBmc *b, rsp[2] = err; ibe->waiting_rsp = false; k->handle_rsp(s, msg_id, rsp, 3); - goto out; + return; } addchar(ibe, msg_id); @@ -229,9 +228,6 @@ static void ipmi_bmc_extern_handle_command(IPMIBmc *b, /* Start the transmit */ continue_send(ibe); - - out: - return; } static void handle_hw_op(IPMIBmcExtern *ibe, unsigned char hw_op) @@ -497,8 +493,6 @@ static void ipmi_bmc_extern_realize(DeviceState *dev, Error **errp) qemu_chr_fe_set_handlers(&ibe->chr, can_receive, receive, chr_event, NULL, ibe, NULL, true); - - vmstate_register(NULL, 0, &vmstate_ipmi_bmc_extern, ibe); } static void ipmi_bmc_extern_init(Object *obj) @@ -515,12 +509,11 @@ static void ipmi_bmc_extern_finalize(Object *obj) timer_free(ibe->extern_timer); } -static Property ipmi_bmc_extern_properties[] = { +static const Property ipmi_bmc_extern_properties[] = { DEFINE_PROP_CHR("chardev", IPMIBmcExtern, chr), - DEFINE_PROP_END_OF_LIST(), }; -static void ipmi_bmc_extern_class_init(ObjectClass *oc, void *data) +static void ipmi_bmc_extern_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); IPMIBmcClass *bk = IPMI_BMC_CLASS(oc); @@ -529,6 +522,7 @@ static void ipmi_bmc_extern_class_init(ObjectClass *oc, void *data) bk->handle_reset = ipmi_bmc_extern_handle_reset; dc->hotpluggable = false; dc->realize = ipmi_bmc_extern_realize; + dc->vmsd = &vmstate_ipmi_bmc_extern; device_class_set_props(dc, ipmi_bmc_extern_properties); } diff --git a/hw/ipmi/ipmi_bmc_sim.c b/hw/ipmi/ipmi_bmc_sim.c index 33c839c65aa..04e1dcd0e76 100644 --- a/hw/ipmi/ipmi_bmc_sim.c +++ b/hw/ipmi/ipmi_bmc_sim.c @@ -23,7 
+23,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qemu/timer.h" #include "hw/ipmi/ipmi.h" #include "qemu/error-report.h" @@ -70,6 +70,7 @@ #define IPMI_CMD_GET_MSG 0x33 #define IPMI_CMD_SEND_MSG 0x34 #define IPMI_CMD_READ_EVT_MSG_BUF 0x35 +#define IPMI_CMD_GET_CHANNEL_INFO 0x42 #define IPMI_NETFN_STORAGE 0x0a @@ -234,6 +235,7 @@ struct IPMIBmcSim { #define IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE_SET(s) \ (IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE & (s)->msg_flags) +#define IPMI_BMC_GLOBAL_ENABLES_SUPPORTED 0x0f #define IPMI_BMC_RCV_MSG_QUEUE_INT_BIT 0 #define IPMI_BMC_EVBUF_FULL_INT_BIT 1 #define IPMI_BMC_EVENT_MSG_BUF_BIT 2 @@ -463,14 +465,12 @@ void ipmi_bmc_gen_event(IPMIBmc *b, uint8_t *evt, bool log) } if (ibs->msg_flags & IPMI_BMC_MSG_FLAG_EVT_BUF_FULL) { - goto out; + return; } memcpy(ibs->evtbuf, evt, 16); ibs->msg_flags |= IPMI_BMC_MSG_FLAG_EVT_BUF_FULL; k->set_atn(s, 1, attn_irq_enabled(ibs)); - out: - return; } static void gen_event(IPMIBmcSim *ibs, unsigned int sens_num, uint8_t deassert, uint8_t evd1, uint8_t evd2, uint8_t evd3) @@ -513,7 +513,8 @@ static void gen_event(IPMIBmcSim *ibs, unsigned int sens_num, uint8_t deassert, static void sensor_set_discrete_bit(IPMIBmcSim *ibs, unsigned int sensor, unsigned int bit, unsigned int val, - uint8_t evd1, uint8_t evd2, uint8_t evd3) + uint8_t evd1, uint8_t evd2, uint8_t evd3, + bool do_log) { IPMISensor *sens; uint16_t mask; @@ -533,7 +534,7 @@ static void sensor_set_discrete_bit(IPMIBmcSim *ibs, unsigned int sensor, return; /* Already asserted */ } sens->assert_states |= mask & sens->assert_suppt; - if (sens->assert_enable & mask & sens->assert_states) { + if (do_log && (sens->assert_enable & mask & sens->assert_states)) { /* Send an event on assert */ gen_event(ibs, sensor, 0, evd1, evd2, evd3); } @@ -543,7 +544,7 @@ static void sensor_set_discrete_bit(IPMIBmcSim *ibs, unsigned int sensor, return; /* Already deasserted */ } sens->deassert_states |= mask & sens->deassert_suppt; - if (sens->deassert_enable & mask & sens->deassert_states) { + if (do_log && (sens->deassert_enable & mask & sens->deassert_states)) { /* Send an event on deassert */ gen_event(ibs, sensor, 1, evd1, evd2, evd3); } @@ -699,6 +700,7 @@ static void ipmi_sim_handle_timeout(IPMIBmcSim *ibs) { IPMIInterface *s = ibs->parent.intf; IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + bool do_log = !IPMI_BMC_WATCHDOG_GET_DONT_LOG(ibs); if (!ibs->watchdog_running) { goto out; @@ -710,14 +712,16 @@ static void ipmi_sim_handle_timeout(IPMIBmcSim *ibs) ibs->msg_flags |= IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK; k->do_hw_op(s, IPMI_SEND_NMI, 0); sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 8, 1, - 0xc8, (2 << 4) | 0xf, 0xff); + 0xc8, (2 << 4) | 0xf, 0xff, + do_log); break; case IPMI_BMC_WATCHDOG_PRE_MSG_INT: ibs->msg_flags |= IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK; k->set_atn(s, 1, attn_irq_enabled(ibs)); sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 8, 1, - 0xc8, (3 << 4) | 0xf, 0xff); + 0xc8, (3 << 4) | 0xf, 0xff, + do_log); break; default: @@ -737,24 +741,28 @@ static void ipmi_sim_handle_timeout(IPMIBmcSim *ibs) switch (IPMI_BMC_WATCHDOG_GET_ACTION(ibs)) { case IPMI_BMC_WATCHDOG_ACTION_NONE: sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 0, 1, - 0xc0, ibs->watchdog_use & 0xf, 0xff); + 0xc0, ibs->watchdog_use & 0xf, 0xff, + do_log); break; case IPMI_BMC_WATCHDOG_ACTION_RESET: sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 1, 1, - 0xc1, ibs->watchdog_use & 0xf, 0xff); + 0xc1, ibs->watchdog_use & 0xf, 0xff, + 
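/*
 * Illustrative sketch, not part of the patch: the BMC simulator hunks above
 * thread a do_log flag, derived from the watchdog "don't log" bit, into
 * sensor_set_discrete_bit() so the sensor state still changes while SEL
 * event generation is suppressed.  Standalone toy version of that gating
 * (all names invented):
 */
#include <stdbool.h>
#include <stdint.h>

static uint16_t toy_assert_states;

static void toy_log_event(unsigned sensor)
{
    /* Would append an event to the SEL. */
}

static void toy_set_discrete_bit(unsigned sensor, unsigned bit, bool do_log)
{
    uint16_t mask = 1u << bit;

    if (toy_assert_states & mask) {
        return;                     /* already asserted */
    }
    toy_assert_states |= mask;      /* the state always updates ... */
    if (do_log) {
        toy_log_event(sensor);      /* ... the event only when allowed */
    }
}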
do_log); k->do_hw_op(s, IPMI_RESET_CHASSIS, 0); break; case IPMI_BMC_WATCHDOG_ACTION_POWER_DOWN: sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 2, 1, - 0xc2, ibs->watchdog_use & 0xf, 0xff); + 0xc2, ibs->watchdog_use & 0xf, 0xff, + do_log); k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 0); break; case IPMI_BMC_WATCHDOG_ACTION_POWER_CYCLE: sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 2, 1, - 0xc3, ibs->watchdog_use & 0xf, 0xff); + 0xc3, ibs->watchdog_use & 0xf, 0xff, + do_log); k->do_hw_op(s, IPMI_POWERCYCLE_CHASSIS, 0); break; } @@ -925,7 +933,14 @@ static void set_bmc_global_enables(IPMIBmcSim *ibs, uint8_t *cmd, unsigned int cmd_len, RspBuffer *rsp) { - set_global_enables(ibs, cmd[2]); + uint8_t val = cmd[2]; + + if (val & ~IPMI_BMC_GLOBAL_ENABLES_SUPPORTED) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + + set_global_enables(ibs, val); } static void get_bmc_global_enables(IPMIBmcSim *ibs, @@ -980,7 +995,7 @@ static void get_msg(IPMIBmcSim *ibs, if (QTAILQ_EMPTY(&ibs->rcvbufs)) { rsp_buffer_set_error(rsp, 0x80); /* Queue empty */ - goto out; + return; } rsp_buffer_push(rsp, 0); /* Channel 0 */ msg = QTAILQ_FIRST(&ibs->rcvbufs); @@ -995,9 +1010,6 @@ static void get_msg(IPMIBmcSim *ibs, ibs->msg_flags &= ~IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE; k->set_atn(s, attn_set(ibs), attn_irq_enabled(ibs)); } - -out: - return; } static unsigned char @@ -1020,8 +1032,8 @@ static void send_msg(IPMIBmcSim *ibs, uint8_t *buf; uint8_t netfn, rqLun, rsLun, rqSeq; - if (cmd[2] != 0) { - /* We only handle channel 0 with no options */ + if (cmd[2] != IPMI_CHANNEL_IPMB) { + /* We only handle channel 0h (IPMB) with no options */ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); return; } @@ -1219,6 +1231,68 @@ static void get_watchdog_timer(IPMIBmcSim *ibs, } } +static void get_channel_info(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + IPMIFwInfo info = {}; + uint8_t ch = cmd[2] & 0x0f; + + /* Only define channel 0h (IPMB) and Fh (system interface) */ + + if (ch == 0x0e) { /* "This channel" */ + ch = IPMI_CHANNEL_SYSTEM; + } + rsp_buffer_push(rsp, ch); + + if (ch != IPMI_CHANNEL_IPMB && ch != IPMI_CHANNEL_SYSTEM) { + /* Not a supported channel */ + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + + if (k->get_fwinfo) { + k->get_fwinfo(s, &info); + } + + if (ch == IPMI_CHANNEL_IPMB) { + rsp_buffer_push(rsp, IPMI_CHANNEL_MEDIUM_IPMB); + rsp_buffer_push(rsp, IPMI_CHANNEL_PROTOCOL_IPMB); + } else { /* IPMI_CHANNEL_SYSTEM */ + rsp_buffer_push(rsp, IPMI_CHANNEL_MEDIUM_SYSTEM); + rsp_buffer_push(rsp, info.ipmi_channel_protocol); + } + + rsp_buffer_push(rsp, 0x00); /* Session-less */ + + /* IPMI Enterprise Number for Vendor ID */ + rsp_buffer_push(rsp, 0xf2); + rsp_buffer_push(rsp, 0x1b); + rsp_buffer_push(rsp, 0x00); + + if (ch == IPMI_CHANNEL_SYSTEM) { + uint8_t irq; + + if (info.irq_source == IPMI_ISA_IRQ) { + irq = info.interrupt_number; + } else if (info.irq_source == IPMI_PCI_IRQ) { + irq = 0x10 + info.interrupt_number; + } else { + irq = 0xff; /* no interrupt / unspecified */ + } + + /* Both interrupts use the same irq number */ + rsp_buffer_push(rsp, irq); + rsp_buffer_push(rsp, irq); + } else { + /* Reserved */ + rsp_buffer_push(rsp, 0x00); + rsp_buffer_push(rsp, 0x00); + } +} + static void get_sdr_rep_info(IPMIBmcSim *ibs, uint8_t *cmd, unsigned int cmd_len, RspBuffer *rsp) @@ -2015,6 +2089,7 @@ static const IPMICmdHandler app_cmds[] = { 
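/*
 * Illustrative sketch, not part of the patch: the new Get Channel Info
 * handler above encodes the interrupt auxiliary bytes for the system
 * interface channel as an ISA IRQ number verbatim, a PCI INTx pin as
 * 0x10 + pin, and 0xff when nothing is assigned.  Standalone restatement of
 * that encoding (toy_* names are invented; the constants mirror the
 * handler):
 */
#include <stdint.h>

enum toy_irq_source { TOY_ISA_IRQ, TOY_PCI_IRQ, TOY_NO_IRQ };

static uint8_t toy_channel_irq_byte(enum toy_irq_source src, uint8_t number)
{
    switch (src) {
    case TOY_ISA_IRQ:
        return number;              /* e.g. ISA IRQ 5 -> 0x05 */
    case TOY_PCI_IRQ:
        return 0x10 + number;       /* e.g. INTA# (pin 0) -> 0x10 */
    default:
        return 0xff;                /* no interrupt / unspecified */
    }
}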
[IPMI_CMD_RESET_WATCHDOG_TIMER] = { reset_watchdog_timer }, [IPMI_CMD_SET_WATCHDOG_TIMER] = { set_watchdog_timer, 8 }, [IPMI_CMD_GET_WATCHDOG_TIMER] = { get_watchdog_timer }, + [IPMI_CMD_GET_CHANNEL_INFO] = { get_channel_info, 3 }, }; static const IPMINetfn app_netfn = { .cmd_nums = ARRAY_SIZE(app_cmds), @@ -2187,11 +2262,9 @@ static void ipmi_sim_realize(DeviceState *dev, Error **errp) register_cmds(ibs); ibs->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ipmi_timeout, ibs); - - vmstate_register(NULL, 0, &vmstate_ipmi_sim, ibs); } -static Property ipmi_sim_properties[] = { +static const Property ipmi_sim_properties[] = { DEFINE_PROP_UINT16("fruareasize", IPMIBmcSim, fru.areasize, 1024), DEFINE_PROP_STRING("frudatafile", IPMIBmcSim, fru.filename), DEFINE_PROP_STRING("sdrfile", IPMIBmcSim, sdr_filename), @@ -2203,16 +2276,16 @@ static Property ipmi_sim_properties[] = { DEFINE_PROP_UINT32("mfg_id", IPMIBmcSim, mfg_id, 0), DEFINE_PROP_UINT16("product_id", IPMIBmcSim, product_id, 0), DEFINE_PROP_UUID_NODEFAULT("guid", IPMIBmcSim, uuid), - DEFINE_PROP_END_OF_LIST(), }; -static void ipmi_sim_class_init(ObjectClass *oc, void *data) +static void ipmi_sim_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); IPMIBmcClass *bk = IPMI_BMC_CLASS(oc); dc->hotpluggable = false; dc->realize = ipmi_sim_realize; + dc->vmsd = &vmstate_ipmi_sim; device_class_set_props(dc, ipmi_sim_properties); bk->handle_command = ipmi_sim_handle_command; } diff --git a/hw/ipmi/ipmi_bt.c b/hw/ipmi/ipmi_bt.c index 583fc64730c..e01d02fe528 100644 --- a/hw/ipmi/ipmi_bt.c +++ b/hw/ipmi/ipmi_bt.c @@ -98,14 +98,14 @@ static void ipmi_bt_handle_event(IPMIInterface *ii) IPMIBT *ib = iic->get_backend_data(ii); if (ib->inlen < 4) { - goto out; + return; } /* Note that overruns are handled by handle_command */ if (ib->inmsg[0] != (ib->inlen - 1)) { /* Length mismatch, just ignore. 
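/*
 * Illustrative sketch, not part of the patch: several IPMI hunks in this
 * series stop calling vmstate_register() by hand in realize and instead
 * point dc->vmsd at the VMStateDescription, letting qdev register the
 * migration state automatically.  Minimal shape of the new idiom
 * (hypothetical "mydev" device; vmstate_mydev and mydev_realize are assumed
 * to exist):
 */
static void mydev_class_init(ObjectClass *oc, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = mydev_realize;    /* realize no longer touches vmstate */
    dc->vmsd = &vmstate_mydev;      /* was: vmstate_register(NULL, 0, ...) */
}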
*/ IPMI_BT_SET_BBUSY(ib->control_reg, 1); ib->inlen = 0; - goto out; + return; } if ((ib->inmsg[1] == (IPMI_NETFN_APP << 2)) && (ib->inmsg[3] == IPMI_CMD_GET_BT_INTF_CAP)) { @@ -136,7 +136,7 @@ static void ipmi_bt_handle_event(IPMIInterface *ii) IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 1); ipmi_bt_raise_irq(ib); } - goto out; + return; } ib->waiting_seq = ib->inmsg[2]; ib->inmsg[2] = ib->inmsg[1]; @@ -145,8 +145,6 @@ static void ipmi_bt_handle_event(IPMIInterface *ii) bk->handle_command(ib->bmc, ib->inmsg + 2, ib->inlen - 2, sizeof(ib->inmsg), ib->waiting_rsp); } - out: - return; } static void ipmi_bt_handle_rsp(IPMIInterface *ii, uint8_t msg_id, @@ -419,6 +417,8 @@ void ipmi_bt_get_fwinfo(struct IPMIBT *ib, IPMIFwInfo *info) info->interface_type = IPMI_SMBIOS_BT; info->ipmi_spec_major_revision = 2; info->ipmi_spec_minor_revision = 0; + /* BT System Interface Format, IPMI v1.5 */ + info->ipmi_channel_protocol = IPMI_CHANNEL_PROTOCOL_BT_15; info->base_address = ib->io_base; info->register_length = ib->io_length; info->register_spacing = 1; diff --git a/hw/ipmi/ipmi_kcs.c b/hw/ipmi/ipmi_kcs.c index c15977cab4c..d5cfe6c0689 100644 --- a/hw/ipmi/ipmi_kcs.c +++ b/hw/ipmi/ipmi_kcs.c @@ -168,7 +168,7 @@ static void ipmi_kcs_handle_event(IPMIInterface *ii) ik->outpos = 0; bk->handle_command(ik->bmc, ik->inmsg, ik->inlen, sizeof(ik->inmsg), ik->waiting_rsp); - goto out_noibf; + return; } else if (ik->cmd_reg == IPMI_KCS_WRITE_END_CMD) { ik->cmd_reg = -1; ik->write_end = 1; @@ -197,8 +197,6 @@ static void ipmi_kcs_handle_event(IPMIInterface *ii) ik->cmd_reg = -1; ik->data_in_reg = -1; IPMI_KCS_SET_IBF(ik->status_reg, 0); - out_noibf: - return; } static void ipmi_kcs_handle_rsp(IPMIInterface *ii, uint8_t msg_id, @@ -405,6 +403,7 @@ void ipmi_kcs_get_fwinfo(IPMIKCS *ik, IPMIFwInfo *info) info->interface_type = IPMI_SMBIOS_KCS; info->ipmi_spec_major_revision = 2; info->ipmi_spec_minor_revision = 0; + info->ipmi_channel_protocol = IPMI_CHANNEL_PROTOCOL_KCS; info->base_address = ik->io_base; info->i2c_slave_address = ik->bmc->slave_addr; info->register_length = ik->io_length; diff --git a/hw/ipmi/isa_ipmi_bt.c b/hw/ipmi/isa_ipmi_bt.c index 7b36d514945..0ad91ccf689 100644 --- a/hw/ipmi/isa_ipmi_bt.c +++ b/hw/ipmi/isa_ipmi_bt.c @@ -49,6 +49,7 @@ static void isa_ipmi_bt_get_fwinfo(struct IPMIInterface *ii, IPMIFwInfo *info) ISAIPMIBTDevice *iib = ISA_IPMI_BT(ii); ipmi_bt_get_fwinfo(&iib->bt, info); + info->irq_source = IPMI_ISA_IRQ; info->interrupt_number = iib->isairq; info->i2c_slave_address = iib->bt.bmc->slave_addr; info->uuid = iib->uuid; @@ -117,8 +118,6 @@ static void isa_ipmi_bt_realize(DeviceState *dev, Error **errp) qdev_set_legacy_instance_id(dev, iib->bt.io_base, iib->bt.io_length); isa_register_ioport(isadev, &iib->bt.io, iib->bt.io_base); - - vmstate_register(NULL, 0, &vmstate_ISAIPMIBTDevice, dev); } static void isa_ipmi_bt_init(Object *obj) @@ -135,19 +134,19 @@ static void *isa_ipmi_bt_get_backend_data(IPMIInterface *ii) return &iib->bt; } -static Property ipmi_isa_properties[] = { +static const Property ipmi_isa_properties[] = { DEFINE_PROP_UINT32("ioport", ISAIPMIBTDevice, bt.io_base, 0xe4), DEFINE_PROP_INT32("irq", ISAIPMIBTDevice, isairq, 5), - DEFINE_PROP_END_OF_LIST(), }; -static void isa_ipmi_bt_class_init(ObjectClass *oc, void *data) +static void isa_ipmi_bt_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc); AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(oc); dc->realize = isa_ipmi_bt_realize; + dc->vmsd 
= &vmstate_ISAIPMIBTDevice; device_class_set_props(dc, ipmi_isa_properties); iic->get_backend_data = isa_ipmi_bt_get_backend_data; @@ -162,7 +161,7 @@ static const TypeInfo isa_ipmi_bt_info = { .instance_size = sizeof(ISAIPMIBTDevice), .instance_init = isa_ipmi_bt_init, .class_init = isa_ipmi_bt_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_IPMI_INTERFACE }, { TYPE_ACPI_DEV_AML_IF }, { } diff --git a/hw/ipmi/isa_ipmi_kcs.c b/hw/ipmi/isa_ipmi_kcs.c index f52b32e590b..418d234e0f1 100644 --- a/hw/ipmi/isa_ipmi_kcs.c +++ b/hw/ipmi/isa_ipmi_kcs.c @@ -49,6 +49,7 @@ static void isa_ipmi_kcs_get_fwinfo(IPMIInterface *ii, IPMIFwInfo *info) ISAIPMIKCSDevice *iik = ISA_IPMI_KCS(ii); ipmi_kcs_get_fwinfo(&iik->kcs, info); + info->irq_source = IPMI_ISA_IRQ; info->interrupt_number = iik->isairq; info->uuid = iik->uuid; } @@ -72,6 +73,10 @@ static bool vmstate_kcs_before_version2(void *opaque, int version) return version <= 1; } +/* + * Version 1 had an incorrect name, it clashed with the BT IPMI + * device, so receive it, but transmit a different version. + */ static const VMStateDescription vmstate_ISAIPMIKCSDevice = { .name = TYPE_IPMI_INTERFACE, .version_id = 2, @@ -119,13 +124,6 @@ static void ipmi_isa_realize(DeviceState *dev, Error **errp) qdev_set_legacy_instance_id(dev, iik->kcs.io_base, iik->kcs.io_length); isa_register_ioport(isadev, &iik->kcs.io, iik->kcs.io_base); - - /* - * Version 1 had an incorrect name, it clashed with the BT - * IPMI device, so receive it, but transmit a different - * version. - */ - vmstate_register(NULL, 0, &vmstate_ISAIPMIKCSDevice, iik); } static void isa_ipmi_kcs_init(Object *obj) @@ -142,19 +140,19 @@ static void *isa_ipmi_kcs_get_backend_data(IPMIInterface *ii) return &iik->kcs; } -static Property ipmi_isa_properties[] = { +static const Property ipmi_isa_properties[] = { DEFINE_PROP_UINT32("ioport", ISAIPMIKCSDevice, kcs.io_base, 0xca2), DEFINE_PROP_INT32("irq", ISAIPMIKCSDevice, isairq, 5), - DEFINE_PROP_END_OF_LIST(), }; -static void isa_ipmi_kcs_class_init(ObjectClass *oc, void *data) +static void isa_ipmi_kcs_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc); AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(oc); dc->realize = ipmi_isa_realize; + dc->vmsd = &vmstate_ISAIPMIKCSDevice; device_class_set_props(dc, ipmi_isa_properties); iic->get_backend_data = isa_ipmi_kcs_get_backend_data; @@ -169,7 +167,7 @@ static const TypeInfo isa_ipmi_kcs_info = { .instance_size = sizeof(ISAIPMIKCSDevice), .instance_init = isa_ipmi_kcs_init, .class_init = isa_ipmi_kcs_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_IPMI_INTERFACE }, { TYPE_ACPI_DEV_AML_IF }, { } diff --git a/hw/ipmi/pci_ipmi_bt.c b/hw/ipmi/pci_ipmi_bt.c index afeea6f3031..905101dcafd 100644 --- a/hw/ipmi/pci_ipmi_bt.c +++ b/hw/ipmi/pci_ipmi_bt.c @@ -38,49 +38,60 @@ struct PCIIPMIBTDevice { uint32_t uuid; }; -static void pci_ipmi_raise_irq(IPMIBT *ik) +static void pci_ipmi_bt_get_fwinfo(struct IPMIInterface *ii, IPMIFwInfo *info) { - PCIIPMIBTDevice *pik = ik->opaque; + PCIIPMIBTDevice *pib = PCI_IPMI_BT(ii); - pci_set_irq(&pik->dev, true); + ipmi_bt_get_fwinfo(&pib->bt, info); + info->irq_source = IPMI_PCI_IRQ; + info->interrupt_number = pci_intx(&pib->dev); + info->i2c_slave_address = pib->bt.bmc->slave_addr; + info->uuid = pib->uuid; } -static void pci_ipmi_lower_irq(IPMIBT *ik) +static void pci_ipmi_raise_irq(IPMIBT *ib) { - 
PCIIPMIBTDevice *pik = ik->opaque; + PCIIPMIBTDevice *pib = ib->opaque; - pci_set_irq(&pik->dev, false); + pci_set_irq(&pib->dev, true); +} + +static void pci_ipmi_lower_irq(IPMIBT *ib) +{ + PCIIPMIBTDevice *pib = ib->opaque; + + pci_set_irq(&pib->dev, false); } static void pci_ipmi_bt_realize(PCIDevice *pd, Error **errp) { Error *err = NULL; - PCIIPMIBTDevice *pik = PCI_IPMI_BT(pd); + PCIIPMIBTDevice *pib = PCI_IPMI_BT(pd); IPMIInterface *ii = IPMI_INTERFACE(pd); IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); - if (!pik->bt.bmc) { + if (!pib->bt.bmc) { error_setg(errp, "IPMI device requires a bmc attribute to be set"); return; } - pik->uuid = ipmi_next_uuid(); + pib->uuid = ipmi_next_uuid(); - pik->bt.bmc->intf = ii; - pik->bt.opaque = pik; + pib->bt.bmc->intf = ii; + pib->bt.opaque = pib; pci_config_set_prog_interface(pd->config, 0x02); /* BT */ pci_config_set_interrupt_pin(pd->config, 0x01); - pik->bt.use_irq = 1; - pik->bt.raise_irq = pci_ipmi_raise_irq; - pik->bt.lower_irq = pci_ipmi_lower_irq; + pib->bt.use_irq = 1; + pib->bt.raise_irq = pci_ipmi_raise_irq; + pib->bt.lower_irq = pci_ipmi_lower_irq; iic->init(ii, 8, &err); if (err) { error_propagate(errp, err); return; } - pci_register_bar(pd, 0, PCI_BASE_ADDRESS_SPACE_IO, &pik->bt.io); + pci_register_bar(pd, 0, PCI_BASE_ADDRESS_SPACE_IO, &pib->bt.io); } const VMStateDescription vmstate_PCIIPMIBTDevice = { @@ -96,19 +107,19 @@ const VMStateDescription vmstate_PCIIPMIBTDevice = { static void pci_ipmi_bt_instance_init(Object *obj) { - PCIIPMIBTDevice *pik = PCI_IPMI_BT(obj); + PCIIPMIBTDevice *pib = PCI_IPMI_BT(obj); - ipmi_bmc_find_and_link(obj, (Object **) &pik->bt.bmc); + ipmi_bmc_find_and_link(obj, (Object **) &pib->bt.bmc); } static void *pci_ipmi_bt_get_backend_data(IPMIInterface *ii) { - PCIIPMIBTDevice *pik = PCI_IPMI_BT(ii); + PCIIPMIBTDevice *pib = PCI_IPMI_BT(ii); - return &pik->bt; + return &pib->bt; } -static void pci_ipmi_bt_class_init(ObjectClass *oc, void *data) +static void pci_ipmi_bt_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc); @@ -125,6 +136,7 @@ static void pci_ipmi_bt_class_init(ObjectClass *oc, void *data) iic->get_backend_data = pci_ipmi_bt_get_backend_data; ipmi_bt_class_init(iic); + iic->get_fwinfo = pci_ipmi_bt_get_fwinfo; } static const TypeInfo pci_ipmi_bt_info = { @@ -133,7 +145,7 @@ static const TypeInfo pci_ipmi_bt_info = { .instance_size = sizeof(PCIIPMIBTDevice), .instance_init = pci_ipmi_bt_instance_init, .class_init = pci_ipmi_bt_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_IPMI_INTERFACE }, { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { } diff --git a/hw/ipmi/pci_ipmi_kcs.c b/hw/ipmi/pci_ipmi_kcs.c index 05ba97ec58f..4d6cde826ec 100644 --- a/hw/ipmi/pci_ipmi_kcs.c +++ b/hw/ipmi/pci_ipmi_kcs.c @@ -38,6 +38,16 @@ struct PCIIPMIKCSDevice { uint32_t uuid; }; +static void pci_ipmi_kcs_get_fwinfo(struct IPMIInterface *ii, IPMIFwInfo *info) +{ + PCIIPMIKCSDevice *pik = PCI_IPMI_KCS(ii); + + ipmi_kcs_get_fwinfo(&pik->kcs, info); + info->irq_source = IPMI_PCI_IRQ; + info->interrupt_number = pci_intx(&pik->dev); + info->uuid = pik->uuid; +} + static void pci_ipmi_raise_irq(IPMIKCS *ik) { PCIIPMIKCSDevice *pik = ik->opaque; @@ -108,7 +118,7 @@ static void *pci_ipmi_kcs_get_backend_data(IPMIInterface *ii) return &pik->kcs; } -static void pci_ipmi_kcs_class_init(ObjectClass *oc, void *data) +static void pci_ipmi_kcs_class_init(ObjectClass *oc, const void *data) { DeviceClass 
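/*
 * Illustrative sketch, not part of the patch: the PCI IPMI front-ends now
 * implement get_fwinfo so Get Channel Info (earlier in this series) can
 * report a PCI interrupt source.  pci_intx() presumably yields the 0-based
 * INTx pin (0 for INTA#), which the channel-info handler later encodes as
 * 0x10 + pin.  Toy restatement with invented types:
 */
struct toy_fwinfo {
    int irq_source;                     /* ISA vs PCI vs none */
    unsigned interrupt_number;
};

static void toy_fill_pci_fwinfo(struct toy_fwinfo *info, unsigned intx_pin)
{
    info->irq_source = 1;               /* stands in for IPMI_PCI_IRQ */
    info->interrupt_number = intx_pin;  /* e.g. 0 == INTA# */
}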
*dc = DEVICE_CLASS(oc); PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc); @@ -125,6 +135,7 @@ static void pci_ipmi_kcs_class_init(ObjectClass *oc, void *data) iic->get_backend_data = pci_ipmi_kcs_get_backend_data; ipmi_kcs_class_init(iic); + iic->get_fwinfo = pci_ipmi_kcs_get_fwinfo; } static const TypeInfo pci_ipmi_kcs_info = { @@ -133,7 +144,7 @@ static const TypeInfo pci_ipmi_kcs_info = { .instance_size = sizeof(PCIIPMIKCSDevice), .instance_init = pci_ipmi_kcs_instance_init, .class_init = pci_ipmi_kcs_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_IPMI_INTERFACE }, { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { } diff --git a/hw/ipmi/smbus_ipmi.c b/hw/ipmi/smbus_ipmi.c index 56865df7dbd..78c332de54d 100644 --- a/hw/ipmi/smbus_ipmi.c +++ b/hw/ipmi/smbus_ipmi.c @@ -351,7 +351,7 @@ static void smbus_ipmi_get_fwinfo(struct IPMIInterface *ii, IPMIFwInfo *info) info->uuid = sid->uuid; } -static void smbus_ipmi_class_init(ObjectClass *oc, void *data) +static void smbus_ipmi_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc); @@ -376,7 +376,7 @@ static const TypeInfo smbus_ipmi_info = { .instance_size = sizeof(SMBusIPMIDevice), .instance_init = smbus_ipmi_init, .class_init = smbus_ipmi_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_IPMI_INTERFACE }, { TYPE_ACPI_DEV_AML_IF }, { } diff --git a/hw/isa/fdc37m81x-superio.c b/hw/isa/fdc37m81x-superio.c index 55e91fbca17..c2a38f04b11 100644 --- a/hw/isa/fdc37m81x-superio.c +++ b/hw/isa/fdc37m81x-superio.c @@ -11,7 +11,7 @@ #include "qemu/osdep.h" #include "hw/isa/superio.h" -static void fdc37m81x_class_init(ObjectClass *klass, void *data) +static void fdc37m81x_class_init(ObjectClass *klass, const void *data) { ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c index cbaa152a899..06e8f0ce3e0 100644 --- a/hw/isa/i82378.c +++ b/hw/isa/i82378.c @@ -122,7 +122,7 @@ static void i82378_init(Object *obj) qdev_init_gpio_in(dev, i82378_request_pic_irq, 16); } -static void i82378_class_init(ObjectClass *klass, void *data) +static void i82378_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -142,7 +142,7 @@ static const TypeInfo i82378_type_info = { .instance_size = sizeof(I82378State), .instance_init = i82378_init, .class_init = i82378_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c index f1e0f140078..6c9802eb7ac 100644 --- a/hw/isa/isa-bus.c +++ b/hw/isa/isa-bus.c @@ -22,14 +22,14 @@ #include "qemu/module.h" #include "qapi/error.h" #include "hw/sysbus.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/isa/isa.h" static ISABus *isabus; static char *isabus_get_fw_dev_path(DeviceState *dev); -static void isa_bus_class_init(ObjectClass *klass, void *data) +static void isa_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); @@ -205,7 +205,7 @@ ISADevice *isa_vga_init(ISABus *bus) } } -static void isabus_bridge_class_init(ObjectClass *klass, void *data) +static void isabus_bridge_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -220,7 +220,7 @@ static const TypeInfo isabus_bridge_info = { .class_init = isabus_bridge_class_init, }; 
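/*
 * Illustrative sketch, not part of the patch: alongside the const
 * class_init data, the series also makes the compound-literal interface
 * lists in TypeInfo const.  Shape of the new idiom (TYPE_FOO, FooState and
 * foo_class_init are hypothetical):
 */
static const TypeInfo foo_type_info = {
    .name          = TYPE_FOO,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(FooState),
    .class_init    = foo_class_init,
    .interfaces    = (const InterfaceInfo[]) {  /* was: (InterfaceInfo[]) */
        { TYPE_IPMI_INTERFACE },
        { }
    },
};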
-static void isa_device_class_init(ObjectClass *klass, void *data) +static void isa_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->bus_type = TYPE_ISA_BUS; diff --git a/hw/isa/isa-superio.c b/hw/isa/isa-superio.c index a8c8c58ef7f..2853485977c 100644 --- a/hw/isa/isa-superio.c +++ b/hw/isa/isa-superio.c @@ -14,7 +14,7 @@ #include "qemu/error-report.h" #include "qemu/module.h" #include "qapi/error.h" -#include "sysemu/blockdev.h" +#include "system/blockdev.h" #include "chardev/char.h" #include "hw/char/parallel.h" #include "hw/block/fdc.h" @@ -22,7 +22,7 @@ #include "hw/qdev-properties.h" #include "hw/input/i8042.h" #include "hw/char/parallel-isa.h" -#include "hw/char/serial.h" +#include "hw/char/serial-isa.h" #include "trace.h" static void isa_superio_realize(DeviceState *dev, Error **errp) @@ -172,7 +172,7 @@ static void isa_superio_realize(DeviceState *dev, Error **errp) } } -static void isa_superio_class_init(ObjectClass *oc, void *data) +static void isa_superio_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c index bd727b2320b..304dffac322 100644 --- a/hw/isa/lpc_ich9.c +++ b/hw/isa/lpc_ich9.c @@ -43,10 +43,11 @@ #include "hw/southbridge/ich9.h" #include "hw/acpi/acpi.h" #include "hw/acpi/ich9.h" +#include "hw/acpi/ich9_timer.h" #include "hw/pci/pci_bus.h" #include "hw/qdev-properties.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/runstate.h" +#include "system/system.h" #include "hw/core/cpu.h" #include "hw/nvram/fw_cfg.h" #include "qemu/cutils.h" @@ -181,7 +182,6 @@ static uint64_t ich9_cc_read(void *opaque, hwaddr addr, } /* IRQ routing */ -/* */ static void ich9_lpc_rout(uint8_t pirq_rout, int *pic_irq, int *pic_dis) { *pic_irq = pirq_rout & ICH9_LPC_PIRQ_ROUT_MASK; @@ -531,6 +531,15 @@ ich9_lpc_pmcon_update(ICH9LPCState *lpc) uint16_t gen_pmcon_1 = pci_get_word(lpc->d.config + ICH9_LPC_GEN_PMCON_1); uint16_t wmask; + if (lpc->pm.swsmi_timer_enabled) { + ich9_pm_update_swsmi_timer( + &lpc->pm, lpc->pm.smi_en & ICH9_PMIO_SMI_EN_SWSMI_EN); + } + if (lpc->pm.periodic_timer_enabled) { + ich9_pm_update_periodic_timer( + &lpc->pm, lpc->pm.smi_en & ICH9_PMIO_SMI_EN_PERIODIC_EN); + } + if (gen_pmcon_1 & ICH9_LPC_GEN_PMCON_1_SMI_LOCK) { wmask = pci_get_word(lpc->d.wmask + ICH9_LPC_GEN_PMCON_1); wmask &= ~ICH9_LPC_GEN_PMCON_1_SMI_LOCK; @@ -816,7 +825,7 @@ static const VMStateDescription vmstate_ich9_lpc = { } }; -static Property ich9_lpc_properties[] = { +static const Property ich9_lpc_properties[] = { DEFINE_PROP_BOOL("noreboot", ICH9LPCState, pin_strap.spkr_hi, false), DEFINE_PROP_BOOL("smm-compat", ICH9LPCState, pm.smm_compat, false), DEFINE_PROP_BOOL("smm-enabled", ICH9LPCState, pm.smm_enabled, false), @@ -826,7 +835,10 @@ static Property ich9_lpc_properties[] = { ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT, true), DEFINE_PROP_BIT64("x-smi-cpu-hotunplug", ICH9LPCState, smi_host_features, ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT, true), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_BOOL("x-smi-swsmi-timer", ICH9LPCState, + pm.swsmi_timer_enabled, true), + DEFINE_PROP_BOOL("x-smi-periodic-timer", ICH9LPCState, + pm.periodic_timer_enabled, true), }; static void ich9_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev) @@ -862,7 +874,7 @@ static void build_ich9_isa_aml(AcpiDevAmlIf *adev, Aml *scope) qbus_build_aml(bus, scope); } -static void ich9_lpc_class_init(ObjectClass *klass, void *data) +static void ich9_lpc_class_init(ObjectClass *klass, 
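/*
 * Illustrative sketch, not part of the patch: the ICH9 LPC hunk above arms
 * the new SWSMI and periodic SMI timers only when the corresponding
 * x-smi-*-timer property is enabled *and* the guest has set the matching
 * bit in SMI_EN.  The pattern reduces to the toy helper below (names
 * invented; the real helpers are ich9_pm_update_swsmi_timer() and
 * ich9_pm_update_periodic_timer()):
 */
#include <stdbool.h>
#include <stdint.h>

static void toy_update_smi_timer(bool prop_enabled, uint32_t smi_en,
                                 uint32_t enable_bit,
                                 void (*update)(bool guest_enabled))
{
    if (prop_enabled) {
        /* The helper arms or cancels the timer based on the guest bit. */
        update((smi_en & enable_bit) != 0);
    }
}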
const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -871,7 +883,7 @@ static void ich9_lpc_class_init(ObjectClass *klass, void *data) AcpiDevAmlIfClass *amldevc = ACPI_DEV_AML_IF_CLASS(klass); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); - dc->reset = ich9_lpc_reset; + device_class_set_legacy_reset(dc, ich9_lpc_reset); k->realize = ich9_lpc_realize; dc->vmsd = &vmstate_ich9_lpc; device_class_set_props(dc, ich9_lpc_properties); @@ -902,7 +914,7 @@ static const TypeInfo ich9_lpc_info = { .instance_size = sizeof(ICH9LPCState), .instance_init = ich9_lpc_initfn, .class_init = ich9_lpc_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { TYPE_ACPI_DEVICE_IF }, { INTERFACE_CONVENTIONAL_PCI_DEVICE }, diff --git a/hw/isa/pc87312.c b/hw/isa/pc87312.c index 64dd17b537f..388da8f5900 100644 --- a/hw/isa/pc87312.c +++ b/hw/isa/pc87312.c @@ -327,18 +327,17 @@ static const VMStateDescription vmstate_pc87312 = { } }; -static Property pc87312_properties[] = { +static const Property pc87312_properties[] = { DEFINE_PROP_UINT16("iobase", PC87312State, iobase, 0x398), DEFINE_PROP_UINT8("config", PC87312State, config, 1), - DEFINE_PROP_END_OF_LIST() }; -static void pc87312_class_init(ObjectClass *klass, void *data) +static void pc87312_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); - dc->reset = pc87312_reset; + device_class_set_legacy_reset(dc, pc87312_reset); dc->vmsd = &vmstate_pc87312; device_class_set_parent_realize(dc, pc87312_realize, &sc->parent_realize); diff --git a/hw/isa/piix.c b/hw/isa/piix.c index 2d30711b178..52c14d3cd5b 100644 --- a/hw/isa/piix.c +++ b/hw/isa/piix.c @@ -34,7 +34,7 @@ #include "hw/ide/piix.h" #include "hw/intc/i8259.h" #include "hw/isa/isa.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "migration/vmstate.h" #include "hw/acpi/acpi_aml_interface.h" @@ -408,24 +408,23 @@ static void pci_piix_init(Object *obj) object_initialize_child(obj, "rtc", &d->rtc, TYPE_MC146818_RTC); } -static Property pci_piix_props[] = { +static const Property pci_piix_props[] = { DEFINE_PROP_UINT32("smb_io_base", PIIXState, smb_io_base, 0), DEFINE_PROP_BOOL("has-acpi", PIIXState, has_acpi, true), DEFINE_PROP_BOOL("has-pic", PIIXState, has_pic, true), DEFINE_PROP_BOOL("has-pit", PIIXState, has_pit, true), DEFINE_PROP_BOOL("has-usb", PIIXState, has_usb, true), DEFINE_PROP_BOOL("smm-enabled", PIIXState, smm_enabled, false), - DEFINE_PROP_END_OF_LIST(), }; -static void pci_piix_class_init(ObjectClass *klass, void *data) +static void pci_piix_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); k->config_write = piix_write_config; - dc->reset = piix_reset; + device_class_set_legacy_reset(dc, piix_reset); dc->desc = "ISA bridge"; dc->hotpluggable = false; k->vendor_id = PCI_VENDOR_ID_INTEL; @@ -446,7 +445,7 @@ static const TypeInfo piix_pci_type_info = { .instance_init = pci_piix_init, .abstract = true, .class_init = pci_piix_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { TYPE_ACPI_DEV_AML_IF }, { }, @@ -465,7 +464,7 @@ static void piix3_init(Object *obj) object_initialize_child(obj, "ide", &d->ide, TYPE_PIIX3_IDE); } -static void piix3_class_init(ObjectClass 
*klass, void *data) +static void piix3_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -495,7 +494,7 @@ static void piix4_init(Object *obj) object_initialize_child(obj, "ide", &s->ide, TYPE_PIIX4_IDE); } -static void piix4_class_init(ObjectClass *klass, void *data) +static void piix4_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); diff --git a/hw/isa/smc37c669-superio.c b/hw/isa/smc37c669-superio.c index d2e58c9a895..0ec63f520c5 100644 --- a/hw/isa/smc37c669-superio.c +++ b/hw/isa/smc37c669-superio.c @@ -58,7 +58,7 @@ static unsigned int get_fdc_dma(ISASuperIODevice *sio, uint8_t index) return 2; } -static void smc37c669_class_init(ObjectClass *klass, void *data) +static void smc37c669_class_init(ObjectClass *klass, const void *data) { ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); diff --git a/hw/isa/vt82c686.c b/hw/isa/vt82c686.c index 505b44c4e6b..337958617a4 100644 --- a/hw/isa/vt82c686.c +++ b/hw/isa/vt82c686.c @@ -17,7 +17,7 @@ #include "hw/isa/vt82c686.h" #include "hw/block/fdc.h" #include "hw/char/parallel-isa.h" -#include "hw/char/serial.h" +#include "hw/char/serial-isa.h" #include "hw/pci/pci.h" #include "hw/qdev-properties.h" #include "hw/ide/pci.h" @@ -220,11 +220,11 @@ typedef struct via_pm_init_info { uint16_t device_id; } ViaPMInitInfo; -static void via_pm_class_init(ObjectClass *klass, void *data) +static void via_pm_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - ViaPMInitInfo *info = data; + const ViaPMInitInfo *info = data; k->realize = via_pm_realize; k->config_write = pm_write_config; @@ -232,7 +232,7 @@ static void via_pm_class_init(ObjectClass *klass, void *data) k->device_id = info->device_id; k->class_id = PCI_CLASS_BRIDGE_OTHER; k->revision = 0x40; - dc->reset = via_pm_reset; + device_class_set_legacy_reset(dc, via_pm_reset); /* Reason: part of VIA south bridge, does not exist stand alone */ dc->user_creatable = false; dc->vmsd = &vmstate_acpi; @@ -243,7 +243,7 @@ static const TypeInfo via_pm_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(ViaPMState), .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -259,7 +259,7 @@ static const TypeInfo vt82c686b_pm_info = { .name = TYPE_VT82C686B_PM, .parent = TYPE_VIA_PM, .class_init = via_pm_class_init, - .class_data = (void *)&vt82c686b_pm_init_info, + .class_data = &vt82c686b_pm_init_info, }; static const ViaPMInitInfo vt8231_pm_init_info = { @@ -272,7 +272,7 @@ static const TypeInfo vt8231_pm_info = { .name = TYPE_VT8231_PM, .parent = TYPE_VIA_PM, .class_init = via_pm_class_init, - .class_data = (void *)&vt8231_pm_init_info, + .class_data = &vt8231_pm_init_info, }; @@ -337,7 +337,7 @@ static void via_superio_devices_enable(ViaSuperIOState *s, uint8_t data) isa_fdc_set_enabled(s->superio.floppy, data & BIT(4)); } -static void via_superio_class_init(ObjectClass *klass, void *data) +static void via_superio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); @@ -456,12 +456,12 @@ static void vt82c686b_superio_init(Object *obj) VIA_SUPERIO(obj)->io_ops = &vt82c686b_superio_cfg_ops; } -static void vt82c686b_superio_class_init(ObjectClass *klass, void *data) +static void 
vt82c686b_superio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); - dc->reset = vt82c686b_superio_reset; + device_class_set_legacy_reset(dc, vt82c686b_superio_reset); sc->serial.count = 2; sc->parallel.count = 1; sc->ide.count = 0; /* emulated by via-ide */ @@ -565,12 +565,12 @@ static void vt8231_superio_init(Object *obj) VIA_SUPERIO(obj)->io_ops = &vt8231_superio_cfg_ops; } -static void vt8231_superio_class_init(ObjectClass *klass, void *data) +static void vt8231_superio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); - dc->reset = vt8231_superio_reset; + device_class_set_legacy_reset(dc, vt8231_superio_reset); sc->serial.count = 1; sc->parallel.count = 1; sc->ide.count = 0; /* emulated by via-ide */ @@ -592,6 +592,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(ViaISAState, VIA_ISA) struct ViaISAState { PCIDevice dev; + + IRQState i8259_irq; qemu_irq cpu_intr; qemu_irq *isa_irqs_in; uint16_t irq_state[ISA_NUM_IRQS]; @@ -632,7 +634,7 @@ static const TypeInfo via_isa_info = { .instance_size = sizeof(ViaISAState), .instance_init = via_isa_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -715,13 +717,12 @@ static void via_isa_realize(PCIDevice *d, Error **errp) ViaISAState *s = VIA_ISA(d); DeviceState *dev = DEVICE(d); PCIBus *pci_bus = pci_get_bus(d); - qemu_irq *isa_irq; ISABus *isa_bus; int i; qdev_init_gpio_out_named(dev, &s->cpu_intr, "intr", 1); qdev_init_gpio_in_named(dev, via_isa_pirq, "pirq", PCI_NUM_PINS); - isa_irq = qemu_allocate_irqs(via_isa_request_i8259_irq, s, 1); + qemu_init_irq(&s->i8259_irq, via_isa_request_i8259_irq, s, 0); isa_bus = isa_bus_new(dev, pci_address_space(d), pci_address_space_io(d), errp); @@ -729,7 +730,7 @@ static void via_isa_realize(PCIDevice *d, Error **errp) return; } - s->isa_irqs_in = i8259_init(isa_bus, *isa_irq); + s->isa_irqs_in = i8259_init(isa_bus, &s->i8259_irq); isa_bus_register_input_irqs(isa_bus, s->isa_irqs_in); i8254_pit_init(isa_bus, 0x40, 0, NULL); i8257_dma_init(OBJECT(d), isa_bus, 0); @@ -832,7 +833,7 @@ static void vt82c686b_init(Object *obj) object_initialize_child(obj, "pm", &s->pm, TYPE_VT82C686B_PM); } -static void vt82c686b_class_init(ObjectClass *klass, void *data) +static void vt82c686b_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -843,7 +844,7 @@ static void vt82c686b_class_init(ObjectClass *klass, void *data) k->device_id = PCI_DEVICE_ID_VIA_82C686B_ISA; k->class_id = PCI_CLASS_BRIDGE_ISA; k->revision = 0x40; - dc->reset = vt82c686b_isa_reset; + device_class_set_legacy_reset(dc, vt82c686b_isa_reset); dc->desc = "ISA bridge"; dc->vmsd = &vmstate_via; /* Reason: part of VIA VT82C686 southbridge, needs to be wired up */ @@ -897,7 +898,7 @@ static void vt8231_init(Object *obj) object_initialize_child(obj, "pm", &s->pm, TYPE_VT8231_PM); } -static void vt8231_class_init(ObjectClass *klass, void *data) +static void vt8231_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -908,7 +909,7 @@ static void vt8231_class_init(ObjectClass *klass, void *data) k->device_id = PCI_DEVICE_ID_VIA_8231_ISA; k->class_id = PCI_CLASS_BRIDGE_ISA; k->revision = 0x10; - dc->reset = vt8231_isa_reset; + 
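/*
 * Illustrative sketch, not part of the patch: the VIA ISA hunk applies the
 * same embedded-IRQ pattern as the ipack change earlier in the series, for
 * a single line - the i8259 input IRQ now lives inside ViaISAState and is
 * initialised in place, so realize no longer leaks a qemu_allocate_irqs()
 * array.  Essence of the new wiring, restated as a helper (the helper
 * itself is invented; the two calls mirror the hunk):
 */
static void toy_wire_i8259(ViaISAState *s, ISABus *isa_bus)
{
    /* Initialise the embedded IRQ in place; nothing to free later. */
    qemu_init_irq(&s->i8259_irq, via_isa_request_i8259_irq, s, 0);
    s->isa_irqs_in = i8259_init(isa_bus, &s->i8259_irq);
}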
device_class_set_legacy_reset(dc, vt8231_isa_reset); dc->desc = "ISA bridge"; dc->vmsd = &vmstate_via; /* Reason: part of VIA VT8231 southbridge, needs to be wired up */ diff --git a/hw/loongarch/Kconfig b/hw/loongarch/Kconfig index 89be737726f..bb2838b7b53 100644 --- a/hw/loongarch/Kconfig +++ b/hw/loongarch/Kconfig @@ -5,19 +5,19 @@ config LOONGARCH_VIRT select DEVICE_TREE select PCI select PCI_EXPRESS_GENERIC_BRIDGE - imply VIRTIO_VGA imply PCI_DEVICES imply NVDIMM imply TPM_TIS_SYSBUS - select SERIAL + select SERIAL_MM select VIRTIO_PCI select PLATFORM_BUS - select LOONGSON_IPI + select LOONGARCH_IPI select LOONGARCH_PCH_PIC select LOONGARCH_PCH_MSI select LOONGARCH_EXTIOI select LS7A_RTC select SMBIOS + select ACPI_CPU_HOTPLUG select ACPI_PCI select ACPI_HW_REDUCED select FW_CFG_DMA diff --git a/hw/loongarch/acpi-build.c b/hw/loongarch/acpi-build.c deleted file mode 100644 index 72bfc35ae6c..00000000000 --- a/hw/loongarch/acpi-build.c +++ /dev/null @@ -1,661 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Support for generating ACPI tables and passing them to Guests - * - * Copyright (C) 2021 Loongson Technology Corporation Limited - */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "qemu/error-report.h" -#include "qemu/bitmap.h" -#include "hw/pci/pci.h" -#include "hw/core/cpu.h" -#include "target/loongarch/cpu.h" -#include "hw/acpi/acpi-defs.h" -#include "hw/acpi/acpi.h" -#include "hw/nvram/fw_cfg.h" -#include "hw/acpi/bios-linker-loader.h" -#include "migration/vmstate.h" -#include "hw/mem/memory-device.h" -#include "sysemu/reset.h" - -/* Supported chipsets: */ -#include "hw/pci-host/ls7a.h" -#include "hw/loongarch/virt.h" - -#include "hw/acpi/utils.h" -#include "hw/acpi/pci.h" - -#include "qom/qom-qobject.h" - -#include "hw/acpi/generic_event_device.h" -#include "hw/pci-host/gpex.h" -#include "sysemu/tpm.h" -#include "hw/platform-bus.h" -#include "hw/acpi/aml-build.h" -#include "hw/acpi/hmat.h" - -#define ACPI_BUILD_ALIGN_SIZE 0x1000 -#define ACPI_BUILD_TABLE_SIZE 0x20000 - -#ifdef DEBUG_ACPI_BUILD -#define ACPI_BUILD_DPRINTF(fmt, ...) \ - do {printf("ACPI_BUILD: " fmt, ## __VA_ARGS__); } while (0) -#else -#define ACPI_BUILD_DPRINTF(fmt, ...) -#endif - -/* build FADT */ -static void init_common_fadt_data(AcpiFadtData *data) -{ - AcpiFadtData fadt = { - /* ACPI 5.0: 4.1 Hardware-Reduced ACPI */ - .rev = 5, - .flags = ((1 << ACPI_FADT_F_HW_REDUCED_ACPI) | - (1 << ACPI_FADT_F_RESET_REG_SUP)), - - /* ACPI 5.0: 4.8.3.7 Sleep Control and Status Registers */ - .sleep_ctl = { - .space_id = AML_AS_SYSTEM_MEMORY, - .bit_width = 8, - .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_SLEEP_CTL, - }, - .sleep_sts = { - .space_id = AML_AS_SYSTEM_MEMORY, - .bit_width = 8, - .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_SLEEP_STS, - }, - - /* ACPI 5.0: 4.8.3.6 Reset Register */ - .reset_reg = { - .space_id = AML_AS_SYSTEM_MEMORY, - .bit_width = 8, - .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_RESET, - }, - .reset_val = ACPI_GED_RESET_VALUE, - }; - *data = fadt; -} - -static void acpi_align_size(GArray *blob, unsigned align) -{ - /* - * Align size to multiple of given size. This reduces the chance - * we need to change size in the future (breaking cross version migration). 
- */ - g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); -} - -/* build FACS */ -static void -build_facs(GArray *table_data) -{ - const char *sig = "FACS"; - const uint8_t reserved[40] = {}; - - g_array_append_vals(table_data, sig, 4); /* Signature */ - build_append_int_noprefix(table_data, 64, 4); /* Length */ - build_append_int_noprefix(table_data, 0, 4); /* Hardware Signature */ - build_append_int_noprefix(table_data, 0, 4); /* Firmware Waking Vector */ - build_append_int_noprefix(table_data, 0, 4); /* Global Lock */ - build_append_int_noprefix(table_data, 0, 4); /* Flags */ - g_array_append_vals(table_data, reserved, 40); /* Reserved */ -} - -/* build MADT */ -static void -build_madt(GArray *table_data, BIOSLinker *linker, - LoongArchVirtMachineState *lvms) -{ - MachineState *ms = MACHINE(lvms); - MachineClass *mc = MACHINE_GET_CLASS(ms); - const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms); - int i, arch_id; - AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = lvms->oem_id, - .oem_table_id = lvms->oem_table_id }; - - acpi_table_begin(&table, table_data); - - /* Local APIC Address */ - build_append_int_noprefix(table_data, 0, 4); - build_append_int_noprefix(table_data, 1 /* PCAT_COMPAT */, 4); /* Flags */ - - for (i = 0; i < arch_ids->len; i++) { - /* Processor Core Interrupt Controller Structure */ - arch_id = arch_ids->cpus[i].arch_id; - - build_append_int_noprefix(table_data, 17, 1); /* Type */ - build_append_int_noprefix(table_data, 15, 1); /* Length */ - build_append_int_noprefix(table_data, 1, 1); /* Version */ - build_append_int_noprefix(table_data, i, 4); /* ACPI Processor ID */ - build_append_int_noprefix(table_data, arch_id, 4); /* Core ID */ - build_append_int_noprefix(table_data, 1, 4); /* Flags */ - } - - /* Extend I/O Interrupt Controller Structure */ - build_append_int_noprefix(table_data, 20, 1); /* Type */ - build_append_int_noprefix(table_data, 13, 1); /* Length */ - build_append_int_noprefix(table_data, 1, 1); /* Version */ - build_append_int_noprefix(table_data, 3, 1); /* Cascade */ - build_append_int_noprefix(table_data, 0, 1); /* Node */ - build_append_int_noprefix(table_data, 0xffff, 8); /* Node map */ - - /* MSI Interrupt Controller Structure */ - build_append_int_noprefix(table_data, 21, 1); /* Type */ - build_append_int_noprefix(table_data, 19, 1); /* Length */ - build_append_int_noprefix(table_data, 1, 1); /* Version */ - build_append_int_noprefix(table_data, VIRT_PCH_MSI_ADDR_LOW, 8);/* Address */ - build_append_int_noprefix(table_data, 0x40, 4); /* Start */ - build_append_int_noprefix(table_data, 0xc0, 4); /* Count */ - - /* Bridge I/O Interrupt Controller Structure */ - build_append_int_noprefix(table_data, 22, 1); /* Type */ - build_append_int_noprefix(table_data, 17, 1); /* Length */ - build_append_int_noprefix(table_data, 1, 1); /* Version */ - build_append_int_noprefix(table_data, VIRT_PCH_REG_BASE, 8);/* Address */ - build_append_int_noprefix(table_data, 0x1000, 2); /* Size */ - build_append_int_noprefix(table_data, 0, 2); /* Id */ - build_append_int_noprefix(table_data, 0x40, 2); /* Base */ - - acpi_table_end(linker, &table); -} - -/* build SRAT */ -static void -build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) -{ - int i, arch_id, node_id; - hwaddr len, base, gap; - NodeInfo *numa_info; - int nodes, nb_numa_nodes = machine->numa_state->num_nodes; - LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine); - MachineClass *mc = MACHINE_GET_CLASS(lvms); - const CPUArchIdList *arch_ids = 
mc->possible_cpu_arch_ids(machine); - AcpiTable table = { .sig = "SRAT", .rev = 1, .oem_id = lvms->oem_id, - .oem_table_id = lvms->oem_table_id }; - - acpi_table_begin(&table, table_data); - build_append_int_noprefix(table_data, 1, 4); /* Reserved */ - build_append_int_noprefix(table_data, 0, 8); /* Reserved */ - - for (i = 0; i < arch_ids->len; ++i) { - arch_id = arch_ids->cpus[i].arch_id; - node_id = arch_ids->cpus[i].props.node_id; - - /* Processor Local APIC/SAPIC Affinity Structure */ - build_append_int_noprefix(table_data, 0, 1); /* Type */ - build_append_int_noprefix(table_data, 16, 1); /* Length */ - /* Proximity Domain [7:0] */ - build_append_int_noprefix(table_data, node_id, 1); - build_append_int_noprefix(table_data, arch_id, 1); /* APIC ID */ - /* Flags, Table 5-36 */ - build_append_int_noprefix(table_data, 1, 4); - build_append_int_noprefix(table_data, 0, 1); /* Local SAPIC EID */ - /* Proximity Domain [31:8] */ - build_append_int_noprefix(table_data, 0, 3); - build_append_int_noprefix(table_data, 0, 4); /* Reserved */ - } - - base = VIRT_LOWMEM_BASE; - gap = VIRT_LOWMEM_SIZE; - numa_info = machine->numa_state->nodes; - nodes = nb_numa_nodes; - if (!nodes) { - nodes = 1; - } - - for (i = 0; i < nodes; i++) { - if (nb_numa_nodes) { - len = numa_info[i].node_mem; - } else { - len = machine->ram_size; - } - - /* - * memory for the node splited into two part - * lowram: [base, +gap) - * highram: [VIRT_HIGHMEM_BASE, +(len - gap)) - */ - if (len >= gap) { - build_srat_memory(table_data, base, len, i, MEM_AFFINITY_ENABLED); - len -= gap; - base = VIRT_HIGHMEM_BASE; - gap = machine->ram_size - VIRT_LOWMEM_SIZE; - } - - if (len) { - build_srat_memory(table_data, base, len, i, MEM_AFFINITY_ENABLED); - base += len; - gap -= len; - } - } - - if (machine->device_memory) { - build_srat_memory(table_data, machine->device_memory->base, - memory_region_size(&machine->device_memory->mr), - nodes - 1, - MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED); - } - - acpi_table_end(linker, &table); -} - -typedef -struct AcpiBuildState { - /* Copy of table in RAM (for patching). */ - MemoryRegion *table_mr; - /* Is table patched? 
*/ - uint8_t patched; - void *rsdp; - MemoryRegion *rsdp_mr; - MemoryRegion *linker_mr; -} AcpiBuildState; - -static void build_uart_device_aml(Aml *table) -{ - Aml *dev; - Aml *crs; - Aml *pkg0, *pkg1, *pkg2; - uint32_t uart_irq = VIRT_UART_IRQ; - - Aml *scope = aml_scope("_SB"); - dev = aml_device("COMA"); - aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501"))); - aml_append(dev, aml_name_decl("_UID", aml_int(0))); - aml_append(dev, aml_name_decl("_CCA", aml_int(1))); - crs = aml_resource_template(); - aml_append(crs, - aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, - AML_NON_CACHEABLE, AML_READ_WRITE, - 0, VIRT_UART_BASE, VIRT_UART_BASE + VIRT_UART_SIZE - 1, - 0, VIRT_UART_SIZE)); - aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, - AML_SHARED, &uart_irq, 1)); - aml_append(dev, aml_name_decl("_CRS", crs)); - pkg0 = aml_package(0x2); - aml_append(pkg0, aml_int(0x05F5E100)); - aml_append(pkg0, aml_string("clock-frenquency")); - pkg1 = aml_package(0x1); - aml_append(pkg1, pkg0); - pkg2 = aml_package(0x2); - aml_append(pkg2, aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301")); - aml_append(pkg2, pkg1); - aml_append(dev, aml_name_decl("_DSD", pkg2)); - aml_append(scope, dev); - aml_append(table, scope); -} - -static void -build_la_ged_aml(Aml *dsdt, MachineState *machine) -{ - uint32_t event; - LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine); - - build_ged_aml(dsdt, "\\_SB."GED_DEVICE, - HOTPLUG_HANDLER(lvms->acpi_ged), - VIRT_SCI_IRQ, AML_SYSTEM_MEMORY, - VIRT_GED_EVT_ADDR); - event = object_property_get_uint(OBJECT(lvms->acpi_ged), - "ged-event", &error_abort); - if (event & ACPI_GED_MEM_HOTPLUG_EVT) { - build_memory_hotplug_aml(dsdt, machine->ram_slots, "\\_SB", NULL, - AML_SYSTEM_MEMORY, - VIRT_GED_MEM_ADDR); - } - acpi_dsdt_add_power_button(dsdt); -} - -static void build_pci_device_aml(Aml *scope, LoongArchVirtMachineState *lvms) -{ - struct GPEXConfig cfg = { - .mmio64.base = VIRT_PCI_MEM_BASE, - .mmio64.size = VIRT_PCI_MEM_SIZE, - .pio.base = VIRT_PCI_IO_BASE, - .pio.size = VIRT_PCI_IO_SIZE, - .ecam.base = VIRT_PCI_CFG_BASE, - .ecam.size = VIRT_PCI_CFG_SIZE, - .irq = VIRT_GSI_BASE + VIRT_DEVICE_IRQS, - .bus = lvms->pci_bus, - }; - - acpi_dsdt_add_gpex(scope, &cfg); -} - -static void build_flash_aml(Aml *scope, LoongArchVirtMachineState *lvms) -{ - Aml *dev, *crs; - MemoryRegion *flash_mem; - - hwaddr flash0_base; - hwaddr flash0_size; - - hwaddr flash1_base; - hwaddr flash1_size; - - flash_mem = pflash_cfi01_get_memory(lvms->flash[0]); - flash0_base = flash_mem->addr; - flash0_size = memory_region_size(flash_mem); - - flash_mem = pflash_cfi01_get_memory(lvms->flash[1]); - flash1_base = flash_mem->addr; - flash1_size = memory_region_size(flash_mem); - - dev = aml_device("FLS0"); - aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015"))); - aml_append(dev, aml_name_decl("_UID", aml_int(0))); - - crs = aml_resource_template(); - aml_append(crs, aml_memory32_fixed(flash0_base, flash0_size, - AML_READ_WRITE)); - aml_append(dev, aml_name_decl("_CRS", crs)); - aml_append(scope, dev); - - dev = aml_device("FLS1"); - aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015"))); - aml_append(dev, aml_name_decl("_UID", aml_int(1))); - - crs = aml_resource_template(); - aml_append(crs, aml_memory32_fixed(flash1_base, flash1_size, - AML_READ_WRITE)); - aml_append(dev, aml_name_decl("_CRS", crs)); - aml_append(scope, dev); -} - -#ifdef CONFIG_TPM -static void acpi_dsdt_add_tpm(Aml *scope, LoongArchVirtMachineState *vms) -{ - 
PlatformBusDevice *pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev); - hwaddr pbus_base = VIRT_PLATFORM_BUS_BASEADDRESS; - SysBusDevice *sbdev = SYS_BUS_DEVICE(tpm_find()); - MemoryRegion *sbdev_mr; - hwaddr tpm_base; - - if (!sbdev) { - return; - } - - tpm_base = platform_bus_get_mmio_addr(pbus, sbdev, 0); - assert(tpm_base != -1); - - tpm_base += pbus_base; - - sbdev_mr = sysbus_mmio_get_region(sbdev, 0); - - Aml *dev = aml_device("TPM0"); - aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101"))); - aml_append(dev, aml_name_decl("_STR", aml_string("TPM 2.0 Device"))); - aml_append(dev, aml_name_decl("_UID", aml_int(0))); - - Aml *crs = aml_resource_template(); - aml_append(crs, - aml_memory32_fixed(tpm_base, - (uint32_t)memory_region_size(sbdev_mr), - AML_READ_WRITE)); - aml_append(dev, aml_name_decl("_CRS", crs)); - aml_append(scope, dev); -} -#endif - -/* build DSDT */ -static void -build_dsdt(GArray *table_data, BIOSLinker *linker, MachineState *machine) -{ - Aml *dsdt, *scope, *pkg; - LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine); - AcpiTable table = { .sig = "DSDT", .rev = 1, .oem_id = lvms->oem_id, - .oem_table_id = lvms->oem_table_id }; - - acpi_table_begin(&table, table_data); - dsdt = init_aml_allocator(); - build_uart_device_aml(dsdt); - build_pci_device_aml(dsdt, lvms); - build_la_ged_aml(dsdt, machine); - build_flash_aml(dsdt, lvms); -#ifdef CONFIG_TPM - acpi_dsdt_add_tpm(dsdt, lvms); -#endif - /* System State Package */ - scope = aml_scope("\\"); - pkg = aml_package(4); - aml_append(pkg, aml_int(ACPI_GED_SLP_TYP_S5)); - aml_append(pkg, aml_int(0)); /* ignored */ - aml_append(pkg, aml_int(0)); /* reserved */ - aml_append(pkg, aml_int(0)); /* reserved */ - aml_append(scope, aml_name_decl("_S5", pkg)); - aml_append(dsdt, scope); - /* Copy AML table into ACPI tables blob and patch header there */ - g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); - acpi_table_end(linker, &table); - free_aml_allocator(); -} - -static void acpi_build(AcpiBuildTables *tables, MachineState *machine) -{ - LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine); - GArray *table_offsets; - AcpiFadtData fadt_data; - unsigned facs, rsdt, dsdt; - uint8_t *u; - GArray *tables_blob = tables->table_data; - - init_common_fadt_data(&fadt_data); - - table_offsets = g_array_new(false, true, sizeof(uint32_t)); - ACPI_BUILD_DPRINTF("init ACPI tables\n"); - - bios_linker_loader_alloc(tables->linker, - ACPI_BUILD_TABLE_FILE, tables_blob, - 64, false); - - /* - * FACS is pointed to by FADT. - * We place it first since it's the only table that has alignment - * requirements. 
- */ - facs = tables_blob->len; - build_facs(tables_blob); - - /* DSDT is pointed to by FADT */ - dsdt = tables_blob->len; - build_dsdt(tables_blob, tables->linker, machine); - - /* ACPI tables pointed to by RSDT */ - acpi_add_table(table_offsets, tables_blob); - fadt_data.facs_tbl_offset = &facs; - fadt_data.dsdt_tbl_offset = &dsdt; - fadt_data.xdsdt_tbl_offset = &dsdt; - build_fadt(tables_blob, tables->linker, &fadt_data, - lvms->oem_id, lvms->oem_table_id); - - acpi_add_table(table_offsets, tables_blob); - build_madt(tables_blob, tables->linker, lvms); - - acpi_add_table(table_offsets, tables_blob); - build_pptt(tables_blob, tables->linker, machine, - lvms->oem_id, lvms->oem_table_id); - - acpi_add_table(table_offsets, tables_blob); - build_srat(tables_blob, tables->linker, machine); - - if (machine->numa_state->num_nodes) { - if (machine->numa_state->have_numa_distance) { - acpi_add_table(table_offsets, tables_blob); - build_slit(tables_blob, tables->linker, machine, lvms->oem_id, - lvms->oem_table_id); - } - if (machine->numa_state->hmat_enabled) { - acpi_add_table(table_offsets, tables_blob); - build_hmat(tables_blob, tables->linker, machine->numa_state, - lvms->oem_id, lvms->oem_table_id); - } - } - - acpi_add_table(table_offsets, tables_blob); - { - AcpiMcfgInfo mcfg = { - .base = cpu_to_le64(VIRT_PCI_CFG_BASE), - .size = cpu_to_le64(VIRT_PCI_CFG_SIZE), - }; - build_mcfg(tables_blob, tables->linker, &mcfg, lvms->oem_id, - lvms->oem_table_id); - } - -#ifdef CONFIG_TPM - /* TPM info */ - if (tpm_get_version(tpm_find()) == TPM_VERSION_2_0) { - acpi_add_table(table_offsets, tables_blob); - build_tpm2(tables_blob, tables->linker, - tables->tcpalog, lvms->oem_id, - lvms->oem_table_id); - } -#endif - /* Add tables supplied by user (if any) */ - for (u = acpi_table_first(); u; u = acpi_table_next(u)) { - unsigned len = acpi_table_len(u); - - acpi_add_table(table_offsets, tables_blob); - g_array_append_vals(tables_blob, u, len); - } - - /* RSDT is pointed to by RSDP */ - rsdt = tables_blob->len; - build_rsdt(tables_blob, tables->linker, table_offsets, - lvms->oem_id, lvms->oem_table_id); - - /* RSDP is in FSEG memory, so allocate it separately */ - { - AcpiRsdpData rsdp_data = { - .revision = 0, - .oem_id = lvms->oem_id, - .xsdt_tbl_offset = NULL, - .rsdt_tbl_offset = &rsdt, - }; - build_rsdp(tables->rsdp, tables->linker, &rsdp_data); - } - - /* - * The align size is 128, warn if 64k is not enough therefore - * the align size could be resized. - */ - if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) { - warn_report("ACPI table size %u exceeds %d bytes," - " migration may not work", - tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2); - error_printf("Try removing CPUs, NUMA nodes, memory slots" - " or PCI bridges.\n"); - } - - acpi_align_size(tables->linker->cmd_blob, ACPI_BUILD_ALIGN_SIZE); - - /* Cleanup memory that's no longer used. */ - g_array_free(table_offsets, true); -} - -static void acpi_ram_update(MemoryRegion *mr, GArray *data) -{ - uint32_t size = acpi_data_len(data); - - /* - * Make sure RAM size is correct - in case it got changed - * e.g. by migration - */ - memory_region_ram_resize(mr, size, &error_abort); - - memcpy(memory_region_get_ram_ptr(mr), data->data, size); - memory_region_set_dirty(mr, 0, size); -} - -static void acpi_build_update(void *build_opaque) -{ - AcpiBuildState *build_state = build_opaque; - AcpiBuildTables tables; - - /* No state to update or already patched? Nothing to do. 
*/ - if (!build_state || build_state->patched) { - return; - } - build_state->patched = 1; - - acpi_build_tables_init(&tables); - - acpi_build(&tables, MACHINE(qdev_get_machine())); - - acpi_ram_update(build_state->table_mr, tables.table_data); - acpi_ram_update(build_state->rsdp_mr, tables.rsdp); - acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob); - - acpi_build_tables_cleanup(&tables, true); -} - -static void acpi_build_reset(void *build_opaque) -{ - AcpiBuildState *build_state = build_opaque; - build_state->patched = 0; -} - -static const VMStateDescription vmstate_acpi_build = { - .name = "acpi_build", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_UINT8(patched, AcpiBuildState), - VMSTATE_END_OF_LIST() - }, -}; - -static bool loongarch_is_acpi_enabled(LoongArchVirtMachineState *lvms) -{ - if (lvms->acpi == ON_OFF_AUTO_OFF) { - return false; - } - return true; -} - -void loongarch_acpi_setup(LoongArchVirtMachineState *lvms) -{ - AcpiBuildTables tables; - AcpiBuildState *build_state; - - if (!lvms->fw_cfg) { - ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n"); - return; - } - - if (!loongarch_is_acpi_enabled(lvms)) { - ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n"); - return; - } - - build_state = g_malloc0(sizeof *build_state); - - acpi_build_tables_init(&tables); - acpi_build(&tables, MACHINE(lvms)); - - /* Now expose it all to Guest */ - build_state->table_mr = acpi_add_rom_blob(acpi_build_update, - build_state, tables.table_data, - ACPI_BUILD_TABLE_FILE); - assert(build_state->table_mr != NULL); - - build_state->linker_mr = - acpi_add_rom_blob(acpi_build_update, build_state, - tables.linker->cmd_blob, ACPI_BUILD_LOADER_FILE); - - build_state->rsdp_mr = acpi_add_rom_blob(acpi_build_update, - build_state, tables.rsdp, - ACPI_BUILD_RSDP_FILE); - - fw_cfg_add_file(lvms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data, - acpi_data_len(tables.tcpalog)); - - qemu_register_reset(acpi_build_reset, build_state); - acpi_build_reset(build_state); - vmstate_register(NULL, 0, &vmstate_acpi_build, build_state); - - /* - * Cleanup tables but don't free the memory: we track it - * in build_state. - */ - acpi_build_tables_cleanup(&tables, false); -} diff --git a/hw/loongarch/boot.c b/hw/loongarch/boot.c index cb668703bdd..14d6c52d4e4 100644 --- a/hw/loongarch/boot.c +++ b/hw/loongarch/boot.c @@ -12,14 +12,28 @@ #include "hw/loader.h" #include "elf.h" #include "qemu/error-report.h" -#include "sysemu/reset.h" -#include "sysemu/qtest.h" +#include "system/reset.h" +#include "system/qtest.h" -struct memmap_entry *memmap_table; -unsigned memmap_entries; - -ram_addr_t initrd_offset; -uint64_t initrd_size; +/* + * Linux Image Format + * https://docs.kernel.org/arch/loongarch/booting.html + */ +#define LINUX_PE_MAGIC 0x818223cd +#define MZ_MAGIC 0x5a4d /* "MZ" */ + +struct loongarch_linux_hdr { + uint32_t mz_magic; + uint32_t res0; + uint64_t kernel_entry; + uint64_t kernel_size; + uint64_t load_offset; + uint64_t res1; + uint64_t res2; + uint64_t res3; + uint32_t linux_pe_magic; + uint32_t pe_header_offset; +} QEMU_PACKED; static const unsigned int slave_boot_code[] = { /* Configure reset ebase. 
*/ @@ -74,12 +88,16 @@ static inline void *guidcpy(void *dst, const void *src) return memcpy(dst, src, sizeof(efi_guid_t)); } -static void init_efi_boot_memmap(struct efi_system_table *systab, +static void init_efi_boot_memmap(MachineState *ms, + struct efi_system_table *systab, void *p, void *start) { unsigned i; struct efi_boot_memmap *boot_memmap = p; efi_guid_t tbl_guid = LINUX_EFI_BOOT_MEMMAP_GUID; + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(ms); + struct memmap_entry *memmap_table; + unsigned int memmap_entries; /* efi_configuration_table 1 */ guidcpy(&systab->tables[0].guid, &tbl_guid); @@ -91,6 +109,8 @@ static void init_efi_boot_memmap(struct efi_system_table *systab, boot_memmap->map_size = 0; efi_memory_desc_t *map = p + sizeof(struct efi_boot_memmap); + memmap_table = lvms->memmap_table; + memmap_entries = lvms->memmap_entries; for (i = 0; i < memmap_entries; i++) { map = (void *)boot_memmap + sizeof(*map); map[i].type = memmap_table[i].type; @@ -101,7 +121,8 @@ static void init_efi_boot_memmap(struct efi_system_table *systab, } } -static void init_efi_initrd_table(struct efi_system_table *systab, +static void init_efi_initrd_table(struct loongarch_boot_info *info, + struct efi_system_table *systab, void *p, void *start) { efi_guid_t tbl_guid = LINUX_EFI_INITRD_MEDIA_GUID; @@ -112,8 +133,8 @@ static void init_efi_initrd_table(struct efi_system_table *systab, systab->tables[1].table = (struct efi_configuration_table *)(p - start); systab->nr_tables = 2; - initrd_table->base = initrd_offset; - initrd_table->size = initrd_size; + initrd_table->base = info->initrd_addr; + initrd_table->size = info->initrd_size; } static void init_efi_fdt_table(struct efi_system_table *systab) @@ -126,10 +147,12 @@ static void init_efi_fdt_table(struct efi_system_table *systab) systab->nr_tables = 3; } -static void init_systab(struct loongarch_boot_info *info, void *p, void *start) +static void init_systab(MachineState *ms, + struct loongarch_boot_info *info, void *p, void *start) { void *bp_tables_start; struct efi_system_table *systab = p; + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(ms); info->a2 = p - start; @@ -146,10 +169,10 @@ static void init_systab(struct loongarch_boot_info *info, void *p, void *start) systab->tables = p; bp_tables_start = p; - init_efi_boot_memmap(systab, p, start); + init_efi_boot_memmap(ms, systab, p, start); p += ROUND_UP(sizeof(struct efi_boot_memmap) + - sizeof(efi_memory_desc_t) * memmap_entries, 64 * KiB); - init_efi_initrd_table(systab, p, start); + sizeof(efi_memory_desc_t) * lvms->memmap_entries, 64 * KiB); + init_efi_initrd_table(info, systab, p, start); p += ROUND_UP(sizeof(struct efi_initrd), 64 * KiB); init_efi_fdt_table(systab); @@ -171,16 +194,105 @@ static uint64_t cpu_loongarch_virt_to_phys(void *opaque, uint64_t addr) return addr & MAKE_64BIT_MASK(0, TARGET_PHYS_ADDR_SPACE_BITS); } +static int64_t load_loongarch_linux_image(const char *filename, + uint64_t *kernel_entry, + uint64_t *kernel_low, + uint64_t *kernel_high) +{ + gsize len; + ssize_t size; + uint8_t *buffer; + struct loongarch_linux_hdr *hdr; + + /* Load as raw file otherwise */ + if (!g_file_get_contents(filename, (char **)&buffer, &len, NULL)) { + return -1; + } + size = len; + + /* Unpack the image if it is a EFI zboot image */ + if (unpack_efi_zboot_image(&buffer, &size) < 0) { + g_free(buffer); + return -1; + } + + hdr = (struct loongarch_linux_hdr *)buffer; + + if (extract32(le32_to_cpu(hdr->mz_magic), 0, 16) != MZ_MAGIC || + le32_to_cpu(hdr->linux_pe_magic) 
!= LINUX_PE_MAGIC) { + g_free(buffer); + return -1; + } + + /* Early kernel versions may have those fields as virtual addresses */ + *kernel_entry = extract64(le64_to_cpu(hdr->kernel_entry), + 0, TARGET_PHYS_ADDR_SPACE_BITS); + *kernel_low = extract64(le64_to_cpu(hdr->load_offset), + 0, TARGET_PHYS_ADDR_SPACE_BITS); + *kernel_high = *kernel_low + size; + + rom_add_blob_fixed(filename, buffer, size, *kernel_low); + + g_free(buffer); + + return size; +} + +static ram_addr_t alloc_initrd_memory(struct loongarch_boot_info *info, + uint64_t advice_start, ssize_t rd_size) +{ + hwaddr base, ram_size, gap, low_end; + ram_addr_t initrd_end, initrd_start; + + base = VIRT_LOWMEM_BASE; + gap = VIRT_LOWMEM_SIZE; + initrd_start = advice_start; + initrd_end = initrd_start + rd_size; + + ram_size = info->ram_size; + low_end = base + MIN(ram_size, gap); + if (initrd_end <= low_end) { + return initrd_start; + } + + if (ram_size <= gap) { + error_report("The low memory is too small for initial ram disk '%s', " + "you need to expand the ram", + info->initrd_filename); + exit(1); + } + + /* + * Try to load the initrd in high memory + */ + ram_size -= gap; + initrd_start = VIRT_HIGHMEM_BASE; + if (rd_size <= ram_size) { + return initrd_start; + } + + error_report("The high memory is too small for initial ram disk '%s', " + "you need to expand the ram", + info->initrd_filename); + exit(1); +} + static int64_t load_kernel_info(struct loongarch_boot_info *info) { - uint64_t kernel_entry, kernel_low, kernel_high; - ssize_t kernel_size; + uint64_t kernel_entry, kernel_low, kernel_high, initrd_offset = 0; + ssize_t kernel_size, initrd_size; kernel_size = load_elf(info->kernel_filename, NULL, cpu_loongarch_virt_to_phys, NULL, &kernel_entry, &kernel_low, - &kernel_high, NULL, 0, + &kernel_high, NULL, ELFDATA2LSB, EM_LOONGARCH, 1, 0); + kernel_entry = cpu_loongarch_virt_to_phys(NULL, kernel_entry); + if (kernel_size < 0) { + kernel_size = load_loongarch_linux_image(info->kernel_filename, + &kernel_entry, &kernel_low, + &kernel_high); + } if (kernel_size < 0) { error_report("could not load kernel '%s': %s", @@ -193,15 +305,10 @@ static int64_t load_kernel_info(struct loongarch_boot_info *info) initrd_size = get_image_size(info->initrd_filename); if (initrd_size > 0) { initrd_offset = ROUND_UP(kernel_high + 4 * kernel_size, 64 * KiB); - - if (initrd_offset + initrd_size > info->ram_size) { - error_report("memory too small for initial ram disk '%s'", - info->initrd_filename); - exit(1); - } - - initrd_size = load_image_targphys(info->initrd_filename, initrd_offset, - info->ram_size - initrd_offset); + initrd_offset = alloc_initrd_memory(info, initrd_offset, + initrd_size); + initrd_size = load_image_targphys(info->initrd_filename, + initrd_offset, initrd_size); } if (initrd_size == (target_ulong)-1) { @@ -209,8 +316,9 @@ static int64_t load_kernel_info(struct loongarch_boot_info *info) info->initrd_filename); exit(1); } - } else { - initrd_size = 0; + + info->initrd_addr = initrd_offset; + info->initrd_size = initrd_size; } return kernel_entry; @@ -223,7 +331,7 @@ static void reset_load_elf(void *opaque) cpu_reset(CPU(cpu)); if (env->load_elf) { - if (cpu == LOONGARCH_CPU(first_cpu)) { + if (cpu == LOONGARCH_CPU(first_cpu)) { env->gpr[4] = env->boot_info->a0; env->gpr[5] = env->boot_info->a1; env->gpr[6] = env->boot_info->a2; @@ -265,36 +373,37 @@ static void loongarch_firmware_boot(LoongArchVirtMachineState *lvms, fw_cfg_add_kernel_info(info, lvms->fw_cfg); } -static void init_boot_rom(struct loongarch_boot_info *info, void *p)
+static void init_boot_rom(MachineState *ms, + struct loongarch_boot_info *info, void *p) { void *start = p; init_cmdline(info, p, start); p += COMMAND_LINE_SIZE; - init_systab(info, p, start); + init_systab(ms, info, p, start); } -static void loongarch_direct_kernel_boot(struct loongarch_boot_info *info) +static void loongarch_direct_kernel_boot(MachineState *ms, + struct loongarch_boot_info *info) { void *p, *bp; - int64_t kernel_addr = 0; + int64_t kernel_addr = VIRT_FLASH0_BASE; LoongArchCPU *lacpu; CPUState *cs; if (info->kernel_filename) { kernel_addr = load_kernel_info(info); } else { - if(!qtest_enabled()) { - error_report("Need kernel filename\n"); - exit(1); + if (!qtest_enabled()) { + warn_report("No kernel provided, booting from flash drive."); } } /* Load cmdline and system tables at [0 - 1 MiB] */ p = g_malloc0(1 * MiB); bp = p; - init_boot_rom(info, p); + init_boot_rom(ms, info, p); rom_add_blob_fixed_as("boot_info", bp, 1 * MiB, 0, &address_space_memory); /* Load slave boot code at pflash0 . */ @@ -334,6 +443,6 @@ void loongarch_load_kernel(MachineState *ms, struct loongarch_boot_info *info) if (lvms->bios_loaded) { loongarch_firmware_boot(lvms, info); } else { - loongarch_direct_kernel_boot(info); + loongarch_direct_kernel_boot(ms, info); } } diff --git a/hw/loongarch/fw_cfg.c b/hw/loongarch/fw_cfg.c index 35aeb2decbd..493563669e5 100644 --- a/hw/loongarch/fw_cfg.c +++ b/hw/loongarch/fw_cfg.c @@ -9,7 +9,7 @@ #include "hw/loongarch/fw_cfg.h" #include "hw/loongarch/virt.h" #include "hw/nvram/fw_cfg.h" -#include "sysemu/sysemu.h" +#include "system/system.h" static void fw_cfg_boot_set(void *opaque, const char *boot_device, Error **errp) diff --git a/hw/loongarch/meson.build b/hw/loongarch/meson.build index bce7ebac97e..d494d1e2838 100644 --- a/hw/loongarch/meson.build +++ b/hw/loongarch/meson.build @@ -1,9 +1,11 @@ loongarch_ss = ss.source_set() loongarch_ss.add(files( - 'fw_cfg.c', 'boot.c', )) -loongarch_ss.add(when: 'CONFIG_LOONGARCH_VIRT', if_true: files('virt.c')) -loongarch_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-build.c')) +common_ss.add(when: 'CONFIG_LOONGARCH_VIRT', if_true: files('fw_cfg.c')) +loongarch_ss.add(when: 'CONFIG_LOONGARCH_VIRT', if_true: files( + 'virt-fdt-build.c', + 'virt.c')) +loongarch_ss.add(when: 'CONFIG_ACPI', if_true: files('virt-acpi-build.c')) hw_arch += {'loongarch': loongarch_ss} diff --git a/hw/loongarch/virt-acpi-build.c b/hw/loongarch/virt-acpi-build.c new file mode 100644 index 00000000000..8c2228a7722 --- /dev/null +++ b/hw/loongarch/virt-acpi-build.c @@ -0,0 +1,744 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Support for generating ACPI tables and passing them to Guests + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/bitmap.h" +#include "hw/pci/pci.h" +#include "hw/core/cpu.h" +#include "target/loongarch/cpu.h" +#include "hw/acpi/acpi-defs.h" +#include "hw/acpi/acpi.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/acpi/bios-linker-loader.h" +#include "migration/vmstate.h" +#include "hw/mem/memory-device.h" +#include "system/reset.h" + +/* Supported chipsets: */ +#include "hw/pci-host/ls7a.h" +#include "hw/loongarch/virt.h" + +#include "hw/acpi/utils.h" +#include "hw/acpi/pci.h" + +#include "qom/qom-qobject.h" + +#include "hw/acpi/generic_event_device.h" +#include "hw/pci-host/gpex.h" +#include "system/system.h" +#include "system/tpm.h" +#include "hw/platform-bus.h" +#include 
"hw/acpi/aml-build.h" +#include "hw/acpi/hmat.h" + +#define ACPI_BUILD_ALIGN_SIZE 0x1000 +#define ACPI_BUILD_TABLE_SIZE 0x20000 + +#ifdef DEBUG_ACPI_BUILD +#define ACPI_BUILD_DPRINTF(fmt, ...) \ + do {printf("ACPI_BUILD: " fmt, ## __VA_ARGS__); } while (0) +#else +#define ACPI_BUILD_DPRINTF(fmt, ...) +#endif + +static void virt_madt_cpu_entry(int uid, + const CPUArchIdList *apic_ids, + GArray *entry, bool force_enabled) +{ + uint32_t flags, apic_id = apic_ids->cpus[uid].arch_id; + + flags = apic_ids->cpus[uid].cpu || force_enabled ? 1 /* Enabled */ : 0; + + /* Rev 1.0b, Table 5-13 Processor Local APIC Structure */ + build_append_int_noprefix(entry, 0, 1); /* Type */ + build_append_int_noprefix(entry, 8, 1); /* Length */ + build_append_int_noprefix(entry, uid, 1); /* ACPI Processor ID */ + build_append_int_noprefix(entry, apic_id, 1); /* APIC ID */ + build_append_int_noprefix(entry, flags, 4); /* Flags */ +} + +/* build FADT */ +static void init_common_fadt_data(AcpiFadtData *data) +{ + AcpiFadtData fadt = { + /* ACPI 5.0: 4.1 Hardware-Reduced ACPI */ + .rev = 5, + .flags = ((1 << ACPI_FADT_F_HW_REDUCED_ACPI) | + (1 << ACPI_FADT_F_RESET_REG_SUP)), + + /* ACPI 5.0: 4.8.3.7 Sleep Control and Status Registers */ + .sleep_ctl = { + .space_id = AML_AS_SYSTEM_MEMORY, + .bit_width = 8, + .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_SLEEP_CTL, + }, + .sleep_sts = { + .space_id = AML_AS_SYSTEM_MEMORY, + .bit_width = 8, + .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_SLEEP_STS, + }, + + /* ACPI 5.0: 4.8.3.6 Reset Register */ + .reset_reg = { + .space_id = AML_AS_SYSTEM_MEMORY, + .bit_width = 8, + .address = VIRT_GED_REG_ADDR + ACPI_GED_REG_RESET, + }, + .reset_val = ACPI_GED_RESET_VALUE, + }; + *data = fadt; +} + +static void acpi_align_size(GArray *blob, unsigned align) +{ + /* + * Align size to multiple of given size. This reduces the chance + * we need to change size in the future (breaking cross version migration). + */ + g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); +} + +/* build FACS */ +static void +build_facs(GArray *table_data) +{ + const char *sig = "FACS"; + const uint8_t reserved[40] = {}; + + g_array_append_vals(table_data, sig, 4); /* Signature */ + build_append_int_noprefix(table_data, 64, 4); /* Length */ + build_append_int_noprefix(table_data, 0, 4); /* Hardware Signature */ + build_append_int_noprefix(table_data, 0, 4); /* Firmware Waking Vector */ + build_append_int_noprefix(table_data, 0, 4); /* Global Lock */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + g_array_append_vals(table_data, reserved, 40); /* Reserved */ +} + +/* build MADT */ +static void +build_madt(GArray *table_data, BIOSLinker *linker, + LoongArchVirtMachineState *lvms) +{ + MachineState *ms = MACHINE(lvms); + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms); + int i, arch_id, flags; + AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = lvms->oem_id, + .oem_table_id = lvms->oem_table_id }; + + acpi_table_begin(&table, table_data); + + /* Local APIC Address */ + build_append_int_noprefix(table_data, 0, 4); + build_append_int_noprefix(table_data, 1 /* PCAT_COMPAT */, 4); /* Flags */ + + for (i = 0; i < arch_ids->len; i++) { + /* Processor Core Interrupt Controller Structure */ + arch_id = arch_ids->cpus[i].arch_id; + flags = arch_ids->cpus[i].cpu ? 
1 : 0; + build_append_int_noprefix(table_data, 17, 1); /* Type */ + build_append_int_noprefix(table_data, 15, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, i, 4); /* ACPI Processor ID */ + build_append_int_noprefix(table_data, arch_id, 4); /* Core ID */ + build_append_int_noprefix(table_data, flags, 4); /* Flags */ + } + + /* Extend I/O Interrupt Controller Structure */ + build_append_int_noprefix(table_data, 20, 1); /* Type */ + build_append_int_noprefix(table_data, 13, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, 3, 1); /* Cascade */ + build_append_int_noprefix(table_data, 0, 1); /* Node */ + build_append_int_noprefix(table_data, 0xffff, 8); /* Node map */ + + /* MSI Interrupt Controller Structure */ + build_append_int_noprefix(table_data, 21, 1); /* Type */ + build_append_int_noprefix(table_data, 19, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, VIRT_PCH_MSI_ADDR_LOW, 8);/* Address */ + build_append_int_noprefix(table_data, 0x40, 4); /* Start */ + build_append_int_noprefix(table_data, 0xc0, 4); /* Count */ + + /* Bridge I/O Interrupt Controller Structure */ + build_append_int_noprefix(table_data, 22, 1); /* Type */ + build_append_int_noprefix(table_data, 17, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, VIRT_PCH_REG_BASE, 8);/* Address */ + build_append_int_noprefix(table_data, 0x1000, 2); /* Size */ + build_append_int_noprefix(table_data, 0, 2); /* Id */ + build_append_int_noprefix(table_data, 0x40, 2); /* Base */ + + acpi_table_end(linker, &table); +} + +/* build SRAT */ +static void +build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) +{ + int i, arch_id, node_id; + hwaddr len, base, gap; + NodeInfo *numa_info; + int nodes, nb_numa_nodes = machine->numa_state->num_nodes; + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine); + MachineClass *mc = MACHINE_GET_CLASS(lvms); + const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine); + AcpiTable table = { .sig = "SRAT", .rev = 1, .oem_id = lvms->oem_id, + .oem_table_id = lvms->oem_table_id }; + + acpi_table_begin(&table, table_data); + build_append_int_noprefix(table_data, 1, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* Reserved */ + + for (i = 0; i < arch_ids->len; ++i) { + arch_id = arch_ids->cpus[i].arch_id; + node_id = arch_ids->cpus[i].props.node_id; + + /* Processor Local APIC/SAPIC Affinity Structure */ + build_append_int_noprefix(table_data, 0, 1); /* Type */ + build_append_int_noprefix(table_data, 16, 1); /* Length */ + /* Proximity Domain [7:0] */ + build_append_int_noprefix(table_data, node_id, 1); + build_append_int_noprefix(table_data, arch_id, 1); /* APIC ID */ + /* Flags, Table 5-36 */ + build_append_int_noprefix(table_data, 1, 4); + build_append_int_noprefix(table_data, 0, 1); /* Local SAPIC EID */ + /* Proximity Domain [31:8] */ + build_append_int_noprefix(table_data, 0, 3); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + } + + base = VIRT_LOWMEM_BASE; + gap = VIRT_LOWMEM_SIZE; + numa_info = machine->numa_state->nodes; + nodes = nb_numa_nodes; + if (!nodes) { + nodes = 1; + } + + for (i = 0; i < nodes; i++) { + if (nb_numa_nodes) { + len = numa_info[i].node_mem; + } else { + len = machine->ram_size; + } + + /* + * memory for the node splited into two 
part + * lowram: [base, +gap) + * highram: [VIRT_HIGHMEM_BASE, +(len - gap)) + */ + if (len >= gap) { + build_srat_memory(table_data, base, gap, i, MEM_AFFINITY_ENABLED); + len -= gap; + base = VIRT_HIGHMEM_BASE; + gap = machine->ram_size - VIRT_LOWMEM_SIZE; + } + + if (len) { + build_srat_memory(table_data, base, len, i, MEM_AFFINITY_ENABLED); + base += len; + gap -= len; + } + } + + if (machine->device_memory) { + build_srat_memory(table_data, machine->device_memory->base, + memory_region_size(&machine->device_memory->mr), + nodes - 1, + MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED); + } + + acpi_table_end(linker, &table); +} + +/* + * Serial Port Console Redirection Table (SPCR) + * https://learn.microsoft.com/en-us/windows-hardware/drivers/serports/serial-port-console-redirection-table + */ +static void +spcr_setup(GArray *table_data, BIOSLinker *linker, MachineState *machine) +{ + LoongArchVirtMachineState *lvms; + AcpiSpcrData serial = { + .interface_type = 0, /* 16550 compatible */ + .base_addr.id = AML_AS_SYSTEM_MEMORY, + .base_addr.width = 32, + .base_addr.offset = 0, + .base_addr.size = 1, + .base_addr.addr = VIRT_UART_BASE, + .interrupt_type = 0, /* Interrupt not supported */ + .pc_interrupt = 0, + .interrupt = VIRT_UART_IRQ, + .baud_rate = 7, /* 115200 */ + .parity = 0, + .stop_bits = 1, + .flow_control = 0, + .terminal_type = 3, /* ANSI */ + .language = 0, /* Language */ + .pci_device_id = 0xffff, /* not a PCI device*/ + .pci_vendor_id = 0xffff, /* not a PCI device*/ + .pci_bus = 0, + .pci_device = 0, + .pci_function = 0, + .pci_flags = 0, + .pci_segment = 0, + }; + + lvms = LOONGARCH_VIRT_MACHINE(machine); + /* + * Passing NULL as the SPCR Table for Revision 2 doesn't support + * NameSpaceString. + */ + build_spcr(table_data, linker, &serial, 2, lvms->oem_id, + lvms->oem_table_id, NULL); +} + +typedef +struct AcpiBuildState { + /* Copy of table in RAM (for patching). */ + MemoryRegion *table_mr; + /* Is table patched? 
*/ + uint8_t patched; + void *rsdp; + MemoryRegion *rsdp_mr; + MemoryRegion *linker_mr; +} AcpiBuildState; + +static void build_uart_device_aml(Aml *table, int index) +{ + Aml *dev; + Aml *crs; + Aml *pkg0, *pkg1, *pkg2; + Aml *scope; + uint32_t uart_irq; + uint64_t base; + + uart_irq = VIRT_UART_IRQ + index; + base = VIRT_UART_BASE + index * VIRT_UART_SIZE; + scope = aml_scope("_SB"); + dev = aml_device("COM%d", index); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501"))); + aml_append(dev, aml_name_decl("_UID", aml_int(index))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + crs = aml_resource_template(); + aml_append(crs, + aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_NON_CACHEABLE, AML_READ_WRITE, + 0, base, base + VIRT_UART_SIZE - 1, + 0, VIRT_UART_SIZE)); + aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_SHARED, &uart_irq, 1)); + aml_append(dev, aml_name_decl("_CRS", crs)); + pkg0 = aml_package(0x2); + aml_append(pkg0, aml_int(0x05F5E100)); + aml_append(pkg0, aml_string("clock-frenquency")); + pkg1 = aml_package(0x1); + aml_append(pkg1, pkg0); + pkg2 = aml_package(0x2); + aml_append(pkg2, aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301")); + aml_append(pkg2, pkg1); + aml_append(dev, aml_name_decl("_DSD", pkg2)); + aml_append(scope, dev); + aml_append(table, scope); +} + +static void +build_la_ged_aml(Aml *dsdt, MachineState *machine) +{ + uint32_t event; + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine); + CPUHotplugFeatures opts; + + build_ged_aml(dsdt, "\\_SB."GED_DEVICE, + HOTPLUG_HANDLER(lvms->acpi_ged), + VIRT_SCI_IRQ, AML_SYSTEM_MEMORY, + VIRT_GED_EVT_ADDR); + event = object_property_get_uint(OBJECT(lvms->acpi_ged), + "ged-event", &error_abort); + if (event & ACPI_GED_MEM_HOTPLUG_EVT) { + build_memory_hotplug_aml(dsdt, machine->ram_slots, "\\_SB", NULL, + AML_SYSTEM_MEMORY, + VIRT_GED_MEM_ADDR); + } + + if (event & ACPI_GED_CPU_HOTPLUG_EVT) { + opts.acpi_1_compatible = false; + opts.has_legacy_cphp = false; + opts.fw_unplugs_cpu = false; + opts.smi_path = NULL; + + build_cpus_aml(dsdt, machine, opts, virt_madt_cpu_entry, + VIRT_GED_CPUHP_ADDR, "\\_SB", + AML_GED_EVT_CPU_SCAN_METHOD, AML_SYSTEM_MEMORY); + } + + acpi_dsdt_add_power_button(dsdt); +} + +static void build_pci_device_aml(Aml *scope, LoongArchVirtMachineState *lvms) +{ + struct GPEXConfig cfg = { + .mmio64.base = VIRT_PCI_MEM_BASE, + .mmio64.size = VIRT_PCI_MEM_SIZE, + .pio.base = VIRT_PCI_IO_BASE, + .pio.size = VIRT_PCI_IO_SIZE, + .ecam.base = VIRT_PCI_CFG_BASE, + .ecam.size = VIRT_PCI_CFG_SIZE, + .irq = VIRT_GSI_BASE + VIRT_DEVICE_IRQS, + .bus = lvms->pci_bus, + }; + + acpi_dsdt_add_gpex(scope, &cfg); +} + +static void build_flash_aml(Aml *scope, LoongArchVirtMachineState *lvms) +{ + Aml *dev, *crs; + MemoryRegion *flash_mem; + + hwaddr flash0_base; + hwaddr flash0_size; + + hwaddr flash1_base; + hwaddr flash1_size; + + flash_mem = pflash_cfi01_get_memory(lvms->flash[0]); + flash0_base = flash_mem->addr; + flash0_size = memory_region_size(flash_mem); + + flash_mem = pflash_cfi01_get_memory(lvms->flash[1]); + flash1_base = flash_mem->addr; + flash1_size = memory_region_size(flash_mem); + + dev = aml_device("FLS0"); + aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + + crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(flash0_base, flash0_size, + AML_READ_WRITE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); + + 
dev = aml_device("FLS1"); + aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015"))); + aml_append(dev, aml_name_decl("_UID", aml_int(1))); + + crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(flash1_base, flash1_size, + AML_READ_WRITE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} + +#ifdef CONFIG_TPM +static void acpi_dsdt_add_tpm(Aml *scope, LoongArchVirtMachineState *vms) +{ + PlatformBusDevice *pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev); + hwaddr pbus_base = VIRT_PLATFORM_BUS_BASEADDRESS; + SysBusDevice *sbdev = SYS_BUS_DEVICE(tpm_find()); + MemoryRegion *sbdev_mr; + hwaddr tpm_base; + + if (!sbdev) { + return; + } + + tpm_base = platform_bus_get_mmio_addr(pbus, sbdev, 0); + assert(tpm_base != -1); + + tpm_base += pbus_base; + + sbdev_mr = sysbus_mmio_get_region(sbdev, 0); + + Aml *dev = aml_device("TPM0"); + aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101"))); + aml_append(dev, aml_name_decl("_STR", aml_string("TPM 2.0 Device"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + + Aml *crs = aml_resource_template(); + aml_append(crs, + aml_memory32_fixed(tpm_base, + (uint32_t)memory_region_size(sbdev_mr), + AML_READ_WRITE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} +#endif + +/* build DSDT */ +static void +build_dsdt(GArray *table_data, BIOSLinker *linker, MachineState *machine) +{ + int i; + Aml *dsdt, *scope, *pkg; + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine); + AcpiTable table = { .sig = "DSDT", .rev = 1, .oem_id = lvms->oem_id, + .oem_table_id = lvms->oem_table_id }; + + acpi_table_begin(&table, table_data); + dsdt = init_aml_allocator(); + for (i = 0; i < VIRT_UART_COUNT; i++) { + build_uart_device_aml(dsdt, i); + } + build_pci_device_aml(dsdt, lvms); + build_la_ged_aml(dsdt, machine); + build_flash_aml(dsdt, lvms); +#ifdef CONFIG_TPM + acpi_dsdt_add_tpm(dsdt, lvms); +#endif + /* System State Package */ + scope = aml_scope("\\"); + pkg = aml_package(4); + aml_append(pkg, aml_int(ACPI_GED_SLP_TYP_S5)); + aml_append(pkg, aml_int(0)); /* ignored */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(scope, aml_name_decl("_S5", pkg)); + aml_append(dsdt, scope); + /* Copy AML table into ACPI tables blob and patch header there */ + g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); + acpi_table_end(linker, &table); + free_aml_allocator(); +} + +static void acpi_build(AcpiBuildTables *tables, MachineState *machine) +{ + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine); + GArray *table_offsets; + AcpiFadtData fadt_data; + unsigned facs, xsdt, dsdt; + uint8_t *u; + GArray *tables_blob = tables->table_data; + + init_common_fadt_data(&fadt_data); + + table_offsets = g_array_new(false, true, sizeof(uint32_t)); + ACPI_BUILD_DPRINTF("init ACPI tables\n"); + + bios_linker_loader_alloc(tables->linker, + ACPI_BUILD_TABLE_FILE, tables_blob, + 64, false); + + /* + * FACS is pointed to by FADT. + * We place it first since it's the only table that has alignment + * requirements. 
+ */ + facs = tables_blob->len; + build_facs(tables_blob); + + /* DSDT is pointed to by FADT */ + dsdt = tables_blob->len; + build_dsdt(tables_blob, tables->linker, machine); + + /* ACPI tables pointed to by RSDT */ + acpi_add_table(table_offsets, tables_blob); + fadt_data.facs_tbl_offset = &facs; + fadt_data.dsdt_tbl_offset = &dsdt; + fadt_data.xdsdt_tbl_offset = &dsdt; + build_fadt(tables_blob, tables->linker, &fadt_data, + lvms->oem_id, lvms->oem_table_id); + + acpi_add_table(table_offsets, tables_blob); + build_madt(tables_blob, tables->linker, lvms); + + acpi_add_table(table_offsets, tables_blob); + build_pptt(tables_blob, tables->linker, machine, + lvms->oem_id, lvms->oem_table_id); + + acpi_add_table(table_offsets, tables_blob); + build_srat(tables_blob, tables->linker, machine); + acpi_add_table(table_offsets, tables_blob); + + if (machine->acpi_spcr_enabled) + spcr_setup(tables_blob, tables->linker, machine); + + if (machine->numa_state->num_nodes) { + if (machine->numa_state->have_numa_distance) { + acpi_add_table(table_offsets, tables_blob); + build_slit(tables_blob, tables->linker, machine, lvms->oem_id, + lvms->oem_table_id); + } + if (machine->numa_state->hmat_enabled) { + acpi_add_table(table_offsets, tables_blob); + build_hmat(tables_blob, tables->linker, machine->numa_state, + lvms->oem_id, lvms->oem_table_id); + } + } + + acpi_add_table(table_offsets, tables_blob); + { + AcpiMcfgInfo mcfg = { + .base = VIRT_PCI_CFG_BASE, + .size = VIRT_PCI_CFG_SIZE, + }; + build_mcfg(tables_blob, tables->linker, &mcfg, lvms->oem_id, + lvms->oem_table_id); + } + +#ifdef CONFIG_TPM + /* TPM info */ + if (tpm_get_version(tpm_find()) == TPM_VERSION_2_0) { + acpi_add_table(table_offsets, tables_blob); + build_tpm2(tables_blob, tables->linker, + tables->tcpalog, lvms->oem_id, + lvms->oem_table_id); + } +#endif + /* Add tables supplied by user (if any) */ + for (u = acpi_table_first(); u; u = acpi_table_next(u)) { + unsigned len = acpi_table_len(u); + + acpi_add_table(table_offsets, tables_blob); + g_array_append_vals(tables_blob, u, len); + } + + /* RSDT is pointed to by RSDP */ + xsdt = tables_blob->len; + build_xsdt(tables_blob, tables->linker, table_offsets, + lvms->oem_id, lvms->oem_table_id); + + /* RSDP is in FSEG memory, so allocate it separately */ + { + AcpiRsdpData rsdp_data = { + .revision = 2, + .oem_id = lvms->oem_id, + .xsdt_tbl_offset = &xsdt, + .rsdt_tbl_offset = NULL, + }; + build_rsdp(tables->rsdp, tables->linker, &rsdp_data); + } + + /* + * The align size is 128, warn if 64k is not enough therefore + * the align size could be resized. + */ + if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) { + warn_report("ACPI table size %u exceeds %d bytes," + " migration may not work", + tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2); + error_printf("Try removing CPUs, NUMA nodes, memory slots" + " or PCI bridges.\n"); + } + + acpi_align_size(tables->linker->cmd_blob, ACPI_BUILD_ALIGN_SIZE); + + /* Cleanup memory that's no longer used. */ + g_array_free(table_offsets, true); +} + +static void acpi_ram_update(MemoryRegion *mr, GArray *data) +{ + uint32_t size = acpi_data_len(data); + + /* + * Make sure RAM size is correct - in case it got changed + * e.g. 
by migration + */ + memory_region_ram_resize(mr, size, &error_abort); + + memcpy(memory_region_get_ram_ptr(mr), data->data, size); + memory_region_set_dirty(mr, 0, size); +} + +static void acpi_build_update(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + AcpiBuildTables tables; + + /* No state to update or already patched? Nothing to do. */ + if (!build_state || build_state->patched) { + return; + } + build_state->patched = 1; + + acpi_build_tables_init(&tables); + + acpi_build(&tables, MACHINE(qdev_get_machine())); + + acpi_ram_update(build_state->table_mr, tables.table_data); + acpi_ram_update(build_state->rsdp_mr, tables.rsdp); + acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob); + + acpi_build_tables_cleanup(&tables, true); +} + +static void acpi_build_reset(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + build_state->patched = 0; +} + +static const VMStateDescription vmstate_acpi_build = { + .name = "acpi_build", + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT8(patched, AcpiBuildState), + VMSTATE_END_OF_LIST() + }, +}; + +static bool virt_is_acpi_enabled(LoongArchVirtMachineState *lvms) +{ + if (lvms->acpi == ON_OFF_AUTO_OFF) { + return false; + } + return true; +} + +void virt_acpi_setup(LoongArchVirtMachineState *lvms) +{ + AcpiBuildTables tables; + AcpiBuildState *build_state; + + if (!lvms->fw_cfg) { + ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n"); + return; + } + + if (!virt_is_acpi_enabled(lvms)) { + ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n"); + return; + } + + build_state = g_malloc0(sizeof *build_state); + + acpi_build_tables_init(&tables); + acpi_build(&tables, MACHINE(lvms)); + + /* Now expose it all to Guest */ + build_state->table_mr = acpi_add_rom_blob(acpi_build_update, + build_state, tables.table_data, + ACPI_BUILD_TABLE_FILE); + assert(build_state->table_mr != NULL); + + build_state->linker_mr = + acpi_add_rom_blob(acpi_build_update, build_state, + tables.linker->cmd_blob, ACPI_BUILD_LOADER_FILE); + + build_state->rsdp_mr = acpi_add_rom_blob(acpi_build_update, + build_state, tables.rsdp, + ACPI_BUILD_RSDP_FILE); + + fw_cfg_add_file(lvms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data, + acpi_data_len(tables.tcpalog)); + + qemu_register_reset(acpi_build_reset, build_state); + acpi_build_reset(build_state); + vmstate_register(NULL, 0, &vmstate_acpi_build, build_state); + + /* + * Cleanup tables but don't free the memory: we track it + * in build_state. 
+ */ + acpi_build_tables_cleanup(&tables, false); +} diff --git a/hw/loongarch/virt-fdt-build.c b/hw/loongarch/virt-fdt-build.c new file mode 100644 index 00000000000..728ce466996 --- /dev/null +++ b/hw/loongarch/virt-fdt-build.c @@ -0,0 +1,534 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2025 Loongson Technology Corporation Limited + */ +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/guest-random.h" +#include +#include "hw/acpi/generic_event_device.h" +#include "hw/core/sysbus-fdt.h" +#include "hw/intc/loongarch_extioi.h" +#include "hw/loader.h" +#include "hw/loongarch/virt.h" +#include "hw/pci-host/gpex.h" +#include "hw/pci-host/ls7a.h" +#include "system/device_tree.h" +#include "system/reset.h" +#include "target/loongarch/cpu.h" + +static void create_fdt(LoongArchVirtMachineState *lvms) +{ + MachineState *ms = MACHINE(lvms); + uint8_t rng_seed[32]; + + ms->fdt = create_device_tree(&lvms->fdt_size); + if (!ms->fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + /* Header */ + qemu_fdt_setprop_string(ms->fdt, "/", "compatible", + "linux,dummy-loongson3"); + qemu_fdt_setprop_cell(ms->fdt, "/", "#address-cells", 0x2); + qemu_fdt_setprop_cell(ms->fdt, "/", "#size-cells", 0x2); + qemu_fdt_add_subnode(ms->fdt, "/chosen"); + + /* Pass seed to RNG */ + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + qemu_fdt_setprop(ms->fdt, "/chosen", "rng-seed", rng_seed, sizeof(rng_seed)); +} + +static void fdt_add_cpu_nodes(const LoongArchVirtMachineState *lvms) +{ + int num; + MachineState *ms = MACHINE(lvms); + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus; + LoongArchCPU *cpu; + CPUState *cs; + char *nodename, *map_path; + + qemu_fdt_add_subnode(ms->fdt, "/cpus"); + qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 0x1); + qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0); + + /* cpu nodes */ + possible_cpus = mc->possible_cpu_arch_ids(ms); + for (num = 0; num < possible_cpus->len; num++) { + cs = possible_cpus->cpus[num].cpu; + if (cs == NULL) { + continue; + } + + nodename = g_strdup_printf("/cpus/cpu@%d", num); + cpu = LOONGARCH_CPU(cs); + + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "cpu"); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + cpu->dtb_compatible); + if (possible_cpus->cpus[num].props.has_node_id) { + qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id", + possible_cpus->cpus[num].props.node_id); + } + qemu_fdt_setprop_cell(ms->fdt, nodename, "reg", num); + qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", + qemu_fdt_alloc_phandle(ms->fdt)); + g_free(nodename); + } + + /*cpu map */ + qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map"); + for (num = 0; num < possible_cpus->len; num++) { + cs = possible_cpus->cpus[num].cpu; + if (cs == NULL) { + continue; + } + + nodename = g_strdup_printf("/cpus/cpu@%d", num); + if (ms->smp.threads > 1) { + map_path = g_strdup_printf( + "/cpus/cpu-map/socket%d/core%d/thread%d", + num / (ms->smp.cores * ms->smp.threads), + (num / ms->smp.threads) % ms->smp.cores, + num % ms->smp.threads); + } else { + map_path = g_strdup_printf( + "/cpus/cpu-map/socket%d/core%d", + num / ms->smp.cores, + num % ms->smp.cores); + } + qemu_fdt_add_path(ms->fdt, map_path); + qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", nodename); + + g_free(map_path); + g_free(nodename); + } +} + +static void fdt_add_memory_node(MachineState *ms, + uint64_t base, uint64_t size, int 
node_id) +{ + char *nodename = g_strdup_printf("/memory@%" PRIx64, base); + + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", base >> 32, base, + size >> 32, size); + qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "memory"); + + if (ms->numa_state && ms->numa_state->num_nodes) { + qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id", node_id); + } + + g_free(nodename); +} + +static void fdt_add_memory_nodes(MachineState *ms) +{ + hwaddr base, size, ram_size, gap; + int i, nb_numa_nodes, nodes; + NodeInfo *numa_info; + + ram_size = ms->ram_size; + base = VIRT_LOWMEM_BASE; + gap = VIRT_LOWMEM_SIZE; + nodes = nb_numa_nodes = ms->numa_state->num_nodes; + numa_info = ms->numa_state->nodes; + if (!nodes) { + nodes = 1; + } + + for (i = 0; i < nodes; i++) { + if (nb_numa_nodes) { + size = numa_info[i].node_mem; + } else { + size = ram_size; + } + + /* + * memory for the node is split into two parts + * lowram: [base, +gap) + * highram: [VIRT_HIGHMEM_BASE, +(size - gap)) + */ + if (size >= gap) { + fdt_add_memory_node(ms, base, gap, i); + size -= gap; + base = VIRT_HIGHMEM_BASE; + gap = ram_size - VIRT_LOWMEM_SIZE; + } + + if (size) { + fdt_add_memory_node(ms, base, size, i); + base += size; + gap -= size; + } + } +} + +static void fdt_add_fw_cfg_node(const LoongArchVirtMachineState *lvms) +{ + char *nodename; + hwaddr base = VIRT_FWCFG_BASE; + const MachineState *ms = MACHINE(lvms); + + nodename = g_strdup_printf("/fw_cfg@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, + "compatible", "qemu,fw-cfg-mmio"); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, base, 2, 0x18); + qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); + g_free(nodename); +} + +static void fdt_add_flash_node(LoongArchVirtMachineState *lvms) +{ + MachineState *ms = MACHINE(lvms); + char *nodename; + MemoryRegion *flash_mem; + + hwaddr flash0_base; + hwaddr flash0_size; + + hwaddr flash1_base; + hwaddr flash1_size; + + flash_mem = pflash_cfi01_get_memory(lvms->flash[0]); + flash0_base = flash_mem->addr; + flash0_size = memory_region_size(flash_mem); + + flash_mem = pflash_cfi01_get_memory(lvms->flash[1]); + flash1_base = flash_mem->addr; + flash1_size = memory_region_size(flash_mem); + + nodename = g_strdup_printf("/flash@%" PRIx64, flash0_base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash"); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, flash0_base, 2, flash0_size, + 2, flash1_base, 2, flash1_size); + qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4); + g_free(nodename); +} + +static void fdt_add_cpuic_node(LoongArchVirtMachineState *lvms, + uint32_t *cpuintc_phandle) +{ + MachineState *ms = MACHINE(lvms); + char *nodename; + + *cpuintc_phandle = qemu_fdt_alloc_phandle(ms->fdt); + nodename = g_strdup_printf("/cpuic"); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *cpuintc_phandle); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + "loongson,cpu-interrupt-controller"); + qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 1); + g_free(nodename); +} + +static void fdt_add_eiointc_node(LoongArchVirtMachineState *lvms, + uint32_t *cpuintc_phandle, + uint32_t *eiointc_phandle) +{ + MachineState *ms = MACHINE(lvms); + char *nodename; + hwaddr extioi_base = APIC_BASE; +
hwaddr extioi_size = EXTIOI_SIZE; + + *eiointc_phandle = qemu_fdt_alloc_phandle(ms->fdt); + nodename = g_strdup_printf("/eiointc@%" PRIx64, extioi_base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *eiointc_phandle); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + "loongson,ls2k2000-eiointc"); + qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 1); + qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent", + *cpuintc_phandle); + qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupts", 3); + qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0x0, + extioi_base, 0x0, extioi_size); + g_free(nodename); +} + +static void fdt_add_pch_pic_node(LoongArchVirtMachineState *lvms, + uint32_t *eiointc_phandle, + uint32_t *pch_pic_phandle) +{ + MachineState *ms = MACHINE(lvms); + char *nodename; + hwaddr pch_pic_base = VIRT_PCH_REG_BASE; + hwaddr pch_pic_size = VIRT_PCH_REG_SIZE; + + *pch_pic_phandle = qemu_fdt_alloc_phandle(ms->fdt); + nodename = g_strdup_printf("/platic@%" PRIx64, pch_pic_base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *pch_pic_phandle); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + "loongson,pch-pic-1.0"); + qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0, + pch_pic_base, 0, pch_pic_size); + qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 2); + qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent", + *eiointc_phandle); + qemu_fdt_setprop_cell(ms->fdt, nodename, "loongson,pic-base-vec", 0); + g_free(nodename); +} + +static void fdt_add_pch_msi_node(LoongArchVirtMachineState *lvms, + uint32_t *eiointc_phandle, + uint32_t *pch_msi_phandle) +{ + MachineState *ms = MACHINE(lvms); + char *nodename; + hwaddr pch_msi_base = VIRT_PCH_MSI_ADDR_LOW; + hwaddr pch_msi_size = VIRT_PCH_MSI_SIZE; + + *pch_msi_phandle = qemu_fdt_alloc_phandle(ms->fdt); + nodename = g_strdup_printf("/msi@%" PRIx64, pch_msi_base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *pch_msi_phandle); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + "loongson,pch-msi-1.0"); + qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", + 0, pch_msi_base, + 0, pch_msi_size); + qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent", + *eiointc_phandle); + qemu_fdt_setprop_cell(ms->fdt, nodename, "loongson,msi-base-vec", + VIRT_PCH_PIC_IRQ_NUM); + qemu_fdt_setprop_cell(ms->fdt, nodename, "loongson,msi-num-vecs", + EXTIOI_IRQS - VIRT_PCH_PIC_IRQ_NUM); + g_free(nodename); +} + +static void fdt_add_pcie_irq_map_node(const LoongArchVirtMachineState *lvms, + char *nodename, + uint32_t *pch_pic_phandle) +{ + int pin, dev; + uint32_t irq_map_stride = 0; + uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * 10] = {}; + uint32_t *irq_map = full_irq_map; + const MachineState *ms = MACHINE(lvms); + + /* + * This code creates a standard swizzle of interrupts such that + * each device's first interrupt is based on it's PCI_SLOT number. + * (See pci_swizzle_map_irq_fn()) + * + * We only need one entry per interrupt in the table (not one per + * possible slot) seeing the interrupt-map-mask will allow the table + * to wrap to any number of devices. 
+ */ + + for (dev = 0; dev < PCI_NUM_PINS; dev++) { + int devfn = dev * 0x8; + + for (pin = 0; pin < PCI_NUM_PINS; pin++) { + int irq_nr = 16 + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS); + int i = 0; + + /* Fill PCI address cells */ + irq_map[i] = cpu_to_be32(devfn << 8); + i += 3; + + /* Fill PCI Interrupt cells */ + irq_map[i] = cpu_to_be32(pin + 1); + i += 1; + + /* Fill interrupt controller phandle and cells */ + irq_map[i++] = cpu_to_be32(*pch_pic_phandle); + irq_map[i++] = cpu_to_be32(irq_nr); + + if (!irq_map_stride) { + irq_map_stride = i; + } + irq_map += irq_map_stride; + } + } + + + qemu_fdt_setprop(ms->fdt, nodename, "interrupt-map", full_irq_map, + PCI_NUM_PINS * PCI_NUM_PINS * + irq_map_stride * sizeof(uint32_t)); + qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupt-map-mask", + 0x1800, 0, 0, 0x7); +} + +static void fdt_add_pcie_node(const LoongArchVirtMachineState *lvms, + uint32_t *pch_pic_phandle, + uint32_t *pch_msi_phandle) +{ + char *nodename; + hwaddr base_mmio = VIRT_PCI_MEM_BASE; + hwaddr size_mmio = VIRT_PCI_MEM_SIZE; + hwaddr base_pio = VIRT_PCI_IO_BASE; + hwaddr size_pio = VIRT_PCI_IO_SIZE; + hwaddr base_pcie = VIRT_PCI_CFG_BASE; + hwaddr size_pcie = VIRT_PCI_CFG_SIZE; + hwaddr base = base_pcie; + const MachineState *ms = MACHINE(lvms); + + nodename = g_strdup_printf("/pcie@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, + "compatible", "pci-host-ecam-generic"); + qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "pci"); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 3); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 2); + qemu_fdt_setprop_cell(ms->fdt, nodename, "linux,pci-domain", 0); + qemu_fdt_setprop_cells(ms->fdt, nodename, "bus-range", 0, + PCIE_MMCFG_BUS(VIRT_PCI_CFG_SIZE - 1)); + qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, base_pcie, 2, size_pcie); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges", + 1, FDT_PCI_RANGE_IOPORT, 2, VIRT_PCI_IO_OFFSET, + 2, base_pio, 2, size_pio, + 1, FDT_PCI_RANGE_MMIO, 2, base_mmio, + 2, base_mmio, 2, size_mmio); + qemu_fdt_setprop_cells(ms->fdt, nodename, "msi-map", + 0, *pch_msi_phandle, 0, 0x10000); + fdt_add_pcie_irq_map_node(lvms, nodename, pch_pic_phandle); + g_free(nodename); +} + +static void fdt_add_uart_node(LoongArchVirtMachineState *lvms, + uint32_t *pch_pic_phandle, hwaddr base, + int irq, bool chosen) +{ + char *nodename; + hwaddr size = VIRT_UART_SIZE; + MachineState *ms = MACHINE(lvms); + + nodename = g_strdup_printf("/serial@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "ns16550a"); + qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0x0, base, 0x0, size); + qemu_fdt_setprop_cell(ms->fdt, nodename, "clock-frequency", 100000000); + if (chosen) { + qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", nodename); + } + qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts", irq, 0x4); + qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent", + *pch_pic_phandle); + g_free(nodename); +} + +static void fdt_add_rtc_node(LoongArchVirtMachineState *lvms, + uint32_t *pch_pic_phandle) +{ + char *nodename; + hwaddr base = VIRT_RTC_REG_BASE; + hwaddr size = VIRT_RTC_LEN; + MachineState *ms = MACHINE(lvms); + + nodename = g_strdup_printf("/rtc@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + 
"loongson,ls7a-rtc"); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", 2, base, 2, size); + qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts", + VIRT_RTC_IRQ - VIRT_GSI_BASE , 0x4); + qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent", + *pch_pic_phandle); + g_free(nodename); +} + +static void fdt_add_ged_reset(LoongArchVirtMachineState *lvms) +{ + char *name; + uint32_t ged_handle; + MachineState *ms = MACHINE(lvms); + hwaddr base = VIRT_GED_REG_ADDR; + hwaddr size = ACPI_GED_REG_COUNT; + + ged_handle = qemu_fdt_alloc_phandle(ms->fdt); + name = g_strdup_printf("/ged@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, name); + qemu_fdt_setprop_string(ms->fdt, name, "compatible", "syscon"); + qemu_fdt_setprop_cells(ms->fdt, name, "reg", 0x0, base, 0x0, size); + /* 8 bit registers */ + qemu_fdt_setprop_cell(ms->fdt, name, "reg-shift", 0); + qemu_fdt_setprop_cell(ms->fdt, name, "reg-io-width", 1); + qemu_fdt_setprop_cell(ms->fdt, name, "phandle", ged_handle); + ged_handle = qemu_fdt_get_phandle(ms->fdt, name); + g_free(name); + + name = g_strdup_printf("/reboot"); + qemu_fdt_add_subnode(ms->fdt, name); + qemu_fdt_setprop_string(ms->fdt, name, "compatible", "syscon-reboot"); + qemu_fdt_setprop_cell(ms->fdt, name, "regmap", ged_handle); + qemu_fdt_setprop_cell(ms->fdt, name, "offset", ACPI_GED_REG_RESET); + qemu_fdt_setprop_cell(ms->fdt, name, "value", ACPI_GED_RESET_VALUE); + g_free(name); + + name = g_strdup_printf("/poweroff"); + qemu_fdt_add_subnode(ms->fdt, name); + qemu_fdt_setprop_string(ms->fdt, name, "compatible", "syscon-poweroff"); + qemu_fdt_setprop_cell(ms->fdt, name, "regmap", ged_handle); + qemu_fdt_setprop_cell(ms->fdt, name, "offset", ACPI_GED_REG_SLEEP_CTL); + qemu_fdt_setprop_cell(ms->fdt, name, "value", ACPI_GED_SLP_EN | + (ACPI_GED_SLP_TYP_S5 << ACPI_GED_SLP_TYP_POS)); + g_free(name); +} + +void virt_fdt_setup(LoongArchVirtMachineState *lvms) +{ + MachineState *machine = MACHINE(lvms); + uint32_t cpuintc_phandle, eiointc_phandle, pch_pic_phandle, pch_msi_phandle; + int i; + + create_fdt(lvms); + fdt_add_cpu_nodes(lvms); + fdt_add_memory_nodes(machine); + fdt_add_fw_cfg_node(lvms); + fdt_add_flash_node(lvms); + + /* Add cpu interrupt-controller */ + fdt_add_cpuic_node(lvms, &cpuintc_phandle); + /* Add Extend I/O Interrupt Controller node */ + fdt_add_eiointc_node(lvms, &cpuintc_phandle, &eiointc_phandle); + /* Add PCH PIC node */ + fdt_add_pch_pic_node(lvms, &eiointc_phandle, &pch_pic_phandle); + /* Add PCH MSI node */ + fdt_add_pch_msi_node(lvms, &eiointc_phandle, &pch_msi_phandle); + /* Add pcie node */ + fdt_add_pcie_node(lvms, &pch_pic_phandle, &pch_msi_phandle); + + /* + * Create uart fdt node in reverse order so that they appear + * in the finished device tree lowest address first + */ + for (i = VIRT_UART_COUNT; i-- > 0;) { + hwaddr base = VIRT_UART_BASE + i * VIRT_UART_SIZE; + int irq = VIRT_UART_IRQ + i - VIRT_GSI_BASE; + fdt_add_uart_node(lvms, &pch_pic_phandle, base, irq, i == 0); + } + + fdt_add_rtc_node(lvms, &pch_pic_phandle); + fdt_add_ged_reset(lvms); + platform_bus_add_all_fdt_nodes(machine->fdt, "/platic", + VIRT_PLATFORM_BUS_BASEADDRESS, + VIRT_PLATFORM_BUS_SIZE, + VIRT_PLATFORM_BUS_IRQ); + + /* + * Since lowmem region starts from 0 and Linux kernel legacy start address + * at 2 MiB, FDT base address is located at 1 MiB to avoid NULL pointer + * access. FDT size limit with 1 MiB. + * Put the FDT into the memory map as a ROM image: this will ensure + * the FDT is copied again upon reset, even if addr points into RAM. 
+ */ + rom_add_blob_fixed_as("fdt", machine->fdt, lvms->fdt_size, FDT_BASE, + &address_space_memory); + qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds, + rom_ptr_for_as(&address_space_memory, FDT_BASE, lvms->fdt_size)); +} diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c index e592b1b6b79..b15ada20787 100644 --- a/hw/loongarch/virt.c +++ b/hw/loongarch/virt.c @@ -8,22 +8,23 @@ #include "qemu/units.h" #include "qemu/datadir.h" #include "qapi/error.h" +#include "exec/target_page.h" #include "hw/boards.h" -#include "hw/char/serial.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" -#include "sysemu/sysemu.h" -#include "sysemu/qtest.h" -#include "sysemu/runstate.h" -#include "sysemu/reset.h" -#include "sysemu/rtc.h" +#include "hw/char/serial-mm.h" +#include "system/kvm.h" +#include "system/tcg.h" +#include "system/system.h" +#include "system/qtest.h" +#include "system/runstate.h" +#include "system/reset.h" +#include "system/rtc.h" #include "hw/loongarch/virt.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/irq.h" #include "net/net.h" #include "hw/loader.h" #include "elf.h" -#include "hw/intc/loongson_ipi.h" +#include "hw/intc/loongarch_ipi.h" #include "hw/intc/loongarch_extioi.h" #include "hw/intc/loongarch_pch_pic.h" #include "hw/intc/loongarch_pch_msi.h" @@ -33,30 +34,19 @@ #include "hw/loongarch/fw_cfg.h" #include "target/loongarch/cpu.h" #include "hw/firmware/smbios.h" -#include "hw/acpi/aml-build.h" #include "qapi/qapi-visit-common.h" #include "hw/acpi/generic_event_device.h" #include "hw/mem/nvdimm.h" -#include "sysemu/device_tree.h" -#include -#include "hw/core/sysbus-fdt.h" #include "hw/platform-bus.h" #include "hw/display/ramfb.h" +#include "hw/uefi/var-service-api.h" #include "hw/mem/pc-dimm.h" -#include "sysemu/tpm.h" -#include "sysemu/block-backend.h" +#include "system/tpm.h" +#include "system/block-backend.h" #include "hw/block/flash.h" #include "hw/virtio/virtio-iommu.h" #include "qemu/error-report.h" -static bool virt_is_veiointc_enabled(LoongArchVirtMachineState *lvms) -{ - if (lvms->veiointc == ON_OFF_AUTO_OFF) { - return false; - } - return true; -} - static void virt_get_veiointc(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { @@ -134,416 +124,6 @@ static void virt_flash_map(LoongArchVirtMachineState *lvms, virt_flash_map1(flash1, VIRT_FLASH1_BASE, VIRT_FLASH1_SIZE, sysmem); } -static void fdt_add_cpuic_node(LoongArchVirtMachineState *lvms, - uint32_t *cpuintc_phandle) -{ - MachineState *ms = MACHINE(lvms); - char *nodename; - - *cpuintc_phandle = qemu_fdt_alloc_phandle(ms->fdt); - nodename = g_strdup_printf("/cpuic"); - qemu_fdt_add_subnode(ms->fdt, nodename); - qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *cpuintc_phandle); - qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", - "loongson,cpu-interrupt-controller"); - qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0); - qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 1); - g_free(nodename); -} - -static void fdt_add_eiointc_node(LoongArchVirtMachineState *lvms, - uint32_t *cpuintc_phandle, - uint32_t *eiointc_phandle) -{ - MachineState *ms = MACHINE(lvms); - char *nodename; - hwaddr extioi_base = APIC_BASE; - hwaddr extioi_size = EXTIOI_SIZE; - - *eiointc_phandle = qemu_fdt_alloc_phandle(ms->fdt); - nodename = g_strdup_printf("/eiointc@%" PRIx64, extioi_base); - qemu_fdt_add_subnode(ms->fdt, nodename); - qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *eiointc_phandle); - 
qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", - "loongson,ls2k2000-eiointc"); - qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0); - qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 1); - qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent", - *cpuintc_phandle); - qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupts", 3); - qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0x0, - extioi_base, 0x0, extioi_size); - g_free(nodename); -} - -static void fdt_add_pch_pic_node(LoongArchVirtMachineState *lvms, - uint32_t *eiointc_phandle, - uint32_t *pch_pic_phandle) -{ - MachineState *ms = MACHINE(lvms); - char *nodename; - hwaddr pch_pic_base = VIRT_PCH_REG_BASE; - hwaddr pch_pic_size = VIRT_PCH_REG_SIZE; - - *pch_pic_phandle = qemu_fdt_alloc_phandle(ms->fdt); - nodename = g_strdup_printf("/platic@%" PRIx64, pch_pic_base); - qemu_fdt_add_subnode(ms->fdt, nodename); - qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *pch_pic_phandle); - qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", - "loongson,pch-pic-1.0"); - qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0, - pch_pic_base, 0, pch_pic_size); - qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0); - qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 2); - qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent", - *eiointc_phandle); - qemu_fdt_setprop_cell(ms->fdt, nodename, "loongson,pic-base-vec", 0); - g_free(nodename); -} - -static void fdt_add_pch_msi_node(LoongArchVirtMachineState *lvms, - uint32_t *eiointc_phandle, - uint32_t *pch_msi_phandle) -{ - MachineState *ms = MACHINE(lvms); - char *nodename; - hwaddr pch_msi_base = VIRT_PCH_MSI_ADDR_LOW; - hwaddr pch_msi_size = VIRT_PCH_MSI_SIZE; - - *pch_msi_phandle = qemu_fdt_alloc_phandle(ms->fdt); - nodename = g_strdup_printf("/msi@%" PRIx64, pch_msi_base); - qemu_fdt_add_subnode(ms->fdt, nodename); - qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", *pch_msi_phandle); - qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", - "loongson,pch-msi-1.0"); - qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", - 0, pch_msi_base, - 0, pch_msi_size); - qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0); - qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent", - *eiointc_phandle); - qemu_fdt_setprop_cell(ms->fdt, nodename, "loongson,msi-base-vec", - VIRT_PCH_PIC_IRQ_NUM); - qemu_fdt_setprop_cell(ms->fdt, nodename, "loongson,msi-num-vecs", - EXTIOI_IRQS - VIRT_PCH_PIC_IRQ_NUM); - g_free(nodename); -} - -static void fdt_add_flash_node(LoongArchVirtMachineState *lvms) -{ - MachineState *ms = MACHINE(lvms); - char *nodename; - MemoryRegion *flash_mem; - - hwaddr flash0_base; - hwaddr flash0_size; - - hwaddr flash1_base; - hwaddr flash1_size; - - flash_mem = pflash_cfi01_get_memory(lvms->flash[0]); - flash0_base = flash_mem->addr; - flash0_size = memory_region_size(flash_mem); - - flash_mem = pflash_cfi01_get_memory(lvms->flash[1]); - flash1_base = flash_mem->addr; - flash1_size = memory_region_size(flash_mem); - - nodename = g_strdup_printf("/flash@%" PRIx64, flash0_base); - qemu_fdt_add_subnode(ms->fdt, nodename); - qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash"); - qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", - 2, flash0_base, 2, flash0_size, - 2, flash1_base, 2, flash1_size); - qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4); - g_free(nodename); -} - -static void fdt_add_rtc_node(LoongArchVirtMachineState *lvms, - uint32_t *pch_pic_phandle) -{ - 
char *nodename; - hwaddr base = VIRT_RTC_REG_BASE; - hwaddr size = VIRT_RTC_LEN; - MachineState *ms = MACHINE(lvms); - - nodename = g_strdup_printf("/rtc@%" PRIx64, base); - qemu_fdt_add_subnode(ms->fdt, nodename); - qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", - "loongson,ls7a-rtc"); - qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", 2, base, 2, size); - qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts", - VIRT_RTC_IRQ - VIRT_GSI_BASE , 0x4); - qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent", - *pch_pic_phandle); - g_free(nodename); -} - -static void fdt_add_uart_node(LoongArchVirtMachineState *lvms, - uint32_t *pch_pic_phandle) -{ - char *nodename; - hwaddr base = VIRT_UART_BASE; - hwaddr size = VIRT_UART_SIZE; - MachineState *ms = MACHINE(lvms); - - nodename = g_strdup_printf("/serial@%" PRIx64, base); - qemu_fdt_add_subnode(ms->fdt, nodename); - qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "ns16550a"); - qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", 0x0, base, 0x0, size); - qemu_fdt_setprop_cell(ms->fdt, nodename, "clock-frequency", 100000000); - qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", nodename); - qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts", - VIRT_UART_IRQ - VIRT_GSI_BASE, 0x4); - qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent", - *pch_pic_phandle); - g_free(nodename); -} - -static void create_fdt(LoongArchVirtMachineState *lvms) -{ - MachineState *ms = MACHINE(lvms); - - ms->fdt = create_device_tree(&lvms->fdt_size); - if (!ms->fdt) { - error_report("create_device_tree() failed"); - exit(1); - } - - /* Header */ - qemu_fdt_setprop_string(ms->fdt, "/", "compatible", - "linux,dummy-loongson3"); - qemu_fdt_setprop_cell(ms->fdt, "/", "#address-cells", 0x2); - qemu_fdt_setprop_cell(ms->fdt, "/", "#size-cells", 0x2); - qemu_fdt_add_subnode(ms->fdt, "/chosen"); -} - -static void fdt_add_cpu_nodes(const LoongArchVirtMachineState *lvms) -{ - int num; - const MachineState *ms = MACHINE(lvms); - int smp_cpus = ms->smp.cpus; - - qemu_fdt_add_subnode(ms->fdt, "/cpus"); - qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 0x1); - qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0); - - /* cpu nodes */ - for (num = smp_cpus - 1; num >= 0; num--) { - char *nodename = g_strdup_printf("/cpus/cpu@%d", num); - LoongArchCPU *cpu = LOONGARCH_CPU(qemu_get_cpu(num)); - CPUState *cs = CPU(cpu); - - qemu_fdt_add_subnode(ms->fdt, nodename); - qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "cpu"); - qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", - cpu->dtb_compatible); - if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) { - qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id", - ms->possible_cpus->cpus[cs->cpu_index].props.node_id); - } - qemu_fdt_setprop_cell(ms->fdt, nodename, "reg", num); - qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", - qemu_fdt_alloc_phandle(ms->fdt)); - g_free(nodename); - } - - /*cpu map */ - qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map"); - - for (num = smp_cpus - 1; num >= 0; num--) { - char *cpu_path = g_strdup_printf("/cpus/cpu@%d", num); - char *map_path; - - if (ms->smp.threads > 1) { - map_path = g_strdup_printf( - "/cpus/cpu-map/socket%d/core%d/thread%d", - num / (ms->smp.cores * ms->smp.threads), - (num / ms->smp.threads) % ms->smp.cores, - num % ms->smp.threads); - } else { - map_path = g_strdup_printf( - "/cpus/cpu-map/socket%d/core%d", - num / ms->smp.cores, - num % ms->smp.cores); - } - qemu_fdt_add_path(ms->fdt, map_path); - 
qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", cpu_path); - - g_free(map_path); - g_free(cpu_path); - } -} - -static void fdt_add_fw_cfg_node(const LoongArchVirtMachineState *lvms) -{ - char *nodename; - hwaddr base = VIRT_FWCFG_BASE; - const MachineState *ms = MACHINE(lvms); - - nodename = g_strdup_printf("/fw_cfg@%" PRIx64, base); - qemu_fdt_add_subnode(ms->fdt, nodename); - qemu_fdt_setprop_string(ms->fdt, nodename, - "compatible", "qemu,fw-cfg-mmio"); - qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", - 2, base, 2, 0x18); - qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); - g_free(nodename); -} - -static void fdt_add_pcie_irq_map_node(const LoongArchVirtMachineState *lvms, - char *nodename, - uint32_t *pch_pic_phandle) -{ - int pin, dev; - uint32_t irq_map_stride = 0; - uint32_t full_irq_map[GPEX_NUM_IRQS *GPEX_NUM_IRQS * 10] = {}; - uint32_t *irq_map = full_irq_map; - const MachineState *ms = MACHINE(lvms); - - /* This code creates a standard swizzle of interrupts such that - * each device's first interrupt is based on it's PCI_SLOT number. - * (See pci_swizzle_map_irq_fn()) - * - * We only need one entry per interrupt in the table (not one per - * possible slot) seeing the interrupt-map-mask will allow the table - * to wrap to any number of devices. - */ - - for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { - int devfn = dev * 0x8; - - for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { - int irq_nr = 16 + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); - int i = 0; - - /* Fill PCI address cells */ - irq_map[i] = cpu_to_be32(devfn << 8); - i += 3; - - /* Fill PCI Interrupt cells */ - irq_map[i] = cpu_to_be32(pin + 1); - i += 1; - - /* Fill interrupt controller phandle and cells */ - irq_map[i++] = cpu_to_be32(*pch_pic_phandle); - irq_map[i++] = cpu_to_be32(irq_nr); - - if (!irq_map_stride) { - irq_map_stride = i; - } - irq_map += irq_map_stride; - } - } - - - qemu_fdt_setprop(ms->fdt, nodename, "interrupt-map", full_irq_map, - GPEX_NUM_IRQS * GPEX_NUM_IRQS * - irq_map_stride * sizeof(uint32_t)); - qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupt-map-mask", - 0x1800, 0, 0, 0x7); -} - -static void fdt_add_pcie_node(const LoongArchVirtMachineState *lvms, - uint32_t *pch_pic_phandle, - uint32_t *pch_msi_phandle) -{ - char *nodename; - hwaddr base_mmio = VIRT_PCI_MEM_BASE; - hwaddr size_mmio = VIRT_PCI_MEM_SIZE; - hwaddr base_pio = VIRT_PCI_IO_BASE; - hwaddr size_pio = VIRT_PCI_IO_SIZE; - hwaddr base_pcie = VIRT_PCI_CFG_BASE; - hwaddr size_pcie = VIRT_PCI_CFG_SIZE; - hwaddr base = base_pcie; - - const MachineState *ms = MACHINE(lvms); - - nodename = g_strdup_printf("/pcie@%" PRIx64, base); - qemu_fdt_add_subnode(ms->fdt, nodename); - qemu_fdt_setprop_string(ms->fdt, nodename, - "compatible", "pci-host-ecam-generic"); - qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "pci"); - qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 3); - qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 2); - qemu_fdt_setprop_cell(ms->fdt, nodename, "linux,pci-domain", 0); - qemu_fdt_setprop_cells(ms->fdt, nodename, "bus-range", 0, - PCIE_MMCFG_BUS(VIRT_PCI_CFG_SIZE - 1)); - qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); - qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", - 2, base_pcie, 2, size_pcie); - qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges", - 1, FDT_PCI_RANGE_IOPORT, 2, VIRT_PCI_IO_OFFSET, - 2, base_pio, 2, size_pio, - 1, FDT_PCI_RANGE_MMIO, 2, base_mmio, - 2, base_mmio, 2, size_mmio); - qemu_fdt_setprop_cells(ms->fdt, nodename, "msi-map", 
- 0, *pch_msi_phandle, 0, 0x10000); - - fdt_add_pcie_irq_map_node(lvms, nodename, pch_pic_phandle); - - g_free(nodename); -} - -static void fdt_add_memory_node(MachineState *ms, - uint64_t base, uint64_t size, int node_id) -{ - char *nodename = g_strdup_printf("/memory@%" PRIx64, base); - - qemu_fdt_add_subnode(ms->fdt, nodename); - qemu_fdt_setprop_cells(ms->fdt, nodename, "reg", base >> 32, base, - size >> 32, size); - qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "memory"); - - if (ms->numa_state && ms->numa_state->num_nodes) { - qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id", node_id); - } - - g_free(nodename); -} - -static void fdt_add_memory_nodes(MachineState *ms) -{ - hwaddr base, size, ram_size, gap; - int i, nb_numa_nodes, nodes; - NodeInfo *numa_info; - - ram_size = ms->ram_size; - base = VIRT_LOWMEM_BASE; - gap = VIRT_LOWMEM_SIZE; - nodes = nb_numa_nodes = ms->numa_state->num_nodes; - numa_info = ms->numa_state->nodes; - if (!nodes) { - nodes = 1; - } - - for (i = 0; i < nodes; i++) { - if (nb_numa_nodes) { - size = numa_info[i].node_mem; - } else { - size = ram_size; - } - - /* - * memory for the node splited into two part - * lowram: [base, +gap) - * highram: [VIRT_HIGHMEM_BASE, +(len - gap)) - */ - if (size >= gap) { - fdt_add_memory_node(ms, base, gap, i); - size -= gap; - base = VIRT_HIGHMEM_BASE; - gap = ram_size - VIRT_LOWMEM_SIZE; - } - - if (size) { - fdt_add_memory_node(ms, base, size, i); - base += size; - gap -= size; - } - } -} - static void virt_build_smbios(LoongArchVirtMachineState *lvms) { MachineState *ms = MACHINE(lvms); @@ -556,6 +136,10 @@ static void virt_build_smbios(LoongArchVirtMachineState *lvms) return; } + if (kvm_enabled()) { + product = "KVM Virtual Machine"; + } + smbios_set_defaults("QEMU", product, mc->name); smbios_get_tables(ms, SMBIOS_ENTRY_POINT_TYPE_64, @@ -576,7 +160,8 @@ static void virt_done(Notifier *notifier, void *data) LoongArchVirtMachineState *lvms = container_of(notifier, LoongArchVirtMachineState, machine_done); virt_build_smbios(lvms); - loongarch_acpi_setup(lvms); + virt_acpi_setup(lvms); + virt_fdt_setup(lvms); } static void virt_powerdown_req(Notifier *notifier, void *opaque) @@ -587,8 +172,15 @@ static void virt_powerdown_req(Notifier *notifier, void *opaque) acpi_send_event(s->acpi_ged, ACPI_POWER_DOWN_STATUS); } -static void memmap_add_entry(uint64_t address, uint64_t length, uint32_t type) +static void memmap_add_entry(MachineState *ms, uint64_t address, + uint64_t length, uint32_t type) { + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(ms); + struct memmap_entry *memmap_table; + unsigned int memmap_entries; + + memmap_table = lvms->memmap_table; + memmap_entries = lvms->memmap_entries; /* Ensure there are no duplicate entries. 
*/ for (unsigned i = 0; i < memmap_entries; i++) { assert(memmap_table[i].address != address); @@ -601,6 +193,8 @@ static void memmap_add_entry(uint64_t address, uint64_t length, uint32_t type) memmap_table[memmap_entries].type = cpu_to_le32(type); memmap_table[memmap_entries].reserved = 0; memmap_entries++; + lvms->memmap_table = memmap_table; + lvms->memmap_entries = memmap_entries; } static DeviceState *create_acpi_ged(DeviceState *pch_pic, @@ -608,11 +202,17 @@ static DeviceState *create_acpi_ged(DeviceState *pch_pic, { DeviceState *dev; MachineState *ms = MACHINE(lvms); + MachineClass *mc = MACHINE_GET_CLASS(lvms); uint32_t event = ACPI_GED_PWR_DOWN_EVT; if (ms->ram_slots) { event |= ACPI_GED_MEM_HOTPLUG_EVT; } + + if (mc->has_hotpluggable_cpus) { + event |= ACPI_GED_CPU_HOTPLUG_EVT; + } + dev = qdev_new(TYPE_ACPI_GED); qdev_prop_set_uint32(dev, "ged-event", event); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); @@ -624,6 +224,10 @@ static DeviceState *create_acpi_ged(DeviceState *pch_pic, /* ged regs used for reset and power down */ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, VIRT_GED_REG_ADDR); + if (mc->has_hotpluggable_cpus) { + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 3, VIRT_GED_CPUHP_ADDR); + } + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(pch_pic, VIRT_SCI_IRQ - VIRT_GSI_BASE)); return dev; @@ -655,9 +259,7 @@ static DeviceState *create_platform_bus(DeviceState *pch_pic) } static void virt_devices_init(DeviceState *pch_pic, - LoongArchVirtMachineState *lvms, - uint32_t *pch_pic_phandle, - uint32_t *pch_msi_phandle) + LoongArchVirtMachineState *lvms) { MachineClass *mc = MACHINE_GET_CLASS(lvms); DeviceState *gpex_dev; @@ -697,20 +299,23 @@ static void virt_devices_init(DeviceState *pch_pic, memory_region_add_subregion(get_system_memory(), VIRT_PCI_IO_BASE, pio_alias); - for (i = 0; i < GPEX_NUM_IRQS; i++) { + for (i = 0; i < PCI_NUM_PINS; i++) { sysbus_connect_irq(d, i, qdev_get_gpio_in(pch_pic, 16 + i)); gpex_set_irq_num(GPEX_HOST(gpex_dev), i, 16 + i); } - /* Add pcie node */ - fdt_add_pcie_node(lvms, pch_pic_phandle, pch_msi_phandle); - - serial_mm_init(get_system_memory(), VIRT_UART_BASE, 0, - qdev_get_gpio_in(pch_pic, - VIRT_UART_IRQ - VIRT_GSI_BASE), - 115200, serial_hd(0), DEVICE_LITTLE_ENDIAN); - fdt_add_uart_node(lvms, pch_pic_phandle); + /* + * Create uart fdt node in reverse order so that they appear + * in the finished device tree lowest address first + */ + for (i = VIRT_UART_COUNT; i-- > 0;) { + hwaddr base = VIRT_UART_BASE + i * VIRT_UART_SIZE; + int irq = VIRT_UART_IRQ + i - VIRT_GSI_BASE; + serial_mm_init(get_system_memory(), base, 0, + qdev_get_gpio_in(pch_pic, irq), + 115200, serial_hd(i), DEVICE_LITTLE_ENDIAN); + } /* Network init */ pci_init_nic_devices(pci_bus, mc->default_nic); @@ -723,7 +328,6 @@ static void virt_devices_init(DeviceState *pch_pic, sysbus_create_simple("ls7a_rtc", VIRT_RTC_REG_BASE, qdev_get_gpio_in(pch_pic, VIRT_RTC_IRQ - VIRT_GSI_BASE)); - fdt_add_rtc_node(lvms, pch_pic_phandle); /* acpi ged */ lvms->acpi_ged = create_acpi_ged(pch_pic, lvms); @@ -731,17 +335,35 @@ static void virt_devices_init(DeviceState *pch_pic, lvms->platform_bus_dev = create_platform_bus(pch_pic); } -static void virt_irq_init(LoongArchVirtMachineState *lvms) +static void virt_cpu_irq_init(LoongArchVirtMachineState *lvms) { + int num; MachineState *ms = MACHINE(lvms); - DeviceState *pch_pic, *pch_msi, *cpudev; + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus; + CPUState *cs; + + /* cpu nodes */ + possible_cpus = 
mc->possible_cpu_arch_ids(ms); + for (num = 0; num < possible_cpus->len; num++) { + cs = possible_cpus->cpus[num].cpu; + if (cs == NULL) { + continue; + } + + hotplug_handler_plug(HOTPLUG_HANDLER(lvms->ipi), DEVICE(cs), + &error_abort); + hotplug_handler_plug(HOTPLUG_HANDLER(lvms->extioi), DEVICE(cs), + &error_abort); + } +} + +static void virt_irq_init(LoongArchVirtMachineState *lvms) +{ + DeviceState *pch_pic, *pch_msi; DeviceState *ipi, *extioi; SysBusDevice *d; - LoongArchCPU *lacpu; - CPULoongArchState *env; - CPUState *cpu_state; - int cpu, pin, i, start, num; - uint32_t cpuintc_phandle, eiointc_phandle, pch_pic_phandle, pch_msi_phandle; + int i, start, num; /* * Extended IRQ model. @@ -788,81 +410,24 @@ static void virt_irq_init(LoongArchVirtMachineState *lvms) */ /* Create IPI device */ - ipi = qdev_new(TYPE_LOONGSON_IPI); - qdev_prop_set_uint32(ipi, "num-cpu", ms->smp.cpus); + ipi = qdev_new(TYPE_LOONGARCH_IPI); + lvms->ipi = ipi; sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); - /* IPI iocsr memory region */ - memory_region_add_subregion(&lvms->system_iocsr, SMP_IPI_MAILBOX, - sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 0)); - memory_region_add_subregion(&lvms->system_iocsr, MAIL_SEND_ADDR, - sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 1)); - - /* Add cpu interrupt-controller */ - fdt_add_cpuic_node(lvms, &cpuintc_phandle); - - for (cpu = 0; cpu < ms->smp.cpus; cpu++) { - cpu_state = qemu_get_cpu(cpu); - cpudev = DEVICE(cpu_state); - lacpu = LOONGARCH_CPU(cpu_state); - env = &(lacpu->env); - env->address_space_iocsr = &lvms->as_iocsr; - - /* connect ipi irq to cpu irq */ - qdev_connect_gpio_out(ipi, cpu, qdev_get_gpio_in(cpudev, IRQ_IPI)); - env->ipistate = ipi; - } - /* Create EXTIOI device */ extioi = qdev_new(TYPE_LOONGARCH_EXTIOI); - qdev_prop_set_uint32(extioi, "num-cpu", ms->smp.cpus); + lvms->extioi = extioi; if (virt_is_veiointc_enabled(lvms)) { qdev_prop_set_bit(extioi, "has-virtualization-extension", true); } sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal); - memory_region_add_subregion(&lvms->system_iocsr, APIC_BASE, - sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 0)); - if (virt_is_veiointc_enabled(lvms)) { - memory_region_add_subregion(&lvms->system_iocsr, EXTIOI_VIRT_BASE, - sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 1)); - } - /* - * connect ext irq to the cpu irq - * cpu_pin[9:2] <= intc_pin[7:0] - */ - for (cpu = 0; cpu < ms->smp.cpus; cpu++) { - cpudev = DEVICE(qemu_get_cpu(cpu)); - for (pin = 0; pin < LS3A_INTC_IP; pin++) { - qdev_connect_gpio_out(extioi, (cpu * 8 + pin), - qdev_get_gpio_in(cpudev, pin + 2)); - } - } - - /* Add Extend I/O Interrupt Controller node */ - fdt_add_eiointc_node(lvms, &cpuintc_phandle, &eiointc_phandle); - - pch_pic = qdev_new(TYPE_LOONGARCH_PCH_PIC); + virt_cpu_irq_init(lvms); + pch_pic = qdev_new(TYPE_LOONGARCH_PIC); num = VIRT_PCH_PIC_IRQ_NUM; qdev_prop_set_uint32(pch_pic, "pch_pic_irq_num", num); d = SYS_BUS_DEVICE(pch_pic); sysbus_realize_and_unref(d, &error_fatal); - memory_region_add_subregion(get_system_memory(), VIRT_IOAPIC_REG_BASE, - sysbus_mmio_get_region(d, 0)); - memory_region_add_subregion(get_system_memory(), - VIRT_IOAPIC_REG_BASE + PCH_PIC_ROUTE_ENTRY_OFFSET, - sysbus_mmio_get_region(d, 1)); - memory_region_add_subregion(get_system_memory(), - VIRT_IOAPIC_REG_BASE + PCH_PIC_INT_STATUS_LO, - sysbus_mmio_get_region(d, 2)); - - /* Connect pch_pic irqs to extioi */ - for (i = 0; i < num; i++) { - qdev_connect_gpio_out(DEVICE(d), i, qdev_get_gpio_in(extioi, i)); - } - - /* Add PCH PIC 
node */ - fdt_add_pch_pic_node(lvms, &eiointc_phandle, &pch_pic_phandle); pch_msi = qdev_new(TYPE_LOONGARCH_PCH_MSI); start = num; @@ -872,16 +437,41 @@ static void virt_irq_init(LoongArchVirtMachineState *lvms) d = SYS_BUS_DEVICE(pch_msi); sysbus_realize_and_unref(d, &error_fatal); sysbus_mmio_map(d, 0, VIRT_PCH_MSI_ADDR_LOW); - for (i = 0; i < num; i++) { - /* Connect pch_msi irqs to extioi */ - qdev_connect_gpio_out(DEVICE(d), i, - qdev_get_gpio_in(extioi, i + start)); - } - /* Add PCH MSI node */ - fdt_add_pch_msi_node(lvms, &eiointc_phandle, &pch_msi_phandle); + if (kvm_irqchip_in_kernel()) { + kvm_loongarch_init_irq_routing(); + } else { + /* IPI iocsr memory region */ + memory_region_add_subregion(&lvms->system_iocsr, SMP_IPI_MAILBOX, + sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 0)); + memory_region_add_subregion(&lvms->system_iocsr, MAIL_SEND_ADDR, + sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 1)); + + /* EXTIOI iocsr memory region */ + memory_region_add_subregion(&lvms->system_iocsr, APIC_BASE, + sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 0)); + if (virt_is_veiointc_enabled(lvms)) { + memory_region_add_subregion(&lvms->system_iocsr, EXTIOI_VIRT_BASE, + sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 1)); + } + + /* PCH_PIC memory region */ + memory_region_add_subregion(get_system_memory(), VIRT_IOAPIC_REG_BASE, + sysbus_mmio_get_region(SYS_BUS_DEVICE(pch_pic), 0)); + + /* Connect pch_pic irqs to extioi */ + for (i = 0; i < VIRT_PCH_PIC_IRQ_NUM; i++) { + qdev_connect_gpio_out(DEVICE(pch_pic), i, + qdev_get_gpio_in(extioi, i)); + } - virt_devices_init(pch_pic, lvms, &pch_pic_phandle, &pch_msi_phandle); + for (i = VIRT_PCH_PIC_IRQ_NUM; i < EXTIOI_IRQS; i++) { + /* Connect pch_msi irqs to extioi */ + qdev_connect_gpio_out(DEVICE(pch_msi), i - VIRT_PCH_PIC_IRQ_NUM, + qdev_get_gpio_in(extioi, i)); + } + } + virt_devices_init(pch_pic, lvms); } static void virt_firmware_init(LoongArchVirtMachineState *lvms) @@ -941,6 +531,10 @@ static MemTxResult virt_iocsr_misc_write(void *opaque, hwaddr addr, switch (addr) { case MISC_FUNC_REG: + if (kvm_irqchip_in_kernel()) { + return MEMTX_OK; + } + if (!virt_is_veiointc_enabled(lvms)) { return MEMTX_OK; } @@ -991,6 +585,10 @@ static MemTxResult virt_iocsr_misc_read(void *opaque, hwaddr addr, ret = 0x303030354133ULL; /* "3A5000" */ break; case MISC_FUNC_REG: + if (kvm_irqchip_in_kernel()) { + return MEMTX_OK; + } + if (!virt_is_veiointc_enabled(lvms)) { ret |= BIT_ULL(IOCSRM_EXTIOI_EN); break; @@ -1051,13 +649,13 @@ static void fw_cfg_add_memory(MachineState *ms) } if (size >= gap) { - memmap_add_entry(base, gap, 1); + memmap_add_entry(ms, base, gap, 1); size -= gap; base = VIRT_HIGHMEM_BASE; } if (size) { - memmap_add_entry(base, size, 1); + memmap_add_entry(ms, base, size, 1); base += size; } @@ -1072,35 +670,32 @@ static void fw_cfg_add_memory(MachineState *ms) * lowram: [base, +(gap - numa_info[0].node_mem)) * highram: [VIRT_HIGHMEM_BASE, +(ram_size - gap)) */ - memmap_add_entry(base, gap - numa_info[0].node_mem, 1); + memmap_add_entry(ms, base, gap - numa_info[0].node_mem, 1); size = ram_size - gap; base = VIRT_HIGHMEM_BASE; } else { size = ram_size - numa_info[0].node_mem; } - if (size) - memmap_add_entry(base, size, 1); + if (size) { + memmap_add_entry(ms, base, size, 1); + } } static void virt_init(MachineState *machine) { - LoongArchCPU *lacpu; const char *cpu_model = machine->cpu_type; MemoryRegion *address_space_mem = get_system_memory(); LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine); int i; hwaddr base, size, 
ram_size = machine->ram_size; - const CPUArchIdList *possible_cpus; MachineClass *mc = MACHINE_GET_CLASS(machine); - CPUState *cpu; + Object *cpuobj; if (!cpu_model) { cpu_model = LOONGARCH_CPU_TYPE_NAME("la464"); } - create_fdt(lvms); - /* Create IOCSR space */ memory_region_init_io(&lvms->system_iocsr, OBJECT(machine), NULL, machine, "iocsr", UINT64_MAX); @@ -1111,16 +706,16 @@ static void virt_init(MachineState *machine) memory_region_add_subregion(&lvms->system_iocsr, 0, &lvms->iocsr_mem); /* Init CPUs */ - possible_cpus = mc->possible_cpu_arch_ids(machine); - for (i = 0; i < possible_cpus->len; i++) { - cpu = cpu_create(machine->cpu_type); - cpu->cpu_index = i; - machine->possible_cpus->cpus[i].cpu = cpu; - lacpu = LOONGARCH_CPU(cpu); - lacpu->phy_id = machine->possible_cpus->cpus[i].arch_id; + mc->possible_cpu_arch_ids(machine); + for (i = 0; i < machine->smp.cpus; i++) { + cpuobj = object_new(machine->cpu_type); + if (cpuobj == NULL) { + error_report("Fail to create object with type %s ", + machine->cpu_type); + exit(EXIT_FAILURE); + } + qdev_realize_and_unref(DEVICE(cpuobj), NULL, &error_fatal); } - fdt_add_cpu_nodes(lvms); - fdt_add_memory_nodes(machine); fw_cfg_add_memory(machine); /* Node0 memory */ @@ -1169,37 +764,18 @@ static void virt_init(MachineState *machine) rom_set_fw(lvms->fw_cfg); if (lvms->fw_cfg != NULL) { fw_cfg_add_file(lvms->fw_cfg, "etc/memmap", - memmap_table, - sizeof(struct memmap_entry) * (memmap_entries)); + lvms->memmap_table, + sizeof(struct memmap_entry) * lvms->memmap_entries); } - fdt_add_fw_cfg_node(lvms); - fdt_add_flash_node(lvms); /* Initialize the IO interrupt subsystem */ virt_irq_init(lvms); - platform_bus_add_all_fdt_nodes(machine->fdt, "/platic", - VIRT_PLATFORM_BUS_BASEADDRESS, - VIRT_PLATFORM_BUS_SIZE, - VIRT_PLATFORM_BUS_IRQ); lvms->machine_done.notify = virt_done; qemu_add_machine_init_done_notifier(&lvms->machine_done); /* connect powerdown request */ lvms->powerdown_notifier.notify = virt_powerdown_req; qemu_register_powerdown_notifier(&lvms->powerdown_notifier); - /* - * Since lowmem region starts from 0 and Linux kernel legacy start address - * at 2 MiB, FDT base address is located at 1 MiB to avoid NULL pointer - * access. FDT size limit with 1 MiB. - * Put the FDT into the memory map as a ROM image: this will ensure - * the FDT is copied again upon reset, even if addr points into RAM. 
- */ - qemu_fdt_dumpdtb(machine->fdt, lvms->fdt_size); - rom_add_blob_fixed_as("fdt", machine->fdt, lvms->fdt_size, FDT_BASE, - &address_space_memory); - qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds, - rom_ptr_for_as(&address_space_memory, FDT_BASE, lvms->fdt_size)); - lvms->bootinfo.ram_size = ram_size; loongarch_load_kernel(machine, &lvms->bootinfo); } @@ -1221,6 +797,48 @@ static void virt_set_acpi(Object *obj, Visitor *v, const char *name, visit_type_OnOffAuto(v, name, &lvms->acpi, errp); } +static char *virt_get_oem_id(Object *obj, Error **errp) +{ + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj); + + return g_strdup(lvms->oem_id); +} + +static void virt_set_oem_id(Object *obj, const char *value, Error **errp) +{ + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj); + size_t len = strlen(value); + + if (len > 6) { + error_setg(errp, + "User specified oem-id value is bigger than 6 bytes in size"); + return; + } + + strncpy(lvms->oem_id, value, 6); +} + +static char *virt_get_oem_table_id(Object *obj, Error **errp) +{ + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj); + + return g_strdup(lvms->oem_table_id); +} + +static void virt_set_oem_table_id(Object *obj, const char *value, + Error **errp) +{ + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj); + size_t len = strlen(value); + + if (len > 8) { + error_setg(errp, + "User specified oem-table-id value is bigger than 8 bytes in size"); + return; + } + strncpy(lvms->oem_table_id, value, 8); +} + static void virt_initfn(Object *obj) { LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj); @@ -1234,6 +852,195 @@ static void virt_initfn(Object *obj) virt_flash_create(lvms); } +static void virt_get_topo_from_index(MachineState *ms, + LoongArchCPUTopo *topo, int index) +{ + topo->socket_id = index / (ms->smp.cores * ms->smp.threads); + topo->core_id = index / ms->smp.threads % ms->smp.cores; + topo->thread_id = index % ms->smp.threads; +} + +static unsigned int topo_align_up(unsigned int count) +{ + g_assert(count >= 1); + count -= 1; + return BIT(count ? 32 - clz32(count) : 0); +} + +/* + * LoongArch Reference Manual Vol1, Chapter 7.4.12 CPU Identity + * For CPU architecture, bit0 .. 
bit8 is valid for CPU id, max cpuid is 512
+ * However for IPI/Eiointc interrupt controller, max supported cpu id for
+ * irq routing is 256
+ *
+ * Here max cpu id is 256 for virt machine
+ */
+static int virt_get_arch_id_from_topo(MachineState *ms, LoongArchCPUTopo *topo)
+{
+    int arch_id, threads, cores, sockets;
+
+    threads = topo_align_up(ms->smp.threads);
+    cores = topo_align_up(ms->smp.cores);
+    sockets = topo_align_up(ms->smp.sockets);
+    if ((threads * cores * sockets) > 256) {
+        error_report("Exceeding max cpuid 256 with sockets[%d] cores[%d]"
+                     " threads[%d]", ms->smp.sockets, ms->smp.cores,
+                     ms->smp.threads);
+        exit(1);
+    }
+
+    arch_id = topo->thread_id + topo->core_id * threads;
+    arch_id += topo->socket_id * threads * cores;
+    return arch_id;
+}
+
+/* Find cpu slot in machine->possible_cpus by arch_id */
+static CPUArchId *virt_find_cpu_slot(MachineState *ms, int arch_id)
+{
+    int n;
+    for (n = 0; n < ms->possible_cpus->len; n++) {
+        if (ms->possible_cpus->cpus[n].arch_id == arch_id) {
+            return &ms->possible_cpus->cpus[n];
+        }
+    }
+
+    return NULL;
+}
+
+/* Find cpu slot for a cold-plugged CPU object, where cpu is NULL */
+static CPUArchId *virt_find_empty_cpu_slot(MachineState *ms)
+{
+    int n;
+    for (n = 0; n < ms->possible_cpus->len; n++) {
+        if (ms->possible_cpus->cpus[n].cpu == NULL) {
+            return &ms->possible_cpus->cpus[n];
+        }
+    }
+
+    return NULL;
+}
+
+static void virt_cpu_pre_plug(HotplugHandler *hotplug_dev,
+                              DeviceState *dev, Error **errp)
+{
+    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);
+    MachineState *ms = MACHINE(OBJECT(hotplug_dev));
+    LoongArchCPU *cpu = LOONGARCH_CPU(dev);
+    CPUState *cs = CPU(dev);
+    CPUArchId *cpu_slot;
+    LoongArchCPUTopo topo;
+    int arch_id;
+
+    if (lvms->acpi_ged) {
+        if ((cpu->thread_id < 0) || (cpu->thread_id >= ms->smp.threads)) {
+            error_setg(errp,
+                       "Invalid thread-id %u specified, must be in range 0:%u",
+                       cpu->thread_id, ms->smp.threads - 1);
+            return;
+        }
+
+        if ((cpu->core_id < 0) || (cpu->core_id >= ms->smp.cores)) {
+            error_setg(errp,
+                       "Invalid core-id %u specified, must be in range 0:%u",
+                       cpu->core_id, ms->smp.cores - 1);
+            return;
+        }
+
+        if ((cpu->socket_id < 0) || (cpu->socket_id >= ms->smp.sockets)) {
+            error_setg(errp,
+                       "Invalid socket-id %u specified, must be in range 0:%u",
+                       cpu->socket_id, ms->smp.sockets - 1);
+            return;
+        }
+
+        topo.socket_id = cpu->socket_id;
+        topo.core_id = cpu->core_id;
+        topo.thread_id = cpu->thread_id;
+        arch_id = virt_get_arch_id_from_topo(ms, &topo);
+        cpu_slot = virt_find_cpu_slot(ms, arch_id);
+        if (CPU(cpu_slot->cpu)) {
+            error_setg(errp,
+                       "cpu(id%d=%d:%d:%d) with arch-id %" PRIu64 " exists",
+                       cs->cpu_index, cpu->socket_id, cpu->core_id,
+                       cpu->thread_id, cpu_slot->arch_id);
+            return;
+        }
+    } else {
+        /* For a cold-added cpu, find an empty cpu slot */
+        cpu_slot = virt_find_empty_cpu_slot(ms);
+        topo.socket_id = cpu_slot->props.socket_id;
+        topo.core_id = cpu_slot->props.core_id;
+        topo.thread_id = cpu_slot->props.thread_id;
+        object_property_set_int(OBJECT(dev), "socket-id", topo.socket_id, NULL);
+        object_property_set_int(OBJECT(dev), "core-id", topo.core_id, NULL);
+        object_property_set_int(OBJECT(dev), "thread-id", topo.thread_id, NULL);
+    }
+
+    cpu->env.address_space_iocsr = &lvms->as_iocsr;
+    cpu->phy_id = cpu_slot->arch_id;
+    cs->cpu_index = cpu_slot - ms->possible_cpus->cpus;
+    numa_cpu_pre_plug(cpu_slot, dev, errp);
+}
+
+static void virt_cpu_unplug_request(HotplugHandler *hotplug_dev,
+                                    DeviceState *dev, Error **errp)
+{
+    LoongArchVirtMachineState *lvms =
LOONGARCH_VIRT_MACHINE(hotplug_dev); + LoongArchCPU *cpu = LOONGARCH_CPU(dev); + CPUState *cs = CPU(dev); + + if (cs->cpu_index == 0) { + error_setg(errp, "hot-unplug of boot cpu(id%d=%d:%d:%d) not supported", + cs->cpu_index, cpu->socket_id, + cpu->core_id, cpu->thread_id); + return; + } + + hotplug_handler_unplug_request(HOTPLUG_HANDLER(lvms->acpi_ged), dev, errp); +} + +static void virt_cpu_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + CPUArchId *cpu_slot; + LoongArchCPU *cpu = LOONGARCH_CPU(dev); + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev); + + /* Notify ipi and extioi irqchip to remove interrupt routing to CPU */ + hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->ipi), dev, &error_abort); + hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->extioi), dev, &error_abort); + + /* Notify acpi ged CPU removed */ + hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->acpi_ged), dev, &error_abort); + + cpu_slot = virt_find_cpu_slot(MACHINE(lvms), cpu->phy_id); + cpu_slot->cpu = NULL; +} + +static void virt_cpu_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + CPUArchId *cpu_slot; + LoongArchCPU *cpu = LOONGARCH_CPU(dev); + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev); + + if (lvms->ipi) { + hotplug_handler_plug(HOTPLUG_HANDLER(lvms->ipi), dev, &error_abort); + } + + if (lvms->extioi) { + hotplug_handler_plug(HOTPLUG_HANDLER(lvms->extioi), dev, &error_abort); + } + + if (lvms->acpi_ged) { + hotplug_handler_plug(HOTPLUG_HANDLER(lvms->acpi_ged), dev, + &error_abort); + } + + cpu_slot = virt_find_cpu_slot(MACHINE(lvms), cpu->phy_id); + cpu_slot->cpu = CPU(dev); +} + static bool memhp_type_supported(DeviceState *dev) { /* we only support pc dimm now */ @@ -1252,6 +1059,8 @@ static void virt_device_pre_plug(HotplugHandler *hotplug_dev, { if (memhp_type_supported(dev)) { virt_mem_pre_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) { + virt_cpu_pre_plug(hotplug_dev, dev, errp); } } @@ -1270,6 +1079,8 @@ static void virt_device_unplug_request(HotplugHandler *hotplug_dev, { if (memhp_type_supported(dev)) { virt_mem_unplug_request(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) { + virt_cpu_unplug_request(hotplug_dev, dev, errp); } } @@ -1288,6 +1099,8 @@ static void virt_device_unplug(HotplugHandler *hotplug_dev, { if (memhp_type_supported(dev)) { virt_mem_unplug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) { + virt_cpu_unplug(hotplug_dev, dev, errp); } } @@ -1315,6 +1128,8 @@ static void virt_device_plug_cb(HotplugHandler *hotplug_dev, } } else if (memhp_type_supported(dev)) { virt_mem_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) { + virt_cpu_plug(hotplug_dev, dev, errp); } } @@ -1324,6 +1139,7 @@ static HotplugHandler *virt_get_hotplug_handler(MachineState *machine, MachineClass *mc = MACHINE_GET_CLASS(machine); if (device_is_dynamic_sysbus(mc, dev) || + object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU) || object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI) || memhp_type_supported(dev)) { return HOTPLUG_HANDLER(machine); @@ -1333,8 +1149,9 @@ static HotplugHandler *virt_get_hotplug_handler(MachineState *machine, static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms) { - int n; + int n, arch_id; unsigned int max_cpus = ms->smp.max_cpus; + LoongArchCPUTopo topo; if (ms->possible_cpus) { 
assert(ms->possible_cpus->len == max_cpus); @@ -1345,17 +1162,17 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms) sizeof(CPUArchId) * max_cpus); ms->possible_cpus->len = max_cpus; for (n = 0; n < ms->possible_cpus->len; n++) { + virt_get_topo_from_index(ms, &topo, n); + arch_id = virt_get_arch_id_from_topo(ms, &topo); ms->possible_cpus->cpus[n].type = ms->cpu_type; - ms->possible_cpus->cpus[n].arch_id = n; - + ms->possible_cpus->cpus[n].arch_id = arch_id; + ms->possible_cpus->cpus[n].vcpus_count = 1; ms->possible_cpus->cpus[n].props.has_socket_id = true; - ms->possible_cpus->cpus[n].props.socket_id = - n / (ms->smp.cores * ms->smp.threads); + ms->possible_cpus->cpus[n].props.socket_id = topo.socket_id; ms->possible_cpus->cpus[n].props.has_core_id = true; - ms->possible_cpus->cpus[n].props.core_id = - n / ms->smp.threads % ms->smp.cores; + ms->possible_cpus->cpus[n].props.core_id = topo.core_id; ms->possible_cpus->cpus[n].props.has_thread_id = true; - ms->possible_cpus->cpus[n].props.thread_id = n % ms->smp.threads; + ms->possible_cpus->cpus[n].props.thread_id = topo.thread_id; } return ms->possible_cpus; } @@ -1382,7 +1199,7 @@ static int64_t virt_get_default_cpu_node_id(const MachineState *ms, int idx) } } -static void virt_class_init(ObjectClass *oc, void *data) +static void virt_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); @@ -1390,6 +1207,7 @@ static void virt_class_init(ObjectClass *oc, void *data) mc->init = virt_init; mc->default_cpu_type = LOONGARCH_CPU_TYPE_NAME("la464"); mc->default_ram_id = "loongarch.ram"; + mc->desc = "QEMU LoongArch Virtual Machine"; mc->max_cpus = LOONGARCH_MAX_CPUS; mc->is_default = 1; mc->default_kernel_irqchip_split = false; @@ -1402,6 +1220,7 @@ static void virt_class_init(ObjectClass *oc, void *data) mc->numa_mem_supported = true; mc->auto_enable_numa_with_memhp = true; mc->auto_enable_numa_with_memdev = true; + mc->has_hotpluggable_cpus = true; mc->get_hotplug_handler = virt_get_hotplug_handler; mc->default_nic = "virtio-net-pci"; hc->plug = virt_device_plug_cb; @@ -1420,9 +1239,26 @@ static void virt_class_init(ObjectClass *oc, void *data) object_class_property_set_description(oc, "v-eiointc", "Enable Virt Extend I/O Interrupt Controller."); machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_UEFI_VARS_SYSBUS); #ifdef CONFIG_TPM machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS); #endif + object_class_property_add_str(oc, "x-oem-id", + virt_get_oem_id, + virt_set_oem_id); + object_class_property_set_description(oc, "x-oem-id", + "Override the default value of field OEMID " + "in ACPI table header." + "The string may be up to 6 bytes in size"); + + + object_class_property_add_str(oc, "x-oem-table-id", + virt_get_oem_table_id, + virt_set_oem_table_id); + object_class_property_set_description(oc, "x-oem-table-id", + "Override the default value of field OEM Table ID " + "in ACPI table header." 
+ "The string may be up to 8 bytes in size"); } static const TypeInfo virt_machine_types[] = { @@ -1432,7 +1268,7 @@ static const TypeInfo virt_machine_types[] = { .instance_size = sizeof(LoongArchVirtMachineState), .class_init = virt_class_init, .instance_init = virt_initfn, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } }, diff --git a/hw/m68k/Kconfig b/hw/m68k/Kconfig index 0092cda4e9c..aff769b30f1 100644 --- a/hw/m68k/Kconfig +++ b/hw/m68k/Kconfig @@ -18,6 +18,7 @@ config NEXTCUBE depends on M68K select FRAMEBUFFER select ESCC + select EMPTY_SLOT config Q800 bool diff --git a/hw/m68k/an5206.c b/hw/m68k/an5206.c index 1e8e64f8bd0..d97399b882b 100644 --- a/hw/m68k/an5206.c +++ b/hw/m68k/an5206.c @@ -14,7 +14,7 @@ #include "hw/loader.h" #include "elf.h" #include "qemu/error-report.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" #define KERNEL_LOAD_ADDR 0x10000 #define AN5206_MBAR_ADDR 0x10000000 @@ -74,7 +74,7 @@ static void an5206_init(MachineState *machine) } kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry, - NULL, NULL, NULL, 1, EM_68K, 0, 0); + NULL, NULL, NULL, ELFDATA2MSB, EM_68K, 0, 0); entry = elf_entry; if (kernel_size < 0) { kernel_size = load_uimage(kernel_filename, &entry, NULL, NULL, diff --git a/hw/m68k/bootinfo.h b/hw/m68k/bootinfo.h index 0e6e3eea87d..0b3e7c4ea01 100644 --- a/hw/m68k/bootinfo.h +++ b/hw/m68k/bootinfo.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + * SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note * * Bootinfo tags from linux bootinfo.h and bootinfo-mac.h: * This is an easily parsable and extendable structure containing all @@ -14,39 +14,39 @@ #define BOOTINFO0(base, id) \ do { \ - stw_p(base, id); \ + stw_be_p(base, id); \ base += 2; \ - stw_p(base, sizeof(struct bi_record)); \ + stw_be_p(base, sizeof(struct bi_record)); \ base += 2; \ } while (0) #define BOOTINFO1(base, id, value) \ do { \ - stw_p(base, id); \ + stw_be_p(base, id); \ base += 2; \ - stw_p(base, sizeof(struct bi_record) + 4); \ + stw_be_p(base, sizeof(struct bi_record) + 4); \ base += 2; \ - stl_p(base, value); \ + stl_be_p(base, value); \ base += 4; \ } while (0) #define BOOTINFO2(base, id, value1, value2) \ do { \ - stw_p(base, id); \ + stw_be_p(base, id); \ base += 2; \ - stw_p(base, sizeof(struct bi_record) + 8); \ + stw_be_p(base, sizeof(struct bi_record) + 8); \ base += 2; \ - stl_p(base, value1); \ + stl_be_p(base, value1); \ base += 4; \ - stl_p(base, value2); \ + stl_be_p(base, value2); \ base += 4; \ } while (0) #define BOOTINFOSTR(base, id, string) \ do { \ - stw_p(base, id); \ + stw_be_p(base, id); \ base += 2; \ - stw_p(base, \ + stw_be_p(base, \ (sizeof(struct bi_record) + strlen(string) + \ 1 /* null termination */ + 3 /* padding */) & ~3); \ base += 2; \ @@ -59,13 +59,13 @@ #define BOOTINFODATA(base, id, data, len) \ do { \ - stw_p(base, id); \ + stw_be_p(base, id); \ base += 2; \ - stw_p(base, \ + stw_be_p(base, \ (sizeof(struct bi_record) + len + \ 2 /* length field */ + 3 /* padding */) & ~3); \ base += 2; \ - stw_p(base, len); \ + stw_be_p(base, len); \ base += 2; \ for (unsigned i_ = 0; i_ < len; ++i_) { \ stb_p(base++, data[i_]); \ diff --git a/hw/m68k/mcf5206.c b/hw/m68k/mcf5206.c index 183fd3cc085..a25e782403f 100644 --- a/hw/m68k/mcf5206.c +++ b/hw/m68k/mcf5206.c @@ -16,7 +16,7 @@ #include "hw/m68k/mcf.h" #include "qemu/timer.h" #include "hw/ptimer.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include 
"hw/sysbus.h" /* General purpose timer module. */ @@ -582,7 +582,7 @@ static const MemoryRegionOps m5206_mbar_ops = { .write = m5206_mbar_writefn, .valid.min_access_size = 1, .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; static void mcf5206_mbar_realize(DeviceState *dev, Error **errp) @@ -600,13 +600,12 @@ static void mcf5206_mbar_realize(DeviceState *dev, Error **errp) s->uart[1] = mcf_uart_create(s->pic[13], serial_hd(1)); } -static Property mcf5206_mbar_properties[] = { +static const Property mcf5206_mbar_properties[] = { DEFINE_PROP_LINK("m68k-cpu", m5206_mbar_state, cpu, TYPE_M68K_CPU, M68kCPU *), - DEFINE_PROP_END_OF_LIST(), }; -static void mcf5206_mbar_class_init(ObjectClass *oc, void *data) +static void mcf5206_mbar_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -614,7 +613,7 @@ static void mcf5206_mbar_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->desc = "MCF5206 system integration module"; dc->realize = mcf5206_mbar_realize; - dc->reset = m5206_mbar_reset; + device_class_set_legacy_reset(dc, m5206_mbar_reset); } static const TypeInfo mcf5206_mbar_info = { diff --git a/hw/m68k/mcf5208.c b/hw/m68k/mcf5208.c index ec14096aa43..75cc076f787 100644 --- a/hw/m68k/mcf5208.c +++ b/hw/m68k/mcf5208.c @@ -4,6 +4,14 @@ * Copyright (c) 2007 CodeSourcery. * * This code is licensed under the GPL + * + * This file models both the MCF5208 SoC, and the + * MCF5208EVB evaluation board. For details see + * + * "MCF5208 Reference Manual" + * https://www.nxp.com/docs/en/reference-manual/MCF5208RM.pdf + * "M5208EVB-RevB 32-bit Microcontroller User Manual" + * https://www.nxp.com/docs/en/reference-manual/M5208EVBUM.pdf */ #include "qemu/osdep.h" @@ -18,8 +26,8 @@ #include "hw/m68k/mcf_fec.h" #include "qemu/timer.h" #include "hw/ptimer.h" -#include "sysemu/sysemu.h" -#include "sysemu/qtest.h" +#include "system/system.h" +#include "system/qtest.h" #include "net/net.h" #include "hw/boards.h" #include "hw/loader.h" @@ -147,7 +155,7 @@ static uint64_t m5208_timer_read(void *opaque, hwaddr addr, static const MemoryRegionOps m5208_timer_ops = { .read = m5208_timer_read, .write = m5208_timer_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; static uint64_t m5208_sys_read(void *opaque, hwaddr addr, @@ -158,7 +166,7 @@ static uint64_t m5208_sys_read(void *opaque, hwaddr addr, { int n; for (n = 0; n < 32; n++) { - if (current_machine->ram_size < (2u << n)) { + if (current_machine->ram_size < (2ULL << n)) { break; } } @@ -184,7 +192,7 @@ static void m5208_sys_write(void *opaque, hwaddr addr, static const MemoryRegionOps m5208_sys_ops = { .read = m5208_sys_read, .write = m5208_sys_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; static uint64_t m5208_rcm_read(void *opaque, hwaddr addr, @@ -216,7 +224,7 @@ static void m5208_rcm_write(void *opaque, hwaddr addr, static const MemoryRegionOps m5208_rcm_ops = { .read = m5208_rcm_read, .write = m5208_rcm_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; static void mcf5208_sys_init(MemoryRegion *address_space, qemu_irq *pic, @@ -351,7 +359,7 @@ static void mcf5208evb_init(MachineState *machine) /* Initial PC is always at offset 4 in firmware binaries */ ptr = rom_ptr(0x4, 4); assert(ptr != NULL); - env->pc = ldl_p(ptr); + env->pc = ldl_be_p(ptr); } /* Load kernel. 
*/ @@ -364,7 +372,7 @@ static void mcf5208evb_init(MachineState *machine) } kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry, - NULL, NULL, NULL, 1, EM_68K, 0, 0); + NULL, NULL, NULL, ELFDATA2MSB, EM_68K, 0, 0); entry = elf_entry; if (kernel_size < 0) { kernel_size = load_uimage(kernel_filename, &entry, NULL, NULL, diff --git a/hw/m68k/mcf_intc.c b/hw/m68k/mcf_intc.c index 1d3b34e18c6..e3055b841e8 100644 --- a/hw/m68k/mcf_intc.c +++ b/hw/m68k/mcf_intc.c @@ -166,7 +166,7 @@ static void mcf_intc_reset(DeviceState *dev) static const MemoryRegionOps mcf_intc_ops = { .read = mcf_intc_read, .write = mcf_intc_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; static void mcf_intc_instance_init(Object *obj) @@ -177,19 +177,18 @@ static void mcf_intc_instance_init(Object *obj) sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem); } -static Property mcf_intc_properties[] = { +static const Property mcf_intc_properties[] = { DEFINE_PROP_LINK("m68k-cpu", mcf_intc_state, cpu, TYPE_M68K_CPU, M68kCPU *), - DEFINE_PROP_END_OF_LIST(), }; -static void mcf_intc_class_init(ObjectClass *oc, void *data) +static void mcf_intc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); device_class_set_props(dc, mcf_intc_properties); set_bit(DEVICE_CATEGORY_MISC, dc->categories); - dc->reset = mcf_intc_reset; + device_class_set_legacy_reset(dc, mcf_intc_reset); } static const TypeInfo mcf_intc_gate_info = { diff --git a/hw/m68k/next-cube.c b/hw/m68k/next-cube.c index 9f6f90d68b4..957644b2d1e 100644 --- a/hw/m68k/next-cube.c +++ b/hw/m68k/next-cube.c @@ -2,6 +2,7 @@ * NeXT Cube System Driver * * Copyright (c) 2011 Bryce Lanham + * Copyright (c) 2024 Mark Cave-Ayland * * This code is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published @@ -11,8 +12,9 @@ #include "qemu/osdep.h" #include "exec/hwaddr.h" -#include "sysemu/sysemu.h" -#include "sysemu/qtest.h" +#include "exec/cpu-interrupt.h" +#include "system/system.h" +#include "system/qtest.h" #include "hw/irq.h" #include "hw/m68k/next-cube.h" #include "hw/boards.h" @@ -22,6 +24,7 @@ #include "qom/object.h" #include "hw/char/escc.h" /* ZILOG 8530 Serial Emulation */ #include "hw/block/fdc.h" +#include "hw/misc/empty_slot.h" #include "hw/qdev-properties.h" #include "qapi/error.h" #include "qemu/error-report.h" @@ -37,31 +40,17 @@ #define DPRINTF(fmt, ...) 
do { } while (0) #endif -#define TYPE_NEXT_MACHINE MACHINE_TYPE_NAME("next-cube") -OBJECT_DECLARE_SIMPLE_TYPE(NeXTState, NEXT_MACHINE) - #define ENTRY 0x0100001e #define RAM_SIZE 0x4000000 #define ROM_FILE "Rev_2.5_v66.bin" -typedef struct next_dma { - uint32_t csr; - uint32_t saved_next; - uint32_t saved_limit; - uint32_t saved_start; - uint32_t saved_stop; +#define TYPE_NEXT_RTC "next-rtc" +OBJECT_DECLARE_SIMPLE_TYPE(NeXTRTC, NEXT_RTC) - uint32_t next; - uint32_t limit; - uint32_t start; - uint32_t stop; - - uint32_t next_initbuf; - uint32_t size; -} next_dma; +struct NeXTRTC { + SysBusDevice parent_obj; -typedef struct NextRtc { int8_t phase; uint8_t ram[32]; uint8_t command; @@ -69,18 +58,25 @@ typedef struct NextRtc { uint8_t status; uint8_t control; uint8_t retval; -} NextRtc; -struct NeXTState { - MachineState parent; + qemu_irq data_out_irq; + qemu_irq power_irq; +}; - MemoryRegion rom; - MemoryRegion rom2; - MemoryRegion dmamem; - MemoryRegion bmapm1; - MemoryRegion bmapm2; +#define TYPE_NEXT_SCSI "next-scsi" +OBJECT_DECLARE_SIMPLE_TYPE(NeXTSCSI, NEXT_SCSI) - next_dma dma[10]; +/* NeXT SCSI Controller */ +struct NeXTSCSI { + SysBusDevice parent_obj; + + MemoryRegion scsi_mem; + + SysBusESPState sysbus_esp; + + MemoryRegion scsi_csr_mem; + uint8_t scsi_csr_1; + uint8_t scsi_csr_2; }; #define TYPE_NEXT_PC "next-pc" @@ -92,6 +88,9 @@ struct NeXTPC { M68kCPU *cpu; + MemoryRegion floppy_mem; + MemoryRegion timer_mem; + MemoryRegion dummyen_mem; MemoryRegion mmiomem; MemoryRegion scrmem; @@ -101,13 +100,49 @@ struct NeXTPC { uint32_t int_mask; uint32_t int_status; uint32_t led; - uint8_t scsi_csr_1; - uint8_t scsi_csr_2; + + NeXTSCSI next_scsi; qemu_irq scsi_reset; qemu_irq scsi_dma; - NextRtc rtc; + ESCCState escc; + + NeXTRTC rtc; + qemu_irq rtc_data_irq; + qemu_irq rtc_cmd_reset_irq; +}; + +typedef struct next_dma { + uint32_t csr; + + uint32_t saved_next; + uint32_t saved_limit; + uint32_t saved_start; + uint32_t saved_stop; + + uint32_t next; + uint32_t limit; + uint32_t start; + uint32_t stop; + + uint32_t next_initbuf; + uint32_t size; +} next_dma; + +#define TYPE_NEXT_MACHINE MACHINE_TYPE_NAME("next-cube") +OBJECT_DECLARE_SIMPLE_TYPE(NeXTState, NEXT_MACHINE) + +struct NeXTState { + MachineState parent; + + MemoryRegion rom; + MemoryRegion rom2; + MemoryRegion dmamem; + MemoryRegion bmapm1; + MemoryRegion bmapm2; + + next_dma dma[10]; }; /* Thanks to NeXT forums for this */ @@ -144,120 +179,26 @@ static void next_scr2_led_update(NeXTPC *s) static void next_scr2_rtc_update(NeXTPC *s) { - uint8_t old_scr2, scr2_2; - NextRtc *rtc = &s->rtc; + uint8_t old_scr2_rtc, scr2_rtc; - old_scr2 = extract32(s->old_scr2, 8, 8); - scr2_2 = extract32(s->scr2, 8, 8); + old_scr2_rtc = extract32(s->old_scr2, 8, 8); + scr2_rtc = extract32(s->scr2, 8, 8); - if (scr2_2 & 0x1) { + if (scr2_rtc & 0x1) { /* DPRINTF("RTC %x phase %i\n", scr2_2, rtc->phase); */ - if (rtc->phase == -1) { - rtc->phase = 0; - } /* If we are in going down clock... do something */ - if (((old_scr2 & SCR2_RTCLK) != (scr2_2 & SCR2_RTCLK)) && - ((scr2_2 & SCR2_RTCLK) == 0)) { - if (rtc->phase < 8) { - rtc->command = (rtc->command << 1) | - ((scr2_2 & SCR2_RTDATA) ? 1 : 0); - } - if (rtc->phase >= 8 && rtc->phase < 16) { - rtc->value = (rtc->value << 1) | - ((scr2_2 & SCR2_RTDATA) ? 
1 : 0); - - /* if we read RAM register, output RT_DATA bit */ - if (rtc->command <= 0x1F) { - scr2_2 = scr2_2 & (~SCR2_RTDATA); - if (rtc->ram[rtc->command] & (0x80 >> (rtc->phase - 8))) { - scr2_2 |= SCR2_RTDATA; - } - - rtc->retval = (rtc->retval << 1) | - ((scr2_2 & SCR2_RTDATA) ? 1 : 0); - } - /* read the status 0x30 */ - if (rtc->command == 0x30) { - scr2_2 = scr2_2 & (~SCR2_RTDATA); - /* for now status = 0x98 (new rtc + FTU) */ - if (rtc->status & (0x80 >> (rtc->phase - 8))) { - scr2_2 |= SCR2_RTDATA; - } - - rtc->retval = (rtc->retval << 1) | - ((scr2_2 & SCR2_RTDATA) ? 1 : 0); - } - /* read the status 0x31 */ - if (rtc->command == 0x31) { - scr2_2 = scr2_2 & (~SCR2_RTDATA); - if (rtc->control & (0x80 >> (rtc->phase - 8))) { - scr2_2 |= SCR2_RTDATA; - } - rtc->retval = (rtc->retval << 1) | - ((scr2_2 & SCR2_RTDATA) ? 1 : 0); - } - - if ((rtc->command >= 0x20) && (rtc->command <= 0x2F)) { - scr2_2 = scr2_2 & (~SCR2_RTDATA); - /* for now 0x00 */ - time_t time_h = time(NULL); - struct tm *info = localtime(&time_h); - int ret = 0; - - switch (rtc->command) { - case 0x20: - ret = SCR2_TOBCD(info->tm_sec); - break; - case 0x21: - ret = SCR2_TOBCD(info->tm_min); - break; - case 0x22: - ret = SCR2_TOBCD(info->tm_hour); - break; - case 0x24: - ret = SCR2_TOBCD(info->tm_mday); - break; - case 0x25: - ret = SCR2_TOBCD((info->tm_mon + 1)); - break; - case 0x26: - ret = SCR2_TOBCD((info->tm_year - 100)); - break; - - } - - if (ret & (0x80 >> (rtc->phase - 8))) { - scr2_2 |= SCR2_RTDATA; - } - rtc->retval = (rtc->retval << 1) | - ((scr2_2 & SCR2_RTDATA) ? 1 : 0); - } - - } - - rtc->phase++; - if (rtc->phase == 16) { - if (rtc->command >= 0x80 && rtc->command <= 0x9F) { - rtc->ram[rtc->command - 0x80] = rtc->value; - } - /* write to x30 register */ - if (rtc->command == 0xB1) { - /* clear FTU */ - if (rtc->value & 0x04) { - rtc->status = rtc->status & (~0x18); - s->int_status = s->int_status & (~0x04); - } - } + if (((old_scr2_rtc & SCR2_RTCLK) != (scr2_rtc & SCR2_RTCLK)) && + ((scr2_rtc & SCR2_RTCLK) == 0)) { + if (scr2_rtc & SCR2_RTDATA) { + qemu_irq_raise(s->rtc_data_irq); + } else { + qemu_irq_lower(s->rtc_data_irq); } } } else { /* else end or abort */ - rtc->phase = -1; - rtc->command = 0; - rtc->value = 0; + qemu_irq_raise(s->rtc_cmd_reset_irq); } - - s->scr2 = deposit32(s->scr2, 8, 8, scr2_2); } static uint64_t next_mmio_read(void *opaque, hwaddr addr, unsigned size) @@ -266,30 +207,26 @@ static uint64_t next_mmio_read(void *opaque, hwaddr addr, unsigned size) uint64_t val; switch (addr) { - case 0x7000: + case 0x2000: /* 0x2007000 */ /* DPRINTF("Read INT status: %x\n", s->int_status); */ val = s->int_status; break; - case 0x7800: + case 0x2800: /* 0x2007800 */ DPRINTF("MMIO Read INT mask: %x\n", s->int_mask); val = s->int_mask; break; - case 0xc000 ... 0xc003: - val = extract32(s->scr1, (4 - (addr - 0xc000) - size) << 3, + case 0x7000 ... 0x7003: /* 0x200c000 */ + val = extract32(s->scr1, (4 - (addr - 0x7000) - size) << 3, size << 3); break; - case 0xd000 ... 0xd003: - val = extract32(s->scr2, (4 - (addr - 0xd000) - size) << 3, + case 0x8000 ... 
0x8003: /* 0x200d000 */ + val = extract32(s->scr2, (4 - (addr - 0x8000) - size) << 3, size << 3); break; - case 0x14020: - val = 0x7f; - break; - default: val = 0; DPRINTF("MMIO Read @ 0x%"HWADDR_PRIx" size %d\n", addr, size); @@ -305,25 +242,25 @@ static void next_mmio_write(void *opaque, hwaddr addr, uint64_t val, NeXTPC *s = NEXT_PC(opaque); switch (addr) { - case 0x7000: + case 0x2000: /* 0x2007000 */ DPRINTF("INT Status old: %x new: %x\n", s->int_status, (unsigned int)val); s->int_status = val; break; - case 0x7800: + case 0x2800: /* 0x2007800 */ DPRINTF("INT Mask old: %x new: %x\n", s->int_mask, (unsigned int)val); s->int_mask = val; break; - case 0xc000 ... 0xc003: + case 0x7000 ... 0x7003: /* 0x200c000 */ DPRINTF("SCR1 Write: %x\n", (unsigned int)val); - s->scr1 = deposit32(s->scr1, (4 - (addr - 0xc000) - size) << 3, + s->scr1 = deposit32(s->scr1, (4 - (addr - 0x7000) - size) << 3, size << 3, val); break; - case 0xd000 ... 0xd003: - s->scr2 = deposit32(s->scr2, (4 - (addr - 0xd000) - size) << 3, + case 0x8000 ... 0x8003: /* 0x200d000 */ + s->scr2 = deposit32(s->scr2, (4 - (addr - 0x8000) - size) << 3, size << 3, val); next_scr2_led_update(s); next_scr2_rtc_update(s); @@ -351,143 +288,6 @@ static const MemoryRegionOps next_mmio_ops = { #define SCSICSR_CPUDMA 0x10 /* if set, dma enabled */ #define SCSICSR_INTMASK 0x20 /* if set, interrupt enabled */ -static uint64_t next_scr_readfn(void *opaque, hwaddr addr, unsigned size) -{ - NeXTPC *s = NEXT_PC(opaque); - uint64_t val; - - switch (addr) { - case 0x14108: - DPRINTF("FD read @ %x\n", (unsigned int)addr); - val = 0x40 | 0x04 | 0x2 | 0x1; - break; - - case 0x14020: - DPRINTF("SCSI 4020 STATUS READ %X\n", s->scsi_csr_1); - val = s->scsi_csr_1; - break; - - case 0x14021: - DPRINTF("SCSI 4021 STATUS READ %X\n", s->scsi_csr_2); - val = 0x40; - break; - - /* - * These 4 registers are the hardware timer, not sure which register - * is the latch instead of data, but no problems so far. - * - * Hack: We need to have the LSB change consistently to make it work - */ - case 0x1a000 ... 0x1a003: - val = extract32(clock(), (4 - (addr - 0x1a000) - size) << 3, - size << 3); - break; - - /* For now return dummy byte to allow the Ethernet test to timeout */ - case 0x6000: - val = 0xff; - break; - - default: - DPRINTF("BMAP Read @ 0x%x size %u\n", (unsigned int)addr, size); - val = 0; - break; - } - - return val; -} - -static void next_scr_writefn(void *opaque, hwaddr addr, uint64_t val, - unsigned size) -{ - NeXTPC *s = NEXT_PC(opaque); - - switch (addr) { - case 0x14108: - DPRINTF("FDCSR Write: %x\n", value); - if (val == 0x0) { - /* qemu_irq_raise(s->fd_irq[0]); */ - } - break; - - case 0x14020: /* SCSI Control Register */ - if (val & SCSICSR_FIFOFL) { - DPRINTF("SCSICSR FIFO Flush\n"); - /* will have to add another irq to the esp if this is needed */ - /* esp_puflush_fifo(esp_g); */ - } - - if (val & SCSICSR_ENABLE) { - DPRINTF("SCSICSR Enable\n"); - /* - * qemu_irq_raise(s->scsi_dma); - * s->scsi_csr_1 = 0xc0; - * s->scsi_csr_1 |= 0x1; - * qemu_irq_pulse(s->scsi_dma); - */ - } - /* - * else - * s->scsi_csr_1 &= ~SCSICSR_ENABLE; - */ - - if (val & SCSICSR_RESET) { - DPRINTF("SCSICSR Reset\n"); - /* I think this should set DMADIR. 
CPUDMA and INTMASK to 0 */ - qemu_irq_raise(s->scsi_reset); - s->scsi_csr_1 &= ~(SCSICSR_INTMASK | 0x80 | 0x1); - qemu_irq_lower(s->scsi_reset); - } - if (val & SCSICSR_DMADIR) { - DPRINTF("SCSICSR DMAdir\n"); - } - if (val & SCSICSR_CPUDMA) { - DPRINTF("SCSICSR CPUDMA\n"); - /* qemu_irq_raise(s->scsi_dma); */ - s->int_status |= 0x4000000; - } else { - /* fprintf(stderr,"SCSICSR CPUDMA disabled\n"); */ - s->int_status &= ~(0x4000000); - /* qemu_irq_lower(s->scsi_dma); */ - } - if (val & SCSICSR_INTMASK) { - DPRINTF("SCSICSR INTMASK\n"); - /* - * int_mask &= ~0x1000; - * s->scsi_csr_1 |= val; - * s->scsi_csr_1 &= ~SCSICSR_INTMASK; - * if (s->scsi_queued) { - * s->scsi_queued = 0; - * next_irq(s, NEXT_SCSI_I, level); - * } - */ - } else { - /* int_mask |= 0x1000; */ - } - if (val & 0x80) { - /* int_mask |= 0x1000; */ - /* s->scsi_csr_1 |= 0x80; */ - } - DPRINTF("SCSICSR Write: %x\n", val); - /* s->scsi_csr_1 = val; */ - break; - - /* Hardware timer latch - not implemented yet */ - case 0x1a000: - default: - DPRINTF("BMAP Write @ 0x%x with 0x%x size %u\n", (unsigned int)addr, - val, size); - } -} - -static const MemoryRegionOps next_scr_ops = { - .read = next_scr_readfn, - .write = next_scr_writefn, - .valid.min_access_size = 1, - .valid.max_access_size = 4, - .endianness = DEVICE_BIG_ENDIAN, -}; - #define NEXTDMA_SCSI(x) (0x10 + x) #define NEXTDMA_FD(x) (0x10 + x) #define NEXTDMA_ENTX(x) (0x110 + x) @@ -585,7 +385,7 @@ static void next_dma_write(void *opaque, hwaddr addr, uint64_t val, break; default: - DPRINTF("DMA write @ %x w/ %x\n", (unsigned)addr, (unsigned)value); + DPRINTF("DMA write @ %x w/ %x\n", (unsigned)addr, (unsigned)val); } } @@ -828,84 +628,579 @@ static void nextscsi_write(void *opaque, uint8_t *buf, int size) nextdma_write(opaque, buf, size, NEXTDMA_SCSI); } -static void next_scsi_init(DeviceState *pcdev, M68kCPU *cpu) +static void next_scsi_csr_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + NeXTSCSI *s = NEXT_SCSI(opaque); + NeXTPC *pc = NEXT_PC(container_of(s, NeXTPC, next_scsi)); + + switch (addr) { + case 0: + if (val & SCSICSR_FIFOFL) { + DPRINTF("SCSICSR FIFO Flush\n"); + /* will have to add another irq to the esp if this is needed */ + /* esp_puflush_fifo(esp_g); */ + } + + if (val & SCSICSR_ENABLE) { + DPRINTF("SCSICSR Enable\n"); + /* + * qemu_irq_raise(s->scsi_dma); + * s->scsi_csr_1 = 0xc0; + * s->scsi_csr_1 |= 0x1; + * qemu_irq_pulse(s->scsi_dma); + */ + } + /* + * else + * s->scsi_csr_1 &= ~SCSICSR_ENABLE; + */ + + if (val & SCSICSR_RESET) { + DPRINTF("SCSICSR Reset\n"); + /* I think this should set DMADIR. 
CPUDMA and INTMASK to 0 */ + qemu_irq_raise(pc->scsi_reset); + s->scsi_csr_1 &= ~(SCSICSR_INTMASK | 0x80 | 0x1); + qemu_irq_lower(pc->scsi_reset); + } + if (val & SCSICSR_DMADIR) { + DPRINTF("SCSICSR DMAdir\n"); + } + if (val & SCSICSR_CPUDMA) { + DPRINTF("SCSICSR CPUDMA\n"); + /* qemu_irq_raise(s->scsi_dma); */ + pc->int_status |= 0x4000000; + } else { + /* fprintf(stderr,"SCSICSR CPUDMA disabled\n"); */ + pc->int_status &= ~(0x4000000); + /* qemu_irq_lower(s->scsi_dma); */ + } + if (val & SCSICSR_INTMASK) { + DPRINTF("SCSICSR INTMASK\n"); + /* + * int_mask &= ~0x1000; + * s->scsi_csr_1 |= val; + * s->scsi_csr_1 &= ~SCSICSR_INTMASK; + * if (s->scsi_queued) { + * s->scsi_queued = 0; + * next_irq(s, NEXT_SCSI_I, level); + * } + */ + } else { + /* int_mask |= 0x1000; */ + } + if (val & 0x80) { + /* int_mask |= 0x1000; */ + /* s->scsi_csr_1 |= 0x80; */ + } + DPRINTF("SCSICSR1 Write: %"PRIx64 "\n", val); + s->scsi_csr_1 = val; + break; + + case 1: + DPRINTF("SCSICSR2 Write: %"PRIx64 "\n", val); + s->scsi_csr_2 = val; + break; + + default: + g_assert_not_reached(); + } +} + +static uint64_t next_scsi_csr_read(void *opaque, hwaddr addr, unsigned size) +{ + NeXTSCSI *s = NEXT_SCSI(opaque); + uint64_t val; + + switch (addr) { + case 0: + DPRINTF("SCSI 4020 STATUS READ %X\n", s->scsi_csr_1); + val = s->scsi_csr_1; + break; + + case 1: + DPRINTF("SCSI 4021 STATUS READ %X\n", s->scsi_csr_2); + val = s->scsi_csr_2; + break; + + default: + g_assert_not_reached(); + } + + return val; +} + +static const MemoryRegionOps next_scsi_csr_ops = { + .read = next_scsi_csr_read, + .write = next_scsi_csr_write, + .valid.min_access_size = 1, + .valid.max_access_size = 1, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void next_scsi_init(Object *obj) { - struct NeXTPC *next_pc = NEXT_PC(pcdev); - DeviceState *dev; - SysBusDevice *sysbusdev; + NeXTSCSI *s = NEXT_SCSI(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + object_initialize_child(obj, "esp", &s->sysbus_esp, TYPE_SYSBUS_ESP); + + memory_region_init_io(&s->scsi_csr_mem, obj, &next_scsi_csr_ops, + s, "csrs", 2); + + memory_region_init(&s->scsi_mem, obj, "next.scsi", 0x40); + sysbus_init_mmio(sbd, &s->scsi_mem); +} + +static void next_scsi_realize(DeviceState *dev, Error **errp) +{ + NeXTSCSI *s = NEXT_SCSI(dev); SysBusESPState *sysbus_esp; + SysBusDevice *sbd; ESPState *esp; + NeXTPC *pcdev; + + pcdev = NEXT_PC(container_of(s, NeXTPC, next_scsi)); - dev = qdev_new(TYPE_SYSBUS_ESP); - sysbus_esp = SYSBUS_ESP(dev); + /* ESP */ + sysbus_esp = SYSBUS_ESP(&s->sysbus_esp); esp = &sysbus_esp->esp; esp->dma_memory_read = nextscsi_read; esp->dma_memory_write = nextscsi_write; esp->dma_opaque = pcdev; sysbus_esp->it_shift = 0; esp->dma_enabled = 1; - sysbusdev = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(sysbusdev, &error_fatal); - sysbus_connect_irq(sysbusdev, 0, qdev_get_gpio_in(pcdev, NEXT_SCSI_I)); - sysbus_mmio_map(sysbusdev, 0, 0x2114000); + sbd = SYS_BUS_DEVICE(sysbus_esp); + if (!sysbus_realize(sbd, errp)) { + return; + } + memory_region_add_subregion(&s->scsi_mem, 0x0, + sysbus_mmio_get_region(sbd, 0)); - next_pc->scsi_reset = qdev_get_gpio_in(dev, 0); - next_pc->scsi_dma = qdev_get_gpio_in(dev, 1); + /* SCSI CSRs */ + memory_region_add_subregion(&s->scsi_mem, 0x20, &s->scsi_csr_mem); - scsi_bus_legacy_handle_cmdline(&esp->bus); + scsi_bus_legacy_handle_cmdline(&s->sysbus_esp.esp.bus); } -static void next_escc_init(DeviceState *pcdev) +static const VMStateDescription next_scsi_vmstate = { + .name = "next-scsi", + .version_id = 0, + .minimum_version_id 
= 0, + .fields = (const VMStateField[]) { + VMSTATE_UINT8(scsi_csr_1, NeXTSCSI), + VMSTATE_UINT8(scsi_csr_2, NeXTSCSI), + VMSTATE_END_OF_LIST() + }, +}; + +static void next_scsi_class_init(ObjectClass *klass, const void *data) { - DeviceState *dev; - SysBusDevice *s; - - dev = qdev_new(TYPE_ESCC); - qdev_prop_set_uint32(dev, "disabled", 0); - qdev_prop_set_uint32(dev, "frequency", 9600 * 384); - qdev_prop_set_uint32(dev, "it_shift", 0); - qdev_prop_set_bit(dev, "bit_swap", true); - qdev_prop_set_chr(dev, "chrB", serial_hd(1)); - qdev_prop_set_chr(dev, "chrA", serial_hd(0)); - qdev_prop_set_uint32(dev, "chnBtype", escc_serial); - qdev_prop_set_uint32(dev, "chnAtype", escc_serial); - - s = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(s, &error_fatal); - sysbus_connect_irq(s, 0, qdev_get_gpio_in(pcdev, NEXT_SCC_I)); - sysbus_connect_irq(s, 1, qdev_get_gpio_in(pcdev, NEXT_SCC_DMA_I)); - sysbus_mmio_map(s, 0, 0x2118000); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NeXT SCSI Controller"; + dc->realize = next_scsi_realize; + dc->vmsd = &next_scsi_vmstate; } -static void next_pc_reset(DeviceState *dev) +static const TypeInfo next_scsi_info = { + .name = TYPE_NEXT_SCSI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = next_scsi_init, + .instance_size = sizeof(NeXTSCSI), + .class_init = next_scsi_class_init, +}; + +static void next_floppy_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) { - NeXTPC *s = NEXT_PC(dev); + switch (addr) { + case 0: + DPRINTF("FDCSR Write: %"PRIx64 "\n", val); + if (val == 0x0) { + /* qemu_irq_raise(s->fd_irq[0]); */ + } + break; + + default: + g_assert_not_reached(); + } +} + +static uint64_t next_floppy_read(void *opaque, hwaddr addr, unsigned size) +{ + uint64_t val; + + switch (addr) { + case 0: + DPRINTF("FD read @ %x\n", (unsigned int)addr); + val = 0x40 | 0x04 | 0x2 | 0x1; + break; + + default: + g_assert_not_reached(); + } + + return val; +} + +static const MemoryRegionOps next_floppy_ops = { + .read = next_floppy_read, + .write = next_floppy_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void next_timer_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + switch (addr) { + case 0 ... 3: + /* Hardware timer latch - not implemented yet */ + break; + + default: + g_assert_not_reached(); + } +} + +static uint64_t next_timer_read(void *opaque, hwaddr addr, unsigned size) +{ + uint64_t val; + + switch (addr) { + case 0 ... 3: + /* + * These 4 registers are the hardware timer, not sure which register + * is the latch instead of data, but no problems so far. 
+ * + * Hack: We need to have the LSB change consistently to make it work + */ + val = extract32(clock(), (4 - addr - size) << 3, + size << 3); + break; + + default: + g_assert_not_reached(); + } + + return val; +} + +static const MemoryRegionOps next_timer_ops = { + .read = next_timer_read, + .write = next_timer_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void next_dummy_en_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + /* Do nothing */ +} + +static uint64_t next_dummy_en_read(void *opaque, hwaddr addr, unsigned size) +{ + uint64_t val; + + switch (addr) { + case 0: + /* For now return dummy byte to allow the Ethernet test to timeout */ + val = 0xff; + break; + + default: + val = 0; + } + + return val; +} + +static const MemoryRegionOps next_dummy_en_ops = { + .read = next_dummy_en_read, + .write = next_dummy_en_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static bool next_rtc_cmd_is_write(uint8_t cmd) +{ + return (cmd >= 0x80 && cmd <= 0x9f) || + (cmd == 0xb1); +} + +static void next_rtc_data_in_irq(void *opaque, int n, int level) +{ + NeXTRTC *rtc = NEXT_RTC(opaque); + + if (rtc->phase < 8) { + rtc->command = (rtc->command << 1) | level; + + if (rtc->phase == 7 && !next_rtc_cmd_is_write(rtc->command)) { + if (rtc->command <= 0x1f) { + /* RAM registers */ + rtc->retval = rtc->ram[rtc->command]; + } + if ((rtc->command >= 0x20) && (rtc->command <= 0x2f)) { + /* RTC */ + time_t time_h = time(NULL); + struct tm *info = localtime(&time_h); + rtc->retval = 0; + + switch (rtc->command) { + case 0x20: + rtc->retval = SCR2_TOBCD(info->tm_sec); + break; + case 0x21: + rtc->retval = SCR2_TOBCD(info->tm_min); + break; + case 0x22: + rtc->retval = SCR2_TOBCD(info->tm_hour); + break; + case 0x24: + rtc->retval = SCR2_TOBCD(info->tm_mday); + break; + case 0x25: + rtc->retval = SCR2_TOBCD((info->tm_mon + 1)); + break; + case 0x26: + rtc->retval = SCR2_TOBCD((info->tm_year - 100)); + break; + } + } + if (rtc->command == 0x30) { + /* read the status 0x30 */ + rtc->retval = rtc->status; + } + if (rtc->command == 0x31) { + /* read the control 0x31 */ + rtc->retval = rtc->control; + } + } + } + if (rtc->phase >= 8 && rtc->phase < 16) { + if (next_rtc_cmd_is_write(rtc->command)) { + /* Shift in value to write */ + rtc->value = (rtc->value << 1) | level; + } else { + /* Shift out value to read */ + if (rtc->retval & (0x80 >> (rtc->phase - 8))) { + qemu_irq_raise(rtc->data_out_irq); + } else { + qemu_irq_lower(rtc->data_out_irq); + } + } + } + + rtc->phase++; + if (rtc->phase == 16 && next_rtc_cmd_is_write(rtc->command)) { + if (rtc->command >= 0x80 && rtc->command <= 0x9f) { + /* RAM registers */ + rtc->ram[rtc->command - 0x80] = rtc->value; + } + if (rtc->command == 0xb1) { + /* write to 0x30 register */ + if (rtc->value & 0x04) { + /* clear FTU */ + rtc->status = rtc->status & (~0x18); + qemu_irq_lower(rtc->power_irq); + } + } + } +} + +static void next_rtc_cmd_reset_irq(void *opaque, int n, int level) +{ + NeXTRTC *rtc = NEXT_RTC(opaque); + + if (level) { + rtc->phase = 0; + rtc->command = 0; + rtc->value = 0; + } +} + +static void next_rtc_reset_hold(Object *obj, ResetType type) +{ + NeXTRTC *rtc = NEXT_RTC(obj); + + rtc->status = 0x90; + + /* Load RTC RAM - TODO: provide possibility to load contents from file */ + memcpy(rtc->ram, rtc_ram2, 32); +} + +static void next_rtc_init(Object *obj) +{ + NeXTRTC *rtc = NEXT_RTC(obj); + + 
qdev_init_gpio_in_named(DEVICE(obj), next_rtc_data_in_irq, + "rtc-data-in", 1); + qdev_init_gpio_out_named(DEVICE(obj), &rtc->data_out_irq, + "rtc-data-out", 1); + qdev_init_gpio_in_named(DEVICE(obj), next_rtc_cmd_reset_irq, + "rtc-cmd-reset", 1); + qdev_init_gpio_out_named(DEVICE(obj), &rtc->power_irq, + "rtc-power-out", 1); +} + +static const VMStateDescription next_rtc_vmstate = { + .name = "next-rtc", + .version_id = 3, + .minimum_version_id = 3, + .fields = (const VMStateField[]) { + VMSTATE_INT8(phase, NeXTRTC), + VMSTATE_UINT8_ARRAY(ram, NeXTRTC, 32), + VMSTATE_UINT8(command, NeXTRTC), + VMSTATE_UINT8(value, NeXTRTC), + VMSTATE_UINT8(status, NeXTRTC), + VMSTATE_UINT8(control, NeXTRTC), + VMSTATE_UINT8(retval, NeXTRTC), + VMSTATE_END_OF_LIST() + }, +}; + +static void next_rtc_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->desc = "NeXT RTC"; + dc->vmsd = &next_rtc_vmstate; + rc->phases.hold = next_rtc_reset_hold; +} + +static const TypeInfo next_rtc_info = { + .name = TYPE_NEXT_RTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = next_rtc_init, + .instance_size = sizeof(NeXTRTC), + .class_init = next_rtc_class_init, +}; + +static void next_pc_rtc_data_in_irq(void *opaque, int n, int level) +{ + NeXTPC *s = NEXT_PC(opaque); + uint8_t scr2_2 = extract32(s->scr2, 8, 8); + + if (level) { + scr2_2 |= SCR2_RTDATA; + } else { + scr2_2 &= ~SCR2_RTDATA; + } + + s->scr2 = deposit32(s->scr2, 8, 8, scr2_2); +} + +static void next_pc_reset_hold(Object *obj, ResetType type) +{ + NeXTPC *s = NEXT_PC(obj); /* Set internal registers to initial values */ /* 0x0000XX00 << vital bits */ s->scr1 = 0x00011102; s->scr2 = 0x00ff0c80; s->old_scr2 = s->scr2; - - s->rtc.status = 0x90; - - /* Load RTC RAM - TODO: provide possibility to load contents from file */ - memcpy(s->rtc.ram, rtc_ram2, 32); } static void next_pc_realize(DeviceState *dev, Error **errp) { NeXTPC *s = NEXT_PC(dev); - SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + SysBusDevice *sbd; + DeviceState *d; + + /* SCSI */ + sbd = SYS_BUS_DEVICE(&s->next_scsi); + if (!sysbus_realize(sbd, errp)) { + return; + } - qdev_init_gpio_in(dev, next_irq, NEXT_NUM_IRQS); + d = DEVICE(object_resolve_path_component(OBJECT(&s->next_scsi), "esp")); + sysbus_connect_irq(SYS_BUS_DEVICE(d), 0, + qdev_get_gpio_in(DEVICE(s), NEXT_SCSI_I)); + + s->scsi_reset = qdev_get_gpio_in(d, 0); + s->scsi_dma = qdev_get_gpio_in(d, 1); + + /* ESCC */ + d = DEVICE(&s->escc); + qdev_prop_set_uint32(d, "disabled", 0); + qdev_prop_set_uint32(d, "frequency", 9600 * 384); + qdev_prop_set_uint32(d, "it_shift", 0); + qdev_prop_set_bit(d, "bit_swap", true); + qdev_prop_set_chr(d, "chrB", serial_hd(1)); + qdev_prop_set_chr(d, "chrA", serial_hd(0)); + qdev_prop_set_uint32(d, "chnBtype", escc_serial); + qdev_prop_set_uint32(d, "chnAtype", escc_serial); + + sbd = SYS_BUS_DEVICE(d); + if (!sysbus_realize(sbd, errp)) { + return; + } + sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(dev, NEXT_SCC_I)); + sysbus_connect_irq(sbd, 1, qdev_get_gpio_in(dev, NEXT_SCC_DMA_I)); + + /* RTC */ + d = DEVICE(&s->rtc); + if (!sysbus_realize(SYS_BUS_DEVICE(d), errp)) { + return; + } + /* Data from NeXTPC to RTC */ + qdev_connect_gpio_out_named(dev, "rtc-data-out", 0, + qdev_get_gpio_in_named(d, "rtc-data-in", 0)); + /* Data from RTC to NeXTPC */ + qdev_connect_gpio_out_named(d, "rtc-data-out", 0, + qdev_get_gpio_in_named(dev, + "rtc-data-in", 0)); + qdev_connect_gpio_out_named(dev, "rtc-cmd-reset", 0, + 
qdev_get_gpio_in_named(d, "rtc-cmd-reset", 0)); + qdev_connect_gpio_out_named(d, "rtc-power-out", 0, + qdev_get_gpio_in(dev, NEXT_PWR_I)); +} + +static void next_pc_init(Object *obj) +{ + NeXTPC *s = NEXT_PC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + qdev_init_gpio_in(DEVICE(obj), next_irq, NEXT_NUM_IRQS); memory_region_init_io(&s->mmiomem, OBJECT(s), &next_mmio_ops, s, - "next.mmio", 0xd0000); - memory_region_init_io(&s->scrmem, OBJECT(s), &next_scr_ops, s, - "next.scr", 0x20000); + "next.mmio", 0x9000); sysbus_init_mmio(sbd, &s->mmiomem); - sysbus_init_mmio(sbd, &s->scrmem); + + memory_region_init_io(&s->dummyen_mem, OBJECT(s), &next_dummy_en_ops, s, + "next.en", 0x20); + sysbus_init_mmio(sbd, &s->dummyen_mem); + + object_initialize_child(obj, "next-scsi", &s->next_scsi, TYPE_NEXT_SCSI); + sysbus_init_mmio(sbd, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->next_scsi), 0)); + + memory_region_init_io(&s->floppy_mem, OBJECT(s), &next_floppy_ops, s, + "next.floppy", 4); + sysbus_init_mmio(sbd, &s->floppy_mem); + + object_initialize_child(obj, "escc", &s->escc, TYPE_ESCC); + sysbus_init_mmio(sbd, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->escc), 0)); + + memory_region_init_io(&s->timer_mem, OBJECT(s), &next_timer_ops, s, + "next.timer", 4); + sysbus_init_mmio(sbd, &s->timer_mem); + + object_initialize_child(obj, "rtc", &s->rtc, TYPE_NEXT_RTC); + + qdev_init_gpio_in_named(DEVICE(obj), next_pc_rtc_data_in_irq, + "rtc-data-in", 1); + qdev_init_gpio_out_named(DEVICE(obj), &s->rtc_data_irq, + "rtc-data-out", 1); + qdev_init_gpio_out_named(DEVICE(obj), &s->rtc_cmd_reset_irq, + "rtc-cmd-reset", 1); } /* @@ -914,31 +1209,14 @@ static void next_pc_realize(DeviceState *dev, Error **errp) * this cpu link property and could instead provide outbound IRQ lines * that the board could wire up to the CPU. 
*/ -static Property next_pc_properties[] = { +static const Property next_pc_properties[] = { DEFINE_PROP_LINK("cpu", NeXTPC, cpu, TYPE_M68K_CPU, M68kCPU *), - DEFINE_PROP_END_OF_LIST(), -}; - -static const VMStateDescription next_rtc_vmstate = { - .name = "next-rtc", - .version_id = 2, - .minimum_version_id = 2, - .fields = (const VMStateField[]) { - VMSTATE_INT8(phase, NextRtc), - VMSTATE_UINT8_ARRAY(ram, NextRtc, 32), - VMSTATE_UINT8(command, NextRtc), - VMSTATE_UINT8(value, NextRtc), - VMSTATE_UINT8(status, NextRtc), - VMSTATE_UINT8(control, NextRtc), - VMSTATE_UINT8(retval, NextRtc), - VMSTATE_END_OF_LIST() - }, }; static const VMStateDescription next_pc_vmstate = { .name = "next-pc", - .version_id = 2, - .minimum_version_id = 2, + .version_id = 4, + .minimum_version_id = 4, .fields = (const VMStateField[]) { VMSTATE_UINT32(scr1, NeXTPC), VMSTATE_UINT32(scr2, NeXTPC), @@ -946,27 +1224,26 @@ static const VMStateDescription next_pc_vmstate = { VMSTATE_UINT32(int_mask, NeXTPC), VMSTATE_UINT32(int_status, NeXTPC), VMSTATE_UINT32(led, NeXTPC), - VMSTATE_UINT8(scsi_csr_1, NeXTPC), - VMSTATE_UINT8(scsi_csr_2, NeXTPC), - VMSTATE_STRUCT(rtc, NeXTPC, 0, next_rtc_vmstate, NextRtc), VMSTATE_END_OF_LIST() }, }; -static void next_pc_class_init(ObjectClass *klass, void *data) +static void next_pc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); dc->desc = "NeXT Peripheral Controller"; dc->realize = next_pc_realize; - dc->reset = next_pc_reset; device_class_set_props(dc, next_pc_properties); dc->vmsd = &next_pc_vmstate; + rc->phases.hold = next_pc_reset_hold; } static const TypeInfo next_pc_info = { .name = TYPE_NEXT_PC, .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = next_pc_init, .instance_size = sizeof(NeXTPC), .class_init = next_pc_class_init, }; @@ -1004,11 +1281,32 @@ static void next_cube_init(MachineState *machine) sysbus_create_simple(TYPE_NEXTFB, 0x0B000000, NULL); /* MMIO */ - sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 0, 0x02000000); + sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 0, 0x02005000); /* BMAP IO - acts as a catch-all for now */ sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 1, 0x02100000); + /* en network (dummy) */ + sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 1, 0x02106000); + + /* unknown: Brightness control register? */ + empty_slot_init("next.unknown.0", 0x02110000, 0x10); + /* unknown: Magneto-Optical drive controller? */ + empty_slot_init("next.unknown.1", 0x02112000, 0x10); + + /* SCSI */ + sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 2, 0x02114000); + /* Floppy */ + sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 3, 0x02114108); + /* ESCC */ + sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 4, 0x02118000); + + /* unknown: Serial clock configuration register? 
*/ + empty_slot_init("next.unknown.2", 0x02118004, 0x10); + + /* Timer */ + sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 5, 0x0211a000); + /* BMAP memory */ memory_region_init_ram_flags_nomigrate(&m->bmapm1, NULL, "next.bmapmem", 64, RAM_SHARED, &error_fatal); @@ -1036,7 +1334,7 @@ static void next_cube_init(MachineState *machine) /* Initial PC is always at offset 4 in firmware binaries */ ptr = rom_ptr(0x01000004, 4); g_assert(ptr != NULL); - env->pc = ldl_p(ptr); + env->pc = ldl_be_p(ptr); if (env->pc >= 0x01020000) { error_report("'%s' does not seem to be a valid firmware image.", bios_name); @@ -1044,21 +1342,13 @@ static void next_cube_init(MachineState *machine) } } - /* Serial */ - next_escc_init(pcdev); - - /* TODO: */ - /* Network */ - /* SCSI */ - next_scsi_init(pcdev, cpu); - /* DMA */ memory_region_init_io(&m->dmamem, NULL, &next_dma_ops, machine, "next.dma", 0x5000); memory_region_add_subregion(sysmem, 0x02000000, &m->dmamem); } -static void next_machine_class_init(ObjectClass *oc, void *data) +static void next_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1068,6 +1358,7 @@ static void next_machine_class_init(ObjectClass *oc, void *data) mc->default_ram_size = RAM_SIZE; mc->default_ram_id = "next.ram"; mc->default_cpu_type = M68K_CPU_TYPE_NAME("m68040"); + mc->no_cdrom = true; } static const TypeInfo next_typeinfo = { @@ -1081,6 +1372,8 @@ static void next_register_type(void) { type_register_static(&next_typeinfo); type_register_static(&next_pc_info); + type_register_static(&next_scsi_info); + type_register_static(&next_rtc_info); } type_init(next_register_type) diff --git a/hw/m68k/next-kbd.c b/hw/m68k/next-kbd.c index 0c348c18cf2..2bec945acfb 100644 --- a/hw/m68k/next-kbd.c +++ b/hw/m68k/next-kbd.c @@ -68,7 +68,6 @@ struct NextKBDState { uint16_t shift; }; -static void queue_code(void *opaque, int code); /* lots of magic numbers here */ static uint32_t kbd_read_byte(void *opaque, hwaddr addr) @@ -163,71 +162,73 @@ static const MemoryRegionOps kbd_ops = { .write = kbd_writefn, .valid.min_access_size = 1, .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; -static void nextkbd_event(void *opaque, int ch) -{ - /* - * Will want to set vars for caps/num lock - * if (ch & 0x80) -> key release - * there's also e0 escaped scancodes that might need to be handled - */ - queue_code(opaque, ch); -} - -static const unsigned char next_keycodes[128] = { - 0x00, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x50, 0x4F, - 0x4E, 0x1E, 0x1F, 0x20, 0x1D, 0x1C, 0x1B, 0x00, - 0x42, 0x43, 0x44, 0x45, 0x48, 0x47, 0x46, 0x06, - 0x07, 0x08, 0x00, 0x00, 0x2A, 0x00, 0x39, 0x3A, - 0x3B, 0x3C, 0x3D, 0x40, 0x3F, 0x3E, 0x2D, 0x2C, - 0x2B, 0x26, 0x00, 0x00, 0x31, 0x32, 0x33, 0x34, - 0x35, 0x37, 0x36, 0x2e, 0x2f, 0x30, 0x00, 0x00, - 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +static const int qcode_to_nextkbd_keycode[] = { + [Q_KEY_CODE_ESC] = 0x49, + [Q_KEY_CODE_1] = 0x4a, + [Q_KEY_CODE_2] = 0x4b, + [Q_KEY_CODE_3] = 0x4c, + [Q_KEY_CODE_4] = 0x4d, + [Q_KEY_CODE_5] = 0x50, + [Q_KEY_CODE_6] = 0x4f, + [Q_KEY_CODE_7] = 0x4e, + [Q_KEY_CODE_8] = 0x1e, + [Q_KEY_CODE_9] = 0x1f, + [Q_KEY_CODE_0] = 0x20, + [Q_KEY_CODE_MINUS] = 0x1d, + [Q_KEY_CODE_EQUAL] = 0x1c, + [Q_KEY_CODE_BACKSPACE] = 0x1b, + + [Q_KEY_CODE_Q] = 0x42, + [Q_KEY_CODE_W] 
= 0x43, + [Q_KEY_CODE_E] = 0x44, + [Q_KEY_CODE_R] = 0x45, + [Q_KEY_CODE_T] = 0x48, + [Q_KEY_CODE_Y] = 0x47, + [Q_KEY_CODE_U] = 0x46, + [Q_KEY_CODE_I] = 0x06, + [Q_KEY_CODE_O] = 0x07, + [Q_KEY_CODE_P] = 0x08, + [Q_KEY_CODE_RET] = 0x2a, + [Q_KEY_CODE_A] = 0x39, + [Q_KEY_CODE_S] = 0x3a, + + [Q_KEY_CODE_D] = 0x3b, + [Q_KEY_CODE_F] = 0x3c, + [Q_KEY_CODE_G] = 0x3d, + [Q_KEY_CODE_H] = 0x40, + [Q_KEY_CODE_J] = 0x3f, + [Q_KEY_CODE_K] = 0x3e, + [Q_KEY_CODE_L] = 0x2d, + [Q_KEY_CODE_SEMICOLON] = 0x2c, + [Q_KEY_CODE_APOSTROPHE] = 0x2b, + [Q_KEY_CODE_GRAVE_ACCENT] = 0x26, + [Q_KEY_CODE_Z] = 0x31, + [Q_KEY_CODE_X] = 0x32, + [Q_KEY_CODE_C] = 0x33, + [Q_KEY_CODE_V] = 0x34, + + [Q_KEY_CODE_B] = 0x35, + [Q_KEY_CODE_N] = 0x37, + [Q_KEY_CODE_M] = 0x36, + [Q_KEY_CODE_COMMA] = 0x2e, + [Q_KEY_CODE_DOT] = 0x2f, + [Q_KEY_CODE_SLASH] = 0x30, + + [Q_KEY_CODE_SPC] = 0x38, }; -static void queue_code(void *opaque, int code) +static void nextkbd_put_keycode(NextKBDState *s, int keycode) { - NextKBDState *s = NEXTKBD(opaque); KBDQueue *q = &s->queue; - int key = code & KD_KEYMASK; - int release = code & 0x80; - static int ext; - - if (code == 0xE0) { - ext = 1; - } - - if (code == 0x2A || code == 0x1D || code == 0x36) { - if (code == 0x2A) { - s->shift = KD_LSHIFT; - } else if (code == 0x36) { - s->shift = KD_RSHIFT; - ext = 0; - } else if (code == 0x1D && !ext) { - s->shift = KD_LCOMM; - } else if (code == 0x1D && ext) { - ext = 0; - s->shift = KD_RCOMM; - } - return; - } else if (code == (0x2A | 0x80) || code == (0x1D | 0x80) || - code == (0x36 | 0x80)) { - s->shift = 0; - return; - } if (q->count >= KBD_QUEUE_SIZE) { return; } - q->data[q->wptr] = next_keycodes[key] | release; - + q->data[q->wptr] = keycode; if (++q->wptr == KBD_QUEUE_SIZE) { q->wptr = 0; } @@ -241,6 +242,53 @@ static void queue_code(void *opaque, int code) /* s->update_irq(s->update_arg, 1); */ } +static void nextkbd_event(DeviceState *dev, QemuConsole *src, InputEvent *evt) +{ + NextKBDState *s = NEXTKBD(dev); + int qcode, keycode; + bool key_down = evt->u.key.data->down; + + qcode = qemu_input_key_value_to_qcode(evt->u.key.data->key); + if (qcode >= ARRAY_SIZE(qcode_to_nextkbd_keycode)) { + return; + } + + /* Shift key currently has no keycode, so handle separately */ + if (qcode == Q_KEY_CODE_SHIFT) { + if (key_down) { + s->shift |= KD_LSHIFT; + } else { + s->shift &= ~KD_LSHIFT; + } + } + + if (qcode == Q_KEY_CODE_SHIFT_R) { + if (key_down) { + s->shift |= KD_RSHIFT; + } else { + s->shift &= ~KD_RSHIFT; + } + } + + keycode = qcode_to_nextkbd_keycode[qcode]; + if (!keycode) { + return; + } + + /* If key release event, create keyboard break code */ + if (!key_down) { + keycode |= 0x80; + } + + nextkbd_put_keycode(s, keycode); +} + +static const QemuInputHandler nextkbd_handler = { + .name = "QEMU NeXT Keyboard", + .mask = INPUT_EVENT_MASK_KEY, + .event = nextkbd_event, +}; + static void nextkbd_reset(DeviceState *dev) { NextKBDState *nks = NEXTKBD(dev); @@ -256,7 +304,7 @@ static void nextkbd_realize(DeviceState *dev, Error **errp) memory_region_init_io(&s->mr, OBJECT(dev), &kbd_ops, s, "next.kbd", 0x1000); sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mr); - qemu_add_kbd_event_handler(nextkbd_event, s); + qemu_input_handler_register(dev, &nextkbd_handler); } static const VMStateDescription nextkbd_vmstate = { @@ -264,14 +312,14 @@ static const VMStateDescription nextkbd_vmstate = { .unmigratable = 1, /* TODO: Implement this when m68k CPU is migratable */ }; -static void nextkbd_class_init(ObjectClass *oc, void *data) +static void 
nextkbd_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); set_bit(DEVICE_CATEGORY_INPUT, dc->categories); dc->vmsd = &nextkbd_vmstate; dc->realize = nextkbd_realize; - dc->reset = nextkbd_reset; + device_class_set_legacy_reset(dc, nextkbd_reset); } static const TypeInfo nextkbd_info = { diff --git a/hw/m68k/q800-glue.c b/hw/m68k/q800-glue.c index e2ae7c32011..36de67c328e 100644 --- a/hw/m68k/q800-glue.c +++ b/hw/m68k/q800-glue.c @@ -203,9 +203,8 @@ static const VMStateDescription vmstate_glue = { * this cpu link property and could instead provide outbound IRQ lines * that the board could wire up to the CPU. */ -static Property glue_properties[] = { +static const Property glue_properties[] = { DEFINE_PROP_LINK("cpu", GLUEState, cpu, TYPE_M68K_CPU, M68kCPU *), - DEFINE_PROP_END_OF_LIST(), }; static void glue_finalize(Object *obj) @@ -229,7 +228,7 @@ static void glue_init(Object *obj) s->nmi_release = timer_new_ms(QEMU_CLOCK_VIRTUAL, glue_nmi_release, s); } -static void glue_class_init(ObjectClass *klass, void *data) +static void glue_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -249,7 +248,7 @@ static const TypeInfo glue_info_types[] = { .instance_init = glue_init, .instance_finalize = glue_finalize, .class_init = glue_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_NMI }, { } }, diff --git a/hw/m68k/q800.c b/hw/m68k/q800.c index fa7683bf76f..793b23f8155 100644 --- a/hw/m68k/q800.c +++ b/hw/m68k/q800.c @@ -24,7 +24,8 @@ #include "qemu/units.h" #include "qemu/datadir.h" #include "qemu/guest-random.h" -#include "sysemu/sysemu.h" +#include "exec/target_page.h" +#include "system/system.h" #include "cpu.h" #include "hw/boards.h" #include "hw/or-irq.h" @@ -51,9 +52,9 @@ #include "net/util.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "sysemu/qtest.h" -#include "sysemu/runstate.h" -#include "sysemu/reset.h" +#include "system/qtest.h" +#include "system/runstate.h" +#include "system/reset.h" #include "migration/vmstate.h" #define MACROM_ADDR 0x40800000 @@ -210,7 +211,6 @@ static uint64_t machine_id_read(void *opaque, hwaddr addr, unsigned size) static void machine_id_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { - return; } static const MemoryRegionOps machine_id_ops = { @@ -231,7 +231,6 @@ static uint64_t ramio_read(void *opaque, hwaddr addr, unsigned size) static void ramio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { - return; } static const MemoryRegionOps ramio_ops = { @@ -585,7 +584,7 @@ static void q800_machine_init(MachineState *machine) } kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, - &elf_entry, NULL, &high, NULL, 1, + &elf_entry, NULL, &high, NULL, ELFDATA2MSB, EM_68K, 0, 0); if (kernel_size < 0) { error_report("could not load kernel '%s'", kernel_filename); @@ -684,9 +683,9 @@ static void q800_machine_init(MachineState *machine) ptr = rom_ptr(MACROM_ADDR, bios_size); assert(ptr != NULL); - stl_phys(cs->as, 0, ldl_p(ptr)); /* reset initial SP */ + stl_phys(cs->as, 0, ldl_be_p(ptr)); /* reset initial SP */ stl_phys(cs->as, 4, - MACROM_ADDR + ldl_p(ptr + 4)); /* reset initial PC */ + MACROM_ADDR + ldl_be_p(ptr + 4)); /* reset initial PC */ } } } @@ -728,7 +727,7 @@ static GlobalProperty hw_compat_q800[] = { }; static const size_t hw_compat_q800_len = G_N_ELEMENTS(hw_compat_q800); -static void q800_machine_class_init(ObjectClass *oc, void *data) 
+static void q800_machine_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { M68K_CPU_TYPE_NAME("m68040"), diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c index cda199af8fa..875fd00ef8d 100644 --- a/hw/m68k/virt.c +++ b/hw/m68k/virt.c @@ -10,7 +10,8 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qemu/guest-random.h" -#include "sysemu/sysemu.h" +#include "exec/target_page.h" +#include "system/system.h" #include "cpu.h" #include "hw/boards.h" #include "hw/qdev-properties.h" @@ -24,9 +25,9 @@ #include "net/net.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "sysemu/qtest.h" -#include "sysemu/runstate.h" -#include "sysemu/reset.h" +#include "system/qtest.h" +#include "system/runstate.h" +#include "system/reset.h" #include "hw/intc/m68k_irqc.h" #include "hw/misc/virt_ctrl.h" @@ -228,7 +229,7 @@ static void virt_init(MachineState *machine) } kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, - &elf_entry, NULL, &high, NULL, 1, + &elf_entry, NULL, &high, NULL, ELFDATA2MSB, EM_68K, 0, 0); if (kernel_size < 0) { error_report("could not load kernel '%s'", kernel_filename); @@ -309,7 +310,7 @@ static void virt_init(MachineState *machine) } } -static void virt_machine_class_init(ObjectClass *oc, void *data) +static void virt_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); mc->desc = "QEMU M68K Virtual Machine"; @@ -338,7 +339,7 @@ type_init(virt_machine_register_types) #define DEFINE_VIRT_MACHINE_IMPL(latest, ...) \ static void MACHINE_VER_SYM(class_init, virt, __VA_ARGS__)( \ ObjectClass *oc, \ - void *data) \ + const void *data) \ { \ MachineClass *mc = MACHINE_CLASS(oc); \ MACHINE_VER_SYM(options, virt, __VA_ARGS__)(mc); \ @@ -366,10 +367,31 @@ type_init(virt_machine_register_types) #define DEFINE_VIRT_MACHINE(major, minor) \ DEFINE_VIRT_MACHINE_IMPL(false, major, minor) +static void virt_machine_10_1_options(MachineClass *mc) +{ +} +DEFINE_VIRT_MACHINE_AS_LATEST(10, 1) + +static void virt_machine_10_0_options(MachineClass *mc) +{ + virt_machine_10_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_10_0, hw_compat_10_0_len); +} +DEFINE_VIRT_MACHINE(10, 0) + +static void virt_machine_9_2_options(MachineClass *mc) +{ + virt_machine_10_0_options(mc); + compat_props_add(mc->compat_props, hw_compat_9_2, hw_compat_9_2_len); +} +DEFINE_VIRT_MACHINE(9, 2) + static void virt_machine_9_1_options(MachineClass *mc) { + virt_machine_9_2_options(mc); + compat_props_add(mc->compat_props, hw_compat_9_1, hw_compat_9_1_len); } -DEFINE_VIRT_MACHINE_AS_LATEST(9, 1) +DEFINE_VIRT_MACHINE(9, 1) static void virt_machine_9_0_options(MachineClass *mc) { diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c index d648192ab9d..be609ff9d03 100644 --- a/hw/mem/cxl_type3.c +++ b/hw/mem/cxl_type3.c @@ -8,6 +8,7 @@ * * SPDX-License-Identifier: GPL-v2-only */ +#include #include "qemu/osdep.h" #include "qemu/units.h" @@ -17,6 +18,7 @@ #include "hw/mem/pc-dimm.h" #include "hw/pci/pci.h" #include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "qapi/error.h" #include "qemu/log.h" #include "qemu/module.h" @@ -24,11 +26,19 @@ #include "qemu/range.h" #include "qemu/rcu.h" #include "qemu/guest-random.h" -#include "sysemu/hostmem.h" -#include "sysemu/numa.h" +#include "system/hostmem.h" +#include "system/numa.h" #include "hw/cxl/cxl.h" #include "hw/pci/msix.h" +/* type3 device private */ +enum CXL_T3_MSIX_VECTOR { + CXL_T3_MSIX_PCIE_DOE_TABLE_ACCESS = 0, + CXL_T3_MSIX_EVENT_START 
= 2, + CXL_T3_MSIX_MBOX = CXL_T3_MSIX_EVENT_START + CXL_EVENT_TYPE_MAX, + CXL_T3_MSIX_VECTOR_NR +}; + #define DWORD_BYTE 4 #define CXL_CAPACITY_MULTIPLIER (256 * MiB) @@ -216,10 +226,16 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv) * future. */ for (i = 0; i < ct3d->dc.num_regions; i++) { + ct3d->dc.regions[i].nonvolatile = false; + ct3d->dc.regions[i].sharable = false; + ct3d->dc.regions[i].hw_managed_coherency = false; + ct3d->dc.regions[i].ic_specific_dc_management = false; + ct3d->dc.regions[i].rdonly = false; ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++, ct3d->dc.regions[i].len, - false, true, region_base); + ct3d->dc.regions[i].nonvolatile, + true, region_base); ct3d->dc.regions[i].dsmadhandle = dsmad_handle - 1; cur_ent += CT3_CDAT_NUM_ENTRIES; @@ -625,6 +641,8 @@ static bool cxl_create_dc_regions(CXLType3Dev *ct3d, Error **errp) uint64_t region_len; uint64_t decode_len; uint64_t blk_size = 2 * MiB; + /* Only 1 block size is supported for now. */ + uint64_t supported_blk_size_bitmask = blk_size; CXLDCRegion *region; MemoryRegion *mr; uint64_t dc_size; @@ -670,9 +688,11 @@ static bool cxl_create_dc_regions(CXLType3Dev *ct3d, Error **errp) .block_size = blk_size, /* dsmad_handle set when creating CDAT table entries */ .flags = 0, + .supported_blk_size_bitmask = supported_blk_size_bitmask, }; ct3d->dc.total_capacity += region->len; region->blk_bitmap = bitmap_new(region->len / region->block_size); + qemu_mutex_init(®ion->bitmap_lock); } QTAILQ_INIT(&ct3d->dc.extents); QTAILQ_INIT(&ct3d->dc.extents_pending); @@ -834,6 +854,19 @@ static DOEProtocol doe_cdat_prot[] = { { } }; +/* Initialize CXL device alerts with default threshold values. */ +static void init_alert_config(CXLType3Dev *ct3d) +{ + ct3d->alert_config = (CXLAlertConfig) { + .life_used_crit_alert_thresh = 75, + .life_used_warn_thresh = 40, + .over_temp_crit_alert_thresh = 35, + .under_temp_crit_alert_thresh = 10, + .over_temp_warn_thresh = 25, + .under_temp_warn_thresh = 20 + }; +} + static void ct3_realize(PCIDevice *pci_dev, Error **errp) { ERRP_GUARD(); @@ -842,7 +875,6 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) ComponentRegisters *regs = &cxl_cstate->crb; MemoryRegion *mr = ®s->component_registers; uint8_t *pci_conf = pci_dev->config; - unsigned short msix_num = 6; int i, rc; uint16_t count; @@ -883,31 +915,33 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) &ct3d->cxl_dstate.device_registers); /* MSI(-X) Initialization */ - rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL); + rc = msix_init_exclusive_bar(pci_dev, CXL_T3_MSIX_VECTOR_NR, 4, errp); if (rc) { - goto err_address_space_free; + goto err_free_special_ops; } - for (i = 0; i < msix_num; i++) { + for (i = 0; i < CXL_T3_MSIX_VECTOR_NR; i++) { msix_vector_use(pci_dev, i); } /* DOE Initialization */ - pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0); + pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, + CXL_T3_MSIX_PCIE_DOE_TABLE_ACCESS); cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table; cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table; cxl_cstate->cdat.private = ct3d; if (!cxl_doe_cdat_init(cxl_cstate, errp)) { - goto err_free_special_ops; + goto err_msix_uninit; } + init_alert_config(ct3d); pcie_cap_deverr_init(pci_dev); /* Leave a bit of room for expansion */ - rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL); + rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, errp); if (rc) { goto 
err_release_cdat; } - cxl_event_init(&ct3d->cxl_dstate, 2); + cxl_event_init(&ct3d->cxl_dstate, CXL_T3_MSIX_EVENT_START); /* Set default value for patrol scrub attributes */ ct3d->patrol_scrub_attrs.scrub_cycle_cap = @@ -919,25 +953,25 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) ct3d->patrol_scrub_attrs.scrub_flags = CXL_MEMDEV_PS_ENABLE_DEFAULT; /* Set default value for DDR5 ECS read attributes */ + ct3d->ecs_attrs.ecs_log_cap = CXL_ECS_LOG_ENTRY_TYPE_DEFAULT; for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) { - ct3d->ecs_attrs[count].ecs_log_cap = - CXL_ECS_LOG_ENTRY_TYPE_DEFAULT; - ct3d->ecs_attrs[count].ecs_cap = + ct3d->ecs_attrs.fru_attrs[count].ecs_cap = CXL_ECS_REALTIME_REPORT_CAP_DEFAULT; - ct3d->ecs_attrs[count].ecs_config = + ct3d->ecs_attrs.fru_attrs[count].ecs_config = CXL_ECS_THRESHOLD_COUNT_DEFAULT | (CXL_ECS_MODE_DEFAULT << 3); /* Reserved */ - ct3d->ecs_attrs[count].ecs_flags = 0; + ct3d->ecs_attrs.fru_attrs[count].ecs_flags = 0; } return; err_release_cdat: cxl_doe_cdat_release(cxl_cstate); +err_msix_uninit: + msix_uninit_exclusive_bar(pci_dev); err_free_special_ops: g_free(regs->special_ops); -err_address_space_free: if (ct3d->dc.host_dc) { cxl_destroy_dc_regions(ct3d); address_space_destroy(&ct3d->dc.host_dc_as); @@ -948,7 +982,6 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) if (ct3d->hostvmem) { address_space_destroy(&ct3d->hostvmem_as); } - return; } static void ct3_exit(PCIDevice *pci_dev) @@ -959,7 +992,9 @@ static void ct3_exit(PCIDevice *pci_dev) pcie_aer_exit(pci_dev); cxl_doe_cdat_release(cxl_cstate); + msix_uninit_exclusive_bar(pci_dev); g_free(regs->special_ops); + cxl_destroy_cci(&ct3d->cci); if (ct3d->dc.host_dc) { cxl_destroy_dc_regions(ct3d); address_space_destroy(&ct3d->dc.host_dc_as); @@ -986,6 +1021,7 @@ void ct3_set_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa, return; } + QEMU_LOCK_GUARD(®ion->bitmap_lock); bitmap_set(region->blk_bitmap, (dpa - region->base) / region->block_size, len / region->block_size); } @@ -1012,6 +1048,7 @@ bool ct3_test_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa, * if bits between [dpa, dpa + len) are all 1s, meaning the DPA range is * backed with DC extents, return true; else return false. 
*/ + QEMU_LOCK_GUARD(®ion->bitmap_lock); return find_next_zero_bit(region->blk_bitmap, nr + nbits, nr) == nr + nbits; } @@ -1033,6 +1070,7 @@ void ct3_clear_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa, nr = (dpa - region->base) / region->block_size; nbits = len / region->block_size; + QEMU_LOCK_GUARD(®ion->bitmap_lock); bitmap_clear(region->blk_bitmap, nr, nbits); } @@ -1090,10 +1128,17 @@ static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa) continue; } - *dpa = dpa_base + - ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) | - ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset) - >> iw)); + if (iw < 8) { + *dpa = dpa_base + + ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) | + ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset) + >> iw)); + } else { + *dpa = dpa_base + + ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) | + ((((MAKE_64BIT_MASK(ig + iw, 64 - ig - iw) & hpa_offset) + >> (ig + iw)) / 3) << (ig + 8))); + } return true; } @@ -1200,22 +1245,28 @@ static void ct3d_reset(DeviceState *dev) uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers; uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask; + pcie_cap_fill_link_ep_usp(PCI_DEVICE(dev), ct3d->width, ct3d->speed); cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE); - cxl_device_register_init_t3(ct3d); + cxl_device_register_init_t3(ct3d, CXL_T3_MSIX_MBOX); /* * Bring up an endpoint to target with MCTP over VDM. * This device is emulating an MLD with single LD for now. */ + if (ct3d->vdm_fm_owned_ld_mctp_cci.initialized) { + cxl_destroy_cci(&ct3d->vdm_fm_owned_ld_mctp_cci); + } cxl_initialize_t3_fm_owned_ld_mctpcci(&ct3d->vdm_fm_owned_ld_mctp_cci, DEVICE(ct3d), DEVICE(ct3d), 512); /* Max payload made up */ + if (ct3d->ld0_cci.initialized) { + cxl_destroy_cci(&ct3d->ld0_cci); + } cxl_initialize_t3_ld_cci(&ct3d->ld0_cci, DEVICE(ct3d), DEVICE(ct3d), 512); /* Max payload made up */ - } -static Property ct3_props[] = { +static const Property ct3_props[] = { DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND, HostMemoryBackend *), /* for backward compatibility */ DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem, @@ -1229,7 +1280,10 @@ static Property ct3_props[] = { DEFINE_PROP_UINT8("num-dc-regions", CXLType3Dev, dc.num_regions, 0), DEFINE_PROP_LINK("volatile-dc-memdev", CXLType3Dev, dc.host_dc, TYPE_MEMORY_BACKEND, HostMemoryBackend *), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_PCIE_LINK_SPEED("x-speed", CXLType3Dev, + speed, PCIE_LINK_SPEED_32), + DEFINE_PROP_PCIE_LINK_WIDTH("x-width", CXLType3Dev, + width, PCIE_LINK_WIDTH_16), }; static uint64_t get_lsa_size(CXLType3Dev *ct3d) @@ -1375,9 +1429,7 @@ void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length, ct3d = CXL_TYPE3(obj); QLIST_FOREACH(p, &ct3d->poison_list, node) { - if (((start >= p->start) && (start < p->start + p->length)) || - ((start + length > p->start) && - (start + length <= p->start + p->length))) { + if ((start < p->start + p->length) && (start + length > p->start)) { error_setg(errp, "Overlap with existing poisoned region not supported"); return; @@ -1492,8 +1544,6 @@ void qmp_cxl_inject_uncorrectable_errors(const char *path, stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err); pcie_aer_inject_error(PCI_DEVICE(obj), &err); - - return; } void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type, @@ -1540,9 +1590,9 @@ void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type, 
pcie_aer_inject_error(PCI_DEVICE(obj), &err); } -static void cxl_assign_event_header(CXLEventRecordHdr *hdr, - const QemuUUID *uuid, uint32_t flags, - uint8_t length, uint64_t timestamp) +void cxl_assign_event_header(CXLEventRecordHdr *hdr, + const QemuUUID *uuid, uint32_t flags, + uint8_t length, uint64_t timestamp) { st24_le_p(&hdr->flags, flags); hdr->length = length; @@ -1769,7 +1819,6 @@ void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags, if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) { cxl_event_irq_assert(ct3d); } - return; } void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log, @@ -1831,28 +1880,13 @@ void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log, } } -/* CXL r3.1 Table 8-50: Dynamic Capacity Event Record */ -static const QemuUUID dynamic_capacity_uuid = { - .data = UUID(0xca95afa7, 0xf183, 0x4018, 0x8c, 0x2f, - 0x95, 0x26, 0x8e, 0x10, 0x1a, 0x2a), -}; - -typedef enum CXLDCEventType { - DC_EVENT_ADD_CAPACITY = 0x0, - DC_EVENT_RELEASE_CAPACITY = 0x1, - DC_EVENT_FORCED_RELEASE_CAPACITY = 0x2, - DC_EVENT_REGION_CONFIG_UPDATED = 0x3, - DC_EVENT_ADD_CAPACITY_RSP = 0x4, - DC_EVENT_CAPACITY_RELEASED = 0x5, -} CXLDCEventType; - /* * Check whether the range [dpa, dpa + len - 1] has overlaps with extents in * the list. * Return value: return true if has overlaps; otherwise, return false */ -static bool cxl_extents_overlaps_dpa_range(CXLDCExtentList *list, - uint64_t dpa, uint64_t len) +bool cxl_extents_overlaps_dpa_range(CXLDCExtentList *list, + uint64_t dpa, uint64_t len) { CXLDCExtent *ent; Range range1, range2; @@ -1897,8 +1931,8 @@ bool cxl_extents_contains_dpa_range(CXLDCExtentList *list, return false; } -static bool cxl_extent_groups_overlaps_dpa_range(CXLDCExtentGroupList *list, - uint64_t dpa, uint64_t len) +bool cxl_extent_groups_overlaps_dpa_range(CXLDCExtentGroupList *list, + uint64_t dpa, uint64_t len) { CXLDCExtentGroup *group; @@ -1923,15 +1957,11 @@ static void qmp_cxl_process_dynamic_capacity_prescriptive(const char *path, CxlDynamicCapacityExtentList *records, Error **errp) { Object *obj; - CXLEventDynamicCapacity dCap = {}; - CXLEventRecordHdr *hdr = &dCap.hdr; CXLType3Dev *dcd; - uint8_t flags = 1 << CXL_EVENT_TYPE_INFO; uint32_t num_extents = 0; CxlDynamicCapacityExtentList *list; CXLDCExtentGroup *group = NULL; g_autofree CXLDCExtentRaw *extents = NULL; - uint8_t enc_log = CXL_EVENT_TYPE_DYNAMIC_CAP; uint64_t dpa, offset, len, block_size; g_autofree unsigned long *blk_bitmap = NULL; int i; @@ -2041,40 +2071,10 @@ static void qmp_cxl_process_dynamic_capacity_prescriptive(const char *path, } if (group) { cxl_extent_group_list_insert_tail(&dcd->dc.extents_pending, group); + dcd->dc.total_extent_count += num_extents; } - /* - * CXL r3.1 section 8.2.9.2.1.6: Dynamic Capacity Event Record - * - * All Dynamic Capacity event records shall set the Event Record Severity - * field in the Common Event Record Format to Informational Event. All - * Dynamic Capacity related events shall be logged in the Dynamic Capacity - * Event Log. 
- */ - cxl_assign_event_header(hdr, &dynamic_capacity_uuid, flags, sizeof(dCap), - cxl_device_get_timestamp(&dcd->cxl_dstate)); - - dCap.type = type; - /* FIXME: for now, validity flag is cleared */ - dCap.validity_flags = 0; - stw_le_p(&dCap.host_id, hid); - /* only valid for DC_REGION_CONFIG_UPDATED event */ - dCap.updated_region_id = 0; - dCap.flags = 0; - for (i = 0; i < num_extents; i++) { - memcpy(&dCap.dynamic_capacity_extent, &extents[i], - sizeof(CXLDCExtentRaw)); - - if (i < num_extents - 1) { - /* Set "More" flag */ - dCap.flags |= BIT(0); - } - - if (cxl_event_insert(&dcd->cxl_dstate, enc_log, - (CXLEventRecordRaw *)&dCap)) { - cxl_event_irq_assert(dcd); - } - } + cxl_create_dc_event_records_for_extents(dcd, type, extents, num_extents); } void qmp_cxl_add_dynamic_capacity(const char *path, uint16_t host_id, @@ -2126,7 +2126,7 @@ void qmp_cxl_release_dynamic_capacity(const char *path, uint16_t host_id, } } -static void ct3_class_init(ObjectClass *oc, void *data) +static void ct3_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); @@ -2144,7 +2144,7 @@ static void ct3_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); dc->desc = "CXL Memory Device (Type 3)"; - dc->reset = ct3d_reset; + device_class_set_legacy_reset(dc, ct3d_reset); device_class_set_props(dc, ct3_props); cvc->get_lsa_size = get_lsa_size; @@ -2159,7 +2159,7 @@ static const TypeInfo ct3d_info = { .class_size = sizeof(struct CXLType3Class), .class_init = ct3_class_init, .instance_size = sizeof(CXLType3Dev), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CXL_DEVICE }, { INTERFACE_PCIE_DEVICE }, {} diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c index a5f279adcc1..1a432e9bd22 100644 --- a/hw/mem/memory-device.c +++ b/hw/mem/memory-device.c @@ -16,8 +16,8 @@ #include "hw/boards.h" #include "qemu/range.h" #include "hw/virtio/vhost.h" -#include "sysemu/kvm.h" -#include "exec/address-spaces.h" +#include "system/kvm.h" +#include "system/address-spaces.h" #include "trace.h" static bool memory_device_is_empty(const MemoryDeviceState *md) diff --git a/hw/mem/npcm7xx_mc.c b/hw/mem/npcm7xx_mc.c index abc5af56208..07fc108e0a9 100644 --- a/hw/mem/npcm7xx_mc.c +++ b/hw/mem/npcm7xx_mc.c @@ -65,7 +65,7 @@ static void npcm7xx_mc_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio); } -static void npcm7xx_mc_class_init(ObjectClass *klass, void *data) +static void npcm7xx_mc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c index 1631a7d13fa..23ab143ef8e 100644 --- a/hw/mem/nvdimm.c +++ b/hw/mem/nvdimm.c @@ -30,7 +30,7 @@ #include "hw/mem/nvdimm.h" #include "hw/qdev-properties.h" #include "hw/mem/memory-device.h" -#include "sysemu/hostmem.h" +#include "system/hostmem.h" static void nvdimm_get_label_size(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) @@ -246,12 +246,11 @@ static void nvdimm_write_label_data(NVDIMMDevice *nvdimm, const void *buf, memory_region_set_dirty(mr, backend_offset, size); } -static Property nvdimm_properties[] = { +static const Property nvdimm_properties[] = { DEFINE_PROP_BOOL(NVDIMM_UNARMED_PROP, NVDIMMDevice, unarmed, false), - DEFINE_PROP_END_OF_LIST(), }; -static void nvdimm_class_init(ObjectClass *oc, void *data) +static void nvdimm_class_init(ObjectClass *oc, const void *data) { PCDIMMDeviceClass 
*ddc = PC_DIMM_CLASS(oc); MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc); diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c index 27919ca45d2..f701d5b5f9b 100644 --- a/hw/mem/pc-dimm.c +++ b/hw/mem/pc-dimm.c @@ -28,8 +28,8 @@ #include "qapi/error.h" #include "qapi/visitor.h" #include "qemu/module.h" -#include "sysemu/hostmem.h" -#include "sysemu/numa.h" +#include "system/hostmem.h" +#include "system/numa.h" #include "trace.h" static int pc_dimm_get_free_slot(const int *hint, int max_slots, Error **errp); @@ -150,14 +150,13 @@ static int pc_dimm_get_free_slot(const int *hint, int max_slots, Error **errp) return slot; } -static Property pc_dimm_properties[] = { +static const Property pc_dimm_properties[] = { DEFINE_PROP_UINT64(PC_DIMM_ADDR_PROP, PCDIMMDevice, addr, 0), DEFINE_PROP_UINT32(PC_DIMM_NODE_PROP, PCDIMMDevice, node, 0), DEFINE_PROP_INT32(PC_DIMM_SLOT_PROP, PCDIMMDevice, slot, PC_DIMM_UNASSIGNED_SLOT), DEFINE_PROP_LINK(PC_DIMM_MEMDEV_PROP, PCDIMMDevice, hostmem, TYPE_MEMORY_BACKEND, HostMemoryBackend *), - DEFINE_PROP_END_OF_LIST(), }; static void pc_dimm_get_size(Object *obj, Visitor *v, const char *name, @@ -277,7 +276,7 @@ static void pc_dimm_md_fill_device_info(const MemoryDeviceState *md, } } -static void pc_dimm_class_init(ObjectClass *oc, void *data) +static void pc_dimm_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc); @@ -302,7 +301,7 @@ static const TypeInfo pc_dimm_info = { .instance_init = pc_dimm_init, .class_init = pc_dimm_class_init, .class_size = sizeof(PCDIMMDeviceClass), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_MEMORY_DEVICE }, { } }, diff --git a/hw/mem/sparse-mem.c b/hw/mem/sparse-mem.c index 6e8f4f84fbd..d7b00e563ae 100644 --- a/hw/mem/sparse-mem.c +++ b/hw/mem/sparse-mem.c @@ -17,7 +17,7 @@ #include "hw/sysbus.h" #include "qapi/error.h" #include "qemu/units.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" #include "hw/mem/sparse-mem.h" #define SPARSE_MEM(obj) OBJECT_CHECK(SparseMemState, (obj), TYPE_SPARSE_MEM) @@ -82,7 +82,6 @@ static void sparse_mem_enter_reset(Object *obj, ResetType type) { SparseMemState *s = SPARSE_MEM(obj); g_hash_table_remove_all(s->mapped); - return; } static const MemoryRegionOps sparse_mem_ops = { @@ -96,14 +95,13 @@ static const MemoryRegionOps sparse_mem_ops = { }, }; -static Property sparse_mem_properties[] = { +static const Property sparse_mem_properties[] = { /* The base address of the memory */ DEFINE_PROP_UINT64("baseaddr", SparseMemState, baseaddr, 0x0), /* The length of the sparse memory region */ DEFINE_PROP_UINT64("length", SparseMemState, length, UINT64_MAX), /* Max amount of actual memory that can be used to back the sparse memory */ DEFINE_PROP_UINT64("maxsize", SparseMemState, maxsize, 10 * MiB), - DEFINE_PROP_END_OF_LIST(), }; MemoryRegion *sparse_mem_init(uint64_t addr, uint64_t length) @@ -138,7 +136,7 @@ static void sparse_mem_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->mmio); } -static void sparse_mem_class_init(ObjectClass *klass, void *data) +static void sparse_mem_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/meson.build b/hw/meson.build index 1c6308fe957..791ce21ab42 100644 --- a/hw/meson.build +++ b/hw/meson.build @@ -27,7 +27,6 @@ subdir('nvram') subdir('pci') subdir('pci-bridge') subdir('pci-host') -subdir('pcmcia') subdir('rtc') subdir('scsi') 
subdir('sd') @@ -36,10 +35,13 @@ subdir('smbios') subdir('ssi') subdir('timer') subdir('tpm') +subdir('uefi') subdir('ufs') subdir('usb') subdir('vfio') +subdir('vfio-user') subdir('virtio') +subdir('vmapple') subdir('watchdog') subdir('xen') subdir('xenpv') @@ -48,7 +50,6 @@ subdir('fsi') subdir('alpha') subdir('arm') subdir('avr') -subdir('cris') subdir('hppa') subdir('i386') subdir('loongarch') diff --git a/hw/microblaze/Kconfig b/hw/microblaze/Kconfig index d78ba843fac..72d8072f764 100644 --- a/hw/microblaze/Kconfig +++ b/hw/microblaze/Kconfig @@ -1,7 +1,7 @@ config PETALOGIX_S3ADSP1800 bool default y - depends on MICROBLAZE + depends on MICROBLAZE && FDT select PFLASH_CFI01 select XILINX select XILINX_AXI @@ -11,9 +11,9 @@ config PETALOGIX_S3ADSP1800 config PETALOGIX_ML605 bool default y - depends on MICROBLAZE + depends on MICROBLAZE && FDT select PFLASH_CFI01 - select SERIAL + select SERIAL_MM select SSI_M25P80 select XILINX select XILINX_AXI diff --git a/hw/microblaze/boot.c b/hw/microblaze/boot.c index ed61e483ee8..4a9c9df3181 100644 --- a/hw/microblaze/boot.c +++ b/hw/microblaze/boot.c @@ -31,8 +31,8 @@ #include "qemu/config-file.h" #include "qemu/error-report.h" #include "qemu/guest-random.h" -#include "sysemu/device_tree.h" -#include "sysemu/reset.h" +#include "system/device_tree.h" +#include "system/reset.h" #include "hw/boards.h" #include "hw/loader.h" #include "elf.h" @@ -114,8 +114,8 @@ static uint64_t translate_kernel_address(void *opaque, uint64_t addr) return addr - 0x30000000LL; } -void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base, - uint32_t ramsize, +void microblaze_load_kernel(MicroBlazeCPU *cpu, bool is_little_endian, + hwaddr ddr_base, uint32_t ramsize, const char *initrd_filename, const char *dtb_filename, void (*machine_cpu_reset)(MicroBlazeCPU *)) @@ -130,7 +130,7 @@ void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base, dtb_arg = current_machine->dtb; /* default to pcbios dtb as passed by machine_init */ if (!dtb_arg && dtb_filename) { - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_filename); + filename = qemu_find_file(QEMU_FILE_TYPE_DTB, dtb_filename); } boot_info.machine_cpu_reset = machine_cpu_reset; @@ -144,13 +144,15 @@ void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base, /* Boots a kernel elf binary. */ kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &entry, NULL, &high, NULL, - TARGET_BIG_ENDIAN, EM_MICROBLAZE, 0, 0); + is_little_endian ? ELFDATA2LSB : ELFDATA2MSB, + EM_MICROBLAZE, 0, 0); base32 = entry; if (base32 == 0xc0000000) { kernel_size = load_elf(kernel_filename, NULL, translate_kernel_address, NULL, &entry, NULL, NULL, NULL, - TARGET_BIG_ENDIAN, EM_MICROBLAZE, 0, 0); + is_little_endian ? ELFDATA2LSB : ELFDATA2MSB, + EM_MICROBLAZE, 0, 0); } /* Always boot into physical ram. 
*/ boot_info.bootstrap_pc = (uint32_t)entry; diff --git a/hw/microblaze/boot.h b/hw/microblaze/boot.h index 5a8c2f79750..d179a551a69 100644 --- a/hw/microblaze/boot.h +++ b/hw/microblaze/boot.h @@ -2,8 +2,8 @@ #define MICROBLAZE_BOOT_H -void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base, - uint32_t ramsize, +void microblaze_load_kernel(MicroBlazeCPU *cpu, bool is_little_endian, + hwaddr ddr_base, uint32_t ramsize, const char *initrd_filename, const char *dtb_filename, void (*machine_cpu_reset)(MicroBlazeCPU *)); diff --git a/hw/microblaze/petalogix_ml605_mmu.c b/hw/microblaze/petalogix_ml605_mmu.c index 0f5fabc32e3..6e923c49cfc 100644 --- a/hw/microblaze/petalogix_ml605_mmu.c +++ b/hw/microblaze/petalogix_ml605_mmu.c @@ -32,11 +32,11 @@ #include "hw/sysbus.h" #include "net/net.h" #include "hw/block/flash.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/boards.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/qdev-properties.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/ssi/ssi.h" #include "boot.h" @@ -90,7 +90,7 @@ petalogix_ml605_init(MachineState *machine) object_property_set_int(OBJECT(cpu), "use-fpu", 1, &error_abort); object_property_set_bool(OBJECT(cpu), "dcache-writeback", true, &error_abort); - object_property_set_bool(OBJECT(cpu), "endianness", true, &error_abort); + object_property_set_bool(OBJECT(cpu), "little-endian", true, &error_abort); qdev_realize(DEVICE(cpu), NULL, &error_abort); /* Attach emulated BRAM through the LMB. */ @@ -111,6 +111,7 @@ petalogix_ml605_init(MachineState *machine) dev = qdev_new("xlnx.xps-intc"); + qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE); qdev_prop_set_uint32(dev, "kind-of-intr", 1 << TIMER_IRQ); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR); @@ -126,6 +127,7 @@ petalogix_ml605_init(MachineState *machine) /* 2 timers at irq 2 @ 100 Mhz. 
*/ dev = qdev_new("xlnx.xps-timer"); + qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE); qdev_prop_set_uint32(dev, "one-timer-only", 0); qdev_prop_set_uint32(dev, "clock-frequency", 100 * 1000000); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); @@ -173,6 +175,7 @@ petalogix_ml605_init(MachineState *machine) SSIBus *spi; dev = qdev_new("xlnx.xps-spi"); + qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE); qdev_prop_set_uint8(dev, "num-ss-bits", NUM_SPI_FLASHES); busdev = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(busdev, &error_fatal); @@ -204,7 +207,7 @@ petalogix_ml605_init(MachineState *machine) cpu->cfg.pvr_regs[5] = 0xc56be000; cpu->cfg.pvr_regs[10] = 0x0e000000; /* virtex 6 */ - microblaze_load_kernel(cpu, MEMORY_BASEADDR, ram_size, + microblaze_load_kernel(cpu, true, MEMORY_BASEADDR, ram_size, machine->initrd_filename, BINARY_DEVICE_TREE_FILE, NULL); @@ -213,7 +216,7 @@ petalogix_ml605_init(MachineState *machine) static void petalogix_ml605_machine_init(MachineClass *mc) { - mc->desc = "PetaLogix linux refdesign for xilinx ml605 little endian"; + mc->desc = "PetaLogix linux refdesign for xilinx ml605 (little endian)"; mc->init = petalogix_ml605_init; } diff --git a/hw/microblaze/petalogix_s3adsp1800_mmu.c b/hw/microblaze/petalogix_s3adsp1800_mmu.c index dad46bd7f98..e8d0ddfdf86 100644 --- a/hw/microblaze/petalogix_s3adsp1800_mmu.c +++ b/hw/microblaze/petalogix_s3adsp1800_mmu.c @@ -30,10 +30,10 @@ #include "hw/sysbus.h" #include "net/net.h" #include "hw/block/flash.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/boards.h" #include "hw/misc/unimp.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/char/xilinx_uartlite.h" #include "boot.h" @@ -55,9 +55,23 @@ #define ETHLITE_IRQ 1 #define UARTLITE_IRQ 3 +#define TYPE_PETALOGIX_S3ADSP1800_MACHINE \ + MACHINE_TYPE_NAME("petalogix-s3adsp1800") + +struct S3Adsp1800MachineState { + MachineState parent_class; + + EndianMode endianness; +}; + +OBJECT_DECLARE_TYPE(S3Adsp1800MachineState, MachineClass, + PETALOGIX_S3ADSP1800_MACHINE) + + static void petalogix_s3adsp1800_init(MachineState *machine) { + S3Adsp1800MachineState *psms = PETALOGIX_S3ADSP1800_MACHINE(machine); ram_addr_t ram_size = machine->ram_size; DeviceState *dev; MicroBlazeCPU *cpu; @@ -68,9 +82,12 @@ petalogix_s3adsp1800_init(MachineState *machine) MemoryRegion *phys_ram = g_new(MemoryRegion, 1); qemu_irq irq[32]; MemoryRegion *sysmem = get_system_memory(); + EndianMode endianness = psms->endianness; cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU)); object_property_set_str(OBJECT(cpu), "version", "7.10.d", &error_abort); + object_property_set_bool(OBJECT(cpu), "little-endian", + endianness == ENDIAN_MODE_LITTLE, &error_abort); qdev_realize(DEVICE(cpu), NULL, &error_abort); /* Attach emulated BRAM through the LMB. 
*/ @@ -90,6 +107,7 @@ petalogix_s3adsp1800_init(MachineState *machine) 64 * KiB, 1, 0x89, 0x18, 0x0000, 0x0, 1); dev = qdev_new("xlnx.xps-intc"); + qdev_prop_set_enum(dev, "endianness", endianness); qdev_prop_set_uint32(dev, "kind-of-intr", 1 << ETHLITE_IRQ | 1 << UARTLITE_IRQ); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); @@ -101,6 +119,7 @@ petalogix_s3adsp1800_init(MachineState *machine) } dev = qdev_new(TYPE_XILINX_UARTLITE); + qdev_prop_set_enum(dev, "endianness", endianness); qdev_prop_set_chr(dev, "chardev", serial_hd(0)); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, UARTLITE_BASEADDR); @@ -108,6 +127,7 @@ petalogix_s3adsp1800_init(MachineState *machine) /* 2 timers at irq 2 @ 62 Mhz. */ dev = qdev_new("xlnx.xps-timer"); + qdev_prop_set_enum(dev, "endianness", endianness); qdev_prop_set_uint32(dev, "one-timer-only", 0); qdev_prop_set_uint32(dev, "clock-frequency", 62 * 1000000); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); @@ -115,6 +135,7 @@ petalogix_s3adsp1800_init(MachineState *machine) sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]); dev = qdev_new("xlnx.xps-ethernetlite"); + qdev_prop_set_enum(dev, "endianness", endianness); qemu_configure_nic_device(dev, true, NULL); qdev_prop_set_uint32(dev, "tx-ping-pong", 0); qdev_prop_set_uint32(dev, "rx-ping-pong", 0); @@ -122,19 +143,52 @@ petalogix_s3adsp1800_init(MachineState *machine) sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, ETHLITE_BASEADDR); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[ETHLITE_IRQ]); - create_unimplemented_device("gpio", GPIO_BASEADDR, 0x10000); + create_unimplemented_device("xps_gpio", GPIO_BASEADDR, 0x10000); - microblaze_load_kernel(cpu, ddr_base, ram_size, - machine->initrd_filename, + microblaze_load_kernel(cpu, endianness == ENDIAN_MODE_LITTLE, ddr_base, + ram_size, machine->initrd_filename, BINARY_DEVICE_TREE_FILE, NULL); } -static void petalogix_s3adsp1800_machine_init(MachineClass *mc) +static int machine_get_endianness(Object *obj, Error **errp G_GNUC_UNUSED) +{ + S3Adsp1800MachineState *ms = PETALOGIX_S3ADSP1800_MACHINE(obj); + return ms->endianness; +} + +static void machine_set_endianness(Object *obj, int endianness, Error **errp) +{ + S3Adsp1800MachineState *ms = PETALOGIX_S3ADSP1800_MACHINE(obj); + ms->endianness = endianness; +} + +static void petalogix_s3adsp1800_machine_class_init(ObjectClass *oc, + const void *data) { + MachineClass *mc = MACHINE_CLASS(oc); + ObjectProperty *prop; + mc->desc = "PetaLogix linux refdesign for xilinx Spartan 3ADSP1800"; mc->init = petalogix_s3adsp1800_init; mc->is_default = true; + + prop = object_class_property_add_enum(oc, "endianness", "EndianMode", + &EndianMode_lookup, + machine_get_endianness, + machine_set_endianness); + object_property_set_default_str(prop, TARGET_BIG_ENDIAN ? 
"big" : "little"); + object_class_property_set_description(oc, "endianness", + "Defines whether the machine runs in big or little endian mode"); } -DEFINE_MACHINE("petalogix-s3adsp1800", petalogix_s3adsp1800_machine_init) +static const TypeInfo petalogix_s3adsp1800_machine_types[] = { + { + .name = TYPE_PETALOGIX_S3ADSP1800_MACHINE, + .parent = TYPE_MACHINE, + .class_init = petalogix_s3adsp1800_machine_class_init, + .instance_size = sizeof(S3Adsp1800MachineState), + }, +}; + +DEFINE_TYPES(petalogix_s3adsp1800_machine_types) diff --git a/hw/microblaze/xlnx-zynqmp-pmu.c b/hw/microblaze/xlnx-zynqmp-pmu.c index 1bfc9641d29..e909802bb74 100644 --- a/hw/microblaze/xlnx-zynqmp-pmu.c +++ b/hw/microblaze/xlnx-zynqmp-pmu.c @@ -17,7 +17,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/boards.h" #include "cpu.h" #include "boot.h" @@ -90,7 +90,7 @@ static void xlnx_zynqmp_pmu_soc_realize(DeviceState *dev, Error **errp) object_property_set_bool(OBJECT(&s->cpu), "use-pcmp-instr", true, &error_abort); object_property_set_bool(OBJECT(&s->cpu), "use-mmu", false, &error_abort); - object_property_set_bool(OBJECT(&s->cpu), "endianness", true, + object_property_set_bool(OBJECT(&s->cpu), "little-endian", true, &error_abort); object_property_set_str(OBJECT(&s->cpu), "version", "8.40.b", &error_abort); @@ -121,7 +121,7 @@ static void xlnx_zynqmp_pmu_soc_realize(DeviceState *dev, Error **errp) } } -static void xlnx_zynqmp_pmu_soc_class_init(ObjectClass *oc, void *data) +static void xlnx_zynqmp_pmu_soc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -172,7 +172,7 @@ static void xlnx_zynqmp_pmu_init(MachineState *machine) qdev_realize(DEVICE(pmu), NULL, &error_fatal); /* Load the kernel */ - microblaze_load_kernel(&pmu->cpu, XLNX_ZYNQMP_PMU_RAM_ADDR, + microblaze_load_kernel(&pmu->cpu, true, XLNX_ZYNQMP_PMU_RAM_ADDR, machine->ram_size, machine->initrd_filename, machine->dtb, @@ -181,9 +181,8 @@ static void xlnx_zynqmp_pmu_init(MachineState *machine) static void xlnx_zynqmp_pmu_machine_init(MachineClass *mc) { - mc->desc = "Xilinx ZynqMP PMU machine"; + mc->desc = "Xilinx ZynqMP PMU machine (little endian)"; mc->init = xlnx_zynqmp_pmu_init; } DEFINE_MACHINE("xlnx-zynqmp-pmu", xlnx_zynqmp_pmu_machine_init) - diff --git a/hw/mips/Kconfig b/hw/mips/Kconfig index 692bede538e..f84fffcd323 100644 --- a/hw/mips/Kconfig +++ b/hw/mips/Kconfig @@ -10,14 +10,14 @@ config MALTA select MIPS_CPS select PIIX select PFLASH_CFI01 - select SERIAL + select SERIAL_MM select SMBUS_EEPROM config MIPSSIM bool default y depends on MIPS - select SERIAL + select SERIAL_MM select MIPSNET config JAZZ @@ -37,7 +37,7 @@ config JAZZ select FDC_SYSBUS select MC146818RTC select PCKBD - select SERIAL + select SERIAL_MM select PARALLEL select DS1225Y select JAZZ_LED @@ -65,7 +65,7 @@ config LOONGSON3V imply VIRTIO_VGA imply QXL if SPICE imply USB_OHCI_PCI - select SERIAL + select SERIAL_MM select GOLDFISH_RTC select LOONGSON_IPI select LOONGSON_LIOINTC @@ -76,7 +76,7 @@ config LOONGSON3V config MIPS_CPS bool - select MIPS_ITU + select MIPS_ITU if TCG config MIPS_BOSTON bool @@ -89,7 +89,7 @@ config MIPS_BOSTON select MIPS_CPS select PCI_EXPRESS_XILINX select AHCI_ICH9 - select SERIAL + select SERIAL_MM config FW_CFG_MIPS bool diff --git a/hw/mips/boston.c b/hw/mips/boston.c index 1b44fb354c4..149a263bd5a 100644 --- a/hw/mips/boston.c +++ b/hw/mips/boston.c @@ -22,7 +22,7 @@ #include "elf.h" #include "hw/boards.h" -#include 
"hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/ide/pci.h" #include "hw/ide/ahci-pci.h" #include "hw/loader.h" @@ -37,11 +37,11 @@ #include "qemu/guest-random.h" #include "qemu/log.h" #include "chardev/char.h" -#include "sysemu/device_tree.h" -#include "sysemu/sysemu.h" -#include "sysemu/qtest.h" -#include "sysemu/runstate.h" -#include "sysemu/reset.h" +#include "system/device_tree.h" +#include "system/system.h" +#include "system/qtest.h" +#include "system/runstate.h" +#include "system/reset.h" #include #include "qom/object.h" @@ -220,7 +220,7 @@ static void boston_lcd_write(void *opaque, hwaddr addr, static const MemoryRegionOps boston_lcd_ops = { .read = boston_lcd_read, .write = boston_lcd_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; static uint64_t boston_platreg_read(void *opaque, hwaddr addr, @@ -299,7 +299,7 @@ static void boston_platreg_write(void *opaque, hwaddr addr, static const MemoryRegionOps boston_platreg_ops = { .read = boston_platreg_read, .write = boston_platreg_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; static void mips_boston_instance_init(Object *obj) @@ -358,8 +358,8 @@ static void gen_firmware(void *p, hwaddr kernel_entry, hwaddr fdt_addr) kernel_entry); } -static const void *boston_fdt_filter(void *opaque, const void *fdt_orig, - const void *match_data, hwaddr *load_addr) +static void *boston_fdt_filter(void *opaque, const void *fdt_orig, + const void *match_data, hwaddr *load_addr) { BostonState *s = BOSTON(opaque); MachineState *machine = s->mach; @@ -395,7 +395,6 @@ static const void *boston_fdt_filter(void *opaque, const void *fdt_orig, 1, ram_high_sz); fdt = g_realloc(fdt, fdt_totalsize(fdt)); - qemu_fdt_dumpdtb(fdt, fdt_sz); s->fdt_base = *load_addr; @@ -758,7 +757,7 @@ static void boston_mach_init(MachineState *machine) s->uart = serial_mm_init(sys_mem, boston_memmap[BOSTON_UART].base, 2, get_cps_irq(&s->cps, 3), 10000000, - serial_hd(0), DEVICE_NATIVE_ENDIAN); + serial_hd(0), DEVICE_LITTLE_ENDIAN); lcd = g_new(MemoryRegion, 1); memory_region_init_io(lcd, NULL, &boston_lcd_ops, s, "boston-lcd", 0x8); @@ -792,12 +791,12 @@ static void boston_mach_init(MachineState *machine) kernel_size = load_elf(machine->kernel_filename, NULL, cpu_mips_kseg0_to_phys, NULL, &kernel_entry, NULL, &kernel_high, - NULL, 0, EM_MIPS, 1, 0); + NULL, ELFDATA2LSB, EM_MIPS, 1, 0); if (kernel_size > 0) { int dt_size; g_autofree const void *dtb_file_data = NULL; - g_autofree const void *dtb_load_data = NULL; + void *dtb_load_data = NULL; hwaddr dtb_paddr = QEMU_ALIGN_UP(kernel_high, 64 * KiB); hwaddr dtb_vaddr = cpu_mips_phys_to_kseg0(NULL, dtb_paddr); @@ -810,6 +809,12 @@ static void boston_mach_init(MachineState *machine) dtb_load_data = boston_fdt_filter(s, dtb_file_data, NULL, &dtb_vaddr); + if (!dtb_load_data) { + /* boston_fdt_filter() already printed the error for us */ + exit(1); + } + + machine->fdt = dtb_load_data; /* Calculate real fdt size after filter */ dt_size = fdt_totalsize(dtb_load_data); @@ -818,7 +823,8 @@ static void boston_mach_init(MachineState *machine) rom_ptr(dtb_paddr, dt_size)); } else { /* Try to load file as FIT */ - fit_err = load_fit(&boston_fit_loader, machine->kernel_filename, s); + fit_err = load_fit(&boston_fit_loader, machine->kernel_filename, + &machine->fdt, s); if (fit_err) { error_report("unable to load kernel image"); exit(1); diff --git a/hw/mips/cps.c b/hw/mips/cps.c index 07b73b0a1f4..e47695e2b0a 100644 --- a/hw/mips/cps.c +++ b/hw/mips/cps.c @@ 
-24,8 +24,8 @@ #include "hw/mips/mips.h" #include "hw/qdev-clock.h" #include "hw/qdev-properties.h" -#include "sysemu/kvm.h" -#include "sysemu/reset.h" +#include "system/tcg.h" +#include "system/reset.h" qemu_irq get_cps_irq(MIPSCPSState *s, int pin_number) { @@ -59,7 +59,7 @@ static bool cpu_mips_itu_supported(CPUMIPSState *env) { bool is_mt = (env->CP0_Config5 & (1 << CP0C5_VP)) || ase_mt_available(env); - return is_mt && !kvm_enabled(); + return is_mt && tcg_enabled(); } static void mips_cps_realize(DeviceState *dev, Error **errp) @@ -77,6 +77,9 @@ static void mips_cps_realize(DeviceState *dev, Error **errp) MIPSCPU *cpu = MIPS_CPU(object_new(s->cpu_type)); CPUMIPSState *env = &cpu->env; + object_property_set_bool(OBJECT(cpu), "big-endian", s->cpu_is_bigendian, + &error_abort); + /* All VPs are halted on reset. Leave powering up to CPC. */ object_property_set_bool(OBJECT(cpu), "start-powered-off", true, &error_abort); @@ -163,14 +166,14 @@ static void mips_cps_realize(DeviceState *dev, Error **errp) sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gcr), 0)); } -static Property mips_cps_properties[] = { +static const Property mips_cps_properties[] = { DEFINE_PROP_UINT32("num-vp", MIPSCPSState, num_vp, 1), DEFINE_PROP_UINT32("num-irq", MIPSCPSState, num_irq, 256), DEFINE_PROP_STRING("cpu-type", MIPSCPSState, cpu_type), - DEFINE_PROP_END_OF_LIST() + DEFINE_PROP_BOOL("cpu-big-endian", MIPSCPSState, cpu_is_bigendian, false), }; -static void mips_cps_class_init(ObjectClass *klass, void *data) +static void mips_cps_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/mips/fuloong2e.c b/hw/mips/fuloong2e.c index 6e4303ba473..2a8507b8b0a 100644 --- a/hw/mips/fuloong2e.c +++ b/hw/mips/fuloong2e.c @@ -36,10 +36,11 @@ #include "hw/qdev-properties.h" #include "elf.h" #include "hw/isa/vt82c686.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" -#include "sysemu/sysemu.h" +#include "system/qtest.h" +#include "system/reset.h" +#include "system/system.h" #include "qemu/error-report.h" +#include "exec/tswap.h" #define ENVP_PADDR 0x2000 #define ENVP_VADDR cpu_mips_phys_to_kseg0(NULL, ENVP_PADDR) @@ -105,7 +106,7 @@ static uint64_t load_kernel(MIPSCPU *cpu) cpu_mips_kseg0_to_phys, NULL, &kernel_entry, NULL, &kernel_high, NULL, - 0, EM_MIPS, 1, 0); + ELFDATA2LSB, EM_MIPS, 1, 0); if (kernel_size < 0) { error_report("could not load kernel '%s': %s", loaderparams.kernel_filename, @@ -229,7 +230,7 @@ static void mips_fuloong2e_init(MachineState *machine) clock_set_hz(cpuclk, 533080000); /* ~533 MHz */ /* init CPUs */ - cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk); + cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk, false); env = &cpu->env; qemu_register_reset(main_cpu_reset, cpu); @@ -333,7 +334,6 @@ static void mips_fuloong2e_machine_init(MachineClass *mc) mc->default_cpu_type = MIPS_CPU_TYPE_NAME("Loongson-2E"); mc->default_ram_size = 256 * MiB; mc->default_ram_id = "fuloong2e.ram"; - mc->minimum_page_bits = 14; machine_add_audiodev_property(mc); } diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c index 1bc17e69d3a..7fb0b97a388 100644 --- a/hw/mips/jazz.c +++ b/hw/mips/jazz.c @@ -28,11 +28,11 @@ #include "hw/mips/mips.h" #include "hw/intc/i8259.h" #include "hw/dma/i8257.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/char/parallel.h" #include "hw/isa/isa.h" #include "hw/block/fdc.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/boards.h" #include "net/net.h" #include "hw/scsi/esp.h" @@ 
-44,13 +44,13 @@ #include "hw/audio/pcspk.h" #include "hw/input/i8042.h" #include "hw/sysbus.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" +#include "system/qtest.h" +#include "system/reset.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/help_option.h" #ifdef CONFIG_TCG -#include "hw/core/tcg-cpu-ops.h" +#include "accel/tcg/cpu-ops.h" #endif /* CONFIG_TCG */ #include "cpu.h" @@ -59,12 +59,6 @@ enum jazz_model_e { JAZZ_PICA61, }; -#if TARGET_BIG_ENDIAN -#define BIOS_FILENAME "mips_bios.bin" -#else -#define BIOS_FILENAME "mipsel_bios.bin" -#endif - static void main_cpu_reset(void *opaque) { MIPSCPU *cpu = opaque; @@ -128,7 +122,7 @@ static void mips_jazz_init_net(IOMMUMemoryRegion *rc4030_dma_mr, uint8_t *prom; NICInfo *nd; - nd = qemu_find_nic_info("dp8393x", true, "dp82932"); + nd = qemu_find_nic_info("dp8393x", true, "dp83932"); if (!nd) { return; } @@ -168,6 +162,8 @@ static void mips_jazz_init_net(IOMMUMemoryRegion *rc4030_dma_mr, static void mips_jazz_init(MachineState *machine, enum jazz_model_e jazz_model) { + const char *bios_name = TARGET_BIG_ENDIAN ? "mips_bios.bin" + : "mipsel_bios.bin"; MemoryRegion *address_space = get_system_memory(); char *filename; int bios_size, n; @@ -212,7 +208,8 @@ static void mips_jazz_init(MachineState *machine, * ext_clk[jazz_model].pll_mult); /* init CPUs */ - cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk); + cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk, + TARGET_BIG_ENDIAN); env = &cpu->env; qemu_register_reset(main_cpu_reset, cpu); @@ -244,7 +241,8 @@ static void mips_jazz_init(MachineState *machine, memory_region_add_subregion(address_space, 0xfff00000LL, bios2); /* load the BIOS image. */ - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, machine->firmware ?: BIOS_FILENAME); + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, + machine->firmware ?: bios_name); if (filename) { bios_size = load_image_targphys(filename, 0xfff00000LL, MAGNUM_BIOS_SIZE); @@ -414,7 +412,7 @@ void mips_pica61_init(MachineState *machine) mips_jazz_init(machine, JAZZ_PICA61); } -static void mips_magnum_class_init(ObjectClass *oc, void *data) +static void mips_magnum_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -431,7 +429,7 @@ static const TypeInfo mips_magnum_type = { .class_init = mips_magnum_class_init, }; -static void mips_pica61_class_init(ObjectClass *oc, void *data) +static void mips_pica61_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); diff --git a/hw/mips/loongson3_bootp.c b/hw/mips/loongson3_bootp.c index b97b81903b7..67812666c5b 100644 --- a/hw/mips/loongson3_bootp.c +++ b/hw/mips/loongson3_bootp.c @@ -21,16 +21,17 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qemu/cutils.h" -#include "cpu.h" -#include "hw/boards.h" +#include "qemu/bswap.h" +#include "exec/hwaddr.h" #include "hw/mips/loongson3_bootp.h" -static void init_cpu_info(void *g_cpuinfo, uint64_t cpu_freq) +static void init_cpu_info(void *g_cpuinfo, uint32_t cpu_count, + uint32_t processor_id, uint64_t cpu_freq) { struct efi_cpuinfo_loongson *c = g_cpuinfo; c->cputype = cpu_to_le32(Loongson_3A); - c->processor_id = cpu_to_le32(MIPS_CPU(first_cpu)->env.CP0_PRid); + c->processor_id = cpu_to_le32(processor_id); if (cpu_freq > UINT_MAX) { c->cpu_clock_freq = cpu_to_le32(UINT_MAX); } else { @@ -38,8 +39,8 @@ static void init_cpu_info(void *g_cpuinfo, uint64_t cpu_freq) } c->cpu_startup_core_id = cpu_to_le16(0); - c->nr_cpus = 
cpu_to_le32(current_machine->smp.cpus); - c->total_node = cpu_to_le32(DIV_ROUND_UP(current_machine->smp.cpus, + c->nr_cpus = cpu_to_le32(cpu_count); + c->total_node = cpu_to_le32(DIV_ROUND_UP(cpu_count, LOONGSON3_CORE_PER_NODE)); } @@ -110,9 +111,10 @@ static void init_special_info(void *g_special) } void init_loongson_params(struct loongson_params *lp, void *p, + uint32_t cpu_count, uint32_t processor_id, uint64_t cpu_freq, uint64_t ram_size) { - init_cpu_info(p, cpu_freq); + init_cpu_info(p, cpu_count, processor_id, cpu_freq); lp->cpu_offset = cpu_to_le64((uintptr_t)p - (uintptr_t)lp); p += ROUND_UP(sizeof(struct efi_cpuinfo_loongson), 64); diff --git a/hw/mips/loongson3_bootp.h b/hw/mips/loongson3_bootp.h index 9091265df7f..9dc325a8557 100644 --- a/hw/mips/loongson3_bootp.h +++ b/hw/mips/loongson3_bootp.h @@ -233,6 +233,7 @@ enum { extern const MemMapEntry virt_memmap[]; void init_loongson_params(struct loongson_params *lp, void *p, + uint32_t cpu_count, uint32_t processor_id, uint64_t cpu_freq, uint64_t ram_size); void init_reset_system(struct efi_reset_system_t *reset); diff --git a/hw/mips/loongson3_virt.c b/hw/mips/loongson3_virt.c index 408e3d7054c..de6fbcc0cb4 100644 --- a/hw/mips/loongson3_virt.c +++ b/hw/mips/loongson3_virt.c @@ -29,7 +29,7 @@ #include "qemu/datadir.h" #include "qapi/error.h" #include "elf.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/intc/loongson_liointc.h" #include "hw/mips/mips.h" #include "hw/mips/fw_cfg.h" @@ -45,10 +45,10 @@ #include "hw/pci-host/gpex.h" #include "hw/usb.h" #include "net/net.h" -#include "sysemu/kvm.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" +#include "system/kvm.h" +#include "system/qtest.h" +#include "system/reset.h" +#include "system/runstate.h" #include "qemu/error-report.h" #define PM_CNTL_MODE 0x10 @@ -97,6 +97,7 @@ struct LoongsonMachineState { MemoryRegion *pio_alias; MemoryRegion *mmio_alias; MemoryRegion *ecam_alias; + MemoryRegion *core_iocsr[LOONGSON_MAX_VCPUS]; }; typedef struct LoongsonMachineState LoongsonMachineState; @@ -143,7 +144,7 @@ static void loongson3_pm_write(void *opaque, hwaddr addr, static const MemoryRegionOps loongson3_pm_ops = { .read = loongson3_pm_read, .write = loongson3_pm_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 1, .max_access_size = 1 @@ -152,7 +153,7 @@ static const MemoryRegionOps loongson3_pm_ops = { #define DEF_LOONGSON3_FREQ (800 * 1000 * 1000) -static uint64_t get_cpu_freq_hz(void) +static uint64_t get_cpu_freq_hz(const MIPSCPU *cpu) { #ifdef CONFIG_KVM int ret; @@ -163,7 +164,7 @@ static uint64_t get_cpu_freq_hz(void) }; if (kvm_enabled()) { - ret = kvm_vcpu_ioctl(first_cpu, KVM_GET_ONE_REG, &freq_reg); + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_ONE_REG, &freq_reg); if (ret >= 0) { return freq * 2; } @@ -172,7 +173,7 @@ static uint64_t get_cpu_freq_hz(void) return DEF_LOONGSON3_FREQ; } -static void init_boot_param(void) +static void init_boot_param(unsigned cpu_count, uint32_t processor_id) { static void *p; struct boot_params *bp; @@ -183,7 +184,7 @@ static void init_boot_param(void) bp->efi.smbios.vers = cpu_to_le16(1); init_reset_system(&(bp->reset_system)); p += ROUND_UP(sizeof(struct boot_params), 64); - init_loongson_params(&(bp->efi.smbios.lp), p, + init_loongson_params(&(bp->efi.smbios.lp), p, cpu_count, processor_id, loaderparams.cpu_freq, loaderparams.ram_size); rom_add_blob_fixed("params_rom", bp, @@ -279,7 +280,7 @@ static void 
fw_cfg_boot_set(void *opaque, const char *boot_device, fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); } -static void fw_conf_init(unsigned long ram_size) +static void fw_conf_init(void) { static const uint8_t suspend[6] = {128, 0, 0, 129, 128, 128}; FWCfgState *fw_cfg; @@ -288,9 +289,9 @@ static void fw_conf_init(unsigned long ram_size) fw_cfg = fw_cfg_init_mem_wide(cfg_addr, cfg_addr + 8, 8, 0, NULL); fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)current_machine->smp.cpus); fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)current_machine->smp.max_cpus); - fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, loaderparams.ram_size); fw_cfg_add_i32(fw_cfg, FW_CFG_MACHINE_VERSION, 1); - fw_cfg_add_i64(fw_cfg, FW_CFG_CPU_FREQ, get_cpu_freq_hz()); + fw_cfg_add_i64(fw_cfg, FW_CFG_CPU_FREQ, loaderparams.cpu_freq); fw_cfg_add_file(fw_cfg, "etc/system-states", g_memdup2(suspend, sizeof(suspend)), sizeof(suspend)); @@ -357,7 +358,7 @@ static uint64_t load_kernel(CPUMIPSState *env) cpu_mips_kseg0_to_phys, NULL, &kernel_entry, &kernel_low, &kernel_high, - NULL, 0, EM_MIPS, 1, 0); + NULL, ELFDATA2LSB, EM_MIPS, 1, 0); if (kernel_size < 0) { error_report("could not load kernel '%s': %s", loaderparams.kernel_filename, @@ -398,25 +399,33 @@ static uint64_t load_kernel(CPUMIPSState *env) return kernel_entry; } -static void main_cpu_reset(void *opaque) +static void generic_cpu_reset(void *opaque) { MIPSCPU *cpu = opaque; CPUMIPSState *env = &cpu->env; cpu_reset(CPU(cpu)); - /* Loongson-3 reset stuff */ if (loaderparams.kernel_filename) { - if (cpu == MIPS_CPU(first_cpu)) { - env->active_tc.gpr[4] = loaderparams.a0; - env->active_tc.gpr[5] = loaderparams.a1; - env->active_tc.gpr[6] = loaderparams.a2; - env->active_tc.PC = loaderparams.kernel_entry; - } env->CP0_Status &= ~((1 << CP0St_BEV) | (1 << CP0St_ERL)); } } +static void main_cpu_reset(void *opaque) +{ + generic_cpu_reset(opaque); + + if (loaderparams.kernel_filename) { + MIPSCPU *cpu = opaque; + CPUMIPSState *env = &cpu->env; + + env->active_tc.gpr[4] = loaderparams.a0; + env->active_tc.gpr[5] = loaderparams.a1; + env->active_tc.gpr[6] = loaderparams.a2; + env->active_tc.PC = loaderparams.kernel_entry; + } +} + static inline void loongson3_virt_devices_init(MachineState *machine, DeviceState *pic) { @@ -457,7 +466,7 @@ static inline void loongson3_virt_devices_init(MachineState *machine, virt_memmap[VIRT_PCIE_PIO].base, s->pio_alias); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, virt_memmap[VIRT_PCIE_PIO].base); - for (i = 0; i < GPEX_NUM_IRQS; i++) { + for (i = 0; i < PCI_NUM_PINS; i++) { irq = qdev_get_gpio_in(pic, PCIE_IRQ_BASE + i); sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ_BASE + i); @@ -483,9 +492,8 @@ static void mips_loongson3_virt_init(MachineState *machine) { int i; long bios_size; - MIPSCPU *cpu; + MIPSCPU *cpu = NULL; Clock *cpuclk; - CPUMIPSState *env; DeviceState *liointc; DeviceState *ipi = NULL; char *filename; @@ -493,6 +501,7 @@ static void mips_loongson3_virt_init(MachineState *machine) const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; ram_addr_t ram_size = machine->ram_size; + LoongsonMachineState *s = LOONGSON_MACHINE(machine); MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); MemoryRegion *bios = g_new(MemoryRegion, 1); @@ -551,7 +560,7 @@ static void mips_loongson3_virt_init(MachineState *machine) 
serial_mm_init(address_space_mem, virt_memmap[VIRT_UART].base, 0, qdev_get_gpio_in(liointc, UART_IRQ), 115200, serial_hd(0), - DEVICE_NATIVE_ENDIAN); + DEVICE_LITTLE_ENDIAN); sysbus_create_simple("goldfish_rtc", virt_memmap[VIRT_RTC].base, qdev_get_gpio_in(liointc, RTC_IRQ)); @@ -559,20 +568,20 @@ static void mips_loongson3_virt_init(MachineState *machine) cpuclk = clock_new(OBJECT(machine), "cpu-refclk"); clock_set_hz(cpuclk, DEF_LOONGSON3_FREQ); - for (i = 0; i < machine->smp.cpus; i++) { + for (i = machine->smp.cpus - 1; i >= 0; --i) { int node = i / LOONGSON3_CORE_PER_NODE; int core = i % LOONGSON3_CORE_PER_NODE; int ip; /* init CPUs */ - cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk); + cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk, false); /* Init internal devices */ cpu_mips_irq_init_cpu(cpu); cpu_mips_clock_init(cpu); - qemu_register_reset(main_cpu_reset, cpu); + qemu_register_reset(i ? generic_cpu_reset : main_cpu_reset, cpu); - if (ipi) { + if (!kvm_enabled()) { hwaddr base = ((hwaddr)node << 44) + virt_memmap[VIRT_IPI].base; base += core * 0x100; qdev_connect_gpio_out(ipi, i, cpu->env.irq[6]); @@ -586,6 +595,7 @@ static void mips_loongson3_virt_init(MachineState *machine) iocsr, 0, UINT32_MAX); memory_region_add_subregion(&MIPS_CPU(cpu)->env.iocsr.mr, 0, core_iocsr); + s->core_iocsr[i] = core_iocsr; } if (node > 0) { @@ -598,7 +608,7 @@ static void mips_loongson3_virt_init(MachineState *machine) pin, cpu->env.irq[ip + 2]); } } - env = &MIPS_CPU(first_cpu)->env; + assert(cpu); /* This variable points to the first created cpu. */ /* Allocate RAM/BIOS, 0x00000000~0x10000000 is alias of 0x80000000~0x90000000 */ memory_region_init_rom(bios, NULL, "loongson3.bios", @@ -623,16 +633,16 @@ static void mips_loongson3_virt_init(MachineState *machine) * Please use -L to set the BIOS path and -bios to set bios name. 
*/ + loaderparams.cpu_freq = get_cpu_freq_hz(cpu); + loaderparams.ram_size = ram_size; if (kernel_filename) { - loaderparams.cpu_freq = get_cpu_freq_hz(); - loaderparams.ram_size = ram_size; loaderparams.kernel_filename = kernel_filename; loaderparams.kernel_cmdline = kernel_cmdline; loaderparams.initrd_filename = initrd_filename; - loaderparams.kernel_entry = load_kernel(env); + loaderparams.kernel_entry = load_kernel(&cpu->env); init_boot_rom(); - init_boot_param(); + init_boot_param(machine->smp.cpus, cpu->env.CP0_PRid); } else { filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, machine->firmware ?: LOONGSON3_BIOSNAME); @@ -651,13 +661,13 @@ static void mips_loongson3_virt_init(MachineState *machine) exit(1); } - fw_conf_init(ram_size); + fw_conf_init(); } loongson3_virt_devices_init(machine, liointc); } -static void loongson3v_machine_class_init(ObjectClass *oc, void *data) +static void loongson3v_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -667,7 +677,6 @@ static void loongson3v_machine_class_init(ObjectClass *oc, void *data) mc->max_cpus = LOONGSON_MAX_VCPUS; mc->default_ram_id = "loongson3.highram"; mc->default_ram_size = 1600 * MiB; - mc->minimum_page_bits = 14; mc->default_nic = "virtio-net-pci"; } diff --git a/hw/mips/malta.c b/hw/mips/malta.c index 664a2ae0a9e..cbdbb210568 100644 --- a/hw/mips/malta.c +++ b/hw/mips/malta.c @@ -28,10 +28,11 @@ #include "qemu/datadir.h" #include "qemu/cutils.h" #include "qemu/guest-random.h" +#include "exec/tswap.h" #include "hw/clock.h" #include "hw/southbridge/piix.h" #include "hw/isa/superio.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "net/net.h" #include "hw/boards.h" #include "hw/i2c/smbus_eeprom.h" @@ -48,12 +49,12 @@ #include "qom/object.h" #include "hw/sysbus.h" /* SysBusDevice */ #include "qemu/host-utils.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" +#include "system/qtest.h" +#include "system/reset.h" +#include "system/runstate.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "semihosting/semihost.h" #include "hw/mips/cps.h" #include "hw/qdev-clock.h" @@ -93,12 +94,6 @@ typedef struct { bool display_inited; } MaltaFPGAState; -#if TARGET_BIG_ENDIAN -#define BIOS_FILENAME "mips_bios.bin" -#else -#define BIOS_FILENAME "mipsel_bios.bin" -#endif - #define TYPE_MIPS_MALTA "mips-malta" OBJECT_DECLARE_SIMPLE_TYPE(MaltaState, MIPS_MALTA) @@ -382,11 +377,7 @@ static uint64_t malta_fpga_read(void *opaque, hwaddr addr, /* STATUS Register */ case 0x00208: -#if TARGET_BIG_ENDIAN - val = 0x00000012; -#else - val = 0x00000010; -#endif + val = TARGET_BIG_ENDIAN ? 0x00000012 : 0x00000010; break; /* JMPRS Register */ @@ -879,8 +870,9 @@ static uint64_t load_kernel(void) kernel_size = load_elf(loaderparams.kernel_filename, NULL, cpu_mips_kseg0_to_phys, NULL, &kernel_entry, NULL, - &kernel_high, NULL, TARGET_BIG_ENDIAN, EM_MIPS, - 1, 0); + &kernel_high, NULL, + TARGET_BIG_ENDIAN ? 
ELFDATA2MSB : ELFDATA2LSB, + EM_MIPS, 1, 0); if (kernel_size < 0) { error_report("could not load kernel '%s': %s", loaderparams.kernel_filename, @@ -1034,7 +1026,8 @@ static void create_cpu_without_cps(MachineState *ms, MaltaState *s, int i; for (i = 0; i < ms->smp.cpus; i++) { - cpu = mips_cpu_create_with_clock(ms->cpu_type, s->cpuclk); + cpu = mips_cpu_create_with_clock(ms->cpu_type, s->cpuclk, + TARGET_BIG_ENDIAN); /* Init internal devices */ cpu_mips_irq_init_cpu(cpu); @@ -1054,6 +1047,8 @@ static void create_cps(MachineState *ms, MaltaState *s, object_initialize_child(OBJECT(s), "cps", &s->cps, TYPE_MIPS_CPS); object_property_set_str(OBJECT(&s->cps), "cpu-type", ms->cpu_type, &error_fatal); + object_property_set_bool(OBJECT(&s->cps), "cpu-big-endian", + TARGET_BIG_ENDIAN, &error_abort); object_property_set_uint(OBJECT(&s->cps), "num-vp", ms->smp.cpus, &error_fatal); qdev_connect_clock_in(DEVICE(&s->cps), "clk-in", s->cpuclk); @@ -1172,9 +1167,12 @@ void mips_malta_init(MachineState *machine) target_long bios_size = FLASH_SIZE; /* Load firmware from flash. */ if (!dinfo) { + const char *bios_name = TARGET_BIG_ENDIAN ? "mips_bios.bin" + : "mipsel_bios.bin"; + /* Load a BIOS image. */ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, - machine->firmware ?: BIOS_FILENAME); + machine->firmware ?: bios_name); if (filename) { bios_size = load_image_targphys(filename, FLASH_ADDRESS, BIOS_SIZE); @@ -1192,8 +1190,7 @@ void mips_malta_init(MachineState *machine) * In little endian mode the 32bit words in the bios are swapped, * a neat trick which allows bi-endian firmware. */ -#if !TARGET_BIG_ENDIAN - { + if (!TARGET_BIG_ENDIAN) { uint32_t *end, *addr; const size_t swapsize = MIN(bios_size, 0x3e0000); addr = rom_ptr(FLASH_ADDRESS, swapsize); @@ -1206,7 +1203,6 @@ void mips_malta_init(MachineState *machine) addr++; } } -#endif } /* diff --git a/hw/mips/meson.build b/hw/mips/meson.build index ca37c42d900..31dbd2bf4d9 100644 --- a/hw/mips/meson.build +++ b/hw/mips/meson.build @@ -1,7 +1,8 @@ mips_ss = ss.source_set() mips_ss.add(files('bootloader.c', 'mips_int.c')) -mips_ss.add(when: 'CONFIG_FW_CFG_MIPS', if_true: files('fw_cfg.c')) -mips_ss.add(when: 'CONFIG_LOONGSON3V', if_true: files('loongson3_bootp.c', 'loongson3_virt.c')) +common_ss.add(when: 'CONFIG_FW_CFG_MIPS', if_true: files('fw_cfg.c')) +common_ss.add(when: 'CONFIG_LOONGSON3V', if_true: files('loongson3_bootp.c')) +mips_ss.add(when: 'CONFIG_LOONGSON3V', if_true: files('loongson3_virt.c')) mips_ss.add(when: 'CONFIG_MALTA', if_true: files('malta.c')) mips_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('cps.c')) diff --git a/hw/mips/mips_int.c b/hw/mips/mips_int.c index eef2fd2cd11..26fdb934f50 100644 --- a/hw/mips/mips_int.c +++ b/hw/mips/mips_int.c @@ -23,7 +23,7 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" #include "hw/irq.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "kvm_mips.h" static void cpu_mips_irq_request(void *opaque, int irq, int level) diff --git a/hw/mips/mipssim.c b/hw/mips/mipssim.c index 9170d6c474b..e843307b9b6 100644 --- a/hw/mips/mipssim.c +++ b/hw/mips/mipssim.c @@ -28,30 +28,24 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/datadir.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/clock.h" #include "hw/mips/mips.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "net/net.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/boards.h" #include "hw/loader.h" #include "elf.h" #include "hw/sysbus.h" 
#include "hw/qdev-properties.h" #include "qemu/error-report.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" +#include "system/qtest.h" +#include "system/reset.h" #include "cpu.h" #define BIOS_SIZE (4 * MiB) -#if TARGET_BIG_ENDIAN -#define BIOS_FILENAME "mips_bios.bin" -#else -#define BIOS_FILENAME "mipsel_bios.bin" -#endif - static struct _loaderparams { int ram_size; const char *kernel_filename; @@ -73,7 +67,8 @@ static uint64_t load_kernel(void) kernel_size = load_elf(loaderparams.kernel_filename, NULL, cpu_mips_kseg0_to_phys, NULL, &entry, NULL, - &kernel_high, NULL, TARGET_BIG_ENDIAN, + &kernel_high, NULL, + TARGET_BIG_ENDIAN ? ELFDATA2MSB : ELFDATA2LSB, EM_MIPS, 1, 0); if (kernel_size < 0) { error_report("could not load kernel '%s': %s", @@ -142,6 +137,8 @@ mips_mipssim_init(MachineState *machine) const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; + const char *bios_name = TARGET_BIG_ENDIAN ? "mips_bios.bin" + : "mipsel_bios.bin"; char *filename; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *isa = g_new(MemoryRegion, 1); @@ -160,7 +157,8 @@ mips_mipssim_init(MachineState *machine) #endif /* Init CPUs. */ - cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk); + cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk, + TARGET_BIG_ENDIAN); env = &cpu->env; reset_info = g_new0(ResetData, 1); @@ -177,7 +175,8 @@ mips_mipssim_init(MachineState *machine) /* Map the BIOS / boot exception handler. */ memory_region_add_subregion(address_space_mem, 0x1fc00000LL, bios); /* Load a BIOS / boot exception handler image. */ - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, machine->firmware ?: BIOS_FILENAME); + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, + machine->firmware ?: bios_name); if (filename) { bios_size = load_image_targphys(filename, 0x1fc00000LL, BIOS_SIZE); g_free(filename); diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig index 1e08785b832..4e35657468b 100644 --- a/hw/misc/Kconfig +++ b/hw/misc/Kconfig @@ -30,14 +30,6 @@ config EDU default y if TEST_DEVICES depends on PCI && MSI_NONBROKEN -config PCA9552 - bool - depends on I2C - -config PCA9554 - bool - depends on I2C - config I2C_ECHO bool default y if TEST_DEVICES @@ -55,6 +47,18 @@ config A9SCU config ARM11SCU bool +config MAX78000_AES + bool + +config MAX78000_GCR + bool + +config MAX78000_ICC + bool + +config MAX78000_TRNG + bool + config MOS6522 bool @@ -72,9 +76,13 @@ config IVSHMEM_DEVICE default y if PCI_DEVICES depends on PCI && LINUX && IVSHMEM && MSI_NONBROKEN +config IVSHMEM_FLAT_DEVICE + bool + default y + depends on LINUX && IVSHMEM + config ECCMEMCTL bool - select ECC config IMX bool @@ -82,6 +90,15 @@ config IMX select SSI select USB_EHCI_SYSBUS +config FSL_IMX8MP_ANALOG + bool + +config FSL_IMX8MP_CCM + bool + +config STM32_RCC + bool + config STM32F2XX_SYSCFG bool @@ -102,6 +119,7 @@ config STM32L4X5_RCC config MIPS_ITU bool + depends on TCG config MPS2_FPGAIO bool @@ -143,6 +161,10 @@ config PVPANIC_ISA depends on ISA_BUS select PVPANIC_COMMON +config PVPANIC_MMIO + bool + select PVPANIC_COMMON + config AUX bool select I2C diff --git a/hw/misc/a9scu.c b/hw/misc/a9scu.c index 04225dfb78d..bb00ae29692 100644 --- a/hw/misc/a9scu.c +++ b/hw/misc/a9scu.c @@ -123,18 +123,17 @@ static const VMStateDescription vmstate_a9_scu = { } }; -static Property a9_scu_properties[] = { +static const Property a9_scu_properties[] = { DEFINE_PROP_UINT32("num-cpu", A9SCUState, 
num_cpu, 1), - DEFINE_PROP_END_OF_LIST(), }; -static void a9_scu_class_init(ObjectClass *klass, void *data) +static void a9_scu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, a9_scu_properties); dc->vmsd = &vmstate_a9_scu; - dc->reset = a9_scu_reset; + device_class_set_legacy_reset(dc, a9_scu_reset); dc->realize = a9_scu_realize; } diff --git a/hw/misc/allwinner-a10-ccm.c b/hw/misc/allwinner-a10-ccm.c index 575b0189524..6b188c25a5d 100644 --- a/hw/misc/allwinner-a10-ccm.c +++ b/hw/misc/allwinner-a10-ccm.c @@ -147,7 +147,7 @@ static void allwinner_a10_ccm_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_a10_ccm_ops = { .read = allwinner_a10_ccm_read, .write = allwinner_a10_ccm_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -199,7 +199,7 @@ static const VMStateDescription allwinner_a10_ccm_vmstate = { } }; -static void allwinner_a10_ccm_class_init(ObjectClass *klass, void *data) +static void allwinner_a10_ccm_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/misc/allwinner-a10-dramc.c b/hw/misc/allwinner-a10-dramc.c index a7c58fa6d06..c16814cc5b8 100644 --- a/hw/misc/allwinner-a10-dramc.c +++ b/hw/misc/allwinner-a10-dramc.c @@ -114,7 +114,7 @@ static void allwinner_a10_dramc_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_a10_dramc_ops = { .read = allwinner_a10_dramc_read, .write = allwinner_a10_dramc_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -154,7 +154,7 @@ static const VMStateDescription allwinner_a10_dramc_vmstate = { } }; -static void allwinner_a10_dramc_class_init(ObjectClass *klass, void *data) +static void allwinner_a10_dramc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/misc/allwinner-cpucfg.c b/hw/misc/allwinner-cpucfg.c index 31b97809695..90dd872abf0 100644 --- a/hw/misc/allwinner-cpucfg.c +++ b/hw/misc/allwinner-cpucfg.c @@ -217,7 +217,7 @@ static void allwinner_cpucfg_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_cpucfg_ops = { .read = allwinner_cpucfg_read, .write = allwinner_cpucfg_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -258,11 +258,11 @@ static const VMStateDescription allwinner_cpucfg_vmstate = { } }; -static void allwinner_cpucfg_class_init(ObjectClass *klass, void *data) +static void allwinner_cpucfg_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = allwinner_cpucfg_reset; + device_class_set_legacy_reset(dc, allwinner_cpucfg_reset); dc->vmsd = &allwinner_cpucfg_vmstate; } diff --git a/hw/misc/allwinner-h3-ccu.c b/hw/misc/allwinner-h3-ccu.c index cfc68522d33..be91c0c1cae 100644 --- a/hw/misc/allwinner-h3-ccu.c +++ b/hw/misc/allwinner-h3-ccu.c @@ -155,7 +155,7 @@ static void allwinner_h3_ccu_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_h3_ccu_ops = { .read = allwinner_h3_ccu_read, .write = allwinner_h3_ccu_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -218,11 
+218,11 @@ static const VMStateDescription allwinner_h3_ccu_vmstate = { } }; -static void allwinner_h3_ccu_class_init(ObjectClass *klass, void *data) +static void allwinner_h3_ccu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = allwinner_h3_ccu_reset; + device_class_set_legacy_reset(dc, allwinner_h3_ccu_reset); dc->vmsd = &allwinner_h3_ccu_vmstate; } diff --git a/hw/misc/allwinner-h3-dramc.c b/hw/misc/allwinner-h3-dramc.c index e168ffe6233..8834524c30c 100644 --- a/hw/misc/allwinner-h3-dramc.c +++ b/hw/misc/allwinner-h3-dramc.c @@ -24,7 +24,7 @@ #include "migration/vmstate.h" #include "qemu/log.h" #include "qemu/module.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/qdev-properties.h" #include "qapi/error.h" #include "hw/misc/allwinner-h3-dramc.h" @@ -219,7 +219,7 @@ static void allwinner_h3_dramphy_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_h3_dramcom_ops = { .read = allwinner_h3_dramcom_read, .write = allwinner_h3_dramcom_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -230,7 +230,7 @@ static const MemoryRegionOps allwinner_h3_dramcom_ops = { static const MemoryRegionOps allwinner_h3_dramctl_ops = { .read = allwinner_h3_dramctl_read, .write = allwinner_h3_dramctl_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -241,7 +241,7 @@ static const MemoryRegionOps allwinner_h3_dramctl_ops = { static const MemoryRegionOps allwinner_h3_dramphy_ops = { .read = allwinner_h3_dramphy_read, .write = allwinner_h3_dramphy_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -314,10 +314,9 @@ static void allwinner_h3_dramc_init(Object *obj) sysbus_init_mmio(sbd, &s->dramphy_iomem); } -static Property allwinner_h3_dramc_properties[] = { +static const Property allwinner_h3_dramc_properties[] = { DEFINE_PROP_UINT64("ram-addr", AwH3DramCtlState, ram_addr, 0x0), DEFINE_PROP_UINT32("ram-size", AwH3DramCtlState, ram_size, 256 * MiB), - DEFINE_PROP_END_OF_LIST() }; static const VMStateDescription allwinner_h3_dramc_vmstate = { @@ -332,11 +331,11 @@ static const VMStateDescription allwinner_h3_dramc_vmstate = { } }; -static void allwinner_h3_dramc_class_init(ObjectClass *klass, void *data) +static void allwinner_h3_dramc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = allwinner_h3_dramc_reset; + device_class_set_legacy_reset(dc, allwinner_h3_dramc_reset); dc->vmsd = &allwinner_h3_dramc_vmstate; dc->realize = allwinner_h3_dramc_realize; device_class_set_props(dc, allwinner_h3_dramc_properties); diff --git a/hw/misc/allwinner-h3-sysctrl.c b/hw/misc/allwinner-h3-sysctrl.c index 2d29be83e3a..6b86524606a 100644 --- a/hw/misc/allwinner-h3-sysctrl.c +++ b/hw/misc/allwinner-h3-sysctrl.c @@ -78,7 +78,7 @@ static void allwinner_h3_sysctrl_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_h3_sysctrl_ops = { .read = allwinner_h3_sysctrl_read, .write = allwinner_h3_sysctrl_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -116,11 +116,12 @@ static const VMStateDescription allwinner_h3_sysctrl_vmstate = { } }; -static void allwinner_h3_sysctrl_class_init(ObjectClass 
*klass, void *data) +static void allwinner_h3_sysctrl_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = allwinner_h3_sysctrl_reset; + device_class_set_legacy_reset(dc, allwinner_h3_sysctrl_reset); dc->vmsd = &allwinner_h3_sysctrl_vmstate; } diff --git a/hw/misc/allwinner-r40-ccu.c b/hw/misc/allwinner-r40-ccu.c index 33baf4429dd..4e21eeafdd0 100644 --- a/hw/misc/allwinner-r40-ccu.c +++ b/hw/misc/allwinner-r40-ccu.c @@ -129,7 +129,7 @@ static void allwinner_r40_ccu_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_r40_ccu_ops = { .read = allwinner_r40_ccu_read, .write = allwinner_r40_ccu_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -185,11 +185,11 @@ static const VMStateDescription allwinner_r40_ccu_vmstate = { } }; -static void allwinner_r40_ccu_class_init(ObjectClass *klass, void *data) +static void allwinner_r40_ccu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = allwinner_r40_ccu_reset; + device_class_set_legacy_reset(dc, allwinner_r40_ccu_reset); dc->vmsd = &allwinner_r40_ccu_vmstate; } diff --git a/hw/misc/allwinner-r40-dramc.c b/hw/misc/allwinner-r40-dramc.c index 75b0bef4fd9..1c8e17e3c0b 100644 --- a/hw/misc/allwinner-r40-dramc.c +++ b/hw/misc/allwinner-r40-dramc.c @@ -24,7 +24,7 @@ #include "migration/vmstate.h" #include "qemu/log.h" #include "qemu/module.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/qdev-properties.h" #include "qapi/error.h" #include "qemu/bitops.h" @@ -297,7 +297,7 @@ static void allwinner_r40_dramphy_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_r40_dramcom_ops = { .read = allwinner_r40_dramcom_read, .write = allwinner_r40_dramcom_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -308,7 +308,7 @@ static const MemoryRegionOps allwinner_r40_dramcom_ops = { static const MemoryRegionOps allwinner_r40_dramctl_ops = { .read = allwinner_r40_dramctl_read, .write = allwinner_r40_dramctl_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -319,7 +319,7 @@ static const MemoryRegionOps allwinner_r40_dramctl_ops = { static const MemoryRegionOps allwinner_r40_dramphy_ops = { .read = allwinner_r40_dramphy_read, .write = allwinner_r40_dramphy_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -358,7 +358,7 @@ static void allwinner_r40_detect_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_r40_detect_ops = { .read = allwinner_r40_detect_read, .write = allwinner_r40_detect_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -393,7 +393,7 @@ static uint64_t allwinner_r40_dualrank_detect_read(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_r40_dualrank_detect_ops = { .read = allwinner_r40_dualrank_detect_read, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -464,10 +464,9 @@ static void allwinner_r40_dramc_init(Object *obj) sysbus_init_mmio(sbd, &s->dramphy_iomem); } -static Property 
allwinner_r40_dramc_properties[] = { +static const Property allwinner_r40_dramc_properties[] = { DEFINE_PROP_UINT64("ram-addr", AwR40DramCtlState, ram_addr, 0x0), DEFINE_PROP_UINT32("ram-size", AwR40DramCtlState, ram_size, 256), /* MiB */ - DEFINE_PROP_END_OF_LIST() }; static const VMStateDescription allwinner_r40_dramc_vmstate = { @@ -485,11 +484,11 @@ static const VMStateDescription allwinner_r40_dramc_vmstate = { } }; -static void allwinner_r40_dramc_class_init(ObjectClass *klass, void *data) +static void allwinner_r40_dramc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = allwinner_r40_dramc_reset; + device_class_set_legacy_reset(dc, allwinner_r40_dramc_reset); dc->vmsd = &allwinner_r40_dramc_vmstate; dc->realize = allwinner_r40_dramc_realize; device_class_set_props(dc, allwinner_r40_dramc_properties); diff --git a/hw/misc/allwinner-sid.c b/hw/misc/allwinner-sid.c index e5cd431743b..1e66c14567b 100644 --- a/hw/misc/allwinner-sid.c +++ b/hw/misc/allwinner-sid.c @@ -99,7 +99,7 @@ static void allwinner_sid_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_sid_ops = { .read = allwinner_sid_read, .write = allwinner_sid_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -127,9 +127,8 @@ static void allwinner_sid_init(Object *obj) sysbus_init_mmio(sbd, &s->iomem); } -static Property allwinner_sid_properties[] = { +static const Property allwinner_sid_properties[] = { DEFINE_PROP_UUID_NODEFAULT("identifier", AwSidState, identifier), - DEFINE_PROP_END_OF_LIST() }; static const VMStateDescription allwinner_sid_vmstate = { @@ -144,11 +143,11 @@ static const VMStateDescription allwinner_sid_vmstate = { } }; -static void allwinner_sid_class_init(ObjectClass *klass, void *data) +static void allwinner_sid_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = allwinner_sid_reset; + device_class_set_legacy_reset(dc, allwinner_sid_reset); dc->vmsd = &allwinner_sid_vmstate; device_class_set_props(dc, allwinner_sid_properties); } diff --git a/hw/misc/allwinner-sramc.c b/hw/misc/allwinner-sramc.c index cf10ca8ffe8..ed299ecaae7 100644 --- a/hw/misc/allwinner-sramc.c +++ b/hw/misc/allwinner-sramc.c @@ -104,7 +104,7 @@ static void allwinner_sramc_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_sramc_ops = { .read = allwinner_sramc_read, .write = allwinner_sramc_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -135,11 +135,11 @@ static void allwinner_sramc_reset(DeviceState *dev) } } -static void allwinner_sramc_class_init(ObjectClass *klass, void *data) +static void allwinner_sramc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = allwinner_sramc_reset; + device_class_set_legacy_reset(dc, allwinner_sramc_reset); dc->vmsd = &allwinner_sramc_vmstate; } @@ -163,7 +163,7 @@ static const TypeInfo allwinner_sramc_info = { .class_init = allwinner_sramc_class_init, }; -static void allwinner_r40_sramc_class_init(ObjectClass *klass, void *data) +static void allwinner_r40_sramc_class_init(ObjectClass *klass, const void *data) { AwSRAMCClass *sc = AW_SRAMC_CLASS(klass); diff --git a/hw/misc/applesmc.c b/hw/misc/applesmc.c index 59a48993127..a015d4a9b8c 100644 --- a/hw/misc/applesmc.c +++ b/hw/misc/applesmc.c @@ -350,11 +350,10 @@ static 
void applesmc_unrealize(DeviceState *dev) } } -static Property applesmc_isa_properties[] = { +static const Property applesmc_isa_properties[] = { DEFINE_PROP_UINT32(APPLESMC_PROP_IO_BASE, AppleSMCState, iobase, APPLESMC_DEFAULT_IOBASE), DEFINE_PROP_STRING("osk", AppleSMCState, osk), - DEFINE_PROP_END_OF_LIST(), }; static void build_applesmc_aml(AcpiDevAmlIf *adev, Aml *scope) @@ -376,14 +375,14 @@ static void build_applesmc_aml(AcpiDevAmlIf *adev, Aml *scope) aml_append(scope, dev); } -static void qdev_applesmc_class_init(ObjectClass *klass, void *data) +static void qdev_applesmc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); dc->realize = applesmc_isa_realize; dc->unrealize = applesmc_unrealize; - dc->reset = qdev_applesmc_isa_reset; + device_class_set_legacy_reset(dc, qdev_applesmc_isa_reset); device_class_set_props(dc, applesmc_isa_properties); set_bit(DEVICE_CATEGORY_MISC, dc->categories); adevc->build_dev_aml = build_applesmc_aml; @@ -394,7 +393,7 @@ static const TypeInfo applesmc_isa_info = { .parent = TYPE_ISA_DEVICE, .instance_size = sizeof(AppleSMCState), .class_init = qdev_applesmc_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_ACPI_DEV_AML_IF }, { }, }, diff --git a/hw/misc/arm11scu.c b/hw/misc/arm11scu.c index 17c36a05457..2ad4fd1d21d 100644 --- a/hw/misc/arm11scu.c +++ b/hw/misc/arm11scu.c @@ -75,12 +75,11 @@ static void arm11_scu_init(Object *obj) sysbus_init_mmio(sbd, &s->iomem); } -static Property arm11_scu_properties[] = { +static const Property arm11_scu_properties[] = { DEFINE_PROP_UINT32("num-cpu", ARM11SCUState, num_cpu, 1), - DEFINE_PROP_END_OF_LIST() }; -static void arm11_scu_class_init(ObjectClass *oc, void *data) +static void arm11_scu_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/misc/arm_l2x0.c b/hw/misc/arm_l2x0.c index b14d0a26767..8b4b61eed00 100644 --- a/hw/misc/arm_l2x0.c +++ b/hw/misc/arm_l2x0.c @@ -173,18 +173,17 @@ static void l2x0_priv_init(Object *obj) sysbus_init_mmio(dev, &s->iomem); } -static Property l2x0_properties[] = { +static const Property l2x0_properties[] = { DEFINE_PROP_UINT32("cache-type", L2x0State, cache_type, 0x1c100100), - DEFINE_PROP_END_OF_LIST(), }; -static void l2x0_class_init(ObjectClass *klass, void *data) +static void l2x0_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_l2x0; device_class_set_props(dc, l2x0_properties); - dc->reset = l2x0_priv_reset; + device_class_set_legacy_reset(dc, l2x0_priv_reset); } static const TypeInfo l2x0_info = { diff --git a/hw/misc/arm_sysctl.c b/hw/misc/arm_sysctl.c index 5108f3eda92..0f4e37cd474 100644 --- a/hw/misc/arm_sysctl.c +++ b/hw/misc/arm_sysctl.c @@ -11,7 +11,7 @@ #include "hw/irq.h" #include "hw/qdev-properties.h" #include "qemu/timer.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "qemu/bitops.h" #include "hw/sysbus.h" #include "migration/vmstate.h" @@ -520,7 +520,7 @@ static void arm_sysctl_write(void *opaque, hwaddr offset, * as zero. 
*/ s->sys_cfgctrl = val & ~((3 << 18) | (1 << 31)); - if (val & (1 << 31)) { + if (extract64(val, 31, 1)) { /* Start bit set -- actually do something */ unsigned int dcc = extract32(s->sys_cfgctrl, 26, 4); unsigned int function = extract32(s->sys_cfgctrl, 20, 6); @@ -623,7 +623,7 @@ static void arm_sysctl_finalize(Object *obj) g_free(s->db_clock_reset); } -static Property arm_sysctl_properties[] = { +static const Property arm_sysctl_properties[] = { DEFINE_PROP_UINT32("sys_id", arm_sysctl_state, sys_id, 0), DEFINE_PROP_UINT32("proc_id", arm_sysctl_state, proc_id, 0), /* Daughterboard power supply voltages (as reported via SYS_CFG) */ @@ -632,15 +632,14 @@ static Property arm_sysctl_properties[] = { /* Daughterboard clock reset values (as reported via SYS_CFG) */ DEFINE_PROP_ARRAY("db-clock", arm_sysctl_state, db_num_clocks, db_clock_reset, qdev_prop_uint32, uint32_t), - DEFINE_PROP_END_OF_LIST(), }; -static void arm_sysctl_class_init(ObjectClass *klass, void *data) +static void arm_sysctl_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = arm_sysctl_realize; - dc->reset = arm_sysctl_reset; + device_class_set_legacy_reset(dc, arm_sysctl_reset); dc->vmsd = &vmstate_arm_sysctl; device_class_set_props(dc, arm_sysctl_properties); } diff --git a/hw/misc/armsse-cpu-pwrctrl.c b/hw/misc/armsse-cpu-pwrctrl.c index bfc51d175cb..66e9218f277 100644 --- a/hw/misc/armsse-cpu-pwrctrl.c +++ b/hw/misc/armsse-cpu-pwrctrl.c @@ -125,11 +125,11 @@ static void pwrctrl_init(Object *obj) sysbus_init_mmio(sbd, &s->iomem); } -static void pwrctrl_class_init(ObjectClass *klass, void *data) +static void pwrctrl_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = pwrctrl_reset; + device_class_set_legacy_reset(dc, pwrctrl_reset); dc->vmsd = &pwrctrl_vmstate; } diff --git a/hw/misc/armsse-cpuid.c b/hw/misc/armsse-cpuid.c index e785a090519..a57764d731f 100644 --- a/hw/misc/armsse-cpuid.c +++ b/hw/misc/armsse-cpuid.c @@ -92,9 +92,8 @@ static const MemoryRegionOps armsse_cpuid_ops = { .valid.max_access_size = 4, }; -static Property armsse_cpuid_props[] = { +static const Property armsse_cpuid_props[] = { DEFINE_PROP_UINT32("CPUID", ARMSSECPUID, cpuid, 0), - DEFINE_PROP_END_OF_LIST() }; static void armsse_cpuid_init(Object *obj) @@ -107,7 +106,7 @@ static void armsse_cpuid_init(Object *obj) sysbus_init_mmio(sbd, &s->iomem); } -static void armsse_cpuid_class_init(ObjectClass *klass, void *data) +static void armsse_cpuid_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/armsse-mhu.c b/hw/misc/armsse-mhu.c index 55625b2cca8..d5d307a186c 100644 --- a/hw/misc/armsse-mhu.c +++ b/hw/misc/armsse-mhu.c @@ -176,11 +176,11 @@ static void armsse_mhu_init(Object *obj) sysbus_init_irq(sbd, &s->cpu1irq); } -static void armsse_mhu_class_init(ObjectClass *klass, void *data) +static void armsse_mhu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = armsse_mhu_reset; + device_class_set_legacy_reset(dc, armsse_mhu_reset); dc->vmsd = &armsse_mhu_vmstate; } diff --git a/hw/misc/armv7m_ras.c b/hw/misc/armv7m_ras.c index de24922c944..7bf5acd0a54 100644 --- a/hw/misc/armv7m_ras.c +++ b/hw/misc/armv7m_ras.c @@ -72,7 +72,7 @@ static void armv7m_ras_init(Object *obj) sysbus_init_mmio(sbd, &s->iomem); } -static void armv7m_ras_class_init(ObjectClass *klass, void *data) +static void armv7m_ras_class_init(ObjectClass *klass, const void *data) 
{ /* This device has no state: no need for vmstate or reset */ } diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c index c06c04ddc66..726368fbbc2 100644 --- a/hw/misc/aspeed_hace.c +++ b/hw/misc/aspeed_hace.c @@ -1,6 +1,7 @@ /* * ASPEED Hash and Crypto Engine * + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates * Copyright (C) 2021 IBM Corp. * * Joel Stanley @@ -9,14 +10,17 @@ */ #include "qemu/osdep.h" +#include "qemu/cutils.h" #include "qemu/log.h" #include "qemu/error-report.h" +#include "qemu/iov.h" #include "hw/misc/aspeed_hace.h" #include "qapi/error.h" #include "migration/vmstate.h" #include "crypto/hash.h" #include "hw/qdev-properties.h" #include "hw/irq.h" +#include "trace.h" #define R_CRYPT_CMD (0x10 / 4) @@ -26,9 +30,12 @@ #define TAG_IRQ BIT(15) #define R_HASH_SRC (0x20 / 4) -#define R_HASH_DEST (0x24 / 4) +#define R_HASH_DIGEST (0x24 / 4) #define R_HASH_KEY_BUFF (0x28 / 4) #define R_HASH_SRC_LEN (0x2c / 4) +#define R_HASH_SRC_HI (0x90 / 4) +#define R_HASH_DIGEST_HI (0x94 / 4) +#define R_HASH_KEY_BUFF_HI (0x98 / 4) #define R_HASH_CMD (0x30 / 4) /* Hash algorithm selection */ @@ -58,6 +65,7 @@ /* Other cmd bits */ #define HASH_IRQ_EN BIT(9) #define HASH_SG_EN BIT(18) +#define CRYPT_IRQ_EN BIT(12) /* Scatter-gather data list */ #define SG_LIST_LEN_SIZE 4 #define SG_LIST_LEN_MASK 0x0FFFFFFF @@ -68,17 +76,56 @@ static const struct { uint32_t mask; - QCryptoHashAlgorithm algo; + QCryptoHashAlgo algo; } hash_algo_map[] = { - { HASH_ALGO_MD5, QCRYPTO_HASH_ALG_MD5 }, - { HASH_ALGO_SHA1, QCRYPTO_HASH_ALG_SHA1 }, - { HASH_ALGO_SHA224, QCRYPTO_HASH_ALG_SHA224 }, - { HASH_ALGO_SHA256, QCRYPTO_HASH_ALG_SHA256 }, - { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512, QCRYPTO_HASH_ALG_SHA512 }, - { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384, QCRYPTO_HASH_ALG_SHA384 }, - { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256, QCRYPTO_HASH_ALG_SHA256 }, + { HASH_ALGO_MD5, QCRYPTO_HASH_ALGO_MD5 }, + { HASH_ALGO_SHA1, QCRYPTO_HASH_ALGO_SHA1 }, + { HASH_ALGO_SHA224, QCRYPTO_HASH_ALGO_SHA224 }, + { HASH_ALGO_SHA256, QCRYPTO_HASH_ALGO_SHA256 }, + { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512, + QCRYPTO_HASH_ALGO_SHA512 }, + { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384, + QCRYPTO_HASH_ALGO_SHA384 }, + { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256, + QCRYPTO_HASH_ALGO_SHA256 }, }; +static void hace_hexdump(const char *desc, const char *buf, size_t size) +{ + g_autoptr(GString) str = g_string_sized_new(64); + size_t len; + size_t i; + + for (i = 0; i < size; i += len) { + len = MIN(16, size - i); + g_string_truncate(str, 0); + qemu_hexdump_line(str, buf + i, len, 1, 4); + trace_aspeed_hace_hexdump(desc, i, str->str); + } +} + +static void hace_iov_hexdump(const char *desc, const struct iovec *iov, + const unsigned int iov_cnt) +{ + size_t size = 0; + char *buf; + int i; + + for (i = 0; i < iov_cnt; i++) { + size += iov[i].iov_len; + } + + buf = g_malloc(size); + + if (!buf) { + return; + } + + iov_to_buf(iov, iov_cnt, 0, buf, size); + hace_hexdump(desc, buf, size); + g_free(buf); +} + static int hash_algo_lookup(uint32_t reg) { int i; @@ -123,6 +170,11 @@ static bool has_padding(AspeedHACEState *s, struct iovec *iov, if (*total_msg_len <= s->total_req_len) { uint32_t padding_size = s->total_req_len - *total_msg_len; uint8_t *padding = iov->iov_base; + + if (padding_size > req_len) { + return false; + } + *pad_offset = req_len - padding_size; if (padding[*pad_offset] == 0x80) { return true; @@ -132,162 +184,269 @@ static bool has_padding(AspeedHACEState 
*s, struct iovec *iov, return false; } -static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id, - uint32_t *pad_offset) +static uint64_t hash_get_source_addr(AspeedHACEState *s) { - int i, iov_count; - if (*pad_offset != 0) { - s->iov_cache[s->iov_count].iov_base = iov[id].iov_base; - s->iov_cache[s->iov_count].iov_len = *pad_offset; - ++s->iov_count; - } - for (i = 0; i < s->iov_count; i++) { - iov[i].iov_base = s->iov_cache[i].iov_base; - iov[i].iov_len = s->iov_cache[i].iov_len; + AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s); + uint64_t src_addr = 0; + + src_addr = deposit64(src_addr, 0, 32, s->regs[R_HASH_SRC]); + if (ahc->has_dma64) { + src_addr = deposit64(src_addr, 32, 32, s->regs[R_HASH_SRC_HI]); } - iov_count = s->iov_count; - s->iov_count = 0; - s->total_req_len = 0; - return iov_count; + + return src_addr; } -/** - * Generate iov for accumulative mode. - * - * @param s aspeed hace state object - * @param iov iov of the current request - * @param id index of the current iov - * @param req_len length of the current request - * - * @return count of iov - */ -static int gen_acc_mode_iov(AspeedHACEState *s, struct iovec *iov, int id, - hwaddr *req_len) +static int hash_prepare_direct_iov(AspeedHACEState *s, struct iovec *iov, + bool acc_mode, bool *acc_final_request) { - uint32_t pad_offset; uint32_t total_msg_len; - s->total_req_len += *req_len; + uint32_t pad_offset; + uint64_t src; + void *haddr; + hwaddr plen; + int iov_idx; + + plen = s->regs[R_HASH_SRC_LEN]; + src = hash_get_source_addr(s); + trace_aspeed_hace_hash_addr("src", src); + haddr = address_space_map(&s->dram_as, src, &plen, false, + MEMTXATTRS_UNSPECIFIED); + if (haddr == NULL) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Unable to map address, addr=0x%" HWADDR_PRIx + " ,plen=0x%" HWADDR_PRIx "\n", + __func__, src, plen); + return -1; + } - if (has_padding(s, &iov[id], *req_len, &total_msg_len, &pad_offset)) { - if (s->iov_count) { - return reconstruct_iov(s, iov, id, &pad_offset); - } + iov[0].iov_base = haddr; + iov_idx = 1; - *req_len -= s->total_req_len - total_msg_len; - s->total_req_len = 0; - iov[id].iov_len = *req_len; + if (acc_mode) { + s->total_req_len += plen; + + if (has_padding(s, &iov[0], plen, &total_msg_len, + &pad_offset)) { + /* Padding being present indicates the final request */ + *acc_final_request = true; + iov[0].iov_len = pad_offset; + } else { + iov[0].iov_len = plen; + } } else { - s->iov_cache[s->iov_count].iov_base = iov->iov_base; - s->iov_cache[s->iov_count].iov_len = *req_len; - ++s->iov_count; + iov[0].iov_len = plen; } - return id + 1; + return iov_idx; } -static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode, - bool acc_mode) +static int hash_prepare_sg_iov(AspeedHACEState *s, struct iovec *iov, + bool acc_mode, bool *acc_final_request) { - struct iovec iov[ASPEED_HACE_MAX_SG]; - g_autofree uint8_t *digest_buf = NULL; - size_t digest_len = 0; - int niov = 0; - int i; + uint32_t total_msg_len; + uint32_t pad_offset; + uint32_t len = 0; + uint32_t sg_addr; + uint64_t src; + int iov_idx; + hwaddr plen; void *haddr; - if (sg_mode) { - uint32_t len = 0; - - for (i = 0; !(len & SG_LIST_LEN_LAST); i++) { - uint32_t addr, src; - hwaddr plen; - - if (i == ASPEED_HACE_MAX_SG) { - qemu_log_mask(LOG_GUEST_ERROR, - "aspeed_hace: guest failed to set end of sg list marker\n"); - break; - } - - src = s->regs[R_HASH_SRC] + (i * SG_LIST_ENTRY_SIZE); + src = hash_get_source_addr(s); + for (iov_idx = 0; !(len & SG_LIST_LEN_LAST); iov_idx++) { + if (iov_idx == 
ASPEED_HACE_MAX_SG) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Failed to set end of sg list marker\n", + __func__); + return -1; + } - len = address_space_ldl_le(&s->dram_as, src, + len = address_space_ldl_le(&s->dram_as, src, + MEMTXATTRS_UNSPECIFIED, NULL); + sg_addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE, MEMTXATTRS_UNSPECIFIED, NULL); + sg_addr &= SG_LIST_ADDR_MASK; + trace_aspeed_hace_hash_sg(iov_idx, src, sg_addr, len); + /* + * To maintain compatibility with older SoCs such as the AST2600, + * the AST2700 HW automatically set bit 34 of the 64-bit sg_addr. + * As a result, the firmware only needs to provide a 32-bit sg_addr + * containing bits [31:0]. This is sufficient for the AST2700, as + * it uses a DRAM offset rather than a DRAM address. + */ + plen = len & SG_LIST_LEN_MASK; + haddr = address_space_map(&s->dram_as, sg_addr, &plen, false, + MEMTXATTRS_UNSPECIFIED); - addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE, - MEMTXATTRS_UNSPECIFIED, NULL); - addr &= SG_LIST_ADDR_MASK; + if (haddr == NULL) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Unable to map address, sg_addr=0x%x, " + "plen=0x%" HWADDR_PRIx "\n", + __func__, sg_addr, plen); + return -1; + } - plen = len & SG_LIST_LEN_MASK; - haddr = address_space_map(&s->dram_as, addr, &plen, false, - MEMTXATTRS_UNSPECIFIED); - if (haddr == NULL) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__); - return; - } - iov[i].iov_base = haddr; - if (acc_mode) { - niov = gen_acc_mode_iov(s, iov, i, &plen); + src += SG_LIST_ENTRY_SIZE; + + iov[iov_idx].iov_base = haddr; + if (acc_mode) { + s->total_req_len += plen; + if (has_padding(s, &iov[iov_idx], plen, &total_msg_len, + &pad_offset)) { + /* Padding being present indicates the final request */ + *acc_final_request = true; + iov[iov_idx].iov_len = pad_offset; } else { - iov[i].iov_len = plen; + iov[iov_idx].iov_len = plen; } + } else { + iov[iov_idx].iov_len = plen; } - } else { - hwaddr len = s->regs[R_HASH_SRC_LEN]; + } - haddr = address_space_map(&s->dram_as, s->regs[R_HASH_SRC], - &len, false, MEMTXATTRS_UNSPECIFIED); - if (haddr == NULL) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__); + return iov_idx; +} + +static uint64_t hash_get_digest_addr(AspeedHACEState *s) +{ + AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s); + uint64_t digest_addr = 0; + + digest_addr = deposit64(digest_addr, 0, 32, s->regs[R_HASH_DIGEST]); + if (ahc->has_dma64) { + digest_addr = deposit64(digest_addr, 32, 32, s->regs[R_HASH_DIGEST_HI]); + } + + return digest_addr; +} + +static void hash_write_digest_and_unmap_iov(AspeedHACEState *s, + struct iovec *iov, + int iov_idx, + uint8_t *digest_buf, + size_t digest_len) +{ + uint64_t digest_addr = 0; + + digest_addr = hash_get_digest_addr(s); + trace_aspeed_hace_hash_addr("digest", digest_addr); + if (address_space_write(&s->dram_as, digest_addr, + MEMTXATTRS_UNSPECIFIED, + digest_buf, digest_len)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Failed to write digest to 0x%" HWADDR_PRIx "\n", + __func__, digest_addr); + } + + if (trace_event_get_state_backends(TRACE_ASPEED_HACE_HEXDUMP)) { + hace_hexdump("digest", (char *)digest_buf, digest_len); + } + + for (; iov_idx > 0; iov_idx--) { + address_space_unmap(&s->dram_as, iov[iov_idx - 1].iov_base, + iov[iov_idx - 1].iov_len, false, + iov[iov_idx - 1].iov_len); + } +} + +static void hash_execute_non_acc_mode(AspeedHACEState *s, int algo, + struct iovec *iov, int iov_idx) +{ + g_autofree uint8_t *digest_buf = NULL; + Error *local_err = NULL; + size_t 
digest_len = 0; + + if (qcrypto_hash_bytesv(algo, iov, iov_idx, &digest_buf, + &digest_len, &local_err) < 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: qcrypto hash bytesv failed : %s", + __func__, error_get_pretty(local_err)); + error_free(local_err); + return; + } + + hash_write_digest_and_unmap_iov(s, iov, iov_idx, digest_buf, digest_len); +} + +static void hash_execute_acc_mode(AspeedHACEState *s, int algo, + struct iovec *iov, int iov_idx, + bool final_request) +{ + g_autofree uint8_t *digest_buf = NULL; + Error *local_err = NULL; + size_t digest_len = 0; + + trace_aspeed_hace_hash_execute_acc_mode(final_request); + + if (s->hash_ctx == NULL) { + s->hash_ctx = qcrypto_hash_new(algo, &local_err); + if (s->hash_ctx == NULL) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto hash new failed : %s", + __func__, error_get_pretty(local_err)); + error_free(local_err); return; } - iov[0].iov_base = haddr; - iov[0].iov_len = len; - i = 1; - - if (s->iov_count) { - /* - * In aspeed sdk kernel driver, sg_mode is disabled in hash_final(). - * Thus if we received a request with sg_mode disabled, it is - * required to check whether cache is empty. If no, we should - * combine cached iov and the current iov. - */ - uint32_t total_msg_len; - uint32_t pad_offset; - s->total_req_len += len; - if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) { - niov = reconstruct_iov(s, iov, 0, &pad_offset); - } - } } - if (niov) { - i = niov; + if (qcrypto_hash_updatev(s->hash_ctx, iov, iov_idx, &local_err) < 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto hash updatev failed : %s", + __func__, error_get_pretty(local_err)); + error_free(local_err); + return; } - if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__); - return; + if (final_request) { + if (qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf, + &digest_len, &local_err)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: qcrypto hash finalize bytes failed : %s", + __func__, error_get_pretty(local_err)); + error_free(local_err); + local_err = NULL; + } + + qcrypto_hash_free(s->hash_ctx); + + s->hash_ctx = NULL; + s->total_req_len = 0; } - if (address_space_write(&s->dram_as, s->regs[R_HASH_DEST], - MEMTXATTRS_UNSPECIFIED, - digest_buf, digest_len)) { + hash_write_digest_and_unmap_iov(s, iov, iov_idx, digest_buf, digest_len); +} + +static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode, + bool acc_mode) +{ + QEMU_UNINITIALIZED struct iovec iov[ASPEED_HACE_MAX_SG]; + bool acc_final_request = false; + int iov_idx = -1; + + /* Prepares the iov for hashing operations based on the selected mode */ + if (sg_mode) { + iov_idx = hash_prepare_sg_iov(s, iov, acc_mode, &acc_final_request); + } else { + iov_idx = hash_prepare_direct_iov(s, iov, acc_mode, + &acc_final_request); + } + + if (iov_idx <= 0) { qemu_log_mask(LOG_GUEST_ERROR, - "aspeed_hace: address space write failed\n"); + "%s: Failed to prepare iov\n", __func__); + return; } - for (; i > 0; i--) { - address_space_unmap(&s->dram_as, iov[i - 1].iov_base, - iov[i - 1].iov_len, false, - iov[i - 1].iov_len); + if (trace_event_get_state_backends(TRACE_ASPEED_HACE_HEXDUMP)) { + hace_iov_hexdump("plaintext", iov, iov_idx); } - /* - * Set status bits to indicate completion. Testing shows hardware sets - * these irrespective of HASH_IRQ_EN. 
- */ - s->regs[R_STATUS] |= HASH_IRQ; + /* Executes the hash operation */ + if (acc_mode) { + hash_execute_acc_mode(s, algo, iov, iov_idx, acc_final_request); + } else { + hash_execute_non_acc_mode(s, algo, iov, iov_idx); + } } static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size) @@ -296,12 +455,7 @@ static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size) addr >>= 2; - if (addr >= ASPEED_HACE_NR_REGS) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n", - __func__, addr << 2); - return 0; - } + trace_aspeed_hace_read(addr << 2, s->regs[addr]); return s->regs[addr]; } @@ -314,12 +468,7 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data, addr >>= 2; - if (addr >= ASPEED_HACE_NR_REGS) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", - __func__, addr << 2); - return; - } + trace_aspeed_hace_write(addr << 2, data); switch (addr) { case R_STATUS: @@ -330,11 +479,20 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data, qemu_irq_lower(s->irq); } } + if (ahc->raise_crypt_interrupt_workaround) { + if (data & CRYPT_IRQ) { + data &= ~CRYPT_IRQ; + + if (s->regs[addr] & CRYPT_IRQ) { + qemu_irq_lower(s->irq); + } + } + } break; case R_HASH_SRC: data &= ahc->src_mask; break; - case R_HASH_DEST: + case R_HASH_DIGEST: data &= ahc->dest_mask; break; case R_HASH_KEY_BUFF: @@ -362,10 +520,16 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data, qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid hash algorithm selection 0x%"PRIx64"\n", __func__, data & ahc->hash_mask); - break; + } else { + do_hash_operation(s, algo, data & HASH_SG_EN, + ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM)); } - do_hash_operation(s, algo, data & HASH_SG_EN, - ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM)); + + /* + * Set status bits to indicate completion. Testing shows hardware sets + * these irrespective of HASH_IRQ_EN. 
+ */ + s->regs[R_STATUS] |= HASH_IRQ; if (data & HASH_IRQ_EN) { qemu_irq_raise(s->irq); @@ -375,6 +539,21 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data, case R_CRYPT_CMD: qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n", __func__); + if (ahc->raise_crypt_interrupt_workaround) { + s->regs[R_STATUS] |= CRYPT_IRQ; + if (data & CRYPT_IRQ_EN) { + qemu_irq_raise(s->irq); + } + } + break; + case R_HASH_SRC_HI: + data &= ahc->src_hi_mask; + break; + case R_HASH_DIGEST_HI: + data &= ahc->dest_hi_mask; + break; + case R_HASH_KEY_BUFF_HI: + data &= ahc->key_hi_mask; break; default: break; @@ -396,9 +575,14 @@ static const MemoryRegionOps aspeed_hace_ops = { static void aspeed_hace_reset(DeviceState *dev) { struct AspeedHACEState *s = ASPEED_HACE(dev); + AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s); - memset(s->regs, 0, sizeof(s->regs)); - s->iov_count = 0; + if (s->hash_ctx != NULL) { + qcrypto_hash_free(s->hash_ctx); + s->hash_ctx = NULL; + } + + memset(s->regs, 0, ahc->nr_regs << 2); s->total_req_len = 0; } @@ -406,11 +590,13 @@ static void aspeed_hace_realize(DeviceState *dev, Error **errp) { AspeedHACEState *s = ASPEED_HACE(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s); sysbus_init_irq(sbd, &s->irq); + s->regs = g_new(uint32_t, ahc->nr_regs); memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s, - TYPE_ASPEED_HACE, 0x1000); + TYPE_ASPEED_HACE, ahc->nr_regs << 2); if (!s->dram_mr) { error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set"); @@ -422,31 +608,37 @@ static void aspeed_hace_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->iomem); } -static Property aspeed_hace_properties[] = { +static const Property aspeed_hace_properties[] = { DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_aspeed_hace = { .name = TYPE_ASPEED_HACE, - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .fields = (const VMStateField[]) { - VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS), VMSTATE_UINT32(total_req_len, AspeedHACEState), - VMSTATE_UINT32(iov_count, AspeedHACEState), VMSTATE_END_OF_LIST(), } }; -static void aspeed_hace_class_init(ObjectClass *klass, void *data) +static void aspeed_hace_unrealize(DeviceState *dev) +{ + AspeedHACEState *s = ASPEED_HACE(dev); + + g_free(s->regs); + s->regs = NULL; +} + +static void aspeed_hace_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_hace_realize; - dc->reset = aspeed_hace_reset; + dc->unrealize = aspeed_hace_unrealize; + device_class_set_legacy_reset(dc, aspeed_hace_reset); device_class_set_props(dc, aspeed_hace_properties); dc->vmsd = &vmstate_aspeed_hace; } @@ -459,13 +651,14 @@ static const TypeInfo aspeed_hace_info = { .class_size = sizeof(AspeedHACEClass) }; -static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data) +static void aspeed_ast2400_hace_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass); dc->desc = "AST2400 Hash and Crypto Engine"; + ahc->nr_regs = 0x64 >> 2; ahc->src_mask = 0x0FFFFFFF; ahc->dest_mask = 0x0FFFFFF8; ahc->key_mask = 0x0FFFFFC0; @@ -478,13 +671,14 @@ static const TypeInfo aspeed_ast2400_hace_info = { .class_init = aspeed_ast2400_hace_class_init, }; -static void 
aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data) +static void aspeed_ast2500_hace_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass); dc->desc = "AST2500 Hash and Crypto Engine"; + ahc->nr_regs = 0x64 >> 2; ahc->src_mask = 0x3fffffff; ahc->dest_mask = 0x3ffffff8; ahc->key_mask = 0x3FFFFFC0; @@ -497,13 +691,14 @@ static const TypeInfo aspeed_ast2500_hace_info = { .class_init = aspeed_ast2500_hace_class_init, }; -static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data) +static void aspeed_ast2600_hace_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass); dc->desc = "AST2600 Hash and Crypto Engine"; + ahc->nr_regs = 0x64 >> 2; ahc->src_mask = 0x7FFFFFFF; ahc->dest_mask = 0x7FFFFFF8; ahc->key_mask = 0x7FFFFFF8; @@ -516,13 +711,14 @@ static const TypeInfo aspeed_ast2600_hace_info = { .class_init = aspeed_ast2600_hace_class_init, }; -static void aspeed_ast1030_hace_class_init(ObjectClass *klass, void *data) +static void aspeed_ast1030_hace_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass); dc->desc = "AST1030 Hash and Crypto Engine"; + ahc->nr_regs = 0x64 >> 2; ahc->src_mask = 0x7FFFFFFF; ahc->dest_mask = 0x7FFFFFF8; ahc->key_mask = 0x7FFFFFF8; @@ -535,12 +731,58 @@ static const TypeInfo aspeed_ast1030_hace_info = { .class_init = aspeed_ast1030_hace_class_init, }; +static void aspeed_ast2700_hace_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass); + + dc->desc = "AST2700 Hash and Crypto Engine"; + + ahc->nr_regs = 0x9C >> 2; + ahc->src_mask = 0x7FFFFFFF; + ahc->dest_mask = 0x7FFFFFF8; + ahc->key_mask = 0x7FFFFFF8; + ahc->hash_mask = 0x00147FFF; + + /* + * The AST2700 supports a maximum DRAM size of 8 GB, with a DRAM + * addressable range from 0x0_0000_0000 to 0x1_FFFF_FFFF. Since this range + * fits within 34 bits, only bits [33:0] are needed to store the DRAM + * offset. To optimize address storage, the high physical address bits + * [1:0] of the source, digest and key buffer addresses are stored as + * dram_offset bits [33:32]. + * + * This approach eliminates the need to reduce the high part of the DRAM + * physical address for DMA operations. Previously, this was calculated as + * (high physical address bits [7:0] - 4), since the DRAM start address is + * 0x4_00000000, making the high part address [7:0] - 4. + */ + ahc->src_hi_mask = 0x00000003; + ahc->dest_hi_mask = 0x00000003; + ahc->key_hi_mask = 0x00000003; + + /* + * Currently, it does not support the CRYPT command. Instead, it only + * sends an interrupt to notify the firmware that the crypt command + * has completed. It is a temporary workaround. 
+ */ + ahc->raise_crypt_interrupt_workaround = true; + ahc->has_dma64 = true; +} + +static const TypeInfo aspeed_ast2700_hace_info = { + .name = TYPE_ASPEED_AST2700_HACE, + .parent = TYPE_ASPEED_HACE, + .class_init = aspeed_ast2700_hace_class_init, +}; + static void aspeed_hace_register_types(void) { type_register_static(&aspeed_ast2400_hace_info); type_register_static(&aspeed_ast2500_hace_info); type_register_static(&aspeed_ast2600_hace_info); type_register_static(&aspeed_ast1030_hace_info); + type_register_static(&aspeed_ast2700_hace_info); type_register_static(&aspeed_hace_info); } diff --git a/hw/misc/aspeed_i3c.c b/hw/misc/aspeed_i3c.c index 827c9e522d3..3bef1c84dd1 100644 --- a/hw/misc/aspeed_i3c.c +++ b/hw/misc/aspeed_i3c.c @@ -323,18 +323,17 @@ static void aspeed_i3c_realize(DeviceState *dev, Error **errp) } -static Property aspeed_i3c_device_properties[] = { +static const Property aspeed_i3c_device_properties[] = { DEFINE_PROP_UINT8("device-id", AspeedI3CDevice, id, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_i3c_device_class_init(ObjectClass *klass, void *data) +static void aspeed_i3c_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->desc = "Aspeed I3C Device"; dc->realize = aspeed_i3c_device_realize; - dc->reset = aspeed_i3c_device_reset; + device_class_set_legacy_reset(dc, aspeed_i3c_device_reset); device_class_set_props(dc, aspeed_i3c_device_properties); } @@ -357,12 +356,12 @@ static const VMStateDescription vmstate_aspeed_i3c = { } }; -static void aspeed_i3c_class_init(ObjectClass *klass, void *data) +static void aspeed_i3c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_i3c_realize; - dc->reset = aspeed_i3c_reset; + device_class_set_legacy_reset(dc, aspeed_i3c_reset); dc->desc = "Aspeed I3C Controller"; dc->vmsd = &vmstate_aspeed_i3c; } diff --git a/hw/misc/aspeed_lpc.c b/hw/misc/aspeed_lpc.c index 193f0dea591..78406dae248 100644 --- a/hw/misc/aspeed_lpc.c +++ b/hw/misc/aspeed_lpc.c @@ -454,17 +454,16 @@ static const VMStateDescription vmstate_aspeed_lpc = { } }; -static Property aspeed_lpc_properties[] = { +static const Property aspeed_lpc_properties[] = { DEFINE_PROP_UINT32("hicr7", AspeedLPCState, hicr7, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_lpc_class_init(ObjectClass *klass, void *data) +static void aspeed_lpc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_lpc_realize; - dc->reset = aspeed_lpc_reset; + device_class_set_legacy_reset(dc, aspeed_lpc_reset); dc->desc = "Aspeed LPC Controller", dc->vmsd = &vmstate_aspeed_lpc; device_class_set_props(dc, aspeed_lpc_properties); diff --git a/hw/misc/aspeed_peci.c b/hw/misc/aspeed_peci.c index 93cc672e968..a7a449a9236 100644 --- a/hw/misc/aspeed_peci.c +++ b/hw/misc/aspeed_peci.c @@ -130,12 +130,12 @@ static void aspeed_peci_reset(DeviceState *dev) memset(s->regs, 0, sizeof(s->regs)); } -static void aspeed_peci_class_init(ObjectClass *klass, void *data) +static void aspeed_peci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_peci_realize; - dc->reset = aspeed_peci_reset; + device_class_set_legacy_reset(dc, aspeed_peci_reset); dc->desc = "Aspeed PECI Controller"; } diff --git a/hw/misc/aspeed_sbc.c b/hw/misc/aspeed_sbc.c index 8bb1f90e4e7..a7d101ba71f 100644 --- a/hw/misc/aspeed_sbc.c +++ b/hw/misc/aspeed_sbc.c @@ -136,18 +136,17 @@ static const VMStateDescription 
vmstate_aspeed_sbc = { } }; -static Property aspeed_sbc_properties[] = { +static const Property aspeed_sbc_properties[] = { DEFINE_PROP_BOOL("emmc-abr", AspeedSBCState, emmc_abr, 0), DEFINE_PROP_UINT32("signing-settings", AspeedSBCState, signing_settings, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_sbc_class_init(ObjectClass *klass, void *data) +static void aspeed_sbc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_sbc_realize; - dc->reset = aspeed_sbc_reset; + device_class_set_legacy_reset(dc, aspeed_sbc_reset); dc->vmsd = &vmstate_aspeed_sbc; device_class_set_props(dc, aspeed_sbc_properties); } @@ -160,7 +159,7 @@ static const TypeInfo aspeed_sbc_info = { .class_size = sizeof(AspeedSBCClass) }; -static void aspeed_ast2600_sbc_class_init(ObjectClass *klass, void *data) +static void aspeed_ast2600_sbc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c index 451e837272d..a0ab5eed8f1 100644 --- a/hw/misc/aspeed_scu.c +++ b/hw/misc/aspeed_scu.c @@ -91,6 +91,7 @@ #define BMC_DEV_ID TO_REG(0x1A4) #define AST2600_PROT_KEY TO_REG(0x00) +#define AST2600_PROT_KEY2 TO_REG(0x10) #define AST2600_SILICON_REV TO_REG(0x04) #define AST2600_SILICON_REV2 TO_REG(0x14) #define AST2600_SYS_RST_CTRL TO_REG(0x40) @@ -157,6 +158,7 @@ #define AST2700_SCU_FREQ_CNTR TO_REG(0x3b0) #define AST2700_SCU_CPU_SCRATCH_0 TO_REG(0x780) #define AST2700_SCU_CPU_SCRATCH_1 TO_REG(0x784) +#define AST2700_SCU_VGA_SCRATCH_0 TO_REG(0x900) #define AST2700_SCUIO_CLK_STOP_CTL_1 TO_REG(0x240) #define AST2700_SCUIO_CLK_STOP_CLR_1 TO_REG(0x244) @@ -175,6 +177,7 @@ #define AST2700_SCUIO_UARTCLK_GEN TO_REG(0x330) #define AST2700_SCUIO_HUARTCLK_GEN TO_REG(0x334) #define AST2700_SCUIO_CLK_DUTY_MEAS_RST TO_REG(0x388) +#define AST2700_SCUIO_FREQ_CNT_CTL TO_REG(0x3A0) #define SCU_IO_REGION_SIZE 0x1000 @@ -426,6 +429,10 @@ static const MemoryRegionOps aspeed_ast2400_scu_ops = { .read = aspeed_scu_read, .write = aspeed_ast2400_scu_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, .valid = { .min_access_size = 1, .max_access_size = 4, @@ -436,7 +443,9 @@ static const MemoryRegionOps aspeed_ast2500_scu_ops = { .read = aspeed_scu_read, .write = aspeed_ast2500_scu_write, .endianness = DEVICE_LITTLE_ENDIAN, - .valid.min_access_size = 4, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .valid.min_access_size = 1, .valid.max_access_size = 4, .valid.unaligned = false, }; @@ -559,6 +568,8 @@ static uint32_t aspeed_silicon_revs[] = { AST2700_A0_SILICON_REV, AST2720_A0_SILICON_REV, AST2750_A0_SILICON_REV, + AST2700_A1_SILICON_REV, + AST2750_A1_SILICON_REV, }; bool is_supported_silicon_rev(uint32_t silicon_rev) @@ -602,19 +613,18 @@ static const VMStateDescription vmstate_aspeed_scu = { } }; -static Property aspeed_scu_properties[] = { +static const Property aspeed_scu_properties[] = { DEFINE_PROP_UINT32("silicon-rev", AspeedSCUState, silicon_rev, 0), DEFINE_PROP_UINT32("hw-strap1", AspeedSCUState, hw_strap1, 0), DEFINE_PROP_UINT32("hw-strap2", AspeedSCUState, hw_strap2, 0), DEFINE_PROP_UINT32("hw-prot-key", AspeedSCUState, hw_prot_key, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_scu_class_init(ObjectClass *klass, void *data) +static void aspeed_scu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_scu_realize; - dc->reset = aspeed_scu_reset; + 
device_class_set_legacy_reset(dc, aspeed_scu_reset); dc->desc = "ASPEED System Control Unit"; dc->vmsd = &vmstate_aspeed_scu; device_class_set_props(dc, aspeed_scu_properties); @@ -629,7 +639,7 @@ static const TypeInfo aspeed_scu_info = { .abstract = true, }; -static void aspeed_2400_scu_class_init(ObjectClass *klass, void *data) +static void aspeed_2400_scu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass); @@ -651,7 +661,7 @@ static const TypeInfo aspeed_2400_scu_info = { .class_init = aspeed_2400_scu_class_init, }; -static void aspeed_2500_scu_class_init(ObjectClass *klass, void *data) +static void aspeed_2500_scu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass); @@ -714,6 +724,8 @@ static void aspeed_ast2600_scu_write(void *opaque, hwaddr offset, int reg = TO_REG(offset); /* Truncate here so bitwise operations below behave as expected */ uint32_t data = data64; + bool prot_data_state = data == ASPEED_SCU_PROT_KEY; + bool unlocked = s->regs[AST2600_PROT_KEY] && s->regs[AST2600_PROT_KEY2]; if (reg >= ASPEED_AST2600_SCU_NR_REGS) { qemu_log_mask(LOG_GUEST_ERROR, @@ -722,15 +734,24 @@ static void aspeed_ast2600_scu_write(void *opaque, hwaddr offset, return; } - if (reg > PROT_KEY && !s->regs[PROT_KEY]) { + if ((reg != AST2600_PROT_KEY && reg != AST2600_PROT_KEY2) && !unlocked) { qemu_log_mask(LOG_GUEST_ERROR, "%s: SCU is locked!\n", __func__); + return; } trace_aspeed_scu_write(offset, size, data); switch (reg) { case AST2600_PROT_KEY: - s->regs[reg] = (data == ASPEED_SCU_PROT_KEY) ? 1 : 0; + /* + * Writing a value to SCU000 will modify both protection + * registers to each protection register individually. 
+ */ + s->regs[AST2600_PROT_KEY] = prot_data_state; + s->regs[AST2600_PROT_KEY2] = prot_data_state; + return; + case AST2600_PROT_KEY2: + s->regs[AST2600_PROT_KEY2] = prot_data_state; return; case AST2600_HW_STRAP1: case AST2600_HW_STRAP2: @@ -777,7 +798,9 @@ static const MemoryRegionOps aspeed_ast2600_scu_ops = { .read = aspeed_ast2600_scu_read, .write = aspeed_ast2600_scu_write, .endianness = DEVICE_LITTLE_ENDIAN, - .valid.min_access_size = 4, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .valid.min_access_size = 1, .valid.max_access_size = 4, .valid.unaligned = false, }; @@ -825,13 +848,13 @@ static void aspeed_ast2600_scu_reset(DeviceState *dev) s->regs[PROT_KEY] = s->hw_prot_key; } -static void aspeed_2600_scu_class_init(ObjectClass *klass, void *data) +static void aspeed_2600_scu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass); dc->desc = "ASPEED 2600 System Control Unit"; - dc->reset = aspeed_ast2600_scu_reset; + device_class_set_legacy_reset(dc, aspeed_ast2600_scu_reset); asc->resets = ast2600_a3_resets; asc->calc_hpll = aspeed_2600_scu_calc_hpll; asc->get_apb = aspeed_2600_scu_get_apb_freq; @@ -904,14 +927,14 @@ static const MemoryRegionOps aspeed_ast2700_scu_ops = { .read = aspeed_ast2700_scu_read, .write = aspeed_ast2700_scu_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, .valid.min_access_size = 1, .valid.max_access_size = 8, .valid.unaligned = false, }; static const uint32_t ast2700_a0_resets[ASPEED_AST2700_SCU_NR_REGS] = { - [AST2700_SILICON_REV] = AST2700_A0_SILICON_REV, - [AST2700_HW_STRAP1] = 0x00000800, [AST2700_HW_STRAP1_CLR] = 0xFFF0FFF0, [AST2700_HW_STRAP1_LOCK] = 0x00000FFF, [AST2700_HW_STRAP1_SEC1] = 0x000000FF, @@ -931,6 +954,7 @@ static const uint32_t ast2700_a0_resets[ASPEED_AST2700_SCU_NR_REGS] = { [AST2700_SCU_FREQ_CNTR] = 0x000375eb, [AST2700_SCU_CPU_SCRATCH_0] = 0x00000000, [AST2700_SCU_CPU_SCRATCH_1] = 0x00000004, + [AST2700_SCU_VGA_SCRATCH_0] = 0x00000040, }; static void aspeed_ast2700_scu_reset(DeviceState *dev) @@ -939,15 +963,17 @@ static void aspeed_ast2700_scu_reset(DeviceState *dev) AspeedSCUClass *asc = ASPEED_SCU_GET_CLASS(dev); memcpy(s->regs, asc->resets, asc->nr_regs * 4); + s->regs[AST2700_SILICON_REV] = s->silicon_rev; + s->regs[AST2700_HW_STRAP1] = s->hw_strap1; } -static void aspeed_2700_scu_class_init(ObjectClass *klass, void *data) +static void aspeed_2700_scu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass); dc->desc = "ASPEED 2700 System Control Unit"; - dc->reset = aspeed_ast2700_scu_reset; + device_class_set_legacy_reset(dc, aspeed_ast2700_scu_reset); asc->resets = ast2700_a0_resets; asc->calc_hpll = aspeed_2600_scu_calc_hpll; asc->get_apb = aspeed_2700_scu_get_apb_freq; @@ -1009,6 +1035,10 @@ static void aspeed_ast2700_scuio_write(void *opaque, hwaddr offset, s->regs[reg - 1] ^= data; updated = true; break; + case AST2700_SCUIO_FREQ_CNT_CTL: + s->regs[reg] = deposit32(s->regs[reg], 6, 1, !!(data & BIT(1))); + updated = true; + break; default: qemu_log_mask(LOG_GUEST_ERROR, "%s: Unhandled write at offset 0x%" HWADDR_PRIx "\n", @@ -1025,14 +1055,14 @@ static const MemoryRegionOps aspeed_ast2700_scuio_ops = { .read = aspeed_ast2700_scuio_read, .write = aspeed_ast2700_scuio_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, .valid.min_access_size = 1, 
.valid.max_access_size = 8, .valid.unaligned = false, }; static const uint32_t ast2700_a0_resets_io[ASPEED_AST2700_SCU_NR_REGS] = { - [AST2700_SILICON_REV] = 0x06000003, - [AST2700_HW_STRAP1] = 0x00000504, [AST2700_HW_STRAP1_CLR] = 0xFFF0FFF0, [AST2700_HW_STRAP1_LOCK] = 0x00000FFF, [AST2700_HW_STRAP1_SEC1] = 0x000000FF, @@ -1053,15 +1083,16 @@ static const uint32_t ast2700_a0_resets_io[ASPEED_AST2700_SCU_NR_REGS] = { [AST2700_SCUIO_UARTCLK_GEN] = 0x00014506, [AST2700_SCUIO_HUARTCLK_GEN] = 0x000145c0, [AST2700_SCUIO_CLK_DUTY_MEAS_RST] = 0x0c9100d2, + [AST2700_SCUIO_FREQ_CNT_CTL] = 0x00000080, }; -static void aspeed_2700_scuio_class_init(ObjectClass *klass, void *data) +static void aspeed_2700_scuio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass); dc->desc = "ASPEED 2700 System Control Unit I/O"; - dc->reset = aspeed_ast2700_scu_reset; + device_class_set_legacy_reset(dc, aspeed_ast2700_scu_reset); asc->resets = ast2700_a0_resets_io; asc->calc_hpll = aspeed_2600_scu_calc_hpll; asc->get_apb = aspeed_2700_scuio_get_apb_freq; @@ -1113,13 +1144,13 @@ static void aspeed_ast1030_scu_reset(DeviceState *dev) s->regs[PROT_KEY] = s->hw_prot_key; } -static void aspeed_1030_scu_class_init(ObjectClass *klass, void *data) +static void aspeed_1030_scu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass); dc->desc = "ASPEED 1030 System Control Unit"; - dc->reset = aspeed_ast1030_scu_reset; + device_class_set_legacy_reset(dc, aspeed_ast1030_scu_reset); asc->resets = ast1030_a1_resets; asc->calc_hpll = aspeed_2600_scu_calc_hpll; asc->get_apb = aspeed_1030_scu_get_apb_freq; diff --git a/hw/misc/aspeed_sdmc.c b/hw/misc/aspeed_sdmc.c index ebf139cb5c9..dff7cc362d3 100644 --- a/hw/misc/aspeed_sdmc.c +++ b/hw/misc/aspeed_sdmc.c @@ -294,17 +294,16 @@ static const VMStateDescription vmstate_aspeed_sdmc = { } }; -static Property aspeed_sdmc_properties[] = { +static const Property aspeed_sdmc_properties[] = { DEFINE_PROP_UINT64("max-ram-size", AspeedSDMCState, max_ram_size, 0), DEFINE_PROP_BOOL("unlocked", AspeedSDMCState, unlocked, false), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_sdmc_class_init(ObjectClass *klass, void *data) +static void aspeed_sdmc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_sdmc_realize; - dc->reset = aspeed_sdmc_reset; + device_class_set_legacy_reset(dc, aspeed_sdmc_reset); dc->desc = "ASPEED SDRAM Memory Controller"; dc->vmsd = &vmstate_aspeed_sdmc; device_class_set_props(dc, aspeed_sdmc_properties); @@ -381,7 +380,7 @@ static void aspeed_2400_sdmc_write(AspeedSDMCState *s, uint32_t reg, static const uint64_t aspeed_2400_ram_sizes[] = { 64 * MiB, 128 * MiB, 256 * MiB, 512 * MiB, 0}; -static void aspeed_2400_sdmc_class_init(ObjectClass *klass, void *data) +static void aspeed_2400_sdmc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSDMCClass *asc = ASPEED_SDMC_CLASS(klass); @@ -449,7 +448,7 @@ static void aspeed_2500_sdmc_write(AspeedSDMCState *s, uint32_t reg, static const uint64_t aspeed_2500_ram_sizes[] = { 128 * MiB, 256 * MiB, 512 * MiB, 1024 * MiB, 0}; -static void aspeed_2500_sdmc_class_init(ObjectClass *klass, void *data) +static void aspeed_2500_sdmc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSDMCClass *asc = ASPEED_SDMC_CLASS(klass); @@ -543,7 
+542,7 @@ static void aspeed_2600_sdmc_write(AspeedSDMCState *s, uint32_t reg, static const uint64_t aspeed_2600_ram_sizes[] = { 256 * MiB, 512 * MiB, 1024 * MiB, 2048 * MiB, 0}; -static void aspeed_2600_sdmc_class_init(ObjectClass *klass, void *data) +static void aspeed_2600_sdmc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSDMCClass *asc = ASPEED_SDMC_CLASS(klass); @@ -571,6 +570,9 @@ static void aspeed_2700_sdmc_reset(DeviceState *dev) /* Set ram size bit and defaults values */ s->regs[R_MAIN_CONF] = asc->compute_conf(s, 0); + /* Skipping dram init */ + s->regs[R_MAIN_CONTROL] = BIT(16); + if (s->unlocked) { s->regs[R_2700_PROT] = PROT_UNLOCKED; } @@ -671,13 +673,13 @@ static const uint64_t aspeed_2700_ram_sizes[] = { 256 * MiB, 512 * MiB, 1024 * MiB, 2048 * MiB, 4096 * MiB, 8192 * MiB, 0}; -static void aspeed_2700_sdmc_class_init(ObjectClass *klass, void *data) +static void aspeed_2700_sdmc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSDMCClass *asc = ASPEED_SDMC_CLASS(klass); dc->desc = "ASPEED 2700 SDRAM Memory Controller"; - dc->reset = aspeed_2700_sdmc_reset; + device_class_set_legacy_reset(dc, aspeed_2700_sdmc_reset); asc->is_bus64bit = true; asc->max_ram_size = 8 * GiB; diff --git a/hw/misc/aspeed_sli.c b/hw/misc/aspeed_sli.c index fe720ead509..c51484035e0 100644 --- a/hw/misc/aspeed_sli.c +++ b/hw/misc/aspeed_sli.c @@ -124,7 +124,7 @@ static void aspeed_sliio_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->iomem); } -static void aspeed_sli_class_init(ObjectClass *klass, void *data) +static void aspeed_sli_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -140,14 +140,14 @@ static const TypeInfo aspeed_sli_info = { .abstract = true, }; -static void aspeed_2700_sli_class_init(ObjectClass *klass, void *data) +static void aspeed_2700_sli_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->desc = "AST2700 SLI Controller"; } -static void aspeed_2700_sliio_class_init(ObjectClass *klass, void *data) +static void aspeed_2700_sliio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/aspeed_xdma.c b/hw/misc/aspeed_xdma.c index 76ab8467ddb..cc03c422b0e 100644 --- a/hw/misc/aspeed_xdma.c +++ b/hw/misc/aspeed_xdma.c @@ -150,7 +150,7 @@ static const VMStateDescription aspeed_xdma_vmstate = { }, }; -static void aspeed_2600_xdma_class_init(ObjectClass *klass, void *data) +static void aspeed_2600_xdma_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedXDMAClass *axc = ASPEED_XDMA_CLASS(klass); @@ -173,7 +173,7 @@ static const TypeInfo aspeed_2600_xdma_info = { .class_init = aspeed_2600_xdma_class_init, }; -static void aspeed_2500_xdma_class_init(ObjectClass *klass, void *data) +static void aspeed_2500_xdma_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedXDMAClass *axc = ASPEED_XDMA_CLASS(klass); @@ -195,7 +195,7 @@ static const TypeInfo aspeed_2500_xdma_info = { .class_init = aspeed_2500_xdma_class_init, }; -static void aspeed_2400_xdma_class_init(ObjectClass *klass, void *data) +static void aspeed_2400_xdma_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedXDMAClass *axc = ASPEED_XDMA_CLASS(klass); @@ -217,12 +217,12 @@ static const TypeInfo aspeed_2400_xdma_info = { .class_init = 
aspeed_2400_xdma_class_init, }; -static void aspeed_xdma_class_init(ObjectClass *classp, void *data) +static void aspeed_xdma_class_init(ObjectClass *classp, const void *data) { DeviceClass *dc = DEVICE_CLASS(classp); dc->realize = aspeed_xdma_realize; - dc->reset = aspeed_xdma_reset; + device_class_set_legacy_reset(dc, aspeed_xdma_reset); dc->vmsd = &aspeed_xdma_vmstate; } diff --git a/hw/misc/auxbus.c b/hw/misc/auxbus.c index 28d50d9d097..877f3456062 100644 --- a/hw/misc/auxbus.c +++ b/hw/misc/auxbus.c @@ -50,7 +50,7 @@ static void aux_slave_dev_print(Monitor *mon, DeviceState *dev, int indent); static inline I2CBus *aux_bridge_get_i2c_bus(AUXTOI2CState *bridge); /* aux-bus implementation (internal not public) */ -static void aux_bus_class_init(ObjectClass *klass, void *data) +static void aux_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); @@ -256,7 +256,7 @@ struct AUXTOI2CState { I2CBus *i2c_bus; }; -static void aux_bridge_class_init(ObjectClass *oc, void *data) +static void aux_bridge_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -311,7 +311,7 @@ void aux_init_mmio(AUXSlave *aux_slave, MemoryRegion *mmio) aux_slave->mmio = mmio; } -static void aux_slave_class_init(ObjectClass *klass, void *data) +static void aux_slave_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); diff --git a/hw/misc/avr_power.c b/hw/misc/avr_power.c index a5412f2cfe6..411f016c997 100644 --- a/hw/misc/avr_power.c +++ b/hw/misc/avr_power.c @@ -90,11 +90,11 @@ static void avr_mask_init(Object *dev) s->val = 0x00; } -static void avr_mask_class_init(ObjectClass *klass, void *data) +static void avr_mask_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = avr_mask_reset; + device_class_set_legacy_reset(dc, avr_mask_reset); } static const TypeInfo avr_mask_info = { diff --git a/hw/misc/axp2xx.c b/hw/misc/axp2xx.c index af646878cd2..46d17712dde 100644 --- a/hw/misc/axp2xx.c +++ b/hw/misc/axp2xx.c @@ -225,7 +225,7 @@ static const VMStateDescription vmstate_axp2xx = { } }; -static void axp2xx_class_init(ObjectClass *oc, void *data) +static void axp2xx_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); I2CSlaveClass *isc = I2C_SLAVE_CLASS(oc); @@ -247,7 +247,7 @@ static const TypeInfo axp2xx_info = { .abstract = true, }; -static void axp209_class_init(ObjectClass *oc, void *data) +static void axp209_class_init(ObjectClass *oc, const void *data) { AXP2xxClass *sc = AXP2XX_CLASS(oc); @@ -260,7 +260,7 @@ static const TypeInfo axp209_info = { .class_init = axp209_class_init }; -static void axp221_class_init(ObjectClass *oc, void *data) +static void axp221_class_init(ObjectClass *oc, const void *data) { AXP2xxClass *sc = AXP2XX_CLASS(oc); diff --git a/hw/misc/bcm2835_cprman.c b/hw/misc/bcm2835_cprman.c index 91c8f7bd170..efe6f900dbf 100644 --- a/hw/misc/bcm2835_cprman.c +++ b/hw/misc/bcm2835_cprman.c @@ -131,12 +131,14 @@ static const VMStateDescription pll_vmstate = { } }; -static void pll_class_init(ObjectClass *klass, void *data) +static void pll_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = pll_reset; + device_class_set_legacy_reset(dc, pll_reset); dc->vmsd = &pll_vmstate; + /* Reason: Part of BCM2835CprmanState component */ + dc->user_creatable = false; } static const TypeInfo cprman_pll_info = { @@ -235,12 +237,14 @@ static const VMStateDescription 
pll_channel_vmstate = { } }; -static void pll_channel_class_init(ObjectClass *klass, void *data) +static void pll_channel_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = pll_channel_reset; + device_class_set_legacy_reset(dc, pll_channel_reset); dc->vmsd = &pll_channel_vmstate; + /* Reason: Part of BCM2835CprmanState component */ + dc->user_creatable = false; } static const TypeInfo cprman_pll_channel_info = { @@ -356,12 +360,14 @@ static const VMStateDescription clock_mux_vmstate = { } }; -static void clock_mux_class_init(ObjectClass *klass, void *data) +static void clock_mux_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = clock_mux_reset; + device_class_set_legacy_reset(dc, clock_mux_reset); dc->vmsd = &clock_mux_vmstate; + /* Reason: Part of BCM2835CprmanState component */ + dc->user_creatable = false; } static const TypeInfo cprman_clock_mux_info = { @@ -411,11 +417,13 @@ static const VMStateDescription dsi0hsck_mux_vmstate = { } }; -static void dsi0hsck_mux_class_init(ObjectClass *klass, void *data) +static void dsi0hsck_mux_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &dsi0hsck_mux_vmstate; + /* Reason: Part of BCM2835CprmanState component */ + dc->user_creatable = false; } static const TypeInfo cprman_dsi0hsck_mux_info = { @@ -778,17 +786,16 @@ static const VMStateDescription cprman_vmstate = { } }; -static Property cprman_properties[] = { +static const Property cprman_properties[] = { DEFINE_PROP_UINT32("xosc-freq-hz", BCM2835CprmanState, xosc_freq, 19200000), - DEFINE_PROP_END_OF_LIST() }; -static void cprman_class_init(ObjectClass *klass, void *data) +static void cprman_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = cprman_realize; - dc->reset = cprman_reset; + device_class_set_legacy_reset(dc, cprman_reset); dc->vmsd = &cprman_vmstate; device_class_set_props(dc, cprman_properties); } diff --git a/hw/misc/bcm2835_mbox.c b/hw/misc/bcm2835_mbox.c index 67bfc3bd719..603eaaa710d 100644 --- a/hw/misc/bcm2835_mbox.c +++ b/hw/misc/bcm2835_mbox.c @@ -314,12 +314,12 @@ static void bcm2835_mbox_realize(DeviceState *dev, Error **errp) bcm2835_mbox_reset(dev); } -static void bcm2835_mbox_class_init(ObjectClass *klass, void *data) +static void bcm2835_mbox_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = bcm2835_mbox_realize; - dc->reset = bcm2835_mbox_reset; + device_class_set_legacy_reset(dc, bcm2835_mbox_reset); dc->vmsd = &vmstate_bcm2835_mbox; } diff --git a/hw/misc/bcm2835_mphi.c b/hw/misc/bcm2835_mphi.c index f1eeda27862..55d79e7e876 100644 --- a/hw/misc/bcm2835_mphi.c +++ b/hw/misc/bcm2835_mphi.c @@ -166,12 +166,12 @@ const VMStateDescription vmstate_mphi_state = { } }; -static void mphi_class_init(ObjectClass *klass, void *data) +static void mphi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = mphi_realize; - dc->reset = mphi_reset; + device_class_set_legacy_reset(dc, mphi_reset); dc->vmsd = &vmstate_mphi_state; } diff --git a/hw/misc/bcm2835_powermgt.c b/hw/misc/bcm2835_powermgt.c index 1649da86689..3ec7abad0e0 100644 --- a/hw/misc/bcm2835_powermgt.c +++ b/hw/misc/bcm2835_powermgt.c @@ -13,7 +13,7 @@ #include "qemu/module.h" #include "hw/misc/bcm2835_powermgt.h" #include "migration/vmstate.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" 
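/*
 * Editorial aside (not part of the patch): the hw/misc hunks above repeat
 * one mechanical modernization. Below is a minimal sketch of the resulting
 * pattern, assuming QEMU's qdev API exactly as used in those hunks; the
 * "demo" names are hypothetical and only illustrate the shape of the change.
 */
#if 0 /* illustration only */
#include "qemu/osdep.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"

typedef struct DemoState {
    DeviceState parent_obj;
    uint32_t some_prop;
} DemoState;

static void demo_reset(DeviceState *dev)
{
    /* legacy reset handler, now registered through a helper */
}

/*
 * Property arrays become const and no longer end with
 * DEFINE_PROP_END_OF_LIST(); device_class_set_props() derives the
 * length from the array itself.
 */
static const Property demo_properties[] = {
    DEFINE_PROP_UINT32("some-prop", DemoState, some_prop, 0),
};

/* class_init now takes a const data pointer */
static void demo_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /*
     * Replaces the removed "dc->reset = demo_reset;" assignment: the helper
     * keeps legacy handlers working while the DeviceClass::reset field is
     * phased out in favour of the Resettable interface.
     */
    device_class_set_legacy_reset(dc, demo_reset);
    device_class_set_props(dc, demo_properties);
}
#endif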
#define PASSWORD 0x5a000000 #define PASSWORD_MASK 0xff000000 @@ -136,11 +136,11 @@ static void bcm2835_powermgt_reset(DeviceState *dev) s->wdog = 0x00000000; } -static void bcm2835_powermgt_class_init(ObjectClass *klass, void *data) +static void bcm2835_powermgt_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = bcm2835_powermgt_reset; + device_class_set_legacy_reset(dc, bcm2835_powermgt_reset); dc->vmsd = &vmstate_bcm2835_powermgt; } diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c index 8ca3128f29b..a21c6a541c0 100644 --- a/hw/misc/bcm2835_property.c +++ b/hw/misc/bcm2835_property.c @@ -13,7 +13,7 @@ #include "hw/irq.h" #include "hw/misc/bcm2835_mbox_defs.h" #include "hw/arm/raspberrypi-fw-defs.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qemu/log.h" #include "qemu/module.h" #include "trace.h" @@ -551,13 +551,12 @@ static void bcm2835_property_realize(DeviceState *dev, Error **errp) bcm2835_property_reset(dev); } -static Property bcm2835_property_props[] = { +static const Property bcm2835_property_props[] = { DEFINE_PROP_UINT32("board-rev", BCM2835PropertyState, board_rev, 0), DEFINE_PROP_STRING("command-line", BCM2835PropertyState, command_line), - DEFINE_PROP_END_OF_LIST() }; -static void bcm2835_property_class_init(ObjectClass *klass, void *data) +static void bcm2835_property_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/bcm2835_rng.c b/hw/misc/bcm2835_rng.c index 10e741b11d1..e4d2c224c88 100644 --- a/hw/misc/bcm2835_rng.c +++ b/hw/misc/bcm2835_rng.c @@ -123,11 +123,11 @@ static void bcm2835_rng_reset(DeviceState *dev) s->rng_status = 0; } -static void bcm2835_rng_class_init(ObjectClass *klass, void *data) +static void bcm2835_rng_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = bcm2835_rng_reset; + device_class_set_legacy_reset(dc, bcm2835_rng_reset); dc->vmsd = &vmstate_bcm2835_rng; } diff --git a/hw/misc/bcm2835_thermal.c b/hw/misc/bcm2835_thermal.c index 0c49c088a79..33bfc91c7aa 100644 --- a/hw/misc/bcm2835_thermal.c +++ b/hw/misc/bcm2835_thermal.c @@ -113,12 +113,12 @@ static const VMStateDescription bcm2835_thermal_vmstate = { } }; -static void bcm2835_thermal_class_init(ObjectClass *klass, void *data) +static void bcm2835_thermal_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = bcm2835_thermal_realize; - dc->reset = bcm2835_thermal_reset; + device_class_set_legacy_reset(dc, bcm2835_thermal_reset); dc->vmsd = &bcm2835_thermal_vmstate; } diff --git a/hw/misc/cbus.c b/hw/misc/cbus.c deleted file mode 100644 index 653e8ddcd5b..00000000000 --- a/hw/misc/cbus.c +++ /dev/null @@ -1,619 +0,0 @@ -/* - * CBUS three-pin bus and the Retu / Betty / Tahvo / Vilma / Avilma / - * Hinku / Vinku / Ahne / Pihi chips used in various Nokia platforms. - * Based on reverse-engineering of a linux driver. - * - * Copyright (C) 2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - */ - -#include "qemu/osdep.h" -#include "hw/hw.h" -#include "hw/irq.h" -#include "hw/misc/cbus.h" -#include "sysemu/runstate.h" - -//#define DEBUG - -typedef struct { - void *opaque; - void (*io)(void *opaque, int rw, int reg, uint16_t *val); - int addr; -} CBusSlave; - -typedef struct { - CBus cbus; - - int sel; - int dat; - int clk; - int bit; - int dir; - uint16_t val; - qemu_irq dat_out; - - int addr; - int reg; - int rw; - enum { - cbus_address, - cbus_value, - } cycle; - - CBusSlave *slave[8]; -} CBusPriv; - -static void cbus_io(CBusPriv *s) -{ - if (s->slave[s->addr]) - s->slave[s->addr]->io(s->slave[s->addr]->opaque, - s->rw, s->reg, &s->val); - else - hw_error("%s: bad slave address %i\n", __func__, s->addr); -} - -static void cbus_cycle(CBusPriv *s) -{ - switch (s->cycle) { - case cbus_address: - s->addr = (s->val >> 6) & 7; - s->rw = (s->val >> 5) & 1; - s->reg = (s->val >> 0) & 0x1f; - - s->cycle = cbus_value; - s->bit = 15; - s->dir = !s->rw; - s->val = 0; - - if (s->rw) - cbus_io(s); - break; - - case cbus_value: - if (!s->rw) - cbus_io(s); - - s->cycle = cbus_address; - s->bit = 8; - s->dir = 1; - s->val = 0; - break; - } -} - -static void cbus_clk(void *opaque, int line, int level) -{ - CBusPriv *s = (CBusPriv *) opaque; - - if (!s->sel && level && !s->clk) { - if (s->dir) - s->val |= s->dat << (s->bit --); - else - qemu_set_irq(s->dat_out, (s->val >> (s->bit --)) & 1); - - if (s->bit < 0) - cbus_cycle(s); - } - - s->clk = level; -} - -static void cbus_dat(void *opaque, int line, int level) -{ - CBusPriv *s = (CBusPriv *) opaque; - - s->dat = level; -} - -static void cbus_sel(void *opaque, int line, int level) -{ - CBusPriv *s = (CBusPriv *) opaque; - - if (!level) { - s->dir = 1; - s->bit = 8; - s->val = 0; - } - - s->sel = level; -} - -CBus *cbus_init(qemu_irq dat) -{ - CBusPriv *s = g_malloc0(sizeof(*s)); - - s->dat_out = dat; - s->cbus.clk = qemu_allocate_irq(cbus_clk, s, 0); - s->cbus.dat = qemu_allocate_irq(cbus_dat, s, 0); - s->cbus.sel = qemu_allocate_irq(cbus_sel, s, 0); - - s->sel = 1; - s->clk = 0; - s->dat = 0; - - return &s->cbus; -} - -void cbus_attach(CBus *bus, void *slave_opaque) -{ - CBusSlave *slave = (CBusSlave *) slave_opaque; - CBusPriv *s = (CBusPriv *) bus; - - s->slave[slave->addr] = slave; -} - -/* Retu/Vilma */ -typedef struct { - uint16_t irqst; - uint16_t irqen; - uint16_t cc[2]; - int channel; - uint16_t result[16]; - uint16_t sample; - uint16_t status; - - struct { - uint16_t cal; - } rtc; - - int is_vilma; - qemu_irq irq; - CBusSlave cbus; -} CBusRetu; - -static void retu_interrupt_update(CBusRetu *s) -{ - qemu_set_irq(s->irq, s->irqst & ~s->irqen); -} - -#define RETU_REG_ASICR 0x00 /* (RO) ASIC ID & revision */ -#define RETU_REG_IDR 0x01 /* (T) Interrupt ID */ -#define RETU_REG_IMR 0x02 /* (RW) Interrupt mask */ -#define RETU_REG_RTCDSR 0x03 /* (RW) RTC seconds register */ -#define RETU_REG_RTCHMR 0x04 /* (RO) RTC hours and minutes reg */ -#define RETU_REG_RTCHMAR 0x05 /* (RW) RTC hours and minutes set reg */ -#define RETU_REG_RTCCALR 0x06 /* (RW) RTC calibration register */ -#define RETU_REG_ADCR 0x08 /* (RW) ADC result register */ -#define RETU_REG_ADCSCR 0x09 /* (RW) ADC sample control register */ -#define RETU_REG_AFCR 0x0a /* (RW) AFC register */ -#define RETU_REG_ANTIFR 0x0b /* (RW) AntiF register */ -#define RETU_REG_CALIBR 0x0c /* (RW) CalibR register*/ 
-#define RETU_REG_CCR1 0x0d /* (RW) Common control register 1 */ -#define RETU_REG_CCR2 0x0e /* (RW) Common control register 2 */ -#define RETU_REG_RCTRL_CLR 0x0f /* (T) Regulator clear register */ -#define RETU_REG_RCTRL_SET 0x10 /* (T) Regulator set register */ -#define RETU_REG_TXCR 0x11 /* (RW) TxC register */ -#define RETU_REG_STATUS 0x16 /* (RO) Status register */ -#define RETU_REG_WATCHDOG 0x17 /* (RW) Watchdog register */ -#define RETU_REG_AUDTXR 0x18 /* (RW) Audio Codec Tx register */ -#define RETU_REG_AUDPAR 0x19 /* (RW) AudioPA register */ -#define RETU_REG_AUDRXR1 0x1a /* (RW) Audio receive register 1 */ -#define RETU_REG_AUDRXR2 0x1b /* (RW) Audio receive register 2 */ -#define RETU_REG_SGR1 0x1c /* (RW) */ -#define RETU_REG_SCR1 0x1d /* (RW) */ -#define RETU_REG_SGR2 0x1e /* (RW) */ -#define RETU_REG_SCR2 0x1f /* (RW) */ - -/* Retu Interrupt sources */ -enum { - retu_int_pwr = 0, /* Power button */ - retu_int_char = 1, /* Charger */ - retu_int_rtcs = 2, /* Seconds */ - retu_int_rtcm = 3, /* Minutes */ - retu_int_rtcd = 4, /* Days */ - retu_int_rtca = 5, /* Alarm */ - retu_int_hook = 6, /* Hook */ - retu_int_head = 7, /* Headset */ - retu_int_adcs = 8, /* ADC sample */ -}; - -/* Retu ADC channel wiring */ -enum { - retu_adc_bsi = 1, /* BSI */ - retu_adc_batt_temp = 2, /* Battery temperature */ - retu_adc_chg_volt = 3, /* Charger voltage */ - retu_adc_head_det = 4, /* Headset detection */ - retu_adc_hook_det = 5, /* Hook detection */ - retu_adc_rf_gp = 6, /* RF GP */ - retu_adc_tx_det = 7, /* Wideband Tx detection */ - retu_adc_batt_volt = 8, /* Battery voltage */ - retu_adc_sens = 10, /* Light sensor */ - retu_adc_sens_temp = 11, /* Light sensor temperature */ - retu_adc_bbatt_volt = 12, /* Backup battery voltage */ - retu_adc_self_temp = 13, /* RETU temperature */ -}; - -static inline uint16_t retu_read(CBusRetu *s, int reg) -{ -#ifdef DEBUG - printf("RETU read at %02x\n", reg); -#endif - - switch (reg) { - case RETU_REG_ASICR: - return 0x0215 | (s->is_vilma << 7); - - case RETU_REG_IDR: /* TODO: Or is this ffs(s->irqst)? 
*/ - return s->irqst; - - case RETU_REG_IMR: - return s->irqen; - - case RETU_REG_RTCDSR: - case RETU_REG_RTCHMR: - case RETU_REG_RTCHMAR: - /* TODO */ - return 0x0000; - - case RETU_REG_RTCCALR: - return s->rtc.cal; - - case RETU_REG_ADCR: - return (s->channel << 10) | s->result[s->channel]; - case RETU_REG_ADCSCR: - return s->sample; - - case RETU_REG_AFCR: - case RETU_REG_ANTIFR: - case RETU_REG_CALIBR: - /* TODO */ - return 0x0000; - - case RETU_REG_CCR1: - return s->cc[0]; - case RETU_REG_CCR2: - return s->cc[1]; - - case RETU_REG_RCTRL_CLR: - case RETU_REG_RCTRL_SET: - case RETU_REG_TXCR: - /* TODO */ - return 0x0000; - - case RETU_REG_STATUS: - return s->status; - - case RETU_REG_WATCHDOG: - case RETU_REG_AUDTXR: - case RETU_REG_AUDPAR: - case RETU_REG_AUDRXR1: - case RETU_REG_AUDRXR2: - case RETU_REG_SGR1: - case RETU_REG_SCR1: - case RETU_REG_SGR2: - case RETU_REG_SCR2: - /* TODO */ - return 0x0000; - - default: - hw_error("%s: bad register %02x\n", __func__, reg); - } -} - -static inline void retu_write(CBusRetu *s, int reg, uint16_t val) -{ -#ifdef DEBUG - printf("RETU write of %04x at %02x\n", val, reg); -#endif - - switch (reg) { - case RETU_REG_IDR: - s->irqst ^= val; - retu_interrupt_update(s); - break; - - case RETU_REG_IMR: - s->irqen = val; - retu_interrupt_update(s); - break; - - case RETU_REG_RTCDSR: - case RETU_REG_RTCHMAR: - /* TODO */ - break; - - case RETU_REG_RTCCALR: - s->rtc.cal = val; - break; - - case RETU_REG_ADCR: - s->channel = (val >> 10) & 0xf; - s->irqst |= 1 << retu_int_adcs; - retu_interrupt_update(s); - break; - case RETU_REG_ADCSCR: - s->sample &= ~val; - break; - - case RETU_REG_AFCR: - case RETU_REG_ANTIFR: - case RETU_REG_CALIBR: - - case RETU_REG_CCR1: - s->cc[0] = val; - break; - case RETU_REG_CCR2: - s->cc[1] = val; - break; - - case RETU_REG_RCTRL_CLR: - case RETU_REG_RCTRL_SET: - /* TODO */ - break; - - case RETU_REG_WATCHDOG: - if (val == 0 && (s->cc[0] & 2)) - qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); - break; - - case RETU_REG_TXCR: - case RETU_REG_AUDTXR: - case RETU_REG_AUDPAR: - case RETU_REG_AUDRXR1: - case RETU_REG_AUDRXR2: - case RETU_REG_SGR1: - case RETU_REG_SCR1: - case RETU_REG_SGR2: - case RETU_REG_SCR2: - /* TODO */ - break; - - default: - hw_error("%s: bad register %02x\n", __func__, reg); - } -} - -static void retu_io(void *opaque, int rw, int reg, uint16_t *val) -{ - CBusRetu *s = (CBusRetu *) opaque; - - if (rw) - *val = retu_read(s, reg); - else - retu_write(s, reg, *val); -} - -void *retu_init(qemu_irq irq, int vilma) -{ - CBusRetu *s = g_malloc0(sizeof(*s)); - - s->irq = irq; - s->irqen = 0xffff; - s->irqst = 0x0000; - s->status = 0x0020; - s->is_vilma = !!vilma; - s->rtc.cal = 0x01; - s->result[retu_adc_bsi] = 0x3c2; - s->result[retu_adc_batt_temp] = 0x0fc; - s->result[retu_adc_chg_volt] = 0x165; - s->result[retu_adc_head_det] = 123; - s->result[retu_adc_hook_det] = 1023; - s->result[retu_adc_rf_gp] = 0x11; - s->result[retu_adc_tx_det] = 0x11; - s->result[retu_adc_batt_volt] = 0x250; - s->result[retu_adc_sens] = 2; - s->result[retu_adc_sens_temp] = 0x11; - s->result[retu_adc_bbatt_volt] = 0x3d0; - s->result[retu_adc_self_temp] = 0x330; - - s->cbus.opaque = s; - s->cbus.io = retu_io; - s->cbus.addr = 1; - - return &s->cbus; -} - -void retu_key_event(void *retu, int state) -{ - CBusSlave *slave = (CBusSlave *) retu; - CBusRetu *s = (CBusRetu *) slave->opaque; - - s->irqst |= 1 << retu_int_pwr; - retu_interrupt_update(s); - - if (state) - s->status &= ~(1 << 5); - else - s->status |= 1 << 5; -} - -#if 0 
-static void retu_head_event(void *retu, int state) -{ - CBusSlave *slave = (CBusSlave *) retu; - CBusRetu *s = (CBusRetu *) slave->opaque; - - if ((s->cc[0] & 0x500) == 0x500) { /* TODO: Which bits? */ - /* TODO: reissue the interrupt every 100ms or so. */ - s->irqst |= 1 << retu_int_head; - retu_interrupt_update(s); - } - - if (state) - s->result[retu_adc_head_det] = 50; - else - s->result[retu_adc_head_det] = 123; -} - -static void retu_hook_event(void *retu, int state) -{ - CBusSlave *slave = (CBusSlave *) retu; - CBusRetu *s = (CBusRetu *) slave->opaque; - - if ((s->cc[0] & 0x500) == 0x500) { - /* TODO: reissue the interrupt every 100ms or so. */ - s->irqst |= 1 << retu_int_hook; - retu_interrupt_update(s); - } - - if (state) - s->result[retu_adc_hook_det] = 50; - else - s->result[retu_adc_hook_det] = 123; -} -#endif - -/* Tahvo/Betty */ -typedef struct { - uint16_t irqst; - uint16_t irqen; - uint8_t charger; - uint8_t backlight; - uint16_t usbr; - uint16_t power; - - int is_betty; - qemu_irq irq; - CBusSlave cbus; -} CBusTahvo; - -static void tahvo_interrupt_update(CBusTahvo *s) -{ - qemu_set_irq(s->irq, s->irqst & ~s->irqen); -} - -#define TAHVO_REG_ASICR 0x00 /* (RO) ASIC ID & revision */ -#define TAHVO_REG_IDR 0x01 /* (T) Interrupt ID */ -#define TAHVO_REG_IDSR 0x02 /* (RO) Interrupt status */ -#define TAHVO_REG_IMR 0x03 /* (RW) Interrupt mask */ -#define TAHVO_REG_CHAPWMR 0x04 /* (RW) Charger PWM */ -#define TAHVO_REG_LEDPWMR 0x05 /* (RW) LED PWM */ -#define TAHVO_REG_USBR 0x06 /* (RW) USB control */ -#define TAHVO_REG_RCR 0x07 /* (RW) Some kind of power management */ -#define TAHVO_REG_CCR1 0x08 /* (RW) Common control register 1 */ -#define TAHVO_REG_CCR2 0x09 /* (RW) Common control register 2 */ -#define TAHVO_REG_TESTR1 0x0a /* (RW) Test register 1 */ -#define TAHVO_REG_TESTR2 0x0b /* (RW) Test register 2 */ -#define TAHVO_REG_NOPR 0x0c /* (RW) Number of periods */ -#define TAHVO_REG_FRR 0x0d /* (RO) FR */ - -static inline uint16_t tahvo_read(CBusTahvo *s, int reg) -{ -#ifdef DEBUG - printf("TAHVO read at %02x\n", reg); -#endif - - switch (reg) { - case TAHVO_REG_ASICR: - return 0x0021 | (s->is_betty ? 0x0b00 : 0x0300); /* 22 in N810 */ - - case TAHVO_REG_IDR: - case TAHVO_REG_IDSR: /* XXX: what does this do? 
*/ - return s->irqst; - - case TAHVO_REG_IMR: - return s->irqen; - - case TAHVO_REG_CHAPWMR: - return s->charger; - - case TAHVO_REG_LEDPWMR: - return s->backlight; - - case TAHVO_REG_USBR: - return s->usbr; - - case TAHVO_REG_RCR: - return s->power; - - case TAHVO_REG_CCR1: - case TAHVO_REG_CCR2: - case TAHVO_REG_TESTR1: - case TAHVO_REG_TESTR2: - case TAHVO_REG_NOPR: - case TAHVO_REG_FRR: - return 0x0000; - - default: - hw_error("%s: bad register %02x\n", __func__, reg); - } -} - -static inline void tahvo_write(CBusTahvo *s, int reg, uint16_t val) -{ -#ifdef DEBUG - printf("TAHVO write of %04x at %02x\n", val, reg); -#endif - - switch (reg) { - case TAHVO_REG_IDR: - s->irqst ^= val; - tahvo_interrupt_update(s); - break; - - case TAHVO_REG_IMR: - s->irqen = val; - tahvo_interrupt_update(s); - break; - - case TAHVO_REG_CHAPWMR: - s->charger = val; - break; - - case TAHVO_REG_LEDPWMR: - if (s->backlight != (val & 0x7f)) { - s->backlight = val & 0x7f; - printf("%s: LCD backlight now at %i / 127\n", - __func__, s->backlight); - } - break; - - case TAHVO_REG_USBR: - s->usbr = val; - break; - - case TAHVO_REG_RCR: - s->power = val; - break; - - case TAHVO_REG_CCR1: - case TAHVO_REG_CCR2: - case TAHVO_REG_TESTR1: - case TAHVO_REG_TESTR2: - case TAHVO_REG_NOPR: - case TAHVO_REG_FRR: - break; - - default: - hw_error("%s: bad register %02x\n", __func__, reg); - } -} - -static void tahvo_io(void *opaque, int rw, int reg, uint16_t *val) -{ - CBusTahvo *s = (CBusTahvo *) opaque; - - if (rw) - *val = tahvo_read(s, reg); - else - tahvo_write(s, reg, *val); -} - -void *tahvo_init(qemu_irq irq, int betty) -{ - CBusTahvo *s = g_malloc0(sizeof(*s)); - - s->irq = irq; - s->irqen = 0xffff; - s->irqst = 0x0000; - s->is_betty = !!betty; - - s->cbus.opaque = s; - s->cbus.io = tahvo_io; - s->cbus.addr = 2; - - return &s->cbus; -} diff --git a/hw/misc/debugexit.c b/hw/misc/debugexit.c index c5c562fd935..04a9fc31223 100644 --- a/hw/misc/debugexit.c +++ b/hw/misc/debugexit.c @@ -12,7 +12,7 @@ #include "hw/qdev-properties.h" #include "qemu/module.h" #include "qom/object.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #define TYPE_ISA_DEBUG_EXIT_DEVICE "isa-debug-exit" OBJECT_DECLARE_SIMPLE_TYPE(ISADebugExitState, ISA_DEBUG_EXIT_DEVICE) @@ -56,13 +56,12 @@ static void debug_exit_realizefn(DeviceState *d, Error **errp) isa->iobase, &isa->io); } -static Property debug_exit_properties[] = { +static const Property debug_exit_properties[] = { DEFINE_PROP_UINT32("iobase", ISADebugExitState, iobase, 0x501), DEFINE_PROP_UINT32("iosize", ISADebugExitState, iosize, 0x02), - DEFINE_PROP_END_OF_LIST(), }; -static void debug_exit_class_initfn(ObjectClass *klass, void *data) +static void debug_exit_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/djmemc.c b/hw/misc/djmemc.c index 96d5efb5e3a..c5b09f551b0 100644 --- a/hw/misc/djmemc.c +++ b/hw/misc/djmemc.c @@ -113,7 +113,7 @@ static const VMStateDescription vmstate_djmemc = { } }; -static void djmemc_class_init(ObjectClass *oc, void *data) +static void djmemc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); ResettableClass *rc = RESETTABLE_CLASS(oc); diff --git a/hw/misc/eccmemctl.c b/hw/misc/eccmemctl.c index 5a14a489991..81fc536131c 100644 --- a/hw/misc/eccmemctl.c +++ b/hw/misc/eccmemctl.c @@ -325,17 +325,16 @@ static void ecc_realize(DeviceState *dev, Error **errp) } } -static Property ecc_properties[] = { +static const Property ecc_properties[] = { 
DEFINE_PROP_UINT32("version", ECCState, version, -1), - DEFINE_PROP_END_OF_LIST(), }; -static void ecc_class_init(ObjectClass *klass, void *data) +static void ecc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = ecc_realize; - dc->reset = ecc_reset; + device_class_set_legacy_reset(dc, ecc_reset); dc->vmsd = &vmstate_ecc; device_class_set_props(dc, ecc_properties); } diff --git a/hw/misc/edu.c b/hw/misc/edu.c index 504178b4a22..cece633e113 100644 --- a/hw/misc/edu.c +++ b/hw/misc/edu.c @@ -415,7 +415,7 @@ static void edu_instance_init(Object *obj) &edu->dma_mask, OBJ_PROP_FLAG_READWRITE); } -static void edu_class_init(ObjectClass *class, void *data) +static void edu_class_init(ObjectClass *class, const void *data) { DeviceClass *dc = DEVICE_CLASS(class); PCIDeviceClass *k = PCI_DEVICE_CLASS(class); @@ -429,21 +429,18 @@ static void edu_class_init(ObjectClass *class, void *data) set_bit(DEVICE_CATEGORY_MISC, dc->categories); } -static void pci_edu_register_types(void) -{ - static InterfaceInfo interfaces[] = { - { INTERFACE_CONVENTIONAL_PCI_DEVICE }, - { }, - }; - static const TypeInfo edu_info = { +static const TypeInfo edu_types[] = { + { .name = TYPE_PCI_EDU_DEVICE, .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(EduState), .instance_init = edu_instance_init, .class_init = edu_class_init, - .interfaces = interfaces, - }; + .interfaces = (const InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, + } +}; - type_register_static(&edu_info); -} -type_init(pci_edu_register_types) +DEFINE_TYPES(edu_types) diff --git a/hw/misc/empty_slot.c b/hw/misc/empty_slot.c index 37b0ddfb02a..239d7603207 100644 --- a/hw/misc/empty_slot.c +++ b/hw/misc/empty_slot.c @@ -79,13 +79,12 @@ static void empty_slot_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); } -static Property empty_slot_properties[] = { +static const Property empty_slot_properties[] = { DEFINE_PROP_UINT64("size", EmptySlot, size, 0), DEFINE_PROP_STRING("name", EmptySlot, name), - DEFINE_PROP_END_OF_LIST(), }; -static void empty_slot_class_init(ObjectClass *klass, void *data) +static void empty_slot_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/exynos4210_clk.c b/hw/misc/exynos4210_clk.c index 4566a426faa..fdf5bdd6034 100644 --- a/hw/misc/exynos4210_clk.c +++ b/hw/misc/exynos4210_clk.c @@ -141,11 +141,11 @@ static const VMStateDescription exynos4210_clk_vmstate = { } }; -static void exynos4210_clk_class_init(ObjectClass *klass, void *data) +static void exynos4210_clk_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = exynos4210_clk_reset; + device_class_set_legacy_reset(dc, exynos4210_clk_reset); dc->vmsd = &exynos4210_clk_vmstate; } diff --git a/hw/misc/exynos4210_pmu.c b/hw/misc/exynos4210_pmu.c index 7e28e790d7c..a86ec9aba85 100644 --- a/hw/misc/exynos4210_pmu.c +++ b/hw/misc/exynos4210_pmu.c @@ -28,7 +28,7 @@ #include "hw/sysbus.h" #include "migration/vmstate.h" #include "qemu/module.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "qom/object.h" #ifndef DEBUG_PMU @@ -498,11 +498,11 @@ static const VMStateDescription exynos4210_pmu_vmstate = { } }; -static void exynos4210_pmu_class_init(ObjectClass *klass, void *data) +static void exynos4210_pmu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = exynos4210_pmu_reset; + 
device_class_set_legacy_reset(dc, exynos4210_pmu_reset); dc->vmsd = &exynos4210_pmu_vmstate; } diff --git a/hw/misc/exynos4210_rng.c b/hw/misc/exynos4210_rng.c index 674d8eece5f..2d0ebc457bc 100644 --- a/hw/misc/exynos4210_rng.c +++ b/hw/misc/exynos4210_rng.c @@ -255,11 +255,11 @@ static const VMStateDescription exynos4210_rng_vmstate = { } }; -static void exynos4210_rng_class_init(ObjectClass *klass, void *data) +static void exynos4210_rng_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = exynos4210_rng_reset; + device_class_set_legacy_reset(dc, exynos4210_rng_reset); dc->vmsd = &exynos4210_rng_vmstate; } diff --git a/hw/misc/grlib_ahb_apb_pnp.c b/hw/misc/grlib_ahb_apb_pnp.c index 5b05f158592..cdca00ad542 100644 --- a/hw/misc/grlib_ahb_apb_pnp.c +++ b/hw/misc/grlib_ahb_apb_pnp.c @@ -168,7 +168,7 @@ static void grlib_ahb_pnp_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &ahb_pnp->iomem); } -static void grlib_ahb_pnp_class_init(ObjectClass *klass, void *data) +static void grlib_ahb_pnp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -280,7 +280,7 @@ static void grlib_apb_pnp_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &apb_pnp->iomem); } -static void grlib_apb_pnp_class_init(ObjectClass *klass, void *data) +static void grlib_apb_pnp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/i2c-echo.c b/hw/misc/i2c-echo.c index 5ae3d0817ea..2bb99ec0dbf 100644 --- a/hw/misc/i2c-echo.c +++ b/hw/misc/i2c-echo.c @@ -13,6 +13,7 @@ #include "qemu/main-loop.h" #include "block/aio.h" #include "hw/i2c/i2c.h" +#include "trace.h" #define TYPE_I2C_ECHO "i2c-echo" OBJECT_DECLARE_SIMPLE_TYPE(I2CEchoState, I2C_ECHO) @@ -80,11 +81,13 @@ static int i2c_echo_event(I2CSlave *s, enum i2c_event event) case I2C_START_RECV: state->pos = 0; + trace_i2c_echo_event(DEVICE(s)->canonical_path, "I2C_START_RECV"); break; case I2C_START_SEND: state->pos = 0; + trace_i2c_echo_event(DEVICE(s)->canonical_path, "I2C_START_SEND"); break; case I2C_FINISH: @@ -92,12 +95,15 @@ static int i2c_echo_event(I2CSlave *s, enum i2c_event event) state->state = I2C_ECHO_STATE_START_SEND; i2c_bus_master(state->bus, state->bh); + trace_i2c_echo_event(DEVICE(s)->canonical_path, "I2C_FINISH"); break; case I2C_NACK: + trace_i2c_echo_event(DEVICE(s)->canonical_path, "I2C_NACK"); break; default: + trace_i2c_echo_event(DEVICE(s)->canonical_path, "UNHANDLED"); return -1; } @@ -112,6 +118,7 @@ static uint8_t i2c_echo_recv(I2CSlave *s) return 0xff; } + trace_i2c_echo_recv(DEVICE(s)->canonical_path, state->data[state->pos]); return state->data[state->pos++]; } @@ -119,6 +126,7 @@ static int i2c_echo_send(I2CSlave *s, uint8_t data) { I2CEchoState *state = I2C_ECHO(s); + trace_i2c_echo_send(DEVICE(s)->canonical_path, data); if (state->pos > 2) { return -1; } @@ -135,11 +143,9 @@ static void i2c_echo_realize(DeviceState *dev, Error **errp) state->bus = I2C_BUS(bus); state->bh = qemu_bh_new(i2c_echo_bh, state); - - return; } -static void i2c_echo_class_init(ObjectClass *oc, void *data) +static void i2c_echo_class_init(ObjectClass *oc, const void *data) { I2CSlaveClass *sc = I2C_SLAVE_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/misc/imx25_ccm.c b/hw/misc/imx25_ccm.c index faa726a86ac..a6665d55353 100644 --- a/hw/misc/imx25_ccm.c +++ b/hw/misc/imx25_ccm.c @@ -292,12 +292,12 @@ static void imx25_ccm_init(Object *obj) sysbus_init_mmio(sd, &s->iomem); } 
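The edu.c hunk above also folds the open-coded type_init()/type_register_static() boilerplate into a type array registered with DEFINE_TYPES(). As a minimal sketch of that idiom (TYPE_FOO, FooState and foo_class_init are hypothetical placeholders):

    static const TypeInfo foo_types[] = {
        {
            .name          = TYPE_FOO,
            .parent        = TYPE_SYS_BUS_DEVICE,
            .instance_size = sizeof(FooState),
            .class_init    = foo_class_init,
        },
    };

    DEFINE_TYPES(foo_types)

DEFINE_TYPES() expands to a type_init() hook that registers every entry of the array, so further types are added by appending another array element rather than another type_register_static() call.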
-static void imx25_ccm_class_init(ObjectClass *klass, void *data) +static void imx25_ccm_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); IMXCCMClass *ccm = IMX_CCM_CLASS(klass); - dc->reset = imx25_ccm_reset; + device_class_set_legacy_reset(dc, imx25_ccm_reset); dc->vmsd = &vmstate_imx25_ccm; dc->desc = "i.MX25 Clock Control Module"; diff --git a/hw/misc/imx31_ccm.c b/hw/misc/imx31_ccm.c index 125d4fceebb..339458e8596 100644 --- a/hw/misc/imx31_ccm.c +++ b/hw/misc/imx31_ccm.c @@ -319,12 +319,12 @@ static void imx31_ccm_init(Object *obj) sysbus_init_mmio(sd, &s->iomem); } -static void imx31_ccm_class_init(ObjectClass *klass, void *data) +static void imx31_ccm_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); IMXCCMClass *ccm = IMX_CCM_CLASS(klass); - dc->reset = imx31_ccm_reset; + device_class_set_legacy_reset(dc, imx31_ccm_reset); dc->vmsd = &vmstate_imx31_ccm; dc->desc = "i.MX31 Clock Control Module"; diff --git a/hw/misc/imx6_ccm.c b/hw/misc/imx6_ccm.c index b1def7f05b9..a10b67d3961 100644 --- a/hw/misc/imx6_ccm.c +++ b/hw/misc/imx6_ccm.c @@ -301,7 +301,6 @@ static uint64_t imx6_analog_get_periph_clk(IMX6CCMState *dev) default: /* We should never get there */ g_assert_not_reached(); - break; } trace_imx6_analog_get_periph_clk(freq); @@ -742,12 +741,12 @@ static void imx6_ccm_init(Object *obj) sysbus_init_mmio(sd, &s->container); } -static void imx6_ccm_class_init(ObjectClass *klass, void *data) +static void imx6_ccm_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); IMXCCMClass *ccm = IMX_CCM_CLASS(klass); - dc->reset = imx6_ccm_reset; + device_class_set_legacy_reset(dc, imx6_ccm_reset); dc->vmsd = &vmstate_imx6_ccm; dc->desc = "i.MX6 Clock Control Module"; diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c index 3766bdf5619..8d2c4175e4e 100644 --- a/hw/misc/imx6_src.c +++ b/hw/misc/imx6_src.c @@ -17,18 +17,7 @@ #include "qemu/module.h" #include "target/arm/arm-powerctl.h" #include "hw/core/cpu.h" - -#ifndef DEBUG_IMX6_SRC -#define DEBUG_IMX6_SRC 0 -#endif - -#define DPRINTF(fmt, args...) 
\ - do { \ - if (DEBUG_IMX6_SRC) { \ - fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX6_SRC, \ - __func__, ##args); \ - } \ - } while (0) +#include "trace.h" static const char *imx6_src_reg_name(uint32_t reg) { @@ -87,7 +76,7 @@ static void imx6_src_reset(DeviceState *dev) { IMX6SRCState *s = IMX6_SRC(dev); - DPRINTF("\n"); + trace_imx6_src_reset(); memset(s->regs, 0, sizeof(s->regs)); @@ -111,7 +100,7 @@ static uint64_t imx6_src_read(void *opaque, hwaddr offset, unsigned size) } - DPRINTF("reg[%s] => 0x%" PRIx32 "\n", imx6_src_reg_name(index), value); + trace_imx6_src_read(imx6_src_reg_name(index), value); return value; } @@ -134,8 +123,7 @@ static void imx6_clear_reset_bit(CPUState *cpu, run_on_cpu_data data) assert(bql_locked()); s->regs[SRC_SCR] = deposit32(s->regs[SRC_SCR], ri->reset_bit, 1, 0); - DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", - imx6_src_reg_name(SRC_SCR), s->regs[SRC_SCR]); + trace_imx6_clear_reset_bit(imx6_src_reg_name(SRC_SCR), s->regs[SRC_SCR]); g_free(ri); } @@ -173,8 +161,7 @@ static void imx6_src_write(void *opaque, hwaddr offset, uint64_t value, return; } - DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx6_src_reg_name(index), - (uint32_t)current_value); + trace_imx6_src_write(imx6_src_reg_name(index), value); change_mask = s->regs[index] ^ (uint32_t)current_value; @@ -286,12 +273,12 @@ static void imx6_src_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); } -static void imx6_src_class_init(ObjectClass *klass, void *data) +static void imx6_src_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = imx6_src_realize; - dc->reset = imx6_src_reset; + device_class_set_legacy_reset(dc, imx6_src_reset); dc->vmsd = &vmstate_imx6_src; dc->desc = "i.MX6 System Reset Controller"; } diff --git a/hw/misc/imx6ul_ccm.c b/hw/misc/imx6ul_ccm.c index 0ac49ea34b3..7f3ae61710c 100644 --- a/hw/misc/imx6ul_ccm.c +++ b/hw/misc/imx6ul_ccm.c @@ -904,12 +904,12 @@ static void imx6ul_ccm_init(Object *obj) sysbus_init_mmio(sd, &s->container); } -static void imx6ul_ccm_class_init(ObjectClass *klass, void *data) +static void imx6ul_ccm_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); IMXCCMClass *ccm = IMX_CCM_CLASS(klass); - dc->reset = imx6ul_ccm_reset; + device_class_set_legacy_reset(dc, imx6ul_ccm_reset); dc->vmsd = &vmstate_imx6ul_ccm; dc->desc = "i.MX6UL Clock Control Module"; diff --git a/hw/misc/imx7_ccm.c b/hw/misc/imx7_ccm.c index 88354f020ee..c061a584e46 100644 --- a/hw/misc/imx7_ccm.c +++ b/hw/misc/imx7_ccm.c @@ -262,12 +262,12 @@ static uint32_t imx7_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock) return freq; } -static void imx7_ccm_class_init(ObjectClass *klass, void *data) +static void imx7_ccm_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); IMXCCMClass *ccm = IMX_CCM_CLASS(klass); - dc->reset = imx7_ccm_reset; + device_class_set_legacy_reset(dc, imx7_ccm_reset); dc->vmsd = &vmstate_imx7_ccm; dc->desc = "i.MX7 Clock Control Module"; @@ -293,11 +293,11 @@ static const VMStateDescription vmstate_imx7_analog = { }, }; -static void imx7_analog_class_init(ObjectClass *klass, void *data) +static void imx7_analog_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = imx7_analog_reset; + device_class_set_legacy_reset(dc, imx7_analog_reset); dc->vmsd = &vmstate_imx7_analog; dc->desc = "i.MX7 Analog Module"; } diff --git a/hw/misc/imx7_gpr.c b/hw/misc/imx7_gpr.c index 
b03341a2eba..e12b4962730 100644 --- a/hw/misc/imx7_gpr.c +++ b/hw/misc/imx7_gpr.c @@ -102,7 +102,7 @@ static void imx7_gpr_init(Object *obj) sysbus_init_mmio(sd, &s->mmio); } -static void imx7_gpr_class_init(ObjectClass *klass, void *data) +static void imx7_gpr_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/imx7_snvs.c b/hw/misc/imx7_snvs.c index edb2df215a6..6a8733d23d7 100644 --- a/hw/misc/imx7_snvs.c +++ b/hw/misc/imx7_snvs.c @@ -19,9 +19,9 @@ #include "hw/misc/imx7_snvs.h" #include "qemu/cutils.h" #include "qemu/module.h" -#include "sysemu/sysemu.h" -#include "sysemu/rtc.h" -#include "sysemu/runstate.h" +#include "system/system.h" +#include "system/rtc.h" +#include "system/runstate.h" #include "trace.h" #define RTC_FREQ 32768ULL @@ -143,11 +143,11 @@ static void imx7_snvs_init(Object *obj) qemu_clock_get_ns(rtc_clock) / NANOSECONDS_PER_SECOND; } -static void imx7_snvs_class_init(ObjectClass *klass, void *data) +static void imx7_snvs_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = imx7_snvs_reset; + device_class_set_legacy_reset(dc, imx7_snvs_reset); dc->vmsd = &vmstate_imx7_snvs; dc->desc = "i.MX7 Secure Non-Volatile Storage Module"; } diff --git a/hw/misc/imx7_src.c b/hw/misc/imx7_src.c index d19f0450d4a..df0b0a69057 100644 --- a/hw/misc/imx7_src.c +++ b/hw/misc/imx7_src.c @@ -251,12 +251,12 @@ static void imx7_src_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); } -static void imx7_src_class_init(ObjectClass *klass, void *data) +static void imx7_src_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = imx7_src_realize; - dc->reset = imx7_src_reset; + device_class_set_legacy_reset(dc, imx7_src_reset); dc->vmsd = &vmstate_imx7_src; dc->desc = "i.MX6 System Reset Controller"; } diff --git a/hw/misc/imx8mp_analog.c b/hw/misc/imx8mp_analog.c new file mode 100644 index 00000000000..23ffae84f89 --- /dev/null +++ b/hw/misc/imx8mp_analog.c @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2025 Bernhard Beschow + * + * i.MX 8M Plus ANALOG IP block emulation code + * + * Based on hw/misc/imx7_ccm.c + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" + +#include "hw/misc/imx8mp_analog.h" +#include "migration/vmstate.h" + +#define ANALOG_PLL_LOCK BIT(31) + +static void imx8mp_analog_reset(DeviceState *dev) +{ + IMX8MPAnalogState *s = IMX8MP_ANALOG(dev); + + memset(s->analog, 0, sizeof(s->analog)); + + s->analog[ANALOG_AUDIO_PLL1_GEN_CTRL] = 0x00002010; + s->analog[ANALOG_AUDIO_PLL1_FDIV_CTL0] = 0x00145032; + s->analog[ANALOG_AUDIO_PLL1_FDIV_CTL1] = 0x00000000; + s->analog[ANALOG_AUDIO_PLL1_SSCG_CTRL] = 0x00000000; + s->analog[ANALOG_AUDIO_PLL1_MNIT_CTRL] = 0x00100103; + s->analog[ANALOG_AUDIO_PLL2_GEN_CTRL] = 0x00002010; + s->analog[ANALOG_AUDIO_PLL2_FDIV_CTL0] = 0x00145032; + s->analog[ANALOG_AUDIO_PLL2_FDIV_CTL1] = 0x00000000; + s->analog[ANALOG_AUDIO_PLL2_SSCG_CTRL] = 0x00000000; + s->analog[ANALOG_AUDIO_PLL2_MNIT_CTRL] = 0x00100103; + s->analog[ANALOG_VIDEO_PLL1_GEN_CTRL] = 0x00002010; + s->analog[ANALOG_VIDEO_PLL1_FDIV_CTL0] = 0x00145032; + s->analog[ANALOG_VIDEO_PLL1_FDIV_CTL1] = 0x00000000; + s->analog[ANALOG_VIDEO_PLL1_SSCG_CTRL] = 0x00000000; + s->analog[ANALOG_VIDEO_PLL1_MNIT_CTRL] = 0x00100103; + s->analog[ANALOG_DRAM_PLL_GEN_CTRL] = 0x00002010; + s->analog[ANALOG_DRAM_PLL_FDIV_CTL0] = 0x0012c032; + s->analog[ANALOG_DRAM_PLL_FDIV_CTL1] 
= 0x00000000; + s->analog[ANALOG_DRAM_PLL_SSCG_CTRL] = 0x00000000; + s->analog[ANALOG_DRAM_PLL_MNIT_CTRL] = 0x00100103; + s->analog[ANALOG_GPU_PLL_GEN_CTRL] = 0x00000810; + s->analog[ANALOG_GPU_PLL_FDIV_CTL0] = 0x000c8031; + s->analog[ANALOG_GPU_PLL_LOCKD_CTRL] = 0x0010003f; + s->analog[ANALOG_GPU_PLL_MNIT_CTRL] = 0x00280081; + s->analog[ANALOG_VPU_PLL_GEN_CTRL] = 0x00000810; + s->analog[ANALOG_VPU_PLL_FDIV_CTL0] = 0x0012c032; + s->analog[ANALOG_VPU_PLL_LOCKD_CTRL] = 0x0010003f; + s->analog[ANALOG_VPU_PLL_MNIT_CTRL] = 0x00280081; + s->analog[ANALOG_ARM_PLL_GEN_CTRL] = 0x00000810; + s->analog[ANALOG_ARM_PLL_FDIV_CTL0] = 0x000fa031; + s->analog[ANALOG_ARM_PLL_LOCKD_CTRL] = 0x0010003f; + s->analog[ANALOG_ARM_PLL_MNIT_CTRL] = 0x00280081; + s->analog[ANALOG_SYS_PLL1_GEN_CTRL] = 0x0aaaa810; + s->analog[ANALOG_SYS_PLL1_FDIV_CTL0] = 0x00190032; + s->analog[ANALOG_SYS_PLL1_LOCKD_CTRL] = 0x0010003f; + s->analog[ANALOG_SYS_PLL1_MNIT_CTRL] = 0x00280081; + s->analog[ANALOG_SYS_PLL2_GEN_CTRL] = 0x0aaaa810; + s->analog[ANALOG_SYS_PLL2_FDIV_CTL0] = 0x000fa031; + s->analog[ANALOG_SYS_PLL2_LOCKD_CTRL] = 0x0010003f; + s->analog[ANALOG_SYS_PLL2_MNIT_CTRL] = 0x00280081; + s->analog[ANALOG_SYS_PLL3_GEN_CTRL] = 0x00000810; + s->analog[ANALOG_SYS_PLL3_FDIV_CTL0] = 0x000fa031; + s->analog[ANALOG_SYS_PLL3_LOCKD_CTRL] = 0x0010003f; + s->analog[ANALOG_SYS_PLL3_MNIT_CTRL] = 0x00280081; + s->analog[ANALOG_OSC_MISC_CFG] = 0x00000000; + s->analog[ANALOG_ANAMIX_PLL_MNIT_CTL] = 0x00000000; + s->analog[ANALOG_DIGPROG] = 0x00824010; + + /* all PLLs need to be locked */ + s->analog[ANALOG_AUDIO_PLL1_GEN_CTRL] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_AUDIO_PLL2_GEN_CTRL] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_VIDEO_PLL1_GEN_CTRL] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_DRAM_PLL_GEN_CTRL] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_GPU_PLL_GEN_CTRL] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_VPU_PLL_GEN_CTRL] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_ARM_PLL_GEN_CTRL] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_SYS_PLL1_GEN_CTRL] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_SYS_PLL2_GEN_CTRL] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_SYS_PLL3_GEN_CTRL] |= ANALOG_PLL_LOCK; +} + +static uint64_t imx8mp_analog_read(void *opaque, hwaddr offset, unsigned size) +{ + IMX8MPAnalogState *s = opaque; + + return s->analog[offset >> 2]; +} + +static void imx8mp_analog_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + IMX8MPAnalogState *s = opaque; + + if (offset >> 2 == ANALOG_DIGPROG) { + qemu_log_mask(LOG_GUEST_ERROR, + "Guest write to read-only ANALOG_DIGPROG register\n"); + } else { + s->analog[offset >> 2] = value; + } +} + +static const struct MemoryRegionOps imx8mp_analog_ops = { + .read = imx8mp_analog_read, + .write = imx8mp_analog_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void imx8mp_analog_init(Object *obj) +{ + IMX8MPAnalogState *s = IMX8MP_ANALOG(obj); + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + + memory_region_init(&s->mmio.container, obj, TYPE_IMX8MP_ANALOG, 0x10000); + + memory_region_init_io(&s->mmio.analog, obj, &imx8mp_analog_ops, s, + TYPE_IMX8MP_ANALOG, sizeof(s->analog)); + memory_region_add_subregion(&s->mmio.container, 0, &s->mmio.analog); + + sysbus_init_mmio(sd, &s->mmio.container); +} + +static const VMStateDescription imx8mp_analog_vmstate = { + .name = TYPE_IMX8MP_ANALOG, + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT32_ARRAY(analog, IMX8MPAnalogState, ANALOG_MAX), 
+ VMSTATE_END_OF_LIST() + }, +}; + +static void imx8mp_analog_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_legacy_reset(dc, imx8mp_analog_reset); + dc->vmsd = &imx8mp_analog_vmstate; + dc->desc = "i.MX 8M Plus Analog Module"; +} + +static const TypeInfo imx8mp_analog_types[] = { + { + .name = TYPE_IMX8MP_ANALOG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMX8MPAnalogState), + .instance_init = imx8mp_analog_init, + .class_init = imx8mp_analog_class_init, + } +}; + +DEFINE_TYPES(imx8mp_analog_types); diff --git a/hw/misc/imx8mp_ccm.c b/hw/misc/imx8mp_ccm.c new file mode 100644 index 00000000000..911911ed865 --- /dev/null +++ b/hw/misc/imx8mp_ccm.c @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2025 Bernhard Beschow + * + * i.MX 8M Plus CCM IP block emulation code + * + * Based on hw/misc/imx7_ccm.c + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" + +#include "hw/misc/imx8mp_ccm.h" +#include "migration/vmstate.h" + +#include "trace.h" + +#define CKIH_FREQ 16000000 /* 16MHz crystal input */ + +static void imx8mp_ccm_reset(DeviceState *dev) +{ + IMX8MPCCMState *s = IMX8MP_CCM(dev); + + memset(s->ccm, 0, sizeof(s->ccm)); +} + +#define CCM_INDEX(offset) (((offset) & ~(hwaddr)0xF) / sizeof(uint32_t)) +#define CCM_BITOP(offset) ((offset) & (hwaddr)0xF) + +enum { + CCM_BITOP_NONE = 0x00, + CCM_BITOP_SET = 0x04, + CCM_BITOP_CLR = 0x08, + CCM_BITOP_TOG = 0x0C, +}; + +static uint64_t imx8mp_set_clr_tog_read(void *opaque, hwaddr offset, + unsigned size) +{ + const uint32_t *mmio = opaque; + + return mmio[CCM_INDEX(offset)]; +} + +static void imx8mp_set_clr_tog_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + const uint8_t bitop = CCM_BITOP(offset); + const uint32_t index = CCM_INDEX(offset); + uint32_t *mmio = opaque; + + switch (bitop) { + case CCM_BITOP_NONE: + mmio[index] = value; + break; + case CCM_BITOP_SET: + mmio[index] |= value; + break; + case CCM_BITOP_CLR: + mmio[index] &= ~value; + break; + case CCM_BITOP_TOG: + mmio[index] ^= value; + break; + }; +} + +static const struct MemoryRegionOps imx8mp_set_clr_tog_ops = { + .read = imx8mp_set_clr_tog_read, + .write = imx8mp_set_clr_tog_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. + */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void imx8mp_ccm_init(Object *obj) +{ + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + IMX8MPCCMState *s = IMX8MP_CCM(obj); + + memory_region_init_io(&s->iomem, + obj, + &imx8mp_set_clr_tog_ops, + s->ccm, + TYPE_IMX8MP_CCM ".ccm", + sizeof(s->ccm)); + + sysbus_init_mmio(sd, &s->iomem); +} + +static const VMStateDescription imx8mp_ccm_vmstate = { + .name = TYPE_IMX8MP_CCM, + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT32_ARRAY(ccm, IMX8MPCCMState, CCM_MAX), + VMSTATE_END_OF_LIST() + }, +}; + +static uint32_t imx8mp_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock) +{ + /* + * This function is "consumed" by GPT emulation code. Some clocks + * have fixed frequencies and we can provide requested frequency + * easily. However for CCM provided clocks (like IPG) each GPT + * timer can have its own clock root. 
+ * This means we need additional information when calling this + * function to know the requester's identity. + */ + uint32_t freq = 0; + + switch (clock) { + case CLK_NONE: + break; + case CLK_32k: + freq = CKIL_FREQ; + break; + case CLK_HIGH: + freq = CKIH_FREQ; + break; + case CLK_IPG: + case CLK_IPG_HIGH: + /* + * For now we don't have a way to figure out the device this + * function is called for. Until then the IPG derived clocks + * are left unimplemented. + */ + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Clock %d Not implemented\n", + TYPE_IMX8MP_CCM, __func__, clock); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: unsupported clock %d\n", + TYPE_IMX8MP_CCM, __func__, clock); + break; + } + + trace_ccm_clock_freq(clock, freq); + + return freq; +} + +static void imx8mp_ccm_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IMXCCMClass *ccm = IMX_CCM_CLASS(klass); + + device_class_set_legacy_reset(dc, imx8mp_ccm_reset); + dc->vmsd = &imx8mp_ccm_vmstate; + dc->desc = "i.MX 8M Plus Clock Control Module"; + + ccm->get_clock_frequency = imx8mp_ccm_get_clock_frequency; +} + +static const TypeInfo imx8mp_ccm_types[] = { + { + .name = TYPE_IMX8MP_CCM, + .parent = TYPE_IMX_CCM, + .instance_size = sizeof(IMX8MPCCMState), + .instance_init = imx8mp_ccm_init, + .class_init = imx8mp_ccm_class_init, + }, +}; + +DEFINE_TYPES(imx8mp_ccm_types); diff --git a/hw/misc/imx_rngc.c b/hw/misc/imx_rngc.c index ab7775e0952..630f6cb54b5 100644 --- a/hw/misc/imx_rngc.c +++ b/hw/misc/imx_rngc.c @@ -254,12 +254,12 @@ static const VMStateDescription vmstate_imx_rngc = { } }; -static void imx_rngc_class_init(ObjectClass *klass, void *data) +static void imx_rngc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = imx_rngc_realize; - dc->reset = imx_rngc_reset; + device_class_set_legacy_reset(dc, imx_rngc_reset); dc->desc = RNGC_NAME, dc->vmsd = &vmstate_imx_rngc; } diff --git a/hw/misc/iosb.c b/hw/misc/iosb.c index 31927eaedb4..96221e1ee5a 100644 --- a/hw/misc/iosb.c +++ b/hw/misc/iosb.c @@ -111,7 +111,7 @@ static const VMStateDescription vmstate_iosb = { } }; -static void iosb_class_init(ObjectClass *oc, void *data) +static void iosb_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); ResettableClass *rc = RESETTABLE_CLASS(oc); diff --git a/hw/misc/iotkit-secctl.c b/hw/misc/iotkit-secctl.c index f9c45f60bf3..afd9ab48df7 100644 --- a/hw/misc/iotkit-secctl.c +++ b/hw/misc/iotkit-secctl.c @@ -814,17 +814,16 @@ static const VMStateDescription iotkit_secctl_vmstate = { }, }; -static Property iotkit_secctl_props[] = { +static const Property iotkit_secctl_props[] = { DEFINE_PROP_UINT32("sse-version", IoTKitSecCtl, sse_version, 0), - DEFINE_PROP_END_OF_LIST() }; -static void iotkit_secctl_class_init(ObjectClass *klass, void *data) +static void iotkit_secctl_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &iotkit_secctl_vmstate; - dc->reset = iotkit_secctl_reset; + device_class_set_legacy_reset(dc, iotkit_secctl_reset); dc->realize = iotkit_secctl_realize; device_class_set_props(dc, iotkit_secctl_props); } diff --git a/hw/misc/iotkit-sysctl.c b/hw/misc/iotkit-sysctl.c index 45393e84ba4..d70e51ab2ec 100644 --- a/hw/misc/iotkit-sysctl.c +++ b/hw/misc/iotkit-sysctl.c @@ -20,7 +20,7 @@ #include "qemu/bitops.h" #include "qemu/log.h" #include "qemu/module.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "trace.h" 
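Returning to the imx8mp_set_clr_tog_write() helper added in imx8mp_ccm.c above: each CCM register occupies a 16-byte window in which the base offset is a plain write and the +0x4/+0x8/+0xC offsets alias it with set/clear/toggle semantics. A standalone illustration of that dispatch is sketched below; the 0x20 register offset and the bit patterns are made up for the example and are not taken from the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Same index calculation as the CCM_INDEX() macro in the hunk above. */
    #define CCM_INDEX(offset) (((offset) & ~(uint64_t)0xF) / sizeof(uint32_t))

    int main(void)
    {
        uint32_t regs[64] = { 0 };

        regs[CCM_INDEX(0x20)]  = 0x000000f0;  /* plain write at offset 0x20 */
        regs[CCM_INDEX(0x24)] |= 0x0000000f;  /* 0x24: SET alias of 0x20    */
        regs[CCM_INDEX(0x28)] &= ~0x00000030; /* 0x28: CLR alias of 0x20    */
        regs[CCM_INDEX(0x2c)] ^= 0x00000100;  /* 0x2c: TOG alias of 0x20    */

        printf("reg@0x20 = 0x%08x\n", regs[CCM_INDEX(0x20)]); /* 0x000001cf */
        return 0;
    }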
#include "qapi/error.h" #include "hw/sysbus.h" @@ -835,22 +835,21 @@ static const VMStateDescription iotkit_sysctl_vmstate = { } }; -static Property iotkit_sysctl_props[] = { +static const Property iotkit_sysctl_props[] = { DEFINE_PROP_UINT32("sse-version", IoTKitSysCtl, sse_version, 0), DEFINE_PROP_UINT32("CPUWAIT_RST", IoTKitSysCtl, cpuwait_rst, 0), DEFINE_PROP_UINT32("INITSVTOR0_RST", IoTKitSysCtl, initsvtor0_rst, 0x10000000), DEFINE_PROP_UINT32("INITSVTOR1_RST", IoTKitSysCtl, initsvtor1_rst, 0x10000000), - DEFINE_PROP_END_OF_LIST() }; -static void iotkit_sysctl_class_init(ObjectClass *klass, void *data) +static void iotkit_sysctl_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &iotkit_sysctl_vmstate; - dc->reset = iotkit_sysctl_reset; + device_class_set_legacy_reset(dc, iotkit_sysctl_reset); device_class_set_props(dc, iotkit_sysctl_props); dc->realize = iotkit_sysctl_realize; } diff --git a/hw/misc/iotkit-sysinfo.c b/hw/misc/iotkit-sysinfo.c index aaa9305b2ea..57405cb7e17 100644 --- a/hw/misc/iotkit-sysinfo.c +++ b/hw/misc/iotkit-sysinfo.c @@ -131,12 +131,11 @@ static const MemoryRegionOps iotkit_sysinfo_ops = { .valid.max_access_size = 4, }; -static Property iotkit_sysinfo_props[] = { +static const Property iotkit_sysinfo_props[] = { DEFINE_PROP_UINT32("SYS_VERSION", IoTKitSysInfo, sys_version, 0), DEFINE_PROP_UINT32("SYS_CONFIG", IoTKitSysInfo, sys_config, 0), DEFINE_PROP_UINT32("sse-version", IoTKitSysInfo, sse_version, 0), DEFINE_PROP_UINT32("IIDR", IoTKitSysInfo, iidr, 0), - DEFINE_PROP_END_OF_LIST() }; static void iotkit_sysinfo_init(Object *obj) @@ -159,7 +158,7 @@ static void iotkit_sysinfo_realize(DeviceState *dev, Error **errp) } } -static void iotkit_sysinfo_class_init(ObjectClass *klass, void *data) +static void iotkit_sysinfo_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/ivshmem-flat.c b/hw/misc/ivshmem-flat.c new file mode 100644 index 00000000000..fe4be6be178 --- /dev/null +++ b/hw/misc/ivshmem-flat.c @@ -0,0 +1,458 @@ +/* + * Inter-VM Shared Memory Flat Device + * + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (c) 2023 Linaro Ltd. + * Authors: + * Gustavo Romero + * + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "hw/qdev-properties-system.h" +#include "hw/sysbus.h" +#include "chardev/char-fe.h" +#include "system/address-spaces.h" +#include "trace.h" + +#include "hw/misc/ivshmem-flat.h" + +static int64_t ivshmem_flat_recv_msg(IvshmemFTState *s, int *pfd) +{ + int64_t msg; + int n, ret; + + n = 0; + do { + ret = qemu_chr_fe_read_all(&s->server_chr, (uint8_t *)&msg + n, + sizeof(msg) - n); + if (ret < 0) { + if (ret == -EINTR) { + continue; + } + exit(1); + } + n += ret; + } while (n < sizeof(msg)); + + if (pfd) { + *pfd = qemu_chr_fe_get_msgfd(&s->server_chr); + } + return le64_to_cpu(msg); +} + +static void ivshmem_flat_irq_handler(void *opaque) +{ + VectorInfo *vi = opaque; + EventNotifier *e = &vi->event_notifier; + uint16_t vector_id; + const VectorInfo (*v)[64]; + + assert(e->initialized); + + vector_id = vi->id; + + /* + * The vector info struct is passed to the handler via the 'opaque' pointer. + * This struct pointer allows the retrieval of the vector ID and its + * associated event notifier. 
However, for triggering an interrupt using + * qemu_set_irq, it's necessary to also have a pointer to the device state, + * i.e., a pointer to the IvshmemFTState struct. Since the vector info + * struct is contained within the IvshmemFTState struct, its pointer can be + * used to obtain the pointer to IvshmemFTState through simple pointer math. + */ + v = (void *)(vi - vector_id); /* v = &IvshmemPeer->vector[0] */ + IvshmemPeer *own_peer = container_of(v, IvshmemPeer, vector); + IvshmemFTState *s = container_of(own_peer, IvshmemFTState, own); + + /* Clear event */ + if (!event_notifier_test_and_clear(e)) { + return; + } + + trace_ivshmem_flat_irq_handler(vector_id); + + /* + * Toggle device's output line, which is connected to interrupt controller, + * generating an interrupt request to the CPU. + */ + qemu_irq_pulse(s->irq); +} + +static IvshmemPeer *ivshmem_flat_find_peer(IvshmemFTState *s, uint16_t peer_id) +{ + IvshmemPeer *peer; + + /* Own ID */ + if (s->own.id == peer_id) { + return &s->own; + } + + /* Peer ID */ + QTAILQ_FOREACH(peer, &s->peer, next) { + if (peer->id == peer_id) { + return peer; + } + } + + return NULL; +} + +static IvshmemPeer *ivshmem_flat_add_peer(IvshmemFTState *s, uint16_t peer_id) +{ + IvshmemPeer *new_peer; + + new_peer = g_malloc0(sizeof(*new_peer)); + new_peer->id = peer_id; + new_peer->vector_counter = 0; + + QTAILQ_INSERT_TAIL(&s->peer, new_peer, next); + + trace_ivshmem_flat_new_peer(peer_id); + + return new_peer; +} + +static void ivshmem_flat_remove_peer(IvshmemFTState *s, uint16_t peer_id) +{ + IvshmemPeer *peer; + + peer = ivshmem_flat_find_peer(s, peer_id); + assert(peer); + + QTAILQ_REMOVE(&s->peer, peer, next); + for (int n = 0; n < peer->vector_counter; n++) { + int efd; + efd = event_notifier_get_fd(&(peer->vector[n].event_notifier)); + close(efd); + } + + g_free(peer); +} + +static void ivshmem_flat_add_vector(IvshmemFTState *s, IvshmemPeer *peer, + int vector_fd) +{ + if (peer->vector_counter >= IVSHMEM_MAX_VECTOR_NUM) { + trace_ivshmem_flat_add_vector_failure(peer->vector_counter, + vector_fd, peer->id); + close(vector_fd); + + return; + } + + trace_ivshmem_flat_add_vector_success(peer->vector_counter, + vector_fd, peer->id); + + /* + * Set vector ID and its associated eventfd notifier and add them to the + * peer. + */ + peer->vector[peer->vector_counter].id = peer->vector_counter; + g_unix_set_fd_nonblocking(vector_fd, true, NULL); + event_notifier_init_fd(&peer->vector[peer->vector_counter].event_notifier, + vector_fd); + + /* + * If it's the device's own ID, register also the handler for the eventfd + * so the device can be notified by the other peers. + */ + if (peer == &s->own) { + qemu_set_fd_handler(vector_fd, ivshmem_flat_irq_handler, NULL, + &peer->vector); + } + + peer->vector_counter++; +} + +static void ivshmem_flat_process_msg(IvshmemFTState *s, uint64_t msg, int fd) +{ + uint16_t peer_id; + IvshmemPeer *peer; + + peer_id = msg & 0xFFFF; + peer = ivshmem_flat_find_peer(s, peer_id); + + if (!peer) { + peer = ivshmem_flat_add_peer(s, peer_id); + } + + if (fd >= 0) { + ivshmem_flat_add_vector(s, peer, fd); + } else { /* fd == -1, which is received when peers disconnect. 
*/ + ivshmem_flat_remove_peer(s, peer_id); + } +} + +static int ivshmem_flat_can_receive_data(void *opaque) +{ + IvshmemFTState *s = opaque; + + assert(s->msg_buffered_bytes < sizeof(s->msg_buf)); + return sizeof(s->msg_buf) - s->msg_buffered_bytes; +} + +static void ivshmem_flat_read_msg(void *opaque, const uint8_t *buf, int size) +{ + IvshmemFTState *s = opaque; + int fd; + int64_t msg; + + assert(size >= 0 && s->msg_buffered_bytes + size <= sizeof(s->msg_buf)); + memcpy((unsigned char *)&s->msg_buf + s->msg_buffered_bytes, buf, size); + s->msg_buffered_bytes += size; + if (s->msg_buffered_bytes < sizeof(s->msg_buf)) { + return; + } + msg = le64_to_cpu(s->msg_buf); + s->msg_buffered_bytes = 0; + + fd = qemu_chr_fe_get_msgfd(&s->server_chr); + + ivshmem_flat_process_msg(s, msg, fd); +} + +static uint64_t ivshmem_flat_iomem_read(void *opaque, + hwaddr offset, unsigned size) +{ + IvshmemFTState *s = opaque; + uint32_t ret; + + trace_ivshmem_flat_read_mmr(offset); + + switch (offset) { + case INTMASK: + ret = 0; /* Ignore read since all bits are reserved in rev 1. */ + break; + case INTSTATUS: + ret = 0; /* Ignore read since all bits are reserved in rev 1. */ + break; + case IVPOSITION: + ret = s->own.id; + break; + case DOORBELL: + trace_ivshmem_flat_read_mmr_doorbell(); /* DOORBELL is write-only */ + ret = 0; + break; + default: + /* Should never reach out here due to iomem map range being exact */ + trace_ivshmem_flat_read_write_mmr_invalid(offset); + ret = 0; + } + + return ret; +} + +static int ivshmem_flat_interrupt_peer(IvshmemFTState *s, + uint16_t peer_id, uint16_t vector_id) +{ + IvshmemPeer *peer; + + peer = ivshmem_flat_find_peer(s, peer_id); + if (!peer) { + trace_ivshmem_flat_interrupt_invalid_peer(peer_id); + return 1; + } + + event_notifier_set(&(peer->vector[vector_id].event_notifier)); + + return 0; +} + +static void ivshmem_flat_iomem_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + IvshmemFTState *s = opaque; + uint16_t peer_id = (value >> 16) & 0xFFFF; + uint16_t vector_id = value & 0xFFFF; + + trace_ivshmem_flat_write_mmr(offset); + + switch (offset) { + case INTMASK: + break; + case INTSTATUS: + break; + case IVPOSITION: + break; + case DOORBELL: + trace_ivshmem_flat_interrupt_peer(peer_id, vector_id); + ivshmem_flat_interrupt_peer(s, peer_id, vector_id); + break; + default: + /* Should never reach out here due to iomem map range being exact. */ + trace_ivshmem_flat_read_write_mmr_invalid(offset); + break; + } +} + +static const MemoryRegionOps ivshmem_flat_ops = { + .read = ivshmem_flat_iomem_read, + .write = ivshmem_flat_iomem_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { /* Read/write aligned at 32 bits. */ + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void ivshmem_flat_instance_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + IvshmemFTState *s = IVSHMEM_FLAT(obj); + + /* + * Init mem region for 4 MMRs (ivshmem_registers), + * 32 bits each => 16 bytes (0x10). + */ + memory_region_init_io(&s->iomem, obj, &ivshmem_flat_ops, s, + "ivshmem-mmio", 0x10); + sysbus_init_mmio(sbd, &s->iomem); + + /* + * Create one output IRQ that will be connect to the + * machine's interrupt controller. 
+ */ + sysbus_init_irq(sbd, &s->irq); + + QTAILQ_INIT(&s->peer); +} + +static bool ivshmem_flat_connect_server(DeviceState *dev, Error **errp) +{ + IvshmemFTState *s = IVSHMEM_FLAT(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + int64_t protocol_version, msg; + int shmem_fd; + uint16_t peer_id; + struct stat fdstat; + + /* Check ivshmem server connection. */ + if (!qemu_chr_fe_backend_connected(&s->server_chr)) { + error_setg(errp, "ivshmem server socket not specified or incorret." + " Can't create device."); + return false; + } + + /* + * Message sequence from server on new connection: + * _____________________________________ + * |STEP| uint64_t msg | int fd | + * ------------------------------------- + * + * 0 PROTOCOL -1 \ + * 1 OWN PEER ID -1 |-- Header/Greeting + * 2 -1 shmem fd / + * + * 3 PEER IDx Other peer's Vector 0 eventfd + * 4 PEER IDx Other peer's Vector 1 eventfd + * . . + * . . + * . . + * N PEER IDy Other peer's Vector 0 eventfd + * N+1 PEER IDy Other peer's Vector 1 eventfd + * . . + * . . + * . . + * + * ivshmem_flat_recv_msg() calls return 'msg' and 'fd'. + * + * See docs/specs/ivshmem-spec.rst for details on the protocol. + */ + + /* Step 0 */ + protocol_version = ivshmem_flat_recv_msg(s, NULL); + + /* Step 1 */ + msg = ivshmem_flat_recv_msg(s, NULL); + peer_id = 0xFFFF & msg; + s->own.id = peer_id; + s->own.vector_counter = 0; + + trace_ivshmem_flat_proto_ver_own_id(protocol_version, s->own.id); + + /* Step 2 */ + msg = ivshmem_flat_recv_msg(s, &shmem_fd); + /* Map shmem fd and MMRs into memory regions. */ + if (msg != -1 || shmem_fd < 0) { + error_setg(errp, "Could not receive valid shmem fd." + " Can't create device!"); + return false; + } + + if (fstat(shmem_fd, &fdstat) != 0) { + error_setg(errp, "Could not determine shmem fd size." + " Can't create device!"); + return false; + } + trace_ivshmem_flat_shmem_size(shmem_fd, fdstat.st_size); + + /* + * Shmem size provided by the ivshmem server must be equal to + * device's shmem size. + */ + if (fdstat.st_size != s->shmem_size) { + error_setg(errp, "Can't map shmem fd: shmem size different" + " from device size!"); + return false; + } + + /* + * Beyond step 2 ivshmem_process_msg, called by ivshmem_flat_read_msg + * handler -- when data is available on the server socket -- will handle + * the additional messages that will be generated by the server as peers + * connect or disconnect. 
+ */ + qemu_chr_fe_set_handlers(&s->server_chr, ivshmem_flat_can_receive_data, + ivshmem_flat_read_msg, NULL, NULL, s, NULL, true); + + memory_region_init_ram_from_fd(&s->shmem, OBJECT(s), + "ivshmem-shmem", s->shmem_size, + RAM_SHARED, shmem_fd, 0, NULL); + sysbus_init_mmio(sbd, &s->shmem); + + return true; +} + +static void ivshmem_flat_realize(DeviceState *dev, Error **errp) +{ + if (!ivshmem_flat_connect_server(dev, errp)) { + return; + } +} + +static const Property ivshmem_flat_props[] = { + DEFINE_PROP_CHR("chardev", IvshmemFTState, server_chr), + DEFINE_PROP_UINT32("shmem-size", IvshmemFTState, shmem_size, 4 * MiB), +}; + +static void ivshmem_flat_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->hotpluggable = true; + dc->realize = ivshmem_flat_realize; + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, ivshmem_flat_props); + + /* Reason: Must be wired up in code (sysbus MRs and IRQ) */ + dc->user_creatable = false; +} + +static const TypeInfo ivshmem_flat_types[] = { + { + .name = TYPE_IVSHMEM_FLAT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IvshmemFTState), + .instance_init = ivshmem_flat_instance_init, + .class_init = ivshmem_flat_class_init, + }, +}; + +DEFINE_TYPES(ivshmem_flat_types) diff --git a/hw/misc/ivshmem-pci.c b/hw/misc/ivshmem-pci.c new file mode 100644 index 00000000000..d47ae739d61 --- /dev/null +++ b/hw/misc/ivshmem-pci.c @@ -0,0 +1,1138 @@ +/* + * Inter-VM Shared Memory PCI device. + * + * Author: + * Cam Macdonell + * + * Based On: cirrus_vga.c + * Copyright (c) 2004 Fabrice Bellard + * Copyright (c) 2004 Makoto Suzuki (suzu) + * + * and rtl8139.c + * Copyright (c) 2006 Igor Kovalenko + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/cutils.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "system/kvm.h" +#include "migration/blocker.h" +#include "migration/vmstate.h" +#include "qemu/error-report.h" +#include "qemu/event_notifier.h" +#include "qemu/module.h" +#include "qom/object_interfaces.h" +#include "chardev/char-fe.h" +#include "system/hostmem.h" +#include "qapi/visitor.h" + +#include "hw/misc/ivshmem.h" +#include "qom/object.h" + +#define PCI_VENDOR_ID_IVSHMEM PCI_VENDOR_ID_REDHAT_QUMRANET +#define PCI_DEVICE_ID_IVSHMEM 0x1110 + +#define IVSHMEM_MAX_PEERS UINT16_MAX +#define IVSHMEM_IOEVENTFD 0 +#define IVSHMEM_MSI 1 + +#define IVSHMEM_REG_BAR_SIZE 0x100 + +#define IVSHMEM_DEBUG 0 +#define IVSHMEM_DPRINTF(fmt, ...) 
\ + do { \ + if (IVSHMEM_DEBUG) { \ + printf("IVSHMEM: " fmt, ## __VA_ARGS__); \ + } \ + } while (0) + +#define TYPE_IVSHMEM_COMMON "ivshmem-common" +typedef struct IVShmemState IVShmemState; +DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_COMMON, + TYPE_IVSHMEM_COMMON) + +#define TYPE_IVSHMEM_PLAIN "ivshmem-plain" +DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_PLAIN, + TYPE_IVSHMEM_PLAIN) + +#define TYPE_IVSHMEM_DOORBELL "ivshmem-doorbell" +DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_DOORBELL, + TYPE_IVSHMEM_DOORBELL) + +#define TYPE_IVSHMEM "ivshmem" +DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM, + TYPE_IVSHMEM) + +typedef struct Peer { + int nb_eventfds; + EventNotifier *eventfds; +} Peer; + +typedef struct MSIVector { + PCIDevice *pdev; + int virq; + bool unmasked; +} MSIVector; + +struct IVShmemState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + uint32_t features; + + /* exactly one of these two may be set */ + HostMemoryBackend *hostmem; /* with interrupts */ + CharBackend server_chr; /* without interrupts */ + + /* registers */ + uint32_t intrmask; + uint32_t intrstatus; + int vm_id; + + /* BARs */ + MemoryRegion ivshmem_mmio; /* BAR 0 (registers) */ + MemoryRegion *ivshmem_bar2; /* BAR 2 (shared memory) */ + MemoryRegion server_bar2; /* used with server_chr */ + + /* interrupt support */ + Peer *peers; + int nb_peers; /* space in @peers[] */ + uint32_t vectors; + MSIVector *msi_vectors; + uint64_t msg_buf; /* buffer for receiving server messages */ + int msg_buffered_bytes; /* #bytes in @msg_buf */ + + /* migration stuff */ + OnOffAuto master; + Error *migration_blocker; +}; + +/* registers for the Inter-VM shared memory device */ +enum ivshmem_registers { + INTRMASK = 0, + INTRSTATUS = 4, + IVPOSITION = 8, + DOORBELL = 12, +}; + +static inline uint32_t ivshmem_has_feature(IVShmemState *ivs, + unsigned int feature) { + return (ivs->features & (1 << feature)); +} + +static inline bool ivshmem_is_master(IVShmemState *s) +{ + assert(s->master != ON_OFF_AUTO_AUTO); + return s->master == ON_OFF_AUTO_ON; +} + +static void ivshmem_IntrMask_write(IVShmemState *s, uint32_t val) +{ + IVSHMEM_DPRINTF("IntrMask write(w) val = 0x%04x\n", val); + + s->intrmask = val; +} + +static uint32_t ivshmem_IntrMask_read(IVShmemState *s) +{ + uint32_t ret = s->intrmask; + + IVSHMEM_DPRINTF("intrmask read(w) val = 0x%04x\n", ret); + return ret; +} + +static void ivshmem_IntrStatus_write(IVShmemState *s, uint32_t val) +{ + IVSHMEM_DPRINTF("IntrStatus write(w) val = 0x%04x\n", val); + + s->intrstatus = val; +} + +static uint32_t ivshmem_IntrStatus_read(IVShmemState *s) +{ + uint32_t ret = s->intrstatus; + + /* reading ISR clears all interrupts */ + s->intrstatus = 0; + return ret; +} + +static void ivshmem_io_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + IVShmemState *s = opaque; + + uint16_t dest = val >> 16; + uint16_t vector = val & 0xff; + + addr &= 0xfc; + + IVSHMEM_DPRINTF("writing to addr " HWADDR_FMT_plx "\n", addr); + switch (addr) + { + case INTRMASK: + ivshmem_IntrMask_write(s, val); + break; + + case INTRSTATUS: + ivshmem_IntrStatus_write(s, val); + break; + + case DOORBELL: + /* check that dest VM ID is reasonable */ + if (dest >= s->nb_peers) { + IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest); + break; + } + + /* check doorbell range */ + if (vector < s->peers[dest].nb_eventfds) { + IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector); + event_notifier_set(&s->peers[dest].eventfds[vector]); + } else { + 
IVSHMEM_DPRINTF("Invalid destination vector %d on VM %d\n", + vector, dest); + } + break; + default: + IVSHMEM_DPRINTF("Unhandled write " HWADDR_FMT_plx "\n", addr); + } +} + +static uint64_t ivshmem_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + + IVShmemState *s = opaque; + uint32_t ret; + + switch (addr) + { + case INTRMASK: + ret = ivshmem_IntrMask_read(s); + break; + + case INTRSTATUS: + ret = ivshmem_IntrStatus_read(s); + break; + + case IVPOSITION: + ret = s->vm_id; + break; + + default: + IVSHMEM_DPRINTF("why are we reading " HWADDR_FMT_plx "\n", addr); + ret = 0; + } + + return ret; +} + +static const MemoryRegionOps ivshmem_mmio_ops = { + .read = ivshmem_io_read, + .write = ivshmem_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void ivshmem_vector_notify(void *opaque) +{ + MSIVector *entry = opaque; + PCIDevice *pdev = entry->pdev; + IVShmemState *s = IVSHMEM_COMMON(pdev); + int vector = entry - s->msi_vectors; + EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; + + if (!event_notifier_test_and_clear(n)) { + return; + } + + IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, vector); + if (ivshmem_has_feature(s, IVSHMEM_MSI)) { + if (msix_enabled(pdev)) { + msix_notify(pdev, vector); + } + } else { + ivshmem_IntrStatus_write(s, 1); + } +} + +static int ivshmem_vector_unmask(PCIDevice *dev, unsigned vector, + MSIMessage msg) +{ + IVShmemState *s = IVSHMEM_COMMON(dev); + EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; + MSIVector *v = &s->msi_vectors[vector]; + int ret; + + IVSHMEM_DPRINTF("vector unmask %p %d\n", dev, vector); + if (!v->pdev) { + error_report("ivshmem: vector %d route does not exist", vector); + return -EINVAL; + } + assert(!v->unmasked); + + ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg, dev); + if (ret < 0) { + return ret; + } + kvm_irqchip_commit_routes(kvm_state); + + ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq); + if (ret < 0) { + return ret; + } + v->unmasked = true; + + return 0; +} + +static void ivshmem_vector_mask(PCIDevice *dev, unsigned vector) +{ + IVShmemState *s = IVSHMEM_COMMON(dev); + EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; + MSIVector *v = &s->msi_vectors[vector]; + int ret; + + IVSHMEM_DPRINTF("vector mask %p %d\n", dev, vector); + if (!v->pdev) { + error_report("ivshmem: vector %d route does not exist", vector); + return; + } + assert(v->unmasked); + + ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, v->virq); + if (ret < 0) { + error_report("remove_irqfd_notifier_gsi failed"); + return; + } + v->unmasked = false; +} + +static void ivshmem_vector_poll(PCIDevice *dev, + unsigned int vector_start, + unsigned int vector_end) +{ + IVShmemState *s = IVSHMEM_COMMON(dev); + unsigned int vector; + + IVSHMEM_DPRINTF("vector poll %p %d-%d\n", dev, vector_start, vector_end); + + vector_end = MIN(vector_end, s->vectors); + + for (vector = vector_start; vector < vector_end; vector++) { + EventNotifier *notifier = &s->peers[s->vm_id].eventfds[vector]; + + if (!msix_is_masked(dev, vector)) { + continue; + } + + if (event_notifier_test_and_clear(notifier)) { + msix_set_pending(dev, vector); + } + } +} + +static void watch_vector_notifier(IVShmemState *s, EventNotifier *n, + int vector) +{ + int eventfd = event_notifier_get_fd(n); + + assert(!s->msi_vectors[vector].pdev); + s->msi_vectors[vector].pdev = PCI_DEVICE(s); + + qemu_set_fd_handler(eventfd, ivshmem_vector_notify, + NULL, 
&s->msi_vectors[vector]); +} + +static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i) +{ + memory_region_add_eventfd(&s->ivshmem_mmio, + DOORBELL, + 4, + true, + (posn << 16) | i, + &s->peers[posn].eventfds[i]); +} + +static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i) +{ + memory_region_del_eventfd(&s->ivshmem_mmio, + DOORBELL, + 4, + true, + (posn << 16) | i, + &s->peers[posn].eventfds[i]); +} + +static void close_peer_eventfds(IVShmemState *s, int posn) +{ + int i, n; + + assert(posn >= 0 && posn < s->nb_peers); + n = s->peers[posn].nb_eventfds; + + if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) { + memory_region_transaction_begin(); + for (i = 0; i < n; i++) { + ivshmem_del_eventfd(s, posn, i); + } + memory_region_transaction_commit(); + } + + for (i = 0; i < n; i++) { + event_notifier_cleanup(&s->peers[posn].eventfds[i]); + } + + g_free(s->peers[posn].eventfds); + s->peers[posn].nb_eventfds = 0; +} + +static void resize_peers(IVShmemState *s, int nb_peers) +{ + int old_nb_peers = s->nb_peers; + int i; + + assert(nb_peers > old_nb_peers); + IVSHMEM_DPRINTF("bumping storage to %d peers\n", nb_peers); + + s->peers = g_renew(Peer, s->peers, nb_peers); + s->nb_peers = nb_peers; + + for (i = old_nb_peers; i < nb_peers; i++) { + s->peers[i].eventfds = g_new0(EventNotifier, s->vectors); + s->peers[i].nb_eventfds = 0; + } +} + +static void ivshmem_add_kvm_msi_virq(IVShmemState *s, int vector, + Error **errp) +{ + PCIDevice *pdev = PCI_DEVICE(s); + KVMRouteChange c; + int ret; + + IVSHMEM_DPRINTF("ivshmem_add_kvm_msi_virq vector:%d\n", vector); + assert(!s->msi_vectors[vector].pdev); + + c = kvm_irqchip_begin_route_changes(kvm_state); + ret = kvm_irqchip_add_msi_route(&c, vector, pdev); + if (ret < 0) { + error_setg(errp, "kvm_irqchip_add_msi_route failed"); + return; + } + kvm_irqchip_commit_route_changes(&c); + + s->msi_vectors[vector].virq = ret; + s->msi_vectors[vector].pdev = pdev; +} + +static void setup_interrupt(IVShmemState *s, int vector, Error **errp) +{ + EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; + bool with_irqfd = kvm_msi_via_irqfd_enabled() && + ivshmem_has_feature(s, IVSHMEM_MSI); + PCIDevice *pdev = PCI_DEVICE(s); + Error *err = NULL; + + IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector); + + if (!with_irqfd) { + IVSHMEM_DPRINTF("with eventfd\n"); + watch_vector_notifier(s, n, vector); + } else if (msix_enabled(pdev)) { + IVSHMEM_DPRINTF("with irqfd\n"); + ivshmem_add_kvm_msi_virq(s, vector, &err); + if (err) { + error_propagate(errp, err); + return; + } + + if (!msix_is_masked(pdev, vector)) { + kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, + s->msi_vectors[vector].virq); + /* TODO handle error */ + } + } else { + /* it will be delayed until msix is enabled, in write_config */ + IVSHMEM_DPRINTF("with irqfd, delayed until msix enabled\n"); + } +} + +static void process_msg_shmem(IVShmemState *s, int fd, Error **errp) +{ + struct stat buf; + size_t size; + + if (fd < 0) { + error_setg(errp, "server didn't provide fd with shared memory message"); + return; + } + + if (s->ivshmem_bar2) { + error_setg(errp, "server sent unexpected shared memory message"); + close(fd); + return; + } + + if (fstat(fd, &buf) < 0) { + error_setg_errno(errp, errno, + "can't determine size of shared memory sent by server"); + close(fd); + return; + } + + size = buf.st_size; + + /* mmap the region and map into the BAR2 */ + if (!memory_region_init_ram_from_fd(&s->server_bar2, OBJECT(s), + "ivshmem.bar2", size, RAM_SHARED, + fd, 0, errp)) { + 
return; + } + + s->ivshmem_bar2 = &s->server_bar2; +} + +static void process_msg_disconnect(IVShmemState *s, uint16_t posn, + Error **errp) +{ + IVSHMEM_DPRINTF("posn %d has gone away\n", posn); + if (posn >= s->nb_peers || posn == s->vm_id) { + error_setg(errp, "invalid peer %d", posn); + return; + } + close_peer_eventfds(s, posn); +} + +static void process_msg_connect(IVShmemState *s, uint16_t posn, int fd, + Error **errp) +{ + Peer *peer = &s->peers[posn]; + int vector; + + /* + * The N-th connect message for this peer comes with the file + * descriptor for vector N-1. Count messages to find the vector. + */ + if (peer->nb_eventfds >= s->vectors) { + error_setg(errp, "Too many eventfd received, device has %d vectors", + s->vectors); + close(fd); + return; + } + vector = peer->nb_eventfds++; + + IVSHMEM_DPRINTF("eventfds[%d][%d] = %d\n", posn, vector, fd); + event_notifier_init_fd(&peer->eventfds[vector], fd); + g_unix_set_fd_nonblocking(fd, true, NULL); /* msix/irqfd poll non block */ + + if (posn == s->vm_id) { + setup_interrupt(s, vector, errp); + /* TODO do we need to handle the error? */ + } + + if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) { + ivshmem_add_eventfd(s, posn, vector); + } +} + +static void process_msg(IVShmemState *s, int64_t msg, int fd, Error **errp) +{ + IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n", msg, fd); + + if (msg < -1 || msg > IVSHMEM_MAX_PEERS) { + error_setg(errp, "server sent invalid message %" PRId64, msg); + if (fd >= 0) { + close(fd); + } + return; + } + + if (msg == -1) { + process_msg_shmem(s, fd, errp); + return; + } + + if (msg >= s->nb_peers) { + resize_peers(s, msg + 1); + } + + if (fd >= 0) { + process_msg_connect(s, msg, fd, errp); + } else { + process_msg_disconnect(s, msg, errp); + } +} + +static int ivshmem_can_receive(void *opaque) +{ + IVShmemState *s = opaque; + + assert(s->msg_buffered_bytes < sizeof(s->msg_buf)); + return sizeof(s->msg_buf) - s->msg_buffered_bytes; +} + +static void ivshmem_read(void *opaque, const uint8_t *buf, int size) +{ + IVShmemState *s = opaque; + Error *err = NULL; + int fd; + int64_t msg; + + assert(size >= 0 && s->msg_buffered_bytes + size <= sizeof(s->msg_buf)); + memcpy((unsigned char *)&s->msg_buf + s->msg_buffered_bytes, buf, size); + s->msg_buffered_bytes += size; + if (s->msg_buffered_bytes < sizeof(s->msg_buf)) { + return; + } + msg = le64_to_cpu(s->msg_buf); + s->msg_buffered_bytes = 0; + + fd = qemu_chr_fe_get_msgfd(&s->server_chr); + + process_msg(s, msg, fd, &err); + if (err) { + error_report_err(err); + } +} + +static int64_t ivshmem_recv_msg(IVShmemState *s, int *pfd, Error **errp) +{ + int64_t msg; + int n, ret; + + n = 0; + do { + ret = qemu_chr_fe_read_all(&s->server_chr, (uint8_t *)&msg + n, + sizeof(msg) - n); + if (ret < 0) { + if (ret == -EINTR) { + continue; + } + error_setg_errno(errp, -ret, "read from server failed"); + return INT64_MIN; + } + n += ret; + } while (n < sizeof(msg)); + + *pfd = qemu_chr_fe_get_msgfd(&s->server_chr); + return le64_to_cpu(msg); +} + +static void ivshmem_recv_setup(IVShmemState *s, Error **errp) +{ + Error *err = NULL; + int64_t msg; + int fd; + + msg = ivshmem_recv_msg(s, &fd, &err); + if (err) { + error_propagate(errp, err); + return; + } + if (msg != IVSHMEM_PROTOCOL_VERSION) { + error_setg(errp, "server sent version %" PRId64 ", expecting %d", + msg, IVSHMEM_PROTOCOL_VERSION); + return; + } + if (fd != -1) { + error_setg(errp, "server sent invalid version message"); + return; + } + + /* + * ivshmem-server sends the remaining initial messages in a 
fixed + * order, but the device has always accepted them in any order. + * Stay as compatible as practical, just in case people use + * servers that behave differently. + */ + + /* + * ivshmem_device_spec.txt has always required the ID message + * right here, and ivshmem-server has always complied. However, + * older versions of the device accepted it out of order, but + * broke when an interrupt setup message arrived before it. + */ + msg = ivshmem_recv_msg(s, &fd, &err); + if (err) { + error_propagate(errp, err); + return; + } + if (fd != -1 || msg < 0 || msg > IVSHMEM_MAX_PEERS) { + error_setg(errp, "server sent invalid ID message"); + return; + } + s->vm_id = msg; + + /* + * Receive more messages until we got shared memory. + */ + do { + msg = ivshmem_recv_msg(s, &fd, &err); + if (err) { + error_propagate(errp, err); + return; + } + process_msg(s, msg, fd, &err); + if (err) { + error_propagate(errp, err); + return; + } + } while (msg != -1); + + /* + * This function must either map the shared memory or fail. The + * loop above ensures that: it terminates normally only after it + * successfully processed the server's shared memory message. + * Assert that actually mapped the shared memory: + */ + assert(s->ivshmem_bar2); +} + +/* Select the MSI-X vectors used by device. + * ivshmem maps events to vectors statically, so + * we just enable all vectors on init and after reset. */ +static void ivshmem_msix_vector_use(IVShmemState *s) +{ + PCIDevice *d = PCI_DEVICE(s); + int i; + + for (i = 0; i < s->vectors; i++) { + msix_vector_use(d, i); + } +} + +static void ivshmem_disable_irqfd(IVShmemState *s); + +static void ivshmem_reset(DeviceState *d) +{ + IVShmemState *s = IVSHMEM_COMMON(d); + + ivshmem_disable_irqfd(s); + + s->intrstatus = 0; + s->intrmask = 0; + if (ivshmem_has_feature(s, IVSHMEM_MSI)) { + ivshmem_msix_vector_use(s); + } +} + +static int ivshmem_setup_interrupts(IVShmemState *s, Error **errp) +{ + /* allocate QEMU callback data for receiving interrupts */ + s->msi_vectors = g_new0(MSIVector, s->vectors); + + if (ivshmem_has_feature(s, IVSHMEM_MSI)) { + if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1, errp)) { + return -1; + } + + IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors); + ivshmem_msix_vector_use(s); + } + + return 0; +} + +static void ivshmem_remove_kvm_msi_virq(IVShmemState *s, int vector) +{ + IVSHMEM_DPRINTF("ivshmem_remove_kvm_msi_virq vector:%d\n", vector); + + if (s->msi_vectors[vector].pdev == NULL) { + return; + } + + /* it was cleaned when masked in the frontend. 
*/ + kvm_irqchip_release_virq(kvm_state, s->msi_vectors[vector].virq); + + s->msi_vectors[vector].pdev = NULL; +} + +static void ivshmem_enable_irqfd(IVShmemState *s) +{ + PCIDevice *pdev = PCI_DEVICE(s); + int i; + + for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) { + Error *err = NULL; + + ivshmem_add_kvm_msi_virq(s, i, &err); + if (err) { + error_report_err(err); + goto undo; + } + } + + if (msix_set_vector_notifiers(pdev, + ivshmem_vector_unmask, + ivshmem_vector_mask, + ivshmem_vector_poll)) { + error_report("ivshmem: msix_set_vector_notifiers failed"); + goto undo; + } + return; + +undo: + while (--i >= 0) { + ivshmem_remove_kvm_msi_virq(s, i); + } +} + +static void ivshmem_disable_irqfd(IVShmemState *s) +{ + PCIDevice *pdev = PCI_DEVICE(s); + int i; + + if (!pdev->msix_vector_use_notifier) { + return; + } + + msix_unset_vector_notifiers(pdev); + + for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) { + /* + * MSI-X is already disabled here so msix_unset_vector_notifiers() + * didn't call our release notifier. Do it now to keep our masks and + * unmasks balanced. + */ + if (s->msi_vectors[i].unmasked) { + ivshmem_vector_mask(pdev, i); + } + ivshmem_remove_kvm_msi_virq(s, i); + } + +} + +static void ivshmem_write_config(PCIDevice *pdev, uint32_t address, + uint32_t val, int len) +{ + IVShmemState *s = IVSHMEM_COMMON(pdev); + int is_enabled, was_enabled = msix_enabled(pdev); + + pci_default_write_config(pdev, address, val, len); + is_enabled = msix_enabled(pdev); + + if (kvm_msi_via_irqfd_enabled()) { + if (!was_enabled && is_enabled) { + ivshmem_enable_irqfd(s); + } else if (was_enabled && !is_enabled) { + ivshmem_disable_irqfd(s); + } + } +} + +static void ivshmem_common_realize(PCIDevice *dev, Error **errp) +{ + ERRP_GUARD(); + IVShmemState *s = IVSHMEM_COMMON(dev); + Error *err = NULL; + uint8_t *pci_conf; + + /* IRQFD requires MSI */ + if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) && + !ivshmem_has_feature(s, IVSHMEM_MSI)) { + error_setg(errp, "ioeventfd/irqfd requires MSI"); + return; + } + + pci_conf = dev->config; + pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY; + + memory_region_init_io(&s->ivshmem_mmio, OBJECT(s), &ivshmem_mmio_ops, s, + "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE); + + /* region for registers*/ + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, + &s->ivshmem_mmio); + + if (s->hostmem != NULL) { + IVSHMEM_DPRINTF("using hostmem\n"); + + s->ivshmem_bar2 = host_memory_backend_get_memory(s->hostmem); + host_memory_backend_set_mapped(s->hostmem, true); + } else { + Chardev *chr = qemu_chr_fe_get_driver(&s->server_chr); + assert(chr); + + IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n", + chr->filename); + + /* we allocate enough space for 16 peers and grow as needed */ + resize_peers(s, 16); + + /* + * Receive setup messages from server synchronously. + * Older versions did it asynchronously, but that creates a + * number of entertaining race conditions. + */ + ivshmem_recv_setup(s, &err); + if (err) { + error_propagate(errp, err); + return; + } + + if (s->master == ON_OFF_AUTO_ON && s->vm_id != 0) { + error_setg(errp, + "master must connect to the server before any peers"); + return; + } + + qemu_chr_fe_set_handlers(&s->server_chr, ivshmem_can_receive, + ivshmem_read, NULL, NULL, s, NULL, true); + + if (ivshmem_setup_interrupts(s, errp) < 0) { + error_prepend(errp, "Failed to initialize interrupts: "); + return; + } + } + + if (s->master == ON_OFF_AUTO_AUTO) { + s->master = s->vm_id == 0 ? 
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; + } + + if (!ivshmem_is_master(s)) { + error_setg(&s->migration_blocker, + "Migration is disabled when using feature 'peer mode' in device 'ivshmem'"); + if (migrate_add_blocker(&s->migration_blocker, errp) < 0) { + return; + } + } + + vmstate_register_ram(s->ivshmem_bar2, DEVICE(s)); + pci_register_bar(PCI_DEVICE(s), 2, + PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_PREFETCH | + PCI_BASE_ADDRESS_MEM_TYPE_64, + s->ivshmem_bar2); +} + +static void ivshmem_exit(PCIDevice *dev) +{ + IVShmemState *s = IVSHMEM_COMMON(dev); + int i; + + migrate_del_blocker(&s->migration_blocker); + + if (memory_region_is_mapped(s->ivshmem_bar2)) { + if (!s->hostmem) { + void *addr = memory_region_get_ram_ptr(s->ivshmem_bar2); + int fd; + + if (munmap(addr, memory_region_size(s->ivshmem_bar2) == -1)) { + error_report("Failed to munmap shared memory %s", + strerror(errno)); + } + + fd = memory_region_get_fd(s->ivshmem_bar2); + close(fd); + } + + vmstate_unregister_ram(s->ivshmem_bar2, DEVICE(dev)); + } + + if (s->hostmem) { + host_memory_backend_set_mapped(s->hostmem, false); + } + + if (s->peers) { + for (i = 0; i < s->nb_peers; i++) { + close_peer_eventfds(s, i); + } + g_free(s->peers); + } + + if (ivshmem_has_feature(s, IVSHMEM_MSI)) { + msix_uninit_exclusive_bar(dev); + } + + g_free(s->msi_vectors); +} + +static int ivshmem_pre_load(void *opaque) +{ + IVShmemState *s = opaque; + + if (!ivshmem_is_master(s)) { + error_report("'peer' devices are not migratable"); + return -EINVAL; + } + + return 0; +} + +static int ivshmem_post_load(void *opaque, int version_id) +{ + IVShmemState *s = opaque; + + if (ivshmem_has_feature(s, IVSHMEM_MSI)) { + ivshmem_msix_vector_use(s); + } + return 0; +} + +static void ivshmem_common_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = ivshmem_common_realize; + k->exit = ivshmem_exit; + k->config_write = ivshmem_write_config; + k->vendor_id = PCI_VENDOR_ID_IVSHMEM; + k->device_id = PCI_DEVICE_ID_IVSHMEM; + k->class_id = PCI_CLASS_MEMORY_RAM; + k->revision = 1; + device_class_set_legacy_reset(dc, ivshmem_reset); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "Inter-VM shared memory"; +} + +static const TypeInfo ivshmem_common_info = { + .name = TYPE_IVSHMEM_COMMON, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(IVShmemState), + .abstract = true, + .class_init = ivshmem_common_class_init, + .interfaces = (const InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static const VMStateDescription ivshmem_plain_vmsd = { + .name = TYPE_IVSHMEM_PLAIN, + .version_id = 0, + .minimum_version_id = 0, + .pre_load = ivshmem_pre_load, + .post_load = ivshmem_post_load, + .fields = (const VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, IVShmemState), + VMSTATE_UINT32(intrstatus, IVShmemState), + VMSTATE_UINT32(intrmask, IVShmemState), + VMSTATE_END_OF_LIST() + }, +}; + +static const Property ivshmem_plain_properties[] = { + DEFINE_PROP_ON_OFF_AUTO("master", IVShmemState, master, ON_OFF_AUTO_OFF), + DEFINE_PROP_LINK("memdev", IVShmemState, hostmem, TYPE_MEMORY_BACKEND, + HostMemoryBackend *), +}; + +static void ivshmem_plain_realize(PCIDevice *dev, Error **errp) +{ + IVShmemState *s = IVSHMEM_COMMON(dev); + + if (!s->hostmem) { + error_setg(errp, "You must specify a 'memdev'"); + return; + } else if (host_memory_backend_is_mapped(s->hostmem)) { + error_setg(errp, "can't use already busy memdev: %s", 
+ object_get_canonical_path_component(OBJECT(s->hostmem))); + return; + } + + ivshmem_common_realize(dev, errp); +} + +static void ivshmem_plain_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = ivshmem_plain_realize; + device_class_set_props(dc, ivshmem_plain_properties); + dc->vmsd = &ivshmem_plain_vmsd; +} + +static const TypeInfo ivshmem_plain_info = { + .name = TYPE_IVSHMEM_PLAIN, + .parent = TYPE_IVSHMEM_COMMON, + .instance_size = sizeof(IVShmemState), + .class_init = ivshmem_plain_class_init, +}; + +static const VMStateDescription ivshmem_doorbell_vmsd = { + .name = TYPE_IVSHMEM_DOORBELL, + .version_id = 0, + .minimum_version_id = 0, + .pre_load = ivshmem_pre_load, + .post_load = ivshmem_post_load, + .fields = (const VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, IVShmemState), + VMSTATE_MSIX(parent_obj, IVShmemState), + VMSTATE_UINT32(intrstatus, IVShmemState), + VMSTATE_UINT32(intrmask, IVShmemState), + VMSTATE_END_OF_LIST() + }, +}; + +static const Property ivshmem_doorbell_properties[] = { + DEFINE_PROP_CHR("chardev", IVShmemState, server_chr), + DEFINE_PROP_UINT32("vectors", IVShmemState, vectors, 1), + DEFINE_PROP_BIT("ioeventfd", IVShmemState, features, IVSHMEM_IOEVENTFD, + true), + DEFINE_PROP_ON_OFF_AUTO("master", IVShmemState, master, ON_OFF_AUTO_OFF), +}; + +static void ivshmem_doorbell_init(Object *obj) +{ + IVShmemState *s = IVSHMEM_DOORBELL(obj); + + s->features |= (1 << IVSHMEM_MSI); +} + +static void ivshmem_doorbell_realize(PCIDevice *dev, Error **errp) +{ + IVShmemState *s = IVSHMEM_COMMON(dev); + + if (!qemu_chr_fe_backend_connected(&s->server_chr)) { + error_setg(errp, "You must specify a 'chardev'"); + return; + } + + ivshmem_common_realize(dev, errp); +} + +static void ivshmem_doorbell_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = ivshmem_doorbell_realize; + device_class_set_props(dc, ivshmem_doorbell_properties); + dc->vmsd = &ivshmem_doorbell_vmsd; +} + +static const TypeInfo ivshmem_doorbell_info = { + .name = TYPE_IVSHMEM_DOORBELL, + .parent = TYPE_IVSHMEM_COMMON, + .instance_size = sizeof(IVShmemState), + .instance_init = ivshmem_doorbell_init, + .class_init = ivshmem_doorbell_class_init, +}; + +static void ivshmem_register_types(void) +{ + type_register_static(&ivshmem_common_info); + type_register_static(&ivshmem_plain_info); + type_register_static(&ivshmem_doorbell_info); +} + +type_init(ivshmem_register_types) diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c deleted file mode 100644 index de49d1b8a82..00000000000 --- a/hw/misc/ivshmem.c +++ /dev/null @@ -1,1133 +0,0 @@ -/* - * Inter-VM Shared Memory PCI device. - * - * Author: - * Cam Macdonell - * - * Based On: cirrus_vga.c - * Copyright (c) 2004 Fabrice Bellard - * Copyright (c) 2004 Makoto Suzuki (suzu) - * - * and rtl8139.c - * Copyright (c) 2006 Igor Kovalenko - * - * This code is licensed under the GNU GPL v2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. 
- */ - -#include "qemu/osdep.h" -#include "qemu/units.h" -#include "qapi/error.h" -#include "qemu/cutils.h" -#include "hw/pci/pci.h" -#include "hw/qdev-properties.h" -#include "hw/qdev-properties-system.h" -#include "hw/pci/msi.h" -#include "hw/pci/msix.h" -#include "sysemu/kvm.h" -#include "migration/blocker.h" -#include "migration/vmstate.h" -#include "qemu/error-report.h" -#include "qemu/event_notifier.h" -#include "qemu/module.h" -#include "qom/object_interfaces.h" -#include "chardev/char-fe.h" -#include "sysemu/hostmem.h" -#include "qapi/visitor.h" - -#include "hw/misc/ivshmem.h" -#include "qom/object.h" - -#define PCI_VENDOR_ID_IVSHMEM PCI_VENDOR_ID_REDHAT_QUMRANET -#define PCI_DEVICE_ID_IVSHMEM 0x1110 - -#define IVSHMEM_MAX_PEERS UINT16_MAX -#define IVSHMEM_IOEVENTFD 0 -#define IVSHMEM_MSI 1 - -#define IVSHMEM_REG_BAR_SIZE 0x100 - -#define IVSHMEM_DEBUG 0 -#define IVSHMEM_DPRINTF(fmt, ...) \ - do { \ - if (IVSHMEM_DEBUG) { \ - printf("IVSHMEM: " fmt, ## __VA_ARGS__); \ - } \ - } while (0) - -#define TYPE_IVSHMEM_COMMON "ivshmem-common" -typedef struct IVShmemState IVShmemState; -DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_COMMON, - TYPE_IVSHMEM_COMMON) - -#define TYPE_IVSHMEM_PLAIN "ivshmem-plain" -DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_PLAIN, - TYPE_IVSHMEM_PLAIN) - -#define TYPE_IVSHMEM_DOORBELL "ivshmem-doorbell" -DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_DOORBELL, - TYPE_IVSHMEM_DOORBELL) - -#define TYPE_IVSHMEM "ivshmem" -DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM, - TYPE_IVSHMEM) - -typedef struct Peer { - int nb_eventfds; - EventNotifier *eventfds; -} Peer; - -typedef struct MSIVector { - PCIDevice *pdev; - int virq; - bool unmasked; -} MSIVector; - -struct IVShmemState { - /*< private >*/ - PCIDevice parent_obj; - /*< public >*/ - - uint32_t features; - - /* exactly one of these two may be set */ - HostMemoryBackend *hostmem; /* with interrupts */ - CharBackend server_chr; /* without interrupts */ - - /* registers */ - uint32_t intrmask; - uint32_t intrstatus; - int vm_id; - - /* BARs */ - MemoryRegion ivshmem_mmio; /* BAR 0 (registers) */ - MemoryRegion *ivshmem_bar2; /* BAR 2 (shared memory) */ - MemoryRegion server_bar2; /* used with server_chr */ - - /* interrupt support */ - Peer *peers; - int nb_peers; /* space in @peers[] */ - uint32_t vectors; - MSIVector *msi_vectors; - uint64_t msg_buf; /* buffer for receiving server messages */ - int msg_buffered_bytes; /* #bytes in @msg_buf */ - - /* migration stuff */ - OnOffAuto master; - Error *migration_blocker; -}; - -/* registers for the Inter-VM shared memory device */ -enum ivshmem_registers { - INTRMASK = 0, - INTRSTATUS = 4, - IVPOSITION = 8, - DOORBELL = 12, -}; - -static inline uint32_t ivshmem_has_feature(IVShmemState *ivs, - unsigned int feature) { - return (ivs->features & (1 << feature)); -} - -static inline bool ivshmem_is_master(IVShmemState *s) -{ - assert(s->master != ON_OFF_AUTO_AUTO); - return s->master == ON_OFF_AUTO_ON; -} - -static void ivshmem_IntrMask_write(IVShmemState *s, uint32_t val) -{ - IVSHMEM_DPRINTF("IntrMask write(w) val = 0x%04x\n", val); - - s->intrmask = val; -} - -static uint32_t ivshmem_IntrMask_read(IVShmemState *s) -{ - uint32_t ret = s->intrmask; - - IVSHMEM_DPRINTF("intrmask read(w) val = 0x%04x\n", ret); - return ret; -} - -static void ivshmem_IntrStatus_write(IVShmemState *s, uint32_t val) -{ - IVSHMEM_DPRINTF("IntrStatus write(w) val = 0x%04x\n", val); - - s->intrstatus = val; -} - -static uint32_t ivshmem_IntrStatus_read(IVShmemState *s) -{ - uint32_t ret = 
s->intrstatus; - - /* reading ISR clears all interrupts */ - s->intrstatus = 0; - return ret; -} - -static void ivshmem_io_write(void *opaque, hwaddr addr, - uint64_t val, unsigned size) -{ - IVShmemState *s = opaque; - - uint16_t dest = val >> 16; - uint16_t vector = val & 0xff; - - addr &= 0xfc; - - IVSHMEM_DPRINTF("writing to addr " HWADDR_FMT_plx "\n", addr); - switch (addr) - { - case INTRMASK: - ivshmem_IntrMask_write(s, val); - break; - - case INTRSTATUS: - ivshmem_IntrStatus_write(s, val); - break; - - case DOORBELL: - /* check that dest VM ID is reasonable */ - if (dest >= s->nb_peers) { - IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest); - break; - } - - /* check doorbell range */ - if (vector < s->peers[dest].nb_eventfds) { - IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector); - event_notifier_set(&s->peers[dest].eventfds[vector]); - } else { - IVSHMEM_DPRINTF("Invalid destination vector %d on VM %d\n", - vector, dest); - } - break; - default: - IVSHMEM_DPRINTF("Unhandled write " HWADDR_FMT_plx "\n", addr); - } -} - -static uint64_t ivshmem_io_read(void *opaque, hwaddr addr, - unsigned size) -{ - - IVShmemState *s = opaque; - uint32_t ret; - - switch (addr) - { - case INTRMASK: - ret = ivshmem_IntrMask_read(s); - break; - - case INTRSTATUS: - ret = ivshmem_IntrStatus_read(s); - break; - - case IVPOSITION: - ret = s->vm_id; - break; - - default: - IVSHMEM_DPRINTF("why are we reading " HWADDR_FMT_plx "\n", addr); - ret = 0; - } - - return ret; -} - -static const MemoryRegionOps ivshmem_mmio_ops = { - .read = ivshmem_io_read, - .write = ivshmem_io_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .impl = { - .min_access_size = 4, - .max_access_size = 4, - }, -}; - -static void ivshmem_vector_notify(void *opaque) -{ - MSIVector *entry = opaque; - PCIDevice *pdev = entry->pdev; - IVShmemState *s = IVSHMEM_COMMON(pdev); - int vector = entry - s->msi_vectors; - EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; - - if (!event_notifier_test_and_clear(n)) { - return; - } - - IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, vector); - if (ivshmem_has_feature(s, IVSHMEM_MSI)) { - if (msix_enabled(pdev)) { - msix_notify(pdev, vector); - } - } else { - ivshmem_IntrStatus_write(s, 1); - } -} - -static int ivshmem_vector_unmask(PCIDevice *dev, unsigned vector, - MSIMessage msg) -{ - IVShmemState *s = IVSHMEM_COMMON(dev); - EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; - MSIVector *v = &s->msi_vectors[vector]; - int ret; - - IVSHMEM_DPRINTF("vector unmask %p %d\n", dev, vector); - if (!v->pdev) { - error_report("ivshmem: vector %d route does not exist", vector); - return -EINVAL; - } - assert(!v->unmasked); - - ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg, dev); - if (ret < 0) { - return ret; - } - kvm_irqchip_commit_routes(kvm_state); - - ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq); - if (ret < 0) { - return ret; - } - v->unmasked = true; - - return 0; -} - -static void ivshmem_vector_mask(PCIDevice *dev, unsigned vector) -{ - IVShmemState *s = IVSHMEM_COMMON(dev); - EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; - MSIVector *v = &s->msi_vectors[vector]; - int ret; - - IVSHMEM_DPRINTF("vector mask %p %d\n", dev, vector); - if (!v->pdev) { - error_report("ivshmem: vector %d route does not exist", vector); - return; - } - assert(v->unmasked); - - ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, v->virq); - if (ret < 0) { - error_report("remove_irqfd_notifier_gsi failed"); - return; - } - 
v->unmasked = false; -} - -static void ivshmem_vector_poll(PCIDevice *dev, - unsigned int vector_start, - unsigned int vector_end) -{ - IVShmemState *s = IVSHMEM_COMMON(dev); - unsigned int vector; - - IVSHMEM_DPRINTF("vector poll %p %d-%d\n", dev, vector_start, vector_end); - - vector_end = MIN(vector_end, s->vectors); - - for (vector = vector_start; vector < vector_end; vector++) { - EventNotifier *notifier = &s->peers[s->vm_id].eventfds[vector]; - - if (!msix_is_masked(dev, vector)) { - continue; - } - - if (event_notifier_test_and_clear(notifier)) { - msix_set_pending(dev, vector); - } - } -} - -static void watch_vector_notifier(IVShmemState *s, EventNotifier *n, - int vector) -{ - int eventfd = event_notifier_get_fd(n); - - assert(!s->msi_vectors[vector].pdev); - s->msi_vectors[vector].pdev = PCI_DEVICE(s); - - qemu_set_fd_handler(eventfd, ivshmem_vector_notify, - NULL, &s->msi_vectors[vector]); -} - -static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i) -{ - memory_region_add_eventfd(&s->ivshmem_mmio, - DOORBELL, - 4, - true, - (posn << 16) | i, - &s->peers[posn].eventfds[i]); -} - -static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i) -{ - memory_region_del_eventfd(&s->ivshmem_mmio, - DOORBELL, - 4, - true, - (posn << 16) | i, - &s->peers[posn].eventfds[i]); -} - -static void close_peer_eventfds(IVShmemState *s, int posn) -{ - int i, n; - - assert(posn >= 0 && posn < s->nb_peers); - n = s->peers[posn].nb_eventfds; - - if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) { - memory_region_transaction_begin(); - for (i = 0; i < n; i++) { - ivshmem_del_eventfd(s, posn, i); - } - memory_region_transaction_commit(); - } - - for (i = 0; i < n; i++) { - event_notifier_cleanup(&s->peers[posn].eventfds[i]); - } - - g_free(s->peers[posn].eventfds); - s->peers[posn].nb_eventfds = 0; -} - -static void resize_peers(IVShmemState *s, int nb_peers) -{ - int old_nb_peers = s->nb_peers; - int i; - - assert(nb_peers > old_nb_peers); - IVSHMEM_DPRINTF("bumping storage to %d peers\n", nb_peers); - - s->peers = g_renew(Peer, s->peers, nb_peers); - s->nb_peers = nb_peers; - - for (i = old_nb_peers; i < nb_peers; i++) { - s->peers[i].eventfds = g_new0(EventNotifier, s->vectors); - s->peers[i].nb_eventfds = 0; - } -} - -static void ivshmem_add_kvm_msi_virq(IVShmemState *s, int vector, - Error **errp) -{ - PCIDevice *pdev = PCI_DEVICE(s); - KVMRouteChange c; - int ret; - - IVSHMEM_DPRINTF("ivshmem_add_kvm_msi_virq vector:%d\n", vector); - assert(!s->msi_vectors[vector].pdev); - - c = kvm_irqchip_begin_route_changes(kvm_state); - ret = kvm_irqchip_add_msi_route(&c, vector, pdev); - if (ret < 0) { - error_setg(errp, "kvm_irqchip_add_msi_route failed"); - return; - } - kvm_irqchip_commit_route_changes(&c); - - s->msi_vectors[vector].virq = ret; - s->msi_vectors[vector].pdev = pdev; -} - -static void setup_interrupt(IVShmemState *s, int vector, Error **errp) -{ - EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; - bool with_irqfd = kvm_msi_via_irqfd_enabled() && - ivshmem_has_feature(s, IVSHMEM_MSI); - PCIDevice *pdev = PCI_DEVICE(s); - Error *err = NULL; - - IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector); - - if (!with_irqfd) { - IVSHMEM_DPRINTF("with eventfd\n"); - watch_vector_notifier(s, n, vector); - } else if (msix_enabled(pdev)) { - IVSHMEM_DPRINTF("with irqfd\n"); - ivshmem_add_kvm_msi_virq(s, vector, &err); - if (err) { - error_propagate(errp, err); - return; - } - - if (!msix_is_masked(pdev, vector)) { - kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, - 
s->msi_vectors[vector].virq); - /* TODO handle error */ - } - } else { - /* it will be delayed until msix is enabled, in write_config */ - IVSHMEM_DPRINTF("with irqfd, delayed until msix enabled\n"); - } -} - -static void process_msg_shmem(IVShmemState *s, int fd, Error **errp) -{ - struct stat buf; - size_t size; - - if (s->ivshmem_bar2) { - error_setg(errp, "server sent unexpected shared memory message"); - close(fd); - return; - } - - if (fstat(fd, &buf) < 0) { - error_setg_errno(errp, errno, - "can't determine size of shared memory sent by server"); - close(fd); - return; - } - - size = buf.st_size; - - /* mmap the region and map into the BAR2 */ - if (!memory_region_init_ram_from_fd(&s->server_bar2, OBJECT(s), - "ivshmem.bar2", size, RAM_SHARED, - fd, 0, errp)) { - return; - } - - s->ivshmem_bar2 = &s->server_bar2; -} - -static void process_msg_disconnect(IVShmemState *s, uint16_t posn, - Error **errp) -{ - IVSHMEM_DPRINTF("posn %d has gone away\n", posn); - if (posn >= s->nb_peers || posn == s->vm_id) { - error_setg(errp, "invalid peer %d", posn); - return; - } - close_peer_eventfds(s, posn); -} - -static void process_msg_connect(IVShmemState *s, uint16_t posn, int fd, - Error **errp) -{ - Peer *peer = &s->peers[posn]; - int vector; - - /* - * The N-th connect message for this peer comes with the file - * descriptor for vector N-1. Count messages to find the vector. - */ - if (peer->nb_eventfds >= s->vectors) { - error_setg(errp, "Too many eventfd received, device has %d vectors", - s->vectors); - close(fd); - return; - } - vector = peer->nb_eventfds++; - - IVSHMEM_DPRINTF("eventfds[%d][%d] = %d\n", posn, vector, fd); - event_notifier_init_fd(&peer->eventfds[vector], fd); - g_unix_set_fd_nonblocking(fd, true, NULL); /* msix/irqfd poll non block */ - - if (posn == s->vm_id) { - setup_interrupt(s, vector, errp); - /* TODO do we need to handle the error? 
*/ - } - - if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) { - ivshmem_add_eventfd(s, posn, vector); - } -} - -static void process_msg(IVShmemState *s, int64_t msg, int fd, Error **errp) -{ - IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n", msg, fd); - - if (msg < -1 || msg > IVSHMEM_MAX_PEERS) { - error_setg(errp, "server sent invalid message %" PRId64, msg); - close(fd); - return; - } - - if (msg == -1) { - process_msg_shmem(s, fd, errp); - return; - } - - if (msg >= s->nb_peers) { - resize_peers(s, msg + 1); - } - - if (fd >= 0) { - process_msg_connect(s, msg, fd, errp); - } else { - process_msg_disconnect(s, msg, errp); - } -} - -static int ivshmem_can_receive(void *opaque) -{ - IVShmemState *s = opaque; - - assert(s->msg_buffered_bytes < sizeof(s->msg_buf)); - return sizeof(s->msg_buf) - s->msg_buffered_bytes; -} - -static void ivshmem_read(void *opaque, const uint8_t *buf, int size) -{ - IVShmemState *s = opaque; - Error *err = NULL; - int fd; - int64_t msg; - - assert(size >= 0 && s->msg_buffered_bytes + size <= sizeof(s->msg_buf)); - memcpy((unsigned char *)&s->msg_buf + s->msg_buffered_bytes, buf, size); - s->msg_buffered_bytes += size; - if (s->msg_buffered_bytes < sizeof(s->msg_buf)) { - return; - } - msg = le64_to_cpu(s->msg_buf); - s->msg_buffered_bytes = 0; - - fd = qemu_chr_fe_get_msgfd(&s->server_chr); - - process_msg(s, msg, fd, &err); - if (err) { - error_report_err(err); - } -} - -static int64_t ivshmem_recv_msg(IVShmemState *s, int *pfd, Error **errp) -{ - int64_t msg; - int n, ret; - - n = 0; - do { - ret = qemu_chr_fe_read_all(&s->server_chr, (uint8_t *)&msg + n, - sizeof(msg) - n); - if (ret < 0) { - if (ret == -EINTR) { - continue; - } - error_setg_errno(errp, -ret, "read from server failed"); - return INT64_MIN; - } - n += ret; - } while (n < sizeof(msg)); - - *pfd = qemu_chr_fe_get_msgfd(&s->server_chr); - return le64_to_cpu(msg); -} - -static void ivshmem_recv_setup(IVShmemState *s, Error **errp) -{ - Error *err = NULL; - int64_t msg; - int fd; - - msg = ivshmem_recv_msg(s, &fd, &err); - if (err) { - error_propagate(errp, err); - return; - } - if (msg != IVSHMEM_PROTOCOL_VERSION) { - error_setg(errp, "server sent version %" PRId64 ", expecting %d", - msg, IVSHMEM_PROTOCOL_VERSION); - return; - } - if (fd != -1) { - error_setg(errp, "server sent invalid version message"); - return; - } - - /* - * ivshmem-server sends the remaining initial messages in a fixed - * order, but the device has always accepted them in any order. - * Stay as compatible as practical, just in case people use - * servers that behave differently. - */ - - /* - * ivshmem_device_spec.txt has always required the ID message - * right here, and ivshmem-server has always complied. However, - * older versions of the device accepted it out of order, but - * broke when an interrupt setup message arrived before it. - */ - msg = ivshmem_recv_msg(s, &fd, &err); - if (err) { - error_propagate(errp, err); - return; - } - if (fd != -1 || msg < 0 || msg > IVSHMEM_MAX_PEERS) { - error_setg(errp, "server sent invalid ID message"); - return; - } - s->vm_id = msg; - - /* - * Receive more messages until we got shared memory. - */ - do { - msg = ivshmem_recv_msg(s, &fd, &err); - if (err) { - error_propagate(errp, err); - return; - } - process_msg(s, msg, fd, &err); - if (err) { - error_propagate(errp, err); - return; - } - } while (msg != -1); - - /* - * This function must either map the shared memory or fail. 
The - * loop above ensures that: it terminates normally only after it - * successfully processed the server's shared memory message. - * Assert that actually mapped the shared memory: - */ - assert(s->ivshmem_bar2); -} - -/* Select the MSI-X vectors used by device. - * ivshmem maps events to vectors statically, so - * we just enable all vectors on init and after reset. */ -static void ivshmem_msix_vector_use(IVShmemState *s) -{ - PCIDevice *d = PCI_DEVICE(s); - int i; - - for (i = 0; i < s->vectors; i++) { - msix_vector_use(d, i); - } -} - -static void ivshmem_disable_irqfd(IVShmemState *s); - -static void ivshmem_reset(DeviceState *d) -{ - IVShmemState *s = IVSHMEM_COMMON(d); - - ivshmem_disable_irqfd(s); - - s->intrstatus = 0; - s->intrmask = 0; - if (ivshmem_has_feature(s, IVSHMEM_MSI)) { - ivshmem_msix_vector_use(s); - } -} - -static int ivshmem_setup_interrupts(IVShmemState *s, Error **errp) -{ - /* allocate QEMU callback data for receiving interrupts */ - s->msi_vectors = g_new0(MSIVector, s->vectors); - - if (ivshmem_has_feature(s, IVSHMEM_MSI)) { - if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1, errp)) { - return -1; - } - - IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors); - ivshmem_msix_vector_use(s); - } - - return 0; -} - -static void ivshmem_remove_kvm_msi_virq(IVShmemState *s, int vector) -{ - IVSHMEM_DPRINTF("ivshmem_remove_kvm_msi_virq vector:%d\n", vector); - - if (s->msi_vectors[vector].pdev == NULL) { - return; - } - - /* it was cleaned when masked in the frontend. */ - kvm_irqchip_release_virq(kvm_state, s->msi_vectors[vector].virq); - - s->msi_vectors[vector].pdev = NULL; -} - -static void ivshmem_enable_irqfd(IVShmemState *s) -{ - PCIDevice *pdev = PCI_DEVICE(s); - int i; - - for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) { - Error *err = NULL; - - ivshmem_add_kvm_msi_virq(s, i, &err); - if (err) { - error_report_err(err); - goto undo; - } - } - - if (msix_set_vector_notifiers(pdev, - ivshmem_vector_unmask, - ivshmem_vector_mask, - ivshmem_vector_poll)) { - error_report("ivshmem: msix_set_vector_notifiers failed"); - goto undo; - } - return; - -undo: - while (--i >= 0) { - ivshmem_remove_kvm_msi_virq(s, i); - } -} - -static void ivshmem_disable_irqfd(IVShmemState *s) -{ - PCIDevice *pdev = PCI_DEVICE(s); - int i; - - if (!pdev->msix_vector_use_notifier) { - return; - } - - msix_unset_vector_notifiers(pdev); - - for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) { - /* - * MSI-X is already disabled here so msix_unset_vector_notifiers() - * didn't call our release notifier. Do it now to keep our masks and - * unmasks balanced. 
- */ - if (s->msi_vectors[i].unmasked) { - ivshmem_vector_mask(pdev, i); - } - ivshmem_remove_kvm_msi_virq(s, i); - } - -} - -static void ivshmem_write_config(PCIDevice *pdev, uint32_t address, - uint32_t val, int len) -{ - IVShmemState *s = IVSHMEM_COMMON(pdev); - int is_enabled, was_enabled = msix_enabled(pdev); - - pci_default_write_config(pdev, address, val, len); - is_enabled = msix_enabled(pdev); - - if (kvm_msi_via_irqfd_enabled()) { - if (!was_enabled && is_enabled) { - ivshmem_enable_irqfd(s); - } else if (was_enabled && !is_enabled) { - ivshmem_disable_irqfd(s); - } - } -} - -static void ivshmem_common_realize(PCIDevice *dev, Error **errp) -{ - ERRP_GUARD(); - IVShmemState *s = IVSHMEM_COMMON(dev); - Error *err = NULL; - uint8_t *pci_conf; - - /* IRQFD requires MSI */ - if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) && - !ivshmem_has_feature(s, IVSHMEM_MSI)) { - error_setg(errp, "ioeventfd/irqfd requires MSI"); - return; - } - - pci_conf = dev->config; - pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY; - - memory_region_init_io(&s->ivshmem_mmio, OBJECT(s), &ivshmem_mmio_ops, s, - "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE); - - /* region for registers*/ - pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, - &s->ivshmem_mmio); - - if (s->hostmem != NULL) { - IVSHMEM_DPRINTF("using hostmem\n"); - - s->ivshmem_bar2 = host_memory_backend_get_memory(s->hostmem); - host_memory_backend_set_mapped(s->hostmem, true); - } else { - Chardev *chr = qemu_chr_fe_get_driver(&s->server_chr); - assert(chr); - - IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n", - chr->filename); - - /* we allocate enough space for 16 peers and grow as needed */ - resize_peers(s, 16); - - /* - * Receive setup messages from server synchronously. - * Older versions did it asynchronously, but that creates a - * number of entertaining race conditions. - */ - ivshmem_recv_setup(s, &err); - if (err) { - error_propagate(errp, err); - return; - } - - if (s->master == ON_OFF_AUTO_ON && s->vm_id != 0) { - error_setg(errp, - "master must connect to the server before any peers"); - return; - } - - qemu_chr_fe_set_handlers(&s->server_chr, ivshmem_can_receive, - ivshmem_read, NULL, NULL, s, NULL, true); - - if (ivshmem_setup_interrupts(s, errp) < 0) { - error_prepend(errp, "Failed to initialize interrupts: "); - return; - } - } - - if (s->master == ON_OFF_AUTO_AUTO) { - s->master = s->vm_id == 0 ? 
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; - } - - if (!ivshmem_is_master(s)) { - error_setg(&s->migration_blocker, - "Migration is disabled when using feature 'peer mode' in device 'ivshmem'"); - if (migrate_add_blocker(&s->migration_blocker, errp) < 0) { - return; - } - } - - vmstate_register_ram(s->ivshmem_bar2, DEVICE(s)); - pci_register_bar(PCI_DEVICE(s), 2, - PCI_BASE_ADDRESS_SPACE_MEMORY | - PCI_BASE_ADDRESS_MEM_PREFETCH | - PCI_BASE_ADDRESS_MEM_TYPE_64, - s->ivshmem_bar2); -} - -static void ivshmem_exit(PCIDevice *dev) -{ - IVShmemState *s = IVSHMEM_COMMON(dev); - int i; - - migrate_del_blocker(&s->migration_blocker); - - if (memory_region_is_mapped(s->ivshmem_bar2)) { - if (!s->hostmem) { - void *addr = memory_region_get_ram_ptr(s->ivshmem_bar2); - int fd; - - if (munmap(addr, memory_region_size(s->ivshmem_bar2) == -1)) { - error_report("Failed to munmap shared memory %s", - strerror(errno)); - } - - fd = memory_region_get_fd(s->ivshmem_bar2); - close(fd); - } - - vmstate_unregister_ram(s->ivshmem_bar2, DEVICE(dev)); - } - - if (s->hostmem) { - host_memory_backend_set_mapped(s->hostmem, false); - } - - if (s->peers) { - for (i = 0; i < s->nb_peers; i++) { - close_peer_eventfds(s, i); - } - g_free(s->peers); - } - - if (ivshmem_has_feature(s, IVSHMEM_MSI)) { - msix_uninit_exclusive_bar(dev); - } - - g_free(s->msi_vectors); -} - -static int ivshmem_pre_load(void *opaque) -{ - IVShmemState *s = opaque; - - if (!ivshmem_is_master(s)) { - error_report("'peer' devices are not migratable"); - return -EINVAL; - } - - return 0; -} - -static int ivshmem_post_load(void *opaque, int version_id) -{ - IVShmemState *s = opaque; - - if (ivshmem_has_feature(s, IVSHMEM_MSI)) { - ivshmem_msix_vector_use(s); - } - return 0; -} - -static void ivshmem_common_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - - k->realize = ivshmem_common_realize; - k->exit = ivshmem_exit; - k->config_write = ivshmem_write_config; - k->vendor_id = PCI_VENDOR_ID_IVSHMEM; - k->device_id = PCI_DEVICE_ID_IVSHMEM; - k->class_id = PCI_CLASS_MEMORY_RAM; - k->revision = 1; - dc->reset = ivshmem_reset; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); - dc->desc = "Inter-VM shared memory"; -} - -static const TypeInfo ivshmem_common_info = { - .name = TYPE_IVSHMEM_COMMON, - .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(IVShmemState), - .abstract = true, - .class_init = ivshmem_common_class_init, - .interfaces = (InterfaceInfo[]) { - { INTERFACE_CONVENTIONAL_PCI_DEVICE }, - { }, - }, -}; - -static const VMStateDescription ivshmem_plain_vmsd = { - .name = TYPE_IVSHMEM_PLAIN, - .version_id = 0, - .minimum_version_id = 0, - .pre_load = ivshmem_pre_load, - .post_load = ivshmem_post_load, - .fields = (const VMStateField[]) { - VMSTATE_PCI_DEVICE(parent_obj, IVShmemState), - VMSTATE_UINT32(intrstatus, IVShmemState), - VMSTATE_UINT32(intrmask, IVShmemState), - VMSTATE_END_OF_LIST() - }, -}; - -static Property ivshmem_plain_properties[] = { - DEFINE_PROP_ON_OFF_AUTO("master", IVShmemState, master, ON_OFF_AUTO_OFF), - DEFINE_PROP_LINK("memdev", IVShmemState, hostmem, TYPE_MEMORY_BACKEND, - HostMemoryBackend *), - DEFINE_PROP_END_OF_LIST(), -}; - -static void ivshmem_plain_realize(PCIDevice *dev, Error **errp) -{ - IVShmemState *s = IVSHMEM_COMMON(dev); - - if (!s->hostmem) { - error_setg(errp, "You must specify a 'memdev'"); - return; - } else if (host_memory_backend_is_mapped(s->hostmem)) { - error_setg(errp, "can't use already busy memdev: %s", - 
object_get_canonical_path_component(OBJECT(s->hostmem))); - return; - } - - ivshmem_common_realize(dev, errp); -} - -static void ivshmem_plain_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - - k->realize = ivshmem_plain_realize; - device_class_set_props(dc, ivshmem_plain_properties); - dc->vmsd = &ivshmem_plain_vmsd; -} - -static const TypeInfo ivshmem_plain_info = { - .name = TYPE_IVSHMEM_PLAIN, - .parent = TYPE_IVSHMEM_COMMON, - .instance_size = sizeof(IVShmemState), - .class_init = ivshmem_plain_class_init, -}; - -static const VMStateDescription ivshmem_doorbell_vmsd = { - .name = TYPE_IVSHMEM_DOORBELL, - .version_id = 0, - .minimum_version_id = 0, - .pre_load = ivshmem_pre_load, - .post_load = ivshmem_post_load, - .fields = (const VMStateField[]) { - VMSTATE_PCI_DEVICE(parent_obj, IVShmemState), - VMSTATE_MSIX(parent_obj, IVShmemState), - VMSTATE_UINT32(intrstatus, IVShmemState), - VMSTATE_UINT32(intrmask, IVShmemState), - VMSTATE_END_OF_LIST() - }, -}; - -static Property ivshmem_doorbell_properties[] = { - DEFINE_PROP_CHR("chardev", IVShmemState, server_chr), - DEFINE_PROP_UINT32("vectors", IVShmemState, vectors, 1), - DEFINE_PROP_BIT("ioeventfd", IVShmemState, features, IVSHMEM_IOEVENTFD, - true), - DEFINE_PROP_ON_OFF_AUTO("master", IVShmemState, master, ON_OFF_AUTO_OFF), - DEFINE_PROP_END_OF_LIST(), -}; - -static void ivshmem_doorbell_init(Object *obj) -{ - IVShmemState *s = IVSHMEM_DOORBELL(obj); - - s->features |= (1 << IVSHMEM_MSI); -} - -static void ivshmem_doorbell_realize(PCIDevice *dev, Error **errp) -{ - IVShmemState *s = IVSHMEM_COMMON(dev); - - if (!qemu_chr_fe_backend_connected(&s->server_chr)) { - error_setg(errp, "You must specify a 'chardev'"); - return; - } - - ivshmem_common_realize(dev, errp); -} - -static void ivshmem_doorbell_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - - k->realize = ivshmem_doorbell_realize; - device_class_set_props(dc, ivshmem_doorbell_properties); - dc->vmsd = &ivshmem_doorbell_vmsd; -} - -static const TypeInfo ivshmem_doorbell_info = { - .name = TYPE_IVSHMEM_DOORBELL, - .parent = TYPE_IVSHMEM_COMMON, - .instance_size = sizeof(IVShmemState), - .instance_init = ivshmem_doorbell_init, - .class_init = ivshmem_doorbell_class_init, -}; - -static void ivshmem_register_types(void) -{ - type_register_static(&ivshmem_common_info); - type_register_static(&ivshmem_plain_info); - type_register_static(&ivshmem_doorbell_info); -} - -type_init(ivshmem_register_types) diff --git a/hw/misc/lasi.c b/hw/misc/lasi.c index 970fc98b5c7..9f758c6a863 100644 --- a/hw/misc/lasi.c +++ b/hw/misc/lasi.c @@ -15,8 +15,8 @@ #include "qapi/error.h" #include "trace.h" #include "hw/irq.h" -#include "sysemu/sysemu.h" -#include "sysemu/runstate.h" +#include "system/system.h" +#include "system/runstate.h" #include "migration/vmstate.h" #include "qom/object.h" #include "hw/misc/lasi.h" @@ -263,11 +263,11 @@ static void lasi_init(Object *obj) qdev_init_gpio_in(DEVICE(obj), lasi_set_irq, LASI_IRQS); } -static void lasi_class_init(ObjectClass *klass, void *data) +static void lasi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = lasi_reset; + device_class_set_legacy_reset(dc, lasi_reset); dc->vmsd = &vmstate_lasi; } diff --git a/hw/misc/led.c b/hw/misc/led.c index d9998ab8954..f7f709072af 100644 --- a/hw/misc/led.c +++ b/hw/misc/led.c @@ -101,20 +101,19 @@ 
static void led_realize(DeviceState *dev, Error **errp) qdev_init_gpio_in(DEVICE(s), led_set_state_gpio_handler, 1); } -static Property led_properties[] = { +static const Property led_properties[] = { DEFINE_PROP_STRING("color", LEDState, color), DEFINE_PROP_STRING("description", LEDState, description), DEFINE_PROP_BOOL("gpio-active-high", LEDState, gpio_active_high, true), - DEFINE_PROP_END_OF_LIST(), }; -static void led_class_init(ObjectClass *klass, void *data) +static void led_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->desc = "LED"; dc->vmsd = &vmstate_led; - dc->reset = led_reset; + device_class_set_legacy_reset(dc, led_reset); dc->realize = led_realize; set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); device_class_set_props(dc, led_properties); diff --git a/hw/misc/mac_via.c b/hw/misc/mac_via.c index 652395b84fc..bc37e2a2cb7 100644 --- a/hw/misc/mac_via.c +++ b/hw/misc/mac_via.c @@ -16,7 +16,7 @@ */ #include "qemu/osdep.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "migration/vmstate.h" #include "hw/sysbus.h" #include "hw/irq.h" @@ -24,13 +24,13 @@ #include "hw/misc/mac_via.h" #include "hw/misc/mos6522.h" #include "hw/input/adb.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "qapi/error.h" #include "qemu/cutils.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" -#include "sysemu/block-backend.h" -#include "sysemu/rtc.h" +#include "system/block-backend.h" +#include "system/rtc.h" #include "trace.h" #include "qemu/log.h" @@ -495,7 +495,6 @@ static void via1_rtc_update(MOS6522Q800VIA1State *v1s) break; default: g_assert_not_reached(); - break; } return; } @@ -556,7 +555,6 @@ static void via1_rtc_update(MOS6522Q800VIA1State *v1s) break; default: g_assert_not_reached(); - break; } return; } @@ -1324,12 +1322,11 @@ static const VMStateDescription vmstate_q800_via1 = { } }; -static Property mos6522_q800_via1_properties[] = { +static const Property mos6522_q800_via1_properties[] = { DEFINE_PROP_DRIVE("drive", MOS6522Q800VIA1State, blk), - DEFINE_PROP_END_OF_LIST(), }; -static void mos6522_q800_via1_class_init(ObjectClass *oc, void *data) +static void mos6522_q800_via1_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); ResettableClass *rc = RESETTABLE_CLASS(oc); @@ -1418,7 +1415,7 @@ static const VMStateDescription vmstate_q800_via2 = { } }; -static void mos6522_q800_via2_class_init(ObjectClass *oc, void *data) +static void mos6522_q800_via2_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); ResettableClass *rc = RESETTABLE_CLASS(oc); diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c index beab0ffb13f..bcd00c9bb1b 100644 --- a/hw/misc/macio/cuda.c +++ b/hw/misc/macio/cuda.c @@ -29,8 +29,8 @@ #include "migration/vmstate.h" #include "hw/misc/macio/cuda.h" #include "qemu/timer.h" -#include "sysemu/runstate.h" -#include "sysemu/rtc.h" +#include "system/runstate.h" +#include "system/rtc.h" #include "qapi/error.h" #include "qemu/cutils.h" #include "qemu/log.h" @@ -554,17 +554,16 @@ static void cuda_init(Object *obj) DEVICE(obj), "adb.0"); } -static Property cuda_properties[] = { +static const Property cuda_properties[] = { DEFINE_PROP_UINT64("timebase-frequency", CUDAState, tb_frequency, 0), - DEFINE_PROP_END_OF_LIST() }; -static void cuda_class_init(ObjectClass *oc, void *data) +static void cuda_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = 
cuda_realize; - dc->reset = cuda_reset; + device_class_set_legacy_reset(dc, cuda_reset); dc->vmsd = &vmstate_cuda; device_class_set_props(dc, cuda_properties); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); @@ -599,7 +598,7 @@ static void mos6522_cuda_reset_hold(Object *obj, ResetType type) ms->timers[1].frequency = (SCALE_US * 6000) / 4700; } -static void mos6522_cuda_class_init(ObjectClass *oc, void *data) +static void mos6522_cuda_class_init(ObjectClass *oc, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(oc); MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); diff --git a/hw/misc/macio/gpio.c b/hw/misc/macio/gpio.c index 549563747dc..990551f91f4 100644 --- a/hw/misc/macio/gpio.c +++ b/hw/misc/macio/gpio.c @@ -34,6 +34,11 @@ #include "qemu/module.h" #include "trace.h" +enum MacioGPIORegisterBits { + OUT_DATA = 1, + IN_DATA = 2, + OUT_ENABLE = 4, +}; void macio_set_gpio(MacIOGPIOState *s, uint32_t gpio, bool state) { @@ -41,14 +46,14 @@ void macio_set_gpio(MacIOGPIOState *s, uint32_t gpio, bool state) trace_macio_set_gpio(gpio, state); - if (s->gpio_regs[gpio] & 4) { + if (s->gpio_regs[gpio] & OUT_ENABLE) { qemu_log_mask(LOG_GUEST_ERROR, "GPIO: Setting GPIO %d while it's an output\n", gpio); } - new_reg = s->gpio_regs[gpio] & ~2; + new_reg = s->gpio_regs[gpio] & ~IN_DATA; if (state) { - new_reg |= 2; + new_reg |= IN_DATA; } if (new_reg == s->gpio_regs[gpio]) { @@ -107,12 +112,12 @@ static void macio_gpio_write(void *opaque, hwaddr addr, uint64_t value, addr -= 8; if (addr < 36) { - value &= ~2; + value &= ~IN_DATA; - if (value & 4) { - ibit = (value & 1) << 1; + if (value & OUT_ENABLE) { + ibit = (value & OUT_DATA) << 1; } else { - ibit = s->gpio_regs[addr] & 2; + ibit = s->gpio_regs[addr] & IN_DATA; } s->gpio_regs[addr] = value | ibit; @@ -135,7 +140,7 @@ static uint64_t macio_gpio_read(void *opaque, hwaddr addr, unsigned size) } } - trace_macio_gpio_write(addr, val); + trace_macio_gpio_read(addr, val); return val; } @@ -189,12 +194,12 @@ static void macio_gpio_nmi(NMIState *n, int cpu_index, Error **errp) macio_set_gpio(MACIO_GPIO(n), 9, false); } -static void macio_gpio_class_init(ObjectClass *oc, void *data) +static void macio_gpio_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); NMIClass *nc = NMI_CLASS(oc); - dc->reset = macio_gpio_reset; + device_class_set_legacy_reset(dc, macio_gpio_reset); dc->vmsd = &vmstate_macio_gpio; nc->nmi_monitor_handler = macio_gpio_nmi; } @@ -205,7 +210,7 @@ static const TypeInfo macio_gpio_init_info = { .instance_size = sizeof(MacIOGPIOState), .instance_init = macio_gpio_init, .class_init = macio_gpio_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_NMI }, { } }, diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c index 2a528ea08ca..b2b42dd5622 100644 --- a/hw/misc/macio/mac_dbdma.c +++ b/hw/misc/macio/mac_dbdma.c @@ -44,7 +44,7 @@ #include "qemu/main-loop.h" #include "qemu/module.h" #include "qemu/log.h" -#include "sysemu/dma.h" +#include "system/dma.h" /* debug DBDMA */ #define DEBUG_DBDMA 0 @@ -917,12 +917,12 @@ static void mac_dbdma_realize(DeviceState *dev, Error **errp) s->bh = qemu_bh_new_guarded(DBDMA_run_bh, s, &dev->mem_reentrancy_guard); } -static void mac_dbdma_class_init(ObjectClass *oc, void *data) +static void mac_dbdma_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = mac_dbdma_realize; - dc->reset = mac_dbdma_reset; + device_class_set_legacy_reset(dc, mac_dbdma_reset); dc->vmsd = 
&vmstate_dbdma; } diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c index 3f449f91c00..6710485d728 100644 --- a/hw/misc/macio/macio.c +++ b/hw/misc/macio/macio.c @@ -385,7 +385,7 @@ static const VMStateDescription vmstate_macio_oldworld = { } }; -static void macio_oldworld_class_init(ObjectClass *oc, void *data) +static void macio_oldworld_class_init(ObjectClass *oc, const void *data) { PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -405,13 +405,12 @@ static const VMStateDescription vmstate_macio_newworld = { } }; -static Property macio_newworld_properties[] = { +static const Property macio_newworld_properties[] = { DEFINE_PROP_BOOL("has-pmu", NewWorldMacIOState, has_pmu, false), DEFINE_PROP_BOOL("has-adb", NewWorldMacIOState, has_adb, false), - DEFINE_PROP_END_OF_LIST() }; -static void macio_newworld_class_init(ObjectClass *oc, void *data) +static void macio_newworld_class_init(ObjectClass *oc, const void *data) { PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -422,12 +421,11 @@ static void macio_newworld_class_init(ObjectClass *oc, void *data) device_class_set_props(dc, macio_newworld_properties); } -static Property macio_properties[] = { +static const Property macio_properties[] = { DEFINE_PROP_UINT64("frequency", MacIOState, frequency, 0), - DEFINE_PROP_END_OF_LIST() }; -static void macio_class_init(ObjectClass *klass, void *data) +static void macio_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -467,7 +465,7 @@ static const TypeInfo macio_type_info = { .instance_init = macio_instance_init, .abstract = true, .class_init = macio_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/misc/macio/pmu.c b/hw/misc/macio/pmu.c index 238da58eade..37349139948 100644 --- a/hw/misc/macio/pmu.c +++ b/hw/misc/macio/pmu.c @@ -34,8 +34,8 @@ #include "hw/irq.h" #include "hw/misc/macio/pmu.h" #include "qemu/timer.h" -#include "sysemu/runstate.h" -#include "sysemu/rtc.h" +#include "system/runstate.h" +#include "system/rtc.h" #include "qapi/error.h" #include "qemu/cutils.h" #include "qemu/log.h" @@ -760,17 +760,16 @@ static void pmu_init(Object *obj) sysbus_init_mmio(d, &s->mem); } -static Property pmu_properties[] = { +static const Property pmu_properties[] = { DEFINE_PROP_BOOL("has-adb", PMUState, has_adb, true), - DEFINE_PROP_END_OF_LIST() }; -static void pmu_class_init(ObjectClass *oc, void *data) +static void pmu_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = pmu_realize; - dc->reset = pmu_reset; + device_class_set_legacy_reset(dc, pmu_reset); dc->vmsd = &vmstate_pmu; device_class_set_props(dc, pmu_properties); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); @@ -809,7 +808,7 @@ static void mos6522_pmu_reset_hold(Object *obj, ResetType type) s->last_b = ms->b = TACK | TREQ; } -static void mos6522_pmu_class_init(ObjectClass *oc, void *data) +static void mos6522_pmu_class_init(ObjectClass *oc, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(oc); MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); diff --git a/hw/misc/macio/trace-events b/hw/misc/macio/trace-events index ad4b9d1c08e..055a407aebb 100644 --- a/hw/misc/macio/trace-events +++ b/hw/misc/macio/trace-events @@ -18,7 +18,8 @@ macio_timer_read(uint64_t addr, unsigned len, uint32_t val) "read addr 0x%"PRIx6 macio_set_gpio(int 
gpio, bool state) "setting GPIO %d to %d" macio_gpio_irq_assert(int gpio) "asserting GPIO %d" macio_gpio_irq_deassert(int gpio) "deasserting GPIO %d" -macio_gpio_write(uint64_t addr, uint64_t val) "addr: 0x%"PRIx64" value: 0x%"PRIx64 +macio_gpio_write(uint64_t addr, uint64_t val) "addr 0x%"PRIx64" val 0x%"PRIx64 +macio_gpio_read(uint64_t addr, uint64_t val) "addr 0x%"PRIx64" val 0x%"PRIx64 # pmu.c pmu_adb_poll(int olen) "ADB autopoll, olen=%d" diff --git a/hw/misc/max78000_aes.c b/hw/misc/max78000_aes.c new file mode 100644 index 00000000000..d883ddd2b61 --- /dev/null +++ b/hw/misc/max78000_aes.c @@ -0,0 +1,229 @@ +/* + * MAX78000 AES + * + * Copyright (c) 2025 Jackson Donaldson + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "hw/misc/max78000_aes.h" +#include "crypto/aes.h" + +static void max78000_aes_set_status(Max78000AesState *s) +{ + s->status = 0; + if (s->result_index >= 16) { + s->status |= OUTPUT_FULL; + } + if (s->result_index == 0) { + s->status |= OUTPUT_EMPTY; + } + if (s->data_index >= 16) { + s->status |= INPUT_FULL; + } + if (s->data_index == 0) { + s->status |= INPUT_EMPTY; + } +} + +static uint64_t max78000_aes_read(void *opaque, hwaddr addr, + unsigned int size) +{ + Max78000AesState *s = opaque; + switch (addr) { + case CTRL: + return s->ctrl; + + case STATUS: + return s->status; + + case INTFL: + return s->intfl; + + case INTEN: + return s->inten; + + case FIFO: + if (s->result_index >= 4) { + s->intfl &= ~DONE; + s->result_index -= 4; + max78000_aes_set_status(s); + return ldl_be_p(&s->result[s->result_index]); + } else{ + return 0; + } + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" + HWADDR_PRIx "\n", __func__, addr); + break; + + } + return 0; +} + +static void max78000_aes_do_crypto(Max78000AesState *s) +{ + int keylen = 256; + uint8_t *keydata = s->key; + if ((s->ctrl & KEY_SIZE) == 0) { + keylen = 128; + keydata += 16; + } else if ((s->ctrl & KEY_SIZE) == 1 << 6) { + keylen = 192; + keydata += 8; + } + + /* + * The MAX78000 AES engine stores an internal key, which it uses only + * for decryption. This results in the slightly odd looking pairs of + * set_encrypt and set_decrypt calls below; s->internal_key is + * being stored for later use in both cases. 
+ */ + AES_KEY key; + if ((s->ctrl & TYPE) == 0) { + AES_set_encrypt_key(keydata, keylen, &key); + AES_set_decrypt_key(keydata, keylen, &s->internal_key); + AES_encrypt(s->data, s->result, &key); + s->result_index = 16; + } else if ((s->ctrl & TYPE) == 1 << 8) { + AES_set_decrypt_key(keydata, keylen, &key); + AES_set_decrypt_key(keydata, keylen, &s->internal_key); + AES_decrypt(s->data, s->result, &key); + s->result_index = 16; + } else{ + AES_decrypt(s->data, s->result, &s->internal_key); + s->result_index = 16; + } + s->intfl |= DONE; +} + +static void max78000_aes_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + Max78000AesState *s = opaque; + uint32_t val = val64; + switch (addr) { + case CTRL: + if (val & OUTPUT_FLUSH) { + s->result_index = 0; + val &= ~OUTPUT_FLUSH; + } + if (val & INPUT_FLUSH) { + s->data_index = 0; + val &= ~INPUT_FLUSH; + } + if (val & START) { + max78000_aes_do_crypto(s); + } + + /* Hardware appears to stay enabled even if 0 written */ + s->ctrl = val | (s->ctrl & AES_EN); + break; + + case FIFO: + assert(s->data_index <= 12); + stl_be_p(&s->data[12 - s->data_index], val); + s->data_index += 4; + if (s->data_index >= 16) { + s->data_index = 0; + max78000_aes_do_crypto(s); + } + break; + + case KEY_BASE ... KEY_END - 4: + stl_be_p(&s->key[(KEY_END - KEY_BASE - 4) - (addr - KEY_BASE)], val); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" + HWADDR_PRIx "\n", __func__, addr); + break; + + } + max78000_aes_set_status(s); +} + +static void max78000_aes_reset_hold(Object *obj, ResetType type) +{ + Max78000AesState *s = MAX78000_AES(obj); + s->ctrl = 0; + s->status = 0; + s->intfl = 0; + s->inten = 0; + + s->data_index = 0; + s->result_index = 0; + + memset(s->data, 0, sizeof(s->data)); + memset(s->key, 0, sizeof(s->key)); + memset(s->result, 0, sizeof(s->result)); + memset(&s->internal_key, 0, sizeof(s->internal_key)); +} + +static const MemoryRegionOps max78000_aes_ops = { + .read = max78000_aes_read, + .write = max78000_aes_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static const VMStateDescription vmstate_max78000_aes = { + .name = TYPE_MAX78000_AES, + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT32(ctrl, Max78000AesState), + VMSTATE_UINT32(status, Max78000AesState), + VMSTATE_UINT32(intfl, Max78000AesState), + VMSTATE_UINT32(inten, Max78000AesState), + VMSTATE_UINT8_ARRAY(data, Max78000AesState, 16), + VMSTATE_UINT8_ARRAY(key, Max78000AesState, 32), + VMSTATE_UINT8_ARRAY(result, Max78000AesState, 16), + VMSTATE_UINT32_ARRAY(internal_key.rd_key, Max78000AesState, 60), + VMSTATE_INT32(internal_key.rounds, Max78000AesState), + VMSTATE_END_OF_LIST() + } +}; + +static void max78000_aes_init(Object *obj) +{ + Max78000AesState *s = MAX78000_AES(obj); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + memory_region_init_io(&s->mmio, obj, &max78000_aes_ops, s, + TYPE_MAX78000_AES, 0xc00); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + +} + +static void max78000_aes_class_init(ObjectClass *klass, const void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + rc->phases.hold = max78000_aes_reset_hold; + dc->vmsd = &vmstate_max78000_aes; + +} + +static const TypeInfo max78000_aes_info = { + .name = TYPE_MAX78000_AES, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Max78000AesState), + .instance_init = max78000_aes_init, + .class_init = 
max78000_aes_class_init, +}; + +static void max78000_aes_register_types(void) +{ + type_register_static(&max78000_aes_info); +} + +type_init(max78000_aes_register_types) diff --git a/hw/misc/max78000_gcr.c b/hw/misc/max78000_gcr.c new file mode 100644 index 00000000000..fbbc92cca32 --- /dev/null +++ b/hw/misc/max78000_gcr.c @@ -0,0 +1,351 @@ +/* + * MAX78000 Global Control Registers + * + * Copyright (c) 2025 Jackson Donaldson + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "hw/irq.h" +#include "system/runstate.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/char/max78000_uart.h" +#include "hw/misc/max78000_trng.h" +#include "hw/misc/max78000_aes.h" +#include "hw/misc/max78000_gcr.h" + + +static void max78000_gcr_reset_hold(Object *obj, ResetType type) +{ + DeviceState *dev = DEVICE(obj); + Max78000GcrState *s = MAX78000_GCR(dev); + s->sysctrl = 0x21002; + s->rst0 = 0; + /* All clocks are always ready */ + s->clkctrl = 0x3e140008; + s->pm = 0x3f000; + s->pclkdiv = 0; + s->pclkdis0 = 0xffffffff; + s->memctrl = 0x5; + s->memz = 0; + s->sysst = 0; + s->rst1 = 0; + s->pckdis1 = 0xffffffff; + s->eventen = 0; + s->revision = 0xa1; + s->sysie = 0; + s->eccerr = 0; + s->ecced = 0; + s->eccie = 0; + s->eccaddr = 0; +} + +static uint64_t max78000_gcr_read(void *opaque, hwaddr addr, + unsigned int size) +{ + Max78000GcrState *s = opaque; + + switch (addr) { + case SYSCTRL: + return s->sysctrl; + + case RST0: + return s->rst0; + + case CLKCTRL: + return s->clkctrl; + + case PM: + return s->pm; + + case PCLKDIV: + return s->pclkdiv; + + case PCLKDIS0: + return s->pclkdis0; + + case MEMCTRL: + return s->memctrl; + + case MEMZ: + return s->memz; + + case SYSST: + return s->sysst; + + case RST1: + return s->rst1; + + case PCKDIS1: + return s->pckdis1; + + case EVENTEN: + return s->eventen; + + case REVISION: + return s->revision; + + case SYSIE: + return s->sysie; + + case ECCERR: + return s->eccerr; + + case ECCED: + return s->ecced; + + case ECCIE: + return s->eccie; + + case ECCADDR: + return s->eccaddr; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" + HWADDR_PRIx "\n", __func__, addr); + return 0; + + } +} + +static void max78000_gcr_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + Max78000GcrState *s = opaque; + uint32_t val = val64; + uint8_t zero[0xc000] = {0}; + switch (addr) { + case SYSCTRL: + /* Checksum calculations always pass immediately */ + s->sysctrl = (val & 0x30000) | 0x1002; + break; + + case RST0: + if (val & SYSTEM_RESET) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + if (val & PERIPHERAL_RESET) { + /* + * Peripheral reset resets all peripherals. The CPU + * retains its state. The GPIO, watchdog timers, AoD, + * RAM retention, and general control registers (GCR), + * including the clock configuration, are unaffected. 
+ */ + val = UART2_RESET | UART1_RESET | UART0_RESET | + ADC_RESET | CNN_RESET | TRNG_RESET | + RTC_RESET | I2C0_RESET | SPI1_RESET | + TMR3_RESET | TMR2_RESET | TMR1_RESET | + TMR0_RESET | WDT0_RESET | DMA_RESET; + } + if (val & SOFT_RESET) { + /* Soft reset also resets GPIO */ + val = UART2_RESET | UART1_RESET | UART0_RESET | + ADC_RESET | CNN_RESET | TRNG_RESET | + RTC_RESET | I2C0_RESET | SPI1_RESET | + TMR3_RESET | TMR2_RESET | TMR1_RESET | + TMR0_RESET | GPIO1_RESET | GPIO0_RESET | + DMA_RESET; + } + if (val & UART2_RESET) { + device_cold_reset(s->uart2); + } + if (val & UART1_RESET) { + device_cold_reset(s->uart1); + } + if (val & UART0_RESET) { + device_cold_reset(s->uart0); + } + if (val & TRNG_RESET) { + device_cold_reset(s->trng); + } + if (val & AES_RESET) { + device_cold_reset(s->aes); + } + /* TODO: As other devices are implemented, add them here */ + break; + + case CLKCTRL: + s->clkctrl = val | SYSCLK_RDY; + break; + + case PM: + s->pm = val; + break; + + case PCLKDIV: + s->pclkdiv = val; + break; + + case PCLKDIS0: + s->pclkdis0 = val; + break; + + case MEMCTRL: + s->memctrl = val; + break; + + case MEMZ: + if (val & ram0) { + address_space_write(&s->sram_as, SYSRAM0_START, + MEMTXATTRS_UNSPECIFIED, zero, 0x8000); + } + if (val & ram1) { + address_space_write(&s->sram_as, SYSRAM1_START, + MEMTXATTRS_UNSPECIFIED, zero, 0x8000); + } + if (val & ram2) { + address_space_write(&s->sram_as, SYSRAM2_START, + MEMTXATTRS_UNSPECIFIED, zero, 0xC000); + } + if (val & ram3) { + address_space_write(&s->sram_as, SYSRAM3_START, + MEMTXATTRS_UNSPECIFIED, zero, 0x4000); + } + break; + + case SYSST: + s->sysst = val; + break; + + case RST1: + /* TODO: As other devices are implemented, add them here */ + s->rst1 = val; + break; + + case PCKDIS1: + s->pckdis1 = val; + break; + + case EVENTEN: + s->eventen = val; + break; + + case REVISION: + s->revision = val; + break; + + case SYSIE: + s->sysie = val; + break; + + case ECCERR: + s->eccerr = val; + break; + + case ECCED: + s->ecced = val; + break; + + case ECCIE: + s->eccie = val; + break; + + case ECCADDR: + s->eccaddr = val; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, addr); + break; + + } +} + +static const Property max78000_gcr_properties[] = { + DEFINE_PROP_LINK("sram", Max78000GcrState, sram, + TYPE_MEMORY_REGION, MemoryRegion*), + DEFINE_PROP_LINK("uart0", Max78000GcrState, uart0, + TYPE_MAX78000_UART, DeviceState*), + DEFINE_PROP_LINK("uart1", Max78000GcrState, uart1, + TYPE_MAX78000_UART, DeviceState*), + DEFINE_PROP_LINK("uart2", Max78000GcrState, uart2, + TYPE_MAX78000_UART, DeviceState*), + DEFINE_PROP_LINK("trng", Max78000GcrState, trng, + TYPE_MAX78000_TRNG, DeviceState*), + DEFINE_PROP_LINK("aes", Max78000GcrState, aes, + TYPE_MAX78000_AES, DeviceState*), +}; + +static const MemoryRegionOps max78000_gcr_ops = { + .read = max78000_gcr_read, + .write = max78000_gcr_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static const VMStateDescription vmstate_max78000_gcr = { + .name = TYPE_MAX78000_GCR, + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT32(sysctrl, Max78000GcrState), + VMSTATE_UINT32(rst0, Max78000GcrState), + VMSTATE_UINT32(clkctrl, Max78000GcrState), + VMSTATE_UINT32(pm, Max78000GcrState), + VMSTATE_UINT32(pclkdiv, Max78000GcrState), + VMSTATE_UINT32(pclkdis0, Max78000GcrState), + VMSTATE_UINT32(memctrl, Max78000GcrState), + VMSTATE_UINT32(memz, 
Max78000GcrState), + VMSTATE_UINT32(sysst, Max78000GcrState), + VMSTATE_UINT32(rst1, Max78000GcrState), + VMSTATE_UINT32(pckdis1, Max78000GcrState), + VMSTATE_UINT32(eventen, Max78000GcrState), + VMSTATE_UINT32(revision, Max78000GcrState), + VMSTATE_UINT32(sysie, Max78000GcrState), + VMSTATE_UINT32(eccerr, Max78000GcrState), + VMSTATE_UINT32(ecced, Max78000GcrState), + VMSTATE_UINT32(eccie, Max78000GcrState), + VMSTATE_UINT32(eccaddr, Max78000GcrState), + VMSTATE_END_OF_LIST() + } +}; + +static void max78000_gcr_init(Object *obj) +{ + Max78000GcrState *s = MAX78000_GCR(obj); + + memory_region_init_io(&s->mmio, obj, &max78000_gcr_ops, s, + TYPE_MAX78000_GCR, 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + +} + +static void max78000_gcr_realize(DeviceState *dev, Error **errp) +{ + Max78000GcrState *s = MAX78000_GCR(dev); + + address_space_init(&s->sram_as, s->sram, "sram"); +} + +static void max78000_gcr_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + device_class_set_props(dc, max78000_gcr_properties); + + dc->realize = max78000_gcr_realize; + dc->vmsd = &vmstate_max78000_gcr; + rc->phases.hold = max78000_gcr_reset_hold; +} + +static const TypeInfo max78000_gcr_info = { + .name = TYPE_MAX78000_GCR, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Max78000GcrState), + .instance_init = max78000_gcr_init, + .class_init = max78000_gcr_class_init, +}; + +static void max78000_gcr_register_types(void) +{ + type_register_static(&max78000_gcr_info); +} + +type_init(max78000_gcr_register_types) diff --git a/hw/misc/max78000_icc.c b/hw/misc/max78000_icc.c new file mode 100644 index 00000000000..6f7d2b20bf5 --- /dev/null +++ b/hw/misc/max78000_icc.c @@ -0,0 +1,120 @@ +/* + * MAX78000 Instruction Cache + * + * Copyright (c) 2025 Jackson Donaldson + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "hw/misc/max78000_icc.h" + + +static uint64_t max78000_icc_read(void *opaque, hwaddr addr, + unsigned int size) +{ + Max78000IccState *s = opaque; + switch (addr) { + case ICC_INFO: + return s->info; + + case ICC_SZ: + return s->sz; + + case ICC_CTRL: + return s->ctrl; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, addr); + return 0; + + } +} + +static void max78000_icc_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + Max78000IccState *s = opaque; + + switch (addr) { + case ICC_CTRL: + s->ctrl = 0x10000 | (val64 & 1); + break; + + case ICC_INVALIDATE: + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, addr); + break; + } +} + +static const MemoryRegionOps max78000_icc_ops = { + .read = max78000_icc_read, + .write = max78000_icc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static const VMStateDescription max78000_icc_vmstate = { + .name = TYPE_MAX78000_ICC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT32(info, Max78000IccState), + VMSTATE_UINT32(sz, Max78000IccState), + VMSTATE_UINT32(ctrl, Max78000IccState), + VMSTATE_END_OF_LIST() + } +}; + +static void max78000_icc_reset_hold(Object *obj, ResetType type) +{ + Max78000IccState *s = MAX78000_ICC(obj); + s->info = 0; + s->sz = 0x10000010; + s->ctrl = 0x10000; 
+} + +static void max78000_icc_init(Object *obj) +{ + Max78000IccState *s = MAX78000_ICC(obj); + + memory_region_init_io(&s->mmio, obj, &max78000_icc_ops, s, + TYPE_MAX78000_ICC, 0x800); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static void max78000_icc_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->phases.hold = max78000_icc_reset_hold; + dc->vmsd = &max78000_icc_vmstate; +} + +static const TypeInfo max78000_icc_info = { + .name = TYPE_MAX78000_ICC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Max78000IccState), + .instance_init = max78000_icc_init, + .class_init = max78000_icc_class_init, +}; + +static void max78000_icc_register_types(void) +{ + type_register_static(&max78000_icc_info); +} + +type_init(max78000_icc_register_types) diff --git a/hw/misc/max78000_trng.c b/hw/misc/max78000_trng.c new file mode 100644 index 00000000000..ecdaef53b6c --- /dev/null +++ b/hw/misc/max78000_trng.c @@ -0,0 +1,139 @@ +/* + * MAX78000 True Random Number Generator + * + * Copyright (c) 2025 Jackson Donaldson + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "hw/misc/max78000_trng.h" +#include "qemu/guest-random.h" + +static uint64_t max78000_trng_read(void *opaque, hwaddr addr, + unsigned int size) +{ + uint32_t data; + + Max78000TrngState *s = opaque; + switch (addr) { + case CTRL: + return s->ctrl; + + case STATUS: + return 1; + + case DATA: + /* + * When interrupts are enabled, reading random data should cause a + * new interrupt to be generated; since there's always a random number + * available, we could qemu_set_irq(s->irq, s->ctrl & RND_IE). Because + * of how trng_write is set up, this is always a noop, so don't + */ + qemu_guest_getrandom_nofail(&data, sizeof(data)); + return data; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" + HWADDR_PRIx "\n", __func__, addr); + break; + } + return 0; +} + +static void max78000_trng_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + Max78000TrngState *s = opaque; + uint32_t val = val64; + switch (addr) { + case CTRL: + /* TODO: implement AES keygen */ + s->ctrl = val; + + /* + * This device models random number generation as taking 0 time. + * A new random number is always available, so the condition for the + * RND interrupt is always fulfilled; we can just set irq to 1. 
+ */ + if (val & RND_IE) { + qemu_set_irq(s->irq, 1); + } else{ + qemu_set_irq(s->irq, 0); + } + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" + HWADDR_PRIx "\n", __func__, addr); + break; + } +} + +static void max78000_trng_reset_hold(Object *obj, ResetType type) +{ + Max78000TrngState *s = MAX78000_TRNG(obj); + s->ctrl = 0; + s->status = 0; + s->data = 0; +} + +static const MemoryRegionOps max78000_trng_ops = { + .read = max78000_trng_read, + .write = max78000_trng_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static const VMStateDescription max78000_trng_vmstate = { + .name = TYPE_MAX78000_TRNG, + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT32(ctrl, Max78000TrngState), + VMSTATE_UINT32(status, Max78000TrngState), + VMSTATE_UINT32(data, Max78000TrngState), + VMSTATE_END_OF_LIST() + } +}; + +static void max78000_trng_init(Object *obj) +{ + Max78000TrngState *s = MAX78000_TRNG(obj); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + memory_region_init_io(&s->mmio, obj, &max78000_trng_ops, s, + TYPE_MAX78000_TRNG, 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + +} + +static void max78000_trng_class_init(ObjectClass *klass, const void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + rc->phases.hold = max78000_trng_reset_hold; + dc->vmsd = &max78000_trng_vmstate; + +} + +static const TypeInfo max78000_trng_info = { + .name = TYPE_MAX78000_TRNG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Max78000TrngState), + .instance_init = max78000_trng_init, + .class_init = max78000_trng_class_init, +}; + +static void max78000_trng_register_types(void) +{ + type_register_static(&max78000_trng_info); +} + +type_init(max78000_trng_register_types) diff --git a/hw/misc/mchp_pfsoc_dmc.c b/hw/misc/mchp_pfsoc_dmc.c index 43d8e970abc..599f845f459 100644 --- a/hw/misc/mchp_pfsoc_dmc.c +++ b/hw/misc/mchp_pfsoc_dmc.c @@ -110,7 +110,8 @@ static void mchp_pfsoc_ddr_sgmii_phy_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->sgmii_phy); } -static void mchp_pfsoc_ddr_sgmii_phy_class_init(ObjectClass *klass, void *data) +static void mchp_pfsoc_ddr_sgmii_phy_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -192,7 +193,7 @@ static void mchp_pfsoc_ddr_cfg_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->cfg); } -static void mchp_pfsoc_ddr_cfg_class_init(ObjectClass *klass, void *data) +static void mchp_pfsoc_ddr_cfg_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/mchp_pfsoc_ioscb.c b/hw/misc/mchp_pfsoc_ioscb.c index a71d134295a..10fc7ea2a95 100644 --- a/hw/misc/mchp_pfsoc_ioscb.c +++ b/hw/misc/mchp_pfsoc_ioscb.c @@ -292,7 +292,7 @@ static void mchp_pfsoc_ioscb_realize(DeviceState *dev, Error **errp) sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); } -static void mchp_pfsoc_ioscb_class_init(ObjectClass *klass, void *data) +static void mchp_pfsoc_ioscb_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/mchp_pfsoc_sysreg.c b/hw/misc/mchp_pfsoc_sysreg.c index 7876fe0c5ba..f47c835f80d 100644 --- a/hw/misc/mchp_pfsoc_sysreg.c +++ b/hw/misc/mchp_pfsoc_sysreg.c @@ -27,7 +27,9 @@ #include "hw/irq.h" #include "hw/sysbus.h" #include "hw/misc/mchp_pfsoc_sysreg.h" +#include 
"system/runstate.h" +#define MSS_RESET_CR 0x18 #define ENVM_CR 0xb8 #define MESSAGE_INT 0x118c @@ -56,6 +58,11 @@ static void mchp_pfsoc_sysreg_write(void *opaque, hwaddr offset, { MchpPfSoCSysregState *s = opaque; switch (offset) { + case MSS_RESET_CR: + if (value == 0xdead) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + break; case MESSAGE_INT: qemu_irq_lower(s->irq); break; @@ -85,7 +92,7 @@ static void mchp_pfsoc_sysreg_realize(DeviceState *dev, Error **errp) sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); } -static void mchp_pfsoc_sysreg_class_init(ObjectClass *klass, void *data) +static void mchp_pfsoc_sysreg_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/meson.build b/hw/misc/meson.build index 2ca8717be28..b1d8d8e5d2a 100644 --- a/hw/misc/meson.build +++ b/hw/misc/meson.build @@ -37,7 +37,9 @@ system_ss.add(when: 'CONFIG_SIFIVE_U_PRCI', if_true: files('sifive_u_prci.c')) subdir('macio') -system_ss.add(when: 'CONFIG_IVSHMEM_DEVICE', if_true: files('ivshmem.c')) +# ivshmem devices +system_ss.add(when: 'CONFIG_IVSHMEM_DEVICE', if_true: files('ivshmem-pci.c')) +system_ss.add(when: 'CONFIG_IVSHMEM_FLAT_DEVICE', if_true: files('ivshmem-flat.c')) system_ss.add(when: 'CONFIG_ALLWINNER_SRAMC', if_true: files('allwinner-sramc.c')) system_ss.add(when: 'CONFIG_ALLWINNER_A10_CCM', if_true: files('allwinner-a10-ccm.c')) @@ -51,9 +53,10 @@ system_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40-ccu.c' system_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40-dramc.c')) system_ss.add(when: 'CONFIG_AXP2XX_PMU', if_true: files('axp2xx.c')) system_ss.add(when: 'CONFIG_REALVIEW', if_true: files('arm_sysctl.c')) -system_ss.add(when: 'CONFIG_NSERIES', if_true: files('cbus.c')) system_ss.add(when: 'CONFIG_ECCMEMCTL', if_true: files('eccmemctl.c')) system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_pmu.c', 'exynos4210_clk.c', 'exynos4210_rng.c')) +system_ss.add(when: 'CONFIG_FSL_IMX8MP_ANALOG', if_true: files('imx8mp_analog.c')) +system_ss.add(when: 'CONFIG_FSL_IMX8MP_CCM', if_true: files('imx8mp_ccm.c')) system_ss.add(when: 'CONFIG_IMX', if_true: files( 'imx25_ccm.c', 'imx31_ccm.c', @@ -67,20 +70,19 @@ system_ss.add(when: 'CONFIG_IMX', if_true: files( 'imx_ccm.c', 'imx_rngc.c', )) -system_ss.add(when: 'CONFIG_MAINSTONE', if_true: files('mst_fpga.c')) +system_ss.add(when: 'CONFIG_MAX78000_AES', if_true: files('max78000_aes.c')) +system_ss.add(when: 'CONFIG_MAX78000_GCR', if_true: files('max78000_gcr.c')) +system_ss.add(when: 'CONFIG_MAX78000_ICC', if_true: files('max78000_icc.c')) +system_ss.add(when: 'CONFIG_MAX78000_TRNG', if_true: files('max78000_trng.c')) system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files( - 'npcm7xx_clk.c', - 'npcm7xx_gcr.c', + 'npcm_clk.c', + 'npcm_gcr.c', 'npcm7xx_mft.c', 'npcm7xx_pwm.c', 'npcm7xx_rng.c', )) system_ss.add(when: 'CONFIG_OMAP', if_true: files( 'omap_clk.c', - 'omap_gpmc.c', - 'omap_l4.c', - 'omap_sdrc.c', - 'omap_tap.c', )) system_ss.add(when: 'CONFIG_RASPI', if_true: files( 'bcm2835_mbox.c', @@ -106,6 +108,7 @@ system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files( system_ss.add(when: 'CONFIG_XLNX_VERSAL_TRNG', if_true: files( 'xlnx-versal-trng.c', )) +system_ss.add(when: 'CONFIG_STM32_RCC', if_true: files('stm32_rcc.c')) system_ss.add(when: 'CONFIG_STM32F2XX_SYSCFG', if_true: files('stm32f2xx_syscfg.c')) system_ss.add(when: 'CONFIG_STM32F4XX_SYSCFG', if_true: files('stm32f4xx_syscfg.c')) system_ss.add(when: 'CONFIG_STM32F4XX_EXTI', 
if_true: files('stm32f4xx_exti.c')) @@ -127,6 +130,7 @@ system_ss.add(when: 'CONFIG_ARMSSE_MHU', if_true: files('armsse-mhu.c')) system_ss.add(when: 'CONFIG_PVPANIC_ISA', if_true: files('pvpanic-isa.c')) system_ss.add(when: 'CONFIG_PVPANIC_PCI', if_true: files('pvpanic-pci.c')) +system_ss.add(when: 'CONFIG_PVPANIC_MMIO', if_true: files('pvpanic-mmio.c')) system_ss.add(when: 'CONFIG_AUX', if_true: files('auxbus.c')) system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( 'aspeed_hace.c', diff --git a/hw/misc/mips_cmgcr.c b/hw/misc/mips_cmgcr.c index 2703040f459..5484b739674 100644 --- a/hw/misc/mips_cmgcr.c +++ b/hw/misc/mips_cmgcr.c @@ -211,7 +211,7 @@ static const VMStateDescription vmstate_mips_gcr = { }, }; -static Property mips_gcr_properties[] = { +static const Property mips_gcr_properties[] = { DEFINE_PROP_UINT32("num-vp", MIPSGCRState, num_vps, 1), DEFINE_PROP_INT32("gcr-rev", MIPSGCRState, gcr_rev, 0x800), DEFINE_PROP_UINT64("gcr-base", MIPSGCRState, gcr_base, GCR_BASE_ADDR), @@ -219,7 +219,6 @@ static Property mips_gcr_properties[] = { MemoryRegion *), DEFINE_PROP_LINK("cpc", MIPSGCRState, cpc_mr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; static void mips_gcr_realize(DeviceState *dev, Error **errp) @@ -230,12 +229,12 @@ static void mips_gcr_realize(DeviceState *dev, Error **errp) s->vps = g_new(MIPSGCRVPState, s->num_vps); } -static void mips_gcr_class_init(ObjectClass *klass, void *data) +static void mips_gcr_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, mips_gcr_properties); dc->vmsd = &vmstate_mips_gcr; - dc->reset = mips_gcr_reset; + device_class_set_legacy_reset(dc, mips_gcr_reset); dc->realize = mips_gcr_realize; } diff --git a/hw/misc/mips_cpc.c b/hw/misc/mips_cpc.c index 1e8fd2e6996..9bfb7c97219 100644 --- a/hw/misc/mips_cpc.c +++ b/hw/misc/mips_cpc.c @@ -92,8 +92,6 @@ static void cpc_write(void *opaque, hwaddr offset, uint64_t data, "%s: Bad offset 0x%x\n", __func__, (int)offset); break; } - - return; } static uint64_t cpc_read(void *opaque, hwaddr offset, unsigned size) @@ -163,18 +161,17 @@ static const VMStateDescription vmstate_mips_cpc = { }, }; -static Property mips_cpc_properties[] = { +static const Property mips_cpc_properties[] = { DEFINE_PROP_UINT32("num-vp", MIPSCPCState, num_vp, 0x1), DEFINE_PROP_UINT64("vp-start-running", MIPSCPCState, vp_start_running, 0x1), - DEFINE_PROP_END_OF_LIST(), }; -static void mips_cpc_class_init(ObjectClass *klass, void *data) +static void mips_cpc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = mips_cpc_realize; - dc->reset = mips_cpc_reset; + device_class_set_legacy_reset(dc, mips_cpc_reset); dc->vmsd = &vmstate_mips_cpc; device_class_set_props(dc, mips_cpc_properties); } diff --git a/hw/misc/mips_itu.c b/hw/misc/mips_itu.c index f8acfb3ee26..fc17385cde7 100644 --- a/hw/misc/mips_itu.c +++ b/hw/misc/mips_itu.c @@ -533,21 +533,20 @@ static void mips_itu_reset(DeviceState *dev) itc_reset_cells(s); } -static Property mips_itu_properties[] = { +static const Property mips_itu_properties[] = { DEFINE_PROP_UINT32("num-fifo", MIPSITUState, num_fifo, ITC_FIFO_NUM_MAX), DEFINE_PROP_UINT32("num-semaphores", MIPSITUState, num_semaphores, ITC_SEMAPH_NUM_MAX), - DEFINE_PROP_END_OF_LIST(), }; -static void mips_itu_class_init(ObjectClass *klass, void *data) +static void mips_itu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, 
mips_itu_properties); dc->realize = mips_itu_realize; - dc->reset = mips_itu_reset; + device_class_set_legacy_reset(dc, mips_itu_reset); } static const TypeInfo mips_itu_info = { diff --git a/hw/misc/mos6522.c b/hw/misc/mos6522.c index 515f62e687d..8dd6b82ac5f 100644 --- a/hw/misc/mos6522.c +++ b/hw/misc/mos6522.c @@ -696,12 +696,11 @@ static void mos6522_finalize(Object *obj) timer_free(s->timers[1].timer); } -static Property mos6522_properties[] = { +static const Property mos6522_properties[] = { DEFINE_PROP_UINT64("frequency", MOS6522State, frequency, 0), - DEFINE_PROP_END_OF_LIST() }; -static void mos6522_class_init(ObjectClass *oc, void *data) +static void mos6522_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); ResettableClass *rc = RESETTABLE_CLASS(oc); diff --git a/hw/misc/mps2-fpgaio.c b/hw/misc/mps2-fpgaio.c index aa1bb83e721..bee1309f5a6 100644 --- a/hw/misc/mps2-fpgaio.c +++ b/hw/misc/mps2-fpgaio.c @@ -198,7 +198,7 @@ static void mps2_fpgaio_write(void *opaque, hwaddr offset, uint64_t value, s->led0 = value & MAKE_64BIT_MASK(0, s->num_leds); for (i = 0; i < s->num_leds; i++) { - led_set_state(s->led[i], value & (1 << i)); + led_set_state(s->led[i], extract64(value, i, 1)); } } break; @@ -319,23 +319,22 @@ static const VMStateDescription mps2_fpgaio_vmstate = { }, }; -static Property mps2_fpgaio_properties[] = { +static const Property mps2_fpgaio_properties[] = { /* Frequency of the prescale counter */ DEFINE_PROP_UINT32("prescale-clk", MPS2FPGAIO, prescale_clk, 20000000), /* Number of LEDs controlled by LED0 register */ DEFINE_PROP_UINT32("num-leds", MPS2FPGAIO, num_leds, 2), DEFINE_PROP_BOOL("has-switches", MPS2FPGAIO, has_switches, false), DEFINE_PROP_BOOL("has-dbgctrl", MPS2FPGAIO, has_dbgctrl, false), - DEFINE_PROP_END_OF_LIST(), }; -static void mps2_fpgaio_class_init(ObjectClass *klass, void *data) +static void mps2_fpgaio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &mps2_fpgaio_vmstate; dc->realize = mps2_fpgaio_realize; - dc->reset = mps2_fpgaio_reset; + device_class_set_legacy_reset(dc, mps2_fpgaio_reset); device_class_set_props(dc, mps2_fpgaio_properties); } diff --git a/hw/misc/mps2-scc.c b/hw/misc/mps2-scc.c index 18be74157ee..a9a5d4a535b 100644 --- a/hw/misc/mps2-scc.c +++ b/hw/misc/mps2-scc.c @@ -456,7 +456,7 @@ static const VMStateDescription mps2_scc_vmstate = { } }; -static Property mps2_scc_properties[] = { +static const Property mps2_scc_properties[] = { /* Values for various read-only ID registers (which are specific * to the board model or FPGA image) */ @@ -472,16 +472,15 @@ static Property mps2_scc_properties[] = { */ DEFINE_PROP_ARRAY("oscclk", MPS2SCC, num_oscclk, oscclk_reset, qdev_prop_uint32, uint32_t), - DEFINE_PROP_END_OF_LIST(), }; -static void mps2_scc_class_init(ObjectClass *klass, void *data) +static void mps2_scc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = mps2_scc_realize; dc->vmsd = &mps2_scc_vmstate; - dc->reset = mps2_scc_reset; + device_class_set_legacy_reset(dc, mps2_scc_reset); device_class_set_props(dc, mps2_scc_properties); } diff --git a/hw/misc/msf2-sysreg.c b/hw/misc/msf2-sysreg.c index f54382a816c..ce0ad50c1be 100644 --- a/hw/misc/msf2-sysreg.c +++ b/hw/misc/msf2-sysreg.c @@ -118,11 +118,10 @@ static const VMStateDescription vmstate_msf2_sysreg = { } }; -static Property msf2_sysreg_properties[] = { +static const Property msf2_sysreg_properties[] = { /* default divisors in Libero GUI 
*/ DEFINE_PROP_UINT8("apb0divisor", MSF2SysregState, apb0div, 2), DEFINE_PROP_UINT8("apb1divisor", MSF2SysregState, apb1div, 2), - DEFINE_PROP_END_OF_LIST(), }; static void msf2_sysreg_realize(DeviceState *dev, Error **errp) @@ -137,12 +136,12 @@ static void msf2_sysreg_realize(DeviceState *dev, Error **errp) } } -static void msf2_sysreg_class_init(ObjectClass *klass, void *data) +static void msf2_sysreg_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_msf2_sysreg; - dc->reset = msf2_sysreg_reset; + device_class_set_legacy_reset(dc, msf2_sysreg_reset); device_class_set_props(dc, msf2_sysreg_properties); dc->realize = msf2_sysreg_realize; } diff --git a/hw/misc/mst_fpga.c b/hw/misc/mst_fpga.c deleted file mode 100644 index 2d7bfa5ad9e..00000000000 --- a/hw/misc/mst_fpga.c +++ /dev/null @@ -1,269 +0,0 @@ -/* - * PXA270-based Intel Mainstone platforms. - * FPGA driver - * - * Copyright (c) 2007 by Armin Kuster or - * - * - * This code is licensed under the GNU GPL v2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "qemu/osdep.h" -#include "hw/irq.h" -#include "hw/sysbus.h" -#include "migration/vmstate.h" -#include "qemu/module.h" -#include "qom/object.h" - -/* Mainstone FPGA for extern irqs */ -#define FPGA_GPIO_PIN 0 -#define MST_NUM_IRQS 16 -#define MST_LEDDAT1 0x10 -#define MST_LEDDAT2 0x14 -#define MST_LEDCTRL 0x40 -#define MST_GPSWR 0x60 -#define MST_MSCWR1 0x80 -#define MST_MSCWR2 0x84 -#define MST_MSCWR3 0x88 -#define MST_MSCRD 0x90 -#define MST_INTMSKENA 0xc0 -#define MST_INTSETCLR 0xd0 -#define MST_PCMCIA0 0xe0 -#define MST_PCMCIA1 0xe4 - -#define MST_PCMCIAx_READY (1 << 10) -#define MST_PCMCIAx_nCD (1 << 5) - -#define MST_PCMCIA_CD0_IRQ 9 -#define MST_PCMCIA_CD1_IRQ 13 - -#define TYPE_MAINSTONE_FPGA "mainstone-fpga" -OBJECT_DECLARE_SIMPLE_TYPE(mst_irq_state, MAINSTONE_FPGA) - -struct mst_irq_state { - SysBusDevice parent_obj; - - MemoryRegion iomem; - - qemu_irq parent; - - uint32_t prev_level; - uint32_t leddat1; - uint32_t leddat2; - uint32_t ledctrl; - uint32_t gpswr; - uint32_t mscwr1; - uint32_t mscwr2; - uint32_t mscwr3; - uint32_t mscrd; - uint32_t intmskena; - uint32_t intsetclr; - uint32_t pcmcia0; - uint32_t pcmcia1; -}; - -static void -mst_fpga_set_irq(void *opaque, int irq, int level) -{ - mst_irq_state *s = (mst_irq_state *)opaque; - uint32_t oldint = s->intsetclr & s->intmskena; - - if (level) - s->prev_level |= 1u << irq; - else - s->prev_level &= ~(1u << irq); - - switch(irq) { - case MST_PCMCIA_CD0_IRQ: - if (level) - s->pcmcia0 &= ~MST_PCMCIAx_nCD; - else - s->pcmcia0 |= MST_PCMCIAx_nCD; - break; - case MST_PCMCIA_CD1_IRQ: - if (level) - s->pcmcia1 &= ~MST_PCMCIAx_nCD; - else - s->pcmcia1 |= MST_PCMCIAx_nCD; - break; - } - - if ((s->intmskena & (1u << irq)) && level) - s->intsetclr |= 1u << irq; - - if (oldint != (s->intsetclr & s->intmskena)) - qemu_set_irq(s->parent, s->intsetclr & s->intmskena); -} - - -static uint64_t -mst_fpga_readb(void *opaque, hwaddr addr, unsigned size) -{ - mst_irq_state *s = (mst_irq_state *) opaque; - - switch (addr) { - case MST_LEDDAT1: - return s->leddat1; - case MST_LEDDAT2: - return s->leddat2; - case MST_LEDCTRL: - return s->ledctrl; - case MST_GPSWR: - return s->gpswr; - case MST_MSCWR1: - return s->mscwr1; - case MST_MSCWR2: - return s->mscwr2; - case MST_MSCWR3: - return s->mscwr3; - case MST_MSCRD: - return s->mscrd; - case MST_INTMSKENA: - return s->intmskena; 
- case MST_INTSETCLR: - return s->intsetclr; - case MST_PCMCIA0: - return s->pcmcia0; - case MST_PCMCIA1: - return s->pcmcia1; - default: - printf("Mainstone - mst_fpga_readb: Bad register offset " - "0x" HWADDR_FMT_plx "\n", addr); - } - return 0; -} - -static void -mst_fpga_writeb(void *opaque, hwaddr addr, uint64_t value, - unsigned size) -{ - mst_irq_state *s = (mst_irq_state *) opaque; - value &= 0xffffffff; - - switch (addr) { - case MST_LEDDAT1: - s->leddat1 = value; - break; - case MST_LEDDAT2: - s->leddat2 = value; - break; - case MST_LEDCTRL: - s->ledctrl = value; - break; - case MST_GPSWR: - s->gpswr = value; - break; - case MST_MSCWR1: - s->mscwr1 = value; - break; - case MST_MSCWR2: - s->mscwr2 = value; - break; - case MST_MSCWR3: - s->mscwr3 = value; - break; - case MST_MSCRD: - s->mscrd = value; - break; - case MST_INTMSKENA: /* Mask interrupt */ - s->intmskena = (value & 0xFEEFF); - qemu_set_irq(s->parent, s->intsetclr & s->intmskena); - break; - case MST_INTSETCLR: /* clear or set interrupt */ - s->intsetclr = (value & 0xFEEFF); - qemu_set_irq(s->parent, s->intsetclr & s->intmskena); - break; - /* For PCMCIAx allow the to change only power and reset */ - case MST_PCMCIA0: - s->pcmcia0 = (value & 0x1f) | (s->pcmcia0 & ~0x1f); - break; - case MST_PCMCIA1: - s->pcmcia1 = (value & 0x1f) | (s->pcmcia1 & ~0x1f); - break; - default: - printf("Mainstone - mst_fpga_writeb: Bad register offset " - "0x" HWADDR_FMT_plx "\n", addr); - } -} - -static const MemoryRegionOps mst_fpga_ops = { - .read = mst_fpga_readb, - .write = mst_fpga_writeb, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static int mst_fpga_post_load(void *opaque, int version_id) -{ - mst_irq_state *s = (mst_irq_state *) opaque; - - qemu_set_irq(s->parent, s->intsetclr & s->intmskena); - return 0; -} - -static void mst_fpga_init(Object *obj) -{ - DeviceState *dev = DEVICE(obj); - mst_irq_state *s = MAINSTONE_FPGA(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - - s->pcmcia0 = MST_PCMCIAx_READY | MST_PCMCIAx_nCD; - s->pcmcia1 = MST_PCMCIAx_READY | MST_PCMCIAx_nCD; - - sysbus_init_irq(sbd, &s->parent); - - /* alloc the external 16 irqs */ - qdev_init_gpio_in(dev, mst_fpga_set_irq, MST_NUM_IRQS); - - memory_region_init_io(&s->iomem, obj, &mst_fpga_ops, s, - "fpga", 0x00100000); - sysbus_init_mmio(sbd, &s->iomem); -} - -static const VMStateDescription vmstate_mst_fpga_regs = { - .name = "mainstone_fpga", - .version_id = 0, - .minimum_version_id = 0, - .post_load = mst_fpga_post_load, - .fields = (const VMStateField[]) { - VMSTATE_UINT32(prev_level, mst_irq_state), - VMSTATE_UINT32(leddat1, mst_irq_state), - VMSTATE_UINT32(leddat2, mst_irq_state), - VMSTATE_UINT32(ledctrl, mst_irq_state), - VMSTATE_UINT32(gpswr, mst_irq_state), - VMSTATE_UINT32(mscwr1, mst_irq_state), - VMSTATE_UINT32(mscwr2, mst_irq_state), - VMSTATE_UINT32(mscwr3, mst_irq_state), - VMSTATE_UINT32(mscrd, mst_irq_state), - VMSTATE_UINT32(intmskena, mst_irq_state), - VMSTATE_UINT32(intsetclr, mst_irq_state), - VMSTATE_UINT32(pcmcia0, mst_irq_state), - VMSTATE_UINT32(pcmcia1, mst_irq_state), - VMSTATE_END_OF_LIST(), - }, -}; - -static void mst_fpga_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->desc = "Mainstone II FPGA"; - dc->vmsd = &vmstate_mst_fpga_regs; -} - -static const TypeInfo mst_fpga_info = { - .name = TYPE_MAINSTONE_FPGA, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(mst_irq_state), - .instance_init = mst_fpga_init, - .class_init = mst_fpga_class_init, -}; - -static void 
mst_fpga_register_types(void) -{ - type_register_static(&mst_fpga_info); -} - -type_init(mst_fpga_register_types) diff --git a/hw/misc/npcm7xx_clk.c b/hw/misc/npcm7xx_clk.c deleted file mode 100644 index 2098c85ee01..00000000000 --- a/hw/misc/npcm7xx_clk.c +++ /dev/null @@ -1,1088 +0,0 @@ -/* - * Nuvoton NPCM7xx Clock Control Registers. - * - * Copyright 2020 Google LLC - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - */ - -#include "qemu/osdep.h" - -#include "hw/misc/npcm7xx_clk.h" -#include "hw/timer/npcm7xx_timer.h" -#include "hw/qdev-clock.h" -#include "migration/vmstate.h" -#include "qemu/error-report.h" -#include "qemu/log.h" -#include "qemu/module.h" -#include "qemu/timer.h" -#include "qemu/units.h" -#include "trace.h" -#include "sysemu/watchdog.h" - -/* - * The reference clock hz, and the SECCNT and CNTR25M registers in this module, - * is always 25 MHz. - */ -#define NPCM7XX_CLOCK_REF_HZ (25000000) - -/* Register Field Definitions */ -#define NPCM7XX_CLK_WDRCR_CA9C BIT(0) /* Cortex-A9 Cores */ - -#define PLLCON_LOKI BIT(31) -#define PLLCON_LOKS BIT(30) -#define PLLCON_PWDEN BIT(12) -#define PLLCON_FBDV(con) extract32((con), 16, 12) -#define PLLCON_OTDV2(con) extract32((con), 13, 3) -#define PLLCON_OTDV1(con) extract32((con), 8, 3) -#define PLLCON_INDV(con) extract32((con), 0, 6) - -enum NPCM7xxCLKRegisters { - NPCM7XX_CLK_CLKEN1, - NPCM7XX_CLK_CLKSEL, - NPCM7XX_CLK_CLKDIV1, - NPCM7XX_CLK_PLLCON0, - NPCM7XX_CLK_PLLCON1, - NPCM7XX_CLK_SWRSTR, - NPCM7XX_CLK_IPSRST1 = 0x20 / sizeof(uint32_t), - NPCM7XX_CLK_IPSRST2, - NPCM7XX_CLK_CLKEN2, - NPCM7XX_CLK_CLKDIV2, - NPCM7XX_CLK_CLKEN3, - NPCM7XX_CLK_IPSRST3, - NPCM7XX_CLK_WD0RCR, - NPCM7XX_CLK_WD1RCR, - NPCM7XX_CLK_WD2RCR, - NPCM7XX_CLK_SWRSTC1, - NPCM7XX_CLK_SWRSTC2, - NPCM7XX_CLK_SWRSTC3, - NPCM7XX_CLK_SWRSTC4, - NPCM7XX_CLK_PLLCON2, - NPCM7XX_CLK_CLKDIV3, - NPCM7XX_CLK_CORSTC, - NPCM7XX_CLK_PLLCONG, - NPCM7XX_CLK_AHBCKFI, - NPCM7XX_CLK_SECCNT, - NPCM7XX_CLK_CNTR25M, - NPCM7XX_CLK_REGS_END, -}; - -/* - * These reset values were taken from version 0.91 of the NPCM750R data sheet. - * - * All are loaded on power-up reset. CLKENx and SWRSTR should also be loaded on - * core domain reset, but this reset type is not yet supported by QEMU. 
- */ -static const uint32_t cold_reset_values[NPCM7XX_CLK_NR_REGS] = { - [NPCM7XX_CLK_CLKEN1] = 0xffffffff, - [NPCM7XX_CLK_CLKSEL] = 0x004aaaaa, - [NPCM7XX_CLK_CLKDIV1] = 0x5413f855, - [NPCM7XX_CLK_PLLCON0] = 0x00222101 | PLLCON_LOKI, - [NPCM7XX_CLK_PLLCON1] = 0x00202101 | PLLCON_LOKI, - [NPCM7XX_CLK_IPSRST1] = 0x00001000, - [NPCM7XX_CLK_IPSRST2] = 0x80000000, - [NPCM7XX_CLK_CLKEN2] = 0xffffffff, - [NPCM7XX_CLK_CLKDIV2] = 0xaa4f8f9f, - [NPCM7XX_CLK_CLKEN3] = 0xffffffff, - [NPCM7XX_CLK_IPSRST3] = 0x03000000, - [NPCM7XX_CLK_WD0RCR] = 0xffffffff, - [NPCM7XX_CLK_WD1RCR] = 0xffffffff, - [NPCM7XX_CLK_WD2RCR] = 0xffffffff, - [NPCM7XX_CLK_SWRSTC1] = 0x00000003, - [NPCM7XX_CLK_PLLCON2] = 0x00c02105 | PLLCON_LOKI, - [NPCM7XX_CLK_CORSTC] = 0x04000003, - [NPCM7XX_CLK_PLLCONG] = 0x01228606 | PLLCON_LOKI, - [NPCM7XX_CLK_AHBCKFI] = 0x000000c8, -}; - -/* The number of watchdogs that can trigger a reset. */ -#define NPCM7XX_NR_WATCHDOGS (3) - -/* Clock converter functions */ - -#define TYPE_NPCM7XX_CLOCK_PLL "npcm7xx-clock-pll" -#define NPCM7XX_CLOCK_PLL(obj) OBJECT_CHECK(NPCM7xxClockPLLState, \ - (obj), TYPE_NPCM7XX_CLOCK_PLL) -#define TYPE_NPCM7XX_CLOCK_SEL "npcm7xx-clock-sel" -#define NPCM7XX_CLOCK_SEL(obj) OBJECT_CHECK(NPCM7xxClockSELState, \ - (obj), TYPE_NPCM7XX_CLOCK_SEL) -#define TYPE_NPCM7XX_CLOCK_DIVIDER "npcm7xx-clock-divider" -#define NPCM7XX_CLOCK_DIVIDER(obj) OBJECT_CHECK(NPCM7xxClockDividerState, \ - (obj), TYPE_NPCM7XX_CLOCK_DIVIDER) - -static void npcm7xx_clk_update_pll(void *opaque) -{ - NPCM7xxClockPLLState *s = opaque; - uint32_t con = s->clk->regs[s->reg]; - uint64_t freq; - - /* The PLL is grounded if it is not locked yet. */ - if (con & PLLCON_LOKI) { - freq = clock_get_hz(s->clock_in); - freq *= PLLCON_FBDV(con); - freq /= PLLCON_INDV(con) * PLLCON_OTDV1(con) * PLLCON_OTDV2(con); - } else { - freq = 0; - } - - clock_update_hz(s->clock_out, freq); -} - -static void npcm7xx_clk_update_sel(void *opaque) -{ - NPCM7xxClockSELState *s = opaque; - uint32_t index = extract32(s->clk->regs[NPCM7XX_CLK_CLKSEL], s->offset, - s->len); - - if (index >= s->input_size) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: SEL index: %u out of range\n", - __func__, index); - index = 0; - } - clock_update_hz(s->clock_out, clock_get_hz(s->clock_in[index])); -} - -static void npcm7xx_clk_update_divider(void *opaque) -{ - NPCM7xxClockDividerState *s = opaque; - uint32_t freq; - - freq = s->divide(s); - clock_update_hz(s->clock_out, freq); -} - -static uint32_t divide_by_constant(NPCM7xxClockDividerState *s) -{ - return clock_get_hz(s->clock_in) / s->divisor; -} - -static uint32_t divide_by_reg_divisor(NPCM7xxClockDividerState *s) -{ - return clock_get_hz(s->clock_in) / - (extract32(s->clk->regs[s->reg], s->offset, s->len) + 1); -} - -static uint32_t divide_by_reg_divisor_times_2(NPCM7xxClockDividerState *s) -{ - return divide_by_reg_divisor(s) / 2; -} - -static uint32_t shift_by_reg_divisor(NPCM7xxClockDividerState *s) -{ - return clock_get_hz(s->clock_in) >> - extract32(s->clk->regs[s->reg], s->offset, s->len); -} - -static NPCM7xxClockPLL find_pll_by_reg(enum NPCM7xxCLKRegisters reg) -{ - switch (reg) { - case NPCM7XX_CLK_PLLCON0: - return NPCM7XX_CLOCK_PLL0; - case NPCM7XX_CLK_PLLCON1: - return NPCM7XX_CLOCK_PLL1; - case NPCM7XX_CLK_PLLCON2: - return NPCM7XX_CLOCK_PLL2; - case NPCM7XX_CLK_PLLCONG: - return NPCM7XX_CLOCK_PLLG; - default: - g_assert_not_reached(); - } -} - -static void npcm7xx_clk_update_all_plls(NPCM7xxCLKState *clk) -{ - int i; - - for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) { - 
npcm7xx_clk_update_pll(&clk->plls[i]); - } -} - -static void npcm7xx_clk_update_all_sels(NPCM7xxCLKState *clk) -{ - int i; - - for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) { - npcm7xx_clk_update_sel(&clk->sels[i]); - } -} - -static void npcm7xx_clk_update_all_dividers(NPCM7xxCLKState *clk) -{ - int i; - - for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) { - npcm7xx_clk_update_divider(&clk->dividers[i]); - } -} - -static void npcm7xx_clk_update_all_clocks(NPCM7xxCLKState *clk) -{ - clock_update_hz(clk->clkref, NPCM7XX_CLOCK_REF_HZ); - npcm7xx_clk_update_all_plls(clk); - npcm7xx_clk_update_all_sels(clk); - npcm7xx_clk_update_all_dividers(clk); -} - -/* Types of clock sources. */ -typedef enum ClockSrcType { - CLKSRC_REF, - CLKSRC_PLL, - CLKSRC_SEL, - CLKSRC_DIV, -} ClockSrcType; - -typedef struct PLLInitInfo { - const char *name; - ClockSrcType src_type; - int src_index; - int reg; - const char *public_name; -} PLLInitInfo; - -typedef struct SELInitInfo { - const char *name; - uint8_t input_size; - ClockSrcType src_type[NPCM7XX_CLK_SEL_MAX_INPUT]; - int src_index[NPCM7XX_CLK_SEL_MAX_INPUT]; - int offset; - int len; - const char *public_name; -} SELInitInfo; - -typedef struct DividerInitInfo { - const char *name; - ClockSrcType src_type; - int src_index; - uint32_t (*divide)(NPCM7xxClockDividerState *s); - int reg; /* not used when type == CONSTANT */ - int offset; /* not used when type == CONSTANT */ - int len; /* not used when type == CONSTANT */ - int divisor; /* used only when type == CONSTANT */ - const char *public_name; -} DividerInitInfo; - -static const PLLInitInfo pll_init_info_list[] = { - [NPCM7XX_CLOCK_PLL0] = { - .name = "pll0", - .src_type = CLKSRC_REF, - .reg = NPCM7XX_CLK_PLLCON0, - }, - [NPCM7XX_CLOCK_PLL1] = { - .name = "pll1", - .src_type = CLKSRC_REF, - .reg = NPCM7XX_CLK_PLLCON1, - }, - [NPCM7XX_CLOCK_PLL2] = { - .name = "pll2", - .src_type = CLKSRC_REF, - .reg = NPCM7XX_CLK_PLLCON2, - }, - [NPCM7XX_CLOCK_PLLG] = { - .name = "pllg", - .src_type = CLKSRC_REF, - .reg = NPCM7XX_CLK_PLLCONG, - }, -}; - -static const SELInitInfo sel_init_info_list[] = { - [NPCM7XX_CLOCK_PIXCKSEL] = { - .name = "pixcksel", - .input_size = 2, - .src_type = {CLKSRC_PLL, CLKSRC_REF}, - .src_index = {NPCM7XX_CLOCK_PLLG, 0}, - .offset = 5, - .len = 1, - .public_name = "pixel-clock", - }, - [NPCM7XX_CLOCK_MCCKSEL] = { - .name = "mccksel", - .input_size = 4, - .src_type = {CLKSRC_DIV, CLKSRC_REF, CLKSRC_REF, - /*MCBPCK, shouldn't be used in normal operation*/ - CLKSRC_REF}, - .src_index = {NPCM7XX_CLOCK_PLL1D2, 0, 0, 0}, - .offset = 12, - .len = 2, - .public_name = "mc-phy-clock", - }, - [NPCM7XX_CLOCK_CPUCKSEL] = { - .name = "cpucksel", - .input_size = 4, - .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, - /*SYSBPCK, shouldn't be used in normal operation*/ - CLKSRC_REF}, - .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, 0}, - .offset = 0, - .len = 2, - .public_name = "system-clock", - }, - [NPCM7XX_CLOCK_CLKOUTSEL] = { - .name = "clkoutsel", - .input_size = 5, - .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, - CLKSRC_PLL, CLKSRC_DIV}, - .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, - NPCM7XX_CLOCK_PLLG, NPCM7XX_CLOCK_PLL2D2}, - .offset = 18, - .len = 3, - .public_name = "tock", - }, - [NPCM7XX_CLOCK_UARTCKSEL] = { - .name = "uartcksel", - .input_size = 4, - .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV}, - .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, - NPCM7XX_CLOCK_PLL2D2}, - .offset = 8, - .len = 2, - }, - [NPCM7XX_CLOCK_TIMCKSEL] = { - .name = 
"timcksel", - .input_size = 4, - .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV}, - .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, - NPCM7XX_CLOCK_PLL2D2}, - .offset = 14, - .len = 2, - }, - [NPCM7XX_CLOCK_SDCKSEL] = { - .name = "sdcksel", - .input_size = 4, - .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV}, - .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, - NPCM7XX_CLOCK_PLL2D2}, - .offset = 6, - .len = 2, - }, - [NPCM7XX_CLOCK_GFXMSEL] = { - .name = "gfxmksel", - .input_size = 2, - .src_type = {CLKSRC_REF, CLKSRC_PLL}, - .src_index = {0, NPCM7XX_CLOCK_PLL2}, - .offset = 21, - .len = 1, - }, - [NPCM7XX_CLOCK_SUCKSEL] = { - .name = "sucksel", - .input_size = 4, - .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV}, - .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, - NPCM7XX_CLOCK_PLL2D2}, - .offset = 10, - .len = 2, - }, -}; - -static const DividerInitInfo divider_init_info_list[] = { - [NPCM7XX_CLOCK_PLL1D2] = { - .name = "pll1d2", - .src_type = CLKSRC_PLL, - .src_index = NPCM7XX_CLOCK_PLL1, - .divide = divide_by_constant, - .divisor = 2, - }, - [NPCM7XX_CLOCK_PLL2D2] = { - .name = "pll2d2", - .src_type = CLKSRC_PLL, - .src_index = NPCM7XX_CLOCK_PLL2, - .divide = divide_by_constant, - .divisor = 2, - }, - [NPCM7XX_CLOCK_MC_DIVIDER] = { - .name = "mc-divider", - .src_type = CLKSRC_SEL, - .src_index = NPCM7XX_CLOCK_MCCKSEL, - .divide = divide_by_constant, - .divisor = 2, - .public_name = "mc-clock" - }, - [NPCM7XX_CLOCK_AXI_DIVIDER] = { - .name = "axi-divider", - .src_type = CLKSRC_SEL, - .src_index = NPCM7XX_CLOCK_CPUCKSEL, - .divide = shift_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV1, - .offset = 0, - .len = 1, - .public_name = "clk2" - }, - [NPCM7XX_CLOCK_AHB_DIVIDER] = { - .name = "ahb-divider", - .src_type = CLKSRC_DIV, - .src_index = NPCM7XX_CLOCK_AXI_DIVIDER, - .divide = divide_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV1, - .offset = 26, - .len = 2, - .public_name = "clk4" - }, - [NPCM7XX_CLOCK_AHB3_DIVIDER] = { - .name = "ahb3-divider", - .src_type = CLKSRC_DIV, - .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, - .divide = divide_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV1, - .offset = 6, - .len = 5, - .public_name = "ahb3-spi3-clock" - }, - [NPCM7XX_CLOCK_SPI0_DIVIDER] = { - .name = "spi0-divider", - .src_type = CLKSRC_DIV, - .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, - .divide = divide_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV3, - .offset = 6, - .len = 5, - .public_name = "spi0-clock", - }, - [NPCM7XX_CLOCK_SPIX_DIVIDER] = { - .name = "spix-divider", - .src_type = CLKSRC_DIV, - .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, - .divide = divide_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV3, - .offset = 1, - .len = 5, - .public_name = "spix-clock", - }, - [NPCM7XX_CLOCK_APB1_DIVIDER] = { - .name = "apb1-divider", - .src_type = CLKSRC_DIV, - .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, - .divide = shift_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV2, - .offset = 24, - .len = 2, - .public_name = "apb1-clock", - }, - [NPCM7XX_CLOCK_APB2_DIVIDER] = { - .name = "apb2-divider", - .src_type = CLKSRC_DIV, - .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, - .divide = shift_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV2, - .offset = 26, - .len = 2, - .public_name = "apb2-clock", - }, - [NPCM7XX_CLOCK_APB3_DIVIDER] = { - .name = "apb3-divider", - .src_type = CLKSRC_DIV, - .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, - .divide = shift_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV2, - .offset = 28, - .len = 2, - .public_name = "apb3-clock", - }, - 
[NPCM7XX_CLOCK_APB4_DIVIDER] = { - .name = "apb4-divider", - .src_type = CLKSRC_DIV, - .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, - .divide = shift_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV2, - .offset = 30, - .len = 2, - .public_name = "apb4-clock", - }, - [NPCM7XX_CLOCK_APB5_DIVIDER] = { - .name = "apb5-divider", - .src_type = CLKSRC_DIV, - .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, - .divide = shift_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV2, - .offset = 22, - .len = 2, - .public_name = "apb5-clock", - }, - [NPCM7XX_CLOCK_CLKOUT_DIVIDER] = { - .name = "clkout-divider", - .src_type = CLKSRC_SEL, - .src_index = NPCM7XX_CLOCK_CLKOUTSEL, - .divide = divide_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV2, - .offset = 16, - .len = 5, - .public_name = "clkout", - }, - [NPCM7XX_CLOCK_UART_DIVIDER] = { - .name = "uart-divider", - .src_type = CLKSRC_SEL, - .src_index = NPCM7XX_CLOCK_UARTCKSEL, - .divide = divide_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV1, - .offset = 16, - .len = 5, - .public_name = "uart-clock", - }, - [NPCM7XX_CLOCK_TIMER_DIVIDER] = { - .name = "timer-divider", - .src_type = CLKSRC_SEL, - .src_index = NPCM7XX_CLOCK_TIMCKSEL, - .divide = divide_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV1, - .offset = 21, - .len = 5, - .public_name = "timer-clock", - }, - [NPCM7XX_CLOCK_ADC_DIVIDER] = { - .name = "adc-divider", - .src_type = CLKSRC_DIV, - .src_index = NPCM7XX_CLOCK_TIMER_DIVIDER, - .divide = shift_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV1, - .offset = 28, - .len = 3, - .public_name = "adc-clock", - }, - [NPCM7XX_CLOCK_MMC_DIVIDER] = { - .name = "mmc-divider", - .src_type = CLKSRC_SEL, - .src_index = NPCM7XX_CLOCK_SDCKSEL, - .divide = divide_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV1, - .offset = 11, - .len = 5, - .public_name = "mmc-clock", - }, - [NPCM7XX_CLOCK_SDHC_DIVIDER] = { - .name = "sdhc-divider", - .src_type = CLKSRC_SEL, - .src_index = NPCM7XX_CLOCK_SDCKSEL, - .divide = divide_by_reg_divisor_times_2, - .reg = NPCM7XX_CLK_CLKDIV2, - .offset = 0, - .len = 4, - .public_name = "sdhc-clock", - }, - [NPCM7XX_CLOCK_GFXM_DIVIDER] = { - .name = "gfxm-divider", - .src_type = CLKSRC_SEL, - .src_index = NPCM7XX_CLOCK_GFXMSEL, - .divide = divide_by_constant, - .divisor = 3, - .public_name = "gfxm-clock", - }, - [NPCM7XX_CLOCK_UTMI_DIVIDER] = { - .name = "utmi-divider", - .src_type = CLKSRC_SEL, - .src_index = NPCM7XX_CLOCK_SUCKSEL, - .divide = divide_by_reg_divisor, - .reg = NPCM7XX_CLK_CLKDIV2, - .offset = 8, - .len = 5, - .public_name = "utmi-clock", - }, -}; - -static void npcm7xx_clk_update_pll_cb(void *opaque, ClockEvent event) -{ - npcm7xx_clk_update_pll(opaque); -} - -static void npcm7xx_clk_pll_init(Object *obj) -{ - NPCM7xxClockPLLState *pll = NPCM7XX_CLOCK_PLL(obj); - - pll->clock_in = qdev_init_clock_in(DEVICE(pll), "clock-in", - npcm7xx_clk_update_pll_cb, pll, - ClockUpdate); - pll->clock_out = qdev_init_clock_out(DEVICE(pll), "clock-out"); -} - -static void npcm7xx_clk_update_sel_cb(void *opaque, ClockEvent event) -{ - npcm7xx_clk_update_sel(opaque); -} - -static void npcm7xx_clk_sel_init(Object *obj) -{ - int i; - NPCM7xxClockSELState *sel = NPCM7XX_CLOCK_SEL(obj); - - for (i = 0; i < NPCM7XX_CLK_SEL_MAX_INPUT; ++i) { - g_autofree char *s = g_strdup_printf("clock-in[%d]", i); - sel->clock_in[i] = qdev_init_clock_in(DEVICE(sel), s, - npcm7xx_clk_update_sel_cb, sel, ClockUpdate); - } - sel->clock_out = qdev_init_clock_out(DEVICE(sel), "clock-out"); -} - -static void npcm7xx_clk_update_divider_cb(void *opaque, ClockEvent event) -{ - npcm7xx_clk_update_divider(opaque); 
-} - -static void npcm7xx_clk_divider_init(Object *obj) -{ - NPCM7xxClockDividerState *div = NPCM7XX_CLOCK_DIVIDER(obj); - - div->clock_in = qdev_init_clock_in(DEVICE(div), "clock-in", - npcm7xx_clk_update_divider_cb, - div, ClockUpdate); - div->clock_out = qdev_init_clock_out(DEVICE(div), "clock-out"); -} - -static void npcm7xx_init_clock_pll(NPCM7xxClockPLLState *pll, - NPCM7xxCLKState *clk, const PLLInitInfo *init_info) -{ - pll->name = init_info->name; - pll->clk = clk; - pll->reg = init_info->reg; - if (init_info->public_name != NULL) { - qdev_alias_clock(DEVICE(pll), "clock-out", DEVICE(clk), - init_info->public_name); - } -} - -static void npcm7xx_init_clock_sel(NPCM7xxClockSELState *sel, - NPCM7xxCLKState *clk, const SELInitInfo *init_info) -{ - int input_size = init_info->input_size; - - sel->name = init_info->name; - sel->clk = clk; - sel->input_size = init_info->input_size; - g_assert(input_size <= NPCM7XX_CLK_SEL_MAX_INPUT); - sel->offset = init_info->offset; - sel->len = init_info->len; - if (init_info->public_name != NULL) { - qdev_alias_clock(DEVICE(sel), "clock-out", DEVICE(clk), - init_info->public_name); - } -} - -static void npcm7xx_init_clock_divider(NPCM7xxClockDividerState *div, - NPCM7xxCLKState *clk, const DividerInitInfo *init_info) -{ - div->name = init_info->name; - div->clk = clk; - - div->divide = init_info->divide; - if (div->divide == divide_by_constant) { - div->divisor = init_info->divisor; - } else { - div->reg = init_info->reg; - div->offset = init_info->offset; - div->len = init_info->len; - } - if (init_info->public_name != NULL) { - qdev_alias_clock(DEVICE(div), "clock-out", DEVICE(clk), - init_info->public_name); - } -} - -static Clock *npcm7xx_get_clock(NPCM7xxCLKState *clk, ClockSrcType type, - int index) -{ - switch (type) { - case CLKSRC_REF: - return clk->clkref; - case CLKSRC_PLL: - return clk->plls[index].clock_out; - case CLKSRC_SEL: - return clk->sels[index].clock_out; - case CLKSRC_DIV: - return clk->dividers[index].clock_out; - default: - g_assert_not_reached(); - } -} - -static void npcm7xx_connect_clocks(NPCM7xxCLKState *clk) -{ - int i, j; - Clock *src; - - for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) { - src = npcm7xx_get_clock(clk, pll_init_info_list[i].src_type, - pll_init_info_list[i].src_index); - clock_set_source(clk->plls[i].clock_in, src); - } - for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) { - for (j = 0; j < sel_init_info_list[i].input_size; ++j) { - src = npcm7xx_get_clock(clk, sel_init_info_list[i].src_type[j], - sel_init_info_list[i].src_index[j]); - clock_set_source(clk->sels[i].clock_in[j], src); - } - } - for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) { - src = npcm7xx_get_clock(clk, divider_init_info_list[i].src_type, - divider_init_info_list[i].src_index); - clock_set_source(clk->dividers[i].clock_in, src); - } -} - -static uint64_t npcm7xx_clk_read(void *opaque, hwaddr offset, unsigned size) -{ - uint32_t reg = offset / sizeof(uint32_t); - NPCM7xxCLKState *s = opaque; - int64_t now_ns; - uint32_t value = 0; - - if (reg >= NPCM7XX_CLK_NR_REGS) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: offset 0x%04" HWADDR_PRIx " out of range\n", - __func__, offset); - return 0; - } - - switch (reg) { - case NPCM7XX_CLK_SWRSTR: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: register @ 0x%04" HWADDR_PRIx " is write-only\n", - __func__, offset); - break; - - case NPCM7XX_CLK_SECCNT: - now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - value = (now_ns - s->ref_ns) / NANOSECONDS_PER_SECOND; - break; - - case NPCM7XX_CLK_CNTR25M: - now_ns = 
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - /* - * This register counts 25 MHz cycles, updating every 640 ns. It rolls - * over to zero every second. - * - * The 4 LSBs are always zero: (1e9 / 640) << 4 = 25000000. - */ - value = (((now_ns - s->ref_ns) / 640) << 4) % NPCM7XX_CLOCK_REF_HZ; - break; - - default: - value = s->regs[reg]; - break; - }; - - trace_npcm7xx_clk_read(offset, value); - - return value; -} - -static void npcm7xx_clk_write(void *opaque, hwaddr offset, - uint64_t v, unsigned size) -{ - uint32_t reg = offset / sizeof(uint32_t); - NPCM7xxCLKState *s = opaque; - uint32_t value = v; - - trace_npcm7xx_clk_write(offset, value); - - if (reg >= NPCM7XX_CLK_NR_REGS) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: offset 0x%04" HWADDR_PRIx " out of range\n", - __func__, offset); - return; - } - - switch (reg) { - case NPCM7XX_CLK_SWRSTR: - qemu_log_mask(LOG_UNIMP, "%s: SW reset not implemented: 0x%02x\n", - __func__, value); - value = 0; - break; - - case NPCM7XX_CLK_PLLCON0: - case NPCM7XX_CLK_PLLCON1: - case NPCM7XX_CLK_PLLCON2: - case NPCM7XX_CLK_PLLCONG: - if (value & PLLCON_PWDEN) { - /* Power down -- clear lock and indicate loss of lock */ - value &= ~PLLCON_LOKI; - value |= PLLCON_LOKS; - } else { - /* Normal mode -- assume always locked */ - value |= PLLCON_LOKI; - /* Keep LOKS unchanged unless cleared by writing 1 */ - if (value & PLLCON_LOKS) { - value &= ~PLLCON_LOKS; - } else { - value |= (value & PLLCON_LOKS); - } - } - /* Only update PLL when it is locked. */ - if (value & PLLCON_LOKI) { - npcm7xx_clk_update_pll(&s->plls[find_pll_by_reg(reg)]); - } - break; - - case NPCM7XX_CLK_CLKSEL: - npcm7xx_clk_update_all_sels(s); - break; - - case NPCM7XX_CLK_CLKDIV1: - case NPCM7XX_CLK_CLKDIV2: - case NPCM7XX_CLK_CLKDIV3: - npcm7xx_clk_update_all_dividers(s); - break; - - case NPCM7XX_CLK_CNTR25M: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: register @ 0x%04" HWADDR_PRIx " is read-only\n", - __func__, offset); - return; - } - - s->regs[reg] = value; -} - -/* Perform reset action triggered by a watchdog */ -static void npcm7xx_clk_perform_watchdog_reset(void *opaque, int n, - int level) -{ - NPCM7xxCLKState *clk = NPCM7XX_CLK(opaque); - uint32_t rcr; - - g_assert(n >= 0 && n <= NPCM7XX_NR_WATCHDOGS); - rcr = clk->regs[NPCM7XX_CLK_WD0RCR + n]; - if (rcr & NPCM7XX_CLK_WDRCR_CA9C) { - watchdog_perform_action(); - } else { - qemu_log_mask(LOG_UNIMP, - "%s: only CPU reset is implemented. (requested 0x%" PRIx32")\n", - __func__, rcr); - } -} - -static const struct MemoryRegionOps npcm7xx_clk_ops = { - .read = npcm7xx_clk_read, - .write = npcm7xx_clk_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4, - .unaligned = false, - }, -}; - -static void npcm7xx_clk_enter_reset(Object *obj, ResetType type) -{ - NPCM7xxCLKState *s = NPCM7XX_CLK(obj); - - QEMU_BUILD_BUG_ON(sizeof(s->regs) != sizeof(cold_reset_values)); - - memcpy(s->regs, cold_reset_values, sizeof(cold_reset_values)); - s->ref_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - npcm7xx_clk_update_all_clocks(s); - /* - * A small number of registers need to be reset on a core domain reset, - * but no such reset type exists yet. 
- */ -} - -static void npcm7xx_clk_init_clock_hierarchy(NPCM7xxCLKState *s) -{ - int i; - - s->clkref = qdev_init_clock_in(DEVICE(s), "clkref", NULL, NULL, 0); - - /* First pass: init all converter modules */ - QEMU_BUILD_BUG_ON(ARRAY_SIZE(pll_init_info_list) != NPCM7XX_CLOCK_NR_PLLS); - QEMU_BUILD_BUG_ON(ARRAY_SIZE(sel_init_info_list) != NPCM7XX_CLOCK_NR_SELS); - QEMU_BUILD_BUG_ON(ARRAY_SIZE(divider_init_info_list) - != NPCM7XX_CLOCK_NR_DIVIDERS); - for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) { - object_initialize_child(OBJECT(s), pll_init_info_list[i].name, - &s->plls[i], TYPE_NPCM7XX_CLOCK_PLL); - npcm7xx_init_clock_pll(&s->plls[i], s, - &pll_init_info_list[i]); - } - for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) { - object_initialize_child(OBJECT(s), sel_init_info_list[i].name, - &s->sels[i], TYPE_NPCM7XX_CLOCK_SEL); - npcm7xx_init_clock_sel(&s->sels[i], s, - &sel_init_info_list[i]); - } - for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) { - object_initialize_child(OBJECT(s), divider_init_info_list[i].name, - &s->dividers[i], TYPE_NPCM7XX_CLOCK_DIVIDER); - npcm7xx_init_clock_divider(&s->dividers[i], s, - &divider_init_info_list[i]); - } - - /* Second pass: connect converter modules */ - npcm7xx_connect_clocks(s); - - clock_update_hz(s->clkref, NPCM7XX_CLOCK_REF_HZ); -} - -static void npcm7xx_clk_init(Object *obj) -{ - NPCM7xxCLKState *s = NPCM7XX_CLK(obj); - - memory_region_init_io(&s->iomem, obj, &npcm7xx_clk_ops, s, - TYPE_NPCM7XX_CLK, 4 * KiB); - sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); -} - -static int npcm7xx_clk_post_load(void *opaque, int version_id) -{ - if (version_id >= 1) { - NPCM7xxCLKState *clk = opaque; - - npcm7xx_clk_update_all_clocks(clk); - } - - return 0; -} - -static void npcm7xx_clk_realize(DeviceState *dev, Error **errp) -{ - int i; - NPCM7xxCLKState *s = NPCM7XX_CLK(dev); - - qdev_init_gpio_in_named(DEVICE(s), npcm7xx_clk_perform_watchdog_reset, - NPCM7XX_WATCHDOG_RESET_GPIO_IN, NPCM7XX_NR_WATCHDOGS); - npcm7xx_clk_init_clock_hierarchy(s); - - /* Realize child devices */ - for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) { - if (!qdev_realize(DEVICE(&s->plls[i]), NULL, errp)) { - return; - } - } - for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) { - if (!qdev_realize(DEVICE(&s->sels[i]), NULL, errp)) { - return; - } - } - for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) { - if (!qdev_realize(DEVICE(&s->dividers[i]), NULL, errp)) { - return; - } - } -} - -static const VMStateDescription vmstate_npcm7xx_clk_pll = { - .name = "npcm7xx-clock-pll", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_CLOCK(clock_in, NPCM7xxClockPLLState), - VMSTATE_END_OF_LIST(), - }, -}; - -static const VMStateDescription vmstate_npcm7xx_clk_sel = { - .name = "npcm7xx-clock-sel", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(clock_in, NPCM7xxClockSELState, - NPCM7XX_CLK_SEL_MAX_INPUT, 0, vmstate_clock, Clock), - VMSTATE_END_OF_LIST(), - }, -}; - -static const VMStateDescription vmstate_npcm7xx_clk_divider = { - .name = "npcm7xx-clock-divider", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_CLOCK(clock_in, NPCM7xxClockDividerState), - VMSTATE_END_OF_LIST(), - }, -}; - -static const VMStateDescription vmstate_npcm7xx_clk = { - .name = "npcm7xx-clk", - .version_id = 1, - .minimum_version_id = 1, - .post_load = npcm7xx_clk_post_load, - .fields = (const VMStateField[]) { - VMSTATE_UINT32_ARRAY(regs, NPCM7xxCLKState, NPCM7XX_CLK_NR_REGS), - 
VMSTATE_INT64(ref_ns, NPCM7xxCLKState), - VMSTATE_CLOCK(clkref, NPCM7xxCLKState), - VMSTATE_END_OF_LIST(), - }, -}; - -static void npcm7xx_clk_pll_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->desc = "NPCM7xx Clock PLL Module"; - dc->vmsd = &vmstate_npcm7xx_clk_pll; -} - -static void npcm7xx_clk_sel_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->desc = "NPCM7xx Clock SEL Module"; - dc->vmsd = &vmstate_npcm7xx_clk_sel; -} - -static void npcm7xx_clk_divider_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->desc = "NPCM7xx Clock Divider Module"; - dc->vmsd = &vmstate_npcm7xx_clk_divider; -} - -static void npcm7xx_clk_class_init(ObjectClass *klass, void *data) -{ - ResettableClass *rc = RESETTABLE_CLASS(klass); - DeviceClass *dc = DEVICE_CLASS(klass); - - QEMU_BUILD_BUG_ON(NPCM7XX_CLK_REGS_END > NPCM7XX_CLK_NR_REGS); - - dc->desc = "NPCM7xx Clock Control Registers"; - dc->vmsd = &vmstate_npcm7xx_clk; - dc->realize = npcm7xx_clk_realize; - rc->phases.enter = npcm7xx_clk_enter_reset; -} - -static const TypeInfo npcm7xx_clk_pll_info = { - .name = TYPE_NPCM7XX_CLOCK_PLL, - .parent = TYPE_DEVICE, - .instance_size = sizeof(NPCM7xxClockPLLState), - .instance_init = npcm7xx_clk_pll_init, - .class_init = npcm7xx_clk_pll_class_init, -}; - -static const TypeInfo npcm7xx_clk_sel_info = { - .name = TYPE_NPCM7XX_CLOCK_SEL, - .parent = TYPE_DEVICE, - .instance_size = sizeof(NPCM7xxClockSELState), - .instance_init = npcm7xx_clk_sel_init, - .class_init = npcm7xx_clk_sel_class_init, -}; - -static const TypeInfo npcm7xx_clk_divider_info = { - .name = TYPE_NPCM7XX_CLOCK_DIVIDER, - .parent = TYPE_DEVICE, - .instance_size = sizeof(NPCM7xxClockDividerState), - .instance_init = npcm7xx_clk_divider_init, - .class_init = npcm7xx_clk_divider_class_init, -}; - -static const TypeInfo npcm7xx_clk_info = { - .name = TYPE_NPCM7XX_CLK, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(NPCM7xxCLKState), - .instance_init = npcm7xx_clk_init, - .class_init = npcm7xx_clk_class_init, -}; - -static void npcm7xx_clk_register_type(void) -{ - type_register_static(&npcm7xx_clk_pll_info); - type_register_static(&npcm7xx_clk_sel_info); - type_register_static(&npcm7xx_clk_divider_info); - type_register_static(&npcm7xx_clk_info); -} -type_init(npcm7xx_clk_register_type); diff --git a/hw/misc/npcm7xx_gcr.c b/hw/misc/npcm7xx_gcr.c deleted file mode 100644 index c4c4e246d7e..00000000000 --- a/hw/misc/npcm7xx_gcr.c +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Nuvoton NPCM7xx System Global Control Registers. - * - * Copyright 2020 Google LLC - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- */ - -#include "qemu/osdep.h" - -#include "hw/misc/npcm7xx_gcr.h" -#include "hw/qdev-properties.h" -#include "migration/vmstate.h" -#include "qapi/error.h" -#include "qemu/cutils.h" -#include "qemu/log.h" -#include "qemu/module.h" -#include "qemu/units.h" - -#include "trace.h" - -#define NPCM7XX_GCR_MIN_DRAM_SIZE (128 * MiB) -#define NPCM7XX_GCR_MAX_DRAM_SIZE (2 * GiB) - -enum NPCM7xxGCRRegisters { - NPCM7XX_GCR_PDID, - NPCM7XX_GCR_PWRON, - NPCM7XX_GCR_MFSEL1 = 0x0c / sizeof(uint32_t), - NPCM7XX_GCR_MFSEL2, - NPCM7XX_GCR_MISCPE, - NPCM7XX_GCR_SPSWC = 0x038 / sizeof(uint32_t), - NPCM7XX_GCR_INTCR, - NPCM7XX_GCR_INTSR, - NPCM7XX_GCR_HIFCR = 0x050 / sizeof(uint32_t), - NPCM7XX_GCR_INTCR2 = 0x060 / sizeof(uint32_t), - NPCM7XX_GCR_MFSEL3, - NPCM7XX_GCR_SRCNT, - NPCM7XX_GCR_RESSR, - NPCM7XX_GCR_RLOCKR1, - NPCM7XX_GCR_FLOCKR1, - NPCM7XX_GCR_DSCNT, - NPCM7XX_GCR_MDLR, - NPCM7XX_GCR_SCRPAD3, - NPCM7XX_GCR_SCRPAD2, - NPCM7XX_GCR_DAVCLVLR = 0x098 / sizeof(uint32_t), - NPCM7XX_GCR_INTCR3, - NPCM7XX_GCR_VSINTR = 0x0ac / sizeof(uint32_t), - NPCM7XX_GCR_MFSEL4, - NPCM7XX_GCR_CPBPNTR = 0x0c4 / sizeof(uint32_t), - NPCM7XX_GCR_CPCTL = 0x0d0 / sizeof(uint32_t), - NPCM7XX_GCR_CP2BST, - NPCM7XX_GCR_B2CPNT, - NPCM7XX_GCR_CPPCTL, - NPCM7XX_GCR_I2CSEGSEL, - NPCM7XX_GCR_I2CSEGCTL, - NPCM7XX_GCR_VSRCR, - NPCM7XX_GCR_MLOCKR, - NPCM7XX_GCR_SCRPAD = 0x013c / sizeof(uint32_t), - NPCM7XX_GCR_USB1PHYCTL, - NPCM7XX_GCR_USB2PHYCTL, - NPCM7XX_GCR_REGS_END, -}; - -static const uint32_t cold_reset_values[NPCM7XX_GCR_NR_REGS] = { - [NPCM7XX_GCR_PDID] = 0x04a92750, /* Poleg A1 */ - [NPCM7XX_GCR_MISCPE] = 0x0000ffff, - [NPCM7XX_GCR_SPSWC] = 0x00000003, - [NPCM7XX_GCR_INTCR] = 0x0000035e, - [NPCM7XX_GCR_HIFCR] = 0x0000004e, - [NPCM7XX_GCR_INTCR2] = (1U << 19), /* DDR initialized */ - [NPCM7XX_GCR_RESSR] = 0x80000000, - [NPCM7XX_GCR_DSCNT] = 0x000000c0, - [NPCM7XX_GCR_DAVCLVLR] = 0x5a00f3cf, - [NPCM7XX_GCR_SCRPAD] = 0x00000008, - [NPCM7XX_GCR_USB1PHYCTL] = 0x034730e4, - [NPCM7XX_GCR_USB2PHYCTL] = 0x034730e4, -}; - -static uint64_t npcm7xx_gcr_read(void *opaque, hwaddr offset, unsigned size) -{ - uint32_t reg = offset / sizeof(uint32_t); - NPCM7xxGCRState *s = opaque; - - if (reg >= NPCM7XX_GCR_NR_REGS) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: offset 0x%04" HWADDR_PRIx " out of range\n", - __func__, offset); - return 0; - } - - trace_npcm7xx_gcr_read(offset, s->regs[reg]); - - return s->regs[reg]; -} - -static void npcm7xx_gcr_write(void *opaque, hwaddr offset, - uint64_t v, unsigned size) -{ - uint32_t reg = offset / sizeof(uint32_t); - NPCM7xxGCRState *s = opaque; - uint32_t value = v; - - trace_npcm7xx_gcr_write(offset, value); - - if (reg >= NPCM7XX_GCR_NR_REGS) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: offset 0x%04" HWADDR_PRIx " out of range\n", - __func__, offset); - return; - } - - switch (reg) { - case NPCM7XX_GCR_PDID: - case NPCM7XX_GCR_PWRON: - case NPCM7XX_GCR_INTSR: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: register @ 0x%04" HWADDR_PRIx " is read-only\n", - __func__, offset); - return; - - case NPCM7XX_GCR_RESSR: - case NPCM7XX_GCR_CP2BST: - /* Write 1 to clear */ - value = s->regs[reg] & ~value; - break; - - case NPCM7XX_GCR_RLOCKR1: - case NPCM7XX_GCR_MDLR: - /* Write 1 to set */ - value |= s->regs[reg]; - break; - }; - - s->regs[reg] = value; -} - -static const struct MemoryRegionOps npcm7xx_gcr_ops = { - .read = npcm7xx_gcr_read, - .write = npcm7xx_gcr_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4, - .unaligned = false, - }, -}; - -static void 
npcm7xx_gcr_enter_reset(Object *obj, ResetType type) -{ - NPCM7xxGCRState *s = NPCM7XX_GCR(obj); - - QEMU_BUILD_BUG_ON(sizeof(s->regs) != sizeof(cold_reset_values)); - - memcpy(s->regs, cold_reset_values, sizeof(s->regs)); - s->regs[NPCM7XX_GCR_PWRON] = s->reset_pwron; - s->regs[NPCM7XX_GCR_MDLR] = s->reset_mdlr; - s->regs[NPCM7XX_GCR_INTCR3] = s->reset_intcr3; -} - -static void npcm7xx_gcr_realize(DeviceState *dev, Error **errp) -{ - ERRP_GUARD(); - NPCM7xxGCRState *s = NPCM7XX_GCR(dev); - uint64_t dram_size; - Object *obj; - - obj = object_property_get_link(OBJECT(dev), "dram-mr", errp); - if (!obj) { - error_prepend(errp, "%s: required dram-mr link not found: ", __func__); - return; - } - dram_size = memory_region_size(MEMORY_REGION(obj)); - if (!is_power_of_2(dram_size) || - dram_size < NPCM7XX_GCR_MIN_DRAM_SIZE || - dram_size > NPCM7XX_GCR_MAX_DRAM_SIZE) { - g_autofree char *sz = size_to_str(dram_size); - g_autofree char *min_sz = size_to_str(NPCM7XX_GCR_MIN_DRAM_SIZE); - g_autofree char *max_sz = size_to_str(NPCM7XX_GCR_MAX_DRAM_SIZE); - error_setg(errp, "%s: unsupported DRAM size %s", __func__, sz); - error_append_hint(errp, - "DRAM size must be a power of two between %s and %s," - " inclusive.\n", min_sz, max_sz); - return; - } - - /* Power-on reset value */ - s->reset_intcr3 = 0x00001002; - - /* - * The GMMAP (Graphics Memory Map) field is used by u-boot to detect the - * DRAM size, and is normally initialized by the boot block as part of DRAM - * training. However, since we don't have a complete emulation of the - * memory controller and try to make it look like it has already been - * initialized, the boot block will skip this initialization, and we need - * to make sure this field is set correctly up front. - * - * WARNING: some versions of u-boot only looks at bits 8 and 9, so 2 GiB of - * DRAM will be interpreted as 128 MiB. 
- * - * https://github.com/Nuvoton-Israel/u-boot/blob/2aef993bd2aafeb5408dbaad0f3ce099ee40c4aa/board/nuvoton/poleg/poleg.c#L244 - */ - s->reset_intcr3 |= ctz64(dram_size / NPCM7XX_GCR_MIN_DRAM_SIZE) << 8; -} - -static void npcm7xx_gcr_init(Object *obj) -{ - NPCM7xxGCRState *s = NPCM7XX_GCR(obj); - - memory_region_init_io(&s->iomem, obj, &npcm7xx_gcr_ops, s, - TYPE_NPCM7XX_GCR, 4 * KiB); - sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); -} - -static const VMStateDescription vmstate_npcm7xx_gcr = { - .name = "npcm7xx-gcr", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_UINT32_ARRAY(regs, NPCM7xxGCRState, NPCM7XX_GCR_NR_REGS), - VMSTATE_END_OF_LIST(), - }, -}; - -static Property npcm7xx_gcr_properties[] = { - DEFINE_PROP_UINT32("disabled-modules", NPCM7xxGCRState, reset_mdlr, 0), - DEFINE_PROP_UINT32("power-on-straps", NPCM7xxGCRState, reset_pwron, 0), - DEFINE_PROP_END_OF_LIST(), -}; - -static void npcm7xx_gcr_class_init(ObjectClass *klass, void *data) -{ - ResettableClass *rc = RESETTABLE_CLASS(klass); - DeviceClass *dc = DEVICE_CLASS(klass); - - QEMU_BUILD_BUG_ON(NPCM7XX_GCR_REGS_END > NPCM7XX_GCR_NR_REGS); - - dc->desc = "NPCM7xx System Global Control Registers"; - dc->realize = npcm7xx_gcr_realize; - dc->vmsd = &vmstate_npcm7xx_gcr; - rc->phases.enter = npcm7xx_gcr_enter_reset; - - device_class_set_props(dc, npcm7xx_gcr_properties); -} - -static const TypeInfo npcm7xx_gcr_info = { - .name = TYPE_NPCM7XX_GCR, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(NPCM7xxGCRState), - .instance_init = npcm7xx_gcr_init, - .class_init = npcm7xx_gcr_class_init, -}; - -static void npcm7xx_gcr_register_type(void) -{ - type_register_static(&npcm7xx_gcr_info); -} -type_init(npcm7xx_gcr_register_type); diff --git a/hw/misc/npcm7xx_mft.c b/hw/misc/npcm7xx_mft.c index 9fcc69fe5c5..b35e971fe53 100644 --- a/hw/misc/npcm7xx_mft.c +++ b/hw/misc/npcm7xx_mft.c @@ -172,8 +172,9 @@ static NPCM7xxMFTCaptureState npcm7xx_mft_compute_cnt( * RPM = revolution/min. The time for one revlution (in ns) is * MINUTE_TO_NANOSECOND / RPM. 
*/ - count = clock_ns_to_ticks(clock, (60 * NANOSECONDS_PER_SECOND) / - (rpm * NPCM7XX_MFT_PULSE_PER_REVOLUTION)); + count = clock_ns_to_ticks(clock, + (uint64_t)(60 * NANOSECONDS_PER_SECOND) / + ((uint64_t)rpm * NPCM7XX_MFT_PULSE_PER_REVOLUTION)); } if (count > NPCM7XX_MFT_MAX_CNT) { @@ -514,7 +515,7 @@ static const VMStateDescription vmstate_npcm7xx_mft = { }, }; -static void npcm7xx_mft_class_init(ObjectClass *klass, void *data) +static void npcm7xx_mft_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/npcm7xx_pwm.c b/hw/misc/npcm7xx_pwm.c index f7f77e30a22..2de18d09b82 100644 --- a/hw/misc/npcm7xx_pwm.c +++ b/hw/misc/npcm7xx_pwm.c @@ -543,7 +543,7 @@ static const VMStateDescription vmstate_npcm7xx_pwm_module = { }, }; -static void npcm7xx_pwm_class_init(ObjectClass *klass, void *data) +static void npcm7xx_pwm_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/npcm7xx_rng.c b/hw/misc/npcm7xx_rng.c index 7f7e5eca626..7d47a1caa07 100644 --- a/hw/misc/npcm7xx_rng.c +++ b/hw/misc/npcm7xx_rng.c @@ -158,7 +158,7 @@ static const VMStateDescription vmstate_npcm7xx_rng = { }, }; -static void npcm7xx_rng_class_init(ObjectClass *klass, void *data) +static void npcm7xx_rng_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/npcm_clk.c b/hw/misc/npcm_clk.c new file mode 100644 index 00000000000..c48d40b4468 --- /dev/null +++ b/hw/misc/npcm_clk.c @@ -0,0 +1,1220 @@ +/* + * Nuvoton NPCM7xx/8xx Clock Control Registers. + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" + +#include "hw/misc/npcm_clk.h" +#include "hw/timer/npcm7xx_timer.h" +#include "hw/qdev-clock.h" +#include "migration/vmstate.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qemu/units.h" +#include "trace.h" +#include "system/watchdog.h" + +/* + * The reference clock hz, and the SECCNT and CNTR25M registers in this module, + * is always 25 MHz. 
+ */ +#define NPCM7XX_CLOCK_REF_HZ (25000000) + +/* Register Field Definitions */ +#define NPCM7XX_CLK_WDRCR_CA9C BIT(0) /* Cortex-A9 Cores */ + +#define PLLCON_LOKI BIT(31) +#define PLLCON_LOKS BIT(30) +#define PLLCON_PWDEN BIT(12) +#define PLLCON_FBDV(con) extract32((con), 16, 12) +#define PLLCON_OTDV2(con) extract32((con), 13, 3) +#define PLLCON_OTDV1(con) extract32((con), 8, 3) +#define PLLCON_INDV(con) extract32((con), 0, 6) + +enum NPCM7xxCLKRegisters { + NPCM7XX_CLK_CLKEN1, + NPCM7XX_CLK_CLKSEL, + NPCM7XX_CLK_CLKDIV1, + NPCM7XX_CLK_PLLCON0, + NPCM7XX_CLK_PLLCON1, + NPCM7XX_CLK_SWRSTR, + NPCM7XX_CLK_IPSRST1 = 0x20 / sizeof(uint32_t), + NPCM7XX_CLK_IPSRST2, + NPCM7XX_CLK_CLKEN2, + NPCM7XX_CLK_CLKDIV2, + NPCM7XX_CLK_CLKEN3, + NPCM7XX_CLK_IPSRST3, + NPCM7XX_CLK_WD0RCR, + NPCM7XX_CLK_WD1RCR, + NPCM7XX_CLK_WD2RCR, + NPCM7XX_CLK_SWRSTC1, + NPCM7XX_CLK_SWRSTC2, + NPCM7XX_CLK_SWRSTC3, + NPCM7XX_CLK_SWRSTC4, + NPCM7XX_CLK_PLLCON2, + NPCM7XX_CLK_CLKDIV3, + NPCM7XX_CLK_CORSTC, + NPCM7XX_CLK_PLLCONG, + NPCM7XX_CLK_AHBCKFI, + NPCM7XX_CLK_SECCNT, + NPCM7XX_CLK_CNTR25M, +}; + +enum NPCM8xxCLKRegisters { + NPCM8XX_CLK_CLKEN1, + NPCM8XX_CLK_CLKSEL, + NPCM8XX_CLK_CLKDIV1, + NPCM8XX_CLK_PLLCON0, + NPCM8XX_CLK_PLLCON1, + NPCM8XX_CLK_SWRSTR, + NPCM8XX_CLK_IPSRST1 = 0x20 / sizeof(uint32_t), + NPCM8XX_CLK_IPSRST2, + NPCM8XX_CLK_CLKEN2, + NPCM8XX_CLK_CLKDIV2, + NPCM8XX_CLK_CLKEN3, + NPCM8XX_CLK_IPSRST3, + NPCM8XX_CLK_WD0RCR, + NPCM8XX_CLK_WD1RCR, + NPCM8XX_CLK_WD2RCR, + NPCM8XX_CLK_SWRSTC1, + NPCM8XX_CLK_SWRSTC2, + NPCM8XX_CLK_SWRSTC3, + NPCM8XX_CLK_TIPRSTC, + NPCM8XX_CLK_PLLCON2, + NPCM8XX_CLK_CLKDIV3, + NPCM8XX_CLK_CORSTC, + NPCM8XX_CLK_PLLCONG, + NPCM8XX_CLK_AHBCKFI, + NPCM8XX_CLK_SECCNT, + NPCM8XX_CLK_CNTR25M, + /* Registers unique to NPCM8XX SoC */ + NPCM8XX_CLK_CLKEN4, + NPCM8XX_CLK_IPSRST4, + NPCM8XX_CLK_BUSTO, + NPCM8XX_CLK_CLKDIV4, + NPCM8XX_CLK_WD0RCRB, + NPCM8XX_CLK_WD1RCRB, + NPCM8XX_CLK_WD2RCRB, + NPCM8XX_CLK_SWRSTC1B, + NPCM8XX_CLK_SWRSTC2B, + NPCM8XX_CLK_SWRSTC3B, + NPCM8XX_CLK_TIPRSTCB, + NPCM8XX_CLK_CORSTCB, + NPCM8XX_CLK_IPSRSTDIS1, + NPCM8XX_CLK_IPSRSTDIS2, + NPCM8XX_CLK_IPSRSTDIS3, + NPCM8XX_CLK_IPSRSTDIS4, + NPCM8XX_CLK_CLKENDIS1, + NPCM8XX_CLK_CLKENDIS2, + NPCM8XX_CLK_CLKENDIS3, + NPCM8XX_CLK_CLKENDIS4, + NPCM8XX_CLK_THRTL_CNT, +}; + +/* + * These reset values were taken from version 0.91 of the NPCM750R data sheet. + * + * All are loaded on power-up reset. CLKENx and SWRSTR should also be loaded on + * core domain reset, but this reset type is not yet supported by QEMU. + */ +static const uint32_t npcm7xx_cold_reset_values[NPCM7XX_CLK_NR_REGS] = { + [NPCM7XX_CLK_CLKEN1] = 0xffffffff, + [NPCM7XX_CLK_CLKSEL] = 0x004aaaaa, + [NPCM7XX_CLK_CLKDIV1] = 0x5413f855, + [NPCM7XX_CLK_PLLCON0] = 0x00222101 | PLLCON_LOKI, + [NPCM7XX_CLK_PLLCON1] = 0x00202101 | PLLCON_LOKI, + [NPCM7XX_CLK_IPSRST1] = 0x00001000, + [NPCM7XX_CLK_IPSRST2] = 0x80000000, + [NPCM7XX_CLK_CLKEN2] = 0xffffffff, + [NPCM7XX_CLK_CLKDIV2] = 0xaa4f8f9f, + [NPCM7XX_CLK_CLKEN3] = 0xffffffff, + [NPCM7XX_CLK_IPSRST3] = 0x03000000, + [NPCM7XX_CLK_WD0RCR] = 0xffffffff, + [NPCM7XX_CLK_WD1RCR] = 0xffffffff, + [NPCM7XX_CLK_WD2RCR] = 0xffffffff, + [NPCM7XX_CLK_SWRSTC1] = 0x00000003, + [NPCM7XX_CLK_PLLCON2] = 0x00c02105 | PLLCON_LOKI, + [NPCM7XX_CLK_CORSTC] = 0x04000003, + [NPCM7XX_CLK_PLLCONG] = 0x01228606 | PLLCON_LOKI, + [NPCM7XX_CLK_AHBCKFI] = 0x000000c8, +}; + +/* + * These reset values were taken from version 0.92 of the NPCM8xx data sheet. 
+ */ +static const uint32_t npcm8xx_cold_reset_values[NPCM8XX_CLK_NR_REGS] = { + [NPCM8XX_CLK_CLKEN1] = 0xffffffff, + [NPCM8XX_CLK_CLKSEL] = 0x154aaaaa, + [NPCM8XX_CLK_CLKDIV1] = 0x5413f855, + [NPCM8XX_CLK_PLLCON0] = 0x00222101 | PLLCON_LOKI, + [NPCM8XX_CLK_PLLCON1] = 0x00202101 | PLLCON_LOKI, + [NPCM8XX_CLK_IPSRST1] = 0x00001000, + [NPCM8XX_CLK_IPSRST2] = 0x80000000, + [NPCM8XX_CLK_CLKEN2] = 0xffffffff, + [NPCM8XX_CLK_CLKDIV2] = 0xaa4f8f9f, + [NPCM8XX_CLK_CLKEN3] = 0xffffffff, + [NPCM8XX_CLK_IPSRST3] = 0x03000000, + [NPCM8XX_CLK_WD0RCR] = 0xffffffff, + [NPCM8XX_CLK_WD1RCR] = 0xffffffff, + [NPCM8XX_CLK_WD2RCR] = 0xffffffff, + [NPCM8XX_CLK_SWRSTC1] = 0x00000003, + [NPCM8XX_CLK_SWRSTC2] = 0x00000001, + [NPCM8XX_CLK_SWRSTC3] = 0x00000001, + [NPCM8XX_CLK_TIPRSTC] = 0x00000001, + [NPCM8XX_CLK_PLLCON2] = 0x00c02105 | PLLCON_LOKI, + [NPCM8XX_CLK_CLKDIV3] = 0x00009100, + [NPCM8XX_CLK_CORSTC] = 0x04000003, + [NPCM8XX_CLK_PLLCONG] = 0x01228606 | PLLCON_LOKI, + [NPCM8XX_CLK_AHBCKFI] = 0x000000c8, + [NPCM8XX_CLK_CLKEN4] = 0xffffffff, + [NPCM8XX_CLK_CLKDIV4] = 0x70009000, + [NPCM8XX_CLK_IPSRST4] = 0x02000000, + [NPCM8XX_CLK_WD0RCRB] = 0xfffffe71, + [NPCM8XX_CLK_WD1RCRB] = 0xfffffe71, + [NPCM8XX_CLK_WD2RCRB] = 0xfffffe71, + [NPCM8XX_CLK_SWRSTC1B] = 0xfffffe71, + [NPCM8XX_CLK_SWRSTC2B] = 0xfffffe71, + [NPCM8XX_CLK_SWRSTC3B] = 0xfffffe71, + [NPCM8XX_CLK_TIPRSTCB] = 0xfffffe71, + [NPCM8XX_CLK_CORSTCB] = 0xfffffe71, +}; + +/* The number of watchdogs that can trigger a reset. */ +#define NPCM7XX_NR_WATCHDOGS (3) + +/* Clock converter functions */ + +#define TYPE_NPCM7XX_CLOCK_PLL "npcm7xx-clock-pll" +#define NPCM7XX_CLOCK_PLL(obj) OBJECT_CHECK(NPCM7xxClockPLLState, \ + (obj), TYPE_NPCM7XX_CLOCK_PLL) +#define TYPE_NPCM7XX_CLOCK_SEL "npcm7xx-clock-sel" +#define NPCM7XX_CLOCK_SEL(obj) OBJECT_CHECK(NPCM7xxClockSELState, \ + (obj), TYPE_NPCM7XX_CLOCK_SEL) +#define TYPE_NPCM7XX_CLOCK_DIVIDER "npcm7xx-clock-divider" +#define NPCM7XX_CLOCK_DIVIDER(obj) OBJECT_CHECK(NPCM7xxClockDividerState, \ + (obj), TYPE_NPCM7XX_CLOCK_DIVIDER) + +static void npcm7xx_clk_update_pll(void *opaque) +{ + NPCM7xxClockPLLState *s = opaque; + uint32_t con = s->clk->regs[s->reg]; + uint64_t freq; + + /* The PLL is grounded if it is not locked yet. 
*/ + if (con & PLLCON_LOKI) { + freq = clock_get_hz(s->clock_in); + freq *= PLLCON_FBDV(con); + freq /= PLLCON_INDV(con) * PLLCON_OTDV1(con) * PLLCON_OTDV2(con); + } else { + freq = 0; + } + + clock_update_hz(s->clock_out, freq); +} + +static void npcm7xx_clk_update_sel(void *opaque) +{ + NPCM7xxClockSELState *s = opaque; + uint32_t index = extract32(s->clk->regs[NPCM7XX_CLK_CLKSEL], s->offset, + s->len); + + if (index >= s->input_size) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: SEL index: %u out of range\n", + __func__, index); + index = 0; + } + clock_update_hz(s->clock_out, clock_get_hz(s->clock_in[index])); +} + +static void npcm7xx_clk_update_divider(void *opaque) +{ + NPCM7xxClockDividerState *s = opaque; + uint32_t freq; + + freq = s->divide(s); + clock_update_hz(s->clock_out, freq); +} + +static uint32_t divide_by_constant(NPCM7xxClockDividerState *s) +{ + return clock_get_hz(s->clock_in) / s->divisor; +} + +static uint32_t divide_by_reg_divisor(NPCM7xxClockDividerState *s) +{ + return clock_get_hz(s->clock_in) / + (extract32(s->clk->regs[s->reg], s->offset, s->len) + 1); +} + +static uint32_t divide_by_reg_divisor_times_2(NPCM7xxClockDividerState *s) +{ + return divide_by_reg_divisor(s) / 2; +} + +static uint32_t shift_by_reg_divisor(NPCM7xxClockDividerState *s) +{ + return clock_get_hz(s->clock_in) >> + extract32(s->clk->regs[s->reg], s->offset, s->len); +} + +static NPCM7xxClockPLL find_pll_by_reg(enum NPCM7xxCLKRegisters reg) +{ + switch (reg) { + case NPCM7XX_CLK_PLLCON0: + return NPCM7XX_CLOCK_PLL0; + case NPCM7XX_CLK_PLLCON1: + return NPCM7XX_CLOCK_PLL1; + case NPCM7XX_CLK_PLLCON2: + return NPCM7XX_CLOCK_PLL2; + case NPCM7XX_CLK_PLLCONG: + return NPCM7XX_CLOCK_PLLG; + default: + g_assert_not_reached(); + } +} + +static void npcm7xx_clk_update_all_plls(NPCMCLKState *clk) +{ + int i; + + for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) { + npcm7xx_clk_update_pll(&clk->plls[i]); + } +} + +static void npcm7xx_clk_update_all_sels(NPCMCLKState *clk) +{ + int i; + + for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) { + npcm7xx_clk_update_sel(&clk->sels[i]); + } +} + +static void npcm7xx_clk_update_all_dividers(NPCMCLKState *clk) +{ + int i; + + for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) { + npcm7xx_clk_update_divider(&clk->dividers[i]); + } +} + +static void npcm7xx_clk_update_all_clocks(NPCMCLKState *clk) +{ + clock_update_hz(clk->clkref, NPCM7XX_CLOCK_REF_HZ); + npcm7xx_clk_update_all_plls(clk); + npcm7xx_clk_update_all_sels(clk); + npcm7xx_clk_update_all_dividers(clk); +} + +/* Types of clock sources. 
*/ +typedef enum ClockSrcType { + CLKSRC_REF, + CLKSRC_PLL, + CLKSRC_SEL, + CLKSRC_DIV, +} ClockSrcType; + +typedef struct PLLInitInfo { + const char *name; + ClockSrcType src_type; + int src_index; + int reg; + const char *public_name; +} PLLInitInfo; + +typedef struct SELInitInfo { + const char *name; + uint8_t input_size; + ClockSrcType src_type[NPCM7XX_CLK_SEL_MAX_INPUT]; + int src_index[NPCM7XX_CLK_SEL_MAX_INPUT]; + int offset; + int len; + const char *public_name; +} SELInitInfo; + +typedef struct DividerInitInfo { + const char *name; + ClockSrcType src_type; + int src_index; + uint32_t (*divide)(NPCM7xxClockDividerState *s); + int reg; /* not used when type == CONSTANT */ + int offset; /* not used when type == CONSTANT */ + int len; /* not used when type == CONSTANT */ + int divisor; /* used only when type == CONSTANT */ + const char *public_name; +} DividerInitInfo; + +static const PLLInitInfo pll_init_info_list[] = { + [NPCM7XX_CLOCK_PLL0] = { + .name = "pll0", + .src_type = CLKSRC_REF, + .reg = NPCM7XX_CLK_PLLCON0, + }, + [NPCM7XX_CLOCK_PLL1] = { + .name = "pll1", + .src_type = CLKSRC_REF, + .reg = NPCM7XX_CLK_PLLCON1, + }, + [NPCM7XX_CLOCK_PLL2] = { + .name = "pll2", + .src_type = CLKSRC_REF, + .reg = NPCM7XX_CLK_PLLCON2, + }, + [NPCM7XX_CLOCK_PLLG] = { + .name = "pllg", + .src_type = CLKSRC_REF, + .reg = NPCM7XX_CLK_PLLCONG, + }, +}; + +static const SELInitInfo sel_init_info_list[] = { + [NPCM7XX_CLOCK_PIXCKSEL] = { + .name = "pixcksel", + .input_size = 2, + .src_type = {CLKSRC_PLL, CLKSRC_REF}, + .src_index = {NPCM7XX_CLOCK_PLLG, 0}, + .offset = 5, + .len = 1, + .public_name = "pixel-clock", + }, + [NPCM7XX_CLOCK_MCCKSEL] = { + .name = "mccksel", + .input_size = 4, + .src_type = {CLKSRC_DIV, CLKSRC_REF, CLKSRC_REF, + /*MCBPCK, shouldn't be used in normal operation*/ + CLKSRC_REF}, + .src_index = {NPCM7XX_CLOCK_PLL1D2, 0, 0, 0}, + .offset = 12, + .len = 2, + .public_name = "mc-phy-clock", + }, + [NPCM7XX_CLOCK_CPUCKSEL] = { + .name = "cpucksel", + .input_size = 4, + .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, + /*SYSBPCK, shouldn't be used in normal operation*/ + CLKSRC_REF}, + .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, 0}, + .offset = 0, + .len = 2, + .public_name = "system-clock", + }, + [NPCM7XX_CLOCK_CLKOUTSEL] = { + .name = "clkoutsel", + .input_size = 5, + .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, + CLKSRC_PLL, CLKSRC_DIV}, + .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, + NPCM7XX_CLOCK_PLLG, NPCM7XX_CLOCK_PLL2D2}, + .offset = 18, + .len = 3, + .public_name = "tock", + }, + [NPCM7XX_CLOCK_UARTCKSEL] = { + .name = "uartcksel", + .input_size = 4, + .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV}, + .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, + NPCM7XX_CLOCK_PLL2D2}, + .offset = 8, + .len = 2, + }, + [NPCM7XX_CLOCK_TIMCKSEL] = { + .name = "timcksel", + .input_size = 4, + .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV}, + .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, + NPCM7XX_CLOCK_PLL2D2}, + .offset = 14, + .len = 2, + }, + [NPCM7XX_CLOCK_SDCKSEL] = { + .name = "sdcksel", + .input_size = 4, + .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV}, + .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, + NPCM7XX_CLOCK_PLL2D2}, + .offset = 6, + .len = 2, + }, + [NPCM7XX_CLOCK_GFXMSEL] = { + .name = "gfxmksel", + .input_size = 2, + .src_type = {CLKSRC_REF, CLKSRC_PLL}, + .src_index = {0, NPCM7XX_CLOCK_PLL2}, + .offset = 21, + .len = 1, + }, + [NPCM7XX_CLOCK_SUCKSEL] = { + 
.name = "sucksel", + .input_size = 4, + .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV}, + .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, + NPCM7XX_CLOCK_PLL2D2}, + .offset = 10, + .len = 2, + }, +}; + +static const DividerInitInfo divider_init_info_list[] = { + [NPCM7XX_CLOCK_PLL1D2] = { + .name = "pll1d2", + .src_type = CLKSRC_PLL, + .src_index = NPCM7XX_CLOCK_PLL1, + .divide = divide_by_constant, + .divisor = 2, + }, + [NPCM7XX_CLOCK_PLL2D2] = { + .name = "pll2d2", + .src_type = CLKSRC_PLL, + .src_index = NPCM7XX_CLOCK_PLL2, + .divide = divide_by_constant, + .divisor = 2, + }, + [NPCM7XX_CLOCK_MC_DIVIDER] = { + .name = "mc-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_MCCKSEL, + .divide = divide_by_constant, + .divisor = 2, + .public_name = "mc-clock" + }, + [NPCM7XX_CLOCK_AXI_DIVIDER] = { + .name = "axi-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_CPUCKSEL, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 0, + .len = 1, + .public_name = "clk2" + }, + [NPCM7XX_CLOCK_AHB_DIVIDER] = { + .name = "ahb-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AXI_DIVIDER, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 26, + .len = 2, + .public_name = "clk4" + }, + [NPCM7XX_CLOCK_AHB3_DIVIDER] = { + .name = "ahb3-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 6, + .len = 5, + .public_name = "ahb3-spi3-clock" + }, + [NPCM7XX_CLOCK_SPI0_DIVIDER] = { + .name = "spi0-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV3, + .offset = 6, + .len = 5, + .public_name = "spi0-clock", + }, + [NPCM7XX_CLOCK_SPIX_DIVIDER] = { + .name = "spix-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV3, + .offset = 1, + .len = 5, + .public_name = "spix-clock", + }, + [NPCM7XX_CLOCK_APB1_DIVIDER] = { + .name = "apb1-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 24, + .len = 2, + .public_name = "apb1-clock", + }, + [NPCM7XX_CLOCK_APB2_DIVIDER] = { + .name = "apb2-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 26, + .len = 2, + .public_name = "apb2-clock", + }, + [NPCM7XX_CLOCK_APB3_DIVIDER] = { + .name = "apb3-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 28, + .len = 2, + .public_name = "apb3-clock", + }, + [NPCM7XX_CLOCK_APB4_DIVIDER] = { + .name = "apb4-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 30, + .len = 2, + .public_name = "apb4-clock", + }, + [NPCM7XX_CLOCK_APB5_DIVIDER] = { + .name = "apb5-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 22, + .len = 2, + .public_name = "apb5-clock", + }, + [NPCM7XX_CLOCK_CLKOUT_DIVIDER] = { + .name = "clkout-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_CLKOUTSEL, + .divide = divide_by_reg_divisor, + .reg = 
NPCM7XX_CLK_CLKDIV2, + .offset = 16, + .len = 5, + .public_name = "clkout", + }, + [NPCM7XX_CLOCK_UART_DIVIDER] = { + .name = "uart-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_UARTCKSEL, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 16, + .len = 5, + .public_name = "uart-clock", + }, + [NPCM7XX_CLOCK_TIMER_DIVIDER] = { + .name = "timer-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_TIMCKSEL, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 21, + .len = 5, + .public_name = "timer-clock", + }, + [NPCM7XX_CLOCK_ADC_DIVIDER] = { + .name = "adc-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_TIMER_DIVIDER, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 28, + .len = 3, + .public_name = "adc-clock", + }, + [NPCM7XX_CLOCK_MMC_DIVIDER] = { + .name = "mmc-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_SDCKSEL, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 11, + .len = 5, + .public_name = "mmc-clock", + }, + [NPCM7XX_CLOCK_SDHC_DIVIDER] = { + .name = "sdhc-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_SDCKSEL, + .divide = divide_by_reg_divisor_times_2, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 0, + .len = 4, + .public_name = "sdhc-clock", + }, + [NPCM7XX_CLOCK_GFXM_DIVIDER] = { + .name = "gfxm-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_GFXMSEL, + .divide = divide_by_constant, + .divisor = 3, + .public_name = "gfxm-clock", + }, + [NPCM7XX_CLOCK_UTMI_DIVIDER] = { + .name = "utmi-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_SUCKSEL, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 8, + .len = 5, + .public_name = "utmi-clock", + }, +}; + +static void npcm7xx_clk_update_pll_cb(void *opaque, ClockEvent event) +{ + npcm7xx_clk_update_pll(opaque); +} + +static void npcm7xx_clk_pll_init(Object *obj) +{ + NPCM7xxClockPLLState *pll = NPCM7XX_CLOCK_PLL(obj); + + pll->clock_in = qdev_init_clock_in(DEVICE(pll), "clock-in", + npcm7xx_clk_update_pll_cb, pll, + ClockUpdate); + pll->clock_out = qdev_init_clock_out(DEVICE(pll), "clock-out"); +} + +static void npcm7xx_clk_update_sel_cb(void *opaque, ClockEvent event) +{ + npcm7xx_clk_update_sel(opaque); +} + +static void npcm7xx_clk_sel_init(Object *obj) +{ + int i; + NPCM7xxClockSELState *sel = NPCM7XX_CLOCK_SEL(obj); + + for (i = 0; i < NPCM7XX_CLK_SEL_MAX_INPUT; ++i) { + g_autofree char *s = g_strdup_printf("clock-in[%d]", i); + sel->clock_in[i] = qdev_init_clock_in(DEVICE(sel), s, + npcm7xx_clk_update_sel_cb, sel, ClockUpdate); + } + sel->clock_out = qdev_init_clock_out(DEVICE(sel), "clock-out"); +} + +static void npcm7xx_clk_update_divider_cb(void *opaque, ClockEvent event) +{ + npcm7xx_clk_update_divider(opaque); +} + +static void npcm7xx_clk_divider_init(Object *obj) +{ + NPCM7xxClockDividerState *div = NPCM7XX_CLOCK_DIVIDER(obj); + + div->clock_in = qdev_init_clock_in(DEVICE(div), "clock-in", + npcm7xx_clk_update_divider_cb, + div, ClockUpdate); + div->clock_out = qdev_init_clock_out(DEVICE(div), "clock-out"); +} + +static void npcm7xx_init_clock_pll(NPCM7xxClockPLLState *pll, + NPCMCLKState *clk, const PLLInitInfo *init_info) +{ + pll->name = init_info->name; + pll->clk = clk; + pll->reg = init_info->reg; + if (init_info->public_name != NULL) { + qdev_alias_clock(DEVICE(pll), "clock-out", DEVICE(clk), + init_info->public_name); + } +} + +static void 
npcm7xx_init_clock_sel(NPCM7xxClockSELState *sel, + NPCMCLKState *clk, const SELInitInfo *init_info) +{ + int input_size = init_info->input_size; + + sel->name = init_info->name; + sel->clk = clk; + sel->input_size = init_info->input_size; + g_assert(input_size <= NPCM7XX_CLK_SEL_MAX_INPUT); + sel->offset = init_info->offset; + sel->len = init_info->len; + if (init_info->public_name != NULL) { + qdev_alias_clock(DEVICE(sel), "clock-out", DEVICE(clk), + init_info->public_name); + } +} + +static void npcm7xx_init_clock_divider(NPCM7xxClockDividerState *div, + NPCMCLKState *clk, const DividerInitInfo *init_info) +{ + div->name = init_info->name; + div->clk = clk; + + div->divide = init_info->divide; + if (div->divide == divide_by_constant) { + div->divisor = init_info->divisor; + } else { + div->reg = init_info->reg; + div->offset = init_info->offset; + div->len = init_info->len; + } + if (init_info->public_name != NULL) { + qdev_alias_clock(DEVICE(div), "clock-out", DEVICE(clk), + init_info->public_name); + } +} + +static Clock *npcm7xx_get_clock(NPCMCLKState *clk, ClockSrcType type, + int index) +{ + switch (type) { + case CLKSRC_REF: + return clk->clkref; + case CLKSRC_PLL: + return clk->plls[index].clock_out; + case CLKSRC_SEL: + return clk->sels[index].clock_out; + case CLKSRC_DIV: + return clk->dividers[index].clock_out; + default: + g_assert_not_reached(); + } +} + +static void npcm7xx_connect_clocks(NPCMCLKState *clk) +{ + int i, j; + Clock *src; + + for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) { + src = npcm7xx_get_clock(clk, pll_init_info_list[i].src_type, + pll_init_info_list[i].src_index); + clock_set_source(clk->plls[i].clock_in, src); + } + for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) { + for (j = 0; j < sel_init_info_list[i].input_size; ++j) { + src = npcm7xx_get_clock(clk, sel_init_info_list[i].src_type[j], + sel_init_info_list[i].src_index[j]); + clock_set_source(clk->sels[i].clock_in[j], src); + } + } + for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) { + src = npcm7xx_get_clock(clk, divider_init_info_list[i].src_type, + divider_init_info_list[i].src_index); + clock_set_source(clk->dividers[i].clock_in, src); + } +} + +static uint64_t npcm_clk_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t reg = offset / sizeof(uint32_t); + NPCMCLKState *s = opaque; + NPCMCLKClass *c = NPCM_CLK_GET_CLASS(s); + int64_t now_ns; + uint32_t value = 0; + + if (reg >= c->nr_regs) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: offset 0x%04" HWADDR_PRIx " out of range\n", + __func__, offset); + return 0; + } + + switch (reg) { + case NPCM7XX_CLK_SWRSTR: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: register @ 0x%04" HWADDR_PRIx " is write-only\n", + __func__, offset); + break; + + case NPCM7XX_CLK_SECCNT: + now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + value = (now_ns - s->ref_ns) / NANOSECONDS_PER_SECOND; + break; + + case NPCM7XX_CLK_CNTR25M: + now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + /* + * This register counts 25 MHz cycles, updating every 640 ns. It rolls + * over to zero every second. + * + * The 4 LSBs are always zero: (1e9 / 640) << 4 = 25000000. 
+ */ + value = (((now_ns - s->ref_ns) / 640) << 4) % NPCM7XX_CLOCK_REF_HZ; + break; + + default: + value = s->regs[reg]; + break; + }; + + trace_npcm_clk_read(offset, value); + + return value; +} + +static void npcm_clk_write(void *opaque, hwaddr offset, + uint64_t v, unsigned size) +{ + uint32_t reg = offset / sizeof(uint32_t); + NPCMCLKState *s = opaque; + NPCMCLKClass *c = NPCM_CLK_GET_CLASS(s); + uint32_t value = v; + + trace_npcm_clk_write(offset, value); + + if (reg >= c->nr_regs) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: offset 0x%04" HWADDR_PRIx " out of range\n", + __func__, offset); + return; + } + + switch (reg) { + case NPCM7XX_CLK_SWRSTR: + qemu_log_mask(LOG_UNIMP, "%s: SW reset not implemented: 0x%02x\n", + __func__, value); + value = 0; + break; + + case NPCM7XX_CLK_PLLCON0: + case NPCM7XX_CLK_PLLCON1: + case NPCM7XX_CLK_PLLCON2: + case NPCM7XX_CLK_PLLCONG: + if (value & PLLCON_PWDEN) { + /* Power down -- clear lock and indicate loss of lock */ + value &= ~PLLCON_LOKI; + value |= PLLCON_LOKS; + } else { + /* Normal mode -- assume always locked */ + value |= PLLCON_LOKI; + /* Keep LOKS unchanged unless cleared by writing 1 */ + if (value & PLLCON_LOKS) { + value &= ~PLLCON_LOKS; + } else { + value |= (value & PLLCON_LOKS); + } + } + /* Only update PLL when it is locked. */ + if (value & PLLCON_LOKI) { + npcm7xx_clk_update_pll(&s->plls[find_pll_by_reg(reg)]); + } + break; + + case NPCM7XX_CLK_CLKSEL: + npcm7xx_clk_update_all_sels(s); + break; + + case NPCM7XX_CLK_CLKDIV1: + case NPCM7XX_CLK_CLKDIV2: + case NPCM7XX_CLK_CLKDIV3: + npcm7xx_clk_update_all_dividers(s); + break; + + case NPCM7XX_CLK_CNTR25M: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: register @ 0x%04" HWADDR_PRIx " is read-only\n", + __func__, offset); + return; + } + + s->regs[reg] = value; +} + +/* Perform reset action triggered by a watchdog */ +static void npcm7xx_clk_perform_watchdog_reset(void *opaque, int n, + int level) +{ + NPCMCLKState *clk = NPCM_CLK(opaque); + uint32_t rcr; + + g_assert(n >= 0 && n <= NPCM7XX_NR_WATCHDOGS); + rcr = clk->regs[NPCM7XX_CLK_WD0RCR + n]; + if (rcr & NPCM7XX_CLK_WDRCR_CA9C) { + watchdog_perform_action(); + } else { + qemu_log_mask(LOG_UNIMP, + "%s: only CPU reset is implemented. (requested 0x%" PRIx32")\n", + __func__, rcr); + } +} + +static const struct MemoryRegionOps npcm_clk_ops = { + .read = npcm_clk_read, + .write = npcm_clk_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void npcm_clk_enter_reset(Object *obj, ResetType type) +{ + NPCMCLKState *s = NPCM_CLK(obj); + NPCMCLKClass *c = NPCM_CLK_GET_CLASS(s); + + size_t sizeof_regs = c->nr_regs * sizeof(uint32_t); + g_assert(sizeof(s->regs) >= sizeof_regs); + memcpy(s->regs, c->cold_reset_values, sizeof_regs); + s->ref_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + npcm7xx_clk_update_all_clocks(s); + /* + * A small number of registers need to be reset on a core domain reset, + * but no such reset type exists yet. 
+ */ +} + +static void npcm7xx_clk_init_clock_hierarchy(NPCMCLKState *s) +{ + int i; + + s->clkref = qdev_init_clock_in(DEVICE(s), "clkref", NULL, NULL, 0); + + /* First pass: init all converter modules */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(pll_init_info_list) != NPCM7XX_CLOCK_NR_PLLS); + QEMU_BUILD_BUG_ON(ARRAY_SIZE(sel_init_info_list) != NPCM7XX_CLOCK_NR_SELS); + QEMU_BUILD_BUG_ON(ARRAY_SIZE(divider_init_info_list) + != NPCM7XX_CLOCK_NR_DIVIDERS); + for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) { + object_initialize_child(OBJECT(s), pll_init_info_list[i].name, + &s->plls[i], TYPE_NPCM7XX_CLOCK_PLL); + npcm7xx_init_clock_pll(&s->plls[i], s, + &pll_init_info_list[i]); + } + for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) { + object_initialize_child(OBJECT(s), sel_init_info_list[i].name, + &s->sels[i], TYPE_NPCM7XX_CLOCK_SEL); + npcm7xx_init_clock_sel(&s->sels[i], s, + &sel_init_info_list[i]); + } + for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) { + object_initialize_child(OBJECT(s), divider_init_info_list[i].name, + &s->dividers[i], TYPE_NPCM7XX_CLOCK_DIVIDER); + npcm7xx_init_clock_divider(&s->dividers[i], s, + &divider_init_info_list[i]); + } + + /* Second pass: connect converter modules */ + npcm7xx_connect_clocks(s); + + clock_update_hz(s->clkref, NPCM7XX_CLOCK_REF_HZ); +} + +static void npcm_clk_init(Object *obj) +{ + NPCMCLKState *s = NPCM_CLK(obj); + + memory_region_init_io(&s->iomem, obj, &npcm_clk_ops, s, + TYPE_NPCM_CLK, 4 * KiB); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); +} + +static int npcm_clk_post_load(void *opaque, int version_id) +{ + if (version_id >= 1) { + NPCMCLKState *clk = opaque; + + npcm7xx_clk_update_all_clocks(clk); + } + + return 0; +} + +static void npcm_clk_realize(DeviceState *dev, Error **errp) +{ + int i; + NPCMCLKState *s = NPCM_CLK(dev); + + qdev_init_gpio_in_named(DEVICE(s), npcm7xx_clk_perform_watchdog_reset, + NPCM7XX_WATCHDOG_RESET_GPIO_IN, NPCM7XX_NR_WATCHDOGS); + npcm7xx_clk_init_clock_hierarchy(s); + + /* Realize child devices */ + for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) { + if (!qdev_realize(DEVICE(&s->plls[i]), NULL, errp)) { + return; + } + } + for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) { + if (!qdev_realize(DEVICE(&s->sels[i]), NULL, errp)) { + return; + } + } + for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) { + if (!qdev_realize(DEVICE(&s->dividers[i]), NULL, errp)) { + return; + } + } +} + +static const VMStateDescription vmstate_npcm7xx_clk_pll = { + .name = "npcm7xx-clock-pll", + .version_id = 0, + .minimum_version_id = 0, + .fields = (const VMStateField[]) { + VMSTATE_CLOCK(clock_in, NPCM7xxClockPLLState), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_npcm7xx_clk_sel = { + .name = "npcm7xx-clock-sel", + .version_id = 0, + .minimum_version_id = 0, + .fields = (const VMStateField[]) { + VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(clock_in, NPCM7xxClockSELState, + NPCM7XX_CLK_SEL_MAX_INPUT, 0, vmstate_clock, Clock), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_npcm7xx_clk_divider = { + .name = "npcm7xx-clock-divider", + .version_id = 0, + .minimum_version_id = 0, + .fields = (const VMStateField[]) { + VMSTATE_CLOCK(clock_in, NPCM7xxClockDividerState), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_npcm_clk = { + .name = "npcm-clk", + .version_id = 3, + .minimum_version_id = 3, + .post_load = npcm_clk_post_load, + .fields = (const VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, NPCMCLKState, NPCM_CLK_MAX_NR_REGS), + VMSTATE_INT64(ref_ns, NPCMCLKState), + 
VMSTATE_CLOCK(clkref, NPCMCLKState), + VMSTATE_END_OF_LIST(), + }, +}; + +static void npcm7xx_clk_pll_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NPCM7xx Clock PLL Module"; + dc->vmsd = &vmstate_npcm7xx_clk_pll; + /* Reason: Part of NPCMCLKState component */ + dc->user_creatable = false; +} + +static void npcm7xx_clk_sel_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NPCM7xx Clock SEL Module"; + dc->vmsd = &vmstate_npcm7xx_clk_sel; + /* Reason: Part of NPCMCLKState component */ + dc->user_creatable = false; +} + +static void npcm7xx_clk_divider_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NPCM7xx Clock Divider Module"; + dc->vmsd = &vmstate_npcm7xx_clk_divider; + /* Reason: Part of NPCMCLKState component */ + dc->user_creatable = false; +} + +static void npcm_clk_class_init(ObjectClass *klass, const void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_npcm_clk; + dc->realize = npcm_clk_realize; + rc->phases.enter = npcm_clk_enter_reset; +} + +static void npcm7xx_clk_class_init(ObjectClass *klass, const void *data) +{ + NPCMCLKClass *c = NPCM_CLK_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NPCM7xx Clock Control Registers"; + c->nr_regs = NPCM7XX_CLK_NR_REGS; + c->cold_reset_values = npcm7xx_cold_reset_values; +} + +static void npcm8xx_clk_class_init(ObjectClass *klass, const void *data) +{ + NPCMCLKClass *c = NPCM_CLK_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NPCM8xx Clock Control Registers"; + c->nr_regs = NPCM8XX_CLK_NR_REGS; + c->cold_reset_values = npcm8xx_cold_reset_values; +} + +static const TypeInfo npcm7xx_clk_pll_info = { + .name = TYPE_NPCM7XX_CLOCK_PLL, + .parent = TYPE_DEVICE, + .instance_size = sizeof(NPCM7xxClockPLLState), + .instance_init = npcm7xx_clk_pll_init, + .class_init = npcm7xx_clk_pll_class_init, +}; + +static const TypeInfo npcm7xx_clk_sel_info = { + .name = TYPE_NPCM7XX_CLOCK_SEL, + .parent = TYPE_DEVICE, + .instance_size = sizeof(NPCM7xxClockSELState), + .instance_init = npcm7xx_clk_sel_init, + .class_init = npcm7xx_clk_sel_class_init, +}; + +static const TypeInfo npcm7xx_clk_divider_info = { + .name = TYPE_NPCM7XX_CLOCK_DIVIDER, + .parent = TYPE_DEVICE, + .instance_size = sizeof(NPCM7xxClockDividerState), + .instance_init = npcm7xx_clk_divider_init, + .class_init = npcm7xx_clk_divider_class_init, +}; + +static const TypeInfo npcm_clk_info = { + .name = TYPE_NPCM_CLK, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCMCLKState), + .instance_init = npcm_clk_init, + .class_size = sizeof(NPCMCLKClass), + .class_init = npcm_clk_class_init, + .abstract = true, +}; + +static const TypeInfo npcm7xx_clk_info = { + .name = TYPE_NPCM7XX_CLK, + .parent = TYPE_NPCM_CLK, + .class_init = npcm7xx_clk_class_init, +}; + +static const TypeInfo npcm8xx_clk_info = { + .name = TYPE_NPCM8XX_CLK, + .parent = TYPE_NPCM_CLK, + .class_init = npcm8xx_clk_class_init, +}; + +static void npcm7xx_clk_register_type(void) +{ + type_register_static(&npcm7xx_clk_pll_info); + type_register_static(&npcm7xx_clk_sel_info); + type_register_static(&npcm7xx_clk_divider_info); + type_register_static(&npcm_clk_info); + type_register_static(&npcm7xx_clk_info); + type_register_static(&npcm8xx_clk_info); +} +type_init(npcm7xx_clk_register_type); diff --git a/hw/misc/npcm_gcr.c 
b/hw/misc/npcm_gcr.c new file mode 100644 index 00000000000..2acaa167713 --- /dev/null +++ b/hw/misc/npcm_gcr.c @@ -0,0 +1,482 @@ +/* + * Nuvoton NPCM7xx/8xx System Global Control Registers. + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" + +#include "hw/misc/npcm_gcr.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/cutils.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/units.h" + +#include "trace.h" + +#define NPCM7XX_GCR_MIN_DRAM_SIZE (128 * MiB) +#define NPCM7XX_GCR_MAX_DRAM_SIZE (2 * GiB) + +enum NPCM7xxGCRRegisters { + NPCM7XX_GCR_PDID, + NPCM7XX_GCR_PWRON, + NPCM7XX_GCR_MFSEL1 = 0x0c / sizeof(uint32_t), + NPCM7XX_GCR_MFSEL2, + NPCM7XX_GCR_MISCPE, + NPCM7XX_GCR_SPSWC = 0x038 / sizeof(uint32_t), + NPCM7XX_GCR_INTCR, + NPCM7XX_GCR_INTSR, + NPCM7XX_GCR_HIFCR = 0x050 / sizeof(uint32_t), + NPCM7XX_GCR_INTCR2 = 0x060 / sizeof(uint32_t), + NPCM7XX_GCR_MFSEL3, + NPCM7XX_GCR_SRCNT, + NPCM7XX_GCR_RESSR, + NPCM7XX_GCR_RLOCKR1, + NPCM7XX_GCR_FLOCKR1, + NPCM7XX_GCR_DSCNT, + NPCM7XX_GCR_MDLR, + NPCM7XX_GCR_SCRPAD3, + NPCM7XX_GCR_SCRPAD2, + NPCM7XX_GCR_DAVCLVLR = 0x098 / sizeof(uint32_t), + NPCM7XX_GCR_INTCR3, + NPCM7XX_GCR_VSINTR = 0x0ac / sizeof(uint32_t), + NPCM7XX_GCR_MFSEL4, + NPCM7XX_GCR_CPBPNTR = 0x0c4 / sizeof(uint32_t), + NPCM7XX_GCR_CPCTL = 0x0d0 / sizeof(uint32_t), + NPCM7XX_GCR_CP2BST, + NPCM7XX_GCR_B2CPNT, + NPCM7XX_GCR_CPPCTL, + NPCM7XX_GCR_I2CSEGSEL, + NPCM7XX_GCR_I2CSEGCTL, + NPCM7XX_GCR_VSRCR, + NPCM7XX_GCR_MLOCKR, + NPCM7XX_GCR_SCRPAD = 0x013c / sizeof(uint32_t), + NPCM7XX_GCR_USB1PHYCTL, + NPCM7XX_GCR_USB2PHYCTL, +}; + +static const uint32_t npcm7xx_cold_reset_values[NPCM7XX_GCR_NR_REGS] = { + [NPCM7XX_GCR_PDID] = 0x04a92750, /* Poleg A1 */ + [NPCM7XX_GCR_MISCPE] = 0x0000ffff, + [NPCM7XX_GCR_SPSWC] = 0x00000003, + [NPCM7XX_GCR_INTCR] = 0x0000035e, + [NPCM7XX_GCR_HIFCR] = 0x0000004e, + [NPCM7XX_GCR_INTCR2] = (1U << 19), /* DDR initialized */ + [NPCM7XX_GCR_RESSR] = 0x80000000, + [NPCM7XX_GCR_DSCNT] = 0x000000c0, + [NPCM7XX_GCR_DAVCLVLR] = 0x5a00f3cf, + [NPCM7XX_GCR_SCRPAD] = 0x00000008, + [NPCM7XX_GCR_USB1PHYCTL] = 0x034730e4, + [NPCM7XX_GCR_USB2PHYCTL] = 0x034730e4, +}; + +enum NPCM8xxGCRRegisters { + NPCM8XX_GCR_PDID, + NPCM8XX_GCR_PWRON, + NPCM8XX_GCR_MISCPE = 0x014 / sizeof(uint32_t), + NPCM8XX_GCR_FLOCKR2 = 0x020 / sizeof(uint32_t), + NPCM8XX_GCR_FLOCKR3, + NPCM8XX_GCR_A35_MODE = 0x034 / sizeof(uint32_t), + NPCM8XX_GCR_SPSWC, + NPCM8XX_GCR_INTCR, + NPCM8XX_GCR_INTSR, + NPCM8XX_GCR_HIFCR = 0x050 / sizeof(uint32_t), + NPCM8XX_GCR_INTCR2 = 0x060 / sizeof(uint32_t), + NPCM8XX_GCR_SRCNT = 0x068 / sizeof(uint32_t), + NPCM8XX_GCR_RESSR, + NPCM8XX_GCR_RLOCKR1, + NPCM8XX_GCR_FLOCKR1, + NPCM8XX_GCR_DSCNT, + NPCM8XX_GCR_MDLR, + NPCM8XX_GCR_SCRPAD_C = 0x080 / sizeof(uint32_t), + NPCM8XX_GCR_SCRPAD_B, + NPCM8XX_GCR_DAVCLVLR = 0x098 / sizeof(uint32_t), + NPCM8XX_GCR_INTCR3, + NPCM8XX_GCR_PCIRCTL = 0x0a0 / sizeof(uint32_t), + NPCM8XX_GCR_VSINTR, + NPCM8XX_GCR_SD2SUR1 = 0x0b4 / 
sizeof(uint32_t), + NPCM8XX_GCR_SD2SUR2, + NPCM8XX_GCR_INTCR4 = 0x0c0 / sizeof(uint32_t), + NPCM8XX_GCR_CPCTL = 0x0d0 / sizeof(uint32_t), + NPCM8XX_GCR_CP2BST, + NPCM8XX_GCR_B2CPNT, + NPCM8XX_GCR_CPPCTL, + NPCM8XX_GCR_I2CSEGSEL = 0x0e0 / sizeof(uint32_t), + NPCM8XX_GCR_I2CSEGCTL, + NPCM8XX_GCR_VSRCR, + NPCM8XX_GCR_MLOCKR, + NPCM8XX_GCR_SCRPAD = 0x13c / sizeof(uint32_t), + NPCM8XX_GCR_USB1PHYCTL, + NPCM8XX_GCR_USB2PHYCTL, + NPCM8XX_GCR_USB3PHYCTL, + NPCM8XX_GCR_MFSEL1 = 0x260 / sizeof(uint32_t), + NPCM8XX_GCR_MFSEL2, + NPCM8XX_GCR_MFSEL3, + NPCM8XX_GCR_MFSEL4, + NPCM8XX_GCR_MFSEL5, + NPCM8XX_GCR_MFSEL6, + NPCM8XX_GCR_MFSEL7, + NPCM8XX_GCR_MFSEL_LK1 = 0x280 / sizeof(uint32_t), + NPCM8XX_GCR_MFSEL_LK2, + NPCM8XX_GCR_MFSEL_LK3, + NPCM8XX_GCR_MFSEL_LK4, + NPCM8XX_GCR_MFSEL_LK5, + NPCM8XX_GCR_MFSEL_LK6, + NPCM8XX_GCR_MFSEL_LK7, + NPCM8XX_GCR_MFSEL_SET1 = 0x2a0 / sizeof(uint32_t), + NPCM8XX_GCR_MFSEL_SET2, + NPCM8XX_GCR_MFSEL_SET3, + NPCM8XX_GCR_MFSEL_SET4, + NPCM8XX_GCR_MFSEL_SET5, + NPCM8XX_GCR_MFSEL_SET6, + NPCM8XX_GCR_MFSEL_SET7, + NPCM8XX_GCR_MFSEL_CLR1 = 0x2c0 / sizeof(uint32_t), + NPCM8XX_GCR_MFSEL_CLR2, + NPCM8XX_GCR_MFSEL_CLR3, + NPCM8XX_GCR_MFSEL_CLR4, + NPCM8XX_GCR_MFSEL_CLR5, + NPCM8XX_GCR_MFSEL_CLR6, + NPCM8XX_GCR_MFSEL_CLR7, + NPCM8XX_GCR_WD0RCRLK = 0x400 / sizeof(uint32_t), + NPCM8XX_GCR_WD1RCRLK, + NPCM8XX_GCR_WD2RCRLK, + NPCM8XX_GCR_SWRSTC1LK, + NPCM8XX_GCR_SWRSTC2LK, + NPCM8XX_GCR_SWRSTC3LK, + NPCM8XX_GCR_TIPRSTCLK, + NPCM8XX_GCR_CORSTCLK, + NPCM8XX_GCR_WD0RCRBLK, + NPCM8XX_GCR_WD1RCRBLK, + NPCM8XX_GCR_WD2RCRBLK, + NPCM8XX_GCR_SWRSTC1BLK, + NPCM8XX_GCR_SWRSTC2BLK, + NPCM8XX_GCR_SWRSTC3BLK, + NPCM8XX_GCR_TIPRSTCBLK, + NPCM8XX_GCR_CORSTCBLK, + /* 64 scratch pad registers start here. 0xe00 ~ 0xefc */ + NPCM8XX_GCR_SCRPAD_00 = 0xe00 / sizeof(uint32_t), + /* 32 semaphore registers start here. 0xf00 ~ 0xf7c */ + NPCM8XX_GCR_GP_SEMFR_00 = 0xf00 / sizeof(uint32_t), + NPCM8XX_GCR_GP_SEMFR_31 = 0xf7c / sizeof(uint32_t), +}; + +static const uint32_t npcm8xx_cold_reset_values[NPCM8XX_GCR_NR_REGS] = { + [NPCM8XX_GCR_PDID] = 0x04a35850, /* Arbel A1 */ + [NPCM8XX_GCR_MISCPE] = 0x0000ffff, + [NPCM8XX_GCR_A35_MODE] = 0xfff4ff30, + [NPCM8XX_GCR_SPSWC] = 0x00000003, + [NPCM8XX_GCR_INTCR] = 0x0010035e, + [NPCM8XX_GCR_HIFCR] = 0x0000004e, + [NPCM8XX_GCR_SD2SUR1] = 0xfdc80000, + [NPCM8XX_GCR_SD2SUR2] = 0x5200b130, + [NPCM8XX_GCR_INTCR2] = (1U << 19), /* DDR initialized */ + [NPCM8XX_GCR_RESSR] = 0x80000000, + [NPCM8XX_GCR_DAVCLVLR] = 0x5a00f3cf, + [NPCM8XX_GCR_INTCR3] = 0x5e001002, + [NPCM8XX_GCR_VSRCR] = 0x00004800, + [NPCM8XX_GCR_SCRPAD] = 0x00000008, + [NPCM8XX_GCR_USB1PHYCTL] = 0x034730e4, + [NPCM8XX_GCR_USB2PHYCTL] = 0x034730e4, + [NPCM8XX_GCR_USB3PHYCTL] = 0x034730e4, + /* All 32 semaphores should be initialized to 1. 
*/ + [NPCM8XX_GCR_GP_SEMFR_00...NPCM8XX_GCR_GP_SEMFR_31] = 0x00000001, +}; + +static uint64_t npcm_gcr_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t reg = offset / sizeof(uint32_t); + NPCMGCRState *s = opaque; + NPCMGCRClass *c = NPCM_GCR_GET_CLASS(s); + uint64_t value; + + if (reg >= c->nr_regs) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: offset 0x%04" HWADDR_PRIx " out of range\n", + __func__, offset); + return 0; + } + + switch (size) { + case 4: + value = s->regs[reg]; + break; + + case 8: + g_assert(!(reg & 1)); + value = deposit64(s->regs[reg], 32, 32, s->regs[reg + 1]); + break; + + default: + g_assert_not_reached(); + } + + trace_npcm_gcr_read(offset, value); + return value; +} + +static void npcm_gcr_write(void *opaque, hwaddr offset, + uint64_t v, unsigned size) +{ + uint32_t reg = offset / sizeof(uint32_t); + NPCMGCRState *s = opaque; + NPCMGCRClass *c = NPCM_GCR_GET_CLASS(s); + uint32_t value = v; + + trace_npcm_gcr_write(offset, v); + + if (reg >= c->nr_regs) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: offset 0x%04" HWADDR_PRIx " out of range\n", + __func__, offset); + return; + } + + switch (size) { + case 4: + switch (reg) { + case NPCM7XX_GCR_PDID: + case NPCM7XX_GCR_PWRON: + case NPCM7XX_GCR_INTSR: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: register @ 0x%04" HWADDR_PRIx " is read-only\n", + __func__, offset); + return; + + case NPCM7XX_GCR_RESSR: + case NPCM7XX_GCR_CP2BST: + /* Write 1 to clear */ + value = s->regs[reg] & ~value; + break; + + case NPCM7XX_GCR_RLOCKR1: + case NPCM7XX_GCR_MDLR: + /* Write 1 to set */ + value |= s->regs[reg]; + break; + }; + s->regs[reg] = value; + break; + + case 8: + g_assert(!(reg & 1)); + s->regs[reg] = value; + s->regs[reg + 1] = extract64(v, 32, 32); + break; + + default: + g_assert_not_reached(); + } +} + +static bool npcm_gcr_check_mem_op(void *opaque, hwaddr offset, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + NPCMGCRClass *c = NPCM_GCR_GET_CLASS(opaque); + + if (offset >= c->nr_regs * sizeof(uint32_t)) { + return false; + } + + switch (size) { + case 4: + return true; + case 8: + if (offset >= NPCM8XX_GCR_SCRPAD_00 * sizeof(uint32_t) && + offset < (NPCM8XX_GCR_NR_REGS - 1) * sizeof(uint32_t)) { + return true; + } else { + return false; + } + default: + return false; + } +} + +static const struct MemoryRegionOps npcm_gcr_ops = { + .read = npcm_gcr_read, + .write = npcm_gcr_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + .accepts = npcm_gcr_check_mem_op, + .unaligned = false, + }, +}; + +static void npcm7xx_gcr_enter_reset(Object *obj, ResetType type) +{ + NPCMGCRState *s = NPCM_GCR(obj); + NPCMGCRClass *c = NPCM_GCR_GET_CLASS(obj); + + g_assert(sizeof(s->regs) >= sizeof(c->cold_reset_values)); + g_assert(sizeof(s->regs) >= c->nr_regs * sizeof(uint32_t)); + memcpy(s->regs, c->cold_reset_values, c->nr_regs * sizeof(uint32_t)); + /* These 3 registers are at the same location in both 7xx and 8xx. */ + s->regs[NPCM7XX_GCR_PWRON] = s->reset_pwron; + s->regs[NPCM7XX_GCR_MDLR] = s->reset_mdlr; + s->regs[NPCM7XX_GCR_INTCR3] = s->reset_intcr3; +} + +static void npcm8xx_gcr_enter_reset(Object *obj, ResetType type) +{ + NPCMGCRState *s = NPCM_GCR(obj); + NPCMGCRClass *c = NPCM_GCR_GET_CLASS(obj); + + memcpy(s->regs, c->cold_reset_values, c->nr_regs * sizeof(uint32_t)); + /* These 3 registers are at the same location in both 7xx and 8xx. 
*/ + s->regs[NPCM8XX_GCR_PWRON] = s->reset_pwron; + s->regs[NPCM8XX_GCR_MDLR] = s->reset_mdlr; + s->regs[NPCM8XX_GCR_INTCR3] = s->reset_intcr3; + s->regs[NPCM8XX_GCR_SCRPAD_B] = s->reset_scrpad_b; +} + +static void npcm_gcr_realize(DeviceState *dev, Error **errp) +{ + ERRP_GUARD(); + NPCMGCRState *s = NPCM_GCR(dev); + uint64_t dram_size; + Object *obj; + + obj = object_property_get_link(OBJECT(dev), "dram-mr", errp); + if (!obj) { + error_prepend(errp, "%s: required dram-mr link not found: ", __func__); + return; + } + dram_size = memory_region_size(MEMORY_REGION(obj)); + if (!is_power_of_2(dram_size) || + dram_size < NPCM7XX_GCR_MIN_DRAM_SIZE || + dram_size > NPCM7XX_GCR_MAX_DRAM_SIZE) { + g_autofree char *sz = size_to_str(dram_size); + g_autofree char *min_sz = size_to_str(NPCM7XX_GCR_MIN_DRAM_SIZE); + g_autofree char *max_sz = size_to_str(NPCM7XX_GCR_MAX_DRAM_SIZE); + error_setg(errp, "%s: unsupported DRAM size %s", __func__, sz); + error_append_hint(errp, + "DRAM size must be a power of two between %s and %s," + " inclusive.\n", min_sz, max_sz); + return; + } + + /* Power-on reset value */ + s->reset_intcr3 = 0x00001002; + + /* + * The GMMAP (Graphics Memory Map) field is used by u-boot to detect the + * DRAM size, and is normally initialized by the boot block as part of DRAM + * training. However, since we don't have a complete emulation of the + * memory controller and try to make it look like it has already been + * initialized, the boot block will skip this initialization, and we need + * to make sure this field is set correctly up front. + * + * WARNING: some versions of u-boot only looks at bits 8 and 9, so 2 GiB of + * DRAM will be interpreted as 128 MiB. + * + * https://github.com/Nuvoton-Israel/u-boot/blob/2aef993bd2aafeb5408dbaad0f3ce099ee40c4aa/board/nuvoton/poleg/poleg.c#L244 + */ + s->reset_intcr3 |= ctz64(dram_size / NPCM7XX_GCR_MIN_DRAM_SIZE) << 8; + + /* + * The boot block starting from 0.0.6 for NPCM8xx SoCs stores the DRAM size + * in the SCRPAD2 registers. We need to set this field correctly since + * the initialization is skipped as we mentioned above. 
+ * https://github.com/Nuvoton-Israel/u-boot/blob/npcm8mnx-v2019.01_tmp/board/nuvoton/arbel/arbel.c#L737 + */ + s->reset_scrpad_b = dram_size; +} + +static void npcm_gcr_init(Object *obj) +{ + NPCMGCRState *s = NPCM_GCR(obj); + + memory_region_init_io(&s->iomem, obj, &npcm_gcr_ops, s, + TYPE_NPCM_GCR, 4 * KiB); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); +} + +static const VMStateDescription vmstate_npcm_gcr = { + .name = "npcm-gcr", + .version_id = 2, + .minimum_version_id = 2, + .fields = (const VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, NPCMGCRState, NPCM_GCR_MAX_NR_REGS), + VMSTATE_END_OF_LIST(), + }, +}; + +static const Property npcm_gcr_properties[] = { + DEFINE_PROP_UINT32("disabled-modules", NPCMGCRState, reset_mdlr, 0), + DEFINE_PROP_UINT32("power-on-straps", NPCMGCRState, reset_pwron, 0), +}; + +static void npcm_gcr_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = npcm_gcr_realize; + dc->vmsd = &vmstate_npcm_gcr; + + device_class_set_props(dc, npcm_gcr_properties); +} + +static void npcm7xx_gcr_class_init(ObjectClass *klass, const void *data) +{ + NPCMGCRClass *c = NPCM_GCR_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->desc = "NPCM7xx System Global Control Registers"; + rc->phases.enter = npcm7xx_gcr_enter_reset; + + c->nr_regs = NPCM7XX_GCR_NR_REGS; + c->cold_reset_values = npcm7xx_cold_reset_values; + rc->phases.enter = npcm7xx_gcr_enter_reset; +} + +static void npcm8xx_gcr_class_init(ObjectClass *klass, const void *data) +{ + NPCMGCRClass *c = NPCM_GCR_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->desc = "NPCM8xx System Global Control Registers"; + c->nr_regs = NPCM8XX_GCR_NR_REGS; + c->cold_reset_values = npcm8xx_cold_reset_values; + rc->phases.enter = npcm8xx_gcr_enter_reset; +} + +static const TypeInfo npcm_gcr_info[] = { + { + .name = TYPE_NPCM_GCR, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCMGCRState), + .instance_init = npcm_gcr_init, + .class_size = sizeof(NPCMGCRClass), + .class_init = npcm_gcr_class_init, + .abstract = true, + }, + { + .name = TYPE_NPCM7XX_GCR, + .parent = TYPE_NPCM_GCR, + .class_init = npcm7xx_gcr_class_init, + }, + { + .name = TYPE_NPCM8XX_GCR, + .parent = TYPE_NPCM_GCR, + .class_init = npcm8xx_gcr_class_init, + }, +}; +DEFINE_TYPES(npcm_gcr_info) diff --git a/hw/misc/nrf51_rng.c b/hw/misc/nrf51_rng.c index 2d76c457182..8cd7ffe3f55 100644 --- a/hw/misc/nrf51_rng.c +++ b/hw/misc/nrf51_rng.c @@ -107,25 +107,25 @@ static void rng_write(void *opaque, hwaddr offset, break; case NRF51_RNG_REG_SHORTS: s->shortcut_stop_on_valrdy = - (value & BIT_MASK(NRF51_RNG_REG_SHORTS_VALRDY_STOP)) ? 1 : 0; + (value & BIT(NRF51_RNG_REG_SHORTS_VALRDY_STOP)) ? 1 : 0; break; case NRF51_RNG_REG_INTEN: s->interrupt_enabled = - (value & BIT_MASK(NRF51_RNG_REG_INTEN_VALRDY)) ? 1 : 0; + (value & BIT(NRF51_RNG_REG_INTEN_VALRDY)) ? 1 : 0; break; case NRF51_RNG_REG_INTENSET: - if (value & BIT_MASK(NRF51_RNG_REG_INTEN_VALRDY)) { + if (value & BIT(NRF51_RNG_REG_INTEN_VALRDY)) { s->interrupt_enabled = 1; } break; case NRF51_RNG_REG_INTENCLR: - if (value & BIT_MASK(NRF51_RNG_REG_INTEN_VALRDY)) { + if (value & BIT(NRF51_RNG_REG_INTEN_VALRDY)) { s->interrupt_enabled = 0; } break; case NRF51_RNG_REG_CONFIG: s->filter_enabled = - (value & BIT_MASK(NRF51_RNG_REG_CONFIG_DECEN)) ? 1 : 0; + (value & BIT(NRF51_RNG_REG_CONFIG_DECEN)) ? 
1 : 0; break; default: @@ -219,12 +219,11 @@ static void nrf51_rng_reset(DeviceState *dev) } -static Property nrf51_rng_properties[] = { +static const Property nrf51_rng_properties[] = { DEFINE_PROP_UINT16("period_unfiltered_us", NRF51RNGState, period_unfiltered_us, 167), DEFINE_PROP_UINT16("period_filtered_us", NRF51RNGState, period_filtered_us, 660), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_rng = { @@ -241,13 +240,13 @@ static const VMStateDescription vmstate_rng = { } }; -static void nrf51_rng_class_init(ObjectClass *klass, void *data) +static void nrf51_rng_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, nrf51_rng_properties); dc->vmsd = &vmstate_rng; - dc->reset = nrf51_rng_reset; + device_class_set_legacy_reset(dc, nrf51_rng_reset); } static const TypeInfo nrf51_rng_info = { diff --git a/hw/misc/omap_clk.c b/hw/misc/omap_clk.c index c77ca2fc74e..da95c4ace58 100644 --- a/hw/misc/omap_clk.c +++ b/hw/misc/omap_clk.c @@ -30,174 +30,170 @@ struct clk { struct clk *parent; struct clk *child1; struct clk *sibling; -#define ALWAYS_ENABLED (1 << 0) -#define CLOCK_IN_OMAP310 (1 << 10) -#define CLOCK_IN_OMAP730 (1 << 11) -#define CLOCK_IN_OMAP1510 (1 << 12) -#define CLOCK_IN_OMAP16XX (1 << 13) -#define CLOCK_IN_OMAP242X (1 << 14) -#define CLOCK_IN_OMAP243X (1 << 15) -#define CLOCK_IN_OMAP343X (1 << 16) +#define ALWAYS_ENABLED (1 << 0) +#define CLOCK_IN_OMAP310 (1 << 10) +#define CLOCK_IN_OMAP730 (1 << 11) +#define CLOCK_IN_OMAP1510 (1 << 12) +#define CLOCK_IN_OMAP16XX (1 << 13) uint32_t flags; int id; - int running; /* Is currently ticking */ - int enabled; /* Is enabled, regardless of its input clk */ - unsigned long rate; /* Current rate (if .running) */ - unsigned int divisor; /* Rate relative to input (if .enabled) */ - unsigned int multiplier; /* Rate relative to input (if .enabled) */ - qemu_irq users[16]; /* Who to notify on change */ - int usecount; /* Automatically idle when unused */ + int running; /* Is currently ticking */ + int enabled; /* Is enabled, regardless of its input clk */ + unsigned long rate; /* Current rate (if .running) */ + unsigned int divisor; /* Rate relative to input (if .enabled) */ + unsigned int multiplier; /* Rate relative to input (if .enabled) */ + qemu_irq users[16]; /* Who to notify on change */ + int usecount; /* Automatically idle when unused */ }; static struct clk xtal_osc12m = { - .name = "xtal_osc_12m", - .rate = 12000000, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, + .name = "xtal_osc_12m", + .rate = 12000000, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, }; static struct clk xtal_osc32k = { - .name = "xtal_osc_32k", - .rate = 32768, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | - CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .name = "xtal_osc_32k", + .rate = 32768, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, }; static struct clk ck_ref = { - .name = "ck_ref", - .alias = "clkin", - .parent = &xtal_osc12m, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + .name = "ck_ref", + .alias = "clkin", + .parent = &xtal_osc12m, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; /* If a dpll is disabled it becomes a bypass, child clocks don't stop */ static struct clk dpll1 = { - .name = "dpll1", - .parent = &ck_ref, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + .name = "dpll1", + .parent 
= &ck_ref, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk dpll2 = { - .name = "dpll2", - .parent = &ck_ref, - .flags = CLOCK_IN_OMAP310 | ALWAYS_ENABLED, + .name = "dpll2", + .parent = &ck_ref, + .flags = CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk dpll3 = { - .name = "dpll3", - .parent = &ck_ref, - .flags = CLOCK_IN_OMAP310 | ALWAYS_ENABLED, + .name = "dpll3", + .parent = &ck_ref, + .flags = CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk dpll4 = { - .name = "dpll4", - .parent = &ck_ref, - .multiplier = 4, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, + .name = "dpll4", + .parent = &ck_ref, + .multiplier = 4, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, }; static struct clk apll = { - .name = "apll", - .parent = &ck_ref, - .multiplier = 48, - .divisor = 12, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, + .name = "apll", + .parent = &ck_ref, + .multiplier = 48, + .divisor = 12, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, }; static struct clk ck_48m = { - .name = "ck_48m", - .parent = &dpll4, /* either dpll4 or apll */ - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, + .name = "ck_48m", + .parent = &dpll4, /* either dpll4 or apll */ + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, }; static struct clk ck_dpll1out = { - .name = "ck_dpll1out", - .parent = &dpll1, - .flags = CLOCK_IN_OMAP16XX, + .name = "ck_dpll1out", + .parent = &dpll1, + .flags = CLOCK_IN_OMAP16XX, }; static struct clk sossi_ck = { - .name = "ck_sossi", - .parent = &ck_dpll1out, - .flags = CLOCK_IN_OMAP16XX, + .name = "ck_sossi", + .parent = &ck_dpll1out, + .flags = CLOCK_IN_OMAP16XX, }; static struct clk clkm1 = { - .name = "clkm1", - .alias = "ck_gen1", - .parent = &dpll1, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + .name = "clkm1", + .alias = "ck_gen1", + .parent = &dpll1, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk clkm2 = { - .name = "clkm2", - .alias = "ck_gen2", - .parent = &dpll1, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + .name = "clkm2", + .alias = "ck_gen2", + .parent = &dpll1, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk clkm3 = { - .name = "clkm3", - .alias = "ck_gen3", - .parent = &dpll1, /* either dpll1 or ck_ref */ - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + .name = "clkm3", + .alias = "ck_gen3", + .parent = &dpll1, /* either dpll1 or ck_ref */ + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk arm_ck = { - .name = "arm_ck", - .alias = "mpu_ck", - .parent = &clkm1, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + .name = "arm_ck", + .alias = "mpu_ck", + .parent = &clkm1, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk armper_ck = { - .name = "armper_ck", - .alias = "mpuper_ck", - .parent = &clkm1, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, + .name = "armper_ck", + .alias = "mpuper_ck", + .parent = &clkm1, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, }; static struct clk arm_gpio_ck = { - .name = "arm_gpio_ck", - .alias = "mpu_gpio_ck", - .parent = &clkm1, - .divisor = 1, - .flags = CLOCK_IN_OMAP1510 | 
CLOCK_IN_OMAP310, + .name = "arm_gpio_ck", + .alias = "mpu_gpio_ck", + .parent = &clkm1, + .divisor = 1, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, }; static struct clk armxor_ck = { - .name = "armxor_ck", - .alias = "mpuxor_ck", - .parent = &ck_ref, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, + .name = "armxor_ck", + .alias = "mpuxor_ck", + .parent = &ck_ref, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, }; static struct clk armtim_ck = { - .name = "armtim_ck", - .alias = "mputim_ck", - .parent = &ck_ref, /* either CLKIN or DPLL1 */ - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, + .name = "armtim_ck", + .alias = "mputim_ck", + .parent = &ck_ref, /* either CLKIN or DPLL1 */ + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, }; static struct clk armwdt_ck = { - .name = "armwdt_ck", - .alias = "mpuwd_ck", - .parent = &clkm1, - .divisor = 14, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + .name = "armwdt_ck", + .alias = "mpuwd_ck", + .parent = &clkm1, + .divisor = 14, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk arminth_ck16xx = { - .name = "arminth_ck", - .parent = &arm_ck, - .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, + .name = "arminth_ck", + .parent = &arm_ck, + .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, /* Note: On 16xx the frequency can be divided by 2 by programming * ARM_CKCTL:ARM_INTHCK_SEL(14) to 1 * @@ -206,48 +202,48 @@ static struct clk arminth_ck16xx = { }; static struct clk dsp_ck = { - .name = "dsp_ck", - .parent = &clkm2, - .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, + .name = "dsp_ck", + .parent = &clkm2, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, }; static struct clk dspmmu_ck = { - .name = "dspmmu_ck", - .parent = &clkm2, - .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | + .name = "dspmmu_ck", + .parent = &clkm2, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, }; static struct clk dspper_ck = { - .name = "dspper_ck", - .parent = &clkm2, - .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, + .name = "dspper_ck", + .parent = &clkm2, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, }; static struct clk dspxor_ck = { - .name = "dspxor_ck", - .parent = &ck_ref, - .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, + .name = "dspxor_ck", + .parent = &ck_ref, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, }; static struct clk dsptim_ck = { - .name = "dsptim_ck", - .parent = &ck_ref, - .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, + .name = "dsptim_ck", + .parent = &ck_ref, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, }; static struct clk tc_ck = { - .name = "tc_ck", - .parent = &clkm3, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | + .name = "tc_ck", + .parent = &clkm3, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk arminth_ck15xx = { - .name = "arminth_ck", - .parent = &tc_ck, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, + .name = "arminth_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, /* Note: On 1510 the frequency follows TC_CK * * 16xx version is in MPU clocks. 
@@ -256,698 +252,259 @@ static struct clk arminth_ck15xx = { static struct clk tipb_ck = { /* No-idle controlled by "tc_ck" */ - .name = "tipb_ck", - .parent = &tc_ck, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, + .name = "tipb_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk l3_ocpi_ck = { /* No-idle controlled by "tc_ck" */ - .name = "l3_ocpi_ck", - .parent = &tc_ck, - .flags = CLOCK_IN_OMAP16XX, + .name = "l3_ocpi_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP16XX, }; static struct clk tc1_ck = { - .name = "tc1_ck", - .parent = &tc_ck, - .flags = CLOCK_IN_OMAP16XX, + .name = "tc1_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP16XX, }; static struct clk tc2_ck = { - .name = "tc2_ck", - .parent = &tc_ck, - .flags = CLOCK_IN_OMAP16XX, + .name = "tc2_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP16XX, }; static struct clk dma_ck = { /* No-idle controlled by "tc_ck" */ - .name = "dma_ck", - .parent = &tc_ck, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + .name = "dma_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk dma_lcdfree_ck = { - .name = "dma_lcdfree_ck", - .parent = &tc_ck, - .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, + .name = "dma_lcdfree_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, }; static struct clk api_ck = { - .name = "api_ck", - .alias = "mpui_ck", - .parent = &tc_ck, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, + .name = "api_ck", + .alias = "mpui_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, }; static struct clk lb_ck = { - .name = "lb_ck", - .parent = &tc_ck, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, + .name = "lb_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, }; static struct clk lbfree_ck = { - .name = "lbfree_ck", - .parent = &tc_ck, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, + .name = "lbfree_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, }; static struct clk hsab_ck = { - .name = "hsab_ck", - .parent = &tc_ck, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, + .name = "hsab_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, }; static struct clk rhea1_ck = { - .name = "rhea1_ck", - .parent = &tc_ck, - .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, + .name = "rhea1_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, }; static struct clk rhea2_ck = { - .name = "rhea2_ck", - .parent = &tc_ck, - .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, + .name = "rhea2_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, }; static struct clk lcd_ck_16xx = { - .name = "lcd_ck", - .parent = &clkm3, - .flags = CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730, + .name = "lcd_ck", + .parent = &clkm3, + .flags = CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730, }; static struct clk lcd_ck_1510 = { - .name = "lcd_ck", - .parent = &clkm3, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, + .name = "lcd_ck", + .parent = &clkm3, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, }; static struct clk uart1_1510 = { - .name = "uart1_ck", + .name = "uart1_ck", /* Direct from ULPD, no real parent */ - .parent = &armper_ck, /* either armper_ck or dpll4 */ - .rate = 12000000, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, + .parent = &armper_ck, /* either armper_ck or dpll4 */ + .rate = 
12000000, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk uart1_16xx = { - .name = "uart1_ck", + .name = "uart1_ck", /* Direct from ULPD, no real parent */ - .parent = &armper_ck, - .rate = 48000000, - .flags = CLOCK_IN_OMAP16XX, + .parent = &armper_ck, + .rate = 48000000, + .flags = CLOCK_IN_OMAP16XX, }; static struct clk uart2_ck = { - .name = "uart2_ck", + .name = "uart2_ck", /* Direct from ULPD, no real parent */ - .parent = &armper_ck, /* either armper_ck or dpll4 */ - .rate = 12000000, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + .parent = &armper_ck, /* either armper_ck or dpll4 */ + .rate = 12000000, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk uart3_1510 = { - .name = "uart3_ck", + .name = "uart3_ck", /* Direct from ULPD, no real parent */ - .parent = &armper_ck, /* either armper_ck or dpll4 */ - .rate = 12000000, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, + .parent = &armper_ck, /* either armper_ck or dpll4 */ + .rate = 12000000, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, }; static struct clk uart3_16xx = { - .name = "uart3_ck", + .name = "uart3_ck", /* Direct from ULPD, no real parent */ - .parent = &armper_ck, - .rate = 48000000, - .flags = CLOCK_IN_OMAP16XX, + .parent = &armper_ck, + .rate = 48000000, + .flags = CLOCK_IN_OMAP16XX, }; -static struct clk usb_clk0 = { /* 6 MHz output on W4_USB_CLK0 */ - .name = "usb_clk0", - .alias = "usb.clko", +static struct clk usb_clk0 = { /* 6 MHz output on W4_USB_CLK0 */ + .name = "usb_clk0", + .alias = "usb.clko", /* Direct from ULPD, no parent */ - .rate = 6000000, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, + .rate = 6000000, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, }; static struct clk usb_hhc_ck1510 = { - .name = "usb_hhc_ck", + .name = "usb_hhc_ck", /* Direct from ULPD, no parent */ - .rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */ - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, + .rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */ + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, }; static struct clk usb_hhc_ck16xx = { - .name = "usb_hhc_ck", + .name = "usb_hhc_ck", /* Direct from ULPD, no parent */ - .rate = 48000000, + .rate = 48000000, /* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */ - .flags = CLOCK_IN_OMAP16XX, + .flags = CLOCK_IN_OMAP16XX, }; static struct clk usb_w2fc_mclk = { - .name = "usb_w2fc_mclk", - .alias = "usb_w2fc_ck", - .parent = &ck_48m, - .rate = 48000000, - .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, + .name = "usb_w2fc_mclk", + .alias = "usb_w2fc_ck", + .parent = &ck_48m, + .rate = 48000000, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, }; static struct clk mclk_1510 = { - .name = "mclk", + .name = "mclk", /* Direct from ULPD, no parent. May be enabled by ext hardware. */ - .rate = 12000000, - .flags = CLOCK_IN_OMAP1510, + .rate = 12000000, + .flags = CLOCK_IN_OMAP1510, }; static struct clk bclk_310 = { - .name = "bt_mclk_out", /* Alias midi_mclk_out? */ - .parent = &armper_ck, - .flags = CLOCK_IN_OMAP310, + .name = "bt_mclk_out", /* Alias midi_mclk_out? 
*/ + .parent = &armper_ck, + .flags = CLOCK_IN_OMAP310, }; static struct clk mclk_310 = { - .name = "com_mclk_out", - .parent = &armper_ck, - .flags = CLOCK_IN_OMAP310, + .name = "com_mclk_out", + .parent = &armper_ck, + .flags = CLOCK_IN_OMAP310, }; static struct clk mclk_16xx = { - .name = "mclk", + .name = "mclk", /* Direct from ULPD, no parent. May be enabled by ext hardware. */ - .flags = CLOCK_IN_OMAP16XX, + .flags = CLOCK_IN_OMAP16XX, }; static struct clk bclk_1510 = { - .name = "bclk", + .name = "bclk", /* Direct from ULPD, no parent. May be enabled by ext hardware. */ - .rate = 12000000, - .flags = CLOCK_IN_OMAP1510, + .rate = 12000000, + .flags = CLOCK_IN_OMAP1510, }; static struct clk bclk_16xx = { - .name = "bclk", + .name = "bclk", /* Direct from ULPD, no parent. May be enabled by ext hardware. */ - .flags = CLOCK_IN_OMAP16XX, + .flags = CLOCK_IN_OMAP16XX, }; static struct clk mmc1_ck = { - .name = "mmc_ck", - .id = 1, + .name = "mmc_ck", + .id = 1, /* Functional clock is direct from ULPD, interface clock is ARMPER */ - .parent = &armper_ck, /* either armper_ck or dpll4 */ - .rate = 48000000, - .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, + .parent = &armper_ck, /* either armper_ck or dpll4 */ + .rate = 48000000, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, }; static struct clk mmc2_ck = { - .name = "mmc_ck", - .id = 2, + .name = "mmc_ck", + .id = 2, /* Functional clock is direct from ULPD, interface clock is ARMPER */ - .parent = &armper_ck, - .rate = 48000000, - .flags = CLOCK_IN_OMAP16XX, + .parent = &armper_ck, + .rate = 48000000, + .flags = CLOCK_IN_OMAP16XX, }; static struct clk cam_mclk = { - .name = "cam.mclk", - .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, - .rate = 12000000, + .name = "cam.mclk", + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, + .rate = 12000000, }; static struct clk cam_exclk = { - .name = "cam.exclk", - .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, + .name = "cam.exclk", + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, /* Either 12M from cam.mclk or 48M from dpll4 */ - .parent = &cam_mclk, + .parent = &cam_mclk, }; static struct clk cam_lclk = { - .name = "cam.lclk", - .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, + .name = "cam.lclk", + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, }; static struct clk i2c_fck = { - .name = "i2c_fck", - .id = 1, - .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | + .name = "i2c_fck", + .id = 1, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, - .parent = &armxor_ck, + .parent = &armxor_ck, }; static struct clk i2c_ick = { - .name = "i2c_ick", - .id = 1, - .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, - .parent = &armper_ck, + .name = "i2c_ick", + .id = 1, + .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, + .parent = &armper_ck, }; static struct clk clk32k = { - .name = "clk32-kHz", - .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | - CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, - .parent = &xtal_osc32k, -}; - -static struct clk ref_clk = { - .name = "ref_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, - .rate = 12000000, /* 12 MHz or 13 MHz or 19.2 MHz */ - /*.parent = sys.xtalin */ -}; - -static struct clk apll_96m = { - .name = "apll_96m", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, - .rate = 96000000, - /*.parent = 
ref_clk */ -}; - -static struct clk apll_54m = { - .name = "apll_54m", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, - .rate = 54000000, - /*.parent = ref_clk */ -}; - -static struct clk sys_clk = { - .name = "sys_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, - .rate = 32768, - /*.parent = sys.xtalin */ -}; - -static struct clk sleep_clk = { - .name = "sleep_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, - .rate = 32768, - /*.parent = sys.xtalin */ -}; - -static struct clk dpll_ck = { - .name = "dpll", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, - .parent = &ref_clk, -}; - -static struct clk dpll_x2_ck = { - .name = "dpll_x2", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, - .parent = &ref_clk, -}; - -static struct clk wdt1_sys_clk = { - .name = "wdt1_sys_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, - .rate = 32768, - /*.parent = sys.xtalin */ -}; - -static struct clk func_96m_clk = { - .name = "func_96m_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .divisor = 1, - .parent = &apll_96m, -}; - -static struct clk func_48m_clk = { - .name = "func_48m_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .divisor = 2, - .parent = &apll_96m, -}; - -static struct clk func_12m_clk = { - .name = "func_12m_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .divisor = 8, - .parent = &apll_96m, -}; - -static struct clk func_54m_clk = { - .name = "func_54m_clk", - .flags = CLOCK_IN_OMAP242X, - .divisor = 1, - .parent = &apll_54m, -}; - -static struct clk sys_clkout = { - .name = "clkout", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk sys_clkout2 = { - .name = "clkout2", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk core_clk = { - .name = "core_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &dpll_x2_ck, /* Switchable between dpll_ck and clk32k */ -}; - -static struct clk l3_clk = { - .name = "l3_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_clk, -}; - -static struct clk core_l4_iclk = { - .name = "core_l4_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &l3_clk, -}; - -static struct clk wu_l4_iclk = { - .name = "wu_l4_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &l3_clk, -}; - -static struct clk core_l3_iclk = { - .name = "core_l3_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_clk, -}; - -static struct clk core_l4_usb_clk = { - .name = "core_l4_usb_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &l3_clk, -}; - -static struct clk wu_gpt1_clk = { - .name = "wu_gpt1_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk wu_32k_clk = { - .name = "wu_32k_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk uart1_fclk = { - .name = "uart1_fclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &func_48m_clk, -}; - -static struct clk uart1_iclk = { - .name = "uart1_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_l4_iclk, -}; - -static struct clk uart2_fclk = { - .name = "uart2_fclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &func_48m_clk, -}; - -static struct clk uart2_iclk = { - .name = "uart2_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, 
- .parent = &core_l4_iclk, -}; - -static struct clk uart3_fclk = { - .name = "uart3_fclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &func_48m_clk, -}; - -static struct clk uart3_iclk = { - .name = "uart3_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_l4_iclk, -}; - -static struct clk mpu_fclk = { - .name = "mpu_fclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_clk, -}; - -static struct clk mpu_iclk = { - .name = "mpu_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_clk, -}; - -static struct clk int_m_fclk = { - .name = "int_m_fclk", - .alias = "mpu_intc_fclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_clk, -}; - -static struct clk int_m_iclk = { - .name = "int_m_iclk", - .alias = "mpu_intc_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_clk, -}; - -static struct clk core_gpt2_clk = { - .name = "core_gpt2_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk core_gpt3_clk = { - .name = "core_gpt3_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk core_gpt4_clk = { - .name = "core_gpt4_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk core_gpt5_clk = { - .name = "core_gpt5_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk core_gpt6_clk = { - .name = "core_gpt6_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk core_gpt7_clk = { - .name = "core_gpt7_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk core_gpt8_clk = { - .name = "core_gpt8_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk core_gpt9_clk = { - .name = "core_gpt9_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk core_gpt10_clk = { - .name = "core_gpt10_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk core_gpt11_clk = { - .name = "core_gpt11_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk core_gpt12_clk = { - .name = "core_gpt12_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, -}; - -static struct clk mcbsp1_clk = { - .name = "mcbsp1_cg", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .divisor = 2, - .parent = &func_96m_clk, -}; - -static struct clk mcbsp2_clk = { - .name = "mcbsp2_cg", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .divisor = 2, - .parent = &func_96m_clk, -}; - -static struct clk emul_clk = { - .name = "emul_ck", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &func_54m_clk, -}; - -static struct clk sdma_fclk = { - .name = "sdma_fclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &l3_clk, -}; - -static struct clk sdma_iclk = { - .name = "sdma_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_l3_iclk, /* core_l4_iclk for the configuration port */ -}; - -static struct clk i2c1_fclk = { - .name = "i2c1.fclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &func_12m_clk, - .divisor = 1, -}; - -static struct clk i2c1_iclk = { - .name = "i2c1.iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_l4_iclk, -}; - -static struct 
clk i2c2_fclk = { - .name = "i2c2.fclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &func_12m_clk, - .divisor = 1, -}; - -static struct clk i2c2_iclk = { - .name = "i2c2.iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_l4_iclk, -}; - -static struct clk gpio_dbclk[5] = { - { - .name = "gpio1_dbclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &wu_32k_clk, - }, { - .name = "gpio2_dbclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &wu_32k_clk, - }, { - .name = "gpio3_dbclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &wu_32k_clk, - }, { - .name = "gpio4_dbclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &wu_32k_clk, - }, { - .name = "gpio5_dbclk", - .flags = CLOCK_IN_OMAP243X, - .parent = &wu_32k_clk, - }, -}; - -static struct clk gpio_iclk = { - .name = "gpio_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &wu_l4_iclk, -}; - -static struct clk mmc_fck = { - .name = "mmc_fclk", - .flags = CLOCK_IN_OMAP242X, - .parent = &func_96m_clk, -}; - -static struct clk mmc_ick = { - .name = "mmc_iclk", - .flags = CLOCK_IN_OMAP242X, - .parent = &core_l4_iclk, -}; - -static struct clk spi_fclk[3] = { - { - .name = "spi1_fclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &func_48m_clk, - }, { - .name = "spi2_fclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &func_48m_clk, - }, { - .name = "spi3_fclk", - .flags = CLOCK_IN_OMAP243X, - .parent = &func_48m_clk, - }, -}; - -static struct clk dss_clk[2] = { - { - .name = "dss_clk1", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_clk, - }, { - .name = "dss_clk2", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &sys_clk, - }, -}; - -static struct clk dss_54m_clk = { - .name = "dss_54m_clk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &func_54m_clk, -}; - -static struct clk dss_l3_iclk = { - .name = "dss_l3_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_l3_iclk, -}; - -static struct clk dss_l4_iclk = { - .name = "dss_l4_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_l4_iclk, -}; - -static struct clk spi_iclk[3] = { - { - .name = "spi1_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_l4_iclk, - }, { - .name = "spi2_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - .parent = &core_l4_iclk, - }, { - .name = "spi3_iclk", - .flags = CLOCK_IN_OMAP243X, - .parent = &core_l4_iclk, - }, -}; - -static struct clk omapctrl_clk = { - .name = "omapctrl_iclk", - .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, - /* XXX Should be in WKUP domain */ - .parent = &core_l4_iclk, + .name = "clk32-kHz", + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | + ALWAYS_ENABLED, + .parent = &xtal_osc32k, }; static struct clk *onchip_clks[] = { @@ -1019,80 +576,6 @@ static struct clk *onchip_clks[] = { &i2c_fck, &i2c_ick, - /* OMAP 2 */ - - &ref_clk, - &apll_96m, - &apll_54m, - &sys_clk, - &sleep_clk, - &dpll_ck, - &dpll_x2_ck, - &wdt1_sys_clk, - &func_96m_clk, - &func_48m_clk, - &func_12m_clk, - &func_54m_clk, - &sys_clkout, - &sys_clkout2, - &core_clk, - &l3_clk, - &core_l4_iclk, - &wu_l4_iclk, - &core_l3_iclk, - &core_l4_usb_clk, - &wu_gpt1_clk, - &wu_32k_clk, - &uart1_fclk, - &uart1_iclk, - &uart2_fclk, - &uart2_iclk, - &uart3_fclk, - &uart3_iclk, - &mpu_fclk, - &mpu_iclk, - &int_m_fclk, - &int_m_iclk, - &core_gpt2_clk, - &core_gpt3_clk, - &core_gpt4_clk, - 
&core_gpt5_clk, - &core_gpt6_clk, - &core_gpt7_clk, - &core_gpt8_clk, - &core_gpt9_clk, - &core_gpt10_clk, - &core_gpt11_clk, - &core_gpt12_clk, - &mcbsp1_clk, - &mcbsp2_clk, - &emul_clk, - &sdma_fclk, - &sdma_iclk, - &i2c1_fclk, - &i2c1_iclk, - &i2c2_fclk, - &i2c2_iclk, - &gpio_dbclk[0], - &gpio_dbclk[1], - &gpio_dbclk[2], - &gpio_dbclk[3], - &gpio_iclk, - &mmc_fck, - &mmc_ick, - &spi_fclk[0], - &spi_iclk[0], - &spi_fclk[1], - &spi_iclk[1], - &spi_fclk[2], - &spi_iclk[2], - &dss_clk[0], - &dss_clk[1], - &dss_54m_clk, - &dss_l3_iclk, - &dss_l4_iclk, - &omapctrl_clk, - NULL }; @@ -1230,12 +713,6 @@ void omap_clk_init(struct omap_mpu_state_s *mpu) flag = CLOCK_IN_OMAP310; else if (cpu_is_omap1510(mpu)) flag = CLOCK_IN_OMAP1510; - else if (cpu_is_omap2410(mpu) || cpu_is_omap2420(mpu)) - flag = CLOCK_IN_OMAP242X; - else if (cpu_is_omap2430(mpu)) - flag = CLOCK_IN_OMAP243X; - else if (cpu_is_omap3430(mpu)) - flag = CLOCK_IN_OMAP243X; - else return; diff --git a/hw/misc/omap_gpmc.c b/hw/misc/omap_gpmc.c deleted file mode 100644 index 67158eb1649..00000000000 --- a/hw/misc/omap_gpmc.c +++ /dev/null @@ -1,898 +0,0 @@ -/* - * TI OMAP general purpose memory controller emulation. - * - * Copyright (C) 2007-2009 Nokia Corporation - * Original code written by Andrzej Zaborowski - * Enhancements for OMAP3 and NAND support written by Juha Riihimäki - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) any later version of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see <http://www.gnu.org/licenses/>.
- */ - -#include "qemu/osdep.h" -#include "hw/irq.h" -#include "hw/block/flash.h" -#include "hw/arm/omap.h" -#include "exec/memory.h" -#include "exec/address-spaces.h" - -/* General-Purpose Memory Controller */ -struct omap_gpmc_s { - qemu_irq irq; - qemu_irq drq; - MemoryRegion iomem; - int accept_256; - - uint8_t revision; - uint8_t sysconfig; - uint16_t irqst; - uint16_t irqen; - uint16_t lastirq; - uint16_t timeout; - uint16_t config; - struct omap_gpmc_cs_file_s { - uint32_t config[7]; - MemoryRegion *iomem; - MemoryRegion container; - MemoryRegion nandiomem; - DeviceState *dev; - } cs_file[8]; - int ecc_cs; - int ecc_ptr; - uint32_t ecc_cfg; - ECCState ecc[9]; - struct prefetch { - uint32_t config1; /* GPMC_PREFETCH_CONFIG1 */ - uint32_t transfercount; /* GPMC_PREFETCH_CONFIG2:TRANSFERCOUNT */ - int startengine; /* GPMC_PREFETCH_CONTROL:STARTENGINE */ - int fifopointer; /* GPMC_PREFETCH_STATUS:FIFOPOINTER */ - int count; /* GPMC_PREFETCH_STATUS:COUNTVALUE */ - MemoryRegion iomem; - uint8_t fifo[64]; - } prefetch; -}; - -#define OMAP_GPMC_8BIT 0 -#define OMAP_GPMC_16BIT 1 -#define OMAP_GPMC_NOR 0 -#define OMAP_GPMC_NAND 2 - -static int omap_gpmc_devtype(struct omap_gpmc_cs_file_s *f) -{ - return (f->config[0] >> 10) & 3; -} - -static int omap_gpmc_devsize(struct omap_gpmc_cs_file_s *f) -{ - /* devsize field is really 2 bits but we ignore the high - * bit to ensure consistent behaviour if the guest sets - * it (values 2 and 3 are reserved in the TRM) - */ - return (f->config[0] >> 12) & 1; -} - -/* Extract the chip-select value from the prefetch config1 register */ -static int prefetch_cs(uint32_t config1) -{ - return (config1 >> 24) & 7; -} - -static int prefetch_threshold(uint32_t config1) -{ - return (config1 >> 8) & 0x7f; -} - -static void omap_gpmc_int_update(struct omap_gpmc_s *s) -{ - /* The TRM is a bit unclear, but it seems to say that - * the TERMINALCOUNTSTATUS bit is set only on the - * transition when the prefetch engine goes from - * active to inactive, whereas the FIFOEVENTSTATUS - * bit is held high as long as the fifo has at - * least THRESHOLD bytes available. - * So we do the latter here, but TERMINALCOUNTSTATUS - * is set elsewhere. 
- */ - if (s->prefetch.fifopointer >= prefetch_threshold(s->prefetch.config1)) { - s->irqst |= 1; - } - if ((s->irqen & s->irqst) != s->lastirq) { - s->lastirq = s->irqen & s->irqst; - qemu_set_irq(s->irq, s->lastirq); - } -} - -static void omap_gpmc_dma_update(struct omap_gpmc_s *s, int value) -{ - if (s->prefetch.config1 & 4) { - qemu_set_irq(s->drq, value); - } -} - -/* Access functions for when a NAND-like device is mapped into memory: - * all addresses in the region behave like accesses to the relevant - * GPMC_NAND_DATA_i register (which is actually implemented to call these) - */ -static uint64_t omap_nand_read(void *opaque, hwaddr addr, - unsigned size) -{ - struct omap_gpmc_cs_file_s *f = opaque; - uint64_t v; - nand_setpins(f->dev, 0, 0, 0, 1, 0); - switch (omap_gpmc_devsize(f)) { - case OMAP_GPMC_8BIT: - v = nand_getio(f->dev); - if (size == 1) { - return v; - } - v |= (nand_getio(f->dev) << 8); - if (size == 2) { - return v; - } - v |= (nand_getio(f->dev) << 16); - v |= (nand_getio(f->dev) << 24); - return v; - case OMAP_GPMC_16BIT: - v = nand_getio(f->dev); - if (size == 1) { - /* 8 bit read from 16 bit device : probably a guest bug */ - return v & 0xff; - } - if (size == 2) { - return v; - } - v |= (nand_getio(f->dev) << 16); - return v; - default: - abort(); - } -} - -static void omap_nand_setio(DeviceState *dev, uint64_t value, - int nandsize, int size) -{ - /* Write the specified value to the NAND device, respecting - * both size of the NAND device and size of the write access. - */ - switch (nandsize) { - case OMAP_GPMC_8BIT: - switch (size) { - case 1: - nand_setio(dev, value & 0xff); - break; - case 2: - nand_setio(dev, value & 0xff); - nand_setio(dev, (value >> 8) & 0xff); - break; - case 4: - default: - nand_setio(dev, value & 0xff); - nand_setio(dev, (value >> 8) & 0xff); - nand_setio(dev, (value >> 16) & 0xff); - nand_setio(dev, (value >> 24) & 0xff); - break; - } - break; - case OMAP_GPMC_16BIT: - switch (size) { - case 1: - /* writing to a 16bit device with 8bit access is probably a guest - * bug; pass the value through anyway. - */ - case 2: - nand_setio(dev, value & 0xffff); - break; - case 4: - default: - nand_setio(dev, value & 0xffff); - nand_setio(dev, (value >> 16) & 0xffff); - break; - } - break; - } -} - -static void omap_nand_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_gpmc_cs_file_s *f = opaque; - nand_setpins(f->dev, 0, 0, 0, 1, 0); - omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size); -} - -static const MemoryRegionOps omap_nand_ops = { - .read = omap_nand_read, - .write = omap_nand_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void fill_prefetch_fifo(struct omap_gpmc_s *s) -{ - /* Fill the prefetch FIFO by reading data from NAND. - * We do this synchronously, unlike the hardware which - * will do this asynchronously. We refill when the - * FIFO has THRESHOLD bytes free, and we always refill - * as much data as possible starting at the top end - * of the FIFO. - * (We have to refill at THRESHOLD rather than waiting - * for the FIFO to empty to allow for the case where - * the FIFO size isn't an exact multiple of THRESHOLD - * and we're doing DMA transfers.) - * This means we never need to handle wrap-around in - * the fifo-reading code, and the next byte of data - * to read is always fifo[63 - fifopointer]. 
- */ - int fptr; - int cs = prefetch_cs(s->prefetch.config1); - int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0); - int bytes; - /* Don't believe the bit of the OMAP TRM that says that COUNTVALUE - * and TRANSFERCOUNT are in units of 16 bit words for 16 bit NAND. - * Instead believe the bit that says it is always a byte count. - */ - bytes = 64 - s->prefetch.fifopointer; - if (bytes > s->prefetch.count) { - bytes = s->prefetch.count; - } - if (is16bit) { - bytes &= ~1; - } - - s->prefetch.count -= bytes; - s->prefetch.fifopointer += bytes; - fptr = 64 - s->prefetch.fifopointer; - /* Move the existing data in the FIFO so it sits just - * before what we're about to read in - */ - while (fptr < (64 - bytes)) { - s->prefetch.fifo[fptr] = s->prefetch.fifo[fptr + bytes]; - fptr++; - } - while (fptr < 64) { - if (is16bit) { - uint32_t v = omap_nand_read(&s->cs_file[cs], 0, 2); - s->prefetch.fifo[fptr++] = v & 0xff; - s->prefetch.fifo[fptr++] = (v >> 8) & 0xff; - } else { - s->prefetch.fifo[fptr++] = omap_nand_read(&s->cs_file[cs], 0, 1); - } - } - if (s->prefetch.startengine && (s->prefetch.count == 0)) { - /* This was the final transfer: raise TERMINALCOUNTSTATUS */ - s->irqst |= 2; - s->prefetch.startengine = 0; - } - /* If there are any bytes in the FIFO at this point then - * we must raise a DMA request (either this is a final part - * transfer, or we filled the FIFO in which case we certainly - * have THRESHOLD bytes available) - */ - if (s->prefetch.fifopointer != 0) { - omap_gpmc_dma_update(s, 1); - } - omap_gpmc_int_update(s); -} - -/* Access functions for a NAND-like device when the prefetch/postwrite - * engine is enabled -- all addresses in the region behave alike: - * data is read or written to the FIFO. - */ -static uint64_t omap_gpmc_prefetch_read(void *opaque, hwaddr addr, - unsigned size) -{ - struct omap_gpmc_s *s = opaque; - uint32_t data; - if (s->prefetch.config1 & 1) { - /* The TRM doesn't define the behaviour if you read from the - * FIFO when the prefetch engine is in write mode. We choose - * to always return zero. - */ - return 0; - } - /* Note that trying to read an empty fifo repeats the last byte */ - if (s->prefetch.fifopointer) { - s->prefetch.fifopointer--; - } - data = s->prefetch.fifo[63 - s->prefetch.fifopointer]; - if (s->prefetch.fifopointer == - (64 - prefetch_threshold(s->prefetch.config1))) { - /* We've drained THRESHOLD bytes now. So deassert the - * DMA request, then refill the FIFO (which will probably - * assert it again.) - */ - omap_gpmc_dma_update(s, 0); - fill_prefetch_fifo(s); - } - omap_gpmc_int_update(s); - return data; -} - -static void omap_gpmc_prefetch_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_gpmc_s *s = opaque; - int cs = prefetch_cs(s->prefetch.config1); - if ((s->prefetch.config1 & 1) == 0) { - /* The TRM doesn't define the behaviour of writing to the - * FIFO when the prefetch engine is in read mode. We - * choose to ignore the write. - */ - return; - } - if (s->prefetch.count == 0) { - /* The TRM doesn't define the behaviour of writing to the - * FIFO if the transfer is complete. We choose to ignore. - */ - return; - } - /* The only reason we do any data buffering in postwrite - * mode is if we are talking to a 16 bit NAND device, in - * which case we need to buffer the first byte of the - * 16 bit word until the other byte arrives. 
- */ - int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0); - if (is16bit) { - /* fifopointer alternates between 64 (waiting for first - * byte of word) and 63 (waiting for second byte) - */ - if (s->prefetch.fifopointer == 64) { - s->prefetch.fifo[0] = value; - s->prefetch.fifopointer--; - } else { - value = (value << 8) | s->prefetch.fifo[0]; - omap_nand_write(&s->cs_file[cs], 0, value, 2); - s->prefetch.count--; - s->prefetch.fifopointer = 64; - } - } else { - /* Just write the byte : fifopointer remains 64 at all times */ - omap_nand_write(&s->cs_file[cs], 0, value, 1); - s->prefetch.count--; - } - if (s->prefetch.count == 0) { - /* Final transfer: raise TERMINALCOUNTSTATUS */ - s->irqst |= 2; - s->prefetch.startengine = 0; - } - omap_gpmc_int_update(s); -} - -static const MemoryRegionOps omap_prefetch_ops = { - .read = omap_gpmc_prefetch_read, - .write = omap_gpmc_prefetch_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .impl.min_access_size = 1, - .impl.max_access_size = 1, -}; - -static MemoryRegion *omap_gpmc_cs_memregion(struct omap_gpmc_s *s, int cs) -{ - /* Return the MemoryRegion* to map/unmap for this chipselect */ - struct omap_gpmc_cs_file_s *f = &s->cs_file[cs]; - if (omap_gpmc_devtype(f) == OMAP_GPMC_NOR) { - return f->iomem; - } - if ((s->prefetch.config1 & 0x80) && - (prefetch_cs(s->prefetch.config1) == cs)) { - /* The prefetch engine is enabled for this CS: map the FIFO */ - return &s->prefetch.iomem; - } - return &f->nandiomem; -} - -static void omap_gpmc_cs_map(struct omap_gpmc_s *s, int cs) -{ - struct omap_gpmc_cs_file_s *f = &s->cs_file[cs]; - uint32_t mask = (f->config[6] >> 8) & 0xf; - uint32_t base = f->config[6] & 0x3f; - uint32_t size; - - if (!f->iomem && !f->dev) { - return; - } - - if (!(f->config[6] & (1 << 6))) { - /* Do nothing unless CSVALID */ - return; - } - - /* TODO: check for overlapping regions and report access errors */ - if (mask != 0x8 && mask != 0xc && mask != 0xe && mask != 0xf - && !(s->accept_256 && !mask)) { - fprintf(stderr, "%s: invalid chip-select mask address (0x%x)\n", - __func__, mask); - } - - base <<= 24; - size = (0x0fffffff & ~(mask << 24)) + 1; - /* TODO: rather than setting the size of the mapping (which should be - * constant), the mask should cause wrapping of the address space, so - * that the same memory becomes accessible at every size bytes - * starting from base. */ - memory_region_init(&f->container, NULL, "omap-gpmc-file", size); - memory_region_add_subregion(&f->container, 0, - omap_gpmc_cs_memregion(s, cs)); - memory_region_add_subregion(get_system_memory(), base, - &f->container); -} - -static void omap_gpmc_cs_unmap(struct omap_gpmc_s *s, int cs) -{ - struct omap_gpmc_cs_file_s *f = &s->cs_file[cs]; - if (!(f->config[6] & (1 << 6))) { - /* Do nothing unless CSVALID */ - return; - } - if (!f->iomem && !f->dev) { - return; - } - memory_region_del_subregion(get_system_memory(), &f->container); - memory_region_del_subregion(&f->container, omap_gpmc_cs_memregion(s, cs)); - object_unparent(OBJECT(&f->container)); -} - -void omap_gpmc_reset(struct omap_gpmc_s *s) -{ - int i; - - s->sysconfig = 0; - s->irqst = 0; - s->irqen = 0; - omap_gpmc_int_update(s); - for (i = 0; i < 8; i++) { - /* This has to happen before we change any of the config - * used to determine which memory regions are mapped or unmapped. 
- */ - omap_gpmc_cs_unmap(s, i); - } - s->timeout = 0; - s->config = 0xa00; - s->prefetch.config1 = 0x00004000; - s->prefetch.transfercount = 0x00000000; - s->prefetch.startengine = 0; - s->prefetch.fifopointer = 0; - s->prefetch.count = 0; - for (i = 0; i < 8; i ++) { - s->cs_file[i].config[1] = 0x101001; - s->cs_file[i].config[2] = 0x020201; - s->cs_file[i].config[3] = 0x10031003; - s->cs_file[i].config[4] = 0x10f1111; - s->cs_file[i].config[5] = 0; - s->cs_file[i].config[6] = 0xf00; - /* In theory we could probe attached devices for some CFG1 - * bits here, but we just retain them across resets as they - * were set initially by omap_gpmc_attach(). - */ - if (i == 0) { - s->cs_file[i].config[0] &= 0x00433e00; - s->cs_file[i].config[6] |= 1 << 6; /* CSVALID */ - omap_gpmc_cs_map(s, i); - } else { - s->cs_file[i].config[0] &= 0x00403c00; - } - } - s->ecc_cs = 0; - s->ecc_ptr = 0; - s->ecc_cfg = 0x3fcff000; - for (i = 0; i < 9; i ++) - ecc_reset(&s->ecc[i]); -} - -static int gpmc_wordaccess_only(hwaddr addr) -{ - /* Return true if the register offset is to a register that - * only permits word width accesses. - * Non-word accesses are only OK for GPMC_NAND_DATA/ADDRESS/COMMAND - * for any chipselect. - */ - if (addr >= 0x60 && addr <= 0x1d4) { - int cs = (addr - 0x60) / 0x30; - addr -= cs * 0x30; - if (addr >= 0x7c && addr < 0x88) { - /* GPMC_NAND_COMMAND, GPMC_NAND_ADDRESS, GPMC_NAND_DATA */ - return 0; - } - } - return 1; -} - -static uint64_t omap_gpmc_read(void *opaque, hwaddr addr, - unsigned size) -{ - struct omap_gpmc_s *s = opaque; - int cs; - struct omap_gpmc_cs_file_s *f; - - if (size != 4 && gpmc_wordaccess_only(addr)) { - return omap_badwidth_read32(opaque, addr); - } - - switch (addr) { - case 0x000: /* GPMC_REVISION */ - return s->revision; - - case 0x010: /* GPMC_SYSCONFIG */ - return s->sysconfig; - - case 0x014: /* GPMC_SYSSTATUS */ - return 1; /* RESETDONE */ - - case 0x018: /* GPMC_IRQSTATUS */ - return s->irqst; - - case 0x01c: /* GPMC_IRQENABLE */ - return s->irqen; - - case 0x040: /* GPMC_TIMEOUT_CONTROL */ - return s->timeout; - - case 0x044: /* GPMC_ERR_ADDRESS */ - case 0x048: /* GPMC_ERR_TYPE */ - return 0; - - case 0x050: /* GPMC_CONFIG */ - return s->config; - - case 0x054: /* GPMC_STATUS */ - return 0x001; - - case 0x060 ... 0x1d4: - cs = (addr - 0x060) / 0x30; - addr -= cs * 0x30; - f = s->cs_file + cs; - switch (addr) { - case 0x60: /* GPMC_CONFIG1 */ - return f->config[0]; - case 0x64: /* GPMC_CONFIG2 */ - return f->config[1]; - case 0x68: /* GPMC_CONFIG3 */ - return f->config[2]; - case 0x6c: /* GPMC_CONFIG4 */ - return f->config[3]; - case 0x70: /* GPMC_CONFIG5 */ - return f->config[4]; - case 0x74: /* GPMC_CONFIG6 */ - return f->config[5]; - case 0x78: /* GPMC_CONFIG7 */ - return f->config[6]; - case 0x84 ... 0x87: /* GPMC_NAND_DATA */ - if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) { - return omap_nand_read(f, 0, size); - } - return 0; - } - break; - - case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */ - return s->prefetch.config1; - case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */ - return s->prefetch.transfercount; - case 0x1ec: /* GPMC_PREFETCH_CONTROL */ - return s->prefetch.startengine; - case 0x1f0: /* GPMC_PREFETCH_STATUS */ - /* NB: The OMAP3 TRM is inconsistent about whether the GPMC - * FIFOTHRESHOLDSTATUS bit should be set when - * FIFOPOINTER > FIFOTHRESHOLD or when it is >= FIFOTHRESHOLD. - * Apparently the underlying functional spec from which the TRM was - * created states that the behaviour is ">=", and this also - * makes more conceptual sense. 
- */ - return (s->prefetch.fifopointer << 24) | - ((s->prefetch.fifopointer >= - ((s->prefetch.config1 >> 8) & 0x7f) ? 1 : 0) << 16) | - s->prefetch.count; - - case 0x1f4: /* GPMC_ECC_CONFIG */ - return s->ecc_cs; - case 0x1f8: /* GPMC_ECC_CONTROL */ - return s->ecc_ptr; - case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */ - return s->ecc_cfg; - case 0x200 ... 0x220: /* GPMC_ECC_RESULT */ - cs = (addr & 0x1f) >> 2; - /* TODO: check correctness */ - return - ((s->ecc[cs].cp & 0x07) << 0) | - ((s->ecc[cs].cp & 0x38) << 13) | - ((s->ecc[cs].lp[0] & 0x1ff) << 3) | - ((s->ecc[cs].lp[1] & 0x1ff) << 19); - - case 0x230: /* GPMC_TESTMODE_CTRL */ - return 0; - case 0x234: /* GPMC_PSA_LSB */ - case 0x238: /* GPMC_PSA_MSB */ - return 0x00000000; - } - - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_gpmc_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_gpmc_s *s = opaque; - int cs; - struct omap_gpmc_cs_file_s *f; - - if (size != 4 && gpmc_wordaccess_only(addr)) { - omap_badwidth_write32(opaque, addr, value); - return; - } - - switch (addr) { - case 0x000: /* GPMC_REVISION */ - case 0x014: /* GPMC_SYSSTATUS */ - case 0x054: /* GPMC_STATUS */ - case 0x1f0: /* GPMC_PREFETCH_STATUS */ - case 0x200 ... 0x220: /* GPMC_ECC_RESULT */ - case 0x234: /* GPMC_PSA_LSB */ - case 0x238: /* GPMC_PSA_MSB */ - OMAP_RO_REG(addr); - break; - - case 0x010: /* GPMC_SYSCONFIG */ - if ((value >> 3) == 0x3) - fprintf(stderr, "%s: bad SDRAM idle mode %"PRIi64"\n", - __func__, value >> 3); - if (value & 2) - omap_gpmc_reset(s); - s->sysconfig = value & 0x19; - break; - - case 0x018: /* GPMC_IRQSTATUS */ - s->irqst &= ~value; - omap_gpmc_int_update(s); - break; - - case 0x01c: /* GPMC_IRQENABLE */ - s->irqen = value & 0xf03; - omap_gpmc_int_update(s); - break; - - case 0x040: /* GPMC_TIMEOUT_CONTROL */ - s->timeout = value & 0x1ff1; - break; - - case 0x044: /* GPMC_ERR_ADDRESS */ - case 0x048: /* GPMC_ERR_TYPE */ - break; - - case 0x050: /* GPMC_CONFIG */ - s->config = value & 0xf13; - break; - - case 0x060 ... 0x1d4: - cs = (addr - 0x060) / 0x30; - addr -= cs * 0x30; - f = s->cs_file + cs; - switch (addr) { - case 0x60: /* GPMC_CONFIG1 */ - f->config[0] = value & 0xffef3e13; - break; - case 0x64: /* GPMC_CONFIG2 */ - f->config[1] = value & 0x001f1f8f; - break; - case 0x68: /* GPMC_CONFIG3 */ - f->config[2] = value & 0x001f1f8f; - break; - case 0x6c: /* GPMC_CONFIG4 */ - f->config[3] = value & 0x1f8f1f8f; - break; - case 0x70: /* GPMC_CONFIG5 */ - f->config[4] = value & 0x0f1f1f1f; - break; - case 0x74: /* GPMC_CONFIG6 */ - f->config[5] = value & 0x00000fcf; - break; - case 0x78: /* GPMC_CONFIG7 */ - if ((f->config[6] ^ value) & 0xf7f) { - omap_gpmc_cs_unmap(s, cs); - f->config[6] = value & 0x00000f7f; - omap_gpmc_cs_map(s, cs); - } - break; - case 0x7c ... 0x7f: /* GPMC_NAND_COMMAND */ - if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) { - nand_setpins(f->dev, 1, 0, 0, 1, 0); /* CLE */ - omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size); - } - break; - case 0x80 ... 0x83: /* GPMC_NAND_ADDRESS */ - if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) { - nand_setpins(f->dev, 0, 1, 0, 1, 0); /* ALE */ - omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size); - } - break; - case 0x84 ... 
0x87: /* GPMC_NAND_DATA */ - if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) { - omap_nand_write(f, 0, value, size); - } - break; - default: - goto bad_reg; - } - break; - - case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */ - if (!s->prefetch.startengine) { - uint32_t newconfig1 = value & 0x7f8f7fbf; - uint32_t changed; - changed = newconfig1 ^ s->prefetch.config1; - if (changed & (0x80 | 0x7000000)) { - /* Turning the engine on or off, or mapping it somewhere else. - * cs_map() and cs_unmap() check the prefetch config and - * overall CSVALID bits, so it is sufficient to unmap-and-map - * both the old cs and the new one. Note that we adhere to - * the "unmap/change config/map" order (and not unmap twice - * if newcs == oldcs), otherwise we'll try to delete the wrong - * memory region. - */ - int oldcs = prefetch_cs(s->prefetch.config1); - int newcs = prefetch_cs(newconfig1); - omap_gpmc_cs_unmap(s, oldcs); - if (oldcs != newcs) { - omap_gpmc_cs_unmap(s, newcs); - } - s->prefetch.config1 = newconfig1; - omap_gpmc_cs_map(s, oldcs); - if (oldcs != newcs) { - omap_gpmc_cs_map(s, newcs); - } - } else { - s->prefetch.config1 = newconfig1; - } - } - break; - - case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */ - if (!s->prefetch.startengine) { - s->prefetch.transfercount = value & 0x3fff; - } - break; - - case 0x1ec: /* GPMC_PREFETCH_CONTROL */ - if (s->prefetch.startengine != (value & 1)) { - s->prefetch.startengine = value & 1; - if (s->prefetch.startengine) { - /* Prefetch engine start */ - s->prefetch.count = s->prefetch.transfercount; - if (s->prefetch.config1 & 1) { - /* Write */ - s->prefetch.fifopointer = 64; - } else { - /* Read */ - s->prefetch.fifopointer = 0; - fill_prefetch_fifo(s); - } - } else { - /* Prefetch engine forcibly stopped. The TRM - * doesn't define the behaviour if you do this. - * We clear the prefetch count, which means that - * we permit no more writes, and don't read any - * more data from NAND. The CPU can still drain - * the FIFO of unread data. - */ - s->prefetch.count = 0; - } - omap_gpmc_int_update(s); - } - break; - - case 0x1f4: /* GPMC_ECC_CONFIG */ - s->ecc_cs = 0x8f; - break; - case 0x1f8: /* GPMC_ECC_CONTROL */ - if (value & (1 << 8)) - for (cs = 0; cs < 9; cs ++) - ecc_reset(&s->ecc[cs]); - s->ecc_ptr = value & 0xf; - if (s->ecc_ptr == 0 || s->ecc_ptr > 9) { - s->ecc_ptr = 0; - s->ecc_cs &= ~1; - } - break; - case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */ - s->ecc_cfg = value & 0x3fcff1ff; - break; - case 0x230: /* GPMC_TESTMODE_CTRL */ - if (value & 7) - fprintf(stderr, "%s: test mode enable attempt\n", __func__); - break; - - default: - bad_reg: - OMAP_BAD_REG(addr); - return; - } -} - -static const MemoryRegionOps omap_gpmc_ops = { - .read = omap_gpmc_read, - .write = omap_gpmc_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu, - hwaddr base, - qemu_irq irq, qemu_irq drq) -{ - int cs; - struct omap_gpmc_s *s = g_new0(struct omap_gpmc_s, 1); - - memory_region_init_io(&s->iomem, NULL, &omap_gpmc_ops, s, "omap-gpmc", 0x1000); - memory_region_add_subregion(get_system_memory(), base, &s->iomem); - - s->irq = irq; - s->drq = drq; - s->accept_256 = cpu_is_omap3630(mpu); - s->revision = cpu_class_omap3(mpu) ? 0x50 : 0x20; - s->lastirq = 0; - omap_gpmc_reset(s); - - /* We have to register a different IO memory handler for each - * chip select region in case a NAND device is mapped there. 
We - * make the region the worst-case size of 256MB and rely on the - * container memory region in cs_map to chop it down to the actual - * guest-requested size. - */ - for (cs = 0; cs < 8; cs++) { - memory_region_init_io(&s->cs_file[cs].nandiomem, NULL, - &omap_nand_ops, - &s->cs_file[cs], - "omap-nand", - 256 * 1024 * 1024); - } - - memory_region_init_io(&s->prefetch.iomem, NULL, &omap_prefetch_ops, s, - "omap-gpmc-prefetch", 256 * 1024 * 1024); - return s; -} - -void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem) -{ - struct omap_gpmc_cs_file_s *f; - assert(iomem); - - if (cs < 0 || cs >= 8) { - fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs); - exit(-1); - } - f = &s->cs_file[cs]; - - omap_gpmc_cs_unmap(s, cs); - f->config[0] &= ~(0xf << 10); - f->iomem = iomem; - omap_gpmc_cs_map(s, cs); -} - -void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand) -{ - struct omap_gpmc_cs_file_s *f; - assert(nand); - - if (cs < 0 || cs >= 8) { - fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs); - exit(-1); - } - f = &s->cs_file[cs]; - - omap_gpmc_cs_unmap(s, cs); - f->config[0] &= ~(0xf << 10); - f->config[0] |= (OMAP_GPMC_NAND << 10); - f->dev = nand; - if (nand_getbuswidth(f->dev) == 16) { - f->config[0] |= OMAP_GPMC_16BIT << 12; - } - omap_gpmc_cs_map(s, cs); -} diff --git a/hw/misc/omap_l4.c b/hw/misc/omap_l4.c deleted file mode 100644 index b7875489da3..00000000000 --- a/hw/misc/omap_l4.c +++ /dev/null @@ -1,162 +0,0 @@ -/* - * TI OMAP L4 interconnect emulation. - * - * Copyright (C) 2007-2009 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) any later version of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . 
- */ -#include "qemu/osdep.h" -#include "hw/arm/omap.h" - -struct omap_l4_s { - MemoryRegion *address_space; - hwaddr base; - int ta_num; - struct omap_target_agent_s ta[]; -}; - -struct omap_l4_s *omap_l4_init(MemoryRegion *address_space, - hwaddr base, int ta_num) -{ - struct omap_l4_s *bus = g_malloc0( - sizeof(*bus) + ta_num * sizeof(*bus->ta)); - - bus->address_space = address_space; - bus->ta_num = ta_num; - bus->base = base; - - return bus; -} - -hwaddr omap_l4_region_base(struct omap_target_agent_s *ta, - int region) -{ - return ta->bus->base + ta->start[region].offset; -} - -hwaddr omap_l4_region_size(struct omap_target_agent_s *ta, - int region) -{ - return ta->start[region].size; -} - -static uint64_t omap_l4ta_read(void *opaque, hwaddr addr, unsigned size) -{ - struct omap_target_agent_s *s = opaque; - - if (size != 2) { - return omap_badwidth_read16(opaque, addr); - } - - switch (addr) { - case 0x00: /* COMPONENT */ - return s->component; - - case 0x20: /* AGENT_CONTROL */ - return s->control; - - case 0x28: /* AGENT_STATUS */ - return s->status; - } - - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_l4ta_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_target_agent_s *s = opaque; - - if (size != 4) { - omap_badwidth_write32(opaque, addr, value); - return; - } - - switch (addr) { - case 0x00: /* COMPONENT */ - case 0x28: /* AGENT_STATUS */ - OMAP_RO_REG(addr); - break; - - case 0x20: /* AGENT_CONTROL */ - s->control = value & 0x01000700; - if (value & 1) /* OCP_RESET */ - s->status &= ~1; /* REQ_TIMEOUT */ - break; - - default: - OMAP_BAD_REG(addr); - } -} - -static const MemoryRegionOps omap_l4ta_ops = { - .read = omap_l4ta_read, - .write = omap_l4ta_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -struct omap_target_agent_s *omap_l4ta_get(struct omap_l4_s *bus, - const struct omap_l4_region_s *regions, - const struct omap_l4_agent_info_s *agents, - int cs) -{ - int i; - struct omap_target_agent_s *ta = NULL; - const struct omap_l4_agent_info_s *info = NULL; - - for (i = 0; i < bus->ta_num; i ++) - if (agents[i].ta == cs) { - ta = &bus->ta[i]; - info = &agents[i]; - break; - } - if (!ta) { - fprintf(stderr, "%s: bad target agent (%i)\n", __func__, cs); - exit(-1); - } - - ta->bus = bus; - ta->start = ®ions[info->region]; - ta->regions = info->regions; - - ta->component = ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0); - ta->status = 0x00000000; - ta->control = 0x00000200; /* XXX 01000200 for L4TAO */ - - memory_region_init_io(&ta->iomem, NULL, &omap_l4ta_ops, ta, "omap.l4ta", - omap_l4_region_size(ta, info->ta_region)); - omap_l4_attach(ta, info->ta_region, &ta->iomem); - - return ta; -} - -hwaddr omap_l4_attach(struct omap_target_agent_s *ta, - int region, MemoryRegion *mr) -{ - hwaddr base; - - if (region < 0 || region >= ta->regions) { - fprintf(stderr, "%s: bad io region (%i)\n", __func__, region); - exit(-1); - } - - base = ta->bus->base + ta->start[region].offset; - if (mr) { - memory_region_add_subregion(ta->bus->address_space, base, mr); - } - - return base; -} diff --git a/hw/misc/omap_sdrc.c b/hw/misc/omap_sdrc.c deleted file mode 100644 index 6aa1b3ef7fb..00000000000 --- a/hw/misc/omap_sdrc.c +++ /dev/null @@ -1,167 +0,0 @@ -/* - * TI OMAP SDRAM controller emulation. 
- * - * Copyright (C) 2007-2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) any later version of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - */ -#include "qemu/osdep.h" -#include "hw/arm/omap.h" - -/* SDRAM Controller Subsystem */ -struct omap_sdrc_s { - MemoryRegion iomem; - uint8_t config; -}; - -void omap_sdrc_reset(struct omap_sdrc_s *s) -{ - s->config = 0x10; -} - -static uint64_t omap_sdrc_read(void *opaque, hwaddr addr, unsigned size) -{ - struct omap_sdrc_s *s = opaque; - - if (size != 4) { - return omap_badwidth_read32(opaque, addr); - } - - switch (addr) { - case 0x00: /* SDRC_REVISION */ - return 0x20; - - case 0x10: /* SDRC_SYSCONFIG */ - return s->config; - - case 0x14: /* SDRC_SYSSTATUS */ - return 1; /* RESETDONE */ - - case 0x40: /* SDRC_CS_CFG */ - case 0x44: /* SDRC_SHARING */ - case 0x48: /* SDRC_ERR_ADDR */ - case 0x4c: /* SDRC_ERR_TYPE */ - case 0x60: /* SDRC_DLLA_SCTRL */ - case 0x64: /* SDRC_DLLA_STATUS */ - case 0x68: /* SDRC_DLLB_CTRL */ - case 0x6c: /* SDRC_DLLB_STATUS */ - case 0x70: /* SDRC_POWER */ - case 0x80: /* SDRC_MCFG_0 */ - case 0x84: /* SDRC_MR_0 */ - case 0x88: /* SDRC_EMR1_0 */ - case 0x8c: /* SDRC_EMR2_0 */ - case 0x90: /* SDRC_EMR3_0 */ - case 0x94: /* SDRC_DCDL1_CTRL */ - case 0x98: /* SDRC_DCDL2_CTRL */ - case 0x9c: /* SDRC_ACTIM_CTRLA_0 */ - case 0xa0: /* SDRC_ACTIM_CTRLB_0 */ - case 0xa4: /* SDRC_RFR_CTRL_0 */ - case 0xa8: /* SDRC_MANUAL_0 */ - case 0xb0: /* SDRC_MCFG_1 */ - case 0xb4: /* SDRC_MR_1 */ - case 0xb8: /* SDRC_EMR1_1 */ - case 0xbc: /* SDRC_EMR2_1 */ - case 0xc0: /* SDRC_EMR3_1 */ - case 0xc4: /* SDRC_ACTIM_CTRLA_1 */ - case 0xc8: /* SDRC_ACTIM_CTRLB_1 */ - case 0xd4: /* SDRC_RFR_CTRL_1 */ - case 0xd8: /* SDRC_MANUAL_1 */ - return 0x00; - } - - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_sdrc_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_sdrc_s *s = opaque; - - if (size != 4) { - omap_badwidth_write32(opaque, addr, value); - return; - } - - switch (addr) { - case 0x00: /* SDRC_REVISION */ - case 0x14: /* SDRC_SYSSTATUS */ - case 0x48: /* SDRC_ERR_ADDR */ - case 0x64: /* SDRC_DLLA_STATUS */ - case 0x6c: /* SDRC_DLLB_STATUS */ - OMAP_RO_REG(addr); - return; - - case 0x10: /* SDRC_SYSCONFIG */ - if ((value >> 3) != 0x2) - fprintf(stderr, "%s: bad SDRAM idle mode %i\n", - __func__, (unsigned)value >> 3); - if (value & 2) - omap_sdrc_reset(s); - s->config = value & 0x18; - break; - - case 0x40: /* SDRC_CS_CFG */ - case 0x44: /* SDRC_SHARING */ - case 0x4c: /* SDRC_ERR_TYPE */ - case 0x60: /* SDRC_DLLA_SCTRL */ - case 0x68: /* SDRC_DLLB_CTRL */ - case 0x70: /* SDRC_POWER */ - case 0x80: /* SDRC_MCFG_0 */ - case 0x84: /* SDRC_MR_0 */ - case 0x88: /* SDRC_EMR1_0 */ - case 0x8c: /* SDRC_EMR2_0 */ - case 0x90: /* SDRC_EMR3_0 */ - case 0x94: /* SDRC_DCDL1_CTRL */ - case 0x98: /* SDRC_DCDL2_CTRL */ - case 0x9c: /* SDRC_ACTIM_CTRLA_0 */ - case 0xa0: /* SDRC_ACTIM_CTRLB_0 */ - case 0xa4: /* SDRC_RFR_CTRL_0 */ - case 0xa8: /* 
SDRC_MANUAL_0 */ - case 0xb0: /* SDRC_MCFG_1 */ - case 0xb4: /* SDRC_MR_1 */ - case 0xb8: /* SDRC_EMR1_1 */ - case 0xbc: /* SDRC_EMR2_1 */ - case 0xc0: /* SDRC_EMR3_1 */ - case 0xc4: /* SDRC_ACTIM_CTRLA_1 */ - case 0xc8: /* SDRC_ACTIM_CTRLB_1 */ - case 0xd4: /* SDRC_RFR_CTRL_1 */ - case 0xd8: /* SDRC_MANUAL_1 */ - break; - - default: - OMAP_BAD_REG(addr); - return; - } -} - -static const MemoryRegionOps omap_sdrc_ops = { - .read = omap_sdrc_read, - .write = omap_sdrc_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -struct omap_sdrc_s *omap_sdrc_init(MemoryRegion *sysmem, - hwaddr base) -{ - struct omap_sdrc_s *s = g_new0(struct omap_sdrc_s, 1); - - omap_sdrc_reset(s); - - memory_region_init_io(&s->iomem, NULL, &omap_sdrc_ops, s, "omap.sdrc", 0x1000); - memory_region_add_subregion(sysmem, base, &s->iomem); - - return s; -} diff --git a/hw/misc/omap_tap.c b/hw/misc/omap_tap.c deleted file mode 100644 index 4d7fb7d85f1..00000000000 --- a/hw/misc/omap_tap.c +++ /dev/null @@ -1,117 +0,0 @@ -/* - * TI OMAP TEST-Chip-level TAP emulation. - * - * Copyright (C) 2007-2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) any later version of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - */ - -#include "qemu/osdep.h" -#include "hw/hw.h" -#include "hw/arm/omap.h" - -/* TEST-Chip-level TAP */ -static uint64_t omap_tap_read(void *opaque, hwaddr addr, unsigned size) -{ - struct omap_mpu_state_s *s = opaque; - - if (size != 4) { - return omap_badwidth_read32(opaque, addr); - } - - switch (addr) { - case 0x204: /* IDCODE_reg */ - switch (s->mpu_model) { - case omap2420: - case omap2422: - case omap2423: - return 0x5b5d902f; /* ES 2.2 */ - case omap2430: - return 0x5b68a02f; /* ES 2.2 */ - case omap3430: - return 0x1b7ae02f; /* ES 2 */ - default: - hw_error("%s: Bad mpu model\n", __func__); - } - - case 0x208: /* PRODUCTION_ID_reg for OMAP2 */ - case 0x210: /* PRODUCTION_ID_reg for OMAP3 */ - switch (s->mpu_model) { - case omap2420: - return 0x000254f0; /* POP ESHS2.1.1 in N91/93/95, ES2 in N800 */ - case omap2422: - return 0x000400f0; - case omap2423: - return 0x000800f0; - case omap2430: - return 0x000000f0; - case omap3430: - return 0x000000f0; - default: - hw_error("%s: Bad mpu model\n", __func__); - } - - case 0x20c: - switch (s->mpu_model) { - case omap2420: - case omap2422: - case omap2423: - return 0xcafeb5d9; /* ES 2.2 */ - case omap2430: - return 0xcafeb68a; /* ES 2.2 */ - case omap3430: - return 0xcafeb7ae; /* ES 2 */ - default: - hw_error("%s: Bad mpu model\n", __func__); - } - - case 0x218: /* DIE_ID_reg */ - return ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0); - case 0x21c: /* DIE_ID_reg */ - return 0x54 << 24; - case 0x220: /* DIE_ID_reg */ - return ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0); - case 0x224: /* DIE_ID_reg */ - return ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0); - } - - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_tap_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - 
if (size != 4) { - omap_badwidth_write32(opaque, addr, value); - return; - } - - OMAP_BAD_REG(addr); -} - -static const MemoryRegionOps omap_tap_ops = { - .read = omap_tap_read, - .write = omap_tap_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -void omap_tap_init(struct omap_target_agent_s *ta, - struct omap_mpu_state_s *mpu) -{ - memory_region_init_io(&mpu->tap_iomem, NULL, &omap_tap_ops, mpu, "omap.tap", - omap_l4_region_size(ta, 0)); - omap_l4_attach(ta, 0, &mpu->tap_iomem); -} diff --git a/hw/misc/pc-testdev.c b/hw/misc/pc-testdev.c index e3896518694..67c486f347c 100644 --- a/hw/misc/pc-testdev.c +++ b/hw/misc/pc-testdev.c @@ -193,7 +193,7 @@ static void testdev_realizefn(DeviceState *d, Error **errp) memory_region_add_subregion(mem, 0xff000000, &dev->iomem); } -static void testdev_class_init(ObjectClass *klass, void *data) +static void testdev_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/pci-testdev.c b/hw/misc/pci-testdev.c index acedd0f82bc..ba71c5069ff 100644 --- a/hw/misc/pci-testdev.c +++ b/hw/misc/pci-testdev.c @@ -23,7 +23,7 @@ #include "hw/qdev-properties.h" #include "qemu/event_notifier.h" #include "qemu/module.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "qom/object.h" typedef struct PCITestDevHdr { @@ -90,6 +90,7 @@ struct PCITestDevState { int current; uint64_t membar_size; + bool membar_backed; MemoryRegion membar; }; @@ -258,8 +259,14 @@ static void pci_testdev_realize(PCIDevice *pci_dev, Error **errp) pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->portio); if (d->membar_size) { - memory_region_init(&d->membar, OBJECT(d), "pci-testdev-membar", - d->membar_size); + if (d->membar_backed) + memory_region_init_ram(&d->membar, OBJECT(d), + "pci-testdev-membar-backed", + d->membar_size, NULL); + else + memory_region_init(&d->membar, OBJECT(d), + "pci-testdev-membar", + d->membar_size); pci_register_bar(pci_dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_PREFETCH | @@ -319,12 +326,12 @@ static void qdev_pci_testdev_reset(DeviceState *dev) pci_testdev_reset(d); } -static Property pci_testdev_properties[] = { +static const Property pci_testdev_properties[] = { DEFINE_PROP_SIZE("membar", PCITestDevState, membar_size, 0), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_BOOL("membar-backed", PCITestDevState, membar_backed, false), }; -static void pci_testdev_class_init(ObjectClass *klass, void *data) +static void pci_testdev_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -337,7 +344,7 @@ static void pci_testdev_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_OTHERS; dc->desc = "PCI Test Device"; set_bit(DEVICE_CATEGORY_MISC, dc->categories); - dc->reset = qdev_pci_testdev_reset; + device_class_set_legacy_reset(dc, qdev_pci_testdev_reset); device_class_set_props(dc, pci_testdev_properties); } @@ -346,7 +353,7 @@ static const TypeInfo pci_testdev_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCITestDevState), .class_init = pci_testdev_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/misc/pvpanic-isa.c b/hw/misc/pvpanic-isa.c index 9a923b78690..f7b421c713b 100644 --- a/hw/misc/pvpanic-isa.c +++ b/hw/misc/pvpanic-isa.c @@ -14,7 +14,7 @@ #include "qemu/osdep.h" #include "qemu/module.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include 
"hw/nvram/fw_cfg.h" #include "hw/qdev-properties.h" @@ -98,14 +98,13 @@ static void build_pvpanic_isa_aml(AcpiDevAmlIf *adev, Aml *scope) aml_append(scope, dev); } -static Property pvpanic_isa_properties[] = { +static const Property pvpanic_isa_properties[] = { DEFINE_PROP_UINT16(PVPANIC_IOPORT_PROP, PVPanicISAState, ioport, 0x505), DEFINE_PROP_UINT8("events", PVPanicISAState, pvpanic.events, PVPANIC_EVENTS), - DEFINE_PROP_END_OF_LIST(), }; -static void pvpanic_isa_class_init(ObjectClass *klass, void *data) +static void pvpanic_isa_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); @@ -122,7 +121,7 @@ static const TypeInfo pvpanic_isa_info = { .instance_size = sizeof(PVPanicISAState), .instance_init = pvpanic_isa_initfn, .class_init = pvpanic_isa_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_ACPI_DEV_AML_IF }, { }, }, diff --git a/hw/misc/pvpanic-mmio.c b/hw/misc/pvpanic-mmio.c new file mode 100644 index 00000000000..2a363106b2d --- /dev/null +++ b/hw/misc/pvpanic-mmio.c @@ -0,0 +1,60 @@ +/* + * QEMU simulated pvpanic device (MMIO frontend) + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" + +#include "hw/qdev-properties.h" +#include "hw/misc/pvpanic.h" +#include "hw/sysbus.h" +#include "standard-headers/misc/pvpanic.h" + +OBJECT_DECLARE_SIMPLE_TYPE(PVPanicMMIOState, PVPANIC_MMIO_DEVICE) + +#define PVPANIC_MMIO_SIZE 0x2 + +struct PVPanicMMIOState { + SysBusDevice parent_obj; + + PVPanicState pvpanic; +}; + +static void pvpanic_mmio_initfn(Object *obj) +{ + PVPanicMMIOState *s = PVPANIC_MMIO_DEVICE(obj); + + pvpanic_setup_io(&s->pvpanic, DEVICE(s), PVPANIC_MMIO_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->pvpanic.mr); +} + +static const Property pvpanic_mmio_properties[] = { + DEFINE_PROP_UINT8("events", PVPanicMMIOState, pvpanic.events, + PVPANIC_PANICKED | PVPANIC_CRASH_LOADED), +}; + +static void pvpanic_mmio_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, pvpanic_mmio_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo pvpanic_mmio_info = { + .name = TYPE_PVPANIC_MMIO_DEVICE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PVPanicMMIOState), + .instance_init = pvpanic_mmio_initfn, + .class_init = pvpanic_mmio_class_init, +}; + +static void pvpanic_register_types(void) +{ + type_register_static(&pvpanic_mmio_info); +} + +type_init(pvpanic_register_types) diff --git a/hw/misc/pvpanic-pci.c b/hw/misc/pvpanic-pci.c index 106d03ccd69..2869b6a7ff8 100644 --- a/hw/misc/pvpanic-pci.c +++ b/hw/misc/pvpanic-pci.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "qemu/module.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "hw/nvram/fw_cfg.h" #include "hw/qdev-properties.h" @@ -53,13 +53,12 @@ static void pvpanic_pci_realizefn(PCIDevice *dev, Error **errp) pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &ps->mr); } -static Property pvpanic_pci_properties[] = { +static const Property pvpanic_pci_properties[] = { DEFINE_PROP_UINT8("events", PVPanicPCIState, pvpanic.events, PVPANIC_EVENTS), - DEFINE_PROP_END_OF_LIST(), }; -static void pvpanic_pci_class_init(ObjectClass *klass, void *data) +static void pvpanic_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = 
DEVICE_CLASS(klass); PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass); @@ -81,7 +80,7 @@ static const TypeInfo pvpanic_pci_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PVPanicPCIState), .class_init = pvpanic_pci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { } } diff --git a/hw/misc/pvpanic.c b/hw/misc/pvpanic.c index 3b893340c05..c83247c4087 100644 --- a/hw/misc/pvpanic.c +++ b/hw/misc/pvpanic.c @@ -15,7 +15,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "qemu/module.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "hw/nvram/fw_cfg.h" #include "hw/qdev-properties.h" diff --git a/hw/misc/sbsa_ec.c b/hw/misc/sbsa_ec.c index 86b23a5372f..dfee1af5ad2 100644 --- a/hw/misc/sbsa_ec.c +++ b/hw/misc/sbsa_ec.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "hw/sysbus.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" typedef struct SECUREECState { SysBusDevice parent_obj; @@ -73,7 +73,7 @@ static void sbsa_ec_init(Object *obj) sysbus_init_mmio(dev, &s->iomem); } -static void sbsa_ec_class_init(ObjectClass *klass, void *data) +static void sbsa_ec_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/sifive_e_aon.c b/hw/misc/sifive_e_aon.c index 4656457d0bb..6eef38d622a 100644 --- a/hw/misc/sifive_e_aon.c +++ b/hw/misc/sifive_e_aon.c @@ -24,7 +24,7 @@ #include "hw/misc/sifive_e_aon.h" #include "qapi/visitor.h" #include "qapi/error.h" -#include "sysemu/watchdog.h" +#include "system/watchdog.h" #include "hw/qdev-properties.h" REG32(AON_WDT_WDOGCFG, 0x0) @@ -289,17 +289,16 @@ static void sifive_e_aon_init(Object *obj) sysbus_init_irq(sbd, &r->wdog_irq); } -static Property sifive_e_aon_properties[] = { +static const Property sifive_e_aon_properties[] = { DEFINE_PROP_UINT64("wdogclk-frequency", SiFiveEAONState, wdogclk_freq, SIFIVE_E_LFCLK_DEFAULT_FREQ), - DEFINE_PROP_END_OF_LIST(), }; -static void sifive_e_aon_class_init(ObjectClass *oc, void *data) +static void sifive_e_aon_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); - dc->reset = sifive_e_aon_reset; + device_class_set_legacy_reset(dc, sifive_e_aon_reset); device_class_set_props(dc, sifive_e_aon_properties); } diff --git a/hw/misc/sifive_test.c b/hw/misc/sifive_test.c index ad688079c41..b94bb2d29db 100644 --- a/hw/misc/sifive_test.c +++ b/hw/misc/sifive_test.c @@ -23,9 +23,9 @@ #include "qapi/error.h" #include "qemu/log.h" #include "qemu/module.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "hw/misc/sifive_test.h" -#include "sysemu/sysemu.h" +#include "system/system.h" static uint64_t sifive_test_read(void *opaque, hwaddr addr, unsigned int size) { diff --git a/hw/misc/sifive_u_otp.c b/hw/misc/sifive_u_otp.c index 8965f5c22aa..1ebed2fd8b4 100644 --- a/hw/misc/sifive_u_otp.c +++ b/hw/misc/sifive_u_otp.c @@ -27,8 +27,8 @@ #include "qemu/log.h" #include "qemu/module.h" #include "hw/misc/sifive_u_otp.h" -#include "sysemu/blockdev.h" -#include "sysemu/block-backend.h" +#include "system/blockdev.h" +#include "system/block-backend.h" #define WRITTEN_BIT_ON 0x1 @@ -194,10 +194,9 @@ static const MemoryRegionOps sifive_u_otp_ops = { } }; -static Property sifive_u_otp_properties[] = { +static const Property sifive_u_otp_properties[] = { DEFINE_PROP_UINT32("serial", SiFiveUOTPState, serial, 0), DEFINE_PROP_DRIVE("drive", SiFiveUOTPState, blk), - DEFINE_PROP_END_OF_LIST(), }; 
static void sifive_u_otp_realize(DeviceState *dev, Error **errp) @@ -271,7 +270,7 @@ static void sifive_u_otp_realize(DeviceState *dev, Error **errp) memset(s->fuse_wo, 0x00, sizeof(s->fuse_wo)); } -static void sifive_u_otp_class_init(ObjectClass *klass, void *data) +static void sifive_u_otp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/sifive_u_prci.c b/hw/misc/sifive_u_prci.c index 5d9d446ee86..6e75cb6d0d5 100644 --- a/hw/misc/sifive_u_prci.c +++ b/hw/misc/sifive_u_prci.c @@ -146,12 +146,12 @@ static void sifive_u_prci_reset(DeviceState *dev) s->coreclksel = SIFIVE_U_PRCI_CORECLKSEL_HFCLK; } -static void sifive_u_prci_class_init(ObjectClass *klass, void *data) +static void sifive_u_prci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = sifive_u_prci_realize; - dc->reset = sifive_u_prci_reset; + device_class_set_legacy_reset(dc, sifive_u_prci_reset); } static const TypeInfo sifive_u_prci_info = { diff --git a/hw/misc/slavio_misc.c b/hw/misc/slavio_misc.c index 94369e4cc88..a034df3592f 100644 --- a/hw/misc/slavio_misc.c +++ b/hw/misc/slavio_misc.c @@ -27,7 +27,7 @@ #include "hw/sysbus.h" #include "migration/vmstate.h" #include "qemu/module.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "trace.h" #include "qom/object.h" @@ -483,11 +483,11 @@ static void slavio_misc_init(Object *obj) qdev_init_gpio_in(dev, slavio_set_power_fail, 1); } -static void slavio_misc_class_init(ObjectClass *klass, void *data) +static void slavio_misc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = slavio_misc_reset; + device_class_set_legacy_reset(dc, slavio_misc_reset); dc->vmsd = &vmstate_misc; } diff --git a/hw/misc/stm32_rcc.c b/hw/misc/stm32_rcc.c new file mode 100644 index 00000000000..5815b3efa54 --- /dev/null +++ b/hw/misc/stm32_rcc.c @@ -0,0 +1,162 @@ +/* + * STM32 RCC (only reset and enable registers are implemented) + * + * Copyright (c) 2024 Román Cárdenas + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "hw/misc/stm32_rcc.h" + +static void stm32_rcc_reset(DeviceState *dev) +{ + STM32RccState *s = STM32_RCC(dev); + + for (int i = 0; i < STM32_RCC_NREGS; i++) { + s->regs[i] = 0; + } +} + +static uint64_t stm32_rcc_read(void *opaque, hwaddr addr, unsigned int size) +{ + STM32RccState *s = STM32_RCC(opaque); + + uint32_t value = 0; + if (addr > STM32_RCC_DCKCFGR2) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + } else { + value = s->regs[addr >> 2]; + } + trace_stm32_rcc_read(addr, value); + return value; +} + +static void stm32_rcc_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + STM32RccState *s = STM32_RCC(opaque); + uint32_t value = val64; + uint32_t prev_value, new_value, irq_offset; + + trace_stm32_rcc_write(addr, value); + + if (addr > STM32_RCC_DCKCFGR2) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + return; + } + + switch (addr) { + case STM32_RCC_AHB1_RSTR...STM32_RCC_APB2_RSTR: + prev_value = s->regs[addr / 4]; + s->regs[addr / 4] = value; + + irq_offset = ((addr - STM32_RCC_AHB1_RSTR) / 4) * 32; + for (int i = 0; i < 32; i++) { + new_value = extract32(value, i, 1); + if (extract32(prev_value, i, 1) && !new_value) { + trace_stm32_rcc_pulse_reset(irq_offset + i, new_value); + qemu_set_irq(s->reset_irq[irq_offset + i], new_value); + } + } + return; + case STM32_RCC_AHB1_ENR...STM32_RCC_APB2_ENR: + prev_value = s->regs[addr / 4]; + s->regs[addr / 4] = value; + + irq_offset = ((addr - STM32_RCC_AHB1_ENR) / 4) * 32; + for (int i = 0; i < 32; i++) { + new_value = extract32(value, i, 1); + if (!extract32(prev_value, i, 1) && new_value) { + trace_stm32_rcc_pulse_enable(irq_offset + i, new_value); + qemu_set_irq(s->enable_irq[irq_offset + i], new_value); + } + } + return; + default: + qemu_log_mask( + LOG_UNIMP, + "%s: The RCC peripheral only supports enable and reset in QEMU\n", + __func__ + ); + s->regs[addr >> 2] = value; + } +} + +static const MemoryRegionOps stm32_rcc_ops = { + .read = stm32_rcc_read, + .write = stm32_rcc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void stm32_rcc_init(Object *obj) +{ + STM32RccState *s = STM32_RCC(obj); + + memory_region_init_io(&s->mmio, obj, &stm32_rcc_ops, s, + TYPE_STM32_RCC, STM32_RCC_PERIPHERAL_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + + qdev_init_gpio_out(DEVICE(obj), s->reset_irq, STM32_RCC_NIRQS); + qdev_init_gpio_out(DEVICE(obj), s->enable_irq, STM32_RCC_NIRQS); + + for (int i = 0; i < STM32_RCC_NIRQS; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->reset_irq[i]); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->enable_irq[i]); + } +} + +static const VMStateDescription vmstate_stm32_rcc = { + .name = TYPE_STM32_RCC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, STM32RccState, STM32_RCC_NREGS), + VMSTATE_END_OF_LIST() + } +}; + +static void stm32_rcc_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_stm32_rcc; + device_class_set_legacy_reset(dc, stm32_rcc_reset); +} + +static const TypeInfo stm32_rcc_info = { + .name = TYPE_STM32_RCC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(STM32RccState), + .instance_init = stm32_rcc_init, + .class_init = stm32_rcc_class_init, +}; + +static void stm32_rcc_register_types(void) +{ + 
type_register_static(&stm32_rcc_info); +} + +type_init(stm32_rcc_register_types) diff --git a/hw/misc/stm32f2xx_syscfg.c b/hw/misc/stm32f2xx_syscfg.c index 19c1e864245..d285896ea76 100644 --- a/hw/misc/stm32f2xx_syscfg.c +++ b/hw/misc/stm32f2xx_syscfg.c @@ -138,11 +138,11 @@ static void stm32f2xx_syscfg_init(Object *obj) sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); } -static void stm32f2xx_syscfg_class_init(ObjectClass *klass, void *data) +static void stm32f2xx_syscfg_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = stm32f2xx_syscfg_reset; + device_class_set_legacy_reset(dc, stm32f2xx_syscfg_reset); } static const TypeInfo stm32f2xx_syscfg_info = { diff --git a/hw/misc/stm32f4xx_exti.c b/hw/misc/stm32f4xx_exti.c index 7bd3afcd7cc..0688e6e73ec 100644 --- a/hw/misc/stm32f4xx_exti.c +++ b/hw/misc/stm32f4xx_exti.c @@ -164,11 +164,11 @@ static const VMStateDescription vmstate_stm32f4xx_exti = { } }; -static void stm32f4xx_exti_class_init(ObjectClass *klass, void *data) +static void stm32f4xx_exti_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = stm32f4xx_exti_reset; + device_class_set_legacy_reset(dc, stm32f4xx_exti_reset); dc->vmsd = &vmstate_stm32f4xx_exti; } diff --git a/hw/misc/stm32f4xx_syscfg.c b/hw/misc/stm32f4xx_syscfg.c index 854fce6a952..addfb031e88 100644 --- a/hw/misc/stm32f4xx_syscfg.c +++ b/hw/misc/stm32f4xx_syscfg.c @@ -147,11 +147,11 @@ static const VMStateDescription vmstate_stm32f4xx_syscfg = { } }; -static void stm32f4xx_syscfg_class_init(ObjectClass *klass, void *data) +static void stm32f4xx_syscfg_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = stm32f4xx_syscfg_reset; + device_class_set_legacy_reset(dc, stm32f4xx_syscfg_reset); dc->vmsd = &vmstate_stm32f4xx_syscfg; } diff --git a/hw/misc/stm32l4x5_exti.c b/hw/misc/stm32l4x5_exti.c index e281841dcf4..9c002164c82 100644 --- a/hw/misc/stm32l4x5_exti.c +++ b/hw/misc/stm32l4x5_exti.c @@ -271,7 +271,7 @@ static const VMStateDescription vmstate_stm32l4x5_exti = { } }; -static void stm32l4x5_exti_class_init(ObjectClass *klass, void *data) +static void stm32l4x5_exti_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/misc/stm32l4x5_rcc.c b/hw/misc/stm32l4x5_rcc.c index 417bd5e85f6..0e1f27fbdda 100644 --- a/hw/misc/stm32l4x5_rcc.c +++ b/hw/misc/stm32l4x5_rcc.c @@ -141,7 +141,7 @@ static const VMStateDescription clock_mux_vmstate = { } }; -static void clock_mux_class_init(ObjectClass *klass, void *data) +static void clock_mux_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -150,6 +150,8 @@ static void clock_mux_class_init(ObjectClass *klass, void *data) rc->phases.hold = clock_mux_reset_hold; rc->phases.exit = clock_mux_reset_exit; dc->vmsd = &clock_mux_vmstate; + /* Reason: Part of Stm32l4x5RccState component */ + dc->user_creatable = false; } static void clock_mux_set_enable(RccClockMuxState *mux, bool enabled) @@ -293,7 +295,7 @@ static const VMStateDescription pll_vmstate = { } }; -static void pll_class_init(ObjectClass *klass, void *data) +static void pll_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -302,6 +304,8 @@ static void pll_class_init(ObjectClass *klass, void *data) 
rc->phases.hold = pll_reset_hold; rc->phases.exit = pll_reset_exit; dc->vmsd = &pll_vmstate; + /* Reason: Part of Stm32l4x5RccState component */ + dc->user_creatable = false; } static void pll_set_vco_multiplier(RccPllState *pll, uint32_t vco_multiplier) @@ -543,19 +547,31 @@ static void rcc_update_cfgr_register(Stm32l4x5RccState *s) uint32_t val; /* MCOPRE */ val = FIELD_EX32(s->cfgr, CFGR, MCOPRE); - assert(val <= 0b100); - clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_MCO], - 1, 1 << val); + if (val > 0b100) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid MCOPRE value: 0x%"PRIx32"\n", + __func__, val); + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], false); + } else { + clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_MCO], + 1, 1 << val); + } /* MCOSEL */ val = FIELD_EX32(s->cfgr, CFGR, MCOSEL); - assert(val <= 0b111); - if (val == 0) { + if (val > 0b111) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid MCOSEL value: 0x%"PRIx32"\n", + __func__, val); clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], false); } else { - clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], true); - clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_MCO], - val - 1); + if (val == 0) { + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], false); + } else { + clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], true); + clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_MCO], + val - 1); + } } /* STOPWUCK */ @@ -1414,17 +1430,16 @@ static void stm32l4x5_rcc_realize(DeviceState *dev, Error **errp) clock_update(s->gnd, 0); } -static Property stm32l4x5_rcc_properties[] = { +static const Property stm32l4x5_rcc_properties[] = { DEFINE_PROP_UINT64("hse_frequency", Stm32l4x5RccState, hse_frequency, HSE_DEFAULT_FRQ), DEFINE_PROP_UINT64("sai1_extclk_frequency", Stm32l4x5RccState, sai1_extclk_frequency, 0), DEFINE_PROP_UINT64("sai2_extclk_frequency", Stm32l4x5RccState, sai2_extclk_frequency, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void stm32l4x5_rcc_class_init(ObjectClass *klass, void *data) +static void stm32l4x5_rcc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/misc/stm32l4x5_syscfg.c b/hw/misc/stm32l4x5_syscfg.c index a5a1ce26804..4e21756e0b4 100644 --- a/hw/misc/stm32l4x5_syscfg.c +++ b/hw/misc/stm32l4x5_syscfg.c @@ -26,6 +26,9 @@ #include "trace.h" #include "hw/irq.h" #include "migration/vmstate.h" +#include "hw/clock.h" +#include "hw/qdev-clock.h" +#include "qapi/error.h" #include "hw/misc/stm32l4x5_syscfg.h" #include "hw/gpio/stm32l4x5_gpio.h" @@ -225,12 +228,22 @@ static void stm32l4x5_syscfg_init(Object *obj) qdev_init_gpio_in(DEVICE(obj), stm32l4x5_syscfg_set_irq, GPIO_NUM_PINS * NUM_GPIOS); qdev_init_gpio_out(DEVICE(obj), s->gpio_out, GPIO_NUM_PINS); + s->clk = qdev_init_clock_in(DEVICE(s), "clk", NULL, s, 0); +} + +static void stm32l4x5_syscfg_realize(DeviceState *dev, Error **errp) +{ + Stm32l4x5SyscfgState *s = STM32L4X5_SYSCFG(dev); + if (!clock_has_source(s->clk)) { + error_setg(errp, "SYSCFG: clk input must be connected"); + return; + } } static const VMStateDescription vmstate_stm32l4x5_syscfg = { .name = TYPE_STM32L4X5_SYSCFG, - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .fields = (VMStateField[]) { VMSTATE_UINT32(memrmp, Stm32l4x5SyscfgState), VMSTATE_UINT32(cfgr1, Stm32l4x5SyscfgState), @@ -241,16 +254,18 @@ static const VMStateDescription vmstate_stm32l4x5_syscfg = { VMSTATE_UINT32(swpr, 
Stm32l4x5SyscfgState), VMSTATE_UINT32(skr, Stm32l4x5SyscfgState), VMSTATE_UINT32(swpr2, Stm32l4x5SyscfgState), + VMSTATE_CLOCK(clk, Stm32l4x5SyscfgState), VMSTATE_END_OF_LIST() } }; -static void stm32l4x5_syscfg_class_init(ObjectClass *klass, void *data) +static void stm32l4x5_syscfg_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); dc->vmsd = &vmstate_stm32l4x5_syscfg; + dc->realize = stm32l4x5_syscfg_realize; rc->phases.hold = stm32l4x5_syscfg_hold_reset; } diff --git a/hw/misc/trace-events b/hw/misc/trace-events index 1be0717c0c9..e3f64c0ff6b 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -130,13 +130,13 @@ mos6522_set_sr_int(void) "set sr_int" mos6522_write(uint64_t addr, const char *name, uint64_t val) "reg=0x%"PRIx64 " [%s] val=0x%"PRIx64 mos6522_read(uint64_t addr, const char *name, unsigned val) "reg=0x%"PRIx64 " [%s] val=0x%x" -# npcm7xx_clk.c -npcm7xx_clk_read(uint64_t offset, uint32_t value) " offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 -npcm7xx_clk_write(uint64_t offset, uint32_t value) "offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 +# npcm_clk.c +npcm_clk_read(uint64_t offset, uint32_t value) " offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 +npcm_clk_write(uint64_t offset, uint32_t value) "offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 -# npcm7xx_gcr.c -npcm7xx_gcr_read(uint64_t offset, uint32_t value) " offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 -npcm7xx_gcr_write(uint64_t offset, uint32_t value) "offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 +# npcm_gcr.c +npcm_gcr_read(uint64_t offset, uint64_t value) " offset: 0x%04" PRIx64 " value: 0x%08" PRIx64 +npcm_gcr_write(uint64_t offset, uint64_t value) "offset: 0x%04" PRIx64 " value: 0x%08" PRIx64 # npcm7xx_mft.c npcm7xx_mft_read(const char *name, uint64_t offset, uint16_t value) "%s: offset: 0x%04" PRIx64 " value: 0x%04" PRIx16 @@ -156,6 +156,12 @@ npcm7xx_pwm_write(const char *id, uint64_t offset, uint32_t value) "%s offset: 0 npcm7xx_pwm_update_freq(const char *id, uint8_t index, uint32_t old_value, uint32_t new_value) "%s pwm[%u] Update Freq: old_freq: %u, new_freq: %u" npcm7xx_pwm_update_duty(const char *id, uint8_t index, uint32_t old_value, uint32_t new_value) "%s pwm[%u] Update Duty: old_duty: %u, new_duty: %u" +# stm32_rcc.c +stm32_rcc_read(uint64_t addr, uint64_t data) "reg read: addr: 0x%" PRIx64 " val: 0x%" PRIx64 "" +stm32_rcc_write(uint64_t addr, uint64_t data) "reg write: addr: 0x%" PRIx64 " val: 0x%" PRIx64 "" +stm32_rcc_pulse_enable(int line, int level) "Enable: %d to %d" +stm32_rcc_pulse_reset(int line, int level) "Reset: %d to %d" + # stm32f4xx_syscfg.c stm32f4xx_syscfg_set_irq(int gpio, int line, int level) "Interrupt: GPIO: %d, Line: %d; Level: %d" stm32f4xx_pulse_exti(int irq) "Pulse EXTI: %d" @@ -247,6 +253,12 @@ ccm_clock_freq(uint32_t clock, uint32_t freq) "(Clock = %d) = %d" ccm_read_reg(const char *reg_name, uint32_t value) "reg[%s] <= 0x%" PRIx32 ccm_write_reg(const char *reg_name, uint32_t value) "reg[%s] => 0x%" PRIx32 +# imx6_src.c +imx6_src_read(const char *reg_name, uint32_t value) "reg[%s] => 0x%" PRIx32 +imx6_src_write(const char *reg_name, uint64_t value) "reg[%s] <= 0x%" PRIx64 +imx6_clear_reset_bit(const char *reg_name, uint32_t value) "reg[%s] <= 0x%" PRIx32 +imx6_src_reset(void) "" + # imx7_src.c imx7_src_read(const char *reg_name, uint32_t value) "reg[%s] => 0x%" PRIx32 imx7_src_write(const char *reg_name, uint32_t value) "reg[%s] <= 0x%" PRIx32 @@ -290,6 +302,14 @@ aspeed_peci_read(uint64_t 
offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" aspeed_peci_write(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 aspeed_peci_raise_interrupt(uint32_t ctrl, uint32_t status) "ctrl 0x%" PRIx32 " status 0x%" PRIx32 +# aspeed_hace.c +aspeed_hace_read(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 +aspeed_hace_write(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 +aspeed_hace_hash_sg(int index, uint64_t list_addr, uint64_t buf_addr, uint32_t len) "%d: list_addr 0x%" PRIx64 " buf_addr 0x%" PRIx64 " len 0x%" PRIx32 +aspeed_hace_hash_addr(const char *s, uint64_t addr) "%s: 0x%" PRIx64 +aspeed_hace_hash_execute_acc_mode(bool final_request) "final request: %d" +aspeed_hace_hexdump(const char *desc, uint32_t offset, char *s) "%s: 0x%08x: %s" + # bcm2835_property.c bcm2835_mbox_property(uint32_t tag, uint32_t bufsize, size_t resplen) "mbox property tag:0x%08x in_sz:%u out_sz:%zu" @@ -362,3 +382,24 @@ aspeed_sli_read(uint64_t offset, unsigned int size, uint32_t data) "To 0x%" PRIx aspeed_sliio_write(uint64_t offset, unsigned int size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32 aspeed_sliio_read(uint64_t offset, unsigned int size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32 +# ivshmem-flat.c +ivshmem_flat_irq_handler(uint16_t vector_id) "Caught interrupt request: vector %d" +ivshmem_flat_new_peer(uint16_t peer_id) "New peer ID: %d" +ivshmem_flat_add_vector_failure(uint16_t vector_id, uint32_t vector_fd, uint16_t peer_id) "Failed to add vector %u (fd = %u) to peer ID %u, maximum number of vectors reached" +ivshmem_flat_add_vector_success(uint16_t vector_id, uint32_t vector_fd, uint16_t peer_id) "Successful addition of vector %u (fd = %u) to peer ID %u" +ivshmem_flat_irq_resolved(const char *irq_qompath) "IRQ QOM path '%s' correctly resolved" +ivshmem_flat_proto_ver_own_id(uint64_t proto_ver, uint16_t peer_id) "Protocol Version = 0x%"PRIx64", Own Peer ID = %u" +ivshmem_flat_shmem_size(int fd, uint64_t size) "Shmem fd (%d) total size is %"PRIu64" byte(s)" +ivshmem_flat_shmem_map(uint64_t addr) "Mapping shmem @ 0x%"PRIx64 +ivshmem_flat_mmio_map(uint64_t addr) "Mapping MMRs @ 0x%"PRIx64 +ivshmem_flat_read_mmr(uint64_t addr_offset) "Read access at offset %"PRIu64 +ivshmem_flat_read_mmr_doorbell(void) "DOORBELL register is write-only!" +ivshmem_flat_read_write_mmr_invalid(uint64_t addr_offset) "No ivshmem register mapped at offset %"PRIu64 +ivshmem_flat_interrupt_invalid_peer(uint16_t peer_id) "Can't interrupt non-existing peer %u" +ivshmem_flat_write_mmr(uint64_t addr_offset) "Write access at offset %"PRIu64 +ivshmem_flat_interrupt_peer(uint16_t peer_id, uint16_t vector_id) "Interrupting peer ID %u, vector %u..." 
+ +# i2c-echo.c +i2c_echo_event(const char *id, const char *event) "%s: %s" +i2c_echo_recv(const char *id, uint8_t data) "%s: recv 0x%02" PRIx8 +i2c_echo_send(const char *id, uint8_t data) "%s: send 0x%02" PRIx8 diff --git a/hw/misc/tz-mpc.c b/hw/misc/tz-mpc.c index 92b994919be..a158d4a2944 100644 --- a/hw/misc/tz-mpc.c +++ b/hw/misc/tz-mpc.c @@ -587,19 +587,18 @@ static const VMStateDescription tz_mpc_vmstate = { } }; -static Property tz_mpc_properties[] = { +static const Property tz_mpc_properties[] = { DEFINE_PROP_LINK("downstream", TZMPC, downstream, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void tz_mpc_class_init(ObjectClass *klass, void *data) +static void tz_mpc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = tz_mpc_realize; dc->vmsd = &tz_mpc_vmstate; - dc->reset = tz_mpc_reset; + device_class_set_legacy_reset(dc, tz_mpc_reset); device_class_set_props(dc, tz_mpc_properties); } @@ -612,7 +611,7 @@ static const TypeInfo tz_mpc_info = { }; static void tz_mpc_iommu_memory_region_class_init(ObjectClass *klass, - void *data) + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); diff --git a/hw/misc/tz-msc.c b/hw/misc/tz-msc.c index de5a3126cca..af0cc5d4718 100644 --- a/hw/misc/tz-msc.c +++ b/hw/misc/tz-msc.c @@ -278,21 +278,20 @@ static const VMStateDescription tz_msc_vmstate = { } }; -static Property tz_msc_properties[] = { +static const Property tz_msc_properties[] = { DEFINE_PROP_LINK("downstream", TZMSC, downstream, TYPE_MEMORY_REGION, MemoryRegion *), DEFINE_PROP_LINK("idau", TZMSC, idau, TYPE_IDAU_INTERFACE, IDAUInterface *), - DEFINE_PROP_END_OF_LIST(), }; -static void tz_msc_class_init(ObjectClass *klass, void *data) +static void tz_msc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = tz_msc_realize; dc->vmsd = &tz_msc_vmstate; - dc->reset = tz_msc_reset; + device_class_set_legacy_reset(dc, tz_msc_reset); device_class_set_props(dc, tz_msc_properties); } diff --git a/hw/misc/tz-ppc.c b/hw/misc/tz-ppc.c index 64507787209..e4235a846d4 100644 --- a/hw/misc/tz-ppc.c +++ b/hw/misc/tz-ppc.c @@ -305,7 +305,7 @@ static const VMStateDescription tz_ppc_vmstate = { DEFINE_PROP_LINK("port[" #N "]", TZPPC, port[N].downstream, \ TYPE_MEMORY_REGION, MemoryRegion *) -static Property tz_ppc_properties[] = { +static const Property tz_ppc_properties[] = { DEFINE_PROP_UINT32("NONSEC_MASK", TZPPC, nonsec_mask, 0), DEFINE_PORT(0), DEFINE_PORT(1), @@ -323,16 +323,15 @@ static Property tz_ppc_properties[] = { DEFINE_PORT(13), DEFINE_PORT(14), DEFINE_PORT(15), - DEFINE_PROP_END_OF_LIST(), }; -static void tz_ppc_class_init(ObjectClass *klass, void *data) +static void tz_ppc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = tz_ppc_realize; dc->vmsd = &tz_ppc_vmstate; - dc->reset = tz_ppc_reset; + device_class_set_legacy_reset(dc, tz_ppc_reset); device_class_set_props(dc, tz_ppc_properties); } diff --git a/hw/misc/unimp.c b/hw/misc/unimp.c index 6cfc5727f0b..4370c14ef16 100644 --- a/hw/misc/unimp.c +++ b/hw/misc/unimp.c @@ -70,13 +70,12 @@ static void unimp_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); } -static Property unimp_properties[] = { +static const Property unimp_properties[] = { DEFINE_PROP_UINT64("size", UnimplementedDeviceState, size, 0), DEFINE_PROP_STRING("name", UnimplementedDeviceState, name), - DEFINE_PROP_END_OF_LIST(), 
}; -static void unimp_class_init(ObjectClass *klass, void *data) +static void unimp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/virt_ctrl.c b/hw/misc/virt_ctrl.c index 1a6c744bac2..9f16093ca2c 100644 --- a/hw/misc/virt_ctrl.c +++ b/hw/misc/virt_ctrl.c @@ -10,7 +10,7 @@ #include "migration/vmstate.h" #include "qemu/log.h" #include "trace.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "hw/misc/virt_ctrl.h" enum { @@ -125,11 +125,11 @@ static void virt_ctrl_instance_init(Object *obj) sysbus_init_irq(dev, &s->irq); } -static void virt_ctrl_class_init(ObjectClass *oc, void *data) +static void virt_ctrl_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); - dc->reset = virt_ctrl_reset; + device_class_set_legacy_reset(dc, virt_ctrl_reset); dc->realize = virt_ctrl_realize; dc->vmsd = &vmstate_virt_ctrl; } diff --git a/hw/misc/vmcoreinfo.c b/hw/misc/vmcoreinfo.c index 833773ade52..9c2e9005ad3 100644 --- a/hw/misc/vmcoreinfo.c +++ b/hw/misc/vmcoreinfo.c @@ -13,22 +13,22 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/module.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "hw/nvram/fw_cfg.h" #include "migration/vmstate.h" #include "hw/misc/vmcoreinfo.h" -static void fw_cfg_vmci_write(void *dev, off_t offset, size_t len) +static void fw_cfg_vmci_write(void *opaque, off_t offset, size_t len) { - VMCoreInfoState *s = VMCOREINFO(dev); + VMCoreInfoState *s = opaque; s->has_vmcoreinfo = offset == 0 && len == sizeof(s->vmcoreinfo) && s->vmcoreinfo.guest_format != FW_CFG_VMCOREINFO_FORMAT_NONE; } -static void vmcoreinfo_reset(void *dev) +static void vmcoreinfo_reset_hold(Object *obj, ResetType type) { - VMCoreInfoState *s = VMCOREINFO(dev); + VMCoreInfoState *s = VMCOREINFO(obj); s->has_vmcoreinfo = false; memset(&s->vmcoreinfo, 0, sizeof(s->vmcoreinfo)); @@ -47,13 +47,13 @@ static void vmcoreinfo_realize(DeviceState *dev, Error **errp) */ if (!vmcoreinfo_find()) { error_setg(errp, "at most one %s device is permitted", - VMCOREINFO_DEVICE); + TYPE_VMCOREINFO); return; } if (!fw_cfg || !fw_cfg->dma_enabled) { error_setg(errp, "%s device requires fw_cfg with DMA", - VMCOREINFO_DEVICE); + TYPE_VMCOREINFO); return; } @@ -65,7 +65,7 @@ static void vmcoreinfo_realize(DeviceState *dev, Error **errp) * This device requires to register a global reset because it is * not plugged to a bus (which, as its QOM parent, would reset it). 
*/ - qemu_register_reset(vmcoreinfo_reset, dev); + qemu_register_resettable(OBJECT(s)); vmcoreinfo_state = s; } @@ -83,26 +83,25 @@ static const VMStateDescription vmstate_vmcoreinfo = { }, }; -static void vmcoreinfo_device_class_init(ObjectClass *klass, void *data) +static void vmcoreinfo_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); dc->vmsd = &vmstate_vmcoreinfo; dc->realize = vmcoreinfo_realize; dc->hotpluggable = false; set_bit(DEVICE_CATEGORY_MISC, dc->categories); + rc->phases.hold = vmcoreinfo_reset_hold; } -static const TypeInfo vmcoreinfo_device_info = { - .name = VMCOREINFO_DEVICE, - .parent = TYPE_DEVICE, - .instance_size = sizeof(VMCoreInfoState), - .class_init = vmcoreinfo_device_class_init, +static const TypeInfo vmcoreinfo_types[] = { + { + .name = TYPE_VMCOREINFO, + .parent = TYPE_DEVICE, + .instance_size = sizeof(VMCoreInfoState), + .class_init = vmcoreinfo_device_class_init, + } }; -static void vmcoreinfo_register_types(void) -{ - type_register_static(&vmcoreinfo_device_info); -} - -type_init(vmcoreinfo_register_types) +DEFINE_TYPES(vmcoreinfo_types) diff --git a/hw/misc/xlnx-versal-cframe-reg.c b/hw/misc/xlnx-versal-cframe-reg.c index 3fc838bd54b..1ce083e2409 100644 --- a/hw/misc/xlnx-versal-cframe-reg.c +++ b/hw/misc/xlnx-versal-cframe-reg.c @@ -720,7 +720,7 @@ static const VMStateDescription vmstate_cframe_reg = { } }; -static Property cframe_regs_props[] = { +static const Property cframe_regs_props[] = { DEFINE_PROP_LINK("cfu-fdro", XlnxVersalCFrameReg, cfg.cfu_fdro, TYPE_XLNX_CFI_IF, XlnxCfiIf *), DEFINE_PROP_UINT32("blktype0-frames", XlnxVersalCFrameReg, @@ -737,7 +737,6 @@ static Property cframe_regs_props[] = { cfg.blktype_num_frames[5], 0), DEFINE_PROP_UINT32("blktype6-frames", XlnxVersalCFrameReg, cfg.blktype_num_frames[6], 0), - DEFINE_PROP_END_OF_LIST(), }; static void cframe_bcast_reg_init(Object *obj) @@ -771,7 +770,7 @@ static const VMStateDescription vmstate_cframe_bcast_reg = { } }; -static Property cframe_bcast_regs_props[] = { +static const Property cframe_bcast_regs_props[] = { DEFINE_PROP_LINK("cframe0", XlnxVersalCFrameBcastReg, cfg.cframe[0], TYPE_XLNX_CFI_IF, XlnxCfiIf *), DEFINE_PROP_LINK("cframe1", XlnxVersalCFrameBcastReg, cfg.cframe[1], @@ -802,10 +801,9 @@ static Property cframe_bcast_regs_props[] = { TYPE_XLNX_CFI_IF, XlnxCfiIf *), DEFINE_PROP_LINK("cframe14", XlnxVersalCFrameBcastReg, cfg.cframe[14], TYPE_XLNX_CFI_IF, XlnxCfiIf *), - DEFINE_PROP_END_OF_LIST(), }; -static void cframe_reg_class_init(ObjectClass *klass, void *data) +static void cframe_reg_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -819,7 +817,7 @@ static void cframe_reg_class_init(ObjectClass *klass, void *data) xcic->cfi_transfer_packet = cframe_reg_cfi_transfer_packet; } -static void cframe_bcast_reg_class_init(ObjectClass *klass, void *data) +static void cframe_bcast_reg_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -835,7 +833,7 @@ static const TypeInfo cframe_reg_info = { .instance_size = sizeof(XlnxVersalCFrameReg), .class_init = cframe_reg_class_init, .instance_init = cframe_reg_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_XLNX_CFI_IF }, { } } diff --git a/hw/misc/xlnx-versal-cfu.c b/hw/misc/xlnx-versal-cfu.c index 6bb82e51c15..b920fc77c3f 
100644 --- a/hw/misc/xlnx-versal-cfu.c +++ b/hw/misc/xlnx-versal-cfu.c @@ -397,6 +397,13 @@ static void cfu_fdro_init(Object *obj) fifo32_create(&s->fdro_data, 8 * KiB / sizeof(uint32_t)); } +static void cfu_fdro_finalize(Object *obj) +{ + XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(obj); + + fifo32_destroy(&s->fdro_data); +} + static void cfu_fdro_reset_enter(Object *obj, ResetType type) { XlnxVersalCFUFDRO *s = XLNX_VERSAL_CFU_FDRO(obj); @@ -419,7 +426,7 @@ static void cfu_fdro_cfi_transfer_packet(XlnxCfiIf *cfi_if, XlnxCfiPacket *pkt) } } -static Property cfu_props[] = { +static const Property cfu_props[] = { DEFINE_PROP_LINK("cframe0", XlnxVersalCFUAPB, cfg.cframe[0], TYPE_XLNX_CFI_IF, XlnxCfiIf *), DEFINE_PROP_LINK("cframe1", XlnxVersalCFUAPB, cfg.cframe[1], @@ -450,13 +457,11 @@ static Property cfu_props[] = { TYPE_XLNX_CFI_IF, XlnxCfiIf *), DEFINE_PROP_LINK("cframe14", XlnxVersalCFUAPB, cfg.cframe[14], TYPE_XLNX_CFI_IF, XlnxCfiIf *), - DEFINE_PROP_END_OF_LIST(), }; -static Property cfu_sfr_props[] = { +static const Property cfu_sfr_props[] = { DEFINE_PROP_LINK("cfu", XlnxVersalCFUSFR, cfg.cfu, TYPE_XLNX_VERSAL_CFU_APB, XlnxVersalCFUAPB *), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_cfu_apb = { @@ -491,16 +496,16 @@ static const VMStateDescription vmstate_cfu_sfr = { } }; -static void cfu_apb_class_init(ObjectClass *klass, void *data) +static void cfu_apb_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = cfu_apb_reset; + device_class_set_legacy_reset(dc, cfu_apb_reset); dc->vmsd = &vmstate_cfu_apb; device_class_set_props(dc, cfu_props); } -static void cfu_fdro_class_init(ObjectClass *klass, void *data) +static void cfu_fdro_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -511,7 +516,7 @@ static void cfu_fdro_class_init(ObjectClass *klass, void *data) rc->phases.enter = cfu_fdro_reset_enter; } -static void cfu_sfr_class_init(ObjectClass *klass, void *data) +static void cfu_sfr_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -527,7 +532,7 @@ static const TypeInfo cfu_apb_info = { .instance_size = sizeof(XlnxVersalCFUAPB), .class_init = cfu_apb_class_init, .instance_init = cfu_apb_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_XLNX_CFI_IF }, { } } @@ -539,7 +544,8 @@ static const TypeInfo cfu_fdro_info = { .instance_size = sizeof(XlnxVersalCFUFDRO), .class_init = cfu_fdro_class_init, .instance_init = cfu_fdro_init, - .interfaces = (InterfaceInfo[]) { + .instance_finalize = cfu_fdro_finalize, + .interfaces = (const InterfaceInfo[]) { { TYPE_XLNX_CFI_IF }, { } } diff --git a/hw/misc/xlnx-versal-crl.c b/hw/misc/xlnx-versal-crl.c index f143900d5b4..08ff2fcc24f 100644 --- a/hw/misc/xlnx-versal-crl.c +++ b/hw/misc/xlnx-versal-crl.c @@ -394,7 +394,7 @@ static const VMStateDescription vmstate_crl = { } }; -static void crl_class_init(ObjectClass *klass, void *data) +static void crl_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/xlnx-versal-pmc-iou-slcr.c b/hw/misc/xlnx-versal-pmc-iou-slcr.c index e469c04d763..d76df468d49 100644 --- a/hw/misc/xlnx-versal-pmc-iou-slcr.c +++ b/hw/misc/xlnx-versal-pmc-iou-slcr.c @@ -1419,7 +1419,8 @@ static const VMStateDescription vmstate_pmc_iou_slcr 
= { } }; -static void xlnx_versal_pmc_iou_slcr_class_init(ObjectClass *klass, void *data) +static void xlnx_versal_pmc_iou_slcr_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/misc/xlnx-versal-trng.c b/hw/misc/xlnx-versal-trng.c index 51eb7600414..f34dd3ef352 100644 --- a/hw/misc/xlnx-versal-trng.c +++ b/hw/misc/xlnx-versal-trng.c @@ -608,9 +608,8 @@ static void trng_init(Object *obj) { XlnxVersalTRng *s = XLNX_VERSAL_TRNG(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - RegisterInfoArray *reg_array; - reg_array = + s->reg_array = register_init_block32(DEVICE(obj), trng_regs_info, ARRAY_SIZE(trng_regs_info), s->regs_info, s->regs, @@ -618,16 +617,17 @@ static void trng_init(Object *obj) XLNX_VERSAL_TRNG_ERR_DEBUG, R_MAX * 4); - sysbus_init_mmio(sbd, ®_array->mem); + sysbus_init_mmio(sbd, &s->reg_array->mem); sysbus_init_irq(sbd, &s->irq); s->prng = g_rand_new(); } -static void trng_unrealize(DeviceState *dev) +static void trng_finalize(Object *obj) { - XlnxVersalTRng *s = XLNX_VERSAL_TRNG(dev); + XlnxVersalTRng *s = XLNX_VERSAL_TRNG(obj); + register_finalize_block(s->reg_array); g_rand_free(s->prng); s->prng = NULL; } @@ -641,7 +641,7 @@ static void trng_prop_fault_event_set(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint32_t *events = object_field_prop_ptr(obj, prop); if (!visit_type_uint32(v, name, events, errp)) { @@ -652,7 +652,7 @@ static void trng_prop_fault_event_set(Object *obj, Visitor *v, } static const PropertyInfo trng_prop_fault_events = { - .name = "uint32:bits", + .type = "uint32", .description = "Set to trigger TRNG fault events", .set = trng_prop_fault_event_set, .realized_set_allowed = true, @@ -660,13 +660,12 @@ static const PropertyInfo trng_prop_fault_events = { static PropertyInfo trng_prop_uint64; /* to extend qdev_prop_uint64 */ -static Property trng_props[] = { - DEFINE_PROP_UINT64("forced-prng", XlnxVersalTRng, forced_prng_seed, 0), +static const Property trng_props[] = { + DEFINE_PROP_UNSIGNED("forced-prng", XlnxVersalTRng, forced_prng_seed, + 0, trng_prop_uint64, uint64_t), DEFINE_PROP_UINT32("hw-version", XlnxVersalTRng, hw_version, 0x0200), DEFINE_PROP("fips-fault-events", XlnxVersalTRng, forced_faults, trng_prop_fault_events, uint32_t), - - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_trng = { @@ -683,19 +682,17 @@ static const VMStateDescription vmstate_trng = { } }; -static void trng_class_init(ObjectClass *klass, void *data) +static void trng_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); dc->vmsd = &vmstate_trng; - dc->unrealize = trng_unrealize; rc->phases.hold = trng_reset_hold; /* Clone uint64 property with set allowed after realized */ trng_prop_uint64 = qdev_prop_uint64; trng_prop_uint64.realized_set_allowed = true; - trng_props[0].info = &trng_prop_uint64; device_class_set_props(dc, trng_props); } @@ -706,6 +703,7 @@ static const TypeInfo trng_info = { .instance_size = sizeof(XlnxVersalTRng), .class_init = trng_class_init, .instance_init = trng_init, + .instance_finalize = trng_finalize, }; static void trng_register_types(void) diff --git a/hw/misc/xlnx-versal-xramc.c b/hw/misc/xlnx-versal-xramc.c index ad839ce7e9f..07370b80c0d 100644 --- a/hw/misc/xlnx-versal-xramc.c +++ b/hw/misc/xlnx-versal-xramc.c @@ -218,12 +218,11 @@ static const 
VMStateDescription vmstate_xram_ctrl = { } }; -static Property xram_ctrl_properties[] = { +static const Property xram_ctrl_properties[] = { DEFINE_PROP_UINT64("size", XlnxXramCtrl, cfg.size, 1 * MiB), - DEFINE_PROP_END_OF_LIST(), }; -static void xram_ctrl_class_init(ObjectClass *klass, void *data) +static void xram_ctrl_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/xlnx-zynqmp-apu-ctrl.c b/hw/misc/xlnx-zynqmp-apu-ctrl.c index 87e4a140679..e85da32d99c 100644 --- a/hw/misc/xlnx-zynqmp-apu-ctrl.c +++ b/hw/misc/xlnx-zynqmp-apu-ctrl.c @@ -224,7 +224,7 @@ static const VMStateDescription vmstate_zynqmp_apu = { } }; -static void zynqmp_apu_class_init(ObjectClass *klass, void *data) +static void zynqmp_apu_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/xlnx-zynqmp-crf.c b/hw/misc/xlnx-zynqmp-crf.c index e5aba56f691..cccca0e814e 100644 --- a/hw/misc/xlnx-zynqmp-crf.c +++ b/hw/misc/xlnx-zynqmp-crf.c @@ -239,7 +239,7 @@ static const VMStateDescription vmstate_crf = { } }; -static void crf_class_init(ObjectClass *klass, void *data) +static void crf_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c index ad814c3a79b..010387beec4 100644 --- a/hw/misc/zynq_slcr.c +++ b/hw/misc/zynq_slcr.c @@ -16,7 +16,7 @@ #include "qemu/osdep.h" #include "qemu/timer.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "hw/sysbus.h" #include "migration/vmstate.h" #include "qemu/log.h" @@ -623,12 +623,11 @@ static const VMStateDescription vmstate_zynq_slcr = { } }; -static Property zynq_slcr_props[] = { +static const Property zynq_slcr_props[] = { DEFINE_PROP_UINT8("boot-mode", ZynqSLCRState, boot_mode, 1), - DEFINE_PROP_END_OF_LIST(), }; -static void zynq_slcr_class_init(ObjectClass *klass, void *data) +static void zynq_slcr_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/net/Kconfig b/hw/net/Kconfig index 7fcc0d7faa2..7f80218d10f 100644 --- a/hw/net/Kconfig +++ b/hw/net/Kconfig @@ -62,8 +62,12 @@ config VMXNET3_PCI config SMC91C111 bool +config LAN9118_PHY + bool + config LAN9118 bool + select LAN9118_PHY select PTIMER config NE2000_ISA @@ -89,6 +93,7 @@ config ALLWINNER_SUN8I_EMAC config IMX_FEC bool + select LAN9118_PHY config CADENCE bool diff --git a/hw/net/allwinner-sun8i-emac.c b/hw/net/allwinner-sun8i-emac.c index 108ae9c8535..30a81576b4c 100644 --- a/hw/net/allwinner-sun8i-emac.c +++ b/hw/net/allwinner-sun8i-emac.c @@ -30,7 +30,7 @@ #include "net/checksum.h" #include "qemu/module.h" #include "exec/cpu-common.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/net/allwinner-sun8i-emac.h" /* EMAC register offsets */ @@ -784,7 +784,7 @@ static void allwinner_sun8i_emac_set_link(NetClientState *nc) static const MemoryRegionOps allwinner_sun8i_emac_mem_ops = { .read = allwinner_sun8i_emac_read, .write = allwinner_sun8i_emac_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -829,12 +829,11 @@ static void allwinner_sun8i_emac_realize(DeviceState *dev, Error **errp) qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); } 
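Throughout this series, property arrays become const and lose their DEFINE_PROP_END_OF_LIST() terminator, as in the allwinner_sun8i_emac_properties hunk below. Dropping the sentinel is only safe if the registration helper learns the element count some other way; device_class_set_props() is assumed here to capture it at compile time, roughly as in this sketch (the set_demo_props helpers and DemoState type are illustrative, not quoted from the tree):

    #include "qemu/osdep.h"
    #include "hw/qdev-properties.h"

    typedef struct DemoState {
        DeviceState parent_obj;
        uint8_t phy_addr;
    } DemoState;

    /* Hypothetical length-aware setter replacing the end-of-list sentinel. */
    void set_demo_props_n(DeviceClass *dc, const Property *props, size_t n);
    #define set_demo_props(dc, props) \
        set_demo_props_n((dc), (props), ARRAY_SIZE(props))

    static const Property demo_props[] = {
        DEFINE_PROP_UINT8("phy-addr", DemoState, phy_addr, 0),
        /* no DEFINE_PROP_END_OF_LIST() entry required */
    };

    static void demo_class_init(ObjectClass *klass, const void *data)
    {
        /* ARRAY_SIZE() only works where demo_props is visible as an array,
         * so the count must be taken at this call site, not inside a callee. */
        set_demo_props(DEVICE_CLASS(klass), demo_props);
    }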
-static Property allwinner_sun8i_emac_properties[] = { +static const Property allwinner_sun8i_emac_properties[] = { DEFINE_NIC_PROPERTIES(AwSun8iEmacState, conf), DEFINE_PROP_UINT8("phy-addr", AwSun8iEmacState, mii_phy_addr, 0), DEFINE_PROP_LINK("dma-memory", AwSun8iEmacState, dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; static int allwinner_sun8i_emac_post_load(void *opaque, int version_id) @@ -876,12 +875,13 @@ static const VMStateDescription vmstate_aw_emac = { } }; -static void allwinner_sun8i_emac_class_init(ObjectClass *klass, void *data) +static void allwinner_sun8i_emac_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = allwinner_sun8i_emac_realize; - dc->reset = allwinner_sun8i_emac_reset; + device_class_set_legacy_reset(dc, allwinner_sun8i_emac_reset); dc->vmsd = &vmstate_aw_emac; device_class_set_props(dc, allwinner_sun8i_emac_properties); } diff --git a/hw/net/allwinner_emac.c b/hw/net/allwinner_emac.c index d40ff37e994..77d089d9887 100644 --- a/hw/net/allwinner_emac.c +++ b/hw/net/allwinner_emac.c @@ -421,7 +421,7 @@ static void aw_emac_set_link(NetClientState *nc) static const MemoryRegionOps aw_emac_mem_ops = { .read = aw_emac_read, .write = aw_emac_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -462,10 +462,9 @@ static void aw_emac_realize(DeviceState *dev, Error **errp) fifo8_create(&s->tx_fifo[1], TX_FIFO_SIZE); } -static Property aw_emac_properties[] = { +static const Property aw_emac_properties[] = { DEFINE_NIC_PROPERTIES(AwEmacState, conf), DEFINE_PROP_UINT8("phy-addr", AwEmacState, phy_addr, 0), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_mii = { @@ -515,13 +514,13 @@ static const VMStateDescription vmstate_aw_emac = { } }; -static void aw_emac_class_init(ObjectClass *klass, void *data) +static void aw_emac_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aw_emac_realize; device_class_set_props(dc, aw_emac_properties); - dc->reset = aw_emac_reset; + device_class_set_legacy_reset(dc, aw_emac_reset); dc->vmsd = &vmstate_aw_emac; } diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c index ec7bf562e57..44446666deb 100644 --- a/hw/net/cadence_gem.c +++ b/hw/net/cadence_gem.c @@ -23,7 +23,7 @@ */ #include "qemu/osdep.h" -#include /* For crc32 */ +#include /* for crc32 */ #include "hw/irq.h" #include "hw/net/cadence_gem.h" @@ -33,7 +33,7 @@ #include "qapi/error.h" #include "qemu/log.h" #include "qemu/module.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "net/checksum.h" #include "net/eth.h" @@ -909,8 +909,8 @@ static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr, /* Compare A, B, C */ for (j = 0; j < 3; j++) { - uint32_t cr0, cr1, mask, compare; - uint16_t rx_cmp; + uint32_t cr0, cr1, mask, compare, disable_mask; + uint32_t rx_cmp; int offset; int cr_idx = extract32(reg, R_SCREENING_TYPE2_REG0_COMPARE_A_SHIFT + j * 6, R_SCREENING_TYPE2_REG0_COMPARE_A_LENGTH); @@ -946,9 +946,25 @@ static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr, break; } - rx_cmp = rxbuf_ptr[offset] << 8 | rxbuf_ptr[offset]; - mask = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, MASK_VALUE); - compare = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE); + disable_mask = + FIELD_EX32(cr1, TYPE2_COMPARE_0_WORD_1, DISABLE_MASK); + if (disable_mask) { + /* + * If disable_mask is set, 
mask_value is used as an + * additional 2 byte Compare Value; that is equivalent + * to using the whole cr0 register as the comparison value. + * Load 32 bits of data from rx_buf, and set mask to + * all-ones so we compare all 32 bits. + */ + rx_cmp = ldl_le_p(rxbuf_ptr + offset); + mask = 0xFFFFFFFF; + compare = cr0; + } else { + rx_cmp = lduw_le_p(rxbuf_ptr + offset); + mask = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, MASK_VALUE); + compare = + FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE); + } if ((rx_cmp & mask) == (compare & mask)) { matched = true; @@ -1740,6 +1756,7 @@ static void gem_realize(DeviceState *dev, Error **errp) sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]); } + gem_init_register_masks(s); qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_gem_info, &s->conf, @@ -1760,7 +1777,6 @@ static void gem_init(Object *obj) DB_PRINT("\n"); - gem_init_register_masks(s); memory_region_init_io(&s->iomem, OBJECT(s), &gem_ops, s, "enet", sizeof(s->regs)); @@ -1784,7 +1800,7 @@ static const VMStateDescription vmstate_cadence_gem = { } }; -static Property gem_properties[] = { +static const Property gem_properties[] = { DEFINE_NIC_PROPERTIES(CadenceGEMState, conf), DEFINE_PROP_UINT32("revision", CadenceGEMState, revision, GEM_MODID_VALUE), @@ -1799,17 +1815,16 @@ static Property gem_properties[] = { jumbo_max_len, 10240), DEFINE_PROP_LINK("dma", CadenceGEMState, dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void gem_class_init(ObjectClass *klass, void *data) +static void gem_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = gem_realize; device_class_set_props(dc, gem_properties); dc->vmsd = &vmstate_cadence_gem; - dc->reset = gem_reset; + device_class_set_legacy_reset(dc, gem_reset); } static const TypeInfo gem_info = { diff --git a/hw/net/can/can_kvaser_pci.c b/hw/net/can/can_kvaser_pci.c index bf41e6b2612..be16769de29 100644 --- a/hw/net/can/can_kvaser_pci.c +++ b/hw/net/can/can_kvaser_pci.c @@ -30,12 +30,8 @@ */ #include "qemu/osdep.h" -#include "qemu/event_notifier.h" #include "qemu/module.h" -#include "qemu/thread.h" -#include "qemu/sockets.h" #include "qapi/error.h" -#include "chardev/char.h" #include "hw/irq.h" #include "hw/pci/pci_device.h" #include "hw/qdev-properties.h" @@ -286,7 +282,7 @@ static void kvaser_pci_instance_init(Object *obj) 0); } -static void kvaser_pci_class_init(ObjectClass *klass, void *data) +static void kvaser_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -299,7 +295,7 @@ static void kvaser_pci_class_init(ObjectClass *klass, void *data) k->class_id = 0x00ff00; dc->desc = "Kvaser PCICANx"; dc->vmsd = &vmstate_kvaser_pci; - dc->reset = kvaser_pci_reset; + device_class_set_legacy_reset(dc, kvaser_pci_reset); set_bit(DEVICE_CATEGORY_MISC, dc->categories); } @@ -309,7 +305,7 @@ static const TypeInfo kvaser_pci_info = { .instance_size = sizeof(KvaserPCIState), .class_init = kvaser_pci_class_init, .instance_init = kvaser_pci_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/net/can/can_mioe3680_pci.c b/hw/net/can/can_mioe3680_pci.c index 308b17e0c00..44f3ba370dc 100644 --- a/hw/net/can/can_mioe3680_pci.c +++ b/hw/net/can/can_mioe3680_pci.c @@ -26,12 +26,8 @@ */ #include "qemu/osdep.h" -#include "qemu/event_notifier.h" #include "qemu/module.h" 
-#include "qemu/thread.h" -#include "qemu/sockets.h" #include "qapi/error.h" -#include "chardev/char.h" #include "hw/irq.h" #include "hw/pci/pci_device.h" #include "hw/qdev-properties.h" @@ -227,7 +223,7 @@ static void mioe3680_pci_instance_init(Object *obj) 0); } -static void mioe3680_pci_class_init(ObjectClass *klass, void *data) +static void mioe3680_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -243,7 +239,7 @@ static void mioe3680_pci_class_init(ObjectClass *klass, void *data) dc->desc = "Mioe3680 PCICANx"; dc->vmsd = &vmstate_mioe3680_pci; set_bit(DEVICE_CATEGORY_MISC, dc->categories); - dc->reset = mioe3680_pci_reset; + device_class_set_legacy_reset(dc, mioe3680_pci_reset); } static const TypeInfo mioe3680_pci_info = { @@ -252,7 +248,7 @@ static const TypeInfo mioe3680_pci_info = { .instance_size = sizeof(Mioe3680PCIState), .class_init = mioe3680_pci_class_init, .instance_init = mioe3680_pci_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/net/can/can_pcm3680_pci.c b/hw/net/can/can_pcm3680_pci.c index e4c8d93b984..7296d63be79 100644 --- a/hw/net/can/can_pcm3680_pci.c +++ b/hw/net/can/can_pcm3680_pci.c @@ -26,12 +26,8 @@ */ #include "qemu/osdep.h" -#include "qemu/event_notifier.h" #include "qemu/module.h" -#include "qemu/thread.h" -#include "qemu/sockets.h" #include "qapi/error.h" -#include "chardev/char.h" #include "hw/irq.h" #include "hw/pci/pci_device.h" #include "hw/qdev-properties.h" @@ -228,7 +224,7 @@ static void pcm3680i_pci_instance_init(Object *obj) 0); } -static void pcm3680i_pci_class_init(ObjectClass *klass, void *data) +static void pcm3680i_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -244,7 +240,7 @@ static void pcm3680i_pci_class_init(ObjectClass *klass, void *data) dc->desc = "Pcm3680i PCICANx"; dc->vmsd = &vmstate_pcm3680i_pci; set_bit(DEVICE_CATEGORY_MISC, dc->categories); - dc->reset = pcm3680i_pci_reset; + device_class_set_legacy_reset(dc, pcm3680i_pci_reset); } static const TypeInfo pcm3680i_pci_info = { @@ -253,7 +249,7 @@ static const TypeInfo pcm3680i_pci_info = { .instance_size = sizeof(Pcm3680iPCIState), .class_init = pcm3680i_pci_class_init, .instance_init = pcm3680i_pci_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/net/can/can_sja1000.c b/hw/net/can/can_sja1000.c index 6694d7bfd84..5b6ba9df6c4 100644 --- a/hw/net/can/can_sja1000.c +++ b/hw/net/can/can_sja1000.c @@ -27,7 +27,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" -#include "chardev/char.h" +#include "qemu/bitops.h" #include "hw/irq.h" #include "migration/vmstate.h" #include "net/can_emu.h" diff --git a/hw/net/can/ctucan_core.c b/hw/net/can/ctucan_core.c index 812b83e93e1..6bd99c477b2 100644 --- a/hw/net/can/ctucan_core.c +++ b/hw/net/can/ctucan_core.c @@ -28,7 +28,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" -#include "chardev/char.h" +#include "qemu/bitops.h" #include "hw/irq.h" #include "migration/vmstate.h" #include "net/can_emu.h" @@ -399,8 +399,6 @@ void ctucan_mem_write(CtuCanCoreState *s, hwaddr addr, uint64_t val, ctucan_update_irq(s); } - - return; } uint64_t ctucan_mem_read(CtuCanCoreState *s, hwaddr addr, unsigned size) diff --git a/hw/net/can/ctucan_pci.c 
b/hw/net/can/ctucan_pci.c index d8f7344ddc6..bed6785433e 100644 --- a/hw/net/can/ctucan_pci.c +++ b/hw/net/can/ctucan_pci.c @@ -27,12 +27,8 @@ */ #include "qemu/osdep.h" -#include "qemu/event_notifier.h" #include "qemu/module.h" -#include "qemu/thread.h" -#include "qemu/sockets.h" #include "qapi/error.h" -#include "chardev/char.h" #include "hw/irq.h" #include "hw/pci/pci_device.h" #include "hw/qdev-properties.h" @@ -241,7 +237,7 @@ static void ctucan_pci_instance_init(Object *obj) #endif } -static void ctucan_pci_class_init(ObjectClass *klass, void *data) +static void ctucan_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -257,7 +253,7 @@ static void ctucan_pci_class_init(ObjectClass *klass, void *data) dc->desc = "CTU CAN PCI"; dc->vmsd = &vmstate_ctucan_pci; set_bit(DEVICE_CATEGORY_MISC, dc->categories); - dc->reset = ctucan_pci_reset; + device_class_set_legacy_reset(dc, ctucan_pci_reset); } static const TypeInfo ctucan_pci_info = { @@ -266,7 +262,7 @@ static const TypeInfo ctucan_pci_info = { .instance_size = sizeof(CtuCanPCIState), .class_init = ctucan_pci_class_init, .instance_init = ctucan_pci_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/net/can/xlnx-versal-canfd.c b/hw/net/can/xlnx-versal-canfd.c index 5f083c21e93..3eb111949f8 100644 --- a/hw/net/can/xlnx-versal-canfd.c +++ b/hw/net/can/xlnx-versal-canfd.c @@ -678,12 +678,10 @@ REG32(RB_DW15_REGISTER_1, 0x4144) FIELD(RB_DW15_REGISTER_1, DATA_BYTES62, 8, 8) FIELD(RB_DW15_REGISTER_1, DATA_BYTES63, 0, 8) -static uint8_t canfd_dlc_array[8] = {8, 12, 16, 20, 24, 32, 48, 64}; - static void canfd_update_irq(XlnxVersalCANFDState *s) { - unsigned int irq = s->regs[R_INTERRUPT_STATUS_REGISTER] & - s->regs[R_INTERRUPT_ENABLE_REGISTER]; + const bool irq = (s->regs[R_INTERRUPT_STATUS_REGISTER] & + s->regs[R_INTERRUPT_ENABLE_REGISTER]) != 0; g_autofree char *path = object_get_canonical_path(OBJECT(s)); /* RX watermark interrupts. */ @@ -869,6 +867,10 @@ static void regs2frame(XlnxVersalCANFDState *s, qemu_can_frame *frame, uint32_t val = 0; uint32_t dlc_reg_val = 0; uint32_t dlc_value = 0; + uint32_t id_reg_val = 0; + bool is_rtr = false; + + frame->flags = 0; /* Check that reg_num should be within TX register space. */ assert(reg_num <= R_TB_ID_REGISTER + (NUM_REGS_PER_MSG_SPACE * @@ -877,56 +879,37 @@ static void regs2frame(XlnxVersalCANFDState *s, qemu_can_frame *frame, dlc_reg_val = s->regs[reg_num + 1]; dlc_value = FIELD_EX32(dlc_reg_val, TB0_DLC_REGISTER, DLC); - frame->can_id = s->regs[reg_num]; + id_reg_val = s->regs[reg_num]; + if (FIELD_EX32(id_reg_val, TB_ID_REGISTER, IDE)) { + frame->can_id = (FIELD_EX32(id_reg_val, TB_ID_REGISTER, ID) << 18) | + (FIELD_EX32(id_reg_val, TB_ID_REGISTER, ID_EXT)) | + QEMU_CAN_EFF_FLAG; + if (FIELD_EX32(id_reg_val, TB_ID_REGISTER, RTR_RRS)) { + is_rtr = true; + } + } else { + frame->can_id = FIELD_EX32(id_reg_val, TB_ID_REGISTER, ID); + if (FIELD_EX32(id_reg_val, TB_ID_REGISTER, SRR_RTR_RRS)) { + is_rtr = true; + } + } if (FIELD_EX32(dlc_reg_val, TB0_DLC_REGISTER, FDF)) { - /* - * CANFD frame. - * Converting dlc(0 to 15) 4 Byte data to plain length(i.e. 0 to 64) - * 1 Byte data. This is done to make it work with SocketCAN. - * On actual CANFD frame, this value can't be more than 0xF. 
- * Conversion table for DLC to plain length: - * - * DLC Plain Length - * 0 - 8 0 - 8 - * 9 9 - 12 - * 10 13 - 16 - * 11 17 - 20 - * 12 21 - 24 - * 13 25 - 32 - * 14 33 - 48 - * 15 49 - 64 - */ - - frame->flags = QEMU_CAN_FRMF_TYPE_FD; + frame->flags |= QEMU_CAN_FRMF_TYPE_FD; - if (dlc_value < 8) { - frame->can_dlc = dlc_value; - } else { - assert((dlc_value - 8) < ARRAY_SIZE(canfd_dlc_array)); - frame->can_dlc = canfd_dlc_array[dlc_value - 8]; + if (FIELD_EX32(dlc_reg_val, TB0_DLC_REGISTER, BRS)) { + frame->flags |= QEMU_CAN_FRMF_BRS; } } else { - /* - * FD Format bit not set that means it is a CAN Frame. - * Conversion table for classic CAN: - * - * DLC Plain Length - * 0 - 7 0 - 7 - * 8 - 15 8 - */ - - if (dlc_value > 8) { - frame->can_dlc = 8; - qemu_log_mask(LOG_GUEST_ERROR, "Maximum DLC value for Classic CAN" - " frame is 8. Only 8 byte data will be sent.\n"); - } else { - frame->can_dlc = dlc_value; + if (is_rtr) { + frame->can_id |= QEMU_CAN_RTR_FLAG; } } + frame->can_dlc = can_dlc2len(dlc_value); + for (j = 0; j < frame->can_dlc; j++) { - val = 8 * i; + val = 8 * (3 - i); frame->data[j] = extract32(s->regs[reg_num + 2 + (j / 4)], val, 8); i++; @@ -948,6 +931,33 @@ static void process_cancellation_requests(XlnxVersalCANFDState *s) canfd_update_irq(s); } +static uint32_t frame_to_reg_id(const qemu_can_frame *frame) +{ + uint32_t id_reg_val = 0; + const bool is_canfd_frame = frame->flags & QEMU_CAN_FRMF_TYPE_FD; + const bool is_rtr = !is_canfd_frame && (frame->can_id & QEMU_CAN_RTR_FLAG); + + if (frame->can_id & QEMU_CAN_EFF_FLAG) { + id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, ID, + (frame->can_id & QEMU_CAN_EFF_MASK) >> 18); + id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, ID_EXT, + frame->can_id & QEMU_CAN_EFF_MASK); + id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, IDE, 1); + id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, SRR_RTR_RRS, 1); + if (is_rtr) { + id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, RTR_RRS, 1); + } + } else { + id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, ID, + frame->can_id & QEMU_CAN_SFF_MASK); + if (is_rtr) { + id_reg_val |= FIELD_DP32(0, RB_ID_REGISTER, SRR_RTR_RRS, 1); + } + } + + return id_reg_val; +} + static void store_rx_sequential(XlnxVersalCANFDState *s, const qemu_can_frame *frame, uint32_t fill_level, uint32_t read_index, @@ -955,7 +965,6 @@ static void store_rx_sequential(XlnxVersalCANFDState *s, bool rx_fifo_id, uint8_t filter_index) { int i; - bool is_canfd_frame; uint8_t dlc = frame->can_dlc; uint8_t rx_reg_num = 0; uint32_t dlc_reg_val = 0; @@ -999,30 +1008,21 @@ static void store_rx_sequential(XlnxVersalCANFDState *s, NUM_REGS_PER_MSG_SPACE)); } - s->regs[store_location] = frame->can_id; + s->regs[store_location] = frame_to_reg_id(frame); - dlc = frame->can_dlc; + dlc_reg_val = FIELD_DP32(0, RB_DLC_REGISTER, DLC, can_len2dlc(dlc)); - if (frame->flags == QEMU_CAN_FRMF_TYPE_FD) { - is_canfd_frame = true; + if (frame->flags & QEMU_CAN_FRMF_TYPE_FD) { + dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, FDF, 1); - /* Store dlc value in Xilinx specific format. 
*/ - for (i = 0; i < ARRAY_SIZE(canfd_dlc_array); i++) { - if (canfd_dlc_array[i] == frame->can_dlc) { - dlc_reg_val = FIELD_DP32(0, RB_DLC_REGISTER, DLC, 8 + i); - } + if (frame->flags & QEMU_CAN_FRMF_BRS) { + dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, BRS, 1); } - } else { - is_canfd_frame = false; - - if (frame->can_dlc > 8) { - dlc = 8; + if (frame->flags & QEMU_CAN_FRMF_ESI) { + dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, ESI, 1); } - - dlc_reg_val = FIELD_DP32(0, RB_DLC_REGISTER, DLC, dlc); } - dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, FDF, is_canfd_frame); dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, TIMESTAMP, rx_timestamp); dlc_reg_val |= FIELD_DP32(0, RB_DLC_REGISTER, MATCHED_FILTER_INDEX, filter_index); @@ -1034,19 +1034,19 @@ static void store_rx_sequential(XlnxVersalCANFDState *s, case 0: rx_reg_num = i / 4; - data_reg_val = FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES3, + data_reg_val = FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES0, frame->data[i]); break; case 1: - data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES2, + data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES1, frame->data[i]); break; case 2: - data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES1, + data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES2, frame->data[i]); break; case 3: - data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES0, + data_reg_val |= FIELD_DP32(0, RB_DW0_REGISTER, DATA_BYTES3, frame->data[i]); /* * Last Bytes data which means we have all 4 bytes ready to @@ -1090,11 +1090,12 @@ static void update_rx_sequential(XlnxVersalCANFDState *s, if (s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER]) { uint32_t acceptance_filter_status = s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER]; + const uint32_t reg_id = frame_to_reg_id(frame); for (i = 0; i < 32; i++) { if (acceptance_filter_status & 0x1) { uint32_t msg_id_masked = s->regs[R_AFMR_REGISTER + 2 * i] & - frame->can_id; + reg_id; uint32_t afir_id_masked = s->regs[R_AFIR_REGISTER + 2 * i] & s->regs[R_AFMR_REGISTER + 2 * i]; uint16_t std_msg_id_masked = FIELD_EX32(msg_id_masked, @@ -1143,18 +1144,8 @@ static void update_rx_sequential(XlnxVersalCANFDState *s, read_index = ARRAY_FIELD_EX32(s->regs, RX_FIFO_STATUS_REGISTER, RI); store_index = read_index + fill_level; - if (read_index == s->cfg.rx0_fifo - 1) { - /* - * When ri is s->cfg.rx0_fifo - 1 i.e. max, it goes cyclic that - * means we reset the ri to 0x0. - */ - read_index = 0; - ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, RI, - read_index); - } - if (store_index > s->cfg.rx0_fifo - 1) { - store_index -= s->cfg.rx0_fifo - 1; + store_index -= s->cfg.rx0_fifo; } store_location = R_RB_ID_REGISTER + @@ -1171,18 +1162,8 @@ static void update_rx_sequential(XlnxVersalCANFDState *s, RI_1); store_index = read_index + fill_level; - if (read_index == s->cfg.rx1_fifo - 1) { - /* - * When ri is s->cfg.rx1_fifo - 1 i.e. max, it goes cyclic that - * means we reset the ri to 0x0. - */ - read_index = 0; - ARRAY_FIELD_DP32(s->regs, RX_FIFO_STATUS_REGISTER, RI_1, - read_index); - } - if (store_index > s->cfg.rx1_fifo - 1) { - store_index -= s->cfg.rx1_fifo - 1; + store_index -= s->cfg.rx1_fifo; } store_location = R_RB_ID_REGISTER_1 + @@ -1264,18 +1245,8 @@ static void tx_fifo_stamp(XlnxVersalCANFDState *s, uint32_t tb0_regid) " Discarding the message\n"); ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXEOFLW, 1); } else { - if (read_index == s->cfg.tx_fifo - 1) { - /* - * When ri is s->cfg.tx_fifo - 1 i.e. max, it goes cyclic that - * means we reset the ri to 0x0. 
- */ - read_index = 0; - ARRAY_FIELD_DP32(s->regs, TX_EVENT_FIFO_STATUS_REGISTER, TXE_RI, - read_index); - } - if (store_index > s->cfg.tx_fifo - 1) { - store_index -= s->cfg.tx_fifo - 1; + store_index -= s->cfg.tx_fifo; } assert(store_index < s->cfg.tx_fifo); @@ -1307,7 +1278,7 @@ static void tx_fifo_stamp(XlnxVersalCANFDState *s, uint32_t tb0_regid) } } -static gint g_cmp_ids(gconstpointer data1, gconstpointer data2) +static gint g_cmp_ids(gconstpointer data1, gconstpointer data2, gpointer d) { tx_ready_reg_info *tx_reg_1 = (tx_ready_reg_info *) data1; tx_ready_reg_info *tx_reg_2 = (tx_ready_reg_info *) data2; @@ -1327,8 +1298,6 @@ static void free_list(GSList *list) } g_slist_free(list); - - return; } static GSList *prepare_tx_data(XlnxVersalCANFDState *s) @@ -1347,7 +1316,7 @@ static GSList *prepare_tx_data(XlnxVersalCANFDState *s) temp->can_id = s->regs[reg_num]; temp->reg_num = reg_num; list = g_slist_prepend(list, temp); - list = g_slist_sort(list, g_cmp_ids); + list = g_slist_sort_with_data(list, g_cmp_ids, NULL); } reg_ready >>= 1; @@ -2071,7 +2040,7 @@ static const VMStateDescription vmstate_canfd = { } }; -static Property canfd_core_properties[] = { +static const Property canfd_core_properties[] = { DEFINE_PROP_UINT8("rx-fifo0", XlnxVersalCANFDState, cfg.rx0_fifo, 0x40), DEFINE_PROP_UINT8("rx-fifo1", XlnxVersalCANFDState, cfg.rx1_fifo, 0x40), DEFINE_PROP_UINT8("tx-fifo", XlnxVersalCANFDState, cfg.tx_fifo, 0x20), @@ -2081,14 +2050,13 @@ static Property canfd_core_properties[] = { CANFD_DEFAULT_CLOCK), DEFINE_PROP_LINK("canfdbus", XlnxVersalCANFDState, canfdbus, TYPE_CAN_BUS, CanBusState *), - DEFINE_PROP_END_OF_LIST(), }; -static void canfd_class_init(ObjectClass *klass, void *data) +static void canfd_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = canfd_reset; + device_class_set_legacy_reset(dc, canfd_reset); dc->realize = canfd_realize; device_class_set_props(dc, canfd_core_properties); dc->vmsd = &vmstate_canfd; diff --git a/hw/net/can/xlnx-zynqmp-can.c b/hw/net/can/xlnx-zynqmp-can.c index 58f1432bb35..ca9edd4a5b8 100644 --- a/hw/net/can/xlnx-zynqmp-can.c +++ b/hw/net/can/xlnx-zynqmp-can.c @@ -1169,15 +1169,14 @@ static const VMStateDescription vmstate_can = { } }; -static Property xlnx_zynqmp_can_properties[] = { +static const Property xlnx_zynqmp_can_properties[] = { DEFINE_PROP_UINT32("ext_clk_freq", XlnxZynqMPCANState, cfg.ext_clk_freq, CAN_DEFAULT_CLOCK), DEFINE_PROP_LINK("canbus", XlnxZynqMPCANState, canbus, TYPE_CAN_BUS, CanBusState *), - DEFINE_PROP_END_OF_LIST(), }; -static void xlnx_zynqmp_can_class_init(ObjectClass *klass, void *data) +static void xlnx_zynqmp_can_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/net/dp8393x.c b/hw/net/dp8393x.c index bf0652da1b4..d49032059bb 100644 --- a/hw/net/dp8393x.c +++ b/hw/net/dp8393x.c @@ -27,7 +27,7 @@ #include "qapi/error.h" #include "qemu/module.h" #include "qemu/timer.h" -#include +#include /* for crc32 */ #include "qom/object.h" #include "trace.h" @@ -931,22 +931,21 @@ static const VMStateDescription vmstate_dp8393x = { } }; -static Property dp8393x_properties[] = { +static const Property dp8393x_properties[] = { DEFINE_NIC_PROPERTIES(dp8393xState, conf), DEFINE_PROP_LINK("dma_mr", dp8393xState, dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), DEFINE_PROP_UINT8("it_shift", dp8393xState, it_shift, 0), DEFINE_PROP_BOOL("big_endian", dp8393xState, big_endian, false), - 
DEFINE_PROP_END_OF_LIST(), }; -static void dp8393x_class_init(ObjectClass *klass, void *data) +static void dp8393x_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); dc->realize = dp8393x_realize; - dc->reset = dp8393x_reset; + device_class_set_legacy_reset(dc, dp8393x_reset); dc->vmsd = &vmstate_dp8393x; device_class_set_props(dc, dp8393x_properties); } diff --git a/hw/net/e1000.c b/hw/net/e1000.c index 5012b964640..a80a7b0cdb4 100644 --- a/hw/net/e1000.c +++ b/hw/net/e1000.c @@ -33,8 +33,8 @@ #include "net/eth.h" #include "net/net.h" #include "net/checksum.h" -#include "sysemu/sysemu.h" -#include "sysemu/dma.h" +#include "system/system.h" +#include "system/dma.h" #include "qemu/iov.h" #include "qemu/module.h" #include "qemu/range.h" @@ -127,10 +127,8 @@ struct E1000State_st { QEMUTimer *flush_queue_timer; /* Compatibility flags for migration to/from qemu 1.3.0 and older */ -#define E1000_FLAG_MAC_BIT 2 #define E1000_FLAG_TSO_BIT 3 #define E1000_FLAG_VET_BIT 4 -#define E1000_FLAG_MAC (1 << E1000_FLAG_MAC_BIT) #define E1000_FLAG_TSO (1 << E1000_FLAG_TSO_BIT) #define E1000_FLAG_VET (1 << E1000_FLAG_VET_BIT) @@ -1212,52 +1210,51 @@ enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) }; enum { MAC_ACCESS_PARTIAL = 1, MAC_ACCESS_FLAG_NEEDED = 2 }; -#define markflag(x) ((E1000_FLAG_##x << 2) | MAC_ACCESS_FLAG_NEEDED) /* In the array below the meaning of the bits is: [f|f|f|f|f|f|n|p] * f - flag bits (up to 6 possible flags) * n - flag needed - * p - partially implenented */ + * p - partially implemented */ static const uint8_t mac_reg_access[0x8000] = { - [IPAV] = markflag(MAC), [WUC] = markflag(MAC), - [IP6AT] = markflag(MAC), [IP4AT] = markflag(MAC), - [FFVT] = markflag(MAC), [WUPM] = markflag(MAC), - [ECOL] = markflag(MAC), [MCC] = markflag(MAC), - [DC] = markflag(MAC), [TNCRS] = markflag(MAC), - [RLEC] = markflag(MAC), [XONRXC] = markflag(MAC), - [XOFFTXC] = markflag(MAC), [RFC] = markflag(MAC), - [TSCTFC] = markflag(MAC), [MGTPRC] = markflag(MAC), - [WUS] = markflag(MAC), [AIT] = markflag(MAC), - [FFLT] = markflag(MAC), [FFMT] = markflag(MAC), - [SCC] = markflag(MAC), [FCRUC] = markflag(MAC), - [LATECOL] = markflag(MAC), [COLC] = markflag(MAC), - [SEQEC] = markflag(MAC), [CEXTERR] = markflag(MAC), - [XONTXC] = markflag(MAC), [XOFFRXC] = markflag(MAC), - [RJC] = markflag(MAC), [RNBC] = markflag(MAC), - [MGTPDC] = markflag(MAC), [MGTPTC] = markflag(MAC), - [RUC] = markflag(MAC), [ROC] = markflag(MAC), - [GORCL] = markflag(MAC), [GORCH] = markflag(MAC), - [GOTCL] = markflag(MAC), [GOTCH] = markflag(MAC), - [BPRC] = markflag(MAC), [MPRC] = markflag(MAC), - [TSCTC] = markflag(MAC), [PRC64] = markflag(MAC), - [PRC127] = markflag(MAC), [PRC255] = markflag(MAC), - [PRC511] = markflag(MAC), [PRC1023] = markflag(MAC), - [PRC1522] = markflag(MAC), [PTC64] = markflag(MAC), - [PTC127] = markflag(MAC), [PTC255] = markflag(MAC), - [PTC511] = markflag(MAC), [PTC1023] = markflag(MAC), - [PTC1522] = markflag(MAC), [MPTC] = markflag(MAC), - [BPTC] = markflag(MAC), - - [TDFH] = markflag(MAC) | MAC_ACCESS_PARTIAL, - [TDFT] = markflag(MAC) | MAC_ACCESS_PARTIAL, - [TDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL, - [TDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL, - [TDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL, - [RDFH] = markflag(MAC) | MAC_ACCESS_PARTIAL, - [RDFT] = markflag(MAC) | MAC_ACCESS_PARTIAL, - [RDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL, - [RDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL, - [RDFPC] = markflag(MAC) | 
MAC_ACCESS_PARTIAL, - [PBM] = markflag(MAC) | MAC_ACCESS_PARTIAL, + [IPAV] = MAC_ACCESS_FLAG_NEEDED, [WUC] = MAC_ACCESS_FLAG_NEEDED, + [IP6AT] = MAC_ACCESS_FLAG_NEEDED, [IP4AT] = MAC_ACCESS_FLAG_NEEDED, + [FFVT] = MAC_ACCESS_FLAG_NEEDED, [WUPM] = MAC_ACCESS_FLAG_NEEDED, + [ECOL] = MAC_ACCESS_FLAG_NEEDED, [MCC] = MAC_ACCESS_FLAG_NEEDED, + [DC] = MAC_ACCESS_FLAG_NEEDED, [TNCRS] = MAC_ACCESS_FLAG_NEEDED, + [RLEC] = MAC_ACCESS_FLAG_NEEDED, [XONRXC] = MAC_ACCESS_FLAG_NEEDED, + [XOFFTXC] = MAC_ACCESS_FLAG_NEEDED, [RFC] = MAC_ACCESS_FLAG_NEEDED, + [TSCTFC] = MAC_ACCESS_FLAG_NEEDED, [MGTPRC] = MAC_ACCESS_FLAG_NEEDED, + [WUS] = MAC_ACCESS_FLAG_NEEDED, [AIT] = MAC_ACCESS_FLAG_NEEDED, + [FFLT] = MAC_ACCESS_FLAG_NEEDED, [FFMT] = MAC_ACCESS_FLAG_NEEDED, + [SCC] = MAC_ACCESS_FLAG_NEEDED, [FCRUC] = MAC_ACCESS_FLAG_NEEDED, + [LATECOL] = MAC_ACCESS_FLAG_NEEDED, [COLC] = MAC_ACCESS_FLAG_NEEDED, + [SEQEC] = MAC_ACCESS_FLAG_NEEDED, [CEXTERR] = MAC_ACCESS_FLAG_NEEDED, + [XONTXC] = MAC_ACCESS_FLAG_NEEDED, [XOFFRXC] = MAC_ACCESS_FLAG_NEEDED, + [RJC] = MAC_ACCESS_FLAG_NEEDED, [RNBC] = MAC_ACCESS_FLAG_NEEDED, + [MGTPDC] = MAC_ACCESS_FLAG_NEEDED, [MGTPTC] = MAC_ACCESS_FLAG_NEEDED, + [RUC] = MAC_ACCESS_FLAG_NEEDED, [ROC] = MAC_ACCESS_FLAG_NEEDED, + [GORCL] = MAC_ACCESS_FLAG_NEEDED, [GORCH] = MAC_ACCESS_FLAG_NEEDED, + [GOTCL] = MAC_ACCESS_FLAG_NEEDED, [GOTCH] = MAC_ACCESS_FLAG_NEEDED, + [BPRC] = MAC_ACCESS_FLAG_NEEDED, [MPRC] = MAC_ACCESS_FLAG_NEEDED, + [TSCTC] = MAC_ACCESS_FLAG_NEEDED, [PRC64] = MAC_ACCESS_FLAG_NEEDED, + [PRC127] = MAC_ACCESS_FLAG_NEEDED, [PRC255] = MAC_ACCESS_FLAG_NEEDED, + [PRC511] = MAC_ACCESS_FLAG_NEEDED, [PRC1023] = MAC_ACCESS_FLAG_NEEDED, + [PRC1522] = MAC_ACCESS_FLAG_NEEDED, [PTC64] = MAC_ACCESS_FLAG_NEEDED, + [PTC127] = MAC_ACCESS_FLAG_NEEDED, [PTC255] = MAC_ACCESS_FLAG_NEEDED, + [PTC511] = MAC_ACCESS_FLAG_NEEDED, [PTC1023] = MAC_ACCESS_FLAG_NEEDED, + [PTC1522] = MAC_ACCESS_FLAG_NEEDED, [MPTC] = MAC_ACCESS_FLAG_NEEDED, + [BPTC] = MAC_ACCESS_FLAG_NEEDED, + + [TDFH] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL, + [TDFT] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL, + [TDFHS] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL, + [TDFTS] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL, + [TDFPC] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL, + [RDFH] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL, + [RDFT] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL, + [RDFHS] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL, + [RDFTS] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL, + [RDFPC] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL, + [PBM] = MAC_ACCESS_FLAG_NEEDED | MAC_ACCESS_PARTIAL, }; static void @@ -1419,13 +1416,6 @@ static int e1000_tx_tso_post_load(void *opaque, int version_id) return 0; } -static bool e1000_full_mac_needed(void *opaque) -{ - E1000State *s = opaque; - - return chkflag(MAC); -} - static bool e1000_tso_state_needed(void *opaque) { E1000State *s = opaque; @@ -1451,7 +1441,6 @@ static const VMStateDescription vmstate_e1000_full_mac_state = { .name = "e1000/full_mac_state", .version_id = 1, .minimum_version_id = 1, - .needed = e1000_full_mac_needed, .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(mac_reg, E1000State, 0x8000), VMSTATE_END_OF_LIST() @@ -1677,15 +1666,12 @@ static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp) e1000_flush_queue_timer, d); } -static Property e1000_properties[] = { +static const Property e1000_properties[] = { DEFINE_NIC_PROPERTIES(E1000State, conf), - DEFINE_PROP_BIT("extra_mac_registers", E1000State, - compat_flags, E1000_FLAG_MAC_BIT, true), 
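The mac_reg_access[] table above keeps the [f|f|f|f|f|f|n|p] encoding described in its comment: bit 0 flags a partially implemented register, bit 1 flags a register that requires a compatibility flag, and bits 2..7 name the E1000_FLAG_* value that grants access (which is what the removed markflag() macro packed in). After this hunk no per-register flag bits remain, so only MAC_ACCESS_FLAG_NEEDED and MAC_ACCESS_PARTIAL are still used. A simplified decoder for one table entry, as an illustration of the encoding rather than the actual access-check code in e1000.c:

    /* Illustrative decode of one mac_reg_access[] entry. */
    static bool demo_mac_reg_allowed(uint8_t entry, uint32_t compat_flags,
                                     bool *partially_implemented)
    {
        *partially_implemented = (entry & MAC_ACCESS_PARTIAL) != 0;

        if (!(entry & MAC_ACCESS_FLAG_NEEDED)) {
            return true;                 /* register always accessible */
        }
        /* Bits 2..7 hold the compat flag that must be set to allow access. */
        return (compat_flags & (entry >> 2)) != 0;
    }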
DEFINE_PROP_BIT("migrate_tso_props", E1000State, compat_flags, E1000_FLAG_TSO_BIT, true), DEFINE_PROP_BIT("init-vet", E1000State, compat_flags, E1000_FLAG_VET_BIT, true), - DEFINE_PROP_END_OF_LIST(), }; typedef struct E1000Info { @@ -1695,7 +1681,7 @@ typedef struct E1000Info { uint16_t phy_id2; } E1000Info; -static void e1000_class_init(ObjectClass *klass, void *data) +static void e1000_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -1733,7 +1719,7 @@ static const TypeInfo e1000_base_info = { .instance_init = e1000_instance_init, .class_size = sizeof(E1000BaseClass), .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -1771,10 +1757,10 @@ static void e1000_register_types(void) type_info.name = info->name; type_info.parent = TYPE_E1000_BASE; - type_info.class_data = (void *)info; + type_info.class_data = info; type_info.class_init = e1000_class_init; - type_register(&type_info); + type_register_static(&type_info); } } diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c index 843892ce093..89e6d52ba0f 100644 --- a/hw/net/e1000e.c +++ b/hw/net/e1000e.c @@ -40,7 +40,7 @@ #include "net/tap.h" #include "qemu/module.h" #include "qemu/range.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/hw.h" #include "hw/net/mii.h" #include "hw/pci/msi.h" @@ -372,8 +372,7 @@ static int e1000e_add_pm_capability(PCIDevice *pdev, uint8_t offset, uint16_t pmc) { Error *local_err = NULL; - int ret = pci_add_capability(pdev, PCI_CAP_ID_PM, offset, - PCI_PM_SIZEOF, &local_err); + int ret = pci_pm_init(pdev, offset, &local_err); if (local_err) { error_report_err(local_err); @@ -661,7 +660,7 @@ static PropertyInfo e1000e_prop_disable_vnet, e1000e_prop_subsys_ven, e1000e_prop_subsys; -static Property e1000e_properties[] = { +static const Property e1000e_properties[] = { DEFINE_NIC_PROPERTIES(E1000EState, conf), DEFINE_PROP_SIGNED("disable_vnet_hdr", E1000EState, disable_vnet, false, e1000e_prop_disable_vnet, bool), @@ -672,10 +671,9 @@ static Property e1000e_properties[] = { e1000e_prop_subsys, uint16_t), DEFINE_PROP_BOOL("init-vet", E1000EState, init_vet, true), DEFINE_PROP_BOOL("migrate-timadj", E1000EState, timadj, true), - DEFINE_PROP_END_OF_LIST(), }; -static void e1000e_class_init(ObjectClass *class, void *data) +static void e1000e_class_init(ObjectClass *class, const void *data) { DeviceClass *dc = DEVICE_CLASS(class); ResettableClass *rc = RESETTABLE_CLASS(class); @@ -723,7 +721,7 @@ static const TypeInfo e1000e_info = { .instance_size = sizeof(E1000EState), .class_init = e1000e_class_init, .instance_init = e1000e_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { } }, diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c index 3ae2a184d5d..24138587905 100644 --- a/hw/net/e1000e_core.c +++ b/hw/net/e1000e_core.c @@ -40,7 +40,7 @@ #include "hw/net/mii.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "net_tx_pkt.h" #include "net_rx_pkt.h" @@ -561,8 +561,7 @@ e1000e_rss_calc_hash(E1000ECore *core, type = NetPktRssIpV6Ex; break; default: - assert(false); - return 0; + g_assert_not_reached(); } return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]); @@ -841,7 +840,6 @@ e1000e_ring_free_descr_num(E1000ECore *core, const E1000ERingInfo *r) } 
g_assert_not_reached(); - return 0; } static inline bool diff --git a/hw/net/e1000x_regs.h b/hw/net/e1000x_regs.h index cd896fc0ca6..e9a74de6f42 100644 --- a/hw/net/e1000x_regs.h +++ b/hw/net/e1000x_regs.h @@ -900,7 +900,7 @@ struct e1000_context_desc { uint16_t tucse; /* TCP checksum end */ } tcp_fields; } upper_setup; - uint32_t cmd_and_length; /* */ + uint32_t cmd_and_length; union { uint32_t data; struct { diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c index d9a70c4544d..d47df5a97fd 100644 --- a/hw/net/eepro100.c +++ b/hw/net/eepro100.c @@ -6,10 +6,12 @@ * Portions of the code are copies from grub / etherboot eepro100.c * and linux e100.c. * + * SPDX-License-Identifier: GPL-2.0-or-later + * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or - * (at your option) version 3 or any later version. + * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -48,9 +50,9 @@ #include "net/net.h" #include "net/eth.h" #include "hw/nvram/eeprom93xx.h" -#include "sysemu/sysemu.h" -#include "sysemu/dma.h" -#include "sysemu/reset.h" +#include "system/system.h" +#include "system/dma.h" +#include "system/reset.h" #include "qemu/bitops.h" #include "qemu/module.h" #include "qapi/error.h" @@ -549,9 +551,7 @@ static void e100_pci_reset(EEPRO100State *s, Error **errp) if (info->power_management) { /* Power Management Capabilities */ int cfg_offset = 0xdc; - int r = pci_add_capability(&s->dev, PCI_CAP_ID_PM, - cfg_offset, PCI_PM_SIZEOF, - errp); + int r = pci_pm_init(&s->dev, cfg_offset, errp); if (r < 0) { return; } @@ -2056,12 +2056,11 @@ static E100PCIDeviceInfo *eepro100_get_class(EEPRO100State *s) return eepro100_get_class_by_name(object_get_typename(OBJECT(s))); } -static Property e100_properties[] = { +static const Property e100_properties[] = { DEFINE_NIC_PROPERTIES(EEPRO100State, conf), - DEFINE_PROP_END_OF_LIST(), }; -static void eepro100_class_init(ObjectClass *klass, void *data) +static void eepro100_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -2095,12 +2094,12 @@ static void eepro100_register_types(void) type_info.class_init = eepro100_class_init; type_info.instance_size = sizeof(EEPRO100State); type_info.instance_init = eepro100_instance_init; - type_info.interfaces = (InterfaceInfo[]) { + type_info.interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }; - type_register(&type_info); + type_register_static(&type_info); } } diff --git a/hw/net/etraxfs_eth.c b/hw/net/etraxfs_eth.c deleted file mode 100644 index 5faf20c782c..00000000000 --- a/hw/net/etraxfs_eth.c +++ /dev/null @@ -1,688 +0,0 @@ -/* - * QEMU ETRAX Ethernet Controller. - * - * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "hw/sysbus.h" -#include "net/net.h" -#include "hw/cris/etraxfs.h" -#include "qemu/error-report.h" -#include "qemu/module.h" -#include "trace.h" -#include "qom/object.h" - -#define D(x) - -/* Advertisement control register. */ -#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ -#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ -#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ -#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ - -/* - * The MDIO extensions in the TDK PHY model were reversed engineered from the - * linux driver (PHYID and Diagnostics reg). - * TODO: Add friendly names for the register nums. - */ -struct qemu_phy -{ - uint32_t regs[32]; - - int link; - - unsigned int (*read)(struct qemu_phy *phy, unsigned int req); - void (*write)(struct qemu_phy *phy, unsigned int req, unsigned int data); -}; - -static unsigned int tdk_read(struct qemu_phy *phy, unsigned int req) -{ - int regnum; - unsigned r = 0; - - regnum = req & 0x1f; - - switch (regnum) { - case 1: - if (!phy->link) { - break; - } - /* MR1. */ - /* Speeds and modes. */ - r |= (1 << 13) | (1 << 14); - r |= (1 << 11) | (1 << 12); - r |= (1 << 5); /* Autoneg complete. */ - r |= (1 << 3); /* Autoneg able. */ - r |= (1 << 2); /* link. */ - break; - case 5: - /* Link partner ability. - We are kind; always agree with whatever best mode - the guest advertises. */ - r = 1 << 14; /* Success. */ - /* Copy advertised modes. */ - r |= phy->regs[4] & (15 << 5); - /* Autoneg support. */ - r |= 1; - break; - case 18: - { - /* Diagnostics reg. */ - int duplex = 0; - int speed_100 = 0; - - if (!phy->link) { - break; - } - - /* Are we advertising 100 half or 100 duplex ? */ - speed_100 = !!(phy->regs[4] & ADVERTISE_100HALF); - speed_100 |= !!(phy->regs[4] & ADVERTISE_100FULL); - - /* Are we advertising 10 duplex or 100 duplex ? 
*/ - duplex = !!(phy->regs[4] & ADVERTISE_100FULL); - duplex |= !!(phy->regs[4] & ADVERTISE_10FULL); - r = (speed_100 << 10) | (duplex << 11); - } - break; - - default: - r = phy->regs[regnum]; - break; - } - trace_mdio_phy_read(regnum, r); - return r; -} - -static void -tdk_write(struct qemu_phy *phy, unsigned int req, unsigned int data) -{ - int regnum; - - regnum = req & 0x1f; - trace_mdio_phy_write(regnum, data); - switch (regnum) { - default: - phy->regs[regnum] = data; - break; - } -} - -static void -tdk_reset(struct qemu_phy *phy) -{ - phy->regs[0] = 0x3100; - /* PHY Id. */ - phy->regs[2] = 0x0300; - phy->regs[3] = 0xe400; - /* Autonegotiation advertisement reg. */ - phy->regs[4] = 0x01E1; - phy->link = 1; -} - -struct qemu_mdio -{ - /* bus. */ - int mdc; - int mdio; - - /* decoder. */ - enum { - PREAMBLE, - SOF, - OPC, - ADDR, - REQ, - TURNAROUND, - DATA - } state; - unsigned int drive; - - unsigned int cnt; - unsigned int addr; - unsigned int opc; - unsigned int req; - unsigned int data; - - struct qemu_phy *devs[32]; -}; - -static void -mdio_attach(struct qemu_mdio *bus, struct qemu_phy *phy, unsigned int addr) -{ - bus->devs[addr & 0x1f] = phy; -} - -#ifdef USE_THIS_DEAD_CODE -static void -mdio_detach(struct qemu_mdio *bus, struct qemu_phy *phy, unsigned int addr) -{ - bus->devs[addr & 0x1f] = NULL; -} -#endif - -static void mdio_read_req(struct qemu_mdio *bus) -{ - struct qemu_phy *phy; - - phy = bus->devs[bus->addr]; - if (phy && phy->read) { - bus->data = phy->read(phy, bus->req); - } else { - bus->data = 0xffff; - } -} - -static void mdio_write_req(struct qemu_mdio *bus) -{ - struct qemu_phy *phy; - - phy = bus->devs[bus->addr]; - if (phy && phy->write) { - phy->write(phy, bus->req, bus->data); - } -} - -static void mdio_cycle(struct qemu_mdio *bus) -{ - bus->cnt++; - - trace_mdio_bitbang(bus->mdc, bus->mdio, bus->state, bus->cnt, bus->drive); -#if 0 - if (bus->mdc) { - printf("%d", bus->mdio); - } -#endif - switch (bus->state) { - case PREAMBLE: - if (bus->mdc) { - if (bus->cnt >= (32 * 2) && !bus->mdio) { - bus->cnt = 0; - bus->state = SOF; - bus->data = 0; - } - } - break; - case SOF: - if (bus->mdc) { - if (bus->mdio != 1) { - printf("WARNING: no SOF\n"); - } - if (bus->cnt == 1*2) { - bus->cnt = 0; - bus->opc = 0; - bus->state = OPC; - } - } - break; - case OPC: - if (bus->mdc) { - bus->opc <<= 1; - bus->opc |= bus->mdio & 1; - if (bus->cnt == 2*2) { - bus->cnt = 0; - bus->addr = 0; - bus->state = ADDR; - } - } - break; - case ADDR: - if (bus->mdc) { - bus->addr <<= 1; - bus->addr |= bus->mdio & 1; - - if (bus->cnt == 5*2) { - bus->cnt = 0; - bus->req = 0; - bus->state = REQ; - } - } - break; - case REQ: - if (bus->mdc) { - bus->req <<= 1; - bus->req |= bus->mdio & 1; - if (bus->cnt == 5*2) { - bus->cnt = 0; - bus->state = TURNAROUND; - } - } - break; - case TURNAROUND: - if (bus->mdc && bus->cnt == 2*2) { - bus->mdio = 0; - bus->cnt = 0; - - if (bus->opc == 2) { - bus->drive = 1; - mdio_read_req(bus); - bus->mdio = bus->data & 1; - } - bus->state = DATA; - } - break; - case DATA: - if (!bus->mdc) { - if (bus->drive) { - bus->mdio = !!(bus->data & (1 << 15)); - bus->data <<= 1; - } - } else { - if (!bus->drive) { - bus->data <<= 1; - bus->data |= bus->mdio; - } - if (bus->cnt == 16 * 2) { - bus->cnt = 0; - bus->state = PREAMBLE; - if (!bus->drive) { - mdio_write_req(bus); - } - bus->drive = 0; - } - } - break; - default: - break; - } -} - -/* ETRAX-FS Ethernet MAC block starts here. 
*/ - -#define RW_MA0_LO 0x00 -#define RW_MA0_HI 0x01 -#define RW_MA1_LO 0x02 -#define RW_MA1_HI 0x03 -#define RW_GA_LO 0x04 -#define RW_GA_HI 0x05 -#define RW_GEN_CTRL 0x06 -#define RW_REC_CTRL 0x07 -#define RW_TR_CTRL 0x08 -#define RW_CLR_ERR 0x09 -#define RW_MGM_CTRL 0x0a -#define R_STAT 0x0b -#define FS_ETH_MAX_REGS 0x17 - -#define TYPE_ETRAX_FS_ETH "etraxfs-eth" -OBJECT_DECLARE_SIMPLE_TYPE(ETRAXFSEthState, ETRAX_FS_ETH) - -struct ETRAXFSEthState { - SysBusDevice parent_obj; - - MemoryRegion mmio; - NICState *nic; - NICConf conf; - - /* Two addrs in the filter. */ - uint8_t macaddr[2][6]; - uint32_t regs[FS_ETH_MAX_REGS]; - - struct etraxfs_dma_client *dma_out; - struct etraxfs_dma_client *dma_in; - - /* MDIO bus. */ - struct qemu_mdio mdio_bus; - unsigned int phyaddr; - int duplex_mismatch; - - /* PHY. */ - struct qemu_phy phy; -}; - -static void eth_validate_duplex(ETRAXFSEthState *eth) -{ - struct qemu_phy *phy; - unsigned int phy_duplex; - unsigned int mac_duplex; - int new_mm = 0; - - phy = eth->mdio_bus.devs[eth->phyaddr]; - phy_duplex = !!(phy->read(phy, 18) & (1 << 11)); - mac_duplex = !!(eth->regs[RW_REC_CTRL] & 128); - - if (mac_duplex != phy_duplex) { - new_mm = 1; - } - - if (eth->regs[RW_GEN_CTRL] & 1) { - if (new_mm != eth->duplex_mismatch) { - if (new_mm) { - printf("HW: WARNING ETH duplex mismatch MAC=%d PHY=%d\n", - mac_duplex, phy_duplex); - } else { - printf("HW: ETH duplex ok.\n"); - } - } - eth->duplex_mismatch = new_mm; - } -} - -static uint64_t -eth_read(void *opaque, hwaddr addr, unsigned int size) -{ - ETRAXFSEthState *eth = opaque; - uint32_t r = 0; - - addr >>= 2; - - switch (addr) { - case R_STAT: - r = eth->mdio_bus.mdio & 1; - break; - default: - r = eth->regs[addr]; - D(printf("%s %x\n", __func__, addr * 4)); - break; - } - return r; -} - -static void eth_update_ma(ETRAXFSEthState *eth, int ma) -{ - int reg; - int i = 0; - - ma &= 1; - - reg = RW_MA0_LO; - if (ma) { - reg = RW_MA1_LO; - } - - eth->macaddr[ma][i++] = eth->regs[reg]; - eth->macaddr[ma][i++] = eth->regs[reg] >> 8; - eth->macaddr[ma][i++] = eth->regs[reg] >> 16; - eth->macaddr[ma][i++] = eth->regs[reg] >> 24; - eth->macaddr[ma][i++] = eth->regs[reg + 1]; - eth->macaddr[ma][i] = eth->regs[reg + 1] >> 8; - - D(printf("set mac%d=%x.%x.%x.%x.%x.%x\n", ma, - eth->macaddr[ma][0], eth->macaddr[ma][1], - eth->macaddr[ma][2], eth->macaddr[ma][3], - eth->macaddr[ma][4], eth->macaddr[ma][5])); -} - -static void -eth_write(void *opaque, hwaddr addr, - uint64_t val64, unsigned int size) -{ - ETRAXFSEthState *eth = opaque; - uint32_t value = val64; - - addr >>= 2; - switch (addr) { - case RW_MA0_LO: - case RW_MA0_HI: - eth->regs[addr] = value; - eth_update_ma(eth, 0); - break; - case RW_MA1_LO: - case RW_MA1_HI: - eth->regs[addr] = value; - eth_update_ma(eth, 1); - break; - - case RW_MGM_CTRL: - /* Attach an MDIO/PHY abstraction. */ - if (value & 2) { - eth->mdio_bus.mdio = value & 1; - } - if (eth->mdio_bus.mdc != (value & 4)) { - mdio_cycle(ð->mdio_bus); - eth_validate_duplex(eth); - } - eth->mdio_bus.mdc = !!(value & 4); - eth->regs[addr] = value; - break; - - case RW_REC_CTRL: - eth->regs[addr] = value; - eth_validate_duplex(eth); - break; - - default: - eth->regs[addr] = value; - D(printf("%s %x %x\n", __func__, addr, value)); - break; - } -} - -/* The ETRAX FS has a groupt address table (GAT) which works like a k=1 bloom - filter dropping group addresses we have not joined. The filter has 64 - bits (m). The has function is a simple nible xor of the group addr. 
*/ -static int eth_match_groupaddr(ETRAXFSEthState *eth, const unsigned char *sa) -{ - unsigned int hsh; - int m_individual = eth->regs[RW_REC_CTRL] & 4; - int match; - - /* First bit on the wire of a MAC address signals multicast or - physical address. */ - if (!m_individual && !(sa[0] & 1)) { - return 0; - } - - /* Calculate the hash index for the GA registers. */ - hsh = 0; - hsh ^= (*sa) & 0x3f; - hsh ^= ((*sa) >> 6) & 0x03; - ++sa; - hsh ^= ((*sa) << 2) & 0x03c; - hsh ^= ((*sa) >> 4) & 0xf; - ++sa; - hsh ^= ((*sa) << 4) & 0x30; - hsh ^= ((*sa) >> 2) & 0x3f; - ++sa; - hsh ^= (*sa) & 0x3f; - hsh ^= ((*sa) >> 6) & 0x03; - ++sa; - hsh ^= ((*sa) << 2) & 0x03c; - hsh ^= ((*sa) >> 4) & 0xf; - ++sa; - hsh ^= ((*sa) << 4) & 0x30; - hsh ^= ((*sa) >> 2) & 0x3f; - - hsh &= 63; - if (hsh > 31) { - match = eth->regs[RW_GA_HI] & (1 << (hsh - 32)); - } else { - match = eth->regs[RW_GA_LO] & (1 << hsh); - } - D(printf("hsh=%x ga=%x.%x mtch=%d\n", hsh, - eth->regs[RW_GA_HI], eth->regs[RW_GA_LO], match)); - return match; -} - -static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size) -{ - unsigned char sa_bcast[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - ETRAXFSEthState *eth = qemu_get_nic_opaque(nc); - int use_ma0 = eth->regs[RW_REC_CTRL] & 1; - int use_ma1 = eth->regs[RW_REC_CTRL] & 2; - int r_bcast = eth->regs[RW_REC_CTRL] & 8; - - if (size < 12) { - return -1; - } - - D(printf("%x.%x.%x.%x.%x.%x ma=%d %d bc=%d\n", - buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], - use_ma0, use_ma1, r_bcast)); - - /* Does the frame get through the address filters? */ - if ((!use_ma0 || memcmp(buf, eth->macaddr[0], 6)) - && (!use_ma1 || memcmp(buf, eth->macaddr[1], 6)) - && (!r_bcast || memcmp(buf, sa_bcast, 6)) - && !eth_match_groupaddr(eth, buf)) { - return size; - } - - /* FIXME: Find another way to pass on the fake csum. 
*/ - etraxfs_dmac_input(eth->dma_in, (void *)buf, size + 4, 1); - - return size; -} - -static int eth_tx_push(void *opaque, unsigned char *buf, int len, bool eop) -{ - ETRAXFSEthState *eth = opaque; - - D(printf("%s buf=%p len=%d\n", __func__, buf, len)); - qemu_send_packet(qemu_get_queue(eth->nic), buf, len); - return len; -} - -static void eth_set_link(NetClientState *nc) -{ - ETRAXFSEthState *eth = qemu_get_nic_opaque(nc); - D(printf("%s %d\n", __func__, nc->link_down)); - eth->phy.link = !nc->link_down; -} - -static const MemoryRegionOps eth_ops = { - .read = eth_read, - .write = eth_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4 - } -}; - -static NetClientInfo net_etraxfs_info = { - .type = NET_CLIENT_DRIVER_NIC, - .size = sizeof(NICState), - .receive = eth_receive, - .link_status_changed = eth_set_link, -}; - -static void etraxfs_eth_reset(DeviceState *dev) -{ - ETRAXFSEthState *s = ETRAX_FS_ETH(dev); - - memset(s->regs, 0, sizeof(s->regs)); - memset(s->macaddr, 0, sizeof(s->macaddr)); - s->duplex_mismatch = 0; - - s->mdio_bus.mdc = 0; - s->mdio_bus.mdio = 0; - s->mdio_bus.state = 0; - s->mdio_bus.drive = 0; - s->mdio_bus.cnt = 0; - s->mdio_bus.addr = 0; - s->mdio_bus.opc = 0; - s->mdio_bus.req = 0; - s->mdio_bus.data = 0; - - tdk_reset(&s->phy); -} - -static void etraxfs_eth_realize(DeviceState *dev, Error **errp) -{ - SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - ETRAXFSEthState *s = ETRAX_FS_ETH(dev); - - if (!s->dma_out || !s->dma_in) { - error_setg(errp, "Unconnected ETRAX-FS Ethernet MAC"); - return; - } - - s->dma_out->client.push = eth_tx_push; - s->dma_out->client.opaque = s; - s->dma_in->client.opaque = s; - s->dma_in->client.pull = NULL; - - memory_region_init_io(&s->mmio, OBJECT(dev), ð_ops, s, - "etraxfs-eth", 0x5c); - sysbus_init_mmio(sbd, &s->mmio); - - qemu_macaddr_default_if_unset(&s->conf.macaddr); - s->nic = qemu_new_nic(&net_etraxfs_info, &s->conf, - object_get_typename(OBJECT(s)), dev->id, - &dev->mem_reentrancy_guard, s); - qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); - - s->phy.read = tdk_read; - s->phy.write = tdk_write; - mdio_attach(&s->mdio_bus, &s->phy, s->phyaddr); -} - -static Property etraxfs_eth_properties[] = { - DEFINE_PROP_UINT32("phyaddr", ETRAXFSEthState, phyaddr, 1), - DEFINE_NIC_PROPERTIES(ETRAXFSEthState, conf), - DEFINE_PROP_END_OF_LIST(), -}; - -static void etraxfs_eth_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->realize = etraxfs_eth_realize; - dc->reset = etraxfs_eth_reset; - device_class_set_props(dc, etraxfs_eth_properties); - /* Reason: dma_out, dma_in are not user settable */ - dc->user_creatable = false; -} - - -/* Instantiate an ETRAXFS Ethernet MAC. */ -DeviceState * -etraxfs_eth_init(hwaddr base, int phyaddr, - struct etraxfs_dma_client *dma_out, - struct etraxfs_dma_client *dma_in) -{ - DeviceState *dev; - - dev = qdev_new("etraxfs-eth"); - qemu_configure_nic_device(dev, true, "fseth"); - qdev_prop_set_uint32(dev, "phyaddr", phyaddr); - - /* - * TODO: QOM design, define a QOM interface for "I am an etraxfs - * DMA client" (which replaces the current 'struct - * etraxfs_dma_client' ad-hoc interface), implement it on the - * ethernet device, and then have QOM link properties on the DMA - * controller device so that you can pass the interface - * implementations to it. 
- */ - ETRAX_FS_ETH(dev)->dma_out = dma_out; - ETRAX_FS_ETH(dev)->dma_in = dma_in; - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); - - return dev; -} - -static const TypeInfo etraxfs_eth_info = { - .name = TYPE_ETRAX_FS_ETH, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(ETRAXFSEthState), - .class_init = etraxfs_eth_class_init, -}; - -static void etraxfs_eth_register_types(void) -{ - type_register_static(&etraxfs_eth_info); -} - -type_init(etraxfs_eth_register_types) diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c index 00315f305d8..846f6cbc5d9 100644 --- a/hw/net/fsl_etsec/etsec.c +++ b/hw/net/fsl_etsec/etsec.c @@ -36,7 +36,6 @@ #include "registers.h" #include "qapi/error.h" #include "qemu/log.h" -#include "qemu/module.h" /* #define HEX_DUMP */ /* #define DEBUG_REGISTER */ @@ -390,6 +389,7 @@ static void etsec_realize(DeviceState *dev, Error **errp) { eTSEC *etsec = ETSEC_COMMON(dev); + qemu_macaddr_default_if_unset(&etsec->conf.macaddr); etsec->nic = qemu_new_nic(&net_etsec_info, &etsec->conf, object_get_typename(OBJECT(dev)), dev->id, &dev->mem_reentrancy_guard, etsec); @@ -415,33 +415,29 @@ static void etsec_instance_init(Object *obj) sysbus_init_irq(sbd, &etsec->err_irq); } -static Property etsec_properties[] = { +static const Property etsec_properties[] = { DEFINE_NIC_PROPERTIES(eTSEC, conf), - DEFINE_PROP_END_OF_LIST(), }; -static void etsec_class_init(ObjectClass *klass, void *data) +static void etsec_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = etsec_realize; - dc->reset = etsec_reset; + dc->desc = "Freescale Enhanced Three-Speed Ethernet Controller"; + device_class_set_legacy_reset(dc, etsec_reset); device_class_set_props(dc, etsec_properties); - /* Supported by ppce500 machine */ - dc->user_creatable = true; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); } -static const TypeInfo etsec_info = { - .name = TYPE_ETSEC_COMMON, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(eTSEC), - .class_init = etsec_class_init, - .instance_init = etsec_instance_init, +static const TypeInfo etsec_types[] = { + { + .name = TYPE_ETSEC_COMMON, + .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE, + .instance_size = sizeof(eTSEC), + .class_init = etsec_class_init, + .instance_init = etsec_instance_init, + }, }; -static void etsec_register_types(void) -{ - type_register_static(&etsec_info); -} - -type_init(etsec_register_types) +DEFINE_TYPES(etsec_types) diff --git a/hw/net/fsl_etsec/miim.c b/hw/net/fsl_etsec/miim.c index b48d2cb57bd..4e9169907a9 100644 --- a/hw/net/fsl_etsec/miim.c +++ b/hw/net/fsl_etsec/miim.c @@ -29,13 +29,6 @@ /* #define DEBUG_MIIM */ -#define MIIM_CONTROL 0 -#define MIIM_STATUS 1 -#define MIIM_PHY_ID_1 2 -#define MIIM_PHY_ID_2 3 -#define MIIM_T2_STATUS 10 -#define MIIM_EXT_STATUS 15 - static void miim_read_cycle(eTSEC *etsec) { uint8_t phy; @@ -47,14 +40,14 @@ static void miim_read_cycle(eTSEC *etsec) addr = etsec->regs[MIIMADD].value & 0x1F; switch (addr) { - case MIIM_CONTROL: + case MII_BMCR: value = etsec->phy_control; break; - case MIIM_STATUS: + case MII_BMSR: value = etsec->phy_status; break; - case MIIM_T2_STATUS: - value = 0x1800; /* Local and remote receivers OK */ + case MII_STAT1000: + value = MII_STAT1000_LOK | MII_STAT1000_ROK; break; default: value = 0x0; @@ -84,8 +77,8 @@ static void miim_write_cycle(eTSEC *etsec) #endif switch (addr) { - case MIIM_CONTROL: - etsec->phy_control = value & ~(0x8100); + case 
MII_BMCR: + etsec->phy_control = value & ~(MII_BMCR_RESET | MII_BMCR_FD); break; default: break; diff --git a/hw/net/ftgmac100.c b/hw/net/ftgmac100.c index 80f9cd56d53..c41ce889cf1 100644 --- a/hw/net/ftgmac100.c +++ b/hw/net/ftgmac100.c @@ -14,7 +14,7 @@ #include "qemu/osdep.h" #include "hw/irq.h" #include "hw/net/ftgmac100.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qapi/error.h" #include "qemu/log.h" #include "qemu/module.h" @@ -24,8 +24,7 @@ #include "hw/qdev-properties.h" #include "migration/vmstate.h" -/* For crc32 */ -#include +#include /* for crc32 */ /* * FTGMAC100 registers @@ -1255,19 +1254,18 @@ static const VMStateDescription vmstate_ftgmac100 = { } }; -static Property ftgmac100_properties[] = { +static const Property ftgmac100_properties[] = { DEFINE_PROP_BOOL("aspeed", FTGMAC100State, aspeed, false), DEFINE_NIC_PROPERTIES(FTGMAC100State, conf), DEFINE_PROP_BOOL("dma64", FTGMAC100State, dma64, false), - DEFINE_PROP_END_OF_LIST(), }; -static void ftgmac100_class_init(ObjectClass *klass, void *data) +static void ftgmac100_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_ftgmac100; - dc->reset = ftgmac100_reset; + device_class_set_legacy_reset(dc, ftgmac100_reset); device_class_set_props(dc, ftgmac100_properties); set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); dc->realize = ftgmac100_realize; @@ -1416,18 +1414,17 @@ static const VMStateDescription vmstate_aspeed_mii = { } }; -static Property aspeed_mii_properties[] = { +static const Property aspeed_mii_properties[] = { DEFINE_PROP_LINK("nic", AspeedMiiState, nic, TYPE_FTGMAC100, FTGMAC100State *), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_mii_class_init(ObjectClass *klass, void *data) +static void aspeed_mii_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_aspeed_mii; - dc->reset = aspeed_mii_reset; + device_class_set_legacy_reset(dc, aspeed_mii_reset); dc->realize = aspeed_mii_realize; dc->desc = "Aspeed MII controller"; device_class_set_props(dc, aspeed_mii_properties); diff --git a/hw/net/i82596.c b/hw/net/i82596.c index 6cc8292a65a..c1ff3e6c564 100644 --- a/hw/net/i82596.c +++ b/hw/net/i82596.c @@ -5,7 +5,7 @@ * This work is licensed under the GNU GPL license version 2 or later. * * This software was written to be compatible with the specification: - * https://www.intel.com/assets/pdf/general/82596ca.pdf + * https://parisc.docs.kernel.org/en/latest/_downloads/96672be0650d9fc046bbcea40b92482f/82596CA.pdf */ #include "qemu/osdep.h" @@ -15,11 +15,11 @@ #include "hw/irq.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "qemu/module.h" #include "trace.h" #include "i82596.h" -#include /* For crc32 */ +#include /* for crc32 */ #if defined(ENABLE_DEBUG) #define DBG(x) x @@ -177,6 +177,26 @@ static void set_individual_address(I82596State *s, uint32_t addr) trace_i82596_new_mac(nc->info_str); } +static void i82596_configure(I82596State *s, uint32_t addr) +{ + uint8_t byte_cnt; + byte_cnt = get_byte(addr + 8) & 0x0f; + + byte_cnt = MAX(byte_cnt, 4); + byte_cnt = MIN(byte_cnt, sizeof(s->config)); + /* copy byte_cnt max. 
*/ + address_space_read(&address_space_memory, addr + 8, + MEMTXATTRS_UNSPECIFIED, s->config, byte_cnt); + /* config byte according to page 35ff */ + s->config[2] &= 0x82; /* mask valid bits */ + s->config[2] |= 0x40; + s->config[7] &= 0xf7; /* clear zero bit */ + assert(I596_NOCRC_INS == 0); /* do CRC insertion */ + s->config[10] = MAX(s->config[10], 5); /* min frame length */ + s->config[12] &= 0x40; /* only full duplex field valid */ + s->config[13] |= 0x3f; /* set ones in byte 13 */ +} + static void set_multicast_list(I82596State *s, uint32_t addr) { uint16_t mc_count, i; @@ -234,7 +254,6 @@ static void command_loop(I82596State *s) { uint16_t cmd; uint16_t status; - uint8_t byte_cnt; DBG(printf("STARTING COMMAND LOOP cmd_p=%08x\n", s->cmd_p)); @@ -254,20 +273,7 @@ static void command_loop(I82596State *s) set_individual_address(s, s->cmd_p); break; case CmdConfigure: - byte_cnt = get_byte(s->cmd_p + 8) & 0x0f; - byte_cnt = MAX(byte_cnt, 4); - byte_cnt = MIN(byte_cnt, sizeof(s->config)); - /* copy byte_cnt max. */ - address_space_read(&address_space_memory, s->cmd_p + 8, - MEMTXATTRS_UNSPECIFIED, s->config, byte_cnt); - /* config byte according to page 35ff */ - s->config[2] &= 0x82; /* mask valid bits */ - s->config[2] |= 0x40; - s->config[7] &= 0xf7; /* clear zero bit */ - assert(I596_NOCRC_INS == 0); /* do CRC insertion */ - s->config[10] = MAX(s->config[10], 5); /* min frame length */ - s->config[12] &= 0x40; /* only full duplex field valid */ - s->config[13] |= 0x3f; /* set ones in byte 13 */ + i82596_configure(s, s->cmd_p); break; case CmdTDR: /* get signal LINK */ @@ -282,7 +288,7 @@ static void command_loop(I82596State *s) case CmdDump: case CmdDiagnose: printf("FIXME Command %d !!\n", cmd & 7); - assert(0); + g_assert_not_reached(); } /* update status */ diff --git a/hw/net/i82596.h b/hw/net/i82596.h index f0bbe810eb2..dc1fa1a1dc0 100644 --- a/hw/net/i82596.h +++ b/hw/net/i82596.h @@ -3,8 +3,8 @@ #define I82596_IOPORT_SIZE 0x20 -#include "exec/memory.h" -#include "exec/address-spaces.h" +#include "system/memory.h" +#include "system/address-spaces.h" #define PORT_RESET 0x00 /* reset 82596 */ #define PORT_SELFTEST 0x01 /* selftest */ diff --git a/hw/net/igb.c b/hw/net/igb.c index b92bba402e0..e4c02365d67 100644 --- a/hw/net/igb.c +++ b/hw/net/igb.c @@ -44,7 +44,7 @@ #include "net/tap.h" #include "qemu/module.h" #include "qemu/range.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/hw.h" #include "hw/net/mii.h" #include "hw/pci/pci.h" @@ -356,8 +356,7 @@ static int igb_add_pm_capability(PCIDevice *pdev, uint8_t offset, uint16_t pmc) { Error *local_err = NULL; - int ret = pci_add_capability(pdev, PCI_CAP_ID_PM, offset, - PCI_PM_SIZEOF, &local_err); + int ret = pci_pm_init(pdev, offset, &local_err); if (local_err) { error_report_err(local_err); @@ -446,9 +445,13 @@ static void igb_pci_realize(PCIDevice *pci_dev, Error **errp) pcie_ari_init(pci_dev, 0x150); - pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET, TYPE_IGBVF, - IGB_82576_VF_DEV_ID, IGB_MAX_VF_FUNCTIONS, IGB_MAX_VF_FUNCTIONS, - IGB_VF_OFFSET, IGB_VF_STRIDE); + if (!pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET, TYPE_IGBVF, + IGB_82576_VF_DEV_ID, IGB_MAX_VF_FUNCTIONS, + IGB_MAX_VF_FUNCTIONS, IGB_VF_OFFSET, IGB_VF_STRIDE, + errp)) { + igb_cleanup_msix(s); + return; + } pcie_sriov_pf_init_vf_bar(pci_dev, IGBVF_MMIO_BAR_IDX, PCI_BASE_ADDRESS_MEM_TYPE_64 | PCI_BASE_ADDRESS_MEM_PREFETCH, @@ -591,13 +594,12 @@ static const VMStateDescription igb_vmstate = { } }; -static Property igb_properties[] = { +static 
const Property igb_properties[] = { DEFINE_NIC_PROPERTIES(IGBState, conf), DEFINE_PROP_BOOL("x-pcie-flr-init", IGBState, has_flr, true), - DEFINE_PROP_END_OF_LIST(), }; -static void igb_class_init(ObjectClass *class, void *data) +static void igb_class_init(ObjectClass *class, const void *data) { DeviceClass *dc = DEVICE_CLASS(class); ResettableClass *rc = RESETTABLE_CLASS(class); @@ -633,7 +635,7 @@ static const TypeInfo igb_info = { .instance_size = sizeof(IGBState), .class_init = igb_class_init, .instance_init = igb_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { } }, diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c index bcd5f6cd9cd..39e3ce1c8fe 100644 --- a/hw/net/igb_core.c +++ b/hw/net/igb_core.c @@ -44,7 +44,7 @@ #include "hw/net/mii.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "net_tx_pkt.h" #include "net_rx_pkt.h" @@ -397,8 +397,7 @@ igb_rss_calc_hash(IGBCore *core, struct NetRxPkt *pkt, E1000E_RSSInfo *info) type = NetPktRssIpV6Udp; break; default: - assert(false); - return 0; + g_assert_not_reached(); } return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]); @@ -747,7 +746,6 @@ igb_ring_free_descr_num(IGBCore *core, const E1000ERingInfo *r) } g_assert_not_reached(); - return 0; } static inline bool diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h index e5a47eab64b..4dc4c31da27 100644 --- a/hw/net/igb_regs.h +++ b/hw/net/igb_regs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * This is copied + edited from kernel header files in * drivers/net/ethernet/intel/igb diff --git a/hw/net/igbvf.c b/hw/net/igbvf.c index 21a97d4d61d..31d72c4977d 100644 --- a/hw/net/igbvf.c +++ b/hw/net/igbvf.c @@ -299,7 +299,7 @@ static void igbvf_pci_uninit(PCIDevice *dev) msix_uninit(dev, &s->msix, &s->msix); } -static void igbvf_class_init(ObjectClass *class, void *data) +static void igbvf_class_init(ObjectClass *class, const void *data) { DeviceClass *dc = DEVICE_CLASS(class); PCIDeviceClass *c = PCI_DEVICE_CLASS(class); @@ -325,7 +325,7 @@ static const TypeInfo igbvf_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(IgbVfState), .class_init = igbvf_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { } }, diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c index 8c91d20d44c..e5e34dd1a47 100644 --- a/hw/net/imx_fec.c +++ b/hw/net/imx_fec.c @@ -26,15 +26,14 @@ #include "hw/net/imx_fec.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qemu/log.h" #include "qemu/module.h" #include "net/checksum.h" #include "net/eth.h" #include "trace.h" -/* For crc32 */ -#include +#include /* for crc32 */ #define IMX_MAX_DESC 1024 @@ -204,17 +203,12 @@ static const VMStateDescription vmstate_imx_eth_txdescs = { static const VMStateDescription vmstate_imx_eth = { .name = TYPE_IMX_FEC, - .version_id = 2, - .minimum_version_id = 2, + .version_id = 3, + .minimum_version_id = 3, .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX), VMSTATE_UINT32(rx_descriptor, IMXFECState), VMSTATE_UINT32(tx_descriptor[0], IMXFECState), - VMSTATE_UINT32(phy_status, IMXFECState), - VMSTATE_UINT32(phy_control, IMXFECState), - VMSTATE_UINT32(phy_advertise, IMXFECState), - VMSTATE_UINT32(phy_int, IMXFECState), - VMSTATE_UINT32(phy_int_mask, 
IMXFECState), VMSTATE_END_OF_LIST() }, .subsections = (const VMStateDescription * const []) { @@ -223,14 +217,6 @@ static const VMStateDescription vmstate_imx_eth = { }, }; -#define PHY_INT_ENERGYON (1 << 7) -#define PHY_INT_AUTONEG_COMPLETE (1 << 6) -#define PHY_INT_FAULT (1 << 5) -#define PHY_INT_DOWN (1 << 4) -#define PHY_INT_AUTONEG_LP (1 << 3) -#define PHY_INT_PARFAULT (1 << 2) -#define PHY_INT_AUTONEG_PAGE (1 << 1) - static void imx_eth_update(IMXFECState *s); /* @@ -239,47 +225,19 @@ static void imx_eth_update(IMXFECState *s); * For now we don't handle any GPIO/interrupt line, so the OS will * have to poll for the PHY status. */ -static void imx_phy_update_irq(IMXFECState *s) -{ - imx_eth_update(s); -} - -static void imx_phy_update_link(IMXFECState *s) +static void imx_phy_update_irq(void *opaque, int n, int level) { - /* Autonegotiation status mirrors link status. */ - if (qemu_get_queue(s->nic)->link_down) { - trace_imx_phy_update_link("down"); - s->phy_status &= ~0x0024; - s->phy_int |= PHY_INT_DOWN; - } else { - trace_imx_phy_update_link("up"); - s->phy_status |= 0x0024; - s->phy_int |= PHY_INT_ENERGYON; - s->phy_int |= PHY_INT_AUTONEG_COMPLETE; - } - imx_phy_update_irq(s); + imx_eth_update(opaque); } static void imx_eth_set_link(NetClientState *nc) { - imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc))); -} - -static void imx_phy_reset(IMXFECState *s) -{ - trace_imx_phy_reset(); - - s->phy_status = 0x7809; - s->phy_control = 0x3000; - s->phy_advertise = 0x01e1; - s->phy_int_mask = 0; - s->phy_int = 0; - imx_phy_update_link(s); + lan9118_phy_update_link(&IMX_FEC(qemu_get_nic_opaque(nc))->mii, + nc->link_down); } static uint32_t imx_phy_read(IMXFECState *s, int reg) { - uint32_t val; uint32_t phy = reg / 32; if (!s->phy_connected) { @@ -297,54 +255,7 @@ static uint32_t imx_phy_read(IMXFECState *s, int reg) reg %= 32; - switch (reg) { - case 0: /* Basic Control */ - val = s->phy_control; - break; - case 1: /* Basic Status */ - val = s->phy_status; - break; - case 2: /* ID1 */ - val = 0x0007; - break; - case 3: /* ID2 */ - val = 0xc0d1; - break; - case 4: /* Auto-neg advertisement */ - val = s->phy_advertise; - break; - case 5: /* Auto-neg Link Partner Ability */ - val = 0x0f71; - break; - case 6: /* Auto-neg Expansion */ - val = 1; - break; - case 29: /* Interrupt source. */ - val = s->phy_int; - s->phy_int = 0; - imx_phy_update_irq(s); - break; - case 30: /* Interrupt mask */ - val = s->phy_int_mask; - break; - case 17: - case 18: - case 27: - case 31: - qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n", - TYPE_IMX_FEC, __func__, reg); - val = 0; - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n", - TYPE_IMX_FEC, __func__, reg); - val = 0; - break; - } - - trace_imx_phy_read(val, phy, reg); - - return val; + return lan9118_phy_read(&s->mii, reg); } static void imx_phy_write(IMXFECState *s, int reg, uint32_t val) @@ -366,39 +277,7 @@ static void imx_phy_write(IMXFECState *s, int reg, uint32_t val) reg %= 32; - trace_imx_phy_write(val, phy, reg); - - switch (reg) { - case 0: /* Basic Control */ - if (val & 0x8000) { - imx_phy_reset(s); - } else { - s->phy_control = val & 0x7980; - /* Complete autonegotiation immediately. 
*/ - if (val & 0x1000) { - s->phy_status |= 0x0020; - } - } - break; - case 4: /* Auto-neg advertisement */ - s->phy_advertise = (val & 0x2d7f) | 0x80; - break; - case 30: /* Interrupt mask */ - s->phy_int_mask = val & 0xff; - imx_phy_update_irq(s); - break; - case 17: - case 18: - case 27: - case 31: - qemu_log_mask(LOG_UNIMP, "[%s.phy)%s: reg %d not implemented\n", - TYPE_IMX_FEC, __func__, reg); - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n", - TYPE_IMX_FEC, __func__, reg); - break; - } + lan9118_phy_write(&s->mii, reg, val); } static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr) @@ -683,9 +562,6 @@ static void imx_eth_reset(DeviceState *d) s->rx_descriptor = 0; memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor)); - - /* We also reset the PHY */ - imx_phy_reset(s); } static uint32_t imx_default_read(IMXFECState *s, uint32_t index) @@ -792,7 +668,6 @@ static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value) { qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%" PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4); - return; } static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value) @@ -1330,6 +1205,13 @@ static void imx_eth_realize(DeviceState *dev, Error **errp) sysbus_init_irq(sbd, &s->irq[0]); sysbus_init_irq(sbd, &s->irq[1]); + qemu_init_irq(&s->mii_irq, imx_phy_update_irq, s, 0); + object_initialize_child(OBJECT(s), "mii", &s->mii, TYPE_LAN9118_PHY); + if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(&s->mii), errp)) { + return; + } + qdev_connect_gpio_out(DEVICE(&s->mii), 0, &s->mii_irq); + qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf, @@ -1339,22 +1221,21 @@ static void imx_eth_realize(DeviceState *dev, Error **errp) qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); } -static Property imx_eth_properties[] = { +static const Property imx_eth_properties[] = { DEFINE_NIC_PROPERTIES(IMXFECState, conf), DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1), DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0), DEFINE_PROP_BOOL("phy-connected", IMXFECState, phy_connected, true), DEFINE_PROP_LINK("phy-consumer", IMXFECState, phy_consumer, TYPE_IMX_FEC, IMXFECState *), - DEFINE_PROP_END_OF_LIST(), }; -static void imx_eth_class_init(ObjectClass *klass, void *data) +static void imx_eth_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_imx_eth; - dc->reset = imx_eth_reset; + device_class_set_legacy_reset(dc, imx_eth_reset); device_class_set_props(dc, imx_eth_properties); dc->realize = imx_eth_realize; dc->desc = "i.MX FEC/ENET Ethernet Controller"; diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c index 91d81b410b5..3017e129710 100644 --- a/hw/net/lan9118.c +++ b/hw/net/lan9118.c @@ -16,14 +16,15 @@ #include "net/net.h" #include "net/eth.h" #include "hw/irq.h" +#include "hw/net/lan9118_phy.h" #include "hw/net/lan9118.h" #include "hw/ptimer.h" #include "hw/qdev-properties.h" #include "qapi/error.h" +#include "qemu/bswap.h" #include "qemu/log.h" #include "qemu/module.h" -/* For crc32 */ -#include +#include /* for crc32 */ #include "qom/object.h" //#define DEBUG_LAN9118 @@ -140,14 +141,6 @@ do { printf("lan9118: " fmt , ## __VA_ARGS__); } while (0) #define MAC_CR_RXEN 0x00000004 #define MAC_CR_RESERVED 0x7f404213 -#define PHY_INT_ENERGYON 0x80 -#define PHY_INT_AUTONEG_COMPLETE 0x40 -#define PHY_INT_FAULT 0x20 -#define PHY_INT_DOWN 0x10 -#define 
PHY_INT_AUTONEG_LP 0x08 -#define PHY_INT_PARFAULT 0x04 -#define PHY_INT_AUTONEG_PAGE 0x02 - #define GPT_TIMER_EN 0x20000000 /* @@ -229,11 +222,8 @@ struct lan9118_state { uint32_t mac_mii_data; uint32_t mac_flow; - uint32_t phy_status; - uint32_t phy_control; - uint32_t phy_advertise; - uint32_t phy_int; - uint32_t phy_int_mask; + Lan9118PhyState mii; + IRQState mii_irq; int32_t eeprom_writable; uint8_t eeprom[128]; @@ -275,8 +265,8 @@ struct lan9118_state { static const VMStateDescription vmstate_lan9118 = { .name = "lan9118", - .version_id = 2, - .minimum_version_id = 1, + .version_id = 3, + .minimum_version_id = 3, .fields = (const VMStateField[]) { VMSTATE_PTIMER(timer, lan9118_state), VMSTATE_UINT32(irq_cfg, lan9118_state), @@ -302,11 +292,6 @@ static const VMStateDescription vmstate_lan9118 = { VMSTATE_UINT32(mac_mii_acc, lan9118_state), VMSTATE_UINT32(mac_mii_data, lan9118_state), VMSTATE_UINT32(mac_flow, lan9118_state), - VMSTATE_UINT32(phy_status, lan9118_state), - VMSTATE_UINT32(phy_control, lan9118_state), - VMSTATE_UINT32(phy_advertise, lan9118_state), - VMSTATE_UINT32(phy_int, lan9118_state), - VMSTATE_UINT32(phy_int_mask, lan9118_state), VMSTATE_INT32(eeprom_writable, lan9118_state), VMSTATE_UINT8_ARRAY(eeprom, lan9118_state, 128), VMSTATE_INT32(tx_fifo_size, lan9118_state), @@ -386,9 +371,11 @@ static void lan9118_reload_eeprom(lan9118_state *s) lan9118_mac_changed(s); } -static void phy_update_irq(lan9118_state *s) +static void lan9118_update_irq(void *opaque, int n, int level) { - if (s->phy_int & s->phy_int_mask) { + lan9118_state *s = opaque; + + if (level) { s->int_sts |= PHY_INT; } else { s->int_sts &= ~PHY_INT; @@ -396,33 +383,10 @@ static void phy_update_irq(lan9118_state *s) lan9118_update(s); } -static void phy_update_link(lan9118_state *s) -{ - /* Autonegotiation status mirrors link status. */ - if (qemu_get_queue(s->nic)->link_down) { - s->phy_status &= ~0x0024; - s->phy_int |= PHY_INT_DOWN; - } else { - s->phy_status |= 0x0024; - s->phy_int |= PHY_INT_ENERGYON; - s->phy_int |= PHY_INT_AUTONEG_COMPLETE; - } - phy_update_irq(s); -} - static void lan9118_set_link(NetClientState *nc) { - phy_update_link(qemu_get_nic_opaque(nc)); -} - -static void phy_reset(lan9118_state *s) -{ - s->phy_status = 0x7809; - s->phy_control = 0x3000; - s->phy_advertise = 0x01e1; - s->phy_int_mask = 0; - s->phy_int = 0; - phy_update_link(s); + lan9118_phy_update_link(&LAN9118(qemu_get_nic_opaque(nc))->mii, + nc->link_down); } static void lan9118_reset(DeviceState *d) @@ -479,8 +443,6 @@ static void lan9118_reset(DeviceState *d) s->read_word_n = 0; s->write_word_n = 0; - phy_reset(s); - s->eeprom_writable = 0; lan9118_reload_eeprom(s); } @@ -679,7 +641,7 @@ static void do_tx_packet(lan9118_state *s) uint32_t status; /* FIXME: Honor TX disable, and allow queueing of packets. */ - if (s->phy_control & 0x4000) { + if (s->mii.control & 0x4000) { /* This assumes the receive routine doesn't touch the VLANClient. 
*/ qemu_receive_packet(qemu_get_queue(s->nic), s->txp->data, s->txp->len); } else { @@ -835,68 +797,6 @@ static void tx_fifo_push(lan9118_state *s, uint32_t val) } } -static uint32_t do_phy_read(lan9118_state *s, int reg) -{ - uint32_t val; - - switch (reg) { - case 0: /* Basic Control */ - return s->phy_control; - case 1: /* Basic Status */ - return s->phy_status; - case 2: /* ID1 */ - return 0x0007; - case 3: /* ID2 */ - return 0xc0d1; - case 4: /* Auto-neg advertisement */ - return s->phy_advertise; - case 5: /* Auto-neg Link Partner Ability */ - return 0x0f71; - case 6: /* Auto-neg Expansion */ - return 1; - /* TODO 17, 18, 27, 29, 30, 31 */ - case 29: /* Interrupt source. */ - val = s->phy_int; - s->phy_int = 0; - phy_update_irq(s); - return val; - case 30: /* Interrupt mask */ - return s->phy_int_mask; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "do_phy_read: PHY read reg %d\n", reg); - return 0; - } -} - -static void do_phy_write(lan9118_state *s, int reg, uint32_t val) -{ - switch (reg) { - case 0: /* Basic Control */ - if (val & 0x8000) { - phy_reset(s); - break; - } - s->phy_control = val & 0x7980; - /* Complete autonegotiation immediately. */ - if (val & 0x1000) { - s->phy_status |= 0x0020; - } - break; - case 4: /* Auto-neg advertisement */ - s->phy_advertise = (val & 0x2d7f) | 0x80; - break; - /* TODO 17, 18, 27, 31 */ - case 30: /* Interrupt mask */ - s->phy_int_mask = val & 0xff; - phy_update_irq(s); - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "do_phy_write: PHY write reg %d = 0x%04x\n", reg, val); - } -} - static void do_mac_write(lan9118_state *s, int reg, uint32_t val) { switch (reg) { @@ -930,9 +830,9 @@ static void do_mac_write(lan9118_state *s, int reg, uint32_t val) if (val & 2) { DPRINTF("PHY write %d = 0x%04x\n", (val >> 6) & 0x1f, s->mac_mii_data); - do_phy_write(s, (val >> 6) & 0x1f, s->mac_mii_data); + lan9118_phy_write(&s->mii, (val >> 6) & 0x1f, s->mac_mii_data); } else { - s->mac_mii_data = do_phy_read(s, (val >> 6) & 0x1f); + s->mac_mii_data = lan9118_phy_read(&s->mii, (val >> 6) & 0x1f); DPRINTF("PHY read %d = 0x%04x\n", (val >> 6) & 0x1f, s->mac_mii_data); } @@ -1127,7 +1027,7 @@ static void lan9118_writel(void *opaque, hwaddr offset, break; case CSR_PMT_CTRL: if (val & 0x400) { - phy_reset(s); + lan9118_phy_reset(&s->mii); } s->pmt_ctrl &= ~0x34e; s->pmt_ctrl |= (val & 0x34e); @@ -1374,6 +1274,13 @@ static void lan9118_realize(DeviceState *dev, Error **errp) const MemoryRegionOps *mem_ops = s->mode_16bit ? 
&lan9118_16bit_mem_ops : &lan9118_mem_ops; + qemu_init_irq(&s->mii_irq, lan9118_update_irq, s, 0); + object_initialize_child(OBJECT(s), "mii", &s->mii, TYPE_LAN9118_PHY); + if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(&s->mii), errp)) { + return; + } + qdev_connect_gpio_out(DEVICE(&s->mii), 0, &s->mii_irq); + memory_region_init_io(&s->mmio, OBJECT(dev), mem_ops, s, "lan9118-mmio", 0x100); sysbus_init_mmio(sbd, &s->mmio); @@ -1398,17 +1305,16 @@ static void lan9118_realize(DeviceState *dev, Error **errp) ptimer_transaction_commit(s->timer); } -static Property lan9118_properties[] = { +static const Property lan9118_properties[] = { DEFINE_NIC_PROPERTIES(lan9118_state, conf), DEFINE_PROP_UINT32("mode_16bit", lan9118_state, mode_16bit, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void lan9118_class_init(ObjectClass *klass, void *data) +static void lan9118_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = lan9118_reset; + device_class_set_legacy_reset(dc, lan9118_reset); device_class_set_props(dc, lan9118_properties); dc->vmsd = &vmstate_lan9118; dc->realize = lan9118_realize; diff --git a/hw/net/lan9118_phy.c b/hw/net/lan9118_phy.c new file mode 100644 index 00000000000..4c4e03df115 --- /dev/null +++ b/hw/net/lan9118_phy.c @@ -0,0 +1,222 @@ +/* + * SMSC LAN9118 PHY emulation + * + * Copyright (c) 2009 CodeSourcery, LLC. + * Written by Paul Brook + * + * Copyright (c) 2013 Jean-Christophe Dubois. + * + * This code is licensed under the GNU GPL v2 + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "hw/net/lan9118_phy.h" +#include "hw/net/mii.h" +#include "hw/irq.h" +#include "hw/resettable.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "trace.h" + +#define PHY_INT_ENERGYON (1 << 7) +#define PHY_INT_AUTONEG_COMPLETE (1 << 6) +#define PHY_INT_FAULT (1 << 5) +#define PHY_INT_DOWN (1 << 4) +#define PHY_INT_AUTONEG_LP (1 << 3) +#define PHY_INT_PARFAULT (1 << 2) +#define PHY_INT_AUTONEG_PAGE (1 << 1) + +static void lan9118_phy_update_irq(Lan9118PhyState *s) +{ + qemu_set_irq(s->irq, !!(s->ints & s->int_mask)); +} + +uint16_t lan9118_phy_read(Lan9118PhyState *s, int reg) +{ + uint16_t val; + + switch (reg) { + case MII_BMCR: + val = s->control; + break; + case MII_BMSR: + val = s->status; + break; + case MII_PHYID1: + val = SMSCLAN9118_PHYID1; + break; + case MII_PHYID2: + val = SMSCLAN9118_PHYID2; + break; + case MII_ANAR: + val = s->advertise; + break; + case MII_ANLPAR: + val = MII_ANLPAR_PAUSEASY | MII_ANLPAR_PAUSE | MII_ANLPAR_T4 | + MII_ANLPAR_TXFD | MII_ANLPAR_TX | MII_ANLPAR_10FD | + MII_ANLPAR_10 | MII_ANLPAR_CSMACD; + break; + case MII_ANER: + val = MII_ANER_NWAY; + break; + case 29: /* Interrupt source. 
*/ + val = s->ints; + s->ints = 0; + lan9118_phy_update_irq(s); + break; + case 30: /* Interrupt mask */ + val = s->int_mask; + break; + case 17: + case 18: + case 27: + case 31: + qemu_log_mask(LOG_UNIMP, "%s: reg %d not implemented\n", + __func__, reg); + val = 0; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address at offset %d\n", + __func__, reg); + val = 0; + break; + } + + trace_lan9118_phy_read(val, reg); + + return val; +} + +void lan9118_phy_write(Lan9118PhyState *s, int reg, uint16_t val) +{ + trace_lan9118_phy_write(val, reg); + + switch (reg) { + case MII_BMCR: + if (val & MII_BMCR_RESET) { + lan9118_phy_reset(s); + } else { + s->control = val & (MII_BMCR_LOOPBACK | MII_BMCR_SPEED100 | + MII_BMCR_AUTOEN | MII_BMCR_PDOWN | MII_BMCR_FD | + MII_BMCR_CTST); + /* Complete autonegotiation immediately. */ + if (val & MII_BMCR_AUTOEN) { + s->status |= MII_BMSR_AN_COMP; + } + } + break; + case MII_ANAR: + s->advertise = (val & (MII_ANAR_RFAULT | MII_ANAR_PAUSE_ASYM | + MII_ANAR_PAUSE | MII_ANAR_TXFD | MII_ANAR_10FD | + MII_ANAR_10 | MII_ANAR_SELECT)) + | MII_ANAR_TX; + break; + case 30: /* Interrupt mask */ + s->int_mask = val & 0xff; + lan9118_phy_update_irq(s); + break; + case 17: + case 18: + case 27: + case 31: + qemu_log_mask(LOG_UNIMP, "%s: reg %d not implemented\n", + __func__, reg); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address at offset %d\n", + __func__, reg); + break; + } +} + +void lan9118_phy_update_link(Lan9118PhyState *s, bool link_down) +{ + s->link_down = link_down; + + /* Autonegotiation status mirrors link status. */ + if (link_down) { + trace_lan9118_phy_update_link("down"); + s->status &= ~(MII_BMSR_AN_COMP | MII_BMSR_LINK_ST); + s->ints |= PHY_INT_DOWN; + } else { + trace_lan9118_phy_update_link("up"); + s->status |= MII_BMSR_AN_COMP | MII_BMSR_LINK_ST; + s->ints |= PHY_INT_ENERGYON; + s->ints |= PHY_INT_AUTONEG_COMPLETE; + } + lan9118_phy_update_irq(s); +} + +void lan9118_phy_reset(Lan9118PhyState *s) +{ + trace_lan9118_phy_reset(); + + s->control = MII_BMCR_AUTOEN | MII_BMCR_SPEED100; + s->status = MII_BMSR_100TX_FD + | MII_BMSR_100TX_HD + | MII_BMSR_10T_FD + | MII_BMSR_10T_HD + | MII_BMSR_AUTONEG + | MII_BMSR_EXTCAP; + s->advertise = MII_ANAR_TXFD + | MII_ANAR_TX + | MII_ANAR_10FD + | MII_ANAR_10 + | MII_ANAR_CSMACD; + s->int_mask = 0; + s->ints = 0; + lan9118_phy_update_link(s, s->link_down); +} + +static void lan9118_phy_reset_hold(Object *obj, ResetType type) +{ + Lan9118PhyState *s = LAN9118_PHY(obj); + + lan9118_phy_reset(s); +} + +static void lan9118_phy_init(Object *obj) +{ + Lan9118PhyState *s = LAN9118_PHY(obj); + + qdev_init_gpio_out(DEVICE(s), &s->irq, 1); +} + +static const VMStateDescription vmstate_lan9118_phy = { + .name = "lan9118-phy", + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT16(status, Lan9118PhyState), + VMSTATE_UINT16(control, Lan9118PhyState), + VMSTATE_UINT16(advertise, Lan9118PhyState), + VMSTATE_UINT16(ints, Lan9118PhyState), + VMSTATE_UINT16(int_mask, Lan9118PhyState), + VMSTATE_BOOL(link_down, Lan9118PhyState), + VMSTATE_END_OF_LIST() + } +}; + +static void lan9118_phy_class_init(ObjectClass *klass, const void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + rc->phases.hold = lan9118_phy_reset_hold; + dc->vmsd = &vmstate_lan9118_phy; +} + +static const TypeInfo types[] = { + { + .name = TYPE_LAN9118_PHY, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Lan9118PhyState), + 
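/*
 * Illustrative sketch, not part of the patch: MAC models are expected to
 * embed this PHY as a child object and wire its single GPIO output into
 * their own interrupt logic, as the lan9118 and imx_fec realize functions
 * in this series do:
 *
 *     qemu_init_irq(&s->mii_irq, mac_phy_irq_handler, s, 0);
 *     object_initialize_child(OBJECT(s), "mii", &s->mii, TYPE_LAN9118_PHY);
 *     if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(&s->mii), errp)) {
 *         return;
 *     }
 *     qdev_connect_gpio_out(DEVICE(&s->mii), 0, &s->mii_irq);
 *
 * mac_phy_irq_handler stands in for the MAC's own handler
 * (lan9118_update_irq or imx_phy_update_irq in the hunks above).
 */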
.instance_init = lan9118_phy_init, + .class_init = lan9118_phy_class_init, + } +}; + +DEFINE_TYPES(types) diff --git a/hw/net/lance.c b/hw/net/lance.c index e1ed24c2cea..dfb855c23a4 100644 --- a/hw/net/lance.c +++ b/hw/net/lance.c @@ -43,7 +43,7 @@ #include "hw/net/lance.h" #include "hw/qdev-properties.h" #include "trace.h" -#include "sysemu/sysemu.h" +#include "system/system.h" static void parent_lance_reset(void *opaque, int irq, int level) @@ -137,21 +137,20 @@ static void lance_instance_init(Object *obj) DEVICE(obj)); } -static Property lance_properties[] = { +static const Property lance_properties[] = { DEFINE_PROP_LINK("dma", SysBusPCNetState, state.dma_opaque, TYPE_DEVICE, DeviceState *), DEFINE_NIC_PROPERTIES(SysBusPCNetState, state.conf), - DEFINE_PROP_END_OF_LIST(), }; -static void lance_class_init(ObjectClass *klass, void *data) +static void lance_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = lance_realize; set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); dc->fw_name = "ethernet"; - dc->reset = lance_reset; + device_class_set_legacy_reset(dc, lance_reset); dc->vmsd = &vmstate_lance; device_class_set_props(dc, lance_properties); } diff --git a/hw/net/lasi_i82596.c b/hw/net/lasi_i82596.c index fcf7fae9411..9e1dd215467 100644 --- a/hw/net/lasi_i82596.c +++ b/hw/net/lasi_i82596.c @@ -14,7 +14,7 @@ #include "qapi/error.h" #include "qemu/timer.h" #include "hw/sysbus.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "net/eth.h" #include "hw/net/lasi_82596.h" #include "hw/net/i82596.h" @@ -158,19 +158,18 @@ static void lasi_82596_instance_init(Object *obj) DEVICE(obj)); } -static Property lasi_82596_properties[] = { +static const Property lasi_82596_properties[] = { DEFINE_NIC_PROPERTIES(SysBusI82596State, state.conf), - DEFINE_PROP_END_OF_LIST(), }; -static void lasi_82596_class_init(ObjectClass *klass, void *data) +static void lasi_82596_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = lasi_82596_realize; set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); dc->fw_name = "ethernet"; - dc->reset = lasi_82596_reset; + device_class_set_legacy_reset(dc, lasi_82596_reset); dc->vmsd = &vmstate_lasi_82596; dc->user_creatable = false; device_class_set_props(dc, lasi_82596_properties); diff --git a/hw/net/mcf_fec.c b/hw/net/mcf_fec.c index e6902716bd2..ae128fa3114 100644 --- a/hw/net/mcf_fec.c +++ b/hw/net/mcf_fec.c @@ -16,8 +16,7 @@ #include "hw/net/mii.h" #include "hw/qdev-properties.h" #include "hw/sysbus.h" -/* For crc32 */ -#include +#include /* for crc32 */ //#define DEBUG_FEC 1 @@ -661,19 +660,18 @@ static void mcf_fec_instance_init(Object *obj) } } -static Property mcf_fec_properties[] = { +static const Property mcf_fec_properties[] = { DEFINE_NIC_PROPERTIES(mcf_fec_state, conf), - DEFINE_PROP_END_OF_LIST(), }; -static void mcf_fec_class_init(ObjectClass *oc, void *data) +static void mcf_fec_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); dc->realize = mcf_fec_realize; dc->desc = "MCF Fast Ethernet Controller network device"; - dc->reset = mcf_fec_reset; + device_class_set_legacy_reset(dc, mcf_fec_reset); device_class_set_props(dc, mcf_fec_properties); } diff --git a/hw/net/meson.build b/hw/net/meson.build index b7426870e8d..e6759e26ca6 100644 --- a/hw/net/meson.build +++ b/hw/net/meson.build @@ -19,6 +19,7 @@ system_ss.add(when: 'CONFIG_VMXNET3_PCI', if_true: 
files('vmxnet3.c')) system_ss.add(when: 'CONFIG_SMC91C111', if_true: files('smc91c111.c')) system_ss.add(when: 'CONFIG_LAN9118', if_true: files('lan9118.c')) +system_ss.add(when: 'CONFIG_LAN9118_PHY', if_true: files('lan9118_phy.c')) system_ss.add(when: 'CONFIG_NE2000_ISA', if_true: files('ne2000-isa.c')) system_ss.add(when: 'CONFIG_OPENCORES_ETH', if_true: files('opencores_eth.c')) system_ss.add(when: 'CONFIG_XGMAC', if_true: files('xgmac.c')) @@ -39,8 +40,8 @@ system_ss.add(when: 'CONFIG_SUNHME', if_true: files('sunhme.c')) system_ss.add(when: 'CONFIG_FTGMAC100', if_true: files('ftgmac100.c')) system_ss.add(when: 'CONFIG_SUNGEM', if_true: files('sungem.c')) system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_emc.c', 'npcm_gmac.c')) +system_ss.add(when: 'CONFIG_NPCM8XX', if_true: files('npcm_pcs.c')) -system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_eth.c')) system_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_fec.c')) specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_llan.c')) system_ss.add(when: 'CONFIG_XILINX_ETHLITE', if_true: files('xilinx_ethlite.c')) diff --git a/hw/net/mipsnet.c b/hw/net/mipsnet.c index df5101aed73..583aa1c7de6 100644 --- a/hw/net/mipsnet.c +++ b/hw/net/mipsnet.c @@ -266,19 +266,18 @@ static void mipsnet_sysbus_reset(DeviceState *dev) mipsnet_reset(s); } -static Property mipsnet_properties[] = { +static const Property mipsnet_properties[] = { DEFINE_NIC_PROPERTIES(MIPSnetState, conf), - DEFINE_PROP_END_OF_LIST(), }; -static void mipsnet_class_init(ObjectClass *klass, void *data) +static void mipsnet_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = mipsnet_realize; set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); dc->desc = "MIPS Simulator network device"; - dc->reset = mipsnet_sysbus_reset; + device_class_set_legacy_reset(dc, mipsnet_sysbus_reset); dc->vmsd = &vmstate_mipsnet; device_class_set_props(dc, mipsnet_properties); } diff --git a/hw/net/msf2-emac.c b/hw/net/msf2-emac.c index c1fc10de2ab..59045973ab9 100644 --- a/hw/net/msf2-emac.c +++ b/hw/net/msf2-emac.c @@ -546,11 +546,10 @@ static void msf2_emac_init(Object *obj) sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); } -static Property msf2_emac_properties[] = { +static const Property msf2_emac_properties[] = { DEFINE_PROP_LINK("ahb-bus", MSF2EmacState, dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), DEFINE_NIC_PROPERTIES(MSF2EmacState, conf), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_msf2_emac = { @@ -566,12 +565,12 @@ static const VMStateDescription vmstate_msf2_emac = { } }; -static void msf2_emac_class_init(ObjectClass *klass, void *data) +static void msf2_emac_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = msf2_emac_realize; - dc->reset = msf2_emac_reset; + device_class_set_legacy_reset(dc, msf2_emac_reset); dc->vmsd = &vmstate_msf2_emac; device_class_set_props(dc, msf2_emac_properties); } diff --git a/hw/net/mv88w8618_eth.c b/hw/net/mv88w8618_eth.c index 96c65f4d462..6f08846c81c 100644 --- a/hw/net/mv88w8618_eth.c +++ b/hw/net/mv88w8618_eth.c @@ -12,7 +12,7 @@ #include "hw/irq.h" #include "hw/net/mv88w8618_eth.h" #include "migration/vmstate.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "net/net.h" #define MP_ETH_SIZE 0x00001000 @@ -371,14 +371,13 @@ static const VMStateDescription mv88w8618_eth_vmsd = { } }; -static Property mv88w8618_eth_properties[] = { +static const Property mv88w8618_eth_properties[] = { 
DEFINE_NIC_PROPERTIES(mv88w8618_eth_state, conf), DEFINE_PROP_LINK("dma-memory", mv88w8618_eth_state, dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void mv88w8618_eth_class_init(ObjectClass *klass, void *data) +static void mv88w8618_eth_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/net/ne2000-isa.c b/hw/net/ne2000-isa.c index 26980e087ee..673c785abc9 100644 --- a/hw/net/ne2000-isa.c +++ b/hw/net/ne2000-isa.c @@ -27,7 +27,7 @@ #include "hw/net/ne2000-isa.h" #include "migration/vmstate.h" #include "ne2000.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qapi/error.h" #include "qapi/visitor.h" #include "qemu/module.h" @@ -79,14 +79,13 @@ static void isa_ne2000_realizefn(DeviceState *dev, Error **errp) qemu_format_nic_info_str(qemu_get_queue(s->nic), s->c.macaddr.a); } -static Property ne2000_isa_properties[] = { +static const Property ne2000_isa_properties[] = { DEFINE_PROP_UINT32("iobase", ISANE2000State, iobase, 0x300), DEFINE_PROP_UINT32("irq", ISANE2000State, isairq, 9), DEFINE_NIC_PROPERTIES(ISANE2000State, ne2000.c), - DEFINE_PROP_END_OF_LIST(), }; -static void isa_ne2000_class_initfn(ObjectClass *klass, void *data) +static void isa_ne2000_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/net/ne2000-pci.c b/hw/net/ne2000-pci.c index 74773069c69..ce937e1b618 100644 --- a/hw/net/ne2000-pci.c +++ b/hw/net/ne2000-pci.c @@ -28,7 +28,7 @@ #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "ne2000.h" -#include "sysemu/sysemu.h" +#include "system/system.h" typedef struct PCINE2000State { PCIDevice dev; @@ -96,12 +96,11 @@ static void ne2000_instance_init(Object *obj) &pci_dev->qdev); } -static Property ne2000_properties[] = { +static const Property ne2000_properties[] = { DEFINE_NIC_PROPERTIES(PCINE2000State, ne2000.c), - DEFINE_PROP_END_OF_LIST(), }; -static void ne2000_class_init(ObjectClass *klass, void *data) +static void ne2000_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -123,7 +122,7 @@ static const TypeInfo ne2000_info = { .instance_size = sizeof(PCINE2000State), .class_init = ne2000_class_init, .instance_init = ne2000_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/net/ne2000.c b/hw/net/ne2000.c index b482c5f3af7..b1923c8c3e1 100644 --- a/hw/net/ne2000.c +++ b/hw/net/ne2000.c @@ -25,7 +25,7 @@ #include "qemu/osdep.h" #include "net/eth.h" #include "qemu/module.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/irq.h" #include "migration/vmstate.h" #include "ne2000.h" diff --git a/hw/net/net_rx_pkt.c b/hw/net/net_rx_pkt.c index 32e5f3f9cf7..f87b6f046cc 100644 --- a/hw/net/net_rx_pkt.c +++ b/hw/net/net_rx_pkt.c @@ -209,12 +209,6 @@ void net_rx_pkt_get_protocols(struct NetRxPkt *pkt, *l4hdr_proto = pkt->l4hdr_info.proto; } -size_t net_rx_pkt_get_l3_hdr_offset(struct NetRxPkt *pkt) -{ - assert(pkt); - return pkt->l3hdr_off; -} - size_t net_rx_pkt_get_l4_hdr_offset(struct NetRxPkt *pkt) { assert(pkt); @@ -375,8 +369,7 @@ net_rx_pkt_calc_rss_hash(struct NetRxPkt *pkt, _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length); break; default: - assert(false); - break; + g_assert_not_reached(); } net_toeplitz_key_init(&key_data, key); @@ -427,13 +420,6 @@ struct iovec 
*net_rx_pkt_get_iovec(struct NetRxPkt *pkt) return pkt->vec; } -uint16_t net_rx_pkt_get_iovec_len(struct NetRxPkt *pkt) -{ - assert(pkt); - - return pkt->vec_len; -} - void net_rx_pkt_set_vhdr(struct NetRxPkt *pkt, struct virtio_net_hdr *vhdr) { diff --git a/hw/net/net_rx_pkt.h b/hw/net/net_rx_pkt.h index 55ec67a1a7b..ea077f5fdb1 100644 --- a/hw/net/net_rx_pkt.h +++ b/hw/net/net_rx_pkt.h @@ -77,14 +77,6 @@ void net_rx_pkt_get_protocols(struct NetRxPkt *pkt, bool *hasip4, bool *hasip6, EthL4HdrProto *l4hdr_proto); -/** -* fetches L3 header offset -* -* @pkt: packet -* -*/ -size_t net_rx_pkt_get_l3_hdr_offset(struct NetRxPkt *pkt); - /** * fetches L4 header offset * @@ -267,15 +259,6 @@ net_rx_pkt_attach_data(struct NetRxPkt *pkt, const void *data, */ struct iovec *net_rx_pkt_get_iovec(struct NetRxPkt *pkt); -/** -* returns io vector length that holds the attached data -* -* @pkt: packet -* @ret: IOVec length -* -*/ -uint16_t net_rx_pkt_get_iovec_len(struct NetRxPkt *pkt); - /** * prints rx packet data if debug is enabled * diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c index 1f79b82b77f..903238dca24 100644 --- a/hw/net/net_tx_pkt.c +++ b/hw/net/net_tx_pkt.c @@ -141,10 +141,6 @@ bool net_tx_pkt_update_sctp_checksum(struct NetTxPkt *pkt) uint32_t csum = 0; struct iovec *pl_start_frag = pkt->vec + NET_TX_PKT_PL_START_FRAG; - if (iov_size(pl_start_frag, pkt->payload_frags) < 8 + sizeof(csum)) { - return false; - } - if (iov_from_buf(pl_start_frag, pkt->payload_frags, 8, &csum, sizeof(csum)) < sizeof(csum)) { return false; } diff --git a/hw/net/npcm7xx_emc.c b/hw/net/npcm7xx_emc.c index d1583b6f9b3..9ba35e2c819 100644 --- a/hw/net/npcm7xx_emc.c +++ b/hw/net/npcm7xx_emc.c @@ -29,8 +29,7 @@ #include "qemu/osdep.h" -/* For crc32 */ -#include +#include /* for crc32 */ #include "hw/irq.h" #include "hw/qdev-clock.h" @@ -43,7 +42,7 @@ #include "qemu/log.h" #include "qemu/module.h" #include "qemu/units.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "trace.h" #define CRC_LENGTH 4 @@ -846,12 +845,11 @@ static const VMStateDescription vmstate_npcm7xx_emc = { }, }; -static Property npcm7xx_emc_properties[] = { +static const Property npcm7xx_emc_properties[] = { DEFINE_NIC_PROPERTIES(NPCM7xxEMCState, conf), - DEFINE_PROP_END_OF_LIST(), }; -static void npcm7xx_emc_class_init(ObjectClass *klass, void *data) +static void npcm7xx_emc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -859,7 +857,7 @@ static void npcm7xx_emc_class_init(ObjectClass *klass, void *data) dc->desc = "NPCM7xx EMC Controller"; dc->realize = npcm7xx_emc_realize; dc->unrealize = npcm7xx_emc_unrealize; - dc->reset = npcm7xx_emc_reset; + device_class_set_legacy_reset(dc, npcm7xx_emc_reset); dc->vmsd = &vmstate_npcm7xx_emc; device_class_set_props(dc, npcm7xx_emc_properties); } diff --git a/hw/net/npcm_gmac.c b/hw/net/npcm_gmac.c index 1b71e2526e3..5e32cd3edf3 100644 --- a/hw/net/npcm_gmac.c +++ b/hw/net/npcm_gmac.c @@ -33,7 +33,7 @@ #include "qemu/cutils.h" #include "qemu/log.h" #include "qemu/units.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "trace.h" REG32(NPCM_DMA_BUS_MODE, 0x1000) @@ -516,8 +516,6 @@ static void gmac_try_send_next_packet(NPCMGMACState *gmac) uint32_t desc_addr; struct NPCMGMACTxDesc tx_desc; uint32_t tx_buf_addr, tx_buf_len; - uint16_t length = 0; - uint8_t *buf = tx_send_buffer; uint32_t prev_buf_size = 0; int csum = 0; @@ -546,9 +544,8 @@ static void gmac_try_send_next_packet(NPCMGMACState *gmac) /* 1 = DMA Owned, 0 = Software Owned */ 
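The npcm_gmac transmit hunks just below fix a capacity check that previously compared against sizeof() of a local pointer (always the pointer size) and now track the allocation explicitly, growing it with g_realloc() before each descriptor fragment is copied in. A self-contained sketch of that grow-and-append pattern, assuming GLib; the names are illustrative, and memcpy() stands in for the dma_memory_read() used by the real code:

#include <glib.h>
#include <stdint.h>
#include <string.h>

/* Illustrative sketch, not taken from the patch: grow a gather buffer as
 * descriptor fragments are appended, tracking the capacity explicitly. */
static uint8_t *append_fragment(uint8_t *buf, gsize *capacity, gsize used,
                                const uint8_t *frag, gsize frag_len)
{
    if (used + frag_len > *capacity) {
        *capacity = used + frag_len;
        buf = g_realloc(buf, *capacity);
    }
    memcpy(buf + used, frag, frag_len);
    return buf;
}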
if (!(tx_desc.tdes0 & TX_DESC_TDES0_OWN)) { - qemu_log_mask(LOG_GUEST_ERROR, - "TX Descriptor @ 0x%x is owned by software\n", - desc_addr); + trace_npcm_gmac_tx_desc_owner(DEVICE(gmac)->canonical_path, + desc_addr); gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_TU; gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT, NPCM_DMA_STATUS_TX_SUSPENDED_STATE); @@ -569,22 +566,20 @@ static void gmac_try_send_next_packet(NPCMGMACState *gmac) tx_buf_addr = tx_desc.tdes2; gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr; tx_buf_len = TX_DESC_TDES1_BFFR1_SZ_MASK(tx_desc.tdes1); - buf = &tx_send_buffer[prev_buf_size]; - if ((prev_buf_size + tx_buf_len) > sizeof(buf)) { + if ((prev_buf_size + tx_buf_len) > tx_buffer_size) { tx_buffer_size = prev_buf_size + tx_buf_len; tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size); - buf = &tx_send_buffer[prev_buf_size]; } /* step 5 */ - if (dma_memory_read(&address_space_memory, tx_buf_addr, buf, + if (dma_memory_read(&address_space_memory, tx_buf_addr, + tx_send_buffer + prev_buf_size, tx_buf_len, MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n", __func__, tx_buf_addr); return; } - length += tx_buf_len; prev_buf_size += tx_buf_len; /* If not chained we'll have a second buffer. */ @@ -592,30 +587,32 @@ static void gmac_try_send_next_packet(NPCMGMACState *gmac) tx_buf_addr = tx_desc.tdes3; gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr; tx_buf_len = TX_DESC_TDES1_BFFR2_SZ_MASK(tx_desc.tdes1); - buf = &tx_send_buffer[prev_buf_size]; - if ((prev_buf_size + tx_buf_len) > sizeof(buf)) { + if ((prev_buf_size + tx_buf_len) > tx_buffer_size) { tx_buffer_size = prev_buf_size + tx_buf_len; tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size); - buf = &tx_send_buffer[prev_buf_size]; } - if (dma_memory_read(&address_space_memory, tx_buf_addr, buf, + if (dma_memory_read(&address_space_memory, tx_buf_addr, + tx_send_buffer + prev_buf_size, tx_buf_len, MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n", __func__, tx_buf_addr); return; } - length += tx_buf_len; prev_buf_size += tx_buf_len; } if (tx_desc.tdes1 & TX_DESC_TDES1_LAST_SEG_MASK) { + /* + * This will truncate the packet at 64K. + * TODO: find out if this is the correct behaviour. 
+ */ + uint16_t length = prev_buf_size; net_checksum_calculate(tx_send_buffer, length, csum); qemu_send_packet(qemu_get_queue(gmac->nic), tx_send_buffer, length); trace_npcm_gmac_packet_sent(DEVICE(gmac)->canonical_path, length); - buf = tx_send_buffer; - length = 0; + prev_buf_size = 0; } /* step 6 */ @@ -913,12 +910,11 @@ static const VMStateDescription vmstate_npcm_gmac = { }, }; -static Property npcm_gmac_properties[] = { +static const Property npcm_gmac_properties[] = { DEFINE_NIC_PROPERTIES(NPCMGMACState, conf), - DEFINE_PROP_END_OF_LIST(), }; -static void npcm_gmac_class_init(ObjectClass *klass, void *data) +static void npcm_gmac_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -926,7 +922,7 @@ static void npcm_gmac_class_init(ObjectClass *klass, void *data) dc->desc = "NPCM GMAC Controller"; dc->realize = npcm_gmac_realize; dc->unrealize = npcm_gmac_unrealize; - dc->reset = npcm_gmac_reset; + device_class_set_legacy_reset(dc, npcm_gmac_reset); dc->vmsd = &vmstate_npcm_gmac; device_class_set_props(dc, npcm_gmac_properties); } diff --git a/hw/net/npcm_pcs.c b/hw/net/npcm_pcs.c new file mode 100644 index 00000000000..6aec105271a --- /dev/null +++ b/hw/net/npcm_pcs.c @@ -0,0 +1,410 @@ +/* + * Nuvoton NPCM8xx PCS Module + * + * Copyright 2022 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +/* + * Disclaimer: + * Currently we only implemented the default values of the registers and + * the soft reset feature. These are required to boot up the GMAC module + * in Linux kernel for NPCM845 boards. Other functionalities are not modeled. 
+ */ + +#include "qemu/osdep.h" + +#include "exec/hwaddr.h" +#include "hw/registerfields.h" +#include "hw/net/npcm_pcs.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/units.h" +#include "trace.h" + +#define NPCM_PCS_IND_AC_BA 0x1fe +#define NPCM_PCS_IND_SR_CTL 0x1e00 +#define NPCM_PCS_IND_SR_MII 0x1f00 +#define NPCM_PCS_IND_SR_TIM 0x1f07 +#define NPCM_PCS_IND_VR_MII 0x1f80 + +REG16(NPCM_PCS_SR_CTL_ID1, 0x08) +REG16(NPCM_PCS_SR_CTL_ID2, 0x0a) +REG16(NPCM_PCS_SR_CTL_STS, 0x10) + +REG16(NPCM_PCS_SR_MII_CTRL, 0x00) +REG16(NPCM_PCS_SR_MII_STS, 0x02) +REG16(NPCM_PCS_SR_MII_DEV_ID1, 0x04) +REG16(NPCM_PCS_SR_MII_DEV_ID2, 0x06) +REG16(NPCM_PCS_SR_MII_AN_ADV, 0x08) +REG16(NPCM_PCS_SR_MII_LP_BABL, 0x0a) +REG16(NPCM_PCS_SR_MII_AN_EXPN, 0x0c) +REG16(NPCM_PCS_SR_MII_EXT_STS, 0x1e) + +REG16(NPCM_PCS_SR_TIM_SYNC_ABL, 0x10) +REG16(NPCM_PCS_SR_TIM_SYNC_TX_MAX_DLY_LWR, 0x12) +REG16(NPCM_PCS_SR_TIM_SYNC_TX_MAX_DLY_UPR, 0x14) +REG16(NPCM_PCS_SR_TIM_SYNC_TX_MIN_DLY_LWR, 0x16) +REG16(NPCM_PCS_SR_TIM_SYNC_TX_MIN_DLY_UPR, 0x18) +REG16(NPCM_PCS_SR_TIM_SYNC_RX_MAX_DLY_LWR, 0x1a) +REG16(NPCM_PCS_SR_TIM_SYNC_RX_MAX_DLY_UPR, 0x1c) +REG16(NPCM_PCS_SR_TIM_SYNC_RX_MIN_DLY_LWR, 0x1e) +REG16(NPCM_PCS_SR_TIM_SYNC_RX_MIN_DLY_UPR, 0x20) + +REG16(NPCM_PCS_VR_MII_MMD_DIG_CTRL1, 0x000) +REG16(NPCM_PCS_VR_MII_AN_CTRL, 0x002) +REG16(NPCM_PCS_VR_MII_AN_INTR_STS, 0x004) +REG16(NPCM_PCS_VR_MII_TC, 0x006) +REG16(NPCM_PCS_VR_MII_DBG_CTRL, 0x00a) +REG16(NPCM_PCS_VR_MII_EEE_MCTRL0, 0x00c) +REG16(NPCM_PCS_VR_MII_EEE_TXTIMER, 0x010) +REG16(NPCM_PCS_VR_MII_EEE_RXTIMER, 0x012) +REG16(NPCM_PCS_VR_MII_LINK_TIMER_CTRL, 0x014) +REG16(NPCM_PCS_VR_MII_EEE_MCTRL1, 0x016) +REG16(NPCM_PCS_VR_MII_DIG_STS, 0x020) +REG16(NPCM_PCS_VR_MII_ICG_ERRCNT1, 0x022) +REG16(NPCM_PCS_VR_MII_MISC_STS, 0x030) +REG16(NPCM_PCS_VR_MII_RX_LSTS, 0x040) +REG16(NPCM_PCS_VR_MII_MP_TX_BSTCTRL0, 0x070) +REG16(NPCM_PCS_VR_MII_MP_TX_LVLCTRL0, 0x074) +REG16(NPCM_PCS_VR_MII_MP_TX_GENCTRL0, 0x07a) +REG16(NPCM_PCS_VR_MII_MP_TX_GENCTRL1, 0x07c) +REG16(NPCM_PCS_VR_MII_MP_TX_STS, 0x090) +REG16(NPCM_PCS_VR_MII_MP_RX_GENCTRL0, 0x0b0) +REG16(NPCM_PCS_VR_MII_MP_RX_GENCTRL1, 0x0b2) +REG16(NPCM_PCS_VR_MII_MP_RX_LOS_CTRL0, 0x0ba) +REG16(NPCM_PCS_VR_MII_MP_MPLL_CTRL0, 0x0f0) +REG16(NPCM_PCS_VR_MII_MP_MPLL_CTRL1, 0x0f2) +REG16(NPCM_PCS_VR_MII_MP_MPLL_STS, 0x110) +REG16(NPCM_PCS_VR_MII_MP_MISC_CTRL2, 0x126) +REG16(NPCM_PCS_VR_MII_MP_LVL_CTRL, 0x130) +REG16(NPCM_PCS_VR_MII_MP_MISC_CTRL0, 0x132) +REG16(NPCM_PCS_VR_MII_MP_MISC_CTRL1, 0x134) +REG16(NPCM_PCS_VR_MII_DIG_CTRL2, 0x1c2) +REG16(NPCM_PCS_VR_MII_DIG_ERRCNT_SEL, 0x1c4) + +/* Register Fields */ +#define NPCM_PCS_SR_MII_CTRL_RST BIT(15) + +static const uint16_t npcm_pcs_sr_ctl_cold_reset_values[NPCM_PCS_NR_SR_CTLS] = { + [R_NPCM_PCS_SR_CTL_ID1] = 0x699e, + [R_NPCM_PCS_SR_CTL_STS] = 0x8000, +}; + +static const uint16_t npcm_pcs_sr_mii_cold_reset_values[NPCM_PCS_NR_SR_MIIS] = { + [R_NPCM_PCS_SR_MII_CTRL] = 0x1140, + [R_NPCM_PCS_SR_MII_STS] = 0x0109, + [R_NPCM_PCS_SR_MII_DEV_ID1] = 0x699e, + [R_NPCM_PCS_SR_MII_DEV_ID2] = 0xced0, + [R_NPCM_PCS_SR_MII_AN_ADV] = 0x0020, + [R_NPCM_PCS_SR_MII_EXT_STS] = 0xc000, +}; + +static const uint16_t npcm_pcs_sr_tim_cold_reset_values[NPCM_PCS_NR_SR_TIMS] = { + [R_NPCM_PCS_SR_TIM_SYNC_ABL] = 0x0003, + [R_NPCM_PCS_SR_TIM_SYNC_TX_MAX_DLY_LWR] = 0x0038, + [R_NPCM_PCS_SR_TIM_SYNC_TX_MIN_DLY_LWR] = 0x0038, + [R_NPCM_PCS_SR_TIM_SYNC_RX_MAX_DLY_LWR] = 0x0058, + [R_NPCM_PCS_SR_TIM_SYNC_RX_MIN_DLY_LWR] = 0x0048, +}; + +static const uint16_t npcm_pcs_vr_mii_cold_reset_values[NPCM_PCS_NR_VR_MIIS] = 
{ + [R_NPCM_PCS_VR_MII_MMD_DIG_CTRL1] = 0x2400, + [R_NPCM_PCS_VR_MII_AN_INTR_STS] = 0x000a, + [R_NPCM_PCS_VR_MII_EEE_MCTRL0] = 0x899c, + [R_NPCM_PCS_VR_MII_DIG_STS] = 0x0010, + [R_NPCM_PCS_VR_MII_MP_TX_BSTCTRL0] = 0x000a, + [R_NPCM_PCS_VR_MII_MP_TX_LVLCTRL0] = 0x007f, + [R_NPCM_PCS_VR_MII_MP_TX_GENCTRL0] = 0x0001, + [R_NPCM_PCS_VR_MII_MP_RX_GENCTRL0] = 0x0100, + [R_NPCM_PCS_VR_MII_MP_RX_GENCTRL1] = 0x1100, + [R_NPCM_PCS_VR_MII_MP_RX_LOS_CTRL0] = 0x000e, + [R_NPCM_PCS_VR_MII_MP_MPLL_CTRL0] = 0x0100, + [R_NPCM_PCS_VR_MII_MP_MPLL_CTRL1] = 0x0032, + [R_NPCM_PCS_VR_MII_MP_MPLL_STS] = 0x0001, + [R_NPCM_PCS_VR_MII_MP_LVL_CTRL] = 0x0019, +}; + +static void npcm_pcs_soft_reset(NPCMPCSState *s) +{ + memcpy(s->sr_ctl, npcm_pcs_sr_ctl_cold_reset_values, + NPCM_PCS_NR_SR_CTLS * sizeof(uint16_t)); + memcpy(s->sr_mii, npcm_pcs_sr_mii_cold_reset_values, + NPCM_PCS_NR_SR_MIIS * sizeof(uint16_t)); + memcpy(s->sr_tim, npcm_pcs_sr_tim_cold_reset_values, + NPCM_PCS_NR_SR_TIMS * sizeof(uint16_t)); + memcpy(s->vr_mii, npcm_pcs_vr_mii_cold_reset_values, + NPCM_PCS_NR_VR_MIIS * sizeof(uint16_t)); +} + +static uint16_t npcm_pcs_read_sr_ctl(NPCMPCSState *s, hwaddr offset) +{ + hwaddr regno = offset / sizeof(uint16_t); + + if (regno >= NPCM_PCS_NR_SR_CTLS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: SR_CTL read offset 0x%04" HWADDR_PRIx + " is out of range.\n", + DEVICE(s)->canonical_path, offset); + return 0; + } + + return s->sr_ctl[regno]; +} + +static uint16_t npcm_pcs_read_sr_mii(NPCMPCSState *s, hwaddr offset) +{ + hwaddr regno = offset / sizeof(uint16_t); + + if (regno >= NPCM_PCS_NR_SR_MIIS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: SR_MII read offset 0x%04" HWADDR_PRIx + " is out of range.\n", + DEVICE(s)->canonical_path, offset); + return 0; + } + + return s->sr_mii[regno]; +} + +static uint16_t npcm_pcs_read_sr_tim(NPCMPCSState *s, hwaddr offset) +{ + hwaddr regno = offset / sizeof(uint16_t); + + if (regno >= NPCM_PCS_NR_SR_TIMS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: SR_TIM read offset 0x%04" HWADDR_PRIx + " is out of range.\n", + DEVICE(s)->canonical_path, offset); + return 0; + } + + return s->sr_tim[regno]; +} + +static uint16_t npcm_pcs_read_vr_mii(NPCMPCSState *s, hwaddr offset) +{ + hwaddr regno = offset / sizeof(uint16_t); + + if (regno >= NPCM_PCS_NR_VR_MIIS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: VR_MII read offset 0x%04" HWADDR_PRIx + " is out of range.\n", + DEVICE(s)->canonical_path, offset); + return 0; + } + + return s->vr_mii[regno]; +} + +static void npcm_pcs_write_sr_ctl(NPCMPCSState *s, hwaddr offset, uint16_t v) +{ + hwaddr regno = offset / sizeof(uint16_t); + + if (regno >= NPCM_PCS_NR_SR_CTLS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: SR_CTL write offset 0x%04" HWADDR_PRIx + " is out of range.\n", + DEVICE(s)->canonical_path, offset); + return; + } + + s->sr_ctl[regno] = v; +} + +static void npcm_pcs_write_sr_mii(NPCMPCSState *s, hwaddr offset, uint16_t v) +{ + hwaddr regno = offset / sizeof(uint16_t); + + if (regno >= NPCM_PCS_NR_SR_MIIS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: SR_MII write offset 0x%04" HWADDR_PRIx + " is out of range.\n", + DEVICE(s)->canonical_path, offset); + return; + } + + s->sr_mii[regno] = v; + + if ((offset == A_NPCM_PCS_SR_MII_CTRL) && (v & NPCM_PCS_SR_MII_CTRL_RST)) { + /* Trigger a soft reset */ + npcm_pcs_soft_reset(s); + } +} + +static void npcm_pcs_write_sr_tim(NPCMPCSState *s, hwaddr offset, uint16_t v) +{ + hwaddr regno = offset / sizeof(uint16_t); + + if (regno >= NPCM_PCS_NR_SR_TIMS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: SR_TIM write 
offset 0x%04" HWADDR_PRIx + " is out of range.\n", + DEVICE(s)->canonical_path, offset); + return; + } + + s->sr_tim[regno] = v; +} + +static void npcm_pcs_write_vr_mii(NPCMPCSState *s, hwaddr offset, uint16_t v) +{ + hwaddr regno = offset / sizeof(uint16_t); + + if (regno >= NPCM_PCS_NR_VR_MIIS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: VR_MII write offset 0x%04" HWADDR_PRIx + " is out of range.\n", + DEVICE(s)->canonical_path, offset); + return; + } + + s->vr_mii[regno] = v; +} + +static uint64_t npcm_pcs_read(void *opaque, hwaddr offset, unsigned size) +{ + NPCMPCSState *s = opaque; + uint16_t v = 0; + + if (offset == NPCM_PCS_IND_AC_BA) { + v = s->indirect_access_base; + } else { + switch (s->indirect_access_base) { + case NPCM_PCS_IND_SR_CTL: + v = npcm_pcs_read_sr_ctl(s, offset); + break; + + case NPCM_PCS_IND_SR_MII: + v = npcm_pcs_read_sr_mii(s, offset); + break; + + case NPCM_PCS_IND_SR_TIM: + v = npcm_pcs_read_sr_tim(s, offset); + break; + + case NPCM_PCS_IND_VR_MII: + v = npcm_pcs_read_vr_mii(s, offset); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Read with invalid indirect address base: 0x%" + PRIx16 "\n", DEVICE(s)->canonical_path, + s->indirect_access_base); + } + } + + trace_npcm_pcs_reg_read(DEVICE(s)->canonical_path, s->indirect_access_base, + offset, v); + return v; +} + +static void npcm_pcs_write(void *opaque, hwaddr offset, + uint64_t v, unsigned size) +{ + NPCMPCSState *s = opaque; + + trace_npcm_pcs_reg_write(DEVICE(s)->canonical_path, s->indirect_access_base, + offset, v); + if (offset == NPCM_PCS_IND_AC_BA) { + s->indirect_access_base = v; + } else { + switch (s->indirect_access_base) { + case NPCM_PCS_IND_SR_CTL: + npcm_pcs_write_sr_ctl(s, offset, v); + break; + + case NPCM_PCS_IND_SR_MII: + npcm_pcs_write_sr_mii(s, offset, v); + break; + + case NPCM_PCS_IND_SR_TIM: + npcm_pcs_write_sr_tim(s, offset, v); + break; + + case NPCM_PCS_IND_VR_MII: + npcm_pcs_write_vr_mii(s, offset, v); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write with invalid indirect address base: 0x%02" + PRIx16 "\n", DEVICE(s)->canonical_path, + s->indirect_access_base); + } + } +} + +static void npcm_pcs_enter_reset(Object *obj, ResetType type) +{ + NPCMPCSState *s = NPCM_PCS(obj); + + npcm_pcs_soft_reset(s); +} + +static const struct MemoryRegionOps npcm_pcs_ops = { + .read = npcm_pcs_read, + .write = npcm_pcs_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 2, + .max_access_size = 2, + .unaligned = false, + }, +}; + +static void npcm_pcs_realize(DeviceState *dev, Error **errp) +{ + NPCMPCSState *pcs = NPCM_PCS(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&pcs->iomem, OBJECT(pcs), &npcm_pcs_ops, pcs, + TYPE_NPCM_PCS, 8 * KiB); + sysbus_init_mmio(sbd, &pcs->iomem); +} + +static const VMStateDescription vmstate_npcm_pcs = { + .name = TYPE_NPCM_PCS, + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT16(indirect_access_base, NPCMPCSState), + VMSTATE_UINT16_ARRAY(sr_ctl, NPCMPCSState, NPCM_PCS_NR_SR_CTLS), + VMSTATE_UINT16_ARRAY(sr_mii, NPCMPCSState, NPCM_PCS_NR_SR_MIIS), + VMSTATE_UINT16_ARRAY(sr_tim, NPCMPCSState, NPCM_PCS_NR_SR_TIMS), + VMSTATE_UINT16_ARRAY(vr_mii, NPCMPCSState, NPCM_PCS_NR_VR_MIIS), + VMSTATE_END_OF_LIST(), + }, +}; + +static void npcm_pcs_class_init(ObjectClass *klass, const void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = 
"NPCM PCS Controller"; + dc->realize = npcm_pcs_realize; + dc->vmsd = &vmstate_npcm_pcs; + rc->phases.enter = npcm_pcs_enter_reset; +} + +static const TypeInfo npcm_pcs_types[] = { + { + .name = TYPE_NPCM_PCS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCMPCSState), + .class_init = npcm_pcs_class_init, + }, +}; +DEFINE_TYPES(npcm_pcs_types) diff --git a/hw/net/opencores_eth.c b/hw/net/opencores_eth.c index f96d6ea2ccf..7e955c01322 100644 --- a/hw/net/opencores_eth.c +++ b/hw/net/opencores_eth.c @@ -743,19 +743,18 @@ static void qdev_open_eth_reset(DeviceState *dev) open_eth_reset(d); } -static Property open_eth_properties[] = { +static const Property open_eth_properties[] = { DEFINE_NIC_PROPERTIES(OpenEthState, conf), - DEFINE_PROP_END_OF_LIST(), }; -static void open_eth_class_init(ObjectClass *klass, void *data) +static void open_eth_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = sysbus_open_eth_realize; set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); dc->desc = "Opencores 10/100 Mbit Ethernet"; - dc->reset = qdev_open_eth_reset; + device_class_set_legacy_reset(dc, qdev_open_eth_reset); device_class_set_props(dc, open_eth_properties); } diff --git a/hw/net/pcnet-pci.c b/hw/net/pcnet-pci.c index fe1a845b2b0..0ca5bc21938 100644 --- a/hw/net/pcnet-pci.c +++ b/hw/net/pcnet-pci.c @@ -35,8 +35,8 @@ #include "net/net.h" #include "qemu/module.h" #include "qemu/timer.h" -#include "sysemu/dma.h" -#include "sysemu/sysemu.h" +#include "system/dma.h" +#include "system/system.h" #include "trace.h" #include "pcnet.h" @@ -252,12 +252,11 @@ static void pcnet_instance_init(Object *obj) DEVICE(obj)); } -static Property pcnet_properties[] = { +static const Property pcnet_properties[] = { DEFINE_NIC_PROPERTIES(PCIPCNetState, state.conf), - DEFINE_PROP_END_OF_LIST(), }; -static void pcnet_class_init(ObjectClass *klass, void *data) +static void pcnet_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -269,7 +268,7 @@ static void pcnet_class_init(ObjectClass *klass, void *data) k->device_id = PCI_DEVICE_ID_AMD_LANCE; k->revision = 0x10; k->class_id = PCI_CLASS_NETWORK_ETHERNET; - dc->reset = pci_reset; + device_class_set_legacy_reset(dc, pci_reset); dc->vmsd = &vmstate_pci_pcnet; device_class_set_props(dc, pcnet_properties); set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); @@ -281,7 +280,7 @@ static const TypeInfo pcnet_info = { .instance_size = sizeof(PCIPCNetState), .class_init = pcnet_class_init, .instance_init = pcnet_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/net/pcnet.h b/hw/net/pcnet.h index eb7f46aab35..a94356ec30b 100644 --- a/hw/net/pcnet.h +++ b/hw/net/pcnet.h @@ -7,7 +7,7 @@ #define PCNET_LOOPTEST_CRC 1 #define PCNET_LOOPTEST_NOCRC 2 -#include "exec/memory.h" +#include "system/memory.h" #include "hw/irq.h" /* BUS CONFIGURATION REGISTERS */ diff --git a/hw/net/rocker/rocker-hmp-cmds.c b/hw/net/rocker/rocker-hmp-cmds.c index 197c6e28dc2..df40991f6d2 100644 --- a/hw/net/rocker/rocker-hmp-cmds.c +++ b/hw/net/rocker/rocker-hmp-cmds.c @@ -18,7 +18,7 @@ #include "monitor/monitor.h" #include "net/eth.h" #include "qapi/qapi-commands-rocker.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" void hmp_rocker(Monitor *mon, const QDict *qdict) { diff --git a/hw/net/rocker/rocker.c b/hw/net/rocker/rocker.c index 
7ea8eb6ba55..cc49701dd3c 100644 --- a/hw/net/rocker/rocker.c +++ b/hw/net/rocker/rocker.c @@ -134,11 +134,6 @@ RockerPortList *qmp_query_rocker_ports(const char *name, Error **errp) return list; } -uint32_t rocker_fp_ports(Rocker *r) -{ - return r->fp_ports; -} - static uint32_t rocker_get_pport_by_tx_ring(Rocker *r, DescRing *ring) { @@ -1464,7 +1459,7 @@ static void rocker_reset(DeviceState *dev) DPRINTF("Reset done\n"); } -static Property rocker_properties[] = { +static const Property rocker_properties[] = { DEFINE_PROP_STRING("name", Rocker, name), DEFINE_PROP_STRING("world", Rocker, world_name), DEFINE_PROP_MACADDR("fp_start_macaddr", Rocker, @@ -1473,7 +1468,6 @@ static Property rocker_properties[] = { switch_id, 0), DEFINE_PROP_ARRAY("ports", Rocker, fp_ports, fp_ports_peers, qdev_prop_netdev, NICPeers), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription rocker_vmsd = { @@ -1481,7 +1475,7 @@ static const VMStateDescription rocker_vmsd = { .unmigratable = 1, }; -static void rocker_class_init(ObjectClass *klass, void *data) +static void rocker_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -1494,7 +1488,7 @@ static void rocker_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_NETWORK_OTHER; set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); dc->desc = "Rocker Switch"; - dc->reset = rocker_reset; + device_class_set_legacy_reset(dc, rocker_reset); device_class_set_props(dc, rocker_properties); dc->vmsd = &rocker_vmsd; } @@ -1504,7 +1498,7 @@ static const TypeInfo rocker_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(Rocker), .class_init = rocker_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/net/rocker/rocker.h b/hw/net/rocker/rocker.h index f85354d9d13..ae06c1c72af 100644 --- a/hw/net/rocker/rocker.h +++ b/hw/net/rocker/rocker.h @@ -36,15 +36,7 @@ static inline G_GNUC_PRINTF(1, 2) int DPRINTF(const char *fmt, ...) 
} #endif -#define __le16 uint16_t -#define __le32 uint32_t -#define __le64 uint64_t - -#define __be16 uint16_t -#define __be32 uint32_t -#define __be64 uint64_t - -static inline bool ipv4_addr_is_multicast(__be32 addr) +static inline bool ipv4_addr_is_multicast(uint32_t addr) { return (addr & htonl(0xf0000000)) == htonl(0xe0000000); } @@ -52,8 +44,8 @@ static inline bool ipv4_addr_is_multicast(__be32 addr) typedef struct ipv6_addr { union { uint8_t addr8[16]; - __be16 addr16[8]; - __be32 addr32[4]; + uint16_t addr16[8]; + uint32_t addr32[4]; }; } Ipv6Addr; @@ -72,7 +64,6 @@ DECLARE_INSTANCE_CHECKER(Rocker, ROCKER, TYPE_ROCKER) Rocker *rocker_find(const char *name); -uint32_t rocker_fp_ports(Rocker *r); int rocker_event_link_changed(Rocker *r, uint32_t pport, bool link_up); int rocker_event_mac_vlan_seen(Rocker *r, uint32_t pport, uint8_t *addr, uint16_t vlan_id); diff --git a/hw/net/rocker/rocker_hw.h b/hw/net/rocker/rocker_hw.h index 1786323fa4a..7ec6bfbcb92 100644 --- a/hw/net/rocker/rocker_hw.h +++ b/hw/net/rocker/rocker_hw.h @@ -9,10 +9,6 @@ #ifndef ROCKER_HW_H #define ROCKER_HW_H -#define __le16 uint16_t -#define __le32 uint32_t -#define __le64 uint64_t - /* * Return codes */ @@ -124,12 +120,12 @@ enum { */ typedef struct rocker_desc { - __le64 buf_addr; + uint64_t buf_addr; uint64_t cookie; - __le16 buf_size; - __le16 tlv_size; - __le16 rsvd[5]; /* pad to 32 bytes */ - __le16 comp_err; + uint16_t buf_size; + uint16_t tlv_size; + uint16_t rsvd[5]; /* pad to 32 bytes */ + uint16_t comp_err; } __attribute__((packed, aligned(8))) RockerDesc; /* @@ -137,9 +133,9 @@ typedef struct rocker_desc { */ typedef struct rocker_tlv { - __le32 type; - __le16 len; - __le16 rsvd; + uint32_t type; + uint16_t len; + uint16_t rsvd; } __attribute__((packed, aligned(8))) RockerTlv; /* cmd msg */ diff --git a/hw/net/rocker/rocker_of_dpa.c b/hw/net/rocker/rocker_of_dpa.c index 5e16056be66..4aed1787566 100644 --- a/hw/net/rocker/rocker_of_dpa.c +++ b/hw/net/rocker/rocker_of_dpa.c @@ -52,10 +52,10 @@ typedef struct of_dpa_flow_key { uint32_t tunnel_id; /* overlay tunnel id */ uint32_t tbl_id; /* table id */ struct { - __be16 vlan_id; /* 0 if no VLAN */ + uint16_t vlan_id; /* 0 if no VLAN */ MACAddr src; /* ethernet source address */ MACAddr dst; /* ethernet destination address */ - __be16 type; /* ethernet frame type */ + uint16_t type; /* ethernet frame type */ } eth; struct { uint8_t proto; /* IP protocol or ARP opcode */ @@ -66,14 +66,14 @@ typedef struct of_dpa_flow_key { union { struct { struct { - __be32 src; /* IP source address */ - __be32 dst; /* IP destination address */ + uint32_t src; /* IP source address */ + uint32_t dst; /* IP destination address */ } addr; union { struct { - __be16 src; /* TCP/UDP/SCTP source port */ - __be16 dst; /* TCP/UDP/SCTP destination port */ - __be16 flags; /* TCP flags */ + uint16_t src; /* TCP/UDP/SCTP source port */ + uint16_t dst; /* TCP/UDP/SCTP destination port */ + uint16_t flags; /* TCP flags */ } tp; struct { MACAddr sha; /* ARP source hardware address */ @@ -86,11 +86,11 @@ typedef struct of_dpa_flow_key { Ipv6Addr src; /* IPv6 source address */ Ipv6Addr dst; /* IPv6 destination address */ } addr; - __be32 label; /* IPv6 flow label */ + uint32_t label; /* IPv6 flow label */ struct { - __be16 src; /* TCP/UDP/SCTP source port */ - __be16 dst; /* TCP/UDP/SCTP destination port */ - __be16 flags; /* TCP flags */ + uint16_t src; /* TCP/UDP/SCTP source port */ + uint16_t dst; /* TCP/UDP/SCTP destination port */ + uint16_t flags; /* TCP flags */ } tp; struct { 
Ipv6Addr target; /* ND target address */ @@ -112,13 +112,13 @@ typedef struct of_dpa_flow_action { struct { uint32_t group_id; uint32_t tun_log_lport; - __be16 vlan_id; + uint16_t vlan_id; } write; struct { - __be16 new_vlan_id; + uint16_t new_vlan_id; uint32_t out_pport; uint8_t copy_to_cpu; - __be16 vlan_id; + uint16_t vlan_id; } apply; } OfDpaFlowAction; @@ -143,7 +143,7 @@ typedef struct of_dpa_flow { typedef struct of_dpa_flow_pkt_fields { uint32_t tunnel_id; struct eth_header *ethhdr; - __be16 *h_proto; + uint16_t *h_proto; struct vlan_header *vlanhdr; struct ip_header *ipv4hdr; struct ip6_header *ipv6hdr; @@ -180,7 +180,7 @@ typedef struct of_dpa_group { uint32_t group_id; MACAddr src_mac; MACAddr dst_mac; - __be16 vlan_id; + uint16_t vlan_id; } l2_rewrite; struct { uint16_t group_count; @@ -190,13 +190,13 @@ typedef struct of_dpa_group { uint32_t group_id; MACAddr src_mac; MACAddr dst_mac; - __be16 vlan_id; + uint16_t vlan_id; uint8_t ttl_check; } l3_unicast; }; } OfDpaGroup; -static int of_dpa_mask2prefix(__be32 mask) +static int of_dpa_mask2prefix(uint32_t mask) { int i; int count = 32; @@ -451,7 +451,7 @@ static void of_dpa_flow_pkt_parse(OfDpaFlowContext *fc, fc->iovcnt = iovcnt + 2; } -static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, __be16 vlan_id) +static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, uint16_t vlan_id) { OfDpaFlowPktFields *fields = &fc->fields; uint16_t h_proto = fields->ethhdr->h_proto; @@ -486,7 +486,7 @@ static void of_dpa_flow_pkt_strip_vlan(OfDpaFlowContext *fc) static void of_dpa_flow_pkt_hdr_rewrite(OfDpaFlowContext *fc, uint8_t *src_mac, uint8_t *dst_mac, - __be16 vlan_id) + uint16_t vlan_id) { OfDpaFlowPktFields *fields = &fc->fields; @@ -1635,8 +1635,8 @@ static int of_dpa_cmd_add_multicast_routing(OfDpaFlow *flow, return ROCKER_OK; } -static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask, - RockerTlv **flow_tlvs) +static void of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask, + RockerTlv **flow_tlvs) { key->width = FLOW_KEY_WIDTH(ip.tos); @@ -1669,8 +1669,6 @@ static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask, mask->ip.tos |= rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) << 6; } - - return ROCKER_OK; } static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs) @@ -1689,7 +1687,6 @@ static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs) ACL_MODE_ANY_VLAN, ACL_MODE_ANY_TENANT, } mode = ACL_MODE_UNKNOWN; - int err = ROCKER_OK; if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] || !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) { @@ -1776,14 +1773,10 @@ static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs) switch (ntohs(key->eth.type)) { case 0x0800: case 0x86dd: - err = of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs); + of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs); break; } - if (err) { - return err; - } - if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) { action->write.group_id = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]); diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c index 03a204ef8ab..324fb932aac 100644 --- a/hw/net/rtl8139.c +++ b/hw/net/rtl8139.c @@ -48,20 +48,19 @@ * 2011-Mar-22 Benjamin Poirier: Implemented VLAN offloading */ -/* For crc32 */ - #include "qemu/osdep.h" -#include +#include /* for crc32 */ #include "hw/pci/pci_device.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qemu/module.h" #include "qemu/timer.h" +#include "qemu/bswap.h" #include 
"net/net.h" #include "net/eth.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qom/object.h" /* debug RTL8139 card */ @@ -1818,7 +1817,7 @@ static int rtl8139_transmit_one(RTL8139State *s, int descriptor) PCIDevice *d = PCI_DEVICE(s); int txsize = s->TxStatus[descriptor] & 0x1fff; - uint8_t txbuffer[0x2000]; + QEMU_UNINITIALIZED uint8_t txbuffer[0x2000]; DPRINTF("+++ transmit reading %d bytes from host memory at 0x%08x\n", txsize, s->TxAddr[descriptor]); @@ -3412,12 +3411,11 @@ static void rtl8139_instance_init(Object *obj) DEVICE(obj)); } -static Property rtl8139_properties[] = { +static const Property rtl8139_properties[] = { DEFINE_NIC_PROPERTIES(RTL8139State, conf), - DEFINE_PROP_END_OF_LIST(), }; -static void rtl8139_class_init(ObjectClass *klass, void *data) +static void rtl8139_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -3429,7 +3427,7 @@ static void rtl8139_class_init(ObjectClass *klass, void *data) k->device_id = PCI_DEVICE_ID_REALTEK_8139; k->revision = RTL8139_PCI_REVID; /* >=0x20 is for 8139C+ */ k->class_id = PCI_CLASS_NETWORK_ETHERNET; - dc->reset = rtl8139_reset; + device_class_set_legacy_reset(dc, rtl8139_reset); dc->vmsd = &vmstate_rtl8139; device_class_set_props(dc, rtl8139_properties); set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); @@ -3441,7 +3439,7 @@ static const TypeInfo rtl8139_info = { .instance_size = sizeof(RTL8139State), .class_init = rtl8139_class_init, .instance_init = rtl8139_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/net/smc91c111.c b/hw/net/smc91c111.c index 702d0e8e837..5cd78e334b6 100644 --- a/hw/net/smc91c111.c +++ b/hw/net/smc91c111.c @@ -13,16 +13,23 @@ #include "net/net.h" #include "hw/irq.h" #include "hw/net/smc91c111.h" +#include "hw/registerfields.h" #include "hw/qdev-properties.h" #include "qapi/error.h" #include "qemu/log.h" #include "qemu/module.h" -/* For crc32 */ -#include +#include /* for crc32 */ #include "qom/object.h" /* Number of 2k memory pages available. */ #define NUM_PACKETS 4 +/* + * Maximum size of a data frame, including the leading status word + * and byte count fields and the trailing CRC, last data byte + * and control byte (per figure 8-1 in the Microchip Technology + * LAN91C111 datasheet). + */ +#define MAX_PACKET_SIZE 2048 #define TYPE_SMC91C111 "smc91c111" OBJECT_DECLARE_SIMPLE_TYPE(smc91c111_state, SMC91C111) @@ -52,7 +59,7 @@ struct smc91c111_state { int tx_fifo_done_len; int tx_fifo_done[NUM_PACKETS]; /* Packet buffer memory. 
*/ - uint8_t data[NUM_PACKETS][2048]; + uint8_t data[NUM_PACKETS][MAX_PACKET_SIZE]; uint8_t int_level; uint8_t int_mask; MemoryRegion mmio; @@ -80,7 +87,8 @@ static const VMStateDescription vmstate_smc91c111 = { VMSTATE_INT32_ARRAY(rx_fifo, smc91c111_state, NUM_PACKETS), VMSTATE_INT32(tx_fifo_done_len, smc91c111_state), VMSTATE_INT32_ARRAY(tx_fifo_done, smc91c111_state, NUM_PACKETS), - VMSTATE_BUFFER_UNSAFE(data, smc91c111_state, 0, NUM_PACKETS * 2048), + VMSTATE_BUFFER_UNSAFE(data, smc91c111_state, 0, + NUM_PACKETS * MAX_PACKET_SIZE), VMSTATE_UINT8(int_level, smc91c111_state), VMSTATE_UINT8(int_mask, smc91c111_state), VMSTATE_END_OF_LIST() @@ -119,6 +127,18 @@ static const VMStateDescription vmstate_smc91c111 = { #define RS_TOOSHORT 0x0400 #define RS_MULTICAST 0x0001 +FIELD(PTR, PTR, 0, 11) +FIELD(PTR, NOT_EMPTY, 11, 1) +FIELD(PTR, RESERVED, 12, 1) +FIELD(PTR, READ, 13, 1) +FIELD(PTR, AUTOINCR, 14, 1) +FIELD(PTR, RCV, 15, 1) + +static inline bool packetnum_valid(int packet_num) +{ + return packet_num >= 0 && packet_num < NUM_PACKETS; +} + /* Update interrupt status. */ static void smc91c111_update(smc91c111_state *s) { @@ -183,6 +203,15 @@ static void smc91c111_pop_rx_fifo(smc91c111_state *s) { int i; + if (s->rx_fifo_len == 0) { + /* + * The datasheet doesn't document what the behaviour is if the + * guest tries to pop an empty RX FIFO, and there's no obvious + * error status register to report it. Just ignore the attempt. + */ + return; + } + s->rx_fifo_len--; if (s->rx_fifo_len) { for (i = 0; i < s->rx_fifo_len; i++) @@ -210,12 +239,33 @@ static void smc91c111_pop_tx_fifo_done(smc91c111_state *s) /* Release the memory allocated to a packet. */ static void smc91c111_release_packet(smc91c111_state *s, int packet) { + if (!packetnum_valid(packet)) { + /* + * Data sheet doesn't document behaviour in this guest error + * case, and there is no error status register to report it. + * Log and ignore the attempt. + */ + qemu_log_mask(LOG_GUEST_ERROR, + "smc91c111: attempt to release invalid packet %d\n", + packet); + return; + } s->allocated &= ~(1 << packet); if (s->tx_alloc == 0x80) smc91c111_tx_alloc(s); smc91c111_flush_queued_packets(s); } +static void smc91c111_complete_tx_packet(smc91c111_state *s, int packetnum) +{ + if (s->ctr & CTR_AUTO_RELEASE) { + /* Race? */ + smc91c111_release_packet(s, packetnum); + } else if (s->tx_fifo_done_len < NUM_PACKETS) { + s->tx_fifo_done[s->tx_fifo_done_len++] = packetnum; + } +} + /* Flush the TX FIFO. */ static void smc91c111_do_tx(smc91c111_state *s) { @@ -231,12 +281,25 @@ static void smc91c111_do_tx(smc91c111_state *s) return; for (i = 0; i < s->tx_fifo_len; i++) { packetnum = s->tx_fifo[i]; + /* queue_tx checked the packet number was valid */ + assert(packetnum_valid(packetnum)); p = &s->data[packetnum][0]; /* Set status word. */ *(p++) = 0x01; *(p++) = 0x40; len = *(p++); len |= ((int)*(p++)) << 8; + if (len > MAX_PACKET_SIZE) { + /* + * Datasheet doesn't say what to do here, and there is no + * relevant tx error condition listed. Log, and drop the packet. + */ + qemu_log_mask(LOG_GUEST_ERROR, + "smc91c111: tx packet with bad length %d, dropping\n", + len); + smc91c111_complete_tx_packet(s, packetnum); + continue; + } len -= 6; control = p[len + 1]; if (control & 0x20) @@ -265,11 +328,7 @@ static void smc91c111_do_tx(smc91c111_state *s) } } #endif - if (s->ctr & CTR_AUTO_RELEASE) - /* Race? 
*/ - smc91c111_release_packet(s, packetnum); - else if (s->tx_fifo_done_len < NUM_PACKETS) - s->tx_fifo_done[s->tx_fifo_done_len++] = packetnum; + smc91c111_complete_tx_packet(s, packetnum); qemu_send_packet(qemu_get_queue(s->nic), p, len); } s->tx_fifo_len = 0; @@ -279,6 +338,17 @@ static void smc91c111_do_tx(smc91c111_state *s) /* Add a packet to the TX FIFO. */ static void smc91c111_queue_tx(smc91c111_state *s, int packet) { + if (!packetnum_valid(packet)) { + /* + * Datasheet doesn't document behaviour in this error case, and + * there's no error status register we could report it in. + * Log and ignore. + */ + qemu_log_mask(LOG_GUEST_ERROR, + "smc91c111: attempt to queue invalid packet %d\n", + packet); + return; + } if (s->tx_fifo_len == NUM_PACKETS) return; s->tx_fifo[s->tx_fifo_len++] = packet; @@ -310,6 +380,49 @@ static void smc91c111_reset(DeviceState *dev) #define SET_LOW(name, val) s->name = (s->name & 0xff00) | val #define SET_HIGH(name, val) s->name = (s->name & 0xff) | (val << 8) +/* + * The pointer register's pointer is an 11 bit value (so it exactly + * indexes a 2048-byte data frame). Add the specified offset to it, + * wrapping around at the 2048 byte mark, and return the resulting + * wrapped value. There are flag bits in the top part of the register, + * but we can ignore them here as the mask will mask them out. + */ +static int ptr_reg_add(smc91c111_state *s, int offset) +{ + return (s->ptr + offset) & R_PTR_PTR_MASK; +} + +/* + * For an access to the Data Register at @offset, return the + * required offset into the packet's data frame. This will + * perform the pointer register autoincrement if required, and + * guarantees to return an in-bounds offset. + */ +static int data_reg_ptr(smc91c111_state *s, int offset) +{ + int p; + + if (s->ptr & R_PTR_AUTOINCR_MASK) { + /* + * Autoincrement: use the current pointer value, and + * increment the pointer register's pointer field. + */ + p = FIELD_EX32(s->ptr, PTR, PTR); + s->ptr = FIELD_DP32(s->ptr, PTR, PTR, ptr_reg_add(s, 1)); + } else { + /* + * No autoincrement: register offset determines which + * byte we're addressing. Setting the pointer to the top + * of the data buffer and then using the pointer wrapping + * to read the bottom byte of the buffer is not something + * sensible guest software will do, but the datasheet + * doesn't say what the behaviour is, so we don't forbid it. 
+ */ + p = ptr_reg_add(s, offset & 3); + } + return p; +} + static void smc91c111_writeb(void *opaque, hwaddr offset, uint32_t value) { @@ -449,12 +562,14 @@ static void smc91c111_writeb(void *opaque, hwaddr offset, n = s->rx_fifo[0]; else n = s->packet_num; - p = s->ptr & 0x07ff; - if (s->ptr & 0x4000) { - s->ptr = (s->ptr & 0xf800) | ((s->ptr + 1) & 0x7ff); - } else { - p += (offset & 3); + if (!packetnum_valid(n)) { + /* Datasheet doesn't document what to do here */ + qemu_log_mask(LOG_GUEST_ERROR, + "smc91c111: attempt to write data to invalid packet %d\n", + n); + return; } + p = data_reg_ptr(s, offset); s->data[n][p] = value; } return; @@ -597,12 +712,14 @@ static uint32_t smc91c111_readb(void *opaque, hwaddr offset) n = s->rx_fifo[0]; else n = s->packet_num; - p = s->ptr & 0x07ff; - if (s->ptr & 0x4000) { - s->ptr = (s->ptr & 0xf800) | ((s->ptr + 1) & 0x07ff); - } else { - p += (offset & 3); + if (!packetnum_valid(n)) { + /* Datasheet doesn't document what to do here */ + qemu_log_mask(LOG_GUEST_ERROR, + "smc91c111: attempt to read data from invalid packet %d\n", + n); + return 0; } + p = data_reg_ptr(s, offset); return s->data[n][p]; } case 12: /* Interrupt status. */ @@ -698,13 +815,16 @@ static ssize_t smc91c111_receive(NetClientState *nc, const uint8_t *buf, size_t if (crc) packetsize += 4; /* TODO: Flag overrun and receive errors. */ - if (packetsize > 2048) + if (packetsize > MAX_PACKET_SIZE) { return -1; + } packetnum = smc91c111_allocate_packet(s); if (packetnum == 0x80) return -1; s->rx_fifo[s->rx_fifo_len++] = packetnum; + /* allocate_packet() will not hand us back an invalid packet number */ + assert(packetnum_valid(packetnum)); p = &s->data[packetnum][0]; /* ??? Multicast packets? */ status = 0; @@ -789,17 +909,16 @@ static void smc91c111_realize(DeviceState *dev, Error **errp) /* ??? Save/restore. 
*/ } -static Property smc91c111_properties[] = { +static const Property smc91c111_properties[] = { DEFINE_NIC_PROPERTIES(smc91c111_state, conf), - DEFINE_PROP_END_OF_LIST(), }; -static void smc91c111_class_init(ObjectClass *klass, void *data) +static void smc91c111_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = smc91c111_realize; - dc->reset = smc91c111_reset; + device_class_set_legacy_reset(dc, smc91c111_reset); dc->vmsd = &vmstate_smc91c111; device_class_set_props(dc, smc91c111_properties); } diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c index 8af33d91b65..f6f217d6320 100644 --- a/hw/net/spapr_llan.c +++ b/hw/net/spapr_llan.c @@ -33,7 +33,7 @@ #include "hw/ppc/spapr.h" #include "hw/ppc/spapr_vio.h" #include "hw/qdev-properties.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "trace.h" #include @@ -786,12 +786,11 @@ static target_ulong h_change_logical_lan_mac(PowerPCCPU *cpu, return H_SUCCESS; } -static Property spapr_vlan_properties[] = { +static const Property spapr_vlan_properties[] = { DEFINE_SPAPR_PROPERTIES(SpaprVioVlan, sdev), DEFINE_NIC_PROPERTIES(SpaprVioVlan, nicconf), DEFINE_PROP_BIT("use-rx-buffer-pools", SpaprVioVlan, compat_flags, SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT, true), - DEFINE_PROP_END_OF_LIST(), }; static bool spapr_vlan_rx_buffer_pools_needed(void *opaque) @@ -849,7 +848,7 @@ static const VMStateDescription vmstate_spapr_llan = { } }; -static void spapr_vlan_class_init(ObjectClass *klass, void *data) +static void spapr_vlan_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass); diff --git a/hw/net/stellaris_enet.c b/hw/net/stellaris_enet.c index db95766e294..2fc51e1e16d 100644 --- a/hw/net/stellaris_enet.c +++ b/hw/net/stellaris_enet.c @@ -15,7 +15,7 @@ #include "net/net.h" #include "qemu/log.h" #include "qemu/module.h" -#include +#include /* for crc32 */ #include "qom/object.h" //#define DEBUG_STELLARIS_ENET 1 @@ -497,17 +497,16 @@ static void stellaris_enet_realize(DeviceState *dev, Error **errp) qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); } -static Property stellaris_enet_properties[] = { +static const Property stellaris_enet_properties[] = { DEFINE_NIC_PROPERTIES(stellaris_enet_state, conf), - DEFINE_PROP_END_OF_LIST(), }; -static void stellaris_enet_class_init(ObjectClass *klass, void *data) +static void stellaris_enet_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = stellaris_enet_realize; - dc->reset = stellaris_enet_reset; + device_class_set_legacy_reset(dc, stellaris_enet_reset); device_class_set_props(dc, stellaris_enet_properties); dc->vmsd = &vmstate_stellaris_enet; } diff --git a/hw/net/sungem.c b/hw/net/sungem.c index dd1b4a13446..b405eb89fa5 100644 --- a/hw/net/sungem.c +++ b/hw/net/sungem.c @@ -17,7 +17,7 @@ #include "net/eth.h" #include "net/checksum.h" #include "hw/net/mii.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "trace.h" #include "qom/object.h" @@ -1420,14 +1420,13 @@ static void sungem_instance_init(Object *obj) DEVICE(obj)); } -static Property sungem_properties[] = { +static const Property sungem_properties[] = { DEFINE_NIC_PROPERTIES(SunGEMState, conf), /* Phy address should be 0 for most Apple machines except * for K2 in which case it's 1. Will be set by a machine * override. 
*/ DEFINE_PROP_UINT32("phy_addr", SunGEMState, phy_addr, 0), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_sungem = { @@ -1455,7 +1454,7 @@ static const VMStateDescription vmstate_sungem = { } }; -static void sungem_class_init(ObjectClass *klass, void *data) +static void sungem_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -1467,7 +1466,7 @@ static void sungem_class_init(ObjectClass *klass, void *data) k->revision = 0x01; k->class_id = PCI_CLASS_NETWORK_ETHERNET; dc->vmsd = &vmstate_sungem; - dc->reset = sungem_reset; + device_class_set_legacy_reset(dc, sungem_reset); device_class_set_props(dc, sungem_properties); set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); } @@ -1478,7 +1477,7 @@ static const TypeInfo sungem_info = { .instance_size = sizeof(SunGEMState), .class_init = sungem_class_init, .instance_init = sungem_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { } } diff --git a/hw/net/sunhme.c b/hw/net/sunhme.c index ae8452e5f9f..c2f7a8483da 100644 --- a/hw/net/sunhme.c +++ b/hw/net/sunhme.c @@ -31,7 +31,7 @@ #include "qemu/module.h" #include "net/checksum.h" #include "net/eth.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "trace.h" #include "qom/object.h" @@ -177,9 +177,8 @@ struct SunHMEState { uint16_t miiregs[HME_MII_REGS_SIZE]; }; -static Property sunhme_properties[] = { +static const Property sunhme_properties[] = { DEFINE_NIC_PROPERTIES(SunHMEState, conf), - DEFINE_PROP_END_OF_LIST(), }; static void sunhme_reset_tx(SunHMEState *s) @@ -938,7 +937,7 @@ static const VMStateDescription vmstate_hme = { } }; -static void sunhme_class_init(ObjectClass *klass, void *data) +static void sunhme_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -948,7 +947,7 @@ static void sunhme_class_init(ObjectClass *klass, void *data) k->device_id = PCI_DEVICE_ID_SUN_HME; k->class_id = PCI_CLASS_NETWORK_ETHERNET; dc->vmsd = &vmstate_hme; - dc->reset = sunhme_reset; + device_class_set_legacy_reset(dc, sunhme_reset); device_class_set_props(dc, sunhme_properties); set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); } @@ -959,7 +958,7 @@ static const TypeInfo sunhme_info = { .class_init = sunhme_class_init, .instance_size = sizeof(SunHMEState), .instance_init = sunhme_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { } } diff --git a/hw/net/trace-events b/hw/net/trace-events index 78efa2ec2cc..72b69c4a8bb 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -10,10 +10,11 @@ allwinner_sun8i_emac_set_link(bool active) "Set link: active=%u" allwinner_sun8i_emac_read(uint64_t offset, uint64_t val) "MMIO read: offset=0x%" PRIx64 " value=0x%" PRIx64 allwinner_sun8i_emac_write(uint64_t offset, uint64_t val) "MMIO write: offset=0x%" PRIx64 " value=0x%" PRIx64 -# etraxfs_eth.c -mdio_phy_read(int regnum, uint16_t value) "read phy_reg:%d value:0x%04x" -mdio_phy_write(int regnum, uint16_t value) "write phy_reg:%d value:0x%04x" -mdio_bitbang(bool mdc, bool mdio, int state, uint16_t cnt, unsigned int drive) "bitbang mdc=%u mdio=%u state=%d cnt=%u drv=%d" +# lan9118_phy.c +lan9118_phy_read(uint16_t val, int reg) "[0x%02x] -> 0x%04" PRIx16 +lan9118_phy_write(uint16_t val, int reg) "[0x%02x] <- 0x%04" PRIx16 
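The smc91c111 hunks a little further up describe the pointer register as an 11-bit offset into a 2048-byte frame with an auto-increment flag, and encode that layout with hw/registerfields.h (FIELD, FIELD_EX32, FIELD_DP32). A plain-C equivalent of the same arithmetic, so the wrap-around and auto-increment behaviour can be read in isolation; the macro names below are illustrative, only the bit positions come from the patch:

#include <stdint.h>

#define PTR_PTR_MASK      0x07ff   /* bits [10:0]: offset within a 2048-byte frame */
#define PTR_AUTOINCR_MASK 0x4000   /* bit 14: auto-increment on data register access */

/* Illustrative sketch, not taken from the patch: compute the data-frame offset
 * for a data register access and apply the optional auto-increment. */
static unsigned data_reg_offset(uint16_t *ptr_reg, unsigned reg_offset)
{
    unsigned p;

    if (*ptr_reg & PTR_AUTOINCR_MASK) {
        p = *ptr_reg & PTR_PTR_MASK;
        /* bump only the pointer field, wrapping at 2048 */
        *ptr_reg = (*ptr_reg & ~PTR_PTR_MASK) | ((p + 1) & PTR_PTR_MASK);
    } else {
        /* no auto-increment: the byte lane of the access selects the byte */
        p = (*ptr_reg + (reg_offset & 3)) & PTR_PTR_MASK;
    }
    return p;   /* always within [0, 2047], so in bounds for data[][2048] */
}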
+lan9118_phy_update_link(const char *s) "%s" +lan9118_phy_reset(void) "" # lance.c lance_mem_readw(uint64_t addr, uint32_t ret) "addr=0x%"PRIx64"val=0x%04x" @@ -399,9 +400,11 @@ virtio_net_announce_notify(void) "" virtio_net_announce_timer(int round) "%d" virtio_net_handle_announce(int round) "%d" virtio_net_post_load_device(void) -virtio_net_rss_disable(void) -virtio_net_rss_error(const char *msg, uint32_t value) "%s, value 0x%08x" -virtio_net_rss_enable(uint32_t p1, uint16_t p2, uint8_t p3) "hashes 0x%x, table of %d, key of %d" +virtio_net_rss_load(void *nic, size_t nfds, void *fds) "nic=%p nfds=%zu fds=%p" +virtio_net_rss_attach_ebpf(void *nic, int prog_fd) "nic=%p prog-fd=%d" +virtio_net_rss_disable(void *nic) "nic=%p" +virtio_net_rss_error(void *nic, const char *msg, uint32_t value) "nic=%p msg=%s, value 0x%08x" +virtio_net_rss_enable(void *nic, uint32_t p1, uint16_t p2, uint8_t p3) "nic=%p hashes 0x%x, table of %d, key of %d" # tulip.c tulip_reg_write(uint64_t addr, const char *name, int size, uint64_t val) "addr 0x%02"PRIx64" (%s) size %d value 0x%08"PRIx64 @@ -431,12 +434,8 @@ i82596_set_multicast(uint16_t count) "Added %d multicast entries" i82596_channel_attention(void *s) "%p: Received CHANNEL ATTENTION" # imx_fec.c -imx_phy_read(uint32_t val, int phy, int reg) "0x%04"PRIx32" <= phy[%d].reg[%d]" imx_phy_read_num(int phy, int configured) "read request from unconfigured phy %d (configured %d)" -imx_phy_write(uint32_t val, int phy, int reg) "0x%04"PRIx32" => phy[%d].reg[%d]" imx_phy_write_num(int phy, int configured) "write request to unconfigured phy %d (configured %d)" -imx_phy_update_link(const char *s) "%s" -imx_phy_reset(void) "" imx_fec_read_bd(uint64_t addr, int flags, int len, int data) "tx_bd 0x%"PRIx64" flags 0x%04x len %d data 0x%08x" imx_enet_read_bd(uint64_t addr, int flags, int len, int data, int options, int status) "tx_bd 0x%"PRIx64" flags 0x%04x len %d data 0x%08x option 0x%04x status 0x%04x" imx_eth_tx_bd_busy(void) "tx_bd ran out of descriptors to transmit" @@ -481,10 +480,11 @@ npcm_gmac_packet_received(const char* name, uint32_t len) "%s: Reception finishe npcm_gmac_packet_sent(const char* name, uint16_t len) "%s: TX packet sent!, length: 0x%04" PRIX16 npcm_gmac_debug_desc_data(const char* name, void* addr, uint32_t des0, uint32_t des1, uint32_t des2, uint32_t des3)"%s: Address: %p Descriptor 0: 0x%04" PRIX32 " Descriptor 1: 0x%04" PRIX32 "Descriptor 2: 0x%04" PRIX32 " Descriptor 3: 0x%04" PRIX32 npcm_gmac_packet_tx_desc_data(const char* name, uint32_t tdes0, uint32_t tdes1) "%s: Tdes0: 0x%04" PRIX32 " Tdes1: 0x%04" PRIX32 +npcm_gmac_tx_desc_owner(const char* name, uint32_t desc_addr) "%s: TX Descriptor @0x%04" PRIX32 " is owned by software" # npcm_pcs.c -npcm_pcs_reg_read(const char *name, uint16_t indirect_access_baes, uint64_t offset, uint16_t value) "%s: IND: 0x%02" PRIx16 " offset: 0x%04" PRIx64 " value: 0x%04" PRIx16 -npcm_pcs_reg_write(const char *name, uint16_t indirect_access_baes, uint64_t offset, uint16_t value) "%s: IND: 0x%02" PRIx16 " offset: 0x%04" PRIx64 " value: 0x%04" PRIx16 +npcm_pcs_reg_read(const char *name, uint16_t indirect_access_base, uint64_t offset, uint16_t value) "%s: IND: 0x%02" PRIx16 " offset: 0x%04" PRIx64 " value: 0x%04" PRIx16 +npcm_pcs_reg_write(const char *name, uint16_t indirect_access_base, uint64_t offset, uint16_t value) "%s: IND: 0x%02" PRIx16 " offset: 0x%04" PRIx64 " value: 0x%04" PRIx16 # dp8398x.c dp8393x_raise_irq(int isr) "raise irq, isr is 0x%04x" @@ -513,3 +513,7 @@ xen_netdev_connect(int dev, unsigned int tx, 
unsigned int rx, int port) "vif%u t xen_netdev_frontend_changed(const char *dev, int state) "vif%s state %d" xen_netdev_tx(int dev, int ref, int off, int len, unsigned int flags, const char *c, const char *d, const char *m, const char *e) "vif%u ref %u off %u len %u flags 0x%x%s%s%s%s" xen_netdev_rx(int dev, int idx, int status, int flags) "vif%u idx %d status %d flags 0x%x" + +# xilinx_ethlite.c +ethlite_pkt_lost(uint32_t rx_ctrl) "rx_ctrl:0x%" PRIx32 +ethlite_pkt_size_too_big(uint64_t size) "size:0x%" PRIx64 diff --git a/hw/net/tulip.c b/hw/net/tulip.c index 1f2ef209775..319af906c8d 100644 --- a/hw/net/tulip.c +++ b/hw/net/tulip.c @@ -13,7 +13,7 @@ #include "hw/qdev-properties.h" #include "hw/nvram/eeprom93xx.h" #include "migration/vmstate.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "tulip.h" #include "trace.h" #include "net/eth.h" @@ -629,7 +629,7 @@ static void tulip_setup_filter_addr(TULIPState *s, uint8_t *buf, int n) static void tulip_setup_frame(TULIPState *s, struct tulip_descriptor *desc) { - uint8_t buf[4096]; + QEMU_UNINITIALIZED uint8_t buf[4096]; int len = (desc->control >> TDES1_BUF1_SIZE_SHIFT) & TDES1_BUF1_SIZE_MASK; int i; @@ -1007,12 +1007,11 @@ static void tulip_instance_init(Object *obj) &pci_dev->qdev); } -static Property tulip_properties[] = { +static const Property tulip_properties[] = { DEFINE_NIC_PROPERTIES(TULIPState, c), - DEFINE_PROP_END_OF_LIST(), }; -static void tulip_class_init(ObjectClass *klass, void *data) +static void tulip_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -1026,7 +1025,7 @@ static void tulip_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_NETWORK_ETHERNET; dc->vmsd = &vmstate_pci_tulip; device_class_set_props(dc, tulip_properties); - dc->reset = tulip_qdev_reset; + device_class_set_legacy_reset(dc, tulip_qdev_reset); set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); } @@ -1036,7 +1035,7 @@ static const TypeInfo tulip_info = { .instance_size = sizeof(TULIPState), .class_init = tulip_class_init, .instance_init = tulip_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/net/vhost_net-stub.c b/hw/net/vhost_net-stub.c index 72df6d757e4..7d49f82906a 100644 --- a/hw/net/vhost_net-stub.c +++ b/hw/net/vhost_net-stub.c @@ -13,7 +13,6 @@ #include "qemu/osdep.h" #include "net/net.h" #include "net/tap.h" -#include "net/vhost-user.h" #include "hw/virtio/virtio-net.h" #include "net/vhost_net.h" @@ -101,7 +100,7 @@ VHostNetState *get_vhost_net(NetClientState *nc) return 0; } -int vhost_set_vring_enable(NetClientState *nc, int enable) +int vhost_net_set_vring_enable(NetClientState *nc, int enable) { return 0; } diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c index a788e6937e0..540492b37dd 100644 --- a/hw/net/vhost_net.c +++ b/hw/net/vhost_net.c @@ -16,7 +16,6 @@ #include "qemu/osdep.h" #include "net/net.h" #include "net/tap.h" -#include "net/vhost-user.h" #include "net/vhost-vdpa.h" #include "standard-headers/linux/vhost_types.h" @@ -36,92 +35,9 @@ #include "hw/virtio/virtio-bus.h" #include "linux-headers/linux/vhost.h" - -/* Features supported by host kernel. 
*/ -static const int kernel_feature_bits[] = { - VIRTIO_F_NOTIFY_ON_EMPTY, - VIRTIO_RING_F_INDIRECT_DESC, - VIRTIO_RING_F_EVENT_IDX, - VIRTIO_NET_F_MRG_RXBUF, - VIRTIO_F_VERSION_1, - VIRTIO_NET_F_MTU, - VIRTIO_F_IOMMU_PLATFORM, - VIRTIO_F_RING_PACKED, - VIRTIO_F_RING_RESET, - VIRTIO_F_IN_ORDER, - VIRTIO_F_NOTIFICATION_DATA, - VIRTIO_NET_F_HASH_REPORT, - VHOST_INVALID_FEATURE_BIT -}; - -/* Features supported by others. */ -static const int user_feature_bits[] = { - VIRTIO_F_NOTIFY_ON_EMPTY, - VIRTIO_F_NOTIFICATION_DATA, - VIRTIO_RING_F_INDIRECT_DESC, - VIRTIO_RING_F_EVENT_IDX, - - VIRTIO_F_ANY_LAYOUT, - VIRTIO_F_VERSION_1, - VIRTIO_NET_F_CSUM, - VIRTIO_NET_F_GUEST_CSUM, - VIRTIO_NET_F_GSO, - VIRTIO_NET_F_GUEST_TSO4, - VIRTIO_NET_F_GUEST_TSO6, - VIRTIO_NET_F_GUEST_ECN, - VIRTIO_NET_F_GUEST_UFO, - VIRTIO_NET_F_HOST_TSO4, - VIRTIO_NET_F_HOST_TSO6, - VIRTIO_NET_F_HOST_ECN, - VIRTIO_NET_F_HOST_UFO, - VIRTIO_NET_F_MRG_RXBUF, - VIRTIO_NET_F_MTU, - VIRTIO_F_IOMMU_PLATFORM, - VIRTIO_F_RING_PACKED, - VIRTIO_F_RING_RESET, - VIRTIO_F_IN_ORDER, - VIRTIO_NET_F_RSS, - VIRTIO_NET_F_HASH_REPORT, - VIRTIO_NET_F_GUEST_USO4, - VIRTIO_NET_F_GUEST_USO6, - VIRTIO_NET_F_HOST_USO, - - /* This bit implies RARP isn't sent by QEMU out of band */ - VIRTIO_NET_F_GUEST_ANNOUNCE, - - VIRTIO_NET_F_MQ, - - VHOST_INVALID_FEATURE_BIT -}; - -static const int *vhost_net_get_feature_bits(struct vhost_net *net) -{ - const int *feature_bits = 0; - - switch (net->nc->info->type) { - case NET_CLIENT_DRIVER_TAP: - feature_bits = kernel_feature_bits; - break; - case NET_CLIENT_DRIVER_VHOST_USER: - feature_bits = user_feature_bits; - break; -#ifdef CONFIG_VHOST_NET_VDPA - case NET_CLIENT_DRIVER_VHOST_VDPA: - feature_bits = vdpa_feature_bits; - break; -#endif - default: - error_report("Feature bits not defined for this type: %d", - net->nc->info->type); - break; - } - - return feature_bits; -} - uint64_t vhost_net_get_features(struct vhost_net *net, uint64_t features) { - return vhost_get_features(&net->dev, vhost_net_get_feature_bits(net), + return vhost_get_features(&net->dev, net->feature_bits, features); } int vhost_net_get_config(struct vhost_net *net, uint8_t *config, @@ -138,7 +54,7 @@ int vhost_net_set_config(struct vhost_net *net, const uint8_t *data, void vhost_net_ack_features(struct vhost_net *net, uint64_t features) { net->dev.acked_features = net->dev.backend_features; - vhost_ack_features(&net->dev, vhost_net_get_feature_bits(net), features); + vhost_ack_features(&net->dev, net->feature_bits, features); } uint64_t vhost_net_get_max_queues(VHostNetState *net) @@ -153,11 +69,153 @@ uint64_t vhost_net_get_acked_features(VHostNetState *net) void vhost_net_save_acked_features(NetClientState *nc) { -#ifdef CONFIG_VHOST_NET_USER - if (nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) { - vhost_user_save_acked_features(nc); + struct vhost_net *net = get_vhost_net(nc); + + if (net && net->save_acked_features) { + net->save_acked_features(nc); + } +} + +static void vhost_net_disable_notifiers_nvhosts(VirtIODevice *dev, + NetClientState *ncs, int data_queue_pairs, int nvhosts) +{ + VirtIONet *n = VIRTIO_NET(dev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev))); + struct vhost_net *net; + struct vhost_dev *hdev; + int r, i, j; + NetClientState *peer; + + /* + * Batch all the host notifiers in a single transaction to avoid + * quadratic time complexity in address_space_update_ioeventfds(). 
+ */ + memory_region_transaction_begin(); + + for (i = 0; i < nvhosts; i++) { + if (i < data_queue_pairs) { + peer = qemu_get_peer(ncs, i); + } else { + peer = qemu_get_peer(ncs, n->max_queue_pairs); + } + + net = get_vhost_net(peer); + hdev = &net->dev; + for (j = 0; j < hdev->nvqs; j++) { + r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), + hdev->vq_index + j, + false); + if (r < 0) { + error_report("vhost %d VQ %d notifier cleanup failed: %d", + i, j, -r); + } + assert(r >= 0); + } + } + /* + * The transaction expects the ioeventfds to be open when it + * commits. Do it now, before the cleanup loop. + */ + memory_region_transaction_commit(); + + for (i = 0; i < nvhosts; i++) { + if (i < data_queue_pairs) { + peer = qemu_get_peer(ncs, i); + } else { + peer = qemu_get_peer(ncs, n->max_queue_pairs); + } + + net = get_vhost_net(peer); + hdev = &net->dev; + for (j = 0; j < hdev->nvqs; j++) { + virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), + hdev->vq_index + j); + } + virtio_device_release_ioeventfd(dev); } -#endif +} + +static int vhost_net_enable_notifiers(VirtIODevice *dev, + NetClientState *ncs, int data_queue_pairs, int cvq) +{ + VirtIONet *n = VIRTIO_NET(dev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev))); + int nvhosts = data_queue_pairs + cvq; + struct vhost_net *net; + struct vhost_dev *hdev; + int r, i, j, k; + NetClientState *peer; + + /* + * We will pass the notifiers to the kernel, make sure that QEMU + * doesn't interfere. + */ + for (i = 0; i < nvhosts; i++) { + r = virtio_device_grab_ioeventfd(dev); + if (r < 0) { + error_report("vhost %d binding does not support host notifiers", i); + for (k = 0; k < i; k++) { + virtio_device_release_ioeventfd(dev); + } + return r; + } + } + + /* + * Batch all the host notifiers in a single transaction to avoid + * quadratic time complexity in address_space_update_ioeventfds(). + */ + memory_region_transaction_begin(); + + for (i = 0; i < nvhosts; i++) { + if (i < data_queue_pairs) { + peer = qemu_get_peer(ncs, i); + } else { + peer = qemu_get_peer(ncs, n->max_queue_pairs); + } + + net = get_vhost_net(peer); + hdev = &net->dev; + + for (j = 0; j < hdev->nvqs; j++) { + r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), + hdev->vq_index + j, + true); + if (r < 0) { + error_report("vhost %d VQ %d notifier binding failed: %d", + i, j, -r); + memory_region_transaction_commit(); + vhost_dev_disable_notifiers_nvqs(hdev, dev, j); + goto fail_nvhosts; + } + } + } + + memory_region_transaction_commit(); + + return 0; +fail_nvhosts: + vhost_net_disable_notifiers_nvhosts(dev, ncs, data_queue_pairs, i); + /* + * This for loop starts from i+1, not i, because the i-th ioeventfd + * has already been released in vhost_dev_disable_notifiers_nvqs(). + */ + for (k = i + 1; k < nvhosts; k++) { + virtio_device_release_ioeventfd(dev); + } + + return r; +} + +/* + * Stop processing guest IO notifications in qemu. + * Start processing them in vhost in kernel. 
+ */ +static void vhost_net_disable_notifiers(VirtIODevice *dev, + NetClientState *ncs, int data_queue_pairs, int cvq) +{ + vhost_net_disable_notifiers_nvhosts(dev, ncs, data_queue_pairs, + data_queue_pairs + cvq); } static int vhost_net_get_fd(NetClientState *backend) @@ -185,6 +243,10 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) } net->nc = options->net_backend; net->dev.nvqs = options->nvqs; + net->feature_bits = options->feature_bits; + net->save_acked_features = options->save_acked_features; + net->max_tx_queue_size = options->max_tx_queue_size; + net->is_vhost_user = options->is_vhost_user; net->dev.max_queues = 1; net->dev.vqs = net->vqs; @@ -228,9 +290,8 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) } /* Set sane init value. Override when guest acks. */ -#ifdef CONFIG_VHOST_NET_USER - if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) { - features = vhost_user_get_acked_features(net->nc); + if (options->get_acked_features) { + features = options->get_acked_features(net->nc); if (~net->dev.features & features) { fprintf(stderr, "vhost lacks feature mask 0x%" PRIx64 " for backend\n", @@ -238,7 +299,6 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) goto fail; } } -#endif vhost_net_ack_features(net, features); @@ -270,11 +330,6 @@ static int vhost_net_start_one(struct vhost_net *net, } } - r = vhost_dev_enable_notifiers(&net->dev, dev); - if (r < 0) { - goto fail_notifiers; - } - r = vhost_dev_start(&net->dev, dev, false); if (r < 0) { goto fail_start; @@ -326,8 +381,6 @@ static int vhost_net_start_one(struct vhost_net *net, } vhost_dev_stop(&net->dev, dev, false); fail_start: - vhost_dev_disable_notifiers(&net->dev, dev); -fail_notifiers: return r; } @@ -349,7 +402,6 @@ static void vhost_net_stop_one(struct vhost_net *net, if (net->nc->info->stop) { net->nc->info->stop(net->nc); } - vhost_dev_disable_notifiers(&net->dev, dev); } int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, @@ -389,15 +441,21 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, * because vhost user doesn't interrupt masking/unmasking * properly. */ - if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) { + if (net->is_vhost_user) { dev->use_guest_notifier_mask = false; } } + r = vhost_net_enable_notifiers(dev, ncs, data_queue_pairs, cvq); + if (r < 0) { + error_report("Error enabling host notifiers: %d", -r); + goto err; + } + r = k->set_guest_notifiers(qbus->parent, total_notifiers, true); if (r < 0) { error_report("Error binding guest notifier: %d", -r); - goto err; + goto err_host_notifiers; } for (i = 0; i < nvhosts; i++) { @@ -409,22 +467,22 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, if (peer->vring_enable) { /* restore vring enable state */ - r = vhost_set_vring_enable(peer, peer->vring_enable); + r = vhost_net_set_vring_enable(peer, peer->vring_enable); if (r < 0) { - goto err_start; + goto err_guest_notifiers; } } r = vhost_net_start_one(get_vhost_net(peer), dev); if (r < 0) { - goto err_start; + goto err_guest_notifiers; } } return 0; -err_start: +err_guest_notifiers: while (--i >= 0) { peer = qemu_get_peer(ncs, i < data_queue_pairs ? 
i : n->max_queue_pairs); @@ -435,6 +493,8 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e); fflush(stderr); } +err_host_notifiers: + vhost_net_disable_notifiers(dev, ncs, data_queue_pairs, cvq); err: return r; } @@ -466,6 +526,8 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, fflush(stderr); } assert(r >= 0); + + vhost_net_disable_notifiers(dev, ncs, data_queue_pairs, cvq); } void vhost_net_cleanup(struct vhost_net *net) @@ -503,44 +565,21 @@ void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev, bool mask) { vhost_config_mask(&net->dev, dev, mask); } + VHostNetState *get_vhost_net(NetClientState *nc) { - VHostNetState *vhost_net = 0; - if (!nc) { return 0; } - switch (nc->info->type) { - case NET_CLIENT_DRIVER_TAP: - vhost_net = tap_get_vhost_net(nc); - /* - * tap_get_vhost_net() can return NULL if a tap net-device backend is - * created with 'vhost=off' option, 'vhostforce=off' or no vhost or - * vhostforce or vhostfd options at all. Please see net_init_tap_one(). - * Hence, we omit the assertion here. - */ - break; -#ifdef CONFIG_VHOST_NET_USER - case NET_CLIENT_DRIVER_VHOST_USER: - vhost_net = vhost_user_get_vhost_net(nc); - assert(vhost_net); - break; -#endif -#ifdef CONFIG_VHOST_NET_VDPA - case NET_CLIENT_DRIVER_VHOST_VDPA: - vhost_net = vhost_vdpa_get_vhost_net(nc); - assert(vhost_net); - break; -#endif - default: - break; + if (nc->info->get_vhost_net) { + return nc->info->get_vhost_net(nc); } - return vhost_net; + return NULL; } -int vhost_set_vring_enable(NetClientState *nc, int enable) +int vhost_net_set_vring_enable(NetClientState *nc, int enable) { VHostNetState *net = get_vhost_net(nc); const VhostOps *vhost_ops = net->dev.vhost_ops; diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 08aa0b65e39..6b5b5dace33 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -26,7 +26,7 @@ #include "qemu/option.h" #include "qemu/option_int.h" #include "qemu/config-file.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "hw/virtio/virtio-net.h" #include "net/vhost_net.h" #include "net/announce.h" @@ -39,14 +39,15 @@ #include "hw/virtio/virtio-access.h" #include "migration/misc.h" #include "standard-headers/linux/ethtool.h" -#include "sysemu/sysemu.h" +#include "system/system.h" +#include "system/replay.h" #include "trace.h" #include "monitor/qdev.h" #include "monitor/monitor.h" #include "hw/pci/pci_device.h" #include "net_rx_pkt.h" #include "hw/virtio/vhost.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" #define VIRTIO_NET_VM_VERSION 11 @@ -157,7 +158,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config) virtio_host_has_feature(vdev, VIRTIO_NET_F_RSS) ? 
VIRTIO_NET_RSS_MAX_TABLE_LEN : 1); virtio_stl_p(vdev, &netcfg.supported_hash_types, - VIRTIO_NET_RSS_SUPPORTED_HASHES); + n->rss_data.supported_hash_types); memcpy(config, &netcfg, n->config_size); /* @@ -381,7 +382,7 @@ static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq) } } -static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status) +static int virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status) { VirtIONet *n = VIRTIO_NET(vdev); VirtIONetQueue *q; @@ -417,7 +418,7 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status) timer_mod(q->tx_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); } else { - qemu_bh_schedule(q->tx_bh); + replay_bh_schedule_event(q->tx_bh); } } else { if (q->tx_timer) { @@ -436,6 +437,7 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status) } } } + return 0; } static void virtio_net_set_link_status(NetClientState *nc) @@ -668,34 +670,36 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs, static int virtio_net_max_tx_queue_size(VirtIONet *n) { NetClientState *peer = n->nic_conf.peers.ncs[0]; + struct vhost_net *net; - /* - * Backends other than vhost-user or vhost-vdpa don't support max queue - * size. - */ if (!peer) { - return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE; + goto default_value; } - switch(peer->info->type) { - case NET_CLIENT_DRIVER_VHOST_USER: - case NET_CLIENT_DRIVER_VHOST_VDPA: - return VIRTQUEUE_MAX_SIZE; - default: - return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE; - }; + net = get_vhost_net(peer); + + if (!net || !net->max_tx_queue_size) { + goto default_value; + } + + return net->max_tx_queue_size; + +default_value: + return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE; } static int peer_attach(VirtIONet *n, int index) { NetClientState *nc = qemu_get_subqueue(n->nic, index); + struct vhost_net *net; if (!nc->peer) { return 0; } - if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) { - vhost_set_vring_enable(nc->peer, 1); + net = get_vhost_net(nc->peer); + if (net && net->is_vhost_user) { + vhost_net_set_vring_enable(nc->peer, 1); } if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) { @@ -712,13 +716,15 @@ static int peer_attach(VirtIONet *n, int index) static int peer_detach(VirtIONet *n, int index) { NetClientState *nc = qemu_get_subqueue(n->nic, index); + struct vhost_net *net; if (!nc->peer) { return 0; } - if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) { - vhost_set_vring_enable(nc->peer, 0); + net = get_vhost_net(nc->peer); + if (net && net->is_vhost_user) { + vhost_net_set_vring_enable(nc->peer, 0); } if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) { @@ -750,79 +756,6 @@ static void virtio_net_set_queue_pairs(VirtIONet *n) static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue); -static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, - Error **errp) -{ - VirtIONet *n = VIRTIO_NET(vdev); - NetClientState *nc = qemu_get_queue(n->nic); - - /* Firstly sync all virtio-net possible supported features */ - features |= n->host_features; - - virtio_add_feature(&features, VIRTIO_NET_F_MAC); - - if (!peer_has_vnet_hdr(n)) { - virtio_clear_feature(&features, VIRTIO_NET_F_CSUM); - virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4); - virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6); - virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN); - - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4); - 
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN); - - virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6); - - virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT); - } - - if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) { - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO); - virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO); - } - - if (!peer_has_uso(n)) { - virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6); - } - - if (!get_vhost_net(nc->peer)) { - return features; - } - - if (!ebpf_rss_is_loaded(&n->ebpf_rss)) { - virtio_clear_feature(&features, VIRTIO_NET_F_RSS); - } - features = vhost_net_get_features(get_vhost_net(nc->peer), features); - vdev->backend_features = features; - - if (n->mtu_bypass_backend && - (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) { - features |= (1ULL << VIRTIO_NET_F_MTU); - } - - /* - * Since GUEST_ANNOUNCE is emulated the feature bit could be set without - * enabled. This happens in the vDPA case. - * - * Make sure the feature set is not incoherent, as the driver could refuse - * to start. - * - * TODO: QEMU is able to emulate a CVQ just for guest_announce purposes, - * helping guest to notify the new location with vDPA devices that does not - * support it. - */ - if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) { - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE); - } - - return features; -} - static uint64_t virtio_net_bad_features(VirtIODevice *vdev) { uint64_t features = 0; @@ -996,8 +929,9 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) vhost_net_save_acked_features(nc->peer); } - if (!virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) { - memset(n->vlans, 0xff, MAX_VLAN >> 3); + if (virtio_has_feature(vdev->guest_features ^ features, VIRTIO_NET_F_CTRL_VLAN)) { + bool vlan = virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN); + memset(n->vlans, vlan ? 
0 : 0xff, MAX_VLAN >> 3); } if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) { @@ -1240,6 +1174,7 @@ static bool virtio_net_attach_ebpf_to_backend(NICState *nic, int prog_fd) return false; } + trace_virtio_net_rss_attach_ebpf(nic, prog_fd); return nc->info->set_steering_ebpf(nc, prog_fd); } @@ -1248,12 +1183,12 @@ static void rss_data_to_rss_config(struct VirtioNetRssData *data, { config->redirect = data->redirect; config->populate_hash = data->populate_hash; - config->hash_types = data->hash_types; + config->hash_types = data->runtime_hash_types; config->indirections_len = data->indirections_len; config->default_queue = data->default_queue; } -static bool virtio_net_attach_epbf_rss(VirtIONet *n) +static bool virtio_net_attach_ebpf_rss(VirtIONet *n) { struct EBPFRSSConfig config = {}; @@ -1264,7 +1199,8 @@ static bool virtio_net_attach_epbf_rss(VirtIONet *n) rss_data_to_rss_config(&n->rss_data, &config); if (!ebpf_rss_set_all(&n->ebpf_rss, &config, - n->rss_data.indirections_table, n->rss_data.key)) { + n->rss_data.indirections_table, n->rss_data.key, + NULL)) { return false; } @@ -1275,18 +1211,22 @@ static bool virtio_net_attach_epbf_rss(VirtIONet *n) return true; } -static void virtio_net_detach_epbf_rss(VirtIONet *n) +static void virtio_net_detach_ebpf_rss(VirtIONet *n) { virtio_net_attach_ebpf_to_backend(n->nic, -1); } static void virtio_net_commit_rss_config(VirtIONet *n) { + if (n->rss_data.peer_hash_available) { + return; + } + if (n->rss_data.enabled) { n->rss_data.enabled_software_rss = n->rss_data.populate_hash; if (n->rss_data.populate_hash) { - virtio_net_detach_epbf_rss(n); - } else if (!virtio_net_attach_epbf_rss(n)) { + virtio_net_detach_ebpf_rss(n); + } else if (!virtio_net_attach_ebpf_rss(n)) { if (get_vhost_net(qemu_get_queue(n->nic)->peer)) { warn_report("Can't load eBPF RSS for vhost"); } else { @@ -1295,12 +1235,13 @@ static void virtio_net_commit_rss_config(VirtIONet *n) } } - trace_virtio_net_rss_enable(n->rss_data.hash_types, + trace_virtio_net_rss_enable(n, + n->rss_data.runtime_hash_types, n->rss_data.indirections_len, sizeof(n->rss_data.key)); } else { - virtio_net_detach_epbf_rss(n); - trace_virtio_net_rss_disable(); + virtio_net_detach_ebpf_rss(n); + trace_virtio_net_rss_disable(n); } } @@ -1314,28 +1255,27 @@ static void virtio_net_disable_rss(VirtIONet *n) virtio_net_commit_rss_config(n); } -static bool virtio_net_load_ebpf_fds(VirtIONet *n) +static bool virtio_net_load_ebpf_fds(VirtIONet *n, Error **errp) { int fds[EBPF_RSS_MAX_FDS] = { [0 ... 
EBPF_RSS_MAX_FDS - 1] = -1}; int ret = true; int i = 0; if (n->nr_ebpf_rss_fds != EBPF_RSS_MAX_FDS) { - warn_report("Expected %d file descriptors but got %d", - EBPF_RSS_MAX_FDS, n->nr_ebpf_rss_fds); - return false; - } + error_setg(errp, "Expected %d file descriptors but got %d", + EBPF_RSS_MAX_FDS, n->nr_ebpf_rss_fds); + return false; + } for (i = 0; i < n->nr_ebpf_rss_fds; i++) { - fds[i] = monitor_fd_param(monitor_cur(), n->ebpf_rss_fds[i], - &error_warn); + fds[i] = monitor_fd_param(monitor_cur(), n->ebpf_rss_fds[i], errp); if (fds[i] < 0) { ret = false; goto exit; } } - ret = ebpf_rss_load_fds(&n->ebpf_rss, fds[0], fds[1], fds[2], fds[3]); + ret = ebpf_rss_load_fds(&n->ebpf_rss, fds[0], fds[1], fds[2], fds[3], errp); exit: if (!ret) { @@ -1347,17 +1287,27 @@ static bool virtio_net_load_ebpf_fds(VirtIONet *n) return ret; } -static bool virtio_net_load_ebpf(VirtIONet *n) +static bool virtio_net_load_ebpf(VirtIONet *n, Error **errp) { - bool ret = false; + if (!virtio_net_attach_ebpf_to_backend(n->nic, -1)) { + return true; + } - if (virtio_net_attach_ebpf_to_backend(n->nic, -1)) { - if (!(n->ebpf_rss_fds && virtio_net_load_ebpf_fds(n))) { - ret = ebpf_rss_load(&n->ebpf_rss); - } + trace_virtio_net_rss_load(n, n->nr_ebpf_rss_fds, n->ebpf_rss_fds); + + /* + * If user explicitly gave QEMU RSS FDs to use, then + * failing to use them must be considered a fatal + * error. If no RSS FDs were provided, QEMU is trying + * eBPF on a "best effort" basis only, so report a + * warning and allow fallback to software RSS. + */ + if (n->ebpf_rss_fds) { + return virtio_net_load_ebpf_fds(n, errp); } - return ret; + ebpf_rss_load(&n->ebpf_rss, &error_warn); + return true; } static void virtio_net_unload_ebpf(VirtIONet *n) @@ -1397,20 +1347,20 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, err_value = (uint32_t)s; goto error; } - n->rss_data.hash_types = virtio_ldl_p(vdev, &cfg.hash_types); + n->rss_data.runtime_hash_types = virtio_ldl_p(vdev, &cfg.hash_types); n->rss_data.indirections_len = virtio_lduw_p(vdev, &cfg.indirection_table_mask); - n->rss_data.indirections_len++; if (!do_rss) { - n->rss_data.indirections_len = 1; + n->rss_data.indirections_len = 0; } - if (!is_power_of_2(n->rss_data.indirections_len)) { - err_msg = "Invalid size of indirection table"; + if (n->rss_data.indirections_len >= VIRTIO_NET_RSS_MAX_TABLE_LEN) { + err_msg = "Too large indirection table"; err_value = n->rss_data.indirections_len; goto error; } - if (n->rss_data.indirections_len > VIRTIO_NET_RSS_MAX_TABLE_LEN) { - err_msg = "Too large indirection table"; + n->rss_data.indirections_len++; + if (!is_power_of_2(n->rss_data.indirections_len)) { + err_msg = "Invalid size of indirection table"; err_value = n->rss_data.indirections_len; goto error; } @@ -1460,12 +1410,12 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, err_value = temp.b; goto error; } - if (!temp.b && n->rss_data.hash_types) { + if (!temp.b && n->rss_data.runtime_hash_types) { err_msg = "No key provided"; err_value = 0; goto error; } - if (!temp.b && !n->rss_data.hash_types) { + if (!temp.b && !n->rss_data.runtime_hash_types) { virtio_net_disable_rss(n); return queue_pairs; } @@ -1481,7 +1431,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, virtio_net_commit_rss_config(n); return queue_pairs; error: - trace_virtio_net_rss_error(err_msg, err_value); + trace_virtio_net_rss_error(n, err_msg, err_value); virtio_net_disable_rss(n); return 0; } @@ -1691,8 +1641,11 @@ static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr 
*hdr) static void work_around_broken_dhclient(struct virtio_net_hdr *hdr, uint8_t *buf, size_t size) { + size_t csum_size = ETH_HLEN + sizeof(struct ip_header) + + sizeof(struct udp_header); + if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */ - (size > 27 && size < 1500) && /* normal sized MTU */ + (size >= csum_size && size < 1500) && /* normal sized MTU */ (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */ (buf[23] == 17) && /* ip.protocol == UDP */ (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */ @@ -1864,7 +1817,7 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf, net_rx_pkt_set_protocols(pkt, &iov, 1, n->host_hdr_len); net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); net_hash_type = virtio_net_get_hash_type(hasip4, hasip6, l4hdr_proto, - n->rss_data.hash_types); + n->rss_data.runtime_hash_types); if (net_hash_type > NetPktRssIpV6UdpEx) { if (n->rss_data.populate_hash) { hdr->hash_value = VIRTIO_NET_HASH_REPORT_NONE; @@ -1889,32 +1842,34 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf, } static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, - size_t size, bool no_rss) + size_t size) { VirtIONet *n = qemu_get_nic_opaque(nc); - VirtIONetQueue *q = virtio_net_get_subqueue(nc); + VirtIONetQueue *q; VirtIODevice *vdev = VIRTIO_DEVICE(n); - VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE]; - size_t lens[VIRTQUEUE_MAX_SIZE]; - struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; + QEMU_UNINITIALIZED VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE]; + QEMU_UNINITIALIZED size_t lens[VIRTQUEUE_MAX_SIZE]; + QEMU_UNINITIALIZED struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; struct virtio_net_hdr_v1_hash extra_hdr; unsigned mhdr_cnt = 0; size_t offset, i, guest_offset, j; ssize_t err; - if (!virtio_net_can_receive(nc)) { - return -1; - } + memset(&extra_hdr, 0, sizeof(extra_hdr)); - if (!no_rss && n->rss_data.enabled && n->rss_data.enabled_software_rss) { + if (n->rss_data.enabled && n->rss_data.enabled_software_rss) { int index = virtio_net_process_rss(nc, buf, size, &extra_hdr); if (index >= 0) { - NetClientState *nc2 = - qemu_get_subqueue(n->nic, index % n->curr_queue_pairs); - return virtio_net_receive_rcu(nc2, buf, size, true); + nc = qemu_get_subqueue(n->nic, index % n->curr_queue_pairs); } } + if (!virtio_net_can_receive(nc)) { + return -1; + } + + q = virtio_net_get_subqueue(nc); + /* hdr_len refers to the header we supply to the guest */ if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) { return 0; @@ -1970,6 +1925,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, sg, elem->in_num, offsetof(typeof(extra_hdr), hdr.num_buffers), sizeof(extra_hdr.hdr.num_buffers)); + } else { + extra_hdr.hdr.num_buffers = cpu_to_le16(1); } receive_header(n, sg, elem->in_num, buf, size); @@ -2040,7 +1997,22 @@ static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf, { RCU_READ_LOCK_GUARD(); - return virtio_net_receive_rcu(nc, buf, size, false); + return virtio_net_receive_rcu(nc, buf, size); +} + +/* + * Accessors to read and write the IP packet data length field. This + * is a potentially unaligned network-byte-order 16 bit unsigned integer + * pointed to by unit->ip_len. 
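+ * lduw_be_p()/stw_be_p() below take care of the byte order and the possible
+ * misalignment in one place, instead of dereferencing ip_plen and
+ * byte-swapping with htons() as the callers used to do.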
+ */ +static uint16_t read_unit_ip_len(VirtioNetRscUnit *unit) +{ + return lduw_be_p(unit->ip_plen); +} + +static void write_unit_ip_len(VirtioNetRscUnit *unit, uint16_t l) +{ + stw_be_p(unit->ip_plen, l); } static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain, @@ -2057,7 +2029,7 @@ static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain, unit->ip_plen = &ip->ip_len; unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen); unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10; - unit->payload = htons(*unit->ip_plen) - ip_hdrlen - unit->tcp_hdrlen; + unit->payload = read_unit_ip_len(unit) - ip_hdrlen - unit->tcp_hdrlen; } static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain, @@ -2076,7 +2048,7 @@ static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain, /* There is a difference between payload length in ipv4 and v6, ip header is excluded in ipv6 */ - unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen; + unit->payload = read_unit_ip_len(unit) - unit->tcp_hdrlen; } static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain, @@ -2123,7 +2095,7 @@ static void virtio_net_rsc_purge(void *opq) chain->stat.timer++; if (!QTAILQ_EMPTY(&chain->buffers)) { timer_mod(chain->drain_timer, - qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout); + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + chain->n->rsc_timeout); } } @@ -2225,7 +2197,7 @@ static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain, VirtioNetRscUnit *o_unit; o_unit = &seg->unit; - o_ip_len = htons(*o_unit->ip_plen); + o_ip_len = read_unit_ip_len(o_unit); nseq = htonl(n_unit->tcp->th_seq); oseq = htonl(o_unit->tcp->th_seq); @@ -2261,7 +2233,7 @@ static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain, o_unit->payload += n_unit->payload; /* update new data len */ /* update field in ip header */ - *o_unit->ip_plen = htons(o_ip_len + n_unit->payload); + write_unit_ip_len(o_unit, o_ip_len + n_unit->payload); /* Bring 'PUSH' big, the whql test guide says 'PUSH' can be coalesced for windows guest, while this may change the behavior for linux @@ -2359,7 +2331,7 @@ static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain, chain->stat.empty_cache++; virtio_net_rsc_cache_buf(chain, nc, buf, size); timer_mod(chain->drain_timer, - qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout); + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + chain->n->rsc_timeout); return size; } @@ -2597,7 +2569,7 @@ static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n, chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD; chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; } - chain->drain_timer = timer_new_ns(QEMU_CLOCK_HOST, + chain->drain_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_rsc_purge, chain); memset(&chain->stat, 0, sizeof(chain->stat)); @@ -2672,7 +2644,7 @@ static void virtio_net_tx_complete(NetClientState *nc, ssize_t len) */ virtio_queue_set_notification(q->tx_vq, 0); if (q->tx_bh) { - qemu_bh_schedule(q->tx_bh); + replay_bh_schedule_event(q->tx_bh); } else { timer_mod(q->tx_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); @@ -2838,7 +2810,7 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq) return; } virtio_queue_set_notification(vq, 0); - qemu_bh_schedule(q->tx_bh); + replay_bh_schedule_event(q->tx_bh); } static void virtio_net_tx_timer(void *opaque) @@ -2921,7 +2893,7 @@ static void virtio_net_tx_bh(void *opaque) /* If we flush a full burst of packets, assume there are * more coming and immediately 
reschedule */ if (ret >= n->tx_burst) { - qemu_bh_schedule(q->tx_bh); + replay_bh_schedule_event(q->tx_bh); q->tx_waiting = 1; return; } @@ -2935,7 +2907,7 @@ static void virtio_net_tx_bh(void *opaque) return; } else if (ret > 0) { virtio_queue_set_notification(q->tx_vq, 0); - qemu_bh_schedule(q->tx_bh); + replay_bh_schedule_event(q->tx_bh); q->tx_waiting = 1; } } @@ -2986,11 +2958,10 @@ static void virtio_net_del_queue(VirtIONet *n, int index) virtio_del_queue(vdev, index * 2 + 1); } -static void virtio_net_change_num_queue_pairs(VirtIONet *n, int new_max_queue_pairs) +static void virtio_net_change_num_queues(VirtIONet *n, int new_num_queues) { VirtIODevice *vdev = VIRTIO_DEVICE(n); int old_num_queues = virtio_get_num_queues(vdev); - int new_num_queues = new_max_queue_pairs * 2 + 1; int i; assert(old_num_queues >= 3); @@ -3026,11 +2997,115 @@ static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue) int max = multiqueue ? n->max_queue_pairs : 1; n->multiqueue = multiqueue; - virtio_net_change_num_queue_pairs(n, max); + virtio_net_change_num_queues(n, max * 2 + 1); virtio_net_set_queue_pairs(n); } +static int virtio_net_pre_load_queues(VirtIODevice *vdev, uint32_t n) +{ + virtio_net_change_num_queues(VIRTIO_NET(vdev), n); + + return 0; +} + +static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + VirtIONet *n = VIRTIO_NET(vdev); + NetClientState *nc = qemu_get_queue(n->nic); + uint32_t supported_hash_types = n->rss_data.supported_hash_types; + uint32_t peer_hash_types = n->rss_data.peer_hash_types; + bool use_own_hash = + (supported_hash_types & VIRTIO_NET_RSS_SUPPORTED_HASHES) == + supported_hash_types; + bool use_peer_hash = + n->rss_data.peer_hash_available && + (supported_hash_types & peer_hash_types) == supported_hash_types; + + /* Firstly sync all virtio-net possible supported features */ + features |= n->host_features; + + virtio_add_feature(&features, VIRTIO_NET_F_MAC); + + if (!peer_has_vnet_hdr(n)) { + virtio_clear_feature(&features, VIRTIO_NET_F_CSUM); + virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4); + virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6); + virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN); + + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN); + + virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6); + + virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT); + } + + if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) { + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO); + virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO); + } + + if (!peer_has_uso(n)) { + virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6); + } + + if (!get_vhost_net(nc->peer)) { + if (!use_own_hash) { + virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT); + virtio_clear_feature(&features, VIRTIO_NET_F_RSS); + } else if (virtio_has_feature(features, VIRTIO_NET_F_RSS)) { + virtio_net_load_ebpf(n, errp); + } + + return features; + } + + if (!use_peer_hash) { + virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT); + + if (!use_own_hash || !virtio_net_attach_ebpf_to_backend(n->nic, 
-1)) { + if (!virtio_net_load_ebpf(n, errp)) { + return features; + } + + virtio_clear_feature(&features, VIRTIO_NET_F_RSS); + } + } + + features = vhost_net_get_features(get_vhost_net(nc->peer), features); + vdev->backend_features = features; + + if (n->mtu_bypass_backend && + (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) { + features |= (1ULL << VIRTIO_NET_F_MTU); + } + + /* + * Since GUEST_ANNOUNCE is emulated the feature bit could be set without + * enabled. This happens in the vDPA case. + * + * Make sure the feature set is not incoherent, as the driver could refuse + * to start. + * + * TODO: QEMU is able to emulate a CVQ just for guest_announce purposes, + * helping guest to notify the new location with vDPA devices that does not + * support it. + */ + if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) { + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE); + } + + return features; +} + static int virtio_net_post_load_device(void *opaque, int version_id) { VirtIONet *n = opaque; @@ -3269,6 +3344,17 @@ static const VMStateDescription vmstate_virtio_net_has_vnet = { }, }; +static int virtio_net_rss_post_load(void *opaque, int version_id) +{ + VirtIONet *n = VIRTIO_NET(opaque); + + if (version_id == 1) { + n->rss_data.supported_hash_types = VIRTIO_NET_RSS_SUPPORTED_HASHES; + } + + return 0; +} + static bool virtio_net_rss_needed(void *opaque) { return VIRTIO_NET(opaque)->rss_data.enabled; @@ -3276,14 +3362,16 @@ static bool virtio_net_rss_needed(void *opaque) static const VMStateDescription vmstate_virtio_net_rss = { .name = "virtio-net-device/rss", - .version_id = 1, + .version_id = 2, .minimum_version_id = 1, + .post_load = virtio_net_rss_post_load, .needed = virtio_net_rss_needed, .fields = (const VMStateField[]) { VMSTATE_BOOL(rss_data.enabled, VirtIONet), VMSTATE_BOOL(rss_data.redirect, VirtIONet), VMSTATE_BOOL(rss_data.populate_hash, VirtIONet), - VMSTATE_UINT32(rss_data.hash_types, VirtIONet), + VMSTATE_UINT32(rss_data.runtime_hash_types, VirtIONet), + VMSTATE_UINT32_V(rss_data.supported_hash_types, VirtIONet, 2), VMSTATE_UINT16(rss_data.indirections_len, VirtIONet), VMSTATE_UINT16(rss_data.default_queue, VirtIONet), VMSTATE_UINT8_ARRAY(rss_data.key, VirtIONet, @@ -3295,6 +3383,117 @@ static const VMStateDescription vmstate_virtio_net_rss = { }, }; +static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev) +{ + VirtIONet *n = VIRTIO_NET(vdev); + NetClientState *nc; + struct vhost_net *net; + + if (!n->nic) { + return NULL; + } + + nc = qemu_get_queue(n->nic); + if (!nc) { + return NULL; + } + + net = get_vhost_net(nc->peer); + if (!net) { + return NULL; + } + + return &net->dev; +} + +static int vhost_user_net_save_state(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, + JSONWriter *vmdesc) +{ + VirtIONet *n = pv; + VirtIODevice *vdev = VIRTIO_DEVICE(n); + struct vhost_dev *vhdev; + Error *local_error = NULL; + int ret; + + vhdev = virtio_net_get_vhost(vdev); + if (vhdev == NULL) { + error_reportf_err(local_error, + "Error getting vhost back-end of %s device %s: ", + vdev->name, vdev->parent_obj.canonical_path); + return -1; + } + + ret = vhost_save_backend_state(vhdev, f, &local_error); + if (ret < 0) { + error_reportf_err(local_error, + "Error saving back-end state of %s device %s: ", + vdev->name, vdev->parent_obj.canonical_path); + return ret; + } + + return 0; +} + +static int vhost_user_net_load_state(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + VirtIONet *n = pv; + VirtIODevice *vdev = 
VIRTIO_DEVICE(n); + struct vhost_dev *vhdev; + Error *local_error = NULL; + int ret; + + vhdev = virtio_net_get_vhost(vdev); + if (vhdev == NULL) { + error_reportf_err(local_error, + "Error getting vhost back-end of %s device %s: ", + vdev->name, vdev->parent_obj.canonical_path); + return -1; + } + + ret = vhost_load_backend_state(vhdev, f, &local_error); + if (ret < 0) { + error_reportf_err(local_error, + "Error loading back-end state of %s device %s: ", + vdev->name, vdev->parent_obj.canonical_path); + return ret; + } + + return 0; +} + +static bool vhost_user_net_is_internal_migration(void *opaque) +{ + VirtIONet *n = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(n); + struct vhost_dev *vhdev; + + vhdev = virtio_net_get_vhost(vdev); + if (vhdev == NULL) { + return false; + } + + return vhost_supports_device_state(vhdev); +} + +static const VMStateDescription vhost_user_net_backend_state = { + .name = "virtio-net-device/backend", + .version_id = 0, + .needed = vhost_user_net_is_internal_migration, + .fields = (const VMStateField[]) { + { + .name = "backend", + .info = &(const VMStateInfo) { + .name = "virtio-net vhost-user backend state", + .get = vhost_user_net_load_state, + .put = vhost_user_net_save_state, + }, + }, + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_virtio_net_device = { .name = "virtio-net-device", .version_id = VIRTIO_NET_VM_VERSION, @@ -3347,6 +3546,7 @@ static const VMStateDescription vmstate_virtio_net_device = { }, .subsections = (const VMStateDescription * const []) { &vmstate_virtio_net_rss, + &vhost_user_net_backend_state, NULL } }; @@ -3743,6 +3943,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN); n->vlans = g_malloc0(MAX_VLAN >> 3); + memset(n->vlans, 0xff, MAX_VLAN >> 3); nc = qemu_get_queue(n->nic); nc->rxfilter_notify_enabled = 1; @@ -3758,8 +3959,17 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) net_rx_pkt_init(&n->rx_pkt); - if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) { - virtio_net_load_ebpf(n); + if (qemu_get_vnet_hash_supported_types(qemu_get_queue(n->nic)->peer, + &n->rss_data.peer_hash_types)) { + n->rss_data.peer_hash_available = true; + n->rss_data.supported_hash_types = + n->rss_data.specified_hash_types.on_bits | + (n->rss_data.specified_hash_types.auto_bits & + n->rss_data.peer_hash_types); + } else { + n->rss_data.supported_hash_types = + n->rss_data.specified_hash_types.on_bits | + n->rss_data.specified_hash_types.auto_bits; } } @@ -3833,7 +4043,6 @@ static void virtio_net_reset(VirtIODevice *vdev) memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN); memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac)); qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); - memset(n->vlans, 0, MAX_VLAN >> 3); /* Flush any async TX */ for (i = 0; i < n->max_queue_pairs; i++) { @@ -3892,14 +4101,6 @@ static bool dev_unplug_pending(void *opaque) return vdc->primary_unplug_pending(dev); } -static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev) -{ - VirtIONet *n = VIRTIO_NET(vdev); - NetClientState *nc = qemu_get_queue(n->nic); - struct vhost_net *net = get_vhost_net(nc->peer); - return &net->dev; -} - static const VMStateDescription vmstate_virtio_net = { .name = "virtio-net", .minimum_version_id = VIRTIO_NET_VM_VERSION, @@ -3912,7 +4113,7 @@ static const VMStateDescription vmstate_virtio_net = { .dev_unplug_pending = dev_unplug_pending, }; -static Property virtio_net_properties[] = { 
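/*
 * The properties added below expose the RSS/hash-report hash types as
 * tri-state on/off/auto switches. As realize() above computes it, the
 * effective rss_data.supported_hash_types is the union of the bits set to
 * "on" plus whichever "auto" bits the peer reports via
 * qemu_get_vnet_hash_supported_types(); when the peer reports nothing,
 * "auto" behaves like "on". A hypothetical command line (the netdev backend
 * is illustrative, only the hash-* names come from this patch):
 *
 *     -netdev tap,id=net0 \
 *     -device virtio-net-pci,netdev=net0,hash-ipv4=on,hash-udp6ex=off
 */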
+static const Property virtio_net_properties[] = { DEFINE_PROP_BIT64("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true), DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features, @@ -3984,10 +4185,45 @@ static Property virtio_net_properties[] = { VIRTIO_NET_F_GUEST_USO6, true), DEFINE_PROP_BIT64("host_uso", VirtIONet, host_features, VIRTIO_NET_F_HOST_USO, true), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-ipv4", VirtIONet, + rss_data.specified_hash_types, + VIRTIO_NET_HASH_REPORT_IPv4 - 1, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-tcp4", VirtIONet, + rss_data.specified_hash_types, + VIRTIO_NET_HASH_REPORT_TCPv4 - 1, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-udp4", VirtIONet, + rss_data.specified_hash_types, + VIRTIO_NET_HASH_REPORT_UDPv4 - 1, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-ipv6", VirtIONet, + rss_data.specified_hash_types, + VIRTIO_NET_HASH_REPORT_IPv6 - 1, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-tcp6", VirtIONet, + rss_data.specified_hash_types, + VIRTIO_NET_HASH_REPORT_TCPv6 - 1, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-udp6", VirtIONet, + rss_data.specified_hash_types, + VIRTIO_NET_HASH_REPORT_UDPv6 - 1, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-ipv6ex", VirtIONet, + rss_data.specified_hash_types, + VIRTIO_NET_HASH_REPORT_IPv6_EX - 1, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-tcp6ex", VirtIONet, + rss_data.specified_hash_types, + VIRTIO_NET_HASH_REPORT_TCPv6_EX - 1, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-udp6ex", VirtIONet, + rss_data.specified_hash_types, + VIRTIO_NET_HASH_REPORT_UDPv6_EX - 1, + ON_OFF_AUTO_AUTO), }; -static void virtio_net_class_init(ObjectClass *klass, void *data) +static void virtio_net_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); @@ -4009,6 +4245,7 @@ static void virtio_net_class_init(ObjectClass *klass, void *data) vdc->guest_notifier_mask = virtio_net_guest_notifier_mask; vdc->guest_notifier_pending = virtio_net_guest_notifier_pending; vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO); + vdc->pre_load_queues = virtio_net_pre_load_queues; vdc->post_load = virtio_net_post_load_virtio; vdc->vmsd = &vmstate_virtio_net_device; vdc->primary_unplug_pending = primary_unplug_pending; diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index 63a91877730..af73aa8ef28 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -21,8 +21,7 @@ #include "hw/qdev-properties.h" #include "net/tap.h" #include "net/checksum.h" -#include "sysemu/sysemu.h" -#include "qemu/bswap.h" +#include "system/system.h" #include "qemu/log.h" #include "qemu/module.h" #include "hw/pci/msix.h" @@ -41,19 +40,9 @@ #define PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION 0x1 #define VMXNET3_MSIX_BAR_SIZE 0x2000 -/* Compatibility flags for migration */ -#define VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT 0 -#define VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS \ - (1 << VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT) -#define VMXNET3_COMPAT_FLAG_DISABLE_PCIE_BIT 1 -#define VMXNET3_COMPAT_FLAG_DISABLE_PCIE \ - (1 << VMXNET3_COMPAT_FLAG_DISABLE_PCIE_BIT) - #define VMXNET3_EXP_EP_OFFSET (0x48) -#define VMXNET3_MSI_OFFSET(s) \ - ((s)->compat_flags & VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS ? 0x50 : 0x84) -#define VMXNET3_MSIX_OFFSET(s) \ - ((s)->compat_flags & VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS ? 
0 : 0x9c) +#define VMXNET3_MSI_OFFSET (0x84) +#define VMXNET3_MSIX_OFFSET (0x9c) #define VMXNET3_DSN_OFFSET (0x100) #define VMXNET3_BAR0_IDX (0) @@ -61,8 +50,7 @@ #define VMXNET3_MSIX_BAR_IDX (2) #define VMXNET3_OFF_MSIX_TABLE (0x000) -#define VMXNET3_OFF_MSIX_PBA(s) \ - ((s)->compat_flags & VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS ? 0x800 : 0x1000) +#define VMXNET3_OFF_MSIX_PBA (0x1000) /* Link speed in Mbps should be shifted by 16 */ #define VMXNET3_LINK_SPEED (1000 << 16) @@ -456,7 +444,6 @@ vmxnet3_setup_tx_offloads(VMXNET3State *s) default: g_assert_not_reached(); - return false; } return true; @@ -933,7 +920,6 @@ static void vmxnet3_rx_update_descr(struct NetRxPkt *pkt, nocsum: rxcd->cnc = 1; - return; } static void @@ -2124,8 +2110,8 @@ vmxnet3_init_msix(VMXNET3State *s) &s->msix_bar, VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_TABLE, &s->msix_bar, - VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_PBA(s), - VMXNET3_MSIX_OFFSET(s), NULL); + VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_PBA, + VMXNET3_MSIX_OFFSET, NULL); if (0 > res) { VMW_WRPRN("Failed to initialize MSI-X, error %d", res); @@ -2223,7 +2209,7 @@ static void vmxnet3_pci_realize(PCIDevice *pci_dev, Error **errp) /* Interrupt pin A */ pci_dev->config[PCI_INTERRUPT_PIN] = 0x01; - ret = msi_init(pci_dev, VMXNET3_MSI_OFFSET(s), VMXNET3_MAX_NMSIX_INTRS, + ret = msi_init(pci_dev, VMXNET3_MSI_OFFSET, VMXNET3_MAX_NMSIX_INTRS, VMXNET3_USE_64BIT, VMXNET3_PER_VECTOR_MASK, NULL); /* Any error other than -ENOTSUP(board's MSI support is broken) * is a programming error. Fall back to INTx silently on -ENOTSUP */ @@ -2251,6 +2237,7 @@ static void vmxnet3_instance_init(Object *obj) device_add_bootindex_property(obj, &s->conf.bootindex, "bootindex", "/ethernet-phy@0", DEVICE(obj)); + PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS; } static void vmxnet3_pci_uninit(PCIDevice *pci_dev) @@ -2472,33 +2459,14 @@ static const VMStateDescription vmstate_vmxnet3 = { } }; -static Property vmxnet3_properties[] = { +static const Property vmxnet3_properties[] = { DEFINE_NIC_PROPERTIES(VMXNET3State, conf), - DEFINE_PROP_BIT("x-old-msi-offsets", VMXNET3State, compat_flags, - VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT, false), - DEFINE_PROP_BIT("x-disable-pcie", VMXNET3State, compat_flags, - VMXNET3_COMPAT_FLAG_DISABLE_PCIE_BIT, false), - DEFINE_PROP_END_OF_LIST(), }; -static void vmxnet3_realize(DeviceState *qdev, Error **errp) -{ - VMXNET3Class *vc = VMXNET3_DEVICE_GET_CLASS(qdev); - PCIDevice *pci_dev = PCI_DEVICE(qdev); - VMXNET3State *s = VMXNET3(qdev); - - if (!(s->compat_flags & VMXNET3_COMPAT_FLAG_DISABLE_PCIE)) { - pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; - } - - vc->parent_dc_realize(qdev, errp); -} - -static void vmxnet3_class_init(ObjectClass *class, void *data) +static void vmxnet3_class_init(ObjectClass *class, const void *data) { DeviceClass *dc = DEVICE_CLASS(class); PCIDeviceClass *c = PCI_DEVICE_CLASS(class); - VMXNET3Class *vc = VMXNET3_DEVICE_CLASS(class); c->realize = vmxnet3_pci_realize; c->exit = vmxnet3_pci_uninit; @@ -2509,10 +2477,8 @@ static void vmxnet3_class_init(ObjectClass *class, void *data) c->class_id = PCI_CLASS_NETWORK_ETHERNET; c->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE; c->subsystem_id = PCI_DEVICE_ID_VMWARE_VMXNET3; - device_class_set_parent_realize(dc, vmxnet3_realize, - &vc->parent_dc_realize); dc->desc = "VMWare Paravirtualized Ethernet v3"; - dc->reset = vmxnet3_qdev_reset; + device_class_set_legacy_reset(dc, vmxnet3_qdev_reset); dc->vmsd = &vmstate_vmxnet3; device_class_set_props(dc, vmxnet3_properties); 
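/*
 * Two conversions recur throughout this diff and meet in this class_init:
 * property tables become "static const Property" arrays without the
 * DEFINE_PROP_END_OF_LIST() sentinel (device_class_set_props() is expected
 * to take the element count from the array itself, e.g. via ARRAY_SIZE,
 * instead of scanning for a terminator), and direct "dc->reset = ..."
 * assignments are replaced by device_class_set_legacy_reset(), which wires
 * the old-style handler into the Resettable machinery. A minimal sketch of
 * the new pattern, using a hypothetical FooState/foo_reset:
 *
 *     static const Property foo_properties[] = {
 *         DEFINE_NIC_PROPERTIES(FooState, conf),        // no end-of-list marker
 *     };
 *
 *     static void foo_class_init(ObjectClass *klass, const void *data)
 *     {
 *         DeviceClass *dc = DEVICE_CLASS(klass);
 *         device_class_set_props(dc, foo_properties);
 *         device_class_set_legacy_reset(dc, foo_reset);  // was: dc->reset = foo_reset;
 *     }
 */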
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); @@ -2525,7 +2491,7 @@ static const TypeInfo vmxnet3_info = { .instance_size = sizeof(VMXNET3State), .class_init = vmxnet3_class_init, .instance_init = vmxnet3_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { } diff --git a/hw/net/vmxnet3.h b/hw/net/vmxnet3.h index f9283f9e7b6..dbc69d5fb66 100644 --- a/hw/net/vmxnet3.h +++ b/hw/net/vmxnet3.h @@ -63,8 +63,8 @@ * details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * along with this program; if not, see + * . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c index 89487b49baf..34c6a1d0b08 100644 --- a/hw/net/xen_nic.c +++ b/hw/net/xen_nic.c @@ -24,7 +24,7 @@ #include "qemu/cutils.h" #include "qemu/log.h" #include "qemu/qemu-print.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/error.h" #include @@ -510,23 +510,22 @@ static char *xen_netdev_get_name(XenDevice *xendev, Error **errp) if (netdev->dev == -1) { XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); - char fe_path[XENSTORE_ABS_PATH_MAX + 1]; int idx = (xen_mode == XEN_EMULATE) ? 0 : 1; + Error *local_err = NULL; char *value; /* Theoretically we could go up to INT_MAX here but that's overkill */ while (idx < 100) { - snprintf(fe_path, sizeof(fe_path), - "/local/domain/%u/device/vif/%u", - xendev->frontend_id, idx); - value = qemu_xen_xs_read(xenbus->xsh, XBT_NULL, fe_path, NULL); + value = xs_node_read(xenbus->xsh, XBT_NULL, NULL, &local_err, + "/local/domain/%u/device/vif/%u", + xendev->frontend_id, idx); if (!value) { if (errno == ENOENT) { netdev->dev = idx; + error_free(local_err); goto found; } - error_setg(errp, "cannot read %s: %s", fe_path, - strerror(errno)); + error_propagate(errp, local_err); return NULL; } free(value); @@ -555,13 +554,12 @@ static void xen_netdev_unrealize(XenDevice *xendev) /* ------------------------------------------------------------- */ -static Property xen_netdev_properties[] = { +static const Property xen_netdev_properties[] = { DEFINE_NIC_PROPERTIES(XenNetDev, conf), DEFINE_PROP_INT32("idx", XenNetDev, dev, -1), - DEFINE_PROP_END_OF_LIST(), }; -static void xen_netdev_class_init(ObjectClass *class, void *data) +static void xen_netdev_class_init(ObjectClass *class, const void *data) { DeviceClass *dev_class = DEVICE_CLASS(class); XenDeviceClass *xendev_class = XEN_DEVICE_CLASS(class); diff --git a/hw/net/xgmac.c b/hw/net/xgmac.c index ffe3fc8dbef..d45f8724673 100644 --- a/hw/net/xgmac.c +++ b/hw/net/xgmac.c @@ -207,7 +207,7 @@ static void xgmac_enet_send(XgmacState *s) struct desc bd; int frame_size; int len; - uint8_t frame[8192]; + QEMU_UNINITIALIZED uint8_t frame[8192]; uint8_t *ptr; ptr = frame; @@ -414,12 +414,11 @@ static void xgmac_enet_realize(DeviceState *dev, Error **errp) s->conf.macaddr.a[0]; } -static Property xgmac_properties[] = { +static const Property xgmac_properties[] = { DEFINE_NIC_PROPERTIES(XgmacState, conf), - DEFINE_PROP_END_OF_LIST(), }; -static void xgmac_enet_class_init(ObjectClass *klass, void *data) +static void xgmac_enet_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/net/xilinx_axienet.c 
b/hw/net/xilinx_axienet.c index 05d41bd5480..1f5c7480476 100644 --- a/hw/net/xilinx_axienet.c +++ b/hw/net/xilinx_axienet.c @@ -996,7 +996,7 @@ static void xilinx_enet_init(Object *obj) sysbus_init_mmio(sbd, &s->iomem); } -static Property xilinx_enet_properties[] = { +static const Property xilinx_enet_properties[] = { DEFINE_PROP_UINT32("phyaddr", XilinxAXIEnet, c_phyaddr, 7), DEFINE_PROP_UINT32("rxmem", XilinxAXIEnet, c_rxmem, 0x1000), DEFINE_PROP_UINT32("txmem", XilinxAXIEnet, c_txmem, 0x1000), @@ -1005,27 +1005,27 @@ static Property xilinx_enet_properties[] = { tx_data_dev, TYPE_STREAM_SINK, StreamSink *), DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIEnet, tx_control_dev, TYPE_STREAM_SINK, StreamSink *), - DEFINE_PROP_END_OF_LIST(), }; -static void xilinx_enet_class_init(ObjectClass *klass, void *data) +static void xilinx_enet_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = xilinx_enet_realize; device_class_set_props(dc, xilinx_enet_properties); - dc->reset = xilinx_axienet_reset; + device_class_set_legacy_reset(dc, xilinx_axienet_reset); } static void xilinx_enet_control_stream_class_init(ObjectClass *klass, - void *data) + const void *data) { StreamSinkClass *ssc = STREAM_SINK_CLASS(klass); ssc->push = xilinx_axienet_control_stream_push; } -static void xilinx_enet_data_stream_class_init(ObjectClass *klass, void *data) +static void xilinx_enet_data_stream_class_init(ObjectClass *klass, + const void *data) { StreamSinkClass *ssc = STREAM_SINK_CLASS(klass); @@ -1045,7 +1045,7 @@ static const TypeInfo xilinx_enet_data_stream_info = { .parent = TYPE_OBJECT, .instance_size = sizeof(XilinxAXIEnetStreamSink), .class_init = xilinx_enet_data_stream_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_STREAM_SINK }, { } } @@ -1056,7 +1056,7 @@ static const TypeInfo xilinx_enet_control_stream_info = { .parent = TYPE_OBJECT, .instance_size = sizeof(XilinxAXIEnetStreamSink), .class_init = xilinx_enet_control_stream_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_STREAM_SINK }, { } } diff --git a/hw/net/xilinx_ethlite.c b/hw/net/xilinx_ethlite.c index 989afaf037f..42b19d07c76 100644 --- a/hw/net/xilinx_ethlite.c +++ b/hw/net/xilinx_ethlite.c @@ -2,6 +2,10 @@ * QEMU model of the Xilinx Ethernet Lite MAC. * * Copyright (c) 2009 Edgar E. Iglesias. 
+ * Copyright (c) 2024 Linaro, Ltd + * + * DS580: https://docs.amd.com/v/u/en-US/xps_ethernetlite + * LogiCORE IP XPS Ethernet Lite Media Access Controller * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -24,27 +28,35 @@ #include "qemu/osdep.h" #include "qemu/module.h" +#include "qemu/bitops.h" #include "qom/object.h" -#include "exec/tswap.h" +#include "qapi/error.h" #include "hw/sysbus.h" #include "hw/irq.h" #include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/misc/unimp.h" #include "net/net.h" +#include "trace.h" + +#define BUFSZ_MAX 0x07e4 +#define A_MDIO_BASE 0x07e4 +#define A_TX_BASE0 0x07f4 +#define A_TX_BASE1 0x0ff4 +#define A_RX_BASE0 0x17fc +#define A_RX_BASE1 0x1ffc + +enum { + TX_LEN = 0, + TX_GIE = 1, + TX_CTRL = 2, + TX_MAX +}; -#define D(x) -#define R_TX_BUF0 0 -#define R_TX_LEN0 (0x07f4 / 4) -#define R_TX_GIE0 (0x07f8 / 4) -#define R_TX_CTRL0 (0x07fc / 4) -#define R_TX_BUF1 (0x0800 / 4) -#define R_TX_LEN1 (0x0ff4 / 4) -#define R_TX_CTRL1 (0x0ffc / 4) - -#define R_RX_BUF0 (0x1000 / 4) -#define R_RX_CTRL0 (0x17fc / 4) -#define R_RX_BUF1 (0x1800 / 4) -#define R_RX_CTRL1 (0x1ffc / 4) -#define R_MAX (0x2000 / 4) +enum { + RX_CTRL = 0, + RX_MAX +}; #define GIE_GIE 0x80000000 @@ -52,174 +64,238 @@ #define CTRL_P 0x2 #define CTRL_S 0x1 +typedef struct XlnxXpsEthLitePort { + MemoryRegion txio; + MemoryRegion rxio; + MemoryRegion txbuf; + MemoryRegion rxbuf; + + struct { + uint32_t tx_len; + uint32_t tx_gie; + uint32_t tx_ctrl; + + uint32_t rx_ctrl; + } reg; +} XlnxXpsEthLitePort; + #define TYPE_XILINX_ETHLITE "xlnx.xps-ethernetlite" -DECLARE_INSTANCE_CHECKER(struct xlx_ethlite, XILINX_ETHLITE, - TYPE_XILINX_ETHLITE) +OBJECT_DECLARE_SIMPLE_TYPE(XlnxXpsEthLite, XILINX_ETHLITE) -struct xlx_ethlite +struct XlnxXpsEthLite { SysBusDevice parent_obj; - MemoryRegion mmio; + EndianMode model_endianness; + MemoryRegion container; qemu_irq irq; NICState *nic; NICConf conf; uint32_t c_tx_pingpong; uint32_t c_rx_pingpong; - unsigned int txbuf; - unsigned int rxbuf; + unsigned int port_index; /* dual port RAM index */ - uint32_t regs[R_MAX]; + UnimplementedDeviceState rsvd; + UnimplementedDeviceState mdio; + XlnxXpsEthLitePort port[2]; }; -static inline void eth_pulse_irq(struct xlx_ethlite *s) +static inline void eth_pulse_irq(XlnxXpsEthLite *s) { /* Only the first gie reg is active. 
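 * The single IRQ line is shared by both ping-pong buffer pairs, and only
 * port 0's global interrupt enable bit gates the pulse.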
*/ - if (s->regs[R_TX_GIE0] & GIE_GIE) { + if (s->port[0].reg.tx_gie & GIE_GIE) { qemu_irq_pulse(s->irq); } } -static uint64_t -eth_read(void *opaque, hwaddr addr, unsigned int size) +static unsigned addr_to_port_index(hwaddr addr) { - struct xlx_ethlite *s = opaque; - uint32_t r = 0; + return extract64(addr, 11, 1); +} - addr >>= 2; +static void *txbuf_ptr(XlnxXpsEthLite *s, unsigned port_index) +{ + return memory_region_get_ram_ptr(&s->port[port_index].txbuf); +} - switch (addr) - { - case R_TX_GIE0: - case R_TX_LEN0: - case R_TX_LEN1: - case R_TX_CTRL1: - case R_TX_CTRL0: - case R_RX_CTRL1: - case R_RX_CTRL0: - r = s->regs[addr]; - D(qemu_log("%s " HWADDR_FMT_plx "=%x\n", __func__, addr * 4, r)); - break; - - default: - r = tswap32(s->regs[addr]); - break; +static void *rxbuf_ptr(XlnxXpsEthLite *s, unsigned port_index) +{ + return memory_region_get_ram_ptr(&s->port[port_index].rxbuf); +} + +static uint64_t port_tx_read(void *opaque, hwaddr addr, unsigned int size) +{ + XlnxXpsEthLite *s = opaque; + unsigned port_index = addr_to_port_index(addr); + uint32_t r = 0; + + switch (addr >> 2) { + case TX_LEN: + r = s->port[port_index].reg.tx_len; + break; + case TX_GIE: + r = s->port[port_index].reg.tx_gie; + break; + case TX_CTRL: + r = s->port[port_index].reg.tx_ctrl; + break; + default: + g_assert_not_reached(); } + return r; } -static void -eth_write(void *opaque, hwaddr addr, - uint64_t val64, unsigned int size) +static void port_tx_write(void *opaque, hwaddr addr, uint64_t value, + unsigned int size) { - struct xlx_ethlite *s = opaque; - unsigned int base = 0; - uint32_t value = val64; - - addr >>= 2; - switch (addr) - { - case R_TX_CTRL0: - case R_TX_CTRL1: - if (addr == R_TX_CTRL1) - base = 0x800 / 4; - - D(qemu_log("%s addr=" HWADDR_FMT_plx " val=%x\n", - __func__, addr * 4, value)); - if ((value & (CTRL_P | CTRL_S)) == CTRL_S) { - qemu_send_packet(qemu_get_queue(s->nic), - (void *) &s->regs[base], - s->regs[base + R_TX_LEN0]); - D(qemu_log("eth_tx %d\n", s->regs[base + R_TX_LEN0])); - if (s->regs[base + R_TX_CTRL0] & CTRL_I) - eth_pulse_irq(s); - } else if ((value & (CTRL_P | CTRL_S)) == (CTRL_P | CTRL_S)) { - memcpy(&s->conf.macaddr.a[0], &s->regs[base], 6); - if (s->regs[base + R_TX_CTRL0] & CTRL_I) - eth_pulse_irq(s); + XlnxXpsEthLite *s = opaque; + unsigned port_index = addr_to_port_index(addr); + + switch (addr >> 2) { + case TX_LEN: + s->port[port_index].reg.tx_len = value; + break; + case TX_GIE: + s->port[port_index].reg.tx_gie = value; + break; + case TX_CTRL: + if ((value & (CTRL_P | CTRL_S)) == CTRL_S) { + qemu_send_packet(qemu_get_queue(s->nic), + txbuf_ptr(s, port_index), + s->port[port_index].reg.tx_len); + if (s->port[port_index].reg.tx_ctrl & CTRL_I) { + eth_pulse_irq(s); + } + } else if ((value & (CTRL_P | CTRL_S)) == (CTRL_P | CTRL_S)) { + memcpy(&s->conf.macaddr.a[0], txbuf_ptr(s, port_index), 6); + if (s->port[port_index].reg.tx_ctrl & CTRL_I) { + eth_pulse_irq(s); } + } + /* + * We are fast and get ready pretty much immediately + * so we actually never flip the S nor P bits to one. + */ + s->port[port_index].reg.tx_ctrl = value & ~(CTRL_P | CTRL_S); + break; + default: + g_assert_not_reached(); + } +} - /* We are fast and get ready pretty much immediately so - we actually never flip the S nor P bits to one. */ - s->regs[addr] = value & ~(CTRL_P | CTRL_S); - break; +static const MemoryRegionOps eth_porttx_ops[2] = { + [0 ... 
1] = { + .read = port_tx_read, + .write = port_tx_write, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + }, + [0].endianness = DEVICE_LITTLE_ENDIAN, + [1].endianness = DEVICE_BIG_ENDIAN, +}; - /* Keep these native. */ - case R_RX_CTRL0: - case R_RX_CTRL1: - if (!(value & CTRL_S)) { - qemu_flush_queued_packets(qemu_get_queue(s->nic)); - } - /* fall through */ - case R_TX_LEN0: - case R_TX_LEN1: - case R_TX_GIE0: - D(qemu_log("%s addr=" HWADDR_FMT_plx " val=%x\n", - __func__, addr * 4, value)); - s->regs[addr] = value; - break; - - default: - s->regs[addr] = tswap32(value); - break; +static uint64_t port_rx_read(void *opaque, hwaddr addr, unsigned int size) +{ + XlnxXpsEthLite *s = opaque; + unsigned port_index = addr_to_port_index(addr); + uint32_t r = 0; + + switch (addr >> 2) { + case RX_CTRL: + r = s->port[port_index].reg.rx_ctrl; + break; + default: + g_assert_not_reached(); } + + return r; } -static const MemoryRegionOps eth_ops = { - .read = eth_read, - .write = eth_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4 +static void port_rx_write(void *opaque, hwaddr addr, uint64_t value, + unsigned int size) +{ + XlnxXpsEthLite *s = opaque; + unsigned port_index = addr_to_port_index(addr); + + switch (addr >> 2) { + case RX_CTRL: + if (!(value & CTRL_S)) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + s->port[port_index].reg.rx_ctrl = value; + break; + default: + g_assert_not_reached(); } +} + +static const MemoryRegionOps eth_portrx_ops[2] = { + [0 ... 1] = { + .read = port_rx_read, + .write = port_rx_write, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + }, + [0].endianness = DEVICE_LITTLE_ENDIAN, + [1].endianness = DEVICE_BIG_ENDIAN, }; static bool eth_can_rx(NetClientState *nc) { - struct xlx_ethlite *s = qemu_get_nic_opaque(nc); - unsigned int rxbase = s->rxbuf * (0x800 / 4); + XlnxXpsEthLite *s = qemu_get_nic_opaque(nc); - return !(s->regs[rxbase + R_RX_CTRL0] & CTRL_S); + return !(s->port[s->port_index].reg.rx_ctrl & CTRL_S); } static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size) { - struct xlx_ethlite *s = qemu_get_nic_opaque(nc); - unsigned int rxbase = s->rxbuf * (0x800 / 4); + XlnxXpsEthLite *s = qemu_get_nic_opaque(nc); + unsigned int port_index = s->port_index; /* DA filter. */ if (!(buf[0] & 0x80) && memcmp(&s->conf.macaddr.a[0], buf, 6)) return size; - if (s->regs[rxbase + R_RX_CTRL0] & CTRL_S) { - D(qemu_log("ethlite lost packet %x\n", s->regs[R_RX_CTRL0])); + if (s->port[port_index].reg.rx_ctrl & CTRL_S) { + trace_ethlite_pkt_lost(s->port[port_index].reg.rx_ctrl); return -1; } - D(qemu_log("%s %zd rxbase=%x\n", __func__, size, rxbase)); - if (size > (R_MAX - R_RX_BUF0 - rxbase) * 4) { - D(qemu_log("ethlite packet is too big, size=%x\n", size)); + if (size >= BUFSZ_MAX) { + trace_ethlite_pkt_size_too_big(size); return -1; } - memcpy(&s->regs[rxbase + R_RX_BUF0], buf, size); + memcpy(rxbuf_ptr(s, port_index), buf, size); - s->regs[rxbase + R_RX_CTRL0] |= CTRL_S; - if (s->regs[R_RX_CTRL0] & CTRL_I) { + s->port[port_index].reg.rx_ctrl |= CTRL_S; + if (s->port[port_index].reg.rx_ctrl & CTRL_I) { eth_pulse_irq(s); } /* If c_rx_pingpong was set flip buffers. 
*/ - s->rxbuf ^= s->c_rx_pingpong; + s->port_index ^= s->c_rx_pingpong; return size; } static void xilinx_ethlite_reset(DeviceState *dev) { - struct xlx_ethlite *s = XILINX_ETHLITE(dev); + XlnxXpsEthLite *s = XILINX_ETHLITE(dev); - s->rxbuf = 0; + s->port_index = 0; } static NetClientInfo net_xilinx_ethlite_info = { @@ -231,7 +307,61 @@ static NetClientInfo net_xilinx_ethlite_info = { static void xilinx_ethlite_realize(DeviceState *dev, Error **errp) { - struct xlx_ethlite *s = XILINX_ETHLITE(dev); + XlnxXpsEthLite *s = XILINX_ETHLITE(dev); + unsigned ops_index; + + if (s->model_endianness == ENDIAN_MODE_UNSPECIFIED) { + error_setg(errp, TYPE_XILINX_ETHLITE " property 'endianness'" + " must be set to 'big' or 'little'"); + return; + } + ops_index = s->model_endianness == ENDIAN_MODE_BIG ? 1 : 0; + + memory_region_init(&s->container, OBJECT(dev), + "xlnx.xps-ethernetlite", 0x2000); + + object_initialize_child(OBJECT(dev), "ethlite.reserved", &s->rsvd, + TYPE_UNIMPLEMENTED_DEVICE); + qdev_prop_set_string(DEVICE(&s->rsvd), "name", "ethlite.reserved"); + qdev_prop_set_uint64(DEVICE(&s->rsvd), "size", + memory_region_size(&s->container)); + sysbus_realize(SYS_BUS_DEVICE(&s->rsvd), &error_fatal); + memory_region_add_subregion_overlap(&s->container, 0, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->rsvd), 0), + -1); + + object_initialize_child(OBJECT(dev), "ethlite.mdio", &s->mdio, + TYPE_UNIMPLEMENTED_DEVICE); + qdev_prop_set_string(DEVICE(&s->mdio), "name", "ethlite.mdio"); + qdev_prop_set_uint64(DEVICE(&s->mdio), "size", 4 * 4); + sysbus_realize(SYS_BUS_DEVICE(&s->mdio), &error_fatal); + memory_region_add_subregion(&s->container, A_MDIO_BASE, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->mdio), 0)); + + for (unsigned i = 0; i < 2; i++) { + memory_region_init_ram(&s->port[i].txbuf, OBJECT(dev), + i ? "ethlite.tx[1]buf" : "ethlite.tx[0]buf", + BUFSZ_MAX, &error_abort); + memory_region_add_subregion(&s->container, 0x0800 * i, &s->port[i].txbuf); + memory_region_init_io(&s->port[i].txio, OBJECT(dev), + &eth_porttx_ops[ops_index], s, + i ? "ethlite.tx[1]io" : "ethlite.tx[0]io", + 4 * TX_MAX); + memory_region_add_subregion(&s->container, i ? A_TX_BASE1 : A_TX_BASE0, + &s->port[i].txio); + + memory_region_init_ram(&s->port[i].rxbuf, OBJECT(dev), + i ? "ethlite.rx[1]buf" : "ethlite.rx[0]buf", + BUFSZ_MAX, &error_abort); + memory_region_add_subregion(&s->container, 0x1000 + 0x0800 * i, + &s->port[i].rxbuf); + memory_region_init_io(&s->port[i].rxio, OBJECT(dev), + &eth_portrx_ops[ops_index], s, + i ? "ethlite.rx[1]io" : "ethlite.rx[0]io", + 4 * RX_MAX); + memory_region_add_subregion(&s->container, i ? 
A_RX_BASE1 : A_RX_BASE0, + &s->port[i].rxio); + } qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_xilinx_ethlite_info, &s->conf, @@ -242,42 +372,36 @@ static void xilinx_ethlite_realize(DeviceState *dev, Error **errp) static void xilinx_ethlite_init(Object *obj) { - struct xlx_ethlite *s = XILINX_ETHLITE(obj); + XlnxXpsEthLite *s = XILINX_ETHLITE(obj); sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); - - memory_region_init_io(&s->mmio, obj, &eth_ops, s, - "xlnx.xps-ethernetlite", R_MAX * 4); - sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->container); } -static Property xilinx_ethlite_properties[] = { - DEFINE_PROP_UINT32("tx-ping-pong", struct xlx_ethlite, c_tx_pingpong, 1), - DEFINE_PROP_UINT32("rx-ping-pong", struct xlx_ethlite, c_rx_pingpong, 1), - DEFINE_NIC_PROPERTIES(struct xlx_ethlite, conf), - DEFINE_PROP_END_OF_LIST(), +static const Property xilinx_ethlite_properties[] = { + DEFINE_PROP_ENDIAN_NODEFAULT("endianness", XlnxXpsEthLite, model_endianness), + DEFINE_PROP_UINT32("tx-ping-pong", XlnxXpsEthLite, c_tx_pingpong, 1), + DEFINE_PROP_UINT32("rx-ping-pong", XlnxXpsEthLite, c_rx_pingpong, 1), + DEFINE_NIC_PROPERTIES(XlnxXpsEthLite, conf), }; -static void xilinx_ethlite_class_init(ObjectClass *klass, void *data) +static void xilinx_ethlite_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = xilinx_ethlite_realize; - dc->reset = xilinx_ethlite_reset; + device_class_set_legacy_reset(dc, xilinx_ethlite_reset); device_class_set_props(dc, xilinx_ethlite_properties); } -static const TypeInfo xilinx_ethlite_info = { - .name = TYPE_XILINX_ETHLITE, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(struct xlx_ethlite), - .instance_init = xilinx_ethlite_init, - .class_init = xilinx_ethlite_class_init, +static const TypeInfo xilinx_ethlite_types[] = { + { + .name = TYPE_XILINX_ETHLITE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxXpsEthLite), + .instance_init = xilinx_ethlite_init, + .class_init = xilinx_ethlite_class_init, + }, }; -static void xilinx_ethlite_register_types(void) -{ - type_register_static(&xilinx_ethlite_info); -} - -type_init(xilinx_ethlite_register_types) +DEFINE_TYPES(xilinx_ethlite_types) diff --git a/hw/nubus/mac-nubus-bridge.c b/hw/nubus/mac-nubus-bridge.c index a0da5a8b2fa..0dac8d19b37 100644 --- a/hw/nubus/mac-nubus-bridge.c +++ b/hw/nubus/mac-nubus-bridge.c @@ -40,7 +40,7 @@ static void mac_nubus_bridge_init(Object *obj) sysbus_init_mmio(sbd, &s->slot_alias); } -static void mac_nubus_bridge_class_init(ObjectClass *klass, void *data) +static void mac_nubus_bridge_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/nubus/nubus-bridge.c b/hw/nubus/nubus-bridge.c index a42c86080f2..fb14402c4fc 100644 --- a/hw/nubus/nubus-bridge.c +++ b/hw/nubus/nubus-bridge.c @@ -23,13 +23,12 @@ static void nubus_bridge_init(Object *obj) qdev_init_gpio_out(DEVICE(s), bus->irqs, NUBUS_IRQS); } -static Property nubus_bridge_properties[] = { +static const Property nubus_bridge_properties[] = { DEFINE_PROP_UINT16("slot-available-mask", NubusBridge, bus.slot_available_mask, 0xffff), - DEFINE_PROP_END_OF_LIST() }; -static void nubus_bridge_class_init(ObjectClass *klass, void *data) +static void nubus_bridge_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/nubus/nubus-bus.c b/hw/nubus/nubus-bus.c index 07c279bde5c..44820f13a85 100--- 
a/hw/nubus/nubus-bus.c +++ b/hw/nubus/nubus-bus.c @@ -162,7 +162,7 @@ static bool nubus_check_address(BusState *bus, DeviceState *dev, Error **errp) return true; } -static void nubus_class_init(ObjectClass *oc, void *data) +static void nubus_class_init(ObjectClass *oc, const void *data) { BusClass *bc = BUS_CLASS(oc); diff --git a/hw/nubus/nubus-device.c b/hw/nubus/nubus-device.c index be4cb246966..7797e61c7fc 100644 --- a/hw/nubus/nubus-device.c +++ b/hw/nubus/nubus-device.c @@ -35,6 +35,13 @@ static void nubus_device_realize(DeviceState *dev, Error **errp) uint8_t *rom_ptr; int ret; + if (nd->slot < 0 || nd->slot >= NUBUS_SLOT_NB) { + error_setg(errp, + "'slot' value %d out of range (must be between 0 and %d)", + nd->slot, NUBUS_SLOT_NB - 1); + return; + } + /* Super */ slot_offset = nd->slot * NUBUS_SUPER_SLOT_SIZE; @@ -100,13 +107,12 @@ static void nubus_device_realize(DeviceState *dev, Error **errp) } } -static Property nubus_device_properties[] = { +static const Property nubus_device_properties[] = { DEFINE_PROP_INT32("slot", NubusDevice, slot, -1), DEFINE_PROP_STRING("romfile", NubusDevice, romfile), - DEFINE_PROP_END_OF_LIST() }; -static void nubus_device_class_init(ObjectClass *oc, void *data) +static void nubus_device_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/nubus/nubus-virtio-mmio.c b/hw/nubus/nubus-virtio-mmio.c index 7a98731c451..63aeca5b122 100644 --- a/hw/nubus/nubus-virtio-mmio.c +++ b/hw/nubus/nubus-virtio-mmio.c @@ -81,7 +81,7 @@ static void nubus_virtio_mmio_init(Object *obj) "pic-input-irq", 1); } -static void nubus_virtio_mmio_class_init(ObjectClass *oc, void *data) +static void nubus_virtio_mmio_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); NubusVirtioMMIODeviceClass *nvmdc = NUBUS_VIRTIO_MMIO_CLASS(oc); diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index c6d4f61a47f..f5ee6bf260f 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -22,7 +22,7 @@ * * Usage * ----- - * See docs/system/nvme.rst for extensive documentation. + * See docs/system/devices/nvme.rst for extensive documentation. 
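 * As a rough illustration of the atomic write and OCP options added by this
 * patch (see the "Add options" list just below), a controller might be
 * instantiated with something along these lines; the serial string and the
 * AWUN/AWUPF values are hypothetical examples only, chosen so that
 * awupf <= awun as required by the check in nvme_init_state():
 *
 *      -device nvme,serial=deadbeef,atomic.dn=off,atomic.awun=63, \
 *              atomic.awupf=15,ocp=on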
* * Add options: * -drive file=,if=none,id= @@ -40,6 +40,9 @@ * sriov_vi_flexible= \ * sriov_max_vi_per_vf= \ * sriov_max_vq_per_vf= \ + * atomic.dn=, \ + * atomic.awun, \ + * atomic.awupf, \ * subsys= * -device nvme-ns,drive=,bus=,nsid=,\ * zoned=, \ @@ -198,12 +201,12 @@ #include "qemu/range.h" #include "qapi/error.h" #include "qapi/visitor.h" -#include "sysemu/sysemu.h" -#include "sysemu/block-backend.h" -#include "sysemu/hostmem.h" +#include "system/system.h" +#include "system/block-backend.h" +#include "system/hostmem.h" #include "hw/pci/msix.h" #include "hw/pci/pcie_sriov.h" -#include "sysemu/spdm-socket.h" +#include "system/spdm-socket.h" #include "migration/vmstate.h" #include "nvme.h" @@ -254,6 +257,7 @@ static const uint32_t nvme_feature_cap[NVME_FID_MAX] = { [NVME_ERROR_RECOVERY] = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS, [NVME_VOLATILE_WRITE_CACHE] = NVME_FEAT_CAP_CHANGE, [NVME_NUMBER_OF_QUEUES] = NVME_FEAT_CAP_CHANGE, + [NVME_WRITE_ATOMICITY] = NVME_FEAT_CAP_CHANGE, [NVME_ASYNCHRONOUS_EVENT_CONF] = NVME_FEAT_CAP_CHANGE, [NVME_TIMESTAMP] = NVME_FEAT_CAP_CHANGE, [NVME_HOST_BEHAVIOR_SUPPORT] = NVME_FEAT_CAP_CHANGE, @@ -262,7 +266,7 @@ static const uint32_t nvme_feature_cap[NVME_FID_MAX] = { [NVME_FDP_EVENTS] = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS, }; -static const uint32_t nvme_cse_acs[256] = { +static const uint32_t nvme_cse_acs_default[256] = { [NVME_ADM_CMD_DELETE_SQ] = NVME_CMD_EFF_CSUPP, [NVME_ADM_CMD_CREATE_SQ] = NVME_CMD_EFF_CSUPP, [NVME_ADM_CMD_GET_LOG_PAGE] = NVME_CMD_EFF_CSUPP, @@ -273,17 +277,14 @@ static const uint32_t nvme_cse_acs[256] = { [NVME_ADM_CMD_SET_FEATURES] = NVME_CMD_EFF_CSUPP, [NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP, [NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP, - [NVME_ADM_CMD_NS_ATTACHMENT] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC, - [NVME_ADM_CMD_VIRT_MNGMT] = NVME_CMD_EFF_CSUPP, - [NVME_ADM_CMD_DBBUF_CONFIG] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_NS_ATTACHMENT] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC | + NVME_CMD_EFF_CCC, [NVME_ADM_CMD_FORMAT_NVM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, [NVME_ADM_CMD_DIRECTIVE_RECV] = NVME_CMD_EFF_CSUPP, [NVME_ADM_CMD_DIRECTIVE_SEND] = NVME_CMD_EFF_CSUPP, }; -static const uint32_t nvme_cse_iocs_none[256]; - -static const uint32_t nvme_cse_iocs_nvm[256] = { +static const uint32_t nvme_cse_iocs_nvm_default[256] = { [NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, [NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, [NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, @@ -296,7 +297,7 @@ static const uint32_t nvme_cse_iocs_nvm[256] = { [NVME_CMD_IO_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, }; -static const uint32_t nvme_cse_iocs_zoned[256] = { +static const uint32_t nvme_cse_iocs_zoned_default[256] = { [NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, [NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, [NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, @@ -305,6 +306,9 @@ static const uint32_t nvme_cse_iocs_zoned[256] = { [NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP, [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, [NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP, + [NVME_CMD_IO_MGMT_RECV] = NVME_CMD_EFF_CSUPP, + [NVME_CMD_IO_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_CMD_ZONE_APPEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, [NVME_CMD_ZONE_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, [NVME_CMD_ZONE_MGMT_RECV] = NVME_CMD_EFF_CSUPP, @@ -652,6 +656,12 @@ static void nvme_irq_check(NvmeCtrl *n) if (msix_enabled(pci)) { 
return; } + + /* vfs does not implement intx */ + if (pci_is_vf(pci)) { + return; + } + if (~intms & n->irq_status) { pci_irq_assert(pci); } else { @@ -1047,7 +1057,8 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl, */ #define SEG_CHUNK_SIZE 256 - NvmeSglDescriptor segment[SEG_CHUNK_SIZE], *sgld, *last_sgld; + QEMU_UNINITIALIZED NvmeSglDescriptor segment[SEG_CHUNK_SIZE]; + NvmeSglDescriptor *sgld, *last_sgld; uint64_t nsgld; uint32_t seg_len; uint16_t status; @@ -1516,9 +1527,16 @@ static void nvme_post_cqes(void *opaque) stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); break; } + QTAILQ_REMOVE(&cq->req_list, req, entry); + nvme_inc_cq_tail(cq); nvme_sg_unmap(&req->sg); + + if (QTAILQ_EMPTY(&sq->req_list) && !nvme_sq_empty(sq)) { + qemu_bh_schedule(sq->bh); + } + QTAILQ_INSERT_TAIL(&sq->req_list, req, entry); } if (cq->tail != cq->head) { @@ -1649,9 +1667,16 @@ static void nvme_smart_event(NvmeCtrl *n, uint8_t event) static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type) { + NvmeAsyncEvent *event, *next; + n->aer_mask &= ~(1 << event_type); - if (!QTAILQ_EMPTY(&n->aer_queue)) { - nvme_process_aers(n); + + QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) { + if (event->result.event_type == event_type) { + QTAILQ_REMOVE(&n->aer_queue, event, entry); + n->aer_queued--; + g_free(event); + } } } @@ -1738,47 +1763,6 @@ static uint16_t nvme_check_dulbe(NvmeNamespace *ns, uint64_t slba, return NVME_SUCCESS; } -static void nvme_aio_err(NvmeRequest *req, int ret) -{ - uint16_t status = NVME_SUCCESS; - Error *local_err = NULL; - - switch (req->cmd.opcode) { - case NVME_CMD_READ: - status = NVME_UNRECOVERED_READ; - break; - case NVME_CMD_FLUSH: - case NVME_CMD_WRITE: - case NVME_CMD_WRITE_ZEROES: - case NVME_CMD_ZONE_APPEND: - case NVME_CMD_COPY: - status = NVME_WRITE_FAULT; - break; - default: - status = NVME_INTERNAL_DEV_ERROR; - break; - } - - if (ret == -ECANCELED) { - status = NVME_CMD_ABORT_REQ; - } - - trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), status); - - error_setg_errno(&local_err, -ret, "aio failed"); - error_report_err(local_err); - - /* - * Set the command status code to the first encountered error but allow a - * subsequent Internal Device Error to trump it. - */ - if (req->status && status != NVME_INTERNAL_DEV_ERROR) { - return; - } - - req->status = status; -} - static inline uint32_t nvme_zone_idx(NvmeNamespace *ns, uint64_t slba) { return ns->zone_size_log2 > 0 ? 
slba >> ns->zone_size_log2 : @@ -1816,7 +1800,7 @@ static uint16_t nvme_check_zone_state_for_write(NvmeZone *zone) trace_pci_nvme_err_zone_is_read_only(zslba); return NVME_ZONE_READ_ONLY; default: - assert(false); + g_assert_not_reached(); } return NVME_INTERNAL_DEV_ERROR; @@ -1870,7 +1854,7 @@ static uint16_t nvme_check_zone_state_for_read(NvmeZone *zone) trace_pci_nvme_err_zone_is_offline(zone->d.zslba); return NVME_ZONE_OFFLINE; default: - assert(false); + g_assert_not_reached(); } return NVME_INTERNAL_DEV_ERROR; @@ -2137,11 +2121,16 @@ static inline bool nvme_is_write(NvmeRequest *req) static void nvme_misc_cb(void *opaque, int ret) { NvmeRequest *req = opaque; + uint16_t cid = nvme_cid(req); - trace_pci_nvme_misc_cb(nvme_cid(req)); + trace_pci_nvme_misc_cb(cid); if (ret) { - nvme_aio_err(req, ret); + if (!req->status) { + req->status = NVME_INTERNAL_DEV_ERROR; + } + + trace_pci_nvme_err_aio(cid, strerror(-ret), req->status); } nvme_enqueue_req_completion(nvme_cq(req), req); @@ -2158,8 +2147,30 @@ void nvme_rw_complete_cb(void *opaque, int ret) trace_pci_nvme_rw_complete_cb(nvme_cid(req), blk_name(blk)); if (ret) { + Error *err = NULL; + block_acct_failed(stats, acct); - nvme_aio_err(req, ret); + + switch (req->cmd.opcode) { + case NVME_CMD_READ: + req->status = NVME_UNRECOVERED_READ; + break; + + case NVME_CMD_WRITE: + case NVME_CMD_WRITE_ZEROES: + case NVME_CMD_ZONE_APPEND: + req->status = NVME_WRITE_FAULT; + break; + + default: + req->status = NVME_INTERNAL_DEV_ERROR; + break; + } + + trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status); + + error_setg_errno(&err, -ret, "aio failed"); + error_report_err(err); } else { block_acct_done(stats, acct); } @@ -2244,7 +2255,10 @@ static void nvme_verify_cb(void *opaque, int ret) if (ret) { block_acct_failed(stats, acct); - nvme_aio_err(req, ret); + req->status = NVME_UNRECOVERED_READ; + + trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status); + goto out; } @@ -2343,7 +2357,10 @@ static void nvme_compare_mdata_cb(void *opaque, int ret) if (ret) { block_acct_failed(stats, acct); - nvme_aio_err(req, ret); + req->status = NVME_UNRECOVERED_READ; + + trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status); + goto out; } @@ -2425,7 +2442,10 @@ static void nvme_compare_data_cb(void *opaque, int ret) if (ret) { block_acct_failed(stats, acct); - nvme_aio_err(req, ret); + req->status = NVME_UNRECOVERED_READ; + + trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), req->status); + goto out; } @@ -2646,6 +2666,7 @@ static uint16_t nvme_verify(NvmeCtrl *n, NvmeRequest *req) uint64_t slba = le64_to_cpu(rw->slba); uint32_t nlb = le16_to_cpu(rw->nlb) + 1; size_t len = nvme_l2b(ns, nlb); + size_t data_len = len; int64_t offset = nvme_l2b(ns, slba); uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); uint32_t reftag = le32_to_cpu(rw->reftag); @@ -2665,7 +2686,11 @@ static uint16_t nvme_verify(NvmeCtrl *n, NvmeRequest *req) } } - if (len > n->page_size << n->params.vsl) { + if (nvme_ns_ext(ns) && !(NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt))) { + data_len += nvme_m2b(ns, nlb); + } + + if (data_len > (n->page_size << n->params.vsl)) { return NVME_INVALID_FIELD | NVME_DNR; } @@ -2899,6 +2924,7 @@ static void nvme_copy_out_completed_cb(void *opaque, int ret) if (ret < 0) { iocb->ret = ret; + req->status = NVME_WRITE_FAULT; goto out; } else if (iocb->ret < 0) { goto out; @@ -2963,6 +2989,7 @@ static void nvme_copy_in_completed_cb(void *opaque, int ret) if (ret < 0) { iocb->ret = ret; + req->status = 
NVME_UNRECOVERED_READ; goto out; } else if (iocb->ret < 0) { goto out; @@ -3406,7 +3433,11 @@ static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req) len += nvme_m2b(ns, nlb); } - status = nvme_check_mdts(n, len); + if (NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt)) { + status = nvme_check_mdts(n, data_len); + } else { + status = nvme_check_mdts(n, len); + } if (status) { return status; } @@ -3481,6 +3512,7 @@ static void nvme_flush_ns_cb(void *opaque, int ret) if (ret < 0) { iocb->ret = ret; + iocb->req->status = NVME_WRITE_FAULT; goto out; } else if (iocb->ret < 0) { goto out; @@ -3583,7 +3615,7 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req) BlockBackend *blk = ns->blkconf.blk; uint16_t status; - if (nvme_ns_ext(ns)) { + if (nvme_ns_ext(ns) && !(NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt))) { mapped_size += nvme_m2b(ns, nlb); if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { @@ -3695,7 +3727,7 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, BlockBackend *blk = ns->blkconf.blk; uint16_t status; - if (nvme_ns_ext(ns)) { + if (nvme_ns_ext(ns) && !(NVME_ID_CTRL_CTRATT_MEM(n->id_ctrl.ctratt))) { mapped_size += nvme_m2b(ns, nlb); if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { @@ -4474,7 +4506,7 @@ static uint16_t nvme_io_mgmt_recv_ruhs(NvmeCtrl *n, NvmeRequest *req, nruhsd = ns->fdp.nphs * endgrp->fdp.nrg; trans_len = sizeof(NvmeRuhStatus) + nruhsd * sizeof(NvmeRuhStatusDescr); - buf = g_malloc(trans_len); + buf = g_malloc0(trans_len); trans_len = MIN(trans_len, len); @@ -4572,6 +4604,61 @@ static uint16_t nvme_io_mgmt_send(NvmeCtrl *n, NvmeRequest *req) }; } +static uint16_t __nvme_io_cmd_nvm(NvmeCtrl *n, NvmeRequest *req) +{ + switch (req->cmd.opcode) { + case NVME_CMD_WRITE: + return nvme_write(n, req); + case NVME_CMD_READ: + return nvme_read(n, req); + case NVME_CMD_COMPARE: + return nvme_compare(n, req); + case NVME_CMD_WRITE_ZEROES: + return nvme_write_zeroes(n, req); + case NVME_CMD_DSM: + return nvme_dsm(n, req); + case NVME_CMD_VERIFY: + return nvme_verify(n, req); + case NVME_CMD_COPY: + return nvme_copy(n, req); + case NVME_CMD_IO_MGMT_RECV: + return nvme_io_mgmt_recv(n, req); + case NVME_CMD_IO_MGMT_SEND: + return nvme_io_mgmt_send(n, req); + } + + g_assert_not_reached(); +} + +static uint16_t nvme_io_cmd_nvm(NvmeCtrl *n, NvmeRequest *req) +{ + if (!(n->cse.iocs.nvm[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { + trace_pci_nvme_err_invalid_opc(req->cmd.opcode); + return NVME_INVALID_OPCODE | NVME_DNR; + } + + return __nvme_io_cmd_nvm(n, req); +} + +static uint16_t nvme_io_cmd_zoned(NvmeCtrl *n, NvmeRequest *req) +{ + if (!(n->cse.iocs.zoned[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { + trace_pci_nvme_err_invalid_opc(req->cmd.opcode); + return NVME_INVALID_OPCODE | NVME_DNR; + } + + switch (req->cmd.opcode) { + case NVME_CMD_ZONE_APPEND: + return nvme_zone_append(n, req); + case NVME_CMD_ZONE_MGMT_SEND: + return nvme_zone_mgmt_send(n, req); + case NVME_CMD_ZONE_MGMT_RECV: + return nvme_zone_mgmt_recv(n, req); + } + + return __nvme_io_cmd_nvm(n, req); +} + static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) { NvmeNamespace *ns; @@ -4613,11 +4700,6 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) return NVME_INVALID_FIELD | NVME_DNR; } - if (!(ns->iocs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { - trace_pci_nvme_err_invalid_opc(req->cmd.opcode); - return NVME_INVALID_OPCODE | NVME_DNR; - } - if (ns->status) { return ns->status; } @@ -4628,36 +4710,14 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) req->ns = ns; - switch 
(req->cmd.opcode) { - case NVME_CMD_WRITE_ZEROES: - return nvme_write_zeroes(n, req); - case NVME_CMD_ZONE_APPEND: - return nvme_zone_append(n, req); - case NVME_CMD_WRITE: - return nvme_write(n, req); - case NVME_CMD_READ: - return nvme_read(n, req); - case NVME_CMD_COMPARE: - return nvme_compare(n, req); - case NVME_CMD_DSM: - return nvme_dsm(n, req); - case NVME_CMD_VERIFY: - return nvme_verify(n, req); - case NVME_CMD_COPY: - return nvme_copy(n, req); - case NVME_CMD_ZONE_MGMT_SEND: - return nvme_zone_mgmt_send(n, req); - case NVME_CMD_ZONE_MGMT_RECV: - return nvme_zone_mgmt_recv(n, req); - case NVME_CMD_IO_MGMT_RECV: - return nvme_io_mgmt_recv(n, req); - case NVME_CMD_IO_MGMT_SEND: - return nvme_io_mgmt_send(n, req); - default: - assert(false); + switch (ns->csi) { + case NVME_CSI_NVM: + return nvme_io_cmd_nvm(n, req); + case NVME_CSI_ZONED: + return nvme_io_cmd_zoned(n, req); } - return NVME_INVALID_OPCODE | NVME_DNR; + g_assert_not_reached(); } static void nvme_cq_notifier(EventNotifier *e) @@ -4766,6 +4826,7 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req) while (!QTAILQ_EMPTY(&sq->out_req_list)) { r = QTAILQ_FIRST(&sq->out_req_list); assert(r->aiocb); + r->status = NVME_CMD_ABORT_SQ_DEL; blk_aio_cancel(r->aiocb); } @@ -4884,6 +4945,45 @@ static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats) stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE]; } +static uint16_t nvme_ocp_extended_smart_info(NvmeCtrl *n, uint8_t rae, + uint32_t buf_len, uint64_t off, + NvmeRequest *req) +{ + NvmeNamespace *ns = NULL; + NvmeSmartLogExtended smart_l = { 0 }; + struct nvme_stats stats = { 0 }; + uint32_t trans_len; + + if (off >= sizeof(smart_l)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + /* accumulate all stats from all namespaces */ + for (int i = 1; i <= NVME_MAX_NAMESPACES; i++) { + ns = nvme_ns(n, i); + if (ns) { + nvme_set_blk_stats(ns, &stats); + } + } + + smart_l.physical_media_units_written[0] = cpu_to_le64(stats.units_written); + smart_l.physical_media_units_read[0] = cpu_to_le64(stats.units_read); + smart_l.log_page_version = 0x0005; + + static const uint8_t guid[16] = { + 0xC5, 0xAF, 0x10, 0x28, 0xEA, 0xBF, 0xF2, 0xA4, + 0x9C, 0x4F, 0x6F, 0x7C, 0xC9, 0x14, 0xD5, 0xAF + }; + memcpy(smart_l.log_page_guid, guid, sizeof(smart_l.log_page_guid)); + + if (!rae) { + nvme_clear_events(n, NVME_AER_TYPE_SMART); + } + + trans_len = MIN(sizeof(smart_l) - off, buf_len); + return nvme_c2h(n, (uint8_t *) &smart_l + off, trans_len, req); +} + static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, uint64_t off, NvmeRequest *req) { @@ -5029,7 +5129,7 @@ static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, uint64_t off, NvmeRequest *req) { - uint32_t nslist[1024]; + uint32_t nslist[1024] = {}; uint32_t trans_len; int i = 0; uint32_t nsid; @@ -5039,7 +5139,6 @@ static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, return NVME_INVALID_FIELD | NVME_DNR; } - memset(nslist, 0x0, sizeof(nslist)); trans_len = MIN(sizeof(nslist) - off, buf_len); while ((nsid = find_first_bit(n->changed_nsids, NVME_CHANGED_NSID_SIZE)) != @@ -5077,7 +5176,7 @@ static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len, uint64_t off, NvmeRequest *req) { NvmeEffectsLog log = {}; - const uint32_t *src_iocs = NULL; + const uint32_t *iocs = NULL; uint32_t trans_len; if (off >= sizeof(log)) { @@ -5087,25 +5186,26 @@ static uint16_t 
nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len, switch (NVME_CC_CSS(ldl_le_p(&n->bar.cc))) { case NVME_CC_CSS_NVM: - src_iocs = nvme_cse_iocs_nvm; - /* fall through */ - case NVME_CC_CSS_ADMIN_ONLY: + iocs = n->cse.iocs.nvm; break; - case NVME_CC_CSS_CSI: + + case NVME_CC_CSS_ALL: switch (csi) { case NVME_CSI_NVM: - src_iocs = nvme_cse_iocs_nvm; + iocs = n->cse.iocs.nvm; break; case NVME_CSI_ZONED: - src_iocs = nvme_cse_iocs_zoned; + iocs = n->cse.iocs.zoned; break; } + + break; } - memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs)); + memcpy(log.acs, n->cse.acs, sizeof(log.acs)); - if (src_iocs) { - memcpy(log.iocs, src_iocs, sizeof(log.iocs)); + if (iocs) { + memcpy(log.iocs, iocs, sizeof(log.iocs)); } trans_len = MIN(sizeof(log) - off, buf_len); @@ -5113,6 +5213,23 @@ static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len, return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req); } +static uint16_t nvme_vendor_specific_log(NvmeCtrl *n, uint8_t rae, + uint32_t buf_len, uint64_t off, + NvmeRequest *req, uint8_t lid) +{ + switch (lid) { + case NVME_OCP_EXTENDED_SMART_INFO: + if (n->params.ocp) { + return nvme_ocp_extended_smart_info(n, rae, buf_len, off, req); + } + break; + /* add a case for each additional vendor specific log id */ + } + + trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid); + return NVME_INVALID_FIELD | NVME_DNR; +} + static size_t sizeof_fdp_conf_descr(size_t nruh, size_t vss) { size_t entry_siz = sizeof(NvmeFdpDescrHdr) + nruh * sizeof(NvmeRuhDescr) @@ -5363,6 +5480,8 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req) return nvme_smart_info(n, rae, len, off, req); case NVME_LOG_FW_SLOT_INFO: return nvme_fw_log_info(n, len, off, req); + case NVME_LOG_VENDOR_START...NVME_LOG_VENDOR_END: + return nvme_vendor_specific_log(n, rae, len, off, req, lid); case NVME_LOG_CHANGED_NSLIST: return nvme_changed_nslist(n, rae, len, off, req); case NVME_LOG_CMD_EFFECTS: @@ -5396,7 +5515,7 @@ static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n) event_notifier_set_handler(&cq->notifier, NULL); event_notifier_cleanup(&cq->notifier); } - if (msix_enabled(pci)) { + if (msix_enabled(pci) && cq->irq_enabled) { msix_vector_unuse(pci, cq->vector); } if (cq->cqid) { @@ -5437,9 +5556,10 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr, { PCIDevice *pci = PCI_DEVICE(n); - if (msix_enabled(pci)) { + if (msix_enabled(pci) && irq_enabled) { msix_vector_use(pci, vector); } + cq->ctrl = n; cq->cqid = cqid; cq->size = size; @@ -5549,7 +5669,9 @@ static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req) switch (c->csi) { case NVME_CSI_NVM: id_nvm->vsl = n->params.vsl; + id_nvm->dmrl = NVME_ID_CTRL_NVM_DMRL_MAX; id_nvm->dmrsl = cpu_to_le32(n->dmrsl); + id_nvm->dmsl = NVME_ID_CTRL_NVM_DMRL_MAX * n->dmrsl; break; case NVME_CSI_ZONED: @@ -5591,7 +5713,7 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active) return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req); } - return NVME_INVALID_CMD_SET | NVME_DNR; + return NVME_INVALID_IOCS | NVME_DNR; } static uint16_t nvme_identify_ctrl_list(NvmeCtrl *n, NvmeRequest *req, @@ -5672,6 +5794,33 @@ static uint16_t nvme_identify_sec_ctrl_list(NvmeCtrl *n, NvmeRequest *req) return nvme_c2h(n, (uint8_t *)&list, sizeof(list), req); } +static uint16_t nvme_identify_ns_ind(NvmeCtrl *n, NvmeRequest *req, bool alloc) +{ + NvmeNamespace *ns; + NvmeIdentify *c = (NvmeIdentify *)&req->cmd; + uint32_t nsid = le32_to_cpu(c->nsid); + + 
trace_pci_nvme_identify_ns_ind(nsid); + + if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { + return NVME_INVALID_NSID | NVME_DNR; + } + + ns = nvme_ns(n, nsid); + if (unlikely(!ns)) { + if (alloc) { + ns = nvme_subsys_ns(n->subsys, nsid); + if (!ns) { + return nvme_rpt_empty_id_struct(n, req); + } + } else { + return nvme_rpt_empty_id_struct(n, req); + } + } + + return nvme_c2h(n, (uint8_t *)&ns->id_ns_ind, sizeof(NvmeIdNsInd), req); +} + static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req, bool active) { @@ -5926,6 +6075,10 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req) return nvme_identify_sec_ctrl_list(n, req); case NVME_ID_CNS_CS_NS: return nvme_identify_ns_csi(n, req, true); + case NVME_ID_CNS_CS_IND_NS: + return nvme_identify_ns_ind(n, req, false); + case NVME_ID_CNS_CS_IND_NS_ALLOCATED: + return nvme_identify_ns_ind(n, req, true); case NVME_ID_CNS_CS_NS_PRESENT: return nvme_identify_ns_csi(n, req, false); case NVME_ID_CNS_CTRL: @@ -5983,6 +6136,7 @@ static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req) QTAILQ_FOREACH_SAFE(r, &sq->out_req_list, entry, next) { if (r->cqe.cid == cid) { if (r->aiocb) { + r->status = NVME_CMD_ABORT_REQ; blk_aio_cancel_async(r->aiocb); } break; @@ -6293,8 +6447,10 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) if (ret) { return ret; } - goto out; + break; + case NVME_WRITE_ATOMICITY: + result = n->dn; break; default: result = nvme_feature_default[fid]; @@ -6378,6 +6534,8 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) uint8_t save = NVME_SETFEAT_SAVE(dw10); uint16_t status; int i; + NvmeIdCtrl *id = &n->id_ctrl; + NvmeAtomic *atomic = &n->atomic; trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11); @@ -6522,7 +6680,7 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) case NVME_COMMAND_SET_PROFILE: if (dw11 & 0x1ff) { trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff); - return NVME_CMD_SET_CMB_REJECTED | NVME_DNR; + return NVME_IOCS_COMBINATION_REJECTED | NVME_DNR; } break; case NVME_FDP_MODE: @@ -6530,6 +6688,22 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) return NVME_CMD_SEQ_ERROR | NVME_DNR; case NVME_FDP_EVENTS: return nvme_set_feature_fdp_events(n, ns, req); + case NVME_WRITE_ATOMICITY: + + n->dn = 0x1 & dw11; + + if (n->dn) { + atomic->atomic_max_write_size = le16_to_cpu(id->awupf) + 1; + } else { + atomic->atomic_max_write_size = le16_to_cpu(id->awun) + 1; + } + + if (atomic->atomic_max_write_size == 1) { + atomic->atomic_writes = 0; + } else { + atomic->atomic_writes = 1; + } + break; default: return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; } @@ -6555,40 +6729,49 @@ static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req) return NVME_NO_COMPLETE; } -static void nvme_update_dmrsl(NvmeCtrl *n) +static void nvme_update_dsm_limits(NvmeCtrl *n, NvmeNamespace *ns) { - int nsid; + if (ns) { + n->dmrsl = + MIN_NON_ZERO(n->dmrsl, BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); - for (nsid = 1; nsid <= NVME_MAX_NAMESPACES; nsid++) { - NvmeNamespace *ns = nvme_ns(n, nsid); + return; + } + + for (uint32_t nsid = 1; nsid <= NVME_MAX_NAMESPACES; nsid++) { + ns = nvme_ns(n, nsid); if (!ns) { continue; } - n->dmrsl = MIN_NON_ZERO(n->dmrsl, - BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); + n->dmrsl = + MIN_NON_ZERO(n->dmrsl, BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); } } -static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns) +static bool nvme_csi_supported(NvmeCtrl *n, uint8_t csi) { - uint32_t cc = ldl_le_p(&n->bar.cc); + uint32_t cc; 
- ns->iocs = nvme_cse_iocs_none; - switch (ns->csi) { + switch (csi) { case NVME_CSI_NVM: - if (NVME_CC_CSS(cc) != NVME_CC_CSS_ADMIN_ONLY) { - ns->iocs = nvme_cse_iocs_nvm; - } - break; + return true; + case NVME_CSI_ZONED: - if (NVME_CC_CSS(cc) == NVME_CC_CSS_CSI) { - ns->iocs = nvme_cse_iocs_zoned; - } else if (NVME_CC_CSS(cc) == NVME_CC_CSS_NVM) { - ns->iocs = nvme_cse_iocs_nvm; - } - break; + cc = ldl_le_p(&n->bar.cc); + + return NVME_CC_CSS(cc) == NVME_CC_CSS_ALL; } + + g_assert_not_reached(); +} + +static void nvme_detach_ns(NvmeCtrl *n, NvmeNamespace *ns) +{ + assert(ns->attached > 0); + + n->namespaces[ns->params.nsid] = NULL; + ns->attached--; } static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) @@ -6641,8 +6824,12 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) return NVME_NS_PRIVATE | NVME_DNR; } + if (!nvme_csi_supported(ctrl, ns->csi)) { + return NVME_IOCS_NOT_SUPPORTED | NVME_DNR; + } + nvme_attach_ns(ctrl, ns); - nvme_select_iocs_ns(ctrl, ns); + nvme_update_dsm_limits(ctrl, ns); break; @@ -6651,10 +6838,8 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) return NVME_NS_NOT_ATTACHED | NVME_DNR; } - ctrl->namespaces[nsid] = NULL; - ns->attached--; - - nvme_update_dmrsl(ctrl); + nvme_detach_ns(ctrl, ns); + nvme_update_dsm_limits(ctrl, NULL); break; @@ -7157,7 +7342,7 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode, nvme_adm_opc_str(req->cmd.opcode)); - if (!(nvme_cse_acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { + if (!(n->cse.acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode); return NVME_INVALID_OPCODE | NVME_DNR; } @@ -7205,7 +7390,7 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) case NVME_ADM_CMD_DIRECTIVE_RECV: return nvme_directive_receive(n, req); default: - assert(false); + g_assert_not_reached(); } return NVME_INVALID_OPCODE | NVME_DNR; @@ -7227,6 +7412,81 @@ static void nvme_update_sq_tail(NvmeSQueue *sq) trace_pci_nvme_update_sq_tail(sq->sqid, sq->tail); } +#define NVME_ATOMIC_NO_START 0 +#define NVME_ATOMIC_START_ATOMIC 1 +#define NVME_ATOMIC_START_NONATOMIC 2 + +static int nvme_atomic_write_check(NvmeCtrl *n, NvmeCmd *cmd, + NvmeAtomic *atomic) +{ + NvmeRwCmd *rw = (NvmeRwCmd *)cmd; + uint64_t slba = le64_to_cpu(rw->slba); + uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb); + uint64_t elba = slba + nlb; + bool cmd_atomic_wr = true; + int i; + + if ((cmd->opcode == NVME_CMD_READ) || ((cmd->opcode == NVME_CMD_WRITE) && + ((rw->nlb + 1) > atomic->atomic_max_write_size))) { + cmd_atomic_wr = false; + } + + /* + * Walk the queues to see if there are any atomic conflicts. + */ + for (i = 1; i < n->params.max_ioqpairs + 1; i++) { + NvmeSQueue *sq; + NvmeRequest *req; + NvmeRwCmd *req_rw; + uint64_t req_slba; + uint32_t req_nlb; + uint64_t req_elba; + + sq = n->sq[i]; + if (!sq) { + continue; + } + + /* + * Walk all the requests on a given queue. 
+ */ + QTAILQ_FOREACH(req, &sq->out_req_list, entry) { + req_rw = (NvmeRwCmd *)&req->cmd; + + if (((req_rw->opcode == NVME_CMD_WRITE) || + (req_rw->opcode == NVME_CMD_READ)) && + (cmd->nsid == req->ns->params.nsid)) { + req_slba = le64_to_cpu(req_rw->slba); + req_nlb = (uint32_t)le16_to_cpu(req_rw->nlb); + req_elba = req_slba + req_nlb; + + if (cmd_atomic_wr) { + if ((elba >= req_slba) && (slba <= req_elba)) { + return NVME_ATOMIC_NO_START; + } + } else { + if (req->atomic_write && ((elba >= req_slba) && + (slba <= req_elba))) { + return NVME_ATOMIC_NO_START; + } + } + } + } + } + if (cmd_atomic_wr) { + return NVME_ATOMIC_START_ATOMIC; + } + return NVME_ATOMIC_START_NONATOMIC; +} + +static NvmeAtomic *nvme_get_atomic(NvmeCtrl *n, NvmeCmd *cmd) +{ + if (n->atomic.atomic_writes) { + return &n->atomic; + } + return NULL; +} + static void nvme_process_sq(void *opaque) { NvmeSQueue *sq = opaque; @@ -7243,6 +7503,9 @@ static void nvme_process_sq(void *opaque) } while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) { + NvmeAtomic *atomic; + bool cmd_is_atomic; + addr = sq->dma_addr + (sq->head << NVME_SQES); if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) { trace_pci_nvme_err_addr_read(addr); @@ -7250,6 +7513,26 @@ static void nvme_process_sq(void *opaque) stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); break; } + + atomic = nvme_get_atomic(n, &cmd); + + cmd_is_atomic = false; + if (sq->sqid && atomic) { + int ret; + + ret = nvme_atomic_write_check(n, &cmd, atomic); + switch (ret) { + case NVME_ATOMIC_NO_START: + qemu_bh_schedule(sq->bh); + return; + case NVME_ATOMIC_START_ATOMIC: + cmd_is_atomic = true; + break; + case NVME_ATOMIC_START_NONATOMIC: + default: + break; + } + } nvme_inc_sq_head(sq); req = QTAILQ_FIRST(&sq->req_list); @@ -7259,6 +7542,10 @@ static void nvme_process_sq(void *opaque) req->cqe.cid = cmd.cid; memcpy(&req->cmd, &cmd, sizeof(NvmeCmd)); + if (sq->sqid && atomic) { + req->atomic_write = cmd_is_atomic; + } + status = sq->sqid ? 
nvme_io_cmd(n, req) : nvme_admin_cmd(n, req); if (status != NVME_NO_COMPLETE) { @@ -7362,6 +7649,8 @@ static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst) n->outstanding_aers = 0; n->qs_created = false; + n->dn = n->params.atomic_dn; /* Set Disable Normal */ + nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize); if (pci_is_vf(pci_dev)) { @@ -7400,21 +7689,6 @@ static void nvme_ctrl_shutdown(NvmeCtrl *n) } } -static void nvme_select_iocs(NvmeCtrl *n) -{ - NvmeNamespace *ns; - int i; - - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - if (!ns) { - continue; - } - - nvme_select_iocs_ns(n, ns); - } -} - static int nvme_start_ctrl(NvmeCtrl *n) { uint64_t cap = ldq_le_p(&n->bar.cap); @@ -7481,7 +7755,22 @@ static int nvme_start_ctrl(NvmeCtrl *n) nvme_set_timestamp(n, 0ULL); - nvme_select_iocs(n); + /* verify that the command sets of attached namespaces are supported */ + for (int i = 1; i <= NVME_MAX_NAMESPACES; i++) { + NvmeNamespace *ns = nvme_subsys_ns(n->subsys, i); + + if (!ns || (!ns->params.shared && ns->ctrl != n)) { + continue; + } + + if (nvme_csi_supported(n, ns->csi) && !ns->params.detached) { + if (!ns->attached || ns->params.shared) { + nvme_attach_ns(n, ns); + } + } + } + + nvme_update_dsm_limits(n, NULL); return 0; } @@ -7806,7 +8095,6 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val) /* Completion queue doorbell write */ uint16_t new_head = val & 0xffff; - int start_sqs; NvmeCQueue *cq; qid = (addr - (0x1000 + (1 << 2))) >> 3; @@ -7857,18 +8145,15 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val) trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head); - start_sqs = nvme_cq_full(cq) ? 1 : 0; + /* scheduled deferred cqe posting if queue was previously full */ + if (nvme_cq_full(cq)) { + qemu_bh_schedule(cq->bh); + } + cq->head = new_head; if (!qid && n->dbbuf_enabled) { stl_le_pci_dma(pci, cq->db_addr, cq->head, MEMTXATTRS_UNSPECIFIED); } - if (start_sqs) { - NvmeSQueue *sq; - QTAILQ_FOREACH(sq, &cq->sq_list, entry) { - qemu_bh_schedule(sq->bh); - } - qemu_bh_schedule(cq->bh); - } if (cq->tail == cq->head) { if (cq->irq_enabled) { @@ -8054,6 +8339,11 @@ static bool nvme_check_params(NvmeCtrl *n, Error **errp) host_memory_backend_set_mapped(n->pmr.dev, true); } + if (!n->params.mdts || ((1 << n->params.mdts) + 1) > IOV_MAX) { + error_setg(errp, "mdts exceeds IOV_MAX"); + return false; + } + if (n->params.zasl > n->params.mdts) { error_setg(errp, "zoned.zasl (Zone Append Size Limit) must be less " "than or equal to mdts (Maximum Data Transfer Size)"); @@ -8138,6 +8428,8 @@ static void nvme_init_state(NvmeCtrl *n) NvmeSecCtrlEntry *list = n->sec_ctrl_list; NvmeSecCtrlEntry *sctrl; PCIDevice *pci = PCI_DEVICE(n); + NvmeAtomic *atomic = &n->atomic; + NvmeIdCtrl *id = &n->id_ctrl; uint8_t max_vfs; int i; @@ -8195,6 +8487,29 @@ static void nvme_init_state(NvmeCtrl *n) cpu_to_le16(n->params.sriov_max_vi_per_vf) : cap->vifrt / MAX(max_vfs, 1); } + + /* Atomic Write */ + id->awun = cpu_to_le16(n->params.atomic_awun); + id->awupf = cpu_to_le16(n->params.atomic_awupf); + n->dn = n->params.atomic_dn; + + if (id->awun || id->awupf) { + if (id->awupf > id->awun) { + id->awupf = 0; + } + + if (n->dn) { + atomic->atomic_max_write_size = id->awupf + 1; + } else { + atomic->atomic_max_write_size = id->awun + 1; + } + + if (atomic->atomic_max_write_size == 1) { + atomic->atomic_writes = 0; + } else { + atomic->atomic_writes = 1; + } + } } static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev) @@ -8271,7 +8586,8 @@ static uint64_t 
nvme_mbar_size(unsigned total_queues, unsigned total_irqs, return pow2ceil(bar_size); } -static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset) +static bool nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset, + Error **errp) { uint16_t vf_dev_id = n->params.use_intel_id ? PCI_DEVICE_ID_INTEL_NVME : PCI_DEVICE_ID_REDHAT_NVME; @@ -8280,12 +8596,16 @@ static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset) le16_to_cpu(cap->vifrsm), NULL, NULL); - pcie_sriov_pf_init(pci_dev, offset, "nvme", vf_dev_id, - n->params.sriov_max_vfs, n->params.sriov_max_vfs, - NVME_VF_OFFSET, NVME_VF_STRIDE); + if (!pcie_sriov_pf_init(pci_dev, offset, "nvme", vf_dev_id, + n->params.sriov_max_vfs, n->params.sriov_max_vfs, + NVME_VF_OFFSET, NVME_VF_STRIDE, errp)) { + return false; + } pcie_sriov_pf_init_vf_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, bar_size); + + return true; } static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset) @@ -8293,8 +8613,7 @@ static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset) Error *err = NULL; int ret; - ret = pci_add_capability(pci_dev, PCI_CAP_ID_PM, offset, - PCI_PM_SIZEOF, &err); + ret = pci_pm_init(pci_dev, offset, &err); if (err) { error_report_err(err); return ret; @@ -8340,7 +8659,7 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) unsigned nr_vectors; int ret; - pci_conf[PCI_INTERRUPT_PIN] = 1; + pci_conf[PCI_INTERRUPT_PIN] = pci_is_vf(pci_dev) ? 0 : 1; pci_config_set_prog_interface(pci_conf, 0x2); if (n->params.use_intel_id) { @@ -8410,6 +8729,11 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) return false; } + if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs && + !nvme_init_sriov(n, pci_dev, 0x120, errp)) { + return false; + } + nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize); pcie_cap_deverr_init(pci_dev); @@ -8439,10 +8763,6 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) nvme_init_pmr(n, pci_dev); } - if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) { - nvme_init_sriov(n, pci_dev, 0x120); - } - return true; } @@ -8465,7 +8785,13 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) uint8_t *pci_conf = pci_dev->config; uint64_t cap = ldq_le_p(&n->bar.cap); NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); - uint32_t ctratt; + uint32_t ctratt = le32_to_cpu(id->ctratt); + uint16_t oacs; + + memcpy(n->cse.acs, nvme_cse_acs_default, sizeof(n->cse.acs)); + memcpy(n->cse.iocs.nvm, nvme_cse_iocs_nvm_default, sizeof(n->cse.iocs.nvm)); + memcpy(n->cse.iocs.zoned, nvme_cse_iocs_zoned_default, + sizeof(n->cse.iocs.zoned)); id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID)); id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID)); @@ -8476,7 +8802,12 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) id->cntlid = cpu_to_le16(n->cntlid); id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR); - ctratt = NVME_CTRATT_ELBAS; + + ctratt |= NVME_CTRATT_ELBAS; + if (n->params.ctratt.mem) { + ctratt |= NVME_CTRATT_MEM; + } + id->ctratt = cpu_to_le32(ctratt); id->rab = 6; @@ -8492,9 +8823,23 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) id->mdts = n->params.mdts; id->ver = cpu_to_le32(NVME_SPEC_VER); - id->oacs = - cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF | - NVME_OACS_DIRECTIVES); + + oacs = NVME_OACS_NMS | NVME_OACS_FORMAT | NVME_OACS_DIRECTIVES; + + if (n->params.dbcs) { + oacs |= NVME_OACS_DBCS; + + 
n->cse.acs[NVME_ADM_CMD_DBBUF_CONFIG] = NVME_CMD_EFF_CSUPP; + } + + if (n->params.sriov_max_vfs) { + oacs |= NVME_OACS_VMS; + + n->cse.acs[NVME_ADM_CMD_VIRT_MNGMT] = NVME_CMD_EFF_CSUPP; + } + + id->oacs = cpu_to_le16(oacs); + id->cntrltype = 0x1; /* @@ -8536,7 +8881,8 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0 | NVME_OCFS_COPY_FORMAT_1 | NVME_OCFS_COPY_FORMAT_2 | NVME_OCFS_COPY_FORMAT_3); - id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN); + id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN | + NVME_CTRL_SGLS_MPTR_SGL); nvme_init_subnqn(n); @@ -8544,25 +8890,11 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) id->psd[0].enlat = cpu_to_le32(0x10); id->psd[0].exlat = cpu_to_le32(0x4); - if (n->subsys) { - id->cmic |= NVME_CMIC_MULTI_CTRL; - ctratt |= NVME_CTRATT_ENDGRPS; - - id->endgidmax = cpu_to_le16(0x1); - - if (n->subsys->endgrp.fdp.enabled) { - ctratt |= NVME_CTRATT_FDPS; - } - } - - id->ctratt = cpu_to_le32(ctratt); - NVME_CAP_SET_MQES(cap, n->params.mqes); NVME_CAP_SET_CQR(cap, 1); NVME_CAP_SET_TO(cap, 0xf); - NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_NVM); - NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_CSI_SUPP); - NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_ADMIN_ONLY); + NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_NCSS); + NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_IOCSS); NVME_CAP_SET_MPSMAX(cap, 4); NVME_CAP_SET_CMBS(cap, n->params.cmb_size_mb ? 1 : 0); NVME_CAP_SET_PMRS(cap, n->pmr.dev ? 1 : 0); @@ -8581,7 +8913,29 @@ static int nvme_init_subsys(NvmeCtrl *n, Error **errp) int cntlid; if (!n->subsys) { - return 0; + DeviceState *dev = qdev_new(TYPE_NVME_SUBSYS); + + qdev_prop_set_string(dev, "nqn", n->params.serial); + + if (!qdev_realize(dev, NULL, errp)) { + return -1; + } + + n->subsys = NVME_SUBSYS(dev); + } else { + NvmeIdCtrl *id = &n->id_ctrl; + uint32_t ctratt = le32_to_cpu(id->ctratt); + + id->cmic |= NVME_CMIC_MULTI_CTRL; + ctratt |= NVME_CTRATT_ENDGRPS; + + id->endgidmax = cpu_to_le16(0x1); + + if (n->subsys->endgrp.fdp.enabled) { + ctratt |= NVME_CTRATT_FDPS; + } + + id->ctratt = cpu_to_le32(ctratt); } cntlid = nvme_subsys_register_ctrl(n, errp); @@ -8601,9 +8955,6 @@ void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns) n->namespaces[nsid] = ns; ns->attached++; - - n->dmrsl = MIN_NON_ZERO(n->dmrsl, - BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); } static void nvme_realize(PCIDevice *pci_dev, Error **errp) @@ -8626,6 +8977,13 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp) */ n->params.serial = g_strdup(pn->params.serial); n->subsys = pn->subsys; + + /* + * Assigning this link (strong link) causes an `object_unref` later in + * `object_release_link_property`. Increment the refcount to balance + * this out. 
+ */ + object_ref(OBJECT(pn->subsys)); } if (!nvme_check_params(n, errp)) { @@ -8647,12 +9005,13 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp) if (n->namespace.blkconf.blk) { ns = &n->namespace; ns->params.nsid = 1; + ns->ctrl = n; if (nvme_ns_setup(ns, errp)) { return; } - nvme_attach_ns(n, ns); + n->subsys->namespaces[ns->params.nsid] = ns; } } @@ -8664,17 +9023,15 @@ static void nvme_exit(PCIDevice *pci_dev) nvme_ctrl_reset(n, NVME_RESET_FUNCTION); - if (n->subsys) { - for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - ns = nvme_ns(n, i); - if (ns) { - ns->attached--; - } + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + ns = nvme_ns(n, i); + if (ns) { + ns->attached--; } - - nvme_subsys_unregister_ctrl(n->subsys, n); } + nvme_subsys_unregister_ctrl(n->subsys, n); + g_free(n->cq); g_free(n->sq); g_free(n->aer_reqs); @@ -8696,11 +9053,16 @@ static void nvme_exit(PCIDevice *pci_dev) pcie_sriov_pf_exit(pci_dev); } - msix_uninit(pci_dev, &n->bar0, &n->bar0); + if (n->params.msix_exclusive_bar && !pci_is_vf(pci_dev)) { + msix_uninit_exclusive_bar(pci_dev); + } else { + msix_uninit(pci_dev, &n->bar0, &n->bar0); + } + memory_region_del_subregion(&n->bar0, &n->iomem); } -static Property nvme_props[] = { +static const Property nvme_props[] = { DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf), DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND, HostMemoryBackend *), @@ -8718,6 +9080,7 @@ static Property nvme_props[] = { DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false), DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false), DEFINE_PROP_BOOL("ioeventfd", NvmeCtrl, params.ioeventfd, false), + DEFINE_PROP_BOOL("dbcs", NvmeCtrl, params.dbcs, true), DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0), DEFINE_PROP_BOOL("zoned.auto_transition", NvmeCtrl, params.auto_transition_zones, true), @@ -8734,7 +9097,11 @@ static Property nvme_props[] = { false), DEFINE_PROP_UINT16("mqes", NvmeCtrl, params.mqes, 0x7ff), DEFINE_PROP_UINT16("spdm_port", PCIDevice, spdm_port, 0), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_BOOL("ctratt.mem", NvmeCtrl, params.ctratt.mem, false), + DEFINE_PROP_BOOL("atomic.dn", NvmeCtrl, params.atomic_dn, 0), + DEFINE_PROP_UINT16("atomic.awun", NvmeCtrl, params.atomic_awun, 0), + DEFINE_PROP_UINT16("atomic.awupf", NvmeCtrl, params.atomic_awupf, 0), + DEFINE_PROP_BOOL("ocp", NvmeCtrl, params.ocp, false), }; static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name, @@ -8829,7 +9196,7 @@ static const VMStateDescription nvme_vmstate = { .unmigratable = 1, }; -static void nvme_class_init(ObjectClass *oc, void *data) +static void nvme_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); @@ -8845,7 +9212,7 @@ static void nvme_class_init(ObjectClass *oc, void *data) dc->desc = "Non-Volatile Memory Express"; device_class_set_props(dc, nvme_props); dc->vmsd = &nvme_vmstate; - dc->reset = nvme_pci_reset; + device_class_set_legacy_reset(dc, nvme_pci_reset); } static void nvme_instance_init(Object *obj) @@ -8867,7 +9234,7 @@ static const TypeInfo nvme_info = { .instance_size = sizeof(NvmeCtrl), .instance_init = nvme_instance_init, .class_init = nvme_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { } }, diff --git a/hw/nvme/dif.c b/hw/nvme/dif.c index 01b19c33734..4e7874f3223 100644 --- a/hw/nvme/dif.c +++ b/hw/nvme/dif.c @@ -10,7 +10,7 @@ #include "qemu/osdep.h" 
#include "qapi/error.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "nvme.h" #include "dif.h" @@ -575,11 +575,6 @@ uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req) uint8_t *mbuf, *end; int16_t pil = ns->lbaf.ms - nvme_pi_tuple_size(ns); - status = nvme_check_prinfo(ns, prinfo, slba, reftag); - if (status) { - goto err; - } - flags = 0; ctx->mdata.bounce = g_malloc0(mlen); diff --git a/hw/nvme/nguid.c b/hw/nvme/nguid.c index 829832bd9f4..4cd6fad6ac9 100644 --- a/hw/nvme/nguid.c +++ b/hw/nvme/nguid.c @@ -149,7 +149,7 @@ static void nvme_nguid_stringify(const NvmeNGUID *nguid, char *out) static void get_nguid(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; NvmeNGUID *nguid = object_field_prop_ptr(obj, prop); char buffer[NGUID_STR_LEN]; char *p = buffer; @@ -162,7 +162,7 @@ static void get_nguid(Object *obj, Visitor *v, const char *name, void *opaque, static void set_nguid(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; NvmeNGUID *nguid = object_field_prop_ptr(obj, prop); char *str; @@ -179,7 +179,7 @@ static void set_nguid(Object *obj, Visitor *v, const char *name, void *opaque, } const PropertyInfo qdev_prop_nguid = { - .name = "str", + .type = "str", .description = "NGUID or \"" NGUID_VALUE_AUTO "\" for random value", .get = get_nguid, diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c index ea8db175dbd..6df2e8e7c5a 100644 --- a/hw/nvme/ns.c +++ b/hw/nvme/ns.c @@ -18,8 +18,8 @@ #include "qemu/error-report.h" #include "qapi/error.h" #include "qemu/bitops.h" -#include "sysemu/sysemu.h" -#include "sysemu/block-backend.h" +#include "system/system.h" +#include "system/block-backend.h" #include "nvme.h" #include "trace.h" @@ -30,6 +30,7 @@ void nvme_ns_init_format(NvmeNamespace *ns) { NvmeIdNs *id_ns = &ns->id_ns; + NvmeIdNsNvm *id_ns_nvm = &ns->id_ns_nvm; BlockDriverInfo bdi; int npdg, ret; int64_t nlbas; @@ -55,6 +56,8 @@ void nvme_ns_init_format(NvmeNamespace *ns) } id_ns->npda = id_ns->npdg = npdg - 1; + id_ns_nvm->npdal = npdg; + id_ns_nvm->npdgl = npdg; } static int nvme_ns_init(NvmeNamespace *ns, Error **errp) @@ -62,6 +65,7 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp) static uint64_t ns_count; NvmeIdNs *id_ns = &ns->id_ns; NvmeIdNsNvm *id_ns_nvm = &ns->id_ns_nvm; + NvmeIdNsInd *id_ns_ind = &ns->id_ns_ind; uint8_t ds; uint16_t ms; int i; @@ -72,10 +76,12 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp) ns->id_ns.dlfeat = 0x1; /* support DULBE and I/O optimization fields */ - id_ns->nsfeat |= (0x4 | 0x10); + id_ns->nsfeat |= (NVME_ID_NS_NSFEAT_DAE | NVME_ID_NS_NSFEAT_OPTPERF_ALL); if (ns->params.shared) { - id_ns->nmic |= NVME_NMIC_NS_SHARED; + id_ns->nmic |= NVME_ID_NS_IND_NMIC_SHRNS; + id_ns_ind->nmic = NVME_ID_NS_IND_NMIC_SHRNS; + id_ns_ind->nstat = NVME_ID_NS_IND_NSTAT_NRDY; } /* Substitute a missing EUI-64 by an autogenerated one */ @@ -721,25 +727,14 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp) uint32_t nsid = ns->params.nsid; int i; - if (!n->subsys) { - /* If no subsys, the ns cannot be attached to more than one ctrl. */ - ns->params.shared = false; - if (ns->params.detached) { - error_setg(errp, "detached requires that the nvme device is " - "linked to an nvme-subsys device"); - return; - } - } else { - /* - * If this namespace belongs to a subsystem (through a link on the - * controller device), reparent the device. 
- */ - if (!qdev_set_parent_bus(dev, &subsys->bus.parent_bus, errp)) { - return; - } - ns->subsys = subsys; - ns->endgrp = &subsys->endgrp; + assert(subsys); + + /* reparent to subsystem bus */ + if (!qdev_set_parent_bus(dev, &subsys->bus.parent_bus, errp)) { + return; } + ns->subsys = subsys; + ns->endgrp = &subsys->endgrp; if (nvme_ns_setup(ns, errp)) { return; @@ -747,7 +742,7 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp) if (!nsid) { for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { - if (nvme_ns(n, i) || nvme_subsys_ns(subsys, i)) { + if (nvme_subsys_ns(subsys, i)) { continue; } @@ -759,40 +754,22 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp) error_setg(errp, "no free namespace id"); return; } - } else { - if (nvme_ns(n, nsid) || nvme_subsys_ns(subsys, nsid)) { - error_setg(errp, "namespace id '%d' already allocated", nsid); - return; - } + } else if (nvme_subsys_ns(subsys, nsid)) { + error_setg(errp, "namespace id '%d' already allocated", nsid); + return; } - if (subsys) { - subsys->namespaces[nsid] = ns; + subsys->namespaces[nsid] = ns; - ns->id_ns.endgid = cpu_to_le16(0x1); - - if (ns->params.detached) { - return; - } - - if (ns->params.shared) { - for (i = 0; i < ARRAY_SIZE(subsys->ctrls); i++) { - NvmeCtrl *ctrl = subsys->ctrls[i]; - - if (ctrl && ctrl != SUBSYS_SLOT_RSVD) { - nvme_attach_ns(ctrl, ns); - } - } - - return; - } + ns->id_ns.endgid = cpu_to_le16(0x1); + ns->id_ns_ind.endgrpid = cpu_to_le16(0x1); + if (!ns->params.shared) { + ns->ctrl = n; } - - nvme_attach_ns(n, ns); } -static Property nvme_ns_props[] = { +static const Property nvme_ns_props[] = { DEFINE_BLOCK_PROPERTIES(NvmeNamespace, blkconf), DEFINE_PROP_BOOL("detached", NvmeNamespace, params.detached, false), DEFINE_PROP_BOOL("shared", NvmeNamespace, params.shared, true), @@ -827,10 +804,9 @@ static Property nvme_ns_props[] = { DEFINE_PROP_BOOL("eui64-default", NvmeNamespace, params.eui64_default, false), DEFINE_PROP_STRING("fdp.ruhs", NvmeNamespace, params.fdp.ruhs), - DEFINE_PROP_END_OF_LIST(), }; -static void nvme_ns_class_init(ObjectClass *oc, void *data) +static void nvme_ns_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h index 781985754d0..b5c9378ea4e 100644 --- a/hw/nvme/nvme.h +++ b/hw/nvme/nvme.h @@ -220,6 +220,11 @@ typedef struct NvmeNamespaceParams { } fdp; } NvmeNamespaceParams; +typedef struct NvmeAtomic { + uint32_t atomic_max_write_size; + bool atomic_writes; +} NvmeAtomic; + typedef struct NvmeNamespace { DeviceState parent_obj; BlockConf blkconf; @@ -228,10 +233,10 @@ typedef struct NvmeNamespace { int64_t moff; NvmeIdNs id_ns; NvmeIdNsNvm id_ns_nvm; + NvmeIdNsInd id_ns_ind; NvmeLBAF lbaf; unsigned int nlbaf; size_t lbasz; - const uint32_t *iocs; uint8_t csi; uint16_t status; int attached; @@ -263,6 +268,9 @@ typedef struct NvmeNamespace { NvmeSubsystem *subsys; NvmeEnduranceGroup *endgrp; + /* NULL for shared namespaces; set to specific controller if private */ + NvmeCtrl *ctrl; + struct { uint32_t err_rec; } features; @@ -421,6 +429,7 @@ typedef struct NvmeRequest { NvmeCmd cmd; BlockAcctCookie acct; NvmeSg sg; + bool atomic_write; QTAILQ_ENTRY(NvmeRequest)entry; } NvmeRequest; @@ -532,12 +541,22 @@ typedef struct NvmeParams { bool auto_transition_zones; bool legacy_cmb; bool ioeventfd; + bool dbcs; uint16_t sriov_max_vfs; uint16_t sriov_vq_flexible; uint16_t sriov_vi_flexible; uint32_t sriov_max_vq_per_vf; uint32_t sriov_max_vi_per_vf; bool msix_exclusive_bar; + bool ocp; + + struct { 
+ bool mem; + } ctratt; + + uint16_t atomic_awun; + uint16_t atomic_awupf; + bool atomic_dn; } NvmeParams; typedef struct NvmeCtrl { @@ -568,6 +587,14 @@ typedef struct NvmeCtrl { uint64_t dbbuf_eis; bool dbbuf_enabled; + struct { + uint32_t acs[256]; + struct { + uint32_t nvm[256]; + uint32_t zoned[256]; + } iocs; + } cse; + struct { MemoryRegion mem; uint8_t *buf; @@ -619,6 +646,8 @@ typedef struct NvmeCtrl { uint16_t vqrfap; uint16_t virfap; } next_pri_ctrl_cap; /* These override pri_ctrl_cap after reset */ + uint32_t dn; /* Disable Normal */ + NvmeAtomic atomic; } NvmeCtrl; typedef enum NvmeResetType { diff --git a/hw/nvme/subsys.c b/hw/nvme/subsys.c index 77deaf2c2c9..777e1c620fd 100644 --- a/hw/nvme/subsys.c +++ b/hw/nvme/subsys.c @@ -56,7 +56,7 @@ int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp) { NvmeSubsystem *subsys = n->subsys; NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); - int cntlid, nsid, num_rsvd, num_vfs = n->params.sriov_max_vfs; + int cntlid, num_rsvd, num_vfs = n->params.sriov_max_vfs; if (pci_is_vf(&n->parent_obj)) { cntlid = le16_to_cpu(sctrl->scid); @@ -92,13 +92,6 @@ int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp) subsys->ctrls[cntlid] = n; - for (nsid = 1; nsid < ARRAY_SIZE(subsys->namespaces); nsid++) { - NvmeNamespace *ns = subsys->namespaces[nsid]; - if (ns && ns->params.shared && !ns->params.detached) { - nvme_attach_ns(n, ns); - } - } - return cntlid; } @@ -216,17 +209,16 @@ static void nvme_subsys_realize(DeviceState *dev, Error **errp) nvme_subsys_setup(subsys, errp); } -static Property nvme_subsystem_props[] = { +static const Property nvme_subsystem_props[] = { DEFINE_PROP_STRING("nqn", NvmeSubsystem, params.nqn), DEFINE_PROP_BOOL("fdp", NvmeSubsystem, params.fdp.enabled, false), DEFINE_PROP_SIZE("fdp.runs", NvmeSubsystem, params.fdp.runs, NVME_DEFAULT_RU_SIZE), DEFINE_PROP_UINT32("fdp.nrg", NvmeSubsystem, params.fdp.nrg, 1), DEFINE_PROP_UINT16("fdp.nruh", NvmeSubsystem, params.fdp.nruh, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void nvme_subsys_class_init(ObjectClass *oc, void *data) +static void nvme_subsys_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -234,7 +226,6 @@ static void nvme_subsys_class_init(ObjectClass *oc, void *data) dc->realize = nvme_subsys_realize; dc->desc = "Virtual NVMe subsystem"; - dc->hotpluggable = false; device_class_set_props(dc, nvme_subsystem_props); } diff --git a/hw/nvme/trace-events b/hw/nvme/trace-events index 3a67680c6ad..6be0bfa1c1f 100644 --- a/hw/nvme/trace-events +++ b/hw/nvme/trace-events @@ -56,6 +56,7 @@ pci_nvme_identify(uint16_t cid, uint8_t cns, uint16_t ctrlid, uint8_t csi) "cid pci_nvme_identify_ctrl(void) "identify controller" pci_nvme_identify_ctrl_csi(uint8_t csi) "identify controller, csi=0x%"PRIx8"" pci_nvme_identify_ns(uint32_t ns) "nsid %"PRIu32"" +pci_nvme_identify_ns_ind(uint32_t nsid) "nsid %"PRIu32"" pci_nvme_identify_ctrl_list(uint8_t cns, uint16_t cntid) "cns 0x%"PRIx8" cntid %"PRIu16"" pci_nvme_identify_pri_ctrl_cap(uint16_t cntlid) "identify primary controller capabilities cntlid=%"PRIu16"" pci_nvme_identify_sec_ctrl_list(uint16_t cntlid, uint8_t numcntl) "identify secondary controller list cntlid=%"PRIu16" numcntl=%"PRIu8"" diff --git a/hw/nvram/bcm2835_otp.c b/hw/nvram/bcm2835_otp.c index c4aed28472b..6816b534178 100644 --- a/hw/nvram/bcm2835_otp.c +++ b/hw/nvram/bcm2835_otp.c @@ -164,7 +164,7 @@ static const VMStateDescription vmstate_bcm2835_otp = { } }; -static void bcm2835_otp_class_init(ObjectClass *klass, void *data) +static void 
bcm2835_otp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/nvram/chrp_nvram.c b/hw/nvram/chrp_nvram.c index d4d10a7c03c..0b204e36c62 100644 --- a/hw/nvram/chrp_nvram.c +++ b/hw/nvram/chrp_nvram.c @@ -23,7 +23,7 @@ #include "qemu/cutils.h" #include "qemu/error-report.h" #include "hw/nvram/chrp_nvram.h" -#include "sysemu/sysemu.h" +#include "system/system.h" static int chrp_nvram_set_var(uint8_t *nvram, int addr, const char *str, int max_len) diff --git a/hw/nvram/ds1225y.c b/hw/nvram/ds1225y.c index 6d510dcc686..dbfd0d2e536 100644 --- a/hw/nvram/ds1225y.c +++ b/hw/nvram/ds1225y.c @@ -142,13 +142,12 @@ static void nvram_sysbus_realize(DeviceState *dev, Error **errp) nvram_post_load(s, 0); } -static Property nvram_sysbus_properties[] = { +static const Property nvram_sysbus_properties[] = { DEFINE_PROP_UINT32("size", SysBusNvRamState, nvram.chip_size, 0x2000), DEFINE_PROP_STRING("filename", SysBusNvRamState, nvram.filename), - DEFINE_PROP_END_OF_LIST(), }; -static void nvram_sysbus_class_init(ObjectClass *klass, void *data) +static void nvram_sysbus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/nvram/eeprom_at24c.c b/hw/nvram/eeprom_at24c.c index 3272068663e..82ea97e552a 100644 --- a/hw/nvram/eeprom_at24c.c +++ b/hw/nvram/eeprom_at24c.c @@ -10,12 +10,13 @@ #include "qemu/osdep.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "qemu/module.h" #include "hw/i2c/i2c.h" #include "hw/nvram/eeprom_at24c.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qom/object.h" /* #define DEBUG_AT24C */ @@ -26,13 +27,8 @@ #define DPRINTK(FMT, ...) do {} while (0) #endif -#define ERR(FMT, ...) 
fprintf(stderr, TYPE_AT24C_EE " : " FMT, \ - ## __VA_ARGS__) - #define TYPE_AT24C_EE "at24c-eeprom" -typedef struct EEPROMState EEPROMState; -DECLARE_INSTANCE_CHECKER(EEPROMState, AT24C_EE, - TYPE_AT24C_EE) +OBJECT_DECLARE_SIMPLE_TYPE(EEPROMState, AT24C_EE) struct EEPROMState { I2CSlave parent_obj; @@ -77,8 +73,7 @@ int at24c_eeprom_event(I2CSlave *s, enum i2c_event event) if (ee->blk && ee->changed) { int ret = blk_pwrite(ee->blk, 0, ee->rsize, ee->mem, 0); if (ret < 0) { - ERR(TYPE_AT24C_EE - " : failed to write backing file\n"); + error_report("%s: failed to write backing file", __func__); } DPRINTK("Wrote to backing file\n"); } @@ -195,20 +190,18 @@ static void at24c_eeprom_realize(DeviceState *dev, Error **errp) } ee->mem = g_malloc0(ee->rsize); - memset(ee->mem, 0, ee->rsize); - - if (ee->init_rom) { - memcpy(ee->mem, ee->init_rom, MIN(ee->init_rom_size, ee->rsize)); - } if (ee->blk) { int ret = blk_pread(ee->blk, 0, ee->rsize, ee->mem, 0); if (ret < 0) { - ERR(TYPE_AT24C_EE - " : Failed initial sync with backing file\n"); + error_setg(errp, "%s: Failed initial sync with backing file", + TYPE_AT24C_EE); + return; } DPRINTK("Reset read backing file\n"); + } else if (ee->init_rom) { + memcpy(ee->mem, ee->init_rom, MIN(ee->init_rom_size, ee->rsize)); } /* @@ -234,16 +227,15 @@ void at24c_eeprom_reset(DeviceState *state) ee->haveaddr = 0; } -static Property at24c_eeprom_props[] = { +static const Property at24c_eeprom_props[] = { DEFINE_PROP_UINT32("rom-size", EEPROMState, rsize, 0), DEFINE_PROP_UINT8("address-size", EEPROMState, asize, 0), DEFINE_PROP_BOOL("writable", EEPROMState, writable, true), DEFINE_PROP_DRIVE("drive", EEPROMState, blk), - DEFINE_PROP_END_OF_LIST() }; static -void at24c_eeprom_class_init(ObjectClass *klass, void *data) +void at24c_eeprom_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); @@ -254,7 +246,7 @@ void at24c_eeprom_class_init(ObjectClass *klass, void *data) k->send = &at24c_eeprom_send; device_class_set_props(dc, at24c_eeprom_props); - dc->reset = at24c_eeprom_reset; + device_class_set_legacy_reset(dc, at24c_eeprom_reset); } static diff --git a/hw/nvram/fw_cfg-acpi.c b/hw/nvram/fw_cfg-acpi.c index 58cdcd3121c..2e6ef89b989 100644 --- a/hw/nvram/fw_cfg-acpi.c +++ b/hw/nvram/fw_cfg-acpi.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Add fw_cfg device in DSDT * diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index fc0263f3491..aa240504935 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -24,10 +24,10 @@ #include "qemu/osdep.h" #include "qemu/datadir.h" -#include "sysemu/sysemu.h" -#include "sysemu/dma.h" -#include "sysemu/reset.h" -#include "exec/address-spaces.h" +#include "system/system.h" +#include "system/dma.h" +#include "system/reset.h" +#include "system/address-spaces.h" #include "hw/boards.h" #include "hw/nvram/fw_cfg.h" #include "hw/qdev-properties.h" @@ -41,7 +41,6 @@ #include "qemu/cutils.h" #include "qapi/error.h" #include "hw/acpi/aml-build.h" -#include "hw/pci/pci_bus.h" #include "hw/loader.h" #define FW_CFG_FILE_SLOTS_DFLT 0x20 @@ -730,7 +729,6 @@ static void *fw_cfg_modify_bytes_read(FWCfgState *s, uint16_t key, ptr = s->entries[arch][key].data; s->entries[arch][key].data = data; s->entries[arch][key].len = len; - s->entries[arch][key].callback_opaque = NULL; s->entries[arch][key].allow_write = false; return ptr; @@ -819,62 +817,6 @@ void fw_cfg_modify_i64(FWCfgState *s, uint16_t key, uint64_t 
value) g_free(old); } -void fw_cfg_set_order_override(FWCfgState *s, int order) -{ - assert(s->fw_cfg_order_override == 0); - s->fw_cfg_order_override = order; -} - -void fw_cfg_reset_order_override(FWCfgState *s) -{ - assert(s->fw_cfg_order_override != 0); - s->fw_cfg_order_override = 0; -} - -/* - * This is the legacy order list. For legacy systems, files are in - * the fw_cfg in the order defined below, by the "order" value. Note - * that some entries (VGA ROMs, NIC option ROMS, etc.) go into a - * specific area, but there may be more than one and they occur in the - * order that the user specifies them on the command line. Those are - * handled in a special manner, using the order override above. - * - * For non-legacy, the files are sorted by filename to avoid this kind - * of complexity in the future. - * - * This is only for x86, other arches don't implement versioning so - * they won't set legacy mode. - */ -static struct { - const char *name; - int order; -} fw_cfg_order[] = { - { "etc/boot-menu-wait", 10 }, - { "bootsplash.jpg", 11 }, - { "bootsplash.bmp", 12 }, - { "etc/boot-fail-wait", 15 }, - { "etc/smbios/smbios-tables", 20 }, - { "etc/smbios/smbios-anchor", 30 }, - { "etc/e820", 40 }, - { "etc/reserved-memory-end", 50 }, - { "genroms/kvmvapic.bin", 55 }, - { "genroms/linuxboot.bin", 60 }, - { }, /* VGA ROMs from pc_vga_init come here, 70. */ - { }, /* NIC option ROMs from pc_nic_init come here, 80. */ - { "etc/system-states", 90 }, - { }, /* User ROMs come here, 100. */ - { }, /* Device FW comes here, 110. */ - { "etc/extra-pci-roots", 120 }, - { "etc/acpi/tables", 130 }, - { "etc/table-loader", 140 }, - { "etc/tpm/log", 150 }, - { "etc/acpi/rsdp", 160 }, - { "bootorder", 170 }, - { "etc/msr_feature_control", 180 }, - -#define FW_CFG_ORDER_OVERRIDE_LAST 200 -}; - /* * Any sub-page size update to these table MRs will be lost during migration, * as we use aligned size in ram_load_precopy() -> qemu_ram_resize() path. @@ -892,29 +834,6 @@ static void fw_cfg_acpi_mr_save(FWCfgState *s, const char *filename, size_t len) } } -static int get_fw_cfg_order(FWCfgState *s, const char *name) -{ - int i; - - if (s->fw_cfg_order_override > 0) { - return s->fw_cfg_order_override; - } - - for (i = 0; i < ARRAY_SIZE(fw_cfg_order); i++) { - if (fw_cfg_order[i].name == NULL) { - continue; - } - - if (strcmp(name, fw_cfg_order[i].name) == 0) { - return fw_cfg_order[i].order; - } - } - - /* Stick unknown stuff at the end. */ - warn_report("Unknown firmware file in legacy mode: %s", name); - return FW_CFG_ORDER_OVERRIDE_LAST; -} - void fw_cfg_add_file_callback(FWCfgState *s, const char *filename, FWCfgCallback select_cb, FWCfgWriteCallback write_cb, @@ -923,7 +842,6 @@ void fw_cfg_add_file_callback(FWCfgState *s, const char *filename, { int i, index, count; size_t dsize; - MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); int order = 0; if (!s->files) { @@ -935,22 +853,11 @@ void fw_cfg_add_file_callback(FWCfgState *s, const char *filename, count = be32_to_cpu(s->files->count); assert(count < fw_cfg_file_slots(s)); - /* Find the insertion point. */ - if (mc->legacy_fw_cfg_order) { - /* - * Sort by order. For files with the same order, we keep them - * in the sequence in which they were added. - */ - order = get_fw_cfg_order(s, filename); - for (index = count; - index > 0 && order < s->entry_order[index - 1]; - index--); - } else { - /* Sort by file name. 
*/ - for (index = count; - index > 0 && strcmp(filename, s->files->f[index - 1].name) < 0; - index--); - } + /* Find the insertion point, sorting by file name. */ + for (index = count; + index > 0 && strcmp(filename, s->files->f[index - 1].name) < 0; + index--) + ; /* * Move all the entries from the index point and after down one @@ -1027,27 +934,29 @@ void *fw_cfg_modify_file(FWCfgState *s, const char *filename, return NULL; } -bool fw_cfg_add_from_generator(FWCfgState *s, const char *filename, - const char *gen_id, Error **errp) +bool fw_cfg_add_file_from_generator(FWCfgState *s, + Object *parent, const char *part, + const char *filename, Error **errp) { + ERRP_GUARD(); FWCfgDataGeneratorClass *klass; GByteArray *array; Object *obj; gsize size; - obj = object_resolve_path_component(object_get_objects_root(), gen_id); + obj = object_resolve_path_component(parent, part); if (!obj) { - error_setg(errp, "Cannot find object ID '%s'", gen_id); + error_setg(errp, "Cannot find object ID '%s'", part); return false; } if (!object_dynamic_cast(obj, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE)) { error_setg(errp, "Object ID '%s' is not a '%s' subclass", - gen_id, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE); + part, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE); return false; } klass = FW_CFG_DATA_GENERATOR_GET_CLASS(obj); array = klass->get_data(obj, errp); - if (!array) { + if (*errp || !array) { return false; } size = array->len; @@ -1056,31 +965,8 @@ bool fw_cfg_add_from_generator(FWCfgState *s, const char *filename, return true; } -void fw_cfg_add_extra_pci_roots(PCIBus *bus, FWCfgState *s) -{ - int extra_hosts = 0; - - if (!bus) { - return; - } - - QLIST_FOREACH(bus, &bus->child, sibling) { - /* look for expander root buses */ - if (pci_bus_is_root(bus)) { - extra_hosts++; - } - } - - if (extra_hosts && s) { - uint64_t *val = g_malloc(sizeof(*val)); - *val = cpu_to_le64(extra_hosts); - fw_cfg_add_file(s, "etc/extra-pci-roots", val, sizeof(*val)); - } -} - static void fw_cfg_machine_reset(void *opaque) { - MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); FWCfgState *s = opaque; void *ptr; size_t len; @@ -1090,11 +976,9 @@ static void fw_cfg_machine_reset(void *opaque) ptr = fw_cfg_modify_file(s, "bootorder", (uint8_t *)buf, len); g_free(ptr); - if (!mc->legacy_fw_cfg_order) { - buf = get_boot_devices_lchs_list(&len); - ptr = fw_cfg_modify_file(s, "bios-geometry", (uint8_t *)buf, len); - g_free(ptr); - } + buf = get_boot_devices_lchs_list(&len); + ptr = fw_cfg_modify_file(s, "bios-geometry", (uint8_t *)buf, len); + g_free(ptr); } static void fw_cfg_machine_ready(struct Notifier *n, void *data) @@ -1103,9 +987,8 @@ static void fw_cfg_machine_ready(struct Notifier *n, void *data) qemu_register_reset(fw_cfg_machine_reset, s); } -static Property fw_cfg_properties[] = { +static const Property fw_cfg_properties[] = { DEFINE_PROP_BOOL("acpi-mr-restore", FWCfgState, acpi_mr_restore, true), - DEFINE_PROP_END_OF_LIST(), }; static void fw_cfg_common_realize(DeviceState *dev, Error **errp) @@ -1171,11 +1054,6 @@ FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase, return s; } -FWCfgState *fw_cfg_init_io(uint32_t iobase) -{ - return fw_cfg_init_io_dma(iobase, 0, NULL); -} - FWCfgState *fw_cfg_init_mem_wide(hwaddr ctl_addr, hwaddr data_addr, uint32_t data_width, hwaddr dma_addr, AddressSpace *dma_as) @@ -1256,11 +1134,11 @@ void load_image_to_fw_cfg(FWCfgState *fw_cfg, uint16_t size_key, fw_cfg_add_bytes(fw_cfg, data_key, data, size); } -static void fw_cfg_class_init(ObjectClass *klass, void *data) +static 
void fw_cfg_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = fw_cfg_reset; + device_class_set_legacy_reset(dc, fw_cfg_reset); dc->vmsd = &vmstate_fw_cfg; device_class_set_props(dc, fw_cfg_properties); @@ -1299,12 +1177,11 @@ static void fw_cfg_file_slots_allocate(FWCfgState *s, Error **errp) s->entry_order = g_new0(int, fw_cfg_max_entry(s)); } -static Property fw_cfg_io_properties[] = { +static const Property fw_cfg_io_properties[] = { DEFINE_PROP_BOOL("dma_enabled", FWCfgIoState, parent_obj.dma_enabled, true), DEFINE_PROP_UINT16("x-file-slots", FWCfgIoState, parent_obj.file_slots, FW_CFG_FILE_SLOTS_DFLT), - DEFINE_PROP_END_OF_LIST(), }; static void fw_cfg_io_realize(DeviceState *dev, Error **errp) @@ -1332,7 +1209,7 @@ static void fw_cfg_io_realize(DeviceState *dev, Error **errp) fw_cfg_common_realize(dev, errp); } -static void fw_cfg_io_class_init(ObjectClass *klass, void *data) +static void fw_cfg_io_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1348,13 +1225,12 @@ static const TypeInfo fw_cfg_io_info = { }; -static Property fw_cfg_mem_properties[] = { +static const Property fw_cfg_mem_properties[] = { DEFINE_PROP_UINT32("data_width", FWCfgMemState, data_width, -1), DEFINE_PROP_BOOL("dma_enabled", FWCfgMemState, parent_obj.dma_enabled, true), DEFINE_PROP_UINT16("x-file-slots", FWCfgMemState, parent_obj.file_slots, FW_CFG_FILE_SLOTS_DFLT), - DEFINE_PROP_END_OF_LIST(), }; static void fw_cfg_mem_realize(DeviceState *dev, Error **errp) @@ -1394,7 +1270,7 @@ static void fw_cfg_mem_realize(DeviceState *dev, Error **errp) fw_cfg_common_realize(dev, errp); } -static void fw_cfg_mem_class_init(ObjectClass *klass, void *data) +static void fw_cfg_mem_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/nvram/mac_nvram.c b/hw/nvram/mac_nvram.c index fe9df9fa35b..66526a22914 100644 --- a/hw/nvram/mac_nvram.c +++ b/hw/nvram/mac_nvram.c @@ -29,13 +29,13 @@ #include "hw/nvram/mac_nvram.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "migration/vmstate.h" #include "qemu/cutils.h" #include "qemu/module.h" #include "qemu/error-report.h" #include "trace.h" -#include +#include /* for adler32 */ #define DEF_SYSTEM_SIZE 0xc10 @@ -134,20 +134,19 @@ static void macio_nvram_unrealizefn(DeviceState *dev) g_free(s->data); } -static Property macio_nvram_properties[] = { +static const Property macio_nvram_properties[] = { DEFINE_PROP_UINT32("size", MacIONVRAMState, size, 0), DEFINE_PROP_UINT32("it_shift", MacIONVRAMState, it_shift, 0), DEFINE_PROP_DRIVE("drive", MacIONVRAMState, blk), - DEFINE_PROP_END_OF_LIST() }; -static void macio_nvram_class_init(ObjectClass *oc, void *data) +static void macio_nvram_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = macio_nvram_realizefn; dc->unrealize = macio_nvram_unrealizefn; - dc->reset = macio_nvram_reset; + device_class_set_legacy_reset(dc, macio_nvram_reset); dc->vmsd = &vmstate_macio_nvram; device_class_set_props(dc, macio_nvram_properties); set_bit(DEVICE_CATEGORY_MISC, dc->categories); diff --git a/hw/nvram/npcm7xx_otp.c b/hw/nvram/npcm7xx_otp.c index f00ebfa931e..1fb752b20f4 100644 --- a/hw/nvram/npcm7xx_otp.c +++ b/hw/nvram/npcm7xx_otp.c @@ -391,7 +391,7 @@ static const VMStateDescription vmstate_npcm7xx_otp = { }, }; -static void npcm7xx_otp_class_init(ObjectClass 
*klass, void *data) +static void npcm7xx_otp_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -403,14 +403,14 @@ static void npcm7xx_otp_class_init(ObjectClass *klass, void *data) rc->phases.enter = npcm7xx_otp_enter_reset; } -static void npcm7xx_key_storage_class_init(ObjectClass *klass, void *data) +static void npcm7xx_key_storage_class_init(ObjectClass *klass, const void *data) { NPCM7xxOTPClass *oc = NPCM7XX_OTP_CLASS(klass); oc->mmio_ops = &npcm7xx_key_storage_ops; } -static void npcm7xx_fuse_array_class_init(ObjectClass *klass, void *data) +static void npcm7xx_fuse_array_class_init(ObjectClass *klass, const void *data) { NPCM7xxOTPClass *oc = NPCM7XX_OTP_CLASS(klass); diff --git a/hw/nvram/nrf51_nvm.c b/hw/nvram/nrf51_nvm.c index 73564f7e6ea..23cc9fe9b3d 100644 --- a/hw/nvram/nrf51_nvm.c +++ b/hw/nvram/nrf51_nvm.c @@ -354,9 +354,8 @@ static void nrf51_nvm_reset(DeviceState *dev) memset(s->uicr_content, 0xFF, sizeof(s->uicr_content)); } -static Property nrf51_nvm_properties[] = { +static const Property nrf51_nvm_properties[] = { DEFINE_PROP_UINT32("flash-size", NRF51NVMState, flash_size, 0x40000), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_nvm = { @@ -371,14 +370,14 @@ static const VMStateDescription vmstate_nvm = { } }; -static void nrf51_nvm_class_init(ObjectClass *klass, void *data) +static void nrf51_nvm_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, nrf51_nvm_properties); dc->vmsd = &vmstate_nvm; dc->realize = nrf51_nvm_realize; - dc->reset = nrf51_nvm_reset; + device_class_set_legacy_reset(dc, nrf51_nvm_reset); } static const TypeInfo nrf51_nvm_info = { diff --git a/hw/nvram/spapr_nvram.c b/hw/nvram/spapr_nvram.c index bfd8aa367e1..d0ac4e5735f 100644 --- a/hw/nvram/spapr_nvram.c +++ b/hw/nvram/spapr_nvram.c @@ -28,10 +28,10 @@ #include "qapi/error.h" #include -#include "sysemu/block-backend.h" -#include "sysemu/device_tree.h" -#include "sysemu/sysemu.h" -#include "sysemu/runstate.h" +#include "system/block-backend.h" +#include "system/device_tree.h" +#include "system/system.h" +#include "system/runstate.h" #include "migration/vmstate.h" #include "hw/nvram/chrp_nvram.h" #include "hw/ppc/spapr.h" @@ -252,13 +252,12 @@ static const VMStateDescription vmstate_spapr_nvram = { }, }; -static Property spapr_nvram_properties[] = { +static const Property spapr_nvram_properties[] = { DEFINE_SPAPR_PROPERTIES(SpaprNvram, sdev), DEFINE_PROP_DRIVE("drive", SpaprNvram, blk), - DEFINE_PROP_END_OF_LIST(), }; -static void spapr_nvram_class_init(ObjectClass *klass, void *data) +static void spapr_nvram_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass); diff --git a/hw/nvram/xlnx-bbram.c b/hw/nvram/xlnx-bbram.c index 09575a77d77..5702bb3f310 100644 --- a/hw/nvram/xlnx-bbram.c +++ b/hw/nvram/xlnx-bbram.c @@ -29,7 +29,7 @@ #include "qemu/error-report.h" #include "qemu/log.h" #include "qapi/error.h" -#include "sysemu/blockdev.h" +#include "system/blockdev.h" #include "migration/vmstate.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" @@ -456,9 +456,8 @@ static void bbram_ctrl_init(Object *obj) { XlnxBBRam *s = XLNX_BBRAM(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - RegisterInfoArray *reg_array; - reg_array = + s->reg_array = register_init_block32(DEVICE(obj), bbram_ctrl_regs_info, 
ARRAY_SIZE(bbram_ctrl_regs_info), s->regs_info, s->regs, @@ -466,10 +465,17 @@ static void bbram_ctrl_init(Object *obj) XLNX_BBRAM_ERR_DEBUG, R_MAX * 4); - sysbus_init_mmio(sbd, ®_array->mem); + sysbus_init_mmio(sbd, &s->reg_array->mem); sysbus_init_irq(sbd, &s->irq_bbram); } +static void bbram_ctrl_finalize(Object *obj) +{ + XlnxBBRam *s = XLNX_BBRAM(obj); + + register_finalize_block(s->reg_array); +} + static void bbram_prop_set_drive(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { @@ -496,7 +502,7 @@ static void bbram_prop_release_drive(Object *obj, const char *name, } static const PropertyInfo bbram_prop_drive = { - .name = "str", + .type = "str", .description = "Node name or ID of a block device to use as BBRAM backend", .realized_set_allowed = true, .get = bbram_prop_get_drive, @@ -514,13 +520,12 @@ static const VMStateDescription vmstate_bbram_ctrl = { } }; -static Property bbram_ctrl_props[] = { +static const Property bbram_ctrl_props[] = { DEFINE_PROP("drive", XlnxBBRam, blk, bbram_prop_drive, BlockBackend *), DEFINE_PROP_UINT32("crc-zpads", XlnxBBRam, crc_zpads, 1), - DEFINE_PROP_END_OF_LIST(), }; -static void bbram_ctrl_class_init(ObjectClass *klass, void *data) +static void bbram_ctrl_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -537,6 +542,7 @@ static const TypeInfo bbram_ctrl_info = { .instance_size = sizeof(XlnxBBRam), .class_init = bbram_ctrl_class_init, .instance_init = bbram_ctrl_init, + .instance_finalize = bbram_ctrl_finalize, }; static void bbram_ctrl_register_types(void) diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c index f7b849f7de4..4c23f8b9312 100644 --- a/hw/nvram/xlnx-efuse.c +++ b/hw/nvram/xlnx-efuse.c @@ -30,7 +30,7 @@ #include "qemu/error-report.h" #include "qemu/log.h" #include "qapi/error.h" -#include "sysemu/blockdev.h" +#include "system/blockdev.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" @@ -257,7 +257,7 @@ static void efuse_prop_release_drive(Object *obj, const char *name, } static const PropertyInfo efuse_prop_drive = { - .name = "str", + .type = "str", .description = "Node name or ID of a block device to use as eFUSE backend", .realized_set_allowed = true, .get = efuse_prop_get_drive, @@ -265,22 +265,23 @@ static const PropertyInfo efuse_prop_drive = { .release = efuse_prop_release_drive, }; -static Property efuse_properties[] = { +static const Property efuse_properties[] = { DEFINE_PROP("drive", XlnxEFuse, blk, efuse_prop_drive, BlockBackend *), DEFINE_PROP_UINT8("efuse-nr", XlnxEFuse, efuse_nr, 3), DEFINE_PROP_UINT32("efuse-size", XlnxEFuse, efuse_size, 64 * 32), DEFINE_PROP_BOOL("init-factory-tbits", XlnxEFuse, init_tbits, true), DEFINE_PROP_ARRAY("read-only", XlnxEFuse, ro_bits_cnt, ro_bits, qdev_prop_uint32, uint32_t), - DEFINE_PROP_END_OF_LIST(), }; -static void efuse_class_init(ObjectClass *klass, void *data) +static void efuse_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = efuse_realize; device_class_set_props(dc, efuse_properties); + /* Reason: Part of Xilinx SoC */ + dc->user_creatable = false; } static const TypeInfo efuse_info = { diff --git a/hw/nvram/xlnx-versal-efuse-cache.c b/hw/nvram/xlnx-versal-efuse-cache.c index eaec64d785e..d4ec96a6264 100644 --- a/hw/nvram/xlnx-versal-efuse-cache.c +++ b/hw/nvram/xlnx-versal-efuse-cache.c @@ -83,15 +83,13 @@ static void efuse_cache_init(Object *obj) sysbus_init_mmio(sbd, 
&s->iomem); } -static Property efuse_cache_props[] = { +static const Property efuse_cache_props[] = { DEFINE_PROP_LINK("efuse", XlnxVersalEFuseCache, efuse, TYPE_XLNX_EFUSE, XlnxEFuse *), - - DEFINE_PROP_END_OF_LIST(), }; -static void efuse_cache_class_init(ObjectClass *klass, void *data) +static void efuse_cache_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/nvram/xlnx-versal-efuse-ctrl.c b/hw/nvram/xlnx-versal-efuse-ctrl.c index def6fe3302b..90962198008 100644 --- a/hw/nvram/xlnx-versal-efuse-ctrl.c +++ b/hw/nvram/xlnx-versal-efuse-ctrl.c @@ -494,7 +494,6 @@ static void efuse_rd_addr_postw(RegisterInfo *reg, uint64_t val64) ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_DONE, 1); efuse_imr_update_irq(s); - return; } static uint64_t efuse_cache_load_prew(RegisterInfo *reg, uint64_t val64) @@ -712,9 +711,8 @@ static void efuse_ctrl_init(Object *obj) { XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - RegisterInfoArray *reg_array; - reg_array = + s->reg_array = register_init_block32(DEVICE(obj), efuse_ctrl_regs_info, ARRAY_SIZE(efuse_ctrl_regs_info), s->regs_info, s->regs, @@ -722,7 +720,7 @@ static void efuse_ctrl_init(Object *obj) XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG, R_MAX * 4); - sysbus_init_mmio(sbd, ®_array->mem); + sysbus_init_mmio(sbd, &s->reg_array->mem); sysbus_init_irq(sbd, &s->irq_efuse_imr); } @@ -730,6 +728,7 @@ static void efuse_ctrl_finalize(Object *obj) { XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj); + register_finalize_block(s->reg_array); g_free(s->extra_pg0_lock_spec); } @@ -743,18 +742,16 @@ static const VMStateDescription vmstate_efuse_ctrl = { } }; -static Property efuse_ctrl_props[] = { +static const Property efuse_ctrl_props[] = { DEFINE_PROP_LINK("efuse", XlnxVersalEFuseCtrl, efuse, TYPE_XLNX_EFUSE, XlnxEFuse *), DEFINE_PROP_ARRAY("pg0-lock", XlnxVersalEFuseCtrl, extra_pg0_lock_n16, extra_pg0_lock_spec, qdev_prop_uint16, uint16_t), - - DEFINE_PROP_END_OF_LIST(), }; -static void efuse_ctrl_class_init(ObjectClass *klass, void *data) +static void efuse_ctrl_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/nvram/xlnx-zynqmp-efuse.c b/hw/nvram/xlnx-zynqmp-efuse.c index 2d465f0fc6a..5a218c32e84 100644 --- a/hw/nvram/xlnx-zynqmp-efuse.c +++ b/hw/nvram/xlnx-zynqmp-efuse.c @@ -803,9 +803,8 @@ static void zynqmp_efuse_init(Object *obj) { XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - RegisterInfoArray *reg_array; - reg_array = + s->reg_array = register_init_block32(DEVICE(obj), zynqmp_efuse_regs_info, ARRAY_SIZE(zynqmp_efuse_regs_info), s->regs_info, s->regs, @@ -813,10 +812,17 @@ static void zynqmp_efuse_init(Object *obj) ZYNQMP_EFUSE_ERR_DEBUG, R_MAX * 4); - sysbus_init_mmio(sbd, ®_array->mem); + sysbus_init_mmio(sbd, &s->reg_array->mem); sysbus_init_irq(sbd, &s->irq); } +static void zynqmp_efuse_finalize(Object *obj) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(obj); + + register_finalize_block(s->reg_array); +} + static const VMStateDescription vmstate_efuse = { .name = TYPE_XLNX_ZYNQMP_EFUSE, .version_id = 1, @@ -827,15 +833,13 @@ static const VMStateDescription vmstate_efuse = { } }; -static Property zynqmp_efuse_props[] = { +static const Property zynqmp_efuse_props[] = { DEFINE_PROP_LINK("efuse", XlnxZynqMPEFuse, efuse, TYPE_XLNX_EFUSE, XlnxEFuse *), - - DEFINE_PROP_END_OF_LIST(), }; -static void 
zynqmp_efuse_class_init(ObjectClass *klass, void *data) +static void zynqmp_efuse_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -853,6 +857,7 @@ static const TypeInfo efuse_info = { .instance_size = sizeof(XlnxZynqMPEFuse), .class_init = zynqmp_efuse_class_init, .instance_init = zynqmp_efuse_init, + .instance_finalize = zynqmp_efuse_finalize, }; static void efuse_register_types(void) diff --git a/hw/openrisc/Kconfig b/hw/openrisc/Kconfig index 76b953c62c2..0702f622a55 100644 --- a/hw/openrisc/Kconfig +++ b/hw/openrisc/Kconfig @@ -3,7 +3,7 @@ config OR1K_SIM default y depends on OPENRISC select DEVICE_TREE - select SERIAL + select SERIAL_MM select OPENCORES_ETH select OMPIC select SPLIT_IRQ @@ -19,6 +19,6 @@ config OR1K_VIRT select PCI select PCI_EXPRESS_GENERIC_BRIDGE select GOLDFISH_RTC - select SERIAL + select SERIAL_MM select SIFIVE_TEST select VIRTIO_MMIO diff --git a/hw/openrisc/boot.c b/hw/openrisc/boot.c index 55475aa6d60..c81efe8138a 100644 --- a/hw/openrisc/boot.c +++ b/hw/openrisc/boot.c @@ -9,12 +9,13 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/cpu-defs.h" +#include "exec/target_page.h" #include "elf.h" #include "hw/loader.h" #include "hw/openrisc/boot.h" -#include "sysemu/device_tree.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" +#include "system/device_tree.h" +#include "system/qtest.h" +#include "system/reset.h" #include "qemu/error-report.h" #include @@ -32,7 +33,7 @@ hwaddr openrisc_load_kernel(ram_addr_t ram_size, if (kernel_filename && !qtest_enabled()) { kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, - &elf_entry, NULL, &high_addr, NULL, 1, + &elf_entry, NULL, &high_addr, NULL, ELFDATA2MSB, EM_OPENRISC, 1, 0); entry = elf_entry; if (kernel_size < 0) { @@ -90,8 +91,8 @@ hwaddr openrisc_load_initrd(void *fdt, const char *filename, return start + size; } -uint32_t openrisc_load_fdt(void *fdt, hwaddr load_start, - uint64_t mem_size) +uint32_t openrisc_load_fdt(MachineState *ms, void *fdt, + hwaddr load_start, uint64_t mem_size) { uint32_t fdt_addr; int ret; @@ -109,7 +110,9 @@ uint32_t openrisc_load_fdt(void *fdt, hwaddr load_start, /* Should only fail if we've built a corrupted tree */ g_assert(ret == 0); /* copy in the device tree */ - qemu_fdt_dumpdtb(fdt, fdtsize); + + /* Save FDT for dumpdtb monitor command */ + ms->fdt = fdt; rom_add_blob_fixed_as("fdt", fdt, fdtsize, fdt_addr, &address_space_memory); diff --git a/hw/openrisc/cputimer.c b/hw/openrisc/cputimer.c index 835986c4dbe..6331997d56b 100644 --- a/hw/openrisc/cputimer.c +++ b/hw/openrisc/cputimer.c @@ -22,14 +22,15 @@ #include "cpu.h" #include "migration/vmstate.h" #include "qemu/timer.h" -#include "sysemu/reset.h" +#include "system/reset.h" #define TIMER_PERIOD 50 /* 50 ns period for 20 MHz timer */ /* Tick Timer global state to allow all cores to be in sync */ typedef struct OR1KTimerState { uint32_t ttcr; - uint64_t last_clk; + uint32_t ttcr_offset; + uint64_t clk_offset; } OR1KTimerState; static OR1KTimerState *or1k_timer; @@ -37,6 +38,8 @@ static OR1KTimerState *or1k_timer; void cpu_openrisc_count_set(OpenRISCCPU *cpu, uint32_t val) { or1k_timer->ttcr = val; + or1k_timer->ttcr_offset = val; + or1k_timer->clk_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); } uint32_t cpu_openrisc_count_get(OpenRISCCPU *cpu) @@ -53,9 +56,8 @@ void cpu_openrisc_count_update(OpenRISCCPU *cpu) return; } now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - or1k_timer->ttcr += (uint32_t)((now - 
or1k_timer->last_clk) - / TIMER_PERIOD); - or1k_timer->last_clk = now; + or1k_timer->ttcr = or1k_timer->ttcr_offset + + DIV_ROUND_UP(now - or1k_timer->clk_offset, TIMER_PERIOD); } /* Update the next timeout time as difference between ttmr and ttcr */ @@ -69,7 +71,7 @@ void cpu_openrisc_timer_update(OpenRISCCPU *cpu) } cpu_openrisc_count_update(cpu); - now = or1k_timer->last_clk; + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); if ((cpu->env.ttmr & TTMR_TP) <= (or1k_timer->ttcr & TTMR_TP)) { wait = TTMR_TP - (or1k_timer->ttcr & TTMR_TP) + 1; @@ -110,7 +112,8 @@ static void openrisc_timer_cb(void *opaque) case TIMER_NONE: break; case TIMER_INTR: - or1k_timer->ttcr = 0; + /* Zero the count by applying a negative offset to the counter */ + or1k_timer->ttcr_offset -= (cpu->env.ttmr & TTMR_TP); break; case TIMER_SHOT: cpu_openrisc_count_stop(cpu); @@ -137,17 +140,18 @@ static void openrisc_count_reset(void *opaque) /* Reset the global timer state. */ static void openrisc_timer_reset(void *opaque) { - or1k_timer->ttcr = 0x00000000; - or1k_timer->last_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + OpenRISCCPU *cpu = opaque; + cpu_openrisc_count_set(cpu, 0); } static const VMStateDescription vmstate_or1k_timer = { .name = "or1k_timer", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .fields = (const VMStateField[]) { VMSTATE_UINT32(ttcr, OR1KTimerState), - VMSTATE_UINT64(last_clk, OR1KTimerState), + VMSTATE_UINT32(ttcr_offset, OR1KTimerState), + VMSTATE_UINT64(clk_offset, OR1KTimerState), VMSTATE_END_OF_LIST() } }; diff --git a/hw/openrisc/openrisc_sim.c b/hw/openrisc/openrisc_sim.c index bffd6f721f7..880c8ebbb8b 100644 --- a/hw/openrisc/openrisc_sim.c +++ b/hw/openrisc/openrisc_sim.c @@ -24,16 +24,16 @@ #include "cpu.h" #include "hw/irq.h" #include "hw/boards.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "net/net.h" #include "hw/openrisc/boot.h" #include "hw/qdev-properties.h" -#include "exec/address-spaces.h" -#include "sysemu/device_tree.h" -#include "sysemu/sysemu.h" +#include "system/address-spaces.h" +#include "system/device_tree.h" +#include "system/system.h" #include "hw/sysbus.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" +#include "system/qtest.h" +#include "system/reset.h" #include "hw/core/split-irq.h" #include @@ -250,7 +250,7 @@ static void openrisc_sim_serial_init(Or1ksimState *state, hwaddr base, void *fdt = state->fdt; char *nodename; qemu_irq serial_irq; - char alias[sizeof("uart0")]; + char alias[sizeof("serial0")]; int i; if (num_cpus > 1) { @@ -265,8 +265,8 @@ static void openrisc_sim_serial_init(Or1ksimState *state, hwaddr base, serial_irq = get_cpu_irq(cpus, 0, irq_pin); } serial_mm_init(get_system_memory(), base, 0, serial_irq, 115200, - serial_hd(OR1KSIM_UART_COUNT - uart_idx - 1), - DEVICE_NATIVE_ENDIAN); + serial_hd(uart_idx), + DEVICE_BIG_ENDIAN); /* Add device tree node for serial. */ nodename = g_strdup_printf("/serial@%" HWADDR_PRIx, base); @@ -277,10 +277,13 @@ static void openrisc_sim_serial_init(Or1ksimState *state, hwaddr base, qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", OR1KSIM_CLK_MHZ); qemu_fdt_setprop(fdt, nodename, "big-endian", NULL, 0); - /* The /chosen node is created during fdt creation. */ - qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", nodename); - snprintf(alias, sizeof(alias), "uart%d", uart_idx); + if (uart_idx == 0) { + /* The /chosen node is created during fdt creation. 
*/ + qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", nodename); + } + snprintf(alias, sizeof(alias), "serial%d", uart_idx); qemu_fdt_setprop_string(fdt, "/aliases", alias, nodename); + g_free(nodename); } @@ -303,8 +306,6 @@ static void openrisc_sim_init(MachineState *machine) exit(1); } - cpu_openrisc_clock_init(cpus[n]); - qemu_register_reset(main_cpu_reset, cpus[n]); } @@ -326,11 +327,22 @@ static void openrisc_sim_init(MachineState *machine) smp_cpus, cpus, OR1KSIM_OMPIC_IRQ); } - for (n = 0; n < OR1KSIM_UART_COUNT; ++n) + /* + * We create the UART nodes starting with the highest address and + * working downwards, because in QEMU the DTB nodes end up in the + * DTB in reverse order of creation. Correctly-written guest software + * will not care about the node order (it will look at stdout-path + * or the alias nodes), but for the benefit of guest software which + * just looks for the first UART node in the DTB, make sure the + * lowest-address UART (which is QEMU's first serial port) appears + * first in the DTB. + */ + for (n = OR1KSIM_UART_COUNT - 1; n >= 0; n--) { openrisc_sim_serial_init(state, or1ksim_memmap[OR1KSIM_UART].base + or1ksim_memmap[OR1KSIM_UART].size * n, or1ksim_memmap[OR1KSIM_UART].size, smp_cpus, cpus, OR1KSIM_UART_IRQ, n); + } load_addr = openrisc_load_kernel(ram_size, kernel_filename, &boot_info.bootstrap_pc); @@ -340,12 +352,12 @@ static void openrisc_sim_init(MachineState *machine) machine->initrd_filename, load_addr, machine->ram_size); } - boot_info.fdt_addr = openrisc_load_fdt(state->fdt, load_addr, + boot_info.fdt_addr = openrisc_load_fdt(machine, state->fdt, load_addr, machine->ram_size); } } -static void openrisc_sim_machine_init(ObjectClass *oc, void *data) +static void openrisc_sim_machine_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); diff --git a/hw/openrisc/virt.c b/hw/openrisc/virt.c index f8a68a6a6b1..a98071c9365 100644 --- a/hw/openrisc/virt.c +++ b/hw/openrisc/virt.c @@ -11,10 +11,10 @@ #include "qemu/guest-random.h" #include "qapi/error.h" #include "cpu.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/irq.h" #include "hw/boards.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/core/split-irq.h" #include "hw/openrisc/boot.h" #include "hw/misc/sifive_test.h" @@ -24,10 +24,10 @@ #include "hw/rtc/goldfish_rtc.h" #include "hw/sysbus.h" #include "hw/virtio/virtio-mmio.h" -#include "sysemu/device_tree.h" -#include "sysemu/sysemu.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" +#include "system/device_tree.h" +#include "system/system.h" +#include "system/qtest.h" +#include "system/reset.h" #include @@ -236,7 +236,7 @@ static void openrisc_virt_serial_init(OR1KVirtState *state, hwaddr base, qemu_irq serial_irq = get_per_cpu_irq(cpus, num_cpus, irq_pin); serial_mm_init(get_system_memory(), base, 0, serial_irq, 115200, - serial_hd(0), DEVICE_NATIVE_ENDIAN); + serial_hd(0), DEVICE_BIG_ENDIAN); /* Add device tree node for serial. 
*/ nodename = g_strdup_printf("/serial@%" HWADDR_PRIx, base); @@ -318,7 +318,7 @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base, { int pin, dev; uint32_t irq_map_stride = 0; - uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * 6] = {}; + uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * 6] = {}; uint32_t *irq_map = full_irq_map; /* @@ -330,11 +330,11 @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base, * possible slot) seeing the interrupt-map-mask will allow the table * to wrap to any number of devices. */ - for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { + for (dev = 0; dev < PCI_NUM_PINS; dev++) { int devfn = dev << 3; - for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { - int irq_nr = irq_base + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); + for (pin = 0; pin < PCI_NUM_PINS; pin++) { + int irq_nr = irq_base + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS); int i = 0; /* Fill PCI address cells */ @@ -357,7 +357,7 @@ static void create_pcie_irq_map(void *fdt, char *nodename, int irq_base, } qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map, - GPEX_NUM_IRQS * GPEX_NUM_IRQS * + PCI_NUM_PINS * PCI_NUM_PINS * irq_map_stride * sizeof(uint32_t)); qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask", @@ -409,7 +409,7 @@ static void openrisc_virt_pcie_init(OR1KVirtState *state, memory_region_add_subregion(get_system_memory(), pio_base, alias); /* Connect IRQ lines. */ - for (i = 0; i < GPEX_NUM_IRQS; i++) { + for (i = 0; i < PCI_NUM_PINS; i++) { pcie_irq = get_per_cpu_irq(cpus, num_cpus, irq_base + i); sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pcie_irq); @@ -487,8 +487,6 @@ static void openrisc_virt_init(MachineState *machine) exit(1); } - cpu_openrisc_clock_init(cpus[n]); - qemu_register_reset(main_cpu_reset, cpus[n]); } @@ -540,12 +538,12 @@ static void openrisc_virt_init(MachineState *machine) machine->initrd_filename, load_addr, machine->ram_size); } - boot_info.fdt_addr = openrisc_load_fdt(state->fdt, load_addr, + boot_info.fdt_addr = openrisc_load_fdt(machine, state->fdt, load_addr, machine->ram_size); } } -static void openrisc_virt_machine_init(ObjectClass *oc, void *data) +static void openrisc_virt_machine_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); diff --git a/hw/pci-bridge/Kconfig b/hw/pci-bridge/Kconfig index 67077366cc7..449ec986435 100644 --- a/hw/pci-bridge/Kconfig +++ b/hw/pci-bridge/Kconfig @@ -1,3 +1,8 @@ +config PCI_BRIDGE + bool + default y if PCI_DEVICES + depends on PCI + config PCIE_PORT bool default y if PCI_DEVICES diff --git a/hw/pci-bridge/cxl_downstream.c b/hw/pci-bridge/cxl_downstream.c index 742da07a015..1065245a8b8 100644 --- a/hw/pci-bridge/cxl_downstream.c +++ b/hw/pci-bridge/cxl_downstream.c @@ -13,6 +13,8 @@ #include "hw/pci/msi.h" #include "hw/pci/pcie.h" #include "hw/pci/pcie_port.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "hw/cxl/cxl.h" #include "qapi/error.h" @@ -210,24 +212,19 @@ static void cxl_dsp_exitfn(PCIDevice *d) pci_bridge_exitfn(d); } -static void cxl_dsp_instance_post_init(Object *obj) -{ - PCIESlot *s = PCIE_SLOT(obj); - - if (!s->speed) { - s->speed = QEMU_PCI_EXP_LNK_2_5GT; - } - - if (!s->width) { - s->width = QEMU_PCI_EXP_LNK_X1; - } -} +static const Property cxl_dsp_props[] = { + DEFINE_PROP_PCIE_LINK_SPEED("x-speed", PCIESlot, + speed, PCIE_LINK_SPEED_64), + DEFINE_PROP_PCIE_LINK_WIDTH("x-width", PCIESlot, + width, PCIE_LINK_WIDTH_16), +}; -static void cxl_dsp_class_init(ObjectClass *oc, void *data) +static 
void cxl_dsp_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PCIDeviceClass *k = PCI_DEVICE_CLASS(oc); + device_class_set_props(dc, cxl_dsp_props); k->config_write = cxl_dsp_config_write; k->realize = cxl_dsp_realize; k->exit = cxl_dsp_exitfn; @@ -236,16 +233,15 @@ static void cxl_dsp_class_init(ObjectClass *oc, void *data) k->revision = 0; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->desc = "CXL Switch Downstream Port"; - dc->reset = cxl_dsp_reset; + device_class_set_legacy_reset(dc, cxl_dsp_reset); } static const TypeInfo cxl_dsp_info = { .name = TYPE_CXL_DSP, .instance_size = sizeof(CXLDownstreamPort), .parent = TYPE_PCIE_SLOT, - .instance_post_init = cxl_dsp_instance_post_init, .class_init = cxl_dsp_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { INTERFACE_CXL_DEVICE }, { } diff --git a/hw/pci-bridge/cxl_root_port.c b/hw/pci-bridge/cxl_root_port.c index 2dd10239bd2..e6a4035d26c 100644 --- a/hw/pci-bridge/cxl_root_port.c +++ b/hw/pci-bridge/cxl_root_port.c @@ -24,6 +24,7 @@ #include "hw/pci/pcie_port.h" #include "hw/pci/msi.h" #include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "hw/sysbus.h" #include "qapi/error.h" #include "hw/cxl/cxl.h" @@ -198,7 +199,7 @@ static void cxl_rp_reset_hold(Object *obj, ResetType type) latch_registers(crp); } -static Property gen_rp_props[] = { +static const Property gen_rp_props[] = { DEFINE_PROP_UINT32("bus-reserve", CXLRootPort, res_reserve.bus, -1), DEFINE_PROP_SIZE("io-reserve", CXLRootPort, res_reserve.io, -1), DEFINE_PROP_SIZE("mem-reserve", CXLRootPort, res_reserve.mem_non_pref, -1), @@ -206,7 +207,10 @@ static Property gen_rp_props[] = { -1), DEFINE_PROP_SIZE("pref64-reserve", CXLRootPort, res_reserve.mem_pref_64, -1), - DEFINE_PROP_END_OF_LIST() + DEFINE_PROP_PCIE_LINK_SPEED("x-speed", PCIESlot, + speed, PCIE_LINK_SPEED_64), + DEFINE_PROP_PCIE_LINK_WIDTH("x-width", PCIESlot, + width, PCIE_LINK_WIDTH_32), }; static void cxl_rp_dvsec_write_config(PCIDevice *dev, uint32_t addr, @@ -258,7 +262,7 @@ static void cxl_rp_write_config(PCIDevice *d, uint32_t address, uint32_t val, cxl_rp_dvsec_write_config(d, address, val, len); } -static void cxl_root_port_class_init(ObjectClass *oc, void *data) +static void cxl_root_port_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PCIDeviceClass *k = PCI_DEVICE_CLASS(oc); @@ -290,7 +294,7 @@ static const TypeInfo cxl_root_port_info = { .parent = TYPE_PCIE_ROOT_PORT, .instance_size = sizeof(CXLRootPort), .class_init = cxl_root_port_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CXL_DEVICE }, { } }, diff --git a/hw/pci-bridge/cxl_upstream.c b/hw/pci-bridge/cxl_upstream.c index e51221a5f33..208e0c6172e 100644 --- a/hw/pci-bridge/cxl_upstream.c +++ b/hw/pci-bridge/cxl_upstream.c @@ -11,6 +11,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "hw/pci/msi.h" #include "hw/pci/pcie.h" #include "hw/pci/pcie_port.h" @@ -100,6 +101,7 @@ static void cxl_usp_reset(DeviceState *qdev) pci_bridge_reset(qdev); pcie_cap_deverr_reset(d); + pcie_cap_fill_link_ep_usp(d, usp->width, usp->speed); latch_registers(usp); } @@ -234,7 +236,7 @@ static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv) .type = CDAT_TYPE_SSLBIS, .length = sslbis_size, }, - .data_type = HMATLB_DATA_TYPE_ACCESS_LATENCY, + .data_type = 
HMAT_LB_DATA_TYPE_ACCESS_LATENCY, .entry_base_unit = 10000, }, }; @@ -254,7 +256,7 @@ static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv) .type = CDAT_TYPE_SSLBIS, .length = sslbis_size, }, - .data_type = HMATLB_DATA_TYPE_ACCESS_BANDWIDTH, + .data_type = HMAT_LB_DATA_TYPE_ACCESS_BANDWIDTH, .entry_base_unit = 1024, }, }; @@ -360,13 +362,16 @@ static void cxl_usp_exitfn(PCIDevice *d) pci_bridge_exitfn(d); } -static Property cxl_upstream_props[] = { +static const Property cxl_upstream_props[] = { DEFINE_PROP_UINT64("sn", CXLUpstreamPort, sn, UI64_NULL), DEFINE_PROP_STRING("cdat", CXLUpstreamPort, cxl_cstate.cdat.filename), - DEFINE_PROP_END_OF_LIST() + DEFINE_PROP_PCIE_LINK_SPEED("x-speed", CXLUpstreamPort, + speed, PCIE_LINK_SPEED_32), + DEFINE_PROP_PCIE_LINK_WIDTH("x-width", CXLUpstreamPort, + width, PCIE_LINK_WIDTH_16), }; -static void cxl_upstream_class_init(ObjectClass *oc, void *data) +static void cxl_upstream_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PCIDeviceClass *k = PCI_DEVICE_CLASS(oc); @@ -380,7 +385,7 @@ static void cxl_upstream_class_init(ObjectClass *oc, void *data) k->revision = 0; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->desc = "CXL Switch Upstream Port"; - dc->reset = cxl_usp_reset; + device_class_set_legacy_reset(dc, cxl_usp_reset); device_class_set_props(dc, cxl_upstream_props); } @@ -389,7 +394,7 @@ static const TypeInfo cxl_usp_info = { .parent = TYPE_PCIE_PORT, .instance_size = sizeof(CXLUpstreamPort), .class_init = cxl_upstream_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { INTERFACE_CXL_DEVICE }, { } diff --git a/hw/pci-bridge/gen_pcie_root_port.c b/hw/pci-bridge/gen_pcie_root_port.c index 784507c826b..d9078e783bf 100644 --- a/hw/pci-bridge/gen_pcie_root_port.c +++ b/hw/pci-bridge/gen_pcie_root_port.c @@ -128,7 +128,7 @@ static const VMStateDescription vmstate_rp_dev = { } }; -static Property gen_rp_props[] = { +static const Property gen_rp_props[] = { DEFINE_PROP_BOOL("x-migrate-msix", GenPCIERootPort, migrate_msix, true), DEFINE_PROP_UINT32("bus-reserve", GenPCIERootPort, @@ -145,10 +145,9 @@ static Property gen_rp_props[] = { speed, PCIE_LINK_SPEED_16), DEFINE_PROP_PCIE_LINK_WIDTH("x-width", PCIESlot, width, PCIE_LINK_WIDTH_32), - DEFINE_PROP_END_OF_LIST() }; -static void gen_rp_dev_class_init(ObjectClass *klass, void *data) +static void gen_rp_dev_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); diff --git a/hw/pci-bridge/i82801b11.c b/hw/pci-bridge/i82801b11.c index c140919cbc5..1d73c14c1f8 100644 --- a/hw/pci-bridge/i82801b11.c +++ b/hw/pci-bridge/i82801b11.c @@ -87,7 +87,7 @@ static const VMStateDescription i82801b11_bridge_dev_vmstate = { } }; -static void i82801b11_bridge_class_init(ObjectClass *klass, void *data) +static void i82801b11_bridge_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -98,7 +98,7 @@ static void i82801b11_bridge_class_init(ObjectClass *klass, void *data) k->realize = i82801b11_bridge_realize; k->config_write = pci_bridge_write_config; dc->vmsd = &i82801b11_bridge_dev_vmstate; - dc->reset = pci_bridge_reset; + device_class_set_legacy_reset(dc, pci_bridge_reset); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); } @@ -107,7 +107,7 @@ static const TypeInfo i82801b11_bridge_info = { .parent = TYPE_PCI_BRIDGE, .instance_size = 
sizeof(I82801b11Bridge), .class_init = i82801b11_bridge_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/pci-bridge/ioh3420.c b/hw/pci-bridge/ioh3420.c index be752a4bda5..bba640f495b 100644 --- a/hw/pci-bridge/ioh3420.c +++ b/hw/pci-bridge/ioh3420.c @@ -96,7 +96,7 @@ static const VMStateDescription vmstate_ioh3420 = { } }; -static void ioh3420_class_init(ObjectClass *klass, void *data) +static void ioh3420_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); diff --git a/hw/pci-bridge/meson.build b/hw/pci-bridge/meson.build index f2a60434dda..2e0eb0d2332 100644 --- a/hw/pci-bridge/meson.build +++ b/hw/pci-bridge/meson.build @@ -1,5 +1,5 @@ pci_ss = ss.source_set() -pci_ss.add(files('pci_bridge_dev.c')) +pci_ss.add(when: 'CONFIG_PCI_BRIDGE', if_true: files('pci_bridge_dev.c')) pci_ss.add(when: 'CONFIG_I82801B11', if_true: files('i82801b11.c')) pci_ss.add(when: 'CONFIG_IOH3420', if_true: files('ioh3420.c')) pci_ss.add(when: 'CONFIG_PCIE_PORT', if_true: files('pcie_root_port.c', 'gen_pcie_root_port.c')) diff --git a/hw/pci-bridge/pci_bridge_dev.c b/hw/pci-bridge/pci_bridge_dev.c index 089f91efed4..b328e50ab31 100644 --- a/hw/pci-bridge/pci_bridge_dev.c +++ b/hw/pci-bridge/pci_bridge_dev.c @@ -28,7 +28,7 @@ #include "hw/pci/shpc.h" #include "hw/pci/slotid_cap.h" #include "hw/qdev-properties.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/pci/pci_bus.h" #include "hw/hotplug.h" #include "qom/object.h" @@ -168,7 +168,7 @@ static void qdev_pci_bridge_dev_reset(DeviceState *qdev) } } -static Property pci_bridge_dev_properties[] = { +static const Property pci_bridge_dev_properties[] = { /* Note: 0 is not a legal chassis number. */ DEFINE_PROP_UINT8(PCI_BRIDGE_DEV_PROP_CHASSIS_NR, PCIBridgeDev, chassis_nr, 0), @@ -186,7 +186,6 @@ static Property pci_bridge_dev_properties[] = { res_reserve.mem_pref_32, -1), DEFINE_PROP_SIZE("pref64-reserve", PCIBridgeDev, res_reserve.mem_pref_64, -1), - DEFINE_PROP_END_OF_LIST(), }; static bool pci_device_shpc_present(void *opaque, int version_id) @@ -241,7 +240,7 @@ void pci_bridge_dev_unplug_request_cb(HotplugHandler *hotplug_dev, shpc_device_unplug_request_cb(hotplug_dev, dev, errp); } -static void pci_bridge_dev_class_init(ObjectClass *klass, void *data) +static void pci_bridge_dev_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -254,7 +253,7 @@ static void pci_bridge_dev_class_init(ObjectClass *klass, void *data) k->device_id = PCI_DEVICE_ID_REDHAT_BRIDGE; k->class_id = PCI_CLASS_BRIDGE_PCI; dc->desc = "Standard PCI Bridge"; - dc->reset = qdev_pci_bridge_dev_reset; + device_class_set_legacy_reset(dc, qdev_pci_bridge_dev_reset); device_class_set_props(dc, pci_bridge_dev_properties); dc->vmsd = &pci_bridge_dev_vmstate; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); @@ -269,7 +268,7 @@ static const TypeInfo pci_bridge_dev_info = { .instance_size = sizeof(PCIBridgeDev), .class_init = pci_bridge_dev_class_init, .instance_finalize = pci_bridge_dev_instance_finalize, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { } @@ -281,7 +280,7 @@ static const TypeInfo pci_bridge_dev_info = { * different pci id, so we can match it easily in the guest for * automagic multiseat configuration. 
See docs/multiseat.txt for more. */ -static void pci_bridge_dev_seat_class_init(ObjectClass *klass, void *data) +static void pci_bridge_dev_seat_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c index 0411ad31ea4..a8eb2d24260 100644 --- a/hw/pci-bridge/pci_expander_bridge.c +++ b/hw/pci-bridge/pci_expander_bridge.c @@ -23,7 +23,7 @@ #include "qemu/range.h" #include "qemu/error-report.h" #include "qemu/module.h" -#include "sysemu/numa.h" +#include "system/numa.h" #include "hw/boards.h" #include "qom/object.h" @@ -34,11 +34,9 @@ typedef struct PXBBus PXBBus; DECLARE_INSTANCE_CHECKER(PXBBus, PXB_BUS, TYPE_PXB_BUS) -#define TYPE_PXB_PCIE_BUS "pxb-pcie-bus" DECLARE_INSTANCE_CHECKER(PXBBus, PXB_PCIE_BUS, TYPE_PXB_PCIE_BUS) -#define TYPE_PXB_CXL_BUS "pxb-cxl-bus" DECLARE_INSTANCE_CHECKER(PXBBus, PXB_CXL_BUS, TYPE_PXB_CXL_BUS) @@ -50,7 +48,6 @@ struct PXBBus { char bus_path[8]; }; -#define TYPE_PXB_PCIE_DEV "pxb-pcie" OBJECT_DECLARE_SIMPLE_TYPE(PXBPCIEDev, PXB_PCIE_DEV) static GList *pxb_dev_list; @@ -85,12 +82,25 @@ static uint16_t pxb_bus_numa_node(PCIBus *bus) return pxb->numa_node; } -static void pxb_bus_class_init(ObjectClass *class, void *data) +static void prop_pxb_uid_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint32_t uid = pci_bus_num(PCI_BUS(obj)); + + visit_type_uint32(v, name, &uid, errp); +} + +static void pxb_bus_class_init(ObjectClass *class, const void *data) { PCIBusClass *pbc = PCI_BUS_CLASS(class); pbc->bus_num = pxb_bus_num; pbc->numa_node = pxb_bus_numa_node; + + object_class_property_add(class, "acpi_uid", "uint32", + prop_pxb_uid_get, NULL, NULL, NULL); + object_class_property_set_description(class, "acpi_uid", + "ACPI Unique ID used to distinguish this PCI Host Bridge / ACPI00016"); } static const TypeInfo pxb_bus_info = { @@ -157,7 +167,7 @@ static char *pxb_host_ofw_unit_address(const SysBusDevice *dev) return NULL; } -static void pxb_host_class_init(ObjectClass *class, void *data) +static void pxb_host_class_init(ObjectClass *class, const void *data) { DeviceClass *dc = DEVICE_CLASS(class); SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(class); @@ -212,7 +222,7 @@ void pxb_cxl_hook_up_registers(CXLState *cxl_state, PCIBus *bus, Error **errp) cxl_state->next_mr_idx++; } -static void pxb_cxl_host_class_init(ObjectClass *class, void *data) +static void pxb_cxl_host_class_init(ObjectClass *class, const void *data) { DeviceClass *dc = DEVICE_CLASS(class); PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(class); @@ -318,7 +328,7 @@ static gint pxb_compare(gconstpointer a, gconstpointer b) 0; } -static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type, +static bool pxb_dev_realize_common(PCIDevice *dev, enum BusType type, Error **errp) { PXBDev *pxb = PXB_DEV(dev); @@ -330,13 +340,13 @@ static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type, if (ms->numa_state == NULL) { error_setg(errp, "NUMA is not supported by this machine-type"); - return; + return false; } if (pxb->numa_node != NUMA_NODE_UNASSIGNED && pxb->numa_node >= ms->numa_state->num_nodes) { error_setg(errp, "Illegal numa node %d", pxb->numa_node); - return; + return false; } if (dev->qdev.id && *dev->qdev.id) { @@ -382,12 +392,13 @@ static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type, pci_config_set_class(dev->config, PCI_CLASS_BRIDGE_HOST); pxb_dev_list = 
g_list_insert_sorted(pxb_dev_list, pxb, pxb_compare); - return; + return true; err_register_bus: object_unref(OBJECT(bds)); object_unparent(OBJECT(bus)); object_unref(OBJECT(ds)); + return false; } static void pxb_dev_realize(PCIDevice *dev, Error **errp) @@ -407,15 +418,14 @@ static void pxb_dev_exitfn(PCIDevice *pci_dev) pxb_dev_list = g_list_remove(pxb_dev_list, pxb); } -static Property pxb_dev_properties[] = { +static const Property pxb_dev_properties[] = { /* Note: 0 is not a legal PXB bus number. */ DEFINE_PROP_UINT8("bus_nr", PXBDev, bus_nr, 0), DEFINE_PROP_UINT16("numa_node", PXBDev, numa_node, NUMA_NODE_UNASSIGNED), DEFINE_PROP_BOOL("bypass_iommu", PXBDev, bypass_iommu, false), - DEFINE_PROP_END_OF_LIST(), }; -static void pxb_dev_class_init(ObjectClass *klass, void *data) +static void pxb_dev_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -437,7 +447,7 @@ static const TypeInfo pxb_dev_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PXBDev), .class_init = pxb_dev_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -453,7 +463,7 @@ static void pxb_pcie_dev_realize(PCIDevice *dev, Error **errp) pxb_dev_realize_common(dev, PCIE, errp); } -static void pxb_pcie_dev_class_init(ObjectClass *klass, void *data) +static void pxb_pcie_dev_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -474,7 +484,7 @@ static const TypeInfo pxb_pcie_dev_info = { .parent = TYPE_PXB_DEV, .instance_size = sizeof(PXBPCIEDev), .class_init = pxb_pcie_dev_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -488,16 +498,17 @@ static void pxb_cxl_dev_realize(PCIDevice *dev, Error **errp) return; } - pxb_dev_realize_common(dev, CXL, errp); + if (!pxb_dev_realize_common(dev, CXL, errp)) { + return; + } pxb_cxl_dev_reset(DEVICE(dev)); } -static Property pxb_cxl_dev_properties[] = { +static const Property pxb_cxl_dev_properties[] = { DEFINE_PROP_BOOL("hdm_for_passthrough", PXBCXLDev, hdm_for_passthrough, false), - DEFINE_PROP_END_OF_LIST(), }; -static void pxb_cxl_dev_class_init(ObjectClass *klass, void *data) +static void pxb_cxl_dev_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -515,7 +526,7 @@ static void pxb_cxl_dev_class_init(ObjectClass *klass, void *data) /* Host bridges aren't hotpluggable. 
FIXME: spec reference */ dc->hotpluggable = false; - dc->reset = pxb_cxl_dev_reset; + device_class_set_legacy_reset(dc, pxb_cxl_dev_reset); } static const TypeInfo pxb_cxl_dev_info = { @@ -524,7 +535,7 @@ static const TypeInfo pxb_cxl_dev_info = { .instance_size = sizeof(PXBCXLDev), .class_init = pxb_cxl_dev_class_init, .interfaces = - (InterfaceInfo[]){ + (const InterfaceInfo[]){ { INTERFACE_CONVENTIONAL_PCI_DEVICE }, {}, }, diff --git a/hw/pci-bridge/pcie_pci_bridge.c b/hw/pci-bridge/pcie_pci_bridge.c index 7646ac23975..fce292a519b 100644 --- a/hw/pci-bridge/pcie_pci_bridge.c +++ b/hw/pci-bridge/pcie_pci_bridge.c @@ -52,11 +52,10 @@ static void pcie_pci_bridge_realize(PCIDevice *d, Error **errp) goto cap_error; } - pos = pci_add_capability(d, PCI_CAP_ID_PM, 0, PCI_PM_SIZEOF, errp); + pos = pci_pm_init(d, 0, errp); if (pos < 0) { goto pm_error; } - d->exp.pm_cap = pos; pci_set_word(d->config + pos + PCI_PM_PMC, 0x3); pcie_cap_arifwd_init(d); @@ -124,9 +123,8 @@ static void pcie_pci_bridge_write_config(PCIDevice *d, shpc_cap_write_config(d, address, val, len); } -static Property pcie_pci_bridge_dev_properties[] = { +static const Property pcie_pci_bridge_dev_properties[] = { DEFINE_PROP_ON_OFF_AUTO("msi", PCIEPCIBridge, msi, ON_OFF_AUTO_AUTO), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription pcie_pci_bridge_dev_vmstate = { @@ -139,7 +137,7 @@ static const VMStateDescription pcie_pci_bridge_dev_vmstate = { } }; -static void pcie_pci_bridge_class_init(ObjectClass *klass, void *data) +static void pcie_pci_bridge_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -152,7 +150,7 @@ static void pcie_pci_bridge_class_init(ObjectClass *klass, void *data) k->config_write = pcie_pci_bridge_write_config; dc->vmsd = &pcie_pci_bridge_dev_vmstate; device_class_set_props(dc, pcie_pci_bridge_dev_properties); - dc->reset = &pcie_pci_bridge_reset; + device_class_set_legacy_reset(dc, pcie_pci_bridge_reset); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); hc->plug = pci_bridge_dev_plug_cb; hc->unplug = pci_bridge_dev_unplug_cb; @@ -164,7 +162,7 @@ static const TypeInfo pcie_pci_bridge_info = { .parent = TYPE_PCI_BRIDGE, .instance_size = sizeof(PCIEPCIBridge), .class_init = pcie_pci_bridge_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { INTERFACE_PCIE_DEVICE }, { }, diff --git a/hw/pci-bridge/pcie_root_port.c b/hw/pci-bridge/pcie_root_port.c index 09a34786bc6..22c2fdb71e7 100644 --- a/hw/pci-bridge/pcie_root_port.c +++ b/hw/pci-bridge/pcie_root_port.c @@ -148,11 +148,10 @@ static void rp_exit(PCIDevice *d) pci_bridge_exitfn(d); } -static Property rp_props[] = { +static const Property rp_props[] = { DEFINE_PROP_BIT(COMPAT_PROP_PCP, PCIDevice, cap_present, QEMU_PCIE_SLTCAP_PCP_BITNR, true), DEFINE_PROP_BOOL("disable-acs", PCIESlot, disable_acs, false), - DEFINE_PROP_END_OF_LIST() }; static void rp_instance_post_init(Object *obj) @@ -168,7 +167,7 @@ static void rp_instance_post_init(Object *obj) } } -static void rp_class_init(ObjectClass *klass, void *data) +static void rp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -189,7 +188,7 @@ static const TypeInfo rp_info = { .class_init = rp_class_init, .abstract = true, .class_size = sizeof(PCIERootPortClass), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE 
}, { } }, diff --git a/hw/pci-bridge/simba.c b/hw/pci-bridge/simba.c index 17aa0d7b216..bbae594e119 100644 --- a/hw/pci-bridge/simba.c +++ b/hw/pci-bridge/simba.c @@ -66,7 +66,7 @@ static void simba_pci_bridge_realize(PCIDevice *dev, Error **errp) pci_bridge_update_mappings(PCI_BRIDGE(br)); } -static void simba_pci_bridge_class_init(ObjectClass *klass, void *data) +static void simba_pci_bridge_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -78,7 +78,7 @@ static void simba_pci_bridge_class_init(ObjectClass *klass, void *data) k->revision = 0x11; k->config_write = pci_bridge_write_config; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); - dc->reset = pci_bridge_reset; + device_class_set_legacy_reset(dc, pci_bridge_reset); dc->vmsd = &vmstate_pci_device; } @@ -87,7 +87,7 @@ static const TypeInfo simba_pci_bridge_info = { .parent = TYPE_PCI_BRIDGE, .class_init = simba_pci_bridge_class_init, .instance_size = sizeof(SimbaPCIBridge), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/pci-bridge/xio3130_downstream.c b/hw/pci-bridge/xio3130_downstream.c index 907d5105b01..dc7d1aa7d77 100644 --- a/hw/pci-bridge/xio3130_downstream.c +++ b/hw/pci-bridge/xio3130_downstream.c @@ -134,10 +134,9 @@ static void xio3130_downstream_exitfn(PCIDevice *d) pci_bridge_exitfn(d); } -static Property xio3130_downstream_props[] = { +static const Property xio3130_downstream_props[] = { DEFINE_PROP_BIT(COMPAT_PROP_PCP, PCIDevice, cap_present, QEMU_PCIE_SLTCAP_PCP_BITNR, true), - DEFINE_PROP_END_OF_LIST() }; static const VMStateDescription vmstate_xio3130_downstream = { @@ -154,7 +153,7 @@ static const VMStateDescription vmstate_xio3130_downstream = { } }; -static void xio3130_downstream_class_init(ObjectClass *klass, void *data) +static void xio3130_downstream_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -167,7 +166,7 @@ static void xio3130_downstream_class_init(ObjectClass *klass, void *data) k->revision = XIO3130_REVISION; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->desc = "TI X3130 Downstream Port of PCI Express Switch"; - dc->reset = xio3130_downstream_reset; + device_class_set_legacy_reset(dc, xio3130_downstream_reset); dc->vmsd = &vmstate_xio3130_downstream; device_class_set_props(dc, xio3130_downstream_props); } @@ -176,7 +175,7 @@ static const TypeInfo xio3130_downstream_info = { .name = TYPE_XIO3130_DOWNSTREAM, .parent = TYPE_PCIE_SLOT, .class_init = xio3130_downstream_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { } }, diff --git a/hw/pci-bridge/xio3130_upstream.c b/hw/pci-bridge/xio3130_upstream.c index 2a6cff6e033..40057b749bf 100644 --- a/hw/pci-bridge/xio3130_upstream.c +++ b/hw/pci-bridge/xio3130_upstream.c @@ -123,7 +123,7 @@ static const VMStateDescription vmstate_xio3130_upstream = { } }; -static void xio3130_upstream_class_init(ObjectClass *klass, void *data) +static void xio3130_upstream_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -136,7 +136,7 @@ static void xio3130_upstream_class_init(ObjectClass *klass, void *data) k->revision = XIO3130_REVISION; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->desc = "TI X3130 Upstream Port of PCI Express Switch"; - 
dc->reset = xio3130_upstream_reset; + device_class_set_legacy_reset(dc, xio3130_upstream_reset); dc->vmsd = &vmstate_xio3130_upstream; } @@ -144,7 +144,7 @@ static const TypeInfo xio3130_upstream_info = { .name = "x3130-upstream", .parent = TYPE_PCIE_PORT, .class_init = xio3130_upstream_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { } }, diff --git a/hw/pci-host/Kconfig b/hw/pci-host/Kconfig index c91880b2370..9824fa188d6 100644 --- a/hw/pci-host/Kconfig +++ b/hw/pci-host/Kconfig @@ -54,6 +54,7 @@ config PCI_EXPRESS_Q35 config PCI_EXPRESS_GENERIC_BRIDGE bool select PCI_EXPRESS + imply ACPI_PCI config PCI_EXPRESS_XILINX bool @@ -99,6 +100,9 @@ config ASTRO bool select PCI +config PCI_EXPRESS_FSL_IMX8M_PHY + bool + config GT64120 bool select PCI diff --git a/hw/pci-host/articia.c b/hw/pci-host/articia.c index f3fcc49f812..cc65aac2a87 100644 --- a/hw/pci-host/articia.c +++ b/hw/pci-host/articia.c @@ -195,7 +195,7 @@ static void articia_realize(DeviceState *dev, Error **errp) qdev_init_gpio_out(dev, s->irq, ARRAY_SIZE(s->irq)); } -static void articia_class_init(ObjectClass *klass, void *data) +static void articia_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -228,7 +228,7 @@ static void articia_pci_host_cfg_write(PCIDevice *d, uint32_t addr, } } -static void articia_pci_host_class_init(ObjectClass *klass, void *data) +static void articia_pci_host_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -246,7 +246,7 @@ static void articia_pci_host_class_init(ObjectClass *klass, void *data) /* TYPE_ARTICIA_PCI_BRIDGE */ -static void articia_pci_bridge_class_init(ObjectClass *klass, void *data) +static void articia_pci_bridge_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -273,7 +273,7 @@ static const TypeInfo articia_types[] = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(ArticiaHostState), .class_init = articia_pci_host_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -283,7 +283,7 @@ static const TypeInfo articia_types[] = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIDevice), .class_init = articia_pci_bridge_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/pci-host/astro.c b/hw/pci-host/astro.c index e3e589ceacc..859e308c577 100644 --- a/hw/pci-host/astro.c +++ b/hw/pci-host/astro.c @@ -35,6 +35,7 @@ #include "target/hppa/cpu.h" #include "trace.h" #include "qom/object.h" +#include "exec/target_page.h" /* * Helper functions @@ -461,10 +462,6 @@ static void elroy_pcihost_init(Object *obj) qdev_init_gpio_in(DEVICE(obj), elroy_set_irq, ELROY_IRQS); } -static Property elroy_pcihost_properties[] = { - DEFINE_PROP_END_OF_LIST(), -}; - static const VMStateDescription vmstate_elroy = { .name = "Elroy", .version_id = 1, @@ -485,12 +482,11 @@ static const VMStateDescription vmstate_elroy = { } }; -static void elroy_pcihost_class_init(ObjectClass *klass, void *data) +static void elroy_pcihost_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = elroy_reset; - device_class_set_props(dc, elroy_pcihost_properties); + device_class_set_legacy_reset(dc, 
elroy_reset); dc->vmsd = &vmstate_elroy; dc->user_creatable = false; } @@ -526,6 +522,53 @@ static ElroyState *elroy_init(int num) * Astro Runway chip. */ +static void adjust_LMMIO_DIRECT_mapping(AstroState *s, unsigned int reg_index) +{ + MemoryRegion *lmmio_alias; + unsigned int lmmio_index, map_route; + hwaddr map_addr; + uint32_t map_size; + struct ElroyState *elroy; + + /* pointer to LMMIO_DIRECT entry */ + lmmio_index = reg_index / 3; + lmmio_alias = &s->lmmio_direct[lmmio_index]; + + map_addr = s->ioc_ranges[3 * lmmio_index + 0]; + map_size = s->ioc_ranges[3 * lmmio_index + 1]; + map_route = s->ioc_ranges[3 * lmmio_index + 2]; + + /* find elroy to which this address is routed */ + map_route &= (ELROY_NUM - 1); + elroy = s->elroy[map_route]; + + if (lmmio_alias->enabled) { + memory_region_set_enabled(lmmio_alias, false); + } + + map_addr = F_EXTEND(map_addr); + map_addr &= TARGET_PAGE_MASK; + map_size = (~map_size) + 1; + map_size &= TARGET_PAGE_MASK; + + /* exit if disabled or zero map size */ + if (!(map_addr & 1) || !map_size) { + return; + } + + if (!memory_region_size(lmmio_alias)) { + memory_region_init_alias(lmmio_alias, OBJECT(elroy), + "pci-lmmmio-alias", &elroy->pci_mmio, + (uint32_t) map_addr, map_size); + memory_region_add_subregion(get_system_memory(), map_addr, + lmmio_alias); + } else { + memory_region_set_alias_offset(lmmio_alias, map_addr); + memory_region_set_size(lmmio_alias, map_size); + memory_region_set_enabled(lmmio_alias, true); + } +} + static MemTxResult astro_chip_read_with_attrs(void *opaque, hwaddr addr, uint64_t *data, unsigned size, MemTxAttrs attrs) @@ -633,6 +676,11 @@ static MemTxResult astro_chip_write_with_attrs(void *opaque, hwaddr addr, break; case 0x0300 ... 0x03d8 - 1: /* LMMIO_DIRECT0_BASE... */ put_val_in_arrary(s->ioc_ranges, 0x300, addr, size, val); + unsigned int index = (addr - 0x300) / 8; + /* check if one of the 4 LMMIO_DIRECT regs, each using 3 entries. 
*/ + if (index < LMMIO_DIRECT_RANGES * 3) { + adjust_LMMIO_DIRECT_mapping(s, index); + } break; case 0x10200: case 0x10220: @@ -861,11 +909,11 @@ static void astro_realize(DeviceState *obj, Error **errp) } } -static void astro_class_init(ObjectClass *klass, void *data) +static void astro_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = astro_reset; + device_class_set_legacy_reset(dc, astro_reset); dc->vmsd = &vmstate_astro; dc->realize = astro_realize; /* @@ -884,7 +932,7 @@ static const TypeInfo astro_chip_info = { }; static void astro_iommu_memory_region_class_init(ObjectClass *klass, - void *data) + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); diff --git a/hw/pci-host/bonito.c b/hw/pci-host/bonito.c index 1516d0074dd..7d6251a78d7 100644 --- a/hw/pci-host/bonito.c +++ b/hw/pci-host/bonito.c @@ -48,7 +48,7 @@ #include "hw/pci-host/bonito.h" #include "hw/pci/pci_host.h" #include "migration/vmstate.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "hw/misc/unimp.h" #include "hw/registerfields.h" #include "qom/object.h" @@ -757,7 +757,7 @@ PCIBus *bonito_init(qemu_irq *pic) return phb->bus; } -static void bonito_pci_class_init(ObjectClass *klass, void *data) +static void bonito_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -783,13 +783,13 @@ static const TypeInfo bonito_pci_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIBonitoState), .class_init = bonito_pci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -static void bonito_host_class_init(ObjectClass *klass, void *data) +static void bonito_host_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/pci-host/designware.c b/hw/pci-host/designware.c index c25d50f1c6b..f6e49ce9b8d 100644 --- a/hw/pci-host/designware.c +++ b/hw/pci-host/designware.c @@ -20,8 +20,8 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qemu/module.h" #include "qemu/log.h" +#include "qemu/bitops.h" #include "hw/pci/msi.h" #include "hw/pci/pci_bridge.h" #include "hw/pci/pci_host.h" @@ -55,7 +55,17 @@ #define DESIGNWARE_PCIE_ATU_DEVFN(x) (((x) >> 16) & 0xff) #define DESIGNWARE_PCIE_ATU_UPPER_TARGET 0x91C -#define DESIGNWARE_PCIE_IRQ_MSI 3 +static void designware_pcie_root_bus_class_init(ObjectClass *klass, + const void *data) +{ + BusClass *k = BUS_CLASS(klass); + + /* + * Designware has only a single root complex. Enforce the limit on the + * parent bus + */ + k->max_dev = 1; +} static DesignwarePCIEHost * designware_pcie_root_to_host(DesignwarePCIERoot *root) @@ -90,7 +100,7 @@ static void designware_pcie_root_msi_write(void *opaque, hwaddr addr, root->msi.intr[0].status |= BIT(val) & root->msi.intr[0].enable; if (root->msi.intr[0].status & ~root->msi.intr[0].mask) { - qemu_set_irq(host->pci.irqs[DESIGNWARE_PCIE_IRQ_MSI], 1); + qemu_set_irq(host->pci.msi, 1); } } @@ -153,11 +163,9 @@ designware_pcie_root_config_read(PCIDevice *d, uint32_t address, int len) break; case DESIGNWARE_PCIE_MSI_ADDR_LO: - val = root->msi.base; - break; - case DESIGNWARE_PCIE_MSI_ADDR_HI: - val = root->msi.base >> 32; + val = extract64(root->msi.base, + address == DESIGNWARE_PCIE_MSI_ADDR_LO ? 
0 : 32, 32); break; case DESIGNWARE_PCIE_MSI_INTR0_ENABLE: @@ -181,19 +189,16 @@ designware_pcie_root_config_read(PCIDevice *d, uint32_t address, int len) break; case DESIGNWARE_PCIE_ATU_LOWER_BASE: - val = viewport->base; - break; - case DESIGNWARE_PCIE_ATU_UPPER_BASE: - val = viewport->base >> 32; + val = extract64(viewport->base, + address == DESIGNWARE_PCIE_ATU_LOWER_BASE ? 0 : 32, 32); break; case DESIGNWARE_PCIE_ATU_LOWER_TARGET: - val = viewport->target; - break; - case DESIGNWARE_PCIE_ATU_UPPER_TARGET: - val = viewport->target >> 32; + val = extract64(viewport->target, + address == DESIGNWARE_PCIE_ATU_LOWER_TARGET ? 0 : 32, + 32); break; case DESIGNWARE_PCIE_ATU_LIMIT: @@ -312,14 +317,10 @@ static void designware_pcie_root_config_write(PCIDevice *d, uint32_t address, break; case DESIGNWARE_PCIE_MSI_ADDR_LO: - root->msi.base &= 0xFFFFFFFF00000000ULL; - root->msi.base |= val; - designware_pcie_root_update_msi_mapping(root); - break; - case DESIGNWARE_PCIE_MSI_ADDR_HI: - root->msi.base &= 0x00000000FFFFFFFFULL; - root->msi.base |= (uint64_t)val << 32; + root->msi.base = deposit64(root->msi.base, + address == DESIGNWARE_PCIE_MSI_ADDR_LO + ? 0 : 32, 32, val); designware_pcie_root_update_msi_mapping(root); break; @@ -335,7 +336,7 @@ static void designware_pcie_root_config_write(PCIDevice *d, uint32_t address, case DESIGNWARE_PCIE_MSI_INTR0_STATUS: root->msi.intr[0].status ^= val; if (!root->msi.intr[0].status) { - qemu_set_irq(host->pci.irqs[DESIGNWARE_PCIE_IRQ_MSI], 0); + qemu_set_irq(host->pci.msi, 0); } break; @@ -346,23 +347,17 @@ static void designware_pcie_root_config_write(PCIDevice *d, uint32_t address, break; case DESIGNWARE_PCIE_ATU_LOWER_BASE: - viewport->base &= 0xFFFFFFFF00000000ULL; - viewport->base |= val; - break; - case DESIGNWARE_PCIE_ATU_UPPER_BASE: - viewport->base &= 0x00000000FFFFFFFFULL; - viewport->base |= (uint64_t)val << 32; + viewport->base = deposit64(viewport->base, + address == DESIGNWARE_PCIE_ATU_LOWER_BASE + ? 0 : 32, 32, val); break; case DESIGNWARE_PCIE_ATU_LOWER_TARGET: - viewport->target &= 0xFFFFFFFF00000000ULL; - viewport->target |= val; - break; - case DESIGNWARE_PCIE_ATU_UPPER_TARGET: - viewport->target &= 0x00000000FFFFFFFFULL; - viewport->target |= val; + viewport->target = deposit64(viewport->target, + address == DESIGNWARE_PCIE_ATU_LOWER_TARGET + ? 
0 : 32, 32, val); break; case DESIGNWARE_PCIE_ATU_LIMIT: @@ -395,6 +390,7 @@ static void designware_pcie_root_realize(PCIDevice *dev, Error **errp) { DesignwarePCIERoot *root = DESIGNWARE_PCIE_ROOT(dev); DesignwarePCIEHost *host = designware_pcie_root_to_host(root); + MemoryRegion *host_mem = get_system_memory(); MemoryRegion *address_space = &host->pci.memory; PCIBridge *br = PCI_BRIDGE(dev); DesignwarePCIEViewport *viewport; @@ -435,7 +431,7 @@ static void designware_pcie_root_realize(PCIDevice *dev, Error **errp) viewport->cr[0] = DESIGNWARE_PCIE_ATU_TYPE_MEM; source = &host->pci.address_space_root; - destination = get_system_memory(); + destination = host_mem; direction = "Inbound"; /* @@ -460,7 +456,7 @@ static void designware_pcie_root_realize(PCIDevice *dev, Error **errp) destination = &host->pci.memory; direction = "Outbound"; - source = get_system_memory(); + source = host_mem; /* * Configure MemoryRegion implementing CPU -> PCI memory @@ -591,7 +587,8 @@ static const VMStateDescription vmstate_designware_pcie_root = { } }; -static void designware_pcie_root_class_init(ObjectClass *klass, void *data) +static void designware_pcie_root_class_init(ObjectClass *klass, + const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -607,7 +604,7 @@ static void designware_pcie_root_class_init(ObjectClass *klass, void *data) k->config_read = designware_pcie_root_config_read; k->config_write = designware_pcie_root_config_write; - dc->reset = pci_bridge_reset; + device_class_set_legacy_reset(dc, pci_bridge_reset); /* * PCI-facing part of the host bridge, not usable without the * host-facing part, which can't be device_add'ed, yet. @@ -679,6 +676,7 @@ static void designware_pcie_host_realize(DeviceState *dev, Error **errp) for (i = 0; i < ARRAY_SIZE(s->pci.irqs); i++) { sysbus_init_irq(sbd, &s->pci.irqs[i]); } + sysbus_init_irq(sbd, &s->pci.msi); memory_region_init_io(&s->mmio, OBJECT(s), @@ -699,7 +697,7 @@ static void designware_pcie_host_realize(DeviceState *dev, Error **errp) &s->pci.memory, &s->pci.io, 0, 4, - TYPE_PCIE_BUS); + TYPE_DESIGNWARE_PCIE_ROOT_BUS); pci->bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; memory_region_init(&s->pci.address_space_root, @@ -730,7 +728,8 @@ static const VMStateDescription vmstate_designware_pcie_host = { } }; -static void designware_pcie_host_class_init(ObjectClass *klass, void *data) +static void designware_pcie_host_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); @@ -752,28 +751,28 @@ static void designware_pcie_host_init(Object *obj) qdev_prop_set_bit(DEVICE(root), "multifunction", false); } -static const TypeInfo designware_pcie_root_info = { - .name = TYPE_DESIGNWARE_PCIE_ROOT, - .parent = TYPE_PCI_BRIDGE, - .instance_size = sizeof(DesignwarePCIERoot), - .class_init = designware_pcie_root_class_init, - .interfaces = (InterfaceInfo[]) { - { INTERFACE_PCIE_DEVICE }, - { } +static const TypeInfo designware_pcie_types[] = { + { + .name = TYPE_DESIGNWARE_PCIE_ROOT_BUS, + .parent = TYPE_PCIE_BUS, + .instance_size = sizeof(DesignwarePCIERootBus), + .class_init = designware_pcie_root_bus_class_init, + }, { + .name = TYPE_DESIGNWARE_PCIE_HOST, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(DesignwarePCIEHost), + .instance_init = designware_pcie_host_init, + .class_init = designware_pcie_host_class_init, + }, { + .name = TYPE_DESIGNWARE_PCIE_ROOT, + .parent = TYPE_PCI_BRIDGE, + .instance_size = 
sizeof(DesignwarePCIERoot), + .class_init = designware_pcie_root_class_init, + .interfaces = (const InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, }, }; -static const TypeInfo designware_pcie_host_info = { - .name = TYPE_DESIGNWARE_PCIE_HOST, - .parent = TYPE_PCI_HOST_BRIDGE, - .instance_size = sizeof(DesignwarePCIEHost), - .instance_init = designware_pcie_host_init, - .class_init = designware_pcie_host_class_init, -}; - -static void designware_pcie_register(void) -{ - type_register_static(&designware_pcie_root_info); - type_register_static(&designware_pcie_host_info); -} -type_init(designware_pcie_register) +DEFINE_TYPES(designware_pcie_types) diff --git a/hw/pci-host/dino.c b/hw/pci-host/dino.c index d992c4bb69d..11b353be2ea 100644 --- a/hw/pci-host/dino.c +++ b/hw/pci-host/dino.c @@ -492,17 +492,16 @@ static void dino_pcihost_init(Object *obj) qdev_init_gpio_in(DEVICE(obj), dino_set_irq, DINO_IRQS); } -static Property dino_pcihost_properties[] = { +static const Property dino_pcihost_properties[] = { DEFINE_PROP_LINK("memory-as", DinoState, memory_as, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void dino_pcihost_class_init(ObjectClass *klass, void *data) +static void dino_pcihost_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = dino_pcihost_reset; + device_class_set_legacy_reset(dc, dino_pcihost_reset); dc->realize = dino_pcihost_realize; dc->unrealize = dino_pcihost_unrealize; device_class_set_props(dc, dino_pcihost_properties); diff --git a/hw/pci-host/fsl_imx8m_phy.c b/hw/pci-host/fsl_imx8m_phy.c new file mode 100644 index 00000000000..04da3f99a07 --- /dev/null +++ b/hw/pci-host/fsl_imx8m_phy.c @@ -0,0 +1,98 @@ +/* + * i.MX8 PCIe PHY emulation + * + * Copyright (c) 2025 Bernhard Beschow + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/pci-host/fsl_imx8m_phy.h" +#include "hw/resettable.h" +#include "migration/vmstate.h" + +#define CMN_REG075 0x1d4 +#define ANA_PLL_LOCK_DONE BIT(1) +#define ANA_PLL_AFC_DONE BIT(0) + +static uint64_t fsl_imx8m_pcie_phy_read(void *opaque, hwaddr offset, + unsigned size) +{ + FslImx8mPciePhyState *s = opaque; + + if (offset == CMN_REG075) { + return s->data[offset] | ANA_PLL_LOCK_DONE | ANA_PLL_AFC_DONE; + } + + return s->data[offset]; +} + +static void fsl_imx8m_pcie_phy_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + FslImx8mPciePhyState *s = opaque; + + s->data[offset] = value; +} + +static const MemoryRegionOps fsl_imx8m_pcie_phy_ops = { + .read = fsl_imx8m_pcie_phy_read, + .write = fsl_imx8m_pcie_phy_write, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void fsl_imx8m_pcie_phy_realize(DeviceState *dev, Error **errp) +{ + FslImx8mPciePhyState *s = FSL_IMX8M_PCIE_PHY(dev); + + memory_region_init_io(&s->iomem, OBJECT(s), &fsl_imx8m_pcie_phy_ops, s, + TYPE_FSL_IMX8M_PCIE_PHY, ARRAY_SIZE(s->data)); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); +} + +static void fsl_imx8m_pcie_phy_reset_hold(Object *obj, ResetType type) +{ + FslImx8mPciePhyState *s = FSL_IMX8M_PCIE_PHY(obj); + + memset(s->data, 0, sizeof(s->data)); +} + +static const VMStateDescription fsl_imx8m_pcie_phy_vmstate = { + .name = "fsl-imx8m-pcie-phy", + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT8_ARRAY(data, FslImx8mPciePhyState, + 
FSL_IMX8M_PCIE_PHY_DATA_SIZE), + VMSTATE_END_OF_LIST() + } +}; + +static void fsl_imx8m_pcie_phy_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->realize = fsl_imx8m_pcie_phy_realize; + dc->vmsd = &fsl_imx8m_pcie_phy_vmstate; + rc->phases.hold = fsl_imx8m_pcie_phy_reset_hold; +} + +static const TypeInfo fsl_imx8m_pcie_phy_types[] = { + { + .name = TYPE_FSL_IMX8M_PCIE_PHY, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(FslImx8mPciePhyState), + .class_init = fsl_imx8m_pcie_phy_class_init, + } +}; + +DEFINE_TYPES(fsl_imx8m_pcie_phy_types) diff --git a/hw/pci-host/gpex-acpi.c b/hw/pci-host/gpex-acpi.c index 391fabb8a8f..66dc7503d12 100644 --- a/hw/pci-host/gpex-acpi.c +++ b/hw/pci-host/gpex-acpi.c @@ -1,11 +1,13 @@ #include "qemu/osdep.h" #include "hw/acpi/aml-build.h" +#include "hw/acpi/pci.h" #include "hw/pci-host/gpex.h" #include "hw/arm/virt.h" #include "hw/pci/pci_bus.h" #include "hw/pci/pci_bridge.h" #include "hw/pci/pcie_host.h" #include "hw/acpi/cxl.h" +#include "hw/acpi/acpi_egm_memory.h" static void acpi_dsdt_add_pci_route_table(Aml *dev, uint32_t irq, Aml *scope, uint8_t bus_num) @@ -50,61 +52,11 @@ static void acpi_dsdt_add_pci_route_table(Aml *dev, uint32_t irq, } } -static void acpi_dsdt_add_pci_osc(Aml *dev) +static Aml *build_pci_host_bridge_dsm_method(bool preserve_config) { - Aml *method, *UUID, *ifctx, *ifctx1, *elsectx, *buf; - - /* Declare an _OSC (OS Control Handoff) method */ - aml_append(dev, aml_name_decl("SUPP", aml_int(0))); - aml_append(dev, aml_name_decl("CTRL", aml_int(0))); - method = aml_method("_OSC", 4, AML_NOTSERIALIZED); - aml_append(method, - aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1")); - - /* PCI Firmware Specification 3.0 - * 4.5.1. _OSC Interface for PCI Host Bridge Devices - * The _OSC interface for a PCI/PCI-X/PCI Express hierarchy is - * identified by the Universal Unique IDentifier (UUID) - * 33DB4D5B-1FF7-401C-9657-7441C03DD766 - */ - UUID = aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766"); - ifctx = aml_if(aml_equal(aml_arg(0), UUID)); - aml_append(ifctx, - aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2")); - aml_append(ifctx, - aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3")); - aml_append(ifctx, aml_store(aml_name("CDW2"), aml_name("SUPP"))); - aml_append(ifctx, aml_store(aml_name("CDW3"), aml_name("CTRL"))); - - /* - * Allow OS control for all 5 features: - * PCIeHotplug SHPCHotplug PME AER PCIeCapability. 
- */ - aml_append(ifctx, aml_and(aml_name("CTRL"), aml_int(0x1F), - aml_name("CTRL"))); - - ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1)))); - aml_append(ifctx1, aml_or(aml_name("CDW1"), aml_int(0x08), - aml_name("CDW1"))); - aml_append(ifctx, ifctx1); - - ifctx1 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), aml_name("CTRL")))); - aml_append(ifctx1, aml_or(aml_name("CDW1"), aml_int(0x10), - aml_name("CDW1"))); - aml_append(ifctx, ifctx1); - - aml_append(ifctx, aml_store(aml_name("CTRL"), aml_name("CDW3"))); - aml_append(ifctx, aml_return(aml_arg(3))); - aml_append(method, ifctx); - - elsectx = aml_else(); - aml_append(elsectx, aml_or(aml_name("CDW1"), aml_int(4), - aml_name("CDW1"))); - aml_append(elsectx, aml_return(aml_arg(3))); - aml_append(method, elsectx); - aml_append(dev, method); - - method = aml_method("_DSM", 4, AML_NOTSERIALIZED); + Aml *method = aml_method("_DSM", 4, AML_NOTSERIALIZED); + Aml *UUID, *ifctx, *ifctx1, *buf; + uint8_t byte_list[1] = {0}; /* PCI Firmware Specification 3.0 * 4.6.1. _DSM for PCI Express Slot Information @@ -114,16 +66,39 @@ static void acpi_dsdt_add_pci_osc(Aml *dev) UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D"); ifctx = aml_if(aml_equal(aml_arg(0), UUID)); ifctx1 = aml_if(aml_equal(aml_arg(2), aml_int(0))); - uint8_t byte_list[1] = {1}; + if (preserve_config) { + /* support for function 0 and function 5 */ + byte_list[0] = 0x21; + } buf = aml_buffer(1, byte_list); aml_append(ifctx1, aml_return(buf)); aml_append(ifctx, ifctx1); + if (preserve_config) { + Aml *ifctx2 = aml_if(aml_equal(aml_arg(2), aml_int(5))); + /* + * 0 - The operating system must not ignore the PCI configuration that + * firmware has done at boot time. + */ + aml_append(ifctx2, aml_return(aml_int(0))); + aml_append(ifctx, ifctx2); + } + aml_append(method, ifctx); byte_list[0] = 0; buf = aml_buffer(1, byte_list); aml_append(method, aml_return(buf)); - aml_append(dev, method); + return method; +} + +static void acpi_dsdt_add_host_bridge_methods(Aml *dev, + bool enable_native_pcie_hotplug, + bool preserve_config) +{ + /* Declare an _OSC (OS Control Handoff) method */ + aml_append(dev, + build_pci_host_bridge_osc_method(enable_native_pcie_hotplug)); + aml_append(dev, build_pci_host_bridge_dsm_method(preserve_config)); } void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) @@ -141,7 +116,10 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) QLIST_FOREACH(bus, &bus->child, sibling) { uint8_t bus_num = pci_bus_num(bus); uint8_t numa_node = pci_bus_numa_node(bus); + uint32_t uid; bool is_cxl = pci_bus_is_cxl(bus); + int devfn; + Aml *brg; if (!pci_bus_is_root(bus)) { continue; @@ -156,6 +134,8 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) nr_pcie_buses = bus_num; } + uid = object_property_get_uint(OBJECT(bus), "acpi_uid", + &error_fatal); dev = aml_device("PC%.02X", bus_num); if (is_cxl) { struct Aml *pkg = aml_package(2); @@ -168,7 +148,7 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03"))); } aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num))); - aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); + aml_append(dev, aml_name_decl("_UID", aml_int(uid))); aml_append(dev, aml_name_decl("_STR", aml_unicode("pxb Device"))); aml_append(dev, aml_name_decl("_CCA", aml_int(1))); if (numa_node != NUMA_NODE_UNASSIGNED) { @@ -179,7 +159,7 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) /* * Resources defined for PXBs are composed of the 
following parts: - * 1. The resources the pci-brige/pcie-root-port need. + * 1. The resources the pci-bridge/pcie-root-port need. * 2. The resources the devices behind pxb need. */ crs = build_crs(PCI_HOST_BRIDGE(BUS(bus)->parent), &crs_range_set, @@ -189,7 +169,24 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) if (is_cxl) { build_cxl_osc_method(dev); } else { - acpi_dsdt_add_pci_osc(dev); + /* pxb bridges do not have ACPI PCI Hot-plug enabled */ + acpi_dsdt_add_host_bridge_methods(dev, true, + cfg->preserve_config); + } + + for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { + /* ACPI spec: 1.0b: Table 6-2 _ADR Object Bus Types, PCI type */ + int adr = PCI_SLOT(devfn) << 16 | PCI_FUNC(devfn); + PCIDevice *pdev = bus->devices[devfn]; + + if (!pdev) { + continue; + } + + brg = aml_device("RP%.02X", devfn); + aml_append(brg, aml_name_decl("_ADR", aml_int(adr))); + build_acpi_egm_memory_dsdt(brg, bus_num + 1); + aml_append(dev, brg); } aml_append(scope, dev); @@ -264,7 +261,10 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) } aml_append(dev, aml_name_decl("_CRS", rbuf)); - acpi_dsdt_add_pci_osc(dev); + acpi_dsdt_add_host_bridge_methods(dev, cfg->pci_native_hotplug, + cfg->preserve_config); + + build_acpi_egm_memory_dsdt(dev, 0); Aml *dev_res0 = aml_device("%s", "RES0"); aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02"))); diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c index e9cf455bf52..b806a2286f7 100644 --- a/hw/pci-host/gpex.c +++ b/hw/pci-host/gpex.c @@ -32,6 +32,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "hw/irq.h" +#include "hw/pci/pci_bus.h" #include "hw/pci-host/gpex.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" @@ -41,20 +42,25 @@ * GPEX host */ +struct GPEXIrq { + qemu_irq irq; + int irq_num; +}; + static void gpex_set_irq(void *opaque, int irq_num, int level) { GPEXHost *s = opaque; - qemu_set_irq(s->irq[irq_num], level); + qemu_set_irq(s->irq[irq_num].irq, level); } int gpex_set_irq_num(GPEXHost *s, int index, int gsi) { - if (index >= GPEX_NUM_IRQS) { + if (index >= s->num_irqs) { return -EINVAL; } - s->irq_num[index] = gsi; + s->irq[index].irq_num = gsi; return 0; } @@ -62,7 +68,7 @@ static PCIINTxRoute gpex_route_intx_pin_to_irq(void *opaque, int pin) { PCIINTxRoute route; GPEXHost *s = opaque; - int gsi = s->irq_num[pin]; + int gsi = s->irq[pin].irq_num; route.irq = gsi; if (gsi < 0) { @@ -74,6 +80,13 @@ static PCIINTxRoute gpex_route_intx_pin_to_irq(void *opaque, int pin) return route; } +static int gpex_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin) +{ + PCIBus *bus = pci_device_root_bus(pci_dev); + + return (PCI_SLOT(pci_dev->devfn) + pin) % bus->nirq; +} + static void gpex_host_realize(DeviceState *dev, Error **errp) { PCIHostState *pci = PCI_HOST_BRIDGE(dev); @@ -82,6 +95,8 @@ static void gpex_host_realize(DeviceState *dev, Error **errp) PCIExpressHost *pex = PCIE_HOST_BRIDGE(dev); int i; + s->irq = g_malloc0_n(s->num_irqs, sizeof(*s->irq)); + pcie_host_mmcfg_init(pex, PCIE_MMCFG_SIZE_MAX); sysbus_init_mmio(sbd, &pex->mmio); @@ -128,26 +143,34 @@ static void gpex_host_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->io_ioport); } - for (i = 0; i < GPEX_NUM_IRQS; i++) { - sysbus_init_irq(sbd, &s->irq[i]); - s->irq_num[i] = -1; + for (i = 0; i < s->num_irqs; i++) { + sysbus_init_irq(sbd, &s->irq[i].irq); + s->irq[i].irq_num = -1; } pci->bus = pci_register_root_bus(dev, "pcie.0", gpex_set_irq, - pci_swizzle_map_irq_fn, s, &s->io_mmio, - &s->io_ioport, 0, 4, 
TYPE_PCIE_BUS); + gpex_swizzle_map_irq_fn, + s, &s->io_mmio, &s->io_ioport, 0, + s->num_irqs, TYPE_PCIE_BUS); pci_bus_set_route_irq_fn(pci->bus, gpex_route_intx_pin_to_irq); qdev_realize(DEVICE(&s->gpex_root), BUS(pci->bus), &error_fatal); } +static void gpex_host_unrealize(DeviceState *dev) +{ + GPEXHost *s = GPEX_HOST(dev); + + g_free(s->irq); +} + static const char *gpex_host_root_bus_path(PCIHostState *host_bridge, PCIBus *rootbus) { return "0000:00"; } -static Property gpex_host_properties[] = { +static const Property gpex_host_properties[] = { /* * Permit CPU accesses to unmapped areas of the PIO and MMIO windows * (discarding writes and returning -1 for reads) rather than aborting. @@ -166,16 +189,17 @@ static Property gpex_host_properties[] = { gpex_cfg.mmio64.base, 0), DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MMIO_SIZE, GPEXHost, gpex_cfg.mmio64.size, 0), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_UINT8("num-irqs", GPEXHost, num_irqs, PCI_NUM_PINS), }; -static void gpex_host_class_init(ObjectClass *klass, void *data) +static void gpex_host_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); hc->root_bus_path = gpex_host_root_bus_path; dc->realize = gpex_host_realize; + dc->unrealize = gpex_host_unrealize; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->fw_name = "pci"; device_class_set_props(dc, gpex_host_properties); @@ -213,7 +237,7 @@ static const VMStateDescription vmstate_gpex_root = { } }; -static void gpex_root_class_init(ObjectClass *klass, void *data) +static void gpex_root_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -237,7 +261,7 @@ static const TypeInfo gpex_root_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(GPEXRootState), .class_init = gpex_root_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/pci-host/grackle.c b/hw/pci-host/grackle.c index 8e589ff2c9e..f9da5a908c1 100644 --- a/hw/pci-host/grackle.c +++ b/hw/pci-host/grackle.c @@ -94,7 +94,7 @@ static void grackle_pci_realize(PCIDevice *d, Error **errp) d->config[PCI_CLASS_PROG] = 0x01; } -static void grackle_pci_class_init(ObjectClass *klass, void *data) +static void grackle_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -116,7 +116,7 @@ static const TypeInfo grackle_pci_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIDevice), .class_init = grackle_pci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -129,12 +129,11 @@ static char *grackle_ofw_unit_address(const SysBusDevice *dev) return g_strdup_printf("%x", s->ofw_addr); } -static Property grackle_properties[] = { +static const Property grackle_properties[] = { DEFINE_PROP_UINT32("ofw-addr", GrackleState, ofw_addr, -1), - DEFINE_PROP_END_OF_LIST() }; -static void grackle_class_init(ObjectClass *klass, void *data) +static void grackle_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass); diff --git a/hw/pci-host/gt64120.c b/hw/pci-host/gt64120.c index e02efc9e2ea..b1d96f62fe9 100644 --- a/hw/pci-host/gt64120.c +++ b/hw/pci-host/gt64120.c @@ -1,6 +1,8 @@ /* * QEMU GT64120 PCI host * 
+ * (Datasheet GT-64120 Rev 1.4 from Sep 14, 1999) + * * Copyright (c) 2006,2007 Aurelien Jarno * * Permission is hereby granted, free of charge, to any person obtaining a copy @@ -26,6 +28,7 @@ #include "qapi/error.h" #include "qemu/units.h" #include "qemu/log.h" +#include "qemu/bswap.h" #include "hw/qdev-properties.h" #include "hw/registerfields.h" #include "hw/pci/pci_device.h" @@ -318,38 +321,6 @@ static void gt64120_isd_mapping(GT64120State *s) memory_region_transaction_commit(); } -static void gt64120_update_pci_cfgdata_mapping(GT64120State *s) -{ - /* Indexed on MByteSwap bit, see Table 158: PCI_0 Command, Offset: 0xc00 */ - static const MemoryRegionOps *pci_host_data_ops[] = { - &pci_host_data_be_ops, &pci_host_data_le_ops - }; - PCIHostState *phb = PCI_HOST_BRIDGE(s); - - memory_region_transaction_begin(); - - /* - * The setting of the MByteSwap bit and MWordSwap bit in the PCI Internal - * Command Register determines how data transactions from the CPU to/from - * PCI are handled along with the setting of the Endianness bit in the CPU - * Configuration Register. See: - * - Table 16: 32-bit PCI Transaction Endianness - * - Table 158: PCI_0 Command, Offset: 0xc00 - */ - - if (memory_region_is_mapped(&phb->data_mem)) { - memory_region_del_subregion(&s->ISD_mem, &phb->data_mem); - object_unparent(OBJECT(&phb->data_mem)); - } - memory_region_init_io(&phb->data_mem, OBJECT(phb), - pci_host_data_ops[s->regs[GT_PCI0_CMD] & 1], - s, "pci-conf-data", 4); - memory_region_add_subregion_overlap(&s->ISD_mem, GT_PCI0_CFGDATA << 2, - &phb->data_mem, 1); - - memory_region_transaction_commit(); -} - static void gt64120_pci_mapping(GT64120State *s) { memory_region_transaction_begin(); @@ -643,7 +614,6 @@ static void gt64120_writel(void *opaque, hwaddr addr, case GT_PCI0_CMD: case GT_PCI1_CMD: s->regs[saddr] = val & 0x0401fc0f; - gt64120_update_pci_cfgdata_mapping(s); break; case GT_PCI0_TOR: case GT_PCI0_BS_SCS10: @@ -687,7 +657,6 @@ static void gt64120_writel(void *opaque, hwaddr addr, case GT_PCI0_CFGDATA: /* Mapped via in gt64120_pci_mapping() */ g_assert_not_reached(); - break; /* Interrupts */ case GT_INTRCAUSE: @@ -931,7 +900,6 @@ static uint64_t gt64120_readl(void *opaque, case GT_PCI0_CFGDATA: /* Mapped via in gt64120_pci_mapping() */ g_assert_not_reached(); - break; case GT_PCI0_CMD: case GT_PCI0_TOR: @@ -1024,6 +992,48 @@ static const MemoryRegionOps isd_mem_ops = { }, }; +static bool bswap(const GT64120State *s) +{ + PCIHostState *phb = PCI_HOST_BRIDGE(s); + /*check for bus == 0 && device == 0, Bits 11:15 = Device , Bits 16:23 = Bus*/ + bool is_phb_dev0 = extract32(phb->config_reg, 11, 13) == 0; + bool le_mode = FIELD_EX32(s->regs[GT_PCI0_CMD], GT_PCI0_CMD, MByteSwap); + /* Only swap for non-bridge devices in big-endian mode */ + return !le_mode && !is_phb_dev0; +} + +static uint64_t gt64120_pci_data_read(void *opaque, hwaddr addr, unsigned size) +{ + GT64120State *s = opaque; + uint32_t val = pci_host_data_le_ops.read(opaque, addr, size); + + if (bswap(s)) { + val = bswap32(val); + } + return val; +} + +static void gt64120_pci_data_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + GT64120State *s = opaque; + + if (bswap(s)) { + val = bswap32(val); + } + pci_host_data_le_ops.write(opaque, addr, val, size); +} + +static const MemoryRegionOps gt64120_pci_data_ops = { + .read = gt64120_pci_data_read, + .write = gt64120_pci_data_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + static void 
gt64120_reset(DeviceState *dev) { GT64120State *s = GT64120_PCI_HOST_BRIDGE(dev); @@ -1178,7 +1188,6 @@ static void gt64120_reset(DeviceState *dev) gt64120_isd_mapping(s); gt64120_pci_mapping(s); - gt64120_update_pci_cfgdata_mapping(s); } static void gt64120_realize(DeviceState *dev, Error **errp) @@ -1202,6 +1211,12 @@ static void gt64120_realize(DeviceState *dev, Error **errp) memory_region_add_subregion_overlap(&s->ISD_mem, GT_PCI0_CFGADDR << 2, &phb->conf_mem, 1); + memory_region_init_io(&phb->data_mem, OBJECT(phb), + &gt64120_pci_data_ops, + s, "pci-conf-data", 4); + memory_region_add_subregion_overlap(&s->ISD_mem, GT_PCI0_CFGDATA << 2, + &phb->data_mem, 1); + /* * The whole address space decoded by the GT-64120A doesn't generate @@ -1213,25 +1228,44 @@ static void gt64120_realize(DeviceState *dev, Error **errp) static void gt64120_pci_realize(PCIDevice *d, Error **errp) { - /* FIXME: Malta specific hw assumptions ahead */ + /* Values from chapter 17.16 "PCI Configuration" */ + + pci_set_long(d->wmask + PCI_BASE_ADDRESS_0, 0xfffff008); /* SCS[1:0] */ + pci_set_long(d->wmask + PCI_BASE_ADDRESS_1, 0xfffff008); /* SCS[3:2] */ + pci_set_long(d->wmask + PCI_BASE_ADDRESS_2, 0xfffff008); /* CS[2:0] */ + pci_set_long(d->wmask + PCI_BASE_ADDRESS_3, 0xfffff008); /* CS[3], BootCS */ + pci_set_long(d->wmask + PCI_BASE_ADDRESS_4, 0xfffff000); /* ISD MMIO */ + pci_set_long(d->wmask + PCI_BASE_ADDRESS_5, 0xfffff001); /* ISD I/O */ +} + +static void gt64120_pci_reset_hold(Object *obj, ResetType type) +{ + PCIDevice *d = PCI_DEVICE(obj); + + /* Values from chapter 17.16 "PCI Configuration" */ + pci_set_word(d->config + PCI_COMMAND, 0); pci_set_word(d->config + PCI_STATUS, PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MEDIUM); pci_config_set_prog_interface(d->config, 0); + pci_set_long(d->config + PCI_BASE_ADDRESS_0, 0x00000008); pci_set_long(d->config + PCI_BASE_ADDRESS_1, 0x01000008); pci_set_long(d->config + PCI_BASE_ADDRESS_2, 0x1c000000); pci_set_long(d->config + PCI_BASE_ADDRESS_3, 0x1f000000); pci_set_long(d->config + PCI_BASE_ADDRESS_4, 0x14000000); pci_set_long(d->config + PCI_BASE_ADDRESS_5, 0x14000001); + pci_set_byte(d->config + 0x3d, 0x01); } -static void gt64120_pci_class_init(ObjectClass *klass, void *data) +static void gt64120_pci_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + rc->phases.hold = gt64120_pci_reset_hold; k->realize = gt64120_pci_realize; k->vendor_id = PCI_VENDOR_ID_MARVELL; k->device_id = PCI_DEVICE_ID_MARVELL_GT6412X; @@ -1249,26 +1283,25 @@ static const TypeInfo gt64120_pci_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIDevice), .class_init = gt64120_pci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -static Property gt64120_properties[] = { +static const Property gt64120_properties[] = { DEFINE_PROP_BOOL("cpu-little-endian", GT64120State, cpu_little_endian, false), - DEFINE_PROP_END_OF_LIST(), }; -static void gt64120_class_init(ObjectClass *klass, void *data) +static void gt64120_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); device_class_set_props(dc, gt64120_properties); dc->realize = gt64120_realize; - dc->reset = gt64120_reset; + device_class_set_legacy_reset(dc, gt64120_reset); dc->vmsd = &vmstate_gt64120; } diff --git 
a/hw/pci-host/i440fx.c b/hw/pci-host/i440fx.c index 4f0a0438d77..e13bb1b53e4 100644 --- a/hw/pci-host/i440fx.c +++ b/hw/pci-host/i440fx.c @@ -315,7 +315,7 @@ static void i440fx_pcihost_realize(DeviceState *dev, Error **errp) i440fx_update_memory_mappings(f); } -static void i440fx_class_init(ObjectClass *klass, void *data) +static void i440fx_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -341,7 +341,7 @@ static const TypeInfo i440fx_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCII440FXState), .class_init = i440fx_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -353,7 +353,7 @@ static const char *i440fx_pcihost_root_bus_path(PCIHostState *host_bridge, return "0000:00"; } -static Property i440fx_props[] = { +static const Property i440fx_props[] = { DEFINE_PROP_SIZE(PCI_HOST_PROP_PCI_HOLE64_SIZE, I440FXState, pci_hole64_size, I440FX_PCI_HOST_HOLE64_SIZE_DEFAULT), DEFINE_PROP_SIZE(PCI_HOST_BELOW_4G_MEM_SIZE, I440FXState, @@ -362,10 +362,9 @@ static Property i440fx_props[] = { above_4g_mem_size, 0), DEFINE_PROP_BOOL("x-pci-hole64-fix", I440FXState, pci_hole64_fix, true), DEFINE_PROP_STRING(I440FX_HOST_PROP_PCI_TYPE, I440FXState, pci_type), - DEFINE_PROP_END_OF_LIST(), }; -static void i440fx_pcihost_class_init(ObjectClass *klass, void *data) +static void i440fx_pcihost_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); diff --git a/hw/pci-host/meson.build b/hw/pci-host/meson.build index 3001e93a437..937a0f72acf 100644 --- a/hw/pci-host/meson.build +++ b/hw/pci-host/meson.build @@ -28,6 +28,7 @@ pci_ss.add(when: 'CONFIG_ARTICIA', if_true: files('articia.c')) pci_ss.add(when: 'CONFIG_MV64361', if_true: files('mv64361.c')) # ARM devices +pci_ss.add(when: 'CONFIG_PCI_EXPRESS_FSL_IMX8M_PHY', if_true: files('fsl_imx8m_phy.c')) pci_ss.add(when: 'CONFIG_VERSATILE_PCI', if_true: files('versatile.c')) # HPPA devices diff --git a/hw/pci-host/mv64361.c b/hw/pci-host/mv64361.c index 01bd8c887fa..e05b6770102 100644 --- a/hw/pci-host/mv64361.c +++ b/hw/pci-host/mv64361.c @@ -17,7 +17,7 @@ #include "hw/irq.h" #include "hw/intc/i8259.h" #include "hw/qdev-properties.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "qemu/log.h" #include "qemu/error-report.h" #include "trace.h" @@ -26,7 +26,7 @@ #define TYPE_MV64361_PCI_BRIDGE "mv64361-pcibridge" -static void mv64361_pcibridge_class_init(ObjectClass *klass, void *data) +static void mv64361_pcibridge_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -46,7 +46,7 @@ static const TypeInfo mv64361_pcibridge_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIDevice), .class_init = mv64361_pcibridge_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -95,14 +95,14 @@ static void mv64361_pcihost_realize(DeviceState *dev, Error **errp) &s->mem, &s->io, 0, 4, TYPE_PCI_BUS); g_free(name); pci_create_simple(h->bus, 0, TYPE_MV64361_PCI_BRIDGE); + qdev_init_gpio_out(dev, s->irq, ARRAY_SIZE(s->irq)); } -static Property mv64361_pcihost_props[] = { +static const Property mv64361_pcihost_props[] = { DEFINE_PROP_UINT8("index", MV64361PCIState, index, 0), - 
DEFINE_PROP_END_OF_LIST() }; -static void mv64361_pcihost_class_init(ObjectClass *klass, void *data) +static void mv64361_pcihost_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -923,12 +923,12 @@ static void mv64361_reset(DeviceState *dev) set_mem_windows(s, 0xfbfff); } -static void mv64361_class_init(ObjectClass *klass, void *data) +static void mv64361_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = mv64361_realize; - dc->reset = mv64361_reset; + device_class_set_legacy_reset(dc, mv64361_reset); } static const TypeInfo mv64361_type_info = { diff --git a/hw/pci-host/pnv_phb.c b/hw/pci-host/pnv_phb.c index d4c118d4436..4b0ced79b08 100644 --- a/hw/pci-host/pnv_phb.c +++ b/hw/pci-host/pnv_phb.c @@ -17,7 +17,7 @@ #include "hw/ppc/pnv.h" #include "hw/qdev-properties.h" #include "qom/object.h" -#include "sysemu/sysemu.h" +#include "system/system.h" /* @@ -183,7 +183,7 @@ static const char *pnv_phb_root_bus_path(PCIHostState *host_bridge, return phb->bus_path; } -static Property pnv_phb_properties[] = { +static const Property pnv_phb_properties[] = { DEFINE_PROP_UINT32("index", PnvPHB, phb_id, 0), DEFINE_PROP_UINT32("chip-id", PnvPHB, chip_id, 0), DEFINE_PROP_UINT32("version", PnvPHB, version, 0), @@ -192,11 +192,9 @@ static Property pnv_phb_properties[] = { DEFINE_PROP_LINK("pec", PnvPHB, pec, TYPE_PNV_PHB4_PEC, PnvPhb4PecState *), - - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_phb_class_init(ObjectClass *klass, void *data) +static void pnv_phb_class_init(ObjectClass *klass, const void *data) { PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -302,13 +300,11 @@ static void pnv_phb_root_port_realize(DeviceState *dev, Error **errp) pci_config_set_interrupt_pin(pci->config, 0); } -static Property pnv_phb_root_port_properties[] = { +static const Property pnv_phb_root_port_properties[] = { DEFINE_PROP_UINT32("version", PnvPHBRootPort, version, 0), - - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_phb_root_port_class_init(ObjectClass *klass, void *data) +static void pnv_phb_root_port_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/pci-host/pnv_phb3.c b/hw/pci-host/pnv_phb3.c index 2a74dbe45f5..5d8383fac30 100644 --- a/hw/pci-host/pnv_phb3.c +++ b/hw/pci-host/pnv_phb3.c @@ -8,6 +8,7 @@ */ #include "qemu/osdep.h" #include "qemu/log.h" +#include "qemu/bswap.h" #include "qapi/visitor.h" #include "qapi/error.h" #include "hw/pci-host/pnv_phb3_regs.h" @@ -20,7 +21,7 @@ #include "hw/irq.h" #include "hw/qdev-properties.h" #include "qom/object.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #define phb3_error(phb, fmt, ...) 
\ qemu_log_mask(LOG_GUEST_ERROR, "phb3[%d:%d]: " fmt "\n", \ @@ -888,7 +889,7 @@ DECLARE_INSTANCE_CHECKER(IOMMUMemoryRegion, PNV_PHB3_IOMMU_MEMORY_REGION, TYPE_PNV_PHB3_IOMMU_MEMORY_REGION) static void pnv_phb3_iommu_memory_region_class_init(ObjectClass *klass, - void *data) + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); @@ -1090,15 +1091,14 @@ void pnv_phb3_update_regions(PnvPHB3 *phb) pnv_phb3_check_all_m64s(phb); } -static Property pnv_phb3_properties[] = { +static const Property pnv_phb3_properties[] = { DEFINE_PROP_UINT32("index", PnvPHB3, phb_id, 0), DEFINE_PROP_UINT32("chip-id", PnvPHB3, chip_id, 0), DEFINE_PROP_LINK("chip", PnvPHB3, chip, TYPE_PNV_CHIP, PnvChip *), DEFINE_PROP_LINK("phb-base", PnvPHB3, phb_base, TYPE_PNV_PHB, PnvPHB *), - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_phb3_class_init(ObjectClass *klass, void *data) +static void pnv_phb3_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1150,7 +1150,7 @@ static void pnv_phb3_root_bus_set_prop(Object *obj, Visitor *v, } } -static void pnv_phb3_root_bus_class_init(ObjectClass *klass, void *data) +static void pnv_phb3_root_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); diff --git a/hw/pci-host/pnv_phb3_msi.c b/hw/pci-host/pnv_phb3_msi.c index 77d673da540..3a83311faff 100644 --- a/hw/pci-host/pnv_phb3_msi.c +++ b/hw/pci-host/pnv_phb3_msi.c @@ -15,7 +15,7 @@ #include "hw/pci/msi.h" #include "hw/irq.h" #include "hw/qdev-properties.h" -#include "sysemu/reset.h" +#include "system/reset.h" static uint64_t phb3_msi_ive_addr(PnvPHB3 *phb, int srcno) { @@ -284,7 +284,7 @@ static void phb3_msi_instance_init(Object *obj) ics->offset = 0; } -static void phb3_msi_class_init(ObjectClass *klass, void *data) +static void phb3_msi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ICSStateClass *isc = ICS_CLASS(klass); diff --git a/hw/pci-host/pnv_phb3_pbcq.c b/hw/pci-host/pnv_phb3_pbcq.c index 82f70efa431..1f7a149580d 100644 --- a/hw/pci-host/pnv_phb3_pbcq.c +++ b/hw/pci-host/pnv_phb3_pbcq.c @@ -337,7 +337,7 @@ static void phb3_pbcq_instance_init(Object *obj) OBJ_PROP_LINK_STRONG); } -static void pnv_pbcq_class_init(ObjectClass *klass, void *data) +static void pnv_pbcq_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); @@ -354,7 +354,7 @@ static const TypeInfo pnv_pbcq_type_info = { .instance_size = sizeof(PnvPBCQState), .instance_init = phb3_pbcq_instance_init, .class_init = pnv_pbcq_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } diff --git a/hw/pci-host/pnv_phb4.c b/hw/pci-host/pnv_phb4.c index 99991008c13..18992054e83 100644 --- a/hw/pci-host/pnv_phb4.c +++ b/hw/pci-host/pnv_phb4.c @@ -8,6 +8,7 @@ */ #include "qemu/osdep.h" #include "qemu/log.h" +#include "qemu/bswap.h" #include "qapi/visitor.h" #include "qapi/error.h" #include "target/ppc/cpu.h" @@ -1362,7 +1363,7 @@ DECLARE_INSTANCE_CHECKER(IOMMUMemoryRegion, PNV_PHB4_IOMMU_MEMORY_REGION, TYPE_PNV_PHB4_IOMMU_MEMORY_REGION) static void pnv_phb4_iommu_memory_region_class_init(ObjectClass *klass, - void *data) + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); @@ -1688,16 +1689,15 @@ static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno, } } -static Property pnv_phb4_properties[] = { +static const 
Property pnv_phb4_properties[] = { DEFINE_PROP_UINT32("index", PnvPHB4, phb_id, 0), DEFINE_PROP_UINT32("chip-id", PnvPHB4, chip_id, 0), DEFINE_PROP_LINK("pec", PnvPHB4, pec, TYPE_PNV_PHB4_PEC, PnvPhb4PecState *), DEFINE_PROP_LINK("phb-base", PnvPHB4, phb_base, TYPE_PNV_PHB, PnvPHB *), - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_phb4_class_init(ObjectClass *klass, void *data) +static void pnv_phb4_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); XiveNotifierClass *xfc = XIVE_NOTIFIER_CLASS(klass); @@ -1715,7 +1715,7 @@ static const TypeInfo pnv_phb4_type_info = { .instance_init = pnv_phb4_instance_init, .instance_size = sizeof(PnvPHB4), .class_init = pnv_phb4_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_XIVE_NOTIFIER }, { }, } @@ -1762,7 +1762,7 @@ static void pnv_phb4_root_bus_set_prop(Object *obj, Visitor *v, } } -static void pnv_phb4_root_bus_class_init(ObjectClass *klass, void *data) +static void pnv_phb4_root_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); diff --git a/hw/pci-host/pnv_phb4_pec.c b/hw/pci-host/pnv_phb4_pec.c index ce8e228f987..5bac1c42ed0 100644 --- a/hw/pci-host/pnv_phb4_pec.c +++ b/hw/pci-host/pnv_phb4_pec.c @@ -19,7 +19,7 @@ #include "hw/ppc/pnv.h" #include "hw/ppc/pnv_chip.h" #include "hw/qdev-properties.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include @@ -197,6 +197,9 @@ static PnvPHB *pnv_pec_default_phb_realize(PnvPhb4PecState *pec, return phb; } +#define XPEC_P9_PCI_LANE_CFG PPC_BITMASK(10, 11) +#define XPEC_P10_PCI_LANE_CFG PPC_BITMASK(0, 1) + static void pnv_pec_realize(DeviceState *dev, Error **errp) { PnvPhb4PecState *pec = PNV_PHB4_PEC(dev); @@ -211,6 +214,43 @@ static void pnv_pec_realize(DeviceState *dev, Error **errp) pec->num_phbs = pecc->num_phbs[pec->index]; + /* Pervasive chiplet */ + object_initialize_child(OBJECT(pec), "nest-pervasive-common", + &pec->nest_pervasive, + TYPE_PNV_NEST_CHIPLET_PERVASIVE); + if (!qdev_realize(DEVICE(&pec->nest_pervasive), NULL, errp)) { + return; + } + + /* Set up pervasive chiplet registers */ + /* + * Most registers are not set up, this just sets the PCI CONF1 link-width + * field because skiboot probes it. + */ + if (pecc->version == PNV_PHB4_VERSION) { + /* + * On P9, PEC2 has configurable 1/2/3-furcation). + * Make it trifurcated (x8, x4, x4) to match pnv_pec_num_phbs. + */ + if (pec->index == 2) { + pec->nest_pervasive.control_regs.cplt_cfg1 = + SETFIELD(XPEC_P9_PCI_LANE_CFG, + pec->nest_pervasive.control_regs.cplt_cfg1, + 0b10); + } + } else if (pecc->version == PNV_PHB5_VERSION) { + /* + * On P10, both PECs are configurable 1/2/3-furcation). + * Both are trifurcated to match pnv_phb5_pec_num_stacks. 
+ */ + pec->nest_pervasive.control_regs.cplt_cfg1 = + SETFIELD(XPEC_P10_PCI_LANE_CFG, + pec->nest_pervasive.control_regs.cplt_cfg1, + 0b10); + } else { + g_assert_not_reached(); + } + /* Create PHBs if running with defaults */ if (defaults_enabled()) { g_assert(pec->num_phbs <= MAX_PHBS_PER_PEC); @@ -283,17 +323,23 @@ static int pnv_pec_dt_xscom(PnvXScomInterface *dev, void *fdt, return 0; } -static Property pnv_pec_properties[] = { +static const Property pnv_pec_properties[] = { DEFINE_PROP_UINT32("index", PnvPhb4PecState, index, 0), DEFINE_PROP_UINT32("chip-id", PnvPhb4PecState, chip_id, 0), DEFINE_PROP_LINK("chip", PnvPhb4PecState, chip, TYPE_PNV_CHIP, PnvChip *), - DEFINE_PROP_END_OF_LIST(), }; +#define XPEC_PCI_CPLT_OFFSET 0x1000000ULL + +static uint32_t pnv_pec_xscom_cplt_base(PnvPhb4PecState *pec) +{ + return PNV9_XSCOM_PEC_NEST_CPLT_BASE + XPEC_PCI_CPLT_OFFSET * pec->index; +} + static uint32_t pnv_pec_xscom_pci_base(PnvPhb4PecState *pec) { - return PNV9_XSCOM_PEC_PCI_BASE + 0x1000000 * pec->index; + return PNV9_XSCOM_PEC_PCI_BASE + XPEC_PCI_CPLT_OFFSET * pec->index; } static uint32_t pnv_pec_xscom_nest_base(PnvPhb4PecState *pec) @@ -308,7 +354,7 @@ static uint32_t pnv_pec_xscom_nest_base(PnvPhb4PecState *pec) */ static const uint32_t pnv_pec_num_phbs[] = { 1, 2, 3 }; -static void pnv_pec_class_init(ObjectClass *klass, void *data) +static void pnv_pec_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); @@ -322,6 +368,7 @@ static void pnv_pec_class_init(ObjectClass *klass, void *data) device_class_set_props(dc, pnv_pec_properties); dc->user_creatable = false; + pecc->xscom_cplt_base = pnv_pec_xscom_cplt_base; pecc->xscom_nest_base = pnv_pec_xscom_nest_base; pecc->xscom_pci_base = pnv_pec_xscom_pci_base; pecc->xscom_nest_size = PNV9_XSCOM_PEC_NEST_SIZE; @@ -341,7 +388,7 @@ static const TypeInfo pnv_pec_type_info = { .instance_size = sizeof(PnvPhb4PecState), .class_init = pnv_pec_class_init, .class_size = sizeof(PnvPhb4PecClass), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } @@ -350,6 +397,10 @@ static const TypeInfo pnv_pec_type_info = { /* * POWER10 definitions */ +static uint32_t pnv_phb5_pec_xscom_cplt_base(PnvPhb4PecState *pec) +{ + return PNV10_XSCOM_PEC_NEST_CPLT_BASE + XPEC_PCI_CPLT_OFFSET * pec->index; +} static uint32_t pnv_phb5_pec_xscom_pci_base(PnvPhb4PecState *pec) { @@ -368,12 +419,13 @@ static uint32_t pnv_phb5_pec_xscom_nest_base(PnvPhb4PecState *pec) */ static const uint32_t pnv_phb5_pec_num_stacks[] = { 3, 3 }; -static void pnv_phb5_pec_class_init(ObjectClass *klass, void *data) +static void pnv_phb5_pec_class_init(ObjectClass *klass, const void *data) { PnvPhb4PecClass *pecc = PNV_PHB4_PEC_CLASS(klass); static const char compat[] = "ibm,power10-pbcq"; static const char stk_compat[] = "ibm,power10-phb-stack"; + pecc->xscom_cplt_base = pnv_phb5_pec_xscom_cplt_base; pecc->xscom_nest_base = pnv_phb5_pec_xscom_nest_base; pecc->xscom_pci_base = pnv_phb5_pec_xscom_pci_base; pecc->xscom_nest_size = PNV10_XSCOM_PEC_NEST_SIZE; @@ -393,7 +445,7 @@ static const TypeInfo pnv_phb5_pec_type_info = { .instance_size = sizeof(PnvPhb4PecState), .class_init = pnv_phb5_pec_class_init, .class_size = sizeof(PnvPhb4PecClass), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } diff --git a/hw/pci-host/ppc440_pcix.c b/hw/pci-host/ppc440_pcix.c index 
ef212d99aaf..744b85e49cc 100644 --- a/hw/pci-host/ppc440_pcix.c +++ b/hw/pci-host/ppc440_pcix.c @@ -519,12 +519,12 @@ static void ppc440_pcix_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->iomem); } -static void ppc440_pcix_class_init(ObjectClass *klass, void *data) +static void ppc440_pcix_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = ppc440_pcix_realize; - dc->reset = ppc440_pcix_reset; + device_class_set_legacy_reset(dc, ppc440_pcix_reset); } static const TypeInfo ppc440_pcix_info = { diff --git a/hw/pci-host/ppc4xx_pci.c b/hw/pci-host/ppc4xx_pci.c index b6c6c8993c4..25478176881 100644 --- a/hw/pci-host/ppc4xx_pci.c +++ b/hw/pci-host/ppc4xx_pci.c @@ -27,7 +27,7 @@ #include "hw/pci-host/ppc4xx.h" #include "migration/vmstate.h" #include "qemu/module.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "hw/pci/pci_device.h" #include "hw/pci/pci_host.h" #include "trace.h" @@ -349,7 +349,7 @@ static void ppc4xx_pcihost_realize(DeviceState *dev, Error **errp) qemu_register_reset(ppc4xx_pci_reset, s); } -static void ppc4xx_host_bridge_class_init(ObjectClass *klass, void *data) +static void ppc4xx_host_bridge_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -370,13 +370,13 @@ static const TypeInfo ppc4xx_host_bridge_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIDevice), .class_init = ppc4xx_host_bridge_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -static void ppc4xx_pcihost_class_init(ObjectClass *klass, void *data) +static void ppc4xx_pcihost_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/pci-host/ppce500.c b/hw/pci-host/ppce500.c index 95b983b2b3c..975d191ccb8 100644 --- a/hw/pci-host/ppce500.c +++ b/hw/pci-host/ppce500.c @@ -16,13 +16,10 @@ #include "qemu/osdep.h" #include "hw/irq.h" -#include "hw/ppc/e500-ccsr.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "hw/pci/pci_device.h" #include "hw/pci/pci_host.h" -#include "qemu/bswap.h" -#include "qemu/module.h" #include "hw/pci-host/ppce500.h" #include "qom/object.h" @@ -419,11 +416,12 @@ static const VMStateDescription vmstate_ppce500_pci = { static void e500_pcihost_bridge_realize(PCIDevice *d, Error **errp) { PPCE500PCIBridgeState *b = PPC_E500_PCI_BRIDGE(d); - PPCE500CCSRState *ccsr = CCSR(container_get(qdev_get_machine(), - "/e500-ccsr")); + SysBusDevice *ccsr = SYS_BUS_DEVICE( + object_resolve_path_component(qdev_get_machine(), "e500-ccsr")); + MemoryRegion *ccsr_space = sysbus_mmio_get_region(ccsr, 0); - memory_region_init_alias(&b->bar0, OBJECT(ccsr), "e500-pci-bar0", &ccsr->ccsr_space, - 0, int128_get64(ccsr->ccsr_space.size)); + memory_region_init_alias(&b->bar0, OBJECT(ccsr), "e500-pci-bar0", + ccsr_space, 0, int128_get64(ccsr_space->size)); pci_register_bar(d, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &b->bar0); } @@ -475,7 +473,7 @@ static void e500_pcihost_realize(DeviceState *dev, Error **errp) address_space_init(&s->bm_as, &s->bm, "pci-bm"); pci_setup_iommu(b, &ppce500_iommu_ops, s); - pci_create_simple(b, 0, "e500-host-bridge"); + pci_create_simple(b, 0, TYPE_PPC_E500_PCI_BRIDGE); memory_region_init(&s->container, OBJECT(h), "pci-container", PCIE500_ALL_SIZE); memory_region_init_io(&h->conf_mem, OBJECT(h), &pci_host_conf_be_ops, h, @@ -491,7 +489,7 @@ static void 
e500_pcihost_realize(DeviceState *dev, Error **errp) pci_bus_set_route_irq_fn(b, e500_route_intx_pin_to_irq); } -static void e500_host_bridge_class_init(ObjectClass *klass, void *data) +static void e500_host_bridge_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -508,24 +506,12 @@ static void e500_host_bridge_class_init(ObjectClass *klass, void *data) dc->user_creatable = false; } -static const TypeInfo e500_host_bridge_info = { - .name = TYPE_PPC_E500_PCI_BRIDGE, - .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(PPCE500PCIBridgeState), - .class_init = e500_host_bridge_class_init, - .interfaces = (InterfaceInfo[]) { - { INTERFACE_CONVENTIONAL_PCI_DEVICE }, - { }, - }, -}; - -static Property pcihost_properties[] = { +static const Property pcihost_properties[] = { DEFINE_PROP_UINT32("first_slot", PPCE500PCIState, first_slot, 0x11), DEFINE_PROP_UINT32("first_pin_irq", PPCE500PCIState, first_pin_irq, 0x1), - DEFINE_PROP_END_OF_LIST(), }; -static void e500_pcihost_class_init(ObjectClass *klass, void *data) +static void e500_pcihost_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -535,17 +521,23 @@ static void e500_pcihost_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_ppce500_pci; } -static const TypeInfo e500_pcihost_info = { - .name = TYPE_PPC_E500_PCI_HOST_BRIDGE, - .parent = TYPE_PCI_HOST_BRIDGE, - .instance_size = sizeof(PPCE500PCIState), - .class_init = e500_pcihost_class_init, +static const TypeInfo e500_pci_types[] = { + { + .name = TYPE_PPC_E500_PCI_BRIDGE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PPCE500PCIBridgeState), + .class_init = e500_host_bridge_class_init, + .interfaces = (const InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, + }, + { + .name = TYPE_PPC_E500_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(PPCE500PCIState), + .class_init = e500_pcihost_class_init, + }, }; -static void e500_pci_register_types(void) -{ - type_register_static(&e500_pcihost_info); - type_register_static(&e500_host_bridge_info); -} - -type_init(e500_pci_register_types) +DEFINE_TYPES(e500_pci_types) diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c index 0b6cbaed7ed..1951ae440cc 100644 --- a/hw/pci-host/q35.c +++ b/hw/pci-host/q35.c @@ -170,7 +170,7 @@ static void q35_host_get_pci_hole64_end(Object *obj, Visitor *v, * properties need to be initialized manually by * q35_host_initfn() after the object_initialize() call. 
*/ -static Property q35_host_props[] = { +static const Property q35_host_props[] = { DEFINE_PROP_UINT64(PCIE_HOST_MCFG_BASE, Q35PCIHost, parent_obj.base_addr, MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT), DEFINE_PROP_SIZE(PCI_HOST_PROP_PCI_HOLE64_SIZE, Q35PCIHost, @@ -182,10 +182,9 @@ static Property q35_host_props[] = { DEFINE_PROP_BOOL(PCI_HOST_PROP_SMM_RANGES, Q35PCIHost, mch.has_smm_ranges, true), DEFINE_PROP_BOOL("x-pci-hole64-fix", Q35PCIHost, pci_hole64_fix, true), - DEFINE_PROP_END_OF_LIST(), }; -static void q35_host_class_init(ObjectClass *klass, void *data) +static void q35_host_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); @@ -662,31 +661,20 @@ static void mch_realize(PCIDevice *d, Error **errp) OBJECT(&mch->smram)); } -uint64_t mch_mcfg_base(void) -{ - bool ambiguous; - Object *o = object_resolve_path_type("", TYPE_MCH_PCI_DEVICE, &ambiguous); - if (!o) { - return 0; - } - return MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT; -} - -static Property mch_props[] = { +static const Property mch_props[] = { DEFINE_PROP_UINT16("extended-tseg-mbytes", MCHPCIState, ext_tseg_mbytes, 16), DEFINE_PROP_BOOL("smbase-smram", MCHPCIState, has_smram_at_smbase, true), - DEFINE_PROP_END_OF_LIST(), }; -static void mch_class_init(ObjectClass *klass, void *data) +static void mch_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); k->realize = mch_realize; k->config_write = mch_write_config; - dc->reset = mch_reset; + device_class_set_legacy_reset(dc, mch_reset); device_class_set_props(dc, mch_props); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->desc = "Host bridge"; @@ -715,7 +703,7 @@ static const TypeInfo mch_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(MCHPCIState), .class_init = mch_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/pci-host/raven.c b/hw/pci-host/raven.c index a7dfddd69ea..f8c0be5d21c 100644 --- a/hw/pci-host/raven.c +++ b/hw/pci-host/raven.c @@ -24,7 +24,6 @@ */ #include "qemu/osdep.h" -#include "qemu/datadir.h" #include "qemu/units.h" #include "qemu/log.h" #include "qapi/error.h" @@ -35,9 +34,7 @@ #include "migration/vmstate.h" #include "hw/intc/i8259.h" #include "hw/irq.h" -#include "hw/loader.h" #include "hw/or-irq.h" -#include "elf.h" #include "qom/object.h" #define TYPE_RAVEN_PCI_DEVICE "raven" @@ -47,10 +44,6 @@ OBJECT_DECLARE_SIMPLE_TYPE(RavenPCIState, RAVEN_PCI_DEVICE) struct RavenPCIState { PCIDevice dev; - - uint32_t elf_machine; - char *bios_name; - MemoryRegion bios; }; typedef struct PRePPCIState PREPPCIState; @@ -75,11 +68,8 @@ struct PRePPCIState { RavenPCIState pci_dev; int contiguous_map; - bool is_legacy_prep; }; -#define BIOS_SIZE (1 * MiB) - #define PCI_IO_BASE_ADDR 0x80000000 /* Physical address on main bus */ static inline uint32_t raven_pci_io_config(hwaddr addr) @@ -243,22 +233,18 @@ static void raven_pcihost_realizefn(DeviceState *d, Error **errp) MemoryRegion *address_space_mem = get_system_memory(); int i; - if (s->is_legacy_prep) { - for (i = 0; i < PCI_NUM_PINS; i++) { - sysbus_init_irq(dev, &s->pci_irqs[i]); - } - } else { - /* According to PReP specification section 6.1.6 "System Interrupt - * Assignments", all PCI interrupts are routed via IRQ 15 */ - s->or_irq = OR_IRQ(object_new(TYPE_OR_IRQ)); - object_property_set_int(OBJECT(s->or_irq), "num-lines", PCI_NUM_PINS, - 
&error_fatal); - qdev_realize(DEVICE(s->or_irq), NULL, &error_fatal); - sysbus_init_irq(dev, &s->or_irq->out_irq); - - for (i = 0; i < PCI_NUM_PINS; i++) { - s->pci_irqs[i] = qdev_get_gpio_in(DEVICE(s->or_irq), i); - } + /* + * According to PReP specification section 6.1.6 "System Interrupt + * Assignments", all PCI interrupts are routed via IRQ 15 + */ + s->or_irq = OR_IRQ(object_new(TYPE_OR_IRQ)); + object_property_set_int(OBJECT(s->or_irq), "num-lines", PCI_NUM_PINS, + &error_fatal); + qdev_realize(DEVICE(s->or_irq), NULL, &error_fatal); + sysbus_init_irq(dev, &s->or_irq->out_irq); + + for (i = 0; i < PCI_NUM_PINS; i++) { + s->pci_irqs[i] = qdev_get_gpio_in(DEVICE(s->or_irq), i); } qdev_init_gpio_in(d, raven_change_gpio, 1); @@ -338,48 +324,9 @@ static void raven_pcihost_initfn(Object *obj) static void raven_realize(PCIDevice *d, Error **errp) { - RavenPCIState *s = RAVEN_PCI_DEVICE(d); - char *filename; - int bios_size = -1; - d->config[PCI_CACHE_LINE_SIZE] = 0x08; d->config[PCI_LATENCY_TIMER] = 0x10; d->config[PCI_CAPABILITY_LIST] = 0x00; - - if (!memory_region_init_rom_nomigrate(&s->bios, OBJECT(s), "bios", - BIOS_SIZE, errp)) { - return; - } - memory_region_add_subregion(get_system_memory(), (uint32_t)(-BIOS_SIZE), - &s->bios); - if (s->bios_name) { - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, s->bios_name); - if (filename) { - if (s->elf_machine != EM_NONE) { - bios_size = load_elf(filename, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, 1, s->elf_machine, - 0, 0); - } - if (bios_size < 0) { - bios_size = get_image_size(filename); - if (bios_size > 0 && bios_size <= BIOS_SIZE) { - hwaddr bios_addr; - bios_size = (bios_size + 0xfff) & ~0xfff; - bios_addr = (uint32_t)(-BIOS_SIZE); - bios_size = load_image_targphys(filename, bios_addr, - bios_size); - } - } - } - g_free(filename); - if (bios_size < 0 || bios_size > BIOS_SIZE) { - memory_region_del_subregion(get_system_memory(), &s->bios); - error_setg(errp, "Could not load bios image '%s'", s->bios_name); - return; - } - } - - vmstate_register_ram_global(&s->bios); } static const VMStateDescription vmstate_raven = { @@ -392,7 +339,7 @@ static const VMStateDescription vmstate_raven = { }, }; -static void raven_class_init(ObjectClass *klass, void *data) +static void raven_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -416,29 +363,18 @@ static const TypeInfo raven_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(RavenPCIState), .class_init = raven_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -static Property raven_pcihost_properties[] = { - DEFINE_PROP_UINT32("elf-machine", PREPPCIState, pci_dev.elf_machine, - EM_NONE), - DEFINE_PROP_STRING("bios-name", PREPPCIState, pci_dev.bios_name), - /* Temporary workaround until legacy prep machine is removed */ - DEFINE_PROP_BOOL("is-legacy-prep", PREPPCIState, is_legacy_prep, - false), - DEFINE_PROP_END_OF_LIST() -}; - -static void raven_pcihost_class_init(ObjectClass *klass, void *data) +static void raven_pcihost_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->realize = raven_pcihost_realizefn; - device_class_set_props(dc, raven_pcihost_properties); dc->fw_name = "pci"; } diff --git a/hw/pci-host/remote.c b/hw/pci-host/remote.c index bfb25ef6af8..e6d2af4502a 100644 --- a/hw/pci-host/remote.c +++ 
b/hw/pci-host/remote.c @@ -28,7 +28,7 @@ #include "hw/pci/pcie_host.h" #include "hw/qdev-properties.h" #include "hw/pci-host/remote.h" -#include "exec/memory.h" +#include "system/memory.h" static const char *remote_pcihost_root_bus_path(PCIHostState *host_bridge, PCIBus *rootbus) @@ -46,7 +46,7 @@ static void remote_pcihost_realize(DeviceState *dev, Error **errp) 0, TYPE_PCIE_BUS); } -static void remote_pcihost_class_init(ObjectClass *klass, void *data) +static void remote_pcihost_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); diff --git a/hw/pci-host/sabre.c b/hw/pci-host/sabre.c index d0851b48b02..538624c5079 100644 --- a/hw/pci-host/sabre.c +++ b/hw/pci-host/sabre.c @@ -37,7 +37,7 @@ #include "qapi/error.h" #include "qemu/log.h" #include "qemu/module.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "trace.h" /* @@ -456,7 +456,7 @@ static void sabre_pci_realize(PCIDevice *d, Error **errp) PCI_STATUS_DEVSEL_MEDIUM); } -static void sabre_pci_class_init(ObjectClass *klass, void *data) +static void sabre_pci_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -477,7 +477,7 @@ static const TypeInfo sabre_pci_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(SabrePCIState), .class_init = sabre_pci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -492,19 +492,18 @@ static char *sabre_ofw_unit_address(const SysBusDevice *dev) (uint32_t)(s->special_base & 0xffffffff)); } -static Property sabre_properties[] = { +static const Property sabre_properties[] = { DEFINE_PROP_UINT64("special-base", SabreState, special_base, 0), DEFINE_PROP_UINT64("mem-base", SabreState, mem_base, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void sabre_class_init(ObjectClass *klass, void *data) +static void sabre_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass); dc->realize = sabre_realize; - dc->reset = sabre_reset; + device_class_set_legacy_reset(dc, sabre_reset); device_class_set_props(dc, sabre_properties); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->fw_name = "pci"; diff --git a/hw/pci-host/sh_pci.c b/hw/pci-host/sh_pci.c index 4edebced5eb..62fb945075f 100644 --- a/hw/pci-host/sh_pci.c +++ b/hw/pci-host/sh_pci.c @@ -28,7 +28,6 @@ #include "hw/irq.h" #include "hw/pci/pci_device.h" #include "hw/pci/pci_host.h" -#include "qemu/bswap.h" #include "qemu/module.h" #include "qom/object.h" @@ -153,7 +152,7 @@ static void sh_pcic_pci_realize(PCIDevice *d, Error **errp) PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MEDIUM); } -static void sh_pcic_pci_class_init(ObjectClass *klass, void *data) +static void sh_pcic_pci_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -168,7 +167,7 @@ static void sh_pcic_pci_class_init(ObjectClass *klass, void *data) dc->user_creatable = false; } -static void sh_pcic_host_class_init(ObjectClass *klass, void *data) +static void sh_pcic_host_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -186,7 +185,7 @@ static const TypeInfo sh_pcic_types[] = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIDevice), .class_init = sh_pcic_pci_class_init, - .interfaces = 
(InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/pci-host/uninorth.c b/hw/pci-host/uninorth.c index e4c1abd8715..194037d6e75 100644 --- a/hw/pci-host/uninorth.c +++ b/hw/pci-host/uninorth.c @@ -311,7 +311,7 @@ static void unin_internal_pci_host_realize(PCIDevice *d, Error **errp) d->config[PCI_CAPABILITY_LIST] = 0x00; } -static void unin_main_pci_host_class_init(ObjectClass *klass, void *data) +static void unin_main_pci_host_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -333,13 +333,13 @@ static const TypeInfo unin_main_pci_host_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIDevice), .class_init = unin_main_pci_host_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -static void u3_agp_pci_host_class_init(ObjectClass *klass, void *data) +static void u3_agp_pci_host_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -361,13 +361,13 @@ static const TypeInfo u3_agp_pci_host_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIDevice), .class_init = u3_agp_pci_host_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -static void unin_agp_pci_host_class_init(ObjectClass *klass, void *data) +static void unin_agp_pci_host_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -389,13 +389,14 @@ static const TypeInfo unin_agp_pci_host_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIDevice), .class_init = unin_agp_pci_host_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -static void unin_internal_pci_host_class_init(ObjectClass *klass, void *data) +static void unin_internal_pci_host_class_init(ObjectClass *klass, + const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -417,18 +418,17 @@ static const TypeInfo unin_internal_pci_host_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIDevice), .class_init = unin_internal_pci_host_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -static Property pci_unin_main_pci_host_props[] = { +static const Property pci_unin_main_pci_host_props[] = { DEFINE_PROP_UINT32("ofw-addr", UNINHostState, ofw_addr, -1), - DEFINE_PROP_END_OF_LIST() }; -static void pci_unin_main_class_init(ObjectClass *klass, void *data) +static void pci_unin_main_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass); @@ -448,7 +448,7 @@ static const TypeInfo pci_unin_main_info = { .class_init = pci_unin_main_class_init, }; -static void pci_u3_agp_class_init(ObjectClass *klass, void *data) +static void pci_u3_agp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -464,7 +464,7 @@ static const TypeInfo pci_u3_agp_info = { .class_init = pci_u3_agp_class_init, }; -static void pci_unin_agp_class_init(ObjectClass *klass, void *data) +static void pci_unin_agp_class_init(ObjectClass 
*klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -480,7 +480,7 @@ static const TypeInfo pci_unin_agp_info = { .class_init = pci_unin_agp_class_init, }; -static void pci_unin_internal_class_init(ObjectClass *klass, void *data) +static void pci_unin_internal_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -536,7 +536,7 @@ static void unin_init(Object *obj) sysbus_init_mmio(sbd, &s->mem); } -static void unin_class_init(ObjectClass *klass, void *data) +static void unin_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/pci-host/versatile.c b/hw/pci-host/versatile.c index 0e65deb3f97..8ea26e3ff00 100644 --- a/hw/pci-host/versatile.c +++ b/hw/pci-host/versatile.c @@ -246,7 +246,7 @@ static uint64_t pci_vpb_reg_read(void *opaque, hwaddr addr, static const MemoryRegionOps pci_vpb_reg_ops = { .read = pci_vpb_reg_read, .write = pci_vpb_reg_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -312,7 +312,7 @@ static uint64_t pci_vpb_config_read(void *opaque, hwaddr addr, static const MemoryRegionOps pci_vpb_config_ops = { .read = pci_vpb_config_read, .write = pci_vpb_config_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; static int pci_vpb_map_irq(PCIDevice *d, int irq_num) @@ -471,7 +471,7 @@ static void versatile_pci_host_realize(PCIDevice *d, Error **errp) pci_set_byte(d->config + PCI_LATENCY_TIMER, 0x10); } -static void versatile_pci_host_class_init(ObjectClass *klass, void *data) +static void versatile_pci_host_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -492,24 +492,23 @@ static const TypeInfo versatile_pci_host_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIDevice), .class_init = versatile_pci_host_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -static Property pci_vpb_properties[] = { +static const Property pci_vpb_properties[] = { DEFINE_PROP_UINT8("broken-irq-mapping", PCIVPBState, irq_mapping_prop, PCI_VPB_IRQMAP_ASSUME_OK), - DEFINE_PROP_END_OF_LIST() }; -static void pci_vpb_class_init(ObjectClass *klass, void *data) +static void pci_vpb_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = pci_vpb_realize; - dc->reset = pci_vpb_reset; + device_class_set_legacy_reset(dc, pci_vpb_reset); dc->vmsd = &pci_vpb_vmstate; device_class_set_props(dc, pci_vpb_properties); } diff --git a/hw/pci-host/xen_igd_pt.c b/hw/pci-host/xen_igd_pt.c index d094b675d6d..5dd17ef236f 100644 --- a/hw/pci-host/xen_igd_pt.c +++ b/hw/pci-host/xen_igd_pt.c @@ -95,7 +95,8 @@ static void igd_pt_i440fx_realize(PCIDevice *pci_dev, Error **errp) } } -static void igd_passthrough_i440fx_class_init(ObjectClass *klass, void *data) +static void igd_passthrough_i440fx_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); diff --git a/hw/pci-host/xilinx-pcie.c b/hw/pci-host/xilinx-pcie.c index c9ab7052f43..c71492de9e7 100644 --- a/hw/pci-host/xilinx-pcie.c +++ b/hw/pci-host/xilinx-pcie.c @@ -156,17 +156,16 @@ static void xilinx_pcie_host_init(Object *obj) qdev_prop_set_bit(DEVICE(root), "multifunction", false); } -static Property xilinx_pcie_host_props[] = { 
+static const Property xilinx_pcie_host_props[] = { DEFINE_PROP_UINT32("bus_nr", XilinxPCIEHost, bus_nr, 0), DEFINE_PROP_SIZE("cfg_base", XilinxPCIEHost, cfg_base, 0), DEFINE_PROP_SIZE("cfg_size", XilinxPCIEHost, cfg_size, 32 * MiB), DEFINE_PROP_SIZE("mmio_base", XilinxPCIEHost, mmio_base, 0), DEFINE_PROP_SIZE("mmio_size", XilinxPCIEHost, mmio_size, 1 * MiB), DEFINE_PROP_BOOL("link_up", XilinxPCIEHost, link_up, true), - DEFINE_PROP_END_OF_LIST(), }; -static void xilinx_pcie_host_class_init(ObjectClass *klass, void *data) +static void xilinx_pcie_host_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); @@ -287,7 +286,7 @@ static void xilinx_pcie_root_realize(PCIDevice *pci_dev, Error **errp) } } -static void xilinx_pcie_root_class_init(ObjectClass *klass, void *data) +static void xilinx_pcie_root_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -300,7 +299,7 @@ static void xilinx_pcie_root_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_BRIDGE_HOST; k->realize = xilinx_pcie_root_realize; k->exit = pci_bridge_exitfn; - dc->reset = pci_bridge_reset; + device_class_set_legacy_reset(dc, pci_bridge_reset); k->config_read = xilinx_pcie_root_config_read; k->config_write = xilinx_pcie_root_config_write; /* @@ -315,7 +314,7 @@ static const TypeInfo xilinx_pcie_root_info = { .parent = TYPE_PCI_BRIDGE, .instance_size = sizeof(XilinxPCIERoot), .class_init = xilinx_pcie_root_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { } }, diff --git a/hw/pci/msi.c b/hw/pci/msi.c index 8104ac1d91a..b9f5b45920b 100644 --- a/hw/pci/msi.c +++ b/hw/pci/msi.c @@ -23,7 +23,7 @@ #include "hw/xen/xen.h" #include "qemu/range.h" #include "qapi/error.h" -#include "sysemu/xen.h" +#include "system/xen.h" #include "hw/i386/kvm/xen_evtchn.h" diff --git a/hw/pci/msix.c b/hw/pci/msix.c index 487e49834ee..8c7f6709e2a 100644 --- a/hw/pci/msix.c +++ b/hw/pci/msix.c @@ -15,11 +15,12 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" #include "hw/pci/pci.h" #include "hw/xen/xen.h" -#include "sysemu/xen.h" +#include "system/xen.h" #include "migration/qemu-file-types.h" #include "migration/vmstate.h" #include "qemu/range.h" @@ -71,7 +72,7 @@ static uint8_t *msix_pending_byte(PCIDevice *dev, int vector) return dev->msix_pba + vector / 8; } -static int msix_is_pending(PCIDevice *dev, int vector) +int msix_is_pending(PCIDevice *dev, unsigned int vector) { return *msix_pending_byte(dev, vector) & msix_pending_mask(vector); } @@ -250,7 +251,7 @@ static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr, PCIDevice *dev = opaque; if (dev->msix_vector_poll_notifier) { unsigned vector_start = addr * 8; - unsigned vector_end = MIN(addr + size * 8, dev->msix_entries_nr); + unsigned vector_end = MIN((addr + size) * 8, dev->msix_entries_nr); dev->msix_vector_poll_notifier(dev, vector_start, vector_end); } @@ -260,6 +261,14 @@ static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr, static void msix_pba_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { + PCIDevice *dev = opaque; + + qemu_log_mask(LOG_GUEST_ERROR, + "PCI [%s:%02x:%02x.%x] attempt to write to MSI-X " + "PBA at 0x%" FMT_PCIBUS ", ignoring.\n", + pci_root_bus_path(dev), pci_dev_bus_num(dev), + PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), + addr); } 
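/*
 * Editorial note, not part of the patch: the msix_pba_mmio_read hunk above
 * changes the poll-notifier upper bound from "addr + size * 8" to
 * "(addr + size) * 8", fixing an operator-precedence bug. Each PBA byte
 * tracks 8 vectors, so a read of `size` bytes starting at byte offset `addr`
 * covers vectors [addr * 8, (addr + size) * 8), clamped to the vector count.
 * The standalone sketch below only illustrates that arithmetic; the helper
 * names are hypothetical and do not exist in QEMU.
 */
#include <assert.h>

static unsigned pba_vector_end(unsigned addr, unsigned size, unsigned nr_vectors)
{
    unsigned end = (addr + size) * 8;          /* exclusive upper bound */
    return end < nr_vectors ? end : nr_vectors;
}

static void pba_range_demo(void)
{
    /* A 4-byte PBA read at offset 1 covers vectors 8..39 (end == 40). */
    assert(pba_vector_end(1, 4, 64) == 40);
    /* The pre-fix expression addr + size * 8 would have yielded 33 here,
     * under-reporting the polled range whenever addr is non-zero. */
}
/* End of editorial note. */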
static const MemoryRegionOps msix_pba_mmio_ops = { diff --git a/hw/pci/pci-hmp-cmds.c b/hw/pci/pci-hmp-cmds.c index b09fce93770..a5f6483cc3d 100644 --- a/hw/pci/pci-hmp-cmds.c +++ b/hw/pci/pci-hmp-cmds.c @@ -20,7 +20,7 @@ #include "monitor/monitor.h" #include "pci-internal.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/qapi-commands-pci.h" #include "qemu/cutils.h" @@ -83,15 +83,25 @@ static void hmp_info_pci_device(Monitor *mon, const PciDeviceInfo *dev) monitor_printf(mon, " BAR%" PRId64 ": ", region->value->bar); if (!strcmp(region->value->type, "io")) { - monitor_printf(mon, "I/O at 0x%04" PRIx64 - " [0x%04" PRIx64 "].\n", - addr, addr + size - 1); + if (addr != PCI_BAR_UNMAPPED) { + monitor_printf(mon, "I/O at 0x%04" PRIx64 + " [0x%04" PRIx64 "]\n", + addr, addr + size - 1); + } else { + monitor_printf(mon, "I/O (not mapped)\n"); + } } else { - monitor_printf(mon, "%d bit%s memory at 0x%08" PRIx64 - " [0x%08" PRIx64 "].\n", - region->value->mem_type_64 ? 64 : 32, - region->value->prefetch ? " prefetchable" : "", - addr, addr + size - 1); + if (addr != PCI_BAR_UNMAPPED) { + monitor_printf(mon, "%d bit%s memory at 0x%08" PRIx64 + " [0x%08" PRIx64 "]\n", + region->value->mem_type_64 ? 64 : 32, + region->value->prefetch ? " prefetchable" : "", + addr, addr + size - 1); + } else { + monitor_printf(mon, "%d bit%s memory (not mapped)\n", + region->value->mem_type_64 ? 64 : 32, + region->value->prefetch ? " prefetchable" : ""); + } } } diff --git a/hw/pci/pci-stub.c b/hw/pci/pci-stub.c index f0508682d2b..3397d0c82ea 100644 --- a/hw/pci/pci-stub.c +++ b/hw/pci/pci-stub.c @@ -46,14 +46,12 @@ void hmp_pcie_aer_inject_error(Monitor *mon, const QDict *qdict) /* kvm-all wants this */ MSIMessage pci_get_msi_message(PCIDevice *dev, int vector) { - g_assert(false); - return (MSIMessage){}; + g_assert_not_reached(); } uint16_t pci_requester_id(PCIDevice *dev) { - g_assert(false); - return 0; + g_assert_not_reached(); } /* Required by ahci.c */ diff --git a/hw/pci/pci.c b/hw/pci/pci.c index fab86d05672..f025d04a24b 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -32,12 +32,13 @@ #include "hw/pci/pci_host.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" +#include "migration/cpr.h" #include "migration/qemu-file-types.h" #include "migration/vmstate.h" #include "net/net.h" -#include "sysemu/numa.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/numa.h" +#include "system/runstate.h" +#include "system/system.h" #include "hw/loader.h" #include "qemu/error-report.h" #include "qemu/range.h" @@ -46,6 +47,7 @@ #include "hw/pci/msix.h" #include "hw/hotplug.h" #include "hw/boards.h" +#include "hw/nvram/fw_cfg.h" #include "qapi/error.h" #include "qemu/cutils.h" #include "pci-internal.h" @@ -53,13 +55,6 @@ #include "hw/xen/xen.h" #include "hw/i386/kvm/xen_evtchn.h" -//#define DEBUG_PCI -#ifdef DEBUG_PCI -# define PCI_DPRINTF(format, ...) printf(format, ## __VA_ARGS__) -#else -# define PCI_DPRINTF(format, ...) 
do { } while (0) -#endif - bool pci_available = true; static char *pcibus_get_dev_path(DeviceState *dev); @@ -67,11 +62,24 @@ static char *pcibus_get_fw_dev_path(DeviceState *dev); static void pcibus_reset_hold(Object *obj, ResetType type); static bool pcie_has_upstream_port(PCIDevice *dev); -static Property pci_props[] = { +static void prop_pci_busnr_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint8_t busnr = pci_dev_bus_num(PCI_DEVICE(obj)); + + visit_type_uint8(v, name, &busnr, errp); +} + +static const PropertyInfo prop_pci_busnr = { + .type = "busnr", + .get = prop_pci_busnr_get, +}; + +static const Property pci_props[] = { DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1), DEFINE_PROP_STRING("romfile", PCIDevice, romfile), DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, UINT32_MAX), - DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1), + DEFINE_PROP_INT32("rombar", PCIDevice, rom_bar, -1), DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present, QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false), DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present, @@ -85,7 +93,12 @@ static Property pci_props[] = { QEMU_PCIE_ERR_UNC_MASK_BITNR, true), DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present, QEMU_PCIE_ARI_NEXTFN_1_BITNR, false), - DEFINE_PROP_END_OF_LIST() + DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice, + max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE), + DEFINE_PROP_STRING("sriov-pf", PCIDevice, sriov_pf), + DEFINE_PROP_BIT("x-pcie-ext-tag", PCIDevice, cap_present, + QEMU_PCIE_EXT_TAG_BITNR, true), + { .name = "busnr", .info = &prop_pci_busnr }, }; static const VMStateDescription vmstate_pcibus = { @@ -116,6 +129,12 @@ static GSequence *pci_acpi_index_list(void) return used_acpi_index_list; } +static void pci_set_master(PCIDevice *d, bool enable) +{ + memory_region_set_enabled(&d->bus_master_enable_region, enable); + d->is_master = enable; /* cache the status */ +} + static void pci_init_bus_master(PCIDevice *pci_dev) { AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev); @@ -123,7 +142,7 @@ static void pci_init_bus_master(PCIDevice *pci_dev) memory_region_init_alias(&pci_dev->bus_master_enable_region, OBJECT(pci_dev), "bus master", dma_as->root, 0, memory_region_size(dma_as->root)); - memory_region_set_enabled(&pci_dev->bus_master_enable_region, false); + pci_set_master(pci_dev, false); memory_region_add_subregion(&pci_dev->bus_master_container_region, 0, &pci_dev->bus_master_enable_region); } @@ -198,11 +217,57 @@ static uint16_t pcibus_numa_node(PCIBus *bus) return NUMA_NODE_UNASSIGNED; } -static void pci_bus_class_init(ObjectClass *klass, void *data) +bool pci_bus_add_fw_cfg_extra_pci_roots(FWCfgState *fw_cfg, + PCIBus *bus, + Error **errp) +{ + Object *obj; + + if (!bus) { + return true; + } + obj = OBJECT(bus); + + return fw_cfg_add_file_from_generator(fw_cfg, obj->parent, + object_get_canonical_path_component(obj), + "etc/extra-pci-roots", errp); +} + +static GByteArray *pci_bus_fw_cfg_gen_data(Object *obj, Error **errp) +{ + PCIBus *bus = PCI_BUS(obj); + GByteArray *byte_array; + uint64_t extra_hosts = 0; + + if (!bus) { + return NULL; + } + + QLIST_FOREACH(bus, &bus->child, sibling) { + /* look for expander root buses */ + if (pci_bus_is_root(bus)) { + extra_hosts++; + } + } + + if (!extra_hosts) { + return NULL; + } + extra_hosts = cpu_to_le64(extra_hosts); + + byte_array = g_byte_array_new(); + g_byte_array_append(byte_array, + (const void *)&extra_hosts, sizeof(extra_hosts)); + + return 
byte_array; +} + +static void pci_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); PCIBusClass *pbc = PCI_BUS_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); + FWCfgDataGeneratorClass *fwgc = FW_CFG_DATA_GENERATOR_CLASS(klass); k->print_dev = pcibus_dev_print; k->get_dev_path = pcibus_get_dev_path; @@ -214,6 +279,8 @@ static void pci_bus_class_init(ObjectClass *klass, void *data) pbc->bus_num = pcibus_num; pbc->numa_node = pcibus_numa_node; + + fwgc->get_data = pci_bus_fw_cfg_gen_data; } static const TypeInfo pci_bus_info = { @@ -222,6 +289,10 @@ static const TypeInfo pci_bus_info = { .instance_size = sizeof(PCIBus), .class_size = sizeof(PCIBusClass), .class_init = pci_bus_class_init, + .interfaces = (const InterfaceInfo[]) { + { TYPE_FW_CFG_DATA_GENERATOR_INTERFACE }, + { } + } }; static const TypeInfo cxl_interface_info = { @@ -239,7 +310,7 @@ static const TypeInfo conventional_pci_interface_info = { .parent = TYPE_INTERFACE, }; -static void pcie_bus_class_init(ObjectClass *klass, void *data) +static void pcie_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); @@ -365,6 +436,84 @@ static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg) attrs, NULL); } +/* + * Register and track a PM capability. If wmask is also enabled for the power + * state field of the pmcsr register, guest writes may change the device PM + * state. BAR access is only enabled while the device is in the D0 state. + * Return the capability offset or negative error code. + */ +int pci_pm_init(PCIDevice *d, uint8_t offset, Error **errp) +{ + int cap = pci_add_capability(d, PCI_CAP_ID_PM, offset, PCI_PM_SIZEOF, errp); + + if (cap < 0) { + return cap; + } + + d->pm_cap = cap; + d->cap_present |= QEMU_PCI_CAP_PM; + + return cap; +} + +static uint8_t pci_pm_state(PCIDevice *d) +{ + uint16_t pmcsr; + + if (!(d->cap_present & QEMU_PCI_CAP_PM)) { + return 0; + } + + pmcsr = pci_get_word(d->config + d->pm_cap + PCI_PM_CTRL); + + return pmcsr & PCI_PM_CTRL_STATE_MASK; +} + +/* + * Update the PM capability state based on the new value stored in config + * space respective to the old, pre-write state provided. If the new value + * is rejected (unsupported or invalid transition) restore the old value. + * Return the resulting PM state. + */ +static uint8_t pci_pm_update(PCIDevice *d, uint32_t addr, int l, uint8_t old) +{ + uint16_t pmc; + uint8_t new; + + if (!(d->cap_present & QEMU_PCI_CAP_PM) || + !range_covers_byte(addr, l, d->pm_cap + PCI_PM_CTRL)) { + return old; + } + + new = pci_pm_state(d); + if (new == old) { + return old; + } + + pmc = pci_get_word(d->config + d->pm_cap + PCI_PM_PMC); + + /* + * Transitions to D1 & D2 are only allowed if supported. Devices may + * only transition to higher D-states or to D0. 
+ */ + if ((!(pmc & PCI_PM_CAP_D1) && new == 1) || + (!(pmc & PCI_PM_CAP_D2) && new == 2) || + (old && new && new < old)) { + pci_word_test_and_clear_mask(d->config + d->pm_cap + PCI_PM_CTRL, + PCI_PM_CTRL_STATE_MASK); + pci_word_test_and_set_mask(d->config + d->pm_cap + PCI_PM_CTRL, + old); + trace_pci_pm_bad_transition(d->name, pci_dev_bus_num(d), + PCI_SLOT(d->devfn), PCI_FUNC(d->devfn), + old, new); + return old; + } + + trace_pci_pm_transition(d->name, pci_dev_bus_num(d), PCI_SLOT(d->devfn), + PCI_FUNC(d->devfn), old, new); + return new; +} + static void pci_reset_regions(PCIDevice *dev) { int r; @@ -389,6 +538,10 @@ static void pci_reset_regions(PCIDevice *dev) static void pci_do_device_reset(PCIDevice *dev) { + if ((dev->cap_present & QEMU_PCI_SKIP_RESET_ON_CPR) && cpr_is_incoming()) { + return; + } + pci_device_deassert_intx(dev); assert(dev->irq_state == 0); @@ -404,6 +557,11 @@ static void pci_do_device_reset(PCIDevice *dev) pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) | pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE)); dev->config[PCI_CACHE_LINE_SIZE] = 0x0; + /* Default PM state is D0 */ + if (dev->cap_present & QEMU_PCI_CAP_PM) { + pci_word_test_and_clear_mask(dev->config + dev->pm_cap + PCI_PM_CTRL, + PCI_PM_CTRL_STATE_MASK); + } pci_reset_regions(dev); pci_update_mappings(dev); @@ -657,9 +815,8 @@ static int get_pci_config_device(QEMUFile *f, void *pv, size_t size, pci_bridge_update_mappings(PCI_BRIDGE(s)); } - memory_region_set_enabled(&s->bus_master_enable_region, - pci_get_word(s->config + PCI_COMMAND) - & PCI_COMMAND_MASTER); + pci_set_master(s, pci_get_word(s->config + PCI_COMMAND) + & PCI_COMMAND_MASTER); g_free(config); return 0; @@ -733,10 +890,17 @@ static bool migrate_is_not_pcie(void *opaque, int version_id) return !pci_is_express((PCIDevice *)opaque); } +static int pci_post_load(void *opaque, int version_id) +{ + pcie_sriov_pf_post_load(opaque); + return 0; +} + const VMStateDescription vmstate_pci_device = { .name = "PCIDevice", .version_id = 2, .minimum_version_id = 1, + .post_load = pci_post_load, .fields = (const VMStateField[]) { VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice), VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice, @@ -952,13 +1116,8 @@ static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp) dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; } - /* - * With SR/IOV and ARI, a device at function 0 need not be a multifunction - * device, as it may just be a VF that ended up with function 0 in - * the legacy PCI interpretation. Avoid failing in such cases: - */ - if (pci_is_vf(dev) && - dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { + /* SR/IOV is not handled here. */ + if (pci_is_vf(dev)) { return; } @@ -991,7 +1150,8 @@ static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp) } /* function 0 indicates single function, so function > 0 must be NULL */ for (func = 1; func < PCI_FUNC_MAX; ++func) { - if (bus->devices[PCI_DEVFN(slot, func)]) { + PCIDevice *device = bus->devices[PCI_DEVFN(slot, func)]; + if (device && !pci_is_vf(device)) { error_setg(errp, "PCI: %x.0 indicates single function, " "but %x.%x is already populated.", slot, slot, func); @@ -1179,14 +1339,15 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCI_SLOT(devfn), PCI_FUNC(devfn), name, bus->devices[devfn]->name, bus->devices[devfn]->qdev.id); return NULL; - } /* - * Populating function 0 triggers a scan from the guest that - * exposes other non-zero functions. 
Hence we need to ensure that - * function 0 wasn't added yet. - */ - else if (dev->hotplugged && - !pci_is_vf(pci_dev) && - pci_get_function_0(pci_dev)) { + } + + /* + * Populating function 0 triggers a scan from the guest that + * exposes other non-zero functions. Hence we need to ensure that + * function 0 wasn't added yet. + */ + if (dev->hotplugged && !pci_is_vf(pci_dev) && + pci_get_function_0(pci_dev)) { error_setg(errp, "PCI: slot %d function 0 already occupied by %s," " new func %s cannot be exposed to guest.", PCI_SLOT(pci_get_function_0(pci_dev)->devfn), @@ -1204,6 +1365,8 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, "bus master container", UINT64_MAX); address_space_init(&pci_dev->bus_master_as, &pci_dev->bus_master_container_region, pci_dev->name); + pci_dev->bus_master_as.max_bounce_buffer_size = + pci_dev->max_bounce_buffer_size; if (phase_check(PHASE_MACHINE_READY)) { pci_init_bus_master(pci_dev); @@ -1276,6 +1439,7 @@ static void pci_qdev_unrealize(DeviceState *dev) pci_unregister_io_regions(pci_dev); pci_del_option_rom(pci_dev); + pcie_sriov_unregister_device(pci_dev); if (pc->exit) { pc->exit(pci_dev); @@ -1307,7 +1471,6 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num, pcibus_t size = memory_region_size(memory); uint8_t hdr_type; - assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */ assert(region_num >= 0); assert(region_num < PCI_NUM_REGIONS); assert(is_power_of_2(size)); @@ -1318,7 +1481,7 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num, assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2); r = &pci_dev->io_regions[region_num]; - r->addr = PCI_BAR_UNMAPPED; + assert(!r->size); r->size = size; r->type = type; r->memory = memory; @@ -1326,22 +1489,35 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num, ? 
pci_get_bus(pci_dev)->address_space_io : pci_get_bus(pci_dev)->address_space_mem; - wmask = ~(size - 1); - if (region_num == PCI_ROM_SLOT) { - /* ROM enable bit is writable */ - wmask |= PCI_ROM_ADDRESS_ENABLE; - } - - addr = pci_bar(pci_dev, region_num); - pci_set_long(pci_dev->config + addr, type); + if (pci_is_vf(pci_dev)) { + PCIDevice *pf = pci_dev->exp.sriov_vf.pf; + assert(!pf || type == pf->exp.sriov_pf.vf_bar_type[region_num]); - if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) && - r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { - pci_set_quad(pci_dev->wmask + addr, wmask); - pci_set_quad(pci_dev->cmask + addr, ~0ULL); + r->addr = pci_bar_address(pci_dev, region_num, r->type, r->size); + if (r->addr != PCI_BAR_UNMAPPED) { + memory_region_add_subregion_overlap(r->address_space, + r->addr, r->memory, 1); + } } else { - pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff); - pci_set_long(pci_dev->cmask + addr, 0xffffffff); + r->addr = PCI_BAR_UNMAPPED; + + wmask = ~(size - 1); + if (region_num == PCI_ROM_SLOT) { + /* ROM enable bit is writable */ + wmask |= PCI_ROM_ADDRESS_ENABLE; + } + + addr = pci_bar(pci_dev, region_num); + pci_set_long(pci_dev->config + addr, type); + + if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) && + r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { + pci_set_quad(pci_dev->wmask + addr, wmask); + pci_set_quad(pci_dev->cmask + addr, ~0ULL); + } else { + pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff); + pci_set_long(pci_dev->cmask + addr, 0xffffffff); + } } } @@ -1430,7 +1606,11 @@ static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg, pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET); uint16_t vf_stride = pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE); - uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride; + uint32_t vf_num = d->devfn - (pf->devfn + vf_offset); + + if (vf_num) { + vf_num /= vf_stride; + } if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) { new_addr = pci_get_quad(pf->config + bar); @@ -1525,7 +1705,7 @@ static void pci_update_mappings(PCIDevice *d) continue; new_addr = pci_bar_address(d, i, r->type, r->size); - if (!d->has_power) { + if (!d->enabled || pci_pm_state(d)) { new_addr = PCI_BAR_UNMAPPED; } @@ -1555,7 +1735,7 @@ static void pci_update_mappings(PCIDevice *d) pci_update_vga(d); } -static inline int pci_irq_disabled(PCIDevice *d) +int pci_irq_disabled(PCIDevice *d) { return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE; } @@ -1591,6 +1771,7 @@ uint32_t pci_default_read_config(PCIDevice *d, void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l) { + uint8_t new_pm_state, old_pm_state = pci_pm_state(d); int i, was_irq_disabled = pci_irq_disabled(d); uint32_t val = val_in; @@ -1603,17 +1784,21 @@ void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask); d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */ } + + new_pm_state = pci_pm_update(d, addr, l, old_pm_state); + if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) || ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) || ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) || - range_covers_byte(addr, l, PCI_COMMAND)) + range_covers_byte(addr, l, PCI_COMMAND) || + !!new_pm_state != !!old_pm_state) { pci_update_mappings(d); + } if (ranges_overlap(addr, l, PCI_COMMAND, 2)) { pci_update_irq_disabled(d, was_irq_disabled); - memory_region_set_enabled(&d->bus_master_enable_region, - (pci_get_word(d->config + PCI_COMMAND) - & 
PCI_COMMAND_MASTER) && d->has_power); + pci_set_master(d, (pci_get_word(d->config + PCI_COMMAND) & + PCI_COMMAND_MASTER) && d->enabled); } msi_write_config(d, addr, val_in, l); @@ -2098,6 +2283,11 @@ static void pci_qdev_realize(DeviceState *qdev, Error **errp) } } + if (!pcie_sriov_register_device(pci_dev, errp)) { + pci_qdev_unrealize(DEVICE(pci_dev)); + return; + } + /* * A PCIe Downstream Port that do not have ARI Forwarding enabled must * associate only Device 0 with the device attached to the bus @@ -2269,12 +2459,12 @@ static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size) /* Only a valid rom will be patched. */ rom_magic = pci_get_word(ptr); if (rom_magic != 0xaa55) { - PCI_DPRINTF("Bad ROM magic %04x\n", rom_magic); + trace_pci_bad_rom_magic(rom_magic, 0xaa55); return; } pcir_offset = pci_get_word(ptr + 0x18); if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) { - PCI_DPRINTF("Bad PCIR offset 0x%x or signature\n", pcir_offset); + trace_pci_bad_pcir_offset(pcir_offset); return; } @@ -2283,8 +2473,8 @@ static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size) rom_vendor_id = pci_get_word(ptr + pcir_offset + 4); rom_device_id = pci_get_word(ptr + pcir_offset + 6); - PCI_DPRINTF("%s: ROM id %04x%04x / PCI id %04x%04x\n", pdev->romfile, - vendor_id, device_id, rom_vendor_id, rom_device_id); + trace_pci_rom_and_pci_ids(pdev->romfile, vendor_id, device_id, + rom_vendor_id, rom_device_id); checksum = ptr[6]; @@ -2292,7 +2482,7 @@ static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size) /* Patch vendor id and checksum (at offset 6 for etherboot roms). */ checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8); checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8); - PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum); + trace_pci_rom_checksum_change(ptr[6], checksum); ptr[6] = checksum; pci_set_word(ptr + pcir_offset + 4, vendor_id); } @@ -2301,7 +2491,7 @@ static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size) /* Patch device id and checksum (at offset 6 for etherboot roms). 
*/ checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8); checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8); - PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum); + trace_pci_rom_checksum_change(ptr[6], checksum); ptr[6] = checksum; pci_set_word(ptr + pcir_offset + 6, device_id); } @@ -2352,6 +2542,14 @@ static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, return; } + if (pci_is_vf(pdev)) { + if (pdev->rom_bar > 0) { + error_setg(errp, "ROM BAR cannot be enabled for SR-IOV VF"); + } + + return; + } + if (load_file || pdev->romsize == UINT32_MAX) { path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile); if (path == NULL) { @@ -2625,7 +2823,7 @@ MemoryRegion *pci_address_space_io(PCIDevice *dev) return pci_get_bus(dev)->address_space_io; } -static void pci_device_class_init(ObjectClass *klass, void *data) +static void pci_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); @@ -2633,9 +2831,13 @@ static void pci_device_class_init(ObjectClass *klass, void *data) k->unrealize = pci_qdev_unrealize; k->bus_type = TYPE_PCI_BUS; device_class_set_props(k, pci_props); + object_class_property_set_description( + klass, "x-max-bounce-buffer-size", + "Maximum buffer size allocated for bounce buffers used for mapped " + "access to indirect DMA memory"); } -static void pci_device_class_base_init(ObjectClass *klass, void *data) +static void pci_device_class_base_init(ObjectClass *klass, const void *data) { if (!object_class_is_abstract(klass)) { ObjectClass *conventional = @@ -2655,20 +2857,21 @@ static void pci_device_class_base_init(ObjectClass *klass, void *data) * For call sites which don't need aliased BDF, passing NULL to * aliased_[bus|devfn] is allowed. * + * Returns true if PCI device RID is aliased or false otherwise. + * * @piommu_bus: return root #PCIBus backed by an IOMMU for the PCI device. * * @aliased_bus: return aliased #PCIBus of the PCI device, optional. * * @aliased_devfn: return aliased devfn of the PCI device, optional. */ -static void pci_device_get_iommu_bus_devfn(PCIDevice *dev, - PCIBus **piommu_bus, - PCIBus **aliased_bus, - int *aliased_devfn) +bool pci_device_get_iommu_bus_devfn(PCIDevice *dev, PCIBus **piommu_bus, + PCIBus **aliased_bus, int *aliased_devfn) { PCIBus *bus = pci_get_bus(dev); PCIBus *iommu_bus = bus; int devfn = dev->devfn; + bool aliased = false; while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) { PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev); @@ -2705,6 +2908,20 @@ static void pci_device_get_iommu_bus_devfn(PCIDevice *dev, devfn = parent->devfn; bus = parent_bus; } + aliased = true; + } + + /* + * When multiple PCI Express Root Buses are defined using pxb-pcie, + * the IOMMU configuration may be specific to each root bus. However, + * pxb-pcie acts as a special root complex whose parent is effectively + * the default root complex(pcie.0). Ensure that we retrieve the + * correct IOMMU ops(if any) in such cases. 
+ */ + if (pci_bus_is_express(iommu_bus) && pci_bus_is_root(iommu_bus)) { + if (parent_bus->iommu_per_bus) { + break; + } } iommu_bus = parent_bus; @@ -2726,6 +2943,8 @@ static void pci_device_get_iommu_bus_devfn(PCIDevice *dev, if (aliased_devfn) { *aliased_devfn = devfn; } + + return aliased; } AddressSpace *pci_device_iommu_address_space(PCIDevice *dev) @@ -2742,6 +2961,42 @@ AddressSpace *pci_device_iommu_address_space(PCIDevice *dev) return &address_space_memory; } +AddressSpace *pci_device_iommu_msi_address_space(PCIDevice *dev) +{ + PCIBus *bus; + PCIBus *iommu_bus; + int devfn; + + pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); + if (iommu_bus) { + if (iommu_bus->iommu_ops->get_msi_address_space) { + return iommu_bus->iommu_ops->get_msi_address_space(bus, + iommu_bus->iommu_opaque, devfn); + } else { + return iommu_bus->iommu_ops->get_address_space(bus, + iommu_bus->iommu_opaque, devfn); + } + } + return &address_space_memory; +} + +int pci_iommu_init_iotlb_notifier(PCIDevice *dev, IOMMUNotifier *n, + IOMMUNotify fn, void *opaque) +{ + PCIBus *bus; + PCIBus *iommu_bus; + int devfn; + + pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + if (iommu_bus && iommu_bus->iommu_ops->init_iotlb_notifier) { + iommu_bus->iommu_ops->init_iotlb_notifier(bus, iommu_bus->iommu_opaque, + devfn, n, fn, opaque); + return 0; + } + + return -ENODEV; +} + bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod, Error **errp) { @@ -2773,6 +3028,181 @@ void pci_device_unset_iommu_device(PCIDevice *dev) } } +uint64_t pci_device_get_viommu_flags(PCIDevice *dev) +{ + PCIBus *iommu_bus; + + pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL); + if (iommu_bus && iommu_bus->iommu_ops->get_viommu_flags) { + return iommu_bus->iommu_ops->get_viommu_flags(iommu_bus->iommu_opaque); + } + return 0; +} + +int pci_pri_request_page(PCIDevice *dev, uint32_t pasid, bool priv_req, + bool exec_req, hwaddr addr, bool lpig, + uint16_t prgi, bool is_read, bool is_write) +{ + PCIBus *bus; + PCIBus *iommu_bus; + int devfn; + + if (!dev->is_master || + ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) { + return -EPERM; + } + + if (!pcie_pri_enabled(dev)) { + return -EPERM; + } + + pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + if (iommu_bus && iommu_bus->iommu_ops->pri_request_page) { + return iommu_bus->iommu_ops->pri_request_page(bus, + iommu_bus->iommu_opaque, + devfn, pasid, priv_req, + exec_req, addr, lpig, prgi, + is_read, is_write); + } + + return -ENODEV; +} + +int pci_pri_register_notifier(PCIDevice *dev, uint32_t pasid, + IOMMUPRINotifier *notifier) +{ + PCIBus *bus; + PCIBus *iommu_bus; + int devfn; + + if (!dev->is_master || + ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) { + return -EPERM; + } + + pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + if (iommu_bus && iommu_bus->iommu_ops->pri_register_notifier) { + iommu_bus->iommu_ops->pri_register_notifier(bus, + iommu_bus->iommu_opaque, + devfn, pasid, notifier); + return 0; + } + + return -ENODEV; +} + +void pci_pri_unregister_notifier(PCIDevice *dev, uint32_t pasid) +{ + PCIBus *bus; + PCIBus *iommu_bus; + int devfn; + + pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + if (iommu_bus && iommu_bus->iommu_ops->pri_unregister_notifier) { + iommu_bus->iommu_ops->pri_unregister_notifier(bus, + iommu_bus->iommu_opaque, + devfn, pasid); + } +} + +ssize_t pci_ats_request_translation(PCIDevice *dev, uint32_t pasid, + bool priv_req, bool exec_req, + hwaddr addr, 
size_t length, + bool no_write, IOMMUTLBEntry *result, + size_t result_length, + uint32_t *err_count) +{ + PCIBus *bus; + PCIBus *iommu_bus; + int devfn; + + if (!dev->is_master || + ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev))) { + return -EPERM; + } + + if (result_length == 0) { + return -ENOSPC; + } + + if (!pcie_ats_enabled(dev)) { + return -EPERM; + } + + pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + if (iommu_bus && iommu_bus->iommu_ops->ats_request_translation) { + return iommu_bus->iommu_ops->ats_request_translation(bus, + iommu_bus->iommu_opaque, + devfn, pasid, priv_req, + exec_req, addr, length, + no_write, result, + result_length, err_count); + } + + return -ENODEV; +} + +int pci_iommu_register_iotlb_notifier(PCIDevice *dev, uint32_t pasid, + IOMMUNotifier *n) +{ + PCIBus *bus; + PCIBus *iommu_bus; + int devfn; + + if ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev)) { + return -EPERM; + } + + pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + if (iommu_bus && iommu_bus->iommu_ops->register_iotlb_notifier) { + iommu_bus->iommu_ops->register_iotlb_notifier(bus, + iommu_bus->iommu_opaque, devfn, + pasid, n); + return 0; + } + + return -ENODEV; +} + +int pci_iommu_unregister_iotlb_notifier(PCIDevice *dev, uint32_t pasid, + IOMMUNotifier *n) +{ + PCIBus *bus; + PCIBus *iommu_bus; + int devfn; + + if ((pasid != PCI_NO_PASID) && !pcie_pasid_enabled(dev)) { + return -EPERM; + } + + pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + if (iommu_bus && iommu_bus->iommu_ops->unregister_iotlb_notifier) { + iommu_bus->iommu_ops->unregister_iotlb_notifier(bus, + iommu_bus->iommu_opaque, + devfn, pasid, n); + return 0; + } + + return -ENODEV; +} + +int pci_iommu_get_iotlb_info(PCIDevice *dev, uint8_t *addr_width, + uint32_t *min_page_size) +{ + PCIBus *bus; + PCIBus *iommu_bus; + int devfn; + + pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + if (iommu_bus && iommu_bus->iommu_ops->get_iotlb_info) { + iommu_bus->iommu_ops->get_iotlb_info(iommu_bus->iommu_opaque, + addr_width, min_page_size); + return 0; + } + + return -ENODEV; +} + void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque) { /* @@ -2786,6 +3216,24 @@ void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque) bus->iommu_opaque = opaque; } +/* + * Similar to pci_setup_iommu(), but sets iommu_per_bus to true, + * indicating that the IOMMU is specific to this bus. This is used by + * IOMMU implementations that are tied to a specific PCIe root complex. + * + * In QEMU, pxb-pcie behaves as a special root complex whose parent is + * effectively the default root complex (pcie.0). The iommu_per_bus + * is checked in pci_device_get_iommu_bus_devfn() to ensure the correct + * IOMMU ops are returned, avoiding the use of the parent’s IOMMU when + * it's not appropriate. + */ +void pci_setup_iommu_per_bus(PCIBus *bus, const PCIIOMMUOps *ops, + void *opaque) +{ + pci_setup_iommu(bus, ops, opaque); + bus->iommu_per_bus = true; +} + static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque) { Range *range = opaque; @@ -2886,16 +3334,30 @@ MSIMessage pci_get_msi_message(PCIDevice *dev, int vector) void pci_set_power(PCIDevice *d, bool state) { - if (d->has_power == state) { + /* + * Don't change the enabled state of VFs when powering on/off the device. + * + * When powering on, VFs must not be enabled immediately but they must + * wait until the guest configures SR-IOV. 
+ * When powering off, their corresponding PFs will be reset and disable + * VFs. + */ + if (!pci_is_vf(d)) { + pci_set_enabled(d, state); + } +} + +void pci_set_enabled(PCIDevice *d, bool state) +{ + if (d->enabled == state) { return; } - d->has_power = state; + d->enabled = state; pci_update_mappings(d); - memory_region_set_enabled(&d->bus_master_enable_region, - (pci_get_word(d->config + PCI_COMMAND) - & PCI_COMMAND_MASTER) && d->has_power); - if (!d->has_power) { + pci_set_master(d, (pci_get_word(d->config + PCI_COMMAND) + & PCI_COMMAND_MASTER) && d->enabled); + if (qdev_is_realized(&d->qdev)) { pci_device_reset(d); } } diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c index 6a4e38856dd..76255c4cd89 100644 --- a/hw/pci/pci_bridge.c +++ b/hw/pci/pci_bridge.c @@ -380,9 +380,12 @@ void pci_bridge_initfn(PCIDevice *dev, const char *typename) sec_bus->map_irq = br->map_irq ? br->map_irq : pci_swizzle_map_irq_fn; sec_bus->address_space_mem = &br->address_space_mem; memory_region_init(&br->address_space_mem, OBJECT(br), "pci_bridge_pci", UINT64_MAX); + address_space_init(&br->as_mem, &br->address_space_mem, + "pci_bridge_pci_mem"); sec_bus->address_space_io = &br->address_space_io; memory_region_init(&br->address_space_io, OBJECT(br), "pci_bridge_io", 4 * GiB); + address_space_init(&br->as_io, &br->address_space_io, "pci_bridge_pci_io"); pci_bridge_region_init(br); QLIST_INIT(&sec_bus->child); QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling); @@ -399,6 +402,8 @@ void pci_bridge_exitfn(PCIDevice *pci_dev) PCIBridge *s = PCI_BRIDGE(pci_dev); assert(QLIST_EMPTY(&s->sec_bus.child)); QLIST_REMOVE(&s->sec_bus, sibling); + address_space_destroy(&s->as_mem); + address_space_destroy(&s->as_io); pci_bridge_region_del(s, &s->windows); pci_bridge_region_cleanup(s, &s->windows); /* object_unparent() is called automatically during device deletion */ @@ -472,13 +477,12 @@ int pci_bridge_qemu_reserve_cap_init(PCIDevice *dev, int cap_offset, return 0; } -static Property pci_bridge_properties[] = { +static const Property pci_bridge_properties[] = { DEFINE_PROP_BOOL("x-pci-express-writeable-slt-bug", PCIBridge, pcie_writeable_slt_bug, false), - DEFINE_PROP_END_OF_LIST(), }; -static void pci_bridge_class_init(ObjectClass *klass, void *data) +static void pci_bridge_class_init(ObjectClass *klass, const void *data) { AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass); DeviceClass *k = DEVICE_CLASS(klass); @@ -493,7 +497,7 @@ static const TypeInfo pci_bridge_type_info = { .instance_size = sizeof(PCIBridge), .class_init = pci_bridge_class_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_ACPI_DEV_AML_IF }, { }, }, diff --git a/hw/pci/pci_host.c b/hw/pci/pci_host.c index dfe6fe61840..7179d99178b 100644 --- a/hw/pci/pci_host.c +++ b/hw/pci/pci_host.c @@ -86,7 +86,7 @@ void pci_host_config_write_common(PCIDevice *pci_dev, uint32_t addr, * allowing direct removal of unexposed functions. */ if ((pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) || - !pci_dev->has_power || is_pci_dev_ejected(pci_dev)) { + !pci_dev->enabled || is_pci_dev_ejected(pci_dev)) { return; } @@ -111,7 +111,7 @@ uint32_t pci_host_config_read_common(PCIDevice *pci_dev, uint32_t addr, * allowing direct removal of unexposed functions. 
*/ if ((pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) || - !pci_dev->has_power || is_pci_dev_ejected(pci_dev)) { + !pci_dev->enabled || is_pci_dev_ejected(pci_dev)) { return ~0x0; } @@ -217,12 +217,6 @@ const MemoryRegionOps pci_host_data_le_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -const MemoryRegionOps pci_host_data_be_ops = { - .read = pci_host_data_read, - .write = pci_host_data_write, - .endianness = DEVICE_BIG_ENDIAN, -}; - static bool pci_host_needed(void *opaque) { PCIHostState *s = opaque; @@ -240,14 +234,13 @@ const VMStateDescription vmstate_pcihost = { } }; -static Property pci_host_properties_common[] = { +static const Property pci_host_properties_common[] = { DEFINE_PROP_BOOL("x-config-reg-migration-enabled", PCIHostState, mig_enabled, true), DEFINE_PROP_BOOL(PCI_HOST_BYPASS_IOMMU, PCIHostState, bypass_iommu, false), - DEFINE_PROP_END_OF_LIST(), }; -static void pci_host_class_init(ObjectClass *klass, void *data) +static void pci_host_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, pci_host_properties_common); diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index 4b2f0805c6e..eaeb68894e6 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -86,7 +86,13 @@ pcie_cap_v1_fill(PCIDevice *dev, uint8_t port, uint8_t type, uint8_t version) * Specification, Revision 1.1., or subsequent PCI Express Base * Specification revisions. */ - pci_set_long(exp_cap + PCI_EXP_DEVCAP, PCI_EXP_DEVCAP_RBER); + uint32_t devcap = PCI_EXP_DEVCAP_RBER; + + if (dev->cap_present & QEMU_PCIE_EXT_TAG) { + devcap = PCI_EXP_DEVCAP_RBER | PCI_EXP_DEVCAP_EXT_TAG; + } + + pci_set_long(exp_cap + PCI_EXP_DEVCAP, devcap); pci_set_long(exp_cap + PCI_EXP_LNKCAP, (port << PCI_EXP_LNKCAP_PN_SHIFT) | @@ -105,46 +111,18 @@ pcie_cap_v1_fill(PCIDevice *dev, uint8_t port, uint8_t type, uint8_t version) pci_set_word(cmask + PCI_EXP_LNKSTA, 0); } -static void pcie_cap_fill_slot_lnk(PCIDevice *dev) +/* Includes setting the target speed default */ +static void pcie_cap_fill_lnk(uint8_t *exp_cap, PCIExpLinkWidth width, + PCIExpLinkSpeed speed) { - PCIESlot *s = (PCIESlot *)object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT); - uint8_t *exp_cap = dev->config + dev->exp.exp_cap; - - /* Skip anything that isn't a PCIESlot */ - if (!s) { - return; - } - /* Clear and fill LNKCAP from what was configured above */ pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP, PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS); pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, - QEMU_PCI_EXP_LNKCAP_MLW(s->width) | - QEMU_PCI_EXP_LNKCAP_MLS(s->speed)); - - /* - * Link bandwidth notification is required for all root ports and - * downstream ports supporting links wider than x1 or multiple link - * speeds. - */ - if (s->width > QEMU_PCI_EXP_LNK_X1 || - s->speed > QEMU_PCI_EXP_LNK_2_5GT) { - pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, - PCI_EXP_LNKCAP_LBNC); - } - - if (s->speed > QEMU_PCI_EXP_LNK_2_5GT) { - /* - * Hot-plug capable downstream ports and downstream ports supporting - * link speeds greater than 5GT/s must hardwire PCI_EXP_LNKCAP_DLLLARC - * to 1b. PCI_EXP_LNKCAP_DLLLARC implies PCI_EXP_LNKSTA_DLLLA, which - * we also hardwire to 1b here. 2.5GT/s hot-plug slots should also - * technically implement this, but it's not done here for compatibility. 
- */ - pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, - PCI_EXP_LNKCAP_DLLLARC); - /* the PCI_EXP_LNKSTA_DLLLA will be set in the hotplug function */ + QEMU_PCI_EXP_LNKCAP_MLW(width) | + QEMU_PCI_EXP_LNKCAP_MLS(speed)); + if (speed > QEMU_PCI_EXP_LNK_2_5GT) { /* * Target Link Speed defaults to the highest link speed supported by * the component. 2.5GT/s devices are permitted to hardwire to zero. @@ -152,7 +130,7 @@ static void pcie_cap_fill_slot_lnk(PCIDevice *dev) pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKCTL2, PCI_EXP_LNKCTL2_TLS); pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKCTL2, - QEMU_PCI_EXP_LNKCAP_MLS(s->speed) & + QEMU_PCI_EXP_LNKCAP_MLS(speed) & PCI_EXP_LNKCTL2_TLS); } @@ -161,27 +139,82 @@ static void pcie_cap_fill_slot_lnk(PCIDevice *dev) * actually a reference to the highest bit supported in this register. * We assume the device supports all link speeds. */ - if (s->speed > QEMU_PCI_EXP_LNK_5GT) { + if (speed > QEMU_PCI_EXP_LNK_5GT) { pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP2, ~0U); pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, PCI_EXP_LNKCAP2_SLS_2_5GB | PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_8_0GB); - if (s->speed > QEMU_PCI_EXP_LNK_8GT) { + if (speed > QEMU_PCI_EXP_LNK_8GT) { pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, PCI_EXP_LNKCAP2_SLS_16_0GB); } - if (s->speed > QEMU_PCI_EXP_LNK_16GT) { + if (speed > QEMU_PCI_EXP_LNK_16GT) { pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, PCI_EXP_LNKCAP2_SLS_32_0GB); } - if (s->speed > QEMU_PCI_EXP_LNK_32GT) { + if (speed > QEMU_PCI_EXP_LNK_32GT) { pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, PCI_EXP_LNKCAP2_SLS_64_0GB); } } } +void pcie_cap_fill_link_ep_usp(PCIDevice *dev, PCIExpLinkWidth width, + PCIExpLinkSpeed speed) +{ + uint8_t *exp_cap = dev->config + dev->exp.exp_cap; + + /* + * For an end point or USP need to set the current status as well + * as the capabilities. + */ + pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA, + PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW); + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA, + QEMU_PCI_EXP_LNKSTA_NLW(width) | + QEMU_PCI_EXP_LNKSTA_CLS(speed)); + + pcie_cap_fill_lnk(exp_cap, width, speed); +} + +static void pcie_cap_fill_slot_lnk(PCIDevice *dev) +{ + PCIESlot *s = (PCIESlot *)object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT); + uint8_t *exp_cap = dev->config + dev->exp.exp_cap; + + /* Skip anything that isn't a PCIESlot */ + if (!s) { + return; + } + + /* + * Link bandwidth notification is required for all root ports and + * downstream ports supporting links wider than x1 or multiple link + * speeds. + */ + if (s->width > QEMU_PCI_EXP_LNK_X1 || + s->speed > QEMU_PCI_EXP_LNK_2_5GT) { + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, + PCI_EXP_LNKCAP_LBNC); + } + + if (s->speed > QEMU_PCI_EXP_LNK_2_5GT) { + /* + * Hot-plug capable downstream ports and downstream ports supporting + * link speeds greater than 5GT/s must hardwire PCI_EXP_LNKCAP_DLLLARC + * to 1b. PCI_EXP_LNKCAP_DLLLARC implies PCI_EXP_LNKSTA_DLLLA, which + * we also hardwire to 1b here. 2.5GT/s hot-plug slots should also + * technically implement this, but it's not done here for compatibility. 
+ */ + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, + PCI_EXP_LNKCAP_DLLLARC); + /* the PCI_EXP_LNKSTA_DLLLA will be set in the hotplug function */ + } + + pcie_cap_fill_lnk(exp_cap, s->width, s->speed); +} + int pcie_cap_init(PCIDevice *dev, uint8_t offset, uint8_t type, uint8_t port, Error **errp) @@ -1080,18 +1113,22 @@ void pcie_sync_bridge_lnk(PCIDevice *bridge_dev) if ((lnksta & PCI_EXP_LNKSTA_NLW) > (lnkcap & PCI_EXP_LNKCAP_MLW)) { lnksta &= ~PCI_EXP_LNKSTA_NLW; lnksta |= lnkcap & PCI_EXP_LNKCAP_MLW; - } else if (!(lnksta & PCI_EXP_LNKSTA_NLW)) { - lnksta |= QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1); } if ((lnksta & PCI_EXP_LNKSTA_CLS) > (lnkcap & PCI_EXP_LNKCAP_SLS)) { lnksta &= ~PCI_EXP_LNKSTA_CLS; lnksta |= lnkcap & PCI_EXP_LNKCAP_SLS; - } else if (!(lnksta & PCI_EXP_LNKSTA_CLS)) { - lnksta |= QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT); } } + if (!(lnksta & PCI_EXP_LNKSTA_NLW)) { + lnksta |= QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1); + } + + if (!(lnksta & PCI_EXP_LNKSTA_CLS)) { + lnksta |= QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT); + } + pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW); pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA, lnksta & @@ -1177,3 +1214,81 @@ void pcie_acs_reset(PCIDevice *dev) pci_set_word(dev->config + dev->exp.acs_cap + PCI_ACS_CTRL, 0); } } + +/* PASID */ +void pcie_pasid_init(PCIDevice *dev, uint16_t offset, uint8_t pasid_width, + bool exec_perm, bool priv_mod) +{ + static const uint16_t control_reg_rw_mask = 0x07; + uint16_t capability_reg; + + assert(pasid_width <= PCI_EXT_CAP_PASID_MAX_WIDTH); + + pcie_add_capability(dev, PCI_EXT_CAP_ID_PASID, PCI_PASID_VER, offset, + PCI_EXT_CAP_PASID_SIZEOF); + + capability_reg = ((uint16_t)pasid_width) << PCI_PASID_CAP_WIDTH_SHIFT; + capability_reg |= exec_perm ? PCI_PASID_CAP_EXEC : 0; + capability_reg |= priv_mod ? PCI_PASID_CAP_PRIV : 0; + pci_set_word(dev->config + offset + PCI_PASID_CAP, capability_reg); + + /* Everything is disabled by default */ + pci_set_word(dev->config + offset + PCI_PASID_CTRL, 0); + + pci_set_word(dev->wmask + offset + PCI_PASID_CTRL, control_reg_rw_mask); + + dev->exp.pasid_cap = offset; +} + +/* PRI */ +void pcie_pri_init(PCIDevice *dev, uint16_t offset, uint32_t outstanding_pr_cap, + bool prg_response_pasid_req) +{ + static const uint16_t control_reg_rw_mask = 0x3; + static const uint16_t status_reg_rw1_mask = 0x3; + static const uint32_t pr_alloc_reg_rw_mask = 0xffffffff; + uint16_t status_reg; + + status_reg = prg_response_pasid_req ? 
PCI_PRI_STATUS_PASID : 0; + status_reg |= PCI_PRI_STATUS_STOPPED; /* Stopped by default */ + + pcie_add_capability(dev, PCI_EXT_CAP_ID_PRI, PCI_PRI_VER, offset, + PCI_EXT_CAP_PRI_SIZEOF); + /* Disabled by default */ + + pci_set_word(dev->config + offset + PCI_PRI_STATUS, status_reg); + pci_set_long(dev->config + offset + PCI_PRI_MAX_REQ, outstanding_pr_cap); + + pci_set_word(dev->wmask + offset + PCI_PRI_CTRL, control_reg_rw_mask); + pci_set_word(dev->w1cmask + offset + PCI_PRI_STATUS, status_reg_rw1_mask); + pci_set_long(dev->wmask + offset + PCI_PRI_ALLOC_REQ, pr_alloc_reg_rw_mask); + + dev->exp.pri_cap = offset; +} + +bool pcie_pri_enabled(const PCIDevice *dev) +{ + if (!pci_is_express(dev) || !dev->exp.pri_cap) { + return false; + } + return (pci_get_word(dev->config + dev->exp.pri_cap + PCI_PRI_CTRL) & + PCI_PRI_CTRL_ENABLE) != 0; +} + +bool pcie_pasid_enabled(const PCIDevice *dev) +{ + if (!pci_is_express(dev) || !dev->exp.pasid_cap) { + return false; + } + return (pci_get_word(dev->config + dev->exp.pasid_cap + PCI_PASID_CTRL) & + PCI_PASID_CTRL_ENABLE) != 0; +} + +bool pcie_ats_enabled(const PCIDevice *dev) +{ + if (!pci_is_express(dev) || !dev->exp.ats_cap) { + return false; + } + return (pci_get_word(dev->config + dev->exp.ats_cap + PCI_ATS_CTRL) & + PCI_ATS_CTRL_ENABLE) != 0; +} diff --git a/hw/pci/pcie_port.c b/hw/pci/pcie_port.c index 20ff2b39e88..f3841a26568 100644 --- a/hw/pci/pcie_port.c +++ b/hw/pci/pcie_port.c @@ -92,16 +92,6 @@ static PCIESlot *pcie_chassis_find_slot_with_chassis(struct PCIEChassis *c, return s; } -PCIESlot *pcie_chassis_find_slot(uint8_t chassis_number, uint16_t slot) -{ - struct PCIEChassis *c; - c = pcie_chassis_find(chassis_number); - if (!c) { - return NULL; - } - return pcie_chassis_find_slot_with_chassis(c, slot); -} - int pcie_chassis_add_slot(struct PCIESlot *slot) { struct PCIEChassis *c; @@ -121,15 +111,14 @@ void pcie_chassis_del_slot(PCIESlot *s) QLIST_REMOVE(s, next); } -static Property pcie_port_props[] = { +static const Property pcie_port_props[] = { DEFINE_PROP_UINT8("port", PCIEPort, port, 0), DEFINE_PROP_UINT16("aer_log_max", PCIEPort, parent_obj.parent_obj.exp.aer_log.log_max, PCIE_AER_LOG_MAX_DEFAULT), - DEFINE_PROP_END_OF_LIST() }; -static void pcie_port_class_init(ObjectClass *oc, void *data) +static void pcie_port_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -199,7 +188,7 @@ int pcie_count_ds_ports(PCIBus *bus) return dsp_count; } -static bool pcie_slot_is_hotpluggbale_bus(HotplugHandler *plug_handler, +static bool pcie_slot_is_hotpluggable_bus(HotplugHandler *plug_handler, BusState *bus) { PCIESlot *s = PCIE_SLOT(bus->parent); @@ -214,16 +203,15 @@ static const TypeInfo pcie_port_type_info = { .class_init = pcie_port_class_init, }; -static Property pcie_slot_props[] = { +static const Property pcie_slot_props[] = { DEFINE_PROP_UINT8("chassis", PCIESlot, chassis, 0), DEFINE_PROP_UINT16("slot", PCIESlot, slot, 0), DEFINE_PROP_BOOL("hotplug", PCIESlot, hotplug, true), DEFINE_PROP_BOOL("x-do-not-expose-native-hotplug-cap", PCIESlot, hide_native_hotplug_cap, false), - DEFINE_PROP_END_OF_LIST() }; -static void pcie_slot_class_init(ObjectClass *oc, void *data) +static void pcie_slot_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); @@ -233,7 +221,7 @@ static void pcie_slot_class_init(ObjectClass *oc, void *data) hc->plug = pcie_cap_slot_plug_cb; hc->unplug = pcie_cap_slot_unplug_cb; hc->unplug_request = 
pcie_cap_slot_unplug_request_cb; - hc->is_hotpluggable_bus = pcie_slot_is_hotpluggbale_bus; + hc->is_hotpluggable_bus = pcie_slot_is_hotpluggable_bus; } static const TypeInfo pcie_slot_type_info = { @@ -242,7 +230,7 @@ static const TypeInfo pcie_slot_type_info = { .instance_size = sizeof(PCIESlot), .abstract = true, .class_init = pcie_slot_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } } diff --git a/hw/pci/pcie_sriov.c b/hw/pci/pcie_sriov.c index e9b23221d71..8a4bf0d6f7c 100644 --- a/hw/pci/pcie_sriov.c +++ b/hw/pci/pcie_sriov.c @@ -15,28 +15,104 @@ #include "hw/pci/pcie.h" #include "hw/pci/pci_bus.h" #include "hw/qdev-properties.h" -#include "qemu/error-report.h" #include "qemu/range.h" #include "qapi/error.h" #include "trace.h" -static PCIDevice *register_vf(PCIDevice *pf, int devfn, - const char *name, uint16_t vf_num); -static void unregister_vfs(PCIDevice *dev); +static GHashTable *pfs; -void pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset, - const char *vfname, uint16_t vf_dev_id, - uint16_t init_vfs, uint16_t total_vfs, - uint16_t vf_offset, uint16_t vf_stride) +static void unparent_vfs(PCIDevice *dev, uint16_t total_vfs) +{ + for (uint16_t i = 0; i < total_vfs; i++) { + PCIDevice *vf = dev->exp.sriov_pf.vf[i]; + object_unparent(OBJECT(vf)); + object_unref(OBJECT(vf)); + } + g_free(dev->exp.sriov_pf.vf); + dev->exp.sriov_pf.vf = NULL; +} + +static void register_vfs(PCIDevice *dev) +{ + uint16_t num_vfs; + uint16_t i; + uint16_t sriov_cap = dev->exp.sriov_cap; + + assert(sriov_cap > 0); + num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF); + + trace_sriov_register_vfs(dev->name, PCI_SLOT(dev->devfn), + PCI_FUNC(dev->devfn), num_vfs); + for (i = 0; i < num_vfs; i++) { + pci_set_enabled(dev->exp.sriov_pf.vf[i], true); + } + + pci_set_word(dev->wmask + sriov_cap + PCI_SRIOV_NUM_VF, 0); +} + +static void unregister_vfs(PCIDevice *dev) { + uint8_t *cfg = dev->config + dev->exp.sriov_cap; + uint16_t i; + + trace_sriov_unregister_vfs(dev->name, PCI_SLOT(dev->devfn), + PCI_FUNC(dev->devfn)); + for (i = 0; i < pci_get_word(cfg + PCI_SRIOV_TOTAL_VF); i++) { + pci_set_enabled(dev->exp.sriov_pf.vf[i], false); + } + + pci_set_word(dev->wmask + dev->exp.sriov_cap + PCI_SRIOV_NUM_VF, 0xffff); +} + +static void consume_config(PCIDevice *dev) +{ + uint8_t *cfg = dev->config + dev->exp.sriov_cap; + + if (pci_get_word(cfg + PCI_SRIOV_CTRL) & PCI_SRIOV_CTRL_VFE) { + register_vfs(dev); + } else { + uint8_t *wmask = dev->wmask + dev->exp.sriov_cap; + uint16_t num_vfs = pci_get_word(cfg + PCI_SRIOV_NUM_VF); + uint16_t wmask_val = PCI_SRIOV_CTRL_MSE | PCI_SRIOV_CTRL_ARI; + + unregister_vfs(dev); + + if (num_vfs <= pci_get_word(cfg + PCI_SRIOV_TOTAL_VF)) { + wmask_val |= PCI_SRIOV_CTRL_VFE; + } + + pci_set_word(wmask + PCI_SRIOV_CTRL, wmask_val); + } +} + +static bool pcie_sriov_pf_init_common(PCIDevice *dev, uint16_t offset, + uint16_t vf_dev_id, uint16_t init_vfs, + uint16_t total_vfs, uint16_t vf_offset, + uint16_t vf_stride, Error **errp) +{ + int32_t devfn = dev->devfn + vf_offset; uint8_t *cfg = dev->config + offset; uint8_t *wmask; + if (!pci_is_express(dev)) { + error_setg(errp, "PCI Express is required for SR-IOV PF"); + return false; + } + + if (pci_is_vf(dev)) { + error_setg(errp, "a device cannot be a SR-IOV PF and a VF at the same time"); + return false; + } + + if (total_vfs && + (uint32_t)devfn + (uint32_t)(total_vfs - 1) * vf_stride >= PCI_DEVFN_MAX) { + error_setg(errp, "VF addr overflows"); + return 
false; + } + pcie_add_capability(dev, PCI_EXT_CAP_ID_SRIOV, 1, offset, PCI_EXT_CAP_SRIOV_SIZEOF); dev->exp.sriov_cap = offset; - dev->exp.sriov_pf.num_vfs = 0; - dev->exp.sriov_pf.vfname = g_strdup(vfname); dev->exp.sriov_pf.vf = NULL; pci_set_word(cfg + PCI_SRIOV_VF_OFFSET, vf_offset); @@ -69,13 +145,74 @@ void pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset, pci_set_word(wmask + PCI_SRIOV_SYS_PGSIZE, 0x553); qdev_prop_set_bit(&dev->qdev, "multifunction", true); + + return true; +} + +bool pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset, + const char *vfname, uint16_t vf_dev_id, + uint16_t init_vfs, uint16_t total_vfs, + uint16_t vf_offset, uint16_t vf_stride, + Error **errp) +{ + BusState *bus = qdev_get_parent_bus(&dev->qdev); + int32_t devfn = dev->devfn + vf_offset; + + if (pfs && g_hash_table_contains(pfs, dev->qdev.id)) { + error_setg(errp, "attaching user-created SR-IOV VF unsupported"); + return false; + } + + if (!pcie_sriov_pf_init_common(dev, offset, vf_dev_id, init_vfs, + total_vfs, vf_offset, vf_stride, errp)) { + return false; + } + + dev->exp.sriov_pf.vf = g_new(PCIDevice *, total_vfs); + + for (uint16_t i = 0; i < total_vfs; i++) { + PCIDevice *vf = pci_new(devfn, vfname); + vf->exp.sriov_vf.pf = dev; + vf->exp.sriov_vf.vf_number = i; + + if (!qdev_realize(&vf->qdev, bus, errp)) { + object_unparent(OBJECT(vf)); + object_unref(vf); + unparent_vfs(dev, i); + return false; + } + + /* set vid/did according to sr/iov spec - they are not used */ + pci_config_set_vendor_id(vf->config, 0xffff); + pci_config_set_device_id(vf->config, 0xffff); + + dev->exp.sriov_pf.vf[i] = vf; + devfn += vf_stride; + } + + return true; } void pcie_sriov_pf_exit(PCIDevice *dev) { - unregister_vfs(dev); - g_free((char *)dev->exp.sriov_pf.vfname); - dev->exp.sriov_pf.vfname = NULL; + uint8_t *cfg = dev->config + dev->exp.sriov_cap; + + if (dev->exp.sriov_pf.vf_user_created) { + uint16_t ven_id = pci_get_word(dev->config + PCI_VENDOR_ID); + uint16_t total_vfs = pci_get_word(dev->config + PCI_SRIOV_TOTAL_VF); + uint16_t vf_dev_id = pci_get_word(dev->config + PCI_SRIOV_VF_DID); + + unregister_vfs(dev); + + for (uint16_t i = 0; i < total_vfs; i++) { + dev->exp.sriov_pf.vf[i]->exp.sriov_vf.pf = NULL; + + pci_config_set_vendor_id(dev->exp.sriov_pf.vf[i]->config, ven_id); + pci_config_set_device_id(dev->exp.sriov_pf.vf[i]->config, vf_dev_id); + } + } else { + unparent_vfs(dev, pci_get_word(cfg + PCI_SRIOV_TOTAL_VF)); + } } void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num, @@ -108,113 +245,179 @@ void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num, void pcie_sriov_vf_register_bar(PCIDevice *dev, int region_num, MemoryRegion *memory) { - PCIIORegion *r; - PCIBus *bus = pci_get_bus(dev); uint8_t type; - pcibus_t size = memory_region_size(memory); - assert(pci_is_vf(dev)); /* PFs must use pci_register_bar */ - assert(region_num >= 0); - assert(region_num < PCI_NUM_REGIONS); + assert(dev->exp.sriov_vf.pf); type = dev->exp.sriov_vf.pf->exp.sriov_pf.vf_bar_type[region_num]; - if (!is_power_of_2(size)) { - error_report("%s: PCI region size must be a power" - " of two - type=0x%x, size=0x%"FMT_PCIBUS, - __func__, type, size); - exit(1); - } + return pci_register_bar(dev, region_num, type, memory); +} - r = &dev->io_regions[region_num]; - r->memory = memory; - r->address_space = - type & PCI_BASE_ADDRESS_SPACE_IO - ? 
bus->address_space_io - : bus->address_space_mem; - r->size = size; - r->type = type; - - r->addr = pci_bar_address(dev, region_num, r->type, r->size); - if (r->addr != PCI_BAR_UNMAPPED) { - memory_region_add_subregion_overlap(r->address_space, - r->addr, r->memory, 1); - } +static gint compare_vf_devfns(gconstpointer a, gconstpointer b) +{ + return (*(PCIDevice **)a)->devfn - (*(PCIDevice **)b)->devfn; } -static PCIDevice *register_vf(PCIDevice *pf, int devfn, const char *name, - uint16_t vf_num) +int16_t pcie_sriov_pf_init_from_user_created_vfs(PCIDevice *dev, + uint16_t offset, + Error **errp) { - PCIDevice *dev = pci_new(devfn, name); - dev->exp.sriov_vf.pf = pf; - dev->exp.sriov_vf.vf_number = vf_num; - PCIBus *bus = pci_get_bus(pf); - Error *local_err = NULL; - - qdev_realize(&dev->qdev, &bus->qbus, &local_err); - if (local_err) { - error_report_err(local_err); - return NULL; + GPtrArray *pf; + PCIDevice **vfs; + BusState *bus = qdev_get_parent_bus(DEVICE(dev)); + uint16_t ven_id = pci_get_word(dev->config + PCI_VENDOR_ID); + uint16_t size = PCI_EXT_CAP_SRIOV_SIZEOF; + uint16_t vf_dev_id; + uint16_t vf_offset; + uint16_t vf_stride; + uint16_t i; + + if (!pfs || !dev->qdev.id) { + return 0; + } + + pf = g_hash_table_lookup(pfs, dev->qdev.id); + if (!pf) { + return 0; + } + + if (pf->len > UINT16_MAX) { + error_setg(errp, "too many VFs"); + return -1; + } + + g_ptr_array_sort(pf, compare_vf_devfns); + vfs = (void *)pf->pdata; + + if (vfs[0]->devfn <= dev->devfn) { + error_setg(errp, "a VF function number is less than the PF function number"); + return -1; + } + + vf_dev_id = pci_get_word(vfs[0]->config + PCI_DEVICE_ID); + vf_offset = vfs[0]->devfn - dev->devfn; + vf_stride = pf->len < 2 ? 0 : vfs[1]->devfn - vfs[0]->devfn; + + for (i = 0; i < pf->len; i++) { + if (bus != qdev_get_parent_bus(&vfs[i]->qdev)) { + error_setg(errp, "SR-IOV VF parent bus mismatches with PF"); + return -1; + } + + if (ven_id != pci_get_word(vfs[i]->config + PCI_VENDOR_ID)) { + error_setg(errp, "SR-IOV VF vendor ID mismatches with PF"); + return -1; + } + + if (vf_dev_id != pci_get_word(vfs[i]->config + PCI_DEVICE_ID)) { + error_setg(errp, "inconsistent SR-IOV VF device IDs"); + return -1; + } + + for (size_t j = 0; j < PCI_NUM_REGIONS; j++) { + if (vfs[i]->io_regions[j].size != vfs[0]->io_regions[j].size || + vfs[i]->io_regions[j].type != vfs[0]->io_regions[j].type) { + error_setg(errp, "inconsistent SR-IOV BARs"); + return -1; + } + } + + if (vfs[i]->devfn - vfs[0]->devfn != vf_stride * i) { + error_setg(errp, "inconsistent SR-IOV stride"); + return -1; + } } - /* set vid/did according to sr/iov spec - they are not used */ - pci_config_set_vendor_id(dev->config, 0xffff); - pci_config_set_device_id(dev->config, 0xffff); + if (!pcie_sriov_pf_init_common(dev, offset, vf_dev_id, pf->len, + pf->len, vf_offset, vf_stride, errp)) { + return -1; + } + + if (!pcie_find_capability(dev, PCI_EXT_CAP_ID_ARI)) { + pcie_ari_init(dev, offset + size); + size += PCI_ARI_SIZEOF; + } - return dev; + for (i = 0; i < pf->len; i++) { + vfs[i]->exp.sriov_vf.pf = dev; + vfs[i]->exp.sriov_vf.vf_number = i; + + /* set vid/did according to sr/iov spec - they are not used */ + pci_config_set_vendor_id(vfs[i]->config, 0xffff); + pci_config_set_device_id(vfs[i]->config, 0xffff); + } + + dev->exp.sriov_pf.vf = vfs; + dev->exp.sriov_pf.vf_user_created = true; + + for (i = 0; i < PCI_NUM_REGIONS; i++) { + PCIIORegion *region = &vfs[0]->io_regions[i]; + + if (region->size) { + pcie_sriov_pf_init_vf_bar(dev, i, region->type, region->size); + } 
+ } + + return size; } -static void register_vfs(PCIDevice *dev) +bool pcie_sriov_register_device(PCIDevice *dev, Error **errp) { - uint16_t num_vfs; - uint16_t i; - uint16_t sriov_cap = dev->exp.sriov_cap; - uint16_t vf_offset = - pci_get_word(dev->config + sriov_cap + PCI_SRIOV_VF_OFFSET); - uint16_t vf_stride = - pci_get_word(dev->config + sriov_cap + PCI_SRIOV_VF_STRIDE); - int32_t devfn = dev->devfn + vf_offset; - - assert(sriov_cap > 0); - num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF); - if (num_vfs > pci_get_word(dev->config + sriov_cap + PCI_SRIOV_TOTAL_VF)) { - return; + if (!dev->exp.sriov_pf.vf && dev->qdev.id && + pfs && g_hash_table_contains(pfs, dev->qdev.id)) { + error_setg(errp, "attaching user-created SR-IOV VF unsupported"); + return false; } - dev->exp.sriov_pf.vf = g_new(PCIDevice *, num_vfs); + if (dev->sriov_pf) { + PCIDevice *pci_pf; + GPtrArray *pf; - trace_sriov_register_vfs(dev->name, PCI_SLOT(dev->devfn), - PCI_FUNC(dev->devfn), num_vfs); - for (i = 0; i < num_vfs; i++) { - dev->exp.sriov_pf.vf[i] = register_vf(dev, devfn, - dev->exp.sriov_pf.vfname, i); - if (!dev->exp.sriov_pf.vf[i]) { - num_vfs = i; - break; + if (!PCI_DEVICE_GET_CLASS(dev)->sriov_vf_user_creatable) { + error_setg(errp, "user cannot create SR-IOV VF with this device type"); + return false; } - devfn += vf_stride; + + if (!pci_is_express(dev)) { + error_setg(errp, "PCI Express is required for SR-IOV VF"); + return false; + } + + if (!pci_qdev_find_device(dev->sriov_pf, &pci_pf)) { + error_setg(errp, "PCI device specified as SR-IOV PF already exists"); + return false; + } + + if (!pfs) { + pfs = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL); + } + + pf = g_hash_table_lookup(pfs, dev->sriov_pf); + if (!pf) { + pf = g_ptr_array_new(); + g_hash_table_insert(pfs, g_strdup(dev->sriov_pf), pf); + } + + g_ptr_array_add(pf, dev); } - dev->exp.sriov_pf.num_vfs = num_vfs; + + return true; } -static void unregister_vfs(PCIDevice *dev) +void pcie_sriov_unregister_device(PCIDevice *dev) { - uint16_t num_vfs = dev->exp.sriov_pf.num_vfs; - uint16_t i; + if (dev->sriov_pf && pfs) { + GPtrArray *pf = g_hash_table_lookup(pfs, dev->sriov_pf); - trace_sriov_unregister_vfs(dev->name, PCI_SLOT(dev->devfn), - PCI_FUNC(dev->devfn), num_vfs); - for (i = 0; i < num_vfs; i++) { - Error *err = NULL; - PCIDevice *vf = dev->exp.sriov_pf.vf[i]; - if (!object_property_set_bool(OBJECT(vf), "realized", false, &err)) { - error_reportf_err(err, "Failed to unplug: "); + if (pf) { + g_ptr_array_remove_fast(pf, dev); + + if (!pf->len) { + g_hash_table_remove(pfs, dev->sriov_pf); + g_ptr_array_free(pf, FALSE); + } } - object_unparent(OBJECT(vf)); - object_unref(OBJECT(vf)); } - g_free(dev->exp.sriov_pf.vf); - dev->exp.sriov_pf.vf = NULL; - dev->exp.sriov_pf.num_vfs = 0; } void pcie_sriov_config_write(PCIDevice *dev, uint32_t address, @@ -234,16 +437,13 @@ void pcie_sriov_config_write(PCIDevice *dev, uint32_t address, trace_sriov_config_write(dev->name, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), off, val, len); - if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) { - if (dev->exp.sriov_pf.num_vfs) { - if (!(val & PCI_SRIOV_CTRL_VFE)) { - unregister_vfs(dev); - } - } else { - if (val & PCI_SRIOV_CTRL_VFE) { - register_vfs(dev); - } - } + consume_config(dev); +} + +void pcie_sriov_pf_post_load(PCIDevice *dev) +{ + if (dev->exp.sriov_cap) { + consume_config(dev); } } @@ -260,6 +460,8 @@ void pcie_sriov_pf_reset(PCIDevice *dev) unregister_vfs(dev); pci_set_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF, 
0); + pci_set_word(dev->wmask + sriov_cap + PCI_SRIOV_CTRL, + PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE | PCI_SRIOV_CTRL_ARI); /* * Default is to use 4K pages, software can modify it @@ -294,7 +496,7 @@ void pcie_sriov_pf_add_sup_pgsize(PCIDevice *dev, uint16_t opt_sup_pgsize) uint16_t pcie_sriov_vf_number(PCIDevice *dev) { - assert(pci_is_vf(dev)); + assert(dev->exp.sriov_vf.pf); return dev->exp.sriov_vf.vf_number; } @@ -306,7 +508,7 @@ PCIDevice *pcie_sriov_get_pf(PCIDevice *dev) PCIDevice *pcie_sriov_get_vf_at_index(PCIDevice *dev, int n) { assert(!pci_is_vf(dev)); - if (n < dev->exp.sriov_pf.num_vfs) { + if (n < pcie_sriov_num_vfs(dev)) { return dev->exp.sriov_pf.vf[n]; } return NULL; @@ -314,5 +516,10 @@ PCIDevice *pcie_sriov_get_vf_at_index(PCIDevice *dev, int n) uint16_t pcie_sriov_num_vfs(PCIDevice *dev) { - return dev->exp.sriov_pf.num_vfs; + uint16_t sriov_cap = dev->exp.sriov_cap; + uint8_t *cfg = dev->config + sriov_cap; + + return sriov_cap && + (pci_get_word(cfg + PCI_SRIOV_CTRL) & PCI_SRIOV_CTRL_VFE) ? + pci_get_word(cfg + PCI_SRIOV_NUM_VF) : 0; } diff --git a/hw/pci/trace-events b/hw/pci/trace-events index 19643aa8c6b..02c80d3ec86 100644 --- a/hw/pci/trace-events +++ b/hw/pci/trace-events @@ -1,9 +1,15 @@ # See docs/devel/tracing.rst for syntax documentation. # pci.c +pci_pm_bad_transition(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, uint8_t old, uint8_t new) "%s %02x:%02x.%x REJECTED PM transition D%d->D%d" +pci_pm_transition(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, uint8_t old, uint8_t new) "%s %02x:%02x.%x PM transition D%d->D%d" pci_update_mappings_del(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 pci_update_mappings_add(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 pci_route_irq(int dev_irq, const char *dev_path, int parent_irq, const char *parent_path) "IRQ %d @%s -> IRQ %d @%s" +pci_bad_rom_magic(uint16_t bad_rom_magic, uint16_t good_rom_magic) "Bad ROM magic number: %04"PRIX16". 
Should be: %04"PRIX16 +pci_bad_pcir_offset(uint16_t pcir_offset) "Bad PCIR offset 0x%"PRIx16" or signature" +pci_rom_and_pci_ids(char *romfile, uint16_t vendor_id, uint16_t device_id, uint16_t rom_vendor_id, uint16_t rom_device_id) "%s: ROM ID %04"PRIx16":%04"PRIx16" | PCI ID %04"PRIx16":%04"PRIx16 +pci_rom_checksum_change(uint8_t old_checksum, uint8_t new_checksum) "ROM checksum changed from %02"PRIx8" to %02"PRIx8 # pci_host.c pci_cfg_read(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, unsigned offs, unsigned val) "%s %02x:%02x.%x @0x%x -> 0x%x" @@ -14,7 +20,7 @@ msix_write_config(char *name, bool enabled, bool masked) "dev %s enabled %d mask # hw/pci/pcie_sriov.c sriov_register_vfs(const char *name, int slot, int function, int num_vfs) "%s %02x:%x: creating %d vf devs" -sriov_unregister_vfs(const char *name, int slot, int function, int num_vfs) "%s %02x:%x: Unregistering %d vf devs" +sriov_unregister_vfs(const char *name, int slot, int function) "%s %02x:%x: Unregistering vf devs" sriov_config_write(const char *name, int slot, int fun, uint32_t offset, uint32_t val, uint32_t len) "%s %02x:%x: sriov offset 0x%x val 0x%x len %d" # pcie.c diff --git a/hw/pcmcia/Kconfig b/hw/pcmcia/Kconfig deleted file mode 100644 index 41f2df91366..00000000000 --- a/hw/pcmcia/Kconfig +++ /dev/null @@ -1,2 +0,0 @@ -config PCMCIA - bool diff --git a/hw/pcmcia/meson.build b/hw/pcmcia/meson.build deleted file mode 100644 index 04e29c109c0..00000000000 --- a/hw/pcmcia/meson.build +++ /dev/null @@ -1,2 +0,0 @@ -system_ss.add(when: 'CONFIG_PCMCIA', if_true: files('pcmcia.c')) -system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx.c')) diff --git a/hw/pcmcia/pcmcia.c b/hw/pcmcia/pcmcia.c deleted file mode 100644 index 03d13e7d670..00000000000 --- a/hw/pcmcia/pcmcia.c +++ /dev/null @@ -1,24 +0,0 @@ -/* - * PCMCIA emulation - * - * Copyright 2013 SUSE LINUX Products GmbH - */ - -#include "qemu/osdep.h" -#include "qemu/module.h" -#include "hw/pcmcia.h" - -static const TypeInfo pcmcia_card_type_info = { - .name = TYPE_PCMCIA_CARD, - .parent = TYPE_DEVICE, - .instance_size = sizeof(PCMCIACardState), - .abstract = true, - .class_size = sizeof(PCMCIACardClass), -}; - -static void pcmcia_register_types(void) -{ - type_register_static(&pcmcia_card_type_info); -} - -type_init(pcmcia_register_types) diff --git a/hw/pcmcia/pxa2xx.c b/hw/pcmcia/pxa2xx.c deleted file mode 100644 index e3111fdf1a1..00000000000 --- a/hw/pcmcia/pxa2xx.c +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Intel XScale PXA255/270 PC Card and CompactFlash Interface. - * - * Copyright (c) 2006 Openedhand Ltd. - * Written by Andrzej Zaborowski - * - * This code is licensed under the GPLv2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. 
- */ - -#include "qemu/osdep.h" -#include "hw/irq.h" -#include "hw/sysbus.h" -#include "qapi/error.h" -#include "qemu/module.h" -#include "hw/pcmcia.h" -#include "hw/arm/pxa.h" - -struct PXA2xxPCMCIAState { - SysBusDevice parent_obj; - - PCMCIASocket slot; - MemoryRegion container_mem; - MemoryRegion common_iomem; - MemoryRegion attr_iomem; - MemoryRegion iomem; - - qemu_irq irq; - qemu_irq cd_irq; - - PCMCIACardState *card; -}; - -static uint64_t pxa2xx_pcmcia_common_read(void *opaque, - hwaddr offset, unsigned size) -{ - PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; - PCMCIACardClass *pcc; - - if (s->slot.attached) { - pcc = PCMCIA_CARD_GET_CLASS(s->card); - return pcc->common_read(s->card, offset); - } - - return 0; -} - -static void pxa2xx_pcmcia_common_write(void *opaque, hwaddr offset, - uint64_t value, unsigned size) -{ - PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; - PCMCIACardClass *pcc; - - if (s->slot.attached) { - pcc = PCMCIA_CARD_GET_CLASS(s->card); - pcc->common_write(s->card, offset, value); - } -} - -static uint64_t pxa2xx_pcmcia_attr_read(void *opaque, - hwaddr offset, unsigned size) -{ - PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; - PCMCIACardClass *pcc; - - if (s->slot.attached) { - pcc = PCMCIA_CARD_GET_CLASS(s->card); - return pcc->attr_read(s->card, offset); - } - - return 0; -} - -static void pxa2xx_pcmcia_attr_write(void *opaque, hwaddr offset, - uint64_t value, unsigned size) -{ - PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; - PCMCIACardClass *pcc; - - if (s->slot.attached) { - pcc = PCMCIA_CARD_GET_CLASS(s->card); - pcc->attr_write(s->card, offset, value); - } -} - -static uint64_t pxa2xx_pcmcia_io_read(void *opaque, - hwaddr offset, unsigned size) -{ - PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; - PCMCIACardClass *pcc; - - if (s->slot.attached) { - pcc = PCMCIA_CARD_GET_CLASS(s->card); - return pcc->io_read(s->card, offset); - } - - return 0; -} - -static void pxa2xx_pcmcia_io_write(void *opaque, hwaddr offset, - uint64_t value, unsigned size) -{ - PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; - PCMCIACardClass *pcc; - - if (s->slot.attached) { - pcc = PCMCIA_CARD_GET_CLASS(s->card); - pcc->io_write(s->card, offset, value); - } -} - -static const MemoryRegionOps pxa2xx_pcmcia_common_ops = { - .read = pxa2xx_pcmcia_common_read, - .write = pxa2xx_pcmcia_common_write, - .endianness = DEVICE_NATIVE_ENDIAN -}; - -static const MemoryRegionOps pxa2xx_pcmcia_attr_ops = { - .read = pxa2xx_pcmcia_attr_read, - .write = pxa2xx_pcmcia_attr_write, - .endianness = DEVICE_NATIVE_ENDIAN -}; - -static const MemoryRegionOps pxa2xx_pcmcia_io_ops = { - .read = pxa2xx_pcmcia_io_read, - .write = pxa2xx_pcmcia_io_write, - .endianness = DEVICE_NATIVE_ENDIAN -}; - -static void pxa2xx_pcmcia_set_irq(void *opaque, int line, int level) -{ - PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; - if (!s->irq) - return; - - qemu_set_irq(s->irq, level); -} - -static void pxa2xx_pcmcia_initfn(Object *obj) -{ - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - PXA2xxPCMCIAState *s = PXA2XX_PCMCIA(obj); - - memory_region_init(&s->container_mem, obj, "container", 0x10000000); - sysbus_init_mmio(sbd, &s->container_mem); - - /* Socket I/O Memory Space */ - memory_region_init_io(&s->iomem, obj, &pxa2xx_pcmcia_io_ops, s, - "pxa2xx-pcmcia-io", 0x04000000); - memory_region_add_subregion(&s->container_mem, 0x00000000, - &s->iomem); - - /* Then next 64 MB is reserved */ - - /* Socket Attribute Memory Space */ - memory_region_init_io(&s->attr_iomem, obj, 
&pxa2xx_pcmcia_attr_ops, s, - "pxa2xx-pcmcia-attribute", 0x04000000); - memory_region_add_subregion(&s->container_mem, 0x08000000, - &s->attr_iomem); - - /* Socket Common Memory Space */ - memory_region_init_io(&s->common_iomem, obj, &pxa2xx_pcmcia_common_ops, s, - "pxa2xx-pcmcia-common", 0x04000000); - memory_region_add_subregion(&s->container_mem, 0x0c000000, - &s->common_iomem); - - s->slot.irq = qemu_allocate_irq(pxa2xx_pcmcia_set_irq, s, 0); - - object_property_add_link(obj, "card", TYPE_PCMCIA_CARD, - (Object **)&s->card, - NULL, /* read-only property */ - 0); -} - -/* Insert a new card into a slot */ -int pxa2xx_pcmcia_attach(void *opaque, PCMCIACardState *card) -{ - PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; - PCMCIACardClass *pcc; - - if (s->slot.attached) { - return -EEXIST; - } - - if (s->cd_irq) { - qemu_irq_raise(s->cd_irq); - } - - s->card = card; - pcc = PCMCIA_CARD_GET_CLASS(s->card); - - s->slot.attached = true; - s->card->slot = &s->slot; - pcc->attach(s->card); - - return 0; -} - -/* Eject card from the slot */ -int pxa2xx_pcmcia_detach(void *opaque) -{ - PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; - PCMCIACardClass *pcc; - - if (!s->slot.attached) { - return -ENOENT; - } - - pcc = PCMCIA_CARD_GET_CLASS(s->card); - pcc->detach(s->card); - s->card->slot = NULL; - s->card = NULL; - - s->slot.attached = false; - - if (s->irq) { - qemu_irq_lower(s->irq); - } - if (s->cd_irq) { - qemu_irq_lower(s->cd_irq); - } - - return 0; -} - -/* Who to notify on card events */ -void pxa2xx_pcmcia_set_irq_cb(void *opaque, qemu_irq irq, qemu_irq cd_irq) -{ - PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; - s->irq = irq; - s->cd_irq = cd_irq; -} - -static const TypeInfo pxa2xx_pcmcia_type_info = { - .name = TYPE_PXA2XX_PCMCIA, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PXA2xxPCMCIAState), - .instance_init = pxa2xx_pcmcia_initfn, -}; - -static void pxa2xx_pcmcia_register_types(void) -{ - type_register_static(&pxa2xx_pcmcia_type_info); -} - -type_init(pxa2xx_pcmcia_register_types) diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig index c235519881f..ced6bbc7404 100644 --- a/hw/ppc/Kconfig +++ b/hw/ppc/Kconfig @@ -39,19 +39,11 @@ config POWERNV select PCI_POWERNV select PCA9552 select PCA9554 + select SERIAL_ISA select SSI select SSI_M25P80 select PNV_SPI -config PPC405 - bool - default y - depends on PPC - select M48T59 - select PFLASH_CFI02 - select PPC4XX - select SERIAL - config PPC440 bool default y @@ -62,7 +54,7 @@ config PPC440 select PCI_EXPRESS select PPC440_PCIX select PPC4XX - select SERIAL + select SERIAL_MM select FDT_PPC config PPC4XX @@ -79,7 +71,7 @@ config SAM460EX select IDE_SII3112 select M41T80 select PPC440 - select SERIAL + select SERIAL_MM select SM501 select SMBUS_EEPROM select USB_EHCI_SYSBUS @@ -162,7 +154,7 @@ config E500 select PLATFORM_BUS select PPCE500_PCI select SDHCI - select SERIAL + select SERIAL_MM select MPC_I2C select FDT_PPC select DS1338 @@ -186,7 +178,7 @@ config VIRTEX depends on PPC && FDT select PPC4XX select PFLASH_CFI01 - select SERIAL + select SERIAL_MM select XILINX select XILINX_ETHLITE select FDT_PPC diff --git a/hw/ppc/amigaone.c b/hw/ppc/amigaone.c index 900f93c15e1..12279f42bc9 100644 --- a/hw/ppc/amigaone.c +++ b/hw/ppc/amigaone.c @@ -21,12 +21,26 @@ #include "hw/ide/pci.h" #include "hw/i2c/smbus_eeprom.h" #include "hw/ppc/ppc.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" +#include "system/block-backend.h" +#include "system/qtest.h" +#include "system/reset.h" #include "kvm_ppc.h" +#include 
"elf.h" + +#include /* for crc32 */ #define BUS_FREQ_HZ 100000000 +#define INITRD_MIN_ADDR 0x600000 +#define INIT_RAM_ADDR 0x40000000 + +#define PCI_HIGH_ADDR 0x80000000 +#define PCI_HIGH_SIZE 0x7d000000 +#define PCI_LOW_ADDR 0xfd000000 +#define PCI_LOW_SIZE 0xe0000 + +#define ARTICIA_ADDR 0xfe000000 + /* * Firmware binary available at * https://www.hyperion-entertainment.com/index.php/downloads?view=files&parent=28 @@ -41,20 +55,204 @@ /* AmigaOS calls this routine from ROM, use this if no firmware loaded */ static const char dummy_fw[] = { - 0x38, 0x00, 0x00, 0x08, /* li r0,8 */ - 0x7c, 0x09, 0x03, 0xa6, /* mtctr r0 */ - 0x54, 0x63, 0xf8, 0x7e, /* srwi r3,r3,1 */ - 0x42, 0x00, 0xff, 0xfc, /* bdnz 0x8 */ + 0x54, 0x63, 0xc2, 0x3e, /* srwi r3,r3,8 */ 0x7c, 0x63, 0x18, 0xf8, /* not r3,r3 */ 0x4e, 0x80, 0x00, 0x20, /* blr */ }; +#define NVRAM_ADDR 0xfd0e0000 +#define NVRAM_SIZE (4 * KiB) + +static const char default_env[] = + "baudrate=115200\0" + "stdout=vga\0" + "stdin=ps2kbd\0" + "bootcmd=boota; menu; run menuboot_cmd\0" + "boot1=ide\0" + "boot2=cdrom\0" + "boota_timeout=3\0" + "ide_doreset=on\0" + "pci_irqa=9\0" + "pci_irqa_select=level\0" + "pci_irqb=10\0" + "pci_irqb_select=level\0" + "pci_irqc=11\0" + "pci_irqc_select=level\0" + "pci_irqd=7\0" + "pci_irqd_select=level\0" + "a1ide_irq=1111\0" + "a1ide_xfer=FFFF\0"; +#define CRC32_DEFAULT_ENV 0xb5548481 +#define CRC32_ALL_ZEROS 0x603b0489 + +#define TYPE_A1_NVRAM "a1-nvram" +OBJECT_DECLARE_SIMPLE_TYPE(A1NVRAMState, A1_NVRAM) + +struct A1NVRAMState { + SysBusDevice parent_obj; + + MemoryRegion mr; + BlockBackend *blk; +}; + +static uint64_t nvram_read(void *opaque, hwaddr addr, unsigned int size) +{ + /* read callback not used because of romd mode */ + g_assert_not_reached(); +} + +static void nvram_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) +{ + A1NVRAMState *s = opaque; + uint8_t *p = memory_region_get_ram_ptr(&s->mr); + + p[addr] = val; + if (s->blk && blk_pwrite(s->blk, addr, 1, &val, 0) < 0) { + error_report("%s: could not write %s", __func__, blk_name(s->blk)); + } +} + +static const MemoryRegionOps nvram_ops = { + .read = nvram_read, + .write = nvram_write, + .endianness = DEVICE_BIG_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void nvram_realize(DeviceState *dev, Error **errp) +{ + A1NVRAMState *s = A1_NVRAM(dev); + void *p; + uint32_t crc, *c; + + memory_region_init_rom_device(&s->mr, NULL, &nvram_ops, s, "nvram", + NVRAM_SIZE, &error_fatal); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mr); + c = p = memory_region_get_ram_ptr(&s->mr); + if (s->blk) { + if (blk_getlength(s->blk) != NVRAM_SIZE) { + error_setg(errp, "NVRAM backing file size must be %" PRId64 "bytes", + NVRAM_SIZE); + return; + } + blk_set_perm(s->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, + BLK_PERM_ALL, &error_fatal); + if (blk_pread(s->blk, 0, NVRAM_SIZE, p, 0) < 0) { + error_setg(errp, "Cannot read NVRAM contents from backing file"); + return; + } + } + crc = crc32(0, p + 4, NVRAM_SIZE - 4); + if (crc == CRC32_ALL_ZEROS) { /* If env is uninitialized set default */ + *c = cpu_to_be32(CRC32_DEFAULT_ENV); + /* Also copies terminating \0 as env is terminated by \0\0 */ + memcpy(p + 4, default_env, sizeof(default_env)); + if (s->blk && + blk_pwrite(s->blk, 0, sizeof(crc) + sizeof(default_env), p, 0) < 0 + ) { + error_report("%s: could not write %s", __func__, blk_name(s->blk)); + } + return; + } + if (*c == 0) { + *c = cpu_to_be32(crc32(0, p + 4, NVRAM_SIZE - 4)); + if (s->blk && 
blk_pwrite(s->blk, 0, 4, p, 0) < 0) { + error_report("%s: could not write %s", __func__, blk_name(s->blk)); + } + } + if (be32_to_cpu(*c) != crc) { + warn_report("NVRAM checksum mismatch"); + } +} + +static const Property nvram_properties[] = { + DEFINE_PROP_DRIVE("drive", A1NVRAMState, blk), +}; + +static void nvram_class_init(ObjectClass *oc, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = nvram_realize; + device_class_set_props(dc, nvram_properties); +} + +static const TypeInfo nvram_types[] = { + { + .name = TYPE_A1_NVRAM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(A1NVRAMState), + .class_init = nvram_class_init, + }, +}; +DEFINE_TYPES(nvram_types) + +struct boot_info { + hwaddr entry; + hwaddr stack; + hwaddr bd_info; + hwaddr initrd_start; + hwaddr initrd_end; + hwaddr cmdline_start; + hwaddr cmdline_end; +}; + +/* Board info struct from U-Boot */ +struct bd_info { + uint32_t bi_memstart; + uint32_t bi_memsize; + uint32_t bi_flashstart; + uint32_t bi_flashsize; + uint32_t bi_flashoffset; + uint32_t bi_sramstart; + uint32_t bi_sramsize; + uint32_t bi_bootflags; + uint32_t bi_ip_addr; + uint8_t bi_enetaddr[6]; + uint16_t bi_ethspeed; + uint32_t bi_intfreq; + uint32_t bi_busfreq; + uint32_t bi_baudrate; +} QEMU_PACKED; + +static void create_bd_info(hwaddr addr, ram_addr_t ram_size) +{ + struct bd_info *bd = g_new0(struct bd_info, 1); + + bd->bi_memsize = cpu_to_be32(ram_size); + bd->bi_flashstart = cpu_to_be32(PROM_ADDR); + bd->bi_flashsize = cpu_to_be32(1); /* match what U-Boot detects */ + bd->bi_bootflags = cpu_to_be32(1); + bd->bi_intfreq = cpu_to_be32(11.5 * BUS_FREQ_HZ); + bd->bi_busfreq = cpu_to_be32(BUS_FREQ_HZ); + bd->bi_baudrate = cpu_to_be32(115200); + + cpu_physical_memory_write(addr, bd, sizeof(*bd)); +} + static void amigaone_cpu_reset(void *opaque) { PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; cpu_reset(CPU(cpu)); - cpu_ppc_tb_reset(&cpu->env); + if (env->load_info) { + struct boot_info *bi = env->load_info; + + env->gpr[1] = bi->stack; + env->gpr[2] = 1024; + env->gpr[3] = bi->bd_info; + env->gpr[4] = bi->initrd_start; + env->gpr[5] = bi->initrd_end; + env->gpr[6] = bi->cmdline_start; + env->gpr[7] = bi->cmdline_end; + env->nip = bi->entry; + } + cpu_ppc_tb_reset(env); } static void fix_spd_data(uint8_t *spd) @@ -75,7 +273,9 @@ static void amigaone_init(MachineState *machine) DeviceState *dev; I2CBus *i2c_bus; uint8_t *spd_data; - int i; + DriveInfo *di; + hwaddr loadaddr; + struct boot_info *bi = NULL; /* init CPU */ cpu = POWERPC_CPU(cpu_create(machine->cpu_type)); @@ -97,9 +297,19 @@ static void amigaone_init(MachineState *machine) /* Firmware uses this area for startup */ mr = g_new(MemoryRegion, 1); memory_region_init_ram(mr, NULL, "init-cache", 32 * KiB, &error_fatal); - memory_region_add_subregion(get_system_memory(), 0x40000000, mr); + memory_region_add_subregion(get_system_memory(), INIT_RAM_ADDR, mr); } + /* nvram */ + dev = qdev_new(TYPE_A1_NVRAM); + di = drive_get(IF_MTD, 0, 0); + if (di) { + qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(di)); + } + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + memory_region_add_subregion(get_system_memory(), NVRAM_ADDR, + sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0)); + /* allocate and load firmware */ rom = g_new(MemoryRegion, 1); memory_region_init_rom(rom, NULL, "rom", PROM_SIZE, &error_fatal); @@ -122,7 +332,7 @@ static void amigaone_init(MachineState *machine) } /* Articia S */ - dev = sysbus_create_simple(TYPE_ARTICIA, 0xfe000000, 
NULL); + dev = sysbus_create_simple(TYPE_ARTICIA, ARTICIA_ADDR, NULL); i2c_bus = I2C_BUS(qdev_get_child_bus(dev, "smbus")); if (machine->ram_size > 512 * MiB) { @@ -139,12 +349,12 @@ static void amigaone_init(MachineState *machine) pci_mem = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); mr = g_new(MemoryRegion, 1); memory_region_init_alias(mr, OBJECT(dev), "pci-mem-low", pci_mem, - 0, 0x1000000); - memory_region_add_subregion(get_system_memory(), 0xfd000000, mr); + 0, PCI_LOW_SIZE); + memory_region_add_subregion(get_system_memory(), PCI_LOW_ADDR, mr); mr = g_new(MemoryRegion, 1); memory_region_init_alias(mr, OBJECT(dev), "pci-mem-high", pci_mem, - 0x80000000, 0x7d000000); - memory_region_add_subregion(get_system_memory(), 0x80000000, mr); + PCI_HIGH_ADDR, PCI_HIGH_SIZE); + memory_region_add_subregion(get_system_memory(), PCI_HIGH_ADDR, mr); pci_bus = PCI_BUS(qdev_get_child_bus(dev, "pci.0")); /* VIA VT82c686B South Bridge (multifunction PCI device) */ @@ -156,12 +366,62 @@ static void amigaone_init(MachineState *machine) qdev_connect_gpio_out_named(DEVICE(via), "intr", 0, qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT)); - for (i = 0; i < PCI_NUM_PINS; i++) { + for (int i = 0; i < PCI_NUM_PINS; i++) { qdev_connect_gpio_out(dev, i, qdev_get_gpio_in_named(DEVICE(via), "pirq", i)); } pci_ide_create_devs(PCI_DEVICE(object_resolve_path_component(via, "ide"))); pci_vga_init(pci_bus); + + if (!machine->kernel_filename) { + return; + } + + /* handle -kernel, -initrd, -append options and emulate U-Boot */ + bi = g_new0(struct boot_info, 1); + cpu->env.load_info = bi; + + loadaddr = MIN(machine->ram_size, 256 * MiB); + bi->bd_info = loadaddr - 8 * MiB; + create_bd_info(bi->bd_info, machine->ram_size); + bi->stack = bi->bd_info - 64 * KiB - 8; + + if (machine->kernel_cmdline && machine->kernel_cmdline[0]) { + size_t len = strlen(machine->kernel_cmdline); + + loadaddr = bi->bd_info + 1 * MiB; + cpu_physical_memory_write(loadaddr, machine->kernel_cmdline, len + 1); + bi->cmdline_start = loadaddr; + bi->cmdline_end = loadaddr + len + 1; /* including terminating '\0' */ + } + + sz = load_elf(machine->kernel_filename, NULL, NULL, NULL, + &bi->entry, &loadaddr, NULL, NULL, + ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0); + if (sz <= 0) { + sz = load_uimage(machine->kernel_filename, &bi->entry, &loadaddr, + NULL, NULL, NULL); + } + if (sz <= 0) { + error_report("Could not load kernel '%s'", + machine->kernel_filename); + exit(1); + } + loadaddr += sz; + + if (machine->initrd_filename) { + loadaddr = ROUND_UP(loadaddr + 4 * MiB, 4 * KiB); + loadaddr = MAX(loadaddr, INITRD_MIN_ADDR); + sz = load_image_targphys(machine->initrd_filename, loadaddr, + bi->bd_info - loadaddr); + if (sz <= 0) { + error_report("Could not load initrd '%s'", + machine->initrd_filename); + exit(1); + } + bi->initrd_start = loadaddr; + bi->initrd_end = loadaddr + sz; + } } static void amigaone_machine_init(MachineClass *mc) diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index 3bd12b54ab9..723c97fad2e 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -18,21 +18,22 @@ #include "qemu/datadir.h" #include "qemu/units.h" #include "qemu/guest-random.h" +#include "exec/target_page.h" #include "qapi/error.h" #include "e500.h" #include "e500-ccsr.h" #include "net/net.h" #include "qemu/config-file.h" #include "hw/block/flash.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/pci/pci.h" -#include "sysemu/block-backend-io.h" -#include "sysemu/sysemu.h" -#include "sysemu/kvm.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" 
+#include "system/block-backend-io.h" +#include "system/system.h" +#include "system/kvm.h" +#include "system/reset.h" +#include "system/runstate.h" #include "kvm_ppc.h" -#include "sysemu/device_tree.h" +#include "system/device_tree.h" #include "hw/ppc/openpic.h" #include "hw/ppc/openpic_kvm.h" #include "hw/ppc/ppc.h" @@ -78,8 +79,6 @@ #define MPC85XX_ESDHC_IRQ 72 #define RTC_REGS_OFFSET 0x68 -#define PLATFORM_CLK_FREQ_HZ (400 * 1000 * 1000) - struct boot_info { uint32_t dt_base; @@ -119,7 +118,7 @@ static uint32_t *pci_map_create(void *fdt, uint32_t mpic, int first_slot, } static void dt_serial_create(void *fdt, unsigned long long offset, - const char *soc, const char *mpic, + const char *soc, uint32_t freq, const char *mpic, const char *alias, int idx, bool defcon) { char *ser; @@ -130,7 +129,7 @@ static void dt_serial_create(void *fdt, unsigned long long offset, qemu_fdt_setprop_string(fdt, ser, "compatible", "ns16550"); qemu_fdt_setprop_cells(fdt, ser, "reg", offset, 0x100); qemu_fdt_setprop_cell(fdt, ser, "cell-index", idx); - qemu_fdt_setprop_cell(fdt, ser, "clock-frequency", PLATFORM_CLK_FREQ_HZ); + qemu_fdt_setprop_cell(fdt, ser, "clock-frequency", freq); qemu_fdt_setprop_cells(fdt, ser, "interrupts", 42, 2); qemu_fdt_setprop_phandle(fdt, ser, "interrupt-parent", mpic); qemu_fdt_setprop_string(fdt, "/aliases", alias, ser); @@ -203,6 +202,8 @@ static void dt_i2c_create(void *fdt, const char *soc, const char *mpic, qemu_fdt_setprop_cells(fdt, i2c, "cell-index", 0); qemu_fdt_setprop_cells(fdt, i2c, "interrupts", irq0, 0x2); qemu_fdt_setprop_phandle(fdt, i2c, "interrupt-parent", mpic); + qemu_fdt_setprop_cell(fdt, i2c, "#size-cells", 0); + qemu_fdt_setprop_cell(fdt, i2c, "#address-cells", 1); qemu_fdt_setprop_string(fdt, "/aliases", alias, i2c); g_free(i2c); @@ -379,8 +380,7 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms, int fdt_size; void *fdt; uint8_t hypercall[16]; - uint32_t clock_freq = PLATFORM_CLK_FREQ_HZ; - uint32_t tb_freq = PLATFORM_CLK_FREQ_HZ; + uint32_t clock_freq, tb_freq; int i; char compatible_sb[] = "fsl,mpc8544-immr\0simple-bus"; char *soc; @@ -408,7 +408,7 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms, if (dtb_file) { char *filename; - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_file); + filename = qemu_find_file(QEMU_FILE_TYPE_DTB, dtb_file); if (!filename) { goto out; } @@ -481,6 +481,9 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms, if (kvmppc_get_hasidle(env)) { qemu_fdt_setprop(fdt, "/hypervisor", "has-idle", NULL, 0); } + } else { + clock_freq = pmc->clock_freq; + tb_freq = pmc->tb_freq; } /* Create CPU nodes */ @@ -561,12 +564,12 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms, */ if (serial_hd(1)) { dt_serial_create(fdt, MPC8544_SERIAL1_REGS_OFFSET, - soc, mpic, "serial1", 1, false); + soc, pmc->clock_freq, mpic, "serial1", 1, false); } if (serial_hd(0)) { dt_serial_create(fdt, MPC8544_SERIAL0_REGS_OFFSET, - soc, mpic, "serial0", 0, true); + soc, pmc->clock_freq, mpic, "serial0", 0, true); } /* i2c */ @@ -656,7 +659,6 @@ static int ppce500_load_device_tree(PPCE500MachineState *pms, done: if (!dry_run) { - qemu_fdt_dumpdtb(fdt, fdt_size); cpu_physical_memory_write(addr, fdt, fdt_size); /* Set machine->fdt for 'dumpdtb' QMP/HMP command */ @@ -721,11 +723,21 @@ static int ppce500_prep_device_tree(PPCE500MachineState *machine, kernel_base, kernel_size, true); } -hwaddr booke206_page_size_to_tlb(uint64_t size) +static hwaddr booke206_page_size_to_tlb(uint64_t size) { return 63 - clz64(size 
/ KiB); } +void booke206_set_tlb(ppcmas_tlb_t *tlb, target_ulong va, hwaddr pa, + hwaddr len) +{ + tlb->mas1 = booke206_page_size_to_tlb(len) << MAS1_TSIZE_SHIFT; + tlb->mas1 |= MAS1_VALID; + tlb->mas2 = va & TARGET_PAGE_MASK; + tlb->mas7_3 = pa & TARGET_PAGE_MASK; + tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX; +} + static int booke206_initial_map_tsize(CPUPPCState *env) { struct boot_info *bi = env->load_info; @@ -751,25 +763,6 @@ static uint64_t mmubooke_initial_mapsize(CPUPPCState *env) return (1ULL << 10 << tsize); } -/* Create -kernel TLB entries for BookE. */ -static void mmubooke_create_initial_mapping(CPUPPCState *env) -{ - ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 0); - hwaddr size; - int ps; - - ps = booke206_initial_map_tsize(env); - size = (ps << MAS1_TSIZE_SHIFT); - tlb->mas1 = MAS1_VALID | size; - tlb->mas2 = 0; - tlb->mas7_3 = 0; - tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX; - -#ifdef CONFIG_KVM - env->tlb_dirty = true; -#endif -} - static void ppce500_cpu_reset_sec(void *opaque) { PowerPCCPU *cpu = opaque; @@ -786,6 +779,8 @@ static void ppce500_cpu_reset(void *opaque) CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; struct boot_info *bi = env->load_info; + uint64_t map_size = mmubooke_initial_mapsize(env); + ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 0); cpu_reset(cs); @@ -796,11 +791,15 @@ static void ppce500_cpu_reset(void *opaque) env->gpr[4] = 0; env->gpr[5] = 0; env->gpr[6] = EPAPR_MAGIC; - env->gpr[7] = mmubooke_initial_mapsize(env); + env->gpr[7] = map_size; env->gpr[8] = 0; env->gpr[9] = 0; env->nip = bi->entry; - mmubooke_create_initial_mapping(env); + /* create initial mapping */ + booke206_set_tlb(tlb, 0, 0, map_size); +#ifdef CONFIG_KVM + env->tlb_dirty = true; +#endif } static DeviceState *ppce500_init_mpic_qemu(PPCE500MachineState *pms, @@ -832,7 +831,7 @@ static DeviceState *ppce500_init_mpic_qemu(PPCE500MachineState *pms, } static DeviceState *ppce500_init_mpic_kvm(const PPCE500MachineClass *pmc, - IrqLines *irqs, Error **errp) + Error **errp) { #ifdef CONFIG_KVM DeviceState *dev; @@ -872,7 +871,7 @@ static DeviceState *ppce500_init_mpic(PPCE500MachineState *pms, Error *err = NULL; if (kvm_kernel_irqchip_allowed()) { - dev = ppce500_init_mpic_kvm(pmc, irqs, &err); + dev = ppce500_init_mpic_kvm(pmc, &err); } if (kvm_kernel_irqchip_required() && !dev) { error_reportf_err(err, @@ -932,7 +931,6 @@ void ppce500_init(MachineState *machine) CPUPPCState *firstenv = NULL; MemoryRegion *ccsr_addr_space; SysBusDevice *s; - PPCE500CCSRState *ccsr; I2CBus *i2c; irqs = g_new0(IrqLines, smp_cpus); @@ -969,7 +967,7 @@ void ppce500_init(MachineState *machine) env->spr_cb[SPR_BOOKE_PIR].default_value = cs->cpu_index = i; env->mpic_iack = pmc->ccsrbar_base + MPC8544_MPIC_REGS_OFFSET + 0xa0; - ppc_booke_timers_init(cpu, PLATFORM_CLK_FREQ_HZ, PPC_TIMER_E500); + ppc_booke_timers_init(cpu, pmc->tb_freq, PPC_TIMER_E500); /* Register reset handler */ if (!i) { @@ -994,10 +992,10 @@ void ppce500_init(MachineState *machine) memory_region_add_subregion(address_space_mem, 0, machine->ram); dev = qdev_new("e500-ccsr"); + s = SYS_BUS_DEVICE(dev); object_property_add_child(OBJECT(machine), "e500-ccsr", OBJECT(dev)); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - ccsr = CCSR(dev); - ccsr_addr_space = &ccsr->ccsr_space; + sysbus_realize_and_unref(s, &error_fatal); + ccsr_addr_space = sysbus_mmio_get_region(s, 0); memory_region_add_subregion(address_space_mem, pmc->ccsrbar_base, ccsr_addr_space); 
@@ -1024,7 +1022,7 @@ void ppce500_init(MachineState *machine) sysbus_connect_irq(s, 0, qdev_get_gpio_in(mpicdev, MPC8544_I2C_IRQ)); memory_region_add_subregion(ccsr_addr_space, MPC8544_I2C_REGS_OFFSET, sysbus_mmio_get_region(s, 0)); - i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c"); + i2c = I2C_BUS(qdev_get_child_bus(dev, "i2c")); i2c_slave_create_simple(i2c, "ds1338", RTC_REGS_OFFSET); /* eSDHC */ @@ -1045,6 +1043,7 @@ void ppce500_init(MachineState *machine) dev = qdev_new(TYPE_SYSBUS_SDHCI); qdev_prop_set_uint8(dev, "sd-spec-version", 2); qdev_prop_set_uint8(dev, "endianness", DEVICE_BIG_ENDIAN); + qdev_prop_set_uint8(dev, "vendor", SDHCI_VENDOR_FSL); s = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(s, &error_fatal); sysbus_connect_irq(s, 0, qdev_get_gpio_in(mpicdev, MPC85XX_ESDHC_IRQ)); @@ -1073,7 +1072,7 @@ void ppce500_init(MachineState *machine) memory_region_add_subregion(ccsr_addr_space, MPC8544_PCI_REGS_OFFSET, sysbus_mmio_get_region(s, 0)); - pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci.0"); + pci_bus = PCI_BUS(qdev_get_child_bus(dev, "pci.0")); if (!pci_bus) printf("couldn't create PCI controller!\n"); @@ -1195,7 +1194,7 @@ void ppce500_init(MachineState *machine) payload_size = load_elf(filename, NULL, NULL, NULL, &bios_entry, &loadaddr, NULL, NULL, - 1, PPC_ELF_MACHINE, 0, 0); + ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0); if (payload_size < 0) { /* * Hrm. No ELF image? Try a uImage, maybe someone is giving us an @@ -1284,6 +1283,7 @@ static void e500_ccsr_initfn(Object *obj) PPCE500CCSRState *ccsr = CCSR(obj); memory_region_init(&ccsr->ccsr_space, obj, "e500-ccsr", MPC8544_CCSRBAR_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(ccsr), &ccsr->ccsr_space); } static const TypeInfo e500_ccsr_info = { diff --git a/hw/ppc/e500.h b/hw/ppc/e500.h index 8c09ef92e44..00f490519c2 100644 --- a/hw/ppc/e500.h +++ b/hw/ppc/e500.h @@ -5,6 +5,8 @@ #include "hw/platform-bus.h" #include "qom/object.h" +#define PLATFORM_CLK_FREQ_HZ (400 * 1000 * 1000) + struct PPCE500MachineState { /*< private >*/ MachineState parent_obj; @@ -37,12 +39,12 @@ struct PPCE500MachineClass { hwaddr pci_mmio_base; hwaddr pci_mmio_bus_base; hwaddr spin_base; + uint32_t clock_freq; + uint32_t tb_freq; }; void ppce500_init(MachineState *machine); -hwaddr booke206_page_size_to_tlb(uint64_t size); - #define TYPE_PPCE500_MACHINE "ppce500-base-machine" OBJECT_DECLARE_TYPE(PPCE500MachineState, PPCE500MachineClass, PPCE500_MACHINE) diff --git a/hw/ppc/e500plat.c b/hw/ppc/e500plat.c index 7aa2f2107a7..4f1d659e723 100644 --- a/hw/ppc/e500plat.c +++ b/hw/ppc/e500plat.c @@ -13,8 +13,8 @@ #include "qemu/units.h" #include "e500.h" #include "hw/net/fsl_etsec/etsec.h" -#include "sysemu/device_tree.h" -#include "sysemu/kvm.h" +#include "system/device_tree.h" +#include "system/kvm.h" #include "hw/sysbus.h" #include "hw/pci/pci.h" #include "hw/ppc/openpic.h" @@ -68,7 +68,7 @@ HotplugHandler *e500plat_machine_get_hotpug_handler(MachineState *machine, #define TYPE_E500PLAT_MACHINE MACHINE_TYPE_NAME("ppce500") -static void e500plat_machine_class_init(ObjectClass *oc, void *data) +static void e500plat_machine_class_init(ObjectClass *oc, const void *data) { PPCE500MachineClass *pmc = PPCE500_MACHINE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); @@ -93,6 +93,8 @@ static void e500plat_machine_class_init(ObjectClass *oc, void *data) pmc->pci_mmio_base = 0xC00000000ULL; pmc->pci_mmio_bus_base = 0xE0000000ULL; pmc->spin_base = 0xFEF000000ULL; + pmc->clock_freq = PLATFORM_CLK_FREQ_HZ; + pmc->tb_freq = PLATFORM_CLK_FREQ_HZ; mc->desc = 
"generic paravirt e500 platform"; mc->init = e500plat_init; @@ -107,7 +109,7 @@ static const TypeInfo e500plat_info = { .name = TYPE_E500PLAT_MACHINE, .parent = TYPE_PPCE500_MACHINE, .class_init = e500plat_machine_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } } diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c index ff9e490c4e4..0b6e096116a 100644 --- a/hw/ppc/mac_newworld.c +++ b/hw/ppc/mac_newworld.c @@ -50,6 +50,7 @@ #include "qemu/datadir.h" #include "qemu/units.h" #include "qapi/error.h" +#include "exec/target_page.h" #include "hw/ppc/ppc.h" #include "hw/qdev-properties.h" #include "hw/nvram/mac_nvram.h" @@ -59,7 +60,7 @@ #include "hw/ppc/mac_dbdma.h" #include "hw/pci/pci.h" #include "net/net.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/nvram/fw_cfg.h" #include "hw/char/escc.h" #include "hw/misc/macio/macio.h" @@ -68,8 +69,8 @@ #include "hw/fw-path-provider.h" #include "elf.h" #include "qemu/error-report.h" -#include "sysemu/kvm.h" -#include "sysemu/reset.h" +#include "system/kvm.h" +#include "system/reset.h" #include "kvm_ppc.h" #include "hw/usb.h" #include "hw/sysbus.h" @@ -182,7 +183,8 @@ static void ppc_core99_init(MachineState *machine) if (filename) { /* Load OpenBIOS (ELF) */ bios_size = load_elf(filename, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); + NULL, NULL, NULL, + ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0); if (bios_size <= 0) { /* or load binary ROM image */ @@ -196,19 +198,14 @@ static void ppc_core99_init(MachineState *machine) } if (machine->kernel_filename) { - int bswap_needed = 0; - -#ifdef BSWAP_NEEDED - bswap_needed = 1; -#endif kernel_base = KERNEL_LOAD_ADDR; kernel_size = load_elf(machine->kernel_filename, NULL, translate_kernel_address, NULL, NULL, NULL, - NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); + NULL, NULL, ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0); if (kernel_size < 0) { kernel_size = load_aout(machine->kernel_filename, kernel_base, machine->ram_size - kernel_base, - bswap_needed, TARGET_PAGE_SIZE); + true, TARGET_PAGE_SIZE); } if (kernel_size < 0) { kernel_size = load_image_targphys(machine->kernel_filename, @@ -566,12 +563,12 @@ static int core99_kvm_type(MachineState *machine, const char *arg) return 2; } -static void core99_machine_class_init(ObjectClass *oc, void *data) +static void core99_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); - mc->desc = "Mac99 based PowerMAC"; + mc->desc = "Mac99 based PowerMac"; mc->init = ppc_core99_init; mc->block_default_type = IF_IDE; /* SMP is not supported currently */ @@ -634,8 +631,6 @@ static void core99_instance_init(Object *obj) object_property_set_description(obj, "via", "Set VIA configuration. 
" "Valid values are cuda, pmu and pmu-adb"); - - return; } static const TypeInfo core99_machine_info = { @@ -644,7 +639,7 @@ static const TypeInfo core99_machine_info = { .class_init = core99_machine_class_init, .instance_init = core99_instance_init, .instance_size = sizeof(Core99MachineState), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_FW_PATH_PROVIDER }, { } }, diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c index 1981d3d8f6e..40ae936ad8a 100644 --- a/hw/ppc/mac_oldworld.c +++ b/hw/ppc/mac_oldworld.c @@ -28,11 +28,12 @@ #include "qemu/datadir.h" #include "qemu/units.h" #include "qapi/error.h" +#include "exec/target_page.h" #include "hw/ppc/ppc.h" #include "hw/qdev-properties.h" #include "hw/boards.h" #include "hw/input/adb.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "net/net.h" #include "hw/isa/isa.h" #include "hw/pci/pci.h" @@ -45,8 +46,8 @@ #include "hw/fw-path-provider.h" #include "elf.h" #include "qemu/error-report.h" -#include "sysemu/kvm.h" -#include "sysemu/reset.h" +#include "system/kvm.h" +#include "system/reset.h" #include "kvm_ppc.h" #define MAX_IDE_BUS 2 @@ -136,7 +137,7 @@ static void ppc_heathrow_init(MachineState *machine) if (filename) { /* Load OpenBIOS (ELF) */ bios_size = load_elf(filename, NULL, NULL, NULL, NULL, &bios_addr, - NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); + NULL, NULL, ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0); /* Unfortunately, load_elf sign-extends reading elf32 */ bios_addr = (uint32_t)bios_addr; @@ -153,19 +154,14 @@ static void ppc_heathrow_init(MachineState *machine) } if (machine->kernel_filename) { - int bswap_needed = 0; - -#ifdef BSWAP_NEEDED - bswap_needed = 1; -#endif kernel_base = KERNEL_LOAD_ADDR; kernel_size = load_elf(machine->kernel_filename, NULL, translate_kernel_address, NULL, NULL, NULL, - NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); + NULL, NULL, ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0); if (kernel_size < 0) { kernel_size = load_aout(machine->kernel_filename, kernel_base, machine->ram_size - kernel_base, - bswap_needed, TARGET_PAGE_SIZE); + true, TARGET_PAGE_SIZE); } if (kernel_size < 0) { kernel_size = load_image_targphys(machine->kernel_filename, @@ -406,12 +402,12 @@ static int heathrow_kvm_type(MachineState *machine, const char *arg) return 2; } -static void heathrow_class_init(ObjectClass *oc, void *data) +static void heathrow_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); - mc->desc = "Heathrow based PowerMAC"; + mc->desc = "Heathrow based PowerMac"; mc->init = ppc_heathrow_init; mc->block_default_type = IF_IDE; /* SMP is not supported currently */ @@ -434,7 +430,7 @@ static const TypeInfo ppc_heathrow_machine_info = { .name = MACHINE_TYPE_NAME("g3beige"), .parent = TYPE_MACHINE, .class_init = heathrow_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_FW_PATH_PROVIDER }, { } }, diff --git a/hw/ppc/meson.build b/hw/ppc/meson.build index 7cd91898699..9893f8adebb 100644 --- a/hw/ppc/meson.build +++ b/hw/ppc/meson.build @@ -57,9 +57,6 @@ ppc_ss.add(when: 'CONFIG_POWERNV', if_true: files( 'pnv_n1_chiplet.c', )) # PowerPC 4xx boards -ppc_ss.add(when: 'CONFIG_PPC405', if_true: files( - 'ppc405_boards.c', - 'ppc405_uc.c')) ppc_ss.add(when: 'CONFIG_PPC440', if_true: files( 'ppc440_bamboo.c', 'ppc440_uc.c')) diff --git a/hw/ppc/mpc8544_guts.c b/hw/ppc/mpc8544_guts.c index e3540b02816..a25041e8367 100644 --- a/hw/ppc/mpc8544_guts.c +++ 
b/hw/ppc/mpc8544_guts.c @@ -18,9 +18,8 @@ */ #include "qemu/osdep.h" -#include "qemu/module.h" #include "qemu/log.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "cpu.h" #include "hw/sysbus.h" #include "qom/object.h" @@ -29,6 +28,12 @@ #define MPC8544_GUTS_RSTCR_RESET 0x02 #define MPC8544_GUTS_ADDR_PORPLLSR 0x00 +REG32(GUTS_PORPLLSR, 0x00) + FIELD(GUTS_PORPLLSR, E500_1_RATIO, 24, 6) + FIELD(GUTS_PORPLLSR, E500_0_RATIO, 16, 6) + FIELD(GUTS_PORPLLSR, DDR_RATIO, 9, 5) + FIELD(GUTS_PORPLLSR, PLAT_RATIO, 1, 5) + #define MPC8544_GUTS_ADDR_PORBMSR 0x04 #define MPC8544_GUTS_ADDR_PORIMPSCR 0x08 #define MPC8544_GUTS_ADDR_PORDEVSR 0x0C @@ -75,6 +80,12 @@ static uint64_t mpc8544_guts_read(void *opaque, hwaddr addr, addr &= MPC8544_GUTS_MMIO_SIZE - 1; switch (addr) { + case MPC8544_GUTS_ADDR_PORPLLSR: + value = FIELD_DP32(value, GUTS_PORPLLSR, E500_1_RATIO, 6); /* 3:1 */ + value = FIELD_DP32(value, GUTS_PORPLLSR, E500_0_RATIO, 6); /* 3:1 */ + value = FIELD_DP32(value, GUTS_PORPLLSR, DDR_RATIO, 12); /* 12:1 */ + value = FIELD_DP32(value, GUTS_PORPLLSR, PLAT_RATIO, 6); /* 6:1 */ + break; case MPC8544_GUTS_ADDR_PVR: value = env->spr[SPR_PVR]; break; @@ -129,16 +140,13 @@ static void mpc8544_guts_initfn(Object *obj) sysbus_init_mmio(d, &s->iomem); } -static const TypeInfo mpc8544_guts_info = { - .name = TYPE_MPC8544_GUTS, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(GutsState), - .instance_init = mpc8544_guts_initfn, +static const TypeInfo mpc8544_guts_types[] = { + { + .name = TYPE_MPC8544_GUTS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(GutsState), + .instance_init = mpc8544_guts_initfn, + }, }; -static void mpc8544_guts_register_types(void) -{ - type_register_static(&mpc8544_guts_info); -} - -type_init(mpc8544_guts_register_types) +DEFINE_TYPES(mpc8544_guts_types) diff --git a/hw/ppc/mpc8544ds.c b/hw/ppc/mpc8544ds.c index b7130903d66..582698559d2 100644 --- a/hw/ppc/mpc8544ds.c +++ b/hw/ppc/mpc8544ds.c @@ -11,7 +11,7 @@ #include "qemu/osdep.h" #include "e500.h" -#include "sysemu/device_tree.h" +#include "system/device_tree.h" #include "hw/ppc/openpic.h" #include "qemu/error-report.h" #include "qemu/units.h" @@ -37,7 +37,7 @@ static void mpc8544ds_init(MachineState *machine) ppce500_init(machine); } -static void mpc8544ds_machine_class_init(ObjectClass *oc, void *data) +static void mpc8544ds_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); PPCE500MachineClass *pmc = PPCE500_MACHINE_CLASS(oc); @@ -55,6 +55,8 @@ static void mpc8544ds_machine_class_init(ObjectClass *oc, void *data) pmc->pci_mmio_bus_base = 0xC0000000ULL; pmc->pci_pio_base = 0xE1000000ULL; pmc->spin_base = 0xEF000000ULL; + pmc->clock_freq = PLATFORM_CLK_FREQ_HZ; + pmc->tb_freq = PLATFORM_CLK_FREQ_HZ; mc->desc = "mpc8544ds"; mc->init = mpc8544ds_init; diff --git a/hw/ppc/pef.c b/hw/ppc/pef.c index 47553348b1e..254f5707876 100644 --- a/hw/ppc/pef.c +++ b/hw/ppc/pef.c @@ -12,9 +12,9 @@ #include "qapi/error.h" #include "qom/object_interfaces.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "migration/blocker.h" -#include "exec/confidential-guest-support.h" +#include "system/confidential-guest-support.h" #define TYPE_PEF_GUEST "pef-guest" OBJECT_DECLARE_SIMPLE_TYPE(PefGuest, PEF_GUEST) @@ -128,7 +128,7 @@ OBJECT_DEFINE_TYPE_WITH_INTERFACES(PefGuest, { TYPE_USER_CREATABLE }, { NULL }) -static void pef_guest_class_init(ObjectClass *oc, void *data) +static void pef_guest_class_init(ObjectClass *oc, const void *data) { 
ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc); diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c index 9b0a6b70ab5..e15cf964278 100644 --- a/hw/ppc/pegasos2.c +++ b/hw/ppc/pegasos2.c @@ -14,28 +14,29 @@ #include "hw/sysbus.h" #include "hw/pci/pci_host.h" #include "hw/irq.h" +#include "hw/or-irq.h" #include "hw/pci-host/mv64361.h" #include "hw/isa/vt82c686.h" #include "hw/ide/pci.h" #include "hw/i2c/smbus_eeprom.h" #include "hw/qdev-properties.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "sysemu/qtest.h" +#include "system/reset.h" +#include "system/runstate.h" +#include "system/qtest.h" #include "hw/boards.h" #include "hw/loader.h" #include "hw/fw-path-provider.h" #include "elf.h" #include "qemu/log.h" #include "qemu/error-report.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "kvm_ppc.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "qom/qom-qobject.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "trace.h" #include "qemu/datadir.h" -#include "sysemu/device_tree.h" +#include "system/device_tree.h" #include "hw/ppc/vof.h" #include <libfdt.h> @@ -73,8 +74,11 @@ OBJECT_DECLARE_TYPE(Pegasos2MachineState, MachineClass, PEGASOS2_MACHINE) struct Pegasos2MachineState { MachineState parent_obj; + PowerPCCPU *cpu; DeviceState *mv; + IRQState pci_irqs[PCI_NUM_PINS]; + OrIRQState orirq[PCI_NUM_PINS]; qemu_irq mv_pirq[PCI_NUM_PINS]; qemu_irq via_pirq[PCI_NUM_PINS]; Vof *vof; @@ -156,8 +160,8 @@ static void pegasos2_init(MachineState *machine) } memory_region_init_rom(rom, NULL, "pegasos2.rom", PROM_SIZE, &error_fatal); memory_region_add_subregion(get_system_memory(), PROM_ADDR, rom); - sz = load_elf(filename, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1, - PPC_ELF_MACHINE, 0, 0); + sz = load_elf(filename, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0); if (sz <= 0) { sz = load_image_targphys(filename, pm->vof ?
0 : PROM_ADDR, PROM_SIZE); } @@ -177,7 +181,6 @@ static void pegasos2_init(MachineState *machine) pm->mv_pirq[i] = qdev_get_gpio_in_named(pm->mv, "gpp", 12 + i); } pci_bus = mv64361_get_pci_bus(pm->mv, 1); - pci_bus_irqs(pci_bus, pegasos2_pci_irq, pm, PCI_NUM_PINS); /* VIA VT8231 South Bridge (multifunction PCI device) */ via = OBJECT(pci_new_multifunction(PCI_DEVFN(12, 0), TYPE_VT8231_ISA)); @@ -209,10 +212,35 @@ static void pegasos2_init(MachineState *machine) /* other PC hardware */ pci_vga_init(pci_bus); + /* PCI interrupt routing: lines from pci.0 and pci.1 are ORed */ + for (int h = 0; h < 2; h++) { + DeviceState *pd; + g_autofree const char *pn = g_strdup_printf("pcihost%d", h); + + pd = DEVICE(object_resolve_path_component(OBJECT(pm->mv), pn)); + assert(pd); + for (i = 0; i < PCI_NUM_PINS; i++) { + OrIRQState *ori = &pm->orirq[i]; + + if (h == 0) { + g_autofree const char *n = g_strdup_printf("pci-orirq[%d]", i); + + object_initialize_child_with_props(OBJECT(pm), n, + ori, sizeof(*ori), + TYPE_OR_IRQ, &error_fatal, + "num-lines", "2", NULL); + qdev_realize(DEVICE(ori), NULL, &error_fatal); + qemu_init_irq(&pm->pci_irqs[i], pegasos2_pci_irq, pm, i); + qdev_connect_gpio_out(DEVICE(ori), 0, &pm->pci_irqs[i]); + } + qdev_connect_gpio_out(pd, i, qdev_get_gpio_in(DEVICE(ori), h)); + } + } + if (machine->kernel_filename) { sz = load_elf(machine->kernel_filename, NULL, NULL, NULL, - &pm->kernel_entry, &pm->kernel_addr, NULL, NULL, 1, - PPC_ELF_MACHINE, 0, 0); + &pm->kernel_entry, &pm->kernel_addr, NULL, NULL, + ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0); if (sz <= 0) { error_report("Could not load kernel '%s'", machine->kernel_filename); @@ -291,14 +319,14 @@ static void pegasos2_superio_write(uint8_t addr, uint8_t val) cpu_physical_memory_write(PCI1_IO_BASE + 0x3f1, &val, 1); } -static void pegasos2_machine_reset(MachineState *machine, ShutdownCause reason) +static void pegasos2_machine_reset(MachineState *machine, ResetType type) { Pegasos2MachineState *pm = PEGASOS2_MACHINE(machine); void *fdt; uint64_t d[2]; int sz; - qemu_devices_reset(reason); + qemu_devices_reset(type); if (!pm->vof) { return; /* Firmware should set up machine so nothing to do */ } @@ -389,7 +417,6 @@ static void pegasos2_machine_reset(MachineState *machine, ShutdownCause reason) d[1] = cpu_to_be64(pm->kernel_size - (pm->kernel_entry - pm->kernel_addr)); qemu_fdt_setprop(fdt, "/chosen", "qemu,boot-kernel", d, sizeof(d)); - qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt)); g_free(pm->fdt_blob); pm->fdt_blob = fdt; @@ -561,7 +588,7 @@ static bool pegasos2_setprop(MachineState *ms, const char *path, return true; } -static void pegasos2_machine_class_init(ObjectClass *oc, void *data) +static void pegasos2_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc); @@ -592,7 +619,7 @@ static const TypeInfo pegasos2_machine_info = { .parent = TYPE_MACHINE, .class_init = pegasos2_machine_class_init, .instance_size = sizeof(Pegasos2MachineState), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PPC_VIRTUAL_HYPERVISOR }, { TYPE_VOF_MACHINE_IF }, { } diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 3526852685b..d84c9067edb 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -1,7 +1,9 @@ /* * QEMU PowerPC PowerNV machine model * - * Copyright (c) 2016, IBM Corporation. + * Copyright (c) 2016-2024, IBM Corporation. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -22,14 +24,14 @@ #include "qemu/units.h" #include "qemu/cutils.h" #include "qapi/error.h" -#include "sysemu/qtest.h" -#include "sysemu/sysemu.h" -#include "sysemu/numa.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "sysemu/cpus.h" -#include "sysemu/device_tree.h" -#include "sysemu/hw_accel.h" +#include "system/qtest.h" +#include "system/system.h" +#include "system/numa.h" +#include "system/reset.h" +#include "system/runstate.h" +#include "system/cpus.h" +#include "system/device_tree.h" +#include "system/hw_accel.h" #include "target/ppc/cpu.h" #include "hw/ppc/fdt.h" #include "hw/ppc/ppc.h" @@ -53,7 +55,7 @@ #include "hw/ppc/pnv_pnor.h" #include "hw/isa/isa.h" -#include "hw/char/serial.h" +#include "hw/char/serial-isa.h" #include "hw/rtc/mc146818rtc.h" #include <libfdt.h> @@ -64,6 +66,8 @@ #define FW_LOAD_ADDR 0x0 #define FW_MAX_SIZE (16 * MiB) +#define PNOR_FILE_NAME "pnv-pnor.bin" + #define KERNEL_LOAD_ADDR 0x20000000 #define KERNEL_MAX_SIZE (128 * MiB) #define INITRD_LOAD_ADDR 0x28000000 @@ -709,13 +713,13 @@ static void pnv_powerdown_notify(Notifier *n, void *opaque) } } -static void pnv_reset(MachineState *machine, ShutdownCause reason) +static void pnv_reset(MachineState *machine, ResetType type) { PnvMachineState *pnv = PNV_MACHINE(machine); IPMIBmc *bmc; void *fdt; - qemu_devices_reset(reason); + qemu_devices_reset(type); /* * The machine should provide by default an internal BMC simulator. @@ -736,21 +740,26 @@ static void pnv_reset(MachineState *machine, ShutdownCause reason) } } - fdt = pnv_dt_create(machine); - - /* Pack resulting tree */ - _FDT((fdt_pack(fdt))); + if (machine->fdt) { + fdt = machine->fdt; + } else { + fdt = pnv_dt_create(machine); + /* Pack resulting tree */ + _FDT((fdt_pack(fdt))); + } - qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt)); cpu_physical_memory_write(PNV_FDT_ADDR, fdt, fdt_totalsize(fdt)); - /* - * Set machine->fdt for 'dumpdtb' QMP/HMP command. Free - * the existing machine->fdt to avoid leaking it during - * a reset. - */ - g_free(machine->fdt); - machine->fdt = fdt; + /* Update machine->fdt with latest fdt */ + if (machine->fdt != fdt) { + /* + * Set machine->fdt for 'dumpdtb' QMP/HMP command. Free + * the existing machine->fdt to avoid leaking it during + * a reset.
+ */ + g_free(machine->fdt); + machine->fdt = fdt; + } } static ISABus *pnv_chip_power8_isa_create(PnvChip *chip, Error **errp) @@ -936,7 +945,7 @@ static void pnv_init(MachineState *machine) uint64_t chip_ram_start = 0; int i; char *chip_typename; - DriveInfo *pnor = drive_get(IF_MTD, 0, 0); + DriveInfo *pnor; DeviceState *dev; if (kvm_enabled()) { @@ -952,12 +961,32 @@ static void pnv_init(MachineState *machine) g_free(sz); exit(EXIT_FAILURE); } + + /* checks for invalid option combinations */ + if (machine->dtb && (strlen(machine->kernel_cmdline) != 0)) { + error_report("-append and -dtb cannot be used together, as passed" + " command line is ignored in case of custom dtb"); + exit(EXIT_FAILURE); + } + memory_region_add_subregion(get_system_memory(), 0, machine->ram); /* * Create our simple PNOR device */ dev = qdev_new(TYPE_PNV_PNOR); + pnor = drive_get(IF_MTD, 0, 0); + if (!pnor && defaults_enabled()) { + fw_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, PNOR_FILE_NAME); + if (!fw_filename) { + warn_report("Could not find PNOR '%s'", PNOR_FILE_NAME); + } else { + QemuOpts *opts; + opts = drive_add(IF_MTD, -1, fw_filename, "format=raw,readonly=on"); + pnor = drive_new(opts, IF_MTD, &error_fatal); + g_free(fw_filename); + } + } if (pnor) { qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(pnor)); } @@ -1003,6 +1032,21 @@ static void pnv_init(MachineState *machine) } } + /* load dtb if passed */ + if (machine->dtb) { + int fdt_size; + + warn_report("with manually passed dtb, some options like '-append'" + " will get ignored and the dtb passed will be used as-is"); + + /* read the file 'machine->dtb', and load it into 'fdt' buffer */ + machine->fdt = load_device_tree(machine->dtb, &fdt_size); + if (!machine->fdt) { + error_report("Could not load dtb '%s'", machine->dtb); + exit(1); + } + } + /* MSIs are supported on this platform */ msi_nonbroken = true; @@ -1147,7 +1191,7 @@ static void pnv_init(MachineState *machine) * Since we can not reach the remote BMC machine with LPC memops, * map it always for now. 
*/ - memory_region_add_subregion(pnv->chips[0]->fw_mr, PNOR_SPI_OFFSET, + memory_region_add_subregion(pnv->chips[0]->fw_mr, pnv->pnor->lpc_address, &pnv->pnor->mmio); /* @@ -1527,7 +1571,21 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp) return; } + /* HOMER (must be created before OCC) */ + object_property_set_link(OBJECT(&chip8->homer), "chip", OBJECT(chip), + &error_abort); + if (!qdev_realize(DEVICE(&chip8->homer), NULL, errp)) { + return; + } + /* Homer Xscom region */ + pnv_xscom_add_subregion(chip, PNV_XSCOM_PBA_BASE, &chip8->homer.pba_regs); + /* Homer RAM region */ + memory_region_add_subregion(get_system_memory(), chip8->homer.base, + &chip8->homer.mem); + /* Create the simplified OCC model */ + object_property_set_link(OBJECT(&chip8->occ), "homer", + OBJECT(&chip8->homer), &error_abort); if (!qdev_realize(DEVICE(&chip8->occ), NULL, errp)) { return; } @@ -1539,19 +1597,6 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(get_system_memory(), PNV_OCC_SENSOR_BASE(chip), &chip8->occ.sram_regs); - /* HOMER */ - object_property_set_link(OBJECT(&chip8->homer), "chip", OBJECT(chip), - &error_abort); - if (!qdev_realize(DEVICE(&chip8->homer), NULL, errp)) { - return; - } - /* Homer Xscom region */ - pnv_xscom_add_subregion(chip, PNV_XSCOM_PBA_BASE, &chip8->homer.pba_regs); - - /* Homer mmio region */ - memory_region_add_subregion(get_system_memory(), PNV_HOMER_BASE(chip), - &chip8->homer.regs); - /* PHB controllers */ for (i = 0; i < chip8->num_phbs; i++) { PnvPHB *phb = chip8->phbs[i]; @@ -1573,7 +1618,7 @@ static uint32_t pnv_chip_power8_xscom_pcba(PnvChip *chip, uint64_t addr) return ((addr >> 4) & ~0xfull) | ((addr >> 3) & 0xf); } -static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data) +static void pnv_chip_power8e_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvChipClass *k = PNV_CHIP_CLASS(klass); @@ -1597,7 +1642,7 @@ static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data) &k->parent_realize); } -static void pnv_chip_power8_class_init(ObjectClass *klass, void *data) +static void pnv_chip_power8_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvChipClass *k = PNV_CHIP_CLASS(klass); @@ -1621,7 +1666,7 @@ static void pnv_chip_power8_class_init(ObjectClass *klass, void *data) &k->parent_realize); } -static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data) +static void pnv_chip_power8nvl_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvChipClass *k = PNV_CHIP_CLASS(klass); @@ -1725,6 +1770,7 @@ static void pnv_chip_power9_pec_realize(PnvChip *chip, Error **errp) for (i = 0; i < chip->num_pecs; i++) { PnvPhb4PecState *pec = &chip9->pecs[i]; PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec); + uint32_t pec_cplt_base; uint32_t pec_nest_base; uint32_t pec_pci_base; @@ -1737,9 +1783,12 @@ static void pnv_chip_power9_pec_realize(PnvChip *chip, Error **errp) return; } + pec_cplt_base = pecc->xscom_cplt_base(pec); pec_nest_base = pecc->xscom_nest_base(pec); pec_pci_base = pecc->xscom_pci_base(pec); + pnv_xscom_add_subregion(chip, pec_cplt_base, + &pec->nest_pervasive.xscom_ctrl_regs_mr); pnv_xscom_add_subregion(chip, pec_nest_base, &pec->nest_regs_mr); pnv_xscom_add_subregion(chip, pec_pci_base, &pec->pci_regs_mr); } @@ -1831,18 +1880,6 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) pnv_xscom_add_subregion(chip, 
PNV9_XSCOM_CHIPTOD_BASE, &chip9->chiptod.xscom_regs); - /* Create the simplified OCC model */ - if (!qdev_realize(DEVICE(&chip9->occ), NULL, errp)) { - return; - } - pnv_xscom_add_subregion(chip, PNV9_XSCOM_OCC_BASE, &chip9->occ.xscom_regs); - qdev_connect_gpio_out(DEVICE(&chip9->occ), 0, qdev_get_gpio_in( - DEVICE(psi9), PSIHB9_IRQ_OCC)); - - /* OCC SRAM model */ - memory_region_add_subregion(get_system_memory(), PNV9_OCC_SENSOR_BASE(chip), - &chip9->occ.sram_regs); - /* SBE */ if (!qdev_realize(DEVICE(&chip9->sbe), NULL, errp)) { return; @@ -1854,7 +1891,7 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) qdev_connect_gpio_out(DEVICE(&chip9->sbe), 0, qdev_get_gpio_in( DEVICE(psi9), PSIHB9_IRQ_PSU)); - /* HOMER */ + /* HOMER (must be created before OCC) */ object_property_set_link(OBJECT(&chip9->homer), "chip", OBJECT(chip), &error_abort); if (!qdev_realize(DEVICE(&chip9->homer), NULL, errp)) { @@ -1862,10 +1899,23 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) } /* Homer Xscom region */ pnv_xscom_add_subregion(chip, PNV9_XSCOM_PBA_BASE, &chip9->homer.pba_regs); + /* Homer RAM region */ + memory_region_add_subregion(get_system_memory(), chip9->homer.base, + &chip9->homer.mem); - /* Homer mmio region */ - memory_region_add_subregion(get_system_memory(), PNV9_HOMER_BASE(chip), - &chip9->homer.regs); + /* Create the simplified OCC model */ + object_property_set_link(OBJECT(&chip9->occ), "homer", + OBJECT(&chip9->homer), &error_abort); + if (!qdev_realize(DEVICE(&chip9->occ), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV9_XSCOM_OCC_BASE, &chip9->occ.xscom_regs); + qdev_connect_gpio_out(DEVICE(&chip9->occ), 0, qdev_get_gpio_in( + DEVICE(psi9), PSIHB9_IRQ_OCC)); + + /* OCC SRAM model */ + memory_region_add_subregion(get_system_memory(), PNV9_OCC_SENSOR_BASE(chip), + &chip9->occ.sram_regs); /* PEC PHBs */ pnv_chip_power9_pec_realize(chip, &local_err); @@ -1904,7 +1954,7 @@ static uint32_t pnv_chip_power9_xscom_pcba(PnvChip *chip, uint64_t addr) return addr >> 3; } -static void pnv_chip_power9_class_init(ObjectClass *klass, void *data) +static void pnv_chip_power9_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvChipClass *k = PNV_CHIP_CLASS(klass); @@ -1999,6 +2049,7 @@ static void pnv_chip_power10_phb_realize(PnvChip *chip, Error **errp) for (i = 0; i < chip->num_pecs; i++) { PnvPhb4PecState *pec = &chip10->pecs[i]; PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec); + uint32_t pec_cplt_base; uint32_t pec_nest_base; uint32_t pec_pci_base; @@ -2011,9 +2062,12 @@ static void pnv_chip_power10_phb_realize(PnvChip *chip, Error **errp) return; } + pec_cplt_base = pecc->xscom_cplt_base(pec); pec_nest_base = pecc->xscom_nest_base(pec); pec_pci_base = pecc->xscom_pci_base(pec); + pnv_xscom_add_subregion(chip, pec_cplt_base, + &pec->nest_pervasive.xscom_ctrl_regs_mr); pnv_xscom_add_subregion(chip, pec_nest_base, &pec->nest_regs_mr); pnv_xscom_add_subregion(chip, pec_pci_base, &pec->pci_regs_mr); } @@ -2108,7 +2162,22 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) pnv_xscom_add_subregion(chip, PNV10_XSCOM_CHIPTOD_BASE, &chip10->chiptod.xscom_regs); + /* HOMER (must be created before OCC) */ + object_property_set_link(OBJECT(&chip10->homer), "chip", OBJECT(chip), + &error_abort); + if (!qdev_realize(DEVICE(&chip10->homer), NULL, errp)) { + return; + } + /* Homer Xscom region */ + pnv_xscom_add_subregion(chip, PNV10_XSCOM_PBA_BASE, + &chip10->homer.pba_regs); + /* Homer RAM 
region */ + memory_region_add_subregion(get_system_memory(), chip10->homer.base, + &chip10->homer.mem); + /* Create the simplified OCC model */ + object_property_set_link(OBJECT(&chip10->occ), "homer", + OBJECT(&chip10->homer), &error_abort); if (!qdev_realize(DEVICE(&chip10->occ), NULL, errp)) { return; } @@ -2133,20 +2202,6 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) qdev_connect_gpio_out(DEVICE(&chip10->sbe), 0, qdev_get_gpio_in( DEVICE(&chip10->psi), PSIHB9_IRQ_PSU)); - /* HOMER */ - object_property_set_link(OBJECT(&chip10->homer), "chip", OBJECT(chip), - &error_abort); - if (!qdev_realize(DEVICE(&chip10->homer), NULL, errp)) { - return; - } - /* Homer Xscom region */ - pnv_xscom_add_subregion(chip, PNV10_XSCOM_PBA_BASE, - &chip10->homer.pba_regs); - - /* Homer mmio region */ - memory_region_add_subregion(get_system_memory(), PNV10_HOMER_BASE(chip), - &chip10->homer.regs); - /* N1 chiplet */ if (!qdev_realize(DEVICE(&chip10->n1_chiplet), NULL, errp)) { return; @@ -2197,6 +2252,8 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) /* pib_spic[2] connected to 25csm04 which implements 1 byte transfer */ object_property_set_int(OBJECT(&chip10->pib_spic[i]), "transfer_len", (i == 2) ? 1 : 4, &error_fatal); + object_property_set_int(OBJECT(&chip10->pib_spic[i]), "chip-id", + chip->chip_id, &error_fatal); if (!sysbus_realize(SYS_BUS_DEVICE(OBJECT (&chip10->pib_spic[i])), errp)) { return; @@ -2245,7 +2302,7 @@ static uint32_t pnv_chip_power10_xscom_pcba(PnvChip *chip, uint64_t addr) return addr >> 3; } -static void pnv_chip_power10_class_init(ObjectClass *klass, void *data) +static void pnv_chip_power10_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvChipClass *k = PNV_CHIP_CLASS(klass); @@ -2393,7 +2450,7 @@ static void pnv_chip_realize(DeviceState *dev, Error **errp) } } -static Property pnv_chip_properties[] = { +static const Property pnv_chip_properties[] = { DEFINE_PROP_UINT32("chip-id", PnvChip, chip_id, 0), DEFINE_PROP_UINT64("ram-start", PnvChip, ram_start, 0), DEFINE_PROP_UINT64("ram-size", PnvChip, ram_size, 0), @@ -2402,10 +2459,9 @@ static Property pnv_chip_properties[] = { DEFINE_PROP_UINT32("nr-threads", PnvChip, nr_threads, 1), DEFINE_PROP_BOOL("big-core", PnvChip, big_core, false), DEFINE_PROP_BOOL("lpar-per-core", PnvChip, lpar_per_core, false), - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_chip_class_init(ObjectClass *klass, void *data) +static void pnv_chip_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -2552,62 +2608,64 @@ static void pnv_pic_print_info(InterruptStatsProvider *obj, GString *buf) } } -static int pnv_match_nvt(XiveFabric *xfb, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool cam_ignore, uint8_t priority, - uint32_t logic_serv, - XiveTCTXMatch *match) +static bool pnv_match_nvt(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, + XiveTCTXMatch *match) { PnvMachineState *pnv = PNV_MACHINE(xfb); - int total_count = 0; int i; for (i = 0; i < pnv->num_chips; i++) { Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]); XivePresenter *xptr = XIVE_PRESENTER(&chip9->xive); XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); - int count; - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, - priority, logic_serv, match); + xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, + cam_ignore, priority, logic_serv, 
match); + } - if (count < 0) { - return count; - } + return !!match->count; +} + +static bool pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, + XiveTCTXMatch *match) +{ + PnvMachineState *pnv = PNV_MACHINE(xfb); + int i; + + for (i = 0; i < pnv->num_chips; i++) { + Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); + XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive); + XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); - total_count += count; + xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, + cam_ignore, priority, logic_serv, match); } - return total_count; + return !!match->count; } -static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, +static int pnv10_xive_broadcast(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx, - bool cam_ignore, uint8_t priority, - uint32_t logic_serv, - XiveTCTXMatch *match) + bool crowd, bool cam_ignore, + uint8_t priority) { PnvMachineState *pnv = PNV_MACHINE(xfb); - int total_count = 0; int i; for (i = 0; i < pnv->num_chips; i++) { Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive); XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); - int count; - - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, - priority, logic_serv, match); - - if (count < 0) { - return count; - } - total_count += count; + xpc->broadcast(xptr, nvt_blk, nvt_idx, crowd, cam_ignore, priority); } - - return total_count; + return 0; } static bool pnv_machine_get_big_core(Object *obj, Error **errp) @@ -2650,7 +2708,7 @@ static void pnv_machine_set_hb(Object *obj, bool value, Error **errp) } } -static void pnv_machine_power8_class_init(ObjectClass *oc, void *data) +static void pnv_machine_power8_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); XICSFabricClass *xic = XICS_FABRIC_CLASS(oc); @@ -2679,7 +2737,7 @@ static void pnv_machine_power8_class_init(ObjectClass *oc, void *data) machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB); } -static void pnv_machine_power9_class_init(ObjectClass *oc, void *data) +static void pnv_machine_power9_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc); @@ -2718,7 +2776,7 @@ static void pnv_machine_power9_class_init(ObjectClass *oc, void *data) "Use 1 LPAR per core mode"); } -static void pnv_machine_p10_common_class_init(ObjectClass *oc, void *data) +static void pnv_machine_p10_common_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc); @@ -2743,11 +2801,12 @@ static void pnv_machine_p10_common_class_init(ObjectClass *oc, void *data) pmc->dt_power_mgt = pnv_dt_power_mgt; xfc->match_nvt = pnv10_xive_match_nvt; + xfc->broadcast = pnv10_xive_broadcast; machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB); } -static void pnv_machine_power10_class_init(ObjectClass *oc, void *data) +static void pnv_machine_power10_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -2772,7 +2831,8 @@ static void pnv_machine_power10_class_init(ObjectClass *oc, void *data) "Use 1 LPAR per core mode"); } -static void pnv_machine_p10_rainier_class_init(ObjectClass *oc, void *data) +static void pnv_machine_p10_rainier_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); PnvMachineClass *pmc = 
PNV_MACHINE_CLASS(oc); @@ -2837,7 +2897,7 @@ static void pnv_nmi(NMIState *n, int cpu_index, Error **errp) } } -static void pnv_machine_class_init(ObjectClass *oc, void *data) +static void pnv_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc); @@ -2897,7 +2957,7 @@ static const TypeInfo types[] = { .name = MACHINE_TYPE_NAME("powernv10"), .parent = TYPE_PNV_MACHINE, .class_init = pnv_machine_power10_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_XIVE_FABRIC }, { }, }, @@ -2906,7 +2966,7 @@ static const TypeInfo types[] = { .name = MACHINE_TYPE_NAME("powernv9"), .parent = TYPE_PNV_MACHINE, .class_init = pnv_machine_power9_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_XIVE_FABRIC }, { }, }, @@ -2915,7 +2975,7 @@ static const TypeInfo types[] = { .name = MACHINE_TYPE_NAME("powernv8"), .parent = TYPE_PNV_MACHINE, .class_init = pnv_machine_power8_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_XICS_FABRIC }, { }, }, @@ -2927,7 +2987,7 @@ static const TypeInfo types[] = { .instance_size = sizeof(PnvMachineState), .class_init = pnv_machine_class_init, .class_size = sizeof(PnvMachineClass), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_INTERRUPT_STATS_PROVIDER }, { TYPE_NMI }, { }, diff --git a/hw/ppc/pnv_adu.c b/hw/ppc/pnv_adu.c index 81b7d6e5267..005fbda4750 100644 --- a/hw/ppc/pnv_adu.c +++ b/hw/ppc/pnv_adu.c @@ -116,6 +116,12 @@ static void pnv_adu_xscom_write(void *opaque, hwaddr addr, uint64_t val, uint32_t lpc_size = lpc_cmd_size(adu); uint64_t data = 0; + if (!is_power_of_2(lpc_size) || lpc_size > sizeof(data)) { + qemu_log_mask(LOG_GUEST_ERROR, "ADU: Unsupported LPC access " + "size:%" PRId32 "\n", lpc_size); + break; + } + pnv_lpc_opb_read(adu->lpc, lpc_addr, (void *)&data, lpc_size); /* @@ -135,6 +141,12 @@ static void pnv_adu_xscom_write(void *opaque, hwaddr addr, uint64_t val, uint32_t lpc_size = lpc_cmd_size(adu); uint64_t data; + if (!is_power_of_2(lpc_size) || lpc_size > sizeof(data)) { + qemu_log_mask(LOG_GUEST_ERROR, "ADU: Unsupported LPC access " + "size:%" PRId32 "\n", lpc_size); + break; + } + data = cpu_to_be64(val) >> ((lpc_addr & 7) * 8); /* See above */ pnv_lpc_opb_write(adu->lpc, lpc_addr, (void *)&data, lpc_size); } @@ -173,12 +185,11 @@ static void pnv_adu_realize(DeviceState *dev, Error **errp) PNV9_XSCOM_ADU_SIZE); } -static Property pnv_adu_properties[] = { +static const Property pnv_adu_properties[] = { DEFINE_PROP_LINK("lpc", PnvADU, lpc, TYPE_PNV_LPC, PnvLpcController *), - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_adu_class_init(ObjectClass *klass, void *data) +static void pnv_adu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -193,7 +204,7 @@ static const TypeInfo pnv_adu_type_info = { .parent = TYPE_DEVICE, .instance_size = sizeof(PnvADU), .class_init = pnv_adu_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } }, }; diff --git a/hw/ppc/pnv_bmc.c b/hw/ppc/pnv_bmc.c index 0c1274df21a..fb70a8c1f22 100644 --- a/hw/ppc/pnv_bmc.c +++ b/hw/ppc/pnv_bmc.c @@ -174,8 +174,8 @@ static void hiomap_cmd(IPMIBmcSim *ibs, uint8_t *cmd, unsigned int cmd_len, { PnvPnor *pnor = PNV_PNOR(object_property_get_link(OBJECT(ibs), "pnor", &error_abort)); + uint32_t 
pnor_addr = pnor->lpc_address; uint32_t pnor_size = pnor->size; - uint32_t pnor_addr = PNOR_SPI_OFFSET; bool readonly = false; rsp_buffer_push(rsp, cmd[2]); @@ -251,10 +251,38 @@ static const IPMINetfn hiomap_netfn = { void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor) { + uint32_t pnor_addr = pnor->lpc_address; + uint32_t pnor_size = pnor->size; + if (!pnv_bmc_is_simulator(bmc)) { return; } + /* + * The HIOMAP protocol uses block units and 16-bit addressing. + * Prevent overflow or misalign. + */ + if (pnor_addr >= 1U << (BLOCK_SHIFT + 16)) { + warn_report("PNOR address is larger than 2^%d, disabling PNOR", + BLOCK_SHIFT + 16); + return; + } + if (pnor_addr & ((1U << BLOCK_SHIFT) - 1)) { + warn_report("PNOR address is not aligned to 2^%d, disabling PNOR", + BLOCK_SHIFT); + return; + } + if (pnor_size > 1U << (BLOCK_SHIFT + 16)) { + warn_report("PNOR size is larger than 2^%d, disabling PNOR", + BLOCK_SHIFT + 16); + return; + } + if (pnor_size & ((1U << BLOCK_SHIFT) - 1)) { + warn_report("PNOR size is not aligned to 2^%d, disabling PNOR", + BLOCK_SHIFT); + return; + } + object_ref(OBJECT(pnor)); object_property_add_const_link(OBJECT(bmc), "pnor", OBJECT(pnor)); diff --git a/hw/ppc/pnv_chiptod.c b/hw/ppc/pnv_chiptod.c index 1e41fe557ab..b9e9c7ba3db 100644 --- a/hw/ppc/pnv_chiptod.c +++ b/hw/ppc/pnv_chiptod.c @@ -23,7 +23,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "target/ppc/cpu.h" #include "qapi/error.h" #include "qemu/log.h" @@ -450,14 +450,13 @@ static int pnv_chiptod_power9_dt_xscom(PnvXScomInterface *dev, void *fdt, return pnv_chiptod_dt_xscom(dev, fdt, xscom_offset, compat, sizeof(compat)); } -static Property pnv_chiptod_properties[] = { +static const Property pnv_chiptod_properties[] = { DEFINE_PROP_BOOL("primary", PnvChipTOD, primary, false), DEFINE_PROP_BOOL("secondary", PnvChipTOD, secondary, false), DEFINE_PROP_LINK("chip", PnvChipTOD , chip, TYPE_PNV_CHIP, PnvChip *), - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_chiptod_power9_class_init(ObjectClass *klass, void *data) +static void pnv_chiptod_power9_class_init(ObjectClass *klass, const void *data) { PnvChipTODClass *pctc = PNV_CHIPTOD_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -479,7 +478,7 @@ static const TypeInfo pnv_chiptod_power9_type_info = { .parent = TYPE_PNV_CHIPTOD, .instance_size = sizeof(PnvChipTOD), .class_init = pnv_chiptod_power9_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } @@ -493,7 +492,7 @@ static int pnv_chiptod_power10_dt_xscom(PnvXScomInterface *dev, void *fdt, return pnv_chiptod_dt_xscom(dev, fdt, xscom_offset, compat, sizeof(compat)); } -static void pnv_chiptod_power10_class_init(ObjectClass *klass, void *data) +static void pnv_chiptod_power10_class_init(ObjectClass *klass, const void *data) { PnvChipTODClass *pctc = PNV_CHIPTOD_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -515,7 +514,7 @@ static const TypeInfo pnv_chiptod_power10_type_info = { .parent = TYPE_PNV_CHIPTOD, .instance_size = sizeof(PnvChipTOD), .class_init = pnv_chiptod_power10_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } @@ -556,7 +555,7 @@ static void pnv_chiptod_unrealize(DeviceState *dev) qemu_unregister_reset(pnv_chiptod_reset, chiptod); } -static void pnv_chiptod_class_init(ObjectClass *klass, void *data) +static void pnv_chiptod_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = 
DEVICE_CLASS(klass); diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c index a30693990b2..08c20224b97 100644 --- a/hw/ppc/pnv_core.c +++ b/hw/ppc/pnv_core.c @@ -18,7 +18,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "qapi/error.h" #include "qemu/log.h" #include "qemu/module.h" @@ -217,8 +217,8 @@ static uint64_t pnv_core_power10_xscom_read(void *opaque, hwaddr addr, case PNV10_XSCOM_EC_CORE_RAS_STATUS: for (i = 0; i < nr_threads; i++) { PowerPCCPU *cpu = pc->threads[i]; - CPUState *cs = CPU(cpu); - if (cs->stopped) { + CPUPPCState *env = &cpu->env; + if (env->quiesced) { val |= PPC_BIT(0 + 8 * i) | PPC_BIT(1 + 8 * i); } } @@ -244,20 +244,29 @@ static void pnv_core_power10_xscom_write(void *opaque, hwaddr addr, for (i = 0; i < nr_threads; i++) { PowerPCCPU *cpu = pc->threads[i]; CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; if (val & PPC_BIT(7 + 8 * i)) { /* stop */ val &= ~PPC_BIT(7 + 8 * i); + env->quiesced = true; + ppc_maybe_interrupt(env); cpu_pause(cs); } if (val & PPC_BIT(6 + 8 * i)) { /* start */ val &= ~PPC_BIT(6 + 8 * i); + env->quiesced = false; + ppc_maybe_interrupt(env); cpu_resume(cs); } if (val & PPC_BIT(4 + 8 * i)) { /* sreset */ val &= ~PPC_BIT(4 + 8 * i); + env->quiesced = false; + ppc_maybe_interrupt(env); pnv_cpu_do_nmi_resume(cs); } if (val & PPC_BIT(3 + 8 * i)) { /* clear maint */ + env->quiesced = false; + ppc_maybe_interrupt(env); /* * Hardware has very particular cases for where clear maint * must be used and where start must be used to resume a @@ -317,6 +326,8 @@ static void pnv_core_cpu_realize(PnvCore *pc, PowerPCCPU *cpu, Error **errp, pir_spr->default_value = pir; tir_spr->default_value = tir; + env->chip_index = pc->chip->chip_id; + if (pc->big_core) { /* 2 "small cores" get the same core index for SMT operations */ env->core_index = core_hwid >> 1; @@ -428,7 +439,7 @@ static void pnv_core_unrealize(DeviceState *dev) g_free(pc->threads); } -static Property pnv_core_properties[] = { +static const Property pnv_core_properties[] = { DEFINE_PROP_UINT32("hwid", PnvCore, hwid, 0), DEFINE_PROP_UINT64("hrmor", PnvCore, hrmor, 0), DEFINE_PROP_BOOL("big-core", PnvCore, big_core, false), @@ -436,10 +447,9 @@ static Property pnv_core_properties[] = { false), DEFINE_PROP_BOOL("lpar-per-core", PnvCore, lpar_per_core, false), DEFINE_PROP_LINK("chip", PnvCore, chip, TYPE_PNV_CHIP, PnvChip *), - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_core_power8_class_init(ObjectClass *oc, void *data) +static void pnv_core_power8_class_init(ObjectClass *oc, const void *data) { PnvCoreClass *pcc = PNV_CORE_CLASS(oc); @@ -447,7 +457,7 @@ static void pnv_core_power8_class_init(ObjectClass *oc, void *data) pcc->xscom_size = PNV_XSCOM_EX_SIZE; } -static void pnv_core_power9_class_init(ObjectClass *oc, void *data) +static void pnv_core_power9_class_init(ObjectClass *oc, const void *data) { PnvCoreClass *pcc = PNV_CORE_CLASS(oc); @@ -455,7 +465,7 @@ static void pnv_core_power9_class_init(ObjectClass *oc, void *data) pcc->xscom_size = PNV_XSCOM_EX_SIZE; } -static void pnv_core_power10_class_init(ObjectClass *oc, void *data) +static void pnv_core_power10_class_init(ObjectClass *oc, const void *data) { PnvCoreClass *pcc = PNV_CORE_CLASS(oc); @@ -463,7 +473,7 @@ static void pnv_core_power10_class_init(ObjectClass *oc, void *data) pcc->xscom_size = PNV10_XSCOM_EC_SIZE; } -static void pnv_core_class_init(ObjectClass *oc, void *data) +static void pnv_core_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); 
@@ -686,12 +696,11 @@ static void pnv_quad_power10_realize(DeviceState *dev, Error **errp) pqc->xscom_qme_size); } -static Property pnv_quad_properties[] = { +static const Property pnv_quad_properties[] = { DEFINE_PROP_UINT32("quad-id", PnvQuad, quad_id, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_quad_power9_class_init(ObjectClass *oc, void *data) +static void pnv_quad_power9_class_init(ObjectClass *oc, const void *data) { PnvQuadClass *pqc = PNV_QUAD_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -702,7 +711,7 @@ static void pnv_quad_power9_class_init(ObjectClass *oc, void *data) pqc->xscom_size = PNV9_XSCOM_EQ_SIZE; } -static void pnv_quad_power10_class_init(ObjectClass *oc, void *data) +static void pnv_quad_power10_class_init(ObjectClass *oc, const void *data) { PnvQuadClass *pqc = PNV_QUAD_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -716,7 +725,7 @@ static void pnv_quad_power10_class_init(ObjectClass *oc, void *data) pqc->xscom_qme_size = PNV10_XSCOM_QME_SIZE; } -static void pnv_quad_class_init(ObjectClass *oc, void *data) +static void pnv_quad_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/ppc/pnv_homer.c b/hw/ppc/pnv_homer.c index f9a203d11d0..2208ffe632c 100644 --- a/hw/ppc/pnv_homer.c +++ b/hw/ppc/pnv_homer.c @@ -20,8 +20,8 @@ #include "qemu/log.h" #include "qapi/error.h" #include "exec/hwaddr.h" -#include "exec/memory.h" -#include "sysemu/cpus.h" +#include "system/memory.h" +#include "system/cpus.h" #include "hw/qdev-core.h" #include "hw/qdev-properties.h" #include "hw/ppc/pnv.h" @@ -29,94 +29,6 @@ #include "hw/ppc/pnv_homer.h" #include "hw/ppc/pnv_xscom.h" - -static bool core_max_array(PnvHomer *homer, hwaddr addr) -{ - int i; - PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer); - - for (i = 0; i <= homer->chip->nr_cores; i++) { - if (addr == (hmrc->core_max_base + i)) { - return true; - } - } - return false; -} - -/* P8 Pstate table */ - -#define PNV8_OCC_PSTATE_VERSION 0x1f8001 -#define PNV8_OCC_PSTATE_MIN 0x1f8003 -#define PNV8_OCC_PSTATE_VALID 0x1f8000 -#define PNV8_OCC_PSTATE_THROTTLE 0x1f8002 -#define PNV8_OCC_PSTATE_NOM 0x1f8004 -#define PNV8_OCC_PSTATE_TURBO 0x1f8005 -#define PNV8_OCC_PSTATE_ULTRA_TURBO 0x1f8006 -#define PNV8_OCC_PSTATE_DATA 0x1f8008 -#define PNV8_OCC_PSTATE_ID_ZERO 0x1f8010 -#define PNV8_OCC_PSTATE_ID_ONE 0x1f8018 -#define PNV8_OCC_PSTATE_ID_TWO 0x1f8020 -#define PNV8_OCC_VDD_VOLTAGE_IDENTIFIER 0x1f8012 -#define PNV8_OCC_VCS_VOLTAGE_IDENTIFIER 0x1f8013 -#define PNV8_OCC_PSTATE_ZERO_FREQUENCY 0x1f8014 -#define PNV8_OCC_PSTATE_ONE_FREQUENCY 0x1f801c -#define PNV8_OCC_PSTATE_TWO_FREQUENCY 0x1f8024 -#define PNV8_CORE_MAX_BASE 0x1f8810 - - -static uint64_t pnv_power8_homer_read(void *opaque, hwaddr addr, - unsigned size) -{ - PnvHomer *homer = PNV_HOMER(opaque); - - switch (addr) { - case PNV8_OCC_PSTATE_VERSION: - case PNV8_OCC_PSTATE_MIN: - case PNV8_OCC_PSTATE_ID_ZERO: - return 0; - case PNV8_OCC_PSTATE_VALID: - case PNV8_OCC_PSTATE_THROTTLE: - case PNV8_OCC_PSTATE_NOM: - case PNV8_OCC_PSTATE_TURBO: - case PNV8_OCC_PSTATE_ID_ONE: - case PNV8_OCC_VDD_VOLTAGE_IDENTIFIER: - case PNV8_OCC_VCS_VOLTAGE_IDENTIFIER: - return 1; - case PNV8_OCC_PSTATE_ULTRA_TURBO: - case PNV8_OCC_PSTATE_ID_TWO: - return 2; - case PNV8_OCC_PSTATE_DATA: - return 0x1000000000000000; - /* P8 frequency for 0, 1, and 2 pstates */ - case PNV8_OCC_PSTATE_ZERO_FREQUENCY: - case PNV8_OCC_PSTATE_ONE_FREQUENCY: - case PNV8_OCC_PSTATE_TWO_FREQUENCY: - return 3000; - } - /* pstate table core max array */ - if 
(core_max_array(homer, addr)) { - return 1; - } - return 0; -} - -static void pnv_power8_homer_write(void *opaque, hwaddr addr, - uint64_t val, unsigned size) -{ - /* callback function defined to homer write */ - return; -} - -static const MemoryRegionOps pnv_power8_homer_ops = { - .read = pnv_power8_homer_read, - .write = pnv_power8_homer_write, - .valid.min_access_size = 1, - .valid.max_access_size = 8, - .impl.min_access_size = 1, - .impl.max_access_size = 8, - .endianness = DEVICE_BIG_ENDIAN, -}; - /* P8 PBA BARs */ #define PBA_BAR0 0x00 #define PBA_BAR1 0x01 @@ -131,16 +43,16 @@ static uint64_t pnv_homer_power8_pba_read(void *opaque, hwaddr addr, unsigned size) { PnvHomer *homer = PNV_HOMER(opaque); - PnvChip *chip = homer->chip; + PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer); uint32_t reg = addr >> 3; uint64_t val = 0; switch (reg) { case PBA_BAR0: - val = PNV_HOMER_BASE(chip); + val = homer->base; break; case PBA_BARMASK0: /* P8 homer region mask */ - val = (PNV_HOMER_SIZE - 1) & 0x300000; + val = (hmrc->size - 1) & 0x300000; break; case PBA_BAR3: /* P8 occ common area */ val = PNV_OCC_COMMON_AREA_BASE; @@ -172,15 +84,19 @@ static const MemoryRegionOps pnv_homer_power8_pba_ops = { .endianness = DEVICE_BIG_ENDIAN, }; -static void pnv_homer_power8_class_init(ObjectClass *klass, void *data) +static hwaddr pnv_homer_power8_get_base(PnvChip *chip) +{ + return PNV_HOMER_BASE(chip); +} + +static void pnv_homer_power8_class_init(ObjectClass *klass, const void *data) { PnvHomerClass *homer = PNV_HOMER_CLASS(klass); + homer->get_base = pnv_homer_power8_get_base; + homer->size = PNV_HOMER_SIZE; homer->pba_size = PNV_XSCOM_PBA_SIZE; homer->pba_ops = &pnv_homer_power8_pba_ops; - homer->homer_size = PNV_HOMER_SIZE; - homer->homer_ops = &pnv_power8_homer_ops; - homer->core_max_base = PNV8_CORE_MAX_BASE; } static const TypeInfo pnv_homer_power8_type_info = { @@ -190,100 +106,20 @@ static const TypeInfo pnv_homer_power8_type_info = { .class_init = pnv_homer_power8_class_init, }; -/* P9 Pstate table */ - -#define PNV9_OCC_PSTATE_ID_ZERO 0xe2018 -#define PNV9_OCC_PSTATE_ID_ONE 0xe2020 -#define PNV9_OCC_PSTATE_ID_TWO 0xe2028 -#define PNV9_OCC_PSTATE_DATA 0xe2000 -#define PNV9_OCC_PSTATE_DATA_AREA 0xe2008 -#define PNV9_OCC_PSTATE_MIN 0xe2003 -#define PNV9_OCC_PSTATE_NOM 0xe2004 -#define PNV9_OCC_PSTATE_TURBO 0xe2005 -#define PNV9_OCC_PSTATE_ULTRA_TURBO 0xe2818 -#define PNV9_OCC_MAX_PSTATE_ULTRA_TURBO 0xe2006 -#define PNV9_OCC_PSTATE_MAJOR_VERSION 0xe2001 -#define PNV9_OCC_OPAL_RUNTIME_DATA 0xe2b85 -#define PNV9_CHIP_HOMER_IMAGE_POINTER 0x200008 -#define PNV9_CHIP_HOMER_BASE 0x0 -#define PNV9_OCC_PSTATE_ZERO_FREQUENCY 0xe201c -#define PNV9_OCC_PSTATE_ONE_FREQUENCY 0xe2024 -#define PNV9_OCC_PSTATE_TWO_FREQUENCY 0xe202c -#define PNV9_OCC_ROLE_MASTER_OR_SLAVE 0xe2002 -#define PNV9_CORE_MAX_BASE 0xe2819 - - -static uint64_t pnv_power9_homer_read(void *opaque, hwaddr addr, - unsigned size) -{ - PnvHomer *homer = PNV_HOMER(opaque); - - switch (addr) { - case PNV9_OCC_MAX_PSTATE_ULTRA_TURBO: - case PNV9_OCC_PSTATE_ID_ZERO: - return 0; - case PNV9_OCC_PSTATE_DATA: - case PNV9_OCC_ROLE_MASTER_OR_SLAVE: - case PNV9_OCC_PSTATE_NOM: - case PNV9_OCC_PSTATE_TURBO: - case PNV9_OCC_PSTATE_ID_ONE: - case PNV9_OCC_PSTATE_ULTRA_TURBO: - case PNV9_OCC_OPAL_RUNTIME_DATA: - return 1; - case PNV9_OCC_PSTATE_MIN: - case PNV9_OCC_PSTATE_ID_TWO: - return 2; - - /* 3000 khz frequency for 0, 1, and 2 pstates */ - case PNV9_OCC_PSTATE_ZERO_FREQUENCY: - case PNV9_OCC_PSTATE_ONE_FREQUENCY: - case PNV9_OCC_PSTATE_TWO_FREQUENCY: - 
return 3000; - case PNV9_OCC_PSTATE_MAJOR_VERSION: - return 0x90; - case PNV9_CHIP_HOMER_BASE: - case PNV9_OCC_PSTATE_DATA_AREA: - case PNV9_CHIP_HOMER_IMAGE_POINTER: - return 0x1000000000000000; - } - /* pstate table core max array */ - if (core_max_array(homer, addr)) { - return 1; - } - return 0; -} - -static void pnv_power9_homer_write(void *opaque, hwaddr addr, - uint64_t val, unsigned size) -{ - /* callback function defined to homer write */ - return; -} - -static const MemoryRegionOps pnv_power9_homer_ops = { - .read = pnv_power9_homer_read, - .write = pnv_power9_homer_write, - .valid.min_access_size = 1, - .valid.max_access_size = 8, - .impl.min_access_size = 1, - .impl.max_access_size = 8, - .endianness = DEVICE_BIG_ENDIAN, -}; - static uint64_t pnv_homer_power9_pba_read(void *opaque, hwaddr addr, unsigned size) { PnvHomer *homer = PNV_HOMER(opaque); - PnvChip *chip = homer->chip; + PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer); uint32_t reg = addr >> 3; uint64_t val = 0; switch (reg) { case PBA_BAR0: - val = PNV9_HOMER_BASE(chip); + val = homer->base; break; case PBA_BARMASK0: /* P9 homer region mask */ - val = (PNV9_HOMER_SIZE - 1) & 0x300000; + val = (hmrc->size - 1) & 0x300000; break; case PBA_BAR2: /* P9 occ common area */ val = PNV9_OCC_COMMON_AREA_BASE; @@ -315,15 +151,19 @@ static const MemoryRegionOps pnv_homer_power9_pba_ops = { .endianness = DEVICE_BIG_ENDIAN, }; -static void pnv_homer_power9_class_init(ObjectClass *klass, void *data) +static hwaddr pnv_homer_power9_get_base(PnvChip *chip) +{ + return PNV9_HOMER_BASE(chip); +} + +static void pnv_homer_power9_class_init(ObjectClass *klass, const void *data) { PnvHomerClass *homer = PNV_HOMER_CLASS(klass); + homer->get_base = pnv_homer_power9_get_base; + homer->size = PNV_HOMER_SIZE; homer->pba_size = PNV9_XSCOM_PBA_SIZE; homer->pba_ops = &pnv_homer_power9_pba_ops; - homer->homer_size = PNV9_HOMER_SIZE; - homer->homer_ops = &pnv_power9_homer_ops; - homer->core_max_base = PNV9_CORE_MAX_BASE; } static const TypeInfo pnv_homer_power9_type_info = { @@ -337,16 +177,16 @@ static uint64_t pnv_homer_power10_pba_read(void *opaque, hwaddr addr, unsigned size) { PnvHomer *homer = PNV_HOMER(opaque); - PnvChip *chip = homer->chip; + PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer); uint32_t reg = addr >> 3; uint64_t val = 0; switch (reg) { case PBA_BAR0: - val = PNV10_HOMER_BASE(chip); + val = homer->base; break; case PBA_BARMASK0: /* P10 homer region mask */ - val = (PNV10_HOMER_SIZE - 1) & 0x300000; + val = (hmrc->size - 1) & 0x300000; break; case PBA_BAR2: /* P10 occ common area */ val = PNV10_OCC_COMMON_AREA_BASE; @@ -378,15 +218,19 @@ static const MemoryRegionOps pnv_homer_power10_pba_ops = { .endianness = DEVICE_BIG_ENDIAN, }; -static void pnv_homer_power10_class_init(ObjectClass *klass, void *data) +static hwaddr pnv_homer_power10_get_base(PnvChip *chip) +{ + return PNV10_HOMER_BASE(chip); +} + +static void pnv_homer_power10_class_init(ObjectClass *klass, const void *data) { PnvHomerClass *homer = PNV_HOMER_CLASS(klass); + homer->get_base = pnv_homer_power10_get_base; + homer->size = PNV_HOMER_SIZE; homer->pba_size = PNV10_XSCOM_PBA_SIZE; homer->pba_ops = &pnv_homer_power10_pba_ops; - homer->homer_size = PNV10_HOMER_SIZE; - homer->homer_ops = &pnv_power9_homer_ops; /* TODO */ - homer->core_max_base = PNV9_CORE_MAX_BASE; } static const TypeInfo pnv_homer_power10_type_info = { @@ -400,24 +244,29 @@ static void pnv_homer_realize(DeviceState *dev, Error **errp) { PnvHomer *homer = PNV_HOMER(dev); PnvHomerClass *hmrc = 
PNV_HOMER_GET_CLASS(homer); + char homer_str[32]; assert(homer->chip); pnv_xscom_region_init(&homer->pba_regs, OBJECT(dev), hmrc->pba_ops, homer, "xscom-pba", hmrc->pba_size); - /* homer region */ - memory_region_init_io(&homer->regs, OBJECT(dev), - hmrc->homer_ops, homer, "homer-main-memory", - hmrc->homer_size); + /* Homer RAM region */ + homer->base = hmrc->get_base(homer->chip); + + snprintf(homer_str, sizeof(homer_str), "homer-chip%d-memory", + homer->chip->chip_id); + if (!memory_region_init_ram(&homer->mem, OBJECT(homer), + homer_str, hmrc->size, errp)) { + return; + } } -static Property pnv_homer_properties[] = { +static const Property pnv_homer_properties[] = { DEFINE_PROP_LINK("chip", PnvHomer, chip, TYPE_PNV_CHIP, PnvChip *), - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_homer_class_init(ObjectClass *klass, void *data) +static void pnv_homer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/ppc/pnv_i2c.c b/hw/ppc/pnv_i2c.c index eec5047ce83..60de4794917 100644 --- a/hw/ppc/pnv_i2c.c +++ b/hw/ppc/pnv_i2c.c @@ -9,7 +9,7 @@ #include "qemu/osdep.h" #include "qemu/module.h" #include "qemu/log.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "hw/irq.h" #include "hw/qdev-properties.h" @@ -543,14 +543,13 @@ static void pnv_i2c_realize(DeviceState *dev, Error **errp) qdev_init_gpio_out(DEVICE(dev), &i2c->psi_irq, 1); } -static Property pnv_i2c_properties[] = { +static const Property pnv_i2c_properties[] = { DEFINE_PROP_LINK("chip", PnvI2C, chip, TYPE_PNV_CHIP, PnvChip *), DEFINE_PROP_UINT32("engine", PnvI2C, engine, 1), DEFINE_PROP_UINT32("num-busses", PnvI2C, num_busses, 1), - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_i2c_class_init(ObjectClass *klass, void *data) +static void pnv_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvXScomInterfaceClass *xscomc = PNV_XSCOM_INTERFACE_CLASS(klass); @@ -570,7 +569,7 @@ static const TypeInfo pnv_i2c_info = { .parent = TYPE_DEVICE, .instance_size = sizeof(PnvI2C), .class_init = pnv_i2c_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } diff --git a/hw/ppc/pnv_lpc.c b/hw/ppc/pnv_lpc.c index f8aad955b5b..f6beba0917d 100644 --- a/hw/ppc/pnv_lpc.c +++ b/hw/ppc/pnv_lpc.c @@ -85,7 +85,7 @@ enum { #define ISA_IO_SIZE 0x00010000 #define ISA_MEM_SIZE 0x10000000 -#define ISA_FW_SIZE 0x10000000 +#define ISA_FW_SIZE 0x100000000 #define LPC_IO_OPB_ADDR 0xd0010000 #define LPC_IO_OPB_SIZE 0x00010000 #define LPC_MEM_OPB_ADDR 0xe0000000 @@ -353,6 +353,8 @@ static const MemoryRegionOps pnv_lpc_xscom_ops = { .endianness = DEVICE_BIG_ENDIAN, }; +static void pnv_lpc_opb_noresponse(PnvLpcController *lpc); + static uint64_t pnv_lpc_mmio_read(void *opaque, hwaddr addr, unsigned size) { PnvLpcController *lpc = PNV_LPC(opaque); @@ -376,6 +378,7 @@ static uint64_t pnv_lpc_mmio_read(void *opaque, hwaddr addr, unsigned size) } if (result != MEMTX_OK) { + pnv_lpc_opb_noresponse(lpc); qemu_log_mask(LOG_GUEST_ERROR, "OPB read failed at @0x%" HWADDR_PRIx "\n", addr); } @@ -406,6 +409,7 @@ static void pnv_lpc_mmio_write(void *opaque, hwaddr addr, } if (result != MEMTX_OK) { + pnv_lpc_opb_noresponse(lpc); qemu_log_mask(LOG_GUEST_ERROR, "OPB write failed at @0x%" HWADDR_PRIx "\n", addr); } @@ -427,21 +431,27 @@ static void pnv_lpc_eval_serirq_routes(PnvLpcController *lpc) int irq; if (!lpc->psi_has_serirq) { - if ((lpc->opb_irq_route0 & PPC_BITMASK(8, 13)) || - (lpc->opb_irq_route1 
& PPC_BITMASK(4, 31))) { + if ((lpc->opb_irq_route0 & PPC_BITMASK32(8, 13)) || + (lpc->opb_irq_route1 & PPC_BITMASK32(4, 31))) { qemu_log_mask(LOG_GUEST_ERROR, "OPB: setting serirq routing on POWER8 system, ignoring.\n"); } return; } + /* + * Each of the ISA irqs is routed to one of the 4 SERIRQ irqs with 2 + * bits, split across 2 OPB registers. + */ for (irq = 0; irq <= 13; irq++) { - int serirq = (lpc->opb_irq_route1 >> (31 - 5 - (irq * 2))) & 0x3; + int serirq = extract32(lpc->opb_irq_route1, + PPC_BIT32_NR(5 + irq * 2), 2); lpc->irq_to_serirq_route[irq] = serirq; } for (irq = 14; irq < ISA_NUM_IRQS; irq++) { - int serirq = (lpc->opb_irq_route0 >> (31 - 9 - (irq * 2))) & 0x3; + int serirq = extract32(lpc->opb_irq_route0, + PPC_BIT32_NR(9 + (irq - 14) * 2), 2); lpc->irq_to_serirq_route[irq] = serirq; } } @@ -450,46 +460,18 @@ static void pnv_lpc_eval_irqs(PnvLpcController *lpc) { uint32_t active_irqs = 0; - if (lpc->lpc_hc_irqstat & PPC_BITMASK32(16, 31)) { - qemu_log_mask(LOG_UNIMP, "LPC HC Unimplemented irqs in IRQSTAT: " - "0x%08"PRIx32"\n", lpc->lpc_hc_irqstat); - } - - if (lpc->lpc_hc_irqser_ctrl & LPC_HC_IRQSER_EN) { - active_irqs = lpc->lpc_hc_irqstat & lpc->lpc_hc_irqmask; + active_irqs = lpc->lpc_hc_irqstat & lpc->lpc_hc_irqmask; + if (!(lpc->lpc_hc_irqser_ctrl & LPC_HC_IRQSER_EN)) { + active_irqs &= ~LPC_HC_IRQ_SERIRQ_ALL; } /* Reflect the interrupt */ - if (!lpc->psi_has_serirq) { - /* - * POWER8 ORs all irqs together (also with LPCHC internal interrupt - * sources) and outputs a single line that raises the PSI LPCHC irq - * which then latches an OPB IRQ status register that sends the irq - * to PSI. - * - * We don't honor the polarity register, it's pointless and unused - * anyway - */ - if (active_irqs) { - lpc->opb_irq_input |= OPB_MASTER_IRQ_LPC; - } else { - lpc->opb_irq_input &= ~OPB_MASTER_IRQ_LPC; - } - - /* Update OPB internal latch */ - lpc->opb_irq_stat |= lpc->opb_irq_input & lpc->opb_irq_mask; - - qemu_set_irq(lpc->psi_irq_lpchc, lpc->opb_irq_stat != 0); - } else { + if (lpc->psi_has_serirq) { /* - * POWER9 and POWER10 have routing fields in OPB master registers that + * POWER9 and later have routing fields in OPB master registers that * send LPC irqs to 4 output lines that raise the PSI SERIRQ irqs. * These don't appear to get latched into an OPB register like the * LPCHC irqs. - * - * POWER9 LPC controller internal irqs still go via the OPB - * and LPCHC PSI irqs like P8, but we have no such internal sources - * modelled yet. */ bool serirq_out[4] = { false, false, false, false }; int irq; @@ -504,7 +486,39 @@ static void pnv_lpc_eval_irqs(PnvLpcController *lpc) qemu_set_irq(lpc->psi_irq_serirq[1], serirq_out[1]); qemu_set_irq(lpc->psi_irq_serirq[2], serirq_out[2]); qemu_set_irq(lpc->psi_irq_serirq[3], serirq_out[3]); + + /* + * POWER9 and later LPC controller internal irqs still go via the OPB + * and LPCHC PSI irqs like P8, so take the SERIRQs out and continue. + */ + active_irqs &= ~LPC_HC_IRQ_SERIRQ_ALL; + } + + /* + * POWER8 ORs all irqs together (also with LPCHC internal interrupt + * sources) and outputs a single line that raises the PSI LPCHC irq + * which then latches an OPB IRQ status register that sends the irq + * to PSI. 
+ * + * We don't honor the polarity register, it's pointless and unused + * anyway + */ + if (active_irqs) { + lpc->opb_irq_input |= OPB_MASTER_IRQ_LPC; + } else { + lpc->opb_irq_input &= ~OPB_MASTER_IRQ_LPC; } + + /* Update OPB internal latch */ + lpc->opb_irq_stat |= lpc->opb_irq_input & lpc->opb_irq_mask; + + qemu_set_irq(lpc->psi_irq_lpchc, lpc->opb_irq_stat != 0); +} + +static void pnv_lpc_opb_noresponse(PnvLpcController *lpc) +{ + lpc->lpc_hc_irqstat |= LPC_HC_IRQ_SYNC_NORESP_ERR; + pnv_lpc_eval_irqs(lpc); } static uint64_t lpc_hc_read(void *opaque, hwaddr addr, unsigned size) @@ -547,10 +561,13 @@ static void lpc_hc_write(void *opaque, hwaddr addr, uint64_t val, switch (addr) { case LPC_HC_FW_SEG_IDSEL: - /* XXX Actually figure out how that works as this impact - * memory regions/aliases + /* + * ISA FW "devices" are modeled as 16x256MB windows into a + * 4GB LPC FW address space. */ + val &= 0xf; /* Selects device 0-15 */ lpc->lpc_hc_fw_seg_idsel = val; + memory_region_set_alias_offset(&lpc->opb_isa_fw, val * LPC_FW_OPB_SIZE); break; case LPC_HC_FW_RD_ACC_SIZE: lpc->lpc_hc_fw_rd_acc_size = val; @@ -697,7 +714,7 @@ static void pnv_lpc_power8_realize(DeviceState *dev, Error **errp) PNV_XSCOM_LPC_SIZE); } -static void pnv_lpc_power8_class_init(ObjectClass *klass, void *data) +static void pnv_lpc_power8_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); @@ -715,7 +732,7 @@ static const TypeInfo pnv_lpc_power8_info = { .name = TYPE_PNV8_LPC, .parent = TYPE_PNV_LPC, .class_init = pnv_lpc_power8_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } @@ -743,7 +760,7 @@ static void pnv_lpc_power9_realize(DeviceState *dev, Error **errp) qdev_init_gpio_out_named(dev, lpc->psi_irq_serirq, "SERIRQ", 4); } -static void pnv_lpc_power9_class_init(ObjectClass *klass, void *data) +static void pnv_lpc_power9_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvLpcClass *plc = PNV_LPC_CLASS(klass); @@ -760,7 +777,7 @@ static const TypeInfo pnv_lpc_power9_info = { .class_init = pnv_lpc_power9_class_init, }; -static void pnv_lpc_power10_class_init(ObjectClass *klass, void *data) +static void pnv_lpc_power10_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -784,9 +801,9 @@ static void pnv_lpc_realize(DeviceState *dev, Error **errp) memory_region_init(&lpc->opb_mr, OBJECT(dev), "lpc-opb", 0x100000000ull); address_space_init(&lpc->opb_as, &lpc->opb_mr, "lpc-opb"); - /* Create ISA IO and Mem space regions which are the root of - * the ISA bus (ie, ISA address spaces). We don't create a - * separate one for FW which we alias to memory. + /* + * Create ISA IO, Mem, and FW space regions which are the root of + * the ISA bus (ie, ISA address spaces). 
*/ memory_region_init(&lpc->isa_io, OBJECT(dev), "isa-io", ISA_IO_SIZE); memory_region_init(&lpc->isa_mem, OBJECT(dev), "isa-mem", ISA_MEM_SIZE); @@ -822,12 +839,11 @@ static void pnv_lpc_realize(DeviceState *dev, Error **errp) qdev_init_gpio_out_named(dev, &lpc->psi_irq_lpchc, "LPCHC", 1); } -static Property pnv_lpc_properties[] = { +static const Property pnv_lpc_properties[] = { DEFINE_PROP_BOOL("psi-serirq", PnvLpcController, psi_has_serirq, false), - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_lpc_class_init(ObjectClass *klass, void *data) +static void pnv_lpc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/ppc/pnv_n1_chiplet.c b/hw/ppc/pnv_n1_chiplet.c index 03ff9fbad0d..053f6473f22 100644 --- a/hw/ppc/pnv_n1_chiplet.c +++ b/hw/ppc/pnv_n1_chiplet.c @@ -136,7 +136,7 @@ static void pnv_n1_chiplet_realize(DeviceState *dev, Error **errp) PNV10_XSCOM_N1_PB_SCOM_ES_SIZE); } -static void pnv_n1_chiplet_class_init(ObjectClass *klass, void *data) +static void pnv_n1_chiplet_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -159,7 +159,7 @@ static const TypeInfo pnv_n1_chiplet_info = { .instance_init = pnv_n1_chiplet_instance_init, .instance_size = sizeof(PnvN1Chiplet), .class_init = pnv_n1_chiplet_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } diff --git a/hw/ppc/pnv_nest_pervasive.c b/hw/ppc/pnv_nest_pervasive.c index 77476753a40..1b1b14fed95 100644 --- a/hw/ppc/pnv_nest_pervasive.c +++ b/hw/ppc/pnv_nest_pervasive.c @@ -177,11 +177,11 @@ static void pnv_nest_pervasive_realize(DeviceState *dev, Error **errp) pnv_xscom_region_init(&nest_pervasive->xscom_ctrl_regs_mr, OBJECT(nest_pervasive), &pnv_nest_pervasive_control_xscom_ops, - nest_pervasive, "pervasive-control", + nest_pervasive, "xscom-pervasive-control", PNV10_XSCOM_CHIPLET_CTRL_REGS_SIZE); } -static void pnv_nest_pervasive_class_init(ObjectClass *klass, void *data) +static void pnv_nest_pervasive_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -194,7 +194,7 @@ static const TypeInfo pnv_nest_pervasive_info = { .parent = TYPE_DEVICE, .instance_size = sizeof(PnvNestChipletPervasive), .class_init = pnv_nest_pervasive_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } diff --git a/hw/ppc/pnv_occ.c b/hw/ppc/pnv_occ.c index 48123ceae17..24b789c191c 100644 --- a/hw/ppc/pnv_occ.c +++ b/hw/ppc/pnv_occ.c @@ -24,40 +24,53 @@ #include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_chip.h" #include "hw/ppc/pnv_xscom.h" #include "hw/ppc/pnv_occ.h" +#define P8_HOMER_OPAL_DATA_OFFSET 0x1F8000 +#define P9_HOMER_OPAL_DATA_OFFSET 0x0E2000 + #define OCB_OCI_OCCMISC 0x4020 #define OCB_OCI_OCCMISC_AND 0x4021 #define OCB_OCI_OCCMISC_OR 0x4022 +#define OCCMISC_PSI_IRQ PPC_BIT(0) +#define OCCMISC_IRQ_SHMEM PPC_BIT(3) /* OCC sensors */ -#define OCC_SENSOR_DATA_BLOCK_OFFSET 0x580000 -#define OCC_SENSOR_DATA_VALID 0x580001 -#define OCC_SENSOR_DATA_VERSION 0x580002 -#define OCC_SENSOR_DATA_READING_VERSION 0x580004 -#define OCC_SENSOR_DATA_NR_SENSORS 0x580008 -#define OCC_SENSOR_DATA_NAMES_OFFSET 0x580010 -#define OCC_SENSOR_DATA_READING_PING_OFFSET 0x580014 -#define OCC_SENSOR_DATA_READING_PONG_OFFSET 0x58000c -#define OCC_SENSOR_DATA_NAME_LENGTH 0x58000d -#define OCC_SENSOR_NAME_STRUCTURE_TYPE 0x580023 -#define 
OCC_SENSOR_LOC_CORE 0x580022 -#define OCC_SENSOR_LOC_GPU 0x580020 -#define OCC_SENSOR_TYPE_POWER 0x580003 -#define OCC_SENSOR_NAME 0x580005 -#define HWMON_SENSORS_MASK 0x58001e -#define SLW_IMAGE_BASE 0x0 +#define OCC_SENSOR_DATA_BLOCK_OFFSET 0x0000 +#define OCC_SENSOR_DATA_VALID 0x0001 +#define OCC_SENSOR_DATA_VERSION 0x0002 +#define OCC_SENSOR_DATA_READING_VERSION 0x0004 +#define OCC_SENSOR_DATA_NR_SENSORS 0x0008 +#define OCC_SENSOR_DATA_NAMES_OFFSET 0x0010 +#define OCC_SENSOR_DATA_READING_PING_OFFSET 0x0014 +#define OCC_SENSOR_DATA_READING_PONG_OFFSET 0x000c +#define OCC_SENSOR_DATA_NAME_LENGTH 0x000d +#define OCC_SENSOR_NAME_STRUCTURE_TYPE 0x0023 +#define OCC_SENSOR_LOC_CORE 0x0022 +#define OCC_SENSOR_LOC_GPU 0x0020 +#define OCC_SENSOR_TYPE_POWER 0x0003 +#define OCC_SENSOR_NAME 0x0005 +#define HWMON_SENSORS_MASK 0x001e static void pnv_occ_set_misc(PnvOCC *occ, uint64_t val) { - bool irq_state; - - val &= 0xffff000000000000ull; + val &= PPC_BITMASK(0, 18); /* Mask out unimplemented bits */ occ->occmisc = val; - irq_state = !!(val >> 63); - qemu_set_irq(occ->psi_irq, irq_state); + + /* + * OCCMISC IRQ bit triggers the interrupt on a 0->1 edge, but not clear + * how that is handled in PSI so it is level-triggered here, which is not + * really correct (but skiboot is okay with it). + */ + qemu_set_irq(occ->psi_irq, !!(val & OCCMISC_PSI_IRQ)); +} + +static void pnv_occ_raise_msg_irq(PnvOCC *occ) +{ + pnv_occ_set_misc(occ, occ->occmisc | OCCMISC_PSI_IRQ | OCCMISC_IRQ_SHMEM); } static uint64_t pnv_occ_power8_xscom_read(void *opaque, hwaddr addr, @@ -129,8 +142,6 @@ static uint64_t pnv_occ_common_area_read(void *opaque, hwaddr addr, case HWMON_SENSORS_MASK: case OCC_SENSOR_LOC_GPU: return 0x8e00; - case SLW_IMAGE_BASE: - return 0x1000000000000000; } return 0; } @@ -139,7 +150,6 @@ static void pnv_occ_common_area_write(void *opaque, hwaddr addr, uint64_t val, unsigned width) { /* callback function defined to occ common area write */ - return; } static const MemoryRegionOps pnv_occ_power8_xscom_ops = { @@ -162,10 +172,14 @@ const MemoryRegionOps pnv_occ_sram_ops = { .endianness = DEVICE_BIG_ENDIAN, }; -static void pnv_occ_power8_class_init(ObjectClass *klass, void *data) +static void pnv_occ_power8_class_init(ObjectClass *klass, const void *data) { PnvOCCClass *poc = PNV_OCC_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + dc->desc = "PowerNV OCC Controller (POWER8)"; + poc->opal_shared_memory_offset = P8_HOMER_OPAL_DATA_OFFSET; + poc->opal_shared_memory_version = 0x02; poc->xscom_size = PNV_XSCOM_OCC_SIZE; poc->xscom_ops = &pnv_occ_power8_xscom_ops; } @@ -232,14 +246,17 @@ static const MemoryRegionOps pnv_occ_power9_xscom_ops = { .endianness = DEVICE_BIG_ENDIAN, }; -static void pnv_occ_power9_class_init(ObjectClass *klass, void *data) +static void pnv_occ_power9_class_init(ObjectClass *klass, const void *data) { PnvOCCClass *poc = PNV_OCC_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); dc->desc = "PowerNV OCC Controller (POWER9)"; + poc->opal_shared_memory_offset = P9_HOMER_OPAL_DATA_OFFSET; + poc->opal_shared_memory_version = 0x90; poc->xscom_size = PNV9_XSCOM_OCC_SIZE; poc->xscom_ops = &pnv_occ_power9_xscom_ops; + assert(!dc->user_creatable); } static const TypeInfo pnv_occ_power9_type_info = { @@ -249,23 +266,52 @@ static const TypeInfo pnv_occ_power9_type_info = { .class_init = pnv_occ_power9_class_init, }; -static void pnv_occ_power10_class_init(ObjectClass *klass, void *data) +static void pnv_occ_power10_class_init(ObjectClass *klass, const void *data) { + PnvOCCClass *poc = 
PNV_OCC_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); dc->desc = "PowerNV OCC Controller (POWER10)"; + poc->opal_shared_memory_offset = P9_HOMER_OPAL_DATA_OFFSET; + poc->opal_shared_memory_version = 0xA0; + poc->xscom_size = PNV9_XSCOM_OCC_SIZE; + poc->xscom_ops = &pnv_occ_power9_xscom_ops; + assert(!dc->user_creatable); } static const TypeInfo pnv_occ_power10_type_info = { .name = TYPE_PNV10_OCC, - .parent = TYPE_PNV9_OCC, + .parent = TYPE_PNV_OCC, .class_init = pnv_occ_power10_class_init, }; +static bool occ_init_homer_memory(PnvOCC *occ, Error **errp); +static bool occ_model_tick(PnvOCC *occ); + +/* Relatively arbitrary */ +#define OCC_POLL_MS 100 + +static void occ_state_machine_timer(void *opaque) +{ + PnvOCC *occ = opaque; + uint64_t next = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + OCC_POLL_MS; + + if (occ_model_tick(occ)) { + timer_mod(&occ->state_machine_timer, next); + } +} + static void pnv_occ_realize(DeviceState *dev, Error **errp) { PnvOCC *occ = PNV_OCC(dev); PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ); + PnvHomer *homer = occ->homer; + + assert(homer); + + if (!occ_init_homer_memory(occ, errp)) { + return; + } occ->occmisc = 0; @@ -279,14 +325,22 @@ static void pnv_occ_realize(DeviceState *dev, Error **errp) PNV_OCC_SENSOR_DATA_BLOCK_SIZE); qdev_init_gpio_out(dev, &occ->psi_irq, 1); + + timer_init_ms(&occ->state_machine_timer, QEMU_CLOCK_VIRTUAL, + occ_state_machine_timer, occ); + timer_mod(&occ->state_machine_timer, OCC_POLL_MS); } -static void pnv_occ_class_init(ObjectClass *klass, void *data) +static const Property pnv_occ_properties[] = { + DEFINE_PROP_LINK("homer", PnvOCC, homer, TYPE_PNV_HOMER, PnvHomer *), +}; + +static void pnv_occ_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = pnv_occ_realize; - dc->desc = "PowerNV OCC Controller"; + device_class_set_props(dc, pnv_occ_properties); dc->user_creatable = false; } @@ -308,3 +362,561 @@ static void pnv_occ_register_types(void) } type_init(pnv_occ_register_types); + +/* + * From skiboot/hw/occ.c with following changes: + * - tab to space conversion + * - Type conversions u8->uint8_t s8->int8_t __be16->uint16_t etc + * - __packed -> QEMU_PACKED + */ +/* OCC Communication Area for PStates */ + +#define OPAL_DYNAMIC_DATA_OFFSET 0x0B80 +/* relative to HOMER_OPAL_DATA_OFFSET */ + +#define MAX_PSTATES 256 +#define MAX_P8_CORES 12 +#define MAX_P9_CORES 24 +#define MAX_P10_CORES 32 + +#define MAX_OPAL_CMD_DATA_LENGTH 4090 +#define MAX_OCC_RSP_DATA_LENGTH 8698 + +#define P8_PIR_CORE_MASK 0xFFF8 +#define P9_PIR_QUAD_MASK 0xFFF0 +#define P10_PIR_CHIP_MASK 0x0000 +#define FREQ_MAX_IN_DOMAIN 0 +#define FREQ_MOST_RECENTLY_SET 1 + +/** + * OCC-OPAL Shared Memory Region + * + * Reference document : + * https://github.com/open-power/docs/blob/master/occ/OCC_OpenPwr_FW_Interfaces.pdf + * + * Supported layout versions: + * - 0x01, 0x02 : P8 + * https://github.com/open-power/occ/blob/master_p8/src/occ/proc/proc_pstate.h + * + * - 0x90 : P9 + * https://github.com/open-power/occ/blob/master/src/occ_405/proc/proc_pstate.h + * In 0x90 the data is separated into :- + * -- Static Data (struct occ_pstate_table): Data is written once by OCC + * -- Dynamic Data (struct occ_dynamic_data): Data is updated at runtime + * + * struct occ_pstate_table - Pstate table layout + * @valid: Indicates if data is valid + * @version: Layout version [Major/Minor] + * @v2.throttle: Reason for limiting the max pstate + * @v9.occ_role: OCC role (Master/Slave) + * @v#.pstate_min: Minimum pstate ever allowed + * 
@v#.pstate_nom: Nominal pstate + * @v#.pstate_turbo: Maximum turbo pstate + * @v#.pstate_ultra_turbo: Maximum ultra turbo pstate and the maximum + * pstate ever allowed + * @v#.pstates: Pstate-id and frequency list from Pmax to Pmin + * @v#.pstates.id: Pstate-id + * @v#.pstates.flags: Pstate-flag(reserved) + * @v2.pstates.vdd: Voltage Identifier + * @v2.pstates.vcs: Voltage Identifier + * @v#.pstates.freq_khz: Frequency in KHz + * @v#.core_max[1..N]: Max pstate with N active cores + * @spare/reserved/pad: Unused data + */ +struct occ_pstate_table { + uint8_t valid; + uint8_t version; + union QEMU_PACKED { + struct QEMU_PACKED { /* Version 0x01 and 0x02 */ + uint8_t throttle; + int8_t pstate_min; + int8_t pstate_nom; + int8_t pstate_turbo; + int8_t pstate_ultra_turbo; + uint8_t spare; + uint64_t reserved; + struct QEMU_PACKED { + int8_t id; + uint8_t flags; + uint8_t vdd; + uint8_t vcs; + uint32_t freq_khz; + } pstates[MAX_PSTATES]; + int8_t core_max[MAX_P8_CORES]; + uint8_t pad[100]; + } v2; + struct QEMU_PACKED { /* Version 0x90 */ + uint8_t occ_role; + uint8_t pstate_min; + uint8_t pstate_nom; + uint8_t pstate_turbo; + uint8_t pstate_ultra_turbo; + uint8_t spare; + uint64_t reserved1; + uint64_t reserved2; + struct QEMU_PACKED { + uint8_t id; + uint8_t flags; + uint16_t reserved; + uint32_t freq_khz; + } pstates[MAX_PSTATES]; + uint8_t core_max[MAX_P9_CORES]; + uint8_t pad[56]; + } v9; + struct QEMU_PACKED { /* Version 0xA0 */ + uint8_t occ_role; + uint8_t pstate_min; + uint8_t pstate_fixed_freq; + uint8_t pstate_base; + uint8_t pstate_ultra_turbo; + uint8_t pstate_fmax; + uint8_t minor; + uint8_t pstate_bottom_throttle; + uint8_t spare; + uint8_t spare1; + uint32_t reserved_32; + uint64_t reserved_64; + struct QEMU_PACKED { + uint8_t id; + uint8_t valid; + uint16_t reserved; + uint32_t freq_khz; + } pstates[MAX_PSTATES]; + uint8_t core_max[MAX_P10_CORES]; + uint8_t pad[48]; + } v10; + }; +} QEMU_PACKED; + +/** + * OPAL-OCC Command Response Interface + * + * OPAL-OCC Command Buffer + * + * --------------------------------------------------------------------- + * | OPAL | Cmd | OPAL | | Cmd Data | Cmd Data | OPAL | + * | Cmd | Request | OCC | Reserved | Length | Length | Cmd | + * | Flags | ID | Cmd | | (MSB) | (LSB) | Data... 
| + * --------------------------------------------------------------------- + * | ….OPAL Command Data up to max of Cmd Data Length 4090 bytes | + * | | + * --------------------------------------------------------------------- + * + * OPAL Command Flag + * + * ----------------------------------------------------------------- + * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 | + * | (msb) | | | | | | | (lsb) | + * ----------------------------------------------------------------- + * |Cmd | | | | | | | | + * |Ready | | | | | | | | + * ----------------------------------------------------------------- + * + * struct opal_command_buffer - Defines the layout of OPAL command buffer + * @flag: Provides general status of the command + * @request_id: Token to identify request + * @cmd: Command sent + * @data_size: Command data length + * @data: Command specific data + * @spare: Unused byte + */ +struct opal_command_buffer { + uint8_t flag; + uint8_t request_id; + uint8_t cmd; + uint8_t spare; + uint16_t data_size; + uint8_t data[MAX_OPAL_CMD_DATA_LENGTH]; +} QEMU_PACKED; + +/** + * OPAL-OCC Response Buffer + * + * --------------------------------------------------------------------- + * | OCC | Cmd | OPAL | Response | Rsp Data | Rsp Data | OPAL | + * | Rsp | Request | OCC | Status | Length | Length | Rsp | + * | Flags | ID | Cmd | | (MSB) | (LSB) | Data... | + * --------------------------------------------------------------------- + * | ….OPAL Response Data up to max of Rsp Data Length 8698 bytes | + * | | + * --------------------------------------------------------------------- + * + * OCC Response Flag + * + * ----------------------------------------------------------------- + * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 | + * | (msb) | | | | | | | (lsb) | + * ----------------------------------------------------------------- + * | | | | | | |OCC in | Rsp | + * | | | | | | |progress|Ready | + * ----------------------------------------------------------------- + * + * struct occ_response_buffer - Defines the layout of OCC response buffer + * @flag: Provides general status of the response + * @request_id: Token to identify request + * @cmd: Command requested + * @status: Indicates success/failure status of + * the command + * @data_size: Response data length + * @data: Response specific data + */ +struct occ_response_buffer { + uint8_t flag; + uint8_t request_id; + uint8_t cmd; + uint8_t status; + uint16_t data_size; + uint8_t data[MAX_OCC_RSP_DATA_LENGTH]; +} QEMU_PACKED; + +/** + * OCC-OPAL Shared Memory Interface Dynamic Data Vx90 + * + * struct occ_dynamic_data - Contains runtime attributes + * @occ_state: Current state of OCC + * @major_version: Major version number + * @minor_version: Minor version number (backwards compatible) + * Version 1 indicates GPU presence populated + * @gpus_present: Bitmask of GPUs present (on systems where GPU + * presence is detected through APSS) + * @cpu_throttle: Reason for limiting the max pstate + * @mem_throttle: Reason for throttling memory + * @quick_pwr_drop: Indicates if QPD is asserted + * @pwr_shifting_ratio: Indicates the current percentage of power to + * take away from the CPU vs GPU when shifting + * power to maintain a power cap. Value of 100 + * means take all power from CPU. + * @pwr_cap_type: Indicates type of power cap in effect + * @hard_min_pwr_cap: Hard minimum system power cap in Watts. 
+ * Guaranteed unless hardware failure + * @max_pwr_cap: Maximum allowed system power cap in Watts + * @cur_pwr_cap: Current system power cap + * @soft_min_pwr_cap: Soft powercap minimum. OCC may or may not be + * able to maintain this + * @spare/reserved: Unused data + * @cmd: Opal Command Buffer + * @rsp: OCC Response Buffer + */ +struct occ_dynamic_data { + uint8_t occ_state; + uint8_t major_version; + uint8_t minor_version; + uint8_t gpus_present; + union QEMU_PACKED { + struct QEMU_PACKED { /* Version 0x90 */ + uint8_t spare1; + } v9; + struct QEMU_PACKED { /* Version 0xA0 */ + uint8_t wof_enabled; + } v10; + }; + uint8_t cpu_throttle; + uint8_t mem_throttle; + uint8_t quick_pwr_drop; + uint8_t pwr_shifting_ratio; + uint8_t pwr_cap_type; + uint16_t hard_min_pwr_cap; + uint16_t max_pwr_cap; + uint16_t cur_pwr_cap; + uint16_t soft_min_pwr_cap; + uint8_t pad[110]; + struct opal_command_buffer cmd; + struct occ_response_buffer rsp; +} QEMU_PACKED; + +enum occ_response_status { + OCC_RSP_SUCCESS = 0x00, + OCC_RSP_INVALID_COMMAND = 0x11, + OCC_RSP_INVALID_CMD_DATA_LENGTH = 0x12, + OCC_RSP_INVALID_DATA = 0x13, + OCC_RSP_INTERNAL_ERROR = 0x15, +}; + +#define OCC_ROLE_SLAVE 0x00 +#define OCC_ROLE_MASTER 0x01 + +#define OCC_FLAG_RSP_READY 0x01 +#define OCC_FLAG_CMD_IN_PROGRESS 0x02 +#define OPAL_FLAG_CMD_READY 0x80 + +#define PCAP_MAX_POWER_W 100 +#define PCAP_SOFT_MIN_POWER_W 20 +#define PCAP_HARD_MIN_POWER_W 10 + +static bool occ_write_static_data(PnvOCC *occ, + struct occ_pstate_table *static_data, + Error **errp) +{ + PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ); + PnvHomer *homer = occ->homer; + hwaddr static_addr = homer->base + poc->opal_shared_memory_offset; + MemTxResult ret; + + ret = address_space_write(&address_space_memory, static_addr, + MEMTXATTRS_UNSPECIFIED, static_data, + sizeof(*static_data)); + if (ret != MEMTX_OK) { + error_setg(errp, "OCC: cannot write OCC-OPAL static data"); + return false; + } + + return true; +} + +static bool occ_read_dynamic_data(PnvOCC *occ, + struct occ_dynamic_data *dynamic_data, + Error **errp) +{ + PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ); + PnvHomer *homer = occ->homer; + hwaddr static_addr = homer->base + poc->opal_shared_memory_offset; + hwaddr dynamic_addr = static_addr + OPAL_DYNAMIC_DATA_OFFSET; + MemTxResult ret; + + ret = address_space_read(&address_space_memory, dynamic_addr, + MEMTXATTRS_UNSPECIFIED, dynamic_data, + sizeof(*dynamic_data)); + if (ret != MEMTX_OK) { + error_setg(errp, "OCC: cannot read OCC-OPAL dynamic data"); + return false; + } + + return true; +} + +static bool occ_write_dynamic_data(PnvOCC *occ, + struct occ_dynamic_data *dynamic_data, + Error **errp) +{ + PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ); + PnvHomer *homer = occ->homer; + hwaddr static_addr = homer->base + poc->opal_shared_memory_offset; + hwaddr dynamic_addr = static_addr + OPAL_DYNAMIC_DATA_OFFSET; + MemTxResult ret; + + ret = address_space_write(&address_space_memory, dynamic_addr, + MEMTXATTRS_UNSPECIFIED, dynamic_data, + sizeof(*dynamic_data)); + if (ret != MEMTX_OK) { + error_setg(errp, "OCC: cannot write OCC-OPAL dynamic data"); + return false; + } + + return true; +} + +static bool occ_opal_send_response(PnvOCC *occ, + struct occ_dynamic_data *dynamic_data, + enum occ_response_status status, + uint8_t *data, uint16_t datalen) +{ + struct opal_command_buffer *cmd = &dynamic_data->cmd; + struct occ_response_buffer *rsp = &dynamic_data->rsp; + + rsp->request_id = cmd->request_id; + rsp->cmd = cmd->cmd; + rsp->status = status; + rsp->data_size = 
cpu_to_be16(datalen); + if (datalen) { + memcpy(rsp->data, data, datalen); + } + if (!occ_write_dynamic_data(occ, dynamic_data, NULL)) { + return false; + } + /* Would be a memory barrier here */ + rsp->flag = OCC_FLAG_RSP_READY; + cmd->flag = 0; + if (!occ_write_dynamic_data(occ, dynamic_data, NULL)) { + return false; + } + + pnv_occ_raise_msg_irq(occ); + + return true; +} + +/* Returns error status */ +static bool occ_opal_process_command(PnvOCC *occ, + struct occ_dynamic_data *dynamic_data) +{ + struct opal_command_buffer *cmd = &dynamic_data->cmd; + struct occ_response_buffer *rsp = &dynamic_data->rsp; + + if (rsp->flag == 0) { + /* Spend one "tick" in the in-progress state */ + rsp->flag = OCC_FLAG_CMD_IN_PROGRESS; + return occ_write_dynamic_data(occ, dynamic_data, NULL); + } else if (rsp->flag != OCC_FLAG_CMD_IN_PROGRESS) { + return occ_opal_send_response(occ, dynamic_data, + OCC_RSP_INTERNAL_ERROR, + NULL, 0); + } + + switch (cmd->cmd) { + case 0xD1: { /* SET_POWER_CAP */ + uint16_t data; + if (be16_to_cpu(cmd->data_size) != 2) { + return occ_opal_send_response(occ, dynamic_data, + OCC_RSP_INVALID_CMD_DATA_LENGTH, + (uint8_t *)&dynamic_data->cur_pwr_cap, + 2); + } + data = be16_to_cpu(*(uint16_t *)cmd->data); + if (data == 0) { /* clear power cap */ + dynamic_data->pwr_cap_type = 0x00; /* none */ + data = PCAP_MAX_POWER_W; + } else { + dynamic_data->pwr_cap_type = 0x02; /* user set in-band */ + if (data < PCAP_HARD_MIN_POWER_W) { + data = PCAP_HARD_MIN_POWER_W; + } else if (data > PCAP_MAX_POWER_W) { + data = PCAP_MAX_POWER_W; + } + } + dynamic_data->cur_pwr_cap = cpu_to_be16(data); + return occ_opal_send_response(occ, dynamic_data, + OCC_RSP_SUCCESS, + (uint8_t *)&dynamic_data->cur_pwr_cap, 2); + } + + default: + return occ_opal_send_response(occ, dynamic_data, + OCC_RSP_INVALID_COMMAND, + NULL, 0); + } + g_assert_not_reached(); +} + +static bool occ_model_tick(PnvOCC *occ) +{ + QEMU_UNINITIALIZED struct occ_dynamic_data dynamic_data; + + if (!occ_read_dynamic_data(occ, &dynamic_data, NULL)) { + /* Can't move OCC state field to safe because we can't map it! 
*/ + qemu_log("OCC: failed to read HOMER data, shutting down OCC\n"); + return false; + } + if (dynamic_data.cmd.flag == OPAL_FLAG_CMD_READY) { + if (!occ_opal_process_command(occ, &dynamic_data)) { + qemu_log("OCC: failed to write HOMER data, shutting down OCC\n"); + return false; + } + } + + return true; +} + +static bool occ_init_homer_memory(PnvOCC *occ, Error **errp) +{ + PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ); + PnvHomer *homer = occ->homer; + PnvChip *chip = homer->chip; + struct occ_pstate_table static_data; + struct occ_dynamic_data dynamic_data; + int i; + + memset(&static_data, 0, sizeof(static_data)); + static_data.valid = 1; + static_data.version = poc->opal_shared_memory_version; + switch (poc->opal_shared_memory_version) { + case 0x02: + static_data.v2.throttle = 0; + static_data.v2.pstate_min = -2; + static_data.v2.pstate_nom = -1; + static_data.v2.pstate_turbo = -1; + static_data.v2.pstate_ultra_turbo = 0; + static_data.v2.pstates[0].id = 0; + static_data.v2.pstates[1].freq_khz = cpu_to_be32(4000000); + static_data.v2.pstates[1].id = -1; + static_data.v2.pstates[1].freq_khz = cpu_to_be32(3000000); + static_data.v2.pstates[2].id = -2; + static_data.v2.pstates[2].freq_khz = cpu_to_be32(2000000); + for (i = 0; i < chip->nr_cores; i++) { + static_data.v2.core_max[i] = 1; + } + break; + case 0x90: + if (chip->chip_id == 0) { + static_data.v9.occ_role = OCC_ROLE_MASTER; + } else { + static_data.v9.occ_role = OCC_ROLE_SLAVE; + } + static_data.v9.pstate_min = 2; + static_data.v9.pstate_nom = 1; + static_data.v9.pstate_turbo = 1; + static_data.v9.pstate_ultra_turbo = 0; + static_data.v9.pstates[0].id = 0; + static_data.v9.pstates[0].freq_khz = cpu_to_be32(4000000); + static_data.v9.pstates[1].id = 1; + static_data.v9.pstates[1].freq_khz = cpu_to_be32(3000000); + static_data.v9.pstates[2].id = 2; + static_data.v9.pstates[2].freq_khz = cpu_to_be32(2000000); + for (i = 0; i < chip->nr_cores; i++) { + static_data.v9.core_max[i] = 1; + } + break; + case 0xA0: + if (chip->chip_id == 0) { + static_data.v10.occ_role = OCC_ROLE_MASTER; + } else { + static_data.v10.occ_role = OCC_ROLE_SLAVE; + } + static_data.v10.pstate_min = 4; + static_data.v10.pstate_fixed_freq = 3; + static_data.v10.pstate_base = 2; + static_data.v10.pstate_ultra_turbo = 0; + static_data.v10.pstate_fmax = 1; + static_data.v10.minor = 0x01; + static_data.v10.pstates[0].valid = 1; + static_data.v10.pstates[0].id = 0; + static_data.v10.pstates[0].freq_khz = cpu_to_be32(4200000); + static_data.v10.pstates[1].valid = 1; + static_data.v10.pstates[1].id = 1; + static_data.v10.pstates[1].freq_khz = cpu_to_be32(4000000); + static_data.v10.pstates[2].valid = 1; + static_data.v10.pstates[2].id = 2; + static_data.v10.pstates[2].freq_khz = cpu_to_be32(3800000); + static_data.v10.pstates[3].valid = 1; + static_data.v10.pstates[3].id = 3; + static_data.v10.pstates[3].freq_khz = cpu_to_be32(3000000); + static_data.v10.pstates[4].valid = 1; + static_data.v10.pstates[4].id = 4; + static_data.v10.pstates[4].freq_khz = cpu_to_be32(2000000); + for (i = 0; i < chip->nr_cores; i++) { + static_data.v10.core_max[i] = 1; + } + break; + default: + g_assert_not_reached(); + } + if (!occ_write_static_data(occ, &static_data, errp)) { + return false; + } + + memset(&dynamic_data, 0, sizeof(dynamic_data)); + dynamic_data.occ_state = 0x3; /* active */ + dynamic_data.major_version = 0x0; + dynamic_data.hard_min_pwr_cap = cpu_to_be16(PCAP_HARD_MIN_POWER_W); + dynamic_data.max_pwr_cap = cpu_to_be16(PCAP_MAX_POWER_W); + dynamic_data.cur_pwr_cap = 
cpu_to_be16(PCAP_MAX_POWER_W); + dynamic_data.soft_min_pwr_cap = cpu_to_be16(PCAP_SOFT_MIN_POWER_W); + switch (poc->opal_shared_memory_version) { + case 0xA0: + dynamic_data.minor_version = 0x1; + dynamic_data.v10.wof_enabled = 0x1; + break; + case 0x90: + dynamic_data.minor_version = 0x1; + break; + case 0x02: + dynamic_data.minor_version = 0x0; + break; + default: + g_assert_not_reached(); + } + if (!occ_write_dynamic_data(occ, &dynamic_data, errp)) { + return false; + } + + return true; +} diff --git a/hw/ppc/pnv_pnor.c b/hw/ppc/pnv_pnor.c index 62804082992..af7cfd028ba 100644 --- a/hw/ppc/pnv_pnor.c +++ b/hw/ppc/pnv_pnor.c @@ -11,8 +11,8 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/units.h" -#include "sysemu/block-backend.h" -#include "sysemu/blockdev.h" +#include "system/block-backend.h" +#include "system/blockdev.h" #include "hw/loader.h" #include "hw/ppc/pnv_pnor.h" #include "hw/qdev-properties.h" @@ -108,17 +108,18 @@ static void pnv_pnor_realize(DeviceState *dev, Error **errp) memset(s->storage, 0xFF, s->size); } + s->lpc_address = PNOR_SPI_OFFSET; + memory_region_init_io(&s->mmio, OBJECT(s), &pnv_pnor_ops, s, TYPE_PNV_PNOR, s->size); } -static Property pnv_pnor_properties[] = { +static const Property pnv_pnor_properties[] = { DEFINE_PROP_INT64("size", PnvPnor, size, 128 * MiB), DEFINE_PROP_DRIVE("drive", PnvPnor, blk), - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_pnor_class_init(ObjectClass *klass, void *data) +static void pnv_pnor_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/ppc/pnv_psi.c b/hw/ppc/pnv_psi.c index 18cc76a7e4b..5d947d8b520 100644 --- a/hw/ppc/pnv_psi.c +++ b/hw/ppc/pnv_psi.c @@ -18,12 +18,12 @@ */ #include "qemu/osdep.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/irq.h" #include "target/ppc/cpu.h" #include "qemu/log.h" #include "qemu/module.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "qapi/error.h" @@ -552,13 +552,12 @@ static int pnv_psi_dt_xscom(PnvXScomInterface *dev, void *fdt, int xscom_offset) return 0; } -static Property pnv_psi_properties[] = { +static const Property pnv_psi_properties[] = { DEFINE_PROP_UINT64("bar", PnvPsi, bar, 0), DEFINE_PROP_UINT64("fsp-bar", PnvPsi, fsp_bar, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void pnv_psi_power8_class_init(ObjectClass *klass, void *data) +static void pnv_psi_power8_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvPsiClass *ppc = PNV_PSI_CLASS(klass); @@ -888,7 +887,7 @@ static void pnv_psi_power9_realize(DeviceState *dev, Error **errp) pnv_psi_realize(dev, errp); } -static void pnv_psi_power9_class_init(ObjectClass *klass, void *data) +static void pnv_psi_power9_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvPsiClass *ppc = PNV_PSI_CLASS(klass); @@ -897,7 +896,7 @@ static void pnv_psi_power9_class_init(ObjectClass *klass, void *data) dc->desc = "PowerNV PSI Controller POWER9"; dc->realize = pnv_psi_power9_realize; - dc->reset = pnv_psi_power9_reset; + device_class_set_legacy_reset(dc, pnv_psi_power9_reset); ppc->xscom_pcba = PNV9_XSCOM_PSIHB_BASE; ppc->xscom_size = PNV9_XSCOM_PSIHB_SIZE; @@ -914,13 +913,13 @@ static const TypeInfo pnv_psi_power9_info = { .instance_size = sizeof(Pnv9Psi), .instance_init = pnv_psi_power9_instance_init, .class_init = pnv_psi_power9_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { 
TYPE_XIVE_NOTIFIER }, { }, }, }; -static void pnv_psi_power10_class_init(ObjectClass *klass, void *data) +static void pnv_psi_power10_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvPsiClass *ppc = PNV_PSI_CLASS(klass); @@ -940,7 +939,7 @@ static const TypeInfo pnv_psi_power10_info = { .class_init = pnv_psi_power10_class_init, }; -static void pnv_psi_class_init(ObjectClass *klass, void *data) +static void pnv_psi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); @@ -949,7 +948,7 @@ static void pnv_psi_class_init(ObjectClass *klass, void *data) dc->desc = "PowerNV PSI Controller"; device_class_set_props(dc, pnv_psi_properties); - dc->reset = pnv_psi_reset; + device_class_set_legacy_reset(dc, pnv_psi_reset); dc->user_creatable = false; } @@ -960,7 +959,7 @@ static const TypeInfo pnv_psi_info = { .class_init = pnv_psi_class_init, .class_size = sizeof(PnvPsiClass), .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } diff --git a/hw/ppc/pnv_sbe.c b/hw/ppc/pnv_sbe.c index 74cee4eea7a..34dc013d47b 100644 --- a/hw/ppc/pnv_sbe.c +++ b/hw/ppc/pnv_sbe.c @@ -331,7 +331,7 @@ static const MemoryRegionOps pnv_sbe_power9_xscom_mbox_ops = { .endianness = DEVICE_BIG_ENDIAN, }; -static void pnv_sbe_power9_class_init(ObjectClass *klass, void *data) +static void pnv_sbe_power9_class_init(ObjectClass *klass, const void *data) { PnvSBEClass *psc = PNV_SBE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -350,7 +350,7 @@ static const TypeInfo pnv_sbe_power9_type_info = { .class_init = pnv_sbe_power9_class_init, }; -static void pnv_sbe_power10_class_init(ObjectClass *klass, void *data) +static void pnv_sbe_power10_class_init(ObjectClass *klass, const void *data) { PnvSBEClass *psc = PNV_SBE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -386,7 +386,7 @@ static void pnv_sbe_realize(DeviceState *dev, Error **errp) sbe->timer = timer_new_us(QEMU_CLOCK_VIRTUAL, sbe_timer, sbe); } -static void pnv_sbe_class_init(ObjectClass *klass, void *data) +static void pnv_sbe_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/ppc/pnv_xscom.c b/hw/ppc/pnv_xscom.c index d192bbe2c20..fbfec829d5b 100644 --- a/hw/ppc/pnv_xscom.c +++ b/hw/ppc/pnv_xscom.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "qemu/module.h" -#include "sysemu/hw_accel.h" +#include "system/hw_accel.h" #include "target/ppc/cpu.h" #include "hw/sysbus.h" diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index e6fa5580c01..43d0d0e7553 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -27,13 +27,14 @@ #include "hw/ppc/ppc.h" #include "hw/ppc/ppc_e500.h" #include "qemu/timer.h" -#include "sysemu/cpus.h" +#include "exec/cpu-interrupt.h" +#include "system/cpus.h" #include "qemu/log.h" #include "qemu/main-loop.h" #include "qemu/error-report.h" -#include "sysemu/kvm.h" -#include "sysemu/replay.h" -#include "sysemu/runstate.h" +#include "system/kvm.h" +#include "system/replay.h" +#include "system/runstate.h" #include "kvm_ppc.h" #include "migration/vmstate.h" #include "trace.h" @@ -267,7 +268,6 @@ static void power9_set_irq(void *opaque, int pin, int level) break; default: g_assert_not_reached(); - return; } } @@ -729,7 +729,9 @@ static inline int64_t __cpu_ppc_load_decr(CPUPPCState *env, int64_t now, int64_t decr; n = ns_to_tb(tb_env->decr_freq, now); - if (next > 
n && tb_env->flags & PPC_TIMER_BOOKE) { + + /* BookE timers stop when reaching 0. */ + if (next < n && tb_env->flags & PPC_TIMER_BOOKE) { decr = 0; } else { decr = next - n; @@ -1122,16 +1124,21 @@ void cpu_ppc_tb_reset(CPUPPCState *env) timer_del(tb_env->hdecr_timer); ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0); tb_env->hdecr_next = 0; + _cpu_ppc_store_hdecr(cpu, 0, 0, 0, 64); } /* * There is a bug in Linux 2.4 kernels: * if a decrementer exception is pending when it enables msr_ee at startup, * it's not ready to handle it... + * + * On machine reset, this is called before icount is reset, so for + * icount-mode, setting TB registers using now == qemu_clock_get_ns() + * results in them being garbage after icount is reset. Use an + * explicit now == 0 to get a consistent reset state. */ - cpu_ppc_store_decr(env, -1); - cpu_ppc_store_hdecr(env, -1); - cpu_ppc_store_purr(env, 0x0000000000000000ULL); + _cpu_ppc_store_decr(cpu, 0, 0, -1, 64); + _cpu_ppc_store_purr(env, 0, 0); } void cpu_ppc_tb_free(CPUPPCState *env) diff --git a/hw/ppc/ppc405.h b/hw/ppc/ppc405.h deleted file mode 100644 index 9a4312691e1..00000000000 --- a/hw/ppc/ppc405.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - * QEMU PowerPC 405 shared definitions - * - * Copyright (c) 2007 Jocelyn Mayer - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -#ifndef PPC405_H -#define PPC405_H - -#include "qom/object.h" -#include "hw/ppc/ppc4xx.h" -#include "hw/intc/ppc-uic.h" -#include "hw/i2c/ppc4xx_i2c.h" - -/* PLB to OPB bridge */ -#define TYPE_PPC405_POB "ppc405-pob" -OBJECT_DECLARE_SIMPLE_TYPE(Ppc405PobState, PPC405_POB); -struct Ppc405PobState { - Ppc4xxDcrDeviceState parent_obj; - - uint32_t bear; - uint32_t besr0; - uint32_t besr1; -}; - -/* OPB arbitrer */ -#define TYPE_PPC405_OPBA "ppc405-opba" -OBJECT_DECLARE_SIMPLE_TYPE(Ppc405OpbaState, PPC405_OPBA); -struct Ppc405OpbaState { - SysBusDevice parent_obj; - - MemoryRegion io; - uint8_t cr; - uint8_t pr; -}; - -/* DMA controller */ -#define TYPE_PPC405_DMA "ppc405-dma" -OBJECT_DECLARE_SIMPLE_TYPE(Ppc405DmaState, PPC405_DMA); -struct Ppc405DmaState { - Ppc4xxDcrDeviceState parent_obj; - - qemu_irq irqs[4]; - uint32_t cr[4]; - uint32_t ct[4]; - uint32_t da[4]; - uint32_t sa[4]; - uint32_t sg[4]; - uint32_t sr; - uint32_t sgc; - uint32_t slp; - uint32_t pol; -}; - -/* GPIO */ -#define TYPE_PPC405_GPIO "ppc405-gpio" -OBJECT_DECLARE_SIMPLE_TYPE(Ppc405GpioState, PPC405_GPIO); -struct Ppc405GpioState { - SysBusDevice parent_obj; - - MemoryRegion io; - uint32_t or; - uint32_t tcr; - uint32_t osrh; - uint32_t osrl; - uint32_t tsrh; - uint32_t tsrl; - uint32_t odr; - uint32_t ir; - uint32_t rr1; - uint32_t isr1h; - uint32_t isr1l; -}; - -/* On Chip Memory */ -#define TYPE_PPC405_OCM "ppc405-ocm" -OBJECT_DECLARE_SIMPLE_TYPE(Ppc405OcmState, PPC405_OCM); -struct Ppc405OcmState { - Ppc4xxDcrDeviceState parent_obj; - - MemoryRegion ram; - MemoryRegion isarc_ram; - MemoryRegion dsarc_ram; - uint32_t isarc; - uint32_t isacntl; - uint32_t dsarc; - uint32_t dsacntl; -}; - -/* General purpose timers */ -#define TYPE_PPC405_GPT "ppc405-gpt" -OBJECT_DECLARE_SIMPLE_TYPE(Ppc405GptState, PPC405_GPT); -struct Ppc405GptState { - SysBusDevice parent_obj; - - MemoryRegion iomem; - - int64_t tb_offset; - uint32_t tb_freq; - QEMUTimer *timer; - qemu_irq irqs[5]; - uint32_t oe; - uint32_t ol; - uint32_t im; - uint32_t is; - uint32_t ie; - uint32_t comp[5]; - uint32_t mask[5]; -}; - -#define TYPE_PPC405_CPC "ppc405-cpc" -OBJECT_DECLARE_SIMPLE_TYPE(Ppc405CpcState, PPC405_CPC); - -enum { - PPC405EP_CPU_CLK = 0, - PPC405EP_PLB_CLK = 1, - PPC405EP_OPB_CLK = 2, - PPC405EP_EBC_CLK = 3, - PPC405EP_MAL_CLK = 4, - PPC405EP_PCI_CLK = 5, - PPC405EP_UART0_CLK = 6, - PPC405EP_UART1_CLK = 7, - PPC405EP_CLK_NB = 8, -}; - -struct Ppc405CpcState { - Ppc4xxDcrDeviceState parent_obj; - - uint32_t sysclk; - clk_setup_t clk_setup[PPC405EP_CLK_NB]; - uint32_t boot; - uint32_t epctl; - uint32_t pllmr[2]; - uint32_t ucr; - uint32_t srr; - uint32_t jtagid; - uint32_t pci; - /* Clock and power management */ - uint32_t er; - uint32_t fr; - uint32_t sr; -}; - -#define TYPE_PPC405_SOC "ppc405-soc" -OBJECT_DECLARE_SIMPLE_TYPE(Ppc405SoCState, PPC405_SOC); - -struct Ppc405SoCState { - /* Private */ - DeviceState parent_obj; - - /* Public */ - PowerPCCPU cpu; - PPCUIC uic; - Ppc405CpcState cpc; - Ppc405GptState gpt; - Ppc405OcmState ocm; - Ppc405GpioState gpio; - Ppc405DmaState dma; - PPC4xxI2CState i2c; - Ppc4xxEbcState ebc; - Ppc405OpbaState opba; - Ppc405PobState pob; - Ppc4xxPlbState plb; - Ppc4xxMalState mal; - Ppc4xxSdramDdrState sdram; -}; - -#endif /* PPC405_H */ diff --git a/hw/ppc/ppc405_boards.c b/hw/ppc/ppc405_boards.c deleted file mode 100644 index c44e7ed162f..00000000000 --- a/hw/ppc/ppc405_boards.c +++ /dev/null @@ -1,520 +0,0 @@ -/* - * QEMU PowerPC 405 evaluation boards emulation - * - * Copyright (c) 2007 Jocelyn Mayer - * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "qemu/osdep.h" -#include "qemu/units.h" -#include "qapi/error.h" -#include "qemu/datadir.h" -#include "cpu.h" -#include "hw/ppc/ppc.h" -#include "hw/qdev-properties.h" -#include "hw/sysbus.h" -#include "ppc405.h" -#include "hw/rtc/m48t59.h" -#include "hw/block/flash.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" -#include "sysemu/block-backend.h" -#include "hw/boards.h" -#include "qemu/error-report.h" -#include "hw/loader.h" -#include "qemu/cutils.h" -#include "elf.h" - -#define BIOS_FILENAME "ppc405_rom.bin" -#define BIOS_SIZE (2 * MiB) - -#define KERNEL_LOAD_ADDR 0x01000000 -#define INITRD_LOAD_ADDR 0x01800000 - -#define PPC405EP_SDRAM_BASE 0x00000000 -#define PPC405EP_SRAM_BASE 0xFFF00000 -#define PPC405EP_SRAM_SIZE (512 * KiB) - -#define USE_FLASH_BIOS - -#define TYPE_PPC405_MACHINE MACHINE_TYPE_NAME("ppc405") -OBJECT_DECLARE_SIMPLE_TYPE(Ppc405MachineState, PPC405_MACHINE); - -struct Ppc405MachineState { - /* Private */ - MachineState parent_obj; - /* Public */ - - Ppc405SoCState soc; -}; - -/* CPU reset handler when booting directly from a loaded kernel */ -static struct boot_info { - uint32_t entry; - uint32_t bdloc; - uint32_t initrd_base; - uint32_t initrd_size; - uint32_t cmdline_base; - uint32_t cmdline_size; -} boot_info; - -static void main_cpu_reset(void *opaque) -{ - PowerPCCPU *cpu = opaque; - CPUPPCState *env = &cpu->env; - struct boot_info *bi = env->load_info; - - cpu_reset(CPU(cpu)); - - /* stack: top of sram */ - env->gpr[1] = PPC405EP_SRAM_BASE + PPC405EP_SRAM_SIZE - 8; - - /* Tune our boot state */ - env->gpr[3] = bi->bdloc; - env->gpr[4] = bi->initrd_base; - env->gpr[5] = bi->initrd_base + bi->initrd_size; - env->gpr[6] = bi->cmdline_base; - env->gpr[7] = bi->cmdline_size; - - env->nip = bi->entry; -} - -/* Bootinfo as set-up by u-boot */ -typedef struct { - uint32_t bi_memstart; - uint32_t bi_memsize; - uint32_t bi_flashstart; - uint32_t bi_flashsize; - uint32_t bi_flashoffset; /* 0x10 */ - uint32_t bi_sramstart; - uint32_t bi_sramsize; - uint32_t bi_bootflags; - uint32_t bi_ipaddr; /* 0x20 */ - uint8_t bi_enetaddr[6]; - uint16_t bi_ethspeed; - uint32_t bi_intfreq; - uint32_t bi_busfreq; /* 0x30 */ - uint32_t bi_baudrate; - uint8_t bi_s_version[4]; - uint8_t bi_r_version[32]; - uint32_t bi_procfreq; - uint32_t bi_plb_busfreq; - uint32_t bi_pci_busfreq; - uint8_t bi_pci_enetaddr[6]; - uint8_t bi_pci_enetaddr2[6]; /* PPC405EP specific */ - uint32_t bi_opbfreq; - uint32_t 
bi_iic_fast[2]; -} ppc4xx_bd_info_t; - -static void ppc405_set_default_bootinfo(ppc4xx_bd_info_t *bd, - ram_addr_t ram_size) -{ - memset(bd, 0, sizeof(*bd)); - - bd->bi_memstart = PPC405EP_SDRAM_BASE; - bd->bi_memsize = ram_size; - bd->bi_sramstart = PPC405EP_SRAM_BASE; - bd->bi_sramsize = PPC405EP_SRAM_SIZE; - bd->bi_bootflags = 0; - bd->bi_intfreq = 133333333; - bd->bi_busfreq = 33333333; - bd->bi_baudrate = 115200; - bd->bi_s_version[0] = 'Q'; - bd->bi_s_version[1] = 'M'; - bd->bi_s_version[2] = 'U'; - bd->bi_s_version[3] = '\0'; - bd->bi_r_version[0] = 'Q'; - bd->bi_r_version[1] = 'E'; - bd->bi_r_version[2] = 'M'; - bd->bi_r_version[3] = 'U'; - bd->bi_r_version[4] = '\0'; - bd->bi_procfreq = 133333333; - bd->bi_plb_busfreq = 33333333; - bd->bi_pci_busfreq = 33333333; - bd->bi_opbfreq = 33333333; -} - -static ram_addr_t __ppc405_set_bootinfo(CPUPPCState *env, ppc4xx_bd_info_t *bd) -{ - CPUState *cs = env_cpu(env); - ram_addr_t bdloc; - int i, n; - - /* We put the bd structure at the top of memory */ - if (bd->bi_memsize >= 0x01000000UL) { - bdloc = 0x01000000UL - sizeof(ppc4xx_bd_info_t); - } else { - bdloc = bd->bi_memsize - sizeof(ppc4xx_bd_info_t); - } - stl_be_phys(cs->as, bdloc + 0x00, bd->bi_memstart); - stl_be_phys(cs->as, bdloc + 0x04, bd->bi_memsize); - stl_be_phys(cs->as, bdloc + 0x08, bd->bi_flashstart); - stl_be_phys(cs->as, bdloc + 0x0C, bd->bi_flashsize); - stl_be_phys(cs->as, bdloc + 0x10, bd->bi_flashoffset); - stl_be_phys(cs->as, bdloc + 0x14, bd->bi_sramstart); - stl_be_phys(cs->as, bdloc + 0x18, bd->bi_sramsize); - stl_be_phys(cs->as, bdloc + 0x1C, bd->bi_bootflags); - stl_be_phys(cs->as, bdloc + 0x20, bd->bi_ipaddr); - for (i = 0; i < 6; i++) { - stb_phys(cs->as, bdloc + 0x24 + i, bd->bi_enetaddr[i]); - } - stw_be_phys(cs->as, bdloc + 0x2A, bd->bi_ethspeed); - stl_be_phys(cs->as, bdloc + 0x2C, bd->bi_intfreq); - stl_be_phys(cs->as, bdloc + 0x30, bd->bi_busfreq); - stl_be_phys(cs->as, bdloc + 0x34, bd->bi_baudrate); - for (i = 0; i < 4; i++) { - stb_phys(cs->as, bdloc + 0x38 + i, bd->bi_s_version[i]); - } - for (i = 0; i < 32; i++) { - stb_phys(cs->as, bdloc + 0x3C + i, bd->bi_r_version[i]); - } - stl_be_phys(cs->as, bdloc + 0x5C, bd->bi_procfreq); - stl_be_phys(cs->as, bdloc + 0x60, bd->bi_plb_busfreq); - stl_be_phys(cs->as, bdloc + 0x64, bd->bi_pci_busfreq); - for (i = 0; i < 6; i++) { - stb_phys(cs->as, bdloc + 0x68 + i, bd->bi_pci_enetaddr[i]); - } - n = 0x70; /* includes 2 bytes hole */ - for (i = 0; i < 6; i++) { - stb_phys(cs->as, bdloc + n++, bd->bi_pci_enetaddr2[i]); - } - stl_be_phys(cs->as, bdloc + n, bd->bi_opbfreq); - n += 4; - for (i = 0; i < 2; i++) { - stl_be_phys(cs->as, bdloc + n, bd->bi_iic_fast[i]); - n += 4; - } - - return bdloc; -} - -static ram_addr_t ppc405_set_bootinfo(CPUPPCState *env, ram_addr_t ram_size) -{ - ppc4xx_bd_info_t bd; - - memset(&bd, 0, sizeof(bd)); - - ppc405_set_default_bootinfo(&bd, ram_size); - - return __ppc405_set_bootinfo(env, &bd); -} - -static void boot_from_kernel(MachineState *machine, PowerPCCPU *cpu) -{ - CPUPPCState *env = &cpu->env; - hwaddr boot_entry; - hwaddr kernel_base; - int kernel_size; - hwaddr initrd_base; - int initrd_size; - ram_addr_t bdloc; - int len; - - bdloc = ppc405_set_bootinfo(env, machine->ram_size); - boot_info.bdloc = bdloc; - - kernel_size = load_elf(machine->kernel_filename, NULL, NULL, NULL, - &boot_entry, &kernel_base, NULL, NULL, - 1, PPC_ELF_MACHINE, 0, 0); - if (kernel_size < 0) { - error_report("Could not load kernel '%s' : %s", - machine->kernel_filename, 
load_elf_strerror(kernel_size)); - exit(1); - } - boot_info.entry = boot_entry; - - /* load initrd */ - if (machine->initrd_filename) { - initrd_base = INITRD_LOAD_ADDR; - initrd_size = load_image_targphys(machine->initrd_filename, initrd_base, - machine->ram_size - initrd_base); - if (initrd_size < 0) { - error_report("could not load initial ram disk '%s'", - machine->initrd_filename); - exit(1); - } - - boot_info.initrd_base = initrd_base; - boot_info.initrd_size = initrd_size; - } - - if (machine->kernel_cmdline) { - len = strlen(machine->kernel_cmdline); - bdloc -= ((len + 255) & ~255); - cpu_physical_memory_write(bdloc, machine->kernel_cmdline, len + 1); - boot_info.cmdline_base = bdloc; - boot_info.cmdline_size = bdloc + len; - } - - /* Install our custom reset handler to start from Linux */ - qemu_register_reset(main_cpu_reset, cpu); - env->load_info = &boot_info; -} - -static void ppc405_init(MachineState *machine) -{ - Ppc405MachineState *ppc405 = PPC405_MACHINE(machine); - const char *kernel_filename = machine->kernel_filename; - MemoryRegion *sysmem = get_system_memory(); - - object_initialize_child(OBJECT(machine), "soc", &ppc405->soc, - TYPE_PPC405_SOC); - object_property_set_link(OBJECT(&ppc405->soc), "dram", - OBJECT(machine->ram), &error_abort); - object_property_set_uint(OBJECT(&ppc405->soc), "sys-clk", 33333333, - &error_abort); - qdev_realize(DEVICE(&ppc405->soc), NULL, &error_fatal); - - /* allocate and load BIOS */ - if (machine->firmware) { - MemoryRegion *bios = g_new(MemoryRegion, 1); - g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, - machine->firmware); - long bios_size; - - memory_region_init_rom(bios, NULL, "ef405ep.bios", BIOS_SIZE, - &error_fatal); - - if (!filename) { - error_report("Could not find firmware '%s'", machine->firmware); - exit(1); - } - - bios_size = load_image_size(filename, - memory_region_get_ram_ptr(bios), - BIOS_SIZE); - if (bios_size < 0) { - error_report("Could not load PowerPC BIOS '%s'", machine->firmware); - exit(1); - } - - bios_size = (bios_size + 0xfff) & ~0xfff; - memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios); - } - - /* Load kernel and initrd using U-Boot images */ - if (kernel_filename && machine->firmware) { - target_ulong kernel_base, initrd_base; - long kernel_size, initrd_size; - - kernel_base = KERNEL_LOAD_ADDR; - kernel_size = load_image_targphys(kernel_filename, kernel_base, - machine->ram_size - kernel_base); - if (kernel_size < 0) { - error_report("could not load kernel '%s'", kernel_filename); - exit(1); - } - - /* load initrd */ - if (machine->initrd_filename) { - initrd_base = INITRD_LOAD_ADDR; - initrd_size = load_image_targphys(machine->initrd_filename, - initrd_base, - machine->ram_size - initrd_base); - if (initrd_size < 0) { - error_report("could not load initial ram disk '%s'", - machine->initrd_filename); - exit(1); - } - } - - /* Load ELF kernel and rootfs.cpio */ - } else if (kernel_filename && !machine->firmware) { - ppc4xx_sdram_ddr_enable(&ppc405->soc.sdram); - boot_from_kernel(machine, &ppc405->soc.cpu); - } -} - -static void ppc405_machine_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - - mc->desc = "PPC405 generic machine"; - mc->init = ppc405_init; - mc->default_ram_size = 128 * MiB; - mc->default_ram_id = "ppc405.ram"; - mc->deprecation_reason = "machine is old and unmaintained"; -} - -static const TypeInfo ppc405_machine_type = { - .name = TYPE_PPC405_MACHINE, - .parent = TYPE_MACHINE, - .instance_size = 
sizeof(Ppc405MachineState), - .class_init = ppc405_machine_class_init, - .abstract = true, -}; - -/*****************************************************************************/ -/* PPC405EP reference board (IBM) */ -/* - * Standalone board with: - * - PowerPC 405EP CPU - * - SDRAM (0x00000000) - * - Flash (0xFFF80000) - * - SRAM (0xFFF00000) - * - NVRAM (0xF0000000) - * - FPGA (0xF0300000) - */ - -#define PPC405EP_NVRAM_BASE 0xF0000000 -#define PPC405EP_FPGA_BASE 0xF0300000 -#define PPC405EP_FLASH_BASE 0xFFF80000 - -#define TYPE_REF405EP_FPGA "ref405ep-fpga" -OBJECT_DECLARE_SIMPLE_TYPE(Ref405epFpgaState, REF405EP_FPGA); -struct Ref405epFpgaState { - SysBusDevice parent_obj; - - MemoryRegion iomem; - - uint8_t reg0; - uint8_t reg1; -}; - -static uint64_t ref405ep_fpga_readb(void *opaque, hwaddr addr, unsigned size) -{ - Ref405epFpgaState *fpga = opaque; - uint32_t ret; - - switch (addr) { - case 0x0: - ret = fpga->reg0; - break; - case 0x1: - ret = fpga->reg1; - break; - default: - ret = 0; - break; - } - - return ret; -} - -static void ref405ep_fpga_writeb(void *opaque, hwaddr addr, uint64_t value, - unsigned size) -{ - Ref405epFpgaState *fpga = opaque; - - switch (addr) { - case 0x0: - /* Read only */ - break; - case 0x1: - fpga->reg1 = value; - break; - default: - break; - } -} - -static const MemoryRegionOps ref405ep_fpga_ops = { - .read = ref405ep_fpga_readb, - .write = ref405ep_fpga_writeb, - .impl.min_access_size = 1, - .impl.max_access_size = 1, - .valid.min_access_size = 1, - .valid.max_access_size = 4, - .endianness = DEVICE_BIG_ENDIAN, -}; - -static void ref405ep_fpga_reset(DeviceState *dev) -{ - Ref405epFpgaState *fpga = REF405EP_FPGA(dev); - - fpga->reg0 = 0x00; - fpga->reg1 = 0x0F; -} - -static void ref405ep_fpga_realize(DeviceState *dev, Error **errp) -{ - Ref405epFpgaState *s = REF405EP_FPGA(dev); - - memory_region_init_io(&s->iomem, OBJECT(s), &ref405ep_fpga_ops, s, - "fpga", 0x00000100); - sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); -} - -static void ref405ep_fpga_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - dc->realize = ref405ep_fpga_realize; - dc->reset = ref405ep_fpga_reset; - /* Reason: only works as part of a ppc405 board */ - dc->user_creatable = false; -} - -static const TypeInfo ref405ep_fpga_type = { - .name = TYPE_REF405EP_FPGA, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(Ref405epFpgaState), - .class_init = ref405ep_fpga_class_init, -}; - -static void ref405ep_init(MachineState *machine) -{ - DeviceState *dev; - SysBusDevice *s; - MemoryRegion *sram = g_new(MemoryRegion, 1); - - ppc405_init(machine); - - /* allocate SRAM */ - memory_region_init_ram(sram, NULL, "ref405ep.sram", PPC405EP_SRAM_SIZE, - &error_fatal); - memory_region_add_subregion(get_system_memory(), PPC405EP_SRAM_BASE, sram); - - /* Register FPGA */ - dev = qdev_new(TYPE_REF405EP_FPGA); - object_property_add_child(OBJECT(machine), "fpga", OBJECT(dev)); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, PPC405EP_FPGA_BASE); - - /* Register NVRAM */ - dev = qdev_new("sysbus-m48t08"); - qdev_prop_set_int32(dev, "base-year", 1968); - s = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(s, &error_fatal); - sysbus_mmio_map(s, 0, PPC405EP_NVRAM_BASE); -} - -static void ref405ep_class_init(ObjectClass *oc, void *data) -{ - MachineClass *mc = MACHINE_CLASS(oc); - - mc->desc = "ref405ep"; - mc->init = ref405ep_init; -} - -static const TypeInfo ref405ep_type = { - .name = 
MACHINE_TYPE_NAME("ref405ep"), - .parent = TYPE_PPC405_MACHINE, - .class_init = ref405ep_class_init, -}; - -static void ppc405_machine_init(void) -{ - type_register_static(&ppc405_machine_type); - type_register_static(&ref405ep_type); - type_register_static(&ref405ep_fpga_type); -} - -type_init(ppc405_machine_init) diff --git a/hw/ppc/ppc405_uc.c b/hw/ppc/ppc405_uc.c deleted file mode 100644 index 0cc68178adf..00000000000 --- a/hw/ppc/ppc405_uc.c +++ /dev/null @@ -1,1217 +0,0 @@ -/* - * QEMU PowerPC 405 embedded processors emulation - * - * Copyright (c) 2007 Jocelyn Mayer - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "qemu/osdep.h" -#include "qemu/units.h" -#include "qapi/error.h" -#include "qemu/log.h" -#include "cpu.h" -#include "hw/ppc/ppc.h" -#include "hw/i2c/ppc4xx_i2c.h" -#include "hw/irq.h" -#include "hw/qdev-properties.h" -#include "ppc405.h" -#include "hw/char/serial.h" -#include "qemu/timer.h" -#include "sysemu/reset.h" -#include "sysemu/sysemu.h" -#include "exec/address-spaces.h" -#include "hw/intc/ppc-uic.h" -#include "trace.h" - -/*****************************************************************************/ -/* Shared peripherals */ - -/*****************************************************************************/ -/* PLB to OPB bridge */ -enum { - POB0_BESR0 = 0x0A0, - POB0_BESR1 = 0x0A2, - POB0_BEAR = 0x0A4, -}; - -static uint32_t dcr_read_pob(void *opaque, int dcrn) -{ - Ppc405PobState *pob = opaque; - uint32_t ret; - - switch (dcrn) { - case POB0_BEAR: - ret = pob->bear; - break; - case POB0_BESR0: - ret = pob->besr0; - break; - case POB0_BESR1: - ret = pob->besr1; - break; - default: - /* Avoid gcc warning */ - ret = 0; - break; - } - - return ret; -} - -static void dcr_write_pob(void *opaque, int dcrn, uint32_t val) -{ - Ppc405PobState *pob = opaque; - - switch (dcrn) { - case POB0_BEAR: - /* Read only */ - break; - case POB0_BESR0: - /* Write-clear */ - pob->besr0 &= ~val; - break; - case POB0_BESR1: - /* Write-clear */ - pob->besr1 &= ~val; - break; - } -} - -static void ppc405_pob_reset(DeviceState *dev) -{ - Ppc405PobState *pob = PPC405_POB(dev); - - /* No error */ - pob->bear = 0x00000000; - pob->besr0 = 0x0000000; - pob->besr1 = 0x0000000; -} - -static void ppc405_pob_realize(DeviceState *dev, Error **errp) -{ - Ppc405PobState *pob = PPC405_POB(dev); - Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); - - ppc4xx_dcr_register(dcr, POB0_BEAR, pob, &dcr_read_pob, &dcr_write_pob); - ppc4xx_dcr_register(dcr, POB0_BESR0, 
pob, &dcr_read_pob, &dcr_write_pob); - ppc4xx_dcr_register(dcr, POB0_BESR1, pob, &dcr_read_pob, &dcr_write_pob); -} - -static void ppc405_pob_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - dc->realize = ppc405_pob_realize; - dc->reset = ppc405_pob_reset; - /* Reason: only works as function of a ppc4xx SoC */ - dc->user_creatable = false; -} - -/*****************************************************************************/ -/* OPB arbitrer */ -static uint64_t opba_readb(void *opaque, hwaddr addr, unsigned size) -{ - Ppc405OpbaState *opba = opaque; - uint32_t ret; - - switch (addr) { - case 0x00: - ret = opba->cr; - break; - case 0x01: - ret = opba->pr; - break; - default: - ret = 0x00; - break; - } - - trace_opba_readb(addr, ret); - return ret; -} - -static void opba_writeb(void *opaque, hwaddr addr, uint64_t value, - unsigned size) -{ - Ppc405OpbaState *opba = opaque; - - trace_opba_writeb(addr, value); - - switch (addr) { - case 0x00: - opba->cr = value & 0xF8; - break; - case 0x01: - opba->pr = value & 0xFF; - break; - default: - break; - } -} -static const MemoryRegionOps opba_ops = { - .read = opba_readb, - .write = opba_writeb, - .impl.min_access_size = 1, - .impl.max_access_size = 1, - .valid.min_access_size = 1, - .valid.max_access_size = 4, - .endianness = DEVICE_BIG_ENDIAN, -}; - -static void ppc405_opba_reset(DeviceState *dev) -{ - Ppc405OpbaState *opba = PPC405_OPBA(dev); - - opba->cr = 0x00; /* No dynamic priorities - park disabled */ - opba->pr = 0x11; -} - -static void ppc405_opba_realize(DeviceState *dev, Error **errp) -{ - Ppc405OpbaState *s = PPC405_OPBA(dev); - - memory_region_init_io(&s->io, OBJECT(s), &opba_ops, s, "opba", 2); - sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->io); -} - -static void ppc405_opba_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - dc->realize = ppc405_opba_realize; - dc->reset = ppc405_opba_reset; - /* Reason: only works as function of a ppc4xx SoC */ - dc->user_creatable = false; -} - -/*****************************************************************************/ -/* Code decompression controller */ -/* XXX: TODO */ - -/*****************************************************************************/ -/* DMA controller */ -enum { - DMA0_CR0 = 0x100, - DMA0_CT0 = 0x101, - DMA0_DA0 = 0x102, - DMA0_SA0 = 0x103, - DMA0_SG0 = 0x104, - DMA0_CR1 = 0x108, - DMA0_CT1 = 0x109, - DMA0_DA1 = 0x10A, - DMA0_SA1 = 0x10B, - DMA0_SG1 = 0x10C, - DMA0_CR2 = 0x110, - DMA0_CT2 = 0x111, - DMA0_DA2 = 0x112, - DMA0_SA2 = 0x113, - DMA0_SG2 = 0x114, - DMA0_CR3 = 0x118, - DMA0_CT3 = 0x119, - DMA0_DA3 = 0x11A, - DMA0_SA3 = 0x11B, - DMA0_SG3 = 0x11C, - DMA0_SR = 0x120, - DMA0_SGC = 0x123, - DMA0_SLP = 0x125, - DMA0_POL = 0x126, -}; - -static uint32_t dcr_read_dma(void *opaque, int dcrn) -{ - return 0; -} - -static void dcr_write_dma(void *opaque, int dcrn, uint32_t val) -{ -} - -static void ppc405_dma_reset(DeviceState *dev) -{ - Ppc405DmaState *dma = PPC405_DMA(dev); - int i; - - for (i = 0; i < 4; i++) { - dma->cr[i] = 0x00000000; - dma->ct[i] = 0x00000000; - dma->da[i] = 0x00000000; - dma->sa[i] = 0x00000000; - dma->sg[i] = 0x00000000; - } - dma->sr = 0x00000000; - dma->sgc = 0x00000000; - dma->slp = 0x7C000000; - dma->pol = 0x00000000; -} - -static void ppc405_dma_realize(DeviceState *dev, Error **errp) -{ - Ppc405DmaState *dma = PPC405_DMA(dev); - Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); - int i; - - for (i = 0; i < ARRAY_SIZE(dma->irqs); i++) { - sysbus_init_irq(SYS_BUS_DEVICE(dma), 
&dma->irqs[i]); - } - - ppc4xx_dcr_register(dcr, DMA0_CR0, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_CT0, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_DA0, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_SA0, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_SG0, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_CR1, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_CT1, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_DA1, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_SA1, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_SG1, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_CR2, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_CT2, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_DA2, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_SA2, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_SG2, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_CR3, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_CT3, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_DA3, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_SA3, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_SG3, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_SR, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_SGC, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_SLP, dma, &dcr_read_dma, &dcr_write_dma); - ppc4xx_dcr_register(dcr, DMA0_POL, dma, &dcr_read_dma, &dcr_write_dma); -} - -static void ppc405_dma_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - dc->realize = ppc405_dma_realize; - dc->reset = ppc405_dma_reset; - /* Reason: only works as function of a ppc4xx SoC */ - dc->user_creatable = false; -} - -/*****************************************************************************/ -/* GPIO */ -static uint64_t ppc405_gpio_read(void *opaque, hwaddr addr, unsigned size) -{ - trace_ppc405_gpio_read(addr, size); - return 0; -} - -static void ppc405_gpio_write(void *opaque, hwaddr addr, uint64_t value, - unsigned size) -{ - trace_ppc405_gpio_write(addr, size, value); -} - -static const MemoryRegionOps ppc405_gpio_ops = { - .read = ppc405_gpio_read, - .write = ppc405_gpio_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void ppc405_gpio_realize(DeviceState *dev, Error **errp) -{ - Ppc405GpioState *s = PPC405_GPIO(dev); - - memory_region_init_io(&s->io, OBJECT(s), &ppc405_gpio_ops, s, "gpio", - 0x38); - sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->io); -} - -static void ppc405_gpio_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - dc->realize = ppc405_gpio_realize; - /* Reason: only works as function of a ppc4xx SoC */ - dc->user_creatable = false; -} - -/*****************************************************************************/ -/* On Chip Memory */ -enum { - OCM0_ISARC = 0x018, - OCM0_ISACNTL = 0x019, - OCM0_DSARC = 0x01A, - OCM0_DSACNTL = 0x01B, -}; - -static void ocm_update_mappings(Ppc405OcmState *ocm, - uint32_t isarc, uint32_t isacntl, - uint32_t dsarc, uint32_t dsacntl) -{ - trace_ocm_update_mappings(isarc, isacntl, dsarc, dsacntl, ocm->isarc, - ocm->isacntl, ocm->dsarc, ocm->dsacntl); - - if (ocm->isarc != isarc || 
- (ocm->isacntl & 0x80000000) != (isacntl & 0x80000000)) { - if (ocm->isacntl & 0x80000000) { - /* Unmap previously assigned memory region */ - trace_ocm_unmap("ISA", ocm->isarc); - memory_region_del_subregion(get_system_memory(), &ocm->isarc_ram); - } - if (isacntl & 0x80000000) { - /* Map new instruction memory region */ - trace_ocm_map("ISA", isarc); - memory_region_add_subregion(get_system_memory(), isarc, - &ocm->isarc_ram); - } - } - if (ocm->dsarc != dsarc || - (ocm->dsacntl & 0x80000000) != (dsacntl & 0x80000000)) { - if (ocm->dsacntl & 0x80000000) { - /* Beware not to unmap the region we just mapped */ - if (!(isacntl & 0x80000000) || ocm->dsarc != isarc) { - /* Unmap previously assigned memory region */ - trace_ocm_unmap("DSA", ocm->dsarc); - memory_region_del_subregion(get_system_memory(), - &ocm->dsarc_ram); - } - } - if (dsacntl & 0x80000000) { - /* Beware not to remap the region we just mapped */ - if (!(isacntl & 0x80000000) || dsarc != isarc) { - /* Map new data memory region */ - trace_ocm_map("DSA", dsarc); - memory_region_add_subregion(get_system_memory(), dsarc, - &ocm->dsarc_ram); - } - } - } -} - -static uint32_t dcr_read_ocm(void *opaque, int dcrn) -{ - Ppc405OcmState *ocm = opaque; - uint32_t ret; - - switch (dcrn) { - case OCM0_ISARC: - ret = ocm->isarc; - break; - case OCM0_ISACNTL: - ret = ocm->isacntl; - break; - case OCM0_DSARC: - ret = ocm->dsarc; - break; - case OCM0_DSACNTL: - ret = ocm->dsacntl; - break; - default: - ret = 0; - break; - } - - return ret; -} - -static void dcr_write_ocm(void *opaque, int dcrn, uint32_t val) -{ - Ppc405OcmState *ocm = opaque; - uint32_t isarc, dsarc, isacntl, dsacntl; - - isarc = ocm->isarc; - dsarc = ocm->dsarc; - isacntl = ocm->isacntl; - dsacntl = ocm->dsacntl; - switch (dcrn) { - case OCM0_ISARC: - isarc = val & 0xFC000000; - break; - case OCM0_ISACNTL: - isacntl = val & 0xC0000000; - break; - case OCM0_DSARC: - isarc = val & 0xFC000000; - break; - case OCM0_DSACNTL: - isacntl = val & 0xC0000000; - break; - } - ocm_update_mappings(ocm, isarc, isacntl, dsarc, dsacntl); - ocm->isarc = isarc; - ocm->dsarc = dsarc; - ocm->isacntl = isacntl; - ocm->dsacntl = dsacntl; -} - -static void ppc405_ocm_reset(DeviceState *dev) -{ - Ppc405OcmState *ocm = PPC405_OCM(dev); - uint32_t isarc, dsarc, isacntl, dsacntl; - - isarc = 0x00000000; - isacntl = 0x00000000; - dsarc = 0x00000000; - dsacntl = 0x00000000; - ocm_update_mappings(ocm, isarc, isacntl, dsarc, dsacntl); - ocm->isarc = isarc; - ocm->dsarc = dsarc; - ocm->isacntl = isacntl; - ocm->dsacntl = dsacntl; -} - -static void ppc405_ocm_realize(DeviceState *dev, Error **errp) -{ - Ppc405OcmState *ocm = PPC405_OCM(dev); - Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); - - /* XXX: Size is 4096 or 0x04000000 */ - memory_region_init_ram(&ocm->isarc_ram, OBJECT(ocm), "ppc405.ocm", 4 * KiB, - &error_fatal); - memory_region_init_alias(&ocm->dsarc_ram, OBJECT(ocm), "ppc405.dsarc", - &ocm->isarc_ram, 0, 4 * KiB); - - ppc4xx_dcr_register(dcr, OCM0_ISARC, ocm, &dcr_read_ocm, &dcr_write_ocm); - ppc4xx_dcr_register(dcr, OCM0_ISACNTL, ocm, &dcr_read_ocm, &dcr_write_ocm); - ppc4xx_dcr_register(dcr, OCM0_DSARC, ocm, &dcr_read_ocm, &dcr_write_ocm); - ppc4xx_dcr_register(dcr, OCM0_DSACNTL, ocm, &dcr_read_ocm, &dcr_write_ocm); -} - -static void ppc405_ocm_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - dc->realize = ppc405_ocm_realize; - dc->reset = ppc405_ocm_reset; - /* Reason: only works as function of a ppc4xx SoC */ - dc->user_creatable = false; -} - 
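/*
 * Illustrative sketch added by the editor, not part of the patch itself:
 * the POB, DMA, OCM and CPC models deleted above all follow the same
 * PPC4xx DCR (Device Control Register) pattern -- a device derived from
 * Ppc4xxDcrDeviceState registers one read and one write callback per DCR
 * number from its realize() hook. The sketch below shows that pattern in
 * isolation. "MyDevState", "mydev_*" and MYDEV0_CFG are invented names;
 * the ppc4xx_dcr_register() call and the callback signatures are taken
 * from the calls visible in the deleted code above.
 */
#include "qemu/osdep.h"
#include "hw/ppc/ppc4xx.h"

#define MYDEV0_CFG 0x1F0    /* hypothetical DCR number for this example */

typedef struct {
    Ppc4xxDcrDeviceState parent_obj;
    uint32_t cfg;
} MyDevState;

static uint32_t mydev_dcr_read(void *opaque, int dcrn)
{
    MyDevState *s = opaque;

    /* Return the backing register; unknown DCRs read as zero. */
    return dcrn == MYDEV0_CFG ? s->cfg : 0;
}

static void mydev_dcr_write(void *opaque, int dcrn, uint32_t val)
{
    MyDevState *s = opaque;

    if (dcrn == MYDEV0_CFG) {
        s->cfg = val;
    }
}

static void mydev_realize(DeviceState *dev, Error **errp)
{
    /* A real QOM device would use its cast macro here instead of a C cast. */
    MyDevState *s = (MyDevState *)dev;
    Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev);

    ppc4xx_dcr_register(dcr, MYDEV0_CFG, s, &mydev_dcr_read, &mydev_dcr_write);
}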
-/*****************************************************************************/ -/* General purpose timers */ -static int ppc4xx_gpt_compare(Ppc405GptState *gpt, int n) -{ - /* XXX: TODO */ - return 0; -} - -static void ppc4xx_gpt_set_output(Ppc405GptState *gpt, int n, int level) -{ - /* XXX: TODO */ -} - -static void ppc4xx_gpt_set_outputs(Ppc405GptState *gpt) -{ - uint32_t mask; - int i; - - mask = 0x80000000; - for (i = 0; i < 5; i++) { - if (gpt->oe & mask) { - /* Output is enabled */ - if (ppc4xx_gpt_compare(gpt, i)) { - /* Comparison is OK */ - ppc4xx_gpt_set_output(gpt, i, gpt->ol & mask); - } else { - /* Comparison is KO */ - ppc4xx_gpt_set_output(gpt, i, gpt->ol & mask ? 0 : 1); - } - } - mask = mask >> 1; - } -} - -static void ppc4xx_gpt_set_irqs(Ppc405GptState *gpt) -{ - uint32_t mask; - int i; - - mask = 0x00008000; - for (i = 0; i < 5; i++) { - if (gpt->is & gpt->im & mask) { - qemu_irq_raise(gpt->irqs[i]); - } else { - qemu_irq_lower(gpt->irqs[i]); - } - mask = mask >> 1; - } -} - -static void ppc4xx_gpt_compute_timer(Ppc405GptState *gpt) -{ - /* XXX: TODO */ -} - -static uint64_t ppc4xx_gpt_read(void *opaque, hwaddr addr, unsigned size) -{ - Ppc405GptState *gpt = opaque; - uint32_t ret; - int idx; - - trace_ppc4xx_gpt_read(addr, size); - - switch (addr) { - case 0x00: - /* Time base counter */ - ret = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + gpt->tb_offset, - gpt->tb_freq, NANOSECONDS_PER_SECOND); - break; - case 0x10: - /* Output enable */ - ret = gpt->oe; - break; - case 0x14: - /* Output level */ - ret = gpt->ol; - break; - case 0x18: - /* Interrupt mask */ - ret = gpt->im; - break; - case 0x1C: - case 0x20: - /* Interrupt status */ - ret = gpt->is; - break; - case 0x24: - /* Interrupt enable */ - ret = gpt->ie; - break; - case 0x80 ... 0x90: - /* Compare timer */ - idx = (addr - 0x80) >> 2; - ret = gpt->comp[idx]; - break; - case 0xC0 ... 0xD0: - /* Compare mask */ - idx = (addr - 0xC0) >> 2; - ret = gpt->mask[idx]; - break; - default: - ret = -1; - break; - } - - return ret; -} - -static void ppc4xx_gpt_write(void *opaque, hwaddr addr, uint64_t value, - unsigned size) -{ - Ppc405GptState *gpt = opaque; - int idx; - - trace_ppc4xx_gpt_write(addr, size, value); - - switch (addr) { - case 0x00: - /* Time base counter */ - gpt->tb_offset = muldiv64(value, NANOSECONDS_PER_SECOND, gpt->tb_freq) - - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - ppc4xx_gpt_compute_timer(gpt); - break; - case 0x10: - /* Output enable */ - gpt->oe = value & 0xF8000000; - ppc4xx_gpt_set_outputs(gpt); - break; - case 0x14: - /* Output level */ - gpt->ol = value & 0xF8000000; - ppc4xx_gpt_set_outputs(gpt); - break; - case 0x18: - /* Interrupt mask */ - gpt->im = value & 0x0000F800; - break; - case 0x1C: - /* Interrupt status set */ - gpt->is |= value & 0x0000F800; - ppc4xx_gpt_set_irqs(gpt); - break; - case 0x20: - /* Interrupt status clear */ - gpt->is &= ~(value & 0x0000F800); - ppc4xx_gpt_set_irqs(gpt); - break; - case 0x24: - /* Interrupt enable */ - gpt->ie = value & 0x0000F800; - ppc4xx_gpt_set_irqs(gpt); - break; - case 0x80 ... 0x90: - /* Compare timer */ - idx = (addr - 0x80) >> 2; - gpt->comp[idx] = value & 0xF8000000; - ppc4xx_gpt_compute_timer(gpt); - break; - case 0xC0 ... 
0xD0: - /* Compare mask */ - idx = (addr - 0xC0) >> 2; - gpt->mask[idx] = value & 0xF8000000; - ppc4xx_gpt_compute_timer(gpt); - break; - } -} - -static const MemoryRegionOps gpt_ops = { - .read = ppc4xx_gpt_read, - .write = ppc4xx_gpt_write, - .valid.min_access_size = 4, - .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void ppc4xx_gpt_cb(void *opaque) -{ - Ppc405GptState *gpt = opaque; - - ppc4xx_gpt_set_irqs(gpt); - ppc4xx_gpt_set_outputs(gpt); - ppc4xx_gpt_compute_timer(gpt); -} - -static void ppc405_gpt_reset(DeviceState *dev) -{ - Ppc405GptState *gpt = PPC405_GPT(dev); - int i; - - timer_del(gpt->timer); - gpt->oe = 0x00000000; - gpt->ol = 0x00000000; - gpt->im = 0x00000000; - gpt->is = 0x00000000; - gpt->ie = 0x00000000; - for (i = 0; i < 5; i++) { - gpt->comp[i] = 0x00000000; - gpt->mask[i] = 0x00000000; - } -} - -static void ppc405_gpt_realize(DeviceState *dev, Error **errp) -{ - Ppc405GptState *s = PPC405_GPT(dev); - SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - int i; - - s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &ppc4xx_gpt_cb, s); - memory_region_init_io(&s->iomem, OBJECT(s), &gpt_ops, s, "gpt", 0xd4); - sysbus_init_mmio(sbd, &s->iomem); - - for (i = 0; i < ARRAY_SIZE(s->irqs); i++) { - sysbus_init_irq(sbd, &s->irqs[i]); - } -} - -static void ppc405_gpt_finalize(Object *obj) -{ - /* timer will be NULL if the GPT wasn't realized */ - if (PPC405_GPT(obj)->timer) { - timer_del(PPC405_GPT(obj)->timer); - } -} - -static void ppc405_gpt_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - dc->realize = ppc405_gpt_realize; - dc->reset = ppc405_gpt_reset; - /* Reason: only works as function of a ppc4xx SoC */ - dc->user_creatable = false; -} - -/*****************************************************************************/ -/* PowerPC 405EP */ -/* CPU control */ -enum { - PPC405EP_CPC0_PLLMR0 = 0x0F0, - PPC405EP_CPC0_BOOT = 0x0F1, - PPC405EP_CPC0_EPCTL = 0x0F3, - PPC405EP_CPC0_PLLMR1 = 0x0F4, - PPC405EP_CPC0_UCR = 0x0F5, - PPC405EP_CPC0_SRR = 0x0F6, - PPC405EP_CPC0_JTAGID = 0x0F7, - PPC405EP_CPC0_PCI = 0x0F9, -#if 0 - PPC405EP_CPC0_ER = xxx, - PPC405EP_CPC0_FR = xxx, - PPC405EP_CPC0_SR = xxx, -#endif -}; - -static void ppc405ep_compute_clocks(Ppc405CpcState *cpc) -{ - uint32_t CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk; - uint32_t UART0_clk, UART1_clk; - uint64_t VCO_out, PLL_out; - int M, D; - - VCO_out = 0; - if ((cpc->pllmr[1] & 0x80000000) && !(cpc->pllmr[1] & 0x40000000)) { - M = (((cpc->pllmr[1] >> 20) - 1) & 0xF) + 1; /* FBMUL */ - trace_ppc405ep_clocks_compute("FBMUL", (cpc->pllmr[1] >> 20) & 0xF, M); - D = 8 - ((cpc->pllmr[1] >> 16) & 0x7); /* FWDA */ - trace_ppc405ep_clocks_compute("FWDA", (cpc->pllmr[1] >> 16) & 0x7, D); - VCO_out = (uint64_t)cpc->sysclk * M * D; - if (VCO_out < 500000000UL || VCO_out > 1000000000UL) { - /* Error - unlock the PLL */ - qemu_log_mask(LOG_GUEST_ERROR, "VCO out of range %" PRIu64 "\n", - VCO_out); -#if 0 - cpc->pllmr[1] &= ~0x80000000; - goto pll_bypass; -#endif - } - PLL_out = VCO_out / D; - /* Pretend the PLL is locked */ - cpc->boot |= 0x00000001; - } else { -#if 0 - pll_bypass: -#endif - PLL_out = cpc->sysclk; - if (cpc->pllmr[1] & 0x40000000) { - /* Pretend the PLL is not locked */ - cpc->boot &= ~0x00000001; - } - } - /* Now, compute all other clocks */ - D = ((cpc->pllmr[0] >> 20) & 0x3) + 1; /* CCDV */ - trace_ppc405ep_clocks_compute("CCDV", (cpc->pllmr[0] >> 20) & 0x3, D); - CPU_clk = PLL_out / D; - D = ((cpc->pllmr[0] >> 16) & 0x3) + 1; /* CBDV */ - 
trace_ppc405ep_clocks_compute("CBDV", (cpc->pllmr[0] >> 16) & 0x3, D); - PLB_clk = CPU_clk / D; - D = ((cpc->pllmr[0] >> 12) & 0x3) + 1; /* OPDV */ - trace_ppc405ep_clocks_compute("OPDV", (cpc->pllmr[0] >> 12) & 0x3, D); - OPB_clk = PLB_clk / D; - D = ((cpc->pllmr[0] >> 8) & 0x3) + 2; /* EPDV */ - trace_ppc405ep_clocks_compute("EPDV", (cpc->pllmr[0] >> 8) & 0x3, D); - EBC_clk = PLB_clk / D; - D = ((cpc->pllmr[0] >> 4) & 0x3) + 1; /* MPDV */ - trace_ppc405ep_clocks_compute("MPDV", (cpc->pllmr[0] >> 4) & 0x3, D); - MAL_clk = PLB_clk / D; - D = (cpc->pllmr[0] & 0x3) + 1; /* PPDV */ - trace_ppc405ep_clocks_compute("PPDV", cpc->pllmr[0] & 0x3, D); - PCI_clk = PLB_clk / D; - D = ((cpc->ucr - 1) & 0x7F) + 1; /* U0DIV */ - trace_ppc405ep_clocks_compute("U0DIV", cpc->ucr & 0x7F, D); - UART0_clk = PLL_out / D; - D = (((cpc->ucr >> 8) - 1) & 0x7F) + 1; /* U1DIV */ - trace_ppc405ep_clocks_compute("U1DIV", (cpc->ucr >> 8) & 0x7F, D); - UART1_clk = PLL_out / D; - - if (trace_event_get_state_backends(TRACE_PPC405EP_CLOCKS_SETUP)) { - g_autofree char *trace = g_strdup_printf( - "Setup PPC405EP clocks - sysclk %" PRIu32 " VCO %" PRIu64 - " PLL out %" PRIu64 " Hz\n" - "CPU %" PRIu32 " PLB %" PRIu32 " OPB %" PRIu32 " EBC %" PRIu32 - " MAL %" PRIu32 " PCI %" PRIu32 " UART0 %" PRIu32 - " UART1 %" PRIu32 "\n", - cpc->sysclk, VCO_out, PLL_out, - CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk, - UART0_clk, UART1_clk); - trace_ppc405ep_clocks_setup(trace); - } - - /* Setup CPU clocks */ - clk_setup(&cpc->clk_setup[PPC405EP_CPU_CLK], CPU_clk); - /* Setup PLB clock */ - clk_setup(&cpc->clk_setup[PPC405EP_PLB_CLK], PLB_clk); - /* Setup OPB clock */ - clk_setup(&cpc->clk_setup[PPC405EP_OPB_CLK], OPB_clk); - /* Setup external clock */ - clk_setup(&cpc->clk_setup[PPC405EP_EBC_CLK], EBC_clk); - /* Setup MAL clock */ - clk_setup(&cpc->clk_setup[PPC405EP_MAL_CLK], MAL_clk); - /* Setup PCI clock */ - clk_setup(&cpc->clk_setup[PPC405EP_PCI_CLK], PCI_clk); - /* Setup UART0 clock */ - clk_setup(&cpc->clk_setup[PPC405EP_UART0_CLK], UART0_clk); - /* Setup UART1 clock */ - clk_setup(&cpc->clk_setup[PPC405EP_UART1_CLK], UART1_clk); -} - -static uint32_t dcr_read_epcpc(void *opaque, int dcrn) -{ - Ppc405CpcState *cpc = opaque; - uint32_t ret; - - switch (dcrn) { - case PPC405EP_CPC0_BOOT: - ret = cpc->boot; - break; - case PPC405EP_CPC0_EPCTL: - ret = cpc->epctl; - break; - case PPC405EP_CPC0_PLLMR0: - ret = cpc->pllmr[0]; - break; - case PPC405EP_CPC0_PLLMR1: - ret = cpc->pllmr[1]; - break; - case PPC405EP_CPC0_UCR: - ret = cpc->ucr; - break; - case PPC405EP_CPC0_SRR: - ret = cpc->srr; - break; - case PPC405EP_CPC0_JTAGID: - ret = cpc->jtagid; - break; - case PPC405EP_CPC0_PCI: - ret = cpc->pci; - break; - default: - /* Avoid gcc warning */ - ret = 0; - break; - } - - return ret; -} - -static void dcr_write_epcpc(void *opaque, int dcrn, uint32_t val) -{ - Ppc405CpcState *cpc = opaque; - - switch (dcrn) { - case PPC405EP_CPC0_BOOT: - /* Read-only register */ - break; - case PPC405EP_CPC0_EPCTL: - /* Don't care for now */ - cpc->epctl = val & 0xC00000F3; - break; - case PPC405EP_CPC0_PLLMR0: - cpc->pllmr[0] = val & 0x00633333; - ppc405ep_compute_clocks(cpc); - break; - case PPC405EP_CPC0_PLLMR1: - cpc->pllmr[1] = val & 0xC0F73FFF; - ppc405ep_compute_clocks(cpc); - break; - case PPC405EP_CPC0_UCR: - /* UART control - don't care for now */ - cpc->ucr = val & 0x003F7F7F; - break; - case PPC405EP_CPC0_SRR: - cpc->srr = val; - break; - case PPC405EP_CPC0_JTAGID: - /* Read-only */ - break; - case PPC405EP_CPC0_PCI: - cpc->pci = val; 
- break; - } -} - -static void ppc405_cpc_reset(DeviceState *dev) -{ - Ppc405CpcState *cpc = PPC405_CPC(dev); - - cpc->boot = 0x00000010; /* Boot from PCI - IIC EEPROM disabled */ - cpc->epctl = 0x00000000; - cpc->pllmr[0] = 0x00021002; - cpc->pllmr[1] = 0x80a552be; - cpc->ucr = 0x00004646; - cpc->srr = 0x00040000; - cpc->pci = 0x00000000; - cpc->er = 0x00000000; - cpc->fr = 0x00000000; - cpc->sr = 0x00000000; - cpc->jtagid = 0x20267049; - ppc405ep_compute_clocks(cpc); -} - -/* XXX: sysclk should be between 25 and 100 MHz */ -static void ppc405_cpc_realize(DeviceState *dev, Error **errp) -{ - Ppc405CpcState *cpc = PPC405_CPC(dev); - Ppc4xxDcrDeviceState *dcr = PPC4xx_DCR_DEVICE(dev); - - assert(dcr->cpu); - cpc->clk_setup[PPC405EP_CPU_CLK].cb = - ppc_40x_timers_init(&dcr->cpu->env, cpc->sysclk, PPC_INTERRUPT_PIT); - cpc->clk_setup[PPC405EP_CPU_CLK].opaque = &dcr->cpu->env; - - ppc4xx_dcr_register(dcr, PPC405EP_CPC0_BOOT, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc4xx_dcr_register(dcr, PPC405EP_CPC0_EPCTL, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc4xx_dcr_register(dcr, PPC405EP_CPC0_PLLMR0, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc4xx_dcr_register(dcr, PPC405EP_CPC0_PLLMR1, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc4xx_dcr_register(dcr, PPC405EP_CPC0_UCR, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc4xx_dcr_register(dcr, PPC405EP_CPC0_SRR, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc4xx_dcr_register(dcr, PPC405EP_CPC0_JTAGID, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); - ppc4xx_dcr_register(dcr, PPC405EP_CPC0_PCI, cpc, - &dcr_read_epcpc, &dcr_write_epcpc); -} - -static Property ppc405_cpc_properties[] = { - DEFINE_PROP_UINT32("sys-clk", Ppc405CpcState, sysclk, 0), - DEFINE_PROP_END_OF_LIST(), -}; - -static void ppc405_cpc_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - dc->realize = ppc405_cpc_realize; - dc->reset = ppc405_cpc_reset; - /* Reason: only works as function of a ppc4xx SoC */ - dc->user_creatable = false; - device_class_set_props(dc, ppc405_cpc_properties); -} - -/* PPC405_SOC */ - -static void ppc405_soc_instance_init(Object *obj) -{ - Ppc405SoCState *s = PPC405_SOC(obj); - - object_initialize_child(obj, "cpu", &s->cpu, - POWERPC_CPU_TYPE_NAME("405ep")); - - object_initialize_child(obj, "uic", &s->uic, TYPE_PPC_UIC); - - object_initialize_child(obj, "cpc", &s->cpc, TYPE_PPC405_CPC); - object_property_add_alias(obj, "sys-clk", OBJECT(&s->cpc), "sys-clk"); - - object_initialize_child(obj, "gpt", &s->gpt, TYPE_PPC405_GPT); - - object_initialize_child(obj, "ocm", &s->ocm, TYPE_PPC405_OCM); - - object_initialize_child(obj, "gpio", &s->gpio, TYPE_PPC405_GPIO); - - object_initialize_child(obj, "dma", &s->dma, TYPE_PPC405_DMA); - - object_initialize_child(obj, "i2c", &s->i2c, TYPE_PPC4xx_I2C); - - object_initialize_child(obj, "ebc", &s->ebc, TYPE_PPC4xx_EBC); - - object_initialize_child(obj, "opba", &s->opba, TYPE_PPC405_OPBA); - - object_initialize_child(obj, "pob", &s->pob, TYPE_PPC405_POB); - - object_initialize_child(obj, "plb", &s->plb, TYPE_PPC4xx_PLB); - - object_initialize_child(obj, "mal", &s->mal, TYPE_PPC4xx_MAL); - - object_initialize_child(obj, "sdram", &s->sdram, TYPE_PPC4xx_SDRAM_DDR); - object_property_add_alias(obj, "dram", OBJECT(&s->sdram), "dram"); -} - -static void ppc405_reset(void *opaque) -{ - cpu_reset(CPU(opaque)); -} - -static void ppc405_soc_realize(DeviceState *dev, Error **errp) -{ - Ppc405SoCState *s = PPC405_SOC(dev); - CPUPPCState *env; - SysBusDevice *sbd; - int i; - - /* init CPUs */ - 
if (!qdev_realize(DEVICE(&s->cpu), NULL, errp)) { - return; - } - qemu_register_reset(ppc405_reset, &s->cpu); - - env = &s->cpu.env; - - ppc_dcr_init(env, NULL, NULL); - - /* CPU control */ - if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->cpc), &s->cpu, errp)) { - return; - } - - /* PLB arbitrer */ - if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->plb), &s->cpu, errp)) { - return; - } - - /* PLB to OPB bridge */ - if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->pob), &s->cpu, errp)) { - return; - } - - /* OBP arbitrer */ - sbd = SYS_BUS_DEVICE(&s->opba); - if (!sysbus_realize(sbd, errp)) { - return; - } - sysbus_mmio_map(sbd, 0, 0xef600600); - - /* Universal interrupt controller */ - if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->uic), &s->cpu, errp)) { - return; - } - sbd = SYS_BUS_DEVICE(&s->uic); - sysbus_connect_irq(sbd, PPCUIC_OUTPUT_INT, - qdev_get_gpio_in(DEVICE(&s->cpu), PPC40x_INPUT_INT)); - sysbus_connect_irq(sbd, PPCUIC_OUTPUT_CINT, - qdev_get_gpio_in(DEVICE(&s->cpu), PPC40x_INPUT_CINT)); - - /* SDRAM controller */ - /* - * We use the 440 DDR SDRAM controller which has more regs and features - * but it's compatible enough for now - */ - object_property_set_int(OBJECT(&s->sdram), "nbanks", 2, &error_abort); - if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->sdram), &s->cpu, errp)) { - return; - } - /* XXX 405EP has no ECC interrupt */ - sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdram), 0, - qdev_get_gpio_in(DEVICE(&s->uic), 17)); - - /* External bus controller */ - if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->ebc), &s->cpu, errp)) { - return; - } - - /* DMA controller */ - if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->dma), &s->cpu, errp)) { - return; - } - sbd = SYS_BUS_DEVICE(&s->dma); - for (i = 0; i < ARRAY_SIZE(s->dma.irqs); i++) { - sysbus_connect_irq(sbd, i, qdev_get_gpio_in(DEVICE(&s->uic), 5 + i)); - } - - /* I2C controller */ - sbd = SYS_BUS_DEVICE(&s->i2c); - if (!sysbus_realize(sbd, errp)) { - return; - } - sysbus_mmio_map(sbd, 0, 0xef600500); - sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(DEVICE(&s->uic), 2)); - - /* GPIO */ - sbd = SYS_BUS_DEVICE(&s->gpio); - if (!sysbus_realize(sbd, errp)) { - return; - } - sysbus_mmio_map(sbd, 0, 0xef600700); - - /* Serial ports */ - if (serial_hd(0) != NULL) { - serial_mm_init(get_system_memory(), 0xef600300, 0, - qdev_get_gpio_in(DEVICE(&s->uic), 0), - PPC_SERIAL_MM_BAUDBASE, serial_hd(0), - DEVICE_BIG_ENDIAN); - } - if (serial_hd(1) != NULL) { - serial_mm_init(get_system_memory(), 0xef600400, 0, - qdev_get_gpio_in(DEVICE(&s->uic), 1), - PPC_SERIAL_MM_BAUDBASE, serial_hd(1), - DEVICE_BIG_ENDIAN); - } - - /* OCM */ - if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->ocm), &s->cpu, errp)) { - return; - } - - /* GPT */ - sbd = SYS_BUS_DEVICE(&s->gpt); - if (!sysbus_realize(sbd, errp)) { - return; - } - sysbus_mmio_map(sbd, 0, 0xef600000); - for (i = 0; i < ARRAY_SIZE(s->gpt.irqs); i++) { - sysbus_connect_irq(sbd, i, qdev_get_gpio_in(DEVICE(&s->uic), 19 + i)); - } - - /* MAL */ - object_property_set_int(OBJECT(&s->mal), "txc-num", 4, &error_abort); - object_property_set_int(OBJECT(&s->mal), "rxc-num", 2, &error_abort); - if (!ppc4xx_dcr_realize(PPC4xx_DCR_DEVICE(&s->mal), &s->cpu, errp)) { - return; - } - sbd = SYS_BUS_DEVICE(&s->mal); - for (i = 0; i < ARRAY_SIZE(s->mal.irqs); i++) { - sysbus_connect_irq(sbd, i, qdev_get_gpio_in(DEVICE(&s->uic), 11 + i)); - } - - /* Ethernet */ - /* Uses UIC IRQs 9, 15, 17 */ -} - -static void ppc405_soc_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - dc->realize = 
ppc405_soc_realize; - /* Reason: only works as part of a ppc405 board/machine */ - dc->user_creatable = false; -} - -static const TypeInfo ppc405_types[] = { - { - .name = TYPE_PPC405_POB, - .parent = TYPE_PPC4xx_DCR_DEVICE, - .instance_size = sizeof(Ppc405PobState), - .class_init = ppc405_pob_class_init, - }, { - .name = TYPE_PPC405_OPBA, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(Ppc405OpbaState), - .class_init = ppc405_opba_class_init, - }, { - .name = TYPE_PPC405_DMA, - .parent = TYPE_PPC4xx_DCR_DEVICE, - .instance_size = sizeof(Ppc405DmaState), - .class_init = ppc405_dma_class_init, - }, { - .name = TYPE_PPC405_GPIO, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(Ppc405GpioState), - .class_init = ppc405_gpio_class_init, - }, { - .name = TYPE_PPC405_OCM, - .parent = TYPE_PPC4xx_DCR_DEVICE, - .instance_size = sizeof(Ppc405OcmState), - .class_init = ppc405_ocm_class_init, - }, { - .name = TYPE_PPC405_GPT, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(Ppc405GptState), - .instance_finalize = ppc405_gpt_finalize, - .class_init = ppc405_gpt_class_init, - }, { - .name = TYPE_PPC405_CPC, - .parent = TYPE_PPC4xx_DCR_DEVICE, - .instance_size = sizeof(Ppc405CpcState), - .class_init = ppc405_cpc_class_init, - }, { - .name = TYPE_PPC405_SOC, - .parent = TYPE_DEVICE, - .instance_size = sizeof(Ppc405SoCState), - .instance_init = ppc405_soc_instance_init, - .class_init = ppc405_soc_class_init, - } -}; - -DEFINE_TYPES(ppc405_types) diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c index 73f80cf7065..6fff0d8afbc 100644 --- a/hw/ppc/ppc440_bamboo.c +++ b/hw/ppc/ppc440_bamboo.c @@ -19,15 +19,15 @@ #include "net/net.h" #include "hw/pci/pci.h" #include "hw/boards.h" -#include "sysemu/kvm.h" -#include "sysemu/device_tree.h" +#include "system/kvm.h" +#include "system/device_tree.h" #include "hw/loader.h" #include "elf.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/ppc/ppc.h" #include "hw/pci-host/ppc4xx.h" -#include "sysemu/sysemu.h" -#include "sysemu/reset.h" +#include "system/system.h" +#include "system/reset.h" #include "hw/sysbus.h" #include "hw/intc/ppc-uic.h" #include "hw/qdev-properties.h" @@ -64,7 +64,7 @@ static int bamboo_load_device_tree(MachineState *machine, uint32_t tb_freq = 400000000; uint32_t clock_freq = 400000000; - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE); + filename = qemu_find_file(QEMU_FILE_TYPE_DTB, BINARY_DEVICE_TREE_FILE); if (!filename) { return -1; } @@ -110,29 +110,6 @@ static int bamboo_load_device_tree(MachineState *machine, return 0; } -/* Create reset TLB entries for BookE, spanning the 32bit addr space. */ -static void mmubooke_create_initial_mapping(CPUPPCState *env, - target_ulong va, - hwaddr pa) -{ - ppcemb_tlb_t *tlb = &env->tlb.tlbe[0]; - - tlb->attr = 0; - tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); - tlb->size = 1U << 31; /* up to 0x80000000 */ - tlb->EPN = va & TARGET_PAGE_MASK; - tlb->RPN = pa & TARGET_PAGE_MASK; - tlb->PID = 0; - - tlb = &env->tlb.tlbe[1]; - tlb->attr = 0; - tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); - tlb->size = 1U << 31; /* up to 0xffffffff */ - tlb->EPN = 0x80000000 & TARGET_PAGE_MASK; - tlb->RPN = 0x80000000 & TARGET_PAGE_MASK; - tlb->PID = 0; -} - static void main_cpu_reset(void *opaque) { PowerPCCPU *cpu = opaque; @@ -143,8 +120,9 @@ static void main_cpu_reset(void *opaque) env->gpr[3] = FDT_ADDR; env->nip = entry; - /* Create a mapping for the kernel. 
*/ - mmubooke_create_initial_mapping(env, 0, 0); + /* Create a mapping spanning the 32bit addr space. */ + booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1U << 31); + booke_set_tlb(&env->tlb.tlbe[1], 0x80000000, 0x80000000, 1U << 31); } static void bamboo_init(MachineState *machine) @@ -250,7 +228,8 @@ static void bamboo_init(MachineState *machine) if (success < 0) { uint64_t elf_entry; success = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry, - NULL, NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); + NULL, NULL, NULL, + ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0); entry = elf_entry; } /* XXX try again as binary */ diff --git a/hw/ppc/ppc440_uc.c b/hw/ppc/ppc440_uc.c index 1312aa2080e..89e3fae08d7 100644 --- a/hw/ppc/ppc440_uc.c +++ b/hw/ppc/ppc440_uc.c @@ -17,7 +17,7 @@ #include "hw/pci-host/ppc4xx.h" #include "hw/qdev-properties.h" #include "hw/pci/pci.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "cpu.h" #include "ppc440.h" @@ -1020,15 +1020,14 @@ static void ppc460ex_pcie_realize(DeviceState *dev, Error **errp) ppc460ex_pcie_register_dcrs(s); } -static Property ppc460ex_pcie_props[] = { +static const Property ppc460ex_pcie_props[] = { DEFINE_PROP_INT32("busnum", PPC460EXPCIEState, num, -1), DEFINE_PROP_INT32("dcrn-base", PPC460EXPCIEState, dcrn_base, -1), DEFINE_PROP_LINK("cpu", PPC460EXPCIEState, cpu, TYPE_POWERPC_CPU, PowerPCCPU *), - DEFINE_PROP_END_OF_LIST(), }; -static void ppc460ex_pcie_class_init(ObjectClass *klass, void *data) +static void ppc460ex_pcie_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/ppc/ppc4xx_devs.c b/hw/ppc/ppc4xx_devs.c index c1d111465db..f36c519c8bf 100644 --- a/hw/ppc/ppc4xx_devs.c +++ b/hw/ppc/ppc4xx_devs.c @@ -231,18 +231,17 @@ static void ppc4xx_mal_finalize(Object *obj) g_free(mal->txctpr); } -static Property ppc4xx_mal_properties[] = { +static const Property ppc4xx_mal_properties[] = { DEFINE_PROP_UINT8("txc-num", Ppc4xxMalState, txcnum, 0), DEFINE_PROP_UINT8("rxc-num", Ppc4xxMalState, rxcnum, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void ppc4xx_mal_class_init(ObjectClass *oc, void *data) +static void ppc4xx_mal_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = ppc4xx_mal_realize; - dc->reset = ppc4xx_mal_reset; + device_class_set_legacy_reset(dc, ppc4xx_mal_reset); /* Reason: only works as function of a ppc4xx SoC */ dc->user_creatable = false; device_class_set_props(dc, ppc4xx_mal_properties); @@ -327,12 +326,12 @@ static void ppc405_plb_realize(DeviceState *dev, Error **errp) ppc4xx_dcr_register(dcr, PLB4A1_ACR, plb, &dcr_read_plb, &dcr_write_plb); } -static void ppc405_plb_class_init(ObjectClass *oc, void *data) +static void ppc405_plb_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = ppc405_plb_realize; - dc->reset = ppc405_plb_reset; + device_class_set_legacy_reset(dc, ppc405_plb_reset); /* Reason: only works as function of a ppc4xx SoC */ dc->user_creatable = false; } @@ -513,12 +512,12 @@ static void ppc405_ebc_realize(DeviceState *dev, Error **errp) ppc4xx_dcr_register(dcr, EBC0_CFGDATA, ebc, &dcr_read_ebc, &dcr_write_ebc); } -static void ppc405_ebc_class_init(ObjectClass *oc, void *data) +static void ppc405_ebc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = ppc405_ebc_realize; - dc->reset = ppc405_ebc_reset; + device_class_set_legacy_reset(dc, ppc405_ebc_reset); /* Reason: only works as function of a ppc4xx SoC */ dc->user_creatable = 
false; } @@ -539,13 +538,12 @@ bool ppc4xx_dcr_realize(Ppc4xxDcrDeviceState *dev, PowerPCCPU *cpu, return sysbus_realize(SYS_BUS_DEVICE(dev), errp); } -static Property ppc4xx_dcr_properties[] = { +static const Property ppc4xx_dcr_properties[] = { DEFINE_PROP_LINK("cpu", Ppc4xxDcrDeviceState, cpu, TYPE_POWERPC_CPU, PowerPCCPU *), - DEFINE_PROP_END_OF_LIST(), }; -static void ppc4xx_dcr_class_init(ObjectClass *oc, void *data) +static void ppc4xx_dcr_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/ppc/ppc4xx_sdram.c b/hw/ppc/ppc4xx_sdram.c index c0c87ff636a..592769826bd 100644 --- a/hw/ppc/ppc4xx_sdram.c +++ b/hw/ppc/ppc4xx_sdram.c @@ -34,7 +34,7 @@ #include "qapi/error.h" #include "qemu/log.h" #include "qemu/error-report.h" -#include "exec/address-spaces.h" /* get_system_memory() */ +#include "system/address-spaces.h" /* get_system_memory() */ #include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/ppc/ppc4xx.h" @@ -425,19 +425,18 @@ static void ppc4xx_sdram_ddr_realize(DeviceState *dev, Error **errp) s, &sdram_ddr_dcr_read, &sdram_ddr_dcr_write); } -static Property ppc4xx_sdram_ddr_props[] = { +static const Property ppc4xx_sdram_ddr_props[] = { DEFINE_PROP_LINK("dram", Ppc4xxSdramDdrState, dram_mr, TYPE_MEMORY_REGION, MemoryRegion *), DEFINE_PROP_UINT32("nbanks", Ppc4xxSdramDdrState, nbanks, 4), - DEFINE_PROP_END_OF_LIST(), }; -static void ppc4xx_sdram_ddr_class_init(ObjectClass *oc, void *data) +static void ppc4xx_sdram_ddr_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = ppc4xx_sdram_ddr_realize; - dc->reset = ppc4xx_sdram_ddr_reset; + device_class_set_legacy_reset(dc, ppc4xx_sdram_ddr_reset); /* Reason: only works as function of a ppc4xx SoC */ dc->user_creatable = false; device_class_set_props(dc, ppc4xx_sdram_ddr_props); @@ -710,19 +709,18 @@ static void ppc4xx_sdram_ddr2_realize(DeviceState *dev, Error **errp) s, &sdram_ddr2_dcr_read, &sdram_ddr2_dcr_write); } -static Property ppc4xx_sdram_ddr2_props[] = { +static const Property ppc4xx_sdram_ddr2_props[] = { DEFINE_PROP_LINK("dram", Ppc4xxSdramDdr2State, dram_mr, TYPE_MEMORY_REGION, MemoryRegion *), DEFINE_PROP_UINT32("nbanks", Ppc4xxSdramDdr2State, nbanks, 4), - DEFINE_PROP_END_OF_LIST(), }; -static void ppc4xx_sdram_ddr2_class_init(ObjectClass *oc, void *data) +static void ppc4xx_sdram_ddr2_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = ppc4xx_sdram_ddr2_realize; - dc->reset = ppc4xx_sdram_ddr2_reset; + device_class_set_legacy_reset(dc, ppc4xx_sdram_ddr2_reset); /* Reason: only works as function of a ppc4xx SoC */ dc->user_creatable = false; device_class_set_props(dc, ppc4xx_sdram_ddr2_props); diff --git a/hw/ppc/ppc_booke.c b/hw/ppc/ppc_booke.c index ca22da196ad..3872ae28222 100644 --- a/hw/ppc/ppc_booke.c +++ b/hw/ppc/ppc_booke.c @@ -24,13 +24,24 @@ #include "qemu/osdep.h" #include "cpu.h" +#include "exec/target_page.h" #include "hw/ppc/ppc.h" #include "qemu/timer.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" +#include "system/reset.h" +#include "system/runstate.h" #include "hw/loader.h" #include "kvm_ppc.h" +void booke_set_tlb(ppcemb_tlb_t *tlb, target_ulong va, hwaddr pa, + target_ulong size) +{ + tlb->attr = 0; + tlb->prot = PAGE_RWX << 4 | PAGE_VALID; + tlb->size = size; + tlb->EPN = va & TARGET_PAGE_MASK; + tlb->RPN = pa & TARGET_PAGE_MASK; + tlb->PID = 0; +} /* Timer Control Register */ diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c index 
dfbe759481a..2310f62a91e 100644 --- a/hw/ppc/ppce500_spin.c +++ b/hw/ppc/ppce500_spin.c @@ -32,7 +32,8 @@ #include "qemu/units.h" #include "hw/hw.h" #include "hw/sysbus.h" -#include "sysemu/hw_accel.h" +#include "system/hw_accel.h" +#include "hw/ppc/ppc.h" #include "e500.h" #include "qom/object.h" @@ -70,30 +71,12 @@ static void spin_reset(DeviceState *dev) } } -static void mmubooke_create_initial_mapping(CPUPPCState *env, - target_ulong va, - hwaddr pa, - hwaddr len) -{ - ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 1); - hwaddr size; - - size = (booke206_page_size_to_tlb(len) << MAS1_TSIZE_SHIFT); - tlb->mas1 = MAS1_VALID | size; - tlb->mas2 = (va & TARGET_PAGE_MASK) | MAS2_M; - tlb->mas7_3 = pa & TARGET_PAGE_MASK; - tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX; -#ifdef CONFIG_KVM - env->tlb_dirty = true; -#endif -} - static void spin_kick(CPUState *cs, run_on_cpu_data data) { CPUPPCState *env = cpu_env(cs); SpinInfo *curspin = data.host_ptr; - hwaddr map_size = 64 * MiB; - hwaddr map_start; + hwaddr map_start, map_size = 64 * MiB; + ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 1); cpu_synchronize_state(cs); stl_p(&curspin->pir, env->spr[SPR_BOOKE_PIR]); @@ -107,7 +90,12 @@ static void spin_kick(CPUState *cs, run_on_cpu_data data) env->gpr[9] = 0; map_start = ldq_p(&curspin->addr) & ~(map_size - 1); - mmubooke_create_initial_mapping(env, 0, map_start, map_size); + /* create initial mapping */ + booke206_set_tlb(tlb, 0, map_start, map_size); + tlb->mas2 |= MAS2_M; +#ifdef CONFIG_KVM + env->tlb_dirty = true; +#endif cs->halted = 0; cs->exception_index = -1; @@ -187,11 +175,11 @@ static void ppce500_spin_initfn(Object *obj) sysbus_init_mmio(dev, &s->iomem); } -static void ppce500_spin_class_init(ObjectClass *klass, void *data) +static void ppce500_spin_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = spin_reset; + device_class_set_legacy_reset(dc, spin_reset); } static const TypeInfo ppce500_spin_info = { diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c index 4eb54770690..982e40e53e1 100644 --- a/hw/ppc/prep.c +++ b/hw/ppc/prep.c @@ -25,7 +25,6 @@ #include "qemu/osdep.h" #include "hw/rtc/m48t59.h" -#include "hw/char/serial.h" #include "hw/block/fdc.h" #include "net/net.h" #include "hw/isa/isa.h" @@ -36,12 +35,14 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/log.h" +#include "qemu/datadir.h" #include "hw/loader.h" #include "hw/rtc/mc146818rtc.h" #include "hw/isa/pc87312.h" #include "hw/qdev-properties.h" -#include "sysemu/kvm.h" -#include "sysemu/reset.h" +#include "exec/target_page.h" +#include "system/kvm.h" +#include "system/reset.h" #include "trace.h" #include "elf.h" #include "qemu/units.h" @@ -55,6 +56,8 @@ #define KERNEL_LOAD_ADDR 0x01000000 #define INITRD_LOAD_ADDR 0x01800000 +#define BIOS_ADDR 0xfff00000 +#define BIOS_SIZE (1 * MiB) #define NVRAM_SIZE 0x2000 static void fw_cfg_boot_set(void *opaque, const char *boot_device, @@ -241,6 +244,9 @@ static void ibm_40p_init(MachineState *machine) ISADevice *isa_dev; ISABus *isa_bus; void *fw_cfg; + MemoryRegion *bios = g_new(MemoryRegion, 1); + char *filename; + ssize_t bios_size = -1; uint32_t kernel_base = 0, initrd_base = 0; long kernel_size = 0, initrd_size = 0; char boot_device; @@ -263,10 +269,27 @@ static void ibm_40p_init(MachineState *machine) cpu_ppc_tb_init(env, 100UL * 1000UL * 1000UL); qemu_register_reset(ppc_prep_reset, cpu); + /* allocate and load firmware */ + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, 
bios_name); + if (!filename) { + error_report("Could not find bios image '%s'", bios_name); + exit(1); + } + memory_region_init_rom(bios, NULL, "bios", BIOS_SIZE, &error_fatal); + memory_region_add_subregion(get_system_memory(), BIOS_ADDR, bios); + bios_size = load_elf(filename, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0); + if (bios_size < 0) { + bios_size = load_image_targphys(filename, BIOS_ADDR, BIOS_SIZE); + } + if (bios_size < 0 || bios_size > BIOS_SIZE) { + error_report("Could not load bios image '%s'", filename); + return; + } + g_free(filename); + /* PCI host */ dev = qdev_new("raven-pcihost"); - qdev_prop_set_string(dev, "bios-name", bios_name); - qdev_prop_set_uint32(dev, "elf-machine", PPC_ELF_MACHINE); pcihost = SYS_BUS_DEVICE(dev); object_property_add_child(qdev_get_machine(), "raven", OBJECT(dev)); sysbus_realize_and_unref(pcihost, &error_fatal); diff --git a/hw/ppc/prep_systemio.c b/hw/ppc/prep_systemio.c index 4d3a251ed82..41cd923b94a 100644 --- a/hw/ppc/prep_systemio.c +++ b/hw/ppc/prep_systemio.c @@ -28,11 +28,11 @@ #include "hw/isa/isa.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "qom/object.h" #include "qemu/error-report.h" /* for error_report() */ #include "qemu/module.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "cpu.h" #include "trace.h" @@ -285,13 +285,12 @@ static const VMStateDescription vmstate_prep_systemio = { }, }; -static Property prep_systemio_properties[] = { +static const Property prep_systemio_properties[] = { DEFINE_PROP_UINT8("ibm-planar-id", PrepSystemIoState, ibm_planar_id, 0), DEFINE_PROP_UINT8("equipment", PrepSystemIoState, equipment, 0), - DEFINE_PROP_END_OF_LIST() }; -static void prep_systemio_class_initfn(ObjectClass *klass, void *data) +static void prep_systemio_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/ppc/rs6000_mc.c b/hw/ppc/rs6000_mc.c index e6ec4b4c406..a0964051d1e 100644 --- a/hw/ppc/rs6000_mc.c +++ b/hw/ppc/rs6000_mc.c @@ -3,10 +3,12 @@ * * Copyright (c) 2017 Hervé Poussineau * + * SPDX-License-Identifier: GPL-2.0-or-later + * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or - * (at your option) version 3 or any later version. + * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -22,7 +24,7 @@ #include "hw/isa/isa.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "qapi/error.h" #include "trace.h" #include "qom/object.h" @@ -205,13 +207,12 @@ static const VMStateDescription vmstate_rs6000mc = { }, }; -static Property rs6000mc_properties[] = { +static const Property rs6000mc_properties[] = { DEFINE_PROP_UINT32("ram-size", RS6000MCState, ram_size, 0), DEFINE_PROP_BOOL("auto-configure", RS6000MCState, autoconfigure, true), - DEFINE_PROP_END_OF_LIST() }; -static void rs6000mc_class_initfn(ObjectClass *klass, void *data) +static void rs6000mc_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c index 8dc75fb9f08..ee31bd8f349 100644 --- a/hw/ppc/sam460ex.c +++ b/hw/ppc/sam460ex.c @@ -17,21 +17,21 @@ #include "qemu/error-report.h" #include "qapi/error.h" #include "hw/boards.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "kvm_ppc.h" -#include "sysemu/device_tree.h" -#include "sysemu/block-backend.h" +#include "system/device_tree.h" +#include "system/block-backend.h" #include "exec/page-protection.h" #include "hw/loader.h" #include "elf.h" -#include "exec/memory.h" +#include "system/memory.h" #include "ppc440.h" #include "hw/pci-host/ppc4xx.h" #include "hw/block/flash.h" -#include "sysemu/sysemu.h" -#include "sysemu/reset.h" +#include "system/system.h" +#include "system/reset.h" #include "hw/sysbus.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/i2c/ppc4xx_i2c.h" #include "hw/i2c/smbus_eeprom.h" #include "hw/ide/pci.h" @@ -142,7 +142,7 @@ static int sam460ex_load_device_tree(MachineState *machine, uint32_t clock_freq = CPU_FREQ; int offset; - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE); + filename = qemu_find_file(QEMU_FILE_TYPE_DTB, BINARY_DEVICE_TREE_FILE); if (!filename) { error_report("Couldn't find dtb file `%s'", BINARY_DEVICE_TREE_FILE); exit(1); @@ -213,38 +213,6 @@ static int sam460ex_load_device_tree(MachineState *machine, return fdt_size; } -/* Create reset TLB entries for BookE, mapping only the flash memory. */ -static void mmubooke_create_initial_mapping_uboot(CPUPPCState *env) -{ - ppcemb_tlb_t *tlb = &env->tlb.tlbe[0]; - - /* on reset the flash is mapped by a shadow TLB, - * but since we don't implement them we need to use - * the same values U-Boot will use to avoid a fault. - */ - tlb->attr = 0; - tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); - tlb->size = 0x10000000; /* up to 0xffffffff */ - tlb->EPN = 0xf0000000 & TARGET_PAGE_MASK; - tlb->RPN = (0xf0000000 & TARGET_PAGE_MASK) | 0x4; - tlb->PID = 0; -} - -/* Create reset TLB entries for BookE, spanning the 32bit addr space. 
*/ -static void mmubooke_create_initial_mapping(CPUPPCState *env, - target_ulong va, - hwaddr pa) -{ - ppcemb_tlb_t *tlb = &env->tlb.tlbe[0]; - - tlb->attr = 0; - tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); - tlb->size = 1 << 31; /* up to 0x80000000 */ - tlb->EPN = va & TARGET_PAGE_MASK; - tlb->RPN = pa & TARGET_PAGE_MASK; - tlb->PID = 0; -} - static void main_cpu_reset(void *opaque) { PowerPCCPU *cpu = opaque; @@ -253,20 +221,27 @@ static void main_cpu_reset(void *opaque) cpu_reset(CPU(cpu)); - /* either we have a kernel to boot or we jump to U-Boot */ + /* + * On reset the flash is mapped by a shadow TLB, but since we + * don't implement them we need to use the same values U-Boot + * will use to avoid a fault. + * either we have a kernel to boot or we jump to U-Boot + */ if (bi->entry != UBOOT_ENTRY) { env->gpr[1] = (16 * MiB) - 8; env->gpr[3] = FDT_ADDR; env->nip = bi->entry; /* Create a mapping for the kernel. */ - mmubooke_create_initial_mapping(env, 0, 0); - env->gpr[6] = tswap32(EPAPR_MAGIC); + booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1 << 31); + env->gpr[6] = EPAPR_MAGIC; env->gpr[7] = (16 * MiB) - 8; /* bi->ima_size; */ } else { env->nip = UBOOT_ENTRY; - mmubooke_create_initial_mapping_uboot(env); + /* Create a mapping for U-Boot. */ + booke_set_tlb(&env->tlb.tlbe[0], 0xf0000000, 0xf0000000, 0x10000000); + env->tlb.tlbe[0].RPN |= 4; } } @@ -504,7 +479,7 @@ static void sam460ex_init(MachineState *machine) success = load_elf(machine->kernel_filename, NULL, NULL, NULL, &elf_entry, NULL, NULL, NULL, - 1, PPC_ELF_MACHINE, 0, 0); + ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0); entry = elf_entry; } /* XXX try again as binary */ diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 370d7c35d3a..1855a3cd8d0 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -4,6 +4,9 @@ * Copyright (c) 2004-2007 Fabrice Bellard * Copyright (c) 2007 Jocelyn Mayer * Copyright (c) 2010 David Gibson, IBM Corporation. + * Copyright (c) 2010-2024, IBM Corporation.. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -32,20 +35,20 @@ #include "qapi/qapi-events-machine.h" #include "qapi/qapi-events-qdev.h" #include "qapi/visitor.h" -#include "sysemu/sysemu.h" -#include "sysemu/hostmem.h" -#include "sysemu/numa.h" -#include "sysemu/tcg.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" +#include "system/system.h" +#include "system/hostmem.h" +#include "system/numa.h" +#include "system/tcg.h" +#include "system/qtest.h" +#include "system/reset.h" +#include "system/runstate.h" #include "qemu/log.h" #include "hw/fw-path-provider.h" #include "elf.h" #include "net/net.h" -#include "sysemu/device_tree.h" -#include "sysemu/cpus.h" -#include "sysemu/hw_accel.h" +#include "system/device_tree.h" +#include "system/cpus.h" +#include "system/hw_accel.h" #include "kvm_ppc.h" #include "migration/misc.h" #include "migration/qemu-file-types.h" @@ -74,8 +77,8 @@ #include "hw/virtio/virtio-scsi.h" #include "hw/virtio/vhost-scsi-common.h" -#include "exec/ram_addr.h" -#include "exec/confidential-guest-support.h" +#include "system/ram_addr.h" +#include "system/confidential-guest-support.h" #include "hw/usb.h" #include "qemu/config-file.h" #include "qemu/error-report.h" @@ -132,61 +135,6 @@ static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr, return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0; } -static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque) -{ - /* Dummy entries correspond to unused ICPState objects in older QEMUs, - * and newer QEMUs don't even have them. In both cases, we don't want - * to send anything on the wire. - */ - return false; -} - -static const VMStateDescription pre_2_10_vmstate_dummy_icp = { - /* - * Hack ahead. We can't have two devices with the same name and - * instance id. So I rename this to pass make check. - * Real help from people who knows the hardware is needed. - */ - .name = "icp/server", - .version_id = 1, - .minimum_version_id = 1, - .needed = pre_2_10_vmstate_dummy_icp_needed, - .fields = (const VMStateField[]) { - VMSTATE_UNUSED(4), /* uint32_t xirr */ - VMSTATE_UNUSED(1), /* uint8_t pending_priority */ - VMSTATE_UNUSED(1), /* uint8_t mfrr */ - VMSTATE_END_OF_LIST() - }, -}; - -/* - * See comment in hw/intc/xics.c:icp_realize() - * - * You have to remove vmstate_replace_hack_for_ppc() when you remove - * the machine types that need the following function. - */ -static void pre_2_10_vmstate_register_dummy_icp(int i) -{ - vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp, - (void *)(uintptr_t) i); -} - -/* - * See comment in hw/intc/xics.c:icp_realize() - * - * You have to remove vmstate_replace_hack_for_ppc() when you remove - * the machine types that need the following function. 
- */ -static void pre_2_10_vmstate_unregister_dummy_icp(int i) -{ - /* - * This used to be: - * - * vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp, - * (void *)(uintptr_t) i); - */ -} - int spapr_max_server_number(SpaprMachineState *spapr) { MachineState *ms = MACHINE(spapr); @@ -298,7 +246,7 @@ static void spapr_dt_pa_features(SpaprMachineState *spapr, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */ /* 54: DecFP, 56: DecI, 58: SHA */ 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ - /* 60: NM atomic, 62: RNG */ + /* 60: NM atomic, 62: RNG, 64: DAWR1 (ISA 3.1) */ 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ /* 68: DEXCR[SBHE|IBRTPDUS|SRAPD|NPHIE|PHIE] */ 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 66 - 71 */ @@ -347,6 +295,10 @@ static void spapr_dt_pa_features(SpaprMachineState *spapr, * in pa-features. So hide it from them. */ pa_features[40 + 2] &= ~0x80; /* Radix MMU */ } + if (spapr_get_cap(spapr, SPAPR_CAP_DAWR1)) { + g_assert(pa_size > 66); + pa_features[66] |= 0x80; + } _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size))); } @@ -625,7 +577,7 @@ static int spapr_dt_dynamic_memory(SpaprMachineState *spapr, void *fdt, /* * Adds ibm,dynamic-reconfiguration-memory node. - * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation + * Refer to docs/specs/ppc-spapr-hotplug.rst for the documentation * of this device tree node. */ static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr, @@ -682,7 +634,6 @@ static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr, static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt) { MachineState *machine = MACHINE(spapr); - SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); hwaddr mem_start, node_size; int i, nb_nodes = machine->numa_state->num_nodes; NodeInfo *nodes = machine->numa_state->nodes; @@ -724,7 +675,6 @@ static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt) if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) { int ret; - g_assert(smc->dr_lmb_enabled); ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt); if (ret) { return ret; @@ -758,7 +708,7 @@ static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset, uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ]; int i; - drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index); + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, env->core_index); if (drc) { drc_index = spapr_drc_index(drc); _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index))); @@ -1307,9 +1257,7 @@ void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space) spapr_dt_cpus(fdt, spapr); /* ibm,drc-indexes and friends */ - if (smc->dr_lmb_enabled) { - root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB; - } + root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB; if (smc->dr_phb_enabled) { root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PHB; } @@ -1458,11 +1406,34 @@ static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu, } } -#define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2)) -#define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID) -#define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY) -#define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY)) -#define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY)) +static uint64_t *hpte_get_ptr(SpaprMachineState *s, unsigned index) +{ + uint64_t *table = s->htab; + + return &table[2 * index]; +} + +static bool hpte_is_valid(SpaprMachineState *s, 
unsigned index) +{ + return ldq_be_p(hpte_get_ptr(s, index)) & HPTE64_V_VALID; +} + +static bool hpte_is_dirty(SpaprMachineState *s, unsigned index) +{ + return ldq_be_p(hpte_get_ptr(s, index)) & HPTE64_V_HPTE_DIRTY; +} + +static void hpte_set_clean(SpaprMachineState *s, unsigned index) +{ + stq_be_p(hpte_get_ptr(s, index), + ldq_be_p(hpte_get_ptr(s, index)) & ~HPTE64_V_HPTE_DIRTY); +} + +static void hpte_set_dirty(SpaprMachineState *s, unsigned index) +{ + stq_be_p(hpte_get_ptr(s, index), + ldq_be_p(hpte_get_ptr(s, index)) | HPTE64_V_HPTE_DIRTY); +} /* * Get the fd to access the kernel htab, re-opening it if necessary @@ -1673,7 +1644,7 @@ int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp) spapr->htab_shift = shift; for (i = 0; i < size / HASH_PTE_SIZE_64; i++) { - DIRTY_HPTE(HPTE(spapr->htab, i)); + hpte_set_dirty(spapr, i); } } /* We're setting up a hash table, so that means we're not radix */ @@ -1725,7 +1696,7 @@ void spapr_check_mmu_mode(bool guest_radix) } } -static void spapr_machine_reset(MachineState *machine, ShutdownCause reason) +static void spapr_machine_reset(MachineState *machine, ResetType type) { SpaprMachineState *spapr = SPAPR_MACHINE(machine); PowerPCCPU *first_ppc_cpu; @@ -1733,7 +1704,7 @@ static void spapr_machine_reset(MachineState *machine, ShutdownCause reason) void *fdt; int rc; - if (reason != SHUTDOWN_CAUSE_SNAPSHOT_LOAD) { + if (type != RESET_TYPE_SNAPSHOT_LOAD) { /* * Record-replay snapshot load must not consume random, this was * already replayed from initial machine reset. @@ -1762,7 +1733,7 @@ static void spapr_machine_reset(MachineState *machine, ShutdownCause reason) spapr_setup_hpt(spapr); } - qemu_devices_reset(reason); + qemu_devices_reset(type); spapr_ovec_cleanup(spapr->ov5_cas); spapr->ov5_cas = spapr_ovec_new(); @@ -1819,7 +1790,6 @@ static void spapr_machine_reset(MachineState *machine, ShutdownCause reason) 0, fdt_addr, 0); cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt)); } - qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt)); g_free(spapr->fdt_blob); spapr->fdt_size = fdt_totalsize(fdt); @@ -2197,6 +2167,7 @@ static const VMStateDescription vmstate_spapr = { &vmstate_spapr_cap_rpt_invalidate, &vmstate_spapr_cap_ail_mode_3, &vmstate_spapr_cap_nested_papr, + &vmstate_spapr_cap_dawr1, NULL } }; @@ -2231,7 +2202,7 @@ static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr, qemu_put_be32(f, chunkstart); qemu_put_be16(f, n_valid); qemu_put_be16(f, n_invalid); - qemu_put_buffer(f, HPTE(spapr->htab, chunkstart), + qemu_put_buffer(f, (void *)hpte_get_ptr(spapr, chunkstart), HASH_PTE_SIZE_64 * n_valid); } @@ -2257,16 +2228,16 @@ static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr, /* Consume invalid HPTEs */ while ((index < htabslots) - && !HPTE_VALID(HPTE(spapr->htab, index))) { - CLEAN_HPTE(HPTE(spapr->htab, index)); + && !hpte_is_valid(spapr, index)) { + hpte_set_clean(spapr, index); index++; } /* Consume valid HPTEs */ chunkstart = index; while ((index < htabslots) && (index - chunkstart < USHRT_MAX) - && HPTE_VALID(HPTE(spapr->htab, index))) { - CLEAN_HPTE(HPTE(spapr->htab, index)); + && hpte_is_valid(spapr, index)) { + hpte_set_clean(spapr, index); index++; } @@ -2306,7 +2277,7 @@ static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr, /* Consume non-dirty HPTEs */ while ((index < htabslots) - && !HPTE_DIRTY(HPTE(spapr->htab, index))) { + && !hpte_is_dirty(spapr, index)) { index++; examined++; } @@ -2314,9 +2285,9 @@ static int htab_save_later_pass(QEMUFile *f, 
SpaprMachineState *spapr, chunkstart = index; /* Consume valid dirty HPTEs */ while ((index < htabslots) && (index - chunkstart < USHRT_MAX) - && HPTE_DIRTY(HPTE(spapr->htab, index)) - && HPTE_VALID(HPTE(spapr->htab, index))) { - CLEAN_HPTE(HPTE(spapr->htab, index)); + && hpte_is_dirty(spapr, index) + && hpte_is_valid(spapr, index)) { + hpte_set_clean(spapr, index); index++; examined++; } @@ -2324,9 +2295,9 @@ static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr, invalidstart = index; /* Consume invalid dirty HPTEs */ while ((index < htabslots) && (index - invalidstart < USHRT_MAX) - && HPTE_DIRTY(HPTE(spapr->htab, index)) - && !HPTE_VALID(HPTE(spapr->htab, index))) { - CLEAN_HPTE(HPTE(spapr->htab, index)); + && hpte_is_dirty(spapr, index) + && !hpte_is_valid(spapr, index)) { + hpte_set_clean(spapr, index); index++; examined++; } @@ -2508,11 +2479,11 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id) if (spapr->htab) { if (n_valid) { - qemu_get_buffer(f, HPTE(spapr->htab, index), + qemu_get_buffer(f, (void *)hpte_get_ptr(spapr, index), HASH_PTE_SIZE_64 * n_valid); } if (n_invalid) { - memset(HPTE(spapr->htab, index + n_valid), 0, + memset(hpte_get_ptr(spapr, index + n_valid), 0, HASH_PTE_SIZE_64 * n_invalid); } } else { @@ -2547,7 +2518,7 @@ static void htab_save_cleanup(void *opaque) static SaveVMHandlers savevm_htab_handlers = { .save_setup = htab_save_setup, .save_live_iterate = htab_save_iterate, - .save_live_complete_precopy = htab_save_complete, + .save_complete = htab_save_complete, .save_cleanup = htab_save_cleanup, .load_state = htab_load, }; @@ -2715,7 +2686,6 @@ static void spapr_init_cpus(SpaprMachineState *spapr) { MachineState *machine = MACHINE(spapr); MachineClass *mc = MACHINE_GET_CLASS(machine); - SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); const char *type = spapr_get_cpu_core_type(machine->cpu_type); const CPUArchIdList *possible_cpus; unsigned int smp_cpus = machine->smp.cpus; @@ -2744,15 +2714,6 @@ static void spapr_init_cpus(SpaprMachineState *spapr) boot_cores_nr = possible_cpus->len; } - if (smc->pre_2_10_has_unused_icps) { - for (i = 0; i < spapr_max_server_number(spapr); i++) { - /* Dummy entries get deregistered when real ICPState objects - * are registered during CPU core hotplug. 
- */ - pre_2_10_vmstate_register_dummy_icp(i); - } - } - for (i = 0; i < possible_cpus->len; i++) { int core_id = i * smp_threads; @@ -2929,10 +2890,8 @@ static void spapr_machine_init(MachineState *machine) spapr->ov5 = spapr_ovec_new(); spapr->ov5_cas = spapr_ovec_new(); - if (smc->dr_lmb_enabled) { - spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY); - spapr_validate_node_memory(machine, &error_fatal); - } + spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY); + spapr_validate_node_memory(machine, &error_fatal); spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY); @@ -2959,6 +2918,9 @@ static void spapr_machine_init(MachineState *machine) spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT); } + qemu_guest_getrandom_nofail(&spapr->hashpkey_val, + sizeof(spapr->hashpkey_val)); + /* init CPUs */ spapr_init_cpus(spapr); @@ -3016,9 +2978,7 @@ static void spapr_machine_init(MachineState *machine) machine_memory_devices_init(machine, device_mem_base, device_mem_size); } - if (smc->dr_lmb_enabled) { - spapr_create_lmb_dr_connectors(spapr); - } + spapr_create_lmb_dr_connectors(spapr); if (mc->nvdimm_supported) { spapr_create_nvdimm_dr_connectors(spapr); @@ -3078,11 +3038,7 @@ static void spapr_machine_init(MachineState *machine) } if (machine->usb) { - if (smc->use_ohci_by_default) { - pci_create_simple(phb->bus, -1, "pci-ohci"); - } else { - pci_create_simple(phb->bus, -1, "nec-usb-xhci"); - } + pci_create_simple(phb->bus, -1, "nec-usb-xhci"); if (has_vga) { USBBus *usb_bus; @@ -3099,13 +3055,13 @@ static void spapr_machine_init(MachineState *machine) spapr->kernel_size = load_elf(kernel_filename, NULL, translate_kernel_address, spapr, - NULL, &loaded_addr, NULL, NULL, 1, - PPC_ELF_MACHINE, 0, 0); + NULL, &loaded_addr, NULL, NULL, + ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0); if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) { spapr->kernel_size = load_elf(kernel_filename, NULL, translate_kernel_address, spapr, - NULL, &loaded_addr, NULL, NULL, 0, - PPC_ELF_MACHINE, 0, 0); + NULL, &loaded_addr, NULL, NULL, + ELFDATA2LSB, PPC_ELF_MACHINE, 0, 0); spapr->kernel_le = spapr->kernel_size > 0; } if (spapr->kernel_size < 0) { @@ -3662,7 +3618,6 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev) static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { - const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev); SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); PCDIMMDevice *dimm = PC_DIMM(dev); @@ -3671,11 +3626,6 @@ static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Object *memdev; hwaddr pagesize; - if (!smc->dr_lmb_enabled) { - error_setg(errp, "Memory hotplug not supported for this machine"); - return; - } - size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err); if (local_err) { error_propagate(errp, local_err); @@ -3932,21 +3882,9 @@ void spapr_core_release(DeviceState *dev) static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) { MachineState *ms = MACHINE(hotplug_dev); - SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms); CPUCore *cc = CPU_CORE(dev); CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL); - if (smc->pre_2_10_has_unused_icps) { - SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev)); - int i; - - for (i = 0; i < cc->nr_threads; i++) { - CPUState *cs = CPU(sc->threads[i]); - - pre_2_10_vmstate_register_dummy_icp(cs->cpu_index); - } - } - assert(core_slot); core_slot->cpu = NULL; 
qdev_unrealize(dev); @@ -4027,7 +3965,6 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev) { SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); MachineClass *mc = MACHINE_GET_CLASS(spapr); - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev)); CPUCore *cc = CPU_CORE(dev); SpaprDrc *drc; @@ -4077,12 +4014,6 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev) } } - if (smc->pre_2_10_has_unused_icps) { - for (i = 0; i < cc->nr_threads; i++) { - CPUState *cs = CPU(core->threads[i]); - pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index); - } - } } static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, @@ -4537,21 +4468,14 @@ static void spapr_pic_print_info(InterruptStatsProvider *obj, GString *buf) /* * This is a XIVE only operation */ -static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool cam_ignore, uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match) +static bool spapr_match_nvt(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) { SpaprMachineState *spapr = SPAPR_MACHINE(xfb); XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc); XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); - int count; - - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, - priority, logic_serv, match); - if (count < 0) { - return count; - } /* * When we implement the save and restore of the thread interrupt @@ -4562,12 +4486,14 @@ static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, * Until this is done, the sPAPR machine should find at least one * matching context always. 
*/ - if (count == 0) { + if (!xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, cam_ignore, + priority, logic_serv, match)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n", nvt_blk, nvt_idx); + return false; } - return count; + return true; } int spapr_get_vcpu_id(PowerPCCPU *cpu) @@ -4664,7 +4590,7 @@ static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) } } -static void spapr_machine_class_init(ObjectClass *oc, void *data) +static void spapr_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc); @@ -4713,7 +4639,6 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) hc->unplug_request = spapr_machine_device_unplug_request; hc->unplug = spapr_machine_device_unplug; - smc->dr_lmb_enabled = true; smc->update_dt_enabled = true; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v2.0"); mc->has_hotpluggable_cpus = true; @@ -4758,6 +4683,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON; smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON; smc->default_caps.caps[SPAPR_CAP_RPT_INVALIDATE] = SPAPR_CAP_OFF; + smc->default_caps.caps[SPAPR_CAP_DAWR1] = SPAPR_CAP_ON; /* * This cap specifies whether the AIL 3 mode for @@ -4786,7 +4712,7 @@ static const TypeInfo spapr_machine_info = { .instance_finalize = spapr_machine_finalizefn, .class_size = sizeof(SpaprMachineClass), .class_init = spapr_machine_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_FW_PATH_PROVIDER }, { TYPE_NMI }, { TYPE_HOTPLUG_HANDLER }, @@ -4808,7 +4734,7 @@ static void spapr_machine_latest_class_options(MachineClass *mc) #define DEFINE_SPAPR_MACHINE_IMPL(latest, ...) 
\ static void MACHINE_VER_SYM(class_init, spapr, __VA_ARGS__)( \ ObjectClass *oc, \ - void *data) \ + const void *data) \ { \ MachineClass *mc = MACHINE_CLASS(oc); \ MACHINE_VER_SYM(class_options, spapr, __VA_ARGS__)(mc); \ @@ -4826,7 +4752,7 @@ static void spapr_machine_latest_class_options(MachineClass *mc) static void MACHINE_VER_SYM(register, spapr, __VA_ARGS__)(void) \ { \ MACHINE_VER_DELETION(__VA_ARGS__); \ - type_register(&MACHINE_VER_SYM(info, spapr, __VA_ARGS__)); \ + type_register_static(&MACHINE_VER_SYM(info, spapr, __VA_ARGS__)); \ } \ type_init(MACHINE_VER_SYM(register, spapr, __VA_ARGS__)) @@ -4834,18 +4760,49 @@ static void spapr_machine_latest_class_options(MachineClass *mc) DEFINE_SPAPR_MACHINE_IMPL(true, major, minor) #define DEFINE_SPAPR_MACHINE(major, minor) \ DEFINE_SPAPR_MACHINE_IMPL(false, major, minor) -#define DEFINE_SPAPR_MACHINE_TAGGED(major, minor, tag) \ - DEFINE_SPAPR_MACHINE_IMPL(false, major, minor, _, tag) + +/* + * pseries-10.1 + */ +static void spapr_machine_10_1_class_options(MachineClass *mc) +{ + /* Defaults for the latest behaviour inherited from the base class */ +} + +DEFINE_SPAPR_MACHINE_AS_LATEST(10, 1); + +/* + * pseries-10.0 + */ +static void spapr_machine_10_0_class_options(MachineClass *mc) +{ + spapr_machine_10_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_10_0, hw_compat_10_0_len); +} + +DEFINE_SPAPR_MACHINE(10, 0); + +/* + * pseries-9.2 + */ +static void spapr_machine_9_2_class_options(MachineClass *mc) +{ + spapr_machine_10_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_9_2, hw_compat_9_2_len); +} + +DEFINE_SPAPR_MACHINE(9, 2); /* * pseries-9.1 */ static void spapr_machine_9_1_class_options(MachineClass *mc) { - /* Defaults for the latest behaviour inherited from the base class */ + spapr_machine_9_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_9_1, hw_compat_9_1_len); } -DEFINE_SPAPR_MACHINE_AS_LATEST(9, 1); +DEFINE_SPAPR_MACHINE(9, 1); /* * pseries-9.0 @@ -4865,6 +4822,7 @@ static void spapr_machine_8_2_class_options(MachineClass *mc) { spapr_machine_9_0_class_options(mc); compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len); + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.2"); } DEFINE_SPAPR_MACHINE(8, 2); @@ -5109,278 +5067,6 @@ static void spapr_machine_3_0_class_options(MachineClass *mc) DEFINE_SPAPR_MACHINE(3, 0); -/* - * pseries-2.12 - */ -static void spapr_machine_2_12_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - static GlobalProperty compat[] = { - { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" }, - { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" }, - }; - - spapr_machine_3_0_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); - - /* We depend on kvm_enabled() to choose a default value for the - * hpt-max-page-size capability. Of course we can't do it here - * because this is too early and the HW accelerator isn't initialized - * yet. Postpone this to machine init (see default_caps_with_cpu()). 
- */ - smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0; -} - -DEFINE_SPAPR_MACHINE(2, 12); - -static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - - spapr_machine_2_12_class_options(mc); - smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND; - smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND; - smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD; -} - -DEFINE_SPAPR_MACHINE_TAGGED(2, 12, sxxm); - -/* - * pseries-2.11 - */ - -static void spapr_machine_2_11_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - - spapr_machine_2_12_class_options(mc); - smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON; - compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len); -} - -DEFINE_SPAPR_MACHINE(2, 11); - -/* - * pseries-2.10 - */ - -static void spapr_machine_2_10_class_options(MachineClass *mc) -{ - spapr_machine_2_11_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len); -} - -DEFINE_SPAPR_MACHINE(2, 10); - -/* - * pseries-2.9 - */ - -static void spapr_machine_2_9_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - static GlobalProperty compat[] = { - { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" }, - }; - - spapr_machine_2_10_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); - smc->pre_2_10_has_unused_icps = true; - smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED; -} - -DEFINE_SPAPR_MACHINE(2, 9); - -/* - * pseries-2.8 - */ - -static void spapr_machine_2_8_class_options(MachineClass *mc) -{ - static GlobalProperty compat[] = { - { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" }, - }; - - spapr_machine_2_9_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); - mc->numa_mem_align_shift = 23; -} - -DEFINE_SPAPR_MACHINE(2, 8); - -/* - * pseries-2.7 - */ - -static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index, - uint64_t *buid, hwaddr *pio, - hwaddr *mmio32, hwaddr *mmio64, - unsigned n_dma, uint32_t *liobns, Error **errp) -{ - /* Legacy PHB placement for pseries-2.7 and earlier machine types */ - const uint64_t base_buid = 0x800000020000000ULL; - const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */ - const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */ - const hwaddr pio_offset = 0x80000000; /* 2 GiB */ - const uint32_t max_index = 255; - const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */ - - uint64_t ram_top = MACHINE(spapr)->ram_size; - hwaddr phb0_base, phb_base; - int i; - - /* Do we have device memory? 
*/ - if (MACHINE(spapr)->device_memory) { - /* Can't just use maxram_size, because there may be an - * alignment gap between normal and device memory regions - */ - ram_top = MACHINE(spapr)->device_memory->base + - memory_region_size(&MACHINE(spapr)->device_memory->mr); - } - - phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment); - - if (index > max_index) { - error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)", - max_index); - return false; - } - - *buid = base_buid + index; - for (i = 0; i < n_dma; ++i) { - liobns[i] = SPAPR_PCI_LIOBN(index, i); - } - - phb_base = phb0_base + index * phb_spacing; - *pio = phb_base + pio_offset; - *mmio32 = phb_base + mmio_offset; - /* - * We don't set the 64-bit MMIO window, relying on the PHB's - * fallback behaviour of automatically splitting a large "32-bit" - * window into contiguous 32-bit and 64-bit windows - */ - - return true; -} - -static void spapr_machine_2_7_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - static GlobalProperty compat[] = { - { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", }, - { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", }, - { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", }, - { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", }, - }; - - spapr_machine_2_8_class_options(mc); - mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3"); - mc->default_machine_opts = "modern-hotplug-events=off"; - compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); - smc->phb_placement = phb_placement_2_7; -} - -DEFINE_SPAPR_MACHINE(2, 7); - -/* - * pseries-2.6 - */ - -static void spapr_machine_2_6_class_options(MachineClass *mc) -{ - static GlobalProperty compat[] = { - { TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" }, - }; - - spapr_machine_2_7_class_options(mc); - mc->has_hotpluggable_cpus = false; - compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); -} - -DEFINE_SPAPR_MACHINE(2, 6); - -/* - * pseries-2.5 - */ - -static void spapr_machine_2_5_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - static GlobalProperty compat[] = { - { "spapr-vlan", "use-rx-buffer-pools", "off" }, - }; - - spapr_machine_2_6_class_options(mc); - smc->use_ohci_by_default = true; - compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); -} - -DEFINE_SPAPR_MACHINE(2, 5); - -/* - * pseries-2.4 - */ - -static void spapr_machine_2_4_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - - spapr_machine_2_5_class_options(mc); - smc->dr_lmb_enabled = false; - compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len); -} - -DEFINE_SPAPR_MACHINE(2, 4); - -/* - * pseries-2.3 - */ - -static void spapr_machine_2_3_class_options(MachineClass *mc) -{ - static GlobalProperty compat[] = { - { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" }, - }; - spapr_machine_2_4_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); -} -DEFINE_SPAPR_MACHINE(2, 3); - -/* - * pseries-2.2 - */ - -static void spapr_machine_2_2_class_options(MachineClass *mc) -{ - static GlobalProperty compat[] = { - { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" }, - }; - - 
spapr_machine_2_3_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); - mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on"; -} -DEFINE_SPAPR_MACHINE(2, 2); - -/* - * pseries-2.1 - */ - -static void spapr_machine_2_1_class_options(MachineClass *mc) -{ - spapr_machine_2_2_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len); -} -DEFINE_SPAPR_MACHINE(2, 1); - static void spapr_machine_register_types(void) { type_register_static(&spapr_machine_info); diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c index 2f74923560d..f2f5722d8ad 100644 --- a/hw/ppc/spapr_caps.c +++ b/hw/ppc/spapr_caps.c @@ -26,14 +26,15 @@ #include "qemu/error-report.h" #include "qapi/error.h" #include "qapi/visitor.h" -#include "sysemu/hw_accel.h" -#include "exec/ram_addr.h" +#include "system/hw_accel.h" +#include "system/ram_addr.h" #include "target/ppc/cpu.h" #include "target/ppc/mmu-hash64.h" #include "cpu-models.h" #include "kvm_ppc.h" #include "migration/vmstate.h" -#include "sysemu/tcg.h" +#include "system/tcg.h" +#include "system/hostmem.h" #include "hw/ppc/spapr.h" @@ -696,6 +697,34 @@ static void cap_ail_mode_3_apply(SpaprMachineState *spapr, } } +static void cap_dawr1_apply(SpaprMachineState *spapr, uint8_t val, + Error **errp) +{ + ERRP_GUARD(); + + if (!val) { + return; /* Disable by default */ + } + + if (!ppc_type_check_compat(MACHINE(spapr)->cpu_type, + CPU_POWERPC_LOGICAL_3_10, 0, + spapr->max_compat_pvr)) { + error_setg(errp, "DAWR1 supported only on POWER10 and later CPUs"); + error_append_hint(errp, "Try appending -machine cap-dawr1=off\n"); + return; + } + + if (kvm_enabled()) { + if (!kvmppc_has_cap_dawr1()) { + error_setg(errp, "DAWR1 not supported by KVM."); + error_append_hint(errp, "Try appending -machine cap-dawr1=off"); + } else if (kvmppc_set_cap_dawr1(val) < 0) { + error_setg(errp, "Error enabling cap-dawr1 with KVM."); + error_append_hint(errp, "Try appending -machine cap-dawr1=off"); + } + } +} + SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = { [SPAPR_CAP_HTM] = { .name = "htm", @@ -831,6 +860,15 @@ SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = { .type = "bool", .apply = cap_ail_mode_3_apply, }, + [SPAPR_CAP_DAWR1] = { + .name = "dawr1", + .description = "Allow 2nd Data Address Watchpoint Register (DAWR1)", + .index = SPAPR_CAP_DAWR1, + .get = spapr_cap_get_bool, + .set = spapr_cap_set_bool, + .type = "bool", + .apply = cap_dawr1_apply, + }, }; static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr, @@ -841,6 +879,11 @@ static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr, caps = smc->default_caps; + if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_3_10, + 0, spapr->max_compat_pvr)) { + caps.caps[SPAPR_CAP_DAWR1] = SPAPR_CAP_OFF; + } + if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_3_00, 0, spapr->max_compat_pvr)) { caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF; @@ -975,6 +1018,7 @@ SPAPR_CAP_MIG_STATE(ccf_assist, SPAPR_CAP_CCF_ASSIST); SPAPR_CAP_MIG_STATE(fwnmi, SPAPR_CAP_FWNMI); SPAPR_CAP_MIG_STATE(rpt_invalidate, SPAPR_CAP_RPT_INVALIDATE); SPAPR_CAP_MIG_STATE(ail_mode_3, SPAPR_CAP_AIL_MODE_3); +SPAPR_CAP_MIG_STATE(dawr1, SPAPR_CAP_DAWR1); void spapr_caps_init(SpaprMachineState *spapr) { @@ -1034,7 +1078,7 @@ void spapr_caps_add_properties(SpaprMachineClass *smc) for (i = 0; i < ARRAY_SIZE(capability_table); i++) { SpaprCapabilityInfo *cap = 
&capability_table[i]; g_autofree char *name = g_strdup_printf("cap-%s", cap->name); - g_autofree char *desc = g_strdup_printf("%s", cap->description); + g_autofree char *desc = g_strdup(cap->description); object_class_property_add(klass, name, cap->type, cap->get, cap->set, diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 56090abcd11..4952f9bd2cf 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -15,15 +15,15 @@ #include "target/ppc/cpu.h" #include "hw/ppc/spapr.h" #include "qapi/error.h" -#include "sysemu/cpus.h" -#include "sysemu/kvm.h" +#include "system/cpus.h" +#include "system/kvm.h" #include "target/ppc/kvm_ppc.h" #include "hw/ppc/ppc.h" #include "target/ppc/mmu-hash64.h" #include "target/ppc/power8-pmu.h" -#include "sysemu/numa.h" -#include "sysemu/reset.h" -#include "sysemu/hw_accel.h" +#include "system/numa.h" +#include "system/reset.h" +#include "system/hw_accel.h" #include "qemu/error-report.h" static void spapr_reset_vcpu(PowerPCCPU *cpu) @@ -37,6 +37,9 @@ static void spapr_reset_vcpu(PowerPCCPU *cpu) cpu_reset(cs); + env->quiesced = true; /* set "RTAS stopped" state. */ + ppc_maybe_interrupt(env); + /* * "PowerPC Processor binding to IEEE 1275" defines the initial MSR state * as 32bit (MSR_SF=0) with MSR_ME=1 and MSR_FP=1 in "8.2.1. Initial @@ -98,6 +101,9 @@ void spapr_cpu_set_entry_state(PowerPCCPU *cpu, target_ulong nip, CPU(cpu)->halted = 0; /* Enable Power-saving mode Exit Cause exceptions */ ppc_store_lpcr(cpu, env->spr[SPR_LPCR] | pcc->lpcr_pm); + + env->quiesced = false; /* clear "RTAS stopped" state. */ + ppc_maybe_interrupt(env); } /* @@ -197,9 +203,7 @@ static void spapr_unrealize_vcpu(PowerPCCPU *cpu, SpaprCpuCore *sc) { CPUPPCState *env = &cpu->env; - if (!sc->pre_3_0_migration) { - vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data); - } + vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data); spapr_irq_cpu_intc_destroy(SPAPR_MACHINE(qdev_get_machine()), cpu); cpu_ppc_tb_free(env); qdev_unrealize(DEVICE(cpu)); @@ -275,6 +279,8 @@ static bool spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr, env->spr_cb[SPR_PIR].default_value = cs->cpu_index; env->spr_cb[SPR_TIR].default_value = thread_index; + env->spr_cb[SPR_HASHPKEYR].default_value = spapr->hashpkey_val; + cpu_ppc_set_1lpar(cpu); /* Set time-base frequency to 512 MHz. vhyp must be set first. 
*/ @@ -285,10 +291,8 @@ static bool spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr, return false; } - if (!sc->pre_3_0_migration) { - vmstate_register(NULL, cs->cpu_index, &vmstate_spapr_cpu_state, - cpu->machine_data); - } + vmstate_register(NULL, cs->cpu_index, &vmstate_spapr_cpu_state, + cpu->machine_data); return true; } @@ -317,6 +321,7 @@ static PowerPCCPU *spapr_create_vcpu(SpaprCpuCore *sc, int i, Error **errp) return NULL; } + env->chip_index = sc->node_id; env->core_index = cc->core_id; cpu->node_id = sc->node_id; @@ -364,21 +369,18 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp) } } -static Property spapr_cpu_core_properties[] = { +static const Property spapr_cpu_core_properties[] = { DEFINE_PROP_INT32("node-id", SpaprCpuCore, node_id, CPU_UNSET_NUMA_NODE_ID), - DEFINE_PROP_BOOL("pre-3.0-migration", SpaprCpuCore, pre_3_0_migration, - false), - DEFINE_PROP_END_OF_LIST() }; -static void spapr_cpu_core_class_init(ObjectClass *oc, void *data) +static void spapr_cpu_core_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); SpaprCpuCoreClass *scc = SPAPR_CPU_CORE_CLASS(oc); dc->realize = spapr_cpu_core_realize; dc->unrealize = spapr_cpu_core_unrealize; - dc->reset = spapr_cpu_core_reset; + device_class_set_legacy_reset(dc, spapr_cpu_core_reset); device_class_set_props(dc, spapr_cpu_core_properties); scc->cpu_type = data; } @@ -386,7 +388,7 @@ static void spapr_cpu_core_class_init(ObjectClass *oc, void *data) #define DEFINE_SPAPR_CPU_CORE_TYPE(cpu_model) \ { \ .parent = TYPE_SPAPR_CPU_CORE, \ - .class_data = (void *) POWERPC_CPU_TYPE_NAME(cpu_model), \ + .class_data = POWERPC_CPU_TYPE_NAME(cpu_model), \ .class_init = spapr_cpu_core_class_init, \ .name = SPAPR_CPU_CORE_TYPE_NAME(cpu_model), \ } @@ -411,6 +413,7 @@ static const TypeInfo spapr_cpu_core_type_infos[] = { DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.2"), DEFINE_SPAPR_CPU_CORE_TYPE("power10_v2.0"), + DEFINE_SPAPR_CPU_CORE_TYPE("power11_v2.0"), #ifdef CONFIG_KVM DEFINE_SPAPR_CPU_CORE_TYPE("host"), #endif diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c index 1484e3209d9..d2044b4fb52 100644 --- a/hw/ppc/spapr_drc.c +++ b/hw/ppc/spapr_drc.c @@ -12,7 +12,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qapi/qmp/qnull.h" +#include "qobject/qnull.h" #include "qemu/cutils.h" #include "hw/ppc/spapr_drc.h" #include "qom/object.h" @@ -23,11 +23,11 @@ #include "hw/ppc/spapr.h" /* for RTAS return codes */ #include "hw/pci-host/spapr.h" /* spapr_phb_remove_pci_device_cb callback */ #include "hw/ppc/spapr_nvdimm.h" -#include "sysemu/device_tree.h" -#include "sysemu/reset.h" +#include "system/device_tree.h" +#include "system/reset.h" #include "trace.h" -#define DRC_CONTAINER_PATH "/dr-connector" +#define DRC_CONTAINER_PATH "dr-connector" #define DRC_INDEX_TYPE_SHIFT 28 #define DRC_INDEX_ID_MASK ((1ULL << DRC_INDEX_TYPE_SHIFT) - 1) @@ -514,6 +514,16 @@ static const VMStateDescription vmstate_spapr_drc = { } }; +static void drc_container_create(void) +{ + object_property_add_new_container(object_get_root(), DRC_CONTAINER_PATH); +} + +static Object *drc_container_get(void) +{ + return object_resolve_path_component(object_get_root(), DRC_CONTAINER_PATH); +} + static void drc_realize(DeviceState *d, Error **errp) { SpaprDrc *drc = SPAPR_DR_CONNECTOR(d); @@ -529,7 +539,7 @@ static void drc_realize(DeviceState *d, Error **errp) * inaccessible by the guest, since lookups rely on this path * existing in the composition tree */ - 
root_container = container_get(object_get_root(), DRC_CONTAINER_PATH); + root_container = drc_container_get(); child_name = object_get_canonical_path_component(OBJECT(drc)); trace_spapr_drc_realize_child(spapr_drc_index(drc), child_name); object_property_add_alias(root_container, link_name, @@ -543,12 +553,10 @@ static void drc_unrealize(DeviceState *d) { SpaprDrc *drc = SPAPR_DR_CONNECTOR(d); g_autofree gchar *name = g_strdup_printf("%x", spapr_drc_index(drc)); - Object *root_container; trace_spapr_drc_unrealize(spapr_drc_index(drc)); vmstate_unregister(VMSTATE_IF(drc), &vmstate_spapr_drc, drc); - root_container = container_get(object_get_root(), DRC_CONTAINER_PATH); - object_property_del(root_container, name); + object_property_del(drc_container_get(), name); } SpaprDrc *spapr_dr_connector_new(Object *owner, const char *type, @@ -581,10 +589,12 @@ static void spapr_dr_connector_instance_init(Object *obj) drc->state = drck->empty_state; } -static void spapr_dr_connector_class_init(ObjectClass *k, void *data) +static void spapr_dr_connector_class_init(ObjectClass *k, const void *data) { DeviceClass *dk = DEVICE_CLASS(k); + drc_container_create(); + dk->realize = drc_realize; dk->unrealize = drc_unrealize; /* @@ -655,7 +665,7 @@ static void unrealize_physical(DeviceState *d) qemu_unregister_reset(drc_physical_reset, drcp); } -static void spapr_drc_physical_class_init(ObjectClass *k, void *data) +static void spapr_drc_physical_class_init(ObjectClass *k, const void *data) { DeviceClass *dk = DEVICE_CLASS(k); SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); @@ -669,7 +679,7 @@ static void spapr_drc_physical_class_init(ObjectClass *k, void *data) drck->empty_state = SPAPR_DRC_STATE_PHYSICAL_POWERON; } -static void spapr_drc_logical_class_init(ObjectClass *k, void *data) +static void spapr_drc_logical_class_init(ObjectClass *k, const void *data) { SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); @@ -680,7 +690,7 @@ static void spapr_drc_logical_class_init(ObjectClass *k, void *data) drck->empty_state = SPAPR_DRC_STATE_LOGICAL_UNUSABLE; } -static void spapr_drc_cpu_class_init(ObjectClass *k, void *data) +static void spapr_drc_cpu_class_init(ObjectClass *k, const void *data) { SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); @@ -691,7 +701,7 @@ static void spapr_drc_cpu_class_init(ObjectClass *k, void *data) drck->dt_populate = spapr_core_dt_populate; } -static void spapr_drc_pci_class_init(ObjectClass *k, void *data) +static void spapr_drc_pci_class_init(ObjectClass *k, const void *data) { SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); @@ -702,7 +712,7 @@ static void spapr_drc_pci_class_init(ObjectClass *k, void *data) drck->dt_populate = spapr_pci_dt_populate; } -static void spapr_drc_lmb_class_init(ObjectClass *k, void *data) +static void spapr_drc_lmb_class_init(ObjectClass *k, const void *data) { SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); @@ -713,7 +723,7 @@ static void spapr_drc_lmb_class_init(ObjectClass *k, void *data) drck->dt_populate = spapr_lmb_dt_populate; } -static void spapr_drc_phb_class_init(ObjectClass *k, void *data) +static void spapr_drc_phb_class_init(ObjectClass *k, const void *data) { SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); @@ -724,7 +734,7 @@ static void spapr_drc_phb_class_init(ObjectClass *k, void *data) drck->dt_populate = spapr_phb_dt_populate; } -static void spapr_drc_pmem_class_init(ObjectClass *k, void *data) +static void spapr_drc_pmem_class_init(ObjectClass *k, const void *data) { SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); @@ 
-796,9 +806,8 @@ static const TypeInfo spapr_drc_pmem_info = { SpaprDrc *spapr_drc_by_index(uint32_t index) { Object *obj; - g_autofree gchar *name = g_strdup_printf("%s/%x", DRC_CONTAINER_PATH, - index); - obj = object_resolve_path(name, NULL); + g_autofree gchar *name = g_strdup_printf("%x", index); + obj = object_resolve_path_component(drc_container_get(), name); return !obj ? NULL : SPAPR_DR_CONNECTOR(obj); } @@ -860,7 +869,7 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask) /* aliases for all DRConnector objects will be rooted in QOM * composition tree at DRC_CONTAINER_PATH */ - root_container = container_get(object_get_root(), DRC_CONTAINER_PATH); + root_container = drc_container_get(); object_property_iter_init(&iter, root_container); while ((prop = object_property_iter_next(&iter))) { @@ -953,7 +962,7 @@ void spapr_drc_reset_all(SpaprMachineState *spapr) ObjectProperty *prop; ObjectPropertyIterator iter; - drc_container = container_get(object_get_root(), DRC_CONTAINER_PATH); + drc_container = drc_container_get(); restart: object_property_iter_init(&iter, drc_container); while ((prop = object_property_iter_next(&iter))) { diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c index cb0eeee5874..832b0212f31 100644 --- a/hw/ppc/spapr_events.c +++ b/hw/ppc/spapr_events.c @@ -27,8 +27,8 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "sysemu/device_tree.h" -#include "sysemu/runstate.h" +#include "system/device_tree.h" +#include "system/runstate.h" #include "hw/ppc/fdt.h" #include "hw/ppc/spapr.h" @@ -645,8 +645,7 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action, /* we shouldn't be signaling hotplug events for resources * that don't support them */ - g_assert(false); - return; + g_assert_not_reached(); } if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT) { diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 5e1d020e3df..1e936f35e44 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -1,14 +1,15 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" #include "qapi/error.h" -#include "sysemu/hw_accel.h" -#include "sysemu/runstate.h" -#include "sysemu/tcg.h" +#include "system/hw_accel.h" +#include "system/runstate.h" +#include "system/tcg.h" #include "qemu/log.h" #include "qemu/main-loop.h" #include "qemu/module.h" #include "qemu/error-report.h" #include "exec/tb-flush.h" +#include "exec/target_page.h" #include "helper_regs.h" #include "hw/ppc/ppc.h" #include "hw/ppc/spapr.h" @@ -299,8 +300,10 @@ static target_ulong h_page_init(PowerPCCPU *cpu, SpaprMachineState *spapr, if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) { if (kvm_enabled()) { kvmppc_icbi_range(cpu, pdst, len); - } else { + } else if (tcg_enabled()) { tb_flush(CPU(cpu)); + } else { + g_assert_not_reached(); } } @@ -578,6 +581,8 @@ static target_ulong h_confer(PowerPCCPU *cpu, SpaprMachineState *spapr, CPUState *cs = CPU(cpu); SpaprCpuState *spapr_cpu; + assert(tcg_enabled()); /* KVM will have handled this */ + /* * -1 means confer to all other CPUs without dispatch counter check, * otherwise it's a targeted confer. 
@@ -818,11 +823,12 @@ static target_ulong h_set_mode_resource_set_ciabr(PowerPCCPU *cpu, return H_SUCCESS; } -static target_ulong h_set_mode_resource_set_dawr0(PowerPCCPU *cpu, - SpaprMachineState *spapr, - target_ulong mflags, - target_ulong value1, - target_ulong value2) +static target_ulong h_set_mode_resource_set_dawr(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong mflags, + target_ulong resource, + target_ulong value1, + target_ulong value2) { CPUPPCState *env = &cpu->env; @@ -835,8 +841,15 @@ static target_ulong h_set_mode_resource_set_dawr0(PowerPCCPU *cpu, return H_P4; } - ppc_store_dawr0(env, value1); - ppc_store_dawrx0(env, value2); + if (resource == H_SET_MODE_RESOURCE_SET_DAWR0) { + ppc_store_dawr0(env, value1); + ppc_store_dawrx0(env, value2); + } else if (resource == H_SET_MODE_RESOURCE_SET_DAWR1) { + ppc_store_dawr1(env, value1); + ppc_store_dawrx1(env, value2); + } else { + g_assert_not_reached(); + } return H_SUCCESS; } @@ -915,8 +928,9 @@ static target_ulong h_set_mode(PowerPCCPU *cpu, SpaprMachineState *spapr, args[3]); break; case H_SET_MODE_RESOURCE_SET_DAWR0: - ret = h_set_mode_resource_set_dawr0(cpu, spapr, args[0], args[2], - args[3]); + case H_SET_MODE_RESOURCE_SET_DAWR1: + ret = h_set_mode_resource_set_dawr(cpu, spapr, args[0], args[1], + args[2], args[3]); break; case H_SET_MODE_RESOURCE_LE: ret = h_set_mode_resource_le(cpu, spapr, args[0], args[2], args[3]); @@ -968,7 +982,6 @@ static void spapr_check_setup_free_hpt(SpaprMachineState *spapr, /* RADIX->HASH || NOTHING->HASH : Allocate HPT */ spapr_setup_hpt(spapr); } - return; } #define FLAGS_MASK 0x01FULL diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c index e3c01ef44f8..c2432a0c00c 100644 --- a/hw/ppc/spapr_iommu.c +++ b/hw/ppc/spapr_iommu.c @@ -21,10 +21,10 @@ #include "qemu/error-report.h" #include "qemu/log.h" #include "qemu/module.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "kvm_ppc.h" #include "migration/vmstate.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "trace.h" #include "hw/ppc/spapr.h" @@ -668,11 +668,11 @@ int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname, tcet->liobn, 0, tcet->nb_table << tcet->page_shift); } -static void spapr_tce_table_class_init(ObjectClass *klass, void *data) +static void spapr_tce_table_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = spapr_tce_table_realize; - dc->reset = spapr_tce_reset; + device_class_set_legacy_reset(dc, spapr_tce_reset); dc->unrealize = spapr_tce_table_unrealize; /* Reason: This is just an internal device for handling the hypercalls */ dc->user_creatable = false; @@ -693,7 +693,8 @@ static const TypeInfo spapr_tce_table_info = { .class_init = spapr_tce_table_class_init, }; -static void spapr_iommu_memory_region_class_init(ObjectClass *klass, void *data) +static void spapr_iommu_memory_region_class_init(ObjectClass *klass, + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index aebd7eaabbf..d6d368dd08c 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -19,7 +19,7 @@ #include "hw/ppc/xics_spapr.h" #include "hw/qdev-properties.h" #include "cpu-models.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "trace.h" diff --git a/hw/ppc/spapr_nested.c b/hw/ppc/spapr_nested.c index c02785756c1..10cf634da19 100644 --- a/hw/ppc/spapr_nested.c +++ b/hw/ppc/spapr_nested.c @@ -1,6 +1,7 @@ #include "qemu/osdep.h" #include 
"qemu/cutils.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" +#include "exec/target_long.h" #include "helper_regs.h" #include "hw/ppc/ppc.h" #include "hw/ppc/spapr.h" @@ -64,10 +65,9 @@ static SpaprMachineStateNestedGuest *spapr_get_nested_guest(SpaprMachineState *spapr, target_ulong guestid) { - SpaprMachineStateNestedGuest *guest; - - guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid)); - return guest; + return spapr->nested.guests ? + g_hash_table_lookup(spapr->nested.guests, + GINT_TO_POINTER(guestid)) : NULL; } bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, @@ -593,26 +593,37 @@ static bool spapr_nested_vcpu_check(SpaprMachineStateNestedGuest *guest, return false; } -static void *get_vcpu_state_ptr(SpaprMachineStateNestedGuest *guest, - target_ulong vcpuid) +static void *get_vcpu_state_ptr(SpaprMachineState *spapr, + SpaprMachineStateNestedGuest *guest, + target_ulong vcpuid) { assert(spapr_nested_vcpu_check(guest, vcpuid, false)); return &guest->vcpus[vcpuid].state; } -static void *get_vcpu_ptr(SpaprMachineStateNestedGuest *guest, - target_ulong vcpuid) +static void *get_vcpu_ptr(SpaprMachineState *spapr, + SpaprMachineStateNestedGuest *guest, + target_ulong vcpuid) { assert(spapr_nested_vcpu_check(guest, vcpuid, false)); return &guest->vcpus[vcpuid]; } -static void *get_guest_ptr(SpaprMachineStateNestedGuest *guest, +static void *get_guest_ptr(SpaprMachineState *spapr, + SpaprMachineStateNestedGuest *guest, target_ulong vcpuid) { return guest; /* for GSBE_NESTED */ } +static void *get_machine_ptr(SpaprMachineState *spapr, + SpaprMachineStateNestedGuest *guest, + target_ulong vcpuid) +{ + /* ignore guest and vcpuid for this */ + return &spapr->nested; +} + /* * set=1 means the L1 is trying to set some state * set=0 means the L1 is trying to get some state @@ -771,6 +782,7 @@ static void copy_logical_pvr(void *a, void *b, bool set) if (*pvr_logical_ptr) { switch (*pvr_logical_ptr) { + case CPU_POWERPC_LOGICAL_3_10_P11: case CPU_POWERPC_LOGICAL_3_10: pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00; break; @@ -982,6 +994,7 @@ struct guest_state_element_type guest_state_element_types[] = { GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FSCR, fscr), GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PSPB, pspb), GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTRL, ctrl), + GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DPDES, dpdes), GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_VRSAVE, vrsave), GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAR, dar), GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DSISR, dsisr), @@ -1010,7 +1023,15 @@ struct guest_state_element_type guest_state_element_types[] = { GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUFFER, 0x10, runbufout, copy_state_runbuf), GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUF_MIN_SZ, 0x8, runbufout, out_buf_min_size), GSBE_NESTED_VCPU(GSB_VCPU_HDEC_EXPIRY_TB, 0x8, hdecr_expiry_tb, - copy_state_hdecr) + copy_state_hdecr), + GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_HEAP_INUSE, l0_guest_heap_inuse), + GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_HEAP_MAX, l0_guest_heap_max), + GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_PGTABLE_SIZE_INUSE, + l0_guest_pgtable_size_inuse), + GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_PGTABLE_SIZE_MAX, + l0_guest_pgtable_size_max), + GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_PGTABLE_RECLAIMED, + l0_guest_pgtable_reclaimed), }; void spapr_nested_gsb_init(void) @@ -1028,8 +1049,13 @@ void spapr_nested_gsb_init(void) else if (type->id >= GSB_VCPU_IN_BUFFER) /* 0x0c00 - 0xf000 Thread + RW */ type->flags = 0; + else if (type->id >= GSB_L0_GUEST_HEAP_INUSE) + + /*0x0800 - 
0x0804 Hostwide Counters + RO */ + type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_HOST_WIDE | + GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY; else if (type->id >= GSB_VCPU_LPVR) - /* 0x0003 - 0x0bff Guest + RW */ + /* 0x0003 - 0x07ff Guest + RW */ type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE; else if (type->id >= GSB_HV_VCPU_STATE_SIZE) /* 0x0001 - 0x0002 Guest + RO */ @@ -1136,18 +1162,26 @@ static bool guest_state_request_check(struct guest_state_request *gsr) return false; } - if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE) { + if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_HOST_WIDE) { + /* Hostwide elements cant be clubbed with other types */ + if (!(gsr->flags & GUEST_STATE_REQUEST_HOST_WIDE)) { + qemu_log_mask(LOG_GUEST_ERROR, "trying to get/set a host wide " + "Element ID:%04x.\n", id); + return false; + } + } else if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE) { /* guest wide element type */ if (!(gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE)) { - qemu_log_mask(LOG_GUEST_ERROR, "trying to set a guest wide " + qemu_log_mask(LOG_GUEST_ERROR, "trying to get/set a guest wide " "Element ID:%04x.\n", id); return false; } } else { /* thread wide element type */ - if (gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE) { - qemu_log_mask(LOG_GUEST_ERROR, "trying to set a thread wide " - "Element ID:%04x.\n", id); + if (gsr->flags & (GUEST_STATE_REQUEST_GUEST_WIDE | + GUEST_STATE_REQUEST_HOST_WIDE)) { + qemu_log_mask(LOG_GUEST_ERROR, "trying to get/set a thread wide" + " Element ID:%04x.\n", id); return false; } } @@ -1184,6 +1218,12 @@ static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu, return H_PARAMETER; } + /* P11 capabilities */ + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10_P11, 0, + spapr->max_compat_pvr)) { + env->gpr[4] |= H_GUEST_CAPABILITIES_P11_MODE; + } + /* P10 capabilities */ if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, spapr->max_compat_pvr)) { @@ -1226,7 +1266,10 @@ static target_ulong h_guest_set_capabilities(PowerPCCPU *cpu, env->gpr[4] = 1; /* set R5 to the first supported Power Processor Mode */ - if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10_P11, 0, + spapr->max_compat_pvr)) { + env->gpr[5] = H_GUEST_CAP_P11_MODE_BMAP; + } else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, spapr->max_compat_pvr)) { env->gpr[5] = H_GUEST_CAP_P10_MODE_BMAP; } else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, @@ -1407,7 +1450,8 @@ static target_ulong h_guest_create_vcpu(PowerPCCPU *cpu, return H_SUCCESS; } -static target_ulong getset_state(SpaprMachineStateNestedGuest *guest, +static target_ulong getset_state(SpaprMachineState *spapr, + SpaprMachineStateNestedGuest *guest, uint64_t vcpuid, struct guest_state_request *gsr) { @@ -1440,7 +1484,7 @@ static target_ulong getset_state(SpaprMachineStateNestedGuest *guest, /* Get pointer to guest data to get/set */ if (type->location && type->copy) { - ptr = type->location(guest, vcpuid); + ptr = type->location(spapr, guest, vcpuid); assert(ptr); if (!~(type->mask) && is_gsr_invalid(gsr, element, type)) { return H_INVALID_ELEMENT_VALUE; @@ -1457,6 +1501,7 @@ static target_ulong getset_state(SpaprMachineStateNestedGuest *guest, } static target_ulong map_and_getset_state(PowerPCCPU *cpu, + SpaprMachineState *spapr, SpaprMachineStateNestedGuest *guest, uint64_t vcpuid, struct guest_state_request *gsr) @@ -1480,7 +1525,7 @@ static target_ulong map_and_getset_state(PowerPCCPU *cpu, goto out1; } - rc = getset_state(guest, 
vcpuid, gsr); + rc = getset_state(spapr, guest, vcpuid, gsr); out1: address_space_unmap(CPU(cpu)->as, gsr->gsb, len, is_write, len); @@ -1498,27 +1543,46 @@ static target_ulong h_guest_getset_state(PowerPCCPU *cpu, target_ulong buf = args[3]; target_ulong buflen = args[4]; struct guest_state_request gsr; - SpaprMachineStateNestedGuest *guest; + SpaprMachineStateNestedGuest *guest = NULL; - guest = spapr_get_nested_guest(spapr, lpid); - if (!guest) { - return H_P2; - } gsr.buf = buf; assert(buflen <= GSB_MAX_BUF_SIZE); gsr.len = buflen; gsr.flags = 0; - if (flags & H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) { + + /* Works for both get/set state */ + if ((flags & H_GUEST_GET_STATE_FLAGS_GUEST_WIDE) || + (flags & H_GUEST_SET_STATE_FLAGS_GUEST_WIDE)) { gsr.flags |= GUEST_STATE_REQUEST_GUEST_WIDE; } - if (flags & ~H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) { - return H_PARAMETER; /* flag not supported yet */ - } if (set) { + if (flags & ~H_GUEST_SET_STATE_FLAGS_MASK) { + return H_PARAMETER; + } gsr.flags |= GUEST_STATE_REQUEST_SET; + } else { + /* + * No reserved fields to be set in flags nor both + * GUEST/HOST wide bits + */ + if ((flags & ~H_GUEST_GET_STATE_FLAGS_MASK) || + (flags == H_GUEST_GET_STATE_FLAGS_MASK)) { + return H_PARAMETER; + } + + if (flags & H_GUEST_GET_STATE_FLAGS_HOST_WIDE) { + gsr.flags |= GUEST_STATE_REQUEST_HOST_WIDE; + } + } + + if (!(gsr.flags & GUEST_STATE_REQUEST_HOST_WIDE)) { + guest = spapr_get_nested_guest(spapr, lpid); + if (!guest) { + return H_P2; + } } - return map_and_getset_state(cpu, guest, vcpuid, &gsr); + return map_and_getset_state(cpu, spapr, guest, vcpuid, &gsr); } static target_ulong h_guest_set_state(PowerPCCPU *cpu, @@ -1629,7 +1693,8 @@ static int get_exit_ids(uint64_t srr0, uint16_t ids[16]) return nr; } -static void exit_process_output_buffer(PowerPCCPU *cpu, +static void exit_process_output_buffer(SpaprMachineState *spapr, + PowerPCCPU *cpu, SpaprMachineStateNestedGuest *guest, target_ulong vcpuid, target_ulong *r3) @@ -1667,10 +1732,9 @@ static void exit_process_output_buffer(PowerPCCPU *cpu, gsr.gsb = gsb; gsr.len = VCPU_OUT_BUF_MIN_SZ; gsr.flags = 0; /* get + never guest wide */ - getset_state(guest, vcpuid, &gsr); + getset_state(spapr, guest, vcpuid, &gsr); address_space_unmap(CPU(cpu)->as, gsb, len, true, len); - return; } static @@ -1693,7 +1757,7 @@ void spapr_exit_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, int excp) exit_nested_store_l2(cpu, excp, vcpu); /* do the output buffer for run_vcpu*/ - exit_process_output_buffer(cpu, guest, vcpuid, &r3_return); + exit_process_output_buffer(spapr, cpu, guest, vcpuid, &r3_return); assert(env->spr[SPR_LPIDR] != 0); nested_load_state(cpu, spapr_cpu->nested_host_state); @@ -1808,7 +1872,7 @@ static target_ulong h_guest_run_vcpu(PowerPCCPU *cpu, gsr.buf = vcpu->runbufin.addr; gsr.len = vcpu->runbufin.size; gsr.flags = GUEST_STATE_REQUEST_SET; /* Thread wide + writing */ - rc = map_and_getset_state(cpu, guest, vcpuid, &gsr); + rc = map_and_getset_state(cpu, spapr, guest, vcpuid, &gsr); if (rc == H_SUCCESS) { nested_papr_run_vcpu(cpu, lpid, vcpu); } else { diff --git a/hw/ppc/spapr_nvdimm.c b/hw/ppc/spapr_nvdimm.c index 7d2dfe5e3d2..72b4a6329fb 100644 --- a/hw/ppc/spapr_nvdimm.c +++ b/hw/ppc/spapr_nvdimm.c @@ -235,8 +235,6 @@ void spapr_dt_persistent_memory(SpaprMachineState *spapr, void *fdt) spapr_dt_nvdimm(spapr, fdt, offset, nvdimm); } g_slist_free(nvdimms); - - return; } static target_ulong h_scm_read_metadata(PowerPCCPU *cpu, @@ -884,22 +882,22 @@ static void spapr_nvdimm_unrealize(NVDIMMDevice 
*dimm) vmstate_unregister(NULL, &vmstate_spapr_nvdimm_states, dimm); } -static Property spapr_nvdimm_properties[] = { #ifdef CONFIG_LIBPMEM +static const Property spapr_nvdimm_properties[] = { DEFINE_PROP_BOOL("pmem-override", SpaprNVDIMMDevice, pmem_override, false), -#endif - DEFINE_PROP_END_OF_LIST(), }; +#endif -static void spapr_nvdimm_class_init(ObjectClass *oc, void *data) +static void spapr_nvdimm_class_init(ObjectClass *oc, const void *data) { - DeviceClass *dc = DEVICE_CLASS(oc); NVDIMMClass *nvc = NVDIMM_CLASS(oc); nvc->realize = spapr_nvdimm_realize; nvc->unrealize = spapr_nvdimm_unrealize; - device_class_set_props(dc, spapr_nvdimm_properties); +#ifdef CONFIG_LIBPMEM + device_class_set_props(DEVICE_CLASS(oc), spapr_nvdimm_properties); +#endif } static void spapr_nvdimm_init(Object *obj) diff --git a/hw/ppc/spapr_ovec.c b/hw/ppc/spapr_ovec.c index 88e29536aa7..75ab4fe2623 100644 --- a/hw/ppc/spapr_ovec.c +++ b/hw/ppc/spapr_ovec.c @@ -15,7 +15,8 @@ #include "hw/ppc/spapr_ovec.h" #include "migration/vmstate.h" #include "qemu/bitmap.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" +#include "system/memory.h" #include "qemu/error-report.h" #include "trace.h" #include diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 7cf9904c354..1ac1185825e 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -34,7 +34,7 @@ #include "hw/pci/pci_host.h" #include "hw/ppc/spapr.h" #include "hw/pci-host/spapr.h" -#include "exec/ram_addr.h" +#include "system/ram_addr.h" #include #include "trace.h" #include "qemu/error-report.h" @@ -45,10 +45,10 @@ #include "hw/pci/pci_ids.h" #include "hw/ppc/spapr_drc.h" #include "hw/qdev-properties.h" -#include "sysemu/device_tree.h" -#include "sysemu/kvm.h" -#include "sysemu/hostmem.h" -#include "sysemu/numa.h" +#include "system/device_tree.h" +#include "system/kvm.h" +#include "system/hostmem.h" +#include "system/numa.h" #include "hw/ppc/spapr_numa.h" #include "qemu/log.h" @@ -1237,10 +1237,6 @@ static void add_drcs(SpaprPhbState *phb, PCIBus *bus) int i; uint8_t chassis; - if (!phb->dr_enabled) { - return; - } - chassis = chassis_from_bus(bus); if (pci_bus_is_root(bus)) { @@ -1260,10 +1256,6 @@ static void remove_drcs(SpaprPhbState *phb, PCIBus *bus) int i; uint8_t chassis; - if (!phb->dr_enabled) { - return; - } - chassis = chassis_from_bus(bus); for (i = PCI_SLOT_MAX * PCI_FUNC_MAX - 1; i >= 0; i--) { @@ -1291,8 +1283,7 @@ static void spapr_dt_pci_device_cb(PCIBus *bus, PCIDevice *pdev, PciWalkFdt *p = opaque; int err; - if (p->err) { - /* Something's already broken, don't keep going */ + if (p->err || !pdev->enabled) { return; } @@ -1548,17 +1539,6 @@ static void spapr_pci_pre_plug(HotplugHandler *plug_handler, PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))); uint32_t slotnr = PCI_SLOT(pdev->devfn); - if (!phb->dr_enabled) { - /* if this is a hotplug operation initiated by the user - * we need to let them know it's not enabled - */ - if (plugged_dev->hotplugged) { - error_setg(errp, "Bus '%s' does not support hotplugging", - phb->parent_obj.bus->qbus.name); - return; - } - } - if (IS_PCI_BRIDGE(plugged_dev)) { if (!bridge_has_valid_chassis_nr(OBJECT(plugged_dev), errp)) { return; @@ -1569,7 +1549,9 @@ static void spapr_pci_pre_plug(HotplugHandler *plug_handler, * hotplug, we do not allow functions to be hotplugged to a * slot that already has function 0 present */ - if (plugged_dev->hotplugged && bus->devices[PCI_DEVFN(slotnr, 0)] && + if (plugged_dev->hotplugged && + !pci_is_vf(pdev) && + 
bus->devices[PCI_DEVFN(slotnr, 0)] && PCI_FUNC(pdev->devfn) != 0) { error_setg(errp, "PCI: slot %d function 0 already occupied by %s," " additional functions can no longer be exposed to guest.", @@ -1592,10 +1574,10 @@ static void spapr_pci_plug(HotplugHandler *plug_handler, uint32_t slotnr = PCI_SLOT(pdev->devfn); /* - * If DR is disabled we don't need to do anything in the case of - * hotplug or coldplug callbacks. + * If DR or the PCI device is disabled we don't need to do anything + * in the case of hotplug or coldplug callbacks. */ - if (!phb->dr_enabled) { + if (!pdev->enabled) { return; } @@ -1673,13 +1655,12 @@ static void spapr_pci_unplug_request(HotplugHandler *plug_handler, PCIDevice *pdev = PCI_DEVICE(plugged_dev); SpaprDrc *drc = drc_from_dev(phb, pdev); - if (!phb->dr_enabled) { - error_setg(errp, "Bus '%s' does not support hotplugging", - phb->parent_obj.bus->qbus.name); + g_assert(drc); + + if (!drc->dev) { return; } - g_assert(drc); g_assert(drc->dev == plugged_dev); if (!spapr_drc_unplug_requested(drc)) { @@ -1847,30 +1828,15 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) assert(sphb->index != (uint32_t)-1); /* checked in spapr_phb_pre_plug() */ - if (sphb->mem64_win_size != 0) { - if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) { - error_setg(errp, "32-bit memory window of size 0x%"HWADDR_PRIx - " (max 2 GiB)", sphb->mem_win_size); - return; - } - - /* 64-bit window defaults to identity mapping */ - sphb->mem64_win_pciaddr = sphb->mem64_win_addr; - } else if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) { - /* - * For compatibility with old configuration, if no 64-bit MMIO - * window is specified, but the ordinary (32-bit) memory - * window is specified as > 2GiB, we treat it as a 2GiB 32-bit - * window, with a 64-bit MMIO window following on immediately - * afterwards - */ - sphb->mem64_win_size = sphb->mem_win_size - SPAPR_PCI_MEM32_WIN_SIZE; - sphb->mem64_win_addr = sphb->mem_win_addr + SPAPR_PCI_MEM32_WIN_SIZE; - sphb->mem64_win_pciaddr = - SPAPR_PCI_MEM_WIN_BUS_OFFSET + SPAPR_PCI_MEM32_WIN_SIZE; - sphb->mem_win_size = SPAPR_PCI_MEM32_WIN_SIZE; + if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) { + error_setg(errp, "32-bit memory window of size 0x%"HWADDR_PRIx + " (max 2 GiB)", sphb->mem_win_size); + return; } + /* 64-bit window defaults to identity mapping */ + sphb->mem64_win_pciaddr = sphb->mem64_win_addr; + if (spapr_pci_find_phb(spapr, sphb->buid)) { SpaprPhbState *s; @@ -2081,7 +2047,7 @@ static void spapr_phb_reset(DeviceState *qdev) g_hash_table_remove_all(sphb->msi); } -static Property spapr_phb_properties[] = { +static const Property spapr_phb_properties[] = { DEFINE_PROP_UINT32("index", SpaprPhbState, index, -1), DEFINE_PROP_UINT64("mem_win_size", SpaprPhbState, mem_win_size, SPAPR_PCI_MEM32_WIN_SIZE), @@ -2089,8 +2055,6 @@ static Property spapr_phb_properties[] = { SPAPR_PCI_MEM64_WIN_SIZE), DEFINE_PROP_UINT64("io_win_size", SpaprPhbState, io_win_size, SPAPR_PCI_IO_WIN_SIZE), - DEFINE_PROP_BOOL("dynamic-reconfiguration", SpaprPhbState, dr_enabled, - true), /* Default DMA window is 0..1GB */ DEFINE_PROP_UINT64("dma_win_addr", SpaprPhbState, dma_win_addr, 0), DEFINE_PROP_UINT64("dma_win_size", SpaprPhbState, dma_win_size, 0x40000000), @@ -2101,13 +2065,10 @@ static Property spapr_phb_properties[] = { (1ULL << 12) | (1ULL << 16) | (1ULL << 21) | (1ULL << 24)), DEFINE_PROP_UINT32("numa_node", SpaprPhbState, numa_node, -1), - DEFINE_PROP_BOOL("pre-2.8-migration", SpaprPhbState, - pre_2_8_migration, false), 
DEFINE_PROP_BOOL("pcie-extended-configuration-space", SpaprPhbState, pcie_ecs, true), DEFINE_PROP_BOOL("pre-5.1-associativity", SpaprPhbState, pre_5_1_assoc, false), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_spapr_pci_lsi = { @@ -2140,20 +2101,6 @@ static int spapr_pci_pre_save(void *opaque) gpointer key, value; int i; - if (sphb->pre_2_8_migration) { - sphb->mig_liobn = sphb->dma_liobn[0]; - sphb->mig_mem_win_addr = sphb->mem_win_addr; - sphb->mig_mem_win_size = sphb->mem_win_size; - sphb->mig_io_win_addr = sphb->io_win_addr; - sphb->mig_io_win_size = sphb->io_win_size; - - if ((sphb->mem64_win_size != 0) - && (sphb->mem64_win_addr - == (sphb->mem_win_addr + sphb->mem_win_size))) { - sphb->mig_mem_win_size += sphb->mem64_win_size; - } - } - g_free(sphb->msi_devs); sphb->msi_devs = NULL; sphb->msi_devs_num = g_hash_table_size(sphb->msi); @@ -2200,13 +2147,6 @@ static int spapr_pci_post_load(void *opaque, int version_id) return 0; } -static bool pre_2_8_migration(void *opaque, int version_id) -{ - SpaprPhbState *sphb = opaque; - - return sphb->pre_2_8_migration; -} - static const VMStateDescription vmstate_spapr_pci = { .name = "spapr_pci", .version_id = 2, @@ -2216,11 +2156,6 @@ static const VMStateDescription vmstate_spapr_pci = { .post_load = spapr_pci_post_load, .fields = (const VMStateField[]) { VMSTATE_UINT64_EQUAL(buid, SpaprPhbState, NULL), - VMSTATE_UINT32_TEST(mig_liobn, SpaprPhbState, pre_2_8_migration), - VMSTATE_UINT64_TEST(mig_mem_win_addr, SpaprPhbState, pre_2_8_migration), - VMSTATE_UINT64_TEST(mig_mem_win_size, SpaprPhbState, pre_2_8_migration), - VMSTATE_UINT64_TEST(mig_io_win_addr, SpaprPhbState, pre_2_8_migration), - VMSTATE_UINT64_TEST(mig_io_win_size, SpaprPhbState, pre_2_8_migration), VMSTATE_STRUCT_ARRAY(lsi_table, SpaprPhbState, PCI_NUM_PINS, 0, vmstate_spapr_pci_lsi, SpaprPciLsi), VMSTATE_INT32(msi_devs_num, SpaprPhbState), @@ -2238,7 +2173,7 @@ static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge, return sphb->dtbusname; } -static void spapr_phb_class_init(ObjectClass *klass, void *data) +static void spapr_phb_class_init(ObjectClass *klass, const void *data) { PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -2248,7 +2183,7 @@ static void spapr_phb_class_init(ObjectClass *klass, void *data) dc->realize = spapr_phb_realize; dc->unrealize = spapr_phb_unrealize; device_class_set_props(dc, spapr_phb_properties); - dc->reset = spapr_phb_reset; + device_class_set_legacy_reset(dc, spapr_phb_reset); dc->vmsd = &vmstate_spapr_pci; /* Supported by TYPE_SPAPR_MACHINE */ dc->user_creatable = true; @@ -2265,7 +2200,7 @@ static const TypeInfo spapr_phb_info = { .instance_size = sizeof(SpaprPhbState), .instance_finalize = spapr_phb_finalizefn, .class_init = spapr_phb_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } } diff --git a/hw/ppc/spapr_pci_vfio.c b/hw/ppc/spapr_pci_vfio.c index 76b2a3487b5..e318d0d912f 100644 --- a/hw/ppc/spapr_pci_vfio.c +++ b/hw/ppc/spapr_pci_vfio.c @@ -24,7 +24,7 @@ #include "hw/pci-host/spapr.h" #include "hw/pci/msix.h" #include "hw/pci/pci_device.h" -#include "hw/vfio/vfio-common.h" +#include "hw/vfio/vfio-container.h" #include "qemu/error-report.h" #include CONFIG_DEVICES /* CONFIG_VFIO_PCI */ @@ -85,7 +85,7 @@ static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op) static VFIOContainer *vfio_eeh_as_container(AddressSpace *as) { - VFIOAddressSpace *space = 
vfio_get_address_space(as); + VFIOAddressSpace *space = vfio_address_space_get(as); VFIOContainerBase *bcontainer = NULL; if (QLIST_EMPTY(&space->containers)) { @@ -105,7 +105,7 @@ static VFIOContainer *vfio_eeh_as_container(AddressSpace *as) } out: - vfio_put_address_space(space); + vfio_address_space_put(space); return container_of(bcontainer, VFIOContainer, bcontainer); } diff --git a/hw/ppc/spapr_rng.c b/hw/ppc/spapr_rng.c index c2fda7ad209..6fec6070370 100644 --- a/hw/ppc/spapr_rng.c +++ b/hw/ppc/spapr_rng.c @@ -22,8 +22,8 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/module.h" -#include "sysemu/device_tree.h" -#include "sysemu/rng.h" +#include "system/device_tree.h" +#include "system/rng.h" #include "hw/ppc/spapr.h" #include "hw/qdev-properties.h" #include "kvm_ppc.h" @@ -130,14 +130,13 @@ static void spapr_rng_realize(DeviceState *dev, Error **errp) } } -static Property spapr_rng_properties[] = { +static const Property spapr_rng_properties[] = { DEFINE_PROP_BOOL("use-kvm", SpaprRngState, use_kvm, false), DEFINE_PROP_LINK("rng", SpaprRngState, backend, TYPE_RNG_BACKEND, RngBackend *), - DEFINE_PROP_END_OF_LIST(), }; -static void spapr_rng_class_init(ObjectClass *oc, void *data) +static void spapr_rng_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c index f329693c554..78309dbb09d 100644 --- a/hw/ppc/spapr_rtas.c +++ b/hw/ppc/spapr_rtas.c @@ -28,12 +28,12 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "qemu/error-report.h" -#include "sysemu/sysemu.h" -#include "sysemu/device_tree.h" -#include "sysemu/cpus.h" -#include "sysemu/hw_accel.h" -#include "sysemu/runstate.h" -#include "sysemu/qtest.h" +#include "system/system.h" +#include "system/device_tree.h" +#include "system/cpus.h" +#include "system/hw_accel.h" +#include "system/runstate.h" +#include "system/qtest.h" #include "kvm_ppc.h" #include "hw/ppc/spapr.h" @@ -110,7 +110,8 @@ static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_, id = rtas_ld(args, 0); cpu = spapr_find_cpu(id); if (cpu != NULL) { - if (CPU(cpu)->halted) { + CPUPPCState *env = &cpu->env; + if (env->quiesced) { rtas_st(rets, 1, 0); } else { rtas_st(rets, 1, 2); @@ -215,6 +216,8 @@ static void rtas_stop_self(PowerPCCPU *cpu, SpaprMachineState *spapr, * For the same reason, set PSSCR_EC. */ env->spr[SPR_PSSCR] |= PSSCR_EC; + env->quiesced = true; /* set "RTAS stopped" state. 
*/ + ppc_maybe_interrupt(env); cs->halted = 1; ppc_store_lpcr(cpu, env->spr[SPR_LPCR] & ~pcc->lpcr_pm); kvmppc_set_reg_ppc_online(cpu, 0); @@ -565,7 +568,6 @@ static bool spapr_qtest_callback(CharBackend *chr, gchar **words) g_assert(rc == 0); res = qtest_rtas_call(words[1], nargs, args, nret, ret); - qtest_send_prefix(chr); qtest_sendf(chr, "OK %"PRIu64"\n", res); return true; diff --git a/hw/ppc/spapr_rtc.c b/hw/ppc/spapr_rtc.c index deb3ea4e495..1f7d2d8f898 100644 --- a/hw/ppc/spapr_rtc.c +++ b/hw/ppc/spapr_rtc.c @@ -27,8 +27,8 @@ #include "qemu/osdep.h" #include "qemu/timer.h" -#include "sysemu/sysemu.h" -#include "sysemu/rtc.h" +#include "system/system.h" +#include "system/rtc.h" #include "hw/ppc/spapr.h" #include "migration/vmstate.h" #include "qapi/error.h" @@ -163,7 +163,7 @@ static const VMStateDescription vmstate_spapr_rtc = { }, }; -static void spapr_rtc_class_init(ObjectClass *oc, void *data) +static void spapr_rtc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/ppc/spapr_tpm_proxy.c b/hw/ppc/spapr_tpm_proxy.c index e10af35a185..1297b3ad569 100644 --- a/hw/ppc/spapr_tpm_proxy.c +++ b/hw/ppc/spapr_tpm_proxy.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "hw/ppc/spapr.h" #include "hw/qdev-properties.h" #include "trace.h" @@ -41,8 +41,8 @@ static ssize_t tpm_execute(SpaprTpmProxy *tpm_proxy, target_ulong *args) target_ulong data_in_size = args[2]; uint64_t data_out = ppc64_phys_to_real(args[3]); target_ulong data_out_size = args[4]; - uint8_t buf_in[TPM_SPAPR_BUFSIZE]; - uint8_t buf_out[TPM_SPAPR_BUFSIZE]; + QEMU_UNINITIALIZED uint8_t buf_in[TPM_SPAPR_BUFSIZE]; + QEMU_UNINITIALIZED uint8_t buf_out[TPM_SPAPR_BUFSIZE]; ssize_t ret; trace_spapr_tpm_execute(data_in, data_in_size, data_out, data_out_size); @@ -145,12 +145,11 @@ static void spapr_tpm_proxy_unrealize(DeviceState *d) qemu_unregister_reset(spapr_tpm_proxy_reset, tpm_proxy); } -static Property spapr_tpm_proxy_properties[] = { +static const Property spapr_tpm_proxy_properties[] = { DEFINE_PROP_STRING("host-path", SpaprTpmProxy, host_path), - DEFINE_PROP_END_OF_LIST(), }; -static void spapr_tpm_proxy_class_init(ObjectClass *k, void *data) +static void spapr_tpm_proxy_class_init(ObjectClass *k, const void *data) { DeviceClass *dk = DEVICE_CLASS(k); diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c index 3221874848d..7759436a4f5 100644 --- a/hw/ppc/spapr_vio.c +++ b/hw/ppc/spapr_vio.c @@ -27,8 +27,8 @@ #include "hw/loader.h" #include "elf.h" #include "hw/sysbus.h" -#include "sysemu/kvm.h" -#include "sysemu/device_tree.h" +#include "system/kvm.h" +#include "system/device_tree.h" #include "kvm_ppc.h" #include "migration/vmstate.h" @@ -50,7 +50,7 @@ static char *spapr_vio_get_dev_name(DeviceState *qdev) return g_strdup_printf("%s@%x", pc->dt_name, dev->reg); } -static void spapr_vio_bus_class_init(ObjectClass *klass, void *data) +static void spapr_vio_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); @@ -599,7 +599,7 @@ SpaprVioBus *spapr_vio_bus_init(void) return bus; } -static void spapr_vio_bridge_class_init(ObjectClass *klass, void *data) +static void spapr_vio_bridge_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -631,11 +631,11 @@ const VMStateDescription vmstate_spapr_vio = { }, }; -static void vio_spapr_device_class_init(ObjectClass *klass, void *data) +static void 
vio_spapr_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->realize = spapr_vio_busdev_realize; - k->reset = spapr_vio_busdev_reset; + device_class_set_legacy_reset(k, spapr_vio_busdev_reset); k->bus_type = TYPE_SPAPR_VIO_BUS; } diff --git a/hw/ppc/spapr_vof.c b/hw/ppc/spapr_vof.c index c02eaacfed0..46d78756e63 100644 --- a/hw/ppc/spapr_vof.c +++ b/hw/ppc/spapr_vof.c @@ -10,7 +10,7 @@ #include "hw/ppc/spapr_cpu_core.h" #include "hw/ppc/fdt.h" #include "hw/ppc/vof.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qom/qom-qobject.h" #include "trace.h" diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c index c49da1f46f7..c9969ae48a8 100644 --- a/hw/ppc/virtex_ml507.c +++ b/hw/ppc/virtex_ml507.c @@ -28,12 +28,12 @@ #include "exec/page-protection.h" #include "cpu.h" #include "hw/sysbus.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/block/flash.h" -#include "sysemu/sysemu.h" -#include "sysemu/reset.h" +#include "system/system.h" +#include "system/reset.h" #include "hw/boards.h" -#include "sysemu/device_tree.h" +#include "system/device_tree.h" #include "hw/loader.h" #include "elf.h" #include "qapi/error.h" @@ -67,29 +67,6 @@ static struct boot_info void *vfdt; } boot_info; -/* Create reset TLB entries for BookE, spanning the 32bit addr space. */ -static void mmubooke_create_initial_mapping(CPUPPCState *env, - target_ulong va, - hwaddr pa) -{ - ppcemb_tlb_t *tlb = &env->tlb.tlbe[0]; - - tlb->attr = 0; - tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); - tlb->size = 1U << 31; /* up to 0x80000000 */ - tlb->EPN = va & TARGET_PAGE_MASK; - tlb->RPN = pa & TARGET_PAGE_MASK; - tlb->PID = 0; - - tlb = &env->tlb.tlbe[1]; - tlb->attr = 0; - tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); - tlb->size = 1U << 31; /* up to 0xffffffff */ - tlb->EPN = 0x80000000 & TARGET_PAGE_MASK; - tlb->RPN = 0x80000000 & TARGET_PAGE_MASK; - tlb->PID = 0; -} - static PowerPCCPU *ppc440_init_xilinx(const char *cpu_type, uint32_t sysclk) { PowerPCCPU *cpu; @@ -139,9 +116,10 @@ static void main_cpu_reset(void *opaque) env->gpr[3] = bi->fdt; env->nip = bi->bootstrap_pc; - /* Create a mapping for the kernel. */ - mmubooke_create_initial_mapping(env, 0, 0); - env->gpr[6] = tswap32(EPAPR_MAGIC); + /* Create a mapping spanning the 32bit addr space. */ + booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1U << 31); + booke_set_tlb(&env->tlb.tlbe[1], 0x80000000, 0x80000000, 1U << 31); + env->gpr[6] = EPAPR_MAGIC; env->gpr[7] = bi->ima_size; } @@ -168,7 +146,7 @@ static int xilinx_load_device_tree(MachineState *machine, /* Try the local "ppc.dtb" override. */ fdt = load_device_tree("ppc.dtb", &fdt_size); if (!fdt) { - path = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE); + path = qemu_find_file(QEMU_FILE_TYPE_DTB, BINARY_DEVICE_TREE_FILE); if (path) { fdt = load_device_tree(path, &fdt_size); g_free(path); @@ -239,6 +217,7 @@ static void virtex_init(MachineState *machine) cpu_irq = qdev_get_gpio_in(DEVICE(cpu), PPC40x_INPUT_INT); dev = qdev_new("xlnx.xps-intc"); + qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_BIG); qdev_prop_set_uint32(dev, "kind-of-intr", 0); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR); @@ -252,6 +231,7 @@ static void virtex_init(MachineState *machine) /* 2 timers at irq 2 @ 62 Mhz. 
*/ dev = qdev_new("xlnx.xps-timer"); + qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_BIG); qdev_prop_set_uint32(dev, "one-timer-only", 0); qdev_prop_set_uint32(dev, "clock-frequency", 62 * 1000000); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); @@ -264,8 +244,8 @@ static void virtex_init(MachineState *machine) /* Boots a kernel elf binary. */ kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, - &entry, NULL, &high, NULL, 1, PPC_ELF_MACHINE, - 0, 0); + &entry, NULL, &high, NULL, + ELFDATA2MSB, PPC_ELF_MACHINE, 0, 0); boot_info.bootstrap_pc = entry & 0x00ffffff; if (kernel_size < 0) { diff --git a/hw/ppc/vof.c b/hw/ppc/vof.c index b5b6514d79f..f14efa3a7c8 100644 --- a/hw/ppc/vof.c +++ b/hw/ppc/vof.c @@ -15,10 +15,10 @@ #include "qemu/units.h" #include "qemu/log.h" #include "qapi/error.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/ppc/vof.h" #include "hw/ppc/fdt.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "qom/qom-qobject.h" #include "trace.h" diff --git a/hw/remote/iohub.c b/hw/remote/iohub.c index 40dfee4bad4..988d3285ccc 100644 --- a/hw/remote/iohub.c +++ b/hw/remote/iohub.c @@ -33,19 +33,6 @@ void remote_iohub_init(RemoteIOHubState *iohub) } } -void remote_iohub_finalize(RemoteIOHubState *iohub) -{ - int pirq; - - for (pirq = 0; pirq < REMOTE_IOHUB_NB_PIRQS; pirq++) { - qemu_set_fd_handler(event_notifier_get_fd(&iohub->resamplefds[pirq]), - NULL, NULL, NULL); - event_notifier_cleanup(&iohub->irqfds[pirq]); - event_notifier_cleanup(&iohub->resamplefds[pirq]); - qemu_mutex_destroy(&iohub->irq_level_lock[pirq]); - } -} - int remote_iohub_map_irq(PCIDevice *pci_dev, int intx) { return pci_dev->devfn; diff --git a/hw/remote/iommu.c b/hw/remote/iommu.c index 7c56aad0fc1..3e0758a21ed 100644 --- a/hw/remote/iommu.c +++ b/hw/remote/iommu.c @@ -13,8 +13,8 @@ #include "hw/remote/iommu.h" #include "hw/pci/pci_bus.h" #include "hw/pci/pci.h" -#include "exec/memory.h" -#include "exec/address-spaces.h" +#include "system/memory.h" +#include "system/address-spaces.h" #include "trace.h" /** diff --git a/hw/remote/machine.c b/hw/remote/machine.c index fdc6c441bbd..e4b47838bad 100644 --- a/hw/remote/machine.c +++ b/hw/remote/machine.c @@ -16,7 +16,7 @@ #include "qemu/osdep.h" #include "hw/remote/machine.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qapi/error.h" #include "hw/pci/pci_host.h" #include "hw/remote/iohub.h" @@ -121,7 +121,7 @@ static void remote_machine_dev_unplug_cb(HotplugHandler *hotplug_dev, } } -static void remote_machine_class_init(ObjectClass *oc, void *data) +static void remote_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); @@ -146,7 +146,7 @@ static const TypeInfo remote_machine = { .instance_size = sizeof(RemoteMachineState), .instance_init = remote_machine_instance_init, .class_init = remote_machine_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } } diff --git a/hw/remote/memory.c b/hw/remote/memory.c index 6d60da91e01..00193a552fa 100644 --- a/hw/remote/memory.c +++ b/hw/remote/memory.c @@ -11,7 +11,7 @@ #include "qemu/osdep.h" #include "hw/remote/memory.h" -#include "exec/ram_addr.h" +#include "system/ram_addr.h" #include "qapi/error.h" static void remote_sysmem_reset(void) diff --git a/hw/remote/message.c b/hw/remote/message.c index 50f6bf2d495..273f1e0323c 100644 --- a/hw/remote/message.c +++ 
b/hw/remote/message.c @@ -13,12 +13,12 @@ #include "io/channel.h" #include "hw/remote/mpqemu-link.h" #include "qapi/error.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "hw/pci/pci.h" #include "exec/memattrs.h" #include "hw/remote/memory.h" #include "hw/remote/iohub.h" -#include "sysemu/reset.h" +#include "system/reset.h" static void process_config_write(QIOChannel *ioc, PCIDevice *dev, MPQemuMsg *msg, Error **errp); @@ -215,13 +215,10 @@ static void process_bar_read(QIOChannel *ioc, MPQemuMsg *msg, Error **errp) static void process_device_reset_msg(QIOChannel *ioc, PCIDevice *dev, Error **errp) { - DeviceClass *dc = DEVICE_GET_CLASS(dev); DeviceState *s = DEVICE(dev); MPQemuMsg ret = { 0 }; - if (dc->reset) { - dc->reset(s); - } + device_cold_reset(s); ret.cmd = MPQEMU_CMD_RET; diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c index 4394dc4d821..49885a1db6e 100644 --- a/hw/remote/mpqemu-link.c +++ b/hw/remote/mpqemu-link.c @@ -17,7 +17,7 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "io/channel.h" -#include "sysemu/iothread.h" +#include "system/iothread.h" #include "trace.h" /* @@ -110,7 +110,7 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds, bql_unlock(); } - ret = qio_channel_readv_full_all_eof(ioc, &iov, 1, fds, nfds, errp); + ret = qio_channel_readv_full_all_eof(ioc, &iov, 1, fds, nfds, 0, errp); if (drop_bql && !iothread && !qemu_in_coroutine()) { bql_lock(); diff --git a/hw/remote/proxy-memory-listener.c b/hw/remote/proxy-memory-listener.c index a926f61ebe9..30ac74961dd 100644 --- a/hw/remote/proxy-memory-listener.c +++ b/hw/remote/proxy-memory-listener.c @@ -10,9 +10,9 @@ #include "qemu/int128.h" #include "qemu/range.h" -#include "exec/memory.h" +#include "system/memory.h" #include "exec/cpu-common.h" -#include "exec/ram_addr.h" +#include "system/ram_addr.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "hw/remote/mpqemu-link.h" diff --git a/hw/remote/proxy.c b/hw/remote/proxy.c index fbc85a8d36f..b0165aa2a1d 100644 --- a/hw/remote/proxy.c +++ b/hw/remote/proxy.c @@ -21,7 +21,7 @@ #include "hw/remote/proxy-memory-listener.h" #include "qom/object.h" #include "qemu/event_notifier.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" static void probe_pci_info(PCIDevice *dev, Error **errp); static void proxy_device_reset(DeviceState *dev); @@ -191,12 +191,11 @@ static void pci_proxy_write_config(PCIDevice *d, uint32_t addr, uint32_t val, config_op_send(PCI_PROXY_DEV(d), addr, &val, len, MPQEMU_CMD_PCI_CFGWRITE); } -static Property proxy_properties[] = { +static const Property proxy_properties[] = { DEFINE_PROP_STRING("fd", PCIProxyDev, fd), - DEFINE_PROP_END_OF_LIST(), }; -static void pci_proxy_dev_class_init(ObjectClass *klass, void *data) +static void pci_proxy_dev_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -206,7 +205,7 @@ static void pci_proxy_dev_class_init(ObjectClass *klass, void *data) k->config_read = pci_proxy_read_config; k->config_write = pci_proxy_write_config; - dc->reset = proxy_device_reset; + device_class_set_legacy_reset(dc, proxy_device_reset); device_class_set_props(dc, proxy_properties); } @@ -216,7 +215,7 @@ static const TypeInfo pci_proxy_dev_type_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIProxyDev), .class_init = pci_proxy_dev_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { 
INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/remote/remote-obj.c b/hw/remote/remote-obj.c index dc27cc8da1f..85882902d7f 100644 --- a/hw/remote/remote-obj.c +++ b/hw/remote/remote-obj.c @@ -17,7 +17,7 @@ #include "hw/remote/machine.h" #include "io/channel-util.h" #include "qapi/error.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/pci/pci.h" #include "qemu/sockets.h" #include "monitor/monitor.h" @@ -163,7 +163,7 @@ static void remote_object_finalize(Object *obj) g_free(o->devid); } -static void remote_object_class_init(ObjectClass *klass, void *data) +static void remote_object_class_init(ObjectClass *klass, const void *data) { RemoteObjectClass *k = REMOTE_OBJECT_CLASS(klass); @@ -188,7 +188,7 @@ static const TypeInfo remote_object_info = { .instance_finalize = remote_object_finalize, .class_size = sizeof(RemoteObjectClass), .class_init = remote_object_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c index 8dbafafb9e7..ea6165ebdce 100644 --- a/hw/remote/vfio-user-obj.c +++ b/hw/remote/vfio-user-obj.c @@ -43,7 +43,7 @@ #include "qom/object_interfaces.h" #include "qemu/error-report.h" #include "trace.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "hw/boards.h" #include "hw/remote/machine.h" #include "qapi/error.h" @@ -52,12 +52,12 @@ #include "qemu/notify.h" #include "qemu/thread.h" #include "qemu/main-loop.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "libvfio-user.h" #include "hw/qdev-core.h" #include "hw/pci/pci.h" #include "qemu/timer.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" #include "hw/remote/vfio-user-obj.h" @@ -358,7 +358,7 @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset, int access_size; uint64_t val; - if (memory_access_is_direct(mr, is_write)) { + if (memory_access_is_direct(mr, is_write, MEMTXATTRS_UNSPECIFIED)) { /** * Some devices expose a PCI expansion ROM, which could be buffer * based as compared to other regions which are primarily based on @@ -917,7 +917,7 @@ static void vfu_object_finalize(Object *obj) } } -static void vfu_object_class_init(ObjectClass *klass, void *data) +static void vfu_object_class_init(ObjectClass *klass, const void *data) { VfuObjectClass *k = VFU_OBJECT_CLASS(klass); @@ -944,7 +944,7 @@ static const TypeInfo vfu_object_info = { .instance_finalize = vfu_object_finalize, .class_size = sizeof(VfuObjectClass), .class_init = vfu_object_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig index a2030e3a6ff..fc9c35bd981 100644 --- a/hw/riscv/Kconfig +++ b/hw/riscv/Kconfig @@ -1,3 +1,6 @@ +config RISCV_IOMMU + bool + config RISCV_NUMA bool @@ -22,6 +25,14 @@ config MICROCHIP_PFSOC select SIFIVE_PLIC select UNIMP +config MICROBLAZE_V + bool + default y + depends on RISCV32 || RISCV64 + select XILINX + select XILINX_AXI + select XILINX_ETHLITE + config OPENTITAN bool default y @@ -44,9 +55,10 @@ config RISCV_VIRT select PCI select PCI_EXPRESS_GENERIC_BRIDGE select PFLASH_CFI01 - select SERIAL + select SERIAL_MM select RISCV_ACLINT select RISCV_APLIC + select RISCV_IOMMU select RISCV_IMSIC select SIFIVE_PLIC select SIFIVE_TEST @@ -107,3 +119,12 @@ config SPIKE select HTIF select RISCV_ACLINT select SIFIVE_PLIC + +config 
XIANGSHAN_KUNMINGHU + bool + default y + depends on RISCV64 + select RISCV_ACLINT + select RISCV_APLIC + select RISCV_IMSIC + select SERIAL_MM diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c index 47281ca853d..828a867be39 100644 --- a/hw/riscv/boot.c +++ b/hw/riscv/boot.c @@ -27,17 +27,17 @@ #include "hw/riscv/boot.h" #include "hw/riscv/boot_opensbi.h" #include "elf.h" -#include "sysemu/device_tree.h" -#include "sysemu/qtest.h" -#include "sysemu/kvm.h" -#include "sysemu/reset.h" +#include "system/device_tree.h" +#include "system/qtest.h" +#include "system/kvm.h" +#include "system/reset.h" #include bool riscv_is_32bit(RISCVHartArrayState *harts) { RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(&harts->harts[0]); - return mcc->misa_mxl_max == MXL_RV32; + return mcc->def->misa_mxl_max == MXL_RV32; } /* @@ -67,9 +67,16 @@ char *riscv_plic_hart_config_string(int hart_count) return g_strjoinv(",", (char **)vals); } -target_ulong riscv_calc_kernel_start_addr(RISCVHartArrayState *harts, +void riscv_boot_info_init(RISCVBootInfo *info, RISCVHartArrayState *harts) +{ + info->kernel_size = 0; + info->initrd_size = 0; + info->is_32bit = riscv_is_32bit(harts); +} + +target_ulong riscv_calc_kernel_start_addr(RISCVBootInfo *info, target_ulong firmware_end_addr) { - if (riscv_is_32bit(harts)) { + if (info->is_32bit) { return QEMU_ALIGN_UP(firmware_end_addr, 4 * MiB); } else { return QEMU_ALIGN_UP(firmware_end_addr, 2 * MiB); @@ -128,11 +135,11 @@ char *riscv_find_firmware(const char *firmware_filename, target_ulong riscv_find_and_load_firmware(MachineState *machine, const char *default_machine_firmware, - hwaddr firmware_load_addr, + hwaddr *firmware_load_addr, symbol_fn_t sym_cb) { char *firmware_filename; - target_ulong firmware_end_addr = firmware_load_addr; + target_ulong firmware_end_addr = *firmware_load_addr; firmware_filename = riscv_find_firmware(machine->firmware, default_machine_firmware); @@ -148,7 +155,7 @@ target_ulong riscv_find_and_load_firmware(MachineState *machine, } target_ulong riscv_load_firmware(const char *firmware_filename, - hwaddr firmware_load_addr, + hwaddr *firmware_load_addr, symbol_fn_t sym_cb) { uint64_t firmware_entry, firmware_end; @@ -159,22 +166,23 @@ target_ulong riscv_load_firmware(const char *firmware_filename, if (load_elf_ram_sym(firmware_filename, NULL, NULL, NULL, &firmware_entry, NULL, &firmware_end, NULL, 0, EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) { + *firmware_load_addr = firmware_entry; return firmware_end; } firmware_size = load_image_targphys_as(firmware_filename, - firmware_load_addr, + *firmware_load_addr, current_machine->ram_size, NULL); if (firmware_size > 0) { - return firmware_load_addr + firmware_size; + return *firmware_load_addr + firmware_size; } error_report("could not load firmware '%s'", firmware_filename); exit(1); } -static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry) +static void riscv_load_initrd(MachineState *machine, RISCVBootInfo *info) { const char *filename = machine->initrd_filename; uint64_t mem_size = machine->ram_size; @@ -195,7 +203,7 @@ static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry) * halfway into RAM, and for boards with 1GB of RAM or more we put * the initrd at 512MB. 
*/ - start = kernel_entry + MIN(mem_size / 2, 512 * MiB); + start = info->image_low_addr + MIN(mem_size / 2, 512 * MiB); size = load_ramdisk(filename, start, mem_size - start); if (size == -1) { @@ -206,6 +214,9 @@ static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry) } } + info->initrd_start = start; + info->initrd_size = size; + /* Some RISC-V machines (e.g. opentitan) don't have a fdt. */ if (fdt) { end = start + size; @@ -214,14 +225,14 @@ static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry) } } -target_ulong riscv_load_kernel(MachineState *machine, - RISCVHartArrayState *harts, - target_ulong kernel_start_addr, - bool load_initrd, - symbol_fn_t sym_cb) +void riscv_load_kernel(MachineState *machine, + RISCVBootInfo *info, + target_ulong kernel_start_addr, + bool load_initrd, + symbol_fn_t sym_cb) { const char *kernel_filename = machine->kernel_filename; - uint64_t kernel_load_base, kernel_entry; + ssize_t kernel_size; void *fdt = machine->fdt; g_assert(kernel_filename != NULL); @@ -233,21 +244,29 @@ target_ulong riscv_load_kernel(MachineState *machine, * the (expected) load address load address. This allows kernels to have * separate SBI and ELF entry points (used by FreeBSD, for example). */ - if (load_elf_ram_sym(kernel_filename, NULL, NULL, NULL, - NULL, &kernel_load_base, NULL, NULL, 0, - EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) { - kernel_entry = kernel_load_base; + kernel_size = load_elf_ram_sym(kernel_filename, NULL, NULL, NULL, NULL, + &info->image_low_addr, &info->image_high_addr, + NULL, ELFDATA2LSB, EM_RISCV, + 1, 0, NULL, true, sym_cb); + if (kernel_size > 0) { + info->kernel_size = kernel_size; goto out; } - if (load_uimage_as(kernel_filename, &kernel_entry, NULL, NULL, - NULL, NULL, NULL) > 0) { + kernel_size = load_uimage_as(kernel_filename, &info->image_low_addr, + NULL, NULL, NULL, NULL, NULL); + if (kernel_size > 0) { + info->kernel_size = kernel_size; + info->image_high_addr = info->image_low_addr + kernel_size; goto out; } - if (load_image_targphys_as(kernel_filename, kernel_start_addr, - current_machine->ram_size, NULL) > 0) { - kernel_entry = kernel_start_addr; + kernel_size = load_image_targphys_as(kernel_filename, kernel_start_addr, + current_machine->ram_size, NULL); + if (kernel_size > 0) { + info->kernel_size = kernel_size; + info->image_low_addr = kernel_start_addr; + info->image_high_addr = info->image_low_addr + kernel_size; goto out; } @@ -256,23 +275,21 @@ target_ulong riscv_load_kernel(MachineState *machine, out: /* - * For 32 bit CPUs 'kernel_entry' can be sign-extended by + * For 32 bit CPUs 'image_low_addr' can be sign-extended by * load_elf_ram_sym(). */ - if (riscv_is_32bit(harts)) { - kernel_entry = extract64(kernel_entry, 0, 32); + if (info->is_32bit) { + info->image_low_addr = extract64(info->image_low_addr, 0, 32); } if (load_initrd && machine->initrd_filename) { - riscv_load_initrd(machine, kernel_entry); + riscv_load_initrd(machine, info); } if (fdt && machine->kernel_cmdline && *machine->kernel_cmdline) { qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", machine->kernel_cmdline); } - - return kernel_entry; } /* @@ -292,11 +309,12 @@ target_ulong riscv_load_kernel(MachineState *machine, * The FDT is fdt_packed() during the calculation. 
*/ uint64_t riscv_compute_fdt_addr(hwaddr dram_base, hwaddr dram_size, - MachineState *ms) + MachineState *ms, RISCVBootInfo *info) { int ret = fdt_pack(ms->fdt); hwaddr dram_end, temp; int fdtsize; + uint64_t dtb_start, dtb_start_limit; /* Should only fail if we've built a corrupted tree */ g_assert(ret == 0); @@ -307,6 +325,17 @@ uint64_t riscv_compute_fdt_addr(hwaddr dram_base, hwaddr dram_size, exit(1); } + if (info->initrd_size) { + /* If initrd is successfully loaded, place DTB after it. */ + dtb_start_limit = info->initrd_start + info->initrd_size; + } else if (info->kernel_size) { + /* If only kernel is successfully loaded, place DTB after it. */ + dtb_start_limit = info->image_high_addr; + } else { + /* Otherwise, do not check DTB overlapping */ + dtb_start_limit = 0; + } + /* * A dram_size == 0, usually from a MemMapEntry[].size element, * means that the DRAM block goes all the way to ms->ram_size. @@ -316,13 +345,24 @@ uint64_t riscv_compute_fdt_addr(hwaddr dram_base, hwaddr dram_size, /* * We should put fdt as far as possible to avoid kernel/initrd overwriting - * its content. But it should be addressable by 32 bit system as well. - * Thus, put it at an 2MB aligned address that less than fdt size from the - * end of dram or 3GB whichever is lesser. + * its content. But it should be addressable by 32 bit system as well in RV32. + * Thus, put it near to the end of dram in RV64, and put it near to the end + * of dram or 3GB whichever is lesser in RV32. */ - temp = (dram_base < 3072 * MiB) ? MIN(dram_end, 3072 * MiB) : dram_end; + if (!info->is_32bit) { + temp = dram_end; + } else { + temp = (dram_base < 3072 * MiB) ? MIN(dram_end, 3072 * MiB) : dram_end; + } - return QEMU_ALIGN_DOWN(temp - fdtsize, 2 * MiB); + dtb_start = QEMU_ALIGN_DOWN(temp - fdtsize, 2 * MiB); + + if (dtb_start_limit && (dtb_start < dtb_start_limit)) { + error_report("No enough memory to place DTB after kernel/initrd"); + exit(1); + } + + return dtb_start; } /* @@ -334,35 +374,39 @@ void riscv_load_fdt(hwaddr fdt_addr, void *fdt) uint32_t fdtsize = fdt_totalsize(fdt); /* copy in the device tree */ - qemu_fdt_dumpdtb(fdt, fdtsize); - rom_add_blob_fixed_as("fdt", fdt, fdtsize, fdt_addr, &address_space_memory); qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds, rom_ptr_for_as(&address_space_memory, fdt_addr, fdtsize)); } -void riscv_rom_copy_firmware_info(MachineState *machine, hwaddr rom_base, - hwaddr rom_size, uint32_t reset_vec_size, +void riscv_rom_copy_firmware_info(MachineState *machine, + RISCVHartArrayState *harts, + hwaddr rom_base, hwaddr rom_size, + uint32_t reset_vec_size, uint64_t kernel_entry) { + struct fw_dynamic_info32 dinfo32; struct fw_dynamic_info dinfo; size_t dinfo_len; - if (sizeof(dinfo.magic) == 4) { - dinfo.magic = cpu_to_le32(FW_DYNAMIC_INFO_MAGIC_VALUE); - dinfo.version = cpu_to_le32(FW_DYNAMIC_INFO_VERSION); - dinfo.next_mode = cpu_to_le32(FW_DYNAMIC_INFO_NEXT_MODE_S); - dinfo.next_addr = cpu_to_le32(kernel_entry); + if (riscv_is_32bit(harts)) { + dinfo32.magic = cpu_to_le32(FW_DYNAMIC_INFO_MAGIC_VALUE); + dinfo32.version = cpu_to_le32(FW_DYNAMIC_INFO_VERSION); + dinfo32.next_mode = cpu_to_le32(FW_DYNAMIC_INFO_NEXT_MODE_S); + dinfo32.next_addr = cpu_to_le32(kernel_entry); + dinfo32.options = 0; + dinfo32.boot_hart = 0; + dinfo_len = sizeof(dinfo32); } else { dinfo.magic = cpu_to_le64(FW_DYNAMIC_INFO_MAGIC_VALUE); dinfo.version = cpu_to_le64(FW_DYNAMIC_INFO_VERSION); dinfo.next_mode = cpu_to_le64(FW_DYNAMIC_INFO_NEXT_MODE_S); dinfo.next_addr = cpu_to_le64(kernel_entry); + 
dinfo.options = 0; + dinfo.boot_hart = 0; + dinfo_len = sizeof(dinfo); } - dinfo.options = 0; - dinfo.boot_hart = 0; - dinfo_len = sizeof(dinfo); /** * copy the dynamic firmware info. This information is specific to @@ -374,7 +418,10 @@ void riscv_rom_copy_firmware_info(MachineState *machine, hwaddr rom_base, exit(1); } - rom_add_blob_fixed_as("mrom.finfo", &dinfo, dinfo_len, + rom_add_blob_fixed_as("mrom.finfo", + riscv_is_32bit(harts) ? + (void *)&dinfo32 : (void *)&dinfo, + dinfo_len, rom_base + reset_vec_size, &address_space_memory); } @@ -430,7 +477,9 @@ void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts } rom_add_blob_fixed_as("mrom.reset", reset_vec, sizeof(reset_vec), rom_base, &address_space_memory); - riscv_rom_copy_firmware_info(machine, rom_base, rom_size, sizeof(reset_vec), + riscv_rom_copy_firmware_info(machine, harts, + rom_base, rom_size, + sizeof(reset_vec), kernel_entry); } diff --git a/hw/riscv/meson.build b/hw/riscv/meson.build index f872674093a..2a8d5b136cc 100644 --- a/hw/riscv/meson.build +++ b/hw/riscv/meson.build @@ -10,5 +10,9 @@ riscv_ss.add(when: 'CONFIG_SIFIVE_U', if_true: files('sifive_u.c')) riscv_ss.add(when: 'CONFIG_SPIKE', if_true: files('spike.c')) riscv_ss.add(when: 'CONFIG_MICROCHIP_PFSOC', if_true: files('microchip_pfsoc.c')) riscv_ss.add(when: 'CONFIG_ACPI', if_true: files('virt-acpi-build.c')) +riscv_ss.add(when: 'CONFIG_RISCV_IOMMU', if_true: files( + 'riscv-iommu.c', 'riscv-iommu-pci.c', 'riscv-iommu-sys.c', 'riscv-iommu-hpm.c')) +riscv_ss.add(when: 'CONFIG_MICROBLAZE_V', if_true: files('microblaze-v-generic.c')) +riscv_ss.add(when: 'CONFIG_XIANGSHAN_KUNMINGHU', if_true: files('xiangshan_kmh.c')) hw_arch += {'riscv': riscv_ss} diff --git a/hw/riscv/microblaze-v-generic.c b/hw/riscv/microblaze-v-generic.c new file mode 100644 index 00000000000..e863c50cbc9 --- /dev/null +++ b/hw/riscv/microblaze-v-generic.c @@ -0,0 +1,189 @@ +/* + * QEMU model of Microblaze V generic board. + * + * based on hw/microblaze/petalogix_ml605_mmu.c + * + * Copyright (c) 2011 Michal Simek + * Copyright (c) 2011 PetaLogix + * Copyright (c) 2009 Edgar E. Iglesias. + * Copyright (C) 2024, Advanced Micro Devices, Inc. + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Written by Sai Pavan Boddu . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/sysbus.h" +#include "system/system.h" +#include "net/net.h" +#include "hw/boards.h" +#include "hw/char/serial-mm.h" +#include "system/address-spaces.h" +#include "hw/char/xilinx_uartlite.h" +#include "hw/misc/unimp.h" + +#define LMB_BRAM_SIZE (128 * KiB) +#define MEMORY_BASEADDR 0x80000000 +#define INTC_BASEADDR 0x41200000 +#define TIMER_BASEADDR 0x41c00000 +#define TIMER_BASEADDR2 0x41c10000 +#define UARTLITE_BASEADDR 0x40600000 +#define ETHLITE_BASEADDR 0x40e00000 +#define UART16550_BASEADDR 0x44a10000 +#define AXIENET_BASEADDR 0x40c00000 +#define AXIDMA_BASEADDR 0x41e00000 +#define GPIO_BASEADDR 0x40000000 +#define GPIO_BASEADDR2 0x40010000 +#define GPIO_BASEADDR3 0x40020000 +#define I2C_BASEADDR 0x40800000 +#define QSPI_BASEADDR 0x44a00000 + +#define TIMER_IRQ 0 +#define UARTLITE_IRQ 1 +#define UART16550_IRQ 4 +#define ETHLITE_IRQ 5 +#define TIMER_IRQ2 6 +#define AXIENET_IRQ 7 +#define AXIDMA_IRQ1 8 +#define AXIDMA_IRQ0 9 + +static void mb_v_generic_init(MachineState *machine) +{ + ram_addr_t ram_size = machine->ram_size; + DeviceState *dev, *dma, *eth0; + Object *ds, *cs; + int i; + RISCVCPU *cpu; + hwaddr ddr_base = MEMORY_BASEADDR; + MemoryRegion *phys_lmb_bram = g_new(MemoryRegion, 1); + MemoryRegion *phys_ram = g_new(MemoryRegion, 1); + qemu_irq irq[32]; + MemoryRegion *sysmem = get_system_memory(); + + cpu = RISCV_CPU(object_new(machine->cpu_type)); + object_property_set_bool(OBJECT(cpu), "h", false, NULL); + object_property_set_bool(OBJECT(cpu), "d", false, NULL); + qdev_realize(DEVICE(cpu), NULL, &error_abort); + /* Attach emulated BRAM through the LMB. */ + memory_region_init_ram(phys_lmb_bram, NULL, + "mb_v.lmb_bram", LMB_BRAM_SIZE, + &error_fatal); + memory_region_add_subregion(sysmem, 0x00000000, phys_lmb_bram); + + memory_region_init_ram(phys_ram, NULL, "mb_v.ram", + ram_size, &error_fatal); + memory_region_add_subregion(sysmem, ddr_base, phys_ram); + + dev = qdev_new("xlnx.xps-intc"); + qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE); + qdev_prop_set_uint32(dev, "kind-of-intr", + 1 << UARTLITE_IRQ); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + qdev_get_gpio_in(DEVICE(cpu), 11)); + for (i = 0; i < 32; i++) { + irq[i] = qdev_get_gpio_in(dev, i); + } + + /* Uartlite */ + dev = qdev_new(TYPE_XILINX_UARTLITE); + qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE); + qdev_prop_set_chr(dev, "chardev", serial_hd(0)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, UARTLITE_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[UARTLITE_IRQ]); + + /* Full uart */ + serial_mm_init(sysmem, UART16550_BASEADDR + 0x1000, 2, + irq[UART16550_IRQ], 115200, serial_hd(1), + DEVICE_LITTLE_ENDIAN); + + /* 2 timers at irq 0 @ 100 Mhz. */ + dev = qdev_new("xlnx.xps-timer"); + qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE); + qdev_prop_set_uint32(dev, "one-timer-only", 0); + qdev_prop_set_uint32(dev, "clock-frequency", 100000000); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, TIMER_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]); + + /* 2 timers at irq 3 @ 100 Mhz. 
*/ + dev = qdev_new("xlnx.xps-timer"); + qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE); + qdev_prop_set_uint32(dev, "one-timer-only", 0); + qdev_prop_set_uint32(dev, "clock-frequency", 100000000); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, TIMER_BASEADDR2); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ2]); + + /* Emaclite */ + dev = qdev_new("xlnx.xps-ethernetlite"); + qdev_prop_set_enum(dev, "endianness", ENDIAN_MODE_LITTLE); + qemu_configure_nic_device(dev, true, NULL); + qdev_prop_set_uint32(dev, "tx-ping-pong", 0); + qdev_prop_set_uint32(dev, "rx-ping-pong", 0); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, ETHLITE_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[ETHLITE_IRQ]); + + /* axi ethernet and dma initialization. */ + eth0 = qdev_new("xlnx.axi-ethernet"); + dma = qdev_new("xlnx.axi-dma"); + + /* FIXME: attach to the sysbus instead */ + object_property_add_child(qdev_get_machine(), "xilinx-eth", OBJECT(eth0)); + object_property_add_child(qdev_get_machine(), "xilinx-dma", OBJECT(dma)); + + ds = object_property_get_link(OBJECT(dma), + "axistream-connected-target", NULL); + cs = object_property_get_link(OBJECT(dma), + "axistream-control-connected-target", NULL); + qemu_configure_nic_device(eth0, true, NULL); + qdev_prop_set_uint32(eth0, "rxmem", 0x1000); + qdev_prop_set_uint32(eth0, "txmem", 0x1000); + object_property_set_link(OBJECT(eth0), "axistream-connected", ds, + &error_abort); + object_property_set_link(OBJECT(eth0), "axistream-control-connected", cs, + &error_abort); + sysbus_realize_and_unref(SYS_BUS_DEVICE(eth0), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(eth0), 0, AXIENET_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(eth0), 0, irq[AXIENET_IRQ]); + + ds = object_property_get_link(OBJECT(eth0), + "axistream-connected-target", NULL); + cs = object_property_get_link(OBJECT(eth0), + "axistream-control-connected-target", NULL); + qdev_prop_set_uint32(dma, "freqhz", 100000000); + object_property_set_link(OBJECT(dma), "axistream-connected", ds, + &error_abort); + object_property_set_link(OBJECT(dma), "axistream-control-connected", cs, + &error_abort); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dma), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dma), 0, AXIDMA_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(dma), 0, irq[AXIDMA_IRQ0]); + sysbus_connect_irq(SYS_BUS_DEVICE(dma), 1, irq[AXIDMA_IRQ1]); + + /* unimplemented devices */ + create_unimplemented_device("gpio", GPIO_BASEADDR, 0x10000); + create_unimplemented_device("gpio2", GPIO_BASEADDR2, 0x10000); + create_unimplemented_device("gpio3", GPIO_BASEADDR3, 0x10000); + create_unimplemented_device("i2c", I2C_BASEADDR, 0x10000); + create_unimplemented_device("qspi", QSPI_BASEADDR, 0x10000); +} + +static void mb_v_generic_machine_init(MachineClass *mc) +{ + mc->desc = "AMD Microblaze-V generic platform"; + mc->init = mb_v_generic_init; + mc->min_cpus = 1; + mc->max_cpus = 1; + mc->default_cpu_type = TYPE_RISCV_CPU_BASE; + mc->default_cpus = 1; +} + +DEFINE_MACHINE("amd-microblaze-v-generic", mb_v_generic_machine_init) diff --git a/hw/riscv/microchip_pfsoc.c b/hw/riscv/microchip_pfsoc.c index 7725dfbde55..2e74783fce2 100644 --- a/hw/riscv/microchip_pfsoc.c +++ b/hw/riscv/microchip_pfsoc.c @@ -39,6 +39,7 @@ #include "qemu/units.h" #include "qemu/cutils.h" #include "qapi/error.h" +#include "qapi/visitor.h" #include "hw/boards.h" #include "hw/loader.h" #include "hw/sysbus.h" 
@@ -51,8 +52,8 @@ #include "hw/riscv/microchip_pfsoc.h" #include "hw/intc/riscv_aclint.h" #include "hw/intc/sifive_plic.h" -#include "sysemu/device_tree.h" -#include "sysemu/sysemu.h" +#include "system/device_tree.h" +#include "system/system.h" /* * The BIOS image used by this machine is called Hart Software Services (HSS). @@ -61,9 +62,6 @@ #define BIOS_FILENAME "hss.bin" #define RESET_VECTOR 0x20220000 -/* CLINT timebase frequency */ -#define CLINT_TIMEBASE_FREQ 1000000 - /* GEM version */ #define GEM_REVISION 0x0107010c @@ -193,6 +191,7 @@ static void microchip_pfsoc_soc_instance_init(Object *obj) static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) { MachineState *ms = MACHINE(qdev_get_machine()); + MicrochipIcicleKitState *iks = MICROCHIP_ICICLE_KIT_MACHINE(ms); MicrochipPFSoCState *s = MICROCHIP_PFSOC(dev); const MemMapEntry *memmap = microchip_pfsoc_memmap; MemoryRegion *system_memory = get_system_memory(); @@ -253,7 +252,7 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) memmap[MICROCHIP_PFSOC_CLINT].base + RISCV_ACLINT_SWI_SIZE, RISCV_ACLINT_DEFAULT_MTIMER_SIZE, 0, ms->smp.cpus, RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, - CLINT_TIMEBASE_FREQ, false); + iks->clint_timebase_freq, false); /* L2 cache controller */ create_unimplemented_device("microchip.pfsoc.l2cc", @@ -479,7 +478,7 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) qspi_xip_mem); } -static void microchip_pfsoc_soc_class_init(ObjectClass *oc, void *data) +static void microchip_pfsoc_soc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -516,11 +515,11 @@ static void microchip_icicle_kit_machine_init(MachineState *machine) uint64_t mem_low_size, mem_high_size; hwaddr firmware_load_addr; const char *firmware_name; - bool kernel_as_payload = false; target_ulong firmware_end_addr, kernel_start_addr; uint64_t kernel_entry; - uint32_t fdt_load_addr; + uint64_t fdt_load_addr; DriveInfo *dinfo = drive_get(IF_SD, 0, 0); + RISCVBootInfo boot_info; /* Sanity check on RAM size */ if (machine->ram_size < mc->default_ram_size) { @@ -578,65 +577,135 @@ static void microchip_icicle_kit_machine_init(MachineState *machine) } /* - * We follow the following table to select which payload we execute. - * - * -bios | -kernel | payload - * -------+------------+-------- - * N | N | HSS - * Y | don't care | HSS - * N | Y | kernel + * We follow the following table to select which firmware we use. * - * This ensures backwards compatibility with how we used to expose -bios - * to users but allows them to run through direct kernel booting as well. - * - * When -kernel is used for direct boot, -dtb must be present to provide - * a valid device tree for the board, as we don't generate device tree. 
+ * -bios | -kernel | firmware + * --------------+------------+-------- + * none | N | error + * none | Y | kernel + * NULL, default | N | BIOS_FILENAME + * NULL, default | Y | RISCV64_BIOS_BIN + * other | don't care | other */ - - if (machine->kernel_filename && machine->dtb) { - int fdt_size; - machine->fdt = load_device_tree(machine->dtb, &fdt_size); - if (!machine->fdt) { - error_report("load_device_tree() failed"); + if (machine->firmware && !strcmp(machine->firmware, "none")) { + if (!machine->kernel_filename) { + error_report("for -bios none, a kernel is required"); exit(1); } - firmware_name = RISCV64_BIOS_BIN; - firmware_load_addr = memmap[MICROCHIP_PFSOC_DRAM_LO].base; - kernel_as_payload = true; - } - - if (!kernel_as_payload) { - firmware_name = BIOS_FILENAME; + firmware_name = NULL; + firmware_load_addr = RESET_VECTOR; + } else if (!machine->firmware || !strcmp(machine->firmware, "default")) { + if (machine->kernel_filename) { + firmware_name = RISCV64_BIOS_BIN; + firmware_load_addr = memmap[MICROCHIP_PFSOC_DRAM_LO].base; + } else { + firmware_name = BIOS_FILENAME; + firmware_load_addr = RESET_VECTOR; + } + } else { + firmware_name = machine->firmware; firmware_load_addr = RESET_VECTOR; } - /* Load the firmware */ - firmware_end_addr = riscv_find_and_load_firmware(machine, firmware_name, - firmware_load_addr, NULL); + /* Load the firmware if necessary */ + firmware_end_addr = firmware_load_addr; + if (firmware_name) { + char *filename = riscv_find_firmware(firmware_name, NULL); + if (filename) { + firmware_end_addr = riscv_load_firmware(filename, + &firmware_load_addr, NULL); + g_free(filename); + } + } - if (kernel_as_payload) { - kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus, + riscv_boot_info_init(&boot_info, &s->soc.u_cpus); + if (machine->kernel_filename) { + kernel_start_addr = riscv_calc_kernel_start_addr(&boot_info, firmware_end_addr); - kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus, - kernel_start_addr, true, NULL); + riscv_load_kernel(machine, &boot_info, kernel_start_addr, + true, NULL); + kernel_entry = boot_info.image_low_addr; + + if (machine->dtb) { + int fdt_size; + machine->fdt = load_device_tree(machine->dtb, &fdt_size); + if (!machine->fdt) { + error_report("load_device_tree() failed"); + exit(1); + } + + /* Compute the FDT load address in DRAM */ + hwaddr kernel_ram_base = memmap[MICROCHIP_PFSOC_DRAM_LO].base; + hwaddr kernel_ram_size = memmap[MICROCHIP_PFSOC_DRAM_LO].size; + + if (kernel_entry - kernel_ram_base >= kernel_ram_size) { + kernel_ram_base = memmap[MICROCHIP_PFSOC_DRAM_HI].base; + kernel_ram_size = mem_high_size; + } + + fdt_load_addr = riscv_compute_fdt_addr(kernel_ram_base, kernel_ram_size, + machine, &boot_info); + riscv_load_fdt(fdt_load_addr, machine->fdt); + } else { + warn_report_once("The QEMU microchip-icicle-kit machine does not " + "generate a device tree, so no device tree is " + "being provided to the guest."); + fdt_load_addr = 0; + } - /* Compute the fdt load address in dram */ - fdt_load_addr = riscv_compute_fdt_addr(memmap[MICROCHIP_PFSOC_DRAM_LO].base, - memmap[MICROCHIP_PFSOC_DRAM_LO].size, - machine); - riscv_load_fdt(fdt_load_addr, machine->fdt); + hwaddr start_addr; + if (firmware_name) { + start_addr = firmware_load_addr; + } else { + start_addr = kernel_entry; + } /* Load the reset vector */ - riscv_setup_rom_reset_vec(machine, &s->soc.u_cpus, firmware_load_addr, + riscv_setup_rom_reset_vec(machine, &s->soc.u_cpus, start_addr, memmap[MICROCHIP_PFSOC_ENVM_DATA].base, 
memmap[MICROCHIP_PFSOC_ENVM_DATA].size, kernel_entry, fdt_load_addr); } } -static void microchip_icicle_kit_machine_class_init(ObjectClass *oc, void *data) +static void microchip_icicle_kit_set_clint_timebase_freq(Object *obj, + Visitor *v, + const char *name, + void *opaque, + Error **errp) +{ + MicrochipIcicleKitState *s = MICROCHIP_ICICLE_KIT_MACHINE(obj); + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + s->clint_timebase_freq = value; +} + +static void microchip_icicle_kit_get_clint_timebase_freq(Object *obj, + Visitor *v, + const char *name, + void *opaque, + Error **errp) +{ + MicrochipIcicleKitState *s = MICROCHIP_ICICLE_KIT_MACHINE(obj); + uint32_t value = s->clint_timebase_freq; + + visit_type_uint32(v, name, &value, errp); +} + +static void microchip_icicle_kit_machine_instance_init(Object *obj) +{ + MicrochipIcicleKitState *m = MICROCHIP_ICICLE_KIT_MACHINE(obj); + m->clint_timebase_freq = 1000000; +} + +static void microchip_icicle_kit_machine_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -647,6 +716,7 @@ static void microchip_icicle_kit_machine_class_init(ObjectClass *oc, void *data) mc->min_cpus = MICROCHIP_PFSOC_MANAGEMENT_CPU_COUNT + 1; mc->default_cpus = mc->min_cpus; mc->default_ram_id = "microchip.icicle.kit.ram"; + mc->auto_create_sdcard = true; /* * Map 513 MiB high memory, the minimum required high memory size, because @@ -656,12 +726,20 @@ static void microchip_icicle_kit_machine_class_init(ObjectClass *oc, void *data) * See memory_tests() in mss_ddr.c in the HSS source code. */ mc->default_ram_size = 1537 * MiB; + + object_class_property_add(oc, "clint-timebase-frequency", "uint32_t", + microchip_icicle_kit_get_clint_timebase_freq, + microchip_icicle_kit_set_clint_timebase_freq, + NULL, NULL); + object_class_property_set_description(oc, "clint-timebase-frequency", + "Set CLINT timebase frequency in Hz."); } static const TypeInfo microchip_icicle_kit_machine_typeinfo = { .name = MACHINE_TYPE_NAME("microchip-icicle-kit"), .parent = TYPE_MACHINE, .class_init = microchip_icicle_kit_machine_class_init, + .instance_init = microchip_icicle_kit_machine_instance_init, .instance_size = sizeof(MicrochipIcicleKitState), }; diff --git a/hw/riscv/numa.c b/hw/riscv/numa.c index cf686f4ff1e..7a7b0120077 100644 --- a/hw/riscv/numa.c +++ b/hw/riscv/numa.c @@ -23,7 +23,7 @@ #include "hw/boards.h" #include "hw/qdev-properties.h" #include "hw/riscv/numa.h" -#include "sysemu/device_tree.h" +#include "system/device_tree.h" static bool numa_enabled(const MachineState *ms) { diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c index 436503f1ba6..d369a8a7dcd 100644 --- a/hw/riscv/opentitan.c +++ b/hw/riscv/opentitan.c @@ -27,7 +27,8 @@ #include "hw/misc/unimp.h" #include "hw/riscv/boot.h" #include "qemu/units.h" -#include "sysemu/sysemu.h" +#include "system/system.h" +#include "system/address-spaces.h" /* * This version of the OpenTitan machine currently supports @@ -81,6 +82,7 @@ static void opentitan_machine_init(MachineState *machine) OpenTitanState *s = OPENTITAN_MACHINE(machine); const MemMapEntry *memmap = ibex_memmap; MemoryRegion *sys_mem = get_system_memory(); + RISCVBootInfo boot_info; if (machine->ram_size != mc->default_ram_size) { char *sz = size_to_str(mc->default_ram_size); @@ -98,17 +100,19 @@ static void opentitan_machine_init(MachineState *machine) memmap[IBEX_DEV_RAM].base, machine->ram); if (machine->firmware) { - riscv_load_firmware(machine->firmware, memmap[IBEX_DEV_RAM].base, NULL); 
+ hwaddr firmware_load_addr = memmap[IBEX_DEV_RAM].base; + riscv_load_firmware(machine->firmware, &firmware_load_addr, NULL); } + riscv_boot_info_init(&boot_info, &s->soc.cpus); if (machine->kernel_filename) { - riscv_load_kernel(machine, &s->soc.cpus, + riscv_load_kernel(machine, &boot_info, memmap[IBEX_DEV_RAM].base, false, NULL); } } -static void opentitan_machine_class_init(ObjectClass *oc, void *data) +static void opentitan_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -305,12 +309,11 @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp) memmap[IBEX_DEV_IBEX_CFG].base, memmap[IBEX_DEV_IBEX_CFG].size); } -static Property lowrisc_ibex_soc_props[] = { +static const Property lowrisc_ibex_soc_props[] = { DEFINE_PROP_UINT32("resetvec", LowRISCIbexSoCState, resetvec, 0x20000400), - DEFINE_PROP_END_OF_LIST() }; -static void lowrisc_ibex_soc_class_init(ObjectClass *oc, void *data) +static void lowrisc_ibex_soc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/riscv/riscv-iommu-bits.h b/hw/riscv/riscv-iommu-bits.h new file mode 100644 index 00000000000..47fe01bee58 --- /dev/null +++ b/hw/riscv/riscv-iommu-bits.h @@ -0,0 +1,469 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright © 2022-2023 Rivos Inc. + * Copyright © 2023 FORTH-ICS/CARV + * Copyright © 2023 RISC-V IOMMU Task Group + * + * RISC-V IOMMU - Register Layout and Data Structures. + * + * Based on the IOMMU spec version 1.0, 3/2023 + * https://github.com/riscv-non-isa/riscv-iommu + */ + +#ifndef HW_RISCV_IOMMU_BITS_H +#define HW_RISCV_IOMMU_BITS_H + +#define RISCV_IOMMU_SPEC_DOT_VER 0x010 + +#ifndef GENMASK_ULL +#define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h) + (l))) << (l)) +#endif + +/* + * struct riscv_iommu_fq_record - Fault/Event Queue Record + * See section 3.2 for more info. + */ +struct riscv_iommu_fq_record { + uint64_t hdr; + uint64_t _reserved; + uint64_t iotval; + uint64_t iotval2; +}; +/* Header fields */ +#define RISCV_IOMMU_FQ_HDR_CAUSE GENMASK_ULL(11, 0) +#define RISCV_IOMMU_FQ_HDR_PID GENMASK_ULL(31, 12) +#define RISCV_IOMMU_FQ_HDR_PV BIT_ULL(32) +#define RISCV_IOMMU_FQ_HDR_TTYPE GENMASK_ULL(39, 34) +#define RISCV_IOMMU_FQ_HDR_DID GENMASK_ULL(63, 40) + +/* + * struct riscv_iommu_pq_record - PCIe Page Request record + * For more infos on the PCIe Page Request queue see chapter 3.3. 
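 *
 * (Editor's note, not part of the original patch: hdr identifies the
 * requester, i.e. DID plus PID/PRIV/EXEC when PV is set, while payload
 * carries the requested page address (UADDR), the PRG index and the
 * R/W/L bits, as laid out by the field masks that follow.)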
+ */ +struct riscv_iommu_pq_record { + uint64_t hdr; + uint64_t payload; +}; +/* Header fields */ +#define RISCV_IOMMU_PREQ_HDR_PID GENMASK_ULL(31, 12) +#define RISCV_IOMMU_PREQ_HDR_PV BIT_ULL(32) +#define RISCV_IOMMU_PREQ_HDR_PRIV BIT_ULL(33) +#define RISCV_IOMMU_PREQ_HDR_EXEC BIT_ULL(34) +#define RISCV_IOMMU_PREQ_HDR_DID GENMASK_ULL(63, 40) + +/* Payload fields */ +#define RISCV_IOMMU_PREQ_PAYLOAD_R BIT_ULL(0) +#define RISCV_IOMMU_PREQ_PAYLOAD_W BIT_ULL(1) +#define RISCV_IOMMU_PREQ_PAYLOAD_L BIT_ULL(2) +#define RISCV_IOMMU_PREQ_PAYLOAD_M GENMASK_ULL(2, 0) +#define RISCV_IOMMU_PREQ_PRG_INDEX GENMASK_ULL(11, 3) +#define RISCV_IOMMU_PREQ_UADDR GENMASK_ULL(63, 12) + +/* Common field positions */ +#define RISCV_IOMMU_PPN_FIELD GENMASK_ULL(53, 10) +#define RISCV_IOMMU_QUEUE_LOGSZ_FIELD GENMASK_ULL(4, 0) +#define RISCV_IOMMU_QUEUE_INDEX_FIELD GENMASK_ULL(31, 0) +#define RISCV_IOMMU_QUEUE_ENABLE BIT(0) +#define RISCV_IOMMU_QUEUE_INTR_ENABLE BIT(1) +#define RISCV_IOMMU_QUEUE_MEM_FAULT BIT(8) +#define RISCV_IOMMU_QUEUE_OVERFLOW BIT(9) +#define RISCV_IOMMU_QUEUE_ACTIVE BIT(16) +#define RISCV_IOMMU_QUEUE_BUSY BIT(17) +#define RISCV_IOMMU_ATP_PPN_FIELD GENMASK_ULL(43, 0) +#define RISCV_IOMMU_ATP_MODE_FIELD GENMASK_ULL(63, 60) + +/* 5.3 IOMMU Capabilities (64bits) */ +#define RISCV_IOMMU_REG_CAP 0x0000 +#define RISCV_IOMMU_CAP_VERSION GENMASK_ULL(7, 0) +#define RISCV_IOMMU_CAP_SV32 BIT_ULL(8) +#define RISCV_IOMMU_CAP_SV39 BIT_ULL(9) +#define RISCV_IOMMU_CAP_SV48 BIT_ULL(10) +#define RISCV_IOMMU_CAP_SV57 BIT_ULL(11) +#define RISCV_IOMMU_CAP_SVRSW60T59B BIT_ULL(14) +#define RISCV_IOMMU_CAP_SV32X4 BIT_ULL(16) +#define RISCV_IOMMU_CAP_SV39X4 BIT_ULL(17) +#define RISCV_IOMMU_CAP_SV48X4 BIT_ULL(18) +#define RISCV_IOMMU_CAP_SV57X4 BIT_ULL(19) +#define RISCV_IOMMU_CAP_MSI_FLAT BIT_ULL(22) +#define RISCV_IOMMU_CAP_MSI_MRIF BIT_ULL(23) +#define RISCV_IOMMU_CAP_ATS BIT_ULL(25) +#define RISCV_IOMMU_CAP_T2GPA BIT_ULL(26) +#define RISCV_IOMMU_CAP_IGS GENMASK_ULL(29, 28) +#define RISCV_IOMMU_CAP_HPM BIT_ULL(30) +#define RISCV_IOMMU_CAP_DBG BIT_ULL(31) +#define RISCV_IOMMU_CAP_PAS GENMASK_ULL(37, 32) +#define RISCV_IOMMU_CAP_PD8 BIT_ULL(38) +#define RISCV_IOMMU_CAP_PD17 BIT_ULL(39) +#define RISCV_IOMMU_CAP_PD20 BIT_ULL(40) + +enum riscv_iommu_igs_modes { + RISCV_IOMMU_CAP_IGS_MSI = 0, + RISCV_IOMMU_CAP_IGS_WSI, + RISCV_IOMMU_CAP_IGS_BOTH +}; + +/* 5.4 Features control register (32bits) */ +#define RISCV_IOMMU_REG_FCTL 0x0008 +#define RISCV_IOMMU_FCTL_BE BIT(0) +#define RISCV_IOMMU_FCTL_WSI BIT(1) +#define RISCV_IOMMU_FCTL_GXL BIT(2) + +/* 5.5 Device-directory-table pointer (64bits) */ +#define RISCV_IOMMU_REG_DDTP 0x0010 +#define RISCV_IOMMU_DDTP_MODE GENMASK_ULL(3, 0) +#define RISCV_IOMMU_DDTP_BUSY BIT_ULL(4) +#define RISCV_IOMMU_DDTP_PPN RISCV_IOMMU_PPN_FIELD + +enum riscv_iommu_ddtp_modes { + RISCV_IOMMU_DDTP_MODE_OFF = 0, + RISCV_IOMMU_DDTP_MODE_BARE = 1, + RISCV_IOMMU_DDTP_MODE_1LVL = 2, + RISCV_IOMMU_DDTP_MODE_2LVL = 3, + RISCV_IOMMU_DDTP_MODE_3LVL = 4, + RISCV_IOMMU_DDTP_MODE_MAX = 4 +}; + +/* 5.6 Command Queue Base (64bits) */ +#define RISCV_IOMMU_REG_CQB 0x0018 +#define RISCV_IOMMU_CQB_LOG2SZ RISCV_IOMMU_QUEUE_LOGSZ_FIELD +#define RISCV_IOMMU_CQB_PPN RISCV_IOMMU_PPN_FIELD + +/* 5.7 Command Queue head (32bits) */ +#define RISCV_IOMMU_REG_CQH 0x0020 + +/* 5.8 Command Queue tail (32bits) */ +#define RISCV_IOMMU_REG_CQT 0x0024 + +/* 5.9 Fault Queue Base (64bits) */ +#define RISCV_IOMMU_REG_FQB 0x0028 +#define RISCV_IOMMU_FQB_LOG2SZ RISCV_IOMMU_QUEUE_LOGSZ_FIELD +#define RISCV_IOMMU_FQB_PPN RISCV_IOMMU_PPN_FIELD 
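The command and fault queue base registers above (and the page-request queue base below) share one layout: a LOG2SZ field in the low bits and a PPN field giving the physical page of the in-memory ring. The sketch below shows how such a value would typically be decoded; it is an illustration rather than part of the patch, and it assumes the generic get_field() helper from target/riscv/cpu_bits.h (which the IOMMU .c files include), 4 KiB pages, and the spec convention that the size field stores log2(number of entries) - 1.

/* Illustrative decode of a command/fault/page-request queue base value. */
static inline uint64_t queue_base_address(uint64_t qb)
{
    /* PPN occupies bits 53:10; shift by 12 assuming 4 KiB pages */
    return get_field(qb, RISCV_IOMMU_PPN_FIELD) << 12;
}

static inline uint64_t queue_num_entries(uint64_t qb)
{
    /* the field encodes log2(entries) - 1, so entries = 2^(field + 1) */
    return 2ULL << get_field(qb, RISCV_IOMMU_QUEUE_LOGSZ_FIELD);
}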
+ +/* 5.10 Fault Queue Head (32bits) */ +#define RISCV_IOMMU_REG_FQH 0x0030 + +/* 5.11 Fault Queue tail (32bits) */ +#define RISCV_IOMMU_REG_FQT 0x0034 + +/* 5.12 Page Request Queue base (64bits) */ +#define RISCV_IOMMU_REG_PQB 0x0038 +#define RISCV_IOMMU_PQB_LOG2SZ RISCV_IOMMU_QUEUE_LOGSZ_FIELD +#define RISCV_IOMMU_PQB_PPN RISCV_IOMMU_PPN_FIELD + +/* 5.13 Page Request Queue head (32bits) */ +#define RISCV_IOMMU_REG_PQH 0x0040 + +/* 5.14 Page Request Queue tail (32bits) */ +#define RISCV_IOMMU_REG_PQT 0x0044 + +/* 5.15 Command Queue CSR (32bits) */ +#define RISCV_IOMMU_REG_CQCSR 0x0048 +#define RISCV_IOMMU_CQCSR_CQEN RISCV_IOMMU_QUEUE_ENABLE +#define RISCV_IOMMU_CQCSR_CIE RISCV_IOMMU_QUEUE_INTR_ENABLE +#define RISCV_IOMMU_CQCSR_CQMF RISCV_IOMMU_QUEUE_MEM_FAULT +#define RISCV_IOMMU_CQCSR_CMD_TO BIT(9) +#define RISCV_IOMMU_CQCSR_CMD_ILL BIT(10) +#define RISCV_IOMMU_CQCSR_FENCE_W_IP BIT(11) +#define RISCV_IOMMU_CQCSR_CQON RISCV_IOMMU_QUEUE_ACTIVE +#define RISCV_IOMMU_CQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY + +/* 5.16 Fault Queue CSR (32bits) */ +#define RISCV_IOMMU_REG_FQCSR 0x004C +#define RISCV_IOMMU_FQCSR_FQEN RISCV_IOMMU_QUEUE_ENABLE +#define RISCV_IOMMU_FQCSR_FIE RISCV_IOMMU_QUEUE_INTR_ENABLE +#define RISCV_IOMMU_FQCSR_FQMF RISCV_IOMMU_QUEUE_MEM_FAULT +#define RISCV_IOMMU_FQCSR_FQOF RISCV_IOMMU_QUEUE_OVERFLOW +#define RISCV_IOMMU_FQCSR_FQON RISCV_IOMMU_QUEUE_ACTIVE +#define RISCV_IOMMU_FQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY + +/* 5.17 Page Request Queue CSR (32bits) */ +#define RISCV_IOMMU_REG_PQCSR 0x0050 +#define RISCV_IOMMU_PQCSR_PQEN RISCV_IOMMU_QUEUE_ENABLE +#define RISCV_IOMMU_PQCSR_PIE RISCV_IOMMU_QUEUE_INTR_ENABLE +#define RISCV_IOMMU_PQCSR_PQMF RISCV_IOMMU_QUEUE_MEM_FAULT +#define RISCV_IOMMU_PQCSR_PQOF RISCV_IOMMU_QUEUE_OVERFLOW +#define RISCV_IOMMU_PQCSR_PQON RISCV_IOMMU_QUEUE_ACTIVE +#define RISCV_IOMMU_PQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY + +/* 5.18 Interrupt Pending Status (32bits) */ +#define RISCV_IOMMU_REG_IPSR 0x0054 +#define RISCV_IOMMU_IPSR_CIP BIT(0) +#define RISCV_IOMMU_IPSR_FIP BIT(1) +#define RISCV_IOMMU_IPSR_PIP BIT(3) + +enum { + RISCV_IOMMU_INTR_CQ, + RISCV_IOMMU_INTR_FQ, + RISCV_IOMMU_INTR_PM, + RISCV_IOMMU_INTR_PQ, + RISCV_IOMMU_INTR_COUNT +}; + +#define RISCV_IOMMU_IOCOUNT_NUM 31 + +/* 5.19 Performance monitoring counter overflow status (32bits) */ +#define RISCV_IOMMU_REG_IOCOUNTOVF 0x0058 +#define RISCV_IOMMU_IOCOUNTOVF_CY BIT(0) + +/* 5.20 Performance monitoring counter inhibits (32bits) */ +#define RISCV_IOMMU_REG_IOCOUNTINH 0x005C +#define RISCV_IOMMU_IOCOUNTINH_CY BIT(0) + +/* 5.21 Performance monitoring cycles counter (64bits) */ +#define RISCV_IOMMU_REG_IOHPMCYCLES 0x0060 +#define RISCV_IOMMU_IOHPMCYCLES_COUNTER GENMASK_ULL(62, 0) +#define RISCV_IOMMU_IOHPMCYCLES_OVF BIT_ULL(63) + +/* 5.22 Performance monitoring event counters (31 * 64bits) */ +#define RISCV_IOMMU_REG_IOHPMCTR_BASE 0x0068 +#define RISCV_IOMMU_REG_IOHPMCTR(_n) \ + (RISCV_IOMMU_REG_IOHPMCTR_BASE + (_n * 0x8)) + +/* 5.23 Performance monitoring event selectors (31 * 64bits) */ +#define RISCV_IOMMU_REG_IOHPMEVT_BASE 0x0160 +#define RISCV_IOMMU_REG_IOHPMEVT(_n) \ + (RISCV_IOMMU_REG_IOHPMEVT_BASE + (_n * 0x8)) +#define RISCV_IOMMU_IOHPMEVT_EVENT_ID GENMASK_ULL(14, 0) +#define RISCV_IOMMU_IOHPMEVT_DMASK BIT_ULL(15) +#define RISCV_IOMMU_IOHPMEVT_PID_PSCID GENMASK_ULL(35, 16) +#define RISCV_IOMMU_IOHPMEVT_DID_GSCID GENMASK_ULL(59, 36) +#define RISCV_IOMMU_IOHPMEVT_PV_PSCV BIT_ULL(60) +#define RISCV_IOMMU_IOHPMEVT_DV_GSCV BIT_ULL(61) +#define RISCV_IOMMU_IOHPMEVT_IDT BIT_ULL(62) +#define 
RISCV_IOMMU_IOHPMEVT_OF BIT_ULL(63) + +enum RISCV_IOMMU_HPMEVENT_id { + RISCV_IOMMU_HPMEVENT_INVALID = 0, + RISCV_IOMMU_HPMEVENT_URQ = 1, + RISCV_IOMMU_HPMEVENT_TRQ = 2, + RISCV_IOMMU_HPMEVENT_ATS_RQ = 3, + RISCV_IOMMU_HPMEVENT_TLB_MISS = 4, + RISCV_IOMMU_HPMEVENT_DD_WALK = 5, + RISCV_IOMMU_HPMEVENT_PD_WALK = 6, + RISCV_IOMMU_HPMEVENT_S_VS_WALKS = 7, + RISCV_IOMMU_HPMEVENT_G_WALKS = 8, + RISCV_IOMMU_HPMEVENT_MAX = 9 +}; + +/* 5.24 Translation request IOVA (64bits) */ +#define RISCV_IOMMU_REG_TR_REQ_IOVA 0x0258 + +/* 5.25 Translation request control (64bits) */ +#define RISCV_IOMMU_REG_TR_REQ_CTL 0x0260 +#define RISCV_IOMMU_TR_REQ_CTL_GO_BUSY BIT_ULL(0) +#define RISCV_IOMMU_TR_REQ_CTL_NW BIT_ULL(3) +#define RISCV_IOMMU_TR_REQ_CTL_PID GENMASK_ULL(31, 12) +#define RISCV_IOMMU_TR_REQ_CTL_DID GENMASK_ULL(63, 40) + +/* 5.26 Translation request response (64bits) */ +#define RISCV_IOMMU_REG_TR_RESPONSE 0x0268 +#define RISCV_IOMMU_TR_RESPONSE_FAULT BIT_ULL(0) +#define RISCV_IOMMU_TR_RESPONSE_S BIT_ULL(9) +#define RISCV_IOMMU_TR_RESPONSE_PPN RISCV_IOMMU_PPN_FIELD + +/* 5.27 Interrupt cause to vector (64bits) */ +#define RISCV_IOMMU_REG_ICVEC 0x02F8 +#define RISCV_IOMMU_ICVEC_CIV GENMASK_ULL(3, 0) +#define RISCV_IOMMU_ICVEC_FIV GENMASK_ULL(7, 4) +#define RISCV_IOMMU_ICVEC_PMIV GENMASK_ULL(11, 8) +#define RISCV_IOMMU_ICVEC_PIV GENMASK_ULL(15, 12) + +/* 5.28 MSI Configuration table (32 * 64bits) */ +#define RISCV_IOMMU_REG_MSI_CONFIG 0x0300 + +#define RISCV_IOMMU_REG_SIZE 0x1000 + +#define RISCV_IOMMU_DDTE_VALID BIT_ULL(0) +#define RISCV_IOMMU_DDTE_PPN RISCV_IOMMU_PPN_FIELD + +/* Struct riscv_iommu_dc - Device Context - section 2.1 */ +struct riscv_iommu_dc { + uint64_t tc; + uint64_t iohgatp; + uint64_t ta; + uint64_t fsc; + uint64_t msiptp; + uint64_t msi_addr_mask; + uint64_t msi_addr_pattern; + uint64_t _reserved; +}; + +/* Translation control fields */ +#define RISCV_IOMMU_DC_TC_V BIT_ULL(0) +#define RISCV_IOMMU_DC_TC_EN_ATS BIT_ULL(1) +#define RISCV_IOMMU_DC_TC_EN_PRI BIT_ULL(2) +#define RISCV_IOMMU_DC_TC_T2GPA BIT_ULL(3) +#define RISCV_IOMMU_DC_TC_DTF BIT_ULL(4) +#define RISCV_IOMMU_DC_TC_PDTV BIT_ULL(5) +#define RISCV_IOMMU_DC_TC_PRPR BIT_ULL(6) +#define RISCV_IOMMU_DC_TC_GADE BIT_ULL(7) +#define RISCV_IOMMU_DC_TC_SADE BIT_ULL(8) +#define RISCV_IOMMU_DC_TC_DPE BIT_ULL(9) +#define RISCV_IOMMU_DC_TC_SBE BIT_ULL(10) +#define RISCV_IOMMU_DC_TC_SXL BIT_ULL(11) + +/* Second-stage (aka G-stage) context fields */ +#define RISCV_IOMMU_DC_IOHGATP_PPN RISCV_IOMMU_ATP_PPN_FIELD +#define RISCV_IOMMU_DC_IOHGATP_GSCID GENMASK_ULL(59, 44) +#define RISCV_IOMMU_DC_IOHGATP_MODE RISCV_IOMMU_ATP_MODE_FIELD + +enum riscv_iommu_dc_iohgatp_modes { + RISCV_IOMMU_DC_IOHGATP_MODE_BARE = 0, + RISCV_IOMMU_DC_IOHGATP_MODE_SV32X4 = 8, + RISCV_IOMMU_DC_IOHGATP_MODE_SV39X4 = 8, + RISCV_IOMMU_DC_IOHGATP_MODE_SV48X4 = 9, + RISCV_IOMMU_DC_IOHGATP_MODE_SV57X4 = 10 +}; + +/* Translation attributes fields */ +#define RISCV_IOMMU_DC_TA_PSCID GENMASK_ULL(31, 12) + +/* First-stage context fields */ +#define RISCV_IOMMU_DC_FSC_PPN RISCV_IOMMU_ATP_PPN_FIELD +#define RISCV_IOMMU_DC_FSC_MODE RISCV_IOMMU_ATP_MODE_FIELD + +/* Generic I/O MMU command structure - check section 3.1 */ +struct riscv_iommu_command { + uint64_t dword0; + uint64_t dword1; +}; + +#define RISCV_IOMMU_CMD_OPCODE GENMASK_ULL(6, 0) +#define RISCV_IOMMU_CMD_FUNC GENMASK_ULL(9, 7) + +#define RISCV_IOMMU_CMD_IOTINVAL_OPCODE 1 +#define RISCV_IOMMU_CMD_IOTINVAL_FUNC_VMA 0 +#define RISCV_IOMMU_CMD_IOTINVAL_FUNC_GVMA 1 +#define RISCV_IOMMU_CMD_IOTINVAL_AV BIT_ULL(10) +#define 
RISCV_IOMMU_CMD_IOTINVAL_PSCID GENMASK_ULL(31, 12) +#define RISCV_IOMMU_CMD_IOTINVAL_PSCV BIT_ULL(32) +#define RISCV_IOMMU_CMD_IOTINVAL_GV BIT_ULL(33) +#define RISCV_IOMMU_CMD_IOTINVAL_GSCID GENMASK_ULL(59, 44) + +#define RISCV_IOMMU_CMD_IOFENCE_OPCODE 2 +#define RISCV_IOMMU_CMD_IOFENCE_FUNC_C 0 +#define RISCV_IOMMU_CMD_IOFENCE_AV BIT_ULL(10) +#define RISCV_IOMMU_CMD_IOFENCE_DATA GENMASK_ULL(63, 32) + +#define RISCV_IOMMU_CMD_IODIR_OPCODE 3 +#define RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_DDT 0 +#define RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_PDT 1 +#define RISCV_IOMMU_CMD_IODIR_PID GENMASK_ULL(31, 12) +#define RISCV_IOMMU_CMD_IODIR_DV BIT_ULL(33) +#define RISCV_IOMMU_CMD_IODIR_DID GENMASK_ULL(63, 40) + +/* 3.1.4 I/O MMU PCIe ATS */ +#define RISCV_IOMMU_CMD_ATS_OPCODE 4 +#define RISCV_IOMMU_CMD_ATS_FUNC_INVAL 0 +#define RISCV_IOMMU_CMD_ATS_FUNC_PRGR 1 +#define RISCV_IOMMU_CMD_ATS_PID GENMASK_ULL(31, 12) +#define RISCV_IOMMU_CMD_ATS_PV BIT_ULL(32) +#define RISCV_IOMMU_CMD_ATS_DSV BIT_ULL(33) +#define RISCV_IOMMU_CMD_ATS_RID GENMASK_ULL(55, 40) +#define RISCV_IOMMU_CMD_ATS_DSEG GENMASK_ULL(63, 56) +/* dword1 is the ATS payload, two different payload types for INVAL and PRGR */ + +/* ATS.PRGR payload */ +#define RISCV_IOMMU_CMD_ATS_PRGR_RESP_CODE GENMASK_ULL(47, 44) + +enum riscv_iommu_dc_fsc_atp_modes { + RISCV_IOMMU_DC_FSC_MODE_BARE = 0, + RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV32 = 8, + RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 = 8, + RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48 = 9, + RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57 = 10, + RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8 = 1, + RISCV_IOMMU_DC_FSC_PDTP_MODE_PD17 = 2, + RISCV_IOMMU_DC_FSC_PDTP_MODE_PD20 = 3 +}; + +enum riscv_iommu_fq_causes { + RISCV_IOMMU_FQ_CAUSE_INST_FAULT = 1, + RISCV_IOMMU_FQ_CAUSE_RD_ADDR_MISALIGNED = 4, + RISCV_IOMMU_FQ_CAUSE_RD_FAULT = 5, + RISCV_IOMMU_FQ_CAUSE_WR_ADDR_MISALIGNED = 6, + RISCV_IOMMU_FQ_CAUSE_WR_FAULT = 7, + RISCV_IOMMU_FQ_CAUSE_INST_FAULT_S = 12, + RISCV_IOMMU_FQ_CAUSE_RD_FAULT_S = 13, + RISCV_IOMMU_FQ_CAUSE_WR_FAULT_S = 15, + RISCV_IOMMU_FQ_CAUSE_INST_FAULT_VS = 20, + RISCV_IOMMU_FQ_CAUSE_RD_FAULT_VS = 21, + RISCV_IOMMU_FQ_CAUSE_WR_FAULT_VS = 23, + RISCV_IOMMU_FQ_CAUSE_DMA_DISABLED = 256, + RISCV_IOMMU_FQ_CAUSE_DDT_LOAD_FAULT = 257, + RISCV_IOMMU_FQ_CAUSE_DDT_INVALID = 258, + RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED = 259, + RISCV_IOMMU_FQ_CAUSE_TTYPE_BLOCKED = 260, + RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT = 261, + RISCV_IOMMU_FQ_CAUSE_MSI_INVALID = 262, + RISCV_IOMMU_FQ_CAUSE_MSI_MISCONFIGURED = 263, + RISCV_IOMMU_FQ_CAUSE_MRIF_FAULT = 264, + RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT = 265, + RISCV_IOMMU_FQ_CAUSE_PDT_INVALID = 266, + RISCV_IOMMU_FQ_CAUSE_PDT_MISCONFIGURED = 267, + RISCV_IOMMU_FQ_CAUSE_DDT_CORRUPTED = 268, + RISCV_IOMMU_FQ_CAUSE_PDT_CORRUPTED = 269, + RISCV_IOMMU_FQ_CAUSE_MSI_PT_CORRUPTED = 270, + RISCV_IOMMU_FQ_CAUSE_MRIF_CORRUIPTED = 271, + RISCV_IOMMU_FQ_CAUSE_INTERNAL_DP_ERROR = 272, + RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT = 273, + RISCV_IOMMU_FQ_CAUSE_PT_CORRUPTED = 274 +}; + +/* MSI page table pointer */ +#define RISCV_IOMMU_DC_MSIPTP_PPN RISCV_IOMMU_ATP_PPN_FIELD +#define RISCV_IOMMU_DC_MSIPTP_MODE RISCV_IOMMU_ATP_MODE_FIELD +#define RISCV_IOMMU_DC_MSIPTP_MODE_OFF 0 +#define RISCV_IOMMU_DC_MSIPTP_MODE_FLAT 1 + +/* 2.2 Process Directory Table */ +#define RISCV_IOMMU_PDTE_VALID BIT_ULL(0) +#define RISCV_IOMMU_PDTE_PPN RISCV_IOMMU_PPN_FIELD + +/* Translation attributes fields */ +#define RISCV_IOMMU_PC_TA_V BIT_ULL(0) +#define RISCV_IOMMU_PC_TA_RESERVED GENMASK_ULL(63, 32) + +/* First stage context fields */ +#define 
RISCV_IOMMU_PC_FSC_PPN RISCV_IOMMU_ATP_PPN_FIELD +#define RISCV_IOMMU_PC_FSC_RESERVED GENMASK_ULL(59, 44) + +enum riscv_iommu_fq_ttypes { + RISCV_IOMMU_FQ_TTYPE_NONE = 0, + RISCV_IOMMU_FQ_TTYPE_UADDR_INST_FETCH = 1, + RISCV_IOMMU_FQ_TTYPE_UADDR_RD = 2, + RISCV_IOMMU_FQ_TTYPE_UADDR_WR = 3, + RISCV_IOMMU_FQ_TTYPE_TADDR_INST_FETCH = 5, + RISCV_IOMMU_FQ_TTYPE_TADDR_RD = 6, + RISCV_IOMMU_FQ_TTYPE_TADDR_WR = 7, + RISCV_IOMMU_FQ_TTYPE_PCIE_ATS_REQ = 8, + RISCV_IOMMU_FW_TTYPE_PCIE_MSG_REQ = 9, +}; + +/* + * struct riscv_iommu_msi_pte - MSI Page Table Entry + */ +struct riscv_iommu_msi_pte { + uint64_t pte; + uint64_t mrif_info; +}; + +/* Fields on pte */ +#define RISCV_IOMMU_MSI_PTE_V BIT_ULL(0) +#define RISCV_IOMMU_MSI_PTE_M GENMASK_ULL(2, 1) + +#define RISCV_IOMMU_MSI_PTE_M_MRIF 1 +#define RISCV_IOMMU_MSI_PTE_M_BASIC 3 + +/* When M == 1 (MRIF mode) */ +#define RISCV_IOMMU_MSI_PTE_MRIF_ADDR GENMASK_ULL(53, 7) +/* When M == 3 (basic mode) */ +#define RISCV_IOMMU_MSI_PTE_PPN RISCV_IOMMU_PPN_FIELD +#define RISCV_IOMMU_MSI_PTE_C BIT_ULL(63) + +/* Fields on mrif_info */ +#define RISCV_IOMMU_MSI_MRIF_NID GENMASK_ULL(9, 0) +#define RISCV_IOMMU_MSI_MRIF_NPPN RISCV_IOMMU_PPN_FIELD +#define RISCV_IOMMU_MSI_MRIF_NID_MSB BIT_ULL(60) + +#endif /* _RISCV_IOMMU_BITS_H_ */ diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c new file mode 100644 index 00000000000..c5034bff795 --- /dev/null +++ b/hw/riscv/riscv-iommu-hpm.c @@ -0,0 +1,381 @@ +/* + * RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers + * + * Copyright (C) 2022-2023 Rivos Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "cpu_bits.h" +#include "riscv-iommu-hpm.h" +#include "riscv-iommu.h" +#include "riscv-iommu-bits.h" +#include "trace.h" + +/* For now we assume IOMMU HPM frequency to be 1GHz so 1-cycle is of 1-ns. */ +static inline uint64_t get_cycles(void) +{ + return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); +} + +uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s) +{ + const uint64_t cycle = riscv_iommu_reg_get64( + s, RISCV_IOMMU_REG_IOHPMCYCLES); + const uint32_t inhibit = riscv_iommu_reg_get32( + s, RISCV_IOMMU_REG_IOCOUNTINH); + const uint64_t ctr_prev = s->hpmcycle_prev; + const uint64_t ctr_val = s->hpmcycle_val; + + trace_riscv_iommu_hpm_read(cycle, inhibit, ctr_prev, ctr_val); + + if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) { + /* + * Counter should not increment if inhibit bit is set. We can't really + * stop the QEMU_CLOCK_VIRTUAL, so we just return the last updated + * counter value to indicate that counter was not incremented. 
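 *
 * (Editor's note, not part of the original patch: when the counter is
 * running, the second return below reports the last programmed value
 * plus the virtual-clock ticks elapsed since the snapshot, i.e.
 * ctr_val + get_cycles() - ctr_prev, with the OF bit taken from the
 * live IOHPMCYCLES register.)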
+ */ + return (ctr_val & RISCV_IOMMU_IOHPMCYCLES_COUNTER) | + (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF); + } + + return (ctr_val + get_cycles() - ctr_prev) | + (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF); +} + +static void hpm_incr_ctr(RISCVIOMMUState *s, uint32_t ctr_idx) +{ + const uint32_t off = ctr_idx << 3; + uint64_t cntr_val; + + cntr_val = ldq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off]); + stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off], cntr_val + 1); + + trace_riscv_iommu_hpm_incr_ctr(cntr_val); + + /* Handle the overflow scenario. */ + if (cntr_val == UINT64_MAX) { + /* + * Generate interrupt only if OF bit is clear. +1 to offset the cycle + * register OF bit. + */ + const uint32_t ovf = + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF, + BIT(ctr_idx + 1), 0); + if (!get_field(ovf, BIT(ctr_idx + 1))) { + riscv_iommu_reg_mod64(s, + RISCV_IOMMU_REG_IOHPMEVT_BASE + off, + RISCV_IOMMU_IOHPMEVT_OF, + 0); + riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM); + } + } +} + +void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx, + unsigned event_id) +{ + const uint32_t inhibit = riscv_iommu_reg_get32( + s, RISCV_IOMMU_REG_IOCOUNTINH); + uint32_t did_gscid; + uint32_t pid_pscid; + uint32_t ctr_idx; + gpointer value; + uint32_t ctrs; + uint64_t evt; + + if (!(s->cap & RISCV_IOMMU_CAP_HPM)) { + return; + } + + value = g_hash_table_lookup(s->hpm_event_ctr_map, + GUINT_TO_POINTER(event_id)); + if (value == NULL) { + return; + } + + for (ctrs = GPOINTER_TO_UINT(value); ctrs != 0; ctrs &= ctrs - 1) { + ctr_idx = ctz32(ctrs); + if (get_field(inhibit, BIT(ctr_idx + 1))) { + continue; + } + + evt = riscv_iommu_reg_get64(s, + RISCV_IOMMU_REG_IOHPMEVT_BASE + (ctr_idx << 3)); + + /* + * It's quite possible that event ID has been changed in counter + * but hashtable hasn't been updated yet. We don't want to increment + * counter for the old event ID. + */ + if (event_id != get_field(evt, RISCV_IOMMU_IOHPMEVT_EVENT_ID)) { + continue; + } + + if (get_field(evt, RISCV_IOMMU_IOHPMEVT_IDT)) { + did_gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID); + pid_pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID); + } else { + did_gscid = ctx->devid; + pid_pscid = ctx->process_id; + } + + if (get_field(evt, RISCV_IOMMU_IOHPMEVT_PV_PSCV)) { + /* + * If the transaction does not have a valid process_id, counter + * increments if device_id matches DID_GSCID. If the transaction + * has a valid process_id, counter increments if device_id + * matches DID_GSCID and process_id matches PID_PSCID. See + * IOMMU Specification, Chapter 5.23. Performance-monitoring + * event selector. + */ + if (ctx->process_id && + get_field(evt, RISCV_IOMMU_IOHPMEVT_PID_PSCID) != pid_pscid) { + continue; + } + } + + if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DV_GSCV)) { + uint32_t mask = ~0; + + if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DMASK)) { + /* + * 1001 1011 mask = GSCID + * 0000 0111 mask = mask ^ (mask + 1) + * 1111 1000 mask = ~mask; + */ + mask = get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID); + mask = mask ^ (mask + 1); + mask = ~mask; + } + + if ((get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID) & mask) != + (did_gscid & mask)) { + continue; + } + } + + hpm_incr_ctr(s, ctr_idx); + } +} + +/* Timer callback for cycle counter overflow. 
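 * (Editor's note, not part of the original patch: the callback below
 * either re-arms the timer when a residual overflow interval is still
 * pending, or, if the CY overflow bit is not already latched, sets it
 * in IOCOUNTOVF and IOHPMCYCLES and raises the performance-monitoring
 * interrupt.)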
*/ +void riscv_iommu_hpm_timer_cb(void *priv) +{ + RISCVIOMMUState *s = priv; + const uint32_t inhibit = riscv_iommu_reg_get32( + s, RISCV_IOMMU_REG_IOCOUNTINH); + uint32_t ovf; + + if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) { + return; + } + + if (s->irq_overflow_left > 0) { + uint64_t irq_trigger_at = + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->irq_overflow_left; + timer_mod_anticipate_ns(s->hpm_timer, irq_trigger_at); + s->irq_overflow_left = 0; + return; + } + + ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF); + if (!get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY)) { + /* + * We don't need to set hpmcycle_val to zero and update hpmcycle_prev to + * current clock value. The way we calculate iohpmcycs will overflow + * and return the correct value. This avoids the need to synchronize + * timer callback and write callback. + */ + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF, + RISCV_IOMMU_IOCOUNTOVF_CY, 0); + riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_IOHPMCYCLES, + RISCV_IOMMU_IOHPMCYCLES_OVF, 0); + riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM); + } +} + +static void hpm_setup_timer(RISCVIOMMUState *s, uint64_t value) +{ + const uint32_t inhibit = riscv_iommu_reg_get32( + s, RISCV_IOMMU_REG_IOCOUNTINH); + uint64_t overflow_at, overflow_ns; + + if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) { + return; + } + + /* + * We are using INT64_MAX here instead to UINT64_MAX because cycle counter + * has 63-bit precision and INT64_MAX is the maximum it can store. + */ + if (value) { + overflow_ns = INT64_MAX - value + 1; + } else { + overflow_ns = INT64_MAX; + } + + overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_ns; + + if (overflow_at > INT64_MAX) { + s->irq_overflow_left = overflow_at - INT64_MAX; + overflow_at = INT64_MAX; + } + + timer_mod_anticipate_ns(s->hpm_timer, overflow_at); +} + +/* Updates the internal cycle counter state when iocntinh:CY is changed. */ +void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh) +{ + const uint32_t inhibit = riscv_iommu_reg_get32( + s, RISCV_IOMMU_REG_IOCOUNTINH); + + /* We only need to process CY bit toggle. */ + if (!(inhibit ^ prev_cy_inh)) { + return; + } + + trace_riscv_iommu_hpm_iocntinh_cy(prev_cy_inh); + + if (!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY)) { + /* + * Cycle counter is enabled. Just start the timer again and update + * the clock snapshot value to point to the current time to make + * sure iohpmcycles read is correct. + */ + s->hpmcycle_prev = get_cycles(); + hpm_setup_timer(s, s->hpmcycle_val); + } else { + /* + * Cycle counter is disabled. Stop the timer and update the cycle + * counter to record the current value which is last programmed + * value + the cycles passed so far. + */ + s->hpmcycle_val = s->hpmcycle_val + (get_cycles() - s->hpmcycle_prev); + timer_del(s->hpm_timer); + } +} + +void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s) +{ + const uint64_t val = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_IOHPMCYCLES); + const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF); + + trace_riscv_iommu_hpm_cycle_write(ovf, val); + + /* + * Clear OF bit in IOCNTOVF if it's being cleared in IOHPMCYCLES register. 
+ */ + if (get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY) && + !get_field(val, RISCV_IOMMU_IOHPMCYCLES_OVF)) { + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF, 0, + RISCV_IOMMU_IOCOUNTOVF_CY); + } + + s->hpmcycle_val = val & ~RISCV_IOMMU_IOHPMCYCLES_OVF; + s->hpmcycle_prev = get_cycles(); + hpm_setup_timer(s, s->hpmcycle_val); +} + +static inline bool check_valid_event_id(unsigned event_id) +{ + return event_id > RISCV_IOMMU_HPMEVENT_INVALID && + event_id < RISCV_IOMMU_HPMEVENT_MAX; +} + +static gboolean hpm_event_equal(gpointer key, gpointer value, gpointer udata) +{ + uint32_t *pair = udata; + + if (GPOINTER_TO_UINT(value) & (1 << pair[0])) { + pair[1] = GPOINTER_TO_UINT(key); + return true; + } + + return false; +} + +/* Caller must check ctr_idx against hpm_ctrs to see if its supported or not. */ +static void update_event_map(RISCVIOMMUState *s, uint64_t value, + uint32_t ctr_idx) +{ + unsigned event_id = get_field(value, RISCV_IOMMU_IOHPMEVT_EVENT_ID); + uint32_t pair[2] = { ctr_idx, RISCV_IOMMU_HPMEVENT_INVALID }; + uint32_t new_value = 1 << ctr_idx; + gpointer data; + + /* + * If EventID field is RISCV_IOMMU_HPMEVENT_INVALID + * remove the current mapping. + */ + if (event_id == RISCV_IOMMU_HPMEVENT_INVALID) { + data = g_hash_table_find(s->hpm_event_ctr_map, hpm_event_equal, pair); + + new_value = GPOINTER_TO_UINT(data) & ~(new_value); + if (new_value != 0) { + g_hash_table_replace(s->hpm_event_ctr_map, + GUINT_TO_POINTER(pair[1]), + GUINT_TO_POINTER(new_value)); + } else { + g_hash_table_remove(s->hpm_event_ctr_map, + GUINT_TO_POINTER(pair[1])); + } + + return; + } + + /* Update the counter mask if the event is already enabled. */ + if (g_hash_table_lookup_extended(s->hpm_event_ctr_map, + GUINT_TO_POINTER(event_id), + NULL, + &data)) { + new_value |= GPOINTER_TO_UINT(data); + } + + g_hash_table_insert(s->hpm_event_ctr_map, + GUINT_TO_POINTER(event_id), + GUINT_TO_POINTER(new_value)); +} + +void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg) +{ + const uint32_t ctr_idx = (evt_reg - RISCV_IOMMU_REG_IOHPMEVT_BASE) >> 3; + const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF); + uint64_t val = riscv_iommu_reg_get64(s, evt_reg); + + if (ctr_idx >= s->hpm_cntrs) { + return; + } + + trace_riscv_iommu_hpm_evt_write(ctr_idx, ovf, val); + + /* Clear OF bit in IOCNTOVF if it's being cleared in IOHPMEVT register. */ + if (get_field(ovf, BIT(ctr_idx + 1)) && + !get_field(val, RISCV_IOMMU_IOHPMEVT_OF)) { + /* +1 to offset CYCLE register OF bit. */ + riscv_iommu_reg_mod32( + s, RISCV_IOMMU_REG_IOCOUNTOVF, 0, BIT(ctr_idx + 1)); + } + + if (!check_valid_event_id(get_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID))) { + /* Reset EventID (WARL) field to invalid. */ + val = set_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID, + RISCV_IOMMU_HPMEVENT_INVALID); + riscv_iommu_reg_set64(s, evt_reg, val); + } + + update_event_map(s, val, ctr_idx); +} diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h new file mode 100644 index 00000000000..5fc4ef2e8bd --- /dev/null +++ b/hw/riscv/riscv-iommu-hpm.h @@ -0,0 +1,33 @@ +/* + * RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers + * + * Copyright (C) 2022-2023 Rivos Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#ifndef HW_RISCV_IOMMU_HPM_H +#define HW_RISCV_IOMMU_HPM_H + +#include "qom/object.h" +#include "hw/riscv/riscv-iommu.h" + +uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s); +void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx, + unsigned event_id); +void riscv_iommu_hpm_timer_cb(void *priv); +void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh); +void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s); +void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg); + +#endif diff --git a/hw/riscv/riscv-iommu-pci.c b/hw/riscv/riscv-iommu-pci.c new file mode 100644 index 00000000000..cdb4a7a8f03 --- /dev/null +++ b/hw/riscv/riscv-iommu-pci.c @@ -0,0 +1,217 @@ +/* + * QEMU emulation of an RISC-V IOMMU + * + * Copyright (C) 2022-2023 Rivos Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "exec/target_page.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/pci/pci_bus.h" +#include "hw/qdev-properties.h" +#include "hw/riscv/riscv_hart.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/host-utils.h" +#include "qom/object.h" + +#include "cpu_bits.h" +#include "riscv-iommu.h" +#include "riscv-iommu-bits.h" +#include "trace.h" + +/* RISC-V IOMMU PCI Device Emulation */ +#define RISCV_PCI_CLASS_SYSTEM_IOMMU 0x0806 + +/* + * 4 MSIx vectors for ICVEC, one for MRIF. The spec mentions in + * the "Placement and data flow" section that: + * + * "The interfaces related to recording an incoming MSI in a memory-resident + * interrupt file (MRIF) are implementation-specific. The partitioning of + * responsibility between the IOMMU and the IO bridge for recording the + * incoming MSI in an MRIF and generating the associated notice MSI are + * implementation-specific." + * + * We're making a design decision to create the MSIx for MRIF in the + * IOMMU MSIx emulation. + */ +#define RISCV_IOMMU_PCI_MSIX_VECTORS 5 + +/* + * 4 vectors that can be used by civ, fiv, pmiv and piv. Number of + * vectors is represented by 2^N, where N = number of writable bits + * in each cause. For 4 vectors we'll write 0b11 (3) in each reg. 
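 *
 * (Editor's illustration, not part of the original patch: 0x3333 puts
 * 0b0011 into each of the four 4-bit ICVEC fields (CIV, FIV, PMIV and
 * PIV), i.e. two writable bits per cause, so every interrupt cause can
 * be routed to one of 2^2 = 4 vectors, matching the four ICVEC MSI-X
 * vectors allocated above; the fifth vector is reserved for MRIF.)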
+ */ +#define RISCV_IOMMU_PCI_ICVEC_VECTORS 0x3333 + +typedef struct RISCVIOMMUStatePci { + PCIDevice pci; /* Parent PCIe device state */ + uint16_t vendor_id; + uint16_t device_id; + uint8_t revision; + MemoryRegion bar0; /* PCI BAR (including MSI-x config) */ + RISCVIOMMUState iommu; /* common IOMMU state */ +} RISCVIOMMUStatePci; + +/* interrupt delivery callback */ +static void riscv_iommu_pci_notify(RISCVIOMMUState *iommu, unsigned vector) +{ + RISCVIOMMUStatePci *s = container_of(iommu, RISCVIOMMUStatePci, iommu); + + if (msix_enabled(&(s->pci))) { + msix_notify(&(s->pci), vector); + } +} + +static void riscv_iommu_pci_realize(PCIDevice *dev, Error **errp) +{ + RISCVIOMMUStatePci *s = DO_UPCAST(RISCVIOMMUStatePci, pci, dev); + RISCVIOMMUState *iommu = &s->iommu; + uint8_t *pci_conf = dev->config; + Error *err = NULL; + + pci_set_word(pci_conf + PCI_VENDOR_ID, s->vendor_id); + pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, s->vendor_id); + pci_set_word(pci_conf + PCI_DEVICE_ID, s->device_id); + pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, s->device_id); + pci_set_byte(pci_conf + PCI_REVISION_ID, s->revision); + + /* Set device id for trace / debug */ + DEVICE(iommu)->id = g_strdup_printf("%02x:%02x.%01x", + pci_dev_bus_num(dev), PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); + qdev_realize(DEVICE(iommu), NULL, errp); + + memory_region_init(&s->bar0, OBJECT(s), "riscv-iommu-bar0", + QEMU_ALIGN_UP(memory_region_size(&iommu->regs_mr), TARGET_PAGE_SIZE)); + memory_region_add_subregion(&s->bar0, 0, &iommu->regs_mr); + + pcie_endpoint_cap_init(dev, 0); + + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, &s->bar0); + + int ret = msix_init(dev, RISCV_IOMMU_PCI_MSIX_VECTORS, + &s->bar0, 0, RISCV_IOMMU_REG_MSI_CONFIG, + &s->bar0, 0, RISCV_IOMMU_REG_MSI_CONFIG + 256, 0, &err); + + if (ret == -ENOTSUP) { + /* + * MSI-x is not supported by the platform. + * Driver should use timer/polling based notification handlers. 
+ */ + warn_report_err(err); + } else if (ret < 0) { + error_propagate(errp, err); + return; + } else { + /* Mark all ICVEC MSIx vectors as used */ + for (int i = 0; i < RISCV_IOMMU_PCI_MSIX_VECTORS; i++) { + msix_vector_use(dev, i); + } + + iommu->notify = riscv_iommu_pci_notify; + } + + PCIBus *bus = pci_device_root_bus(dev); + if (!bus) { + error_setg(errp, "can't find PCIe root port for %02x:%02x.%x", + pci_bus_num(pci_get_bus(dev)), PCI_SLOT(dev->devfn), + PCI_FUNC(dev->devfn)); + return; + } + + riscv_iommu_pci_setup_iommu(iommu, bus, errp); +} + +static void riscv_iommu_pci_exit(PCIDevice *pci_dev) +{ + pci_setup_iommu(pci_device_root_bus(pci_dev), NULL, NULL); +} + +static const VMStateDescription riscv_iommu_vmstate = { + .name = "riscv-iommu", + .unmigratable = 1 +}; + +static void riscv_iommu_pci_init(Object *obj) +{ + RISCVIOMMUStatePci *s = RISCV_IOMMU_PCI(obj); + RISCVIOMMUState *iommu = &s->iommu; + + object_initialize_child(obj, "iommu", iommu, TYPE_RISCV_IOMMU); + qdev_alias_all_properties(DEVICE(iommu), obj); + + iommu->icvec_avail_vectors = RISCV_IOMMU_PCI_ICVEC_VECTORS; + riscv_iommu_set_cap_igs(iommu, RISCV_IOMMU_CAP_IGS_MSI); +} + +static const Property riscv_iommu_pci_properties[] = { + DEFINE_PROP_UINT16("vendor-id", RISCVIOMMUStatePci, vendor_id, + PCI_VENDOR_ID_REDHAT), + DEFINE_PROP_UINT16("device-id", RISCVIOMMUStatePci, device_id, + PCI_DEVICE_ID_REDHAT_RISCV_IOMMU), + DEFINE_PROP_UINT8("revision", RISCVIOMMUStatePci, revision, 0x01), +}; + +static void riscv_iommu_pci_reset_hold(Object *obj, ResetType type) +{ + RISCVIOMMUStatePci *pci = RISCV_IOMMU_PCI(obj); + RISCVIOMMUState *iommu = &pci->iommu; + + riscv_iommu_reset(iommu); + + trace_riscv_iommu_pci_reset_hold(type); +} + +static void riscv_iommu_pci_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->phases.hold = riscv_iommu_pci_reset_hold; + + k->realize = riscv_iommu_pci_realize; + k->exit = riscv_iommu_pci_exit; + k->class_id = RISCV_PCI_CLASS_SYSTEM_IOMMU; + dc->desc = "RISCV-IOMMU DMA Remapping device"; + dc->vmsd = &riscv_iommu_vmstate; + dc->hotpluggable = false; + dc->user_creatable = true; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, riscv_iommu_pci_properties); +} + +static const TypeInfo riscv_iommu_pci = { + .name = TYPE_RISCV_IOMMU_PCI, + .parent = TYPE_PCI_DEVICE, + .class_init = riscv_iommu_pci_class_init, + .instance_init = riscv_iommu_pci_init, + .instance_size = sizeof(RISCVIOMMUStatePci), + .interfaces = (const InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { }, + }, +}; + +static void riscv_iommu_register_pci_types(void) +{ + type_register_static(&riscv_iommu_pci); +} + +type_init(riscv_iommu_register_pci_types); diff --git a/hw/riscv/riscv-iommu-sys.c b/hw/riscv/riscv-iommu-sys.c new file mode 100644 index 00000000000..e34d00aef64 --- /dev/null +++ b/hw/riscv/riscv-iommu-sys.c @@ -0,0 +1,248 @@ +/* + * QEMU emulation of an RISC-V IOMMU Platform Device + * + * Copyright (C) 2022-2023 Rivos Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/pci/pci_bus.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/host-utils.h" +#include "qemu/module.h" +#include "qom/object.h" +#include "trace.h" + +#include "riscv-iommu.h" + +#define RISCV_IOMMU_SYSDEV_ICVEC_VECTORS 0x3333 + +#define RISCV_IOMMU_PCI_MSIX_VECTORS 5 + +/* RISC-V IOMMU System Platform Device Emulation */ + +struct RISCVIOMMUStateSys { + SysBusDevice parent; + uint64_t addr; + uint32_t base_irq; + DeviceState *irqchip; + RISCVIOMMUState iommu; + + /* Wired int support */ + qemu_irq irqs[RISCV_IOMMU_INTR_COUNT]; + + /* Memory Regions for MSIX table and pending bit entries. */ + MemoryRegion msix_table_mmio; + MemoryRegion msix_pba_mmio; + uint8_t *msix_table; + uint8_t *msix_pba; +}; + +static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr, + unsigned size) +{ + RISCVIOMMUStateSys *s = opaque; + + g_assert(addr + size <= RISCV_IOMMU_PCI_MSIX_VECTORS * PCI_MSIX_ENTRY_SIZE); + return pci_get_long(s->msix_table + addr); +} + +static void msix_table_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + RISCVIOMMUStateSys *s = opaque; + + g_assert(addr + size <= RISCV_IOMMU_PCI_MSIX_VECTORS * PCI_MSIX_ENTRY_SIZE); + pci_set_long(s->msix_table + addr, val); +} + +static const MemoryRegionOps msix_table_mmio_ops = { + .read = msix_table_mmio_read, + .write = msix_table_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .max_access_size = 4, + }, +}; + +static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr, + unsigned size) +{ + RISCVIOMMUStateSys *s = opaque; + + return pci_get_long(s->msix_pba + addr); +} + +static void msix_pba_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ +} + +static const MemoryRegionOps msix_pba_mmio_ops = { + .read = msix_pba_mmio_read, + .write = msix_pba_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .max_access_size = 4, + }, +}; + +static void riscv_iommu_sysdev_init_msi(RISCVIOMMUStateSys *s, + uint32_t n_vectors) +{ + RISCVIOMMUState *iommu = &s->iommu; + uint32_t table_size = n_vectors * PCI_MSIX_ENTRY_SIZE; + uint32_t table_offset = RISCV_IOMMU_REG_MSI_CONFIG; + uint32_t pba_size = QEMU_ALIGN_UP(n_vectors, 64) / 8; + uint32_t pba_offset = RISCV_IOMMU_REG_MSI_CONFIG + 256; + + s->msix_table = g_malloc0(table_size); + s->msix_pba = g_malloc0(pba_size); + + memory_region_init_io(&s->msix_table_mmio, OBJECT(s), &msix_table_mmio_ops, + s, "msix-table", table_size); + memory_region_add_subregion(&iommu->regs_mr, table_offset, + &s->msix_table_mmio); + + memory_region_init_io(&s->msix_pba_mmio, OBJECT(s), &msix_pba_mmio_ops, s, + "msix-pba", pba_size); + memory_region_add_subregion(&iommu->regs_mr, pba_offset, + &s->msix_pba_mmio); +} + +static void riscv_iommu_sysdev_send_MSI(RISCVIOMMUStateSys *s, + uint32_t vector) +{ + uint8_t *table_entry = s->msix_table + vector * PCI_MSIX_ENTRY_SIZE; + uint64_t msi_addr = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR); + uint32_t msi_data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA); + MemTxResult result; + + address_space_stl_le(&address_space_memory, msi_addr, + 
msi_data, MEMTXATTRS_UNSPECIFIED, &result); + trace_riscv_iommu_sys_msi_sent(vector, msi_addr, msi_data, result); +} + +static void riscv_iommu_sysdev_notify(RISCVIOMMUState *iommu, + unsigned vector) +{ + RISCVIOMMUStateSys *s = container_of(iommu, RISCVIOMMUStateSys, iommu); + uint32_t fctl = riscv_iommu_reg_get32(iommu, RISCV_IOMMU_REG_FCTL); + + if (fctl & RISCV_IOMMU_FCTL_WSI) { + qemu_irq_pulse(s->irqs[vector]); + trace_riscv_iommu_sys_irq_sent(vector); + return; + } + + riscv_iommu_sysdev_send_MSI(s, vector); +} + +static void riscv_iommu_sys_realize(DeviceState *dev, Error **errp) +{ + RISCVIOMMUStateSys *s = RISCV_IOMMU_SYS(dev); + SysBusDevice *sysdev = SYS_BUS_DEVICE(s); + PCIBus *pci_bus; + qemu_irq irq; + + qdev_realize(DEVICE(&s->iommu), NULL, errp); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iommu.regs_mr); + if (s->addr) { + sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, s->addr); + } + + pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL); + if (pci_bus) { + riscv_iommu_pci_setup_iommu(&s->iommu, pci_bus, errp); + } + + s->iommu.notify = riscv_iommu_sysdev_notify; + + /* 4 IRQs are defined starting from s->base_irq */ + for (int i = 0; i < RISCV_IOMMU_INTR_COUNT; i++) { + sysbus_init_irq(sysdev, &s->irqs[i]); + irq = qdev_get_gpio_in(s->irqchip, s->base_irq + i); + sysbus_connect_irq(sysdev, i, irq); + } + + riscv_iommu_sysdev_init_msi(s, RISCV_IOMMU_PCI_MSIX_VECTORS); +} + +static void riscv_iommu_sys_init(Object *obj) +{ + RISCVIOMMUStateSys *s = RISCV_IOMMU_SYS(obj); + RISCVIOMMUState *iommu = &s->iommu; + + object_initialize_child(obj, "iommu", iommu, TYPE_RISCV_IOMMU); + qdev_alias_all_properties(DEVICE(iommu), obj); + + iommu->icvec_avail_vectors = RISCV_IOMMU_SYSDEV_ICVEC_VECTORS; + riscv_iommu_set_cap_igs(iommu, RISCV_IOMMU_CAP_IGS_BOTH); +} + +static const Property riscv_iommu_sys_properties[] = { + DEFINE_PROP_UINT64("addr", RISCVIOMMUStateSys, addr, 0), + DEFINE_PROP_UINT32("base-irq", RISCVIOMMUStateSys, base_irq, 0), + DEFINE_PROP_LINK("irqchip", RISCVIOMMUStateSys, irqchip, + TYPE_DEVICE, DeviceState *), +}; + +static void riscv_iommu_sys_reset_hold(Object *obj, ResetType type) +{ + RISCVIOMMUStateSys *sys = RISCV_IOMMU_SYS(obj); + RISCVIOMMUState *iommu = &sys->iommu; + + riscv_iommu_reset(iommu); + + trace_riscv_iommu_sys_reset_hold(type); +} + +static void riscv_iommu_sys_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->phases.hold = riscv_iommu_sys_reset_hold; + + dc->realize = riscv_iommu_sys_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, riscv_iommu_sys_properties); +} + +static const TypeInfo riscv_iommu_sys = { + .name = TYPE_RISCV_IOMMU_SYS, + .parent = TYPE_SYS_BUS_DEVICE, + .class_init = riscv_iommu_sys_class_init, + .instance_init = riscv_iommu_sys_init, + .instance_size = sizeof(RISCVIOMMUStateSys), +}; + +static void riscv_iommu_register_sys(void) +{ + type_register_static(&riscv_iommu_sys); +} + +type_init(riscv_iommu_register_sys) diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c new file mode 100644 index 00000000000..96a7fbdefcf --- /dev/null +++ b/hw/riscv/riscv-iommu.c @@ -0,0 +1,2676 @@ +/* + * QEMU emulation of an RISC-V IOMMU + * + * Copyright (C) 2021-2023, Rivos Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qom/object.h" +#include "exec/target_page.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pci_device.h" +#include "hw/qdev-properties.h" +#include "hw/riscv/riscv_hart.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/timer.h" + +#include "cpu_bits.h" +#include "riscv-iommu.h" +#include "riscv-iommu-bits.h" +#include "riscv-iommu-hpm.h" +#include "trace.h" + +#define LIMIT_CACHE_CTX (1U << 7) +#define LIMIT_CACHE_IOT (1U << 20) + +/* Physical page number coversions */ +#define PPN_PHYS(ppn) ((ppn) << TARGET_PAGE_BITS) +#define PPN_DOWN(phy) ((phy) >> TARGET_PAGE_BITS) + +typedef struct RISCVIOMMUEntry RISCVIOMMUEntry; + +/* Device assigned I/O address space */ +struct RISCVIOMMUSpace { + IOMMUMemoryRegion iova_mr; /* IOVA memory region for attached device */ + AddressSpace iova_as; /* IOVA address space for attached device */ + RISCVIOMMUState *iommu; /* Managing IOMMU device state */ + uint32_t devid; /* Requester identifier, AKA device_id */ + bool notifier; /* IOMMU unmap notifier enabled */ + QLIST_ENTRY(RISCVIOMMUSpace) list; +}; + +typedef enum RISCVIOMMUTransTag { + RISCV_IOMMU_TRANS_TAG_BY, /* Bypass */ + RISCV_IOMMU_TRANS_TAG_SS, /* Single Stage */ + RISCV_IOMMU_TRANS_TAG_VG, /* G-stage only */ + RISCV_IOMMU_TRANS_TAG_VN, /* Nested translation */ +} RISCVIOMMUTransTag; + +/* Address translation cache entry */ +struct RISCVIOMMUEntry { + RISCVIOMMUTransTag tag; /* Translation Tag */ + uint64_t iova:44; /* IOVA Page Number */ + uint64_t pscid:20; /* Process Soft-Context identifier */ + uint64_t phys:44; /* Physical Page Number */ + uint64_t gscid:16; /* Guest Soft-Context identifier */ + uint64_t perm:2; /* IOMMU_RW flags */ +}; + +/* IOMMU index for transactions without process_id specified. 
*/ +#define RISCV_IOMMU_NOPROCID 0 + +static uint8_t riscv_iommu_get_icvec_vector(uint32_t icvec, uint32_t vec_type) +{ + switch (vec_type) { + case RISCV_IOMMU_INTR_CQ: + return icvec & RISCV_IOMMU_ICVEC_CIV; + case RISCV_IOMMU_INTR_FQ: + return (icvec & RISCV_IOMMU_ICVEC_FIV) >> 4; + case RISCV_IOMMU_INTR_PM: + return (icvec & RISCV_IOMMU_ICVEC_PMIV) >> 8; + case RISCV_IOMMU_INTR_PQ: + return (icvec & RISCV_IOMMU_ICVEC_PIV) >> 12; + default: + g_assert_not_reached(); + } +} + +void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type) +{ + uint32_t ipsr, icvec, vector; + + if (!s->notify) { + return; + } + + icvec = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_ICVEC); + ipsr = riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IPSR, (1 << vec_type), 0); + + if (!(ipsr & (1 << vec_type))) { + vector = riscv_iommu_get_icvec_vector(icvec, vec_type); + s->notify(s, vector); + trace_riscv_iommu_notify_int_vector(vec_type, vector); + } +} + +static void riscv_iommu_fault(RISCVIOMMUState *s, + struct riscv_iommu_fq_record *ev) +{ + uint32_t ctrl = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_FQCSR); + uint32_t head = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_FQH) & s->fq_mask; + uint32_t tail = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_FQT) & s->fq_mask; + uint32_t next = (tail + 1) & s->fq_mask; + uint32_t devid = get_field(ev->hdr, RISCV_IOMMU_FQ_HDR_DID); + + trace_riscv_iommu_flt(s->parent_obj.id, PCI_BUS_NUM(devid), PCI_SLOT(devid), + PCI_FUNC(devid), ev->hdr, ev->iotval); + + if (!(ctrl & RISCV_IOMMU_FQCSR_FQON) || + !!(ctrl & (RISCV_IOMMU_FQCSR_FQOF | RISCV_IOMMU_FQCSR_FQMF))) { + return; + } + + if (head == next) { + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_FQCSR, + RISCV_IOMMU_FQCSR_FQOF, 0); + } else { + dma_addr_t addr = s->fq_addr + tail * sizeof(*ev); + if (dma_memory_write(s->target_as, addr, ev, sizeof(*ev), + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_FQCSR, + RISCV_IOMMU_FQCSR_FQMF, 0); + } else { + riscv_iommu_reg_set32(s, RISCV_IOMMU_REG_FQT, next); + } + } + + if (ctrl & RISCV_IOMMU_FQCSR_FIE) { + riscv_iommu_notify(s, RISCV_IOMMU_INTR_FQ); + } +} + +static void riscv_iommu_pri(RISCVIOMMUState *s, + struct riscv_iommu_pq_record *pr) +{ + uint32_t ctrl = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_PQCSR); + uint32_t head = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_PQH) & s->pq_mask; + uint32_t tail = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_PQT) & s->pq_mask; + uint32_t next = (tail + 1) & s->pq_mask; + uint32_t devid = get_field(pr->hdr, RISCV_IOMMU_PREQ_HDR_DID); + + trace_riscv_iommu_pri(s->parent_obj.id, PCI_BUS_NUM(devid), PCI_SLOT(devid), + PCI_FUNC(devid), pr->payload); + + if (!(ctrl & RISCV_IOMMU_PQCSR_PQON) || + !!(ctrl & (RISCV_IOMMU_PQCSR_PQOF | RISCV_IOMMU_PQCSR_PQMF))) { + return; + } + + if (head == next) { + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_PQCSR, + RISCV_IOMMU_PQCSR_PQOF, 0); + } else { + dma_addr_t addr = s->pq_addr + tail * sizeof(*pr); + if (dma_memory_write(s->target_as, addr, pr, sizeof(*pr), + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_PQCSR, + RISCV_IOMMU_PQCSR_PQMF, 0); + } else { + riscv_iommu_reg_set32(s, RISCV_IOMMU_REG_PQT, next); + } + } + + if (ctrl & RISCV_IOMMU_PQCSR_PIE) { + riscv_iommu_notify(s, RISCV_IOMMU_INTR_PQ); + } +} + +/* + * Discards all bits from 'val' whose matching bits in the same + * positions in the mask 'ext' are zeros, and packs the remaining + * bits from 'val' contiguously at the least-significant end of the + * result, keeping the same bit order as 'val' and 
filling any + * other bits at the most-significant end of the result with zeros. + * + * For example, for the following 'val' and 'ext', the return 'ret' + * will be: + * + * val = a b c d e f g h + * ext = 1 0 1 0 0 1 1 0 + * ret = 0 0 0 0 a c f g + * + * This function, taken from the riscv-iommu 1.0 spec, section 2.3.3 + * "Process to translate addresses of MSIs", is similar to bit manip + * function PEXT (Parallel bits extract) from x86. + */ +static uint64_t riscv_iommu_pext_u64(uint64_t val, uint64_t ext) +{ + uint64_t ret = 0; + uint64_t rot = 1; + + while (ext) { + if (ext & 1) { + if (val & 1) { + ret |= rot; + } + rot <<= 1; + } + val >>= 1; + ext >>= 1; + } + + return ret; +} + +/* Check if GPA matches MSI/MRIF pattern. */ +static bool riscv_iommu_msi_check(RISCVIOMMUState *s, RISCVIOMMUContext *ctx, + dma_addr_t gpa) +{ + if (!s->enable_msi) { + return false; + } + + if (get_field(ctx->msiptp, RISCV_IOMMU_DC_MSIPTP_MODE) != + RISCV_IOMMU_DC_MSIPTP_MODE_FLAT) { + return false; /* Invalid MSI/MRIF mode */ + } + + if ((PPN_DOWN(gpa) ^ ctx->msi_addr_pattern) & ~ctx->msi_addr_mask) { + return false; /* GPA not in MSI range defined by AIA IMSIC rules. */ + } + + return true; +} + +/* + * RISCV IOMMU Address Translation Lookup - Page Table Walk + * + * Note: Code is based on get_physical_address() from target/riscv/cpu_helper.c + * Both implementation can be merged into single helper function in future. + * Keeping them separate for now, as error reporting and flow specifics are + * sufficiently different for separate implementation. + * + * @s : IOMMU Device State + * @ctx : Translation context for device id and process address space id. + * @iotlb : translation data: physical address and access mode. + * @return : success or fault cause code. + */ +static int riscv_iommu_spa_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx, + IOMMUTLBEntry *iotlb) +{ + dma_addr_t addr, base; + uint64_t satp, gatp, pte; + bool en_s, en_g; + struct { + unsigned char step; + unsigned char levels; + unsigned char ptidxbits; + unsigned char ptesize; + } sc[2]; + /* Translation stage phase */ + enum { + S_STAGE = 0, + G_STAGE = 1, + } pass; + MemTxResult ret; + + satp = get_field(ctx->satp, RISCV_IOMMU_ATP_MODE_FIELD); + gatp = get_field(ctx->gatp, RISCV_IOMMU_ATP_MODE_FIELD); + + en_s = satp != RISCV_IOMMU_DC_FSC_MODE_BARE; + en_g = gatp != RISCV_IOMMU_DC_IOHGATP_MODE_BARE; + + /* + * Early check for MSI address match when IOVA == GPA. + * Note that the (!en_s) condition means that the MSI + * page table may only be used when guest pages are + * mapped using the g-stage page table, whether single- + * or two-stage paging is enabled. It's unavoidable though, + * because the spec mandates that we do a first-stage + * translation before we check the MSI page table, which + * means we can't do an early MSI check unless we have + * strictly !en_s. + */ + if (!en_s && (iotlb->perm & IOMMU_WO) && + riscv_iommu_msi_check(s, ctx, iotlb->iova)) { + iotlb->target_as = &s->trap_as; + iotlb->translated_addr = iotlb->iova; + iotlb->addr_mask = ~TARGET_PAGE_MASK; + return 0; + } + + /* Exit early for pass-through mode. */ + if (!(en_s || en_g)) { + iotlb->translated_addr = iotlb->iova; + iotlb->addr_mask = ~TARGET_PAGE_MASK; + /* Allow R/W in pass-through mode */ + iotlb->perm = IOMMU_RW; + return 0; + } + + /* S/G translation parameters. */ + for (pass = 0; pass < 2; pass++) { + uint32_t sv_mode; + + sc[pass].step = 0; + if (pass ? 
(s->fctl & RISCV_IOMMU_FCTL_GXL) : + (ctx->tc & RISCV_IOMMU_DC_TC_SXL)) { + /* 32bit mode for GXL/SXL == 1 */ + switch (pass ? gatp : satp) { + case RISCV_IOMMU_DC_IOHGATP_MODE_BARE: + sc[pass].levels = 0; + sc[pass].ptidxbits = 0; + sc[pass].ptesize = 0; + break; + case RISCV_IOMMU_DC_IOHGATP_MODE_SV32X4: + sv_mode = pass ? RISCV_IOMMU_CAP_SV32X4 : RISCV_IOMMU_CAP_SV32; + if (!(s->cap & sv_mode)) { + return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED; + } + sc[pass].levels = 2; + sc[pass].ptidxbits = 10; + sc[pass].ptesize = 4; + break; + default: + return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED; + } + } else { + /* 64bit mode for GXL/SXL == 0 */ + switch (pass ? gatp : satp) { + case RISCV_IOMMU_DC_IOHGATP_MODE_BARE: + sc[pass].levels = 0; + sc[pass].ptidxbits = 0; + sc[pass].ptesize = 0; + break; + case RISCV_IOMMU_DC_IOHGATP_MODE_SV39X4: + sv_mode = pass ? RISCV_IOMMU_CAP_SV39X4 : RISCV_IOMMU_CAP_SV39; + if (!(s->cap & sv_mode)) { + return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED; + } + sc[pass].levels = 3; + sc[pass].ptidxbits = 9; + sc[pass].ptesize = 8; + break; + case RISCV_IOMMU_DC_IOHGATP_MODE_SV48X4: + sv_mode = pass ? RISCV_IOMMU_CAP_SV48X4 : RISCV_IOMMU_CAP_SV48; + if (!(s->cap & sv_mode)) { + return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED; + } + sc[pass].levels = 4; + sc[pass].ptidxbits = 9; + sc[pass].ptesize = 8; + break; + case RISCV_IOMMU_DC_IOHGATP_MODE_SV57X4: + sv_mode = pass ? RISCV_IOMMU_CAP_SV57X4 : RISCV_IOMMU_CAP_SV57; + if (!(s->cap & sv_mode)) { + return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED; + } + sc[pass].levels = 5; + sc[pass].ptidxbits = 9; + sc[pass].ptesize = 8; + break; + default: + return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED; + } + } + }; + + /* S/G stages translation tables root pointers */ + gatp = PPN_PHYS(get_field(ctx->gatp, RISCV_IOMMU_ATP_PPN_FIELD)); + satp = PPN_PHYS(get_field(ctx->satp, RISCV_IOMMU_ATP_PPN_FIELD)); + addr = (en_s && en_g) ? satp : iotlb->iova; + base = en_g ? gatp : satp; + pass = en_g ? G_STAGE : S_STAGE; + + do { + const unsigned widened = (pass && !sc[pass].step) ? 2 : 0; + const unsigned va_bits = widened + sc[pass].ptidxbits; + const unsigned va_skip = TARGET_PAGE_BITS + sc[pass].ptidxbits * + (sc[pass].levels - 1 - sc[pass].step); + const unsigned idx = (addr >> va_skip) & ((1 << va_bits) - 1); + const dma_addr_t pte_addr = base + idx * sc[pass].ptesize; + const bool ade = + ctx->tc & (pass ? RISCV_IOMMU_DC_TC_GADE : RISCV_IOMMU_DC_TC_SADE); + + /* Address range check before first level lookup */ + if (!sc[pass].step) { + const uint64_t va_len = va_skip + va_bits; + const uint64_t va_mask = (1ULL << va_len) - 1; + + if (pass == S_STAGE && va_len > 32) { + target_ulong mask, masked_msbs; + + mask = (1L << (TARGET_LONG_BITS - (va_len - 1))) - 1; + masked_msbs = (addr >> (va_len - 1)) & mask; + + if (masked_msbs != 0 && masked_msbs != mask) { + return (iotlb->perm & IOMMU_WO) ? + RISCV_IOMMU_FQ_CAUSE_WR_FAULT_S : + RISCV_IOMMU_FQ_CAUSE_RD_FAULT_S; + } + } else { + if ((addr & va_mask) != addr) { + return (iotlb->perm & IOMMU_WO) ? 
+ RISCV_IOMMU_FQ_CAUSE_WR_FAULT_VS : + RISCV_IOMMU_FQ_CAUSE_RD_FAULT_VS; + } + } + } + + + if (pass == S_STAGE) { + riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_S_VS_WALKS); + } else { + riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_G_WALKS); + } + + /* Read page table entry */ + if (sc[pass].ptesize == 4) { + uint32_t pte32 = 0; + ret = ldl_le_dma(s->target_as, pte_addr, &pte32, + MEMTXATTRS_UNSPECIFIED); + pte = pte32; + } else { + ret = ldq_le_dma(s->target_as, pte_addr, &pte, + MEMTXATTRS_UNSPECIFIED); + } + if (ret != MEMTX_OK) { + return (iotlb->perm & IOMMU_WO) ? RISCV_IOMMU_FQ_CAUSE_WR_FAULT + : RISCV_IOMMU_FQ_CAUSE_RD_FAULT; + } + + sc[pass].step++; + hwaddr ppn = pte >> PTE_PPN_SHIFT; + + if (!(pte & PTE_V)) { + break; /* Invalid PTE */ + } else if (!(pte & (PTE_R | PTE_W | PTE_X))) { + base = PPN_PHYS(ppn); /* Inner PTE, continue walking */ + } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) { + break; /* Reserved leaf PTE flags: PTE_W */ + } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) { + break; /* Reserved leaf PTE flags: PTE_W + PTE_X */ + } else if (ppn & ((1ULL << (va_skip - TARGET_PAGE_BITS)) - 1)) { + break; /* Misaligned PPN */ + } else if ((iotlb->perm & IOMMU_RO) && !(pte & PTE_R)) { + break; /* Read access check failed */ + } else if ((iotlb->perm & IOMMU_WO) && !(pte & PTE_W)) { + break; /* Write access check failed */ + } else if ((iotlb->perm & IOMMU_RO) && !ade && !(pte & PTE_A)) { + break; /* Access bit not set */ + } else if ((iotlb->perm & IOMMU_WO) && !ade && !(pte & PTE_D)) { + break; /* Dirty bit not set */ + } else { + /* Leaf PTE, translation completed. */ + sc[pass].step = sc[pass].levels; + base = PPN_PHYS(ppn) | (addr & ((1ULL << va_skip) - 1)); + /* Update address mask based on smallest translation granularity */ + iotlb->addr_mask &= (1ULL << va_skip) - 1; + /* Continue with S-Stage translation? */ + if (pass && sc[0].step != sc[0].levels) { + pass = S_STAGE; + addr = iotlb->iova; + continue; + } + /* Translation phase completed (GPA or SPA) */ + iotlb->translated_addr = base; + iotlb->perm = (pte & PTE_W) ? ((pte & PTE_R) ? IOMMU_RW : IOMMU_WO) + : IOMMU_RO; + + /* Check MSI GPA address match */ + if (pass == S_STAGE && (iotlb->perm & IOMMU_WO) && + riscv_iommu_msi_check(s, ctx, base)) { + /* Trap MSI writes and return GPA address. */ + iotlb->target_as = &s->trap_as; + iotlb->addr_mask = ~TARGET_PAGE_MASK; + return 0; + } + + /* Continue with G-Stage translation? */ + if (!pass && en_g) { + pass = G_STAGE; + addr = base; + base = gatp; + sc[pass].step = 0; + continue; + } + + return 0; + } + + if (sc[pass].step == sc[pass].levels) { + break; /* Can't find leaf PTE */ + } + + /* Continue with G-Stage translation? */ + if (!pass && en_g) { + pass = G_STAGE; + addr = base; + base = gatp; + sc[pass].step = 0; + } + } while (1); + + return (iotlb->perm & IOMMU_WO) ? + (pass ? RISCV_IOMMU_FQ_CAUSE_WR_FAULT_VS : + RISCV_IOMMU_FQ_CAUSE_WR_FAULT_S) : + (pass ? 
RISCV_IOMMU_FQ_CAUSE_RD_FAULT_VS : + RISCV_IOMMU_FQ_CAUSE_RD_FAULT_S); +} + +static void riscv_iommu_report_fault(RISCVIOMMUState *s, + RISCVIOMMUContext *ctx, + uint32_t fault_type, uint32_t cause, + bool pv, + uint64_t iotval, uint64_t iotval2) +{ + struct riscv_iommu_fq_record ev = { 0 }; + + if (ctx->tc & RISCV_IOMMU_DC_TC_DTF) { + switch (cause) { + case RISCV_IOMMU_FQ_CAUSE_DMA_DISABLED: + case RISCV_IOMMU_FQ_CAUSE_DDT_LOAD_FAULT: + case RISCV_IOMMU_FQ_CAUSE_DDT_INVALID: + case RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED: + case RISCV_IOMMU_FQ_CAUSE_DDT_CORRUPTED: + case RISCV_IOMMU_FQ_CAUSE_INTERNAL_DP_ERROR: + case RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT: + break; + default: + /* DTF prevents reporting a fault for this given cause */ + return; + } + } + + ev.hdr = set_field(ev.hdr, RISCV_IOMMU_FQ_HDR_CAUSE, cause); + ev.hdr = set_field(ev.hdr, RISCV_IOMMU_FQ_HDR_TTYPE, fault_type); + ev.hdr = set_field(ev.hdr, RISCV_IOMMU_FQ_HDR_DID, ctx->devid); + ev.hdr = set_field(ev.hdr, RISCV_IOMMU_FQ_HDR_PV, true); + + if (pv) { + ev.hdr = set_field(ev.hdr, RISCV_IOMMU_FQ_HDR_PID, ctx->process_id); + } + + ev.iotval = iotval; + ev.iotval2 = iotval2; + + riscv_iommu_fault(s, &ev); +} + +/* Redirect MSI write for given GPA. */ +static MemTxResult riscv_iommu_msi_write(RISCVIOMMUState *s, + RISCVIOMMUContext *ctx, uint64_t gpa, uint64_t data, + unsigned size, MemTxAttrs attrs) +{ + MemTxResult res; + dma_addr_t addr; + uint64_t intn; + uint32_t n190; + uint64_t pte[2]; + int fault_type = RISCV_IOMMU_FQ_TTYPE_UADDR_WR; + int cause; + + /* Interrupt File Number */ + intn = riscv_iommu_pext_u64(PPN_DOWN(gpa), ctx->msi_addr_mask); + if (intn >= 256) { + /* Interrupt file number out of range */ + res = MEMTX_ACCESS_ERROR; + cause = RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT; + goto err; + } + + /* fetch MSI PTE */ + addr = PPN_PHYS(get_field(ctx->msiptp, RISCV_IOMMU_DC_MSIPTP_PPN)); + addr = addr | (intn * sizeof(pte)); + res = dma_memory_read(s->target_as, addr, &pte, sizeof(pte), + MEMTXATTRS_UNSPECIFIED); + if (res != MEMTX_OK) { + if (res == MEMTX_DECODE_ERROR) { + cause = RISCV_IOMMU_FQ_CAUSE_MSI_PT_CORRUPTED; + } else { + cause = RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT; + } + goto err; + } + + le64_to_cpus(&pte[0]); + le64_to_cpus(&pte[1]); + + if (!(pte[0] & RISCV_IOMMU_MSI_PTE_V) || (pte[0] & RISCV_IOMMU_MSI_PTE_C)) { + /* + * The spec mentions that: "If msipte.C == 1, then further + * processing to interpret the PTE is implementation + * defined.". We'll abort with cause = 262 for this + * case too. + */ + res = MEMTX_ACCESS_ERROR; + cause = RISCV_IOMMU_FQ_CAUSE_MSI_INVALID; + goto err; + } + + switch (get_field(pte[0], RISCV_IOMMU_MSI_PTE_M)) { + case RISCV_IOMMU_MSI_PTE_M_BASIC: + /* MSI Pass-through mode */ + addr = PPN_PHYS(get_field(pte[0], RISCV_IOMMU_MSI_PTE_PPN)); + + trace_riscv_iommu_msi(s->parent_obj.id, PCI_BUS_NUM(ctx->devid), + PCI_SLOT(ctx->devid), PCI_FUNC(ctx->devid), + gpa, addr); + + res = dma_memory_write(s->target_as, addr, &data, size, attrs); + if (res != MEMTX_OK) { + cause = RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT; + goto err; + } + + return MEMTX_OK; + case RISCV_IOMMU_MSI_PTE_M_MRIF: + /* MRIF mode, continue. */ + break; + default: + res = MEMTX_ACCESS_ERROR; + cause = RISCV_IOMMU_FQ_CAUSE_MSI_MISCONFIGURED; + goto err; + } + + /* + * Report an error for interrupt identities exceeding the maximum allowed + * for an IMSIC interrupt file (2047) or destination address is not 32-bit + * aligned. See IOMMU Specification, Chapter 2.3. MSI page tables. 
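+ *
+ * As a purely illustrative example (values invented here): for
+ * interrupt identity 100, the MRIF update below touches the
+ * doubleword at byte offset ((100 & 0x7c0) >> 3) == 8 from the MRIF
+ * base and sets bit (100 & 0x3f) == 36 in it, i.e. bit 100 of the
+ * pending-bit array.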
+ */ + if ((data > 2047) || (gpa & 3)) { + res = MEMTX_ACCESS_ERROR; + cause = RISCV_IOMMU_FQ_CAUSE_MSI_MISCONFIGURED; + goto err; + } + + /* MSI MRIF mode, non atomic pending bit update */ + + /* MRIF pending bit address */ + addr = get_field(pte[0], RISCV_IOMMU_MSI_PTE_MRIF_ADDR) << 9; + addr = addr | ((data & 0x7c0) >> 3); + + trace_riscv_iommu_msi(s->parent_obj.id, PCI_BUS_NUM(ctx->devid), + PCI_SLOT(ctx->devid), PCI_FUNC(ctx->devid), + gpa, addr); + + /* MRIF pending bit mask */ + data = 1ULL << (data & 0x03f); + res = dma_memory_read(s->target_as, addr, &intn, sizeof(intn), attrs); + if (res != MEMTX_OK) { + cause = RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT; + goto err; + } + + intn = intn | data; + res = dma_memory_write(s->target_as, addr, &intn, sizeof(intn), attrs); + if (res != MEMTX_OK) { + cause = RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT; + goto err; + } + + /* Get MRIF enable bits */ + addr = addr + sizeof(intn); + res = dma_memory_read(s->target_as, addr, &intn, sizeof(intn), attrs); + if (res != MEMTX_OK) { + cause = RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT; + goto err; + } + + if (!(intn & data)) { + /* notification disabled, MRIF update completed. */ + return MEMTX_OK; + } + + /* Send notification message */ + addr = PPN_PHYS(get_field(pte[1], RISCV_IOMMU_MSI_MRIF_NPPN)); + n190 = get_field(pte[1], RISCV_IOMMU_MSI_MRIF_NID) | + (get_field(pte[1], RISCV_IOMMU_MSI_MRIF_NID_MSB) << 10); + + res = dma_memory_write(s->target_as, addr, &n190, sizeof(n190), attrs); + if (res != MEMTX_OK) { + cause = RISCV_IOMMU_FQ_CAUSE_MSI_WR_FAULT; + goto err; + } + + trace_riscv_iommu_mrif_notification(s->parent_obj.id, n190, addr); + + return MEMTX_OK; + +err: + riscv_iommu_report_fault(s, ctx, fault_type, cause, + !!ctx->process_id, 0, 0); + return res; +} + +/* + * Check device context configuration as described by the + * riscv-iommu spec section "Device-context configuration + * checks". 
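+ *
+ * In short, the checks implemented below are: the ATS, PRI and PRPR
+ * bits require capabilities.ATS; T2GPA and PRI require EN_ATS and
+ * PRPR requires EN_PRI; T2GPA also requires capabilities.T2GPA and a
+ * non-BARE iohgatp; when MSI translation is supported, msiptp.MODE
+ * must be OFF or FLAT; the fsc/pdtp mode must be backed by the
+ * matching capability bit; and tc.SBE must be zero (little-endian
+ * only).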
+ */ +static bool riscv_iommu_validate_device_ctx(RISCVIOMMUState *s, + RISCVIOMMUContext *ctx) +{ + uint32_t fsc_mode, msi_mode; + uint64_t gatp; + + if (!(s->cap & RISCV_IOMMU_CAP_ATS) && + (ctx->tc & RISCV_IOMMU_DC_TC_EN_ATS || + ctx->tc & RISCV_IOMMU_DC_TC_EN_PRI || + ctx->tc & RISCV_IOMMU_DC_TC_PRPR)) { + return false; + } + + if (!(ctx->tc & RISCV_IOMMU_DC_TC_EN_ATS) && + (ctx->tc & RISCV_IOMMU_DC_TC_T2GPA || + ctx->tc & RISCV_IOMMU_DC_TC_EN_PRI)) { + return false; + } + + if (!(ctx->tc & RISCV_IOMMU_DC_TC_EN_PRI) && + ctx->tc & RISCV_IOMMU_DC_TC_PRPR) { + return false; + } + + if (!(s->cap & RISCV_IOMMU_CAP_T2GPA) && + ctx->tc & RISCV_IOMMU_DC_TC_T2GPA) { + return false; + } + + if (s->cap & RISCV_IOMMU_CAP_MSI_FLAT) { + msi_mode = get_field(ctx->msiptp, RISCV_IOMMU_DC_MSIPTP_MODE); + + if (msi_mode != RISCV_IOMMU_DC_MSIPTP_MODE_OFF && + msi_mode != RISCV_IOMMU_DC_MSIPTP_MODE_FLAT) { + return false; + } + } + + gatp = get_field(ctx->gatp, RISCV_IOMMU_ATP_MODE_FIELD); + if (ctx->tc & RISCV_IOMMU_DC_TC_T2GPA && + gatp == RISCV_IOMMU_DC_IOHGATP_MODE_BARE) { + return false; + } + + fsc_mode = get_field(ctx->satp, RISCV_IOMMU_DC_FSC_MODE); + + if (ctx->tc & RISCV_IOMMU_DC_TC_PDTV) { + switch (fsc_mode) { + case RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8: + if (!(s->cap & RISCV_IOMMU_CAP_PD8)) { + return false; + } + break; + case RISCV_IOMMU_DC_FSC_PDTP_MODE_PD17: + if (!(s->cap & RISCV_IOMMU_CAP_PD17)) { + return false; + } + break; + case RISCV_IOMMU_DC_FSC_PDTP_MODE_PD20: + if (!(s->cap & RISCV_IOMMU_CAP_PD20)) { + return false; + } + break; + } + } else { + /* DC.tc.PDTV is 0 */ + if (ctx->tc & RISCV_IOMMU_DC_TC_DPE) { + return false; + } + + if (ctx->tc & RISCV_IOMMU_DC_TC_SXL) { + if (fsc_mode == RISCV_IOMMU_CAP_SV32 && + !(s->cap & RISCV_IOMMU_CAP_SV32)) { + return false; + } + } else { + switch (fsc_mode) { + case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39: + if (!(s->cap & RISCV_IOMMU_CAP_SV39)) { + return false; + } + break; + case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48: + if (!(s->cap & RISCV_IOMMU_CAP_SV48)) { + return false; + } + break; + case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57: + if (!(s->cap & RISCV_IOMMU_CAP_SV57)) { + return false; + } + break; + } + } + } + + /* + * CAP_END is always zero (only one endianess). FCTL_BE is + * always zero (little-endian accesses). Thus TC_SBE must + * always be LE, i.e. zero. + */ + if (ctx->tc & RISCV_IOMMU_DC_TC_SBE) { + return false; + } + + return true; +} + +/* + * Validate process context (PC) according to section + * "Process-context configuration checks". 
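+ *
+ * In short: the reserved bits of PC.ta and PC.fsc must be zero, and
+ * the first-stage mode must be BARE or an Sv32/Sv39/Sv48/Sv57 mode
+ * that is advertised in the capabilities register.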
+ */ +static bool riscv_iommu_validate_process_ctx(RISCVIOMMUState *s, + RISCVIOMMUContext *ctx) +{ + uint32_t mode; + + if (get_field(ctx->ta, RISCV_IOMMU_PC_TA_RESERVED)) { + return false; + } + + if (get_field(ctx->satp, RISCV_IOMMU_PC_FSC_RESERVED)) { + return false; + } + + mode = get_field(ctx->satp, RISCV_IOMMU_DC_FSC_MODE); + switch (mode) { + case RISCV_IOMMU_DC_FSC_MODE_BARE: + /* sv39 and sv32 modes have the same value (8) */ + case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39: + case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48: + case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57: + break; + default: + return false; + } + + if (ctx->tc & RISCV_IOMMU_DC_TC_SXL) { + if (mode == RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV32 && + !(s->cap & RISCV_IOMMU_CAP_SV32)) { + return false; + } + } else { + switch (mode) { + case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39: + if (!(s->cap & RISCV_IOMMU_CAP_SV39)) { + return false; + } + break; + case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV48: + if (!(s->cap & RISCV_IOMMU_CAP_SV48)) { + return false; + } + break; + case RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57: + if (!(s->cap & RISCV_IOMMU_CAP_SV57)) { + return false; + } + break; + } + } + + return true; +} + +/* + * RISC-V IOMMU Device Context Loopkup - Device Directory Tree Walk + * + * @s : IOMMU Device State + * @ctx : Device Translation Context with devid and process_id set. + * @return : success or fault code. + */ +static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx) +{ + const uint64_t ddtp = s->ddtp; + unsigned mode = get_field(ddtp, RISCV_IOMMU_DDTP_MODE); + dma_addr_t addr = PPN_PHYS(get_field(ddtp, RISCV_IOMMU_DDTP_PPN)); + struct riscv_iommu_dc dc; + /* Device Context format: 0: extended (64 bytes) | 1: base (32 bytes) */ + const int dc_fmt = !s->enable_msi; + const size_t dc_len = sizeof(dc) >> dc_fmt; + int depth; + uint64_t de; + + switch (mode) { + case RISCV_IOMMU_DDTP_MODE_OFF: + return RISCV_IOMMU_FQ_CAUSE_DMA_DISABLED; + + case RISCV_IOMMU_DDTP_MODE_BARE: + /* mock up pass-through translation context */ + ctx->gatp = set_field(0, RISCV_IOMMU_ATP_MODE_FIELD, + RISCV_IOMMU_DC_IOHGATP_MODE_BARE); + ctx->satp = set_field(0, RISCV_IOMMU_ATP_MODE_FIELD, + RISCV_IOMMU_DC_FSC_MODE_BARE); + + ctx->tc = RISCV_IOMMU_DC_TC_V; + if (s->enable_ats) { + ctx->tc |= RISCV_IOMMU_DC_TC_EN_ATS; + } + + ctx->ta = 0; + ctx->msiptp = 0; + return 0; + + case RISCV_IOMMU_DDTP_MODE_1LVL: + depth = 0; + break; + + case RISCV_IOMMU_DDTP_MODE_2LVL: + depth = 1; + break; + + case RISCV_IOMMU_DDTP_MODE_3LVL: + depth = 2; + break; + + default: + return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED; + } + + /* + * Check supported device id width (in bits). + * See IOMMU Specification, Chapter 6. Software guidelines. + * - if extended device-context format is used: + * 1LVL: 6, 2LVL: 15, 3LVL: 24 + * - if base device-context format is used: + * 1LVL: 7, 2LVL: 16, 3LVL: 24 + */ + if (ctx->devid >= (1 << (depth * 9 + 6 + (dc_fmt && depth != 2)))) { + return RISCV_IOMMU_FQ_CAUSE_TTYPE_BLOCKED; + } + + /* Device directory tree walk */ + for (; depth-- > 0; ) { + riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK); + /* + * Select device id index bits based on device directory tree level + * and device context format. + * See IOMMU Specification, Chapter 2. Data Structures. 
+ * - if extended device-context format is used: + * device index: [23:15][14:6][5:0] + * - if base device-context format is used: + * device index: [23:16][15:7][6:0] + */ + const int split = depth * 9 + 6 + dc_fmt; + addr |= ((ctx->devid >> split) << 3) & ~TARGET_PAGE_MASK; + if (dma_memory_read(s->target_as, addr, &de, sizeof(de), + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { + return RISCV_IOMMU_FQ_CAUSE_DDT_LOAD_FAULT; + } + le64_to_cpus(&de); + if (!(de & RISCV_IOMMU_DDTE_VALID)) { + /* invalid directory entry */ + return RISCV_IOMMU_FQ_CAUSE_DDT_INVALID; + } + if (de & ~(RISCV_IOMMU_DDTE_PPN | RISCV_IOMMU_DDTE_VALID)) { + /* reserved bits set */ + return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED; + } + addr = PPN_PHYS(get_field(de, RISCV_IOMMU_DDTE_PPN)); + } + + riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK); + + /* index into device context entry page */ + addr |= (ctx->devid * dc_len) & ~TARGET_PAGE_MASK; + + memset(&dc, 0, sizeof(dc)); + if (dma_memory_read(s->target_as, addr, &dc, dc_len, + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { + return RISCV_IOMMU_FQ_CAUSE_DDT_LOAD_FAULT; + } + + /* Set translation context. */ + ctx->tc = le64_to_cpu(dc.tc); + ctx->gatp = le64_to_cpu(dc.iohgatp); + ctx->satp = le64_to_cpu(dc.fsc); + ctx->ta = le64_to_cpu(dc.ta); + ctx->msiptp = le64_to_cpu(dc.msiptp); + ctx->msi_addr_mask = le64_to_cpu(dc.msi_addr_mask); + ctx->msi_addr_pattern = le64_to_cpu(dc.msi_addr_pattern); + + if (!(ctx->tc & RISCV_IOMMU_DC_TC_V)) { + return RISCV_IOMMU_FQ_CAUSE_DDT_INVALID; + } + + if (!riscv_iommu_validate_device_ctx(s, ctx)) { + return RISCV_IOMMU_FQ_CAUSE_DDT_MISCONFIGURED; + } + + /* FSC field checks */ + mode = get_field(ctx->satp, RISCV_IOMMU_DC_FSC_MODE); + addr = PPN_PHYS(get_field(ctx->satp, RISCV_IOMMU_DC_FSC_PPN)); + + if (!(ctx->tc & RISCV_IOMMU_DC_TC_PDTV)) { + if (ctx->process_id != RISCV_IOMMU_NOPROCID) { + /* PID is disabled */ + return RISCV_IOMMU_FQ_CAUSE_TTYPE_BLOCKED; + } + if (mode > RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV57) { + /* Invalid translation mode */ + return RISCV_IOMMU_FQ_CAUSE_DDT_INVALID; + } + return 0; + } + + if (ctx->process_id == RISCV_IOMMU_NOPROCID) { + if (!(ctx->tc & RISCV_IOMMU_DC_TC_DPE)) { + /* No default process_id enabled, set BARE mode */ + ctx->satp = 0ULL; + return 0; + } else { + /* Use default process_id #0 */ + ctx->process_id = 0; + } + } + + if (mode == RISCV_IOMMU_DC_FSC_MODE_BARE) { + /* No S-Stage translation, done. */ + return 0; + } + + /* FSC.TC.PDTV enabled */ + if (mode > RISCV_IOMMU_DC_FSC_PDTP_MODE_PD20) { + /* Invalid PDTP.MODE */ + return RISCV_IOMMU_FQ_CAUSE_PDT_MISCONFIGURED; + } + + for (depth = mode - RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8; depth-- > 0; ) { + riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK); + + /* + * Select process id index bits based on process directory tree + * level. See IOMMU Specification, 2.2. Process-Directory-Table. 
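+ *
+ * For example, a three-level PD20 table is indexed below with
+ * process_id[19:17] and process_id[16:8] for the two non-leaf levels
+ * (split = 17 and 8), and process_id[7:0] selects the 16-byte leaf
+ * entry; PD17 skips the first of those levels and PD8 indexes the
+ * leaf table directly.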
+ */ + const int split = depth * 9 + 8; + addr |= ((ctx->process_id >> split) << 3) & ~TARGET_PAGE_MASK; + if (dma_memory_read(s->target_as, addr, &de, sizeof(de), + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { + return RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT; + } + le64_to_cpus(&de); + if (!(de & RISCV_IOMMU_PDTE_VALID)) { + return RISCV_IOMMU_FQ_CAUSE_PDT_INVALID; + } + addr = PPN_PHYS(get_field(de, RISCV_IOMMU_PDTE_PPN)); + } + + riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK); + + /* Leaf entry in PDT */ + addr |= (ctx->process_id << 4) & ~TARGET_PAGE_MASK; + if (dma_memory_read(s->target_as, addr, &dc.ta, sizeof(uint64_t) * 2, + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { + return RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT; + } + + /* Use FSC and TA from process directory entry. */ + ctx->ta = le64_to_cpu(dc.ta); + ctx->satp = le64_to_cpu(dc.fsc); + + if (!(ctx->ta & RISCV_IOMMU_PC_TA_V)) { + return RISCV_IOMMU_FQ_CAUSE_PDT_INVALID; + } + + if (!riscv_iommu_validate_process_ctx(s, ctx)) { + return RISCV_IOMMU_FQ_CAUSE_PDT_MISCONFIGURED; + } + + return 0; +} + +/* Translation Context cache support */ +static gboolean riscv_iommu_ctx_equal(gconstpointer v1, gconstpointer v2) +{ + RISCVIOMMUContext *c1 = (RISCVIOMMUContext *) v1; + RISCVIOMMUContext *c2 = (RISCVIOMMUContext *) v2; + return c1->devid == c2->devid && + c1->process_id == c2->process_id; +} + +static guint riscv_iommu_ctx_hash(gconstpointer v) +{ + RISCVIOMMUContext *ctx = (RISCVIOMMUContext *) v; + /* + * Generate simple hash of (process_id, devid) + * assuming 24-bit wide devid. + */ + return (guint)(ctx->devid) + ((guint)(ctx->process_id) << 24); +} + +static void riscv_iommu_ctx_inval_devid_procid(gpointer key, gpointer value, + gpointer data) +{ + RISCVIOMMUContext *ctx = (RISCVIOMMUContext *) value; + RISCVIOMMUContext *arg = (RISCVIOMMUContext *) data; + if (ctx->tc & RISCV_IOMMU_DC_TC_V && + ctx->devid == arg->devid && + ctx->process_id == arg->process_id) { + ctx->tc &= ~RISCV_IOMMU_DC_TC_V; + } +} + +static void riscv_iommu_ctx_inval_devid(gpointer key, gpointer value, + gpointer data) +{ + RISCVIOMMUContext *ctx = (RISCVIOMMUContext *) value; + RISCVIOMMUContext *arg = (RISCVIOMMUContext *) data; + if (ctx->tc & RISCV_IOMMU_DC_TC_V && + ctx->devid == arg->devid) { + ctx->tc &= ~RISCV_IOMMU_DC_TC_V; + } +} + +static void riscv_iommu_ctx_inval_all(gpointer key, gpointer value, + gpointer data) +{ + RISCVIOMMUContext *ctx = (RISCVIOMMUContext *) value; + if (ctx->tc & RISCV_IOMMU_DC_TC_V) { + ctx->tc &= ~RISCV_IOMMU_DC_TC_V; + } +} + +static void riscv_iommu_ctx_inval(RISCVIOMMUState *s, GHFunc func, + uint32_t devid, uint32_t process_id) +{ + GHashTable *ctx_cache; + RISCVIOMMUContext key = { + .devid = devid, + .process_id = process_id, + }; + ctx_cache = g_hash_table_ref(s->ctx_cache); + g_hash_table_foreach(ctx_cache, func, &key); + g_hash_table_unref(ctx_cache); +} + +/* Find or allocate translation context for a given {device_id, process_id} */ +static RISCVIOMMUContext *riscv_iommu_ctx(RISCVIOMMUState *s, + unsigned devid, unsigned process_id, + void **ref) +{ + GHashTable *ctx_cache; + RISCVIOMMUContext *ctx; + RISCVIOMMUContext key = { + .devid = devid, + .process_id = process_id, + }; + + ctx_cache = g_hash_table_ref(s->ctx_cache); + ctx = g_hash_table_lookup(ctx_cache, &key); + + if (ctx && (ctx->tc & RISCV_IOMMU_DC_TC_V)) { + *ref = ctx_cache; + return ctx; + } + + ctx = g_new0(RISCVIOMMUContext, 1); + ctx->devid = devid; + ctx->process_id = process_id; + + int fault = riscv_iommu_ctx_fetch(s, ctx); + if 
(!fault) { + if (g_hash_table_size(ctx_cache) >= LIMIT_CACHE_CTX) { + g_hash_table_unref(ctx_cache); + ctx_cache = g_hash_table_new_full(riscv_iommu_ctx_hash, + riscv_iommu_ctx_equal, + g_free, NULL); + g_hash_table_ref(ctx_cache); + g_hash_table_unref(qatomic_xchg(&s->ctx_cache, ctx_cache)); + } + g_hash_table_add(ctx_cache, ctx); + *ref = ctx_cache; + return ctx; + } + + g_hash_table_unref(ctx_cache); + *ref = NULL; + + riscv_iommu_report_fault(s, ctx, RISCV_IOMMU_FQ_TTYPE_UADDR_RD, + fault, !!process_id, 0, 0); + + g_free(ctx); + return NULL; +} + +static void riscv_iommu_ctx_put(RISCVIOMMUState *s, void *ref) +{ + if (ref) { + g_hash_table_unref((GHashTable *)ref); + } +} + +/* Find or allocate address space for a given device */ +static AddressSpace *riscv_iommu_space(RISCVIOMMUState *s, uint32_t devid) +{ + RISCVIOMMUSpace *as; + + /* FIXME: PCIe bus remapping for attached endpoints. */ + devid |= s->bus << 8; + + QLIST_FOREACH(as, &s->spaces, list) { + if (as->devid == devid) { + break; + } + } + + if (as == NULL) { + char name[64]; + as = g_new0(RISCVIOMMUSpace, 1); + + as->iommu = s; + as->devid = devid; + + snprintf(name, sizeof(name), "riscv-iommu-%04x:%02x.%d-iova", + PCI_BUS_NUM(as->devid), PCI_SLOT(as->devid), PCI_FUNC(as->devid)); + + /* IOVA address space, untranslated addresses */ + memory_region_init_iommu(&as->iova_mr, sizeof(as->iova_mr), + TYPE_RISCV_IOMMU_MEMORY_REGION, + OBJECT(as), "riscv_iommu", UINT64_MAX); + address_space_init(&as->iova_as, MEMORY_REGION(&as->iova_mr), name); + + QLIST_INSERT_HEAD(&s->spaces, as, list); + + trace_riscv_iommu_new(s->parent_obj.id, PCI_BUS_NUM(as->devid), + PCI_SLOT(as->devid), PCI_FUNC(as->devid)); + } + return &as->iova_as; +} + +/* Translation Object cache support */ +static gboolean riscv_iommu_iot_equal(gconstpointer v1, gconstpointer v2) +{ + RISCVIOMMUEntry *t1 = (RISCVIOMMUEntry *) v1; + RISCVIOMMUEntry *t2 = (RISCVIOMMUEntry *) v2; + return t1->gscid == t2->gscid && t1->pscid == t2->pscid && + t1->iova == t2->iova && t1->tag == t2->tag; +} + +static guint riscv_iommu_iot_hash(gconstpointer v) +{ + RISCVIOMMUEntry *t = (RISCVIOMMUEntry *) v; + return (guint)t->iova; +} + +/* GV: 0 AV: 0 PSCV: 0 GVMA: 0 */ +/* GV: 0 AV: 0 GVMA: 1 */ +static +void riscv_iommu_iot_inval_all(gpointer key, gpointer value, gpointer data) +{ + RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value; + RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data; + if (iot->tag == arg->tag) { + iot->perm = IOMMU_NONE; + } +} + +/* GV: 0 AV: 0 PSCV: 1 GVMA: 0 */ +static +void riscv_iommu_iot_inval_pscid(gpointer key, gpointer value, gpointer data) +{ + RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value; + RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data; + if (iot->tag == arg->tag && + iot->pscid == arg->pscid) { + iot->perm = IOMMU_NONE; + } +} + +/* GV: 0 AV: 1 PSCV: 0 GVMA: 0 */ +static +void riscv_iommu_iot_inval_iova(gpointer key, gpointer value, gpointer data) +{ + RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value; + RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data; + if (iot->tag == arg->tag && + iot->iova == arg->iova) { + iot->perm = IOMMU_NONE; + } +} + +/* GV: 0 AV: 1 PSCV: 1 GVMA: 0 */ +static void riscv_iommu_iot_inval_pscid_iova(gpointer key, gpointer value, + gpointer data) +{ + RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value; + RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data; + if (iot->tag == arg->tag && + iot->pscid == arg->pscid && + iot->iova == arg->iova) { + iot->perm = IOMMU_NONE; + } +} + +/* GV: 1 AV: 0 PSCV: 0 GVMA: 0 */ +/* GV: 1 AV: 0 GVMA: 1 */ 
+static +void riscv_iommu_iot_inval_gscid(gpointer key, gpointer value, gpointer data) +{ + RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value; + RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data; + if (iot->tag == arg->tag && + iot->gscid == arg->gscid) { + iot->perm = IOMMU_NONE; + } +} + +/* GV: 1 AV: 0 PSCV: 1 GVMA: 0 */ +static void riscv_iommu_iot_inval_gscid_pscid(gpointer key, gpointer value, + gpointer data) +{ + RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value; + RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data; + if (iot->tag == arg->tag && + iot->gscid == arg->gscid && + iot->pscid == arg->pscid) { + iot->perm = IOMMU_NONE; + } +} + +/* GV: 1 AV: 1 PSCV: 0 GVMA: 0 */ +/* GV: 1 AV: 1 GVMA: 1 */ +static void riscv_iommu_iot_inval_gscid_iova(gpointer key, gpointer value, + gpointer data) +{ + RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value; + RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data; + if (iot->tag == arg->tag && + iot->gscid == arg->gscid && + iot->iova == arg->iova) { + iot->perm = IOMMU_NONE; + } +} + +/* GV: 1 AV: 1 PSCV: 1 GVMA: 0 */ +static void riscv_iommu_iot_inval_gscid_pscid_iova(gpointer key, gpointer value, + gpointer data) +{ + RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value; + RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data; + if (iot->tag == arg->tag && + iot->gscid == arg->gscid && + iot->pscid == arg->pscid && + iot->iova == arg->iova) { + iot->perm = IOMMU_NONE; + } +} + +/* caller should keep ref-count for iot_cache object */ +static RISCVIOMMUEntry *riscv_iommu_iot_lookup(RISCVIOMMUContext *ctx, + GHashTable *iot_cache, hwaddr iova, RISCVIOMMUTransTag transtag) +{ + RISCVIOMMUEntry key = { + .tag = transtag, + .gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID), + .pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID), + .iova = PPN_DOWN(iova), + }; + return g_hash_table_lookup(iot_cache, &key); +} + +/* caller should keep ref-count for iot_cache object */ +static void riscv_iommu_iot_update(RISCVIOMMUState *s, + GHashTable *iot_cache, RISCVIOMMUEntry *iot) +{ + if (!s->iot_limit) { + return; + } + + if (g_hash_table_size(s->iot_cache) >= s->iot_limit) { + iot_cache = g_hash_table_new_full(riscv_iommu_iot_hash, + riscv_iommu_iot_equal, + g_free, NULL); + g_hash_table_unref(qatomic_xchg(&s->iot_cache, iot_cache)); + } + g_hash_table_add(iot_cache, iot); +} + +static void riscv_iommu_iot_inval(RISCVIOMMUState *s, GHFunc func, + uint32_t gscid, uint32_t pscid, hwaddr iova, RISCVIOMMUTransTag transtag) +{ + GHashTable *iot_cache; + RISCVIOMMUEntry key = { + .tag = transtag, + .gscid = gscid, + .pscid = pscid, + .iova = PPN_DOWN(iova), + }; + + iot_cache = g_hash_table_ref(s->iot_cache); + g_hash_table_foreach(iot_cache, func, &key); + g_hash_table_unref(iot_cache); +} + +static RISCVIOMMUTransTag riscv_iommu_get_transtag(RISCVIOMMUContext *ctx) +{ + uint64_t satp = get_field(ctx->satp, RISCV_IOMMU_ATP_MODE_FIELD); + uint64_t gatp = get_field(ctx->gatp, RISCV_IOMMU_ATP_MODE_FIELD); + + if (satp == RISCV_IOMMU_DC_FSC_MODE_BARE) { + return (gatp == RISCV_IOMMU_DC_IOHGATP_MODE_BARE) ? + RISCV_IOMMU_TRANS_TAG_BY : RISCV_IOMMU_TRANS_TAG_VG; + } else { + return (gatp == RISCV_IOMMU_DC_IOHGATP_MODE_BARE) ? 
+ RISCV_IOMMU_TRANS_TAG_SS : RISCV_IOMMU_TRANS_TAG_VN; + } +} + +static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx, + IOMMUTLBEntry *iotlb, bool enable_cache) +{ + RISCVIOMMUTransTag transtag = riscv_iommu_get_transtag(ctx); + RISCVIOMMUEntry *iot; + IOMMUAccessFlags perm; + bool enable_pid; + bool enable_pri; + GHashTable *iot_cache; + int fault; + + riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_URQ); + + iot_cache = g_hash_table_ref(s->iot_cache); + /* + * TC[32] is reserved for custom extensions, used here to temporarily + * enable automatic page-request generation for ATS queries. + */ + enable_pri = (iotlb->perm == IOMMU_NONE) && (ctx->tc & BIT_ULL(32)); + enable_pid = (ctx->tc & RISCV_IOMMU_DC_TC_PDTV); + + /* Check for ATS request. */ + if (iotlb->perm == IOMMU_NONE) { + riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_ATS_RQ); + /* Check if ATS is disabled. */ + if (!(ctx->tc & RISCV_IOMMU_DC_TC_EN_ATS)) { + enable_pri = false; + fault = RISCV_IOMMU_FQ_CAUSE_TTYPE_BLOCKED; + goto done; + } + } + + iot = riscv_iommu_iot_lookup(ctx, iot_cache, iotlb->iova, transtag); + perm = iot ? iot->perm : IOMMU_NONE; + if (perm != IOMMU_NONE) { + iotlb->translated_addr = PPN_PHYS(iot->phys); + iotlb->addr_mask = ~TARGET_PAGE_MASK; + iotlb->perm = perm; + fault = 0; + goto done; + } + + riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_TLB_MISS); + + /* Translate using device directory / page table information. */ + fault = riscv_iommu_spa_fetch(s, ctx, iotlb); + + if (!fault && iotlb->target_as == &s->trap_as) { + /* Do not cache trapped MSI translations */ + goto done; + } + + /* + * We made an implementation choice to not cache identity-mapped + * translations, as allowed by the specification, to avoid + * translation cache evictions for other devices sharing the + * IOMMU hardware model. + */ + if (!fault && iotlb->translated_addr != iotlb->iova && enable_cache) { + iot = g_new0(RISCVIOMMUEntry, 1); + iot->iova = PPN_DOWN(iotlb->iova); + iot->phys = PPN_DOWN(iotlb->translated_addr); + iot->gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID); + iot->pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID); + iot->perm = iotlb->perm; + iot->tag = transtag; + riscv_iommu_iot_update(s, iot_cache, iot); + } + +done: + g_hash_table_unref(iot_cache); + + if (enable_pri && fault) { + struct riscv_iommu_pq_record pr = {0}; + if (enable_pid) { + pr.hdr = set_field(RISCV_IOMMU_PREQ_HDR_PV, + RISCV_IOMMU_PREQ_HDR_PID, ctx->process_id); + } + pr.hdr = set_field(pr.hdr, RISCV_IOMMU_PREQ_HDR_DID, ctx->devid); + pr.payload = (iotlb->iova & TARGET_PAGE_MASK) | + RISCV_IOMMU_PREQ_PAYLOAD_M; + riscv_iommu_pri(s, &pr); + return fault; + } + + if (fault) { + unsigned ttype = RISCV_IOMMU_FQ_TTYPE_PCIE_ATS_REQ; + + if (iotlb->perm & IOMMU_RW) { + ttype = RISCV_IOMMU_FQ_TTYPE_UADDR_WR; + } else if (iotlb->perm & IOMMU_RO) { + ttype = RISCV_IOMMU_FQ_TTYPE_UADDR_RD; + } + + riscv_iommu_report_fault(s, ctx, ttype, fault, enable_pid, + iotlb->iova, iotlb->translated_addr); + return fault; + } + + return 0; +} + +/* IOMMU Command Interface */ +static MemTxResult riscv_iommu_iofence(RISCVIOMMUState *s, bool notify, + uint64_t addr, uint32_t data) +{ + /* + * ATS processing in this implementation of the IOMMU is synchronous, + * no need to wait for completions here. 
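+ *
+ * When a completion notification is requested (IOFENCE.AV set), the
+ * 32-bit DATA payload is written directly to the supplied completion
+ * address; see the caller in riscv_iommu_process_cq_tail().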
+ */ + if (!notify) { + return MEMTX_OK; + } + + return dma_memory_write(s->target_as, addr, &data, sizeof(data), + MEMTXATTRS_UNSPECIFIED); +} + +static void riscv_iommu_ats(RISCVIOMMUState *s, + struct riscv_iommu_command *cmd, IOMMUNotifierFlag flag, + IOMMUAccessFlags perm, + void (*trace_fn)(const char *id)) +{ + RISCVIOMMUSpace *as = NULL; + IOMMUNotifier *n; + IOMMUTLBEvent event; + uint32_t pid; + uint32_t devid; + const bool pv = cmd->dword0 & RISCV_IOMMU_CMD_ATS_PV; + + if (cmd->dword0 & RISCV_IOMMU_CMD_ATS_DSV) { + /* Use device segment and requester id */ + devid = get_field(cmd->dword0, + RISCV_IOMMU_CMD_ATS_DSEG | RISCV_IOMMU_CMD_ATS_RID); + } else { + devid = get_field(cmd->dword0, RISCV_IOMMU_CMD_ATS_RID); + } + + pid = get_field(cmd->dword0, RISCV_IOMMU_CMD_ATS_PID); + + QLIST_FOREACH(as, &s->spaces, list) { + if (as->devid == devid) { + break; + } + } + + if (!as || !as->notifier) { + return; + } + + event.type = flag; + event.entry.perm = perm; + event.entry.target_as = s->target_as; + + IOMMU_NOTIFIER_FOREACH(n, &as->iova_mr) { + if (!pv || n->iommu_idx == pid) { + event.entry.iova = n->start; + event.entry.addr_mask = n->end - n->start; + trace_fn(as->iova_mr.parent_obj.name); + memory_region_notify_iommu_one(n, &event); + } + } +} + +static void riscv_iommu_ats_inval(RISCVIOMMUState *s, + struct riscv_iommu_command *cmd) +{ + return riscv_iommu_ats(s, cmd, IOMMU_NOTIFIER_DEVIOTLB_UNMAP, IOMMU_NONE, + trace_riscv_iommu_ats_inval); +} + +static void riscv_iommu_ats_prgr(RISCVIOMMUState *s, + struct riscv_iommu_command *cmd) +{ + unsigned resp_code = get_field(cmd->dword1, + RISCV_IOMMU_CMD_ATS_PRGR_RESP_CODE); + + /* Using the access flag to carry response code information */ + IOMMUAccessFlags perm = resp_code ? IOMMU_NONE : IOMMU_RW; + return riscv_iommu_ats(s, cmd, IOMMU_NOTIFIER_MAP, perm, + trace_riscv_iommu_ats_prgr); +} + +static void riscv_iommu_process_ddtp(RISCVIOMMUState *s) +{ + uint64_t old_ddtp = s->ddtp; + uint64_t new_ddtp = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_DDTP); + unsigned new_mode = get_field(new_ddtp, RISCV_IOMMU_DDTP_MODE); + unsigned old_mode = get_field(old_ddtp, RISCV_IOMMU_DDTP_MODE); + bool ok = false; + + /* + * Check for allowed DDTP.MODE transitions: + * {OFF, BARE} -> {OFF, BARE, 1LVL, 2LVL, 3LVL} + * {1LVL, 2LVL, 3LVL} -> {OFF, BARE} + */ + if (new_mode == old_mode || + new_mode == RISCV_IOMMU_DDTP_MODE_OFF || + new_mode == RISCV_IOMMU_DDTP_MODE_BARE) { + ok = true; + } else if (new_mode == RISCV_IOMMU_DDTP_MODE_1LVL || + new_mode == RISCV_IOMMU_DDTP_MODE_2LVL || + new_mode == RISCV_IOMMU_DDTP_MODE_3LVL) { + ok = old_mode == RISCV_IOMMU_DDTP_MODE_OFF || + old_mode == RISCV_IOMMU_DDTP_MODE_BARE; + } + + if (ok) { + /* clear reserved and busy bits, report back sanitized version */ + new_ddtp = set_field(new_ddtp & RISCV_IOMMU_DDTP_PPN, + RISCV_IOMMU_DDTP_MODE, new_mode); + } else { + new_ddtp = old_ddtp; + } + s->ddtp = new_ddtp; + + riscv_iommu_reg_set64(s, RISCV_IOMMU_REG_DDTP, new_ddtp); +} + +/* Command function and opcode field. 
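+ * Within the combined OPCODE|FUNC field, the 7-bit opcode occupies
+ * the low bits and the function code sits immediately above it, so
+ * RISCV_IOMMU_CMD() builds the value that is matched against
+ * get_field(cmd.dword0, RISCV_IOMMU_CMD_OPCODE | RISCV_IOMMU_CMD_FUNC)
+ * in the command loop below.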
*/ +#define RISCV_IOMMU_CMD(func, op) (((func) << 7) | (op)) + +static void riscv_iommu_process_cq_tail(RISCVIOMMUState *s) +{ + struct riscv_iommu_command cmd; + MemTxResult res; + dma_addr_t addr; + uint32_t tail, head, ctrl; + uint64_t cmd_opcode; + GHFunc func; + + ctrl = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_CQCSR); + tail = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_CQT) & s->cq_mask; + head = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_CQH) & s->cq_mask; + + /* Check for pending error or queue processing disabled */ + if (!(ctrl & RISCV_IOMMU_CQCSR_CQON) || + !!(ctrl & (RISCV_IOMMU_CQCSR_CMD_ILL | RISCV_IOMMU_CQCSR_CQMF))) { + return; + } + + while (tail != head) { + addr = s->cq_addr + head * sizeof(cmd); + res = dma_memory_read(s->target_as, addr, &cmd, sizeof(cmd), + MEMTXATTRS_UNSPECIFIED); + + if (res != MEMTX_OK) { + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_CQCSR, + RISCV_IOMMU_CQCSR_CQMF, 0); + goto fault; + } + + trace_riscv_iommu_cmd(s->parent_obj.id, cmd.dword0, cmd.dword1); + + cmd_opcode = get_field(cmd.dword0, + RISCV_IOMMU_CMD_OPCODE | RISCV_IOMMU_CMD_FUNC); + + switch (cmd_opcode) { + case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IOFENCE_FUNC_C, + RISCV_IOMMU_CMD_IOFENCE_OPCODE): + res = riscv_iommu_iofence(s, + cmd.dword0 & RISCV_IOMMU_CMD_IOFENCE_AV, cmd.dword1 << 2, + get_field(cmd.dword0, RISCV_IOMMU_CMD_IOFENCE_DATA)); + + if (res != MEMTX_OK) { + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_CQCSR, + RISCV_IOMMU_CQCSR_CQMF, 0); + goto fault; + } + break; + + case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IOTINVAL_FUNC_GVMA, + RISCV_IOMMU_CMD_IOTINVAL_OPCODE): + { + bool gv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_GV); + bool av = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_AV); + bool pscv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_PSCV); + uint32_t gscid = get_field(cmd.dword0, + RISCV_IOMMU_CMD_IOTINVAL_GSCID); + uint32_t pscid = get_field(cmd.dword0, + RISCV_IOMMU_CMD_IOTINVAL_PSCID); + hwaddr iova = (cmd.dword1 << 2) & TARGET_PAGE_MASK; + + if (pscv) { + /* illegal command arguments IOTINVAL.GVMA & PSCV == 1 */ + goto cmd_ill; + } + + func = riscv_iommu_iot_inval_all; + + if (gv) { + func = (av) ? riscv_iommu_iot_inval_gscid_iova : + riscv_iommu_iot_inval_gscid; + } + + riscv_iommu_iot_inval( + s, func, gscid, pscid, iova, RISCV_IOMMU_TRANS_TAG_VG); + + riscv_iommu_iot_inval( + s, func, gscid, pscid, iova, RISCV_IOMMU_TRANS_TAG_VN); + break; + } + + case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IOTINVAL_FUNC_VMA, + RISCV_IOMMU_CMD_IOTINVAL_OPCODE): + { + bool gv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_GV); + bool av = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_AV); + bool pscv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_PSCV); + uint32_t gscid = get_field(cmd.dword0, + RISCV_IOMMU_CMD_IOTINVAL_GSCID); + uint32_t pscid = get_field(cmd.dword0, + RISCV_IOMMU_CMD_IOTINVAL_PSCID); + hwaddr iova = (cmd.dword1 << 2) & TARGET_PAGE_MASK; + RISCVIOMMUTransTag transtag; + + if (gv) { + transtag = RISCV_IOMMU_TRANS_TAG_VN; + if (pscv) { + func = (av) ? riscv_iommu_iot_inval_gscid_pscid_iova : + riscv_iommu_iot_inval_gscid_pscid; + } else { + func = (av) ? riscv_iommu_iot_inval_gscid_iova : + riscv_iommu_iot_inval_gscid; + } + } else { + transtag = RISCV_IOMMU_TRANS_TAG_SS; + if (pscv) { + func = (av) ? riscv_iommu_iot_inval_pscid_iova : + riscv_iommu_iot_inval_pscid; + } else { + func = (av) ? 
riscv_iommu_iot_inval_iova : + riscv_iommu_iot_inval_all; + } + } + + riscv_iommu_iot_inval(s, func, gscid, pscid, iova, transtag); + break; + } + + case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_DDT, + RISCV_IOMMU_CMD_IODIR_OPCODE): + if (!(cmd.dword0 & RISCV_IOMMU_CMD_IODIR_DV)) { + /* invalidate all device context cache mappings */ + func = riscv_iommu_ctx_inval_all; + } else { + /* invalidate all device context matching DID */ + func = riscv_iommu_ctx_inval_devid; + } + riscv_iommu_ctx_inval(s, func, + get_field(cmd.dword0, RISCV_IOMMU_CMD_IODIR_DID), 0); + break; + + case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_PDT, + RISCV_IOMMU_CMD_IODIR_OPCODE): + if (!(cmd.dword0 & RISCV_IOMMU_CMD_IODIR_DV)) { + /* illegal command arguments IODIR_PDT & DV == 0 */ + goto cmd_ill; + } else { + func = riscv_iommu_ctx_inval_devid_procid; + } + riscv_iommu_ctx_inval(s, func, + get_field(cmd.dword0, RISCV_IOMMU_CMD_IODIR_DID), + get_field(cmd.dword0, RISCV_IOMMU_CMD_IODIR_PID)); + break; + + /* ATS commands */ + case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_ATS_FUNC_INVAL, + RISCV_IOMMU_CMD_ATS_OPCODE): + if (!s->enable_ats) { + goto cmd_ill; + } + + riscv_iommu_ats_inval(s, &cmd); + break; + + case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_ATS_FUNC_PRGR, + RISCV_IOMMU_CMD_ATS_OPCODE): + if (!s->enable_ats) { + goto cmd_ill; + } + + riscv_iommu_ats_prgr(s, &cmd); + break; + + default: + cmd_ill: + /* Invalid instruction, do not advance instruction index. */ + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_CQCSR, + RISCV_IOMMU_CQCSR_CMD_ILL, 0); + goto fault; + } + + /* Advance and update head pointer after command completes. */ + head = (head + 1) & s->cq_mask; + riscv_iommu_reg_set32(s, RISCV_IOMMU_REG_CQH, head); + } + return; + +fault: + if (ctrl & RISCV_IOMMU_CQCSR_CIE) { + riscv_iommu_notify(s, RISCV_IOMMU_INTR_CQ); + } +} + +static void riscv_iommu_process_cq_control(RISCVIOMMUState *s) +{ + uint64_t base; + uint32_t ctrl_set = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_CQCSR); + uint32_t ctrl_clr; + bool enable = !!(ctrl_set & RISCV_IOMMU_CQCSR_CQEN); + bool active = !!(ctrl_set & RISCV_IOMMU_CQCSR_CQON); + + if (enable && !active) { + base = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_CQB); + s->cq_mask = (2ULL << get_field(base, RISCV_IOMMU_CQB_LOG2SZ)) - 1; + s->cq_addr = PPN_PHYS(get_field(base, RISCV_IOMMU_CQB_PPN)); + stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_CQT], ~s->cq_mask); + stl_le_p(&s->regs_rw[RISCV_IOMMU_REG_CQH], 0); + stl_le_p(&s->regs_rw[RISCV_IOMMU_REG_CQT], 0); + ctrl_set = RISCV_IOMMU_CQCSR_CQON; + ctrl_clr = RISCV_IOMMU_CQCSR_BUSY | RISCV_IOMMU_CQCSR_CQMF | + RISCV_IOMMU_CQCSR_CMD_ILL | RISCV_IOMMU_CQCSR_CMD_TO | + RISCV_IOMMU_CQCSR_FENCE_W_IP; + } else if (!enable && active) { + stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_CQT], ~0); + ctrl_set = 0; + ctrl_clr = RISCV_IOMMU_CQCSR_BUSY | RISCV_IOMMU_CQCSR_CQON; + } else { + ctrl_set = 0; + ctrl_clr = RISCV_IOMMU_CQCSR_BUSY; + } + + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_CQCSR, ctrl_set, ctrl_clr); +} + +static void riscv_iommu_process_fq_control(RISCVIOMMUState *s) +{ + uint64_t base; + uint32_t ctrl_set = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_FQCSR); + uint32_t ctrl_clr; + bool enable = !!(ctrl_set & RISCV_IOMMU_FQCSR_FQEN); + bool active = !!(ctrl_set & RISCV_IOMMU_FQCSR_FQON); + + if (enable && !active) { + base = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_FQB); + s->fq_mask = (2ULL << get_field(base, RISCV_IOMMU_FQB_LOG2SZ)) - 1; + s->fq_addr = PPN_PHYS(get_field(base, RISCV_IOMMU_FQB_PPN)); + stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_FQH], 
~s->fq_mask); + stl_le_p(&s->regs_rw[RISCV_IOMMU_REG_FQH], 0); + stl_le_p(&s->regs_rw[RISCV_IOMMU_REG_FQT], 0); + ctrl_set = RISCV_IOMMU_FQCSR_FQON; + ctrl_clr = RISCV_IOMMU_FQCSR_BUSY | RISCV_IOMMU_FQCSR_FQMF | + RISCV_IOMMU_FQCSR_FQOF; + } else if (!enable && active) { + stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_FQH], ~0); + ctrl_set = 0; + ctrl_clr = RISCV_IOMMU_FQCSR_BUSY | RISCV_IOMMU_FQCSR_FQON; + } else { + ctrl_set = 0; + ctrl_clr = RISCV_IOMMU_FQCSR_BUSY; + } + + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_FQCSR, ctrl_set, ctrl_clr); +} + +static void riscv_iommu_process_pq_control(RISCVIOMMUState *s) +{ + uint64_t base; + uint32_t ctrl_set = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_PQCSR); + uint32_t ctrl_clr; + bool enable = !!(ctrl_set & RISCV_IOMMU_PQCSR_PQEN); + bool active = !!(ctrl_set & RISCV_IOMMU_PQCSR_PQON); + + if (enable && !active) { + base = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_PQB); + s->pq_mask = (2ULL << get_field(base, RISCV_IOMMU_PQB_LOG2SZ)) - 1; + s->pq_addr = PPN_PHYS(get_field(base, RISCV_IOMMU_PQB_PPN)); + stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_PQH], ~s->pq_mask); + stl_le_p(&s->regs_rw[RISCV_IOMMU_REG_PQH], 0); + stl_le_p(&s->regs_rw[RISCV_IOMMU_REG_PQT], 0); + ctrl_set = RISCV_IOMMU_PQCSR_PQON; + ctrl_clr = RISCV_IOMMU_PQCSR_BUSY | RISCV_IOMMU_PQCSR_PQMF | + RISCV_IOMMU_PQCSR_PQOF; + } else if (!enable && active) { + stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_PQH], ~0); + ctrl_set = 0; + ctrl_clr = RISCV_IOMMU_PQCSR_BUSY | RISCV_IOMMU_PQCSR_PQON; + } else { + ctrl_set = 0; + ctrl_clr = RISCV_IOMMU_PQCSR_BUSY; + } + + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_PQCSR, ctrl_set, ctrl_clr); +} + +static void riscv_iommu_process_dbg(RISCVIOMMUState *s) +{ + uint64_t iova = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_TR_REQ_IOVA); + uint64_t ctrl = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_TR_REQ_CTL); + unsigned devid = get_field(ctrl, RISCV_IOMMU_TR_REQ_CTL_DID); + unsigned pid = get_field(ctrl, RISCV_IOMMU_TR_REQ_CTL_PID); + RISCVIOMMUContext *ctx; + void *ref; + + if (!(ctrl & RISCV_IOMMU_TR_REQ_CTL_GO_BUSY)) { + return; + } + + ctx = riscv_iommu_ctx(s, devid, pid, &ref); + if (ctx == NULL) { + riscv_iommu_reg_set64(s, RISCV_IOMMU_REG_TR_RESPONSE, + RISCV_IOMMU_TR_RESPONSE_FAULT | + (RISCV_IOMMU_FQ_CAUSE_DMA_DISABLED << 10)); + } else { + IOMMUTLBEntry iotlb = { + .iova = iova, + .perm = ctrl & RISCV_IOMMU_TR_REQ_CTL_NW ? 
IOMMU_RO : IOMMU_RW, + .addr_mask = ~0, + .target_as = NULL, + }; + int fault = riscv_iommu_translate(s, ctx, &iotlb, false); + if (fault) { + iova = RISCV_IOMMU_TR_RESPONSE_FAULT | (((uint64_t) fault) << 10); + } else { + iova = iotlb.translated_addr & ~iotlb.addr_mask; + iova = set_field(0, RISCV_IOMMU_TR_RESPONSE_PPN, PPN_DOWN(iova)); + } + riscv_iommu_reg_set64(s, RISCV_IOMMU_REG_TR_RESPONSE, iova); + } + + riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_TR_REQ_CTL, 0, + RISCV_IOMMU_TR_REQ_CTL_GO_BUSY); + riscv_iommu_ctx_put(s, ref); +} + +typedef void riscv_iommu_process_fn(RISCVIOMMUState *s); + +static void riscv_iommu_update_icvec(RISCVIOMMUState *s, uint64_t data) +{ + uint64_t icvec = 0; + + icvec |= MIN(data & RISCV_IOMMU_ICVEC_CIV, + s->icvec_avail_vectors & RISCV_IOMMU_ICVEC_CIV); + + icvec |= MIN(data & RISCV_IOMMU_ICVEC_FIV, + s->icvec_avail_vectors & RISCV_IOMMU_ICVEC_FIV); + + icvec |= MIN(data & RISCV_IOMMU_ICVEC_PMIV, + s->icvec_avail_vectors & RISCV_IOMMU_ICVEC_PMIV); + + icvec |= MIN(data & RISCV_IOMMU_ICVEC_PIV, + s->icvec_avail_vectors & RISCV_IOMMU_ICVEC_PIV); + + trace_riscv_iommu_icvec_write(data, icvec); + + riscv_iommu_reg_set64(s, RISCV_IOMMU_REG_ICVEC, icvec); +} + +static void riscv_iommu_update_ipsr(RISCVIOMMUState *s, uint64_t data) +{ + uint32_t cqcsr, fqcsr, pqcsr; + uint32_t ipsr_set = 0; + uint32_t ipsr_clr = 0; + + if (data & RISCV_IOMMU_IPSR_CIP) { + cqcsr = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_CQCSR); + + if (cqcsr & RISCV_IOMMU_CQCSR_CIE && + (cqcsr & RISCV_IOMMU_CQCSR_FENCE_W_IP || + cqcsr & RISCV_IOMMU_CQCSR_CMD_ILL || + cqcsr & RISCV_IOMMU_CQCSR_CMD_TO || + cqcsr & RISCV_IOMMU_CQCSR_CQMF)) { + ipsr_set |= RISCV_IOMMU_IPSR_CIP; + } else { + ipsr_clr |= RISCV_IOMMU_IPSR_CIP; + } + } else { + ipsr_clr |= RISCV_IOMMU_IPSR_CIP; + } + + if (data & RISCV_IOMMU_IPSR_FIP) { + fqcsr = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_FQCSR); + + if (fqcsr & RISCV_IOMMU_FQCSR_FIE && + (fqcsr & RISCV_IOMMU_FQCSR_FQOF || + fqcsr & RISCV_IOMMU_FQCSR_FQMF)) { + ipsr_set |= RISCV_IOMMU_IPSR_FIP; + } else { + ipsr_clr |= RISCV_IOMMU_IPSR_FIP; + } + } else { + ipsr_clr |= RISCV_IOMMU_IPSR_FIP; + } + + if (data & RISCV_IOMMU_IPSR_PIP) { + pqcsr = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_PQCSR); + + if (pqcsr & RISCV_IOMMU_PQCSR_PIE && + (pqcsr & RISCV_IOMMU_PQCSR_PQOF || + pqcsr & RISCV_IOMMU_PQCSR_PQMF)) { + ipsr_set |= RISCV_IOMMU_IPSR_PIP; + } else { + ipsr_clr |= RISCV_IOMMU_IPSR_PIP; + } + } else { + ipsr_clr |= RISCV_IOMMU_IPSR_PIP; + } + + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IPSR, ipsr_set, ipsr_clr); +} + +static void riscv_iommu_process_hpm_writes(RISCVIOMMUState *s, + uint32_t regb, + bool prev_cy_inh) +{ + switch (regb) { + case RISCV_IOMMU_REG_IOCOUNTINH: + riscv_iommu_process_iocntinh_cy(s, prev_cy_inh); + break; + + case RISCV_IOMMU_REG_IOHPMCYCLES: + case RISCV_IOMMU_REG_IOHPMCYCLES + 4: + riscv_iommu_process_hpmcycle_write(s); + break; + + case RISCV_IOMMU_REG_IOHPMEVT_BASE ... + RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4: + riscv_iommu_process_hpmevt_write(s, regb & ~7); + break; + } +} + +/* + * Write the resulting value of 'data' for the reg specified + * by 'reg_addr', after considering read-only/read-write/write-clear + * bits, in the pointer 'dest'. + * + * The result is written in little-endian. 
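+ *
+ * Illustrative example (made-up values): with ro = 0xF0, wc = 0x01,
+ * a current value of 0xA5 and data = 0x3D, the read-only high nibble
+ * is preserved, the writable bits take the new value, and the
+ * write-1-to-clear bit 0 is cleared because 1 was written to it,
+ * giving 0xAC.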
+ */ +static void riscv_iommu_write_reg_val(RISCVIOMMUState *s, + void *dest, hwaddr reg_addr, + int size, uint64_t data) +{ + uint64_t ro = ldn_le_p(&s->regs_ro[reg_addr], size); + uint64_t wc = ldn_le_p(&s->regs_wc[reg_addr], size); + uint64_t rw = ldn_le_p(&s->regs_rw[reg_addr], size); + + stn_le_p(dest, size, ((rw & ro) | (data & ~ro)) & ~(data & wc)); +} + +static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size, + MemTxAttrs attrs) +{ + riscv_iommu_process_fn *process_fn = NULL; + RISCVIOMMUState *s = opaque; + uint32_t regb = addr & ~3; + uint32_t busy = 0; + uint64_t val = 0; + bool cy_inh = false; + + if ((addr & (size - 1)) != 0) { + /* Unsupported MMIO alignment or access size */ + return MEMTX_ERROR; + } + + if (addr + size > RISCV_IOMMU_REG_MSI_CONFIG) { + /* Unsupported MMIO access location. */ + return MEMTX_ACCESS_ERROR; + } + + /* Track actionable MMIO write. */ + switch (regb) { + case RISCV_IOMMU_REG_DDTP: + case RISCV_IOMMU_REG_DDTP + 4: + process_fn = riscv_iommu_process_ddtp; + regb = RISCV_IOMMU_REG_DDTP; + busy = RISCV_IOMMU_DDTP_BUSY; + break; + + case RISCV_IOMMU_REG_CQT: + process_fn = riscv_iommu_process_cq_tail; + break; + + case RISCV_IOMMU_REG_CQCSR: + process_fn = riscv_iommu_process_cq_control; + busy = RISCV_IOMMU_CQCSR_BUSY; + break; + + case RISCV_IOMMU_REG_FQCSR: + process_fn = riscv_iommu_process_fq_control; + busy = RISCV_IOMMU_FQCSR_BUSY; + break; + + case RISCV_IOMMU_REG_PQCSR: + process_fn = riscv_iommu_process_pq_control; + busy = RISCV_IOMMU_PQCSR_BUSY; + break; + + case RISCV_IOMMU_REG_ICVEC: + case RISCV_IOMMU_REG_IPSR: + /* + * ICVEC and IPSR have special read/write procedures. We'll + * call their respective helpers and exit. + */ + riscv_iommu_write_reg_val(s, &val, addr, size, data); + + /* + * 'val' is stored as LE. Switch to host endianess + * before using it. + */ + val = le64_to_cpu(val); + + if (regb == RISCV_IOMMU_REG_ICVEC) { + riscv_iommu_update_icvec(s, val); + } else { + riscv_iommu_update_ipsr(s, val); + } + + return MEMTX_OK; + + case RISCV_IOMMU_REG_TR_REQ_CTL: + process_fn = riscv_iommu_process_dbg; + regb = RISCV_IOMMU_REG_TR_REQ_CTL; + busy = RISCV_IOMMU_TR_REQ_CTL_GO_BUSY; + break; + + case RISCV_IOMMU_REG_IOCOUNTINH: + if (addr != RISCV_IOMMU_REG_IOCOUNTINH) { + break; + } + /* Store previous value of CY bit. */ + cy_inh = !!(riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTINH) & + RISCV_IOMMU_IOCOUNTINH_CY); + break; + + + default: + break; + } + + /* + * Registers update might be not synchronized with core logic. + * If system software updates register when relevant BUSY bit + * is set IOMMU behavior of additional writes to the register + * is UNSPECIFIED. + */ + riscv_iommu_write_reg_val(s, &s->regs_rw[addr], addr, size, data); + + /* Busy flag update, MSB 4-byte register. */ + if (busy) { + uint32_t rw = ldl_le_p(&s->regs_rw[regb]); + stl_le_p(&s->regs_rw[regb], rw | busy); + } + + /* Process HPM writes and update any internal state if needed. */ + if (regb >= RISCV_IOMMU_REG_IOCOUNTOVF && + regb <= (RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4)) { + riscv_iommu_process_hpm_writes(s, regb, cy_inh); + } + + if (process_fn) { + process_fn(s); + } + + return MEMTX_OK; +} + +static MemTxResult riscv_iommu_mmio_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, MemTxAttrs attrs) +{ + RISCVIOMMUState *s = opaque; + uint64_t val = -1; + uint8_t *ptr; + + if ((addr & (size - 1)) != 0) { + /* Unsupported MMIO alignment. 
*/ + return MEMTX_ERROR; + } + + if (addr + size > RISCV_IOMMU_REG_MSI_CONFIG) { + return MEMTX_ACCESS_ERROR; + } + + /* Compute cycle register value. */ + if ((addr & ~7) == RISCV_IOMMU_REG_IOHPMCYCLES) { + val = riscv_iommu_hpmcycle_read(s); + ptr = (uint8_t *)&val + (addr & 7); + } else if ((addr & ~3) == RISCV_IOMMU_REG_IOCOUNTOVF) { + /* + * Software can read RISCV_IOMMU_REG_IOCOUNTOVF before timer + * callback completes. In which case CY_OF bit in + * RISCV_IOMMU_IOHPMCYCLES_OVF would be 0. Here we take the + * CY_OF bit state from RISCV_IOMMU_REG_IOHPMCYCLES register as + * it's not dependent over the timer callback and is computed + * from cycle overflow. + */ + val = ldq_le_p(&s->regs_rw[addr]); + val |= (riscv_iommu_hpmcycle_read(s) & RISCV_IOMMU_IOHPMCYCLES_OVF) + ? RISCV_IOMMU_IOCOUNTOVF_CY + : 0; + ptr = (uint8_t *)&val + (addr & 3); + } else { + ptr = &s->regs_rw[addr]; + } + + val = ldn_le_p(ptr, size); + + *data = val; + + return MEMTX_OK; +} + +static const MemoryRegionOps riscv_iommu_mmio_ops = { + .read_with_attrs = riscv_iommu_mmio_read, + .write_with_attrs = riscv_iommu_mmio_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 8, + .unaligned = false, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + } +}; + +/* + * Translations matching MSI pattern check are redirected to "riscv-iommu-trap" + * memory region as untranslated address, for additional MSI/MRIF interception + * by IOMMU interrupt remapping implementation. + * Note: Device emulation code generating an MSI is expected to provide a valid + * memory transaction attributes with requested_id set. + */ +static MemTxResult riscv_iommu_trap_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size, MemTxAttrs attrs) +{ + RISCVIOMMUState* s = (RISCVIOMMUState *)opaque; + RISCVIOMMUContext *ctx; + MemTxResult res; + void *ref; + uint32_t devid = attrs.requester_id; + + if (attrs.unspecified) { + return MEMTX_ACCESS_ERROR; + } + + /* FIXME: PCIe bus remapping for attached endpoints. 
*/ + devid |= s->bus << 8; + + ctx = riscv_iommu_ctx(s, devid, 0, &ref); + if (ctx == NULL) { + res = MEMTX_ACCESS_ERROR; + } else { + res = riscv_iommu_msi_write(s, ctx, addr, data, size, attrs); + } + riscv_iommu_ctx_put(s, ref); + return res; +} + +static MemTxResult riscv_iommu_trap_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, MemTxAttrs attrs) +{ + return MEMTX_ACCESS_ERROR; +} + +static const MemoryRegionOps riscv_iommu_trap_ops = { + .read_with_attrs = riscv_iommu_trap_read, + .write_with_attrs = riscv_iommu_trap_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 8, + .unaligned = true, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + } +}; + +void riscv_iommu_set_cap_igs(RISCVIOMMUState *s, riscv_iommu_igs_mode mode) +{ + s->cap = set_field(s->cap, RISCV_IOMMU_CAP_IGS, mode); +} + +static void riscv_iommu_instance_init(Object *obj) +{ + RISCVIOMMUState *s = RISCV_IOMMU(obj); + + /* Enable translation debug interface */ + s->cap = RISCV_IOMMU_CAP_DBG; + + /* Report QEMU target physical address space limits */ + s->cap = set_field(s->cap, RISCV_IOMMU_CAP_PAS, + TARGET_PHYS_ADDR_SPACE_BITS); + + /* TODO: method to report supported PID bits */ + s->pid_bits = 8; /* restricted to size of MemTxAttrs.pid */ + s->cap |= RISCV_IOMMU_CAP_PD8; + + /* register storage */ + s->regs_rw = g_new0(uint8_t, RISCV_IOMMU_REG_SIZE); + s->regs_ro = g_new0(uint8_t, RISCV_IOMMU_REG_SIZE); + s->regs_wc = g_new0(uint8_t, RISCV_IOMMU_REG_SIZE); + + /* Mark all registers read-only */ + memset(s->regs_ro, 0xff, RISCV_IOMMU_REG_SIZE); + + /* Device translation context cache */ + s->ctx_cache = g_hash_table_new_full(riscv_iommu_ctx_hash, + riscv_iommu_ctx_equal, + g_free, NULL); + + s->iot_cache = g_hash_table_new_full(riscv_iommu_iot_hash, + riscv_iommu_iot_equal, + g_free, NULL); + + s->iommus.le_next = NULL; + s->iommus.le_prev = NULL; + QLIST_INIT(&s->spaces); +} + +static void riscv_iommu_realize(DeviceState *dev, Error **errp) +{ + RISCVIOMMUState *s = RISCV_IOMMU(dev); + + s->cap |= s->version & RISCV_IOMMU_CAP_VERSION; + if (s->enable_msi) { + s->cap |= RISCV_IOMMU_CAP_MSI_FLAT | RISCV_IOMMU_CAP_MSI_MRIF; + } + if (s->enable_ats) { + s->cap |= RISCV_IOMMU_CAP_ATS; + } + if (s->enable_s_stage) { + s->cap |= RISCV_IOMMU_CAP_SV32 | RISCV_IOMMU_CAP_SV39 | + RISCV_IOMMU_CAP_SV48 | RISCV_IOMMU_CAP_SV57; + } + if (s->enable_g_stage) { + s->cap |= RISCV_IOMMU_CAP_SV32X4 | RISCV_IOMMU_CAP_SV39X4 | + RISCV_IOMMU_CAP_SV48X4 | RISCV_IOMMU_CAP_SV57X4 | + RISCV_IOMMU_CAP_SVRSW60T59B; + } + + if (s->hpm_cntrs > 0) { + /* Clip number of HPM counters to maximum supported (31). */ + if (s->hpm_cntrs > RISCV_IOMMU_IOCOUNT_NUM) { + s->hpm_cntrs = RISCV_IOMMU_IOCOUNT_NUM; + } + /* Enable hardware performance monitor interface */ + s->cap |= RISCV_IOMMU_CAP_HPM; + } + + /* Out-of-reset translation mode: OFF (DMA disabled) BARE (passthrough) */ + s->ddtp = set_field(0, RISCV_IOMMU_DDTP_MODE, s->enable_off ? + RISCV_IOMMU_DDTP_MODE_OFF : RISCV_IOMMU_DDTP_MODE_BARE); + + /* + * Register complete MMIO space, including MSI/PBA registers. + * Note, PCIDevice implementation will add overlapping MR for MSI/PBA, + * managed directly by the PCIDevice implementation. 
+ */ + memory_region_init_io(&s->regs_mr, OBJECT(dev), &riscv_iommu_mmio_ops, s, + "riscv-iommu-regs", RISCV_IOMMU_REG_SIZE); + + /* Set power-on register state */ + stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_CAP], s->cap); + stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_FCTL], 0); + stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_FCTL], + ~(RISCV_IOMMU_FCTL_BE | RISCV_IOMMU_FCTL_WSI)); + stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_DDTP], + ~(RISCV_IOMMU_DDTP_PPN | RISCV_IOMMU_DDTP_MODE)); + stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_CQB], + ~(RISCV_IOMMU_CQB_LOG2SZ | RISCV_IOMMU_CQB_PPN)); + stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_FQB], + ~(RISCV_IOMMU_FQB_LOG2SZ | RISCV_IOMMU_FQB_PPN)); + stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_PQB], + ~(RISCV_IOMMU_PQB_LOG2SZ | RISCV_IOMMU_PQB_PPN)); + stl_le_p(&s->regs_wc[RISCV_IOMMU_REG_CQCSR], RISCV_IOMMU_CQCSR_CQMF | + RISCV_IOMMU_CQCSR_CMD_TO | RISCV_IOMMU_CQCSR_CMD_ILL); + stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_CQCSR], RISCV_IOMMU_CQCSR_CQON | + RISCV_IOMMU_CQCSR_BUSY); + stl_le_p(&s->regs_wc[RISCV_IOMMU_REG_FQCSR], RISCV_IOMMU_FQCSR_FQMF | + RISCV_IOMMU_FQCSR_FQOF); + stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_FQCSR], RISCV_IOMMU_FQCSR_FQON | + RISCV_IOMMU_FQCSR_BUSY); + stl_le_p(&s->regs_wc[RISCV_IOMMU_REG_PQCSR], RISCV_IOMMU_PQCSR_PQMF | + RISCV_IOMMU_PQCSR_PQOF); + stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_PQCSR], RISCV_IOMMU_PQCSR_PQON | + RISCV_IOMMU_PQCSR_BUSY); + stl_le_p(&s->regs_wc[RISCV_IOMMU_REG_IPSR], ~0); + stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_ICVEC], 0); + stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_DDTP], s->ddtp); + /* If debug registers enabled. */ + if (s->cap & RISCV_IOMMU_CAP_DBG) { + stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_TR_REQ_IOVA], 0); + stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_TR_REQ_CTL], + RISCV_IOMMU_TR_REQ_CTL_GO_BUSY); + } + + /* If HPM registers are enabled. */ + if (s->cap & RISCV_IOMMU_CAP_HPM) { + /* +1 for cycle counter bit. */ + stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_IOCOUNTINH], + ~((2 << s->hpm_cntrs) - 1)); + stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_IOHPMCYCLES], 0); + memset(&s->regs_ro[RISCV_IOMMU_REG_IOHPMCTR_BASE], + 0x00, s->hpm_cntrs * 8); + memset(&s->regs_ro[RISCV_IOMMU_REG_IOHPMEVT_BASE], + 0x00, s->hpm_cntrs * 8); + } + + /* Memory region for downstream access, if specified. */ + if (s->target_mr) { + s->target_as = g_new0(AddressSpace, 1); + address_space_init(s->target_as, s->target_mr, + "riscv-iommu-downstream"); + } else { + /* Fallback to global system memory. */ + s->target_as = &address_space_memory; + } + + /* Memory region for untranslated MRIF/MSI writes */ + memory_region_init_io(&s->trap_mr, OBJECT(dev), &riscv_iommu_trap_ops, s, + "riscv-iommu-trap", ~0ULL); + address_space_init(&s->trap_as, &s->trap_mr, "riscv-iommu-trap-as"); + + if (s->cap & RISCV_IOMMU_CAP_HPM) { + s->hpm_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, riscv_iommu_hpm_timer_cb, s); + s->hpm_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal); + } +} + +static void riscv_iommu_unrealize(DeviceState *dev) +{ + RISCVIOMMUState *s = RISCV_IOMMU(dev); + + g_hash_table_unref(s->iot_cache); + g_hash_table_unref(s->ctx_cache); + + if (s->cap & RISCV_IOMMU_CAP_HPM) { + g_hash_table_unref(s->hpm_event_ctr_map); + timer_free(s->hpm_timer); + } +} + +void riscv_iommu_reset(RISCVIOMMUState *s) +{ + uint32_t reg_clr; + int ddtp_mode; + + /* + * Clear DDTP while setting DDTP_mode back to user + * initial setting. + */ + ddtp_mode = s->enable_off ? 
+ RISCV_IOMMU_DDTP_MODE_OFF : RISCV_IOMMU_DDTP_MODE_BARE; + s->ddtp = set_field(0, RISCV_IOMMU_DDTP_MODE, ddtp_mode); + riscv_iommu_reg_set64(s, RISCV_IOMMU_REG_DDTP, s->ddtp); + + reg_clr = RISCV_IOMMU_CQCSR_CQEN | RISCV_IOMMU_CQCSR_CIE | + RISCV_IOMMU_CQCSR_CQON | RISCV_IOMMU_CQCSR_BUSY; + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_CQCSR, 0, reg_clr); + + reg_clr = RISCV_IOMMU_FQCSR_FQEN | RISCV_IOMMU_FQCSR_FIE | + RISCV_IOMMU_FQCSR_FQON | RISCV_IOMMU_FQCSR_BUSY; + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_FQCSR, 0, reg_clr); + + reg_clr = RISCV_IOMMU_PQCSR_PQEN | RISCV_IOMMU_PQCSR_PIE | + RISCV_IOMMU_PQCSR_PQON | RISCV_IOMMU_PQCSR_BUSY; + riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_PQCSR, 0, reg_clr); + + riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_TR_REQ_CTL, 0, + RISCV_IOMMU_TR_REQ_CTL_GO_BUSY); + + riscv_iommu_reg_set32(s, RISCV_IOMMU_REG_IPSR, 0); + + g_hash_table_remove_all(s->ctx_cache); + g_hash_table_remove_all(s->iot_cache); +} + +static const Property riscv_iommu_properties[] = { + DEFINE_PROP_UINT32("version", RISCVIOMMUState, version, + RISCV_IOMMU_SPEC_DOT_VER), + DEFINE_PROP_UINT32("bus", RISCVIOMMUState, bus, 0x0), + DEFINE_PROP_UINT32("ioatc-limit", RISCVIOMMUState, iot_limit, + LIMIT_CACHE_IOT), + DEFINE_PROP_BOOL("intremap", RISCVIOMMUState, enable_msi, TRUE), + DEFINE_PROP_BOOL("ats", RISCVIOMMUState, enable_ats, TRUE), + DEFINE_PROP_BOOL("off", RISCVIOMMUState, enable_off, TRUE), + DEFINE_PROP_BOOL("s-stage", RISCVIOMMUState, enable_s_stage, TRUE), + DEFINE_PROP_BOOL("g-stage", RISCVIOMMUState, enable_g_stage, TRUE), + DEFINE_PROP_LINK("downstream-mr", RISCVIOMMUState, target_mr, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_UINT8("hpm-counters", RISCVIOMMUState, hpm_cntrs, + RISCV_IOMMU_IOCOUNT_NUM), +}; + +static void riscv_iommu_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + /* internal device for riscv-iommu-{pci/sys}, not user-creatable */ + dc->user_creatable = false; + dc->realize = riscv_iommu_realize; + dc->unrealize = riscv_iommu_unrealize; + device_class_set_props(dc, riscv_iommu_properties); +} + +static const TypeInfo riscv_iommu_info = { + .name = TYPE_RISCV_IOMMU, + .parent = TYPE_DEVICE, + .instance_size = sizeof(RISCVIOMMUState), + .instance_init = riscv_iommu_instance_init, + .class_init = riscv_iommu_class_init, +}; + +static const char *IOMMU_FLAG_STR[] = { + "NA", + "RO", + "WR", + "RW", +}; + +/* RISC-V IOMMU Memory Region - Address Translation Space */ +static IOMMUTLBEntry riscv_iommu_memory_region_translate( + IOMMUMemoryRegion *iommu_mr, hwaddr addr, + IOMMUAccessFlags flag, int iommu_idx) +{ + RISCVIOMMUSpace *as = container_of(iommu_mr, RISCVIOMMUSpace, iova_mr); + RISCVIOMMUContext *ctx; + void *ref; + IOMMUTLBEntry iotlb = { + .iova = addr, + .target_as = as->iommu->target_as, + .addr_mask = ~0ULL, + .perm = flag, + }; + + ctx = riscv_iommu_ctx(as->iommu, as->devid, iommu_idx, &ref); + if (ctx == NULL) { + /* Translation disabled or invalid. */ + iotlb.addr_mask = 0; + iotlb.perm = IOMMU_NONE; + } else if (riscv_iommu_translate(as->iommu, ctx, &iotlb, true)) { + /* Translation disabled or fault reported. */ + iotlb.addr_mask = 0; + iotlb.perm = IOMMU_NONE; + } + + /* Trace all dma translations with original access flags. 
*/ + trace_riscv_iommu_dma(as->iommu->parent_obj.id, PCI_BUS_NUM(as->devid), + PCI_SLOT(as->devid), PCI_FUNC(as->devid), iommu_idx, + IOMMU_FLAG_STR[flag & IOMMU_RW], iotlb.iova, + iotlb.translated_addr); + + riscv_iommu_ctx_put(as->iommu, ref); + + return iotlb; +} + +static int riscv_iommu_memory_region_notify( + IOMMUMemoryRegion *iommu_mr, IOMMUNotifierFlag old, + IOMMUNotifierFlag new, Error **errp) +{ + RISCVIOMMUSpace *as = container_of(iommu_mr, RISCVIOMMUSpace, iova_mr); + + if (old == IOMMU_NOTIFIER_NONE) { + as->notifier = true; + trace_riscv_iommu_notifier_add(iommu_mr->parent_obj.name); + } else if (new == IOMMU_NOTIFIER_NONE) { + as->notifier = false; + trace_riscv_iommu_notifier_del(iommu_mr->parent_obj.name); + } + + return 0; +} + +static inline bool pci_is_iommu(PCIDevice *pdev) +{ + return pci_get_word(pdev->config + PCI_CLASS_DEVICE) == 0x0806; +} + +static AddressSpace *riscv_iommu_find_as(PCIBus *bus, void *opaque, int devfn) +{ + RISCVIOMMUState *s = (RISCVIOMMUState *) opaque; + PCIDevice *pdev = pci_find_device(bus, pci_bus_num(bus), devfn); + AddressSpace *as = NULL; + + if (pdev && pci_is_iommu(pdev)) { + return s->target_as; + } + + /* Find first registered IOMMU device */ + while (s->iommus.le_prev) { + s = *(s->iommus.le_prev); + } + + /* Find first matching IOMMU */ + while (s != NULL && as == NULL) { + as = riscv_iommu_space(s, PCI_BUILD_BDF(pci_bus_num(bus), devfn)); + s = s->iommus.le_next; + } + + return as ? as : &address_space_memory; +} + +static const PCIIOMMUOps riscv_iommu_ops = { + .get_address_space = riscv_iommu_find_as, +}; + +void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus, + Error **errp) +{ + if (bus->iommu_ops && + bus->iommu_ops->get_address_space == riscv_iommu_find_as) { + /* Allow multiple IOMMUs on the same PCIe bus, link known devices */ + RISCVIOMMUState *last = (RISCVIOMMUState *)bus->iommu_opaque; + QLIST_INSERT_AFTER(last, iommu, iommus); + } else if (!bus->iommu_ops && !bus->iommu_opaque) { + pci_setup_iommu(bus, &riscv_iommu_ops, iommu); + } else { + error_setg(errp, "can't register secondary IOMMU for PCI bus #%d", + pci_bus_num(bus)); + } +} + +static int riscv_iommu_memory_region_index(IOMMUMemoryRegion *iommu_mr, + MemTxAttrs attrs) +{ + return attrs.unspecified ? 
RISCV_IOMMU_NOPROCID : (int)attrs.pid;
+}
+
+static int riscv_iommu_memory_region_index_len(IOMMUMemoryRegion *iommu_mr)
+{
+    RISCVIOMMUSpace *as = container_of(iommu_mr, RISCVIOMMUSpace, iova_mr);
+    return 1 << as->iommu->pid_bits;
+}
+
+static void riscv_iommu_memory_region_init(ObjectClass *klass, const void *data)
+{
+    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
+
+    imrc->translate = riscv_iommu_memory_region_translate;
+    imrc->notify_flag_changed = riscv_iommu_memory_region_notify;
+    imrc->attrs_to_index = riscv_iommu_memory_region_index;
+    imrc->num_indexes = riscv_iommu_memory_region_index_len;
+}
+
+static const TypeInfo riscv_iommu_memory_region_info = {
+    .parent = TYPE_IOMMU_MEMORY_REGION,
+    .name = TYPE_RISCV_IOMMU_MEMORY_REGION,
+    .class_init = riscv_iommu_memory_region_init,
+};
+
+static void riscv_iommu_register_mr_types(void)
+{
+    type_register_static(&riscv_iommu_memory_region_info);
+    type_register_static(&riscv_iommu_info);
+}
+
+type_init(riscv_iommu_register_mr_types);
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
new file mode 100644
index 00000000000..a31aa62144f
--- /dev/null
+++ b/hw/riscv/riscv-iommu.h
@@ -0,0 +1,157 @@
+/*
+ * QEMU emulation of a RISC-V IOMMU
+ *
+ * Copyright (C) 2022-2023 Rivos Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see .
+ */ + +#ifndef HW_RISCV_IOMMU_STATE_H +#define HW_RISCV_IOMMU_STATE_H + +#include "qom/object.h" +#include "hw/qdev-properties.h" +#include "system/dma.h" +#include "hw/riscv/iommu.h" +#include "hw/riscv/riscv-iommu-bits.h" + +typedef enum riscv_iommu_igs_modes riscv_iommu_igs_mode; + +struct RISCVIOMMUState { + /*< private >*/ + DeviceState parent_obj; + + /*< public >*/ + uint32_t version; /* Reported interface version number */ + uint32_t pid_bits; /* process identifier width */ + uint32_t bus; /* PCI bus mapping for non-root endpoints */ + + uint64_t cap; /* IOMMU supported capabilities */ + uint64_t fctl; /* IOMMU enabled features */ + uint64_t icvec_avail_vectors; /* Available interrupt vectors in ICVEC */ + + bool enable_off; /* Enable out-of-reset OFF mode (DMA disabled) */ + bool enable_msi; /* Enable MSI remapping */ + bool enable_ats; /* Enable ATS support */ + bool enable_s_stage; /* Enable S/VS-Stage translation */ + bool enable_g_stage; /* Enable G-Stage translation */ + + /* IOMMU Internal State */ + uint64_t ddtp; /* Validated Device Directory Tree Root Pointer */ + + dma_addr_t cq_addr; /* Command queue base physical address */ + dma_addr_t fq_addr; /* Fault/event queue base physical address */ + dma_addr_t pq_addr; /* Page request queue base physical address */ + + uint32_t cq_mask; /* Command queue index bit mask */ + uint32_t fq_mask; /* Fault/event queue index bit mask */ + uint32_t pq_mask; /* Page request queue index bit mask */ + + /* interrupt notifier */ + void (*notify)(RISCVIOMMUState *iommu, unsigned vector); + + /* IOMMU target address space */ + AddressSpace *target_as; + MemoryRegion *target_mr; + + /* MSI / MRIF access trap */ + AddressSpace trap_as; + MemoryRegion trap_mr; + + GHashTable *ctx_cache; /* Device translation Context Cache */ + + GHashTable *iot_cache; /* IO Translated Address Cache */ + unsigned iot_limit; /* IO Translation Cache size limit */ + + /* MMIO Hardware Interface */ + MemoryRegion regs_mr; + uint8_t *regs_rw; /* register state (user write) */ + uint8_t *regs_wc; /* write-1-to-clear mask */ + uint8_t *regs_ro; /* read-only mask */ + + QLIST_ENTRY(RISCVIOMMUState) iommus; + QLIST_HEAD(, RISCVIOMMUSpace) spaces; + + /* HPM cycle counter */ + QEMUTimer *hpm_timer; + uint64_t hpmcycle_val; /* Current value of cycle register */ + uint64_t hpmcycle_prev; /* Saved value of QEMU_CLOCK_VIRTUAL clock */ + uint64_t irq_overflow_left; /* Value beyond INT64_MAX after overflow */ + + /* HPM event counters */ + GHashTable *hpm_event_ctr_map; /* Mapping of events to counters */ + uint8_t hpm_cntrs; +}; + +void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus, + Error **errp); +void riscv_iommu_set_cap_igs(RISCVIOMMUState *s, riscv_iommu_igs_mode mode); +void riscv_iommu_reset(RISCVIOMMUState *s); +void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type); + +typedef struct RISCVIOMMUContext RISCVIOMMUContext; +/* Device translation context state. */ +struct RISCVIOMMUContext { + uint64_t devid:24; /* Requester Id, AKA device_id */ + uint64_t process_id:20; /* Process ID. 
PASID for PCIe */ + uint64_t tc; /* Translation Control */ + uint64_t ta; /* Translation Attributes */ + uint64_t satp; /* S-Stage address translation and protection */ + uint64_t gatp; /* G-Stage address translation and protection */ + uint64_t msi_addr_mask; /* MSI filtering - address mask */ + uint64_t msi_addr_pattern; /* MSI filtering - address pattern */ + uint64_t msiptp; /* MSI redirection page table pointer */ +}; + +/* private helpers */ + +/* Register helper functions */ +static inline uint32_t riscv_iommu_reg_mod32(RISCVIOMMUState *s, + unsigned idx, uint32_t set, uint32_t clr) +{ + uint32_t val = ldl_le_p(s->regs_rw + idx); + stl_le_p(s->regs_rw + idx, (val & ~clr) | set); + return val; +} + +static inline void riscv_iommu_reg_set32(RISCVIOMMUState *s, unsigned idx, + uint32_t set) +{ + stl_le_p(s->regs_rw + idx, set); +} + +static inline uint32_t riscv_iommu_reg_get32(RISCVIOMMUState *s, unsigned idx) +{ + return ldl_le_p(s->regs_rw + idx); +} + +static inline uint64_t riscv_iommu_reg_mod64(RISCVIOMMUState *s, unsigned idx, + uint64_t set, uint64_t clr) +{ + uint64_t val = ldq_le_p(s->regs_rw + idx); + stq_le_p(s->regs_rw + idx, (val & ~clr) | set); + return val; +} + +static inline void riscv_iommu_reg_set64(RISCVIOMMUState *s, unsigned idx, + uint64_t set) +{ + stq_le_p(s->regs_rw + idx, set); +} + +static inline uint64_t riscv_iommu_reg_get64(RISCVIOMMUState *s, + unsigned idx) +{ + return ldq_le_p(s->regs_rw + idx); +} +#endif diff --git a/hw/riscv/riscv_hart.c b/hw/riscv/riscv_hart.c index 613ea2aaa0b..7f2676008cf 100644 --- a/hw/riscv/riscv_hart.c +++ b/hw/riscv/riscv_hart.c @@ -21,19 +21,38 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/module.h" -#include "sysemu/reset.h" +#include "system/reset.h" +#include "system/qtest.h" +#include "qemu/cutils.h" #include "hw/sysbus.h" #include "target/riscv/cpu.h" #include "hw/qdev-properties.h" #include "hw/riscv/riscv_hart.h" +#include "qemu/error-report.h" -static Property riscv_harts_props[] = { +static const Property riscv_harts_props[] = { DEFINE_PROP_UINT32("num-harts", RISCVHartArrayState, num_harts, 1), DEFINE_PROP_UINT32("hartid-base", RISCVHartArrayState, hartid_base, 0), DEFINE_PROP_STRING("cpu-type", RISCVHartArrayState, cpu_type), DEFINE_PROP_UINT64("resetvec", RISCVHartArrayState, resetvec, DEFAULT_RSTVEC), - DEFINE_PROP_END_OF_LIST(), + + /* + * Smrnmi implementation-defined interrupt and exception trap handlers. + * + * When an RNMI interrupt is detected, the hart then enters M-mode and + * jumps to the address defined by "rnmi-interrupt-vector". + * + * When the hart encounters an exception while executing in M-mode with + * the mnstatus.NMIE bit clear, the hart then jumps to the address + * defined by "rnmi-exception-vector". 
+ */ + DEFINE_PROP_ARRAY("rnmi-interrupt-vector", RISCVHartArrayState, + num_rnmi_irqvec, rnmi_irqvec, qdev_prop_uint64, + uint64_t), + DEFINE_PROP_ARRAY("rnmi-exception-vector", RISCVHartArrayState, + num_rnmi_excpvec, rnmi_excpvec, qdev_prop_uint64, + uint64_t), }; static void riscv_harts_cpu_reset(void *opaque) @@ -42,11 +61,85 @@ static void riscv_harts_cpu_reset(void *opaque) cpu_reset(CPU(cpu)); } +#ifndef CONFIG_USER_ONLY +static void csr_call(char *cmd, uint64_t cpu_num, int csrno, uint64_t *val) +{ + RISCVCPU *cpu = RISCV_CPU(cpu_by_arch_id(cpu_num)); + CPURISCVState *env = &cpu->env; + + int ret = RISCV_EXCP_NONE; + if (strcmp(cmd, "get_csr") == 0) { + ret = riscv_csrr(env, csrno, (target_ulong *)val); + } else if (strcmp(cmd, "set_csr") == 0) { + ret = riscv_csrrw(env, csrno, NULL, *(target_ulong *)val, + MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0); + } + + g_assert(ret == RISCV_EXCP_NONE); +} + +static bool csr_qtest_callback(CharBackend *chr, gchar **words) +{ + if (strcmp(words[0], "csr") == 0) { + + uint64_t cpu; + uint64_t val; + int rc, csr; + + rc = qemu_strtou64(words[2], NULL, 0, &cpu); + g_assert(rc == 0); + rc = qemu_strtoi(words[3], NULL, 0, &csr); + g_assert(rc == 0); + rc = qemu_strtou64(words[4], NULL, 0, &val); + g_assert(rc == 0); + csr_call(words[1], cpu, csr, &val); + + qtest_sendf(chr, "OK 0 "TARGET_FMT_lx"\n", (target_ulong)val); + + return true; + } + + return false; +} + +static void riscv_cpu_register_csr_qtest_callback(void) +{ + static bool first = true; + if (first) { + first = false; + qtest_set_command_cb(csr_qtest_callback); + } +} +#endif + static bool riscv_hart_realize(RISCVHartArrayState *s, int idx, char *cpu_type, Error **errp) { object_initialize_child(OBJECT(s), "harts[*]", &s->harts[idx], cpu_type); qdev_prop_set_uint64(DEVICE(&s->harts[idx]), "resetvec", s->resetvec); + + if (s->harts[idx].cfg.ext_smrnmi) { + if (idx < s->num_rnmi_irqvec) { + qdev_prop_set_uint64(DEVICE(&s->harts[idx]), + "rnmi-interrupt-vector", s->rnmi_irqvec[idx]); + } + + if (idx < s->num_rnmi_excpvec) { + qdev_prop_set_uint64(DEVICE(&s->harts[idx]), + "rnmi-exception-vector", s->rnmi_excpvec[idx]); + } + } else { + if (s->num_rnmi_irqvec > 0) { + warn_report_once("rnmi-interrupt-vector property is ignored " + "because Smrnmi extension is not enabled."); + } + + if (s->num_rnmi_excpvec > 0) { + warn_report_once("rnmi-exception-vector property is ignored " + "because Smrnmi extension is not enabled."); + } + } + s->harts[idx].env.mhartid = s->hartid_base + idx; qemu_register_reset(riscv_harts_cpu_reset, &s->harts[idx]); return qdev_realize(DEVICE(&s->harts[idx]), NULL, errp); @@ -59,6 +152,10 @@ static void riscv_harts_realize(DeviceState *dev, Error **errp) s->harts = g_new0(RISCVCPU, s->num_harts); +#ifndef CONFIG_USER_ONLY + riscv_cpu_register_csr_qtest_callback(); +#endif + for (n = 0; n < s->num_harts; n++) { if (!riscv_hart_realize(s, n, s->cpu_type, errp)) { return; @@ -66,7 +163,7 @@ static void riscv_harts_realize(DeviceState *dev, Error **errp) } } -static void riscv_harts_class_init(ObjectClass *klass, void *data) +static void riscv_harts_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/riscv/shakti_c.c b/hw/riscv/shakti_c.c index 3888034c2b9..3e7f4411727 100644 --- a/hw/riscv/shakti_c.c +++ b/hw/riscv/shakti_c.c @@ -23,9 +23,9 @@ #include "qemu/error-report.h" #include "hw/intc/sifive_plic.h" #include "hw/intc/riscv_aclint.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include 
"hw/qdev-properties.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/riscv/boot.h" static const struct MemmapEntry { @@ -45,6 +45,7 @@ static void shakti_c_machine_state_init(MachineState *mstate) { ShaktiCMachineState *sms = RISCV_SHAKTI_MACHINE(mstate); MemoryRegion *system_memory = get_system_memory(); + hwaddr firmware_load_addr = shakti_c_memmap[SHAKTI_C_RAM].base; /* Initialize SoC */ object_initialize_child(OBJECT(mstate), "soc", &sms->soc, @@ -56,23 +57,21 @@ static void shakti_c_machine_state_init(MachineState *mstate) shakti_c_memmap[SHAKTI_C_RAM].base, mstate->ram); + if (mstate->firmware) { + riscv_load_firmware(mstate->firmware, &firmware_load_addr, NULL); + } + /* ROM reset vector */ - riscv_setup_rom_reset_vec(mstate, &sms->soc.cpus, - shakti_c_memmap[SHAKTI_C_RAM].base, + riscv_setup_rom_reset_vec(mstate, &sms->soc.cpus, firmware_load_addr, shakti_c_memmap[SHAKTI_C_ROM].base, shakti_c_memmap[SHAKTI_C_ROM].size, 0, 0); - if (mstate->firmware) { - riscv_load_firmware(mstate->firmware, - shakti_c_memmap[SHAKTI_C_RAM].base, - NULL); - } } static void shakti_c_machine_instance_init(Object *obj) { } -static void shakti_c_machine_class_init(ObjectClass *klass, void *data) +static void shakti_c_machine_class_init(ObjectClass *klass, const void *data) { MachineClass *mc = MACHINE_CLASS(klass); static const char * const valid_cpu_types[] = { @@ -143,7 +142,7 @@ static void shakti_c_soc_state_realize(DeviceState *dev, Error **errp) shakti_c_memmap[SHAKTI_C_ROM].base, &sss->rom); } -static void shakti_c_soc_class_init(ObjectClass *klass, void *data) +static void shakti_c_soc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = shakti_c_soc_state_realize; diff --git a/hw/riscv/sifive_e.c b/hw/riscv/sifive_e.c index 87d9602383a..7baed1958e0 100644 --- a/hw/riscv/sifive_e.c +++ b/hw/riscv/sifive_e.c @@ -35,7 +35,6 @@ #include "hw/boards.h" #include "hw/loader.h" #include "hw/sysbus.h" -#include "hw/char/serial.h" #include "hw/misc/unimp.h" #include "target/riscv/cpu.h" #include "hw/riscv/riscv_hart.h" @@ -47,7 +46,7 @@ #include "hw/misc/sifive_e_prci.h" #include "hw/misc/sifive_e_aon.h" #include "chardev/char.h" -#include "sysemu/sysemu.h" +#include "system/system.h" static const MemMapEntry sifive_e_memmap[] = { [SIFIVE_E_DEV_DEBUG] = { 0x0, 0x1000 }, @@ -79,6 +78,7 @@ static void sifive_e_machine_init(MachineState *machine) SiFiveEState *s = RISCV_E_MACHINE(machine); MemoryRegion *sys_mem = get_system_memory(); int i; + RISCVBootInfo boot_info; if (machine->ram_size != mc->default_ram_size) { char *sz = size_to_str(mc->default_ram_size); @@ -114,8 +114,9 @@ static void sifive_e_machine_init(MachineState *machine) rom_add_blob_fixed_as("mrom.reset", reset_vec, sizeof(reset_vec), memmap[SIFIVE_E_DEV_MROM].base, &address_space_memory); + riscv_boot_info_init(&boot_info, &s->soc.cpus); if (machine->kernel_filename) { - riscv_load_kernel(machine, &s->soc.cpus, + riscv_load_kernel(machine, &boot_info, memmap[SIFIVE_E_DEV_DTIM].base, false, NULL); } @@ -142,7 +143,7 @@ static void sifive_e_machine_instance_init(Object *obj) s->revb = false; } -static void sifive_e_machine_class_init(ObjectClass *oc, void *data) +static void sifive_e_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -283,7 +284,7 @@ static void sifive_e_soc_realize(DeviceState *dev, Error **errp) &s->xip_mem); } -static void sifive_e_soc_class_init(ObjectClass *oc, void *data) +static void 
sifive_e_soc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c index af5f923f541..d69f942cfbe 100644 --- a/hw/riscv/sifive_u.c +++ b/hw/riscv/sifive_u.c @@ -43,7 +43,6 @@ #include "hw/irq.h" #include "hw/loader.h" #include "hw/sysbus.h" -#include "hw/char/serial.h" #include "hw/cpu/cluster.h" #include "hw/misc/unimp.h" #include "hw/sd/sd.h" @@ -57,9 +56,9 @@ #include "hw/intc/sifive_plic.h" #include "chardev/char.h" #include "net/eth.h" -#include "sysemu/device_tree.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/device_tree.h" +#include "system/runstate.h" +#include "system/system.h" #include @@ -515,17 +514,19 @@ static void sifive_u_machine_init(MachineState *machine) SiFiveUState *s = RISCV_U_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); MemoryRegion *flash0 = g_new(MemoryRegion, 1); - target_ulong start_addr = memmap[SIFIVE_U_DEV_DRAM].base; + hwaddr start_addr = memmap[SIFIVE_U_DEV_DRAM].base; target_ulong firmware_end_addr, kernel_start_addr; const char *firmware_name; uint32_t start_addr_hi32 = 0x00000000; + uint32_t fdt_load_addr_hi32 = 0x00000000; int i; - uint32_t fdt_load_addr; + uint64_t fdt_load_addr; uint64_t kernel_entry; DriveInfo *dinfo; BlockBackend *blk; DeviceState *flash_dev, *sd_dev, *card_dev; qemu_irq flash_cs, sd_cs; + RISCVBootInfo boot_info; /* Initialize SoC */ object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_RISCV_U_SOC); @@ -589,14 +590,15 @@ static void sifive_u_machine_init(MachineState *machine) firmware_name = riscv_default_firmware_name(&s->soc.u_cpus); firmware_end_addr = riscv_find_and_load_firmware(machine, firmware_name, - start_addr, NULL); + &start_addr, NULL); + riscv_boot_info_init(&boot_info, &s->soc.u_cpus); if (machine->kernel_filename) { - kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus, + kernel_start_addr = riscv_calc_kernel_start_addr(&boot_info, firmware_end_addr); - - kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus, - kernel_start_addr, true, NULL); + riscv_load_kernel(machine, &boot_info, kernel_start_addr, + true, NULL); + kernel_entry = boot_info.image_low_addr; } else { /* * If dynamic firmware is used, it doesn't know where is the next mode @@ -607,11 +609,12 @@ static void sifive_u_machine_init(MachineState *machine) fdt_load_addr = riscv_compute_fdt_addr(memmap[SIFIVE_U_DEV_DRAM].base, memmap[SIFIVE_U_DEV_DRAM].size, - machine); + machine, &boot_info); riscv_load_fdt(fdt_load_addr, machine->fdt); if (!riscv_is_32bit(&s->soc.u_cpus)) { start_addr_hi32 = (uint64_t)start_addr >> 32; + fdt_load_addr_hi32 = fdt_load_addr >> 32; } /* reset vector */ @@ -626,7 +629,7 @@ static void sifive_u_machine_init(MachineState *machine) start_addr, /* start: .dword */ start_addr_hi32, fdt_load_addr, /* fdt_laddr: .dword */ - 0x00000000, + fdt_load_addr_hi32, 0x00000000, /* fw_dyn: */ }; @@ -646,7 +649,8 @@ static void sifive_u_machine_init(MachineState *machine) rom_add_blob_fixed_as("mrom.reset", reset_vec, sizeof(reset_vec), memmap[SIFIVE_U_DEV_MROM].base, &address_space_memory); - riscv_rom_copy_firmware_info(machine, memmap[SIFIVE_U_DEV_MROM].base, + riscv_rom_copy_firmware_info(machine, &s->soc.u_cpus, + memmap[SIFIVE_U_DEV_MROM].base, memmap[SIFIVE_U_DEV_MROM].size, sizeof(reset_vec), kernel_entry); @@ -709,7 +713,7 @@ static void sifive_u_machine_instance_init(Object *obj) object_property_set_description(obj, "serial", "Board serial number"); } -static void 
sifive_u_machine_class_init(ObjectClass *oc, void *data) +static void sifive_u_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -720,6 +724,7 @@ static void sifive_u_machine_class_init(ObjectClass *oc, void *data) mc->default_cpu_type = SIFIVE_U_CPU; mc->default_cpus = mc->min_cpus; mc->default_ram_id = "riscv.sifive.u.ram"; + mc->auto_create_sdcard = true; object_class_property_add_bool(oc, "start-in-flash", sifive_u_machine_get_start_in_flash, @@ -936,13 +941,12 @@ static void sifive_u_soc_realize(DeviceState *dev, Error **errp) qdev_get_gpio_in(DEVICE(s->plic), SIFIVE_U_QSPI2_IRQ)); } -static Property sifive_u_soc_props[] = { +static const Property sifive_u_soc_props[] = { DEFINE_PROP_UINT32("serial", SiFiveUSoCState, serial, OTP_SERIAL), DEFINE_PROP_STRING("cpu-type", SiFiveUSoCState, cpu_type), - DEFINE_PROP_END_OF_LIST() }; -static void sifive_u_soc_class_init(ObjectClass *oc, void *data) +static void sifive_u_soc_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index 64074395bc5..641aae8c019 100644 --- a/hw/riscv/spike.c +++ b/hw/riscv/spike.c @@ -36,8 +36,8 @@ #include "hw/char/riscv_htif.h" #include "hw/intc/riscv_aclint.h" #include "chardev/char.h" -#include "sysemu/device_tree.h" -#include "sysemu/sysemu.h" +#include "system/device_tree.h" +#include "system/system.h" #include @@ -198,13 +198,15 @@ static void spike_board_init(MachineState *machine) MemoryRegion *system_memory = get_system_memory(); MemoryRegion *mask_rom = g_new(MemoryRegion, 1); target_ulong firmware_end_addr = memmap[SPIKE_DRAM].base; + hwaddr firmware_load_addr = memmap[SPIKE_DRAM].base; target_ulong kernel_start_addr; char *firmware_name; - uint32_t fdt_load_addr; + uint64_t fdt_load_addr; uint64_t kernel_entry; char *soc_name; int i, base_hartid, hart_count; bool htif_custom_base = false; + RISCVBootInfo boot_info; /* Check socket count limit */ if (SPIKE_SOCKETS_MAX < riscv_socket_count(machine)) { @@ -290,7 +292,7 @@ static void spike_board_init(MachineState *machine) /* Load firmware */ if (firmware_name) { firmware_end_addr = riscv_load_firmware(firmware_name, - memmap[SPIKE_DRAM].base, + &firmware_load_addr, htif_symbol_callback); g_free(firmware_name); } @@ -299,13 +301,14 @@ static void spike_board_init(MachineState *machine) create_fdt(s, memmap, riscv_is_32bit(&s->soc[0]), htif_custom_base); /* Load kernel */ + riscv_boot_info_init(&boot_info, &s->soc[0]); if (machine->kernel_filename) { - kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0], + kernel_start_addr = riscv_calc_kernel_start_addr(&boot_info, firmware_end_addr); - kernel_entry = riscv_load_kernel(machine, &s->soc[0], - kernel_start_addr, - true, htif_symbol_callback); + riscv_load_kernel(machine, &boot_info, kernel_start_addr, + true, htif_symbol_callback); + kernel_entry = boot_info.image_low_addr; } else { /* * If dynamic firmware is used, it doesn't know where is the next mode @@ -316,11 +319,11 @@ static void spike_board_init(MachineState *machine) fdt_load_addr = riscv_compute_fdt_addr(memmap[SPIKE_DRAM].base, memmap[SPIKE_DRAM].size, - machine); + machine, &boot_info); riscv_load_fdt(fdt_load_addr, machine->fdt); /* load the reset vector */ - riscv_setup_rom_reset_vec(machine, &s->soc[0], memmap[SPIKE_DRAM].base, + riscv_setup_rom_reset_vec(machine, &s->soc[0], firmware_load_addr, memmap[SPIKE_MROM].base, memmap[SPIKE_MROM].size, kernel_entry, fdt_load_addr); @@ -339,7 +342,7 @@ static void 
spike_machine_instance_init(Object *obj) { } -static void spike_machine_class_init(ObjectClass *oc, void *data) +static void spike_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); diff --git a/hw/riscv/trace-events b/hw/riscv/trace-events new file mode 100644 index 00000000000..b50b14a6542 --- /dev/null +++ b/hw/riscv/trace-events @@ -0,0 +1,26 @@ +# See documentation at docs/devel/tracing.rst + +# riscv-iommu.c +riscv_iommu_new(const char *id, unsigned b, unsigned d, unsigned f) "%s: device attached %04x:%02x.%d" +riscv_iommu_flt(const char *id, unsigned b, unsigned d, unsigned f, uint64_t reason, uint64_t iova) "%s: fault %04x:%02x.%u reason: 0x%"PRIx64" iova: 0x%"PRIx64 +riscv_iommu_pri(const char *id, unsigned b, unsigned d, unsigned f, uint64_t iova) "%s: page request %04x:%02x.%u iova: 0x%"PRIx64 +riscv_iommu_dma(const char *id, unsigned b, unsigned d, unsigned f, unsigned pasid, const char *dir, uint64_t iova, uint64_t phys) "%s: translate %04x:%02x.%u #%u %s 0x%"PRIx64" -> 0x%"PRIx64 +riscv_iommu_msi(const char *id, unsigned b, unsigned d, unsigned f, uint64_t iova, uint64_t phys) "%s: translate %04x:%02x.%u MSI 0x%"PRIx64" -> 0x%"PRIx64 +riscv_iommu_mrif_notification(const char *id, uint32_t nid, uint64_t phys) "%s: sent MRIF notification 0x%x to 0x%"PRIx64 +riscv_iommu_cmd(const char *id, uint64_t l, uint64_t u) "%s: command 0x%"PRIx64" 0x%"PRIx64 +riscv_iommu_notifier_add(const char *id) "%s: dev-iotlb notifier added" +riscv_iommu_notifier_del(const char *id) "%s: dev-iotlb notifier removed" +riscv_iommu_notify_int_vector(uint32_t cause, uint32_t vector) "Interrupt cause 0x%x sent via vector 0x%x" +riscv_iommu_icvec_write(uint32_t orig, uint32_t actual) "ICVEC write: incoming 0x%x actual 0x%x" +riscv_iommu_ats(const char *id, unsigned b, unsigned d, unsigned f, uint64_t iova) "%s: translate request %04x:%02x.%u iova: 0x%"PRIx64 +riscv_iommu_ats_inval(const char *id) "%s: dev-iotlb invalidate" +riscv_iommu_ats_prgr(const char *id) "%s: dev-iotlb page request group response" +riscv_iommu_sys_irq_sent(uint32_t vector) "IRQ sent to vector %u" +riscv_iommu_sys_msi_sent(uint32_t vector, uint64_t msi_addr, uint32_t msi_data, uint32_t result) "MSI sent to vector %u msi_addr 0x%"PRIx64" msi_data 0x%x result %u" +riscv_iommu_sys_reset_hold(int reset_type) "reset type %d" +riscv_iommu_pci_reset_hold(int reset_type) "reset type %d" +riscv_iommu_hpm_read(uint64_t cycle, uint32_t inhibit, uint64_t ctr_prev, uint64_t ctr_val) "cycle 0x%"PRIx64" inhibit 0x%x ctr_prev 0x%"PRIx64" ctr_val 0x%"PRIx64 +riscv_iommu_hpm_incr_ctr(uint64_t cntr_val) "cntr_val 0x%"PRIx64 +riscv_iommu_hpm_iocntinh_cy(bool prev_cy_inh) "prev_cy_inh %d" +riscv_iommu_hpm_cycle_write(uint32_t ovf, uint64_t val) "ovf 0x%x val 0x%"PRIx64 +riscv_iommu_hpm_evt_write(uint32_t ctr_idx, uint32_t ovf, uint64_t val) "ctr_idx 0x%x ovf 0x%x val 0x%"PRIx64 diff --git a/hw/riscv/trace.h b/hw/riscv/trace.h new file mode 100644 index 00000000000..8c0e3ca1f3d --- /dev/null +++ b/hw/riscv/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_riscv.h" diff --git a/hw/riscv/virt-acpi-build.c b/hw/riscv/virt-acpi-build.c index 36d6a3a412f..f1406cb6833 100644 --- a/hw/riscv/virt-acpi-build.c +++ b/hw/riscv/virt-acpi-build.c @@ -38,7 +38,7 @@ #include "migration/vmstate.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "sysemu/reset.h" +#include "system/reset.h" #define ACPI_BUILD_TABLE_SIZE 0x20000 #define ACPI_BUILD_INTC_ID(socket, index) ((socket << 24) | (index)) @@ -198,16 +198,43 @@ 
acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap, aml_append(scope, dev); } +/* + * Add DSDT entry for the IOMMU platform device. + * ACPI ID for IOMMU is defined in the section 6.2 of RISC-V BRS spec. + * https://github.com/riscv-non-isa/riscv-brs/releases/download/v0.8/riscv-brs-spec.pdf + */ +static void acpi_dsdt_add_iommu_sys(Aml *scope, const MemMapEntry *iommu_memmap, + uint32_t iommu_irq) +{ + uint32_t i; + + Aml *dev = aml_device("IMU0"); + aml_append(dev, aml_name_decl("_HID", aml_string("RSCV0004"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + + Aml *crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(iommu_memmap->base, + iommu_memmap->size, AML_READ_WRITE)); + for (i = iommu_irq; i < iommu_irq + 4; i++) { + aml_append(crs, aml_interrupt(AML_CONSUMER, AML_EDGE, AML_ACTIVE_LOW, + AML_EXCLUSIVE, &i, 1)); + } + + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} + /* * Serial Port Console Redirection Table (SPCR) - * Rev: 1.07 + * Rev: 1.10 */ static void spcr_setup(GArray *table_data, BIOSLinker *linker, RISCVVirtState *s) { + const char name[] = "."; AcpiSpcrData serial = { - .interface_type = 0, /* 16550 compatible */ + .interface_type = 0x12, /* 16550 compatible */ .base_addr.id = AML_AS_SYSTEM_MEMORY, .base_addr.width = 32, .base_addr.offset = 0, @@ -229,20 +256,22 @@ spcr_setup(GArray *table_data, BIOSLinker *linker, RISCVVirtState *s) .pci_function = 0, .pci_flags = 0, .pci_segment = 0, + .uart_clk_freq = 0, + .precise_baudrate = 0, + .namespace_string_length = sizeof(name), + .namespace_string_offset = 88, }; - build_spcr(table_data, linker, &serial, 2, s->oem_id, s->oem_table_id); + build_spcr(table_data, linker, &serial, 4, s->oem_id, s->oem_table_id, + name); } /* RHCT Node[N] starts at offset 56 */ #define RHCT_NODE_ARRAY_OFFSET 56 /* - * ACPI spec, Revision 6.5+ - * 5.2.36 RISC-V Hart Capabilities Table (RHCT) - * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/16 - * https://drive.google.com/file/d/1nP3nFiH4jkPMp6COOxP6123DCZKR-tia/view - * https://drive.google.com/file/d/1sKbOa8m1UZw1JkquZYe3F1zQBN1xXsaf/view + * ACPI spec, Revision 6.6 + * 5.2.37 RISC-V Hart Capabilities Table (RHCT) */ static void build_rhct(GArray *table_data, BIOSLinker *linker, @@ -255,7 +284,7 @@ static void build_rhct(GArray *table_data, uint32_t isa_offset, num_rhct_nodes, cmo_offset = 0; RISCVCPU *cpu = &s->soc[0].harts[0]; uint32_t mmu_offset = 0; - uint8_t satp_mode_max; + bool rv32 = riscv_cpu_is_32bit(cpu); g_autofree char *isa = NULL; AcpiTable table = { .sig = "RHCT", .rev = 1, .oem_id = s->oem_id, @@ -275,7 +304,7 @@ static void build_rhct(GArray *table_data, num_rhct_nodes++; } - if (cpu->cfg.satp_mode.supported != 0) { + if (!rv32 && cpu->cfg.max_satp_mode >= VM_1_10_SV39) { num_rhct_nodes++; } @@ -335,22 +364,21 @@ static void build_rhct(GArray *table_data, } /* MMU node structure */ - if (cpu->cfg.satp_mode.supported != 0) { - satp_mode_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); + if (!rv32 && cpu->cfg.max_satp_mode >= VM_1_10_SV39) { mmu_offset = table_data->len - table.table_offset; build_append_int_noprefix(table_data, 2, 2); /* Type */ build_append_int_noprefix(table_data, 8, 2); /* Length */ build_append_int_noprefix(table_data, 0x1, 2); /* Revision */ build_append_int_noprefix(table_data, 0, 1); /* Reserved */ /* MMU Type */ - if (satp_mode_max == VM_1_10_SV57) { + if (cpu->cfg.max_satp_mode == VM_1_10_SV57) { build_append_int_noprefix(table_data, 2, 1); /* Sv57 */ - } else if 
(satp_mode_max == VM_1_10_SV48) { + } else if (cpu->cfg.max_satp_mode == VM_1_10_SV48) { build_append_int_noprefix(table_data, 1, 1); /* Sv48 */ - } else if (satp_mode_max == VM_1_10_SV39) { + } else if (cpu->cfg.max_satp_mode == VM_1_10_SV39) { build_append_int_noprefix(table_data, 0, 1); /* Sv39 */ } else { - assert(1); + g_assert_not_reached(); } } @@ -390,7 +418,10 @@ static void build_rhct(GArray *table_data, acpi_table_end(linker, &table); } -/* FADT */ +/* + * ACPI spec, Revision 6.6 + * 5.2.9 Fixed ACPI Description Table (MADT) + */ static void build_fadt_rev6(GArray *table_data, BIOSLinker *linker, RISCVVirtState *s, @@ -398,7 +429,7 @@ static void build_fadt_rev6(GArray *table_data, { AcpiFadtData fadt = { .rev = 6, - .minor_ver = 5, + .minor_ver = 6, .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI, .xdsdt_tbl_offset = &dsdt_tbl_offset, }; @@ -444,6 +475,9 @@ static void build_dsdt(GArray *table_data, } acpi_dsdt_add_uart(scope, &memmap[VIRT_UART0], UART0_IRQ); + if (virt_is_iommu_sys_enabled(s)) { + acpi_dsdt_add_iommu_sys(scope, &memmap[VIRT_IOMMU_SYS], IOMMU_SYS_IRQ); + } if (socket_count == 1) { virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base, @@ -474,11 +508,8 @@ static void build_dsdt(GArray *table_data, } /* - * ACPI spec, Revision 6.5+ + * ACPI spec, Revision 6.6 * 5.2.12 Multiple APIC Description Table (MADT) - * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/15 - * https://drive.google.com/file/d/1R6k4MshhN3WTT-hwqAquu5nX6xSEqK2l/view - * https://drive.google.com/file/d/1oMGPyOD58JaPgMl1pKasT-VKsIKia7zR/view */ static void build_madt(GArray *table_data, BIOSLinker *linker, @@ -503,7 +534,7 @@ static void build_madt(GArray *table_data, hart_index_bits = imsic_num_bits(imsic_max_hart_per_socket); - AcpiTable table = { .sig = "APIC", .rev = 6, .oem_id = s->oem_id, + AcpiTable table = { .sig = "APIC", .rev = 7, .oem_id = s->oem_id, .oem_table_id = s->oem_table_id }; acpi_table_begin(&table, table_data); @@ -596,11 +627,190 @@ static void build_madt(GArray *table_data, acpi_table_end(linker, &table); } +#define ID_MAPPING_ENTRY_SIZE 20 +#define IOMMU_ENTRY_SIZE 40 +#define RISCV_INTERRUPT_WIRE_OFFSSET 40 +#define ROOT_COMPLEX_ENTRY_SIZE 20 +#define RIMT_NODE_OFFSET 48 + /* - * ACPI spec, Revision 6.5+ + * ID Mapping Structure + */ +static void build_rimt_id_mapping(GArray *table_data, uint32_t source_id_base, + uint32_t num_ids, uint32_t dest_id_base) +{ + /* Source ID Base */ + build_append_int_noprefix(table_data, source_id_base, 4); + /* Number of IDs */ + build_append_int_noprefix(table_data, num_ids, 4); + /* Destination Device ID Base */ + build_append_int_noprefix(table_data, source_id_base, 4); + /* Destination IOMMU Offset */ + build_append_int_noprefix(table_data, dest_id_base, 4); + /* Flags */ + build_append_int_noprefix(table_data, 0, 4); +} + +struct AcpiRimtIdMapping { + uint32_t source_id_base; + uint32_t num_ids; +}; +typedef struct AcpiRimtIdMapping AcpiRimtIdMapping; + +/* Build the rimt ID mapping to IOMMU for a given PCI host bridge */ +static int rimt_host_bridges(Object *obj, void *opaque) +{ + GArray *idmap_blob = opaque; + + if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) { + PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus; + + if (bus && !pci_bus_bypass_iommu(bus)) { + int min_bus, max_bus; + + pci_bus_range(bus, &min_bus, &max_bus); + + AcpiRimtIdMapping idmap = { + .source_id_base = min_bus << 8, + .num_ids = (max_bus - min_bus + 1) << 8, + }; + g_array_append_val(idmap_blob, idmap); + } + } + + return 0; +} + +static int 
rimt_idmap_compare(gconstpointer a, gconstpointer b) +{ + AcpiRimtIdMapping *idmap_a = (AcpiRimtIdMapping *)a; + AcpiRimtIdMapping *idmap_b = (AcpiRimtIdMapping *)b; + + return idmap_a->source_id_base - idmap_b->source_id_base; +} + +/* + * RISC-V IO Mapping Table (RIMT) + * https://github.com/riscv-non-isa/riscv-acpi-rimt/releases/download/v0.99/rimt-spec.pdf + */ +static void build_rimt(GArray *table_data, BIOSLinker *linker, + RISCVVirtState *s) +{ + int i, nb_nodes, rc_mapping_count; + size_t node_size, iommu_offset = 0; + uint32_t id = 0; + g_autoptr(GArray) iommu_idmaps = g_array_new(false, true, + sizeof(AcpiRimtIdMapping)); + + AcpiTable table = { .sig = "RIMT", .rev = 1, .oem_id = s->oem_id, + .oem_table_id = s->oem_table_id }; + + acpi_table_begin(&table, table_data); + + object_child_foreach_recursive(object_get_root(), + rimt_host_bridges, iommu_idmaps); + + /* Sort the ID mapping by Source ID Base*/ + g_array_sort(iommu_idmaps, rimt_idmap_compare); + + nb_nodes = 2; /* RC, IOMMU */ + rc_mapping_count = iommu_idmaps->len; + /* Number of RIMT Nodes */ + build_append_int_noprefix(table_data, nb_nodes, 4); + + /* Offset to Array of RIMT Nodes */ + build_append_int_noprefix(table_data, RIMT_NODE_OFFSET, 4); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + + iommu_offset = table_data->len - table.table_offset; + /* IOMMU Device Structure */ + build_append_int_noprefix(table_data, 0, 1); /* Type - IOMMU*/ + build_append_int_noprefix(table_data, 1, 1); /* Revision */ + node_size = IOMMU_ENTRY_SIZE; + build_append_int_noprefix(table_data, node_size, 2); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, id++, 2); /* ID */ + if (virt_is_iommu_sys_enabled(s)) { + /* Hardware ID */ + build_append_int_noprefix(table_data, 'R', 1); + build_append_int_noprefix(table_data, 'S', 1); + build_append_int_noprefix(table_data, 'C', 1); + build_append_int_noprefix(table_data, 'V', 1); + build_append_int_noprefix(table_data, '0', 1); + build_append_int_noprefix(table_data, '0', 1); + build_append_int_noprefix(table_data, '0', 1); + build_append_int_noprefix(table_data, '4', 1); + /* Base Address */ + build_append_int_noprefix(table_data, + s->memmap[VIRT_IOMMU_SYS].base, 8); + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + } else { + /* Hardware ID */ + build_append_int_noprefix(table_data, '0', 1); + build_append_int_noprefix(table_data, '0', 1); + build_append_int_noprefix(table_data, '1', 1); + build_append_int_noprefix(table_data, '0', 1); + build_append_int_noprefix(table_data, '0', 1); + build_append_int_noprefix(table_data, '0', 1); + build_append_int_noprefix(table_data, '1', 1); + build_append_int_noprefix(table_data, '4', 1); + + build_append_int_noprefix(table_data, 0, 8); /* Base Address */ + build_append_int_noprefix(table_data, 1, 4); /* Flags */ + } + + build_append_int_noprefix(table_data, 0, 4); /* Proximity Domain */ + build_append_int_noprefix(table_data, 0, 2); /* PCI Segment number */ + /* PCIe B/D/F */ + if (virt_is_iommu_sys_enabled(s)) { + build_append_int_noprefix(table_data, 0, 2); + } else { + build_append_int_noprefix(table_data, s->pci_iommu_bdf, 2); + } + /* Number of interrupt wires */ + build_append_int_noprefix(table_data, 0, 2); + /* Interrupt wire array offset */ + build_append_int_noprefix(table_data, RISCV_INTERRUPT_WIRE_OFFSSET, 2); + + /* PCIe Root Complex Node */ + build_append_int_noprefix(table_data, 1, 1); /* Type */ + build_append_int_noprefix(table_data, 1, 1); /* 
Revision */ + node_size = ROOT_COMPLEX_ENTRY_SIZE + + ID_MAPPING_ENTRY_SIZE * rc_mapping_count; + build_append_int_noprefix(table_data, node_size, 2); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, id++, 2); /* ID */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* PCI Segment number */ + build_append_int_noprefix(table_data, 0, 2); + /* ID mapping array offset */ + build_append_int_noprefix(table_data, ROOT_COMPLEX_ENTRY_SIZE, 2); + /* Number of ID mappings */ + build_append_int_noprefix(table_data, rc_mapping_count, 2); + + /* Output Reference */ + AcpiRimtIdMapping *range; + + /* ID mapping array */ + for (i = 0; i < iommu_idmaps->len; i++) { + range = &g_array_index(iommu_idmaps, AcpiRimtIdMapping, i); + if (virt_is_iommu_sys_enabled(s)) { + range->source_id_base = 0; + } else { + range->source_id_base = s->pci_iommu_bdf + 1; + } + range->num_ids = 0xffff - s->pci_iommu_bdf; + build_rimt_id_mapping(table_data, range->source_id_base, + range->num_ids, iommu_offset); + } + + acpi_table_end(linker, &table); +} + +/* + * ACPI spec, Revision 6.6 * 5.2.16 System Resource Affinity Table (SRAT) - * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/25 - * https://drive.google.com/file/d/1YTdDx2IPm5IeZjAW932EYU-tUtgS08tX/view */ static void build_srat(GArray *table_data, BIOSLinker *linker, RISCVVirtState *vms) @@ -673,8 +883,16 @@ static void virt_acpi_build(RISCVVirtState *s, AcpiBuildTables *tables) acpi_add_table(table_offsets, tables_blob); build_rhct(tables_blob, tables->linker, s); + if (virt_is_iommu_sys_enabled(s) || s->pci_iommu_bdf) { + acpi_add_table(table_offsets, tables_blob); + build_rimt(tables_blob, tables->linker, s); + } + acpi_add_table(table_offsets, tables_blob); - spcr_setup(tables_blob, tables->linker, s); + + if (ms->acpi_spcr_enabled) { + spcr_setup(tables_blob, tables->linker, s); + } acpi_add_table(table_offsets, tables_blob); { diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c index 9981e0f6c9b..47e573f85ab 100644 --- a/hw/riscv/virt.c +++ b/hw/riscv/virt.c @@ -27,11 +27,13 @@ #include "hw/loader.h" #include "hw/sysbus.h" #include "hw/qdev-properties.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "target/riscv/cpu.h" #include "hw/core/sysbus-fdt.h" #include "target/riscv/pmu.h" #include "hw/riscv/riscv_hart.h" +#include "hw/riscv/iommu.h" +#include "hw/riscv/riscv-iommu-bits.h" #include "hw/riscv/virt.h" #include "hw/riscv/boot.h" #include "hw/riscv/numa.h" @@ -43,23 +45,33 @@ #include "hw/misc/sifive_test.h" #include "hw/platform-bus.h" #include "chardev/char.h" -#include "sysemu/device_tree.h" -#include "sysemu/sysemu.h" -#include "sysemu/tcg.h" -#include "sysemu/kvm.h" -#include "sysemu/tpm.h" -#include "sysemu/qtest.h" +#include "system/device_tree.h" +#include "system/system.h" +#include "system/tcg.h" +#include "system/kvm.h" +#include "system/tpm.h" +#include "system/qtest.h" #include "hw/pci/pci.h" #include "hw/pci-host/gpex.h" #include "hw/display/ramfb.h" #include "hw/acpi/aml-build.h" #include "qapi/qapi-visit-common.h" #include "hw/virtio/virtio-iommu.h" +#include "hw/uefi/var-service-api.h" /* KVM AIA only supports APLIC MSI. APLIC Wired is always emulated by QEMU. 
*/ -static bool virt_use_kvm_aia(RISCVVirtState *s) +static bool virt_use_kvm_aia_aplic_imsic(RISCVVirtAIAType aia_type) { - return kvm_irqchip_in_kernel() && s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC; + bool msimode = aia_type == VIRT_AIA_TYPE_APLIC_IMSIC; + + return riscv_is_kvm_aia_aplic_imsic(msimode); +} + +static bool virt_use_emulated_aplic(RISCVVirtAIAType aia_type) +{ + bool msimode = aia_type == VIRT_AIA_TYPE_APLIC_IMSIC; + + return riscv_use_emulated_aplic(msimode); } static bool virt_aclint_allowed(void) @@ -75,6 +87,7 @@ static const MemMapEntry virt_memmap[] = { [VIRT_CLINT] = { 0x2000000, 0x10000 }, [VIRT_ACLINT_SSWI] = { 0x2F00000, 0x4000 }, [VIRT_PCIE_PIO] = { 0x3000000, 0x10000 }, + [VIRT_IOMMU_SYS] = { 0x3010000, 0x1000 }, [VIRT_PLATFORM_BUS] = { 0x4000000, 0x2000000 }, [VIRT_PLIC] = { 0xc000000, VIRT_PLIC_SIZE(VIRT_CPUS_MAX * 2) }, [VIRT_APLIC_M] = { 0xc000000, APLIC_SIZE(VIRT_CPUS_MAX) }, @@ -153,8 +166,8 @@ static void virt_flash_map1(PFlashCFI01 *flash, static void virt_flash_map(RISCVVirtState *s, MemoryRegion *sysmem) { - hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2; - hwaddr flashbase = virt_memmap[VIRT_FLASH].base; + hwaddr flashsize = s->memmap[VIRT_FLASH].size / 2; + hwaddr flashbase = s->memmap[VIRT_FLASH].base; virt_flash_map1(s->flash[0], flashbase, flashsize, sysmem); @@ -167,7 +180,7 @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename, { int pin, dev; uint32_t irq_map_stride = 0; - uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * + uint32_t full_irq_map[PCI_NUM_PINS * PCI_NUM_PINS * FDT_MAX_INT_MAP_WIDTH] = {}; uint32_t *irq_map = full_irq_map; @@ -179,11 +192,11 @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename, * possible slot) seeing the interrupt-map-mask will allow the table * to wrap to any number of devices. 
*/ - for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { + for (dev = 0; dev < PCI_NUM_PINS; dev++) { int devfn = dev * 0x8; - for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { - int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); + for (pin = 0; pin < PCI_NUM_PINS; pin++) { + int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS); int i = 0; /* Fill PCI address cells */ @@ -209,7 +222,7 @@ static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename, } qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map, - GPEX_NUM_IRQS * GPEX_NUM_IRQS * + PCI_NUM_PINS * PCI_NUM_PINS * irq_map_stride * sizeof(uint32_t)); qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask", @@ -224,10 +237,10 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, uint32_t cpu_phandle; MachineState *ms = MACHINE(s); bool is_32_bit = riscv_is_32bit(&s->soc[0]); - uint8_t satp_mode_max; for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) { RISCVCPU *cpu_ptr = &s->soc[socket].harts[cpu]; + int8_t satp_mode_max = cpu_ptr->cfg.max_satp_mode; g_autofree char *cpu_name = NULL; g_autofree char *core_name = NULL; g_autofree char *intc_name = NULL; @@ -239,8 +252,7 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, s->soc[socket].hartid_base + cpu); qemu_fdt_add_subnode(ms->fdt, cpu_name); - if (cpu_ptr->cfg.satp_mode.supported != 0) { - satp_mode_max = satp_mode_max_from_map(cpu_ptr->cfg.satp_mode.map); + if (satp_mode_max != -1) { sv_name = g_strdup_printf("riscv,%s", satp_mode_str(satp_mode_max, is_32_bit)); qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", sv_name); @@ -288,31 +300,30 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, } } -static void create_fdt_socket_memory(RISCVVirtState *s, - const MemMapEntry *memmap, int socket) +static void create_fdt_socket_memory(RISCVVirtState *s, int socket) { g_autofree char *mem_name = NULL; - uint64_t addr, size; + hwaddr addr; + uint64_t size; MachineState *ms = MACHINE(s); - addr = memmap[VIRT_DRAM].base + riscv_socket_mem_offset(ms, socket); + addr = s->memmap[VIRT_DRAM].base + riscv_socket_mem_offset(ms, socket); size = riscv_socket_mem_size(ms, socket); - mem_name = g_strdup_printf("/memory@%lx", (long)addr); + mem_name = g_strdup_printf("/memory@%"HWADDR_PRIx, addr); qemu_fdt_add_subnode(ms->fdt, mem_name); - qemu_fdt_setprop_cells(ms->fdt, mem_name, "reg", - addr >> 32, addr, size >> 32, size); + qemu_fdt_setprop_sized_cells(ms->fdt, mem_name, "reg", 2, addr, 2, size); qemu_fdt_setprop_string(ms->fdt, mem_name, "device_type", "memory"); riscv_socket_fdt_write_id(ms, mem_name, socket); } static void create_fdt_socket_clint(RISCVVirtState *s, - const MemMapEntry *memmap, int socket, + int socket, uint32_t *intc_phandles) { int cpu; g_autofree char *clint_name = NULL; g_autofree uint32_t *clint_cells = NULL; - unsigned long clint_addr; + hwaddr clint_addr; MachineState *ms = MACHINE(s); static const char * const clint_compat[2] = { "sifive,clint0", "riscv,clint0" @@ -327,21 +338,22 @@ static void create_fdt_socket_clint(RISCVVirtState *s, clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER); } - clint_addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket); - clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr); + clint_addr = s->memmap[VIRT_CLINT].base + + s->memmap[VIRT_CLINT].size * socket; + clint_name = g_strdup_printf("/soc/clint@%"HWADDR_PRIx, clint_addr); qemu_fdt_add_subnode(ms->fdt, clint_name); qemu_fdt_setprop_string_array(ms->fdt, clint_name, 
"compatible", (char **)&clint_compat, ARRAY_SIZE(clint_compat)); - qemu_fdt_setprop_cells(ms->fdt, clint_name, "reg", - 0x0, clint_addr, 0x0, memmap[VIRT_CLINT].size); + qemu_fdt_setprop_sized_cells(ms->fdt, clint_name, "reg", + 2, clint_addr, 2, s->memmap[VIRT_CLINT].size); qemu_fdt_setprop(ms->fdt, clint_name, "interrupts-extended", clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4); riscv_socket_fdt_write_id(ms, clint_name, socket); } static void create_fdt_socket_aclint(RISCVVirtState *s, - const MemMapEntry *memmap, int socket, + int socket, uint32_t *intc_phandles) { int cpu; @@ -368,13 +380,15 @@ static void create_fdt_socket_aclint(RISCVVirtState *s, aclint_cells_size = s->soc[socket].num_harts * sizeof(uint32_t) * 2; if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) { - addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket); + addr = s->memmap[VIRT_CLINT].base + + (s->memmap[VIRT_CLINT].size * socket); name = g_strdup_printf("/soc/mswi@%lx", addr); + qemu_fdt_add_subnode(ms->fdt, name); qemu_fdt_setprop_string(ms->fdt, name, "compatible", "riscv,aclint-mswi"); - qemu_fdt_setprop_cells(ms->fdt, name, "reg", - 0x0, addr, 0x0, RISCV_ACLINT_SWI_SIZE); + qemu_fdt_setprop_sized_cells(ms->fdt, name, "reg", + 2, addr, 2, RISCV_ACLINT_SWI_SIZE); qemu_fdt_setprop(ms->fdt, name, "interrupts-extended", aclint_mswi_cells, aclint_cells_size); qemu_fdt_setprop(ms->fdt, name, "interrupt-controller", NULL, 0); @@ -384,37 +398,38 @@ static void create_fdt_socket_aclint(RISCVVirtState *s, } if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { - addr = memmap[VIRT_CLINT].base + + addr = s->memmap[VIRT_CLINT].base + (RISCV_ACLINT_DEFAULT_MTIMER_SIZE * socket); size = RISCV_ACLINT_DEFAULT_MTIMER_SIZE; } else { - addr = memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE + - (memmap[VIRT_CLINT].size * socket); - size = memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE; + addr = s->memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE + + (s->memmap[VIRT_CLINT].size * socket); + size = s->memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE; } name = g_strdup_printf("/soc/mtimer@%lx", addr); qemu_fdt_add_subnode(ms->fdt, name); qemu_fdt_setprop_string(ms->fdt, name, "compatible", "riscv,aclint-mtimer"); - qemu_fdt_setprop_cells(ms->fdt, name, "reg", - 0x0, addr + RISCV_ACLINT_DEFAULT_MTIME, - 0x0, size - RISCV_ACLINT_DEFAULT_MTIME, - 0x0, addr + RISCV_ACLINT_DEFAULT_MTIMECMP, - 0x0, RISCV_ACLINT_DEFAULT_MTIME); + qemu_fdt_setprop_sized_cells(ms->fdt, name, "reg", + 2, addr + RISCV_ACLINT_DEFAULT_MTIME, + 2, size - RISCV_ACLINT_DEFAULT_MTIME, + 2, addr + RISCV_ACLINT_DEFAULT_MTIMECMP, + 2, RISCV_ACLINT_DEFAULT_MTIME); qemu_fdt_setprop(ms->fdt, name, "interrupts-extended", aclint_mtimer_cells, aclint_cells_size); riscv_socket_fdt_write_id(ms, name, socket); g_free(name); if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) { - addr = memmap[VIRT_ACLINT_SSWI].base + - (memmap[VIRT_ACLINT_SSWI].size * socket); + addr = s->memmap[VIRT_ACLINT_SSWI].base + + (s->memmap[VIRT_ACLINT_SSWI].size * socket); + name = g_strdup_printf("/soc/sswi@%lx", addr); qemu_fdt_add_subnode(ms->fdt, name); qemu_fdt_setprop_string(ms->fdt, name, "compatible", "riscv,aclint-sswi"); - qemu_fdt_setprop_cells(ms->fdt, name, "reg", - 0x0, addr, 0x0, memmap[VIRT_ACLINT_SSWI].size); + qemu_fdt_setprop_sized_cells(ms->fdt, name, "reg", + 2, addr, 2, s->memmap[VIRT_ACLINT_SSWI].size); qemu_fdt_setprop(ms->fdt, name, "interrupts-extended", aclint_sswi_cells, aclint_cells_size); qemu_fdt_setprop(ms->fdt, name, "interrupt-controller", NULL, 0); @@ 
-425,7 +440,7 @@ static void create_fdt_socket_aclint(RISCVVirtState *s, } static void create_fdt_socket_plic(RISCVVirtState *s, - const MemMapEntry *memmap, int socket, + int socket, uint32_t *phandle, uint32_t *intc_phandles, uint32_t *plic_phandles) { @@ -439,7 +454,8 @@ static void create_fdt_socket_plic(RISCVVirtState *s, }; plic_phandles[socket] = (*phandle)++; - plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket); + plic_addr = s->memmap[VIRT_PLIC].base + + (s->memmap[VIRT_PLIC].size * socket); plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr); qemu_fdt_add_subnode(ms->fdt, plic_name); qemu_fdt_setprop_cell(ms->fdt, plic_name, @@ -477,8 +493,8 @@ static void create_fdt_socket_plic(RISCVVirtState *s, s->soc[socket].num_harts * sizeof(uint32_t) * 4); } - qemu_fdt_setprop_cells(ms->fdt, plic_name, "reg", - 0x0, plic_addr, 0x0, memmap[VIRT_PLIC].size); + qemu_fdt_setprop_sized_cells(ms->fdt, plic_name, "reg", + 2, plic_addr, 2, s->memmap[VIRT_PLIC].size); qemu_fdt_setprop_cell(ms->fdt, plic_name, "riscv,ndev", VIRT_IRQCHIP_NUM_SOURCES - 1); riscv_socket_fdt_write_id(ms, plic_name, socket); @@ -487,8 +503,8 @@ static void create_fdt_socket_plic(RISCVVirtState *s, if (!socket) { platform_bus_add_all_fdt_nodes(ms->fdt, plic_name, - memmap[VIRT_PLATFORM_BUS].base, - memmap[VIRT_PLATFORM_BUS].size, + s->memmap[VIRT_PLATFORM_BUS].base, + s->memmap[VIRT_PLATFORM_BUS].size, VIRT_PLATFORM_BUS_IRQ); } } @@ -552,7 +568,6 @@ static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr, FDT_IMSIC_INT_CELLS); qemu_fdt_setprop(ms->fdt, imsic_name, "interrupt-controller", NULL, 0); qemu_fdt_setprop(ms->fdt, imsic_name, "msi-controller", NULL, 0); - qemu_fdt_setprop_cell(ms->fdt, imsic_name, "#msi-cells", 0); qemu_fdt_setprop(ms->fdt, imsic_name, "interrupts-extended", imsic_cells, ms->smp.cpus * sizeof(uint32_t) * 2); qemu_fdt_setprop(ms->fdt, imsic_name, "reg", imsic_regs, @@ -576,7 +591,7 @@ static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr, qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", msi_phandle); } -static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap, +static void create_fdt_imsic(RISCVVirtState *s, uint32_t *phandle, uint32_t *intc_phandles, uint32_t *msi_m_phandle, uint32_t *msi_s_phandle) { @@ -585,12 +600,12 @@ static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap, if (!kvm_enabled()) { /* M-level IMSIC node */ - create_fdt_one_imsic(s, memmap[VIRT_IMSIC_M].base, intc_phandles, + create_fdt_one_imsic(s, s->memmap[VIRT_IMSIC_M].base, intc_phandles, *msi_m_phandle, true, 0); } /* S-level IMSIC node */ - create_fdt_one_imsic(s, memmap[VIRT_IMSIC_S].base, intc_phandles, + create_fdt_one_imsic(s, s->memmap[VIRT_IMSIC_S].base, intc_phandles, *msi_s_phandle, false, imsic_num_bits(s->aia_guests + 1)); @@ -640,8 +655,8 @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket, qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent", msi_phandle); } - qemu_fdt_setprop_cells(ms->fdt, aplic_name, "reg", - 0x0, aplic_addr, 0x0, aplic_size); + qemu_fdt_setprop_sized_cells(ms->fdt, aplic_name, "reg", + 2, aplic_addr, 2, aplic_size); qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,num-sources", VIRT_IRQCHIP_NUM_SOURCES); @@ -667,7 +682,7 @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket, } static void create_fdt_socket_aplic(RISCVVirtState *s, - const MemMapEntry *memmap, int socket, + int socket, uint32_t msi_m_phandle, uint32_t msi_s_phandle, uint32_t *phandle, @@ -684,18 +699,19 
@@ static void create_fdt_socket_aplic(RISCVVirtState *s, if (!kvm_enabled()) { /* M-level APLIC node */ - aplic_addr = memmap[VIRT_APLIC_M].base + - (memmap[VIRT_APLIC_M].size * socket); - create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_M].size, + aplic_addr = s->memmap[VIRT_APLIC_M].base + + (s->memmap[VIRT_APLIC_M].size * socket); + create_fdt_one_aplic(s, socket, aplic_addr, + s->memmap[VIRT_APLIC_M].size, msi_m_phandle, intc_phandles, aplic_m_phandle, aplic_s_phandle, true, num_harts); } /* S-level APLIC node */ - aplic_addr = memmap[VIRT_APLIC_S].base + - (memmap[VIRT_APLIC_S].size * socket); - create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_S].size, + aplic_addr = s->memmap[VIRT_APLIC_S].base + + (s->memmap[VIRT_APLIC_S].size * socket); + create_fdt_one_aplic(s, socket, aplic_addr, s->memmap[VIRT_APLIC_S].size, msi_s_phandle, intc_phandles, aplic_s_phandle, 0, false, num_harts); @@ -703,8 +719,8 @@ static void create_fdt_socket_aplic(RISCVVirtState *s, if (!socket) { g_autofree char *aplic_name = fdt_get_aplic_nodename(aplic_addr); platform_bus_add_all_fdt_nodes(ms->fdt, aplic_name, - memmap[VIRT_PLATFORM_BUS].base, - memmap[VIRT_PLATFORM_BUS].size, + s->memmap[VIRT_PLATFORM_BUS].base, + s->memmap[VIRT_PLATFORM_BUS].size, VIRT_PLATFORM_BUS_IRQ); } @@ -722,7 +738,7 @@ static void create_fdt_pmu(RISCVVirtState *s) riscv_pmu_generate_fdt_node(ms->fdt, hart.pmu_avail_ctrs, pmu_name); } -static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, +static void create_fdt_sockets(RISCVVirtState *s, uint32_t *phandle, uint32_t *irq_mmio_phandle, uint32_t *irq_pcie_phandle, @@ -739,7 +755,7 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, qemu_fdt_add_subnode(ms->fdt, "/cpus"); qemu_fdt_setprop_cell(ms->fdt, "/cpus", "timebase-frequency", kvm_enabled() ? - kvm_riscv_get_timebase_frequency(first_cpu) : + kvm_riscv_get_timebase_frequency(&s->soc->harts[0]) : RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ); qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0); qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 0x1); @@ -758,53 +774,54 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, create_fdt_socket_cpus(s, socket, clust_name, phandle, &intc_phandles[phandle_pos]); - create_fdt_socket_memory(s, memmap, socket); + create_fdt_socket_memory(s, socket); if (virt_aclint_allowed() && s->have_aclint) { - create_fdt_socket_aclint(s, memmap, socket, + create_fdt_socket_aclint(s, socket, &intc_phandles[phandle_pos]); } else if (tcg_enabled()) { - create_fdt_socket_clint(s, memmap, socket, + create_fdt_socket_clint(s, socket, &intc_phandles[phandle_pos]); } } if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { - create_fdt_imsic(s, memmap, phandle, intc_phandles, - &msi_m_phandle, &msi_s_phandle); + create_fdt_imsic(s, phandle, intc_phandles, + &msi_m_phandle, &msi_s_phandle); *msi_pcie_phandle = msi_s_phandle; } - /* KVM AIA only has one APLIC instance */ - if (kvm_enabled() && virt_use_kvm_aia(s)) { - create_fdt_socket_aplic(s, memmap, 0, + /* + * With KVM AIA aplic-imsic, using an irqchip without split + * mode, we'll use only one APLIC instance. 
+ */ + if (!virt_use_emulated_aplic(s->aia_type)) { + create_fdt_socket_aplic(s, 0, msi_m_phandle, msi_s_phandle, phandle, &intc_phandles[0], xplic_phandles, ms->smp.cpus); + + *irq_mmio_phandle = xplic_phandles[0]; + *irq_virtio_phandle = xplic_phandles[0]; + *irq_pcie_phandle = xplic_phandles[0]; } else { phandle_pos = ms->smp.cpus; for (socket = (socket_count - 1); socket >= 0; socket--) { phandle_pos -= s->soc[socket].num_harts; if (s->aia_type == VIRT_AIA_TYPE_NONE) { - create_fdt_socket_plic(s, memmap, socket, phandle, + create_fdt_socket_plic(s, socket, phandle, &intc_phandles[phandle_pos], xplic_phandles); } else { - create_fdt_socket_aplic(s, memmap, socket, + create_fdt_socket_aplic(s, socket, msi_m_phandle, msi_s_phandle, phandle, &intc_phandles[phandle_pos], xplic_phandles, s->soc[socket].num_harts); } } - } - if (kvm_enabled() && virt_use_kvm_aia(s)) { - *irq_mmio_phandle = xplic_phandles[0]; - *irq_virtio_phandle = xplic_phandles[0]; - *irq_pcie_phandle = xplic_phandles[0]; - } else { for (socket = 0; socket < socket_count; socket++) { if (socket == 0) { *irq_mmio_phandle = xplic_phandles[socket]; @@ -824,21 +841,22 @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, riscv_socket_fdt_write_distance_matrix(ms); } -static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap, - uint32_t irq_virtio_phandle) +static void create_fdt_virtio(RISCVVirtState *s, uint32_t irq_virtio_phandle) { int i; MachineState *ms = MACHINE(s); + hwaddr virtio_base = s->memmap[VIRT_VIRTIO].base; for (i = 0; i < VIRTIO_COUNT; i++) { - g_autofree char *name = g_strdup_printf("/soc/virtio_mmio@%lx", - (long)(memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size)); + g_autofree char *name = NULL; + uint64_t size = s->memmap[VIRT_VIRTIO].size; + hwaddr addr = virtio_base + i * size; + + name = g_strdup_printf("/soc/virtio_mmio@%"HWADDR_PRIx, addr); qemu_fdt_add_subnode(ms->fdt, name); qemu_fdt_setprop_string(ms->fdt, name, "compatible", "virtio,mmio"); - qemu_fdt_setprop_cells(ms->fdt, name, "reg", - 0x0, memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size, - 0x0, memmap[VIRT_VIRTIO].size); + qemu_fdt_setprop_sized_cells(ms->fdt, name, "reg", 2, addr, 2, size); qemu_fdt_setprop_cell(ms->fdt, name, "interrupt-parent", irq_virtio_phandle); if (s->aia_type == VIRT_AIA_TYPE_NONE) { @@ -851,15 +869,16 @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap, } } -static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap, +static void create_fdt_pcie(RISCVVirtState *s, uint32_t irq_pcie_phandle, - uint32_t msi_pcie_phandle) + uint32_t msi_pcie_phandle, + uint32_t iommu_sys_phandle) { g_autofree char *name = NULL; MachineState *ms = MACHINE(s); - name = g_strdup_printf("/soc/pci@%lx", - (long) memmap[VIRT_PCIE_ECAM].base); + name = g_strdup_printf("/soc/pci@%"HWADDR_PRIx, + s->memmap[VIRT_PCIE_ECAM].base); qemu_fdt_setprop_cell(ms->fdt, name, "#address-cells", FDT_PCI_ADDR_CELLS); qemu_fdt_setprop_cell(ms->fdt, name, "#interrupt-cells", @@ -870,36 +889,41 @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap, qemu_fdt_setprop_string(ms->fdt, name, "device_type", "pci"); qemu_fdt_setprop_cell(ms->fdt, name, "linux,pci-domain", 0); qemu_fdt_setprop_cells(ms->fdt, name, "bus-range", 0, - memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1); + s->memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1); qemu_fdt_setprop(ms->fdt, name, "dma-coherent", NULL, 0); if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { 
qemu_fdt_setprop_cell(ms->fdt, name, "msi-parent", msi_pcie_phandle); } - qemu_fdt_setprop_cells(ms->fdt, name, "reg", 0, - memmap[VIRT_PCIE_ECAM].base, 0, memmap[VIRT_PCIE_ECAM].size); + qemu_fdt_setprop_sized_cells(ms->fdt, name, "reg", 2, + s->memmap[VIRT_PCIE_ECAM].base, 2, s->memmap[VIRT_PCIE_ECAM].size); qemu_fdt_setprop_sized_cells(ms->fdt, name, "ranges", 1, FDT_PCI_RANGE_IOPORT, 2, 0, - 2, memmap[VIRT_PCIE_PIO].base, 2, memmap[VIRT_PCIE_PIO].size, + 2, s->memmap[VIRT_PCIE_PIO].base, 2, s->memmap[VIRT_PCIE_PIO].size, 1, FDT_PCI_RANGE_MMIO, - 2, memmap[VIRT_PCIE_MMIO].base, - 2, memmap[VIRT_PCIE_MMIO].base, 2, memmap[VIRT_PCIE_MMIO].size, + 2, s->memmap[VIRT_PCIE_MMIO].base, + 2, s->memmap[VIRT_PCIE_MMIO].base, 2, s->memmap[VIRT_PCIE_MMIO].size, 1, FDT_PCI_RANGE_MMIO_64BIT, 2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.size); + if (virt_is_iommu_sys_enabled(s)) { + qemu_fdt_setprop_cells(ms->fdt, name, "iommu-map", + 0, iommu_sys_phandle, 0, 0, 0, + iommu_sys_phandle, 0, 0xffff); + } + create_pcie_irq_map(s, ms->fdt, name, irq_pcie_phandle); } -static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap, - uint32_t *phandle) +static void create_fdt_reset(RISCVVirtState *s, uint32_t *phandle) { char *name; uint32_t test_phandle; MachineState *ms = MACHINE(s); test_phandle = (*phandle)++; - name = g_strdup_printf("/soc/test@%lx", - (long)memmap[VIRT_TEST].base); + name = g_strdup_printf("/soc/test@%"HWADDR_PRIx, + s->memmap[VIRT_TEST].base); qemu_fdt_add_subnode(ms->fdt, name); { static const char * const compat[3] = { @@ -908,8 +932,9 @@ static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap, qemu_fdt_setprop_string_array(ms->fdt, name, "compatible", (char **)&compat, ARRAY_SIZE(compat)); } - qemu_fdt_setprop_cells(ms->fdt, name, "reg", - 0x0, memmap[VIRT_TEST].base, 0x0, memmap[VIRT_TEST].size); + qemu_fdt_setprop_sized_cells(ms->fdt, name, "reg", + 2, s->memmap[VIRT_TEST].base, + 2, s->memmap[VIRT_TEST].size); qemu_fdt_setprop_cell(ms->fdt, name, "phandle", test_phandle); test_phandle = qemu_fdt_get_phandle(ms->fdt, name); g_free(name); @@ -931,18 +956,19 @@ static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap, g_free(name); } -static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap, +static void create_fdt_uart(RISCVVirtState *s, uint32_t irq_mmio_phandle) { g_autofree char *name = NULL; MachineState *ms = MACHINE(s); - name = g_strdup_printf("/soc/serial@%lx", (long)memmap[VIRT_UART0].base); + name = g_strdup_printf("/soc/serial@%"HWADDR_PRIx, + s->memmap[VIRT_UART0].base); qemu_fdt_add_subnode(ms->fdt, name); qemu_fdt_setprop_string(ms->fdt, name, "compatible", "ns16550a"); - qemu_fdt_setprop_cells(ms->fdt, name, "reg", - 0x0, memmap[VIRT_UART0].base, - 0x0, memmap[VIRT_UART0].size); + qemu_fdt_setprop_sized_cells(ms->fdt, name, "reg", + 2, s->memmap[VIRT_UART0].base, + 2, s->memmap[VIRT_UART0].size); qemu_fdt_setprop_cell(ms->fdt, name, "clock-frequency", 3686400); qemu_fdt_setprop_cell(ms->fdt, name, "interrupt-parent", irq_mmio_phandle); if (s->aia_type == VIRT_AIA_TYPE_NONE) { @@ -952,20 +978,23 @@ static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap, } qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", name); + qemu_fdt_setprop_string(ms->fdt, "/aliases", "serial0", name); } -static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap, +static void create_fdt_rtc(RISCVVirtState *s, uint32_t irq_mmio_phandle) { g_autofree 
char *name = NULL; MachineState *ms = MACHINE(s); - name = g_strdup_printf("/soc/rtc@%lx", (long)memmap[VIRT_RTC].base); + name = g_strdup_printf("/soc/rtc@%"HWADDR_PRIx, + s->memmap[VIRT_RTC].base); qemu_fdt_add_subnode(ms->fdt, name); qemu_fdt_setprop_string(ms->fdt, name, "compatible", "google,goldfish-rtc"); - qemu_fdt_setprop_cells(ms->fdt, name, "reg", - 0x0, memmap[VIRT_RTC].base, 0x0, memmap[VIRT_RTC].size); + qemu_fdt_setprop_sized_cells(ms->fdt, name, "reg", + 2, s->memmap[VIRT_RTC].base, + 2, s->memmap[VIRT_RTC].size); qemu_fdt_setprop_cell(ms->fdt, name, "interrupt-parent", irq_mmio_phandle); if (s->aia_type == VIRT_AIA_TYPE_NONE) { @@ -975,11 +1004,11 @@ static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap, } } -static void create_fdt_flash(RISCVVirtState *s, const MemMapEntry *memmap) +static void create_fdt_flash(RISCVVirtState *s) { MachineState *ms = MACHINE(s); - hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2; - hwaddr flashbase = virt_memmap[VIRT_FLASH].base; + hwaddr flashsize = s->memmap[VIRT_FLASH].size / 2; + hwaddr flashbase = s->memmap[VIRT_FLASH].base; g_autofree char *name = g_strdup_printf("/flash@%" PRIx64, flashbase); qemu_fdt_add_subnode(ms->fdt, name); @@ -990,11 +1019,11 @@ static void create_fdt_flash(RISCVVirtState *s, const MemMapEntry *memmap) qemu_fdt_setprop_cell(ms->fdt, name, "bank-width", 4); } -static void create_fdt_fw_cfg(RISCVVirtState *s, const MemMapEntry *memmap) +static void create_fdt_fw_cfg(RISCVVirtState *s) { MachineState *ms = MACHINE(s); - hwaddr base = memmap[VIRT_FW_CFG].base; - hwaddr size = memmap[VIRT_FW_CFG].size; + hwaddr base = s->memmap[VIRT_FW_CFG].base; + hwaddr size = s->memmap[VIRT_FW_CFG].size; g_autofree char *nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base); qemu_fdt_add_subnode(ms->fdt, nodename); @@ -1013,8 +1042,8 @@ static void create_fdt_virtio_iommu(RISCVVirtState *s, uint16_t bdf) g_autofree char *iommu_node = NULL; g_autofree char *pci_node = NULL; - pci_node = g_strdup_printf("/soc/pci@%lx", - (long) virt_memmap[VIRT_PCIE_ECAM].base); + pci_node = g_strdup_printf("/soc/pci@%"HWADDR_PRIx, + s->memmap[VIRT_PCIE_ECAM].base); iommu_node = g_strdup_printf("%s/virtio_iommu@%x,%x", pci_node, PCI_SLOT(bdf), PCI_FUNC(bdf)); iommu_phandle = qemu_fdt_alloc_phandle(fdt); @@ -1033,27 +1062,98 @@ static void create_fdt_virtio_iommu(RISCVVirtState *s, uint16_t bdf) bdf + 1, iommu_phandle, bdf + 1, 0xffff - bdf); } +static void create_fdt_iommu_sys(RISCVVirtState *s, uint32_t irq_chip, + uint32_t msi_phandle, + uint32_t *iommu_sys_phandle) +{ + const char comp[] = "riscv,iommu"; + void *fdt = MACHINE(s)->fdt; + uint32_t iommu_phandle; + g_autofree char *iommu_node = NULL; + hwaddr addr = s->memmap[VIRT_IOMMU_SYS].base; + hwaddr size = s->memmap[VIRT_IOMMU_SYS].size; + uint32_t iommu_irq_map[RISCV_IOMMU_INTR_COUNT] = { + IOMMU_SYS_IRQ + RISCV_IOMMU_INTR_CQ, + IOMMU_SYS_IRQ + RISCV_IOMMU_INTR_FQ, + IOMMU_SYS_IRQ + RISCV_IOMMU_INTR_PM, + IOMMU_SYS_IRQ + RISCV_IOMMU_INTR_PQ, + }; + + iommu_node = g_strdup_printf("/soc/iommu@%x", + (unsigned int) s->memmap[VIRT_IOMMU_SYS].base); + iommu_phandle = qemu_fdt_alloc_phandle(fdt); + qemu_fdt_add_subnode(fdt, iommu_node); + + qemu_fdt_setprop(fdt, iommu_node, "compatible", comp, sizeof(comp)); + qemu_fdt_setprop_cell(fdt, iommu_node, "#iommu-cells", 1); + qemu_fdt_setprop_cell(fdt, iommu_node, "phandle", iommu_phandle); + + qemu_fdt_setprop_sized_cells(fdt, iommu_node, "reg", 2, addr, 2, size); + qemu_fdt_setprop_cell(fdt, iommu_node, "interrupt-parent", 
irq_chip); + + qemu_fdt_setprop_cells(fdt, iommu_node, "interrupts", + iommu_irq_map[0], FDT_IRQ_TYPE_EDGE_LOW, + iommu_irq_map[1], FDT_IRQ_TYPE_EDGE_LOW, + iommu_irq_map[2], FDT_IRQ_TYPE_EDGE_LOW, + iommu_irq_map[3], FDT_IRQ_TYPE_EDGE_LOW); + + qemu_fdt_setprop_cell(fdt, iommu_node, "msi-parent", msi_phandle); + + *iommu_sys_phandle = iommu_phandle; +} + +static void create_fdt_iommu(RISCVVirtState *s, uint16_t bdf) +{ + const char comp[] = "riscv,pci-iommu"; + void *fdt = MACHINE(s)->fdt; + uint32_t iommu_phandle; + g_autofree char *iommu_node = NULL; + g_autofree char *pci_node = NULL; + + pci_node = g_strdup_printf("/soc/pci@%"HWADDR_PRIx, + s->memmap[VIRT_PCIE_ECAM].base); + iommu_node = g_strdup_printf("%s/iommu@%x", pci_node, bdf); + iommu_phandle = qemu_fdt_alloc_phandle(fdt); + qemu_fdt_add_subnode(fdt, iommu_node); + + qemu_fdt_setprop(fdt, iommu_node, "compatible", comp, sizeof(comp)); + qemu_fdt_setprop_cell(fdt, iommu_node, "#iommu-cells", 1); + qemu_fdt_setprop_cell(fdt, iommu_node, "phandle", iommu_phandle); + qemu_fdt_setprop_cells(fdt, iommu_node, "reg", + bdf << 8, 0, 0, 0, 0); + qemu_fdt_setprop_cells(fdt, pci_node, "iommu-map", + 0, iommu_phandle, 0, bdf, + bdf + 1, iommu_phandle, bdf + 1, 0xffff - bdf); + s->pci_iommu_bdf = bdf; +} + static void finalize_fdt(RISCVVirtState *s) { uint32_t phandle = 1, irq_mmio_phandle = 1, msi_pcie_phandle = 1; uint32_t irq_pcie_phandle = 1, irq_virtio_phandle = 1; + uint32_t iommu_sys_phandle = 1; - create_fdt_sockets(s, virt_memmap, &phandle, &irq_mmio_phandle, + create_fdt_sockets(s, &phandle, &irq_mmio_phandle, &irq_pcie_phandle, &irq_virtio_phandle, &msi_pcie_phandle); - create_fdt_virtio(s, virt_memmap, irq_virtio_phandle); + create_fdt_virtio(s, irq_virtio_phandle); - create_fdt_pcie(s, virt_memmap, irq_pcie_phandle, msi_pcie_phandle); + if (virt_is_iommu_sys_enabled(s)) { + create_fdt_iommu_sys(s, irq_mmio_phandle, msi_pcie_phandle, + &iommu_sys_phandle); + } + create_fdt_pcie(s, irq_pcie_phandle, msi_pcie_phandle, + iommu_sys_phandle); - create_fdt_reset(s, virt_memmap, &phandle); + create_fdt_reset(s, &phandle); - create_fdt_uart(s, virt_memmap, irq_mmio_phandle); + create_fdt_uart(s, irq_mmio_phandle); - create_fdt_rtc(s, virt_memmap, irq_mmio_phandle); + create_fdt_rtc(s, irq_mmio_phandle); } -static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap) +static void create_fdt(RISCVVirtState *s) { MachineState *ms = MACHINE(s); uint8_t rng_seed[32]; @@ -1080,7 +1180,8 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap) * The "/soc/pci@..." node is needed for PCIE hotplugs * that might happen before finalize_fdt(). 
*/ - name = g_strdup_printf("/soc/pci@%lx", (long) memmap[VIRT_PCIE_ECAM].base); + name = g_strdup_printf("/soc/pci@%"HWADDR_PRIx, + s->memmap[VIRT_PCIE_ECAM].base); qemu_fdt_add_subnode(ms->fdt, name); qemu_fdt_add_subnode(ms->fdt, "/chosen"); @@ -1090,8 +1191,10 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap) qemu_fdt_setprop(ms->fdt, "/chosen", "rng-seed", rng_seed, sizeof(rng_seed)); - create_fdt_flash(s, memmap); - create_fdt_fw_cfg(s, memmap); + qemu_fdt_add_subnode(ms->fdt, "/aliases"); + + create_fdt_flash(s); + create_fdt_fw_cfg(s); create_fdt_pmu(s); } @@ -1116,23 +1219,21 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, dev = qdev_new(TYPE_GPEX_HOST); /* Set GPEX object properties for the virt machine */ - object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_BASE, + object_property_set_uint(OBJECT(dev), PCI_HOST_ECAM_BASE, ecam_base, NULL); - object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_SIZE, + object_property_set_int(OBJECT(dev), PCI_HOST_ECAM_SIZE, ecam_size, NULL); - object_property_set_uint(OBJECT(GPEX_HOST(dev)), - PCI_HOST_BELOW_4G_MMIO_BASE, + object_property_set_uint(OBJECT(dev), PCI_HOST_BELOW_4G_MMIO_BASE, mmio_base, NULL); - object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_BELOW_4G_MMIO_SIZE, + object_property_set_int(OBJECT(dev), PCI_HOST_BELOW_4G_MMIO_SIZE, mmio_size, NULL); - object_property_set_uint(OBJECT(GPEX_HOST(dev)), - PCI_HOST_ABOVE_4G_MMIO_BASE, + object_property_set_uint(OBJECT(dev), PCI_HOST_ABOVE_4G_MMIO_BASE, high_mmio_base, NULL); - object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ABOVE_4G_MMIO_SIZE, + object_property_set_int(OBJECT(dev), PCI_HOST_ABOVE_4G_MMIO_SIZE, high_mmio_size, NULL); - object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_BASE, + object_property_set_uint(OBJECT(dev), PCI_HOST_PIO_BASE, pio_base, NULL); - object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_SIZE, + object_property_set_int(OBJECT(dev), PCI_HOST_PIO_SIZE, pio_size, NULL); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); @@ -1158,20 +1259,19 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio_base); - for (i = 0; i < GPEX_NUM_IRQS; i++) { + for (i = 0; i < PCI_NUM_PINS; i++) { irq = qdev_get_gpio_in(irqchip, PCIE_IRQ + i); sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i); } - GPEX_HOST(dev)->gpex_cfg.bus = PCI_HOST_BRIDGE(GPEX_HOST(dev))->bus; + GPEX_HOST(dev)->gpex_cfg.bus = PCI_HOST_BRIDGE(dev)->bus; return dev; } -static FWCfgState *create_fw_cfg(const MachineState *ms) +static FWCfgState *create_fw_cfg(const MachineState *ms, hwaddr base) { - hwaddr base = virt_memmap[VIRT_FW_CFG].base; FWCfgState *fw_cfg; fw_cfg = fw_cfg_init_mem_wide(base + 8, base, 8, base + 16, @@ -1184,27 +1284,22 @@ static FWCfgState *create_fw_cfg(const MachineState *ms) static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket, int base_hartid, int hart_count) { - DeviceState *ret; g_autofree char *plic_hart_config = NULL; /* Per-socket PLIC hart topology configuration string */ plic_hart_config = riscv_plic_hart_config_string(hart_count); /* Per-socket PLIC */ - ret = sifive_plic_create( - memmap[VIRT_PLIC].base + socket * memmap[VIRT_PLIC].size, - plic_hart_config, hart_count, base_hartid, - VIRT_IRQCHIP_NUM_SOURCES, - ((1U << VIRT_IRQCHIP_NUM_PRIO_BITS) - 1), - VIRT_PLIC_PRIORITY_BASE, - VIRT_PLIC_PENDING_BASE, - VIRT_PLIC_ENABLE_BASE, - 
VIRT_PLIC_ENABLE_STRIDE, - VIRT_PLIC_CONTEXT_BASE, - VIRT_PLIC_CONTEXT_STRIDE, - memmap[VIRT_PLIC].size); - - return ret; + return sifive_plic_create( + memmap[VIRT_PLIC].base + socket * memmap[VIRT_PLIC].size, + plic_hart_config, hart_count, base_hartid, + VIRT_IRQCHIP_NUM_SOURCES, + ((1U << VIRT_IRQCHIP_NUM_PRIO_BITS) - 1), + VIRT_PLIC_PRIORITY_BASE, VIRT_PLIC_PENDING_BASE, + VIRT_PLIC_ENABLE_BASE, VIRT_PLIC_ENABLE_STRIDE, + VIRT_PLIC_CONTEXT_BASE, + VIRT_PLIC_CONTEXT_STRIDE, + memmap[VIRT_PLIC].size); } static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests, @@ -1212,7 +1307,7 @@ static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests, int base_hartid, int hart_count) { int i; - hwaddr addr; + hwaddr addr = 0; uint32_t guest_bits; DeviceState *aplic_s = NULL; DeviceState *aplic_m = NULL; @@ -1262,6 +1357,10 @@ static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests, VIRT_IRQCHIP_NUM_PRIO_BITS, msimode, false, aplic_m); + if (kvm_enabled() && msimode) { + riscv_aplic_set_kvm_msicfgaddr(RISCV_APLIC(aplic_s), addr); + } + return kvm_enabled() ? aplic_s : aplic_m; } @@ -1269,14 +1368,13 @@ static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip) { DeviceState *dev; SysBusDevice *sysbus; - const MemMapEntry *memmap = virt_memmap; int i; MemoryRegion *sysmem = get_system_memory(); dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE); dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE); qdev_prop_set_uint32(dev, "num_irqs", VIRT_PLATFORM_BUS_NUM_IRQS); - qdev_prop_set_uint32(dev, "mmio_size", memmap[VIRT_PLATFORM_BUS].size); + qdev_prop_set_uint32(dev, "mmio_size", s->memmap[VIRT_PLATFORM_BUS].size); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); s->platform_bus_dev = dev; @@ -1287,7 +1385,7 @@ static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip) } memory_region_add_subregion(sysmem, - memmap[VIRT_PLATFORM_BUS].base, + s->memmap[VIRT_PLATFORM_BUS].base, sysbus_mmio_get_region(sysbus, 0)); } @@ -1334,14 +1432,14 @@ static void virt_machine_done(Notifier *notifier, void *data) { RISCVVirtState *s = container_of(notifier, RISCVVirtState, machine_done); - const MemMapEntry *memmap = virt_memmap; MachineState *machine = MACHINE(s); - target_ulong start_addr = memmap[VIRT_DRAM].base; + hwaddr start_addr = s->memmap[VIRT_DRAM].base; target_ulong firmware_end_addr, kernel_start_addr; const char *firmware_name = riscv_default_firmware_name(&s->soc[0]); uint64_t fdt_load_addr; uint64_t kernel_entry = 0; BlockBackend *pflash_blk0; + RISCVBootInfo boot_info; /* * An user provided dtb must include everything, including @@ -1368,7 +1466,7 @@ static void virt_machine_done(Notifier *notifier, void *data) } firmware_end_addr = riscv_find_and_load_firmware(machine, firmware_name, - start_addr, NULL); + &start_addr, NULL); pflash_blk0 = pflash_cfi01_get_blk(s->flash[0]); if (pflash_blk0) { @@ -1379,34 +1477,36 @@ static void virt_machine_done(Notifier *notifier, void *data) * let's overwrite the address we jump to after reset to * the base of the flash. */ - start_addr = virt_memmap[VIRT_FLASH].base; + start_addr = s->memmap[VIRT_FLASH].base; } else { /* * Pflash was supplied but either KVM guest or bios is not none. * In this case, base of the flash would contain S-mode payload. 
*/ riscv_setup_firmware_boot(machine); - kernel_entry = virt_memmap[VIRT_FLASH].base; + kernel_entry = s->memmap[VIRT_FLASH].base; } } + riscv_boot_info_init(&boot_info, &s->soc[0]); + if (machine->kernel_filename && !kernel_entry) { - kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0], + kernel_start_addr = riscv_calc_kernel_start_addr(&boot_info, firmware_end_addr); - - kernel_entry = riscv_load_kernel(machine, &s->soc[0], - kernel_start_addr, true, NULL); + riscv_load_kernel(machine, &boot_info, kernel_start_addr, + true, NULL); + kernel_entry = boot_info.image_low_addr; } - fdt_load_addr = riscv_compute_fdt_addr(memmap[VIRT_DRAM].base, - memmap[VIRT_DRAM].size, - machine); + fdt_load_addr = riscv_compute_fdt_addr(s->memmap[VIRT_DRAM].base, + s->memmap[VIRT_DRAM].size, + machine, &boot_info); riscv_load_fdt(fdt_load_addr, machine->fdt); /* load the reset vector */ riscv_setup_rom_reset_vec(machine, &s->soc[0], start_addr, - virt_memmap[VIRT_MROM].base, - virt_memmap[VIRT_MROM].size, kernel_entry, + s->memmap[VIRT_MROM].base, + s->memmap[VIRT_MROM].size, kernel_entry, fdt_load_addr); /* @@ -1427,7 +1527,6 @@ static void virt_machine_done(Notifier *notifier, void *data) static void virt_machine_init(MachineState *machine) { - const MemMapEntry *memmap = virt_memmap; RISCVVirtState *s = RISCV_VIRT_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); MemoryRegion *mask_rom = g_new(MemoryRegion, 1); @@ -1435,6 +1534,8 @@ static void virt_machine_init(MachineState *machine) int i, base_hartid, hart_count; int socket_count = riscv_socket_count(machine); + s->memmap = virt_memmap; + /* Check socket count limit */ if (VIRT_SOCKETS_MAX < socket_count) { error_report("number of sockets/nodes should be less than %d", @@ -1482,7 +1583,7 @@ static void virt_machine_init(MachineState *machine) if (virt_aclint_allowed() && s->have_aclint) { if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { /* Per-socket ACLINT MTIMER */ - riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base + + riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base + i * RISCV_ACLINT_DEFAULT_MTIMER_SIZE, RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count, @@ -1491,28 +1592,28 @@ static void virt_machine_init(MachineState *machine) RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true); } else { /* Per-socket ACLINT MSWI, MTIMER, and SSWI */ - riscv_aclint_swi_create(memmap[VIRT_CLINT].base + - i * memmap[VIRT_CLINT].size, + riscv_aclint_swi_create(s->memmap[VIRT_CLINT].base + + i * s->memmap[VIRT_CLINT].size, base_hartid, hart_count, false); - riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base + - i * memmap[VIRT_CLINT].size + + riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base + + i * s->memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE, RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count, RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true); - riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base + - i * memmap[VIRT_ACLINT_SSWI].size, + riscv_aclint_swi_create(s->memmap[VIRT_ACLINT_SSWI].base + + i * s->memmap[VIRT_ACLINT_SSWI].size, base_hartid, hart_count, true); } } else if (tcg_enabled()) { /* Per-socket SiFive CLINT */ riscv_aclint_swi_create( - memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size, + s->memmap[VIRT_CLINT].base + i * s->memmap[VIRT_CLINT].size, base_hartid, hart_count, false); - riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base + - i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE, + riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base + + 
i * s->memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE, RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count, RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true); @@ -1520,11 +1621,11 @@ static void virt_machine_init(MachineState *machine) /* Per-socket interrupt controller */ if (s->aia_type == VIRT_AIA_TYPE_NONE) { - s->irqchip[i] = virt_create_plic(memmap, i, + s->irqchip[i] = virt_create_plic(s->memmap, i, base_hartid, hart_count); } else { s->irqchip[i] = virt_create_aia(s->aia_type, s->aia_guests, - memmap, i, base_hartid, + s->memmap, i, base_hartid, hart_count); } @@ -1543,11 +1644,11 @@ static void virt_machine_init(MachineState *machine) } } - if (kvm_enabled() && virt_use_kvm_aia(s)) { + if (kvm_enabled() && virt_use_kvm_aia_aplic_imsic(s->aia_type)) { kvm_riscv_aia_create(machine, IMSIC_MMIO_GROUP_MIN_SHIFT, VIRT_IRQCHIP_NUM_SOURCES, VIRT_IRQCHIP_NUM_MSIS, - memmap[VIRT_APLIC_S].base, - memmap[VIRT_IMSIC_S].base, + s->memmap[VIRT_APLIC_S].base, + s->memmap[VIRT_IMSIC_S].base, s->aia_guests); } @@ -1563,37 +1664,36 @@ static void virt_machine_init(MachineState *machine) virt_high_pcie_memmap.size = VIRT32_HIGH_PCIE_MMIO_SIZE; } else { virt_high_pcie_memmap.size = VIRT64_HIGH_PCIE_MMIO_SIZE; - virt_high_pcie_memmap.base = memmap[VIRT_DRAM].base + machine->ram_size; + virt_high_pcie_memmap.base = s->memmap[VIRT_DRAM].base + + machine->ram_size; virt_high_pcie_memmap.base = ROUND_UP(virt_high_pcie_memmap.base, virt_high_pcie_memmap.size); } - s->memmap = virt_memmap; - /* register system main memory (actual RAM) */ - memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base, - machine->ram); + memory_region_add_subregion(system_memory, s->memmap[VIRT_DRAM].base, + machine->ram); /* boot rom */ memory_region_init_rom(mask_rom, NULL, "riscv_virt_board.mrom", - memmap[VIRT_MROM].size, &error_fatal); - memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base, + s->memmap[VIRT_MROM].size, &error_fatal); + memory_region_add_subregion(system_memory, s->memmap[VIRT_MROM].base, mask_rom); /* * Init fw_cfg. Must be done before riscv_load_fdt, otherwise the * device tree cannot be altered and we get FDT_ERR_NOSPACE. 
*/ - s->fw_cfg = create_fw_cfg(machine); + s->fw_cfg = create_fw_cfg(machine, s->memmap[VIRT_FW_CFG].base); rom_set_fw(s->fw_cfg); /* SiFive Test MMIO device */ - sifive_test_create(memmap[VIRT_TEST].base); + sifive_test_create(s->memmap[VIRT_TEST].base); /* VirtIO MMIO devices */ for (i = 0; i < VIRTIO_COUNT; i++) { sysbus_create_simple("virtio-mmio", - memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size, + s->memmap[VIRT_VIRTIO].base + i * s->memmap[VIRT_VIRTIO].size, qdev_get_gpio_in(virtio_irqchip, VIRTIO_IRQ + i)); } @@ -1601,11 +1701,11 @@ static void virt_machine_init(MachineState *machine) create_platform_bus(s, mmio_irqchip); - serial_mm_init(system_memory, memmap[VIRT_UART0].base, + serial_mm_init(system_memory, s->memmap[VIRT_UART0].base, 0, qdev_get_gpio_in(mmio_irqchip, UART0_IRQ), 399193, serial_hd(0), DEVICE_LITTLE_ENDIAN); - sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base, + sysbus_create_simple("goldfish_rtc", s->memmap[VIRT_RTC].base, qdev_get_gpio_in(mmio_irqchip, RTC_IRQ)); for (i = 0; i < ARRAY_SIZE(s->flash); i++) { @@ -1623,7 +1723,23 @@ static void virt_machine_init(MachineState *machine) exit(1); } } else { - create_fdt(s, memmap); + create_fdt(s); + } + + if (virt_is_iommu_sys_enabled(s)) { + DeviceState *iommu_sys = qdev_new(TYPE_RISCV_IOMMU_SYS); + + object_property_set_uint(OBJECT(iommu_sys), "addr", + s->memmap[VIRT_IOMMU_SYS].base, + &error_fatal); + object_property_set_uint(OBJECT(iommu_sys), "base-irq", + IOMMU_SYS_IRQ, + &error_fatal); + object_property_set_link(OBJECT(iommu_sys), "irqchip", + OBJECT(mmio_irqchip), + &error_fatal); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(iommu_sys), &error_fatal); } s->machine_done.notify = virt_machine_done; @@ -1639,6 +1755,7 @@ static void virt_machine_instance_init(Object *obj) s->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); s->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); s->acpi = ON_OFF_AUTO_AUTO; + s->iommu_sys = ON_OFF_AUTO_AUTO; } static char *virt_get_aia_guests(Object *obj, Error **errp) @@ -1711,6 +1828,28 @@ static void virt_set_aclint(Object *obj, bool value, Error **errp) s->have_aclint = value; } +bool virt_is_iommu_sys_enabled(RISCVVirtState *s) +{ + return s->iommu_sys == ON_OFF_AUTO_ON; +} + +static void virt_get_iommu_sys(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); + OnOffAuto iommu_sys = s->iommu_sys; + + visit_type_OnOffAuto(v, name, &iommu_sys, errp); +} + +static void virt_set_iommu_sys(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &s->iommu_sys, errp); +} + bool virt_is_acpi_enabled(RISCVVirtState *s) { return s->acpi != ON_OFF_AUTO_OFF; @@ -1737,11 +1876,15 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine, DeviceState *dev) { MachineClass *mc = MACHINE_GET_CLASS(machine); + RISCVVirtState *s = RISCV_VIRT_MACHINE(machine); if (device_is_dynamic_sysbus(mc, dev) || - object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { + object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI) || + object_dynamic_cast(OBJECT(dev), TYPE_RISCV_IOMMU_PCI)) { + s->iommu_sys = ON_OFF_AUTO_OFF; return HOTPLUG_HANDLER(machine); } + return NULL; } @@ -1762,9 +1905,14 @@ static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev, if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { create_fdt_virtio_iommu(s, pci_get_bdf(PCI_DEVICE(dev))); } + + if 
(object_dynamic_cast(OBJECT(dev), TYPE_RISCV_IOMMU_PCI)) { + create_fdt_iommu(s, pci_get_bdf(PCI_DEVICE(dev))); + s->iommu_sys = ON_OFF_AUTO_OFF; + } } -static void virt_machine_class_init(ObjectClass *oc, void *data) +static void virt_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); @@ -1789,6 +1937,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) hc->plug = virt_machine_device_plug_cb; machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_UEFI_VARS_SYSBUS); #ifdef CONFIG_TPM machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS); #endif @@ -1823,6 +1972,12 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) NULL, NULL); object_class_property_set_description(oc, "acpi", "Enable ACPI"); + + object_class_property_add(oc, "iommu-sys", "OnOffAuto", + virt_get_iommu_sys, virt_set_iommu_sys, + NULL, NULL); + object_class_property_set_description(oc, "iommu-sys", + "Enable IOMMU platform device"); } static const TypeInfo virt_machine_typeinfo = { @@ -1831,7 +1986,7 @@ static const TypeInfo virt_machine_typeinfo = { .class_init = virt_machine_class_init, .instance_init = virt_machine_instance_init, .instance_size = sizeof(RISCVVirtState), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } }, diff --git a/hw/riscv/xiangshan_kmh.c b/hw/riscv/xiangshan_kmh.c new file mode 100644 index 00000000000..a95fd6174fd --- /dev/null +++ b/hw/riscv/xiangshan_kmh.c @@ -0,0 +1,220 @@ +/* + * QEMU RISC-V Board Compatible with the Xiangshan Kunminghu + * FPGA prototype platform + * + * Copyright (c) 2025 Beijing Institute of Open Source Chip (BOSC) + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Provides a board compatible with the Xiangshan Kunminghu + * FPGA prototype platform: + * + * 0) UART (16550A) + * 1) CLINT (Core-Local Interruptor) + * 2) IMSIC (Incoming MSI Controller) + * 3) APLIC (Advanced Platform-Level Interrupt Controller) + * + * More information can be found in our Github repository: + * https://github.com/OpenXiangShan/XiangShan + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "system/address-spaces.h" +#include "hw/boards.h" +#include "hw/char/serial-mm.h" +#include "hw/intc/riscv_aclint.h" +#include "hw/intc/riscv_aplic.h" +#include "hw/intc/riscv_imsic.h" +#include "hw/qdev-properties.h" +#include "hw/riscv/boot.h" +#include "hw/riscv/xiangshan_kmh.h" +#include "hw/riscv/riscv_hart.h" +#include "system/system.h" + +static const MemMapEntry xiangshan_kmh_memmap[] = { + [XIANGSHAN_KMH_ROM] = { 0x1000, 0xF000 }, + [XIANGSHAN_KMH_UART0] = { 0x310B0000, 0x10000 }, + [XIANGSHAN_KMH_CLINT] = { 0x38000000, 0x10000 }, + [XIANGSHAN_KMH_APLIC_M] = { 0x31100000, 0x4000 }, + [XIANGSHAN_KMH_APLIC_S] = { 0x31120000, 0x4000 }, + [XIANGSHAN_KMH_IMSIC_M] = { 0x3A800000, 0x10000 }, + [XIANGSHAN_KMH_IMSIC_S] = { 0x3B000000, 0x80000 }, + [XIANGSHAN_KMH_DRAM] = { 0x80000000, 0x0 }, +}; + +static DeviceState *xiangshan_kmh_create_aia(uint32_t num_harts) +{ + int i; + const MemMapEntry *memmap = xiangshan_kmh_memmap; + hwaddr addr = 0; + DeviceState *aplic_m = NULL; + + /* M-level IMSICs */ + addr = memmap[XIANGSHAN_KMH_IMSIC_M].base; + for (i = 0; i < num_harts; i++) { + riscv_imsic_create(addr + i * IMSIC_HART_SIZE(0), i, true, + 1, XIANGSHAN_KMH_IMSIC_NUM_IDS); + } + + /* S-level IMSICs */ + addr = memmap[XIANGSHAN_KMH_IMSIC_S].base; + for (i = 0; i < num_harts; i++) { + riscv_imsic_create(addr + + i * IMSIC_HART_SIZE(XIANGSHAN_KMH_IMSIC_GUEST_BITS), + i, false, 1 + XIANGSHAN_KMH_IMSIC_GUEST_BITS, + XIANGSHAN_KMH_IMSIC_NUM_IDS); + } + + /* M-level APLIC */ + aplic_m = riscv_aplic_create(memmap[XIANGSHAN_KMH_APLIC_M].base, + memmap[XIANGSHAN_KMH_APLIC_M].size, + 0, 0, XIANGSHAN_KMH_APLIC_NUM_SOURCES, + 1, true, true, NULL); + + /* S-level APLIC */ + riscv_aplic_create(memmap[XIANGSHAN_KMH_APLIC_S].base, + memmap[XIANGSHAN_KMH_APLIC_S].size, + 0, 0, XIANGSHAN_KMH_APLIC_NUM_SOURCES, + 1, true, false, aplic_m); + + return aplic_m; +} + +static void xiangshan_kmh_soc_realize(DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + XiangshanKmhSoCState *s = XIANGSHAN_KMH_SOC(dev); + const MemMapEntry *memmap = xiangshan_kmh_memmap; + MemoryRegion *system_memory = get_system_memory(); + uint32_t num_harts = ms->smp.cpus; + + qdev_prop_set_uint32(DEVICE(&s->cpus), "num-harts", num_harts); + qdev_prop_set_uint32(DEVICE(&s->cpus), "hartid-base", 0); + qdev_prop_set_string(DEVICE(&s->cpus), "cpu-type", + TYPE_RISCV_CPU_XIANGSHAN_KMH); + sysbus_realize(SYS_BUS_DEVICE(&s->cpus), &error_fatal); + + /* AIA */ + s->irqchip = xiangshan_kmh_create_aia(num_harts); + + /* UART */ + serial_mm_init(system_memory, memmap[XIANGSHAN_KMH_UART0].base, 2, + qdev_get_gpio_in(s->irqchip, XIANGSHAN_KMH_UART0_IRQ), + 115200, serial_hd(0), DEVICE_LITTLE_ENDIAN); + + /* CLINT */ + riscv_aclint_swi_create(memmap[XIANGSHAN_KMH_CLINT].base, + 0, num_harts, false); + riscv_aclint_mtimer_create(memmap[XIANGSHAN_KMH_CLINT].base + + RISCV_ACLINT_SWI_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, + 0, num_harts, RISCV_ACLINT_DEFAULT_MTIMECMP, + RISCV_ACLINT_DEFAULT_MTIME, + XIANGSHAN_KMH_CLINT_TIMEBASE_FREQ, true); + + /* ROM */ + memory_region_init_rom(&s->rom, OBJECT(dev), "xiangshan.kunminghu.rom", + memmap[XIANGSHAN_KMH_ROM].size, &error_fatal); + memory_region_add_subregion(system_memory, + memmap[XIANGSHAN_KMH_ROM].base, &s->rom); +} + +static void xiangshan_kmh_soc_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = xiangshan_kmh_soc_realize; + dc->user_creatable = 
false; +} + +static void xiangshan_kmh_soc_instance_init(Object *obj) +{ + XiangshanKmhSoCState *s = XIANGSHAN_KMH_SOC(obj); + + object_initialize_child(obj, "cpus", &s->cpus, TYPE_RISCV_HART_ARRAY); +} + +static const TypeInfo xiangshan_kmh_soc_info = { + .name = TYPE_XIANGSHAN_KMH_SOC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XiangshanKmhSoCState), + .instance_init = xiangshan_kmh_soc_instance_init, + .class_init = xiangshan_kmh_soc_class_init, +}; + +static void xiangshan_kmh_soc_register_types(void) +{ + type_register_static(&xiangshan_kmh_soc_info); +} +type_init(xiangshan_kmh_soc_register_types) + +static void xiangshan_kmh_machine_init(MachineState *machine) +{ + XiangshanKmhState *s = XIANGSHAN_KMH_MACHINE(machine); + const MemMapEntry *memmap = xiangshan_kmh_memmap; + MemoryRegion *system_memory = get_system_memory(); + hwaddr start_addr = memmap[XIANGSHAN_KMH_DRAM].base; + + /* Initialize SoC */ + object_initialize_child(OBJECT(machine), "soc", &s->soc, + TYPE_XIANGSHAN_KMH_SOC); + qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); + + /* Register RAM */ + memory_region_add_subregion(system_memory, + memmap[XIANGSHAN_KMH_DRAM].base, + machine->ram); + + /* ROM reset vector */ + riscv_setup_rom_reset_vec(machine, &s->soc.cpus, + start_addr, + memmap[XIANGSHAN_KMH_ROM].base, + memmap[XIANGSHAN_KMH_ROM].size, 0, 0); + if (machine->firmware) { + riscv_load_firmware(machine->firmware, &start_addr, NULL); + } + + /* Note: dtb has been integrated into firmware(OpenSBI) when compiling */ +} + +static void xiangshan_kmh_machine_class_init(ObjectClass *klass, const void *data) +{ + MachineClass *mc = MACHINE_CLASS(klass); + static const char *const valid_cpu_types[] = { + TYPE_RISCV_CPU_XIANGSHAN_KMH, + NULL + }; + + mc->desc = "RISC-V Board compatible with the Xiangshan " \ + "Kunminghu FPGA prototype platform"; + mc->init = xiangshan_kmh_machine_init; + mc->max_cpus = XIANGSHAN_KMH_MAX_CPUS; + mc->default_cpu_type = TYPE_RISCV_CPU_XIANGSHAN_KMH; + mc->valid_cpu_types = valid_cpu_types; + mc->default_ram_id = "xiangshan.kunminghu.ram"; +} + +static const TypeInfo xiangshan_kmh_machine_info = { + .name = TYPE_XIANGSHAN_KMH_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(XiangshanKmhState), + .class_init = xiangshan_kmh_machine_class_init, +}; + +static void xiangshan_kmh_machine_register_types(void) +{ + type_register_static(&xiangshan_kmh_machine_info); +} +type_init(xiangshan_kmh_machine_register_types) diff --git a/hw/rtc/Kconfig b/hw/rtc/Kconfig index d0d8dda0840..315b0e4eccd 100644 --- a/hw/rtc/Kconfig +++ b/hw/rtc/Kconfig @@ -14,10 +14,6 @@ config M48T59 config PL031 bool -config TWL92230 - bool - depends on I2C - config MC146818RTC depends on ISA_BUS bool @@ -30,3 +26,8 @@ config GOLDFISH_RTC config LS7A_RTC bool + +config RS5C372_RTC + bool + depends on I2C + default y if I2C_DEVICES diff --git a/hw/rtc/allwinner-rtc.c b/hw/rtc/allwinner-rtc.c index 2ac50b30cb8..a747bff534c 100644 --- a/hw/rtc/allwinner-rtc.c +++ b/hw/rtc/allwinner-rtc.c @@ -25,7 +25,7 @@ #include "qemu/module.h" #include "hw/qdev-properties.h" #include "hw/rtc/allwinner-rtc.h" -#include "sysemu/rtc.h" +#include "system/rtc.h" #include "trace.h" /* RTC registers */ @@ -259,7 +259,7 @@ static void allwinner_rtc_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_rtc_ops = { .read = allwinner_rtc_read, .write = allwinner_rtc_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -311,16 
+311,15 @@ static const VMStateDescription allwinner_rtc_vmstate = { } }; -static Property allwinner_rtc_properties[] = { +static const Property allwinner_rtc_properties[] = { DEFINE_PROP_INT32("base-year", AwRtcState, base_year, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void allwinner_rtc_class_init(ObjectClass *klass, void *data) +static void allwinner_rtc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = allwinner_rtc_reset; + device_class_set_legacy_reset(dc, allwinner_rtc_reset); dc->vmsd = &allwinner_rtc_vmstate; device_class_set_props(dc, allwinner_rtc_properties); } @@ -331,7 +330,7 @@ static void allwinner_rtc_sun4i_init(Object *obj) s->base_year = 2010; } -static void allwinner_rtc_sun4i_class_init(ObjectClass *klass, void *data) +static void allwinner_rtc_sun4i_class_init(ObjectClass *klass, const void *data) { AwRtcClass *arc = AW_RTC_CLASS(klass); @@ -347,7 +346,7 @@ static void allwinner_rtc_sun6i_init(Object *obj) s->base_year = 1970; } -static void allwinner_rtc_sun6i_class_init(ObjectClass *klass, void *data) +static void allwinner_rtc_sun6i_class_init(ObjectClass *klass, const void *data) { AwRtcClass *arc = AW_RTC_CLASS(klass); @@ -363,7 +362,7 @@ static void allwinner_rtc_sun7i_init(Object *obj) s->base_year = 1970; } -static void allwinner_rtc_sun7i_class_init(ObjectClass *klass, void *data) +static void allwinner_rtc_sun7i_class_init(ObjectClass *klass, const void *data) { AwRtcClass *arc = AW_RTC_CLASS(klass); allwinner_rtc_sun4i_class_init(klass, arc); diff --git a/hw/rtc/aspeed_rtc.c b/hw/rtc/aspeed_rtc.c index 589d9a5a7a8..c4feea23a0b 100644 --- a/hw/rtc/aspeed_rtc.c +++ b/hw/rtc/aspeed_rtc.c @@ -11,7 +11,7 @@ #include "migration/vmstate.h" #include "qemu/log.h" #include "qemu/timer.h" -#include "sysemu/rtc.h" +#include "system/rtc.h" #include "trace.h" @@ -156,13 +156,13 @@ static void aspeed_rtc_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->iomem); } -static void aspeed_rtc_class_init(ObjectClass *klass, void *data) +static void aspeed_rtc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_rtc_realize; dc->vmsd = &vmstate_aspeed_rtc; - dc->reset = aspeed_rtc_reset; + device_class_set_legacy_reset(dc, aspeed_rtc_reset); } static const TypeInfo aspeed_rtc_info = { diff --git a/hw/rtc/ds1338.c b/hw/rtc/ds1338.c index e479661c391..5f1ee2e410a 100644 --- a/hw/rtc/ds1338.c +++ b/hw/rtc/ds1338.c @@ -14,9 +14,9 @@ #include "hw/i2c/i2c.h" #include "migration/vmstate.h" #include "qemu/bcd.h" -#include "qemu/module.h" #include "qom/object.h" -#include "sysemu/rtc.h" +#include "system/rtc.h" +#include "trace.h" /* Size of NVRAM including both the user-accessible area and the * secondary register area. 
@@ -126,6 +126,9 @@ static uint8_t ds1338_recv(I2CSlave *i2c) uint8_t res; res = s->nvram[s->ptr]; + + trace_ds1338_recv(s->ptr, res); + inc_regptr(s); return res; } @@ -134,6 +137,8 @@ static int ds1338_send(I2CSlave *i2c, uint8_t data) { DS1338State *s = DS1338(i2c); + trace_ds1338_send(s->ptr, data); + if (s->addr_byte) { s->ptr = data & (NVRAM_SIZE - 1); s->addr_byte = false; @@ -215,7 +220,7 @@ static void ds1338_reset(DeviceState *dev) s->addr_byte = false; } -static void ds1338_class_init(ObjectClass *klass, void *data) +static void ds1338_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); @@ -223,20 +228,17 @@ static void ds1338_class_init(ObjectClass *klass, void *data) k->event = ds1338_event; k->recv = ds1338_recv; k->send = ds1338_send; - dc->reset = ds1338_reset; + device_class_set_legacy_reset(dc, ds1338_reset); dc->vmsd = &vmstate_ds1338; } -static const TypeInfo ds1338_info = { - .name = TYPE_DS1338, - .parent = TYPE_I2C_SLAVE, - .instance_size = sizeof(DS1338State), - .class_init = ds1338_class_init, +static const TypeInfo ds1338_types[] = { + { + .name = TYPE_DS1338, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(DS1338State), + .class_init = ds1338_class_init, + }, }; -static void ds1338_register_types(void) -{ - type_register_static(&ds1338_info); -} - -type_init(ds1338_register_types) +DEFINE_TYPES(ds1338_types) diff --git a/hw/rtc/exynos4210_rtc.c b/hw/rtc/exynos4210_rtc.c index 319371f97d1..624b4f61a85 100644 --- a/hw/rtc/exynos4210_rtc.c +++ b/hw/rtc/exynos4210_rtc.c @@ -38,7 +38,7 @@ #include "hw/arm/exynos4210.h" #include "qom/object.h" -#include "sysemu/rtc.h" +#include "system/rtc.h" #define DEBUG_RTC 0 @@ -592,11 +592,11 @@ static void exynos4210_rtc_finalize(Object *obj) ptimer_free(s->ptimer_1Hz); } -static void exynos4210_rtc_class_init(ObjectClass *klass, void *data) +static void exynos4210_rtc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = exynos4210_rtc_reset; + device_class_set_legacy_reset(dc, exynos4210_rtc_reset); dc->vmsd = &vmstate_exynos4210_rtc_state; } diff --git a/hw/rtc/goldfish_rtc.c b/hw/rtc/goldfish_rtc.c index 01acf30b278..78df031cf29 100644 --- a/hw/rtc/goldfish_rtc.c +++ b/hw/rtc/goldfish_rtc.c @@ -27,8 +27,8 @@ #include "hw/sysbus.h" #include "qemu/bitops.h" #include "qemu/timer.h" -#include "sysemu/sysemu.h" -#include "sysemu/rtc.h" +#include "system/system.h" +#include "system/rtc.h" #include "qemu/cutils.h" #include "qemu/log.h" @@ -178,38 +178,21 @@ static void goldfish_rtc_write(void *opaque, hwaddr offset, trace_goldfish_rtc_write(offset, value); } -static int goldfish_rtc_pre_save(void *opaque) -{ - uint64_t delta; - GoldfishRTCState *s = opaque; - - /* - * We want to migrate this offset, which sounds straightforward. - * Unfortunately, we cannot directly pass tick_offset because - * rtc_clock on destination Host might not be same source Host. - * - * To tackle, this we pass tick_offset relative to vm_clock from - * source Host and make it relative to rtc_clock at destination Host. - */ - delta = qemu_clock_get_ns(rtc_clock) - - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - s->tick_offset_vmstate = s->tick_offset + delta; - - return 0; -} - static int goldfish_rtc_post_load(void *opaque, int version_id) { - uint64_t delta; GoldfishRTCState *s = opaque; - /* - * We extract tick_offset from tick_offset_vmstate by doing - * reverse math compared to pre_save() function. 
- */ - delta = qemu_clock_get_ns(rtc_clock) - - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - s->tick_offset = s->tick_offset_vmstate - delta; + if (version_id < 3) { + /* + * Previous versions didn't migrate tick_offset directly. Instead, they + * migrated tick_offset_vmstate, which is a recalculation based on + * QEMU_CLOCK_VIRTUAL. We use tick_offset_vmstate when migrating from + * older versions. + */ + uint64_t delta = qemu_clock_get_ns(rtc_clock) - + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->tick_offset = s->tick_offset_vmstate - delta; + } goldfish_rtc_set_alarm(s); @@ -239,8 +222,7 @@ static const MemoryRegionOps goldfish_rtc_ops[2] = { static const VMStateDescription goldfish_rtc_vmstate = { .name = TYPE_GOLDFISH_RTC, - .version_id = 2, - .pre_save = goldfish_rtc_pre_save, + .version_id = 3, .post_load = goldfish_rtc_post_load, .fields = (const VMStateField[]) { VMSTATE_UINT64(tick_offset_vmstate, GoldfishRTCState), @@ -249,6 +231,7 @@ static const VMStateDescription goldfish_rtc_vmstate = { VMSTATE_UINT32(irq_pending, GoldfishRTCState), VMSTATE_UINT32(irq_enabled, GoldfishRTCState), VMSTATE_UINT32(time_high, GoldfishRTCState), + VMSTATE_UINT64_V(tick_offset, GoldfishRTCState, 3), VMSTATE_END_OF_LIST() } }; @@ -256,15 +239,8 @@ static const VMStateDescription goldfish_rtc_vmstate = { static void goldfish_rtc_reset(DeviceState *dev) { GoldfishRTCState *s = GOLDFISH_RTC(dev); - struct tm tm; timer_del(s->timer); - - qemu_get_timedate(&tm, 0); - s->tick_offset = mktimegm(&tm); - s->tick_offset *= NANOSECONDS_PER_SECOND; - s->tick_offset -= qemu_clock_get_ns(rtc_clock); - s->tick_offset_vmstate = 0; s->alarm_next = 0; s->alarm_running = 0; s->irq_pending = 0; @@ -275,6 +251,7 @@ static void goldfish_rtc_realize(DeviceState *d, Error **errp) { SysBusDevice *dev = SYS_BUS_DEVICE(d); GoldfishRTCState *s = GOLDFISH_RTC(d); + struct tm tm; memory_region_init_io(&s->iomem, OBJECT(s), &goldfish_rtc_ops[s->big_endian], s, @@ -284,21 +261,25 @@ static void goldfish_rtc_realize(DeviceState *d, Error **errp) sysbus_init_irq(dev, &s->irq); s->timer = timer_new_ns(rtc_clock, goldfish_rtc_interrupt, s); + + qemu_get_timedate(&tm, 0); + s->tick_offset = mktimegm(&tm); + s->tick_offset *= NANOSECONDS_PER_SECOND; + s->tick_offset -= qemu_clock_get_ns(rtc_clock); } -static Property goldfish_rtc_properties[] = { +static const Property goldfish_rtc_properties[] = { DEFINE_PROP_BOOL("big-endian", GoldfishRTCState, big_endian, false), - DEFINE_PROP_END_OF_LIST(), }; -static void goldfish_rtc_class_init(ObjectClass *klass, void *data) +static void goldfish_rtc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, goldfish_rtc_properties); dc->realize = goldfish_rtc_realize; - dc->reset = goldfish_rtc_reset; + device_class_set_legacy_reset(dc, goldfish_rtc_reset); dc->vmsd = &goldfish_rtc_vmstate; } diff --git a/hw/rtc/ls7a_rtc.c b/hw/rtc/ls7a_rtc.c index 3226b6105e8..10097b2db75 100644 --- a/hw/rtc/ls7a_rtc.c +++ b/hw/rtc/ls7a_rtc.c @@ -10,12 +10,12 @@ #include "hw/irq.h" #include "hw/register.h" #include "qemu/timer.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qemu/cutils.h" #include "qemu/log.h" #include "migration/vmstate.h" #include "hw/misc/unimp.h" -#include "sysemu/rtc.h" +#include "system/rtc.h" #include "hw/registerfields.h" #define SYS_TOYTRIM 0x20 @@ -464,12 +464,12 @@ static const VMStateDescription vmstate_ls7a_rtc = { } }; -static void ls7a_rtc_class_init(ObjectClass *klass, void *data) +static void 
ls7a_rtc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_ls7a_rtc; dc->realize = ls7a_rtc_realize; - dc->reset = ls7a_rtc_reset; + device_class_set_legacy_reset(dc, ls7a_rtc_reset); dc->desc = "ls7a rtc"; } diff --git a/hw/rtc/m41t80.c b/hw/rtc/m41t80.c index e045c864bb4..c631ec3ea1d 100644 --- a/hw/rtc/m41t80.c +++ b/hw/rtc/m41t80.c @@ -14,7 +14,7 @@ #include "qemu/bcd.h" #include "hw/i2c/i2c.h" #include "qom/object.h" -#include "sysemu/rtc.h" +#include "system/rtc.h" #define TYPE_M41T80 "m41t80" OBJECT_DECLARE_SIMPLE_TYPE(M41t80State, M41T80) @@ -94,7 +94,7 @@ static int m41t80_event(I2CSlave *i2c, enum i2c_event event) return 0; } -static void m41t80_class_init(ObjectClass *klass, void *data) +static void m41t80_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); diff --git a/hw/rtc/m48t59-isa.c b/hw/rtc/m48t59-isa.c index 5bb46f23838..9e2f6563a0a 100644 --- a/hw/rtc/m48t59-isa.c +++ b/hw/rtc/m48t59-isa.c @@ -77,11 +77,10 @@ static void m48txx_isa_toggle_lock(Nvram *obj, int lock) m48t59_toggle_lock(&d->state, lock); } -static Property m48t59_isa_properties[] = { +static const Property m48t59_isa_properties[] = { DEFINE_PROP_INT32("base-year", M48txxISAState, state.base_year, 0), DEFINE_PROP_UINT32("iobase", M48txxISAState, io_base, 0x74), DEFINE_PROP_UINT8("irq", M48txxISAState, isairq, 8), - DEFINE_PROP_END_OF_LIST(), }; static void m48t59_reset_isa(DeviceState *d) @@ -114,23 +113,23 @@ static void m48t59_isa_realize(DeviceState *dev, Error **errp) } } -static void m48txx_isa_class_init(ObjectClass *klass, void *data) +static void m48txx_isa_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); NvramClass *nc = NVRAM_CLASS(klass); dc->realize = m48t59_isa_realize; - dc->reset = m48t59_reset_isa; + device_class_set_legacy_reset(dc, m48t59_reset_isa); device_class_set_props(dc, m48t59_isa_properties); nc->read = m48txx_isa_read; nc->write = m48txx_isa_write; nc->toggle_lock = m48txx_isa_toggle_lock; } -static void m48txx_isa_concrete_class_init(ObjectClass *klass, void *data) +static void m48txx_isa_concrete_class_init(ObjectClass *klass, const void *data) { M48txxISADeviceClass *u = M48TXX_ISA_CLASS(klass); - M48txxInfo *info = data; + const M48txxInfo *info = data; u->info = *info; } @@ -141,7 +140,7 @@ static const TypeInfo m48txx_isa_type_info = { .instance_size = sizeof(M48txxISAState), .abstract = true, .class_init = m48txx_isa_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_NVRAM }, { } } @@ -161,7 +160,7 @@ static void m48t59_isa_register_types(void) for (i = 0; i < ARRAY_SIZE(m48txx_isa_info); i++) { isa_type_info.name = m48txx_isa_info[i].bus_name; isa_type_info.class_data = &m48txx_isa_info[i]; - type_register(&isa_type_info); + type_register_static(&isa_type_info); } } diff --git a/hw/rtc/m48t59.c b/hw/rtc/m48t59.c index 1585a2d3997..68be2dad6f3 100644 --- a/hw/rtc/m48t59.c +++ b/hw/rtc/m48t59.c @@ -28,15 +28,15 @@ #include "hw/qdev-properties.h" #include "hw/rtc/m48t59.h" #include "qemu/timer.h" -#include "sysemu/runstate.h" -#include "sysemu/rtc.h" -#include "sysemu/sysemu.h" +#include "system/runstate.h" +#include "system/rtc.h" +#include "system/system.h" #include "hw/sysbus.h" #include "qapi/error.h" #include "qemu/bcd.h" #include "qemu/module.h" #include "trace.h" -#include "sysemu/watchdog.h" +#include "system/watchdog.h" #include 
"m48t59-internal.h" #include "migration/vmstate.h" @@ -618,18 +618,17 @@ static void m48txx_sysbus_toggle_lock(Nvram *obj, int lock) m48t59_toggle_lock(&d->state, lock); } -static Property m48t59_sysbus_properties[] = { +static const Property m48t59_sysbus_properties[] = { DEFINE_PROP_INT32("base-year", M48txxSysBusState, state.base_year, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void m48txx_sysbus_class_init(ObjectClass *klass, void *data) +static void m48txx_sysbus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); NvramClass *nc = NVRAM_CLASS(klass); dc->realize = m48t59_realize; - dc->reset = m48t59_reset_sysbus; + device_class_set_legacy_reset(dc, m48t59_reset_sysbus); device_class_set_props(dc, m48t59_sysbus_properties); dc->vmsd = &vmstate_m48t59; nc->read = m48txx_sysbus_read; @@ -637,10 +636,11 @@ static void m48txx_sysbus_class_init(ObjectClass *klass, void *data) nc->toggle_lock = m48txx_sysbus_toggle_lock; } -static void m48txx_sysbus_concrete_class_init(ObjectClass *klass, void *data) +static void m48txx_sysbus_concrete_class_init(ObjectClass *klass, + const void *data) { M48txxSysBusDeviceClass *u = M48TXX_SYS_BUS_CLASS(klass); - M48txxInfo *info = data; + const M48txxInfo *info = data; u->info = *info; } @@ -658,7 +658,7 @@ static const TypeInfo m48txx_sysbus_type_info = { .instance_init = m48t59_init1, .abstract = true, .class_init = m48txx_sysbus_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_NVRAM }, { } } @@ -679,7 +679,7 @@ static void m48t59_register_types(void) for (i = 0; i < ARRAY_SIZE(m48txx_sysbus_info); i++) { sysbus_type_info.name = m48txx_sysbus_info[i].bus_name; sysbus_type_info.class_data = &m48txx_sysbus_info[i]; - type_register(&sysbus_type_info); + type_register_static(&sysbus_type_info); } } diff --git a/hw/rtc/mc146818rtc.c b/hw/rtc/mc146818rtc.c index 8ccee9a385d..f9f5cf396f0 100644 --- a/hw/rtc/mc146818rtc.c +++ b/hw/rtc/mc146818rtc.c @@ -32,11 +32,11 @@ #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "qemu/timer.h" -#include "sysemu/sysemu.h" -#include "sysemu/replay.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "sysemu/rtc.h" +#include "system/system.h" +#include "system/replay.h" +#include "system/reset.h" +#include "system/runstate.h" +#include "system/rtc.h" #include "hw/rtc/mc146818rtc.h" #include "hw/rtc/mc146818rtc_regs.h" #include "migration/vmstate.h" @@ -819,7 +819,7 @@ static const VMStateDescription vmstate_rtc_irq_reinject_on_ack_count = { static const VMStateDescription vmstate_rtc = { .name = "mc146818rtc", .version_id = 3, - .minimum_version_id = 1, + .minimum_version_id = 3, .pre_save = rtc_pre_save, .post_load = rtc_post_load, .fields = (const VMStateField[]) { @@ -829,13 +829,13 @@ static const VMStateDescription vmstate_rtc = { VMSTATE_TIMER_PTR(periodic_timer, MC146818RtcState), VMSTATE_INT64(next_periodic_time, MC146818RtcState), VMSTATE_UNUSED(3*8), - VMSTATE_UINT32_V(irq_coalesced, MC146818RtcState, 2), - VMSTATE_UINT32_V(period, MC146818RtcState, 2), - VMSTATE_UINT64_V(base_rtc, MC146818RtcState, 3), - VMSTATE_UINT64_V(last_update, MC146818RtcState, 3), - VMSTATE_INT64_V(offset, MC146818RtcState, 3), - VMSTATE_TIMER_PTR_V(update_timer, MC146818RtcState, 3), - VMSTATE_UINT64_V(next_alarm_time, MC146818RtcState, 3), + VMSTATE_UINT32(irq_coalesced, MC146818RtcState), + VMSTATE_UINT32(period, MC146818RtcState), + VMSTATE_UINT64(base_rtc, MC146818RtcState), + VMSTATE_UINT64(last_update, 
MC146818RtcState), + VMSTATE_INT64(offset, MC146818RtcState), + VMSTATE_TIMER_PTR(update_timer, MC146818RtcState), + VMSTATE_UINT64(next_alarm_time, MC146818RtcState), VMSTATE_END_OF_LIST() }, .subsections = (const VMStateDescription * const []) { @@ -929,8 +929,6 @@ static void rtc_realizefn(DeviceState *dev, Error **errp) memory_region_add_subregion(&s->io, 0, &s->coalesced_io); memory_region_add_coalescing(&s->coalesced_io, 0, 1); - qdev_set_legacy_instance_id(dev, s->io_base, 3); - object_property_add_tm(OBJECT(s), "date", rtc_get_date); qdev_init_gpio_out(dev, &s->irq, 1); @@ -960,13 +958,12 @@ MC146818RtcState *mc146818_rtc_init(ISABus *bus, int base_year, return s; } -static Property mc146818rtc_properties[] = { +static const Property mc146818rtc_properties[] = { DEFINE_PROP_INT32("base_year", MC146818RtcState, base_year, 1980), DEFINE_PROP_UINT16("iobase", MC146818RtcState, io_base, RTC_ISA_BASE), DEFINE_PROP_UINT8("irq", MC146818RtcState, isairq, RTC_ISA_IRQ), DEFINE_PROP_LOSTTICKPOLICY("lost_tick_policy", MC146818RtcState, lost_tick_policy, LOST_TICK_POLICY_DISCARD), - DEFINE_PROP_END_OF_LIST(), }; static void rtc_reset_enter(Object *obj, ResetType type) @@ -1019,7 +1016,7 @@ static void rtc_build_aml(AcpiDevAmlIf *adev, Aml *scope) aml_append(scope, dev); } -static void rtc_class_initfn(ObjectClass *klass, void *data) +static void rtc_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -1039,7 +1036,7 @@ static const TypeInfo mc146818rtc_info = { .parent = TYPE_ISA_DEVICE, .instance_size = sizeof(MC146818RtcState), .class_init = rtc_class_initfn, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_ACPI_DEV_AML_IF }, { }, }, diff --git a/hw/rtc/meson.build b/hw/rtc/meson.build index 3ea2affe0b9..6c87864dc07 100644 --- a/hw/rtc/meson.build +++ b/hw/rtc/meson.build @@ -3,7 +3,6 @@ system_ss.add(when: 'CONFIG_DS1338', if_true: files('ds1338.c')) system_ss.add(when: 'CONFIG_M41T80', if_true: files('m41t80.c')) system_ss.add(when: 'CONFIG_M48T59', if_true: files('m48t59.c')) system_ss.add(when: 'CONFIG_PL031', if_true: files('pl031.c')) -system_ss.add(when: 'CONFIG_TWL92230', if_true: files('twl92230.c')) system_ss.add(when: ['CONFIG_ISA_BUS', 'CONFIG_M48T59'], if_true: files('m48t59-isa.c')) system_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-rtc.c')) @@ -14,3 +13,4 @@ system_ss.add(when: 'CONFIG_GOLDFISH_RTC', if_true: files('goldfish_rtc.c')) system_ss.add(when: 'CONFIG_LS7A_RTC', if_true: files('ls7a_rtc.c')) system_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-rtc.c')) system_ss.add(when: 'CONFIG_MC146818RTC', if_true: files('mc146818rtc.c')) +system_ss.add(when: 'CONFIG_RS5C372_RTC', if_true: files('rs5c372.c')) diff --git a/hw/rtc/pl031.c b/hw/rtc/pl031.c index 563bb4b446e..e545b9dfd64 100644 --- a/hw/rtc/pl031.c +++ b/hw/rtc/pl031.c @@ -18,8 +18,8 @@ #include "hw/qdev-properties.h" #include "hw/sysbus.h" #include "qemu/timer.h" -#include "sysemu/sysemu.h" -#include "sysemu/rtc.h" +#include "system/system.h" +#include "system/rtc.h" #include "qemu/cutils.h" #include "qemu/log.h" #include "qemu/module.h" @@ -319,7 +319,7 @@ static const VMStateDescription vmstate_pl031 = { } }; -static Property pl031_properties[] = { +static const Property pl031_properties[] = { /* * True to correctly migrate the tick offset of the RTC. 
False to * obtain backward migration compatibility with older QEMU versions, @@ -330,10 +330,9 @@ static Property pl031_properties[] = { */ DEFINE_PROP_BOOL("migrate-tick-offset", PL031State, migrate_tick_offset, true), - DEFINE_PROP_END_OF_LIST() }; -static void pl031_class_init(ObjectClass *klass, void *data) +static void pl031_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/rtc/rs5c372.c b/hw/rtc/rs5c372.c new file mode 100644 index 00000000000..bb924534a73 --- /dev/null +++ b/hw/rtc/rs5c372.c @@ -0,0 +1,236 @@ +/* + * Ricoh RS5C372, R222x I2C RTC + * + * Copyright (c) 2025 Bernhard Beschow + * + * Based on hw/rtc/ds1338.c + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/i2c/i2c.h" +#include "hw/qdev-properties.h" +#include "hw/resettable.h" +#include "migration/vmstate.h" +#include "qemu/bcd.h" +#include "qom/object.h" +#include "system/rtc.h" +#include "trace.h" + +#define NVRAM_SIZE 0x10 + +/* Flags definitions */ +#define SECONDS_CH 0x80 +#define HOURS_PM 0x20 +#define CTRL2_24 0x20 + +#define TYPE_RS5C372 "rs5c372" +OBJECT_DECLARE_SIMPLE_TYPE(RS5C372State, RS5C372) + +struct RS5C372State { + I2CSlave parent_obj; + + int64_t offset; + uint8_t wday_offset; + uint8_t nvram[NVRAM_SIZE]; + uint8_t ptr; + uint8_t tx_format; + bool addr_byte; +}; + +static void capture_current_time(RS5C372State *s) +{ + /* + * Capture the current time into the secondary registers which will be + * actually read by the data transfer operation. + */ + struct tm now; + qemu_get_timedate(&now, s->offset); + s->nvram[0] = to_bcd(now.tm_sec); + s->nvram[1] = to_bcd(now.tm_min); + if (s->nvram[0xf] & CTRL2_24) { + s->nvram[2] = to_bcd(now.tm_hour); + } else { + int tmp = now.tm_hour; + if (tmp % 12 == 0) { + tmp += 12; + } + if (tmp <= 12) { + s->nvram[2] = to_bcd(tmp); + } else { + s->nvram[2] = HOURS_PM | to_bcd(tmp - 12); + } + } + s->nvram[3] = (now.tm_wday + s->wday_offset) % 7 + 1; + s->nvram[4] = to_bcd(now.tm_mday); + s->nvram[5] = to_bcd(now.tm_mon + 1); + s->nvram[6] = to_bcd(now.tm_year - 100); +} + +static void inc_regptr(RS5C372State *s) +{ + s->ptr = (s->ptr + 1) & (NVRAM_SIZE - 1); +} + +static int rs5c372_event(I2CSlave *i2c, enum i2c_event event) +{ + RS5C372State *s = RS5C372(i2c); + + switch (event) { + case I2C_START_RECV: + /* + * In h/w, capture happens on any START condition, not just a + * START_RECV, but there is no need to actually capture on + * START_SEND, because the guest can't get at that data + * without going through a START_RECV which would overwrite it. + */ + capture_current_time(s); + s->ptr = 0xf; + break; + case I2C_START_SEND: + s->addr_byte = true; + break; + default: + break; + } + + return 0; +} + +static uint8_t rs5c372_recv(I2CSlave *i2c) +{ + RS5C372State *s = RS5C372(i2c); + uint8_t res; + + res = s->nvram[s->ptr]; + + trace_rs5c372_recv(s->ptr, res); + + inc_regptr(s); + return res; +} + +static int rs5c372_send(I2CSlave *i2c, uint8_t data) +{ + RS5C372State *s = RS5C372(i2c); + + if (s->addr_byte) { + s->ptr = data >> 4; + s->tx_format = data & 0xf; + s->addr_byte = false; + return 0; + } + + trace_rs5c372_send(s->ptr, data); + + if (s->ptr < 7) { + /* Time register. 
*/ + struct tm now; + qemu_get_timedate(&now, s->offset); + switch (s->ptr) { + case 0: + now.tm_sec = from_bcd(data & 0x7f); + break; + case 1: + now.tm_min = from_bcd(data & 0x7f); + break; + case 2: + if (s->nvram[0xf] & CTRL2_24) { + now.tm_hour = from_bcd(data & 0x3f); + } else { + int tmp = from_bcd(data & (HOURS_PM - 1)); + if (data & HOURS_PM) { + tmp += 12; + } + if (tmp % 12 == 0) { + tmp -= 12; + } + now.tm_hour = tmp; + } + break; + case 3: + { + /* + * The day field is supposed to contain a value in the range + * 1-7. Otherwise behavior is undefined. + */ + int user_wday = (data & 7) - 1; + s->wday_offset = (user_wday - now.tm_wday + 7) % 7; + } + break; + case 4: + now.tm_mday = from_bcd(data & 0x3f); + break; + case 5: + now.tm_mon = from_bcd(data & 0x1f) - 1; + break; + case 6: + now.tm_year = from_bcd(data) + 100; + break; + } + s->offset = qemu_timedate_diff(&now); + } else { + s->nvram[s->ptr] = data; + } + inc_regptr(s); + return 0; +} + +static void rs5c372_reset_hold(Object *obj, ResetType type) +{ + RS5C372State *s = RS5C372(obj); + + /* The clock is running and synchronized with the host */ + s->offset = 0; + s->wday_offset = 0; + memset(s->nvram, 0, NVRAM_SIZE); + s->ptr = 0; + s->addr_byte = false; +} + +static const VMStateDescription rs5c372_vmstate = { + .name = "rs5c372", + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_I2C_SLAVE(parent_obj, RS5C372State), + VMSTATE_INT64(offset, RS5C372State), + VMSTATE_UINT8_V(wday_offset, RS5C372State, 2), + VMSTATE_UINT8_ARRAY(nvram, RS5C372State, NVRAM_SIZE), + VMSTATE_UINT8(ptr, RS5C372State), + VMSTATE_UINT8(tx_format, RS5C372State), + VMSTATE_BOOL(addr_byte, RS5C372State), + VMSTATE_END_OF_LIST() + } +}; + +static void rs5c372_init(Object *obj) +{ + qdev_prop_set_uint8(DEVICE(obj), "address", 0x32); +} + +static void rs5c372_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + k->event = rs5c372_event; + k->recv = rs5c372_recv; + k->send = rs5c372_send; + dc->vmsd = &rs5c372_vmstate; + rc->phases.hold = rs5c372_reset_hold; +} + +static const TypeInfo rs5c372_types[] = { + { + .name = TYPE_RS5C372, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(RS5C372State), + .instance_init = rs5c372_init, + .class_init = rs5c372_class_init, + }, +}; + +DEFINE_TYPES(rs5c372_types) diff --git a/hw/rtc/sun4v-rtc.c b/hw/rtc/sun4v-rtc.c index ffcc0aa25d9..29e24ef6bed 100644 --- a/hw/rtc/sun4v-rtc.c +++ b/hw/rtc/sun4v-rtc.c @@ -75,7 +75,7 @@ static void sun4v_rtc_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->iomem); } -static void sun4v_rtc_class_init(ObjectClass *klass, void *data) +static void sun4v_rtc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/rtc/trace-events b/hw/rtc/trace-events index ebb311a5b0e..b9f2852d35f 100644 --- a/hw/rtc/trace-events +++ b/hw/rtc/trace-events @@ -22,6 +22,10 @@ pl031_set_alarm(uint32_t ticks) "alarm set for %u ticks" aspeed_rtc_read(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64 aspeed_rtc_write(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64 +# ds1338.c +ds1338_recv(uint32_t addr, uint8_t value) "[0x%" PRIx32 "] -> 0x%02" PRIx8 +ds1338_send(uint32_t addr, uint8_t value) "[0x%" PRIx32 "] <- 0x%02" PRIx8 + # m48t59.c m48txx_nvram_io_read(uint64_t addr, uint64_t value) "io read addr:0x%04" 
PRIx64 " value:0x%02" PRIx64 m48txx_nvram_io_write(uint64_t addr, uint64_t value) "io write addr:0x%04" PRIx64 " value:0x%02" PRIx64 @@ -31,3 +35,7 @@ m48txx_nvram_mem_write(uint32_t addr, uint32_t value) "mem write addr:0x%04x val # goldfish_rtc.c goldfish_rtc_read(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64 goldfish_rtc_write(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64 + +# rs5c372.c +rs5c372_recv(uint32_t addr, uint8_t value) "[0x%" PRIx32 "] -> 0x%02" PRIx8 +rs5c372_send(uint32_t addr, uint8_t value) "[0x%" PRIx32 "] <- 0x%02" PRIx8 diff --git a/hw/rtc/twl92230.c b/hw/rtc/twl92230.c deleted file mode 100644 index efd19a76e61..00000000000 --- a/hw/rtc/twl92230.c +++ /dev/null @@ -1,882 +0,0 @@ -/* - * TI TWL92230C energy-management companion device for the OMAP24xx. - * Aka. Menelaus (N4200 MENELAUS1_V2.2) - * - * Copyright (C) 2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - */ - -#include "qemu/osdep.h" -#include "qemu/timer.h" -#include "hw/i2c/i2c.h" -#include "hw/irq.h" -#include "migration/qemu-file-types.h" -#include "migration/vmstate.h" -#include "sysemu/sysemu.h" -#include "sysemu/rtc.h" -#include "qemu/bcd.h" -#include "qemu/module.h" -#include "qom/object.h" - -#define VERBOSE 1 - -#define TYPE_TWL92230 "twl92230" -OBJECT_DECLARE_SIMPLE_TYPE(MenelausState, TWL92230) - -struct MenelausState { - I2CSlave parent_obj; - - int firstbyte; - uint8_t reg; - - uint8_t vcore[5]; - uint8_t dcdc[3]; - uint8_t ldo[8]; - uint8_t sleep[2]; - uint8_t osc; - uint8_t detect; - uint16_t mask; - uint16_t status; - uint8_t dir; - uint8_t inputs; - uint8_t outputs; - uint8_t bbsms; - uint8_t pull[4]; - uint8_t mmc_ctrl[3]; - uint8_t mmc_debounce; - struct { - uint8_t ctrl; - uint16_t comp; - QEMUTimer *hz_tm; - int64_t next; - struct tm tm; - struct tm new; - struct tm alm; - int64_t sec_offset; - int64_t alm_sec; - int next_comp; - } rtc; - uint16_t rtc_next_vmstate; - qemu_irq out[4]; - uint8_t pwrbtn_state; -}; - -static inline void menelaus_update(MenelausState *s) -{ - qemu_set_irq(s->out[3], s->status & ~s->mask); -} - -static inline void menelaus_rtc_start(MenelausState *s) -{ - s->rtc.next += qemu_clock_get_ms(rtc_clock); - timer_mod(s->rtc.hz_tm, s->rtc.next); -} - -static inline void menelaus_rtc_stop(MenelausState *s) -{ - timer_del(s->rtc.hz_tm); - s->rtc.next -= qemu_clock_get_ms(rtc_clock); - if (s->rtc.next < 1) - s->rtc.next = 1; -} - -static void menelaus_rtc_update(MenelausState *s) -{ - qemu_get_timedate(&s->rtc.tm, s->rtc.sec_offset); -} - -static void menelaus_alm_update(MenelausState *s) -{ - if ((s->rtc.ctrl & 3) == 3) - s->rtc.alm_sec = qemu_timedate_diff(&s->rtc.alm) - s->rtc.sec_offset; -} - -static void menelaus_rtc_hz(void *opaque) -{ - MenelausState *s = (MenelausState *) opaque; - - s->rtc.next_comp --; - s->rtc.alm_sec --; - s->rtc.next += 1000; - timer_mod(s->rtc.hz_tm, s->rtc.next); - if 
((s->rtc.ctrl >> 3) & 3) { /* EVERY */ - menelaus_rtc_update(s); - if (((s->rtc.ctrl >> 3) & 3) == 1 && !s->rtc.tm.tm_sec) - s->status |= 1 << 8; /* RTCTMR */ - else if (((s->rtc.ctrl >> 3) & 3) == 2 && !s->rtc.tm.tm_min) - s->status |= 1 << 8; /* RTCTMR */ - else if (!s->rtc.tm.tm_hour) - s->status |= 1 << 8; /* RTCTMR */ - } else - s->status |= 1 << 8; /* RTCTMR */ - if ((s->rtc.ctrl >> 1) & 1) { /* RTC_AL_EN */ - if (s->rtc.alm_sec == 0) - s->status |= 1 << 9; /* RTCALM */ - /* TODO: wake-up */ - } - if (s->rtc.next_comp <= 0) { - s->rtc.next -= muldiv64((int16_t) s->rtc.comp, 1000, 0x8000); - s->rtc.next_comp = 3600; - } - menelaus_update(s); -} - -static void menelaus_reset(I2CSlave *i2c) -{ - MenelausState *s = TWL92230(i2c); - - s->reg = 0x00; - - s->vcore[0] = 0x0c; /* XXX: X-loader needs 0x8c? check! */ - s->vcore[1] = 0x05; - s->vcore[2] = 0x02; - s->vcore[3] = 0x0c; - s->vcore[4] = 0x03; - s->dcdc[0] = 0x33; /* Depends on wiring */ - s->dcdc[1] = 0x03; - s->dcdc[2] = 0x00; - s->ldo[0] = 0x95; - s->ldo[1] = 0x7e; - s->ldo[2] = 0x00; - s->ldo[3] = 0x00; /* Depends on wiring */ - s->ldo[4] = 0x03; /* Depends on wiring */ - s->ldo[5] = 0x00; - s->ldo[6] = 0x00; - s->ldo[7] = 0x00; - s->sleep[0] = 0x00; - s->sleep[1] = 0x00; - s->osc = 0x01; - s->detect = 0x09; - s->mask = 0x0fff; - s->status = 0; - s->dir = 0x07; - s->outputs = 0x00; - s->bbsms = 0x00; - s->pull[0] = 0x00; - s->pull[1] = 0x00; - s->pull[2] = 0x00; - s->pull[3] = 0x00; - s->mmc_ctrl[0] = 0x03; - s->mmc_ctrl[1] = 0xc0; - s->mmc_ctrl[2] = 0x00; - s->mmc_debounce = 0x05; - - if (s->rtc.ctrl & 1) - menelaus_rtc_stop(s); - s->rtc.ctrl = 0x00; - s->rtc.comp = 0x0000; - s->rtc.next = 1000; - s->rtc.sec_offset = 0; - s->rtc.next_comp = 1800; - s->rtc.alm_sec = 1800; - s->rtc.alm.tm_sec = 0x00; - s->rtc.alm.tm_min = 0x00; - s->rtc.alm.tm_hour = 0x00; - s->rtc.alm.tm_mday = 0x01; - s->rtc.alm.tm_mon = 0x00; - s->rtc.alm.tm_year = 2004; - menelaus_update(s); -} - -static void menelaus_gpio_set(void *opaque, int line, int level) -{ - MenelausState *s = (MenelausState *) opaque; - - if (line < 3) { - /* No interrupt generated */ - s->inputs &= ~(1 << line); - s->inputs |= level << line; - return; - } - - if (!s->pwrbtn_state && level) { - s->status |= 1 << 11; /* PSHBTN */ - menelaus_update(s); - } - s->pwrbtn_state = level; -} - -#define MENELAUS_REV 0x01 -#define MENELAUS_VCORE_CTRL1 0x02 -#define MENELAUS_VCORE_CTRL2 0x03 -#define MENELAUS_VCORE_CTRL3 0x04 -#define MENELAUS_VCORE_CTRL4 0x05 -#define MENELAUS_VCORE_CTRL5 0x06 -#define MENELAUS_DCDC_CTRL1 0x07 -#define MENELAUS_DCDC_CTRL2 0x08 -#define MENELAUS_DCDC_CTRL3 0x09 -#define MENELAUS_LDO_CTRL1 0x0a -#define MENELAUS_LDO_CTRL2 0x0b -#define MENELAUS_LDO_CTRL3 0x0c -#define MENELAUS_LDO_CTRL4 0x0d -#define MENELAUS_LDO_CTRL5 0x0e -#define MENELAUS_LDO_CTRL6 0x0f -#define MENELAUS_LDO_CTRL7 0x10 -#define MENELAUS_LDO_CTRL8 0x11 -#define MENELAUS_SLEEP_CTRL1 0x12 -#define MENELAUS_SLEEP_CTRL2 0x13 -#define MENELAUS_DEVICE_OFF 0x14 -#define MENELAUS_OSC_CTRL 0x15 -#define MENELAUS_DETECT_CTRL 0x16 -#define MENELAUS_INT_MASK1 0x17 -#define MENELAUS_INT_MASK2 0x18 -#define MENELAUS_INT_STATUS1 0x19 -#define MENELAUS_INT_STATUS2 0x1a -#define MENELAUS_INT_ACK1 0x1b -#define MENELAUS_INT_ACK2 0x1c -#define MENELAUS_GPIO_CTRL 0x1d -#define MENELAUS_GPIO_IN 0x1e -#define MENELAUS_GPIO_OUT 0x1f -#define MENELAUS_BBSMS 0x20 -#define MENELAUS_RTC_CTRL 0x21 -#define MENELAUS_RTC_UPDATE 0x22 -#define MENELAUS_RTC_SEC 0x23 -#define MENELAUS_RTC_MIN 0x24 -#define MENELAUS_RTC_HR 0x25 
-#define MENELAUS_RTC_DAY 0x26 -#define MENELAUS_RTC_MON 0x27 -#define MENELAUS_RTC_YR 0x28 -#define MENELAUS_RTC_WKDAY 0x29 -#define MENELAUS_RTC_AL_SEC 0x2a -#define MENELAUS_RTC_AL_MIN 0x2b -#define MENELAUS_RTC_AL_HR 0x2c -#define MENELAUS_RTC_AL_DAY 0x2d -#define MENELAUS_RTC_AL_MON 0x2e -#define MENELAUS_RTC_AL_YR 0x2f -#define MENELAUS_RTC_COMP_MSB 0x30 -#define MENELAUS_RTC_COMP_LSB 0x31 -#define MENELAUS_S1_PULL_EN 0x32 -#define MENELAUS_S1_PULL_DIR 0x33 -#define MENELAUS_S2_PULL_EN 0x34 -#define MENELAUS_S2_PULL_DIR 0x35 -#define MENELAUS_MCT_CTRL1 0x36 -#define MENELAUS_MCT_CTRL2 0x37 -#define MENELAUS_MCT_CTRL3 0x38 -#define MENELAUS_MCT_PIN_ST 0x39 -#define MENELAUS_DEBOUNCE1 0x3a - -static uint8_t menelaus_read(void *opaque, uint8_t addr) -{ - MenelausState *s = (MenelausState *) opaque; - - switch (addr) { - case MENELAUS_REV: - return 0x22; - - case MENELAUS_VCORE_CTRL1 ... MENELAUS_VCORE_CTRL5: - return s->vcore[addr - MENELAUS_VCORE_CTRL1]; - - case MENELAUS_DCDC_CTRL1 ... MENELAUS_DCDC_CTRL3: - return s->dcdc[addr - MENELAUS_DCDC_CTRL1]; - - case MENELAUS_LDO_CTRL1 ... MENELAUS_LDO_CTRL8: - return s->ldo[addr - MENELAUS_LDO_CTRL1]; - - case MENELAUS_SLEEP_CTRL1: - case MENELAUS_SLEEP_CTRL2: - return s->sleep[addr - MENELAUS_SLEEP_CTRL1]; - - case MENELAUS_DEVICE_OFF: - return 0; - - case MENELAUS_OSC_CTRL: - return s->osc | (1 << 7); /* CLK32K_GOOD */ - - case MENELAUS_DETECT_CTRL: - return s->detect; - - case MENELAUS_INT_MASK1: - return (s->mask >> 0) & 0xff; - case MENELAUS_INT_MASK2: - return (s->mask >> 8) & 0xff; - - case MENELAUS_INT_STATUS1: - return (s->status >> 0) & 0xff; - case MENELAUS_INT_STATUS2: - return (s->status >> 8) & 0xff; - - case MENELAUS_INT_ACK1: - case MENELAUS_INT_ACK2: - return 0; - - case MENELAUS_GPIO_CTRL: - return s->dir; - case MENELAUS_GPIO_IN: - return s->inputs | (~s->dir & s->outputs); - case MENELAUS_GPIO_OUT: - return s->outputs; - - case MENELAUS_BBSMS: - return s->bbsms; - - case MENELAUS_RTC_CTRL: - return s->rtc.ctrl; - case MENELAUS_RTC_UPDATE: - return 0x00; - case MENELAUS_RTC_SEC: - menelaus_rtc_update(s); - return to_bcd(s->rtc.tm.tm_sec); - case MENELAUS_RTC_MIN: - menelaus_rtc_update(s); - return to_bcd(s->rtc.tm.tm_min); - case MENELAUS_RTC_HR: - menelaus_rtc_update(s); - if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */ - return to_bcd((s->rtc.tm.tm_hour % 12) + 1) | - (!!(s->rtc.tm.tm_hour >= 12) << 7); /* PM_nAM */ - else - return to_bcd(s->rtc.tm.tm_hour); - case MENELAUS_RTC_DAY: - menelaus_rtc_update(s); - return to_bcd(s->rtc.tm.tm_mday); - case MENELAUS_RTC_MON: - menelaus_rtc_update(s); - return to_bcd(s->rtc.tm.tm_mon + 1); - case MENELAUS_RTC_YR: - menelaus_rtc_update(s); - return to_bcd(s->rtc.tm.tm_year - 2000); - case MENELAUS_RTC_WKDAY: - menelaus_rtc_update(s); - return to_bcd(s->rtc.tm.tm_wday); - case MENELAUS_RTC_AL_SEC: - return to_bcd(s->rtc.alm.tm_sec); - case MENELAUS_RTC_AL_MIN: - return to_bcd(s->rtc.alm.tm_min); - case MENELAUS_RTC_AL_HR: - if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */ - return to_bcd((s->rtc.alm.tm_hour % 12) + 1) | - (!!(s->rtc.alm.tm_hour >= 12) << 7);/* AL_PM_nAM */ - else - return to_bcd(s->rtc.alm.tm_hour); - case MENELAUS_RTC_AL_DAY: - return to_bcd(s->rtc.alm.tm_mday); - case MENELAUS_RTC_AL_MON: - return to_bcd(s->rtc.alm.tm_mon + 1); - case MENELAUS_RTC_AL_YR: - return to_bcd(s->rtc.alm.tm_year - 2000); - case MENELAUS_RTC_COMP_MSB: - return (s->rtc.comp >> 8) & 0xff; - case MENELAUS_RTC_COMP_LSB: - return (s->rtc.comp >> 0) & 0xff; - - case MENELAUS_S1_PULL_EN: - return 
s->pull[0]; - case MENELAUS_S1_PULL_DIR: - return s->pull[1]; - case MENELAUS_S2_PULL_EN: - return s->pull[2]; - case MENELAUS_S2_PULL_DIR: - return s->pull[3]; - - case MENELAUS_MCT_CTRL1 ... MENELAUS_MCT_CTRL3: - return s->mmc_ctrl[addr - MENELAUS_MCT_CTRL1]; - case MENELAUS_MCT_PIN_ST: - /* TODO: return the real Card Detect */ - return 0; - case MENELAUS_DEBOUNCE1: - return s->mmc_debounce; - - default: -#ifdef VERBOSE - printf("%s: unknown register %02x\n", __func__, addr); -#endif - break; - } - return 0; -} - -static void menelaus_write(void *opaque, uint8_t addr, uint8_t value) -{ - MenelausState *s = (MenelausState *) opaque; - int line; - struct tm tm; - - switch (addr) { - case MENELAUS_VCORE_CTRL1: - s->vcore[0] = (value & 0xe) | MIN(value & 0x1f, 0x12); - break; - case MENELAUS_VCORE_CTRL2: - s->vcore[1] = value; - break; - case MENELAUS_VCORE_CTRL3: - s->vcore[2] = MIN(value & 0x1f, 0x12); - break; - case MENELAUS_VCORE_CTRL4: - s->vcore[3] = MIN(value & 0x1f, 0x12); - break; - case MENELAUS_VCORE_CTRL5: - s->vcore[4] = value & 3; - /* XXX - * auto set to 3 on M_Active, nRESWARM - * auto set to 0 on M_WaitOn, M_Backup - */ - break; - - case MENELAUS_DCDC_CTRL1: - s->dcdc[0] = value & 0x3f; - break; - case MENELAUS_DCDC_CTRL2: - s->dcdc[1] = value & 0x07; - /* XXX - * auto set to 3 on M_Active, nRESWARM - * auto set to 0 on M_WaitOn, M_Backup - */ - break; - case MENELAUS_DCDC_CTRL3: - s->dcdc[2] = value & 0x07; - break; - - case MENELAUS_LDO_CTRL1: - s->ldo[0] = value; - break; - case MENELAUS_LDO_CTRL2: - s->ldo[1] = value & 0x7f; - /* XXX - * auto set to 0x7e on M_WaitOn, M_Backup - */ - break; - case MENELAUS_LDO_CTRL3: - s->ldo[2] = value & 3; - /* XXX - * auto set to 3 on M_Active, nRESWARM - * auto set to 0 on M_WaitOn, M_Backup - */ - break; - case MENELAUS_LDO_CTRL4: - s->ldo[3] = value & 3; - /* XXX - * auto set to 3 on M_Active, nRESWARM - * auto set to 0 on M_WaitOn, M_Backup - */ - break; - case MENELAUS_LDO_CTRL5: - s->ldo[4] = value & 3; - /* XXX - * auto set to 3 on M_Active, nRESWARM - * auto set to 0 on M_WaitOn, M_Backup - */ - break; - case MENELAUS_LDO_CTRL6: - s->ldo[5] = value & 3; - break; - case MENELAUS_LDO_CTRL7: - s->ldo[6] = value & 3; - break; - case MENELAUS_LDO_CTRL8: - s->ldo[7] = value & 3; - break; - - case MENELAUS_SLEEP_CTRL1: - case MENELAUS_SLEEP_CTRL2: - s->sleep[addr - MENELAUS_SLEEP_CTRL1] = value; - break; - - case MENELAUS_DEVICE_OFF: - if (value & 1) { - menelaus_reset(I2C_SLAVE(s)); - } - break; - - case MENELAUS_OSC_CTRL: - s->osc = value & 7; - break; - - case MENELAUS_DETECT_CTRL: - s->detect = value & 0x7f; - break; - - case MENELAUS_INT_MASK1: - s->mask &= 0xf00; - s->mask |= value << 0; - menelaus_update(s); - break; - case MENELAUS_INT_MASK2: - s->mask &= 0x0ff; - s->mask |= value << 8; - menelaus_update(s); - break; - - case MENELAUS_INT_ACK1: - s->status &= ~(((uint16_t) value) << 0); - menelaus_update(s); - break; - case MENELAUS_INT_ACK2: - s->status &= ~(((uint16_t) value) << 8); - menelaus_update(s); - break; - - case MENELAUS_GPIO_CTRL: - for (line = 0; line < 3; line ++) { - if (((s->dir ^ value) >> line) & 1) { - qemu_set_irq(s->out[line], - ((s->outputs & ~s->dir) >> line) & 1); - } - } - s->dir = value & 0x67; - break; - case MENELAUS_GPIO_OUT: - for (line = 0; line < 3; line ++) { - if ((((s->outputs ^ value) & ~s->dir) >> line) & 1) { - qemu_set_irq(s->out[line], (s->outputs >> line) & 1); - } - } - s->outputs = value & 0x07; - break; - - case MENELAUS_BBSMS: - s->bbsms = 0x0d; - break; - - case 
MENELAUS_RTC_CTRL: - if ((s->rtc.ctrl ^ value) & 1) { /* RTC_EN */ - if (value & 1) - menelaus_rtc_start(s); - else - menelaus_rtc_stop(s); - } - s->rtc.ctrl = value & 0x1f; - menelaus_alm_update(s); - break; - case MENELAUS_RTC_UPDATE: - menelaus_rtc_update(s); - memcpy(&tm, &s->rtc.tm, sizeof(tm)); - switch (value & 0xf) { - case 0: - break; - case 1: - tm.tm_sec = s->rtc.new.tm_sec; - break; - case 2: - tm.tm_min = s->rtc.new.tm_min; - break; - case 3: - if (s->rtc.new.tm_hour > 23) - goto rtc_badness; - tm.tm_hour = s->rtc.new.tm_hour; - break; - case 4: - if (s->rtc.new.tm_mday < 1) - goto rtc_badness; - /* TODO check range */ - tm.tm_mday = s->rtc.new.tm_mday; - break; - case 5: - if (s->rtc.new.tm_mon < 0 || s->rtc.new.tm_mon > 11) - goto rtc_badness; - tm.tm_mon = s->rtc.new.tm_mon; - break; - case 6: - tm.tm_year = s->rtc.new.tm_year; - break; - case 7: - /* TODO set .tm_mday instead */ - tm.tm_wday = s->rtc.new.tm_wday; - break; - case 8: - if (s->rtc.new.tm_hour > 23) - goto rtc_badness; - if (s->rtc.new.tm_mday < 1) - goto rtc_badness; - if (s->rtc.new.tm_mon < 0 || s->rtc.new.tm_mon > 11) - goto rtc_badness; - tm.tm_sec = s->rtc.new.tm_sec; - tm.tm_min = s->rtc.new.tm_min; - tm.tm_hour = s->rtc.new.tm_hour; - tm.tm_mday = s->rtc.new.tm_mday; - tm.tm_mon = s->rtc.new.tm_mon; - tm.tm_year = s->rtc.new.tm_year; - break; - rtc_badness: - default: - fprintf(stderr, "%s: bad RTC_UPDATE value %02x\n", - __func__, value); - s->status |= 1 << 10; /* RTCERR */ - menelaus_update(s); - } - s->rtc.sec_offset = qemu_timedate_diff(&tm); - break; - case MENELAUS_RTC_SEC: - s->rtc.tm.tm_sec = from_bcd(value & 0x7f); - break; - case MENELAUS_RTC_MIN: - s->rtc.tm.tm_min = from_bcd(value & 0x7f); - break; - case MENELAUS_RTC_HR: - s->rtc.tm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */ - MIN(from_bcd(value & 0x3f), 12) + ((value >> 7) ? 11 : -1) : - from_bcd(value & 0x3f); - break; - case MENELAUS_RTC_DAY: - s->rtc.tm.tm_mday = from_bcd(value); - break; - case MENELAUS_RTC_MON: - s->rtc.tm.tm_mon = MAX(1, from_bcd(value)) - 1; - break; - case MENELAUS_RTC_YR: - s->rtc.tm.tm_year = 2000 + from_bcd(value); - break; - case MENELAUS_RTC_WKDAY: - s->rtc.tm.tm_mday = from_bcd(value); - break; - case MENELAUS_RTC_AL_SEC: - s->rtc.alm.tm_sec = from_bcd(value & 0x7f); - menelaus_alm_update(s); - break; - case MENELAUS_RTC_AL_MIN: - s->rtc.alm.tm_min = from_bcd(value & 0x7f); - menelaus_alm_update(s); - break; - case MENELAUS_RTC_AL_HR: - s->rtc.alm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */ - MIN(from_bcd(value & 0x3f), 12) + ((value >> 7) ? 
11 : -1) : - from_bcd(value & 0x3f); - menelaus_alm_update(s); - break; - case MENELAUS_RTC_AL_DAY: - s->rtc.alm.tm_mday = from_bcd(value); - menelaus_alm_update(s); - break; - case MENELAUS_RTC_AL_MON: - s->rtc.alm.tm_mon = MAX(1, from_bcd(value)) - 1; - menelaus_alm_update(s); - break; - case MENELAUS_RTC_AL_YR: - s->rtc.alm.tm_year = 2000 + from_bcd(value); - menelaus_alm_update(s); - break; - case MENELAUS_RTC_COMP_MSB: - s->rtc.comp &= 0xff; - s->rtc.comp |= value << 8; - break; - case MENELAUS_RTC_COMP_LSB: - s->rtc.comp &= 0xff << 8; - s->rtc.comp |= value; - break; - - case MENELAUS_S1_PULL_EN: - s->pull[0] = value; - break; - case MENELAUS_S1_PULL_DIR: - s->pull[1] = value & 0x1f; - break; - case MENELAUS_S2_PULL_EN: - s->pull[2] = value; - break; - case MENELAUS_S2_PULL_DIR: - s->pull[3] = value & 0x1f; - break; - - case MENELAUS_MCT_CTRL1: - s->mmc_ctrl[0] = value & 0x7f; - break; - case MENELAUS_MCT_CTRL2: - s->mmc_ctrl[1] = value; - /* TODO update Card Detect interrupts */ - break; - case MENELAUS_MCT_CTRL3: - s->mmc_ctrl[2] = value & 0xf; - break; - case MENELAUS_DEBOUNCE1: - s->mmc_debounce = value & 0x3f; - break; - - default: -#ifdef VERBOSE - printf("%s: unknown register %02x\n", __func__, addr); -#endif - break; - } -} - -static int menelaus_event(I2CSlave *i2c, enum i2c_event event) -{ - MenelausState *s = TWL92230(i2c); - - if (event == I2C_START_SEND) - s->firstbyte = 1; - - return 0; -} - -static int menelaus_tx(I2CSlave *i2c, uint8_t data) -{ - MenelausState *s = TWL92230(i2c); - - /* Interpret register address byte */ - if (s->firstbyte) { - s->reg = data; - s->firstbyte = 0; - } else - menelaus_write(s, s->reg ++, data); - - return 0; -} - -static uint8_t menelaus_rx(I2CSlave *i2c) -{ - MenelausState *s = TWL92230(i2c); - - return menelaus_read(s, s->reg ++); -} - -/* Save restore 32 bit int as uint16_t - This is a Big hack, but it is how the old state did it. 
- Or we broke compatibility in the state, or we can't use struct tm - */ - -static int get_int32_as_uint16(QEMUFile *f, void *pv, size_t size, - const VMStateField *field) -{ - int *v = pv; - *v = qemu_get_be16(f); - return 0; -} - -static int put_int32_as_uint16(QEMUFile *f, void *pv, size_t size, - const VMStateField *field, JSONWriter *vmdesc) -{ - int *v = pv; - qemu_put_be16(f, *v); - - return 0; -} - -static const VMStateInfo vmstate_hack_int32_as_uint16 = { - .name = "int32_as_uint16", - .get = get_int32_as_uint16, - .put = put_int32_as_uint16, -}; - -#define VMSTATE_UINT16_HACK(_f, _s) \ - VMSTATE_SINGLE(_f, _s, 0, vmstate_hack_int32_as_uint16, int32_t) - - -static const VMStateDescription vmstate_menelaus_tm = { - .name = "menelaus_tm", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_UINT16_HACK(tm_sec, struct tm), - VMSTATE_UINT16_HACK(tm_min, struct tm), - VMSTATE_UINT16_HACK(tm_hour, struct tm), - VMSTATE_UINT16_HACK(tm_mday, struct tm), - VMSTATE_UINT16_HACK(tm_min, struct tm), - VMSTATE_UINT16_HACK(tm_year, struct tm), - VMSTATE_END_OF_LIST() - } -}; - -static int menelaus_pre_save(void *opaque) -{ - MenelausState *s = opaque; - /* Should be <= 1000 */ - s->rtc_next_vmstate = s->rtc.next - qemu_clock_get_ms(rtc_clock); - - return 0; -} - -static int menelaus_post_load(void *opaque, int version_id) -{ - MenelausState *s = opaque; - - if (s->rtc.ctrl & 1) /* RTC_EN */ - menelaus_rtc_stop(s); - - s->rtc.next = s->rtc_next_vmstate; - - menelaus_alm_update(s); - menelaus_update(s); - if (s->rtc.ctrl & 1) /* RTC_EN */ - menelaus_rtc_start(s); - return 0; -} - -static const VMStateDescription vmstate_menelaus = { - .name = "menelaus", - .version_id = 0, - .minimum_version_id = 0, - .pre_save = menelaus_pre_save, - .post_load = menelaus_post_load, - .fields = (const VMStateField[]) { - VMSTATE_INT32(firstbyte, MenelausState), - VMSTATE_UINT8(reg, MenelausState), - VMSTATE_UINT8_ARRAY(vcore, MenelausState, 5), - VMSTATE_UINT8_ARRAY(dcdc, MenelausState, 3), - VMSTATE_UINT8_ARRAY(ldo, MenelausState, 8), - VMSTATE_UINT8_ARRAY(sleep, MenelausState, 2), - VMSTATE_UINT8(osc, MenelausState), - VMSTATE_UINT8(detect, MenelausState), - VMSTATE_UINT16(mask, MenelausState), - VMSTATE_UINT16(status, MenelausState), - VMSTATE_UINT8(dir, MenelausState), - VMSTATE_UINT8(inputs, MenelausState), - VMSTATE_UINT8(outputs, MenelausState), - VMSTATE_UINT8(bbsms, MenelausState), - VMSTATE_UINT8_ARRAY(pull, MenelausState, 4), - VMSTATE_UINT8_ARRAY(mmc_ctrl, MenelausState, 3), - VMSTATE_UINT8(mmc_debounce, MenelausState), - VMSTATE_UINT8(rtc.ctrl, MenelausState), - VMSTATE_UINT16(rtc.comp, MenelausState), - VMSTATE_UINT16(rtc_next_vmstate, MenelausState), - VMSTATE_STRUCT(rtc.new, MenelausState, 0, vmstate_menelaus_tm, - struct tm), - VMSTATE_STRUCT(rtc.alm, MenelausState, 0, vmstate_menelaus_tm, - struct tm), - VMSTATE_UINT8(pwrbtn_state, MenelausState), - VMSTATE_I2C_SLAVE(parent_obj, MenelausState), - VMSTATE_END_OF_LIST() - } -}; - -static void twl92230_realize(DeviceState *dev, Error **errp) -{ - MenelausState *s = TWL92230(dev); - - s->rtc.hz_tm = timer_new_ms(rtc_clock, menelaus_rtc_hz, s); - /* Three output pins plus one interrupt pin. */ - qdev_init_gpio_out(dev, s->out, 4); - - /* Three input pins plus one power-button pin. 
*/ - qdev_init_gpio_in(dev, menelaus_gpio_set, 4); - - menelaus_reset(I2C_SLAVE(dev)); -} - -static void twl92230_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); - - dc->realize = twl92230_realize; - sc->event = menelaus_event; - sc->recv = menelaus_rx; - sc->send = menelaus_tx; - dc->vmsd = &vmstate_menelaus; -} - -static const TypeInfo twl92230_info = { - .name = TYPE_TWL92230, - .parent = TYPE_I2C_SLAVE, - .instance_size = sizeof(MenelausState), - .class_init = twl92230_class_init, -}; - -static void twl92230_register_types(void) -{ - type_register_static(&twl92230_info); -} - -type_init(twl92230_register_types) diff --git a/hw/rtc/xlnx-zynqmp-rtc.c b/hw/rtc/xlnx-zynqmp-rtc.c index 613c6407a60..500982a8011 100644 --- a/hw/rtc/xlnx-zynqmp-rtc.c +++ b/hw/rtc/xlnx-zynqmp-rtc.c @@ -32,8 +32,8 @@ #include "qemu/module.h" #include "hw/irq.h" #include "qemu/cutils.h" -#include "sysemu/sysemu.h" -#include "sysemu/rtc.h" +#include "system/system.h" +#include "system/rtc.h" #include "trace.h" #include "hw/rtc/xlnx-zynqmp-rtc.h" #include "migration/vmstate.h" @@ -251,11 +251,11 @@ static const VMStateDescription vmstate_rtc = { } }; -static void rtc_class_init(ObjectClass *klass, void *data) +static void rtc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = rtc_reset; + device_class_set_legacy_reset(dc, rtc_reset); dc->vmsd = &vmstate_rtc; } diff --git a/hw/rx/rx-gdbsim.c b/hw/rx/rx-gdbsim.c index bb4746c5569..5b9004e9e15 100644 --- a/hw/rx/rx-gdbsim.c +++ b/hw/rx/rx-gdbsim.c @@ -24,9 +24,9 @@ #include "qapi/error.h" #include "hw/loader.h" #include "hw/rx/rx62n.h" -#include "sysemu/qtest.h" -#include "sysemu/device_tree.h" -#include "sysemu/reset.h" +#include "system/qtest.h" +#include "system/device_tree.h" +#include "system/reset.h" #include "hw/boards.h" #include "qom/object.h" @@ -110,9 +110,6 @@ static void rx_gdbsim_init(MachineState *machine) if (!kernel_filename) { if (machine->firmware) { rom_add_file_fixed(machine->firmware, RX62N_CFLASH_BASE, 0); - } else if (!qtest_enabled()) { - error_report("No bios or kernel specified"); - exit(1); } } @@ -127,7 +124,7 @@ static void rx_gdbsim_init(MachineState *machine) * the latter half of the SDRAM space. 
*/ kernel_offset = machine->ram_size / 2; - rx_load_image(RX_CPU(first_cpu), kernel_filename, + rx_load_image(&s->mcu.cpu, kernel_filename, SDRAM_BASE + kernel_offset, kernel_offset); if (dtb_filename) { ram_addr_t dtb_offset; @@ -153,12 +150,12 @@ static void rx_gdbsim_init(MachineState *machine) qemu_register_reset_nosnapshotload(qemu_fdt_randomize_seeds, rom_ptr(SDRAM_BASE + dtb_offset, dtb_size)); /* Set dtb address to R1 */ - RX_CPU(first_cpu)->env.regs[1] = SDRAM_BASE + dtb_offset; + s->mcu.cpu.env.regs[1] = SDRAM_BASE + dtb_offset; } } } -static void rx_gdbsim_class_init(ObjectClass *oc, void *data) +static void rx_gdbsim_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -168,7 +165,7 @@ static void rx_gdbsim_class_init(ObjectClass *oc, void *data) mc->default_ram_id = "ext-sdram"; } -static void rx62n7_class_init(ObjectClass *oc, void *data) +static void rx62n7_class_init(ObjectClass *oc, const void *data) { RxGdbSimMachineClass *rxc = RX_GDBSIM_MACHINE_CLASS(oc); MachineClass *mc = MACHINE_CLASS(oc); @@ -178,7 +175,7 @@ static void rx62n7_class_init(ObjectClass *oc, void *data) mc->desc = "gdb simulator (R5F562N7 MCU and external RAM)"; }; -static void rx62n8_class_init(ObjectClass *oc, void *data) +static void rx62n8_class_init(ObjectClass *oc, const void *data) { RxGdbSimMachineClass *rxc = RX_GDBSIM_MACHINE_CLASS(oc); MachineClass *mc = MACHINE_CLASS(oc); diff --git a/hw/rx/rx62n.c b/hw/rx/rx62n.c index 560f53a58a6..a2a243afa41 100644 --- a/hw/rx/rx62n.c +++ b/hw/rx/rx62n.c @@ -28,8 +28,8 @@ #include "hw/loader.h" #include "hw/sysbus.h" #include "hw/qdev-properties.h" -#include "sysemu/sysemu.h" -#include "qapi/qmp/qlist.h" +#include "system/system.h" +#include "qobject/qlist.h" #include "qom/object.h" /* @@ -257,15 +257,14 @@ static void rx62n_realize(DeviceState *dev, Error **errp) register_sci(s, 0); } -static Property rx62n_properties[] = { +static const Property rx62n_properties[] = { DEFINE_PROP_LINK("main-bus", RX62NState, sysmem, TYPE_MEMORY_REGION, MemoryRegion *), DEFINE_PROP_BOOL("load-kernel", RX62NState, kernel, false), DEFINE_PROP_UINT32("xtal-frequency-hz", RX62NState, xtal_freq_hz, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void rx62n_class_init(ObjectClass *klass, void *data) +static void rx62n_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -273,7 +272,7 @@ static void rx62n_class_init(ObjectClass *klass, void *data) device_class_set_props(dc, rx62n_properties); } -static void r5f562n7_class_init(ObjectClass *oc, void *data) +static void r5f562n7_class_init(ObjectClass *oc, const void *data) { RX62NClass *rxc = RX62N_MCU_CLASS(oc); @@ -282,7 +281,7 @@ static void r5f562n7_class_init(ObjectClass *oc, void *data) rxc->data_flash_size = 32 * KiB; }; -static void r5f562n8_class_init(ObjectClass *oc, void *data) +static void r5f562n8_class_init(ObjectClass *oc, const void *data) { RX62NClass *rxc = RX62N_MCU_CLASS(oc); diff --git a/hw/s390x/3270-ccw.c b/hw/s390x/3270-ccw.c index 69e6783ade5..3f0d384fd80 100644 --- a/hw/s390x/3270-ccw.c +++ b/hw/s390x/3270-ccw.c @@ -150,15 +150,10 @@ static void emulated_ccw_3270_realize(DeviceState *ds, Error **errp) g_free(sch); } -static Property emulated_ccw_3270_properties[] = { - DEFINE_PROP_END_OF_LIST(), -}; - -static void emulated_ccw_3270_class_init(ObjectClass *klass, void *data) +static void emulated_ccw_3270_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - device_class_set_props(dc, 
emulated_ccw_3270_properties); dc->realize = emulated_ccw_3270_realize; dc->hotpluggable = false; set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); diff --git a/hw/s390x/Kconfig b/hw/s390x/Kconfig index 3bbf4ae56e4..02ea1997013 100644 --- a/hw/s390x/Kconfig +++ b/hw/s390x/Kconfig @@ -7,6 +7,7 @@ config S390_CCW_VIRTIO imply VFIO_AP imply VFIO_CCW imply WDT_DIAG288 + imply PCI_BRIDGE imply PCIE_DEVICES imply IOMMUFD select PCI_EXPRESS @@ -15,3 +16,4 @@ config S390_CCW_VIRTIO select SCLPCONSOLE select VIRTIO_CCW select MSI_NONBROKEN + select VIRTIO_MEM_SUPPORTED diff --git a/hw/s390x/ap-bridge.c b/hw/s390x/ap-bridge.c index ef8fa2b15be..edeb3dbef34 100644 --- a/hw/s390x/ap-bridge.c +++ b/hw/s390x/ap-bridge.c @@ -22,7 +22,7 @@ static char *ap_bus_get_dev_path(DeviceState *dev) return g_strdup_printf("/1"); } -static void ap_bus_class_init(ObjectClass *oc, void *data) +static void ap_bus_class_init(ObjectClass *oc, const void *data) { BusClass *k = BUS_CLASS(oc); @@ -61,7 +61,7 @@ void s390_init_ap(void) qbus_set_hotplug_handler(bus, OBJECT(dev)); } -static void ap_bridge_class_init(ObjectClass *oc, void *data) +static void ap_bridge_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); @@ -75,7 +75,7 @@ static const TypeInfo ap_bridge_info = { .parent = TYPE_SYS_BUS_DEVICE, .instance_size = 0, .class_init = ap_bridge_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } } diff --git a/hw/s390x/ap-device.c b/hw/s390x/ap-device.c index 237d1f19c50..733104403e5 100644 --- a/hw/s390x/ap-device.c +++ b/hw/s390x/ap-device.c @@ -12,7 +12,7 @@ #include "qapi/error.h" #include "hw/s390x/ap-device.h" -static void ap_class_init(ObjectClass *klass, void *data) +static void ap_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/s390x/ap-stub.c b/hw/s390x/ap-stub.c new file mode 100644 index 00000000000..001fe5f8b01 --- /dev/null +++ b/hw/s390x/ap-stub.c @@ -0,0 +1,21 @@ +/* + * VFIO based AP matrix device assignment + * + * Copyright 2025 IBM Corp. 
+ * Author(s): Rorie Reyes + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/s390x/ap-bridge.h" + +int ap_chsc_sei_nt0_get_event(void *res) +{ + return EVENT_INFORMATION_NOT_STORED; +} + +bool ap_chsc_sei_nt0_have_event(void) +{ + return false; +} diff --git a/hw/s390x/ccw-device.c b/hw/s390x/ccw-device.c index a7d682e5af9..8be1813b9e7 100644 --- a/hw/s390x/ccw-device.c +++ b/hw/s390x/ccw-device.c @@ -13,6 +13,10 @@ #include "ccw-device.h" #include "hw/qdev-properties.h" #include "qemu/module.h" +#include "ipl.h" +#include "qapi/visitor.h" +#include "qemu/ctype.h" +#include "qapi/error.h" static void ccw_device_refill_ids(CcwDevice *dev) { @@ -37,29 +41,69 @@ static bool ccw_device_realize(CcwDevice *dev, Error **errp) return true; } -static Property ccw_device_properties[] = { +static void ccw_device_get_loadparm(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + CcwDevice *dev = CCW_DEVICE(obj); + char *str = g_strndup((char *) dev->loadparm, sizeof(dev->loadparm)); + + visit_type_str(v, name, &str, errp); + g_free(str); +} + +static void ccw_device_set_loadparm(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + CcwDevice *dev = CCW_DEVICE(obj); + g_autofree char *val = NULL; + int index; + + index = object_property_get_int(obj, "bootindex", NULL); + + if (index < 0) { + error_setg(errp, "LOADPARM is only valid for boot devices!"); + } + + if (!visit_type_str(v, name, &val, errp)) { + return; + } + + s390_ipl_fmt_loadparm(dev->loadparm, val, errp); +} + +const PropertyInfo ccw_loadparm = { + .type = "str", + .description = "Up to 8 chars in set of [A-Za-z0-9. ] to select" + " a guest kernel", + .get = ccw_device_get_loadparm, + .set = ccw_device_set_loadparm, +}; + +static const Property ccw_device_properties[] = { DEFINE_PROP_CSS_DEV_ID("devno", CcwDevice, devno), DEFINE_PROP_CSS_DEV_ID_RO("dev_id", CcwDevice, dev_id), DEFINE_PROP_CSS_DEV_ID_RO("subch_id", CcwDevice, subch_id), - DEFINE_PROP_END_OF_LIST(), }; -static void ccw_device_reset(DeviceState *d) +static void ccw_device_reset_hold(Object *obj, ResetType type) { - CcwDevice *ccw_dev = CCW_DEVICE(d); + CcwDevice *ccw_dev = CCW_DEVICE(obj); css_reset_sch(ccw_dev->sch); } -static void ccw_device_class_init(ObjectClass *klass, void *data) +static void ccw_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); CCWDeviceClass *k = CCW_DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); k->realize = ccw_device_realize; k->refill_ids = ccw_device_refill_ids; device_class_set_props(dc, ccw_device_properties); - dc->reset = ccw_device_reset; + rc->phases.hold = ccw_device_reset_hold; dc->bus_type = TYPE_VIRTUAL_CSS_BUS; } diff --git a/hw/s390x/ccw-device.h b/hw/s390x/ccw-device.h index 5feeb0ee7a2..4439feb1403 100644 --- a/hw/s390x/ccw-device.h +++ b/hw/s390x/ccw-device.h @@ -26,6 +26,8 @@ struct CcwDevice { CssDevId dev_id; /* The actual busid of the virtual subchannel. 
*/ CssDevId subch_id; + /* If set, use this loadparm value when device is boot target */ + uint8_t loadparm[8]; }; typedef struct CcwDevice CcwDevice; @@ -49,4 +51,9 @@ static inline CcwDevice *to_ccw_dev_fast(DeviceState *d) OBJECT_DECLARE_TYPE(CcwDevice, CCWDeviceClass, CCW_DEVICE) +extern const PropertyInfo ccw_loadparm; + +#define DEFINE_PROP_CCW_LOADPARM(_n, _s, _f) \ + DEFINE_PROP(_n, _s, _f, ccw_loadparm, typeof(uint8_t[8])) + #endif diff --git a/hw/s390x/cpu-topology.c b/hw/s390x/cpu-topology.c index f16bdf65faa..b513f8936e4 100644 --- a/hw/s390x/cpu-topology.c +++ b/hw/s390x/cpu-topology.c @@ -23,8 +23,8 @@ #include "target/s390x/cpu.h" #include "hw/s390x/s390-virtio-ccw.h" #include "hw/s390x/cpu-topology.h" -#include "qapi/qapi-commands-machine-target.h" -#include "qapi/qapi-events-machine-target.h" +#include "qapi/qapi-commands-machine-s390x.h" +#include "qapi/qapi-events-machine-s390x.h" /* * s390_topology is used to keep the topology information. @@ -105,7 +105,7 @@ static void s390_topology_init(MachineState *ms) */ void s390_handle_ptf(S390CPU *cpu, uint8_t r1, uintptr_t ra) { - CpuS390Polarization polarization; + S390CpuPolarization polarization; CPUS390XState *env = &cpu->env; uint64_t reg = env->regs[r1]; int fc = reg & S390_TOPO_FC_MASK; @@ -357,7 +357,7 @@ static void s390_change_topology(uint16_t core_id, bool has_book_id, uint16_t book_id, bool has_drawer_id, uint16_t drawer_id, bool has_entitlement, - CpuS390Entitlement entitlement, + S390CpuEntitlement entitlement, bool has_dedicated, bool dedicated, Error **errp) { @@ -446,7 +446,7 @@ void qmp_set_cpu_topology(uint16_t core, bool has_socket, uint16_t socket, bool has_book, uint16_t book, bool has_drawer, uint16_t drawer, - bool has_entitlement, CpuS390Entitlement entitlement, + bool has_entitlement, S390CpuEntitlement entitlement, bool has_dedicated, bool dedicated, Error **errp) { diff --git a/hw/s390x/css-bridge.c b/hw/s390x/css-bridge.c index 8657ff7bf48..0f87b8c5c44 100644 --- a/hw/s390x/css-bridge.c +++ b/hw/s390x/css-bridge.c @@ -66,19 +66,11 @@ static char *virtual_css_bus_get_dev_path(DeviceState *dev) { CcwDevice *ccw_dev = CCW_DEVICE(dev); SubchDev *sch = ccw_dev->sch; - VirtualCssBridge *bridge = - VIRTUAL_CSS_BRIDGE(qdev_get_parent_bus(dev)->parent); - /* - * We can't provide a dev path for backward compatibility on - * older machines, as it is visible in the migration stream. - */ - return bridge->css_dev_path ? 
- g_strdup_printf("/%02x.%1x.%04x", sch->cssid, sch->ssid, sch->devno) : - NULL; + return g_strdup_printf("/%02x.%1x.%04x", sch->cssid, sch->ssid, sch->devno); } -static void virtual_css_bus_class_init(ObjectClass *klass, void *data) +static void virtual_css_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -120,25 +112,18 @@ VirtualCssBus *virtual_css_bus_init(void) /***************** Virtual-css Bus Bridge Device ********************/ -static Property virtual_css_bridge_properties[] = { - DEFINE_PROP_BOOL("css_dev_path", VirtualCssBridge, css_dev_path, - true), - DEFINE_PROP_END_OF_LIST(), -}; - static bool prop_get_true(Object *obj, Error **errp) { return true; } -static void virtual_css_bridge_class_init(ObjectClass *klass, void *data) +static void virtual_css_bridge_class_init(ObjectClass *klass, const void *data) { HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); hc->unplug = ccw_device_unplug; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); - device_class_set_props(dc, virtual_css_bridge_properties); object_class_property_add_bool(klass, "cssid-unrestricted", prop_get_true, NULL); object_class_property_set_description(klass, "cssid-unrestricted", @@ -151,7 +136,7 @@ static const TypeInfo virtual_css_bridge_info = { .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(VirtualCssBridge), .class_init = virtual_css_bridge_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } } diff --git a/hw/s390x/css.c b/hw/s390x/css.c index b2d5327dbf4..53444f68288 100644 --- a/hw/s390x/css.c +++ b/hw/s390x/css.c @@ -14,7 +14,7 @@ #include "qapi/visitor.h" #include "qemu/bitops.h" #include "qemu/error-report.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/s390x/ioinst.h" #include "hw/qdev-properties.h" #include "hw/s390x/css.h" @@ -23,8 +23,6 @@ #include "hw/s390x/s390-virtio-ccw.h" #include "hw/s390x/s390-ccw.h" -bool css_migration_enabled = true; - typedef struct CrwContainer { CRW crw; QTAILQ_ENTRY(CrwContainer) sibling; @@ -180,16 +178,10 @@ static const VMStateDescription vmstate_orb = { } }; -static bool vmstate_schdev_orb_needed(void *opaque) -{ - return css_migration_enabled; -} - static const VMStateDescription vmstate_schdev_orb = { .name = "s390_subch_dev/orb", .version_id = 1, .minimum_version_id = 1, - .needed = vmstate_schdev_orb_needed, .fields = (const VMStateField[]) { VMSTATE_STRUCT(orb, SubchDev, 1, vmstate_orb, ORB), VMSTATE_END_OF_LIST() @@ -390,33 +382,12 @@ static int subch_dev_post_load(void *opaque, int version_id) css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, s); } - if (css_migration_enabled) { - /* No compat voodoo to do ;) */ - return 0; - } - /* - * Hack alert. If we don't migrate the channel subsystem status - * we still need to find out if the guest enabled mss/mcss-e. - * If the subchannel is enabled, it certainly was able to access it, - * so adjust the max_ssid/max_cssid values for relevant ssid/cssid - * values. This is not watertight, but better than nothing. 
- */ - if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) { - if (s->ssid) { - channel_subsys.max_ssid = MAX_SSID; - } - if (s->cssid != channel_subsys.default_cssid) { - channel_subsys.max_cssid = MAX_CSSID; - } - } return 0; } void css_register_vmstate(void) { - if (css_migration_enabled) { - vmstate_register(NULL, 0, &vmstate_css, &channel_subsys); - } + vmstate_register(NULL, 0, &vmstate_css, &channel_subsys); } IndAddr *get_indicator(hwaddr ind_addr, int len) @@ -2463,7 +2434,7 @@ void css_reset(void) static void get_css_devid(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; CssDevId *dev_id = object_field_prop_ptr(obj, prop); char buffer[] = "xx.x.xxxx"; char *p = buffer; @@ -2492,7 +2463,7 @@ static void get_css_devid(Object *obj, Visitor *v, const char *name, static void set_css_devid(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; CssDevId *dev_id = object_field_prop_ptr(obj, prop); char *str; int num, n1, n2; @@ -2523,7 +2494,7 @@ static void set_css_devid(Object *obj, Visitor *v, const char *name, } const PropertyInfo css_devid_propinfo = { - .name = "str", + .type = "str", .description = "Identifier of an I/O device in the channel " "subsystem, example: fe.1.23ab", .get = get_css_devid, @@ -2531,7 +2502,7 @@ const PropertyInfo css_devid_propinfo = { }; const PropertyInfo css_devid_ro_propinfo = { - .name = "str", + .type = "str", .description = "Read-only identifier of an I/O device in the channel " "subsystem, example: fe.1.23ab", .get = get_css_devid, diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c index 06c1da0eced..fee286ea639 100644 --- a/hw/s390x/event-facility.c +++ b/hw/s390x/event-facility.c @@ -4,6 +4,7 @@ * handles SCLP event types * - Signal Quiesce - system power down * - ASCII Console Data - VT220 read and write + * - Control-Program Identification - Send OS data from guest to host * * Copyright IBM, Corp. 2012 * @@ -40,18 +41,12 @@ struct SCLPEventFacility { SysBusDevice parent_obj; SCLPEventsBus sbus; SCLPEvent quiesce, cpu_hotplug; + SCLPEventCPI cpi; /* guest's receive mask */ union { uint32_t receive_mask_pieces[2]; sccb_mask_t receive_mask; }; - /* - * when false, we keep the same broken, backwards compatible behaviour as - * before, allowing only masks of size exactly 4; when true, we implement - * the architecture correctly, allowing all valid mask sizes. Needed for - * migration toward older versions. 
- */ - bool allow_all_mask_sizes; /* length of the receive mask */ uint16_t mask_length; }; @@ -294,8 +289,7 @@ static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb) uint16_t mask_length = be16_to_cpu(we_mask->mask_length); sccb_mask_t tmp_mask; - if (!mask_length || (mask_length > SCLP_EVENT_MASK_LEN_MAX) || - ((mask_length != 4) && !ef->allow_all_mask_sizes)) { + if (!mask_length || mask_length > SCLP_EVENT_MASK_LEN_MAX) { sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH); return; } @@ -355,13 +349,6 @@ static bool vmstate_event_facility_mask64_needed(void *opaque) return (ef->receive_mask & 0xFFFFFFFF) != 0; } -static bool vmstate_event_facility_mask_length_needed(void *opaque) -{ - SCLPEventFacility *ef = opaque; - - return ef->allow_all_mask_sizes; -} - static const VMStateDescription vmstate_event_facility_mask64 = { .name = "vmstate-event-facility/mask64", .version_id = 0, @@ -377,7 +364,6 @@ static const VMStateDescription vmstate_event_facility_mask_length = { .name = "vmstate-event-facility/mask_length", .version_id = 0, .minimum_version_id = 0, - .needed = vmstate_event_facility_mask_length_needed, .fields = (const VMStateField[]) { VMSTATE_UINT16(mask_length, SCLPEventFacility), VMSTATE_END_OF_LIST() @@ -399,31 +385,12 @@ static const VMStateDescription vmstate_event_facility = { } }; -static void sclp_event_set_allow_all_mask_sizes(Object *obj, bool value, - Error **errp) -{ - SCLPEventFacility *ef = (SCLPEventFacility *)obj; - - ef->allow_all_mask_sizes = value; -} - -static bool sclp_event_get_allow_all_mask_sizes(Object *obj, Error **errp) -{ - SCLPEventFacility *ef = (SCLPEventFacility *)obj; - - return ef->allow_all_mask_sizes; -} - static void init_event_facility(Object *obj) { SCLPEventFacility *event_facility = EVENT_FACILITY(obj); DeviceState *sdev = DEVICE(obj); event_facility->mask_length = 4; - event_facility->allow_all_mask_sizes = true; - object_property_add_bool(obj, "allow_all_mask_sizes", - sclp_event_get_allow_all_mask_sizes, - sclp_event_set_allow_all_mask_sizes); /* Spawn a new bus for SCLP events */ qbus_init(&event_facility->sbus, sizeof(event_facility->sbus), @@ -460,14 +427,14 @@ static void reset_event_facility(DeviceState *dev) sdev->receive_mask = 0; } -static void init_event_facility_class(ObjectClass *klass, void *data) +static void init_event_facility_class(ObjectClass *klass, const void *data) { SysBusDeviceClass *sbdc = SYS_BUS_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(sbdc); SCLPEventFacilityClass *k = EVENT_FACILITY_CLASS(dc); dc->realize = realize_event_facility; - dc->reset = reset_event_facility; + device_class_set_legacy_reset(dc, reset_event_facility); dc->vmsd = &vmstate_event_facility; set_bit(DEVICE_CATEGORY_MISC, dc->categories); k->command_handler = command_handler; @@ -497,7 +464,7 @@ static void event_realize(DeviceState *qdev, Error **errp) } } -static void event_class_init(ObjectClass *klass, void *data) +static void event_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c index e934bf89d15..2f082396c73 100644 --- a/hw/s390x/ipl.c +++ b/hw/s390x/ipl.c @@ -15,9 +15,9 @@ #include "qemu/osdep.h" #include "qemu/datadir.h" #include "qapi/error.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "sysemu/tcg.h" +#include "system/reset.h" +#include "system/runstate.h" +#include "system/tcg.h" #include "elf.h" #include "hw/loader.h" #include "hw/qdev-properties.h" @@ -26,7 +26,6 @@ #include 
"hw/s390x/vfio-ccw.h" #include "hw/s390x/css.h" #include "hw/s390x/ebcdic.h" -#include "target/s390x/kvm/pv.h" #include "hw/scsi/scsi.h" #include "hw/virtio/virtio-net.h" #include "ipl.h" @@ -34,6 +33,7 @@ #include "qemu/config-file.h" #include "qemu/cutils.h" #include "qemu/option.h" +#include "qemu/ctype.h" #include "standard-headers/linux/virtio_ids.h" #define KERN_IMAGE_START 0x010000UL @@ -45,20 +45,20 @@ #define INITRD_PARM_START 0x010408UL #define PARMFILE_START 0x001000UL #define ZIPL_IMAGE_START 0x009000UL +#define BIOS_MAX_SIZE 0x300000UL #define IPL_PSW_MASK (PSW_MASK_32 | PSW_MASK_64) -static bool iplb_extended_needed(void *opaque) +/* Place the IPLB chain immediately before the BIOS in memory */ +static uint64_t find_iplb_chain_addr(uint64_t bios_addr, uint16_t count) { - S390IPLState *ipl = S390_IPL(object_resolve_path(TYPE_S390_IPL, NULL)); - - return ipl->iplbext_migration; + return (bios_addr & TARGET_PAGE_MASK) + - (count * sizeof(IplParameterBlock)); } static const VMStateDescription vmstate_iplb_extended = { .name = "ipl/iplb_extended", .version_id = 0, .minimum_version_id = 0, - .needed = iplb_extended_needed, .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(reserved_ext, IplParameterBlock, 4096 - 200), VMSTATE_END_OF_LIST() @@ -144,7 +144,14 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp) * even if an external kernel has been defined. */ if (!ipl->kernel || ipl->enforce_bios) { - uint64_t fwbase = (MIN(ms->ram_size, 0x80000000U) - 0x200000) & ~0xffffUL; + uint64_t fwbase; + + if (ms->ram_size < BIOS_MAX_SIZE) { + error_setg(errp, "not enough RAM to load the BIOS file"); + return; + } + + fwbase = (MIN(ms->ram_size, 0x80000000U) - BIOS_MAX_SIZE) & ~0xffffUL; bios_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, ipl->firmware); if (bios_filename == NULL) { @@ -154,8 +161,8 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp) bios_size = load_elf(bios_filename, NULL, bios_translate_addr, &fwbase, - &ipl->bios_start_addr, NULL, NULL, NULL, 1, - EM_S390, 0, 0); + &ipl->bios_start_addr, NULL, NULL, NULL, + ELFDATA2MSB, EM_S390, 0, 0); if (bios_size > 0) { /* Adjust ELF start address to final location */ ipl->bios_start_addr += fwbase; @@ -179,7 +186,7 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp) if (ipl->kernel) { kernel_size = load_elf(ipl->kernel, NULL, NULL, NULL, &pentry, NULL, - NULL, NULL, 1, EM_S390, 0, 0); + NULL, NULL, ELFDATA2MSB, EM_S390, 0, 0); if (kernel_size < 0) { kernel_size = load_image_targphys(ipl->kernel, 0, ms->ram_size); if (kernel_size < 0) { @@ -252,8 +259,8 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp) */ romptr = rom_ptr(INITRD_PARM_START, 16); if (romptr) { - stq_p(romptr, initrd_offset); - stq_p(romptr + 1, initrd_size); + stq_be_p(romptr, initrd_offset); + stq_be_p(romptr + 1, initrd_size); } } } @@ -275,16 +282,12 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp) qemu_register_reset(resettable_cold_reset_fn, dev); } -static Property s390_ipl_properties[] = { +static const Property s390_ipl_properties[] = { DEFINE_PROP_STRING("kernel", S390IPLState, kernel), DEFINE_PROP_STRING("initrd", S390IPLState, initrd), DEFINE_PROP_STRING("cmdline", S390IPLState, cmdline), DEFINE_PROP_STRING("firmware", S390IPLState, firmware), - DEFINE_PROP_STRING("netboot_fw", S390IPLState, netboot_fw), DEFINE_PROP_BOOL("enforce_bios", S390IPLState, enforce_bios, false), - DEFINE_PROP_BOOL("iplbext_migration", S390IPLState, iplbext_migration, - true), - DEFINE_PROP_END_OF_LIST(), }; static 
void s390_ipl_set_boot_menu(S390IPLState *ipl) @@ -390,174 +393,162 @@ static CcwDevice *s390_get_ccw_device(DeviceState *dev_st, int *devtype) return ccw_dev; } -static bool s390_gen_initial_iplb(S390IPLState *ipl) +static uint64_t s390_ipl_map_iplb_chain(IplParameterBlock *iplb_chain) +{ + S390IPLState *ipl = get_ipl_device(); + uint16_t count = be16_to_cpu(ipl->qipl.chain_len); + uint64_t len = sizeof(IplParameterBlock) * count; + uint64_t chain_addr = find_iplb_chain_addr(ipl->bios_start_addr, count); + + cpu_physical_memory_write(chain_addr, iplb_chain, len); + return chain_addr; +} + +void s390_ipl_fmt_loadparm(uint8_t *loadparm, char *str, Error **errp) +{ + /* Initialize the loadparm with spaces */ + memset(loadparm, ' ', LOADPARM_LEN); + qdev_prop_sanitize_s390x_loadparm(loadparm, str, errp); +} + +void s390_ipl_convert_loadparm(char *ascii_lp, uint8_t *ebcdic_lp) +{ + int i; + + /* Initialize the loadparm with EBCDIC spaces (0x40) */ + memset(ebcdic_lp, '@', LOADPARM_LEN); + for (i = 0; i < LOADPARM_LEN && ascii_lp[i]; i++) { + ebcdic_lp[i] = ascii2ebcdic[(uint8_t) ascii_lp[i]]; + } +} + +static bool s390_build_iplb(DeviceState *dev_st, IplParameterBlock *iplb) { - DeviceState *dev_st; CcwDevice *ccw_dev = NULL; SCSIDevice *sd; int devtype; - - dev_st = get_boot_device(0); - if (dev_st) { - ccw_dev = s390_get_ccw_device(dev_st, &devtype); - } + uint8_t *lp; + g_autofree void *scsi_lp = NULL; /* * Currently allow IPL only from CCW devices. */ + ccw_dev = s390_get_ccw_device(dev_st, &devtype); if (ccw_dev) { + lp = ccw_dev->loadparm; + switch (devtype) { case CCW_DEVTYPE_SCSI: sd = SCSI_DEVICE(dev_st); - ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN); - ipl->iplb.blk0_len = + scsi_lp = object_property_get_str(OBJECT(sd), "loadparm", NULL); + if (scsi_lp && strlen(scsi_lp) > 0) { + lp = scsi_lp; + } + iplb->len = cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN); + iplb->blk0_len = cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN - S390_IPLB_HEADER_LEN); - ipl->iplb.pbt = S390_IPL_TYPE_QEMU_SCSI; - ipl->iplb.scsi.lun = cpu_to_be32(sd->lun); - ipl->iplb.scsi.target = cpu_to_be16(sd->id); - ipl->iplb.scsi.channel = cpu_to_be16(sd->channel); - ipl->iplb.scsi.devno = cpu_to_be16(ccw_dev->sch->devno); - ipl->iplb.scsi.ssid = ccw_dev->sch->ssid & 3; + iplb->pbt = S390_IPL_TYPE_QEMU_SCSI; + iplb->scsi.lun = cpu_to_be32(sd->lun); + iplb->scsi.target = cpu_to_be16(sd->id); + iplb->scsi.channel = cpu_to_be16(sd->channel); + iplb->scsi.devno = cpu_to_be16(ccw_dev->sch->devno); + iplb->scsi.ssid = ccw_dev->sch->ssid & 3; break; case CCW_DEVTYPE_VFIO: - ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN); - ipl->iplb.pbt = S390_IPL_TYPE_CCW; - ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno); - ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3; + iplb->len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN); + iplb->pbt = S390_IPL_TYPE_CCW; + iplb->ccw.devno = cpu_to_be16(ccw_dev->sch->devno); + iplb->ccw.ssid = ccw_dev->sch->ssid & 3; break; case CCW_DEVTYPE_VIRTIO_NET: - ipl->netboot = true; - /* Fall through to CCW_DEVTYPE_VIRTIO case */ case CCW_DEVTYPE_VIRTIO: - ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN); - ipl->iplb.blk0_len = + iplb->len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN); + iplb->blk0_len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN - S390_IPLB_HEADER_LEN); - ipl->iplb.pbt = S390_IPL_TYPE_CCW; - ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno); - ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3; + iplb->pbt = S390_IPL_TYPE_CCW; + iplb->ccw.devno = cpu_to_be16(ccw_dev->sch->devno); + iplb->ccw.ssid = 
ccw_dev->sch->ssid & 3; break; } - if (!s390_ipl_set_loadparm(ipl->iplb.loadparm)) { - ipl->iplb.flags |= DIAG308_FLAGS_LP_VALID; + /* If the device loadparm is empty use the global machine loadparm */ + if (memcmp(lp, NO_LOADPARM, 8) == 0) { + lp = S390_CCW_MACHINE(qdev_get_machine())->loadparm; } + s390_ipl_convert_loadparm((char *)lp, iplb->loadparm); + iplb->flags |= DIAG308_FLAGS_LP_VALID; + return true; } return false; } -int s390_ipl_set_loadparm(uint8_t *loadparm) +void s390_rebuild_iplb(uint16_t dev_index, IplParameterBlock *iplb) { - MachineState *machine = MACHINE(qdev_get_machine()); - char *lp = object_property_get_str(OBJECT(machine), "loadparm", NULL); - - if (lp) { - int i; - - /* lp is an uppercase string without leading/embedded spaces */ - for (i = 0; i < 8 && lp[i]; i++) { - loadparm[i] = ascii2ebcdic[(uint8_t) lp[i]]; - } - - if (i < 8) { - memset(loadparm + i, 0x40, 8 - i); /* fill with EBCDIC spaces */ - } - - g_free(lp); - return 0; - } + S390IPLState *ipl = get_ipl_device(); + uint16_t index; + index = ipl->rebuilt_iplb ? ipl->iplb_index : dev_index; - return -1; + ipl->rebuilt_iplb = s390_build_iplb(get_boot_device(index), iplb); + ipl->iplb_index = index; } -static int load_netboot_image(Error **errp) +static bool s390_init_all_iplbs(S390IPLState *ipl) { - MachineState *ms = MACHINE(qdev_get_machine()); - S390IPLState *ipl = get_ipl_device(); - char *netboot_filename; - MemoryRegion *sysmem = get_system_memory(); - MemoryRegion *mr = NULL; - void *ram_ptr = NULL; - int img_size = -1; - - mr = memory_region_find(sysmem, 0, 1).mr; - if (!mr) { - error_setg(errp, "Failed to find memory region at address 0"); - return -1; - } + int iplb_num = 0; + IplParameterBlock iplb_chain[7]; + DeviceState *dev_st = get_boot_device(0); + Object *machine = qdev_get_machine(); - ram_ptr = memory_region_get_ram_ptr(mr); - if (!ram_ptr) { - error_setg(errp, "No RAM found"); - goto unref_mr; + /* + * Parse the boot devices. Generate an IPLB for only the first boot device + * which will later be set with DIAG308. + */ + if (!dev_st) { + ipl->qipl.chain_len = 0; + return false; } - netboot_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, ipl->netboot_fw); - if (netboot_filename == NULL) { - error_setg(errp, "Could not find network bootloader '%s'", - ipl->netboot_fw); - goto unref_mr; + /* If no machine loadparm was defined fill it with spaces */ + if (memcmp(S390_CCW_MACHINE(machine)->loadparm, NO_LOADPARM, 8) == 0) { + object_property_set_str(machine, "loadparm", " ", NULL); } - img_size = load_elf_ram(netboot_filename, NULL, NULL, NULL, - &ipl->start_addr, - NULL, NULL, NULL, 1, EM_S390, 0, 0, NULL, - false); - - if (img_size < 0) { - img_size = load_image_size(netboot_filename, ram_ptr, ms->ram_size); - ipl->start_addr = KERN_IMAGE_START; - } + iplb_num = 1; + s390_build_iplb(dev_st, &ipl->iplb); - if (img_size < 0) { - error_setg(errp, "Failed to load network bootloader"); + /* Index any fallback boot devices */ + while (get_boot_device(iplb_num)) { + iplb_num++; } - g_free(netboot_filename); - -unref_mr: - memory_region_unref(mr); - return img_size; -} - -static bool is_virtio_ccw_device_of_type(IplParameterBlock *iplb, - int virtio_id) -{ - uint8_t cssid; - uint8_t ssid; - uint16_t devno; - uint16_t schid; - SubchDev *sch = NULL; + if (iplb_num > MAX_BOOT_DEVS) { + warn_report("Excess boot devices defined! 
%d boot devices found, " + "but only the first %d will be considered.", + iplb_num, MAX_BOOT_DEVS); - if (iplb->pbt != S390_IPL_TYPE_CCW) { - return false; + iplb_num = MAX_BOOT_DEVS; } - devno = be16_to_cpu(iplb->ccw.devno); - ssid = iplb->ccw.ssid & 3; - - for (schid = 0; schid < MAX_SCHID; schid++) { - for (cssid = 0; cssid < MAX_CSSID; cssid++) { - sch = css_find_subch(1, cssid, ssid, schid); + ipl->qipl.chain_len = cpu_to_be16(iplb_num - 1); - if (sch && sch->devno == devno) { - return sch->id.cu_model == virtio_id; - } + /* + * Build fallback IPLBs for any boot devices above index 0, up to a + * maximum amount as defined in ipl.h + */ + if (iplb_num > 1) { + /* Start at 1 because the IPLB for boot index 0 is not chained */ + for (int i = 1; i < iplb_num; i++) { + dev_st = get_boot_device(i); + s390_build_iplb(dev_st, &iplb_chain[i - 1]); } - } - return false; -} -static bool is_virtio_net_device(IplParameterBlock *iplb) -{ - return is_virtio_ccw_device_of_type(iplb, VIRTIO_ID_NET); -} + ipl->qipl.next_iplb = cpu_to_be64(s390_ipl_map_iplb_chain(iplb_chain)); + } -static bool is_virtio_scsi_device(IplParameterBlock *iplb) -{ - return is_virtio_ccw_device_of_type(iplb, VIRTIO_ID_SCSI); + return iplb_num; } static void update_machine_ipl_properties(IplParameterBlock *iplb) @@ -577,7 +568,7 @@ static void update_machine_ipl_properties(IplParameterBlock *iplb) ascii_loadparm[i] = 0; object_property_set_str(machine, "loadparm", ascii_loadparm, &err); } else { - object_property_set_str(machine, "loadparm", "", &err); + object_property_set_str(machine, "loadparm", " ", &err); } if (err) { warn_report_err(err); @@ -599,7 +590,7 @@ void s390_ipl_update_diag308(IplParameterBlock *iplb) ipl->iplb = *iplb; ipl->iplb_valid = true; } - ipl->netboot = is_virtio_net_device(iplb); + update_machine_ipl_properties(iplb); } @@ -626,32 +617,14 @@ IplParameterBlock *s390_ipl_get_iplb(void) void s390_ipl_reset_request(CPUState *cs, enum s390_reset reset_type) { S390IPLState *ipl = get_ipl_device(); - if (reset_type == S390_RESET_EXTERNAL || reset_type == S390_RESET_REIPL) { /* use CPU 0 for full resets */ ipl->reset_cpu_index = 0; } else { ipl->reset_cpu_index = cs->cpu_index; } - ipl->reset_type = reset_type; - if (reset_type == S390_RESET_REIPL && - ipl->iplb_valid && - !ipl->netboot && - ipl->iplb.pbt == S390_IPL_TYPE_CCW && - is_virtio_scsi_device(&ipl->iplb)) { - CcwDevice *ccw_dev = s390_get_ccw_device(get_boot_device(0), NULL); - - if (ccw_dev && - cpu_to_be16(ccw_dev->sch->devno) == ipl->iplb.ccw.devno && - (ccw_dev->sch->ssid & 3) == ipl->iplb.ccw.ssid) { - /* - * this is the original boot device's SCSI - * so restore IPL parameter info from it - */ - ipl->iplb_valid = s390_gen_initial_iplb(ipl); - } - } + ipl->reset_type = reset_type; if (reset_type == S390_RESET_MODIFIED_CLEAR || reset_type == S390_RESET_LOAD_NORMAL || reset_type == S390_RESET_PV) { @@ -702,7 +675,7 @@ static void s390_ipl_prepare_qipl(S390CPU *cpu) cpu_physical_memory_unmap(addr, len, 1, len); } -int s390_ipl_prepare_pv_header(Error **errp) +int s390_ipl_prepare_pv_header(struct S390PVResponse *pv_resp, Error **errp) { IplParameterBlock *ipib = s390_ipl_get_iplb_pv(); IPLBlockPV *ipib_pv = &ipib->pv; @@ -711,12 +684,13 @@ int s390_ipl_prepare_pv_header(Error **errp) cpu_physical_memory_read(ipib_pv->pv_header_addr, hdr, ipib_pv->pv_header_len); - rc = s390_pv_set_sec_parms((uintptr_t)hdr, ipib_pv->pv_header_len, errp); + rc = s390_pv_set_sec_parms((uintptr_t)hdr, ipib_pv->pv_header_len, + pv_resp, errp); g_free(hdr); return rc; } 
-int s390_ipl_pv_unpack(void) +int s390_ipl_pv_unpack(struct S390PVResponse *pv_resp) { IplParameterBlock *ipib = s390_ipl_get_iplb_pv(); IPLBlockPV *ipib_pv = &ipib->pv; @@ -725,7 +699,8 @@ int s390_ipl_pv_unpack(void) for (i = 0; i < ipib_pv->num_comp; i++) { rc = s390_pv_unpack(ipib_pv->components[i].addr, TARGET_PAGE_ALIGN(ipib_pv->components[i].size), - ipib_pv->components[i].tweak_pref); + ipib_pv->components[i].tweak_pref, + pv_resp); if (rc) { break; } @@ -743,13 +718,11 @@ void s390_ipl_prepare_cpu(S390CPU *cpu) if (!ipl->kernel || ipl->iplb_valid) { cpu->env.psw.addr = ipl->bios_start_addr; if (!ipl->iplb_valid) { - ipl->iplb_valid = s390_gen_initial_iplb(ipl); + ipl->iplb_valid = s390_init_all_iplbs(ipl); + } else { + ipl->qipl.chain_len = 0; } } - if (ipl->netboot) { - load_netboot_image(&error_fatal); - ipl->qipl.netboot_start_addr = cpu_to_be64(ipl->start_addr); - } s390_ipl_set_boot_menu(ipl); s390_ipl_prepare_qipl(cpu); } @@ -764,13 +737,13 @@ static void s390_ipl_reset(DeviceState *dev) } } -static void s390_ipl_class_init(ObjectClass *klass, void *data) +static void s390_ipl_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = s390_ipl_realize; device_class_set_props(dc, s390_ipl_properties); - dc->reset = s390_ipl_reset; + device_class_set_legacy_reset(dc, s390_ipl_reset); dc->vmsd = &vmstate_ipl; set_bit(DEVICE_CATEGORY_MISC, dc->categories); /* Reason: Loads the ROMs and thus can only be used one time - internally */ diff --git a/hw/s390x/ipl.h b/hw/s390x/ipl.h index 57cd1257697..505cded490c 100644 --- a/hw/s390x/ipl.h +++ b/hw/s390x/ipl.h @@ -14,101 +14,24 @@ #define HW_S390_IPL_H #include "cpu.h" -#include "exec/address-spaces.h" +#include "exec/target_page.h" +#include "system/address-spaces.h" +#include "system/memory.h" #include "hw/qdev-core.h" +#include "hw/s390x/ipl/qipl.h" #include "qom/object.h" - -struct IPLBlockPVComp { - uint64_t tweak_pref; - uint64_t addr; - uint64_t size; -} QEMU_PACKED; -typedef struct IPLBlockPVComp IPLBlockPVComp; - -struct IPLBlockPV { - uint8_t reserved18[87]; /* 0x18 */ - uint8_t version; /* 0x6f */ - uint32_t reserved70; /* 0x70 */ - uint32_t num_comp; /* 0x74 */ - uint64_t pv_header_addr; /* 0x78 */ - uint64_t pv_header_len; /* 0x80 */ - struct IPLBlockPVComp components[0]; -} QEMU_PACKED; -typedef struct IPLBlockPV IPLBlockPV; - -struct IplBlockCcw { - uint8_t reserved0[85]; - uint8_t ssid; - uint16_t devno; - uint8_t vm_flags; - uint8_t reserved3[3]; - uint32_t vm_parm_len; - uint8_t nss_name[8]; - uint8_t vm_parm[64]; - uint8_t reserved4[8]; -} QEMU_PACKED; -typedef struct IplBlockCcw IplBlockCcw; - -struct IplBlockFcp { - uint8_t reserved1[305 - 1]; - uint8_t opt; - uint8_t reserved2[3]; - uint16_t reserved3; - uint16_t devno; - uint8_t reserved4[4]; - uint64_t wwpn; - uint64_t lun; - uint32_t bootprog; - uint8_t reserved5[12]; - uint64_t br_lba; - uint32_t scp_data_len; - uint8_t reserved6[260]; - uint8_t scp_data[0]; -} QEMU_PACKED; -typedef struct IplBlockFcp IplBlockFcp; - -struct IplBlockQemuScsi { - uint32_t lun; - uint16_t target; - uint16_t channel; - uint8_t reserved0[77]; - uint8_t ssid; - uint16_t devno; -} QEMU_PACKED; -typedef struct IplBlockQemuScsi IplBlockQemuScsi; +#include "target/s390x/kvm/pv.h" #define DIAG308_FLAGS_LP_VALID 0x80 +#define MAX_BOOT_DEVS 8 /* Max number of devices that may have a bootindex */ -union IplParameterBlock { - struct { - uint32_t len; - uint8_t reserved0[3]; - uint8_t version; - uint32_t blk0_len; - uint8_t pbt; - uint8_t flags; 
- uint16_t reserved01; - uint8_t loadparm[8]; - union { - IplBlockCcw ccw; - IplBlockFcp fcp; - IPLBlockPV pv; - IplBlockQemuScsi scsi; - }; - } QEMU_PACKED; - struct { - uint8_t reserved1[110]; - uint16_t devno; - uint8_t reserved2[88]; - uint8_t reserved_ext[4096 - 200]; - } QEMU_PACKED; -} QEMU_PACKED; -typedef union IplParameterBlock IplParameterBlock; - -int s390_ipl_set_loadparm(uint8_t *loadparm); +void s390_ipl_convert_loadparm(char *ascii_lp, uint8_t *ebcdic_lp); +void s390_ipl_fmt_loadparm(uint8_t *loadparm, char *str, Error **errp); +void s390_rebuild_iplb(uint16_t index, IplParameterBlock *iplb); void s390_ipl_update_diag308(IplParameterBlock *iplb); -int s390_ipl_prepare_pv_header(Error **errp); -int s390_ipl_pv_unpack(void); +int s390_ipl_prepare_pv_header(struct S390PVResponse *pv_resp, + Error **errp); +int s390_ipl_pv_unpack(struct S390PVResponse *pv_resp); void s390_ipl_prepare_cpu(S390CPU *cpu); IplParameterBlock *s390_ipl_get_iplb(void); IplParameterBlock *s390_ipl_get_iplb_pv(void); @@ -131,27 +54,6 @@ void s390_ipl_clear_reset_request(void); #define QIPL_FLAG_BM_OPTS_CMD 0x80 #define QIPL_FLAG_BM_OPTS_ZIPL 0x40 -/* - * The QEMU IPL Parameters will be stored at absolute address - * 204 (0xcc) which means it is 32-bit word aligned but not - * double-word aligned. - * Placement of data fields in this area must account for - * their alignment needs. E.g., netboot_start_address must - * have an offset of 4 + n * 8 bytes within the struct in order - * to keep it double-word aligned. - * The total size of the struct must never exceed 28 bytes. - * This definition must be kept in sync with the definition - * in pc-bios/s390-ccw/iplb.h. - */ -struct QemuIplParameters { - uint8_t qipl_flags; - uint8_t reserved1[3]; - uint64_t netboot_start_addr; - uint32_t boot_menu_timeout; - uint8_t reserved2[12]; -} QEMU_PACKED; -typedef struct QemuIplParameters QemuIplParameters; - #define TYPE_S390_IPL "s390-ipl" OBJECT_DECLARE_SIMPLE_TYPE(S390IPLState, S390_IPL) @@ -168,7 +70,8 @@ struct S390IPLState { bool enforce_bios; bool iplb_valid; bool iplb_valid_pv; - bool netboot; + bool rebuilt_iplb; + uint16_t iplb_index; /* reset related properties don't have to be migrated or reset */ enum s390_reset reset_type; int reset_cpu_index; @@ -178,11 +81,9 @@ struct S390IPLState { char *initrd; char *cmdline; char *firmware; - char *netboot_fw; uint8_t cssid; uint8_t ssid; uint16_t devno; - bool iplbext_migration; }; QEMU_BUILD_BUG_MSG(offsetof(S390IPLState, iplb) & 3, "alignment of iplb wrong"); @@ -276,11 +177,14 @@ static inline bool iplb_valid_pv(IplParameterBlock *iplb) static inline bool iplb_valid(IplParameterBlock *iplb) { + uint32_t len = be32_to_cpu(iplb->len); + switch (iplb->pbt) { case S390_IPL_TYPE_FCP: - return be32_to_cpu(iplb->len) >= S390_IPLB_MIN_FCP_LEN; + return len >= S390_IPLB_MIN_FCP_LEN; case S390_IPL_TYPE_CCW: - return be32_to_cpu(iplb->len) >= S390_IPLB_MIN_CCW_LEN; + return len >= S390_IPLB_MIN_CCW_LEN; + case S390_IPL_TYPE_QEMU_SCSI: default: return false; } diff --git a/hw/s390x/meson.build b/hw/s390x/meson.build index 482fd134205..1bc85837996 100644 --- a/hw/s390x/meson.build +++ b/hw/s390x/meson.build @@ -12,8 +12,8 @@ s390x_ss.add(files( 's390-pci-inst.c', 's390-skeys.c', 's390-stattrib.c', - 's390-virtio-hcall.c', 'sclp.c', + 'sclpcpi.c', 'sclpcpu.c', 'sclpquiesce.c', 'tod.c', @@ -28,9 +28,13 @@ s390x_ss.add(when: 'CONFIG_KVM', if_true: files( s390x_ss.add(when: 'CONFIG_TCG', if_true: files( 'tod-tcg.c', )) -s390x_ss.add(when: 'CONFIG_S390_CCW_VIRTIO', if_true: 
files('s390-virtio-ccw.c')) +s390x_ss.add(when: 'CONFIG_S390_CCW_VIRTIO', if_true: files( + 's390-virtio-ccw.c', + 's390-hypercall.c', +)) s390x_ss.add(when: 'CONFIG_TERMINAL3270', if_true: files('3270-ccw.c')) s390x_ss.add(when: 'CONFIG_VFIO', if_true: files('s390-pci-vfio.c')) +s390x_ss.add(when: 'CONFIG_VFIO_AP', if_false: files('ap-stub.c')) virtio_ss = ss.source_set() virtio_ss.add(files('virtio-ccw.c')) @@ -48,8 +52,12 @@ endif virtio_ss.add(when: 'CONFIG_VHOST_SCSI', if_true: files('vhost-scsi-ccw.c')) virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-ccw.c')) virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs-ccw.c')) +virtio_ss.add(when: 'CONFIG_VIRTIO_MD', if_true: files('virtio-ccw-md.c')) +virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-ccw-mem.c')) s390x_ss.add_all(when: 'CONFIG_VIRTIO_CCW', if_true: virtio_ss) +s390x_ss.add(when: 'CONFIG_VIRTIO_MD', if_false: files('virtio-ccw-md-stubs.c')) + hw_arch += {'s390x': s390x_ss} hw_s390x_modules = {} diff --git a/hw/s390x/s390-ccw.c b/hw/s390x/s390-ccw.c index 3c097505508..10c81a4c893 100644 --- a/hw/s390x/s390-ccw.c +++ b/hw/s390x/s390-ccw.c @@ -18,7 +18,7 @@ #include "hw/s390x/css.h" #include "hw/s390x/css-bridge.h" #include "hw/s390x/s390-ccw.h" -#include "sysemu/sysemu.h" +#include "system/system.h" IOInstEnding s390_ccw_cmd_request(SubchDev *sch) { @@ -175,7 +175,7 @@ static void s390_ccw_instance_init(Object *obj) "/disk@0,0", DEVICE(obj)); } -static void s390_ccw_class_init(ObjectClass *klass, void *data) +static void s390_ccw_class_init(ObjectClass *klass, const void *data) { S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/s390-hypercall.c b/hw/s390x/s390-hypercall.c new file mode 100644 index 00000000000..ac1b08b2cd5 --- /dev/null +++ b/hw/s390x/s390-hypercall.c @@ -0,0 +1,85 @@ +/* + * Support for QEMU/KVM hypercalls on s390 + * + * Copyright 2012 IBM Corp. + * Author(s): Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "hw/s390x/s390-virtio-ccw.h"
+#include "hw/s390x/s390-hypercall.h"
+#include "hw/s390x/ioinst.h"
+#include "hw/s390x/css.h"
+#include "virtio-ccw.h"
+
+static int handle_virtio_notify(uint64_t mem)
+{
+    MachineState *ms = MACHINE(qdev_get_machine());
+
+    if (mem < ms->ram_size) {
+        /* Early printk */
+        return 0;
+    }
+    return -EINVAL;
+}
+
+static int handle_virtio_ccw_notify(uint64_t subch_id, uint64_t data)
+{
+    SubchDev *sch;
+    VirtIODevice *vdev;
+    int cssid, ssid, schid, m;
+    uint16_t vq_idx = data;
+
+    if (ioinst_disassemble_sch_ident(subch_id, &m, &cssid, &ssid, &schid)) {
+        return -EINVAL;
+    }
+    sch = css_find_subch(m, cssid, ssid, schid);
+    if (!sch || !css_subch_visible(sch)) {
+        return -EINVAL;
+    }
+
+    vdev = virtio_ccw_get_vdev(sch);
+    if (vq_idx >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, vq_idx)) {
+        return -EINVAL;
+    }
+
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFICATION_DATA)) {
+        virtio_queue_set_shadow_avail_idx(virtio_get_queue(vdev, vq_idx),
+                                          (data >> 16) & 0xFFFF);
+    }
+
+    virtio_queue_notify(vdev, vq_idx);
+    return 0;
+}
+
+static uint64_t handle_storage_limit(void)
+{
+    S390CcwMachineState *s390ms = S390_CCW_MACHINE(qdev_get_machine());
+
+    return s390_get_memory_limit(s390ms) - 1;
+}
+
+void handle_diag_500(S390CPU *cpu, uintptr_t ra)
+{
+    CPUS390XState *env = &cpu->env;
+    const uint64_t subcode = env->regs[1];
+
+    switch (subcode) {
+    case DIAG500_VIRTIO_NOTIFY:
+        env->regs[2] = handle_virtio_notify(env->regs[2]);
+        break;
+    case DIAG500_VIRTIO_CCW_NOTIFY:
+        env->regs[2] = handle_virtio_ccw_notify(env->regs[2], env->regs[3]);
+        break;
+    case DIAG500_STORAGE_LIMIT:
+        env->regs[2] = handle_storage_limit();
+        break;
+    default:
+        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+    }
+}
diff --git a/hw/s390x/s390-hypercall.h b/hw/s390x/s390-hypercall.h
new file mode 100644
index 00000000000..4f07209128c
--- /dev/null
+++ b/hw/s390x/s390-hypercall.h
@@ -0,0 +1,25 @@
+/*
+ * Support for QEMU/KVM hypercalls on s390x
+ *
+ * Copyright IBM Corp. 2012, 2017
+ * Author(s): Cornelia Huck
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */ + +#ifndef HW_S390_HYPERCALL_H +#define HW_S390_HYPERCALL_H + +#include "cpu.h" + +#define DIAG500_VIRTIO_NOTIFY 0 /* legacy, implemented as a NOP */ +#define DIAG500_VIRTIO_RESET 1 /* legacy */ +#define DIAG500_VIRTIO_SET_STATUS 2 /* legacy */ +#define DIAG500_VIRTIO_CCW_NOTIFY 3 /* KVM_S390_VIRTIO_CCW_NOTIFY */ +#define DIAG500_STORAGE_LIMIT 4 + +void handle_diag_500(S390CPU *cpu, uintptr_t ra); + +#endif /* HW_S390_HYPERCALL_H */ diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c index 3e57d5faca1..f87d2748b63 100644 --- a/hw/s390x/s390-pci-bus.c +++ b/hw/s390x/s390-pci-bus.c @@ -14,18 +14,21 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qapi/visitor.h" +#include "exec/target_page.h" #include "hw/s390x/s390-pci-bus.h" #include "hw/s390x/s390-pci-inst.h" #include "hw/s390x/s390-pci-kvm.h" #include "hw/s390x/s390-pci-vfio.h" +#include "hw/s390x/s390-virtio-ccw.h" +#include "hw/boards.h" #include "hw/pci/pci_bus.h" #include "hw/qdev-properties.h" #include "hw/pci/pci_bridge.h" #include "hw/pci/msi.h" #include "qemu/error-report.h" #include "qemu/module.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" +#include "system/reset.h" +#include "system/runstate.h" #include "trace.h" @@ -381,9 +384,9 @@ static uint64_t get_table_index(uint64_t iova, int8_t ett) return calc_sx(iova); case ZPCI_ETT_RT: return calc_rtx(iova); + default: + g_assert_not_reached(); } - - return -1; } static bool entry_isvalid(uint64_t entry, int8_t ett) @@ -394,22 +397,24 @@ static bool entry_isvalid(uint64_t entry, int8_t ett) case ZPCI_ETT_ST: case ZPCI_ETT_RT: return rt_entry_isvalid(entry); + default: + g_assert_not_reached(); } - - return false; } /* Return true if address translation is done */ static bool translate_iscomplete(uint64_t entry, int8_t ett) { switch (ett) { - case 0: + case ZPCI_ETT_ST: return (entry & ZPCI_TABLE_FC) ? true : false; - case 1: + case ZPCI_ETT_RT: return false; + case ZPCI_ETT_PT: + return true; + default: + g_assert_not_reached(); } - - return true; } static uint64_t get_frame_size(int8_t ett) @@ -421,9 +426,9 @@ static uint64_t get_frame_size(int8_t ett) return 1ULL << 20; case ZPCI_ETT_RT: return 1ULL << 31; + default: + g_assert_not_reached(); } - - return 0; } static uint64_t get_next_table_origin(uint64_t entry, int8_t ett) @@ -435,9 +440,9 @@ static uint64_t get_next_table_origin(uint64_t entry, int8_t ett) return get_st_pto(entry); case ZPCI_ETT_RT: return get_rt_sto(entry); + default: + g_assert_not_reached(); } - - return 0; } /** @@ -595,7 +600,6 @@ static void s390_pci_iommu_replay(IOMMUMemoryRegion *iommu, * zpci device" construct. But when we support migration of vfio-pci * devices in future, we need to revisit this. */ - return; } static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus, @@ -724,12 +728,42 @@ void s390_pci_iommu_enable(S390PCIIOMMU *iommu) g_free(name); } +void s390_pci_iommu_direct_map_enable(S390PCIIOMMU *iommu) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + S390CcwMachineState *s390ms = S390_CCW_MACHINE(ms); + + /* + * For direct-mapping we must map the entire guest address space. Rather + * than using an iommu, create a memory region alias that maps GPA X to + * IOVA X + SDMA. VFIO will handle pinning via its memory listener. 
+ */ + g_autofree char *name = g_strdup_printf("iommu-dm-s390-%04x", + iommu->pbdev->uid); + + iommu->dm_mr = g_malloc0(sizeof(*iommu->dm_mr)); + memory_region_init_alias(iommu->dm_mr, OBJECT(&iommu->mr), name, + get_system_memory(), 0, + s390_get_memory_limit(s390ms)); + iommu->enabled = true; + memory_region_add_subregion(&iommu->mr, iommu->pbdev->zpci_fn.sdma, + iommu->dm_mr); +} + void s390_pci_iommu_disable(S390PCIIOMMU *iommu) { iommu->enabled = false; g_hash_table_remove_all(iommu->iotlb); - memory_region_del_subregion(&iommu->mr, MEMORY_REGION(&iommu->iommu_mr)); - object_unparent(OBJECT(&iommu->iommu_mr)); + if (iommu->dm_mr) { + memory_region_del_subregion(&iommu->mr, iommu->dm_mr); + object_unparent(OBJECT(iommu->dm_mr)); + g_free(iommu->dm_mr); + iommu->dm_mr = NULL; + } else { + memory_region_del_subregion(&iommu->mr, + MEMORY_REGION(&iommu->iommu_mr)); + object_unparent(OBJECT(&iommu->iommu_mr)); + } } static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn) @@ -971,14 +1005,7 @@ static void s390_pcihost_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, "this device"); } - if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { - PCIDevice *pdev = PCI_DEVICE(dev); - - if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { - error_setg(errp, "multifunction not supported in s390"); - return; - } - } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) { + if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) { S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev); if (!s390_pci_alloc_idx(s, pbdev)) { @@ -1069,6 +1096,18 @@ static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev, } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { pdev = PCI_DEVICE(dev); + /* + * Multifunction is not supported due to the lack of CLP. However, + * do not check for multifunction capability for SR-IOV devices because + * SR-IOV devices automatically add the multifunction capability whether + * the user intends to use the functions other than the PF. + */ + if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION && + !pdev->exp.sriov_cap) { + error_setg(errp, "multifunction not supported in s390"); + return; + } + if (!dev->id) { /* In the case the PCI device does not define an id */ /* we generate one based on the PCI address */ @@ -1080,6 +1119,16 @@ static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev, pbdev = s390_pci_find_dev_by_target(s, dev->id); if (!pbdev) { + /* + * VFs are automatically created by PF, and creating zpci for them + * will result in unexpected usage of fids. Currently QEMU does not + * support multifunction for s390x so we don't need zpci for VFs + * anyway. 
+ */ + if (pci_is_vf(pdev)) { + return; + } + pbdev = s390_pci_device_new(s, dev->id, errp); if (!pbdev) { return; @@ -1130,6 +1179,7 @@ static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev, /* Always intercept emulated devices */ pbdev->interp = false; pbdev->forwarding_assist = false; + pbdev->rtr_avail = false; } if (s390_pci_msix_init(pbdev) && !pbdev->interp) { @@ -1167,7 +1217,10 @@ static void s390_pcihost_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, int32_t devfn; pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev)); - g_assert(pbdev); + if (!pbdev) { + g_assert(pci_is_vf(pci_dev)); + return; + } s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED, pbdev->fh, pbdev->fid); @@ -1206,7 +1259,11 @@ static void s390_pcihost_unplug_request(HotplugHandler *hotplug_dev, * we've checked the PCI device already (to prevent endless recursion). */ pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev)); - g_assert(pbdev); + if (!pbdev) { + g_assert(pci_is_vf(PCI_DEVICE(dev))); + return; + } + pbdev->pci_unplug_request_processed = true; qdev_unplug(DEVICE(pbdev), errp); } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) { @@ -1318,12 +1375,12 @@ static void s390_pcihost_reset(DeviceState *dev) pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s); } -static void s390_pcihost_class_init(ObjectClass *klass, void *data) +static void s390_pcihost_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); - dc->reset = s390_pcihost_reset; + device_class_set_legacy_reset(dc, s390_pcihost_reset); dc->realize = s390_pcihost_realize; dc->unrealize = s390_pcihost_unrealize; hc->pre_plug = s390_pcihost_pre_plug; @@ -1338,7 +1395,7 @@ static const TypeInfo s390_pcihost_info = { .parent = TYPE_PCI_HOST_BRIDGE, .instance_size = sizeof(S390pciState), .class_init = s390_pcihost_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } } @@ -1453,7 +1510,7 @@ static void s390_pci_device_reset(DeviceState *dev) static void s390_pci_get_fid(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint32_t *ptr = object_field_prop_ptr(obj, prop); visit_type_uint32(v, name, ptr, errp); @@ -1463,7 +1520,7 @@ static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { S390PCIBusDevice *zpci = S390_PCI_DEVICE(obj); - Property *prop = opaque; + const Property *prop = opaque; uint32_t *ptr = object_field_prop_ptr(obj, prop); if (!visit_type_uint32(v, name, ptr, errp)) { @@ -1473,7 +1530,8 @@ static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name, } static const PropertyInfo s390_pci_fid_propinfo = { - .name = "zpci_fid", + .type = "uint32", + .description = "zpci_fid", .get = s390_pci_get_fid, .set = s390_pci_set_fid, }; @@ -1481,14 +1539,15 @@ static const PropertyInfo s390_pci_fid_propinfo = { #define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \ DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t) -static Property s390_pci_device_properties[] = { +static const Property s390_pci_device_properties[] = { DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED), DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid), DEFINE_PROP_STRING("target", S390PCIBusDevice, target), DEFINE_PROP_BOOL("interpret", S390PCIBusDevice, interp, true), DEFINE_PROP_BOOL("forwarding-assist", 
S390PCIBusDevice, forwarding_assist, true), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_BOOL("relaxed-translation", S390PCIBusDevice, rtr_avail, + true), }; static const VMStateDescription s390_pci_device_vmstate = { @@ -1500,13 +1559,13 @@ static const VMStateDescription s390_pci_device_vmstate = { .unmigratable = 1, }; -static void s390_pci_device_class_init(ObjectClass *klass, void *data) +static void s390_pci_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->desc = "zpci device"; set_bit(DEVICE_CATEGORY_MISC, dc->categories); - dc->reset = s390_pci_device_reset; + device_class_set_legacy_reset(dc, s390_pci_device_reset); dc->bus_type = TYPE_S390_PCI_BUS; dc->realize = s390_pci_device_realize; device_class_set_props(dc, s390_pci_device_properties); @@ -1526,7 +1585,8 @@ static const TypeInfo s390_pci_iommu_info = { .instance_size = sizeof(S390PCIIOMMU), }; -static void s390_iommu_memory_region_class_init(ObjectClass *klass, void *data) +static void s390_iommu_memory_region_class_init(ObjectClass *klass, + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c index 30149546c08..a3bb5aa2216 100644 --- a/hw/s390x/s390-pci-inst.c +++ b/hw/s390x/s390-pci-inst.c @@ -13,9 +13,12 @@ #include "qemu/osdep.h" #include "exec/memop.h" -#include "exec/memory.h" +#include "exec/target_page.h" +#include "system/memory.h" #include "qemu/error-report.h" -#include "sysemu/hw_accel.h" +#include "qemu/bswap.h" +#include "system/hw_accel.h" +#include "hw/boards.h" #include "hw/pci/pci_device.h" #include "hw/s390x/s390-pci-inst.h" #include "hw/s390x/s390-pci-bus.h" @@ -55,26 +58,26 @@ static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc) uint64_t resume_token; rc = 0; - if (lduw_p(&rrb->request.hdr.len) != 32) { + if (lduw_be_p(&rrb->request.hdr.len) != 32) { res_code = CLP_RC_LEN; rc = -EINVAL; goto out; } - if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) { + if ((ldl_be_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) { res_code = CLP_RC_FMT; rc = -EINVAL; goto out; } - if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 || - ldq_p(&rrb->request.reserved1) != 0) { + if ((ldl_be_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 || + ldq_be_p(&rrb->request.reserved1) != 0) { res_code = CLP_RC_RESNOT0; rc = -EINVAL; goto out; } - resume_token = ldq_p(&rrb->request.resume_token); + resume_token = ldq_be_p(&rrb->request.resume_token); if (resume_token) { pbdev = s390_pci_find_dev_by_idx(s, resume_token); @@ -87,13 +90,13 @@ static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc) pbdev = s390_pci_find_next_avail_dev(s, NULL); } - if (lduw_p(&rrb->response.hdr.len) < 48) { + if (lduw_be_p(&rrb->response.hdr.len) < 48) { res_code = CLP_RC_8K; rc = -EINVAL; goto out; } - initial_l2 = lduw_p(&rrb->response.hdr.len); + initial_l2 = lduw_be_p(&rrb->response.hdr.len); if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry) != 0) { res_code = CLP_RC_LEN; @@ -102,33 +105,33 @@ static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc) goto out; } - stl_p(&rrb->response.fmt, 0); - stq_p(&rrb->response.reserved1, 0); - stl_p(&rrb->response.mdd, FH_MASK_SHM); - stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS); + stl_be_p(&rrb->response.fmt, 0); + stq_be_p(&rrb->response.reserved1, 0); + stl_be_p(&rrb->response.mdd, FH_MASK_SHM); + stw_be_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS); rrb->response.flags = UID_CHECKING_ENABLED; rrb->response.entry_size = sizeof(ClpFhListEntry); i = 0; g_l2 
= LIST_PCI_HDR_LEN; while (g_l2 < initial_l2 && pbdev) { - stw_p(&rrb->response.fh_list[i].device_id, + stw_be_p(&rrb->response.fh_list[i].device_id, pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID)); - stw_p(&rrb->response.fh_list[i].vendor_id, + stw_be_p(&rrb->response.fh_list[i].vendor_id, pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID)); /* Ignore RESERVED devices. */ - stl_p(&rrb->response.fh_list[i].config, + stl_be_p(&rrb->response.fh_list[i].config, pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31); - stl_p(&rrb->response.fh_list[i].fid, pbdev->fid); - stl_p(&rrb->response.fh_list[i].fh, pbdev->fh); + stl_be_p(&rrb->response.fh_list[i].fid, pbdev->fid); + stl_be_p(&rrb->response.fh_list[i].fh, pbdev->fh); g_l2 += sizeof(ClpFhListEntry); /* Add endian check for DPRINTF? */ trace_s390_pci_list_entry(g_l2, - lduw_p(&rrb->response.fh_list[i].vendor_id), - lduw_p(&rrb->response.fh_list[i].device_id), - ldl_p(&rrb->response.fh_list[i].fid), - ldl_p(&rrb->response.fh_list[i].fh)); + lduw_be_p(&rrb->response.fh_list[i].vendor_id), + lduw_be_p(&rrb->response.fh_list[i].device_id), + ldl_be_p(&rrb->response.fh_list[i].fid), + ldl_be_p(&rrb->response.fh_list[i].fh)); pbdev = s390_pci_find_next_avail_dev(s, pbdev); i++; } @@ -138,13 +141,13 @@ static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc) } else { resume_token = pbdev->fh & FH_MASK_INDEX; } - stq_p(&rrb->response.resume_token, resume_token); - stw_p(&rrb->response.hdr.len, g_l2); - stw_p(&rrb->response.hdr.rsp, CLP_RC_OK); + stq_be_p(&rrb->response.resume_token, resume_token); + stw_be_p(&rrb->response.hdr.len, g_l2); + stw_be_p(&rrb->response.hdr.rsp, CLP_RC_OK); out: if (rc) { trace_s390_pci_list(rc); - stw_p(&rrb->response.hdr.rsp, res_code); + stw_be_p(&rrb->response.hdr.rsp, res_code); } return rc; } @@ -172,7 +175,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) return 0; } reqh = (ClpReqHdr *)buffer; - req_len = lduw_p(&reqh->len); + req_len = lduw_be_p(&reqh->len); if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) { s390_program_interrupt(env, PGM_OPERAND, ra); return 0; @@ -184,7 +187,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) return 0; } resh = (ClpRspHdr *)(buffer + req_len); - res_len = lduw_p(&resh->len); + res_len = lduw_be_p(&resh->len); if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) { s390_program_interrupt(env, PGM_OPERAND, ra); return 0; @@ -201,11 +204,11 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) } if (req_len != 32) { - stw_p(&resh->rsp, CLP_RC_LEN); + stw_be_p(&resh->rsp, CLP_RC_LEN); goto out; } - switch (lduw_p(&reqh->cmd)) { + switch (lduw_be_p(&reqh->cmd)) { case CLP_LIST_PCI: { ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer; list_pci(rrb, &cc); @@ -215,9 +218,9 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh; ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh; - pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqsetpci->fh)); + pbdev = s390_pci_find_dev_by_fh(s, ldl_be_p(&reqsetpci->fh)); if (!pbdev) { - stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH); + stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH); goto out; } @@ -225,17 +228,17 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) case CLP_SET_ENABLE_PCI_FN: switch (reqsetpci->ndas) { case 0: - stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS); + stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS); goto out; case 1: break; default: - stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES); + 
stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES); goto out; } if (pbdev->fh & FH_MASK_ENABLE) { - stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); + stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); goto out; } @@ -249,29 +252,29 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) /* Take this opportunity to make sure we are sync'd with host */ if (!s390_pci_get_host_fh(pbdev, &pbdev->fh) || !(pbdev->fh & FH_MASK_ENABLE)) { - stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH); + stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH); goto out; } } pbdev->fh |= FH_MASK_ENABLE; pbdev->state = ZPCI_FS_ENABLED; - stl_p(&ressetpci->fh, pbdev->fh); - stw_p(&ressetpci->hdr.rsp, CLP_RC_OK); + stl_be_p(&ressetpci->fh, pbdev->fh); + stw_be_p(&ressetpci->hdr.rsp, CLP_RC_OK); break; case CLP_SET_DISABLE_PCI_FN: if (!(pbdev->fh & FH_MASK_ENABLE)) { - stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); + stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); goto out; } device_cold_reset(DEVICE(pbdev)); pbdev->fh &= ~FH_MASK_ENABLE; pbdev->state = ZPCI_FS_DISABLED; - stl_p(&ressetpci->fh, pbdev->fh); - stw_p(&ressetpci->hdr.rsp, CLP_RC_OK); + stl_be_p(&ressetpci->fh, pbdev->fh); + stw_be_p(&ressetpci->hdr.rsp, CLP_RC_OK); break; default: trace_s390_pci_unknown("set-pci", reqsetpci->oc); - stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); + stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); break; } break; @@ -280,23 +283,23 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh; ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh; - pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqquery->fh)); + pbdev = s390_pci_find_dev_by_fh(s, ldl_be_p(&reqquery->fh)); if (!pbdev) { - trace_s390_pci_nodev("query", ldl_p(&reqquery->fh)); - stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH); + trace_s390_pci_nodev("query", ldl_be_p(&reqquery->fh)); + stw_be_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH); goto out; } - stq_p(&resquery->sdma, pbdev->zpci_fn.sdma); - stq_p(&resquery->edma, pbdev->zpci_fn.edma); - stw_p(&resquery->pchid, pbdev->zpci_fn.pchid); - stw_p(&resquery->vfn, pbdev->zpci_fn.vfn); + stq_be_p(&resquery->sdma, pbdev->zpci_fn.sdma); + stq_be_p(&resquery->edma, pbdev->zpci_fn.edma); + stw_be_p(&resquery->pchid, pbdev->zpci_fn.pchid); + stw_be_p(&resquery->vfn, pbdev->zpci_fn.vfn); resquery->flags = pbdev->zpci_fn.flags; resquery->pfgid = pbdev->zpci_fn.pfgid; resquery->pft = pbdev->zpci_fn.pft; resquery->fmbl = pbdev->zpci_fn.fmbl; - stl_p(&resquery->fid, pbdev->zpci_fn.fid); - stl_p(&resquery->uid, pbdev->zpci_fn.uid); + stl_be_p(&resquery->fid, pbdev->zpci_fn.fid); + stl_be_p(&resquery->uid, pbdev->zpci_fn.uid); memcpy(resquery->pfip, pbdev->zpci_fn.pfip, CLP_PFIP_NR_SEGMENTS); memcpy(resquery->util_str, pbdev->zpci_fn.util_str, CLP_UTIL_STR_LEN); @@ -304,16 +307,16 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) uint32_t data = pci_get_long(pbdev->pdev->config + PCI_BASE_ADDRESS_0 + (i * 4)); - stl_p(&resquery->bar[i], data); + stl_be_p(&resquery->bar[i], data); resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ? 
ctz64(pbdev->pdev->io_regions[i].size) : 0; trace_s390_pci_bar(i, - ldl_p(&resquery->bar[i]), + ldl_be_p(&resquery->bar[i]), pbdev->pdev->io_regions[i].size, resquery->bar_size[i]); } - stw_p(&resquery->hdr.rsp, CLP_RC_OK); + stw_be_p(&resquery->hdr.rsp, CLP_RC_OK); break; } case CLP_QUERY_PCI_FNGRP: { @@ -326,23 +329,23 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) if (!group) { /* We do not allow access to unknown groups */ /* The group must have been obtained with a vfio device */ - stw_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID); + stw_be_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID); goto out; } resgrp->fr = group->zpci_group.fr; - stq_p(&resgrp->dasm, group->zpci_group.dasm); - stq_p(&resgrp->msia, group->zpci_group.msia); - stw_p(&resgrp->mui, group->zpci_group.mui); - stw_p(&resgrp->i, group->zpci_group.i); - stw_p(&resgrp->maxstbl, group->zpci_group.maxstbl); + stq_be_p(&resgrp->dasm, group->zpci_group.dasm); + stq_be_p(&resgrp->msia, group->zpci_group.msia); + stw_be_p(&resgrp->mui, group->zpci_group.mui); + stw_be_p(&resgrp->i, group->zpci_group.i); + stw_be_p(&resgrp->maxstbl, group->zpci_group.maxstbl); resgrp->version = group->zpci_group.version; resgrp->dtsm = group->zpci_group.dtsm; - stw_p(&resgrp->hdr.rsp, CLP_RC_OK); + stw_be_p(&resgrp->hdr.rsp, CLP_RC_OK); break; } default: - trace_s390_pci_unknown("clp", lduw_p(&reqh->cmd)); - stw_p(&resh->rsp, CLP_RC_CMD); + trace_s390_pci_unknown("clp", lduw_be_p(&reqh->cmd)); + stw_be_p(&resh->rsp, CLP_RC_CMD); break; } @@ -914,7 +917,7 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr, for (i = 0; i < len / 8; i++) { result = memory_region_dispatch_write(mr, offset + i * 8, - ldq_p(buffer + i * 8), + ldq_be_p(buffer + i * 8), MO_64, MEMTXATTRS_UNSPECIFIED); if (result != MEMTX_OK) { s390_program_interrupt(env, PGM_OPERAND, ra); @@ -935,13 +938,13 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr, static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib) { int ret, len; - uint8_t isc = FIB_DATA_ISC(ldl_p(&fib.data)); + uint8_t isc = FIB_DATA_ISC(ldl_be_p(&fib.data)); pbdev->routes.adapter.adapter_id = css_get_adapter_id( CSS_IO_ADAPTER_PCI, isc); - pbdev->summary_ind = get_indicator(ldq_p(&fib.aisb), sizeof(uint64_t)); - len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib.data))) * sizeof(unsigned long); - pbdev->indicator = get_indicator(ldq_p(&fib.aibv), len); + pbdev->summary_ind = get_indicator(ldq_be_p(&fib.aisb), sizeof(uint64_t)); + len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_be_p(&fib.data))) * sizeof(unsigned long); + pbdev->indicator = get_indicator(ldq_be_p(&fib.aibv), len); ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind); if (ret) { @@ -953,13 +956,13 @@ static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib) goto out; } - pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb); - pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data)); - pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv); - pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data)); + pbdev->routes.adapter.summary_addr = ldq_be_p(&fib.aisb); + pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_be_p(&fib.data)); + pbdev->routes.adapter.ind_addr = ldq_be_p(&fib.aibv); + pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_be_p(&fib.data)); pbdev->isc = isc; - pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data)); - pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data)); + pbdev->noi = FIB_DATA_NOI(ldl_be_p(&fib.data)); + 
pbdev->sum = FIB_DATA_SUM(ldl_be_p(&fib.data)); trace_s390_pci_irqs("register", pbdev->routes.adapter.adapter_id); return 0; @@ -994,9 +997,9 @@ static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib, uintptr_t ra) { S390PCIIOMMU *iommu = pbdev->iommu; - uint64_t pba = ldq_p(&fib.pba); - uint64_t pal = ldq_p(&fib.pal); - uint64_t g_iota = ldq_p(&fib.iota); + uint64_t pba = ldq_be_p(&fib.pba); + uint64_t pal = ldq_be_p(&fib.pal); + uint64_t g_iota = ldq_be_p(&fib.iota); uint8_t dt = (g_iota >> 2) & 0x7; uint8_t t = (g_iota >> 11) & 0x1; @@ -1008,17 +1011,25 @@ static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib, } /* currently we only support designation type 1 with translation */ - if (!(dt == ZPCI_IOTA_RTTO && t)) { + if (t && dt != ZPCI_IOTA_RTTO) { error_report("unsupported ioat dt %d t %d", dt, t); s390_program_interrupt(env, PGM_OPERAND, ra); return -EINVAL; + } else if (!t && !pbdev->rtr_avail) { + error_report("relaxed translation not allowed"); + s390_program_interrupt(env, PGM_OPERAND, ra); + return -EINVAL; } iommu->pba = pba; iommu->pal = pal; iommu->g_iota = g_iota; - s390_pci_iommu_enable(iommu); + if (t) { + s390_pci_iommu_enable(iommu); + } else { + s390_pci_iommu_direct_map_enable(iommu); + } return 0; } @@ -1289,7 +1300,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar, } break; case ZPCI_MOD_FC_SET_MEASURE: { - uint64_t fmb_addr = ldq_p(&fib.fmb_addr); + uint64_t fmb_addr = ldq_be_p(&fib.fmb_addr); if (fmb_addr & FMBK_MASK) { cc = ZPCI_PCI_LS_ERR; @@ -1399,17 +1410,17 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar, return 0; } - stq_p(&fib.pba, pbdev->iommu->pba); - stq_p(&fib.pal, pbdev->iommu->pal); - stq_p(&fib.iota, pbdev->iommu->g_iota); - stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr); - stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr); - stq_p(&fib.fmb_addr, pbdev->fmb_addr); + stq_be_p(&fib.pba, pbdev->iommu->pba); + stq_be_p(&fib.pal, pbdev->iommu->pal); + stq_be_p(&fib.iota, pbdev->iommu->g_iota); + stq_be_p(&fib.aibv, pbdev->routes.adapter.ind_addr); + stq_be_p(&fib.aisb, pbdev->routes.adapter.summary_addr); + stq_be_p(&fib.fmb_addr, pbdev->fmb_addr); data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) | ((uint32_t)pbdev->routes.adapter.ind_offset << 8) | ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset; - stl_p(&fib.data, data); + stl_be_p(&fib.data, data); out: if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) { diff --git a/hw/s390x/s390-pci-vfio.c b/hw/s390x/s390-pci-vfio.c index 7dbbc76823a..aaf91319b4e 100644 --- a/hw/s390x/s390-pci-vfio.c +++ b/hw/s390x/s390-pci-vfio.c @@ -20,7 +20,8 @@ #include "hw/s390x/s390-pci-clp.h" #include "hw/s390x/s390-pci-vfio.h" #include "hw/vfio/pci.h" -#include "hw/vfio/vfio-common.h" +#include "hw/vfio/vfio-container.h" +#include "hw/vfio/vfio-helpers.h" /* * Get the current DMA available count from vfio. Returns true if vfio is @@ -131,13 +132,28 @@ static void s390_pci_read_base(S390PCIBusDevice *pbdev, /* Store function type separately for type-specific behavior */ pbdev->pft = cap->pft; + /* + * If the device is a passthrough ISM device, disallow relaxed + * translation. + */ + if (pbdev->pft == ZPCI_PFT_ISM) { + pbdev->rtr_avail = false; + } + /* * If appropriate, reduce the size of the supported DMA aperture reported - * to the guest based upon the vfio DMA limit. + * to the guest based upon the vfio DMA limit. 
This is applicable for + * devices that are guaranteed to not use relaxed translation. If the + * device is capable of relaxed translation then we must advertise the + * full aperture. In this case, if translation is used then we will + * rely on the vfio DMA limit counting and use RPCIT CC1 / status 16 + * to request that the guest free DMA mappings as necessary. */ - vfio_size = pbdev->iommu->max_dma_limit << TARGET_PAGE_BITS; - if (vfio_size > 0 && vfio_size < cap->end_dma - cap->start_dma + 1) { - pbdev->zpci_fn.edma = cap->start_dma + vfio_size - 1; + if (!pbdev->rtr_avail) { + vfio_size = pbdev->iommu->max_dma_limit << TARGET_PAGE_BITS; + if (vfio_size > 0 && vfio_size < cap->end_dma - cap->start_dma + 1) { + pbdev->zpci_fn.edma = cap->start_dma + vfio_size - 1; + } } } @@ -223,8 +239,11 @@ static void s390_pci_read_group(S390PCIBusDevice *pbdev, pbdev->pci_group = s390_group_create(pbdev->zpci_fn.pfgid, start_gid); resgrp = &pbdev->pci_group->zpci_group; + if (pbdev->rtr_avail) { + resgrp->fr |= CLP_RSP_QPCIG_MASK_RTR; + } if (cap->flags & VFIO_DEVICE_INFO_ZPCI_FLAG_REFRESH) { - resgrp->fr = 1; + resgrp->fr |= CLP_RSP_QPCIG_MASK_REFRESH; } resgrp->dasm = cap->dasm; resgrp->msia = cap->msi_addr; @@ -349,6 +368,4 @@ void s390_pci_get_clp_info(S390PCIBusDevice *pbdev) s390_pci_read_group(pbdev, info); s390_pci_read_util(pbdev, info); s390_pci_read_pfip(pbdev, info); - - return; } diff --git a/hw/s390x/s390-skeys-kvm.c b/hw/s390x/s390-skeys-kvm.c index 3ff9d94b802..f3056d6d25c 100644 --- a/hw/s390x/s390-skeys-kvm.c +++ b/hw/s390x/s390-skeys-kvm.c @@ -11,7 +11,7 @@ #include "qemu/osdep.h" #include "hw/s390x/storage-keys.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "qemu/error-report.h" #include "qemu/module.h" @@ -52,7 +52,7 @@ static int kvm_s390_skeys_set(S390SKeysState *ss, uint64_t start_gfn, return kvm_vm_ioctl(kvm_state, KVM_S390_SET_SKEYS, &args); } -static void kvm_s390_skeys_class_init(ObjectClass *oc, void *data) +static void kvm_s390_skeys_class_init(ObjectClass *oc, const void *data) { S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/s390x/s390-skeys.c b/hw/s390x/s390-skeys.c index bf22d6863e7..8eeecfd58fc 100644 --- a/hw/s390x/s390-skeys.c +++ b/hw/s390x/s390-skeys.c @@ -11,16 +11,17 @@ #include "qemu/osdep.h" #include "qemu/units.h" -#include "hw/boards.h" +#include "exec/target_page.h" +#include "hw/s390x/s390-virtio-ccw.h" #include "hw/qdev-properties.h" #include "hw/s390x/storage-keys.h" #include "qapi/error.h" -#include "qapi/qapi-commands-misc-target.h" -#include "qapi/qmp/qdict.h" +#include "qapi/qapi-commands-machine.h" +#include "qobject/qdict.h" #include "qemu/error-report.h" -#include "sysemu/memory_mapping.h" -#include "exec/address-spaces.h" -#include "sysemu/kvm.h" +#include "system/memory_mapping.h" +#include "system/address-spaces.h" +#include "system/kvm.h" #include "migration/qemu-file-types.h" #include "migration/register.h" #include "trace.h" @@ -142,7 +143,7 @@ void hmp_dump_skeys(Monitor *mon, const QDict *qdict) } } -void qmp_dump_skeys(const char *filename, Error **errp) +void s390_qmp_dump_skeys(const char *filename, Error **errp) { S390SKeysState *ss = s390_get_skeys_device(); S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); @@ -251,9 +252,9 @@ static bool qemu_s390_enable_skeys(S390SKeysState *ss) * g_once_init_enter() is good enough. 
*/ if (g_once_init_enter(&initialized)) { - MachineState *machine = MACHINE(qdev_get_machine()); + S390CcwMachineState *s390ms = S390_CCW_MACHINE(qdev_get_machine()); - skeys->key_count = machine->ram_size / TARGET_PAGE_SIZE; + skeys->key_count = s390_get_memory_limit(s390ms) / TARGET_PAGE_SIZE; skeys->keydata = g_malloc0(skeys->key_count); g_once_init_leave(&initialized, 1); } @@ -302,7 +303,7 @@ static int qemu_s390_skeys_get(S390SKeysState *ss, uint64_t start_gfn, return 0; } -static void qemu_s390_skeys_class_init(ObjectClass *oc, void *data) +static void qemu_s390_skeys_class_init(ObjectClass *oc, const void *data) { S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -316,14 +317,6 @@ static void qemu_s390_skeys_class_init(ObjectClass *oc, void *data) dc->user_creatable = false; } -static const TypeInfo qemu_s390_skeys_info = { - .name = TYPE_QEMU_S390_SKEYS, - .parent = TYPE_S390_SKEYS, - .instance_size = sizeof(QEMUS390SKeysState), - .class_init = qemu_s390_skeys_class_init, - .class_size = sizeof(S390SKeysClass), -}; - static void s390_storage_keys_save(QEMUFile *f, void *opaque) { S390SKeysState *ss = S390_SKEYS(opaque); @@ -469,40 +462,39 @@ static void s390_skeys_realize(DeviceState *dev, Error **errp) { S390SKeysState *ss = S390_SKEYS(dev); - if (ss->migration_enabled) { - register_savevm_live(TYPE_S390_SKEYS, 0, 1, - &savevm_s390_storage_keys, ss); - } + register_savevm_live(TYPE_S390_SKEYS, 0, 1, &savevm_s390_storage_keys, ss); } -static Property s390_skeys_props[] = { - DEFINE_PROP_BOOL("migration-enabled", S390SKeysState, migration_enabled, true), - DEFINE_PROP_END_OF_LIST(), -}; - -static void s390_skeys_class_init(ObjectClass *oc, void *data) +static void s390_skeys_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->hotpluggable = false; dc->realize = s390_skeys_realize; - device_class_set_props(dc, s390_skeys_props); set_bit(DEVICE_CATEGORY_MISC, dc->categories); } -static const TypeInfo s390_skeys_info = { - .name = TYPE_S390_SKEYS, - .parent = TYPE_DEVICE, - .instance_size = sizeof(S390SKeysState), - .class_init = s390_skeys_class_init, - .class_size = sizeof(S390SKeysClass), - .abstract = true, +static const TypeInfo s390_skeys_types[] = { + { + .name = TYPE_DUMP_SKEYS_INTERFACE, + .parent = TYPE_INTERFACE, + .class_size = sizeof(DumpSKeysInterface), + }, + { + .name = TYPE_S390_SKEYS, + .parent = TYPE_DEVICE, + .instance_size = sizeof(S390SKeysState), + .class_init = s390_skeys_class_init, + .class_size = sizeof(S390SKeysClass), + .abstract = true, + }, + { + .name = TYPE_QEMU_S390_SKEYS, + .parent = TYPE_S390_SKEYS, + .instance_size = sizeof(QEMUS390SKeysState), + .class_init = qemu_s390_skeys_class_init, + .class_size = sizeof(S390SKeysClass), + }, }; -static void qemu_s390_skeys_register_types(void) -{ - type_register_static(&s390_skeys_info); - type_register_static(&qemu_s390_skeys_info); -} - -type_init(qemu_s390_skeys_register_types) +DEFINE_TYPES(s390_skeys_types) diff --git a/hw/s390x/s390-stattrib-kvm.c b/hw/s390x/s390-stattrib-kvm.c index eeaa8110981..e1fee361dc3 100644 --- a/hw/s390x/s390-stattrib-kvm.c +++ b/hw/s390x/s390-stattrib-kvm.c @@ -10,12 +10,13 @@ */ #include "qemu/osdep.h" -#include "hw/boards.h" +#include "hw/s390x/s390-virtio-ccw.h" #include "migration/qemu-file.h" #include "hw/s390x/storage-attributes.h" #include "qemu/error-report.h" -#include "sysemu/kvm.h" -#include "exec/ram_addr.h" +#include "system/kvm.h" +#include "system/memory_mapping.h" +#include 
"system/ram_addr.h" #include "kvm/kvm_s390x.h" #include "qapi/error.h" @@ -84,8 +85,8 @@ static int kvm_s390_stattrib_set_stattr(S390StAttribState *sa, uint8_t *values) { KVMS390StAttribState *sas = KVM_S390_STATTRIB(sa); - MachineState *machine = MACHINE(qdev_get_machine()); - unsigned long max = machine->ram_size / TARGET_PAGE_SIZE; + S390CcwMachineState *s390ms = S390_CCW_MACHINE(qdev_get_machine()); + unsigned long max = s390_get_memory_limit(s390ms) / TARGET_PAGE_SIZE; if (start_gfn + count > max) { error_report("Out of memory bounds when setting storage attributes"); @@ -103,39 +104,57 @@ static int kvm_s390_stattrib_set_stattr(S390StAttribState *sa, static void kvm_s390_stattrib_synchronize(S390StAttribState *sa) { KVMS390StAttribState *sas = KVM_S390_STATTRIB(sa); - MachineState *machine = MACHINE(qdev_get_machine()); - unsigned long max = machine->ram_size / TARGET_PAGE_SIZE; - /* We do not need to reach the maximum buffer size allowed */ - unsigned long cx, len = KVM_S390_SKEYS_MAX / 2; + S390CcwMachineState *s390ms = S390_CCW_MACHINE(qdev_get_machine()); + unsigned long max = s390_get_memory_limit(s390ms) / TARGET_PAGE_SIZE; + unsigned long start_gfn, end_gfn, pages; + GuestPhysBlockList guest_phys_blocks; + GuestPhysBlock *block; int r; struct kvm_s390_cmma_log clog = { .flags = 0, .mask = ~0ULL, }; - if (sas->incoming_buffer) { - for (cx = 0; cx + len <= max; cx += len) { - clog.start_gfn = cx; - clog.count = len; - clog.values = (uint64_t)(sas->incoming_buffer + cx); - r = kvm_vm_ioctl(kvm_state, KVM_S390_SET_CMMA_BITS, &clog); - if (r) { - error_report("KVM_S390_SET_CMMA_BITS failed: %s", strerror(-r)); - return; - } - } - if (cx < max) { - clog.start_gfn = cx; - clog.count = max - cx; - clog.values = (uint64_t)(sas->incoming_buffer + cx); + if (!sas->incoming_buffer) { + return; + } + guest_phys_blocks_init(&guest_phys_blocks); + guest_phys_blocks_append(&guest_phys_blocks); + + QTAILQ_FOREACH(block, &guest_phys_blocks.head, next) { + assert(QEMU_IS_ALIGNED(block->target_start, TARGET_PAGE_SIZE)); + assert(QEMU_IS_ALIGNED(block->target_end, TARGET_PAGE_SIZE)); + + start_gfn = block->target_start / TARGET_PAGE_SIZE; + end_gfn = block->target_end / TARGET_PAGE_SIZE; + + while (start_gfn < end_gfn) { + /* Don't exceed the maximum buffer size. */ + pages = MIN(end_gfn - start_gfn, KVM_S390_SKEYS_MAX / 2); + + /* + * If we ever get guest physical memory beyond the configured + * memory limit, something went very wrong. 
+ */ + assert(start_gfn + pages <= max); + + clog.start_gfn = start_gfn; + clog.count = pages; + clog.values = (uint64_t)(sas->incoming_buffer + start_gfn); r = kvm_vm_ioctl(kvm_state, KVM_S390_SET_CMMA_BITS, &clog); if (r) { error_report("KVM_S390_SET_CMMA_BITS failed: %s", strerror(-r)); + goto out; } + + start_gfn += pages; } - g_free(sas->incoming_buffer); - sas->incoming_buffer = NULL; } + +out: + guest_phys_blocks_free(&guest_phys_blocks); + g_free(sas->incoming_buffer); + sas->incoming_buffer = NULL; } static int kvm_s390_stattrib_set_migrationmode(S390StAttribState *sa, bool val, @@ -166,10 +185,10 @@ static long long kvm_s390_stattrib_get_dirtycount(S390StAttribState *sa) static int kvm_s390_stattrib_get_active(S390StAttribState *sa) { - return kvm_s390_cmma_active() && sa->migration_enabled; + return kvm_s390_cmma_active(); } -static void kvm_s390_stattrib_class_init(ObjectClass *oc, void *data) +static void kvm_s390_stattrib_class_init(ObjectClass *oc, const void *data) { S390StAttribClass *sac = S390_STATTRIB_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/s390x/s390-stattrib.c b/hw/s390x/s390-stattrib.c index c4259b53274..13a678a8037 100644 --- a/hw/s390x/s390-stattrib.c +++ b/hw/s390x/s390-stattrib.c @@ -16,9 +16,9 @@ #include "hw/qdev-properties.h" #include "hw/s390x/storage-attributes.h" #include "qemu/error-report.h" -#include "exec/ram_addr.h" +#include "system/ram_addr.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "cpu.h" /* 512KiB cover 2GB of guest memory */ @@ -304,10 +304,10 @@ static int qemu_s390_set_migrationmode_stub(S390StAttribState *sa, bool value, static int qemu_s390_get_active(S390StAttribState *sa) { - return sa->migration_enabled; + return true; } -static void qemu_s390_stattrib_class_init(ObjectClass *oc, void *data) +static void qemu_s390_stattrib_class_init(ObjectClass *oc, const void *data) { S390StAttribClass *sa_cl = S390_STATTRIB_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -338,7 +338,7 @@ static const TypeInfo qemu_s390_stattrib_info = { static SaveVMHandlers savevm_s390_stattrib_handlers = { .save_setup = cmma_save_setup, .save_live_iterate = cmma_save_iterate, - .save_live_complete_precopy = cmma_save_complete, + .save_complete = cmma_save_complete, .state_pending_exact = cmma_state_pending, .state_pending_estimate = cmma_state_pending, .save_cleanup = cmma_save_cleanup, @@ -360,19 +360,13 @@ static void s390_stattrib_realize(DeviceState *dev, Error **errp) &savevm_s390_stattrib_handlers, dev); } -static Property s390_stattrib_props[] = { - DEFINE_PROP_BOOL("migration-enabled", S390StAttribState, migration_enabled, true), - DEFINE_PROP_END_OF_LIST(), -}; - -static void s390_stattrib_class_init(ObjectClass *oc, void *data) +static void s390_stattrib_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->hotpluggable = false; set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->realize = s390_stattrib_realize; - device_class_set_props(dc, s390_stattrib_props); } static void s390_stattrib_instance_init(Object *obj) diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index c483ff8064d..a79bd13275b 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -13,14 +13,11 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "exec/ram_addr.h" -#include "exec/confidential-guest-support.h" +#include "system/ram_addr.h" +#include "system/confidential-guest-support.h" #include "hw/boards.h" -#include 
"hw/s390x/s390-virtio-hcall.h" #include "hw/s390x/sclp.h" #include "hw/s390x/s390_flic.h" -#include "hw/s390x/ioinst.h" -#include "hw/s390x/css.h" #include "virtio-ccw.h" #include "qemu/config-file.h" #include "qemu/ctype.h" @@ -29,7 +26,7 @@ #include "qemu/qemu-print.h" #include "qemu/units.h" #include "hw/s390x/s390-pci-bus.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "hw/s390x/storage-keys.h" #include "hw/s390x/storage-attributes.h" #include "hw/s390x/event-facility.h" @@ -38,16 +35,21 @@ #include "hw/s390x/css-bridge.h" #include "hw/s390x/ap-bridge.h" #include "migration/register.h" -#include "cpu_models.h" +#include "target/s390x/cpu_models.h" #include "hw/nmi.h" #include "hw/qdev-properties.h" #include "hw/s390x/tod.h" -#include "sysemu/sysemu.h" -#include "sysemu/cpus.h" +#include "system/system.h" +#include "system/cpus.h" +#include "system/hostmem.h" #include "target/s390x/kvm/pv.h" #include "migration/blocker.h" #include "qapi/visitor.h" #include "hw/s390x/cpu-topology.h" +#include "kvm/kvm_s390x.h" +#include "hw/virtio/virtio-md-pci.h" +#include "hw/s390x/virtio-ccw-md.h" +#include "system/replay.h" #include CONFIG_DEVICES static Error *pv_mig_blocker; @@ -124,70 +126,86 @@ static void subsystem_reset(void) } } -static int virtio_ccw_hcall_notify(const uint64_t *args) +static void s390_set_memory_limit(S390CcwMachineState *s390ms, + uint64_t new_limit) { - uint64_t subch_id = args[0]; - uint64_t data = args[1]; - SubchDev *sch; - VirtIODevice *vdev; - int cssid, ssid, schid, m; - uint16_t vq_idx = data; + uint64_t hw_limit = 0; + int ret = 0; - if (ioinst_disassemble_sch_ident(subch_id, &m, &cssid, &ssid, &schid)) { - return -EINVAL; + assert(!s390ms->memory_limit && new_limit); + if (kvm_enabled()) { + ret = kvm_s390_set_mem_limit(new_limit, &hw_limit); } - sch = css_find_subch(m, cssid, ssid, schid); - if (!sch || !css_subch_visible(sch)) { - return -EINVAL; - } - - vdev = virtio_ccw_get_vdev(sch); - if (vq_idx >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, vq_idx)) { - return -EINVAL; + if (ret == -E2BIG) { + error_report("host supports a maximum of %" PRIu64 " GB", + hw_limit / GiB); + exit(EXIT_FAILURE); + } else if (ret) { + error_report("setting the guest size failed"); + exit(EXIT_FAILURE); } + s390ms->memory_limit = new_limit; +} - if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFICATION_DATA)) { - virtio_queue_set_shadow_avail_idx(virtio_get_queue(vdev, vq_idx), - (data >> 16) & 0xFFFF); +static void s390_set_max_pagesize(S390CcwMachineState *s390ms, + uint64_t pagesize) +{ + assert(!s390ms->max_pagesize && pagesize); + if (kvm_enabled()) { + kvm_s390_set_max_pagesize(pagesize, &error_fatal); } - - virtio_queue_notify(vdev, vq_idx); - return 0; + s390ms->max_pagesize = pagesize; } -static int virtio_ccw_hcall_early_printk(const uint64_t *args) +static void s390_memory_init(MachineState *machine) { - uint64_t mem = args[0]; - MachineState *ms = MACHINE(qdev_get_machine()); + S390CcwMachineState *s390ms = S390_CCW_MACHINE(machine); + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *ram = machine->ram; + uint64_t ram_size = memory_region_size(ram); + uint64_t devmem_base, devmem_size; - if (mem < ms->ram_size) { - /* Early printk */ - return 0; + if (!QEMU_IS_ALIGNED(ram_size, 1 * MiB)) { + /* + * SCLP cannot possibly expose smaller granularity right now and KVM + * cannot handle smaller granularity. As we don't support NUMA, the + * region size directly corresponds to machine->ram_size, and the region + * is a single RAM memory region. 
+ */ + error_report("ram size must be multiples of 1 MiB"); + exit(EXIT_FAILURE); } - return -EINVAL; -} -static void virtio_ccw_register_hcalls(void) -{ - s390_register_virtio_hypercall(KVM_S390_VIRTIO_CCW_NOTIFY, - virtio_ccw_hcall_notify); - /* Tolerate early printk. */ - s390_register_virtio_hypercall(KVM_S390_VIRTIO_NOTIFY, - virtio_ccw_hcall_early_printk); -} + devmem_size = 0; + devmem_base = ram_size; +#ifdef CONFIG_MEM_DEVICE + if (machine->ram_size < machine->maxram_size) { -static void s390_memory_init(MemoryRegion *ram) -{ - MemoryRegion *sysmem = get_system_memory(); + /* + * Make sure memory devices have a sane default alignment, even + * when weird initial memory sizes are specified. + */ + devmem_base = QEMU_ALIGN_UP(devmem_base, 1 * GiB); + devmem_size = machine->maxram_size - machine->ram_size; + } +#endif + s390_set_memory_limit(s390ms, devmem_base + devmem_size); - /* allocate RAM for core */ + /* Map the initial memory. Must happen after setting the memory limit. */ memory_region_add_subregion(sysmem, 0, ram); + /* Initialize address space for memory devices. */ +#ifdef CONFIG_MEM_DEVICE + if (devmem_size) { + machine_memory_devices_init(machine, devmem_base, devmem_size); + } +#endif /* CONFIG_MEM_DEVICE */ + /* * Configure the maximum page size. As no memory devices were created * yet, this is the page size of initial memory only. */ - s390_set_max_pagesize(qemu_maxrampagesize(), &error_fatal); + s390_set_max_pagesize(s390ms, qemu_maxrampagesize()); /* Initialize storage key device */ s390_skeys_init(); /* Initialize storage attributes device */ @@ -197,11 +215,10 @@ static void s390_memory_init(MemoryRegion *ram) static void s390_init_ipl_dev(const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *firmware, - const char *netboot_fw, bool enforce_bios) + bool enforce_bios) { Object *new = object_new(TYPE_S390_IPL); DeviceState *dev = DEVICE(new); - char *netboot_fw_prop; if (kernel_filename) { qdev_prop_set_string(dev, "kernel", kernel_filename); @@ -212,11 +229,6 @@ static void s390_init_ipl_dev(const char *kernel_filename, qdev_prop_set_string(dev, "cmdline", kernel_cmdline); qdev_prop_set_string(dev, "firmware", firmware); qdev_prop_set_bit(dev, "enforce_bios", enforce_bios); - netboot_fw_prop = object_property_get_str(new, "netboot_fw", &error_abort); - if (!strlen(netboot_fw_prop)) { - qdev_prop_set_string(dev, "netboot_fw", netboot_fw); - } - g_free(netboot_fw_prop); object_property_add_child(qdev_get_machine(), TYPE_S390_IPL, new); object_unref(new); @@ -248,9 +260,21 @@ static void s390_create_sclpconsole(SCLPDevice *sclp, qdev_realize_and_unref(dev, ev_fac_bus, &error_fatal); } +static void s390_create_sclpcpi(SCLPDevice *sclp) +{ + SCLPEventFacility *ef = sclp->event_facility; + BusState *ev_fac_bus = sclp_get_event_facility_bus(ef); + DeviceState *dev; + + dev = qdev_new(TYPE_SCLP_EVENT_CPI); + object_property_add_child(OBJECT(ef), "sclpcpi", OBJECT(dev)); + qdev_realize_and_unref(dev, ev_fac_bus, &error_fatal); +} + static void ccw_init(MachineState *machine) { MachineClass *mc = MACHINE_GET_CLASS(machine); + S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); S390CcwMachineState *ms = S390_CCW_MACHINE(machine); int ret; VirtualCssBus *css_bus; @@ -261,7 +285,7 @@ static void ccw_init(MachineState *machine) qdev_realize_and_unref(DEVICE(ms->sclp), NULL, &error_fatal); /* init memory + setup max page size. 
Required for the CPU model */ - s390_memory_init(machine->ram); + s390_memory_init(machine); /* init CPUs (incl. CPU model) early so s390_has_feature() works */ s390_init_cpus(machine); @@ -284,16 +308,13 @@ static void ccw_init(MachineState *machine) s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline, machine->initrd_filename, machine->firmware ?: "s390-ccw.img", - "s390-netboot.img", true); + true); dev = qdev_new(TYPE_S390_PCI_HOST_BRIDGE); object_property_add_child(qdev_get_machine(), TYPE_S390_PCI_HOST_BRIDGE, OBJECT(dev)); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - /* register hypercalls */ - virtio_ccw_register_hcalls(); - s390_enable_css_support(s390_cpu_addr2state(0)); ret = css_create_css_image(VIRTUAL_CSSID, true); @@ -314,6 +335,12 @@ static void ccw_init(MachineState *machine) /* init the TOD clock */ s390_init_tod(); + + /* init SCLP event Control-Program Identification */ + if (s390mc->use_cpi) { + s390_create_sclpcpi(ms->sclp); + } + } static void s390_cpu_plug(HotplugHandler *hotplug_dev, @@ -356,7 +383,8 @@ static void s390_machine_unprotect(S390CcwMachineState *ms) ram_block_discard_disable(false); } -static int s390_machine_protect(S390CcwMachineState *ms) +static int s390_machine_protect(S390CcwMachineState *ms, + struct S390PVResponse *pv_resp) { Error *local_err = NULL; int rc; @@ -399,19 +427,19 @@ static int s390_machine_protect(S390CcwMachineState *ms) } /* Set SE header and unpack */ - rc = s390_ipl_prepare_pv_header(&local_err); + rc = s390_ipl_prepare_pv_header(pv_resp, &local_err); if (rc) { goto out_err; } /* Decrypt image */ - rc = s390_ipl_pv_unpack(); + rc = s390_ipl_pv_unpack(pv_resp); if (rc) { goto out_err; } /* Verify integrity */ - rc = s390_pv_verify(); + rc = s390_pv_verify(pv_resp); if (rc) { goto out_err; } @@ -440,13 +468,26 @@ static void s390_pv_prepare_reset(S390CcwMachineState *ms) s390_pv_prep_reset(); } -static void s390_machine_reset(MachineState *machine, ShutdownCause reason) +static void s390_machine_reset(MachineState *machine, ResetType type) { S390CcwMachineState *ms = S390_CCW_MACHINE(machine); + struct S390PVResponse pv_resp; enum s390_reset reset_type; CPUState *cs, *t; S390CPU *cpu; + /* + * Temporarily drop the record/replay mutex to let rr_cpu_thread_fn() + * process the run_on_cpu() requests below. This is safe, because at this + * point one of the following is true: + * - All CPU threads are not running, either because the machine is being + * initialized, or because the guest requested a reset using diag 308. + * There is no risk to desync the record/replay state. + * - A snapshot is about to be loaded. The record/replay state consistency + * is not important. + */ + replay_mutex_unlock(); + /* get the reset parameters, reset them once done */ s390_ipl_get_reset_request(&cs, &reset_type); @@ -472,7 +513,7 @@ static void s390_machine_reset(MachineState *machine, ShutdownCause reason) * Device reset includes CPU clear resets so this has to be * done AFTER the unprotect call above. */ - qemu_devices_reset(reason); + qemu_devices_reset(type); s390_crypto_reset(); /* configure and start the ipl CPU only */ @@ -519,14 +560,14 @@ static void s390_machine_reset(MachineState *machine, ShutdownCause reason) } run_on_cpu(cs, s390_do_cpu_reset, RUN_ON_CPU_NULL); - if (s390_machine_protect(ms)) { - s390_pv_inject_reset_error(cs); + if (s390_machine_protect(ms, &pv_resp)) { + s390_pv_inject_reset_error(cs, pv_resp); /* * Continue after the diag308 so the guest knows something * went wrong. 
*/ s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu); - return; + goto out_lock; } run_on_cpu(cs, s390_do_cpu_load_normal, RUN_ON_CPU_NULL); @@ -539,13 +580,54 @@ static void s390_machine_reset(MachineState *machine, ShutdownCause reason) run_on_cpu(t, s390_do_cpu_set_diag318, RUN_ON_CPU_HOST_ULONG(0)); } s390_ipl_clear_reset_request(); + +out_lock: + /* + * Re-take the record/replay mutex, temporarily dropping the BQL in order + * to satisfy the ordering requirements. + */ + bql_unlock(); + replay_mutex_lock(); + bql_lock(); +} + +static void s390_machine_device_pre_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_CCW)) { + virtio_ccw_md_pre_plug(VIRTIO_MD_CCW(dev), MACHINE(hotplug_dev), errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) { + virtio_md_pci_pre_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp); + } } static void s390_machine_device_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { + S390CcwMachineState *s390ms = S390_CCW_MACHINE(hotplug_dev); + if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { s390_cpu_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_CCW) || + object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) { + /* + * At this point, the device is realized and set all memdevs mapped, so + * qemu_maxrampagesize() will pick up the page sizes of these memdevs + * as well. Before we plug the device and expose any RAM memory regions + * to the system, make sure we don't exceed the previously set max page + * size. While only relevant for KVM, there is not really any use case + * for this with TCG, so we'll unconditionally reject it. + */ + if (qemu_maxrampagesize() != s390ms->max_pagesize) { + error_setg(errp, "Memory device uses a bigger page size than" + " initial memory"); + return; + } + if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_CCW)) { + virtio_ccw_md_plug(VIRTIO_MD_CCW(dev), MACHINE(hotplug_dev), errp); + } else { + virtio_md_pci_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp); + } } } @@ -554,10 +636,25 @@ static void s390_machine_device_unplug_request(HotplugHandler *hotplug_dev, { if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { error_setg(errp, "CPU hot unplug not supported on this machine"); - return; + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_CCW)) { + virtio_ccw_md_unplug_request(VIRTIO_MD_CCW(dev), MACHINE(hotplug_dev), + errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) { + virtio_md_pci_unplug_request(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), + errp); } } +static void s390_machine_device_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_CCW)) { + virtio_ccw_md_unplug(VIRTIO_MD_CCW(dev), MACHINE(hotplug_dev), errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) { + virtio_md_pci_unplug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp); + } + } + static CpuInstanceProperties s390_cpu_index_to_props(MachineState *ms, unsigned cpu_index) { @@ -604,7 +701,9 @@ static const CPUArchIdList *s390_possible_cpu_arch_ids(MachineState *ms) static HotplugHandler *s390_get_hotplug_handler(MachineState *machine, DeviceState *dev) { - if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) || + object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_CCW) || + object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MD_PCI)) { return 
HOTPLUG_HANDLER(machine); } return NULL; @@ -667,50 +766,6 @@ static inline void machine_set_dea_key_wrap(Object *obj, bool value, ms->dea_key_wrap = value; } -static S390CcwMachineClass *current_mc; - -/* - * Get the class of the s390-ccw-virtio machine that is currently in use. - * Note: libvirt is using the "none" machine to probe for the features of the - * host CPU, so in case this is called with the "none" machine, the function - * returns the TYPE_S390_CCW_MACHINE base class. In this base class, all the - * various "*_allowed" variables are enabled, so that the *_allowed() wrappers - * below return the correct default value for the "none" machine. - * - * Attention! Do *not* add additional new wrappers for CPU features (e.g. like - * the ri_allowed() wrapper) via this mechanism anymore. CPU features should - * be handled via the CPU models, i.e. checking with cpu_model_allowed() during - * CPU initialization and s390_has_feat() later should be sufficient. - */ -static S390CcwMachineClass *get_machine_class(void) -{ - if (unlikely(!current_mc)) { - /* - * No s390 ccw machine was instantiated, we are likely to - * be called for the 'none' machine. The properties will - * have their after-initialization values. - */ - current_mc = S390_CCW_MACHINE_CLASS( - object_class_by_name(TYPE_S390_CCW_MACHINE)); - } - return current_mc; -} - -bool ri_allowed(void) -{ - return get_machine_class()->ri_allowed; -} - -bool cpu_model_allowed(void) -{ - return get_machine_class()->cpu_model_allowed; -} - -bool hpage_1m_allowed(void) -{ - return get_machine_class()->hpage_1m_allowed; -} - static void machine_get_loadparm(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) @@ -728,48 +783,30 @@ static void machine_set_loadparm(Object *obj, Visitor *v, { S390CcwMachineState *ms = S390_CCW_MACHINE(obj); char *val; - int i; if (!visit_type_str(v, name, &val, errp)) { return; } - for (i = 0; i < sizeof(ms->loadparm) && val[i]; i++) { - uint8_t c = qemu_toupper(val[i]); /* mimic HMC */ - - if (('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || (c == '.') || - (c == ' ')) { - ms->loadparm[i] = c; - } else { - error_setg(errp, "LOADPARM: invalid character '%c' (ASCII 0x%02x)", - c, c); - return; - } - } - - for (; i < sizeof(ms->loadparm); i++) { - ms->loadparm[i] = ' '; /* pad right with spaces */ - } + s390_ipl_fmt_loadparm(ms->loadparm, val, errp); + g_free(val); } -static void ccw_machine_class_init(ObjectClass *oc, void *data) +static void ccw_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); NMIClass *nc = NMI_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); + DumpSKeysInterface *dsi = DUMP_SKEYS_INTERFACE_CLASS(oc); - s390mc->ri_allowed = true; - s390mc->cpu_model_allowed = true; - s390mc->hpage_1m_allowed = true; s390mc->max_threads = 1; - mc->init = ccw_init; + s390mc->use_cpi = true; mc->reset = s390_machine_reset; mc->block_default_type = IF_VIRTIO; mc->no_cdrom = 1; mc->no_floppy = 1; mc->no_parallel = 1; - mc->no_sdcard = 1; mc->max_cpus = S390_MAX_CPUS; mc->has_hotpluggable_cpus = true; mc->smp_props.books_supported = true; @@ -780,11 +817,14 @@ static void ccw_machine_class_init(ObjectClass *oc, void *data) mc->possible_cpu_arch_ids = s390_possible_cpu_arch_ids; /* it is overridden with 'host' cpu *in kvm_arch_init* */ mc->default_cpu_type = S390_CPU_TYPE_NAME("qemu"); + hc->pre_plug = s390_machine_device_pre_plug; hc->plug = s390_machine_device_plug; 
hc->unplug_request = s390_machine_device_unplug_request; + hc->unplug = s390_machine_device_unplug; nc->nmi_monitor_handler = s390_nmi; mc->default_ram_id = "s390.ram"; mc->default_nic = "virtio-net-ccw"; + dsi->qmp_dump_skeys = s390_qmp_dump_skeys; object_class_property_add_bool(oc, "aes-key-wrap", machine_get_aes_key_wrap, @@ -823,39 +863,39 @@ static const TypeInfo ccw_machine_info = { .instance_init = s390_machine_initfn, .class_size = sizeof(S390CcwMachineClass), .class_init = ccw_machine_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_NMI }, { TYPE_HOTPLUG_HANDLER}, + { TYPE_DUMP_SKEYS_INTERFACE}, { } }, }; #define DEFINE_CCW_MACHINE_IMPL(latest, ...) \ + static void MACHINE_VER_SYM(mach_init, ccw, __VA_ARGS__)(MachineState *mach) \ + { \ + MACHINE_VER_SYM(instance_options, ccw, __VA_ARGS__)(mach); \ + ccw_init(mach); \ + } \ static void MACHINE_VER_SYM(class_init, ccw, __VA_ARGS__)( \ ObjectClass *oc, \ - void *data) \ + const void *data) \ { \ MachineClass *mc = MACHINE_CLASS(oc); \ MACHINE_VER_SYM(class_options, ccw, __VA_ARGS__)(mc); \ mc->desc = "Virtual s390x machine (version " MACHINE_VER_STR(__VA_ARGS__) ")"; \ + mc->init = MACHINE_VER_SYM(mach_init, ccw, __VA_ARGS__); \ MACHINE_VER_DEPRECATION(__VA_ARGS__); \ if (latest) { \ mc->alias = "s390-ccw-virtio"; \ mc->is_default = true; \ } \ } \ - static void MACHINE_VER_SYM(instance_init, ccw, __VA_ARGS__)(Object *obj) \ - { \ - MachineState *machine = MACHINE(obj); \ - current_mc = S390_CCW_MACHINE_CLASS(MACHINE_GET_CLASS(machine)); \ - MACHINE_VER_SYM(instance_options, ccw, __VA_ARGS__)(machine); \ - } \ static const TypeInfo MACHINE_VER_SYM(info, ccw, __VA_ARGS__) = \ { \ .name = MACHINE_VER_TYPE_NAME("s390-ccw-virtio", __VA_ARGS__), \ .parent = TYPE_S390_CCW_MACHINE, \ .class_init = MACHINE_VER_SYM(class_init, ccw, __VA_ARGS__), \ - .instance_init = MACHINE_VER_SYM(instance_init, ccw, __VA_ARGS__), \ }; \ static void MACHINE_VER_SYM(register, ccw, __VA_ARGS__)(void) \ { \ @@ -871,14 +911,58 @@ static const TypeInfo ccw_machine_info = { DEFINE_CCW_MACHINE_IMPL(false, major, minor) +static void ccw_machine_10_1_instance_options(MachineState *machine) +{ +} + +static void ccw_machine_10_1_class_options(MachineClass *mc) +{ +} +DEFINE_CCW_MACHINE_AS_LATEST(10, 1); + +static void ccw_machine_10_0_instance_options(MachineState *machine) +{ + ccw_machine_10_1_instance_options(machine); +} + +static void ccw_machine_10_0_class_options(MachineClass *mc) +{ + S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); + s390mc->use_cpi = false; + + ccw_machine_10_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_10_0, hw_compat_10_0_len); +} +DEFINE_CCW_MACHINE(10, 0); + +static void ccw_machine_9_2_instance_options(MachineState *machine) +{ + ccw_machine_10_0_instance_options(machine); +} + +static void ccw_machine_9_2_class_options(MachineClass *mc) +{ + static GlobalProperty compat[] = { + { TYPE_S390_PCI_DEVICE, "relaxed-translation", "off", }, + }; + + ccw_machine_10_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_9_2, hw_compat_9_2_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); +} +DEFINE_CCW_MACHINE(9, 2); + static void ccw_machine_9_1_instance_options(MachineState *machine) { + ccw_machine_9_2_instance_options(machine); } static void ccw_machine_9_1_class_options(MachineClass *mc) { + ccw_machine_9_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_9_1, hw_compat_9_1_len); } 
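The versioned machine definitions above chain their class options: each ccw_machine_N_class_options() first calls the next newer version's options and only then appends its own hw_compat table plus any machine-specific GlobalProperty entries, so a default that changes in a new release stays pinned to its old value for every older machine type. The sketch below illustrates that pattern for a hypothetical pinned default; "virtio-example-ccw" and "new-feature" are placeholders invented for this illustration, not identifiers from this patch.

/*
 * Minimal sketch, assuming a hypothetical "new-feature" property on a
 * hypothetical "virtio-example-ccw" device whose default changed in the
 * newest machine type and must remain "off" for 9.2 and older machines.
 */
static void ccw_machine_9_2_class_options_example(MachineClass *mc)
{
    static GlobalProperty compat[] = {
        { "virtio-example-ccw", "new-feature", "off", },
    };

    /* Inherit every compat entry already registered for newer versions. */
    ccw_machine_10_0_class_options(mc);
    /* Then add this release's generic and machine-specific overrides. */
    compat_props_add(mc->compat_props, hw_compat_9_2, hw_compat_9_2_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}

Because each version delegates to the next newer one before adding its own entries, instantiating, say, a 4.2 machine applies the union of every compat table from 4.2 up to the current release.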
-DEFINE_CCW_MACHINE_AS_LATEST(9, 1); +DEFINE_CCW_MACHINE(9, 1); static void ccw_machine_9_0_instance_options(MachineState *machine) { @@ -1083,229 +1167,6 @@ static void ccw_machine_4_2_class_options(MachineClass *mc) } DEFINE_CCW_MACHINE(4, 2); -static void ccw_machine_4_1_instance_options(MachineState *machine) -{ - static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V4_1 }; - ccw_machine_4_2_instance_options(machine); - s390_set_qemu_cpu_model(0x2964, 13, 2, qemu_cpu_feat); -} - -static void ccw_machine_4_1_class_options(MachineClass *mc) -{ - ccw_machine_4_2_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len); -} -DEFINE_CCW_MACHINE(4, 1); - -static void ccw_machine_4_0_instance_options(MachineState *machine) -{ - static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V4_0 }; - ccw_machine_4_1_instance_options(machine); - s390_set_qemu_cpu_model(0x2827, 12, 2, qemu_cpu_feat); -} - -static void ccw_machine_4_0_class_options(MachineClass *mc) -{ - ccw_machine_4_1_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); -} -DEFINE_CCW_MACHINE(4, 0); - -static void ccw_machine_3_1_instance_options(MachineState *machine) -{ - static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V3_1 }; - ccw_machine_4_0_instance_options(machine); - s390_cpudef_featoff_greater(14, 1, S390_FEAT_MULTIPLE_EPOCH); - s390_cpudef_group_featoff_greater(14, 1, S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF); - s390_set_qemu_cpu_model(0x2827, 12, 2, qemu_cpu_feat); -} - -static void ccw_machine_3_1_class_options(MachineClass *mc) -{ - ccw_machine_4_0_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len); -} -DEFINE_CCW_MACHINE(3, 1); - -static void ccw_machine_3_0_instance_options(MachineState *machine) -{ - ccw_machine_3_1_instance_options(machine); -} - -static void ccw_machine_3_0_class_options(MachineClass *mc) -{ - S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); - - s390mc->hpage_1m_allowed = false; - ccw_machine_3_1_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len); -} -DEFINE_CCW_MACHINE(3, 0); - -static void ccw_machine_2_12_instance_options(MachineState *machine) -{ - ccw_machine_3_0_instance_options(machine); - s390_cpudef_featoff_greater(11, 1, S390_FEAT_PPA15); - s390_cpudef_featoff_greater(11, 1, S390_FEAT_BPB); -} - -static void ccw_machine_2_12_class_options(MachineClass *mc) -{ - ccw_machine_3_0_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len); -} -DEFINE_CCW_MACHINE(2, 12); - -#ifdef CONFIG_S390X_LEGACY_CPUS - -static void ccw_machine_2_11_instance_options(MachineState *machine) -{ - static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V2_11 }; - ccw_machine_2_12_instance_options(machine); - - /* before 2.12 we emulated the very first z900 */ - s390_set_qemu_cpu_model(0x2064, 7, 1, qemu_cpu_feat); -} - -static void ccw_machine_2_11_class_options(MachineClass *mc) -{ - static GlobalProperty compat[] = { - { TYPE_SCLP_EVENT_FACILITY, "allow_all_mask_sizes", "off", }, - }; - - ccw_machine_2_12_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); -} -DEFINE_CCW_MACHINE(2, 11); - -static void ccw_machine_2_10_instance_options(MachineState *machine) -{ - ccw_machine_2_11_instance_options(machine); -} - -static void ccw_machine_2_10_class_options(MachineClass *mc) -{ - 
ccw_machine_2_11_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len); -} -DEFINE_CCW_MACHINE(2, 10); - -static void ccw_machine_2_9_instance_options(MachineState *machine) -{ - ccw_machine_2_10_instance_options(machine); - s390_cpudef_featoff_greater(12, 1, S390_FEAT_ESOP); - s390_cpudef_featoff_greater(12, 1, S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2); - s390_cpudef_featoff_greater(12, 1, S390_FEAT_ZPCI); - s390_cpudef_featoff_greater(12, 1, S390_FEAT_ADAPTER_INT_SUPPRESSION); - s390_cpudef_featoff_greater(12, 1, S390_FEAT_ADAPTER_EVENT_NOTIFICATION); -} - -static void ccw_machine_2_9_class_options(MachineClass *mc) -{ - static GlobalProperty compat[] = { - { TYPE_S390_STATTRIB, "migration-enabled", "off", }, - { TYPE_S390_FLIC_COMMON, "migration-enabled", "off", }, - }; - - ccw_machine_2_10_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); - css_migration_enabled = false; -} -DEFINE_CCW_MACHINE(2, 9); - -static void ccw_machine_2_8_instance_options(MachineState *machine) -{ - ccw_machine_2_9_instance_options(machine); -} - -static void ccw_machine_2_8_class_options(MachineClass *mc) -{ - static GlobalProperty compat[] = { - { TYPE_S390_FLIC_COMMON, "adapter_routes_max_batch", "64", }, - }; - - ccw_machine_2_9_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); -} -DEFINE_CCW_MACHINE(2, 8); - -static void ccw_machine_2_7_instance_options(MachineState *machine) -{ - ccw_machine_2_8_instance_options(machine); -} - -static void ccw_machine_2_7_class_options(MachineClass *mc) -{ - S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); - - s390mc->cpu_model_allowed = false; - ccw_machine_2_8_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len); -} -DEFINE_CCW_MACHINE(2, 7); - -static void ccw_machine_2_6_instance_options(MachineState *machine) -{ - ccw_machine_2_7_instance_options(machine); -} - -static void ccw_machine_2_6_class_options(MachineClass *mc) -{ - S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); - static GlobalProperty compat[] = { - { TYPE_S390_IPL, "iplbext_migration", "off", }, - { TYPE_VIRTUAL_CSS_BRIDGE, "css_dev_path", "off", }, - }; - - s390mc->ri_allowed = false; - ccw_machine_2_7_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); -} -DEFINE_CCW_MACHINE(2, 6); - -static void ccw_machine_2_5_instance_options(MachineState *machine) -{ - ccw_machine_2_6_instance_options(machine); -} - -static void ccw_machine_2_5_class_options(MachineClass *mc) -{ - ccw_machine_2_6_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len); -} -DEFINE_CCW_MACHINE(2, 5); - -static void ccw_machine_2_4_instance_options(MachineState *machine) -{ - ccw_machine_2_5_instance_options(machine); -} - -static void ccw_machine_2_4_class_options(MachineClass *mc) -{ - static GlobalProperty compat[] = { - { TYPE_S390_SKEYS, "migration-enabled", "off", }, - { "virtio-blk-ccw", "max_revision", "0", }, - { "virtio-balloon-ccw", "max_revision", "0", }, - { "virtio-serial-ccw", "max_revision", "0", }, - { "virtio-9p-ccw", "max_revision", "0", }, - { "virtio-rng-ccw", "max_revision", "0", }, - { "virtio-net-ccw", "max_revision", "0", }, - { "virtio-scsi-ccw", "max_revision", "0", }, 
- { "vhost-scsi-ccw", "max_revision", "0", }, - }; - - ccw_machine_2_5_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); -} -DEFINE_CCW_MACHINE(2, 4); - -#endif - static void ccw_machine_register_types(void) { type_register_static(&ccw_machine_info); diff --git a/hw/s390x/s390-virtio-hcall.c b/hw/s390x/s390-virtio-hcall.c deleted file mode 100644 index ec7cf8beb3f..00000000000 --- a/hw/s390x/s390-virtio-hcall.c +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Support for virtio hypercalls on s390 - * - * Copyright 2012 IBM Corp. - * Author(s): Cornelia Huck - * - * This work is licensed under the terms of the GNU GPL, version 2 or (at - * your option) any later version. See the COPYING file in the top-level - * directory. - */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "hw/s390x/s390-virtio-hcall.h" - -#define MAX_DIAG_SUBCODES 255 - -static s390_virtio_fn s390_diag500_table[MAX_DIAG_SUBCODES]; - -void s390_register_virtio_hypercall(uint64_t code, s390_virtio_fn fn) -{ - assert(code < MAX_DIAG_SUBCODES); - assert(!s390_diag500_table[code]); - - s390_diag500_table[code] = fn; -} - -int s390_virtio_hypercall(CPUS390XState *env) -{ - s390_virtio_fn fn; - - if (env->regs[1] < MAX_DIAG_SUBCODES) { - fn = s390_diag500_table[env->regs[1]]; - if (fn) { - env->regs[2] = fn(&env->regs[2]); - return 0; - } - } - - return -EINVAL; -} diff --git a/hw/s390x/s390-virtio-hcall.h b/hw/s390x/s390-virtio-hcall.h deleted file mode 100644 index 3ae6d6ae3a1..00000000000 --- a/hw/s390x/s390-virtio-hcall.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Support for virtio hypercalls on s390x - * - * Copyright IBM Corp. 2012, 2017 - * Author(s): Cornelia Huck - * - * This work is licensed under the terms of the GNU GPL, version 2 or (at - * your option) any later version. See the COPYING file in the top-level - * directory. - */ - -#ifndef HW_S390_VIRTIO_HCALL_H -#define HW_S390_VIRTIO_HCALL_H - -#include "standard-headers/asm-s390/virtio-ccw.h" -#include "cpu.h" - -/* The only thing that we need from the old kvm_virtio.h file */ -#define KVM_S390_VIRTIO_NOTIFY 0 - -typedef int (*s390_virtio_fn)(const uint64_t *args); -void s390_register_virtio_hypercall(uint64_t code, s390_virtio_fn fn); -int s390_virtio_hypercall(CPUS390XState *env); - -#endif /* HW_S390_VIRTIO_HCALL_H */ diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c index e725dcd5fdf..9718564fa42 100644 --- a/hw/s390x/sclp.c +++ b/hw/s390x/sclp.c @@ -110,7 +110,6 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb) MachineState *machine = MACHINE(qdev_get_machine()); int cpu_count; int rnsize, rnmax; - IplParameterBlock *ipib = s390_ipl_get_iplb(); int required_len = SCCB_REQ_LEN(ReadInfo, machine->possible_cpus->len); int offset_cpu = s390_has_feat(S390_FEAT_EXTENDED_LENGTH_SCCB) ? offsetof(ReadInfo, entries) : @@ -162,7 +161,11 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb) read_info->rnsize2 = cpu_to_be32(rnsize); } - /* we don't support standby memory, maxram_size is never exposed */ + /* + * We don't support standby memory. maxram_size is used for sizing the + * memory device region, which is not exposed through SCLP but through + * diag500. 
+ */ rnmax = machine->ram_size >> sclp->increment_size; if (rnmax < 0x10000) { read_info->rnmax = cpu_to_be16(rnmax); @@ -171,12 +174,8 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb) read_info->rnmax2 = cpu_to_be64(rnmax); } - if (ipib && ipib->flags & DIAG308_FLAGS_LP_VALID) { - memcpy(&read_info->loadparm, &ipib->loadparm, - sizeof(read_info->loadparm)); - } else { - s390_ipl_set_loadparm(read_info->loadparm); - } + s390_ipl_convert_loadparm((char *)S390_CCW_MACHINE(machine)->loadparm, + read_info->loadparm); sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION); } @@ -381,10 +380,7 @@ void sclp_service_interrupt(uint32_t sccb) /* qemu object creation and initialization functions */ static void sclp_realize(DeviceState *dev, Error **errp) { - MachineState *machine = MACHINE(qdev_get_machine()); SCLPDevice *sclp = SCLP(dev); - uint64_t hw_limit; - int ret; /* * qdev_device_add searches the sysbus for TYPE_SCLP_EVENTS_BUS. As long @@ -394,14 +390,6 @@ static void sclp_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(sclp->event_facility), errp)) { return; } - - ret = s390_set_memory_limit(machine->maxram_size, &hw_limit); - if (ret == -E2BIG) { - error_setg(errp, "host supports a maximum of %" PRIu64 " GB", - hw_limit / GiB); - } else if (ret) { - error_setg(errp, "setting the guest size failed"); - } } static void sclp_memory_init(SCLPDevice *sclp) @@ -436,7 +424,7 @@ static void sclp_init(Object *obj) sclp_memory_init(sclp); } -static void sclp_class_init(ObjectClass *oc, void *data) +static void sclp_class_init(ObjectClass *oc, const void *data) { SCLPDeviceClass *sc = SCLP_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/s390x/sclpcpi.c b/hw/s390x/sclpcpi.c new file mode 100644 index 00000000000..7aa039d5100 --- /dev/null +++ b/hw/s390x/sclpcpi.c @@ -0,0 +1,212 @@ + /* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * SCLP event type 11 - Control-Program Identification (CPI): + * CPI is used to send program identifiers from the guest to the + * Service-Call Logical Processor (SCLP). It is not sent by the SCLP. + * + * Control-program identifiers provide data about the guest operating + * system. The control-program identifiers are: system type, system name, + * system level and sysplex name. + * + * In Linux, all the control-program identifiers are user configurable. The + * system type, system name, and sysplex name use EBCDIC characters from + * this set: capital A-Z, 0-9, $, @, #, and blank. In Linux, the system + * type, system name and sysplex name are arbitrary free-form texts. + * + * In Linux, the 8-byte hexadecimal system-level has the format + * 0x
<a><b><cc><dd><eeee><ff><gg><hh>, where: + * <a>: is a 4-bit digit, its most significant bit indicates hypervisor use + * <b>: is one digit that represents Linux distributions as follows + * 0: generic Linux + * 1: Red Hat Enterprise Linux + * 2: SUSE Linux Enterprise Server + * 3: Canonical Ubuntu + * 4: Fedora + * 5: openSUSE Leap + * 6: Debian GNU/Linux + * 7: Red Hat Enterprise Linux CoreOS + * <cc>: are two digits for a distribution-specific encoding of the major + * version of the distribution + * <dd>
: are two digits for a distribution-specific encoding of the minor + * version of the distribution + * : are four digits for the patch level of the distribution + * : are two digits for the major version of the kernel + * : are two digits for the minor version of the kernel + * : are two digits for the stable version of the kernel + * (e.g. 74872343805430528, when converted to hex is 0x010a000000060b00). On + * machines prior to z16, some of the values are not available to display. + * + * Sysplex refers to a cluster of logical partitions that communicates and + * co-operates with each other. + * + * The CPI feature is supported since 10.1. + * + * Copyright IBM, Corp. 2024 + * + * Authors: + * Shalini Chellathurai Saroja + * + */ + +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "hw/s390x/event-facility.h" +#include "hw/s390x/ebcdic.h" +#include "qapi/qapi-visit-machine.h" +#include "migration/vmstate.h" + +typedef struct Data { + uint8_t id_format; + uint8_t reserved0; + uint8_t system_type[8]; + uint64_t reserved1; + uint8_t system_name[8]; + uint64_t reserved2; + uint64_t system_level; + uint64_t reserved3; + uint8_t sysplex_name[8]; + uint8_t reserved4[16]; +} QEMU_PACKED Data; + +typedef struct ControlProgramIdMsg { + EventBufferHeader ebh; + Data data; +} QEMU_PACKED ControlProgramIdMsg; + +static bool can_handle_event(uint8_t type) +{ + return type == SCLP_EVENT_CTRL_PGM_ID; +} + +static sccb_mask_t send_mask(void) +{ + return 0; +} + +/* Enable SCLP to accept buffers of event type CPI from the control-program. */ +static sccb_mask_t receive_mask(void) +{ + return SCLP_EVENT_MASK_CTRL_PGM_ID; +} + +static int write_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr) +{ + ControlProgramIdMsg *cpim = container_of(evt_buf_hdr, ControlProgramIdMsg, + ebh); + SCLPEventCPI *e = SCLP_EVENT_CPI(event); + + ascii_put(e->system_type, (char *)cpim->data.system_type, + sizeof(cpim->data.system_type)); + ascii_put(e->system_name, (char *)cpim->data.system_name, + sizeof(cpim->data.system_name)); + ascii_put(e->sysplex_name, (char *)cpim->data.sysplex_name, + sizeof(cpim->data.sysplex_name)); + e->system_level = ldq_be_p(&cpim->data.system_level); + e->timestamp = qemu_clock_get_ns(QEMU_CLOCK_HOST); + + cpim->ebh.flags = SCLP_EVENT_BUFFER_ACCEPTED; + return SCLP_RC_NORMAL_COMPLETION; +} + +static char *get_system_type(Object *obj, Error **errp) +{ + SCLPEventCPI *e = SCLP_EVENT_CPI(obj); + + return g_strndup((char *) e->system_type, sizeof(e->system_type)); +} + +static char *get_system_name(Object *obj, Error **errp) +{ + SCLPEventCPI *e = SCLP_EVENT_CPI(obj); + + return g_strndup((char *) e->system_name, sizeof(e->system_name)); +} + +static char *get_sysplex_name(Object *obj, Error **errp) +{ + SCLPEventCPI *e = SCLP_EVENT_CPI(obj); + + return g_strndup((char *) e->sysplex_name, sizeof(e->sysplex_name)); +} + +static void get_system_level(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + SCLPEventCPI *e = SCLP_EVENT_CPI(obj); + + visit_type_uint64(v, name, &e->system_level, errp); +} + +static void get_timestamp(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + SCLPEventCPI *e = SCLP_EVENT_CPI(obj); + + visit_type_uint64(v, name, &e->timestamp, errp); +} + +static const VMStateDescription vmstate_sclpcpi = { + .name = "s390_control_program_id", + .version_id = 0, + .fields = (const VMStateField[]) { + VMSTATE_UINT8_ARRAY(system_type, SCLPEventCPI, 8), + VMSTATE_UINT8_ARRAY(system_name, SCLPEventCPI, 8), + 
VMSTATE_UINT64(system_level, SCLPEventCPI), + VMSTATE_UINT8_ARRAY(sysplex_name, SCLPEventCPI, 8), + VMSTATE_UINT64(timestamp, SCLPEventCPI), + VMSTATE_END_OF_LIST() + } +}; + +static void cpi_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SCLPEventClass *k = SCLP_EVENT_CLASS(klass); + + dc->user_creatable = false; + dc->vmsd = &vmstate_sclpcpi; + + k->can_handle_event = can_handle_event; + k->get_send_mask = send_mask; + k->get_receive_mask = receive_mask; + k->write_event_data = write_event_data; + + object_class_property_add_str(klass, "system_type", get_system_type, NULL); + object_class_property_set_description(klass, "system_type", + "operating system e.g. \"LINUX \""); + + object_class_property_add_str(klass, "system_name", get_system_name, NULL); + object_class_property_set_description(klass, "system_name", + "user configurable name of the VM e.g. \"TESTVM \""); + + object_class_property_add_str(klass, "sysplex_name", get_sysplex_name, + NULL); + object_class_property_set_description(klass, "sysplex_name", + "name of the cluster which the VM belongs to, if any" + " e.g. \"PLEX \""); + + object_class_property_add(klass, "system_level", "uint64", get_system_level, + NULL, NULL, NULL); + object_class_property_set_description(klass, "system_level", + "distribution and kernel version in Linux e.g. 74872343805430528"); + + object_class_property_add(klass, "timestamp", "uint64", get_timestamp, + NULL, NULL, NULL); + object_class_property_set_description(klass, "timestamp", + "latest update of CPI data in nanoseconds since the UNIX EPOCH"); +} + +static const TypeInfo sclp_cpi_info = { + .name = TYPE_SCLP_EVENT_CPI, + .parent = TYPE_SCLP_EVENT, + .instance_size = sizeof(SCLPEventCPI), + .class_init = cpi_class_init, +}; + +static void sclp_cpi_register_types(void) +{ + type_register_static(&sclp_cpi_info); +} + +type_init(sclp_cpi_register_types) diff --git a/hw/s390x/sclpcpu.c b/hw/s390x/sclpcpu.c index fa79891f5a4..4b6ebfed469 100644 --- a/hw/s390x/sclpcpu.c +++ b/hw/s390x/sclpcpu.c @@ -17,7 +17,7 @@ #include "hw/s390x/sclp.h" #include "qemu/module.h" #include "hw/s390x/event-facility.h" -#include "sysemu/cpus.h" +#include "system/cpus.h" typedef struct ConfigMgtData { EventBufferHeader ebh; @@ -73,7 +73,7 @@ static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr, return 1; } -static void sclp_cpu_class_init(ObjectClass *oc, void *data) +static void sclp_cpu_class_init(ObjectClass *oc, const void *data) { SCLPEventClass *k = SCLP_EVENT_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/s390x/sclpquiesce.c b/hw/s390x/sclpquiesce.c index 14936aa94ba..da4c8f3e369 100644 --- a/hw/s390x/sclpquiesce.c +++ b/hw/s390x/sclpquiesce.c @@ -16,7 +16,7 @@ #include "hw/s390x/sclp.h" #include "migration/vmstate.h" #include "qemu/module.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "hw/s390x/event-facility.h" typedef struct SignalQuiesce { @@ -112,12 +112,12 @@ static void quiesce_reset(DeviceState *dev) event->event_pending = false; } -static void quiesce_class_init(ObjectClass *klass, void *data) +static void quiesce_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SCLPEventClass *k = SCLP_EVENT_CLASS(klass); - dc->reset = quiesce_reset; + device_class_set_legacy_reset(dc, quiesce_reset); dc->vmsd = &vmstate_sclpquiesce; set_bit(DEVICE_CATEGORY_MISC, dc->categories); /* diff --git a/hw/s390x/tod-kvm.c b/hw/s390x/tod-kvm.c index 9588b90f2b9..c9b8896b638 
100644 --- a/hw/s390x/tod-kvm.c +++ b/hw/s390x/tod-kvm.c @@ -11,7 +11,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/module.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "hw/s390x/tod.h" #include "target/s390x/kvm/pv.h" #include "kvm/kvm_s390x.h" @@ -133,7 +133,7 @@ static void kvm_s390_tod_realize(DeviceState *dev, Error **errp) qemu_add_vm_change_state_handler(kvm_s390_tod_vm_state_change, td); } -static void kvm_s390_tod_class_init(ObjectClass *oc, void *data) +static void kvm_s390_tod_class_init(ObjectClass *oc, const void *data) { S390TODClass *tdc = S390_TOD_CLASS(oc); diff --git a/hw/s390x/tod-tcg.c b/hw/s390x/tod-tcg.c index 2d540dba65a..0cc96624e1b 100644 --- a/hw/s390x/tod-tcg.c +++ b/hw/s390x/tod-tcg.c @@ -16,7 +16,7 @@ #include "qemu/module.h" #include "cpu.h" #include "tcg/tcg_s390x.h" -#include "sysemu/rtc.h" +#include "system/rtc.h" static void qemu_s390_tod_get(const S390TODState *td, S390TOD *tod, Error **errp) @@ -52,7 +52,7 @@ static void qemu_s390_tod_set(S390TODState *td, const S390TOD *tod, } } -static void qemu_s390_tod_class_init(ObjectClass *oc, void *data) +static void qemu_s390_tod_class_init(ObjectClass *oc, const void *data) { S390TODClass *tdc = S390_TOD_CLASS(oc); diff --git a/hw/s390x/tod.c b/hw/s390x/tod.c index c81b1c03383..3f913cc88ab 100644 --- a/hw/s390x/tod.c +++ b/hw/s390x/tod.c @@ -13,9 +13,9 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/module.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" -#include "sysemu/qtest.h" +#include "system/kvm.h" +#include "system/tcg.h" +#include "system/qtest.h" #include "migration/qemu-file-types.h" #include "migration/register.h" @@ -111,7 +111,7 @@ static void s390_tod_realize(DeviceState *dev, Error **errp) register_savevm_live("todclock", 0, 1, &savevm_tod, td); } -static void s390_tod_class_init(ObjectClass *oc, void *data) +static void s390_tod_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/s390x/vhost-scsi-ccw.c b/hw/s390x/vhost-scsi-ccw.c index 40dc14bbc71..8341b23a95c 100644 --- a/hw/s390x/vhost-scsi-ccw.c +++ b/hw/s390x/vhost-scsi-ccw.c @@ -41,13 +41,12 @@ static void vhost_ccw_scsi_instance_init(Object *obj) TYPE_VHOST_SCSI); } -static Property vhost_ccw_scsi_properties[] = { +static const Property vhost_ccw_scsi_properties[] = { DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), }; -static void vhost_ccw_scsi_class_init(ObjectClass *klass, void *data) +static void vhost_ccw_scsi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/vhost-user-fs-ccw.c b/hw/s390x/vhost-user-fs-ccw.c index 6c6f2692930..cc1b8227fc0 100644 --- a/hw/s390x/vhost-user-fs-ccw.c +++ b/hw/s390x/vhost-user-fs-ccw.c @@ -23,12 +23,11 @@ typedef struct VHostUserFSCcw { OBJECT_CHECK(VHostUserFSCcw, (obj), TYPE_VHOST_USER_FS_CCW) -static Property vhost_user_fs_ccw_properties[] = { +static const Property vhost_user_fs_ccw_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), }; static void vhost_user_fs_ccw_realize(VirtioCcwDevice *ccw_dev, Error **errp) @@ -49,7 +48,7 @@ static void vhost_user_fs_ccw_instance_init(Object *obj) TYPE_VHOST_USER_FS); } -static void 
vhost_user_fs_ccw_class_init(ObjectClass *klass, void *data) +static void vhost_user_fs_ccw_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/vhost-vsock-ccw.c b/hw/s390x/vhost-vsock-ccw.c index 07845a9a006..552e9e86a4b 100644 --- a/hw/s390x/vhost-vsock-ccw.c +++ b/hw/s390x/vhost-vsock-ccw.c @@ -22,10 +22,9 @@ struct VHostVSockCCWState { VHostVSock vdev; }; -static Property vhost_vsock_ccw_properties[] = { +static const Property vhost_vsock_ccw_properties[] = { DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), }; static void vhost_vsock_ccw_realize(VirtioCcwDevice *ccw_dev, Error **errp) @@ -36,7 +35,7 @@ static void vhost_vsock_ccw_realize(VirtioCcwDevice *ccw_dev, Error **errp) qdev_realize(vdev, BUS(&ccw_dev->bus), errp); } -static void vhost_vsock_ccw_class_init(ObjectClass *klass, void *data) +static void vhost_vsock_ccw_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/virtio-ccw-9p.c b/hw/s390x/virtio-ccw-9p.c index 6f931f5994c..72bf6ec80c8 100644 --- a/hw/s390x/virtio-ccw-9p.c +++ b/hw/s390x/virtio-ccw-9p.c @@ -41,15 +41,14 @@ static void virtio_ccw_9p_instance_init(Object *obj) TYPE_VIRTIO_9P); } -static Property virtio_ccw_9p_properties[] = { +static const Property virtio_ccw_9p_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_ccw_9p_class_init(ObjectClass *klass, void *data) +static void virtio_ccw_9p_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/virtio-ccw-balloon.c b/hw/s390x/virtio-ccw-balloon.c index 44287b9bbec..399b40f366e 100644 --- a/hw/s390x/virtio-ccw-balloon.c +++ b/hw/s390x/virtio-ccw-balloon.c @@ -46,15 +46,14 @@ static void virtio_ccw_balloon_instance_init(Object *obj) "guest-stats-polling-interval"); } -static Property virtio_ccw_balloon_properties[] = { +static const Property virtio_ccw_balloon_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_ccw_balloon_class_init(ObjectClass *klass, void *data) +static void virtio_ccw_balloon_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/virtio-ccw-blk.c b/hw/s390x/virtio-ccw-blk.c index 8e0e58b77d8..7d8c4a75ced 100644 --- a/hw/s390x/virtio-ccw-blk.c +++ b/hw/s390x/virtio-ccw-blk.c @@ -43,15 +43,15 @@ static void virtio_ccw_blk_instance_init(Object *obj) "bootindex"); } -static Property virtio_ccw_blk_properties[] = { +static const Property virtio_ccw_blk_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_CCW_LOADPARM("loadparm", CcwDevice, loadparm), }; -static void virtio_ccw_blk_class_init(ObjectClass *klass, void 
*data) +static void virtio_ccw_blk_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/virtio-ccw-crypto.c b/hw/s390x/virtio-ccw-crypto.c index 0fa2f894439..75e714603b1 100644 --- a/hw/s390x/virtio-ccw-crypto.c +++ b/hw/s390x/virtio-ccw-crypto.c @@ -44,15 +44,14 @@ static void virtio_ccw_crypto_instance_init(Object *obj) TYPE_VIRTIO_CRYPTO); } -static Property virtio_ccw_crypto_properties[] = { +static const Property virtio_ccw_crypto_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_ccw_crypto_class_init(ObjectClass *klass, void *data) +static void virtio_ccw_crypto_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/virtio-ccw-gpu.c b/hw/s390x/virtio-ccw-gpu.c index 0642c5281d9..edb6a47d376 100644 --- a/hw/s390x/virtio-ccw-gpu.c +++ b/hw/s390x/virtio-ccw-gpu.c @@ -42,15 +42,14 @@ static void virtio_ccw_gpu_instance_init(Object *obj) TYPE_VIRTIO_GPU); } -static Property virtio_ccw_gpu_properties[] = { +static const Property virtio_ccw_gpu_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_ccw_gpu_class_init(ObjectClass *klass, void *data) +static void virtio_ccw_gpu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/virtio-ccw-input.c b/hw/s390x/virtio-ccw-input.c index 61a07ba38d0..2250d8cf981 100644 --- a/hw/s390x/virtio-ccw-input.c +++ b/hw/s390x/virtio-ccw-input.c @@ -43,15 +43,14 @@ static void virtio_ccw_input_realize(VirtioCcwDevice *ccw_dev, Error **errp) qdev_realize(vdev, BUS(&ccw_dev->bus), errp); } -static Property virtio_ccw_input_properties[] = { +static const Property virtio_ccw_input_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_ccw_input_class_init(ObjectClass *klass, void *data) +static void virtio_ccw_input_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/virtio-ccw-md-stubs.c b/hw/s390x/virtio-ccw-md-stubs.c new file mode 100644 index 00000000000..e9378655503 --- /dev/null +++ b/hw/s390x/virtio-ccw-md-stubs.c @@ -0,0 +1,24 @@ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/s390x/virtio-ccw-md.h" + +void virtio_ccw_md_pre_plug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp) +{ + error_setg(errp, "virtio based memory devices not supported"); +} + +void virtio_ccw_md_plug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp) +{ + error_setg(errp, "virtio based memory devices not supported"); +} + +void virtio_ccw_md_unplug_request(VirtIOMDCcw *vmd, MachineState *ms, + Error **errp) +{ + error_setg(errp, "virtio based memory devices not supported"); +} + +void virtio_ccw_md_unplug(VirtIOMDCcw *vmd, MachineState 
*ms, Error **errp) +{ + error_setg(errp, "virtio based memory devices not supported"); +} diff --git a/hw/s390x/virtio-ccw-md.c b/hw/s390x/virtio-ccw-md.c new file mode 100644 index 00000000000..0370f584505 --- /dev/null +++ b/hw/s390x/virtio-ccw-md.c @@ -0,0 +1,153 @@ +/* + * Virtio CCW support for abstract virtio based memory device + * + * Copyright (C) 2024 Red Hat, Inc. + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/s390x/virtio-ccw-md.h" +#include "hw/mem/memory-device.h" +#include "qapi/error.h" +#include "qemu/error-report.h" + +void virtio_ccw_md_pre_plug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp) +{ + DeviceState *dev = DEVICE(vmd); + HotplugHandler *bus_handler = qdev_get_bus_hotplug_handler(dev); + MemoryDeviceState *md = MEMORY_DEVICE(vmd); + Error *local_err = NULL; + + if (!bus_handler && dev->hotplugged) { + /* + * Without a bus hotplug handler, we cannot control the plug/unplug + * order. We should never reach this point when hotplugging, but + * better add a safety net. + */ + error_setg(errp, "hotplug of virtio based memory devices not supported" + " on this bus."); + return; + } + + /* + * First, see if we can plug this memory device at all. If that + * succeeds, branch of to the actual hotplug handler. + */ + memory_device_pre_plug(md, ms, &local_err); + if (!local_err && bus_handler) { + hotplug_handler_pre_plug(bus_handler, dev, &local_err); + } + error_propagate(errp, local_err); +} + +void virtio_ccw_md_plug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp) +{ + DeviceState *dev = DEVICE(vmd); + HotplugHandler *bus_handler = qdev_get_bus_hotplug_handler(dev); + MemoryDeviceState *md = MEMORY_DEVICE(vmd); + Error *local_err = NULL; + + /* + * Plug the memory device first and then branch off to the actual + * hotplug handler. If that one fails, we can easily undo the memory + * device bits. + */ + memory_device_plug(md, ms); + if (bus_handler) { + hotplug_handler_plug(bus_handler, dev, &local_err); + if (local_err) { + memory_device_unplug(md, ms); + } + } + error_propagate(errp, local_err); +} + +void virtio_ccw_md_unplug_request(VirtIOMDCcw *vmd, MachineState *ms, + Error **errp) +{ + VirtIOMDCcwClass *vmdc = VIRTIO_MD_CCW_GET_CLASS(vmd); + DeviceState *dev = DEVICE(vmd); + HotplugHandler *bus_handler = qdev_get_bus_hotplug_handler(dev); + HotplugHandlerClass *hdc; + Error *local_err = NULL; + + if (!vmdc->unplug_request_check) { + error_setg(errp, + "this virtio based memory devices cannot be unplugged"); + return; + } + + if (!bus_handler) { + error_setg(errp, "hotunplug of virtio based memory devices not" + "supported on this bus"); + return; + } + + vmdc->unplug_request_check(vmd, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* + * Forward the async request or turn it into a sync request (handling it + * like qdev_unplug()). 
+ */ + hdc = HOTPLUG_HANDLER_GET_CLASS(bus_handler); + if (hdc->unplug_request) { + hotplug_handler_unplug_request(bus_handler, dev, &local_err); + } else { + virtio_ccw_md_unplug(vmd, ms, &local_err); + if (!local_err) { + object_unparent(OBJECT(dev)); + } + } +} + +void virtio_ccw_md_unplug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp) +{ + DeviceState *dev = DEVICE(vmd); + HotplugHandler *bus_handler = qdev_get_bus_hotplug_handler(dev); + MemoryDeviceState *md = MEMORY_DEVICE(vmd); + Error *local_err = NULL; + + /* Unplug the memory device while it is still realized. */ + memory_device_unplug(md, ms); + + if (bus_handler) { + hotplug_handler_unplug(bus_handler, dev, &local_err); + if (local_err) { + /* Not expected to fail ... but still try to recover. */ + memory_device_plug(md, ms); + error_propagate(errp, local_err); + return; + } + } else { + /* Very unexpected, but let's just try to do the right thing. */ + warn_report("Unexpected unplug of virtio based memory device"); + qdev_unrealize(dev); + } +} + +static const TypeInfo virtio_ccw_md_info = { + .name = TYPE_VIRTIO_MD_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VirtIOMDCcw), + .class_size = sizeof(VirtIOMDCcwClass), + .abstract = true, + .interfaces = (const InterfaceInfo[]) { + { TYPE_MEMORY_DEVICE }, + { } + }, +}; + +static void virtio_ccw_md_register(void) +{ + type_register_static(&virtio_ccw_md_info); +} +type_init(virtio_ccw_md_register) diff --git a/hw/s390x/virtio-ccw-md.h b/hw/s390x/virtio-ccw-md.h new file mode 100644 index 00000000000..39ba864c927 --- /dev/null +++ b/hw/s390x/virtio-ccw-md.h @@ -0,0 +1,44 @@ +/* + * Virtio CCW support for abstract virtio based memory device + * + * Copyright (C) 2024 Red Hat, Inc. + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. + */ + +#ifndef HW_S390X_VIRTIO_CCW_MD_H +#define HW_S390X_VIRTIO_CCW_MD_H + +#include "virtio-ccw.h" +#include "qom/object.h" + +/* + * virtio-md-ccw: This extends VirtioCcwDevice. + */ +#define TYPE_VIRTIO_MD_CCW "virtio-md-ccw" + +OBJECT_DECLARE_TYPE(VirtIOMDCcw, VirtIOMDCcwClass, VIRTIO_MD_CCW) + +struct VirtIOMDCcwClass { + /* private */ + VirtIOCCWDeviceClass parent; + + /* public */ + void (*unplug_request_check)(VirtIOMDCcw *vmd, Error **errp); +}; + +struct VirtIOMDCcw { + VirtioCcwDevice parent_obj; +}; + +void virtio_ccw_md_pre_plug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp); +void virtio_ccw_md_plug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp); +void virtio_ccw_md_unplug_request(VirtIOMDCcw *vmd, MachineState *ms, + Error **errp); +void virtio_ccw_md_unplug(VirtIOMDCcw *vmd, MachineState *ms, Error **errp); + +#endif /* HW_S390X_VIRTIO_CCW_MD_H */ diff --git a/hw/s390x/virtio-ccw-mem.c b/hw/s390x/virtio-ccw-mem.c new file mode 100644 index 00000000000..daa485d189f --- /dev/null +++ b/hw/s390x/virtio-ccw-mem.c @@ -0,0 +1,225 @@ +/* + * virtio-mem CCW implementation + * + * Copyright (C) 2024 Red Hat, Inc. + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-ccw-mem.h" +#include "hw/mem/memory-device.h" +#include "qapi/qapi-events-machine.h" +#include "qapi/qapi-events-misc.h" + +static void virtio_ccw_mem_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); +} + +static void virtio_ccw_mem_set_addr(MemoryDeviceState *md, uint64_t addr, + Error **errp) +{ + object_property_set_uint(OBJECT(md), VIRTIO_MEM_ADDR_PROP, addr, errp); +} + +static uint64_t virtio_ccw_mem_get_addr(const MemoryDeviceState *md) +{ + return object_property_get_uint(OBJECT(md), VIRTIO_MEM_ADDR_PROP, + &error_abort); +} + +static MemoryRegion *virtio_ccw_mem_get_memory_region(MemoryDeviceState *md, + Error **errp) +{ + VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(md); + VirtIOMEM *vmem = &dev->vdev; + VirtIOMEMClass *vmc = VIRTIO_MEM_GET_CLASS(vmem); + + return vmc->get_memory_region(vmem, errp); +} + +static void virtio_ccw_mem_decide_memslots(MemoryDeviceState *md, + unsigned int limit) +{ + VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(md); + VirtIOMEM *vmem = VIRTIO_MEM(&dev->vdev); + VirtIOMEMClass *vmc = VIRTIO_MEM_GET_CLASS(vmem); + + vmc->decide_memslots(vmem, limit); +} + +static unsigned int virtio_ccw_mem_get_memslots(MemoryDeviceState *md) +{ + VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(md); + VirtIOMEM *vmem = VIRTIO_MEM(&dev->vdev); + VirtIOMEMClass *vmc = VIRTIO_MEM_GET_CLASS(vmem); + + return vmc->get_memslots(vmem); +} + +static uint64_t virtio_ccw_mem_get_plugged_size(const MemoryDeviceState *md, + Error **errp) +{ + return object_property_get_uint(OBJECT(md), VIRTIO_MEM_SIZE_PROP, + errp); +} + +static void virtio_ccw_mem_fill_device_info(const MemoryDeviceState *md, + MemoryDeviceInfo *info) +{ + VirtioMEMDeviceInfo *vi = g_new0(VirtioMEMDeviceInfo, 1); + VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(md); + VirtIOMEM *vmem = &dev->vdev; + VirtIOMEMClass *vpc = VIRTIO_MEM_GET_CLASS(vmem); + DeviceState *vdev = DEVICE(md); + + if (vdev->id) { + vi->id = g_strdup(vdev->id); + } + + /* let the real device handle everything else */ + vpc->fill_device_info(vmem, vi); + + info->u.virtio_mem.data = vi; + info->type = MEMORY_DEVICE_INFO_KIND_VIRTIO_MEM; +} + +static uint64_t virtio_ccw_mem_get_min_alignment(const MemoryDeviceState *md) +{ + return object_property_get_uint(OBJECT(md), VIRTIO_MEM_BLOCK_SIZE_PROP, + &error_abort); +} + +static void virtio_ccw_mem_size_change_notify(Notifier *notifier, void *data) +{ + VirtIOMEMCcw *dev = container_of(notifier, VirtIOMEMCcw, + size_change_notifier); + DeviceState *vdev = DEVICE(dev); + char *qom_path = object_get_canonical_path(OBJECT(dev)); + const uint64_t * const size_p = data; + + qapi_event_send_memory_device_size_change(vdev->id, *size_p, qom_path); + g_free(qom_path); +} + +static void virtio_ccw_mem_unplug_request_check(VirtIOMDCcw *vmd, Error **errp) +{ + VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(vmd); + VirtIOMEM *vmem = &dev->vdev; + VirtIOMEMClass *vpc = VIRTIO_MEM_GET_CLASS(vmem); + + vpc->unplug_request_check(vmem, errp); +} + +static void virtio_ccw_mem_get_requested_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(obj); + + object_property_get(OBJECT(&dev->vdev), name, v, errp); +} + +static void virtio_ccw_mem_set_requested_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + 
VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(obj); + DeviceState *vdev = DEVICE(obj); + + /* + * If we passed virtio_ccw_mem_unplug_request_check(), making sure that + * the requested size is 0, don't allow modifying the requested size + * anymore, otherwise the VM might end up hotplugging memory before + * handling the unplug request. + */ + if (vdev->pending_deleted_event) { + error_setg(errp, "'%s' cannot be changed if the device is in the" + " process of unplug", name); + return; + } + + object_property_set(OBJECT(&dev->vdev), name, v, errp); +} + +static const Property virtio_ccw_mem_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), +}; + +static void virtio_ccw_mem_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(klass); + VirtIOMDCcwClass *vmdc = VIRTIO_MD_CCW_CLASS(klass); + + k->realize = virtio_ccw_mem_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, virtio_ccw_mem_properties); + + mdc->get_addr = virtio_ccw_mem_get_addr; + mdc->set_addr = virtio_ccw_mem_set_addr; + mdc->get_plugged_size = virtio_ccw_mem_get_plugged_size; + mdc->get_memory_region = virtio_ccw_mem_get_memory_region; + mdc->decide_memslots = virtio_ccw_mem_decide_memslots; + mdc->get_memslots = virtio_ccw_mem_get_memslots; + mdc->fill_device_info = virtio_ccw_mem_fill_device_info; + mdc->get_min_alignment = virtio_ccw_mem_get_min_alignment; + + vmdc->unplug_request_check = virtio_ccw_mem_unplug_request_check; +} + +static void virtio_ccw_mem_instance_init(Object *obj) +{ + VirtIOMEMCcw *dev = VIRTIO_MEM_CCW(obj); + VirtIOMEMClass *vmc; + VirtIOMEM *vmem; + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_MEM); + + dev->size_change_notifier.notify = virtio_ccw_mem_size_change_notify; + vmem = &dev->vdev; + vmc = VIRTIO_MEM_GET_CLASS(vmem); + /* + * We never remove the notifier again, as we expect both devices to + * disappear at the same time. + */ + vmc->add_size_change_notifier(vmem, &dev->size_change_notifier); + + object_property_add_alias(obj, VIRTIO_MEM_BLOCK_SIZE_PROP, + OBJECT(&dev->vdev), VIRTIO_MEM_BLOCK_SIZE_PROP); + object_property_add_alias(obj, VIRTIO_MEM_SIZE_PROP, OBJECT(&dev->vdev), + VIRTIO_MEM_SIZE_PROP); + object_property_add(obj, VIRTIO_MEM_REQUESTED_SIZE_PROP, "size", + virtio_ccw_mem_get_requested_size, + virtio_ccw_mem_set_requested_size, NULL, NULL); +} + +static const TypeInfo virtio_ccw_mem = { + .name = TYPE_VIRTIO_MEM_CCW, + .parent = TYPE_VIRTIO_MD_CCW, + .instance_size = sizeof(VirtIOMEMCcw), + .instance_init = virtio_ccw_mem_instance_init, + .class_init = virtio_ccw_mem_class_init, +}; + +static void virtio_ccw_mem_register_types(void) +{ + type_register_static(&virtio_ccw_mem); +} +type_init(virtio_ccw_mem_register_types) diff --git a/hw/s390x/virtio-ccw-mem.h b/hw/s390x/virtio-ccw-mem.h new file mode 100644 index 00000000000..738ab2c744b --- /dev/null +++ b/hw/s390x/virtio-ccw-mem.h @@ -0,0 +1,34 @@ +/* + * Virtio MEM CCW device + * + * Copyright (C) 2024 Red Hat, Inc. + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef HW_S390X_VIRTIO_CCW_MEM_H +#define HW_S390X_VIRTIO_CCW_MEM_H + +#include "virtio-ccw-md.h" +#include "hw/virtio/virtio-mem.h" +#include "qom/object.h" + +typedef struct VirtIOMEMCcw VirtIOMEMCcw; + +/* + * virtio-mem-ccw: This extends VirtIOMDCcw + */ +#define TYPE_VIRTIO_MEM_CCW "virtio-mem-ccw" +DECLARE_INSTANCE_CHECKER(VirtIOMEMCcw, VIRTIO_MEM_CCW, TYPE_VIRTIO_MEM_CCW) + +struct VirtIOMEMCcw { + VirtIOMDCcw parent_obj; + VirtIOMEM vdev; + Notifier size_change_notifier; +}; + +#endif /* HW_S390X_VIRTIO_CCW_MEM_H */ diff --git a/hw/s390x/virtio-ccw-net.c b/hw/s390x/virtio-ccw-net.c index 484e6176599..a7d4afbeb9e 100644 --- a/hw/s390x/virtio-ccw-net.c +++ b/hw/s390x/virtio-ccw-net.c @@ -46,15 +46,15 @@ static void virtio_ccw_net_instance_init(Object *obj) "bootindex"); } -static Property virtio_ccw_net_properties[] = { +static const Property virtio_ccw_net_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_CCW_LOADPARM("loadparm", CcwDevice, loadparm), }; -static void virtio_ccw_net_class_init(ObjectClass *klass, void *data) +static void virtio_ccw_net_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/virtio-ccw-rng.c b/hw/s390x/virtio-ccw-rng.c index a3fffb51384..3263287d45a 100644 --- a/hw/s390x/virtio-ccw-rng.c +++ b/hw/s390x/virtio-ccw-rng.c @@ -43,15 +43,14 @@ static void virtio_ccw_rng_instance_init(Object *obj) TYPE_VIRTIO_RNG); } -static Property virtio_ccw_rng_properties[] = { +static const Property virtio_ccw_rng_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_ccw_rng_class_init(ObjectClass *klass, void *data) +static void virtio_ccw_rng_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/virtio-ccw-scsi.c b/hw/s390x/virtio-ccw-scsi.c index d003f89f437..06b4c6c4a5a 100644 --- a/hw/s390x/virtio-ccw-scsi.c +++ b/hw/s390x/virtio-ccw-scsi.c @@ -53,15 +53,14 @@ static void virtio_ccw_scsi_instance_init(Object *obj) TYPE_VIRTIO_SCSI); } -static Property virtio_ccw_scsi_properties[] = { +static const Property virtio_ccw_scsi_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_ccw_scsi_class_init(ObjectClass *klass, void *data) +static void virtio_ccw_scsi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/virtio-ccw-serial.c b/hw/s390x/virtio-ccw-serial.c index 8f8d2302f81..0dac590c080 100644 --- a/hw/s390x/virtio-ccw-serial.c +++ b/hw/s390x/virtio-ccw-serial.c @@ -53,15 +53,14 @@ static void virtio_ccw_serial_instance_init(Object *obj) TYPE_VIRTIO_SERIAL); } -static Property virtio_ccw_serial_properties[] = { +static const Property virtio_ccw_serial_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, 
true), DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, VIRTIO_CCW_MAX_REV), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_ccw_serial_class_init(ObjectClass *klass, void *data) +static void virtio_ccw_serial_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index b4676909dd6..d2f85b39f30 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -12,8 +12,8 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "exec/address-spaces.h" -#include "sysemu/kvm.h" +#include "system/address-spaces.h" +#include "system/kvm.h" #include "net/net.h" #include "hw/virtio/virtio.h" #include "migration/qemu-file-types.h" @@ -32,7 +32,7 @@ #include "trace.h" #include "hw/s390x/css-bridge.h" #include "hw/s390x/s390-virtio-ccw.h" -#include "sysemu/replay.h" +#include "system/replay.h" #define NR_CLASSIC_INDICATOR_BITS 64 @@ -913,14 +913,15 @@ static void virtio_ccw_notify(DeviceState *d, uint16_t vector) } } -static void virtio_ccw_reset(DeviceState *d) +static void virtio_ccw_reset_hold(Object *obj, ResetType type) { - VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(obj); VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev); virtio_ccw_reset_virtio(dev); - if (vdc->parent_reset) { - vdc->parent_reset(d); + + if (vdc->parent_phases.hold) { + vdc->parent_phases.hold(obj, type); } } @@ -1156,7 +1157,6 @@ static void virtio_ccw_device_plugged(DeviceState *d, Error **errp) CcwDevice *ccw_dev = CCW_DEVICE(d); SubchDev *sch = ccw_dev->sch; int n = virtio_get_num_queues(vdev); - S390FLICState *flic = s390_get_flic(); if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) { dev->max_rev = 0; @@ -1183,10 +1183,10 @@ static void virtio_ccw_device_plugged(DeviceState *d, Error **errp) VIRTIO_QUEUE_MAX); return; } - if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) { + if (virtio_get_num_queues(vdev) > ADAPTER_ROUTES_MAX_GSI) { error_setg(errp, "The number of virtqueues %d " "exceeds flic adapter route limit %d", n, - flic->adapter_routes_max_batch); + ADAPTER_ROUTES_MAX_GSI); return; } @@ -1228,16 +1228,18 @@ static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev, virtio_ccw_stop_ioeventfd(_dev); } -static void virtio_ccw_device_class_init(ObjectClass *klass, void *data) +static void virtio_ccw_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); CCWDeviceClass *k = CCW_DEVICE_CLASS(dc); VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); k->unplug = virtio_ccw_busdev_unplug; dc->realize = virtio_ccw_busdev_realize; dc->unrealize = virtio_ccw_busdev_unrealize; - device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, virtio_ccw_reset_hold, NULL, + &vdc->parent_phases); } static const TypeInfo virtio_ccw_device_info = { @@ -1260,7 +1262,7 @@ static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size, qbus_init(bus, bus_size, TYPE_VIRTIO_CCW_BUS, qdev, virtio_bus_name); } -static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data) +static void virtio_ccw_bus_class_init(ObjectClass *klass, const void *data) { VirtioBusClass *k = VIRTIO_BUS_CLASS(klass); BusClass *bus_class = BUS_CLASS(klass); diff --git a/hw/s390x/virtio-ccw.h b/hw/s390x/virtio-ccw.h index 
fac186c8f64..c7a830a1944 100644 --- a/hw/s390x/virtio-ccw.h +++ b/hw/s390x/virtio-ccw.h @@ -57,7 +57,7 @@ struct VirtIOCCWDeviceClass { CCWDeviceClass parent_class; void (*realize)(VirtioCcwDevice *dev, Error **errp); void (*unrealize)(VirtioCcwDevice *dev); - void (*parent_reset)(DeviceState *dev); + ResettablePhases parent_phases; }; /* Performance improves when virtqueue kick processing is decoupled from the diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c index 42d9d2e4835..12c86eb7aaa 100644 --- a/hw/scsi/esp-pci.c +++ b/hw/scsi/esp-pci.c @@ -427,7 +427,7 @@ static void esp_pci_init(Object *obj) object_initialize_child(obj, "esp", &pci->esp, TYPE_ESP); } -static void esp_pci_class_init(ObjectClass *klass, void *data) +static void esp_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -440,7 +440,7 @@ static void esp_pci_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_STORAGE_SCSI; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); dc->desc = "AMD Am53c974 PCscsi-PCI SCSI adapter"; - dc->reset = esp_pci_hard_reset; + device_class_set_legacy_reset(dc, esp_pci_hard_reset); dc->vmsd = &vmstate_esp_pci_scsi; } @@ -450,7 +450,7 @@ static const TypeInfo esp_pci_info = { .instance_init = esp_pci_init, .instance_size = sizeof(PCIESPState), .class_init = esp_pci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -557,7 +557,7 @@ static void dc390_scsi_realize(PCIDevice *dev, Error **errp) contents[EE_CHKSUM2] = chksum >> 8; } -static void dc390_class_init(ObjectClass *klass, void *data) +static void dc390_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c index b7af8256232..1d264c40e57 100644 --- a/hw/scsi/esp.c +++ b/hw/scsi/esp.c @@ -242,10 +242,7 @@ static uint32_t esp_get_stc(ESPState *s) static uint8_t esp_pdma_read(ESPState *s) { - uint8_t val; - - val = esp_fifo_pop(s); - return val; + return esp_fifo_pop(s); } static void esp_pdma_write(ESPState *s, uint8_t val) @@ -278,6 +275,7 @@ static int esp_select(ESPState *s) if (!s->current_dev) { /* No such drive */ s->rregs[ESP_RSTAT] = 0; + s->asc_mode = ESP_ASC_MODE_DIS; s->rregs[ESP_RINTR] = INTR_DC; esp_raise_irq(s); return -1; @@ -287,6 +285,7 @@ static int esp_select(ESPState *s) * Note that we deliberately don't raise the IRQ here: this will be done * either in esp_transfer_data() or esp_command_complete() */ + s->asc_mode = ESP_ASC_MODE_INI; return 0; } @@ -311,6 +310,7 @@ static void do_command_phase(ESPState *s) if (!current_lun) { /* No such drive */ s->rregs[ESP_RSTAT] = 0; + s->asc_mode = ESP_ASC_MODE_DIS; s->rregs[ESP_RINTR] = INTR_DC; s->rregs[ESP_RSEQ] = SEQ_0; esp_raise_irq(s); @@ -490,8 +490,10 @@ static void esp_do_dma(ESPState *s) case STAT_MO: if (s->dma_memory_read) { len = MIN(len, fifo8_num_free(&s->cmdfifo)); - s->dma_memory_read(s->dma_opaque, buf, len); - esp_set_tc(s, esp_get_tc(s) - len); + if (len) { + s->dma_memory_read(s->dma_opaque, buf, len); + esp_set_tc(s, esp_get_tc(s) - len); + } } else { len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo)); len = MIN(fifo8_num_free(&s->cmdfifo), len); @@ -544,9 +546,11 @@ static void esp_do_dma(ESPState *s) trace_esp_do_dma(cmdlen, len); if (s->dma_memory_read) { len = MIN(len, fifo8_num_free(&s->cmdfifo)); - s->dma_memory_read(s->dma_opaque, buf, 
len); - fifo8_push_all(&s->cmdfifo, buf, len); - esp_set_tc(s, esp_get_tc(s) - len); + if (len) { + s->dma_memory_read(s->dma_opaque, buf, len); + fifo8_push_all(&s->cmdfifo, buf, len); + esp_set_tc(s, esp_get_tc(s) - len); + } } else { len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo)); len = MIN(fifo8_num_free(&s->cmdfifo), len); @@ -575,8 +579,10 @@ static void esp_do_dma(ESPState *s) switch (s->rregs[ESP_CMD]) { case CMD_TI | CMD_DMA: if (s->dma_memory_read) { - s->dma_memory_read(s->dma_opaque, s->async_buf, len); - esp_set_tc(s, esp_get_tc(s) - len); + if (len) { + s->dma_memory_read(s->dma_opaque, s->async_buf, len); + esp_set_tc(s, esp_get_tc(s) - len); + } } else { /* Copy FIFO data to device */ len = MIN(s->async_len, ESP_FIFO_SZ); @@ -628,7 +634,9 @@ static void esp_do_dma(ESPState *s) switch (s->rregs[ESP_CMD]) { case CMD_TI | CMD_DMA: if (s->dma_memory_write) { - s->dma_memory_write(s->dma_opaque, s->async_buf, len); + if (len) { + s->dma_memory_write(s->dma_opaque, s->async_buf, len); + } } else { /* Copy device data to FIFO */ len = MIN(len, fifo8_num_free(&s->fifo)); @@ -678,6 +686,7 @@ static void esp_do_dma(ESPState *s) buf[0] = s->status; if (s->dma_memory_write) { + /* Length already non-zero */ s->dma_memory_write(s->dma_opaque, buf, len); } else { esp_fifo_push_buf(s, buf, len); @@ -712,6 +721,7 @@ static void esp_do_dma(ESPState *s) buf[0] = 0; if (s->dma_memory_write) { + /* Length already non-zero */ s->dma_memory_write(s->dma_opaque, buf, len); } else { esp_fifo_push_buf(s, buf, len); @@ -1015,6 +1025,7 @@ void esp_transfer_data(SCSIRequest *req, uint32_t len) */ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; s->rregs[ESP_RSEQ] = SEQ_CD; + esp_raise_irq(s); break; case CMD_SELATNS | CMD_DMA: @@ -1025,20 +1036,21 @@ void esp_transfer_data(SCSIRequest *req, uint32_t len) */ s->rregs[ESP_RINTR] |= INTR_BS; s->rregs[ESP_RSEQ] = SEQ_MO; + esp_raise_irq(s); break; case CMD_TI | CMD_DMA: case CMD_TI: /* - * Bus service interrupt raised because of initial change to - * DATA phase + * If the final COMMAND phase data was transferred using a TI + * command, clear ESP_CMD to terminate the TI command and raise + * the completion interrupt */ s->rregs[ESP_CMD] = 0; s->rregs[ESP_RINTR] |= INTR_BS; + esp_raise_irq(s); break; } - - esp_raise_irq(s); } /* @@ -1093,6 +1105,7 @@ void esp_hard_reset(ESPState *s) fifo8_reset(&s->cmdfifo); s->dma = 0; s->dma_cb = NULL; + s->asc_mode = ESP_ASC_MODE_DIS; s->rregs[ESP_CFG1] = 7; } @@ -1116,6 +1129,38 @@ static void parent_esp_reset(ESPState *s, int irq, int level) } } +static bool esp_cmd_is_valid(ESPState *s, uint8_t cmd) +{ + uint8_t cmd_group = (cmd & CMD_GRP_MASK) >> 4; + + /* Always allow misc commands */ + if (cmd_group == CMD_GRP_MISC) { + return true; + } + + switch (s->asc_mode) { + case ESP_ASC_MODE_DIS: + /* Disconnected mode: only allow disconnected commands */ + if (cmd_group == CMD_GRP_DISC) { + return true; + } + break; + + case ESP_ASC_MODE_INI: + /* Initiator mode: allow initiator commands */ + if (cmd_group == CMD_GRP_INIT) { + return true; + } + break; + + default: + g_assert_not_reached(); + } + + trace_esp_invalid_cmd(cmd, s->asc_mode); + return false; +} + static void esp_run_cmd(ESPState *s) { uint8_t cmd = s->rregs[ESP_CMD]; @@ -1161,6 +1206,7 @@ static void esp_run_cmd(ESPState *s) break; case CMD_MSGACC: trace_esp_mem_writeb_cmd_msgacc(cmd); + s->asc_mode = ESP_ASC_MODE_DIS; s->rregs[ESP_RINTR] |= INTR_DC; s->rregs[ESP_RSEQ] = 0; s->rregs[ESP_RFLAGS] = 0; @@ -1271,6 +1317,11 @@ void esp_reg_write(ESPState *s, 
uint32_t saddr, uint64_t val) break; case ESP_CMD: s->rregs[saddr] = val; + if (!esp_cmd_is_valid(s, s->rregs[saddr])) { + s->rregs[ESP_RSTAT] |= INTR_IL; + esp_raise_irq(s); + break; + } esp_run_cmd(s); break; case ESP_WBUSID ... ESP_WSYNO: @@ -1328,6 +1379,14 @@ static bool esp_is_between_version_5_and_6(void *opaque, int version_id) return version_id >= 5 && version_id <= 6; } +static bool esp_is_version_8(void *opaque, int version_id) +{ + ESPState *s = ESP(opaque); + + version_id = MIN(version_id, s->mig_version_id); + return version_id >= 8; +} + int esp_pre_save(void *opaque) { ESPState *s = ESP(object_resolve_path_component( @@ -1359,13 +1418,18 @@ static int esp_post_load(void *opaque, int version_id) } } + if (version_id < 8) { + /* Assume initiator mode to allow all commands to continue */ + s->asc_mode = ESP_ASC_MODE_INI; + } + s->mig_version_id = vmstate_esp.version_id; return 0; } const VMStateDescription vmstate_esp = { .name = "esp", - .version_id = 7, + .version_id = 8, .minimum_version_id = 3, .post_load = esp_post_load, .fields = (const VMStateField[]) { @@ -1397,6 +1461,7 @@ const VMStateDescription vmstate_esp = { esp_is_between_version_5_and_6), VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6), VMSTATE_BOOL(drq_state, ESPState), + VMSTATE_UINT8_TEST(asc_mode, ESPState, esp_is_version_8), VMSTATE_END_OF_LIST() }, }; @@ -1571,12 +1636,12 @@ static const VMStateDescription vmstate_sysbus_esp_scsi = { } }; -static void sysbus_esp_class_init(ObjectClass *klass, void *data) +static void sysbus_esp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = sysbus_esp_realize; - dc->reset = sysbus_esp_hard_reset; + device_class_set_legacy_reset(dc, sysbus_esp_hard_reset); dc->vmsd = &vmstate_sysbus_esp_scsi; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } @@ -1597,7 +1662,7 @@ static void esp_init(Object *obj) fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ); } -static void esp_class_init(ObjectClass *klass, void *data) +static void esp_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c index f1935e53280..9ea4aa0a853 100644 --- a/hw/scsi/lsi53c895a.c +++ b/hw/scsi/lsi53c895a.c @@ -19,7 +19,7 @@ #include "hw/pci/pci_device.h" #include "hw/scsi/scsi.h" #include "migration/vmstate.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qemu/log.h" #include "qemu/module.h" #include "trace.h" @@ -1112,7 +1112,7 @@ static void lsi_do_msgout(LSIState *s) static void lsi_memcpy(LSIState *s, uint32_t dest, uint32_t src, int count) { int n; - uint8_t buf[LSI_BUF_SIZE]; + QEMU_UNINITIALIZED uint8_t buf[LSI_BUF_SIZE]; trace_lsi_memcpy(dest, src, count); while (count) { @@ -2372,10 +2372,10 @@ static void lsi_scsi_exit(PCIDevice *dev) LSIState *s = LSI53C895A(dev); address_space_destroy(&s->pci_io_as); - timer_del(s->scripts_timer); + timer_free(s->scripts_timer); } -static void lsi_class_init(ObjectClass *klass, void *data) +static void lsi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -2386,7 +2386,7 @@ static void lsi_class_init(ObjectClass *klass, void *data) k->device_id = PCI_DEVICE_ID_LSI_53C895A; k->class_id = PCI_CLASS_STORAGE_SCSI; k->subsystem_id = 0x1000; - dc->reset = lsi_scsi_reset; + device_class_set_legacy_reset(dc, lsi_scsi_reset); dc->vmsd = &vmstate_lsi_scsi; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } @@ -2396,13 +2396,13 @@ 
static const TypeInfo lsi_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(LSIState), .class_init = lsi_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -static void lsi53c810_class_init(ObjectClass *klass, void *data) +static void lsi53c810_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c index 2d0c6071771..844643d916f 100644 --- a/hw/scsi/megasas.c +++ b/hw/scsi/megasas.c @@ -21,9 +21,9 @@ #include "qemu/osdep.h" #include "hw/pci/pci.h" #include "hw/qdev-properties.h" -#include "sysemu/dma.h" -#include "sysemu/block-backend.h" -#include "sysemu/rtc.h" +#include "system/dma.h" +#include "system/block-backend.h" +#include "system/rtc.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" #include "qemu/iov.h" @@ -981,13 +981,11 @@ static int megasas_event_wait(MegasasState *s, MegasasCmd *cmd) static int megasas_dcmd_pd_get_list(MegasasState *s, MegasasCmd *cmd) { - struct mfi_pd_list info; - size_t dcmd_size = sizeof(info); + struct mfi_pd_list info = {}; BusChild *kid; uint32_t offset, dcmd_limit, num_pd_disks = 0, max_pd_disks; dma_addr_t residual; - memset(&info, 0, dcmd_size); offset = 8; dcmd_limit = offset + sizeof(struct mfi_pd_address); if (cmd->iov_size < dcmd_limit) { @@ -1429,11 +1427,10 @@ static int megasas_dcmd_cfg_read(MegasasState *s, MegasasCmd *cmd) static int megasas_dcmd_get_properties(MegasasState *s, MegasasCmd *cmd) { - struct mfi_ctrl_props info; + struct mfi_ctrl_props info = {}; size_t dcmd_size = sizeof(info); dma_addr_t residual; - memset(&info, 0x0, dcmd_size); if (cmd->iov_size < dcmd_size) { trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, dcmd_size); @@ -1781,7 +1778,7 @@ static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd) uint8_t cdb[16]; int len; struct SCSIDevice *sdev = NULL; - int target_id, lun_id, cdb_len; + int target_id, lun_id; lba_count = le32_to_cpu(cmd->frame->io.header.data_len); lba_start_lo = le32_to_cpu(cmd->frame->io.lba_lo); @@ -1790,7 +1787,6 @@ static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd) target_id = cmd->frame->header.target_id; lun_id = cmd->frame->header.lun_id; - cdb_len = cmd->frame->header.cdb_len; if (target_id < MFI_MAX_LD && lun_id == 0) { sdev = scsi_device_find(&s->bus, 0, target_id, lun_id); @@ -1805,15 +1801,6 @@ static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd) return MFI_STAT_DEVICE_NOT_FOUND; } - if (cdb_len > 16) { - trace_megasas_scsi_invalid_cdb_len( - mfi_frame_desc(frame_cmd), 1, target_id, lun_id, cdb_len); - megasas_write_sense(cmd, SENSE_CODE(INVALID_OPCODE)); - cmd->frame->header.scsi_status = CHECK_CONDITION; - s->event_count++; - return MFI_STAT_SCSI_DONE_WITH_ERROR; - } - cmd->iov_size = lba_count * sdev->blocksize; if (megasas_map_sgl(s, cmd, &cmd->frame->io.sgl)) { megasas_write_sense(cmd, SENSE_CODE(TARGET_FAILURE)); @@ -1824,7 +1811,7 @@ static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd) megasas_encode_lba(cdb, lba_start, lba_count, is_write); cmd->req = scsi_req_new(sdev, cmd->index, - lun_id, cdb, cdb_len, cmd); + lun_id, cdb, sizeof(cdb), cmd); if (!cmd->req) { trace_megasas_scsi_req_alloc_failed( mfi_frame_desc(frame_cmd), target_id, lun_id); @@ -2236,7 +2223,6 @@ static uint64_t megasas_queue_read(void *opaque, hwaddr addr, static void megasas_queue_write(void *opaque, hwaddr addr, 
uint64_t val, unsigned size) { - return; } static const MemoryRegionOps megasas_queue_ops = { @@ -2458,7 +2444,7 @@ static void megasas_scsi_realize(PCIDevice *dev, Error **errp) scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &megasas_scsi_info); } -static Property megasas_properties_gen1[] = { +static const Property megasas_properties_gen1[] = { DEFINE_PROP_UINT32("max_sge", MegasasState, fw_sge, MEGASAS_DEFAULT_SGE), DEFINE_PROP_UINT32("max_cmds", MegasasState, fw_cmds, @@ -2469,10 +2455,9 @@ static Property megasas_properties_gen1[] = { DEFINE_PROP_ON_OFF_AUTO("msix", MegasasState, msix, ON_OFF_AUTO_AUTO), DEFINE_PROP_BIT("use_jbod", MegasasState, flags, MEGASAS_FLAG_USE_JBOD, false), - DEFINE_PROP_END_OF_LIST(), }; -static Property megasas_properties_gen2[] = { +static const Property megasas_properties_gen2[] = { DEFINE_PROP_UINT32("max_sge", MegasasState, fw_sge, MEGASAS_DEFAULT_SGE), DEFINE_PROP_UINT32("max_cmds", MegasasState, fw_cmds, @@ -2483,7 +2468,6 @@ static Property megasas_properties_gen2[] = { DEFINE_PROP_ON_OFF_AUTO("msix", MegasasState, msix, ON_OFF_AUTO_AUTO), DEFINE_PROP_BIT("use_jbod", MegasasState, flags, MEGASAS_FLAG_USE_JBOD, false), - DEFINE_PROP_END_OF_LIST(), }; typedef struct MegasasInfo { @@ -2497,8 +2481,9 @@ typedef struct MegasasInfo { int mmio_bar; int osts; const VMStateDescription *vmsd; - Property *props; - InterfaceInfo *interfaces; + const Property *props; + size_t props_count; + const InterfaceInfo *interfaces; } MegasasInfo; static struct MegasasInfo megasas_devices[] = { @@ -2514,7 +2499,8 @@ static struct MegasasInfo megasas_devices[] = { .osts = MFI_1078_RM | 1, .vmsd = &vmstate_megasas_gen1, .props = megasas_properties_gen1, - .interfaces = (InterfaceInfo[]) { + .props_count = ARRAY_SIZE(megasas_properties_gen1), + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -2530,14 +2516,15 @@ static struct MegasasInfo megasas_devices[] = { .osts = MFI_GEN2_RM, .vmsd = &vmstate_megasas_gen2, .props = megasas_properties_gen2, - .interfaces = (InterfaceInfo[]) { + .props_count = ARRAY_SIZE(megasas_properties_gen2), + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { } }, } }; -static void megasas_class_init(ObjectClass *oc, void *data) +static void megasas_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); @@ -2556,8 +2543,8 @@ static void megasas_class_init(ObjectClass *oc, void *data) e->osts = info->osts; e->product_name = info->product_name; e->product_version = info->product_version; - device_class_set_props(dc, info->props); - dc->reset = megasas_scsi_reset; + device_class_set_props_n(dc, info->props, info->props_count); + device_class_set_legacy_reset(dc, megasas_scsi_reset); dc->vmsd = info->vmsd; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); dc->desc = info->desc; @@ -2582,11 +2569,11 @@ static void megasas_register_types(void) type_info.name = info->name; type_info.parent = TYPE_MEGASAS_BASE; - type_info.class_data = (void *)info; + type_info.class_data = info; type_info.class_init = megasas_class_init; type_info.interfaces = info->interfaces; - type_register(&type_info); + type_register_static(&type_info); } } diff --git a/hw/scsi/mptendian.c b/hw/scsi/mptendian.c index 0d5abb4b6cc..6cba92f3761 100644 --- a/hw/scsi/mptendian.c +++ b/hw/scsi/mptendian.c @@ -22,7 +22,7 @@ #include "qemu/osdep.h" #include "hw/pci/pci.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/pci/msi.h" #include 
"qemu/iov.h" #include "hw/scsi/scsi.h" diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c index c5d3138c936..1ebe0b82a79 100644 --- a/hw/scsi/mptsas.c +++ b/hw/scsi/mptsas.c @@ -25,7 +25,7 @@ #include "qemu/osdep.h" #include "hw/pci/pci.h" #include "hw/qdev-properties.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/pci/msi.h" #include "qemu/iov.h" #include "qemu/main-loop.h" @@ -1410,14 +1410,13 @@ static const VMStateDescription vmstate_mptsas = { } }; -static Property mptsas_properties[] = { +static const Property mptsas_properties[] = { DEFINE_PROP_UINT64("sas_address", MPTSASState, sas_addr, 0), /* TODO: test MSI support under Windows */ DEFINE_PROP_ON_OFF_AUTO("msi", MPTSASState, msi, ON_OFF_AUTO_AUTO), - DEFINE_PROP_END_OF_LIST(), }; -static void mptsas1068_class_init(ObjectClass *oc, void *data) +static void mptsas1068_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); @@ -1431,7 +1430,7 @@ static void mptsas1068_class_init(ObjectClass *oc, void *data) pc->subsystem_id = 0x8000; pc->class_id = PCI_CLASS_STORAGE_SCSI; device_class_set_props(dc, mptsas_properties); - dc->reset = mptsas_reset; + device_class_set_legacy_reset(dc, mptsas_reset); dc->vmsd = &vmstate_mptsas; dc->desc = "LSI SAS 1068"; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); @@ -1442,7 +1441,7 @@ static const TypeInfo mptsas_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(MPTSASState), .class_init = mptsas1068_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -1450,7 +1449,7 @@ static const TypeInfo mptsas_info = { static void mptsas_register_types(void) { - type_register(&mptsas_info); + type_register_static(&mptsas_info); } type_init(mptsas_register_types) diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c index 53eff5dd3d6..9b12ee7f1c6 100644 --- a/hw/scsi/scsi-bus.c +++ b/hw/scsi/scsi-bus.c @@ -9,12 +9,12 @@ #include "migration/qemu-file-types.h" #include "migration/vmstate.h" #include "scsi/constants.h" -#include "sysemu/block-backend.h" -#include "sysemu/blockdev.h" -#include "sysemu/sysemu.h" -#include "sysemu/runstate.h" +#include "system/block-backend.h" +#include "system/blockdev.h" +#include "system/system.h" +#include "system/runstate.h" #include "trace.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qemu/cutils.h" static char *scsibus_get_dev_path(DeviceState *dev); @@ -100,8 +100,15 @@ static void scsi_device_for_each_req_sync(SCSIDevice *s, assert(!runstate_is_running()); assert(qemu_in_main_thread()); - QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) { - fn(req, opaque); + /* + * Locking is not necessary because the guest is stopped and no other + * threads can be accessing the requests list, but take the lock for + * consistency. + */ + WITH_QEMU_LOCK_GUARD(&s->requests_lock) { + QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) { + fn(req, opaque); + } } } @@ -115,21 +122,29 @@ static void scsi_device_for_each_req_async_bh(void *opaque) { g_autofree SCSIDeviceForEachReqAsyncData *data = opaque; SCSIDevice *s = data->s; - AioContext *ctx; - SCSIRequest *req; - SCSIRequest *next; + g_autoptr(GList) reqs = NULL; /* - * The BB cannot have changed contexts between this BH being scheduled and - * now: BBs' AioContexts, when they have a node attached, can only be - * changed via bdrv_try_change_aio_context(), in a drained section. 
While - * we have the in-flight counter incremented, that drain must block. + * Build a list of requests in this AioContext so fn() can be invoked later + * outside requests_lock. */ - ctx = blk_get_aio_context(s->conf.blk); - assert(ctx == qemu_get_current_aio_context()); + WITH_QEMU_LOCK_GUARD(&s->requests_lock) { + AioContext *ctx = qemu_get_current_aio_context(); + SCSIRequest *req; + SCSIRequest *next; + + QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) { + if (req->ctx == ctx) { + scsi_req_ref(req); /* dropped after calling fn() */ + reqs = g_list_prepend(reqs, req); + } + } + } - QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) { - data->fn(req, data->fn_opaque); + /* Call fn() on each request */ + for (GList *elem = g_list_first(reqs); elem; elem = g_list_next(elem)) { + data->fn(elem->data, data->fn_opaque); + scsi_req_unref(elem->data); } /* Drop the reference taken by scsi_device_for_each_req_async() */ @@ -139,9 +154,35 @@ static void scsi_device_for_each_req_async_bh(void *opaque) blk_dec_in_flight(s->conf.blk); } +static void scsi_device_for_each_req_async_do_ctx(gpointer key, gpointer value, + gpointer user_data) +{ + AioContext *ctx = key; + SCSIDeviceForEachReqAsyncData *params = user_data; + SCSIDeviceForEachReqAsyncData *data; + + data = g_new(SCSIDeviceForEachReqAsyncData, 1); + data->s = params->s; + data->fn = params->fn; + data->fn_opaque = params->fn_opaque; + + /* + * Hold a reference to the SCSIDevice until + * scsi_device_for_each_req_async_bh() finishes. + */ + object_ref(OBJECT(data->s)); + + /* Paired with scsi_device_for_each_req_async_bh() */ + blk_inc_in_flight(data->s->conf.blk); + + aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh, data); +} + /* * Schedule @fn() to be invoked for each enqueued request in device @s. @fn() - * runs in the AioContext that is executing the request. + * must be thread-safe because it runs concurrently in each AioContext that is + * executing a request. + * * Keeps the BlockBackend's in-flight counter incremented until everything is * done, so draining it will settle all scheduled @fn() calls. */ @@ -151,24 +192,26 @@ static void scsi_device_for_each_req_async(SCSIDevice *s, { assert(qemu_in_main_thread()); - SCSIDeviceForEachReqAsyncData *data = - g_new(SCSIDeviceForEachReqAsyncData, 1); - - data->s = s; - data->fn = fn; - data->fn_opaque = opaque; - - /* - * Hold a reference to the SCSIDevice until - * scsi_device_for_each_req_async_bh() finishes. 
- */ - object_ref(OBJECT(s)); + /* The set of AioContexts where the requests are being processed */ + g_autoptr(GHashTable) aio_contexts = g_hash_table_new(NULL, NULL); + WITH_QEMU_LOCK_GUARD(&s->requests_lock) { + SCSIRequest *req; + QTAILQ_FOREACH(req, &s->requests, next) { + g_hash_table_add(aio_contexts, req->ctx); + } + } - /* Paired with blk_dec_in_flight() in scsi_device_for_each_req_async_bh() */ - blk_inc_in_flight(s->conf.blk); - aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk), - scsi_device_for_each_req_async_bh, - data); + /* Schedule a BH for each AioContext */ + SCSIDeviceForEachReqAsyncData params = { + .s = s, + .fn = fn, + .fn_opaque = opaque, + }; + g_hash_table_foreach( + aio_contexts, + scsi_device_for_each_req_async_do_ctx, + &params + ); } static void scsi_device_realize(SCSIDevice *s, Error **errp) @@ -349,6 +392,7 @@ static void scsi_qdev_realize(DeviceState *qdev, Error **errp) dev->lun = lun; } + qemu_mutex_init(&dev->requests_lock); QTAILQ_INIT(&dev->requests); scsi_device_realize(dev, &local_err); if (local_err) { @@ -356,7 +400,7 @@ return; } dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev), - scsi_dma_restart_cb, dev); + scsi_dma_restart_cb, NULL, dev); } static void scsi_qdev_unrealize(DeviceState *qdev) @@ -369,6 +413,8 @@ static void scsi_qdev_unrealize(DeviceState *qdev) scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE)); + qemu_mutex_destroy(&dev->requests_lock); + scsi_device_unrealize(dev); blockdev_mark_auto_del(dev->conf.blk); @@ -868,6 +914,7 @@ SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun, } } + req->ctx = qemu_get_current_aio_context(); req->cmd = cmd; req->residual = req->cmd.xfer; @@ -964,7 +1011,10 @@ static void scsi_req_enqueue_internal(SCSIRequest *req) req->sg = NULL; } req->enqueued = true; - QTAILQ_INSERT_TAIL(&req->dev->requests, req, next); + + WITH_QEMU_LOCK_GUARD(&req->dev->requests_lock) { + QTAILQ_INSERT_TAIL(&req->dev->requests, req, next); + } } int32_t scsi_req_enqueue(SCSIRequest *req) @@ -984,7 +1034,9 @@ static void scsi_req_dequeue(SCSIRequest *req) trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag); req->retry = false; if (req->enqueued) { - QTAILQ_REMOVE(&req->dev->requests, req, next); + WITH_QEMU_LOCK_GUARD(&req->dev->requests_lock) { + QTAILQ_REMOVE(&req->dev->requests, req, next); + } req->enqueued = false; scsi_req_unref(req); } @@ -1943,14 +1995,13 @@ const VMStateDescription vmstate_scsi_device = { } }; -static Property scsi_props[] = { +static const Property scsi_props[] = { DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0), DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1), DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1), - DEFINE_PROP_END_OF_LIST(), }; -static void scsi_device_class_init(ObjectClass *klass, void *data) +static void scsi_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); set_bit(DEVICE_CATEGORY_STORAGE, k->categories); @@ -1962,8 +2013,7 @@ static void scsi_device_class_init(ObjectClass *klass, void *data) static void scsi_dev_instance_init(Object *obj) { - DeviceState *dev = DEVICE(obj); - SCSIDevice *s = SCSI_DEVICE(dev); + SCSIDevice *s = SCSI_DEVICE(obj); device_add_bootindex_property(obj, &s->conf.bootindex, "bootindex", NULL, @@ -1980,7 +2030,7 @@ static const TypeInfo scsi_device_type_info = { .instance_init = scsi_dev_instance_init, }; -static void scsi_bus_class_init(ObjectClass *klass, void *data) +static void 
scsi_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); @@ -1996,7 +2046,7 @@ static const TypeInfo scsi_bus_info = { .parent = TYPE_BUS, .instance_size = sizeof(SCSIBus), .class_init = scsi_bus_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } } diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index a67092db6a6..b4782c6248d 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -32,13 +32,14 @@ #include "migration/vmstate.h" #include "hw/scsi/emulation.h" #include "scsi/constants.h" -#include "sysemu/block-backend.h" -#include "sysemu/blockdev.h" +#include "system/arch_init.h" +#include "system/block-backend.h" +#include "system/blockdev.h" #include "hw/block/block.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" -#include "sysemu/dma.h" -#include "sysemu/sysemu.h" +#include "system/dma.h" +#include "system/system.h" #include "qemu/cutils.h" #include "trace.h" #include "qom/object.h" @@ -65,9 +66,15 @@ OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE) struct SCSIDiskClass { SCSIDeviceClass parent_class; + /* + * Callbacks receive ret == 0 for success. Errors are represented either as + * negative errno values, or as positive SAM status codes. For host_status + * errors, the function passes ret == -ENODEV and sets the host_status field + * of the SCSIRequest. + */ DMAIOFunc *dma_readv; DMAIOFunc *dma_writev; - bool (*need_fua_emulation)(SCSICommand *cmd); + bool (*need_fua)(SCSICommand *cmd); void (*update_sense)(SCSIRequest *r); }; @@ -78,7 +85,7 @@ typedef struct SCSIDiskReq { uint32_t sector_count; uint32_t buflen; bool started; - bool need_fua_emulation; + bool need_fua; struct iovec iov; QEMUIOVector qiov; BlockAcctCookie acct; @@ -98,12 +105,12 @@ struct SCSIDiskState { uint64_t max_unmap_size; uint64_t max_io_size; uint32_t quirks; - QEMUBH *bh; char *version; char *serial; char *vendor; char *product; char *device_id; + char *loadparm; /* only for s390x */ bool tray_open; bool tray_locked; /* @@ -217,22 +224,61 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed) SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); SCSISense sense = SENSE_CODE(NO_SENSE); - int error = 0; + int16_t host_status; + int error; bool req_has_sense = false; BlockErrorAction action; int status; + /* + * host_status should only be set for SG_IO requests that came back with a + * host_status error in scsi_block_sgio_complete(). This error path passes + * -ENODEV as the return value. + * + * Reset host_status in the request because we may still want to complete + * the request successfully with the 'stop' or 'ignore' error policy. + */ + host_status = r->req.host_status; + if (host_status != -1) { + assert(ret == -ENODEV); + r->req.host_status = -1; + } + if (ret < 0) { status = scsi_sense_from_errno(-ret, &sense); error = -ret; } else { /* A passthrough command has completed with nonzero status. */ status = ret; - if (status == CHECK_CONDITION) { + switch (status) { + case CHECK_CONDITION: req_has_sense = true; error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); - } else { + break; + case RESERVATION_CONFLICT: + /* + * Don't apply the error policy, always report to the guest. 
+ * + * This is a passthrough code path, so it's not a backend error, but + * a response to an invalid guest request. + * + * Windows Failover Cluster validation intentionally sends invalid + * requests to verify that reservations work as intended. It is + * crucial that it sees the resulting errors. + * + * Treating a reservation conflict as a guest-side error is obvious + * when a pr-manager is in use. Without one, the situation is less + * clear, but there might be nothing that can be fixed on the host + * (like in the above example), and we don't want to be stuck in a + * loop where resuming the VM and retrying the request immediately + * stops it again. So always reporting is still the safer option in + * this case, too. + */ + error = 0; + break; + default: error = EINVAL; + break; } } @@ -242,8 +288,9 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed) * are usually retried immediately, so do not post them to QMP and * do not account them as failed I/O. */ - if (req_has_sense && - scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) { + if (!error || (req_has_sense && + scsi_sense_buf_is_guest_recoverable(r->req.sense, + sizeof(r->req.sense)))) { action = BLOCK_ERROR_ACTION_REPORT; acct_failed = false; } else { @@ -256,6 +303,10 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed) if (acct_failed) { block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); } + if (host_status != -1) { + scsi_req_complete_failed(&r->req, host_status); + return true; + } if (req_has_sense) { sdc->update_sense(&r->req); } else if (status == CHECK_CONDITION) { @@ -283,7 +334,7 @@ static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) return true; } - if (ret < 0) { + if (ret != 0) { return scsi_handle_rw_error(r, ret, acct_failed); } @@ -295,9 +346,8 @@ static void scsi_aio_complete(void *opaque, int ret) SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - /* The request must only run in the BlockBackend's AioContext */ - assert(blk_get_aio_context(s->qdev.conf.blk) == - qemu_get_current_aio_context()); + /* The request must run in its AioContext */ + assert(r->req.ctx == qemu_get_current_aio_context()); assert(r->req.aiocb != NULL); r->req.aiocb = NULL; @@ -339,39 +389,16 @@ static bool scsi_is_cmd_fua(SCSICommand *cmd) } } -static void scsi_write_do_fua(SCSIDiskReq *r) -{ - SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - - assert(r->req.aiocb == NULL); - assert(!r->req.io_canceled); - - if (r->need_fua_emulation) { - block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, - BLOCK_ACCT_FLUSH); - r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); - return; - } - - scsi_req_complete(&r->req, GOOD); - scsi_req_unref(&r->req); -} - static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) { assert(r->req.aiocb == NULL); - if (scsi_disk_req_check_error(r, ret, false)) { + if (scsi_disk_req_check_error(r, ret, ret > 0)) { goto done; } r->sector += r->sector_count; r->sector_count = 0; - if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { - scsi_write_do_fua(r); - return; - } else { - scsi_req_complete(&r->req, GOOD); - } + scsi_req_complete(&r->req, GOOD); done: scsi_req_unref(&r->req); @@ -385,9 +412,10 @@ static void scsi_dma_complete(void *opaque, int ret) assert(r->req.aiocb != NULL); r->req.aiocb = NULL; + /* ret > 0 is accounted for in scsi_disk_req_check_error() */ if (ret < 0) { 
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); - } else { + } else if (ret == 0) { block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); } scsi_dma_complete_noio(r, ret); @@ -395,15 +423,13 @@ static void scsi_dma_complete(void *opaque, int ret) static void scsi_read_complete_noio(SCSIDiskReq *r, int ret) { - SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; - /* The request must only run in the BlockBackend's AioContext */ - assert(blk_get_aio_context(s->qdev.conf.blk) == - qemu_get_current_aio_context()); + /* The request must run in its AioContext */ + assert(r->req.ctx == qemu_get_current_aio_context()); assert(r->req.aiocb == NULL); - if (scsi_disk_req_check_error(r, ret, false)) { + if (scsi_disk_req_check_error(r, ret, ret > 0)) { goto done; } @@ -424,9 +450,10 @@ static void scsi_read_complete(void *opaque, int ret) assert(r->req.aiocb != NULL); r->req.aiocb = NULL; + /* ret > 0 is accounted for in scsi_disk_req_check_error() */ if (ret < 0) { block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); - } else { + } else if (ret == 0) { block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); trace_scsi_disk_read_complete(r->req.tag, r->qiov.size); } @@ -450,8 +477,7 @@ static void scsi_do_read(SCSIDiskReq *r, int ret) if (r->req.sg) { dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); r->req.residual -= r->req.sg->size; - r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), - r->req.sg, r->sector << BDRV_SECTOR_BITS, + r->req.aiocb = dma_blk_io(r->req.sg, r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, sdc->dma_readv, r, scsi_dma_complete, r, DMA_DIRECTION_FROM_DEVICE); @@ -515,7 +541,7 @@ static void scsi_read_data(SCSIRequest *req) first = !r->started; r->started = true; - if (first && r->need_fua_emulation) { + if (first && r->need_fua) { block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, BLOCK_ACCT_FLUSH); r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r); @@ -526,15 +552,13 @@ static void scsi_read_data(SCSIRequest *req) static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) { - SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; - /* The request must only run in the BlockBackend's AioContext */ - assert(blk_get_aio_context(s->qdev.conf.blk) == - qemu_get_current_aio_context()); + /* The request must run in its AioContext */ + assert(r->req.ctx == qemu_get_current_aio_context()); assert (r->req.aiocb == NULL); - if (scsi_disk_req_check_error(r, ret, false)) { + if (scsi_disk_req_check_error(r, ret, ret > 0)) { goto done; } @@ -542,8 +566,7 @@ static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) r->sector += n; r->sector_count -= n; if (r->sector_count == 0) { - scsi_write_do_fua(r); - return; + scsi_req_complete(&r->req, GOOD); } else { scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size); @@ -562,9 +585,10 @@ static void scsi_write_complete(void * opaque, int ret) assert (r->req.aiocb != NULL); r->req.aiocb = NULL; + /* ret > 0 is accounted for in scsi_disk_req_check_error() */ if (ret < 0) { block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); - } else { + } else if (ret == 0) { block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); } scsi_write_complete_noio(r, ret); @@ -575,6 +599,7 @@ static void scsi_write_data(SCSIRequest *req) SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); SCSIDiskClass 
*sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); + BlockCompletionFunc *cb; /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); @@ -600,19 +625,17 @@ static void scsi_write_data(SCSIRequest *req) if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 || r->req.cmd.buf[0] == VERIFY_16) { - if (r->req.sg) { - scsi_dma_complete_noio(r, 0); - } else { - scsi_write_complete_noio(r, 0); - } + block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, + BLOCK_ACCT_FLUSH); + cb = r->req.sg ? scsi_dma_complete : scsi_write_complete; + r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, cb, r); return; } if (r->req.sg) { dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE); r->req.residual -= r->req.sg->size; - r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), - r->req.sg, r->sector << BDRV_SECTOR_BITS, + r->req.aiocb = dma_blk_io(r->req.sg, r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, sdc->dma_writev, r, scsi_dma_complete, r, DMA_DIRECTION_TO_DEVICE); @@ -2344,7 +2367,7 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); return 0; } - r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); + r->need_fua = sdc->need_fua(&r->req.cmd); if (r->sector_count == 0) { scsi_req_complete(&r->req, GOOD); } @@ -2815,26 +2838,13 @@ static void scsi_block_sgio_complete(void *opaque, int ret) if (ret == 0) { if (io_hdr->host_status != SCSI_HOST_OK) { - scsi_req_complete_failed(&r->req, io_hdr->host_status); - scsi_req_unref(&r->req); - return; - } - - if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) { + r->req.host_status = io_hdr->host_status; + ret = -ENODEV; + } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) { ret = BUSY; } else { ret = io_hdr->status; } - - if (ret > 0) { - if (scsi_handle_rw_error(r, ret, true)) { - scsi_req_unref(&r->req); - return; - } - - /* Ignore error. */ - ret = 0; - } } req->cb(req->cb_opaque, ret); @@ -3103,19 +3113,57 @@ BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, { SCSIDiskReq *r = opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); + int flags = r->need_fua ? 
BDRV_REQ_FUA : 0; + return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, flags, cb, cb_opaque); } -static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) +static char *scsi_property_get_loadparm(Object *obj, Error **errp) +{ + return g_strdup(SCSI_DISK_BASE(obj)->loadparm); +} + +static void scsi_property_set_loadparm(Object *obj, const char *value, + Error **errp) +{ + void *lp_str; + + if (object_property_get_int(obj, "bootindex", NULL) < 0) { + error_setg(errp, "'loadparm' is only valid for boot devices"); + return; + } + + lp_str = g_malloc0(strlen(value) + 1); + if (!qdev_prop_sanitize_s390x_loadparm(lp_str, value, errp)) { + g_free(lp_str); + return; + } + SCSI_DISK_BASE(obj)->loadparm = lp_str; +} + +static void scsi_property_add_specifics(DeviceClass *dc) +{ + ObjectClass *oc = OBJECT_CLASS(dc); + + /* The loadparm property is only supported on s390x */ + if (qemu_arch_available(QEMU_ARCH_S390X)) { + object_class_property_add_str(oc, "loadparm", + scsi_property_get_loadparm, + scsi_property_set_loadparm); + object_class_property_set_description(oc, "loadparm", + "load parameter (s390x only)"); + } +} + +static void scsi_disk_base_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); dc->fw_name = "disk"; - dc->reset = scsi_disk_reset; + device_class_set_legacy_reset(dc, scsi_disk_reset); sdc->dma_readv = scsi_dma_readv; sdc->dma_writev = scsi_dma_writev; - sdc->need_fua_emulation = scsi_is_cmd_fua; + sdc->need_fua = scsi_is_cmd_fua; } static const TypeInfo scsi_disk_base_info = { @@ -3139,12 +3187,12 @@ static const TypeInfo scsi_disk_base_info = { DEFINE_PROP_BOOL("migrate-emulated-scsi-request", SCSIDiskState, migrate_emulated_scsi_request, true) -static Property scsi_hd_properties[] = { +static const Property scsi_hd_properties[] = { DEFINE_SCSI_DISK_PROPERTIES(), DEFINE_PROP_BIT("removable", SCSIDiskState, features, SCSI_DISK_F_REMOVABLE, false), DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, - SCSI_DISK_F_DPOFUA, false), + SCSI_DISK_F_DPOFUA, true), DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), @@ -3159,7 +3207,6 @@ static Property scsi_hd_properties[] = { quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE, 0), DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_scsi_disk_state = { @@ -3177,7 +3224,7 @@ static const VMStateDescription vmstate_scsi_disk_state = { } }; -static void scsi_hd_class_initfn(ObjectClass *klass, void *data) +static void scsi_hd_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); @@ -3189,6 +3236,8 @@ static void scsi_hd_class_initfn(ObjectClass *klass, void *data) dc->desc = "virtual SCSI disk"; device_class_set_props(dc, scsi_hd_properties); dc->vmsd = &vmstate_scsi_disk_state; + + scsi_property_add_specifics(dc); } static const TypeInfo scsi_hd_info = { @@ -3197,7 +3246,7 @@ static const TypeInfo scsi_hd_info = { .class_init = scsi_hd_class_initfn, }; -static Property scsi_cd_properties[] = { +static const Property scsi_cd_properties[] = { DEFINE_SCSI_DISK_PROPERTIES(), DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), @@ -3215,10 +3264,9 @@ static Property 
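
Editor's note: the scsi_dma_writev change just above replaces FUA emulation (a separate flush issued after the write completes) with the BDRV_REQ_FUA flag on the write request itself. A minimal standalone sketch of the idea, assuming stand-in names: REQ_FUA and submit_write model BDRV_REQ_FUA and blk_aio_pwritev, they are not the real QEMU interfaces.

    #include <stdbool.h>
    #include <stdio.h>

    #define REQ_FUA (1u << 0)   /* stand-in for BDRV_REQ_FUA */

    /* Stand-in for the write submission: with FUA the data is made stable as
     * part of this single request, so no follow-up flush is needed. */
    static void submit_write(unsigned flags)
    {
        printf("write submitted%s\n", (flags & REQ_FUA) ? " with FUA" : "");
    }

    int main(void)
    {
        bool need_fua = true;                 /* e.g. WRITE(10) with the FUA bit set */
        submit_write(need_fua ? REQ_FUA : 0); /* one request instead of write + flush */
        return 0;
    }
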
scsi_cd_properties[] = { 0), DEFINE_PROP_BIT("quirk_mode_page_truncated", SCSIDiskState, quirks, SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void scsi_cd_class_initfn(ObjectClass *klass, void *data) +static void scsi_cd_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); @@ -3229,6 +3277,8 @@ static void scsi_cd_class_initfn(ObjectClass *klass, void *data) dc->desc = "virtual SCSI CD-ROM"; device_class_set_props(dc, scsi_cd_properties); dc->vmsd = &vmstate_scsi_disk_state; + + scsi_property_add_specifics(dc); } static const TypeInfo scsi_cd_info = { @@ -3238,7 +3288,7 @@ static const TypeInfo scsi_cd_info = { }; #ifdef __linux__ -static Property scsi_block_properties[] = { +static const Property scsi_block_properties[] = { DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), @@ -3251,10 +3301,9 @@ static Property scsi_block_properties[] = { -1), DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout, DEFAULT_IO_TIMEOUT), - DEFINE_PROP_END_OF_LIST(), }; -static void scsi_block_class_initfn(ObjectClass *klass, void *data) +static void scsi_block_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); @@ -3266,7 +3315,7 @@ static void scsi_block_class_initfn(ObjectClass *klass, void *data) sdc->dma_readv = scsi_block_dma_readv; sdc->dma_writev = scsi_block_dma_writev; sdc->update_sense = scsi_block_update_sense; - sdc->need_fua_emulation = scsi_block_no_fua; + sdc->need_fua = scsi_block_no_fua; dc->desc = "SCSI block device passthrough"; device_class_set_props(dc, scsi_block_properties); dc->vmsd = &vmstate_scsi_disk_state; diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c index ee945f87e33..9e380a2109e 100644 --- a/hw/scsi/scsi-generic.c +++ b/hw/scsi/scsi-generic.c @@ -21,7 +21,7 @@ #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "hw/scsi/emulation.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "trace.h" #ifdef __linux__ @@ -772,12 +772,11 @@ static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private); } -static Property scsi_generic_properties[] = { +static const Property scsi_generic_properties[] = { DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk), DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false), DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout, DEFAULT_IO_TIMEOUT), - DEFINE_PROP_END_OF_LIST(), }; static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, @@ -787,7 +786,7 @@ static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, return scsi_bus_parse_cdb(dev, cmd, buf, buf_len, hba_private); } -static void scsi_generic_class_initfn(ObjectClass *klass, void *data) +static void scsi_generic_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); @@ -797,7 +796,7 @@ static void scsi_generic_class_initfn(ObjectClass *klass, void *data) sc->parse_cdb = scsi_generic_parse_cdb; dc->fw_name = "disk"; dc->desc = "pass through generic scsi device (/dev/sg*)"; - dc->reset = scsi_generic_reset; + device_class_set_legacy_reset(dc, scsi_generic_reset); 
device_class_set_props(dc, scsi_generic_properties); dc->vmsd = &vmstate_scsi_device; } diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c index c75a6c88079..20f70fb2729 100644 --- a/hw/scsi/spapr_vscsi.c +++ b/hw/scsi/spapr_vscsi.c @@ -1250,9 +1250,8 @@ static int spapr_vscsi_devnode(SpaprVioDevice *dev, void *fdt, int node_off) return 0; } -static Property spapr_vscsi_properties[] = { +static const Property spapr_vscsi_properties[] = { DEFINE_SPAPR_PROPERTIES(VSCSIState, vdev), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_spapr_vscsi = { @@ -1268,7 +1267,7 @@ static const VMStateDescription vmstate_spapr_vscsi = { }, }; -static void spapr_vscsi_class_init(ObjectClass *klass, void *data) +static void spapr_vscsi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass); diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events index f0f2a98c2ee..6c2788e2026 100644 --- a/hw/scsi/trace-events +++ b/hw/scsi/trace-events @@ -198,6 +198,7 @@ esp_mem_writeb_cmd_ensel(uint32_t val) "Enable selection (0x%2.2x)" esp_mem_writeb_cmd_dissel(uint32_t val) "Disable selection (0x%2.2x)" esp_mem_writeb_cmd_ti(uint32_t val) "Transfer Information (0x%2.2x)" esp_set_phase(const char *phase) "setting bus phase to %s" +esp_invalid_cmd(uint8_t cmd, uint8_t asc_mode) "command 0x%x asc_mode 0x%x" # esp-pci.c esp_pci_error_invalid_dma_direction(void) "invalid DMA transfer direction" diff --git a/hw/scsi/vhost-scsi-common.c b/hw/scsi/vhost-scsi-common.c index 4c8637045df..43525ba46d3 100644 --- a/hw/scsi/vhost-scsi-common.c +++ b/hw/scsi/vhost-scsi-common.c @@ -101,24 +101,25 @@ int vhost_scsi_common_start(VHostSCSICommon *vsc, Error **errp) return ret; } -void vhost_scsi_common_stop(VHostSCSICommon *vsc) +int vhost_scsi_common_stop(VHostSCSICommon *vsc) { VirtIODevice *vdev = VIRTIO_DEVICE(vsc); BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); int ret = 0; - vhost_dev_stop(&vsc->dev, vdev, true); + ret = vhost_dev_stop(&vsc->dev, vdev, true); if (k->set_guest_notifiers) { - ret = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false); - if (ret < 0) { - error_report("vhost guest notifier cleanup failed: %d", ret); + int r = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false); + if (r < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return r; } } - assert(ret >= 0); vhost_dev_disable_notifiers(&vsc->dev, vdev); + return ret; } uint64_t vhost_scsi_common_get_features(VirtIODevice *vdev, uint64_t features, diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c index 49cff2a0cb5..cdf405b0f86 100644 --- a/hw/scsi/vhost-scsi.c +++ b/hw/scsi/vhost-scsi.c @@ -29,7 +29,7 @@ #include "hw/fw-path-provider.h" #include "hw/qdev-properties.h" #include "qemu/cutils.h" -#include "sysemu/sysemu.h" +#include "system/system.h" /* Features supported by host kernel. 
*/ static const int kernel_feature_bits[] = { @@ -114,7 +114,7 @@ static void vhost_scsi_stop(VHostSCSI *s) vhost_scsi_common_stop(vsc); } -static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val) +static int vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val) { VHostSCSI *s = VHOST_SCSI(vdev); VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); @@ -125,7 +125,7 @@ static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val) } if (vhost_dev_is_started(&vsc->dev) == start) { - return; + return 0; } if (start) { @@ -139,6 +139,7 @@ static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val) } else { vhost_scsi_stop(s); } + return 0; } static void vhost_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq) @@ -172,7 +173,7 @@ static int vhost_scsi_set_workers(VHostSCSICommon *vsc, bool per_virtqueue) struct vhost_dev *dev = &vsc->dev; struct vhost_vring_worker vq_worker; struct vhost_worker_state worker; - int i, ret; + int i, ret = 0; /* Use default worker */ if (!per_virtqueue || dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) { @@ -314,7 +315,6 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) if (vhostfd >= 0) { close(vhostfd); } - return; } static void vhost_scsi_unrealize(DeviceState *dev) @@ -343,7 +343,7 @@ static struct vhost_dev *vhost_scsi_get_vhost(VirtIODevice *vdev) return &vsc->dev; } -static Property vhost_scsi_properties[] = { +static const Property vhost_scsi_properties[] = { DEFINE_PROP_STRING("vhostfd", VirtIOSCSICommon, conf.vhostfd), DEFINE_PROP_STRING("wwpn", VirtIOSCSICommon, conf.wwpn), DEFINE_PROP_UINT32("boot_tpgt", VirtIOSCSICommon, conf.boot_tpgt, 0), @@ -359,13 +359,15 @@ static Property vhost_scsi_properties[] = { DEFINE_PROP_BIT64("t10_pi", VHostSCSICommon, host_features, VIRTIO_SCSI_F_T10_PI, false), + DEFINE_PROP_BIT64("hotplug", VHostSCSICommon, host_features, + VIRTIO_SCSI_F_HOTPLUG, + false), DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false), DEFINE_PROP_BOOL("worker_per_virtqueue", VirtIOSCSICommon, conf.worker_per_virtqueue, false), - DEFINE_PROP_END_OF_LIST(), }; -static void vhost_scsi_class_init(ObjectClass *klass, void *data) +static void vhost_scsi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); @@ -399,7 +401,7 @@ static const TypeInfo vhost_scsi_info = { .instance_size = sizeof(VHostSCSI), .class_init = vhost_scsi_class_init, .instance_init = vhost_scsi_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_FW_PATH_PROVIDER }, { } }, diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c index 55e4be5b343..25f2d894e7c 100644 --- a/hw/scsi/vhost-user-scsi.c +++ b/hw/scsi/vhost-user-scsi.c @@ -27,7 +27,7 @@ #include "hw/virtio/vhost-user-scsi.h" #include "hw/virtio/virtio.h" #include "chardev/char-fe.h" -#include "sysemu/sysemu.h" +#include "system/system.h" /* Features supported by the host application */ static const int user_feature_bits[] = { @@ -52,19 +52,19 @@ static int vhost_user_scsi_start(VHostUserSCSI *s, Error **errp) return ret; } -static void vhost_user_scsi_stop(VHostUserSCSI *s) +static int vhost_user_scsi_stop(VHostUserSCSI *s) { VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); if (!s->started_vu) { - return; + return 0; } s->started_vu = false; - vhost_scsi_common_stop(vsc); + return vhost_scsi_common_stop(vsc); } -static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status) +static int vhost_user_scsi_set_status(VirtIODevice 
*vdev, uint8_t status) { VHostUserSCSI *s = (VHostUserSCSI *)vdev; DeviceState *dev = DEVICE(vdev); @@ -75,11 +75,11 @@ static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status) int ret; if (!s->connected) { - return; + return -1; } if (vhost_dev_is_started(&vsc->dev) == should_start) { - return; + return 0; } if (should_start) { @@ -91,8 +91,12 @@ static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status) qemu_chr_fe_disconnect(&vs->conf.chardev); } } else { - vhost_user_scsi_stop(s); + ret = vhost_user_scsi_stop(s); + if (ret) { + return ret; + } } + return 0; } static void vhost_user_scsi_handle_output(VirtIODevice *vdev, VirtQueue *vq) @@ -341,7 +345,7 @@ static void vhost_user_scsi_unrealize(DeviceState *dev) virtio_scsi_common_unrealize(dev); } -static Property vhost_user_scsi_properties[] = { +static const Property vhost_user_scsi_properties[] = { DEFINE_PROP_CHR("chardev", VirtIOSCSICommon, conf.chardev), DEFINE_PROP_UINT32("boot_tpgt", VirtIOSCSICommon, conf.boot_tpgt, 0), DEFINE_PROP_UINT32("num_queues", VirtIOSCSICommon, conf.num_queues, @@ -360,7 +364,6 @@ static Property vhost_user_scsi_properties[] = { DEFINE_PROP_BIT64("t10_pi", VHostSCSICommon, host_features, VIRTIO_SCSI_F_T10_PI, false), - DEFINE_PROP_END_OF_LIST(), }; static void vhost_user_scsi_reset(VirtIODevice *vdev) @@ -387,7 +390,7 @@ static const VMStateDescription vmstate_vhost_scsi = { }, }; -static void vhost_user_scsi_class_init(ObjectClass *klass, void *data) +static void vhost_user_scsi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); @@ -423,7 +426,7 @@ static const TypeInfo vhost_user_scsi_info = { .instance_size = sizeof(VHostUserSCSI), .class_init = vhost_user_scsi_class_init, .instance_init = vhost_user_scsi_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_FW_PATH_PROVIDER }, { } }, diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c index 2806a121b24..95f13fb7c28 100644 --- a/hw/scsi/virtio-scsi-dataplane.c +++ b/hw/scsi/virtio-scsi-dataplane.c @@ -15,9 +15,10 @@ #include "qapi/error.h" #include "hw/virtio/virtio-scsi.h" #include "qemu/error-report.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "hw/scsi/scsi.h" #include "scsi/constants.h" +#include "hw/virtio/iothread-vq-mapping.h" #include "hw/virtio/virtio-bus.h" /* Context: BQL held */ @@ -28,7 +29,14 @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp) BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - if (vs->conf.iothread) { + if (vs->conf.iothread && vs->conf.iothread_vq_mapping_list) { + error_setg(errp, + "iothread and iothread-vq-mapping properties cannot be set " + "at the same time"); + return; + } + + if (vs->conf.iothread || vs->conf.iothread_vq_mapping_list) { if (!k->set_guest_notifiers || !k->ioeventfd_assign) { error_setg(errp, "device is incompatible with iothread " @@ -39,15 +47,64 @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp) error_setg(errp, "ioeventfd is required for iothread"); return; } - s->ctx = iothread_get_aio_context(vs->conf.iothread); - } else { - if (!virtio_device_ioeventfd_enabled(vdev)) { + } + + s->vq_aio_context = g_new(AioContext *, vs->conf.num_queues + + VIRTIO_SCSI_VQ_NUM_FIXED); + + /* + * Handle the ctrl virtqueue in the main loop thread where device resets + * can be performed. 
+ */ + s->vq_aio_context[0] = qemu_get_aio_context(); + + /* + * Handle the event virtqueue in the main loop thread where its no_poll + * behavior won't stop IOThread polling. + */ + s->vq_aio_context[1] = qemu_get_aio_context(); + + if (vs->conf.iothread_vq_mapping_list) { + if (!iothread_vq_mapping_apply(vs->conf.iothread_vq_mapping_list, + &s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED], + vs->conf.num_queues, errp)) { + g_free(s->vq_aio_context); + s->vq_aio_context = NULL; return; } - s->ctx = qemu_get_aio_context(); + } else if (vs->conf.iothread) { + AioContext *ctx = iothread_get_aio_context(vs->conf.iothread); + for (uint16_t i = 0; i < vs->conf.num_queues; i++) { + s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i] = ctx; + } + + /* Released in virtio_scsi_dataplane_cleanup() */ + object_ref(OBJECT(vs->conf.iothread)); + } else { + AioContext *ctx = qemu_get_aio_context(); + for (unsigned i = 0; i < vs->conf.num_queues; i++) { + s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i] = ctx; + } } } +/* Context: BQL held */ +void virtio_scsi_dataplane_cleanup(VirtIOSCSI *s) +{ + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); + + if (vs->conf.iothread_vq_mapping_list) { + iothread_vq_mapping_cleanup(vs->conf.iothread_vq_mapping_list); + } + + if (vs->conf.iothread) { + object_unref(OBJECT(vs->conf.iothread)); + } + + g_free(s->vq_aio_context); + s->vq_aio_context = NULL; +} + static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s))); @@ -66,31 +123,20 @@ static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n) } /* Context: BH in IOThread */ -static void virtio_scsi_dataplane_stop_bh(void *opaque) +static void virtio_scsi_dataplane_stop_vq_bh(void *opaque) { - VirtIOSCSI *s = opaque; - VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); + AioContext *ctx = qemu_get_current_aio_context(); + VirtQueue *vq = opaque; EventNotifier *host_notifier; - int i; - virtio_queue_aio_detach_host_notifier(vs->ctrl_vq, s->ctx); - host_notifier = virtio_queue_get_host_notifier(vs->ctrl_vq); + virtio_queue_aio_detach_host_notifier(vq, ctx); + host_notifier = virtio_queue_get_host_notifier(vq); /* * Test and clear notifier after disabling event, in case poll callback * didn't have time to run. 
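
Editor's note: the dataplane setup above builds a per-virtqueue AioContext table. The two fixed virtqueues (ctrl and event) stay in the main loop, while each command queue gets the context chosen by iothread-vq-mapping, a single configured IOThread, or the main loop as a fallback. The standalone sketch below models that layout; Ctx and the other names are stand-ins for AioContext and the QEMU helpers.

    #include <stdio.h>

    #define VQ_NUM_FIXED 2   /* ctrl + event, as in VIRTIO_SCSI_VQ_NUM_FIXED */

    typedef struct { const char *name; } Ctx;

    int main(void)
    {
        Ctx main_loop = { "main-loop" };
        Ctx iothread0 = { "iothread0" }, iothread1 = { "iothread1" };
        Ctx *mapping[] = { &iothread0, &iothread1 };   /* one entry per command queue */
        unsigned num_queues = 2;

        Ctx *vq_ctx[VQ_NUM_FIXED + 2];
        vq_ctx[0] = &main_loop;                  /* ctrl vq: device resets run here */
        vq_ctx[1] = &main_loop;                  /* event vq: no_poll stays out of IOThreads */
        for (unsigned i = 0; i < num_queues; i++) {
            vq_ctx[VQ_NUM_FIXED + i] = mapping[i];
        }

        for (unsigned i = 0; i < VQ_NUM_FIXED + num_queues; i++) {
            printf("vq %u -> %s\n", i, vq_ctx[i]->name);
        }
        return 0;
    }
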
*/ virtio_queue_host_notifier_read(host_notifier); - - virtio_queue_aio_detach_host_notifier(vs->event_vq, s->ctx); - host_notifier = virtio_queue_get_host_notifier(vs->event_vq); - virtio_queue_host_notifier_read(host_notifier); - - for (i = 0; i < vs->conf.num_queues; i++) { - virtio_queue_aio_detach_host_notifier(vs->cmd_vqs[i], s->ctx); - host_notifier = virtio_queue_get_host_notifier(vs->cmd_vqs[i]); - virtio_queue_host_notifier_read(host_notifier); - } } /* Context: BQL held */ @@ -154,11 +200,14 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev) smp_wmb(); /* paired with aio_notify_accept() */ if (s->bus.drain_count == 0) { - virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx); - virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx); + virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, + s->vq_aio_context[0]); + virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, + s->vq_aio_context[1]); for (i = 0; i < vs->conf.num_queues; i++) { - virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx); + AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i]; + virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], ctx); } } return 0; @@ -207,7 +256,11 @@ void virtio_scsi_dataplane_stop(VirtIODevice *vdev) s->dataplane_stopping = true; if (s->bus.drain_count == 0) { - aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s); + for (i = 0; i < vs->conf.num_queues + VIRTIO_SCSI_VQ_NUM_FIXED; i++) { + VirtQueue *vq = virtio_get_queue(&vs->parent_obj, i); + AioContext *ctx = s->vq_aio_context[i]; + aio_wait_bh_oneshot(ctx, virtio_scsi_dataplane_stop_vq_bh, vq); + } } blk_drain_all(); /* ensure there are no in-flight requests */ diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index 9f02ceea099..34ae14f7bf9 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -22,11 +22,12 @@ #include "qemu/error-report.h" #include "qemu/iov.h" #include "qemu/module.h" -#include "sysemu/block-backend.h" -#include "sysemu/dma.h" +#include "system/block-backend.h" +#include "system/dma.h" #include "hw/qdev-properties.h" #include "hw/scsi/scsi.h" #include "scsi/constants.h" +#include "hw/virtio/iothread-vq-mapping.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" #include "trace.h" @@ -47,7 +48,7 @@ typedef struct VirtIOSCSIReq { /* Used for two-stage request submission and TMFs deferred to BH */ QTAILQ_ENTRY(VirtIOSCSIReq) next; - /* Used for cancellation of request during TMFs */ + /* Used for cancellation of request during TMFs. Atomic. 
*/ int remaining; SCSIRequest *sreq; @@ -102,13 +103,18 @@ static void virtio_scsi_free_req(VirtIOSCSIReq *req) g_free(req); } -static void virtio_scsi_complete_req(VirtIOSCSIReq *req) +static void virtio_scsi_complete_req(VirtIOSCSIReq *req, QemuMutex *vq_lock) { VirtIOSCSI *s = req->dev; VirtQueue *vq = req->vq; VirtIODevice *vdev = VIRTIO_DEVICE(s); qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size); + + if (vq_lock) { + qemu_mutex_lock(vq_lock); + } + virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size); if (s->dataplane_started && !s->dataplane_fenced) { virtio_notify_irqfd(vdev, vq); @@ -116,6 +122,10 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req) virtio_notify(vdev, vq); } + if (vq_lock) { + qemu_mutex_unlock(vq_lock); + } + if (req->sreq) { req->sreq->hba_private = NULL; scsi_req_unref(req->sreq); @@ -123,34 +133,20 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req) virtio_scsi_free_req(req); } -static void virtio_scsi_complete_req_bh(void *opaque) +static void virtio_scsi_bad_req(VirtIOSCSIReq *req, QemuMutex *vq_lock) { - VirtIOSCSIReq *req = opaque; + virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers"); - virtio_scsi_complete_req(req); -} + if (vq_lock) { + qemu_mutex_lock(vq_lock); + } -/* - * Called from virtio_scsi_do_one_tmf_bh() in main loop thread. The main loop - * thread cannot touch the virtqueue since that could race with an IOThread. - */ -static void virtio_scsi_complete_req_from_main_loop(VirtIOSCSIReq *req) -{ - VirtIOSCSI *s = req->dev; + virtqueue_detach_element(req->vq, &req->elem, 0); - if (!s->ctx || s->ctx == qemu_get_aio_context()) { - /* No need to schedule a BH when there is no IOThread */ - virtio_scsi_complete_req(req); - } else { - /* Run request completion in the IOThread */ - aio_wait_bh_oneshot(s->ctx, virtio_scsi_complete_req_bh, req); + if (vq_lock) { + qemu_mutex_unlock(vq_lock); } -} -static void virtio_scsi_bad_req(VirtIOSCSIReq *req) -{ - virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers"); - virtqueue_detach_element(req->vq, &req->elem, 0); virtio_scsi_free_req(req); } @@ -235,12 +231,21 @@ static int virtio_scsi_parse_req(VirtIOSCSIReq *req, return 0; } -static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq) +static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq, QemuMutex *vq_lock) { VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s; VirtIOSCSIReq *req; + if (vq_lock) { + qemu_mutex_lock(vq_lock); + } + req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size); + + if (vq_lock) { + qemu_mutex_unlock(vq_lock); + } + if (!req) { return NULL; } @@ -294,137 +299,157 @@ typedef struct { VirtIOSCSIReq *tmf_req; } VirtIOSCSICancelNotifier; +static void virtio_scsi_tmf_dec_remaining(VirtIOSCSIReq *tmf) +{ + if (qatomic_fetch_dec(&tmf->remaining) == 1) { + trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(tmf->req.tmf.lun), + tmf->req.tmf.tag, tmf->resp.tmf.response); + + virtio_scsi_complete_req(tmf, &tmf->dev->ctrl_lock); + } +} + static void virtio_scsi_cancel_notify(Notifier *notifier, void *data) { VirtIOSCSICancelNotifier *n = container_of(notifier, VirtIOSCSICancelNotifier, notifier); - if (--n->tmf_req->remaining == 0) { - VirtIOSCSIReq *req = n->tmf_req; - - trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun), - req->req.tmf.tag, req->resp.tmf.response); - virtio_scsi_complete_req(req); - } + virtio_scsi_tmf_dec_remaining(n->tmf_req); g_free(n); } -static inline void virtio_scsi_ctx_check(VirtIOSCSI 
*s, SCSIDevice *d) +static void virtio_scsi_tmf_cancel_req(VirtIOSCSIReq *tmf, SCSIRequest *r) { - if (s->dataplane_started && d && blk_is_available(d->conf.blk)) { - assert(blk_get_aio_context(d->conf.blk) == s->ctx); - } + VirtIOSCSICancelNotifier *notifier; + + assert(r->ctx == qemu_get_current_aio_context()); + + /* Decremented in virtio_scsi_cancel_notify() */ + qatomic_inc(&tmf->remaining); + + notifier = g_new(VirtIOSCSICancelNotifier, 1); + notifier->notifier.notify = virtio_scsi_cancel_notify; + notifier->tmf_req = tmf; + scsi_req_cancel_async(r, ¬ifier->notifier); } -static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req) +/* Execute a TMF on the requests in the current AioContext */ +static void virtio_scsi_do_tmf_aio_context(void *opaque) { - VirtIOSCSI *s = req->dev; - SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun); - BusChild *kid; - int target; + AioContext *ctx = qemu_get_current_aio_context(); + VirtIOSCSIReq *tmf = opaque; + VirtIOSCSI *s = tmf->dev; + SCSIDevice *d = virtio_scsi_device_get(s, tmf->req.tmf.lun); + SCSIRequest *r; + bool match_tag; - switch (req->req.tmf.subtype) { - case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET: - if (!d) { - req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET; - goto out; - } - if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) { - req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN; - goto out; - } - qatomic_inc(&s->resetting); - device_cold_reset(&d->qdev); - qatomic_dec(&s->resetting); + if (!d) { + tmf->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET; + virtio_scsi_tmf_dec_remaining(tmf); + return; + } + + /* + * This function could handle other subtypes that need to be processed in + * the request's AioContext in the future, but for now only request + * cancelation subtypes are performed here. + */ + switch (tmf->req.tmf.subtype) { + case VIRTIO_SCSI_T_TMF_ABORT_TASK: + match_tag = true; + break; + case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET: + case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: + match_tag = false; break; + default: + g_assert_not_reached(); + } - case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET: - target = req->req.tmf.lun[1]; - qatomic_inc(&s->resetting); + WITH_QEMU_LOCK_GUARD(&d->requests_lock) { + QTAILQ_FOREACH(r, &d->requests, next) { + VirtIOSCSIReq *cmd_req = r->hba_private; + assert(cmd_req); /* request has hba_private while enqueued */ - rcu_read_lock(); - QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) { - SCSIDevice *d1 = SCSI_DEVICE(kid->child); - if (d1->channel == 0 && d1->id == target) { - device_cold_reset(&d1->qdev); + if (r->ctx != ctx) { + continue; + } + if (match_tag && cmd_req->req.cmd.tag != tmf->req.tmf.tag) { + continue; } + virtio_scsi_tmf_cancel_req(tmf, r); } - rcu_read_unlock(); - - qatomic_dec(&s->resetting); - break; - - default: - g_assert_not_reached(); - break; } -out: - object_unref(OBJECT(d)); - virtio_scsi_complete_req_from_main_loop(req); + /* Incremented by virtio_scsi_do_tmf() */ + virtio_scsi_tmf_dec_remaining(tmf); + + object_unref(d); } -/* Some TMFs must be processed from the main loop thread */ -static void virtio_scsi_do_tmf_bh(void *opaque) +static void dummy_bh(void *opaque) { - VirtIOSCSI *s = opaque; - QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs); - VirtIOSCSIReq *req; - VirtIOSCSIReq *tmp; + /* Do nothing */ +} +/* + * Wait for pending virtio_scsi_defer_tmf_to_aio_context() BHs. 
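
Editor's note: the TMF path above fans cancellations out across AioContexts and tracks completion with the atomic "remaining" counter: one reference is held by the dispatcher, one per request whose cancellation is still pending, and the TMF response is sent when the count drops back to zero. A minimal standalone model of that counting scheme using C11 atomics follows; tmf_put and the loop counts are illustrative, not the QEMU functions.

    #include <stdatomic.h>
    #include <stdio.h>

    /* Drop one reference; whoever drops the last one completes the TMF. */
    static void tmf_put(atomic_int *remaining)
    {
        if (atomic_fetch_sub(remaining, 1) == 1) {
            printf("all cancellations finished, completing the TMF\n");
        }
    }

    int main(void)
    {
        atomic_int remaining = 1;              /* reference held by the dispatcher */
        int in_flight = 3;

        for (int i = 0; i < in_flight; i++) {
            atomic_fetch_add(&remaining, 1);   /* one per request being cancelled */
        }
        for (int i = 0; i < in_flight; i++) {
            tmf_put(&remaining);               /* cancellation notifier fired */
        }
        tmf_put(&remaining);                   /* dispatcher drops its own reference */
        return 0;
    }
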
+ */ +static void virtio_scsi_flush_defer_tmf_to_aio_context(VirtIOSCSI *s) +{ GLOBAL_STATE_CODE(); - WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) { - QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) { - QTAILQ_REMOVE(&s->tmf_bh_list, req, next); - QTAILQ_INSERT_TAIL(&reqs, req, next); - } + assert(!s->dataplane_started); - qemu_bh_delete(s->tmf_bh); - s->tmf_bh = NULL; - } + for (uint32_t i = 0; i < s->parent_obj.conf.num_queues; i++) { + AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i]; - QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) { - QTAILQ_REMOVE(&reqs, req, next); - virtio_scsi_do_one_tmf_bh(req); + /* Our BH only runs after previously scheduled BHs */ + aio_wait_bh_oneshot(ctx, dummy_bh, NULL); } } -static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s) +/* + * Run the TMF in a specific AioContext, handling only requests in that + * AioContext. This is necessary because requests can run in different + * AioContext and it is only possible to cancel them from the AioContext where + * they are running. + */ +static void virtio_scsi_defer_tmf_to_aio_context(VirtIOSCSIReq *tmf, + AioContext *ctx) { - VirtIOSCSIReq *req; - VirtIOSCSIReq *tmp; + /* Decremented in virtio_scsi_do_tmf_aio_context() */ + qatomic_inc(&tmf->remaining); - GLOBAL_STATE_CODE(); - - /* Called after ioeventfd has been stopped, so tmf_bh_lock is not needed */ - if (s->tmf_bh) { - qemu_bh_delete(s->tmf_bh); - s->tmf_bh = NULL; - } - - QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) { - QTAILQ_REMOVE(&s->tmf_bh_list, req, next); - - /* SAM-6 6.3.2 Hard reset */ - req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE; - virtio_scsi_complete_req(req); - } + /* See virtio_scsi_flush_defer_tmf_to_aio_context() cleanup during reset */ + aio_bh_schedule_oneshot(ctx, virtio_scsi_do_tmf_aio_context, tmf); } -static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req) +/* + * Returns the AioContext for a given TMF's tag field or NULL. Note that the + * request identified by the tag may have completed by the time you can execute + * a BH in the AioContext, so don't assume the request still exists in your BH. + */ +static AioContext *find_aio_context_for_tmf_tag(SCSIDevice *d, + VirtIOSCSIReq *tmf) { - VirtIOSCSI *s = req->dev; + WITH_QEMU_LOCK_GUARD(&d->requests_lock) { + SCSIRequest *r; + SCSIRequest *next; + + QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) { + VirtIOSCSIReq *cmd_req = r->hba_private; - WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) { - QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next); + /* hba_private is non-NULL while the request is enqueued */ + assert(cmd_req); - if (!s->tmf_bh) { - s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s); - qemu_bh_schedule(s->tmf_bh); + if (cmd_req->req.cmd.tag == tmf->req.tmf.tag) { + return r->ctx; + } } } + return NULL; } /* Return 0 if the request is ready to be completed and return to guest; @@ -434,9 +459,9 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req) { SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun); SCSIRequest *r, *next; + AioContext *ctx; int ret = 0; - virtio_scsi_ctx_check(s, d); /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". 
*/ req->resp.tmf.response = VIRTIO_SCSI_S_OK; @@ -451,7 +476,22 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req) req->req.tmf.tag, req->req.tmf.subtype); switch (req->req.tmf.subtype) { - case VIRTIO_SCSI_T_TMF_ABORT_TASK: + case VIRTIO_SCSI_T_TMF_ABORT_TASK: { + if (!d) { + goto fail; + } + if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) { + goto incorrect_lun; + } + + ctx = find_aio_context_for_tmf_tag(d, req); + if (ctx) { + virtio_scsi_defer_tmf_to_aio_context(req, ctx); + ret = -EINPROGRESS; + } + break; + } + case VIRTIO_SCSI_T_TMF_QUERY_TASK: if (!d) { goto fail; @@ -459,44 +499,82 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req) if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) { goto incorrect_lun; } - QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) { - VirtIOSCSIReq *cmd_req = r->hba_private; - if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) { - break; + + WITH_QEMU_LOCK_GUARD(&d->requests_lock) { + QTAILQ_FOREACH(r, &d->requests, next) { + VirtIOSCSIReq *cmd_req = r->hba_private; + assert(cmd_req); /* request has hba_private while enqueued */ + + if (cmd_req->req.cmd.tag == req->req.tmf.tag) { + /* + * "If the specified command is present in the task set, + * then return a service response set to FUNCTION + * SUCCEEDED". + */ + req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED; + } } } - if (r) { - /* - * Assert that the request has not been completed yet, we - * check for it in the loop above. - */ - assert(r->hba_private); - if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) { - /* "If the specified command is present in the task set, then - * return a service response set to FUNCTION SUCCEEDED". - */ - req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED; - } else { - VirtIOSCSICancelNotifier *notifier; - - req->remaining = 1; - notifier = g_new(VirtIOSCSICancelNotifier, 1); - notifier->tmf_req = req; - notifier->notifier.notify = virtio_scsi_cancel_notify; - scsi_req_cancel_async(r, ¬ifier->notifier); - ret = -EINPROGRESS; + break; + + case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET: + if (!d) { + goto fail; + } + if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) { + goto incorrect_lun; + } + qatomic_inc(&s->resetting); + device_cold_reset(&d->qdev); + qatomic_dec(&s->resetting); + break; + + case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET: { + BusChild *kid; + int target = req->req.tmf.lun[1]; + qatomic_inc(&s->resetting); + + rcu_read_lock(); + QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) { + SCSIDevice *d1 = SCSI_DEVICE(kid->child); + if (d1->channel == 0 && d1->id == target) { + device_cold_reset(&d1->qdev); } } + rcu_read_unlock(); + + qatomic_dec(&s->resetting); break; + } - case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET: - case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET: - virtio_scsi_defer_tmf_to_bh(req); + case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET: + case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: { + g_autoptr(GHashTable) aio_contexts = g_hash_table_new(NULL, NULL); + + if (!d) { + goto fail; + } + if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) { + goto incorrect_lun; + } + + qatomic_inc(&req->remaining); + + for (uint32_t i = 0; i < s->parent_obj.conf.num_queues; i++) { + ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i]; + + if (!g_hash_table_add(aio_contexts, ctx)) { + continue; /* skip previously added AioContext */ + } + + virtio_scsi_defer_tmf_to_aio_context(req, ctx); + } + + virtio_scsi_tmf_dec_remaining(req); ret = -EINPROGRESS; break; + } - case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET: - case 
VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET: if (!d) { goto fail; @@ -505,34 +583,19 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req) goto incorrect_lun; } - /* Add 1 to "remaining" until virtio_scsi_do_tmf returns. - * This way, if the bus starts calling back to the notifiers - * even before we finish the loop, virtio_scsi_cancel_notify - * will not complete the TMF too early. - */ - req->remaining = 1; - QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) { - if (r->hba_private) { - if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) { - /* "If there is any command present in the task set, then - * return a service response set to FUNCTION SUCCEEDED". - */ - req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED; - break; - } else { - VirtIOSCSICancelNotifier *notifier; - - req->remaining++; - notifier = g_new(VirtIOSCSICancelNotifier, 1); - notifier->notifier.notify = virtio_scsi_cancel_notify; - notifier->tmf_req = req; - scsi_req_cancel_async(r, ¬ifier->notifier); - } + WITH_QEMU_LOCK_GUARD(&d->requests_lock) { + QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) { + /* Request has hba_private while enqueued */ + assert(r->hba_private); + + /* + * "If there is any command present in the task set, then + * return a service response set to FUNCTION SUCCEEDED". + */ + req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED; + break; } } - if (--req->remaining > 0) { - ret = -EINPROGRESS; - } break; case VIRTIO_SCSI_T_TMF_CLEAR_ACA: @@ -563,7 +626,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req) if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0, &type, sizeof(type)) < sizeof(type)) { - virtio_scsi_bad_req(req); + virtio_scsi_bad_req(req, &s->ctrl_lock); return; } @@ -571,7 +634,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req) if (type == VIRTIO_SCSI_T_TMF) { if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq), sizeof(VirtIOSCSICtrlTMFResp)) < 0) { - virtio_scsi_bad_req(req); + virtio_scsi_bad_req(req, &s->ctrl_lock); return; } else { r = virtio_scsi_do_tmf(s, req); @@ -581,7 +644,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req) type == VIRTIO_SCSI_T_AN_SUBSCRIBE) { if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq), sizeof(VirtIOSCSICtrlANResp)) < 0) { - virtio_scsi_bad_req(req); + virtio_scsi_bad_req(req, &s->ctrl_lock); return; } else { req->req.an.event_requested = @@ -601,7 +664,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req) type == VIRTIO_SCSI_T_AN_SUBSCRIBE) trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun), req->resp.an.response); - virtio_scsi_complete_req(req); + virtio_scsi_complete_req(req, &s->ctrl_lock); } else { assert(r == -EINPROGRESS); } @@ -611,7 +674,7 @@ static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq) { VirtIOSCSIReq *req; - while ((req = virtio_scsi_pop_req(s, vq))) { + while ((req = virtio_scsi_pop_req(s, vq, &s->ctrl_lock))) { virtio_scsi_handle_ctrl_req(s, req); } } @@ -626,9 +689,12 @@ static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq) */ static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s) { - if (!s->ctx || s->dataplane_started) { + if (s->dataplane_started) { return false; } + if (s->vq_aio_context[0] == qemu_get_aio_context()) { + return false; /* not using IOThreads */ + } virtio_device_start_ioeventfd(&s->parent_obj.parent_obj); return !s->dataplane_fenced; @@ -655,7 +721,7 @@ static void 
virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req) * in virtio_scsi_command_complete. */ req->resp_size = sizeof(VirtIOSCSICmdResp); - virtio_scsi_complete_req(req); + virtio_scsi_complete_req(req, NULL); } static void virtio_scsi_command_failed(SCSIRequest *r) @@ -789,7 +855,7 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req) virtio_scsi_fail_cmd_req(req); return -ENOTSUP; } else { - virtio_scsi_bad_req(req); + virtio_scsi_bad_req(req, NULL); return -EINVAL; } } @@ -802,7 +868,6 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req) virtio_scsi_complete_cmd_req(req); return -ENOENT; } - virtio_scsi_ctx_check(s, d); req->sreq = scsi_req_new(d, req->req.cmd.tag, virtio_scsi_get_lun(req->req.cmd.lun), req->req.cmd.cdb, vs->cdb_size, req); @@ -844,7 +909,7 @@ static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq) virtio_queue_set_notification(vq, 0); } - while ((req = virtio_scsi_pop_req(s, vq))) { + while ((req = virtio_scsi_pop_req(s, vq, NULL))) { ret = virtio_scsi_handle_cmd_req_prepare(s, req); if (!ret) { QTAILQ_INSERT_TAIL(&reqs, req, next); @@ -937,7 +1002,7 @@ static void virtio_scsi_reset(VirtIODevice *vdev) assert(!s->dataplane_started); - virtio_scsi_reset_tmf_bh(s); + virtio_scsi_flush_defer_tmf_to_aio_context(s); qatomic_inc(&s->resetting); bus_cold_reset(BUS(&s->bus)); @@ -945,7 +1010,10 @@ static void virtio_scsi_reset(VirtIODevice *vdev) vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE; vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE; - s->events_dropped = false; + + WITH_QEMU_LOCK_GUARD(&s->event_lock) { + s->events_dropped = false; + } } typedef struct { @@ -974,19 +1042,21 @@ static void virtio_scsi_push_event(VirtIOSCSI *s, return; } - req = virtio_scsi_pop_req(s, vs->event_vq); - if (!req) { - s->events_dropped = true; - return; - } + req = virtio_scsi_pop_req(s, vs->event_vq, &s->event_lock); + WITH_QEMU_LOCK_GUARD(&s->event_lock) { + if (!req) { + s->events_dropped = true; + return; + } - if (s->events_dropped) { - event |= VIRTIO_SCSI_T_EVENTS_MISSED; - s->events_dropped = false; + if (s->events_dropped) { + event |= VIRTIO_SCSI_T_EVENTS_MISSED; + s->events_dropped = false; + } } if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) { - virtio_scsi_bad_req(req); + virtio_scsi_bad_req(req, &s->event_lock); return; } @@ -1006,12 +1076,18 @@ static void virtio_scsi_push_event(VirtIOSCSI *s, } trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason); - virtio_scsi_complete_req(req); + virtio_scsi_complete_req(req, &s->event_lock); } static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq) { - if (s->events_dropped) { + bool events_dropped; + + WITH_QEMU_LOCK_GUARD(&s->event_lock) { + events_dropped = s->events_dropped; + } + + if (events_dropped) { VirtIOSCSIEventInfo info = { .event = VIRTIO_SCSI_T_NO_EVENT, }; @@ -1062,17 +1138,16 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, { VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev); VirtIOSCSI *s = VIRTIO_SCSI(vdev); + AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED]; SCSIDevice *sd = SCSI_DEVICE(dev); - int ret; - if (s->ctx && !s->dataplane_fenced) { - if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) { - return; - } - ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp); - if (ret < 0) { - return; - } + if (ctx != qemu_get_aio_context() && !s->dataplane_fenced) { + /* + * Try to make the BlockBackend's AioContext match ours. 
Ignore failure + * because I/O will still work although block jobs and other users + * might be slower when multiple AioContexts use a BlockBackend. + */ + blk_set_aio_context(sd->conf.blk, ctx, NULL); } if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { @@ -1107,7 +1182,7 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev, qdev_simple_device_unplug_cb(hotplug_dev, dev, errp); - if (s->ctx) { + if (s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED] != qemu_get_aio_context()) { /* If other users keep the BlockBackend in the iothread, that's ok */ blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL); } @@ -1141,7 +1216,7 @@ static void virtio_scsi_drained_begin(SCSIBus *bus) for (uint32_t i = 0; i < total_queues; i++) { VirtQueue *vq = virtio_get_queue(vdev, i); - virtio_queue_aio_detach_host_notifier(vq, s->ctx); + virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]); } } @@ -1167,10 +1242,12 @@ static void virtio_scsi_drained_end(SCSIBus *bus) for (uint32_t i = 0; i < total_queues; i++) { VirtQueue *vq = virtio_get_queue(vdev, i); + AioContext *ctx = s->vq_aio_context[i]; + if (vq == vs->event_vq) { - virtio_queue_aio_attach_host_notifier_no_poll(vq, s->ctx); + virtio_queue_aio_attach_host_notifier_no_poll(vq, ctx); } else { - virtio_queue_aio_attach_host_notifier(vq, s->ctx); + virtio_queue_aio_attach_host_notifier(vq, ctx); } } } @@ -1239,8 +1316,8 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp) VirtIOSCSI *s = VIRTIO_SCSI(dev); Error *err = NULL; - QTAILQ_INIT(&s->tmf_bh_list); - qemu_mutex_init(&s->tmf_bh_lock); + qemu_mutex_init(&s->ctrl_lock); + qemu_mutex_init(&s->event_lock); virtio_scsi_common_realize(dev, virtio_scsi_handle_ctrl, @@ -1275,18 +1352,19 @@ void virtio_scsi_common_unrealize(DeviceState *dev) virtio_cleanup(vdev); } +/* main loop */ static void virtio_scsi_device_unrealize(DeviceState *dev) { VirtIOSCSI *s = VIRTIO_SCSI(dev); - virtio_scsi_reset_tmf_bh(s); - + virtio_scsi_dataplane_cleanup(s); qbus_set_hotplug_handler(BUS(&s->bus), NULL); virtio_scsi_common_unrealize(dev); - qemu_mutex_destroy(&s->tmf_bh_lock); + qemu_mutex_destroy(&s->event_lock); + qemu_mutex_destroy(&s->ctrl_lock); } -static Property virtio_scsi_properties[] = { +static const Property virtio_scsi_properties[] = { DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues, VIRTIO_SCSI_AUTO_NUM_QUEUES), DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI, @@ -1303,7 +1381,8 @@ static Property virtio_scsi_properties[] = { VIRTIO_SCSI_F_CHANGE, true), DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread, TYPE_IOTHREAD, IOThread *), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOSCSI, + parent_obj.conf.iothread_vq_mapping_list), }; static const VMStateDescription vmstate_virtio_scsi = { @@ -1316,7 +1395,7 @@ static const VMStateDescription vmstate_virtio_scsi = { }, }; -static void virtio_scsi_common_class_init(ObjectClass *klass, void *data) +static void virtio_scsi_common_class_init(ObjectClass *klass, const void *data) { VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -1325,7 +1404,7 @@ static void virtio_scsi_common_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } -static void virtio_scsi_class_init(ObjectClass *klass, void *data) +static void virtio_scsi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); 
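
Editor's note: in the event-queue hunks above, events_dropped is only read and written under the new event_lock, and a previously dropped event is folded into the next successfully queued one as an "events missed" flag. The standalone sketch below models that handling with a pthread mutex; the names and the EVT_MISSED bit are stand-ins for the QEMU lock and VIRTIO_SCSI_T_EVENTS_MISSED.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define EVT_MISSED 0x80000000u   /* stand-in for VIRTIO_SCSI_T_EVENTS_MISSED */

    static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool events_dropped;

    /* Called when no buffer is available for an event. */
    static void drop_event(void)
    {
        pthread_mutex_lock(&event_lock);
        events_dropped = true;
        pthread_mutex_unlock(&event_lock);
    }

    /* Called when an event can be queued; merges the dropped-event state in. */
    static unsigned push_event(unsigned event)
    {
        pthread_mutex_lock(&event_lock);
        if (events_dropped) {
            event |= EVT_MISSED;
            events_dropped = false;
        }
        pthread_mutex_unlock(&event_lock);
        return event;
    }

    int main(void)
    {
        drop_event();                               /* queue was full earlier */
        printf("event=0x%x\n", push_event(0x1));    /* carries the MISSED bit */
        printf("event=0x%x\n", push_event(0x1));    /* next event is clean */
        return 0;
    }
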
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); @@ -1359,7 +1438,7 @@ static const TypeInfo virtio_scsi_info = { .parent = TYPE_VIRTIO_SCSI_COMMON, .instance_size = sizeof(VirtIOSCSI), .class_init = virtio_scsi_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } } diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c index cd7bf6aa015..7c98b1b8ea6 100644 --- a/hw/scsi/vmw_pvscsi.c +++ b/hw/scsi/vmw_pvscsi.c @@ -68,18 +68,7 @@ struct PVSCSIClass { OBJECT_DECLARE_TYPE(PVSCSIState, PVSCSIClass, PVSCSI) -/* Compatibility flags for migration */ -#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT 0 -#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION \ - (1 << PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT) -#define PVSCSI_COMPAT_DISABLE_PCIE_BIT 1 -#define PVSCSI_COMPAT_DISABLE_PCIE \ - (1 << PVSCSI_COMPAT_DISABLE_PCIE_BIT) - -#define PVSCSI_USE_OLD_PCI_CONFIGURATION(s) \ - ((s)->compat_flags & PVSCSI_COMPAT_OLD_PCI_CONFIGURATION) -#define PVSCSI_MSI_OFFSET(s) \ - (PVSCSI_USE_OLD_PCI_CONFIGURATION(s) ? 0x50 : 0x7c) +#define PVSCSI_MSI_OFFSET (0x7c) #define PVSCSI_EXP_EP_OFFSET (0x40) typedef struct PVSCSIRingInfo { @@ -129,8 +118,6 @@ struct PVSCSIState { uint8_t msi_used; /* For migration compatibility */ PVSCSIRingInfo rings; /* Data transfer rings manager */ uint32_t resetting; /* Reset in progress */ - - uint32_t compat_flags; }; typedef struct PVSCSIRequest { @@ -1110,7 +1097,7 @@ pvscsi_init_msi(PVSCSIState *s) int res; PCIDevice *d = PCI_DEVICE(s); - res = msi_init(d, PVSCSI_MSI_OFFSET(s), PVSCSI_MSIX_NUM_VECTORS, + res = msi_init(d, PVSCSI_MSI_OFFSET, PVSCSI_MSIX_NUM_VECTORS, PVSCSI_USE_64BIT, PVSCSI_PER_VECTOR_MASK, NULL); if (res < 0) { trace_pvscsi_init_msi_fail(res); @@ -1158,15 +1145,11 @@ pvscsi_realizefn(PCIDevice *pci_dev, Error **errp) trace_pvscsi_state("init"); /* PCI subsystem ID, subsystem vendor ID, revision */ - if (PVSCSI_USE_OLD_PCI_CONFIGURATION(s)) { - pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 0x1000); - } else { - pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, - PCI_VENDOR_ID_VMWARE); - pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, - PCI_DEVICE_ID_VMWARE_PVSCSI); - pci_config_set_revision(pci_dev->config, 0x2); - } + pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, + PCI_VENDOR_ID_VMWARE); + pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, + PCI_DEVICE_ID_VMWARE_PVSCSI); + pci_config_set_revision(pci_dev->config, 0x2); /* PCI latency timer = 255 */ pci_dev->config[PCI_LATENCY_TIMER] = 0xff; @@ -1234,21 +1217,8 @@ pvscsi_post_load(void *opaque, int version_id) return 0; } -static bool pvscsi_vmstate_need_pcie_device(void *opaque) -{ - PVSCSIState *s = PVSCSI(opaque); - - return !(s->compat_flags & PVSCSI_COMPAT_DISABLE_PCIE); -} - -static bool pvscsi_vmstate_test_pci_device(void *opaque, int version_id) -{ - return !pvscsi_vmstate_need_pcie_device(opaque); -} - static const VMStateDescription vmstate_pvscsi_pcie_device = { .name = "pvscsi/pcie", - .needed = pvscsi_vmstate_need_pcie_device, .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState), VMSTATE_END_OF_LIST() @@ -1262,9 +1232,6 @@ static const VMStateDescription vmstate_pvscsi = { .pre_save = pvscsi_pre_save, .post_load = pvscsi_post_load, .fields = (const VMStateField[]) { - VMSTATE_STRUCT_TEST(parent_obj, PVSCSIState, - pvscsi_vmstate_test_pci_device, 0, - vmstate_pci_device, PCIDevice), VMSTATE_UINT8(msi_used, PVSCSIState), VMSTATE_UINT32(resetting, PVSCSIState), VMSTATE_UINT64(reg_interrupt_status, 
PVSCSIState), @@ -1296,33 +1263,19 @@ static const VMStateDescription vmstate_pvscsi = { } }; -static Property pvscsi_properties[] = { +static const Property pvscsi_properties[] = { DEFINE_PROP_UINT8("use_msg", PVSCSIState, use_msg, 1), - DEFINE_PROP_BIT("x-old-pci-configuration", PVSCSIState, compat_flags, - PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT, false), - DEFINE_PROP_BIT("x-disable-pcie", PVSCSIState, compat_flags, - PVSCSI_COMPAT_DISABLE_PCIE_BIT, false), - DEFINE_PROP_END_OF_LIST(), }; -static void pvscsi_realize(DeviceState *qdev, Error **errp) +static void pvscsi_instance_init(Object *obj) { - PVSCSIClass *pvs_c = PVSCSI_GET_CLASS(qdev); - PCIDevice *pci_dev = PCI_DEVICE(qdev); - PVSCSIState *s = PVSCSI(qdev); - - if (!(s->compat_flags & PVSCSI_COMPAT_DISABLE_PCIE)) { - pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; - } - - pvs_c->parent_dc_realize(qdev, errp); + PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS; } -static void pvscsi_class_init(ObjectClass *klass, void *data) +static void pvscsi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); - PVSCSIClass *pvs_k = PVSCSI_CLASS(klass); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); k->realize = pvscsi_realizefn; @@ -1331,9 +1284,7 @@ static void pvscsi_class_init(ObjectClass *klass, void *data) k->device_id = PCI_DEVICE_ID_VMWARE_PVSCSI; k->class_id = PCI_CLASS_STORAGE_SCSI; k->subsystem_id = 0x1000; - device_class_set_parent_realize(dc, pvscsi_realize, - &pvs_k->parent_dc_realize); - dc->reset = pvscsi_reset; + device_class_set_legacy_reset(dc, pvscsi_reset); dc->vmsd = &vmstate_pvscsi; device_class_set_props(dc, pvscsi_properties); set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); @@ -1347,7 +1298,8 @@ static const TypeInfo pvscsi_info = { .class_size = sizeof(PVSCSIClass), .instance_size = sizeof(PVSCSIState), .class_init = pvscsi_class_init, - .interfaces = (InterfaceInfo[]) { + .instance_init = pvscsi_instance_init, + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { INTERFACE_PCIE_DEVICE }, { INTERFACE_CONVENTIONAL_PCI_DEVICE }, diff --git a/hw/scsi/vmw_pvscsi.h b/hw/scsi/vmw_pvscsi.h index 17fcf662730..a3ae517e199 100644 --- a/hw/scsi/vmw_pvscsi.h +++ b/hw/scsi/vmw_pvscsi.h @@ -14,8 +14,8 @@ * details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * along with this program; if not, see + * . 
* * Maintained by: Arvind Kumar * diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c index a1b7230633e..9d61b372e70 100644 --- a/hw/sd/allwinner-sdhost.c +++ b/hw/sd/allwinner-sdhost.c @@ -22,8 +22,8 @@ #include "qemu/module.h" #include "qemu/units.h" #include "qapi/error.h" -#include "sysemu/blockdev.h" -#include "sysemu/dma.h" +#include "system/blockdev.h" +#include "system/dma.h" #include "hw/qdev-properties.h" #include "hw/irq.h" #include "hw/sd/allwinner-sdhost.h" @@ -233,7 +233,7 @@ static void allwinner_sdhost_send_command(AwSdHostState *s) { SDRequest request; uint8_t resp[16]; - int rlen; + size_t rlen; /* Auto clear load flag */ s->command &= ~SD_CMDR_LOAD; @@ -246,10 +246,7 @@ static void allwinner_sdhost_send_command(AwSdHostState *s) request.arg = s->command_arg; /* Send request to SD bus */ - rlen = sdbus_do_command(&s->sdbus, &request, resp); - if (rlen < 0) { - goto error; - } + rlen = sdbus_do_command(&s->sdbus, &request, resp, sizeof(resp)); /* If the command has a response, store it in the response registers */ if ((s->command & SD_CMDR_RESPONSE)) { @@ -761,7 +758,7 @@ static void allwinner_sdhost_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_sdhost_ops = { .read = allwinner_sdhost_read, .write = allwinner_sdhost_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -808,10 +805,9 @@ static const VMStateDescription vmstate_allwinner_sdhost = { } }; -static Property allwinner_sdhost_properties[] = { +static const Property allwinner_sdhost_properties[] = { DEFINE_PROP_LINK("dma-memory", AwSdHostState, dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; static void allwinner_sdhost_init(Object *obj) @@ -889,24 +885,26 @@ static void allwinner_sdhost_reset(DeviceState *dev) } } -static void allwinner_sdhost_bus_class_init(ObjectClass *klass, void *data) +static void allwinner_sdhost_bus_class_init(ObjectClass *klass, + const void *data) { SDBusClass *sbc = SD_BUS_CLASS(klass); sbc->set_inserted = allwinner_sdhost_set_inserted; } -static void allwinner_sdhost_class_init(ObjectClass *klass, void *data) +static void allwinner_sdhost_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = allwinner_sdhost_reset; + device_class_set_legacy_reset(dc, allwinner_sdhost_reset); dc->vmsd = &vmstate_allwinner_sdhost; dc->realize = allwinner_sdhost_realize; device_class_set_props(dc, allwinner_sdhost_properties); } -static void allwinner_sdhost_sun4i_class_init(ObjectClass *klass, void *data) +static void allwinner_sdhost_sun4i_class_init(ObjectClass *klass, + const void *data) { AwSdHostClass *sc = AW_SDHOST_CLASS(klass); sc->max_desc_size = 8 * KiB; @@ -914,7 +912,8 @@ static void allwinner_sdhost_sun4i_class_init(ObjectClass *klass, void *data) sc->can_calibrate = false; } -static void allwinner_sdhost_sun5i_class_init(ObjectClass *klass, void *data) +static void allwinner_sdhost_sun5i_class_init(ObjectClass *klass, + const void *data) { AwSdHostClass *sc = AW_SDHOST_CLASS(klass); sc->max_desc_size = 64 * KiB; @@ -923,7 +922,7 @@ static void allwinner_sdhost_sun5i_class_init(ObjectClass *klass, void *data) } static void allwinner_sdhost_sun50i_a64_class_init(ObjectClass *klass, - void *data) + const void *data) { AwSdHostClass *sc = AW_SDHOST_CLASS(klass); sc->max_desc_size = 64 * KiB; @@ -932,7 +931,7 @@ static void allwinner_sdhost_sun50i_a64_class_init(ObjectClass *klass, } static 
void allwinner_sdhost_sun50i_a64_emmc_class_init(ObjectClass *klass, - void *data) + const void *data) { AwSdHostClass *sc = AW_SDHOST_CLASS(klass); sc->max_desc_size = 8 * KiB; diff --git a/hw/sd/aspeed_sdhci.c b/hw/sd/aspeed_sdhci.c index 3b63926c3a2..fc38ad39ce1 100644 --- a/hw/sd/aspeed_sdhci.c +++ b/hw/sd/aspeed_sdhci.c @@ -24,8 +24,10 @@ #define ASPEED_SDHCI_DEBOUNCE_RESET 0x00000005 #define ASPEED_SDHCI_BUS 0x08 #define ASPEED_SDHCI_SDIO_140 0x10 +#define ASPEED_SDHCI_SDIO_144 0x14 #define ASPEED_SDHCI_SDIO_148 0x18 #define ASPEED_SDHCI_SDIO_240 0x20 +#define ASPEED_SDHCI_SDIO_244 0x24 #define ASPEED_SDHCI_SDIO_248 0x28 #define ASPEED_SDHCI_WP_POL 0xec #define ASPEED_SDHCI_CARD_DET 0xf0 @@ -35,21 +37,27 @@ static uint64_t aspeed_sdhci_read(void *opaque, hwaddr addr, unsigned int size) { - uint32_t val = 0; + uint64_t val = 0; AspeedSDHCIState *sdhci = opaque; switch (addr) { case ASPEED_SDHCI_SDIO_140: - val = (uint32_t)sdhci->slots[0].capareg; + val = extract64(sdhci->slots[0].capareg, 0, 32); + break; + case ASPEED_SDHCI_SDIO_144: + val = extract64(sdhci->slots[0].capareg, 32, 32); break; case ASPEED_SDHCI_SDIO_148: - val = (uint32_t)sdhci->slots[0].maxcurr; + val = extract64(sdhci->slots[0].maxcurr, 0, 32); break; case ASPEED_SDHCI_SDIO_240: - val = (uint32_t)sdhci->slots[1].capareg; + val = extract64(sdhci->slots[1].capareg, 0, 32); + break; + case ASPEED_SDHCI_SDIO_244: + val = extract64(sdhci->slots[1].capareg, 32, 32); break; case ASPEED_SDHCI_SDIO_248: - val = (uint32_t)sdhci->slots[1].maxcurr; + val = extract64(sdhci->slots[1].maxcurr, 0, 32); break; default: if (addr < ASPEED_SDHCI_REG_SIZE) { @@ -61,9 +69,9 @@ static uint64_t aspeed_sdhci_read(void *opaque, hwaddr addr, unsigned int size) } } - trace_aspeed_sdhci_read(addr, size, (uint64_t) val); + trace_aspeed_sdhci_read(addr, size, val); - return (uint64_t)val; + return val; } static void aspeed_sdhci_write(void *opaque, hwaddr addr, uint64_t val, @@ -79,16 +87,28 @@ static void aspeed_sdhci_write(void *opaque, hwaddr addr, uint64_t val, sdhci->regs[TO_REG(addr)] = (uint32_t)val & ~ASPEED_SDHCI_INFO_RESET; break; case ASPEED_SDHCI_SDIO_140: - sdhci->slots[0].capareg = (uint64_t)(uint32_t)val; + sdhci->slots[0].capareg = deposit64(sdhci->slots[0].capareg, + 0, 32, val); + break; + case ASPEED_SDHCI_SDIO_144: + sdhci->slots[0].capareg = deposit64(sdhci->slots[0].capareg, + 32, 32, val); break; case ASPEED_SDHCI_SDIO_148: - sdhci->slots[0].maxcurr = (uint64_t)(uint32_t)val; + sdhci->slots[0].maxcurr = deposit64(sdhci->slots[0].maxcurr, + 0, 32, val); break; case ASPEED_SDHCI_SDIO_240: - sdhci->slots[1].capareg = (uint64_t)(uint32_t)val; + sdhci->slots[1].capareg = deposit64(sdhci->slots[1].capareg, + 0, 32, val); + break; + case ASPEED_SDHCI_SDIO_244: + sdhci->slots[1].capareg = deposit64(sdhci->slots[1].capareg, + 32, 32, val); break; case ASPEED_SDHCI_SDIO_248: - sdhci->slots[1].maxcurr = (uint64_t)(uint32_t)val; + sdhci->slots[1].maxcurr = deposit64(sdhci->slots[1].maxcurr, + 0, 32, val); break; default: if (addr < ASPEED_SDHCI_REG_SIZE) { @@ -128,6 +148,7 @@ static void aspeed_sdhci_realize(DeviceState *dev, Error **errp) { SysBusDevice *sbd = SYS_BUS_DEVICE(dev); AspeedSDHCIState *sdhci = ASPEED_SDHCI(dev); + AspeedSDHCIClass *asc = ASPEED_SDHCI_GET_CLASS(sdhci); /* Create input irqs for the slots */ qdev_init_gpio_in_named_with_opaque(DEVICE(sbd), aspeed_sdhci_set_irq, @@ -147,7 +168,7 @@ static void aspeed_sdhci_realize(DeviceState *dev, Error **errp) } if (!object_property_set_uint(sdhci_slot, "capareg", -
ASPEED_SDHCI_CAPABILITIES, errp)) { + asc->capareg, errp)) { return; } @@ -183,27 +204,84 @@ static const VMStateDescription vmstate_aspeed_sdhci = { }, }; -static Property aspeed_sdhci_properties[] = { +static const Property aspeed_sdhci_properties[] = { DEFINE_PROP_UINT8("num-slots", AspeedSDHCIState, num_slots, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_sdhci_class_init(ObjectClass *classp, void *data) +static void aspeed_sdhci_class_init(ObjectClass *classp, const void *data) { DeviceClass *dc = DEVICE_CLASS(classp); dc->realize = aspeed_sdhci_realize; - dc->reset = aspeed_sdhci_reset; + device_class_set_legacy_reset(dc, aspeed_sdhci_reset); dc->vmsd = &vmstate_aspeed_sdhci; device_class_set_props(dc, aspeed_sdhci_properties); } +static void aspeed_2400_sdhci_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSDHCIClass *asc = ASPEED_SDHCI_CLASS(klass); + + dc->desc = "ASPEED 2400 SDHCI Controller"; + asc->capareg = 0x0000000001e80080; +} + +static void aspeed_2500_sdhci_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSDHCIClass *asc = ASPEED_SDHCI_CLASS(klass); + + dc->desc = "ASPEED 2500 SDHCI Controller"; + asc->capareg = 0x0000000001e80080; +} + +static void aspeed_2600_sdhci_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSDHCIClass *asc = ASPEED_SDHCI_CLASS(klass); + + dc->desc = "ASPEED 2600 SDHCI Controller"; + asc->capareg = 0x0000000701f80080; +} + +static void aspeed_2700_sdhci_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSDHCIClass *asc = ASPEED_SDHCI_CLASS(klass); + + dc->desc = "ASPEED 2700 SDHCI Controller"; + asc->capareg = 0x0000000719f80080; +} + static const TypeInfo aspeed_sdhci_types[] = { { .name = TYPE_ASPEED_SDHCI, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(AspeedSDHCIState), .class_init = aspeed_sdhci_class_init, + .class_size = sizeof(AspeedSDHCIClass), + .abstract = true, + }, + { + .name = TYPE_ASPEED_2400_SDHCI, + .parent = TYPE_ASPEED_SDHCI, + .class_init = aspeed_2400_sdhci_class_init, + }, + { + .name = TYPE_ASPEED_2500_SDHCI, + .parent = TYPE_ASPEED_SDHCI, + .class_init = aspeed_2500_sdhci_class_init, + }, + { + .name = TYPE_ASPEED_2600_SDHCI, + .parent = TYPE_ASPEED_SDHCI, + .class_init = aspeed_2600_sdhci_class_init, + }, + { + .name = TYPE_ASPEED_2700_SDHCI, + .parent = TYPE_ASPEED_SDHCI, + .class_init = aspeed_2700_sdhci_class_init, }, }; diff --git a/hw/sd/bcm2835_sdhost.c b/hw/sd/bcm2835_sdhost.c index 11c54dd4a73..f7cef7bb1cd 100644 --- a/hw/sd/bcm2835_sdhost.c +++ b/hw/sd/bcm2835_sdhost.c @@ -14,7 +14,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "qemu/module.h" -#include "sysemu/blockdev.h" +#include "system/blockdev.h" #include "hw/irq.h" #include "hw/sd/bcm2835_sdhost.h" #include "migration/vmstate.h" @@ -113,15 +113,12 @@ static void bcm2835_sdhost_send_command(BCM2835SDHostState *s) { SDRequest request; uint8_t rsp[16]; - int rlen; + size_t rlen; request.cmd = s->cmd & SDCMD_CMD_MASK; request.arg = s->cmdarg; - rlen = sdbus_do_command(&s->sdbus, &request, rsp); - if (rlen < 0) { - goto error; - } + rlen = sdbus_do_command(&s->sdbus, &request, rsp, sizeof(rsp)); if (!(s->cmd & SDCMD_NO_RESPONSE)) { if (rlen == 0 || (rlen == 4 && (s->cmd & SDCMD_LONG_RESPONSE))) { goto error; @@ -428,11 +425,11 @@ static void bcm2835_sdhost_reset(DeviceState *dev) s->fifo_len = 0; } -static void 
bcm2835_sdhost_class_init(ObjectClass *klass, void *data) +static void bcm2835_sdhost_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = bcm2835_sdhost_reset; + device_class_set_legacy_reset(dc, bcm2835_sdhost_reset); dc->vmsd = &vmstate_bcm2835_sdhost; } diff --git a/hw/sd/cadence_sdhci.c b/hw/sd/cadence_sdhci.c index 7c8bc5464b9..d576855a1a8 100644 --- a/hw/sd/cadence_sdhci.c +++ b/hw/sd/cadence_sdhci.c @@ -165,13 +165,13 @@ static const VMStateDescription vmstate_cadence_sdhci = { }, }; -static void cadence_sdhci_class_init(ObjectClass *classp, void *data) +static void cadence_sdhci_class_init(ObjectClass *classp, const void *data) { DeviceClass *dc = DEVICE_CLASS(classp); dc->desc = "Cadence SD/SDIO/eMMC Host Controller (SD4HC)"; dc->realize = cadence_sdhci_realize; - dc->reset = cadence_sdhci_reset; + device_class_set_legacy_reset(dc, cadence_sdhci_reset); dc->vmsd = &vmstate_cadence_sdhci; } diff --git a/hw/sd/core.c b/hw/sd/core.c index 4b30218b520..d3c9017445e 100644 --- a/hw/sd/core.c +++ b/hw/sd/core.c @@ -90,7 +90,8 @@ void sdbus_set_voltage(SDBus *sdbus, uint16_t millivolts) } } -int sdbus_do_command(SDBus *sdbus, SDRequest *req, uint8_t *response) +size_t sdbus_do_command(SDBus *sdbus, SDRequest *req, + uint8_t *resp, size_t respsz) { SDState *card = get_card(sdbus); @@ -98,7 +99,7 @@ int sdbus_do_command(SDBus *sdbus, SDRequest *req, uint8_t *response) if (card) { SDCardClass *sc = SDMMC_COMMON_GET_CLASS(card); - return sc->do_command(card, req, response); + return sc->do_command(card, req, resp, respsz); } return 0; diff --git a/hw/sd/meson.build b/hw/sd/meson.build index bbb75af0c9b..b43d45bc564 100644 --- a/hw/sd/meson.build +++ b/hw/sd/meson.build @@ -5,7 +5,6 @@ system_ss.add(when: 'CONFIG_SDHCI_PCI', if_true: files('sdhci-pci.c')) system_ss.add(when: 'CONFIG_SSI_SD', if_true: files('ssi-sd.c')) system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_mmc.c')) -system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_mmci.c')) system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_sdhost.c')) system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_sdhci.c')) system_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-sdhost.c')) diff --git a/hw/sd/npcm7xx_sdhci.c b/hw/sd/npcm7xx_sdhci.c index fb51821e110..0233d7bd4d3 100644 --- a/hw/sd/npcm7xx_sdhci.c +++ b/hw/sd/npcm7xx_sdhci.c @@ -149,13 +149,13 @@ static const VMStateDescription vmstate_npcm7xx_sdhci = { }, }; -static void npcm7xx_sdhci_class_init(ObjectClass *classp, void *data) +static void npcm7xx_sdhci_class_init(ObjectClass *classp, const void *data) { DeviceClass *dc = DEVICE_CLASS(classp); dc->desc = "NPCM7xx SD/eMMC Host Controller"; dc->realize = npcm7xx_sdhci_realize; - dc->reset = npcm7xx_sdhci_reset; + device_class_set_legacy_reset(dc, npcm7xx_sdhci_reset); dc->vmsd = &vmstate_npcm7xx_sdhci; } diff --git a/hw/sd/omap_mmc.c b/hw/sd/omap_mmc.c index edd3cf2a1eb..5a1d25defaa 100644 --- a/hw/sd/omap_mmc.c +++ b/hw/sd/omap_mmc.c @@ -21,17 +21,22 @@ #include "qemu/osdep.h" #include "qemu/log.h" +#include "qapi/error.h" #include "hw/irq.h" +#include "hw/sysbus.h" #include "hw/arm/omap.h" -#include "hw/sd/sdcard_legacy.h" +#include "hw/sd/sd.h" + +typedef struct OMAPMMCState { + SysBusDevice parent_obj; + + SDBus sdbus; -struct omap_mmc_s { qemu_irq irq; - qemu_irq *dma; - qemu_irq coverswitch; + qemu_irq dma_tx_gpio; + qemu_irq dma_rx_gpio; MemoryRegion iomem; omap_clk clk; - SDState *card; uint16_t last_cmd; uint16_t sdio; uint16_t 
rsp[8]; @@ -64,16 +69,15 @@ struct omap_mmc_s { int cdet_wakeup; int cdet_enable; - int cdet_state; qemu_irq cdet; -}; +} OMAPMMCState; -static void omap_mmc_interrupts_update(struct omap_mmc_s *s) +static void omap_mmc_interrupts_update(OMAPMMCState *s) { qemu_set_irq(s->irq, !!(s->status & s->mask)); } -static void omap_mmc_fifolevel_update(struct omap_mmc_s *host) +static void omap_mmc_fifolevel_update(OMAPMMCState *host) { if (!host->transfer && !host->fifo_len) { host->status &= 0xf3ff; @@ -83,40 +87,51 @@ static void omap_mmc_fifolevel_update(struct omap_mmc_s *host) if (host->fifo_len > host->af_level && host->ddir) { if (host->rx_dma) { host->status &= 0xfbff; - qemu_irq_raise(host->dma[1]); + qemu_irq_raise(host->dma_rx_gpio); } else host->status |= 0x0400; } else { host->status &= 0xfbff; - qemu_irq_lower(host->dma[1]); + qemu_irq_lower(host->dma_rx_gpio); } if (host->fifo_len < host->ae_level && !host->ddir) { if (host->tx_dma) { host->status &= 0xf7ff; - qemu_irq_raise(host->dma[0]); + qemu_irq_raise(host->dma_tx_gpio); } else host->status |= 0x0800; } else { - qemu_irq_lower(host->dma[0]); + qemu_irq_lower(host->dma_tx_gpio); host->status &= 0xf7ff; } } +/* These must match the encoding of the MMC_CMD Response field */ typedef enum { - sd_nore = 0, /* no response */ - sd_r1, /* normal response command */ - sd_r2, /* CID, CSD registers */ - sd_r3, /* OCR register */ - sd_r6 = 6, /* Published RCA response */ + sd_nore = 0, /* no response */ + sd_r1, /* normal response command */ + sd_r2, /* CID, CSD registers */ + sd_r3, /* OCR register */ + sd_r6 = 6, /* Published RCA response */ sd_r1b = -1, } sd_rsp_type_t; -static void omap_mmc_command(struct omap_mmc_s *host, int cmd, int dir, - sd_cmd_type_t type, int busy, sd_rsp_type_t resptype, int init) +/* These must match the encoding of the MMC_CMD Type field */ +typedef enum { + SD_TYPE_BC = 0, /* broadcast -- no response */ + SD_TYPE_BCR = 1, /* broadcast with response */ + SD_TYPE_AC = 2, /* addressed -- no data transfer */ + SD_TYPE_ADTC = 3, /* addressed with data transfer */ +} MMCCmdType; + +static void omap_mmc_command(OMAPMMCState *host, int cmd, int dir, + MMCCmdType type, int busy, + sd_rsp_type_t resptype, int init) { uint32_t rspstatus, mask; - int rsplen, timeout; + size_t rsplen; + int timeout; SDRequest request; uint8_t response[16]; @@ -128,7 +143,7 @@ static void omap_mmc_command(struct omap_mmc_s *host, int cmd, int dir, if (resptype == sd_r1 && busy) resptype = sd_r1b; - if (type == sd_adtc) { + if (type == SD_TYPE_ADTC) { host->fifo_start = 0; host->fifo_len = 0; host->transfer = 1; @@ -143,7 +158,7 @@ static void omap_mmc_command(struct omap_mmc_s *host, int cmd, int dir, request.arg = host->arg; request.crc = 0; /* FIXME */ - rsplen = sd_do_command(host->card, &request, response); + rsplen = sdbus_do_command(&host->sdbus, &request, response, sizeof(response)); /* TODO: validate CRCs */ switch (resptype) { @@ -215,12 +230,12 @@ static void omap_mmc_command(struct omap_mmc_s *host, int cmd, int dir, if (timeout) host->status |= 0x0080; else if (cmd == 12) - host->status |= 0x0005; /* Makes it more real */ + host->status |= 0x0005; /* Makes it more real */ else host->status |= 0x0001; } -static void omap_mmc_transfer(struct omap_mmc_s *host) +static void omap_mmc_transfer(OMAPMMCState *host) { uint8_t value; @@ -232,10 +247,10 @@ static void omap_mmc_transfer(struct omap_mmc_s *host) if (host->fifo_len > host->af_level) break; - value = sd_read_byte(host->card); + value = sdbus_read_byte(&host->sdbus); 
host->fifo[(host->fifo_start + host->fifo_len) & 31] = value; if (-- host->blen_counter) { - value = sd_read_byte(host->card); + value = sdbus_read_byte(&host->sdbus); host->fifo[(host->fifo_start + host->fifo_len) & 31] |= value << 8; host->blen_counter --; @@ -247,10 +262,10 @@ static void omap_mmc_transfer(struct omap_mmc_s *host) break; value = host->fifo[host->fifo_start] & 0xff; - sd_write_byte(host->card, value); + sdbus_write_byte(&host->sdbus, value); if (-- host->blen_counter) { value = host->fifo[host->fifo_start] >> 8; - sd_write_byte(host->card, value); + sdbus_write_byte(&host->sdbus, value); host->blen_counter --; } @@ -275,19 +290,19 @@ static void omap_mmc_transfer(struct omap_mmc_s *host) static void omap_mmc_update(void *opaque) { - struct omap_mmc_s *s = opaque; + OMAPMMCState *s = opaque; omap_mmc_transfer(s); omap_mmc_fifolevel_update(s); omap_mmc_interrupts_update(s); } -static void omap_mmc_pseudo_reset(struct omap_mmc_s *host) +static void omap_mmc_pseudo_reset(OMAPMMCState *host) { host->status = 0; host->fifo_len = 0; } -void omap_mmc_reset(struct omap_mmc_s *host) +static void omap_mmc_reset(OMAPMMCState *host) { host->last_cmd = 0; memset(host->rsp, 0, sizeof(host->rsp)); @@ -309,54 +324,47 @@ void omap_mmc_reset(struct omap_mmc_s *host) host->transfer = 0; host->cdet_wakeup = 0; host->cdet_enable = 0; - qemu_set_irq(host->coverswitch, host->cdet_state); host->clkdiv = 0; omap_mmc_pseudo_reset(host); - - /* Since we're still using the legacy SD API the card is not plugged - * into any bus, and we must reset it manually. When omap_mmc is - * QOMified this must move into the QOM reset function. - */ - device_cold_reset(DEVICE(host->card)); } static uint64_t omap_mmc_read(void *opaque, hwaddr offset, unsigned size) { uint16_t i; - struct omap_mmc_s *s = opaque; + OMAPMMCState *s = opaque; if (size != 2) { return omap_badwidth_read16(opaque, offset); } switch (offset) { - case 0x00: /* MMC_CMD */ + case 0x00: /* MMC_CMD */ return s->last_cmd; - case 0x04: /* MMC_ARGL */ + case 0x04: /* MMC_ARGL */ return s->arg & 0x0000ffff; - case 0x08: /* MMC_ARGH */ + case 0x08: /* MMC_ARGH */ return s->arg >> 16; - case 0x0c: /* MMC_CON */ + case 0x0c: /* MMC_CON */ return (s->dw << 15) | (s->mode << 12) | (s->enable << 11) | (s->be << 10) | s->clkdiv; - case 0x10: /* MMC_STAT */ + case 0x10: /* MMC_STAT */ return s->status; - case 0x14: /* MMC_IE */ + case 0x14: /* MMC_IE */ return s->mask; - case 0x18: /* MMC_CTO */ + case 0x18: /* MMC_CTO */ return s->cto; - case 0x1c: /* MMC_DTO */ + case 0x1c: /* MMC_DTO */ return s->dto; - case 0x20: /* MMC_DATA */ + case 0x20: /* MMC_DATA */ /* TODO: support 8-bit access */ i = s->fifo[s->fifo_start]; if (s->fifo_len == 0) { @@ -371,42 +379,42 @@ static uint64_t omap_mmc_read(void *opaque, hwaddr offset, unsigned size) omap_mmc_interrupts_update(s); return i; - case 0x24: /* MMC_BLEN */ + case 0x24: /* MMC_BLEN */ return s->blen_counter; - case 0x28: /* MMC_NBLK */ + case 0x28: /* MMC_NBLK */ return s->nblk_counter; - case 0x2c: /* MMC_BUF */ + case 0x2c: /* MMC_BUF */ return (s->rx_dma << 15) | (s->af_level << 8) | (s->tx_dma << 7) | s->ae_level; - case 0x30: /* MMC_SPI */ + case 0x30: /* MMC_SPI */ return 0x0000; - case 0x34: /* MMC_SDIO */ + case 0x34: /* MMC_SDIO */ return (s->cdet_wakeup << 2) | (s->cdet_enable) | s->sdio; - case 0x38: /* MMC_SYST */ + case 0x38: /* MMC_SYST */ return 0x0000; - case 0x3c: /* MMC_REV */ + case 0x3c: /* MMC_REV */ return s->rev; - case 0x40: /* MMC_RSP0 */ - case 0x44: /* MMC_RSP1 */ - case 0x48: /* 
MMC_RSP2 */ - case 0x4c: /* MMC_RSP3 */ - case 0x50: /* MMC_RSP4 */ - case 0x54: /* MMC_RSP5 */ - case 0x58: /* MMC_RSP6 */ - case 0x5c: /* MMC_RSP7 */ + case 0x40: /* MMC_RSP0 */ + case 0x44: /* MMC_RSP1 */ + case 0x48: /* MMC_RSP2 */ + case 0x4c: /* MMC_RSP3 */ + case 0x50: /* MMC_RSP4 */ + case 0x54: /* MMC_RSP5 */ + case 0x58: /* MMC_RSP6 */ + case 0x5c: /* MMC_RSP7 */ return s->rsp[(offset - 0x40) >> 2]; /* OMAP2-specific */ - case 0x60: /* MMC_IOSR */ - case 0x64: /* MMC_SYSC */ + case 0x60: /* MMC_IOSR */ + case 0x64: /* MMC_SYSC */ return 0; - case 0x68: /* MMC_SYSS */ - return 1; /* RSTD */ + case 0x68: /* MMC_SYSS */ + return 1; /* RSTD */ } OMAP_BAD_REG(offset); @@ -417,7 +425,7 @@ static void omap_mmc_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { int i; - struct omap_mmc_s *s = opaque; + OMAPMMCState *s = opaque; if (size != 2) { omap_badwidth_write16(opaque, offset, value); @@ -425,7 +433,7 @@ static void omap_mmc_write(void *opaque, hwaddr offset, } switch (offset) { - case 0x00: /* MMC_CMD */ + case 0x00: /* MMC_CMD */ if (!s->enable) break; @@ -433,24 +441,24 @@ static void omap_mmc_write(void *opaque, hwaddr offset, for (i = 0; i < 8; i ++) s->rsp[i] = 0x0000; omap_mmc_command(s, value & 63, (value >> 15) & 1, - (sd_cmd_type_t) ((value >> 12) & 3), - (value >> 11) & 1, - (sd_rsp_type_t) ((value >> 8) & 7), - (value >> 7) & 1); + (MMCCmdType)((value >> 12) & 3), + (value >> 11) & 1, + (sd_rsp_type_t) ((value >> 8) & 7), + (value >> 7) & 1); omap_mmc_update(s); break; - case 0x04: /* MMC_ARGL */ + case 0x04: /* MMC_ARGL */ s->arg &= 0xffff0000; s->arg |= 0x0000ffff & value; break; - case 0x08: /* MMC_ARGH */ + case 0x08: /* MMC_ARGH */ s->arg &= 0x0000ffff; s->arg |= value << 16; break; - case 0x0c: /* MMC_CON */ + case 0x0c: /* MMC_CON */ s->dw = (value >> 15) & 1; s->mode = (value >> 12) & 3; s->enable = (value >> 11) & 1; @@ -470,27 +478,27 @@ static void omap_mmc_write(void *opaque, hwaddr offset, omap_mmc_pseudo_reset(s); break; - case 0x10: /* MMC_STAT */ + case 0x10: /* MMC_STAT */ s->status &= ~value; omap_mmc_interrupts_update(s); break; - case 0x14: /* MMC_IE */ + case 0x14: /* MMC_IE */ s->mask = value & 0x7fff; omap_mmc_interrupts_update(s); break; - case 0x18: /* MMC_CTO */ + case 0x18: /* MMC_CTO */ s->cto = value & 0xff; if (s->cto > 0xfd && s->rev <= 1) printf("MMC: CTO of 0xff and 0xfe cannot be used!\n"); break; - case 0x1c: /* MMC_DTO */ + case 0x1c: /* MMC_DTO */ s->dto = value & 0xffff; break; - case 0x20: /* MMC_DATA */ + case 0x20: /* MMC_DATA */ /* TODO: support 8-bit access */ if (s->fifo_len == 32) break; @@ -501,18 +509,18 @@ static void omap_mmc_write(void *opaque, hwaddr offset, omap_mmc_interrupts_update(s); break; - case 0x24: /* MMC_BLEN */ + case 0x24: /* MMC_BLEN */ s->blen = (value & 0x07ff) + 1; s->blen_counter = s->blen; break; - case 0x28: /* MMC_NBLK */ + case 0x28: /* MMC_NBLK */ s->nblk = (value & 0x07ff) + 1; s->nblk_counter = s->nblk; s->blen_counter = s->blen; break; - case 0x2c: /* MMC_BUF */ + case 0x2c: /* MMC_BUF */ s->rx_dma = (value >> 15) & 1; s->af_level = (value >> 8) & 0x1f; s->tx_dma = (value >> 7) & 1; @@ -527,38 +535,38 @@ static void omap_mmc_write(void *opaque, hwaddr offset, break; /* SPI, SDIO and TEST modes unimplemented */ - case 0x30: /* MMC_SPI (OMAP1 only) */ + case 0x30: /* MMC_SPI (OMAP1 only) */ break; - case 0x34: /* MMC_SDIO */ + case 0x34: /* MMC_SDIO */ s->sdio = value & (s->rev >= 2 ? 
0xfbf3 : 0x2020); s->cdet_wakeup = (value >> 9) & 1; s->cdet_enable = (value >> 2) & 1; break; - case 0x38: /* MMC_SYST */ + case 0x38: /* MMC_SYST */ break; - case 0x3c: /* MMC_REV */ - case 0x40: /* MMC_RSP0 */ - case 0x44: /* MMC_RSP1 */ - case 0x48: /* MMC_RSP2 */ - case 0x4c: /* MMC_RSP3 */ - case 0x50: /* MMC_RSP4 */ - case 0x54: /* MMC_RSP5 */ - case 0x58: /* MMC_RSP6 */ - case 0x5c: /* MMC_RSP7 */ + case 0x3c: /* MMC_REV */ + case 0x40: /* MMC_RSP0 */ + case 0x44: /* MMC_RSP1 */ + case 0x48: /* MMC_RSP2 */ + case 0x4c: /* MMC_RSP3 */ + case 0x50: /* MMC_RSP4 */ + case 0x54: /* MMC_RSP5 */ + case 0x58: /* MMC_RSP6 */ + case 0x5c: /* MMC_RSP7 */ OMAP_RO_REG(offset); break; /* OMAP2-specific */ - case 0x60: /* MMC_IOSR */ + case 0x60: /* MMC_IOSR */ if (value & 0xf) printf("MMC: SDIO bits used!\n"); break; - case 0x64: /* MMC_SYSC */ - if (value & (1 << 2)) /* SRTS */ + case 0x64: /* MMC_SYSC */ + if (value & (1 << 2)) /* SRTS */ omap_mmc_reset(s); break; - case 0x68: /* MMC_SYSS */ + case 0x68: /* MMC_SYSS */ OMAP_RO_REG(offset); break; @@ -573,92 +581,56 @@ static const MemoryRegionOps omap_mmc_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -static void omap_mmc_cover_cb(void *opaque, int line, int level) +void omap_mmc_set_clk(DeviceState *dev, omap_clk clk) { - struct omap_mmc_s *host = opaque; + OMAPMMCState *s = OMAP_MMC(dev); - if (!host->cdet_state && level) { - host->status |= 0x0002; - omap_mmc_interrupts_update(host); - if (host->cdet_wakeup) { - /* TODO: Assert wake-up */ - } - } - - if (host->cdet_state != level) { - qemu_set_irq(host->coverswitch, level); - host->cdet_state = level; - } + s->clk = clk; } -struct omap_mmc_s *omap_mmc_init(hwaddr base, - MemoryRegion *sysmem, - BlockBackend *blk, - qemu_irq irq, qemu_irq dma[], omap_clk clk) +static void omap_mmc_reset_hold(Object *obj, ResetType type) { - struct omap_mmc_s *s = g_new0(struct omap_mmc_s, 1); - - s->irq = irq; - s->dma = dma; - s->clk = clk; - s->lines = 1; /* TODO: needs to be settable per-board */ - s->rev = 1; - - memory_region_init_io(&s->iomem, NULL, &omap_mmc_ops, s, "omap.mmc", 0x800); - memory_region_add_subregion(sysmem, base, &s->iomem); - - /* Instantiate the storage */ - s->card = sd_init(blk, false); - if (s->card == NULL) { - exit(1); - } + OMAPMMCState *s = OMAP_MMC(obj); omap_mmc_reset(s); - - return s; } -struct omap_mmc_s *omap2_mmc_init(struct omap_target_agent_s *ta, - BlockBackend *blk, qemu_irq irq, qemu_irq dma[], - omap_clk fclk, omap_clk iclk) +static void omap_mmc_initfn(Object *obj) { - struct omap_mmc_s *s = g_new0(struct omap_mmc_s, 1); - - s->irq = irq; - s->dma = dma; - s->clk = fclk; - s->lines = 4; - s->rev = 2; - - memory_region_init_io(&s->iomem, NULL, &omap_mmc_ops, s, "omap.mmc", - omap_l4_region_size(ta, 0)); - omap_l4_attach(ta, 0, &s->iomem); - - /* Instantiate the storage */ - s->card = sd_init(blk, false); - if (s->card == NULL) { - exit(1); - } + OMAPMMCState *s = OMAP_MMC(obj); - s->cdet = qemu_allocate_irq(omap_mmc_cover_cb, s, 0); - sd_set_cb(s->card, NULL, s->cdet); + /* In theory these could be settable per-board */ + s->lines = 1; + s->rev = 1; - omap_mmc_reset(s); + memory_region_init_io(&s->iomem, obj, &omap_mmc_ops, s, "omap.mmc", 0x800); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); - return s; + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + qdev_init_gpio_out_named(DEVICE(obj), &s->dma_tx_gpio, "dma-tx", 1); + qdev_init_gpio_out_named(DEVICE(obj), &s->dma_rx_gpio, "dma-rx", 1); + + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS, DEVICE(obj), 
"sd-bus"); } -void omap_mmc_handlers(struct omap_mmc_s *s, qemu_irq ro, qemu_irq cover) +static void omap_mmc_class_init(ObjectClass *oc, const void *data) { - if (s->cdet) { - sd_set_cb(s->card, ro, s->cdet); - s->coverswitch = cover; - qemu_set_irq(cover, s->cdet_state); - } else - sd_set_cb(s->card, ro, cover); + ResettableClass *rc = RESETTABLE_CLASS(oc); + + rc->phases.hold = omap_mmc_reset_hold; } -void omap_mmc_enable(struct omap_mmc_s *s, int enable) +static const TypeInfo omap_mmc_info = { + .name = TYPE_OMAP_MMC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(OMAPMMCState), + .instance_init = omap_mmc_initfn, + .class_init = omap_mmc_class_init, +}; + +static void omap_mmc_register_types(void) { - sd_enable(s->card, enable); + type_register_static(&omap_mmc_info); } + +type_init(omap_mmc_register_types) diff --git a/hw/sd/pl181.c b/hw/sd/pl181.c index e3633c2e6fc..5d56ead4d91 100644 --- a/hw/sd/pl181.c +++ b/hw/sd/pl181.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/blockdev.h" +#include "system/blockdev.h" #include "hw/sysbus.h" #include "migration/vmstate.h" #include "hw/irq.h" @@ -173,14 +173,12 @@ static void pl181_do_command(PL181State *s) { SDRequest request; uint8_t response[16]; - int rlen; + size_t rlen; request.cmd = s->cmd & PL181_CMD_INDEX; request.arg = s->cmdarg; trace_pl181_command_send(request.cmd, request.arg); - rlen = sdbus_do_command(&s->sdbus, &request, response); - if (rlen < 0) - goto error; + rlen = sdbus_do_command(&s->sdbus, &request, response, sizeof(response)); if (s->cmd & PL181_CMD_RESPONSE) { if (rlen == 0 || (rlen == 4 && (s->cmd & PL181_CMD_LONGRESP))) goto error; @@ -509,17 +507,17 @@ static void pl181_init(Object *obj) qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_PL181_BUS, dev, "sd-bus"); } -static void pl181_class_init(ObjectClass *klass, void *data) +static void pl181_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->vmsd = &vmstate_pl181; - k->reset = pl181_reset; + device_class_set_legacy_reset(k, pl181_reset); /* Reason: output IRQs should be wired up */ k->user_creatable = false; } -static void pl181_bus_class_init(ObjectClass *klass, void *data) +static void pl181_bus_class_init(ObjectClass *klass, const void *data) { SDBusClass *sbc = SD_BUS_CLASS(klass); diff --git a/hw/sd/pxa2xx_mmci.c b/hw/sd/pxa2xx_mmci.c deleted file mode 100644 index 82529708c8a..00000000000 --- a/hw/sd/pxa2xx_mmci.c +++ /dev/null @@ -1,594 +0,0 @@ -/* - * Intel XScale PXA255/270 MultiMediaCard/SD/SDIO Controller emulation. - * - * Copyright (c) 2006 Openedhand Ltd. - * Written by Andrzej Zaborowski - * - * This code is licensed under the GPLv2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. 
- */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "hw/irq.h" -#include "hw/sysbus.h" -#include "migration/vmstate.h" -#include "hw/arm/pxa.h" -#include "hw/sd/sd.h" -#include "hw/qdev-properties.h" -#include "qemu/log.h" -#include "qemu/module.h" -#include "trace.h" -#include "qom/object.h" - -#define TYPE_PXA2XX_MMCI_BUS "pxa2xx-mmci-bus" -/* This is reusing the SDBus typedef from SD_BUS */ -DECLARE_INSTANCE_CHECKER(SDBus, PXA2XX_MMCI_BUS, - TYPE_PXA2XX_MMCI_BUS) - -struct PXA2xxMMCIState { - SysBusDevice parent_obj; - - MemoryRegion iomem; - qemu_irq irq; - qemu_irq rx_dma; - qemu_irq tx_dma; - qemu_irq inserted; - qemu_irq readonly; - - BlockBackend *blk; - SDBus sdbus; - - uint32_t status; - uint32_t clkrt; - uint32_t spi; - uint32_t cmdat; - uint32_t resp_tout; - uint32_t read_tout; - int32_t blklen; - int32_t numblk; - uint32_t intmask; - uint32_t intreq; - int32_t cmd; - uint32_t arg; - - int32_t active; - int32_t bytesleft; - uint8_t tx_fifo[64]; - uint32_t tx_start; - uint32_t tx_len; - uint8_t rx_fifo[32]; - uint32_t rx_start; - uint32_t rx_len; - uint16_t resp_fifo[9]; - uint32_t resp_len; - - int32_t cmdreq; -}; - -static bool pxa2xx_mmci_vmstate_validate(void *opaque, int version_id) -{ - PXA2xxMMCIState *s = opaque; - - return s->tx_start < ARRAY_SIZE(s->tx_fifo) - && s->rx_start < ARRAY_SIZE(s->rx_fifo) - && s->tx_len <= ARRAY_SIZE(s->tx_fifo) - && s->rx_len <= ARRAY_SIZE(s->rx_fifo) - && s->resp_len <= ARRAY_SIZE(s->resp_fifo); -} - - -static const VMStateDescription vmstate_pxa2xx_mmci = { - .name = "pxa2xx-mmci", - .version_id = 2, - .minimum_version_id = 2, - .fields = (const VMStateField[]) { - VMSTATE_UINT32(status, PXA2xxMMCIState), - VMSTATE_UINT32(clkrt, PXA2xxMMCIState), - VMSTATE_UINT32(spi, PXA2xxMMCIState), - VMSTATE_UINT32(cmdat, PXA2xxMMCIState), - VMSTATE_UINT32(resp_tout, PXA2xxMMCIState), - VMSTATE_UINT32(read_tout, PXA2xxMMCIState), - VMSTATE_INT32(blklen, PXA2xxMMCIState), - VMSTATE_INT32(numblk, PXA2xxMMCIState), - VMSTATE_UINT32(intmask, PXA2xxMMCIState), - VMSTATE_UINT32(intreq, PXA2xxMMCIState), - VMSTATE_INT32(cmd, PXA2xxMMCIState), - VMSTATE_UINT32(arg, PXA2xxMMCIState), - VMSTATE_INT32(cmdreq, PXA2xxMMCIState), - VMSTATE_INT32(active, PXA2xxMMCIState), - VMSTATE_INT32(bytesleft, PXA2xxMMCIState), - VMSTATE_UINT32(tx_start, PXA2xxMMCIState), - VMSTATE_UINT32(tx_len, PXA2xxMMCIState), - VMSTATE_UINT32(rx_start, PXA2xxMMCIState), - VMSTATE_UINT32(rx_len, PXA2xxMMCIState), - VMSTATE_UINT32(resp_len, PXA2xxMMCIState), - VMSTATE_VALIDATE("fifo size incorrect", pxa2xx_mmci_vmstate_validate), - VMSTATE_UINT8_ARRAY(tx_fifo, PXA2xxMMCIState, 64), - VMSTATE_UINT8_ARRAY(rx_fifo, PXA2xxMMCIState, 32), - VMSTATE_UINT16_ARRAY(resp_fifo, PXA2xxMMCIState, 9), - VMSTATE_END_OF_LIST() - } -}; - -#define MMC_STRPCL 0x00 /* MMC Clock Start/Stop register */ -#define MMC_STAT 0x04 /* MMC Status register */ -#define MMC_CLKRT 0x08 /* MMC Clock Rate register */ -#define MMC_SPI 0x0c /* MMC SPI Mode register */ -#define MMC_CMDAT 0x10 /* MMC Command/Data register */ -#define MMC_RESTO 0x14 /* MMC Response Time-Out register */ -#define MMC_RDTO 0x18 /* MMC Read Time-Out register */ -#define MMC_BLKLEN 0x1c /* MMC Block Length register */ -#define MMC_NUMBLK 0x20 /* MMC Number of Blocks register */ -#define MMC_PRTBUF 0x24 /* MMC Buffer Partly Full register */ -#define MMC_I_MASK 0x28 /* MMC Interrupt Mask register */ -#define MMC_I_REG 0x2c /* MMC Interrupt Request register */ -#define MMC_CMD 0x30 /* MMC Command register */ -#define MMC_ARGH 0x34 /* MMC 
Argument High register */ -#define MMC_ARGL 0x38 /* MMC Argument Low register */ -#define MMC_RES 0x3c /* MMC Response FIFO */ -#define MMC_RXFIFO 0x40 /* MMC Receive FIFO */ -#define MMC_TXFIFO 0x44 /* MMC Transmit FIFO */ -#define MMC_RDWAIT 0x48 /* MMC RD_WAIT register */ -#define MMC_BLKS_REM 0x4c /* MMC Blocks Remaining register */ - -/* Bitfield masks */ -#define STRPCL_STOP_CLK (1 << 0) -#define STRPCL_STRT_CLK (1 << 1) -#define STAT_TOUT_RES (1 << 1) -#define STAT_CLK_EN (1 << 8) -#define STAT_DATA_DONE (1 << 11) -#define STAT_PRG_DONE (1 << 12) -#define STAT_END_CMDRES (1 << 13) -#define SPI_SPI_MODE (1 << 0) -#define CMDAT_RES_TYPE (3 << 0) -#define CMDAT_DATA_EN (1 << 2) -#define CMDAT_WR_RD (1 << 3) -#define CMDAT_DMA_EN (1 << 7) -#define CMDAT_STOP_TRAN (1 << 10) -#define INT_DATA_DONE (1 << 0) -#define INT_PRG_DONE (1 << 1) -#define INT_END_CMD (1 << 2) -#define INT_STOP_CMD (1 << 3) -#define INT_CLK_OFF (1 << 4) -#define INT_RXFIFO_REQ (1 << 5) -#define INT_TXFIFO_REQ (1 << 6) -#define INT_TINT (1 << 7) -#define INT_DAT_ERR (1 << 8) -#define INT_RES_ERR (1 << 9) -#define INT_RD_STALLED (1 << 10) -#define INT_SDIO_INT (1 << 11) -#define INT_SDIO_SACK (1 << 12) -#define PRTBUF_PRT_BUF (1 << 0) - -/* Route internal interrupt lines to the global IC and DMA */ -static void pxa2xx_mmci_int_update(PXA2xxMMCIState *s) -{ - uint32_t mask = s->intmask; - if (s->cmdat & CMDAT_DMA_EN) { - mask |= INT_RXFIFO_REQ | INT_TXFIFO_REQ; - - qemu_set_irq(s->rx_dma, !!(s->intreq & INT_RXFIFO_REQ)); - qemu_set_irq(s->tx_dma, !!(s->intreq & INT_TXFIFO_REQ)); - } - - qemu_set_irq(s->irq, !!(s->intreq & ~mask)); -} - -static void pxa2xx_mmci_fifo_update(PXA2xxMMCIState *s) -{ - if (!s->active) - return; - - if (s->cmdat & CMDAT_WR_RD) { - while (s->bytesleft && s->tx_len) { - sdbus_write_byte(&s->sdbus, s->tx_fifo[s->tx_start++]); - s->tx_start &= 0x1f; - s->tx_len --; - s->bytesleft --; - } - if (s->bytesleft) - s->intreq |= INT_TXFIFO_REQ; - } else - while (s->bytesleft && s->rx_len < 32) { - s->rx_fifo[(s->rx_start + (s->rx_len ++)) & 0x1f] = - sdbus_read_byte(&s->sdbus); - s->bytesleft --; - s->intreq |= INT_RXFIFO_REQ; - } - - if (!s->bytesleft) { - s->active = 0; - s->intreq |= INT_DATA_DONE; - s->status |= STAT_DATA_DONE; - - if (s->cmdat & CMDAT_WR_RD) { - s->intreq |= INT_PRG_DONE; - s->status |= STAT_PRG_DONE; - } - } - - pxa2xx_mmci_int_update(s); -} - -static void pxa2xx_mmci_wakequeues(PXA2xxMMCIState *s) -{ - int rsplen, i; - SDRequest request; - uint8_t response[16]; - - s->active = 1; - s->rx_len = 0; - s->tx_len = 0; - s->cmdreq = 0; - - request.cmd = s->cmd; - request.arg = s->arg; - request.crc = 0; /* FIXME */ - - rsplen = sdbus_do_command(&s->sdbus, &request, response); - s->intreq |= INT_END_CMD; - - memset(s->resp_fifo, 0, sizeof(s->resp_fifo)); - switch (s->cmdat & CMDAT_RES_TYPE) { -#define PXAMMCI_RESP(wd, value0, value1) \ - s->resp_fifo[(wd) + 0] |= (value0); \ - s->resp_fifo[(wd) + 1] |= (value1) << 8; - case 0: /* No response */ - goto complete; - - case 1: /* R1, R4, R5 or R6 */ - if (rsplen < 4) - goto timeout; - goto complete; - - case 2: /* R2 */ - if (rsplen < 16) - goto timeout; - goto complete; - - case 3: /* R3 */ - if (rsplen < 4) - goto timeout; - goto complete; - - complete: - for (i = 0; rsplen > 0; i ++, rsplen -= 2) { - PXAMMCI_RESP(i, response[i * 2], response[i * 2 + 1]); - } - s->status |= STAT_END_CMDRES; - - if (!(s->cmdat & CMDAT_DATA_EN)) - s->active = 0; - else - s->bytesleft = s->numblk * s->blklen; - - s->resp_len = 0; - break; - - timeout: - 
s->active = 0; - s->status |= STAT_TOUT_RES; - break; - } - - pxa2xx_mmci_fifo_update(s); -} - -static uint64_t pxa2xx_mmci_read(void *opaque, hwaddr offset, unsigned size) -{ - PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque; - uint32_t ret = 0; - - switch (offset) { - case MMC_STRPCL: - break; - case MMC_STAT: - ret = s->status; - break; - case MMC_CLKRT: - ret = s->clkrt; - break; - case MMC_SPI: - ret = s->spi; - break; - case MMC_CMDAT: - ret = s->cmdat; - break; - case MMC_RESTO: - ret = s->resp_tout; - break; - case MMC_RDTO: - ret = s->read_tout; - break; - case MMC_BLKLEN: - ret = s->blklen; - break; - case MMC_NUMBLK: - ret = s->numblk; - break; - case MMC_PRTBUF: - break; - case MMC_I_MASK: - ret = s->intmask; - break; - case MMC_I_REG: - ret = s->intreq; - break; - case MMC_CMD: - ret = s->cmd | 0x40; - break; - case MMC_ARGH: - ret = s->arg >> 16; - break; - case MMC_ARGL: - ret = s->arg & 0xffff; - break; - case MMC_RES: - ret = (s->resp_len < 9) ? s->resp_fifo[s->resp_len++] : 0; - break; - case MMC_RXFIFO: - while (size-- && s->rx_len) { - ret |= s->rx_fifo[s->rx_start++] << (size << 3); - s->rx_start &= 0x1f; - s->rx_len --; - } - s->intreq &= ~INT_RXFIFO_REQ; - pxa2xx_mmci_fifo_update(s); - break; - case MMC_RDWAIT: - break; - case MMC_BLKS_REM: - ret = s->numblk; - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, - "%s: incorrect register 0x%02" HWADDR_PRIx "\n", - __func__, offset); - } - trace_pxa2xx_mmci_read(size, offset, ret); - - return ret; -} - -static void pxa2xx_mmci_write(void *opaque, - hwaddr offset, uint64_t value, unsigned size) -{ - PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque; - - trace_pxa2xx_mmci_write(size, offset, value); - switch (offset) { - case MMC_STRPCL: - if (value & STRPCL_STRT_CLK) { - s->status |= STAT_CLK_EN; - s->intreq &= ~INT_CLK_OFF; - - if (s->cmdreq && !(s->cmdat & CMDAT_STOP_TRAN)) { - s->status &= STAT_CLK_EN; - pxa2xx_mmci_wakequeues(s); - } - } - - if (value & STRPCL_STOP_CLK) { - s->status &= ~STAT_CLK_EN; - s->intreq |= INT_CLK_OFF; - s->active = 0; - } - - pxa2xx_mmci_int_update(s); - break; - - case MMC_CLKRT: - s->clkrt = value & 7; - break; - - case MMC_SPI: - s->spi = value & 0xf; - if (value & SPI_SPI_MODE) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: attempted to use card in SPI mode\n", __func__); - } - break; - - case MMC_CMDAT: - s->cmdat = value & 0x3dff; - s->active = 0; - s->cmdreq = 1; - if (!(value & CMDAT_STOP_TRAN)) { - s->status &= STAT_CLK_EN; - - if (s->status & STAT_CLK_EN) - pxa2xx_mmci_wakequeues(s); - } - - pxa2xx_mmci_int_update(s); - break; - - case MMC_RESTO: - s->resp_tout = value & 0x7f; - break; - - case MMC_RDTO: - s->read_tout = value & 0xffff; - break; - - case MMC_BLKLEN: - s->blklen = value & 0xfff; - break; - - case MMC_NUMBLK: - s->numblk = value & 0xffff; - break; - - case MMC_PRTBUF: - if (value & PRTBUF_PRT_BUF) { - s->tx_start ^= 32; - s->tx_len = 0; - } - pxa2xx_mmci_fifo_update(s); - break; - - case MMC_I_MASK: - s->intmask = value & 0x1fff; - pxa2xx_mmci_int_update(s); - break; - - case MMC_CMD: - s->cmd = value & 0x3f; - break; - - case MMC_ARGH: - s->arg &= 0x0000ffff; - s->arg |= value << 16; - break; - - case MMC_ARGL: - s->arg &= 0xffff0000; - s->arg |= value & 0x0000ffff; - break; - - case MMC_TXFIFO: - while (size-- && s->tx_len < 0x20) - s->tx_fifo[(s->tx_start + (s->tx_len ++)) & 0x1f] = - (value >> (size << 3)) & 0xff; - s->intreq &= ~INT_TXFIFO_REQ; - pxa2xx_mmci_fifo_update(s); - break; - - case MMC_RDWAIT: - case MMC_BLKS_REM: - break; - - default: - 
qemu_log_mask(LOG_GUEST_ERROR, - "%s: incorrect reg 0x%02" HWADDR_PRIx " " - "(value 0x%08" PRIx64 ")\n", __func__, offset, value); - } -} - -static const MemoryRegionOps pxa2xx_mmci_ops = { - .read = pxa2xx_mmci_read, - .write = pxa2xx_mmci_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -PXA2xxMMCIState *pxa2xx_mmci_init(MemoryRegion *sysmem, - hwaddr base, - qemu_irq irq, qemu_irq rx_dma, qemu_irq tx_dma) -{ - DeviceState *dev; - - dev = sysbus_create_simple(TYPE_PXA2XX_MMCI, base, irq); - qdev_connect_gpio_out_named(dev, "rx-dma", 0, rx_dma); - qdev_connect_gpio_out_named(dev, "tx-dma", 0, tx_dma); - - return PXA2XX_MMCI(dev); -} - -static void pxa2xx_mmci_set_inserted(DeviceState *dev, bool inserted) -{ - PXA2xxMMCIState *s = PXA2XX_MMCI(dev); - - qemu_set_irq(s->inserted, inserted); -} - -static void pxa2xx_mmci_set_readonly(DeviceState *dev, bool readonly) -{ - PXA2xxMMCIState *s = PXA2XX_MMCI(dev); - - qemu_set_irq(s->readonly, readonly); -} - -void pxa2xx_mmci_handlers(PXA2xxMMCIState *s, qemu_irq readonly, - qemu_irq coverswitch) -{ - DeviceState *dev = DEVICE(s); - - s->readonly = readonly; - s->inserted = coverswitch; - - pxa2xx_mmci_set_inserted(dev, sdbus_get_inserted(&s->sdbus)); - pxa2xx_mmci_set_readonly(dev, sdbus_get_readonly(&s->sdbus)); -} - -static void pxa2xx_mmci_reset(DeviceState *d) -{ - PXA2xxMMCIState *s = PXA2XX_MMCI(d); - - s->status = 0; - s->clkrt = 0; - s->spi = 0; - s->cmdat = 0; - s->resp_tout = 0; - s->read_tout = 0; - s->blklen = 0; - s->numblk = 0; - s->intmask = 0; - s->intreq = 0; - s->cmd = 0; - s->arg = 0; - s->active = 0; - s->bytesleft = 0; - s->tx_start = 0; - s->tx_len = 0; - s->rx_start = 0; - s->rx_len = 0; - s->resp_len = 0; - s->cmdreq = 0; - memset(s->tx_fifo, 0, sizeof(s->tx_fifo)); - memset(s->rx_fifo, 0, sizeof(s->rx_fifo)); - memset(s->resp_fifo, 0, sizeof(s->resp_fifo)); -} - -static void pxa2xx_mmci_instance_init(Object *obj) -{ - PXA2xxMMCIState *s = PXA2XX_MMCI(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - DeviceState *dev = DEVICE(obj); - - memory_region_init_io(&s->iomem, obj, &pxa2xx_mmci_ops, s, - "pxa2xx-mmci", 0x00100000); - sysbus_init_mmio(sbd, &s->iomem); - sysbus_init_irq(sbd, &s->irq); - qdev_init_gpio_out_named(dev, &s->rx_dma, "rx-dma", 1); - qdev_init_gpio_out_named(dev, &s->tx_dma, "tx-dma", 1); - - qbus_init(&s->sdbus, sizeof(s->sdbus), - TYPE_PXA2XX_MMCI_BUS, DEVICE(obj), "sd-bus"); -} - -static void pxa2xx_mmci_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->vmsd = &vmstate_pxa2xx_mmci; - dc->reset = pxa2xx_mmci_reset; -} - -static void pxa2xx_mmci_bus_class_init(ObjectClass *klass, void *data) -{ - SDBusClass *sbc = SD_BUS_CLASS(klass); - - sbc->set_inserted = pxa2xx_mmci_set_inserted; - sbc->set_readonly = pxa2xx_mmci_set_readonly; -} - -static const TypeInfo pxa2xx_mmci_types[] = { - { - .name = TYPE_PXA2XX_MMCI, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PXA2xxMMCIState), - .instance_init = pxa2xx_mmci_instance_init, - .class_init = pxa2xx_mmci_class_init, - }, - { - .name = TYPE_PXA2XX_MMCI_BUS, - .parent = TYPE_SD_BUS, - .instance_size = sizeof(SDBus), - .class_init = pxa2xx_mmci_bus_class_init, - }, -}; - -DEFINE_TYPES(pxa2xx_mmci_types) diff --git a/hw/sd/sd.c b/hw/sd/sd.c index 07cb97d88c0..8c290595f01 100644 --- a/hw/sd/sd.c +++ b/hw/sd/sd.c @@ -37,9 +37,8 @@ #include "qemu/cutils.h" #include "hw/irq.h" #include "hw/registerfields.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "hw/sd/sd.h" -#include 
"hw/sd/sdcard_legacy.h" #include "migration/vmstate.h" #include "qapi/error.h" #include "qemu/bitmap.h" @@ -62,6 +61,7 @@ typedef enum { sd_r0 = 0, /* no response */ sd_r1, /* normal response command */ + spi_r2, /* STATUS */ sd_r2_i, /* CID register */ sd_r2_s, /* CSD register */ sd_r3, /* OCR register */ @@ -71,6 +71,14 @@ typedef enum { sd_illegal = -2, } sd_rsp_type_t; +typedef enum { + sd_spi, + sd_bc, /* broadcast -- no response */ + sd_bcr, /* broadcast with response */ + sd_ac, /* addressed -- no data transfer */ + sd_adtc, /* addressed with data transfer */ +} sd_cmd_type_t; + enum SDCardModes { sd_inactive, sd_card_identification_mode, @@ -112,10 +120,6 @@ typedef struct SDProto { struct SDState { DeviceState parent_obj; - /* If true, created by sd_init() for a non-qdevified caller */ - /* TODO purge them with fire */ - bool me_no_qdev_me_kill_mammoth_with_rocks; - /* SD Memory Card Registers */ uint32_t ocr; uint8_t scr[8]; @@ -143,7 +147,6 @@ struct SDState { /* Runtime changeables */ - uint32_t mode; /* current card mode, one of SDCardModes */ int32_t state; /* current card state, one of SDCardStates */ uint32_t vhs; bool wp_switch; @@ -169,10 +172,7 @@ struct SDState { uint32_t data_offset; size_t data_size; uint8_t data[512]; - qemu_irq readonly_cb; - qemu_irq inserted_cb; QEMUTimer *ocr_power_timer; - bool enable; uint8_t dat_lines; bool cmd_line; }; @@ -247,6 +247,7 @@ static const char *sd_response_name(sd_rsp_type_t rsp) static const char *response_name[] = { [sd_r0] = "RESP#0 (no response)", [sd_r1] = "RESP#1 (normal cmd)", + [spi_r2] = "RESP#2 (STATUS reg)", [sd_r2_i] = "RESP#2 (CID reg)", [sd_r2_s] = "RESP#2 (CSD reg)", [sd_r3] = "RESP#3 (OCR reg)", @@ -291,12 +292,12 @@ static const char *sd_acmd_name(SDState *sd, uint8_t cmd) static uint8_t sd_get_dat_lines(SDState *sd) { - return sd->enable ? sd->dat_lines : 0; + return sd->dat_lines; } static bool sd_get_cmd_line(SDState *sd) { - return sd->enable ? sd->cmd_line : false; + return sd->cmd_line; } static void sd_set_voltage(SDState *sd, uint16_t millivolts) @@ -313,27 +314,24 @@ static void sd_set_voltage(SDState *sd, uint16_t millivolts) } } -static void sd_set_mode(SDState *sd) +static enum SDCardModes sd_mode(SDState *sd) { switch (sd->state) { case sd_inactive_state: - sd->mode = sd_inactive; - break; - + return sd_inactive; case sd_idle_state: case sd_ready_state: case sd_identification_state: - sd->mode = sd_card_identification_mode; - break; - + return sd_card_identification_mode; case sd_standby_state: case sd_transfer_state: case sd_sendingdata_state: case sd_receivingdata_state: case sd_programming_state: case sd_disconnect_state: - sd->mode = sd_data_transfer_mode; - break; + return sd_data_transfer_mode; + default: + g_assert_not_reached(); } } @@ -729,16 +727,82 @@ static int sd_req_crc_validate(SDRequest *req) return sd_crc7(buffer, 5) != req->crc; /* TODO */ } +static size_t sd_response_size(SDState *sd, sd_rsp_type_t rtype) +{ + switch (rtype) { + case sd_r1: + case sd_r1b: + return sd_is_spi(sd) ? 1 : 4; + + case spi_r2: + assert(sd_is_spi(sd)); + return 2; + + case sd_r2_i: + case sd_r2_s: + assert(!sd_is_spi(sd)); + return 16; + + case sd_r3: + case sd_r7: + return sd_is_spi(sd) ? 5 : 4; + + case sd_r6: + assert(!sd_is_spi(sd)); + return 4; + + case sd_r0: + case sd_illegal: + return sd_is_spi(sd) ? 
1 : 0; + + default: + g_assert_not_reached(); + } +} + static void sd_response_r1_make(SDState *sd, uint8_t *response) { - stl_be_p(response, sd->card_status); + if (sd_is_spi(sd)) { + response[0] = sd->state == sd_idle_state + && !FIELD_EX32(sd->ocr, OCR, CARD_POWER_UP); + response[0] |= FIELD_EX32(sd->card_status, CSR, ERASE_RESET) << 1; + response[0] |= FIELD_EX32(sd->card_status, CSR, ILLEGAL_COMMAND) << 2; + response[0] |= FIELD_EX32(sd->card_status, CSR, COM_CRC_ERROR) << 3; + response[0] |= FIELD_EX32(sd->card_status, CSR, ERASE_SEQ_ERROR) << 4; + response[0] |= FIELD_EX32(sd->card_status, CSR, ADDRESS_ERROR) << 5; + response[0] |= FIELD_EX32(sd->card_status, CSR, BLOCK_LEN_ERROR) << 6; + response[0] |= 0 << 7; + } else { + stl_be_p(response, sd->card_status); + } /* Clear the "clear on read" status bits */ sd->card_status &= ~CARD_STATUS_C; } +static void spi_response_r2_make(SDState *sd, uint8_t *resp) +{ + /* Prepend R1 */ + sd_response_r1_make(sd, resp); + + resp[1] = FIELD_EX32(sd->card_status, CSR, CARD_IS_LOCKED) << 0; + resp[1] |= (FIELD_EX32(sd->card_status, CSR, LOCK_UNLOCK_FAILED) + || FIELD_EX32(sd->card_status, CSR, WP_ERASE_SKIP)) << 1; + resp[1] |= FIELD_EX32(sd->card_status, CSR, ERROR) << 2; + resp[1] |= FIELD_EX32(sd->card_status, CSR, CC_ERROR) << 3; + resp[1] |= FIELD_EX32(sd->card_status, CSR, CARD_ECC_FAILED) << 4; + resp[1] |= FIELD_EX32(sd->card_status, CSR, WP_VIOLATION) << 5; + resp[1] |= FIELD_EX32(sd->card_status, CSR, ERASE_PARAM) << 6; + resp[1] |= FIELD_EX32(sd->card_status, CSR, OUT_OF_RANGE) << 7; +} + static void sd_response_r3_make(SDState *sd, uint8_t *response) { + if (sd_is_spi(sd)) { + /* Prepend R1 */ + sd_response_r1_make(sd, response); + response++; + } stl_be_p(response, sd->ocr & ACMD41_R3_MASK); } @@ -756,6 +820,11 @@ static void sd_response_r6_make(SDState *sd, uint8_t *response) static void sd_response_r7_make(SDState *sd, uint8_t *response) { + if (sd_is_spi(sd)) { + /* Prepend R1 */ + sd_response_r1_make(sd, response); + response++; + } stl_be_p(response, sd->vhs); } @@ -774,19 +843,12 @@ static uint32_t sd_blk_len(SDState *sd) */ static uint32_t sd_bootpart_offset(SDState *sd) { - bool partitions_enabled; unsigned partition_access; if (!sd->boot_part_size || !sd_is_emmc(sd)) { return 0; } - partitions_enabled = sd->ext_csd[EXT_CSD_PART_CONFIG] - & EXT_CSD_PART_CONFIG_EN_MASK; - if (!partitions_enabled) { - return 0; - } - partition_access = sd->ext_csd[EXT_CSD_PART_CONFIG] & EXT_CSD_PART_CONFIG_ACC_MASK; switch (partition_access) { @@ -833,7 +895,9 @@ static void sd_reset(DeviceState *dev) sect = 0; } size = sect << HWBLOCK_SHIFT; - size -= sd_bootpart_offset(sd); + if (sd_is_emmc(sd)) { + size -= sd->boot_part_size * 2; + } sect = sd_addr_to_wpnum(size) + 1; @@ -889,17 +953,10 @@ static void sd_cardchange(void *opaque, bool load, Error **errp) trace_sdcard_ejected(); } - if (sd->me_no_qdev_me_kill_mammoth_with_rocks) { - qemu_set_irq(sd->inserted_cb, inserted); - if (inserted) { - qemu_set_irq(sd->readonly_cb, readonly); - } - } else { - sdbus = SD_BUS(qdev_get_parent_bus(dev)); - sdbus_set_inserted(sdbus, inserted); - if (inserted) { - sdbus_set_readonly(sdbus, readonly); - } + sdbus = SD_BUS(qdev_get_parent_bus(dev)); + sdbus_set_inserted(sdbus, inserted); + if (inserted) { + sdbus_set_readonly(sdbus, readonly); } } @@ -964,7 +1021,7 @@ static const VMStateDescription sd_vmstate = { .minimum_version_id = 2, .pre_load = sd_vmstate_pre_load, .fields = (const VMStateField[]) { - VMSTATE_UINT32(mode, SDState), + VMSTATE_UNUSED(4), 
VMSTATE_INT32(state, SDState), VMSTATE_UINT8_ARRAY(cid, SDState, 16), VMSTATE_UINT8_ARRAY(csd, SDState, 16), @@ -987,7 +1044,7 @@ static const VMStateDescription sd_vmstate = { VMSTATE_UINT32(data_offset, SDState), VMSTATE_UINT8_ARRAY(data, SDState, 512), VMSTATE_UNUSED_V(1, 512), - VMSTATE_BOOL(enable, SDState), + VMSTATE_UNUSED(1), VMSTATE_END_OF_LIST() }, .subsections = (const VMStateDescription * const []) { @@ -997,48 +1054,6 @@ static const VMStateDescription sd_vmstate = { }, }; -/* Legacy initialization function for use by non-qdevified callers */ -SDState *sd_init(BlockBackend *blk, bool is_spi) -{ - Object *obj; - DeviceState *dev; - SDState *sd; - Error *err = NULL; - - obj = object_new(is_spi ? TYPE_SD_CARD_SPI : TYPE_SD_CARD); - dev = DEVICE(obj); - if (!qdev_prop_set_drive_err(dev, "drive", blk, &err)) { - error_reportf_err(err, "sd_init failed: "); - return NULL; - } - - /* - * Realizing the device properly would put it into the QOM - * composition tree even though it is not plugged into an - * appropriate bus. That's a no-no. Hide the device from - * QOM/qdev, and call its qdev realize callback directly. - */ - object_ref(obj); - object_unparent(obj); - sd_realize(dev, &err); - if (err) { - error_reportf_err(err, "sd_init failed: "); - return NULL; - } - - sd = SD_CARD(dev); - sd->me_no_qdev_me_kill_mammoth_with_rocks = true; - return sd; -} - -void sd_set_cb(SDState *sd, qemu_irq readonly, qemu_irq insert) -{ - sd->readonly_cb = readonly; - sd->inserted_cb = insert; - qemu_set_irq(readonly, sd->blk ? !blk_is_writable(sd->blk) : 0); - qemu_set_irq(insert, sd->blk ? blk_is_inserted(sd->blk) : 0); -} - static void sd_blk_read(SDState *sd, uint64_t addr, uint32_t len) { trace_sdcard_read_block(addr, len); @@ -1306,7 +1321,7 @@ static sd_rsp_type_t sd_invalid_state_for_cmd(SDState *sd, SDRequest req) static sd_rsp_type_t sd_invalid_mode_for_cmd(SDState *sd, SDRequest req) { qemu_log_mask(LOG_GUEST_ERROR, "%s: CMD%i in a wrong mode: %s (spec %s)\n", - sd->proto->name, req.cmd, sd_mode_name(sd->mode), + sd->proto->name, req.cmd, sd_mode_name(sd_mode(sd)), sd_version_str(sd->spec_version)); return sd_illegal; @@ -1359,7 +1374,7 @@ static sd_rsp_type_t sd_cmd_to_sendingdata(SDState *sd, SDRequest req, const void *data, size_t size) { if (sd->state != sd_transfer_state) { - sd_invalid_state_for_cmd(sd, req); + return sd_invalid_state_for_cmd(sd, req); } sd->state = sd_sendingdata_state; @@ -1395,14 +1410,6 @@ static sd_rsp_type_t sd_cmd_GO_IDLE_STATE(SDState *sd, SDRequest req) return sd_is_spi(sd) ? 
sd_r1 : sd_r0; } -/* CMD1 */ -static sd_rsp_type_t spi_cmd_SEND_OP_COND(SDState *sd, SDRequest req) -{ - sd->state = sd_transfer_state; - - return sd_r1; -} - /* CMD2 */ static sd_rsp_type_t sd_cmd_ALL_SEND_CID(SDState *sd, SDRequest req) { @@ -1474,11 +1481,17 @@ static sd_rsp_type_t emmc_cmd_sleep_awake(SDState *sd, SDRequest req) /* CMD6 */ static sd_rsp_type_t sd_cmd_SWITCH_FUNCTION(SDState *sd, SDRequest req) { - if (sd->mode != sd_data_transfer_mode) { + if (sd_mode(sd) != sd_data_transfer_mode) { return sd_invalid_mode_for_cmd(sd, req); } - if (sd->state != sd_transfer_state) { - return sd_invalid_state_for_cmd(sd, req); + if (sd_is_spi(sd)) { + if (sd->state == sd_idle_state) { + return sd_invalid_state_for_cmd(sd, req); + } + } else { + if (sd->state != sd_transfer_state) { + return sd_invalid_state_for_cmd(sd, req); + } } sd_function_switch(sd, req.arg); @@ -1571,14 +1584,30 @@ static sd_rsp_type_t emmc_cmd_SEND_EXT_CSD(SDState *sd, SDRequest req) sd->ext_csd, sizeof(sd->ext_csd)); } -/* CMD9 */ -static sd_rsp_type_t spi_cmd_SEND_CSD(SDState *sd, SDRequest req) +static sd_rsp_type_t spi_cmd_SEND_CxD(SDState *sd, SDRequest req, + const void *data, size_t size) { + /* + * XXX as of v10.1.0-rc1 command is reached in sd_idle_state, + * so disable this check. if (sd->state != sd_standby_state) { return sd_invalid_state_for_cmd(sd, req); } - return sd_cmd_to_sendingdata(sd, req, sd_req_get_address(sd, req), - sd->csd, 16); + */ + + /* + * Since SPI returns CSD and CID on the DAT lines, + * switch to sd_transfer_state. + */ + sd->state = sd_transfer_state; + + return sd_cmd_to_sendingdata(sd, req, 0, data, size); +} + +/* CMD9 */ +static sd_rsp_type_t spi_cmd_SEND_CSD(SDState *sd, SDRequest req) +{ + return spi_cmd_SEND_CxD(sd, req, sd->csd, sizeof(sd->csd)); } static sd_rsp_type_t sd_cmd_SEND_CSD(SDState *sd, SDRequest req) @@ -1593,11 +1622,7 @@ static sd_rsp_type_t sd_cmd_SEND_CSD(SDState *sd, SDRequest req) /* CMD10 */ static sd_rsp_type_t spi_cmd_SEND_CID(SDState *sd, SDRequest req) { - if (sd->state != sd_standby_state) { - return sd_invalid_state_for_cmd(sd, req); - } - return sd_cmd_to_sendingdata(sd, req, sd_req_get_address(sd, req), - sd->cid, 16); + return spi_cmd_SEND_CxD(sd, req, sd->cid, sizeof(sd->cid)); } static sd_rsp_type_t sd_cmd_SEND_CID(SDState *sd, SDRequest req) @@ -1629,7 +1654,7 @@ static sd_rsp_type_t sd_cmd_STOP_TRANSMISSION(SDState *sd, SDRequest req) /* CMD13 */ static sd_rsp_type_t sd_cmd_SEND_STATUS(SDState *sd, SDRequest req) { - if (sd->mode != sd_data_transfer_mode) { + if (sd_mode(sd) != sd_data_transfer_mode) { return sd_invalid_mode_for_cmd(sd, req); } @@ -1646,7 +1671,7 @@ static sd_rsp_type_t sd_cmd_SEND_STATUS(SDState *sd, SDRequest req) } if (sd_is_spi(sd)) { - return sd_r2_s; + return spi_r2; } return sd_req_rca_same(sd, req) ? 
sd_r1 : sd_r0; @@ -1655,7 +1680,7 @@ static sd_rsp_type_t sd_cmd_SEND_STATUS(SDState *sd, SDRequest req) /* CMD15 */ static sd_rsp_type_t sd_cmd_GO_INACTIVE_STATE(SDState *sd, SDRequest req) { - if (sd->mode != sd_data_transfer_mode) { + if (sd_mode(sd) != sd_data_transfer_mode) { return sd_invalid_mode_for_cmd(sd, req); } switch (sd->state) { @@ -1960,8 +1985,14 @@ static sd_rsp_type_t sd_acmd_SET_BUS_WIDTH(SDState *sd, SDRequest req) /* ACMD13 */ static sd_rsp_type_t sd_acmd_SD_STATUS(SDState *sd, SDRequest req) { - return sd_cmd_to_sendingdata(sd, req, 0, - sd->sd_status, sizeof(sd->sd_status)); + sd_rsp_type_t rsp; + + rsp = sd_cmd_to_sendingdata(sd, req, 0, + sd->sd_status, sizeof(sd->sd_status)); + if (sd_is_spi(sd) && rsp != sd_illegal) { + return spi_r2; + } + return rsp; } /* ACMD22 */ @@ -2021,6 +2052,9 @@ static sd_rsp_type_t sd_cmd_SEND_OP_COND(SDState *sd, SDRequest req) sd->state = sd_ready_state; } + if (sd_is_spi(sd)) { + return sd_r1; + } return sd_r3; } @@ -2052,7 +2086,9 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) if (req.cmd != 55 || sd->expecting_acmd) { trace_sdcard_normal_command(sd->proto->name, sd->last_cmd_name, req.cmd, - req.arg, sd_state_name(sd->state)); + req.arg, + sd_mode_name(sd_mode(sd)), + sd_state_name(sd->state)); } /* Not interpreting this as an app command */ @@ -2138,7 +2174,9 @@ static sd_rsp_type_t sd_app_command(SDState *sd, { sd->last_cmd_name = sd_acmd_name(sd, req.cmd); trace_sdcard_app_command(sd->proto->name, sd->last_cmd_name, - req.cmd, req.arg, sd_state_name(sd->state)); + req.cmd, req.arg, + sd_mode_name(sd_mode(sd)), + sd_state_name(sd->state)); sd->card_status |= APP_CMD; if (sd->proto->acmd[req.cmd].handler) { @@ -2193,13 +2231,14 @@ static bool cmd_valid_while_locked(SDState *sd, unsigned cmd) return cmd_class == 0 || cmd_class == 7; } -int sd_do_command(SDState *sd, SDRequest *req, - uint8_t *response) { +static size_t sd_do_command(SDState *sd, SDRequest *req, + uint8_t *response, size_t respsz) +{ int last_state; sd_rsp_type_t rtype; int rsplen; - if (!sd->blk || !blk_is_inserted(sd->blk) || !sd->enable) { + if (!sd->blk || !blk_is_inserted(sd->blk)) { return 0; } @@ -2237,7 +2276,6 @@ int sd_do_command(SDState *sd, SDRequest *req, } last_state = sd->state; - sd_set_mode(sd); if (sd->expecting_acmd) { sd->expecting_acmd = false; @@ -2257,36 +2295,37 @@ int sd_do_command(SDState *sd, SDRequest *req, } send_response: + rsplen = sd_response_size(sd, rtype); + assert(rsplen <= respsz); + switch (rtype) { case sd_r1: case sd_r1b: sd_response_r1_make(sd, response); - rsplen = 4; + break; + + case spi_r2: + spi_response_r2_make(sd, response); break; case sd_r2_i: memcpy(response, sd->cid, sizeof(sd->cid)); - rsplen = 16; break; case sd_r2_s: memcpy(response, sd->csd, sizeof(sd->csd)); - rsplen = 16; break; case sd_r3: sd_response_r3_make(sd, response); - rsplen = 4; break; case sd_r6: sd_response_r6_make(sd, response); - rsplen = 4; break; case sd_r7: sd_response_r7_make(sd, response); - rsplen = 4; break; case sd_r0: @@ -2298,7 +2337,6 @@ int sd_do_command(SDState *sd, SDRequest *req, sd->data_offset = 0; /* fall-through */ case sd_illegal: - rsplen = 0; break; default: g_assert_not_reached(); @@ -2346,12 +2384,13 @@ static bool sd_generic_read_byte(SDState *sd, uint8_t *value) return false; } -void sd_write_byte(SDState *sd, uint8_t value) +static void sd_write_byte(SDState *sd, uint8_t value) { int i; - if (!sd->blk || !blk_is_inserted(sd->blk) || !sd->enable) + if (!sd->blk || !blk_is_inserted(sd->blk)) { 
return; + } if (sd->state != sd_receivingdata_state) { qemu_log_mask(LOG_GUEST_ERROR, @@ -2475,23 +2514,26 @@ void sd_write_byte(SDState *sd, uint8_t value) } } -uint8_t sd_read_byte(SDState *sd) +static uint8_t sd_read_byte(SDState *sd) { /* TODO: Append CRCs */ + const uint8_t dummy_byte = 0x00; uint8_t ret; uint32_t io_len; - if (!sd->blk || !blk_is_inserted(sd->blk) || !sd->enable) - return 0x00; + if (!sd->blk || !blk_is_inserted(sd->blk)) { + return dummy_byte; + } if (sd->state != sd_sendingdata_state) { qemu_log_mask(LOG_GUEST_ERROR, "%s: not in Sending-Data state\n", __func__); - return 0x00; + return dummy_byte; } - if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION)) - return 0x00; + if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION)) { + return dummy_byte; + } io_len = sd_blk_len(sd); @@ -2517,7 +2559,7 @@ uint8_t sd_read_byte(SDState *sd) if (sd->data_offset == 0) { if (!address_in_range(sd, "READ_MULTIPLE_BLOCK", sd->data_start, io_len)) { - return 0x00; + return dummy_byte; } sd_blk_read(sd, sd->data_start, io_len); } @@ -2538,7 +2580,9 @@ uint8_t sd_read_byte(SDState *sd) break; default: - g_assert_not_reached(); + qemu_log_mask(LOG_GUEST_ERROR, "%s: DAT read illegal for command %s\n", + __func__, sd->last_cmd_name); + return dummy_byte; } return ret; @@ -2554,16 +2598,11 @@ static bool sd_data_ready(SDState *sd) return sd->state == sd_sendingdata_state; } -void sd_enable(SDState *sd, bool enable) -{ - sd->enable = enable; -} - static const SDProto sd_proto_spi = { .name = "SPI", .cmd = { [0] = {0, sd_spi, "GO_IDLE_STATE", sd_cmd_GO_IDLE_STATE}, - [1] = {0, sd_spi, "SEND_OP_COND", spi_cmd_SEND_OP_COND}, + [1] = {0, sd_spi, "SEND_OP_COND", sd_cmd_SEND_OP_COND}, [5] = {9, sd_spi, "IO_SEND_OP_COND", sd_cmd_optional}, [6] = {10, sd_spi, "SWITCH_FUNCTION", sd_cmd_SWITCH_FUNCTION}, [8] = {0, sd_spi, "SEND_IF_COND", sd_cmd_SEND_IF_COND}, @@ -2599,7 +2638,7 @@ static const SDProto sd_proto_spi = { [13] = {8, sd_spi, "SD_STATUS", sd_acmd_SD_STATUS}, [22] = {8, sd_spi, "SEND_NUM_WR_BLOCKS", sd_acmd_SEND_NUM_WR_BLOCKS}, [23] = {8, sd_spi, "SET_WR_BLK_ERASE_COUNT", sd_acmd_SET_WR_BLK_ERASE_COUNT}, - [41] = {8, sd_spi, "SEND_OP_COND", spi_cmd_SEND_OP_COND}, + [41] = {8, sd_spi, "SEND_OP_COND", sd_cmd_SEND_OP_COND}, [42] = {8, sd_spi, "SET_CLR_CARD_DETECT", sd_acmd_SET_CLR_CARD_DETECT}, [51] = {8, sd_spi, "SEND_SCR", sd_acmd_SEND_SCR}, }, @@ -2718,7 +2757,6 @@ static void sd_instance_init(Object *obj) sd->proto = sc->proto; sd->last_cmd_name = "UNSET"; - sd->enable = true; sd->ocr_power_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sd_ocr_powerup, sd); } @@ -2791,31 +2829,28 @@ static void emmc_realize(DeviceState *dev, Error **errp) sd_realize(dev, errp); } -static Property sdmmc_common_properties[] = { +static const Property sdmmc_common_properties[] = { DEFINE_PROP_DRIVE("drive", SDState, blk), - DEFINE_PROP_END_OF_LIST() }; -static Property sd_properties[] = { +static const Property sd_properties[] = { DEFINE_PROP_UINT8("spec_version", SDState, spec_version, SD_PHY_SPECv3_01_VERS), - DEFINE_PROP_END_OF_LIST() }; -static Property emmc_properties[] = { +static const Property emmc_properties[] = { DEFINE_PROP_UINT64("boot-partition-size", SDState, boot_part_size, 0), DEFINE_PROP_UINT8("boot-config", SDState, boot_config, 0x0), - DEFINE_PROP_END_OF_LIST() }; -static void sdmmc_common_class_init(ObjectClass *klass, void *data) +static void sdmmc_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SDCardClass *sc = SDMMC_COMMON_CLASS(klass); 
device_class_set_props(dc, sdmmc_common_properties); dc->vmsd = &sd_vmstate; - dc->reset = sd_reset; + device_class_set_legacy_reset(dc, sd_reset); dc->bus_type = TYPE_SD_BUS; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); @@ -2827,12 +2862,11 @@ static void sdmmc_common_class_init(ObjectClass *klass, void *data) sc->read_byte = sd_read_byte; sc->receive_ready = sd_receive_ready; sc->data_ready = sd_data_ready; - sc->enable = sd_enable; sc->get_inserted = sd_get_inserted; sc->get_readonly = sd_get_readonly; } -static void sd_class_init(ObjectClass *klass, void *data) +static void sd_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SDCardClass *sc = SDMMC_COMMON_CLASS(klass); @@ -2851,7 +2885,7 @@ static void sd_class_init(ObjectClass *klass, void *data) * board to ensure that ssi transfers only occur when the chip select * is asserted. */ -static void sd_spi_class_init(ObjectClass *klass, void *data) +static void sd_spi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SDCardClass *sc = SDMMC_COMMON_CLASS(klass); @@ -2860,7 +2894,7 @@ static void sd_spi_class_init(ObjectClass *klass, void *data) sc->proto = &sd_proto_spi; } -static void emmc_class_init(ObjectClass *klass, void *data) +static void emmc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SDCardClass *sc = SDMMC_COMMON_CLASS(klass); diff --git a/hw/sd/sdhci-internal.h b/hw/sd/sdhci-internal.h index 5f3765f12d2..9f768c418e0 100644 --- a/hw/sd/sdhci-internal.h +++ b/hw/sd/sdhci-internal.h @@ -322,6 +322,6 @@ void sdhci_initfn(SDHCIState *s); void sdhci_uninitfn(SDHCIState *s); void sdhci_common_realize(SDHCIState *s, Error **errp); void sdhci_common_unrealize(SDHCIState *s); -void sdhci_common_class_init(ObjectClass *klass, void *data); +void sdhci_common_class_init(ObjectClass *klass, const void *data); #endif diff --git a/hw/sd/sdhci-pci.c b/hw/sd/sdhci-pci.c index 9b7bee8b3fb..c18b91fe63c 100644 --- a/hw/sd/sdhci-pci.c +++ b/hw/sd/sdhci-pci.c @@ -18,13 +18,13 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/module.h" +#include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/sd/sdhci.h" #include "sdhci-internal.h" -static Property sdhci_pci_properties[] = { +static const Property sdhci_pci_properties[] = { DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState), - DEFINE_PROP_END_OF_LIST(), }; static void sdhci_pci_realize(PCIDevice *dev, Error **errp) @@ -49,11 +49,12 @@ static void sdhci_pci_exit(PCIDevice *dev) { SDHCIState *s = PCI_SDHCI(dev); + qemu_free_irq(s->irq); sdhci_common_unrealize(s); sdhci_uninitfn(s); } -static void sdhci_pci_class_init(ObjectClass *klass, void *data) +static void sdhci_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -74,7 +75,7 @@ static const TypeInfo sdhci_pci_types[] = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(SDHCIState), .class_init = sdhci_pci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index d02c3e3963b..3c897e54b72 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -30,14 +30,13 @@ #include "qapi/error.h" #include "hw/irq.h" #include "hw/qdev-properties.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qemu/timer.h" #include "qemu/bitops.h" #include "hw/sd/sdhci.h" #include "migration/vmstate.h" #include 
"sdhci-internal.h" #include "qemu/log.h" -#include "qemu/module.h" #include "trace.h" #include "qom/object.h" @@ -234,7 +233,7 @@ static void sdhci_raise_insertion_irq(void *opaque) if (s->norintsts & SDHC_NIS_REMOVE) { timer_mod(s->insert_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY); + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY); } else { s->prnsts = 0x1ff0000; if (s->norintstsen & SDHC_NISEN_INSERT) { @@ -252,7 +251,7 @@ static void sdhci_set_inserted(DeviceState *dev, bool level) if ((s->norintsts & SDHC_NIS_REMOVE) && level) { /* Give target some time to notice card ejection */ timer_mod(s->insert_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY); + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY); } else { if (level) { s->prnsts = 0x1ff0000; @@ -275,6 +274,10 @@ static void sdhci_set_readonly(DeviceState *dev, bool level) { SDHCIState *s = (SDHCIState *)dev; + if (s->wp_inverted) { + level = !level; + } + if (level) { s->prnsts &= ~SDHC_WRITE_PROTECT; } else { @@ -290,9 +293,11 @@ static void sdhci_reset(SDHCIState *s) timer_del(s->insert_timer); timer_del(s->transfer_timer); - /* Set all registers to 0. Capabilities/Version registers are not cleared + /* + * Set all registers to 0. Capabilities/Version registers are not cleared * and assumed to always preserve their value, given to them during - * initialization */ + * initialization + */ memset(&s->sdmasysad, 0, (uintptr_t)&s->capareg - (uintptr_t)&s->sdmasysad); /* Reset other state based on current card insertion/readonly status */ @@ -302,11 +307,16 @@ static void sdhci_reset(SDHCIState *s) s->data_count = 0; s->stopped_state = sdhc_not_stopped; s->pending_insert_state = false; + if (s->vendor == SDHCI_VENDOR_FSL) { + s->norintstsen = 0x013f; + s->errintstsen = 0x117f; + } } static void sdhci_poweron_reset(DeviceState *dev) { - /* QOM (ie power-on) reset. This is identical to reset + /* + * QOM (ie power-on) reset. This is identical to reset * commanded via device register apart from handling of the * 'pending insert on powerup' quirk. 
*/ @@ -327,7 +337,7 @@ static void sdhci_send_command(SDHCIState *s) { SDRequest request; uint8_t response[16]; - int rlen; + size_t rlen; bool timeout = false; s->errintsts = 0; @@ -336,7 +346,7 @@ static void sdhci_send_command(SDHCIState *s) request.arg = s->argument; trace_sdhci_send_command(request.cmd, request.arg); - rlen = sdbus_do_command(&s->sdbus, &request, response); + rlen = sdbus_do_command(&s->sdbus, &request, response, sizeof(response)); if (s->cmdreg & SDHC_CMD_RESPONSE) { if (rlen == 4) { @@ -390,7 +400,7 @@ static void sdhci_end_transfer(SDHCIState *s) request.cmd = 0x0C; request.arg = 0; trace_sdhci_end_transfer(request.cmd, request.arg); - sdbus_do_command(&s->sdbus, &request, response); + sdbus_do_command(&s->sdbus, &request, response, sizeof(response)); /* Auto CMD12 response goes to the upper Response register */ s->rspreg[3] = ldl_be_p(response); } @@ -446,8 +456,10 @@ static void sdhci_read_block_from_card(SDHCIState *s) s->prnsts &= ~SDHC_DAT_LINE_ACTIVE; } - /* If stop at block gap request was set and it's not the last block of - * data - generate Block Event interrupt */ + /* + * If stop at block gap request was set and it's not the last block of + * data - generate Block Event interrupt + */ if (s->stopped_state == sdhc_gap_read && (s->trnmod & SDHC_TRNS_MULTI) && s->blkcnt != 1) { s->prnsts &= ~SDHC_DAT_LINE_ACTIVE; @@ -549,8 +561,10 @@ static void sdhci_write_block_to_card(SDHCIState *s) sdhci_update_irq(s); } -/* Write @size bytes of @value data to host controller @s Buffer Data Port - * register */ +/* + * Write @size bytes of @value data to host controller @s Buffer Data Port + * register + */ static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size) { unsigned i; @@ -595,9 +609,11 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s) return; } - /* XXX: Some sd/mmc drivers (for example, u-boot-slp) do not account for + /* + * XXX: Some sd/mmc drivers (for example, u-boot-slp) do not account for * possible stop at page boundary if initial address is not page aligned, - * allow them to work properly */ + * allow them to work properly + */ if ((s->sdmasysad % boundary_chk) == 0) { page_aligned = true; } @@ -657,12 +673,13 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s) } } + if (s->norintstsen & SDHC_NISEN_DMA) { + s->norintsts |= SDHC_NIS_DMA; + } + if (s->blkcnt == 0) { sdhci_end_transfer(s); } else { - if (s->norintstsen & SDHC_NISEN_DMA) { - s->norintsts |= SDHC_NIS_DMA; - } sdhci_update_irq(s); } } @@ -683,9 +700,22 @@ static void sdhci_sdma_transfer_single_block(SDHCIState *s) } s->blkcnt--; + if (s->norintstsen & SDHC_NISEN_DMA) { + s->norintsts |= SDHC_NIS_DMA; + } + sdhci_end_transfer(s); } +static void sdhci_sdma_transfer(SDHCIState *s) +{ + if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) { + sdhci_sdma_transfer_single_block(s); + } else { + sdhci_sdma_transfer_multi_blocks(s); + } +} + typedef struct ADMADescr { hwaddr addr; uint16_t length; @@ -703,7 +733,8 @@ static void get_adma_description(SDHCIState *s, ADMADescr *dscr) dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2), MEMTXATTRS_UNSPECIFIED); adma2 = le64_to_cpu(adma2); - /* The spec does not specify endianness of descriptor table. + /* + * The spec does not specify endianness of descriptor table. * We currently assume that it is LE. 
*/ dscr->addr = (hwaddr)extract64(adma2, 32, 32) & ~0x3ull; @@ -747,7 +778,7 @@ static void sdhci_do_adma(SDHCIState *s) const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK; const MemTxAttrs attrs = { .memory = true }; ADMADescr dscr = {}; - MemTxResult res; + MemTxResult res = MEMTX_ERROR; int i; if (s->trnmod & SDHC_TRNS_BLK_CNT_EN && !s->blkcnt) { @@ -846,6 +877,7 @@ static void sdhci_do_adma(SDHCIState *s) } } if (res != MEMTX_OK) { + s->data_count = 0; if (s->errintstsen & SDHC_EISEN_ADMAERR) { trace_sdhci_error("Set ADMA error flag"); s->errintsts |= SDHC_EIS_ADMAERR; @@ -915,12 +947,7 @@ static void sdhci_data_transfer(void *opaque) if (s->trnmod & SDHC_TRNS_DMA) { switch (SDHC_DMA_TYPE(s->hostctl1)) { case SDHC_CTRL_SDMA: - if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) { - sdhci_sdma_transfer_single_block(s); - } else { - sdhci_sdma_transfer_multi_blocks(s); - } - + sdhci_sdma_transfer(s); break; case SDHC_CTRL_ADMA1_32: if (!(s->capareg & R_SDHC_CAPAB_ADMA1_MASK)) { @@ -977,8 +1004,10 @@ static bool sdhci_can_issue_command(SDHCIState *s) return true; } -/* The Buffer Data Port register must be accessed in sequential and - * continuous manner */ +/* + * The Buffer Data Port register must be accessed in sequential and + * continuous manner + */ static inline bool sdhci_buff_access_is_sequential(SDHCIState *s, unsigned byte_num) { @@ -1162,11 +1191,7 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) if (!(mask & 0xFF000000) && s->blkcnt && (s->blksize & BLOCK_SIZE_MASK) && SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) { - if (s->trnmod & SDHC_TRNS_MULTI) { - sdhci_sdma_transfer_multi_blocks(s); - } else { - sdhci_sdma_transfer_single_block(s); - } + sdhci_sdma_transfer(s); } } break; @@ -1206,8 +1231,10 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) MASKED_WRITE(s->argument, mask, value); break; case SDHC_TRNMOD: - /* DMA can be enabled only if it is supported as indicated by - * capabilities register */ + /* + * DMA can be enabled only if it is supported as indicated by + * capabilities register + */ if (!(s->capareg & R_SDHC_CAPAB_SDMA_MASK)) { value &= ~SDHC_TRNS_DMA; } @@ -1279,8 +1306,10 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) } else { s->norintsts &= ~SDHC_NIS_ERR; } - /* Quirk for Raspberry Pi: pending card insert interrupt - * appears when first enabled after power on */ + /* + * Quirk for Raspberry Pi: pending card insert interrupt + * appears when first enabled after power on + */ if ((s->norintstsen & SDHC_NISEN_INSERT) && s->pending_insert_state) { assert(s->pending_insert_quirk); s->norintsts |= SDHC_NIS_INSERT; @@ -1396,8 +1425,10 @@ void sdhci_initfn(SDHCIState *s) { qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SDHCI_BUS, DEVICE(s), "sd-bus"); - s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s); - s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_data_transfer, s); + s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + sdhci_raise_insertion_irq, s); + s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + sdhci_data_transfer, s); s->io_ops = &sdhci_mmio_le_ops; } @@ -1445,11 +1476,13 @@ void sdhci_common_realize(SDHCIState *s, Error **errp) void sdhci_common_unrealize(SDHCIState *s) { - /* This function is expected to be called only once for each class: + /* + * This function is expected to be called only once for each class: * - SysBus: via DeviceClass->unrealize(), * - PCI: via PCIDeviceClass->exit(). 
* However to avoid double-free and/or use-after-free we still nullify - * this variable (better safe than sorry!). */ + * this variable (better safe than sorry!). + */ g_free(s->fifo_buffer); s->fifo_buffer = NULL; } @@ -1513,24 +1546,25 @@ const VMStateDescription sdhci_vmstate = { }, }; -void sdhci_common_class_init(ObjectClass *klass, void *data) +void sdhci_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); dc->vmsd = &sdhci_vmstate; - dc->reset = sdhci_poweron_reset; + device_class_set_legacy_reset(dc, sdhci_poweron_reset); } /* --- qdev SysBus --- */ -static Property sdhci_sysbus_properties[] = { +static const Property sdhci_sysbus_properties[] = { DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState), DEFINE_PROP_BOOL("pending-insert-quirk", SDHCIState, pending_insert_quirk, false), DEFINE_PROP_LINK("dma", SDHCIState, dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_BOOL("wp-inverted", SDHCIState, + wp_inverted, false), }; static void sdhci_sysbus_init(Object *obj) @@ -1586,7 +1620,7 @@ static void sdhci_sysbus_unrealize(DeviceState *dev) } } -static void sdhci_sysbus_class_init(ObjectClass *klass, void *data) +static void sdhci_sysbus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1597,18 +1631,9 @@ static void sdhci_sysbus_class_init(ObjectClass *klass, void *data) sdhci_common_class_init(klass, data); } -static const TypeInfo sdhci_sysbus_info = { - .name = TYPE_SYSBUS_SDHCI, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(SDHCIState), - .instance_init = sdhci_sysbus_init, - .instance_finalize = sdhci_sysbus_finalize, - .class_init = sdhci_sysbus_class_init, -}; - /* --- qdev bus master --- */ -static void sdhci_bus_class_init(ObjectClass *klass, void *data) +static void sdhci_bus_class_init(ObjectClass *klass, const void *data) { SDBusClass *sbc = SD_BUS_CLASS(klass); @@ -1616,13 +1641,6 @@ static void sdhci_bus_class_init(ObjectClass *klass, void *data) sbc->set_readonly = sdhci_set_readonly; } -static const TypeInfo sdhci_bus_info = { - .name = TYPE_SDHCI_BUS, - .parent = TYPE_SD_BUS, - .instance_size = sizeof(SDBus), - .class_init = sdhci_bus_class_init, -}; - /* --- qdev i.MX eSDHC --- */ #define USDHC_MIX_CTRL 0x48 @@ -1717,16 +1735,10 @@ usdhc_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) case USDHC_VENDOR_SPEC: s->vendor_spec = value; - switch (s->vendor) { - case SDHCI_VENDOR_IMX: - if (value & USDHC_IMX_FRC_SDCLK_ON) { - s->prnsts &= ~SDHC_IMX_CLOCK_GATE_OFF; - } else { - s->prnsts |= SDHC_IMX_CLOCK_GATE_OFF; - } - break; - default: - break; + if (value & USDHC_IMX_FRC_SDCLK_ON) { + s->prnsts &= ~SDHC_IMX_CLOCK_GATE_OFF; + } else { + s->prnsts |= SDHC_IMX_CLOCK_GATE_OFF; } break; @@ -1881,12 +1893,6 @@ static void imx_usdhc_init(Object *obj) s->quirks = SDHCI_QUIRK_NO_BUSY_IRQ; } -static const TypeInfo imx_usdhc_info = { - .name = TYPE_IMX_USDHC, - .parent = TYPE_SYSBUS_SDHCI, - .instance_init = imx_usdhc_init, -}; - /* --- qdev Samsung s3c --- */ #define S3C_SDHCI_CONTROL2 0x80 @@ -1945,18 +1951,31 @@ static void sdhci_s3c_init(Object *obj) s->io_ops = &sdhci_s3c_mmio_ops; } -static const TypeInfo sdhci_s3c_info = { - .name = TYPE_S3C_SDHCI , - .parent = TYPE_SYSBUS_SDHCI, - .instance_init = sdhci_s3c_init, +static const TypeInfo sdhci_types[] = { + { + .name = TYPE_SDHCI_BUS, + .parent = TYPE_SD_BUS, + .instance_size = sizeof(SDBus), + .class_init = sdhci_bus_class_init, 
+ }, + { + .name = TYPE_SYSBUS_SDHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SDHCIState), + .instance_init = sdhci_sysbus_init, + .instance_finalize = sdhci_sysbus_finalize, + .class_init = sdhci_sysbus_class_init, + }, + { + .name = TYPE_IMX_USDHC, + .parent = TYPE_SYSBUS_SDHCI, + .instance_init = imx_usdhc_init, + }, + { + .name = TYPE_S3C_SDHCI, + .parent = TYPE_SYSBUS_SDHCI, + .instance_init = sdhci_s3c_init, + }, }; -static void sdhci_register_types(void) -{ - type_register_static(&sdhci_sysbus_info); - type_register_static(&sdhci_bus_info); - type_register_static(&imx_usdhc_info); - type_register_static(&sdhci_s3c_info); -} - -type_init(sdhci_register_types) +DEFINE_TYPES(sdhci_types) diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c index 2dd070f978c..3aacbd03871 100644 --- a/hw/sd/ssi-sd.c +++ b/hw/sd/ssi-sd.c @@ -16,7 +16,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/blockdev.h" +#include "system/blockdev.h" #include "hw/ssi/ssi.h" #include "migration/vmstate.h" #include "hw/qdev-properties.h" @@ -70,23 +70,6 @@ struct ssi_sd_state { #define TYPE_SSI_SD "ssi-sd" OBJECT_DECLARE_SIMPLE_TYPE(ssi_sd_state, SSI_SD) -/* State word bits. */ -#define SSI_SDR_LOCKED 0x0001 -#define SSI_SDR_WP_ERASE 0x0002 -#define SSI_SDR_ERROR 0x0004 -#define SSI_SDR_CC_ERROR 0x0008 -#define SSI_SDR_ECC_FAILED 0x0010 -#define SSI_SDR_WP_VIOLATION 0x0020 -#define SSI_SDR_ERASE_PARAM 0x0040 -#define SSI_SDR_OUT_OF_RANGE 0x0080 -#define SSI_SDR_IDLE 0x0100 -#define SSI_SDR_ERASE_RESET 0x0200 -#define SSI_SDR_ILLEGAL_COMMAND 0x0400 -#define SSI_SDR_COM_CRC_ERROR 0x0800 -#define SSI_SDR_ERASE_SEQ_ERROR 0x1000 -#define SSI_SDR_ADDRESS_ERROR 0x2000 -#define SSI_SDR_PARAMETER_ERROR 0x4000 - /* multiple block write */ #define SSI_TOKEN_MULTI_WRITE 0xfc /* terminate multiple block write */ @@ -104,7 +87,11 @@ static uint32_t ssi_sd_transfer(SSIPeripheral *dev, uint32_t val) { ssi_sd_state *s = SSI_SD(dev); SDRequest request; - uint8_t longresp[16]; + uint8_t longresp[5]; + + if (!sdbus_get_inserted(&s->sdbus)) { + return SSI_DUMMY; + } /* * Special case: allow CMD12 (STOP TRANSMISSION) while reading data. @@ -146,8 +133,9 @@ static uint32_t ssi_sd_transfer(SSIPeripheral *dev, uint32_t val) /* manually issue cmd12 to stop the transfer */ request.cmd = 12; request.arg = 0; - s->arglen = sdbus_do_command(&s->sdbus, &request, longresp); - if (s->arglen <= 0) { + s->arglen = sdbus_do_command(&s->sdbus, &request, + longresp, sizeof(longresp)); + if (s->arglen == 0) { s->arglen = 1; /* a zero value indicates the card is busy */ s->response[0] = 0; @@ -170,73 +158,15 @@ static uint32_t ssi_sd_transfer(SSIPeripheral *dev, uint32_t val) /* FIXME: Check CRC. */ request.cmd = s->cmd; request.arg = ldl_be_p(s->cmdarg); - DPRINTF("CMD%d arg 0x%08x\n", s->cmd, request.arg); - s->arglen = sdbus_do_command(&s->sdbus, &request, longresp); - if (s->arglen <= 0) { - s->arglen = 1; - s->response[0] = 4; - DPRINTF("SD command failed\n"); - } else if (s->cmd == 8 || s->cmd == 58) { - /* CMD8/CMD58 returns R3/R7 response */ - DPRINTF("Returned R3/R7\n"); - s->arglen = 5; - s->response[0] = 1; - memcpy(&s->response[1], longresp, 4); - } else if (s->arglen != 4) { - BADF("Unexpected response to cmd %d\n", s->cmd); - /* Illegal command is about as near as we can get. */ - s->arglen = 1; - s->response[0] = 4; - } else { - /* All other commands return status. */ - uint32_t cardstatus; - uint16_t status; - /* CMD13 returns a 2-byte statuse work. Other commands - only return the first byte. */ - s->arglen = (s->cmd == 13) ? 
2 : 1; - - /* handle R1b */ - if (s->cmd == 28 || s->cmd == 29 || s->cmd == 38) { - s->stopping = 1; - } + s->arglen = sdbus_do_command(&s->sdbus, &request, + longresp, sizeof(longresp)); + DPRINTF("CMD%d arg 0x%08x = %d\n", s->cmd, request.arg, s->arglen); + assert(s->arglen > 0); + memcpy(s->response, longresp, s->arglen); - cardstatus = ldl_be_p(longresp); - status = 0; - if (((cardstatus >> 9) & 0xf) < 4) - status |= SSI_SDR_IDLE; - if (cardstatus & ERASE_RESET) - status |= SSI_SDR_ERASE_RESET; - if (cardstatus & ILLEGAL_COMMAND) - status |= SSI_SDR_ILLEGAL_COMMAND; - if (cardstatus & COM_CRC_ERROR) - status |= SSI_SDR_COM_CRC_ERROR; - if (cardstatus & ERASE_SEQ_ERROR) - status |= SSI_SDR_ERASE_SEQ_ERROR; - if (cardstatus & ADDRESS_ERROR) - status |= SSI_SDR_ADDRESS_ERROR; - if (cardstatus & CARD_IS_LOCKED) - status |= SSI_SDR_LOCKED; - if (cardstatus & (LOCK_UNLOCK_FAILED | WP_ERASE_SKIP)) - status |= SSI_SDR_WP_ERASE; - if (cardstatus & SD_ERROR) - status |= SSI_SDR_ERROR; - if (cardstatus & CC_ERROR) - status |= SSI_SDR_CC_ERROR; - if (cardstatus & CARD_ECC_FAILED) - status |= SSI_SDR_ECC_FAILED; - if (cardstatus & WP_VIOLATION) - status |= SSI_SDR_WP_VIOLATION; - if (cardstatus & ERASE_PARAM) - status |= SSI_SDR_ERASE_PARAM; - if (cardstatus & (OUT_OF_RANGE | CID_CSD_OVERWRITE)) - status |= SSI_SDR_OUT_OF_RANGE; - /* ??? Don't know what Parameter Error really means, so - assume it's set if the second byte is nonzero. */ - if (status & 0xff) - status |= SSI_SDR_PARAMETER_ERROR; - s->response[0] = status >> 8; - s->response[1] = status; - DPRINTF("Card status 0x%02x\n", status); + /* handle R1b (busy signal) */ + if (s->cmd == 28 || s->cmd == 29 || s->cmd == 38) { + s->stopping = 1; } s->mode = SSI_SD_PREP_RESP; s->response_pos = 0; @@ -333,7 +263,7 @@ static int ssi_sd_post_load(void *opaque, int version_id) return -EINVAL; } if (s->mode == SSI_SD_CMDARG && - (s->arglen < 0 || s->arglen >= ARRAY_SIZE(s->cmdarg))) { + (s->arglen >= ARRAY_SIZE(s->cmdarg))) { return -EINVAL; } if (s->mode == SSI_SD_RESPONSE && @@ -389,7 +319,7 @@ static void ssi_sd_reset(DeviceState *dev) s->stopping = 0; } -static void ssi_sd_class_init(ObjectClass *klass, void *data) +static void ssi_sd_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); @@ -398,7 +328,7 @@ static void ssi_sd_class_init(ObjectClass *klass, void *data) k->transfer = ssi_sd_transfer; k->cs_polarity = SSI_CS_LOW; dc->vmsd = &vmstate_ssi_sd; - dc->reset = ssi_sd_reset; + device_class_set_legacy_reset(dc, ssi_sd_reset); /* Reason: GPIO chip-select line should be wired up */ dc->user_creatable = false; } diff --git a/hw/sd/trace-events b/hw/sd/trace-events index 43671dc7912..8d49840917e 100644 --- a/hw/sd/trace-events +++ b/hw/sd/trace-events @@ -37,8 +37,8 @@ sdhci_write_dataport(uint16_t data_count) "write buffer filled with %u bytes of sdhci_capareg(const char *desc, uint16_t val) "%s: %u" # sd.c -sdcard_normal_command(const char *proto, const char *cmd_desc, uint8_t cmd, uint32_t arg, const char *state) "%s %20s/ CMD%02d arg 0x%08x (state %s)" -sdcard_app_command(const char *proto, const char *acmd_desc, uint8_t acmd, uint32_t arg, const char *state) "%s %23s/ACMD%02d arg 0x%08x (state %s)" +sdcard_normal_command(const char *proto, const char *cmd_desc, uint8_t cmd, uint32_t arg, const char *mode, const char *state) "%s %20s/ CMD%02d arg 0x%08x (mode %s, state %s)" +sdcard_app_command(const char *proto, const char *acmd_desc, uint8_t acmd, 
uint32_t arg, const char *mode, const char *state) "%s %23s/ACMD%02d arg 0x%08x (mode %s, state %s)" sdcard_response(const char *rspdesc, int rsplen) "%s (sz:%d)" sdcard_powerup(void) "" sdcard_inquiry_cmd41(void) "" @@ -60,10 +60,6 @@ sdcard_set_voltage(uint16_t millivolts) "%u mV" sdcard_ext_csd_update(unsigned index, uint8_t oval, uint8_t nval) "index %u: 0x%02x -> 0x%02x" sdcard_switch(unsigned access, unsigned index, unsigned value, unsigned set) "SWITCH acc:%u idx:%u val:%u set:%u" -# pxa2xx_mmci.c -pxa2xx_mmci_read(uint8_t size, uint32_t addr, uint32_t value) "size %d addr 0x%02x value 0x%08x" -pxa2xx_mmci_write(uint8_t size, uint32_t addr, uint32_t value) "size %d addr 0x%02x value 0x%08x" - # pl181.c pl181_command_send(uint8_t cmd, uint32_t arg) "sending CMD%02d arg 0x%08" PRIx32 pl181_command_sent(void) "command sent" diff --git a/hw/sensor/adm1266.c b/hw/sensor/adm1266.c index 25b87a72961..9017ce6116c 100644 --- a/hw/sensor/adm1266.c +++ b/hw/sensor/adm1266.c @@ -223,7 +223,7 @@ static void adm1266_init(Object *obj) } } -static void adm1266_class_init(ObjectClass *klass, void *data) +static void adm1266_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/sensor/adm1272.c b/hw/sensor/adm1272.c index 3fc1e5d0ad9..0c739aa0d85 100644 --- a/hw/sensor/adm1272.c +++ b/hw/sensor/adm1272.c @@ -511,7 +511,7 @@ static void adm1272_init(Object *obj) } -static void adm1272_class_init(ObjectClass *klass, void *data) +static void adm1272_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/sensor/dps310.c b/hw/sensor/dps310.c index 01c776dd7a8..bcf615423ae 100644 --- a/hw/sensor/dps310.c +++ b/hw/sensor/dps310.c @@ -197,7 +197,7 @@ static const VMStateDescription vmstate_dps310 = { } }; -static void dps310_class_init(ObjectClass *klass, void *data) +static void dps310_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); @@ -205,7 +205,7 @@ static void dps310_class_init(ObjectClass *klass, void *data) k->event = dps310_event; k->recv = dps310_rx; k->send = dps310_tx; - dc->reset = dps310_reset; + device_class_set_legacy_reset(dc, dps310_reset); dc->vmsd = &vmstate_dps310; } diff --git a/hw/sensor/emc141x.c b/hw/sensor/emc141x.c index 95079558e87..7b2ce383a12 100644 --- a/hw/sensor/emc141x.c +++ b/hw/sensor/emc141x.c @@ -265,19 +265,19 @@ static void emc141x_initfn(Object *obj) emc141x_set_temperature, NULL, NULL); } -static void emc141x_class_init(ObjectClass *klass, void *data) +static void emc141x_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); - dc->reset = emc141x_reset; + device_class_set_legacy_reset(dc, emc141x_reset); k->event = emc141x_event; k->recv = emc141x_rx; k->send = emc141x_tx; dc->vmsd = &vmstate_emc141x; } -static void emc1413_class_init(ObjectClass *klass, void *data) +static void emc1413_class_init(ObjectClass *klass, const void *data) { EMC141XClass *ec = EMC141X_CLASS(klass); @@ -286,7 +286,7 @@ static void emc1413_class_init(ObjectClass *klass, void *data) ec->sensors_count = 3; } -static void emc1414_class_init(ObjectClass *klass, void *data) +static void emc1414_class_init(ObjectClass *klass, const void *data) { EMC141XClass *ec = EMC141X_CLASS(klass); diff --git a/hw/sensor/isl_pmbus_vr.c 
b/hw/sensor/isl_pmbus_vr.c index 304a66ea8b0..e8d29b08ff3 100644 --- a/hw/sensor/isl_pmbus_vr.c +++ b/hw/sensor/isl_pmbus_vr.c @@ -233,7 +233,7 @@ static void raa228000_init(Object *obj) isl_pmbus_vr_add_props(obj, flags, 1); } -static void isl_pmbus_vr_class_init(ObjectClass *klass, void *data, +static void isl_pmbus_vr_class_init(ObjectClass *klass, const void *data, uint8_t pages) { PMBusDeviceClass *k = PMBUS_DEVICE_CLASS(klass); @@ -242,7 +242,7 @@ static void isl_pmbus_vr_class_init(ObjectClass *klass, void *data, k->device_num_pages = pages; } -static void isl69260_class_init(ObjectClass *klass, void *data) +static void isl69260_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -251,7 +251,7 @@ static void isl69260_class_init(ObjectClass *klass, void *data) isl_pmbus_vr_class_init(klass, data, 2); } -static void raa228000_class_init(ObjectClass *klass, void *data) +static void raa228000_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -260,7 +260,7 @@ static void raa228000_class_init(ObjectClass *klass, void *data) isl_pmbus_vr_class_init(klass, data, 1); } -static void raa229004_class_init(ObjectClass *klass, void *data) +static void raa229004_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -269,7 +269,7 @@ static void raa229004_class_init(ObjectClass *klass, void *data) isl_pmbus_vr_class_init(klass, data, 2); } -static void isl69259_class_init(ObjectClass *klass, void *data) +static void isl69259_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/sensor/lsm303dlhc_mag.c b/hw/sensor/lsm303dlhc_mag.c index 343ff989904..cd5773ae64e 100644 --- a/hw/sensor/lsm303dlhc_mag.c +++ b/hw/sensor/lsm303dlhc_mag.c @@ -28,7 +28,6 @@ #include "qapi/visitor.h" #include "qemu/module.h" #include "qemu/log.h" -#include "qemu/bswap.h" enum LSM303DLHCMagReg { LSM303DLHC_MAG_REG_CRA = 0x00, @@ -530,12 +529,12 @@ static void lsm303dlhc_mag_initfn(Object *obj) /* * Set the virtual method pointers (bus state change, tx/rx, etc.). 
*/ -static void lsm303dlhc_mag_class_init(ObjectClass *klass, void *data) +static void lsm303dlhc_mag_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); - dc->reset = lsm303dlhc_mag_reset; + device_class_set_legacy_reset(dc, lsm303dlhc_mag_reset); dc->vmsd = &vmstate_lsm303dlhc_mag; k->event = lsm303dlhc_mag_event; k->recv = lsm303dlhc_mag_recv; diff --git a/hw/sensor/max31785.c b/hw/sensor/max31785.c index 3577a7c2180..c75581455a2 100644 --- a/hw/sensor/max31785.c +++ b/hw/sensor/max31785.c @@ -544,7 +544,7 @@ static void max31785_init(Object *obj) } } -static void max31785_class_init(ObjectClass *klass, void *data) +static void max31785_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/sensor/max34451.c b/hw/sensor/max34451.c index 93b53f3db2f..a369d2b5319 100644 --- a/hw/sensor/max34451.c +++ b/hw/sensor/max34451.c @@ -746,7 +746,7 @@ static void max34451_init(Object *obj) } -static void max34451_class_init(ObjectClass *klass, void *data) +static void max34451_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/sensor/tmp105.c b/hw/sensor/tmp105.c index a8730d0b7f9..f5b61109e36 100644 --- a/hw/sensor/tmp105.c +++ b/hw/sensor/tmp105.c @@ -26,22 +26,28 @@ #include "qapi/error.h" #include "qapi/visitor.h" #include "qemu/module.h" +#include "hw/registerfields.h" +#include "trace.h" + +FIELD(CONFIG, SHUTDOWN_MODE, 0, 1) +FIELD(CONFIG, THERMOSTAT_MODE, 1, 1) +FIELD(CONFIG, POLARITY, 2, 1) +FIELD(CONFIG, FAULT_QUEUE, 3, 2) +FIELD(CONFIG, CONVERTER_RESOLUTION, 5, 2) +FIELD(CONFIG, ONE_SHOT, 7, 1) static void tmp105_interrupt_update(TMP105State *s) { - qemu_set_irq(s->pin, s->alarm ^ ((~s->config >> 2) & 1)); /* POL */ + qemu_set_irq(s->pin, s->alarm ^ FIELD_EX8(~s->config, CONFIG, POLARITY)); } -static void tmp105_alarm_update(TMP105State *s) +static void tmp105_alarm_update(TMP105State *s, bool one_shot) { - if ((s->config >> 0) & 1) { /* SD */ - if ((s->config >> 7) & 1) /* OS */ - s->config &= ~(1 << 7); /* OS */ - else - return; + if (FIELD_EX8(s->config, CONFIG, SHUTDOWN_MODE) && !one_shot) { + return; } - if (s->config >> 1 & 1) { + if (FIELD_EX8(s->config, CONFIG, THERMOSTAT_MODE)) { /* * TM == 1 : Interrupt mode. We signal Alert when the * temperature rises above T_high, and expect the guest to clear @@ -89,7 +95,8 @@ static void tmp105_get_temperature(Object *obj, Visitor *v, const char *name, visit_type_int(v, name, &value, errp); } -/* Units are 0.001 centigrades relative to 0 C. s->temperature is 8.8 +/* + * Units are 0.001 centigrades relative to 0 C. s->temperature is 8.8 * fixed point, so units are 1/256 centigrades. A simple ratio will do. 
*/ static void tmp105_set_temperature(Object *obj, Visitor *v, const char *name, @@ -109,7 +116,7 @@ static void tmp105_set_temperature(Object *obj, Visitor *v, const char *name, s->temperature = (int16_t) (temp * 256 / 1000); - tmp105_alarm_update(s); + tmp105_alarm_update(s, false); } static const int tmp105_faultq[4] = { 1, 2, 4, 6 }; @@ -118,54 +125,60 @@ static void tmp105_read(TMP105State *s) { s->len = 0; - if ((s->config >> 1) & 1) { /* TM */ + if (FIELD_EX8(s->config, CONFIG, THERMOSTAT_MODE)) { s->alarm = 0; tmp105_interrupt_update(s); } switch (s->pointer & 3) { case TMP105_REG_TEMPERATURE: - s->buf[s->len ++] = (((uint16_t) s->temperature) >> 8); - s->buf[s->len ++] = (((uint16_t) s->temperature) >> 0) & - (0xf0 << ((~s->config >> 5) & 3)); /* R */ + s->buf[s->len++] = (((uint16_t) s->temperature) >> 8); + s->buf[s->len++] = (((uint16_t) s->temperature) >> 0) & + (0xf0 << (FIELD_EX8(~s->config, CONFIG, CONVERTER_RESOLUTION))); break; case TMP105_REG_CONFIG: - s->buf[s->len ++] = s->config; + s->buf[s->len++] = s->config; break; case TMP105_REG_T_LOW: - s->buf[s->len ++] = ((uint16_t) s->limit[0]) >> 8; - s->buf[s->len ++] = ((uint16_t) s->limit[0]) >> 0; + s->buf[s->len++] = ((uint16_t) s->limit[0]) >> 8; + s->buf[s->len++] = ((uint16_t) s->limit[0]) >> 0; break; case TMP105_REG_T_HIGH: - s->buf[s->len ++] = ((uint16_t) s->limit[1]) >> 8; - s->buf[s->len ++] = ((uint16_t) s->limit[1]) >> 0; + s->buf[s->len++] = ((uint16_t) s->limit[1]) >> 8; + s->buf[s->len++] = ((uint16_t) s->limit[1]) >> 0; break; } + + trace_tmp105_read(s->i2c.address, s->pointer); } static void tmp105_write(TMP105State *s) { + trace_tmp105_write(s->i2c.address, s->pointer); + switch (s->pointer & 3) { case TMP105_REG_TEMPERATURE: break; case TMP105_REG_CONFIG: - if (s->buf[0] & ~s->config & (1 << 0)) /* SD */ - printf("%s: TMP105 shutdown\n", __func__); - s->config = s->buf[0]; - s->faults = tmp105_faultq[(s->config >> 3) & 3]; /* F */ - tmp105_alarm_update(s); + if (FIELD_EX8(s->buf[0] & ~s->config, CONFIG, SHUTDOWN_MODE)) { + trace_tmp105_write_shutdown(s->i2c.address); + } + s->config = FIELD_DP8(s->buf[0], CONFIG, ONE_SHOT, 0); + s->faults = tmp105_faultq[FIELD_EX8(s->config, CONFIG, FAULT_QUEUE)]; + tmp105_alarm_update(s, FIELD_EX8(s->buf[0], CONFIG, ONE_SHOT)); break; case TMP105_REG_T_LOW: case TMP105_REG_T_HIGH: - if (s->len >= 3) + if (s->len >= 3) { s->limit[s->pointer & 1] = (int16_t) - ((((uint16_t) s->buf[0]) << 8) | s->buf[1]); - tmp105_alarm_update(s); + ((((uint16_t) s->buf[0]) << 8) | (s->buf[1] & 0xf0)); + } + tmp105_alarm_update(s, false); break; } } @@ -175,7 +188,7 @@ static uint8_t tmp105_rx(I2CSlave *i2c) TMP105State *s = TMP105(i2c); if (s->len < 2) { - return s->buf[s->len ++]; + return s->buf[s->len++]; } else { return 0xff; } @@ -215,7 +228,7 @@ static int tmp105_post_load(void *opaque, int version_id) { TMP105State *s = opaque; - s->faults = tmp105_faultq[(s->config >> 3) & 3]; /* F */ + s->faults = tmp105_faultq[FIELD_EX8(s->config, CONFIG, FAULT_QUEUE)]; tmp105_interrupt_update(s); return 0; @@ -273,7 +286,7 @@ static void tmp105_reset(I2CSlave *i2c) s->temperature = 0; s->pointer = 0; s->config = 0; - s->faults = tmp105_faultq[(s->config >> 3) & 3]; + s->faults = tmp105_faultq[FIELD_EX8(s->config, CONFIG, FAULT_QUEUE)]; s->alarm = 0; s->detect_falling = false; @@ -300,7 +313,7 @@ static void tmp105_initfn(Object *obj) tmp105_set_temperature, NULL, NULL); } -static void tmp105_class_init(ObjectClass *klass, void *data) +static void tmp105_class_init(ObjectClass *klass, const 
void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); diff --git a/hw/sensor/tmp421.c b/hw/sensor/tmp421.c index b6f0b62ab11..3421c440869 100644 --- a/hw/sensor/tmp421.c +++ b/hw/sensor/tmp421.c @@ -68,7 +68,7 @@ struct TMP421State { struct TMP421Class { I2CSlaveClass parent_class; - DeviceInfo *dev; + const DeviceInfo *dev; }; #define TYPE_TMP421 "tmp421-generic" @@ -337,7 +337,7 @@ static void tmp421_realize(DeviceState *dev, Error **errp) tmp421_reset(&s->i2c); } -static void tmp421_class_init(ObjectClass *klass, void *data) +static void tmp421_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); @@ -382,9 +382,9 @@ static void tmp421_register_types(void) .name = devices[i].name, .parent = TYPE_TMP421, .class_init = tmp421_class_init, - .class_data = (void *) &devices[i], + .class_data = &devices[i], }; - type_register(&ti); + type_register_static(&ti); } } diff --git a/hw/sensor/trace-events b/hw/sensor/trace-events new file mode 100644 index 00000000000..a3fe54fa6dc --- /dev/null +++ b/hw/sensor/trace-events @@ -0,0 +1,6 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# tmp105.c +tmp105_read(uint8_t dev, uint8_t addr) "device: 0x%02x, addr: 0x%02x" +tmp105_write(uint8_t dev, uint8_t addr) "device: 0x%02x, addr 0x%02x" +tmp105_write_shutdown(uint8_t dev) "device: 0x%02x" diff --git a/hw/sensor/trace.h b/hw/sensor/trace.h new file mode 100644 index 00000000000..e4721560b03 --- /dev/null +++ b/hw/sensor/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_sensor.h" diff --git a/hw/sh4/Kconfig b/hw/sh4/Kconfig index 99a76a94c3f..1660d292d53 100644 --- a/hw/sh4/Kconfig +++ b/hw/sh4/Kconfig @@ -13,13 +13,6 @@ config R2D select SH7750 select SH_PCI -config SHIX - bool - default y - depends on SH4 - select SH7750 - select TC58128 - config SH7750 bool select SH_INTC diff --git a/hw/sh4/meson.build b/hw/sh4/meson.build index 70e814c3a28..7d27839fee6 100644 --- a/hw/sh4/meson.build +++ b/hw/sh4/meson.build @@ -4,6 +4,5 @@ sh4_ss.add(when: 'CONFIG_SH7750', if_true: files( 'sh7750_regnames.c', )) sh4_ss.add(when: 'CONFIG_R2D', if_true: files('r2d.c')) -sh4_ss.add(when: 'CONFIG_SHIX', if_true: files('shix.c')) hw_arch += {'sh4': sh4_ss} diff --git a/hw/sh4/r2d.c b/hw/sh4/r2d.c index e5ac6751bd5..d68c94e82ef 100644 --- a/hw/sh4/r2d.c +++ b/hw/sh4/r2d.c @@ -30,9 +30,9 @@ #include "cpu.h" #include "hw/sysbus.h" #include "hw/sh4/sh.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/reset.h" +#include "system/runstate.h" +#include "system/system.h" #include "hw/boards.h" #include "hw/pci/pci.h" #include "hw/qdev-properties.h" @@ -43,6 +43,7 @@ #include "hw/loader.h" #include "hw/usb.h" #include "hw/block/flash.h" +#include "exec/tswap.h" #define FLASH_BASE 0x00000000 #define FLASH_SIZE (16 * MiB) @@ -62,6 +63,12 @@ #define PA_VERREG 0x32 #define PA_OUTPORT 0x36 +enum r2d_fpga_irq { + PCI_INTD, CF_IDE, CF_CD, PCI_INTC, SM501, KEY, RTC_A, RTC_T, + SDCARD, PCI_INTA, PCI_INTB, EXT, TP, + NR_IRQS +}; + typedef struct { uint16_t bcr; uint16_t irlmsk; @@ -87,15 +94,10 @@ typedef struct { /* output pin */ qemu_irq irl; + IRQState irq[NR_IRQS]; MemoryRegion iomem; } r2d_fpga_t; -enum r2d_fpga_irq { - PCI_INTD, CF_IDE, CF_CD, PCI_INTC, SM501, KEY, RTC_A, RTC_T, - SDCARD, PCI_INTA, PCI_INTB, EXT, TP, - NR_IRQS -}; - static const struct { short irl; uint16_t msk; } irqtab[NR_IRQS] = { [CF_IDE] = { 1, 1 << 9 }, [CF_CD] = { 2, 1 << 8 
}, @@ -185,8 +187,8 @@ static const MemoryRegionOps r2d_fpga_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -static qemu_irq *r2d_fpga_init(MemoryRegion *sysmem, - hwaddr base, qemu_irq irl) +static r2d_fpga_t *r2d_fpga_init(MemoryRegion *sysmem, + hwaddr base, qemu_irq irl) { r2d_fpga_t *s; @@ -196,7 +198,10 @@ static qemu_irq *r2d_fpga_init(MemoryRegion *sysmem, memory_region_init_io(&s->iomem, NULL, &r2d_fpga_ops, s, "r2d-fpga", 0x40); memory_region_add_subregion(sysmem, base, &s->iomem); - return qemu_allocate_irqs(r2d_fpga_irq_set, s, NR_IRQS); + + qemu_init_irqs(s->irq, NR_IRQS, r2d_fpga_irq_set, s); + + return s; } typedef struct ResetData { @@ -238,13 +243,13 @@ static void r2d_init(MachineState *machine) ResetData *reset_info; struct SH7750State *s; MemoryRegion *sdram = g_new(MemoryRegion, 1); - qemu_irq *irq; DriveInfo *dinfo; DeviceState *dev; SysBusDevice *busdev; MemoryRegion *address_space_mem = get_system_memory(); PCIBus *pci_bus; USBBus *usb_bus; + r2d_fpga_t *fpga; cpu = SUPERH_CPU(cpu_create(machine->cpu_type)); env = &cpu->env; @@ -259,7 +264,7 @@ static void r2d_init(MachineState *machine) memory_region_add_subregion(address_space_mem, SDRAM_BASE, sdram); /* Register peripherals */ s = sh7750_init(cpu, address_space_mem); - irq = r2d_fpga_init(address_space_mem, 0x04000000, sh7750_irl(s)); + fpga = r2d_fpga_init(address_space_mem, 0x04000000, sh7750_irl(s)); dev = qdev_new("sh_pci"); busdev = SYS_BUS_DEVICE(dev); @@ -267,10 +272,10 @@ static void r2d_init(MachineState *machine) pci_bus = PCI_BUS(qdev_get_child_bus(dev, "pci")); sysbus_mmio_map(busdev, 0, P4ADDR(0x1e200000)); sysbus_mmio_map(busdev, 1, A7ADDR(0x1e200000)); - sysbus_connect_irq(busdev, 0, irq[PCI_INTA]); - sysbus_connect_irq(busdev, 1, irq[PCI_INTB]); - sysbus_connect_irq(busdev, 2, irq[PCI_INTC]); - sysbus_connect_irq(busdev, 3, irq[PCI_INTD]); + sysbus_connect_irq(busdev, 0, &fpga->irq[PCI_INTA]); + sysbus_connect_irq(busdev, 1, &fpga->irq[PCI_INTB]); + sysbus_connect_irq(busdev, 2, &fpga->irq[PCI_INTC]); + sysbus_connect_irq(busdev, 3, &fpga->irq[PCI_INTD]); dev = qdev_new("sysbus-sm501"); busdev = SYS_BUS_DEVICE(dev); @@ -280,15 +285,15 @@ static void r2d_init(MachineState *machine) sysbus_realize_and_unref(busdev, &error_fatal); sysbus_mmio_map(busdev, 0, 0x10000000); sysbus_mmio_map(busdev, 1, 0x13e00000); - sysbus_connect_irq(busdev, 0, irq[SM501]); + sysbus_connect_irq(busdev, 0, &fpga->irq[SM501]); /* onboard CF (True IDE mode, Master only). 
*/ dinfo = drive_get(IF_IDE, 0, 0); dev = qdev_new("mmio-ide"); busdev = SYS_BUS_DEVICE(dev); + sysbus_connect_irq(busdev, 0, &fpga->irq[CF_IDE]); qdev_prop_set_uint32(dev, "shift", 1); sysbus_realize_and_unref(busdev, &error_fatal); - sysbus_connect_irq(busdev, 0, irq[CF_IDE]); sysbus_mmio_map(busdev, 0, 0x14001000); sysbus_mmio_map(busdev, 1, 0x1400080c); mmio_ide_init_drives(dev, dinfo, NULL); diff --git a/hw/sh4/sh7750.c b/hw/sh4/sh7750.c index ebe0fd96d94..300eabc595d 100644 --- a/hw/sh4/sh7750.c +++ b/hw/sh4/sh7750.c @@ -28,18 +28,17 @@ #include "hw/sysbus.h" #include "hw/irq.h" #include "hw/sh4/sh.h" -#include "sysemu/sysemu.h" +#include "system/system.h" +#include "target/sh4/cpu.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "sh7750_regs.h" #include "sh7750_regnames.h" #include "hw/sh4/sh_intc.h" #include "hw/timer/tmu012.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "trace.h" -#define NB_DEVICES 4 - typedef struct SH7750State { MemoryRegion iomem; MemoryRegion iomem_1f0; @@ -75,7 +74,6 @@ typedef struct SH7750State { uint16_t periph_portdira; /* Direction seen from the peripherals */ uint16_t periph_pdtrb; /* Imposed by the peripherals */ uint16_t periph_portdirb; /* Direction seen from the peripherals */ - sh7750_io_device *devices[NB_DEVICES]; /* External peripherals */ /* Cache */ uint32_t ccr; @@ -92,19 +90,6 @@ static inline int has_bcr3_and_bcr4(SH7750State *s) * I/O ports */ -int sh7750_register_io_device(SH7750State *s, sh7750_io_device *device) -{ - int i; - - for (i = 0; i < NB_DEVICES; i++) { - if (s->devices[i] == NULL) { - s->devices[i] = device; - return 0; - } - } - return -1; -} - static uint16_t portdir(uint32_t v) { #define EVENPORTMASK(n) ((v & (1 << ((n) << 1))) >> (n)) @@ -142,63 +127,26 @@ static uint16_t portb_lines(SH7750State *s) (~(s->portdirb | s->periph_portdirb) & s->portpullupb); /* Pullups */ } -static void gen_port_interrupts(SH7750State *s) -{ - /* XXXXX interrupts not generated */ -} - static void porta_changed(SH7750State *s, uint16_t prev) { - uint16_t currenta, changes; - int i, r = 0; + uint16_t currenta; currenta = porta_lines(s); if (currenta == prev) { return; } trace_sh7750_porta(prev, currenta, s->pdtra, s->pctra); - changes = currenta ^ prev; - - for (i = 0; i < NB_DEVICES; i++) { - if (s->devices[i] && (s->devices[i]->portamask_trigger & changes)) { - r |= s->devices[i]->port_change_cb(currenta, portb_lines(s), - &s->periph_pdtra, - &s->periph_portdira, - &s->periph_pdtrb, - &s->periph_portdirb); - } - } - - if (r) { - gen_port_interrupts(s); - } } static void portb_changed(SH7750State *s, uint16_t prev) { - uint16_t currentb, changes; - int i, r = 0; + uint16_t currentb; currentb = portb_lines(s); if (currentb == prev) { return; } trace_sh7750_portb(prev, currentb, s->pdtrb, s->pctrb); - changes = currentb ^ prev; - - for (i = 0; i < NB_DEVICES; i++) { - if (s->devices[i] && (s->devices[i]->portbmask_trigger & changes)) { - r |= s->devices[i]->port_change_cb(portb_lines(s), currentb, - &s->periph_pdtra, - &s->periph_portdira, - &s->periph_pdtrb, - &s->periph_portdirb); - } - } - - if (r) { - gen_port_interrupts(s); - } } /* diff --git a/hw/sh4/shix.c b/hw/sh4/shix.c deleted file mode 100644 index eb3150b5bcf..00000000000 --- a/hw/sh4/shix.c +++ /dev/null @@ -1,86 +0,0 @@ -/* - * SHIX 2.0 board description - * - * Copyright (c) 2005 Samuel Tardieu - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation 
files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -/* - * Shix 2.0 board by Alexis Polti, described at - * https://web.archive.org/web/20070917001736/perso.enst.fr/~polti/realisations/shix20 - * - * More information in target/sh4/README.sh4 - */ -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "cpu.h" -#include "hw/sh4/sh.h" -#include "sysemu/qtest.h" -#include "hw/boards.h" -#include "hw/loader.h" -#include "qemu/error-report.h" - -#define BIOS_FILENAME "shix_bios.bin" -#define BIOS_ADDRESS 0xA0000000 - -static void shix_init(MachineState *machine) -{ - int ret; - SuperHCPU *cpu; - struct SH7750State *s; - MemoryRegion *sysmem = get_system_memory(); - MemoryRegion *rom = g_new(MemoryRegion, 1); - MemoryRegion *sdram = g_new(MemoryRegion, 2); - const char *bios_name = machine->firmware ?: BIOS_FILENAME; - - cpu = SUPERH_CPU(cpu_create(machine->cpu_type)); - - /* Allocate memory space */ - memory_region_init_rom(rom, NULL, "shix.rom", 0x4000, &error_fatal); - memory_region_add_subregion(sysmem, 0x00000000, rom); - memory_region_init_ram(&sdram[0], NULL, "shix.sdram1", 0x01000000, - &error_fatal); - memory_region_add_subregion(sysmem, 0x08000000, &sdram[0]); - memory_region_init_ram(&sdram[1], NULL, "shix.sdram2", 0x01000000, - &error_fatal); - memory_region_add_subregion(sysmem, 0x0c000000, &sdram[1]); - - /* Load BIOS in 0 (and access it through P2, 0xA0000000) */ - ret = load_image_targphys(bios_name, 0, 0x4000); - if (ret < 0 && !qtest_enabled()) { - error_report("Could not load SHIX bios '%s'", bios_name); - exit(1); - } - - /* Register peripherals */ - s = sh7750_init(cpu, sysmem); - /* XXXXX Check success */ - tc58128_init(s, "shix_linux_nand.bin", NULL); -} - -static void shix_machine_init(MachineClass *mc) -{ - mc->desc = "shix card"; - mc->init = shix_init; - mc->is_default = true; - mc->default_cpu_type = TYPE_SH7750R_CPU; - mc->deprecation_reason = "old and unmaintained"; -} - -DEFINE_MACHINE("shix", shix_machine_init) diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c index a394514264c..1ac063cfb4b 100644 --- a/hw/smbios/smbios.c +++ b/hw/smbios/smbios.c @@ -17,11 +17,12 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "qemu/bswap.h" #include "qapi/error.h" #include "qemu/config-file.h" #include "qemu/module.h" #include "qemu/option.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qemu/uuid.h" #include "hw/firmware/smbios.h" #include "hw/loader.h" @@ -1285,6 +1286,9 @@ static int save_opt_one(void *opaque, g_byte_array_append(data, (guint8 *)buf, ret); } + buf[0] = '\0'; + g_byte_array_append(data, (guint8 *)buf, 1); + qemu_close(fd); 
*opt->dest = g_renew(char *, *opt->dest, (*opt->ndest) + 1); diff --git a/hw/smbios/smbios_legacy.c b/hw/smbios/smbios_legacy.c index c37a8ee821f..14319d4897f 100644 --- a/hw/smbios/smbios_legacy.c +++ b/hw/smbios/smbios_legacy.c @@ -18,7 +18,7 @@ #include "qemu/osdep.h" #include "qemu/bswap.h" #include "hw/firmware/smbios.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qapi/error.h" struct smbios_header { diff --git a/hw/smbios/smbios_type_38.c b/hw/smbios/smbios_type_38.c index 168b886647d..e9b856fcd96 100644 --- a/hw/smbios/smbios_type_38.c +++ b/hw/smbios/smbios_type_38.c @@ -72,7 +72,12 @@ static void smbios_build_one_type_38(IPMIFwInfo *info) " SMBIOS, ignoring this entry.", info->register_spacing); return; } - t->interrupt_number = info->interrupt_number; + if (info->irq_source == IPMI_ISA_IRQ) { + t->interrupt_number = info->interrupt_number; + } else { + /* TODO: How to handle PCI? */ + t->interrupt_number = 0; + } SMBIOS_BUILD_TABLE_POST; } diff --git a/hw/sparc/leon3.c b/hw/sparc/leon3.c index 6aaa04cb191..0aeaad3becc 100644 --- a/hw/sparc/leon3.c +++ b/hw/sparc/leon3.c @@ -34,9 +34,9 @@ #include "qemu/timer.h" #include "hw/ptimer.h" #include "hw/qdev-properties.h" -#include "sysemu/sysemu.h" -#include "sysemu/qtest.h" -#include "sysemu/reset.h" +#include "system/system.h" +#include "system/qtest.h" +#include "system/reset.h" #include "hw/boards.h" #include "hw/loader.h" #include "elf.h" @@ -380,7 +380,7 @@ static void leon3_generic_hw_init(MachineState *machine) kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &entry, NULL, NULL, NULL, - 1 /* big endian */, EM_SPARC, 0, 0); + ELFDATA2MSB, EM_SPARC, 0, 0); if (kernel_size < 0) { kernel_size = load_uimage(kernel_filename, NULL, &entry, NULL, NULL, NULL); diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c index d52e6a7213f..8ac7e625ef1 100644 --- a/hw/sparc/sun4m.c +++ b/hw/sparc/sun4m.c @@ -27,6 +27,7 @@ #include "qapi/error.h" #include "qemu/datadir.h" #include "cpu.h" +#include "exec/target_page.h" #include "hw/sysbus.h" #include "qemu/error-report.h" #include "qemu/timer.h" @@ -35,9 +36,9 @@ #include "migration/vmstate.h" #include "hw/sparc/sparc32_dma.h" #include "hw/block/fdc.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/reset.h" +#include "system/runstate.h" +#include "system/system.h" #include "net/net.h" #include "hw/boards.h" #include "hw/scsi/esp.h" @@ -233,19 +234,13 @@ static unsigned long sun4m_load_kernel(const char *kernel_filename, kernel_size = 0; if (linux_boot) { - int bswap_needed; - -#ifdef BSWAP_NEEDED - bswap_needed = 1; -#else - bswap_needed = 0; -#endif kernel_size = load_elf(kernel_filename, NULL, translate_kernel_address, NULL, - NULL, NULL, NULL, NULL, 1, EM_SPARC, 0, 0); + NULL, NULL, NULL, NULL, + ELFDATA2MSB, EM_SPARC, 0, 0); if (kernel_size < 0) kernel_size = load_aout(kernel_filename, KERNEL_LOAD_ADDR, - RAM_size - KERNEL_LOAD_ADDR, bswap_needed, + RAM_size - KERNEL_LOAD_ADDR, true, TARGET_PAGE_SIZE); if (kernel_size < 0) kernel_size = load_image_targphys(kernel_filename, @@ -600,7 +595,7 @@ static void idreg_realize(DeviceState *ds, Error **errp) sysbus_init_mmio(dev, &s->mem); } -static void idreg_class_init(ObjectClass *oc, void *data) +static void idreg_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -650,7 +645,7 @@ static void afx_realize(DeviceState *ds, Error **errp) sysbus_init_mmio(dev, &s->mem); } -static void afx_class_init(ObjectClass *oc, void *data) +static 
void afx_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -703,7 +698,7 @@ static void prom_init(hwaddr addr, const char *bios_name) if (filename) { ret = load_elf(filename, NULL, translate_prom_address, &addr, NULL, - NULL, NULL, NULL, 1, EM_SPARC, 0, 0); + NULL, NULL, NULL, ELFDATA2MSB, EM_SPARC, 0, 0); if (ret < 0 || ret > PROM_SIZE_MAX) { ret = load_image_targphys(filename, addr, PROM_SIZE_MAX); } @@ -732,15 +727,10 @@ static void prom_realize(DeviceState *ds, Error **errp) sysbus_init_mmio(dev, &s->prom); } -static Property prom_properties[] = { - {/* end of property list */}, -}; - -static void prom_class_init(ObjectClass *klass, void *data) +static void prom_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - device_class_set_props(dc, prom_properties); dc->realize = prom_realize; } @@ -781,7 +771,7 @@ static void ram_initfn(Object *obj) "Valid value is ID of a hostmem backend"); } -static void ram_class_init(ObjectClass *klass, void *data) +static void ram_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -979,7 +969,7 @@ static void sun4m_hw_init(MachineState *machine) sysbus_mmio_map(s, 0, hwdef->ms_kb_base); /* Logically OR both its IRQs together */ - ms_kb_orgate = DEVICE(object_new(TYPE_OR_IRQ)); + ms_kb_orgate = qdev_new(TYPE_OR_IRQ); object_property_set_int(OBJECT(ms_kb_orgate), "num-lines", 2, &error_fatal); qdev_realize_and_unref(ms_kb_orgate, NULL, &error_fatal); sysbus_connect_irq(s, 0, qdev_get_gpio_in(ms_kb_orgate, 0)); @@ -1000,7 +990,7 @@ static void sun4m_hw_init(MachineState *machine) sysbus_mmio_map(s, 0, hwdef->serial_base); /* Logically OR both its IRQs together */ - serial_orgate = DEVICE(object_new(TYPE_OR_IRQ)); + serial_orgate = qdev_new(TYPE_OR_IRQ); object_property_set_int(OBJECT(serial_orgate), "num-lines", 2, &error_fatal); qdev_realize_and_unref(serial_orgate, NULL, &error_fatal); @@ -1108,7 +1098,7 @@ enum { ss600mp_id, }; -static void sun4m_machine_class_init(ObjectClass *oc, void *data) +static void sun4m_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1119,7 +1109,7 @@ static void sun4m_machine_class_init(ObjectClass *oc, void *data) mc->default_ram_id = "sun4m.ram"; } -static void ss5_class_init(ObjectClass *oc, void *data) +static void ss5_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); @@ -1156,7 +1146,7 @@ static void ss5_class_init(ObjectClass *oc, void *data) smc->hwdef = &ss5_hwdef; } -static void ss10_class_init(ObjectClass *oc, void *data) +static void ss10_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); @@ -1191,7 +1181,7 @@ static void ss10_class_init(ObjectClass *oc, void *data) smc->hwdef = &ss10_hwdef; } -static void ss600mp_class_init(ObjectClass *oc, void *data) +static void ss600mp_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); @@ -1224,7 +1214,7 @@ static void ss600mp_class_init(ObjectClass *oc, void *data) smc->hwdef = &ss600mp_hwdef; } -static void ss20_class_init(ObjectClass *oc, void *data) +static void ss20_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); @@ -1275,7 +1265,7 @@ static void 
ss20_class_init(ObjectClass *oc, void *data) smc->hwdef = &ss20_hwdef; } -static void voyager_class_init(ObjectClass *oc, void *data) +static void voyager_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); @@ -1307,7 +1297,7 @@ static void voyager_class_init(ObjectClass *oc, void *data) smc->hwdef = &voyager_hwdef; } -static void ss_lx_class_init(ObjectClass *oc, void *data) +static void ss_lx_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); @@ -1340,7 +1330,7 @@ static void ss_lx_class_init(ObjectClass *oc, void *data) smc->hwdef = &ss_lx_hwdef; } -static void ss4_class_init(ObjectClass *oc, void *data) +static void ss4_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); @@ -1373,7 +1363,7 @@ static void ss4_class_init(ObjectClass *oc, void *data) smc->hwdef = &ss4_hwdef; } -static void scls_class_init(ObjectClass *oc, void *data) +static void scls_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); @@ -1405,7 +1395,7 @@ static void scls_class_init(ObjectClass *oc, void *data) smc->hwdef = &scls_hwdef; } -static void sbook_class_init(ObjectClass *oc, void *data) +static void sbook_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); diff --git a/hw/sparc/sun4m_iommu.c b/hw/sparc/sun4m_iommu.c index 06703b1d96e..a7ff36ee78c 100644 --- a/hw/sparc/sun4m_iommu.c +++ b/hw/sparc/sun4m_iommu.c @@ -29,7 +29,7 @@ #include "hw/sysbus.h" #include "migration/vmstate.h" #include "qemu/module.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "trace.h" /* @@ -238,7 +238,7 @@ static void iommu_mem_write(void *opaque, hwaddr addr, static const MemoryRegionOps iommu_mem_ops = { .read = iommu_mem_read, .write = iommu_mem_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -368,16 +368,15 @@ static void iommu_init(Object *obj) sysbus_init_mmio(dev, &s->iomem); } -static Property iommu_properties[] = { +static const Property iommu_properties[] = { DEFINE_PROP_UINT32("version", IOMMUState, version, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void iommu_class_init(ObjectClass *klass, void *data) +static void iommu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = iommu_reset; + device_class_set_legacy_reset(dc, iommu_reset); dc->vmsd = &vmstate_iommu; device_class_set_props(dc, iommu_properties); } @@ -390,7 +389,8 @@ static const TypeInfo iommu_info = { .class_init = iommu_class_init, }; -static void sun4m_iommu_memory_region_class_init(ObjectClass *klass, void *data) +static void sun4m_iommu_memory_region_class_init(ObjectClass *klass, + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); diff --git a/hw/sparc64/Kconfig b/hw/sparc64/Kconfig index 3b948a22907..f764c8a219d 100644 --- a/hw/sparc64/Kconfig +++ b/hw/sparc64/Kconfig @@ -10,6 +10,7 @@ config SUN4U select ISA_BUS select FDC_ISA select SERIAL_ISA + select SERIAL_MM select PCI_SABRE select IDE_CMD646 select PCKBD diff --git a/hw/sparc64/niagara.c b/hw/sparc64/niagara.c index ab3c4ec3463..1ffe92060ad 100644 --- a/hw/sparc64/niagara.c +++ 
b/hw/sparc64/niagara.c @@ -27,15 +27,15 @@ #include "qemu/units.h" #include "cpu.h" #include "hw/boards.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/misc/unimp.h" #include "hw/loader.h" #include "hw/sparc/sparc64.h" #include "hw/rtc/sun4v-rtc.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/error-report.h" -#include "sysemu/qtest.h" -#include "sysemu/sysemu.h" +#include "system/qtest.h" +#include "system/system.h" #include "qapi/error.h" typedef struct NiagaraBoardState { @@ -157,7 +157,7 @@ static void niagara_init(MachineState *machine) sun4v_rtc_init(NIAGARA_RTC_BASE); } -static void niagara_class_init(ObjectClass *oc, void *data) +static void niagara_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); diff --git a/hw/sparc64/sparc64.c b/hw/sparc64/sparc64.c index 3091cde5862..9cffc92aa3b 100644 --- a/hw/sparc64/sparc64.c +++ b/hw/sparc64/sparc64.c @@ -29,7 +29,7 @@ #include "hw/boards.h" #include "hw/sparc/sparc64.h" #include "qemu/timer.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "trace.h" diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c index 4ece1ac1ffc..e9f9b0a4cb9 100644 --- a/hw/sparc64/sun4u.c +++ b/hw/sparc64/sun4u.c @@ -28,13 +28,15 @@ #include "qapi/error.h" #include "qemu/datadir.h" #include "cpu.h" +#include "exec/target_page.h" #include "hw/irq.h" #include "hw/pci/pci.h" #include "hw/pci/pci_bridge.h" #include "hw/pci/pci_host.h" #include "hw/qdev-properties.h" #include "hw/pci-host/sabre.h" -#include "hw/char/serial.h" +#include "hw/char/serial-isa.h" +#include "hw/char/serial-mm.h" #include "hw/char/parallel-isa.h" #include "hw/rtc/m48t59.h" #include "migration/vmstate.h" @@ -42,8 +44,8 @@ #include "hw/block/fdc.h" #include "net/net.h" #include "qemu/timer.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/runstate.h" +#include "system/system.h" #include "hw/boards.h" #include "hw/nvram/sun_nvram.h" #include "hw/nvram/chrp_nvram.h" @@ -167,21 +169,14 @@ static uint64_t sun4u_load_kernel(const char *kernel_filename, kernel_size = 0; if (linux_boot) { - int bswap_needed; - -#ifdef BSWAP_NEEDED - bswap_needed = 1; -#else - bswap_needed = 0; -#endif kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, kernel_entry, - kernel_addr, &kernel_top, NULL, 1, EM_SPARCV9, 0, - 0); + kernel_addr, &kernel_top, NULL, + ELFDATA2MSB, EM_SPARCV9, 0, 0); if (kernel_size < 0) { *kernel_addr = KERNEL_LOAD_ADDR; *kernel_entry = KERNEL_LOAD_ADDR; kernel_size = load_aout(kernel_filename, KERNEL_LOAD_ADDR, - RAM_size - KERNEL_LOAD_ADDR, bswap_needed, + RAM_size - KERNEL_LOAD_ADDR, true, TARGET_PAGE_SIZE); } if (kernel_size < 0) { @@ -253,7 +248,7 @@ static void power_mem_write(void *opaque, hwaddr addr, static const MemoryRegionOps power_mem_ops = { .read = power_mem_read, .write = power_mem_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -271,7 +266,7 @@ static void power_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &d->power_mmio); } -static void power_class_init(ObjectClass *klass, void *data) +static void power_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -373,13 +368,12 @@ static void ebus_realize(PCIDevice *pci_dev, Error **errp) pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->bar1); } -static Property ebus_properties[] = { +static const Property ebus_properties[] = 
{ DEFINE_PROP_UINT64("console-serial-base", EbusState, console_serial_base, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void ebus_class_init(ObjectClass *klass, void *data) +static void ebus_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); @@ -397,7 +391,7 @@ static const TypeInfo ebus_info = { .parent = TYPE_PCI_DEVICE, .class_init = ebus_class_init, .instance_size = sizeof(EbusState), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -441,7 +435,7 @@ static void prom_init(hwaddr addr, const char *bios_name) filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (filename) { ret = load_elf(filename, NULL, translate_prom_address, &addr, - NULL, NULL, NULL, NULL, 1, EM_SPARCV9, 0, 0); + NULL, NULL, NULL, NULL, ELFDATA2MSB, EM_SPARCV9, 0, 0); if (ret < 0 || ret > PROM_SIZE_MAX) { ret = load_image_targphys(filename, addr, PROM_SIZE_MAX); } @@ -470,15 +464,10 @@ static void prom_realize(DeviceState *ds, Error **errp) sysbus_init_mmio(dev, &s->prom); } -static Property prom_properties[] = { - {/* end of property list */}, -}; - -static void prom_class_init(ObjectClass *klass, void *data) +static void prom_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - device_class_set_props(dc, prom_properties); dc->realize = prom_realize; } @@ -531,12 +520,11 @@ static void ram_init(hwaddr addr, ram_addr_t RAM_size) sysbus_mmio_map(s, 0, addr); } -static Property ram_properties[] = { +static const Property ram_properties[] = { DEFINE_PROP_UINT64("size", RamDevice, size, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void ram_class_init(ObjectClass *klass, void *data) +static void ram_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -799,7 +787,7 @@ static GlobalProperty hw_compat_sparc64[] = { }; static const size_t hw_compat_sparc64_len = G_N_ELEMENTS(hw_compat_sparc64); -static void sun4u_class_init(ObjectClass *oc, void *data) +static void sun4u_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); @@ -823,13 +811,13 @@ static const TypeInfo sun4u_type = { .name = MACHINE_TYPE_NAME("sun4u"), .parent = TYPE_MACHINE, .class_init = sun4u_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_FW_PATH_PROVIDER }, { } }, }; -static void sun4v_class_init(ObjectClass *oc, void *data) +static void sun4v_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); diff --git a/hw/sparc64/sun4u_iommu.c b/hw/sparc64/sun4u_iommu.c index 1c1dca712e3..14645f475a0 100644 --- a/hw/sparc64/sun4u_iommu.c +++ b/hw/sparc64/sun4u_iommu.c @@ -27,7 +27,7 @@ #include "qemu/osdep.h" #include "hw/sysbus.h" #include "hw/sparc/sun4u_iommu.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "qemu/log.h" #include "qemu/module.h" #include "trace.h" @@ -305,11 +305,11 @@ static void iommu_init(Object *obj) sysbus_init_mmio(sbd, &s->iomem); } -static void iommu_class_init(ObjectClass *klass, void *data) +static void iommu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = iommu_reset; + device_class_set_legacy_reset(dc, iommu_reset); } static const TypeInfo iommu_info = { @@ -320,7 +320,8 @@ static const TypeInfo iommu_info = { .class_init = 
iommu_class_init, }; -static void sun4u_iommu_memory_region_class_init(ObjectClass *klass, void *data) +static void sun4u_iommu_memory_region_class_init(ObjectClass *klass, + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); diff --git a/hw/ssi/Kconfig b/hw/ssi/Kconfig index 8d180de7cf2..1bd56463c1e 100644 --- a/hw/ssi/Kconfig +++ b/hw/ssi/Kconfig @@ -28,3 +28,7 @@ config BCM2835_SPI config PNV_SPI bool select SSI + +config ALLWINNER_A10_SPI + bool + select SSI diff --git a/hw/ssi/allwinner-a10-spi.c b/hw/ssi/allwinner-a10-spi.c new file mode 100644 index 00000000000..6b7cca8d32a --- /dev/null +++ b/hw/ssi/allwinner-a10-spi.c @@ -0,0 +1,561 @@ +/* + * Allwinner SPI Bus Serial Interface Emulation + * + * Copyright (C) 2024 Strahinja Jankovic + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/ssi/allwinner-a10-spi.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" + +/* Allwinner SPI memory map */ +#define SPI_RXDATA_REG 0x00 /* receive data register */ +#define SPI_TXDATA_REG 0x04 /* transmit data register */ +#define SPI_CTL_REG 0x08 /* control register */ +#define SPI_INTCTL_REG 0x0c /* interrupt control register */ +#define SPI_INT_STA_REG 0x10 /* interrupt status register */ +#define SPI_DMACTL_REG 0x14 /* DMA control register */ +#define SPI_WAIT_REG 0x18 /* wait clock counter register */ +#define SPI_CCTL_REG 0x1c /* clock rate control register */ +#define SPI_BC_REG 0x20 /* burst control register */ +#define SPI_TC_REG 0x24 /* transmit counter register */ +#define SPI_FIFO_STA_REG 0x28 /* FIFO status register */ + +/* Data register */ +#define SPI_DATA_RESET 0 + +/* Control register */ +#define SPI_CTL_SDC (1 << 19) +#define SPI_CTL_TP_EN (1 << 18) +#define SPI_CTL_SS_LEVEL (1 << 17) +#define SPI_CTL_SS_CTRL (1 << 16) +#define SPI_CTL_DHB (1 << 15) +#define SPI_CTL_DDB (1 << 14) +#define SPI_CTL_SS (3 << 12) +#define SPI_CTL_SS_SHIFT 12 +#define SPI_CTL_RPSM (1 << 11) +#define SPI_CTL_XCH (1 << 10) +#define SPI_CTL_RF_RST (1 << 9) +#define SPI_CTL_TF_RST (1 << 8) +#define SPI_CTL_SSCTL (1 << 7) +#define SPI_CTL_LMTF (1 << 6) +#define SPI_CTL_DMAMC (1 << 5) +#define SPI_CTL_SSPOL (1 << 4) +#define SPI_CTL_POL (1 << 3) +#define SPI_CTL_PHA (1 << 2) +#define SPI_CTL_MODE (1 << 1) +#define SPI_CTL_EN (1 << 0) +#define SPI_CTL_MASK 0xFFFFFu +#define SPI_CTL_RESET 0x0002001Cu + +/* Interrupt control register */ +#define SPI_INTCTL_SS_INT_EN (1 << 17) +#define SPI_INTCTL_TX_INT_EN (1 << 16) +#define SPI_INTCTL_TF_UR_INT_EN (1 << 14) +#define SPI_INTCTL_TF_OF_INT_EN (1 << 13) +#define SPI_INTCTL_TF_E34_INT_EN (1 << 12) +#define SPI_INTCTL_TF_E14_INT_EN (1 << 11) +#define SPI_INTCTL_TF_FL_INT_EN (1 << 10) +#define SPI_INTCTL_TF_HALF_EMP_INT_EN (1 << 9) +#define SPI_INTCTL_TF_EMP_INT_EN (1 << 8) +#define 
SPI_INTCTL_RF_UR_INT_EN (1 << 6) +#define SPI_INTCTL_RF_OF_INT_EN (1 << 5) +#define SPI_INTCTL_RF_E34_INT_EN (1 << 4) +#define SPI_INTCTL_RF_E14_INT_EN (1 << 3) +#define SPI_INTCTL_RF_FU_INT_EN (1 << 2) +#define SPI_INTCTL_RF_HALF_FU_INT_EN (1 << 1) +#define SPI_INTCTL_RF_RDY_INT_EN (1 << 0) +#define SPI_INTCTL_MASK 0x37F7Fu +#define SPI_INTCTL_RESET 0 + +/* Interrupt status register */ +#define SPI_INT_STA_INT_CBF (1 << 31) +#define SPI_INT_STA_SSI (1 << 17) +#define SPI_INT_STA_TC (1 << 16) +#define SPI_INT_STA_TU (1 << 14) +#define SPI_INT_STA_TO (1 << 13) +#define SPI_INT_STA_TE34 (1 << 12) +#define SPI_INT_STA_TE14 (1 << 11) +#define SPI_INT_STA_TF (1 << 10) +#define SPI_INT_STA_THE (1 << 9) +#define SPI_INT_STA_TE (1 << 8) +#define SPI_INT_STA_RU (1 << 6) +#define SPI_INT_STA_RO (1 << 5) +#define SPI_INT_STA_RF34 (1 << 4) +#define SPI_INT_STA_RF14 (1 << 3) +#define SPI_INT_STA_RF (1 << 2) +#define SPI_INT_STA_RHF (1 << 1) +#define SPI_INT_STA_RR (1 << 0) +#define SPI_INT_STA_MASK 0x80037F7Fu +#define SPI_INT_STA_RESET 0x00001B00u + +/* DMA control register - not implemented */ +#define SPI_DMACTL_RESET 0 + +/* Wait clock register */ +#define SPI_WAIT_REG_WCC_MASK 0xFFFFu +#define SPI_WAIT_RESET 0 + +/* Clock control register - not implemented */ +#define SPI_CCTL_RESET 2 + +/* Burst count register */ +#define SPI_BC_BC_MASK 0xFFFFFFu +#define SPI_BC_RESET 0 + +/* Transmi counter register */ +#define SPI_TC_WTC_MASK 0xFFFFFFu +#define SPI_TC_RESET 0 + +/* FIFO status register */ +#define SPI_FIFO_STA_CNT_MASK 0x7F +#define SPI_FIFO_STA_TF_CNT_SHIFT 16 +#define SPI_FIFO_STA_RF_CNT_SHIFT 0 +#define SPI_FIFO_STA_RESET 0 + +#define REG_INDEX(offset) (offset / sizeof(uint32_t)) + + +static const char *allwinner_a10_spi_get_regname(unsigned offset) +{ + switch (offset) { + case SPI_RXDATA_REG: + return "RXDATA"; + case SPI_TXDATA_REG: + return "TXDATA"; + case SPI_CTL_REG: + return "CTL"; + case SPI_INTCTL_REG: + return "INTCTL"; + case SPI_INT_STA_REG: + return "INT_STA"; + case SPI_DMACTL_REG: + return "DMACTL"; + case SPI_WAIT_REG: + return "WAIT"; + case SPI_CCTL_REG: + return "CCTL"; + case SPI_BC_REG: + return "BC"; + case SPI_TC_REG: + return "TC"; + case SPI_FIFO_STA_REG: + return "FIFO_STA"; + default: + return "[?]"; + } +} + +static bool allwinner_a10_spi_is_enabled(AWA10SPIState *s) +{ + return s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_EN; +} + +static void allwinner_a10_spi_txfifo_reset(AWA10SPIState *s) +{ + fifo8_reset(&s->tx_fifo); + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= (SPI_INT_STA_TE | SPI_INT_STA_TE14 | + SPI_INT_STA_THE | SPI_INT_STA_TE34); + s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~(SPI_INT_STA_TU | SPI_INT_STA_TO); +} + +static void allwinner_a10_spi_rxfifo_reset(AWA10SPIState *s) +{ + fifo8_reset(&s->rx_fifo); + s->regs[REG_INDEX(SPI_INT_STA_REG)] &= + ~(SPI_INT_STA_RU | SPI_INT_STA_RO | SPI_INT_STA_RF | SPI_INT_STA_RR | + SPI_INT_STA_RHF | SPI_INT_STA_RF14 | SPI_INT_STA_RF34); +} + +static uint8_t allwinner_a10_spi_selected_channel(AWA10SPIState *s) +{ + return (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_SS) >> SPI_CTL_SS_SHIFT; +} + +static void allwinner_a10_spi_reset_hold(Object *obj, ResetType type) +{ + AWA10SPIState *s = AW_A10_SPI(obj); + + s->regs[REG_INDEX(SPI_RXDATA_REG)] = SPI_DATA_RESET; + s->regs[REG_INDEX(SPI_TXDATA_REG)] = SPI_DATA_RESET; + s->regs[REG_INDEX(SPI_CTL_REG)] = SPI_CTL_RESET; + s->regs[REG_INDEX(SPI_INTCTL_REG)] = SPI_INTCTL_RESET; + s->regs[REG_INDEX(SPI_INT_STA_REG)] = SPI_INT_STA_RESET; + s->regs[REG_INDEX(SPI_DMACTL_REG)] = 
SPI_DMACTL_RESET; + s->regs[REG_INDEX(SPI_WAIT_REG)] = SPI_WAIT_RESET; + s->regs[REG_INDEX(SPI_CCTL_REG)] = SPI_CCTL_RESET; + s->regs[REG_INDEX(SPI_BC_REG)] = SPI_BC_RESET; + s->regs[REG_INDEX(SPI_TC_REG)] = SPI_TC_RESET; + s->regs[REG_INDEX(SPI_FIFO_STA_REG)] = SPI_FIFO_STA_RESET; + + allwinner_a10_spi_txfifo_reset(s); + allwinner_a10_spi_rxfifo_reset(s); +} + +static void allwinner_a10_spi_update_irq(AWA10SPIState *s) +{ + bool level; + + if (fifo8_is_empty(&s->rx_fifo)) { + s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_RR; + } else { + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_RR; + } + + if (fifo8_num_used(&s->rx_fifo) >= (AW_A10_SPI_FIFO_SIZE >> 2)) { + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_RF14; + } else { + s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_RF14; + } + + if (fifo8_num_used(&s->rx_fifo) >= (AW_A10_SPI_FIFO_SIZE >> 1)) { + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_RHF; + } else { + s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_RHF; + } + + if (fifo8_num_free(&s->rx_fifo) <= (AW_A10_SPI_FIFO_SIZE >> 2)) { + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_RF34; + } else { + s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_RF34; + } + + if (fifo8_is_full(&s->rx_fifo)) { + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_RF; + } else { + s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_RF; + } + + if (fifo8_is_empty(&s->tx_fifo)) { + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_TE; + } else { + s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_TE; + } + + if (fifo8_num_free(&s->tx_fifo) >= (AW_A10_SPI_FIFO_SIZE >> 2)) { + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_TE14; + } else { + s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_TE14; + } + + if (fifo8_num_free(&s->tx_fifo) >= (AW_A10_SPI_FIFO_SIZE >> 1)) { + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_THE; + } else { + s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_THE; + } + + if (fifo8_num_used(&s->tx_fifo) <= (AW_A10_SPI_FIFO_SIZE >> 2)) { + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_TE34; + } else { + s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_TE34; + } + + if (fifo8_is_full(&s->rx_fifo)) { + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_TF; + } else { + s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~SPI_INT_STA_TF; + } + + level = (s->regs[REG_INDEX(SPI_INT_STA_REG)] & + s->regs[REG_INDEX(SPI_INTCTL_REG)]) != 0; + + qemu_set_irq(s->irq, level); + + trace_allwinner_a10_spi_update_irq(level); +} + +static void allwinner_a10_spi_flush_txfifo(AWA10SPIState *s) +{ + uint32_t burst_count = s->regs[REG_INDEX(SPI_BC_REG)]; + uint32_t tx_burst = s->regs[REG_INDEX(SPI_TC_REG)]; + trace_allwinner_a10_spi_burst_length(tx_burst); + + trace_allwinner_a10_spi_flush_txfifo_begin(fifo8_num_used(&s->tx_fifo), + fifo8_num_used(&s->rx_fifo)); + + while (!fifo8_is_empty(&s->tx_fifo)) { + uint8_t tx = fifo8_pop(&s->tx_fifo); + uint8_t rx = 0; + bool fill_rx = true; + + trace_allwinner_a10_spi_tx(tx); + + /* Write one byte at a time */ + rx = ssi_transfer(s->bus, tx); + + trace_allwinner_a10_spi_rx(rx); + + /* Check DHB here to determine if RX bytes should be stored */ + if (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_DHB) { + /* Store rx bytes only after WTC transfers */ + if (tx_burst > 0u) { + fill_rx = false; + tx_burst--; + } + } + + if (fill_rx) { + if (fifo8_is_full(&s->rx_fifo)) { + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_RF; + } else { + fifo8_push(&s->rx_fifo, rx); + } + } + + allwinner_a10_spi_update_irq(s); + + 
burst_count--; + + if (burst_count == 0) { + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_TC; + s->regs[REG_INDEX(SPI_CTL_REG)] &= ~SPI_CTL_XCH; + break; + } + } + + if (fifo8_is_empty(&s->tx_fifo)) { + s->regs[REG_INDEX(SPI_INT_STA_REG)] |= SPI_INT_STA_TC; + s->regs[REG_INDEX(SPI_CTL_REG)] &= ~SPI_CTL_XCH; + } + + trace_allwinner_a10_spi_flush_txfifo_end(fifo8_num_used(&s->tx_fifo), + fifo8_num_used(&s->rx_fifo)); +} + +static uint64_t allwinner_a10_spi_read(void *opaque, hwaddr offset, + unsigned size) +{ + uint32_t value = 0; + AWA10SPIState *s = opaque; + uint32_t index = offset >> 2; + + if (offset > SPI_FIFO_STA_REG) { + qemu_log_mask(LOG_GUEST_ERROR, + "[%s]%s: Bad register at offset 0x%" HWADDR_PRIx "\n", + TYPE_AW_A10_SPI, __func__, offset); + return 0; + } + + value = s->regs[index]; + + if (allwinner_a10_spi_is_enabled(s)) { + switch (offset) { + case SPI_RXDATA_REG: + if (fifo8_is_empty(&s->rx_fifo)) { + /* value is undefined */ + value = 0xdeadbeef; + } else { + /* read from the RX FIFO */ + value = fifo8_pop(&s->rx_fifo); + } + break; + case SPI_TXDATA_REG: + qemu_log_mask(LOG_GUEST_ERROR, + "[%s]%s: Trying to read from TX FIFO\n", + TYPE_AW_A10_SPI, __func__); + + /* Reading from TXDATA gives 0 */ + break; + case SPI_FIFO_STA_REG: + /* Read current tx/rx fifo data count */ + value = fifo8_num_used(&s->tx_fifo) << SPI_FIFO_STA_TF_CNT_SHIFT | + fifo8_num_used(&s->rx_fifo) << SPI_FIFO_STA_RF_CNT_SHIFT; + break; + case SPI_CTL_REG: + case SPI_INTCTL_REG: + case SPI_INT_STA_REG: + case SPI_DMACTL_REG: + case SPI_WAIT_REG: + case SPI_CCTL_REG: + case SPI_BC_REG: + case SPI_TC_REG: + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad offset 0x%x\n", __func__, + (uint32_t)offset); + break; + } + + allwinner_a10_spi_update_irq(s); + } + trace_allwinner_a10_spi_read(allwinner_a10_spi_get_regname(offset), value); + + return value; +} + +static bool allwinner_a10_spi_update_cs_level(AWA10SPIState *s, int cs_line_nr) +{ + if (cs_line_nr == allwinner_a10_spi_selected_channel(s)) { + return (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_SS_LEVEL) != 0; + } else { + return (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_SSPOL) != 0; + } +} + +static void allwinner_a10_spi_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + AWA10SPIState *s = opaque; + uint32_t index = offset >> 2; + int i = 0; + + if (offset > SPI_FIFO_STA_REG) { + qemu_log_mask(LOG_GUEST_ERROR, + "[%s]%s: Bad register at offset 0x%" HWADDR_PRIx "\n", + TYPE_AW_A10_SPI, __func__, offset); + return; + } + + trace_allwinner_a10_spi_write(allwinner_a10_spi_get_regname(offset), + (uint32_t)value); + + if (!allwinner_a10_spi_is_enabled(s)) { + /* Block is disabled */ + if (offset != SPI_CTL_REG) { + /* Ignore access */ + return; + } + } + + switch (offset) { + case SPI_RXDATA_REG: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Trying to write to RX FIFO\n", + TYPE_AW_A10_SPI, __func__); + break; + case SPI_TXDATA_REG: + if (fifo8_is_full(&s->tx_fifo)) { + /* Ignore writes if queue is full */ + break; + } + + fifo8_push(&s->tx_fifo, (uint8_t)value); + + break; + case SPI_INT_STA_REG: + /* Handle W1C bits - everything except SPI_INT_STA_INT_CBF. 
*/ + value &= ~SPI_INT_STA_INT_CBF; + s->regs[REG_INDEX(SPI_INT_STA_REG)] &= ~(value & SPI_INT_STA_MASK); + break; + case SPI_CTL_REG: + s->regs[REG_INDEX(SPI_CTL_REG)] = value; + + for (i = 0; i < AW_A10_SPI_CS_LINES_NR; i++) { + qemu_set_irq( + s->cs_lines[i], + allwinner_a10_spi_update_cs_level(s, i)); + } + + if (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_XCH) { + /* Request to start emitting */ + allwinner_a10_spi_flush_txfifo(s); + } + if (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_TF_RST) { + allwinner_a10_spi_txfifo_reset(s); + s->regs[REG_INDEX(SPI_CTL_REG)] &= ~SPI_CTL_TF_RST; + } + if (s->regs[REG_INDEX(SPI_CTL_REG)] & SPI_CTL_RF_RST) { + allwinner_a10_spi_rxfifo_reset(s); + s->regs[REG_INDEX(SPI_CTL_REG)] &= ~SPI_CTL_RF_RST; + } + break; + case SPI_INTCTL_REG: + case SPI_DMACTL_REG: + case SPI_WAIT_REG: + case SPI_CCTL_REG: + case SPI_BC_REG: + case SPI_TC_REG: + case SPI_FIFO_STA_REG: + s->regs[index] = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad offset 0x%x\n", __func__, + (uint32_t)offset); + break; + } + + allwinner_a10_spi_update_irq(s); +} + +static const MemoryRegionOps allwinner_a10_spi_ops = { + .read = allwinner_a10_spi_read, + .write = allwinner_a10_spi_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const VMStateDescription allwinner_a10_spi_vmstate = { + .name = TYPE_AW_A10_SPI, + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_FIFO8(tx_fifo, AWA10SPIState), + VMSTATE_FIFO8(rx_fifo, AWA10SPIState), + VMSTATE_UINT32_ARRAY(regs, AWA10SPIState, AW_A10_SPI_REGS_NUM), + VMSTATE_END_OF_LIST() + } +}; + +static void allwinner_a10_spi_realize(DeviceState *dev, Error **errp) +{ + AWA10SPIState *s = AW_A10_SPI(dev); + int i = 0; + + memory_region_init_io(&s->iomem, OBJECT(s), &allwinner_a10_spi_ops, s, + TYPE_AW_A10_SPI, AW_A10_SPI_IOSIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); + + s->bus = ssi_create_bus(dev, "spi"); + for (i = 0; i < AW_A10_SPI_CS_LINES_NR; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cs_lines[i]); + } + fifo8_create(&s->tx_fifo, AW_A10_SPI_FIFO_SIZE); + fifo8_create(&s->rx_fifo, AW_A10_SPI_FIFO_SIZE); +} + +static void allwinner_a10_spi_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->phases.hold = allwinner_a10_spi_reset_hold; + dc->vmsd = &allwinner_a10_spi_vmstate; + dc->realize = allwinner_a10_spi_realize; + dc->desc = "Allwinner A10 SPI Controller"; +} + +static const TypeInfo allwinner_a10_spi_type_info = { + .name = TYPE_AW_A10_SPI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AWA10SPIState), + .class_init = allwinner_a10_spi_class_init, +}; + +static void allwinner_a10_spi_register_types(void) +{ + type_register_static(&allwinner_a10_spi_type_info); +} + +type_init(allwinner_a10_spi_register_types) diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c index f39fb85a35e..e33496f5029 100644 --- a/hw/ssi/aspeed_smc.c +++ b/hw/ssi/aspeed_smc.c @@ -359,7 +359,7 @@ static const MemoryRegionOps aspeed_smc_flash_default_ops = { .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 1, - .max_access_size = 4, + .max_access_size = 8, }, }; @@ -417,7 +417,7 @@ static void aspeed_smc_flash_do_select(AspeedSMCFlash *fl, bool unselect) AspeedSMCState *s = fl->controller; trace_aspeed_smc_flash_select(fl->cs, unselect ? 
"un" : ""); - + s->unselect = unselect; qemu_set_irq(s->cs_lines[fl->cs], unselect); } @@ -670,29 +670,42 @@ static const MemoryRegionOps aspeed_smc_flash_ops = { .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 1, - .max_access_size = 4, + .max_access_size = 8, }, }; static void aspeed_smc_flash_update_ctrl(AspeedSMCFlash *fl, uint32_t value) { AspeedSMCState *s = fl->controller; - bool unselect; + bool unselect = false; + uint32_t old_mode; + uint32_t new_mode; - /* User mode selects the CS, other modes unselect */ - unselect = (value & CTRL_CMD_MODE_MASK) != CTRL_USERMODE; + old_mode = s->regs[s->r_ctrl0 + fl->cs] & CTRL_CMD_MODE_MASK; + new_mode = value & CTRL_CMD_MODE_MASK; - /* A change of CTRL_CE_STOP_ACTIVE from 0 to 1, unselects the CS */ - if (!(s->regs[s->r_ctrl0 + fl->cs] & CTRL_CE_STOP_ACTIVE) && - value & CTRL_CE_STOP_ACTIVE) { - unselect = true; + if (old_mode == CTRL_USERMODE) { + if (new_mode != CTRL_USERMODE) { + unselect = true; + } + + /* A change of CTRL_CE_STOP_ACTIVE from 0 to 1, unselects the CS */ + if (!(s->regs[s->r_ctrl0 + fl->cs] & CTRL_CE_STOP_ACTIVE) && + value & CTRL_CE_STOP_ACTIVE) { + unselect = true; + } + } else { + if (new_mode != CTRL_USERMODE) { + unselect = true; + } } s->regs[s->r_ctrl0 + fl->cs] = value; - s->snoop_index = unselect ? SNOOP_OFF : SNOOP_START; - - aspeed_smc_flash_do_select(fl, unselect); + if (unselect != s->unselect) { + s->snoop_index = unselect ? SNOOP_OFF : SNOOP_START; + aspeed_smc_flash_do_select(fl, unselect); + } } static void aspeed_smc_reset(DeviceState *d) @@ -729,6 +742,8 @@ static void aspeed_smc_reset(DeviceState *d) qemu_set_irq(s->cs_lines[i], true); } + s->unselect = true; + /* setup the default segment register values and regions for all */ for (i = 0; i < asc->cs_num_max; ++i) { aspeed_smc_flash_set_segment_region(s, i, @@ -1261,30 +1276,30 @@ static void aspeed_smc_realize(DeviceState *dev, Error **errp) static const VMStateDescription vmstate_aspeed_smc = { .name = "aspeed.smc", - .version_id = 2, + .version_id = 3, .minimum_version_id = 2, .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedSMCState, ASPEED_SMC_R_MAX), VMSTATE_UINT8(snoop_index, AspeedSMCState), VMSTATE_UINT8(snoop_dummies, AspeedSMCState), + VMSTATE_BOOL_V(unselect, AspeedSMCState, 3), VMSTATE_END_OF_LIST() } }; -static Property aspeed_smc_properties[] = { +static const Property aspeed_smc_properties[] = { DEFINE_PROP_BOOL("inject-failure", AspeedSMCState, inject_failure, false), DEFINE_PROP_UINT64("dram-base", AspeedSMCState, dram_base, 0), DEFINE_PROP_LINK("dram", AspeedSMCState, dram_mr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_smc_class_init(ObjectClass *klass, void *data) +static void aspeed_smc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_smc_realize; - dc->reset = aspeed_smc_reset; + device_class_set_legacy_reset(dc, aspeed_smc_reset); device_class_set_props(dc, aspeed_smc_properties); dc->vmsd = &vmstate_aspeed_smc; } @@ -1320,14 +1335,13 @@ static void aspeed_smc_flash_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); } -static Property aspeed_smc_flash_properties[] = { +static const Property aspeed_smc_flash_properties[] = { DEFINE_PROP_UINT8("cs", AspeedSMCFlash, cs, 0), DEFINE_PROP_LINK("controller", AspeedSMCFlash, controller, TYPE_ASPEED_SMC, AspeedSMCState *), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_smc_flash_class_init(ObjectClass 
*klass, void *data) +static void aspeed_smc_flash_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -1369,7 +1383,7 @@ static const AspeedSegments aspeed_2400_smc_segments[] = { { 0x10000000, 32 * MiB }, }; -static void aspeed_2400_smc_class_init(ObjectClass *klass, void *data) +static void aspeed_2400_smc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -1415,7 +1429,7 @@ static const AspeedSegments aspeed_2400_fmc_segments[] = { { 0x2A000000, 32 * MiB } }; -static void aspeed_2400_fmc_class_init(ObjectClass *klass, void *data) +static void aspeed_2400_fmc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -1459,7 +1473,7 @@ static int aspeed_2400_spi1_addr_width(const AspeedSMCState *s) return s->regs[R_SPI_CTRL0] & CTRL_AST2400_SPI_4BYTE ? 4 : 3; } -static void aspeed_2400_spi1_class_init(ObjectClass *klass, void *data) +static void aspeed_2400_spi1_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -1501,7 +1515,7 @@ static const AspeedSegments aspeed_2500_fmc_segments[] = { { 0x2A000000, 32 * MiB }, }; -static void aspeed_2500_fmc_class_init(ObjectClass *klass, void *data) +static void aspeed_2500_fmc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -1541,7 +1555,7 @@ static const AspeedSegments aspeed_2500_spi1_segments[] = { { 0x32000000, 96 * MiB }, /* end address is readonly */ }; -static void aspeed_2500_spi1_class_init(ObjectClass *klass, void *data) +static void aspeed_2500_spi1_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -1577,7 +1591,7 @@ static const AspeedSegments aspeed_2500_spi2_segments[] = { { 0x3A000000, 96 * MiB }, /* end address is readonly */ }; -static void aspeed_2500_spi2_class_init(ObjectClass *klass, void *data) +static void aspeed_2500_spi2_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -1660,7 +1674,7 @@ static const AspeedSegments aspeed_2600_fmc_segments[] = { { 0x0, 0 }, /* disabled */ }; -static void aspeed_2600_fmc_class_init(ObjectClass *klass, void *data) +static void aspeed_2600_fmc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -1701,7 +1715,7 @@ static const AspeedSegments aspeed_2600_spi1_segments[] = { { 0x0, 0 }, /* disabled */ }; -static void aspeed_2600_spi1_class_init(ObjectClass *klass, void *data) +static void aspeed_2600_spi1_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -1742,7 +1756,7 @@ static const AspeedSegments aspeed_2600_spi2_segments[] = { { 0x0, 0 }, /* disabled */ }; -static void aspeed_2600_spi2_class_init(ObjectClass *klass, void *data) +static void aspeed_2600_spi2_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -1825,7 +1839,7 @@ static const AspeedSegments aspeed_1030_fmc_segments[] = { { 0x0, 0 }, /* disabled */ }; -static void aspeed_1030_fmc_class_init(ObjectClass 
*klass, void *data) +static void aspeed_1030_fmc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -1843,7 +1857,8 @@ static void aspeed_1030_fmc_class_init(ObjectClass *klass, void *data) asc->resets = aspeed_1030_fmc_resets; asc->flash_window_base = 0x80000000; asc->flash_window_size = 0x10000000; - asc->features = ASPEED_SMC_FEATURE_DMA; + asc->features = ASPEED_SMC_FEATURE_DMA | + ASPEED_SMC_FEATURE_WDT_CONTROL; asc->dma_flash_mask = 0x0FFFFFFC; asc->dma_dram_mask = 0x000BFFFC; asc->dma_start_length = 1; @@ -1865,7 +1880,7 @@ static const AspeedSegments aspeed_1030_spi1_segments[] = { { 0x0, 0 }, /* disabled */ }; -static void aspeed_1030_spi1_class_init(ObjectClass *klass, void *data) +static void aspeed_1030_spi1_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -1903,7 +1918,7 @@ static const AspeedSegments aspeed_1030_spi2_segments[] = { { 0x0, 0 }, /* disabled */ }; -static void aspeed_1030_spi2_class_init(ObjectClass *klass, void *data) +static void aspeed_1030_spi2_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -2008,7 +2023,7 @@ static const AspeedSegments aspeed_2700_fmc_segments[] = { { 0x0, 0 }, /* disabled */ }; -static void aspeed_2700_fmc_class_init(ObjectClass *klass, void *data) +static void aspeed_2700_fmc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -2050,7 +2065,7 @@ static const AspeedSegments aspeed_2700_spi0_segments[] = { { 0x0, 0 }, /* disabled */ }; -static void aspeed_2700_spi0_class_init(ObjectClass *klass, void *data) +static void aspeed_2700_spi0_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -2090,7 +2105,7 @@ static const AspeedSegments aspeed_2700_spi1_segments[] = { { 0x0, 0 }, /* disabled */ }; -static void aspeed_2700_spi1_class_init(ObjectClass *klass, void *data) +static void aspeed_2700_spi1_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); @@ -2130,7 +2145,7 @@ static const AspeedSegments aspeed_2700_spi2_segments[] = { { 0x0, 0 }, /* disabled */ }; -static void aspeed_2700_spi2_class_init(ObjectClass *klass, void *data) +static void aspeed_2700_spi2_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); diff --git a/hw/ssi/bcm2835_spi.c b/hw/ssi/bcm2835_spi.c index 6ecb42d4e3b..bf8ba35e04c 100644 --- a/hw/ssi/bcm2835_spi.c +++ b/hw/ssi/bcm2835_spi.c @@ -264,11 +264,11 @@ static const VMStateDescription vmstate_bcm2835_spi = { } }; -static void bcm2835_spi_class_init(ObjectClass *klass, void *data) +static void bcm2835_spi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = bcm2835_spi_reset; + device_class_set_legacy_reset(dc, bcm2835_spi_reset); dc->realize = bcm2835_spi_realize; dc->vmsd = &vmstate_bcm2835_spi; } diff --git a/hw/ssi/ibex_spi_host.c b/hw/ssi/ibex_spi_host.c index 863b5fd60e9..f05be687483 100644 --- a/hw/ssi/ibex_spi_host.c +++ b/hw/ssi/ibex_spi_host.c @@ -154,7 +154,6 @@ static void ibex_spi_host_reset(DeviceState *dev) ibex_spi_txfifo_reset(s); s->init_status 
= true; - return; } /* @@ -561,9 +560,8 @@ static const MemoryRegionOps ibex_spi_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -static Property ibex_spi_properties[] = { +static const Property ibex_spi_properties[] = { DEFINE_PROP_UINT32("num_cs", IbexSPIHostState, num_cs, 1), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_ibex = { @@ -624,11 +622,11 @@ static void ibex_spi_host_init(Object *obj) sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); } -static void ibex_spi_host_class_init(ObjectClass *klass, void *data) +static void ibex_spi_host_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = ibex_spi_host_realize; - dc->reset = ibex_spi_host_reset; + device_class_set_legacy_reset(dc, ibex_spi_host_reset); dc->vmsd = &vmstate_ibex; device_class_set_props(dc, ibex_spi_properties); } diff --git a/hw/ssi/imx_spi.c b/hw/ssi/imx_spi.c index 12d897d306f..1312f588d2e 100644 --- a/hw/ssi/imx_spi.c +++ b/hw/ssi/imx_spi.c @@ -475,13 +475,13 @@ static void imx_spi_realize(DeviceState *dev, Error **errp) fifo32_create(&s->rx_fifo, ECSPI_FIFO_SIZE); } -static void imx_spi_class_init(ObjectClass *klass, void *data) +static void imx_spi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = imx_spi_realize; dc->vmsd = &vmstate_imx_spi; - dc->reset = imx_spi_reset; + device_class_set_legacy_reset(dc, imx_spi_reset); dc->desc = "i.MX SPI Controller"; } diff --git a/hw/ssi/meson.build b/hw/ssi/meson.build index b7ad7fca3b3..6afb1ea2001 100644 --- a/hw/ssi/meson.build +++ b/hw/ssi/meson.build @@ -1,3 +1,4 @@ +system_ss.add(when: 'CONFIG_ALLWINNER_A10_SPI', if_true: files('allwinner-a10-spi.c')) system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_smc.c')) system_ss.add(when: 'CONFIG_MSF2', if_true: files('mss-spi.c')) system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_fiu.c', 'npcm_pspi.c')) @@ -9,7 +10,6 @@ system_ss.add(when: 'CONFIG_XILINX_SPI', if_true: files('xilinx_spi.c')) system_ss.add(when: 'CONFIG_XILINX_SPIPS', if_true: files('xilinx_spips.c')) system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-ospi.c')) system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_spi.c')) -system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_spi.c')) system_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_spi_host.c')) system_ss.add(when: 'CONFIG_BCM2835_SPI', if_true: files('bcm2835_spi.c')) system_ss.add(when: 'CONFIG_PNV_SPI', if_true: files('pnv_spi.c')) diff --git a/hw/ssi/mss-spi.c b/hw/ssi/mss-spi.c index 1d25ba23aa5..fd7ba7eeb04 100644 --- a/hw/ssi/mss-spi.c +++ b/hw/ssi/mss-spi.c @@ -398,12 +398,12 @@ static const VMStateDescription vmstate_mss_spi = { } }; -static void mss_spi_class_init(ObjectClass *klass, void *data) +static void mss_spi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = mss_spi_realize; - dc->reset = mss_spi_reset; + device_class_set_legacy_reset(dc, mss_spi_reset); dc->vmsd = &vmstate_mss_spi; } diff --git a/hw/ssi/npcm7xx_fiu.c b/hw/ssi/npcm7xx_fiu.c index 119c38c4156..056ce13394f 100644 --- a/hw/ssi/npcm7xx_fiu.c +++ b/hw/ssi/npcm7xx_fiu.c @@ -29,7 +29,7 @@ #include "trace.h" /* Up to 128 MiB of flash may be accessed directly as memory. */ -#define NPCM7XX_FIU_FLASH_WINDOW_SIZE (128 * MiB) +#define NPCM7XX_FIU_MAX_FLASH_WINDOW_SIZE (128 * MiB) /* Each module has 4 KiB of register space. Only a fraction of it is used. 
*/ #define NPCM7XX_FIU_CTRL_REGS_SIZE (4 * KiB) @@ -507,6 +507,17 @@ static void npcm7xx_fiu_realize(DeviceState *dev, Error **errp) return; } + if (s->flash_size == 0) { + error_setg(errp, "%s: flash size must be set", dev->canonical_path); + return; + } + + if (s->flash_size > NPCM7XX_FIU_MAX_FLASH_WINDOW_SIZE) { + error_setg(errp, "%s: flash size should not exceed 128 MiB", + dev->canonical_path); + return; + } + s->spi = ssi_create_bus(dev, "spi"); s->cs_lines = g_new0(qemu_irq, s->cs_count); qdev_init_gpio_out_named(DEVICE(s), s->cs_lines, "cs", s->cs_count); @@ -525,7 +536,7 @@ static void npcm7xx_fiu_realize(DeviceState *dev, Error **errp) flash->fiu = s; memory_region_init_io(&flash->direct_access, OBJECT(s), &npcm7xx_fiu_flash_ops, &s->flash[i], "flash", - NPCM7XX_FIU_FLASH_WINDOW_SIZE); + s->flash_size); sysbus_init_mmio(sbd, &flash->direct_access); } } @@ -541,12 +552,12 @@ static const VMStateDescription vmstate_npcm7xx_fiu = { }, }; -static Property npcm7xx_fiu_properties[] = { +static const Property npcm7xx_fiu_properties[] = { DEFINE_PROP_INT32("cs-count", NPCM7xxFIUState, cs_count, 0), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_SIZE("flash-size", NPCM7xxFIUState, flash_size, 0), }; -static void npcm7xx_fiu_class_init(ObjectClass *klass, void *data) +static void npcm7xx_fiu_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/ssi/npcm_pspi.c b/hw/ssi/npcm_pspi.c index 41a53235303..a31dcc050e8 100644 --- a/hw/ssi/npcm_pspi.c +++ b/hw/ssi/npcm_pspi.c @@ -199,7 +199,7 @@ static const VMStateDescription vmstate_npcm_pspi = { }; -static void npcm_pspi_class_init(ObjectClass *klass, void *data) +static void npcm_pspi_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/ssi/omap_spi.c b/hw/ssi/omap_spi.c deleted file mode 100644 index 8f85c3e3918..00000000000 --- a/hw/ssi/omap_spi.c +++ /dev/null @@ -1,380 +0,0 @@ -/* - * TI OMAP processor's Multichannel SPI emulation. - * - * Copyright (C) 2007-2009 Nokia Corporation - * - * Original code for OMAP2 by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) any later version of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- */ - -#include "qemu/osdep.h" -#include "qemu/log.h" -#include "hw/hw.h" -#include "hw/irq.h" -#include "hw/arm/omap.h" - -/* Multichannel SPI */ -struct omap_mcspi_s { - MemoryRegion iomem; - qemu_irq irq; - int chnum; - - uint32_t sysconfig; - uint32_t systest; - uint32_t irqst; - uint32_t irqen; - uint32_t wken; - uint32_t control; - - struct omap_mcspi_ch_s { - qemu_irq txdrq; - qemu_irq rxdrq; - uint32_t (*txrx)(void *opaque, uint32_t, int); - void *opaque; - - uint32_t tx; - uint32_t rx; - - uint32_t config; - uint32_t status; - uint32_t control; - } ch[4]; -}; - -static inline void omap_mcspi_interrupt_update(struct omap_mcspi_s *s) -{ - qemu_set_irq(s->irq, s->irqst & s->irqen); -} - -static inline void omap_mcspi_dmarequest_update(struct omap_mcspi_ch_s *ch) -{ - qemu_set_irq(ch->txdrq, - (ch->control & 1) && /* EN */ - (ch->config & (1 << 14)) && /* DMAW */ - (ch->status & (1 << 1)) && /* TXS */ - ((ch->config >> 12) & 3) != 1); /* TRM */ - qemu_set_irq(ch->rxdrq, - (ch->control & 1) && /* EN */ - (ch->config & (1 << 15)) && /* DMAW */ - (ch->status & (1 << 0)) && /* RXS */ - ((ch->config >> 12) & 3) != 2); /* TRM */ -} - -static void omap_mcspi_transfer_run(struct omap_mcspi_s *s, int chnum) -{ - struct omap_mcspi_ch_s *ch = s->ch + chnum; - - if (!(ch->control & 1)) /* EN */ - return; - if ((ch->status & (1 << 0)) && /* RXS */ - ((ch->config >> 12) & 3) != 2 && /* TRM */ - !(ch->config & (1 << 19))) /* TURBO */ - goto intr_update; - if ((ch->status & (1 << 1)) && /* TXS */ - ((ch->config >> 12) & 3) != 1) /* TRM */ - goto intr_update; - - if (!(s->control & 1) || /* SINGLE */ - (ch->config & (1 << 20))) { /* FORCE */ - if (ch->txrx) - ch->rx = ch->txrx(ch->opaque, ch->tx, /* WL */ - 1 + (0x1f & (ch->config >> 7))); - } - - ch->tx = 0; - ch->status |= 1 << 2; /* EOT */ - ch->status |= 1 << 1; /* TXS */ - if (((ch->config >> 12) & 3) != 2) /* TRM */ - ch->status |= 1 << 0; /* RXS */ - -intr_update: - if ((ch->status & (1 << 0)) && /* RXS */ - ((ch->config >> 12) & 3) != 2 && /* TRM */ - !(ch->config & (1 << 19))) /* TURBO */ - s->irqst |= 1 << (2 + 4 * chnum); /* RX_FULL */ - if ((ch->status & (1 << 1)) && /* TXS */ - ((ch->config >> 12) & 3) != 1) /* TRM */ - s->irqst |= 1 << (0 + 4 * chnum); /* TX_EMPTY */ - omap_mcspi_interrupt_update(s); - omap_mcspi_dmarequest_update(ch); -} - -void omap_mcspi_reset(struct omap_mcspi_s *s) -{ - int ch; - - s->sysconfig = 0; - s->systest = 0; - s->irqst = 0; - s->irqen = 0; - s->wken = 0; - s->control = 4; - - for (ch = 0; ch < 4; ch ++) { - s->ch[ch].config = 0x060000; - s->ch[ch].status = 2; /* TXS */ - s->ch[ch].control = 0; - - omap_mcspi_dmarequest_update(s->ch + ch); - } - - omap_mcspi_interrupt_update(s); -} - -static uint64_t omap_mcspi_read(void *opaque, hwaddr addr, unsigned size) -{ - struct omap_mcspi_s *s = opaque; - int ch = 0; - uint32_t ret; - - if (size != 4) { - return omap_badwidth_read32(opaque, addr); - } - - switch (addr) { - case 0x00: /* MCSPI_REVISION */ - return 0x91; - - case 0x10: /* MCSPI_SYSCONFIG */ - return s->sysconfig; - - case 0x14: /* MCSPI_SYSSTATUS */ - return 1; /* RESETDONE */ - - case 0x18: /* MCSPI_IRQSTATUS */ - return s->irqst; - - case 0x1c: /* MCSPI_IRQENABLE */ - return s->irqen; - - case 0x20: /* MCSPI_WAKEUPENABLE */ - return s->wken; - - case 0x24: /* MCSPI_SYST */ - return s->systest; - - case 0x28: /* MCSPI_MODULCTRL */ - return s->control; - - case 0x68: ch ++; - /* fall through */ - case 0x54: ch ++; - /* fall through */ - case 0x40: ch ++; - /* fall through */ - case 0x2c: /* 
MCSPI_CHCONF */ - return s->ch[ch].config; - - case 0x6c: ch ++; - /* fall through */ - case 0x58: ch ++; - /* fall through */ - case 0x44: ch ++; - /* fall through */ - case 0x30: /* MCSPI_CHSTAT */ - return s->ch[ch].status; - - case 0x70: ch ++; - /* fall through */ - case 0x5c: ch ++; - /* fall through */ - case 0x48: ch ++; - /* fall through */ - case 0x34: /* MCSPI_CHCTRL */ - return s->ch[ch].control; - - case 0x74: ch ++; - /* fall through */ - case 0x60: ch ++; - /* fall through */ - case 0x4c: ch ++; - /* fall through */ - case 0x38: /* MCSPI_TX */ - return s->ch[ch].tx; - - case 0x78: ch ++; - /* fall through */ - case 0x64: ch ++; - /* fall through */ - case 0x50: ch ++; - /* fall through */ - case 0x3c: /* MCSPI_RX */ - s->ch[ch].status &= ~(1 << 0); /* RXS */ - ret = s->ch[ch].rx; - omap_mcspi_transfer_run(s, ch); - return ret; - } - - OMAP_BAD_REG(addr); - return 0; -} - -static void omap_mcspi_write(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - struct omap_mcspi_s *s = opaque; - int ch = 0; - - if (size != 4) { - omap_badwidth_write32(opaque, addr, value); - return; - } - - switch (addr) { - case 0x00: /* MCSPI_REVISION */ - case 0x14: /* MCSPI_SYSSTATUS */ - case 0x30: /* MCSPI_CHSTAT0 */ - case 0x3c: /* MCSPI_RX0 */ - case 0x44: /* MCSPI_CHSTAT1 */ - case 0x50: /* MCSPI_RX1 */ - case 0x58: /* MCSPI_CHSTAT2 */ - case 0x64: /* MCSPI_RX2 */ - case 0x6c: /* MCSPI_CHSTAT3 */ - case 0x78: /* MCSPI_RX3 */ - OMAP_RO_REG(addr); - return; - - case 0x10: /* MCSPI_SYSCONFIG */ - if (value & (1 << 1)) /* SOFTRESET */ - omap_mcspi_reset(s); - s->sysconfig = value & 0x31d; - break; - - case 0x18: /* MCSPI_IRQSTATUS */ - if (!((s->control & (1 << 3)) && (s->systest & (1 << 11)))) { - s->irqst &= ~value; - omap_mcspi_interrupt_update(s); - } - break; - - case 0x1c: /* MCSPI_IRQENABLE */ - s->irqen = value & 0x1777f; - omap_mcspi_interrupt_update(s); - break; - - case 0x20: /* MCSPI_WAKEUPENABLE */ - s->wken = value & 1; - break; - - case 0x24: /* MCSPI_SYST */ - if (s->control & (1 << 3)) /* SYSTEM_TEST */ - if (value & (1 << 11)) { /* SSB */ - s->irqst |= 0x1777f; - omap_mcspi_interrupt_update(s); - } - s->systest = value & 0xfff; - break; - - case 0x28: /* MCSPI_MODULCTRL */ - if (value & (1 << 3)) /* SYSTEM_TEST */ - if (s->systest & (1 << 11)) { /* SSB */ - s->irqst |= 0x1777f; - omap_mcspi_interrupt_update(s); - } - s->control = value & 0xf; - break; - - case 0x68: ch ++; - /* fall through */ - case 0x54: ch ++; - /* fall through */ - case 0x40: ch ++; - /* fall through */ - case 0x2c: /* MCSPI_CHCONF */ - if ((value ^ s->ch[ch].config) & (3 << 14)) /* DMAR | DMAW */ - omap_mcspi_dmarequest_update(s->ch + ch); - if (((value >> 12) & 3) == 3) { /* TRM */ - qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid TRM value (3)\n", - __func__); - } - if (((value >> 7) & 0x1f) < 3) { /* WL */ - qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid WL value (%" PRIx64 ")\n", - __func__, (value >> 7) & 0x1f); - } - s->ch[ch].config = value & 0x7fffff; - break; - - case 0x70: ch ++; - /* fall through */ - case 0x5c: ch ++; - /* fall through */ - case 0x48: ch ++; - /* fall through */ - case 0x34: /* MCSPI_CHCTRL */ - if (value & ~s->ch[ch].control & 1) { /* EN */ - s->ch[ch].control |= 1; - omap_mcspi_transfer_run(s, ch); - } else - s->ch[ch].control = value & 1; - break; - - case 0x74: ch ++; - /* fall through */ - case 0x60: ch ++; - /* fall through */ - case 0x4c: ch ++; - /* fall through */ - case 0x38: /* MCSPI_TX */ - s->ch[ch].tx = value; - s->ch[ch].status &= ~(1 << 1); /* TXS */ - 
omap_mcspi_transfer_run(s, ch); - break; - - default: - OMAP_BAD_REG(addr); - return; - } -} - -static const MemoryRegionOps omap_mcspi_ops = { - .read = omap_mcspi_read, - .write = omap_mcspi_write, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -struct omap_mcspi_s *omap_mcspi_init(struct omap_target_agent_s *ta, int chnum, - qemu_irq irq, qemu_irq *drq, omap_clk fclk, omap_clk iclk) -{ - struct omap_mcspi_s *s = g_new0(struct omap_mcspi_s, 1); - struct omap_mcspi_ch_s *ch = s->ch; - - s->irq = irq; - s->chnum = chnum; - while (chnum --) { - ch->txdrq = *drq ++; - ch->rxdrq = *drq ++; - ch ++; - } - omap_mcspi_reset(s); - - memory_region_init_io(&s->iomem, NULL, &omap_mcspi_ops, s, "omap.mcspi", - omap_l4_region_size(ta, 0)); - omap_l4_attach(ta, 0, &s->iomem); - - return s; -} - -void omap_mcspi_attach(struct omap_mcspi_s *s, - uint32_t (*txrx)(void *opaque, uint32_t, int), void *opaque, - int chipselect) -{ - if (chipselect < 0 || chipselect >= s->chnum) - hw_error("%s: Bad chipselect %i\n", __func__, chipselect); - - s->ch[chipselect].txrx = txrx; - s->ch[chipselect].opaque = opaque; -} diff --git a/hw/ssi/pl022.c b/hw/ssi/pl022.c index b8be8ddf0ea..1dc0bcbcc04 100644 --- a/hw/ssi/pl022.c +++ b/hw/ssi/pl022.c @@ -292,11 +292,11 @@ static void pl022_realize(DeviceState *dev, Error **errp) s->ssi = ssi_create_bus(dev, "ssi"); } -static void pl022_class_init(ObjectClass *klass, void *data) +static void pl022_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = pl022_reset; + device_class_set_legacy_reset(dc, pl022_reset); dc->vmsd = &vmstate_pl022; dc->realize = pl022_realize; } diff --git a/hw/ssi/pnv_spi.c b/hw/ssi/pnv_spi.c index c1297ab7330..f40e8836b9c 100644 --- a/hw/ssi/pnv_spi.c +++ b/hw/ssi/pnv_spi.c @@ -19,6 +19,8 @@ #define PNV_SPI_OPCODE_LO_NIBBLE(x) (x & 0x0F) #define PNV_SPI_MASKED_OPCODE(x) (x & 0xF0) +#define PNV_SPI_FIFO_SIZE 16 +#define RDR_MATCH_FAILURE_LIMIT 16 /* * Macro from include/hw/ppc/fdt.h @@ -35,48 +37,14 @@ } \ } while (0) -/* PnvXferBuffer */ -typedef struct PnvXferBuffer { - - uint32_t len; - uint8_t *data; - -} PnvXferBuffer; - -/* pnv_spi_xfer_buffer_methods */ -static PnvXferBuffer *pnv_spi_xfer_buffer_new(void) -{ - PnvXferBuffer *payload = g_malloc0(sizeof(*payload)); - - return payload; -} - -static void pnv_spi_xfer_buffer_free(PnvXferBuffer *payload) -{ - free(payload->data); - free(payload); -} - -static uint8_t *pnv_spi_xfer_buffer_write_ptr(PnvXferBuffer *payload, - uint32_t offset, uint32_t length) -{ - if (payload->len < (offset + length)) { - payload->len = offset + length; - payload->data = g_realloc(payload->data, payload->len); - } - return &payload->data[offset]; -} - static bool does_rdr_match(PnvSpi *s) { /* * According to spec, the mask bits that are 0 are compared and the * bits that are 1 are ignored. 
*/ - uint16_t rdr_match_mask = GETFIELD(SPI_MM_RDR_MATCH_MASK, - s->regs[SPI_MM_REG]); - uint16_t rdr_match_val = GETFIELD(SPI_MM_RDR_MATCH_VAL, - s->regs[SPI_MM_REG]); + uint16_t rdr_match_mask = GETFIELD(SPI_MM_RDR_MATCH_MASK, s->regs[SPI_MM_REG]); + uint16_t rdr_match_val = GETFIELD(SPI_MM_RDR_MATCH_VAL, s->regs[SPI_MM_REG]); if ((~rdr_match_mask & rdr_match_val) == ((~rdr_match_mask) & GETFIELD(PPC_BITMASK(48, 63), s->regs[SPI_RCV_DATA_REG]))) { @@ -107,8 +75,8 @@ static uint8_t get_from_offset(PnvSpi *s, uint8_t offset) return byte; } -static uint8_t read_from_frame(PnvSpi *s, uint8_t *read_buf, uint8_t nr_bytes, - uint8_t ecc_count, uint8_t shift_in_count) +static uint8_t read_from_frame(PnvSpi *s, uint8_t nr_bytes, uint8_t ecc_count, + uint8_t shift_in_count) { uint8_t byte; int count = 0; @@ -118,20 +86,24 @@ static uint8_t read_from_frame(PnvSpi *s, uint8_t *read_buf, uint8_t nr_bytes, if ((ecc_count != 0) && (shift_in_count == (PNV_SPI_REG_SIZE + ecc_count))) { shift_in_count = 0; - } else { - byte = read_buf[count]; + } else if (!fifo8_is_empty(&s->rx_fifo)) { + byte = fifo8_pop(&s->rx_fifo); trace_pnv_spi_shift_rx(byte, count); s->regs[SPI_RCV_DATA_REG] = (s->regs[SPI_RCV_DATA_REG] << 8) | byte; + } else { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: Reading empty RX_FIFO\n"); } count++; } /* end of while */ return shift_in_count; } -static void spi_response(PnvSpi *s, int bits, PnvXferBuffer *rsp_payload) +static void spi_response(PnvSpi *s) { uint8_t ecc_count; uint8_t shift_in_count; + uint32_t rx_len; + int i; /* * Processing here must handle: @@ -144,13 +116,14 @@ static void spi_response(PnvSpi *s, int bits, PnvXferBuffer *rsp_payload) * First check that the response payload is the exact same * number of bytes as the request payload was */ - if (rsp_payload->len != (s->N1_bytes + s->N2_bytes)) { + rx_len = fifo8_num_used(&s->rx_fifo); + if (rx_len != (s->N1_bytes + s->N2_bytes)) { qemu_log_mask(LOG_GUEST_ERROR, "Invalid response payload size in " "bytes, expected %d, got %d\n", - (s->N1_bytes + s->N2_bytes), rsp_payload->len); + (s->N1_bytes + s->N2_bytes), rx_len); } else { uint8_t ecc_control; - trace_pnv_spi_rx_received(rsp_payload->len); + trace_pnv_spi_rx_received(rx_len); trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx, s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx); /* @@ -175,15 +148,23 @@ static void spi_response(PnvSpi *s, int bits, PnvXferBuffer *rsp_payload) /* Handle the N1 portion of the frame first */ if (s->N1_rx != 0) { trace_pnv_spi_rx_read_N1frame(); - shift_in_count = read_from_frame(s, &rsp_payload->data[0], - s->N1_bytes, ecc_count, shift_in_count); + shift_in_count = read_from_frame(s, s->N1_bytes, ecc_count, shift_in_count); } /* Handle the N2 portion of the frame */ if (s->N2_rx != 0) { + /* pop out N1_bytes from rx_fifo if not already */ + if (s->N1_rx == 0) { + for (i = 0; i < s->N1_bytes; i++) { + if (!fifo8_is_empty(&s->rx_fifo)) { + fifo8_pop(&s->rx_fifo); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: Reading empty" + " RX_FIFO\n"); + } + } + } trace_pnv_spi_rx_read_N2frame(); - shift_in_count = read_from_frame(s, - &rsp_payload->data[s->N1_bytes], s->N2_bytes, - ecc_count, shift_in_count); + shift_in_count = read_from_frame(s, s->N2_bytes, ecc_count, shift_in_count); } if ((s->N1_rx + s->N2_rx) > 0) { /* @@ -210,46 +191,41 @@ static void spi_response(PnvSpi *s, int bits, PnvXferBuffer *rsp_payload) } /* end of else */ } /* end of spi_response() */ -static void transfer(PnvSpi *s, PnvXferBuffer *payload) +static void 
transfer(PnvSpi *s) { - uint32_t tx; - uint32_t rx; - PnvXferBuffer *rsp_payload = NULL; + uint32_t tx, rx, payload_len; + uint8_t rx_byte; - rsp_payload = pnv_spi_xfer_buffer_new(); - for (int offset = 0; offset < payload->len; offset += s->transfer_len) { + payload_len = fifo8_num_used(&s->tx_fifo); + for (int offset = 0; offset < payload_len; offset += s->transfer_len) { tx = 0; for (int i = 0; i < s->transfer_len; i++) { - if ((offset + i) >= payload->len) { + if ((offset + i) >= payload_len) { tx <<= 8; + } else if (!fifo8_is_empty(&s->tx_fifo)) { + tx = (tx << 8) | fifo8_pop(&s->tx_fifo); } else { - tx = (tx << 8) | payload->data[offset + i]; + qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO underflow\n"); } } rx = ssi_transfer(s->ssi_bus, tx); for (int i = 0; i < s->transfer_len; i++) { - if ((offset + i) >= payload->len) { + if ((offset + i) >= payload_len) { + break; + } + rx_byte = (rx >> (8 * (s->transfer_len - 1) - i * 8)) & 0xFF; + if (!fifo8_is_full(&s->rx_fifo)) { + fifo8_push(&s->rx_fifo, rx_byte); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: RX_FIFO is full\n"); break; } - *(pnv_spi_xfer_buffer_write_ptr(rsp_payload, rsp_payload->len, 1)) = - (rx >> (8 * (s->transfer_len - 1) - i * 8)) & 0xFF; } } - if (rsp_payload != NULL) { - spi_response(s, s->N1_bits, rsp_payload); - } -} - -static inline uint8_t get_seq_index(PnvSpi *s) -{ - return GETFIELD(SPI_STS_SEQ_INDEX, s->status); -} - -static inline void next_sequencer_fsm(PnvSpi *s) -{ - uint8_t seq_index = get_seq_index(s); - s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, (seq_index + 1)); - s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT); + spi_response(s); + /* Reset fifo for next frame */ + fifo8_reset(&s->tx_fifo); + fifo8_reset(&s->rx_fifo); } /* @@ -308,13 +284,11 @@ static void calculate_N1(PnvSpi *s, uint8_t opcode) * If Forced Implicit mode and count control doesn't * indicate transmit then reset the tx count to 0 */ - if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2, - s->regs[SPI_CTR_CFG_REG]) == 0) { + if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 0) { s->N1_tx = 0; } /* If rx count control for N1 is set, load the rx value */ - if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3, - s->regs[SPI_CTR_CFG_REG]) == 1) { + if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) { s->N1_rx = s->N1_bytes; } } @@ -326,8 +300,7 @@ static void calculate_N1(PnvSpi *s, uint8_t opcode) * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM * error bit. 
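For readers unfamiliar with QEMU's Fifo8 helper, the pnv_spi rework in this hunk swaps the heap-allocated PnvXferBuffer for two fixed-capacity byte FIFOs and guards every push/pop. A minimal, illustrative sketch of that pattern follows; the DemoSpi struct and loopback behaviour are stand-ins for this note only, not the real PnvSpi layout.

#include "qemu/osdep.h"
#include "qemu/fifo8.h"

/* Stand-in device state: only the two FIFOs matter for this sketch. */
typedef struct DemoSpi {
    Fifo8 tx_fifo;
    Fifo8 rx_fifo;
} DemoSpi;

static void demo_spi_init(DemoSpi *s)
{
    /* Same capacity the patch picks for PNV_SPI_FIFO_SIZE. */
    fifo8_create(&s->tx_fifo, 16);
    fifo8_create(&s->rx_fifo, 16);
}

/* Guarded push/pop mirrors the overflow/underflow checks added above. */
static void demo_spi_shift_byte(DemoSpi *s, uint8_t tx_byte)
{
    if (!fifo8_is_full(&s->tx_fifo)) {
        fifo8_push(&s->tx_fifo, tx_byte);
    }
    if (!fifo8_is_empty(&s->tx_fifo)) {
        uint8_t byte = fifo8_pop(&s->tx_fifo);

        if (!fifo8_is_full(&s->rx_fifo)) {
            fifo8_push(&s->rx_fifo, byte);  /* loopback, for illustration */
        }
    }
}

static void demo_spi_end_of_frame(DemoSpi *s)
{
    /* Both FIFOs are drained between frames, as the new transfer() does. */
    fifo8_reset(&s->tx_fifo);
    fifo8_reset(&s->rx_fifo);
}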
*/ - uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, - s->regs[SPI_CLK_CFG_REG]); + uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]); if (ecc_control == 0 || ecc_control == 2) { if (s->N1_bytes > (PNV_SPI_REG_SIZE + 1)) { qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size when " @@ -338,8 +311,7 @@ static void calculate_N1(PnvSpi *s, uint8_t opcode) } } else if (s->N1_bytes > PNV_SPI_REG_SIZE) { qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size, " - "bytes = 0x%x, bits = 0x%x\n", - s->N1_bytes, s->N1_bits); + "bytes = 0x%x, bits = 0x%x\n", s->N1_bytes, s->N1_bits); s->N1_bytes = PNV_SPI_REG_SIZE; s->N1_bits = s->N1_bytes * 8; } @@ -348,19 +320,10 @@ static void calculate_N1(PnvSpi *s, uint8_t opcode) /* * Shift_N1 operation handler method */ -static bool operation_shiftn1(PnvSpi *s, uint8_t opcode, - PnvXferBuffer **payload, bool send_n1_alone) +static bool operation_shiftn1(PnvSpi *s, uint8_t opcode, bool send_n1_alone) { uint8_t n1_count; bool stop = false; - - /* - * If there isn't a current payload left over from a stopped sequence - * create a new one. - */ - if (*payload == NULL) { - *payload = pnv_spi_xfer_buffer_new(); - } /* * Use a combination of N1 counters to build the N1 portion of the * transmit payload. @@ -411,9 +374,13 @@ static bool operation_shiftn1(PnvSpi *s, uint8_t opcode, */ uint8_t n1_byte = 0x00; n1_byte = get_from_offset(s, n1_count); - trace_pnv_spi_tx_append("n1_byte", n1_byte, n1_count); - *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1)) = - n1_byte; + if (!fifo8_is_full(&s->tx_fifo)) { + trace_pnv_spi_tx_append("n1_byte", n1_byte, n1_count); + fifo8_push(&s->tx_fifo, n1_byte); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n"); + break; + } } else { /* * We hit a shift_n1 opcode TX but the TDR is empty, tell the @@ -434,16 +401,17 @@ static bool operation_shiftn1(PnvSpi *s, uint8_t opcode, * - we are receiving and the RDR is empty so we allow the operation * to proceed. 
*/ - if ((s->N1_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL, - s->status) == 1)) { + if ((s->N1_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) { trace_pnv_spi_sequencer_stop_requested("shift N1" "set for receive but RDR is full"); stop = true; break; - } else { + } else if (!fifo8_is_full(&s->tx_fifo)) { trace_pnv_spi_tx_append_FF("n1_byte"); - *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1)) - = 0xff; + fifo8_push(&s->tx_fifo, 0xff); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n"); + break; } } n1_count++; @@ -484,15 +452,13 @@ static bool operation_shiftn1(PnvSpi *s, uint8_t opcode, */ if (send_n1_alone && !stop) { /* We have a TX and a full TDR or an RX and an empty RDR */ - trace_pnv_spi_tx_request("Shifting N1 frame", (*payload)->len); - transfer(s, *payload); + trace_pnv_spi_tx_request("Shifting N1 frame", fifo8_num_used(&s->tx_fifo)); + transfer(s); /* The N1 frame shift is complete so reset the N1 counters */ s->N2_bits = 0; s->N2_bytes = 0; s->N2_tx = 0; s->N2_rx = 0; - pnv_spi_xfer_buffer_free(*payload); - *payload = NULL; } return stop; } /* end of operation_shiftn1() */ @@ -550,13 +516,11 @@ static void calculate_N2(PnvSpi *s, uint8_t opcode) * If Forced Implicit mode and count control doesn't * indicate a receive then reset the rx count to 0 */ - if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3, - s->regs[SPI_CTR_CFG_REG]) == 0) { + if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 0) { s->N2_rx = 0; } /* If tx count control for N2 is set, load the tx value */ - if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2, - s->regs[SPI_CTR_CFG_REG]) == 1) { + if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) { s->N2_tx = s->N2_bytes; } } @@ -569,8 +533,7 @@ static void calculate_N2(PnvSpi *s, uint8_t opcode) * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM * error bit. */ - uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, - s->regs[SPI_CLK_CFG_REG]); + uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]); if (ecc_control == 0 || ecc_control == 2) { if (s->N2_bytes > (PNV_SPI_REG_SIZE + 1)) { /* Unsupported N2 shift size when ECC enabled */ @@ -588,19 +551,10 @@ static void calculate_N2(PnvSpi *s, uint8_t opcode) * Shift_N2 operation handler method */ -static bool operation_shiftn2(PnvSpi *s, uint8_t opcode, - PnvXferBuffer **payload) +static bool operation_shiftn2(PnvSpi *s, uint8_t opcode) { uint8_t n2_count; bool stop = false; - - /* - * If there isn't a current payload left over from a stopped sequence - * create a new one. - */ - if (*payload == NULL) { - *payload = pnv_spi_xfer_buffer_new(); - } /* * Use a combination of N2 counters to build the N2 portion of the * transmit payload. @@ -627,44 +581,47 @@ static bool operation_shiftn2(PnvSpi *s, uint8_t opcode, * code continue will end up building the payload twice in the same * buffer since RDR full causes a sequence stop and restart. 
*/ - if ((s->N2_rx != 0) && - (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) { + if ((s->N2_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) { trace_pnv_spi_sequencer_stop_requested("shift N2 set" "for receive but RDR is full"); stop = true; break; } - if ((s->N2_tx != 0) && ((s->N1_tx + n2_count) < - PNV_SPI_REG_SIZE)) { + if ((s->N2_tx != 0) && ((s->N1_tx + n2_count) < PNV_SPI_REG_SIZE)) { /* Always append data for the N2 segment if it is set for TX */ uint8_t n2_byte = 0x00; n2_byte = get_from_offset(s, (s->N1_tx + n2_count)); - trace_pnv_spi_tx_append("n2_byte", n2_byte, (s->N1_tx + n2_count)); - *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1)) - = n2_byte; - } else { + if (!fifo8_is_full(&s->tx_fifo)) { + trace_pnv_spi_tx_append("n2_byte", n2_byte, (s->N1_tx + n2_count)); + fifo8_push(&s->tx_fifo, n2_byte); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n"); + break; + } + } else if (!fifo8_is_full(&s->tx_fifo)) { /* * Regardless of whether or not N2 is set for TX or RX, we need * the number of bytes in the payload to match the overall length * of the operation. */ trace_pnv_spi_tx_append_FF("n2_byte"); - *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1)) - = 0xff; + fifo8_push(&s->tx_fifo, 0xff); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n"); + break; } n2_count++; } /* end of while */ if (!stop) { /* We have a TX and a full TDR or an RX and an empty RDR */ - trace_pnv_spi_tx_request("Shifting N2 frame", (*payload)->len); - transfer(s, *payload); + trace_pnv_spi_tx_request("Shifting N2 frame", fifo8_num_used(&s->tx_fifo)); + transfer(s); /* * If we are doing an N2 TX and the TDR is full we need to clear the * TDR_full status. Do this here instead of up in the loop above so we * don't log the message in every loop iteration. */ - if ((s->N2_tx != 0) && - (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) { + if ((s->N2_tx != 0) && (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) { s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0); } /* @@ -680,8 +637,6 @@ static bool operation_shiftn2(PnvSpi *s, uint8_t opcode, s->N1_bytes = 0; s->N1_tx = 0; s->N1_rx = 0; - pnv_spi_xfer_buffer_free(*payload); - *payload = NULL; } return stop; } /* end of operation_shiftn2()*/ @@ -698,20 +653,8 @@ static void operation_sequencer(PnvSpi *s) bool stop = false; /* Flag to stop the sequencer */ uint8_t opcode = 0; uint8_t masked_opcode = 0; + uint8_t seq_index; - /* - * PnvXferBuffer for containing the payload of the SPI frame. - * This is a static because there are cases where a sequence has to stop - * and wait for the target application to unload the RDR. If this occurs - * during a sequence where N1 is not sent alone and instead combined with - * N2 since the N1 tx length + the N2 tx length is less than the size of - * the TDR. - */ - static PnvXferBuffer *payload; - - if (payload == NULL) { - payload = pnv_spi_xfer_buffer_new(); - } /* * Clear the sequencer FSM error bit - general_SPI_status[3] * before starting a sequence. @@ -724,12 +667,17 @@ static void operation_sequencer(PnvSpi *s) if (GETFIELD(SPI_STS_SEQ_FSM, s->status) == SEQ_STATE_IDLE) { s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0); } + /* + * SPI_STS_SEQ_INDEX of status register is kept in seq_index variable and + * updated back to status register at the end of operation_sequencer(). 
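The refactor described in the comment above stops round-tripping the sequencer index through the status register on every step: it is read into a local once and folded back when operation_sequencer() exits. A schematic of that pattern using the generic bitops helpers; the field position and width here are made up for illustration, not the real SPI_STS layout.

#include "qemu/osdep.h"
#include "qemu/bitops.h"

#define DEMO_NUM_SEQ_OPS     8
/* Hypothetical field position: bits 8..11 hold the sequencer index. */
#define DEMO_SEQ_INDEX_SHIFT 8
#define DEMO_SEQ_INDEX_LEN   4

static uint64_t demo_run_sequencer(uint64_t status, const uint8_t *seq_op)
{
    /* Read the index out of the status word once ... */
    uint8_t seq_index = extract64(status, DEMO_SEQ_INDEX_SHIFT,
                                  DEMO_SEQ_INDEX_LEN);

    while (seq_index < DEMO_NUM_SEQ_OPS) {
        uint8_t opcode = seq_op[seq_index];

        if (opcode == 0) {      /* stand-in for SEQ_OP_STOP */
            break;
        }
        seq_index++;            /* ... advance it locally ... */
    }

    /* ... and write it back into the status word exactly once. */
    return deposit64(status, DEMO_SEQ_INDEX_SHIFT, DEMO_SEQ_INDEX_LEN,
                     seq_index);
}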
+ */ + seq_index = GETFIELD(SPI_STS_SEQ_INDEX, s->status); /* * There are only 8 possible operation IDs to iterate through though * some operations may cause more than one frame to be sequenced. */ - while (get_seq_index(s) < NUM_SEQ_OPS) { - opcode = s->seq_op[get_seq_index(s)]; + while (seq_index < NUM_SEQ_OPS) { + opcode = s->seq_op[seq_index]; /* Set sequencer state to decode */ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_DECODE); /* @@ -746,7 +694,7 @@ static void operation_sequencer(PnvSpi *s) case SEQ_OP_STOP: s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE); /* A stop operation in any position stops the sequencer */ - trace_pnv_spi_sequencer_op("STOP", get_seq_index(s)); + trace_pnv_spi_sequencer_op("STOP", seq_index); stop = true; s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE); @@ -757,7 +705,7 @@ static void operation_sequencer(PnvSpi *s) case SEQ_OP_SELECT_SLAVE: s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE); - trace_pnv_spi_sequencer_op("SELECT_SLAVE", get_seq_index(s)); + trace_pnv_spi_sequencer_op("SELECT_SLAVE", seq_index); /* * This device currently only supports a single responder * connection at position 0. De-selecting a responder is fine @@ -768,15 +716,12 @@ static void operation_sequencer(PnvSpi *s) if (s->responder_select == 0) { trace_pnv_spi_shifter_done(); qemu_set_irq(s->cs_line[0], 1); - s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, - (get_seq_index(s) + 1)); + seq_index++; s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_DONE); } else if (s->responder_select != 1) { qemu_log_mask(LOG_GUEST_ERROR, "Slave selection other than 1 " - "not supported, select = 0x%x\n", - s->responder_select); - trace_pnv_spi_sequencer_stop_requested("invalid " - "responder select"); + "not supported, select = 0x%x\n", s->responder_select); + trace_pnv_spi_sequencer_stop_requested("invalid responder select"); s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE); stop = true; } else { @@ -796,13 +741,15 @@ static void operation_sequencer(PnvSpi *s) * applies once a valid responder select has occurred. */ s->shift_n1_done = false; - next_sequencer_fsm(s); + seq_index++; + s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, + SEQ_STATE_INDEX_INCREMENT); } break; case SEQ_OP_SHIFT_N1: s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE); - trace_pnv_spi_sequencer_op("SHIFT_N1", get_seq_index(s)); + trace_pnv_spi_sequencer_op("SHIFT_N1", seq_index); /* * Only allow a shift_n1 when the state is not IDLE or DONE. * In either of those two cases the sequencer is not in a proper @@ -834,13 +781,13 @@ static void operation_sequencer(PnvSpi *s) * transmission to the responder without requiring a refill of * the TDR between the two operations. 
*/ - if (PNV_SPI_MASKED_OPCODE(s->seq_op[get_seq_index(s) + 1]) - == SEQ_OP_SHIFT_N2) { + if ((seq_index != 7) && + PNV_SPI_MASKED_OPCODE(s->seq_op[(seq_index + 1)]) == + SEQ_OP_SHIFT_N2) { send_n1_alone = false; } - s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, - FSM_SHIFT_N1); - stop = operation_shiftn1(s, opcode, &payload, send_n1_alone); + s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_SHIFT_N1); + stop = operation_shiftn1(s, opcode, send_n1_alone); if (stop) { /* * The operation code says to stop, this can occur if: @@ -857,27 +804,27 @@ static void operation_sequencer(PnvSpi *s) s->shift_n1_done = true; s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_SHIFT_N2); - s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, - (get_seq_index(s) + 1)); + seq_index++; } else { /* * This is case (1) or (2) so the sequencer needs to * wait and NOT go to the next sequence yet. */ - s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, - FSM_WAIT); + s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT); } } else { /* Ok to move on to the next index */ s->shift_n1_done = true; - next_sequencer_fsm(s); + seq_index++; + s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, + SEQ_STATE_INDEX_INCREMENT); } } break; case SEQ_OP_SHIFT_N2: s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE); - trace_pnv_spi_sequencer_op("SHIFT_N2", get_seq_index(s)); + trace_pnv_spi_sequencer_op("SHIFT_N2", seq_index); if (!s->shift_n1_done) { qemu_log_mask(LOG_GUEST_ERROR, "Shift_N2 is not allowed if a " "Shift_N1 is not done, shifter state = 0x%llx", @@ -888,31 +835,30 @@ static void operation_sequencer(PnvSpi *s) * error bit 3 (general_SPI_status[3]) in status reg. */ s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1); - trace_pnv_spi_sequencer_stop_requested("shift_n2 " - "w/no shift_n1 done"); + trace_pnv_spi_sequencer_stop_requested("shift_n2 w/no shift_n1 done"); stop = true; } else { /* Ok to do a Shift_N2 */ - s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, - FSM_SHIFT_N2); - stop = operation_shiftn2(s, opcode, &payload); + s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_SHIFT_N2); + stop = operation_shiftn2(s, opcode); /* * If the operation code says to stop set the shifter state to * wait and stop */ if (stop) { - s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, - FSM_WAIT); + s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT); } else { /* Ok to move on to the next index */ - next_sequencer_fsm(s); + seq_index++; + s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, + SEQ_STATE_INDEX_INCREMENT); } } break; case SEQ_OP_BRANCH_IFNEQ_RDR: s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE); - trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_RDR", get_seq_index(s)); + trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_RDR", seq_index); /* * The memory mapping register RDR match value is compared against * the 16 rightmost bytes of the RDR (potentially with masking). @@ -927,16 +873,26 @@ static void operation_sequencer(PnvSpi *s) rdr_matched = does_rdr_match(s); if (rdr_matched) { trace_pnv_spi_RDR_match("success"); + s->fail_count = 0; /* A match occurred, increment the sequencer index. */ - next_sequencer_fsm(s); + seq_index++; + s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, + SEQ_STATE_INDEX_INCREMENT); } else { trace_pnv_spi_RDR_match("failed"); + s->fail_count++; /* * Branch the sequencer to the index coded into the op * code. 
*/ - s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, - PNV_SPI_OPCODE_LO_NIBBLE(opcode)); + seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode); + } + if (s->fail_count >= RDR_MATCH_FAILURE_LIMIT) { + qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: RDR match failure" + " limit crossed %d times hence requesting " + "sequencer to stop.\n", + RDR_MATCH_FAILURE_LIMIT); + stop = true; } /* * Regardless of where the branch ended up we want the @@ -955,12 +911,13 @@ static void operation_sequencer(PnvSpi *s) case SEQ_OP_TRANSFER_TDR: s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE); qemu_log_mask(LOG_GUEST_ERROR, "Transfer TDR is not supported\n"); - next_sequencer_fsm(s); + seq_index++; + s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT); break; case SEQ_OP_BRANCH_IFNEQ_INC_1: s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE); - trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_1", get_seq_index(s)); + trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_1", seq_index); /* * The spec says the loop should execute count compare + 1 times. * However we learned from engineering that we really only loop @@ -974,18 +931,19 @@ static void operation_sequencer(PnvSpi *s) * mask off all but the first three bits so we don't try to * access beyond the sequencer_operation_reg boundary. */ - s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, - PNV_SPI_OPCODE_LO_NIBBLE(opcode)); + seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode); s->loop_counter_1++; } else { /* Continue to next index if loop counter is reached */ - next_sequencer_fsm(s); + seq_index++; + s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, + SEQ_STATE_INDEX_INCREMENT); } break; case SEQ_OP_BRANCH_IFNEQ_INC_2: s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE); - trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_2", get_seq_index(s)); + trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_2", seq_index); uint8_t condition2 = GETFIELD(SPI_CTR_CFG_CMP2, s->regs[SPI_CTR_CFG_REG]); /* @@ -1000,19 +958,21 @@ static void operation_sequencer(PnvSpi *s) * mask off all but the first three bits so we don't try to * access beyond the sequencer_operation_reg boundary. */ - s->status = SETFIELD(SPI_STS_SEQ_INDEX, - s->status, PNV_SPI_OPCODE_LO_NIBBLE(opcode)); + seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode); s->loop_counter_2++; } else { /* Continue to next index if loop counter is reached */ - next_sequencer_fsm(s); + seq_index++; + s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, + SEQ_STATE_INDEX_INCREMENT); } break; default: s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE); /* Ignore unsupported operations. */ - next_sequencer_fsm(s); + seq_index++; + s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT); break; } /* end of switch */ /* @@ -1020,10 +980,10 @@ static void operation_sequencer(PnvSpi *s) * we need to go ahead and end things as if there was a STOP at the * end. 
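RDR_MATCH_FAILURE_LIMIT exists so a guest that never produces a matching RDR value cannot keep branching the sequencer back on itself forever. Stripped of the device context, the guard is an ordinary bounded-retry loop; a small self-contained sketch of that idea (the predicate, context pointer and limit are placeholders, and the real code spreads the counter across sequencer iterations rather than spinning in one loop):

#include <stdbool.h>
#include <stdint.h>

#define DEMO_FAILURE_LIMIT 16

/* Returns true if a match was seen before the retry budget ran out. */
static bool demo_wait_for_match(bool (*matches)(void *ctx), void *ctx)
{
    unsigned fail_count = 0;

    while (!matches(ctx)) {
        if (++fail_count >= DEMO_FAILURE_LIMIT) {
            return false;   /* give up instead of looping forever */
        }
    }
    return true;
}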
*/ - if (get_seq_index(s) == NUM_SEQ_OPS) { + if (seq_index == NUM_SEQ_OPS) { /* All 8 opcodes completed, sequencer idling */ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE); - s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0); + seq_index = 0; s->loop_counter_1 = 0; s->loop_counter_2 = 0; s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE); @@ -1034,7 +994,8 @@ static void operation_sequencer(PnvSpi *s) break; } } /* end of while */ - return; + /* Update sequencer index field in status.*/ + s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, seq_index); } /* end of operation_sequencer() */ /* @@ -1180,7 +1141,6 @@ static void pnv_spi_xscom_write(void *opaque, hwaddr addr, qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom " "write at 0x%" PRIx32 "\n", reg); } - return; } static const MemoryRegionOps pnv_spi_xscom_ops = { @@ -1193,21 +1153,24 @@ static const MemoryRegionOps pnv_spi_xscom_ops = { .endianness = DEVICE_BIG_ENDIAN, }; -static Property pnv_spi_properties[] = { +static const Property pnv_spi_properties[] = { DEFINE_PROP_UINT32("spic_num", PnvSpi, spic_num, 0), + DEFINE_PROP_UINT32("chip-id", PnvSpi, chip_id, 0), DEFINE_PROP_UINT8("transfer_len", PnvSpi, transfer_len, 4), - DEFINE_PROP_END_OF_LIST(), }; static void pnv_spi_realize(DeviceState *dev, Error **errp) { PnvSpi *s = PNV_SPI(dev); - g_autofree char *name = g_strdup_printf(TYPE_PNV_SPI_BUS ".%d", - s->spic_num); + g_autofree char *name = g_strdup_printf("chip%d." TYPE_PNV_SPI_BUS ".%d", + s->chip_id, s->spic_num); s->ssi_bus = ssi_create_bus(dev, name); s->cs_line = g_new0(qemu_irq, 1); qdev_init_gpio_out_named(DEVICE(s), s->cs_line, "cs", 1); + fifo8_create(&s->tx_fifo, PNV_SPI_FIFO_SIZE); + fifo8_create(&s->rx_fifo, PNV_SPI_FIFO_SIZE); + /* spi scoms */ pnv_xscom_region_init(&s->xscom_spic_regs, OBJECT(s), &pnv_spi_xscom_ops, s, "xscom-spi", PNV10_XSCOM_PIB_SPIC_SIZE); @@ -1236,7 +1199,7 @@ static int pnv_spi_dt_xscom(PnvXScomInterface *dev, void *fdt, return 0; } -static void pnv_spi_class_init(ObjectClass *klass, void *data) +static void pnv_spi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvXScomInterfaceClass *xscomc = PNV_XSCOM_INTERFACE_CLASS(klass); @@ -1245,7 +1208,7 @@ static void pnv_spi_class_init(ObjectClass *klass, void *data) dc->desc = "PowerNV SPI"; dc->realize = pnv_spi_realize; - dc->reset = do_reset; + device_class_set_legacy_reset(dc, do_reset); device_class_set_props(dc, pnv_spi_properties); } @@ -1254,7 +1217,7 @@ static const TypeInfo pnv_spi_info = { .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(PnvSpi), .class_init = pnv_spi_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_PNV_XSCOM_INTERFACE }, { } } diff --git a/hw/ssi/sifive_spi.c b/hw/ssi/sifive_spi.c index 1b4a401ca18..3e01fef61cd 100644 --- a/hw/ssi/sifive_spi.c +++ b/hw/ssi/sifive_spi.c @@ -328,17 +328,16 @@ static void sifive_spi_realize(DeviceState *dev, Error **errp) fifo8_create(&s->rx_fifo, FIFO_CAPACITY); } -static Property sifive_spi_properties[] = { +static const Property sifive_spi_properties[] = { DEFINE_PROP_UINT32("num-cs", SiFiveSPIState, num_cs, 1), - DEFINE_PROP_END_OF_LIST(), }; -static void sifive_spi_class_init(ObjectClass *klass, void *data) +static void sifive_spi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, sifive_spi_properties); - dc->reset = sifive_spi_reset; + device_class_set_legacy_reset(dc, 
sifive_spi_reset); dc->realize = sifive_spi_realize; } diff --git a/hw/ssi/ssi.c b/hw/ssi/ssi.c index 3f357e8f16a..d0de640fe64 100644 --- a/hw/ssi/ssi.c +++ b/hw/ssi/ssi.c @@ -55,7 +55,7 @@ static bool ssi_bus_check_address(BusState *b, DeviceState *dev, Error **errp) return true; } -static void ssi_bus_class_init(ObjectClass *klass, void *data) +static void ssi_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); @@ -108,12 +108,11 @@ static void ssi_peripheral_realize(DeviceState *dev, Error **errp) ssc->realize(s, errp); } -static Property ssi_peripheral_properties[] = { +static const Property ssi_peripheral_properties[] = { DEFINE_PROP_UINT8("cs", SSIPeripheral, cs_index, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void ssi_peripheral_class_init(ObjectClass *klass, void *data) +static void ssi_peripheral_class_init(ObjectClass *klass, const void *data) { SSIPeripheralClass *ssc = SSI_PERIPHERAL_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/ssi/stm32f2xx_spi.c b/hw/ssi/stm32f2xx_spi.c index a37139fe5ac..871d57324d1 100644 --- a/hw/ssi/stm32f2xx_spi.c +++ b/hw/ssi/stm32f2xx_spi.c @@ -202,11 +202,11 @@ static void stm32f2xx_spi_init(Object *obj) s->ssi = ssi_create_bus(dev, "ssi"); } -static void stm32f2xx_spi_class_init(ObjectClass *klass, void *data) +static void stm32f2xx_spi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = stm32f2xx_spi_reset; + device_class_set_legacy_reset(dc, stm32f2xx_spi_reset); dc->vmsd = &vmstate_stm32f2xx_spi; } diff --git a/hw/ssi/trace-events b/hw/ssi/trace-events index 089d269994a..2f36cf96b8b 100644 --- a/hw/ssi/trace-events +++ b/hw/ssi/trace-events @@ -53,3 +53,13 @@ pnv_spi_rx_read_N2frame(void) "" pnv_spi_shift_rx(uint8_t byte, uint32_t index) "byte = 0x%2.2x into RDR from payload index %d" pnv_spi_sequencer_stop_requested(const char* reason) "due to %s" pnv_spi_RDR_match(const char* result) "%s" + +# allwinner_a10_spi.c +allwinner_a10_spi_update_irq(uint32_t level) "IRQ level is %d" +allwinner_a10_spi_flush_txfifo_begin(uint32_t tx, uint32_t rx) "Begin: TX Fifo Size = %d, RX Fifo Size = %d" +allwinner_a10_spi_flush_txfifo_end(uint32_t tx, uint32_t rx) "End: TX Fifo Size = %d, RX Fifo Size = %d" +allwinner_a10_spi_burst_length(uint32_t len) "Burst length = %d" +allwinner_a10_spi_tx(uint8_t byte) "write 0x%02x" +allwinner_a10_spi_rx(uint8_t byte) "read 0x%02x" +allwinner_a10_spi_read(const char* regname, uint32_t value) "reg[%s] => 0x%08x" +allwinner_a10_spi_write(const char* regname, uint32_t value) "reg[%s] <= 0x%08x" diff --git a/hw/ssi/xilinx_spi.c b/hw/ssi/xilinx_spi.c index 2e0687ac907..4144c8a6270 100644 --- a/hw/ssi/xilinx_spi.c +++ b/hw/ssi/xilinx_spi.c @@ -25,6 +25,7 @@ */ #include "qemu/osdep.h" +#include "qapi/error.h" #include "hw/sysbus.h" #include "migration/vmstate.h" #include "qemu/module.h" @@ -32,6 +33,7 @@ #include "hw/irq.h" #include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "hw/ssi/ssi.h" #include "qom/object.h" @@ -83,6 +85,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(XilinxSPI, XILINX_SPI) struct XilinxSPI { SysBusDevice parent_obj; + EndianMode model_endianness; MemoryRegion mmio; qemu_irq irq; @@ -313,14 +316,17 @@ spi_write(void *opaque, hwaddr addr, xlx_spi_update_irq(s); } -static const MemoryRegionOps spi_ops = { - .read = spi_read, - .write = spi_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4 - } +static const MemoryRegionOps spi_ops[2] = 
{ + [0 ... 1] = { + .read = spi_read, + .write = spi_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + }, + [0].endianness = DEVICE_LITTLE_ENDIAN, + [1].endianness = DEVICE_BIG_ENDIAN, }; static void xilinx_spi_realize(DeviceState *dev, Error **errp) @@ -329,6 +335,12 @@ static void xilinx_spi_realize(DeviceState *dev, Error **errp) XilinxSPI *s = XILINX_SPI(dev); int i; + if (s->model_endianness == ENDIAN_MODE_UNSPECIFIED) { + error_setg(errp, TYPE_XILINX_SPI " property 'endianness'" + " must be set to 'big' or 'little'"); + return; + } + DB_PRINT("\n"); s->spi = ssi_create_bus(dev, "spi"); @@ -339,7 +351,8 @@ static void xilinx_spi_realize(DeviceState *dev, Error **errp) sysbus_init_irq(sbd, &s->cs_lines[i]); } - memory_region_init_io(&s->mmio, OBJECT(s), &spi_ops, s, + memory_region_init_io(&s->mmio, OBJECT(s), + &spi_ops[s->model_endianness == ENDIAN_MODE_BIG], s, "xilinx-spi", R_MAX * 4); sysbus_init_mmio(sbd, &s->mmio); @@ -361,17 +374,17 @@ static const VMStateDescription vmstate_xilinx_spi = { } }; -static Property xilinx_spi_properties[] = { +static const Property xilinx_spi_properties[] = { + DEFINE_PROP_ENDIAN_NODEFAULT("endianness", XilinxSPI, model_endianness), DEFINE_PROP_UINT8("num-ss-bits", XilinxSPI, num_cs, 1), - DEFINE_PROP_END_OF_LIST(), }; -static void xilinx_spi_class_init(ObjectClass *klass, void *data) +static void xilinx_spi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = xilinx_spi_realize; - dc->reset = xlx_spi_reset; + device_class_set_legacy_reset(dc, xlx_spi_reset); device_class_set_props(dc, xilinx_spi_properties); dc->vmsd = &vmstate_xilinx_spi; } diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c index 71952a410d8..a79f3b8e494 100644 --- a/hw/ssi/xilinx_spips.c +++ b/hw/ssi/xilinx_spips.c @@ -33,7 +33,7 @@ #include "hw/ssi/xilinx_spips.h" #include "qapi/error.h" #include "hw/register.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "migration/blocker.h" #include "migration/vmstate.h" @@ -620,7 +620,9 @@ static void xilinx_spips_flush_txfifo(XilinxSPIPS *s) } else if (s->snoop_state == SNOOP_STRIPING || s->snoop_state == SNOOP_NONE) { for (i = 0; i < num_effective_busses(s); ++i) { - tx_rx[i] = fifo8_pop(&s->tx_fifo); + if (!fifo8_is_empty(&s->tx_fifo)) { + tx_rx[i] = fifo8_pop(&s->tx_fifo); + } } stripe8(tx_rx, num_effective_busses(s), false); } else if (s->snoop_state >= SNOOP_ADDR) { @@ -1418,19 +1420,17 @@ static const VMStateDescription vmstate_xlnx_zynqmp_qspips = { } }; -static Property xilinx_zynqmp_qspips_properties[] = { +static const Property xilinx_zynqmp_qspips_properties[] = { DEFINE_PROP_UINT32("dma-burst-size", XlnxZynqMPQSPIPS, dma_burst_size, 64), - DEFINE_PROP_END_OF_LIST(), }; -static Property xilinx_spips_properties[] = { +static const Property xilinx_spips_properties[] = { DEFINE_PROP_UINT8("num-busses", XilinxSPIPS, num_busses, 1), DEFINE_PROP_UINT8("num-ss-bits", XilinxSPIPS, num_cs, 4), DEFINE_PROP_UINT8("num-txrx-bytes", XilinxSPIPS, num_txrx_bytes, 1), - DEFINE_PROP_END_OF_LIST(), }; -static void xilinx_qspips_class_init(ObjectClass *klass, void * data) +static void xilinx_qspips_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); XilinxSPIPSClass *xsc = XILINX_SPIPS_CLASS(klass); @@ -1442,13 +1442,13 @@ static void xilinx_qspips_class_init(ObjectClass *klass, void * data) xsc->tx_fifo_size = TXFF_A_Q; } -static void xilinx_spips_class_init(ObjectClass *klass, void *data) +static void 
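The xilinx_spi change just above models the core's configurable bus endianness by keeping two MemoryRegionOps entries that differ only in .endianness and indexing into the pair at realize time with a boolean. A cut-down, self-contained sketch of the same idiom using stand-in types (the real code uses MemoryRegionOps, DEVICE_LITTLE_ENDIAN/DEVICE_BIG_ENDIAN and the 'endianness' qdev property):

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for MemoryRegionOps: only the endianness field matters here. */
typedef enum { DEMO_LITTLE, DEMO_BIG } DemoEndian;

typedef struct DemoOps {
    uint64_t (*read)(void *opaque, uint64_t addr, unsigned size);
    void (*write)(void *opaque, uint64_t addr, uint64_t val, unsigned size);
    DemoEndian endianness;
} DemoOps;

static uint64_t demo_read(void *opaque, uint64_t addr, unsigned size)
{
    return 0;
}

static void demo_write(void *opaque, uint64_t addr, uint64_t val,
                       unsigned size)
{
}

/* Two op tables that differ only in byte order, as spi_ops[2] does above. */
static const DemoOps demo_ops[2] = {
    [0] = { .read = demo_read, .write = demo_write, .endianness = DEMO_LITTLE },
    [1] = { .read = demo_read, .write = demo_write, .endianness = DEMO_BIG },
};

/* At realize time the model picks one table from its endianness property. */
static const DemoOps *demo_pick_ops(bool model_is_big_endian)
{
    return &demo_ops[model_is_big_endian];
}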
xilinx_spips_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); XilinxSPIPSClass *xsc = XILINX_SPIPS_CLASS(klass); dc->realize = xilinx_spips_realize; - dc->reset = xilinx_spips_reset; + device_class_set_legacy_reset(dc, xilinx_spips_reset); device_class_set_props(dc, xilinx_spips_properties); dc->vmsd = &vmstate_xilinx_spips; @@ -1458,13 +1458,13 @@ static void xilinx_spips_class_init(ObjectClass *klass, void *data) xsc->tx_fifo_size = TXFF_A; } -static void xlnx_zynqmp_qspips_class_init(ObjectClass *klass, void * data) +static void xlnx_zynqmp_qspips_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); XilinxSPIPSClass *xsc = XILINX_SPIPS_CLASS(klass); dc->realize = xlnx_zynqmp_qspips_realize; - dc->reset = xlnx_zynqmp_qspips_reset; + device_class_set_legacy_reset(dc, xlnx_zynqmp_qspips_reset); dc->vmsd = &vmstate_xlnx_zynqmp_qspips; device_class_set_props(dc, xilinx_zynqmp_qspips_properties); xsc->reg_ops = &xlnx_zynqmp_qspips_ops; diff --git a/hw/ssi/xlnx-versal-ospi.c b/hw/ssi/xlnx-versal-ospi.c index c479138ec1c..56d51ce0e30 100644 --- a/hw/ssi/xlnx-versal-ospi.c +++ b/hw/ssi/xlnx-versal-ospi.c @@ -1825,18 +1825,17 @@ static const VMStateDescription vmstate_xlnx_versal_ospi = { } }; -static Property xlnx_versal_ospi_properties[] = { +static const Property xlnx_versal_ospi_properties[] = { DEFINE_PROP_BOOL("dac-with-indac", XlnxVersalOspi, dac_with_indac, false), DEFINE_PROP_BOOL("indac-write-disabled", XlnxVersalOspi, ind_write_disabled, false), - DEFINE_PROP_END_OF_LIST(), }; -static void xlnx_versal_ospi_class_init(ObjectClass *klass, void *data) +static void xlnx_versal_ospi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = xlnx_versal_ospi_reset; + device_class_set_legacy_reset(dc, xlnx_versal_ospi_reset); dc->realize = xlnx_versal_ospi_realize; dc->vmsd = &vmstate_xlnx_versal_ospi; device_class_set_props(dc, xlnx_versal_ospi_properties); diff --git a/hw/timer/Kconfig b/hw/timer/Kconfig index 61fbb62b65c..b3d823ce2c3 100644 --- a/hw/timer/Kconfig +++ b/hw/timer/Kconfig @@ -12,6 +12,12 @@ config A9_GTIMER config HPET bool default y if PC + # The HPET has both a Rust and a C implementation + select HPET_C if !HAVE_RUST + select X_HPET_RUST if HAVE_RUST + +config HPET_C + bool config I8254 bool @@ -21,6 +27,9 @@ config ALLWINNER_A10_PIT bool select PTIMER +config PXA2XX_TIMER + bool + config SIFIVE_PWM bool diff --git a/hw/timer/a9gtimer.c b/hw/timer/a9gtimer.c index 64d80cdf6a3..690140f5a6c 100644 --- a/hw/timer/a9gtimer.c +++ b/hw/timer/a9gtimer.c @@ -32,7 +32,7 @@ #include "qemu/log.h" #include "qemu/module.h" #include "hw/core/cpu.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" #ifndef A9_GTIMER_ERR_DEBUG #define A9_GTIMER_ERR_DEBUG 0 @@ -373,18 +373,17 @@ static const VMStateDescription vmstate_a9_gtimer = { } }; -static Property a9_gtimer_properties[] = { +static const Property a9_gtimer_properties[] = { DEFINE_PROP_UINT32("num-cpu", A9GTimerState, num_cpu, 0), - DEFINE_PROP_END_OF_LIST() }; -static void a9_gtimer_class_init(ObjectClass *klass, void *data) +static void a9_gtimer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = a9_gtimer_realize; dc->vmsd = &vmstate_a9_gtimer; - dc->reset = a9_gtimer_reset; + device_class_set_legacy_reset(dc, a9_gtimer_reset); device_class_set_props(dc, a9_gtimer_properties); } diff --git a/hw/timer/allwinner-a10-pit.c b/hw/timer/allwinner-a10-pit.c 
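Most of the class_init hunks in this series are the same two mechanical conversions: the class_init data pointer becomes const, and the deprecated dc->reset assignment is replaced by device_class_set_legacy_reset(). A schematic class_init showing the target shape; the device name and reset body are placeholders.

#include "qemu/osdep.h"
#include "hw/qdev-core.h"

/* Placeholder legacy reset handler for an imaginary device. */
static void demo_device_reset(DeviceState *dev)
{
    /* ... put registers back to their power-on values ... */
}

static void demo_device_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* Old style was "dc->reset = demo_device_reset;". */
    device_class_set_legacy_reset(dc, demo_device_reset);
}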
index a524de13817..e4c353273a1 100644 --- a/hw/timer/allwinner-a10-pit.c +++ b/hw/timer/allwinner-a10-pit.c @@ -185,15 +185,14 @@ static void a10_pit_write(void *opaque, hwaddr offset, uint64_t value, static const MemoryRegionOps a10_pit_ops = { .read = a10_pit_read, .write = a10_pit_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; -static Property a10_pit_properties[] = { +static const Property a10_pit_properties[] = { DEFINE_PROP_UINT32("clk0-freq", AwA10PITState, clk_freq[0], 0), DEFINE_PROP_UINT32("clk1-freq", AwA10PITState, clk_freq[1], 0), DEFINE_PROP_UINT32("clk2-freq", AwA10PITState, clk_freq[2], 0), DEFINE_PROP_UINT32("clk3-freq", AwA10PITState, clk_freq[3], 0), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_a10_pit = { @@ -289,11 +288,11 @@ static void a10_pit_finalize(Object *obj) } } -static void a10_pit_class_init(ObjectClass *klass, void *data) +static void a10_pit_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = a10_pit_reset; + device_class_set_legacy_reset(dc, a10_pit_reset); device_class_set_props(dc, a10_pit_properties); dc->desc = "allwinner a10 timer"; dc->vmsd = &vmstate_a10_pit; diff --git a/hw/timer/arm_mptimer.c b/hw/timer/arm_mptimer.c index bca4cee0e4e..7cc5915e9ec 100644 --- a/hw/timer/arm_mptimer.c +++ b/hw/timer/arm_mptimer.c @@ -300,18 +300,17 @@ static const VMStateDescription vmstate_arm_mptimer = { } }; -static Property arm_mptimer_properties[] = { +static const Property arm_mptimer_properties[] = { DEFINE_PROP_UINT32("num-cpu", ARMMPTimerState, num_cpu, 0), - DEFINE_PROP_END_OF_LIST() }; -static void arm_mptimer_class_init(ObjectClass *klass, void *data) +static void arm_mptimer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = arm_mptimer_realize; dc->vmsd = &vmstate_arm_mptimer; - dc->reset = arm_mptimer_reset; + device_class_set_legacy_reset(dc, arm_mptimer_reset); device_class_set_props(dc, arm_mptimer_properties); } diff --git a/hw/timer/arm_timer.c b/hw/timer/arm_timer.c index 0940e03f1d9..56638ff5cdc 100644 --- a/hw/timer/arm_timer.c +++ b/hw/timer/arm_timer.c @@ -387,13 +387,12 @@ static const TypeInfo icp_pit_info = { .instance_init = icp_pit_init, }; -static Property sp804_properties[] = { +static const Property sp804_properties[] = { DEFINE_PROP_UINT32("freq0", SP804State, freq0, 1000000), DEFINE_PROP_UINT32("freq1", SP804State, freq1, 1000000), - DEFINE_PROP_END_OF_LIST(), }; -static void sp804_class_init(ObjectClass *klass, void *data) +static void sp804_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); diff --git a/hw/timer/armv7m_systick.c b/hw/timer/armv7m_systick.c index f6b1acef271..7e4ddcd4058 100644 --- a/hw/timer/armv7m_systick.c +++ b/hw/timer/armv7m_systick.c @@ -285,12 +285,12 @@ static const VMStateDescription vmstate_systick = { } }; -static void systick_class_init(ObjectClass *klass, void *data) +static void systick_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_systick; - dc->reset = systick_reset; + device_class_set_legacy_reset(dc, systick_reset); dc->realize = systick_realize; } diff --git a/hw/timer/aspeed_timer.c b/hw/timer/aspeed_timer.c index fc5c94bdf36..57db03512f4 100644 --- a/hw/timer/aspeed_timer.c +++ b/hw/timer/aspeed_timer.c @@ -239,9 +239,8 @@ static uint64_t aspeed_timer_get_value(AspeedTimer *t, int reg) return value; } -static uint64_t 
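The other recurring conversion is in the property tables: they become `static const Property ...[]` and drop the DEFINE_PROP_END_OF_LIST() sentinel, the assumption being that the registration helper now derives the count from the array itself rather than scanning for a terminator. A minimal sketch of the new style with a made-up device and field:

#include "qemu/osdep.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"

/* Minimal stand-in device state with one configurable field. */
typedef struct DemoDeviceState {
    DeviceState parent_obj;
    uint32_t num_cs;
} DemoDeviceState;

/* New style: const array, no DEFINE_PROP_END_OF_LIST() terminator. */
static const Property demo_device_properties[] = {
    DEFINE_PROP_UINT32("num-cs", DemoDeviceState, num_cs, 1),
};

static void demo_device_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, demo_device_properties);
}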
aspeed_timer_read(void *opaque, hwaddr offset, unsigned size) +static uint64_t aspeed_timer_read_common(AspeedTimerCtrlState *s, hwaddr offset) { - AspeedTimerCtrlState *s = opaque; const int reg = (offset & 0xf) / 4; uint64_t value; @@ -256,10 +255,11 @@ static uint64_t aspeed_timer_read(void *opaque, hwaddr offset, unsigned size) value = aspeed_timer_get_value(&s->timers[(offset >> 4) - 1], reg); break; default: - value = ASPEED_TIMER_GET_CLASS(s)->read(s, offset); + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + value = 0; break; } - trace_aspeed_timer_read(offset, size, value); return value; } @@ -276,7 +276,8 @@ static void aspeed_timer_set_value(AspeedTimerCtrlState *s, int timer, int reg, old_reload = t->reload; t->reload = calculate_min_ticks(t, value); - /* If the reload value was not previously set, or zero, and + /* + * If the reload value was not previously set, or zero, and * the current value is valid, try to start the timer if it is * enabled. */ @@ -312,7 +313,8 @@ static void aspeed_timer_set_value(AspeedTimerCtrlState *s, int timer, int reg, } } -/* Control register operations are broken out into helpers that can be +/* + * Control register operations are broken out into helpers that can be * explicitly called on aspeed_timer_reset(), but also from * aspeed_timer_ctrl_op(). */ @@ -396,7 +398,8 @@ static void aspeed_timer_set_ctrl(AspeedTimerCtrlState *s, uint32_t reg) AspeedTimer *t; const uint8_t enable_mask = BIT(op_enable); - /* Handle a dependency between the 'enable' and remaining three + /* + * Handle a dependency between the 'enable' and remaining three * configuration bits - i.e. if more than one bit in the control set has * changed, including the 'enable' bit, then we want either disable the * timer and perform configuration, or perform configuration and then @@ -428,12 +431,11 @@ static void aspeed_timer_set_ctrl2(AspeedTimerCtrlState *s, uint32_t value) trace_aspeed_timer_set_ctrl2(value); } -static void aspeed_timer_write(void *opaque, hwaddr offset, uint64_t value, - unsigned size) +static void aspeed_timer_write_common(AspeedTimerCtrlState *s, hwaddr offset, + uint64_t value) { const uint32_t tv = (uint32_t)(value & 0xFFFFFFFF); const int reg = (offset & 0xf) / 4; - AspeedTimerCtrlState *s = opaque; switch (offset) { /* Control Registers */ @@ -448,11 +450,25 @@ static void aspeed_timer_write(void *opaque, hwaddr offset, uint64_t value, aspeed_timer_set_value(s, (offset >> TIMER_NR_REGS) - 1, reg, tv); break; default: - ASPEED_TIMER_GET_CLASS(s)->write(s, offset, value); + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); break; } } +static uint64_t aspeed_timer_read(void *opaque, hwaddr offset, unsigned size) +{ + AspeedTimerCtrlState *s = ASPEED_TIMER(opaque); + return ASPEED_TIMER_GET_CLASS(s)->read(s, offset); +} + +static void aspeed_timer_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + AspeedTimerCtrlState *s = ASPEED_TIMER(opaque); + ASPEED_TIMER_GET_CLASS(s)->write(s, offset, value); +} + static const MemoryRegionOps aspeed_timer_ops = { .read = aspeed_timer_read, .write = aspeed_timer_write, @@ -472,12 +488,15 @@ static uint64_t aspeed_2400_timer_read(AspeedTimerCtrlState *s, hwaddr offset) break; case 0x38: case 0x3C: - default: qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); value = 0; break; + default: + value = aspeed_timer_read_common(s, offset); + break; } + trace_aspeed_timer_read(offset, 
value); return value; } @@ -492,10 +511,12 @@ static void aspeed_2400_timer_write(AspeedTimerCtrlState *s, hwaddr offset, break; case 0x38: case 0x3C: - default: qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); break; + default: + aspeed_timer_write_common(s, offset, value); + break; } } @@ -511,12 +532,15 @@ static uint64_t aspeed_2500_timer_read(AspeedTimerCtrlState *s, hwaddr offset) value = s->ctrl3 & BIT(0); break; case 0x3C: - default: qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); value = 0; break; + default: + value = aspeed_timer_read_common(s, offset); + break; } + trace_aspeed_timer_read(offset, value); return value; } @@ -545,8 +569,7 @@ static void aspeed_2500_timer_write(AspeedTimerCtrlState *s, hwaddr offset, break; default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", - __func__, offset); + aspeed_timer_write_common(s, offset, value); break; } } @@ -561,12 +584,15 @@ static uint64_t aspeed_2600_timer_read(AspeedTimerCtrlState *s, hwaddr offset) break; case 0x38: case 0x3C: - default: qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); value = 0; break; + default: + value = aspeed_timer_read_common(s, offset); + break; } + trace_aspeed_timer_read(offset, value); return value; } @@ -577,17 +603,209 @@ static void aspeed_2600_timer_write(AspeedTimerCtrlState *s, hwaddr offset, switch (offset) { case 0x34: - s->irq_sts &= tv; + s->irq_sts &= ~tv; break; case 0x3C: aspeed_timer_set_ctrl(s, s->ctrl & ~tv); break; - case 0x38: - default: qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); break; + default: + aspeed_timer_write_common(s, offset, value); + break; + } +} + +static void aspeed_2700_timer_set_ctrl(AspeedTimerCtrlState *s, int index, + uint32_t reg) +{ + const uint8_t overflow_interrupt_mask = BIT(op_overflow_interrupt); + const uint8_t external_clock_mask = BIT(op_external_clock); + const uint8_t pulse_enable_mask = BIT(op_pulse_enable); + const uint8_t enable_mask = BIT(op_enable); + AspeedTimer *t; + uint8_t t_old; + uint8_t t_new; + int shift; + + /* + * Only 1 will set the specific bits to 1 + * Handle a dependency between the 'enable' and remaining three + * configuration bits - i.e. if more than one bit in the control set has + * set, including the 'enable' bit, perform configuration and then + * enable the timer. + * Interrupt Status bit should not be set. 
+ */ + + t = &s->timers[index]; + shift = index * TIMER_CTRL_BITS; + + t_old = (s->ctrl >> shift) & TIMER_CTRL_MASK; + t_new = reg & TIMER_CTRL_MASK; + + if (!(t_old & external_clock_mask) && + (t_new & external_clock_mask)) { + aspeed_timer_ctrl_external_clock(t, true); + s->ctrl = deposit32(s->ctrl, shift + op_external_clock, 1, 1); + } + + if (!(t_old & overflow_interrupt_mask) && + (t_new & overflow_interrupt_mask)) { + aspeed_timer_ctrl_overflow_interrupt(t, true); + s->ctrl = deposit32(s->ctrl, shift + op_overflow_interrupt, 1, 1); + } + + + if (!(t_old & pulse_enable_mask) && + (t_new & pulse_enable_mask)) { + aspeed_timer_ctrl_pulse_enable(t, true); + s->ctrl = deposit32(s->ctrl, shift + op_pulse_enable, 1, 1); + } + + /* If we are enabling, do so last */ + if (!(t_old & enable_mask) && + (t_new & enable_mask)) { + aspeed_timer_ctrl_enable(t, true); + s->ctrl = deposit32(s->ctrl, shift + op_enable, 1, 1); + } +} + +static void aspeed_2700_timer_clear_ctrl(AspeedTimerCtrlState *s, int index, + uint32_t reg) +{ + const uint8_t overflow_interrupt_mask = BIT(op_overflow_interrupt); + const uint8_t external_clock_mask = BIT(op_external_clock); + const uint8_t pulse_enable_mask = BIT(op_pulse_enable); + const uint8_t enable_mask = BIT(op_enable); + AspeedTimer *t; + uint8_t t_old; + uint8_t t_new; + int shift; + + /* + * Only 1 will clear the specific bits to 0 + * Handle a dependency between the 'enable' and remaining three + * configuration bits - i.e. if more than one bit in the control set has + * clear, including the 'enable' bit, then disable the timer and perform + * configuration + */ + + t = &s->timers[index]; + shift = index * TIMER_CTRL_BITS; + + t_old = (s->ctrl >> shift) & TIMER_CTRL_MASK; + t_new = reg & TIMER_CTRL_MASK; + + /* If we are disabling, do so first */ + if ((t_old & enable_mask) && + (t_new & enable_mask)) { + aspeed_timer_ctrl_enable(t, false); + s->ctrl = deposit32(s->ctrl, shift + op_enable, 1, 0); + } + + if ((t_old & external_clock_mask) && + (t_new & external_clock_mask)) { + aspeed_timer_ctrl_external_clock(t, false); + s->ctrl = deposit32(s->ctrl, shift + op_external_clock, 1, 0); + } + + if ((t_old & overflow_interrupt_mask) && + (t_new & overflow_interrupt_mask)) { + aspeed_timer_ctrl_overflow_interrupt(t, false); + s->ctrl = deposit32(s->ctrl, shift + op_overflow_interrupt, 1, 0); + } + + if ((t_old & pulse_enable_mask) && + (t_new & pulse_enable_mask)) { + aspeed_timer_ctrl_pulse_enable(t, false); + s->ctrl = deposit32(s->ctrl, shift + op_pulse_enable, 1, 0); + } + + /* Clear interrupt status */ + if (reg & 0x10000) { + s->irq_sts = deposit32(s->irq_sts, index, 1, 0); + } +} + +static uint64_t aspeed_2700_timer_read(AspeedTimerCtrlState *s, hwaddr offset) +{ + uint32_t timer_offset = offset & 0x3f; + int timer_index = offset >> 6; + uint64_t value = 0; + + if (timer_index >= ASPEED_TIMER_NR_TIMERS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: offset 0x%" PRIx64 " out of bounds\n", + __func__, offset); + return 0; + } + + switch (timer_offset) { + /* + * Counter Status + * Counter Reload + * Counter First Matching + * Counter Second Matching + */ + case 0x00 ... 
0x0C: + value = aspeed_timer_get_value(&s->timers[timer_index], + timer_offset >> 2); + break; + /* Counter Control and Interrupt Status */ + case 0x10: + value = deposit64(value, 0, 4, + extract32(s->ctrl, timer_index * 4, 4)); + value = deposit64(value, 16, 1, + extract32(s->irq_sts, timer_index, 1)); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: no getter for offset 0x%" + PRIx64"\n", __func__, offset); + value = 0; + break; + } + trace_aspeed_timer_read(offset, value); + return value; +} + +static void aspeed_2700_timer_write(AspeedTimerCtrlState *s, hwaddr offset, + uint64_t value) +{ + const uint32_t timer_value = (uint32_t)(value & 0xFFFFFFFF); + uint32_t timer_offset = offset & 0x3f; + int timer_index = offset >> 6; + + if (timer_index >= ASPEED_TIMER_NR_TIMERS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: offset 0x%" PRIx64 " out of bounds\n", + __func__, offset); + } + + switch (timer_offset) { + /* + * Counter Status + * Counter Reload + * Counter First Matching + * Counter Second Matching + */ + case 0x00 ... 0x0C: + aspeed_timer_set_value(s, timer_index, timer_offset >> 2, + timer_value); + break; + /* Counter Control Set and Interrupt Status */ + case 0x10: + aspeed_2700_timer_set_ctrl(s, timer_index, timer_value); + break; + /* Counter Control Clear and Interrupr Status */ + case 0x14: + aspeed_2700_timer_clear_ctrl(s, timer_index, timer_value); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: no setter for offset 0x%" + PRIx64"\n", __func__, offset); + break; } } @@ -623,7 +841,8 @@ static void aspeed_timer_reset(DeviceState *dev) for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { AspeedTimer *t = &s->timers[i]; - /* Explicitly call helpers to avoid any conditional behaviour through + /* + * Explicitly call helpers to avoid any conditional behaviour through * aspeed_timer_set_ctrl(). 
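The new AST2700 timer block uses separate "set" and "clear" control registers: writing 1 to a bit enables (or, for the clear register, disables) that feature, and 0 leaves it untouched. Each timer owns a small field of the shared ctrl word, carved out with extract32()/deposit32(). A tiny self-contained illustration of those helpers with a simplified layout (one nibble per timer, as the patch uses, but none of the per-bit side effects):

#include "qemu/osdep.h"
#include "qemu/bitops.h"

#define DEMO_CTRL_BITS 4    /* one nibble of control bits per timer */

/* Write-1-to-set semantics on one timer's nibble of a shared ctrl word. */
static uint32_t demo_ctrl_set(uint32_t ctrl, int timer_index, uint32_t mask)
{
    int shift = timer_index * DEMO_CTRL_BITS;
    uint32_t field = extract32(ctrl, shift, DEMO_CTRL_BITS);

    field |= mask & 0xf;
    return deposit32(ctrl, shift, DEMO_CTRL_BITS, field);
}

/* Write-1-to-clear is the same walk with the bits removed instead. */
static uint32_t demo_ctrl_clear(uint32_t ctrl, int timer_index, uint32_t mask)
{
    int shift = timer_index * DEMO_CTRL_BITS;
    uint32_t field = extract32(ctrl, shift, DEMO_CTRL_BITS);

    field &= ~(mask & 0xf);
    return deposit32(ctrl, shift, DEMO_CTRL_BITS, field);
}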
*/ aspeed_timer_ctrl_enable(t, false); @@ -671,18 +890,17 @@ static const VMStateDescription vmstate_aspeed_timer_state = { } }; -static Property aspeed_timer_properties[] = { +static const Property aspeed_timer_properties[] = { DEFINE_PROP_LINK("scu", AspeedTimerCtrlState, scu, TYPE_ASPEED_SCU, AspeedSCUState *), - DEFINE_PROP_END_OF_LIST(), }; -static void timer_class_init(ObjectClass *klass, void *data) +static void timer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_timer_realize; - dc->reset = aspeed_timer_reset; + device_class_set_legacy_reset(dc, aspeed_timer_reset); dc->desc = "ASPEED Timer"; dc->vmsd = &vmstate_aspeed_timer_state; device_class_set_props(dc, aspeed_timer_properties); @@ -697,7 +915,7 @@ static const TypeInfo aspeed_timer_info = { .abstract = true, }; -static void aspeed_2400_timer_class_init(ObjectClass *klass, void *data) +static void aspeed_2400_timer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass); @@ -713,7 +931,7 @@ static const TypeInfo aspeed_2400_timer_info = { .class_init = aspeed_2400_timer_class_init, }; -static void aspeed_2500_timer_class_init(ObjectClass *klass, void *data) +static void aspeed_2500_timer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass); @@ -729,7 +947,7 @@ static const TypeInfo aspeed_2500_timer_info = { .class_init = aspeed_2500_timer_class_init, }; -static void aspeed_2600_timer_class_init(ObjectClass *klass, void *data) +static void aspeed_2600_timer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass); @@ -745,7 +963,7 @@ static const TypeInfo aspeed_2600_timer_info = { .class_init = aspeed_2600_timer_class_init, }; -static void aspeed_1030_timer_class_init(ObjectClass *klass, void *data) +static void aspeed_1030_timer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass); @@ -761,6 +979,22 @@ static const TypeInfo aspeed_1030_timer_info = { .class_init = aspeed_1030_timer_class_init, }; +static void aspeed_2700_timer_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass); + + dc->desc = "ASPEED 2700 Timer"; + awc->read = aspeed_2700_timer_read; + awc->write = aspeed_2700_timer_write; +} + +static const TypeInfo aspeed_2700_timer_info = { + .name = TYPE_ASPEED_2700_TIMER, + .parent = TYPE_ASPEED_TIMER, + .class_init = aspeed_2700_timer_class_init, +}; + static void aspeed_timer_register_types(void) { type_register_static(&aspeed_timer_info); @@ -768,6 +1002,7 @@ static void aspeed_timer_register_types(void) type_register_static(&aspeed_2500_timer_info); type_register_static(&aspeed_2600_timer_info); type_register_static(&aspeed_1030_timer_info); + type_register_static(&aspeed_2700_timer_info); } type_init(aspeed_timer_register_types) diff --git a/hw/timer/avr_timer16.c b/hw/timer/avr_timer16.c index c48555da525..012d8290018 100644 --- a/hw/timer/avr_timer16.c +++ b/hw/timer/avr_timer16.c @@ -542,11 +542,10 @@ static const MemoryRegionOps avr_timer16_ifr_ops = { .impl = {.max_access_size = 1} }; -static Property avr_timer16_properties[] = { +static const Property avr_timer16_properties[] = { DEFINE_PROP_UINT8("id", struct 
AVRTimer16State, id, 0), DEFINE_PROP_UINT64("cpu-frequency-hz", struct AVRTimer16State, cpu_freq_hz, 0), - DEFINE_PROP_END_OF_LIST(), }; static void avr_timer16_pr(void *opaque, int irq, int level) @@ -596,11 +595,11 @@ static void avr_timer16_realize(DeviceState *dev, Error **errp) s->enabled = true; } -static void avr_timer16_class_init(ObjectClass *klass, void *data) +static void avr_timer16_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = avr_timer16_reset; + device_class_set_legacy_reset(dc, avr_timer16_reset); dc->realize = avr_timer16_realize; device_class_set_props(dc, avr_timer16_properties); } diff --git a/hw/timer/bcm2835_systmr.c b/hw/timer/bcm2835_systmr.c index 3ec64604ee5..7929aaa882a 100644 --- a/hw/timer/bcm2835_systmr.c +++ b/hw/timer/bcm2835_systmr.c @@ -154,12 +154,12 @@ static const VMStateDescription bcm2835_systmr_vmstate = { } }; -static void bcm2835_systmr_class_init(ObjectClass *klass, void *data) +static void bcm2835_systmr_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = bcm2835_systmr_realize; - dc->reset = bcm2835_systmr_reset; + device_class_set_legacy_reset(dc, bcm2835_systmr_reset); dc->vmsd = &bcm2835_systmr_vmstate; } diff --git a/hw/timer/cadence_ttc.c b/hw/timer/cadence_ttc.c index 54dbd4c5646..9c7ba168768 100644 --- a/hw/timer/cadence_ttc.c +++ b/hw/timer/cadence_ttc.c @@ -451,7 +451,7 @@ static const VMStateDescription vmstate_cadence_ttc = { } }; -static void cadence_ttc_class_init(ObjectClass *klass, void *data) +static void cadence_ttc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/timer/cmsdk-apb-dualtimer.c b/hw/timer/cmsdk-apb-dualtimer.c index ddf9070c3c0..34c550a3f98 100644 --- a/hw/timer/cmsdk-apb-dualtimer.c +++ b/hw/timer/cmsdk-apb-dualtimer.c @@ -534,13 +534,13 @@ static const VMStateDescription cmsdk_apb_dualtimer_vmstate = { } }; -static void cmsdk_apb_dualtimer_class_init(ObjectClass *klass, void *data) +static void cmsdk_apb_dualtimer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = cmsdk_apb_dualtimer_realize; dc->vmsd = &cmsdk_apb_dualtimer_vmstate; - dc->reset = cmsdk_apb_dualtimer_reset; + device_class_set_legacy_reset(dc, cmsdk_apb_dualtimer_reset); } static const TypeInfo cmsdk_apb_dualtimer_info = { diff --git a/hw/timer/cmsdk-apb-timer.c b/hw/timer/cmsdk-apb-timer.c index 814545c7832..4095267b4aa 100644 --- a/hw/timer/cmsdk-apb-timer.c +++ b/hw/timer/cmsdk-apb-timer.c @@ -261,13 +261,13 @@ static const VMStateDescription cmsdk_apb_timer_vmstate = { } }; -static void cmsdk_apb_timer_class_init(ObjectClass *klass, void *data) +static void cmsdk_apb_timer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = cmsdk_apb_timer_realize; dc->vmsd = &cmsdk_apb_timer_vmstate; - dc->reset = cmsdk_apb_timer_reset; + device_class_set_legacy_reset(dc, cmsdk_apb_timer_reset); } static const TypeInfo cmsdk_apb_timer_info = { diff --git a/hw/timer/digic-timer.c b/hw/timer/digic-timer.c index 9fc5c1d8a46..355138d3547 100644 --- a/hw/timer/digic-timer.c +++ b/hw/timer/digic-timer.c @@ -161,11 +161,11 @@ static void digic_timer_finalize(Object *obj) ptimer_free(s->ptimer); } -static void digic_timer_class_init(ObjectClass *klass, void *class_data) +static void digic_timer_class_init(ObjectClass *klass, const void *class_data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset 
= digic_timer_reset; + device_class_set_legacy_reset(dc, digic_timer_reset); dc->vmsd = &vmstate_digic_timer; } diff --git a/hw/timer/etraxfs_timer.c b/hw/timer/etraxfs_timer.c deleted file mode 100644 index dd6d96b0a10..00000000000 --- a/hw/timer/etraxfs_timer.c +++ /dev/null @@ -1,407 +0,0 @@ -/* - * QEMU ETRAX Timers - * - * Copyright (c) 2007 Edgar E. Iglesias, Axis Communications AB. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "qemu/osdep.h" -#include "hw/sysbus.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "migration/vmstate.h" -#include "qemu/module.h" -#include "qemu/timer.h" -#include "hw/irq.h" -#include "hw/ptimer.h" -#include "qom/object.h" - -#define D(x) - -#define RW_TMR0_DIV 0x00 -#define R_TMR0_DATA 0x04 -#define RW_TMR0_CTRL 0x08 -#define RW_TMR1_DIV 0x10 -#define R_TMR1_DATA 0x14 -#define RW_TMR1_CTRL 0x18 -#define R_TIME 0x38 -#define RW_WD_CTRL 0x40 -#define R_WD_STAT 0x44 -#define RW_INTR_MASK 0x48 -#define RW_ACK_INTR 0x4c -#define R_INTR 0x50 -#define R_MASKED_INTR 0x54 - -#define TYPE_ETRAX_FS_TIMER "etraxfs-timer" -typedef struct ETRAXTimerState ETRAXTimerState; -DECLARE_INSTANCE_CHECKER(ETRAXTimerState, ETRAX_TIMER, - TYPE_ETRAX_FS_TIMER) - -struct ETRAXTimerState { - SysBusDevice parent_obj; - - MemoryRegion mmio; - qemu_irq irq; - qemu_irq nmi; - - ptimer_state *ptimer_t0; - ptimer_state *ptimer_t1; - ptimer_state *ptimer_wd; - - uint32_t wd_hits; - - /* Control registers. 
*/ - uint32_t rw_tmr0_div; - uint32_t r_tmr0_data; - uint32_t rw_tmr0_ctrl; - - uint32_t rw_tmr1_div; - uint32_t r_tmr1_data; - uint32_t rw_tmr1_ctrl; - - uint32_t rw_wd_ctrl; - - uint32_t rw_intr_mask; - uint32_t rw_ack_intr; - uint32_t r_intr; - uint32_t r_masked_intr; -}; - -static const VMStateDescription vmstate_etraxfs = { - .name = "etraxfs", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_PTIMER(ptimer_t0, ETRAXTimerState), - VMSTATE_PTIMER(ptimer_t1, ETRAXTimerState), - VMSTATE_PTIMER(ptimer_wd, ETRAXTimerState), - - VMSTATE_UINT32(wd_hits, ETRAXTimerState), - - VMSTATE_UINT32(rw_tmr0_div, ETRAXTimerState), - VMSTATE_UINT32(r_tmr0_data, ETRAXTimerState), - VMSTATE_UINT32(rw_tmr0_ctrl, ETRAXTimerState), - - VMSTATE_UINT32(rw_tmr1_div, ETRAXTimerState), - VMSTATE_UINT32(r_tmr1_data, ETRAXTimerState), - VMSTATE_UINT32(rw_tmr1_ctrl, ETRAXTimerState), - - VMSTATE_UINT32(rw_wd_ctrl, ETRAXTimerState), - - VMSTATE_UINT32(rw_intr_mask, ETRAXTimerState), - VMSTATE_UINT32(rw_ack_intr, ETRAXTimerState), - VMSTATE_UINT32(r_intr, ETRAXTimerState), - VMSTATE_UINT32(r_masked_intr, ETRAXTimerState), - - VMSTATE_END_OF_LIST() - } -}; - -static uint64_t -timer_read(void *opaque, hwaddr addr, unsigned int size) -{ - ETRAXTimerState *t = opaque; - uint32_t r = 0; - - switch (addr) { - case R_TMR0_DATA: - r = ptimer_get_count(t->ptimer_t0); - break; - case R_TMR1_DATA: - r = ptimer_get_count(t->ptimer_t1); - break; - case R_TIME: - r = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / 10; - break; - case RW_INTR_MASK: - r = t->rw_intr_mask; - break; - case R_MASKED_INTR: - r = t->r_intr & t->rw_intr_mask; - break; - default: - D(printf ("%s %x\n", __func__, addr)); - break; - } - return r; -} - -static void update_ctrl(ETRAXTimerState *t, int tnum) -{ - unsigned int op; - unsigned int freq; - unsigned int freq_hz; - unsigned int div; - uint32_t ctrl; - - ptimer_state *timer; - - if (tnum == 0) { - ctrl = t->rw_tmr0_ctrl; - div = t->rw_tmr0_div; - timer = t->ptimer_t0; - } else { - ctrl = t->rw_tmr1_ctrl; - div = t->rw_tmr1_div; - timer = t->ptimer_t1; - } - - - op = ctrl & 3; - freq = ctrl >> 2; - freq_hz = 32000000; - - switch (freq) - { - case 0: - case 1: - D(printf ("extern or disabled timer clock?\n")); - break; - case 4: freq_hz = 29493000; break; - case 5: freq_hz = 32000000; break; - case 6: freq_hz = 32768000; break; - case 7: freq_hz = 100000000; break; - default: - abort(); - break; - } - - D(printf ("freq_hz=%d div=%d\n", freq_hz, div)); - ptimer_transaction_begin(timer); - ptimer_set_freq(timer, freq_hz); - ptimer_set_limit(timer, div, 0); - - switch (op) - { - case 0: - /* Load. */ - ptimer_set_limit(timer, div, 1); - break; - case 1: - /* Hold. */ - ptimer_stop(timer); - break; - case 2: - /* Run. 
*/ - ptimer_run(timer, 0); - break; - default: - abort(); - break; - } - ptimer_transaction_commit(timer); -} - -static void timer_update_irq(ETRAXTimerState *t) -{ - t->r_intr &= ~(t->rw_ack_intr); - t->r_masked_intr = t->r_intr & t->rw_intr_mask; - - D(printf("%s: masked_intr=%x\n", __func__, t->r_masked_intr)); - qemu_set_irq(t->irq, !!t->r_masked_intr); -} - -static void timer0_hit(void *opaque) -{ - ETRAXTimerState *t = opaque; - t->r_intr |= 1; - timer_update_irq(t); -} - -static void timer1_hit(void *opaque) -{ - ETRAXTimerState *t = opaque; - t->r_intr |= 2; - timer_update_irq(t); -} - -static void watchdog_hit(void *opaque) -{ - ETRAXTimerState *t = opaque; - if (t->wd_hits == 0) { - /* real hw gives a single tick before resetting but we are - a bit friendlier to compensate for our slower execution. */ - ptimer_set_count(t->ptimer_wd, 10); - ptimer_run(t->ptimer_wd, 1); - qemu_irq_raise(t->nmi); - } - else - qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); - - t->wd_hits++; -} - -static inline void timer_watchdog_update(ETRAXTimerState *t, uint32_t value) -{ - unsigned int wd_en = t->rw_wd_ctrl & (1 << 8); - unsigned int wd_key = t->rw_wd_ctrl >> 9; - unsigned int wd_cnt = t->rw_wd_ctrl & 511; - unsigned int new_key = value >> 9 & ((1 << 7) - 1); - unsigned int new_cmd = (value >> 8) & 1; - - /* If the watchdog is enabled, they written key must match the - complement of the previous. */ - wd_key = ~wd_key & ((1 << 7) - 1); - - if (wd_en && wd_key != new_key) - return; - - D(printf("en=%d new_key=%x oldkey=%x cmd=%d cnt=%d\n", - wd_en, new_key, wd_key, new_cmd, wd_cnt)); - - if (t->wd_hits) - qemu_irq_lower(t->nmi); - - t->wd_hits = 0; - - ptimer_transaction_begin(t->ptimer_wd); - ptimer_set_freq(t->ptimer_wd, 760); - if (wd_cnt == 0) - wd_cnt = 256; - ptimer_set_count(t->ptimer_wd, wd_cnt); - if (new_cmd) - ptimer_run(t->ptimer_wd, 1); - else - ptimer_stop(t->ptimer_wd); - - t->rw_wd_ctrl = value; - ptimer_transaction_commit(t->ptimer_wd); -} - -static void -timer_write(void *opaque, hwaddr addr, - uint64_t val64, unsigned int size) -{ - ETRAXTimerState *t = opaque; - uint32_t value = val64; - - switch (addr) - { - case RW_TMR0_DIV: - t->rw_tmr0_div = value; - break; - case RW_TMR0_CTRL: - D(printf ("RW_TMR0_CTRL=%x\n", value)); - t->rw_tmr0_ctrl = value; - update_ctrl(t, 0); - break; - case RW_TMR1_DIV: - t->rw_tmr1_div = value; - break; - case RW_TMR1_CTRL: - D(printf ("RW_TMR1_CTRL=%x\n", value)); - t->rw_tmr1_ctrl = value; - update_ctrl(t, 1); - break; - case RW_INTR_MASK: - D(printf ("RW_INTR_MASK=%x\n", value)); - t->rw_intr_mask = value; - timer_update_irq(t); - break; - case RW_WD_CTRL: - timer_watchdog_update(t, value); - break; - case RW_ACK_INTR: - t->rw_ack_intr = value; - timer_update_irq(t); - t->rw_ack_intr = 0; - break; - default: - printf("%s " HWADDR_FMT_plx " %x\n", __func__, addr, value); - break; - } -} - -static const MemoryRegionOps timer_ops = { - .read = timer_read, - .write = timer_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4 - } -}; - -static void etraxfs_timer_reset_enter(Object *obj, ResetType type) -{ - ETRAXTimerState *t = ETRAX_TIMER(obj); - - ptimer_transaction_begin(t->ptimer_t0); - ptimer_stop(t->ptimer_t0); - ptimer_transaction_commit(t->ptimer_t0); - ptimer_transaction_begin(t->ptimer_t1); - ptimer_stop(t->ptimer_t1); - ptimer_transaction_commit(t->ptimer_t1); - ptimer_transaction_begin(t->ptimer_wd); - ptimer_stop(t->ptimer_wd); - ptimer_transaction_commit(t->ptimer_wd); - 
t->rw_wd_ctrl = 0; - t->r_intr = 0; - t->rw_intr_mask = 0; -} - -static void etraxfs_timer_reset_hold(Object *obj, ResetType type) -{ - ETRAXTimerState *t = ETRAX_TIMER(obj); - - qemu_irq_lower(t->irq); -} - -static void etraxfs_timer_realize(DeviceState *dev, Error **errp) -{ - ETRAXTimerState *t = ETRAX_TIMER(dev); - SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - - t->ptimer_t0 = ptimer_init(timer0_hit, t, PTIMER_POLICY_LEGACY); - t->ptimer_t1 = ptimer_init(timer1_hit, t, PTIMER_POLICY_LEGACY); - t->ptimer_wd = ptimer_init(watchdog_hit, t, PTIMER_POLICY_LEGACY); - - sysbus_init_irq(sbd, &t->irq); - sysbus_init_irq(sbd, &t->nmi); - - memory_region_init_io(&t->mmio, OBJECT(t), &timer_ops, t, - "etraxfs-timer", 0x5c); - sysbus_init_mmio(sbd, &t->mmio); -} - -static void etraxfs_timer_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - ResettableClass *rc = RESETTABLE_CLASS(klass); - - dc->realize = etraxfs_timer_realize; - dc->vmsd = &vmstate_etraxfs; - rc->phases.enter = etraxfs_timer_reset_enter; - rc->phases.hold = etraxfs_timer_reset_hold; -} - -static const TypeInfo etraxfs_timer_info = { - .name = TYPE_ETRAX_FS_TIMER, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(ETRAXTimerState), - .class_init = etraxfs_timer_class_init, -}; - -static void etraxfs_timer_register_types(void) -{ - type_register_static(&etraxfs_timer_info); -} - -type_init(etraxfs_timer_register_types) diff --git a/hw/timer/exynos4210_mct.c b/hw/timer/exynos4210_mct.c index 75098cdb555..bb0f9c8b9a6 100644 --- a/hw/timer/exynos4210_mct.c +++ b/hw/timer/exynos4210_mct.c @@ -815,7 +815,7 @@ static uint32_t exynos4210_ltick_cnt_get_cnto(struct tick_timer *s) /* Both are counting */ icnto = remain / s->tcntb; if (icnto) { - tcnto = remain % (icnto * s->tcntb); + tcnto = remain % ((uint64_t)icnto * s->tcntb); } else { tcnto = remain % s->tcntb; } @@ -1546,11 +1546,11 @@ static void exynos4210_mct_finalize(Object *obj) } } -static void exynos4210_mct_class_init(ObjectClass *klass, void *data) +static void exynos4210_mct_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = exynos4210_mct_reset; + device_class_set_legacy_reset(dc, exynos4210_mct_reset); dc->vmsd = &vmstate_exynos4210_mct_state; } diff --git a/hw/timer/exynos4210_pwm.c b/hw/timer/exynos4210_pwm.c index ca330e9446c..69f737a8e6f 100644 --- a/hw/timer/exynos4210_pwm.c +++ b/hw/timer/exynos4210_pwm.c @@ -420,11 +420,11 @@ static void exynos4210_pwm_finalize(Object *obj) } } -static void exynos4210_pwm_class_init(ObjectClass *klass, void *data) +static void exynos4210_pwm_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = exynos4210_pwm_reset; + device_class_set_legacy_reset(dc, exynos4210_pwm_reset); dc->vmsd = &vmstate_exynos4210_pwm_state; } diff --git a/hw/timer/grlib_gptimer.c b/hw/timer/grlib_gptimer.c index 49908854510..0e06fa09e99 100644 --- a/hw/timer/grlib_gptimer.c +++ b/hw/timer/grlib_gptimer.c @@ -403,19 +403,18 @@ static void grlib_gptimer_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &unit->iomem); } -static Property grlib_gptimer_properties[] = { +static const Property grlib_gptimer_properties[] = { DEFINE_PROP_UINT32("frequency", GPTimerUnit, freq_hz, 40000000), DEFINE_PROP_UINT32("irq-line", GPTimerUnit, irq_line, 8), DEFINE_PROP_UINT32("nr-timers", GPTimerUnit, nr_timers, 2), - DEFINE_PROP_END_OF_LIST(), }; -static void grlib_gptimer_class_init(ObjectClass *klass, void *data) +static 
void grlib_gptimer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = grlib_gptimer_realize; - dc->reset = grlib_gptimer_reset; + device_class_set_legacy_reset(dc, grlib_gptimer_reset); device_class_set_props(dc, grlib_gptimer_properties); } diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c index 471950adef1..cb48cc151f1 100644 --- a/hw/timer/hpet.c +++ b/hw/timer/hpet.c @@ -36,10 +36,12 @@ #include "hw/rtc/mc146818rtc_regs.h" #include "migration/vmstate.h" #include "hw/timer/i8254.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "qom/object.h" #include "trace.h" +struct hpet_fw_config hpet_fw_cfg = {.count = UINT8_MAX}; + #define HPET_MSI_SUPPORT 0 OBJECT_DECLARE_SIMPLE_TYPE(HPETState, HPET) @@ -75,6 +77,7 @@ struct HPETState { uint8_t rtc_irq_level; qemu_irq pit_enabled; uint8_t num_timers; + uint8_t num_timers_save; uint32_t intcap; HPETTimer timer[HPET_MAX_TIMERS]; @@ -235,15 +238,12 @@ static int hpet_pre_save(void *opaque) s->hpet_counter = hpet_get_ticks(s); } - return 0; -} - -static int hpet_pre_load(void *opaque) -{ - HPETState *s = opaque; - - /* version 1 only supports 3, later versions will load the actual value */ - s->num_timers = HPET_MIN_TIMERS; + /* + * The number of timers must match on source and destination, but it was + * also added to the migration stream. Check that it matches the value + * that was configured. + */ + s->num_timers_save = s->num_timers; return 0; } @@ -251,12 +251,7 @@ static bool hpet_validate_num_timers(void *opaque, int version_id) { HPETState *s = opaque; - if (s->num_timers < HPET_MIN_TIMERS) { - return false; - } else if (s->num_timers > HPET_MAX_TIMERS) { - return false; - } - return true; + return s->num_timers == s->num_timers_save; } static int hpet_post_load(void *opaque, int version_id) @@ -275,16 +270,6 @@ static int hpet_post_load(void *opaque, int version_id) - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); } - /* Push number of timers into capability returned via HPET_ID */ - s->capability &= ~HPET_ID_NUM_TIM_MASK; - s->capability |= (s->num_timers - 1) << HPET_ID_NUM_TIM_SHIFT; - hpet_cfg.hpet[s->hpet_id].event_timer_block_id = (uint32_t)s->capability; - - /* Derive HPET_MSI_SUPPORT from the capability of the first timer. 
*/ - s->flags &= ~(1 << HPET_MSI_SUPPORT); - if (s->timer[0].config & HPET_TN_FSB_CAP) { - s->flags |= 1 << HPET_MSI_SUPPORT; - } return 0; } @@ -343,17 +328,16 @@ static const VMStateDescription vmstate_hpet_timer = { static const VMStateDescription vmstate_hpet = { .name = "hpet", .version_id = 2, - .minimum_version_id = 1, + .minimum_version_id = 2, .pre_save = hpet_pre_save, - .pre_load = hpet_pre_load, .post_load = hpet_post_load, .fields = (const VMStateField[]) { VMSTATE_UINT64(config, HPETState), VMSTATE_UINT64(isr, HPETState), VMSTATE_UINT64(hpet_counter, HPETState), - VMSTATE_UINT8_V(num_timers, HPETState, 2), - VMSTATE_VALIDATE("num_timers in range", hpet_validate_num_timers), - VMSTATE_STRUCT_VARRAY_UINT8(timer, HPETState, num_timers, 0, + VMSTATE_UINT8(num_timers_save, HPETState), + VMSTATE_VALIDATE("num_timers must match", hpet_validate_num_timers), + VMSTATE_STRUCT_VARRAY_UINT8(timer, HPETState, num_timers_save, 0, vmstate_hpet_timer, HPETTimer), VMSTATE_END_OF_LIST() }, @@ -442,30 +426,11 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr, uint64_t cur_tick; trace_hpet_ram_read(addr); + addr &= ~4; - /*address range of all TN regs*/ - if (addr >= 0x100 && addr <= 0x3ff) { - uint8_t timer_id = (addr - 0x100) / 0x20; - HPETTimer *timer = &s->timer[timer_id]; - - if (timer_id > s->num_timers) { - trace_hpet_timer_id_out_of_range(timer_id); - return 0; - } - - switch (addr & 0x18) { - case HPET_TN_CFG: // including interrupt capabilities - return timer->config >> shift; - case HPET_TN_CMP: // comparator register - return timer->cmp >> shift; - case HPET_TN_ROUTE: - return timer->fsb >> shift; - default: - trace_hpet_ram_read_invalid(); - break; - } - } else { - switch (addr & ~4) { + /*address range of all global regs*/ + if (addr <= 0xff) { + switch (addr) { case HPET_ID: // including HPET_PERIOD return s->capability >> shift; case HPET_CFG: @@ -484,6 +449,26 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr, trace_hpet_ram_read_invalid(); break; } + } else { + uint8_t timer_id = (addr - 0x100) / 0x20; + HPETTimer *timer = &s->timer[timer_id]; + + if (timer_id > s->num_timers) { + trace_hpet_timer_id_out_of_range(timer_id); + return 0; + } + + switch (addr & 0x1f) { + case HPET_TN_CFG: // including interrupt capabilities + return timer->config >> shift; + case HPET_TN_CMP: // comparator register + return timer->cmp >> shift; + case HPET_TN_ROUTE: + return timer->fsb >> shift; + default: + trace_hpet_ram_read_invalid(); + break; + } } return 0; } @@ -498,9 +483,67 @@ static void hpet_ram_write(void *opaque, hwaddr addr, uint64_t old_val, new_val, cleared; trace_hpet_ram_write(addr, value); + addr &= ~4; - /*address range of all TN regs*/ - if (addr >= 0x100 && addr <= 0x3ff) { + /*address range of all global regs*/ + if (addr <= 0xff) { + switch (addr) { + case HPET_ID: + return; + case HPET_CFG: + old_val = s->config; + new_val = deposit64(old_val, shift, len, value); + new_val = hpet_fixup_reg(new_val, old_val, HPET_CFG_WRITE_MASK); + s->config = new_val; + if (activating_bit(old_val, new_val, HPET_CFG_ENABLE)) { + /* Enable main counter and interrupt generation. */ + s->hpet_offset = + ticks_to_ns(s->hpet_counter) - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + for (i = 0; i < s->num_timers; i++) { + if (timer_enabled(&s->timer[i]) && (s->isr & (1 << i))) { + update_irq(&s->timer[i], 1); + } + hpet_set_timer(&s->timer[i]); + } + } else if (deactivating_bit(old_val, new_val, HPET_CFG_ENABLE)) { + /* Halt main counter and disable interrupt generation. 
*/ + s->hpet_counter = hpet_get_ticks(s); + for (i = 0; i < s->num_timers; i++) { + hpet_del_timer(&s->timer[i]); + } + } + /* i8254 and RTC output pins are disabled + * when HPET is in legacy mode */ + if (activating_bit(old_val, new_val, HPET_CFG_LEGACY)) { + qemu_set_irq(s->pit_enabled, 0); + qemu_irq_lower(s->irqs[0]); + qemu_irq_lower(s->irqs[RTC_ISA_IRQ]); + } else if (deactivating_bit(old_val, new_val, HPET_CFG_LEGACY)) { + qemu_irq_lower(s->irqs[0]); + qemu_set_irq(s->pit_enabled, 1); + qemu_set_irq(s->irqs[RTC_ISA_IRQ], s->rtc_irq_level); + } + break; + case HPET_STATUS: + new_val = value << shift; + cleared = new_val & s->isr; + for (i = 0; i < s->num_timers; i++) { + if (cleared & (1 << i)) { + update_irq(&s->timer[i], 0); + } + } + break; + case HPET_COUNTER: + if (hpet_enabled(s)) { + trace_hpet_ram_write_counter_write_while_enabled(); + } + s->hpet_counter = deposit64(s->hpet_counter, shift, len, value); + break; + default: + trace_hpet_ram_write_invalid(); + break; + } + } else { uint8_t timer_id = (addr - 0x100) / 0x20; HPETTimer *timer = &s->timer[timer_id]; @@ -566,63 +609,6 @@ static void hpet_ram_write(void *opaque, hwaddr addr, break; } return; - } else { - switch (addr & ~4) { - case HPET_ID: - return; - case HPET_CFG: - old_val = s->config; - new_val = deposit64(old_val, shift, len, value); - new_val = hpet_fixup_reg(new_val, old_val, HPET_CFG_WRITE_MASK); - s->config = new_val; - if (activating_bit(old_val, new_val, HPET_CFG_ENABLE)) { - /* Enable main counter and interrupt generation. */ - s->hpet_offset = - ticks_to_ns(s->hpet_counter) - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - for (i = 0; i < s->num_timers; i++) { - if (timer_enabled(&s->timer[i]) && (s->isr & (1 << i))) { - update_irq(&s->timer[i], 1); - } - hpet_set_timer(&s->timer[i]); - } - } else if (deactivating_bit(old_val, new_val, HPET_CFG_ENABLE)) { - /* Halt main counter and disable interrupt generation. 
*/ - s->hpet_counter = hpet_get_ticks(s); - for (i = 0; i < s->num_timers; i++) { - hpet_del_timer(&s->timer[i]); - } - } - /* i8254 and RTC output pins are disabled - * when HPET is in legacy mode */ - if (activating_bit(old_val, new_val, HPET_CFG_LEGACY)) { - qemu_set_irq(s->pit_enabled, 0); - qemu_irq_lower(s->irqs[0]); - qemu_irq_lower(s->irqs[RTC_ISA_IRQ]); - } else if (deactivating_bit(old_val, new_val, HPET_CFG_LEGACY)) { - qemu_irq_lower(s->irqs[0]); - qemu_set_irq(s->pit_enabled, 1); - qemu_set_irq(s->irqs[RTC_ISA_IRQ], s->rtc_irq_level); - } - break; - case HPET_STATUS: - new_val = value << shift; - cleared = new_val & s->isr; - for (i = 0; i < s->num_timers; i++) { - if (cleared & (1 << i)) { - update_irq(&s->timer[i], 0); - } - } - break; - case HPET_COUNTER: - if (hpet_enabled(s)) { - trace_hpet_ram_write_counter_write_while_enabled(); - } - s->hpet_counter = deposit64(s->hpet_counter, shift, len, value); - break; - default: - trace_hpet_ram_write_invalid(); - break; - } } } @@ -665,8 +651,8 @@ static void hpet_reset(DeviceState *d) s->hpet_counter = 0ULL; s->hpet_offset = 0ULL; s->config = 0ULL; - hpet_cfg.hpet[s->hpet_id].event_timer_block_id = (uint32_t)s->capability; - hpet_cfg.hpet[s->hpet_id].address = sbd->mmio[0].addr; + hpet_fw_cfg.hpet[s->hpet_id].event_timer_block_id = (uint32_t)s->capability; + hpet_fw_cfg.hpet[s->hpet_id].address = sbd->mmio[0].addr; /* to document that the RTC lowers its output on reset as well */ s->rtc_irq_level = 0; @@ -705,30 +691,31 @@ static void hpet_realize(DeviceState *dev, Error **errp) int i; HPETTimer *timer; + if (s->num_timers < HPET_MIN_TIMERS || s->num_timers > HPET_MAX_TIMERS) { + error_setg(errp, "hpet.num_timers must be between %d and %d", + HPET_MIN_TIMERS, HPET_MAX_TIMERS); + return; + } if (!s->intcap) { - warn_report("Hpet's intcap not initialized"); + error_setg(errp, "hpet.hpet-intcap not initialized"); + return; } - if (hpet_cfg.count == UINT8_MAX) { + if (hpet_fw_cfg.count == UINT8_MAX) { /* first instance */ - hpet_cfg.count = 0; + hpet_fw_cfg.count = 0; } - if (hpet_cfg.count == 8) { - error_setg(errp, "Only 8 instances of HPET is allowed"); + if (hpet_fw_cfg.count == 8) { + error_setg(errp, "Only 8 instances of HPET are allowed"); return; } - s->hpet_id = hpet_cfg.count++; + s->hpet_id = hpet_fw_cfg.count++; for (i = 0; i < HPET_NUM_IRQ_ROUTES; i++) { sysbus_init_irq(sbd, &s->irqs[i]); } - if (s->num_timers < HPET_MIN_TIMERS) { - s->num_timers = HPET_MIN_TIMERS; - } else if (s->num_timers > HPET_MAX_TIMERS) { - s->num_timers = HPET_MAX_TIMERS; - } for (i = 0; i < HPET_MAX_TIMERS; i++) { timer = &s->timer[i]; timer->qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, hpet_timer, timer); @@ -736,7 +723,7 @@ static void hpet_realize(DeviceState *dev, Error **errp) timer->state = s; } - /* 64-bit main counter; LegacyReplacementRoute. */ + /* 64-bit General Capabilities and ID Register; LegacyReplacementRoute. 
*/ s->capability = 0x8086a001ULL; s->capability |= (s->num_timers - 1) << HPET_ID_NUM_TIM_SHIFT; s->capability |= ((uint64_t)(HPET_CLK_PERIOD * FS_PER_NS) << 32); @@ -745,20 +732,19 @@ static void hpet_realize(DeviceState *dev, Error **errp) qdev_init_gpio_out(dev, &s->pit_enabled, 1); } -static Property hpet_device_properties[] = { +static const Property hpet_device_properties[] = { DEFINE_PROP_UINT8("timers", HPETState, num_timers, HPET_MIN_TIMERS), DEFINE_PROP_BIT("msi", HPETState, flags, HPET_MSI_SUPPORT, false), DEFINE_PROP_UINT32(HPET_INTCAP, HPETState, intcap, 0), DEFINE_PROP_BOOL("hpet-offset-saved", HPETState, hpet_offset_saved, true), - DEFINE_PROP_END_OF_LIST(), }; -static void hpet_device_class_init(ObjectClass *klass, void *data) +static void hpet_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = hpet_realize; - dc->reset = hpet_reset; + device_class_set_legacy_reset(dc, hpet_reset); dc->vmsd = &vmstate_hpet; device_class_set_props(dc, hpet_device_properties); } diff --git a/hw/timer/i8254.c b/hw/timer/i8254.c index c235496fc9c..4b25c487f79 100644 --- a/hw/timer/i8254.c +++ b/hw/timer/i8254.c @@ -350,7 +350,7 @@ static void pit_realizefn(DeviceState *dev, Error **errp) pc->parent_realize(dev, errp); } -static void pit_class_initfn(ObjectClass *klass, void *data) +static void pit_class_initfn(ObjectClass *klass, const void *data) { PITClass *pc = PIT_CLASS(klass); PITCommonClass *k = PIT_COMMON_CLASS(klass); @@ -360,7 +360,7 @@ static void pit_class_initfn(ObjectClass *klass, void *data) k->set_channel_gate = pit_set_channel_gate; k->get_channel_info = pit_get_channel_info_common; k->post_load = pit_post_load; - dc->reset = pit_reset; + device_class_set_legacy_reset(dc, pit_reset); } static const TypeInfo pit_info = { diff --git a/hw/timer/i8254_common.c b/hw/timer/i8254_common.c index 28fdabc3218..ad091594cde 100644 --- a/hw/timer/i8254_common.c +++ b/hw/timer/i8254_common.c @@ -238,12 +238,11 @@ static const VMStateDescription vmstate_pit_common = { } }; -static Property pit_common_properties[] = { +static const Property pit_common_properties[] = { DEFINE_PROP_UINT32("iobase", PITCommonState, iobase, -1), - DEFINE_PROP_END_OF_LIST(), }; -static void pit_common_class_init(ObjectClass *klass, void *data) +static void pit_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/timer/ibex_timer.c b/hw/timer/ibex_timer.c index 4917388d45a..c7320ef30fa 100644 --- a/hw/timer/ibex_timer.c +++ b/hw/timer/ibex_timer.c @@ -263,9 +263,8 @@ static const VMStateDescription vmstate_ibex_timer = { } }; -static Property ibex_timer_properties[] = { +static const Property ibex_timer_properties[] = { DEFINE_PROP_UINT32("timebase-freq", IbexTimerState, timebase_freq, 10000), - DEFINE_PROP_END_OF_LIST(), }; static void ibex_timer_init(Object *obj) @@ -287,11 +286,11 @@ static void ibex_timer_realize(DeviceState *dev, Error **errp) } -static void ibex_timer_class_init(ObjectClass *klass, void *data) +static void ibex_timer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = ibex_timer_reset; + device_class_set_legacy_reset(dc, ibex_timer_reset); dc->vmsd = &vmstate_ibex_timer; dc->realize = ibex_timer_realize; device_class_set_props(dc, ibex_timer_properties); diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c index bd625203aaf..6123321c352 100644 --- a/hw/timer/imx_epit.c +++ b/hw/timer/imx_epit.c @@ -427,12 +427,12 @@ static 
void imx_epit_dev_reset(DeviceState *dev) imx_epit_reset(s, true); } -static void imx_epit_class_init(ObjectClass *klass, void *data) +static void imx_epit_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = imx_epit_realize; - dc->reset = imx_epit_dev_reset; + device_class_set_legacy_reset(dc, imx_epit_dev_reset); dc->vmsd = &vmstate_imx_timer_epit; dc->desc = "i.MX periodic timer"; } diff --git a/hw/timer/imx_gpt.c b/hw/timer/imx_gpt.c index a8edaec8673..8c7cbfdeac3 100644 --- a/hw/timer/imx_gpt.c +++ b/hw/timer/imx_gpt.c @@ -18,18 +18,7 @@ #include "migration/vmstate.h" #include "qemu/module.h" #include "qemu/log.h" - -#ifndef DEBUG_IMX_GPT -#define DEBUG_IMX_GPT 0 -#endif - -#define DPRINTF(fmt, args...) \ - do { \ - if (DEBUG_IMX_GPT) { \ - fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_GPT, \ - __func__, ##args); \ - } \ - } while (0) +#include "trace.h" static const char *imx_gpt_reg_name(uint32_t reg) { @@ -137,6 +126,17 @@ static const IMXClk imx7_gpt_clocks[] = { CLK_NONE, /* 111 not defined */ }; +static const IMXClk imx8mp_gpt_clocks[] = { + CLK_NONE, /* 000 No clock source */ + CLK_IPG, /* 001 ipg_clk, 532MHz */ + CLK_IPG_HIGH, /* 010 ipg_clk_highfreq */ + CLK_EXT, /* 011 External clock */ + CLK_32k, /* 100 ipg_clk_32k */ + CLK_HIGH, /* 101 ipg_clk_16M */ + CLK_NONE, /* 110 not defined */ + CLK_NONE, /* 111 not defined */ +}; + /* Must be called from within ptimer_transaction_begin/commit block */ static void imx_gpt_set_freq(IMXGPTState *s) { @@ -145,7 +145,7 @@ static void imx_gpt_set_freq(IMXGPTState *s) s->freq = imx_ccm_get_clock_frequency(s->ccm, s->clocks[clksrc]) / (1 + s->pr); - DPRINTF("Setting clksrc %d to frequency %d\n", clksrc, s->freq); + trace_imx_gpt_set_freq(clksrc, s->freq); if (s->freq) { ptimer_set_freq(s->timer, s->freq); @@ -317,7 +317,7 @@ static uint64_t imx_gpt_read(void *opaque, hwaddr offset, unsigned size) break; } - DPRINTF("(%s) = 0x%08x\n", imx_gpt_reg_name(offset >> 2), reg_value); + trace_imx_gpt_read(imx_gpt_reg_name(offset >> 2), reg_value); return reg_value; } @@ -384,8 +384,7 @@ static void imx_gpt_write(void *opaque, hwaddr offset, uint64_t value, IMXGPTState *s = IMX_GPT(opaque); uint32_t oldreg; - DPRINTF("(%s, value = 0x%08x)\n", imx_gpt_reg_name(offset >> 2), - (uint32_t)value); + trace_imx_gpt_write(imx_gpt_reg_name(offset >> 2), (uint32_t)value); switch (offset >> 2) { case 0: @@ -485,7 +484,7 @@ static void imx_gpt_timeout(void *opaque) { IMXGPTState *s = IMX_GPT(opaque); - DPRINTF("\n"); + trace_imx_gpt_timeout(); s->sr |= s->next_int; s->next_int = 0; @@ -519,12 +518,12 @@ static void imx_gpt_realize(DeviceState *dev, Error **errp) s->timer = ptimer_init(imx_gpt_timeout, s, PTIMER_POLICY_LEGACY); } -static void imx_gpt_class_init(ObjectClass *klass, void *data) +static void imx_gpt_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = imx_gpt_realize; - dc->reset = imx_gpt_reset; + device_class_set_legacy_reset(dc, imx_gpt_reset); dc->vmsd = &vmstate_imx_timer_gpt; dc->desc = "i.MX general timer"; } @@ -564,6 +563,13 @@ static void imx7_gpt_init(Object *obj) s->clocks = imx7_gpt_clocks; } +static void imx8mp_gpt_init(Object *obj) +{ + IMXGPTState *s = IMX_GPT(obj); + + s->clocks = imx8mp_gpt_clocks; +} + static const TypeInfo imx25_gpt_info = { .name = TYPE_IMX25_GPT, .parent = TYPE_SYS_BUS_DEVICE, @@ -596,6 +602,12 @@ static const TypeInfo imx7_gpt_info = { .instance_init = imx7_gpt_init, }; +static const TypeInfo imx8mp_gpt_info = { 
+ .name = TYPE_IMX8MP_GPT, + .parent = TYPE_IMX25_GPT, + .instance_init = imx8mp_gpt_init, +}; + static void imx_gpt_register_types(void) { type_register_static(&imx25_gpt_info); @@ -603,6 +615,7 @@ static void imx_gpt_register_types(void) type_register_static(&imx6_gpt_info); type_register_static(&imx6ul_gpt_info); type_register_static(&imx7_gpt_info); + type_register_static(&imx8mp_gpt_info); } type_init(imx_gpt_register_types) diff --git a/hw/timer/meson.build b/hw/timer/meson.build index 80427852e02..178321c029c 100644 --- a/hw/timer/meson.build +++ b/hw/timer/meson.build @@ -10,11 +10,10 @@ system_ss.add(when: 'CONFIG_CMSDK_APB_TIMER', if_true: files('cmsdk-apb-timer.c' system_ss.add(when: 'CONFIG_RENESAS_TMR', if_true: files('renesas_tmr.c')) system_ss.add(when: 'CONFIG_RENESAS_CMT', if_true: files('renesas_cmt.c')) system_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic-timer.c')) -system_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_timer.c')) system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_mct.c')) system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_pwm.c')) system_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_gptimer.c')) -system_ss.add(when: 'CONFIG_HPET', if_true: files('hpet.c')) +system_ss.add(when: 'CONFIG_HPET_C', if_true: files('hpet.c')) system_ss.add(when: 'CONFIG_I8254', if_true: files('i8254_common.c', 'i8254.c')) system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_epit.c')) system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_gpt.c')) @@ -22,9 +21,7 @@ system_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_gictimer.c')) system_ss.add(when: 'CONFIG_MSF2', if_true: files('mss-timer.c')) system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_timer.c')) system_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_timer.c')) -system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_gptimer.c')) -system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_synctimer.c')) -system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_timer.c')) +system_ss.add(when: 'CONFIG_PXA2XX_TIMER', if_true: files('pxa2xx_timer.c')) system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_systmr.c')) system_ss.add(when: 'CONFIG_SH_TIMER', if_true: files('sh_timer.c')) system_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_timer.c')) diff --git a/hw/timer/mss-timer.c b/hw/timer/mss-timer.c index b66aed56ead..2ce821178b6 100644 --- a/hw/timer/mss-timer.c +++ b/hw/timer/mss-timer.c @@ -279,14 +279,13 @@ static const VMStateDescription vmstate_mss_timer = { } }; -static Property mss_timer_properties[] = { +static const Property mss_timer_properties[] = { /* Libero GUI shows 100Mhz as default for clocks */ DEFINE_PROP_UINT32("clock-frequency", MSSTimerState, freq_hz, 100 * 1000000), - DEFINE_PROP_END_OF_LIST(), }; -static void mss_timer_class_init(ObjectClass *klass, void *data) +static void mss_timer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/timer/npcm7xx_timer.c b/hw/timer/npcm7xx_timer.c index c55ba022353..6a116ad54b7 100644 --- a/hw/timer/npcm7xx_timer.c +++ b/hw/timer/npcm7xx_timer.c @@ -689,7 +689,7 @@ static const VMStateDescription vmstate_npcm7xx_timer_ctrl = { }, }; -static void npcm7xx_timer_class_init(ObjectClass *klass, void *data) +static void npcm7xx_timer_class_init(ObjectClass *klass, const void *data) { ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/timer/nrf51_timer.c b/hw/timer/nrf51_timer.c 
index a33166a8817..e228fdebc34 100644 --- a/hw/timer/nrf51_timer.c +++ b/hw/timer/nrf51_timer.c @@ -379,16 +379,15 @@ static const VMStateDescription vmstate_nrf51_timer = { } }; -static Property nrf51_timer_properties[] = { +static const Property nrf51_timer_properties[] = { DEFINE_PROP_UINT8("id", NRF51TimerState, id, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void nrf51_timer_class_init(ObjectClass *klass, void *data) +static void nrf51_timer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = nrf51_timer_reset; + device_class_set_legacy_reset(dc, nrf51_timer_reset); dc->vmsd = &vmstate_nrf51_timer; device_class_set_props(dc, nrf51_timer_properties); } diff --git a/hw/timer/omap_gptimer.c b/hw/timer/omap_gptimer.c deleted file mode 100644 index 34e6af7aff5..00000000000 --- a/hw/timer/omap_gptimer.c +++ /dev/null @@ -1,512 +0,0 @@ -/* - * TI OMAP2 general purpose timers emulation. - * - * Copyright (C) 2007-2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) any later version of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - */ - -#include "qemu/osdep.h" -#include "hw/irq.h" -#include "qemu/timer.h" -#include "hw/arm/omap.h" - -/* GP timers */ -struct omap_gp_timer_s { - MemoryRegion iomem; - qemu_irq irq; - qemu_irq wkup; - qemu_irq in; - qemu_irq out; - omap_clk clk; - QEMUTimer *timer; - QEMUTimer *match; - struct omap_target_agent_s *ta; - - int in_val; - int out_val; - int64_t time; - int64_t rate; - int64_t ticks_per_sec; - - int16_t config; - int status; - int it_ena; - int wu_ena; - int enable; - int inout; - int capt2; - int pt; - enum { - gpt_trigger_none, gpt_trigger_overflow, gpt_trigger_both - } trigger; - enum { - gpt_capture_none, gpt_capture_rising, - gpt_capture_falling, gpt_capture_both - } capture; - int scpwm; - int ce; - int pre; - int ptv; - int ar; - int st; - int posted; - uint32_t val; - uint32_t load_val; - uint32_t capture_val[2]; - uint32_t match_val; - int capt_num; - - uint16_t writeh; /* LSB */ - uint16_t readh; /* MSB */ -}; - -#define GPT_TCAR_IT (1 << 2) -#define GPT_OVF_IT (1 << 1) -#define GPT_MAT_IT (1 << 0) - -static inline void omap_gp_timer_intr(struct omap_gp_timer_s *timer, int it) -{ - if (timer->it_ena & it) { - if (!timer->status) - qemu_irq_raise(timer->irq); - - timer->status |= it; - /* Or are the status bits set even when masked? - * i.e. is masking applied before or after the status register? 
*/ - } - - if (timer->wu_ena & it) - qemu_irq_pulse(timer->wkup); -} - -static inline void omap_gp_timer_out(struct omap_gp_timer_s *timer, int level) -{ - if (!timer->inout && timer->out_val != level) { - timer->out_val = level; - qemu_set_irq(timer->out, level); - } -} - -static inline uint32_t omap_gp_timer_read(struct omap_gp_timer_s *timer) -{ - uint64_t distance; - - if (timer->st && timer->rate) { - distance = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - timer->time; - distance = muldiv64(distance, timer->rate, timer->ticks_per_sec); - - if (distance >= 0xffffffff - timer->val) - return 0xffffffff; - else - return timer->val + distance; - } else - return timer->val; -} - -static inline void omap_gp_timer_sync(struct omap_gp_timer_s *timer) -{ - if (timer->st) { - timer->val = omap_gp_timer_read(timer); - timer->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - } -} - -static inline void omap_gp_timer_update(struct omap_gp_timer_s *timer) -{ - int64_t expires, matches; - - if (timer->st && timer->rate) { - expires = muldiv64(0x100000000ll - timer->val, - timer->ticks_per_sec, timer->rate); - timer_mod(timer->timer, timer->time + expires); - - if (timer->ce && timer->match_val >= timer->val) { - matches = muldiv64(timer->ticks_per_sec, - timer->match_val - timer->val, timer->rate); - timer_mod(timer->match, timer->time + matches); - } else - timer_del(timer->match); - } else { - timer_del(timer->timer); - timer_del(timer->match); - omap_gp_timer_out(timer, timer->scpwm); - } -} - -static inline void omap_gp_timer_trigger(struct omap_gp_timer_s *timer) -{ - if (timer->pt) - /* TODO in overflow-and-match mode if the first event to - * occur is the match, don't toggle. */ - omap_gp_timer_out(timer, !timer->out_val); - else - /* TODO inverted pulse on timer->out_val == 1? */ - qemu_irq_pulse(timer->out); -} - -static void omap_gp_timer_tick(void *opaque) -{ - struct omap_gp_timer_s *timer = opaque; - - if (!timer->ar) { - timer->st = 0; - timer->val = 0; - } else { - timer->val = timer->load_val; - timer->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - } - - if (timer->trigger == gpt_trigger_overflow || - timer->trigger == gpt_trigger_both) - omap_gp_timer_trigger(timer); - - omap_gp_timer_intr(timer, GPT_OVF_IT); - omap_gp_timer_update(timer); -} - -static void omap_gp_timer_match(void *opaque) -{ - struct omap_gp_timer_s *timer = opaque; - - if (timer->trigger == gpt_trigger_both) - omap_gp_timer_trigger(timer); - - omap_gp_timer_intr(timer, GPT_MAT_IT); -} - -static void omap_gp_timer_input(void *opaque, int line, int on) -{ - struct omap_gp_timer_s *s = opaque; - int trigger; - - switch (s->capture) { - default: - case gpt_capture_none: - trigger = 0; - break; - case gpt_capture_rising: - trigger = !s->in_val && on; - break; - case gpt_capture_falling: - trigger = s->in_val && !on; - break; - case gpt_capture_both: - trigger = (s->in_val == !on); - break; - } - s->in_val = on; - - if (s->inout && trigger && s->capt_num < 2) { - s->capture_val[s->capt_num] = omap_gp_timer_read(s); - - if (s->capt2 == s->capt_num ++) - omap_gp_timer_intr(s, GPT_TCAR_IT); - } -} - -static void omap_gp_timer_clk_update(void *opaque, int line, int on) -{ - struct omap_gp_timer_s *timer = opaque; - - omap_gp_timer_sync(timer); - timer->rate = on ? 
omap_clk_getrate(timer->clk) : 0; - omap_gp_timer_update(timer); -} - -static void omap_gp_timer_clk_setup(struct omap_gp_timer_s *timer) -{ - omap_clk_adduser(timer->clk, - qemu_allocate_irq(omap_gp_timer_clk_update, timer, 0)); - timer->rate = omap_clk_getrate(timer->clk); -} - -void omap_gp_timer_reset(struct omap_gp_timer_s *s) -{ - s->config = 0x000; - s->status = 0; - s->it_ena = 0; - s->wu_ena = 0; - s->inout = 0; - s->capt2 = 0; - s->capt_num = 0; - s->pt = 0; - s->trigger = gpt_trigger_none; - s->capture = gpt_capture_none; - s->scpwm = 0; - s->ce = 0; - s->pre = 0; - s->ptv = 0; - s->ar = 0; - s->st = 0; - s->posted = 1; - s->val = 0x00000000; - s->load_val = 0x00000000; - s->capture_val[0] = 0x00000000; - s->capture_val[1] = 0x00000000; - s->match_val = 0x00000000; - omap_gp_timer_update(s); -} - -static uint32_t omap_gp_timer_readw(void *opaque, hwaddr addr) -{ - struct omap_gp_timer_s *s = opaque; - - switch (addr) { - case 0x00: /* TIDR */ - return 0x21; - - case 0x10: /* TIOCP_CFG */ - return s->config; - - case 0x14: /* TISTAT */ - /* ??? When's this bit reset? */ - return 1; /* RESETDONE */ - - case 0x18: /* TISR */ - return s->status; - - case 0x1c: /* TIER */ - return s->it_ena; - - case 0x20: /* TWER */ - return s->wu_ena; - - case 0x24: /* TCLR */ - return (s->inout << 14) | - (s->capt2 << 13) | - (s->pt << 12) | - (s->trigger << 10) | - (s->capture << 8) | - (s->scpwm << 7) | - (s->ce << 6) | - (s->pre << 5) | - (s->ptv << 2) | - (s->ar << 1) | - (s->st << 0); - - case 0x28: /* TCRR */ - return omap_gp_timer_read(s); - - case 0x2c: /* TLDR */ - return s->load_val; - - case 0x30: /* TTGR */ - return 0xffffffff; - - case 0x34: /* TWPS */ - return 0x00000000; /* No posted writes pending. */ - - case 0x38: /* TMAR */ - return s->match_val; - - case 0x3c: /* TCAR1 */ - return s->capture_val[0]; - - case 0x40: /* TSICR */ - return s->posted << 2; - - case 0x44: /* TCAR2 */ - return s->capture_val[1]; - } - - OMAP_BAD_REG(addr); - return 0; -} - -static uint32_t omap_gp_timer_readh(void *opaque, hwaddr addr) -{ - struct omap_gp_timer_s *s = opaque; - uint32_t ret; - - if (addr & 2) - return s->readh; - else { - ret = omap_gp_timer_readw(opaque, addr); - s->readh = ret >> 16; - return ret & 0xffff; - } -} - -static void omap_gp_timer_write(void *opaque, hwaddr addr, uint32_t value) -{ - struct omap_gp_timer_s *s = opaque; - - switch (addr) { - case 0x00: /* TIDR */ - case 0x14: /* TISTAT */ - case 0x34: /* TWPS */ - case 0x3c: /* TCAR1 */ - case 0x44: /* TCAR2 */ - OMAP_RO_REG(addr); - break; - - case 0x10: /* TIOCP_CFG */ - s->config = value & 0x33d; - if (((value >> 3) & 3) == 3) /* IDLEMODE */ - fprintf(stderr, "%s: illegal IDLEMODE value in TIOCP_CFG\n", - __func__); - if (value & 2) /* SOFTRESET */ - omap_gp_timer_reset(s); - break; - - case 0x18: /* TISR */ - if (value & GPT_TCAR_IT) - s->capt_num = 0; - if (s->status && !(s->status &= ~value)) - qemu_irq_lower(s->irq); - break; - - case 0x1c: /* TIER */ - s->it_ena = value & 7; - break; - - case 0x20: /* TWER */ - s->wu_ena = value & 7; - break; - - case 0x24: /* TCLR */ - omap_gp_timer_sync(s); - s->inout = (value >> 14) & 1; - s->capt2 = (value >> 13) & 1; - s->pt = (value >> 12) & 1; - s->trigger = (value >> 10) & 3; - if (s->capture == gpt_capture_none && - ((value >> 8) & 3) != gpt_capture_none) - s->capt_num = 0; - s->capture = (value >> 8) & 3; - s->scpwm = (value >> 7) & 1; - s->ce = (value >> 6) & 1; - s->pre = (value >> 5) & 1; - s->ptv = (value >> 2) & 7; - s->ar = (value >> 1) & 1; - s->st = (value >> 0) & 
1; - if (s->inout && s->trigger != gpt_trigger_none) - fprintf(stderr, "%s: GP timer pin must be an output " - "for this trigger mode\n", __func__); - if (!s->inout && s->capture != gpt_capture_none) - fprintf(stderr, "%s: GP timer pin must be an input " - "for this capture mode\n", __func__); - if (s->trigger == gpt_trigger_none) - omap_gp_timer_out(s, s->scpwm); - /* TODO: make sure this doesn't overflow 32-bits */ - s->ticks_per_sec = NANOSECONDS_PER_SECOND << (s->pre ? s->ptv + 1 : 0); - omap_gp_timer_update(s); - break; - - case 0x28: /* TCRR */ - s->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - s->val = value; - omap_gp_timer_update(s); - break; - - case 0x2c: /* TLDR */ - s->load_val = value; - break; - - case 0x30: /* TTGR */ - s->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - s->val = s->load_val; - omap_gp_timer_update(s); - break; - - case 0x38: /* TMAR */ - omap_gp_timer_sync(s); - s->match_val = value; - omap_gp_timer_update(s); - break; - - case 0x40: /* TSICR */ - s->posted = (value >> 2) & 1; - if (value & 2) /* How much exactly are we supposed to reset? */ - omap_gp_timer_reset(s); - break; - - default: - OMAP_BAD_REG(addr); - } -} - -static void omap_gp_timer_writeh(void *opaque, hwaddr addr, uint32_t value) -{ - struct omap_gp_timer_s *s = opaque; - - if (addr & 2) - omap_gp_timer_write(opaque, addr, (value << 16) | s->writeh); - else - s->writeh = (uint16_t) value; -} - -static uint64_t omap_gp_timer_readfn(void *opaque, hwaddr addr, - unsigned size) -{ - switch (size) { - case 1: - return omap_badwidth_read32(opaque, addr); - case 2: - return omap_gp_timer_readh(opaque, addr); - case 4: - return omap_gp_timer_readw(opaque, addr); - default: - g_assert_not_reached(); - } -} - -static void omap_gp_timer_writefn(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - switch (size) { - case 1: - omap_badwidth_write32(opaque, addr, value); - break; - case 2: - omap_gp_timer_writeh(opaque, addr, value); - break; - case 4: - omap_gp_timer_write(opaque, addr, value); - break; - default: - g_assert_not_reached(); - } -} - -static const MemoryRegionOps omap_gp_timer_ops = { - .read = omap_gp_timer_readfn, - .write = omap_gp_timer_writefn, - .valid.min_access_size = 1, - .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -struct omap_gp_timer_s *omap_gp_timer_init(struct omap_target_agent_s *ta, - qemu_irq irq, omap_clk fclk, omap_clk iclk) -{ - struct omap_gp_timer_s *s = g_new0(struct omap_gp_timer_s, 1); - - s->ta = ta; - s->irq = irq; - s->clk = fclk; - s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_gp_timer_tick, s); - s->match = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_gp_timer_match, s); - s->in = qemu_allocate_irq(omap_gp_timer_input, s, 0); - omap_gp_timer_reset(s); - omap_gp_timer_clk_setup(s); - - memory_region_init_io(&s->iomem, NULL, &omap_gp_timer_ops, s, "omap.gptimer", - omap_l4_region_size(ta, 0)); - omap_l4_attach(ta, 0, &s->iomem); - - return s; -} diff --git a/hw/timer/omap_synctimer.c b/hw/timer/omap_synctimer.c deleted file mode 100644 index d93a9344ede..00000000000 --- a/hw/timer/omap_synctimer.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * TI OMAP2 32kHz sync timer emulation. - * - * Copyright (C) 2007-2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) any later version of the License. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - */ -#include "qemu/osdep.h" -#include "qemu/timer.h" -#include "hw/arm/omap.h" -struct omap_synctimer_s { - MemoryRegion iomem; - uint32_t val; - uint16_t readh; -}; - -/* 32-kHz Sync Timer of the OMAP2 */ -static uint32_t omap_synctimer_read(struct omap_synctimer_s *s) { - return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 0x8000, - NANOSECONDS_PER_SECOND); -} - -void omap_synctimer_reset(struct omap_synctimer_s *s) -{ - s->val = omap_synctimer_read(s); -} - -static uint32_t omap_synctimer_readw(void *opaque, hwaddr addr) -{ - struct omap_synctimer_s *s = opaque; - - switch (addr) { - case 0x00: /* 32KSYNCNT_REV */ - return 0x21; - - case 0x10: /* CR */ - return omap_synctimer_read(s) - s->val; - } - - OMAP_BAD_REG(addr); - return 0; -} - -static uint32_t omap_synctimer_readh(void *opaque, hwaddr addr) -{ - struct omap_synctimer_s *s = opaque; - uint32_t ret; - - if (addr & 2) - return s->readh; - else { - ret = omap_synctimer_readw(opaque, addr); - s->readh = ret >> 16; - return ret & 0xffff; - } -} - -static uint64_t omap_synctimer_readfn(void *opaque, hwaddr addr, - unsigned size) -{ - switch (size) { - case 1: - return omap_badwidth_read32(opaque, addr); - case 2: - return omap_synctimer_readh(opaque, addr); - case 4: - return omap_synctimer_readw(opaque, addr); - default: - g_assert_not_reached(); - } -} - -static void omap_synctimer_writefn(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - OMAP_BAD_REG(addr); -} - -static const MemoryRegionOps omap_synctimer_ops = { - .read = omap_synctimer_readfn, - .write = omap_synctimer_writefn, - .valid.min_access_size = 1, - .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -struct omap_synctimer_s *omap_synctimer_init(struct omap_target_agent_s *ta, - struct omap_mpu_state_s *mpu, omap_clk fclk, omap_clk iclk) -{ - struct omap_synctimer_s *s = g_malloc0(sizeof(*s)); - - omap_synctimer_reset(s); - memory_region_init_io(&s->iomem, NULL, &omap_synctimer_ops, s, "omap.synctimer", - omap_l4_region_size(ta, 0)); - omap_l4_attach(ta, 0, &s->iomem); - - return s; -} diff --git a/hw/timer/pxa2xx_timer.c b/hw/timer/pxa2xx_timer.c index 6479ab1a8b3..6d4ac31574e 100644 --- a/hw/timer/pxa2xx_timer.c +++ b/hw/timer/pxa2xx_timer.c @@ -11,51 +11,49 @@ #include "hw/irq.h" #include "hw/qdev-properties.h" #include "qemu/timer.h" -#include "sysemu/runstate.h" -#include "hw/arm/pxa.h" +#include "system/runstate.h" #include "hw/sysbus.h" #include "migration/vmstate.h" #include "qemu/log.h" #include "qemu/module.h" #include "qom/object.h" -#include "sysemu/watchdog.h" - -#define OSMR0 0x00 -#define OSMR1 0x04 -#define OSMR2 0x08 -#define OSMR3 0x0c -#define OSMR4 0x80 -#define OSMR5 0x84 -#define OSMR6 0x88 -#define OSMR7 0x8c -#define OSMR8 0x90 -#define OSMR9 0x94 -#define OSMR10 0x98 -#define OSMR11 0x9c -#define OSCR 0x10 /* OS Timer Count */ -#define OSCR4 0x40 -#define OSCR5 0x44 -#define OSCR6 0x48 -#define OSCR7 0x4c -#define OSCR8 0x50 -#define OSCR9 0x54 -#define OSCR10 0x58 -#define OSCR11 0x5c -#define OSSR 0x14 /* Timer status register */ -#define OWER 0x18 -#define OIER 0x1c /* Interrupt enable register 3-0 to E3-E0 */ -#define OMCR4 0xc0 /* OS 
Match Control registers */ -#define OMCR5 0xc4 -#define OMCR6 0xc8 -#define OMCR7 0xcc -#define OMCR8 0xd0 -#define OMCR9 0xd4 -#define OMCR10 0xd8 -#define OMCR11 0xdc -#define OSNR 0x20 - -#define PXA25X_FREQ 3686400 /* 3.6864 MHz */ -#define PXA27X_FREQ 3250000 /* 3.25 MHz */ +#include "system/watchdog.h" + +#define OSMR0 0x00 +#define OSMR1 0x04 +#define OSMR2 0x08 +#define OSMR3 0x0c +#define OSMR4 0x80 +#define OSMR5 0x84 +#define OSMR6 0x88 +#define OSMR7 0x8c +#define OSMR8 0x90 +#define OSMR9 0x94 +#define OSMR10 0x98 +#define OSMR11 0x9c +#define OSCR 0x10 /* OS Timer Count */ +#define OSCR4 0x40 +#define OSCR5 0x44 +#define OSCR6 0x48 +#define OSCR7 0x4c +#define OSCR8 0x50 +#define OSCR9 0x54 +#define OSCR10 0x58 +#define OSCR11 0x5c +#define OSSR 0x14 /* Timer status register */ +#define OWER 0x18 +#define OIER 0x1c /* Interrupt enable register 3-0 to E3-E0 */ +#define OMCR4 0xc0 /* OS Match Control registers */ +#define OMCR5 0xc4 +#define OMCR6 0xc8 +#define OMCR7 0xcc +#define OMCR8 0xd0 +#define OMCR9 0xd4 +#define OMCR10 0xd8 +#define OMCR11 0xdc +#define OSNR 0x20 + +#define PXA25X_FREQ 3686400 /* 3.6864 MHz */ static int pxa2xx_timer4_freq[8] = { [0] = 0, @@ -108,7 +106,7 @@ struct PXA2xxTimerInfo { PXA2xxTimer4 tm4[8]; }; -#define PXA2XX_TIMER_HAVE_TM4 0 +#define PXA2XX_TIMER_HAVE_TM4 0 static inline int pxa2xx_timer_has_tm4(PXA2xxTimerInfo *s) { @@ -232,7 +230,7 @@ static uint64_t pxa2xx_timer_read(void *opaque, hwaddr offset, NANOSECONDS_PER_SECOND); case OIER: return s->irq_enabled; - case OSSR: /* Status register */ + case OSSR: /* Status register */ return s->events; case OWER: return s->reset3; @@ -338,7 +336,7 @@ static void pxa2xx_timer_write(void *opaque, hwaddr offset, case OIER: s->irq_enabled = value & 0xfff; break; - case OSSR: /* Status register */ + case OSSR: /* Status register */ value &= s->events; s->events &= ~value; for (i = 0; i < 4; i ++, value >>= 1) @@ -347,7 +345,7 @@ static void pxa2xx_timer_write(void *opaque, hwaddr offset, if (pxa2xx_timer_has_tm4(s) && !(s->events & 0xff0) && value) qemu_irq_lower(s->irq4); break; - case OWER: /* XXX: Reset on OSMR3 match? */ + case OWER: /* XXX: Reset on OSMR3 match? 
*/ s->reset3 = value; break; case OMCR7: tm ++; @@ -551,14 +549,13 @@ static const VMStateDescription vmstate_pxa2xx_timer_regs = { } }; -static Property pxa25x_timer_dev_properties[] = { +static const Property pxa25x_timer_dev_properties[] = { DEFINE_PROP_UINT32("freq", PXA2xxTimerInfo, freq, PXA25X_FREQ), DEFINE_PROP_BIT("tm4", PXA2xxTimerInfo, flags, PXA2XX_TIMER_HAVE_TM4, false), - DEFINE_PROP_END_OF_LIST(), }; -static void pxa25x_timer_dev_class_init(ObjectClass *klass, void *data) +static void pxa25x_timer_dev_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -573,29 +570,7 @@ static const TypeInfo pxa25x_timer_dev_info = { .class_init = pxa25x_timer_dev_class_init, }; -static Property pxa27x_timer_dev_properties[] = { - DEFINE_PROP_UINT32("freq", PXA2xxTimerInfo, freq, PXA27X_FREQ), - DEFINE_PROP_BIT("tm4", PXA2xxTimerInfo, flags, - PXA2XX_TIMER_HAVE_TM4, true), - DEFINE_PROP_END_OF_LIST(), -}; - -static void pxa27x_timer_dev_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->desc = "PXA27x timer"; - device_class_set_props(dc, pxa27x_timer_dev_properties); -} - -static const TypeInfo pxa27x_timer_dev_info = { - .name = "pxa27x-timer", - .parent = TYPE_PXA2XX_TIMER, - .instance_size = sizeof(PXA2xxTimerInfo), - .class_init = pxa27x_timer_dev_class_init, -}; - -static void pxa2xx_timer_class_init(ObjectClass *oc, void *data) +static void pxa2xx_timer_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -616,7 +591,6 @@ static void pxa2xx_timer_register_types(void) { type_register_static(&pxa2xx_timer_type_info); type_register_static(&pxa25x_timer_dev_info); - type_register_static(&pxa27x_timer_dev_info); } type_init(pxa2xx_timer_register_types) diff --git a/hw/timer/renesas_cmt.c b/hw/timer/renesas_cmt.c index 08832932d2a..cdff7f47f16 100644 --- a/hw/timer/renesas_cmt.c +++ b/hw/timer/renesas_cmt.c @@ -253,17 +253,16 @@ static const VMStateDescription vmstate_rcmt = { } }; -static Property rcmt_properties[] = { +static const Property rcmt_properties[] = { DEFINE_PROP_UINT64("input-freq", RCMTState, input_freq, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void rcmt_class_init(ObjectClass *klass, void *data) +static void rcmt_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_rcmt; - dc->reset = rcmt_reset; + device_class_set_legacy_reset(dc, rcmt_reset); device_class_set_props(dc, rcmt_properties); } diff --git a/hw/timer/renesas_tmr.c b/hw/timer/renesas_tmr.c index 1d47d0615a4..95707f2b8c5 100644 --- a/hw/timer/renesas_tmr.c +++ b/hw/timer/renesas_tmr.c @@ -463,17 +463,16 @@ static const VMStateDescription vmstate_rtmr = { } }; -static Property rtmr_properties[] = { +static const Property rtmr_properties[] = { DEFINE_PROP_UINT64("input-freq", RTMRState, input_freq, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void rtmr_class_init(ObjectClass *klass, void *data) +static void rtmr_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_rtmr; - dc->reset = rtmr_reset; + device_class_set_legacy_reset(dc, rtmr_reset); device_class_set_props(dc, rtmr_properties); } diff --git a/hw/timer/sh_timer.c b/hw/timer/sh_timer.c index 77889397669..d4fa32c9d65 100644 --- a/hw/timer/sh_timer.c +++ b/hw/timer/sh_timer.c @@ -9,7 +9,7 @@ */ #include "qemu/osdep.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qemu/log.h" #include "hw/irq.h" #include 
"hw/sh4/sh.h" diff --git a/hw/timer/sifive_pwm.c b/hw/timer/sifive_pwm.c index e8610c37dd3..e85e389f7a5 100644 --- a/hw/timer/sifive_pwm.c +++ b/hw/timer/sifive_pwm.c @@ -404,11 +404,10 @@ static const VMStateDescription vmstate_sifive_pwm = { } }; -static Property sifive_pwm_properties[] = { +static const Property sifive_pwm_properties[] = { /* 0.5Ghz per spec after FSBL */ DEFINE_PROP_UINT64("clock-frequency", struct SiFivePwmState, freq_hz, 500000000ULL), - DEFINE_PROP_END_OF_LIST(), }; static void sifive_pwm_init(Object *obj) @@ -442,11 +441,11 @@ static void sifive_pwm_realize(DeviceState *dev, Error **errp) sifive_pwm_interrupt_3, s); } -static void sifive_pwm_class_init(ObjectClass *klass, void *data) +static void sifive_pwm_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = sifive_pwm_reset; + device_class_set_legacy_reset(dc, sifive_pwm_reset); device_class_set_props(dc, sifive_pwm_properties); dc->vmsd = &vmstate_sifive_pwm; dc->realize = sifive_pwm_realize; diff --git a/hw/timer/slavio_timer.c b/hw/timer/slavio_timer.c index 5507b0145b5..3e071fbdb4c 100644 --- a/hw/timer/slavio_timer.c +++ b/hw/timer/slavio_timer.c @@ -420,16 +420,15 @@ static void slavio_timer_init(Object *obj) } } -static Property slavio_timer_properties[] = { +static const Property slavio_timer_properties[] = { DEFINE_PROP_UINT32("num_cpus", SLAVIO_TIMERState, num_cpus, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void slavio_timer_class_init(ObjectClass *klass, void *data) +static void slavio_timer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = slavio_timer_reset; + device_class_set_legacy_reset(dc, slavio_timer_reset); dc->vmsd = &vmstate_slavio_timer; device_class_set_props(dc, slavio_timer_properties); } diff --git a/hw/timer/sse-counter.c b/hw/timer/sse-counter.c index daceedf964e..31f77acf61e 100644 --- a/hw/timer/sse-counter.c +++ b/hw/timer/sse-counter.c @@ -448,13 +448,13 @@ static const VMStateDescription sse_counter_vmstate = { } }; -static void sse_counter_class_init(ObjectClass *klass, void *data) +static void sse_counter_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = sse_counter_realize; dc->vmsd = &sse_counter_vmstate; - dc->reset = sse_counter_reset; + device_class_set_legacy_reset(dc, sse_counter_reset); } static const TypeInfo sse_counter_info = { diff --git a/hw/timer/sse-timer.c b/hw/timer/sse-timer.c index cb20a9eb79e..866d5eef8af 100644 --- a/hw/timer/sse-timer.c +++ b/hw/timer/sse-timer.c @@ -440,18 +440,17 @@ static const VMStateDescription sse_timer_vmstate = { } }; -static Property sse_timer_properties[] = { +static const Property sse_timer_properties[] = { DEFINE_PROP_LINK("counter", SSETimer, counter, TYPE_SSE_COUNTER, SSECounter *), - DEFINE_PROP_END_OF_LIST(), }; -static void sse_timer_class_init(ObjectClass *klass, void *data) +static void sse_timer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = sse_timer_realize; dc->vmsd = &sse_timer_vmstate; - dc->reset = sse_timer_reset; + device_class_set_legacy_reset(dc, sse_timer_reset); device_class_set_props(dc, sse_timer_properties); } diff --git a/hw/timer/stellaris-gptm.c b/hw/timer/stellaris-gptm.c index f28958cefca..d97b2f8309c 100644 --- a/hw/timer/stellaris-gptm.c +++ b/hw/timer/stellaris-gptm.c @@ -308,7 +308,7 @@ static void stellaris_gptm_realize(DeviceState *dev, Error **errp) s->timer[1] = 
timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[1]); } -static void stellaris_gptm_class_init(ObjectClass *klass, void *data) +static void stellaris_gptm_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/timer/stm32f2xx_timer.c b/hw/timer/stm32f2xx_timer.c index de4208b1a61..be844e7f5a3 100644 --- a/hw/timer/stm32f2xx_timer.c +++ b/hw/timer/stm32f2xx_timer.c @@ -298,10 +298,9 @@ static const VMStateDescription vmstate_stm32f2xx_timer = { } }; -static Property stm32f2xx_timer_properties[] = { +static const Property stm32f2xx_timer_properties[] = { DEFINE_PROP_UINT64("clock-frequency", struct STM32F2XXTimerState, freq_hz, 1000000000), - DEFINE_PROP_END_OF_LIST(), }; static void stm32f2xx_timer_init(Object *obj) @@ -321,11 +320,11 @@ static void stm32f2xx_timer_realize(DeviceState *dev, Error **errp) s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, stm32f2xx_timer_interrupt, s); } -static void stm32f2xx_timer_class_init(ObjectClass *klass, void *data) +static void stm32f2xx_timer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = stm32f2xx_timer_reset; + device_class_set_legacy_reset(dc, stm32f2xx_timer_reset); device_class_set_props(dc, stm32f2xx_timer_properties); dc->vmsd = &vmstate_stm32f2xx_timer; dc->realize = stm32f2xx_timer_realize; diff --git a/hw/timer/trace-events b/hw/timer/trace-events index f48a712801e..c5b6db49f58 100644 --- a/hw/timer/trace-events +++ b/hw/timer/trace-events @@ -31,7 +31,7 @@ aspeed_timer_ctrl_overflow_interrupt(uint8_t i, bool enable) "Timer %" PRIu8 ": aspeed_timer_ctrl_pulse_enable(uint8_t i, bool enable) "Timer %" PRIu8 ": %d" aspeed_timer_set_ctrl2(uint32_t value) "Value: 0x%" PRIx32 aspeed_timer_set_value(int timer, int reg, uint32_t value) "Timer %d register %d: 0x%" PRIx32 -aspeed_timer_read(uint64_t offset, unsigned size, uint64_t value) "From 0x%" PRIx64 ": of size %u: 0x%" PRIx64 +aspeed_timer_read(uint64_t offset, uint64_t value) "From 0x%" PRIx64 ": 0x%" PRIx64 # armv7m_systick.c systick_reload(void) "systick reload" @@ -49,6 +49,12 @@ cmsdk_apb_dualtimer_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK A cmsdk_apb_dualtimer_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB dualtimer write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_dualtimer_reset(void) "CMSDK APB dualtimer: reset" +# imx_gpt.c +imx_gpt_set_freq(uint32_t clksrc, uint32_t freq) "Setting clksrc %u to %u Hz" +imx_gpt_read(const char *name, uint64_t value) "%s -> 0x%08" PRIx64 +imx_gpt_write(const char *name, uint64_t value) "%s <- 0x%08" PRIx64 +imx_gpt_timeout(void) "" + # npcm7xx_timer.c npcm7xx_timer_read(const char *id, uint64_t offset, uint64_t value) " %s offset: 0x%04" PRIx64 " value 0x%08" PRIx64 npcm7xx_timer_write(const char *id, uint64_t offset, uint64_t value) "%s offset: 0x%04" PRIx64 " value 0x%08" PRIx64 diff --git a/hw/timer/xilinx_timer.c b/hw/timer/xilinx_timer.c index 32a9df69e0b..ff4a224d08f 100644 --- a/hw/timer/xilinx_timer.c +++ b/hw/timer/xilinx_timer.c @@ -3,6 +3,9 @@ * * Copyright (c) 2009 Edgar E. Iglesias. 
* + * DS573: https://docs.amd.com/v/u/en-US/xps_timer + * LogiCORE IP XPS Timer/Counter (v1.02a) + * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights @@ -23,10 +26,12 @@ */ #include "qemu/osdep.h" +#include "qapi/error.h" #include "hw/sysbus.h" #include "hw/irq.h" #include "hw/ptimer.h" #include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "qemu/log.h" #include "qemu/module.h" #include "qom/object.h" @@ -69,6 +74,7 @@ struct XpsTimerState { SysBusDevice parent_obj; + EndianMode model_endianness; MemoryRegion mmio; qemu_irq irq; uint8_t one_timer_only; @@ -189,14 +195,21 @@ timer_write(void *opaque, hwaddr addr, timer_update_irq(t); } -static const MemoryRegionOps timer_ops = { - .read = timer_read, - .write = timer_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4 - } +static const MemoryRegionOps timer_ops[2] = { + [0 ... 1] = { + .read = timer_read, + .write = timer_write, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + }, + [0].endianness = DEVICE_LITTLE_ENDIAN, + [1].endianness = DEVICE_BIG_ENDIAN, }; static void timer_hit(void *opaque) @@ -216,6 +229,12 @@ static void xilinx_timer_realize(DeviceState *dev, Error **errp) XpsTimerState *t = XILINX_TIMER(dev); unsigned int i; + if (t->model_endianness == ENDIAN_MODE_UNSPECIFIED) { + error_setg(errp, TYPE_XILINX_TIMER " property 'endianness'" + " must be set to 'big' or 'little'"); + return; + } + /* Init all the ptimers. */ t->timers = g_malloc0(sizeof t->timers[0] * num_timers(t)); for (i = 0; i < num_timers(t); i++) { @@ -229,8 +248,9 @@ static void xilinx_timer_realize(DeviceState *dev, Error **errp) ptimer_transaction_commit(xt->ptimer); } - memory_region_init_io(&t->mmio, OBJECT(t), &timer_ops, t, "xlnx.xps-timer", - R_MAX * 4 * num_timers(t)); + memory_region_init_io(&t->mmio, OBJECT(t), + &timer_ops[t->model_endianness == ENDIAN_MODE_BIG], + t, "xlnx.xps-timer", R_MAX * 4 * num_timers(t)); sysbus_init_mmio(SYS_BUS_DEVICE(dev), &t->mmio); } @@ -242,13 +262,13 @@ static void xilinx_timer_init(Object *obj) sysbus_init_irq(SYS_BUS_DEVICE(obj), &t->irq); } -static Property xilinx_timer_properties[] = { +static const Property xilinx_timer_properties[] = { + DEFINE_PROP_ENDIAN_NODEFAULT("endianness", XpsTimerState, model_endianness), DEFINE_PROP_UINT32("clock-frequency", XpsTimerState, freq_hz, 62 * 1000000), DEFINE_PROP_UINT8("one-timer-only", XpsTimerState, one_timer_only, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void xilinx_timer_class_init(ObjectClass *klass, void *data) +static void xilinx_timer_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/tpm/tpm_crb.c b/hw/tpm/tpm_crb.c index 5cd5a2533b8..bc7a78f898c 100644 --- a/hw/tpm/tpm_crb.c +++ b/hw/tpm/tpm_crb.c @@ -18,15 +18,15 @@ #include "qemu/module.h" #include "qapi/error.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/qdev-properties.h" #include "hw/pci/pci_ids.h" #include "hw/acpi/tpm.h" #include "migration/vmstate.h" -#include "sysemu/tpm_backend.h" -#include "sysemu/tpm_util.h" -#include "sysemu/reset.h" -#include "sysemu/xen.h" +#include "system/tpm_backend.h" +#include "system/tpm_util.h" +#include "system/reset.h" +#include 
"system/xen.h" #include "tpm_prop.h" #include "tpm_ppi.h" #include "trace.h" @@ -226,10 +226,9 @@ static const VMStateDescription vmstate_tpm_crb = { } }; -static Property tpm_crb_properties[] = { +static const Property tpm_crb_properties[] = { DEFINE_PROP_TPMBE("tpmdev", CRBState, tpmbe), DEFINE_PROP_BOOL("ppi", CRBState, ppi_enabled, true), - DEFINE_PROP_END_OF_LIST(), }; static void tpm_crb_reset(void *dev) @@ -316,7 +315,7 @@ static void tpm_crb_realize(DeviceState *dev, Error **errp) } } -static void tpm_crb_class_init(ObjectClass *klass, void *data) +static void tpm_crb_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); TPMIfClass *tc = TPM_IF_CLASS(klass); @@ -338,7 +337,7 @@ static const TypeInfo tpm_crb_info = { .parent = TYPE_DEVICE, .instance_size = sizeof(CRBState), .class_init = tpm_crb_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_TPM_IF }, { } } diff --git a/hw/tpm/tpm_ppi.c b/hw/tpm/tpm_ppi.c index f27ed6c35ed..984d3d10808 100644 --- a/hw/tpm/tpm_ppi.c +++ b/hw/tpm/tpm_ppi.c @@ -14,7 +14,7 @@ #include "qemu/osdep.h" #include "qemu/memalign.h" #include "qapi/error.h" -#include "sysemu/memory_mapping.h" +#include "system/memory_mapping.h" #include "migration/vmstate.h" #include "hw/qdev-core.h" #include "hw/acpi/tpm.h" diff --git a/hw/tpm/tpm_ppi.h b/hw/tpm/tpm_ppi.h index bf5d4a300f0..88f316ee957 100644 --- a/hw/tpm/tpm_ppi.h +++ b/hw/tpm/tpm_ppi.h @@ -12,7 +12,7 @@ #ifndef TPM_TPM_PPI_H #define TPM_TPM_PPI_H -#include "exec/memory.h" +#include "system/memory.h" typedef struct TPMPPI { MemoryRegion ram; diff --git a/hw/tpm/tpm_prop.h b/hw/tpm/tpm_prop.h index bbd4225d667..c4df74805a7 100644 --- a/hw/tpm/tpm_prop.h +++ b/hw/tpm/tpm_prop.h @@ -22,7 +22,7 @@ #ifndef HW_TPM_PROP_H #define HW_TPM_PROP_H -#include "sysemu/tpm_backend.h" +#include "system/tpm_backend.h" #include "hw/qdev-properties.h" extern const PropertyInfo qdev_prop_tpm; diff --git a/hw/tpm/tpm_spapr.c b/hw/tpm/tpm_spapr.c index e084e987e6e..ea608ba4c80 100644 --- a/hw/tpm/tpm_spapr.c +++ b/hw/tpm/tpm_spapr.c @@ -19,8 +19,8 @@ #include "hw/qdev-properties.h" #include "migration/vmstate.h" -#include "sysemu/tpm_backend.h" -#include "sysemu/tpm_util.h" +#include "system/tpm_backend.h" +#include "system/tpm_util.h" #include "tpm_prop.h" #include "hw/ppc/spapr.h" @@ -206,7 +206,6 @@ static int tpm_spapr_do_crq(struct SpaprVioDevice *dev, uint8_t *crq_data) break; default: g_assert_not_reached(); - break; } trace_tpm_spapr_do_crq_get_version(be32_to_cpu(local_crq.data)); spapr_tpm_send_crq(dev, &local_crq); @@ -365,10 +364,9 @@ static const VMStateDescription vmstate_spapr_vtpm = { } }; -static Property tpm_spapr_properties[] = { +static const Property tpm_spapr_properties[] = { DEFINE_SPAPR_PROPERTIES(SpaprTpmState, vdev), DEFINE_PROP_TPMBE("tpmdev", SpaprTpmState, be_driver), - DEFINE_PROP_END_OF_LIST(), }; static void tpm_spapr_realizefn(SpaprVioDevice *dev, Error **errp) @@ -389,7 +387,7 @@ static void tpm_spapr_realizefn(SpaprVioDevice *dev, Error **errp) s->buffer = g_malloc(TPM_SPAPR_BUFFER_MAX); } -static void tpm_spapr_class_init(ObjectClass *klass, void *data) +static void tpm_spapr_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass); @@ -416,7 +414,7 @@ static const TypeInfo tpm_spapr_info = { .parent = TYPE_VIO_SPAPR_DEVICE, .instance_size = sizeof(SpaprTpmState), .class_init = tpm_spapr_class_init, - .interfaces = 
(InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_TPM_IF }, { } } diff --git a/hw/tpm/tpm_tis.h b/hw/tpm/tpm_tis.h index 6f14896b979..184632ff66b 100644 --- a/hw/tpm/tpm_tis.h +++ b/hw/tpm/tpm_tis.h @@ -24,7 +24,7 @@ #ifndef TPM_TPM_TIS_H #define TPM_TPM_TIS_H -#include "sysemu/tpm_backend.h" +#include "system/tpm_backend.h" #include "tpm_ppi.h" #define TPM_TIS_NUM_LOCALITIES 5 /* per spec */ diff --git a/hw/tpm/tpm_tis_common.c b/hw/tpm/tpm_tis_common.c index 1bfa28bfd95..cdd0df11374 100644 --- a/hw/tpm/tpm_tis_common.c +++ b/hw/tpm/tpm_tis_common.c @@ -34,8 +34,8 @@ #include "hw/pci/pci_ids.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" -#include "sysemu/tpm_backend.h" -#include "sysemu/tpm_util.h" +#include "system/tpm_backend.h" +#include "system/tpm_util.h" #include "tpm_ppi.h" #include "trace.h" diff --git a/hw/tpm/tpm_tis_i2c.c b/hw/tpm/tpm_tis_i2c.c index 4bb09655b40..5ce84dc7a45 100644 --- a/hw/tpm/tpm_tis_i2c.c +++ b/hw/tpm/tpm_tis_i2c.c @@ -211,8 +211,6 @@ static inline void tpm_tis_i2c_clear_data(TPMStateI2C *i2cst) i2cst->tis_addr = 0xffffffff; i2cst->reg_name = NULL; memset(i2cst->data, 0, sizeof(i2cst->data)); - - return; } /* Send data to TPM */ @@ -281,8 +279,6 @@ static inline void tpm_tis_i2c_tpm_send(TPMStateI2C *i2cst) tpm_tis_i2c_clear_data(i2cst); } - - return; } /* Callback from TPM to indicate that response is copied */ @@ -491,9 +487,8 @@ static int tpm_tis_i2c_send(I2CSlave *i2c, uint8_t data) return 1; } -static Property tpm_tis_i2c_properties[] = { +static const Property tpm_tis_i2c_properties[] = { DEFINE_PROP_TPMBE("tpmdev", TPMStateI2C, state.be_driver), - DEFINE_PROP_END_OF_LIST(), }; static void tpm_tis_i2c_realizefn(DeviceState *dev, Error **errp) @@ -531,14 +526,14 @@ static void tpm_tis_i2c_reset(DeviceState *dev) return tpm_tis_reset(s); } -static void tpm_tis_i2c_class_init(ObjectClass *klass, void *data) +static void tpm_tis_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); TPMIfClass *tc = TPM_IF_CLASS(klass); dc->realize = tpm_tis_i2c_realizefn; - dc->reset = tpm_tis_i2c_reset; + device_class_set_legacy_reset(dc, tpm_tis_i2c_reset); dc->vmsd = &vmstate_tpm_tis_i2c; device_class_set_props(dc, tpm_tis_i2c_properties); set_bit(DEVICE_CATEGORY_MISC, dc->categories); @@ -557,7 +552,7 @@ static const TypeInfo tpm_tis_i2c_info = { .parent = TYPE_I2C_SLAVE, .instance_size = sizeof(TPMStateI2C), .class_init = tpm_tis_i2c_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_TPM_IF }, { } } diff --git a/hw/tpm/tpm_tis_isa.c b/hw/tpm/tpm_tis_isa.c index 8887b3c9c49..dce83057a94 100644 --- a/hw/tpm/tpm_tis_isa.c +++ b/hw/tpm/tpm_tis_isa.c @@ -91,11 +91,10 @@ static void tpm_tis_isa_reset(DeviceState *dev) return tpm_tis_reset(s); } -static Property tpm_tis_isa_properties[] = { +static const Property tpm_tis_isa_properties[] = { DEFINE_PROP_UINT32("irq", TPMStateISA, state.irq_num, TPM_TIS_IRQ), DEFINE_PROP_TPMBE("tpmdev", TPMStateISA, state.be_driver), DEFINE_PROP_BOOL("ppi", TPMStateISA, state.ppi_enabled, true), - DEFINE_PROP_END_OF_LIST(), }; static void tpm_tis_isa_initfn(Object *obj) @@ -167,7 +166,7 @@ static void build_tpm_tis_isa_aml(AcpiDevAmlIf *adev, Aml *scope) aml_append(scope, dev); } -static void tpm_tis_isa_class_init(ObjectClass *klass, void *data) +static void tpm_tis_isa_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); TPMIfClass *tc = 
TPM_IF_CLASS(klass); @@ -177,7 +176,7 @@ static void tpm_tis_isa_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_tpm_tis_isa; tc->model = TPM_MODEL_TPM_TIS; dc->realize = tpm_tis_isa_realizefn; - dc->reset = tpm_tis_isa_reset; + device_class_set_legacy_reset(dc, tpm_tis_isa_reset); tc->request_completed = tpm_tis_isa_request_completed; tc->get_version = tpm_tis_isa_get_tpm_version; set_bit(DEVICE_CATEGORY_MISC, dc->categories); @@ -190,7 +189,7 @@ static const TypeInfo tpm_tis_isa_info = { .instance_size = sizeof(TPMStateISA), .instance_init = tpm_tis_isa_initfn, .class_init = tpm_tis_isa_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_TPM_IF }, { TYPE_ACPI_DEV_AML_IF }, { } diff --git a/hw/tpm/tpm_tis_sysbus.c b/hw/tpm/tpm_tis_sysbus.c index 941f7f7f62c..2ffa85852ac 100644 --- a/hw/tpm/tpm_tis_sysbus.c +++ b/hw/tpm/tpm_tis_sysbus.c @@ -90,10 +90,9 @@ static void tpm_tis_sysbus_reset(DeviceState *dev) return tpm_tis_reset(s); } -static Property tpm_tis_sysbus_properties[] = { +static const Property tpm_tis_sysbus_properties[] = { DEFINE_PROP_UINT32("irq", TPMStateSysBus, state.irq_num, TPM_TIS_IRQ), DEFINE_PROP_TPMBE("tpmdev", TPMStateSysBus, state.be_driver), - DEFINE_PROP_END_OF_LIST(), }; static void tpm_tis_sysbus_initfn(Object *obj) @@ -125,7 +124,7 @@ static void tpm_tis_sysbus_realizefn(DeviceState *dev, Error **errp) } } -static void tpm_tis_sysbus_class_init(ObjectClass *klass, void *data) +static void tpm_tis_sysbus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); TPMIfClass *tc = TPM_IF_CLASS(klass); @@ -134,8 +133,7 @@ static void tpm_tis_sysbus_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_tpm_tis_sysbus; tc->model = TPM_MODEL_TPM_TIS; dc->realize = tpm_tis_sysbus_realizefn; - dc->user_creatable = true; - dc->reset = tpm_tis_sysbus_reset; + device_class_set_legacy_reset(dc, tpm_tis_sysbus_reset); tc->request_completed = tpm_tis_sysbus_request_completed; tc->get_version = tpm_tis_sysbus_get_tpm_version; set_bit(DEVICE_CATEGORY_MISC, dc->categories); @@ -143,11 +141,11 @@ static void tpm_tis_sysbus_class_init(ObjectClass *klass, void *data) static const TypeInfo tpm_tis_sysbus_info = { .name = TYPE_TPM_TIS_SYSBUS, - .parent = TYPE_SYS_BUS_DEVICE, + .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE, .instance_size = sizeof(TPMStateSysBus), .instance_init = tpm_tis_sysbus_initfn, .class_init = tpm_tis_sysbus_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_TPM_IF }, { } } diff --git a/hw/tricore/tc27x_soc.c b/hw/tricore/tc27x_soc.c index ecd92717b50..f3b84980e52 100644 --- a/hw/tricore/tc27x_soc.c +++ b/hw/tricore/tc27x_soc.c @@ -201,19 +201,14 @@ static void tc27x_soc_init(Object *obj) object_initialize_child(obj, "tc27x", &s->cpu, sc->cpu_type); } -static Property tc27x_soc_properties[] = { - DEFINE_PROP_END_OF_LIST(), -}; - -static void tc27x_soc_class_init(ObjectClass *klass, void *data) +static void tc27x_soc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = tc27x_soc_realize; - device_class_set_props(dc, tc27x_soc_properties); } -static void tc277d_soc_class_init(ObjectClass *oc, void *data) +static void tc277d_soc_class_init(ObjectClass *oc, const void *data) { TC27XSoCClass *sc = TC27X_SOC_CLASS(oc); diff --git a/hw/tricore/triboard.c b/hw/tricore/triboard.c index 4dba0259cd3..cb45b01d2d2 100644 --- a/hw/tricore/triboard.c +++ b/hw/tricore/triboard.c @@ -31,22 
+31,20 @@ #include "hw/tricore/triboard.h" #include "hw/tricore/tc27x_soc.h" -static void tricore_load_kernel(const char *kernel_filename) +static void tricore_load_kernel(TriCoreCPU *cpu, const char *kernel_filename) { uint64_t entry; long kernel_size; - TriCoreCPU *cpu; CPUTriCoreState *env; kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &entry, NULL, - NULL, NULL, 0, + NULL, NULL, ELFDATA2LSB, EM_TRICORE, 1, 0); if (kernel_size <= 0) { error_report("no kernel file '%s'", kernel_filename); exit(1); } - cpu = TRICORE_CPU(first_cpu); env = &cpu->env; env->PC = entry; } @@ -62,12 +60,12 @@ static void triboard_machine_init(MachineState *machine) sysbus_realize(SYS_BUS_DEVICE(&ms->tc27x_soc), &error_fatal); if (machine->kernel_filename) { - tricore_load_kernel(machine->kernel_filename); + tricore_load_kernel(&ms->tc27x_soc.cpu, machine->kernel_filename); } } static void triboard_machine_tc277d_class_init(ObjectClass *oc, - void *data) + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); TriBoardMachineClass *amc = TRIBOARD_MACHINE_CLASS(oc); diff --git a/hw/tricore/tricore_testboard.c b/hw/tricore/tricore_testboard.c index c29db8b451c..3facfdfd611 100644 --- a/hw/tricore/tricore_testboard.c +++ b/hw/tricore/tricore_testboard.c @@ -42,7 +42,7 @@ static void tricore_load_kernel(CPUTriCoreState *env) kernel_size = load_elf(tricoretb_binfo.kernel_filename, NULL, NULL, NULL, &entry, NULL, - NULL, NULL, 0, + NULL, NULL, ELFDATA2LSB, EM_TRICORE, 1, 0); if (kernel_size <= 0) { error_report("no kernel file '%s'", diff --git a/hw/tricore/tricore_testdevice.c b/hw/tricore/tricore_testdevice.c index 9028d970b00..e8daf952982 100644 --- a/hw/tricore/tricore_testdevice.c +++ b/hw/tricore/tricore_testdevice.c @@ -47,7 +47,7 @@ static const MemoryRegionOps tricore_testdevice_ops = { .min_access_size = 4, .max_access_size = 4, }, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; static void tricore_testdevice_init(Object *obj) @@ -58,16 +58,11 @@ static void tricore_testdevice_init(Object *obj) "tricore_testdevice", 0x4); } -static Property tricore_testdevice_properties[] = { - DEFINE_PROP_END_OF_LIST() -}; - -static void tricore_testdevice_class_init(ObjectClass *klass, void *data) +static void tricore_testdevice_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - device_class_set_props(dc, tricore_testdevice_properties); - dc->reset = tricore_testdevice_reset; + device_class_set_legacy_reset(dc, tricore_testdevice_reset); } static const TypeInfo tricore_testdevice_info = { diff --git a/hw/uefi/Kconfig b/hw/uefi/Kconfig new file mode 100644 index 00000000000..046d55320e1 --- /dev/null +++ b/hw/uefi/Kconfig @@ -0,0 +1,3 @@ +config UEFI_VARS + bool + default y if X86_64 || AARCH64 || RISCV64 || LOONGARCH64 diff --git a/hw/uefi/LIMITATIONS.md b/hw/uefi/LIMITATIONS.md new file mode 100644 index 00000000000..29308bd587a --- /dev/null +++ b/hw/uefi/LIMITATIONS.md @@ -0,0 +1,7 @@ +known issues and limitations +---------------------------- + +* works only on little endian hosts + - accessing structs in guest ram is done without endian conversion. 
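(Editorial note, not part of the patch: the little-endian limitation called out above exists because the device copies guest structures out of the transfer buffer and dereferences their fields directly, without any byte swapping. A host-endianness-safe variant would load each field through QEMU's bswap helpers instead. The sketch below is illustrative only; the helper name and the field offset are assumptions, not the real `mm_header` layout.)

```c
/* Illustrative sketch only (not in this patch): reading a little-endian
 * guest field via QEMU's bswap helpers, so the access would also behave
 * correctly on big-endian hosts. The offset used here is hypothetical. */
#include "qemu/osdep.h"
#include "qemu/bswap.h"

static inline uint64_t guest_le64_field(const void *buf, size_t offset)
{
    /* ldq_le_p() reads a possibly unaligned 64-bit little-endian value */
    return ldq_le_p((const uint8_t *)buf + offset);
}
```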
+* works only for 64-bit guests + - UINTN is mapped to uint64_t, for 32-bit guests that would be uint32_t diff --git a/hw/uefi/hardware-info.c b/hw/uefi/hardware-info.c new file mode 100644 index 00000000000..930502a4df3 --- /dev/null +++ b/hw/uefi/hardware-info.c @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * pass hardware information to uefi + * + * see OvmfPkg/Library/HardwareInfoLib/ in edk2 + */ + +#include "qemu/osdep.h" + +#include "hw/nvram/fw_cfg.h" +#include "hw/uefi/hardware-info.h" + +static void *blob; +static uint64_t blobsize; + +void hardware_info_register(HARDWARE_INFO_TYPE type, void *info, uint64_t infosize) +{ + HARDWARE_INFO_HEADER hdr = { + .type.value = cpu_to_le64(type), + .size = cpu_to_le64(infosize), + }; + + blob = g_realloc(blob, blobsize + sizeof(hdr) + infosize); + memcpy(blob + blobsize, &hdr, sizeof(hdr)); + blobsize += sizeof(hdr); + memcpy(blob + blobsize, info, infosize); + blobsize += infosize; + + fw_cfg_modify_file(fw_cfg_find(), "etc/hardware-info", blob, blobsize); +} diff --git a/hw/uefi/meson.build b/hw/uefi/meson.build new file mode 100644 index 00000000000..91eb95f89e6 --- /dev/null +++ b/hw/uefi/meson.build @@ -0,0 +1,21 @@ +system_ss.add(files('hardware-info.c')) + +uefi_vars_ss = ss.source_set() +if (config_all_devices.has_key('CONFIG_UEFI_VARS')) + uefi_vars_ss.add(files('var-service-core.c', + 'var-service-json.c', + 'var-service-vars.c', + 'var-service-auth.c', + 'var-service-guid.c', + 'var-service-utils.c', + 'var-service-policy.c', + 'var-service-sysbus.c')) + uefi_vars_ss.add(when: gnutls, + if_true: files('var-service-pkcs7.c'), + if_false: files('var-service-pkcs7-stub.c')) + uefi_vars_ss.add(files('var-service-siglist.c')) +endif + +modules += { 'hw-uefi' : { + 'vars' : uefi_vars_ss, +}} diff --git a/hw/uefi/trace-events b/hw/uefi/trace-events new file mode 100644 index 00000000000..3694712a946 --- /dev/null +++ b/hw/uefi/trace-events @@ -0,0 +1,17 @@ +# device +uefi_reg_read(uint64_t addr, unsigned size) "addr 0x%" PRIx64 ", size %u" +uefi_reg_write(uint64_t addr, uint64_t val, unsigned size) "addr 0x%" PRIx64 ", val 0x%" PRIx64 ", size %d" +uefi_hard_reset(void) "" + +# generic uefi +uefi_variable(const char *context, const char *name, uint64_t size, const char *uuid) "context %s, name %s, size %" PRIu64 ", uuid %s" +uefi_status(const char *context, const char *name) "context %s, status %s" +uefi_event(const char *name) "event %s" + +# variable protocol +uefi_vars_proto_cmd(const char *cmd) "cmd %s" +uefi_vars_security_violation(const char *reason) "reason %s" + +# variable policy protocol +uefi_vars_policy_cmd(const char *cmd) "cmd %s" +uefi_vars_policy_deny(const char *reason) "reason %s" diff --git a/hw/uefi/trace.h b/hw/uefi/trace.h new file mode 100644 index 00000000000..6aa1c938960 --- /dev/null +++ b/hw/uefi/trace.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#include "trace/trace-hw_uefi.h" diff --git a/hw/uefi/var-service-auth.c b/hw/uefi/var-service-auth.c new file mode 100644 index 00000000000..fba5a0956a5 --- /dev/null +++ b/hw/uefi/var-service-auth.c @@ -0,0 +1,361 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi vars device - AuthVariableLib + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "system/dma.h" + +#include "hw/uefi/var-service.h" + +static const uint16_t name_pk[] = u"PK"; +static const uint16_t name_kek[] = u"KEK"; +static const uint16_t name_db[] = u"db"; +static const uint16_t name_dbx[] = u"dbx"; +static const 
uint16_t name_setup_mode[] = u"SetupMode"; +static const uint16_t name_sigs_support[] = u"SignatureSupport"; +static const uint16_t name_sb[] = u"SecureBoot"; +static const uint16_t name_sb_enable[] = u"SecureBootEnable"; +static const uint16_t name_custom_mode[] = u"CustomMode"; +static const uint16_t name_vk[] = u"VendorKeys"; +static const uint16_t name_vk_nv[] = u"VendorKeysNv"; + +static const uint32_t sigdb_attrs = + EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS | + EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS; + +static void set_secure_boot(uefi_vars_state *uv, uint8_t sb) +{ + uefi_vars_set_variable(uv, EfiGlobalVariable, + name_sb, sizeof(name_sb), + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS, + &sb, sizeof(sb)); +} + +static void set_secure_boot_enable(uefi_vars_state *uv, uint8_t sbe) +{ + uefi_vars_set_variable(uv, EfiSecureBootEnableDisable, + name_sb_enable, sizeof(name_sb_enable), + EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS, + &sbe, sizeof(sbe)); +} + +static void set_setup_mode(uefi_vars_state *uv, uint8_t sm) +{ + uefi_vars_set_variable(uv, EfiGlobalVariable, + name_setup_mode, sizeof(name_setup_mode), + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS, + &sm, sizeof(sm)); +} + +static void set_custom_mode(uefi_vars_state *uv, uint8_t cm) +{ + uefi_vars_set_variable(uv, EfiCustomModeEnable, + name_custom_mode, sizeof(name_custom_mode), + EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS, + &cm, sizeof(cm)); +} + +static void set_signature_support(uefi_vars_state *uv) +{ + QemuUUID sigs_support[5]; + + sigs_support[0] = EfiCertSha256Guid; + sigs_support[1] = EfiCertSha384Guid; + sigs_support[2] = EfiCertSha512Guid; + sigs_support[3] = EfiCertRsa2048Guid; + sigs_support[4] = EfiCertX509Guid; + + uefi_vars_set_variable(uv, EfiGlobalVariable, + name_sigs_support, sizeof(name_sigs_support), + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS, + sigs_support, sizeof(sigs_support)); +} + +static bool setup_mode_is_active(uefi_vars_state *uv) +{ + uefi_variable *var; + uint8_t *value; + + var = uefi_vars_find_variable(uv, EfiGlobalVariable, + name_setup_mode, sizeof(name_setup_mode)); + if (var) { + value = var->data; + if (value[0] == SETUP_MODE) { + return true; + } + } + return false; +} + +static bool custom_mode_is_active(uefi_vars_state *uv) +{ + uefi_variable *var; + uint8_t *value; + + var = uefi_vars_find_variable(uv, EfiCustomModeEnable, + name_custom_mode, sizeof(name_custom_mode)); + if (var) { + value = var->data; + if (value[0] == CUSTOM_SECURE_BOOT_MODE) { + return true; + } + } + return false; +} + +bool uefi_vars_is_sb_pk(uefi_variable *var) +{ + if (qemu_uuid_is_equal(&var->guid, &EfiGlobalVariable) && + uefi_str_equal(var->name, var->name_size, name_pk, sizeof(name_pk))) { + return true; + } + return false; +} + +static bool uefi_vars_is_sb_kek(uefi_variable *var) +{ + if (qemu_uuid_is_equal(&var->guid, &EfiGlobalVariable) && + uefi_str_equal(var->name, var->name_size, name_kek, sizeof(name_kek))) { + return true; + } + return false; +} + +static bool uefi_vars_is_sb_db(uefi_variable *var) +{ + if (!qemu_uuid_is_equal(&var->guid, &EfiImageSecurityDatabase)) { + return false; + } + if (uefi_str_equal(var->name, var->name_size, name_db, sizeof(name_db))) { + return true; + } + if (uefi_str_equal(var->name, var->name_size, name_dbx, sizeof(name_dbx))) { + return true; + } + return false; +} + +bool uefi_vars_is_sb_any(uefi_variable 
*var) +{ + if (uefi_vars_is_sb_pk(var) || + uefi_vars_is_sb_kek(var) || + uefi_vars_is_sb_db(var)) { + return true; + } + return false; +} + +static uefi_variable *uefi_vars_find_siglist(uefi_vars_state *uv, + uefi_variable *var) +{ + if (uefi_vars_is_sb_pk(var)) { + return uefi_vars_find_variable(uv, EfiGlobalVariable, + name_pk, sizeof(name_pk)); + } + if (uefi_vars_is_sb_kek(var)) { + return uefi_vars_find_variable(uv, EfiGlobalVariable, + name_pk, sizeof(name_pk)); + } + if (uefi_vars_is_sb_db(var)) { + return uefi_vars_find_variable(uv, EfiGlobalVariable, + name_kek, sizeof(name_kek)); + } + + return NULL; +} + +static efi_status uefi_vars_check_auth_2_sb(uefi_vars_state *uv, + uefi_variable *var, + mm_variable_access *va, + void *data, + uint64_t data_offset) +{ + variable_auth_2 *auth = data; + uefi_variable *siglist; + + if (custom_mode_is_active(uv)) { + /* no authentication in custom mode */ + return EFI_SUCCESS; + } + + if (setup_mode_is_active(uv) && !uefi_vars_is_sb_pk(var)) { + /* no authentication in setup mode (except PK) */ + return EFI_SUCCESS; + } + + if (auth->hdr_length == 24) { + /* no signature (auth->cert_data is empty) */ + return EFI_SECURITY_VIOLATION; + } + + siglist = uefi_vars_find_siglist(uv, var); + if (!siglist && setup_mode_is_active(uv) && uefi_vars_is_sb_pk(var)) { + /* check PK is self-signed */ + uefi_variable tmp = { + .guid = EfiGlobalVariable, + .name = (uint16_t *)name_pk, + .name_size = sizeof(name_pk), + .attributes = sigdb_attrs, + .data = data + data_offset, + .data_size = va->data_size - data_offset, + }; + return uefi_vars_check_pkcs7_2(&tmp, NULL, NULL, va, data); + } + + return uefi_vars_check_pkcs7_2(siglist, NULL, NULL, va, data); +} + +efi_status uefi_vars_check_auth_2(uefi_vars_state *uv, uefi_variable *var, + mm_variable_access *va, void *data) +{ + variable_auth_2 *auth = data; + uint64_t data_offset; + efi_status status; + + if (va->data_size < sizeof(*auth)) { + return EFI_SECURITY_VIOLATION; + } + if (uadd64_overflow(sizeof(efi_time), auth->hdr_length, &data_offset)) { + return EFI_SECURITY_VIOLATION; + } + if (va->data_size < data_offset) { + return EFI_SECURITY_VIOLATION; + } + + if (auth->hdr_revision != 0x0200 || + auth->hdr_cert_type != WIN_CERT_TYPE_EFI_GUID || + !qemu_uuid_is_equal(&auth->guid_cert_type, &EfiCertTypePkcs7Guid)) { + return EFI_UNSUPPORTED; + } + + if (uefi_vars_is_sb_any(var)) { + /* secure boot variables */ + status = uefi_vars_check_auth_2_sb(uv, var, va, data, data_offset); + if (status != EFI_SUCCESS) { + return status; + } + } else { + /* other authenticated variables */ + status = uefi_vars_check_pkcs7_2(NULL, + &var->digest, &var->digest_size, + va, data); + if (status != EFI_SUCCESS) { + return status; + } + } + + /* checks passed, set variable data */ + var->time = auth->timestamp; + if (va->data_size - data_offset > 0) { + var->data = g_malloc(va->data_size - data_offset); + memcpy(var->data, data + data_offset, va->data_size - data_offset); + var->data_size = va->data_size - data_offset; + } + + return EFI_SUCCESS; +} + +efi_status uefi_vars_check_secure_boot(uefi_vars_state *uv, uefi_variable *var) +{ + uint8_t *value = var->data; + + if (uefi_vars_is_sb_any(var)) { + if (var->attributes != sigdb_attrs) { + return EFI_INVALID_PARAMETER; + } + } + + /* reject SecureBootEnable updates if force_secure_boot is set */ + if (qemu_uuid_is_equal(&var->guid, &EfiSecureBootEnableDisable) && + uefi_str_equal(var->name, var->name_size, + name_sb_enable, sizeof(name_sb_enable)) && + uv->force_secure_boot && + 
value[0] != SECURE_BOOT_ENABLE) { + return EFI_WRITE_PROTECTED; + } + + /* reject CustomMode updates if disable_custom_mode is set */ + if (qemu_uuid_is_equal(&var->guid, &EfiCustomModeEnable) && + uefi_str_equal(var->name, var->name_size, + name_custom_mode, sizeof(name_custom_mode)) && + uv->disable_custom_mode) { + return EFI_WRITE_PROTECTED; + } + + return EFI_SUCCESS; +} + +/* AuthVariableLibInitialize */ +void uefi_vars_auth_init(uefi_vars_state *uv) +{ + uefi_variable *pk_var, *sbe_var; + uint8_t platform_mode, sb, sbe, vk; + + /* SetupMode */ + pk_var = uefi_vars_find_variable(uv, EfiGlobalVariable, + name_pk, sizeof(name_pk)); + if (!pk_var) { + platform_mode = SETUP_MODE; + } else { + platform_mode = USER_MODE; + } + set_setup_mode(uv, platform_mode); + + /* SignatureSupport */ + set_signature_support(uv); + + /* SecureBootEnable */ + sbe = SECURE_BOOT_DISABLE; + sbe_var = uefi_vars_find_variable(uv, EfiSecureBootEnableDisable, + name_sb_enable, sizeof(name_sb_enable)); + if (sbe_var) { + if (platform_mode == USER_MODE) { + sbe = ((uint8_t *)sbe_var->data)[0]; + } + } else if (platform_mode == USER_MODE) { + sbe = SECURE_BOOT_ENABLE; + set_secure_boot_enable(uv, sbe); + } + + if (uv->force_secure_boot && sbe != SECURE_BOOT_ENABLE) { + sbe = SECURE_BOOT_ENABLE; + set_secure_boot_enable(uv, sbe); + } + + /* SecureBoot */ + if ((sbe == SECURE_BOOT_ENABLE) && (platform_mode == USER_MODE)) { + sb = SECURE_BOOT_MODE_ENABLE; + } else { + sb = SECURE_BOOT_MODE_DISABLE; + } + set_secure_boot(uv, sb); + + /* CustomMode */ + set_custom_mode(uv, STANDARD_SECURE_BOOT_MODE); + + vk = 0; + uefi_vars_set_variable(uv, EfiGlobalVariable, + name_vk_nv, sizeof(name_vk_nv), + EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS, + &vk, sizeof(vk)); + uefi_vars_set_variable(uv, EfiGlobalVariable, + name_vk, sizeof(name_vk), + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS, + &vk, sizeof(vk)); + + /* flush to disk */ + uefi_vars_json_save(uv); +} diff --git a/hw/uefi/var-service-core.c b/hw/uefi/var-service-core.c new file mode 100644 index 00000000000..6ab8df091aa --- /dev/null +++ b/hw/uefi/var-service-core.c @@ -0,0 +1,322 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi vars device + */ +#include "qemu/osdep.h" +#include "qemu/crc32c.h" +#include "system/dma.h" +#include "migration/vmstate.h" + +#include "hw/uefi/var-service.h" +#include "hw/uefi/var-service-api.h" +#include "hw/uefi/var-service-edk2.h" + +#include "trace.h" + +static int uefi_vars_pre_load(void *opaque) +{ + uefi_vars_state *uv = opaque; + + uefi_vars_clear_all(uv); + uefi_vars_policies_clear(uv); + g_free(uv->buffer); + return 0; +} + +static int uefi_vars_post_load(void *opaque, int version_id) +{ + uefi_vars_state *uv = opaque; + + uefi_vars_update_storage(uv); + uefi_vars_json_save(uv); + uv->buffer = g_malloc(uv->buf_size); + return 0; +} + +const VMStateDescription vmstate_uefi_vars = { + .name = "uefi-vars", + .pre_load = uefi_vars_pre_load, + .post_load = uefi_vars_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT16(sts, uefi_vars_state), + VMSTATE_UINT32(buf_size, uefi_vars_state), + VMSTATE_UINT32(buf_addr_lo, uefi_vars_state), + VMSTATE_UINT32(buf_addr_hi, uefi_vars_state), + VMSTATE_UINT32(pio_xfer_offset, uefi_vars_state), + VMSTATE_VBUFFER_ALLOC_UINT32(pio_xfer_buffer, uefi_vars_state, + 0, NULL, buf_size), + VMSTATE_BOOL(end_of_dxe, uefi_vars_state), + VMSTATE_BOOL(ready_to_boot, uefi_vars_state), + 
VMSTATE_BOOL(exit_boot_service, uefi_vars_state), + VMSTATE_BOOL(policy_locked, uefi_vars_state), + VMSTATE_UINT64(used_storage, uefi_vars_state), + VMSTATE_QTAILQ_V(variables, uefi_vars_state, 0, + vmstate_uefi_variable, uefi_variable, next), + VMSTATE_QTAILQ_V(var_policies, uefi_vars_state, 0, + vmstate_uefi_var_policy, uefi_var_policy, next), + VMSTATE_END_OF_LIST() + }, +}; + +static uint32_t uefi_vars_cmd_mm(uefi_vars_state *uv, bool dma_mode) +{ + hwaddr dma; + mm_header *mhdr; + uint64_t size; + uint32_t retval; + + dma = uv->buf_addr_lo | ((hwaddr)uv->buf_addr_hi << 32); + mhdr = (mm_header *) uv->buffer; + + if (!uv->buffer || uv->buf_size < sizeof(*mhdr)) { + return UEFI_VARS_STS_ERR_BAD_BUFFER_SIZE; + } + + /* read header */ + if (dma_mode) { + dma_memory_read(&address_space_memory, dma, + uv->buffer, sizeof(*mhdr), + MEMTXATTRS_UNSPECIFIED); + } else { + memcpy(uv->buffer, uv->pio_xfer_buffer, sizeof(*mhdr)); + } + + if (uadd64_overflow(sizeof(*mhdr), mhdr->length, &size)) { + return UEFI_VARS_STS_ERR_BAD_BUFFER_SIZE; + } + if (uv->buf_size < size) { + return UEFI_VARS_STS_ERR_BAD_BUFFER_SIZE; + } + + /* read buffer (excl header) */ + if (dma_mode) { + dma_memory_read(&address_space_memory, dma + sizeof(*mhdr), + uv->buffer + sizeof(*mhdr), mhdr->length, + MEMTXATTRS_UNSPECIFIED); + } else { + memcpy(uv->buffer + sizeof(*mhdr), + uv->pio_xfer_buffer + sizeof(*mhdr), + mhdr->length); + } + memset(uv->buffer + size, 0, uv->buf_size - size); + + /* dispatch */ + if (qemu_uuid_is_equal(&mhdr->guid, &EfiSmmVariableProtocolGuid)) { + retval = uefi_vars_mm_vars_proto(uv); + + } else if (qemu_uuid_is_equal(&mhdr->guid, &VarCheckPolicyLibMmiHandlerGuid)) { + retval = uefi_vars_mm_check_policy_proto(uv); + + } else if (qemu_uuid_is_equal(&mhdr->guid, &EfiEndOfDxeEventGroupGuid)) { + trace_uefi_event("end-of-dxe"); + uv->end_of_dxe = true; + retval = UEFI_VARS_STS_SUCCESS; + + } else if (qemu_uuid_is_equal(&mhdr->guid, &EfiEventReadyToBootGuid)) { + trace_uefi_event("ready-to-boot"); + uv->ready_to_boot = true; + retval = UEFI_VARS_STS_SUCCESS; + + } else if (qemu_uuid_is_equal(&mhdr->guid, &EfiEventExitBootServicesGuid)) { + trace_uefi_event("exit-boot-service"); + uv->exit_boot_service = true; + retval = UEFI_VARS_STS_SUCCESS; + + } else { + retval = UEFI_VARS_STS_ERR_NOT_SUPPORTED; + } + + /* write buffer */ + if (dma_mode) { + dma_memory_write(&address_space_memory, dma, + uv->buffer, sizeof(*mhdr) + mhdr->length, + MEMTXATTRS_UNSPECIFIED); + } else { + memcpy(uv->pio_xfer_buffer + sizeof(*mhdr), + uv->buffer + sizeof(*mhdr), + sizeof(*mhdr) + mhdr->length); + } + + return retval; +} + +static void uefi_vars_soft_reset(uefi_vars_state *uv) +{ + g_free(uv->buffer); + uv->buffer = NULL; + uv->buf_size = 0; + uv->buf_addr_lo = 0; + uv->buf_addr_hi = 0; +} + +void uefi_vars_hard_reset(uefi_vars_state *uv) +{ + trace_uefi_hard_reset(); + uefi_vars_soft_reset(uv); + + uv->end_of_dxe = false; + uv->ready_to_boot = false; + uv->exit_boot_service = false; + uv->policy_locked = false; + + uefi_vars_clear_volatile(uv); + uefi_vars_policies_clear(uv); + uefi_vars_auth_init(uv); +} + +static uint32_t uefi_vars_cmd(uefi_vars_state *uv, uint32_t cmd) +{ + switch (cmd) { + case UEFI_VARS_CMD_RESET: + uefi_vars_soft_reset(uv); + return UEFI_VARS_STS_SUCCESS; + case UEFI_VARS_CMD_DMA_MM: + return uefi_vars_cmd_mm(uv, true); + case UEFI_VARS_CMD_PIO_MM: + return uefi_vars_cmd_mm(uv, false); + case UEFI_VARS_CMD_PIO_ZERO_OFFSET: + uv->pio_xfer_offset = 0; + return UEFI_VARS_STS_SUCCESS; + default: + 
return UEFI_VARS_STS_ERR_NOT_SUPPORTED; + } +} + +static uint64_t uefi_vars_read(void *opaque, hwaddr addr, unsigned size) +{ + uefi_vars_state *uv = opaque; + uint64_t retval = -1; + void *xfer_ptr; + + trace_uefi_reg_read(addr, size); + + switch (addr) { + case UEFI_VARS_REG_MAGIC: + retval = UEFI_VARS_MAGIC_VALUE; + break; + case UEFI_VARS_REG_CMD_STS: + retval = uv->sts; + break; + case UEFI_VARS_REG_BUFFER_SIZE: + retval = uv->buf_size; + break; + case UEFI_VARS_REG_DMA_BUFFER_ADDR_LO: + retval = uv->buf_addr_lo; + break; + case UEFI_VARS_REG_DMA_BUFFER_ADDR_HI: + retval = uv->buf_addr_hi; + break; + case UEFI_VARS_REG_PIO_BUFFER_TRANSFER: + if (uv->pio_xfer_offset + size > uv->buf_size) { + retval = 0; + break; + } + xfer_ptr = uv->pio_xfer_buffer + uv->pio_xfer_offset; + switch (size) { + case 1: + retval = *(uint8_t *)xfer_ptr; + break; + case 2: + retval = *(uint16_t *)xfer_ptr; + break; + case 4: + retval = *(uint32_t *)xfer_ptr; + break; + case 8: + retval = *(uint64_t *)xfer_ptr; + break; + } + uv->pio_xfer_offset += size; + break; + case UEFI_VARS_REG_PIO_BUFFER_CRC32C: + retval = crc32c(0xffffffff, uv->pio_xfer_buffer, uv->pio_xfer_offset); + break; + case UEFI_VARS_REG_FLAGS: + retval = 0; + if (uv->use_pio) { + retval |= UEFI_VARS_FLAG_USE_PIO; + } + } + return retval; +} + +static void uefi_vars_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +{ + uefi_vars_state *uv = opaque; + void *xfer_ptr; + + trace_uefi_reg_write(addr, val, size); + + switch (addr) { + case UEFI_VARS_REG_CMD_STS: + uv->sts = uefi_vars_cmd(uv, val); + break; + case UEFI_VARS_REG_BUFFER_SIZE: + if (val > MAX_BUFFER_SIZE) { + val = MAX_BUFFER_SIZE; + } + uv->buf_size = val; + g_free(uv->buffer); + g_free(uv->pio_xfer_buffer); + uv->buffer = g_malloc0(uv->buf_size); + uv->pio_xfer_buffer = g_malloc0(uv->buf_size); + break; + case UEFI_VARS_REG_DMA_BUFFER_ADDR_LO: + uv->buf_addr_lo = val; + break; + case UEFI_VARS_REG_DMA_BUFFER_ADDR_HI: + uv->buf_addr_hi = val; + break; + case UEFI_VARS_REG_PIO_BUFFER_TRANSFER: + if (uv->pio_xfer_offset + size > uv->buf_size) { + break; + } + xfer_ptr = uv->pio_xfer_buffer + uv->pio_xfer_offset; + switch (size) { + case 1: + *(uint8_t *)xfer_ptr = val; + break; + case 2: + *(uint16_t *)xfer_ptr = val; + break; + case 4: + *(uint32_t *)xfer_ptr = val; + break; + case 8: + *(uint64_t *)xfer_ptr = val; + break; + } + uv->pio_xfer_offset += size; + break; + case UEFI_VARS_REG_PIO_BUFFER_CRC32C: + case UEFI_VARS_REG_FLAGS: + default: + break; + } +} + +static const MemoryRegionOps uefi_vars_ops = { + .read = uefi_vars_read, + .write = uefi_vars_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 2, + .max_access_size = 4, + }, +}; + +void uefi_vars_init(Object *obj, uefi_vars_state *uv) +{ + QTAILQ_INIT(&uv->variables); + QTAILQ_INIT(&uv->var_policies); + uv->jsonfd = -1; + memory_region_init_io(&uv->mr, obj, &uefi_vars_ops, uv, + "uefi-vars", UEFI_VARS_REGS_SIZE); +} + +void uefi_vars_realize(uefi_vars_state *uv, Error **errp) +{ + uefi_vars_json_init(uv, errp); + uefi_vars_json_load(uv, errp); +} diff --git a/hw/uefi/var-service-guid.c b/hw/uefi/var-service-guid.c new file mode 100644 index 00000000000..eba3655c8d3 --- /dev/null +++ b/hw/uefi/var-service-guid.c @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi vars device - GUIDs + */ + +#include "qemu/osdep.h" +#include "system/dma.h" + +#include "hw/uefi/var-service.h" + +/* variable namespaces */ + +const QemuUUID EfiGlobalVariable = { + .data = 
UUID_LE(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, + 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c) +}; + +const QemuUUID EfiImageSecurityDatabase = { + .data = UUID_LE(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, + 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f) +}; + +const QemuUUID EfiCustomModeEnable = { + .data = UUID_LE(0xc076ec0c, 0x7028, 0x4399, 0xa0, 0x72, + 0x71, 0xee, 0x5c, 0x44, 0x8b, 0x9f) +}; + +const QemuUUID EfiSecureBootEnableDisable = { + .data = UUID_LE(0xf0a30bc7, 0xaf08, 0x4556, 0x99, 0xc4, + 0x0, 0x10, 0x9, 0xc9, 0x3a, 0x44) +}; + +/* signatures */ + +const QemuUUID EfiCertSha256Guid = { + .data = UUID_LE(0xc1c41626, 0x504c, 0x4092, 0xac, 0xa9, + 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28) +}; + +const QemuUUID EfiCertSha384Guid = { + .data = UUID_LE(0xff3e5307, 0x9fd0, 0x48c9, 0x85, 0xf1, + 0x8a, 0xd5, 0x6c, 0x70, 0x1e, 0x1) +}; + +const QemuUUID EfiCertSha512Guid = { + .data = UUID_LE(0x93e0fae, 0xa6c4, 0x4f50, 0x9f, 0x1b, + 0xd4, 0x1e, 0x2b, 0x89, 0xc1, 0x9a) +}; + +const QemuUUID EfiCertRsa2048Guid = { + .data = UUID_LE(0x3c5766e8, 0x269c, 0x4e34, 0xaa, 0x14, + 0xed, 0x77, 0x6e, 0x85, 0xb3, 0xb6) +}; + +const QemuUUID EfiCertX509Guid = { + .data = UUID_LE(0xa5c059a1, 0x94e4, 0x4aa7, 0x87, 0xb5, + 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72) +}; + +const QemuUUID EfiCertTypePkcs7Guid = { + .data = UUID_LE(0x4aafd29d, 0x68df, 0x49ee, 0x8a, 0xa9, + 0x34, 0x7d, 0x37, 0x56, 0x65, 0xa7) +}; + +/* + * mm_header.guid values that the guest DXE/BDS phases use for + * sending requests to management mode + */ + +const QemuUUID EfiSmmVariableProtocolGuid = { + .data = UUID_LE(0xed32d533, 0x99e6, 0x4209, 0x9c, 0xc0, + 0x2d, 0x72, 0xcd, 0xd9, 0x98, 0xa7) +}; + +const QemuUUID VarCheckPolicyLibMmiHandlerGuid = { + .data = UUID_LE(0xda1b0d11, 0xd1a7, 0x46c4, 0x9d, 0xc9, + 0xf3, 0x71, 0x48, 0x75, 0xc6, 0xeb) +}; + +/* + * mm_header.guid values that the guest DXE/BDS phases use for + * reporting event groups being signaled to management mode + */ + +const QemuUUID EfiEndOfDxeEventGroupGuid = { + .data = UUID_LE(0x02ce967a, 0xdd7e, 0x4FFc, 0x9e, 0xe7, + 0x81, 0x0c, 0xF0, 0x47, 0x08, 0x80) +}; + +const QemuUUID EfiEventReadyToBootGuid = { + .data = UUID_LE(0x7ce88Fb3, 0x4bd7, 0x4679, 0x87, 0xa8, + 0xa8, 0xd8, 0xde, 0xe5, 0x0d, 0x2b) +}; + +const QemuUUID EfiEventExitBootServicesGuid = { + .data = UUID_LE(0x27abF055, 0xb1b8, 0x4c26, 0x80, 0x48, + 0x74, 0x8F, 0x37, 0xba, 0xa2, 0xdF) +}; diff --git a/hw/uefi/var-service-json.c b/hw/uefi/var-service-json.c new file mode 100644 index 00000000000..f5f15568333 --- /dev/null +++ b/hw/uefi/var-service-json.c @@ -0,0 +1,257 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi vars device - serialize non-volatile varstore from/to json, + * using qapi + * + * tools which can read/write these json files: + * - https://gitlab.com/kraxel/virt-firmware + * - https://github.com/awslabs/python-uefivars + */ +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "qemu/error-report.h" +#include "system/dma.h" + +#include "hw/uefi/var-service.h" + +#include "qobject/qobject.h" +#include "qobject/qjson.h" + +#include "qapi/dealloc-visitor.h" +#include "qapi/qobject-input-visitor.h" +#include "qapi/qobject-output-visitor.h" +#include "qapi/qapi-types-uefi.h" +#include "qapi/qapi-visit-uefi.h" + +static char *generate_hexstr(void *data, size_t len) +{ + static const char hex[] = { + '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', + }; + uint8_t *src = data; + char *dest; + size_t i; + + dest = g_malloc(len * 2 + 1); + for (i = 0; i < len * 2;) { + 
dest[i++] = hex[*src >> 4]; + dest[i++] = hex[*src & 15]; + src++; + } + dest[i++] = 0; + + return dest; +} + +static UefiVarStore *uefi_vars_to_qapi(uefi_vars_state *uv) +{ + UefiVarStore *vs; + UefiVariableList **tail; + UefiVariable *v; + QemuUUID be; + uefi_variable *var; + + vs = g_new0(UefiVarStore, 1); + vs->version = 2; + tail = &vs->variables; + + QTAILQ_FOREACH(var, &uv->variables, next) { + if (!(var->attributes & EFI_VARIABLE_NON_VOLATILE)) { + continue; + } + + v = g_new0(UefiVariable, 1); + be = qemu_uuid_bswap(var->guid); + v->guid = qemu_uuid_unparse_strdup(&be); + v->name = uefi_ucs2_to_ascii(var->name, var->name_size); + v->attr = var->attributes; + + v->data = generate_hexstr(var->data, var->data_size); + + if (var->attributes & + EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS) { + v->time = generate_hexstr(&var->time, sizeof(var->time)); + if (var->digest && var->digest_size) { + v->digest = generate_hexstr(var->digest, var->digest_size); + } + } + + QAPI_LIST_APPEND(tail, v); + } + return vs; +} + +static unsigned parse_hexchar(char c) +{ + switch (c) { + case '0' ... '9': return c - '0'; + case 'a' ... 'f': return c - 'a' + 0xa; + case 'A' ... 'F': return c - 'A' + 0xA; + default: return 0; + } +} + +static void parse_hexstr(void *dest, char *src, int len) +{ + uint8_t *data = dest; + size_t i; + + for (i = 0; i < len; i += 2) { + *(data++) = + parse_hexchar(src[i]) << 4 | + parse_hexchar(src[i + 1]); + } +} + +static void uefi_vars_from_qapi(uefi_vars_state *uv, UefiVarStore *vs) +{ + UefiVariableList *item; + UefiVariable *v; + QemuUUID be; + uefi_variable *var; + uint8_t *data; + size_t i, len; + + for (item = vs->variables; item != NULL; item = item->next) { + v = item->value; + + var = g_new0(uefi_variable, 1); + var->attributes = v->attr; + qemu_uuid_parse(v->guid, &be); + var->guid = qemu_uuid_bswap(be); + + len = strlen(v->name); + var->name_size = len * 2 + 2; + var->name = g_malloc(var->name_size); + for (i = 0; i <= len; i++) { + var->name[i] = v->name[i]; + } + + len = strlen(v->data); + var->data_size = len / 2; + var->data = data = g_malloc(var->data_size); + parse_hexstr(var->data, v->data, len); + + if (v->time && strlen(v->time) == 32) { + parse_hexstr(&var->time, v->time, 32); + } + + if (v->digest) { + len = strlen(v->digest); + var->digest_size = len / 2; + var->digest = g_malloc(var->digest_size); + parse_hexstr(var->digest, v->digest, len); + } + + QTAILQ_INSERT_TAIL(&uv->variables, var, next); + } +} + +static GString *uefi_vars_to_json(uefi_vars_state *uv) +{ + UefiVarStore *vs = uefi_vars_to_qapi(uv); + QObject *qobj = NULL; + Visitor *v; + GString *gstr; + + v = qobject_output_visitor_new(&qobj); + if (visit_type_UefiVarStore(v, NULL, &vs, NULL)) { + visit_complete(v, &qobj); + } + visit_free(v); + qapi_free_UefiVarStore(vs); + + gstr = qobject_to_json_pretty(qobj, true); + qobject_unref(qobj); + + return gstr; +} + +void uefi_vars_json_init(uefi_vars_state *uv, Error **errp) +{ + if (uv->jsonfile) { + uv->jsonfd = qemu_create(uv->jsonfile, O_RDWR | O_BINARY, 0666, errp); + } +} + +void uefi_vars_json_save(uefi_vars_state *uv) +{ + g_autoptr(GString) gstr = NULL; + int rc; + + if (uv->jsonfd == -1) { + return; + } + + gstr = uefi_vars_to_json(uv); + + rc = lseek(uv->jsonfd, 0, SEEK_SET); + if (rc < 0) { + warn_report("%s: lseek error", __func__); + return; + } + + rc = ftruncate(uv->jsonfd, 0); + if (rc != 0) { + warn_report("%s: ftruncate error", __func__); + return; + } + + rc = write(uv->jsonfd, gstr->str, gstr->len); + if (rc != 
gstr->len) { + warn_report("%s: write error", __func__); + return; + } + + fsync(uv->jsonfd); +} + +void uefi_vars_json_load(uefi_vars_state *uv, Error **errp) +{ + UefiVarStore *vs; + QObject *qobj; + Visitor *v; + char *str; + ssize_t len; + int rc; + + if (uv->jsonfd == -1) { + return; + } + + len = lseek(uv->jsonfd, 0, SEEK_END); + if (len < 0) { + warn_report("%s: lseek error", __func__); + return; + } + if (len == 0) { + /* empty file */ + return; + } + + str = g_malloc(len + 1); + lseek(uv->jsonfd, 0, SEEK_SET); + rc = read(uv->jsonfd, str, len); + if (rc != len) { + warn_report("%s: read error", __func__); + g_free(str); + return; + } + str[len] = 0; + + qobj = qobject_from_json(str, errp); + v = qobject_input_visitor_new(qobj); + visit_type_UefiVarStore(v, NULL, &vs, errp); + visit_free(v); + + if (!(*errp)) { + uefi_vars_from_qapi(uv, vs); + uefi_vars_update_storage(uv); + } + + qapi_free_UefiVarStore(vs); + qobject_unref(qobj); + g_free(str); +} diff --git a/hw/uefi/var-service-pkcs7-stub.c b/hw/uefi/var-service-pkcs7-stub.c new file mode 100644 index 00000000000..118cba446d4 --- /dev/null +++ b/hw/uefi/var-service-pkcs7-stub.c @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi vars device - pkcs7 stubs + */ +#include "qemu/osdep.h" +#include "system/dma.h" + +#include "hw/uefi/var-service.h" + +efi_status uefi_vars_check_pkcs7_2(uefi_variable *siglist, + void **digest, uint32_t *digest_size, + mm_variable_access *va, void *data) +{ + return EFI_WRITE_PROTECTED; +} diff --git a/hw/uefi/var-service-pkcs7.c b/hw/uefi/var-service-pkcs7.c new file mode 100644 index 00000000000..32accf4e44e --- /dev/null +++ b/hw/uefi/var-service-pkcs7.c @@ -0,0 +1,436 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi vars device - pkcs7 verification + */ +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "system/dma.h" + +#include +#include +#include + +#include "hw/uefi/var-service.h" + +#define AUTHVAR_DIGEST_ALGO GNUTLS_DIG_SHA256 +#define AUTHVAR_DIGEST_SIZE 32 + +/* + * Replicate the signed data for signature verification. + */ +static gnutls_datum_t *build_signed_data(mm_variable_access *va, void *data) +{ + variable_auth_2 *auth = data; + uint64_t data_offset = sizeof(efi_time) + auth->hdr_length; + uint16_t *name = (void *)va + sizeof(mm_variable_access); + gnutls_datum_t *sdata; + uint64_t pos = 0; + + sdata = g_new(gnutls_datum_t, 1); + sdata->size = (va->name_size - 2 + + sizeof(QemuUUID) + + sizeof(va->attributes) + + sizeof(auth->timestamp) + + va->data_size - data_offset); + sdata->data = g_malloc(sdata->size); + + /* Variable Name (without terminating \0) */ + memcpy(sdata->data + pos, name, va->name_size - 2); + pos += va->name_size - 2; + + /* Variable Namespace Guid */ + memcpy(sdata->data + pos, &va->guid, sizeof(va->guid)); + pos += sizeof(va->guid); + + /* Attributes */ + memcpy(sdata->data + pos, &va->attributes, sizeof(va->attributes)); + pos += sizeof(va->attributes); + + /* TimeStamp */ + memcpy(sdata->data + pos, &auth->timestamp, sizeof(auth->timestamp)); + pos += sizeof(auth->timestamp); + + /* Variable Content */ + memcpy(sdata->data + pos, data + data_offset, va->data_size - data_offset); + pos += va->data_size - data_offset; + + assert(pos == sdata->size); + return sdata; +} + +/* + * See WrapPkcs7Data() in edk2. + * + * UEFI spec allows pkcs7 signatures being used without the envelope which + * identifies them as pkcs7 signatures. openssl and gnutls will not parse them + * without the envelope though. 
So add it if needed. + */ +static void wrap_pkcs7(gnutls_datum_t *pkcs7) +{ + static uint8_t signed_data_oid[9] = { + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x02 + }; + gnutls_datum_t wrap; + + if (pkcs7->data[4] == 0x06 && + pkcs7->data[5] == 0x09 && + memcmp(pkcs7->data + 6, signed_data_oid, sizeof(signed_data_oid)) == 0 && + pkcs7->data[15] == 0x0a && + pkcs7->data[16] == 0x82) { + return; + } + + wrap.size = pkcs7->size + 19; + wrap.data = g_malloc(wrap.size); + + wrap.data[0] = 0x30; + wrap.data[1] = 0x82; + wrap.data[2] = (wrap.size - 4) >> 8; + wrap.data[3] = (wrap.size - 4) & 0xff; + wrap.data[4] = 0x06; + wrap.data[5] = 0x09; + memcpy(wrap.data + 6, signed_data_oid, sizeof(signed_data_oid)); + + wrap.data[15] = 0xa0; + wrap.data[16] = 0x82; + wrap.data[17] = pkcs7->size >> 8; + wrap.data[18] = pkcs7->size & 0xff; + memcpy(wrap.data + 19, pkcs7->data, pkcs7->size); + + g_free(pkcs7->data); + *pkcs7 = wrap; +} + +static gnutls_datum_t *build_pkcs7(void *data) +{ + variable_auth_2 *auth = data; + gnutls_datum_t *pkcs7; + + pkcs7 = g_new(gnutls_datum_t, 1); + pkcs7->size = auth->hdr_length - 24; + pkcs7->data = g_malloc(pkcs7->size); + memcpy(pkcs7->data, data + 16 + 24, pkcs7->size); + + wrap_pkcs7(pkcs7); + + return pkcs7; +} + +/* + * Read UEFI signature database, store x509 all certificates found in + * gnutls_x509_trust_list_t. + */ +static gnutls_x509_trust_list_t build_trust_list_sb(uefi_variable *var) +{ + gnutls_x509_trust_list_t tlist; + gnutls_datum_t cert_data; + gnutls_x509_crt_t cert; + uefi_vars_siglist siglist; + uefi_vars_cert *c; + int rc; + + rc = gnutls_x509_trust_list_init(&tlist, 0); + if (rc < 0) { + warn_report("gnutls_x509_trust_list_init error: %s", + gnutls_strerror(rc)); + return NULL; + } + + uefi_vars_siglist_init(&siglist); + uefi_vars_siglist_parse(&siglist, var->data, var->data_size); + + QTAILQ_FOREACH(c, &siglist.x509, next) { + cert_data.size = c->size; + cert_data.data = c->data; + + rc = gnutls_x509_crt_init(&cert); + if (rc < 0) { + warn_report("gnutls_x509_crt_init error: %s", gnutls_strerror(rc)); + break; + } + rc = gnutls_x509_crt_import(cert, &cert_data, GNUTLS_X509_FMT_DER); + if (rc < 0) { + warn_report("gnutls_x509_crt_import error: %s", + gnutls_strerror(rc)); + gnutls_x509_crt_deinit(cert); + break; + } + rc = gnutls_x509_trust_list_add_cas(tlist, &cert, 1, 0); + if (rc < 0) { + warn_report("gnutls_x509_crt_import error: %s", + gnutls_strerror(rc)); + gnutls_x509_crt_deinit(cert); + break; + } + } + + uefi_vars_siglist_free(&siglist); + + return tlist; +} + +static int build_digest_authvar(gnutls_x509_crt_t signer, + gnutls_x509_crt_t root, + uint8_t *hash_digest) +{ + char *cn; + size_t cn_size = 0; + uint8_t fp[AUTHVAR_DIGEST_SIZE]; + size_t fp_size = sizeof(fp); + gnutls_hash_hd_t hash; + int rc; + + /* get signer CN */ + rc = gnutls_x509_crt_get_dn_by_oid(signer, GNUTLS_OID_X520_COMMON_NAME, + 0, 0, NULL, &cn_size); + if (rc != GNUTLS_E_SHORT_MEMORY_BUFFER) { + warn_report("gnutls_x509_crt_get_dn_by_oid error #1: %s", + gnutls_strerror(rc)); + return rc; + } + + cn = g_malloc(cn_size); + rc = gnutls_x509_crt_get_dn_by_oid(signer, GNUTLS_OID_X520_COMMON_NAME, + 0, 0, cn, &cn_size); + if (rc < 0) { + warn_report("gnutls_x509_crt_get_dn_by_oid error #2: %s", + gnutls_strerror(rc)); + goto err; + } + + /* get root certificate fingerprint */ + rc = gnutls_x509_crt_get_fingerprint(root, AUTHVAR_DIGEST_ALGO, + fp, &fp_size); + if (rc < 0) { + warn_report("gnutls_x509_crt_get_fingerprint error: %s", + gnutls_strerror(rc)); + goto 
err; + } + + /* digest both items */ + rc = gnutls_hash_init(&hash, AUTHVAR_DIGEST_ALGO); + if (rc < 0) { + warn_report("gnutls_hash_init error: %s", + gnutls_strerror(rc)); + goto err; + } + rc = gnutls_hash(hash, cn, cn_size); + if (rc < 0) { + warn_report("gnutls_hash error: %s", + gnutls_strerror(rc)); + goto err; + } + rc = gnutls_hash(hash, fp, fp_size); + if (rc < 0) { + warn_report("gnutls_hash error: %s", + gnutls_strerror(rc)); + goto err; + } + gnutls_hash_deinit(hash, hash_digest); + + return 0; + +err: + g_free(cn); + return rc; +} + +/* + * uefi spec 2.9, section 8.2.2 + * + * For EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS variables which are + * NOT secure boot variables we should track the root certificate of the trust + * chain, and the subject CN of the signer certificate. + * + * So we'll go store a digest of these two items so we can verify this. Also + * create a gnutls_x509_trust_list_t with the root certificate, so + * gnutls_pkcs7_verify() will pass (assuming the signature is otherwise + * correct). + */ +static gnutls_x509_trust_list_t build_trust_list_authvar(gnutls_pkcs7_t pkcs7, + uint8_t *hash_digest) +{ + gnutls_datum_t signer_data = { 0 }; + gnutls_datum_t root_data = { 0 }; + gnutls_x509_crt_t signer = NULL; + gnutls_x509_crt_t root = NULL; + gnutls_x509_trust_list_t tlist = NULL; + int n, rc; + + n = gnutls_pkcs7_get_crt_count(pkcs7); + + /* first is signer certificate */ + rc = gnutls_pkcs7_get_crt_raw2(pkcs7, 0, &signer_data); + if (rc < 0) { + warn_report("gnutls_pkcs7_get_crt_raw2(0) error: %s", + gnutls_strerror(rc)); + goto done; + } + rc = gnutls_x509_crt_init(&signer); + if (rc < 0) { + warn_report("gnutls_x509_crt_init error: %s", gnutls_strerror(rc)); + goto done; + } + rc = gnutls_x509_crt_import(signer, &signer_data, GNUTLS_X509_FMT_DER); + if (rc < 0) { + warn_report("gnutls_x509_crt_import error: %s", + gnutls_strerror(rc)); + gnutls_x509_crt_deinit(signer); + goto done; + } + + /* last is root-of-trust certificate (can be identical to signer) */ + rc = gnutls_pkcs7_get_crt_raw2(pkcs7, n - 1, &root_data); + if (rc < 0) { + warn_report("gnutls_pkcs7_get_crt_raw2(%d) error: %s", + n - 1, gnutls_strerror(rc)); + goto done; + } + rc = gnutls_x509_crt_init(&root); + if (rc < 0) { + warn_report("gnutls_x509_crt_init error: %s", gnutls_strerror(rc)); + goto done; + } + rc = gnutls_x509_crt_import(root, &root_data, GNUTLS_X509_FMT_DER); + if (rc < 0) { + warn_report("gnutls_x509_crt_import error: %s", + gnutls_strerror(rc)); + goto done; + } + + /* calc digest for signer CN + root cert */ + rc = build_digest_authvar(signer, root, hash_digest); + if (rc < 0) { + goto done; + } + + /* add root to trust list */ + rc = gnutls_x509_trust_list_init(&tlist, 0); + if (rc < 0) { + warn_report("gnutls_x509_trust_list_init error: %s", + gnutls_strerror(rc)); + goto done; + } + rc = gnutls_x509_trust_list_add_cas(tlist, &root, 1, 0); + if (rc < 0) { + warn_report("gnutls_x509_crt_import error: %s", + gnutls_strerror(rc)); + gnutls_x509_trust_list_deinit(tlist, 1); + tlist = NULL; + goto done; + } else { + /* ownership passed to tlist */ + root = NULL; + } + +done: + if (signer_data.data) { + gnutls_free(signer_data.data); + } + if (root_data.data) { + gnutls_free(root_data.data); + } + if (signer) { + gnutls_x509_crt_deinit(signer); + } + if (root) { + gnutls_x509_crt_deinit(root); + } + return tlist; +} + +static void free_datum(gnutls_datum_t *ptr) +{ + if (!ptr) { + return; + } + g_free(ptr->data); + g_free(ptr); +} + +static void gnutls_log_stderr(int 
level, const char *msg) +{ + if (strncmp(msg, "ASSERT:", 7) == 0) { + return; + } + fprintf(stderr, " %d: %s", level, msg); +} + +/* + * pkcs7 signature verification (EFI_VARIABLE_AUTHENTICATION_2). + */ +efi_status uefi_vars_check_pkcs7_2(uefi_variable *siglist, + void **digest, uint32_t *digest_size, + mm_variable_access *va, void *data) +{ + gnutls_x509_trust_list_t tlist = NULL; + gnutls_datum_t *signed_data = NULL; + gnutls_datum_t *pkcs7_data = NULL; + gnutls_pkcs7_t pkcs7 = NULL; + efi_status status = EFI_SECURITY_VIOLATION; + int rc; + + if (0) { + /* gnutls debug logging */ + static bool first = true; + + if (first) { + first = false; + gnutls_global_set_log_function(gnutls_log_stderr); + gnutls_global_set_log_level(99); + } + } + + signed_data = build_signed_data(va, data); + pkcs7_data = build_pkcs7(data); + + rc = gnutls_pkcs7_init(&pkcs7); + if (rc < 0) { + warn_report("gnutls_pkcs7_init error: %s", gnutls_strerror(rc)); + goto out; + } + + rc = gnutls_pkcs7_import(pkcs7, pkcs7_data, GNUTLS_X509_FMT_DER); + if (rc < 0) { + warn_report("gnutls_pkcs7_import error: %s", gnutls_strerror(rc)); + goto out; + } + + if (siglist) { + /* secure boot variables */ + tlist = build_trust_list_sb(siglist); + } else if (digest && digest_size) { + /* other authenticated variables */ + *digest_size = AUTHVAR_DIGEST_SIZE; + *digest = g_malloc(*digest_size); + tlist = build_trust_list_authvar(pkcs7, *digest); + } else { + /* should not happen */ + goto out; + } + + rc = gnutls_pkcs7_verify(pkcs7, tlist, + NULL, 0, + 0, signed_data, + GNUTLS_VERIFY_DISABLE_TIME_CHECKS | + GNUTLS_VERIFY_DISABLE_TRUSTED_TIME_CHECKS); + if (rc < 0) { + warn_report("gnutls_pkcs7_verify error: %s", gnutls_strerror(rc)); + goto out; + } + + /* check passed */ + status = EFI_SUCCESS; + +out: + free_datum(signed_data); + free_datum(pkcs7_data); + if (tlist) { + gnutls_x509_trust_list_deinit(tlist, 1); + } + if (pkcs7) { + gnutls_pkcs7_deinit(pkcs7); + } + return status; +} diff --git a/hw/uefi/var-service-policy.c b/hw/uefi/var-service-policy.c new file mode 100644 index 00000000000..58da4adbeba --- /dev/null +++ b/hw/uefi/var-service-policy.c @@ -0,0 +1,370 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi vars device - VarCheckPolicyLibMmiHandler implementation + * + * variable policy specs: + * https://github.com/tianocore/edk2/blob/master/MdeModulePkg/Library/VariablePolicyLib/ReadMe.md + */ +#include "qemu/osdep.h" +#include "system/dma.h" +#include "migration/vmstate.h" + +#include "hw/uefi/var-service.h" +#include "hw/uefi/var-service-api.h" +#include "hw/uefi/var-service-edk2.h" + +#include "trace.h" + +static void calc_policy(uefi_var_policy *pol); + +static int uefi_var_policy_post_load(void *opaque, int version_id) +{ + uefi_var_policy *pol = opaque; + + calc_policy(pol); + return 0; +} + +const VMStateDescription vmstate_uefi_var_policy = { + .name = "uefi-var-policy", + .post_load = uefi_var_policy_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(entry_size, uefi_var_policy), + VMSTATE_VBUFFER_ALLOC_UINT32(entry, uefi_var_policy, + 0, NULL, entry_size), + VMSTATE_END_OF_LIST() + }, +}; + +static void print_policy_entry(variable_policy_entry *pe) +{ + uint16_t *name = (void *)pe + pe->offset_to_name; + + fprintf(stderr, "%s:\n", __func__); + + fprintf(stderr, " name ´"); + while (*name) { + fprintf(stderr, "%c", *name); + name++; + } + fprintf(stderr, "', version=%d.%d, size=%d\n", + pe->version >> 16, pe->version & 0xffff, pe->size); + + if (pe->min_size) { + fprintf(stderr, " size 
min=%d\n", pe->min_size); + } + if (pe->max_size != UINT32_MAX) { + fprintf(stderr, " size max=%u\n", pe->max_size); + } + if (pe->attributes_must_have) { + fprintf(stderr, " attr must=0x%x\n", pe->attributes_must_have); + } + if (pe->attributes_cant_have) { + fprintf(stderr, " attr cant=0x%x\n", pe->attributes_cant_have); + } + if (pe->lock_policy_type) { + fprintf(stderr, " lock policy type %d\n", pe->lock_policy_type); + } +} + +static gboolean wildcard_str_equal(uefi_var_policy *pol, + uefi_variable *var) +{ + return uefi_str_equal_ex(pol->name, pol->name_size, + var->name, var->name_size, + true); +} + +static uefi_var_policy *find_policy(uefi_vars_state *uv, QemuUUID guid, + uint16_t *name, uint64_t name_size) +{ + uefi_var_policy *pol; + + QTAILQ_FOREACH(pol, &uv->var_policies, next) { + if (!qemu_uuid_is_equal(&pol->entry->namespace, &guid)) { + continue; + } + if (!uefi_str_equal(pol->name, pol->name_size, + name, name_size)) { + continue; + } + return pol; + } + return NULL; +} + +static uefi_var_policy *wildcard_find_policy(uefi_vars_state *uv, + uefi_variable *var) +{ + uefi_var_policy *pol; + + QTAILQ_FOREACH(pol, &uv->var_policies, next) { + if (!qemu_uuid_is_equal(&pol->entry->namespace, &var->guid)) { + continue; + } + if (!wildcard_str_equal(pol, var)) { + continue; + } + return pol; + } + return NULL; +} + +static void calc_policy(uefi_var_policy *pol) +{ + variable_policy_entry *pe = pol->entry; + unsigned int i; + + pol->name = (void *)pol->entry + pe->offset_to_name; + pol->name_size = pe->size - pe->offset_to_name; + + for (i = 0; i < pol->name_size / 2; i++) { + if (pol->name[i] == '#') { + pol->hashmarks++; + } + } +} + +uefi_var_policy *uefi_vars_add_policy(uefi_vars_state *uv, + variable_policy_entry *pe) +{ + uefi_var_policy *pol, *p; + + pol = g_new0(uefi_var_policy, 1); + pol->entry = g_malloc(pe->size); + memcpy(pol->entry, pe, pe->size); + pol->entry_size = pe->size; + + calc_policy(pol); + + /* keep list sorted by priority, add to tail of priority group */ + QTAILQ_FOREACH(p, &uv->var_policies, next) { + if ((p->hashmarks > pol->hashmarks) || + (!p->name_size && pol->name_size)) { + QTAILQ_INSERT_BEFORE(p, pol, next); + return pol; + } + } + + QTAILQ_INSERT_TAIL(&uv->var_policies, pol, next); + return pol; +} + +efi_status uefi_vars_policy_check(uefi_vars_state *uv, + uefi_variable *var, + gboolean is_newvar) +{ + uefi_var_policy *pol; + variable_policy_entry *pe; + variable_lock_on_var_state *lvarstate; + uint16_t *lvarname; + size_t lvarnamesize; + uefi_variable *lvar; + + if (!uv->end_of_dxe) { + return EFI_SUCCESS; + } + + pol = wildcard_find_policy(uv, var); + if (!pol) { + return EFI_SUCCESS; + } + pe = pol->entry; + + uefi_trace_variable(__func__, var->guid, var->name, var->name_size); + print_policy_entry(pe); + + if ((var->attributes & pe->attributes_must_have) != pe->attributes_must_have) { + trace_uefi_vars_policy_deny("must-have-attr"); + return EFI_INVALID_PARAMETER; + } + if ((var->attributes & pe->attributes_cant_have) != 0) { + trace_uefi_vars_policy_deny("cant-have-attr"); + return EFI_INVALID_PARAMETER; + } + + if (var->data_size < pe->min_size) { + trace_uefi_vars_policy_deny("min-size"); + return EFI_INVALID_PARAMETER; + } + if (var->data_size > pe->max_size) { + trace_uefi_vars_policy_deny("max-size"); + return EFI_INVALID_PARAMETER; + } + + switch (pe->lock_policy_type) { + case VARIABLE_POLICY_TYPE_NO_LOCK: + break; + + case VARIABLE_POLICY_TYPE_LOCK_NOW: + trace_uefi_vars_policy_deny("lock-now"); + return EFI_WRITE_PROTECTED; + + case 
VARIABLE_POLICY_TYPE_LOCK_ON_CREATE: + if (!is_newvar) { + trace_uefi_vars_policy_deny("lock-on-create"); + return EFI_WRITE_PROTECTED; + } + break; + + case VARIABLE_POLICY_TYPE_LOCK_ON_VAR_STATE: + lvarstate = (void *)pol->entry + sizeof(*pe); + lvarname = (void *)pol->entry + sizeof(*pe) + sizeof(*lvarstate); + lvarnamesize = pe->offset_to_name - sizeof(*pe) - sizeof(*lvarstate); + + uefi_trace_variable(__func__, lvarstate->namespace, + lvarname, lvarnamesize); + lvar = uefi_vars_find_variable(uv, lvarstate->namespace, + lvarname, lvarnamesize); + if (lvar && lvar->data_size == 1) { + uint8_t *value = lvar->data; + if (lvarstate->value == *value) { + return EFI_WRITE_PROTECTED; + } + } + break; + } + + return EFI_SUCCESS; +} + +void uefi_vars_policies_clear(uefi_vars_state *uv) +{ + uefi_var_policy *pol; + + while (!QTAILQ_EMPTY(&uv->var_policies)) { + pol = QTAILQ_FIRST(&uv->var_policies); + QTAILQ_REMOVE(&uv->var_policies, pol, next); + g_free(pol->entry); + g_free(pol); + } +} + +static size_t uefi_vars_mm_policy_error(mm_header *mhdr, + mm_check_policy *mchk, + uint64_t status) +{ + mchk->result = status; + return sizeof(*mchk); +} + +static uint32_t uefi_vars_mm_check_policy_is_enabled(uefi_vars_state *uv, + mm_header *mhdr, + mm_check_policy *mchk, + void *func) +{ + mm_check_policy_is_enabled *mpar = func; + size_t length; + + length = sizeof(*mchk) + sizeof(*mpar); + if (mhdr->length < length) { + return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE); + } + + mpar->state = TRUE; + mchk->result = EFI_SUCCESS; + return sizeof(*mchk); +} + +static uint32_t uefi_vars_mm_check_policy_register(uefi_vars_state *uv, + mm_header *mhdr, + mm_check_policy *mchk, + void *func) +{ + variable_policy_entry *pe = func; + uefi_var_policy *pol; + uint64_t length; + + if (uadd64_overflow(sizeof(*mchk), pe->size, &length)) { + return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE); + } + if (mhdr->length < length) { + return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE); + } + if (pe->size < sizeof(*pe)) { + return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE); + } + if (pe->offset_to_name < sizeof(*pe)) { + return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE); + } + + if (pe->lock_policy_type == VARIABLE_POLICY_TYPE_LOCK_ON_VAR_STATE && + pe->offset_to_name < sizeof(*pe) + sizeof(variable_lock_on_var_state)) { + return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE); + } + + /* check space for minimum string length */ + if (pe->size < (size_t)pe->offset_to_name) { + return uefi_vars_mm_policy_error(mhdr, mchk, EFI_BAD_BUFFER_SIZE); + } + + if (!uefi_str_is_valid((void *)pe + pe->offset_to_name, + pe->size - pe->offset_to_name, + false)) { + return uefi_vars_mm_policy_error(mhdr, mchk, EFI_INVALID_PARAMETER); + } + + pol = find_policy(uv, pe->namespace, + (void *)pe + pe->offset_to_name, + pe->size - pe->offset_to_name); + if (pol) { + return uefi_vars_mm_policy_error(mhdr, mchk, EFI_ALREADY_STARTED); + } + + uefi_vars_add_policy(uv, pe); + + mchk->result = EFI_SUCCESS; + return sizeof(*mchk); +} + +uint32_t uefi_vars_mm_check_policy_proto(uefi_vars_state *uv) +{ + static const char *fnames[] = { + "zero", + "disable", + "is-enabled", + "register", + "dump", + "lock", + }; + const char *fname; + mm_header *mhdr = (mm_header *) uv->buffer; + mm_check_policy *mchk = (mm_check_policy *) (uv->buffer + sizeof(*mhdr)); + void *func = (uv->buffer + sizeof(*mhdr) + sizeof(*mchk)); + + if (mhdr->length < sizeof(*mchk)) { + return 
UEFI_VARS_STS_ERR_BAD_BUFFER_SIZE; + } + + fname = mchk->command < ARRAY_SIZE(fnames) + ? fnames[mchk->command] + : "unknown"; + trace_uefi_vars_policy_cmd(fname); + + switch (mchk->command) { + case VAR_CHECK_POLICY_COMMAND_DISABLE: + mchk->result = EFI_UNSUPPORTED; + break; + case VAR_CHECK_POLICY_COMMAND_IS_ENABLED: + uefi_vars_mm_check_policy_is_enabled(uv, mhdr, mchk, func); + break; + case VAR_CHECK_POLICY_COMMAND_REGISTER: + if (uv->policy_locked) { + mchk->result = EFI_WRITE_PROTECTED; + } else { + uefi_vars_mm_check_policy_register(uv, mhdr, mchk, func); + } + break; + case VAR_CHECK_POLICY_COMMAND_LOCK: + uv->policy_locked = true; + mchk->result = EFI_SUCCESS; + break; + default: + mchk->result = EFI_UNSUPPORTED; + break; + } + + uefi_trace_status(__func__, mchk->result); + return UEFI_VARS_STS_SUCCESS; +} diff --git a/hw/uefi/var-service-siglist.c b/hw/uefi/var-service-siglist.c new file mode 100644 index 00000000000..8948f1b7847 --- /dev/null +++ b/hw/uefi/var-service-siglist.c @@ -0,0 +1,212 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi vars device - parse and generate efi signature databases + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "system/dma.h" + +#include "hw/uefi/var-service.h" + +/* + * Add x509 certificate to list (with duplicate check). + */ +static void uefi_vars_siglist_add_x509(uefi_vars_siglist *siglist, + QemuUUID *owner, + void *data, uint64_t size) +{ + uefi_vars_cert *c; + + QTAILQ_FOREACH(c, &siglist->x509, next) { + if (c->size != size) { + continue; + } + if (memcmp(c->data, data, size) != 0) { + continue; + } + return; + } + + c = g_malloc(sizeof(*c) + size); + c->owner = *owner; + c->size = size; + memcpy(c->data, data, size); + QTAILQ_INSERT_TAIL(&siglist->x509, c, next); +} + +/* + * Add sha256 hash to list (with duplicate check). + */ +static void uefi_vars_siglist_add_sha256(uefi_vars_siglist *siglist, + QemuUUID *owner, + void *data) +{ + uefi_vars_hash *h; + + QTAILQ_FOREACH(h, &siglist->sha256, next) { + if (memcmp(h->data, data, 32) != 0) { + continue; + } + return; + } + + h = g_malloc(sizeof(*h) + 32); + h->owner = *owner; + memcpy(h->data, data, 32); + QTAILQ_INSERT_TAIL(&siglist->sha256, h, next); +} + +void uefi_vars_siglist_init(uefi_vars_siglist *siglist) +{ + memset(siglist, 0, sizeof(*siglist)); + QTAILQ_INIT(&siglist->x509); + QTAILQ_INIT(&siglist->sha256); +} + +void uefi_vars_siglist_free(uefi_vars_siglist *siglist) +{ + uefi_vars_cert *c, *cs; + uefi_vars_hash *h, *hs; + + QTAILQ_FOREACH_SAFE(c, &siglist->x509, next, cs) { + QTAILQ_REMOVE(&siglist->x509, c, next); + g_free(c); + } + QTAILQ_FOREACH_SAFE(h, &siglist->sha256, next, hs) { + QTAILQ_REMOVE(&siglist->sha256, h, next); + g_free(h); + } +} + +/* + * Parse UEFI signature list. 
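+ * + * The input is a sequence of EFI_SIGNATURE_LIST blocks: a signature type GUID + * plus list, header and signature sizes, followed by an optional header and + * one or more fixed-size entries, each carrying an owner GUID and the + * certificate or hash data. Only X509 and SHA256 lists are handled here; + * other types are skipped with a warning.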
+ */ +void uefi_vars_siglist_parse(uefi_vars_siglist *siglist, + void *data, uint64_t size) +{ + efi_siglist *efilist; + uint64_t start; + + while (size) { + if (size < sizeof(*efilist)) { + break; + } + efilist = data; + if (size < efilist->siglist_size) { + break; + } + + if (uadd64_overflow(sizeof(*efilist), efilist->header_size, &start)) { + break; + } + if (efilist->sig_size <= sizeof(QemuUUID)) { + break; + } + + if (qemu_uuid_is_equal(&efilist->guid_type, &EfiCertX509Guid)) { + if (start + efilist->sig_size != efilist->siglist_size) { + break; + } + uefi_vars_siglist_add_x509(siglist, + (QemuUUID *)(data + start), + data + start + sizeof(QemuUUID), + efilist->sig_size - sizeof(QemuUUID)); + + } else if (qemu_uuid_is_equal(&efilist->guid_type, &EfiCertSha256Guid)) { + if (efilist->sig_size != sizeof(QemuUUID) + 32) { + break; + } + if (start + efilist->sig_size > efilist->siglist_size) { + break; + } + while (start <= efilist->siglist_size - efilist->sig_size) { + uefi_vars_siglist_add_sha256(siglist, + (QemuUUID *)(data + start), + data + start + sizeof(QemuUUID)); + start += efilist->sig_size; + } + + } else { + QemuUUID be = qemu_uuid_bswap(efilist->guid_type); + char *str_uuid = qemu_uuid_unparse_strdup(&be); + warn_report("%s: unknown type (%s)", __func__, str_uuid); + g_free(str_uuid); + } + + data += efilist->siglist_size; + size -= efilist->siglist_size; + } +} + +uint64_t uefi_vars_siglist_blob_size(uefi_vars_siglist *siglist) +{ + uefi_vars_cert *c; + uefi_vars_hash *h; + uint64_t size = 0; + + QTAILQ_FOREACH(c, &siglist->x509, next) { + size += sizeof(efi_siglist) + sizeof(QemuUUID) + c->size; + } + + if (!QTAILQ_EMPTY(&siglist->sha256)) { + size += sizeof(efi_siglist); + QTAILQ_FOREACH(h, &siglist->sha256, next) { + size += sizeof(QemuUUID) + 32; + } + } + + return size; +} + +/* + * Generate UEFI signature list. + */ +void uefi_vars_siglist_blob_generate(uefi_vars_siglist *siglist, + void *data, uint64_t size) +{ + uefi_vars_cert *c; + uefi_vars_hash *h; + efi_siglist *efilist; + uint64_t pos = 0, start; + uint32_t i; + + QTAILQ_FOREACH(c, &siglist->x509, next) { + efilist = data + pos; + efilist->guid_type = EfiCertX509Guid; + efilist->sig_size = sizeof(QemuUUID) + c->size; + efilist->header_size = 0; + + start = pos + sizeof(efi_siglist); + memcpy(data + start, + &c->owner, sizeof(QemuUUID)); + memcpy(data + start + sizeof(QemuUUID), + c->data, c->size); + + efilist->siglist_size = sizeof(efi_siglist) + efilist->sig_size; + pos += efilist->siglist_size; + } + + if (!QTAILQ_EMPTY(&siglist->sha256)) { + efilist = data + pos; + efilist->guid_type = EfiCertSha256Guid; + efilist->sig_size = sizeof(QemuUUID) + 32; + efilist->header_size = 0; + + i = 0; + start = pos + sizeof(efi_siglist); + QTAILQ_FOREACH(h, &siglist->sha256, next) { + memcpy(data + start + efilist->sig_size * i, + &h->owner, sizeof(QemuUUID)); + memcpy(data + start + efilist->sig_size * i + sizeof(QemuUUID), + h->data, 32); + i++; + } + + efilist->siglist_size = sizeof(efi_siglist) + efilist->sig_size * i; + pos += efilist->siglist_size; + } + + assert(pos == size); +} diff --git a/hw/uefi/var-service-sysbus.c b/hw/uefi/var-service-sysbus.c new file mode 100644 index 00000000000..a5aa218e260 --- /dev/null +++ b/hw/uefi/var-service-sysbus.c @@ -0,0 +1,124 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi vars device - sysbus variant. 
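+ * + * Registers the generic TYPE_UEFI_VARS_SYSBUS device, whose MMIO region is + * discovered by the guest via device tree, and the TYPE_UEFI_VARS_X64 + * subclass, which instead publishes a fixed MMIO address through the + * etc/hardware-info fw_cfg entry.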
+ */ +#include "qemu/osdep.h" +#include "migration/vmstate.h" + +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" + +#include "hw/uefi/hardware-info.h" +#include "hw/uefi/var-service.h" +#include "hw/uefi/var-service-api.h" + +OBJECT_DECLARE_SIMPLE_TYPE(uefi_vars_sysbus_state, UEFI_VARS_SYSBUS) + +struct uefi_vars_sysbus_state { + SysBusDevice parent_obj; + struct uefi_vars_state state; +}; + +static const VMStateDescription vmstate_uefi_vars_sysbus = { + .name = TYPE_UEFI_VARS_SYSBUS, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(state, uefi_vars_sysbus_state, 0, + vmstate_uefi_vars, uefi_vars_state), + VMSTATE_END_OF_LIST() + } +}; + +static const Property uefi_vars_sysbus_properties[] = { + DEFINE_PROP_SIZE("size", uefi_vars_sysbus_state, state.max_storage, + 256 * 1024), + DEFINE_PROP_STRING("jsonfile", uefi_vars_sysbus_state, state.jsonfile), + DEFINE_PROP_BOOL("force-secure-boot", uefi_vars_sysbus_state, + state.force_secure_boot, false), + DEFINE_PROP_BOOL("disable-custom-mode", uefi_vars_sysbus_state, + state.disable_custom_mode, false), + DEFINE_PROP_BOOL("use-pio", uefi_vars_sysbus_state, + state.use_pio, false), +}; + +static void uefi_vars_sysbus_init(Object *obj) +{ + uefi_vars_sysbus_state *uv = UEFI_VARS_SYSBUS(obj); + + uefi_vars_init(obj, &uv->state); +} + +static void uefi_vars_sysbus_reset(DeviceState *dev) +{ + uefi_vars_sysbus_state *uv = UEFI_VARS_SYSBUS(dev); + + uefi_vars_hard_reset(&uv->state); +} + +static void uefi_vars_sysbus_realize(DeviceState *dev, Error **errp) +{ + uefi_vars_sysbus_state *uv = UEFI_VARS_SYSBUS(dev); + SysBusDevice *sysbus = SYS_BUS_DEVICE(dev); + + sysbus_init_mmio(sysbus, &uv->state.mr); + uefi_vars_realize(&uv->state, errp); +} + +static void uefi_vars_sysbus_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = uefi_vars_sysbus_realize; + dc->vmsd = &vmstate_uefi_vars_sysbus; + dc->user_creatable = true; + device_class_set_legacy_reset(dc, uefi_vars_sysbus_reset); + device_class_set_props(dc, uefi_vars_sysbus_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +/* generic: hardware discovery via FDT */ +static const TypeInfo uefi_vars_sysbus_info = { + .name = TYPE_UEFI_VARS_SYSBUS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(uefi_vars_sysbus_state), + .instance_init = uefi_vars_sysbus_init, + .class_init = uefi_vars_sysbus_class_init, +}; +module_obj(TYPE_UEFI_VARS_SYSBUS); + +static void uefi_vars_x64_realize(DeviceState *dev, Error **errp) +{ + HARDWARE_INFO_SIMPLE_DEVICE hwinfo = { + .mmio_address = cpu_to_le64(0xfef10000), + }; + SysBusDevice *sysbus = SYS_BUS_DEVICE(dev); + + uefi_vars_sysbus_realize(dev, errp); + + hardware_info_register(HardwareInfoQemuUefiVars, + &hwinfo, sizeof(hwinfo)); + sysbus_mmio_map(sysbus, 0, hwinfo.mmio_address); +} + +static void uefi_vars_x64_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = uefi_vars_x64_realize; +} + +/* x64: hardware discovery via etc/hardware-info fw_cfg */ +static const TypeInfo uefi_vars_x64_info = { + .name = TYPE_UEFI_VARS_X64, + .parent = TYPE_UEFI_VARS_SYSBUS, + .class_init = uefi_vars_x64_class_init, +}; +module_obj(TYPE_UEFI_VARS_X64); + +static void uefi_vars_sysbus_register_types(void) +{ + type_register_static(&uefi_vars_sysbus_info); + type_register_static(&uefi_vars_x64_info); +} + +type_init(uefi_vars_sysbus_register_types) diff --git a/hw/uefi/var-service-utils.c b/hw/uefi/var-service-utils.c new file mode 
100644 index 00000000000..258013f436a --- /dev/null +++ b/hw/uefi/var-service-utils.c @@ -0,0 +1,241 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi vars device - helper functions for ucs2 strings and tracing + */ +#include "qemu/osdep.h" +#include "system/dma.h" + +#include "hw/uefi/var-service.h" + +#include "trace.h" + +/* ------------------------------------------------------------------ */ + +/* + * string helper functions. + * + * Most of the time uefi ucs2 strings are NULL-terminated, except + * sometimes when they are not (for example in variable policies). + */ + +gboolean uefi_str_is_valid(const uint16_t *str, size_t len, + gboolean must_be_null_terminated) +{ + size_t pos = 0; + + for (;;) { + if (pos == len) { + if (must_be_null_terminated) { + return false; + } else { + return true; + } + } + switch (str[pos]) { + case 0: + /* end of string */ + return true; + case 0xd800 ... 0xdfff: + /* reject surrogates */ + return false; + default: + /* char is good, check next */ + break; + } + pos++; + } +} + +size_t uefi_strlen(const uint16_t *str, size_t len) +{ + size_t pos = 0; + + for (;;) { + if (pos == len) { + return pos; + } + if (str[pos] == 0) { + return pos; + } + pos++; + } +} + +gboolean uefi_str_equal_ex(const uint16_t *a, size_t alen, + const uint16_t *b, size_t blen, + gboolean wildcards_in_a) +{ + size_t pos = 0; + + alen = alen / 2; + blen = blen / 2; + for (;;) { + if (pos == alen && pos == blen) { + return true; + } + if (pos == alen && b[pos] == 0) { + return true; + } + if (pos == blen && a[pos] == 0) { + return true; + } + if (pos == alen || pos == blen) { + return false; + } + if (a[pos] == 0 && b[pos] == 0) { + return true; + } + + if (wildcards_in_a && a[pos] == '#') { + if (!isxdigit(b[pos])) { + return false; + } + } else { + if (a[pos] != b[pos]) { + return false; + } + } + pos++; + } +} + +gboolean uefi_str_equal(const uint16_t *a, size_t alen, + const uint16_t *b, size_t blen) +{ + return uefi_str_equal_ex(a, alen, b, blen, false); +} + +char *uefi_ucs2_to_ascii(const uint16_t *ucs2, uint64_t ucs2_size) +{ + char *str = g_malloc0(ucs2_size / 2 + 1); + int i; + + for (i = 0; i * 2 < ucs2_size; i++) { + if (ucs2[i] == 0) { + break; + } + if (ucs2[i] < 128) { + str[i] = ucs2[i]; + } else { + str[i] = '?'; + } + } + str[i] = 0; + return str; +} + +/* ------------------------------------------------------------------ */ +/* time helper functions */ + +int uefi_time_compare(efi_time *a, efi_time *b) +{ + if (a->year < b->year) { + return -1; + } + if (a->year > b->year) { + return 1; + } + + if (a->month < b->month) { + return -1; + } + if (a->month > b->month) { + return 1; + } + + if (a->day < b->day) { + return -1; + } + if (a->day > b->day) { + return 1; + } + + if (a->hour < b->hour) { + return -1; + } + if (a->hour > b->hour) { + return 1; + } + + if (a->minute < b->minute) { + return -1; + } + if (a->minute > b->minute) { + return 1; + } + + if (a->second < b->second) { + return -1; + } + if (a->second > b->second) { + return 1; + } + + if (a->nanosecond < b->nanosecond) { + return -1; + } + if (a->nanosecond > b->nanosecond) { + return 1; + } + + return 0; +} + +/* ------------------------------------------------------------------ */ +/* tracing helper functions */ + +void uefi_trace_variable(const char *action, QemuUUID guid, + const uint16_t *name, uint64_t name_size) +{ + QemuUUID be = qemu_uuid_bswap(guid); + char *str_uuid = qemu_uuid_unparse_strdup(&be); + char *str_name = uefi_ucs2_to_ascii(name, name_size); + + 
trace_uefi_variable(action, str_name, name_size, str_uuid); + + g_free(str_name); + g_free(str_uuid); +} + +void uefi_trace_status(const char *action, efi_status status) +{ + switch (status) { + case EFI_SUCCESS: + trace_uefi_status(action, "success"); + break; + case EFI_INVALID_PARAMETER: + trace_uefi_status(action, "invalid parameter"); + break; + case EFI_UNSUPPORTED: + trace_uefi_status(action, "unsupported"); + break; + case EFI_BAD_BUFFER_SIZE: + trace_uefi_status(action, "bad buffer size"); + break; + case EFI_BUFFER_TOO_SMALL: + trace_uefi_status(action, "buffer too small"); + break; + case EFI_WRITE_PROTECTED: + trace_uefi_status(action, "write protected"); + break; + case EFI_OUT_OF_RESOURCES: + trace_uefi_status(action, "out of resources"); + break; + case EFI_NOT_FOUND: + trace_uefi_status(action, "not found"); + break; + case EFI_ACCESS_DENIED: + trace_uefi_status(action, "access denied"); + break; + case EFI_ALREADY_STARTED: + trace_uefi_status(action, "already started"); + break; + case EFI_SECURITY_VIOLATION: + trace_uefi_status(action, "security violation"); + break; + default: + trace_uefi_status(action, "unknown error"); + break; + } +} diff --git a/hw/uefi/var-service-vars.c b/hw/uefi/var-service-vars.c new file mode 100644 index 00000000000..8533533ea5c --- /dev/null +++ b/hw/uefi/var-service-vars.c @@ -0,0 +1,730 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi vars device - EfiSmmVariableProtocol implementation + */ +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "system/dma.h" +#include "migration/vmstate.h" + +#include "hw/uefi/var-service.h" +#include "hw/uefi/var-service-api.h" +#include "hw/uefi/var-service-edk2.h" + +#include "trace.h" + +#define EFI_VARIABLE_ATTRIBUTE_SUPPORTED \ + (EFI_VARIABLE_NON_VOLATILE | \ + EFI_VARIABLE_BOOTSERVICE_ACCESS | \ + EFI_VARIABLE_RUNTIME_ACCESS | \ + EFI_VARIABLE_HARDWARE_ERROR_RECORD | \ + EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS | \ + EFI_VARIABLE_APPEND_WRITE) + + +const VMStateDescription vmstate_uefi_time = { + .name = "uefi-time", + .fields = (VMStateField[]) { + VMSTATE_UINT16(year, efi_time), + VMSTATE_UINT8(month, efi_time), + VMSTATE_UINT8(day, efi_time), + VMSTATE_UINT8(hour, efi_time), + VMSTATE_UINT8(minute, efi_time), + VMSTATE_UINT8(second, efi_time), + VMSTATE_UINT32(nanosecond, efi_time), + VMSTATE_END_OF_LIST() + }, +}; + +const VMStateDescription vmstate_uefi_variable = { + .name = "uefi-variable", + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY_V(guid.data, uefi_variable, sizeof(QemuUUID), 0), + VMSTATE_UINT32(name_size, uefi_variable), + VMSTATE_UINT32(data_size, uefi_variable), + VMSTATE_UINT32(attributes, uefi_variable), + VMSTATE_VBUFFER_ALLOC_UINT32(name, uefi_variable, 0, NULL, name_size), + VMSTATE_VBUFFER_ALLOC_UINT32(data, uefi_variable, 0, NULL, data_size), + VMSTATE_STRUCT(time, uefi_variable, 0, vmstate_uefi_time, efi_time), + VMSTATE_END_OF_LIST() + }, +}; + +uefi_variable *uefi_vars_find_variable(uefi_vars_state *uv, QemuUUID guid, + const uint16_t *name, uint64_t name_size) +{ + uefi_variable *var; + + QTAILQ_FOREACH(var, &uv->variables, next) { + if (!uefi_str_equal(var->name, var->name_size, + name, name_size)) { + continue; + } + if (!qemu_uuid_is_equal(&var->guid, &guid)) { + continue; + } + if (!var->data_size) { + /* in process of being created/updated */ + continue; + } + return var; + } + return NULL; +} + +static uefi_variable *add_variable(uefi_vars_state *uv, QemuUUID guid, + const uint16_t *name, uint64_t name_size, + uint32_t 
attributes) +{ + uefi_variable *var; + + var = g_new0(uefi_variable, 1); + var->guid = guid; + var->name = g_malloc(name_size); + memcpy(var->name, name, name_size); + var->name_size = name_size; + var->attributes = attributes; + + var->attributes &= ~EFI_VARIABLE_APPEND_WRITE; + + QTAILQ_INSERT_TAIL(&uv->variables, var, next); + return var; +} + +static void del_variable(uefi_vars_state *uv, uefi_variable *var) +{ + if (!var) { + return; + } + + QTAILQ_REMOVE(&uv->variables, var, next); + g_free(var->data); + g_free(var->name); + g_free(var->digest); + g_free(var); +} + +static size_t variable_size(uefi_variable *var) +{ + size_t size; + + size = sizeof(*var); + size += var->name_size; + size += var->data_size; + size += var->digest_size; + return size; +} + +void uefi_vars_set_variable(uefi_vars_state *uv, QemuUUID guid, + const uint16_t *name, uint64_t name_size, + uint32_t attributes, + void *data, uint64_t data_size) +{ + uefi_variable *old_var, *new_var; + + uefi_trace_variable(__func__, guid, name, name_size); + + old_var = uefi_vars_find_variable(uv, guid, name, name_size); + if (old_var) { + uv->used_storage -= variable_size(old_var); + del_variable(uv, old_var); + } + + new_var = add_variable(uv, guid, name, name_size, attributes); + new_var->data = g_malloc(data_size); + new_var->data_size = data_size; + memcpy(new_var->data, data, data_size); + uv->used_storage += variable_size(new_var); +} + +void uefi_vars_clear_volatile(uefi_vars_state *uv) +{ + uefi_variable *var, *n; + + QTAILQ_FOREACH_SAFE(var, &uv->variables, next, n) { + if (var->attributes & EFI_VARIABLE_NON_VOLATILE) { + continue; + } + uv->used_storage -= variable_size(var); + del_variable(uv, var); + } +} + +void uefi_vars_clear_all(uefi_vars_state *uv) +{ + uefi_variable *var, *n; + + QTAILQ_FOREACH_SAFE(var, &uv->variables, next, n) { + del_variable(uv, var); + } + uv->used_storage = 0; +} + +void uefi_vars_update_storage(uefi_vars_state *uv) +{ + uefi_variable *var; + + uv->used_storage = 0; + QTAILQ_FOREACH(var, &uv->variables, next) { + uv->used_storage += variable_size(var); + } +} + +static gboolean check_access(uefi_vars_state *uv, uefi_variable *var) +{ + if (!uv->exit_boot_service) { + if (!(var->attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS)) { + return false; + } + } else { + if (!(var->attributes & EFI_VARIABLE_RUNTIME_ACCESS)) { + return false; + } + } + return true; +} + +static efi_status check_update(uefi_vars_state *uv, uefi_variable *old_var, + uefi_variable *new_var) +{ + efi_status status; + + if (old_var) { + if (!check_access(uv, old_var)) { + return EFI_ACCESS_DENIED; + } + } + + if (new_var) { + if (new_var->attributes & ~EFI_VARIABLE_ATTRIBUTE_SUPPORTED) { + return EFI_UNSUPPORTED; + } + if (!check_access(uv, new_var)) { + return EFI_ACCESS_DENIED; + } + } + + if (old_var && new_var) { + if (old_var->attributes != new_var->attributes) { + return EFI_INVALID_PARAMETER; + } + } + + if (new_var) { + /* create + update */ + status = uefi_vars_policy_check(uv, new_var, old_var == NULL); + } else { + /* delete */ + g_assert(old_var); + status = uefi_vars_policy_check(uv, old_var, false); + } + if (status != EFI_SUCCESS) { + return status; + } + + status = uefi_vars_check_secure_boot(uv, new_var ?: old_var); + if (status != EFI_SUCCESS) { + return status; + } + + return EFI_SUCCESS; +} + +static void append_write(uefi_variable *old_var, + uefi_variable *new_var) +{ + uefi_vars_siglist siglist; + uint64_t size; + void *data; + + uefi_vars_siglist_init(&siglist); + uefi_vars_siglist_parse(&siglist, 
old_var->data, old_var->data_size); + uefi_vars_siglist_parse(&siglist, new_var->data, new_var->data_size); + + size = uefi_vars_siglist_blob_size(&siglist); + data = g_malloc(size); + uefi_vars_siglist_blob_generate(&siglist, data, size); + + g_free(new_var->data); + new_var->data = data; + new_var->data_size = size; + + uefi_vars_siglist_free(&siglist); +} + +static size_t uefi_vars_mm_error(mm_header *mhdr, mm_variable *mvar, + uint64_t status) +{ + mvar->status = status; + return sizeof(*mvar); +} + +static size_t uefi_vars_mm_get_variable(uefi_vars_state *uv, mm_header *mhdr, + mm_variable *mvar, void *func) +{ + mm_variable_access *va = func; + uint16_t *name; + void *data; + uefi_variable *var; + uint64_t length; + + length = sizeof(*mvar) + sizeof(*va); + if (mhdr->length < length) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + + if (va->name_size > uv->max_storage || + va->data_size > uv->max_storage) { + return uefi_vars_mm_error(mhdr, mvar, EFI_OUT_OF_RESOURCES); + } + + name = func + sizeof(*va); + if (uadd64_overflow(length, va->name_size, &length)) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + if (mhdr->length < length) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + + if (!uefi_str_is_valid(name, va->name_size, true)) { + return uefi_vars_mm_error(mhdr, mvar, EFI_INVALID_PARAMETER); + } + + uefi_trace_variable(__func__, va->guid, name, va->name_size); + + var = uefi_vars_find_variable(uv, va->guid, name, va->name_size); + if (!var) { + return uefi_vars_mm_error(mhdr, mvar, EFI_NOT_FOUND); + } + + /* check permissions etc. */ + if (!check_access(uv, var)) { + return uefi_vars_mm_error(mhdr, mvar, EFI_ACCESS_DENIED); + } + + data = func + sizeof(*va) + va->name_size; + if (uadd64_overflow(length, va->data_size, &length)) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + if (uv->buf_size < length) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + + va->attributes = var->attributes; + if (va->data_size < var->data_size) { + va->data_size = var->data_size; + length -= va->data_size; + mvar->status = EFI_BUFFER_TOO_SMALL; + } else { + va->data_size = var->data_size; + memcpy(data, var->data, var->data_size); + mvar->status = EFI_SUCCESS; + } + return length; +} + +static size_t +uefi_vars_mm_get_next_variable(uefi_vars_state *uv, mm_header *mhdr, + mm_variable *mvar, void *func) +{ + mm_next_variable *nv = func; + uefi_variable *var; + uint16_t *name; + uint64_t length; + + length = sizeof(*mvar) + sizeof(*nv); + if (mhdr->length < length) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + + if (nv->name_size > uv->max_storage) { + return uefi_vars_mm_error(mhdr, mvar, EFI_OUT_OF_RESOURCES); + } + + name = func + sizeof(*nv); + if (uadd64_overflow(length, nv->name_size, &length)) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + if (mhdr->length < length) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + + if (!uefi_str_is_valid(name, nv->name_size, true)) { + return uefi_vars_mm_error(mhdr, mvar, EFI_INVALID_PARAMETER); + } + + if (uefi_strlen(name, nv->name_size) == 0) { + /* empty string -> first */ + var = QTAILQ_FIRST(&uv->variables); + while (var && !check_access(uv, var)) { + var = QTAILQ_NEXT(var, next); + } + if (!var) { + return uefi_vars_mm_error(mhdr, mvar, EFI_NOT_FOUND); + } + } else { + var = uefi_vars_find_variable(uv, nv->guid, name, nv->name_size); + if (!var) { + return uefi_vars_mm_error(mhdr, 
mvar, EFI_INVALID_PARAMETER); + } + do { + var = QTAILQ_NEXT(var, next); + } while (var && !check_access(uv, var)); + if (!var) { + return uefi_vars_mm_error(mhdr, mvar, EFI_NOT_FOUND); + } + } + + length = sizeof(*mvar) + sizeof(*nv) + var->name_size; + if (uv->buf_size < length) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + + nv->guid = var->guid; + nv->name_size = var->name_size; + memcpy(name, var->name, var->name_size); + mvar->status = EFI_SUCCESS; + return length; +} + +static bool uefi_vars_mm_digest_compare(uefi_variable *old_var, + uefi_variable *new_var) +{ + if (!old_var->digest || + !new_var->digest || + !old_var->digest_size || + !new_var->digest_size) { + /* should not happen */ + trace_uefi_vars_security_violation("inconsistent authvar digest state"); + return false; + } + if (old_var->digest_size != new_var->digest_size) { + trace_uefi_vars_security_violation("authvar digest size mismatch"); + return false; + } + if (memcmp(old_var->digest, new_var->digest, + old_var->digest_size) != 0) { + trace_uefi_vars_security_violation("authvar digest data mismatch"); + return false; + } + return true; +} + +static size_t uefi_vars_mm_set_variable(uefi_vars_state *uv, mm_header *mhdr, + mm_variable *mvar, void *func) +{ + mm_variable_access *va = func; + uint32_t attributes = 0; + uint16_t *name; + void *data; + uefi_variable *old_var, *new_var; + uint64_t length; + size_t new_storage; + efi_status status; + + length = sizeof(*mvar) + sizeof(*va); + if (mhdr->length < length) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + + if (va->name_size > uv->max_storage || + va->data_size > uv->max_storage) { + return uefi_vars_mm_error(mhdr, mvar, EFI_OUT_OF_RESOURCES); + } + + name = func + sizeof(*va); + if (uadd64_overflow(length, va->name_size, &length)) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + if (mhdr->length < length) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + + data = func + sizeof(*va) + va->name_size; + if (uadd64_overflow(length, va->data_size, &length)) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + if (mhdr->length < length) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + + g_assert(va->name_size < G_MAXUINT32); + g_assert(va->data_size < G_MAXUINT32); + + if (!uefi_str_is_valid(name, va->name_size, true)) { + return uefi_vars_mm_error(mhdr, mvar, EFI_INVALID_PARAMETER); + } + + uefi_trace_variable(__func__, va->guid, name, va->name_size); + + old_var = uefi_vars_find_variable(uv, va->guid, name, va->name_size); + if (va->data_size) { + new_var = add_variable(uv, va->guid, name, va->name_size, + va->attributes); + if (va->attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS) { + /* not implemented (deprecated in uefi spec) */ + warn_report("%s: AUTHENTICATED_WRITE_ACCESS", __func__); + mvar->status = EFI_UNSUPPORTED; + goto rollback; + } else if (va->attributes & + EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS) { + status = uefi_vars_check_auth_2(uv, new_var, va, data); + if (status != EFI_SUCCESS) { + mvar->status = status; + goto rollback; + } + if (old_var && new_var) { + if (uefi_time_compare(&old_var->time, &new_var->time) > 0) { + trace_uefi_vars_security_violation("time check failed"); + mvar->status = EFI_SECURITY_VIOLATION; + goto rollback; + } + if (old_var->digest_size || new_var->digest_size) { + if (!uefi_vars_mm_digest_compare(old_var, new_var)) { + mvar->status = EFI_SECURITY_VIOLATION; + goto rollback; + } + } + } 
+ } else { + new_var->data = g_malloc(va->data_size); + memcpy(new_var->data, data, va->data_size); + new_var->data_size = va->data_size; + } + if (!new_var->data) { + /* we land here when deleting authenticated variables */ + del_variable(uv, new_var); + new_var = NULL; + } + } else { + new_var = NULL; + } + + if (!old_var && !new_var) { + /* delete non-existing variable -> nothing to do */ + mvar->status = EFI_SUCCESS; + return sizeof(*mvar); + } + + /* check permissions etc. */ + status = check_update(uv, old_var, new_var); + if (status != EFI_SUCCESS) { + mvar->status = status; + goto rollback; + } + + if (va->attributes & EFI_VARIABLE_APPEND_WRITE && old_var && new_var) { + /* merge signature databases */ + if (!uefi_vars_is_sb_any(new_var)) { + mvar->status = EFI_UNSUPPORTED; + goto rollback; + } + append_write(old_var, new_var); + } + + /* check storage space */ + new_storage = uv->used_storage; + if (old_var) { + new_storage -= variable_size(old_var); + } + if (new_var) { + new_storage += variable_size(new_var); + } + if (new_storage > uv->max_storage) { + mvar->status = EFI_OUT_OF_RESOURCES; + goto rollback; + } + + attributes = new_var + ? new_var->attributes + : old_var->attributes; + + /* all good, commit */ + del_variable(uv, old_var); + uv->used_storage = new_storage; + + if (attributes & EFI_VARIABLE_NON_VOLATILE) { + uefi_vars_json_save(uv); + } + + if (new_var && uefi_vars_is_sb_pk(new_var)) { + uefi_vars_auth_init(uv); + } + + mvar->status = EFI_SUCCESS; + return sizeof(*mvar); + +rollback: + del_variable(uv, new_var); + return sizeof(*mvar); +} + +static size_t uefi_vars_mm_variable_info(uefi_vars_state *uv, mm_header *mhdr, + mm_variable *mvar, void *func) +{ + mm_variable_info *vi = func; + uint64_t length; + + length = sizeof(*mvar) + sizeof(*vi); + if (uv->buf_size < length) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + + vi->max_storage_size = uv->max_storage; + vi->free_storage_size = uv->max_storage - uv->used_storage; + vi->max_variable_size = uv->max_storage >> 2; + vi->attributes = 0; + + mvar->status = EFI_SUCCESS; + return length; +} + +static size_t +uefi_vars_mm_get_payload_size(uefi_vars_state *uv, mm_header *mhdr, + mm_variable *mvar, void *func) +{ + mm_get_payload_size *ps = func; + uint64_t length; + + length = sizeof(*mvar) + sizeof(*ps); + if (uv->buf_size < length) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + + ps->payload_size = uv->buf_size; + mvar->status = EFI_SUCCESS; + return length; +} + +static size_t +uefi_vars_mm_lock_variable(uefi_vars_state *uv, mm_header *mhdr, + mm_variable *mvar, void *func) +{ + mm_lock_variable *lv = func; + variable_policy_entry *pe; + uint16_t *name, *dest; + uint64_t length; + + length = sizeof(*mvar) + sizeof(*lv); + if (mhdr->length < length) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + + name = func + sizeof(*lv); + if (uadd64_overflow(length, lv->name_size, &length)) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + if (mhdr->length < length) { + return uefi_vars_mm_error(mhdr, mvar, EFI_BAD_BUFFER_SIZE); + } + + uefi_trace_variable(__func__, lv->guid, name, lv->name_size); + + pe = g_malloc0(sizeof(*pe) + lv->name_size); + pe->version = VARIABLE_POLICY_ENTRY_REVISION; + pe->size = sizeof(*pe) + lv->name_size; + pe->offset_to_name = sizeof(*pe); + pe->namespace = lv->guid; + pe->min_size = 0; + pe->max_size = UINT32_MAX; + pe->attributes_must_have = 0; + pe->attributes_cant_have = 0; + pe->lock_policy_type = 
VARIABLE_POLICY_TYPE_LOCK_NOW; + + dest = (void *)pe + pe->offset_to_name; + memcpy(dest, name, lv->name_size); + + uefi_vars_add_policy(uv, pe); + g_free(pe); + + mvar->status = EFI_SUCCESS; + return length; +} + +uint32_t uefi_vars_mm_vars_proto(uefi_vars_state *uv) +{ + static const char *fnames[] = { + "zero", + "get-variable", + "get-next-variable-name", + "set-variable", + "query-variable-info", + "ready-to-boot", + "exit-boot-service", + "get-statistics", + "lock-variable", + "var-check-prop-set", + "var-check-prop-get", + "get-payload-size", + "init-runtime-cache-contect", + "sync-runtime-cache", + "get-runtime-cache-info", + }; + const char *fname; + uint64_t length; + + mm_header *mhdr = (mm_header *) uv->buffer; + mm_variable *mvar = (mm_variable *) (uv->buffer + sizeof(*mhdr)); + void *func = (uv->buffer + sizeof(*mhdr) + sizeof(*mvar)); + + if (mhdr->length < sizeof(*mvar)) { + return UEFI_VARS_STS_ERR_BAD_BUFFER_SIZE; + } + + fname = mvar->function < ARRAY_SIZE(fnames) + ? fnames[mvar->function] + : "unknown"; + trace_uefi_vars_proto_cmd(fname); + + switch (mvar->function) { + case SMM_VARIABLE_FUNCTION_GET_VARIABLE: + length = uefi_vars_mm_get_variable(uv, mhdr, mvar, func); + break; + + case SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME: + length = uefi_vars_mm_get_next_variable(uv, mhdr, mvar, func); + break; + + case SMM_VARIABLE_FUNCTION_SET_VARIABLE: + length = uefi_vars_mm_set_variable(uv, mhdr, mvar, func); + break; + + case SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO: + length = uefi_vars_mm_variable_info(uv, mhdr, mvar, func); + break; + + case SMM_VARIABLE_FUNCTION_LOCK_VARIABLE: + length = uefi_vars_mm_lock_variable(uv, mhdr, mvar, func); + break; + + case SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE: + length = uefi_vars_mm_get_payload_size(uv, mhdr, mvar, func); + break; + + case SMM_VARIABLE_FUNCTION_READY_TO_BOOT: + trace_uefi_event("ready-to-boot"); + uv->ready_to_boot = true; + mvar->status = EFI_SUCCESS; + length = 0; + break; + + case SMM_VARIABLE_FUNCTION_EXIT_BOOT_SERVICE: + trace_uefi_event("exit-boot-service"); + uv->exit_boot_service = true; + mvar->status = EFI_SUCCESS; + length = 0; + break; + + default: + length = uefi_vars_mm_error(mhdr, mvar, EFI_UNSUPPORTED); + break; + } + + if (mhdr->length < length) { + mvar->status = EFI_BUFFER_TOO_SMALL; + } + + uefi_trace_status(__func__, mvar->status); + return UEFI_VARS_STS_SUCCESS; +} diff --git a/hw/ufs/lu.c b/hw/ufs/lu.c index 81bfff9b4ec..2d8ffd72c5d 100644 --- a/hw/ufs/lu.c +++ b/hw/ufs/lu.c @@ -14,7 +14,7 @@ #include "qemu/memalign.h" #include "hw/scsi/scsi.h" #include "scsi/constants.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qemu/cutils.h" #include "trace.h" #include "ufs.h" @@ -194,7 +194,7 @@ static int ufs_emulate_wlun_inquiry(UfsRequest *req, uint8_t *outbuf, static UfsReqResult ufs_emulate_scsi_cmd(UfsLu *lu, UfsRequest *req) { uint8_t lun = lu->lun; - uint8_t outbuf[4096]; + QEMU_UNINITIALIZED uint8_t outbuf[4096]; uint8_t sense_buf[UFS_SENSE_SIZE]; uint8_t scsi_status; int len = 0; @@ -274,10 +274,9 @@ static UfsReqResult ufs_process_scsi_cmd(UfsLu *lu, UfsRequest *req) return UFS_REQUEST_NO_COMPLETE; } -static Property ufs_lu_props[] = { +static const Property ufs_lu_props[] = { DEFINE_PROP_DRIVE("drive", UfsLu, conf.blk), DEFINE_PROP_UINT8("lun", UfsLu, lun, 0), - DEFINE_PROP_END_OF_LIST(), }; static bool ufs_add_lu(UfsHc *u, UfsLu *lu, Error **errp) @@ -420,7 +419,7 @@ static void ufs_lu_unrealize(DeviceState *dev) } } -static void 
ufs_lu_class_init(ObjectClass *oc, void *data) +static void ufs_lu_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/ufs/ufs.c b/hw/ufs/ufs.c index 945a0ea1273..0577747f469 100644 --- a/hw/ufs/ufs.c +++ b/hw/ufs/ufs.c @@ -25,6 +25,7 @@ #include "qapi/error.h" #include "migration/vmstate.h" #include "scsi/constants.h" +#include "hw/irq.h" #include "trace.h" #include "ufs.h" @@ -34,6 +35,11 @@ #define UFS_MAX_NUTMRS 8 #define UFS_MCQ_QCFGPTR 2 +/* Each value represents the temperature in celsius as (value - 80) */ +#define UFS_TEMPERATURE 120 +#define UFS_TOO_HIGH_TEMP_BOUNDARY 160 +#define UFS_TOO_LOW_TEMP_BOUNDARY 60 + static void ufs_exec_req(UfsRequest *req); static void ufs_clear_req(UfsRequest *req); @@ -838,6 +844,42 @@ static const MemoryRegionOps ufs_mmio_ops = { }, }; +static void ufs_update_ee_status(UfsHc *u) +{ + uint16_t ee_status = be16_to_cpu(u->attributes.exception_event_status); + uint8_t high_temp_thresh = u->attributes.device_too_high_temp_boundary; + uint8_t low_temp_thresh = u->attributes.device_too_low_temp_boundary; + + if (u->temperature >= high_temp_thresh) { + ee_status |= MASK_EE_TOO_HIGH_TEMP; + } else { + ee_status &= ~MASK_EE_TOO_HIGH_TEMP; + } + + if (u->temperature <= low_temp_thresh) { + ee_status |= MASK_EE_TOO_LOW_TEMP; + } else { + ee_status &= ~MASK_EE_TOO_LOW_TEMP; + } + + u->attributes.exception_event_status = cpu_to_be16(ee_status); +} + +static bool ufs_check_exception_event_alert(UfsHc *u, uint8_t trans_type) +{ + uint16_t ee_control = be16_to_cpu(u->attributes.exception_event_control); + uint16_t ee_status; + + if (trans_type != UFS_UPIU_TRANSACTION_RESPONSE) { + return false; + } + + ufs_update_ee_status(u); + + ee_status = be16_to_cpu(u->attributes.exception_event_status); + + return ee_control & ee_status; +} void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags, uint8_t response, uint8_t scsi_status, @@ -848,9 +890,19 @@ void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags, req->rsp_upiu.header.flags = flags; req->rsp_upiu.header.response = response; req->rsp_upiu.header.scsi_status = scsi_status; + req->rsp_upiu.header.device_inf = + ufs_check_exception_event_alert(req->hc, trans_type); req->rsp_upiu.header.data_segment_length = cpu_to_be16(data_segment_length); } +void ufs_build_query_response(UfsRequest *req) +{ + req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode; + req->rsp_upiu.qr.idn = req->req_upiu.qr.idn; + req->rsp_upiu.qr.index = req->req_upiu.qr.index; + req->rsp_upiu.qr.selector = req->req_upiu.qr.selector; +} + static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req) { UfsHc *u = req->hc; @@ -1034,6 +1086,25 @@ static QueryRespCode ufs_exec_query_flag(UfsRequest *req, int op) return UFS_QUERY_RESULT_SUCCESS; } +static inline uint8_t ufs_read_device_temp(UfsHc *u) +{ + uint8_t feat_sup = u->device_desc.ufs_features_support; + bool high_temp_sup, low_temp_sup, high_temp_en, low_temp_en; + uint16_t ee_control = be16_to_cpu(u->attributes.exception_event_control); + + high_temp_sup = feat_sup & UFS_DEV_HIGH_TEMP_NOTIF; + low_temp_sup = feat_sup & UFS_DEV_LOW_TEMP_NOTIF; + high_temp_en = ee_control & MASK_EE_TOO_HIGH_TEMP; + low_temp_en = ee_control & MASK_EE_TOO_LOW_TEMP; + + if ((high_temp_sup && high_temp_en) || + (low_temp_sup && low_temp_en)) { + return u->temperature; + } + + return 0; +} + static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn) { switch (idn) { @@ -1064,6 +1135,7 @@ static uint32_t ufs_read_attr_value(UfsHc *u, 
uint8_t idn) case UFS_QUERY_ATTR_IDN_EE_CONTROL: return be16_to_cpu(u->attributes.exception_event_control); case UFS_QUERY_ATTR_IDN_EE_STATUS: + ufs_update_ee_status(u); return be16_to_cpu(u->attributes.exception_event_status); case UFS_QUERY_ATTR_IDN_SECONDS_PASSED: return be32_to_cpu(u->attributes.seconds_passed); @@ -1078,7 +1150,8 @@ static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn) case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME: return u->attributes.ref_clk_gating_wait_time; case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP: - return u->attributes.device_case_rough_temperaure; + u->attributes.device_case_rough_temperature = ufs_read_device_temp(u); + return u->attributes.device_case_rough_temperature; case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND: return u->attributes.device_too_high_temp_boundary; case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND: @@ -1103,10 +1176,13 @@ static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn) return 0; } -static void ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value) +static QueryRespCode ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value) { switch (idn) { case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL: + if (value > UFS_QUERY_ATTR_ACTIVE_ICC_MAXVALUE) { + return UFS_QUERY_RESULT_INVALID_VALUE; + } u->attributes.active_icc_level = value; break; case UFS_QUERY_ATTR_IDN_MAX_DATA_IN: @@ -1134,6 +1210,7 @@ static void ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value) u->attributes.psa_data_size = cpu_to_be32(value); break; } + return UFS_QUERY_RESULT_SUCCESS; } static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op) @@ -1150,13 +1227,13 @@ static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op) if (op == UFS_QUERY_ATTR_READ) { value = ufs_read_attr_value(u, idn); + ret = UFS_QUERY_RESULT_SUCCESS; } else { value = be32_to_cpu(req->req_upiu.qr.value); - ufs_write_attr_value(u, idn, value); + ret = ufs_write_attr_value(u, idn, value); } - req->rsp_upiu.qr.value = cpu_to_be32(value); - return UFS_QUERY_RESULT_SUCCESS; + return ret; } static const RpmbUnitDescriptor rpmb_unit_desc = { @@ -1279,9 +1356,12 @@ static QueryRespCode ufs_read_desc(UfsRequest *req) UfsHc *u = req->hc; QueryRespCode status; uint8_t idn = req->req_upiu.qr.idn; + uint8_t selector = req->req_upiu.qr.selector; uint16_t length = be16_to_cpu(req->req_upiu.qr.length); InterconnectDescriptor desc; - + if (selector != 0) { + return UFS_QUERY_RESULT_INVALID_SELECTOR; + } switch (idn) { case UFS_QUERY_DESC_IDN_DEVICE: memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc)); @@ -1327,10 +1407,6 @@ static QueryRespCode ufs_read_desc(UfsRequest *req) if (length > req->rsp_upiu.qr.data[0]) { length = req->rsp_upiu.qr.data[0]; } - req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode; - req->rsp_upiu.qr.idn = req->req_upiu.qr.idn; - req->rsp_upiu.qr.index = req->req_upiu.qr.index; - req->rsp_upiu.qr.selector = req->req_upiu.qr.selector; req->rsp_upiu.qr.length = cpu_to_be16(length); return status; @@ -1411,6 +1487,7 @@ static UfsReqResult ufs_exec_query_cmd(UfsRequest *req) data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length); ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0, data_segment_length); + ufs_build_query_response(req); if (status != UFS_QUERY_RESULT_SUCCESS) { return UFS_REQUEST_FAIL; @@ -1623,7 +1700,7 @@ static void ufs_init_hc(UfsHc *u) cap = FIELD_DP32(cap, CAP, OODDS, 0); cap = FIELD_DP32(cap, CAP, UICDMETMS, 0); cap = FIELD_DP32(cap, CAP, CS, 0); - cap = FIELD_DP32(cap, CAP, LSDBS, 1); + cap = FIELD_DP32(cap, CAP, 
LSDBS, 0); cap = FIELD_DP32(cap, CAP, MCQS, u->params.mcq); u->reg.cap = cap; @@ -1665,15 +1742,19 @@ static void ufs_init_hc(UfsHc *u) u->device_desc.ud_0_base_offset = 0x16; u->device_desc.ud_config_p_length = 0x1A; u->device_desc.device_rtt_cap = 0x02; + u->device_desc.ufs_features_support = UFS_DEV_HIGH_TEMP_NOTIF | + UFS_DEV_LOW_TEMP_NOTIF; u->device_desc.queue_depth = u->params.nutrs; u->device_desc.product_revision_level = 0x04; + u->device_desc.extended_ufs_features_support = + cpu_to_be32(UFS_DEV_HIGH_TEMP_NOTIF | UFS_DEV_LOW_TEMP_NOTIF); memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor)); u->geometry_desc.length = sizeof(GeometryDescriptor); u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY; u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0; - u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */ - u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */ + u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4MB: 8192 * 512B */ + u->geometry_desc.allocation_unit_size = 0x1; /* 4MB: 1 segment */ u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */ u->geometry_desc.max_in_buffer_size = 0x8; u->geometry_desc.max_out_buffer_size = 0x8; @@ -1690,9 +1771,17 @@ static void ufs_init_hc(UfsHc *u) /* configure descriptor is not supported */ u->attributes.config_descr_lock = 0x01; u->attributes.max_num_of_rtt = 0x02; + u->attributes.device_too_high_temp_boundary = UFS_TOO_HIGH_TEMP_BOUNDARY; + u->attributes.device_too_low_temp_boundary = UFS_TOO_LOW_TEMP_BOUNDARY; memset(&u->flags, 0, sizeof(u->flags)); u->flags.permanently_disable_fw_update = 1; + + /* + * The temperature value is fixed to UFS_TEMPERATURE and does not change + * dynamically + */ + u->temperature = UFS_TEMPERATURE; } static void ufs_realize(PCIDevice *pci_dev, Error **errp) @@ -1720,6 +1809,8 @@ static void ufs_exit(PCIDevice *pci_dev) { UfsHc *u = UFS(pci_dev); + qemu_free_irq(u->irq); + qemu_bh_delete(u->doorbell_bh); qemu_bh_delete(u->complete_bh); @@ -1740,13 +1831,12 @@ static void ufs_exit(PCIDevice *pci_dev) } } -static Property ufs_props[] = { +static const Property ufs_props[] = { DEFINE_PROP_STRING("serial", UfsHc, params.serial), DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32), DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8), DEFINE_PROP_BOOL("mcq", UfsHc, params.mcq, false), DEFINE_PROP_UINT8("mcq-maxq", UfsHc, params.mcq_maxq, 2), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription ufs_vmstate = { @@ -1754,7 +1844,7 @@ static const VMStateDescription ufs_vmstate = { .unmigratable = 1, }; -static void ufs_class_init(ObjectClass *oc, void *data) +static void ufs_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); @@ -1790,7 +1880,7 @@ static char *ufs_bus_get_dev_path(DeviceState *dev) return qdev_get_dev_path(bus->parent); } -static void ufs_bus_class_init(ObjectClass *class, void *data) +static void ufs_bus_class_init(ObjectClass *class, const void *data) { BusClass *bc = BUS_CLASS(class); bc->get_dev_path = ufs_bus_get_dev_path; @@ -1802,7 +1892,7 @@ static const TypeInfo ufs_info = { .parent = TYPE_PCI_DEVICE, .class_init = ufs_class_init, .instance_size = sizeof(UfsHc), - .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} }, + .interfaces = (const InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} }, }; static const TypeInfo ufs_bus_info = { diff --git a/hw/ufs/ufs.h b/hw/ufs/ufs.h index 6c9382cbc4d..3799d97f30d 100644 --- a/hw/ufs/ufs.h +++ b/hw/ufs/ufs.h @@ 
-146,6 +146,8 @@ typedef struct UfsHc { /* MCQ properties */ UfsSq *sq[UFS_MAX_MCQ_QNUM]; UfsCq *cq[UFS_MAX_MCQ_QNUM]; + + uint8_t temperature; } UfsHc; static inline uint32_t ufs_mcq_sq_tail(UfsHc *u, uint32_t qid) @@ -228,6 +230,7 @@ static inline bool is_wlun(uint8_t lun) void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags, uint8_t response, uint8_t scsi_status, uint16_t data_segment_length); +void ufs_build_query_response(UfsRequest *req); void ufs_complete_req(UfsRequest *req, UfsReqResult req_result); void ufs_init_wlu(UfsLu *wlu, uint8_t wlun); #endif /* HW_UFS_UFS_H */ diff --git a/hw/usb/Kconfig b/hw/usb/Kconfig index 84bc7fbe36c..69c663be52f 100644 --- a/hw/usb/Kconfig +++ b/hw/usb/Kconfig @@ -53,18 +53,10 @@ config USB_XHCI_SYSBUS bool select USB_XHCI -config USB_MUSB - bool - select USB - config USB_DWC2 bool select USB -config TUSB6010 - bool - select USB_MUSB - config USB_HUB bool default y @@ -151,3 +143,7 @@ config USB_DWC3 config XLNX_USB_SUBSYS bool select USB_DWC3 + +config USB_CHIPIDEA + bool + select USB_EHCI_SYSBUS diff --git a/hw/usb/bus-stub.c b/hw/usb/bus-stub.c index fcabe8429e6..cd0c317b718 100644 --- a/hw/usb/bus-stub.c +++ b/hw/usb/bus-stub.c @@ -10,7 +10,7 @@ #include "qemu/error-report.h" #include "qapi/error.h" #include "qapi/qapi-commands-machine.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "monitor/monitor.h" #include "hw/usb.h" diff --git a/hw/usb/bus.c b/hw/usb/bus.c index bfab2807d75..8dd2ce415eb 100644 --- a/hw/usb/bus.c +++ b/hw/usb/bus.c @@ -6,7 +6,7 @@ #include "qapi/type-helpers.h" #include "qemu/error-report.h" #include "qemu/module.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "migration/vmstate.h" #include "monitor/monitor.h" #include "trace.h" @@ -18,16 +18,15 @@ static char *usb_get_dev_path(DeviceState *dev); static char *usb_get_fw_dev_path(DeviceState *qdev); static void usb_qdev_unrealize(DeviceState *qdev); -static Property usb_props[] = { +static const Property usb_props[] = { DEFINE_PROP_STRING("port", USBDevice, port_path), DEFINE_PROP_STRING("serial", USBDevice, serial), DEFINE_PROP_BIT("msos-desc", USBDevice, flags, USB_DEV_FLAG_MSOS_DESC_ENABLE, true), DEFINE_PROP_STRING("pcap", USBDevice, pcap_filename), - DEFINE_PROP_END_OF_LIST() }; -static void usb_bus_class_init(ObjectClass *klass, void *data) +static void usb_bus_class_init(ObjectClass *klass, const void *data) { BusClass *k = BUS_CLASS(klass); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); @@ -43,7 +42,7 @@ static const TypeInfo usb_bus_info = { .parent = TYPE_BUS, .instance_size = sizeof(USBBus), .class_init = usb_bus_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } } @@ -412,7 +411,7 @@ void usb_claim_port(USBDevice *dev, Error **errp) } else { if (bus->nfree == 1 && strcmp(object_get_typename(OBJECT(dev)), "usb-hub") != 0) { /* Create a new hub and chain it on */ - hub = usb_try_new("usb-hub"); + hub = USB_DEVICE(qdev_try_new("usb-hub")); if (hub) { usb_realize_and_unref(hub, bus, NULL); } @@ -663,7 +662,8 @@ USBDevice *usbdevice_create(const char *driver) return NULL; } - dev = f->usbdevice_init ? f->usbdevice_init() : usb_new(f->name); + dev = f->usbdevice_init ? 
f->usbdevice_init() + : USB_DEVICE(qdev_new(f->name)); if (!dev) { error_report("Failed to create USB device '%s'", f->name); return NULL; @@ -713,7 +713,7 @@ static void usb_device_instance_init(Object *obj) } } -static void usb_device_class_init(ObjectClass *klass, void *data) +static void usb_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->bus_type = TYPE_USB_BUS; diff --git a/hw/usb/canokey.c b/hw/usb/canokey.c index b306eeb20e9..cbefbb5daf8 100644 --- a/hw/usb/canokey.c +++ b/hw/usb/canokey.c @@ -197,8 +197,8 @@ static void canokey_handle_data(USBDevice *dev, USBPacket *p) switch (p->pid) { case USB_TOKEN_OUT: trace_canokey_handle_data_out(ep_out, p->iov.size); - usb_packet_copy(p, key->ep_out_buffer[ep_out], p->iov.size); out_pos = 0; + /* segment packet into (possibly multiple) ep_out */ while (out_pos != p->iov.size) { /* * key->ep_out[ep_out] set by prepare_receive @@ -207,8 +207,8 @@ static void canokey_handle_data(USBDevice *dev, USBPacket *p) * to be the buffer length */ out_len = MIN(p->iov.size - out_pos, key->ep_out_size[ep_out]); - memcpy(key->ep_out[ep_out], - key->ep_out_buffer[ep_out] + out_pos, out_len); + /* usb_packet_copy would update the pos offset internally */ + usb_packet_copy(p, key->ep_out[ep_out], out_len); out_pos += out_len; /* update ep_out_size to actual len */ key->ep_out_size[ep_out] = out_len; @@ -296,12 +296,11 @@ static void canokey_unrealize(USBDevice *base) trace_canokey_unrealize(); } -static Property canokey_properties[] = { +static const Property canokey_properties[] = { DEFINE_PROP_STRING("file", CanoKeyState, file), - DEFINE_PROP_END_OF_LIST(), }; -static void canokey_class_init(ObjectClass *klass, void *data) +static void canokey_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); diff --git a/hw/usb/canokey.h b/hw/usb/canokey.h index e528889d332..1b60d734850 100644 --- a/hw/usb/canokey.h +++ b/hw/usb/canokey.h @@ -24,8 +24,6 @@ #define CANOKEY_EP_NUM 3 /* BULK/INTR IN can be up to 1352 bytes, e.g. get key info */ #define CANOKEY_EP_IN_BUFFER_SIZE 2048 -/* BULK OUT can be up to 270 bytes, e.g. 
PIV import cert */ -#define CANOKEY_EP_OUT_BUFFER_SIZE 512 typedef enum { CANOKEY_EP_IN_WAIT, @@ -59,8 +57,6 @@ typedef struct CanoKeyState { /* OUT pointer to canokey recv buffer */ uint8_t *ep_out[CANOKEY_EP_NUM]; uint32_t ep_out_size[CANOKEY_EP_NUM]; - /* For large BULK OUT, multiple write to ep_out is needed */ - uint8_t ep_out_buffer[CANOKEY_EP_NUM][CANOKEY_EP_OUT_BUFFER_SIZE]; /* Properties */ char *file; /* canokey-file */ diff --git a/hw/usb/ccid-card-emulated.c b/hw/usb/ccid-card-emulated.c index 3ee9c73b879..c21cefd82d2 100644 --- a/hw/usb/ccid-card-emulated.c +++ b/hw/usb/ccid-card-emulated.c @@ -582,17 +582,16 @@ static void emulated_unrealize(CCIDCardState *base) qemu_mutex_destroy(&card->event_list_mutex); } -static Property emulated_card_properties[] = { +static const Property emulated_card_properties[] = { DEFINE_PROP_STRING("backend", EmulatedState, backend_str), DEFINE_PROP_STRING("cert1", EmulatedState, cert1), DEFINE_PROP_STRING("cert2", EmulatedState, cert2), DEFINE_PROP_STRING("cert3", EmulatedState, cert3), DEFINE_PROP_STRING("db", EmulatedState, db), DEFINE_PROP_UINT8("debug", EmulatedState, debug, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void emulated_class_initfn(ObjectClass *klass, void *data) +static void emulated_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); CCIDCardClass *cc = CCID_CARD_CLASS(klass); diff --git a/hw/usb/ccid-card-passthru.c b/hw/usb/ccid-card-passthru.c index a5157039042..1eea21a7337 100644 --- a/hw/usb/ccid-card-passthru.c +++ b/hw/usb/ccid-card-passthru.c @@ -388,13 +388,12 @@ static const VMStateDescription passthru_vmstate = { } }; -static Property passthru_card_properties[] = { +static const Property passthru_card_properties[] = { DEFINE_PROP_CHR("chardev", PassthruState, cs), DEFINE_PROP_UINT8("debug", PassthruState, debug, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void passthru_class_initfn(ObjectClass *klass, void *data) +static void passthru_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); CCIDCardClass *cc = CCID_CARD_CLASS(klass); diff --git a/hw/usb/chipidea.c b/hw/usb/chipidea.c index b1c85404d6f..250c2b3bca9 100644 --- a/hw/usb/chipidea.c +++ b/hw/usb/chipidea.c @@ -144,7 +144,7 @@ static void chipidea_init(Object *obj) } } -static void chipidea_class_init(ObjectClass *klass, void *data) +static void chipidea_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(klass); diff --git a/hw/usb/dev-audio.c b/hw/usb/dev-audio.c index 1897fff9e6f..26af709f314 100644 --- a/hw/usb/dev-audio.c +++ b/hw/usb/dev-audio.c @@ -990,15 +990,14 @@ static const VMStateDescription vmstate_usb_audio = { .unmigratable = 1, }; -static Property usb_audio_properties[] = { +static const Property usb_audio_properties[] = { DEFINE_AUDIO_PROPERTIES(USBAudioState, card), DEFINE_PROP_UINT32("debug", USBAudioState, debug, 0), DEFINE_PROP_UINT32("buffer", USBAudioState, buffer_user, 0), DEFINE_PROP_BOOL("multi", USBAudioState, multi, false), - DEFINE_PROP_END_OF_LIST(), }; -static void usb_audio_class_init(ObjectClass *klass, void *data) +static void usb_audio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *k = USB_DEVICE_CLASS(klass); diff --git a/hw/usb/dev-hid.c b/hw/usb/dev-hid.c index 9e358c934ef..96623aa3223 100644 --- a/hw/usb/dev-hid.c +++ b/hw/usb/dev-hid.c @@ -491,14 +491,14 @@ static const uint8_t 
qemu_tablet_hid_report_descriptor[] = { 0xa1, 0x00, /* Collection (Physical) */ 0x05, 0x09, /* Usage Page (Button) */ 0x19, 0x01, /* Usage Minimum (1) */ - 0x29, 0x03, /* Usage Maximum (3) */ + 0x29, 0x05, /* Usage Maximum (5) */ 0x15, 0x00, /* Logical Minimum (0) */ 0x25, 0x01, /* Logical Maximum (1) */ - 0x95, 0x03, /* Report Count (3) */ + 0x95, 0x05, /* Report Count (5) */ 0x75, 0x01, /* Report Size (1) */ 0x81, 0x02, /* Input (Data, Variable, Absolute) */ 0x95, 0x01, /* Report Count (1) */ - 0x75, 0x05, /* Report Size (5) */ + 0x75, 0x03, /* Report Size (3) */ 0x81, 0x01, /* Input (Constant) */ 0x05, 0x01, /* Usage Page (Generic Desktop) */ 0x09, 0x30, /* Usage (X) */ @@ -774,7 +774,7 @@ static const VMStateDescription vmstate_usb_kbd = { } }; -static void usb_hid_class_initfn(ObjectClass *klass, void *data) +static void usb_hid_class_initfn(ObjectClass *klass, const void *data) { USBDeviceClass *uc = USB_DEVICE_CLASS(klass); @@ -793,14 +793,13 @@ static const TypeInfo usb_hid_type_info = { .class_init = usb_hid_class_initfn, }; -static Property usb_tablet_properties[] = { +static const Property usb_tablet_properties[] = { DEFINE_PROP_UINT32("usb_version", USBHIDState, usb_version, 2), DEFINE_PROP_STRING("display", USBHIDState, display), DEFINE_PROP_UINT32("head", USBHIDState, head, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void usb_tablet_class_initfn(ObjectClass *klass, void *data) +static void usb_tablet_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); @@ -818,12 +817,11 @@ static const TypeInfo usb_tablet_info = { .class_init = usb_tablet_class_initfn, }; -static Property usb_mouse_properties[] = { +static const Property usb_mouse_properties[] = { DEFINE_PROP_UINT32("usb_version", USBHIDState, usb_version, 2), - DEFINE_PROP_END_OF_LIST(), }; -static void usb_mouse_class_initfn(ObjectClass *klass, void *data) +static void usb_mouse_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); @@ -841,13 +839,12 @@ static const TypeInfo usb_mouse_info = { .class_init = usb_mouse_class_initfn, }; -static Property usb_keyboard_properties[] = { +static const Property usb_keyboard_properties[] = { DEFINE_PROP_UINT32("usb_version", USBHIDState, usb_version, 2), DEFINE_PROP_STRING("display", USBHIDState, display), - DEFINE_PROP_END_OF_LIST(), }; -static void usb_keyboard_class_initfn(ObjectClass *klass, void *data) +static void usb_keyboard_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); diff --git a/hw/usb/dev-hub.c b/hw/usb/dev-hub.c index 06e9537d035..a19350d9c42 100644 --- a/hw/usb/dev-hub.c +++ b/hw/usb/dev-hub.c @@ -479,6 +479,7 @@ static void usb_hub_handle_control(USBDevice *dev, USBPacket *p, usb_hub_port_clear(port, PORT_STAT_SUSPEND); port->wPortChange = 0; } + break; default: goto fail; } @@ -664,13 +665,12 @@ static const VMStateDescription vmstate_usb_hub = { } }; -static Property usb_hub_properties[] = { +static const Property usb_hub_properties[] = { DEFINE_PROP_UINT32("ports", USBHubState, num_ports, 8), DEFINE_PROP_BOOL("port-power", USBHubState, port_power, false), - DEFINE_PROP_END_OF_LIST(), }; -static void usb_hub_class_initfn(ObjectClass *klass, void *data) +static void usb_hub_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); 
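Note on the hw/usb/dev-hid.c hunk above: widening the tablet's button field from 3 to 5 report bits keeps the report layout intact only because the constant padding that follows is shrunk from 5 to 3 bits, so the first byte of the HID input report stays exactly 8 bits wide. The standalone sketch below just checks that arithmetic; it is not part of the patch, and the TABLET_* macro names are invented for illustration.

/* Sketch only: button bits plus constant padding bits must still fill
 * one whole byte after the dev-hid.c descriptor change. */
#include <assert.h>

#define TABLET_BUTTON_BITS 5   /* Report Count (5) x Report Size (1) */
#define TABLET_PAD_BITS    3   /* Report Count (1) x Report Size (3) */

int main(void)
{
    assert(TABLET_BUTTON_BITS + TABLET_PAD_BITS == 8);
    return 0;
}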
diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c index 554b397e887..ce45c9cd061 100644 --- a/hw/usb/dev-mtp.c +++ b/hw/usb/dev-mtp.c @@ -1234,8 +1234,6 @@ static void usb_mtp_object_delete(MTPState *s, uint32_t handle, default: g_assert_not_reached(); } - - return; } static void usb_mtp_command(MTPState *s, MTPControl *c) @@ -2078,14 +2076,13 @@ static const VMStateDescription vmstate_usb_mtp = { } }; -static Property mtp_properties[] = { +static const Property mtp_properties[] = { DEFINE_PROP_STRING("rootdir", MTPState, root), DEFINE_PROP_STRING("desc", MTPState, desc), DEFINE_PROP_BOOL("readonly", MTPState, readonly, true), - DEFINE_PROP_END_OF_LIST(), }; -static void usb_mtp_class_initfn(ObjectClass *klass, void *data) +static void usb_mtp_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); diff --git a/hw/usb/dev-network.c b/hw/usb/dev-network.c index d00d68b21dc..81cc09dcac9 100644 --- a/hw/usb/dev-network.c +++ b/hw/usb/dev-network.c @@ -33,7 +33,7 @@ #include "qemu/error-report.h" #include "qemu/queue.h" #include "qemu/config-file.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qemu/iov.h" #include "qemu/module.h" #include "qemu/cutils.h" @@ -1407,12 +1407,11 @@ static const VMStateDescription vmstate_usb_net = { .unmigratable = 1, }; -static Property net_properties[] = { +static const Property net_properties[] = { DEFINE_NIC_PROPERTIES(USBNetState, conf), - DEFINE_PROP_END_OF_LIST(), }; -static void usb_net_class_initfn(ObjectClass *klass, void *data) +static void usb_net_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); diff --git a/hw/usb/dev-serial.c b/hw/usb/dev-serial.c index 63047d79cfd..1c116d8b0fe 100644 --- a/hw/usb/dev-serial.c +++ b/hw/usb/dev-serial.c @@ -472,8 +472,6 @@ static void usb_serial_token_in(USBSerialState *s, USBPacket *p) s->recv_ptr = (s->recv_ptr + len) % RECV_BUF; packet_len -= len + 2; } - - return; } static void usb_serial_handle_data(USBDevice *dev, USBPacket *p) @@ -624,7 +622,7 @@ static USBDevice *usb_braille_init(void) return NULL; } - dev = usb_new("usb-braille"); + dev = USB_DEVICE(qdev_new("usb-braille")); qdev_prop_set_chr(&dev->qdev, "chardev", cdrv); return dev; } @@ -634,13 +632,12 @@ static const VMStateDescription vmstate_usb_serial = { .unmigratable = 1, }; -static Property serial_properties[] = { +static const Property serial_properties[] = { DEFINE_PROP_CHR("chardev", USBSerialState, cs), DEFINE_PROP_BOOL("always-plugged", USBSerialState, always_plugged, false), - DEFINE_PROP_END_OF_LIST(), }; -static void usb_serial_dev_class_init(ObjectClass *klass, void *data) +static void usb_serial_dev_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); @@ -661,7 +658,7 @@ static const TypeInfo usb_serial_dev_type_info = { .class_init = usb_serial_dev_class_init, }; -static void usb_serial_class_initfn(ObjectClass *klass, void *data) +static void usb_serial_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); @@ -677,12 +674,11 @@ static const TypeInfo serial_info = { .class_init = usb_serial_class_initfn, }; -static Property braille_properties[] = { +static const Property braille_properties[] = { DEFINE_PROP_CHR("chardev", USBSerialState, cs), - DEFINE_PROP_END_OF_LIST(), }; -static void 
usb_braille_class_initfn(ObjectClass *klass, void *data) +static void usb_braille_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c index c0d63e04251..6ce7154feed 100644 --- a/hw/usb/dev-smartcard-reader.c +++ b/hw/usb/dev-smartcard-reader.c @@ -1069,7 +1069,6 @@ static void ccid_handle_bulk_out(USBCCIDState *s, USBPacket *p) err: p->status = USB_RET_STALL; s->bulk_out_pos = 0; - return; } static void ccid_bulk_in_copy_to_guest(USBCCIDState *s, USBPacket *p, @@ -1171,9 +1170,8 @@ static Answer *ccid_peek_next_answer(USBCCIDState *s) : &s->pending_answers[s->pending_answers_start % PENDING_ANSWERS_NUM]; } -static Property ccid_props[] = { +static const Property ccid_props[] = { DEFINE_PROP_UINT32("slot", struct CCIDCardState, slot, 0), - DEFINE_PROP_END_OF_LIST(), }; static const TypeInfo ccid_bus_info = { @@ -1431,12 +1429,11 @@ static const VMStateDescription ccid_vmstate = { } }; -static Property ccid_properties[] = { +static const Property ccid_properties[] = { DEFINE_PROP_UINT8("debug", USBCCIDState, debug, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void ccid_class_initfn(ObjectClass *klass, void *data) +static void ccid_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); @@ -1461,13 +1458,13 @@ static const TypeInfo ccid_info = { .parent = TYPE_USB_DEVICE, .instance_size = sizeof(USBCCIDState), .class_init = ccid_class_initfn, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } } }; -static void ccid_card_class_init(ObjectClass *klass, void *data) +static void ccid_card_class_init(ObjectClass *klass, const void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->bus_type = TYPE_CCID_BUS; diff --git a/hw/usb/dev-storage-bot.c b/hw/usb/dev-storage-bot.c index 1e5c5c711f5..df6ab7f656b 100644 --- a/hw/usb/dev-storage-bot.c +++ b/hw/usb/dev-storage-bot.c @@ -40,7 +40,7 @@ static void usb_msd_bot_realize(USBDevice *dev, Error **errp) usb_msd_handle_reset(dev); } -static void usb_msd_class_bot_initfn(ObjectClass *klass, void *data) +static void usb_msd_class_bot_initfn(ObjectClass *klass, const void *data) { USBDeviceClass *uc = USB_DEVICE_CLASS(klass); diff --git a/hw/usb/dev-storage-classic.c b/hw/usb/dev-storage-classic.c index 6147387dc61..dabe1563593 100644 --- a/hw/usb/dev-storage-classic.c +++ b/hw/usb/dev-storage-classic.c @@ -13,8 +13,8 @@ #include "hw/usb.h" #include "hw/usb/desc.h" #include "hw/usb/msd.h" -#include "sysemu/sysemu.h" -#include "sysemu/block-backend.h" +#include "system/system.h" +#include "system/block-backend.h" static const struct SCSIBusInfo usb_msd_scsi_info_storage = { .tcq = false, @@ -67,15 +67,14 @@ static void usb_msd_storage_realize(USBDevice *dev, Error **errp) s->scsi_dev = scsi_dev; } -static Property msd_properties[] = { +static const Property msd_properties[] = { DEFINE_BLOCK_PROPERTIES(MSDState, conf), DEFINE_BLOCK_ERROR_PROPERTIES(MSDState, conf), DEFINE_PROP_BOOL("removable", MSDState, removable, false), DEFINE_PROP_BOOL("commandlog", MSDState, commandlog, false), - DEFINE_PROP_END_OF_LIST(), }; -static void usb_msd_class_storage_initfn(ObjectClass *klass, void *data) +static void usb_msd_class_storage_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); diff --git 
a/hw/usb/dev-storage.c b/hw/usb/dev-storage.c index 341e505bd0a..b13fe345c45 100644 --- a/hw/usb/dev-storage.c +++ b/hw/usb/dev-storage.c @@ -177,7 +177,7 @@ static const USBDesc desc = { .str = desc_strings, }; -static void usb_msd_packet_complete(MSDState *s) +static void usb_msd_packet_complete(MSDState *s, int status) { USBPacket *p = s->packet; @@ -187,6 +187,7 @@ static void usb_msd_packet_complete(MSDState *s) * usb_packet_complete returns. */ trace_usb_msd_packet_complete(); + p->status = status; s->packet = NULL; usb_packet_complete(&s->dev, p); } @@ -196,8 +197,7 @@ static void usb_msd_fatal_error(MSDState *s) trace_usb_msd_fatal_error(); if (s->packet) { - s->packet->status = USB_RET_STALL; - usb_msd_packet_complete(s); + usb_msd_packet_complete(s, USB_RET_STALL); } /* @@ -255,8 +255,8 @@ void usb_msd_transfer_data(SCSIRequest *req, uint32_t len) usb_msd_copy_data(s, p); p = s->packet; if (p && p->actual_length == p->iov.size) { - p->status = USB_RET_SUCCESS; /* Clear previous ASYNC status */ - usb_msd_packet_complete(s); + /* USB_RET_SUCCESS status clears previous ASYNC status */ + usb_msd_packet_complete(s, USB_RET_SUCCESS); } } } @@ -295,8 +295,8 @@ void usb_msd_command_complete(SCSIRequest *req, size_t resid) s->mode = USB_MSDM_CSW; } } - p->status = USB_RET_SUCCESS; /* Clear previous ASYNC status */ - usb_msd_packet_complete(s); + /* USB_RET_SUCCESS status clears previous ASYNC status */ + usb_msd_packet_complete(s, USB_RET_SUCCESS); } else if (s->data_len == 0) { s->mode = USB_MSDM_CSW; } @@ -332,8 +332,7 @@ void usb_msd_handle_reset(USBDevice *dev) assert(s->req == NULL); if (s->packet) { - s->packet->status = USB_RET_STALL; - usb_msd_packet_complete(s); + usb_msd_packet_complete(s, USB_RET_STALL); } memset(&s->csw, 0, sizeof(s->csw)); @@ -586,7 +585,7 @@ static const VMStateDescription vmstate_usb_msd = { } }; -static void usb_msd_class_initfn_common(ObjectClass *klass, void *data) +static void usb_msd_class_initfn_common(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c index 1804cb67997..21cc2835c62 100644 --- a/hw/usb/dev-uas.c +++ b/hw/usb/dev-uas.c @@ -914,7 +914,6 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p) err_stream: error_report("%s: invalid stream %d", __func__, p->stream); p->status = USB_RET_STALL; - return; } static void usb_uas_unrealize(USBDevice *dev) @@ -953,12 +952,11 @@ static const VMStateDescription vmstate_usb_uas = { } }; -static Property uas_properties[] = { +static const Property uas_properties[] = { DEFINE_PROP_UINT32("log-scsi-req", UASDevice, requestlog, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void usb_uas_class_initfn(ObjectClass *klass, void *data) +static void usb_uas_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); diff --git a/hw/usb/dev-wacom.c b/hw/usb/dev-wacom.c index 7177c17f031..f4b71a21472 100644 --- a/hw/usb/dev-wacom.c +++ b/hw/usb/dev-wacom.c @@ -420,7 +420,7 @@ static const VMStateDescription vmstate_usb_wacom = { .unmigratable = 1, }; -static void usb_wacom_class_init(ObjectClass *klass, void *data) +static void usb_wacom_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); diff --git a/hw/usb/hcd-dwc2.c b/hw/usb/hcd-dwc2.c index b4f0652c7d2..83864505bb8 100644 --- a/hw/usb/hcd-dwc2.c +++ b/hw/usb/hcd-dwc2.c @@ 
-1448,12 +1448,11 @@ const VMStateDescription vmstate_dwc2_state = { } }; -static Property dwc2_usb_properties[] = { +static const Property dwc2_usb_properties[] = { DEFINE_PROP_UINT32("usb_version", DWC2State, usb_version, 2), - DEFINE_PROP_END_OF_LIST(), }; -static void dwc2_class_init(ObjectClass *klass, void *data) +static void dwc2_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); DWC2Class *c = DWC2_USB_CLASS(klass); diff --git a/hw/usb/hcd-dwc2.h b/hw/usb/hcd-dwc2.h index 9c3d88ea14b..2d5a5690fc2 100644 --- a/hw/usb/hcd-dwc2.h +++ b/hw/usb/hcd-dwc2.h @@ -23,7 +23,7 @@ #include "hw/irq.h" #include "hw/sysbus.h" #include "hw/usb.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qom/object.h" #define DWC2_MMIO_SIZE 0x11000 diff --git a/hw/usb/hcd-dwc3.c b/hw/usb/hcd-dwc3.c index 09d8e25b971..98a342b8b82 100644 --- a/hw/usb/hcd-dwc3.c +++ b/hw/usb/hcd-dwc3.c @@ -343,6 +343,8 @@ REG32(GFLADJ, 0x530) FIELD(GFLADJ, GFLADJ_REFCLK_FLADJ, 8, 14) FIELD(GFLADJ, GFLADJ_30MHZ_SDBND_SEL, 7, 1) FIELD(GFLADJ, GFLADJ_30MHZ, 0, 6) +REG32(GUSB2RHBCTL, 0x540) + FIELD(GUSB2RHBCTL, OVRD_L1TIMEOUT, 0, 4) #define DWC3_GLOBAL_OFFSET 0xC100 static void reset_csr(USBDWC3 * s) @@ -560,6 +562,9 @@ static const RegisterAccessInfo usb_dwc3_regs_info[] = { .rsvd = 0x40, .ro = 0x400040, .unimp = 0xffffffff, + },{ .name = "GUSB2RHBCTL", .addr = A_GUSB2RHBCTL, + .rsvd = 0xfffffff0, + .unimp = 0xffffffff, } }; @@ -656,17 +661,16 @@ static const VMStateDescription vmstate_usb_dwc3 = { } }; -static Property usb_dwc3_properties[] = { +static const Property usb_dwc3_properties[] = { DEFINE_PROP_UINT32("DWC_USB3_USERID", USBDWC3, cfg.dwc_usb3_user, 0x12345678), - DEFINE_PROP_END_OF_LIST(), }; -static void usb_dwc3_class_init(ObjectClass *klass, void *data) +static void usb_dwc3_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = usb_dwc3_reset; + device_class_set_legacy_reset(dc, usb_dwc3_reset); dc->realize = usb_dwc3_realize; dc->vmsd = &vmstate_usb_dwc3; device_class_set_props(dc, usb_dwc3_properties); diff --git a/hw/usb/hcd-ehci-pci.c b/hw/usb/hcd-ehci-pci.c index 3ff54edf62a..38ad3406b32 100644 --- a/hw/usb/hcd-ehci-pci.c +++ b/hw/usb/hcd-ehci-pci.c @@ -135,9 +135,8 @@ static void usb_ehci_pci_write_config(PCIDevice *dev, uint32_t addr, i->ehci.as = busmaster ? 
pci_get_address_space(dev) : &address_space_memory; } -static Property ehci_pci_properties[] = { +static const Property ehci_pci_properties[] = { DEFINE_PROP_UINT32("maxframes", EHCIPCIState, ehci.maxframes, 128), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_ehci_pci = { @@ -151,7 +150,7 @@ static const VMStateDescription vmstate_ehci_pci = { } }; -static void ehci_class_init(ObjectClass *klass, void *data) +static void ehci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -162,7 +161,7 @@ static void ehci_class_init(ObjectClass *klass, void *data) k->config_write = usb_ehci_pci_write_config; dc->vmsd = &vmstate_ehci_pci; device_class_set_props(dc, ehci_pci_properties); - dc->reset = usb_ehci_pci_reset; + device_class_set_legacy_reset(dc, usb_ehci_pci_reset); } static const TypeInfo ehci_pci_type_info = { @@ -173,17 +172,17 @@ static const TypeInfo ehci_pci_type_info = { .instance_finalize = usb_ehci_pci_finalize, .abstract = true, .class_init = ehci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -static void ehci_data_class_init(ObjectClass *klass, void *data) +static void ehci_data_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); - EHCIPCIInfo *i = data; + const EHCIPCIInfo *i = data; k->vendor_id = i->vendor_id; k->device_id = i->device_id; @@ -228,7 +227,7 @@ static void ehci_pci_register_types(void) for (i = 0; i < ARRAY_SIZE(ehci_pci_info); i++) { ehci_type_info.name = ehci_pci_info[i].name; ehci_type_info.class_data = ehci_pci_info + i; - type_register(&ehci_type_info); + type_register_static(&ehci_type_info); } } diff --git a/hw/usb/hcd-ehci-sysbus.c b/hw/usb/hcd-ehci-sysbus.c index fe1dabd0bbe..0449f5fa6d6 100644 --- a/hw/usb/hcd-ehci-sysbus.c +++ b/hw/usb/hcd-ehci-sysbus.c @@ -19,7 +19,6 @@ #include "hw/qdev-properties.h" #include "hw/usb/hcd-ehci.h" #include "migration/vmstate.h" -#include "qemu/module.h" static const VMStateDescription vmstate_ehci_sysbus = { .name = "ehci-sysbus", @@ -31,11 +30,10 @@ static const VMStateDescription vmstate_ehci_sysbus = { } }; -static Property ehci_sysbus_properties[] = { +static const Property ehci_sysbus_properties[] = { DEFINE_PROP_UINT32("maxframes", EHCISysBusState, ehci.maxframes, 128), DEFINE_PROP_BOOL("companion-enable", EHCISysBusState, ehci.companion_enable, false), - DEFINE_PROP_END_OF_LIST(), }; static void usb_ehci_sysbus_realize(DeviceState *dev, Error **errp) @@ -82,7 +80,7 @@ static void ehci_sysbus_finalize(Object *obj) usb_ehci_finalize(s); } -static void ehci_sysbus_class_init(ObjectClass *klass, void *data) +static void ehci_sysbus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(klass); @@ -93,22 +91,11 @@ static void ehci_sysbus_class_init(ObjectClass *klass, void *data) dc->realize = usb_ehci_sysbus_realize; dc->vmsd = &vmstate_ehci_sysbus; device_class_set_props(dc, ehci_sysbus_properties); - dc->reset = usb_ehci_sysbus_reset; + device_class_set_legacy_reset(dc, usb_ehci_sysbus_reset); set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_type_info = { - .name = TYPE_SYS_BUS_EHCI, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(EHCISysBusState), - .instance_init = ehci_sysbus_init, - .instance_finalize = 
ehci_sysbus_finalize, - .abstract = true, - .class_init = ehci_sysbus_class_init, - .class_size = sizeof(SysBusEHCIClass), -}; - -static void ehci_platform_class_init(ObjectClass *oc, void *data) +static void ehci_platform_class_init(ObjectClass *oc, const void *data) { SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -118,13 +105,7 @@ static void ehci_platform_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_platform_type_info = { - .name = TYPE_PLATFORM_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .class_init = ehci_platform_class_init, -}; - -static void ehci_exynos4210_class_init(ObjectClass *oc, void *data) +static void ehci_exynos4210_class_init(ObjectClass *oc, const void *data) { SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -134,13 +115,7 @@ static void ehci_exynos4210_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_exynos4210_type_info = { - .name = TYPE_EXYNOS4210_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .class_init = ehci_exynos4210_class_init, -}; - -static void ehci_aw_h3_class_init(ObjectClass *oc, void *data) +static void ehci_aw_h3_class_init(ObjectClass *oc, const void *data) { SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -150,13 +125,7 @@ static void ehci_aw_h3_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_aw_h3_type_info = { - .name = TYPE_AW_H3_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .class_init = ehci_aw_h3_class_init, -}; - -static void ehci_npcm7xx_class_init(ObjectClass *oc, void *data) +static void ehci_npcm7xx_class_init(ObjectClass *oc, const void *data) { SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -168,13 +137,7 @@ static void ehci_npcm7xx_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_npcm7xx_type_info = { - .name = TYPE_NPCM7XX_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .class_init = ehci_npcm7xx_class_init, -}; - -static void ehci_tegra2_class_init(ObjectClass *oc, void *data) +static void ehci_tegra2_class_init(ObjectClass *oc, const void *data) { SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -184,12 +147,6 @@ static void ehci_tegra2_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_tegra2_type_info = { - .name = TYPE_TEGRA2_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .class_init = ehci_tegra2_class_init, -}; - static void ehci_ppc4xx_init(Object *o) { EHCISysBusState *s = SYS_BUS_EHCI(o); @@ -197,7 +154,7 @@ static void ehci_ppc4xx_init(Object *o) s->ehci.companion_enable = true; } -static void ehci_ppc4xx_class_init(ObjectClass *oc, void *data) +static void ehci_ppc4xx_class_init(ObjectClass *oc, const void *data) { SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -207,13 +164,6 @@ static void ehci_ppc4xx_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_ppc4xx_type_info = { - .name = TYPE_PPC4xx_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .class_init = ehci_ppc4xx_class_init, - .instance_init = ehci_ppc4xx_init, -}; - /* * Faraday FUSBH200 USB 2.0 EHCI */ @@ -270,7 +220,7 @@ static void fusbh200_ehci_init(Object *obj) &f->mem_vendor); } -static 
void fusbh200_ehci_class_init(ObjectClass *oc, void *data) +static void fusbh200_ehci_class_init(ObjectClass *oc, const void *data) { SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); @@ -282,24 +232,55 @@ static void fusbh200_ehci_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_fusbh200_type_info = { - .name = TYPE_FUSBH200_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .instance_size = sizeof(FUSBH200EHCIState), - .instance_init = fusbh200_ehci_init, - .class_init = fusbh200_ehci_class_init, +static const TypeInfo ehci_sysbus_types[] = { + { + .name = TYPE_SYS_BUS_EHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(EHCISysBusState), + .instance_init = ehci_sysbus_init, + .instance_finalize = ehci_sysbus_finalize, + .abstract = true, + .class_init = ehci_sysbus_class_init, + .class_size = sizeof(SysBusEHCIClass), + }, + { + .name = TYPE_PLATFORM_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_platform_class_init, + }, + { + .name = TYPE_EXYNOS4210_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_exynos4210_class_init, + }, + { + .name = TYPE_AW_H3_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_aw_h3_class_init, + }, + { + .name = TYPE_NPCM7XX_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_npcm7xx_class_init, + }, + { + .name = TYPE_TEGRA2_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_tegra2_class_init, + }, + { + .name = TYPE_PPC4xx_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_ppc4xx_class_init, + .instance_init = ehci_ppc4xx_init, + }, + { + .name = TYPE_FUSBH200_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .instance_size = sizeof(FUSBH200EHCIState), + .instance_init = fusbh200_ehci_init, + .class_init = fusbh200_ehci_class_init, + }, }; -static void ehci_sysbus_register_types(void) -{ - type_register_static(&ehci_type_info); - type_register_static(&ehci_platform_type_info); - type_register_static(&ehci_exynos4210_type_info); - type_register_static(&ehci_aw_h3_type_info); - type_register_static(&ehci_npcm7xx_type_info); - type_register_static(&ehci_tegra2_type_info); - type_register_static(&ehci_ppc4xx_type_info); - type_register_static(&ehci_fusbh200_type_info); -} - -type_init(ehci_sysbus_register_types) +DEFINE_TYPES(ehci_sysbus_types) diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c index 01864d46499..b090f253656 100644 --- a/hw/usb/hcd-ehci.c +++ b/hw/usb/hcd-ehci.c @@ -35,7 +35,7 @@ #include "trace.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #define FRAME_TIMER_FREQ 1000 #define FRAME_TIMER_NS (NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ) @@ -2287,7 +2287,8 @@ static void ehci_work_bh(void *opaque) ehci_update_frindex(ehci, skipped_uframes); ehci->last_run_ns += UFRAME_TIMER_NS * skipped_uframes; uframes -= skipped_uframes; - DPRINTF("WARNING - EHCI skipped %d uframes\n", skipped_uframes); + DPRINTF("WARNING - EHCI skipped %"PRIu64" uframes\n", + skipped_uframes); } for (i = 0; i < uframes; i++) { diff --git a/hw/usb/hcd-ehci.h b/hw/usb/hcd-ehci.h index 56a1c09d1f3..ffd6c5108eb 100644 --- a/hw/usb/hcd-ehci.h +++ b/hw/usb/hcd-ehci.h @@ -20,7 +20,7 @@ #include "qemu/timer.h" #include "hw/usb.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/pci/pci_device.h" #include "hw/sysbus.h" diff --git a/hw/usb/hcd-musb.c b/hw/usb/hcd-musb.c deleted file mode 100644 index 6dca373cb1f..00000000000 --- a/hw/usb/hcd-musb.c +++ /dev/null @@ 
-1,1553 +0,0 @@ -/* - * "Inventra" High-speed Dual-Role Controller (MUSB-HDRC), Mentor Graphics, - * USB2.0 OTG compliant core used in various chips. - * - * Copyright (C) 2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - * - * Only host-mode and non-DMA accesses are currently supported. - */ -#include "qemu/osdep.h" -#include "qemu/timer.h" -#include "hw/usb.h" -#include "hw/usb/hcd-musb.h" -#include "hw/irq.h" -#include "hw/hw.h" - -/* Common USB registers */ -#define MUSB_HDRC_FADDR 0x00 /* 8-bit */ -#define MUSB_HDRC_POWER 0x01 /* 8-bit */ - -#define MUSB_HDRC_INTRTX 0x02 /* 16-bit */ -#define MUSB_HDRC_INTRRX 0x04 -#define MUSB_HDRC_INTRTXE 0x06 -#define MUSB_HDRC_INTRRXE 0x08 -#define MUSB_HDRC_INTRUSB 0x0a /* 8 bit */ -#define MUSB_HDRC_INTRUSBE 0x0b /* 8 bit */ -#define MUSB_HDRC_FRAME 0x0c /* 16-bit */ -#define MUSB_HDRC_INDEX 0x0e /* 8 bit */ -#define MUSB_HDRC_TESTMODE 0x0f /* 8 bit */ - -/* Per-EP registers in indexed mode */ -#define MUSB_HDRC_EP_IDX 0x10 /* 8-bit */ - -/* EP FIFOs */ -#define MUSB_HDRC_FIFO 0x20 - -/* Additional Control Registers */ -#define MUSB_HDRC_DEVCTL 0x60 /* 8 bit */ - -/* These are indexed */ -#define MUSB_HDRC_TXFIFOSZ 0x62 /* 8 bit (see masks) */ -#define MUSB_HDRC_RXFIFOSZ 0x63 /* 8 bit (see masks) */ -#define MUSB_HDRC_TXFIFOADDR 0x64 /* 16 bit offset shifted right 3 */ -#define MUSB_HDRC_RXFIFOADDR 0x66 /* 16 bit offset shifted right 3 */ - -/* Some more registers */ -#define MUSB_HDRC_VCTRL 0x68 /* 8 bit */ -#define MUSB_HDRC_HWVERS 0x6c /* 8 bit */ - -/* Added in HDRC 1.9(?) 
& MHDRC 1.4 */ -/* ULPI pass-through */ -#define MUSB_HDRC_ULPI_VBUSCTL 0x70 -#define MUSB_HDRC_ULPI_REGDATA 0x74 -#define MUSB_HDRC_ULPI_REGADDR 0x75 -#define MUSB_HDRC_ULPI_REGCTL 0x76 - -/* Extended config & PHY control */ -#define MUSB_HDRC_ENDCOUNT 0x78 /* 8 bit */ -#define MUSB_HDRC_DMARAMCFG 0x79 /* 8 bit */ -#define MUSB_HDRC_PHYWAIT 0x7a /* 8 bit */ -#define MUSB_HDRC_PHYVPLEN 0x7b /* 8 bit */ -#define MUSB_HDRC_HS_EOF1 0x7c /* 8 bit, units of 546.1 us */ -#define MUSB_HDRC_FS_EOF1 0x7d /* 8 bit, units of 533.3 ns */ -#define MUSB_HDRC_LS_EOF1 0x7e /* 8 bit, units of 1.067 us */ - -/* Per-EP BUSCTL registers */ -#define MUSB_HDRC_BUSCTL 0x80 - -/* Per-EP registers in flat mode */ -#define MUSB_HDRC_EP 0x100 - -/* offsets to registers in flat model */ -#define MUSB_HDRC_TXMAXP 0x00 /* 16 bit apparently */ -#define MUSB_HDRC_TXCSR 0x02 /* 16 bit apparently */ -#define MUSB_HDRC_CSR0 MUSB_HDRC_TXCSR /* re-used for EP0 */ -#define MUSB_HDRC_RXMAXP 0x04 /* 16 bit apparently */ -#define MUSB_HDRC_RXCSR 0x06 /* 16 bit apparently */ -#define MUSB_HDRC_RXCOUNT 0x08 /* 16 bit apparently */ -#define MUSB_HDRC_COUNT0 MUSB_HDRC_RXCOUNT /* re-used for EP0 */ -#define MUSB_HDRC_TXTYPE 0x0a /* 8 bit apparently */ -#define MUSB_HDRC_TYPE0 MUSB_HDRC_TXTYPE /* re-used for EP0 */ -#define MUSB_HDRC_TXINTERVAL 0x0b /* 8 bit apparently */ -#define MUSB_HDRC_NAKLIMIT0 MUSB_HDRC_TXINTERVAL /* re-used for EP0 */ -#define MUSB_HDRC_RXTYPE 0x0c /* 8 bit apparently */ -#define MUSB_HDRC_RXINTERVAL 0x0d /* 8 bit apparently */ -#define MUSB_HDRC_FIFOSIZE 0x0f /* 8 bit apparently */ -#define MUSB_HDRC_CONFIGDATA MGC_O_HDRC_FIFOSIZE /* re-used for EP0 */ - -/* "Bus control" registers */ -#define MUSB_HDRC_TXFUNCADDR 0x00 -#define MUSB_HDRC_TXHUBADDR 0x02 -#define MUSB_HDRC_TXHUBPORT 0x03 - -#define MUSB_HDRC_RXFUNCADDR 0x04 -#define MUSB_HDRC_RXHUBADDR 0x06 -#define MUSB_HDRC_RXHUBPORT 0x07 - -/* - * MUSBHDRC Register bit masks - */ - -/* POWER */ -#define MGC_M_POWER_ISOUPDATE 0x80 -#define MGC_M_POWER_SOFTCONN 0x40 -#define MGC_M_POWER_HSENAB 0x20 -#define MGC_M_POWER_HSMODE 0x10 -#define MGC_M_POWER_RESET 0x08 -#define MGC_M_POWER_RESUME 0x04 -#define MGC_M_POWER_SUSPENDM 0x02 -#define MGC_M_POWER_ENSUSPEND 0x01 - -/* INTRUSB */ -#define MGC_M_INTR_SUSPEND 0x01 -#define MGC_M_INTR_RESUME 0x02 -#define MGC_M_INTR_RESET 0x04 -#define MGC_M_INTR_BABBLE 0x04 -#define MGC_M_INTR_SOF 0x08 -#define MGC_M_INTR_CONNECT 0x10 -#define MGC_M_INTR_DISCONNECT 0x20 -#define MGC_M_INTR_SESSREQ 0x40 -#define MGC_M_INTR_VBUSERROR 0x80 /* FOR SESSION END */ -#define MGC_M_INTR_EP0 0x01 /* FOR EP0 INTERRUPT */ - -/* DEVCTL */ -#define MGC_M_DEVCTL_BDEVICE 0x80 -#define MGC_M_DEVCTL_FSDEV 0x40 -#define MGC_M_DEVCTL_LSDEV 0x20 -#define MGC_M_DEVCTL_VBUS 0x18 -#define MGC_S_DEVCTL_VBUS 3 -#define MGC_M_DEVCTL_HM 0x04 -#define MGC_M_DEVCTL_HR 0x02 -#define MGC_M_DEVCTL_SESSION 0x01 - -/* TESTMODE */ -#define MGC_M_TEST_FORCE_HOST 0x80 -#define MGC_M_TEST_FIFO_ACCESS 0x40 -#define MGC_M_TEST_FORCE_FS 0x20 -#define MGC_M_TEST_FORCE_HS 0x10 -#define MGC_M_TEST_PACKET 0x08 -#define MGC_M_TEST_K 0x04 -#define MGC_M_TEST_J 0x02 -#define MGC_M_TEST_SE0_NAK 0x01 - -/* CSR0 */ -#define MGC_M_CSR0_FLUSHFIFO 0x0100 -#define MGC_M_CSR0_TXPKTRDY 0x0002 -#define MGC_M_CSR0_RXPKTRDY 0x0001 - -/* CSR0 in Peripheral mode */ -#define MGC_M_CSR0_P_SVDSETUPEND 0x0080 -#define MGC_M_CSR0_P_SVDRXPKTRDY 0x0040 -#define MGC_M_CSR0_P_SENDSTALL 0x0020 -#define MGC_M_CSR0_P_SETUPEND 0x0010 -#define MGC_M_CSR0_P_DATAEND 0x0008 -#define MGC_M_CSR0_P_SENTSTALL 
0x0004 - -/* CSR0 in Host mode */ -#define MGC_M_CSR0_H_NO_PING 0x0800 -#define MGC_M_CSR0_H_WR_DATATOGGLE 0x0400 /* set to allow setting: */ -#define MGC_M_CSR0_H_DATATOGGLE 0x0200 /* data toggle control */ -#define MGC_M_CSR0_H_NAKTIMEOUT 0x0080 -#define MGC_M_CSR0_H_STATUSPKT 0x0040 -#define MGC_M_CSR0_H_REQPKT 0x0020 -#define MGC_M_CSR0_H_ERROR 0x0010 -#define MGC_M_CSR0_H_SETUPPKT 0x0008 -#define MGC_M_CSR0_H_RXSTALL 0x0004 - -/* CONFIGDATA */ -#define MGC_M_CONFIGDATA_MPRXE 0x80 /* auto bulk pkt combining */ -#define MGC_M_CONFIGDATA_MPTXE 0x40 /* auto bulk pkt splitting */ -#define MGC_M_CONFIGDATA_BIGENDIAN 0x20 -#define MGC_M_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */ -#define MGC_M_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */ -#define MGC_M_CONFIGDATA_DYNFIFO 0x04 /* dynamic FIFO sizing */ -#define MGC_M_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */ -#define MGC_M_CONFIGDATA_UTMIDW 0x01 /* Width, 0 => 8b, 1 => 16b */ - -/* TXCSR in Peripheral and Host mode */ -#define MGC_M_TXCSR_AUTOSET 0x8000 -#define MGC_M_TXCSR_ISO 0x4000 -#define MGC_M_TXCSR_MODE 0x2000 -#define MGC_M_TXCSR_DMAENAB 0x1000 -#define MGC_M_TXCSR_FRCDATATOG 0x0800 -#define MGC_M_TXCSR_DMAMODE 0x0400 -#define MGC_M_TXCSR_CLRDATATOG 0x0040 -#define MGC_M_TXCSR_FLUSHFIFO 0x0008 -#define MGC_M_TXCSR_FIFONOTEMPTY 0x0002 -#define MGC_M_TXCSR_TXPKTRDY 0x0001 - -/* TXCSR in Peripheral mode */ -#define MGC_M_TXCSR_P_INCOMPTX 0x0080 -#define MGC_M_TXCSR_P_SENTSTALL 0x0020 -#define MGC_M_TXCSR_P_SENDSTALL 0x0010 -#define MGC_M_TXCSR_P_UNDERRUN 0x0004 - -/* TXCSR in Host mode */ -#define MGC_M_TXCSR_H_WR_DATATOGGLE 0x0200 -#define MGC_M_TXCSR_H_DATATOGGLE 0x0100 -#define MGC_M_TXCSR_H_NAKTIMEOUT 0x0080 -#define MGC_M_TXCSR_H_RXSTALL 0x0020 -#define MGC_M_TXCSR_H_ERROR 0x0004 - -/* RXCSR in Peripheral and Host mode */ -#define MGC_M_RXCSR_AUTOCLEAR 0x8000 -#define MGC_M_RXCSR_DMAENAB 0x2000 -#define MGC_M_RXCSR_DISNYET 0x1000 -#define MGC_M_RXCSR_DMAMODE 0x0800 -#define MGC_M_RXCSR_INCOMPRX 0x0100 -#define MGC_M_RXCSR_CLRDATATOG 0x0080 -#define MGC_M_RXCSR_FLUSHFIFO 0x0010 -#define MGC_M_RXCSR_DATAERROR 0x0008 -#define MGC_M_RXCSR_FIFOFULL 0x0002 -#define MGC_M_RXCSR_RXPKTRDY 0x0001 - -/* RXCSR in Peripheral mode */ -#define MGC_M_RXCSR_P_ISO 0x4000 -#define MGC_M_RXCSR_P_SENTSTALL 0x0040 -#define MGC_M_RXCSR_P_SENDSTALL 0x0020 -#define MGC_M_RXCSR_P_OVERRUN 0x0004 - -/* RXCSR in Host mode */ -#define MGC_M_RXCSR_H_AUTOREQ 0x4000 -#define MGC_M_RXCSR_H_WR_DATATOGGLE 0x0400 -#define MGC_M_RXCSR_H_DATATOGGLE 0x0200 -#define MGC_M_RXCSR_H_RXSTALL 0x0040 -#define MGC_M_RXCSR_H_REQPKT 0x0020 -#define MGC_M_RXCSR_H_ERROR 0x0004 - -/* HUBADDR */ -#define MGC_M_HUBADDR_MULTI_TT 0x80 - -/* ULPI: Added in HDRC 1.9(?) & MHDRC 1.4 */ -#define MGC_M_ULPI_VBCTL_USEEXTVBUSIND 0x02 -#define MGC_M_ULPI_VBCTL_USEEXTVBUS 0x01 -#define MGC_M_ULPI_REGCTL_INT_ENABLE 0x08 -#define MGC_M_ULPI_REGCTL_READNOTWRITE 0x04 -#define MGC_M_ULPI_REGCTL_COMPLETE 0x02 -#define MGC_M_ULPI_REGCTL_REG 0x01 - -/* #define MUSB_DEBUG */ - -#ifdef MUSB_DEBUG -#define TRACE(fmt, ...) fprintf(stderr, "%s@%d: " fmt "\n", __func__, \ - __LINE__, ##__VA_ARGS__) -#else -#define TRACE(...) 
-#endif - - -static void musb_attach(USBPort *port); -static void musb_detach(USBPort *port); -static void musb_child_detach(USBPort *port, USBDevice *child); -static void musb_schedule_cb(USBPort *port, USBPacket *p); -static void musb_async_cancel_device(MUSBState *s, USBDevice *dev); - -static USBPortOps musb_port_ops = { - .attach = musb_attach, - .detach = musb_detach, - .child_detach = musb_child_detach, - .complete = musb_schedule_cb, -}; - -static USBBusOps musb_bus_ops = { -}; - -typedef struct MUSBPacket MUSBPacket; -typedef struct MUSBEndPoint MUSBEndPoint; - -struct MUSBPacket { - USBPacket p; - MUSBEndPoint *ep; - int dir; -}; - -struct MUSBEndPoint { - uint16_t faddr[2]; - uint8_t haddr[2]; - uint8_t hport[2]; - uint16_t csr[2]; - uint16_t maxp[2]; - uint16_t rxcount; - uint8_t type[2]; - uint8_t interval[2]; - uint8_t config; - uint8_t fifosize; - int timeout[2]; /* Always in microframes */ - - uint8_t *buf[2]; - int fifolen[2]; - int fifostart[2]; - int fifoaddr[2]; - MUSBPacket packey[2]; - int status[2]; - int ext_size[2]; - - /* For callbacks' use */ - int epnum; - int interrupt[2]; - MUSBState *musb; - USBCallback *delayed_cb[2]; - QEMUTimer *intv_timer[2]; -}; - -struct MUSBState { - qemu_irq irqs[musb_irq_max]; - USBBus bus; - USBPort port; - - int idx; - uint8_t devctl; - uint8_t power; - uint8_t faddr; - - uint8_t intr; - uint8_t mask; - uint16_t tx_intr; - uint16_t tx_mask; - uint16_t rx_intr; - uint16_t rx_mask; - - int setup_len; - int session; - - uint8_t buf[0x8000]; - - /* Duplicating the world since 2008!... probably we should have 32 - * logical, single endpoints instead. */ - MUSBEndPoint ep[16]; -}; - -void musb_reset(MUSBState *s) -{ - int i; - - s->faddr = 0x00; - s->devctl = 0; - s->power = MGC_M_POWER_HSENAB; - s->tx_intr = 0x0000; - s->rx_intr = 0x0000; - s->tx_mask = 0xffff; - s->rx_mask = 0xffff; - s->intr = 0x00; - s->mask = 0x06; - s->idx = 0; - - s->setup_len = 0; - s->session = 0; - memset(s->buf, 0, sizeof(s->buf)); - - /* TODO: _DW */ - s->ep[0].config = MGC_M_CONFIGDATA_SOFTCONE | MGC_M_CONFIGDATA_DYNFIFO; - for (i = 0; i < 16; i ++) { - s->ep[i].fifosize = 64; - s->ep[i].maxp[0] = 0x40; - s->ep[i].maxp[1] = 0x40; - s->ep[i].musb = s; - s->ep[i].epnum = i; - usb_packet_init(&s->ep[i].packey[0].p); - usb_packet_init(&s->ep[i].packey[1].p); - } -} - -struct MUSBState *musb_init(DeviceState *parent_device, int gpio_base) -{ - MUSBState *s = g_malloc0(sizeof(*s)); - int i; - - for (i = 0; i < musb_irq_max; i++) { - s->irqs[i] = qdev_get_gpio_in(parent_device, gpio_base + i); - } - - musb_reset(s); - - usb_bus_new(&s->bus, sizeof(s->bus), &musb_bus_ops, parent_device); - usb_register_port(&s->bus, &s->port, s, 0, &musb_port_ops, - USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL); - - return s; -} - -static void musb_vbus_set(MUSBState *s, int level) -{ - if (level) - s->devctl |= 3 << MGC_S_DEVCTL_VBUS; - else - s->devctl &= ~MGC_M_DEVCTL_VBUS; - - qemu_set_irq(s->irqs[musb_set_vbus], level); -} - -static void musb_intr_set(MUSBState *s, int line, int level) -{ - if (!level) { - s->intr &= ~(1 << line); - qemu_irq_lower(s->irqs[line]); - } else if (s->mask & (1 << line)) { - s->intr |= 1 << line; - qemu_irq_raise(s->irqs[line]); - } -} - -static void musb_tx_intr_set(MUSBState *s, int line, int level) -{ - if (!level) { - s->tx_intr &= ~(1 << line); - if (!s->tx_intr) - qemu_irq_lower(s->irqs[musb_irq_tx]); - } else if (s->tx_mask & (1 << line)) { - s->tx_intr |= 1 << line; - qemu_irq_raise(s->irqs[musb_irq_tx]); - } -} - -static void 
musb_rx_intr_set(MUSBState *s, int line, int level) -{ - if (line) { - if (!level) { - s->rx_intr &= ~(1 << line); - if (!s->rx_intr) - qemu_irq_lower(s->irqs[musb_irq_rx]); - } else if (s->rx_mask & (1 << line)) { - s->rx_intr |= 1 << line; - qemu_irq_raise(s->irqs[musb_irq_rx]); - } - } else - musb_tx_intr_set(s, line, level); -} - -uint32_t musb_core_intr_get(MUSBState *s) -{ - return (s->rx_intr << 15) | s->tx_intr; -} - -void musb_core_intr_clear(MUSBState *s, uint32_t mask) -{ - if (s->rx_intr) { - s->rx_intr &= mask >> 15; - if (!s->rx_intr) - qemu_irq_lower(s->irqs[musb_irq_rx]); - } - - if (s->tx_intr) { - s->tx_intr &= mask & 0xffff; - if (!s->tx_intr) - qemu_irq_lower(s->irqs[musb_irq_tx]); - } -} - -void musb_set_size(MUSBState *s, int epnum, int size, int is_tx) -{ - s->ep[epnum].ext_size[!is_tx] = size; - s->ep[epnum].fifostart[0] = 0; - s->ep[epnum].fifostart[1] = 0; - s->ep[epnum].fifolen[0] = 0; - s->ep[epnum].fifolen[1] = 0; -} - -static void musb_session_update(MUSBState *s, int prev_dev, int prev_sess) -{ - int detect_prev = prev_dev && prev_sess; - int detect = !!s->port.dev && s->session; - - if (detect && !detect_prev) { - /* Let's skip the ID pin sense and VBUS sense formalities and - * and signal a successful SRP directly. This should work at least - * for the Linux driver stack. */ - musb_intr_set(s, musb_irq_connect, 1); - - if (s->port.dev->speed == USB_SPEED_LOW) { - s->devctl &= ~MGC_M_DEVCTL_FSDEV; - s->devctl |= MGC_M_DEVCTL_LSDEV; - } else { - s->devctl |= MGC_M_DEVCTL_FSDEV; - s->devctl &= ~MGC_M_DEVCTL_LSDEV; - } - - /* A-mode? */ - s->devctl &= ~MGC_M_DEVCTL_BDEVICE; - - /* Host-mode bit? */ - s->devctl |= MGC_M_DEVCTL_HM; -#if 1 - musb_vbus_set(s, 1); -#endif - } else if (!detect && detect_prev) { -#if 1 - musb_vbus_set(s, 0); -#endif - } -} - -/* Attach or detach a device on our only port. */ -static void musb_attach(USBPort *port) -{ - MUSBState *s = (MUSBState *) port->opaque; - - musb_intr_set(s, musb_irq_vbus_request, 1); - musb_session_update(s, 0, s->session); -} - -static void musb_detach(USBPort *port) -{ - MUSBState *s = (MUSBState *) port->opaque; - - musb_async_cancel_device(s, port->dev); - - musb_intr_set(s, musb_irq_disconnect, 1); - musb_session_update(s, 1, s->session); -} - -static void musb_child_detach(USBPort *port, USBDevice *child) -{ - MUSBState *s = (MUSBState *) port->opaque; - - musb_async_cancel_device(s, child); -} - -static void musb_cb_tick0(void *opaque) -{ - MUSBEndPoint *ep = (MUSBEndPoint *) opaque; - - ep->delayed_cb[0](&ep->packey[0].p, opaque); -} - -static void musb_cb_tick1(void *opaque) -{ - MUSBEndPoint *ep = (MUSBEndPoint *) opaque; - - ep->delayed_cb[1](&ep->packey[1].p, opaque); -} - -#define musb_cb_tick (dir ? 
musb_cb_tick1 : musb_cb_tick0) - -static void musb_schedule_cb(USBPort *port, USBPacket *packey) -{ - MUSBPacket *p = container_of(packey, MUSBPacket, p); - MUSBEndPoint *ep = p->ep; - int dir = p->dir; - int timeout = 0; - - if (ep->status[dir] == USB_RET_NAK) - timeout = ep->timeout[dir]; - else if (ep->interrupt[dir]) - timeout = 8; - else { - musb_cb_tick(ep); - return; - } - - if (!ep->intv_timer[dir]) - ep->intv_timer[dir] = timer_new_ns(QEMU_CLOCK_VIRTUAL, musb_cb_tick, ep); - - timer_mod(ep->intv_timer[dir], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - muldiv64(timeout, NANOSECONDS_PER_SECOND, 8000)); -} - -static int musb_timeout(int ttype, int speed, int val) -{ -#if 1 - return val << 3; -#endif - - switch (ttype) { - case USB_ENDPOINT_XFER_CONTROL: - if (val < 2) - return 0; - else if (speed == USB_SPEED_HIGH) - return 1 << (val - 1); - else - return 8 << (val - 1); - - case USB_ENDPOINT_XFER_INT: - if (speed == USB_SPEED_HIGH) - if (val < 2) - return 0; - else - return 1 << (val - 1); - else - return val << 3; - - case USB_ENDPOINT_XFER_BULK: - case USB_ENDPOINT_XFER_ISOC: - if (val < 2) - return 0; - else if (speed == USB_SPEED_HIGH) - return 1 << (val - 1); - else - return 8 << (val - 1); - /* TODO: what with low-speed Bulk and Isochronous? */ - } - - hw_error("bad interval\n"); -} - -static void musb_packet(MUSBState *s, MUSBEndPoint *ep, - int epnum, int pid, int len, USBCallback cb, int dir) -{ - USBDevice *dev; - USBEndpoint *uep; - int idx = epnum && dir; - int id; - int ttype; - - /* ep->type[0,1] contains: - * in bits 7:6 the speed (0 - invalid, 1 - high, 2 - full, 3 - slow) - * in bits 5:4 the transfer type (BULK / INT) - * in bits 3:0 the EP num - */ - ttype = epnum ? (ep->type[idx] >> 4) & 3 : 0; - - ep->timeout[dir] = musb_timeout(ttype, - ep->type[idx] >> 6, ep->interval[idx]); - ep->interrupt[dir] = ttype == USB_ENDPOINT_XFER_INT; - ep->delayed_cb[dir] = cb; - - /* A wild guess on the FADDR semantics... */ - dev = usb_find_device(&s->port, ep->faddr[idx]); - if (dev == NULL) { - return; - } - uep = usb_ep_get(dev, pid, ep->type[idx] & 0xf); - id = pid | (dev->addr << 16) | (uep->nr << 8); - usb_packet_setup(&ep->packey[dir].p, pid, uep, 0, id, false, true); - usb_packet_addbuf(&ep->packey[dir].p, ep->buf[idx], len); - ep->packey[dir].ep = ep; - ep->packey[dir].dir = dir; - - usb_handle_packet(dev, &ep->packey[dir].p); - - if (ep->packey[dir].p.status == USB_RET_ASYNC) { - usb_device_flush_ep_queue(dev, uep); - ep->status[dir] = len; - return; - } - - if (ep->packey[dir].p.status == USB_RET_SUCCESS) { - ep->status[dir] = ep->packey[dir].p.actual_length; - } else { - ep->status[dir] = ep->packey[dir].p.status; - } - musb_schedule_cb(&s->port, &ep->packey[dir].p); -} - -static void musb_tx_packet_complete(USBPacket *packey, void *opaque) -{ - /* Unfortunately we can't use packey->devep because that's the remote - * endpoint number and may be different than our local. 
*/ - MUSBEndPoint *ep = (MUSBEndPoint *) opaque; - int epnum = ep->epnum; - MUSBState *s = ep->musb; - - ep->fifostart[0] = 0; - ep->fifolen[0] = 0; -#ifdef CLEAR_NAK - if (ep->status[0] != USB_RET_NAK) { -#endif - if (epnum) - ep->csr[0] &= ~(MGC_M_TXCSR_FIFONOTEMPTY | MGC_M_TXCSR_TXPKTRDY); - else - ep->csr[0] &= ~MGC_M_CSR0_TXPKTRDY; -#ifdef CLEAR_NAK - } -#endif - - /* Clear all of the error bits first */ - if (epnum) - ep->csr[0] &= ~(MGC_M_TXCSR_H_ERROR | MGC_M_TXCSR_H_RXSTALL | - MGC_M_TXCSR_H_NAKTIMEOUT); - else - ep->csr[0] &= ~(MGC_M_CSR0_H_ERROR | MGC_M_CSR0_H_RXSTALL | - MGC_M_CSR0_H_NAKTIMEOUT | MGC_M_CSR0_H_NO_PING); - - if (ep->status[0] == USB_RET_STALL) { - /* Command not supported by target! */ - ep->status[0] = 0; - - if (epnum) - ep->csr[0] |= MGC_M_TXCSR_H_RXSTALL; - else - ep->csr[0] |= MGC_M_CSR0_H_RXSTALL; - } - - if (ep->status[0] == USB_RET_NAK) { - ep->status[0] = 0; - - /* NAK timeouts are only generated in Bulk transfers and - * Data-errors in Isochronous. */ - if (ep->interrupt[0]) { - return; - } - - if (epnum) - ep->csr[0] |= MGC_M_TXCSR_H_NAKTIMEOUT; - else - ep->csr[0] |= MGC_M_CSR0_H_NAKTIMEOUT; - } - - if (ep->status[0] < 0) { - if (ep->status[0] == USB_RET_BABBLE) - musb_intr_set(s, musb_irq_rst_babble, 1); - - /* Pretend we've tried three times already and failed (in - * case of USB_TOKEN_SETUP). */ - if (epnum) - ep->csr[0] |= MGC_M_TXCSR_H_ERROR; - else - ep->csr[0] |= MGC_M_CSR0_H_ERROR; - - musb_tx_intr_set(s, epnum, 1); - return; - } - /* TODO: check len for over/underruns of an OUT packet? */ - -#ifdef SETUPLEN_HACK - if (!epnum && ep->packey[0].pid == USB_TOKEN_SETUP) - s->setup_len = ep->packey[0].data[6]; -#endif - - /* In DMA mode: if no error, assert DMA request for this EP, - * and skip the interrupt. */ - musb_tx_intr_set(s, epnum, 1); -} - -static void musb_rx_packet_complete(USBPacket *packey, void *opaque) -{ - /* Unfortunately we can't use packey->devep because that's the remote - * endpoint number and may be different than our local. */ - MUSBEndPoint *ep = (MUSBEndPoint *) opaque; - int epnum = ep->epnum; - MUSBState *s = ep->musb; - - ep->fifostart[1] = 0; - ep->fifolen[1] = 0; - -#ifdef CLEAR_NAK - if (ep->status[1] != USB_RET_NAK) { -#endif - ep->csr[1] &= ~MGC_M_RXCSR_H_REQPKT; - if (!epnum) - ep->csr[0] &= ~MGC_M_CSR0_H_REQPKT; -#ifdef CLEAR_NAK - } -#endif - - /* Clear all of the imaginable error bits first */ - ep->csr[1] &= ~(MGC_M_RXCSR_H_ERROR | MGC_M_RXCSR_H_RXSTALL | - MGC_M_RXCSR_DATAERROR); - if (!epnum) - ep->csr[0] &= ~(MGC_M_CSR0_H_ERROR | MGC_M_CSR0_H_RXSTALL | - MGC_M_CSR0_H_NAKTIMEOUT | MGC_M_CSR0_H_NO_PING); - - if (ep->status[1] == USB_RET_STALL) { - ep->status[1] = 0; - - ep->csr[1] |= MGC_M_RXCSR_H_RXSTALL; - if (!epnum) - ep->csr[0] |= MGC_M_CSR0_H_RXSTALL; - } - - if (ep->status[1] == USB_RET_NAK) { - ep->status[1] = 0; - - /* NAK timeouts are only generated in Bulk transfers and - * Data-errors in Isochronous. */ - if (ep->interrupt[1]) { - musb_packet(s, ep, epnum, USB_TOKEN_IN, - packey->iov.size, musb_rx_packet_complete, 1); - return; - } - - ep->csr[1] |= MGC_M_RXCSR_DATAERROR; - if (!epnum) - ep->csr[0] |= MGC_M_CSR0_H_NAKTIMEOUT; - } - - if (ep->status[1] < 0) { - if (ep->status[1] == USB_RET_BABBLE) { - musb_intr_set(s, musb_irq_rst_babble, 1); - return; - } - - /* Pretend we've tried three times already and failed (in - * case of a control transfer). 
*/ - ep->csr[1] |= MGC_M_RXCSR_H_ERROR; - if (!epnum) - ep->csr[0] |= MGC_M_CSR0_H_ERROR; - - musb_rx_intr_set(s, epnum, 1); - return; - } - /* TODO: check len for over/underruns of an OUT packet? */ - /* TODO: perhaps make use of e->ext_size[1] here. */ - - if (!(ep->csr[1] & (MGC_M_RXCSR_H_RXSTALL | MGC_M_RXCSR_DATAERROR))) { - ep->csr[1] |= MGC_M_RXCSR_FIFOFULL | MGC_M_RXCSR_RXPKTRDY; - if (!epnum) - ep->csr[0] |= MGC_M_CSR0_RXPKTRDY; - - ep->rxcount = ep->status[1]; /* XXX: MIN(packey->len, ep->maxp[1]); */ - /* In DMA mode: assert DMA request for this EP */ - } - - /* Only if DMA has not been asserted */ - musb_rx_intr_set(s, epnum, 1); -} - -static void musb_async_cancel_device(MUSBState *s, USBDevice *dev) -{ - int ep, dir; - - for (ep = 0; ep < 16; ep++) { - for (dir = 0; dir < 2; dir++) { - if (!usb_packet_is_inflight(&s->ep[ep].packey[dir].p) || - s->ep[ep].packey[dir].p.ep->dev != dev) { - continue; - } - usb_cancel_packet(&s->ep[ep].packey[dir].p); - /* status updates needed here? */ - } - } -} - -static void musb_tx_rdy(MUSBState *s, int epnum) -{ - MUSBEndPoint *ep = s->ep + epnum; - int pid; - int total, valid = 0; - TRACE("start %d, len %d", ep->fifostart[0], ep->fifolen[0] ); - ep->fifostart[0] += ep->fifolen[0]; - ep->fifolen[0] = 0; - - /* XXX: how's the total size of the packet retrieved exactly in - * the generic case? */ - total = ep->maxp[0] & 0x3ff; - - if (ep->ext_size[0]) { - total = ep->ext_size[0]; - ep->ext_size[0] = 0; - valid = 1; - } - - /* If the packet is not fully ready yet, wait for a next segment. */ - if (epnum && (ep->fifostart[0]) < total) - return; - - if (!valid) - total = ep->fifostart[0]; - - pid = USB_TOKEN_OUT; - if (!epnum && (ep->csr[0] & MGC_M_CSR0_H_SETUPPKT)) { - pid = USB_TOKEN_SETUP; - if (total != 8) { - TRACE("illegal SETUPPKT length of %i bytes", total); - } - /* Controller should retry SETUP packets three times on errors - * but it doesn't make sense for us to do that. */ - } - - musb_packet(s, ep, epnum, pid, total, musb_tx_packet_complete, 0); -} - -static void musb_rx_req(MUSBState *s, int epnum) -{ - MUSBEndPoint *ep = s->ep + epnum; - int total; - - /* If we already have a packet, which didn't fit into the - * 64 bytes of the FIFO, only move the FIFO start and return. (Obsolete) */ - if (ep->packey[1].p.pid == USB_TOKEN_IN && ep->status[1] >= 0 && - (ep->fifostart[1]) + ep->rxcount < - ep->packey[1].p.iov.size) { - TRACE("0x%08x, %d", ep->fifostart[1], ep->rxcount ); - ep->fifostart[1] += ep->rxcount; - ep->fifolen[1] = 0; - - ep->rxcount = MIN(ep->packey[0].p.iov.size - (ep->fifostart[1]), - ep->maxp[1]); - - ep->csr[1] &= ~MGC_M_RXCSR_H_REQPKT; - if (!epnum) - ep->csr[0] &= ~MGC_M_CSR0_H_REQPKT; - - /* Clear all of the error bits first */ - ep->csr[1] &= ~(MGC_M_RXCSR_H_ERROR | MGC_M_RXCSR_H_RXSTALL | - MGC_M_RXCSR_DATAERROR); - if (!epnum) - ep->csr[0] &= ~(MGC_M_CSR0_H_ERROR | MGC_M_CSR0_H_RXSTALL | - MGC_M_CSR0_H_NAKTIMEOUT | MGC_M_CSR0_H_NO_PING); - - ep->csr[1] |= MGC_M_RXCSR_FIFOFULL | MGC_M_RXCSR_RXPKTRDY; - if (!epnum) - ep->csr[0] |= MGC_M_CSR0_RXPKTRDY; - musb_rx_intr_set(s, epnum, 1); - return; - } - - /* The driver sets maxp[1] to 64 or less because it knows the hardware - * FIFO is this deep. Bigger packets get split in - * usb_generic_handle_packet but we can also do the splitting locally - * for performance. It turns out we can also have a bigger FIFO and - * ignore the limit set in ep->maxp[1]. 
The Linux MUSB driver deals - * OK with single packets of even 32KB and we avoid splitting, however - * usb_msd.c sometimes sends a packet bigger than what Linux expects - * (e.g. 8192 bytes instead of 4096) and we get an OVERRUN. Splitting - * hides this overrun from Linux. Up to 4096 everything is fine - * though. Currently this is disabled. - * - * XXX: mind ep->fifosize. */ - total = MIN(ep->maxp[1] & 0x3ff, sizeof(s->buf)); - -#ifdef SETUPLEN_HACK - /* Why should *we* do that instead of Linux? */ - if (!epnum) { - if (ep->packey[0].p.devaddr == 2) { - total = MIN(s->setup_len, 8); - } else { - total = MIN(s->setup_len, 64); - } - s->setup_len -= total; - } -#endif - - musb_packet(s, ep, epnum, USB_TOKEN_IN, total, musb_rx_packet_complete, 1); -} - -static uint8_t musb_read_fifo(MUSBEndPoint *ep) -{ - uint8_t value; - if (ep->fifolen[1] >= 64) { - /* We have a FIFO underrun */ - TRACE("EP%d FIFO is now empty, stop reading", ep->epnum); - return 0x00000000; - } - /* In DMA mode clear RXPKTRDY and set REQPKT automatically - * (if AUTOREQ is set) */ - - ep->csr[1] &= ~MGC_M_RXCSR_FIFOFULL; - value=ep->buf[1][ep->fifostart[1] + ep->fifolen[1] ++]; - TRACE("EP%d 0x%02x, %d", ep->epnum, value, ep->fifolen[1] ); - return value; -} - -static void musb_write_fifo(MUSBEndPoint *ep, uint8_t value) -{ - TRACE("EP%d = %02x", ep->epnum, value); - if (ep->fifolen[0] >= 64) { - /* We have a FIFO overrun */ - TRACE("EP%d FIFO exceeded 64 bytes, stop feeding data", ep->epnum); - return; - } - - ep->buf[0][ep->fifostart[0] + ep->fifolen[0] ++] = value; - ep->csr[0] |= MGC_M_TXCSR_FIFONOTEMPTY; -} - -static void musb_ep_frame_cancel(MUSBEndPoint *ep, int dir) -{ - if (ep->intv_timer[dir]) - timer_del(ep->intv_timer[dir]); -} - -/* Bus control */ -static uint8_t musb_busctl_readb(void *opaque, int ep, int addr) -{ - MUSBState *s = (MUSBState *) opaque; - - switch (addr) { - /* For USB2.0 HS hubs only */ - case MUSB_HDRC_TXHUBADDR: - return s->ep[ep].haddr[0]; - case MUSB_HDRC_TXHUBPORT: - return s->ep[ep].hport[0]; - case MUSB_HDRC_RXHUBADDR: - return s->ep[ep].haddr[1]; - case MUSB_HDRC_RXHUBPORT: - return s->ep[ep].hport[1]; - - default: - TRACE("unknown register 0x%02x", addr); - return 0x00; - }; -} - -static void musb_busctl_writeb(void *opaque, int ep, int addr, uint8_t value) -{ - MUSBState *s = (MUSBState *) opaque; - - switch (addr) { - case MUSB_HDRC_TXFUNCADDR: - s->ep[ep].faddr[0] = value; - break; - case MUSB_HDRC_RXFUNCADDR: - s->ep[ep].faddr[1] = value; - break; - case MUSB_HDRC_TXHUBADDR: - s->ep[ep].haddr[0] = value; - break; - case MUSB_HDRC_TXHUBPORT: - s->ep[ep].hport[0] = value; - break; - case MUSB_HDRC_RXHUBADDR: - s->ep[ep].haddr[1] = value; - break; - case MUSB_HDRC_RXHUBPORT: - s->ep[ep].hport[1] = value; - break; - - default: - TRACE("unknown register 0x%02x", addr); - break; - }; -} - -static uint16_t musb_busctl_readh(void *opaque, int ep, int addr) -{ - MUSBState *s = (MUSBState *) opaque; - - switch (addr) { - case MUSB_HDRC_TXFUNCADDR: - return s->ep[ep].faddr[0]; - case MUSB_HDRC_RXFUNCADDR: - return s->ep[ep].faddr[1]; - - default: - return musb_busctl_readb(s, ep, addr) | - (musb_busctl_readb(s, ep, addr | 1) << 8); - }; -} - -static void musb_busctl_writeh(void *opaque, int ep, int addr, uint16_t value) -{ - MUSBState *s = (MUSBState *) opaque; - - switch (addr) { - case MUSB_HDRC_TXFUNCADDR: - s->ep[ep].faddr[0] = value; - break; - case MUSB_HDRC_RXFUNCADDR: - s->ep[ep].faddr[1] = value; - break; - - default: - musb_busctl_writeb(s, ep, addr, value & 0xff); - 
musb_busctl_writeb(s, ep, addr | 1, value >> 8); - }; -} - -/* Endpoint control */ -static uint8_t musb_ep_readb(void *opaque, int ep, int addr) -{ - MUSBState *s = (MUSBState *) opaque; - - switch (addr) { - case MUSB_HDRC_TXTYPE: - return s->ep[ep].type[0]; - case MUSB_HDRC_TXINTERVAL: - return s->ep[ep].interval[0]; - case MUSB_HDRC_RXTYPE: - return s->ep[ep].type[1]; - case MUSB_HDRC_RXINTERVAL: - return s->ep[ep].interval[1]; - case (MUSB_HDRC_FIFOSIZE & ~1): - return 0x00; - case MUSB_HDRC_FIFOSIZE: - return ep ? s->ep[ep].fifosize : s->ep[ep].config; - case MUSB_HDRC_RXCOUNT: - return s->ep[ep].rxcount; - - default: - TRACE("unknown register 0x%02x", addr); - return 0x00; - }; -} - -static void musb_ep_writeb(void *opaque, int ep, int addr, uint8_t value) -{ - MUSBState *s = (MUSBState *) opaque; - - switch (addr) { - case MUSB_HDRC_TXTYPE: - s->ep[ep].type[0] = value; - break; - case MUSB_HDRC_TXINTERVAL: - s->ep[ep].interval[0] = value; - musb_ep_frame_cancel(&s->ep[ep], 0); - break; - case MUSB_HDRC_RXTYPE: - s->ep[ep].type[1] = value; - break; - case MUSB_HDRC_RXINTERVAL: - s->ep[ep].interval[1] = value; - musb_ep_frame_cancel(&s->ep[ep], 1); - break; - case (MUSB_HDRC_FIFOSIZE & ~1): - break; - case MUSB_HDRC_FIFOSIZE: - TRACE("somebody messes with fifosize (now %i bytes)", value); - s->ep[ep].fifosize = value; - break; - default: - TRACE("unknown register 0x%02x", addr); - break; - }; -} - -static uint16_t musb_ep_readh(void *opaque, int ep, int addr) -{ - MUSBState *s = (MUSBState *) opaque; - uint16_t ret; - - switch (addr) { - case MUSB_HDRC_TXMAXP: - return s->ep[ep].maxp[0]; - case MUSB_HDRC_TXCSR: - return s->ep[ep].csr[0]; - case MUSB_HDRC_RXMAXP: - return s->ep[ep].maxp[1]; - case MUSB_HDRC_RXCSR: - ret = s->ep[ep].csr[1]; - - /* TODO: This and other bits probably depend on - * ep->csr[1] & MGC_M_RXCSR_AUTOCLEAR. 
*/ - if (s->ep[ep].csr[1] & MGC_M_RXCSR_AUTOCLEAR) - s->ep[ep].csr[1] &= ~MGC_M_RXCSR_RXPKTRDY; - - return ret; - case MUSB_HDRC_RXCOUNT: - return s->ep[ep].rxcount; - - default: - return musb_ep_readb(s, ep, addr) | - (musb_ep_readb(s, ep, addr | 1) << 8); - }; -} - -static void musb_ep_writeh(void *opaque, int ep, int addr, uint16_t value) -{ - MUSBState *s = (MUSBState *) opaque; - - switch (addr) { - case MUSB_HDRC_TXMAXP: - s->ep[ep].maxp[0] = value; - break; - case MUSB_HDRC_TXCSR: - if (ep) { - s->ep[ep].csr[0] &= value & 0xa6; - s->ep[ep].csr[0] |= value & 0xff59; - } else { - s->ep[ep].csr[0] &= value & 0x85; - s->ep[ep].csr[0] |= value & 0xf7a; - } - - musb_ep_frame_cancel(&s->ep[ep], 0); - - if ((ep && (value & MGC_M_TXCSR_FLUSHFIFO)) || - (!ep && (value & MGC_M_CSR0_FLUSHFIFO))) { - s->ep[ep].fifolen[0] = 0; - s->ep[ep].fifostart[0] = 0; - if (ep) - s->ep[ep].csr[0] &= - ~(MGC_M_TXCSR_FIFONOTEMPTY | MGC_M_TXCSR_TXPKTRDY); - else - s->ep[ep].csr[0] &= - ~(MGC_M_CSR0_TXPKTRDY | MGC_M_CSR0_RXPKTRDY); - } - if ( - (ep && -#ifdef CLEAR_NAK - (value & MGC_M_TXCSR_TXPKTRDY) && - !(value & MGC_M_TXCSR_H_NAKTIMEOUT)) || -#else - (value & MGC_M_TXCSR_TXPKTRDY)) || -#endif - (!ep && -#ifdef CLEAR_NAK - (value & MGC_M_CSR0_TXPKTRDY) && - !(value & MGC_M_CSR0_H_NAKTIMEOUT))) -#else - (value & MGC_M_CSR0_TXPKTRDY))) -#endif - musb_tx_rdy(s, ep); - if (!ep && - (value & MGC_M_CSR0_H_REQPKT) && -#ifdef CLEAR_NAK - !(value & (MGC_M_CSR0_H_NAKTIMEOUT | - MGC_M_CSR0_RXPKTRDY))) -#else - !(value & MGC_M_CSR0_RXPKTRDY)) -#endif - musb_rx_req(s, ep); - break; - - case MUSB_HDRC_RXMAXP: - s->ep[ep].maxp[1] = value; - break; - case MUSB_HDRC_RXCSR: - /* (DMA mode only) */ - if ( - (value & MGC_M_RXCSR_H_AUTOREQ) && - !(value & MGC_M_RXCSR_RXPKTRDY) && - (s->ep[ep].csr[1] & MGC_M_RXCSR_RXPKTRDY)) - value |= MGC_M_RXCSR_H_REQPKT; - - s->ep[ep].csr[1] &= 0x102 | (value & 0x4d); - s->ep[ep].csr[1] |= value & 0xfeb0; - - musb_ep_frame_cancel(&s->ep[ep], 1); - - if (value & MGC_M_RXCSR_FLUSHFIFO) { - s->ep[ep].fifolen[1] = 0; - s->ep[ep].fifostart[1] = 0; - s->ep[ep].csr[1] &= ~(MGC_M_RXCSR_FIFOFULL | MGC_M_RXCSR_RXPKTRDY); - /* If double buffering and we have two packets ready, flush - * only the first one and set up the fifo at the second packet. */ - } -#ifdef CLEAR_NAK - if ((value & MGC_M_RXCSR_H_REQPKT) && !(value & MGC_M_RXCSR_DATAERROR)) -#else - if (value & MGC_M_RXCSR_H_REQPKT) -#endif - musb_rx_req(s, ep); - break; - case MUSB_HDRC_RXCOUNT: - s->ep[ep].rxcount = value; - break; - - default: - musb_ep_writeb(s, ep, addr, value & 0xff); - musb_ep_writeb(s, ep, addr | 1, value >> 8); - }; -} - -/* Generic control */ -static uint32_t musb_readb(void *opaque, hwaddr addr) -{ - MUSBState *s = (MUSBState *) opaque; - int ep, i; - uint8_t ret; - - switch (addr) { - case MUSB_HDRC_FADDR: - return s->faddr; - case MUSB_HDRC_POWER: - return s->power; - case MUSB_HDRC_INTRUSB: - ret = s->intr; - for (i = 0; i < sizeof(ret) * 8; i ++) - if (ret & (1 << i)) - musb_intr_set(s, i, 0); - return ret; - case MUSB_HDRC_INTRUSBE: - return s->mask; - case MUSB_HDRC_INDEX: - return s->idx; - case MUSB_HDRC_TESTMODE: - return 0x00; - - case MUSB_HDRC_EP_IDX ... 
(MUSB_HDRC_EP_IDX + 0xf): - return musb_ep_readb(s, s->idx, addr & 0xf); - - case MUSB_HDRC_DEVCTL: - return s->devctl; - - case MUSB_HDRC_TXFIFOSZ: - case MUSB_HDRC_RXFIFOSZ: - case MUSB_HDRC_VCTRL: - /* TODO */ - return 0x00; - - case MUSB_HDRC_HWVERS: - return (1 << 10) | 400; - - case (MUSB_HDRC_VCTRL | 1): - case (MUSB_HDRC_HWVERS | 1): - case (MUSB_HDRC_DEVCTL | 1): - return 0x00; - - case MUSB_HDRC_BUSCTL ... (MUSB_HDRC_BUSCTL + 0x7f): - ep = (addr >> 3) & 0xf; - return musb_busctl_readb(s, ep, addr & 0x7); - - case MUSB_HDRC_EP ... (MUSB_HDRC_EP + 0xff): - ep = (addr >> 4) & 0xf; - return musb_ep_readb(s, ep, addr & 0xf); - - case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f): - ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf; - return musb_read_fifo(s->ep + ep); - - default: - TRACE("unknown register 0x%02x", (int) addr); - return 0x00; - }; -} - -static void musb_writeb(void *opaque, hwaddr addr, uint32_t value) -{ - MUSBState *s = (MUSBState *) opaque; - int ep; - - switch (addr) { - case MUSB_HDRC_FADDR: - s->faddr = value & 0x7f; - break; - case MUSB_HDRC_POWER: - s->power = (value & 0xef) | (s->power & 0x10); - /* MGC_M_POWER_RESET is also read-only in Peripheral Mode */ - if ((value & MGC_M_POWER_RESET) && s->port.dev) { - usb_device_reset(s->port.dev); - /* Negotiate high-speed operation if MGC_M_POWER_HSENAB is set. */ - if ((value & MGC_M_POWER_HSENAB) && - s->port.dev->speed == USB_SPEED_HIGH) - s->power |= MGC_M_POWER_HSMODE; /* Success */ - /* Restart frame counting. */ - } - if (value & MGC_M_POWER_SUSPENDM) { - /* When all transfers finish, suspend and if MGC_M_POWER_ENSUSPEND - * is set, also go into low power mode. Frame counting stops. */ - /* XXX: Cleared when the interrupt register is read */ - } - if (value & MGC_M_POWER_RESUME) { - /* Wait 20ms and signal resuming on the bus. Frame counting - * restarts. */ - } - break; - case MUSB_HDRC_INTRUSB: - break; - case MUSB_HDRC_INTRUSBE: - s->mask = value & 0xff; - break; - case MUSB_HDRC_INDEX: - s->idx = value & 0xf; - break; - case MUSB_HDRC_TESTMODE: - break; - - case MUSB_HDRC_EP_IDX ... (MUSB_HDRC_EP_IDX + 0xf): - musb_ep_writeb(s, s->idx, addr & 0xf, value); - break; - - case MUSB_HDRC_DEVCTL: - s->session = !!(value & MGC_M_DEVCTL_SESSION); - musb_session_update(s, - !!s->port.dev, - !!(s->devctl & MGC_M_DEVCTL_SESSION)); - - /* It seems this is the only R/W bit in this register? */ - s->devctl &= ~MGC_M_DEVCTL_SESSION; - s->devctl |= value & MGC_M_DEVCTL_SESSION; - break; - - case MUSB_HDRC_TXFIFOSZ: - case MUSB_HDRC_RXFIFOSZ: - case MUSB_HDRC_VCTRL: - /* TODO */ - break; - - case (MUSB_HDRC_VCTRL | 1): - case (MUSB_HDRC_DEVCTL | 1): - break; - - case MUSB_HDRC_BUSCTL ... (MUSB_HDRC_BUSCTL + 0x7f): - ep = (addr >> 3) & 0xf; - musb_busctl_writeb(s, ep, addr & 0x7, value); - break; - - case MUSB_HDRC_EP ... (MUSB_HDRC_EP + 0xff): - ep = (addr >> 4) & 0xf; - musb_ep_writeb(s, ep, addr & 0xf, value); - break; - - case MUSB_HDRC_FIFO ... 
(MUSB_HDRC_FIFO + 0x3f): - ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf; - musb_write_fifo(s->ep + ep, value & 0xff); - break; - - default: - TRACE("unknown register 0x%02x", (int) addr); - break; - }; -} - -static uint32_t musb_readh(void *opaque, hwaddr addr) -{ - MUSBState *s = (MUSBState *) opaque; - int ep, i; - uint16_t ret; - - switch (addr) { - case MUSB_HDRC_INTRTX: - ret = s->tx_intr; - /* Auto clear */ - for (i = 0; i < sizeof(ret) * 8; i ++) - if (ret & (1 << i)) - musb_tx_intr_set(s, i, 0); - return ret; - case MUSB_HDRC_INTRRX: - ret = s->rx_intr; - /* Auto clear */ - for (i = 0; i < sizeof(ret) * 8; i ++) - if (ret & (1 << i)) - musb_rx_intr_set(s, i, 0); - return ret; - case MUSB_HDRC_INTRTXE: - return s->tx_mask; - case MUSB_HDRC_INTRRXE: - return s->rx_mask; - - case MUSB_HDRC_FRAME: - /* TODO */ - return 0x0000; - case MUSB_HDRC_TXFIFOADDR: - return s->ep[s->idx].fifoaddr[0]; - case MUSB_HDRC_RXFIFOADDR: - return s->ep[s->idx].fifoaddr[1]; - - case MUSB_HDRC_EP_IDX ... (MUSB_HDRC_EP_IDX + 0xf): - return musb_ep_readh(s, s->idx, addr & 0xf); - - case MUSB_HDRC_BUSCTL ... (MUSB_HDRC_BUSCTL + 0x7f): - ep = (addr >> 3) & 0xf; - return musb_busctl_readh(s, ep, addr & 0x7); - - case MUSB_HDRC_EP ... (MUSB_HDRC_EP + 0xff): - ep = (addr >> 4) & 0xf; - return musb_ep_readh(s, ep, addr & 0xf); - - case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f): - ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf; - return (musb_read_fifo(s->ep + ep) | musb_read_fifo(s->ep + ep) << 8); - - default: - return musb_readb(s, addr) | (musb_readb(s, addr | 1) << 8); - }; -} - -static void musb_writeh(void *opaque, hwaddr addr, uint32_t value) -{ - MUSBState *s = (MUSBState *) opaque; - int ep; - - switch (addr) { - case MUSB_HDRC_INTRTXE: - s->tx_mask = value; - /* XXX: the masks seem to apply on the raising edge like with - * edge-triggered interrupts, thus no need to update. I may be - * wrong though. */ - break; - case MUSB_HDRC_INTRRXE: - s->rx_mask = value; - break; - - case MUSB_HDRC_FRAME: - /* TODO */ - break; - case MUSB_HDRC_TXFIFOADDR: - s->ep[s->idx].fifoaddr[0] = value; - s->ep[s->idx].buf[0] = - s->buf + ((value << 3) & 0x7ff ); - break; - case MUSB_HDRC_RXFIFOADDR: - s->ep[s->idx].fifoaddr[1] = value; - s->ep[s->idx].buf[1] = - s->buf + ((value << 3) & 0x7ff); - break; - - case MUSB_HDRC_EP_IDX ... (MUSB_HDRC_EP_IDX + 0xf): - musb_ep_writeh(s, s->idx, addr & 0xf, value); - break; - - case MUSB_HDRC_BUSCTL ... (MUSB_HDRC_BUSCTL + 0x7f): - ep = (addr >> 3) & 0xf; - musb_busctl_writeh(s, ep, addr & 0x7, value); - break; - - case MUSB_HDRC_EP ... (MUSB_HDRC_EP + 0xff): - ep = (addr >> 4) & 0xf; - musb_ep_writeh(s, ep, addr & 0xf, value); - break; - - case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f): - ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf; - musb_write_fifo(s->ep + ep, value & 0xff); - musb_write_fifo(s->ep + ep, (value >> 8) & 0xff); - break; - - default: - musb_writeb(s, addr, value & 0xff); - musb_writeb(s, addr | 1, value >> 8); - }; -} - -static uint32_t musb_readw(void *opaque, hwaddr addr) -{ - MUSBState *s = (MUSBState *) opaque; - int ep; - - switch (addr) { - case MUSB_HDRC_FIFO ... 
(MUSB_HDRC_FIFO + 0x3f): - ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf; - return ( musb_read_fifo(s->ep + ep) | - musb_read_fifo(s->ep + ep) << 8 | - musb_read_fifo(s->ep + ep) << 16 | - musb_read_fifo(s->ep + ep) << 24 ); - default: - TRACE("unknown register 0x%02x", (int) addr); - return 0x00000000; - }; -} - -static void musb_writew(void *opaque, hwaddr addr, uint32_t value) -{ - MUSBState *s = (MUSBState *) opaque; - int ep; - - switch (addr) { - case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f): - ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf; - musb_write_fifo(s->ep + ep, value & 0xff); - musb_write_fifo(s->ep + ep, (value >> 8 ) & 0xff); - musb_write_fifo(s->ep + ep, (value >> 16) & 0xff); - musb_write_fifo(s->ep + ep, (value >> 24) & 0xff); - break; - default: - TRACE("unknown register 0x%02x", (int) addr); - break; - }; -} - -MUSBReadFunc * const musb_read[] = { - musb_readb, - musb_readh, - musb_readw, -}; - -MUSBWriteFunc * const musb_write[] = { - musb_writeb, - musb_writeh, - musb_writew, -}; diff --git a/hw/usb/hcd-ohci-pci.c b/hw/usb/hcd-ohci-pci.c index 33ed9b6f5a5..94d1077eb9e 100644 --- a/hw/usb/hcd-ohci-pci.c +++ b/hw/usb/hcd-ohci-pci.c @@ -109,11 +109,10 @@ static void usb_ohci_reset_pci(DeviceState *d) ohci_hard_reset(s); } -static Property ohci_pci_properties[] = { +static const Property ohci_pci_properties[] = { DEFINE_PROP_STRING("masterbus", OHCIPCIState, masterbus), DEFINE_PROP_UINT32("num-ports", OHCIPCIState, num_ports, 3), DEFINE_PROP_UINT32("firstport", OHCIPCIState, firstport, 0), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_ohci = { @@ -127,7 +126,7 @@ static const VMStateDescription vmstate_ohci = { } }; -static void ohci_pci_class_init(ObjectClass *klass, void *data) +static void ohci_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -142,7 +141,7 @@ static void ohci_pci_class_init(ObjectClass *klass, void *data) device_class_set_props(dc, ohci_pci_properties); dc->hotpluggable = false; dc->vmsd = &vmstate_ohci; - dc->reset = usb_ohci_reset_pci; + device_class_set_legacy_reset(dc, usb_ohci_reset_pci); } static const TypeInfo ohci_pci_info = { @@ -150,7 +149,7 @@ static const TypeInfo ohci_pci_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(OHCIPCIState), .class_init = ohci_pci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/usb/hcd-ohci-sysbus.c b/hw/usb/hcd-ohci-sysbus.c index 6fba7f50f87..3fc6cce44b0 100644 --- a/hw/usb/hcd-ohci-sysbus.c +++ b/hw/usb/hcd-ohci-sysbus.c @@ -57,15 +57,14 @@ static void ohci_sysbus_reset(DeviceState *dev) ohci_hard_reset(ohci); } -static Property ohci_sysbus_properties[] = { +static const Property ohci_sysbus_properties[] = { DEFINE_PROP_STRING("masterbus", OHCISysBusState, masterbus), DEFINE_PROP_UINT32("num-ports", OHCISysBusState, num_ports, 3), DEFINE_PROP_UINT32("firstport", OHCISysBusState, firstport, 0), DEFINE_PROP_DMAADDR("dma-offset", OHCISysBusState, dma_offset, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void ohci_sysbus_class_init(ObjectClass *klass, void *data) +static void ohci_sysbus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -73,7 +72,7 @@ static void ohci_sysbus_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); dc->desc = "OHCI USB Controller"; device_class_set_props(dc, ohci_sysbus_properties); 
- dc->reset = ohci_sysbus_reset; + device_class_set_legacy_reset(dc, ohci_sysbus_reset); } static const TypeInfo ohci_sysbus_types[] = { diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c index 71b54914d32..72a9f9f4749 100644 --- a/hw/usb/hcd-ohci.c +++ b/hw/usb/hcd-ohci.c @@ -577,7 +577,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed) USBDevice *dev; USBEndpoint *ep; USBPacket *pkt; - uint8_t buf[8192]; + QEMU_UNINITIALIZED uint8_t buf[8192]; bool int_req; struct ohci_iso_td iso_td; uint32_t addr; diff --git a/hw/usb/hcd-ohci.h b/hw/usb/hcd-ohci.h index e1827227ac3..3cc35a5cdc5 100644 --- a/hw/usb/hcd-ohci.h +++ b/hw/usb/hcd-ohci.h @@ -22,7 +22,7 @@ #define HCD_OHCI_H #include "hw/sysbus.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/usb.h" #include "qom/object.h" diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c index a03cf22e69f..4822c704f69 100644 --- a/hw/usb/hcd-uhci.c +++ b/hw/usb/hcd-uhci.c @@ -36,7 +36,7 @@ #include "qapi/error.h" #include "qemu/timer.h" #include "qemu/iov.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "trace.h" #include "qemu/main-loop.h" #include "qemu/module.h" @@ -67,7 +67,7 @@ struct UHCIPCIDeviceClass { UHCIInfo info; }; -/* +/* * Pending async transaction. * 'packet' must be the first field because completion * handler does "(UHCIAsync *) pkt" cast. @@ -220,8 +220,9 @@ static void uhci_async_cancel(UHCIAsync *async) uhci_async_unlink(async); trace_usb_uhci_packet_cancel(async->queue->token, async->td_addr, async->done); - if (!async->done) + if (!async->done) { usb_cancel_packet(&async->packet); + } uhci_async_free(async); } @@ -322,7 +323,7 @@ static void uhci_reset(DeviceState *dev) s->fl_base_addr = 0; s->sof_timing = 64; - for(i = 0; i < UHCI_PORTS; i++) { + for (i = 0; i < UHCI_PORTS; i++) { port = &s->ports[i]; port->ctrl = 0x0080; if (port->port.dev && port->port.dev->attached) { @@ -387,8 +388,8 @@ static void uhci_port_write(void *opaque, hwaddr addr, trace_usb_uhci_mmio_writew(addr, val); - switch(addr) { - case 0x00: + switch (addr) { + case UHCI_USBCMD: if ((val & UHCI_CMD_RS) && !(s->cmd & UHCI_CMD_RS)) { /* start frame processing */ trace_usb_uhci_schedule_start(); @@ -404,7 +405,7 @@ static void uhci_port_write(void *opaque, hwaddr addr, int i; /* send reset on the USB bus */ - for(i = 0; i < UHCI_PORTS; i++) { + for (i = 0; i < UHCI_PORTS; i++) { port = &s->ports[i]; usb_device_reset(port->port.dev); } @@ -423,34 +424,38 @@ static void uhci_port_write(void *opaque, hwaddr addr, } } break; - case 0x02: + case UHCI_USBSTS: s->status &= ~val; - /* XXX: the chip spec is not coherent, so we add a hidden - register to distinguish between IOC and SPD */ - if (val & UHCI_STS_USBINT) + /* + * XXX: the chip spec is not coherent, so we add a hidden + * register to distinguish between IOC and SPD + */ + if (val & UHCI_STS_USBINT) { s->status2 = 0; + } uhci_update_irq(s); break; - case 0x04: + case UHCI_USBINTR: s->intr = val; uhci_update_irq(s); break; - case 0x06: - if (s->status & UHCI_STS_HCHALTED) + case UHCI_USBFRNUM: + if (s->status & UHCI_STS_HCHALTED) { s->frnum = val & 0x7ff; + } break; - case 0x08: + case UHCI_USBFLBASEADD: s->fl_base_addr &= 0xffff0000; s->fl_base_addr |= val & ~0xfff; break; - case 0x0a: + case UHCI_USBFLBASEADD + 2: s->fl_base_addr &= 0x0000ffff; s->fl_base_addr |= (val << 16); break; - case 0x0c: + case UHCI_USBSOF: s->sof_timing = val & 0xff; break; - case 0x10 ... 0x1f: + case UHCI_USBPORTSC1 ... 
UHCI_USBPORTSC4: { UHCIPort *port; USBDevice *dev; @@ -464,8 +469,8 @@ static void uhci_port_write(void *opaque, hwaddr addr, dev = port->port.dev; if (dev && dev->attached) { /* port reset */ - if ( (val & UHCI_PORT_RESET) && - !(port->ctrl & UHCI_PORT_RESET) ) { + if ((val & UHCI_PORT_RESET) && + !(port->ctrl & UHCI_PORT_RESET)) { usb_device_reset(dev); } } @@ -487,29 +492,29 @@ static uint64_t uhci_port_read(void *opaque, hwaddr addr, unsigned size) UHCIState *s = opaque; uint32_t val; - switch(addr) { - case 0x00: + switch (addr) { + case UHCI_USBCMD: val = s->cmd; break; - case 0x02: + case UHCI_USBSTS: val = s->status; break; - case 0x04: + case UHCI_USBINTR: val = s->intr; break; - case 0x06: + case UHCI_USBFRNUM: val = s->frnum; break; - case 0x08: + case UHCI_USBFLBASEADD: val = s->fl_base_addr & 0xffff; break; - case 0x0a: + case UHCI_USBFLBASEADD + 2: val = (s->fl_base_addr >> 16) & 0xffff; break; - case 0x0c: + case UHCI_USBSOF: val = s->sof_timing; break; - case 0x10 ... 0x1f: + case UHCI_USBPORTSC1 ... UHCI_USBPORTSC4: { UHCIPort *port; int n; @@ -533,12 +538,13 @@ static uint64_t uhci_port_read(void *opaque, hwaddr addr, unsigned size) } /* signal resume if controller suspended */ -static void uhci_resume (void *opaque) +static void uhci_resume(void *opaque) { UHCIState *s = (UHCIState *)opaque; - if (!s) + if (!s) { return; + } if (s->cmd & UHCI_CMD_EGSM) { s->cmd |= UHCI_CMD_FGR; @@ -674,7 +680,8 @@ static int uhci_handle_td_error(UHCIState *s, UHCI_TD *td, uint32_t td_addr, return ret; } -static int uhci_complete_td(UHCIState *s, UHCI_TD *td, UHCIAsync *async, uint32_t *int_mask) +static int uhci_complete_td(UHCIState *s, UHCI_TD *td, UHCIAsync *async, + uint32_t *int_mask) { int len = 0, max_len; uint8_t pid; @@ -682,8 +689,9 @@ static int uhci_complete_td(UHCIState *s, UHCI_TD *td, UHCIAsync *async, uint32_ max_len = ((td->token >> 21) + 1) & 0x7ff; pid = td->token & 0xff; - if (td->ctrl & TD_CTRL_IOS) + if (td->ctrl & TD_CTRL_IOS) { td->ctrl &= ~TD_CTRL_ACTIVE; + } if (async->packet.status != USB_RET_SUCCESS) { return uhci_handle_td_error(s, td, async->td_addr, @@ -693,12 +701,15 @@ static int uhci_complete_td(UHCIState *s, UHCI_TD *td, UHCIAsync *async, uint32_ len = async->packet.actual_length; td->ctrl = (td->ctrl & ~0x7ff) | ((len - 1) & 0x7ff); - /* The NAK bit may have been set by a previous frame, so clear it - here. The docs are somewhat unclear, but win2k relies on this - behavior. */ + /* + * The NAK bit may have been set by a previous frame, so clear it + * here. The docs are somewhat unclear, but win2k relies on this + * behavior. 
+ */ td->ctrl &= ~(TD_CTRL_ACTIVE | TD_CTRL_NAK); - if (td->ctrl & TD_CTRL_IOC) + if (td->ctrl & TD_CTRL_IOC) { *int_mask |= 0x01; + } if (pid == USB_TOKEN_IN) { pci_dma_write(&s->dev, td->buffer, async->buf, len); @@ -780,9 +791,11 @@ static int uhci_handle_td(UHCIState *s, UHCIQueue *q, uint32_t qh_addr, if (async) { if (queuing) { - /* we are busy filling the queue, we are not prepared - to consume completed packages then, just leave them - in async state */ + /* + * we are busy filling the queue, we are not prepared + * to consume completed packages then, just leave them + * in async state + */ return TD_RESULT_ASYNC_CONT; } if (!async->done) { @@ -832,7 +845,7 @@ static int uhci_handle_td(UHCIState *s, UHCIQueue *q, uint32_t qh_addr, } usb_packet_addbuf(&async->packet, async->buf, max_len); - switch(pid) { + switch (pid) { case USB_TOKEN_OUT: case USB_TOKEN_SETUP: pci_dma_read(&s->dev, td->buffer, async->buf, max_len); @@ -911,12 +924,15 @@ static void qhdb_reset(QhDb *db) static int qhdb_insert(QhDb *db, uint32_t addr) { int i; - for (i = 0; i < db->count; i++) - if (db->addr[i] == addr) + for (i = 0; i < db->count; i++) { + if (db->addr[i] == addr) { return 1; + } + } - if (db->count >= UHCI_MAX_QUEUES) + if (db->count >= UHCI_MAX_QUEUES) { return 1; + } db->addr[db->count++] = addr; return 0; @@ -970,8 +986,10 @@ static void uhci_process_frame(UHCIState *s) for (cnt = FRAME_MAX_LOOPS; is_valid(link) && cnt; cnt--) { if (!s->completions_only && s->frame_bytes >= s->frame_bandwidth) { - /* We've reached the usb 1.1 bandwidth, which is - 1280 bytes/frame, stop processing */ + /* + * We've reached the usb 1.1 bandwidth, which is + * 1280 bytes/frame, stop processing + */ trace_usb_uhci_frame_stop_bandwidth(); break; } @@ -1120,8 +1138,10 @@ static void uhci_frame_timer(void *opaque) uhci_async_validate_begin(s); uhci_process_frame(s); uhci_async_validate_end(s); - /* The spec says frnum is the frame currently being processed, and - * the guest must look at frnum - 1 on interrupt, so inc frnum now */ + /* + * The spec says frnum is the frame currently being processed, and + * the guest must look at frnum - 1 on interrupt, so inc frnum now + */ s->frnum = (s->frnum + 1) & 0x7ff; s->expire_time += frame_t; } @@ -1174,7 +1194,7 @@ void usb_uhci_common_realize(PCIDevice *dev, Error **errp) if (s->masterbus) { USBPort *ports[UHCI_PORTS]; - for(i = 0; i < UHCI_PORTS; i++) { + for (i = 0; i < UHCI_PORTS; i++) { ports[i] = &s->ports[i].port; } usb_register_companion(s->masterbus, ports, UHCI_PORTS, @@ -1200,8 +1220,10 @@ void usb_uhci_common_realize(PCIDevice *dev, Error **errp) memory_region_init_io(&s->io_bar, OBJECT(s), &uhci_ioport_ops, s, "uhci", 0x20); - /* Use region 4 for consistency with real hardware. BSD guests seem - to rely on this. */ + /* + * Use region 4 for consistency with real hardware. BSD guests seem + * to rely on this. 
+ */ pci_register_bar(&s->dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &s->io_bar); } @@ -1227,27 +1249,25 @@ static void usb_uhci_exit(PCIDevice *dev) } } -static Property uhci_properties_companion[] = { +static const Property uhci_properties_companion[] = { DEFINE_PROP_STRING("masterbus", UHCIState, masterbus), DEFINE_PROP_UINT32("firstport", UHCIState, firstport, 0), DEFINE_PROP_UINT32("bandwidth", UHCIState, frame_bandwidth, 1280), DEFINE_PROP_UINT32("maxframes", UHCIState, maxframes, 128), - DEFINE_PROP_END_OF_LIST(), }; -static Property uhci_properties_standalone[] = { +static const Property uhci_properties_standalone[] = { DEFINE_PROP_UINT32("bandwidth", UHCIState, frame_bandwidth, 1280), DEFINE_PROP_UINT32("maxframes", UHCIState, maxframes, 128), - DEFINE_PROP_END_OF_LIST(), }; -static void uhci_class_init(ObjectClass *klass, void *data) +static void uhci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->class_id = PCI_CLASS_SERIAL_USB; dc->vmsd = &vmstate_uhci; - dc->reset = uhci_reset; + device_class_set_legacy_reset(dc, uhci_reset); set_bit(DEVICE_CATEGORY_USB, dc->categories); } @@ -1258,18 +1278,18 @@ static const TypeInfo uhci_pci_type_info = { .class_size = sizeof(UHCIPCIDeviceClass), .abstract = true, .class_init = uhci_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, }; -void uhci_data_class_init(ObjectClass *klass, void *data) +void uhci_data_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); UHCIPCIDeviceClass *u = UHCI_CLASS(klass); - UHCIInfo *info = data; + const UHCIInfo *info = data; k->realize = info->realize ? info->realize : usb_uhci_common_realize; k->exit = info->unplug ? 
usb_uhci_exit : NULL; @@ -1362,7 +1382,7 @@ static void uhci_register_types(void) for (i = 0; i < ARRAY_SIZE(uhci_info); i++) { uhci_type_info.name = uhci_info[i].name; uhci_type_info.class_data = uhci_info + i; - type_register(&uhci_type_info); + type_register_static(&uhci_type_info); } } diff --git a/hw/usb/hcd-uhci.h b/hw/usb/hcd-uhci.h index 6d26b94e929..e0a6525505c 100644 --- a/hw/usb/hcd-uhci.h +++ b/hw/usb/hcd-uhci.h @@ -28,7 +28,7 @@ #ifndef HW_USB_HCD_UHCI_H #define HW_USB_HCD_UHCI_H -#include "exec/memory.h" +#include "system/memory.h" #include "qemu/timer.h" #include "hw/pci/pci_device.h" #include "hw/usb.h" @@ -88,7 +88,7 @@ typedef struct UHCIInfo { bool notuser; /* disallow user_creatable */ } UHCIInfo; -void uhci_data_class_init(ObjectClass *klass, void *data); +void uhci_data_class_init(ObjectClass *klass, const void *data); void usb_uhci_common_realize(PCIDevice *dev, Error **errp); #define TYPE_PIIX3_USB_UHCI "piix3-usb-uhci" diff --git a/hw/usb/hcd-xhci-nec.c b/hw/usb/hcd-xhci-nec.c index 0c063b3697d..9e0fea26f40 100644 --- a/hw/usb/hcd-xhci-nec.c +++ b/hw/usb/hcd-xhci-nec.c @@ -30,20 +30,15 @@ OBJECT_DECLARE_SIMPLE_TYPE(XHCINecState, NEC_XHCI) struct XHCINecState { - /*< private >*/ XHCIPciState parent_obj; - /*< public >*/ - uint32_t flags; + uint32_t intrs; uint32_t slots; }; -static Property nec_xhci_properties[] = { - DEFINE_PROP_ON_OFF_AUTO("msi", XHCIPciState, msi, ON_OFF_AUTO_AUTO), - DEFINE_PROP_ON_OFF_AUTO("msix", XHCIPciState, msix, ON_OFF_AUTO_AUTO), +static const Property nec_xhci_properties[] = { DEFINE_PROP_UINT32("intrs", XHCINecState, intrs, XHCI_MAXINTRS), DEFINE_PROP_UINT32("slots", XHCINecState, slots, XHCI_MAXSLOTS), - DEFINE_PROP_END_OF_LIST(), }; static void nec_xhci_instance_init(Object *obj) @@ -51,12 +46,11 @@ static void nec_xhci_instance_init(Object *obj) XHCIPciState *pci = XHCI_PCI(obj); XHCINecState *nec = NEC_XHCI(obj); - pci->xhci.flags = nec->flags; pci->xhci.numintrs = nec->intrs; pci->xhci.numslots = nec->slots; } -static void nec_xhci_class_init(ObjectClass *klass, void *data) +static void nec_xhci_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/usb/hcd-xhci-pci.c b/hw/usb/hcd-xhci-pci.c index 264d7ebb776..b93c80b09d8 100644 --- a/hw/usb/hcd-xhci-pci.c +++ b/hw/usb/hcd-xhci-pci.c @@ -74,6 +74,7 @@ static bool xhci_pci_intr_raise(XHCIState *xhci, int n, bool level) } if (msi_enabled(pci_dev) && level) { + n %= msi_nr_vectors_allocated(pci_dev); msi_notify(pci_dev, n); return true; } @@ -81,6 +82,21 @@ static bool xhci_pci_intr_raise(XHCIState *xhci, int n, bool level) return false; } +static bool xhci_pci_intr_mapping_conditional(XHCIState *xhci) +{ + XHCIPciState *s = container_of(xhci, XHCIPciState, xhci); + PCIDevice *pci_dev = PCI_DEVICE(s); + + /* + * Implementation of the "conditional-intr-mapping" property, which only + * enables interrupter mapping if MSI or MSI-X is available and active. + * Forces all events onto interrupter/event ring 0 in pin-based IRQ mode. + * Provides compatibility with macOS guests on machine types where MSI(-X) + * is not available. 
+ */ + return msix_enabled(pci_dev) || msi_enabled(pci_dev); +} + static void xhci_pci_reset(DeviceState *dev) { XHCIPciState *s = XHCI_PCI(dev); @@ -94,7 +110,7 @@ static int xhci_pci_vmstate_post_load(void *opaque, int version_id) PCIDevice *pci_dev = PCI_DEVICE(s); int intr; - for (intr = 0; intr < s->xhci.numintrs; intr++) { + for (intr = 0; intr < s->xhci.numintrs; intr++) { if (s->xhci.intr[intr].msix_used) { msix_vector_use(pci_dev, intr); } else { @@ -118,6 +134,9 @@ static void usb_xhci_pci_realize(struct PCIDevice *dev, Error **errp) object_property_set_link(OBJECT(&s->xhci), "host", OBJECT(s), NULL); s->xhci.intr_update = xhci_pci_intr_update; s->xhci.intr_raise = xhci_pci_intr_raise; + if (s->conditional_intr_mapping) { + s->xhci.intr_mapping_supported = xhci_pci_intr_mapping_conditional; + } if (!qdev_realize(DEVICE(&s->xhci), NULL, errp)) { return; } @@ -197,17 +216,29 @@ static void xhci_instance_init(Object *obj) qdev_alias_all_properties(DEVICE(&s->xhci), obj); } -static void xhci_class_init(ObjectClass *klass, void *data) +static const Property xhci_pci_properties[] = { + DEFINE_PROP_ON_OFF_AUTO("msi", XHCIPciState, msi, ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO("msix", XHCIPciState, msix, ON_OFF_AUTO_AUTO), + DEFINE_PROP_BOOL("conditional-intr-mapping", XHCIPciState, + conditional_intr_mapping, false), +}; + +static void xhci_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = xhci_pci_reset; + device_class_set_legacy_reset(dc, xhci_pci_reset); dc->vmsd = &vmstate_xhci_pci; set_bit(DEVICE_CATEGORY_USB, dc->categories); k->realize = usb_xhci_pci_realize; k->exit = usb_xhci_pci_exit; k->class_id = PCI_CLASS_SERIAL_USB; + device_class_set_props(dc, xhci_pci_properties); + object_class_property_set_description(klass, "conditional-intr-mapping", + "When true, disables interrupter mapping for pin-based IRQ mode. 
" + "Intended to be used with guest drivers with questionable behaviour, " + "such as macOS's."); } static const TypeInfo xhci_pci_info = { @@ -217,14 +248,14 @@ static const TypeInfo xhci_pci_info = { .class_init = xhci_class_init, .instance_init = xhci_instance_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { } }, }; -static void qemu_xhci_class_init(ObjectClass *klass, void *data) +static void qemu_xhci_class_init(ObjectClass *klass, const void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); diff --git a/hw/usb/hcd-xhci-pci.h b/hw/usb/hcd-xhci-pci.h index 08f70ce97cc..5b61ae84555 100644 --- a/hw/usb/hcd-xhci-pci.h +++ b/hw/usb/hcd-xhci-pci.h @@ -40,6 +40,7 @@ typedef struct XHCIPciState { XHCIState xhci; OnOffAuto msi; OnOffAuto msix; + bool conditional_intr_mapping; } XHCIPciState; #endif diff --git a/hw/usb/hcd-xhci-sysbus.c b/hw/usb/hcd-xhci-sysbus.c index d93bae31f93..244698e5f2b 100644 --- a/hw/usb/hcd-xhci-sysbus.c +++ b/hw/usb/hcd-xhci-sysbus.c @@ -82,10 +82,9 @@ void xhci_sysbus_build_aml(Aml *scope, uint32_t mmio, unsigned int irq) aml_append(scope, dev); } -static Property xhci_sysbus_props[] = { +static const Property xhci_sysbus_props[] = { DEFINE_PROP_UINT32("intrs", XHCISysbusState, xhci.numintrs, XHCI_MAXINTRS), DEFINE_PROP_UINT32("slots", XHCISysbusState, xhci.numslots, XHCI_MAXSLOTS), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_xhci_sysbus = { @@ -97,11 +96,11 @@ static const VMStateDescription vmstate_xhci_sysbus = { } }; -static void xhci_sysbus_class_init(ObjectClass *klass, void *data) +static void xhci_sysbus_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = xhci_sysbus_reset; + device_class_set_legacy_reset(dc, xhci_sysbus_reset); dc->realize = xhci_sysbus_realize; dc->vmsd = &vmstate_xhci_sysbus; device_class_set_props(dc, xhci_sysbus_props); diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c index b6411f0bdac..292c378bfc9 100644 --- a/hw/usb/hcd-xhci.c +++ b/hw/usb/hcd-xhci.c @@ -644,6 +644,11 @@ static void xhci_event(XHCIState *xhci, XHCIEvent *event, int v) dma_addr_t erdp; unsigned int dp_idx; + if (xhci->numintrs == 1 || + (xhci->intr_mapping_supported && !xhci->intr_mapping_supported(xhci))) { + v = 0; + } + if (v >= xhci->numintrs) { DPRINTF("intr nr out of range (%d >= %d)\n", v, xhci->numintrs); return; @@ -1182,6 +1187,12 @@ static void xhci_ep_free_xfer(XHCITransfer *xfer) g_free(xfer); } +static void xhci_xfer_unmap(XHCITransfer *xfer) +{ + usb_packet_unmap(&xfer->packet, &xfer->sgl); + qemu_sglist_destroy(&xfer->sgl); +} + static int xhci_ep_nuke_one_xfer(XHCITransfer *t, TRBCCode report) { int killed = 0; @@ -1193,6 +1204,7 @@ static int xhci_ep_nuke_one_xfer(XHCITransfer *t, TRBCCode report) if (t->running_async) { usb_cancel_packet(&t->packet); + xhci_xfer_unmap(t); t->running_async = 0; killed = 1; } @@ -1475,12 +1487,6 @@ static int xhci_xfer_create_sgl(XHCITransfer *xfer, int in_xfer) return -1; } -static void xhci_xfer_unmap(XHCITransfer *xfer) -{ - usb_packet_unmap(&xfer->packet, &xfer->sgl); - qemu_sglist_destroy(&xfer->sgl); -} - static void xhci_xfer_report(XHCITransfer *xfer) { uint32_t edtla = 0; @@ -2810,9 +2816,15 @@ static uint64_t xhci_port_read(void *ptr, hwaddr reg, unsigned size) case 0x08: /* PORTLI */ ret = 0; break; - case 0x0c: /* reserved */ + case 0x0c: /* PORTHLPMC */ + ret = 0; + qemu_log_mask(LOG_UNIMP, "%s: read 
from port register PORTHLPMC", + __func__); + break; default: - trace_usb_xhci_unimplemented("port read", reg); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from port offset 0x%" HWADDR_PRIx, + __func__, reg); ret = 0; } @@ -2881,9 +2893,22 @@ static void xhci_port_write(void *ptr, hwaddr reg, } break; case 0x04: /* PORTPMSC */ + case 0x0c: /* PORTHLPMC */ + qemu_log_mask(LOG_UNIMP, + "%s: write 0x%" PRIx64 + " (%u bytes) to port register at offset 0x%" HWADDR_PRIx, + __func__, val, size, reg); + break; case 0x08: /* PORTLI */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: Write to read-only PORTLI register", + __func__); + break; default: - trace_usb_xhci_unimplemented("port write", reg); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write 0x%" PRIx64 " (%u bytes) to unknown port " + "register at offset 0x%" HWADDR_PRIx, + __func__, val, size, reg); + break; } } @@ -3605,23 +3630,22 @@ const VMStateDescription vmstate_xhci = { } }; -static Property xhci_properties[] = { +static const Property xhci_properties[] = { DEFINE_PROP_BIT("streams", XHCIState, flags, XHCI_FLAG_ENABLE_STREAMS, true), DEFINE_PROP_UINT32("p2", XHCIState, numports_2, 4), DEFINE_PROP_UINT32("p3", XHCIState, numports_3, 4), DEFINE_PROP_LINK("host", XHCIState, hostOpaque, TYPE_DEVICE, DeviceState *), - DEFINE_PROP_END_OF_LIST(), }; -static void xhci_class_init(ObjectClass *klass, void *data) +static void xhci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = usb_xhci_realize; dc->unrealize = usb_xhci_unrealize; - dc->reset = xhci_reset; + device_class_set_legacy_reset(dc, xhci_reset); device_class_set_props(dc, xhci_properties); dc->user_creatable = false; } diff --git a/hw/usb/hcd-xhci.h b/hw/usb/hcd-xhci.h index fe16d7ad055..9c3974f1489 100644 --- a/hw/usb/hcd-xhci.h +++ b/hw/usb/hcd-xhci.h @@ -25,7 +25,7 @@ #include "hw/usb.h" #include "hw/usb/xhci.h" -#include "sysemu/dma.h" +#include "system/dma.h" OBJECT_DECLARE_SIMPLE_TYPE(XHCIState, XHCI) @@ -193,6 +193,11 @@ typedef struct XHCIState { uint32_t max_pstreams_mask; void (*intr_update)(XHCIState *s, int n, bool enable); bool (*intr_raise)(XHCIState *s, int n, bool level); + /* + * Callback for special-casing interrupter mapping support. NULL for most + * implementations, for defaulting to enabled mapping unless numintrs == 1. 
+ */ + bool (*intr_mapping_supported)(XHCIState *s); DeviceState *hostOpaque; /* Operational Registers */ diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c index 691bc881fbc..b74670ae256 100644 --- a/hw/usb/host-libusb.c +++ b/hw/usb/host-libusb.c @@ -51,8 +51,8 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/module.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/runstate.h" +#include "system/system.h" #include "trace.h" #include "hw/qdev-properties.h" @@ -1758,7 +1758,7 @@ static const VMStateDescription vmstate_usb_host = { } }; -static Property usb_host_dev_properties[] = { +static const Property usb_host_dev_properties[] = { DEFINE_PROP_UINT32("hostbus", USBHostDevice, match.bus_num, 0), DEFINE_PROP_UINT32("hostaddr", USBHostDevice, match.addr, 0), DEFINE_PROP_STRING("hostport", USBHostDevice, match.port), @@ -1779,10 +1779,9 @@ static Property usb_host_dev_properties[] = { USB_HOST_OPT_PIPELINE, true), DEFINE_PROP_BOOL("suppress-remote-wake", USBHostDevice, suppress_remote_wake, true), - DEFINE_PROP_END_OF_LIST(), }; -static void usb_host_class_initfn(ObjectClass *klass, void *data) +static void usb_host_class_initfn(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); diff --git a/hw/usb/imx-usb-phy.c b/hw/usb/imx-usb-phy.c index 18917d7599e..c25566d0fae 100644 --- a/hw/usb/imx-usb-phy.c +++ b/hw/usb/imx-usb-phy.c @@ -214,11 +214,11 @@ static void imx_usbphy_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); } -static void imx_usbphy_class_init(ObjectClass *klass, void *data) +static void imx_usbphy_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->reset = imx_usbphy_reset; + device_class_set_legacy_reset(dc, imx_usbphy_reset); dc->vmsd = &vmstate_imx_usbphy; dc->desc = "i.MX USB PHY Module"; dc->realize = imx_usbphy_realize; diff --git a/hw/usb/libhw.c b/hw/usb/libhw.c index f350eae443d..4f03ef4ba9a 100644 --- a/hw/usb/libhw.c +++ b/hw/usb/libhw.c @@ -21,7 +21,7 @@ */ #include "qemu/osdep.h" #include "hw/usb.h" -#include "sysemu/dma.h" +#include "system/dma.h" int usb_packet_map(USBPacket *p, QEMUSGList *sgl) { diff --git a/hw/usb/meson.build b/hw/usb/meson.build index d7de1003e3d..17360a5b5a4 100644 --- a/hw/usb/meson.build +++ b/hw/usb/meson.build @@ -23,12 +23,10 @@ system_ss.add(when: 'CONFIG_USB_XHCI', if_true: files('hcd-xhci.c')) system_ss.add(when: 'CONFIG_USB_XHCI_PCI', if_true: files('hcd-xhci-pci.c')) system_ss.add(when: 'CONFIG_USB_XHCI_SYSBUS', if_true: files('hcd-xhci-sysbus.c')) system_ss.add(when: 'CONFIG_USB_XHCI_NEC', if_true: files('hcd-xhci-nec.c')) -system_ss.add(when: 'CONFIG_USB_MUSB', if_true: files('hcd-musb.c')) system_ss.add(when: 'CONFIG_USB_DWC2', if_true: files('hcd-dwc2.c')) system_ss.add(when: 'CONFIG_USB_DWC3', if_true: files('hcd-dwc3.c')) +system_ss.add(when: 'CONFIG_USB_CHIPIDEA', if_true: files('chipidea.c')) -system_ss.add(when: 'CONFIG_TUSB6010', if_true: files('tusb6010.c')) -system_ss.add(when: 'CONFIG_IMX', if_true: files('chipidea.c')) system_ss.add(when: 'CONFIG_IMX_USBPHY', if_true: files('imx-usb-phy.c')) system_ss.add(when: 'CONFIG_VT82C686', if_true: files('vt82c686-uhci-pci.c')) system_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-usb2-ctrl-regs.c')) diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c index 0f2dd2e5040..f516ff42a11 100644 --- a/hw/usb/redirect.c +++ b/hw/usb/redirect.c @@ -30,8 +30,8 
@@ #include "qemu/units.h" #include "qapi/error.h" #include "qemu/timer.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/runstate.h" +#include "system/system.h" #include "qapi/qmp/qerror.h" #include "qemu/error-report.h" #include "qemu/iov.h" @@ -2573,17 +2573,16 @@ static const VMStateDescription usbredir_vmstate = { } }; -static Property usbredir_properties[] = { +static const Property usbredir_properties[] = { DEFINE_PROP_CHR("chardev", USBRedirDevice, cs), DEFINE_PROP_UINT8("debug", USBRedirDevice, debug, usbredirparser_warning), DEFINE_PROP_STRING("filter", USBRedirDevice, filter_str), DEFINE_PROP_BOOL("streams", USBRedirDevice, enable_streams, true), DEFINE_PROP_BOOL("suppress-remote-wake", USBRedirDevice, suppress_remote_wake, true), - DEFINE_PROP_END_OF_LIST(), }; -static void usbredir_class_initfn(ObjectClass *klass, void *data) +static void usbredir_class_initfn(ObjectClass *klass, const void *data) { USBDeviceClass *uc = USB_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/usb/tusb6010.c b/hw/usb/tusb6010.c deleted file mode 100644 index 1dd4071e683..00000000000 --- a/hw/usb/tusb6010.c +++ /dev/null @@ -1,850 +0,0 @@ -/* - * Texas Instruments TUSB6010 emulation. - * Based on reverse-engineering of a linux driver. - * - * Copyright (C) 2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . - */ - -#include "qemu/osdep.h" -#include "qemu/module.h" -#include "qemu/timer.h" -#include "hw/usb.h" -#include "hw/usb/hcd-musb.h" -#include "hw/arm/omap.h" -#include "hw/hw.h" -#include "hw/irq.h" -#include "hw/sysbus.h" -#include "qom/object.h" - -#define TYPE_TUSB6010 "tusb6010" -OBJECT_DECLARE_SIMPLE_TYPE(TUSBState, TUSB6010) - -struct TUSBState { - SysBusDevice parent_obj; - - MemoryRegion iomem[2]; - qemu_irq irq; - MUSBState *musb; - QEMUTimer *otg_timer; - QEMUTimer *pwr_timer; - - int power; - uint32_t scratch; - uint16_t test_reset; - uint32_t prcm_config; - uint32_t prcm_mngmt; - uint16_t otg_status; - uint32_t dev_config; - int host_mode; - uint32_t intr; - uint32_t intr_ok; - uint32_t mask; - uint32_t usbip_intr; - uint32_t usbip_mask; - uint32_t gpio_intr; - uint32_t gpio_mask; - uint32_t gpio_config; - uint32_t dma_intr; - uint32_t dma_mask; - uint32_t dma_map; - uint32_t dma_config; - uint32_t ep0_config; - uint32_t rx_config[15]; - uint32_t tx_config[15]; - uint32_t wkup_mask; - uint32_t pullup[2]; - uint32_t control_config; - uint32_t otg_timer_val; -}; - -#define TUSB_DEVCLOCK 60000000 /* 60 MHz */ - -#define TUSB_VLYNQ_CTRL 0x004 - -/* Mentor Graphics OTG core registers. */ -#define TUSB_BASE_OFFSET 0x400 - -/* FIFO registers, 32-bit. */ -#define TUSB_FIFO_BASE 0x600 - -/* Device System & Control registers, 32-bit. 
*/ -#define TUSB_SYS_REG_BASE 0x800 - -#define TUSB_DEV_CONF (TUSB_SYS_REG_BASE + 0x000) -#define TUSB_DEV_CONF_USB_HOST_MODE (1 << 16) -#define TUSB_DEV_CONF_PROD_TEST_MODE (1 << 15) -#define TUSB_DEV_CONF_SOFT_ID (1 << 1) -#define TUSB_DEV_CONF_ID_SEL (1 << 0) - -#define TUSB_PHY_OTG_CTRL_ENABLE (TUSB_SYS_REG_BASE + 0x004) -#define TUSB_PHY_OTG_CTRL (TUSB_SYS_REG_BASE + 0x008) -#define TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 << 24) -#define TUSB_PHY_OTG_CTRL_O_ID_PULLUP (1 << 23) -#define TUSB_PHY_OTG_CTRL_O_VBUS_DET_EN (1 << 19) -#define TUSB_PHY_OTG_CTRL_O_SESS_END_EN (1 << 18) -#define TUSB_PHY_OTG_CTRL_TESTM2 (1 << 17) -#define TUSB_PHY_OTG_CTRL_TESTM1 (1 << 16) -#define TUSB_PHY_OTG_CTRL_TESTM0 (1 << 15) -#define TUSB_PHY_OTG_CTRL_TX_DATA2 (1 << 14) -#define TUSB_PHY_OTG_CTRL_TX_GZ2 (1 << 13) -#define TUSB_PHY_OTG_CTRL_TX_ENABLE2 (1 << 12) -#define TUSB_PHY_OTG_CTRL_DM_PULLDOWN (1 << 11) -#define TUSB_PHY_OTG_CTRL_DP_PULLDOWN (1 << 10) -#define TUSB_PHY_OTG_CTRL_OSC_EN (1 << 9) -#define TUSB_PHY_OTG_CTRL_PHYREF_CLK(v) (((v) & 3) << 7) -#define TUSB_PHY_OTG_CTRL_PD (1 << 6) -#define TUSB_PHY_OTG_CTRL_PLL_ON (1 << 5) -#define TUSB_PHY_OTG_CTRL_EXT_RPU (1 << 4) -#define TUSB_PHY_OTG_CTRL_PWR_GOOD (1 << 3) -#define TUSB_PHY_OTG_CTRL_RESET (1 << 2) -#define TUSB_PHY_OTG_CTRL_SUSPENDM (1 << 1) -#define TUSB_PHY_OTG_CTRL_CLK_MODE (1 << 0) - -/* OTG status register */ -#define TUSB_DEV_OTG_STAT (TUSB_SYS_REG_BASE + 0x00c) -#define TUSB_DEV_OTG_STAT_PWR_CLK_GOOD (1 << 8) -#define TUSB_DEV_OTG_STAT_SESS_END (1 << 7) -#define TUSB_DEV_OTG_STAT_SESS_VALID (1 << 6) -#define TUSB_DEV_OTG_STAT_VBUS_VALID (1 << 5) -#define TUSB_DEV_OTG_STAT_VBUS_SENSE (1 << 4) -#define TUSB_DEV_OTG_STAT_ID_STATUS (1 << 3) -#define TUSB_DEV_OTG_STAT_HOST_DISCON (1 << 2) -#define TUSB_DEV_OTG_STAT_LINE_STATE (3 << 0) -#define TUSB_DEV_OTG_STAT_DP_ENABLE (1 << 1) -#define TUSB_DEV_OTG_STAT_DM_ENABLE (1 << 0) - -#define TUSB_DEV_OTG_TIMER (TUSB_SYS_REG_BASE + 0x010) -#define TUSB_DEV_OTG_TIMER_ENABLE (1 << 31) -#define TUSB_DEV_OTG_TIMER_VAL(v) ((v) & 0x07ffffff) -#define TUSB_PRCM_REV (TUSB_SYS_REG_BASE + 0x014) - -/* PRCM configuration register */ -#define TUSB_PRCM_CONF (TUSB_SYS_REG_BASE + 0x018) -#define TUSB_PRCM_CONF_SFW_CPEN (1 << 24) -#define TUSB_PRCM_CONF_SYS_CLKSEL(v) (((v) & 3) << 16) - -/* PRCM management register */ -#define TUSB_PRCM_MNGMT (TUSB_SYS_REG_BASE + 0x01c) -#define TUSB_PRCM_MNGMT_SRP_FIX_TMR(v) (((v) & 0xf) << 25) -#define TUSB_PRCM_MNGMT_SRP_FIX_EN (1 << 24) -#define TUSB_PRCM_MNGMT_VBUS_VAL_TMR(v) (((v) & 0xf) << 20) -#define TUSB_PRCM_MNGMT_VBUS_VAL_FLT_EN (1 << 19) -#define TUSB_PRCM_MNGMT_DFT_CLK_DIS (1 << 18) -#define TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS (1 << 17) -#define TUSB_PRCM_MNGMT_OTG_SESS_END_EN (1 << 10) -#define TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN (1 << 9) -#define TUSB_PRCM_MNGMT_OTG_ID_PULLUP (1 << 8) -#define TUSB_PRCM_MNGMT_15_SW_EN (1 << 4) -#define TUSB_PRCM_MNGMT_33_SW_EN (1 << 3) -#define TUSB_PRCM_MNGMT_5V_CPEN (1 << 2) -#define TUSB_PRCM_MNGMT_PM_IDLE (1 << 1) -#define TUSB_PRCM_MNGMT_DEV_IDLE (1 << 0) - -/* Wake-up source clear and mask registers */ -#define TUSB_PRCM_WAKEUP_SOURCE (TUSB_SYS_REG_BASE + 0x020) -#define TUSB_PRCM_WAKEUP_CLEAR (TUSB_SYS_REG_BASE + 0x028) -#define TUSB_PRCM_WAKEUP_MASK (TUSB_SYS_REG_BASE + 0x02c) -#define TUSB_PRCM_WAKEUP_RESERVED_BITS (0xffffe << 13) -#define TUSB_PRCM_WGPIO_7 (1 << 12) -#define TUSB_PRCM_WGPIO_6 (1 << 11) -#define TUSB_PRCM_WGPIO_5 (1 << 10) -#define TUSB_PRCM_WGPIO_4 (1 << 9) -#define TUSB_PRCM_WGPIO_3 (1 << 8) -#define 
TUSB_PRCM_WGPIO_2 (1 << 7) -#define TUSB_PRCM_WGPIO_1 (1 << 6) -#define TUSB_PRCM_WGPIO_0 (1 << 5) -#define TUSB_PRCM_WHOSTDISCON (1 << 4) /* Host disconnect */ -#define TUSB_PRCM_WBUS (1 << 3) /* USB bus resume */ -#define TUSB_PRCM_WNORCS (1 << 2) /* NOR chip select */ -#define TUSB_PRCM_WVBUS (1 << 1) /* OTG PHY VBUS */ -#define TUSB_PRCM_WID (1 << 0) /* OTG PHY ID detect */ - -#define TUSB_PULLUP_1_CTRL (TUSB_SYS_REG_BASE + 0x030) -#define TUSB_PULLUP_2_CTRL (TUSB_SYS_REG_BASE + 0x034) -#define TUSB_INT_CTRL_REV (TUSB_SYS_REG_BASE + 0x038) -#define TUSB_INT_CTRL_CONF (TUSB_SYS_REG_BASE + 0x03c) -#define TUSB_USBIP_INT_SRC (TUSB_SYS_REG_BASE + 0x040) -#define TUSB_USBIP_INT_SET (TUSB_SYS_REG_BASE + 0x044) -#define TUSB_USBIP_INT_CLEAR (TUSB_SYS_REG_BASE + 0x048) -#define TUSB_USBIP_INT_MASK (TUSB_SYS_REG_BASE + 0x04c) -#define TUSB_DMA_INT_SRC (TUSB_SYS_REG_BASE + 0x050) -#define TUSB_DMA_INT_SET (TUSB_SYS_REG_BASE + 0x054) -#define TUSB_DMA_INT_CLEAR (TUSB_SYS_REG_BASE + 0x058) -#define TUSB_DMA_INT_MASK (TUSB_SYS_REG_BASE + 0x05c) -#define TUSB_GPIO_INT_SRC (TUSB_SYS_REG_BASE + 0x060) -#define TUSB_GPIO_INT_SET (TUSB_SYS_REG_BASE + 0x064) -#define TUSB_GPIO_INT_CLEAR (TUSB_SYS_REG_BASE + 0x068) -#define TUSB_GPIO_INT_MASK (TUSB_SYS_REG_BASE + 0x06c) - -/* NOR flash interrupt source registers */ -#define TUSB_INT_SRC (TUSB_SYS_REG_BASE + 0x070) -#define TUSB_INT_SRC_SET (TUSB_SYS_REG_BASE + 0x074) -#define TUSB_INT_SRC_CLEAR (TUSB_SYS_REG_BASE + 0x078) -#define TUSB_INT_MASK (TUSB_SYS_REG_BASE + 0x07c) -#define TUSB_INT_SRC_TXRX_DMA_DONE (1 << 24) -#define TUSB_INT_SRC_USB_IP_CORE (1 << 17) -#define TUSB_INT_SRC_OTG_TIMEOUT (1 << 16) -#define TUSB_INT_SRC_VBUS_SENSE_CHNG (1 << 15) -#define TUSB_INT_SRC_ID_STATUS_CHNG (1 << 14) -#define TUSB_INT_SRC_DEV_WAKEUP (1 << 13) -#define TUSB_INT_SRC_DEV_READY (1 << 12) -#define TUSB_INT_SRC_USB_IP_TX (1 << 9) -#define TUSB_INT_SRC_USB_IP_RX (1 << 8) -#define TUSB_INT_SRC_USB_IP_VBUS_ERR (1 << 7) -#define TUSB_INT_SRC_USB_IP_VBUS_REQ (1 << 6) -#define TUSB_INT_SRC_USB_IP_DISCON (1 << 5) -#define TUSB_INT_SRC_USB_IP_CONN (1 << 4) -#define TUSB_INT_SRC_USB_IP_SOF (1 << 3) -#define TUSB_INT_SRC_USB_IP_RST_BABBLE (1 << 2) -#define TUSB_INT_SRC_USB_IP_RESUME (1 << 1) -#define TUSB_INT_SRC_USB_IP_SUSPEND (1 << 0) - -#define TUSB_GPIO_REV (TUSB_SYS_REG_BASE + 0x080) -#define TUSB_GPIO_CONF (TUSB_SYS_REG_BASE + 0x084) -#define TUSB_DMA_CTRL_REV (TUSB_SYS_REG_BASE + 0x100) -#define TUSB_DMA_REQ_CONF (TUSB_SYS_REG_BASE + 0x104) -#define TUSB_EP0_CONF (TUSB_SYS_REG_BASE + 0x108) -#define TUSB_EP_IN_SIZE (TUSB_SYS_REG_BASE + 0x10c) -#define TUSB_DMA_EP_MAP (TUSB_SYS_REG_BASE + 0x148) -#define TUSB_EP_OUT_SIZE (TUSB_SYS_REG_BASE + 0x14c) -#define TUSB_EP_MAX_PACKET_SIZE_OFFSET (TUSB_SYS_REG_BASE + 0x188) -#define TUSB_SCRATCH_PAD (TUSB_SYS_REG_BASE + 0x1c4) -#define TUSB_WAIT_COUNT (TUSB_SYS_REG_BASE + 0x1c8) -#define TUSB_PROD_TEST_RESET (TUSB_SYS_REG_BASE + 0x1d8) - -#define TUSB_DIDR1_LO (TUSB_SYS_REG_BASE + 0x1f8) -#define TUSB_DIDR1_HI (TUSB_SYS_REG_BASE + 0x1fc) - -/* Device System & Control register bitfields */ -#define TUSB_INT_CTRL_CONF_INT_RLCYC(v) (((v) & 0x7) << 18) -#define TUSB_INT_CTRL_CONF_INT_POLARITY (1 << 17) -#define TUSB_INT_CTRL_CONF_INT_MODE (1 << 16) -#define TUSB_GPIO_CONF_DMAREQ(v) (((v) & 0x3f) << 24) -#define TUSB_DMA_REQ_CONF_BURST_SIZE(v) (((v) & 3) << 26) -#define TUSB_DMA_REQ_CONF_DMA_RQ_EN(v) (((v) & 0x3f) << 20) -#define TUSB_DMA_REQ_CONF_DMA_RQ_ASR(v) (((v) & 0xf) << 16) -#define TUSB_EP0_CONFIG_SW_EN (1 << 8) -#define 
TUSB_EP0_CONFIG_DIR_TX (1 << 7) -#define TUSB_EP0_CONFIG_XFR_SIZE(v) ((v) & 0x7f) -#define TUSB_EP_CONFIG_SW_EN (1 << 31) -#define TUSB_EP_CONFIG_XFR_SIZE(v) ((v) & 0x7fffffff) -#define TUSB_PROD_TEST_RESET_VAL 0xa596 - -static void tusb_intr_update(TUSBState *s) -{ - if (s->control_config & TUSB_INT_CTRL_CONF_INT_POLARITY) - qemu_set_irq(s->irq, s->intr & ~s->mask & s->intr_ok); - else - qemu_set_irq(s->irq, (!(s->intr & ~s->mask)) & s->intr_ok); -} - -static void tusb_usbip_intr_update(TUSBState *s) -{ - /* TX interrupt in the MUSB */ - if (s->usbip_intr & 0x0000ffff & ~s->usbip_mask) - s->intr |= TUSB_INT_SRC_USB_IP_TX; - else - s->intr &= ~TUSB_INT_SRC_USB_IP_TX; - - /* RX interrupt in the MUSB */ - if (s->usbip_intr & 0xffff0000 & ~s->usbip_mask) - s->intr |= TUSB_INT_SRC_USB_IP_RX; - else - s->intr &= ~TUSB_INT_SRC_USB_IP_RX; - - /* XXX: What about TUSB_INT_SRC_USB_IP_CORE? */ - - tusb_intr_update(s); -} - -static void tusb_dma_intr_update(TUSBState *s) -{ - if (s->dma_intr & ~s->dma_mask) - s->intr |= TUSB_INT_SRC_TXRX_DMA_DONE; - else - s->intr &= ~TUSB_INT_SRC_TXRX_DMA_DONE; - - tusb_intr_update(s); -} - -static void tusb_gpio_intr_update(TUSBState *s) -{ - /* TODO: How is this signalled? */ -} - -static uint32_t tusb_async_readb(void *opaque, hwaddr addr) -{ - TUSBState *s = (TUSBState *) opaque; - - switch (addr & 0xfff) { - case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff): - return musb_read[0](s->musb, addr & 0x1ff); - - case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff): - return musb_read[0](s->musb, 0x20 + ((addr >> 3) & 0x3c)); - } - - printf("%s: unknown register at %03x\n", - __func__, (int) (addr & 0xfff)); - return 0; -} - -static uint32_t tusb_async_readh(void *opaque, hwaddr addr) -{ - TUSBState *s = (TUSBState *) opaque; - - switch (addr & 0xfff) { - case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff): - return musb_read[1](s->musb, addr & 0x1ff); - - case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff): - return musb_read[1](s->musb, 0x20 + ((addr >> 3) & 0x3c)); - } - - printf("%s: unknown register at %03x\n", - __func__, (int) (addr & 0xfff)); - return 0; -} - -static uint32_t tusb_async_readw(void *opaque, hwaddr addr) -{ - TUSBState *s = (TUSBState *) opaque; - int offset = addr & 0xfff; - int epnum; - uint32_t ret; - - switch (offset) { - case TUSB_DEV_CONF: - return s->dev_config; - - case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff): - return musb_read[2](s->musb, offset & 0x1ff); - - case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff): - return musb_read[2](s->musb, 0x20 + ((addr >> 3) & 0x3c)); - - case TUSB_PHY_OTG_CTRL_ENABLE: - case TUSB_PHY_OTG_CTRL: - return 0x00; /* TODO */ - - case TUSB_DEV_OTG_STAT: - ret = s->otg_status; -#if 0 - if (!(s->prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) - ret &= ~TUSB_DEV_OTG_STAT_VBUS_VALID; -#endif - return ret; - case TUSB_DEV_OTG_TIMER: - return s->otg_timer_val; - - case TUSB_PRCM_REV: - return 0x20; - case TUSB_PRCM_CONF: - return s->prcm_config; - case TUSB_PRCM_MNGMT: - return s->prcm_mngmt; - case TUSB_PRCM_WAKEUP_SOURCE: - case TUSB_PRCM_WAKEUP_CLEAR: /* TODO: What does this one return? */ - return 0x00000000; - case TUSB_PRCM_WAKEUP_MASK: - return s->wkup_mask; - - case TUSB_PULLUP_1_CTRL: - return s->pullup[0]; - case TUSB_PULLUP_2_CTRL: - return s->pullup[1]; - - case TUSB_INT_CTRL_REV: - return 0x20; - case TUSB_INT_CTRL_CONF: - return s->control_config; - - case TUSB_USBIP_INT_SRC: - case TUSB_USBIP_INT_SET: /* TODO: What do these two return? 
*/ - case TUSB_USBIP_INT_CLEAR: - return s->usbip_intr; - case TUSB_USBIP_INT_MASK: - return s->usbip_mask; - - case TUSB_DMA_INT_SRC: - case TUSB_DMA_INT_SET: /* TODO: What do these two return? */ - case TUSB_DMA_INT_CLEAR: - return s->dma_intr; - case TUSB_DMA_INT_MASK: - return s->dma_mask; - - case TUSB_GPIO_INT_SRC: /* TODO: What do these two return? */ - case TUSB_GPIO_INT_SET: - case TUSB_GPIO_INT_CLEAR: - return s->gpio_intr; - case TUSB_GPIO_INT_MASK: - return s->gpio_mask; - - case TUSB_INT_SRC: - case TUSB_INT_SRC_SET: /* TODO: What do these two return? */ - case TUSB_INT_SRC_CLEAR: - return s->intr; - case TUSB_INT_MASK: - return s->mask; - - case TUSB_GPIO_REV: - return 0x30; - case TUSB_GPIO_CONF: - return s->gpio_config; - - case TUSB_DMA_CTRL_REV: - return 0x30; - case TUSB_DMA_REQ_CONF: - return s->dma_config; - case TUSB_EP0_CONF: - return s->ep0_config; - case TUSB_EP_IN_SIZE ... (TUSB_EP_IN_SIZE + 0x3b): - epnum = (offset - TUSB_EP_IN_SIZE) >> 2; - return s->tx_config[epnum]; - case TUSB_DMA_EP_MAP: - return s->dma_map; - case TUSB_EP_OUT_SIZE ... (TUSB_EP_OUT_SIZE + 0x3b): - epnum = (offset - TUSB_EP_OUT_SIZE) >> 2; - return s->rx_config[epnum]; - case TUSB_EP_MAX_PACKET_SIZE_OFFSET ... - (TUSB_EP_MAX_PACKET_SIZE_OFFSET + 0x3b): - return 0x00000000; /* TODO */ - case TUSB_WAIT_COUNT: - return 0x00; /* TODO */ - - case TUSB_SCRATCH_PAD: - return s->scratch; - - case TUSB_PROD_TEST_RESET: - return s->test_reset; - - /* DIE IDs */ - case TUSB_DIDR1_LO: - return 0xa9453c59; - case TUSB_DIDR1_HI: - return 0x54059adf; - } - - printf("%s: unknown register at %03x\n", __func__, offset); - return 0; -} - -static void tusb_async_writeb(void *opaque, hwaddr addr, - uint32_t value) -{ - TUSBState *s = (TUSBState *) opaque; - - switch (addr & 0xfff) { - case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff): - musb_write[0](s->musb, addr & 0x1ff, value); - break; - - case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff): - musb_write[0](s->musb, 0x20 + ((addr >> 3) & 0x3c), value); - break; - - default: - printf("%s: unknown register at %03x\n", - __func__, (int) (addr & 0xfff)); - return; - } -} - -static void tusb_async_writeh(void *opaque, hwaddr addr, - uint32_t value) -{ - TUSBState *s = (TUSBState *) opaque; - - switch (addr & 0xfff) { - case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff): - musb_write[1](s->musb, addr & 0x1ff, value); - break; - - case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff): - musb_write[1](s->musb, 0x20 + ((addr >> 3) & 0x3c), value); - break; - - default: - printf("%s: unknown register at %03x\n", - __func__, (int) (addr & 0xfff)); - return; - } -} - -static void tusb_async_writew(void *opaque, hwaddr addr, - uint32_t value) -{ - TUSBState *s = (TUSBState *) opaque; - int offset = addr & 0xfff; - int epnum; - - switch (offset) { - case TUSB_VLYNQ_CTRL: - break; - - case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff): - musb_write[2](s->musb, offset & 0x1ff, value); - break; - - case TUSB_FIFO_BASE ... 
(TUSB_FIFO_BASE | 0x1ff): - musb_write[2](s->musb, 0x20 + ((addr >> 3) & 0x3c), value); - break; - - case TUSB_DEV_CONF: - s->dev_config = value; - s->host_mode = (value & TUSB_DEV_CONF_USB_HOST_MODE); - if (value & TUSB_DEV_CONF_PROD_TEST_MODE) - hw_error("%s: Product Test mode not allowed\n", __func__); - break; - - case TUSB_PHY_OTG_CTRL_ENABLE: - case TUSB_PHY_OTG_CTRL: - return; /* TODO */ - case TUSB_DEV_OTG_TIMER: - s->otg_timer_val = value; - if (value & TUSB_DEV_OTG_TIMER_ENABLE) - timer_mod(s->otg_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - muldiv64(TUSB_DEV_OTG_TIMER_VAL(value), - NANOSECONDS_PER_SECOND, TUSB_DEVCLOCK)); - else - timer_del(s->otg_timer); - break; - - case TUSB_PRCM_CONF: - s->prcm_config = value; - break; - case TUSB_PRCM_MNGMT: - s->prcm_mngmt = value; - break; - case TUSB_PRCM_WAKEUP_CLEAR: - break; - case TUSB_PRCM_WAKEUP_MASK: - s->wkup_mask = value; - break; - - case TUSB_PULLUP_1_CTRL: - s->pullup[0] = value; - break; - case TUSB_PULLUP_2_CTRL: - s->pullup[1] = value; - break; - case TUSB_INT_CTRL_CONF: - s->control_config = value; - tusb_intr_update(s); - break; - - case TUSB_USBIP_INT_SET: - s->usbip_intr |= value; - tusb_usbip_intr_update(s); - break; - case TUSB_USBIP_INT_CLEAR: - s->usbip_intr &= ~value; - tusb_usbip_intr_update(s); - musb_core_intr_clear(s->musb, ~value); - break; - case TUSB_USBIP_INT_MASK: - s->usbip_mask = value; - tusb_usbip_intr_update(s); - break; - - case TUSB_DMA_INT_SET: - s->dma_intr |= value; - tusb_dma_intr_update(s); - break; - case TUSB_DMA_INT_CLEAR: - s->dma_intr &= ~value; - tusb_dma_intr_update(s); - break; - case TUSB_DMA_INT_MASK: - s->dma_mask = value; - tusb_dma_intr_update(s); - break; - - case TUSB_GPIO_INT_SET: - s->gpio_intr |= value; - tusb_gpio_intr_update(s); - break; - case TUSB_GPIO_INT_CLEAR: - s->gpio_intr &= ~value; - tusb_gpio_intr_update(s); - break; - case TUSB_GPIO_INT_MASK: - s->gpio_mask = value; - tusb_gpio_intr_update(s); - break; - - case TUSB_INT_SRC_SET: - s->intr |= value; - tusb_intr_update(s); - break; - case TUSB_INT_SRC_CLEAR: - s->intr &= ~value; - tusb_intr_update(s); - break; - case TUSB_INT_MASK: - s->mask = value; - tusb_intr_update(s); - break; - - case TUSB_GPIO_CONF: - s->gpio_config = value; - break; - case TUSB_DMA_REQ_CONF: - s->dma_config = value; - break; - case TUSB_EP0_CONF: - s->ep0_config = value & 0x1ff; - musb_set_size(s->musb, 0, TUSB_EP0_CONFIG_XFR_SIZE(value), - value & TUSB_EP0_CONFIG_DIR_TX); - break; - case TUSB_EP_IN_SIZE ... (TUSB_EP_IN_SIZE + 0x3b): - epnum = (offset - TUSB_EP_IN_SIZE) >> 2; - s->tx_config[epnum] = value; - musb_set_size(s->musb, epnum + 1, TUSB_EP_CONFIG_XFR_SIZE(value), 1); - break; - case TUSB_DMA_EP_MAP: - s->dma_map = value; - break; - case TUSB_EP_OUT_SIZE ... (TUSB_EP_OUT_SIZE + 0x3b): - epnum = (offset - TUSB_EP_OUT_SIZE) >> 2; - s->rx_config[epnum] = value; - musb_set_size(s->musb, epnum + 1, TUSB_EP_CONFIG_XFR_SIZE(value), 0); - break; - case TUSB_EP_MAX_PACKET_SIZE_OFFSET ... 
- (TUSB_EP_MAX_PACKET_SIZE_OFFSET + 0x3b): - return; /* TODO */ - case TUSB_WAIT_COUNT: - return; /* TODO */ - - case TUSB_SCRATCH_PAD: - s->scratch = value; - break; - - case TUSB_PROD_TEST_RESET: - s->test_reset = value; - break; - - default: - printf("%s: unknown register at %03x\n", __func__, offset); - return; - } -} - -static uint64_t tusb_async_readfn(void *opaque, hwaddr addr, unsigned size) -{ - switch (size) { - case 1: - return tusb_async_readb(opaque, addr); - case 2: - return tusb_async_readh(opaque, addr); - case 4: - return tusb_async_readw(opaque, addr); - default: - g_assert_not_reached(); - } -} - -static void tusb_async_writefn(void *opaque, hwaddr addr, - uint64_t value, unsigned size) -{ - switch (size) { - case 1: - tusb_async_writeb(opaque, addr, value); - break; - case 2: - tusb_async_writeh(opaque, addr, value); - break; - case 4: - tusb_async_writew(opaque, addr, value); - break; - default: - g_assert_not_reached(); - } -} - -static const MemoryRegionOps tusb_async_ops = { - .read = tusb_async_readfn, - .write = tusb_async_writefn, - .valid.min_access_size = 1, - .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, -}; - -static void tusb_otg_tick(void *opaque) -{ - TUSBState *s = (TUSBState *) opaque; - - s->otg_timer_val = 0; - s->intr |= TUSB_INT_SRC_OTG_TIMEOUT; - tusb_intr_update(s); -} - -static void tusb_power_tick(void *opaque) -{ - TUSBState *s = (TUSBState *) opaque; - - if (s->power) { - s->intr_ok = ~0; - tusb_intr_update(s); - } -} - -static void tusb_musb_core_intr(void *opaque, int source, int level) -{ - TUSBState *s = (TUSBState *) opaque; - uint16_t otg_status = s->otg_status; - - switch (source) { - case musb_set_vbus: - if (level) - otg_status |= TUSB_DEV_OTG_STAT_VBUS_VALID; - else - otg_status &= ~TUSB_DEV_OTG_STAT_VBUS_VALID; - - /* XXX: only if TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN set? */ - /* XXX: only if TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN set? */ - if (s->otg_status != otg_status) { - s->otg_status = otg_status; - s->intr |= TUSB_INT_SRC_VBUS_SENSE_CHNG; - tusb_intr_update(s); - } - break; - - case musb_set_session: - /* XXX: only if TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN set? */ - /* XXX: only if TUSB_PRCM_MNGMT_OTG_SESS_END_EN set? */ - if (level) { - s->otg_status |= TUSB_DEV_OTG_STAT_SESS_VALID; - s->otg_status &= ~TUSB_DEV_OTG_STAT_SESS_END; - } else { - s->otg_status &= ~TUSB_DEV_OTG_STAT_SESS_VALID; - s->otg_status |= TUSB_DEV_OTG_STAT_SESS_END; - } - - /* XXX: some IRQ or anything? */ - break; - - case musb_irq_tx: - case musb_irq_rx: - s->usbip_intr = musb_core_intr_get(s->musb); - /* Fall through. */ - default: - if (level) - s->intr |= 1 << source; - else - s->intr &= ~(1 << source); - tusb_intr_update(s); - break; - } -} - -static void tusb6010_power(TUSBState *s, int on) -{ - if (!on) { - s->power = 0; - } else if (!s->power && on) { - s->power = 1; - /* Pull the interrupt down after TUSB6010 comes up. 
*/ - s->intr_ok = 0; - tusb_intr_update(s); - timer_mod(s->pwr_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - NANOSECONDS_PER_SECOND / 2); - } -} - -static void tusb6010_irq(void *opaque, int source, int level) -{ - if (source) { - tusb_musb_core_intr(opaque, source - 1, level); - } else { - tusb6010_power(opaque, level); - } -} - -static void tusb6010_reset(DeviceState *dev) -{ - TUSBState *s = TUSB6010(dev); - int i; - - s->test_reset = TUSB_PROD_TEST_RESET_VAL; - s->host_mode = 0; - s->dev_config = 0; - s->otg_status = 0; /* !TUSB_DEV_OTG_STAT_ID_STATUS means host mode */ - s->power = 0; - s->mask = 0xffffffff; - s->intr = 0x00000000; - s->otg_timer_val = 0; - s->scratch = 0; - s->prcm_config = 0; - s->prcm_mngmt = 0; - s->intr_ok = 0; - s->usbip_intr = 0; - s->usbip_mask = 0; - s->gpio_intr = 0; - s->gpio_mask = 0; - s->gpio_config = 0; - s->dma_intr = 0; - s->dma_mask = 0; - s->dma_map = 0; - s->dma_config = 0; - s->ep0_config = 0; - s->wkup_mask = 0; - s->pullup[0] = s->pullup[1] = 0; - s->control_config = 0; - for (i = 0; i < 15; i++) { - s->rx_config[i] = s->tx_config[i] = 0; - } - musb_reset(s->musb); -} - -static void tusb6010_realize(DeviceState *dev, Error **errp) -{ - TUSBState *s = TUSB6010(dev); - SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - - s->otg_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tusb_otg_tick, s); - s->pwr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tusb_power_tick, s); - memory_region_init_io(&s->iomem[1], OBJECT(s), &tusb_async_ops, s, - "tusb-async", UINT32_MAX); - sysbus_init_mmio(sbd, &s->iomem[0]); - sysbus_init_mmio(sbd, &s->iomem[1]); - sysbus_init_irq(sbd, &s->irq); - qdev_init_gpio_in(dev, tusb6010_irq, musb_irq_max + 1); - s->musb = musb_init(dev, 1); -} - -static void tusb6010_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->realize = tusb6010_realize; - dc->reset = tusb6010_reset; -} - -static const TypeInfo tusb6010_info = { - .name = TYPE_TUSB6010, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(TUSBState), - .class_init = tusb6010_class_init, -}; - -static void tusb6010_register_types(void) -{ - type_register_static(&tusb6010_info); -} - -type_init(tusb6010_register_types) diff --git a/hw/usb/u2f-emulated.c b/hw/usb/u2f-emulated.c index 63cceaa5fc8..ace5eceadd9 100644 --- a/hw/usb/u2f-emulated.c +++ b/hw/usb/u2f-emulated.c @@ -369,16 +369,15 @@ static void u2f_emulated_unrealize(U2FKeyState *base) } } -static Property u2f_emulated_properties[] = { +static const Property u2f_emulated_properties[] = { DEFINE_PROP_STRING("dir", U2FEmulatedState, dir), DEFINE_PROP_STRING("cert", U2FEmulatedState, cert), DEFINE_PROP_STRING("privkey", U2FEmulatedState, privkey), DEFINE_PROP_STRING("entropy", U2FEmulatedState, entropy), DEFINE_PROP_STRING("counter", U2FEmulatedState, counter), - DEFINE_PROP_END_OF_LIST(), }; -static void u2f_emulated_class_init(ObjectClass *klass, void *data) +static void u2f_emulated_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); U2FKeyClass *kc = U2F_KEY_CLASS(klass); diff --git a/hw/usb/u2f-passthru.c b/hw/usb/u2f-passthru.c index c4a783d1286..fa8d9cdda89 100644 --- a/hw/usb/u2f-passthru.c +++ b/hw/usb/u2f-passthru.c @@ -516,12 +516,11 @@ static const VMStateDescription u2f_passthru_vmstate = { } }; -static Property u2f_passthru_properties[] = { +static const Property u2f_passthru_properties[] = { DEFINE_PROP_STRING("hidraw", U2FPassthruState, hidraw), - DEFINE_PROP_END_OF_LIST(), }; -static void u2f_passthru_class_init(ObjectClass *klass, 
void *data) +static void u2f_passthru_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); U2FKeyClass *kc = U2F_KEY_CLASS(klass); diff --git a/hw/usb/u2f.c b/hw/usb/u2f.c index 1fb59cf404f..b051a999d3a 100644 --- a/hw/usb/u2f.c +++ b/hw/usb/u2f.c @@ -317,7 +317,7 @@ const VMStateDescription vmstate_u2f_key = { } }; -static void u2f_key_class_init(ObjectClass *klass, void *data) +static void u2f_key_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); USBDeviceClass *uc = USB_DEVICE_CLASS(klass); diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c index 13901625c0c..fa46a7da01c 100644 --- a/hw/usb/xen-usb.c +++ b/hw/usb/xen-usb.c @@ -30,8 +30,8 @@ #include "hw/xen/xen-legacy-backend.h" #include "monitor/qdev.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "hw/xen/interface/io/usbif.h" @@ -755,10 +755,10 @@ static void usbback_portid_add(struct usbback_info *usbif, unsigned port, qdict = qdict_new(); qdict_put_str(qdict, "driver", "usb-host"); - tmp = g_strdup_printf("%s.0", usbif->xendev.qdev.id); + tmp = g_strdup_printf("%s.0", DEVICE(&usbif->xendev)->id); qdict_put_str(qdict, "bus", tmp); g_free(tmp); - tmp = g_strdup_printf("%s-%u", usbif->xendev.qdev.id, port); + tmp = g_strdup_printf("%s-%u", DEVICE(&usbif->xendev)->id, port); qdict_put_str(qdict, "id", tmp); g_free(tmp); qdict_put_int(qdict, "port", port); @@ -1022,7 +1022,7 @@ static void usbback_alloc(struct XenLegacyDevice *xendev) usbif = container_of(xendev, struct usbback_info, xendev); usb_bus_new(&usbif->bus, sizeof(usbif->bus), &xen_usb_bus_ops, - DEVICE(&xendev->qdev)); + DEVICE(xendev)); for (i = 0; i < USBBACK_MAXPORTS; i++) { p = &(usbif->ports[i].port); usb_register_port(&usbif->bus, p, usbif, i, &xen_usb_port_ops, diff --git a/hw/usb/xlnx-usb-subsystem.c b/hw/usb/xlnx-usb-subsystem.c index d8deeb6ced5..98967ef49f1 100644 --- a/hw/usb/xlnx-usb-subsystem.c +++ b/hw/usb/xlnx-usb-subsystem.c @@ -69,7 +69,7 @@ static void versal_usb2_init(Object *obj) object_property_add_alias(obj, "dma", OBJECT(&s->dwc3.sysbus_xhci), "dma"); } -static void versal_usb2_class_init(ObjectClass *klass, void *data) +static void versal_usb2_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/usb/xlnx-versal-usb2-ctrl-regs.c b/hw/usb/xlnx-versal-usb2-ctrl-regs.c index 66c793a6021..4114672d4f4 100644 --- a/hw/usb/xlnx-versal-usb2-ctrl-regs.c +++ b/hw/usb/xlnx-versal-usb2-ctrl-regs.c @@ -202,7 +202,7 @@ static const VMStateDescription vmstate_usb2_ctrl_regs = { } }; -static void usb2_ctrl_regs_class_init(ObjectClass *klass, void *data) +static void usb2_ctrl_regs_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); diff --git a/hw/vfio-user/Kconfig b/hw/vfio-user/Kconfig new file mode 100644 index 00000000000..24bdf7af903 --- /dev/null +++ b/hw/vfio-user/Kconfig @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +config VFIO_USER + bool + default y + depends on VFIO_PCI + diff --git a/hw/vfio-user/container.c b/hw/vfio-user/container.c new file mode 100644 index 00000000000..d589dd90f50 --- /dev/null +++ b/hw/vfio-user/container.c @@ -0,0 +1,357 @@ +/* + * Container for vfio-user IOMMU type: rather than communicating with the kernel + * vfio driver, we communicate over a socket to a server using the vfio-user + * protocol. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include +#include "qemu/osdep.h" + +#include "hw/vfio-user/container.h" +#include "hw/vfio-user/device.h" +#include "hw/vfio-user/trace.h" +#include "hw/vfio/vfio-device.h" +#include "hw/vfio/vfio-listener.h" +#include "qapi/error.h" + +/* + * When DMA space is the physical address space, the region add/del listeners + * will fire during memory update transactions. These depend on BQL being held, + * so do any resulting map/demap ops async while keeping BQL. + */ +static void vfio_user_listener_begin(VFIOContainerBase *bcontainer) +{ + VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer, + bcontainer); + + container->proxy->async_ops = true; +} + +static void vfio_user_listener_commit(VFIOContainerBase *bcontainer) +{ + VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer, + bcontainer); + + /* wait here for any async requests sent during the transaction */ + container->proxy->async_ops = false; + vfio_user_wait_reqs(container->proxy); +} + +static int vfio_user_dma_unmap(const VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb, bool unmap_all) +{ + VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer, + bcontainer); + Error *local_err = NULL; + int ret = 0; + + VFIOUserDMAUnmap *msgp = g_malloc(sizeof(*msgp)); + + vfio_user_request_msg(&msgp->hdr, VFIO_USER_DMA_UNMAP, sizeof(*msgp), 0); + msgp->argsz = sizeof(struct vfio_iommu_type1_dma_unmap); + msgp->flags = unmap_all ? VFIO_DMA_UNMAP_FLAG_ALL : 0; + msgp->iova = iova; + msgp->size = size; + trace_vfio_user_dma_unmap(msgp->iova, msgp->size, msgp->flags, + container->proxy->async_ops); + + if (container->proxy->async_ops) { + if (!vfio_user_send_nowait(container->proxy, &msgp->hdr, NULL, + 0, &local_err)) { + error_report_err(local_err); + ret = -EFAULT; + } + } else { + if (!vfio_user_send_wait(container->proxy, &msgp->hdr, NULL, + 0, &local_err)) { + error_report_err(local_err); + ret = -EFAULT; + } + + if (msgp->hdr.flags & VFIO_USER_ERROR) { + ret = -msgp->hdr.error_reply; + } + + g_free(msgp); + } + + return ret; +} + +static int vfio_user_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova, + ram_addr_t size, void *vaddr, bool readonly, + MemoryRegion *mrp) +{ + VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer, + bcontainer); + int fd = memory_region_get_fd(mrp); + Error *local_err = NULL; + int ret = 0; + + VFIOUserFDs *fds = NULL; + VFIOUserDMAMap *msgp = g_malloc0(sizeof(*msgp)); + + vfio_user_request_msg(&msgp->hdr, VFIO_USER_DMA_MAP, sizeof(*msgp), 0); + msgp->argsz = sizeof(struct vfio_iommu_type1_dma_map); + msgp->flags = VFIO_DMA_MAP_FLAG_READ; + msgp->offset = 0; + msgp->iova = iova; + msgp->size = size; + + /* + * vaddr enters as a QEMU process address; make it either a file offset + * for mapped areas or leave as 0. + */ + if (fd != -1) { + msgp->offset = qemu_ram_block_host_offset(mrp->ram_block, vaddr); + } + + if (!readonly) { + msgp->flags |= VFIO_DMA_MAP_FLAG_WRITE; + } + + trace_vfio_user_dma_map(msgp->iova, msgp->size, msgp->offset, msgp->flags, + container->proxy->async_ops); + + /* + * The async_ops case sends without blocking. They're later waited for in + * vfio_send_wait_reqs. 
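The comment above covers the nowait path; for illustration, the bracketing that makes it safe looks like this (the function names are the callbacks defined in this file, the variables are hypothetical, and in practice these calls are driven by the VFIO memory listener rather than written out by hand):

    /* illustrative transaction only */
    vfio_user_listener_begin(bcontainer);                 /* async_ops = true      */
    vfio_user_dma_map(bcontainer, iova, size, vaddr,
                      false, mr);                         /* sent, not waited on   */
    vfio_user_dma_unmap(bcontainer, stale_iova, stale_size,
                        NULL, false);                     /* sent, not waited on   */
    vfio_user_listener_commit(bcontainer);                /* vfio_user_wait_reqs() */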
+ */ + if (container->proxy->async_ops) { + /* can't use auto variable since we don't block */ + if (fd != -1) { + fds = vfio_user_getfds(1); + fds->send_fds = 1; + fds->fds[0] = fd; + } + + if (!vfio_user_send_nowait(container->proxy, &msgp->hdr, fds, + 0, &local_err)) { + error_report_err(local_err); + ret = -EFAULT; + } + } else { + VFIOUserFDs local_fds = { 1, 0, &fd }; + + fds = fd != -1 ? &local_fds : NULL; + + if (!vfio_user_send_wait(container->proxy, &msgp->hdr, fds, + 0, &local_err)) { + error_report_err(local_err); + ret = -EFAULT; + } + + if (msgp->hdr.flags & VFIO_USER_ERROR) { + ret = -msgp->hdr.error_reply; + } + + g_free(msgp); + } + + return ret; +} + +static int +vfio_user_set_dirty_page_tracking(const VFIOContainerBase *bcontainer, + bool start, Error **errp) +{ + error_setg_errno(errp, ENOTSUP, "Not supported"); + return -ENOTSUP; +} + +static int vfio_user_query_dirty_bitmap(const VFIOContainerBase *bcontainer, + VFIOBitmap *vbmap, hwaddr iova, + hwaddr size, Error **errp) +{ + error_setg_errno(errp, ENOTSUP, "Not supported"); + return -ENOTSUP; +} + +static bool vfio_user_setup(VFIOContainerBase *bcontainer, Error **errp) +{ + VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer, + bcontainer); + + assert(container->proxy->dma_pgsizes != 0); + bcontainer->pgsizes = container->proxy->dma_pgsizes; + bcontainer->dma_max_mappings = container->proxy->max_dma; + + /* No live migration support yet. */ + bcontainer->dirty_pages_supported = false; + bcontainer->max_dirty_bitmap_size = container->proxy->max_bitmap; + bcontainer->dirty_pgsizes = container->proxy->migr_pgsize; + + return true; +} + +static VFIOUserContainer *vfio_user_create_container(VFIODevice *vbasedev, + Error **errp) +{ + VFIOUserContainer *container; + + container = VFIO_IOMMU_USER(object_new(TYPE_VFIO_IOMMU_USER)); + container->proxy = vbasedev->proxy; + return container; +} + +/* + * Try to mirror vfio_container_connect() as much as possible. 
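As a readability aid, the setup/teardown symmetry of the pair below (a descriptive restatement of the code, no new behaviour):

    /*
     *  connect:    vfio_address_space_get -> create container
     *              -> ram_block_uncoordinated_discard_disable(true)
     *              -> vioc->setup -> vfio_address_space_insert
     *              -> vfio_listener_register
     *  disconnect: discard_disable(false) -> vfio_listener_unregister
     *              -> vioc->release -> object_unref(container)
     *              -> vfio_address_space_put
     *  The error labels in vfio_user_container_connect() unwind the steps
     *  completed before the failure.
     */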
+ */ +static VFIOUserContainer * +vfio_user_container_connect(AddressSpace *as, VFIODevice *vbasedev, + Error **errp) +{ + VFIOContainerBase *bcontainer; + VFIOUserContainer *container; + VFIOAddressSpace *space; + VFIOIOMMUClass *vioc; + int ret; + + space = vfio_address_space_get(as); + + container = vfio_user_create_container(vbasedev, errp); + if (!container) { + goto put_space_exit; + } + + bcontainer = &container->bcontainer; + + ret = ram_block_uncoordinated_discard_disable(true); + if (ret) { + error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken"); + goto free_container_exit; + } + + vioc = VFIO_IOMMU_GET_CLASS(bcontainer); + assert(vioc->setup); + + if (!vioc->setup(bcontainer, errp)) { + goto enable_discards_exit; + } + + vfio_address_space_insert(space, bcontainer); + + if (!vfio_listener_register(bcontainer, errp)) { + goto listener_release_exit; + } + + bcontainer->initialized = true; + + return container; + +listener_release_exit: + vfio_listener_unregister(bcontainer); + if (vioc->release) { + vioc->release(bcontainer); + } + +enable_discards_exit: + ram_block_uncoordinated_discard_disable(false); + +free_container_exit: + object_unref(container); + +put_space_exit: + vfio_address_space_put(space); + + return NULL; +} + +static void vfio_user_container_disconnect(VFIOUserContainer *container) +{ + VFIOContainerBase *bcontainer = &container->bcontainer; + VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); + VFIOAddressSpace *space = bcontainer->space; + + ram_block_uncoordinated_discard_disable(false); + + vfio_listener_unregister(bcontainer); + if (vioc->release) { + vioc->release(bcontainer); + } + + object_unref(container); + + vfio_address_space_put(space); +} + +static bool vfio_user_device_get(VFIOUserContainer *container, + VFIODevice *vbasedev, Error **errp) +{ + struct vfio_device_info info = { .argsz = sizeof(info) }; + + + if (!vfio_user_get_device_info(vbasedev->proxy, &info, errp)) { + return false; + } + + vbasedev->fd = -1; + + vfio_device_prepare(vbasedev, &container->bcontainer, &info); + + return true; +} + +/* + * vfio_user_device_attach: attach a device to a new container. + */ +static bool vfio_user_device_attach(const char *name, VFIODevice *vbasedev, + AddressSpace *as, Error **errp) +{ + VFIOUserContainer *container; + + container = vfio_user_container_connect(as, vbasedev, errp); + if (container == NULL) { + error_prepend(errp, "failed to connect proxy"); + return false; + } + + return vfio_user_device_get(container, vbasedev, errp); +} + +static void vfio_user_device_detach(VFIODevice *vbasedev) +{ + VFIOUserContainer *container = container_of(vbasedev->bcontainer, + VFIOUserContainer, bcontainer); + + vfio_device_unprepare(vbasedev); + + vfio_user_container_disconnect(container); +} + +static int vfio_user_pci_hot_reset(VFIODevice *vbasedev, bool single) +{ + /* ->needs_reset is always false for vfio-user. 
*/ + return 0; +} + +static void vfio_iommu_user_class_init(ObjectClass *klass, const void *data) +{ + VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); + + vioc->setup = vfio_user_setup; + vioc->listener_begin = vfio_user_listener_begin, + vioc->listener_commit = vfio_user_listener_commit, + vioc->dma_map = vfio_user_dma_map; + vioc->dma_unmap = vfio_user_dma_unmap; + vioc->attach_device = vfio_user_device_attach; + vioc->detach_device = vfio_user_device_detach; + vioc->set_dirty_page_tracking = vfio_user_set_dirty_page_tracking; + vioc->query_dirty_bitmap = vfio_user_query_dirty_bitmap; + vioc->pci_hot_reset = vfio_user_pci_hot_reset; +}; + +static const TypeInfo types[] = { + { + .name = TYPE_VFIO_IOMMU_USER, + .parent = TYPE_VFIO_IOMMU, + .instance_size = sizeof(VFIOUserContainer), + .class_init = vfio_iommu_user_class_init, + }, +}; + +DEFINE_TYPES(types) diff --git a/hw/vfio-user/container.h b/hw/vfio-user/container.h new file mode 100644 index 00000000000..2bb1fa13431 --- /dev/null +++ b/hw/vfio-user/container.h @@ -0,0 +1,23 @@ +/* + * vfio-user specific definitions. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_USER_CONTAINER_H +#define HW_VFIO_USER_CONTAINER_H + +#include "qemu/osdep.h" + +#include "hw/vfio/vfio-container-base.h" +#include "hw/vfio-user/proxy.h" + +/* MMU container sub-class for vfio-user. */ +typedef struct VFIOUserContainer { + VFIOContainerBase bcontainer; + VFIOUserProxy *proxy; +} VFIOUserContainer; + +OBJECT_DECLARE_SIMPLE_TYPE(VFIOUserContainer, VFIO_IOMMU_USER); + +#endif /* HW_VFIO_USER_CONTAINER_H */ diff --git a/hw/vfio-user/device.c b/hw/vfio-user/device.c new file mode 100644 index 00000000000..0609a7dc254 --- /dev/null +++ b/hw/vfio-user/device.c @@ -0,0 +1,441 @@ +/* + * vfio protocol over a UNIX socket device handling. + * + * Copyright © 2018, 2021 Oracle and/or its affiliates. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/lockable.h" +#include "qemu/thread.h" + +#include "hw/vfio-user/device.h" +#include "hw/vfio-user/trace.h" + +/* + * These are to defend against a malign server trying + * to force us to run out of memory. 
+ */ +#define VFIO_USER_MAX_REGIONS 100 +#define VFIO_USER_MAX_IRQS 50 + +bool vfio_user_get_device_info(VFIOUserProxy *proxy, + struct vfio_device_info *info, Error **errp) +{ + VFIOUserDeviceInfo msg; + uint32_t argsz = sizeof(msg) - sizeof(msg.hdr); + + memset(&msg, 0, sizeof(msg)); + vfio_user_request_msg(&msg.hdr, VFIO_USER_DEVICE_GET_INFO, sizeof(msg), 0); + msg.argsz = argsz; + + if (!vfio_user_send_wait(proxy, &msg.hdr, NULL, 0, errp)) { + return false; + } + + if (msg.hdr.flags & VFIO_USER_ERROR) { + error_setg_errno(errp, -msg.hdr.error_reply, + "VFIO_USER_DEVICE_GET_INFO failed"); + return false; + } + + trace_vfio_user_get_info(msg.num_regions, msg.num_irqs); + + memcpy(info, &msg.argsz, argsz); + + /* defend against a malicious server */ + if (info->num_regions > VFIO_USER_MAX_REGIONS || + info->num_irqs > VFIO_USER_MAX_IRQS) { + error_setg_errno(errp, EINVAL, "invalid reply"); + return false; + } + + return true; +} + +void vfio_user_device_reset(VFIOUserProxy *proxy) +{ + Error *local_err = NULL; + VFIOUserHdr hdr; + + vfio_user_request_msg(&hdr, VFIO_USER_DEVICE_RESET, sizeof(hdr), 0); + + if (!vfio_user_send_wait(proxy, &hdr, NULL, 0, &local_err)) { + error_prepend(&local_err, "%s: ", __func__); + error_report_err(local_err); + return; + } + + if (hdr.flags & VFIO_USER_ERROR) { + error_printf("reset reply error %d\n", hdr.error_reply); + } +} + +static int vfio_user_get_region_info(VFIOUserProxy *proxy, + struct vfio_region_info *info, + VFIOUserFDs *fds) +{ + g_autofree VFIOUserRegionInfo *msgp = NULL; + Error *local_err = NULL; + uint32_t size; + + /* data returned can be larger than vfio_region_info */ + if (info->argsz < sizeof(*info)) { + error_printf("vfio_user_get_region_info argsz too small\n"); + return -E2BIG; + } + if (fds != NULL && fds->send_fds != 0) { + error_printf("vfio_user_get_region_info can't send FDs\n"); + return -EINVAL; + } + + size = info->argsz + sizeof(VFIOUserHdr); + msgp = g_malloc0(size); + + vfio_user_request_msg(&msgp->hdr, VFIO_USER_DEVICE_GET_REGION_INFO, + sizeof(*msgp), 0); + msgp->argsz = info->argsz; + msgp->index = info->index; + + if (!vfio_user_send_wait(proxy, &msgp->hdr, fds, size, &local_err)) { + error_prepend(&local_err, "%s: ", __func__); + error_report_err(local_err); + return -EFAULT; + } + + if (msgp->hdr.flags & VFIO_USER_ERROR) { + return -msgp->hdr.error_reply; + } + trace_vfio_user_get_region_info(msgp->index, msgp->flags, msgp->size); + + memcpy(info, &msgp->argsz, info->argsz); + + /* + * If at least one region is directly mapped into the VM, then we can no + * longer rely on the sequential nature of vfio-user request handling to + * ensure that posted writes are completed before a subsequent read. In this + * case, disable posted write support. This is a per-device property, not + * per-region. 
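To spell out the hazard this avoids, consider the hypothetical sequence below; "bar0", "off", "off2" and "mmap_base" are assumptions made only for the illustration:

    /* 1. posted write: queued on the socket, no reply awaited                 */
    vfio_user_device_io_region_write(&vdev->vbasedev, bar0, off, 4, &val, true);
    /* 2. load through an mmap'ed BAR: served from mapped memory and never
     *    touches the socket, so it cannot be ordered after the write above    */
    uint32_t v = *(volatile uint32_t *)(mmap_base + off2);

With any mmap-able region present, step 1 is therefore made synchronous.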
+ */ + if (info->flags & VFIO_REGION_INFO_FLAG_MMAP) { + vfio_user_disable_posted_writes(proxy); + } + + return 0; +} + +static int vfio_user_device_io_get_region_info(VFIODevice *vbasedev, + struct vfio_region_info *info, + int *fd) +{ + VFIOUserFDs fds = { 0, 1, fd}; + int ret; + + if (info->index > vbasedev->num_regions) { + return -EINVAL; + } + + ret = vfio_user_get_region_info(vbasedev->proxy, info, &fds); + if (ret) { + return ret; + } + + /* cap_offset in valid area */ + if ((info->flags & VFIO_REGION_INFO_FLAG_CAPS) && + (info->cap_offset < sizeof(*info) || info->cap_offset > info->argsz)) { + return -EINVAL; + } + + return 0; +} + +static int vfio_user_device_io_get_irq_info(VFIODevice *vbasedev, + struct vfio_irq_info *info) +{ + VFIOUserProxy *proxy = vbasedev->proxy; + Error *local_err = NULL; + VFIOUserIRQInfo msg; + + memset(&msg, 0, sizeof(msg)); + vfio_user_request_msg(&msg.hdr, VFIO_USER_DEVICE_GET_IRQ_INFO, + sizeof(msg), 0); + msg.argsz = info->argsz; + msg.index = info->index; + + if (!vfio_user_send_wait(proxy, &msg.hdr, NULL, 0, &local_err)) { + error_prepend(&local_err, "%s: ", __func__); + error_report_err(local_err); + return -EFAULT; + } + + if (msg.hdr.flags & VFIO_USER_ERROR) { + return -msg.hdr.error_reply; + } + trace_vfio_user_get_irq_info(msg.index, msg.flags, msg.count); + + memcpy(info, &msg.argsz, sizeof(*info)); + return 0; +} + +static int irq_howmany(int *fdp, uint32_t cur, uint32_t max) +{ + int n = 0; + + if (fdp[cur] != -1) { + do { + n++; + } while (n < max && fdp[cur + n] != -1); + } else { + do { + n++; + } while (n < max && fdp[cur + n] == -1); + } + + return n; +} + +static int vfio_user_device_io_set_irqs(VFIODevice *vbasedev, + struct vfio_irq_set *irq) +{ + VFIOUserProxy *proxy = vbasedev->proxy; + g_autofree VFIOUserIRQSet *msgp = NULL; + uint32_t size, nfds, send_fds, sent_fds, max; + Error *local_err = NULL; + + if (irq->argsz < sizeof(*irq)) { + error_printf("vfio_user_set_irqs argsz too small\n"); + return -EINVAL; + } + + /* + * Handle simple case + */ + if ((irq->flags & VFIO_IRQ_SET_DATA_EVENTFD) == 0) { + size = sizeof(VFIOUserHdr) + irq->argsz; + msgp = g_malloc0(size); + + vfio_user_request_msg(&msgp->hdr, VFIO_USER_DEVICE_SET_IRQS, size, 0); + msgp->argsz = irq->argsz; + msgp->flags = irq->flags; + msgp->index = irq->index; + msgp->start = irq->start; + msgp->count = irq->count; + trace_vfio_user_set_irqs(msgp->index, msgp->start, msgp->count, + msgp->flags); + + if (!vfio_user_send_wait(proxy, &msgp->hdr, NULL, 0, &local_err)) { + error_prepend(&local_err, "%s: ", __func__); + error_report_err(local_err); + return -EFAULT; + } + + if (msgp->hdr.flags & VFIO_USER_ERROR) { + return -msgp->hdr.error_reply; + } + + return 0; + } + + /* + * Calculate the number of FDs to send + * and adjust argsz + */ + nfds = (irq->argsz - sizeof(*irq)) / sizeof(int); + irq->argsz = sizeof(*irq); + msgp = g_malloc0(sizeof(*msgp)); + /* + * Send in chunks if over max_send_fds + */ + for (sent_fds = 0; nfds > sent_fds; sent_fds += send_fds) { + VFIOUserFDs *arg_fds, loop_fds; + + /* must send all valid FDs or all invalid FDs in single msg */ + max = nfds - sent_fds; + if (max > proxy->max_send_fds) { + max = proxy->max_send_fds; + } + send_fds = irq_howmany((int *)irq->data, sent_fds, max); + + vfio_user_request_msg(&msgp->hdr, VFIO_USER_DEVICE_SET_IRQS, + sizeof(*msgp), 0); + msgp->argsz = irq->argsz; + msgp->flags = irq->flags; + msgp->index = irq->index; + msgp->start = irq->start + sent_fds; + msgp->count = send_fds; + 
trace_vfio_user_set_irqs(msgp->index, msgp->start, msgp->count, + msgp->flags); + + loop_fds.send_fds = send_fds; + loop_fds.recv_fds = 0; + loop_fds.fds = (int *)irq->data + sent_fds; + arg_fds = loop_fds.fds[0] != -1 ? &loop_fds : NULL; + + if (!vfio_user_send_wait(proxy, &msgp->hdr, arg_fds, 0, &local_err)) { + error_prepend(&local_err, "%s: ", __func__); + error_report_err(local_err); + return -EFAULT; + } + + if (msgp->hdr.flags & VFIO_USER_ERROR) { + return -msgp->hdr.error_reply; + } + } + + return 0; +} + +static int vfio_user_device_io_region_read(VFIODevice *vbasedev, uint8_t index, + off_t off, uint32_t count, + void *data) +{ + g_autofree VFIOUserRegionRW *msgp = NULL; + VFIOUserProxy *proxy = vbasedev->proxy; + int size = sizeof(*msgp) + count; + Error *local_err = NULL; + + if (count > proxy->max_xfer_size) { + return -EINVAL; + } + + msgp = g_malloc0(size); + vfio_user_request_msg(&msgp->hdr, VFIO_USER_REGION_READ, sizeof(*msgp), 0); + msgp->offset = off; + msgp->region = index; + msgp->count = count; + trace_vfio_user_region_rw(msgp->region, msgp->offset, msgp->count); + + if (!vfio_user_send_wait(proxy, &msgp->hdr, NULL, size, &local_err)) { + error_prepend(&local_err, "%s: ", __func__); + error_report_err(local_err); + return -EFAULT; + } + + if (msgp->hdr.flags & VFIO_USER_ERROR) { + return -msgp->hdr.error_reply; + } else if (msgp->count > count) { + return -E2BIG; + } else { + memcpy(data, &msgp->data, msgp->count); + } + + return msgp->count; +} + +/* + * If this is a posted write, and VFIO_PROXY_NO_POST is not set, then we are OK + * to send the write to the socket without waiting for the server's reply: + * a subsequent read (of any region) will not pass the posted write, as all + * messages are handled sequentially. + */ +static int vfio_user_device_io_region_write(VFIODevice *vbasedev, uint8_t index, + off_t off, unsigned count, + void *data, bool post) +{ + VFIOUserRegionRW *msgp = NULL; + VFIOUserProxy *proxy = vbasedev->proxy; + int size = sizeof(*msgp) + count; + Error *local_err = NULL; + bool can_multi; + int flags = 0; + int ret; + + if (count > proxy->max_xfer_size) { + return -EINVAL; + } + + if (proxy->flags & VFIO_PROXY_NO_POST) { + post = false; + } + + if (post) { + flags |= VFIO_USER_NO_REPLY; + } + + /* write eligible to be in a WRITE_MULTI msg ? 
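To make the coalescing concrete: up to VFIO_USER_MULTI_MAX small posted writes are packed into a single VFIO_USER_REGION_WRITE_MULTI message, each element carrying at most VFIO_USER_MULTI_DATA bytes (both defined in protocol.h later in this series). One packed element, with purely illustrative values:

    VFIOUserWROne wr = {
        .offset = 0x1000,       /* offset within the target region       */
        .region = 0,            /* e.g. BAR 0                            */
        .count  = 4,            /* must be <= VFIO_USER_MULTI_DATA (8)   */
    };
    memcpy(wr.data, &val, wr.count);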
*/ + can_multi = (proxy->flags & VFIO_PROXY_USE_MULTI) && post && + count <= VFIO_USER_MULTI_DATA; + + /* + * This should be a rare case, so first check without the lock, + * if we're wrong, vfio_send_queued() will flush any posted writes + * we missed here + */ + if (proxy->wr_multi != NULL || + (proxy->num_outgoing > VFIO_USER_OUT_HIGH && can_multi)) { + + /* + * re-check with lock + * + * if already building a WRITE_MULTI msg, + * add this one if possible else flush pending before + * sending the current one + * + * else if outgoing queue is over the highwater, + * start a new WRITE_MULTI message + */ + WITH_QEMU_LOCK_GUARD(&proxy->lock) { + if (proxy->wr_multi != NULL) { + if (can_multi) { + vfio_user_add_multi(proxy, index, off, count, data); + return count; + } + vfio_user_flush_multi(proxy); + } else if (proxy->num_outgoing > VFIO_USER_OUT_HIGH && can_multi) { + vfio_user_create_multi(proxy); + vfio_user_add_multi(proxy, index, off, count, data); + return count; + } + } + } + + msgp = g_malloc0(size); + vfio_user_request_msg(&msgp->hdr, VFIO_USER_REGION_WRITE, size, flags); + msgp->offset = off; + msgp->region = index; + msgp->count = count; + memcpy(&msgp->data, data, count); + trace_vfio_user_region_rw(msgp->region, msgp->offset, msgp->count); + + /* async send will free msg after it's sent */ + if (post) { + if (!vfio_user_send_async(proxy, &msgp->hdr, NULL, &local_err)) { + error_prepend(&local_err, "%s: ", __func__); + error_report_err(local_err); + return -EFAULT; + } + + return count; + } + + if (!vfio_user_send_wait(proxy, &msgp->hdr, NULL, 0, &local_err)) { + error_prepend(&local_err, "%s: ", __func__); + error_report_err(local_err); + g_free(msgp); + return -EFAULT; + } + + if (msgp->hdr.flags & VFIO_USER_ERROR) { + ret = -msgp->hdr.error_reply; + } else { + ret = count; + } + + g_free(msgp); + return ret; +} + +/* + * Socket-based io_ops + */ +VFIODeviceIOOps vfio_user_device_io_ops_sock = { + .get_region_info = vfio_user_device_io_get_region_info, + .get_irq_info = vfio_user_device_io_get_irq_info, + .set_irqs = vfio_user_device_io_set_irqs, + .region_read = vfio_user_device_io_region_read, + .region_write = vfio_user_device_io_region_write, + +}; diff --git a/hw/vfio-user/device.h b/hw/vfio-user/device.h new file mode 100644 index 00000000000..d183a3950e2 --- /dev/null +++ b/hw/vfio-user/device.h @@ -0,0 +1,24 @@ +#ifndef VFIO_USER_DEVICE_H +#define VFIO_USER_DEVICE_H + +/* + * vfio protocol over a UNIX socket device handling. + * + * Copyright © 2018, 2021 Oracle and/or its affiliates. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "linux/vfio.h" + +#include "hw/vfio-user/proxy.h" + +bool vfio_user_get_device_info(VFIOUserProxy *proxy, + struct vfio_device_info *info, Error **errp); + +void vfio_user_device_reset(VFIOUserProxy *proxy); + +extern VFIODeviceIOOps vfio_user_device_io_ops_sock; + +#endif /* VFIO_USER_DEVICE_H */ diff --git a/hw/vfio-user/meson.build b/hw/vfio-user/meson.build new file mode 100644 index 00000000000..2ed0ae5b1dd --- /dev/null +++ b/hw/vfio-user/meson.build @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +vfio_user_ss = ss.source_set() +vfio_user_ss.add(files( + 'container.c', + 'device.c', + 'pci.c', + 'proxy.c', +)) + +system_ss.add_all(when: 'CONFIG_VFIO_USER', if_true: vfio_user_ss) diff --git a/hw/vfio-user/pci.c b/hw/vfio-user/pci.c new file mode 100644 index 00000000000..be71c777291 --- /dev/null +++ b/hw/vfio-user/pci.c @@ -0,0 +1,475 @@ +/* + * vfio PCI device over a UNIX socket. + * + * Copyright © 2018, 2021 Oracle and/or its affiliates. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include "qemu/osdep.h" +#include "qapi-visit-sockets.h" +#include "qemu/error-report.h" + +#include "hw/qdev-properties.h" +#include "hw/vfio/pci.h" +#include "hw/vfio-user/device.h" +#include "hw/vfio-user/proxy.h" + +#define TYPE_VFIO_USER_PCI "vfio-user-pci" +OBJECT_DECLARE_SIMPLE_TYPE(VFIOUserPCIDevice, VFIO_USER_PCI) + +struct VFIOUserPCIDevice { + VFIOPCIDevice device; + SocketAddress *socket; + bool send_queued; /* all sends are queued */ + uint32_t wait_time; /* timeout for message replies */ + bool no_post; /* all region writes are sync */ +}; + +/* + * The server maintains the device's pending interrupts, + * via its MSIX table and PBA, so we treat these accesses + * like PCI config space and forward them. 
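Restating that as a flow (nothing here beyond what the handlers below already do): a guest load from the PBA becomes a region read forwarded to the server, and a guest store is silently dropped:

    /* guest load of the PBA, conceptually: */
    val = vfio_user_pba_read(vdev, addr, size);
        /* -> vfio_region_read(pba BAR region, msix->pba_offset + addr, size) */
        /* -> a VFIO_USER_REGION_READ request on the proxy socket             */
    /* guest store: vfio_user_pba_write() discards the data                   */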
+ */ +static uint64_t vfio_user_pba_read(void *opaque, hwaddr addr, + unsigned size) +{ + VFIOPCIDevice *vdev = opaque; + VFIORegion *region = &vdev->bars[vdev->msix->pba_bar].region; + uint64_t data; + + /* server copy is what matters */ + data = vfio_region_read(region, addr + vdev->msix->pba_offset, size); + return data; +} + +static void vfio_user_pba_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + /* dropped */ +} + +static const MemoryRegionOps vfio_user_pba_ops = { + .read = vfio_user_pba_read, + .write = vfio_user_pba_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void vfio_user_msix_setup(VFIOPCIDevice *vdev) +{ + MemoryRegion *vfio_reg, *msix_reg, *pba_reg; + + pba_reg = g_new0(MemoryRegion, 1); + vdev->msix->pba_region = pba_reg; + + vfio_reg = vdev->bars[vdev->msix->pba_bar].mr; + msix_reg = &vdev->pdev.msix_pba_mmio; + memory_region_init_io(pba_reg, OBJECT(vdev), &vfio_user_pba_ops, vdev, + "VFIO MSIX PBA", int128_get64(msix_reg->size)); + memory_region_add_subregion_overlap(vfio_reg, vdev->msix->pba_offset, + pba_reg, 1); +} + +static void vfio_user_msix_teardown(VFIOPCIDevice *vdev) +{ + MemoryRegion *mr, *sub; + + mr = vdev->bars[vdev->msix->pba_bar].mr; + sub = vdev->msix->pba_region; + memory_region_del_subregion(mr, sub); + + g_free(vdev->msix->pba_region); + vdev->msix->pba_region = NULL; +} + +static void vfio_user_dma_read(VFIOPCIDevice *vdev, VFIOUserDMARW *msg) +{ + PCIDevice *pdev = &vdev->pdev; + VFIOUserProxy *proxy = vdev->vbasedev.proxy; + VFIOUserDMARW *res; + MemTxResult r; + size_t size; + + if (msg->hdr.size < sizeof(*msg)) { + vfio_user_send_error(proxy, &msg->hdr, EINVAL); + return; + } + if (msg->count > proxy->max_xfer_size) { + vfio_user_send_error(proxy, &msg->hdr, E2BIG); + return; + } + + /* switch to our own message buffer */ + size = msg->count + sizeof(VFIOUserDMARW); + res = g_malloc0(size); + memcpy(res, msg, sizeof(*res)); + g_free(msg); + + r = pci_dma_read(pdev, res->offset, &res->data, res->count); + + switch (r) { + case MEMTX_OK: + if (res->hdr.flags & VFIO_USER_NO_REPLY) { + g_free(res); + return; + } + vfio_user_send_reply(proxy, &res->hdr, size); + break; + case MEMTX_ERROR: + vfio_user_send_error(proxy, &res->hdr, EFAULT); + break; + case MEMTX_DECODE_ERROR: + vfio_user_send_error(proxy, &res->hdr, ENODEV); + break; + case MEMTX_ACCESS_ERROR: + vfio_user_send_error(proxy, &res->hdr, EPERM); + break; + default: + error_printf("vfio_user_dma_read unknown error %d\n", r); + vfio_user_send_error(vdev->vbasedev.proxy, &res->hdr, EINVAL); + } +} + +static void vfio_user_dma_write(VFIOPCIDevice *vdev, VFIOUserDMARW *msg) +{ + PCIDevice *pdev = &vdev->pdev; + VFIOUserProxy *proxy = vdev->vbasedev.proxy; + MemTxResult r; + + if (msg->hdr.size < sizeof(*msg)) { + vfio_user_send_error(proxy, &msg->hdr, EINVAL); + return; + } + /* make sure transfer count isn't larger than the message data */ + if (msg->count > msg->hdr.size - sizeof(*msg)) { + vfio_user_send_error(proxy, &msg->hdr, E2BIG); + return; + } + + r = pci_dma_write(pdev, msg->offset, &msg->data, msg->count); + + switch (r) { + case MEMTX_OK: + if ((msg->hdr.flags & VFIO_USER_NO_REPLY) == 0) { + vfio_user_send_reply(proxy, &msg->hdr, sizeof(msg->hdr)); + } else { + g_free(msg); + } + break; + case MEMTX_ERROR: + vfio_user_send_error(proxy, &msg->hdr, EFAULT); + break; + case MEMTX_DECODE_ERROR: + vfio_user_send_error(proxy, &msg->hdr, ENODEV); + break; + case MEMTX_ACCESS_ERROR: + vfio_user_send_error(proxy, &msg->hdr, EPERM); + break; + default: + 
error_printf("vfio_user_dma_write unknown error %d\n", r); + vfio_user_send_error(vdev->vbasedev.proxy, &msg->hdr, EINVAL); + } +} + +/* + * Incoming request message callback. + * + * Runs off main loop, so BQL held. + */ +static void vfio_user_pci_process_req(void *opaque, VFIOUserMsg *msg) +{ + VFIOPCIDevice *vdev = opaque; + VFIOUserHdr *hdr = msg->hdr; + + /* no incoming PCI requests pass FDs */ + if (msg->fds != NULL) { + vfio_user_send_error(vdev->vbasedev.proxy, hdr, EINVAL); + vfio_user_putfds(msg); + return; + } + + switch (hdr->command) { + case VFIO_USER_DMA_READ: + vfio_user_dma_read(vdev, (VFIOUserDMARW *)hdr); + break; + case VFIO_USER_DMA_WRITE: + vfio_user_dma_write(vdev, (VFIOUserDMARW *)hdr); + break; + default: + error_printf("vfio_user_pci_process_req unknown cmd %d\n", + hdr->command); + vfio_user_send_error(vdev->vbasedev.proxy, hdr, ENOSYS); + } +} + +/* + * Emulated devices don't use host hot reset + */ +static void vfio_user_compute_needs_reset(VFIODevice *vbasedev) +{ + vbasedev->needs_reset = false; +} + +static Object *vfio_user_pci_get_object(VFIODevice *vbasedev) +{ + VFIOUserPCIDevice *vdev = container_of(vbasedev, VFIOUserPCIDevice, + device.vbasedev); + + return OBJECT(vdev); +} + +static VFIODeviceOps vfio_user_pci_ops = { + .vfio_compute_needs_reset = vfio_user_compute_needs_reset, + .vfio_eoi = vfio_pci_intx_eoi, + .vfio_get_object = vfio_user_pci_get_object, + /* No live migration support yet. */ + .vfio_save_config = NULL, + .vfio_load_config = NULL, +}; + +static void vfio_user_pci_realize(PCIDevice *pdev, Error **errp) +{ + ERRP_GUARD(); + VFIOUserPCIDevice *udev = VFIO_USER_PCI(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); + VFIODevice *vbasedev = &vdev->vbasedev; + const char *sock_name; + AddressSpace *as; + SocketAddress addr; + VFIOUserProxy *proxy; + + if (!udev->socket) { + error_setg(errp, "No socket specified"); + error_append_hint(errp, "e.g. -device '{" + "\"driver\":\"vfio-user-pci\", " + "\"socket\": {\"path\": \"/tmp/vfio-user.sock\", " + "\"type\": \"unix\"}'" + "}'\n"); + return; + } + + sock_name = udev->socket->u.q_unix.path; + + vbasedev->name = g_strdup_printf("vfio-user:%s", sock_name); + + memset(&addr, 0, sizeof(addr)); + addr.type = SOCKET_ADDRESS_TYPE_UNIX; + addr.u.q_unix.path = (char *)sock_name; + proxy = vfio_user_connect_dev(&addr, errp); + if (!proxy) { + return; + } + vbasedev->proxy = proxy; + vfio_user_set_handler(vbasedev, vfio_user_pci_process_req, vdev); + + vbasedev->name = g_strdup_printf("vfio-user:%s", sock_name); + + if (udev->send_queued) { + proxy->flags |= VFIO_PROXY_FORCE_QUEUED; + } + + if (udev->no_post) { + proxy->flags |= VFIO_PROXY_NO_POST; + } + + /* user specified or 5 sec default */ + proxy->wait_time = udev->wait_time; + + if (!vfio_user_validate_version(proxy, errp)) { + goto error; + } + + /* + * Use socket-based device I/O instead of vfio kernel driver. + */ + vbasedev->io_ops = &vfio_user_device_io_ops_sock; + + /* + * vfio-user devices are effectively mdevs (don't use a host iommu). + */ + vbasedev->mdev = true; + + /* + * Enable per-region fds. 
+ */ + vbasedev->use_region_fds = true; + + as = pci_device_iommu_address_space(pdev); + if (!vfio_device_attach_by_iommu_type(TYPE_VFIO_IOMMU_USER, + vbasedev->name, vbasedev, + as, errp)) { + goto error; + } + + if (!vfio_pci_populate_device(vdev, errp)) { + goto error; + } + + if (!vfio_pci_config_setup(vdev, errp)) { + goto error; + } + + /* + * vfio_pci_config_setup will have registered the device's BARs + * and setup any MSIX BARs, so errors after it succeeds must + * use out_teardown + */ + + if (!vfio_pci_add_capabilities(vdev, errp)) { + goto out_teardown; + } + + if (vdev->msix != NULL) { + vfio_user_msix_setup(vdev); + } + + if (!vfio_pci_interrupt_setup(vdev, errp)) { + goto out_teardown; + } + + vfio_pci_register_err_notifier(vdev); + vfio_pci_register_req_notifier(vdev); + + return; + +out_teardown: + vfio_pci_teardown_msi(vdev); + vfio_pci_bars_exit(vdev); +error: + error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name); + vfio_pci_put_device(vdev); +} + +static void vfio_user_instance_init(Object *obj) +{ + PCIDevice *pci_dev = PCI_DEVICE(obj); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj); + VFIODevice *vbasedev = &vdev->vbasedev; + + device_add_bootindex_property(obj, &vdev->bootindex, + "bootindex", NULL, + &pci_dev->qdev); + vdev->host.domain = ~0U; + vdev->host.bus = ~0U; + vdev->host.slot = ~0U; + vdev->host.function = ~0U; + + vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PCI, &vfio_user_pci_ops, + DEVICE(vdev), false); + + vdev->nv_gpudirect_clique = 0xFF; + + /* + * QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command + * line, therefore, no need to wait to realize like other devices. + */ + pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; +} + +static void vfio_user_instance_finalize(Object *obj) +{ + VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj); + VFIODevice *vbasedev = &vdev->vbasedev; + + if (vdev->msix != NULL) { + vfio_user_msix_teardown(vdev); + } + + vfio_pci_put_device(vdev); + + if (vbasedev->proxy != NULL) { + vfio_user_disconnect(vbasedev->proxy); + } +} + +static void vfio_user_pci_reset(DeviceState *dev) +{ + VFIOPCIDevice *vdev = VFIO_PCI_BASE(dev); + VFIODevice *vbasedev = &vdev->vbasedev; + + vfio_pci_pre_reset(vdev); + + if (vbasedev->reset_works) { + vfio_user_device_reset(vbasedev->proxy); + } + + vfio_pci_post_reset(vdev); +} + +static const Property vfio_user_pci_dev_properties[] = { + DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, + vendor_id, PCI_ANY_ID), + DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, + device_id, PCI_ANY_ID), + DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice, + sub_vendor_id, PCI_ANY_ID), + DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice, + sub_device_id, PCI_ANY_ID), + DEFINE_PROP_BOOL("x-send-queued", VFIOUserPCIDevice, send_queued, false), + DEFINE_PROP_UINT32("x-msg-timeout", VFIOUserPCIDevice, wait_time, 5000), + DEFINE_PROP_BOOL("x-no-posted-writes", VFIOUserPCIDevice, no_post, false), +}; + +static void vfio_user_pci_set_socket(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + VFIOUserPCIDevice *udev = VFIO_USER_PCI(obj); + bool success; + + if (udev->device.vbasedev.proxy) { + error_setg(errp, "Proxy is connected"); + return; + } + + qapi_free_SocketAddress(udev->socket); + + udev->socket = NULL; + + success = visit_type_SocketAddress(v, name, &udev->socket, errp); + + if (!success) { + return; + } + + if (udev->socket->type != SOCKET_ADDRESS_TYPE_UNIX) { + error_setg(errp, "Unsupported socket type %s", + SocketAddressType_str(udev->socket->type)); + 
qapi_free_SocketAddress(udev->socket); + udev->socket = NULL; + return; + } +} + +static void vfio_user_pci_dev_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass); + + device_class_set_legacy_reset(dc, vfio_user_pci_reset); + device_class_set_props(dc, vfio_user_pci_dev_properties); + + object_class_property_add(klass, "socket", "SocketAddress", NULL, + vfio_user_pci_set_socket, NULL, NULL); + object_class_property_set_description(klass, "socket", + "SocketAddress (UNIX sockets only)"); + + dc->desc = "VFIO over socket PCI device assignment"; + pdc->realize = vfio_user_pci_realize; +} + +static const TypeInfo vfio_user_pci_dev_info = { + .name = TYPE_VFIO_USER_PCI, + .parent = TYPE_VFIO_PCI_BASE, + .instance_size = sizeof(VFIOUserPCIDevice), + .class_init = vfio_user_pci_dev_class_init, + .instance_init = vfio_user_instance_init, + .instance_finalize = vfio_user_instance_finalize, +}; + +static void register_vfio_user_dev_type(void) +{ + type_register_static(&vfio_user_pci_dev_info); +} + + type_init(register_vfio_user_dev_type) diff --git a/hw/vfio-user/protocol.h b/hw/vfio-user/protocol.h new file mode 100644 index 00000000000..3249a4a6b62 --- /dev/null +++ b/hw/vfio-user/protocol.h @@ -0,0 +1,242 @@ +#ifndef VFIO_USER_PROTOCOL_H +#define VFIO_USER_PROTOCOL_H + +/* + * vfio protocol over a UNIX socket. + * + * Copyright © 2018, 2021 Oracle and/or its affiliates. + * + * Each message has a standard header that describes the command + * being sent, which is almost always a VFIO ioctl(). + * + * The header may be followed by command-specific data, such as the + * region and offset info for read and write commands. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +typedef struct { + uint16_t id; + uint16_t command; + uint32_t size; + uint32_t flags; + uint32_t error_reply; +} VFIOUserHdr; + +/* VFIOUserHdr commands */ +enum vfio_user_command { + VFIO_USER_VERSION = 1, + VFIO_USER_DMA_MAP = 2, + VFIO_USER_DMA_UNMAP = 3, + VFIO_USER_DEVICE_GET_INFO = 4, + VFIO_USER_DEVICE_GET_REGION_INFO = 5, + VFIO_USER_DEVICE_GET_REGION_IO_FDS = 6, + VFIO_USER_DEVICE_GET_IRQ_INFO = 7, + VFIO_USER_DEVICE_SET_IRQS = 8, + VFIO_USER_REGION_READ = 9, + VFIO_USER_REGION_WRITE = 10, + VFIO_USER_DMA_READ = 11, + VFIO_USER_DMA_WRITE = 12, + VFIO_USER_DEVICE_RESET = 13, + VFIO_USER_DIRTY_PAGES = 14, + VFIO_USER_REGION_WRITE_MULTI = 15, + VFIO_USER_MAX, +}; + +/* VFIOUserHdr flags */ +#define VFIO_USER_REQUEST 0x0 +#define VFIO_USER_REPLY 0x1 +#define VFIO_USER_TYPE 0xF + +#define VFIO_USER_NO_REPLY 0x10 +#define VFIO_USER_ERROR 0x20 + + +/* + * VFIO_USER_VERSION + */ +typedef struct { + VFIOUserHdr hdr; + uint16_t major; + uint16_t minor; + char capabilities[]; +} VFIOUserVersion; + +#define VFIO_USER_MAJOR_VER 0 +#define VFIO_USER_MINOR_VER 0 + +#define VFIO_USER_CAP "capabilities" + +/* "capabilities" members */ +#define VFIO_USER_CAP_MAX_FDS "max_msg_fds" +#define VFIO_USER_CAP_MAX_XFER "max_data_xfer_size" +#define VFIO_USER_CAP_PGSIZES "pgsizes" +#define VFIO_USER_CAP_MAP_MAX "max_dma_maps" +#define VFIO_USER_CAP_MIGR "migration" +#define VFIO_USER_CAP_MULTI "write_multiple" + +/* "migration" members */ +#define VFIO_USER_CAP_PGSIZE "pgsize" +#define VFIO_USER_CAP_MAX_BITMAP "max_bitmap_size" + +/* + * Max FDs mainly comes into play when a device supports multiple interrupts + * where each ones uses an eventfd to inject it into the guest. 
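Putting the header fields and flag bits together, a hand-rolled exchange would look roughly like this (illustrative only; the code in this series always builds headers through vfio_user_request_msg() and the proxy, and the id value here is made up):

    VFIOUserHdr req = {
        .id      = 42,                          /* sequence number           */
        .command = VFIO_USER_DEVICE_GET_INFO,
        .size    = sizeof(VFIOUserDeviceInfo),  /* header plus payload bytes */
        .flags   = VFIO_USER_REQUEST,           /* a reply is expected       */
    };

    /* on the matching reply (same id): */
    if ((reply.flags & VFIO_USER_TYPE) == VFIO_USER_REPLY &&
        (reply.flags & VFIO_USER_ERROR)) {
        ret = -reply.error_reply;               /* errno reported by server  */
    }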
+ * It is clamped by the the number of FDs the qio channel supports in a + * single message. + */ +#define VFIO_USER_DEF_MAX_FDS 8 +#define VFIO_USER_MAX_MAX_FDS 16 + +/* + * Max transfer limits the amount of data in region and DMA messages. + * Region R/W will be very small (limited by how much a single instruction + * can process) so just use a reasonable limit here. + */ +#define VFIO_USER_DEF_MAX_XFER (1024 * 1024) +#define VFIO_USER_MAX_MAX_XFER (64 * 1024 * 1024) + +/* + * Default pagesizes supported is 4k. + */ +#define VFIO_USER_DEF_PGSIZE 4096 + +/* + * Default max number of DMA mappings is stolen from the + * linux kernel "dma_entry_limit" + */ +#define VFIO_USER_DEF_MAP_MAX 65535 + +/* + * Default max bitmap size is also take from the linux kernel, + * where usage of signed ints limits the VA range to 2^31 bytes. + * Dividing that by the number of bits per byte yields 256MB + */ +#define VFIO_USER_DEF_MAX_BITMAP (256 * 1024 * 1024) + +/* + * VFIO_USER_DMA_MAP + * imported from struct vfio_iommu_type1_dma_map + */ +typedef struct { + VFIOUserHdr hdr; + uint32_t argsz; + uint32_t flags; + uint64_t offset; /* FD offset */ + uint64_t iova; + uint64_t size; +} VFIOUserDMAMap; + +/* + * VFIO_USER_DMA_UNMAP + * imported from struct vfio_iommu_type1_dma_unmap + */ +typedef struct { + VFIOUserHdr hdr; + uint32_t argsz; + uint32_t flags; + uint64_t iova; + uint64_t size; +} VFIOUserDMAUnmap; + +/* + * VFIO_USER_DEVICE_GET_INFO + * imported from struct vfio_device_info + */ +typedef struct { + VFIOUserHdr hdr; + uint32_t argsz; + uint32_t flags; + uint32_t num_regions; + uint32_t num_irqs; +} VFIOUserDeviceInfo; + +/* + * VFIO_USER_DEVICE_GET_REGION_INFO + * imported from struct vfio_region_info + */ +typedef struct { + VFIOUserHdr hdr; + uint32_t argsz; + uint32_t flags; + uint32_t index; + uint32_t cap_offset; + uint64_t size; + uint64_t offset; +} VFIOUserRegionInfo; + +/* + * VFIO_USER_DEVICE_GET_IRQ_INFO + * imported from struct vfio_irq_info + */ +typedef struct { + VFIOUserHdr hdr; + uint32_t argsz; + uint32_t flags; + uint32_t index; + uint32_t count; +} VFIOUserIRQInfo; + +/* + * VFIO_USER_DEVICE_SET_IRQS + * imported from struct vfio_irq_set + */ +typedef struct { + VFIOUserHdr hdr; + uint32_t argsz; + uint32_t flags; + uint32_t index; + uint32_t start; + uint32_t count; +} VFIOUserIRQSet; + +/* + * VFIO_USER_REGION_READ + * VFIO_USER_REGION_WRITE + */ +typedef struct { + VFIOUserHdr hdr; + uint64_t offset; + uint32_t region; + uint32_t count; + char data[]; +} VFIOUserRegionRW; + +/* + * VFIO_USER_DMA_READ + * VFIO_USER_DMA_WRITE + */ +typedef struct { + VFIOUserHdr hdr; + uint64_t offset; + uint32_t count; + char data[]; +} VFIOUserDMARW; + +/* imported from struct vfio_bitmap */ +typedef struct { + uint64_t pgsize; + uint64_t size; + char data[]; +} VFIOUserBitmap; + +/* + * VFIO_USER_REGION_WRITE_MULTI + */ +#define VFIO_USER_MULTI_DATA 8 +#define VFIO_USER_MULTI_MAX 200 + +typedef struct { + uint64_t offset; + uint32_t region; + uint32_t count; + char data[VFIO_USER_MULTI_DATA]; +} VFIOUserWROne; + +typedef struct { + VFIOUserHdr hdr; + uint64_t wr_cnt; + VFIOUserWROne wrs[VFIO_USER_MULTI_MAX]; +} VFIOUserWRMulti; + +#endif /* VFIO_USER_PROTOCOL_H */ diff --git a/hw/vfio-user/proxy.c b/hw/vfio-user/proxy.c new file mode 100644 index 00000000000..2275d3fe395 --- /dev/null +++ b/hw/vfio-user/proxy.c @@ -0,0 +1,1358 @@ +/* + * vfio protocol over a UNIX socket. + * + * Copyright © 2018, 2021 Oracle and/or its affiliates. 
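The defaults and hard limits above are what the version negotiation in this file works against. A hedged sketch of how one such capability might be bounded; the helper name and logic are assumptions, not code from this patch:

    static uint32_t bound_max_fds(uint64_t server_value)
    {
        if (server_value == 0) {
            return VFIO_USER_DEF_MAX_FDS;          /* capability not offered */
        }
        return MIN(server_value, VFIO_USER_MAX_MAX_FDS);
    }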
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include + +#include "hw/vfio/vfio-device.h" +#include "hw/vfio-user/proxy.h" +#include "hw/vfio-user/trace.h" +#include "qapi/error.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" +#include "qobject/qjson.h" +#include "qobject/qnum.h" +#include "qemu/error-report.h" +#include "qemu/lockable.h" +#include "qemu/main-loop.h" +#include "qemu/thread.h" +#include "system/iothread.h" + +static IOThread *vfio_user_iothread; + +static void vfio_user_shutdown(VFIOUserProxy *proxy); +static VFIOUserMsg *vfio_user_getmsg(VFIOUserProxy *proxy, VFIOUserHdr *hdr, + VFIOUserFDs *fds); +static void vfio_user_recycle(VFIOUserProxy *proxy, VFIOUserMsg *msg); + +static void vfio_user_recv(void *opaque); +static void vfio_user_send(void *opaque); + +static void vfio_user_request(void *opaque); + +static inline void vfio_user_set_error(VFIOUserHdr *hdr, uint32_t err) +{ + hdr->flags |= VFIO_USER_ERROR; + hdr->error_reply = err; +} + +/* + * Functions called by main, CPU, or iothread threads + */ + +static void vfio_user_shutdown(VFIOUserProxy *proxy) +{ + qio_channel_shutdown(proxy->ioc, QIO_CHANNEL_SHUTDOWN_READ, NULL); + qio_channel_set_aio_fd_handler(proxy->ioc, proxy->ctx, NULL, + proxy->ctx, NULL, NULL); +} + +/* + * Same return values as qio_channel_writev_full(): + * + * QIO_CHANNEL_ERR_BLOCK: *errp not set + * -1: *errp will be populated + * otherwise: bytes written + */ +static ssize_t vfio_user_send_qio(VFIOUserProxy *proxy, VFIOUserMsg *msg, + Error **errp) +{ + VFIOUserFDs *fds = msg->fds; + struct iovec iov = { + .iov_base = msg->hdr, + .iov_len = msg->hdr->size, + }; + size_t numfds = 0; + int *fdp = NULL; + ssize_t ret; + + if (fds != NULL && fds->send_fds != 0) { + numfds = fds->send_fds; + fdp = fds->fds; + } + + ret = qio_channel_writev_full(proxy->ioc, &iov, 1, fdp, numfds, 0, errp); + + if (ret == -1) { + vfio_user_set_error(msg->hdr, EIO); + vfio_user_shutdown(proxy); + } + trace_vfio_user_send_write(msg->hdr->id, ret); + + return ret; +} + +static VFIOUserMsg *vfio_user_getmsg(VFIOUserProxy *proxy, VFIOUserHdr *hdr, + VFIOUserFDs *fds) +{ + VFIOUserMsg *msg; + + msg = QTAILQ_FIRST(&proxy->free); + if (msg != NULL) { + QTAILQ_REMOVE(&proxy->free, msg, next); + } else { + msg = g_malloc0(sizeof(*msg)); + qemu_cond_init(&msg->cv); + } + + msg->hdr = hdr; + msg->fds = fds; + return msg; +} + +/* + * Recycle a message list entry to the free list. + */ +static void vfio_user_recycle(VFIOUserProxy *proxy, VFIOUserMsg *msg) +{ + if (msg->type == VFIO_MSG_NONE) { + error_printf("vfio_user_recycle - freeing free msg\n"); + return; + } + + /* free msg buffer if no one is waiting to consume the reply */ + if (msg->type == VFIO_MSG_NOWAIT || msg->type == VFIO_MSG_ASYNC) { + g_free(msg->hdr); + if (msg->fds != NULL) { + g_free(msg->fds); + } + } + + msg->type = VFIO_MSG_NONE; + msg->hdr = NULL; + msg->fds = NULL; + msg->complete = false; + msg->pending = false; + QTAILQ_INSERT_HEAD(&proxy->free, msg, next); +} + +VFIOUserFDs *vfio_user_getfds(int numfds) +{ + VFIOUserFDs *fds = g_malloc0(sizeof(*fds) + (numfds * sizeof(int))); + + fds->fds = (int *)((char *)fds + sizeof(*fds)); + + return fds; +} + +/* + * Functions only called by iothread + */ + +/* + * Process a received message. + */ +static void vfio_user_process(VFIOUserProxy *proxy, VFIOUserMsg *msg, + bool isreply) +{ + + /* + * Replies signal a waiter, if none just check for errors + * and free the message buffer. 
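Summarising the ownership rules implemented below (descriptive only, no new behaviour):

    /*
     *  VFIO_MSG_WAIT:   a caller is blocked in vfio_user_send_wait(); signal
     *                   its condition variable and let it free the buffer.
     *  VFIO_MSG_NOWAIT /
     *  VFIO_MSG_ASYNC:  nobody is waiting; report any VFIO_USER_ERROR, then
     *                   vfio_user_recycle() frees hdr/fds and reuses the slot.
     *  requests:        queued on proxy->incoming and handled from req_bh.
     */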
+ * + * Requests get queued for the BH. + */ + if (isreply) { + msg->complete = true; + if (msg->type == VFIO_MSG_WAIT) { + qemu_cond_signal(&msg->cv); + } else { + if (msg->hdr->flags & VFIO_USER_ERROR) { + error_printf("vfio_user_process: error reply on async "); + error_printf("request command %x error %s\n", + msg->hdr->command, + strerror(msg->hdr->error_reply)); + } + /* youngest nowait msg has been ack'd */ + if (proxy->last_nowait == msg) { + proxy->last_nowait = NULL; + } + vfio_user_recycle(proxy, msg); + } + } else { + QTAILQ_INSERT_TAIL(&proxy->incoming, msg, next); + qemu_bh_schedule(proxy->req_bh); + } +} + +/* + * Complete a partial message read + */ +static int vfio_user_complete(VFIOUserProxy *proxy, Error **errp) +{ + VFIOUserMsg *msg = proxy->part_recv; + size_t msgleft = proxy->recv_left; + bool isreply; + char *data; + int ret; + + data = (char *)msg->hdr + (msg->hdr->size - msgleft); + while (msgleft > 0) { + ret = qio_channel_read(proxy->ioc, data, msgleft, errp); + + /* error or would block */ + if (ret <= 0) { + /* try for rest on next iternation */ + if (ret == QIO_CHANNEL_ERR_BLOCK) { + proxy->recv_left = msgleft; + } + return ret; + } + trace_vfio_user_recv_read(msg->hdr->id, ret); + + msgleft -= ret; + data += ret; + } + + /* + * Read complete message, process it. + */ + proxy->part_recv = NULL; + proxy->recv_left = 0; + isreply = (msg->hdr->flags & VFIO_USER_TYPE) == VFIO_USER_REPLY; + vfio_user_process(proxy, msg, isreply); + + /* return positive value */ + return 1; +} + +/* + * Receive and process one incoming message. + * + * For replies, find matching outgoing request and wake any waiters. + * For requests, queue in incoming list and run request BH. + */ +static int vfio_user_recv_one(VFIOUserProxy *proxy, Error **errp) +{ + VFIOUserMsg *msg = NULL; + g_autofree int *fdp = NULL; + VFIOUserFDs *reqfds; + VFIOUserHdr hdr; + struct iovec iov = { + .iov_base = &hdr, + .iov_len = sizeof(hdr), + }; + bool isreply = false; + int i, ret; + size_t msgleft, numfds = 0; + char *data = NULL; + char *buf = NULL; + + /* + * Complete any partial reads + */ + if (proxy->part_recv != NULL) { + ret = vfio_user_complete(proxy, errp); + + /* still not complete, try later */ + if (ret == QIO_CHANNEL_ERR_BLOCK) { + return ret; + } + + if (ret <= 0) { + goto fatal; + } + /* else fall into reading another msg */ + } + + /* + * Read header + */ + ret = qio_channel_readv_full(proxy->ioc, &iov, 1, &fdp, &numfds, 0, + errp); + if (ret == QIO_CHANNEL_ERR_BLOCK) { + return ret; + } + + /* read error or other side closed connection */ + if (ret <= 0) { + goto fatal; + } + + if (ret < sizeof(hdr)) { + error_setg(errp, "short read of header"); + goto fatal; + } + + /* + * Validate header + */ + if (hdr.size < sizeof(VFIOUserHdr)) { + error_setg(errp, "bad header size"); + goto fatal; + } + switch (hdr.flags & VFIO_USER_TYPE) { + case VFIO_USER_REQUEST: + isreply = false; + break; + case VFIO_USER_REPLY: + isreply = true; + break; + default: + error_setg(errp, "unknown message type"); + goto fatal; + } + trace_vfio_user_recv_hdr(proxy->sockname, hdr.id, hdr.command, hdr.size, + hdr.flags); + + /* + * For replies, find the matching pending request. + * For requests, reap incoming FDs. 
+ */ + if (isreply) { + QTAILQ_FOREACH(msg, &proxy->pending, next) { + if (hdr.id == msg->id) { + break; + } + } + if (msg == NULL) { + error_setg(errp, "unexpected reply"); + goto err; + } + QTAILQ_REMOVE(&proxy->pending, msg, next); + + /* + * Process any received FDs + */ + if (numfds != 0) { + if (msg->fds == NULL || msg->fds->recv_fds < numfds) { + error_setg(errp, "unexpected FDs"); + goto err; + } + msg->fds->recv_fds = numfds; + memcpy(msg->fds->fds, fdp, numfds * sizeof(int)); + } + } else { + if (numfds != 0) { + reqfds = vfio_user_getfds(numfds); + memcpy(reqfds->fds, fdp, numfds * sizeof(int)); + } else { + reqfds = NULL; + } + } + + /* + * Put the whole message into a single buffer. + */ + if (isreply) { + if (hdr.size > msg->rsize) { + error_setg(errp, "reply larger than recv buffer"); + goto err; + } + *msg->hdr = hdr; + data = (char *)msg->hdr + sizeof(hdr); + } else { + if (hdr.size > proxy->max_xfer_size + sizeof(VFIOUserDMARW)) { + error_setg(errp, "vfio_user_recv request larger than max"); + goto err; + } + buf = g_malloc0(hdr.size); + memcpy(buf, &hdr, sizeof(hdr)); + data = buf + sizeof(hdr); + msg = vfio_user_getmsg(proxy, (VFIOUserHdr *)buf, reqfds); + msg->type = VFIO_MSG_REQ; + } + + /* + * Read rest of message. + */ + msgleft = hdr.size - sizeof(hdr); + while (msgleft > 0) { + ret = qio_channel_read(proxy->ioc, data, msgleft, errp); + + /* prepare to complete read on next iternation */ + if (ret == QIO_CHANNEL_ERR_BLOCK) { + proxy->part_recv = msg; + proxy->recv_left = msgleft; + return ret; + } + + if (ret <= 0) { + goto fatal; + } + trace_vfio_user_recv_read(hdr.id, ret); + + msgleft -= ret; + data += ret; + } + + vfio_user_process(proxy, msg, isreply); + return 0; + + /* + * fatal means the other side closed or we don't trust the stream + * err means this message is corrupt + */ +fatal: + vfio_user_shutdown(proxy); + proxy->state = VFIO_PROXY_ERROR; + + /* set error if server side closed */ + if (ret == 0) { + error_setg(errp, "server closed socket"); + } + +err: + for (i = 0; i < numfds; i++) { + close(fdp[i]); + } + if (isreply && msg != NULL) { + /* force an error to keep sending thread from hanging */ + vfio_user_set_error(msg->hdr, EINVAL); + msg->complete = true; + qemu_cond_signal(&msg->cv); + } + return -1; +} + +static void vfio_user_recv(void *opaque) +{ + VFIOUserProxy *proxy = opaque; + + QEMU_LOCK_GUARD(&proxy->lock); + + if (proxy->state == VFIO_PROXY_CONNECTED) { + Error *local_err = NULL; + + while (vfio_user_recv_one(proxy, &local_err) == 0) { + ; + } + + if (local_err != NULL) { + error_report_err(local_err); + } + } +} + +/* + * Send a single message, same return semantics as vfio_user_send_qio(). + * + * Sent async messages are freed, others are moved to pending queue. + */ +static ssize_t vfio_user_send_one(VFIOUserProxy *proxy, Error **errp) +{ + VFIOUserMsg *msg; + ssize_t ret; + + msg = QTAILQ_FIRST(&proxy->outgoing); + ret = vfio_user_send_qio(proxy, msg, errp); + if (ret < 0) { + return ret; + } + + QTAILQ_REMOVE(&proxy->outgoing, msg, next); + proxy->num_outgoing--; + if (msg->type == VFIO_MSG_ASYNC) { + vfio_user_recycle(proxy, msg); + } else { + QTAILQ_INSERT_TAIL(&proxy->pending, msg, next); + msg->pending = true; + } + + return ret; +} + +/* + * Send messages from outgoing queue when the socket buffer has space. + * If we deplete 'outgoing', remove ourselves from the poll list. 
+ */ +static void vfio_user_send(void *opaque) +{ + VFIOUserProxy *proxy = opaque; + + QEMU_LOCK_GUARD(&proxy->lock); + + if (proxy->state == VFIO_PROXY_CONNECTED) { + while (!QTAILQ_EMPTY(&proxy->outgoing)) { + Error *local_err = NULL; + int ret; + + ret = vfio_user_send_one(proxy, &local_err); + + if (ret == QIO_CHANNEL_ERR_BLOCK) { + return; + } else if (ret == -1) { + error_report_err(local_err); + return; + } + } + qio_channel_set_aio_fd_handler(proxy->ioc, proxy->ctx, + vfio_user_recv, NULL, NULL, proxy); + + /* queue empty - send any pending multi write msgs */ + if (proxy->wr_multi != NULL) { + vfio_user_flush_multi(proxy); + } + } +} + +static void vfio_user_close_cb(void *opaque) +{ + VFIOUserProxy *proxy = opaque; + + QEMU_LOCK_GUARD(&proxy->lock); + + proxy->state = VFIO_PROXY_CLOSED; + qemu_cond_signal(&proxy->close_cv); +} + + +/* + * Functions called by main or CPU threads + */ + +/* + * Process incoming requests. + * + * The bus-specific callback has the form: + * request(opaque, msg) + * where 'opaque' was specified in vfio_user_set_handler + * and 'msg' is the inbound message. + * + * The callback is responsible for disposing of the message buffer, + * usually by re-using it when calling vfio_send_reply or vfio_send_error, + * both of which free their message buffer when the reply is sent. + * + * If the callback uses a new buffer, it needs to free the old one. + */ +static void vfio_user_request(void *opaque) +{ + VFIOUserProxy *proxy = opaque; + VFIOUserMsgQ new, free; + VFIOUserMsg *msg, *m1; + + /* reap all incoming */ + QTAILQ_INIT(&new); + WITH_QEMU_LOCK_GUARD(&proxy->lock) { + QTAILQ_FOREACH_SAFE(msg, &proxy->incoming, next, m1) { + QTAILQ_REMOVE(&proxy->incoming, msg, next); + QTAILQ_INSERT_TAIL(&new, msg, next); + } + } + + /* process list */ + QTAILQ_INIT(&free); + QTAILQ_FOREACH_SAFE(msg, &new, next, m1) { + QTAILQ_REMOVE(&new, msg, next); + trace_vfio_user_recv_request(msg->hdr->command); + proxy->request(proxy->req_arg, msg); + QTAILQ_INSERT_HEAD(&free, msg, next); + } + + /* free list */ + WITH_QEMU_LOCK_GUARD(&proxy->lock) { + QTAILQ_FOREACH_SAFE(msg, &free, next, m1) { + vfio_user_recycle(proxy, msg); + } + } +} + +/* + * Messages are queued onto the proxy's outgoing list. + * + * It handles 3 types of messages: + * + * async messages - replies and posted writes + * + * There will be no reply from the server, so message + * buffers are freed after they're sent. + * + * nowait messages - map/unmap during address space transactions + * + * These are also sent async, but a reply is expected so that + * vfio_wait_reqs() can wait for the youngest nowait request. + * They transition from the outgoing list to the pending list + * when sent, and are freed when the reply is received. + * + * wait messages - all other requests + * + * The reply to these messages is waited for by their caller. + * They also transition from outgoing to pending when sent, but + * the message buffer is returned to the caller with the reply + * contents. The caller is responsible for freeing these messages. + * + * As an optimization, if the outgoing list and the socket send + * buffer are empty, the message is sent inline instead of being + * added to the outgoing list. The rest of the transitions are + * unchanged. 
+ */ +static bool vfio_user_send_queued(VFIOUserProxy *proxy, VFIOUserMsg *msg, + Error **errp) +{ + int ret; + + /* older coalesced writes go first */ + if (proxy->wr_multi != NULL && + ((msg->hdr->flags & VFIO_USER_TYPE) == VFIO_USER_REQUEST)) { + vfio_user_flush_multi(proxy); + } + + /* + * Unsent outgoing msgs - add to tail + */ + if (!QTAILQ_EMPTY(&proxy->outgoing)) { + QTAILQ_INSERT_TAIL(&proxy->outgoing, msg, next); + proxy->num_outgoing++; + return true; + } + + /* + * Try inline - if blocked, queue it and kick send poller + */ + if (proxy->flags & VFIO_PROXY_FORCE_QUEUED) { + ret = QIO_CHANNEL_ERR_BLOCK; + } else { + ret = vfio_user_send_qio(proxy, msg, errp); + } + + if (ret == QIO_CHANNEL_ERR_BLOCK) { + QTAILQ_INSERT_HEAD(&proxy->outgoing, msg, next); + proxy->num_outgoing = 1; + qio_channel_set_aio_fd_handler(proxy->ioc, proxy->ctx, + vfio_user_recv, proxy->ctx, + vfio_user_send, proxy); + return true; + } + if (ret == -1) { + return false; + } + + /* + * Sent - free async, add others to pending + */ + if (msg->type == VFIO_MSG_ASYNC) { + vfio_user_recycle(proxy, msg); + } else { + QTAILQ_INSERT_TAIL(&proxy->pending, msg, next); + msg->pending = true; + } + + return true; +} + +/* + * nowait send - vfio_wait_reqs() can wait for it later + * + * Returns false if we did not successfully receive a reply message, in which + * case @errp will be populated. + * + * In either case, ownership of @hdr and @fds is taken, and the caller must + * *not* free them itself. + */ +bool vfio_user_send_nowait(VFIOUserProxy *proxy, VFIOUserHdr *hdr, + VFIOUserFDs *fds, int rsize, Error **errp) +{ + VFIOUserMsg *msg; + + QEMU_LOCK_GUARD(&proxy->lock); + + msg = vfio_user_getmsg(proxy, hdr, fds); + msg->id = hdr->id; + msg->rsize = rsize ? rsize : hdr->size; + msg->type = VFIO_MSG_NOWAIT; + + if (hdr->flags & VFIO_USER_NO_REPLY) { + error_setg_errno(errp, EINVAL, "%s on NO_REPLY message", __func__); + vfio_user_recycle(proxy, msg); + return false; + } + + if (!vfio_user_send_queued(proxy, msg, errp)) { + vfio_user_recycle(proxy, msg); + return false; + } + + proxy->last_nowait = msg; + + return true; +} + +/* + * Returns false if we did not successfully receive a reply message, in which + * case @errp will be populated. + * + * In either case, the caller must free @hdr and @fds if needed. + */ +bool vfio_user_send_wait(VFIOUserProxy *proxy, VFIOUserHdr *hdr, + VFIOUserFDs *fds, int rsize, Error **errp) +{ + VFIOUserMsg *msg; + bool ok = false; + + if (hdr->flags & VFIO_USER_NO_REPLY) { + error_setg_errno(errp, EINVAL, "%s on NO_REPLY message", __func__); + return false; + } + + qemu_mutex_lock(&proxy->lock); + + msg = vfio_user_getmsg(proxy, hdr, fds); + msg->id = hdr->id; + msg->rsize = rsize ? rsize : hdr->size; + msg->type = VFIO_MSG_WAIT; + + ok = vfio_user_send_queued(proxy, msg, errp); + + if (ok) { + while (!msg->complete) { + if (!qemu_cond_timedwait(&msg->cv, &proxy->lock, + proxy->wait_time)) { + VFIOUserMsgQ *list; + + list = msg->pending ? &proxy->pending : &proxy->outgoing; + QTAILQ_REMOVE(list, msg, next); + error_setg_errno(errp, ETIMEDOUT, + "timed out waiting for reply"); + ok = false; + break; + } + } + } + + vfio_user_recycle(proxy, msg); + + qemu_mutex_unlock(&proxy->lock); + + return ok; +} + +/* + * async send - msg can be queued, but will be freed when sent + * + * Returns false on failure, in which case @errp will be populated. + * + * In either case, ownership of @hdr and @fds is taken, and the caller must + * *not* free them itself. 
+ */ +bool vfio_user_send_async(VFIOUserProxy *proxy, VFIOUserHdr *hdr, + VFIOUserFDs *fds, Error **errp) +{ + VFIOUserMsg *msg; + + QEMU_LOCK_GUARD(&proxy->lock); + + msg = vfio_user_getmsg(proxy, hdr, fds); + msg->id = hdr->id; + msg->rsize = 0; + msg->type = VFIO_MSG_ASYNC; + + if (!(hdr->flags & (VFIO_USER_NO_REPLY | VFIO_USER_REPLY))) { + error_setg_errno(errp, EINVAL, "%s on sync message", __func__); + vfio_user_recycle(proxy, msg); + return false; + } + + if (!vfio_user_send_queued(proxy, msg, errp)) { + vfio_user_recycle(proxy, msg); + return false; + } + + return true; +} + +void vfio_user_wait_reqs(VFIOUserProxy *proxy) +{ + VFIOUserMsg *msg; + + /* + * Any DMA map/unmap requests sent in the middle + * of a memory region transaction were sent nowait. + * Wait for them here. + */ + qemu_mutex_lock(&proxy->lock); + if (proxy->last_nowait != NULL) { + /* + * Change type to WAIT to wait for reply + */ + msg = proxy->last_nowait; + msg->type = VFIO_MSG_WAIT; + proxy->last_nowait = NULL; + while (!msg->complete) { + if (!qemu_cond_timedwait(&msg->cv, &proxy->lock, + proxy->wait_time)) { + VFIOUserMsgQ *list; + + list = msg->pending ? &proxy->pending : &proxy->outgoing; + QTAILQ_REMOVE(list, msg, next); + error_printf("vfio_wait_reqs - timed out\n"); + break; + } + } + + if (msg->hdr->flags & VFIO_USER_ERROR) { + error_printf("vfio_user_wait_reqs - error reply on async "); + error_printf("request: command %x error %s\n", msg->hdr->command, + strerror(msg->hdr->error_reply)); + } + + /* + * Change type back to NOWAIT to free + */ + msg->type = VFIO_MSG_NOWAIT; + vfio_user_recycle(proxy, msg); + } + + qemu_mutex_unlock(&proxy->lock); +} + +/* + * Reply to an incoming request. + */ +void vfio_user_send_reply(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int size) +{ + Error *local_err = NULL; + + if (size < sizeof(VFIOUserHdr)) { + error_printf("%s: size too small", __func__); + g_free(hdr); + return; + } + + /* + * convert header to associated reply + */ + hdr->flags = VFIO_USER_REPLY; + hdr->size = size; + + if (!vfio_user_send_async(proxy, hdr, NULL, &local_err)) { + error_report_err(local_err); + } +} + +/* + * Send an error reply to an incoming request. + */ +void vfio_user_send_error(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int error) +{ + Error *local_err = NULL; + + /* + * convert header to associated reply + */ + hdr->flags = VFIO_USER_REPLY; + hdr->flags |= VFIO_USER_ERROR; + hdr->error_reply = error; + hdr->size = sizeof(*hdr); + + if (!vfio_user_send_async(proxy, hdr, NULL, &local_err)) { + error_report_err(local_err); + } +} + +/* + * Close FDs erroneously received in an incoming request. 
+ */ +void vfio_user_putfds(VFIOUserMsg *msg) +{ + VFIOUserFDs *fds = msg->fds; + int i; + + for (i = 0; i < fds->recv_fds; i++) { + close(fds->fds[i]); + } + g_free(fds); + msg->fds = NULL; +} + +void +vfio_user_disable_posted_writes(VFIOUserProxy *proxy) +{ + WITH_QEMU_LOCK_GUARD(&proxy->lock) { + proxy->flags |= VFIO_PROXY_NO_POST; + } +} + +static QLIST_HEAD(, VFIOUserProxy) vfio_user_sockets = + QLIST_HEAD_INITIALIZER(vfio_user_sockets); + +VFIOUserProxy *vfio_user_connect_dev(SocketAddress *addr, Error **errp) +{ + VFIOUserProxy *proxy; + QIOChannelSocket *sioc; + QIOChannel *ioc; + char *sockname; + + if (addr->type != SOCKET_ADDRESS_TYPE_UNIX) { + error_setg(errp, "vfio_user_connect - bad address family"); + return NULL; + } + sockname = addr->u.q_unix.path; + + sioc = qio_channel_socket_new(); + ioc = QIO_CHANNEL(sioc); + if (qio_channel_socket_connect_sync(sioc, addr, errp)) { + object_unref(OBJECT(ioc)); + return NULL; + } + qio_channel_set_blocking(ioc, false, NULL); + + proxy = g_malloc0(sizeof(VFIOUserProxy)); + proxy->sockname = g_strdup_printf("unix:%s", sockname); + proxy->ioc = ioc; + + /* init defaults */ + proxy->max_xfer_size = VFIO_USER_DEF_MAX_XFER; + proxy->max_send_fds = VFIO_USER_DEF_MAX_FDS; + proxy->max_dma = VFIO_USER_DEF_MAP_MAX; + proxy->dma_pgsizes = VFIO_USER_DEF_PGSIZE; + proxy->max_bitmap = VFIO_USER_DEF_MAX_BITMAP; + proxy->migr_pgsize = VFIO_USER_DEF_PGSIZE; + + proxy->flags = VFIO_PROXY_CLIENT; + proxy->state = VFIO_PROXY_CONNECTED; + + qemu_mutex_init(&proxy->lock); + qemu_cond_init(&proxy->close_cv); + + if (vfio_user_iothread == NULL) { + vfio_user_iothread = iothread_create("VFIO user", errp); + } + + proxy->ctx = iothread_get_aio_context(vfio_user_iothread); + proxy->req_bh = qemu_bh_new(vfio_user_request, proxy); + + QTAILQ_INIT(&proxy->outgoing); + QTAILQ_INIT(&proxy->incoming); + QTAILQ_INIT(&proxy->free); + QTAILQ_INIT(&proxy->pending); + QLIST_INSERT_HEAD(&vfio_user_sockets, proxy, next); + + return proxy; +} + +void vfio_user_set_handler(VFIODevice *vbasedev, + void (*handler)(void *opaque, VFIOUserMsg *msg), + void *req_arg) +{ + VFIOUserProxy *proxy = vbasedev->proxy; + + proxy->request = handler; + proxy->req_arg = req_arg; + qio_channel_set_aio_fd_handler(proxy->ioc, proxy->ctx, + vfio_user_recv, NULL, NULL, proxy); +} + +void vfio_user_disconnect(VFIOUserProxy *proxy) +{ + VFIOUserMsg *r1, *r2; + + qemu_mutex_lock(&proxy->lock); + + /* our side is quitting */ + if (proxy->state == VFIO_PROXY_CONNECTED) { + vfio_user_shutdown(proxy); + if (!QTAILQ_EMPTY(&proxy->pending)) { + error_printf("vfio_user_disconnect: outstanding requests\n"); + } + } + object_unref(OBJECT(proxy->ioc)); + proxy->ioc = NULL; + qemu_bh_delete(proxy->req_bh); + proxy->req_bh = NULL; + + proxy->state = VFIO_PROXY_CLOSING; + QTAILQ_FOREACH_SAFE(r1, &proxy->outgoing, next, r2) { + qemu_cond_destroy(&r1->cv); + QTAILQ_REMOVE(&proxy->outgoing, r1, next); + g_free(r1); + } + QTAILQ_FOREACH_SAFE(r1, &proxy->incoming, next, r2) { + qemu_cond_destroy(&r1->cv); + QTAILQ_REMOVE(&proxy->incoming, r1, next); + g_free(r1); + } + QTAILQ_FOREACH_SAFE(r1, &proxy->pending, next, r2) { + qemu_cond_destroy(&r1->cv); + QTAILQ_REMOVE(&proxy->pending, r1, next); + g_free(r1); + } + QTAILQ_FOREACH_SAFE(r1, &proxy->free, next, r2) { + qemu_cond_destroy(&r1->cv); + QTAILQ_REMOVE(&proxy->free, r1, next); + g_free(r1); + } + + /* + * Make sure the iothread isn't blocking anywhere + * with a ref to this proxy by waiting for a BH + * handler to run after the proxy fd handlers were + * deleted 
above. + */ + aio_bh_schedule_oneshot(proxy->ctx, vfio_user_close_cb, proxy); + + while (proxy->state != VFIO_PROXY_CLOSED) { + qemu_cond_wait(&proxy->close_cv, &proxy->lock); + } + + /* we now hold the only ref to proxy */ + qemu_mutex_unlock(&proxy->lock); + qemu_cond_destroy(&proxy->close_cv); + qemu_mutex_destroy(&proxy->lock); + + QLIST_REMOVE(proxy, next); + if (QLIST_EMPTY(&vfio_user_sockets)) { + iothread_destroy(vfio_user_iothread); + vfio_user_iothread = NULL; + } + + g_free(proxy->sockname); + g_free(proxy); +} + +void vfio_user_request_msg(VFIOUserHdr *hdr, uint16_t cmd, + uint32_t size, uint32_t flags) +{ + static uint16_t next_id; + + hdr->id = qatomic_fetch_inc(&next_id); + hdr->command = cmd; + hdr->size = size; + hdr->flags = (flags & ~VFIO_USER_TYPE) | VFIO_USER_REQUEST; + hdr->error_reply = 0; +} + +struct cap_entry { + const char *name; + bool (*check)(VFIOUserProxy *proxy, QObject *qobj, Error **errp); +}; + +static bool caps_parse(VFIOUserProxy *proxy, QDict *qdict, + struct cap_entry caps[], Error **errp) +{ + QObject *qobj; + struct cap_entry *p; + + for (p = caps; p->name != NULL; p++) { + qobj = qdict_get(qdict, p->name); + if (qobj != NULL) { + if (!p->check(proxy, qobj, errp)) { + return false; + } + qdict_del(qdict, p->name); + } + } + + /* warning, for now */ + if (qdict_size(qdict) != 0) { + warn_report("spurious capabilities"); + } + return true; +} + +static bool check_migr_pgsize(VFIOUserProxy *proxy, QObject *qobj, Error **errp) +{ + QNum *qn = qobject_to(QNum, qobj); + uint64_t pgsize; + + if (qn == NULL || !qnum_get_try_uint(qn, &pgsize)) { + error_setg(errp, "malformed %s", VFIO_USER_CAP_PGSIZE); + return false; + } + + /* must be larger than default */ + if (pgsize & (VFIO_USER_DEF_PGSIZE - 1)) { + error_setg(errp, "pgsize 0x%"PRIx64" too small", pgsize); + return false; + } + + proxy->migr_pgsize = pgsize; + return true; +} + +static bool check_bitmap(VFIOUserProxy *proxy, QObject *qobj, Error **errp) +{ + QNum *qn = qobject_to(QNum, qobj); + uint64_t bitmap_size; + + if (qn == NULL || !qnum_get_try_uint(qn, &bitmap_size)) { + error_setg(errp, "malformed %s", VFIO_USER_CAP_MAX_BITMAP); + return false; + } + + /* can only lower it */ + if (bitmap_size > VFIO_USER_DEF_MAX_BITMAP) { + error_setg(errp, "%s too large", VFIO_USER_CAP_MAX_BITMAP); + return false; + } + + proxy->max_bitmap = bitmap_size; + return true; +} + +static struct cap_entry caps_migr[] = { + { VFIO_USER_CAP_PGSIZE, check_migr_pgsize }, + { VFIO_USER_CAP_MAX_BITMAP, check_bitmap }, + { NULL } +}; + +static bool check_max_fds(VFIOUserProxy *proxy, QObject *qobj, Error **errp) +{ + QNum *qn = qobject_to(QNum, qobj); + uint64_t max_send_fds; + + if (qn == NULL || !qnum_get_try_uint(qn, &max_send_fds) || + max_send_fds > VFIO_USER_MAX_MAX_FDS) { + error_setg(errp, "malformed %s", VFIO_USER_CAP_MAX_FDS); + return false; + } + proxy->max_send_fds = max_send_fds; + return true; +} + +static bool check_max_xfer(VFIOUserProxy *proxy, QObject *qobj, Error **errp) +{ + QNum *qn = qobject_to(QNum, qobj); + uint64_t max_xfer_size; + + if (qn == NULL || !qnum_get_try_uint(qn, &max_xfer_size) || + max_xfer_size > VFIO_USER_MAX_MAX_XFER) { + error_setg(errp, "malformed %s", VFIO_USER_CAP_MAX_XFER); + return false; + } + proxy->max_xfer_size = max_xfer_size; + return true; +} + +static bool check_pgsizes(VFIOUserProxy *proxy, QObject *qobj, Error **errp) +{ + QNum *qn = qobject_to(QNum, qobj); + uint64_t pgsizes; + + if (qn == NULL || !qnum_get_try_uint(qn, &pgsizes)) { + error_setg(errp, "malformed 
%s", VFIO_USER_CAP_PGSIZES); + return false; + } + + /* must be larger than default */ + if (pgsizes & (VFIO_USER_DEF_PGSIZE - 1)) { + error_setg(errp, "pgsize 0x%"PRIx64" too small", pgsizes); + return false; + } + + proxy->dma_pgsizes = pgsizes; + return true; +} + +static bool check_max_dma(VFIOUserProxy *proxy, QObject *qobj, Error **errp) +{ + QNum *qn = qobject_to(QNum, qobj); + uint64_t max_dma; + + if (qn == NULL || !qnum_get_try_uint(qn, &max_dma)) { + error_setg(errp, "malformed %s", VFIO_USER_CAP_MAP_MAX); + return false; + } + + /* can only lower it */ + if (max_dma > VFIO_USER_DEF_MAP_MAX) { + error_setg(errp, "%s too large", VFIO_USER_CAP_MAP_MAX); + return false; + } + + proxy->max_dma = max_dma; + return true; +} + +static bool check_migr(VFIOUserProxy *proxy, QObject *qobj, Error **errp) +{ + QDict *qdict = qobject_to(QDict, qobj); + + if (qdict == NULL) { + error_setg(errp, "malformed %s", VFIO_USER_CAP_MAX_FDS); + return true; + } + return caps_parse(proxy, qdict, caps_migr, errp); +} + +static bool check_multi(VFIOUserProxy *proxy, QObject *qobj, Error **errp) +{ + QBool *qb = qobject_to(QBool, qobj); + + if (qb == NULL) { + error_setg(errp, "malformed %s", VFIO_USER_CAP_MULTI); + return false; + } + if (qbool_get_bool(qb)) { + proxy->flags |= VFIO_PROXY_USE_MULTI; + } + return true; +} + +static struct cap_entry caps_cap[] = { + { VFIO_USER_CAP_MAX_FDS, check_max_fds }, + { VFIO_USER_CAP_MAX_XFER, check_max_xfer }, + { VFIO_USER_CAP_PGSIZES, check_pgsizes }, + { VFIO_USER_CAP_MAP_MAX, check_max_dma }, + { VFIO_USER_CAP_MIGR, check_migr }, + { VFIO_USER_CAP_MULTI, check_multi }, + { NULL } +}; + +static bool check_cap(VFIOUserProxy *proxy, QObject *qobj, Error **errp) +{ + QDict *qdict = qobject_to(QDict, qobj); + + if (qdict == NULL) { + error_setg(errp, "malformed %s", VFIO_USER_CAP); + return false; + } + return caps_parse(proxy, qdict, caps_cap, errp); +} + +static struct cap_entry ver_0_0[] = { + { VFIO_USER_CAP, check_cap }, + { NULL } +}; + +static bool caps_check(VFIOUserProxy *proxy, int minor, const char *caps, + Error **errp) +{ + QObject *qobj; + QDict *qdict; + bool ret; + + qobj = qobject_from_json(caps, NULL); + if (qobj == NULL) { + error_setg(errp, "malformed capabilities %s", caps); + return false; + } + qdict = qobject_to(QDict, qobj); + if (qdict == NULL) { + error_setg(errp, "capabilities %s not an object", caps); + qobject_unref(qobj); + return false; + } + ret = caps_parse(proxy, qdict, ver_0_0, errp); + + qobject_unref(qobj); + return ret; +} + +static GString *caps_json(void) +{ + QDict *dict = qdict_new(); + QDict *capdict = qdict_new(); + QDict *migdict = qdict_new(); + GString *str; + + qdict_put_int(migdict, VFIO_USER_CAP_PGSIZE, VFIO_USER_DEF_PGSIZE); + qdict_put_int(migdict, VFIO_USER_CAP_MAX_BITMAP, VFIO_USER_DEF_MAX_BITMAP); + qdict_put_obj(capdict, VFIO_USER_CAP_MIGR, QOBJECT(migdict)); + + qdict_put_int(capdict, VFIO_USER_CAP_MAX_FDS, VFIO_USER_MAX_MAX_FDS); + qdict_put_int(capdict, VFIO_USER_CAP_MAX_XFER, VFIO_USER_DEF_MAX_XFER); + qdict_put_int(capdict, VFIO_USER_CAP_PGSIZES, VFIO_USER_DEF_PGSIZE); + qdict_put_int(capdict, VFIO_USER_CAP_MAP_MAX, VFIO_USER_DEF_MAP_MAX); + qdict_put_bool(capdict, VFIO_USER_CAP_MULTI, true); + + qdict_put_obj(dict, VFIO_USER_CAP, QOBJECT(capdict)); + + str = qobject_to_json(QOBJECT(dict)); + qobject_unref(dict); + return str; +} + +bool vfio_user_validate_version(VFIOUserProxy *proxy, Error **errp) +{ + g_autofree VFIOUserVersion *msgp = NULL; + GString *caps; + char *reply; + int size, caplen; + + caps 
= caps_json(); + caplen = caps->len + 1; + size = sizeof(*msgp) + caplen; + msgp = g_malloc0(size); + + vfio_user_request_msg(&msgp->hdr, VFIO_USER_VERSION, size, 0); + msgp->major = VFIO_USER_MAJOR_VER; + msgp->minor = VFIO_USER_MINOR_VER; + memcpy(&msgp->capabilities, caps->str, caplen); + g_string_free(caps, true); + trace_vfio_user_version(msgp->major, msgp->minor, msgp->capabilities); + + if (!vfio_user_send_wait(proxy, &msgp->hdr, NULL, 0, errp)) { + return false; + } + + if (msgp->hdr.flags & VFIO_USER_ERROR) { + error_setg_errno(errp, msgp->hdr.error_reply, "version reply"); + return false; + } + + if (msgp->major != VFIO_USER_MAJOR_VER || + msgp->minor > VFIO_USER_MINOR_VER) { + error_setg(errp, "incompatible server version"); + return false; + } + + reply = msgp->capabilities; + if (reply[msgp->hdr.size - sizeof(*msgp) - 1] != '\0') { + error_setg(errp, "corrupt version reply"); + return false; + } + + if (!caps_check(proxy, msgp->minor, reply, errp)) { + return false; + } + + trace_vfio_user_version(msgp->major, msgp->minor, msgp->capabilities); + return true; +} + +void vfio_user_flush_multi(VFIOUserProxy *proxy) +{ + VFIOUserMsg *msg; + VFIOUserWRMulti *wm = proxy->wr_multi; + Error *local_err = NULL; + + proxy->wr_multi = NULL; + + /* adjust size for actual # of writes */ + wm->hdr.size -= (VFIO_USER_MULTI_MAX - wm->wr_cnt) * sizeof(VFIOUserWROne); + + msg = vfio_user_getmsg(proxy, &wm->hdr, NULL); + msg->id = wm->hdr.id; + msg->rsize = 0; + msg->type = VFIO_MSG_ASYNC; + trace_vfio_user_wrmulti("flush", wm->wr_cnt); + + if (!vfio_user_send_queued(proxy, msg, &local_err)) { + error_report_err(local_err); + vfio_user_recycle(proxy, msg); + } +} + +void vfio_user_create_multi(VFIOUserProxy *proxy) +{ + VFIOUserWRMulti *wm; + + wm = g_malloc0(sizeof(*wm)); + vfio_user_request_msg(&wm->hdr, VFIO_USER_REGION_WRITE_MULTI, + sizeof(*wm), VFIO_USER_NO_REPLY); + proxy->wr_multi = wm; +} + +void vfio_user_add_multi(VFIOUserProxy *proxy, uint8_t index, + off_t offset, uint32_t count, void *data) +{ + VFIOUserWRMulti *wm = proxy->wr_multi; + VFIOUserWROne *w1 = &wm->wrs[wm->wr_cnt]; + + w1->offset = offset; + w1->region = index; + w1->count = count; + memcpy(&w1->data, data, count); + + wm->wr_cnt++; + trace_vfio_user_wrmulti("add", wm->wr_cnt); + if (wm->wr_cnt == VFIO_USER_MULTI_MAX || + proxy->num_outgoing < VFIO_USER_OUT_LOW) { + vfio_user_flush_multi(proxy); + } +} diff --git a/hw/vfio-user/proxy.h b/hw/vfio-user/proxy.h new file mode 100644 index 00000000000..61e64a0020f --- /dev/null +++ b/hw/vfio-user/proxy.h @@ -0,0 +1,135 @@ +#ifndef VFIO_USER_PROXY_H +#define VFIO_USER_PROXY_H + +/* + * vfio protocol over a UNIX socket. + * + * Copyright © 2018, 2021 Oracle and/or its affiliates. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "io/channel.h" +#include "io/channel-socket.h" + +#include "qemu/queue.h" +#include "qemu/sockets.h" +#include "qemu/thread.h" +#include "hw/vfio/vfio-device.h" +#include "hw/vfio-user/protocol.h" + +typedef struct { + int send_fds; + int recv_fds; + int *fds; +} VFIOUserFDs; + +enum msg_type { + VFIO_MSG_NONE, + VFIO_MSG_ASYNC, + VFIO_MSG_WAIT, + VFIO_MSG_NOWAIT, + VFIO_MSG_REQ, +}; + +typedef struct VFIOUserMsg { + QTAILQ_ENTRY(VFIOUserMsg) next; + VFIOUserHdr *hdr; + VFIOUserFDs *fds; + uint32_t rsize; + uint32_t id; + QemuCond cv; + bool complete; + bool pending; + enum msg_type type; +} VFIOUserMsg; + + +enum proxy_state { + VFIO_PROXY_CONNECTED = 1, + VFIO_PROXY_ERROR = 2, + VFIO_PROXY_CLOSING = 3, + VFIO_PROXY_CLOSED = 4, +}; + +typedef QTAILQ_HEAD(VFIOUserMsgQ, VFIOUserMsg) VFIOUserMsgQ; + +typedef struct VFIOUserProxy { + QLIST_ENTRY(VFIOUserProxy) next; + char *sockname; + struct QIOChannel *ioc; + void (*request)(void *opaque, VFIOUserMsg *msg); + void *req_arg; + uint64_t max_xfer_size; + uint64_t max_send_fds; + uint64_t max_dma; + uint64_t dma_pgsizes; + uint64_t max_bitmap; + uint64_t migr_pgsize; + int flags; + uint32_t wait_time; + QemuCond close_cv; + AioContext *ctx; + QEMUBH *req_bh; + bool async_ops; + + /* + * above only changed when BQL is held + * below are protected by per-proxy lock + */ + QemuMutex lock; + VFIOUserMsgQ free; + VFIOUserMsgQ pending; + VFIOUserMsgQ incoming; + VFIOUserMsgQ outgoing; + VFIOUserMsg *last_nowait; + VFIOUserMsg *part_recv; + size_t recv_left; + VFIOUserWRMulti *wr_multi; + int num_outgoing; + enum proxy_state state; +} VFIOUserProxy; + +/* VFIOProxy flags */ +#define VFIO_PROXY_CLIENT 0x1 +#define VFIO_PROXY_FORCE_QUEUED 0x4 +#define VFIO_PROXY_NO_POST 0x8 +#define VFIO_PROXY_USE_MULTI 0x16 + +/* coalescing high and low water marks for VFIOProxy num_outgoing */ +#define VFIO_USER_OUT_HIGH 1024 +#define VFIO_USER_OUT_LOW 128 + +typedef struct VFIODevice VFIODevice; + +VFIOUserProxy *vfio_user_connect_dev(SocketAddress *addr, Error **errp); +void vfio_user_disconnect(VFIOUserProxy *proxy); +void vfio_user_set_handler(VFIODevice *vbasedev, + void (*handler)(void *opaque, VFIOUserMsg *msg), + void *reqarg); +bool vfio_user_validate_version(VFIOUserProxy *proxy, Error **errp); + +VFIOUserFDs *vfio_user_getfds(int numfds); +void vfio_user_putfds(VFIOUserMsg *msg); + +void vfio_user_disable_posted_writes(VFIOUserProxy *proxy); + +void vfio_user_request_msg(VFIOUserHdr *hdr, uint16_t cmd, + uint32_t size, uint32_t flags); +void vfio_user_wait_reqs(VFIOUserProxy *proxy); +bool vfio_user_send_wait(VFIOUserProxy *proxy, VFIOUserHdr *hdr, + VFIOUserFDs *fds, int rsize, Error **errp); +bool vfio_user_send_nowait(VFIOUserProxy *proxy, VFIOUserHdr *hdr, + VFIOUserFDs *fds, int rsize, Error **errp); +bool vfio_user_send_async(VFIOUserProxy *proxy, VFIOUserHdr *hdr, + VFIOUserFDs *fds, Error **errp); + +void vfio_user_send_reply(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int size); +void vfio_user_send_error(VFIOUserProxy *proxy, VFIOUserHdr *hdr, int error); + +void vfio_user_flush_multi(VFIOUserProxy *proxy); +void vfio_user_create_multi(VFIOUserProxy *proxy); +void vfio_user_add_multi(VFIOUserProxy *proxy, uint8_t index, + off_t offset, uint32_t count, void *data); + +#endif /* VFIO_USER_PROXY_H */ diff --git a/hw/vfio-user/trace-events b/hw/vfio-user/trace-events new file mode 100644 index 00000000000..abb67f4c119 --- /dev/null +++ b/hw/vfio-user/trace-events @@ -0,0 +1,20 @@ +# See 
docs/devel/tracing.rst for syntax documentation. +# +# SPDX-License-Identifier: GPL-2.0-or-later + +# common.c +vfio_user_recv_hdr(const char *name, uint16_t id, uint16_t cmd, uint32_t size, uint32_t flags) " (%s) id 0x%x cmd 0x%x size 0x%x flags 0x%x" +vfio_user_recv_read(uint16_t id, int read) " id 0x%x read 0x%x" +vfio_user_recv_request(uint16_t cmd) " command 0x%x" +vfio_user_send_write(uint16_t id, int wrote) " id 0x%x wrote 0x%x" +vfio_user_version(uint16_t major, uint16_t minor, const char *caps) " major %d minor %d caps: %s" +vfio_user_get_info(uint32_t nregions, uint32_t nirqs) " #regions %d #irqs %d" +vfio_user_get_region_info(uint32_t index, uint32_t flags, uint64_t size) " index %d flags 0x%x size 0x%"PRIx64 +vfio_user_region_rw(uint32_t region, uint64_t off, uint32_t count) " region %d offset 0x%"PRIx64" count %d" +vfio_user_get_irq_info(uint32_t index, uint32_t flags, uint32_t count) " index %d flags 0x%x count %d" +vfio_user_set_irqs(uint32_t index, uint32_t start, uint32_t count, uint32_t flags) " index %d start %d count %d flags 0x%x" +vfio_user_wrmulti(const char *s, uint64_t wr_cnt) " %s count 0x%"PRIx64 + +# container.c +vfio_user_dma_map(uint64_t iova, uint64_t size, uint64_t off, uint32_t flags, bool async_ops) " iova 0x%"PRIx64" size 0x%"PRIx64" off 0x%"PRIx64" flags 0x%x async_ops %d" +vfio_user_dma_unmap(uint64_t iova, uint64_t size, uint32_t flags, bool async_ops) " iova 0x%"PRIx64" size 0x%"PRIx64" flags 0x%x async_ops %d" diff --git a/hw/vfio-user/trace.h b/hw/vfio-user/trace.h new file mode 100644 index 00000000000..9cf02d9506d --- /dev/null +++ b/hw/vfio-user/trace.h @@ -0,0 +1,4 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "trace/trace-hw_vfio_user.h" diff --git a/hw/vfio/Kconfig b/hw/vfio/Kconfig index 7cdba0560aa..91d9023b79b 100644 --- a/hw/vfio/Kconfig +++ b/hw/vfio/Kconfig @@ -1,3 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + config VFIO bool depends on LINUX diff --git a/hw/vfio/amd-xgbe.c b/hw/vfio/amd-xgbe.c index 96bd608b8dd..58f590e385b 100644 --- a/hw/vfio/amd-xgbe.c +++ b/hw/vfio/amd-xgbe.c @@ -15,12 +15,14 @@ #include "hw/vfio/vfio-amd-xgbe.h" #include "migration/vmstate.h" #include "qemu/module.h" +#include "qemu/error-report.h" static void amd_xgbe_realize(DeviceState *dev, Error **errp) { VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev); VFIOAmdXgbeDeviceClass *k = VFIO_AMD_XGBE_DEVICE_GET_CLASS(dev); + warn_report("-device vfio-amd-xgbe is deprecated"); vdev->compat = g_strdup("amd,xgbe-seattle-v1a"); vdev->num_compat = 1; @@ -32,7 +34,7 @@ static const VMStateDescription vfio_platform_amd_xgbe_vmstate = { .unmigratable = 1, }; -static void vfio_amd_xgbe_class_init(ObjectClass *klass, void *data) +static void vfio_amd_xgbe_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VFIOAmdXgbeDeviceClass *vcxc = @@ -41,8 +43,6 @@ static void vfio_amd_xgbe_class_init(ObjectClass *klass, void *data) &vcxc->parent_realize); dc->desc = "VFIO AMD XGBE"; dc->vmsd = &vfio_platform_amd_xgbe_vmstate; - /* Supported by TYPE_VIRT_MACHINE */ - dc->user_creatable = true; } static const TypeInfo vfio_amd_xgbe_dev_info = { diff --git a/hw/vfio/ap.c b/hw/vfio/ap.c index 71bf32b83c5..7719f245797 100644 --- a/hw/vfio/ap.c +++ b/hw/vfio/ap.c @@ -10,16 +10,19 @@ * directory. 
*/ +#include #include "qemu/osdep.h" #include CONFIG_DEVICES /* CONFIG_IOMMUFD */ #include #include #include "qapi/error.h" -#include "hw/vfio/vfio-common.h" -#include "sysemu/iommufd.h" +#include "hw/vfio/vfio-device.h" +#include "system/iommufd.h" #include "hw/s390x/ap-device.h" +#include "hw/s390x/css.h" #include "qemu/error-report.h" #include "qemu/event_notifier.h" +#include "qemu/lockable.h" #include "qemu/main-loop.h" #include "qemu/module.h" #include "qemu/option.h" @@ -28,7 +31,7 @@ #include "migration/vmstate.h" #include "hw/qdev-properties.h" #include "hw/s390x/ap-bridge.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "qom/object.h" #define TYPE_VFIO_AP_DEVICE "vfio-ap" @@ -37,8 +40,23 @@ struct VFIOAPDevice { APDevice apdev; VFIODevice vdev; EventNotifier req_notifier; + EventNotifier cfg_notifier; }; +typedef struct APConfigChgEvent { + QTAILQ_ENTRY(APConfigChgEvent) next; +} APConfigChgEvent; + +static QTAILQ_HEAD(, APConfigChgEvent) cfg_chg_events = + QTAILQ_HEAD_INITIALIZER(cfg_chg_events); + +static QemuMutex cfg_chg_events_lock; + +static void __attribute__((constructor)) vfio_ap_global_init(void) +{ + qemu_mutex_init(&cfg_chg_events_lock); +} + OBJECT_DECLARE_SIMPLE_TYPE(VFIOAPDevice, VFIO_AP_DEVICE) static void vfio_ap_compute_needs_reset(VFIODevice *vdev) @@ -70,14 +88,65 @@ static void vfio_ap_req_notifier_handler(void *opaque) } } +static void vfio_ap_cfg_chg_notifier_handler(void *opaque) +{ + APConfigChgEvent *cfg_chg_event; + VFIOAPDevice *vapdev = opaque; + + if (!event_notifier_test_and_clear(&vapdev->cfg_notifier)) { + return; + } + + cfg_chg_event = g_new0(APConfigChgEvent, 1); + + WITH_QEMU_LOCK_GUARD(&cfg_chg_events_lock) { + QTAILQ_INSERT_TAIL(&cfg_chg_events, cfg_chg_event, next); + } + + css_generate_css_crws(0); + +} + +int ap_chsc_sei_nt0_get_event(void *res) +{ + ChscSeiNt0Res *nt0_res = (ChscSeiNt0Res *)res; + APConfigChgEvent *cfg_chg_event; + + WITH_QEMU_LOCK_GUARD(&cfg_chg_events_lock) { + if (QTAILQ_EMPTY(&cfg_chg_events)) { + return EVENT_INFORMATION_NOT_STORED; + } + + cfg_chg_event = QTAILQ_FIRST(&cfg_chg_events); + QTAILQ_REMOVE(&cfg_chg_events, cfg_chg_event, next); + } + + memset(nt0_res, 0, sizeof(*nt0_res)); + g_free(cfg_chg_event); + nt0_res->flags |= PENDING_EVENT_INFO_BITMASK; + nt0_res->length = sizeof(ChscSeiNt0Res); + nt0_res->code = NT0_RES_RESPONSE_CODE; + nt0_res->nt = NT0_RES_NT_DEFAULT; + nt0_res->rs = NT0_RES_RS_AP_CHANGE; + nt0_res->cc = NT0_RES_CC_AP_CHANGE; + + return EVENT_INFORMATION_STORED; +} + +bool ap_chsc_sei_nt0_have_event(void) +{ + QEMU_LOCK_GUARD(&cfg_chg_events_lock); + return !QTAILQ_EMPTY(&cfg_chg_events); +} + static bool vfio_ap_register_irq_notifier(VFIOAPDevice *vapdev, unsigned int irq, Error **errp) { int fd; - size_t argsz; + int ret; IOHandler *fd_read; EventNotifier *notifier; - g_autofree struct vfio_irq_info *irq_info = NULL; + struct vfio_irq_info irq_info; VFIODevice *vdev = &vapdev->vdev; switch (irq) { @@ -85,6 +154,10 @@ static bool vfio_ap_register_irq_notifier(VFIOAPDevice *vapdev, notifier = &vapdev->req_notifier; fd_read = vfio_ap_req_notifier_handler; break; + case VFIO_AP_CFG_CHG_IRQ_INDEX: + notifier = &vapdev->cfg_notifier; + fd_read = vfio_ap_cfg_chg_notifier_handler; + break; default: error_setg(errp, "vfio: Unsupported device irq(%d)", irq); return false; @@ -96,14 +169,15 @@ static bool vfio_ap_register_irq_notifier(VFIOAPDevice *vapdev, return false; } - argsz = sizeof(*irq_info); - irq_info = g_malloc0(argsz); - irq_info->index = irq; - 
irq_info->argsz = argsz; + ret = vfio_device_get_irq_info(vdev, irq, &irq_info); + + if (ret < 0) { + error_setg_errno(errp, -ret, "vfio: Error getting irq info"); + return false; + } - if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, - irq_info) < 0 || irq_info->count < 1) { - error_setg_errno(errp, errno, "vfio: Error getting irq info"); + if (irq_info.count < 1) { + error_setg(errp, "vfio: Error getting irq info, count=0"); return false; } @@ -117,8 +191,8 @@ static bool vfio_ap_register_irq_notifier(VFIOAPDevice *vapdev, fd = event_notifier_get_fd(notifier); qemu_set_fd_handler(fd, fd_read, NULL, vapdev); - if (!vfio_set_irq_signaling(vdev, irq, 0, VFIO_IRQ_SET_ACTION_TRIGGER, fd, - errp)) { + if (!vfio_device_irq_set_signaling(vdev, irq, 0, VFIO_IRQ_SET_ACTION_TRIGGER, fd, + errp)) { qemu_set_fd_handler(fd, NULL, NULL, vapdev); event_notifier_cleanup(notifier); } @@ -136,13 +210,16 @@ static void vfio_ap_unregister_irq_notifier(VFIOAPDevice *vapdev, case VFIO_AP_REQ_IRQ_INDEX: notifier = &vapdev->req_notifier; break; + case VFIO_AP_CFG_CHG_IRQ_INDEX: + notifier = &vapdev->cfg_notifier; + break; default: error_report("vfio: Unsupported device irq(%d)", irq); return; } - if (!vfio_set_irq_signaling(&vapdev->vdev, irq, 0, - VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) { + if (!vfio_device_irq_set_signaling(&vapdev->vdev, irq, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) { warn_reportf_err(err, VFIO_MSG_PREFIX, vapdev->vdev.name); } @@ -162,7 +239,7 @@ static void vfio_ap_realize(DeviceState *dev, Error **errp) return; } - if (!vfio_attach_device(vbasedev->name, vbasedev, + if (!vfio_device_attach(vbasedev->name, vbasedev, &address_space_memory, errp)) { goto error; } @@ -175,11 +252,20 @@ static void vfio_ap_realize(DeviceState *dev, Error **errp) warn_report_err(err); } + if (!vfio_ap_register_irq_notifier(vapdev, VFIO_AP_CFG_CHG_IRQ_INDEX, &err)) + { + /* + * Report this error, but do not make it a failing condition. + * Lack of this IRQ in the host does not prevent normal operation. 
+ */ + warn_report_err(err); + } + return; error: error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->name); - g_free(vbasedev->name); + vfio_device_free_name(vbasedev); } static void vfio_ap_unrealize(DeviceState *dev) @@ -187,17 +273,17 @@ static void vfio_ap_unrealize(DeviceState *dev) VFIOAPDevice *vapdev = VFIO_AP_DEVICE(dev); vfio_ap_unregister_irq_notifier(vapdev, VFIO_AP_REQ_IRQ_INDEX); - vfio_detach_device(&vapdev->vdev); - g_free(vapdev->vdev.name); + vfio_ap_unregister_irq_notifier(vapdev, VFIO_AP_CFG_CHG_IRQ_INDEX); + vfio_device_detach(&vapdev->vdev); + vfio_device_free_name(&vapdev->vdev); } -static Property vfio_ap_properties[] = { +static const Property vfio_ap_properties[] = { DEFINE_PROP_STRING("sysfsdev", VFIOAPDevice, vdev.sysfsdev), #ifdef CONFIG_IOMMUFD DEFINE_PROP_LINK("iommufd", VFIOAPDevice, vdev.iommufd, TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *), #endif - DEFINE_PROP_END_OF_LIST(), }; static void vfio_ap_reset(DeviceState *dev) @@ -242,7 +328,7 @@ static void vfio_ap_set_fd(Object *obj, const char *str, Error **errp) } #endif -static void vfio_ap_class_init(ObjectClass *klass, void *data) +static void vfio_ap_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -256,8 +342,17 @@ static void vfio_ap_class_init(ObjectClass *klass, void *data) dc->realize = vfio_ap_realize; dc->unrealize = vfio_ap_unrealize; dc->hotpluggable = true; - dc->reset = vfio_ap_reset; + device_class_set_legacy_reset(dc, vfio_ap_reset); dc->bus_type = TYPE_AP_BUS; + + object_class_property_set_description(klass, /* 3.1 */ + "sysfsdev", + "Host sysfs path of assigned device"); +#ifdef CONFIG_IOMMUFD + object_class_property_set_description(klass, /* 9.0 */ + "iommufd", + "Set host IOMMUFD backend device"); +#endif } static const TypeInfo vfio_ap_info = { diff --git a/hw/vfio/calxeda-xgmac.c b/hw/vfio/calxeda-xgmac.c index 87c382e7361..03f2ff57630 100644 --- a/hw/vfio/calxeda-xgmac.c +++ b/hw/vfio/calxeda-xgmac.c @@ -15,12 +15,14 @@ #include "hw/vfio/vfio-calxeda-xgmac.h" #include "migration/vmstate.h" #include "qemu/module.h" +#include "qemu/error-report.h" static void calxeda_xgmac_realize(DeviceState *dev, Error **errp) { VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev); VFIOCalxedaXgmacDeviceClass *k = VFIO_CALXEDA_XGMAC_DEVICE_GET_CLASS(dev); + warn_report("-device vfio-calxeda-xgmac is deprecated"); vdev->compat = g_strdup("calxeda,hb-xgmac"); vdev->num_compat = 1; @@ -32,7 +34,7 @@ static const VMStateDescription vfio_platform_calxeda_xgmac_vmstate = { .unmigratable = 1, }; -static void vfio_calxeda_xgmac_class_init(ObjectClass *klass, void *data) +static void vfio_calxeda_xgmac_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VFIOCalxedaXgmacDeviceClass *vcxc = @@ -41,8 +43,6 @@ static void vfio_calxeda_xgmac_class_init(ObjectClass *klass, void *data) &vcxc->parent_realize); dc->desc = "VFIO Calxeda XGMAC"; dc->vmsd = &vfio_platform_calxeda_xgmac_vmstate; - /* Supported by TYPE_VIRT_MACHINE */ - dc->user_creatable = true; } static const TypeInfo vfio_calxeda_xgmac_dev_info = { diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c index 115862f4303..9560b8d851b 100644 --- a/hw/vfio/ccw.c +++ b/hw/vfio/ccw.c @@ -21,13 +21,13 @@ #include #include "qapi/error.h" -#include "hw/vfio/vfio-common.h" -#include "sysemu/iommufd.h" +#include "hw/vfio/vfio-device.h" +#include "system/iommufd.h" #include "hw/s390x/s390-ccw.h" #include "hw/s390x/vfio-ccw.h" #include "hw/qdev-properties.h" #include "hw/s390x/ccw-device.h" -#include 
"exec/address-spaces.h" +#include "system/address-spaces.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/module.h" @@ -51,17 +51,8 @@ struct VFIOCCWDevice { EventNotifier crw_notifier; EventNotifier req_notifier; bool force_orb_pfch; - bool warned_orb_pfch; }; -static inline void warn_once_pfch(VFIOCCWDevice *vcdev, SubchDev *sch, - const char *msg) -{ - warn_report_once_cond(&vcdev->warned_orb_pfch, - "vfio-ccw (devno %x.%x.%04x): %s", - sch->cssid, sch->ssid, sch->devno, msg); -} - static void vfio_ccw_compute_needs_reset(VFIODevice *vdev) { vdev->needs_reset = false; @@ -83,7 +74,8 @@ static IOInstEnding vfio_ccw_handle_request(SubchDev *sch) if (!(sch->orb.ctrl0 & ORB_CTRL0_MASK_PFCH) && vcdev->force_orb_pfch) { sch->orb.ctrl0 |= ORB_CTRL0_MASK_PFCH; - warn_once_pfch(vcdev, sch, "PFCH flag forced"); + warn_report_once("vfio-ccw (devno %x.%x.%04x): PFCH flag forced", + sch->cssid, sch->ssid, sch->devno); } QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB)); @@ -384,8 +376,8 @@ static bool vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev, Error **errp) { VFIODevice *vdev = &vcdev->vdev; - g_autofree struct vfio_irq_info *irq_info = NULL; - size_t argsz; + struct vfio_irq_info irq_info; + int ret; int fd; EventNotifier *notifier; IOHandler *fd_read; @@ -414,13 +406,15 @@ static bool vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev, return false; } - argsz = sizeof(*irq_info); - irq_info = g_malloc0(argsz); - irq_info->index = irq; - irq_info->argsz = argsz; - if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, - irq_info) < 0 || irq_info->count < 1) { - error_setg_errno(errp, errno, "vfio: Error getting irq info"); + ret = vfio_device_get_irq_info(vdev, irq, &irq_info); + + if (ret < 0) { + error_setg_errno(errp, -ret, "vfio: Error getting irq info"); + return false; + } + + if (irq_info.count < 1) { + error_setg(errp, "vfio: Error getting irq info, count=0"); return false; } @@ -434,8 +428,8 @@ static bool vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev, fd = event_notifier_get_fd(notifier); qemu_set_fd_handler(fd, fd_read, NULL, vcdev); - if (!vfio_set_irq_signaling(vdev, irq, 0, - VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) { + if (!vfio_device_irq_set_signaling(vdev, irq, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) { qemu_set_fd_handler(fd, NULL, NULL, vcdev); event_notifier_cleanup(notifier); } @@ -464,8 +458,8 @@ static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev, return; } - if (!vfio_set_irq_signaling(&vcdev->vdev, irq, 0, - VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) { + if (!vfio_device_irq_set_signaling(&vcdev->vdev, irq, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) { warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name); } @@ -496,7 +490,7 @@ static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp) return false; } - ret = vfio_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info); + ret = vfio_device_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info); if (ret) { error_setg_errno(errp, -ret, "vfio: Error getting config info"); return false; @@ -510,11 +504,10 @@ static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp) vcdev->io_region_offset = info->offset; vcdev->io_region = g_malloc0(info->size); - g_free(info); /* check for the optional async command region */ - ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW, - VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info); + ret = vfio_device_get_region_info_type(vdev, VFIO_REGION_TYPE_CCW, + VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info); if (!ret) { 
vcdev->async_cmd_region_size = info->size; if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) { @@ -523,11 +516,10 @@ static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp) } vcdev->async_cmd_region_offset = info->offset; vcdev->async_cmd_region = g_malloc0(info->size); - g_free(info); } - ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW, - VFIO_REGION_SUBTYPE_CCW_SCHIB, &info); + ret = vfio_device_get_region_info_type(vdev, VFIO_REGION_TYPE_CCW, + VFIO_REGION_SUBTYPE_CCW_SCHIB, &info); if (!ret) { vcdev->schib_region_size = info->size; if (sizeof(*vcdev->schib_region) != vcdev->schib_region_size) { @@ -536,11 +528,10 @@ static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp) } vcdev->schib_region_offset = info->offset; vcdev->schib_region = g_malloc(info->size); - g_free(info); } - ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW, - VFIO_REGION_SUBTYPE_CCW_CRW, &info); + ret = vfio_device_get_region_info_type(vdev, VFIO_REGION_TYPE_CCW, + VFIO_REGION_SUBTYPE_CCW_CRW, &info); if (!ret) { vcdev->crw_region_size = info->size; @@ -550,7 +541,6 @@ static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp) } vcdev->crw_region_offset = info->offset; vcdev->crw_region = g_malloc(info->size); - g_free(info); } return true; @@ -560,7 +550,6 @@ static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp) g_free(vcdev->schib_region); g_free(vcdev->async_cmd_region); g_free(vcdev->io_region); - g_free(info); return false; } @@ -591,7 +580,7 @@ static void vfio_ccw_realize(DeviceState *dev, Error **errp) goto out_unrealize; } - if (!vfio_attach_device(cdev->mdevid, vbasedev, + if (!vfio_device_attach(cdev->mdevid, vbasedev, &address_space_memory, errp)) { goto out_attach_dev_err; } @@ -628,9 +617,9 @@ static void vfio_ccw_realize(DeviceState *dev, Error **errp) out_io_notifier_err: vfio_ccw_put_region(vcdev); out_region_err: - vfio_detach_device(vbasedev); + vfio_device_detach(vbasedev); out_attach_dev_err: - g_free(vbasedev->name); + vfio_device_free_name(vbasedev); out_unrealize: if (cdc->unrealize) { cdc->unrealize(cdev); @@ -647,22 +636,22 @@ static void vfio_ccw_unrealize(DeviceState *dev) vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX); vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX); vfio_ccw_put_region(vcdev); - vfio_detach_device(&vcdev->vdev); - g_free(vcdev->vdev.name); + vfio_device_detach(&vcdev->vdev); + vfio_device_free_name(&vcdev->vdev); if (cdc->unrealize) { cdc->unrealize(cdev); } } -static Property vfio_ccw_properties[] = { +static const Property vfio_ccw_properties[] = { DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev), DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false), #ifdef CONFIG_IOMMUFD DEFINE_PROP_LINK("iommufd", VFIOCCWDevice, vdev.iommufd, TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *), #endif - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_CCW_LOADPARM("loadparm", CcwDevice, loadparm), }; static const VMStateDescription vfio_ccw_vmstate = { @@ -697,7 +686,7 @@ static void vfio_ccw_set_fd(Object *obj, const char *str, Error **errp) } #endif -static void vfio_ccw_class_init(ObjectClass *klass, void *data) +static void vfio_ccw_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass); @@ -711,12 +700,27 @@ static void vfio_ccw_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->realize = vfio_ccw_realize; dc->unrealize 
= vfio_ccw_unrealize; - dc->reset = vfio_ccw_reset; + device_class_set_legacy_reset(dc, vfio_ccw_reset); cdc->handle_request = vfio_ccw_handle_request; cdc->handle_halt = vfio_ccw_handle_halt; cdc->handle_clear = vfio_ccw_handle_clear; cdc->handle_store = vfio_ccw_handle_store; + + object_class_property_set_description(klass, /* 2.10 */ + "sysfsdev", + "Host sysfs path of assigned device"); + object_class_property_set_description(klass, /* 3.0 */ + "force-orb-pfch", + "Force unlimited prefetch"); +#ifdef CONFIG_IOMMUFD + object_class_property_set_description(klass, /* 9.0 */ + "iommufd", + "Set host IOMMUFD backend device"); +#endif + object_class_property_set_description(klass, /* 9.2 */ + "loadparm", + "Define which devices that can be used for booting"); } static const TypeInfo vfio_ccw_info = { diff --git a/hw/vfio/common.c b/hw/vfio/common.c deleted file mode 100644 index 36d0cf6585b..00000000000 --- a/hw/vfio/common.c +++ /dev/null @@ -1,1573 +0,0 @@ -/* - * generic functions used by VFIO devices - * - * Copyright Red Hat, Inc. 2012 - * - * Authors: - * Alex Williamson - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - * Based on qemu-kvm device-assignment: - * Adapted for KVM by Qumranet. - * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com) - * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com) - * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com) - * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com) - * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com) - */ - -#include "qemu/osdep.h" -#include -#ifdef CONFIG_KVM -#include -#endif -#include - -#include "hw/vfio/vfio-common.h" -#include "hw/vfio/pci.h" -#include "exec/address-spaces.h" -#include "exec/memory.h" -#include "exec/ram_addr.h" -#include "hw/hw.h" -#include "qemu/error-report.h" -#include "qemu/main-loop.h" -#include "qemu/range.h" -#include "sysemu/kvm.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "trace.h" -#include "qapi/error.h" -#include "migration/misc.h" -#include "migration/blocker.h" -#include "migration/qemu-file.h" -#include "sysemu/tpm.h" - -VFIODeviceList vfio_device_list = - QLIST_HEAD_INITIALIZER(vfio_device_list); -static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces = - QLIST_HEAD_INITIALIZER(vfio_address_spaces); - -#ifdef CONFIG_KVM -/* - * We have a single VFIO pseudo device per KVM VM. Once created it lives - * for the life of the VM. Closing the file descriptor only drops our - * reference to it and the device's reference to kvm. Therefore once - * initialized, this file descriptor is only released on QEMU exit and - * we'll re-use it should another vfio device be attached before then. - */ -int vfio_kvm_device_fd = -1; -#endif - -/* - * Device state interfaces - */ - -bool vfio_mig_active(void) -{ - VFIODevice *vbasedev; - - if (QLIST_EMPTY(&vfio_device_list)) { - return false; - } - - QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) { - if (vbasedev->migration_blocker) { - return false; - } - } - return true; -} - -static Error *multiple_devices_migration_blocker; - -/* - * Multiple devices migration is allowed only if all devices support P2P - * migration. Single device migration is allowed regardless of P2P migration - * support. 
- */ -static bool vfio_multiple_devices_migration_is_supported(void) -{ - VFIODevice *vbasedev; - unsigned int device_num = 0; - bool all_support_p2p = true; - - QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) { - if (vbasedev->migration) { - device_num++; - - if (!(vbasedev->migration->mig_flags & VFIO_MIGRATION_P2P)) { - all_support_p2p = false; - } - } - } - - return all_support_p2p || device_num <= 1; -} - -int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp) -{ - int ret; - - if (vfio_multiple_devices_migration_is_supported()) { - return 0; - } - - if (vbasedev->enable_migration == ON_OFF_AUTO_ON) { - error_setg(errp, "Multiple VFIO devices migration is supported only if " - "all of them support P2P migration"); - return -EINVAL; - } - - if (multiple_devices_migration_blocker) { - return 0; - } - - error_setg(&multiple_devices_migration_blocker, - "Multiple VFIO devices migration is supported only if all of " - "them support P2P migration"); - ret = migrate_add_blocker_normal(&multiple_devices_migration_blocker, errp); - - return ret; -} - -void vfio_unblock_multiple_devices_migration(void) -{ - if (!multiple_devices_migration_blocker || - !vfio_multiple_devices_migration_is_supported()) { - return; - } - - migrate_del_blocker(&multiple_devices_migration_blocker); -} - -bool vfio_viommu_preset(VFIODevice *vbasedev) -{ - return vbasedev->bcontainer->space->as != &address_space_memory; -} - -static void vfio_set_migration_error(int ret) -{ - if (migration_is_setup_or_active()) { - migration_file_set_error(ret, NULL); - } -} - -bool vfio_device_state_is_running(VFIODevice *vbasedev) -{ - VFIOMigration *migration = vbasedev->migration; - - return migration->device_state == VFIO_DEVICE_STATE_RUNNING || - migration->device_state == VFIO_DEVICE_STATE_RUNNING_P2P; -} - -bool vfio_device_state_is_precopy(VFIODevice *vbasedev) -{ - VFIOMigration *migration = vbasedev->migration; - - return migration->device_state == VFIO_DEVICE_STATE_PRE_COPY || - migration->device_state == VFIO_DEVICE_STATE_PRE_COPY_P2P; -} - -static bool vfio_devices_all_dirty_tracking(VFIOContainerBase *bcontainer) -{ - VFIODevice *vbasedev; - - if (!migration_is_active() && !migration_is_device()) { - return false; - } - - QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { - VFIOMigration *migration = vbasedev->migration; - - if (!migration) { - return false; - } - - if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF && - (vfio_device_state_is_running(vbasedev) || - vfio_device_state_is_precopy(vbasedev))) { - return false; - } - } - return true; -} - -bool vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer) -{ - VFIODevice *vbasedev; - - QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { - if (vbasedev->device_dirty_page_tracking == ON_OFF_AUTO_OFF) { - return false; - } - if (!vbasedev->dirty_pages_supported) { - return false; - } - } - - return true; -} - -/* - * Check if all VFIO devices are running and migration is active, which is - * essentially equivalent to the migration being in pre-copy phase. 
- */ -bool -vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer) -{ - VFIODevice *vbasedev; - - if (!migration_is_active()) { - return false; - } - - QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { - VFIOMigration *migration = vbasedev->migration; - - if (!migration) { - return false; - } - - if (vfio_device_state_is_running(vbasedev) || - vfio_device_state_is_precopy(vbasedev)) { - continue; - } else { - return false; - } - } - return true; -} - -static bool vfio_listener_skipped_section(MemoryRegionSection *section) -{ - return (!memory_region_is_ram(section->mr) && - !memory_region_is_iommu(section->mr)) || - memory_region_is_protected(section->mr) || - /* - * Sizing an enabled 64-bit BAR can cause spurious mappings to - * addresses in the upper part of the 64-bit address space. These - * are never accessed by the CPU and beyond the address width of - * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width. - */ - section->offset_within_address_space & (1ULL << 63); -} - -/* Called with rcu_read_lock held. */ -static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, - ram_addr_t *ram_addr, bool *read_only, - Error **errp) -{ - bool ret, mr_has_discard_manager; - - ret = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only, - &mr_has_discard_manager, errp); - if (ret && mr_has_discard_manager) { - /* - * Malicious VMs might trigger discarding of IOMMU-mapped memory. The - * pages will remain pinned inside vfio until unmapped, resulting in a - * higher memory consumption than expected. If memory would get - * populated again later, there would be an inconsistency between pages - * pinned by vfio and pages seen by QEMU. This is the case until - * unmapped from the IOMMU (e.g., during device reset). - * - * With malicious guests, we really only care about pinning more memory - * than expected. RLIMIT_MEMLOCK set for the user/process can never be - * exceeded and can be used to mitigate this problem. - */ - warn_report_once("Using vfio with vIOMMUs and coordinated discarding of" - " RAM (e.g., virtio-mem) works, however, malicious" - " guests can trigger pinning of more memory than" - " intended via an IOMMU. It's possible to mitigate " - " by setting/adjusting RLIMIT_MEMLOCK."); - } - return ret; -} - -static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) -{ - VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n); - VFIOContainerBase *bcontainer = giommu->bcontainer; - hwaddr iova = iotlb->iova + giommu->iommu_offset; - void *vaddr; - int ret; - Error *local_err = NULL; - - trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP", - iova, iova + iotlb->addr_mask); - - if (iotlb->target_as != &address_space_memory) { - error_report("Wrong target AS \"%s\", only system memory is allowed", - iotlb->target_as->name ? iotlb->target_as->name : "none"); - vfio_set_migration_error(-EINVAL); - return; - } - - rcu_read_lock(); - - if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { - bool read_only; - - if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, &local_err)) { - error_report_err(local_err); - goto out; - } - /* - * vaddr is only valid until rcu_read_unlock(). But after - * vfio_dma_map has set up the mapping the pages will be - * pinned by the kernel. This makes sure that the RAM backend - * of vaddr will always be there, even if the memory object is - * destroyed and its backing memory munmap-ed. 
- */ - ret = vfio_container_dma_map(bcontainer, iova, - iotlb->addr_mask + 1, vaddr, - read_only); - if (ret) { - error_report("vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", " - "0x%"HWADDR_PRIx", %p) = %d (%s)", - bcontainer, iova, - iotlb->addr_mask + 1, vaddr, ret, strerror(-ret)); - } - } else { - ret = vfio_container_dma_unmap(bcontainer, iova, - iotlb->addr_mask + 1, iotlb); - if (ret) { - error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", " - "0x%"HWADDR_PRIx") = %d (%s)", - bcontainer, iova, - iotlb->addr_mask + 1, ret, strerror(-ret)); - vfio_set_migration_error(ret); - } - } -out: - rcu_read_unlock(); -} - -static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl, - MemoryRegionSection *section) -{ - VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, - listener); - VFIOContainerBase *bcontainer = vrdl->bcontainer; - const hwaddr size = int128_get64(section->size); - const hwaddr iova = section->offset_within_address_space; - int ret; - - /* Unmap with a single call. */ - ret = vfio_container_dma_unmap(bcontainer, iova, size , NULL); - if (ret) { - error_report("%s: vfio_container_dma_unmap() failed: %s", __func__, - strerror(-ret)); - } -} - -static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl, - MemoryRegionSection *section) -{ - VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, - listener); - VFIOContainerBase *bcontainer = vrdl->bcontainer; - const hwaddr end = section->offset_within_region + - int128_get64(section->size); - hwaddr start, next, iova; - void *vaddr; - int ret; - - /* - * Map in (aligned within memory region) minimum granularity, so we can - * unmap in minimum granularity later. - */ - for (start = section->offset_within_region; start < end; start = next) { - next = ROUND_UP(start + 1, vrdl->granularity); - next = MIN(next, end); - - iova = start - section->offset_within_region + - section->offset_within_address_space; - vaddr = memory_region_get_ram_ptr(section->mr) + start; - - ret = vfio_container_dma_map(bcontainer, iova, next - start, - vaddr, section->readonly); - if (ret) { - /* Rollback */ - vfio_ram_discard_notify_discard(rdl, section); - return ret; - } - } - return 0; -} - -static void vfio_register_ram_discard_listener(VFIOContainerBase *bcontainer, - MemoryRegionSection *section) -{ - RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); - VFIORamDiscardListener *vrdl; - - /* Ignore some corner cases not relevant in practice. 
*/ - g_assert(QEMU_IS_ALIGNED(section->offset_within_region, TARGET_PAGE_SIZE)); - g_assert(QEMU_IS_ALIGNED(section->offset_within_address_space, - TARGET_PAGE_SIZE)); - g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE)); - - vrdl = g_new0(VFIORamDiscardListener, 1); - vrdl->bcontainer = bcontainer; - vrdl->mr = section->mr; - vrdl->offset_within_address_space = section->offset_within_address_space; - vrdl->size = int128_get64(section->size); - vrdl->granularity = ram_discard_manager_get_min_granularity(rdm, - section->mr); - - g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity)); - g_assert(bcontainer->pgsizes && - vrdl->granularity >= 1ULL << ctz64(bcontainer->pgsizes)); - - ram_discard_listener_init(&vrdl->listener, - vfio_ram_discard_notify_populate, - vfio_ram_discard_notify_discard, true); - ram_discard_manager_register_listener(rdm, &vrdl->listener, section); - QLIST_INSERT_HEAD(&bcontainer->vrdl_list, vrdl, next); - - /* - * Sanity-check if we have a theoretically problematic setup where we could - * exceed the maximum number of possible DMA mappings over time. We assume - * that each mapped section in the same address space as a RamDiscardManager - * section consumes exactly one DMA mapping, with the exception of - * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections - * in the same address space as RamDiscardManager sections. - * - * We assume that each section in the address space consumes one memslot. - * We take the number of KVM memory slots as a best guess for the maximum - * number of sections in the address space we could have over time, - * also consuming DMA mappings. - */ - if (bcontainer->dma_max_mappings) { - unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512; - -#ifdef CONFIG_KVM - if (kvm_enabled()) { - max_memslots = kvm_get_max_memslots(); - } -#endif - - QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) { - hwaddr start, end; - - start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space, - vrdl->granularity); - end = ROUND_UP(vrdl->offset_within_address_space + vrdl->size, - vrdl->granularity); - vrdl_mappings += (end - start) / vrdl->granularity; - vrdl_count++; - } - - if (vrdl_mappings + max_memslots - vrdl_count > - bcontainer->dma_max_mappings) { - warn_report("%s: possibly running out of DMA mappings. E.g., try" - " increasing the 'block-size' of virtio-mem devies." 
- " Maximum possible DMA mappings: %d, Maximum possible" - " memslots: %d", __func__, bcontainer->dma_max_mappings, - max_memslots); - } - } -} - -static void vfio_unregister_ram_discard_listener(VFIOContainerBase *bcontainer, - MemoryRegionSection *section) -{ - RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); - VFIORamDiscardListener *vrdl = NULL; - - QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) { - if (vrdl->mr == section->mr && - vrdl->offset_within_address_space == - section->offset_within_address_space) { - break; - } - } - - if (!vrdl) { - hw_error("vfio: Trying to unregister missing RAM discard listener"); - } - - ram_discard_manager_unregister_listener(rdm, &vrdl->listener); - QLIST_REMOVE(vrdl, next); - g_free(vrdl); -} - -static bool vfio_known_safe_misalignment(MemoryRegionSection *section) -{ - MemoryRegion *mr = section->mr; - - if (!TPM_IS_CRB(mr->owner)) { - return false; - } - - /* this is a known safe misaligned region, just trace for debug purpose */ - trace_vfio_known_safe_misalignment(memory_region_name(mr), - section->offset_within_address_space, - section->offset_within_region, - qemu_real_host_page_size()); - return true; -} - -static bool vfio_listener_valid_section(MemoryRegionSection *section, - const char *name) -{ - if (vfio_listener_skipped_section(section)) { - trace_vfio_listener_region_skip(name, - section->offset_within_address_space, - section->offset_within_address_space + - int128_get64(int128_sub(section->size, int128_one()))); - return false; - } - - if (unlikely((section->offset_within_address_space & - ~qemu_real_host_page_mask()) != - (section->offset_within_region & ~qemu_real_host_page_mask()))) { - if (!vfio_known_safe_misalignment(section)) { - error_report("%s received unaligned region %s iova=0x%"PRIx64 - " offset_within_region=0x%"PRIx64 - " qemu_real_host_page_size=0x%"PRIxPTR, - __func__, memory_region_name(section->mr), - section->offset_within_address_space, - section->offset_within_region, - qemu_real_host_page_size()); - } - return false; - } - - return true; -} - -static bool vfio_get_section_iova_range(VFIOContainerBase *bcontainer, - MemoryRegionSection *section, - hwaddr *out_iova, hwaddr *out_end, - Int128 *out_llend) -{ - Int128 llend; - hwaddr iova; - - iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space); - llend = int128_make64(section->offset_within_address_space); - llend = int128_add(llend, section->size); - llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask())); - - if (int128_ge(int128_make64(iova), llend)) { - return false; - } - - *out_iova = iova; - *out_end = int128_get64(int128_sub(llend, int128_one())); - if (out_llend) { - *out_llend = llend; - } - return true; -} - -static void vfio_listener_region_add(MemoryListener *listener, - MemoryRegionSection *section) -{ - VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, - listener); - hwaddr iova, end; - Int128 llend, llsize; - void *vaddr; - int ret; - Error *err = NULL; - - if (!vfio_listener_valid_section(section, "region_add")) { - return; - } - - if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end, - &llend)) { - if (memory_region_is_ram_device(section->mr)) { - trace_vfio_listener_region_add_no_dma_map( - memory_region_name(section->mr), - section->offset_within_address_space, - int128_getlo(section->size), - qemu_real_host_page_size()); - } - return; - } - - if (!vfio_container_add_section_window(bcontainer, section, &err)) { - goto fail; - } - - 
memory_region_ref(section->mr); - - if (memory_region_is_iommu(section->mr)) { - VFIOGuestIOMMU *giommu; - IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); - int iommu_idx; - - trace_vfio_listener_region_add_iommu(section->mr->name, iova, end); - /* - * FIXME: For VFIO iommu types which have KVM acceleration to - * avoid bouncing all map/unmaps through qemu this way, this - * would be the right place to wire that up (tell the KVM - * device emulation the VFIO iommu handles to use). - */ - giommu = g_malloc0(sizeof(*giommu)); - giommu->iommu_mr = iommu_mr; - giommu->iommu_offset = section->offset_within_address_space - - section->offset_within_region; - giommu->bcontainer = bcontainer; - llend = int128_add(int128_make64(section->offset_within_region), - section->size); - llend = int128_sub(llend, int128_one()); - iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr, - MEMTXATTRS_UNSPECIFIED); - iommu_notifier_init(&giommu->n, vfio_iommu_map_notify, - IOMMU_NOTIFIER_IOTLB_EVENTS, - section->offset_within_region, - int128_get64(llend), - iommu_idx); - - ret = memory_region_register_iommu_notifier(section->mr, &giommu->n, - &err); - if (ret) { - g_free(giommu); - goto fail; - } - QLIST_INSERT_HEAD(&bcontainer->giommu_list, giommu, giommu_next); - memory_region_iommu_replay(giommu->iommu_mr, &giommu->n); - - return; - } - - /* Here we assume that memory_region_is_ram(section->mr)==true */ - - /* - * For RAM memory regions with a RamDiscardManager, we only want to map the - * actually populated parts - and update the mapping whenever we're notified - * about changes. - */ - if (memory_region_has_ram_discard_manager(section->mr)) { - vfio_register_ram_discard_listener(bcontainer, section); - return; - } - - vaddr = memory_region_get_ram_ptr(section->mr) + - section->offset_within_region + - (iova - section->offset_within_address_space); - - trace_vfio_listener_region_add_ram(iova, end, vaddr); - - llsize = int128_sub(llend, int128_make64(iova)); - - if (memory_region_is_ram_device(section->mr)) { - hwaddr pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1; - - if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) { - trace_vfio_listener_region_add_no_dma_map( - memory_region_name(section->mr), - section->offset_within_address_space, - int128_getlo(section->size), - pgmask + 1); - return; - } - } - - ret = vfio_container_dma_map(bcontainer, iova, int128_get64(llsize), - vaddr, section->readonly); - if (ret) { - error_setg(&err, "vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", " - "0x%"HWADDR_PRIx", %p) = %d (%s)", - bcontainer, iova, int128_get64(llsize), vaddr, ret, - strerror(-ret)); - if (memory_region_is_ram_device(section->mr)) { - /* Allow unexpected mappings not to be fatal for RAM devices */ - error_report_err(err); - return; - } - goto fail; - } - - return; - -fail: - if (memory_region_is_ram_device(section->mr)) { - error_reportf_err(err, "PCI p2p may not work: "); - return; - } - /* - * On the initfn path, store the first error in the container so we - * can gracefully fail. Runtime, there's not much we can do other - * than throw a hardware error. 
- */ - if (!bcontainer->initialized) { - if (!bcontainer->error) { - error_propagate_prepend(&bcontainer->error, err, - "Region %s: ", - memory_region_name(section->mr)); - } else { - error_free(err); - } - } else { - error_report_err(err); - hw_error("vfio: DMA mapping failed, unable to continue"); - } -} - -static void vfio_listener_region_del(MemoryListener *listener, - MemoryRegionSection *section) -{ - VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, - listener); - hwaddr iova, end; - Int128 llend, llsize; - int ret; - bool try_unmap = true; - - if (!vfio_listener_valid_section(section, "region_del")) { - return; - } - - if (memory_region_is_iommu(section->mr)) { - VFIOGuestIOMMU *giommu; - - trace_vfio_listener_region_del_iommu(section->mr->name); - QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) { - if (MEMORY_REGION(giommu->iommu_mr) == section->mr && - giommu->n.start == section->offset_within_region) { - memory_region_unregister_iommu_notifier(section->mr, - &giommu->n); - QLIST_REMOVE(giommu, giommu_next); - g_free(giommu); - break; - } - } - - /* - * FIXME: We assume the one big unmap below is adequate to - * remove any individual page mappings in the IOMMU which - * might have been copied into VFIO. This works for a page table - * based IOMMU where a big unmap flattens a large range of IO-PTEs. - * That may not be true for all IOMMU types. - */ - } - - if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end, - &llend)) { - return; - } - - llsize = int128_sub(llend, int128_make64(iova)); - - trace_vfio_listener_region_del(iova, end); - - if (memory_region_is_ram_device(section->mr)) { - hwaddr pgmask; - - pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1; - try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask)); - } else if (memory_region_has_ram_discard_manager(section->mr)) { - vfio_unregister_ram_discard_listener(bcontainer, section); - /* Unregistering will trigger an unmap. */ - try_unmap = false; - } - - if (try_unmap) { - if (int128_eq(llsize, int128_2_64())) { - /* The unmap ioctl doesn't accept a full 64-bit span. 
*/ - llsize = int128_rshift(llsize, 1); - ret = vfio_container_dma_unmap(bcontainer, iova, - int128_get64(llsize), NULL); - if (ret) { - error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", " - "0x%"HWADDR_PRIx") = %d (%s)", - bcontainer, iova, int128_get64(llsize), ret, - strerror(-ret)); - } - iova += int128_get64(llsize); - } - ret = vfio_container_dma_unmap(bcontainer, iova, - int128_get64(llsize), NULL); - if (ret) { - error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", " - "0x%"HWADDR_PRIx") = %d (%s)", - bcontainer, iova, int128_get64(llsize), ret, - strerror(-ret)); - } - } - - memory_region_unref(section->mr); - - vfio_container_del_section_window(bcontainer, section); -} - -typedef struct VFIODirtyRanges { - hwaddr min32; - hwaddr max32; - hwaddr min64; - hwaddr max64; - hwaddr minpci64; - hwaddr maxpci64; -} VFIODirtyRanges; - -typedef struct VFIODirtyRangesListener { - VFIOContainerBase *bcontainer; - VFIODirtyRanges ranges; - MemoryListener listener; -} VFIODirtyRangesListener; - -static bool vfio_section_is_vfio_pci(MemoryRegionSection *section, - VFIOContainerBase *bcontainer) -{ - VFIOPCIDevice *pcidev; - VFIODevice *vbasedev; - Object *owner; - - owner = memory_region_owner(section->mr); - - QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { - if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) { - continue; - } - pcidev = container_of(vbasedev, VFIOPCIDevice, vbasedev); - if (OBJECT(pcidev) == owner) { - return true; - } - } - - return false; -} - -static void vfio_dirty_tracking_update_range(VFIODirtyRanges *range, - hwaddr iova, hwaddr end, - bool update_pci) -{ - hwaddr *min, *max; - - /* - * The address space passed to the dirty tracker is reduced to three ranges: - * one for 32-bit DMA ranges, one for 64-bit DMA ranges and one for the - * PCI 64-bit hole. - * - * The underlying reports of dirty will query a sub-interval of each of - * these ranges. - * - * The purpose of the three range handling is to handle known cases of big - * holes in the address space, like the x86 AMD 1T hole, and firmware (like - * OVMF) which may relocate the pci-hole64 to the end of the address space. - * The latter would otherwise generate large ranges for tracking, stressing - * the limits of supported hardware. The pci-hole32 will always be below 4G - * (overlapping or not) so it doesn't need special handling and is part of - * the 32-bit range. - * - * The alternative would be an IOVATree but that has a much bigger runtime - * overhead and unnecessary complexity. - */ - if (update_pci && iova >= UINT32_MAX) { - min = &range->minpci64; - max = &range->maxpci64; - } else { - min = (end <= UINT32_MAX) ? &range->min32 : &range->min64; - max = (end <= UINT32_MAX) ? 
&range->max32 : &range->max64; - } - if (*min > iova) { - *min = iova; - } - if (*max < end) { - *max = end; - } - - trace_vfio_device_dirty_tracking_update(iova, end, *min, *max); -} - -static void vfio_dirty_tracking_update(MemoryListener *listener, - MemoryRegionSection *section) -{ - VFIODirtyRangesListener *dirty = - container_of(listener, VFIODirtyRangesListener, listener); - hwaddr iova, end; - - if (!vfio_listener_valid_section(section, "tracking_update") || - !vfio_get_section_iova_range(dirty->bcontainer, section, - &iova, &end, NULL)) { - return; - } - - vfio_dirty_tracking_update_range(&dirty->ranges, iova, end, - vfio_section_is_vfio_pci(section, dirty->bcontainer)); -} - -static const MemoryListener vfio_dirty_tracking_listener = { - .name = "vfio-tracking", - .region_add = vfio_dirty_tracking_update, -}; - -static void vfio_dirty_tracking_init(VFIOContainerBase *bcontainer, - VFIODirtyRanges *ranges) -{ - VFIODirtyRangesListener dirty; - - memset(&dirty, 0, sizeof(dirty)); - dirty.ranges.min32 = UINT32_MAX; - dirty.ranges.min64 = UINT64_MAX; - dirty.ranges.minpci64 = UINT64_MAX; - dirty.listener = vfio_dirty_tracking_listener; - dirty.bcontainer = bcontainer; - - memory_listener_register(&dirty.listener, - bcontainer->space->as); - - *ranges = dirty.ranges; - - /* - * The memory listener is synchronous, and used to calculate the range - * to dirty tracking. Unregister it after we are done as we are not - * interested in any follow-up updates. - */ - memory_listener_unregister(&dirty.listener); -} - -static void vfio_devices_dma_logging_stop(VFIOContainerBase *bcontainer) -{ - uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature), - sizeof(uint64_t))] = {}; - struct vfio_device_feature *feature = (struct vfio_device_feature *)buf; - VFIODevice *vbasedev; - - feature->argsz = sizeof(buf); - feature->flags = VFIO_DEVICE_FEATURE_SET | - VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP; - - QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { - if (!vbasedev->dirty_tracking) { - continue; - } - - if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) { - warn_report("%s: Failed to stop DMA logging, err %d (%s)", - vbasedev->name, -errno, strerror(errno)); - } - vbasedev->dirty_tracking = false; - } -} - -static struct vfio_device_feature * -vfio_device_feature_dma_logging_start_create(VFIOContainerBase *bcontainer, - VFIODirtyRanges *tracking) -{ - struct vfio_device_feature *feature; - size_t feature_size; - struct vfio_device_feature_dma_logging_control *control; - struct vfio_device_feature_dma_logging_range *ranges; - - feature_size = sizeof(struct vfio_device_feature) + - sizeof(struct vfio_device_feature_dma_logging_control); - feature = g_try_malloc0(feature_size); - if (!feature) { - errno = ENOMEM; - return NULL; - } - feature->argsz = feature_size; - feature->flags = VFIO_DEVICE_FEATURE_SET | - VFIO_DEVICE_FEATURE_DMA_LOGGING_START; - - control = (struct vfio_device_feature_dma_logging_control *)feature->data; - control->page_size = qemu_real_host_page_size(); - - /* - * DMA logging uAPI guarantees to support at least a number of ranges that - * fits into a single host kernel base page. 
- */ - control->num_ranges = !!tracking->max32 + !!tracking->max64 + - !!tracking->maxpci64; - ranges = g_try_new0(struct vfio_device_feature_dma_logging_range, - control->num_ranges); - if (!ranges) { - g_free(feature); - errno = ENOMEM; - - return NULL; - } - - control->ranges = (uintptr_t)ranges; - if (tracking->max32) { - ranges->iova = tracking->min32; - ranges->length = (tracking->max32 - tracking->min32) + 1; - ranges++; - } - if (tracking->max64) { - ranges->iova = tracking->min64; - ranges->length = (tracking->max64 - tracking->min64) + 1; - ranges++; - } - if (tracking->maxpci64) { - ranges->iova = tracking->minpci64; - ranges->length = (tracking->maxpci64 - tracking->minpci64) + 1; - } - - trace_vfio_device_dirty_tracking_start(control->num_ranges, - tracking->min32, tracking->max32, - tracking->min64, tracking->max64, - tracking->minpci64, tracking->maxpci64); - - return feature; -} - -static void vfio_device_feature_dma_logging_start_destroy( - struct vfio_device_feature *feature) -{ - struct vfio_device_feature_dma_logging_control *control = - (struct vfio_device_feature_dma_logging_control *)feature->data; - struct vfio_device_feature_dma_logging_range *ranges = - (struct vfio_device_feature_dma_logging_range *)(uintptr_t)control->ranges; - - g_free(ranges); - g_free(feature); -} - -static bool vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer, - Error **errp) -{ - struct vfio_device_feature *feature; - VFIODirtyRanges ranges; - VFIODevice *vbasedev; - int ret = 0; - - vfio_dirty_tracking_init(bcontainer, &ranges); - feature = vfio_device_feature_dma_logging_start_create(bcontainer, - &ranges); - if (!feature) { - error_setg_errno(errp, errno, "Failed to prepare DMA logging"); - return false; - } - - QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { - if (vbasedev->dirty_tracking) { - continue; - } - - ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature); - if (ret) { - ret = -errno; - error_setg_errno(errp, errno, "%s: Failed to start DMA logging", - vbasedev->name); - goto out; - } - vbasedev->dirty_tracking = true; - } - -out: - if (ret) { - vfio_devices_dma_logging_stop(bcontainer); - } - - vfio_device_feature_dma_logging_start_destroy(feature); - - return ret == 0; -} - -static bool vfio_listener_log_global_start(MemoryListener *listener, - Error **errp) -{ - ERRP_GUARD(); - VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, - listener); - bool ret; - - if (vfio_devices_all_device_dirty_tracking(bcontainer)) { - ret = vfio_devices_dma_logging_start(bcontainer, errp); - } else { - ret = vfio_container_set_dirty_page_tracking(bcontainer, true, errp) == 0; - } - - if (!ret) { - error_prepend(errp, "vfio: Could not start dirty page tracking - "); - } - return ret; -} - -static void vfio_listener_log_global_stop(MemoryListener *listener) -{ - VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, - listener); - Error *local_err = NULL; - int ret = 0; - - if (vfio_devices_all_device_dirty_tracking(bcontainer)) { - vfio_devices_dma_logging_stop(bcontainer); - } else { - ret = vfio_container_set_dirty_page_tracking(bcontainer, false, - &local_err); - } - - if (ret) { - error_prepend(&local_err, - "vfio: Could not stop dirty page tracking - "); - error_report_err(local_err); - vfio_set_migration_error(ret); - } -} - -static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova, - hwaddr size, void *bitmap) -{ - uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) + - sizeof(struct 
vfio_device_feature_dma_logging_report), - sizeof(uint64_t))] = {}; - struct vfio_device_feature *feature = (struct vfio_device_feature *)buf; - struct vfio_device_feature_dma_logging_report *report = - (struct vfio_device_feature_dma_logging_report *)feature->data; - - report->iova = iova; - report->length = size; - report->page_size = qemu_real_host_page_size(); - report->bitmap = (uintptr_t)bitmap; - - feature->argsz = sizeof(buf); - feature->flags = VFIO_DEVICE_FEATURE_GET | - VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT; - - if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) { - return -errno; - } - - return 0; -} - -int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer, - VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp) -{ - VFIODevice *vbasedev; - int ret; - - QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { - ret = vfio_device_dma_logging_report(vbasedev, iova, size, - vbmap->bitmap); - if (ret) { - error_setg_errno(errp, -ret, - "%s: Failed to get DMA logging report, iova: " - "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx, - vbasedev->name, iova, size); - - return ret; - } - } - - return 0; -} - -int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova, - uint64_t size, ram_addr_t ram_addr, Error **errp) -{ - bool all_device_dirty_tracking = - vfio_devices_all_device_dirty_tracking(bcontainer); - uint64_t dirty_pages; - VFIOBitmap vbmap; - int ret; - - if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) { - cpu_physical_memory_set_dirty_range(ram_addr, size, - tcg_enabled() ? DIRTY_CLIENTS_ALL : - DIRTY_CLIENTS_NOCODE); - return 0; - } - - ret = vfio_bitmap_alloc(&vbmap, size); - if (ret) { - error_setg_errno(errp, -ret, - "Failed to allocate dirty tracking bitmap"); - return ret; - } - - if (all_device_dirty_tracking) { - ret = vfio_devices_query_dirty_bitmap(bcontainer, &vbmap, iova, size, - errp); - } else { - ret = vfio_container_query_dirty_bitmap(bcontainer, &vbmap, iova, size, - errp); - } - - if (ret) { - goto out; - } - - dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr, - vbmap.pages); - - trace_vfio_get_dirty_bitmap(iova, size, vbmap.size, ram_addr, dirty_pages); -out: - g_free(vbmap.bitmap); - - return ret; -} - -typedef struct { - IOMMUNotifier n; - VFIOGuestIOMMU *giommu; -} vfio_giommu_dirty_notifier; - -static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) -{ - vfio_giommu_dirty_notifier *gdn = container_of(n, - vfio_giommu_dirty_notifier, n); - VFIOGuestIOMMU *giommu = gdn->giommu; - VFIOContainerBase *bcontainer = giommu->bcontainer; - hwaddr iova = iotlb->iova + giommu->iommu_offset; - ram_addr_t translated_addr; - Error *local_err = NULL; - int ret = -EINVAL; - - trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask); - - if (iotlb->target_as != &address_space_memory) { - error_report("Wrong target AS \"%s\", only system memory is allowed", - iotlb->target_as->name ? 
iotlb->target_as->name : "none"); - goto out; - } - - rcu_read_lock(); - if (!vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL, &local_err)) { - error_report_err(local_err); - goto out_unlock; - } - - ret = vfio_get_dirty_bitmap(bcontainer, iova, iotlb->addr_mask + 1, - translated_addr, &local_err); - if (ret) { - error_prepend(&local_err, - "vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", " - "0x%"HWADDR_PRIx") failed - ", bcontainer, iova, - iotlb->addr_mask + 1); - error_report_err(local_err); - } - -out_unlock: - rcu_read_unlock(); - -out: - if (ret) { - vfio_set_migration_error(ret); - } -} - -static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section, - void *opaque) -{ - const hwaddr size = int128_get64(section->size); - const hwaddr iova = section->offset_within_address_space; - const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) + - section->offset_within_region; - VFIORamDiscardListener *vrdl = opaque; - Error *local_err = NULL; - int ret; - - /* - * Sync the whole mapped region (spanning multiple individual mappings) - * in one go. - */ - ret = vfio_get_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr, - &local_err); - if (ret) { - error_report_err(local_err); - } - return ret; -} - -static int -vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainerBase *bcontainer, - MemoryRegionSection *section) -{ - RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); - VFIORamDiscardListener *vrdl = NULL; - - QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) { - if (vrdl->mr == section->mr && - vrdl->offset_within_address_space == - section->offset_within_address_space) { - break; - } - } - - if (!vrdl) { - hw_error("vfio: Trying to sync missing RAM discard listener"); - } - - /* - * We only want/can synchronize the bitmap for actually mapped parts - - * which correspond to populated parts. Replay all populated parts. 
- */ - return ram_discard_manager_replay_populated(rdm, section, - vfio_ram_discard_get_dirty_bitmap, - &vrdl); -} - -static int vfio_sync_iommu_dirty_bitmap(VFIOContainerBase *bcontainer, - MemoryRegionSection *section) -{ - VFIOGuestIOMMU *giommu; - bool found = false; - Int128 llend; - vfio_giommu_dirty_notifier gdn; - int idx; - - QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) { - if (MEMORY_REGION(giommu->iommu_mr) == section->mr && - giommu->n.start == section->offset_within_region) { - found = true; - break; - } - } - - if (!found) { - return 0; - } - - gdn.giommu = giommu; - idx = memory_region_iommu_attrs_to_index(giommu->iommu_mr, - MEMTXATTRS_UNSPECIFIED); - - llend = int128_add(int128_make64(section->offset_within_region), - section->size); - llend = int128_sub(llend, int128_one()); - - iommu_notifier_init(&gdn.n, vfio_iommu_map_dirty_notify, IOMMU_NOTIFIER_MAP, - section->offset_within_region, int128_get64(llend), - idx); - memory_region_iommu_replay(giommu->iommu_mr, &gdn.n); - - return 0; -} - -static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer, - MemoryRegionSection *section, Error **errp) -{ - ram_addr_t ram_addr; - - if (memory_region_is_iommu(section->mr)) { - return vfio_sync_iommu_dirty_bitmap(bcontainer, section); - } else if (memory_region_has_ram_discard_manager(section->mr)) { - int ret; - - ret = vfio_sync_ram_discard_listener_dirty_bitmap(bcontainer, section); - if (ret) { - error_setg(errp, - "Failed to sync dirty bitmap with RAM discard listener"); - } - return ret; - } - - ram_addr = memory_region_get_ram_addr(section->mr) + - section->offset_within_region; - - return vfio_get_dirty_bitmap(bcontainer, - REAL_HOST_PAGE_ALIGN(section->offset_within_address_space), - int128_get64(section->size), ram_addr, errp); -} - -static void vfio_listener_log_sync(MemoryListener *listener, - MemoryRegionSection *section) -{ - VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, - listener); - int ret; - Error *local_err = NULL; - - if (vfio_listener_skipped_section(section)) { - return; - } - - if (vfio_devices_all_dirty_tracking(bcontainer)) { - ret = vfio_sync_dirty_bitmap(bcontainer, section, &local_err); - if (ret) { - error_report_err(local_err); - vfio_set_migration_error(ret); - } - } -} - -const MemoryListener vfio_memory_listener = { - .name = "vfio", - .region_add = vfio_listener_region_add, - .region_del = vfio_listener_region_del, - .log_global_start = vfio_listener_log_global_start, - .log_global_stop = vfio_listener_log_global_stop, - .log_sync = vfio_listener_log_sync, -}; - -void vfio_reset_handler(void *opaque) -{ - VFIODevice *vbasedev; - - QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) { - if (vbasedev->dev->realized) { - vbasedev->ops->vfio_compute_needs_reset(vbasedev); - } - } - - QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) { - if (vbasedev->dev->realized && vbasedev->needs_reset) { - vbasedev->ops->vfio_hot_reset_multi(vbasedev); - } - } -} - -int vfio_kvm_device_add_fd(int fd, Error **errp) -{ -#ifdef CONFIG_KVM - struct kvm_device_attr attr = { - .group = KVM_DEV_VFIO_FILE, - .attr = KVM_DEV_VFIO_FILE_ADD, - .addr = (uint64_t)(unsigned long)&fd, - }; - - if (!kvm_enabled()) { - return 0; - } - - if (vfio_kvm_device_fd < 0) { - struct kvm_create_device cd = { - .type = KVM_DEV_TYPE_VFIO, - }; - - if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) { - error_setg_errno(errp, errno, "Failed to create KVM VFIO device"); - return -errno; - } - - vfio_kvm_device_fd = cd.fd; - } - - if 
(ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { - error_setg_errno(errp, errno, "Failed to add fd %d to KVM VFIO device", - fd); - return -errno; - } -#endif - return 0; -} - -int vfio_kvm_device_del_fd(int fd, Error **errp) -{ -#ifdef CONFIG_KVM - struct kvm_device_attr attr = { - .group = KVM_DEV_VFIO_FILE, - .attr = KVM_DEV_VFIO_FILE_DEL, - .addr = (uint64_t)(unsigned long)&fd, - }; - - if (vfio_kvm_device_fd < 0) { - error_setg(errp, "KVM VFIO device isn't created yet"); - return -EINVAL; - } - - if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { - error_setg_errno(errp, errno, - "Failed to remove fd %d from KVM VFIO device", fd); - return -errno; - } -#endif - return 0; -} - -VFIOAddressSpace *vfio_get_address_space(AddressSpace *as) -{ - VFIOAddressSpace *space; - - QLIST_FOREACH(space, &vfio_address_spaces, list) { - if (space->as == as) { - return space; - } - } - - /* No suitable VFIOAddressSpace, create a new one */ - space = g_malloc0(sizeof(*space)); - space->as = as; - QLIST_INIT(&space->containers); - - if (QLIST_EMPTY(&vfio_address_spaces)) { - qemu_register_reset(vfio_reset_handler, NULL); - } - - QLIST_INSERT_HEAD(&vfio_address_spaces, space, list); - - return space; -} - -void vfio_put_address_space(VFIOAddressSpace *space) -{ - if (!QLIST_EMPTY(&space->containers)) { - return; - } - - QLIST_REMOVE(space, list); - g_free(space); - - if (QLIST_EMPTY(&vfio_address_spaces)) { - qemu_unregister_reset(vfio_reset_handler, NULL); - } -} - -void vfio_address_space_insert(VFIOAddressSpace *space, - VFIOContainerBase *bcontainer) -{ - QLIST_INSERT_HEAD(&space->containers, bcontainer, next); - bcontainer->space = space; -} - -struct vfio_device_info *vfio_get_device_info(int fd) -{ - struct vfio_device_info *info; - uint32_t argsz = sizeof(*info); - - info = g_malloc0(argsz); - -retry: - info->argsz = argsz; - - if (ioctl(fd, VFIO_DEVICE_GET_INFO, info)) { - g_free(info); - return NULL; - } - - if (info->argsz > argsz) { - argsz = info->argsz; - info = g_realloc(info, argsz); - goto retry; - } - - return info; -} - -bool vfio_attach_device(char *name, VFIODevice *vbasedev, - AddressSpace *as, Error **errp) -{ - const VFIOIOMMUClass *ops = - VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_LEGACY)); - HostIOMMUDevice *hiod = NULL; - - if (vbasedev->iommufd) { - ops = VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD)); - } - - assert(ops); - - - if (!vbasedev->mdev) { - hiod = HOST_IOMMU_DEVICE(object_new(ops->hiod_typename)); - vbasedev->hiod = hiod; - } - - if (!ops->attach_device(name, vbasedev, as, errp)) { - object_unref(hiod); - vbasedev->hiod = NULL; - return false; - } - - return true; -} - -void vfio_detach_device(VFIODevice *vbasedev) -{ - if (!vbasedev->bcontainer) { - return; - } - object_unref(vbasedev->hiod); - VFIO_IOMMU_GET_CLASS(vbasedev->bcontainer)->detach_device(vbasedev); -} diff --git a/hw/vfio/container-base.c b/hw/vfio/container-base.c index 809b1576742..708ac4d95e5 100644 --- a/hw/vfio/container-base.c +++ b/hw/vfio/container-base.c @@ -10,29 +10,98 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ +#include +#include + #include "qemu/osdep.h" +#include "system/tcg.h" +#include "system/ram_addr.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "hw/vfio/vfio-container-base.h" +#include "hw/vfio/vfio-device.h" /* vfio_device_reset_handler */ +#include "system/reset.h" +#include "vfio-helpers.h" + +#include "trace.h" + +static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces = + 
QLIST_HEAD_INITIALIZER(vfio_address_spaces); + +VFIOAddressSpace *vfio_address_space_get(AddressSpace *as) +{ + VFIOAddressSpace *space; + + QLIST_FOREACH(space, &vfio_address_spaces, list) { + if (space->as == as) { + return space; + } + } + + /* No suitable VFIOAddressSpace, create a new one */ + space = g_malloc0(sizeof(*space)); + space->as = as; + QLIST_INIT(&space->containers); + + if (QLIST_EMPTY(&vfio_address_spaces)) { + qemu_register_reset(vfio_device_reset_handler, NULL); + } + + QLIST_INSERT_HEAD(&vfio_address_spaces, space, list); + + return space; +} + +void vfio_address_space_put(VFIOAddressSpace *space) +{ + if (!QLIST_EMPTY(&space->containers)) { + return; + } + + QLIST_REMOVE(space, list); + g_free(space); + + if (QLIST_EMPTY(&vfio_address_spaces)) { + qemu_unregister_reset(vfio_device_reset_handler, NULL); + } +} + +void vfio_address_space_insert(VFIOAddressSpace *space, + VFIOContainerBase *bcontainer) +{ + QLIST_INSERT_HEAD(&space->containers, bcontainer, next); + bcontainer->space = space; +} int vfio_container_dma_map(VFIOContainerBase *bcontainer, hwaddr iova, ram_addr_t size, - void *vaddr, bool readonly) + void *vaddr, bool readonly, MemoryRegion *mr) { VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); + RAMBlock *rb = mr->ram_block; + int mfd = rb ? qemu_ram_get_fd(rb) : -1; + if (mfd >= 0 && vioc->dma_map_file) { + unsigned long start = vaddr - qemu_ram_get_host_addr(rb); + unsigned long offset = qemu_ram_get_fd_offset(rb); + + if (!vioc->dma_map_file(bcontainer, iova, size, mfd, start + offset, + readonly)) { + return 0; + } + } g_assert(vioc->dma_map); - return vioc->dma_map(bcontainer, iova, size, vaddr, readonly); + return vioc->dma_map(bcontainer, iova, size, vaddr, readonly, mr); } int vfio_container_dma_unmap(VFIOContainerBase *bcontainer, hwaddr iova, ram_addr_t size, - IOMMUTLBEntry *iotlb) + IOMMUTLBEntry *iotlb, bool unmap_all) { VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); g_assert(vioc->dma_unmap); - return vioc->dma_unmap(bcontainer, iova, size, iotlb); + return vioc->dma_unmap(bcontainer, iova, size, iotlb, unmap_all); } bool vfio_container_add_section_window(VFIOContainerBase *bcontainer, @@ -64,16 +133,86 @@ int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer, bool start, Error **errp) { VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); + int ret; if (!bcontainer->dirty_pages_supported) { return 0; } g_assert(vioc->set_dirty_page_tracking); - return vioc->set_dirty_page_tracking(bcontainer, start, errp); + if (bcontainer->dirty_pages_started == start) { + return 0; + } + + ret = vioc->set_dirty_page_tracking(bcontainer, start, errp); + if (!ret) { + bcontainer->dirty_pages_started = start; + } + + return ret; +} + +static bool vfio_container_devices_dirty_tracking_is_started( + const VFIOContainerBase *bcontainer) +{ + VFIODevice *vbasedev; + + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { + if (!vbasedev->dirty_tracking) { + return false; + } + } + + return true; } -int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer, +bool vfio_container_dirty_tracking_is_started( + const VFIOContainerBase *bcontainer) +{ + return vfio_container_devices_dirty_tracking_is_started(bcontainer) || + bcontainer->dirty_pages_started; +} + +bool vfio_container_devices_dirty_tracking_is_supported( + const VFIOContainerBase *bcontainer) +{ + VFIODevice *vbasedev; + + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { + if (vbasedev->device_dirty_page_tracking == 
ON_OFF_AUTO_OFF) { + return false; + } + if (!vbasedev->dirty_pages_supported) { + return false; + } + } + + return true; +} + +static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova, + hwaddr size, void *bitmap) +{ + uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) + + sizeof(struct vfio_device_feature_dma_logging_report), + sizeof(uint64_t))] = {}; + struct vfio_device_feature *feature = (struct vfio_device_feature *)buf; + struct vfio_device_feature_dma_logging_report *report = + (struct vfio_device_feature_dma_logging_report *)feature->data; + + report->iova = iova; + report->length = size; + report->page_size = qemu_real_host_page_size(); + report->bitmap = (uintptr_t)bitmap; + + feature->argsz = sizeof(buf); + feature->flags = VFIO_DEVICE_FEATURE_GET | + VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT; + + return vbasedev->io_ops->device_feature(vbasedev, feature); +} + +static int vfio_container_iommu_query_dirty_bitmap(const VFIOContainerBase *bcontainer, VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp) { VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); @@ -83,6 +222,74 @@ int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer, errp); } +static int vfio_container_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer, + VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp) +{ + VFIODevice *vbasedev; + int ret; + + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { + ret = vfio_device_dma_logging_report(vbasedev, iova, size, + vbmap->bitmap); + if (ret) { + error_setg_errno(errp, -ret, + "%s: Failed to get DMA logging report, iova: " + "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx, + vbasedev->name, iova, size); + + return ret; + } + } + + return 0; +} + +int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova, + uint64_t size, ram_addr_t ram_addr, Error **errp) +{ + bool all_device_dirty_tracking = + vfio_container_devices_dirty_tracking_is_supported(bcontainer); + uint64_t dirty_pages; + VFIOBitmap vbmap; + int ret; + + if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) { + cpu_physical_memory_set_dirty_range(ram_addr, size, + tcg_enabled() ? 
DIRTY_CLIENTS_ALL : + DIRTY_CLIENTS_NOCODE); + return 0; + } + + ret = vfio_bitmap_alloc(&vbmap, size); + if (ret) { + error_setg_errno(errp, -ret, + "Failed to allocate dirty tracking bitmap"); + return ret; + } + + if (all_device_dirty_tracking) { + ret = vfio_container_devices_query_dirty_bitmap(bcontainer, &vbmap, iova, size, + errp); + } else { + ret = vfio_container_iommu_query_dirty_bitmap(bcontainer, &vbmap, iova, size, + errp); + } + + if (ret) { + goto out; + } + + dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr, + vbmap.pages); + + trace_vfio_container_query_dirty_bitmap(iova, size, vbmap.size, ram_addr, + dirty_pages); +out: + g_free(vbmap.bitmap); + + return ret; +} + static gpointer copy_iova_range(gconstpointer src, gpointer data) { Range *source = (Range *)src; @@ -103,7 +310,7 @@ static void vfio_container_instance_finalize(Object *obj) VFIOContainerBase *bcontainer = VFIO_IOMMU(obj); VFIOGuestIOMMU *giommu, *tmp; - QLIST_REMOVE(bcontainer, next); + QLIST_SAFE_REMOVE(bcontainer, next); QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next, tmp) { memory_region_unregister_iommu_notifier( diff --git a/hw/vfio/container.c b/hw/vfio/container.c index 9ccdb639ac8..3e13feaa74c 100644 --- a/hw/vfio/container.c +++ b/hw/vfio/container.c @@ -22,18 +22,26 @@ #include #include -#include "hw/vfio/vfio-common.h" -#include "exec/address-spaces.h" -#include "exec/memory.h" -#include "exec/ram_addr.h" +#include "hw/vfio/vfio-device.h" +#include "system/address-spaces.h" +#include "system/memory.h" +#include "system/ram_addr.h" #include "qemu/error-report.h" #include "qemu/range.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "trace.h" #include "qapi/error.h" +#include "migration/cpr.h" +#include "migration/blocker.h" #include "pci.h" +#include "hw/vfio/vfio-container.h" +#include "vfio-helpers.h" +#include "vfio-listener.h" -VFIOGroupList vfio_group_list = +#define TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO TYPE_HOST_IOMMU_DEVICE "-legacy-vfio" + +typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList; +static VFIOGroupList vfio_group_list = QLIST_HEAD_INITIALIZER(vfio_group_list); static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state) @@ -112,12 +120,9 @@ static int vfio_dma_unmap_bitmap(const VFIOContainer *container, return ret; } -/* - * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 - */ -static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, - IOMMUTLBEntry *iotlb) +static int vfio_legacy_dma_unmap_one(const VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb) { const VFIOContainer *container = container_of(bcontainer, VFIOContainer, bcontainer); @@ -131,8 +136,10 @@ static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer, int ret; Error *local_err = NULL; - if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) { - if (!vfio_devices_all_device_dirty_tracking(bcontainer) && + g_assert(!cpr_is_incoming()); + + if (iotlb && vfio_container_dirty_tracking_is_started(bcontainer)) { + if (!vfio_container_devices_dirty_tracking_is_supported(bcontainer) && bcontainer->dirty_pages_supported) { return vfio_dma_unmap_bitmap(container, iova, size, iotlb); } @@ -159,12 +166,11 @@ static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer, unmap.size -= 1ULL << ctz64(bcontainer->pgsizes); continue; } - error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno)); return -errno; } if 
(need_dirty_sync) { - ret = vfio_get_dirty_bitmap(bcontainer, iova, size, + ret = vfio_container_query_dirty_bitmap(bcontainer, iova, size, iotlb->translated_addr, &local_err); if (ret) { error_report_err(local_err); @@ -175,8 +181,37 @@ static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer, return 0; } +/* + * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 + */ +static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb, bool unmap_all) +{ + int ret; + + if (unmap_all) { + /* The unmap ioctl doesn't accept a full 64-bit span. */ + Int128 llsize = int128_rshift(int128_2_64(), 1); + + ret = vfio_legacy_dma_unmap_one(bcontainer, 0, int128_get64(llsize), + iotlb); + + if (ret == 0) { + ret = vfio_legacy_dma_unmap_one(bcontainer, int128_get64(llsize), + int128_get64(llsize), iotlb); + } + + } else { + ret = vfio_legacy_dma_unmap_one(bcontainer, iova, size, iotlb); + } + + return ret; +} + static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova, - ram_addr_t size, void *vaddr, bool readonly) + ram_addr_t size, void *vaddr, bool readonly, + MemoryRegion *mr) { const VFIOContainer *container = container_of(bcontainer, VFIOContainer, bcontainer); @@ -199,12 +234,11 @@ static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova, */ if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || (errno == EBUSY && - vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 && + vfio_legacy_dma_unmap(bcontainer, iova, size, NULL, false) == 0 && ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { return 0; } - error_report("VFIO_MAP_DMA failed: %s", strerror(errno)); return -errno; } @@ -275,37 +309,6 @@ static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer, return ret; } -static struct vfio_info_cap_header * -vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) -{ - if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { - return NULL; - } - - return vfio_get_cap((void *)info, info->cap_offset, id); -} - -bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info, - unsigned int *avail) -{ - struct vfio_info_cap_header *hdr; - struct vfio_iommu_type1_info_dma_avail *cap; - - /* If the capability cannot be found, assume no DMA limiting */ - hdr = vfio_get_iommu_type1_info_cap(info, - VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL); - if (!hdr) { - return false; - } - - if (avail != NULL) { - cap = (void *) hdr; - *avail = cap->avail; - } - - return true; -} - static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info, VFIOContainerBase *bcontainer) { @@ -332,7 +335,7 @@ static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info, return true; } -static void vfio_kvm_device_add_group(VFIOGroup *group) +static void vfio_group_add_kvm_device(VFIOGroup *group) { Error *err = NULL; @@ -341,7 +344,7 @@ static void vfio_kvm_device_add_group(VFIOGroup *group) } } -static void vfio_kvm_device_del_group(VFIOGroup *group) +static void vfio_group_del_kvm_device(VFIOGroup *group) { Error *err = NULL; @@ -426,7 +429,12 @@ static VFIOContainer *vfio_create_container(int fd, VFIOGroup *group, return NULL; } - if (!vfio_set_iommu(fd, group->fd, &iommu_type, errp)) { + /* + * During CPR, just set the container type and skip the ioctls, as the + * container and group are already configured in the kernel. 
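One detail worth spelling out in the new vfio_legacy_dma_unmap() above: an "unmap everything" request cannot be issued as a single type1 ioctl, because the size argument is a 64-bit byte count and a full 2^64-byte span does not fit in it. The wrapper therefore issues two back-to-back unmaps of 2^63 bytes each, exactly as the listener's region_del path did before. A trivial standalone sketch of the split; the helper name is hypothetical.

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical stand-in for the real unmap call, just to show the split. */
static void fake_unmap(uint64_t iova, uint64_t size)
{
    printf("unmap iova 0x%016" PRIx64 " size 0x%016" PRIx64 "\n", iova, size);
}

int main(void)
{
    uint64_t half = 1ULL << 63;

    fake_unmap(0, half);      /* covers [0, 2^63) */
    fake_unmap(half, half);   /* covers [2^63, 2^64) */
    return 0;
}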
+ */ + if (!cpr_is_incoming() && + !vfio_set_iommu(fd, group->fd, &iommu_type, errp)) { return NULL; } @@ -537,16 +545,10 @@ static bool vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp) return true; } -static bool vfio_connect_container(VFIOGroup *group, AddressSpace *as, - Error **errp) +static bool vfio_container_attach_discard_disable(VFIOContainer *container, + VFIOGroup *group, Error **errp) { - VFIOContainer *container; - VFIOContainerBase *bcontainer; - int ret, fd; - VFIOAddressSpace *space; - VFIOIOMMUClass *vioc; - - space = vfio_get_address_space(as); + int ret; /* * VFIO is currently incompatible with discarding of RAM insofar as the @@ -579,108 +581,158 @@ static bool vfio_connect_container(VFIOGroup *group, AddressSpace *as, * details once we know which type of IOMMU we are using. */ - QLIST_FOREACH(bcontainer, &space->containers, next) { - container = container_of(bcontainer, VFIOContainer, bcontainer); - if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { - ret = vfio_ram_block_discard_disable(container, true); - if (ret) { - error_setg_errno(errp, -ret, - "Cannot set discarding of RAM broken"); - if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, - &container->fd)) { - error_report("vfio: error disconnecting group %d from" - " container", group->groupid); - } - return false; - } - group->container = container; - QLIST_INSERT_HEAD(&container->group_list, group, container_next); - vfio_kvm_device_add_group(group); - return true; + ret = vfio_ram_block_discard_disable(container, true); + if (ret) { + error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken"); + if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { + error_report("vfio: error disconnecting group %d from" + " container", group->groupid); } } + return !ret; +} - fd = qemu_open("/dev/vfio/vfio", O_RDWR, errp); - if (fd < 0) { - goto put_space_exit; +static bool vfio_container_group_add(VFIOContainer *container, VFIOGroup *group, + Error **errp) +{ + if (!vfio_container_attach_discard_disable(container, group, errp)) { + return false; + } + group->container = container; + QLIST_INSERT_HEAD(&container->group_list, group, container_next); + vfio_group_add_kvm_device(group); + /* + * Remember the container fd for each group, so we can attach to the same + * container after CPR. 
+ */ + cpr_resave_fd("vfio_container_for_group", group->groupid, container->fd); + return true; +} + +static void vfio_container_group_del(VFIOContainer *container, VFIOGroup *group) +{ + QLIST_REMOVE(group, container_next); + group->container = NULL; + vfio_group_del_kvm_device(group); + vfio_ram_block_discard_disable(container, false); + cpr_delete_fd("vfio_container_for_group", group->groupid); +} + +static bool vfio_container_connect(VFIOGroup *group, AddressSpace *as, + Error **errp) +{ + VFIOContainer *container; + VFIOContainerBase *bcontainer; + int ret, fd = -1; + VFIOAddressSpace *space; + VFIOIOMMUClass *vioc = NULL; + bool new_container = false; + bool group_was_added = false; + + space = vfio_address_space_get(as); + fd = cpr_find_fd("vfio_container_for_group", group->groupid); + + if (!cpr_is_incoming()) { + QLIST_FOREACH(bcontainer, &space->containers, next) { + container = container_of(bcontainer, VFIOContainer, bcontainer); + if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { + return vfio_container_group_add(container, group, errp); + } + } + + fd = qemu_open("/dev/vfio/vfio", O_RDWR, errp); + if (fd < 0) { + goto fail; + } + } else { + /* + * For incoming CPR, the group is already attached in the kernel. + * If a container with matching fd is found, then update the + * userland group list and return. If not, then after the loop, + * create the container struct and group list. + */ + QLIST_FOREACH(bcontainer, &space->containers, next) { + container = container_of(bcontainer, VFIOContainer, bcontainer); + + if (vfio_cpr_container_match(container, group, fd)) { + return vfio_container_group_add(container, group, errp); + } + } } ret = ioctl(fd, VFIO_GET_API_VERSION); if (ret != VFIO_API_VERSION) { error_setg(errp, "supported vfio version: %d, " "reported version: %d", VFIO_API_VERSION, ret); - goto close_fd_exit; + goto fail; } container = vfio_create_container(fd, group, errp); if (!container) { - goto close_fd_exit; + goto fail; } + new_container = true; bcontainer = &container->bcontainer; - if (!vfio_cpr_register_container(bcontainer, errp)) { - goto free_container_exit; - } - - ret = vfio_ram_block_discard_disable(container, true); - if (ret) { - error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken"); - goto unregister_container_exit; + if (!vfio_legacy_cpr_register_container(container, errp)) { + goto fail; } vioc = VFIO_IOMMU_GET_CLASS(bcontainer); assert(vioc->setup); if (!vioc->setup(bcontainer, errp)) { - goto enable_discards_exit; + goto fail; } - vfio_kvm_device_add_group(group); - vfio_address_space_insert(space, bcontainer); - group->container = container; - QLIST_INSERT_HEAD(&container->group_list, group, container_next); - - bcontainer->listener = vfio_memory_listener; - memory_listener_register(&bcontainer->listener, bcontainer->space->as); + if (!vfio_container_group_add(container, group, errp)) { + goto fail; + } + group_was_added = true; - if (bcontainer->error) { - error_propagate_prepend(errp, bcontainer->error, - "memory listener initialization failed: "); - goto listener_release_exit; + /* + * If CPR, register the listener later, after all state that may + * affect regions and mapping boundaries has been cpr load'ed. Later, + * the listener will invoke its callback on each flat section and call + * dma_map to supply the new vaddr, and the calls will match the mappings + * remembered by the kernel. 
+ */ + if (!cpr_is_incoming()) { + if (!vfio_listener_register(bcontainer, errp)) { + goto fail; + } } bcontainer->initialized = true; return true; -listener_release_exit: - QLIST_REMOVE(group, container_next); - vfio_kvm_device_del_group(group); - memory_listener_unregister(&bcontainer->listener); - if (vioc->release) { - vioc->release(bcontainer); - } - -enable_discards_exit: - vfio_ram_block_discard_disable(container, false); - -unregister_container_exit: - vfio_cpr_unregister_container(bcontainer); - -free_container_exit: - object_unref(container); -close_fd_exit: - close(fd); +fail: + if (new_container) { + vfio_listener_unregister(bcontainer); + } -put_space_exit: - vfio_put_address_space(space); + if (group_was_added) { + vfio_container_group_del(container, group); + } + if (vioc && vioc->release) { + vioc->release(bcontainer); + } + if (new_container) { + vfio_legacy_cpr_unregister_container(container); + object_unref(container); + } + if (fd >= 0) { + close(fd); + } + vfio_address_space_put(space); return false; } -static void vfio_disconnect_container(VFIOGroup *group) +static void vfio_container_disconnect(VFIOGroup *group) { VFIOContainer *container = group->container; VFIOContainerBase *bcontainer = &container->bcontainer; @@ -688,6 +740,7 @@ static void vfio_disconnect_container(VFIOGroup *group) QLIST_REMOVE(group, container_next); group->container = NULL; + cpr_delete_fd("vfio_container_for_group", group->groupid); /* * Explicitly release the listener first before unset container, @@ -695,7 +748,7 @@ static void vfio_disconnect_container(VFIOGroup *group) * group. */ if (QLIST_EMPTY(&container->group_list)) { - memory_listener_unregister(&bcontainer->listener); + vfio_listener_unregister(bcontainer); if (vioc->release) { vioc->release(bcontainer); } @@ -709,16 +762,16 @@ static void vfio_disconnect_container(VFIOGroup *group) if (QLIST_EMPTY(&container->group_list)) { VFIOAddressSpace *space = bcontainer->space; - trace_vfio_disconnect_container(container->fd); - vfio_cpr_unregister_container(bcontainer); + trace_vfio_container_disconnect(container->fd); + vfio_legacy_cpr_unregister_container(container); close(container->fd); object_unref(container); - vfio_put_address_space(space); + vfio_address_space_put(space); } } -static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) +static VFIOGroup *vfio_group_get(int groupid, AddressSpace *as, Error **errp) { ERRP_GUARD(); VFIOGroup *group; @@ -741,7 +794,7 @@ static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) group = g_malloc0(sizeof(*group)); snprintf(path, sizeof(path), "/dev/vfio/%d", groupid); - group->fd = qemu_open(path, O_RDWR, errp); + group->fd = cpr_open_fd(path, O_RDWR, "vfio_group", groupid, errp); if (group->fd < 0) { goto free_group_exit; } @@ -762,7 +815,7 @@ static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) group->groupid = groupid; QLIST_INIT(&group->device_list); - if (!vfio_connect_container(group, as, errp)) { + if (!vfio_container_connect(group, as, errp)) { error_prepend(errp, "failed to setup container for group %d: ", groupid); goto close_fd_exit; @@ -773,6 +826,7 @@ static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) return group; close_fd_exit: + cpr_delete_fd("vfio_group", groupid); close(group->fd); free_group_exit: @@ -781,7 +835,7 @@ static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) return NULL; } -static void vfio_put_group(VFIOGroup *group) +static void 
vfio_group_put(VFIOGroup *group) { if (!group || !QLIST_EMPTY(&group->device_list)) { return; @@ -790,21 +844,22 @@ static void vfio_put_group(VFIOGroup *group) if (!group->ram_block_discard_allowed) { vfio_ram_block_discard_disable(group->container, false); } - vfio_kvm_device_del_group(group); - vfio_disconnect_container(group); + vfio_group_del_kvm_device(group); + vfio_container_disconnect(group); QLIST_REMOVE(group, next); - trace_vfio_put_group(group->fd); + trace_vfio_group_put(group->fd); + cpr_delete_fd("vfio_group", group->groupid); close(group->fd); g_free(group); } -static bool vfio_get_device(VFIOGroup *group, const char *name, +static bool vfio_device_get(VFIOGroup *group, const char *name, VFIODevice *vbasedev, Error **errp) { g_autofree struct vfio_device_info *info = NULL; int fd; - fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name); + fd = vfio_cpr_group_get_device_fd(group->fd, name); if (fd < 0) { error_setg_errno(errp, errno, "error getting device from group %d", group->groupid); @@ -817,8 +872,7 @@ static bool vfio_get_device(VFIOGroup *group, const char *name, info = vfio_get_device_info(fd); if (!info) { error_setg_errno(errp, errno, "error getting device info"); - close(fd); - return false; + goto fail; } /* @@ -832,8 +886,7 @@ static bool vfio_get_device(VFIOGroup *group, const char *name, if (!QLIST_EMPTY(&group->device_list)) { error_setg(errp, "Inconsistent setting of support for discarding " "RAM (e.g., balloon) within group"); - close(fd); - return false; + goto fail; } if (!group->ram_block_discard_allowed) { @@ -842,33 +895,35 @@ static bool vfio_get_device(VFIOGroup *group, const char *name, } } + vfio_device_prepare(vbasedev, &group->container->bcontainer, info); + vbasedev->fd = fd; vbasedev->group = group; QLIST_INSERT_HEAD(&group->device_list, vbasedev, next); - vbasedev->num_irqs = info->num_irqs; - vbasedev->num_regions = info->num_regions; - vbasedev->flags = info->flags; - - trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs); - - vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET); + trace_vfio_device_get(name, info->flags, info->num_regions, info->num_irqs); return true; + +fail: + close(fd); + cpr_delete_fd(name, 0); + return false; } -static void vfio_put_base_device(VFIODevice *vbasedev) +static void vfio_device_put(VFIODevice *vbasedev) { if (!vbasedev->group) { return; } QLIST_REMOVE(vbasedev, next); vbasedev->group = NULL; - trace_vfio_put_base_device(vbasedev->fd); + trace_vfio_device_put(vbasedev->fd); + cpr_delete_fd(vbasedev->name, 0); close(vbasedev->fd); } -static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp) +static int vfio_device_get_groupid(VFIODevice *vbasedev, Error **errp) { char *tmp, group_path[PATH_MAX]; g_autofree char *group_name = NULL; @@ -896,29 +951,24 @@ static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp) } /* - * vfio_attach_device: attach a device to a security context + * vfio_device_attach: attach a device to a security context * @name and @vbasedev->name are likely to be different depending * on the type of the device, hence the need for passing @name */ static bool vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev, AddressSpace *as, Error **errp) { - int groupid = vfio_device_groupid(vbasedev, errp); + int groupid = vfio_device_get_groupid(vbasedev, errp); VFIODevice *vbasedev_iter; VFIOGroup *group; - VFIOContainerBase *bcontainer; if (groupid < 0) { return false; } - trace_vfio_attach_device(vbasedev->name, groupid); + 
trace_vfio_device_attach(vbasedev->name, groupid); - if (!vfio_device_hiod_realize(vbasedev, errp)) { - return false; - } - - group = vfio_get_group(groupid, as, errp); + group = vfio_group_get(groupid, as, errp); if (!group) { return false; } @@ -926,33 +976,51 @@ static bool vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev, QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) { error_setg(errp, "device is already attached"); - vfio_put_group(group); - return false; + goto group_put_exit; } } - if (!vfio_get_device(group, name, vbasedev, errp)) { - vfio_put_group(group); - return false; + if (!vfio_device_get(group, name, vbasedev, errp)) { + goto group_put_exit; } - bcontainer = &group->container->bcontainer; - vbasedev->bcontainer = bcontainer; - QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next); - QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next); + if (!vfio_device_hiod_create_and_realize(vbasedev, + TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO, + errp)) { + goto device_put_exit; + } + + if (vbasedev->mdev) { + error_setg(&vbasedev->cpr.mdev_blocker, + "CPR does not support vfio mdev %s", vbasedev->name); + if (migrate_add_blocker_modes(&vbasedev->cpr.mdev_blocker, errp, + MIG_MODE_CPR_TRANSFER, -1) < 0) { + goto hiod_unref_exit; + } + } return true; + +hiod_unref_exit: + object_unref(vbasedev->hiod); +device_put_exit: + vfio_device_put(vbasedev); +group_put_exit: + vfio_group_put(group); + return false; } static void vfio_legacy_detach_device(VFIODevice *vbasedev) { VFIOGroup *group = vbasedev->group; - QLIST_REMOVE(vbasedev, global_next); - QLIST_REMOVE(vbasedev, container_next); - vbasedev->bcontainer = NULL; - trace_vfio_detach_device(vbasedev->name, group->groupid); - vfio_put_base_device(vbasedev); - vfio_put_group(group); + trace_vfio_device_detach(vbasedev->name, group->groupid); + + vfio_device_unprepare(vbasedev); + + migrate_del_blocker(&vbasedev->cpr.mdev_blocker); + object_unref(vbasedev->hiod); + vfio_device_put(vbasedev); + vfio_group_put(group); } static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single) @@ -1123,12 +1191,10 @@ static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single) return ret; } -static void vfio_iommu_legacy_class_init(ObjectClass *klass, void *data) +static void vfio_iommu_legacy_class_init(ObjectClass *klass, const void *data) { VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); - vioc->hiod_typename = TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO; - vioc->setup = vfio_legacy_setup; vioc->dma_map = vfio_legacy_dma_map; vioc->dma_unmap = vfio_legacy_dma_unmap; @@ -1187,7 +1253,7 @@ static void vfio_iommu_legacy_instance_init(Object *obj) QLIST_INIT(&container->group_list); } -static void hiod_legacy_vfio_class_init(ObjectClass *oc, void *data) +static void hiod_legacy_vfio_class_init(ObjectClass *oc, const void *data) { HostIOMMUDeviceClass *hioc = HOST_IOMMU_DEVICE_CLASS(oc); diff --git a/hw/vfio/cpr-iommufd.c b/hw/vfio/cpr-iommufd.c new file mode 100644 index 00000000000..148a06d552f --- /dev/null +++ b/hw/vfio/cpr-iommufd.c @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2024-2025 Oracle and/or its affiliates. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "hw/vfio/vfio-cpr.h" +#include "hw/vfio/vfio-device.h" +#include "migration/blocker.h" +#include "migration/cpr.h" +#include "migration/migration.h" +#include "migration/vmstate.h" +#include "system/iommufd.h" +#include "vfio-iommufd.h" +#include "trace.h" + +typedef struct CprVFIODevice { + char *name; + unsigned int namelen; + uint32_t ioas_id; + int devid; + uint32_t hwpt_id; + QLIST_ENTRY(CprVFIODevice) next; +} CprVFIODevice; + +static const VMStateDescription vmstate_cpr_vfio_device = { + .name = "cpr vfio device", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(namelen, CprVFIODevice), + VMSTATE_VBUFFER_ALLOC_UINT32(name, CprVFIODevice, 0, NULL, namelen), + VMSTATE_INT32(devid, CprVFIODevice), + VMSTATE_UINT32(ioas_id, CprVFIODevice), + VMSTATE_UINT32(hwpt_id, CprVFIODevice), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_cpr_vfio_devices = { + .name = CPR_STATE "/vfio devices", + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]){ + VMSTATE_QLIST_V(vfio_devices, CprState, 1, vmstate_cpr_vfio_device, + CprVFIODevice, next), + VMSTATE_END_OF_LIST() + } +}; + +static void vfio_cpr_save_device(VFIODevice *vbasedev) +{ + CprVFIODevice *elem = g_new0(CprVFIODevice, 1); + + elem->name = g_strdup(vbasedev->name); + elem->namelen = strlen(vbasedev->name) + 1; + elem->ioas_id = vbasedev->cpr.ioas_id; + elem->devid = vbasedev->devid; + elem->hwpt_id = vbasedev->cpr.hwpt_id; + QLIST_INSERT_HEAD(&cpr_state.vfio_devices, elem, next); +} + +static CprVFIODevice *find_device(const char *name) +{ + CprVFIODeviceList *head = &cpr_state.vfio_devices; + CprVFIODevice *elem; + + QLIST_FOREACH(elem, head, next) { + if (!strcmp(elem->name, name)) { + return elem; + } + } + return NULL; +} + +static void vfio_cpr_delete_device(const char *name) +{ + CprVFIODevice *elem = find_device(name); + + if (elem) { + QLIST_REMOVE(elem, next); + g_free(elem->name); + g_free(elem); + } +} + +static bool vfio_cpr_find_device(VFIODevice *vbasedev) +{ + CprVFIODevice *elem = find_device(vbasedev->name); + + if (elem) { + vbasedev->cpr.ioas_id = elem->ioas_id; + vbasedev->devid = elem->devid; + vbasedev->cpr.hwpt_id = elem->hwpt_id; + trace_vfio_cpr_find_device(elem->ioas_id, elem->devid, elem->hwpt_id); + return true; + } + return false; +} + +static bool vfio_cpr_supported(IOMMUFDBackend *be, Error **errp) +{ + if (!iommufd_change_process_capable(be)) { + if (errp) { + error_setg(errp, "vfio iommufd backend does not support " + "IOMMU_IOAS_CHANGE_PROCESS"); + } + return false; + } + return true; +} + +static int iommufd_cpr_pre_save(void *opaque) +{ + IOMMUFDBackend *be = opaque; + + /* + * The process has not changed yet, but proactively try the ioctl, + * and it will fail if any DMA mappings are not supported. 
+ */ + if (!iommufd_change_process_capable(be)) { + error_report("some memory regions do not support " + "IOMMU_IOAS_CHANGE_PROCESS"); + return -1; + } + return 0; +} + +static int iommufd_cpr_post_load(void *opaque, int version_id) +{ + IOMMUFDBackend *be = opaque; + Error *local_err = NULL; + + if (!iommufd_change_process(be, &local_err)) { + error_report_err(local_err); + return -1; + } + return 0; +} + +static const VMStateDescription iommufd_cpr_vmstate = { + .name = "iommufd", + .version_id = 0, + .minimum_version_id = 0, + .pre_save = iommufd_cpr_pre_save, + .post_load = iommufd_cpr_post_load, + .needed = cpr_incoming_needed, + .fields = (VMStateField[]) { + VMSTATE_END_OF_LIST() + } +}; + +bool vfio_iommufd_cpr_register_iommufd(IOMMUFDBackend *be, Error **errp) +{ + Error **cpr_blocker = &be->cpr_blocker; + + if (!vfio_cpr_supported(be, cpr_blocker)) { + return migrate_add_blocker_modes(cpr_blocker, errp, + MIG_MODE_CPR_TRANSFER, -1) == 0; + } + + vmstate_register(NULL, -1, &iommufd_cpr_vmstate, be); + + return true; +} + +void vfio_iommufd_cpr_unregister_iommufd(IOMMUFDBackend *be) +{ + vmstate_unregister(NULL, &iommufd_cpr_vmstate, be); + migrate_del_blocker(&be->cpr_blocker); +} + +bool vfio_iommufd_cpr_register_container(VFIOIOMMUFDContainer *container, + Error **errp) +{ + VFIOContainerBase *bcontainer = &container->bcontainer; + + migration_add_notifier_mode(&bcontainer->cpr_reboot_notifier, + vfio_cpr_reboot_notifier, + MIG_MODE_CPR_REBOOT); + + vfio_cpr_add_kvm_notifier(); + + return true; +} + +void vfio_iommufd_cpr_unregister_container(VFIOIOMMUFDContainer *container) +{ + VFIOContainerBase *bcontainer = &container->bcontainer; + + migration_remove_notifier(&bcontainer->cpr_reboot_notifier); +} + +void vfio_iommufd_cpr_register_device(VFIODevice *vbasedev) +{ + if (!cpr_is_incoming()) { + /* + * Beware fd may have already been saved by vfio_device_set_fd, + * so call resave to avoid a duplicate entry. + */ + cpr_resave_fd(vbasedev->name, 0, vbasedev->fd); + vfio_cpr_save_device(vbasedev); + } +} + +void vfio_iommufd_cpr_unregister_device(VFIODevice *vbasedev) +{ + cpr_delete_fd(vbasedev->name, 0); + vfio_cpr_delete_device(vbasedev->name); +} + +void vfio_cpr_load_device(VFIODevice *vbasedev) +{ + if (cpr_is_incoming()) { + bool ret = vfio_cpr_find_device(vbasedev); + g_assert(ret); + + if (vbasedev->fd < 0) { + vbasedev->fd = cpr_find_fd(vbasedev->name, 0); + } + } +} diff --git a/hw/vfio/cpr-legacy.c b/hw/vfio/cpr-legacy.c new file mode 100644 index 00000000000..553b203e9b6 --- /dev/null +++ b/hw/vfio/cpr-legacy.c @@ -0,0 +1,284 @@ +/* + * Copyright (c) 2021-2025 Oracle and/or its affiliates. 
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+#include "qemu/osdep.h"
+#include "hw/vfio/vfio-container.h"
+#include "hw/vfio/vfio-device.h"
+#include "hw/vfio/vfio-listener.h"
+#include "migration/blocker.h"
+#include "migration/cpr.h"
+#include "migration/migration.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+
+static bool vfio_dma_unmap_vaddr_all(VFIOContainer *container, Error **errp)
+{
+    struct vfio_iommu_type1_dma_unmap unmap = {
+        .argsz = sizeof(unmap),
+        .flags = VFIO_DMA_UNMAP_FLAG_VADDR | VFIO_DMA_UNMAP_FLAG_ALL,
+        .iova = 0,
+        .size = 0,
+    };
+    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
+        error_setg_errno(errp, errno, "vfio_dma_unmap_vaddr_all");
+        return false;
+    }
+    container->cpr.vaddr_unmapped = true;
+    return true;
+}
+
+/*
+ * Set the new @vaddr for any mappings registered during cpr load.
+ * The incoming state is cleared thereafter.
+ */
+static int vfio_legacy_cpr_dma_map(const VFIOContainerBase *bcontainer,
+                                   hwaddr iova, ram_addr_t size, void *vaddr,
+                                   bool readonly, MemoryRegion *mr)
+{
+    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+                                                  bcontainer);
+    struct vfio_iommu_type1_dma_map map = {
+        .argsz = sizeof(map),
+        .flags = VFIO_DMA_MAP_FLAG_VADDR,
+        .vaddr = (__u64)(uintptr_t)vaddr,
+        .iova = iova,
+        .size = size,
+    };
+
+    g_assert(cpr_is_incoming());
+
+    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map)) {
+        return -errno;
+    }
+
+    return 0;
+}
+
+static void vfio_region_remap(MemoryListener *listener,
+                              MemoryRegionSection *section)
+{
+    VFIOContainer *container = container_of(listener, VFIOContainer,
+                                            cpr.remap_listener);
+    vfio_container_region_add(&container->bcontainer, section, true);
+}
+
+static bool vfio_cpr_supported(VFIOContainer *container, Error **errp)
+{
+    if (!ioctl(container->fd, VFIO_CHECK_EXTENSION, VFIO_UPDATE_VADDR)) {
+        error_setg(errp, "VFIO container does not support VFIO_UPDATE_VADDR");
+        return false;
+
+    } else if (!ioctl(container->fd, VFIO_CHECK_EXTENSION, VFIO_UNMAP_ALL)) {
+        error_setg(errp, "VFIO container does not support VFIO_UNMAP_ALL");
+        return false;
+
+    } else {
+        return true;
+    }
+}
+
+static int vfio_container_pre_save(void *opaque)
+{
+    VFIOContainer *container = opaque;
+    Error *local_err = NULL;
+
+    if (!vfio_dma_unmap_vaddr_all(container, &local_err)) {
+        error_report_err(local_err);
+        return -1;
+    }
+    return 0;
+}
+
+static int vfio_container_post_load(void *opaque, int version_id)
+{
+    VFIOContainer *container = opaque;
+    VFIOContainerBase *bcontainer = &container->bcontainer;
+    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
+    dma_map_fn saved_dma_map = vioc->dma_map;
+    Error *local_err = NULL;
+
+    /* During incoming CPR, divert calls to dma_map.
*/ + vioc->dma_map = vfio_legacy_cpr_dma_map; + + if (!vfio_listener_register(bcontainer, &local_err)) { + error_report_err(local_err); + return -1; + } + + /* Restore original dma_map function */ + vioc->dma_map = saved_dma_map; + + return 0; +} + +static const VMStateDescription vfio_container_vmstate = { + .name = "vfio-container", + .version_id = 0, + .minimum_version_id = 0, + .priority = MIG_PRI_LOW, /* Must happen after devices and groups */ + .pre_save = vfio_container_pre_save, + .post_load = vfio_container_post_load, + .needed = cpr_incoming_needed, + .fields = (VMStateField[]) { + VMSTATE_END_OF_LIST() + } +}; + +static int vfio_cpr_fail_notifier(NotifierWithReturn *notifier, + MigrationEvent *e, Error **errp) +{ + VFIOContainer *container = + container_of(notifier, VFIOContainer, cpr.transfer_notifier); + VFIOContainerBase *bcontainer = &container->bcontainer; + + if (e->type != MIG_EVENT_PRECOPY_FAILED) { + return 0; + } + + if (container->cpr.vaddr_unmapped) { + /* + * Force a call to vfio_region_remap for each mapped section by + * temporarily registering a listener, and temporarily diverting + * dma_map to vfio_legacy_cpr_dma_map. The latter restores vaddr. + */ + + VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); + dma_map_fn saved_dma_map = vioc->dma_map; + vioc->dma_map = vfio_legacy_cpr_dma_map; + + container->cpr.remap_listener = (MemoryListener) { + .name = "vfio cpr recover", + .region_add = vfio_region_remap + }; + memory_listener_register(&container->cpr.remap_listener, + bcontainer->space->as); + memory_listener_unregister(&container->cpr.remap_listener); + container->cpr.vaddr_unmapped = false; + vioc->dma_map = saved_dma_map; + } + return 0; +} + +bool vfio_legacy_cpr_register_container(VFIOContainer *container, Error **errp) +{ + VFIOContainerBase *bcontainer = &container->bcontainer; + Error **cpr_blocker = &container->cpr.blocker; + + migration_add_notifier_mode(&bcontainer->cpr_reboot_notifier, + vfio_cpr_reboot_notifier, + MIG_MODE_CPR_REBOOT); + + if (!vfio_cpr_supported(container, cpr_blocker)) { + return migrate_add_blocker_modes(cpr_blocker, errp, + MIG_MODE_CPR_TRANSFER, -1) == 0; + } + + vfio_cpr_add_kvm_notifier(); + + vmstate_register(NULL, -1, &vfio_container_vmstate, container); + + migration_add_notifier_mode(&container->cpr.transfer_notifier, + vfio_cpr_fail_notifier, + MIG_MODE_CPR_TRANSFER); + return true; +} + +void vfio_legacy_cpr_unregister_container(VFIOContainer *container) +{ + VFIOContainerBase *bcontainer = &container->bcontainer; + + migration_remove_notifier(&bcontainer->cpr_reboot_notifier); + migrate_del_blocker(&container->cpr.blocker); + vmstate_unregister(NULL, &vfio_container_vmstate, container); + migration_remove_notifier(&container->cpr.transfer_notifier); +} + +/* + * In old QEMU, VFIO_DMA_UNMAP_FLAG_VADDR may fail on some mapping after + * succeeding for others, so the latter have lost their vaddr. Call this + * to restore vaddr for a section with a giommu. + * + * The giommu already exists. Find it and replay it, which calls + * vfio_legacy_cpr_dma_map further down the stack. 
+ */ +void vfio_cpr_giommu_remap(VFIOContainerBase *bcontainer, + MemoryRegionSection *section) +{ + VFIOGuestIOMMU *giommu = NULL; + hwaddr as_offset = section->offset_within_address_space; + hwaddr iommu_offset = as_offset - section->offset_within_region; + + QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) { + if (giommu->iommu_mr == IOMMU_MEMORY_REGION(section->mr) && + giommu->iommu_offset == iommu_offset) { + break; + } + } + g_assert(giommu); + memory_region_iommu_replay(giommu->iommu_mr, &giommu->n); +} + +/* + * In old QEMU, VFIO_DMA_UNMAP_FLAG_VADDR may fail on some mapping after + * succeeding for others, so the latter have lost their vaddr. Call this + * to restore vaddr for a section with a RamDiscardManager. + * + * The ram discard listener already exists. Call its populate function + * directly, which calls vfio_legacy_cpr_dma_map. + */ +bool vfio_cpr_ram_discard_register_listener(VFIOContainerBase *bcontainer, + MemoryRegionSection *section) +{ + VFIORamDiscardListener *vrdl = + vfio_find_ram_discard_listener(bcontainer, section); + + g_assert(vrdl); + return vrdl->listener.notify_populate(&vrdl->listener, section) == 0; +} + +int vfio_cpr_group_get_device_fd(int d, const char *name) +{ + const int id = 0; + int fd = cpr_find_fd(name, id); + + if (fd < 0) { + fd = ioctl(d, VFIO_GROUP_GET_DEVICE_FD, name); + if (fd >= 0) { + cpr_save_fd(name, id, fd); + } + } + return fd; +} + +static bool same_device(int fd1, int fd2) +{ + struct stat st1, st2; + + return !fstat(fd1, &st1) && !fstat(fd2, &st2) && st1.st_dev == st2.st_dev; +} + +bool vfio_cpr_container_match(VFIOContainer *container, VFIOGroup *group, + int fd) +{ + if (container->fd == fd) { + return true; + } + if (!same_device(container->fd, fd)) { + return false; + } + /* + * Same device, different fd. This occurs when the container fd is + * cpr_save'd multiple times, once for each groupid, so SCM_RIGHTS + * produces duplicates. De-dup it. 
+ */ + cpr_delete_fd("vfio_container_for_group", group->groupid); + close(fd); + cpr_save_fd("vfio_container_for_group", group->groupid, container->fd); + return true; +} diff --git a/hw/vfio/cpr.c b/hw/vfio/cpr.c index 87e51fcee17..a831243e020 100644 --- a/hw/vfio/cpr.c +++ b/hw/vfio/cpr.c @@ -6,13 +6,17 @@ */ #include "qemu/osdep.h" -#include "hw/vfio/vfio-common.h" -#include "migration/misc.h" +#include "hw/vfio/vfio-device.h" +#include "hw/vfio/vfio-cpr.h" +#include "hw/vfio/pci.h" +#include "hw/pci/msix.h" +#include "hw/pci/msi.h" +#include "migration/cpr.h" #include "qapi/error.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" -static int vfio_cpr_reboot_notifier(NotifierWithReturn *notifier, - MigrationEvent *e, Error **errp) +int vfio_cpr_reboot_notifier(NotifierWithReturn *notifier, + MigrationEvent *e, Error **errp) { if (e->type == MIG_EVENT_PRECOPY_SETUP && !runstate_check(RUN_STATE_SUSPENDED) && !vm_get_suspended()) { @@ -25,15 +29,265 @@ static int vfio_cpr_reboot_notifier(NotifierWithReturn *notifier, return 0; } -bool vfio_cpr_register_container(VFIOContainerBase *bcontainer, Error **errp) +#define STRDUP_VECTOR_FD_NAME(vdev, name) \ + g_strdup_printf("%s_%s", (vdev)->vbasedev.name, (name)) + +void vfio_cpr_save_vector_fd(VFIOPCIDevice *vdev, const char *name, int nr, + int fd) +{ + g_autofree char *fdname = STRDUP_VECTOR_FD_NAME(vdev, name); + cpr_save_fd(fdname, nr, fd); +} + +int vfio_cpr_load_vector_fd(VFIOPCIDevice *vdev, const char *name, int nr) +{ + g_autofree char *fdname = STRDUP_VECTOR_FD_NAME(vdev, name); + return cpr_find_fd(fdname, nr); +} + +void vfio_cpr_delete_vector_fd(VFIOPCIDevice *vdev, const char *name, int nr) +{ + g_autofree char *fdname = STRDUP_VECTOR_FD_NAME(vdev, name); + cpr_delete_fd(fdname, nr); +} + +static void vfio_cpr_claim_vectors(VFIOPCIDevice *vdev, int nr_vectors, + bool msix) +{ + int i, fd; + bool pending = false; + PCIDevice *pdev = &vdev->pdev; + + vdev->nr_vectors = nr_vectors; + vdev->msi_vectors = g_new0(VFIOMSIVector, nr_vectors); + vdev->interrupt = msix ? VFIO_INT_MSIX : VFIO_INT_MSI; + + vfio_pci_prepare_kvm_msi_virq_batch(vdev); + + for (i = 0; i < nr_vectors; i++) { + VFIOMSIVector *vector = &vdev->msi_vectors[i]; + + fd = vfio_cpr_load_vector_fd(vdev, "interrupt", i); + if (fd >= 0) { + vfio_pci_vector_init(vdev, i); + vfio_pci_msi_set_handler(vdev, i, true); + } + + if (vfio_cpr_load_vector_fd(vdev, "kvm_interrupt", i) >= 0) { + vfio_pci_add_kvm_msi_virq(vdev, vector, i, msix); + } else { + vdev->msi_vectors[i].virq = -1; + } + + if (msix && msix_is_pending(pdev, i) && msix_is_masked(pdev, i)) { + set_bit(i, vdev->msix->pending); + pending = true; + } + } + + vfio_pci_commit_kvm_msi_virq_batch(vdev); + + if (msix) { + memory_region_set_enabled(&pdev->msix_pba_mmio, pending); + } +} + +/* + * The kernel may change non-emulated config bits. Exclude them from the + * changed-bits check in get_pci_config_device. 
+ */ +static int vfio_cpr_pci_pre_load(void *opaque) +{ + VFIOPCIDevice *vdev = opaque; + PCIDevice *pdev = &vdev->pdev; + int size = MIN(pci_config_size(pdev), vdev->config_size); + int i; + + for (i = 0; i < size; i++) { + pdev->cmask[i] &= vdev->emulated_config_bits[i]; + } + + return 0; +} + +static int vfio_cpr_pci_post_load(void *opaque, int version_id) +{ + VFIOPCIDevice *vdev = opaque; + PCIDevice *pdev = &vdev->pdev; + int nr_vectors; + + vfio_sub_page_bar_update_mappings(vdev); + + if (msix_enabled(pdev)) { + vfio_pci_msix_set_notifiers(vdev); + nr_vectors = vdev->msix->entries; + vfio_cpr_claim_vectors(vdev, nr_vectors, true); + + } else if (msi_enabled(pdev)) { + nr_vectors = msi_nr_vectors_allocated(pdev); + vfio_cpr_claim_vectors(vdev, nr_vectors, false); + + } else if (vfio_pci_read_config(pdev, PCI_INTERRUPT_PIN, 1)) { + Error *local_err = NULL; + if (!vfio_pci_intx_enable(vdev, &local_err)) { + error_report_err(local_err); + return -1; + } + } + + return 0; +} + +static bool pci_msix_present(void *opaque, int version_id) +{ + PCIDevice *pdev = opaque; + + return msix_present(pdev); +} + +static const VMStateDescription vfio_intx_vmstate = { + .name = "vfio-cpr-intx", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_BOOL(pending, VFIOINTx), + VMSTATE_UINT32(route.mode, VFIOINTx), + VMSTATE_INT32(route.irq, VFIOINTx), + VMSTATE_END_OF_LIST() + } +}; + +#define VMSTATE_VFIO_INTX(_field, _state) { \ + .name = (stringify(_field)), \ + .size = sizeof(VFIOINTx), \ + .vmsd = &vfio_intx_vmstate, \ + .flags = VMS_STRUCT, \ + .offset = vmstate_offset_value(_state, _field, VFIOINTx), \ +} + +const VMStateDescription vfio_cpr_pci_vmstate = { + .name = "vfio-cpr-pci", + .version_id = 0, + .minimum_version_id = 0, + .pre_load = vfio_cpr_pci_pre_load, + .post_load = vfio_cpr_pci_post_load, + .needed = cpr_incoming_needed, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice), + VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, pci_msix_present), + VMSTATE_VFIO_INTX(intx, VFIOPCIDevice), + VMSTATE_END_OF_LIST() + } +}; + +static NotifierWithReturn kvm_close_notifier; + +static int vfio_cpr_kvm_close_notifier(NotifierWithReturn *notifier, + MigrationEvent *e, + Error **errp) +{ + if (e->type == MIG_EVENT_PRECOPY_DONE) { + vfio_kvm_device_close(); + } + return 0; +} + +void vfio_cpr_add_kvm_notifier(void) +{ + if (!kvm_close_notifier.notify) { + migration_add_notifier_mode(&kvm_close_notifier, + vfio_cpr_kvm_close_notifier, + MIG_MODE_CPR_TRANSFER); + } +} + +static int set_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, + EventNotifier *rn, int virq, bool enable) +{ + if (enable) { + return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, virq); + } else { + return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, virq); + } +} + +static int vfio_cpr_set_msi_virq(VFIOPCIDevice *vdev, Error **errp, bool enable) +{ + const char *op = (enable ? 
"enable" : "disable"); + PCIDevice *pdev = &vdev->pdev; + int i, nr_vectors, ret = 0; + + if (msix_enabled(pdev)) { + nr_vectors = vdev->msix->entries; + + } else if (msi_enabled(pdev)) { + nr_vectors = msi_nr_vectors_allocated(pdev); + + } else if (vfio_pci_read_config(pdev, PCI_INTERRUPT_PIN, 1)) { + ret = set_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt, + &vdev->intx.unmask, vdev->intx.route.irq, + enable); + if (ret) { + error_setg_errno(errp, -ret, "failed to %s INTx irq %d", + op, vdev->intx.route.irq); + return ret; + } + vfio_pci_intx_set_handler(vdev, enable); + return ret; + + } else { + return 0; + } + + for (i = 0; i < nr_vectors; i++) { + VFIOMSIVector *vector = &vdev->msi_vectors[i]; + if (vector->use) { + ret = set_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt, + NULL, vector->virq, enable); + if (ret) { + error_setg_errno(errp, -ret, + "failed to %s msi vector %d virq %d", + op, i, vector->virq); + return ret; + } + vfio_pci_msi_set_handler(vdev, i, enable); + } + } + + return ret; +} + +/* + * When CPR starts, detach IRQs from the VFIO device so future interrupts + * are posted to kvm_interrupt, which is preserved in new QEMU. Interrupts + * that were already posted to the old KVM instance, but not delivered to the + * VCPU, are recovered via KVM_GET_LAPIC and pushed to the new KVM instance + * in new QEMU. + * + * If CPR fails, reattach the IRQs. + */ +static int vfio_cpr_pci_notifier(NotifierWithReturn *notifier, + MigrationEvent *e, Error **errp) +{ + VFIOPCIDevice *vdev = + container_of(notifier, VFIOPCIDevice, cpr.transfer_notifier); + + if (e->type == MIG_EVENT_PRECOPY_SETUP) { + return vfio_cpr_set_msi_virq(vdev, errp, false); + } else if (e->type == MIG_EVENT_PRECOPY_FAILED) { + return vfio_cpr_set_msi_virq(vdev, errp, true); + } + return 0; +} + +void vfio_cpr_pci_register_device(VFIOPCIDevice *vdev) { - migration_add_notifier_mode(&bcontainer->cpr_reboot_notifier, - vfio_cpr_reboot_notifier, - MIG_MODE_CPR_REBOOT); - return true; + migration_add_notifier_mode(&vdev->cpr.transfer_notifier, + vfio_cpr_pci_notifier, + MIG_MODE_CPR_TRANSFER); } -void vfio_cpr_unregister_container(VFIOContainerBase *bcontainer) +void vfio_cpr_pci_unregister_device(VFIOPCIDevice *vdev) { - migration_remove_notifier(&bcontainer->cpr_reboot_notifier); + migration_remove_notifier(&vdev->cpr.transfer_notifier); } diff --git a/hw/vfio/device.c b/hw/vfio/device.c new file mode 100644 index 00000000000..c0e25ac6d43 --- /dev/null +++ b/hw/vfio/device.c @@ -0,0 +1,614 @@ +/* + * VFIO device + * + * Copyright Red Hat, Inc. 2012 + * + * Authors: + * Alex Williamson + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Based on qemu-kvm device-assignment: + * Adapted for KVM by Qumranet. 
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
+ * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
+ * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
+ * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
+ * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include "hw/vfio/vfio-device.h"
+#include "hw/vfio/pci.h"
+#include "hw/iommu.h"
+#include "hw/hw.h"
+#include "trace.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/units.h"
+#include "migration/cpr.h"
+#include "migration/blocker.h"
+#include "monitor/monitor.h"
+#include "vfio-helpers.h"
+
+VFIODeviceList vfio_device_list =
+    QLIST_HEAD_INITIALIZER(vfio_device_list);
+
+/*
+ * We want to differentiate hot reset of multiple in-use devices vs
+ * hot reset of a single in-use device. VFIO_DEVICE_RESET will already
+ * handle the case of doing hot resets when there is only a single
+ * device per bus. The in-use here refers to how many VFIODevices are
+ * affected. A hot reset that affects multiple devices, but only a
+ * single in-use device, means that we can call it from our bus
+ * ->reset() callback since the extent is effectively a single
+ * device. This allows us to make use of it in the hotplug path. When
+ * there are multiple in-use devices, we can only trigger the hot
+ * reset during a system reset and thus from our reset handler. We
+ * separate _one vs _multi here so that we don't overlap and do a
+ * double reset on the system reset path where both our reset handler
+ * and ->reset() callback are used. Calling _one() will only do a hot
+ * reset for the one in-use devices case, calling _multi() will do
+ * nothing if a _one() would have been sufficient.
+ */ +void vfio_device_reset_handler(void *opaque) +{ + VFIODevice *vbasedev; + + trace_vfio_device_reset_handler(); + QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) { + if (vbasedev->dev->realized) { + vbasedev->ops->vfio_compute_needs_reset(vbasedev); + } + } + + QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) { + if (vbasedev->dev->realized && vbasedev->needs_reset) { + vbasedev->ops->vfio_hot_reset_multi(vbasedev); + } + } +} + +/* + * Common VFIO interrupt disable + */ +void vfio_device_irq_disable(VFIODevice *vbasedev, int index) +{ + struct vfio_irq_set irq_set = { + .argsz = sizeof(irq_set), + .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER, + .index = index, + .start = 0, + .count = 0, + }; + + vbasedev->io_ops->set_irqs(vbasedev, &irq_set); +} + +void vfio_device_irq_unmask(VFIODevice *vbasedev, int index) +{ + struct vfio_irq_set irq_set = { + .argsz = sizeof(irq_set), + .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK, + .index = index, + .start = 0, + .count = 1, + }; + + vbasedev->io_ops->set_irqs(vbasedev, &irq_set); +} + +void vfio_device_irq_mask(VFIODevice *vbasedev, int index) +{ + struct vfio_irq_set irq_set = { + .argsz = sizeof(irq_set), + .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK, + .index = index, + .start = 0, + .count = 1, + }; + + vbasedev->io_ops->set_irqs(vbasedev, &irq_set); +} + +static inline const char *action_to_str(int action) +{ + switch (action) { + case VFIO_IRQ_SET_ACTION_MASK: + return "MASK"; + case VFIO_IRQ_SET_ACTION_UNMASK: + return "UNMASK"; + case VFIO_IRQ_SET_ACTION_TRIGGER: + return "TRIGGER"; + default: + return "UNKNOWN ACTION"; + } +} + +static const char *index_to_str(VFIODevice *vbasedev, int index) +{ + if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) { + return NULL; + } + + switch (index) { + case VFIO_PCI_INTX_IRQ_INDEX: + return "INTX"; + case VFIO_PCI_MSI_IRQ_INDEX: + return "MSI"; + case VFIO_PCI_MSIX_IRQ_INDEX: + return "MSIX"; + case VFIO_PCI_ERR_IRQ_INDEX: + return "ERR"; + case VFIO_PCI_REQ_IRQ_INDEX: + return "REQ"; + default: + return NULL; + } +} + +bool vfio_device_irq_set_signaling(VFIODevice *vbasedev, int index, int subindex, + int action, int fd, Error **errp) +{ + ERRP_GUARD(); + g_autofree struct vfio_irq_set *irq_set = NULL; + int argsz; + const char *name; + int32_t *pfd; + + argsz = sizeof(*irq_set) + sizeof(*pfd); + + irq_set = g_malloc0(argsz); + irq_set->argsz = argsz; + irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action; + irq_set->index = index; + irq_set->start = subindex; + irq_set->count = 1; + pfd = (int32_t *)&irq_set->data; + *pfd = fd; + + if (!vbasedev->io_ops->set_irqs(vbasedev, irq_set)) { + return true; + } + + error_setg_errno(errp, errno, "VFIO_DEVICE_SET_IRQS failure"); + + name = index_to_str(vbasedev, index); + if (name) { + error_prepend(errp, "%s-%d: ", name, subindex); + } else { + error_prepend(errp, "index %d-%d: ", index, subindex); + } + error_prepend(errp, + "Failed to %s %s eventfd signaling for interrupt ", + fd < 0 ? 
"tear down" : "set up", action_to_str(action)); + return false; +} + +int vfio_device_get_irq_info(VFIODevice *vbasedev, int index, + struct vfio_irq_info *info) +{ + memset(info, 0, sizeof(*info)); + + info->argsz = sizeof(*info); + info->index = index; + + return vbasedev->io_ops->get_irq_info(vbasedev, info); +} + +int vfio_device_get_region_info(VFIODevice *vbasedev, int index, + struct vfio_region_info **info) +{ + size_t argsz = sizeof(struct vfio_region_info); + int fd = -1; + int ret; + + /* check cache */ + if (vbasedev->reginfo[index] != NULL) { + *info = vbasedev->reginfo[index]; + return 0; + } + + *info = g_malloc0(argsz); + + (*info)->index = index; +retry: + (*info)->argsz = argsz; + + ret = vbasedev->io_ops->get_region_info(vbasedev, *info, &fd); + if (ret != 0) { + g_free(*info); + *info = NULL; + return ret; + } + + if ((*info)->argsz > argsz) { + argsz = (*info)->argsz; + *info = g_realloc(*info, argsz); + + if (fd != -1) { + close(fd); + fd = -1; + } + + goto retry; + } + + /* fill cache */ + vbasedev->reginfo[index] = *info; + if (vbasedev->region_fds != NULL) { + vbasedev->region_fds[index] = fd; + } + + return 0; +} + +int vfio_device_get_region_fd(VFIODevice *vbasedev, int index) +{ + return vbasedev->region_fds ? + vbasedev->region_fds[index] : + vbasedev->fd; +} + +int vfio_device_get_region_info_type(VFIODevice *vbasedev, uint32_t type, + uint32_t subtype, struct vfio_region_info **info) +{ + int i; + + for (i = 0; i < vbasedev->num_regions; i++) { + struct vfio_info_cap_header *hdr; + struct vfio_region_info_cap_type *cap_type; + + if (vfio_device_get_region_info(vbasedev, i, info)) { + continue; + } + + hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE); + if (!hdr) { + continue; + } + + cap_type = container_of(hdr, struct vfio_region_info_cap_type, header); + + trace_vfio_device_get_region_info_type(vbasedev->name, i, + cap_type->type, cap_type->subtype); + + if (cap_type->type == type && cap_type->subtype == subtype) { + return 0; + } + } + + *info = NULL; + return -ENODEV; +} + +bool vfio_device_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type) +{ + struct vfio_region_info *info = NULL; + bool ret = false; + + if (!vfio_device_get_region_info(vbasedev, region, &info)) { + if (vfio_get_region_info_cap(info, cap_type)) { + ret = true; + } + } + + return ret; +} + +bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp) +{ + ERRP_GUARD(); + struct stat st; + + if (vbasedev->fd < 0) { + if (stat(vbasedev->sysfsdev, &st) < 0) { + error_setg_errno(errp, errno, "no such host device"); + error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev); + return false; + } + /* User may specify a name, e.g: VFIO platform device */ + if (!vbasedev->name) { + vbasedev->name = g_path_get_basename(vbasedev->sysfsdev); + } + } else { + if (!vbasedev->iommufd) { + error_setg(errp, "Use FD passing only with iommufd backend"); + return false; + } + if (!vbasedev->name) { + + if (vbasedev->dev->id) { + vbasedev->name = g_strdup(vbasedev->dev->id); + return true; + } else { + /* + * Assign a name so any function printing it will not break. + * The fd number changes across processes, so this cannot be + * used as an invariant name for CPR. 
+ */ + vbasedev->name = g_strdup_printf("VFIO_FD%d", vbasedev->fd); + error_setg(&vbasedev->cpr.id_blocker, + "vfio device with fd=%d needs an id property", + vbasedev->fd); + return migrate_add_blocker_modes(&vbasedev->cpr.id_blocker, + errp, MIG_MODE_CPR_TRANSFER, + -1) == 0; + } + } + } + + return true; +} + +void vfio_device_free_name(VFIODevice *vbasedev) +{ + g_clear_pointer(&vbasedev->name, g_free); + migrate_del_blocker(&vbasedev->cpr.id_blocker); +} + +void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp) +{ + vbasedev->fd = cpr_get_fd_param(vbasedev->dev->id, str, 0, errp); +} + +static VFIODeviceIOOps vfio_device_io_ops_ioctl; + +void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops, + DeviceState *dev, bool ram_discard) +{ + vbasedev->type = type; + vbasedev->ops = ops; + vbasedev->io_ops = &vfio_device_io_ops_ioctl; + vbasedev->dev = dev; + vbasedev->fd = -1; + vbasedev->use_region_fds = false; + + vbasedev->ram_block_discard_allowed = ram_discard; +} + +int vfio_device_get_aw_bits(VFIODevice *vdev) +{ + /* + * iova_ranges is a sorted list. For old kernels that support + * VFIO but not support query of iova ranges, iova_ranges is NULL, + * in this case HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX(64) is returned. + */ + GList *l = g_list_last(vdev->bcontainer->iova_ranges); + + if (l) { + Range *range = l->data; + return range_get_last_bit(range) + 1; + } + + return HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX; +} + +bool vfio_device_is_mdev(VFIODevice *vbasedev) +{ + g_autofree char *subsys = NULL; + g_autofree char *tmp = NULL; + + if (!vbasedev->sysfsdev) { + return false; + } + + tmp = g_strdup_printf("%s/subsystem", vbasedev->sysfsdev); + subsys = realpath(tmp, NULL); + return subsys && (strcmp(subsys, "/sys/bus/mdev") == 0); +} + +bool vfio_device_hiod_create_and_realize(VFIODevice *vbasedev, + const char *typename, Error **errp) +{ + HostIOMMUDevice *hiod; + + if (vbasedev->mdev) { + return true; + } + + hiod = HOST_IOMMU_DEVICE(object_new(typename)); + + if (!HOST_IOMMU_DEVICE_GET_CLASS(hiod)->realize(hiod, vbasedev, errp)) { + object_unref(hiod); + return false; + } + + vbasedev->hiod = hiod; + return true; +} + +VFIODevice *vfio_get_vfio_device(Object *obj) +{ + if (object_dynamic_cast(obj, TYPE_VFIO_PCI)) { + return &VFIO_PCI_BASE(obj)->vbasedev; + } else { + return NULL; + } +} + +bool vfio_device_attach_by_iommu_type(const char *iommu_type, char *name, + VFIODevice *vbasedev, AddressSpace *as, + Error **errp) +{ + const VFIOIOMMUClass *ops = + VFIO_IOMMU_CLASS(object_class_by_name(iommu_type)); + + assert(ops); + + return ops->attach_device(name, vbasedev, as, errp); +} + +bool vfio_device_attach(char *name, VFIODevice *vbasedev, + AddressSpace *as, Error **errp) +{ + const char *iommu_type = vbasedev->iommufd ? 
+ TYPE_VFIO_IOMMU_IOMMUFD : + TYPE_VFIO_IOMMU_LEGACY; + + return vfio_device_attach_by_iommu_type(iommu_type, name, vbasedev, + as, errp); +} + +void vfio_device_detach(VFIODevice *vbasedev) +{ + if (!vbasedev->bcontainer) { + return; + } + VFIO_IOMMU_GET_CLASS(vbasedev->bcontainer)->detach_device(vbasedev); +} + +void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainerBase *bcontainer, + struct vfio_device_info *info) +{ + int i; + + vbasedev->num_irqs = info->num_irqs; + vbasedev->num_regions = info->num_regions; + vbasedev->flags = info->flags; + vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET); + + vbasedev->bcontainer = bcontainer; + QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next); + + QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next); + + vbasedev->reginfo = g_new0(struct vfio_region_info *, + vbasedev->num_regions); + if (vbasedev->use_region_fds) { + vbasedev->region_fds = g_new0(int, vbasedev->num_regions); + for (i = 0; i < vbasedev->num_regions; i++) { + vbasedev->region_fds[i] = -1; + } + } +} + +void vfio_device_unprepare(VFIODevice *vbasedev) +{ + int i; + + for (i = 0; i < vbasedev->num_regions; i++) { + g_free(vbasedev->reginfo[i]); + if (vbasedev->region_fds != NULL && vbasedev->region_fds[i] != -1) { + close(vbasedev->region_fds[i]); + } + } + + g_clear_pointer(&vbasedev->reginfo, g_free); + g_clear_pointer(&vbasedev->region_fds, g_free); + + QLIST_REMOVE(vbasedev, container_next); + QLIST_REMOVE(vbasedev, global_next); + vbasedev->bcontainer = NULL; +} + +static VFIOPCIDevice *vfio_pci_from_vfio_device(VFIODevice *vbasedev) +{ + if (vbasedev && vbasedev->type == VFIO_DEVICE_TYPE_PCI) { + return container_of(vbasedev, VFIOPCIDevice, vbasedev); + } + return NULL; +} + +bool vfio_device_get_viommu_flags_want_nesting(VFIODevice *vbasedev) +{ + VFIOPCIDevice *vdev = vfio_pci_from_vfio_device(vbasedev); + + if (vdev) { + return !!(pci_device_get_viommu_flags(&vdev->pdev) & + VIOMMU_FLAG_WANT_NESTING_PARENT); + } + return false; +} + +/* + * Traditional ioctl() based io + */ + +static int vfio_device_io_device_feature(VFIODevice *vbasedev, + struct vfio_device_feature *feature) +{ + int ret; + + ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature); + + return ret < 0 ? -errno : ret; +} + +static int vfio_device_io_get_region_info(VFIODevice *vbasedev, + struct vfio_region_info *info, + int *fd) +{ + int ret; + + *fd = -1; + + ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, info); + + return ret < 0 ? -errno : ret; +} + +static int vfio_device_io_get_irq_info(VFIODevice *vbasedev, + struct vfio_irq_info *info) +{ + int ret; + + ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, info); + + return ret < 0 ? -errno : ret; +} + +static int vfio_device_io_set_irqs(VFIODevice *vbasedev, + struct vfio_irq_set *irqs) +{ + int ret; + + ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irqs); + + return ret < 0 ? -errno : ret; +} + +static int vfio_device_io_region_read(VFIODevice *vbasedev, uint8_t index, + off_t off, uint32_t size, void *data) +{ + struct vfio_region_info *info; + int ret; + + ret = vfio_device_get_region_info(vbasedev, index, &info); + if (ret != 0) { + return ret; + } + + ret = pread(vbasedev->fd, data, size, info->offset + off); + + return ret < 0 ? 
-errno : ret; +} + +static int vfio_device_io_region_write(VFIODevice *vbasedev, uint8_t index, + off_t off, uint32_t size, void *data, + bool post) +{ + struct vfio_region_info *info; + int ret; + + ret = vfio_device_get_region_info(vbasedev, index, &info); + if (ret != 0) { + return ret; + } + + ret = pwrite(vbasedev->fd, data, size, info->offset + off); + + return ret < 0 ? -errno : ret; +} + +static VFIODeviceIOOps vfio_device_io_ops_ioctl = { + .device_feature = vfio_device_io_device_feature, + .get_region_info = vfio_device_io_get_region_info, + .get_irq_info = vfio_device_io_get_irq_info, + .set_irqs = vfio_device_io_set_irqs, + .region_read = vfio_device_io_region_read, + .region_write = vfio_device_io_region_write, +}; diff --git a/hw/vfio/display.c b/hw/vfio/display.c index ea87830fe0d..faacd9019a5 100644 --- a/hw/vfio/display.c +++ b/hw/vfio/display.c @@ -16,9 +16,9 @@ #include "qemu/error-report.h" #include "hw/display/edid.h" -#include "ui/console.h" #include "qapi/error.h" #include "pci.h" +#include "vfio-display.h" #include "trace.h" #ifndef DRM_PLANE_TYPE_PRIMARY @@ -104,7 +104,6 @@ static void vfio_display_edid_update(VFIOPCIDevice *vdev, bool enabled, err: trace_vfio_display_edid_write_error(); - return; } static void vfio_display_edid_ui_info(void *opaque, uint32_t idx, @@ -130,10 +129,10 @@ static bool vfio_display_edid_init(VFIOPCIDevice *vdev, Error **errp) int fd = vdev->vbasedev.fd; int ret; - ret = vfio_get_dev_region_info(&vdev->vbasedev, - VFIO_REGION_TYPE_GFX, - VFIO_REGION_SUBTYPE_GFX_EDID, - &dpy->edid_info); + ret = vfio_device_get_region_info_type(&vdev->vbasedev, + VFIO_REGION_TYPE_GFX, + VFIO_REGION_SUBTYPE_GFX_EDID, + &dpy->edid_info); if (ret) { /* Failed to get GFX edid info, allow to go through without edid. */ return true; @@ -214,6 +213,7 @@ static VFIODMABuf *vfio_display_get_dmabuf(VFIOPCIDevice *vdev, struct vfio_device_gfx_plane_info plane; VFIODMABuf *dmabuf; int fd, ret; + uint32_t offset = 0; memset(&plane, 0, sizeof(plane)); plane.argsz = sizeof(plane); @@ -246,10 +246,10 @@ static VFIODMABuf *vfio_display_get_dmabuf(VFIOPCIDevice *vdev, dmabuf = g_new0(VFIODMABuf, 1); dmabuf->dmabuf_id = plane.dmabuf_id; - dmabuf->buf = qemu_dmabuf_new(plane.width, plane.height, - plane.stride, 0, 0, plane.width, + dmabuf->buf = qemu_dmabuf_new(plane.width, plane.height, &offset, + &plane.stride, 0, 0, plane.width, plane.height, plane.drm_format, - plane.drm_format_mod, fd, false, false); + plane.drm_format_mod, &fd, 1, false, false); if (plane_type == DRM_PLANE_TYPE_CURSOR) { vfio_display_update_cursor(dmabuf, &plane); @@ -365,7 +365,7 @@ static bool vfio_display_dmabuf_init(VFIOPCIDevice *vdev, Error **errp) &vfio_display_dmabuf_ops, vdev); if (vdev->enable_ramfb) { - vdev->dpy->ramfb = ramfb_setup(errp); + vdev->dpy->ramfb = ramfb_setup(vdev->use_legacy_x86_rom, errp); if (!vdev->dpy->ramfb) { return false; } @@ -494,7 +494,7 @@ static bool vfio_display_region_init(VFIOPCIDevice *vdev, Error **errp) &vfio_display_region_ops, vdev); if (vdev->enable_ramfb) { - vdev->dpy->ramfb = ramfb_setup(errp); + vdev->dpy->ramfb = ramfb_setup(vdev->use_legacy_x86_rom, errp); if (!vdev->dpy->ramfb) { return false; } diff --git a/hw/vfio/helpers.c b/hw/vfio/helpers.c index ea15c79db0a..23d13e5db5f 100644 --- a/hw/vfio/helpers.c +++ b/hw/vfio/helpers.c @@ -22,240 +22,11 @@ #include "qemu/osdep.h" #include -#include "hw/vfio/vfio-common.h" +#include "system/kvm.h" +#include "hw/vfio/vfio-device.h" #include "hw/hw.h" -#include "trace.h" #include "qapi/error.h" -#include 
"qemu/error-report.h" -#include "monitor/monitor.h" - -/* - * Common VFIO interrupt disable - */ -void vfio_disable_irqindex(VFIODevice *vbasedev, int index) -{ - struct vfio_irq_set irq_set = { - .argsz = sizeof(irq_set), - .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER, - .index = index, - .start = 0, - .count = 0, - }; - - ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); -} - -void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index) -{ - struct vfio_irq_set irq_set = { - .argsz = sizeof(irq_set), - .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK, - .index = index, - .start = 0, - .count = 1, - }; - - ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); -} - -void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index) -{ - struct vfio_irq_set irq_set = { - .argsz = sizeof(irq_set), - .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK, - .index = index, - .start = 0, - .count = 1, - }; - - ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); -} - -static inline const char *action_to_str(int action) -{ - switch (action) { - case VFIO_IRQ_SET_ACTION_MASK: - return "MASK"; - case VFIO_IRQ_SET_ACTION_UNMASK: - return "UNMASK"; - case VFIO_IRQ_SET_ACTION_TRIGGER: - return "TRIGGER"; - default: - return "UNKNOWN ACTION"; - } -} - -static const char *index_to_str(VFIODevice *vbasedev, int index) -{ - if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) { - return NULL; - } - - switch (index) { - case VFIO_PCI_INTX_IRQ_INDEX: - return "INTX"; - case VFIO_PCI_MSI_IRQ_INDEX: - return "MSI"; - case VFIO_PCI_MSIX_IRQ_INDEX: - return "MSIX"; - case VFIO_PCI_ERR_IRQ_INDEX: - return "ERR"; - case VFIO_PCI_REQ_IRQ_INDEX: - return "REQ"; - default: - return NULL; - } -} - -bool vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex, - int action, int fd, Error **errp) -{ - ERRP_GUARD(); - g_autofree struct vfio_irq_set *irq_set = NULL; - int argsz; - const char *name; - int32_t *pfd; - - argsz = sizeof(*irq_set) + sizeof(*pfd); - - irq_set = g_malloc0(argsz); - irq_set->argsz = argsz; - irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action; - irq_set->index = index; - irq_set->start = subindex; - irq_set->count = 1; - pfd = (int32_t *)&irq_set->data; - *pfd = fd; - - if (!ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) { - return true; - } - - error_setg_errno(errp, errno, "VFIO_DEVICE_SET_IRQS failure"); - - name = index_to_str(vbasedev, index); - if (name) { - error_prepend(errp, "%s-%d: ", name, subindex); - } else { - error_prepend(errp, "index %d-%d: ", index, subindex); - } - error_prepend(errp, - "Failed to %s %s eventfd signaling for interrupt ", - fd < 0 ? 
"tear down" : "set up", action_to_str(action)); - return false; -} - -/* - * IO Port/MMIO - Beware of the endians, VFIO is always little endian - */ -void vfio_region_write(void *opaque, hwaddr addr, - uint64_t data, unsigned size) -{ - VFIORegion *region = opaque; - VFIODevice *vbasedev = region->vbasedev; - union { - uint8_t byte; - uint16_t word; - uint32_t dword; - uint64_t qword; - } buf; - - switch (size) { - case 1: - buf.byte = data; - break; - case 2: - buf.word = cpu_to_le16(data); - break; - case 4: - buf.dword = cpu_to_le32(data); - break; - case 8: - buf.qword = cpu_to_le64(data); - break; - default: - hw_error("vfio: unsupported write size, %u bytes", size); - break; - } - - if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) { - error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64 - ",%d) failed: %m", - __func__, vbasedev->name, region->nr, - addr, data, size); - } - - trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size); - - /* - * A read or write to a BAR always signals an INTx EOI. This will - * do nothing if not pending (including not in INTx mode). We assume - * that a BAR access is in response to an interrupt and that BAR - * accesses will service the interrupt. Unfortunately, we don't know - * which access will service the interrupt, so we're potentially - * getting quite a few host interrupts per guest interrupt. - */ - vbasedev->ops->vfio_eoi(vbasedev); -} - -uint64_t vfio_region_read(void *opaque, - hwaddr addr, unsigned size) -{ - VFIORegion *region = opaque; - VFIODevice *vbasedev = region->vbasedev; - union { - uint8_t byte; - uint16_t word; - uint32_t dword; - uint64_t qword; - } buf; - uint64_t data = 0; - - if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) { - error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m", - __func__, vbasedev->name, region->nr, - addr, size); - return (uint64_t)-1; - } - switch (size) { - case 1: - data = buf.byte; - break; - case 2: - data = le16_to_cpu(buf.word); - break; - case 4: - data = le32_to_cpu(buf.dword); - break; - case 8: - data = le64_to_cpu(buf.qword); - break; - default: - hw_error("vfio: unsupported read size, %u bytes", size); - break; - } - - trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data); - - /* Same as write above */ - vbasedev->ops->vfio_eoi(vbasedev); - - return data; -} - -const MemoryRegionOps vfio_region_ops = { - .read = vfio_region_read, - .write = vfio_region_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .valid = { - .min_access_size = 1, - .max_access_size = 8, - }, - .impl = { - .min_access_size = 1, - .max_access_size = 8, - }, -}; +#include "vfio-helpers.h" int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size) { @@ -304,399 +75,154 @@ vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id) return vfio_get_cap((void *)info, info->cap_offset, id); } -static int vfio_setup_region_sparse_mmaps(VFIORegion *region, - struct vfio_region_info *info) +struct vfio_info_cap_header * +vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) { - struct vfio_info_cap_header *hdr; - struct vfio_region_info_cap_sparse_mmap *sparse; - int i, j; - - hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP); - if (!hdr) { - return -ENODEV; - } - - sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header); - - trace_vfio_region_sparse_mmap_header(region->vbasedev->name, - region->nr, sparse->nr_areas); - - region->mmaps = g_new0(VFIOMmap, sparse->nr_areas); - - 
for (i = 0, j = 0; i < sparse->nr_areas; i++) { - if (sparse->areas[i].size) { - trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset, - sparse->areas[i].offset + - sparse->areas[i].size - 1); - region->mmaps[j].offset = sparse->areas[i].offset; - region->mmaps[j].size = sparse->areas[i].size; - j++; - } + if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { + return NULL; } - region->nr_mmaps = j; - region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap)); - - return 0; + return vfio_get_cap((void *)info, info->cap_offset, id); } -int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region, - int index, const char *name) +bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info, + unsigned int *avail) { - g_autofree struct vfio_region_info *info = NULL; - int ret; + struct vfio_info_cap_header *hdr; + struct vfio_iommu_type1_info_dma_avail *cap; - ret = vfio_get_region_info(vbasedev, index, &info); - if (ret) { - return ret; + /* If the capability cannot be found, assume no DMA limiting */ + hdr = vfio_get_iommu_type1_info_cap(info, + VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL); + if (!hdr) { + return false; } - region->vbasedev = vbasedev; - region->flags = info->flags; - region->size = info->size; - region->fd_offset = info->offset; - region->nr = index; - - if (region->size) { - region->mem = g_new0(MemoryRegion, 1); - memory_region_init_io(region->mem, obj, &vfio_region_ops, - region, name, region->size); - - if (!vbasedev->no_mmap && - region->flags & VFIO_REGION_INFO_FLAG_MMAP) { - - ret = vfio_setup_region_sparse_mmaps(region, info); - - if (ret) { - region->nr_mmaps = 1; - region->mmaps = g_new0(VFIOMmap, region->nr_mmaps); - region->mmaps[0].offset = 0; - region->mmaps[0].size = region->size; - } - } + if (avail != NULL) { + cap = (void *) hdr; + *avail = cap->avail; } - trace_vfio_region_setup(vbasedev->name, index, name, - region->flags, region->fd_offset, region->size); - return 0; + return true; } -static void vfio_subregion_unmap(VFIORegion *region, int index) -{ - trace_vfio_region_unmap(memory_region_name(®ion->mmaps[index].mem), - region->mmaps[index].offset, - region->mmaps[index].offset + - region->mmaps[index].size - 1); - memory_region_del_subregion(region->mem, ®ion->mmaps[index].mem); - munmap(region->mmaps[index].mmap, region->mmaps[index].size); - object_unparent(OBJECT(®ion->mmaps[index].mem)); - region->mmaps[index].mmap = NULL; -} +#ifdef CONFIG_KVM +/* + * We have a single VFIO pseudo device per KVM VM. Once created it lives + * for the life of the VM. Closing the file descriptor only drops our + * reference to it and the device's reference to kvm. Therefore once + * initialized, this file descriptor is only released on QEMU exit and + * we'll re-use it should another vfio device be attached before then. + */ +int vfio_kvm_device_fd = -1; +#endif -int vfio_region_mmap(VFIORegion *region) +void vfio_kvm_device_close(void) { - int i, prot = 0; - char *name; - - if (!region->mem) { - return 0; - } - - prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0; - prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? 
PROT_WRITE : 0; - - for (i = 0; i < region->nr_mmaps; i++) { - region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot, - MAP_SHARED, region->vbasedev->fd, - region->fd_offset + - region->mmaps[i].offset); - if (region->mmaps[i].mmap == MAP_FAILED) { - int ret = -errno; - - trace_vfio_region_mmap_fault(memory_region_name(region->mem), i, - region->fd_offset + - region->mmaps[i].offset, - region->fd_offset + - region->mmaps[i].offset + - region->mmaps[i].size - 1, ret); - - region->mmaps[i].mmap = NULL; - - for (i--; i >= 0; i--) { - vfio_subregion_unmap(region, i); - } - - return ret; - } - - name = g_strdup_printf("%s mmaps[%d]", - memory_region_name(region->mem), i); - memory_region_init_ram_device_ptr(®ion->mmaps[i].mem, - memory_region_owner(region->mem), - name, region->mmaps[i].size, - region->mmaps[i].mmap); - g_free(name); - memory_region_add_subregion(region->mem, region->mmaps[i].offset, - ®ion->mmaps[i].mem); - - trace_vfio_region_mmap(memory_region_name(®ion->mmaps[i].mem), - region->mmaps[i].offset, - region->mmaps[i].offset + - region->mmaps[i].size - 1); +#ifdef CONFIG_KVM + kvm_close(); + if (vfio_kvm_device_fd != -1) { + close(vfio_kvm_device_fd); + vfio_kvm_device_fd = -1; } - - return 0; +#endif } -void vfio_region_unmap(VFIORegion *region) +int vfio_kvm_device_add_fd(int fd, Error **errp) { - int i; - - if (!region->mem) { - return; - } +#ifdef CONFIG_KVM + struct kvm_device_attr attr = { + .group = KVM_DEV_VFIO_FILE, + .attr = KVM_DEV_VFIO_FILE_ADD, + .addr = (uint64_t)(unsigned long)&fd, + }; - for (i = 0; i < region->nr_mmaps; i++) { - if (region->mmaps[i].mmap) { - vfio_subregion_unmap(region, i); - } + if (!kvm_enabled()) { + return 0; } -} - -void vfio_region_exit(VFIORegion *region) -{ - int i; - if (!region->mem) { - return; - } + if (vfio_kvm_device_fd < 0) { + struct kvm_create_device cd = { + .type = KVM_DEV_TYPE_VFIO, + }; - for (i = 0; i < region->nr_mmaps; i++) { - if (region->mmaps[i].mmap) { - memory_region_del_subregion(region->mem, ®ion->mmaps[i].mem); + if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) { + error_setg_errno(errp, errno, "Failed to create KVM VFIO device"); + return -errno; } - } - - trace_vfio_region_exit(region->vbasedev->name, region->nr); -} - -void vfio_region_finalize(VFIORegion *region) -{ - int i; - if (!region->mem) { - return; + vfio_kvm_device_fd = cd.fd; } - for (i = 0; i < region->nr_mmaps; i++) { - if (region->mmaps[i].mmap) { - munmap(region->mmaps[i].mmap, region->mmaps[i].size); - object_unparent(OBJECT(®ion->mmaps[i].mem)); - } + if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { + error_setg_errno(errp, errno, "Failed to add fd %d to KVM VFIO device", + fd); + return -errno; } - - object_unparent(OBJECT(region->mem)); - - g_free(region->mem); - g_free(region->mmaps); - - trace_vfio_region_finalize(region->vbasedev->name, region->nr); - - region->mem = NULL; - region->mmaps = NULL; - region->nr_mmaps = 0; - region->size = 0; - region->flags = 0; - region->nr = 0; +#endif + return 0; } -void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled) +int vfio_kvm_device_del_fd(int fd, Error **errp) { - int i; - - if (!region->mem) { - return; - } +#ifdef CONFIG_KVM + struct kvm_device_attr attr = { + .group = KVM_DEV_VFIO_FILE, + .attr = KVM_DEV_VFIO_FILE_DEL, + .addr = (uint64_t)(unsigned long)&fd, + }; - for (i = 0; i < region->nr_mmaps; i++) { - if (region->mmaps[i].mmap) { - memory_region_set_enabled(®ion->mmaps[i].mem, enabled); - } + if (vfio_kvm_device_fd < 0) { + error_setg(errp, "KVM 
VFIO device isn't created yet"); + return -EINVAL; } - trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem), - enabled); -} - -int vfio_get_region_info(VFIODevice *vbasedev, int index, - struct vfio_region_info **info) -{ - size_t argsz = sizeof(struct vfio_region_info); - - *info = g_malloc0(argsz); - - (*info)->index = index; -retry: - (*info)->argsz = argsz; - - if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) { - g_free(*info); - *info = NULL; + if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { + error_setg_errno(errp, errno, + "Failed to remove fd %d from KVM VFIO device", fd); return -errno; } - - if ((*info)->argsz > argsz) { - argsz = (*info)->argsz; - *info = g_realloc(*info, argsz); - - goto retry; - } - +#endif return 0; } -int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type, - uint32_t subtype, struct vfio_region_info **info) -{ - int i; - - for (i = 0; i < vbasedev->num_regions; i++) { - struct vfio_info_cap_header *hdr; - struct vfio_region_info_cap_type *cap_type; - - if (vfio_get_region_info(vbasedev, i, info)) { - continue; - } - - hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE); - if (!hdr) { - g_free(*info); - continue; - } - - cap_type = container_of(hdr, struct vfio_region_info_cap_type, header); - - trace_vfio_get_dev_region(vbasedev->name, i, - cap_type->type, cap_type->subtype); - - if (cap_type->type == type && cap_type->subtype == subtype) { - return 0; - } - - g_free(*info); - } - - *info = NULL; - return -ENODEV; -} - -bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type) +struct vfio_device_info *vfio_get_device_info(int fd) { - g_autofree struct vfio_region_info *info = NULL; - bool ret = false; + struct vfio_device_info *info; + uint32_t argsz = sizeof(*info); - if (!vfio_get_region_info(vbasedev, region, &info)) { - if (vfio_get_region_info_cap(info, cap_type)) { - ret = true; - } - } + info = g_malloc0(argsz); - return ret; -} +retry: + info->argsz = argsz; -bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp) -{ - ERRP_GUARD(); - struct stat st; - - if (vbasedev->fd < 0) { - if (stat(vbasedev->sysfsdev, &st) < 0) { - error_setg_errno(errp, errno, "no such host device"); - error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev); - return false; - } - /* User may specify a name, e.g: VFIO platform device */ - if (!vbasedev->name) { - vbasedev->name = g_path_get_basename(vbasedev->sysfsdev); - } - } else { - if (!vbasedev->iommufd) { - error_setg(errp, "Use FD passing only with iommufd backend"); - return false; - } - /* - * Give a name with fd so any function printing out vbasedev->name - * will not break. 
- */ - if (!vbasedev->name) { - vbasedev->name = g_strdup_printf("VFIO_FD%d", vbasedev->fd); - } + if (ioctl(fd, VFIO_DEVICE_GET_INFO, info)) { + g_free(info); + return NULL; } - return true; -} - -void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp) -{ - ERRP_GUARD(); - int fd = monitor_fd_param(monitor_cur(), str, errp); - - if (fd < 0) { - error_prepend(errp, "Could not parse remote object fd %s:", str); - return; + if (info->argsz > argsz) { + argsz = info->argsz; + info = g_realloc(info, argsz); + goto retry; } - vbasedev->fd = fd; -} -void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops, - DeviceState *dev, bool ram_discard) -{ - vbasedev->type = type; - vbasedev->ops = ops; - vbasedev->dev = dev; - vbasedev->fd = -1; - - vbasedev->ram_block_discard_allowed = ram_discard; + return info; } -int vfio_device_get_aw_bits(VFIODevice *vdev) +bool vfio_arch_wants_loading_config_after_iter(void) { /* - * iova_ranges is a sorted list. For old kernels that support - * VFIO but not support query of iova ranges, iova_ranges is NULL, - * in this case HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX(64) is returned. + * Starting the config load only after all iterables were loaded (during + * non-iterables loading phase) is required for ARM64 due to this platform + * VFIO dependency on interrupt controller being loaded first. + * + * See commit d329f5032e17 ("vfio: Move the saving of the config space to + * the right place in VFIO migration"). */ - GList *l = g_list_last(vdev->bcontainer->iova_ranges); - - if (l) { - Range *range = l->data; - return range_get_last_bit(range) + 1; - } - - return HOST_IOMMU_DEVICE_CAP_AW_BITS_MAX; -} - -bool vfio_device_is_mdev(VFIODevice *vbasedev) -{ - g_autofree char *subsys = NULL; - g_autofree char *tmp = NULL; - - if (!vbasedev->sysfsdev) { - return false; - } - - tmp = g_strdup_printf("%s/subsystem", vbasedev->sysfsdev); - subsys = realpath(tmp, NULL); - return subsys && (strcmp(subsys, "/sys/bus/mdev") == 0); -} - -bool vfio_device_hiod_realize(VFIODevice *vbasedev, Error **errp) -{ - HostIOMMUDevice *hiod = vbasedev->hiod; - - if (!hiod) { - return true; - } - - return HOST_IOMMU_DEVICE_GET_CLASS(hiod)->realize(hiod, vbasedev, errp); +#if defined(TARGET_ARM) + return true; +#else + return false; +#endif } diff --git a/hw/vfio/igd.c b/hw/vfio/igd.c index d320d032a7f..ee0767b0b89 100644 --- a/hw/vfio/igd.c +++ b/hw/vfio/igd.c @@ -14,9 +14,12 @@ #include "qemu/units.h" #include "qemu/error-report.h" #include "qapi/error.h" +#include "qapi/qmp/qerror.h" +#include "hw/boards.h" #include "hw/hw.h" #include "hw/nvram/fw_cfg.h" #include "pci.h" +#include "pci-quirks.h" #include "trace.h" /* @@ -59,49 +62,152 @@ */ static int igd_gen(VFIOPCIDevice *vdev) { - if ((vdev->device_id & 0xfff) == 0xa84) { - return 8; /* Broxton */ + /* + * Device IDs for Broxton/Apollo Lake are 0x0a84, 0x1a84, 0x1a85, 0x5a84 + * and 0x5a85, match bit 11:1 here + * Prefix 0x0a is taken by Haswell, this rule should be matched first. 
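
A quick illustration of the bit 11:1 match used below; the Broxton/Apollo Lake IDs are the ones listed above, and 0x0a16 is assumed here only as an example Haswell ID for contrast:

    /* Illustrative only: all Broxton/Apollo Lake IDs reduce to 0xa84 under the mask. */
    assert((0x0a84 & 0xffe) == 0xa84);
    assert((0x1a85 & 0xffe) == 0xa84);   /* bit 12 and bit 0 are ignored */
    assert((0x5a84 & 0xffe) == 0xa84);
    assert((0x0a16 & 0xffe) != 0xa84);   /* assumed Haswell ID, not matched */
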
+ */ + if ((vdev->device_id & 0xffe) == 0xa84) { + return 9; } switch (vdev->device_id & 0xff00) { - /* Old, untested, unavailable, unknown */ - case 0x0000: - case 0x2500: - case 0x2700: - case 0x2900: - case 0x2a00: - case 0x2e00: - case 0x3500: - case 0xa000: - return -1; - /* SandyBridge, IvyBridge, ValleyView, Haswell */ - case 0x0100: - case 0x0400: - case 0x0a00: - case 0x0c00: - case 0x0d00: - case 0x0f00: + case 0x0100: /* SandyBridge, IvyBridge */ return 6; - /* BroadWell, CherryView, SkyLake, KabyLake */ - case 0x1600: - case 0x1900: - case 0x2200: - case 0x5900: + case 0x0400: /* Haswell */ + case 0x0a00: /* Haswell */ + case 0x0c00: /* Haswell */ + case 0x0d00: /* Haswell */ + case 0x0f00: /* Valleyview/Bay Trail */ + return 7; + case 0x1600: /* Broadwell */ + case 0x2200: /* Cherryview */ return 8; + case 0x1900: /* Skylake */ + case 0x3100: /* Gemini Lake */ + case 0x5900: /* Kaby Lake */ + case 0x3e00: /* Coffee Lake */ + case 0x9B00: /* Comet Lake */ + return 9; + case 0x8A00: /* Ice Lake */ + case 0x4500: /* Elkhart Lake */ + case 0x4E00: /* Jasper Lake */ + return 11; + case 0x9A00: /* Tiger Lake */ + case 0x4C00: /* Rocket Lake */ + case 0x4600: /* Alder Lake */ + case 0xA700: /* Raptor Lake */ + return 12; } - return 8; /* Assume newer is compatible */ + /* + * Unfortunately, Intel changes it's specification quite often. This makes + * it impossible to use a suitable default value for unknown devices. + * Return -1 for not applying any generation-specific quirks. + */ + return -1; } -typedef struct VFIOIGDQuirk { - struct VFIOPCIDevice *vdev; - uint32_t index; - uint32_t bdsm; -} VFIOIGDQuirk; - +#define IGD_ASLS 0xfc /* ASL Storage Register */ #define IGD_GMCH 0x50 /* Graphics Control Register */ #define IGD_BDSM 0x5c /* Base Data of Stolen Memory */ +#define IGD_BDSM_GEN11 0xc0 /* Base Data of Stolen Memory of gen 11 and later */ + +#define IGD_GMCH_VGA_DISABLE BIT(1) +#define IGD_GMCH_GEN6_GMS_SHIFT 3 /* SNB_GMCH in i915 */ +#define IGD_GMCH_GEN6_GMS_MASK 0x1f +#define IGD_GMCH_GEN8_GMS_SHIFT 8 /* BDW_GMCH in i915 */ +#define IGD_GMCH_GEN8_GMS_MASK 0xff +static uint64_t igd_stolen_memory_size(int gen, uint32_t gmch) +{ + uint64_t gms; + + if (gen < 8) { + gms = (gmch >> IGD_GMCH_GEN6_GMS_SHIFT) & IGD_GMCH_GEN6_GMS_MASK; + } else { + gms = (gmch >> IGD_GMCH_GEN8_GMS_SHIFT) & IGD_GMCH_GEN8_GMS_MASK; + } + + if (gen < 9) { + return gms * 32 * MiB; + } else { + if (gms < 0xf0) { + return gms * 32 * MiB; + } else { + return (gms - 0xf0 + 1) * 4 * MiB; + } + } + + return 0; +} + +/* + * The OpRegion includes the Video BIOS Table, which seems important for + * telling the driver what sort of outputs it has. Without this, the device + * may work in the guest, but we may not get output. This also requires BIOS + * support to reserve and populate a section of guest memory sufficient for + * the table and to write the base address of that memory to the ASLS register + * of the IGD device. 
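
A rough sketch of the guest-firmware side of this contract, assuming hypothetical fw_cfg and PCI config helpers (these are not the SeaBIOS/OVMF APIs):

    size_t len = fw_cfg_file_size("etc/igd-opregion");      /* hypothetical helper */
    void *buf = alloc_reserved_memory(len, 4096);           /* 4k-aligned reservation */
    fw_cfg_file_read("etc/igd-opregion", buf, len);         /* hypothetical helper */
    /* Point the IGD ASLS register (config offset 0xFC) at the copy. */
    pci_config_write32(igd_bdf, 0xFC, (uint32_t)(uintptr_t)buf);
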
+ */ +static bool vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev, + struct vfio_region_info *info, + Error **errp) +{ + int ret; + + vdev->igd_opregion = g_malloc0(info->size); + ret = pread(vdev->vbasedev.fd, vdev->igd_opregion, + info->size, info->offset); + if (ret != info->size) { + error_setg(errp, "failed to read IGD OpRegion"); + g_free(vdev->igd_opregion); + vdev->igd_opregion = NULL; + return false; + } + + /* + * Provide fw_cfg with a copy of the OpRegion which the VM firmware is to + * allocate 32bit reserved memory for, copy these contents into, and write + * the reserved memory base address to the device ASLS register at 0xFC. + * Alignment of this reserved region seems flexible, but using a 4k page + * alignment seems to work well. This interface assumes a single IGD + * device, which may be at VM address 00:02.0 in legacy mode or another + * address in UPT mode. + * + * NB, there may be future use cases discovered where the VM should have + * direct interaction with the host OpRegion, in which case the write to + * the ASLS register would trigger MemoryRegion setup to enable that. + */ + fw_cfg_add_file(fw_cfg_find(), "etc/igd-opregion", + vdev->igd_opregion, info->size); + + trace_vfio_pci_igd_opregion_enabled(vdev->vbasedev.name); + + return true; +} + +static bool vfio_pci_igd_opregion_detect(VFIOPCIDevice *vdev, + struct vfio_region_info **opregion) +{ + int ret; + + ret = vfio_device_get_region_info_type(&vdev->vbasedev, + VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL, + VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, opregion); + if (ret) { + return false; + } + + /* Hotplugging is not supported for opregion access */ + if (vdev->pdev.qdev.hotplugged) { + warn_report("IGD device detected, but OpRegion is not supported " + "on hotplugged device."); + return false; + } + + return true; +} /* * The rather short list of registers that we copy from the host devices. @@ -188,7 +294,8 @@ static void vfio_pci_igd_lpc_bridge_realize(PCIDevice *pdev, Error **errp) } } -static void vfio_pci_igd_lpc_bridge_class_init(ObjectClass *klass, void *data) +static void vfio_pci_igd_lpc_bridge_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -204,7 +311,7 @@ static const TypeInfo vfio_pci_igd_lpc_bridge_info = { .name = "vfio-pci-igd-lpc-bridge", .parent = TYPE_PCI_DEVICE, .class_init = vfio_pci_igd_lpc_bridge_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, @@ -239,374 +346,377 @@ static int vfio_pci_igd_lpc_init(VFIOPCIDevice *vdev, return ret; } -/* - * IGD Gen8 and newer support up to 8MB for the GTT and use a 64bit PTE - * entry, older IGDs use 2MB and 32bit. Each PTE maps a 4k page. Therefore - * we either have 2M/4k * 4 = 2k or 8M/4k * 8 = 16k as the maximum iobar index - * for programming the GTT. - * - * See linux:include/drm/i915_drm.h for shift and mask values. 
- */ -static int vfio_igd_gtt_max(VFIOPCIDevice *vdev) +static bool vfio_pci_igd_setup_lpc_bridge(VFIOPCIDevice *vdev, Error **errp) { - uint32_t gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, sizeof(gmch)); - int ggms, gen = igd_gen(vdev); + struct vfio_region_info *host = NULL; + struct vfio_region_info *lpc = NULL; + PCIDevice *lpc_bridge; + int ret; + + /* + * Copying IDs or creating new devices are not supported on hotplug + */ + if (vdev->pdev.qdev.hotplugged) { + error_setg(errp, "IGD LPC is not supported on hotplugged device"); + return false; + } - gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, sizeof(gmch)); - ggms = (gmch >> (gen < 8 ? 8 : 6)) & 0x3; - if (gen > 6) { - ggms = 1 << ggms; + /* + * We need to create an LPC/ISA bridge at PCI bus address 00:1f.0 that we + * can stuff host values into, so if there's already one there and it's not + * one we can hack on, this quirk is no-go. Sorry Q35. + */ + lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev), + 0, PCI_DEVFN(0x1f, 0)); + if (lpc_bridge && !object_dynamic_cast(OBJECT(lpc_bridge), + "vfio-pci-igd-lpc-bridge")) { + error_setg(errp, + "Cannot create LPC bridge due to existing device at 1f.0"); + return false; } - ggms *= MiB; + /* + * Check whether we have all the vfio device specific regions to + * support LPC quirk (added in Linux v4.6). + */ + ret = vfio_device_get_region_info_type(&vdev->vbasedev, + VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL, + VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG, &lpc); + if (ret) { + error_setg(errp, "IGD LPC bridge access is not supported by kernel"); + return false; + } - return (ggms / (4 * KiB)) * (gen < 8 ? 4 : 8); -} + ret = vfio_device_get_region_info_type(&vdev->vbasedev, + VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL, + VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG, &host); + if (ret) { + error_setg(errp, "IGD host bridge access is not supported by kernel"); + return false; + } -/* - * The IGD ROM will make use of stolen memory (GGMS) for support of VESA modes. - * Somehow the host stolen memory range is used for this, but how the ROM gets - * it is a mystery, perhaps it's hardcoded into the ROM. Thankfully though, it - * reprograms the GTT through the IOBAR where we can trap it and transpose the - * programming to the VM allocated buffer. That buffer gets reserved by the VM - * firmware via the fw_cfg entry added below. Here we're just monitoring the - * IOBAR address and data registers to detect a write sequence targeting the - * GTTADR. This code is developed by observed behavior and doesn't have a - * direct spec reference, unfortunately. 
- */ -static uint64_t vfio_igd_quirk_data_read(void *opaque, - hwaddr addr, unsigned size) -{ - VFIOIGDQuirk *igd = opaque; - VFIOPCIDevice *vdev = igd->vdev; + /* Create/modify LPC bridge */ + ret = vfio_pci_igd_lpc_init(vdev, lpc); + if (ret) { + error_setg(errp, "Failed to create/modify LPC bridge for IGD"); + return false; + } - igd->index = ~0; + /* Stuff some host values into the VM PCI host bridge */ + ret = vfio_pci_igd_host_init(vdev, host); + if (ret) { + error_setg(errp, "Failed to modify host bridge for IGD"); + return false; + } - return vfio_region_read(&vdev->bars[4].region, addr + 4, size); + return true; } -static void vfio_igd_quirk_data_write(void *opaque, hwaddr addr, - uint64_t data, unsigned size) +static bool vfio_pci_igd_override_gms(int gen, uint32_t gms, uint32_t *gmch) { - VFIOIGDQuirk *igd = opaque; - VFIOPCIDevice *vdev = igd->vdev; - uint64_t val = data; - int gen = igd_gen(vdev); - - /* - * Programming the GGMS starts at index 0x1 and uses every 4th index (ie. - * 0x1, 0x5, 0x9, 0xd,...). For pre-Gen8 each 4-byte write is a whole PTE - * entry, with 0th bit enable set. For Gen8 and up, PTEs are 64bit, so - * entries 0x5 & 0xd are the high dword, in our case zero. Each PTE points - * to a 4k page, which we translate to a page from the VM allocated region, - * pointed to by the BDSM register. If this is not set, we fail. - * - * We trap writes to the full configured GTT size, but we typically only - * see the vBIOS writing up to (nearly) the 1MB barrier. In fact it often - * seems to miss the last entry for an even 1MB GTT. Doing a gratuitous - * write of that last entry does work, but is hopefully unnecessary since - * we clear the previous GTT on initialization. - */ - if ((igd->index % 4 == 1) && igd->index < vfio_igd_gtt_max(vdev)) { - if (gen < 8 || (igd->index % 8 == 1)) { - uint32_t base; - - base = pci_get_long(vdev->pdev.config + IGD_BDSM); - if (!base) { - hw_error("vfio-igd: Guest attempted to program IGD GTT before " - "BIOS reserved stolen memory. 
Unsupported BIOS?"); - } - - val = data - igd->bdsm + base; + bool ret = false; + + if (gen == -1) { + error_report("x-igd-gms is not supported on this device"); + } else if (gen < 8) { + if (gms <= 0x10) { + *gmch &= ~(IGD_GMCH_GEN6_GMS_MASK << IGD_GMCH_GEN6_GMS_SHIFT); + *gmch |= gms << IGD_GMCH_GEN6_GMS_SHIFT; + ret = true; } else { - val = 0; /* upper 32bits of pte, we only enable below 4G PTEs */ + error_report(QERR_INVALID_PARAMETER_VALUE, "x-igd-gms", "0~0x10"); + } + } else if (gen == 8) { + if (gms <= 0x40) { + *gmch &= ~(IGD_GMCH_GEN8_GMS_MASK << IGD_GMCH_GEN8_GMS_SHIFT); + *gmch |= gms << IGD_GMCH_GEN8_GMS_SHIFT; + ret = true; + } else { + error_report(QERR_INVALID_PARAMETER_VALUE, "x-igd-gms", "0~0x40"); + } + } else { + /* 0x0 to 0x40: 32MB increments starting at 0MB */ + /* 0xf0 to 0xfe: 4MB increments starting at 4MB */ + if ((gms <= 0x40) || (gms >= 0xf0 && gms <= 0xfe)) { + *gmch &= ~(IGD_GMCH_GEN8_GMS_MASK << IGD_GMCH_GEN8_GMS_SHIFT); + *gmch |= gms << IGD_GMCH_GEN8_GMS_SHIFT; + ret = true; + } else { + error_report(QERR_INVALID_PARAMETER_VALUE, + "x-igd-gms", "0~0x40 or 0xf0~0xfe"); } - - trace_vfio_pci_igd_bar4_write(vdev->vbasedev.name, - igd->index, data, val); } - vfio_region_write(&vdev->bars[4].region, addr + 4, val, size); - - igd->index = ~0; + return ret; } -static const MemoryRegionOps vfio_igd_data_quirk = { - .read = vfio_igd_quirk_data_read, - .write = vfio_igd_quirk_data_write, - .endianness = DEVICE_LITTLE_ENDIAN, -}; +#define IGD_GGC_MMIO_OFFSET 0x108040 +#define IGD_BDSM_MMIO_OFFSET 0x1080C0 -static uint64_t vfio_igd_quirk_index_read(void *opaque, - hwaddr addr, unsigned size) +void vfio_probe_igd_bar0_quirk(VFIOPCIDevice *vdev, int nr) { - VFIOIGDQuirk *igd = opaque; - VFIOPCIDevice *vdev = igd->vdev; + VFIOQuirk *ggc_quirk, *bdsm_quirk; + VFIOConfigMirrorQuirk *ggc_mirror, *bdsm_mirror; + int gen; - igd->index = ~0; + if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) || + !vfio_is_vga(vdev) || nr != 0) { + return; + } - return vfio_region_read(&vdev->bars[4].region, addr, size); -} + /* Only on IGD Gen6-12 device needs quirks in BAR 0 */ + gen = igd_gen(vdev); + if (gen < 6) { + return; + } -static void vfio_igd_quirk_index_write(void *opaque, hwaddr addr, - uint64_t data, unsigned size) -{ - VFIOIGDQuirk *igd = opaque; - VFIOPCIDevice *vdev = igd->vdev; + if (vdev->igd_gms) { + ggc_quirk = vfio_quirk_alloc(1); + ggc_mirror = ggc_quirk->data = g_malloc0(sizeof(*ggc_mirror)); + ggc_mirror->mem = ggc_quirk->mem; + ggc_mirror->vdev = vdev; + ggc_mirror->bar = nr; + ggc_mirror->offset = IGD_GGC_MMIO_OFFSET; + ggc_mirror->config_offset = IGD_GMCH; + + memory_region_init_io(ggc_mirror->mem, OBJECT(vdev), + &vfio_generic_mirror_quirk, ggc_mirror, + "vfio-igd-ggc-quirk", 2); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + ggc_mirror->offset, ggc_mirror->mem, + 1); + + QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, ggc_quirk, next); + } - igd->index = data; + bdsm_quirk = vfio_quirk_alloc(1); + bdsm_mirror = bdsm_quirk->data = g_malloc0(sizeof(*bdsm_mirror)); + bdsm_mirror->mem = bdsm_quirk->mem; + bdsm_mirror->vdev = vdev; + bdsm_mirror->bar = nr; + bdsm_mirror->offset = IGD_BDSM_MMIO_OFFSET; + bdsm_mirror->config_offset = (gen < 11) ? IGD_BDSM : IGD_BDSM_GEN11; + + memory_region_init_io(bdsm_mirror->mem, OBJECT(vdev), + &vfio_generic_mirror_quirk, bdsm_mirror, + "vfio-igd-bdsm-quirk", (gen < 11) ? 
4 : 8); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + bdsm_mirror->offset, bdsm_mirror->mem, + 1); - vfio_region_write(&vdev->bars[4].region, addr, data, size); + QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, bdsm_quirk, next); } -static const MemoryRegionOps vfio_igd_index_quirk = { - .read = vfio_igd_quirk_index_read, - .write = vfio_igd_quirk_index_write, - .endianness = DEVICE_LITTLE_ENDIAN, -}; - -void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr) +static bool vfio_pci_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp) { - g_autofree struct vfio_region_info *rom = NULL; - g_autofree struct vfio_region_info *opregion = NULL; - g_autofree struct vfio_region_info *host = NULL; - g_autofree struct vfio_region_info *lpc = NULL; - VFIOQuirk *quirk; - VFIOIGDQuirk *igd; - PCIDevice *lpc_bridge; - int i, ret, ggms_mb, gms_mb = 0, gen; + struct vfio_region_info *opregion = NULL; + int ret, gen; + uint64_t gms_size = 0; uint64_t *bdsm_size; uint32_t gmch; - uint16_t cmd_orig, cmd; + bool legacy_mode_enabled = false; Error *err = NULL; - /* - * This must be an Intel VGA device at address 00:02.0 for us to even - * consider enabling legacy mode. The vBIOS has dependencies on the - * PCI bus address. - */ if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) || - !vfio_is_vga(vdev) || nr != 4 || - &vdev->pdev != pci_find_device(pci_device_root_bus(&vdev->pdev), - 0, PCI_DEVFN(0x2, 0))) { - return; + !vfio_is_vga(vdev)) { + return true; } - /* - * We need to create an LPC/ISA bridge at PCI bus address 00:1f.0 that we - * can stuff host values into, so if there's already one there and it's not - * one we can hack on, legacy mode is no-go. Sorry Q35. - */ - lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev), - 0, PCI_DEVFN(0x1f, 0)); - if (lpc_bridge && !object_dynamic_cast(OBJECT(lpc_bridge), - "vfio-pci-igd-lpc-bridge")) { - error_report("IGD device %s cannot support legacy mode due to existing " - "devices at address 1f.0", vdev->vbasedev.name); - return; + /* IGD device always comes with OpRegion */ + if (!vfio_pci_igd_opregion_detect(vdev, &opregion)) { + return true; } + info_report("OpRegion detected on Intel display %x.", vdev->device_id); - /* - * IGD is not a standard, they like to change their specs often. We - * only attempt to support back to SandBridge and we hope that newer - * devices maintain compatibility with generation 8. - */ gen = igd_gen(vdev); - if (gen != 6 && gen != 8) { - error_report("IGD device %s is unsupported in legacy mode, " - "try SandyBridge or newer", vdev->vbasedev.name); - return; - } + gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, 4); /* - * Most of what we're doing here is to enable the ROM to run, so if - * there's no ROM, there's no point in setting up this quirk. - * NB. We only seem to get BIOS ROMs, so a UEFI VM would need CSM support. 
+ * For backward compatibility, enable legacy mode when + * - Device geneation is 6 to 9 (including both) + * - IGD claims VGA cycles on host + * - Machine type is i440fx (pc_piix) + * - IGD device is at guest BDF 00:02.0 + * - Not manually disabled by x-igd-legacy-mode=off */ - ret = vfio_get_region_info(&vdev->vbasedev, - VFIO_PCI_ROM_REGION_INDEX, &rom); - if ((ret || !rom->size) && !vdev->pdev.romfile) { - error_report("IGD device %s has no ROM, legacy mode disabled", - vdev->vbasedev.name); - return; - } + if ((vdev->igd_legacy_mode != ON_OFF_AUTO_OFF) && + (gen >= 6 && gen <= 9) && + !(gmch & IGD_GMCH_VGA_DISABLE) && + !strcmp(MACHINE_GET_CLASS(qdev_get_machine())->family, "pc_piix") && + (&vdev->pdev == pci_find_device(pci_device_root_bus(&vdev->pdev), + 0, PCI_DEVFN(0x2, 0)))) { + /* + * IGD legacy mode requires: + * - VBIOS in ROM BAR or file + * - VGA IO/MMIO ranges are claimed by IGD + * - OpRegion + * - Same LPC bridge and Host bridge VID/DID/SVID/SSID as host + */ + struct vfio_region_info *rom = NULL; + + legacy_mode_enabled = true; + info_report("IGD legacy mode enabled, " + "use x-igd-legacy-mode=off to disable it if unwanted."); + + /* + * Most of what we're doing here is to enable the ROM to run, so if + * there's no ROM, there's no point in setting up this quirk. + * NB. We only seem to get BIOS ROMs, so UEFI VM would need CSM support. + */ + ret = vfio_device_get_region_info(&vdev->vbasedev, + VFIO_PCI_ROM_REGION_INDEX, &rom); + if ((ret || !rom->size) && !vdev->pdev.romfile) { + error_setg(&err, "Device has no ROM"); + goto error; + } - /* - * Ignore the hotplug corner case, mark the ROM failed, we can't - * create the devices we need for legacy mode in the hotplug scenario. - */ - if (vdev->pdev.qdev.hotplugged) { - error_report("IGD device %s hotplugged, ROM disabled, " - "legacy mode disabled", vdev->vbasedev.name); - vdev->rom_read_failed = true; - return; - } + /* + * If VGA is not already enabled, try to enable it. We shouldn't be + * using legacy mode without VGA. + */ + if (!vdev->vga) { + if (vfio_populate_vga(vdev, &err)) { + vfio_pci_config_register_vga(vdev); + } else { + error_setg(&err, "Unable to enable VGA access"); + goto error; + } + } - /* - * Check whether we have all the vfio device specific regions to - * support legacy mode (added in Linux v4.6). If not, bail. 
- */ - ret = vfio_get_dev_region_info(&vdev->vbasedev, - VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL, - VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion); - if (ret) { - error_report("IGD device %s does not support OpRegion access," - "legacy mode disabled", vdev->vbasedev.name); - return; + /* Enable OpRegion and LPC bridge quirk */ + vdev->features |= VFIO_FEATURE_ENABLE_IGD_OPREGION; + vdev->features |= VFIO_FEATURE_ENABLE_IGD_LPC; + } else if (vdev->igd_legacy_mode == ON_OFF_AUTO_ON) { + error_setg(&err, + "Machine is not i440fx, assigned BDF is not 00:02.0, " + "or device %04x (gen %d) doesn't support legacy mode", + vdev->device_id, gen); + goto error; } - ret = vfio_get_dev_region_info(&vdev->vbasedev, - VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL, - VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG, &host); - if (ret) { - error_report("IGD device %s does not support host bridge access," - "legacy mode disabled", vdev->vbasedev.name); - return; + /* Setup OpRegion access */ + if ((vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) && + !vfio_pci_igd_opregion_init(vdev, opregion, errp)) { + goto error; } - ret = vfio_get_dev_region_info(&vdev->vbasedev, - VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL, - VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG, &lpc); - if (ret) { - error_report("IGD device %s does not support LPC bridge access," - "legacy mode disabled", vdev->vbasedev.name); - return; + /* Setup LPC bridge / Host bridge PCI IDs */ + if ((vdev->features & VFIO_FEATURE_ENABLE_IGD_LPC) && + !vfio_pci_igd_setup_lpc_bridge(vdev, errp)) { + goto error; } - gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, 4); - /* - * If IGD VGA Disable is clear (expected) and VGA is not already enabled, - * try to enable it. Probably shouldn't be using legacy mode without VGA, - * but also no point in us enabling VGA if disabled in hardware. + * ASLS (OpRegion address) is read-only, emulated + * It contains HPA, guest firmware need to reprogram it with GPA. 
*/ - if (!(gmch & 0x2) && !vdev->vga && !vfio_populate_vga(vdev, &err)) { - error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); - error_report("IGD device %s failed to enable VGA access, " - "legacy mode disabled", vdev->vbasedev.name); - return; - } - - /* Create our LPC/ISA bridge */ - ret = vfio_pci_igd_lpc_init(vdev, lpc); - if (ret) { - error_report("IGD device %s failed to create LPC bridge, " - "legacy mode disabled", vdev->vbasedev.name); - return; - } + pci_set_long(vdev->pdev.config + IGD_ASLS, 0); + pci_set_long(vdev->pdev.wmask + IGD_ASLS, ~0); + pci_set_long(vdev->emulated_config_bits + IGD_ASLS, ~0); - /* Stuff some host values into the VM PCI host bridge */ - ret = vfio_pci_igd_host_init(vdev, host); - if (ret) { - error_report("IGD device %s failed to modify host bridge, " - "legacy mode disabled", vdev->vbasedev.name); - return; - } - - /* Setup OpRegion access */ - if (!vfio_pci_igd_opregion_init(vdev, opregion, &err)) { - error_append_hint(&err, "IGD legacy mode disabled\n"); - error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); - return; - } - - /* Setup our quirk to munge GTT addresses to the VM allocated buffer */ - quirk = vfio_quirk_alloc(2); - igd = quirk->data = g_malloc0(sizeof(*igd)); - igd->vdev = vdev; - igd->index = ~0; - igd->bdsm = vfio_pci_read_config(&vdev->pdev, IGD_BDSM, 4); - igd->bdsm &= ~((1 * MiB) - 1); /* 1MB aligned */ - - memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_igd_index_quirk, - igd, "vfio-igd-index-quirk", 4); - memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, - 0, &quirk->mem[0], 1); - - memory_region_init_io(&quirk->mem[1], OBJECT(vdev), &vfio_igd_data_quirk, - igd, "vfio-igd-data-quirk", 4); - memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, - 4, &quirk->mem[1], 1); - - QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); + /* + * Allow user to override dsm size using x-igd-gms option, in multiples of + * 32MiB. This option should only be used when the desired size cannot be + * set from DVMT Pre-Allocated option in host BIOS. + */ + if (vdev->igd_gms) { + if (!vfio_pci_igd_override_gms(gen, vdev->igd_gms, &gmch)) { + return false; + } - /* Determine the size of stolen memory needed for GTT */ - ggms_mb = (gmch >> (gen < 8 ? 8 : 6)) & 0x3; - if (gen > 6) { - ggms_mb = 1 << ggms_mb; + /* GMCH is read-only, emulated */ + pci_set_long(vdev->pdev.config + IGD_GMCH, gmch); + pci_set_long(vdev->pdev.wmask + IGD_GMCH, 0); + pci_set_long(vdev->emulated_config_bits + IGD_GMCH, ~0); } - /* - * Assume we have no GMS memory, but allow it to be overridden by device - * option (experimental). The spec doesn't actually allow zero GMS when - * when IVD (IGD VGA Disable) is clear, but the claim is that it's unused, - * so let's not waste VM memory for it. - */ - gmch &= ~((gen < 8 ? 0x1f : 0xff) << (gen < 8 ? 3 : 8)); + if (gen > 0) { + gms_size = igd_stolen_memory_size(gen, gmch); - if (vdev->igd_gms) { - if (vdev->igd_gms <= 0x10) { - gms_mb = vdev->igd_gms * 32; - gmch |= vdev->igd_gms << (gen < 8 ? 3 : 8); + /* BDSM is read-write, emulated. 
BIOS needs to be able to write it */ + if (gen < 11) { + pci_set_long(vdev->pdev.config + IGD_BDSM, 0); + pci_set_long(vdev->pdev.wmask + IGD_BDSM, ~0); + pci_set_long(vdev->emulated_config_bits + IGD_BDSM, ~0); } else { - error_report("Unsupported IGD GMS value 0x%x", vdev->igd_gms); - vdev->igd_gms = 0; + pci_set_quad(vdev->pdev.config + IGD_BDSM_GEN11, 0); + pci_set_quad(vdev->pdev.wmask + IGD_BDSM_GEN11, ~0); + pci_set_quad(vdev->emulated_config_bits + IGD_BDSM_GEN11, ~0); } } /* * Request reserved memory for stolen memory via fw_cfg. VM firmware * must allocate a 1MB aligned reserved memory region below 4GB with - * the requested size (in bytes) for use by the Intel PCI class VGA - * device at VM address 00:02.0. The base address of this reserved - * memory region must be written to the device BDSM register at PCI - * config offset 0x5C. + * the requested size (in bytes) for use by the IGD device. The base + * address of this reserved memory region must be written to the + * device BDSM register. + * For newer device without BDSM register, this fw_cfg item is 0. */ bdsm_size = g_malloc(sizeof(*bdsm_size)); - *bdsm_size = cpu_to_le64((ggms_mb + gms_mb) * MiB); + *bdsm_size = cpu_to_le64(gms_size); fw_cfg_add_file(fw_cfg_find(), "etc/igd-bdsm-size", bdsm_size, sizeof(*bdsm_size)); - /* GMCH is read-only, emulated */ - pci_set_long(vdev->pdev.config + IGD_GMCH, gmch); - pci_set_long(vdev->pdev.wmask + IGD_GMCH, 0); - pci_set_long(vdev->emulated_config_bits + IGD_GMCH, ~0); + trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name, (gms_size / MiB)); - /* BDSM is read-write, emulated. The BIOS needs to be able to write it */ - pci_set_long(vdev->pdev.config + IGD_BDSM, 0); - pci_set_long(vdev->pdev.wmask + IGD_BDSM, ~0); - pci_set_long(vdev->emulated_config_bits + IGD_BDSM, ~0); + return true; +error: /* - * This IOBAR gives us access to GTTADR, which allows us to write to - * the GTT itself. So let's go ahead and write zero to all the GTT - * entries to avoid spurious DMA faults. Be sure I/O access is enabled - * before talking to the device. + * When legacy mode is implicity enabled, continue on error, + * to keep compatibility */ - if (pread(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig), - vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) { - error_report("IGD device %s - failed to read PCI command register", - vdev->vbasedev.name); + if (legacy_mode_enabled && (vdev->igd_legacy_mode == ON_OFF_AUTO_AUTO)) { + error_report_err(err); + error_report("IGD legacy mode disabled"); + return true; + } + + error_propagate(errp, err); + return false; +} + +/* + * KVMGT/GVT-g vGPU exposes an emulated OpRegion. So far, users have to specify + * x-igd-opregion=on to enable the access. 
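
As an illustrative usage example (the mdev UUID path is a placeholder), the property is passed on the vfio-pci device:

    -device vfio-pci,sysfsdev=/sys/bus/mdev/devices/<uuid>,x-igd-opregion=on
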
+ * TODO: Check VID/DID and enable opregion access automatically + */ +static bool vfio_pci_kvmgt_config_quirk(VFIOPCIDevice *vdev, Error **errp) +{ + struct vfio_region_info *opregion = NULL; + int gen; + + if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) || + !vfio_is_vga(vdev)) { + return true; } - cmd = cmd_orig | PCI_COMMAND_IO; + /* FIXME: Cherryview is Gen8, but don't support GVT-g */ + gen = igd_gen(vdev); + if (gen != 8 && gen != 9) { + return true; + } - if (pwrite(vdev->vbasedev.fd, &cmd, sizeof(cmd), - vdev->config_offset + PCI_COMMAND) != sizeof(cmd)) { - error_report("IGD device %s - failed to write PCI command register", - vdev->vbasedev.name); + if (!vfio_pci_igd_opregion_detect(vdev, &opregion)) { + /* Should never reach here, KVMGT always emulates OpRegion */ + return false; } - for (i = 1; i < vfio_igd_gtt_max(vdev); i += 4) { - vfio_region_write(&vdev->bars[4].region, 0, i, 4); - vfio_region_write(&vdev->bars[4].region, 4, 0, 4); + if ((vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) && + !vfio_pci_igd_opregion_init(vdev, opregion, errp)) { + return false; } - if (pwrite(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig), - vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) { - error_report("IGD device %s - failed to restore PCI command register", - vdev->vbasedev.name); + return true; +} + +bool vfio_probe_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp) +{ + /* KVMGT/GVT-g vGPU is exposed as mdev */ + if (vdev->vbasedev.mdev) { + return vfio_pci_kvmgt_config_quirk(vdev, errp); } - trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name, ggms_mb + gms_mb); + return vfio_pci_igd_config_quirk(vdev, errp); } diff --git a/hw/vfio/iommufd-stubs.c b/hw/vfio/iommufd-stubs.c new file mode 100644 index 00000000000..0be52761753 --- /dev/null +++ b/hw/vfio/iommufd-stubs.c @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2025 Oracle and/or its affiliates. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "migration/cpr.h" +#include "migration/vmstate.h" + +const VMStateDescription vmstate_cpr_vfio_devices = { + .name = CPR_STATE "/vfio devices", + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]){ + VMSTATE_END_OF_LIST() + } +}; diff --git a/hw/vfio/iommufd.c b/hw/vfio/iommufd.c index e7bece4ea10..1fc9f27dec9 100644 --- a/hw/vfio/iommufd.c +++ b/hw/vfio/iommufd.c @@ -15,20 +15,28 @@ #include #include -#include "hw/vfio/vfio-common.h" +#include "hw/vfio/vfio-device.h" #include "qemu/error-report.h" #include "trace.h" #include "qapi/error.h" -#include "sysemu/iommufd.h" +#include "system/iommufd.h" #include "hw/qdev-core.h" -#include "sysemu/reset.h" +#include "hw/vfio/vfio-cpr.h" +#include "system/reset.h" #include "qemu/cutils.h" #include "qemu/chardev_open.h" +#include "migration/cpr.h" #include "pci.h" -#include "exec/ram_addr.h" +#include "vfio-iommufd.h" +#include "vfio-helpers.h" +#include "vfio-listener.h" + +#define TYPE_HOST_IOMMU_DEVICE_IOMMUFD_VFIO \ + TYPE_HOST_IOMMU_DEVICE_IOMMUFD "-vfio" static int iommufd_cdev_map(const VFIOContainerBase *bcontainer, hwaddr iova, - ram_addr_t size, void *vaddr, bool readonly) + ram_addr_t size, void *vaddr, bool readonly, + MemoryRegion *mr) { const VFIOIOMMUFDContainer *container = container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); @@ -38,13 +46,42 @@ static int iommufd_cdev_map(const VFIOContainerBase *bcontainer, hwaddr iova, iova, size, vaddr, readonly); } +static int iommufd_cdev_map_file(const VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + int fd, unsigned long start, bool readonly) +{ + const VFIOIOMMUFDContainer *container = + container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); + + return iommufd_backend_map_file_dma(container->be, + container->ioas_id, + iova, size, fd, start, readonly); +} + static int iommufd_cdev_unmap(const VFIOContainerBase *bcontainer, hwaddr iova, ram_addr_t size, - IOMMUTLBEntry *iotlb) + IOMMUTLBEntry *iotlb, bool unmap_all) { const VFIOIOMMUFDContainer *container = container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); + /* unmap in halves */ + if (unmap_all) { + Int128 llsize = int128_rshift(int128_2_64(), 1); + int ret; + + ret = iommufd_backend_unmap_dma(container->be, container->ioas_id, + 0, int128_get64(llsize)); + + if (ret == 0) { + ret = iommufd_backend_unmap_dma(container->be, container->ioas_id, + int128_get64(llsize), + int128_get64(llsize)); + } + + return ret; + } + /* TODO: Handle dma_unmap_bitmap with iotlb args (migration) */ return iommufd_backend_unmap_dma(container->be, container->ioas_id, iova, size); @@ -85,6 +122,10 @@ static bool iommufd_cdev_connect_and_bind(VFIODevice *vbasedev, Error **errp) goto err_kvm_device_add; } + if (cpr_is_incoming()) { + goto skip_bind; + } + /* Bind device to iommufd */ bind.iommufd = iommufd->fd; if (ioctl(vbasedev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind)) { @@ -96,6 +137,8 @@ static bool iommufd_cdev_connect_and_bind(VFIODevice *vbasedev, Error **errp) vbasedev->devid = bind.out_devid; trace_iommufd_cdev_connect_and_bind(bind.iommufd, vbasedev->name, vbasedev->fd, vbasedev->devid); + +skip_bind: return true; err_bind: iommufd_cdev_kvm_device_del(vbasedev); @@ -281,14 +324,22 @@ static bool iommufd_cdev_autodomains_get(VFIODevice *vbasedev, { ERRP_GUARD(); IOMMUFDBackend *iommufd = vbasedev->iommufd; - uint32_t flags = 0; + uint32_t type = 0, flags = 0; + uint64_t hw_caps; VFIOIOASHwpt *hwpt; 
uint32_t hwpt_id; int ret; /* Try to find a domain */ QLIST_FOREACH(hwpt, &container->hwpt_list, next) { - ret = iommufd_cdev_attach_ioas_hwpt(vbasedev, hwpt->hwpt_id, errp); + if (!cpr_is_incoming()) { + ret = iommufd_cdev_attach_ioas_hwpt(vbasedev, hwpt->hwpt_id, errp); + } else if (vbasedev->cpr.hwpt_id == hwpt->hwpt_id) { + ret = 0; + } else { + continue; + } + if (ret) { /* -EINVAL means the domain is incompatible with the device. */ if (ret == -EINVAL) { @@ -305,6 +356,7 @@ static bool iommufd_cdev_autodomains_get(VFIODevice *vbasedev, return false; } else { vbasedev->hwpt = hwpt; + vbasedev->cpr.hwpt_id = hwpt->hwpt_id; QLIST_INSERT_HEAD(&hwpt->device_list, vbasedev, hwpt_next); vbasedev->iommu_dirty_tracking = iommufd_hwpt_dirty_tracking(hwpt); return true; @@ -318,10 +370,30 @@ static bool iommufd_cdev_autodomains_get(VFIODevice *vbasedev, * vfio_migration_realize() may decide to use VF dirty tracking * instead. */ - if (vbasedev->hiod->caps.hw_caps & IOMMU_HW_CAP_DIRTY_TRACKING) { + if (!iommufd_backend_get_device_info(vbasedev->iommufd, vbasedev->devid, + &type, NULL, 0, &hw_caps, NULL, + errp)) { + return false; + } + + if (hw_caps & IOMMU_HW_CAP_DIRTY_TRACKING) { flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING; } + /* + * If vIOMMU requests VFIO's cooperation to create nesting parent HWPT, + * force to create it so that it could be reused by vIOMMU to create + * nested HWPT. + */ + if (vfio_device_get_viommu_flags_want_nesting(vbasedev)) { + flags |= IOMMU_HWPT_ALLOC_NEST_PARENT; + } + + if (cpr_is_incoming()) { + hwpt_id = vbasedev->cpr.hwpt_id; + goto skip_alloc; + } + if (!iommufd_backend_alloc_hwpt(iommufd, vbasedev->devid, container->ioas_id, flags, IOMMU_HWPT_DATA_NONE, 0, NULL, @@ -329,19 +401,20 @@ static bool iommufd_cdev_autodomains_get(VFIODevice *vbasedev, return false; } + ret = iommufd_cdev_attach_ioas_hwpt(vbasedev, hwpt_id, errp); + if (ret) { + iommufd_backend_free_id(container->be, hwpt_id); + return false; + } + +skip_alloc: hwpt = g_malloc0(sizeof(*hwpt)); hwpt->hwpt_id = hwpt_id; hwpt->hwpt_flags = flags; QLIST_INIT(&hwpt->device_list); - ret = iommufd_cdev_attach_ioas_hwpt(vbasedev, hwpt->hwpt_id, errp); - if (ret) { - iommufd_backend_free_id(container->be, hwpt->hwpt_id); - g_free(hwpt); - return false; - } - vbasedev->hwpt = hwpt; + vbasedev->cpr.hwpt_id = hwpt->hwpt_id; vbasedev->iommu_dirty_tracking = iommufd_hwpt_dirty_tracking(hwpt); QLIST_INSERT_HEAD(&hwpt->device_list, vbasedev, hwpt_next); QLIST_INSERT_HEAD(&container->hwpt_list, hwpt, next); @@ -379,7 +452,9 @@ static bool iommufd_cdev_attach_container(VFIODevice *vbasedev, return iommufd_cdev_autodomains_get(vbasedev, container, errp); } - return !iommufd_cdev_attach_ioas_hwpt(vbasedev, container->ioas_id, errp); + /* If CPR, we are already attached to ioas_id. 
*/ + return cpr_is_incoming() || + !iommufd_cdev_attach_ioas_hwpt(vbasedev, container->ioas_id, errp); } static void iommufd_cdev_detach_container(VFIODevice *vbasedev, @@ -404,7 +479,8 @@ static void iommufd_cdev_container_destroy(VFIOIOMMUFDContainer *container) if (!QLIST_EMPTY(&bcontainer->device_list)) { return; } - memory_listener_unregister(&bcontainer->listener); + vfio_iommufd_cpr_unregister_container(container); + vfio_listener_unregister(bcontainer); iommufd_backend_free_id(container->be, container->ioas_id); object_unref(container); } @@ -467,11 +543,14 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, VFIOAddressSpace *space; struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) }; int ret, devfd; + bool res; uint32_t ioas_id; Error *err = NULL; const VFIOIOMMUClass *iommufd_vioc = VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD)); + vfio_cpr_load_device(vbasedev); + if (vbasedev->fd < 0) { devfd = iommufd_cdev_getfd(vbasedev->sysfsdev, errp); if (devfd < 0) { @@ -486,18 +565,7 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, goto err_connect_bind; } - space = vfio_get_address_space(as); - - /* - * The HostIOMMUDevice data from legacy backend is static and doesn't need - * any information from the (type1-iommu) backend to be initialized. In - * contrast however, the IOMMUFD HostIOMMUDevice data requires the iommufd - * FD to be connected and having a devid to be able to successfully call - * iommufd_backend_get_device_info(). - */ - if (!vfio_device_hiod_realize(vbasedev, errp)) { - goto err_alloc_ioas; - } + space = vfio_address_space_get(as); /* try to attach to an existing container in this space */ QLIST_FOREACH(bcontainer, &space->containers, next) { @@ -506,7 +574,16 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, vbasedev->iommufd != container->be) { continue; } - if (!iommufd_cdev_attach_container(vbasedev, container, &err)) { + + if (!cpr_is_incoming()) { + res = iommufd_cdev_attach_container(vbasedev, container, &err); + } else if (vbasedev->cpr.ioas_id == container->ioas_id) { + res = true; + } else { + continue; + } + + if (!res) { const char *msg = error_get_pretty(err); trace_iommufd_cdev_fail_attach_existing_container(msg); @@ -515,14 +592,19 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, } else { ret = iommufd_cdev_ram_block_discard_disable(true); if (ret) { - error_setg(errp, - "Cannot set discarding of RAM broken (%d)", ret); + error_setg_errno(errp, -ret, + "Cannot set discarding of RAM broken"); goto err_discard_disable; } goto found_container; } } + if (cpr_is_incoming()) { + ioas_id = vbasedev->cpr.ioas_id; + goto skip_ioas_alloc; + } + /* Need to allocate a new dedicated container */ if (!iommufd_backend_alloc_ioas(vbasedev->iommufd, &ioas_id, errp)) { goto err_alloc_ioas; @@ -530,10 +612,12 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, trace_iommufd_cdev_alloc_ioas(vbasedev->iommufd->fd, ioas_id); +skip_ioas_alloc: container = VFIO_IOMMU_IOMMUFD(object_new(TYPE_VFIO_IOMMU_IOMMUFD)); container->be = vbasedev->iommufd; container->ioas_id = ioas_id; QLIST_INIT(&container->hwpt_list); + vbasedev->cpr.ioas_id = ioas_id; bcontainer = &container->bcontainer; vfio_address_space_insert(space, bcontainer); @@ -544,6 +628,7 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, ret = iommufd_cdev_ram_block_discard_disable(true); if (ret) { + error_setg_errno(errp, -ret, "Cannot set 
discarding of RAM broken"); goto err_discard_disable; } @@ -555,12 +640,11 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, bcontainer->pgsizes = qemu_real_host_page_size(); } - bcontainer->listener = vfio_memory_listener; - memory_listener_register(&bcontainer->listener, bcontainer->space->as); + if (!vfio_listener_register(bcontainer, errp)) { + goto err_listener_register; + } - if (bcontainer->error) { - error_propagate_prepend(errp, bcontainer->error, - "memory listener initialization failed: "); + if (!vfio_iommufd_cpr_register_container(container, errp)) { goto err_listener_register; } @@ -573,7 +657,12 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, goto err_listener_register; } - if (!vfio_cpr_register_container(bcontainer, errp)) { + /* + * Do not move this code before attachment! The nested IOMMU support + * needs device and hwpt id which are generated only after attachment. + */ + if (!vfio_device_hiod_create_and_realize(vbasedev, + TYPE_HOST_IOMMU_DEVICE_IOMMUFD_VFIO, errp)) { goto err_listener_register; } @@ -585,14 +674,8 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, iommufd_cdev_ram_block_discard_disable(false); } - vbasedev->group = 0; - vbasedev->num_irqs = dev_info.num_irqs; - vbasedev->num_regions = dev_info.num_regions; - vbasedev->flags = dev_info.flags; - vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET); - vbasedev->bcontainer = bcontainer; - QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next); - QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next); + vfio_device_prepare(vbasedev, bcontainer, &dev_info); + vfio_iommufd_cpr_register_device(vbasedev); trace_iommufd_cdev_device_info(vbasedev->name, devfd, vbasedev->num_irqs, vbasedev->num_regions, vbasedev->flags); @@ -605,7 +688,7 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, err_attach_container: iommufd_cdev_container_destroy(container); err_alloc_ioas: - vfio_put_address_space(space); + vfio_address_space_put(space); iommufd_cdev_unbind_and_disconnect(vbasedev); err_connect_bind: close(vbasedev->fd); @@ -619,19 +702,18 @@ static void iommufd_cdev_detach(VFIODevice *vbasedev) VFIOIOMMUFDContainer *container = container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); - QLIST_REMOVE(vbasedev, global_next); - QLIST_REMOVE(vbasedev, container_next); - vbasedev->bcontainer = NULL; + vfio_device_unprepare(vbasedev); if (!vbasedev->ram_block_discard_allowed) { iommufd_cdev_ram_block_discard_disable(false); } - vfio_cpr_unregister_container(bcontainer); + object_unref(vbasedev->hiod); iommufd_cdev_detach_container(vbasedev, container); iommufd_cdev_container_destroy(container); - vfio_put_address_space(space); + vfio_address_space_put(space); + vfio_iommufd_cpr_unregister_device(vbasedev); iommufd_cdev_unbind_and_disconnect(vbasedev); close(vbasedev->fd); } @@ -786,13 +868,12 @@ static int iommufd_cdev_pci_hot_reset(VFIODevice *vbasedev, bool single) return ret; } -static void vfio_iommu_iommufd_class_init(ObjectClass *klass, void *data) +static void vfio_iommu_iommufd_class_init(ObjectClass *klass, const void *data) { VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); - vioc->hiod_typename = TYPE_HOST_IOMMU_DEVICE_IOMMUFD_VFIO; - vioc->dma_map = iommufd_cdev_map; + vioc->dma_map_file = iommufd_cdev_map_file; vioc->dma_unmap = iommufd_cdev_unmap; vioc->attach_device = iommufd_cdev_attach; vioc->detach_device = iommufd_cdev_detach; @@ -801,28 +882,52 @@ static void 
vfio_iommu_iommufd_class_init(ObjectClass *klass, void *data) vioc->query_dirty_bitmap = iommufd_query_dirty_bitmap; }; +static bool +host_iommu_device_iommufd_vfio_attach_hwpt(HostIOMMUDeviceIOMMUFD *idev, + uint32_t hwpt_id, Error **errp) +{ + VFIODevice *vbasedev = HOST_IOMMU_DEVICE(idev)->agent; + + return !iommufd_cdev_attach_ioas_hwpt(vbasedev, hwpt_id, errp); +} + +static bool +host_iommu_device_iommufd_vfio_detach_hwpt(HostIOMMUDeviceIOMMUFD *idev, + Error **errp) +{ + VFIODevice *vbasedev = HOST_IOMMU_DEVICE(idev)->agent; + + return iommufd_cdev_detach_ioas_hwpt(vbasedev, errp); +} + static bool hiod_iommufd_vfio_realize(HostIOMMUDevice *hiod, void *opaque, Error **errp) { VFIODevice *vdev = opaque; + HostIOMMUDeviceIOMMUFD *idev; HostIOMMUDeviceCaps *caps = &hiod->caps; - enum iommu_hw_info_type type; - union { - struct iommu_hw_info_vtd vtd; - } data; + VendorCaps *vendor_caps = &caps->vendor_caps; + enum iommu_hw_info_type type = 0; + uint8_t pasid_log2; uint64_t hw_caps; hiod->agent = opaque; - if (!iommufd_backend_get_device_info(vdev->iommufd, vdev->devid, - &type, &data, sizeof(data), - &hw_caps, errp)) { + if (!iommufd_backend_get_device_info(vdev->iommufd, vdev->devid, &type, + vendor_caps, sizeof(*vendor_caps), + &hw_caps, &pasid_log2, errp)) { return false; } hiod->name = g_strdup(vdev->name); caps->type = type; caps->hw_caps = hw_caps; + caps->max_pasid_log2 = pasid_log2; + + idev = HOST_IOMMU_DEVICE_IOMMUFD(hiod); + idev->iommufd = vdev->iommufd; + idev->devid = vdev->devid; + idev->hwpt_id = vdev->hwpt->hwpt_id; return true; } @@ -846,13 +951,17 @@ hiod_iommufd_vfio_get_page_size_mask(HostIOMMUDevice *hiod) } -static void hiod_iommufd_vfio_class_init(ObjectClass *oc, void *data) +static void hiod_iommufd_vfio_class_init(ObjectClass *oc, const void *data) { HostIOMMUDeviceClass *hiodc = HOST_IOMMU_DEVICE_CLASS(oc); + HostIOMMUDeviceIOMMUFDClass *idevc = HOST_IOMMU_DEVICE_IOMMUFD_CLASS(oc); hiodc->realize = hiod_iommufd_vfio_realize; hiodc->get_iova_ranges = hiod_iommufd_vfio_get_iova_ranges; hiodc->get_page_size_mask = hiod_iommufd_vfio_get_page_size_mask; + + idevc->attach_hwpt = host_iommu_device_iommufd_vfio_attach_hwpt; + idevc->detach_hwpt = host_iommu_device_iommufd_vfio_detach_hwpt; }; static const TypeInfo types[] = { diff --git a/hw/vfio/listener.c b/hw/vfio/listener.c new file mode 100644 index 00000000000..f498e23a937 --- /dev/null +++ b/hw/vfio/listener.c @@ -0,0 +1,1253 @@ +/* + * generic functions used by VFIO devices + * + * Copyright Red Hat, Inc. 2012 + * + * Authors: + * Alex Williamson + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Based on qemu-kvm device-assignment: + * Adapted for KVM by Qumranet. 
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com) + * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com) + * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com) + * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com) + * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com) + */ + +#include "qemu/osdep.h" +#include +#ifdef CONFIG_KVM +#include +#endif +#include + +#include "hw/vfio/vfio-device.h" +#include "hw/vfio/pci.h" +#include "system/address-spaces.h" +#include "system/memory.h" +#include "system/ram_addr.h" +#include "hw/hw.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/range.h" +#include "system/kvm.h" +#include "system/reset.h" +#include "system/runstate.h" +#include "trace.h" +#include "qapi/error.h" +#include "migration/misc.h" +#include "migration/qemu-file.h" +#include "system/tcg.h" +#include "system/tpm.h" +#include "vfio-migration-internal.h" +#include "vfio-helpers.h" +#include "vfio-listener.h" + +/* + * Device state interfaces + */ + + +static bool vfio_log_sync_needed(const VFIOContainerBase *bcontainer) +{ + VFIODevice *vbasedev; + + if (!vfio_container_dirty_tracking_is_started(bcontainer)) { + return false; + } + + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { + VFIOMigration *migration = vbasedev->migration; + + if (!migration) { + return false; + } + + if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF && + (vfio_device_state_is_running(vbasedev) || + vfio_device_state_is_precopy(vbasedev))) { + return false; + } + } + return true; +} + +static bool vfio_listener_skipped_section(MemoryRegionSection *section) +{ + return (!memory_region_is_ram(section->mr) && + !memory_region_is_iommu(section->mr)) || + memory_region_is_protected(section->mr) || + /* + * Sizing an enabled 64-bit BAR can cause spurious mappings to + * addresses in the upper part of the 64-bit address space. These + * are never accessed by the CPU and beyond the address width of + * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width. + */ + section->offset_within_address_space & (1ULL << 63); +} + +/* + * Called with rcu_read_lock held. + * The returned MemoryRegion must not be accessed after calling rcu_read_unlock. + */ +static MemoryRegion *vfio_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p, + Error **errp) +{ + MemoryRegion *mr; + + mr = memory_translate_iotlb(iotlb, xlat_p, errp); + if (mr && memory_region_has_ram_discard_manager(mr)) { + /* + * Malicious VMs might trigger discarding of IOMMU-mapped memory. The + * pages will remain pinned inside vfio until unmapped, resulting in a + * higher memory consumption than expected. If memory would get + * populated again later, there would be an inconsistency between pages + * pinned by vfio and pages seen by QEMU. This is the case until + * unmapped from the IOMMU (e.g., during device reset). + * + * With malicious guests, we really only care about pinning more memory + * than expected. RLIMIT_MEMLOCK set for the user/process can never be + * exceeded and can be used to mitigate this problem. + */ + warn_report_once("Using vfio with vIOMMUs and coordinated discarding of" + " RAM (e.g., virtio-mem) works, however, malicious" + " guests can trigger pinning of more memory than" + " intended via an IOMMU. 
It's possible to mitigate " + " by setting/adjusting RLIMIT_MEMLOCK."); + } + return mr; +} + +static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) +{ + VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n); + VFIOContainerBase *bcontainer = giommu->bcontainer; + hwaddr iova = iotlb->iova + giommu->iommu_offset; + MemoryRegion *mr; + hwaddr xlat; + void *vaddr; + int ret; + Error *local_err = NULL; + + trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP", + iova, iova + iotlb->addr_mask); + + if (iotlb->target_as != &address_space_memory) { + error_setg(&local_err, + "Wrong target AS \"%s\", only system memory is allowed", + iotlb->target_as->name ? iotlb->target_as->name : "none"); + if (migration_is_running()) { + migration_file_set_error(-EINVAL, local_err); + } else { + error_report_err(local_err); + } + return; + } + + rcu_read_lock(); + + if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { + bool read_only; + + mr = vfio_translate_iotlb(iotlb, &xlat, &local_err); + if (!mr) { + error_report_err(local_err); + goto out; + } + vaddr = memory_region_get_ram_ptr(mr) + xlat; + read_only = !(iotlb->perm & IOMMU_WO) || mr->readonly; + + /* + * vaddr is only valid until rcu_read_unlock(). But after + * vfio_dma_map has set up the mapping the pages will be + * pinned by the kernel. This makes sure that the RAM backend + * of vaddr will always be there, even if the memory object is + * destroyed and its backing memory munmap-ed. + */ + ret = vfio_container_dma_map(bcontainer, iova, + iotlb->addr_mask + 1, vaddr, + read_only, mr); + if (ret) { + error_report("vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", " + "0x%"HWADDR_PRIx", %p) = %d (%s)", + bcontainer, iova, + iotlb->addr_mask + 1, vaddr, ret, strerror(-ret)); + } + } else { + ret = vfio_container_dma_unmap(bcontainer, iova, + iotlb->addr_mask + 1, iotlb, false); + if (ret) { + error_setg(&local_err, + "vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", " + "0x%"HWADDR_PRIx") = %d (%s)", + bcontainer, iova, + iotlb->addr_mask + 1, ret, strerror(-ret)); + if (migration_is_running()) { + migration_file_set_error(ret, local_err); + } else { + error_report_err(local_err); + } + } + } +out: + rcu_read_unlock(); +} + +static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl, + MemoryRegionSection *section) +{ + VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, + listener); + VFIOContainerBase *bcontainer = vrdl->bcontainer; + const hwaddr size = int128_get64(section->size); + const hwaddr iova = section->offset_within_address_space; + int ret; + + /* Unmap with a single call. */ + ret = vfio_container_dma_unmap(bcontainer, iova, size , NULL, false); + if (ret) { + error_report("%s: vfio_container_dma_unmap() failed: %s", __func__, + strerror(-ret)); + } +} + +static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl, + MemoryRegionSection *section) +{ + VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, + listener); + VFIOContainerBase *bcontainer = vrdl->bcontainer; + const hwaddr end = section->offset_within_region + + int128_get64(section->size); + hwaddr start, next, iova; + void *vaddr; + int ret; + + /* + * Map in (aligned within memory region) minimum granularity, so we can + * unmap in minimum granularity later. 
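Editorial aside: a worked example (with made-up numbers) of the offset arithmetic used by vfio_iommu_map_notify() above, where giommu->iommu_offset converts the vIOMMU's region-relative IOVA into the container IOVA that is actually programmed into the host IOMMU.

    /* Illustrative numbers only -- not part of the patch. */
    static void example_iommu_offset(void)
    {
        hwaddr offset_within_address_space = 0x80000000; /* where the IOMMU MR sits */
        hwaddr offset_within_region = 0;
        hwaddr iommu_offset = offset_within_address_space - offset_within_region;

        hwaddr iotlb_iova = 0x100000;              /* IOVA reported by the vIOMMU */
        hwaddr container_iova = iotlb_iova + iommu_offset;

        g_assert(container_iova == 0x80100000);    /* passed to vfio_container_dma_map() */
    }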
+ */ + for (start = section->offset_within_region; start < end; start = next) { + next = ROUND_UP(start + 1, vrdl->granularity); + next = MIN(next, end); + + iova = start - section->offset_within_region + + section->offset_within_address_space; + vaddr = memory_region_get_ram_ptr(section->mr) + start; + + ret = vfio_container_dma_map(bcontainer, iova, next - start, + vaddr, section->readonly, section->mr); + if (ret) { + /* Rollback */ + vfio_ram_discard_notify_discard(rdl, section); + return ret; + } + } + return 0; +} + +static void vfio_ram_discard_register_listener(VFIOContainerBase *bcontainer, + MemoryRegionSection *section) +{ + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); + int target_page_size = qemu_target_page_size(); + VFIORamDiscardListener *vrdl; + + /* Ignore some corner cases not relevant in practice. */ + g_assert(QEMU_IS_ALIGNED(section->offset_within_region, target_page_size)); + g_assert(QEMU_IS_ALIGNED(section->offset_within_address_space, + target_page_size)); + g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), target_page_size)); + + vrdl = g_new0(VFIORamDiscardListener, 1); + vrdl->bcontainer = bcontainer; + vrdl->mr = section->mr; + vrdl->offset_within_address_space = section->offset_within_address_space; + vrdl->size = int128_get64(section->size); + vrdl->granularity = ram_discard_manager_get_min_granularity(rdm, + section->mr); + + g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity)); + g_assert(bcontainer->pgsizes && + vrdl->granularity >= 1ULL << ctz64(bcontainer->pgsizes)); + + ram_discard_listener_init(&vrdl->listener, + vfio_ram_discard_notify_populate, + vfio_ram_discard_notify_discard, true); + ram_discard_manager_register_listener(rdm, &vrdl->listener, section); + QLIST_INSERT_HEAD(&bcontainer->vrdl_list, vrdl, next); + + /* + * Sanity-check if we have a theoretically problematic setup where we could + * exceed the maximum number of possible DMA mappings over time. We assume + * that each mapped section in the same address space as a RamDiscardManager + * section consumes exactly one DMA mapping, with the exception of + * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections + * in the same address space as RamDiscardManager sections. + * + * We assume that each section in the address space consumes one memslot. + * We take the number of KVM memory slots as a best guess for the maximum + * number of sections in the address space we could have over time, + * also consuming DMA mappings. + */ + if (bcontainer->dma_max_mappings) { + unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512; + +#ifdef CONFIG_KVM + if (kvm_enabled()) { + max_memslots = kvm_get_max_memslots(); + } +#endif + + QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) { + hwaddr start, end; + + start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space, + vrdl->granularity); + end = ROUND_UP(vrdl->offset_within_address_space + vrdl->size, + vrdl->granularity); + vrdl_mappings += (end - start) / vrdl->granularity; + vrdl_count++; + } + + if (vrdl_mappings + max_memslots - vrdl_count > + bcontainer->dma_max_mappings) { + warn_report("%s: possibly running out of DMA mappings. E.g., try" + " increasing the 'block-size' of virtio-mem devies." 
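Editorial aside: a worked example (with made-up numbers) of the DMA-mapping estimate that vfio_ram_discard_register_listener() above uses to decide whether to warn; the 65535 limit is only an illustrative value for the container's dma_max_mappings, typical of VFIO type1.

    /* Illustrative arithmetic only -- not part of the patch. */
    static void example_dma_mapping_estimate(void)
    {
        uint64_t region_size = 256ULL << 30;          /* one 256 GiB virtio-mem region */
        uint64_t granularity = 2ULL << 20;            /* its 2 MiB block-size */
        uint64_t vrdl_mappings = region_size / granularity;   /* 131072 */
        uint64_t vrdl_count = 1, max_memslots = 512;
        uint64_t dma_max_mappings = 65535;

        /*
         * 131072 + 512 - 1 > 65535, so the listener warns; raising the block-size
         * to 8 MiB (32768 + 511 = 33279 potential mappings) would silence it.
         */
        g_assert(vrdl_mappings + max_memslots - vrdl_count > dma_max_mappings);
    }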
+ " Maximum possible DMA mappings: %d, Maximum possible" + " memslots: %d", __func__, bcontainer->dma_max_mappings, + max_memslots); + } + } +} + +static void vfio_ram_discard_unregister_listener(VFIOContainerBase *bcontainer, + MemoryRegionSection *section) +{ + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); + VFIORamDiscardListener *vrdl = NULL; + + QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) { + if (vrdl->mr == section->mr && + vrdl->offset_within_address_space == + section->offset_within_address_space) { + break; + } + } + + if (!vrdl) { + hw_error("vfio: Trying to unregister missing RAM discard listener"); + } + + ram_discard_manager_unregister_listener(rdm, &vrdl->listener); + QLIST_REMOVE(vrdl, next); + g_free(vrdl); +} + +static bool vfio_known_safe_misalignment(MemoryRegionSection *section) +{ + MemoryRegion *mr = section->mr; + + if (!TPM_IS_CRB(mr->owner)) { + return false; + } + + /* this is a known safe misaligned region, just trace for debug purpose */ + trace_vfio_known_safe_misalignment(memory_region_name(mr), + section->offset_within_address_space, + section->offset_within_region, + qemu_real_host_page_size()); + return true; +} + +static bool vfio_listener_valid_section(MemoryRegionSection *section, + const char *name) +{ + if (vfio_listener_skipped_section(section)) { + trace_vfio_listener_region_skip(name, + section->offset_within_address_space, + section->offset_within_address_space + + int128_get64(int128_sub(section->size, int128_one()))); + return false; + } + + if (unlikely((section->offset_within_address_space & + ~qemu_real_host_page_mask()) != + (section->offset_within_region & ~qemu_real_host_page_mask()))) { + if (!vfio_known_safe_misalignment(section)) { + error_report("%s received unaligned region %s iova=0x%"PRIx64 + " offset_within_region=0x%"PRIx64 + " qemu_real_host_page_size=0x%"PRIxPTR, + __func__, memory_region_name(section->mr), + section->offset_within_address_space, + section->offset_within_region, + qemu_real_host_page_size()); + } + return false; + } + + return true; +} + +static bool vfio_get_section_iova_range(VFIOContainerBase *bcontainer, + MemoryRegionSection *section, + hwaddr *out_iova, hwaddr *out_end, + Int128 *out_llend) +{ + Int128 llend; + hwaddr iova; + + iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space); + llend = int128_make64(section->offset_within_address_space); + llend = int128_add(llend, section->size); + llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask())); + + if (int128_ge(int128_make64(iova), llend)) { + return false; + } + + *out_iova = iova; + *out_end = int128_get64(int128_sub(llend, int128_one())); + if (out_llend) { + *out_llend = llend; + } + return true; +} + +static void vfio_listener_begin(MemoryListener *listener) +{ + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); + void (*listener_begin)(VFIOContainerBase *bcontainer); + + listener_begin = VFIO_IOMMU_GET_CLASS(bcontainer)->listener_begin; + + if (listener_begin) { + listener_begin(bcontainer); + } +} + +static void vfio_listener_commit(MemoryListener *listener) +{ + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); + void (*listener_commit)(VFIOContainerBase *bcontainer); + + listener_commit = VFIO_IOMMU_GET_CLASS(bcontainer)->listener_commit; + + if (listener_commit) { + listener_commit(bcontainer); + } +} + +static void vfio_device_error_append(VFIODevice *vbasedev, Error **errp) +{ + /* + * MMIO region mapping 
failures are not fatal but in this case PCI + * peer-to-peer transactions are broken. + */ + if (vbasedev && vbasedev->type == VFIO_DEVICE_TYPE_PCI) { + error_append_hint(errp, "%s: PCI peer-to-peer transactions " + "on BARs are not supported.\n", vbasedev->name); + } +} + +VFIORamDiscardListener *vfio_find_ram_discard_listener( + VFIOContainerBase *bcontainer, MemoryRegionSection *section) +{ + VFIORamDiscardListener *vrdl = NULL; + + QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) { + if (vrdl->mr == section->mr && + vrdl->offset_within_address_space == + section->offset_within_address_space) { + break; + } + } + + if (!vrdl) { + hw_error("vfio: Trying to sync missing RAM discard listener"); + /* does not return */ + } + return vrdl; +} + +static void vfio_listener_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); + vfio_container_region_add(bcontainer, section, false); +} + +void vfio_container_region_add(VFIOContainerBase *bcontainer, + MemoryRegionSection *section, + bool cpr_remap) +{ + hwaddr iova, end; + Int128 llend, llsize; + void *vaddr; + int ret; + Error *err = NULL; + + if (!vfio_listener_valid_section(section, "region_add")) { + return; + } + + if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end, + &llend)) { + if (memory_region_is_ram_device(section->mr)) { + trace_vfio_listener_region_add_no_dma_map( + memory_region_name(section->mr), + section->offset_within_address_space, + int128_getlo(section->size), + qemu_real_host_page_size()); + } + return; + } + + /* PPC64/pseries machine only */ + if (!vfio_container_add_section_window(bcontainer, section, &err)) { + goto mmio_dma_error; + } + + memory_region_ref(section->mr); + + if (memory_region_is_iommu(section->mr)) { + VFIOGuestIOMMU *giommu; + IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); + int iommu_idx; + + trace_vfio_listener_region_add_iommu(section->mr->name, iova, end); + + if (cpr_remap) { + vfio_cpr_giommu_remap(bcontainer, section); + } + + /* + * FIXME: For VFIO iommu types which have KVM acceleration to + * avoid bouncing all map/unmaps through qemu this way, this + * would be the right place to wire that up (tell the KVM + * device emulation the VFIO iommu handles to use). + */ + giommu = g_malloc0(sizeof(*giommu)); + giommu->iommu_mr = iommu_mr; + giommu->iommu_offset = section->offset_within_address_space - + section->offset_within_region; + giommu->bcontainer = bcontainer; + llend = int128_add(int128_make64(section->offset_within_region), + section->size); + llend = int128_sub(llend, int128_one()); + iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr, + MEMTXATTRS_UNSPECIFIED); + iommu_notifier_init(&giommu->n, vfio_iommu_map_notify, + IOMMU_NOTIFIER_IOTLB_EVENTS, + section->offset_within_region, + int128_get64(llend), + iommu_idx); + + ret = memory_region_register_iommu_notifier(section->mr, &giommu->n, + &err); + if (ret) { + g_free(giommu); + goto fail; + } + QLIST_INSERT_HEAD(&bcontainer->giommu_list, giommu, giommu_next); + memory_region_iommu_replay(giommu->iommu_mr, &giommu->n); + + return; + } + + /* Here we assume that memory_region_is_ram(section->mr)==true */ + + /* + * For RAM memory regions with a RamDiscardManager, we only want to map the + * actually populated parts - and update the mapping whenever we're notified + * about changes. 
+ */ + if (memory_region_has_ram_discard_manager(section->mr)) { + if (!cpr_remap) { + vfio_ram_discard_register_listener(bcontainer, section); + } else if (!vfio_cpr_ram_discard_register_listener(bcontainer, + section)) { + goto fail; + } + return; + } + + vaddr = memory_region_get_ram_ptr(section->mr) + + section->offset_within_region + + (iova - section->offset_within_address_space); + + trace_vfio_listener_region_add_ram(iova, end, vaddr); + + llsize = int128_sub(llend, int128_make64(iova)); + + if (memory_region_is_ram_device(section->mr)) { + hwaddr pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1; + + if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) { + trace_vfio_listener_region_add_no_dma_map( + memory_region_name(section->mr), + section->offset_within_address_space, + int128_getlo(section->size), + pgmask + 1); + return; + } + } + + ret = vfio_container_dma_map(bcontainer, iova, int128_get64(llsize), + vaddr, section->readonly, section->mr); + if (ret) { + error_setg(&err, "vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", " + "0x%"HWADDR_PRIx", %p) = %d (%s)", + bcontainer, iova, int128_get64(llsize), vaddr, ret, + strerror(-ret)); + mmio_dma_error: + if (memory_region_is_ram_device(section->mr)) { + /* Allow unexpected mappings not to be fatal for RAM devices */ + VFIODevice *vbasedev = + vfio_get_vfio_device(memory_region_owner(section->mr)); + vfio_device_error_append(vbasedev, &err); + warn_report_err_once(err); + return; + } + goto fail; + } + + return; + +fail: + if (!bcontainer->initialized) { + /* + * At machine init time or when the device is attached to the + * VM, store the first error in the container so we can + * gracefully fail the device realize routine. + */ + if (!bcontainer->error) { + error_propagate_prepend(&bcontainer->error, err, + "Region %s: ", + memory_region_name(section->mr)); + } else { + error_free(err); + } + } else { + /* + * At runtime, there's not much we can do other than throw a + * hardware error. + */ + error_report_err(err); + hw_error("vfio: DMA mapping failed, unable to continue"); + } +} + +static void vfio_listener_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); + hwaddr iova, end; + Int128 llend, llsize; + int ret; + bool try_unmap = true; + + if (!vfio_listener_valid_section(section, "region_del")) { + return; + } + + if (memory_region_is_iommu(section->mr)) { + VFIOGuestIOMMU *giommu; + + trace_vfio_listener_region_del_iommu(section->mr->name); + QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) { + if (MEMORY_REGION(giommu->iommu_mr) == section->mr && + giommu->n.start == section->offset_within_region) { + memory_region_unregister_iommu_notifier(section->mr, + &giommu->n); + QLIST_REMOVE(giommu, giommu_next); + g_free(giommu); + break; + } + } + + /* + * FIXME: We assume the one big unmap below is adequate to + * remove any individual page mappings in the IOMMU which + * might have been copied into VFIO. This works for a page table + * based IOMMU where a big unmap flattens a large range of IO-PTEs. + * That may not be true for all IOMMU types. 
+ */ + } + + if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end, + &llend)) { + return; + } + + llsize = int128_sub(llend, int128_make64(iova)); + + trace_vfio_listener_region_del(iova, end); + + if (memory_region_is_ram_device(section->mr)) { + hwaddr pgmask; + + pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1; + try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask)); + } else if (memory_region_has_ram_discard_manager(section->mr)) { + vfio_ram_discard_unregister_listener(bcontainer, section); + /* Unregistering will trigger an unmap. */ + try_unmap = false; + } + + if (try_unmap) { + bool unmap_all = false; + + if (int128_eq(llsize, int128_2_64())) { + unmap_all = true; + llsize = int128_zero(); + } + ret = vfio_container_dma_unmap(bcontainer, iova, int128_get64(llsize), + NULL, unmap_all); + if (ret) { + error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", " + "0x%"HWADDR_PRIx") = %d (%s)", + bcontainer, iova, int128_get64(llsize), ret, + strerror(-ret)); + } + } + + memory_region_unref(section->mr); + + /* PPC64/pseries machine only */ + vfio_container_del_section_window(bcontainer, section); +} + +typedef struct VFIODirtyRanges { + hwaddr min32; + hwaddr max32; + hwaddr min64; + hwaddr max64; + hwaddr minpci64; + hwaddr maxpci64; +} VFIODirtyRanges; + +typedef struct VFIODirtyRangesListener { + VFIOContainerBase *bcontainer; + VFIODirtyRanges ranges; + MemoryListener listener; +} VFIODirtyRangesListener; + +static bool vfio_section_is_vfio_pci(MemoryRegionSection *section, + VFIOContainerBase *bcontainer) +{ + VFIOPCIDevice *pcidev; + VFIODevice *vbasedev; + Object *owner; + + owner = memory_region_owner(section->mr); + + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { + if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) { + continue; + } + pcidev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + if (OBJECT(pcidev) == owner) { + return true; + } + } + + return false; +} + +static void vfio_dirty_tracking_update_range(VFIODirtyRanges *range, + hwaddr iova, hwaddr end, + bool update_pci) +{ + hwaddr *min, *max; + + /* + * The address space passed to the dirty tracker is reduced to three ranges: + * one for 32-bit DMA ranges, one for 64-bit DMA ranges and one for the + * PCI 64-bit hole. + * + * The underlying reports of dirty will query a sub-interval of each of + * these ranges. + * + * The purpose of the three range handling is to handle known cases of big + * holes in the address space, like the x86 AMD 1T hole, and firmware (like + * OVMF) which may relocate the pci-hole64 to the end of the address space. + * The latter would otherwise generate large ranges for tracking, stressing + * the limits of supported hardware. The pci-hole32 will always be below 4G + * (overlapping or not) so it doesn't need special handling and is part of + * the 32-bit range. + * + * The alternative would be an IOVATree but that has a much bigger runtime + * overhead and unnecessary complexity. + */ + if (update_pci && iova >= UINT32_MAX) { + min = &range->minpci64; + max = &range->maxpci64; + } else { + min = (end <= UINT32_MAX) ? &range->min32 : &range->min64; + max = (end <= UINT32_MAX) ? 
&range->max32 : &range->max64; + } + if (*min > iova) { + *min = iova; + } + if (*max < end) { + *max = end; + } + + trace_vfio_device_dirty_tracking_update(iova, end, *min, *max); +} + +static void vfio_dirty_tracking_update(MemoryListener *listener, + MemoryRegionSection *section) +{ + VFIODirtyRangesListener *dirty = + container_of(listener, VFIODirtyRangesListener, listener); + hwaddr iova, end; + + if (!vfio_listener_valid_section(section, "tracking_update") || + !vfio_get_section_iova_range(dirty->bcontainer, section, + &iova, &end, NULL)) { + return; + } + + vfio_dirty_tracking_update_range(&dirty->ranges, iova, end, + vfio_section_is_vfio_pci(section, dirty->bcontainer)); +} + +static const MemoryListener vfio_dirty_tracking_listener = { + .name = "vfio-tracking", + .region_add = vfio_dirty_tracking_update, +}; + +static void vfio_dirty_tracking_init(VFIOContainerBase *bcontainer, + VFIODirtyRanges *ranges) +{ + VFIODirtyRangesListener dirty; + + memset(&dirty, 0, sizeof(dirty)); + dirty.ranges.min32 = UINT32_MAX; + dirty.ranges.min64 = UINT64_MAX; + dirty.ranges.minpci64 = UINT64_MAX; + dirty.listener = vfio_dirty_tracking_listener; + dirty.bcontainer = bcontainer; + + memory_listener_register(&dirty.listener, + bcontainer->space->as); + + *ranges = dirty.ranges; + + /* + * The memory listener is synchronous, and used to calculate the range + * to dirty tracking. Unregister it after we are done as we are not + * interested in any follow-up updates. + */ + memory_listener_unregister(&dirty.listener); +} + +static void vfio_devices_dma_logging_stop(VFIOContainerBase *bcontainer) +{ + uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature), + sizeof(uint64_t))] = {}; + struct vfio_device_feature *feature = (struct vfio_device_feature *)buf; + VFIODevice *vbasedev; + + feature->argsz = sizeof(buf); + feature->flags = VFIO_DEVICE_FEATURE_SET | + VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP; + + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { + int ret; + + if (!vbasedev->dirty_tracking) { + continue; + } + + ret = vbasedev->io_ops->device_feature(vbasedev, feature); + + if (ret != 0) { + warn_report("%s: Failed to stop DMA logging, err %d (%s)", + vbasedev->name, -ret, strerror(-ret)); + } + vbasedev->dirty_tracking = false; + } +} + +static struct vfio_device_feature * +vfio_device_feature_dma_logging_start_create(VFIOContainerBase *bcontainer, + VFIODirtyRanges *tracking) +{ + struct vfio_device_feature *feature; + size_t feature_size; + struct vfio_device_feature_dma_logging_control *control; + struct vfio_device_feature_dma_logging_range *ranges; + + feature_size = sizeof(struct vfio_device_feature) + + sizeof(struct vfio_device_feature_dma_logging_control); + feature = g_try_malloc0(feature_size); + if (!feature) { + errno = ENOMEM; + return NULL; + } + feature->argsz = feature_size; + feature->flags = VFIO_DEVICE_FEATURE_SET | + VFIO_DEVICE_FEATURE_DMA_LOGGING_START; + + control = (struct vfio_device_feature_dma_logging_control *)feature->data; + control->page_size = qemu_real_host_page_size(); + + /* + * DMA logging uAPI guarantees to support at least a number of ranges that + * fits into a single host kernel base page. 
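Editorial aside: a sketch (with made-up addresses) of how the three-range split described above buckets updates into the 32-bit, 64-bit and pci-hole64 trackers via vfio_dirty_tracking_update_range().

    /* Illustrative values only -- not part of the patch. */
    static void example_dirty_range_buckets(void)
    {
        VFIODirtyRanges r = {
            .min32 = UINT32_MAX, .min64 = UINT64_MAX, .minpci64 = UINT64_MAX,
        };

        /* RAM below 4G lands in the 32-bit range */
        vfio_dirty_tracking_update_range(&r, 0x0, 0x7fffffff, false);
        /* RAM above 4G (e.g. beyond the AMD 1T hole) lands in the 64-bit range */
        vfio_dirty_tracking_update_range(&r, 0x10000000000, 0x10fffffffff, false);
        /* a vfio-pci BAR placed in the 64-bit PCI hole gets its own range */
        vfio_dirty_tracking_update_range(&r, 0x380000000000, 0x3800ffffffff, true);

        g_assert(r.max32 == 0x7fffffff);
        g_assert(r.min64 == 0x10000000000 && r.max64 == 0x10fffffffff);
        g_assert(r.minpci64 == 0x380000000000 && r.maxpci64 == 0x3800ffffffff);
    }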
+ */ + control->num_ranges = !!tracking->max32 + !!tracking->max64 + + !!tracking->maxpci64; + ranges = g_try_new0(struct vfio_device_feature_dma_logging_range, + control->num_ranges); + if (!ranges) { + g_free(feature); + errno = ENOMEM; + + return NULL; + } + + control->ranges = (uintptr_t)ranges; + if (tracking->max32) { + ranges->iova = tracking->min32; + ranges->length = (tracking->max32 - tracking->min32) + 1; + ranges++; + } + if (tracking->max64) { + ranges->iova = tracking->min64; + ranges->length = (tracking->max64 - tracking->min64) + 1; + ranges++; + } + if (tracking->maxpci64) { + ranges->iova = tracking->minpci64; + ranges->length = (tracking->maxpci64 - tracking->minpci64) + 1; + } + + trace_vfio_device_dirty_tracking_start(control->num_ranges, + tracking->min32, tracking->max32, + tracking->min64, tracking->max64, + tracking->minpci64, tracking->maxpci64); + + return feature; +} + +static void vfio_device_feature_dma_logging_start_destroy( + struct vfio_device_feature *feature) +{ + struct vfio_device_feature_dma_logging_control *control = + (struct vfio_device_feature_dma_logging_control *)feature->data; + struct vfio_device_feature_dma_logging_range *ranges = + (struct vfio_device_feature_dma_logging_range *)(uintptr_t)control->ranges; + + g_free(ranges); + g_free(feature); +} + +static bool vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer, + Error **errp) +{ + struct vfio_device_feature *feature; + VFIODirtyRanges ranges; + VFIODevice *vbasedev; + int ret = 0; + + vfio_dirty_tracking_init(bcontainer, &ranges); + feature = vfio_device_feature_dma_logging_start_create(bcontainer, + &ranges); + if (!feature) { + error_setg_errno(errp, errno, "Failed to prepare DMA logging"); + return false; + } + + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { + if (vbasedev->dirty_tracking) { + continue; + } + + ret = vbasedev->io_ops->device_feature(vbasedev, feature); + if (ret) { + error_setg_errno(errp, -ret, "%s: Failed to start DMA logging", + vbasedev->name); + goto out; + } + vbasedev->dirty_tracking = true; + } + +out: + if (ret) { + vfio_devices_dma_logging_stop(bcontainer); + } + + vfio_device_feature_dma_logging_start_destroy(feature); + + return ret == 0; +} + +static bool vfio_listener_log_global_start(MemoryListener *listener, + Error **errp) +{ + ERRP_GUARD(); + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); + bool ret; + + if (vfio_container_devices_dirty_tracking_is_supported(bcontainer)) { + ret = vfio_devices_dma_logging_start(bcontainer, errp); + } else { + ret = vfio_container_set_dirty_page_tracking(bcontainer, true, errp) == 0; + } + + if (!ret) { + error_prepend(errp, "vfio: Could not start dirty page tracking - "); + } + return ret; +} + +static void vfio_listener_log_global_stop(MemoryListener *listener) +{ + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); + Error *local_err = NULL; + int ret = 0; + + if (vfio_container_devices_dirty_tracking_is_supported(bcontainer)) { + vfio_devices_dma_logging_stop(bcontainer); + } else { + ret = vfio_container_set_dirty_page_tracking(bcontainer, false, + &local_err); + } + + if (ret) { + error_prepend(&local_err, + "vfio: Could not stop dirty page tracking - "); + if (migration_is_running()) { + migration_file_set_error(ret, local_err); + } else { + error_report_err(local_err); + } + } +} + +typedef struct { + IOMMUNotifier n; + VFIOGuestIOMMU *giommu; +} vfio_giommu_dirty_notifier; + +static void 
vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) +{ + vfio_giommu_dirty_notifier *gdn = container_of(n, + vfio_giommu_dirty_notifier, n); + VFIOGuestIOMMU *giommu = gdn->giommu; + VFIOContainerBase *bcontainer = giommu->bcontainer; + hwaddr iova = iotlb->iova + giommu->iommu_offset; + ram_addr_t translated_addr; + Error *local_err = NULL; + int ret = -EINVAL; + MemoryRegion *mr; + hwaddr xlat; + + trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask); + + if (iotlb->target_as != &address_space_memory) { + error_setg(&local_err, + "Wrong target AS \"%s\", only system memory is allowed", + iotlb->target_as->name ? iotlb->target_as->name : "none"); + goto out; + } + + rcu_read_lock(); + mr = vfio_translate_iotlb(iotlb, &xlat, &local_err); + if (!mr) { + goto out_unlock; + } + translated_addr = memory_region_get_ram_addr(mr) + xlat; + + ret = vfio_container_query_dirty_bitmap(bcontainer, iova, iotlb->addr_mask + 1, + translated_addr, &local_err); + if (ret) { + error_prepend(&local_err, + "vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", " + "0x%"HWADDR_PRIx") failed - ", bcontainer, iova, + iotlb->addr_mask + 1); + } + +out_unlock: + rcu_read_unlock(); + +out: + if (ret) { + if (migration_is_running()) { + migration_file_set_error(ret, local_err); + } else { + error_report_err(local_err); + } + } +} + +static int vfio_ram_discard_query_dirty_bitmap(MemoryRegionSection *section, + void *opaque) +{ + const hwaddr size = int128_get64(section->size); + const hwaddr iova = section->offset_within_address_space; + const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) + + section->offset_within_region; + VFIORamDiscardListener *vrdl = opaque; + Error *local_err = NULL; + int ret; + + /* + * Sync the whole mapped region (spanning multiple individual mappings) + * in one go. + */ + ret = vfio_container_query_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr, + &local_err); + if (ret) { + error_report_err(local_err); + } + return ret; +} + +static int +vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainerBase *bcontainer, + MemoryRegionSection *section) +{ + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); + VFIORamDiscardListener *vrdl = + vfio_find_ram_discard_listener(bcontainer, section); + + /* + * We only want/can synchronize the bitmap for actually mapped parts - + * which correspond to populated parts. Replay all populated parts. 
+ */ + return ram_discard_manager_replay_populated(rdm, section, + vfio_ram_discard_query_dirty_bitmap, + &vrdl); +} + +static int vfio_sync_iommu_dirty_bitmap(VFIOContainerBase *bcontainer, + MemoryRegionSection *section) +{ + VFIOGuestIOMMU *giommu; + bool found = false; + Int128 llend; + vfio_giommu_dirty_notifier gdn; + int idx; + + QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) { + if (MEMORY_REGION(giommu->iommu_mr) == section->mr && + giommu->n.start == section->offset_within_region) { + found = true; + break; + } + } + + if (!found) { + return 0; + } + + gdn.giommu = giommu; + idx = memory_region_iommu_attrs_to_index(giommu->iommu_mr, + MEMTXATTRS_UNSPECIFIED); + + llend = int128_add(int128_make64(section->offset_within_region), + section->size); + llend = int128_sub(llend, int128_one()); + + iommu_notifier_init(&gdn.n, vfio_iommu_map_dirty_notify, IOMMU_NOTIFIER_MAP, + section->offset_within_region, int128_get64(llend), + idx); + memory_region_iommu_replay(giommu->iommu_mr, &gdn.n); + + return 0; +} + +static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer, + MemoryRegionSection *section, Error **errp) +{ + ram_addr_t ram_addr; + + if (memory_region_is_iommu(section->mr)) { + return vfio_sync_iommu_dirty_bitmap(bcontainer, section); + } else if (memory_region_has_ram_discard_manager(section->mr)) { + int ret; + + ret = vfio_sync_ram_discard_listener_dirty_bitmap(bcontainer, section); + if (ret) { + error_setg(errp, + "Failed to sync dirty bitmap with RAM discard listener"); + } + return ret; + } + + ram_addr = memory_region_get_ram_addr(section->mr) + + section->offset_within_region; + + return vfio_container_query_dirty_bitmap(bcontainer, + REAL_HOST_PAGE_ALIGN(section->offset_within_address_space), + int128_get64(section->size), ram_addr, errp); +} + +static void vfio_listener_log_sync(MemoryListener *listener, + MemoryRegionSection *section) +{ + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); + int ret; + Error *local_err = NULL; + + if (vfio_listener_skipped_section(section)) { + return; + } + + if (vfio_log_sync_needed(bcontainer)) { + ret = vfio_sync_dirty_bitmap(bcontainer, section, &local_err); + if (ret) { + if (migration_is_running()) { + migration_file_set_error(ret, local_err); + } else { + error_report_err(local_err); + } + } + } +} + +static const MemoryListener vfio_memory_listener = { + .name = "vfio", + .begin = vfio_listener_begin, + .commit = vfio_listener_commit, + .region_add = vfio_listener_region_add, + .region_del = vfio_listener_region_del, + .log_global_start = vfio_listener_log_global_start, + .log_global_stop = vfio_listener_log_global_stop, + .log_sync = vfio_listener_log_sync, +}; + +bool vfio_listener_register(VFIOContainerBase *bcontainer, Error **errp) +{ + bcontainer->listener = vfio_memory_listener; + memory_listener_register(&bcontainer->listener, bcontainer->space->as); + + if (bcontainer->error) { + error_propagate_prepend(errp, bcontainer->error, + "memory listener initialization failed: "); + return false; + } + + return true; +} + +void vfio_listener_unregister(VFIOContainerBase *bcontainer) +{ + memory_listener_unregister(&bcontainer->listener); +} diff --git a/hw/vfio/meson.build b/hw/vfio/meson.build index bba776f75cc..bfaf6be8054 100644 --- a/hw/vfio/meson.build +++ b/hw/vfio/meson.build @@ -1,26 +1,39 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + vfio_ss = ss.source_set() vfio_ss.add(files( - 'helpers.c', - 'common.c', + 'listener.c', 'container-base.c', 'container.c', 
- 'migration.c', - 'cpr.c', + 'helpers.c', )) vfio_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr.c')) -vfio_ss.add(when: 'CONFIG_IOMMUFD', if_true: files( - 'iommufd.c', -)) vfio_ss.add(when: 'CONFIG_VFIO_PCI', if_true: files( - 'display.c', 'pci-quirks.c', 'pci.c', )) vfio_ss.add(when: 'CONFIG_VFIO_CCW', if_true: files('ccw.c')) vfio_ss.add(when: 'CONFIG_VFIO_PLATFORM', if_true: files('platform.c')) -vfio_ss.add(when: 'CONFIG_VFIO_XGMAC', if_true: files('calxeda-xgmac.c')) -vfio_ss.add(when: 'CONFIG_VFIO_AMD_XGBE', if_true: files('amd-xgbe.c')) vfio_ss.add(when: 'CONFIG_VFIO_AP', if_true: files('ap.c')) vfio_ss.add(when: 'CONFIG_VFIO_IGD', if_true: files('igd.c')) specific_ss.add_all(when: 'CONFIG_VFIO', if_true: vfio_ss) + +system_ss.add(when: 'CONFIG_VFIO_XGMAC', if_true: files('calxeda-xgmac.c')) +system_ss.add(when: 'CONFIG_VFIO_AMD_XGBE', if_true: files('amd-xgbe.c')) +system_ss.add(when: 'CONFIG_VFIO', if_true: files( + 'cpr.c', + 'cpr-legacy.c', + 'device.c', + 'migration.c', + 'migration-multifd.c', + 'region.c', +)) +system_ss.add(when: ['CONFIG_VFIO', 'CONFIG_IOMMUFD'], if_true: files( + 'iommufd.c', + 'cpr-iommufd.c', +)) +system_ss.add(when: 'CONFIG_IOMMUFD', if_false: files('iommufd-stubs.c')) +system_ss.add(when: 'CONFIG_VFIO_PCI', if_true: files( + 'display.c', +)) diff --git a/hw/vfio/migration-multifd.c b/hw/vfio/migration-multifd.c new file mode 100644 index 00000000000..e4785031a73 --- /dev/null +++ b/hw/vfio/migration-multifd.c @@ -0,0 +1,780 @@ +/* + * Multifd VFIO migration + * + * Copyright (C) 2024,2025 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/vfio/vfio-device.h" +#include "migration/misc.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/lockable.h" +#include "qemu/main-loop.h" +#include "qemu/thread.h" +#include "io/channel-buffer.h" +#include "migration/qemu-file.h" +#include "migration-multifd.h" +#include "vfio-migration-internal.h" +#include "trace.h" +#include "vfio-helpers.h" + +#define VFIO_DEVICE_STATE_CONFIG_STATE (1) + +#define VFIO_DEVICE_STATE_PACKET_VER_CURRENT (0) + +typedef struct VFIODeviceStatePacket { + uint32_t version; + uint32_t idx; + uint32_t flags; + uint8_t data[0]; +} QEMU_PACKED VFIODeviceStatePacket; + +bool vfio_load_config_after_iter(VFIODevice *vbasedev) +{ + if (vbasedev->migration_load_config_after_iter == ON_OFF_AUTO_ON) { + return true; + } else if (vbasedev->migration_load_config_after_iter == ON_OFF_AUTO_OFF) { + return false; + } + + assert(vbasedev->migration_load_config_after_iter == ON_OFF_AUTO_AUTO); + return vfio_arch_wants_loading_config_after_iter(); +} + +/* type safety */ +typedef struct VFIOStateBuffers { + GArray *array; +} VFIOStateBuffers; + +typedef struct VFIOStateBuffer { + bool is_present; + char *data; + size_t len; +} VFIOStateBuffer; + +typedef struct VFIOMultifd { + bool load_bufs_thread_running; + bool load_bufs_thread_want_exit; + + bool load_bufs_iter_done; + QemuCond load_bufs_iter_done_cond; + + VFIOStateBuffers load_bufs; + QemuCond load_bufs_buffer_ready_cond; + QemuCond load_bufs_thread_finished_cond; + QemuMutex load_bufs_mutex; /* Lock order: this lock -> BQL */ + uint32_t load_buf_idx; + uint32_t load_buf_idx_last; + size_t load_buf_queued_pending_buffers_size; +} VFIOMultifd; + +static void vfio_state_buffer_clear(gpointer data) +{ + 
VFIOStateBuffer *lb = data; + + if (!lb->is_present) { + return; + } + + g_clear_pointer(&lb->data, g_free); + lb->is_present = false; +} + +static void vfio_state_buffers_init(VFIOStateBuffers *bufs) +{ + bufs->array = g_array_new(FALSE, TRUE, sizeof(VFIOStateBuffer)); + g_array_set_clear_func(bufs->array, vfio_state_buffer_clear); +} + +static void vfio_state_buffers_destroy(VFIOStateBuffers *bufs) +{ + g_clear_pointer(&bufs->array, g_array_unref); +} + +static void vfio_state_buffers_assert_init(VFIOStateBuffers *bufs) +{ + assert(bufs->array); +} + +static unsigned int vfio_state_buffers_size_get(VFIOStateBuffers *bufs) +{ + return bufs->array->len; +} + +static void vfio_state_buffers_size_set(VFIOStateBuffers *bufs, + unsigned int size) +{ + g_array_set_size(bufs->array, size); +} + +static VFIOStateBuffer *vfio_state_buffers_at(VFIOStateBuffers *bufs, + unsigned int idx) +{ + return &g_array_index(bufs->array, VFIOStateBuffer, idx); +} + +/* called with load_bufs_mutex locked */ +static bool vfio_load_state_buffer_insert(VFIODevice *vbasedev, + VFIODeviceStatePacket *packet, + size_t packet_total_size, + Error **errp) +{ + VFIOMigration *migration = vbasedev->migration; + VFIOMultifd *multifd = migration->multifd; + VFIOStateBuffer *lb; + size_t data_size = packet_total_size - sizeof(*packet); + + vfio_state_buffers_assert_init(&multifd->load_bufs); + if (packet->idx >= vfio_state_buffers_size_get(&multifd->load_bufs)) { + vfio_state_buffers_size_set(&multifd->load_bufs, packet->idx + 1); + } + + lb = vfio_state_buffers_at(&multifd->load_bufs, packet->idx); + if (lb->is_present) { + error_setg(errp, "%s: state buffer %" PRIu32 " already filled", + vbasedev->name, packet->idx); + return false; + } + + assert(packet->idx >= multifd->load_buf_idx); + + multifd->load_buf_queued_pending_buffers_size += data_size; + if (multifd->load_buf_queued_pending_buffers_size > + vbasedev->migration_max_queued_buffers_size) { + error_setg(errp, + "%s: queuing state buffer %" PRIu32 + " would exceed the size max of %" PRIu64, + vbasedev->name, packet->idx, + vbasedev->migration_max_queued_buffers_size); + return false; + } + + lb->data = g_memdup2(&packet->data, data_size); + lb->len = data_size; + lb->is_present = true; + + return true; +} + +bool vfio_multifd_load_state_buffer(void *opaque, char *data, size_t data_size, + Error **errp) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + VFIOMultifd *multifd = migration->multifd; + VFIODeviceStatePacket *packet = (VFIODeviceStatePacket *)data; + + if (!vfio_multifd_transfer_enabled(vbasedev)) { + error_setg(errp, + "%s: got device state packet but not doing multifd transfer", + vbasedev->name); + return false; + } + + assert(multifd); + + if (data_size < sizeof(*packet)) { + error_setg(errp, "%s: packet too short at %zu (min is %zu)", + vbasedev->name, data_size, sizeof(*packet)); + return false; + } + + packet->version = be32_to_cpu(packet->version); + if (packet->version != VFIO_DEVICE_STATE_PACKET_VER_CURRENT) { + error_setg(errp, "%s: packet has unknown version %" PRIu32, + vbasedev->name, packet->version); + return false; + } + + packet->idx = be32_to_cpu(packet->idx); + packet->flags = be32_to_cpu(packet->flags); + + if (packet->idx == UINT32_MAX) { + error_setg(errp, "%s: packet index is invalid", vbasedev->name); + return false; + } + + trace_vfio_load_state_device_buffer_incoming(vbasedev->name, packet->idx); + + /* + * Holding BQL here would violate the lock order and can cause + * a deadlock once we 
attempt to lock load_bufs_mutex below. + */ + assert(!bql_locked()); + + WITH_QEMU_LOCK_GUARD(&multifd->load_bufs_mutex) { + /* config state packet should be the last one in the stream */ + if (packet->flags & VFIO_DEVICE_STATE_CONFIG_STATE) { + multifd->load_buf_idx_last = packet->idx; + } + + if (!vfio_load_state_buffer_insert(vbasedev, packet, data_size, + errp)) { + return false; + } + + qemu_cond_signal(&multifd->load_bufs_buffer_ready_cond); + } + + return true; +} + +static bool vfio_load_bufs_thread_load_config(VFIODevice *vbasedev, + Error **errp) +{ + VFIOMigration *migration = vbasedev->migration; + VFIOMultifd *multifd = migration->multifd; + VFIOStateBuffer *lb; + g_autoptr(QIOChannelBuffer) bioc = NULL; + g_autoptr(QEMUFile) f_out = NULL, f_in = NULL; + uint64_t mig_header; + int ret; + + assert(multifd->load_buf_idx == multifd->load_buf_idx_last); + lb = vfio_state_buffers_at(&multifd->load_bufs, multifd->load_buf_idx); + assert(lb->is_present); + + bioc = qio_channel_buffer_new(lb->len); + qio_channel_set_name(QIO_CHANNEL(bioc), "vfio-device-config-load"); + + f_out = qemu_file_new_output(QIO_CHANNEL(bioc)); + qemu_put_buffer(f_out, (uint8_t *)lb->data, lb->len); + + ret = qemu_fflush(f_out); + if (ret) { + error_setg(errp, "%s: load config state flush failed: %d", + vbasedev->name, ret); + return false; + } + + qio_channel_io_seek(QIO_CHANNEL(bioc), 0, 0, NULL); + f_in = qemu_file_new_input(QIO_CHANNEL(bioc)); + + mig_header = qemu_get_be64(f_in); + if (mig_header != VFIO_MIG_FLAG_DEV_CONFIG_STATE) { + error_setg(errp, "%s: expected FLAG_DEV_CONFIG_STATE but got %" PRIx64, + vbasedev->name, mig_header); + return false; + } + + bql_lock(); + ret = vfio_load_device_config_state(f_in, vbasedev); + bql_unlock(); + + if (ret < 0) { + error_setg(errp, "%s: vfio_load_device_config_state() failed: %d", + vbasedev->name, ret); + return false; + } + + return true; +} + +static VFIOStateBuffer *vfio_load_state_buffer_get(VFIOMultifd *multifd) +{ + VFIOStateBuffer *lb; + unsigned int bufs_len; + + bufs_len = vfio_state_buffers_size_get(&multifd->load_bufs); + if (multifd->load_buf_idx >= bufs_len) { + assert(multifd->load_buf_idx == bufs_len); + return NULL; + } + + lb = vfio_state_buffers_at(&multifd->load_bufs, + multifd->load_buf_idx); + if (!lb->is_present) { + return NULL; + } + + return lb; +} + +static bool vfio_load_state_buffer_write(VFIODevice *vbasedev, + VFIOStateBuffer *lb, + Error **errp) +{ + VFIOMigration *migration = vbasedev->migration; + VFIOMultifd *multifd = migration->multifd; + g_autofree char *buf = NULL; + char *buf_cur; + size_t buf_len; + + if (!lb->len) { + return true; + } + + trace_vfio_load_state_device_buffer_load_start(vbasedev->name, + multifd->load_buf_idx); + + /* lb might become re-allocated when we drop the lock */ + buf = g_steal_pointer(&lb->data); + buf_cur = buf; + buf_len = lb->len; + while (buf_len > 0) { + ssize_t wr_ret; + int errno_save; + + /* + * Loading data to the device takes a while, + * drop the lock during this process. 
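Editorial aside: the loading code above parses VFIODeviceStatePacket headers that the save side (later in this file) puts on the wire: a 12-byte big-endian header followed by the raw payload, with the config blob carried by the final packet flagged VFIO_DEVICE_STATE_CONFIG_STATE so the receiver learns the last index. A minimal sketch of how one such packet is framed:

    /* Illustrative sketch only -- not part of the patch. */
    static VFIODeviceStatePacket *example_make_packet(uint32_t idx, const void *buf,
                                                      size_t len, bool is_config)
    {
        VFIODeviceStatePacket *p = g_malloc0(sizeof(*p) + len);

        p->version = cpu_to_be32(VFIO_DEVICE_STATE_PACKET_VER_CURRENT);
        p->idx = cpu_to_be32(idx);
        p->flags = cpu_to_be32(is_config ? VFIO_DEVICE_STATE_CONFIG_STATE : 0);
        memcpy(&p->data, buf, len);

        return p; /* the save thread queues this via multifd_queue_device_state() */
    }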
+ */ + qemu_mutex_unlock(&multifd->load_bufs_mutex); + wr_ret = write(migration->data_fd, buf_cur, buf_len); + errno_save = errno; + qemu_mutex_lock(&multifd->load_bufs_mutex); + + if (wr_ret < 0) { + error_setg(errp, + "%s: writing state buffer %" PRIu32 " failed: %d", + vbasedev->name, multifd->load_buf_idx, errno_save); + return false; + } + + assert(wr_ret <= buf_len); + buf_len -= wr_ret; + buf_cur += wr_ret; + + assert(multifd->load_buf_queued_pending_buffers_size >= wr_ret); + multifd->load_buf_queued_pending_buffers_size -= wr_ret; + } + + trace_vfio_load_state_device_buffer_load_end(vbasedev->name, + multifd->load_buf_idx); + + return true; +} + +static bool vfio_load_bufs_thread_want_exit(VFIOMultifd *multifd, + bool *should_quit) +{ + return multifd->load_bufs_thread_want_exit || qatomic_read(should_quit); +} + +/* + * This thread is spawned by vfio_multifd_switchover_start() which gets + * called upon encountering the switchover point marker in main migration + * stream. + * + * It exits after either: + * * completing loading the remaining device state and device config, OR: + * * encountering some error while doing the above, OR: + * * being forcefully aborted by the migration core by it setting should_quit + * or by vfio_load_cleanup_load_bufs_thread() setting + * multifd->load_bufs_thread_want_exit. + */ +static bool vfio_load_bufs_thread(void *opaque, bool *should_quit, Error **errp) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + VFIOMultifd *multifd = migration->multifd; + bool ret = false; + + trace_vfio_load_bufs_thread_start(vbasedev->name); + + assert(multifd); + QEMU_LOCK_GUARD(&multifd->load_bufs_mutex); + + assert(multifd->load_bufs_thread_running); + + while (true) { + VFIOStateBuffer *lb; + + /* + * Always check cancellation first after the buffer_ready wait below in + * case that cond was signalled by vfio_load_cleanup_load_bufs_thread(). + */ + if (vfio_load_bufs_thread_want_exit(multifd, should_quit)) { + error_setg(errp, "operation cancelled"); + goto thread_exit; + } + + assert(multifd->load_buf_idx <= multifd->load_buf_idx_last); + + lb = vfio_load_state_buffer_get(multifd); + if (!lb) { + trace_vfio_load_state_device_buffer_starved(vbasedev->name, + multifd->load_buf_idx); + qemu_cond_wait(&multifd->load_bufs_buffer_ready_cond, + &multifd->load_bufs_mutex); + continue; + } + + if (multifd->load_buf_idx == multifd->load_buf_idx_last) { + break; + } + + if (multifd->load_buf_idx == 0) { + trace_vfio_load_state_device_buffer_start(vbasedev->name); + } + + if (!vfio_load_state_buffer_write(vbasedev, lb, errp)) { + goto thread_exit; + } + + if (multifd->load_buf_idx == multifd->load_buf_idx_last - 1) { + trace_vfio_load_state_device_buffer_end(vbasedev->name); + } + + multifd->load_buf_idx++; + } + + if (vfio_load_config_after_iter(vbasedev)) { + while (!multifd->load_bufs_iter_done) { + qemu_cond_wait(&multifd->load_bufs_iter_done_cond, + &multifd->load_bufs_mutex); + + /* + * Need to re-check cancellation immediately after wait in case + * cond was signalled by vfio_load_cleanup_load_bufs_thread(). + */ + if (vfio_load_bufs_thread_want_exit(multifd, should_quit)) { + error_setg(errp, "operation cancelled"); + goto thread_exit; + } + } + } + + if (!vfio_load_bufs_thread_load_config(vbasedev, errp)) { + goto thread_exit; + } + + ret = true; + +thread_exit: + /* + * Notify possibly waiting vfio_load_cleanup_load_bufs_thread() that + * this thread is exiting. 
+ */ + multifd->load_bufs_thread_running = false; + qemu_cond_signal(&multifd->load_bufs_thread_finished_cond); + + trace_vfio_load_bufs_thread_end(vbasedev->name); + + return ret; +} + +int vfio_load_state_config_load_ready(VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + VFIOMultifd *multifd = migration->multifd; + int ret = 0; + + if (!vfio_multifd_transfer_enabled(vbasedev)) { + error_report("%s: got DEV_CONFIG_LOAD_READY outside multifd transfer", + vbasedev->name); + return -EINVAL; + } + + if (!vfio_load_config_after_iter(vbasedev)) { + error_report("%s: got DEV_CONFIG_LOAD_READY but was disabled", + vbasedev->name); + return -EINVAL; + } + + assert(multifd); + + /* The lock order is load_bufs_mutex -> BQL so unlock BQL here first */ + bql_unlock(); + WITH_QEMU_LOCK_GUARD(&multifd->load_bufs_mutex) { + if (multifd->load_bufs_iter_done) { + /* Can't print error here as we're outside BQL */ + ret = -EINVAL; + break; + } + + multifd->load_bufs_iter_done = true; + qemu_cond_signal(&multifd->load_bufs_iter_done_cond); + } + bql_lock(); + + if (ret) { + error_report("%s: duplicate DEV_CONFIG_LOAD_READY", + vbasedev->name); + } + + return ret; +} + +static VFIOMultifd *vfio_multifd_new(void) +{ + VFIOMultifd *multifd = g_new(VFIOMultifd, 1); + + vfio_state_buffers_init(&multifd->load_bufs); + + qemu_mutex_init(&multifd->load_bufs_mutex); + + multifd->load_buf_idx = 0; + multifd->load_buf_idx_last = UINT32_MAX; + multifd->load_buf_queued_pending_buffers_size = 0; + qemu_cond_init(&multifd->load_bufs_buffer_ready_cond); + + multifd->load_bufs_iter_done = false; + qemu_cond_init(&multifd->load_bufs_iter_done_cond); + + multifd->load_bufs_thread_running = false; + multifd->load_bufs_thread_want_exit = false; + qemu_cond_init(&multifd->load_bufs_thread_finished_cond); + + return multifd; +} + +/* + * Terminates vfio_load_bufs_thread by setting + * multifd->load_bufs_thread_want_exit and signalling all the conditions + * the thread could be blocked on. + * + * Waits for the thread to signal that it had finished. 
+ */ +static void vfio_load_cleanup_load_bufs_thread(VFIOMultifd *multifd) +{ + /* The lock order is load_bufs_mutex -> BQL so unlock BQL here first */ + bql_unlock(); + WITH_QEMU_LOCK_GUARD(&multifd->load_bufs_mutex) { + while (multifd->load_bufs_thread_running) { + multifd->load_bufs_thread_want_exit = true; + + qemu_cond_signal(&multifd->load_bufs_buffer_ready_cond); + qemu_cond_signal(&multifd->load_bufs_iter_done_cond); + qemu_cond_wait(&multifd->load_bufs_thread_finished_cond, + &multifd->load_bufs_mutex); + } + } + bql_lock(); +} + +static void vfio_multifd_free(VFIOMultifd *multifd) +{ + vfio_load_cleanup_load_bufs_thread(multifd); + + qemu_cond_destroy(&multifd->load_bufs_thread_finished_cond); + qemu_cond_destroy(&multifd->load_bufs_iter_done_cond); + vfio_state_buffers_destroy(&multifd->load_bufs); + qemu_cond_destroy(&multifd->load_bufs_buffer_ready_cond); + qemu_mutex_destroy(&multifd->load_bufs_mutex); + + g_free(multifd); +} + +void vfio_multifd_cleanup(VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + + g_clear_pointer(&migration->multifd, vfio_multifd_free); +} + +bool vfio_multifd_transfer_supported(void) +{ + return multifd_device_state_supported() && + migrate_send_switchover_start(); +} + +bool vfio_multifd_transfer_enabled(VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + + return migration->multifd_transfer; +} + +bool vfio_multifd_setup(VFIODevice *vbasedev, bool alloc_multifd, Error **errp) +{ + VFIOMigration *migration = vbasedev->migration; + + /* + * Make a copy of this setting at the start in case it is changed + * mid-migration. + */ + if (vbasedev->migration_multifd_transfer == ON_OFF_AUTO_AUTO) { + migration->multifd_transfer = vfio_multifd_transfer_supported(); + } else { + migration->multifd_transfer = + vbasedev->migration_multifd_transfer == ON_OFF_AUTO_ON; + } + + if (!vfio_multifd_transfer_enabled(vbasedev)) { + /* Nothing further to check or do */ + return true; + } + + if (!vfio_multifd_transfer_supported()) { + error_setg(errp, + "%s: Multifd device transfer requested but unsupported in the current config", + vbasedev->name); + return false; + } + + if (alloc_multifd) { + assert(!migration->multifd); + migration->multifd = vfio_multifd_new(); + } + + return true; +} + +void vfio_multifd_emit_dummy_eos(VFIODevice *vbasedev, QEMUFile *f) +{ + assert(vfio_multifd_transfer_enabled(vbasedev)); + + /* + * Emit dummy NOP data on the main migration channel since the actual + * device state transfer is done via multifd channels. 
+ */ + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); +} + +static bool +vfio_save_complete_precopy_thread_config_state(VFIODevice *vbasedev, + char *idstr, + uint32_t instance_id, + uint32_t idx, + Error **errp) +{ + g_autoptr(QIOChannelBuffer) bioc = NULL; + g_autoptr(QEMUFile) f = NULL; + int ret; + g_autofree VFIODeviceStatePacket *packet = NULL; + size_t packet_len; + + bioc = qio_channel_buffer_new(0); + qio_channel_set_name(QIO_CHANNEL(bioc), "vfio-device-config-save"); + + f = qemu_file_new_output(QIO_CHANNEL(bioc)); + + if (vfio_save_device_config_state(f, vbasedev, errp)) { + return false; + } + + ret = qemu_fflush(f); + if (ret) { + error_setg(errp, "%s: save config state flush failed: %d", + vbasedev->name, ret); + return false; + } + + packet_len = sizeof(*packet) + bioc->usage; + packet = g_malloc0(packet_len); + packet->version = cpu_to_be32(VFIO_DEVICE_STATE_PACKET_VER_CURRENT); + packet->idx = cpu_to_be32(idx); + packet->flags = cpu_to_be32(VFIO_DEVICE_STATE_CONFIG_STATE); + memcpy(&packet->data, bioc->data, bioc->usage); + + if (!multifd_queue_device_state(idstr, instance_id, + (char *)packet, packet_len)) { + error_setg(errp, "%s: multifd config data queuing failed", + vbasedev->name); + return false; + } + + vfio_migration_add_bytes_transferred(packet_len); + + return true; +} + +/* + * This thread is spawned by the migration core directly via + * .save_complete_precopy_thread SaveVMHandler. + * + * It exits after either: + * * completing saving the remaining device state and device config, OR: + * * encountering some error while doing the above, OR: + * * being forcefully aborted by the migration core by + * multifd_device_state_save_thread_should_exit() returning true. + */ +bool +vfio_multifd_save_complete_precopy_thread(SaveCompletePrecopyThreadData *d, + Error **errp) +{ + VFIODevice *vbasedev = d->handler_opaque; + VFIOMigration *migration = vbasedev->migration; + bool ret = false; + g_autofree VFIODeviceStatePacket *packet = NULL; + uint32_t idx; + + if (!vfio_multifd_transfer_enabled(vbasedev)) { + /* Nothing to do, vfio_save_complete_precopy() does the transfer. 
*/ + return true; + } + + trace_vfio_save_complete_precopy_thread_start(vbasedev->name, + d->idstr, d->instance_id); + + /* We reach here with device state STOP or STOP_COPY only */ + if (vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_STOP_COPY, + VFIO_DEVICE_STATE_STOP, errp)) { + goto thread_exit; + } + + packet = g_malloc0(sizeof(*packet) + migration->data_buffer_size); + packet->version = cpu_to_be32(VFIO_DEVICE_STATE_PACKET_VER_CURRENT); + + for (idx = 0; ; idx++) { + ssize_t data_size; + size_t packet_size; + + if (multifd_device_state_save_thread_should_exit()) { + error_setg(errp, "operation cancelled"); + goto thread_exit; + } + + data_size = read(migration->data_fd, &packet->data, + migration->data_buffer_size); + if (data_size < 0) { + error_setg(errp, "%s: reading state buffer %" PRIu32 " failed: %d", + vbasedev->name, idx, errno); + goto thread_exit; + } else if (data_size == 0) { + break; + } + + packet->idx = cpu_to_be32(idx); + packet_size = sizeof(*packet) + data_size; + + if (!multifd_queue_device_state(d->idstr, d->instance_id, + (char *)packet, packet_size)) { + error_setg(errp, "%s: multifd data queuing failed", vbasedev->name); + goto thread_exit; + } + + vfio_migration_add_bytes_transferred(packet_size); + } + + if (!vfio_save_complete_precopy_thread_config_state(vbasedev, + d->idstr, + d->instance_id, + idx, errp)) { + goto thread_exit; + } + + ret = true; + +thread_exit: + trace_vfio_save_complete_precopy_thread_end(vbasedev->name, ret); + + return ret; +} + +int vfio_multifd_switchover_start(VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + VFIOMultifd *multifd = migration->multifd; + + assert(multifd); + + /* The lock order is load_bufs_mutex -> BQL so unlock BQL here first */ + bql_unlock(); + WITH_QEMU_LOCK_GUARD(&multifd->load_bufs_mutex) { + assert(!multifd->load_bufs_thread_running); + multifd->load_bufs_thread_running = true; + } + bql_lock(); + + qemu_loadvm_start_load_thread(vfio_load_bufs_thread, vbasedev); + + return 0; +} diff --git a/hw/vfio/migration-multifd.h b/hw/vfio/migration-multifd.h new file mode 100644 index 00000000000..82d2d3a1fd3 --- /dev/null +++ b/hw/vfio/migration-multifd.h @@ -0,0 +1,37 @@ +/* + * Multifd VFIO migration + * + * Copyright (C) 2024,2025 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_MIGRATION_MULTIFD_H +#define HW_VFIO_MIGRATION_MULTIFD_H + +#include "hw/vfio/vfio-device.h" + +bool vfio_multifd_setup(VFIODevice *vbasedev, bool alloc_multifd, Error **errp); +void vfio_multifd_cleanup(VFIODevice *vbasedev); + +bool vfio_multifd_transfer_supported(void); +bool vfio_multifd_transfer_enabled(VFIODevice *vbasedev); + +bool vfio_load_config_after_iter(VFIODevice *vbasedev); +bool vfio_multifd_load_state_buffer(void *opaque, char *data, size_t data_size, + Error **errp); + +int vfio_load_state_config_load_ready(VFIODevice *vbasedev); + +void vfio_multifd_emit_dummy_eos(VFIODevice *vbasedev, QEMUFile *f); + +bool +vfio_multifd_save_complete_precopy_thread(SaveCompletePrecopyThreadData *d, + Error **errp); + +int vfio_multifd_switchover_start(VFIODevice *vbasedev); + +#endif diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c index 262d42a46e5..4c06e3db936 100644 --- a/hw/vfio/migration.c +++ b/hw/vfio/migration.c @@ -15,38 +15,23 @@ #include <linux/vfio.h> #include <sys/ioctl.h> -#include "sysemu/runstate.h" -#include "hw/vfio/vfio-common.h" +#include "system/runstate.h" +#include "hw/vfio/vfio-device.h" +#include "hw/vfio/vfio-migration.h" #include "migration/misc.h" #include "migration/savevm.h" #include "migration/vmstate.h" #include "migration/qemu-file.h" #include "migration/register.h" #include "migration/blocker.h" +#include "migration-multifd.h" #include "qapi/error.h" #include "qapi/qapi-events-vfio.h" #include "exec/ramlist.h" -#include "exec/ram_addr.h" #include "pci.h" #include "trace.h" #include "hw/hw.h" - -/* - * Flags to be used as unique delimiters for VFIO devices in the migration - * stream. These flags are composed as: - * 0xffffffff => MSB 32-bit all 1s - * 0xef10 => Magic ID, represents emulated (virtual) function IO - * 0x0000 => 16-bits reserved for flags - * - * The beginning of state information is marked by _DEV_CONFIG_STATE, - * _DEV_SETUP_STATE, or _DEV_DATA_STATE, respectively. The end of a - * certain state information is marked by _END_OF_STATE. 
- */ -#define VFIO_MIG_FLAG_END_OF_STATE (0xffffffffef100001ULL) -#define VFIO_MIG_FLAG_DEV_CONFIG_STATE (0xffffffffef100002ULL) -#define VFIO_MIG_FLAG_DEV_SETUP_STATE (0xffffffffef100003ULL) -#define VFIO_MIG_FLAG_DEV_DATA_STATE (0xffffffffef100004ULL) -#define VFIO_MIG_FLAG_DEV_INIT_DATA_SENT (0xffffffffef100005ULL) +#include "vfio-migration-internal.h" /* * This is an arbitrary size based on migration of mlx5 devices, where typically @@ -55,7 +40,7 @@ */ #define VFIO_MIG_DEFAULT_DATA_BUFFER_SIZE (1 * MiB) -static int64_t bytes_transferred; +static unsigned long bytes_transferred; static const char *mig_state_to_str(enum vfio_device_mig_state state) { @@ -81,7 +66,7 @@ static const char *mig_state_to_str(enum vfio_device_mig_state state) } } -static VfioMigrationState +static QapiVfioMigrationState mig_state_to_qapi_state(enum vfio_device_mig_state state) { switch (state) { @@ -136,10 +121,10 @@ static void vfio_migration_set_device_state(VFIODevice *vbasedev, vfio_migration_send_event(vbasedev); } -static int vfio_migration_set_state(VFIODevice *vbasedev, - enum vfio_device_mig_state new_state, - enum vfio_device_mig_state recover_state, - Error **errp) +int vfio_migration_set_state(VFIODevice *vbasedev, + enum vfio_device_mig_state new_state, + enum vfio_device_mig_state recover_state, + Error **errp) { VFIOMigration *migration = vbasedev->migration; uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) + @@ -254,8 +239,7 @@ static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev, return ret; } -static int vfio_save_device_config_state(QEMUFile *f, void *opaque, - Error **errp) +int vfio_save_device_config_state(QEMUFile *f, void *opaque, Error **errp) { VFIODevice *vbasedev = opaque; int ret; @@ -280,11 +264,13 @@ static int vfio_save_device_config_state(QEMUFile *f, void *opaque, return ret; } -static int vfio_load_device_config_state(QEMUFile *f, void *opaque) +int vfio_load_device_config_state(QEMUFile *f, void *opaque) { VFIODevice *vbasedev = opaque; uint64_t data; + trace_vfio_load_device_config_state_start(vbasedev->name); + if (vbasedev->ops && vbasedev->ops->vfio_load_config) { int ret; @@ -303,7 +289,7 @@ static int vfio_load_device_config_state(QEMUFile *f, void *opaque) return -EINVAL; } - trace_vfio_load_device_config_state(vbasedev->name); + trace_vfio_load_device_config_state_end(vbasedev->name); return qemu_file_get_error(f); } @@ -370,6 +356,10 @@ static ssize_t vfio_save_block(QEMUFile *f, VFIOMigration *migration) * please refer to the Linux kernel VFIO uAPI. 
*/ if (errno == ENOMSG) { + if (!migration->event_precopy_empty_hit) { + trace_vfio_save_block_precopy_empty_hit(migration->vbasedev->name); + migration->event_precopy_empty_hit = true; + } return 0; } @@ -379,10 +369,13 @@ static ssize_t vfio_save_block(QEMUFile *f, VFIOMigration *migration) return 0; } + /* Non-empty read: re-arm the trace event */ + migration->event_precopy_empty_hit = false; + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE); qemu_put_be64(f, data_size); qemu_put_buffer(f, migration->data_buffer, data_size); - bytes_transferred += data_size; + vfio_migration_add_bytes_transferred(data_size); trace_vfio_save_block(migration->vbasedev->name, data_size); @@ -460,6 +453,10 @@ static int vfio_save_setup(QEMUFile *f, void *opaque, Error **errp) uint64_t stop_copy_size = VFIO_MIG_DEFAULT_DATA_BUFFER_SIZE; int ret; + if (!vfio_multifd_setup(vbasedev, false, errp)) { + return -EINVAL; + } + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_SETUP_STATE); vfio_query_stop_copy_size(vbasedev, &stop_copy_size); @@ -472,6 +469,9 @@ static int vfio_save_setup(QEMUFile *f, void *opaque, Error **errp) return -ENOMEM; } + migration->event_save_iterate_started = false; + migration->event_precopy_empty_hit = false; + if (vfio_precopy_supported(vbasedev)) { switch (migration->device_state) { case VFIO_DEVICE_STATE_RUNNING: @@ -513,6 +513,9 @@ static void vfio_save_cleanup(void *opaque) Error *local_err = NULL; int ret; + /* Currently a NOP, done for symmetry with load_cleanup() */ + vfio_multifd_cleanup(vbasedev); + /* * Changing device state from STOP_COPY to STOP can take time. Do it here, * after migration has completed, so it won't increase downtime. @@ -576,9 +579,6 @@ static void vfio_state_pending_exact(void *opaque, uint64_t *must_precopy, if (vfio_device_state_is_precopy(vbasedev)) { vfio_query_precopy_size(migration); - - *must_precopy += - migration->precopy_init_size + migration->precopy_dirty_size; } trace_vfio_state_pending_exact(vbasedev->name, *must_precopy, *can_postcopy, @@ -605,6 +605,11 @@ static int vfio_save_iterate(QEMUFile *f, void *opaque) VFIOMigration *migration = vbasedev->migration; ssize_t data_size; + if (!migration->event_save_iterate_started) { + trace_vfio_save_iterate_start(vbasedev->name); + migration->event_save_iterate_started = true; + } + data_size = vfio_save_block(f, migration); if (data_size < 0) { return data_size; @@ -633,6 +638,13 @@ static int vfio_save_complete_precopy(QEMUFile *f, void *opaque) int ret; Error *local_err = NULL; + if (vfio_multifd_transfer_enabled(vbasedev)) { + vfio_multifd_emit_dummy_eos(vbasedev, f); + return 0; + } + + trace_vfio_save_complete_precopy_start(vbasedev->name); + /* We reach here with device state STOP or STOP_COPY only */ ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_STOP_COPY, VFIO_DEVICE_STATE_STOP, &local_err); @@ -662,6 +674,15 @@ static void vfio_save_state(QEMUFile *f, void *opaque) Error *local_err = NULL; int ret; + if (vfio_multifd_transfer_enabled(vbasedev)) { + if (vfio_load_config_after_iter(vbasedev)) { + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_CONFIG_LOAD_READY); + } else { + vfio_multifd_emit_dummy_eos(vbasedev, f); + } + return; + } + ret = vfio_save_device_config_state(f, opaque, &local_err); if (ret) { error_prepend(&local_err, @@ -674,15 +695,28 @@ static void vfio_save_state(QEMUFile *f, void *opaque) static int vfio_load_setup(QEMUFile *f, void *opaque, Error **errp) { VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + int ret; - return 
vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_RESUMING, - vbasedev->migration->device_state, errp); + if (!vfio_multifd_setup(vbasedev, true, errp)) { + return -EINVAL; + } + + ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_RESUMING, + migration->device_state, errp); + if (ret) { + return ret; + } + + return 0; } static int vfio_load_cleanup(void *opaque) { VFIODevice *vbasedev = opaque; + vfio_multifd_cleanup(vbasedev); + vfio_migration_cleanup(vbasedev); trace_vfio_load_cleanup(vbasedev->name); @@ -703,6 +737,13 @@ static int vfio_load_state(QEMUFile *f, void *opaque, int version_id) switch (data) { case VFIO_MIG_FLAG_DEV_CONFIG_STATE: { + if (vfio_multifd_transfer_enabled(vbasedev)) { + error_report("%s: got DEV_CONFIG_STATE in main migration " + "channel but doing multifd transfer", + vbasedev->name); + return -EINVAL; + } + return vfio_load_device_config_state(f, opaque); } case VFIO_MIG_FLAG_DEV_SETUP_STATE: @@ -747,6 +788,10 @@ static int vfio_load_state(QEMUFile *f, void *opaque, int version_id) return ret; } + case VFIO_MIG_FLAG_DEV_CONFIG_LOAD_READY: + { + return vfio_load_state_config_load_ready(vbasedev); + } default: error_report("%s: Unknown tag 0x%"PRIx64, vbasedev->name, data); return -EINVAL; @@ -768,6 +813,17 @@ static bool vfio_switchover_ack_needed(void *opaque) return vfio_precopy_supported(vbasedev); } +static int vfio_switchover_start(void *opaque) +{ + VFIODevice *vbasedev = opaque; + + if (vfio_multifd_transfer_enabled(vbasedev)) { + return vfio_multifd_switchover_start(vbasedev); + } + + return 0; +} + static const SaveVMHandlers savevm_vfio_handlers = { .save_prepare = vfio_save_prepare, .save_setup = vfio_save_setup, @@ -776,12 +832,18 @@ static const SaveVMHandlers savevm_vfio_handlers = { .state_pending_exact = vfio_state_pending_exact, .is_active_iterate = vfio_is_active_iterate, .save_live_iterate = vfio_save_iterate, - .save_live_complete_precopy = vfio_save_complete_precopy, + .save_complete = vfio_save_complete_precopy, .save_state = vfio_save_state, .load_setup = vfio_load_setup, .load_cleanup = vfio_load_cleanup, .load_state = vfio_load_state, .switchover_ack_needed = vfio_switchover_ack_needed, + /* + * Multifd support + */ + .load_state_buffer = vfio_multifd_load_state_buffer, + .switchover_start = vfio_switchover_start, + .save_complete_precopy_thread = vfio_multifd_save_complete_precopy_thread, }; /* ---------------------------------------------------------------------- */ @@ -962,13 +1024,72 @@ static int vfio_migration_init(VFIODevice *vbasedev) vfio_vmstate_change_prepare : NULL; migration->vm_state = qdev_add_vm_change_state_handler_full( - vbasedev->dev, vfio_vmstate_change, prepare_cb, vbasedev); + vbasedev->dev, vfio_vmstate_change, prepare_cb, NULL, vbasedev); migration_add_notifier(&migration->migration_state, vfio_migration_state_notifier); return 0; } +static Error *multiple_devices_migration_blocker; + +/* + * Multiple devices migration is allowed only if all devices support P2P + * migration. Single device migration is allowed regardless of P2P migration + * support. 
+ */ +static bool vfio_multiple_devices_migration_is_supported(void) +{ + VFIODevice *vbasedev; + unsigned int device_num = 0; + bool all_support_p2p = true; + + QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) { + if (vbasedev->migration) { + device_num++; + + if (!(vbasedev->migration->mig_flags & VFIO_MIGRATION_P2P)) { + all_support_p2p = false; + } + } + } + + return all_support_p2p || device_num <= 1; +} + +static int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp) +{ + if (vfio_multiple_devices_migration_is_supported()) { + return 0; + } + + if (vbasedev->enable_migration == ON_OFF_AUTO_ON) { + error_setg(errp, "Multiple VFIO devices migration is supported only if " + "all of them support P2P migration"); + return -EINVAL; + } + + if (multiple_devices_migration_blocker) { + return 0; + } + + error_setg(&multiple_devices_migration_blocker, + "Multiple VFIO devices migration is supported only if all of " + "them support P2P migration"); + return migrate_add_blocker_normal(&multiple_devices_migration_blocker, + errp); +} + +static void vfio_unblock_multiple_devices_migration(void) +{ + if (!multiple_devices_migration_blocker || + !vfio_multiple_devices_migration_is_supported()) { + return; + } + + migrate_del_blocker(&multiple_devices_migration_blocker); +} + static void vfio_migration_deinit(VFIODevice *vbasedev) { VFIOMigration *migration = vbasedev->migration; @@ -995,14 +1116,40 @@ static int vfio_block_migration(VFIODevice *vbasedev, Error *err, Error **errp) /* ---------------------------------------------------------------------- */ -int64_t vfio_mig_bytes_transferred(void) +int64_t vfio_migration_bytes_transferred(void) +{ + return MIN(qatomic_read(&bytes_transferred), INT64_MAX); +} + +void vfio_migration_reset_bytes_transferred(void) +{ + qatomic_set(&bytes_transferred, 0); +} + +void vfio_migration_add_bytes_transferred(unsigned long val) { - return bytes_transferred; + qatomic_add(&bytes_transferred, val); +} + +bool vfio_migration_active(void) +{ + VFIODevice *vbasedev; + + if (QLIST_EMPTY(&vfio_device_list)) { + return false; + } + + QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) { + if (vbasedev->migration_blocker) { + return false; + } + } + return true; } -void vfio_reset_bytes_transferred(void) +static bool vfio_viommu_preset(VFIODevice *vbasedev) { - bytes_transferred = 0; + return vbasedev->bcontainer->space->as != &address_space_memory; } /* @@ -1081,3 +1228,19 @@ void vfio_migration_exit(VFIODevice *vbasedev) migrate_del_blocker(&vbasedev->migration_blocker); } + +bool vfio_device_state_is_running(VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + + return migration->device_state == VFIO_DEVICE_STATE_RUNNING || + migration->device_state == VFIO_DEVICE_STATE_RUNNING_P2P; +} + +bool vfio_device_state_is_precopy(VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + + return migration->device_state == VFIO_DEVICE_STATE_PRE_COPY || + migration->device_state == VFIO_DEVICE_STATE_PRE_COPY_P2P; +} diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c index 39dae72497e..3f002252acf 100644 --- a/hw/vfio/pci-quirks.c +++ b/hw/vfio/pci-quirks.c @@ -25,6 +25,7 @@ #include "hw/nvram/fw_cfg.h" #include "hw/qdev-properties.h" #include "pci.h" +#include "pci-quirks.h" #include "trace.h" /* @@ -66,40 +67,6 @@ bool vfio_opt_rom_in_denylist(VFIOPCIDevice *vdev) * Device specific region quirks (mostly backdoors to PCI config space) */ -/* - * The generic window quirks operate on an address and 
data register, - * vfio_generic_window_address_quirk handles the address register and - * vfio_generic_window_data_quirk handles the data register. These ops - * pass reads and writes through to hardware until a value matching the - * stored address match/mask is written. When this occurs, the data - * register access emulated PCI config space for the device rather than - * passing through accesses. This enables devices where PCI config space - * is accessible behind a window register to maintain the virtualization - * provided through vfio. - */ -typedef struct VFIOConfigWindowMatch { - uint32_t match; - uint32_t mask; -} VFIOConfigWindowMatch; - -typedef struct VFIOConfigWindowQuirk { - struct VFIOPCIDevice *vdev; - - uint32_t address_val; - - uint32_t address_offset; - uint32_t data_offset; - - bool window_enabled; - uint8_t bar; - - MemoryRegion *addr_mem; - MemoryRegion *data_mem; - - uint32_t nr_matches; - VFIOConfigWindowMatch matches[]; -} VFIOConfigWindowQuirk; - static uint64_t vfio_generic_window_quirk_address_read(void *opaque, hwaddr addr, unsigned size) @@ -135,7 +102,7 @@ static void vfio_generic_window_quirk_address_write(void *opaque, hwaddr addr, } } -static const MemoryRegionOps vfio_generic_window_address_quirk = { +const MemoryRegionOps vfio_generic_window_address_quirk = { .read = vfio_generic_window_quirk_address_read, .write = vfio_generic_window_quirk_address_write, .endianness = DEVICE_LITTLE_ENDIAN, @@ -178,26 +145,12 @@ static void vfio_generic_window_quirk_data_write(void *opaque, hwaddr addr, addr + window->data_offset, data, size); } -static const MemoryRegionOps vfio_generic_window_data_quirk = { +const MemoryRegionOps vfio_generic_window_data_quirk = { .read = vfio_generic_window_quirk_data_read, .write = vfio_generic_window_quirk_data_write, .endianness = DEVICE_LITTLE_ENDIAN, }; -/* - * The generic mirror quirk handles devices which expose PCI config space - * through a region within a BAR. When enabled, reads and writes are - * redirected through to emulated PCI config space. XXX if PCI config space - * used memory regions, this could just be an alias. 
- */ -typedef struct VFIOConfigMirrorQuirk { - struct VFIOPCIDevice *vdev; - uint32_t offset; - uint8_t bar; - MemoryRegion *mem; - uint8_t data[]; -} VFIOConfigMirrorQuirk; - static uint64_t vfio_generic_quirk_mirror_read(void *opaque, hwaddr addr, unsigned size) { @@ -209,6 +162,7 @@ static uint64_t vfio_generic_quirk_mirror_read(void *opaque, (void)vfio_region_read(&vdev->bars[mirror->bar].region, addr + mirror->offset, size); + addr += mirror->config_offset; data = vfio_pci_read_config(&vdev->pdev, addr, size); trace_vfio_quirk_generic_mirror_read(vdev->vbasedev.name, memory_region_name(mirror->mem), @@ -222,13 +176,14 @@ static void vfio_generic_quirk_mirror_write(void *opaque, hwaddr addr, VFIOConfigMirrorQuirk *mirror = opaque; VFIOPCIDevice *vdev = mirror->vdev; + addr += mirror->config_offset; vfio_pci_write_config(&vdev->pdev, addr, data, size); trace_vfio_quirk_generic_mirror_write(vdev->vbasedev.name, memory_region_name(mirror->mem), addr, data); } -static const MemoryRegionOps vfio_generic_mirror_quirk = { +const MemoryRegionOps vfio_generic_mirror_quirk = { .read = vfio_generic_quirk_mirror_read, .write = vfio_generic_quirk_mirror_write, .endianness = DEVICE_LITTLE_ENDIAN, @@ -448,7 +403,7 @@ static void vfio_probe_ati_bar4_quirk(VFIOPCIDevice *vdev, int nr) /* This windows doesn't seem to be used except by legacy VGA code */ if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) || - !vdev->vga || nr != 4) { + !vdev->vga || nr != 4 || !vdev->bars[4].ioport) { return; } @@ -1159,59 +1114,19 @@ static void vfio_probe_rtl8168_bar2_quirk(VFIOPCIDevice *vdev, int nr) trace_vfio_quirk_rtl8168_probe(vdev->vbasedev.name); } -#define IGD_ASLS 0xfc /* ASL Storage Register */ - /* - * The OpRegion includes the Video BIOS Table, which seems important for - * telling the driver what sort of outputs it has. Without this, the device - * may work in the guest, but we may not get output. This also requires BIOS - * support to reserve and populate a section of guest memory sufficient for - * the table and to write the base address of that memory to the ASLS register - * of the IGD device. + * Common quirk probe entry points. */ -bool vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev, - struct vfio_region_info *info, Error **errp) +bool vfio_config_quirk_setup(VFIOPCIDevice *vdev, Error **errp) { - int ret; - - vdev->igd_opregion = g_malloc0(info->size); - ret = pread(vdev->vbasedev.fd, vdev->igd_opregion, - info->size, info->offset); - if (ret != info->size) { - error_setg(errp, "failed to read IGD OpRegion"); - g_free(vdev->igd_opregion); - vdev->igd_opregion = NULL; +#ifdef CONFIG_VFIO_IGD + if (!vfio_probe_igd_config_quirk(vdev, errp)) { return false; } - - /* - * Provide fw_cfg with a copy of the OpRegion which the VM firmware is to - * allocate 32bit reserved memory for, copy these contents into, and write - * the reserved memory base address to the device ASLS register at 0xFC. - * Alignment of this reserved region seems flexible, but using a 4k page - * alignment seems to work well. This interface assumes a single IGD - * device, which may be at VM address 00:02.0 in legacy mode or another - * address in UPT mode. - * - * NB, there may be future use cases discovered where the VM should have - * direct interaction with the host OpRegion, in which case the write to - * the ASLS register would trigger MemoryRegion setup to enable that. 
- */ - fw_cfg_add_file(fw_cfg_find(), "etc/igd-opregion", - vdev->igd_opregion, info->size); - - trace_vfio_pci_igd_opregion_enabled(vdev->vbasedev.name); - - pci_set_long(vdev->pdev.config + IGD_ASLS, 0); - pci_set_long(vdev->pdev.wmask + IGD_ASLS, ~0); - pci_set_long(vdev->emulated_config_bits + IGD_ASLS, ~0); - +#endif return true; } -/* - * Common quirk probe entry points. - */ void vfio_vga_quirk_setup(VFIOPCIDevice *vdev) { vfio_vga_probe_ati_3c3_quirk(vdev); @@ -1259,7 +1174,7 @@ void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr) vfio_probe_nvidia_bar0_quirk(vdev, nr); vfio_probe_rtl8168_bar2_quirk(vdev, nr); #ifdef CONFIG_VFIO_IGD - vfio_probe_igd_bar4_quirk(vdev, nr); + vfio_probe_igd_bar0_quirk(vdev, nr); #endif } @@ -1498,7 +1413,7 @@ static void get_nv_gpudirect_clique_id(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint8_t *ptr = object_field_prop_ptr(obj, prop); visit_type_uint8(v, name, ptr, errp); @@ -1508,7 +1423,7 @@ static void set_nv_gpudirect_clique_id(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - Property *prop = opaque; + const Property *prop = opaque; uint8_t value, *ptr = object_field_prop_ptr(obj, prop); if (!visit_type_uint8(v, name, &value, errp)) { @@ -1524,7 +1439,7 @@ static void set_nv_gpudirect_clique_id(Object *obj, Visitor *v, } const PropertyInfo qdev_prop_nv_gpudirect_clique = { - .name = "uint4", + .type = "uint8", .description = "NVIDIA GPUDirect Clique ID (0 - 15)", .get = get_nv_gpudirect_clique_id, .set = set_nv_gpudirect_clique_id, diff --git a/hw/vfio/pci-quirks.h b/hw/vfio/pci-quirks.h new file mode 100644 index 00000000000..d1532e379b1 --- /dev/null +++ b/hw/vfio/pci-quirks.h @@ -0,0 +1,72 @@ +/* + * vfio generic region quirks (mostly backdoors to PCI config space) + * + * Copyright Red Hat, Inc. 2012-2015 + * + * Authors: + * Alex Williamson + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ +#ifndef HW_VFIO_VFIO_PCI_QUIRKS_H +#define HW_VFIO_VFIO_PCI_QUIRKS_H + +#include "qemu/osdep.h" +#include "exec/memop.h" + +/* + * The generic window quirks operate on an address and data register, + * vfio_generic_window_address_quirk handles the address register and + * vfio_generic_window_data_quirk handles the data register. These ops + * pass reads and writes through to hardware until a value matching the + * stored address match/mask is written. When this occurs, the data + * register access emulated PCI config space for the device rather than + * passing through accesses. This enables devices where PCI config space + * is accessible behind a window register to maintain the virtualization + * provided through vfio. + */ +typedef struct VFIOConfigWindowMatch { + uint32_t match; + uint32_t mask; +} VFIOConfigWindowMatch; + +typedef struct VFIOConfigWindowQuirk { + struct VFIOPCIDevice *vdev; + + uint32_t address_val; + + uint32_t address_offset; + uint32_t data_offset; + + bool window_enabled; + uint8_t bar; + + MemoryRegion *addr_mem; + MemoryRegion *data_mem; + + uint32_t nr_matches; + VFIOConfigWindowMatch matches[]; +} VFIOConfigWindowQuirk; + +extern const MemoryRegionOps vfio_generic_window_address_quirk; +extern const MemoryRegionOps vfio_generic_window_data_quirk; + +/* + * The generic mirror quirk handles devices which expose PCI config space + * through a region within a BAR. 
When enabled, reads and writes are + * redirected through to emulated PCI config space. XXX if PCI config space + * used memory regions, this could just be an alias. + */ +typedef struct VFIOConfigMirrorQuirk { + struct VFIOPCIDevice *vdev; + uint32_t offset; /* Offset in BAR */ + uint32_t config_offset; /* Offset in PCI config space */ + uint8_t bar; + MemoryRegion *mem; + uint8_t data[]; +} VFIOConfigMirrorQuirk; + +extern const MemoryRegionOps vfio_generic_mirror_quirk; + +#endif /* HW_VFIO_VFIO_PCI_QUIRKS_H */ diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index 2407720c353..8da9ab286b6 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -24,28 +24,31 @@ #include #include "hw/hw.h" +#include "hw/iommu.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" #include "hw/pci/pci_bridge.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" +#include "hw/vfio/vfio-cpr.h" #include "migration/vmstate.h" -#include "qapi/qmp/qdict.h" +#include "migration/cpr.h" +#include "qobject/qdict.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/module.h" #include "qemu/range.h" #include "qemu/units.h" -#include "sysemu/kvm.h" -#include "sysemu/runstate.h" +#include "system/kvm.h" +#include "system/runstate.h" #include "pci.h" #include "trace.h" #include "qapi/error.h" #include "migration/blocker.h" #include "migration/qemu-file.h" -#include "sysemu/iommufd.h" - -#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug" +#include "system/iommufd.h" +#include "vfio-migration-internal.h" +#include "vfio-helpers.h" /* Protected by BQL */ static KVMRouteChange vfio_route_change; @@ -54,6 +57,36 @@ static void vfio_disable_interrupts(VFIOPCIDevice *vdev); static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled); static void vfio_msi_disable_common(VFIOPCIDevice *vdev); +/* Create new or reuse existing eventfd */ +static bool vfio_notifier_init(VFIOPCIDevice *vdev, EventNotifier *e, + const char *name, int nr, Error **errp) +{ + int fd, ret; + + fd = vfio_cpr_load_vector_fd(vdev, name, nr); + if (fd >= 0) { + event_notifier_init_fd(e, fd); + return true; + } + + ret = event_notifier_init(e, 0); + if (ret) { + error_setg_errno(errp, -ret, "vfio_notifier_init %s failed", name); + return false; + } + + fd = event_notifier_get_fd(e); + vfio_cpr_save_vector_fd(vdev, name, nr, fd); + return true; +} + +static void vfio_notifier_cleanup(VFIOPCIDevice *vdev, EventNotifier *e, + const char *name, int nr) +{ + vfio_cpr_delete_vector_fd(vdev, name, nr); + event_notifier_cleanup(e); +} + /* * Disabling BAR mmaping can be slow, but toggling it around INTx can * also be a huge overhead. 
We try to get the best of both worlds by @@ -101,7 +134,7 @@ static void vfio_intx_interrupt(void *opaque) } } -static void vfio_intx_eoi(VFIODevice *vbasedev) +void vfio_pci_intx_eoi(VFIODevice *vbasedev) { VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); @@ -109,11 +142,11 @@ static void vfio_intx_eoi(VFIODevice *vbasedev) return; } - trace_vfio_intx_eoi(vbasedev->name); + trace_vfio_pci_intx_eoi(vbasedev->name); vdev->intx.pending = false; pci_irq_deassert(&vdev->pdev); - vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX); + vfio_device_irq_unmask(vbasedev, VFIO_PCI_INTX_IRQ_INDEX); } static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp) @@ -129,13 +162,12 @@ static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp) /* Get to a known interrupt state */ qemu_set_fd_handler(irq_fd, NULL, NULL, vdev); - vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); + vfio_device_irq_mask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); vdev->intx.pending = false; pci_irq_deassert(&vdev->pdev); /* Get an eventfd for resample/unmask */ - if (event_notifier_init(&vdev->intx.unmask, 0)) { - error_setg(errp, "event_notifier_init failed eoi"); + if (!vfio_notifier_init(vdev, &vdev->intx.unmask, "intx-unmask", 0, errp)) { goto fail; } @@ -147,15 +179,15 @@ static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp) goto fail_irqfd; } - if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0, - VFIO_IRQ_SET_ACTION_UNMASK, - event_notifier_get_fd(&vdev->intx.unmask), - errp)) { + if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0, + VFIO_IRQ_SET_ACTION_UNMASK, + event_notifier_get_fd(&vdev->intx.unmask), + errp)) { goto fail_vfio; } /* Let'em rip */ - vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); + vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); vdev->intx.kvm_accel = true; @@ -167,16 +199,46 @@ static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp) kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt, vdev->intx.route.irq); fail_irqfd: - event_notifier_cleanup(&vdev->intx.unmask); + vfio_notifier_cleanup(vdev, &vdev->intx.unmask, "intx-unmask", 0); fail: qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev); - vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); + vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); return false; #else return true; #endif } +static bool vfio_cpr_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp) +{ +#ifdef CONFIG_KVM + if (vdev->no_kvm_intx || !kvm_irqfds_enabled() || + vdev->intx.route.mode != PCI_INTX_ENABLED || + !kvm_resamplefds_enabled()) { + return true; + } + + if (!vfio_notifier_init(vdev, &vdev->intx.unmask, "intx-unmask", 0, errp)) { + return false; + } + + if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, + &vdev->intx.interrupt, + &vdev->intx.unmask, + vdev->intx.route.irq)) { + error_setg_errno(errp, errno, "failed to setup resample irqfd"); + vfio_notifier_cleanup(vdev, &vdev->intx.unmask, "intx-unmask", 0); + return false; + } + + vdev->intx.kvm_accel = true; + trace_vfio_intx_enable_kvm(vdev->vbasedev.name); + return true; +#else + return true; +#endif +} + static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev) { #ifdef CONFIG_KVM @@ -188,7 +250,7 @@ static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev) * Get to a known state, hardware masked, QEMU ready to accept new * interrupts, QEMU IRQ de-asserted. 
*/ - vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); + vfio_device_irq_mask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); vdev->intx.pending = false; pci_irq_deassert(&vdev->pdev); @@ -199,7 +261,7 @@ static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev) } /* We only need to close the eventfd for VFIO to cleanup the kernel side */ - event_notifier_cleanup(&vdev->intx.unmask); + vfio_notifier_cleanup(vdev, &vdev->intx.unmask, "intx-unmask", 0); /* QEMU starts listening for interrupt events. */ qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt), @@ -208,7 +270,7 @@ static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev) vdev->intx.kvm_accel = false; /* If we've missed an event, let it re-fire through QEMU */ - vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); + vfio_device_irq_unmask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); trace_vfio_intx_disable_kvm(vdev->vbasedev.name); #endif @@ -234,12 +296,12 @@ static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route) } /* Re-enable the interrupt in cased we missed an EOI */ - vfio_intx_eoi(&vdev->vbasedev); + vfio_pci_intx_eoi(&vdev->vbasedev); } static void vfio_intx_routing_notifier(PCIDevice *pdev) { - VFIOPCIDevice *vdev = VFIO_PCI(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); PCIINTxRoute route; if (vdev->interrupt != VFIO_INT_INTx) { @@ -266,14 +328,19 @@ static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp) uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1); Error *err = NULL; int32_t fd; - int ret; if (!pin) { return true; } - vfio_disable_interrupts(vdev); + /* + * Do not alter interrupt state during vfio_realize and cpr load. + * The incoming state is cleared thereafter. + */ + if (!cpr_is_incoming()) { + vfio_disable_interrupts(vdev); + } vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */ pci_config_set_interrupt_pin(vdev->pdev.config, pin); @@ -289,18 +356,25 @@ static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp) } #endif - ret = event_notifier_init(&vdev->intx.interrupt, 0); - if (ret) { - error_setg_errno(errp, -ret, "event_notifier_init failed"); + if (!vfio_notifier_init(vdev, &vdev->intx.interrupt, "intx-interrupt", 0, + errp)) { return false; } fd = event_notifier_get_fd(&vdev->intx.interrupt); qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev); - if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0, + + if (cpr_is_incoming()) { + if (!vfio_cpr_intx_enable_kvm(vdev, &err)) { + warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + } + goto skip_signaling; + } + + if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0, VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) { qemu_set_fd_handler(fd, NULL, NULL, vdev); - event_notifier_cleanup(&vdev->intx.interrupt); + vfio_notifier_cleanup(vdev, &vdev->intx.interrupt, "intx-interrupt", 0); return false; } @@ -308,6 +382,7 @@ static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp) warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); } +skip_signaling: vdev->interrupt = VFIO_INT_INTx; trace_vfio_intx_enable(vdev->vbasedev.name); @@ -320,20 +395,33 @@ static void vfio_intx_disable(VFIOPCIDevice *vdev) timer_del(vdev->intx.mmap_timer); vfio_intx_disable_kvm(vdev); - vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); + vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); vdev->intx.pending = false; pci_irq_deassert(&vdev->pdev); vfio_mmap_set_enabled(vdev, true); fd = 
event_notifier_get_fd(&vdev->intx.interrupt); qemu_set_fd_handler(fd, NULL, NULL, vdev); - event_notifier_cleanup(&vdev->intx.interrupt); + vfio_notifier_cleanup(vdev, &vdev->intx.interrupt, "intx-interrupt", 0); vdev->interrupt = VFIO_INT_NONE; trace_vfio_intx_disable(vdev->vbasedev.name); } +bool vfio_pci_intx_enable(VFIOPCIDevice *vdev, Error **errp) +{ + return vfio_intx_enable(vdev, errp); +} + +void vfio_pci_intx_set_handler(VFIOPCIDevice *vdev, bool enable) +{ + int fd = event_notifier_get_fd(&vdev->intx.interrupt); + IOHandler *handler = (enable ? vfio_intx_interrupt : NULL); + + qemu_set_fd_handler(fd, handler, NULL, vdev); +} + /* * MSI/X */ @@ -372,6 +460,15 @@ static void vfio_msi_interrupt(void *opaque) notify(&vdev->pdev, nr); } +void vfio_pci_msi_set_handler(VFIOPCIDevice *vdev, int nr, bool enable) +{ + VFIOMSIVector *vector = &vdev->msi_vectors[nr]; + int fd = event_notifier_get_fd(&vector->interrupt); + IOHandler *handler = (enable ? vfio_msi_interrupt : NULL); + + qemu_set_fd_handler(fd, handler, NULL, vector); +} + /* * Get MSI-X enabled, but no vector enabled, by setting vector 0 with an invalid * fd to kernel. @@ -379,7 +476,7 @@ static void vfio_msi_interrupt(void *opaque) static int vfio_enable_msix_no_vec(VFIOPCIDevice *vdev) { g_autofree struct vfio_irq_set *irq_set = NULL; - int ret = 0, argsz; + int argsz; int32_t *fd; argsz = sizeof(*irq_set) + sizeof(*fd); @@ -394,9 +491,7 @@ static int vfio_enable_msix_no_vec(VFIOPCIDevice *vdev) fd = (int32_t *)&irq_set->data; *fd = -1; - ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); - - return ret; + return vdev->vbasedev.io_ops->set_irqs(&vdev->vbasedev, irq_set); } static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix) @@ -453,15 +548,15 @@ static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix) fds[i] = fd; } - ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); + ret = vdev->vbasedev.io_ops->set_irqs(&vdev->vbasedev, irq_set); g_free(irq_set); return ret; } -static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector, - int vector_n, bool msix) +void vfio_pci_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector, + int vector_n, bool msix) { if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) { return; @@ -471,13 +566,16 @@ static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector, vector_n, &vdev->pdev); } -static void vfio_connect_kvm_msi_virq(VFIOMSIVector *vector) +static void vfio_connect_kvm_msi_virq(VFIOMSIVector *vector, int nr) { + const char *name = "kvm_interrupt"; + if (vector->virq < 0) { return; } - if (event_notifier_init(&vector->kvm_interrupt, 0)) { + if (!vfio_notifier_init(vector->vdev, &vector->kvm_interrupt, name, nr, + NULL)) { goto fail_notifier; } @@ -489,19 +587,20 @@ static void vfio_connect_kvm_msi_virq(VFIOMSIVector *vector) return; fail_kvm: - event_notifier_cleanup(&vector->kvm_interrupt); + vfio_notifier_cleanup(vector->vdev, &vector->kvm_interrupt, name, nr); fail_notifier: kvm_irqchip_release_virq(kvm_state, vector->virq); vector->virq = -1; } -static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector) +static void vfio_remove_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector, + int nr) { kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt, vector->virq); kvm_irqchip_release_virq(kvm_state, vector->virq); vector->virq = -1; - event_notifier_cleanup(&vector->kvm_interrupt); + vfio_notifier_cleanup(vdev, &vector->kvm_interrupt, "kvm_interrupt", nr); } static void 
vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg, @@ -511,10 +610,47 @@ static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg, kvm_irqchip_commit_routes(kvm_state); } +static void set_irq_signalling(VFIODevice *vbasedev, VFIOMSIVector *vector, + unsigned int nr) +{ + Error *err = NULL; + int32_t fd; + + if (vector->virq >= 0) { + fd = event_notifier_get_fd(&vector->kvm_interrupt); + } else { + fd = event_notifier_get_fd(&vector->interrupt); + } + + if (!vfio_device_irq_set_signaling(vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr, + VFIO_IRQ_SET_ACTION_TRIGGER, + fd, &err)) { + error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name); + } +} + +void vfio_pci_vector_init(VFIOPCIDevice *vdev, int nr) +{ + VFIOMSIVector *vector = &vdev->msi_vectors[nr]; + PCIDevice *pdev = &vdev->pdev; + Error *local_err = NULL; + + vector->vdev = vdev; + vector->virq = -1; + if (!vfio_notifier_init(vdev, &vector->interrupt, "interrupt", nr, + &local_err)) { + error_report_err(local_err); + } + vector->use = true; + if (vdev->interrupt == VFIO_INT_MSIX) { + msix_vector_use(pdev, nr); + } +} + static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, MSIMessage *msg, IOHandler *handler) { - VFIOPCIDevice *vdev = VFIO_PCI(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); VFIOMSIVector *vector; int ret; bool resizing = !!(vdev->nr_vectors < nr + 1); @@ -524,13 +660,7 @@ static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, vector = &vdev->msi_vectors[nr]; if (!vector->use) { - vector->vdev = vdev; - vector->virq = -1; - if (event_notifier_init(&vector->interrupt, 0)) { - error_report("vfio: Error: event_notifier_init failed"); - } - vector->use = true; - msix_vector_use(pdev, nr); + vfio_pci_vector_init(vdev, nr); } qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), @@ -542,19 +672,19 @@ static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, */ if (vector->virq >= 0) { if (!msg) { - vfio_remove_kvm_msi_virq(vector); + vfio_remove_kvm_msi_virq(vdev, vector, nr); } else { vfio_update_kvm_msi_virq(vector, *msg, pdev); } } else { if (msg) { if (vdev->defer_kvm_irq_routing) { - vfio_add_kvm_msi_virq(vdev, vector, nr, true); + vfio_pci_add_kvm_msi_virq(vdev, vector, nr, true); } else { vfio_route_change = kvm_irqchip_begin_route_changes(kvm_state); - vfio_add_kvm_msi_virq(vdev, vector, nr, true); + vfio_pci_add_kvm_msi_virq(vdev, vector, nr, true); kvm_irqchip_commit_route_changes(&vfio_route_change); - vfio_connect_kvm_msi_virq(vector); + vfio_connect_kvm_msi_virq(vector, nr); } } } @@ -576,27 +706,14 @@ static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, if (!vdev->defer_kvm_irq_routing) { if (vdev->msix->noresize && resizing) { - vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); + vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); ret = vfio_enable_vectors(vdev, true); if (ret) { - error_report("vfio: failed to enable vectors, %d", ret); + error_report("vfio: failed to enable vectors, %s", + strerror(-ret)); } } else { - Error *err = NULL; - int32_t fd; - - if (vector->virq >= 0) { - fd = event_notifier_get_fd(&vector->kvm_interrupt); - } else { - fd = event_notifier_get_fd(&vector->interrupt); - } - - if (!vfio_set_irq_signaling(&vdev->vbasedev, - VFIO_PCI_MSIX_IRQ_INDEX, nr, - VFIO_IRQ_SET_ACTION_TRIGGER, fd, - &err)) { - error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); - } + set_irq_signalling(&vdev->vbasedev, vector, nr); } } @@ -614,12 +731,21 @@ static int 
vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, static int vfio_msix_vector_use(PCIDevice *pdev, unsigned int nr, MSIMessage msg) { + /* + * Ignore the callback from msix_set_vector_notifiers during resume. + * The necessary subset of these actions is called from + * vfio_cpr_claim_vectors during post load. + */ + if (cpr_is_incoming()) { + return 0; + } + return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt); } static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr) { - VFIOPCIDevice *vdev = VFIO_PCI(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); VFIOMSIVector *vector = &vdev->msi_vectors[nr]; trace_vfio_msix_vector_release(vdev->vbasedev.name, nr); @@ -636,7 +762,7 @@ static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr) int32_t fd = event_notifier_get_fd(&vector->interrupt); Error *err = NULL; - if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, + if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr, VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); @@ -644,14 +770,20 @@ static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr) } } -static void vfio_prepare_kvm_msi_virq_batch(VFIOPCIDevice *vdev) +void vfio_pci_msix_set_notifiers(VFIOPCIDevice *vdev) +{ + msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use, + vfio_msix_vector_release, NULL); +} + +void vfio_pci_prepare_kvm_msi_virq_batch(VFIOPCIDevice *vdev) { assert(!vdev->defer_kvm_irq_routing); vdev->defer_kvm_irq_routing = true; vfio_route_change = kvm_irqchip_begin_route_changes(kvm_state); } -static void vfio_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev) +void vfio_pci_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev) { int i; @@ -661,7 +793,7 @@ static void vfio_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev) kvm_irqchip_commit_route_changes(&vfio_route_change); for (i = 0; i < vdev->nr_vectors; i++) { - vfio_connect_kvm_msi_virq(&vdev->msi_vectors[i]); + vfio_connect_kvm_msi_virq(&vdev->msi_vectors[i], i); } } @@ -681,19 +813,20 @@ static void vfio_msix_enable(VFIOPCIDevice *vdev) * routes once rather than per vector provides a substantial * performance improvement. */ - vfio_prepare_kvm_msi_virq_batch(vdev); + vfio_pci_prepare_kvm_msi_virq_batch(vdev); if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use, vfio_msix_vector_release, NULL)) { error_report("vfio: msix_set_vector_notifiers failed"); } - vfio_commit_kvm_msi_virq_batch(vdev); + vfio_pci_commit_kvm_msi_virq_batch(vdev); if (vdev->nr_vectors) { ret = vfio_enable_vectors(vdev, true); if (ret) { - error_report("vfio: failed to enable vectors, %d", ret); + error_report("vfio: failed to enable vectors, %s", + strerror(-ret)); } } else { /* @@ -710,7 +843,8 @@ static void vfio_msix_enable(VFIOPCIDevice *vdev) */ ret = vfio_enable_msix_no_vec(vdev); if (ret) { - error_report("vfio: failed to enable MSI-X, %d", ret); + error_report("vfio: failed to enable MSI-X, %s", + strerror(-ret)); } } @@ -730,19 +864,21 @@ static void vfio_msi_enable(VFIOPCIDevice *vdev) * Deferring to commit the KVM routes once rather than per vector * provides a substantial performance improvement. 
*/ - vfio_prepare_kvm_msi_virq_batch(vdev); + vfio_pci_prepare_kvm_msi_virq_batch(vdev); vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors); for (i = 0; i < vdev->nr_vectors; i++) { VFIOMSIVector *vector = &vdev->msi_vectors[i]; + Error *local_err = NULL; vector->vdev = vdev; vector->virq = -1; vector->use = true; - if (event_notifier_init(&vector->interrupt, 0)) { - error_report("vfio: Error: event_notifier_init failed"); + if (!vfio_notifier_init(vdev, &vector->interrupt, "interrupt", i, + &local_err)) { + error_report_err(local_err); } qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), @@ -752,10 +888,10 @@ static void vfio_msi_enable(VFIOPCIDevice *vdev) * Attempt to enable route through KVM irqchip, * default to userspace handling if unavailable. */ - vfio_add_kvm_msi_virq(vdev, vector, i, false); + vfio_pci_add_kvm_msi_virq(vdev, vector, i, false); } - vfio_commit_kvm_msi_virq_batch(vdev); + vfio_pci_commit_kvm_msi_virq_batch(vdev); /* Set interrupt type prior to possible interrupts */ vdev->interrupt = VFIO_INT_MSI; @@ -763,7 +899,8 @@ static void vfio_msi_enable(VFIOPCIDevice *vdev) ret = vfio_enable_vectors(vdev, false); if (ret) { if (ret < 0) { - error_report("vfio: Error: Failed to setup MSI fds: %m"); + error_report("vfio: Error: Failed to setup MSI fds: %s", + strerror(-ret)); } else { error_report("vfio: Error: Failed to enable %d " "MSI vectors, retry with %d", vdev->nr_vectors, ret); @@ -797,11 +934,11 @@ static void vfio_msi_disable_common(VFIOPCIDevice *vdev) VFIOMSIVector *vector = &vdev->msi_vectors[i]; if (vdev->msi_vectors[i].use) { if (vector->virq >= 0) { - vfio_remove_kvm_msi_virq(vector); + vfio_remove_kvm_msi_virq(vdev, vector, i); } qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), NULL, NULL, NULL); - event_notifier_cleanup(&vector->interrupt); + vfio_notifier_cleanup(vdev, &vector->interrupt, "interrupt", i); } } @@ -833,7 +970,7 @@ static void vfio_msix_disable(VFIOPCIDevice *vdev) * Always clear MSI-X IRQ index. A PF device could have enabled * MSI-X with no vectors. See vfio_msix_enable(). 
*/ - vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); + vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); vfio_msi_disable_common(vdev); if (!vfio_intx_enable(vdev, &err)) { @@ -850,7 +987,7 @@ static void vfio_msi_disable(VFIOPCIDevice *vdev) { Error *err = NULL; - vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX); + vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX); vfio_msi_disable_common(vdev); vfio_intx_enable(vdev, &err); if (err) { @@ -879,18 +1016,22 @@ static void vfio_update_msi(VFIOPCIDevice *vdev) static void vfio_pci_load_rom(VFIOPCIDevice *vdev) { - g_autofree struct vfio_region_info *reg_info = NULL; + VFIODevice *vbasedev = &vdev->vbasedev; + struct vfio_region_info *reg_info = NULL; uint64_t size; off_t off = 0; ssize_t bytes; + int ret; - if (vfio_get_region_info(&vdev->vbasedev, - VFIO_PCI_ROM_REGION_INDEX, ®_info)) { - error_report("vfio: Error getting ROM info: %m"); + ret = vfio_device_get_region_info(vbasedev, VFIO_PCI_ROM_REGION_INDEX, + ®_info); + + if (ret != 0) { + error_report("vfio: Error getting ROM info: %s", strerror(-ret)); return; } - trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size, + trace_vfio_pci_load_rom(vbasedev->name, (unsigned long)reg_info->size, (unsigned long)reg_info->offset, (unsigned long)reg_info->flags); @@ -899,8 +1040,7 @@ static void vfio_pci_load_rom(VFIOPCIDevice *vdev) if (!vdev->rom_size) { vdev->rom_read_failed = true; - error_report("vfio-pci: Cannot read device rom at " - "%s", vdev->vbasedev.name); + error_report("vfio-pci: Cannot read device rom at %s", vbasedev->name); error_printf("Device option ROM contents are probably invalid " "(check dmesg).\nSkip option ROM probe with rombar=0, " "or load from file with romfile=\n"); @@ -911,18 +1051,22 @@ static void vfio_pci_load_rom(VFIOPCIDevice *vdev) memset(vdev->rom, 0xff, size); while (size) { - bytes = pread(vdev->vbasedev.fd, vdev->rom + off, - size, vdev->rom_offset + off); + bytes = vbasedev->io_ops->region_read(vbasedev, + VFIO_PCI_ROM_REGION_INDEX, + off, size, vdev->rom + off); + if (bytes == 0) { break; } else if (bytes > 0) { off += bytes; size -= bytes; } else { - if (errno == EINTR || errno == EAGAIN) { + if (bytes == -EINTR || bytes == -EAGAIN) { continue; } - error_report("vfio: Error reading device ROM: %m"); + error_report("vfio: Error reading device ROM: %s", + strreaderror(bytes)); + break; } } @@ -958,6 +1102,24 @@ static void vfio_pci_load_rom(VFIOPCIDevice *vdev) } } +/* "Raw" read of underlying config space. */ +static int vfio_pci_config_space_read(VFIOPCIDevice *vdev, off_t offset, + uint32_t size, void *data) +{ + return vdev->vbasedev.io_ops->region_read(&vdev->vbasedev, + VFIO_PCI_CONFIG_REGION_INDEX, + offset, size, data); +} + +/* "Raw" write of underlying config space. 
*/ +static int vfio_pci_config_space_write(VFIOPCIDevice *vdev, off_t offset, + uint32_t size, void *data) +{ + return vdev->vbasedev.io_ops->region_write(&vdev->vbasedev, + VFIO_PCI_CONFIG_REGION_INDEX, + offset, size, data, false); +} + static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size) { VFIOPCIDevice *vdev = opaque; @@ -1010,11 +1172,9 @@ static const MemoryRegionOps vfio_rom_ops = { static void vfio_pci_size_rom(VFIOPCIDevice *vdev) { + VFIODevice *vbasedev = &vdev->vbasedev; uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK); - off_t offset = vdev->config_offset + PCI_ROM_ADDRESS; - DeviceState *dev = DEVICE(vdev); char *name; - int fd = vdev->vbasedev.fd; if (vdev->pdev.romfile || !vdev->pdev.rom_bar) { /* Since pci handles romfile, just print a message and return */ @@ -1031,11 +1191,12 @@ static void vfio_pci_size_rom(VFIOPCIDevice *vdev) * Use the same size ROM BAR as the physical device. The contents * will get filled in later when the guest tries to read it. */ - if (pread(fd, &orig, 4, offset) != 4 || - pwrite(fd, &size, 4, offset) != 4 || - pread(fd, &size, 4, offset) != 4 || - pwrite(fd, &orig, 4, offset) != 4) { - error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name); + if (vfio_pci_config_space_read(vdev, PCI_ROM_ADDRESS, 4, &orig) != 4 || + vfio_pci_config_space_write(vdev, PCI_ROM_ADDRESS, 4, &size) != 4 || + vfio_pci_config_space_read(vdev, PCI_ROM_ADDRESS, 4, &size) != 4 || + vfio_pci_config_space_write(vdev, PCI_ROM_ADDRESS, 4, &orig) != 4) { + + error_report("%s(%s) ROM access failed", __func__, vbasedev->name); return; } @@ -1046,12 +1207,12 @@ static void vfio_pci_size_rom(VFIOPCIDevice *vdev) } if (vfio_opt_rom_in_denylist(vdev)) { - if (dev->opts && qdict_haskey(dev->opts, "rombar")) { + if (vdev->pdev.rom_bar > 0) { warn_report("Device at %s is known to cause system instability" " issues during option rom execution", vdev->vbasedev.name); error_printf("Proceeding anyway since user specified" - " non zero value for rombar\n"); + " positive value for rombar\n"); } else { warn_report("Rom loading for device at %s has been disabled" " due to system instability issues", @@ -1168,7 +1329,7 @@ static const MemoryRegionOps vfio_vga_ops = { */ static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar) { - VFIOPCIDevice *vdev = VFIO_PCI(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); VFIORegion *region = &vdev->bars[bar].region; MemoryRegion *mmap_mr, *region_mr, *base_mr; PCIIORegion *r; @@ -1214,7 +1375,8 @@ static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar) */ uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len) { - VFIOPCIDevice *vdev = VFIO_PCI(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); + VFIODevice *vbasedev = &vdev->vbasedev; uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val; memcpy(&emu_bits, vdev->emulated_config_bits + addr, len); @@ -1227,12 +1389,12 @@ uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len) if (~emu_bits & (0xffffffffU >> (32 - len * 8))) { ssize_t ret; - ret = pread(vdev->vbasedev.fd, &phys_val, len, - vdev->config_offset + addr); + ret = vfio_pci_config_space_read(vdev, addr, len, &phys_val); if (ret != len) { - error_report("%s(%s, 0x%x, 0x%x) failed: %m", - __func__, vdev->vbasedev.name, addr, len); - return -errno; + error_report("%s(%s, 0x%x, 0x%x) failed: %s", + __func__, vbasedev->name, addr, len, + strreaderror(ret)); + return -1; } phys_val = le32_to_cpu(phys_val); } @@ -1247,16 +1409,19 @@ uint32_t 
vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len) void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr, uint32_t val, int len) { - VFIOPCIDevice *vdev = VFIO_PCI(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); + VFIODevice *vbasedev = &vdev->vbasedev; uint32_t val_le = cpu_to_le32(val); + int ret; trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len); /* Write everything to VFIO, let it filter out what we can't write */ - if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr) - != len) { - error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m", - __func__, vdev->vbasedev.name, addr, val, len); + ret = vfio_pci_config_space_write(vdev, addr, len, &val_le); + if (ret != len) { + error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %s", + __func__, vbasedev->name, addr, val, len, + strwriteerror(ret)); } /* MSI/MSI-X Enabling/Disabling */ @@ -1344,9 +1509,11 @@ static bool vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp) int ret, entries; Error *err = NULL; - if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl), - vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) { - error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS"); + ret = vfio_pci_config_space_read(vdev, pos + PCI_CAP_FLAGS, + sizeof(ctrl), &ctrl); + if (ret != sizeof(ctrl)) { + error_setg(errp, "failed reading MSI PCI_CAP_FLAGS: %s", + strreaderror(ret)); return false; } ctrl = le16_to_cpu(ctrl); @@ -1379,8 +1546,8 @@ static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev) * If the host driver allows mapping of a MSIX data, we are going to * do map the entire BAR and emulate MSIX table on top of that. */ - if (vfio_has_region_cap(&vdev->vbasedev, region->nr, - VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) { + if (vfio_device_has_region_cap(&vdev->vbasedev, region->nr, + VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) { return; } @@ -1452,7 +1619,7 @@ static bool vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp) int target_bar = -1; size_t msix_sz; - if (!vdev->msix || vdev->msix_relo == OFF_AUTOPCIBAR_OFF) { + if (!vdev->msix || vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) { return true; } @@ -1464,7 +1631,7 @@ static bool vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp) /* PCI BARs must be a power of 2 */ msix_sz = pow2ceil(msix_sz); - if (vdev->msix_relo == OFF_AUTOPCIBAR_AUTO) { + if (vdev->msix_relo == OFF_AUTO_PCIBAR_AUTO) { /* * TODO: Lookup table for known devices. 
* @@ -1479,7 +1646,7 @@ static bool vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp) return false; } } else { - target_bar = (int)(vdev->msix_relo - OFF_AUTOPCIBAR_BAR0); + target_bar = (int)(vdev->msix_relo - OFF_AUTO_PCIBAR_BAR0); } /* I/O port BARs cannot host MSI-X structures */ @@ -1553,31 +1720,35 @@ static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp) uint8_t pos; uint16_t ctrl; uint32_t table, pba; - int ret, fd = vdev->vbasedev.fd; - struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info), - .index = VFIO_PCI_MSIX_IRQ_INDEX }; + struct vfio_irq_info irq_info; VFIOMSIXInfo *msix; + int ret; pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX); if (!pos) { return true; } - if (pread(fd, &ctrl, sizeof(ctrl), - vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) { - error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS"); + ret = vfio_pci_config_space_read(vdev, pos + PCI_MSIX_FLAGS, + sizeof(ctrl), &ctrl); + if (ret != sizeof(ctrl)) { + error_setg(errp, "failed to read PCI MSIX FLAGS: %s", + strreaderror(ret)); return false; } - if (pread(fd, &table, sizeof(table), - vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) { - error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE"); + ret = vfio_pci_config_space_read(vdev, pos + PCI_MSIX_TABLE, + sizeof(table), &table); + if (ret != sizeof(table)) { + error_setg(errp, "failed to read PCI MSIX TABLE: %s", + strreaderror(ret)); return false; } - if (pread(fd, &pba, sizeof(pba), - vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) { - error_setg_errno(errp, errno, "failed to read PCI MSIX PBA"); + ret = vfio_pci_config_space_read(vdev, pos + PCI_MSIX_PBA, + sizeof(pba), &pba); + if (ret != sizeof(pba)) { + error_setg(errp, "failed to read PCI MSIX PBA: %s", strreaderror(ret)); return false; } @@ -1592,7 +1763,8 @@ static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp) msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK; msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; - ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info); + ret = vfio_device_get_irq_info(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, + &irq_info); if (ret < 0) { error_setg_errno(errp, -ret, "failed to get MSI-X irq info"); g_free(msix); @@ -1624,7 +1796,7 @@ static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp) } else if (vfio_pci_is(vdev, PCI_VENDOR_ID_BAIDU, PCI_DEVICE_ID_KUNLUN_VF)) { msix->pba_offset = 0xb400; - } else if (vdev->msix_relo == OFF_AUTOPCIBAR_OFF) { + } else if (vdev->msix_relo == OFF_AUTO_PCIBAR_OFF) { error_setg(errp, "hardware reports invalid configuration, " "MSIX PBA outside of specified BAR"); g_free(msix); @@ -1699,7 +1871,7 @@ static bool vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp) return true; } -static void vfio_teardown_msi(VFIOPCIDevice *vdev) +void vfio_pci_teardown_msi(VFIOPCIDevice *vdev) { msi_uninit(&vdev->pdev); @@ -1736,10 +1908,10 @@ static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr) } /* Determine what type of BAR this is for registration */ - ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar), - vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr)); + ret = vfio_pci_config_space_read(vdev, PCI_BASE_ADDRESS_0 + (4 * nr), + sizeof(pci_bar), &pci_bar); if (ret != sizeof(pci_bar)) { - error_report("vfio: Failed to read BAR %d (%m)", nr); + error_report("vfio: Failed to read BAR %d: %s", nr, strreaderror(ret)); return; } @@ -1749,6 +1921,9 @@ static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr) 
bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK : ~PCI_BASE_ADDRESS_MEM_MASK); bar->size = bar->region.size; + + /* IO regions are sync, memory can be async */ + bar->region.post_wr = (bar->ioport == 0); } static void vfio_bars_prepare(VFIOPCIDevice *vdev) @@ -1795,7 +1970,7 @@ static void vfio_bars_register(VFIOPCIDevice *vdev) } } -static void vfio_bars_exit(VFIOPCIDevice *vdev) +void vfio_pci_bars_exit(VFIOPCIDevice *vdev) { int i; @@ -2216,8 +2391,12 @@ static bool vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp) break; case PCI_CAP_ID_PM: vfio_check_pm_reset(vdev, pos); - vdev->pm_cap = pos; - ret = pci_add_capability(pdev, cap_id, pos, size, errp) >= 0; + ret = pci_pm_init(pdev, pos, errp) >= 0; + /* + * PCI-core config space emulation needs write access to the power + * state enabled for tracking BAR mapping relative to PM state. + */ + pci_set_word(pdev->wmask + pos + PCI_PM_CTRL, PCI_PM_CTRL_STATE_MASK); break; case PCI_CAP_ID_AF: vfio_check_af_flr(vdev, pos); @@ -2291,6 +2470,11 @@ static int vfio_setup_rebar_ecap(VFIOPCIDevice *vdev, uint16_t pos) static void vfio_add_ext_cap(VFIOPCIDevice *vdev) { PCIDevice *pdev = &vdev->pdev; + HostIOMMUDevice *hiod = vdev->vbasedev.hiod; + HostIOMMUDeviceClass *hiodc = HOST_IOMMU_DEVICE_GET_CLASS(hiod); + uint8_t max_pasid_log2 = 0; + bool pasid_cap_added = false; + uint64_t hw_caps; uint32_t header; uint16_t cap_id, next, size; uint8_t cap_ver; @@ -2368,22 +2552,46 @@ static void vfio_add_ext_cap(VFIOPCIDevice *vdev) pcie_add_capability(pdev, cap_id, cap_ver, next, size); } break; + case PCI_EXT_CAP_ID_PASID: + pasid_cap_added = true; + /* fallthrough */ default: pcie_add_capability(pdev, cap_id, cap_ver, next, size); } } + /* + * If PCI_EXT_CAP_ID_PASID is not present, try to get the information from the host + */ + if (!pasid_cap_added && hiodc->get_pasid) { + max_pasid_log2 = hiodc->get_pasid(hiod, &hw_caps); + } + + /* + * If supported, adds the PASID capability at the end of the PCIe config + * space. TODO: Add option for enabling PASID at a safe offset. + */ + if (max_pasid_log2 && (pci_device_get_viommu_flags(pdev) & + VIOMMU_FLAG_PASID_SUPPORTED)) { + bool exec_perm = (hw_caps & IOMMU_HW_CAP_PCI_PASID_EXEC) ? true : false; + bool priv_mod = (hw_caps & IOMMU_HW_CAP_PCI_PASID_PRIV) ? true : false; + + pcie_pasid_init(pdev, PCIE_CONFIG_SPACE_SIZE - PCI_EXT_CAP_PASID_SIZEOF, + max_pasid_log2, exec_perm, priv_mod); + /* PASID capability is fully emulated by QEMU */ + memset(vdev->emulated_config_bits + pdev->exp.pasid_cap, 0xff, 8); + } + /* Cleanup chain head ID if necessary */ if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) { pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0); } g_free(config); - return; } -static bool vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp) +bool vfio_pci_add_capabilities(VFIOPCIDevice *vdev, Error **errp) { PCIDevice *pdev = &vdev->pdev; @@ -2407,18 +2615,27 @@ void vfio_pci_pre_reset(VFIOPCIDevice *vdev) vfio_disable_interrupts(vdev); + /* + * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master. + * Also put INTx Disable in known state.
+ */ + cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2); + cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + PCI_COMMAND_INTX_DISABLE); + vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2); + /* Make sure the device is in D0 */ - if (vdev->pm_cap) { + if (pdev->pm_cap) { uint16_t pmcsr; uint8_t state; - pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); + pmcsr = vfio_pci_read_config(pdev, pdev->pm_cap + PCI_PM_CTRL, 2); state = pmcsr & PCI_PM_CTRL_STATE_MASK; if (state) { pmcsr &= ~PCI_PM_CTRL_STATE_MASK; - vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2); + vfio_pci_write_config(pdev, pdev->pm_cap + PCI_PM_CTRL, pmcsr, 2); /* vfio handles the necessary delay here */ - pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); + pmcsr = vfio_pci_read_config(pdev, pdev->pm_cap + PCI_PM_CTRL, 2); state = pmcsr & PCI_PM_CTRL_STATE_MASK; if (state) { error_report("vfio: Unable to power on device, stuck in D%d", @@ -2426,34 +2643,27 @@ void vfio_pci_pre_reset(VFIOPCIDevice *vdev) } } } - - /* - * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master. - * Also put INTx Disable in known state. - */ - cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2); - cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | - PCI_COMMAND_INTX_DISABLE); - vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2); } void vfio_pci_post_reset(VFIOPCIDevice *vdev) { + VFIODevice *vbasedev = &vdev->vbasedev; Error *err = NULL; - int nr; + int ret, nr; if (!vfio_intx_enable(vdev, &err)) { error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); } for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) { - off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr); + off_t addr = PCI_BASE_ADDRESS_0 + (4 * nr); uint32_t val = 0; uint32_t len = sizeof(val); - if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) { - error_report("%s(%s) reset bar %d failed: %m", __func__, - vdev->vbasedev.name, nr); + ret = vfio_pci_config_space_write(vdev, addr, len, &val); + if (ret != len) { + error_report("%s(%s) reset bar %d failed: %s", __func__, + vbasedev->name, nr, strwriteerror(ret)); } } @@ -2654,10 +2864,24 @@ static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f) return ret; } +void vfio_sub_page_bar_update_mappings(VFIOPCIDevice *vdev) +{ + PCIDevice *pdev = &vdev->pdev; + int page_size = qemu_real_host_page_size(); + int bar; + + for (bar = 0; bar < PCI_ROM_SLOT; bar++) { + PCIIORegion *r = &pdev->io_regions[bar]; + if (r->addr != PCI_BAR_UNMAPPED && r->size > 0 && r->size < page_size) { + vfio_sub_page_bar_update_mapping(pdev, bar); + } + } +} + static VFIODeviceOps vfio_pci_ops = { .vfio_compute_needs_reset = vfio_pci_compute_needs_reset, .vfio_hot_reset_multi = vfio_pci_hot_reset_multi, - .vfio_eoi = vfio_intx_eoi, + .vfio_eoi = vfio_pci_intx_eoi, .vfio_get_object = vfio_pci_get_object, .vfio_save_config = vfio_pci_save_config, .vfio_load_config = vfio_pci_load_config, @@ -2666,10 +2890,10 @@ static VFIODeviceOps vfio_pci_ops = { bool vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp) { VFIODevice *vbasedev = &vdev->vbasedev; - g_autofree struct vfio_region_info *reg_info = NULL; + struct vfio_region_info *reg_info = NULL; int ret; - ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, ®_info); + ret = vfio_device_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, ®_info); if (ret) { error_setg_errno(errp, -ret, "failed getting region info for VGA region index %d", @@ -2721,18 +2945,14 @@ bool 
vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp) "vfio-vga-io@0x3c0", QEMU_PCI_VGA_IO_HI_SIZE); - pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem, - &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem, - &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem); - return true; } -static bool vfio_populate_device(VFIOPCIDevice *vdev, Error **errp) +bool vfio_pci_populate_device(VFIOPCIDevice *vdev, Error **errp) { VFIODevice *vbasedev = &vdev->vbasedev; - g_autofree struct vfio_region_info *reg_info = NULL; - struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) }; + struct vfio_region_info *reg_info = NULL; + struct vfio_irq_info irq_info; int i, ret = -1; /* Sanity check device */ @@ -2767,14 +2987,14 @@ static bool vfio_populate_device(VFIOPCIDevice *vdev, Error **errp) QLIST_INIT(&vdev->bars[i].quirks); } - ret = vfio_get_region_info(vbasedev, - VFIO_PCI_CONFIG_REGION_INDEX, ®_info); + ret = vfio_device_get_region_info(vbasedev, + VFIO_PCI_CONFIG_REGION_INDEX, ®_info); if (ret) { error_setg_errno(errp, -ret, "failed to get config info"); return false; } - trace_vfio_populate_device_config(vdev->vbasedev.name, + trace_vfio_pci_populate_device_config(vdev->vbasedev.name, (unsigned long)reg_info->size, (unsigned long)reg_info->offset, (unsigned long)reg_info->flags); @@ -2793,12 +3013,10 @@ static bool vfio_populate_device(VFIOPCIDevice *vdev, Error **errp) } } - irq_info.index = VFIO_PCI_ERR_IRQ_INDEX; - - ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info); + ret = vfio_device_get_irq_info(vbasedev, VFIO_PCI_ERR_IRQ_INDEX, &irq_info); if (ret) { /* This can fail for an old kernel or legacy PCI dev */ - trace_vfio_populate_device_get_irq_info_failure(strerror(errno)); + trace_vfio_pci_populate_device_get_irq_info_failure(strerror(-ret)); } else if (irq_info.count == 1) { vdev->pci_aer = true; } else { @@ -2810,11 +3028,24 @@ static bool vfio_populate_device(VFIOPCIDevice *vdev, Error **errp) return true; } -static void vfio_pci_put_device(VFIOPCIDevice *vdev) +void vfio_pci_put_device(VFIOPCIDevice *vdev) { - vfio_detach_device(&vdev->vbasedev); + vfio_display_finalize(vdev); + vfio_bars_finalize(vdev); + vfio_cpr_pci_unregister_device(vdev); + g_free(vdev->emulated_config_bits); + g_free(vdev->rom); + /* + * XXX Leaking igd_opregion is not an oversight, we can't remove the + * fw_cfg entry therefore leaking this allocation seems like the safest + * option. + * + * g_free(vdev->igd_opregion); + */ + + vfio_device_detach(&vdev->vbasedev); - g_free(vdev->vbasedev.name); + vfio_device_free_name(&vdev->vbasedev); g_free(vdev->msix); } @@ -2846,7 +3077,7 @@ static void vfio_err_notifier_handler(void *opaque) * and continue after disabling error recovery support for the * device. 
*/ -static void vfio_register_err_notifier(VFIOPCIDevice *vdev) +void vfio_pci_register_err_notifier(VFIOPCIDevice *vdev) { Error *err = NULL; int32_t fd; @@ -2855,8 +3086,9 @@ static void vfio_register_err_notifier(VFIOPCIDevice *vdev) return; } - if (event_notifier_init(&vdev->err_notifier, 0)) { - error_report("vfio: Unable to init event notifier for error detection"); + if (!vfio_notifier_init(vdev, &vdev->err_notifier, "err_notifier", 0, + &err)) { + error_report_err(err); vdev->pci_aer = false; return; } @@ -2864,11 +3096,16 @@ static void vfio_register_err_notifier(VFIOPCIDevice *vdev) fd = event_notifier_get_fd(&vdev->err_notifier); qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev); - if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0, - VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { + /* Do not alter irq_signaling during vfio_realize for cpr */ + if (cpr_is_incoming()) { + return; + } + + if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); qemu_set_fd_handler(fd, NULL, NULL, vdev); - event_notifier_cleanup(&vdev->err_notifier); + vfio_notifier_cleanup(vdev, &vdev->err_notifier, "err_notifier", 0); vdev->pci_aer = false; } } @@ -2881,13 +3118,13 @@ static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev) return; } - if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0, - VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) { + if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) { error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); } qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier), NULL, NULL, vdev); - event_notifier_cleanup(&vdev->err_notifier); + vfio_notifier_cleanup(vdev, &vdev->err_notifier, "err_notifier", 0); } static void vfio_req_notifier_handler(void *opaque) @@ -2905,35 +3142,43 @@ static void vfio_req_notifier_handler(void *opaque) } } -static void vfio_register_req_notifier(VFIOPCIDevice *vdev) +void vfio_pci_register_req_notifier(VFIOPCIDevice *vdev) { - struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info), - .index = VFIO_PCI_REQ_IRQ_INDEX }; + struct vfio_irq_info irq_info; Error *err = NULL; int32_t fd; + int ret; if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) { return; } - if (ioctl(vdev->vbasedev.fd, - VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) { + ret = vfio_device_get_irq_info(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, + &irq_info); + if (ret < 0 || irq_info.count < 1) { return; } - if (event_notifier_init(&vdev->req_notifier, 0)) { - error_report("vfio: Unable to init event notifier for device request"); + if (!vfio_notifier_init(vdev, &vdev->req_notifier, "req_notifier", 0, + &err)) { + error_report_err(err); return; } fd = event_notifier_get_fd(&vdev->req_notifier); qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev); - if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0, - VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { + /* Do not alter irq_signaling during vfio_realize for cpr */ + if (cpr_is_incoming()) { + vdev->req_enabled = true; + return; + } + + if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); qemu_set_fd_handler(fd, NULL, NULL, vdev); - event_notifier_cleanup(&vdev->req_notifier); + 
vfio_notifier_cleanup(vdev, &vdev->req_notifier, "req_notifier", 0); } else { vdev->req_enabled = true; } @@ -2947,87 +3192,42 @@ static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev) return; } - if (!vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0, - VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) { + if (!vfio_device_irq_set_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) { error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); } qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier), NULL, NULL, vdev); - event_notifier_cleanup(&vdev->req_notifier); + vfio_notifier_cleanup(vdev, &vdev->req_notifier, "req_notifier", 0); vdev->req_enabled = false; } -static void vfio_realize(PCIDevice *pdev, Error **errp) +void vfio_pci_config_register_vga(VFIOPCIDevice *vdev) { - ERRP_GUARD(); - VFIOPCIDevice *vdev = VFIO_PCI(pdev); - VFIODevice *vbasedev = &vdev->vbasedev; - int i, ret; - char uuid[UUID_STR_LEN]; - g_autofree char *name = NULL; - - if (vbasedev->fd < 0 && !vbasedev->sysfsdev) { - if (!(~vdev->host.domain || ~vdev->host.bus || - ~vdev->host.slot || ~vdev->host.function)) { - error_setg(errp, "No provided host device"); - error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F " -#ifdef CONFIG_IOMMUFD - "or -device vfio-pci,fd=DEVICE_FD " -#endif - "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n"); - return; - } - vbasedev->sysfsdev = - g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x", - vdev->host.domain, vdev->host.bus, - vdev->host.slot, vdev->host.function); - } - - if (!vfio_device_get_name(vbasedev, errp)) { - return; - } + assert(vdev->vga != NULL); - /* - * Mediated devices *might* operate compatibly with discarding of RAM, but - * we cannot know for certain, it depends on whether the mdev vendor driver - * stays in sync with the active working set of the guest driver. Prevent - * the x-balloon-allowed option unless this is minimally an mdev device. - */ - vbasedev->mdev = vfio_device_is_mdev(vbasedev); - - trace_vfio_mdev(vbasedev->name, vbasedev->mdev); - - if (vbasedev->ram_block_discard_allowed && !vbasedev->mdev) { - error_setg(errp, "x-balloon-allowed only potentially compatible " - "with mdev devices"); - goto error; - } - - if (!qemu_uuid_is_null(&vdev->vf_token)) { - qemu_uuid_unparse(&vdev->vf_token, uuid); - name = g_strdup_printf("%s vf_token=%s", vbasedev->name, uuid); - } else { - name = g_strdup(vbasedev->name); - } + pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem, + &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem, + &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem); +} - if (!vfio_attach_device(name, vbasedev, - pci_device_iommu_address_space(pdev), errp)) { - goto error; - } +bool vfio_pci_config_setup(VFIOPCIDevice *vdev, Error **errp) +{ + PCIDevice *pdev = &vdev->pdev; + VFIODevice *vbasedev = &vdev->vbasedev; + uint32_t config_space_size; + int ret; - if (!vfio_populate_device(vdev, errp)) { - goto error; - } + config_space_size = MIN(pci_config_size(&vdev->pdev), vdev->config_size); /* Get a copy of config space */ - ret = pread(vbasedev->fd, vdev->pdev.config, - MIN(pci_config_size(&vdev->pdev), vdev->config_size), - vdev->config_offset); - if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) { - ret = ret < 0 ? 
-errno : -EFAULT; - error_setg_errno(errp, -ret, "failed to read device config space"); - goto error; + ret = vfio_pci_config_space_read(vdev, 0, config_space_size, + vdev->pdev.config); + if (ret < (int)config_space_size) { + ret = ret < 0 ? -ret : EFAULT; + error_setg_errno(errp, ret, "failed to read device config space"); + return false; } /* vfio emulates a lot for us, but some bits need extra love */ @@ -3046,7 +3246,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) if (vdev->vendor_id != PCI_ANY_ID) { if (vdev->vendor_id >= 0xffff) { error_setg(errp, "invalid PCI vendor ID provided"); - goto error; + return false; } vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0); trace_vfio_pci_emulated_vendor_id(vbasedev->name, vdev->vendor_id); @@ -3057,7 +3257,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) if (vdev->device_id != PCI_ANY_ID) { if (vdev->device_id > 0xffff) { error_setg(errp, "invalid PCI device ID provided"); - goto error; + return false; } vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0); trace_vfio_pci_emulated_device_id(vbasedev->name, vdev->device_id); @@ -3068,7 +3268,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) if (vdev->sub_vendor_id != PCI_ANY_ID) { if (vdev->sub_vendor_id > 0xffff) { error_setg(errp, "invalid PCI subsystem vendor ID provided"); - goto error; + return false; } vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID, vdev->sub_vendor_id, ~0); @@ -3079,13 +3279,30 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) if (vdev->sub_device_id != PCI_ANY_ID) { if (vdev->sub_device_id > 0xffff) { error_setg(errp, "invalid PCI subsystem device ID provided"); - goto error; + return false; } vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0); trace_vfio_pci_emulated_sub_device_id(vbasedev->name, vdev->sub_device_id); } + /* + * Class code is a 24-bit value at config space 0x09. Allow overriding it + * with any 24-bit value. 
+ */ + if (vdev->class_code != PCI_ANY_ID) { + if (vdev->class_code > 0xffffff) { + error_setg(errp, "invalid PCI class code provided"); + return false; + } + /* Higher 24 bits of PCI_CLASS_REVISION are class code */ + vfio_add_emulated_long(vdev, PCI_CLASS_REVISION, + vdev->class_code << 8, ~0xff); + trace_vfio_pci_emulated_class_code(vbasedev->name, vdev->class_code); + } else { + vdev->class_code = pci_get_long(pdev->config + PCI_CLASS_REVISION) >> 8; + } + /* QEMU can change multi-function devices to single function, or reverse */ vdev->emulated_config_bits[PCI_HEADER_TYPE] = PCI_HEADER_TYPE_MULTI_FUNCTION; @@ -3110,53 +3327,21 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) vfio_bars_prepare(vdev); if (!vfio_msix_early_setup(vdev, errp)) { - goto error; + return false; } vfio_bars_register(vdev); - if (!vbasedev->mdev && - !pci_device_set_iommu_device(pdev, vbasedev->hiod, errp)) { - error_prepend(errp, "Failed to set iommu_device: "); - goto out_teardown; + if (vdev->vga && vfio_is_vga(vdev)) { + vfio_pci_config_register_vga(vdev); } - if (!vfio_add_capabilities(vdev, errp)) { - goto out_unset_idev; - } - - if (vdev->vga) { - vfio_vga_quirk_setup(vdev); - } - - for (i = 0; i < PCI_ROM_SLOT; i++) { - vfio_bar_quirk_setup(vdev, i); - } - - if (!vdev->igd_opregion && - vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) { - g_autofree struct vfio_region_info *opregion = NULL; - - if (vdev->pdev.qdev.hotplugged) { - error_setg(errp, - "cannot support IGD OpRegion feature on hotplugged " - "device"); - goto out_unset_idev; - } - - ret = vfio_get_dev_region_info(vbasedev, - VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL, - VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion); - if (ret) { - error_setg_errno(errp, -ret, - "does not support requested IGD OpRegion feature"); - goto out_unset_idev; - } + return true; +} - if (!vfio_pci_igd_opregion_init(vdev, opregion, errp)) { - goto out_unset_idev; - } - } +bool vfio_pci_interrupt_setup(VFIOPCIDevice *vdev, Error **errp) +{ + PCIDevice *pdev = &vdev->pdev; /* QEMU emulates all of MSI & MSIX */ if (pdev->cap_present & QEMU_PCI_CAP_MSIX) { @@ -3171,14 +3356,117 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) { vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, - vfio_intx_mmap_enable, vdev); + vfio_intx_mmap_enable, vdev); pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_routing_notifier); vdev->irqchip_change_notifier.notify = vfio_irqchip_change; kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier); - if (!vfio_intx_enable(vdev, errp)) { - goto out_deregister; + + /* + * During CPR, do not call vfio_intx_enable at this time. Instead, + * call it from vfio_pci_post_load after the intx routing data has + * been loaded from vmstate. 
+ */ + if (!cpr_is_incoming() && !vfio_intx_enable(vdev, errp)) { + timer_free(vdev->intx.mmap_timer); + pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); + kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); + return false; + } + } + return true; +} + +static void vfio_pci_realize(PCIDevice *pdev, Error **errp) +{ + ERRP_GUARD(); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); + VFIODevice *vbasedev = &vdev->vbasedev; + int i; + char uuid[UUID_STR_LEN]; + g_autofree char *name = NULL; + + if (vbasedev->fd < 0 && !vbasedev->sysfsdev) { + if (!(~vdev->host.domain || ~vdev->host.bus || + ~vdev->host.slot || ~vdev->host.function)) { + error_setg(errp, "No provided host device"); + error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F " +#ifdef CONFIG_IOMMUFD + "or -device vfio-pci,fd=DEVICE_FD " +#endif + "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n"); + return; } + vbasedev->sysfsdev = + g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x", + vdev->host.domain, vdev->host.bus, + vdev->host.slot, vdev->host.function); + } + + if (!vfio_device_get_name(vbasedev, errp)) { + return; + } + + /* + * Mediated devices *might* operate compatibly with discarding of RAM, but + * we cannot know for certain, it depends on whether the mdev vendor driver + * stays in sync with the active working set of the guest driver. Prevent + * the x-balloon-allowed option unless this is minimally an mdev device. + */ + vbasedev->mdev = vfio_device_is_mdev(vbasedev); + + trace_vfio_mdev(vbasedev->name, vbasedev->mdev); + + if (vbasedev->ram_block_discard_allowed && !vbasedev->mdev) { + error_setg(errp, "x-balloon-allowed only potentially compatible " + "with mdev devices"); + goto error; + } + + if (!qemu_uuid_is_null(&vdev->vf_token)) { + qemu_uuid_unparse(&vdev->vf_token, uuid); + name = g_strdup_printf("%s vf_token=%s", vbasedev->name, uuid); + } else { + name = g_strdup(vbasedev->name); + } + + if (!vfio_device_attach(name, vbasedev, + pci_device_iommu_address_space(pdev), errp)) { + goto error; + } + + if (!vfio_pci_populate_device(vdev, errp)) { + goto error; + } + + if (!vfio_pci_config_setup(vdev, errp)) { + goto error; + } + + if (!vbasedev->mdev && + !pci_device_set_iommu_device(pdev, vbasedev->hiod, errp)) { + error_prepend(errp, "Failed to set vIOMMU: "); + goto out_teardown; + } + + if (!vfio_pci_add_capabilities(vdev, errp)) { + goto out_unset_idev; + } + + if (!vfio_config_quirk_setup(vdev, errp)) { + goto out_unset_idev; + } + + if (vdev->vga) { + vfio_vga_quirk_setup(vdev); + } + + for (i = 0; i < PCI_ROM_SLOT; i++) { + vfio_bar_quirk_setup(vdev, i); + } + + if (!vfio_pci_interrupt_setup(vdev, errp)) { + goto out_unset_idev; } if (vdev->display != ON_OFF_AUTO_OFF) { @@ -3221,9 +3509,10 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) } } - vfio_register_err_notifier(vdev); - vfio_register_req_notifier(vdev); + vfio_pci_register_err_notifier(vdev); + vfio_pci_register_req_notifier(vdev); vfio_setup_resetfn_quirk(vdev); + vfio_cpr_pci_register_device(vdev); return; @@ -3243,33 +3532,22 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) pci_device_unset_iommu_device(pdev); } out_teardown: - vfio_teardown_msi(vdev); - vfio_bars_exit(vdev); + vfio_pci_teardown_msi(vdev); + vfio_pci_bars_exit(vdev); error: error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->name); } static void vfio_instance_finalize(Object *obj) { - VFIOPCIDevice *vdev = VFIO_PCI(obj); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj); - vfio_display_finalize(vdev); - vfio_bars_finalize(vdev); - 
g_free(vdev->emulated_config_bits); - g_free(vdev->rom); - /* - * XXX Leaking igd_opregion is not an oversight, we can't remove the - * fw_cfg entry therefore leaking this allocation seems like the safest - * option. - * - * g_free(vdev->igd_opregion); - */ vfio_pci_put_device(vdev); } static void vfio_exitfn(PCIDevice *pdev) { - VFIOPCIDevice *vdev = VFIO_PCI(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); VFIODevice *vbasedev = &vdev->vbasedev; vfio_unregister_req_notifier(vdev); @@ -3282,9 +3560,9 @@ static void vfio_exitfn(PCIDevice *pdev) if (vdev->intx.mmap_timer) { timer_free(vdev->intx.mmap_timer); } - vfio_teardown_msi(vdev); + vfio_pci_teardown_msi(vdev); vfio_pci_disable_rp_atomics(vdev); - vfio_bars_exit(vdev); + vfio_pci_bars_exit(vdev); vfio_migration_exit(vbasedev); if (!vbasedev->mdev) { pci_device_unset_iommu_device(pdev); @@ -3293,7 +3571,12 @@ static void vfio_exitfn(PCIDevice *pdev) static void vfio_pci_reset(DeviceState *dev) { - VFIOPCIDevice *vdev = VFIO_PCI(dev); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(dev); + + /* Do not reset the device during qemu_system_reset prior to cpr load */ + if (cpr_is_incoming()) { + return; + } trace_vfio_pci_reset(vdev->vbasedev.name); @@ -3333,7 +3616,7 @@ static void vfio_pci_reset(DeviceState *dev) static void vfio_instance_init(Object *obj) { PCIDevice *pci_dev = PCI_DEVICE(obj); - VFIOPCIDevice *vdev = VFIO_PCI(obj); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj); VFIODevice *vbasedev = &vdev->vbasedev; device_add_bootindex_property(obj, &vdev->bootindex, @@ -3352,9 +3635,43 @@ static void vfio_instance_init(Object *obj) /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command * line, therefore, no need to wait to realize like other devices */ pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; + + /* + * A device that is resuming for cpr is already configured, so do not + * reset it during qemu_system_reset prior to cpr load, else interrupts + * may be lost. 
+ */ + pci_dev->cap_present |= QEMU_PCI_SKIP_RESET_ON_CPR; +} + +static void vfio_pci_base_dev_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass); + + dc->desc = "VFIO PCI base device"; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + pdc->exit = vfio_exitfn; + pdc->config_read = vfio_pci_read_config; + pdc->config_write = vfio_pci_write_config; } -static Property vfio_pci_dev_properties[] = { +static const TypeInfo vfio_pci_base_dev_info = { + .name = TYPE_VFIO_PCI_BASE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(VFIOPCIDevice), + .abstract = true, + .class_init = vfio_pci_base_dev_class_init, + .interfaces = (const InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + }, +}; + +static PropertyInfo vfio_pci_migration_multifd_transfer_prop; + +static const Property vfio_pci_dev_properties[] = { DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host), DEFINE_PROP_UUID_NODEFAULT("vf-token", VFIOPCIDevice, vf_token), DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev), @@ -3375,9 +3692,22 @@ static Property vfio_pci_dev_properties[] = { DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features, VFIO_FEATURE_ENABLE_REQ_BIT, true), DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features, - VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false), + VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, true), + DEFINE_PROP_BIT("x-igd-lpc", VFIOPCIDevice, features, + VFIO_FEATURE_ENABLE_IGD_LPC_BIT, false), + DEFINE_PROP_ON_OFF_AUTO("x-igd-legacy-mode", VFIOPCIDevice, + igd_legacy_mode, ON_OFF_AUTO_AUTO), DEFINE_PROP_ON_OFF_AUTO("enable-migration", VFIOPCIDevice, vbasedev.enable_migration, ON_OFF_AUTO_AUTO), + DEFINE_PROP("x-migration-multifd-transfer", VFIOPCIDevice, + vbasedev.migration_multifd_transfer, + vfio_pci_migration_multifd_transfer_prop, OnOffAuto, + .set_default = true, .defval.i = ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO("x-migration-load-config-after-iter", VFIOPCIDevice, + vbasedev.migration_load_config_after_iter, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_SIZE("x-migration-max-queued-buffers-size", VFIOPCIDevice, + vbasedev.migration_max_queued_buffers_size, UINT64_MAX), DEFINE_PROP_BOOL("migration-events", VFIOPCIDevice, vbasedev.migration_events, false), DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false), @@ -3398,72 +3728,213 @@ static Property vfio_pci_dev_properties[] = { sub_vendor_id, PCI_ANY_ID), DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice, sub_device_id, PCI_ANY_ID), + DEFINE_PROP_UINT32("x-pci-class-code", VFIOPCIDevice, + class_code, PCI_ANY_ID), DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0), DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice, nv_gpudirect_clique, qdev_prop_nv_gpudirect_clique, uint8_t), DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo, - OFF_AUTOPCIBAR_OFF), + OFF_AUTO_PCIBAR_OFF), #ifdef CONFIG_IOMMUFD DEFINE_PROP_LINK("iommufd", VFIOPCIDevice, vbasedev.iommufd, TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *), #endif DEFINE_PROP_BOOL("skip-vsc-check", VFIOPCIDevice, skip_vsc_check, true), - DEFINE_PROP_END_OF_LIST(), }; #ifdef CONFIG_IOMMUFD static void vfio_pci_set_fd(Object *obj, const char *str, Error **errp) { - vfio_device_set_fd(&VFIO_PCI(obj)->vbasedev, str, errp); + VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj); + vfio_device_set_fd(&vdev->vbasedev, str, errp); } #endif -static void vfio_pci_dev_class_init(ObjectClass *klass, void *data) +static 
void vfio_pci_dev_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass); - dc->reset = vfio_pci_reset; + device_class_set_legacy_reset(dc, vfio_pci_reset); device_class_set_props(dc, vfio_pci_dev_properties); #ifdef CONFIG_IOMMUFD object_class_property_add_str(klass, "fd", NULL, vfio_pci_set_fd); #endif + dc->vmsd = &vfio_cpr_pci_vmstate; dc->desc = "VFIO-based PCI device assignment"; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); - pdc->realize = vfio_realize; - pdc->exit = vfio_exitfn; - pdc->config_read = vfio_pci_read_config; - pdc->config_write = vfio_pci_write_config; + pdc->realize = vfio_pci_realize; + + object_class_property_set_description(klass, /* 1.3 */ + "host", + "Host PCI address [domain:] of assigned device"); + object_class_property_set_description(klass, /* 1.3 */ + "x-intx-mmap-timeout-ms", + "When EOI is not provided by KVM/QEMU, wait time " + "(milliseconds) to re-enable device direct access " + "after INTx (DEBUG)"); + object_class_property_set_description(klass, /* 1.5 */ + "x-vga", + "Expose VGA address spaces for device"); + object_class_property_set_description(klass, /* 2.3 */ + "x-req", + "Disable device request notification support (DEBUG)"); + object_class_property_set_description(klass, /* 2.4 and 2.5 */ + "x-no-mmap", + "Disable MMAP for device. Allows to trace MMIO " + "accesses (DEBUG)"); + object_class_property_set_description(klass, /* 2.5 */ + "x-no-kvm-intx", + "Disable direct VFIO->KVM INTx injection. Allows to " + "trace INTx interrupts (DEBUG)"); + object_class_property_set_description(klass, /* 2.5 */ + "x-no-kvm-msi", + "Disable direct VFIO->KVM MSI injection. Allows to " + "trace MSI interrupts (DEBUG)"); + object_class_property_set_description(klass, /* 2.5 */ + "x-no-kvm-msix", + "Disable direct VFIO->KVM MSIx injection. Allows to " + "trace MSIx interrupts (DEBUG)"); + object_class_property_set_description(klass, /* 2.5 */ + "x-pci-vendor-id", + "Override PCI Vendor ID with provided value (DEBUG)"); + object_class_property_set_description(klass, /* 2.5 */ + "x-pci-device-id", + "Override PCI device ID with provided value (DEBUG)"); + object_class_property_set_description(klass, /* 2.5 */ + "x-pci-sub-vendor-id", + "Override PCI Subsystem Vendor ID with provided value " + "(DEBUG)"); + object_class_property_set_description(klass, /* 2.5 */ + "x-pci-sub-device-id", + "Override PCI Subsystem Device ID with provided value " + "(DEBUG)"); + object_class_property_set_description(klass, /* 2.6 */ + "sysfsdev", + "Host sysfs path of assigned device"); + object_class_property_set_description(klass, /* 2.7 */ + "x-igd-opregion", + "Expose host IGD OpRegion to guest"); + object_class_property_set_description(klass, /* 2.7 (See c4c45e943e51) */ + "x-igd-gms", + "Override IGD data stolen memory size (32MiB units)"); + object_class_property_set_description(klass, /* 2.11 */ + "x-nv-gpudirect-clique", + "Add NVIDIA GPUDirect capability indicating P2P DMA " + "clique for device [0-15]"); + object_class_property_set_description(klass, /* 2.12 */ + "x-no-geforce-quirks", + "Disable GeForce quirks (for NVIDIA Quadro/GRID/Tesla). " + "Improves performance"); + object_class_property_set_description(klass, /* 2.12 */ + "display", + "Enable display support for device, ex. 
vGPU"); + object_class_property_set_description(klass, /* 2.12 */ + "x-msix-relocation", + "Specify MSI-X MMIO relocation to the end of specified " + "existing BAR or new BAR to avoid virtualization overhead " + "due to adjacent device registers"); + object_class_property_set_description(klass, /* 3.0 */ + "x-no-kvm-ioeventfd", + "Disable registration of ioeventfds with KVM (DEBUG)"); + object_class_property_set_description(klass, /* 3.0 */ + "x-no-vfio-ioeventfd", + "Disable linking of KVM ioeventfds to VFIO ioeventfds " + "(DEBUG)"); + object_class_property_set_description(klass, /* 3.1 */ + "x-balloon-allowed", + "Override allowing ballooning with device (DEBUG, DANGER)"); + object_class_property_set_description(klass, /* 3.2 */ + "xres", + "Set X display resolution the vGPU should use"); + object_class_property_set_description(klass, /* 3.2 */ + "yres", + "Set Y display resolution the vGPU should use"); + object_class_property_set_description(klass, /* 5.2 */ + "x-pre-copy-dirty-page-tracking", + "Disable dirty pages tracking during iterative phase " + "(DEBUG)"); + object_class_property_set_description(klass, /* 5.2, 8.0 non-experimetal */ + "enable-migration", + "Enale device migration. Also requires a host VFIO PCI " + "variant or mdev driver with migration support enabled"); + object_class_property_set_description(klass, /* 8.1 */ + "vf-token", + "Specify UUID VF token. Required for VF when PF is owned " + "by another VFIO driver"); +#ifdef CONFIG_IOMMUFD + object_class_property_set_description(klass, /* 9.0 */ + "iommufd", + "Set host IOMMUFD backend device"); +#endif + object_class_property_set_description(klass, /* 9.1 */ + "x-device-dirty-page-tracking", + "Disable device dirty page tracking and use " + "container-based dirty page tracking"); + object_class_property_set_description(klass, /* 9.1 */ + "migration-events", + "Emit VFIO migration QAPI event when a VFIO device " + "changes its migration state. For management applications"); + object_class_property_set_description(klass, /* 9.1 */ + "skip-vsc-check", + "Skip config space check for Vendor Specific Capability. 
" + "Setting to false will enforce strict checking of VSC content " + "(DEBUG)"); + object_class_property_set_description(klass, /* 10.0 */ + "x-migration-multifd-transfer", + "Transfer this device state via " + "multifd channels when live migrating it"); + object_class_property_set_description(klass, /* 10.1 */ + "x-migration-load-config-after-iter", + "Start the config load only after " + "all iterables were loaded (during " + "non-iterables loading phase) when " + "doing live migration of device state " + "via multifd channels"); + object_class_property_set_description(klass, /* 10.1 */ + "x-migration-max-queued-buffers-size", + "Maximum size of in-flight VFIO " + "device state buffers queued at the " + "destination when doing live " + "migration of device state via " + "multifd channels"); } static const TypeInfo vfio_pci_dev_info = { .name = TYPE_VFIO_PCI, - .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(VFIOPCIDevice), + .parent = TYPE_VFIO_PCI_BASE, .class_init = vfio_pci_dev_class_init, .instance_init = vfio_instance_init, .instance_finalize = vfio_instance_finalize, - .interfaces = (InterfaceInfo[]) { - { INTERFACE_PCIE_DEVICE }, - { INTERFACE_CONVENTIONAL_PCI_DEVICE }, - { } - }, }; -static Property vfio_pci_dev_nohotplug_properties[] = { +static const Property vfio_pci_dev_nohotplug_properties[] = { DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false), + DEFINE_PROP_BOOL("use-legacy-x86-rom", VFIOPCIDevice, + use_legacy_x86_rom, false), DEFINE_PROP_ON_OFF_AUTO("x-ramfb-migrate", VFIOPCIDevice, ramfb_migrate, ON_OFF_AUTO_AUTO), - DEFINE_PROP_END_OF_LIST(), }; -static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, void *data) +static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, vfio_pci_dev_nohotplug_properties); dc->hotpluggable = false; + + object_class_property_set_description(klass, /* 3.1 */ + "ramfb", + "Enable ramfb to provide pre-boot graphics for devices " + "enabling display option"); + object_class_property_set_description(klass, /* 8.2 */ + "x-ramfb-migrate", + "Override default migration support for ramfb support " + "(DEBUG)"); + object_class_property_set_description(klass, /* 10.1 */ + "use-legacy-x86-rom", + "Controls loading of a legacy VGA BIOS ROM"); } static const TypeInfo vfio_pci_nohotplug_dev_info = { @@ -3475,6 +3946,18 @@ static const TypeInfo vfio_pci_nohotplug_dev_info = { static void register_vfio_pci_dev_type(void) { + /* + * Ordinary ON_OFF_AUTO property isn't runtime-mutable, but source VM can + * run for a long time before being migrated so it is desirable to have a + * fallback mechanism to the old way of transferring VFIO device state if + * it turns to be necessary. + * The following makes this type of property have the same mutability level + * as ordinary migration parameters. 
+ */ + vfio_pci_migration_multifd_transfer_prop = qdev_prop_on_off_auto; + vfio_pci_migration_multifd_transfer_prop.realized_set_allowed = true; + + type_register_static(&vfio_pci_base_dev_info); type_register_static(&vfio_pci_dev_info); type_register_static(&vfio_pci_nohotplug_dev_info); } diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h index bf67df2fbc0..810a842f4a1 100644 --- a/hw/vfio/pci.h +++ b/hw/vfio/pci.h @@ -12,14 +12,17 @@ #ifndef HW_VFIO_VFIO_PCI_H #define HW_VFIO_VFIO_PCI_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/pci/pci_device.h" -#include "hw/vfio/vfio-common.h" +#include "hw/vfio/types.h" +#include "hw/vfio/vfio-device.h" +#include "hw/vfio/vfio-region.h" #include "qemu/event_notifier.h" #include "qemu/queue.h" #include "qemu/timer.h" #include "qom/object.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" +#include "vfio-display.h" #define PCI_ANY_ID (~0) @@ -114,10 +117,10 @@ typedef struct VFIOMSIXInfo { uint32_t pba_offset; unsigned long *pending; bool noresize; + MemoryRegion *pba_region; } VFIOMSIXInfo; -#define TYPE_VFIO_PCI "vfio-pci" -OBJECT_DECLARE_SIMPLE_TYPE(VFIOPCIDevice, VFIO_PCI) +OBJECT_DECLARE_SIMPLE_TYPE(VFIOPCIDevice, VFIO_PCI_BASE) struct VFIOPCIDevice { PCIDevice pdev; @@ -146,6 +149,7 @@ struct VFIOPCIDevice { uint32_t device_id; uint32_t sub_vendor_id; uint32_t sub_device_id; + uint32_t class_code; uint32_t features; #define VFIO_FEATURE_ENABLE_VGA_BIT 0 #define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT) @@ -154,13 +158,16 @@ struct VFIOPCIDevice { #define VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT 2 #define VFIO_FEATURE_ENABLE_IGD_OPREGION \ (1 << VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT) +#define VFIO_FEATURE_ENABLE_IGD_LPC_BIT 3 +#define VFIO_FEATURE_ENABLE_IGD_LPC \ + (1 << VFIO_FEATURE_ENABLE_IGD_LPC_BIT) OnOffAuto display; uint32_t display_xres; uint32_t display_yres; int32_t bootindex; + OnOffAuto igd_legacy_mode; uint32_t igd_gms; OffAutoPCIBAR msix_relo; - uint8_t pm_cap; uint8_t nv_gpudirect_clique; bool pci_aer; bool req_enabled; @@ -174,12 +181,14 @@ struct VFIOPCIDevice { bool no_kvm_ioeventfd; bool no_vfio_ioeventfd; bool enable_ramfb; + bool use_legacy_x86_rom; OnOffAuto ramfb_migrate; bool defer_kvm_irq_routing; bool clear_parent_atomics_on_exit; bool skip_vsc_check; VFIODisplay *dpy; Notifier irqchip_change_notifier; + VFIOPCICPR cpr; }; /* Use uin32_t for vendor & device so PCI_ANY_ID expands and cannot match hw */ @@ -191,12 +200,20 @@ static inline bool vfio_pci_is(VFIOPCIDevice *vdev, uint32_t vendor, uint32_t de static inline bool vfio_is_vga(VFIOPCIDevice *vdev) { - PCIDevice *pdev = &vdev->pdev; - uint16_t class = pci_get_word(pdev->config + PCI_CLASS_DEVICE); - - return class == PCI_CLASS_DISPLAY_VGA; + return (vdev->class_code >> 8) == PCI_CLASS_DISPLAY_VGA; } +/* MSI/MSI-X/INTx */ +void vfio_pci_vector_init(VFIOPCIDevice *vdev, int nr); +void vfio_pci_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector, + int vector_n, bool msix); +void vfio_pci_prepare_kvm_msi_virq_batch(VFIOPCIDevice *vdev); +void vfio_pci_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev); +bool vfio_pci_intx_enable(VFIOPCIDevice *vdev, Error **errp); +void vfio_pci_intx_set_handler(VFIOPCIDevice *vdev, bool enable); +void vfio_pci_msix_set_notifiers(VFIOPCIDevice *vdev); +void vfio_pci_msi_set_handler(VFIOPCIDevice *vdev, int nr, bool enable); + uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len); void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr, uint32_t val, int len); @@ -204,7 +221,9 @@ void 
vfio_pci_write_config(PCIDevice *pdev, uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size); void vfio_vga_write(void *opaque, hwaddr addr, uint64_t data, unsigned size); +void vfio_sub_page_bar_update_mappings(VFIOPCIDevice *vdev); bool vfio_opt_rom_in_denylist(VFIOPCIDevice *vdev); +bool vfio_config_quirk_setup(VFIOPCIDevice *vdev, Error **errp); void vfio_vga_quirk_setup(VFIOPCIDevice *vdev); void vfio_vga_quirk_exit(VFIOPCIDevice *vdev); void vfio_vga_quirk_finalize(VFIOPCIDevice *vdev); @@ -215,7 +234,8 @@ void vfio_setup_resetfn_quirk(VFIOPCIDevice *vdev); bool vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp); void vfio_quirk_reset(VFIOPCIDevice *vdev); VFIOQuirk *vfio_quirk_alloc(int nr_mem); -void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr); +void vfio_probe_igd_bar0_quirk(VFIOPCIDevice *vdev, int nr); +bool vfio_probe_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp); extern const PropertyInfo qdev_prop_nv_gpudirect_clique; @@ -227,14 +247,22 @@ int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev, bool vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp); -bool vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev, - struct vfio_region_info *info, - Error **errp); - void vfio_display_reset(VFIOPCIDevice *vdev); bool vfio_display_probe(VFIOPCIDevice *vdev, Error **errp); void vfio_display_finalize(VFIOPCIDevice *vdev); extern const VMStateDescription vfio_display_vmstate; +void vfio_pci_bars_exit(VFIOPCIDevice *vdev); +bool vfio_pci_add_capabilities(VFIOPCIDevice *vdev, Error **errp); +void vfio_pci_config_register_vga(VFIOPCIDevice *vdev); +bool vfio_pci_config_setup(VFIOPCIDevice *vdev, Error **errp); +bool vfio_pci_interrupt_setup(VFIOPCIDevice *vdev, Error **errp); +void vfio_pci_intx_eoi(VFIODevice *vbasedev); +void vfio_pci_put_device(VFIOPCIDevice *vdev); +bool vfio_pci_populate_device(VFIOPCIDevice *vdev, Error **errp); +void vfio_pci_register_err_notifier(VFIOPCIDevice *vdev); +void vfio_pci_register_req_notifier(VFIOPCIDevice *vdev); +void vfio_pci_teardown_msi(VFIOPCIDevice *vdev); + #endif /* HW_VFIO_VFIO_PCI_H */ diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c index a85c199c76a..5c1795a26fe 100644 --- a/hw/vfio/platform.c +++ b/hw/vfio/platform.c @@ -21,22 +21,23 @@ #include #include "hw/vfio/vfio-platform.h" -#include "sysemu/iommufd.h" +#include "system/iommufd.h" #include "migration/vmstate.h" #include "qemu/error-report.h" #include "qemu/lockable.h" #include "qemu/main-loop.h" #include "qemu/module.h" #include "qemu/range.h" -#include "exec/memory.h" -#include "exec/address-spaces.h" +#include "system/memory.h" +#include "system/address-spaces.h" #include "qemu/queue.h" #include "hw/sysbus.h" #include "trace.h" #include "hw/irq.h" #include "hw/platform-bus.h" #include "hw/qdev-properties.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" +#include "hw/vfio/vfio-region.h" /* * Functions used whatever the injection method @@ -118,8 +119,8 @@ static int vfio_set_trigger_eventfd(VFIOINTp *intp, qemu_set_fd_handler(fd, (IOHandler *)handler, NULL, intp); - if (!vfio_set_irq_signaling(vbasedev, intp->pin, 0, - VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { + if (!vfio_device_irq_set_signaling(vbasedev, intp->pin, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name); qemu_set_fd_handler(fd, NULL, NULL, NULL); return -EINVAL; @@ -300,7 +301,7 @@ static void vfio_platform_eoi(VFIODevice *vbasedev) if (vfio_irq_is_automasked(intp)) { /* unmasks the physical level-sensitive IRQ */ - 
vfio_unmask_single_irqindex(vbasedev, intp->pin); + vfio_device_irq_unmask(vbasedev, intp->pin); } /* a single IRQ can be active at a time */ @@ -356,8 +357,8 @@ static int vfio_set_resample_eventfd(VFIOINTp *intp) Error *err = NULL; qemu_set_fd_handler(fd, NULL, NULL, NULL); - if (!vfio_set_irq_signaling(vbasedev, intp->pin, 0, - VFIO_IRQ_SET_ACTION_UNMASK, fd, &err)) { + if (!vfio_device_irq_set_signaling(vbasedev, intp->pin, 0, + VFIO_IRQ_SET_ACTION_UNMASK, fd, &err)) { error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name); return -EINVAL; } @@ -418,7 +419,6 @@ static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq) abort(); fail_irqfd: vfio_start_eventfd_injection(sbdev, irq); - return; } /* VFIO skeleton */ @@ -474,10 +474,10 @@ static bool vfio_populate_device(VFIODevice *vbasedev, Error **errp) QSIMPLEQ_INIT(&vdev->pending_intp_queue); for (i = 0; i < vbasedev->num_irqs; i++) { - struct vfio_irq_info irq = { .argsz = sizeof(irq) }; + struct vfio_irq_info irq; + + ret = vfio_device_get_irq_info(vbasedev, i, &irq); - irq.index = i; - ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq); if (ret) { error_setg_errno(errp, -ret, "failed to get device irq info"); goto irq_err; @@ -530,7 +530,7 @@ static bool vfio_base_device_init(VFIODevice *vbasedev, Error **errp) { /* @fd takes precedence over @sysfsdev which takes precedence over @host */ if (vbasedev->fd < 0 && vbasedev->sysfsdev) { - g_free(vbasedev->name); + vfio_device_free_name(vbasedev); vbasedev->name = g_path_get_basename(vbasedev->sysfsdev); } else if (vbasedev->fd < 0) { if (!vbasedev->name || strchr(vbasedev->name, '/')) { @@ -546,7 +546,7 @@ static bool vfio_base_device_init(VFIODevice *vbasedev, Error **errp) return false; } - if (!vfio_attach_device(vbasedev->name, vbasedev, + if (!vfio_device_attach(vbasedev->name, vbasedev, &address_space_memory, errp)) { return false; } @@ -555,7 +555,7 @@ static bool vfio_base_device_init(VFIODevice *vbasedev, Error **errp) return true; } - vfio_detach_device(vbasedev); + vfio_device_detach(vbasedev); return false; } @@ -575,6 +575,7 @@ static void vfio_platform_realize(DeviceState *dev, Error **errp) VFIODevice *vbasedev = &vdev->vbasedev; int i; + warn_report("-device vfio-platform is deprecated"); qemu_mutex_init(&vdev->intp_mutex); trace_vfio_platform_realize(vbasedev->sysfsdev ? 
@@ -629,7 +630,7 @@ static const VMStateDescription vfio_platform_vmstate = { .unmigratable = 1, }; -static Property vfio_platform_dev_properties[] = { +static const Property vfio_platform_dev_properties[] = { DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name), DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev), DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false), @@ -640,7 +641,6 @@ static Property vfio_platform_dev_properties[] = { DEFINE_PROP_LINK("iommufd", VFIOPlatformDevice, vbasedev.iommufd, TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *), #endif - DEFINE_PROP_END_OF_LIST(), }; static void vfio_platform_instance_init(Object *obj) @@ -659,7 +659,7 @@ static void vfio_platform_set_fd(Object *obj, const char *str, Error **errp) } #endif -static void vfio_platform_class_init(ObjectClass *klass, void *data) +static void vfio_platform_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass); @@ -673,13 +673,35 @@ static void vfio_platform_class_init(ObjectClass *klass, void *data) dc->desc = "VFIO-based platform device assignment"; sbc->connect_irq_notifier = vfio_start_irqfd_injection; set_bit(DEVICE_CATEGORY_MISC, dc->categories); - /* Supported by TYPE_VIRT_MACHINE */ - dc->user_creatable = true; + + object_class_property_set_description(klass, /* 2.4 */ + "host", + "Host device name of assigned device"); + object_class_property_set_description(klass, /* 2.4 and 2.5 */ + "x-no-mmap", + "Disable MMAP for device. Allows to trace MMIO " + "accesses (DEBUG)"); + object_class_property_set_description(klass, /* 2.4 */ + "mmap-timeout-ms", + "When EOI is not provided by KVM/QEMU, wait time " + "(milliseconds) to re-enable device direct access " + "after level interrupt (DEBUG)"); + object_class_property_set_description(klass, /* 2.4 */ + "x-irqfd", + "Allow disabling irqfd support (DEBUG)"); + object_class_property_set_description(klass, /* 2.6 */ + "sysfsdev", + "Host sysfs path of assigned device"); +#ifdef CONFIG_IOMMUFD + object_class_property_set_description(klass, /* 9.0 */ + "iommufd", + "Set host IOMMUFD backend device"); +#endif } static const TypeInfo vfio_platform_dev_info = { .name = TYPE_VFIO_PLATFORM, - .parent = TYPE_SYS_BUS_DEVICE, + .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE, .instance_size = sizeof(VFIOPlatformDevice), .instance_init = vfio_platform_instance_init, .class_init = vfio_platform_class_init, diff --git a/hw/vfio/region.c b/hw/vfio/region.c new file mode 100644 index 00000000000..1649e409eff --- /dev/null +++ b/hw/vfio/region.c @@ -0,0 +1,403 @@ +/* + * VFIO regions + * + * Copyright Red Hat, Inc. 2012 + * + * Authors: + * Alex Williamson + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Based on qemu-kvm device-assignment: + * Adapted for KVM by Qumranet. 
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com) + * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com) + * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com) + * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com) + * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com) + */ + +#include "qemu/osdep.h" +#include + +#include "hw/vfio/vfio-region.h" +#include "hw/vfio/vfio-device.h" +#include "hw/hw.h" +#include "trace.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/units.h" +#include "monitor/monitor.h" +#include "vfio-helpers.h" + +/* + * IO Port/MMIO - Beware of the endians, VFIO is always little endian + */ +void vfio_region_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIORegion *region = opaque; + VFIODevice *vbasedev = region->vbasedev; + union { + uint8_t byte; + uint16_t word; + uint32_t dword; + uint64_t qword; + } buf; + int ret; + + switch (size) { + case 1: + buf.byte = data; + break; + case 2: + buf.word = cpu_to_le16(data); + break; + case 4: + buf.dword = cpu_to_le32(data); + break; + case 8: + buf.qword = cpu_to_le64(data); + break; + default: + hw_error("vfio: unsupported write size, %u bytes", size); + break; + } + + ret = vbasedev->io_ops->region_write(vbasedev, region->nr, + addr, size, &buf, region->post_wr); + if (ret != size) { + error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64 + ",%d) failed: %s", + __func__, vbasedev->name, region->nr, + addr, data, size, strwriteerror(ret)); + } + + trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size); + + /* + * A read or write to a BAR always signals an INTx EOI. This will + * do nothing if not pending (including not in INTx mode). We assume + * that a BAR access is in response to an interrupt and that BAR + * accesses will service the interrupt. Unfortunately, we don't know + * which access will service the interrupt, so we're potentially + * getting quite a few host interrupts per guest interrupt. 
+ */ + vbasedev->ops->vfio_eoi(vbasedev); +} + +uint64_t vfio_region_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIORegion *region = opaque; + VFIODevice *vbasedev = region->vbasedev; + union { + uint8_t byte; + uint16_t word; + uint32_t dword; + uint64_t qword; + } buf; + uint64_t data = 0; + int ret; + + ret = vbasedev->io_ops->region_read(vbasedev, region->nr, addr, size, &buf); + if (ret != size) { + error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %s", + __func__, vbasedev->name, region->nr, + addr, size, strreaderror(ret)); + return (uint64_t)-1; + } + switch (size) { + case 1: + data = buf.byte; + break; + case 2: + data = le16_to_cpu(buf.word); + break; + case 4: + data = le32_to_cpu(buf.dword); + break; + case 8: + data = le64_to_cpu(buf.qword); + break; + default: + hw_error("vfio: unsupported read size, %u bytes", size); + break; + } + + trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data); + + /* Same as write above */ + vbasedev->ops->vfio_eoi(vbasedev); + + return data; +} + +static const MemoryRegionOps vfio_region_ops = { + .read = vfio_region_read, + .write = vfio_region_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static int vfio_setup_region_sparse_mmaps(VFIORegion *region, + struct vfio_region_info *info) +{ + struct vfio_info_cap_header *hdr; + struct vfio_region_info_cap_sparse_mmap *sparse; + int i, j; + + hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP); + if (!hdr) { + return -ENODEV; + } + + sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header); + + trace_vfio_region_sparse_mmap_header(region->vbasedev->name, + region->nr, sparse->nr_areas); + + region->mmaps = g_new0(VFIOMmap, sparse->nr_areas); + + for (i = 0, j = 0; i < sparse->nr_areas; i++) { + if (sparse->areas[i].size) { + trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset, + sparse->areas[i].offset + + sparse->areas[i].size - 1); + region->mmaps[j].offset = sparse->areas[i].offset; + region->mmaps[j].size = sparse->areas[i].size; + j++; + } + } + + region->nr_mmaps = j; + region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap)); + + return 0; +} + +int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region, + int index, const char *name) +{ + struct vfio_region_info *info = NULL; + int ret; + + ret = vfio_device_get_region_info(vbasedev, index, &info); + if (ret) { + return ret; + } + + region->vbasedev = vbasedev; + region->flags = info->flags; + region->size = info->size; + region->fd_offset = info->offset; + region->nr = index; + region->post_wr = false; + + if (region->size) { + region->mem = g_new0(MemoryRegion, 1); + memory_region_init_io(region->mem, obj, &vfio_region_ops, + region, name, region->size); + + if (!vbasedev->no_mmap && + region->flags & VFIO_REGION_INFO_FLAG_MMAP) { + + ret = vfio_setup_region_sparse_mmaps(region, info); + + if (ret) { + region->nr_mmaps = 1; + region->mmaps = g_new0(VFIOMmap, region->nr_mmaps); + region->mmaps[0].offset = 0; + region->mmaps[0].size = region->size; + } + } + } + + trace_vfio_region_setup(vbasedev->name, index, name, + region->flags, region->fd_offset, region->size); + return 0; +} + +static void vfio_subregion_unmap(VFIORegion *region, int index) +{ + trace_vfio_region_unmap(memory_region_name(®ion->mmaps[index].mem), + region->mmaps[index].offset, + region->mmaps[index].offset + + 
region->mmaps[index].size - 1); + memory_region_del_subregion(region->mem, ®ion->mmaps[index].mem); + munmap(region->mmaps[index].mmap, region->mmaps[index].size); + object_unparent(OBJECT(®ion->mmaps[index].mem)); + region->mmaps[index].mmap = NULL; +} + +int vfio_region_mmap(VFIORegion *region) +{ + int i, ret, prot = 0; + char *name; + int fd; + + if (!region->mem) { + return 0; + } + + prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0; + prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0; + + for (i = 0; i < region->nr_mmaps; i++) { + size_t align = MIN(pow2ceil(region->mmaps[i].size), 1 * GiB); + void *map_base, *map_align; + + /* + * Align the mmap for more efficient mapping in the kernel. Ideally + * we'd know the PMD and PUD mapping sizes to use as discrete alignment + * intervals, but we don't. As of Linux v6.12, the largest PUD size + * supporting huge pfnmap is 1GiB (ARCH_SUPPORTS_PUD_PFNMAP is only set + * on x86_64). Align by power-of-two size, capped at 1GiB. + * + * NB. qemu_memalign() and friends actually allocate memory, whereas + * the region size here can exceed host memory, therefore we manually + * create an oversized anonymous mapping and clean it up for alignment. + */ + map_base = mmap(0, region->mmaps[i].size + align, PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (map_base == MAP_FAILED) { + ret = -errno; + goto no_mmap; + } + + fd = vfio_device_get_region_fd(region->vbasedev, region->nr); + + map_align = (void *)ROUND_UP((uintptr_t)map_base, (uintptr_t)align); + munmap(map_base, map_align - map_base); + munmap(map_align + region->mmaps[i].size, + align - (map_align - map_base)); + + region->mmaps[i].mmap = mmap(map_align, region->mmaps[i].size, prot, + MAP_SHARED | MAP_FIXED, fd, + region->fd_offset + + region->mmaps[i].offset); + if (region->mmaps[i].mmap == MAP_FAILED) { + ret = -errno; + goto no_mmap; + } + + name = g_strdup_printf("%s mmaps[%d]", + memory_region_name(region->mem), i); + memory_region_init_ram_device_ptr(®ion->mmaps[i].mem, + memory_region_owner(region->mem), + name, region->mmaps[i].size, + region->mmaps[i].mmap); + g_free(name); + memory_region_add_subregion(region->mem, region->mmaps[i].offset, + ®ion->mmaps[i].mem); + + trace_vfio_region_mmap(memory_region_name(®ion->mmaps[i].mem), + region->mmaps[i].offset, + region->mmaps[i].offset + + region->mmaps[i].size - 1); + } + + return 0; + +no_mmap: + trace_vfio_region_mmap_fault(memory_region_name(region->mem), i, + region->fd_offset + region->mmaps[i].offset, + region->fd_offset + region->mmaps[i].offset + + region->mmaps[i].size - 1, ret); + + region->mmaps[i].mmap = NULL; + + for (i--; i >= 0; i--) { + vfio_subregion_unmap(region, i); + } + + return ret; +} + +void vfio_region_unmap(VFIORegion *region) +{ + int i; + + if (!region->mem) { + return; + } + + for (i = 0; i < region->nr_mmaps; i++) { + if (region->mmaps[i].mmap) { + vfio_subregion_unmap(region, i); + } + } +} + +void vfio_region_exit(VFIORegion *region) +{ + int i; + + if (!region->mem) { + return; + } + + for (i = 0; i < region->nr_mmaps; i++) { + if (region->mmaps[i].mmap) { + memory_region_del_subregion(region->mem, ®ion->mmaps[i].mem); + } + } + + trace_vfio_region_exit(region->vbasedev->name, region->nr); +} + +void vfio_region_finalize(VFIORegion *region) +{ + int i; + + if (!region->mem) { + return; + } + + for (i = 0; i < region->nr_mmaps; i++) { + if (region->mmaps[i].mmap) { + munmap(region->mmaps[i].mmap, region->mmaps[i].size); + object_unparent(OBJECT(®ion->mmaps[i].mem)); 
+ } + } + + object_unparent(OBJECT(region->mem)); + + g_free(region->mem); + g_free(region->mmaps); + + trace_vfio_region_finalize(region->vbasedev->name, region->nr); + + region->mem = NULL; + region->mmaps = NULL; + region->nr_mmaps = 0; + region->size = 0; + region->flags = 0; + region->nr = 0; +} + +void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled) +{ + int i; + + if (!region->mem) { + return; + } + + for (i = 0; i < region->nr_mmaps; i++) { + if (region->mmaps[i].mmap) { + memory_region_set_enabled(&region->mmaps[i].mem, enabled); + } + } + + trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem), + enabled); +} diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c index 018bd204819..564b70ef97f 100644 --- a/hw/vfio/spapr.c +++ b/hw/vfio/spapr.c @@ -11,23 +11,30 @@ #include "qemu/osdep.h" #include #include -#ifdef CONFIG_KVM -#include -#endif -#include "sysemu/kvm.h" -#include "exec/address-spaces.h" +#include "system/kvm.h" +#include "system/hostmem.h" +#include "system/address-spaces.h" -#include "hw/vfio/vfio-common.h" +#include "hw/vfio/vfio-container.h" #include "hw/hw.h" -#include "exec/ram_addr.h" +#include "system/ram_addr.h" #include "qemu/error-report.h" #include "qapi/error.h" #include "trace.h" +#include "vfio-helpers.h" + +typedef struct VFIOHostDMAWindow { + hwaddr min_iova; + hwaddr max_iova; + uint64_t iova_pgsizes; + QLIST_ENTRY(VFIOHostDMAWindow) hostwin_next; +} VFIOHostDMAWindow; typedef struct VFIOSpaprContainer { VFIOContainer container; MemoryListener prereg_listener; QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list; + unsigned int levels; } VFIOSpaprContainer; OBJECT_DECLARE_SIMPLE_TYPE(VFIOSpaprContainer, VFIO_IOMMU_SPAPR); @@ -232,15 +239,17 @@ static int vfio_spapr_remove_window(VFIOContainer *container, return 0; } -static int vfio_spapr_create_window(VFIOContainer *container, +static bool vfio_spapr_create_window(VFIOContainer *container, MemoryRegionSection *section, - hwaddr *pgsize) + hwaddr *pgsize, Error **errp) { int ret = 0; VFIOContainerBase *bcontainer = &container->bcontainer; + VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, + container); IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr), pgmask; - unsigned entries, bits_total, bits_per_level, max_levels; + unsigned entries, bits_total, bits_per_level, max_levels, ddw_levels; struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) }; long rampagesize = qemu_minrampagesize(); @@ -254,11 +263,11 @@ static int vfio_spapr_create_window(VFIOContainer *container, pgmask = bcontainer->pgsizes & (pagesize | (pagesize - 1)); pagesize = pgmask ?
(1ULL << (63 - clz64(pgmask))) : 0; if (!pagesize) { - error_report("Host doesn't support page size 0x%"PRIx64 - ", the supported mask is 0x%lx", - memory_region_iommu_get_min_page_size(iommu_mr), - bcontainer->pgsizes); - return -EINVAL; + error_setg_errno(errp, EINVAL, "Host doesn't support page size 0x%"PRIx64 + ", the supported mask is 0x%lx", + memory_region_iommu_get_min_page_size(iommu_mr), + bcontainer->pgsizes); + return false; } /* @@ -293,28 +302,41 @@ static int vfio_spapr_create_window(VFIOContainer *container, */ bits_per_level = ctz64(qemu_real_host_page_size()) + 8; create.levels = bits_total / bits_per_level; - if (bits_total % bits_per_level) { - ++create.levels; - } - max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size()); - for ( ; create.levels <= max_levels; ++create.levels) { - ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create); - if (!ret) { - break; + + ddw_levels = scontainer->levels; + if (ddw_levels > 1) { + if (bits_total % bits_per_level) { + ++create.levels; + } + max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size()); + for ( ; create.levels <= max_levels; ++create.levels) { + ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create); + if (!ret) { + break; + } } + } else { /* ddw_levels == 1 */ + if (create.levels > ddw_levels) { + error_setg_errno(errp, EINVAL, "Host doesn't support multi-level TCE tables" + ". Use larger IO page size. Supported mask is 0x%lx", + bcontainer->pgsizes); + return false; + } + ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create); } + if (ret) { - error_report("Failed to create a window, ret = %d (%m)", ret); - return -errno; + error_setg_errno(errp, errno, "Failed to create a window, ret = %d", ret); + return false; } if (create.start_addr != section->offset_within_address_space) { vfio_spapr_remove_window(container, create.start_addr); - error_report("Host doesn't support DMA window at %"HWADDR_PRIx", must be %"PRIx64, - section->offset_within_address_space, - (uint64_t)create.start_addr); - return -EINVAL; + error_setg_errno(errp, EINVAL, "Host doesn't support DMA window at %"HWADDR_PRIx + ", must be %"PRIx64, section->offset_within_address_space, + (uint64_t)create.start_addr); + return false; } trace_vfio_spapr_create_window(create.page_shift, create.levels, @@ -322,7 +344,7 @@ static int vfio_spapr_create_window(VFIOContainer *container, create.start_addr); *pgsize = pagesize; - return 0; + return true; } static bool @@ -379,9 +401,8 @@ vfio_spapr_container_add_section_window(VFIOContainerBase *bcontainer, } } - ret = vfio_spapr_create_window(container, section, &pgsize); - if (ret) { - error_setg_errno(errp, -ret, "Failed to create SPAPR window"); + ret = vfio_spapr_create_window(container, section, &pgsize, errp); + if (!ret) { return false; } @@ -504,6 +525,8 @@ static bool vfio_spapr_container_setup(VFIOContainerBase *bcontainer, goto listener_unregister_exit; } + scontainer->levels = info.ddw.levels; + if (v2) { bcontainer->pgsizes = info.ddw.pgsizes; /* @@ -536,7 +559,7 @@ static bool vfio_spapr_container_setup(VFIOContainerBase *bcontainer, return false; } -static void vfio_iommu_spapr_class_init(ObjectClass *klass, void *data) +static void vfio_iommu_spapr_class_init(ObjectClass *klass, const void *data) { VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index 98bd4dccead..fc6ed230d0c 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -1,8 +1,10 @@ # See docs/devel/tracing.rst 
for syntax documentation. +# +# SPDX-License-Identifier: GPL-2.0-or-later # pci.c vfio_intx_interrupt(const char *name, char line) " (%s) Pin %c" -vfio_intx_eoi(const char *name) " (%s) EOI" +vfio_pci_intx_eoi(const char *name) " (%s) EOI" vfio_intx_enable_kvm(const char *name) " (%s) KVM INTx accel enabled" vfio_intx_disable_kvm(const char *name) " (%s) KVM INTx accel disabled" vfio_intx_update(const char *name, int new_irq, int target_irq) " (%s) IRQ moved %d -> %d" @@ -27,7 +29,7 @@ vfio_vga_read(uint64_t addr, int size, uint64_t data) " (0x%"PRIx64", %d) = 0x%" vfio_pci_read_config(const char *name, int addr, int len, int val) " (%s, @0x%x, len=0x%x) 0x%x" vfio_pci_write_config(const char *name, int addr, int val, int len) " (%s, @0x%x, 0x%x, len=0x%x)" vfio_msi_setup(const char *name, int pos) "%s PCI MSI CAP @0x%x" -vfio_msix_early_setup(const char *name, int pos, int table_bar, int offset, int entries, bool noresize) "%s PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d, noresize %d" +vfio_msix_early_setup(const char *name, int pos, int table_bar, uint64_t offset, int entries, bool noresize) "%s PCI MSI-X CAP @0x%x, BAR %d, offset 0x%"PRIx64", entries %d, noresize %d" vfio_check_pcie_flr(const char *name) "%s Supports FLR via PCIe cap" vfio_check_pm_reset(const char *name) "%s Supports PM reset" vfio_check_af_flr(const char *name) "%s Supports FLR via AF cap" @@ -35,10 +37,8 @@ vfio_pci_hot_reset(const char *name, const char *type) " (%s) %s" vfio_pci_hot_reset_has_dep_devices(const char *name) "%s: hot reset dependent devices:" vfio_pci_hot_reset_dep_devices(int domain, int bus, int slot, int function, int group_id) "\t%04x:%02x:%02x.%x group %d" vfio_pci_hot_reset_result(const char *name, const char *result) "%s hot reset: %s" -vfio_populate_device_config(const char *name, unsigned long size, unsigned long offset, unsigned long flags) "Device '%s' config: size: 0x%lx, offset: 0x%lx, flags: 0x%lx" -vfio_populate_device_get_irq_info_failure(const char *errstr) "VFIO_DEVICE_GET_IRQ_INFO failure: %s" -vfio_attach_device(const char *name, int group_id) " (%s) group %d" -vfio_detach_device(const char *name, int group_id) " (%s) group %d" +vfio_pci_populate_device_config(const char *name, unsigned long size, unsigned long offset, unsigned long flags) "Device '%s' config: size: 0x%lx, offset: 0x%lx, flags: 0x%lx" +vfio_pci_populate_device_get_irq_info_failure(const char *errstr) "VFIO_DEVICE_GET_IRQ_INFO failure: %s" vfio_mdev(const char *name, bool is_mdev) " (%s) is_mdev %d" vfio_add_ext_cap_dropped(const char *name, uint16_t cap, uint16_t offset) "%s 0x%x@0x%x" vfio_pci_reset(const char *name) " (%s)" @@ -48,6 +48,7 @@ vfio_pci_emulated_vendor_id(const char *name, uint16_t val) "%s 0x%04x" vfio_pci_emulated_device_id(const char *name, uint16_t val) "%s 0x%04x" vfio_pci_emulated_sub_vendor_id(const char *name, uint16_t val) "%s 0x%04x" vfio_pci_emulated_sub_device_id(const char *name, uint16_t val) "%s 0x%04x" +vfio_pci_emulated_class_code(const char *name, uint32_t val) "%s 0x%06x" # pci-quirks.c vfio_quirk_rom_in_denylist(const char *name, uint16_t vid, uint16_t did) "%s %04x:%04x" @@ -89,9 +90,7 @@ vfio_pci_igd_bdsm_enabled(const char *name, int size) "%s %dMB" vfio_pci_igd_host_bridge_enabled(const char *name) "%s" vfio_pci_igd_lpc_bridge_enabled(const char *name) "%s" -# common.c -vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)" -vfio_region_read(char *name, int index, uint64_t addr, 
unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64 +# listener.c vfio_iommu_map_notify(const char *op, uint64_t iova_start, uint64_t iova_end) "iommu %s @ 0x%"PRIx64" - 0x%"PRIx64 vfio_listener_region_skip(const char *name, uint64_t start, uint64_t end) "SKIPPING %s 0x%"PRIx64" - 0x%"PRIx64 vfio_spapr_group_attach(int groupfd, int tablefd) "Attached groupfd %d to liobn fd %d" @@ -103,10 +102,21 @@ vfio_listener_region_add_no_dma_map(const char *name, uint64_t iova, uint64_t si vfio_listener_region_del(uint64_t start, uint64_t end) "region_del 0x%"PRIx64" - 0x%"PRIx64 vfio_device_dirty_tracking_update(uint64_t start, uint64_t end, uint64_t min, uint64_t max) "section 0x%"PRIx64" - 0x%"PRIx64" -> update [0x%"PRIx64" - 0x%"PRIx64"]" vfio_device_dirty_tracking_start(int nr_ranges, uint64_t min32, uint64_t max32, uint64_t min64, uint64_t max64, uint64_t minpci, uint64_t maxpci) "nr_ranges %d 32:[0x%"PRIx64" - 0x%"PRIx64"], 64:[0x%"PRIx64" - 0x%"PRIx64"], pci64:[0x%"PRIx64" - 0x%"PRIx64"]" -vfio_disconnect_container(int fd) "close container->fd=%d" -vfio_put_group(int fd) "close group->fd=%d" -vfio_get_device(const char * name, unsigned int flags, unsigned int num_regions, unsigned int num_irqs) "Device %s flags: %u, regions: %u, irqs: %u" -vfio_put_base_device(int fd) "close vdev->fd=%d" +vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64 + +# container-base.c +vfio_container_query_dirty_bitmap(uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64 + +# container.c +vfio_container_disconnect(int fd) "close container->fd=%d" +vfio_group_put(int fd) "close group->fd=%d" +vfio_device_get(const char * name, unsigned int flags, unsigned int num_regions, unsigned int num_irqs) "Device %s flags: %u, regions: %u, irqs: %u" +vfio_device_put(int fd) "close vdev->fd=%d" +vfio_legacy_dma_unmap_overflow_workaround(void) "" + +# region.c +vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)" +vfio_region_read(char *name, int index, uint64_t addr, unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64 vfio_region_setup(const char *dev, int index, const char *name, unsigned long flags, unsigned long offset, unsigned long size) "Device %s, region %d \"%s\", flags: 0x%lx, offset: 0x%lx, size: 0x%lx" vfio_region_mmap_fault(const char *name, int index, unsigned long offset, unsigned long size, int fault) "Region %s mmaps[%d], [0x%lx - 0x%lx], fault: %d" vfio_region_mmap(const char *name, unsigned long offset, unsigned long end) "Region %s [0x%lx - 0x%lx]" @@ -116,10 +126,6 @@ vfio_region_mmaps_set_enabled(const char *name, bool enabled) "Region %s mmaps e vfio_region_unmap(const char *name, unsigned long offset, unsigned long end) "Region %s unmap [0x%lx - 0x%lx]" vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries" vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]" -vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%08x" -vfio_legacy_dma_unmap_overflow_workaround(void) "" -vfio_get_dirty_bitmap(uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "iova=0x%"PRIx64" size= 0x%"PRIx64" 
bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64 -vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64 # platform.c vfio_platform_realize(char *name, char *compat) "vfio device %s, compat = %s" @@ -148,22 +154,36 @@ vfio_display_edid_update(uint32_t prefx, uint32_t prefy) "%ux%u" vfio_display_edid_write_error(void) "" # migration.c +vfio_load_bufs_thread_start(const char *name) " (%s)" +vfio_load_bufs_thread_end(const char *name) " (%s)" vfio_load_cleanup(const char *name) " (%s)" -vfio_load_device_config_state(const char *name) " (%s)" +vfio_load_device_config_state_start(const char *name) " (%s)" +vfio_load_device_config_state_end(const char *name) " (%s)" vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64 -vfio_load_state_device_data(const char *name, uint64_t data_size, int ret) " (%s) size 0x%"PRIx64" ret %d" +vfio_load_state_device_data(const char *name, uint64_t data_size, int ret) " (%s) size %"PRIu64" ret %d" +vfio_load_state_device_buffer_incoming(const char *name, uint32_t idx) " (%s) idx %"PRIu32 +vfio_load_state_device_buffer_start(const char *name) " (%s)" +vfio_load_state_device_buffer_starved(const char *name, uint32_t idx) " (%s) idx %"PRIu32 +vfio_load_state_device_buffer_load_start(const char *name, uint32_t idx) " (%s) idx %"PRIu32 +vfio_load_state_device_buffer_load_end(const char *name, uint32_t idx) " (%s) idx %"PRIu32 +vfio_load_state_device_buffer_end(const char *name) " (%s)" vfio_migration_realize(const char *name) " (%s)" vfio_migration_set_device_state(const char *name, const char *state) " (%s) state %s" vfio_migration_set_state(const char *name, const char *new_state, const char *recover_state) " (%s) new state %s, recover state %s" vfio_migration_state_notifier(const char *name, int state) " (%s) state %d" vfio_save_block(const char *name, int data_size) " (%s) data_size %d" +vfio_save_block_precopy_empty_hit(const char *name) " (%s)" vfio_save_cleanup(const char *name) " (%s)" vfio_save_complete_precopy(const char *name, int ret) " (%s) ret %d" +vfio_save_complete_precopy_start(const char *name) " (%s)" +vfio_save_complete_precopy_thread_start(const char *name, const char *idstr, uint32_t instance_id) " (%s) idstr %s instance %"PRIu32 +vfio_save_complete_precopy_thread_end(const char *name, int ret) " (%s) ret %d" vfio_save_device_config_state(const char *name) " (%s)" -vfio_save_iterate(const char *name, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy initial size 0x%"PRIx64" precopy dirty size 0x%"PRIx64 -vfio_save_setup(const char *name, uint64_t data_buffer_size) " (%s) data buffer size 0x%"PRIx64 -vfio_state_pending_estimate(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy 0x%"PRIx64" postcopy 0x%"PRIx64" precopy initial size 0x%"PRIx64" precopy dirty size 0x%"PRIx64 -vfio_state_pending_exact(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t stopcopy_size, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy 0x%"PRIx64" postcopy 0x%"PRIx64" stopcopy size 0x%"PRIx64" precopy initial size 0x%"PRIx64" precopy dirty size 0x%"PRIx64 +vfio_save_iterate(const char *name, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy initial size %"PRIu64" precopy dirty size %"PRIu64 +vfio_save_iterate_start(const char *name) " (%s)" +vfio_save_setup(const char *name, uint64_t data_buffer_size) " (%s) data buffer size %"PRIu64 
+vfio_state_pending_estimate(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy %"PRIu64" postcopy %"PRIu64" precopy initial size %"PRIu64" precopy dirty size %"PRIu64 +vfio_state_pending_exact(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t stopcopy_size, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy %"PRIu64" postcopy %"PRIu64" stopcopy size %"PRIu64" precopy initial size %"PRIu64" precopy dirty size %"PRIu64 vfio_vmstate_change(const char *name, int running, const char *reason, const char *dev_state) " (%s) running %d reason %s device state %s" vfio_vmstate_change_prepare(const char *name, int running, const char *reason, const char *dev_state) " (%s) running %d reason %s device state %s" @@ -177,3 +197,12 @@ iommufd_cdev_fail_attach_existing_container(const char *msg) " %s" iommufd_cdev_alloc_ioas(int iommufd, int ioas_id) " [iommufd=%d] new IOMMUFD container with ioasid=%d" iommufd_cdev_device_info(char *name, int devfd, int num_irqs, int num_regions, int flags) " %s (%d) num_irqs=%d num_regions=%d flags=%d" iommufd_cdev_pci_hot_reset_dep_devices(int domain, int bus, int slot, int function, int dev_id) "\t%04x:%02x:%02x.%x devid %d" + +# cpr-iommufd.c +vfio_cpr_find_device(uint32_t ioas_id, int devid, uint32_t hwpt_id) "ioas_id %u, devid %d, hwpt_id %u" + +# device.c +vfio_device_get_region_info_type(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%08x" +vfio_device_reset_handler(void) "" +vfio_device_attach(const char *name, int group_id) " (%s) group %d" +vfio_device_detach(const char *name, int group_id) " (%s) group %d" diff --git a/hw/vfio/trace.h b/hw/vfio/trace.h index 5a343aa59cc..b34b61ddb28 100644 --- a/hw/vfio/trace.h +++ b/hw/vfio/trace.h @@ -1 +1,4 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + */ #include "trace/trace-hw_vfio.h" diff --git a/hw/vfio/types.h b/hw/vfio/types.h new file mode 100644 index 00000000000..c19334ff25a --- /dev/null +++ b/hw/vfio/types.h @@ -0,0 +1,23 @@ +/* + * VFIO types definition + * + * Copyright Red Hat, Inc. 2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef HW_VFIO_VFIO_TYPES_H +#define HW_VFIO_VFIO_TYPES_H + +/* + * TYPE_VFIO_PCI_BASE is an abstract type used to share code + * between VFIO implementations that use a kernel driver + * with those that use user sockets. + */ +#define TYPE_VFIO_PCI_BASE "vfio-pci-base" + +#define TYPE_VFIO_PCI "vfio-pci" +/* TYPE_VFIO_PCI shares struct VFIOPCIDevice. */ + +#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug" + +#endif /* HW_VFIO_VFIO_TYPES_H */ diff --git a/hw/vfio/vfio-display.h b/hw/vfio/vfio-display.h new file mode 100644 index 00000000000..2606c34b396 --- /dev/null +++ b/hw/vfio/vfio-display.h @@ -0,0 +1,42 @@ +/* + * VFIO display + * + * Copyright Red Hat, Inc. 
2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_VFIO_DISPLAY_H +#define HW_VFIO_VFIO_DISPLAY_H + +#include "ui/console.h" +#include "hw/display/ramfb.h" +#include "hw/vfio/vfio-region.h" + +typedef struct VFIODMABuf { + QemuDmaBuf *buf; + uint32_t pos_x, pos_y, pos_updates; + uint32_t hot_x, hot_y, hot_updates; + int dmabuf_id; + QTAILQ_ENTRY(VFIODMABuf) next; +} VFIODMABuf; + +typedef struct VFIODisplay { + QemuConsole *con; + RAMFBState *ramfb; + struct vfio_region_info *edid_info; + struct vfio_region_gfx_edid *edid_regs; + uint8_t *edid_blob; + QEMUTimer *edid_link_timer; + struct { + VFIORegion buffer; + DisplaySurface *surface; + } region; + struct { + QTAILQ_HEAD(, VFIODMABuf) bufs; + VFIODMABuf *primary; + VFIODMABuf *cursor; + } dmabuf; +} VFIODisplay; + +#endif /* HW_VFIO_VFIO_DISPLAY_H */ diff --git a/hw/vfio/vfio-helpers.h b/hw/vfio/vfio-helpers.h new file mode 100644 index 00000000000..ce317580800 --- /dev/null +++ b/hw/vfio/vfio-helpers.h @@ -0,0 +1,37 @@ +/* + * VFIO helpers + * + * Copyright Red Hat, Inc. 2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_VFIO_HELPERS_H +#define HW_VFIO_VFIO_HELPERS_H + +#ifdef CONFIG_LINUX +#include + +extern int vfio_kvm_device_fd; + +struct vfio_info_cap_header * +vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id); +struct vfio_info_cap_header * +vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id); +struct vfio_info_cap_header * +vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id); +struct vfio_info_cap_header * +vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id); +bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info, + unsigned int *avail); +#endif + +int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size); +struct vfio_device_info *vfio_get_device_info(int fd); + +int vfio_kvm_device_add_fd(int fd, Error **errp); +int vfio_kvm_device_del_fd(int fd, Error **errp); + +bool vfio_arch_wants_loading_config_after_iter(void); + +#endif /* HW_VFIO_VFIO_HELPERS_H */ diff --git a/hw/vfio/vfio-iommufd.h b/hw/vfio/vfio-iommufd.h new file mode 100644 index 00000000000..07ea0f43049 --- /dev/null +++ b/hw/vfio/vfio-iommufd.h @@ -0,0 +1,34 @@ +/* + * VFIO iommufd + * + * Copyright Red Hat, Inc. 2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_VFIO_IOMMUFD_H +#define HW_VFIO_VFIO_IOMMUFD_H + +#include "hw/vfio/vfio-container-base.h" + +typedef struct VFIODevice VFIODevice; + +typedef struct VFIOIOASHwpt { + uint32_t hwpt_id; + uint32_t hwpt_flags; + QLIST_HEAD(, VFIODevice) device_list; + QLIST_ENTRY(VFIOIOASHwpt) next; +} VFIOIOASHwpt; + +typedef struct IOMMUFDBackend IOMMUFDBackend; + +typedef struct VFIOIOMMUFDContainer { + VFIOContainerBase bcontainer; + IOMMUFDBackend *be; + uint32_t ioas_id; + QLIST_HEAD(, VFIOIOASHwpt) hwpt_list; +} VFIOIOMMUFDContainer; + +OBJECT_DECLARE_SIMPLE_TYPE(VFIOIOMMUFDContainer, VFIO_IOMMU_IOMMUFD); + +#endif /* HW_VFIO_VFIO_IOMMUFD_H */ diff --git a/hw/vfio/vfio-listener.h b/hw/vfio/vfio-listener.h new file mode 100644 index 00000000000..eb69ddd374f --- /dev/null +++ b/hw/vfio/vfio-listener.h @@ -0,0 +1,15 @@ +/* + * VFIO MemoryListener services + * + * Copyright Red Hat, Inc. 
2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_VFIO_LISTENER_H +#define HW_VFIO_VFIO_LISTENER_H + +bool vfio_listener_register(VFIOContainerBase *bcontainer, Error **errp); +void vfio_listener_unregister(VFIOContainerBase *bcontainer); + +#endif /* HW_VFIO_VFIO_LISTENER_H */ diff --git a/hw/vfio/vfio-migration-internal.h b/hw/vfio/vfio-migration-internal.h new file mode 100644 index 00000000000..814fbd9ebae --- /dev/null +++ b/hw/vfio/vfio-migration-internal.h @@ -0,0 +1,74 @@ +/* + * VFIO migration + * + * Copyright Red Hat, Inc. 2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_VFIO_MIGRATION_INTERNAL_H +#define HW_VFIO_VFIO_MIGRATION_INTERNAL_H + +#ifdef CONFIG_LINUX +#include +#endif + +#include "qemu/notify.h" + +/* + * Flags to be used as unique delimiters for VFIO devices in the migration + * stream. These flags are composed as: + * 0xffffffff => MSB 32-bit all 1s + * 0xef10 => Magic ID, represents emulated (virtual) function IO + * 0x0000 => 16-bits reserved for flags + * + * The beginning of state information is marked by _DEV_CONFIG_STATE, + * _DEV_SETUP_STATE, or _DEV_DATA_STATE, respectively. The end of a + * certain state information is marked by _END_OF_STATE. + */ +#define VFIO_MIG_FLAG_END_OF_STATE (0xffffffffef100001ULL) +#define VFIO_MIG_FLAG_DEV_CONFIG_STATE (0xffffffffef100002ULL) +#define VFIO_MIG_FLAG_DEV_SETUP_STATE (0xffffffffef100003ULL) +#define VFIO_MIG_FLAG_DEV_DATA_STATE (0xffffffffef100004ULL) +#define VFIO_MIG_FLAG_DEV_INIT_DATA_SENT (0xffffffffef100005ULL) +#define VFIO_MIG_FLAG_DEV_CONFIG_LOAD_READY (0xffffffffef100006ULL) + +typedef struct VFIODevice VFIODevice; +typedef struct VFIOMultifd VFIOMultifd; + +typedef struct VFIOMigration { + struct VFIODevice *vbasedev; + VMChangeStateEntry *vm_state; + NotifierWithReturn migration_state; + uint32_t device_state; + int data_fd; + void *data_buffer; + size_t data_buffer_size; + uint64_t mig_flags; + uint64_t precopy_init_size; + uint64_t precopy_dirty_size; + bool multifd_transfer; + VFIOMultifd *multifd; + bool initial_data_sent; + + bool event_save_iterate_started; + bool event_precopy_empty_hit; +} VFIOMigration; + +bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp); +void vfio_migration_exit(VFIODevice *vbasedev); +bool vfio_device_state_is_running(VFIODevice *vbasedev); +bool vfio_device_state_is_precopy(VFIODevice *vbasedev); +int vfio_save_device_config_state(QEMUFile *f, void *opaque, Error **errp); +int vfio_load_device_config_state(QEMUFile *f, void *opaque); + +#ifdef CONFIG_LINUX +int vfio_migration_set_state(VFIODevice *vbasedev, + enum vfio_device_mig_state new_state, + enum vfio_device_mig_state recover_state, + Error **errp); +#endif + +void vfio_migration_add_bytes_transferred(unsigned long val); + +#endif /* HW_VFIO_VFIO_MIGRATION_INTERNAL_H */ diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig index aa63ff7fd41..7648a2d68da 100644 --- a/hw/virtio/Kconfig +++ b/hw/virtio/Kconfig @@ -6,6 +6,10 @@ config VIRTIO_RNG default y depends on VIRTIO +config VIRTIO_NSM + bool + depends on LIBCBOR && VIRTIO + config VIRTIO_IOMMU bool default y @@ -16,6 +20,7 @@ config VIRTIO_PCI default y if PCI_DEVICES depends on PCI select VIRTIO + select VIRTIO_MD_SUPPORTED config VIRTIO_MMIO bool @@ -24,6 +29,7 @@ config VIRTIO_MMIO config VIRTIO_CCW bool select VIRTIO + select VIRTIO_MD_SUPPORTED config VIRTIO_BALLOON bool @@ -35,10 +41,17 @@ config VIRTIO_CRYPTO default y depends on VIRTIO +# not all virtio transports support memory devices; 
if none does, +# no need to include the code +config VIRTIO_MD_SUPPORTED + bool + config VIRTIO_MD bool + depends on VIRTIO_MD_SUPPORTED select MEM_DEVICE +# selected by the board if it has the required support code config VIRTIO_PMEM_SUPPORTED bool @@ -46,9 +59,11 @@ config VIRTIO_PMEM bool default y depends on VIRTIO + depends on VIRTIO_MD_SUPPORTED depends on VIRTIO_PMEM_SUPPORTED select VIRTIO_MD +# selected by the board if it has the required support code config VIRTIO_MEM_SUPPORTED bool @@ -57,6 +72,7 @@ config VIRTIO_MEM default y depends on VIRTIO depends on LINUX + depends on VIRTIO_MD_SUPPORTED depends on VIRTIO_MEM_SUPPORTED select VIRTIO_MD @@ -109,4 +125,4 @@ config VHOST_USER_SND config VHOST_USER_SCMI bool default y - depends on VIRTIO && VHOST_USER + depends on VIRTIO && VHOST_USER && ARM diff --git a/hw/virtio/cbor-helpers.c b/hw/virtio/cbor-helpers.c new file mode 100644 index 00000000000..49f55df3994 --- /dev/null +++ b/hw/virtio/cbor-helpers.c @@ -0,0 +1,321 @@ +/* + * QEMU CBOR helpers + * + * Copyright (c) 2024 Dorjoy Chowdhury + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "hw/virtio/cbor-helpers.h" + +bool qemu_cbor_map_add(cbor_item_t *map, cbor_item_t *key, cbor_item_t *value) +{ + bool success = false; + struct cbor_pair pair = (struct cbor_pair) { + .key = cbor_move(key), + .value = cbor_move(value) + }; + + success = cbor_map_add(map, pair); + if (!success) { + cbor_incref(pair.key); + cbor_incref(pair.value); + } + + return success; +} + +bool qemu_cbor_array_push(cbor_item_t *array, cbor_item_t *value) +{ + bool success = false; + + success = cbor_array_push(array, cbor_move(value)); + if (!success) { + cbor_incref(value); + } + + return success; +} + +bool qemu_cbor_add_bool_to_map(cbor_item_t *map, const char *key, bool value) +{ + cbor_item_t *key_cbor = NULL; + cbor_item_t *value_cbor = NULL; + + key_cbor = cbor_build_string(key); + if (!key_cbor) { + goto cleanup; + } + value_cbor = cbor_build_bool(value); + if (!value_cbor) { + goto cleanup; + } + if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) { + goto cleanup; + } + + return true; + + cleanup: + if (key_cbor) { + cbor_decref(&key_cbor); + } + if (value_cbor) { + cbor_decref(&value_cbor); + } + return false; +} + +bool qemu_cbor_add_uint8_to_map(cbor_item_t *map, const char *key, + uint8_t value) +{ + cbor_item_t *key_cbor = NULL; + cbor_item_t *value_cbor = NULL; + + key_cbor = cbor_build_string(key); + if (!key_cbor) { + goto cleanup; + } + value_cbor = cbor_build_uint8(value); + if (!value_cbor) { + goto cleanup; + } + if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) { + goto cleanup; + } + + return true; + + cleanup: + if (key_cbor) { + cbor_decref(&key_cbor); + } + if (value_cbor) { + cbor_decref(&value_cbor); + } + return false; +} + +bool qemu_cbor_add_map_to_map(cbor_item_t *map, const char *key, + size_t nested_map_size, + cbor_item_t **nested_map) +{ + cbor_item_t *key_cbor = NULL; + cbor_item_t *value_cbor = NULL; + + key_cbor = cbor_build_string(key); + if (!key_cbor) { + goto cleanup; + } + value_cbor = cbor_new_definite_map(nested_map_size); + if (!value_cbor) { + goto cleanup; + } + if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) { + goto cleanup; + } + *nested_map = value_cbor; + + return true; + + cleanup: + if (key_cbor) { + cbor_decref(&key_cbor); + } + if (value_cbor) { + cbor_decref(&value_cbor); + } + return false; +} + +bool 
qemu_cbor_add_bytestring_to_map(cbor_item_t *map, const char *key, + uint8_t *arr, size_t len) +{ + cbor_item_t *key_cbor = NULL; + cbor_item_t *value_cbor = NULL; + + key_cbor = cbor_build_string(key); + if (!key_cbor) { + goto cleanup; + } + value_cbor = cbor_build_bytestring(arr, len); + if (!value_cbor) { + goto cleanup; + } + if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) { + goto cleanup; + } + + return true; + + cleanup: + if (key_cbor) { + cbor_decref(&key_cbor); + } + if (value_cbor) { + cbor_decref(&value_cbor); + } + return false; +} + +bool qemu_cbor_add_null_to_map(cbor_item_t *map, const char *key) +{ + cbor_item_t *key_cbor = NULL; + cbor_item_t *value_cbor = NULL; + + key_cbor = cbor_build_string(key); + if (!key_cbor) { + goto cleanup; + } + value_cbor = cbor_new_null(); + if (!value_cbor) { + goto cleanup; + } + if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) { + goto cleanup; + } + + return true; + + cleanup: + if (key_cbor) { + cbor_decref(&key_cbor); + } + if (value_cbor) { + cbor_decref(&value_cbor); + } + return false; +} + +bool qemu_cbor_add_string_to_map(cbor_item_t *map, const char *key, + const char *value) +{ + cbor_item_t *key_cbor = NULL; + cbor_item_t *value_cbor = NULL; + + key_cbor = cbor_build_string(key); + if (!key_cbor) { + goto cleanup; + } + value_cbor = cbor_build_string(value); + if (!value_cbor) { + goto cleanup; + } + if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) { + goto cleanup; + } + + return true; + + cleanup: + if (key_cbor) { + cbor_decref(&key_cbor); + } + if (value_cbor) { + cbor_decref(&value_cbor); + } + return false; +} + +bool qemu_cbor_add_uint8_array_to_map(cbor_item_t *map, const char *key, + uint8_t *arr, size_t len) +{ + cbor_item_t *key_cbor = NULL; + cbor_item_t *value_cbor = NULL; + + key_cbor = cbor_build_string(key); + if (!key_cbor) { + goto cleanup; + } + value_cbor = cbor_new_definite_array(len); + if (!value_cbor) { + goto cleanup; + } + + for (int i = 0; i < len; ++i) { + cbor_item_t *tmp = cbor_build_uint8(arr[i]); + if (!tmp) { + goto cleanup; + } + if (!qemu_cbor_array_push(value_cbor, tmp)) { + cbor_decref(&tmp); + goto cleanup; + } + } + if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) { + goto cleanup; + } + + return true; + + cleanup: + if (key_cbor) { + cbor_decref(&key_cbor); + } + if (value_cbor) { + cbor_decref(&value_cbor); + } + return false; +} + +bool qemu_cbor_add_uint8_key_bytestring_to_map(cbor_item_t *map, uint8_t key, + uint8_t *buf, size_t len) +{ + cbor_item_t *key_cbor = NULL; + cbor_item_t *value_cbor = NULL; + + key_cbor = cbor_build_uint8(key); + if (!key_cbor) { + goto cleanup; + } + value_cbor = cbor_build_bytestring(buf, len); + if (!value_cbor) { + goto cleanup; + } + if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) { + goto cleanup; + } + + return true; + + cleanup: + if (key_cbor) { + cbor_decref(&key_cbor); + } + if (value_cbor) { + cbor_decref(&value_cbor); + } + return false; +} + +bool qemu_cbor_add_uint64_to_map(cbor_item_t *map, const char *key, + uint64_t value) +{ + cbor_item_t *key_cbor = NULL; + cbor_item_t *value_cbor = NULL; + + key_cbor = cbor_build_string(key); + if (!key_cbor) { + goto cleanup; + } + value_cbor = cbor_build_uint64(value); + if (!value_cbor) { + goto cleanup; + } + if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) { + goto cleanup; + } + + return true; + + cleanup: + if (key_cbor) { + cbor_decref(&key_cbor); + } + if (value_cbor) { + cbor_decref(&value_cbor); + } + return false; +} diff --git a/hw/virtio/iothread-vq-mapping.c 
b/hw/virtio/iothread-vq-mapping.c new file mode 100644 index 00000000000..15909eb9332 --- /dev/null +++ b/hw/virtio/iothread-vq-mapping.c @@ -0,0 +1,131 @@ +/* + * IOThread Virtqueue Mapping + * + * Copyright Red Hat, Inc + * + * SPDX-License-Identifier: GPL-2.0-only + */ + +#include "qemu/osdep.h" +#include "system/iothread.h" +#include "hw/virtio/iothread-vq-mapping.h" + +static bool +iothread_vq_mapping_validate(IOThreadVirtQueueMappingList *list, uint16_t + num_queues, Error **errp) +{ + g_autofree unsigned long *vqs = bitmap_new(num_queues); + g_autoptr(GHashTable) iothreads = + g_hash_table_new(g_str_hash, g_str_equal); + + for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) { + const char *name = node->value->iothread; + uint16List *vq; + + if (!iothread_by_id(name)) { + error_setg(errp, "IOThread \"%s\" object does not exist", name); + return false; + } + + if (!g_hash_table_add(iothreads, (gpointer)name)) { + error_setg(errp, + "duplicate IOThread name \"%s\" in iothread-vq-mapping", + name); + return false; + } + + if (node != list) { + if (!!node->value->vqs != !!list->value->vqs) { + error_setg(errp, "either all items in iothread-vq-mapping " + "must have vqs or none of them must have it"); + return false; + } + } + + for (vq = node->value->vqs; vq; vq = vq->next) { + if (vq->value >= num_queues) { + error_setg(errp, "vq index %u for IOThread \"%s\" must be " + "less than num_queues %u in iothread-vq-mapping", + vq->value, name, num_queues); + return false; + } + + if (test_and_set_bit(vq->value, vqs)) { + error_setg(errp, "cannot assign vq %u to IOThread \"%s\" " + "because it is already assigned", vq->value, name); + return false; + } + } + } + + if (list->value->vqs) { + for (uint16_t i = 0; i < num_queues; i++) { + if (!test_bit(i, vqs)) { + error_setg(errp, + "missing vq %u IOThread assignment in iothread-vq-mapping", + i); + return false; + } + } + } + + return true; +} + +bool iothread_vq_mapping_apply( + IOThreadVirtQueueMappingList *list, + AioContext **vq_aio_context, + uint16_t num_queues, + Error **errp) +{ + IOThreadVirtQueueMappingList *node; + size_t num_iothreads = 0; + size_t cur_iothread = 0; + + if (!iothread_vq_mapping_validate(list, num_queues, errp)) { + return false; + } + + for (node = list; node; node = node->next) { + num_iothreads++; + } + + for (node = list; node; node = node->next) { + IOThread *iothread = iothread_by_id(node->value->iothread); + AioContext *ctx = iothread_get_aio_context(iothread); + + /* Released in virtio_blk_vq_aio_context_cleanup() */ + object_ref(OBJECT(iothread)); + + if (node->value->vqs) { + uint16List *vq; + + /* Explicit vq:IOThread assignment */ + for (vq = node->value->vqs; vq; vq = vq->next) { + assert(vq->value < num_queues); + vq_aio_context[vq->value] = ctx; + } + } else { + /* Round-robin vq:IOThread assignment */ + for (unsigned i = cur_iothread; i < num_queues; + i += num_iothreads) { + vq_aio_context[i] = ctx; + } + } + + cur_iothread++; + } + + return true; +} + +void iothread_vq_mapping_cleanup(IOThreadVirtQueueMappingList *list) +{ + IOThreadVirtQueueMappingList *node; + + for (node = list; node; node = node->next) { + IOThread *iothread = iothread_by_id(node->value->iothread); + object_unref(OBJECT(iothread)); + } +} + diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build index 621fc65454c..3ea7b3cec83 100644 --- a/hw/virtio/meson.build +++ b/hw/virtio/meson.build @@ -1,5 +1,7 @@ system_virtio_ss = ss.source_set() system_virtio_ss.add(files('virtio-bus.c')) 
+system_virtio_ss.add(files('iothread-vq-mapping.c')) +system_virtio_ss.add(files('virtio-config-io.c')) system_virtio_ss.add(when: 'CONFIG_VIRTIO_PCI', if_true: files('virtio-pci.c')) system_virtio_ss.add(when: 'CONFIG_VIRTIO_MMIO', if_true: files('virtio-mmio.c')) system_virtio_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto.c')) @@ -9,11 +11,11 @@ system_virtio_ss.add(when: 'CONFIG_VHOST_VDPA_DEV', if_true: files('vdpa-dev.c') specific_virtio_ss = ss.source_set() specific_virtio_ss.add(files('virtio.c')) -specific_virtio_ss.add(files('virtio-config-io.c', 'virtio-qmp.c')) +specific_virtio_ss.add(files('virtio-qmp.c')) if have_vhost system_virtio_ss.add(files('vhost.c')) - specific_virtio_ss.add(files('vhost-backend.c', 'vhost-iova-tree.c')) + system_virtio_ss.add(files('vhost-backend.c', 'vhost-iova-tree.c')) if have_vhost_user # fixme - this really should be generic specific_virtio_ss.add(files('vhost-user.c')) @@ -42,21 +44,22 @@ if have_vhost endif if have_vhost_vdpa system_virtio_ss.add(files('vhost-vdpa.c')) - specific_virtio_ss.add(files('vhost-shadow-virtqueue.c')) + system_virtio_ss.add(files('vhost-shadow-virtqueue.c')) endif else system_virtio_ss.add(files('vhost-stub.c')) endif +system_virtio_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock.c')) +system_virtio_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng.c')) specific_virtio_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-balloon.c')) specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs.c')) specific_virtio_ss.add(when: 'CONFIG_VIRTIO_PMEM', if_true: files('virtio-pmem.c')) specific_virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock.c')) -specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock.c')) -specific_virtio_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng.c')) specific_virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c')) -specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_SCMI', if_true: files('vhost-user-scmi.c')) -specific_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_SCMI'], if_true: files('vhost-user-scmi-pci.c')) +system_virtio_ss.add(when: 'CONFIG_VIRTIO_NSM', if_true: files('virtio-nsm.c')) +system_virtio_ss.add(when: 'CONFIG_VIRTIO_NSM', if_true: [files('cbor-helpers.c'), libcbor]) +system_virtio_ss.add(when: 'CONFIG_VHOST_USER_SCMI', if_true: files('vhost-user-scmi.c')) virtio_pci_ss = ss.source_set() virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c')) @@ -65,11 +68,13 @@ virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk- virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_SCSI', if_true: files('vhost-user-scsi-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VHOST_SCSI', if_true: files('vhost-scsi-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_SCMI', if_true: files('vhost-user-scmi-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VIRTIO_INPUT_HOST', if_true: files('virtio-input-host-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VIRTIO_NSM', if_true: [files('virtio-nsm-pci.c', 'cbor-helpers.c'), libcbor]) virtio_pci_ss.add(when: 
'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-balloon-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VIRTIO_9P', if_true: files('virtio-9p-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VIRTIO_SCSI', if_true: files('virtio-scsi-pci.c')) @@ -82,12 +87,13 @@ virtio_pci_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VHOST_VDPA_DEV', if_true: files('vdpa-dev-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VIRTIO_MD', if_true: files('virtio-md-pci.c')) -specific_virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss) +system_virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss) system_ss.add_all(when: 'CONFIG_VIRTIO', if_true: system_virtio_ss) system_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c')) system_ss.add(when: 'CONFIG_VIRTIO', if_false: files('virtio-stub.c')) -system_ss.add(when: 'CONFIG_VIRTIO_MD', if_false: files('virtio-md-stubs.c')) +system_ss.add(when: ['CONFIG_VIRTIO_MD', 'CONFIG_VIRTIO_PCI'], + if_false: files('virtio-md-stubs.c')) system_ss.add(files('virtio-hmp-cmds.c')) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index 04e36ae047d..76f0d458b29 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -108,7 +108,7 @@ virtio_pci_notify_write(uint64_t addr, uint64_t val, unsigned int size) "0x%" PR virtio_pci_notify_write_pio(uint64_t addr, uint64_t val, unsigned int size) "0x%" PRIx64" = 0x%" PRIx64 " (%d)" # hw/virtio/virtio-iommu.c -virtio_iommu_device_reset(void) "reset!" +virtio_iommu_device_reset_exit(void) "reset!" virtio_iommu_system_reset(void) "system reset!" virtio_iommu_get_features(uint64_t features) "device supports features=0x%"PRIx64 virtio_iommu_device_status(uint8_t status) "driver status = %d" diff --git a/hw/virtio/vdpa-dev-pci.c b/hw/virtio/vdpa-dev-pci.c index 5446e6b393e..30681121465 100644 --- a/hw/virtio/vdpa-dev-pci.c +++ b/hw/virtio/vdpa-dev-pci.c @@ -48,10 +48,6 @@ static void vhost_vdpa_device_pci_instance_init(Object *obj) "bootindex"); } -static Property vhost_vdpa_device_pci_properties[] = { - DEFINE_PROP_END_OF_LIST(), -}; - static int vhost_vdpa_device_pci_post_init(VhostVdpaDevice *v, Error **errp) { VhostVdpaDevicePCI *dev = container_of(v, VhostVdpaDevicePCI, vdev); @@ -74,13 +70,13 @@ vhost_vdpa_device_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(DEVICE(&dev->vdev), BUS(&vpci_dev->bus), errp); } -static void vhost_vdpa_device_pci_class_init(ObjectClass *klass, void *data) +static void vhost_vdpa_device_pci_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); set_bit(DEVICE_CATEGORY_MISC, dc->categories); - device_class_set_props(dc, vhost_vdpa_device_pci_properties); k->realize = vhost_vdpa_device_pci_realize; } diff --git a/hw/virtio/vdpa-dev.c b/hw/virtio/vdpa-dev.c index 64b96b226c3..d1da40afc80 100644 --- a/hw/virtio/vdpa-dev.c +++ b/hw/virtio/vdpa-dev.c @@ -26,8 +26,8 @@ #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/vdpa-dev.h" -#include "sysemu/sysemu.h" -#include "sysemu/runstate.h" +#include "system/system.h" +#include "system/runstate.h" static void vhost_vdpa_device_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq) @@ -312,7 +312,7 @@ static void vhost_vdpa_device_stop(VirtIODevice *vdev) vhost_dev_disable_notifiers(&s->dev, vdev); } -static void vhost_vdpa_device_set_status(VirtIODevice *vdev, uint8_t status) +static int vhost_vdpa_device_set_status(VirtIODevice *vdev, uint8_t 
status) { VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev); bool should_start = virtio_device_started(vdev, status); @@ -324,7 +324,7 @@ static void vhost_vdpa_device_set_status(VirtIODevice *vdev, uint8_t status) } if (s->started == should_start) { - return; + return 0; } if (should_start) { @@ -335,12 +335,12 @@ static void vhost_vdpa_device_set_status(VirtIODevice *vdev, uint8_t status) } else { vhost_vdpa_device_stop(vdev); } + return 0; } -static Property vhost_vdpa_device_properties[] = { +static const Property vhost_vdpa_device_properties[] = { DEFINE_PROP_STRING("vhostdev", VhostVdpaDevice, vhostdev), DEFINE_PROP_UINT16("queue-size", VhostVdpaDevice, queue_size, 0), - DEFINE_PROP_END_OF_LIST(), }; static const VMStateDescription vmstate_vhost_vdpa_device = { @@ -354,7 +354,7 @@ static const VMStateDescription vmstate_vhost_vdpa_device = { }, }; -static void vhost_vdpa_device_class_init(ObjectClass *klass, void *data) +static void vhost_vdpa_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/virtio/vhost-iova-tree.c b/hw/virtio/vhost-iova-tree.c index 3d03395a77b..fa4147b7732 100644 --- a/hw/virtio/vhost-iova-tree.c +++ b/hw/virtio/vhost-iova-tree.c @@ -28,12 +28,18 @@ struct VhostIOVATree { /* IOVA address to qemu memory maps. */ IOVATree *iova_taddr_map; + + /* Allocated IOVA addresses */ + IOVATree *iova_map; + + /* GPA->IOVA address memory maps */ + IOVATree *gpa_iova_map; }; /** - * Create a new IOVA tree + * Create a new VhostIOVATree * - * Returns the new IOVA tree + * Returns the new VhostIOVATree. */ VhostIOVATree *vhost_iova_tree_new(hwaddr iova_first, hwaddr iova_last) { @@ -44,25 +50,29 @@ VhostIOVATree *vhost_iova_tree_new(hwaddr iova_first, hwaddr iova_last) tree->iova_last = iova_last; tree->iova_taddr_map = iova_tree_new(); + tree->iova_map = iova_tree_new(); + tree->gpa_iova_map = gpa_tree_new(); return tree; } /** - * Delete an iova tree + * Delete a VhostIOVATree */ void vhost_iova_tree_delete(VhostIOVATree *iova_tree) { iova_tree_destroy(iova_tree->iova_taddr_map); + iova_tree_destroy(iova_tree->iova_map); + iova_tree_destroy(iova_tree->gpa_iova_map); g_free(iova_tree); } /** * Find the IOVA address stored from a memory address * - * @tree: The iova tree + * @tree: The VhostIOVATree * @map: The map with the memory address * - * Return the stored mapping, or NULL if not found. + * Returns the stored IOVA->HVA mapping, or NULL if not found. */ const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *tree, const DMAMap *map) @@ -71,40 +81,111 @@ const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *tree, } /** - * Allocate a new mapping + * Allocate a new IOVA range and add the mapping to the IOVA->HVA tree * - * @tree: The iova tree - * @map: The iova map + * @tree: The VhostIOVATree + * @map: The IOVA mapping + * @taddr: The translated address (HVA) * * Returns: * - IOVA_OK if the map fits in the container * - IOVA_ERR_INVALID if the map does not make sense (like size overflow) * - IOVA_ERR_NOMEM if tree cannot allocate more space. * - * It returns assignated iova in map->iova if return value is VHOST_DMA_MAP_OK. + * It returns an assigned IOVA in map->iova if the return value is IOVA_OK. */ -int vhost_iova_tree_map_alloc(VhostIOVATree *tree, DMAMap *map) +int vhost_iova_tree_map_alloc(VhostIOVATree *tree, DMAMap *map, hwaddr taddr) { + int ret; + /* Some vhost devices do not like addr 0. 
Skip first page */ hwaddr iova_first = tree->iova_first ?: qemu_real_host_page_size(); - if (map->translated_addr + map->size < map->translated_addr || - map->perm == IOMMU_NONE) { + if (taddr + map->size < taddr || map->perm == IOMMU_NONE) { return IOVA_ERR_INVALID; } - /* Allocate a node in IOVA address */ - return iova_tree_alloc_map(tree->iova_taddr_map, map, iova_first, - tree->iova_last); + /* Allocate a node in the IOVA-only tree */ + ret = iova_tree_alloc_map(tree->iova_map, map, iova_first, tree->iova_last); + if (unlikely(ret != IOVA_OK)) { + return ret; + } + + /* Insert a node in the IOVA->HVA tree */ + map->translated_addr = taddr; + return iova_tree_insert(tree->iova_taddr_map, map); } /** - * Remove existing mappings from iova tree + * Remove existing mappings from the IOVA-only and IOVA->HVA trees * - * @iova_tree: The vhost iova tree + * @iova_tree: The VhostIOVATree * @map: The map to remove */ void vhost_iova_tree_remove(VhostIOVATree *iova_tree, DMAMap map) { iova_tree_remove(iova_tree->iova_taddr_map, map); + iova_tree_remove(iova_tree->iova_map, map); +} + +/** + * Find the IOVA address stored from a guest memory address (GPA) + * + * @tree: The VhostIOVATree + * @map: The map with the guest memory address + * + * Returns the stored GPA->IOVA mapping, or NULL if not found. + */ +const DMAMap *vhost_iova_tree_find_gpa(const VhostIOVATree *tree, + const DMAMap *map) +{ + return iova_tree_find_iova(tree->gpa_iova_map, map); +} + +/** + * Allocate a new IOVA range and add the mapping to the GPA->IOVA tree + * + * @tree: The VhostIOVATree + * @map: The IOVA mapping + * @taddr: The translated address (GPA) + * + * Returns: + * - IOVA_OK if the map fits both containers + * - IOVA_ERR_INVALID if the map does not make sense (like size overflow) + * - IOVA_ERR_NOMEM if the IOVA-only tree cannot allocate more space + * + * It returns an assigned IOVA in map->iova if the return value is IOVA_OK. + */ +int vhost_iova_tree_map_alloc_gpa(VhostIOVATree *tree, DMAMap *map, hwaddr taddr) +{ + int ret; + + /* Some vhost devices don't like addr 0. 
Skip first page */ + hwaddr iova_first = tree->iova_first ?: qemu_real_host_page_size(); + + if (taddr + map->size < taddr || map->perm == IOMMU_NONE) { + return IOVA_ERR_INVALID; + } + + /* Allocate a node in the IOVA-only tree */ + ret = iova_tree_alloc_map(tree->iova_map, map, iova_first, tree->iova_last); + if (unlikely(ret != IOVA_OK)) { + return ret; + } + + /* Insert a node in the GPA->IOVA tree */ + map->translated_addr = taddr; + return gpa_tree_insert(tree->gpa_iova_map, map); +} + +/** + * Remove existing mappings from the IOVA-only and GPA->IOVA trees + * + * @tree: The VhostIOVATree + * @map: The map to remove + */ +void vhost_iova_tree_remove_gpa(VhostIOVATree *iova_tree, DMAMap map) +{ + iova_tree_remove(iova_tree->gpa_iova_map, map); + iova_tree_remove(iova_tree->iova_map, map); } diff --git a/hw/virtio/vhost-iova-tree.h b/hw/virtio/vhost-iova-tree.h index 4adfd79ff03..08f63b61cd1 100644 --- a/hw/virtio/vhost-iova-tree.h +++ b/hw/virtio/vhost-iova-tree.h @@ -11,7 +11,7 @@ #define HW_VIRTIO_VHOST_IOVA_TREE_H #include "qemu/iova-tree.h" -#include "exec/memory.h" +#include "system/memory.h" typedef struct VhostIOVATree VhostIOVATree; @@ -21,7 +21,13 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostIOVATree, vhost_iova_tree_delete); const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *iova_tree, const DMAMap *map); -int vhost_iova_tree_map_alloc(VhostIOVATree *iova_tree, DMAMap *map); +int vhost_iova_tree_map_alloc(VhostIOVATree *iova_tree, DMAMap *map, + hwaddr taddr); void vhost_iova_tree_remove(VhostIOVATree *iova_tree, DMAMap map); +const DMAMap *vhost_iova_tree_find_gpa(const VhostIOVATree *iova_tree, + const DMAMap *map); +int vhost_iova_tree_map_alloc_gpa(VhostIOVATree *iova_tree, DMAMap *map, + hwaddr taddr); +void vhost_iova_tree_remove_gpa(VhostIOVATree *iova_tree, DMAMap map); #endif diff --git a/hw/virtio/vhost-scsi-pci.c b/hw/virtio/vhost-scsi-pci.c index 08980bc23bd..7399acef8e2 100644 --- a/hw/virtio/vhost-scsi-pci.c +++ b/hw/virtio/vhost-scsi-pci.c @@ -38,10 +38,9 @@ struct VHostSCSIPCI { VHostSCSI vdev; }; -static Property vhost_scsi_pci_properties[] = { +static const Property vhost_scsi_pci_properties[] = { DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED), - DEFINE_PROP_END_OF_LIST(), }; static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -62,7 +61,7 @@ static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data) +static void vhost_scsi_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index fc5f408f778..2481d49345d 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -78,24 +78,39 @@ uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq) * @vaddr: Translated IOVA addresses * @iovec: Source qemu's VA addresses * @num: Length of iovec and minimum length of vaddr + * @gpas: Descriptors' GPAs, if backed by guest memory */ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq, hwaddr *addrs, const struct iovec *iovec, - size_t num) + size_t num, const hwaddr *gpas) { if (num == 0) { return true; } for (size_t i = 0; i < num; ++i) { - DMAMap needle = { - .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base, - .size = iovec[i].iov_len, - }; 
Int128 needle_last, map_last; size_t off; + const DMAMap *map; + DMAMap needle; + + /* Check if the descriptor is backed by guest memory */ + if (gpas) { + /* Search the GPA->IOVA tree */ + needle = (DMAMap) { + .translated_addr = gpas[i], + .size = iovec[i].iov_len, + }; + map = vhost_iova_tree_find_gpa(svq->iova_tree, &needle); + } else { + /* Search the IOVA->HVA tree */ + needle = (DMAMap) { + .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base, + .size = iovec[i].iov_len, + }; + map = vhost_iova_tree_find_iova(svq->iova_tree, &needle); + } - const DMAMap *map = vhost_iova_tree_find_iova(svq->iova_tree, &needle); /* * Map cannot be NULL since iova map contains all guest space and * qemu already has a physical address mapped @@ -130,6 +145,7 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq, * @sg: Cache for hwaddr * @iovec: The iovec from the guest * @num: iovec length + * @addr: Descriptors' GPAs, if backed by guest memory * @more_descs: True if more descriptors come in the chain * @write: True if they are writeable descriptors * @@ -137,7 +153,8 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq, */ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, const struct iovec *iovec, size_t num, - bool more_descs, bool write) + const hwaddr *addr, bool more_descs, + bool write) { uint16_t i = svq->free_head, last = svq->free_head; unsigned n; @@ -149,7 +166,7 @@ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, return true; } - ok = vhost_svq_translate_addr(svq, sg, iovec, num); + ok = vhost_svq_translate_addr(svq, sg, iovec, num, addr); if (unlikely(!ok)) { return false; } @@ -165,17 +182,18 @@ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, descs[i].len = cpu_to_le32(iovec[n].iov_len); last = i; - i = cpu_to_le16(svq->desc_next[i]); + i = svq->desc_next[i]; } - svq->free_head = le16_to_cpu(svq->desc_next[last]); + svq->free_head = svq->desc_next[last]; return true; } static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, const struct iovec *out_sg, size_t out_num, + const hwaddr *out_addr, const struct iovec *in_sg, size_t in_num, - unsigned *head) + const hwaddr *in_addr, unsigned *head) { unsigned avail_idx; vring_avail_t *avail = svq->vring.avail; @@ -191,13 +209,14 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, return false; } - ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, in_num > 0, - false); + ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, out_addr, + in_num > 0, false); if (unlikely(!ok)) { return false; } - ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, false, true); + ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, in_addr, false, + true); if (unlikely(!ok)) { return false; } @@ -228,10 +247,12 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq) smp_mb(); if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) { - uint16_t avail_event = *(uint16_t *)(&svq->vring.used->ring[svq->vring.num]); + uint16_t avail_event = le16_to_cpu( + *(uint16_t *)(&svq->vring.used->ring[svq->vring.num])); needs_kick = vring_need_event(avail_event, svq->shadow_avail_idx, svq->shadow_avail_idx - 1); } else { - needs_kick = !(svq->vring.used->flags & VRING_USED_F_NO_NOTIFY); + needs_kick = + !(svq->vring.used->flags & cpu_to_le16(VRING_USED_F_NO_NOTIFY)); } if (!needs_kick) { @@ -247,8 +268,9 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq) * Return -EINVAL if element is invalid, 
-ENOSPC if dev queue is full */ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, - size_t out_num, const struct iovec *in_sg, size_t in_num, - VirtQueueElement *elem) + size_t out_num, const hwaddr *out_addr, + const struct iovec *in_sg, size_t in_num, + const hwaddr *in_addr, VirtQueueElement *elem) { unsigned qemu_head; unsigned ndescs = in_num + out_num; @@ -258,7 +280,8 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, return -ENOSPC; } - ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head); + ok = vhost_svq_add_split(svq, out_sg, out_num, out_addr, in_sg, in_num, + in_addr, &qemu_head); if (unlikely(!ok)) { return -EINVAL; } @@ -274,8 +297,8 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, static int vhost_svq_add_element(VhostShadowVirtqueue *svq, VirtQueueElement *elem) { - return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->in_sg, - elem->in_num, elem); + return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->out_addr, + elem->in_sg, elem->in_num, elem->in_addr, elem); } /** @@ -365,7 +388,7 @@ static bool vhost_svq_more_used(VhostShadowVirtqueue *svq) return true; } - svq->shadow_used_idx = cpu_to_le16(*(volatile uint16_t *)used_idx); + svq->shadow_used_idx = le16_to_cpu(*(volatile uint16_t *)used_idx); return svq->last_used_idx != svq->shadow_used_idx; } @@ -383,7 +406,7 @@ static bool vhost_svq_enable_notification(VhostShadowVirtqueue *svq) { if (virtio_vdev_has_feature(svq->vdev, VIRTIO_RING_F_EVENT_IDX)) { uint16_t *used_event = (uint16_t *)&svq->vring.avail->ring[svq->vring.num]; - *used_event = svq->shadow_used_idx; + *used_event = cpu_to_le16(svq->shadow_used_idx); } else { svq->vring.avail->flags &= ~cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT); } @@ -408,12 +431,13 @@ static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq, uint16_t num, uint16_t i) { for (uint16_t j = 0; j < (num - 1); ++j) { - i = le16_to_cpu(svq->desc_next[i]); + i = svq->desc_next[i]; } return i; } +G_GNUC_WARN_UNUSED_RESULT static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq, uint32_t *len) { @@ -526,10 +550,11 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq, size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num) { size_t len = 0; - uint32_t r; while (num--) { + g_autofree VirtQueueElement *elem = NULL; int64_t start_us = g_get_monotonic_time(); + uint32_t r = 0; do { if (vhost_svq_more_used(svq)) { @@ -541,7 +566,7 @@ size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num) } } while (true); - vhost_svq_get_buf(svq, &r); + elem = vhost_svq_get_buf(svq, &r); len += r; } @@ -681,7 +706,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, svq->desc_state = g_new0(SVQDescState, svq->vring.num); svq->desc_next = g_new0(uint16_t, svq->vring.num); for (unsigned i = 0; i < svq->vring.num - 1; i++) { - svq->desc_next[i] = cpu_to_le16(i + 1); + svq->desc_next[i] = i + 1; } } diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h index 19c842a15b7..9c273739d6d 100644 --- a/hw/virtio/vhost-shadow-virtqueue.h +++ b/hw/virtio/vhost-shadow-virtqueue.h @@ -118,8 +118,9 @@ uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq); void vhost_svq_push_elem(VhostShadowVirtqueue *svq, const VirtQueueElement *elem, uint32_t len); int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, - size_t out_num, const struct iovec *in_sg, size_t in_num, - VirtQueueElement *elem); + size_t 
out_num, const hwaddr *out_addr, + const struct iovec *in_sg, size_t in_num, + const hwaddr *in_addr, VirtQueueElement *elem); size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num); void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd); diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c index 2bc3423326e..ff67a020b47 100644 --- a/hw/virtio/vhost-user-base.c +++ b/hw/virtio/vhost-user-base.c @@ -66,7 +66,7 @@ static void vub_start(VirtIODevice *vdev) vhost_dev_disable_notifiers(&vub->vhost_dev, vdev); } -static void vub_stop(VirtIODevice *vdev) +static int vub_stop(VirtIODevice *vdev) { VHostUserBase *vub = VHOST_USER_BASE(vdev); BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); @@ -74,34 +74,39 @@ static void vub_stop(VirtIODevice *vdev) int ret; if (!k->set_guest_notifiers) { - return; + return 0; } - vhost_dev_stop(&vub->vhost_dev, vdev, true); + ret = vhost_dev_stop(&vub->vhost_dev, vdev, true); - ret = k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, false); - if (ret < 0) { + if (k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, false) < 0) { error_report("vhost guest notifier cleanup failed: %d", ret); - return; + return -1; } vhost_dev_disable_notifiers(&vub->vhost_dev, vdev); + return ret; } -static void vub_set_status(VirtIODevice *vdev, uint8_t status) +static int vub_set_status(VirtIODevice *vdev, uint8_t status) { VHostUserBase *vub = VHOST_USER_BASE(vdev); bool should_start = virtio_device_should_start(vdev, status); if (vhost_dev_is_started(&vub->vhost_dev) == should_start) { - return; + return 0; } if (should_start) { vub_start(vdev); } else { - vub_stop(vdev); + int ret; + ret = vub_stop(vdev); + if (ret < 0) { + return ret; + } } + return 0; } /* @@ -348,7 +353,7 @@ static void vub_device_unrealize(DeviceState *dev) do_vhost_user_cleanup(vdev, vub); } -static void vub_class_init(ObjectClass *klass, void *data) +static void vub_class_init(ObjectClass *klass, const void *data) { VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/virtio/vhost-user-blk-pci.c b/hw/virtio/vhost-user-blk-pci.c index eef8641a989..904369f5a3d 100644 --- a/hw/virtio/vhost-user-blk-pci.c +++ b/hw/virtio/vhost-user-blk-pci.c @@ -43,11 +43,10 @@ struct VHostUserBlkPCI { VHostUserBlk vdev; }; -static Property vhost_user_blk_pci_properties[] = { +static const Property vhost_user_blk_pci_properties[] = { DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED), - DEFINE_PROP_END_OF_LIST(), }; static void vhost_user_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -66,7 +65,7 @@ static void vhost_user_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void vhost_user_blk_pci_class_init(ObjectClass *klass, void *data) +static void vhost_user_blk_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/vhost-user-device-pci.c b/hw/virtio/vhost-user-device-pci.c index efaf55d3dd4..f10bac874e7 100644 --- a/hw/virtio/vhost-user-device-pci.c +++ b/hw/virtio/vhost-user-device-pci.c @@ -31,7 +31,8 @@ static void vhost_user_device_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void vhost_user_device_pci_class_init(ObjectClass *klass, void *data) +static void vhost_user_device_pci_class_init(ObjectClass *klass, 
+ const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/vhost-user-device.c b/hw/virtio/vhost-user-device.c index 67aa934710d..3939bdf7552 100644 --- a/hw/virtio/vhost-user-device.c +++ b/hw/virtio/vhost-user-device.c @@ -29,16 +29,15 @@ static const VMStateDescription vud_vmstate = { .unmigratable = 1, }; -static Property vud_properties[] = { +static const Property vud_properties[] = { DEFINE_PROP_CHR("chardev", VHostUserBase, chardev), DEFINE_PROP_UINT16("virtio-id", VHostUserBase, virtio_id, 0), DEFINE_PROP_UINT32("vq_size", VHostUserBase, vq_size, 64), DEFINE_PROP_UINT32("num_vqs", VHostUserBase, num_vqs, 1), DEFINE_PROP_UINT32("config_size", VHostUserBase, config_size, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void vud_class_init(ObjectClass *klass, void *data) +static void vud_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/virtio/vhost-user-fs-pci.c b/hw/virtio/vhost-user-fs-pci.c index 6829b8b7435..1490c118bca 100644 --- a/hw/virtio/vhost-user-fs-pci.c +++ b/hw/virtio/vhost-user-fs-pci.c @@ -29,10 +29,9 @@ typedef struct VHostUserFSPCI VHostUserFSPCI; DECLARE_INSTANCE_CHECKER(VHostUserFSPCI, VHOST_USER_FS_PCI, TYPE_VHOST_USER_FS_PCI) -static Property vhost_user_fs_pci_properties[] = { +static const Property vhost_user_fs_pci_properties[] = { DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED), - DEFINE_PROP_END_OF_LIST(), }; static void vhost_user_fs_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -48,7 +47,7 @@ static void vhost_user_fs_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void vhost_user_fs_pci_class_init(ObjectClass *klass, void *data) +static void vhost_user_fs_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c index 32ee7f496d6..e77c69eb12e 100644 --- a/hw/virtio/vhost-user-fs.c +++ b/hw/virtio/vhost-user-fs.c @@ -23,7 +23,7 @@ #include "hw/virtio/vhost.h" #include "hw/virtio/vhost-user-fs.h" #include "monitor/monitor.h" -#include "sysemu/sysemu.h" +#include "system/system.h" static const int user_feature_bits[] = { VIRTIO_F_VERSION_1, @@ -100,7 +100,7 @@ static void vuf_start(VirtIODevice *vdev) vhost_dev_disable_notifiers(&fs->vhost_dev, vdev); } -static void vuf_stop(VirtIODevice *vdev) +static int vuf_stop(VirtIODevice *vdev) { VHostUserFS *fs = VHOST_USER_FS(vdev); BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); @@ -108,34 +108,39 @@ static void vuf_stop(VirtIODevice *vdev) int ret; if (!k->set_guest_notifiers) { - return; + return 0; } - vhost_dev_stop(&fs->vhost_dev, vdev, true); + ret = vhost_dev_stop(&fs->vhost_dev, vdev, true); - ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false); - if (ret < 0) { + if (k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false) < 0) { error_report("vhost guest notifier cleanup failed: %d", ret); - return; + return -1; } vhost_dev_disable_notifiers(&fs->vhost_dev, vdev); + return ret; } -static void vuf_set_status(VirtIODevice *vdev, uint8_t status) +static int vuf_set_status(VirtIODevice *vdev, uint8_t status) { VHostUserFS *fs = VHOST_USER_FS(vdev); bool should_start = virtio_device_should_start(vdev, status); if (vhost_dev_is_started(&fs->vhost_dev) == should_start) { - return; + return 0; } if (should_start) 
{ vuf_start(vdev); } else { - vuf_stop(vdev); + int ret; + ret = vuf_stop(vdev); + if (ret < 0) { + return ret; + } } + return 0; } static uint64_t vuf_get_features(VirtIODevice *vdev, @@ -267,7 +272,6 @@ static void vuf_device_realize(DeviceState *dev, Error **errp) g_free(fs->req_vqs); virtio_cleanup(vdev); g_free(fs->vhost_dev.vqs); - return; } static void vuf_device_unrealize(DeviceState *dev) @@ -403,13 +407,12 @@ static const VMStateDescription vuf_backend_vmstate = { }, }; -static Property vuf_properties[] = { +static const Property vuf_properties[] = { DEFINE_PROP_CHR("chardev", VHostUserFS, conf.chardev), DEFINE_PROP_STRING("tag", VHostUserFS, conf.tag), DEFINE_PROP_UINT16("num-request-queues", VHostUserFS, conf.num_request_queues, 1), DEFINE_PROP_UINT16("queue-size", VHostUserFS, conf.queue_size, 128), - DEFINE_PROP_END_OF_LIST(), }; static void vuf_instance_init(Object *obj) @@ -420,7 +423,7 @@ static void vuf_instance_init(Object *obj) "/filesystem@0", DEVICE(obj)); } -static void vuf_class_init(ObjectClass *klass, void *data) +static void vuf_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/virtio/vhost-user-gpio-pci.c b/hw/virtio/vhost-user-gpio-pci.c index b3028a24a19..9b165b54f8f 100644 --- a/hw/virtio/vhost-user-gpio-pci.c +++ b/hw/virtio/vhost-user-gpio-pci.c @@ -32,7 +32,7 @@ static void vhost_user_gpio_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void vhost_user_gpio_pci_class_init(ObjectClass *klass, void *data) +static void vhost_user_gpio_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/vhost-user-gpio.c b/hw/virtio/vhost-user-gpio.c index 9f37c254159..a7fd49b10a3 100644 --- a/hw/virtio/vhost-user-gpio.c +++ b/hw/virtio/vhost-user-gpio.c @@ -14,9 +14,8 @@ #include "standard-headers/linux/virtio_ids.h" #include "standard-headers/linux/virtio_gpio.h" -static Property vgpio_properties[] = { +static const Property vgpio_properties[] = { DEFINE_PROP_CHR("chardev", VHostUserBase, chardev), - DEFINE_PROP_END_OF_LIST(), }; static void vgpio_realize(DeviceState *dev, Error **errp) @@ -37,7 +36,7 @@ static const VMStateDescription vu_gpio_vmstate = { .unmigratable = 1, }; -static void vu_gpio_class_init(ObjectClass *klass, void *data) +static void vu_gpio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass); diff --git a/hw/virtio/vhost-user-i2c-pci.c b/hw/virtio/vhost-user-i2c-pci.c index 00ac10941fa..692cd66fde6 100644 --- a/hw/virtio/vhost-user-i2c-pci.c +++ b/hw/virtio/vhost-user-i2c-pci.c @@ -32,7 +32,7 @@ static void vhost_user_i2c_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void vhost_user_i2c_pci_class_init(ObjectClass *klass, void *data) +static void vhost_user_i2c_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c index a464f5e0399..ae007fe97d9 100644 --- a/hw/virtio/vhost-user-i2c.c +++ b/hw/virtio/vhost-user-i2c.c @@ -14,9 +14,8 @@ #include "qemu/error-report.h" #include "standard-headers/linux/virtio_ids.h" -static Property vi2c_properties[] = { +static const Property 
vi2c_properties[] = { DEFINE_PROP_CHR("chardev", VHostUserBase, chardev), - DEFINE_PROP_END_OF_LIST(), }; static void vi2c_realize(DeviceState *dev, Error **errp) @@ -37,7 +36,7 @@ static const VMStateDescription vu_i2c_vmstate = { .unmigratable = 1, }; -static void vu_i2c_class_init(ObjectClass *klass, void *data) +static void vu_i2c_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass); diff --git a/hw/virtio/vhost-user-input.c b/hw/virtio/vhost-user-input.c index bedec0468c3..5cfc5bbb564 100644 --- a/hw/virtio/vhost-user-input.c +++ b/hw/virtio/vhost-user-input.c @@ -7,9 +7,8 @@ #include "qemu/osdep.h" #include "hw/virtio/virtio-input.h" -static Property vinput_properties[] = { +static const Property vinput_properties[] = { DEFINE_PROP_CHR("chardev", VHostUserBase, chardev), - DEFINE_PROP_END_OF_LIST(), }; static void vinput_realize(DeviceState *dev, Error **errp) @@ -31,7 +30,7 @@ static const VMStateDescription vmstate_vhost_input = { .unmigratable = 1, }; -static void vhost_input_class_init(ObjectClass *klass, void *data) +static void vhost_input_class_init(ObjectClass *klass, const void *data) { VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/virtio/vhost-user-rng-pci.c b/hw/virtio/vhost-user-rng-pci.c index f64935453b6..9f45fc6f355 100644 --- a/hw/virtio/vhost-user-rng-pci.c +++ b/hw/virtio/vhost-user-rng-pci.c @@ -23,10 +23,9 @@ typedef struct VHostUserRNGPCI VHostUserRNGPCI; DECLARE_INSTANCE_CHECKER(VHostUserRNGPCI, VHOST_USER_RNG_PCI, TYPE_VHOST_USER_RNG_PCI) -static Property vhost_user_rng_pci_properties[] = { +static const Property vhost_user_rng_pci_properties[] = { DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED), - DEFINE_PROP_END_OF_LIST(), }; static void vhost_user_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -41,7 +40,7 @@ static void vhost_user_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void vhost_user_rng_pci_class_init(ObjectClass *klass, void *data) +static void vhost_user_rng_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c index 01879c863df..61dadcda054 100644 --- a/hw/virtio/vhost-user-rng.c +++ b/hw/virtio/vhost-user-rng.c @@ -20,9 +20,8 @@ static const VMStateDescription vu_rng_vmstate = { .unmigratable = 1, }; -static Property vrng_properties[] = { +static const Property vrng_properties[] = { DEFINE_PROP_CHR("chardev", VHostUserBase, chardev), - DEFINE_PROP_END_OF_LIST(), }; static void vu_rng_base_realize(DeviceState *dev, Error **errp) @@ -38,7 +37,7 @@ static void vu_rng_base_realize(DeviceState *dev, Error **errp) vubs->parent_realize(dev, errp); } -static void vu_rng_class_init(ObjectClass *klass, void *data) +static void vu_rng_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass); diff --git a/hw/virtio/vhost-user-scmi-pci.c b/hw/virtio/vhost-user-scmi-pci.c index 7f53af7fced..0ab56a50bb3 100644 --- a/hw/virtio/vhost-user-scmi-pci.c +++ b/hw/virtio/vhost-user-scmi-pci.c @@ -31,7 +31,7 @@ static void vhost_user_scmi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void 
vhost_user_scmi_pci_class_init(ObjectClass *klass, void *data) +static void vhost_user_scmi_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/vhost-user-scmi.c b/hw/virtio/vhost-user-scmi.c index 300847e6726..f9264c4374e 100644 --- a/hw/virtio/vhost-user-scmi.c +++ b/hw/virtio/vhost-user-scmi.c @@ -83,7 +83,7 @@ static int vu_scmi_start(VirtIODevice *vdev) return ret; } -static void vu_scmi_stop(VirtIODevice *vdev) +static int vu_scmi_stop(VirtIODevice *vdev) { VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev); BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); @@ -93,41 +93,46 @@ static void vu_scmi_stop(VirtIODevice *vdev) /* vhost_dev_is_started() check in the callers is not fully reliable. */ if (!scmi->started_vu) { - return; + return 0; } scmi->started_vu = false; if (!k->set_guest_notifiers) { - return; + return 0; } - vhost_dev_stop(vhost_dev, vdev, true); + ret = vhost_dev_stop(vhost_dev, vdev, true); - ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false); - if (ret < 0) { + if (k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false) < 0) { error_report("vhost guest notifier cleanup failed: %d", ret); - return; + return -1; } vhost_dev_disable_notifiers(vhost_dev, vdev); + return ret; } -static void vu_scmi_set_status(VirtIODevice *vdev, uint8_t status) +static int vu_scmi_set_status(VirtIODevice *vdev, uint8_t status) { VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev); bool should_start = virtio_device_should_start(vdev, status); if (!scmi->connected) { - return; + return -1; } if (vhost_dev_is_started(&scmi->vhost_dev) == should_start) { - return; + return 0; } if (should_start) { vu_scmi_start(vdev); } else { - vu_scmi_stop(vdev); + int ret; + ret = vu_scmi_stop(vdev); + if (ret < 0) { + return ret; + } } + return 0; } static uint64_t vu_scmi_get_features(VirtIODevice *vdev, uint64_t features, @@ -258,8 +263,6 @@ static void vu_scmi_device_realize(DeviceState *dev, Error **errp) qemu_chr_fe_set_handlers(&scmi->chardev, NULL, NULL, vu_scmi_event, NULL, dev, NULL, true); - - return; } static void vu_scmi_device_unrealize(DeviceState *dev) @@ -277,12 +280,11 @@ static const VMStateDescription vu_scmi_vmstate = { .unmigratable = 1, }; -static Property vu_scmi_properties[] = { +static const Property vu_scmi_properties[] = { DEFINE_PROP_CHR("chardev", VHostUserSCMI, chardev), - DEFINE_PROP_END_OF_LIST(), }; -static void vu_scmi_class_init(ObjectClass *klass, void *data) +static void vu_scmi_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/virtio/vhost-user-scsi-pci.c b/hw/virtio/vhost-user-scsi-pci.c index 75882e3cf94..994e51a37bb 100644 --- a/hw/virtio/vhost-user-scsi-pci.c +++ b/hw/virtio/vhost-user-scsi-pci.c @@ -29,7 +29,7 @@ #include "hw/pci/msi.h" #include "hw/pci/msix.h" #include "hw/loader.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "hw/virtio/virtio-pci.h" #include "qom/object.h" @@ -44,10 +44,9 @@ struct VHostUserSCSIPCI { VHostUserSCSI vdev; }; -static Property vhost_user_scsi_pci_properties[] = { +static const Property vhost_user_scsi_pci_properties[] = { DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED), - DEFINE_PROP_END_OF_LIST(), }; static void vhost_user_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -68,7 +67,7 @@ static void vhost_user_scsi_pci_realize(VirtIOPCIProxy 
*vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void vhost_user_scsi_pci_class_init(ObjectClass *klass, void *data) +static void vhost_user_scsi_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/vhost-user-snd-pci.c b/hw/virtio/vhost-user-snd-pci.c index d61cfdae631..f5015fb6c4e 100644 --- a/hw/virtio/vhost-user-snd-pci.c +++ b/hw/virtio/vhost-user-snd-pci.c @@ -23,10 +23,6 @@ typedef struct VHostUserSoundPCI VHostUserSoundPCI; DECLARE_INSTANCE_CHECKER(VHostUserSoundPCI, VHOST_USER_SND_PCI, TYPE_VHOST_USER_SND_PCI) -static Property vhost_user_snd_pci_properties[] = { - DEFINE_PROP_END_OF_LIST(), -}; - static void vhost_user_snd_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) { VHostUserSoundPCI *dev = VHOST_USER_SND_PCI(vpci_dev); @@ -37,14 +33,13 @@ static void vhost_user_snd_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void vhost_user_snd_pci_class_init(ObjectClass *klass, void *data) +static void vhost_user_snd_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); k->realize = vhost_user_snd_pci_realize; set_bit(DEVICE_CATEGORY_SOUND, dc->categories); - device_class_set_props(dc, vhost_user_snd_pci_properties); pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */ pcidev_k->revision = 0x00; diff --git a/hw/virtio/vhost-user-snd.c b/hw/virtio/vhost-user-snd.c index 9a217543f85..732411c6551 100644 --- a/hw/virtio/vhost-user-snd.c +++ b/hw/virtio/vhost-user-snd.c @@ -16,30 +16,45 @@ #include "standard-headers/linux/virtio_ids.h" #include "standard-headers/linux/virtio_snd.h" +static const VirtIOFeature feature_sizes[] = { + {.flags = 1ULL << VIRTIO_SND_F_CTLS, + .end = endof(struct virtio_snd_config, controls)}, + {} +}; + +static const VirtIOConfigSizeParams cfg_size_params = { + .min_size = endof(struct virtio_snd_config, chmaps), + .max_size = sizeof(struct virtio_snd_config), + .feature_sizes = feature_sizes +}; + static const VMStateDescription vu_snd_vmstate = { .name = "vhost-user-snd", .unmigratable = 1, }; -static Property vsnd_properties[] = { +static const Property vsnd_properties[] = { DEFINE_PROP_CHR("chardev", VHostUserBase, chardev), - DEFINE_PROP_END_OF_LIST(), + DEFINE_PROP_BIT64("controls", VHostUserBase, + parent_obj.host_features, VIRTIO_SND_F_CTLS, false), }; static void vu_snd_base_realize(DeviceState *dev, Error **errp) { VHostUserBase *vub = VHOST_USER_BASE(dev); VHostUserBaseClass *vubs = VHOST_USER_BASE_GET_CLASS(dev); + VirtIODevice *vdev = &vub->parent_obj; vub->virtio_id = VIRTIO_ID_SOUND; vub->num_vqs = 4; - vub->config_size = sizeof(struct virtio_snd_config); + vub->config_size = virtio_get_config_size(&cfg_size_params, + vdev->host_features); vub->vq_size = 64; vubs->parent_realize(dev, errp); } -static void vu_snd_class_init(ObjectClass *klass, void *data) +static void vu_snd_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass); diff --git a/hw/virtio/vhost-user-vsock-pci.c b/hw/virtio/vhost-user-vsock-pci.c index e5a86e80136..adb877b6e05 100644 --- a/hw/virtio/vhost-user-vsock-pci.c +++ b/hw/virtio/vhost-user-vsock-pci.c @@ -31,9 +31,8 @@ struct 
VHostUserVSockPCI { /* vhost-user-vsock-pci */ -static Property vhost_user_vsock_pci_properties[] = { +static const Property vhost_user_vsock_pci_properties[] = { DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3), - DEFINE_PROP_END_OF_LIST(), }; static void vhost_user_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -47,7 +46,8 @@ static void vhost_user_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void vhost_user_vsock_pci_class_init(ObjectClass *klass, void *data) +static void vhost_user_vsock_pci_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c index da3b0e02297..993c2873482 100644 --- a/hw/virtio/vhost-user-vsock.c +++ b/hw/virtio/vhost-user-vsock.c @@ -54,23 +54,28 @@ const VhostDevConfigOps vsock_ops = { .vhost_dev_config_notifier = vuv_handle_config_change, }; -static void vuv_set_status(VirtIODevice *vdev, uint8_t status) +static int vuv_set_status(VirtIODevice *vdev, uint8_t status) { VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); bool should_start = virtio_device_should_start(vdev, status); + int ret; if (vhost_dev_is_started(&vvc->vhost_dev) == should_start) { - return; + return 0; } if (should_start) { - int ret = vhost_vsock_common_start(vdev); + ret = vhost_vsock_common_start(vdev); if (ret < 0) { - return; + return ret; } } else { - vhost_vsock_common_stop(vdev); + ret = vhost_vsock_common_stop(vdev); + if (ret < 0) { + return ret; + } } + return 0; } static uint64_t vuv_get_features(VirtIODevice *vdev, @@ -128,7 +133,6 @@ static void vuv_device_realize(DeviceState *dev, Error **errp) err_virtio: vhost_vsock_common_unrealize(vdev); vhost_user_cleanup(&vsock->vhost_user); - return; } static void vuv_device_unrealize(DeviceState *dev) @@ -148,12 +152,11 @@ static void vuv_device_unrealize(DeviceState *dev) } -static Property vuv_properties[] = { +static const Property vuv_properties[] = { DEFINE_PROP_CHR("chardev", VHostUserVSock, conf.chardev), - DEFINE_PROP_END_OF_LIST(), }; -static void vuv_class_init(ObjectClass *klass, void *data) +static void vuv_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index 00561daa06e..1e1d6b0d6e0 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -19,16 +19,16 @@ #include "hw/virtio/virtio-net.h" #include "chardev/char-fe.h" #include "io/channel-socket.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/uuid.h" #include "qemu/sockets.h" -#include "sysemu/runstate.h" -#include "sysemu/cryptodev.h" +#include "system/runstate.h" +#include "system/cryptodev.h" #include "migration/postcopy-ram.h" #include "trace.h" -#include "exec/ramblock.h" +#include "system/ramblock.h" #include #include @@ -654,8 +654,6 @@ static void scrub_shadow_regions(struct vhost_dev *dev, } *nr_rem_reg = rm_idx; *nr_add_reg = add_idx; - - return; } static int send_remove_regions(struct vhost_dev *dev, @@ -1185,9 +1183,16 @@ static int vhost_user_set_vring_num(struct vhost_dev *dev, static void vhost_user_host_notifier_free(VhostUserHostNotifier *n) { - assert(n && n->unmap_addr); - munmap(n->unmap_addr, qemu_real_host_page_size()); - n->unmap_addr = NULL; + if (n->unmap_addr) { + 
munmap(n->unmap_addr, qemu_real_host_page_size()); + n->unmap_addr = NULL; + } + if (n->destroy) { + memory_region_transaction_begin(); + object_unparent(OBJECT(&n->mr)); + memory_region_transaction_commit(); + g_free(n); + } } /* @@ -1195,17 +1200,28 @@ static void vhost_user_host_notifier_free(VhostUserHostNotifier *n) * under rcu. */ static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n, - VirtIODevice *vdev) + VirtIODevice *vdev, bool destroy) { + /* + * if destroy == false and n->addr == NULL, we have nothing to do. + * so, just return. + */ + if (!n || (!destroy && !n->addr)) { + return; + } + if (n->addr) { if (vdev) { + memory_region_transaction_begin(); virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false); + memory_region_transaction_commit(); } assert(!n->unmap_addr); n->unmap_addr = n->addr; n->addr = NULL; - call_rcu(n, vhost_user_host_notifier_free, rcu); } + n->destroy = destroy; + call_rcu(n, vhost_user_host_notifier_free, rcu); } static int vhost_user_set_vring_base(struct vhost_dev *dev, @@ -1279,9 +1295,7 @@ static int vhost_user_get_vring_base(struct vhost_dev *dev, struct vhost_user *u = dev->opaque; VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index); - if (n) { - vhost_user_host_notifier_remove(n, dev->vdev); - } + vhost_user_host_notifier_remove(n, dev->vdev, false); ret = vhost_user_write(dev, &msg, NULL, 0); if (ret < 0) { @@ -1562,7 +1576,7 @@ static int vhost_user_backend_handle_vring_host_notifier(struct vhost_dev *dev, * new mapped address. */ n = fetch_or_create_notifier(user, queue_idx); - vhost_user_host_notifier_remove(n, vdev); + vhost_user_host_notifier_remove(n, vdev, false); if (area->u64 & VHOST_USER_VRING_NOFD_MASK) { return 0; @@ -1607,9 +1621,14 @@ vhost_user_backend_handle_shared_object_add(struct vhost_dev *dev, QemuUUID uuid; memcpy(uuid.data, object->uuid, sizeof(object->uuid)); - return virtio_add_vhost_device(&uuid, dev); + return !virtio_add_vhost_device(&uuid, dev); } +/* + * Handle VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE backend requests. + * + * Return: 0 on success, 1 on error. + */ static int vhost_user_backend_handle_shared_object_remove(struct vhost_dev *dev, VhostUserShared *object) @@ -1623,16 +1642,16 @@ vhost_user_backend_handle_shared_object_remove(struct vhost_dev *dev, struct vhost_dev *owner = virtio_lookup_vhost_device(&uuid); if (dev != owner) { /* Not allowed to remove non-owned entries */ - return 0; + return 1; } break; } default: /* Not allowed to remove non-owned entries */ - return 0; + return 1; } - return virtio_remove_resource(&uuid); + return !virtio_remove_resource(&uuid); } static bool vhost_user_send_resp(QIOChannel *ioc, VhostUserHeader *hdr, @@ -2736,15 +2755,7 @@ static int vhost_user_set_inflight_fd(struct vhost_dev *dev, static void vhost_user_state_destroy(gpointer data) { VhostUserHostNotifier *n = (VhostUserHostNotifier *) data; - if (n) { - vhost_user_host_notifier_remove(n, NULL); - object_unparent(OBJECT(&n->mr)); - /* - * We can't free until vhost_user_host_notifier_remove has - * done it's thing so schedule the free with RCU. 
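Aside: in the hunks above, the notifier's memory-region unparent and g_free move into the RCU callback, gated by the new destroy flag, so the "remap" and "full teardown" paths share one deferred cleanup. A compact standalone model of that flag (plain calls stand in for call_rcu/munmap; the model function names are mine):

#include <stdbool.h>
#include <stdlib.h>

typedef struct Notifier {
    void *addr;        /* currently mapped region, if any */
    void *unmap_addr;  /* region handed off for deferred unmap */
    bool destroy;      /* also free the notifier itself in the deferred step */
} Notifier;

/* deferred step: the real code runs this from an RCU callback */
static void notifier_deferred_free(Notifier *n)
{
    if (n->unmap_addr) {
        free(n->unmap_addr);      /* munmap() in the real code */
        n->unmap_addr = NULL;
    }
    if (n->destroy) {
        free(n);                  /* full teardown was requested */
    }
}

/* shared by "remap" (destroy = false) and "state destroy" (destroy = true) */
static void notifier_remove_model(Notifier *n, bool destroy)
{
    if (!n || (!destroy && !n->addr)) {
        return;                   /* nothing mapped and no teardown requested */
    }
    if (n->addr) {
        n->unmap_addr = n->addr;  /* defer the unmap */
        n->addr = NULL;
    }
    n->destroy = destroy;
    notifier_deferred_free(n);    /* deferred via call_rcu() in the real code */
}

int main(void)
{
    Notifier *n = calloc(1, sizeof(*n));
    n->addr = malloc(16);
    notifier_remove_model(n, false);  /* unmap only; the notifier survives */
    notifier_remove_model(n, true);   /* full teardown */
    return 0;
}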
- */ - g_free_rcu(n, rcu); - } + vhost_user_host_notifier_remove(n, NULL, true); } bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp) @@ -2765,9 +2776,7 @@ void vhost_user_cleanup(VhostUserState *user) if (!user->chr) { return; } - memory_region_transaction_begin(); user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true); - memory_region_transaction_commit(); user->chr = NULL; } diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 3cdaa12ed5e..7061b6e1a38 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -20,7 +20,7 @@ #include "hw/virtio/virtio-net.h" #include "hw/virtio/vhost-shadow-virtqueue.h" #include "hw/virtio/vhost-vdpa.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "migration/blocker.h" #include "qemu/cutils.h" #include "qemu/main-loop.h" @@ -209,6 +209,8 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) int ret; Int128 llend; Error *local_err = NULL; + MemoryRegion *mr; + hwaddr xlat; if (iotlb->target_as != &address_space_memory) { error_report("Wrong target AS \"%s\", only system memory is allowed", @@ -228,11 +230,14 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { bool read_only; - if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL, - &local_err)) { + mr = memory_translate_iotlb(iotlb, &xlat, &local_err); + if (!mr) { error_report_err(local_err); return; } + vaddr = memory_region_get_ram_ptr(mr) + xlat; + read_only = !(iotlb->perm & IOMMU_WO) || mr->readonly; + ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova, iotlb->addr_mask + 1, vaddr, read_only); if (ret) { @@ -288,8 +293,6 @@ static void vhost_vdpa_iommu_region_add(MemoryListener *listener, QLIST_INSERT_HEAD(&s->iommu_list, iommu, iommu_next); memory_region_iommu_replay(iommu->iommu_mr, &iommu->n); - - return; } static void vhost_vdpa_iommu_region_del(MemoryListener *listener, @@ -360,14 +363,20 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, llsize = int128_sub(llend, int128_make64(iova)); if (s->shadow_data) { int r; + hwaddr gpa = section->offset_within_address_space; - mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr, mem_region.size = int128_get64(llsize) - 1, mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly), - r = vhost_iova_tree_map_alloc(s->iova_tree, &mem_region); + r = vhost_iova_tree_map_alloc_gpa(s->iova_tree, &mem_region, gpa); if (unlikely(r != IOVA_OK)) { error_report("Can't allocate a mapping (%d)", r); + + if (mem_region.translated_addr == gpa) { + error_report("Insertion to GPA->IOVA tree failed"); + /* Remove the mapping from the IOVA-only tree */ + goto fail_map; + } goto fail; } @@ -386,7 +395,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, fail_map: if (s->shadow_data) { - vhost_iova_tree_remove(s->iova_tree, mem_region); + vhost_iova_tree_remove_gpa(s->iova_tree, mem_region); } fail: @@ -440,21 +449,18 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, if (s->shadow_data) { const DMAMap *result; - const void *vaddr = memory_region_get_ram_ptr(section->mr) + - section->offset_within_region + - (iova - section->offset_within_address_space); DMAMap mem_region = { - .translated_addr = (hwaddr)(uintptr_t)vaddr, + .translated_addr = section->offset_within_address_space, .size = int128_get64(llsize) - 1, }; - result = vhost_iova_tree_find_iova(s->iova_tree, &mem_region); + result 
= vhost_iova_tree_find_gpa(s->iova_tree, &mem_region); if (!result) { /* The memory listener map wasn't mapped */ return; } iova = result->iova; - vhost_iova_tree_remove(s->iova_tree, *result); + vhost_iova_tree_remove_gpa(s->iova_tree, *result); } vhost_vdpa_iotlb_batch_begin_once(s); /* @@ -593,6 +599,36 @@ static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v) v->shadow_vqs = g_steal_pointer(&shadow_vqs); } +static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) +{ + struct vhost_vdpa *v = dev->opaque; + + uint64_t features; + uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 | + 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH | + 0x1ULL << VHOST_BACKEND_F_IOTLB_ASID | + 0x1ULL << VHOST_BACKEND_F_SUSPEND; + int r; + + if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) { + return -EFAULT; + } + + features &= f; + + if (vhost_vdpa_first_dev(dev)) { + r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features); + if (r) { + return -EFAULT; + } + } + + dev->backend_cap = features; + v->shared->backend_cap = features; + + return 0; +} + static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) { struct vhost_vdpa *v = opaque; @@ -602,7 +638,12 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) v->dev = dev; dev->opaque = opaque ; - v->shared->listener = vhost_vdpa_memory_listener; + + ret = vhost_vdpa_set_backend_cap(dev); + if (unlikely(ret != 0)) { + return ret; + } + vhost_vdpa_init_svq(dev, v); error_propagate(&dev->migration_blocker, v->migration_blocker); @@ -638,6 +679,7 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER); + v->shared->listener = vhost_vdpa_memory_listener; return 0; } @@ -840,36 +882,6 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev, return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); } -static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) -{ - struct vhost_vdpa *v = dev->opaque; - - uint64_t features; - uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 | - 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH | - 0x1ULL << VHOST_BACKEND_F_IOTLB_ASID | - 0x1ULL << VHOST_BACKEND_F_SUSPEND; - int r; - - if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) { - return -EFAULT; - } - - features &= f; - - if (vhost_vdpa_first_dev(dev)) { - r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features); - if (r) { - return -EFAULT; - } - } - - dev->backend_cap = features; - v->shared->backend_cap = features; - - return 0; -} - static int vhost_vdpa_get_device_id(struct vhost_dev *dev, uint32_t *device_id) { @@ -887,8 +899,14 @@ static int vhost_vdpa_reset_device(struct vhost_dev *dev) ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status); trace_vhost_vdpa_reset_device(dev); + if (ret) { + return ret; + } + + memory_listener_unregister(&v->shared->listener); + v->shared->listener_registered = false; v->suspended = false; - return ret; + return 0; } static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx) @@ -1142,16 +1160,23 @@ static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev, * * @v: Vhost-vdpa device * @needle: The area to search iova + * @taddr: The translated address (HVA) * @errorp: Error pointer */ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle, - Error **errp) + hwaddr taddr, Error **errp) { int r; - r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle); + r = 
vhost_iova_tree_map_alloc(v->shared->iova_tree, needle, taddr); if (unlikely(r != IOVA_OK)) { error_setg(errp, "Cannot allocate iova (%d)", r); + + if (needle->translated_addr == taddr) { + error_append_hint(errp, "Insertion to IOVA->HVA tree failed"); + /* Remove the mapping from the IOVA-only tree */ + vhost_iova_tree_remove(v->shared->iova_tree, *needle); + } return false; } @@ -1192,11 +1217,11 @@ static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev, vhost_svq_get_vring_addr(svq, &svq_addr); driver_region = (DMAMap) { - .translated_addr = svq_addr.desc_user_addr, .size = driver_size - 1, .perm = IOMMU_RO, }; - ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp); + ok = vhost_vdpa_svq_map_ring(v, &driver_region, svq_addr.desc_user_addr, + errp); if (unlikely(!ok)) { error_prepend(errp, "Cannot create vq driver region: "); return false; @@ -1206,11 +1231,11 @@ static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev, addr->avail_user_addr = driver_region.iova + avail_offset; device_region = (DMAMap) { - .translated_addr = svq_addr.used_user_addr, .size = device_size - 1, .perm = IOMMU_RW, }; - ok = vhost_vdpa_svq_map_ring(v, &device_region, errp); + ok = vhost_vdpa_svq_map_ring(v, &device_region, svq_addr.used_user_addr, + errp); if (unlikely(!ok)) { error_prepend(errp, "Cannot create vq device region: "); vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr); @@ -1365,7 +1390,15 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) "IOMMU and try again"); return -1; } - memory_listener_register(&v->shared->listener, dev->vdev->dma_as); + if (v->shared->listener_registered && + dev->vdev->dma_as != v->shared->listener.address_space) { + memory_listener_unregister(&v->shared->listener); + v->shared->listener_registered = false; + } + if (!v->shared->listener_registered) { + memory_listener_register(&v->shared->listener, dev->vdev->dma_as); + v->shared->listener_registered = true; + } return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); } @@ -1375,8 +1408,6 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) static void vhost_vdpa_reset_status(struct vhost_dev *dev) { - struct vhost_vdpa *v = dev->opaque; - if (!vhost_vdpa_last_dev(dev)) { return; } @@ -1384,7 +1415,6 @@ static void vhost_vdpa_reset_status(struct vhost_dev *dev) vhost_vdpa_reset_device(dev); vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER); - memory_listener_unregister(&v->shared->listener); } static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base, @@ -1518,12 +1548,27 @@ static int vhost_vdpa_get_features(struct vhost_dev *dev, static int vhost_vdpa_set_owner(struct vhost_dev *dev) { + int r; + struct vhost_vdpa *v; + if (!vhost_vdpa_first_dev(dev)) { return 0; } trace_vhost_vdpa_set_owner(dev); - return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL); + r = vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL); + if (unlikely(r < 0)) { + return r; + } + + /* + * Being optimistic and listening address space memory. If the device + * uses vIOMMU, it is changed at vhost_vdpa_dev_start. 
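Aside: the set_owner change above registers the memory listener optimistically on the system address space, and vhost_vdpa_dev_start later re-registers it only if the device actually sits behind a vIOMMU and therefore uses a different DMA address space. A small standalone sketch of that keep-or-move decision (toy AddressSpace/Listener types, not QEMU's MemoryListener):

#include <stdbool.h>
#include <stdio.h>

typedef struct AddressSpace { const char *name; } AddressSpace;

typedef struct Listener {
    AddressSpace *as;
    bool registered;
} Listener;

static void listener_register(Listener *l, AddressSpace *as)
{
    l->as = as;
    l->registered = true;
    printf("registered on %s\n", as->name);
}

static void listener_unregister(Listener *l)
{
    printf("unregistered from %s\n", l->as->name);
    l->as = NULL;
    l->registered = false;
}

/*
 * Device start: keep the early (optimistic) registration unless the DMA
 * address space turned out to be a different one, then move the listener.
 */
static void listener_follow_dma_as(Listener *l, AddressSpace *dma_as)
{
    if (l->registered && l->as != dma_as) {
        listener_unregister(l);
    }
    if (!l->registered) {
        listener_register(l, dma_as);
    }
}

int main(void)
{
    AddressSpace memory = { "memory" }, iommu = { "iommu" };
    Listener l = { 0 };

    listener_register(&l, &memory);      /* optimistic, at set_owner time */
    listener_follow_dma_as(&l, &iommu);  /* device start: vIOMMU in use */
    return 0;
}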
+ */ + v = dev->opaque; + memory_listener_register(&v->shared->listener, &address_space_memory); + v->shared->listener_registered = true; + return 0; } static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev, @@ -1555,7 +1600,6 @@ const VhostOps vdpa_ops = { .vhost_set_vring_kick = vhost_vdpa_set_vring_kick, .vhost_set_vring_call = vhost_vdpa_set_vring_call, .vhost_get_features = vhost_vdpa_get_features, - .vhost_set_backend_cap = vhost_vdpa_set_backend_cap, .vhost_set_owner = vhost_vdpa_set_owner, .vhost_set_vring_endian = NULL, .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit, diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c index fd88df25602..c6c44d8989f 100644 --- a/hw/virtio/vhost-vsock-common.c +++ b/hw/virtio/vhost-vsock-common.c @@ -95,7 +95,7 @@ int vhost_vsock_common_start(VirtIODevice *vdev) return ret; } -void vhost_vsock_common_stop(VirtIODevice *vdev) +int vhost_vsock_common_stop(VirtIODevice *vdev) { VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); @@ -103,18 +103,18 @@ void vhost_vsock_common_stop(VirtIODevice *vdev) int ret; if (!k->set_guest_notifiers) { - return; + return 0; } - vhost_dev_stop(&vvc->vhost_dev, vdev, true); + ret = vhost_dev_stop(&vvc->vhost_dev, vdev, true); - ret = k->set_guest_notifiers(qbus->parent, vvc->vhost_dev.nvqs, false); - if (ret < 0) { + if (k->set_guest_notifiers(qbus->parent, vvc->vhost_dev.nvqs, false) < 0) { error_report("vhost guest notifier cleanup failed: %d", ret); - return; + return -1; } vhost_dev_disable_notifiers(&vvc->vhost_dev, vdev); + return ret; } @@ -285,13 +285,12 @@ static struct vhost_dev *vhost_vsock_common_get_vhost(VirtIODevice *vdev) return &vvc->vhost_dev; } -static Property vhost_vsock_common_properties[] = { +static const Property vhost_vsock_common_properties[] = { DEFINE_PROP_ON_OFF_AUTO("seqpacket", VHostVSockCommon, seqpacket, ON_OFF_AUTO_AUTO), - DEFINE_PROP_END_OF_LIST(), }; -static void vhost_vsock_common_class_init(ObjectClass *klass, void *data) +static void vhost_vsock_common_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/virtio/vhost-vsock-pci.c b/hw/virtio/vhost-vsock-pci.c index 9f34414d381..0022a713d43 100644 --- a/hw/virtio/vhost-vsock-pci.c +++ b/hw/virtio/vhost-vsock-pci.c @@ -35,9 +35,8 @@ struct VHostVSockPCI { /* vhost-vsock-pci */ -static Property vhost_vsock_pci_properties[] = { +static const Property vhost_vsock_pci_properties[] = { DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3), - DEFINE_PROP_END_OF_LIST(), }; static void vhost_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -57,7 +56,7 @@ static void vhost_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void vhost_vsock_pci_class_init(ObjectClass *klass, void *data) +static void vhost_vsock_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c index 3d4a5a97f48..6e4088831fc 100644 --- a/hw/virtio/vhost-vsock.c +++ b/hw/virtio/vhost-vsock.c @@ -67,37 +67,38 @@ static int vhost_vsock_set_running(VirtIODevice *vdev, int start) } -static void vhost_vsock_set_status(VirtIODevice *vdev, uint8_t status) +static int vhost_vsock_set_status(VirtIODevice *vdev, uint8_t status) { VHostVSockCommon *vvc = 
VHOST_VSOCK_COMMON(vdev); bool should_start = virtio_device_should_start(vdev, status); int ret; if (vhost_dev_is_started(&vvc->vhost_dev) == should_start) { - return; + return 0; } if (should_start) { ret = vhost_vsock_common_start(vdev); if (ret < 0) { - return; + return 0; } ret = vhost_vsock_set_running(vdev, 1); if (ret < 0) { vhost_vsock_common_stop(vdev); error_report("Error starting vhost vsock: %d", -ret); - return; + return 0; } } else { ret = vhost_vsock_set_running(vdev, 0); if (ret < 0) { error_report("vhost vsock set running failed: %d", ret); - return; + return 0; } vhost_vsock_common_stop(vdev); } + return 0; } static uint64_t vhost_vsock_get_features(VirtIODevice *vdev, @@ -205,13 +206,12 @@ static void vhost_vsock_device_unrealize(DeviceState *dev) vhost_vsock_common_unrealize(vdev); } -static Property vhost_vsock_properties[] = { +static const Property vhost_vsock_properties[] = { DEFINE_PROP_UINT64("guest-cid", VHostVSock, conf.guest_cid, 0), DEFINE_PROP_STRING("vhostfd", VHostVSock, conf.vhostfd), - DEFINE_PROP_END_OF_LIST(), }; -static void vhost_vsock_class_init(ObjectClass *klass, void *data) +static void vhost_vsock_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index 06fc71746e7..6557c58d12a 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -26,7 +26,7 @@ #include "hw/mem/memory-device.h" #include "migration/blocker.h" #include "migration/qemu-file-types.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "trace.h" /* enabled until disconnected backend stabilizes */ @@ -47,12 +47,6 @@ static struct vhost_log *vhost_log[VHOST_BACKEND_TYPE_MAX]; static struct vhost_log *vhost_log_shm[VHOST_BACKEND_TYPE_MAX]; static QLIST_HEAD(, vhost_dev) vhost_log_devs[VHOST_BACKEND_TYPE_MAX]; -/* Memslots used by backends that support private memslots (without an fd). */ -static unsigned int used_memslots; - -/* Memslots used by backends that only support shared memslots (with an fd). 
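Aside: the vsock hunks above follow the same conversion applied to the other vhost-user devices in this series: the stop path now returns the value of vhost_dev_stop() and set_status propagates a negative result instead of swallowing it. A bare-bones model of that calling convention (stand-in backend_start/backend_stop helpers, not the vhost API):

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the vhost backend calls; failure is a negative errno */
static int backend_start(void) { return 0; }
static int backend_stop(void)  { return 0; }

static bool started;

/* stop path: report the backend's status instead of returning void */
static int device_stop_model(void)
{
    int ret = backend_stop();
    started = false;
    return ret;
}

/* set_status: hand start/stop failures back to the virtio core */
static int device_set_status_model(bool should_start)
{
    if (started == should_start) {
        return 0;               /* nothing to do */
    }
    if (should_start) {
        int ret = backend_start();
        if (ret < 0) {
            return ret;
        }
        started = true;
    } else {
        int ret = device_stop_model();
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

int main(void)
{
    printf("start: %d\n", device_set_status_model(true));
    printf("stop:  %d\n", device_set_status_model(false));
    return 0;
}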
*/ -static unsigned int used_shared_memslots; - static QLIST_HEAD(, vhost_dev) vhost_devices = QLIST_HEAD_INITIALIZER(vhost_devices); @@ -74,15 +68,15 @@ unsigned int vhost_get_free_memslots(void) QLIST_FOREACH(hdev, &vhost_devices, entry) { unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev); - unsigned int cur_free; + unsigned int cur_free = r - hdev->mem->nregions; - if (hdev->vhost_ops->vhost_backend_no_private_memslots && - hdev->vhost_ops->vhost_backend_no_private_memslots(hdev)) { - cur_free = r - used_shared_memslots; + if (unlikely(r < hdev->mem->nregions)) { + warn_report_once("used (%u) vhost backend memory slots exceed" + " the device limit (%u).", hdev->mem->nregions, r); + free = 0; } else { - cur_free = r - used_memslots; + free = MIN(free, cur_free); } - free = MIN(free, cur_free); } return free; } @@ -666,13 +660,6 @@ static void vhost_commit(MemoryListener *listener) dev->mem = g_realloc(dev->mem, regions_size); dev->mem->nregions = dev->n_mem_sections; - if (dev->vhost_ops->vhost_backend_no_private_memslots && - dev->vhost_ops->vhost_backend_no_private_memslots(dev)) { - used_shared_memslots = dev->mem->nregions; - } else { - used_memslots = dev->mem->nregions; - } - for (i = 0; i < dev->n_mem_sections; i++) { struct vhost_memory_region *cur_vmr = dev->mem->regions + i; struct MemoryRegionSection *mrs = dev->mem_sections + i; @@ -732,7 +719,6 @@ static void vhost_commit(MemoryListener *listener) memory_region_unref(old_sections[n_old_sections].mr); } g_free(old_sections); - return; } /* Adds the section data to the tmp_section structure. @@ -1124,7 +1110,8 @@ static bool vhost_log_global_start(MemoryListener *listener, Error **errp) r = vhost_migration_log(listener, true); if (r < 0) { - abort(); + error_setg_errno(errp, -r, "vhost: Failed to start logging"); + return false; } return true; } @@ -1135,7 +1122,8 @@ static void vhost_log_global_stop(MemoryListener *listener) r = vhost_migration_log(listener, false); if (r < 0) { - abort(); + /* Not fatal, so report it, but take no further action */ + warn_report("vhost: Failed to stop logging"); } } @@ -1368,25 +1356,30 @@ int vhost_virtqueue_start(struct vhost_dev *dev, return r; } -void vhost_virtqueue_stop(struct vhost_dev *dev, - struct VirtIODevice *vdev, - struct vhost_virtqueue *vq, - unsigned idx) +static int do_vhost_virtqueue_stop(struct vhost_dev *dev, + struct VirtIODevice *vdev, + struct vhost_virtqueue *vq, + unsigned idx, bool force) { int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx); struct vhost_vring_state state = { .index = vhost_vq_index, }; - int r; + int r = 0; if (virtio_queue_get_desc_addr(vdev, idx) == 0) { /* Don't stop the virtqueue which might have not been started */ - return; + return 0; } - r = dev->vhost_ops->vhost_get_vring_base(dev, &state); - if (r < 0) { - VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r); + if (!force) { + r = dev->vhost_ops->vhost_get_vring_base(dev, &state); + if (r < 0) { + VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r); + } + } + + if (r < 0 || force) { /* Connection to the backend is broken, so let's sync internal * last avail idx to the device used idx. 
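Aside: with the global used_memslots/used_shared_memslots counters removed above, vhost_get_free_memslots() derives headroom per device from its own region count and takes the minimum, clamping to zero when a backend is already over its limit. A standalone sketch of that computation (toy Dev struct; warn_report_once() only appears as a comment):

#include <limits.h>
#include <stdio.h>

typedef struct Dev {
    unsigned int limit;     /* backend memslot limit for this device */
    unsigned int nregions;  /* memory regions the device currently uses */
} Dev;

/* smallest per-device headroom; 0 if any device already exceeds its limit */
static unsigned int free_memslots_model(const Dev *devs, unsigned int ndevs)
{
    unsigned int free = UINT_MAX;

    for (unsigned int i = 0; i < ndevs; i++) {
        if (devs[i].limit < devs[i].nregions) {
            /* over the limit: warn_report_once() in the real code */
            free = 0;
        } else {
            unsigned int cur_free = devs[i].limit - devs[i].nregions;
            free = cur_free < free ? cur_free : free;
        }
    }
    return free;
}

int main(void)
{
    Dev devs[] = { { 32, 5 }, { 509, 12 } };
    printf("free memslots: %u\n", free_memslots_model(devs, 2));
    return 0;
}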
*/ @@ -1412,6 +1405,15 @@ void vhost_virtqueue_stop(struct vhost_dev *dev, 0, virtio_queue_get_avail_size(vdev, idx)); vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx), 0, virtio_queue_get_desc_size(vdev, idx)); + return r; +} + +int vhost_virtqueue_stop(struct vhost_dev *dev, + struct VirtIODevice *vdev, + struct vhost_virtqueue *vq, + unsigned idx) +{ + return do_vhost_virtqueue_stop(dev, vdev, vq, idx, false); } static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev, @@ -1619,15 +1621,11 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, QLIST_INSERT_HEAD(&vhost_devices, hdev, entry); /* - * The listener we registered properly updated the corresponding counter. - * So we can trust that these values are accurate. + * The listener we registered properly setup the number of required + * memslots in vhost_commit(). */ - if (hdev->vhost_ops->vhost_backend_no_private_memslots && - hdev->vhost_ops->vhost_backend_no_private_memslots(hdev)) { - used = used_shared_memslots; - } else { - used = used_memslots; - } + used = hdev->mem->nregions; + /* * We assume that all reserved memslots actually require a real memslot * in our vhost backend. This might not be true, for example, if the @@ -1682,9 +1680,9 @@ void vhost_dev_cleanup(struct vhost_dev *hdev) memset(hdev, 0, sizeof(struct vhost_dev)); } -static void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev, - VirtIODevice *vdev, - unsigned int nvqs) +void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev, + VirtIODevice *vdev, + unsigned int nvqs) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); int i, r; @@ -1930,62 +1928,6 @@ void vhost_dev_free_inflight(struct vhost_inflight *inflight) } } -static int vhost_dev_resize_inflight(struct vhost_inflight *inflight, - uint64_t new_size) -{ - Error *err = NULL; - int fd = -1; - void *addr = qemu_memfd_alloc("vhost-inflight", new_size, - F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, - &fd, &err); - - if (err) { - error_report_err(err); - return -ENOMEM; - } - - vhost_dev_free_inflight(inflight); - inflight->offset = 0; - inflight->addr = addr; - inflight->fd = fd; - inflight->size = new_size; - - return 0; -} - -void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f) -{ - if (inflight->addr) { - qemu_put_be64(f, inflight->size); - qemu_put_be16(f, inflight->queue_size); - qemu_put_buffer(f, inflight->addr, inflight->size); - } else { - qemu_put_be64(f, 0); - } -} - -int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f) -{ - uint64_t size; - - size = qemu_get_be64(f); - if (!size) { - return 0; - } - - if (inflight->size != size) { - int ret = vhost_dev_resize_inflight(inflight, size); - if (ret < 0) { - return ret; - } - } - inflight->queue_size = qemu_get_be16(f); - - qemu_get_buffer(f, inflight->addr, size); - - return 0; -} - int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev) { int r; @@ -2151,11 +2093,22 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) * vhost-kernel code requires for this.*/ for (i = 0; i < hdev->nvqs; ++i) { struct vhost_virtqueue *vq = hdev->vqs + i; - vhost_device_iotlb_miss(hdev, vq->used_phys, true); + r = vhost_device_iotlb_miss(hdev, vq->used_phys, true); + if (r) { + goto fail_iotlb; + } } } vhost_start_config_intr(hdev); return 0; +fail_iotlb: + if (vhost_dev_has_iommu(hdev) && + hdev->vhost_ops->vhost_set_iotlb_callback) { + hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false); + } + if 
(hdev->vhost_ops->vhost_dev_start) { + hdev->vhost_ops->vhost_dev_start(hdev, false); + } fail_start: if (vrings) { vhost_dev_set_vring_enable(hdev, false); @@ -2181,9 +2134,11 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) } /* Host notifiers must be enabled at this point. */ -void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) +static int do_vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, + bool vrings, bool force) { int i; + int rc = 0; /* should only be called after backend is connected */ assert(hdev->vhost_ops); @@ -2202,10 +2157,11 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) vhost_dev_set_vring_enable(hdev, false); } for (i = 0; i < hdev->nvqs; ++i) { - vhost_virtqueue_stop(hdev, - vdev, - hdev->vqs + i, - hdev->vq_index + i); + rc |= do_vhost_virtqueue_stop(hdev, + vdev, + hdev->vqs + i, + hdev->vq_index + i, + force); } if (hdev->vhost_ops->vhost_reset_status) { hdev->vhost_ops->vhost_reset_status(hdev); @@ -2222,6 +2178,18 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) hdev->started = false; vdev->vhost_started = false; hdev->vdev = NULL; + return rc; +} + +int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) +{ + return do_vhost_dev_stop(hdev, vdev, vrings, false); +} + +int vhost_dev_force_stop(struct vhost_dev *hdev, VirtIODevice *vdev, + bool vrings) +{ + return do_vhost_dev_stop(hdev, vdev, vrings, true); } int vhost_net_set_backend(struct vhost_dev *hdev, diff --git a/hw/virtio/virtio-9p-pci.c b/hw/virtio/virtio-9p-pci.c index 94c14f0b98c..594742ff656 100644 --- a/hw/virtio/virtio-9p-pci.c +++ b/hw/virtio/virtio-9p-pci.c @@ -43,14 +43,13 @@ static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static Property virtio_9p_pci_properties[] = { +static const Property virtio_9p_pci_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_9p_pci_class_init(ObjectClass *klass, void *data) +static void virtio_9p_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); diff --git a/hw/virtio/virtio-acpi.c b/hw/virtio/virtio-acpi.c index 230a6695001..85becef03c0 100644 --- a/hw/virtio/virtio-acpi.c +++ b/hw/virtio/virtio-acpi.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +// SPDX-License-Identifier: GPL-2.0-or-later /* * virtio ACPI Support * diff --git a/hw/virtio/virtio-balloon-pci.c b/hw/virtio/virtio-balloon-pci.c index ce2645ba718..96e88b6b86d 100644 --- a/hw/virtio/virtio-balloon-pci.c +++ b/hw/virtio/virtio-balloon-pci.c @@ -35,16 +35,27 @@ struct VirtIOBalloonPCI { VirtIOBalloon vdev; }; +static const Property virtio_balloon_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), +}; + static void virtio_balloon_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) { VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev); DeviceState *vdev = DEVICE(&dev->vdev); + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = 2; + } + vpci_dev->class_code = PCI_CLASS_OTHERS; qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void 
virtio_balloon_pci_class_init(ObjectClass *klass, void *data) +static void virtio_balloon_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); @@ -55,6 +66,7 @@ static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data) pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON; pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; pcidev_k->class_id = PCI_CLASS_OTHERS; + device_class_set_props(dc, virtio_balloon_properties); } static void virtio_balloon_pci_instance_init(Object *obj) diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index 609e39a821f..db787d00b31 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -22,16 +22,16 @@ #include "hw/mem/pc-dimm.h" #include "hw/qdev-properties.h" #include "hw/boards.h" -#include "sysemu/balloon.h" +#include "system/balloon.h" #include "hw/virtio/virtio-balloon.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "qapi/error.h" #include "qapi/qapi-events-machine.h" #include "qapi/visitor.h" #include "trace.h" #include "qemu/error-report.h" #include "migration/misc.h" - +#include "system/reset.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" @@ -167,19 +167,33 @@ static void balloon_deflate_page(VirtIOBalloon *balloon, } } +/* + * All stats upto VIRTIO_BALLOON_S_NR /must/ have a + * non-NULL name declared here, since these are used + * as keys for populating the QDict with stats + */ static const char *balloon_stat_names[] = { [VIRTIO_BALLOON_S_SWAP_IN] = "stat-swap-in", [VIRTIO_BALLOON_S_SWAP_OUT] = "stat-swap-out", [VIRTIO_BALLOON_S_MAJFLT] = "stat-major-faults", [VIRTIO_BALLOON_S_MINFLT] = "stat-minor-faults", [VIRTIO_BALLOON_S_MEMFREE] = "stat-free-memory", + [VIRTIO_BALLOON_S_MEMTOT] = "stat-total-memory", [VIRTIO_BALLOON_S_AVAIL] = "stat-available-memory", [VIRTIO_BALLOON_S_CACHES] = "stat-disk-caches", [VIRTIO_BALLOON_S_HTLB_PGALLOC] = "stat-htlb-pgalloc", [VIRTIO_BALLOON_S_HTLB_PGFAIL] = "stat-htlb-pgfail", - [VIRTIO_BALLOON_S_NR] = NULL + + [VIRTIO_BALLOON_S_OOM_KILL] = "stat-oom-kills", + [VIRTIO_BALLOON_S_ALLOC_STALL] = "stat-alloc-stalls", + [VIRTIO_BALLOON_S_ASYNC_SCAN] = "stat-async-scans", + [VIRTIO_BALLOON_S_DIRECT_SCAN] = "stat-direct-scans", + [VIRTIO_BALLOON_S_ASYNC_RECLAIM] = "stat-async-reclaims", + + [VIRTIO_BALLOON_S_DIRECT_RECLAIM] = "stat-direct-reclaims", }; +G_STATIC_ASSERT(G_N_ELEMENTS(balloon_stat_names) == VIRTIO_BALLOON_S_NR); /* * reset_stats - Mark all items in the stats array as unset @@ -896,6 +910,8 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp) } reset_stats(s); + s->stats_last_update = 0; + qemu_register_resettable(OBJECT(dev)); } static void virtio_balloon_device_unrealize(DeviceState *dev) @@ -903,6 +919,7 @@ static void virtio_balloon_device_unrealize(DeviceState *dev) VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIOBalloon *s = VIRTIO_BALLOON(dev); + qemu_unregister_resettable(OBJECT(dev)); if (s->free_page_bh) { qemu_bh_delete(s->free_page_bh); object_unref(OBJECT(s->iothread)); @@ -941,7 +958,7 @@ static void virtio_balloon_device_reset(VirtIODevice *vdev) s->poison_val = 0; } -static void virtio_balloon_set_status(VirtIODevice *vdev, uint8_t status) +static int virtio_balloon_set_status(VirtIODevice *vdev, uint8_t status) { VirtIOBalloon *s = VIRTIO_BALLOON(vdev); @@ -971,6 +988,28 @@ static void virtio_balloon_set_status(VirtIODevice *vdev, uint8_t status) qemu_mutex_unlock(&s->free_page_lock); } 
} + return 0; +} + +static ResettableState *virtio_balloon_get_reset_state(Object *obj) +{ + VirtIOBalloon *s = VIRTIO_BALLOON(obj); + return &s->reset_state; +} + +static void virtio_balloon_reset_enter(Object *obj, ResetType type) +{ + VirtIOBalloon *s = VIRTIO_BALLOON(obj); + + /* + * When waking up from standby/suspend-to-ram, do not reset stats. + */ + if (type == RESET_TYPE_WAKEUP) { + return; + } + + reset_stats(s); + s->stats_last_update = 0; } static void virtio_balloon_instance_init(Object *obj) @@ -1001,7 +1040,7 @@ static const VMStateDescription vmstate_virtio_balloon = { }, }; -static Property virtio_balloon_properties[] = { +static const Property virtio_balloon_properties[] = { DEFINE_PROP_BIT("deflate-on-oom", VirtIOBalloon, host_features, VIRTIO_BALLOON_F_DEFLATE_ON_OOM, false), DEFINE_PROP_BIT("free-page-hint", VirtIOBalloon, host_features, @@ -1018,13 +1057,13 @@ static Property virtio_balloon_properties[] = { qemu_4_0_config_size, false), DEFINE_PROP_LINK("iothread", VirtIOBalloon, iothread, TYPE_IOTHREAD, IOThread *), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_balloon_class_init(ObjectClass *klass, void *data) +static void virtio_balloon_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); device_class_set_props(dc, virtio_balloon_properties); dc->vmsd = &vmstate_virtio_balloon; @@ -1037,6 +1076,9 @@ static void virtio_balloon_class_init(ObjectClass *klass, void *data) vdc->get_features = virtio_balloon_get_features; vdc->set_status = virtio_balloon_set_status; vdc->vmsd = &vmstate_virtio_balloon_device; + + rc->get_state = virtio_balloon_get_reset_state; + rc->phases.enter = virtio_balloon_reset_enter; } static const TypeInfo virtio_balloon_info = { diff --git a/hw/virtio/virtio-blk-pci.c b/hw/virtio/virtio-blk-pci.c index 9743bee965a..fd33bbd7e83 100644 --- a/hw/virtio/virtio-blk-pci.c +++ b/hw/virtio/virtio-blk-pci.c @@ -38,13 +38,12 @@ struct VirtIOBlkPCI { VirtIOBlock vdev; }; -static Property virtio_blk_pci_properties[] = { +static const Property virtio_blk_pci_properties[] = { DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED), - DEFINE_PROP_END_OF_LIST(), }; static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -64,7 +63,7 @@ static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void virtio_blk_pci_class_init(ObjectClass *klass, void *data) +static void virtio_blk_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c index 896feb37a1c..11adfbf3aba 100644 --- a/hw/virtio/virtio-bus.c +++ b/hw/virtio/virtio-bus.c @@ -28,7 +28,7 @@ #include "qapi/error.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" /* #define DEBUG_VIRTIO_BUS */ @@ -348,7 +348,7 @@ bool virtio_bus_device_iommu_enabled(VirtIODevice *vdev) return klass->iommu_enabled(qbus->parent); } -static void virtio_bus_class_init(ObjectClass *klass, void *data) +static void virtio_bus_class_init(ObjectClass *klass, const void *data) { BusClass 
*bus_class = BUS_CLASS(klass); bus_class->get_dev_path = virtio_bus_get_dev_path; diff --git a/hw/virtio/virtio-config-io.c b/hw/virtio/virtio-config-io.c index ad78e0b9bc5..f58d90b6e31 100644 --- a/hw/virtio/virtio-config-io.c +++ b/hw/virtio/virtio-config-io.c @@ -11,7 +11,6 @@ #include "qemu/osdep.h" #include "hw/virtio/virtio.h" -#include "cpu.h" uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr) { diff --git a/hw/virtio/virtio-crypto-pci.c b/hw/virtio/virtio-crypto-pci.c index 0783dc2f7ed..868abc03a9e 100644 --- a/hw/virtio/virtio-crypto-pci.c +++ b/hw/virtio/virtio-crypto-pci.c @@ -37,11 +37,10 @@ struct VirtIOCryptoPCI { VirtIOCrypto vdev; }; -static Property virtio_crypto_pci_properties[] = { +static const Property virtio_crypto_pci_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2), - DEFINE_PROP_END_OF_LIST(), }; static void virtio_crypto_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -60,7 +59,7 @@ static void virtio_crypto_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) } } -static void virtio_crypto_pci_class_init(ObjectClass *klass, void *data) +static void virtio_crypto_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c index 5034768bff0..517f2089c5a 100644 --- a/hw/virtio/virtio-crypto.c +++ b/hw/virtio/virtio-crypto.c @@ -22,7 +22,7 @@ #include "hw/virtio/virtio-crypto.h" #include "hw/qdev-properties.h" #include "standard-headers/linux/virtio_ids.h" -#include "sysemu/cryptodev-vhost.h" +#include "system/cryptodev-vhost.h" #define VIRTIO_CRYPTO_VM_VERSION 1 @@ -461,7 +461,7 @@ static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq, req->in_iov = NULL; req->in_num = 0; req->in_len = 0; - req->flags = QCRYPTODEV_BACKEND_ALG__MAX; + req->flags = QCRYPTODEV_BACKEND_ALGO_TYPE__MAX; memset(&req->op_info, 0x00, sizeof(req->op_info)); } @@ -471,7 +471,7 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req) return; } - if (req->flags == QCRYPTODEV_BACKEND_ALG_SYM) { + if (req->flags == QCRYPTODEV_BACKEND_ALGO_TYPE_SYM) { size_t max_len; CryptoDevBackendSymOpInfo *op_info = req->op_info.u.sym_op_info; @@ -486,7 +486,7 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req) memset(op_info, 0, sizeof(*op_info) + max_len); g_free(op_info); } - } else if (req->flags == QCRYPTODEV_BACKEND_ALG_ASYM) { + } else if (req->flags == QCRYPTODEV_BACKEND_ALGO_TYPE_ASYM) { CryptoDevBackendAsymOpInfo *op_info = req->op_info.u.asym_op_info; if (op_info) { g_free(op_info->src); @@ -571,10 +571,10 @@ static void virtio_crypto_req_complete(void *opaque, int ret) VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto); uint8_t status = -ret; - if (req->flags == QCRYPTODEV_BACKEND_ALG_SYM) { + if (req->flags == QCRYPTODEV_BACKEND_ALGO_TYPE_SYM) { virtio_crypto_sym_input_data_helper(vdev, req, status, req->op_info.u.sym_op_info); - } else if (req->flags == QCRYPTODEV_BACKEND_ALG_ASYM) { + } else if (req->flags == QCRYPTODEV_BACKEND_ALGO_TYPE_ASYM) { virtio_crypto_akcipher_input_data_helper(vdev, req, status, req->op_info.u.asym_op_info); } @@ -884,7 +884,7 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request) switch (opcode) { case VIRTIO_CRYPTO_CIPHER_ENCRYPT: case VIRTIO_CRYPTO_CIPHER_DECRYPT: - op_info->algtype = request->flags = QCRYPTODEV_BACKEND_ALG_SYM; + op_info->algtype = 
request->flags = QCRYPTODEV_BACKEND_ALGO_TYPE_SYM; ret = virtio_crypto_handle_sym_req(vcrypto, &req.u.sym_req, op_info, out_iov, out_num); @@ -894,7 +894,7 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request) case VIRTIO_CRYPTO_AKCIPHER_DECRYPT: case VIRTIO_CRYPTO_AKCIPHER_SIGN: case VIRTIO_CRYPTO_AKCIPHER_VERIFY: - op_info->algtype = request->flags = QCRYPTODEV_BACKEND_ALG_ASYM; + op_info->algtype = request->flags = QCRYPTODEV_BACKEND_ALGO_TYPE_ASYM; ret = virtio_crypto_handle_asym_req(vcrypto, &req.u.akcipher_req, op_info, out_iov, out_num); @@ -1008,19 +1008,19 @@ static uint32_t virtio_crypto_init_services(uint32_t qservices) { uint32_t vservices = 0; - if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_CIPHER)) { + if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_CIPHER)) { vservices |= (1 << VIRTIO_CRYPTO_SERVICE_CIPHER); } - if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_HASH)) { + if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_HASH)) { vservices |= (1 << VIRTIO_CRYPTO_SERVICE_HASH); } - if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_MAC)) { + if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_MAC)) { vservices |= (1 << VIRTIO_CRYPTO_SERVICE_MAC); } - if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_AEAD)) { + if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_AEAD)) { vservices |= (1 << VIRTIO_CRYPTO_SERVICE_AEAD); } - if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_AKCIPHER)) { + if (qservices & (1 << QCRYPTODEV_BACKEND_SERVICE_TYPE_AKCIPHER)) { vservices |= (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER); } @@ -1128,10 +1128,9 @@ static const VMStateDescription vmstate_virtio_crypto = { }, }; -static Property virtio_crypto_properties[] = { +static const Property virtio_crypto_properties[] = { DEFINE_PROP_LINK("cryptodev", VirtIOCrypto, conf.cryptodev, TYPE_CRYPTODEV_BACKEND, CryptoDevBackend *), - DEFINE_PROP_END_OF_LIST(), }; static void virtio_crypto_get_config(VirtIODevice *vdev, uint8_t *config) @@ -1198,11 +1197,12 @@ static void virtio_crypto_vhost_status(VirtIOCrypto *c, uint8_t status) } } -static void virtio_crypto_set_status(VirtIODevice *vdev, uint8_t status) +static int virtio_crypto_set_status(VirtIODevice *vdev, uint8_t status) { VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); virtio_crypto_vhost_status(vcrypto, status); + return 0; } static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx, @@ -1247,13 +1247,25 @@ static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx) static struct vhost_dev *virtio_crypto_get_vhost(VirtIODevice *vdev) { VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); - CryptoDevBackend *b = vcrypto->cryptodev; - CryptoDevBackendClient *cc = b->conf.peers.ccs[0]; - CryptoDevBackendVhost *vhost_crypto = cryptodev_get_vhost(cc, b, 0); + CryptoDevBackend *b; + CryptoDevBackendClient *cc; + CryptoDevBackendVhost *vhost_crypto; + + b = vcrypto->cryptodev; + if (!b) { + return NULL; + } + + cc = b->conf.peers.ccs[0]; + vhost_crypto = cryptodev_get_vhost(cc, b, 0); + if (!vhost_crypto) { + return NULL; + } + return &vhost_crypto->dev; } -static void virtio_crypto_class_init(ObjectClass *klass, void *data) +static void virtio_crypto_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/virtio/virtio-hmp-cmds.c b/hw/virtio/virtio-hmp-cmds.c index 477c97dea21..7d8677bcf0d 100644 --- a/hw/virtio/virtio-hmp-cmds.c +++ b/hw/virtio/virtio-hmp-cmds.c @@ -9,7 +9,7 @@ #include "monitor/hmp.h" #include 
"monitor/monitor.h" #include "qapi/qapi-commands-virtio.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" static void hmp_virtio_dump_protocols(Monitor *mon, diff --git a/hw/virtio/virtio-input-pci.c b/hw/virtio/virtio-input-pci.c index a53edf46c43..3be5358b4cd 100644 --- a/hw/virtio/virtio-input-pci.c +++ b/hw/virtio/virtio-input-pci.c @@ -37,9 +37,8 @@ struct VirtIOInputHIDPCI { VirtIOInputHID vdev; }; -static Property virtio_input_pci_properties[] = { +static const Property virtio_input_pci_properties[] = { DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2), - DEFINE_PROP_END_OF_LIST(), }; static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -51,7 +50,7 @@ static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void virtio_input_pci_class_init(ObjectClass *klass, void *data) +static void virtio_input_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); @@ -64,7 +63,8 @@ static void virtio_input_pci_class_init(ObjectClass *klass, void *data) pcidev_k->class_id = PCI_CLASS_INPUT_OTHER; } -static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data) +static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, + const void *data) { PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); @@ -72,7 +72,7 @@ static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data) } static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass, - void *data) + const void *data) { PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); diff --git a/hw/virtio/virtio-iommu-pci.c b/hw/virtio/virtio-iommu-pci.c index cbdfe4c591c..8123c6f83a3 100644 --- a/hw/virtio/virtio-iommu-pci.c +++ b/hw/virtio/virtio-iommu-pci.c @@ -34,12 +34,11 @@ struct VirtIOIOMMUPCI { VirtIOIOMMU vdev; }; -static Property virtio_iommu_pci_properties[] = { +static const Property virtio_iommu_pci_properties[] = { DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), DEFINE_PROP_ARRAY("reserved-regions", VirtIOIOMMUPCI, vdev.nr_prop_resv_regions, vdev.prop_resv_regions, qdev_prop_reserved_region, ReservedRegion), - DEFINE_PROP_END_OF_LIST(), }; static void virtio_iommu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -74,7 +73,7 @@ static void virtio_iommu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void virtio_iommu_pci_class_init(ObjectClass *klass, void *data) +static void virtio_iommu_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index 59ef4fb2174..3500f1b0820 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -25,9 +25,9 @@ #include "exec/target_page.h" #include "hw/qdev-properties.h" #include "hw/virtio/virtio.h" -#include "sysemu/kvm.h" -#include "sysemu/reset.h" -#include "sysemu/sysemu.h" +#include "system/kvm.h" +#include "system/reset.h" +#include "system/system.h" #include "qemu/reserved-region.h" #include "qemu/units.h" #include "qapi/error.h" @@ -1504,11 +1504,11 @@ static void virtio_iommu_device_unrealize(DeviceState *dev) virtio_cleanup(vdev); } -static void virtio_iommu_device_reset(VirtIODevice *vdev) +static void virtio_iommu_device_reset_exit(Object *obj, ResetType type) { - VirtIOIOMMU *s = VIRTIO_IOMMU(vdev); + 
VirtIOIOMMU *s = VIRTIO_IOMMU(obj); - trace_virtio_iommu_device_reset(); + trace_virtio_iommu_device_reset_exit(); if (s->domains) { g_tree_destroy(s->domains); @@ -1522,9 +1522,10 @@ static void virtio_iommu_device_reset(VirtIODevice *vdev) NULL, NULL, virtio_iommu_put_endpoint); } -static void virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status) +static int virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status) { trace_virtio_iommu_device_status(status); + return 0; } static void virtio_iommu_instance_init(Object *obj) @@ -1655,20 +1656,20 @@ static const VMStateDescription vmstate_virtio_iommu = { }, }; -static Property virtio_iommu_properties[] = { +static const Property virtio_iommu_properties[] = { DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus, TYPE_PCI_BUS, PCIBus *), DEFINE_PROP_BOOL("boot-bypass", VirtIOIOMMU, boot_bypass, true), DEFINE_PROP_GRANULE_MODE("granule", VirtIOIOMMU, granule_mode, GRANULE_MODE_HOST), DEFINE_PROP_UINT8("aw-bits", VirtIOIOMMU, aw_bits, 64), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_iommu_class_init(ObjectClass *klass, void *data) +static void virtio_iommu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); device_class_set_props(dc, virtio_iommu_properties); dc->vmsd = &vmstate_virtio_iommu; @@ -1676,7 +1677,12 @@ static void virtio_iommu_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_MISC, dc->categories); vdc->realize = virtio_iommu_device_realize; vdc->unrealize = virtio_iommu_device_unrealize; - vdc->reset = virtio_iommu_device_reset; + + /* + * Use 'exit' reset phase to make sure all DMA requests + * have been quiesced during 'enter' or 'hold' phase + */ + rc->phases.exit = virtio_iommu_device_reset_exit; vdc->get_config = virtio_iommu_get_config; vdc->set_config = virtio_iommu_set_config; vdc->get_features = virtio_iommu_get_features; @@ -1685,7 +1691,7 @@ static void virtio_iommu_class_init(ObjectClass *klass, void *data) } static void virtio_iommu_memory_region_class_init(ObjectClass *klass, - void *data) + const void *data) { IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); diff --git a/hw/virtio/virtio-md-pci.c b/hw/virtio/virtio-md-pci.c index 9ec50676626..9278b32cf84 100644 --- a/hw/virtio/virtio-md-pci.c +++ b/hw/virtio/virtio-md-pci.c @@ -138,7 +138,7 @@ static const TypeInfo virtio_md_pci_info = { .instance_size = sizeof(VirtIOMDPCI), .class_size = sizeof(VirtIOMDPCIClass), .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_MEMORY_DEVICE }, { } }, diff --git a/hw/virtio/virtio-mem-pci.c b/hw/virtio/virtio-mem-pci.c index 1b4e9a3284a..f592eb1a784 100644 --- a/hw/virtio/virtio-mem-pci.c +++ b/hw/virtio/virtio-mem-pci.c @@ -22,6 +22,10 @@ static void virtio_mem_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) VirtIOMEMPCI *mem_pci = VIRTIO_MEM_PCI(vpci_dev); DeviceState *vdev = DEVICE(&mem_pci->vdev); + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = 2; + } + virtio_pci_force_virtio_1(vpci_dev); qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } @@ -152,7 +156,14 @@ static void virtio_mem_pci_set_requested_size(Object *obj, Visitor *v, object_property_set(OBJECT(&pci_mem->vdev), name, v, errp); } -static void virtio_mem_pci_class_init(ObjectClass *klass, void *data) +static const Property virtio_mem_pci_class_properties[] = { + DEFINE_PROP_BIT("ioeventfd", 
VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), +}; + +static void virtio_mem_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); @@ -164,6 +175,7 @@ static void virtio_mem_pci_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_MISC, dc->categories); pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; pcidev_k->class_id = PCI_CLASS_OTHERS; + device_class_set_props(dc, virtio_mem_pci_class_properties); mdc->get_addr = virtio_mem_pci_get_addr; mdc->set_addr = virtio_mem_pci_set_addr; diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index ef64bf1b4a1..c46f6f9c3e2 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -15,20 +15,20 @@ #include "qemu/cutils.h" #include "qemu/error-report.h" #include "qemu/units.h" -#include "sysemu/numa.h" -#include "sysemu/sysemu.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" +#include "system/numa.h" +#include "system/system.h" +#include "system/reset.h" +#include "system/runstate.h" #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-mem.h" #include "qapi/error.h" #include "qapi/visitor.h" -#include "exec/ram_addr.h" +#include "system/ram_addr.h" #include "migration/misc.h" #include "hw/boards.h" #include "hw/qdev-properties.h" -#include CONFIG_DEVICES +#include "hw/acpi/acpi.h" #include "trace.h" static const VMStateDescription vmstate_virtio_mem_device_early; @@ -61,6 +61,8 @@ static uint32_t virtio_mem_default_thp_size(void) } else if (qemu_real_host_page_size() == 64 * KiB) { default_thp_size = 512 * MiB; } +#elif defined(__s390x__) + default_thp_size = 1 * MiB; #endif return default_thp_size; @@ -88,6 +90,7 @@ static uint32_t virtio_mem_default_thp_size(void) static uint32_t thp_size; #define HPAGE_PMD_SIZE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size" +#define HPAGE_PATH "/sys/kernel/mm/transparent_hugepage/" static uint32_t virtio_mem_thp_size(void) { gchar *content = NULL; @@ -98,6 +101,12 @@ static uint32_t virtio_mem_thp_size(void) return thp_size; } + /* No THP -> no restrictions. */ + if (!g_file_test(HPAGE_PATH, G_FILE_TEST_EXISTS)) { + thp_size = VIRTIO_MEM_MIN_BLOCK_SIZE; + return thp_size; + } + /* * Try to probe the actual THP size, fallback to (sane but eventually * incorrect) default sizes. @@ -161,7 +170,7 @@ static bool virtio_mem_has_shared_zeropage(RAMBlock *rb) * necessary (as the section size can change). But it's more likely that the * section size will rather get smaller and not bigger over time. */ -#if defined(TARGET_X86_64) || defined(TARGET_I386) +#if defined(TARGET_X86_64) || defined(TARGET_I386) || defined(TARGET_S390X) #define VIRTIO_MEM_USABLE_EXTENT (2 * (128 * MiB)) #elif defined(TARGET_ARM) #define VIRTIO_MEM_USABLE_EXTENT (2 * (512 * MiB)) @@ -181,7 +190,7 @@ static bool virtio_mem_is_busy(void) * after plugging them) until we're running on the destination (as we didn't * migrate these blocks when they were unplugged). */ - return migration_in_incoming_postcopy() || !migration_is_idle(); + return migration_in_incoming_postcopy() || migration_is_running(); } typedef int (*virtio_mem_range_cb)(VirtIOMEM *vmem, void *arg, @@ -235,28 +244,6 @@ static int virtio_mem_for_each_plugged_range(VirtIOMEM *vmem, void *arg, return ret; } -/* - * Adjust the memory section to cover the intersection with the given range. 
- * - * Returns false if the intersection is empty, otherwise returns true. - */ -static bool virtio_mem_intersect_memory_section(MemoryRegionSection *s, - uint64_t offset, uint64_t size) -{ - uint64_t start = MAX(s->offset_within_region, offset); - uint64_t end = MIN(s->offset_within_region + int128_get64(s->size), - offset + size); - - if (end <= start) { - return false; - } - - s->offset_within_address_space += start - s->offset_within_region; - s->offset_within_region = start; - s->size = int128_make64(end - start); - return true; -} - typedef int (*virtio_mem_section_cb)(MemoryRegionSection *s, void *arg); static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem, @@ -278,7 +265,7 @@ static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem, first_bit + 1) - 1; size = (last_bit - first_bit + 1) * vmem->block_size; - if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) { + if (!memory_region_section_intersect_range(&tmp, offset, size)) { break; } ret = cb(&tmp, arg); @@ -310,7 +297,7 @@ static int virtio_mem_for_each_unplugged_section(const VirtIOMEM *vmem, first_bit + 1) - 1; size = (last_bit - first_bit + 1) * vmem->block_size; - if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) { + if (!memory_region_section_intersect_range(&tmp, offset, size)) { break; } ret = cb(&tmp, arg); @@ -346,7 +333,7 @@ static void virtio_mem_notify_unplug(VirtIOMEM *vmem, uint64_t offset, QLIST_FOREACH(rdl, &vmem->rdl_list, next) { MemoryRegionSection tmp = *rdl->section; - if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) { + if (!memory_region_section_intersect_range(&tmp, offset, size)) { continue; } rdl->notify_discard(rdl, &tmp); @@ -362,7 +349,7 @@ static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset, QLIST_FOREACH(rdl, &vmem->rdl_list, next) { MemoryRegionSection tmp = *rdl->section; - if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) { + if (!memory_region_section_intersect_range(&tmp, offset, size)) { continue; } ret = rdl->notify_populate(rdl, &tmp); @@ -379,7 +366,7 @@ static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset, if (rdl2 == rdl) { break; } - if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) { + if (!memory_region_section_intersect_range(&tmp, offset, size)) { continue; } rdl2->notify_discard(rdl2, &tmp); @@ -874,15 +861,16 @@ static uint64_t virtio_mem_get_features(VirtIODevice *vdev, uint64_t features, MachineState *ms = MACHINE(qdev_get_machine()); VirtIOMEM *vmem = VIRTIO_MEM(vdev); - if (ms->numa_state) { -#if defined(CONFIG_ACPI) + if (ms->numa_state && acpi_builtin()) { virtio_add_feature(&features, VIRTIO_MEM_F_ACPI_PXM); -#endif } assert(vmem->unplugged_inaccessible != ON_OFF_AUTO_AUTO); if (vmem->unplugged_inaccessible == ON_OFF_AUTO_ON) { virtio_add_feature(&features, VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE); } + if (qemu_wakeup_suspend_enabled()) { + virtio_add_feature(&features, VIRTIO_MEM_F_PERSISTENT_SUSPEND); + } return features; } @@ -895,18 +883,6 @@ static int virtio_mem_validate_features(VirtIODevice *vdev) return 0; } -static void virtio_mem_system_reset(void *opaque) -{ - VirtIOMEM *vmem = VIRTIO_MEM(opaque); - - /* - * During usual resets, we will unplug all memory and shrink the usable - * region size. This is, however, not possible in all scenarios. Then, - * the guest has to deal with this manually (VIRTIO_MEM_REQ_UNPLUG_ALL). 
- */ - virtio_mem_unplug_all(vmem); -} - static void virtio_mem_prepare_mr(VirtIOMEM *vmem) { const uint64_t region_size = memory_region_size(&vmem->memdev->mr); @@ -958,6 +934,7 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp) VirtIOMEM *vmem = VIRTIO_MEM(dev); uint64_t page_size; RAMBlock *rb; + Object *obj; int ret; if (!vmem->memdev) { @@ -990,7 +967,7 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp) return; } - if (enable_mlock) { + if (should_mlock(mlock_state)) { error_setg(errp, "Incompatible with mlock"); return; } @@ -1070,6 +1047,17 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp) return; } + /* + * Set ourselves as RamDiscardManager before the plug handler maps the + * memory region and exposes it via an address space. + */ + if (memory_region_set_ram_discard_manager(&vmem->memdev->mr, + RAM_DISCARD_MANAGER(vmem))) { + error_setg(errp, "Failed to set RamDiscardManager"); + ram_block_coordinated_discard_require(false); + return; + } + /* * We don't know at this point whether shared RAM is migrated using * QEMU or migrated using the file content. "x-ignore-shared" will be @@ -1084,6 +1072,7 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp) ret = ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb)); if (ret) { error_setg_errno(errp, -ret, "Unexpected error discarding RAM"); + memory_region_set_ram_discard_manager(&vmem->memdev->mr, NULL); ram_block_coordinated_discard_require(false); return; } @@ -1123,14 +1112,28 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp) vmstate_register_any(VMSTATE_IF(vmem), &vmstate_virtio_mem_device_early, vmem); } - qemu_register_reset(virtio_mem_system_reset, vmem); /* - * Set ourselves as RamDiscardManager before the plug handler maps the - * memory region and exposes it via an address space. + * We only want to unplug all memory to start with a clean slate when + * it is safe for the guest -- during system resets that call + * qemu_devices_reset(). + * + * We'll filter out selected qemu_devices_reset() calls used for other + * purposes, like resetting all devices during wakeup from suspend on + * x86 based on the reset type passed to qemu_devices_reset(). + * + * Unplugging all memory during simple device resets can result in the VM + * unexpectedly losing RAM, corrupting VM state. + * + * Simple device resets (or resets triggered by getting a parent device + * reset) must not change the state of plugged memory blocks. Therefore, + * we need a dedicated reset object that only gets called during + * qemu_devices_reset(). */ - memory_region_set_ram_discard_manager(&vmem->memdev->mr, - RAM_DISCARD_MANAGER(vmem)); + obj = object_new(TYPE_VIRTIO_MEM_SYSTEM_RESET); + vmem->system_reset = VIRTIO_MEM_SYSTEM_RESET(obj); + vmem->system_reset->vmem = vmem; + qemu_register_resettable(obj); } static void virtio_mem_device_unrealize(DeviceState *dev) @@ -1138,12 +1141,9 @@ static void virtio_mem_device_unrealize(DeviceState *dev) VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIOMEM *vmem = VIRTIO_MEM(dev); - /* - * The unplug handler unmapped the memory region, it cannot be - * found via an address space anymore. Unset ourselves. 
- */ - memory_region_set_ram_discard_manager(&vmem->memdev->mr, NULL); - qemu_unregister_reset(virtio_mem_system_reset, vmem); + qemu_unregister_resettable(OBJECT(vmem->system_reset)); + object_unref(OBJECT(vmem->system_reset)); + if (vmem->early_migration) { vmstate_unregister(VMSTATE_IF(vmem), &vmstate_virtio_mem_device_early, vmem); @@ -1153,6 +1153,11 @@ static void virtio_mem_device_unrealize(DeviceState *dev) virtio_del_queue(vdev, 0); virtio_cleanup(vdev); g_free(vmem->bitmap); + /* + * The unplug handler unmapped the memory region, it cannot be + * found via an address space anymore. Unset ourselves. + */ + memory_region_set_ram_discard_manager(&vmem->memdev->mr, NULL); ram_block_coordinated_discard_require(false); } @@ -1682,7 +1687,7 @@ static void virtio_mem_instance_finalize(Object *obj) vmem->mr = NULL; } -static Property virtio_mem_properties[] = { +static const Property virtio_mem_properties[] = { DEFINE_PROP_UINT64(VIRTIO_MEM_ADDR_PROP, VirtIOMEM, addr, 0), DEFINE_PROP_UINT32(VIRTIO_MEM_NODE_PROP, VirtIOMEM, node, 0), DEFINE_PROP_BOOL(VIRTIO_MEM_PREALLOC_PROP, VirtIOMEM, prealloc, false), @@ -1696,7 +1701,6 @@ static Property virtio_mem_properties[] = { early_migration, true), DEFINE_PROP_BOOL(VIRTIO_MEM_DYNAMIC_MEMSLOTS_PROP, VirtIOMEM, dynamic_memslots, false), - DEFINE_PROP_END_OF_LIST(), }; static uint64_t virtio_mem_rdm_get_min_granularity(const RamDiscardManager *rdm, @@ -1728,7 +1732,7 @@ static bool virtio_mem_rdm_is_populated(const RamDiscardManager *rdm, } struct VirtIOMEMReplayData { - void *fn; + ReplayRamDiscardState fn; void *opaque; }; @@ -1736,12 +1740,12 @@ static int virtio_mem_rdm_replay_populated_cb(MemoryRegionSection *s, void *arg) { struct VirtIOMEMReplayData *data = arg; - return ((ReplayRamPopulate)data->fn)(s, data->opaque); + return data->fn(s, data->opaque); } static int virtio_mem_rdm_replay_populated(const RamDiscardManager *rdm, MemoryRegionSection *s, - ReplayRamPopulate replay_fn, + ReplayRamDiscardState replay_fn, void *opaque) { const VirtIOMEM *vmem = VIRTIO_MEM(rdm); @@ -1760,14 +1764,13 @@ static int virtio_mem_rdm_replay_discarded_cb(MemoryRegionSection *s, { struct VirtIOMEMReplayData *data = arg; - ((ReplayRamDiscard)data->fn)(s, data->opaque); - return 0; + return data->fn(s, data->opaque); } -static void virtio_mem_rdm_replay_discarded(const RamDiscardManager *rdm, - MemoryRegionSection *s, - ReplayRamDiscard replay_fn, - void *opaque) +static int virtio_mem_rdm_replay_discarded(const RamDiscardManager *rdm, + MemoryRegionSection *s, + ReplayRamDiscardState replay_fn, + void *opaque) { const VirtIOMEM *vmem = VIRTIO_MEM(rdm); struct VirtIOMEMReplayData data = { @@ -1776,8 +1779,8 @@ static void virtio_mem_rdm_replay_discarded(const RamDiscardManager *rdm, }; g_assert(s->mr == &vmem->memdev->mr); - virtio_mem_for_each_unplugged_section(vmem, s, &data, - virtio_mem_rdm_replay_discarded_cb); + return virtio_mem_for_each_unplugged_section(vmem, s, &data, + virtio_mem_rdm_replay_discarded_cb); } static void virtio_mem_rdm_register_listener(RamDiscardManager *rdm, @@ -1843,7 +1846,7 @@ static void virtio_mem_unplug_request_check(VirtIOMEM *vmem, Error **errp) } } -static void virtio_mem_class_init(ObjectClass *klass, void *data) +static void virtio_mem_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); @@ -1885,7 +1888,7 @@ static const TypeInfo virtio_mem_info = { .instance_finalize = virtio_mem_instance_finalize, .class_init = 
virtio_mem_class_init, .class_size = sizeof(VirtIOMEMClass), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_RAM_DISCARD_MANAGER }, { } }, @@ -1897,3 +1900,49 @@ static void virtio_register_types(void) } type_init(virtio_register_types) + +OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(VirtioMemSystemReset, virtio_mem_system_reset, VIRTIO_MEM_SYSTEM_RESET, OBJECT, { TYPE_RESETTABLE_INTERFACE }, { }) + +static void virtio_mem_system_reset_init(Object *obj) +{ +} + +static void virtio_mem_system_reset_finalize(Object *obj) +{ +} + +static ResettableState *virtio_mem_system_reset_get_state(Object *obj) +{ + VirtioMemSystemReset *vmem_reset = VIRTIO_MEM_SYSTEM_RESET(obj); + + return &vmem_reset->reset_state; +} + +static void virtio_mem_system_reset_hold(Object *obj, ResetType type) +{ + VirtioMemSystemReset *vmem_reset = VIRTIO_MEM_SYSTEM_RESET(obj); + VirtIOMEM *vmem = vmem_reset->vmem; + + /* + * When waking up from standby/suspend-to-ram, do not unplug any memory. + */ + if (type == RESET_TYPE_WAKEUP) { + return; + } + + /* + * During usual resets, we will unplug all memory and shrink the usable + * region size. This is, however, not possible in all scenarios. Then, + * the guest has to deal with this manually (VIRTIO_MEM_REQ_UNPLUG_ALL). + */ + virtio_mem_unplug_all(vmem); +} + +static void virtio_mem_system_reset_class_init(ObjectClass *klass, + const void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->get_state = virtio_mem_system_reset_get_state; + rc->phases.hold = virtio_mem_system_reset_hold; +} diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index 320428ac0d3..532c67107ba 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -28,8 +28,8 @@ #include "migration/qemu-file-types.h" #include "qemu/host-utils.h" #include "qemu/module.h" -#include "sysemu/kvm.h" -#include "sysemu/replay.h" +#include "system/kvm.h" +#include "system/replay.h" #include "hw/virtio/virtio-mmio.h" #include "qemu/error-report.h" #include "qemu/log.h" @@ -751,13 +751,12 @@ static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp) /* virtio-mmio device */ -static Property virtio_mmio_properties[] = { +static const Property virtio_mmio_properties[] = { DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy, format_transport_address, true), DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true), DEFINE_PROP_BIT("ioeventfd", VirtIOMMIOProxy, flags, VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT, true), - DEFINE_PROP_END_OF_LIST(), }; static void virtio_mmio_realizefn(DeviceState *d, Error **errp) @@ -785,12 +784,12 @@ static void virtio_mmio_realizefn(DeviceState *d, Error **errp) sysbus_init_mmio(sbd, &proxy->iomem); } -static void virtio_mmio_class_init(ObjectClass *klass, void *data) +static void virtio_mmio_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = virtio_mmio_realizefn; - dc->reset = virtio_mmio_reset; + device_class_set_legacy_reset(dc, virtio_mmio_reset); set_bit(DEVICE_CATEGORY_MISC, dc->categories); device_class_set_props(dc, virtio_mmio_properties); } @@ -856,7 +855,7 @@ static void virtio_mmio_vmstate_change(DeviceState *d, bool running) } } -static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data) +static void virtio_mmio_bus_class_init(ObjectClass *klass, const void *data) { BusClass *bus_class = BUS_CLASS(klass); VirtioBusClass *k = VIRTIO_BUS_CLASS(klass); diff --git a/hw/virtio/virtio-net-pci.c b/hw/virtio/virtio-net-pci.c index 
e03543a70a7..f857a84f11c 100644 --- a/hw/virtio/virtio-net-pci.c +++ b/hw/virtio/virtio-net-pci.c @@ -38,12 +38,11 @@ struct VirtIONetPCI { VirtIONet vdev; }; -static Property virtio_net_properties[] = { +static const Property virtio_net_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED), - DEFINE_PROP_END_OF_LIST(), }; static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -64,7 +63,7 @@ static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void virtio_net_pci_class_init(ObjectClass *klass, void *data) +static void virtio_net_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -75,6 +74,7 @@ static void virtio_net_pci_class_init(ObjectClass *klass, void *data) k->device_id = PCI_DEVICE_ID_VIRTIO_NET; k->revision = VIRTIO_PCI_ABI_VERSION; k->class_id = PCI_CLASS_NETWORK_ETHERNET; + k->sriov_vf_user_creatable = true; set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); device_class_set_props(dc, virtio_net_properties); vpciklass->realize = virtio_net_pci_realize; diff --git a/hw/virtio/virtio-nsm-pci.c b/hw/virtio/virtio-nsm-pci.c new file mode 100644 index 00000000000..ec243963e1e --- /dev/null +++ b/hw/virtio/virtio-nsm-pci.c @@ -0,0 +1,73 @@ +/* + * AWS Nitro Secure Module (NSM) device + * + * Copyright (c) 2024 Dorjoy Chowdhury + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" + +#include "hw/virtio/virtio-pci.h" +#include "hw/virtio/virtio-nsm.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qom/object.h" + +typedef struct VirtIONsmPCI VirtIONsmPCI; + +#define TYPE_VIRTIO_NSM_PCI "virtio-nsm-pci-base" +DECLARE_INSTANCE_CHECKER(VirtIONsmPCI, VIRTIO_NSM_PCI, + TYPE_VIRTIO_NSM_PCI) + +struct VirtIONsmPCI { + VirtIOPCIProxy parent_obj; + VirtIONSM vdev; +}; + +static void virtio_nsm_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIONsmPCI *vnsm = VIRTIO_NSM_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&vnsm->vdev); + + virtio_pci_force_virtio_1(vpci_dev); + + if (!qdev_realize(vdev, BUS(&vpci_dev->bus), errp)) { + return; + } +} + +static void virtio_nsm_pci_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + + k->realize = virtio_nsm_pci_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static void virtio_nsm_initfn(Object *obj) +{ + VirtIONsmPCI *dev = VIRTIO_NSM_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_NSM); +} + +static const VirtioPCIDeviceTypeInfo virtio_nsm_pci_info = { + .base_name = TYPE_VIRTIO_NSM_PCI, + .generic_name = "virtio-nsm-pci", + .instance_size = sizeof(VirtIONsmPCI), + .instance_init = virtio_nsm_initfn, + .class_init = virtio_nsm_pci_class_init, +}; + +static void virtio_nsm_pci_register(void) +{ + virtio_pci_types_register(&virtio_nsm_pci_info); +} + +type_init(virtio_nsm_pci_register) diff --git a/hw/virtio/virtio-nsm.c b/hw/virtio/virtio-nsm.c new file mode 100644 index 00000000000..3bf5e7009a4 --- /dev/null +++ b/hw/virtio/virtio-nsm.c @@ -0,0 +1,1737 @@ +/* + * AWS Nitro Secure 
Module (NSM) device + * + * Copyright (c) 2024 Dorjoy Chowdhury + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/iov.h" +#include "qemu/guest-random.h" +#include "qapi/error.h" + +#include "crypto/hash.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-nsm.h" +#include "hw/virtio/cbor-helpers.h" +#include "standard-headers/linux/virtio_ids.h" + +#define NSM_REQUEST_MAX_SIZE 0x1000 +#define NSM_RESPONSE_BUF_SIZE 0x3000 +#define NSM_RND_BUF_SIZE 256 + +enum NSMResponseTypes { + NSM_SUCCESS = 0, + NSM_INVALID_ARGUMENT = 1, + NSM_INVALID_INDEX = 2, + NSM_READONLY_INDEX = 3, + NSM_INVALID_OPERATION = 4, + NSM_BUFFER_TOO_SMALL = 5, + NSM_INPUT_TOO_LARGE = 6, + NSM_INTERNAL_ERROR = 7, +}; + +static const char *error_string(enum NSMResponseTypes type) +{ + const char *str; + switch (type) { + case NSM_INVALID_ARGUMENT: + str = "InvalidArgument"; + break; + case NSM_INVALID_INDEX: + str = "InvalidIndex"; + break; + case NSM_READONLY_INDEX: + str = "ReadOnlyIndex"; + break; + case NSM_INVALID_OPERATION: + str = "InvalidOperation"; + break; + case NSM_BUFFER_TOO_SMALL: + str = "BufferTooSmall"; + break; + case NSM_INPUT_TOO_LARGE: + str = "InputTooLarge"; + break; + default: + str = "InternalError"; + break; + } + + return str; +} + +/* + * Error response structure: + * + * { + * Map(1) { + * key = String("Error"), + * value = String(error_name) + * } + * } + * + * where error_name can be one of the following: + * InvalidArgument + * InvalidIndex + * InvalidResponse + * ReadOnlyIndex + * InvalidOperation + * BufferTooSmall + * InputTooLarge + * InternalError + */ + +static bool error_response(struct iovec *response, enum NSMResponseTypes error, + Error **errp) +{ + cbor_item_t *root; + size_t len; + bool r = false; + + root = cbor_new_definite_map(1); + if (!root) { + goto err; + } + + if (!qemu_cbor_add_string_to_map(root, "Error", error_string(error))) { + goto err; + } + + len = cbor_serialize(root, response->iov_base, response->iov_len); + if (len == 0) { + error_setg(errp, "Response buffer is small for %s response", + error_string(error)); + goto out; + } + response->iov_len = len; + r = true; + + out: + if (root) { + cbor_decref(&root); + } + return r; + + err: + error_setg(errp, "Failed to initialize %s response", error_string(error)); + goto out; +} + +/* + * GetRandom response structure: + * + * { + * Map(1) { + * key = String("GetRandom"), + * value = Map(1) { + * key = String("random"), + * value = Byte_String() + * } + * } + * } + */ +static bool handle_get_random(VirtIONSM *vnsm, struct iovec *request, + struct iovec *response, Error **errp) +{ + cbor_item_t *root, *nested_map; + size_t len; + uint8_t rnd[NSM_RND_BUF_SIZE]; + bool r = false; + + root = cbor_new_definite_map(1); + if (!root) { + goto err; + } + + if (!qemu_cbor_add_map_to_map(root, "GetRandom", 1, &nested_map)) { + goto err; + } + + qemu_guest_getrandom_nofail(rnd, NSM_RND_BUF_SIZE); + + if (!qemu_cbor_add_bytestring_to_map(nested_map, "random", rnd, + NSM_RND_BUF_SIZE)) { + goto err; + } + + len = cbor_serialize(root, response->iov_base, response->iov_len); + if (len == 0) { + if (error_response(response, NSM_INPUT_TOO_LARGE, errp)) { + r = true; + } + goto out; + } + + response->iov_len = len; + r = true; + + out: + if (root) { + cbor_decref(&root); + } + return r; + + err: + error_setg(errp, "Failed to initialize GetRandom response"); + goto 
out; +} + +/* + * DescribeNSM response structure: + * + * { + * Map(1) { + * key = String("DescribeNSM"), + * value = Map(7) { + * key = String("digest"), + * value = String("SHA384"), + * key = String("max_pcrs"), + * value = Uint8(32), + * key = String("module_id"), + * value = String("i-1234-enc5678"), + * key = String("locked_pcrs"), + * value = Array(), + * key = String("version_major"), + * value = Uint8(1), + * key = String("version_minor"), + * value = Uint8(0), + * key = String("version_patch"), + * value = Uint8(0) + * } + * } + * } + */ +static bool handle_describe_nsm(VirtIONSM *vnsm, struct iovec *request, + struct iovec *response, Error **errp) +{ + cbor_item_t *root, *nested_map; + uint16_t locked_pcrs_cnt = 0; + uint8_t locked_pcrs_ind[NSM_MAX_PCRS]; + size_t len; + bool r = false; + + root = cbor_new_definite_map(1); + if (!root) { + goto err; + } + + if (!qemu_cbor_add_map_to_map(root, "DescribeNSM", 7, &nested_map)) { + goto err; + } + + if (!qemu_cbor_add_string_to_map(nested_map, "digest", vnsm->digest)) { + goto err; + } + + if (!qemu_cbor_add_uint8_to_map(nested_map, "max_pcrs", vnsm->max_pcrs)) { + goto err; + } + + if (!qemu_cbor_add_string_to_map(nested_map, "module_id", + vnsm->module_id)) { + goto err; + } + + for (uint8_t i = 0; i < NSM_MAX_PCRS; ++i) { + if (vnsm->pcrs[i].locked) { + locked_pcrs_ind[locked_pcrs_cnt++] = i; + } + } + if (!qemu_cbor_add_uint8_array_to_map(nested_map, "locked_pcrs", + locked_pcrs_ind, locked_pcrs_cnt)) { + goto err; + } + + if (!qemu_cbor_add_uint8_to_map(nested_map, "version_major", + vnsm->version_major)) { + goto err; + } + + if (!qemu_cbor_add_uint8_to_map(nested_map, "version_minor", + vnsm->version_minor)) { + goto err; + } + + if (!qemu_cbor_add_uint8_to_map(nested_map, "version_patch", + vnsm->version_patch)) { + goto err; + } + + len = cbor_serialize(root, response->iov_base, response->iov_len); + if (len == 0) { + if (error_response(response, NSM_INPUT_TOO_LARGE, errp)) { + r = true; + } + goto out; + } + + response->iov_len = len; + r = true; + + out: + if (root) { + cbor_decref(&root); + } + return r; + + err: + error_setg(errp, "Failed to initialize DescribeNSM response"); + goto out; +} + +/* + * DescribePCR request structure: + * + * { + * Map(1) { + * key = String("DescribePCR"), + * value = Map(1) { + * key = String("index"), + * value = Uint8(pcr) + * } + * } + * } + */ +typedef struct NSMDescribePCRReq { + uint8_t index; +} NSMDescribePCRReq; + +static enum NSMResponseTypes get_nsm_describe_pcr_req( + uint8_t *req, size_t len, + NSMDescribePCRReq *nsm_req) +{ + size_t size; + uint8_t *str; + struct cbor_pair *pair; + cbor_item_t *item = NULL; + struct cbor_load_result result; + enum NSMResponseTypes r = NSM_INVALID_OPERATION; + + item = cbor_load(req, len, &result); + if (!item || result.error.code != CBOR_ERR_NONE) { + goto cleanup; + } + + pair = cbor_map_handle(item); + if (!cbor_isa_map(pair->value)) { + goto cleanup; + } + size = cbor_map_size(pair->value); + if (size < 1) { + goto cleanup; + } + + pair = cbor_map_handle(pair->value); + for (int i = 0; i < size; ++i) { + if (!cbor_isa_string(pair[i].key)) { + continue; + } + + str = cbor_string_handle(pair[i].key); + if (str && cbor_string_length(pair[i].key) == 5 && + memcmp(str, "index", 5) == 0) { + if (!cbor_isa_uint(pair[i].value) || + cbor_int_get_width(pair[i].value) != CBOR_INT_8) { + break; + } + + nsm_req->index = cbor_get_uint8(pair[i].value); + r = NSM_SUCCESS; + break; + } + } + + cleanup: + if (item) { + cbor_decref(&item); + } + return r; +} 
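/*
 * Editorial illustration (not part of the patch): a minimal, standalone
 * sketch of how a client might encode the DescribePCR request that
 * get_nsm_describe_pcr_req() above parses. It assumes plain libcbor is
 * available; the 64-byte buffer and PCR index 0 are arbitrary example
 * values, and allocation-failure checks are elided for brevity.
 */
#include <cbor.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* { "DescribePCR": { "index": 0 } } */
    cbor_item_t *inner = cbor_new_definite_map(1);
    cbor_map_add(inner, (struct cbor_pair) {
        /* the parser insists on an 8-bit CBOR uint, hence cbor_build_uint8() */
        .key = cbor_move(cbor_build_string("index")),
        .value = cbor_move(cbor_build_uint8(0)),
    });

    cbor_item_t *root = cbor_new_definite_map(1);
    cbor_map_add(root, (struct cbor_pair) {
        .key = cbor_move(cbor_build_string("DescribePCR")),
        .value = cbor_move(inner),
    });

    uint8_t buf[64];
    size_t len = cbor_serialize(root, buf, sizeof(buf));

    /* Dump the wire bytes; expected shape is roughly
     * a1 6b "DescribePCR" a1 65 "index" 00 (21 bytes). */
    for (size_t i = 0; i < len; i++) {
        printf("%02x ", buf[i]);
    }
    printf("\n");

    cbor_decref(&root);
    return 0;
}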
+ +/* + * DescribePCR response structure: + * + * { + * Map(1) { + * key = String("DescribePCR"), + * value = Map(2) { + * key = String("data"), + * value = Byte_String(), + * key = String("lock"), + * value = Bool() + * } + * } + * } + */ +static bool handle_describe_pcr(VirtIONSM *vnsm, struct iovec *request, + struct iovec *response, Error **errp) +{ + cbor_item_t *root = NULL; + cbor_item_t *nested_map; + size_t len; + NSMDescribePCRReq nsm_req; + enum NSMResponseTypes type; + struct PCRInfo *pcr; + bool r = false; + + type = get_nsm_describe_pcr_req(request->iov_base, request->iov_len, + &nsm_req); + if (type != NSM_SUCCESS) { + if (error_response(response, type, errp)) { + r = true; + } + goto out; + } + if (nsm_req.index >= vnsm->max_pcrs) { + if (error_response(response, NSM_INVALID_INDEX, errp)) { + r = true; + } + goto out; + } + pcr = &(vnsm->pcrs[nsm_req.index]); + + root = cbor_new_definite_map(1); + if (!root) { + goto err; + } + + if (!qemu_cbor_add_map_to_map(root, "DescribePCR", 2, &nested_map)) { + goto err; + } + + if (!qemu_cbor_add_bytestring_to_map(nested_map, "data", pcr->data, + QCRYPTO_HASH_DIGEST_LEN_SHA384)) { + goto err; + } + + if (!qemu_cbor_add_bool_to_map(nested_map, "lock", pcr->locked)) { + goto err; + } + + len = cbor_serialize(root, response->iov_base, response->iov_len); + if (len == 0) { + if (error_response(response, NSM_INPUT_TOO_LARGE, errp)) { + r = true; + } + goto out; + } + + response->iov_len = len; + r = true; + + out: + if (root) { + cbor_decref(&root); + } + return r; + + err: + error_setg(errp, "Failed to initialize DescribePCR response"); + goto out; +} + +/* + * ExtendPCR request structure: + * + * { + * Map(1) { + * key = String("ExtendPCR"), + * value = Map(2) { + * key = String("index"), + * value = Uint8(pcr), + * key = String("data"), + * value = Byte_String(data) || String(data), + * } + * } + * } + */ +typedef struct NSMExtendPCRReq { + uint8_t index; + uint16_t data_len; + uint8_t data[NSM_REQUEST_MAX_SIZE]; +} NSMExtendPCRReq; + +static enum NSMResponseTypes get_nsm_extend_pcr_req(uint8_t *req, size_t len, + NSMExtendPCRReq *nsm_req) +{ + cbor_item_t *item = NULL; + size_t size ; + uint8_t *str; + bool index_found = false; + bool data_found = false; + struct cbor_pair *pair; + struct cbor_load_result result; + enum NSMResponseTypes r = NSM_INVALID_OPERATION; + + item = cbor_load(req, len, &result); + if (!item || result.error.code != CBOR_ERR_NONE) { + goto cleanup; + } + + pair = cbor_map_handle(item); + if (!cbor_isa_map(pair->value)) { + goto cleanup; + } + size = cbor_map_size(pair->value); + if (size < 2) { + goto cleanup; + } + + pair = cbor_map_handle(pair->value); + for (int i = 0; i < size; ++i) { + if (!cbor_isa_string(pair[i].key)) { + continue; + } + str = cbor_string_handle(pair[i].key); + if (!str) { + continue; + } + + if (cbor_string_length(pair[i].key) == 5 && + memcmp(str, "index", 5) == 0) { + if (!cbor_isa_uint(pair[i].value) || + cbor_int_get_width(pair[i].value) != CBOR_INT_8) { + goto cleanup; + } + nsm_req->index = cbor_get_uint8(pair[i].value); + index_found = true; + continue; + } + + if (cbor_string_length(pair[i].key) == 4 && + memcmp(str, "data", 4) == 0) { + if (cbor_isa_bytestring(pair[i].value)) { + str = cbor_bytestring_handle(pair[i].value); + if (!str) { + goto cleanup; + } + nsm_req->data_len = cbor_bytestring_length(pair[i].value); + } else if (cbor_isa_string(pair[i].value)) { + str = cbor_string_handle(pair[i].value); + if (!str) { + goto cleanup; + } + nsm_req->data_len = 
cbor_string_length(pair[i].value); + } else { + goto cleanup; + } + /* + * nsm_req->data_len will be smaller than NSM_REQUEST_MAX_SIZE as + * we already check for the max request size before processing + * any request. So it's safe to copy. + */ + memcpy(nsm_req->data, str, nsm_req->data_len); + data_found = true; + continue; + } + } + + if (index_found && data_found) { + r = NSM_SUCCESS; + } + + cleanup: + if (item) { + cbor_decref(&item); + } + return r; +} + +/* + * ExtendPCR response structure: + * + * { + * Map(1) { + * key = String("ExtendPCR"), + * value = Map(1) { + * key = String("data"), + * value = Byte_String() + * } + * } + * } + */ +static bool handle_extend_pcr(VirtIONSM *vnsm, struct iovec *request, + struct iovec *response, Error **errp) +{ + cbor_item_t *root = NULL; + cbor_item_t *nested_map; + size_t len; + struct PCRInfo *pcr; + enum NSMResponseTypes type; + bool r = false; + g_autofree NSMExtendPCRReq *nsm_req = g_malloc(sizeof(NSMExtendPCRReq)); + + type = get_nsm_extend_pcr_req(request->iov_base, request->iov_len, + nsm_req); + if (type != NSM_SUCCESS) { + if (error_response(response, type, errp)) { + r = true; + } + goto out; + } + if (nsm_req->index >= vnsm->max_pcrs) { + if (error_response(response, NSM_INVALID_INDEX, errp)) { + r = true; + } + goto out; + } + + pcr = &(vnsm->pcrs[nsm_req->index]); + + if (pcr->locked) { + if (error_response(response, NSM_READONLY_INDEX, errp)) { + r = true; + } + goto out; + } + + if (!vnsm->extend_pcr(vnsm, nsm_req->index, nsm_req->data, + nsm_req->data_len)) { + if (error_response(response, NSM_INTERNAL_ERROR, errp)) { + r = true; + } + goto out; + } + + root = cbor_new_definite_map(1); + if (!root) { + goto err; + } + + if (!qemu_cbor_add_map_to_map(root, "ExtendPCR", 1, &nested_map)) { + goto err; + } + + if (!qemu_cbor_add_bytestring_to_map(nested_map, "data", pcr->data, + QCRYPTO_HASH_DIGEST_LEN_SHA384)) { + goto err; + } + + len = cbor_serialize(root, response->iov_base, response->iov_len); + if (len == 0) { + if (error_response(response, NSM_BUFFER_TOO_SMALL, errp)) { + r = true; + } + goto out; + } + + response->iov_len = len; + r = true; + + out: + if (root) { + cbor_decref(&root); + } + return r; + + err: + error_setg(errp, "Failed to initialize DescribePCR response"); + goto out; +} + +/* + * LockPCR request structure: + * + * { + * Map(1) { + * key = String("LockPCR"), + * value = Map(1) { + * key = String("index"), + * value = Uint8(pcr) + * } + * } + * } + */ +typedef struct NSMLockPCRReq { + uint8_t index; +} NSMLockPCRReq; + +static enum NSMResponseTypes get_nsm_lock_pcr_req(uint8_t *req, size_t len, + NSMLockPCRReq *nsm_req) +{ + cbor_item_t *item = NULL; + size_t size; + uint8_t *str; + struct cbor_pair *pair; + struct cbor_load_result result; + enum NSMResponseTypes r = NSM_INVALID_OPERATION; + + item = cbor_load(req, len, &result); + if (!item || result.error.code != CBOR_ERR_NONE) { + goto cleanup; + } + + pair = cbor_map_handle(item); + if (!cbor_isa_map(pair->value)) { + goto cleanup; + } + size = cbor_map_size(pair->value); + if (size < 1) { + goto cleanup; + } + + pair = cbor_map_handle(pair->value); + for (int i = 0; i < size; ++i) { + if (!cbor_isa_string(pair[i].key)) { + continue; + } + str = cbor_string_handle(pair[i].key); + if (str && cbor_string_length(pair[i].key) == 5 && + memcmp(str, "index", 5) == 0) { + if (!cbor_isa_uint(pair[i].value) || + cbor_int_get_width(pair[i].value) != CBOR_INT_8) { + break; + } + + nsm_req->index = cbor_get_uint8(pair[i].value); + r = NSM_SUCCESS; + break; + } + } 
+ + cleanup: + if (item) { + cbor_decref(&item); + } + return r; +} + +/* + * LockPCR success response structure: + * { + * String("LockPCR") + * } + */ +static bool handle_lock_pcr(VirtIONSM *vnsm, struct iovec *request, + struct iovec *response, Error **errp) +{ + cbor_item_t *root = NULL; + size_t len; + NSMLockPCRReq nsm_req; + enum NSMResponseTypes type; + struct PCRInfo *pcr; + bool r = false; + + type = get_nsm_lock_pcr_req(request->iov_base, request->iov_len, &nsm_req); + if (type != NSM_SUCCESS) { + if (error_response(response, type, errp)) { + r = true; + } + goto cleanup; + } + if (nsm_req.index >= vnsm->max_pcrs) { + if (error_response(response, NSM_INVALID_INDEX, errp)) { + r = true; + } + goto cleanup; + } + + pcr = &(vnsm->pcrs[nsm_req.index]); + + if (pcr->locked) { + if (error_response(response, NSM_READONLY_INDEX, errp)) { + r = true; + } + goto cleanup; + } + + pcr->locked = true; + + root = cbor_build_string("LockPCR"); + if (!root) { + goto err; + } + + len = cbor_serialize(root, response->iov_base, response->iov_len); + if (len == 0) { + if (error_response(response, NSM_BUFFER_TOO_SMALL, errp)) { + r = true; + } + goto cleanup; + } + + response->iov_len = len; + r = true; + goto cleanup; + + err: + error_setg(errp, "Failed to initialize LockPCR response"); + + cleanup: + if (root) { + cbor_decref(&root); + } + return r; +} + +/* + * LockPCRs request structure: + * + * { + * Map(1) { + * key = String("LockPCRs"), + * value = Map(1) { + * key = String("range"), + * value = Uint8(pcr) + * } + * } + * } + */ +typedef struct NSMLockPCRsReq { + uint16_t range; +} NSMLockPCRsReq; + +static enum NSMResponseTypes get_nsm_lock_pcrs_req(uint8_t *req, size_t len, + NSMLockPCRsReq *nsm_req) +{ + cbor_item_t *item = NULL; + size_t size; + uint8_t *str; + struct cbor_pair *pair; + struct cbor_load_result result; + enum NSMResponseTypes r = NSM_INVALID_OPERATION; + + item = cbor_load(req, len, &result); + if (!item || result.error.code != CBOR_ERR_NONE) { + goto cleanup; + } + + pair = cbor_map_handle(item); + if (!cbor_isa_map(pair->value)) { + goto cleanup; + } + size = cbor_map_size(pair->value); + if (size < 1) { + goto cleanup; + } + + pair = cbor_map_handle(pair->value); + for (int i = 0; i < size; ++i) { + if (!cbor_isa_string(pair[i].key)) { + continue; + } + str = cbor_string_handle(pair[i].key); + if (str && cbor_string_length(pair[i].key) == 5 && + memcmp(str, "range", 5) == 0) { + if (!cbor_isa_uint(pair[i].value) || + cbor_int_get_width(pair[i].value) != CBOR_INT_8) { + break; + } + + nsm_req->range = cbor_get_uint8(pair[i].value); + r = NSM_SUCCESS; + break; + } + } + + cleanup: + if (item) { + cbor_decref(&item); + } + return r; +} + +/* + * LockPCRs success response structure: + * { + * String("LockPCRs") + * } + */ +static bool handle_lock_pcrs(VirtIONSM *vnsm, struct iovec *request, + struct iovec *response, Error **errp) +{ + cbor_item_t *root = NULL; + size_t len; + NSMLockPCRsReq nsm_req; + enum NSMResponseTypes type; + bool r = false; + + type = get_nsm_lock_pcrs_req(request->iov_base, request->iov_len, &nsm_req); + if (type != NSM_SUCCESS) { + if (error_response(response, type, errp)) { + r = true; + } + goto cleanup; + } + if (nsm_req.range > vnsm->max_pcrs) { + if (error_response(response, NSM_INVALID_INDEX, errp)) { + r = true; + } + goto cleanup; + } + + for (int i = 0; i < nsm_req.range; ++i) { + vnsm->pcrs[i].locked = true; + } + + root = cbor_build_string("LockPCRs"); + if (!root) { + goto err; + } + + len = cbor_serialize(root, response->iov_base, 
response->iov_len); + if (len == 0) { + if (error_response(response, NSM_BUFFER_TOO_SMALL, errp)) { + r = true; + } + goto cleanup; + } + + response->iov_len = len; + r = true; + goto cleanup; + + err: + error_setg(errp, "Failed to initialize response"); + + cleanup: + if (root) { + cbor_decref(&root); + } + return r; +} + +/* + * Attestation request structure: + * + * Map(1) { + * key = String("Attestation"), + * value = Map(3) { + * key = String("user_data"), + * value = Byte_String() || null, // Optional + * key = String("nonce"), + * value = Byte_String() || null, // Optional + * key = String("public_key"), + * value = Byte_String() || null, // Optional + * } + * } + * } + */ + +struct AttestationProperty { + bool is_null; /* True if property is not present in map or is null */ + uint16_t len; + uint8_t buf[NSM_REQUEST_MAX_SIZE]; +}; + +typedef struct NSMAttestationReq { + struct AttestationProperty public_key; + struct AttestationProperty user_data; + struct AttestationProperty nonce; +} NSMAttestationReq; + +static bool fill_attestation_property(struct AttestationProperty *prop, + cbor_item_t *value) +{ + uint8_t *str; + bool ret = false; + + if (cbor_is_null(value)) { + prop->is_null = true; + ret = true; + goto out; + } else if (cbor_isa_bytestring(value)) { + str = cbor_bytestring_handle(value); + if (!str) { + goto out; + } + prop->len = cbor_bytestring_length(value); + } else if (cbor_isa_string(value)) { + str = cbor_string_handle(value); + if (!str) { + goto out; + } + prop->len = cbor_string_length(value); + } else { + goto out; + } + + /* + * prop->len will be smaller than NSM_REQUEST_MAX_SIZE as we + * already check for the max request size before processing + * any request. So it's safe to copy. + */ + memcpy(prop->buf, str, prop->len); + prop->is_null = false; + ret = true; + + out: + return ret; +} + +static enum NSMResponseTypes get_nsm_attestation_req(uint8_t *req, size_t len, + NSMAttestationReq *nsm_req) +{ + cbor_item_t *item = NULL; + size_t size; + uint8_t *str; + struct cbor_pair *pair; + struct cbor_load_result result; + enum NSMResponseTypes r = NSM_INVALID_OPERATION; + + nsm_req->public_key.is_null = true; + nsm_req->user_data.is_null = true; + nsm_req->nonce.is_null = true; + + item = cbor_load(req, len, &result); + if (!item || result.error.code != CBOR_ERR_NONE) { + goto cleanup; + } + + pair = cbor_map_handle(item); + if (!cbor_isa_map(pair->value)) { + goto cleanup; + } + size = cbor_map_size(pair->value); + if (size == 0) { + r = NSM_SUCCESS; + goto cleanup; + } + + pair = cbor_map_handle(pair->value); + for (int i = 0; i < size; ++i) { + if (!cbor_isa_string(pair[i].key)) { + continue; + } + + str = cbor_string_handle(pair[i].key); + if (!str) { + continue; + } + + if (cbor_string_length(pair[i].key) == 10 && + memcmp(str, "public_key", 10) == 0) { + if (!fill_attestation_property(&(nsm_req->public_key), + pair[i].value)) { + goto cleanup; + } + continue; + } + + if (cbor_string_length(pair[i].key) == 9 && + memcmp(str, "user_data", 9) == 0) { + if (!fill_attestation_property(&(nsm_req->user_data), + pair[i].value)) { + goto cleanup; + } + continue; + } + + if (cbor_string_length(pair[i].key) == 5 && + memcmp(str, "nonce", 5) == 0) { + if (!fill_attestation_property(&(nsm_req->nonce), pair[i].value)) { + goto cleanup; + } + continue; + } + } + + r = NSM_SUCCESS; + + cleanup: + if (item) { + cbor_decref(&item); + } + return r; +} + +static bool add_protected_header_to_cose(cbor_item_t *cose) +{ + cbor_item_t *map = NULL; + cbor_item_t *key = NULL; + 
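+    /*
+     * "value" becomes the negative-integer 'alg' entry (COSE label 1) of
+     * the protected header map; see the cbor_mark_negint() call below.
+     */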
cbor_item_t *value = NULL; + cbor_item_t *bs = NULL; + size_t len; + bool r = false; + size_t buf_len = 4096; + g_autofree uint8_t *buf = g_malloc(buf_len); + + map = cbor_new_definite_map(1); + if (!map) { + goto cleanup; + } + key = cbor_build_uint8(1); + if (!key) { + goto cleanup; + } + value = cbor_new_int8(); + if (!value) { + goto cleanup; + } + cbor_mark_negint(value); + /* we don't actually sign the data, so we use -1 as the 'alg' value */ + cbor_set_uint8(value, 0); + + if (!qemu_cbor_map_add(map, key, value)) { + goto cleanup; + } + + len = cbor_serialize(map, buf, buf_len); + if (len == 0) { + goto cleanup_map; + } + + bs = cbor_build_bytestring(buf, len); + if (!bs) { + goto cleanup_map; + } + if (!qemu_cbor_array_push(cose, bs)) { + cbor_decref(&bs); + goto cleanup_map; + } + r = true; + goto cleanup_map; + + cleanup: + if (key) { + cbor_decref(&key); + } + if (value) { + cbor_decref(&value); + } + + cleanup_map: + if (map) { + cbor_decref(&map); + } + return r; +} + +static bool add_unprotected_header_to_cose(cbor_item_t *cose) +{ + cbor_item_t *map = cbor_new_definite_map(0); + if (!map) { + goto cleanup; + } + if (!qemu_cbor_array_push(cose, map)) { + goto cleanup; + } + + return true; + + cleanup: + if (map) { + cbor_decref(&map); + } + return false; +} + +static bool add_ca_bundle_to_payload(cbor_item_t *map) +{ + cbor_item_t *key_cbor = NULL; + cbor_item_t *value_cbor = NULL; + cbor_item_t *bs = NULL; + uint8_t zero[64] = {0}; + + key_cbor = cbor_build_string("cabundle"); + if (!key_cbor) { + goto cleanup; + } + value_cbor = cbor_new_definite_array(1); + if (!value_cbor) { + goto cleanup; + } + bs = cbor_build_bytestring(zero, 64); + if (!bs) { + goto cleanup; + } + if (!qemu_cbor_array_push(value_cbor, bs)) { + cbor_decref(&bs); + goto cleanup; + } + if (!qemu_cbor_map_add(map, key_cbor, value_cbor)) { + goto cleanup; + } + + return true; + + cleanup: + if (key_cbor) { + cbor_decref(&key_cbor); + } + if (value_cbor) { + cbor_decref(&value_cbor); + } + return false; +} + +static bool add_payload_to_cose(cbor_item_t *cose, VirtIONSM *vnsm, + NSMAttestationReq *req) +{ + cbor_item_t *root = NULL; + cbor_item_t *nested_map; + cbor_item_t *bs = NULL; + size_t locked_cnt; + uint8_t ind[NSM_MAX_PCRS]; + size_t payload_map_size = 9; + size_t len; + struct PCRInfo *pcr; + uint8_t zero[64] = {0}; + bool r = false; + size_t buf_len = 16384; + g_autofree uint8_t *buf = g_malloc(buf_len); + + root = cbor_new_definite_map(payload_map_size); + if (!root) { + goto cleanup; + } + if (!qemu_cbor_add_string_to_map(root, "module_id", vnsm->module_id)) { + goto cleanup; + } + if (!qemu_cbor_add_string_to_map(root, "digest", vnsm->digest)) { + goto cleanup; + } + if (!qemu_cbor_add_uint64_to_map(root, "timestamp", + (uint64_t) time(NULL) * 1000)) { + goto cleanup; + } + + locked_cnt = 0; + for (uint8_t i = 0; i < NSM_MAX_PCRS; ++i) { + if (vnsm->pcrs[i].locked) { + ind[locked_cnt++] = i; + } + } + if (!qemu_cbor_add_map_to_map(root, "pcrs", locked_cnt, &nested_map)) { + goto cleanup; + } + for (uint8_t i = 0; i < locked_cnt; ++i) { + pcr = &(vnsm->pcrs[ind[i]]); + if (!qemu_cbor_add_uint8_key_bytestring_to_map( + nested_map, ind[i], + pcr->data, + QCRYPTO_HASH_DIGEST_LEN_SHA384)) { + goto cleanup; + } + } + if (!qemu_cbor_add_bytestring_to_map(root, "certificate", zero, 64)) { + goto cleanup; + } + if (!add_ca_bundle_to_payload(root)) { + goto cleanup; + } + + if (req->public_key.is_null) { + if (!qemu_cbor_add_null_to_map(root, "public_key")) { + goto cleanup; + } + } else if 
(!qemu_cbor_add_bytestring_to_map(root, "public_key", + req->public_key.buf, + req->public_key.len)) { + goto cleanup; + } + + if (req->user_data.is_null) { + if (!qemu_cbor_add_null_to_map(root, "user_data")) { + goto cleanup; + } + } else if (!qemu_cbor_add_bytestring_to_map(root, "user_data", + req->user_data.buf, + req->user_data.len)) { + goto cleanup; + } + + if (req->nonce.is_null) { + if (!qemu_cbor_add_null_to_map(root, "nonce")) { + goto cleanup; + } + } else if (!qemu_cbor_add_bytestring_to_map(root, "nonce", + req->nonce.buf, + req->nonce.len)) { + goto cleanup; + } + + len = cbor_serialize(root, buf, buf_len); + if (len == 0) { + goto cleanup; + } + + bs = cbor_build_bytestring(buf, len); + if (!bs) { + goto cleanup; + } + if (!qemu_cbor_array_push(cose, bs)) { + cbor_decref(&bs); + goto cleanup; + } + + r = true; + + cleanup: + if (root) { + cbor_decref(&root); + } + return r; +} + +static bool add_signature_to_cose(cbor_item_t *cose) +{ + cbor_item_t *bs = NULL; + uint8_t zero[64] = {0}; + + /* we don't actually sign the data, so we just put 64 zero bytes */ + bs = cbor_build_bytestring(zero, 64); + if (!bs) { + goto cleanup; + } + + if (!qemu_cbor_array_push(cose, bs)) { + goto cleanup; + } + + return true; + + cleanup: + if (bs) { + cbor_decref(&bs); + } + return false; +} + +/* + * Attestation response structure: + * + * { + * Map(1) { + * key = String("Attestation"), + * value = Map(1) { + * key = String("document"), + * value = Byte_String() + * } + * } + * } + * + * The document is a serialized COSE sign1 blob of the structure: + * { + * Array(4) { + * [0] { ByteString() }, // serialized protected header + * [1] { Map(0) }, // 0 length map + * [2] { ByteString() }, // serialized payload + * [3] { ByteString() }, // signature + * } + * } + * + * where [0] protected header is a serialized CBOR blob of the structure: + * { + * Map(1) { + * key = Uint8(1) // alg + * value = NegativeInt8() // Signing algorithm + * } + * } + * + * [2] payload is serialized CBOR blob of the structure: + * { + * Map(9) { + * [0] { key = String("module_id"), value = String(module_id) }, + * [1] { key = String("digest"), value = String("SHA384") }, + * [2] { + * key = String("timestamp"), + * value = Uint64(unix epoch of when document was created) + * }, + * [3] { + * key = String("pcrs"), + * value = Map(locked_pcr_cnt) { + * key = Uint8(pcr_index), + * value = ByteString(pcr_data) + * }, + * }, + * [4] { + * key = String("certificate"), + * value = ByteString(Signing certificate) + * }, + * [5] { key = String("cabundle"), value = Array(N) { ByteString()... 
} }, + * [6] { key = String("public_key"), value = ByteString() || null }, + * [7] { key = String("user_data"), value = ByteString() || null}, + * [8] { key = String("nonce"), value = ByteString() || null}, + * } + * } + */ +static bool handle_attestation(VirtIONSM *vnsm, struct iovec *request, + struct iovec *response, Error **errp) +{ + cbor_item_t *root = NULL; + cbor_item_t *cose = NULL; + cbor_item_t *nested_map; + size_t len; + enum NSMResponseTypes type; + bool r = false; + size_t buf_len = 16384; + g_autofree uint8_t *buf = g_malloc(buf_len); + g_autofree NSMAttestationReq *nsm_req = g_malloc(sizeof(NSMAttestationReq)); + + nsm_req->public_key.is_null = true; + nsm_req->user_data.is_null = true; + nsm_req->nonce.is_null = true; + + type = get_nsm_attestation_req(request->iov_base, request->iov_len, + nsm_req); + if (type != NSM_SUCCESS) { + if (error_response(response, type, errp)) { + r = true; + } + goto out; + } + + cose = cbor_new_definite_array(4); + if (!cose) { + goto err; + } + if (!add_protected_header_to_cose(cose)) { + goto err; + } + if (!add_unprotected_header_to_cose(cose)) { + goto err; + } + if (!add_payload_to_cose(cose, vnsm, nsm_req)) { + goto err; + } + if (!add_signature_to_cose(cose)) { + goto err; + } + + len = cbor_serialize(cose, buf, buf_len); + if (len == 0) { + goto err; + } + + root = cbor_new_definite_map(1); + if (!root) { + goto err; + } + if (!qemu_cbor_add_map_to_map(root, "Attestation", 1, &nested_map)) { + goto err; + } + if (!qemu_cbor_add_bytestring_to_map(nested_map, "document", buf, len)) { + goto err; + } + + len = cbor_serialize(root, response->iov_base, response->iov_len); + if (len == 0) { + if (error_response(response, NSM_INPUT_TOO_LARGE, errp)) { + r = true; + } + goto out; + } + + response->iov_len = len; + r = true; + + out: + if (root) { + cbor_decref(&root); + } + if (cose) { + cbor_decref(&cose); + } + return r; + + err: + error_setg(errp, "Failed to initialize Attestation response"); + goto out; +} + +enum CBOR_ROOT_TYPE { + CBOR_ROOT_TYPE_STRING = 0, + CBOR_ROOT_TYPE_MAP = 1, +}; + +struct nsm_cmd { + char name[16]; + /* + * There are 2 types of request + * 1) String(); "GetRandom", "DescribeNSM" + * 2) Map(1) { key: String(), value: ... 
} + */ + enum CBOR_ROOT_TYPE root_type; + bool (*response_fn)(VirtIONSM *vnsm, struct iovec *request, + struct iovec *response, Error **errp); +}; + +const struct nsm_cmd nsm_cmds[] = { + { "GetRandom", CBOR_ROOT_TYPE_STRING, handle_get_random }, + { "DescribeNSM", CBOR_ROOT_TYPE_STRING, handle_describe_nsm }, + { "DescribePCR", CBOR_ROOT_TYPE_MAP, handle_describe_pcr }, + { "ExtendPCR", CBOR_ROOT_TYPE_MAP, handle_extend_pcr }, + { "LockPCR", CBOR_ROOT_TYPE_MAP, handle_lock_pcr }, + { "LockPCRs", CBOR_ROOT_TYPE_MAP, handle_lock_pcrs }, + { "Attestation", CBOR_ROOT_TYPE_MAP, handle_attestation }, +}; + +static const struct nsm_cmd *get_nsm_request_cmd(uint8_t *buf, size_t len) +{ + size_t size; + uint8_t *req; + enum CBOR_ROOT_TYPE root_type; + struct cbor_load_result result; + cbor_item_t *item = cbor_load(buf, len, &result); + if (!item || result.error.code != CBOR_ERR_NONE) { + goto cleanup; + } + + if (cbor_isa_string(item)) { + size = cbor_string_length(item); + req = cbor_string_handle(item); + root_type = CBOR_ROOT_TYPE_STRING; + } else if (cbor_isa_map(item) && cbor_map_size(item) == 1) { + struct cbor_pair *handle = cbor_map_handle(item); + if (cbor_isa_string(handle->key)) { + size = cbor_string_length(handle->key); + req = cbor_string_handle(handle->key); + root_type = CBOR_ROOT_TYPE_MAP; + } else { + goto cleanup; + } + } else { + goto cleanup; + } + + if (size == 0 || req == NULL) { + goto cleanup; + } + + for (int i = 0; i < ARRAY_SIZE(nsm_cmds); ++i) { + if (nsm_cmds[i].root_type == root_type && + strlen(nsm_cmds[i].name) == size && + memcmp(nsm_cmds[i].name, req, size) == 0) { + cbor_decref(&item); + return &nsm_cmds[i]; + } + } + + cleanup: + if (item) { + cbor_decref(&item); + } + return NULL; +} + +static bool get_nsm_request_response(VirtIONSM *vnsm, struct iovec *req, + struct iovec *resp, Error **errp) +{ + const struct nsm_cmd *cmd; + + if (req->iov_len > NSM_REQUEST_MAX_SIZE) { + if (error_response(resp, NSM_INPUT_TOO_LARGE, errp)) { + return true; + } + error_setg(errp, "Failed to initialize InputTooLarge response"); + return false; + } + + cmd = get_nsm_request_cmd(req->iov_base, req->iov_len); + + if (cmd == NULL) { + if (error_response(resp, NSM_INVALID_OPERATION, errp)) { + return true; + } + error_setg(errp, "Failed to initialize InvalidOperation response"); + return false; + } + + return cmd->response_fn(vnsm, req, resp, errp); +} + +static void handle_input(VirtIODevice *vdev, VirtQueue *vq) +{ + g_autofree VirtQueueElement *out_elem = NULL; + g_autofree VirtQueueElement *in_elem = NULL; + VirtIONSM *vnsm = VIRTIO_NSM(vdev); + Error *err = NULL; + size_t sz; + struct iovec req = {.iov_base = NULL, .iov_len = 0}; + struct iovec res = {.iov_base = NULL, .iov_len = 0}; + + out_elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!out_elem) { + /* nothing in virtqueue */ + return; + } + + sz = iov_size(out_elem->out_sg, out_elem->out_num); + if (sz == 0) { + virtio_error(vdev, "Expected non-zero sized request buffer in " + "virtqueue"); + goto cleanup; + } + + in_elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!in_elem) { + virtio_error(vdev, "Expected response buffer after request buffer " + "in virtqueue"); + goto cleanup; + } + if (iov_size(in_elem->in_sg, in_elem->in_num) != NSM_RESPONSE_BUF_SIZE) { + virtio_error(vdev, "Expected response buffer of length 0x3000"); + goto cleanup; + } + + req.iov_base = g_malloc(sz); + req.iov_len = iov_to_buf(out_elem->out_sg, out_elem->out_num, 0, + req.iov_base, sz); + if (req.iov_len != sz) { + 
virtio_error(vdev, "Failed to copy request buffer"); + goto cleanup; + } + + res.iov_base = g_malloc(NSM_RESPONSE_BUF_SIZE); + res.iov_len = NSM_RESPONSE_BUF_SIZE; + + if (!get_nsm_request_response(vnsm, &req, &res, &err)) { + error_report_err(err); + virtio_error(vdev, "Failed to get NSM request response"); + goto cleanup; + } + + sz = iov_from_buf(in_elem->in_sg, in_elem->in_num, 0, res.iov_base, + res.iov_len); + if (sz != res.iov_len) { + virtio_error(vdev, "Failed to copy response buffer"); + goto cleanup; + } + + g_free(req.iov_base); + g_free(res.iov_base); + virtqueue_push(vq, out_elem, 0); + virtqueue_push(vq, in_elem, sz); + virtio_notify(vdev, vq); + return; + + cleanup: + g_free(req.iov_base); + g_free(res.iov_base); + if (out_elem) { + virtqueue_detach_element(vq, out_elem, 0); + } + if (in_elem) { + virtqueue_detach_element(vq, in_elem, 0); + } +} + +static uint64_t get_features(VirtIODevice *vdev, uint64_t f, Error **errp) +{ + return f; +} + +static bool extend_pcr(VirtIONSM *vnsm, int ind, uint8_t *data, uint16_t len) +{ + Error *err = NULL; + struct PCRInfo *pcr = &(vnsm->pcrs[ind]); + size_t digest_len = QCRYPTO_HASH_DIGEST_LEN_SHA384; + uint8_t result[QCRYPTO_HASH_DIGEST_LEN_SHA384]; + uint8_t *ptr = result; + struct iovec iov[2] = { + { .iov_base = pcr->data, .iov_len = QCRYPTO_HASH_DIGEST_LEN_SHA384 }, + { .iov_base = data, .iov_len = len }, + }; + + if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALGO_SHA384, iov, 2, &ptr, &digest_len, + &err) < 0) { + return false; + } + + memcpy(pcr->data, result, QCRYPTO_HASH_DIGEST_LEN_SHA384); + return true; +} + +static void lock_pcr(VirtIONSM *vnsm, int ind) +{ + vnsm->pcrs[ind].locked = true; +} + +static void virtio_nsm_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIONSM *vnsm = VIRTIO_NSM(dev); + + vnsm->max_pcrs = NSM_MAX_PCRS; + vnsm->digest = (char *) "SHA384"; + if (vnsm->module_id == NULL) { + vnsm->module_id = (char *) "i-234-enc5678"; + } + vnsm->version_major = 1; + vnsm->version_minor = 0; + vnsm->version_patch = 0; + vnsm->extend_pcr = extend_pcr; + vnsm->lock_pcr = lock_pcr; + + virtio_init(vdev, VIRTIO_ID_NITRO_SEC_MOD, 0); + + vnsm->vq = virtio_add_queue(vdev, 2, handle_input); +} + +static void virtio_nsm_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + + virtio_del_queue(vdev, 0); + virtio_cleanup(vdev); +} + +static const VMStateDescription vmstate_pcr_info_entry = { + .name = "pcr_info_entry", + .minimum_version_id = 1, + .version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_BOOL(locked, struct PCRInfo), + VMSTATE_UINT8_ARRAY(data, struct PCRInfo, + QCRYPTO_HASH_DIGEST_LEN_SHA384), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_virtio_nsm_device = { + .name = "virtio-nsm-device", + .minimum_version_id = 1, + .version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_STRUCT_ARRAY(pcrs, VirtIONSM, NSM_MAX_PCRS, 1, + vmstate_pcr_info_entry, struct PCRInfo), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_virtio_nsm = { + .name = "virtio-nsm", + .minimum_version_id = 1, + .version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, +}; + +static const Property virtio_nsm_properties[] = { + DEFINE_PROP_STRING("module-id", VirtIONSM, module_id), +}; + +static void virtio_nsm_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + 
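+    /*
+     * Hook up qdev properties and migration state on the DeviceClass, and
+     * the realize/unrealize/feature callbacks plus the per-PCR vmstate on
+     * the VirtioDeviceClass.
+     */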
+ device_class_set_props(dc, virtio_nsm_properties); + dc->vmsd = &vmstate_virtio_nsm; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + vdc->realize = virtio_nsm_device_realize; + vdc->unrealize = virtio_nsm_device_unrealize; + vdc->get_features = get_features; + vdc->vmsd = &vmstate_virtio_nsm_device; +} + +static const TypeInfo virtio_nsm_info = { + .name = TYPE_VIRTIO_NSM, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIONSM), + .class_init = virtio_nsm_class_init, +}; + +static void virtio_register_types(void) +{ + type_register_static(&virtio_nsm_info); +} + +type_init(virtio_register_types) diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 9534730bba1..767216d7959 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -30,15 +30,16 @@ #include "qemu/error-report.h" #include "qemu/log.h" #include "qemu/module.h" +#include "qemu/bswap.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" #include "hw/loader.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "hw/virtio/virtio-pci.h" #include "qemu/range.h" #include "hw/virtio/virtio-bus.h" #include "qapi/visitor.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "trace.h" #define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev)) @@ -146,9 +147,7 @@ static const VMStateDescription vmstate_virtio_pci = { static bool virtio_pci_has_extra_state(DeviceState *d) { - VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); - - return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA; + return true; } static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f) @@ -615,8 +614,12 @@ static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy, reg = &proxy->regs[i]; if (*off >= reg->offset && *off + len <= reg->offset + reg->size) { - *off -= reg->offset; - return ®->mr; + MemoryRegionSection mrs = memory_region_find(®->mr, + *off - reg->offset, len); + assert(mrs.mr); + *off = mrs.offset_within_region; + memory_region_unref(mrs.mr); + return mrs.mr; } } @@ -866,6 +869,9 @@ static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no, VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); VirtQueue *vq; + if (!proxy->vector_irqfd && vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) + return -1; + if (queue_no == VIRTIO_CONFIG_IRQ_IDX) { *n = virtio_config_get_guest_notifier(vdev); *vector = vdev->config_vector; @@ -1208,7 +1214,12 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign, static bool virtio_pci_query_guest_notifiers(DeviceState *d) { VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); - return msix_enabled(&proxy->pci_dev); + + if (msix_enabled(&proxy->pci_dev)) { + return true; + } else { + return pci_irq_disabled(&proxy->pci_dev); + } } static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) @@ -1955,6 +1966,7 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp) uint8_t *config; uint32_t size; VirtIODevice *vdev = virtio_bus_get_device(bus); + int16_t res; /* * Virtio capabilities present without @@ -2050,6 +2062,8 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp) if (modern_pio) { memory_region_init(&proxy->io_bar, OBJECT(proxy), "virtio-pci-io", 0x4); + address_space_init(&proxy->modern_cfg_io_as, &proxy->io_bar, + "virtio-pci-cfg-io-as"); pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx, PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar); @@ -2100,6 +2114,18 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp) 
pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx, PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar); } + + if (pci_is_vf(&proxy->pci_dev)) { + pcie_ari_init(&proxy->pci_dev, proxy->last_pcie_cap_offset); + proxy->last_pcie_cap_offset += PCI_ARI_SIZEOF; + } else { + res = pcie_sriov_pf_init_from_user_created_vfs( + &proxy->pci_dev, proxy->last_pcie_cap_offset, errp); + if (res > 0) { + proxy->last_pcie_cap_offset += res; + virtio_add_feature(&vdev->host_features, VIRTIO_F_SR_IOV); + } + } } static void virtio_pci_device_unplugged(DeviceState *d) @@ -2173,6 +2199,9 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp) /* PCI BAR regions must be powers of 2 */ pow2ceil(proxy->notify.offset + proxy->notify.size)); + address_space_init(&proxy->modern_cfg_mem_as, &proxy->modern_bar, + "virtio-pci-cfg-mem-as"); + if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) { proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; } @@ -2187,19 +2216,16 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp) if (pcie_port && pci_is_express(pci_dev)) { int pos; - uint16_t last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE; + proxy->last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE; pos = pcie_endpoint_cap_init(pci_dev, 0); assert(pos > 0); - pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, - PCI_PM_SIZEOF, errp); + pos = pci_pm_init(pci_dev, 0, errp); if (pos < 0) { return; } - pci_dev->exp.pm_cap = pos; - /* * Indicates that this function complies with revision 1.2 of the * PCI Power Management Interface Specification. @@ -2207,9 +2233,9 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp) pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3); if (proxy->flags & VIRTIO_PCI_FLAG_AER) { - pcie_aer_init(pci_dev, PCI_ERR_VER, last_pcie_cap_offset, + pcie_aer_init(pci_dev, PCI_ERR_VER, proxy->last_pcie_cap_offset, PCI_ERR_SIZEOF, NULL); - last_pcie_cap_offset += PCI_ERR_SIZEOF; + proxy->last_pcie_cap_offset += PCI_ERR_SIZEOF; } if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) { @@ -2234,9 +2260,9 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp) } if (proxy->flags & VIRTIO_PCI_FLAG_ATS) { - pcie_ats_init(pci_dev, last_pcie_cap_offset, + pcie_ats_init(pci_dev, proxy->last_pcie_cap_offset, proxy->flags & VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED); - last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF; + proxy->last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF; } if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) { @@ -2262,12 +2288,18 @@ static void virtio_pci_exit(PCIDevice *pci_dev) VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) && !pci_bus_is_root(pci_get_bus(pci_dev)); + bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; + pcie_sriov_pf_exit(&proxy->pci_dev); msix_uninit_exclusive_bar(pci_dev); if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port && pci_is_express(pci_dev)) { pcie_aer_exit(pci_dev); } + address_space_destroy(&proxy->modern_cfg_mem_as); + if (modern_pio) { + address_space_destroy(&proxy->modern_cfg_io_as); + } } static void virtio_pci_reset(DeviceState *qdev) @@ -2293,11 +2325,11 @@ static bool virtio_pci_no_soft_reset(PCIDevice *dev) { uint16_t pmcsr; - if (!pci_is_express(dev) || !dev->exp.pm_cap) { + if (!pci_is_express(dev) || !(dev->cap_present & QEMU_PCI_CAP_PM)) { return false; } - pmcsr = pci_get_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL); + pmcsr = pci_get_word(dev->config + dev->pm_cap + PCI_PM_CTRL); /* * When No_Soft_Reset bit is set and the device @@ -2326,21 +2358,17 
@@ static void virtio_pci_bus_reset_hold(Object *obj, ResetType type) if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) { pci_word_test_and_clear_mask( - dev->config + dev->exp.pm_cap + PCI_PM_CTRL, + dev->config + dev->pm_cap + PCI_PM_CTRL, PCI_PM_CTRL_STATE_MASK); } } } -static Property virtio_pci_properties[] = { +static const Property virtio_pci_properties[] = { DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false), - DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags, - VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true), DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false), - DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags, - VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false), DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false), DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy, @@ -2361,7 +2389,6 @@ static Property virtio_pci_properties[] = { VIRTIO_PCI_FLAG_INIT_FLR_BIT, true), DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_AER_BIT, false), - DEFINE_PROP_END_OF_LIST(), }; static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp) @@ -2370,15 +2397,22 @@ static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp) VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev); PCIDevice *pci_dev = &proxy->pci_dev; - if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) && - virtio_pci_modern(proxy)) { + if (virtio_pci_modern(proxy)) { pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; } vpciklass->parent_dc_realize(qdev, errp); } -static void virtio_pci_class_init(ObjectClass *klass, void *data) +static int virtio_pci_sync_config(DeviceState *dev, Error **errp) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(dev); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + return qdev_sync_config(DEVICE(vdev), errp); +} + +static void virtio_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -2394,6 +2428,7 @@ static void virtio_pci_class_init(ObjectClass *klass, void *data) device_class_set_parent_realize(dc, virtio_pci_dc_realize, &vpciklass->parent_dc_realize); rc->phases.hold = virtio_pci_bus_reset_hold; + dc->sync_config = virtio_pci_sync_config; } static const TypeInfo virtio_pci_info = { @@ -2405,14 +2440,13 @@ static const TypeInfo virtio_pci_info = { .abstract = true, }; -static Property virtio_pci_generic_properties[] = { +static const Property virtio_pci_generic_properties[] = { DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy, ON_OFF_AUTO_AUTO), DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_pci_base_class_init(ObjectClass *klass, void *data) +static void virtio_pci_base_class_init(ObjectClass *klass, const void *data) { const VirtioPCIDeviceTypeInfo *t = data; if (t->class_init) { @@ -2420,7 +2454,7 @@ static void virtio_pci_base_class_init(ObjectClass *klass, void *data) } } -static void virtio_pci_generic_class_init(ObjectClass *klass, void *data) +static void virtio_pci_generic_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -2460,7 +2494,7 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t) .name = t->generic_name, .parent = base_type_info.name, .class_init = virtio_pci_generic_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const 
InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { } @@ -2476,18 +2510,18 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t) generic_type_info.parent = base_name; generic_type_info.class_init = virtio_pci_base_class_init; - generic_type_info.class_data = (void *)t; + generic_type_info.class_data = t; assert(!t->non_transitional_name); assert(!t->transitional_name); } else { base_type_info.class_init = virtio_pci_base_class_init; - base_type_info.class_data = (void *)t; + base_type_info.class_data = t; } - type_register(&base_type_info); + type_register_static(&base_type_info); if (generic_type_info.name) { - type_register(&generic_type_info); + type_register_static(&generic_type_info); } if (t->non_transitional_name) { @@ -2495,13 +2529,13 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t) .name = t->non_transitional_name, .parent = base_type_info.name, .instance_init = virtio_pci_non_transitional_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { } }, }; - type_register(&non_transitional_type_info); + type_register_static(&non_transitional_type_info); } if (t->transitional_name) { @@ -2509,7 +2543,7 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t) .name = t->transitional_name, .parent = base_type_info.name, .instance_init = virtio_pci_transitional_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { /* * Transitional virtio devices work only as Conventional PCI * devices because they require PIO ports. @@ -2518,7 +2552,7 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t) { } }, }; - type_register(&transitional_type_info); + type_register_static(&transitional_type_info); } g_free(base_name); } @@ -2565,7 +2599,7 @@ static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name); } -static void virtio_pci_bus_class_init(ObjectClass *klass, void *data) +static void virtio_pci_bus_class_init(ObjectClass *klass, const void *data) { BusClass *bus_class = BUS_CLASS(klass); VirtioBusClass *k = VIRTIO_BUS_CLASS(klass); diff --git a/hw/virtio/virtio-pmem-pci.c b/hw/virtio/virtio-pmem-pci.c index cfe7f3b67ca..babd91c21f0 100644 --- a/hw/virtio/virtio-pmem-pci.c +++ b/hw/virtio/virtio-pmem-pci.c @@ -80,7 +80,7 @@ static void virtio_pmem_pci_fill_device_info(const MemoryDeviceState *md, info->type = MEMORY_DEVICE_INFO_KIND_VIRTIO_PMEM; } -static void virtio_pmem_pci_class_init(ObjectClass *klass, void *data) +static void virtio_pmem_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/virtio-pmem.c b/hw/virtio/virtio-pmem.c index c3512c2dae3..3416ea1827c 100644 --- a/hw/virtio/virtio-pmem.c +++ b/hw/virtio/virtio-pmem.c @@ -21,7 +21,7 @@ #include "hw/virtio/virtio-access.h" #include "standard-headers/linux/virtio_ids.h" #include "standard-headers/linux/virtio_pmem.h" -#include "sysemu/hostmem.h" +#include "system/hostmem.h" #include "block/aio.h" #include "block/thread-pool.h" #include "trace.h" @@ -155,14 +155,13 @@ static MemoryRegion *virtio_pmem_get_memory_region(VirtIOPMEM *pmem, return &pmem->memdev->mr; } -static Property virtio_pmem_properties[] = { +static const Property virtio_pmem_properties[] = { DEFINE_PROP_UINT64(VIRTIO_PMEM_ADDR_PROP, VirtIOPMEM, start, 
0), DEFINE_PROP_LINK(VIRTIO_PMEM_MEMDEV_PROP, VirtIOPMEM, memdev, TYPE_MEMORY_BACKEND, HostMemoryBackend *), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_pmem_class_init(ObjectClass *klass, void *data) +static void virtio_pmem_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/virtio/virtio-qmp.c b/hw/virtio/virtio-qmp.c index 1dd96ed20f1..3b6377cf0d2 100644 --- a/hw/virtio/virtio-qmp.c +++ b/hw/virtio/virtio-qmp.c @@ -15,8 +15,8 @@ #include "qapi/error.h" #include "qapi/qapi-commands-virtio.h" #include "qapi/qapi-commands-qom.h" -#include "qapi/qmp/qobject.h" -#include "qapi/qmp/qjson.h" +#include "qobject/qobject.h" +#include "qobject/qjson.h" #include "hw/virtio/vhost-user.h" #include "standard-headers/linux/virtio_ids.h" @@ -121,6 +121,12 @@ static const qmp_virtio_feature_map_t vhost_user_protocol_map[] = { FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_STATUS, \ "VHOST_USER_PROTOCOL_F_STATUS: Querying and notifying back-end " "device status supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SHARED_OBJECT, \ + "VHOST_USER_PROTOCOL_F_SHARED_OBJECT: Backend shared object " + "supported"), + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_DEVICE_STATE, \ + "VHOST_USER_PROTOCOL_F_DEVICE_STATE: Backend device state transfer " + "supported"), { -1, "" } }; @@ -450,6 +456,9 @@ static const qmp_virtio_feature_map_t virtio_mem_feature_map[] = { FEATURE_ENTRY(VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, \ "VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE: Unplugged memory cannot be " "accessed"), + FEATURE_ENTRY(VIRTIO_MEM_F_PERSISTENT_SUSPEND, \ + "VIRTIO_MEM_F_PERSISTENT_SUSPND: Plugged memory will remain " + "plugged when suspending+resuming"), { -1, "" } }; #endif diff --git a/hw/virtio/virtio-rng-pci.c b/hw/virtio/virtio-rng-pci.c index 6e76f8b57be..39b600356ea 100644 --- a/hw/virtio/virtio-rng-pci.c +++ b/hw/virtio/virtio-rng-pci.c @@ -32,12 +32,11 @@ struct VirtIORngPCI { VirtIORNG vdev; }; -static Property virtio_rng_properties[] = { +static const Property virtio_rng_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED), - DEFINE_PROP_END_OF_LIST(), }; static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -54,7 +53,7 @@ static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) } } -static void virtio_rng_pci_class_init(ObjectClass *klass, void *data) +static void virtio_rng_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c index 7cf31da071f..3df5d2576ec 100644 --- a/hw/virtio/virtio-rng.c +++ b/hw/virtio/virtio-rng.c @@ -17,8 +17,8 @@ #include "hw/virtio/virtio.h" #include "hw/qdev-properties.h" #include "hw/virtio/virtio-rng.h" -#include "sysemu/rng.h" -#include "sysemu/runstate.h" +#include "system/rng.h" +#include "system/runstate.h" #include "qom/object_interfaces.h" #include "trace.h" @@ -159,17 +159,18 @@ static void check_rate_limit(void *opaque) vrng->activate_timer = true; } -static void virtio_rng_set_status(VirtIODevice *vdev, uint8_t status) +static int virtio_rng_set_status(VirtIODevice *vdev, uint8_t status) { VirtIORNG *vrng = VIRTIO_RNG(vdev); if (!vdev->vm_running) { - return; + return 0; } vdev->status = status; /* Something changed, try to process buffers */ 
virtio_rng_process(vrng); + return 0; } static void virtio_rng_device_realize(DeviceState *dev, Error **errp) @@ -249,7 +250,7 @@ static const VMStateDescription vmstate_virtio_rng = { }, }; -static Property virtio_rng_properties[] = { +static const Property virtio_rng_properties[] = { /* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s. If * you have an entropy source capable of generating more entropy than this * and you can pass it through via virtio-rng, then hats off to you. Until @@ -258,10 +259,9 @@ static Property virtio_rng_properties[] = { DEFINE_PROP_UINT64("max-bytes", VirtIORNG, conf.max_bytes, INT64_MAX), DEFINE_PROP_UINT32("period", VirtIORNG, conf.period_ms, 1 << 16), DEFINE_PROP_LINK("rng", VirtIORNG, conf.rng, TYPE_RNG_BACKEND, RngBackend *), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_rng_class_init(ObjectClass *klass, void *data) +static void virtio_rng_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/virtio/virtio-scsi-pci.c b/hw/virtio/virtio-scsi-pci.c index e8e3442f382..af877592070 100644 --- a/hw/virtio/virtio-scsi-pci.c +++ b/hw/virtio/virtio-scsi-pci.c @@ -35,12 +35,11 @@ struct VirtIOSCSIPCI { VirtIOSCSI vdev; }; -static Property virtio_scsi_pci_properties[] = { +static const Property virtio_scsi_pci_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED), - DEFINE_PROP_END_OF_LIST(), }; static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) @@ -73,7 +72,7 @@ static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data) +static void virtio_scsi_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/virtio-serial-pci.c b/hw/virtio/virtio-serial-pci.c index cea31adcc4c..3f212ffe52f 100644 --- a/hw/virtio/virtio-serial-pci.c +++ b/hw/virtio/virtio-serial-pci.c @@ -69,15 +69,14 @@ static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) qdev_realize(vdev, BUS(&vpci_dev->bus), errp); } -static Property virtio_serial_pci_properties[] = { +static const Property virtio_serial_pci_properties[] = { DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2), DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), - DEFINE_PROP_END_OF_LIST(), }; -static void virtio_serial_pci_class_init(ObjectClass *klass, void *data) +static void virtio_serial_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 9e10cbc0587..9a81ad912e0 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -20,7 +20,7 @@ #include "qemu/log.h" #include "qemu/main-loop.h" #include "qemu/module.h" -#include "exec/tswap.h" +#include "qemu/target-info.h" #include "qom/object_interfaces.h" #include "hw/core/cpu.h" #include "hw/virtio/virtio.h" @@ -30,8 +30,8 @@ #include "hw/virtio/virtio-bus.h" #include "hw/qdev-properties.h" #include "hw/virtio/virtio-access.h" -#include "sysemu/dma.h" -#include "sysemu/runstate.h" 
+#include "system/dma.h" +#include "system/runstate.h" #include "virtio-qmp.h" #include "standard-headers/linux/virtio_ids.h" @@ -205,6 +205,15 @@ static const char *virtio_id_to_name(uint16_t device_id) return name; } +static void virtio_check_indirect_feature(VirtIODevice *vdev) +{ + if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Device %s: indirect_desc was not negotiated!\n", + vdev->name); + } +} + /* Called within call_rcu(). */ static void virtio_free_region_cache(VRingMemoryRegionCaches *caches) { @@ -929,18 +938,18 @@ static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem, static void virtqueue_ordered_fill(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len) { - unsigned int i, steps, max_steps; + unsigned int i, steps, max_steps, ndescs; i = vq->used_idx % vq->vring.num; steps = 0; /* - * We shouldn't need to increase 'i' by more than the distance - * between used_idx and last_avail_idx. + * We shouldn't need to increase 'i' by more than or equal to + * the distance between used_idx and last_avail_idx (max_steps). */ max_steps = (vq->last_avail_idx - vq->used_idx) % vq->vring.num; /* Search for element in vq->used_elems */ - while (steps <= max_steps) { + while (steps < max_steps) { /* Found element, set length and mark as filled */ if (vq->used_elems[i].index == elem->index) { vq->used_elems[i].len = len; @@ -948,8 +957,18 @@ static void virtqueue_ordered_fill(VirtQueue *vq, const VirtQueueElement *elem, break; } - i += vq->used_elems[i].ndescs; - steps += vq->used_elems[i].ndescs; + ndescs = vq->used_elems[i].ndescs; + + /* Defensive sanity check */ + if (unlikely(ndescs == 0 || ndescs > vq->vring.num)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: %s invalid ndescs %u at position %u\n", + __func__, vq->vdev->name, ndescs, i); + return; + } + + i += ndescs; + steps += ndescs; if (i >= vq->vring.num) { i -= vq->vring.num; @@ -1680,8 +1699,8 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz) VirtIODevice *vdev = vq->vdev; VirtQueueElement *elem = NULL; unsigned out_num, in_num, elem_entries; - hwaddr addr[VIRTQUEUE_MAX_SIZE]; - struct iovec iov[VIRTQUEUE_MAX_SIZE]; + hwaddr QEMU_UNINITIALIZED addr[VIRTQUEUE_MAX_SIZE]; + struct iovec QEMU_UNINITIALIZED iov[VIRTQUEUE_MAX_SIZE]; VRingDesc desc; int rc; @@ -1733,6 +1752,7 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz) virtio_error(vdev, "Invalid size for indirect buffer table"); goto done; } + virtio_check_indirect_feature(vdev); /* loop over the indirect descriptor table */ len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as, @@ -1826,8 +1846,8 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz) VirtIODevice *vdev = vq->vdev; VirtQueueElement *elem = NULL; unsigned out_num, in_num, elem_entries; - hwaddr addr[VIRTQUEUE_MAX_SIZE]; - struct iovec iov[VIRTQUEUE_MAX_SIZE]; + hwaddr QEMU_UNINITIALIZED addr[VIRTQUEUE_MAX_SIZE]; + struct iovec QEMU_UNINITIALIZED iov[VIRTQUEUE_MAX_SIZE]; VRingPackedDesc desc; uint16_t id; int rc; @@ -1870,6 +1890,7 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz) virtio_error(vdev, "Invalid size for indirect buffer table"); goto done; } + virtio_check_indirect_feature(vdev); /* loop over the indirect descriptor table */ len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as, @@ -2221,12 +2242,12 @@ int virtio_set_status(VirtIODevice *vdev, uint8_t val) { VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); trace_virtio_set_status(vdev, val); + int 
ret = 0; if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) && val & VIRTIO_CONFIG_S_FEATURES_OK) { - int ret = virtio_validate_features(vdev); - + ret = virtio_validate_features(vdev); if (ret) { return ret; } @@ -2239,16 +2260,20 @@ int virtio_set_status(VirtIODevice *vdev, uint8_t val) } if (k->set_status) { - k->set_status(vdev, val); + ret = k->set_status(vdev, val); + if (ret) { + qemu_log("set %s status to %d failed, old status: %d\n", + vdev->name, val, vdev->status); + } } vdev->status = val; - return 0; + return ret; } static enum virtio_device_endian virtio_default_endian(void) { - if (target_words_bigendian()) { + if (target_big_endian()) { return VIRTIO_DEVICE_ENDIAN_BIG; } else { return VIRTIO_DEVICE_ENDIAN_LITTLE; @@ -2316,45 +2341,6 @@ void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index) } } -void virtio_reset(void *opaque) -{ - VirtIODevice *vdev = opaque; - VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); - int i; - - virtio_set_status(vdev, 0); - if (current_cpu) { - /* Guest initiated reset */ - vdev->device_endian = virtio_current_cpu_endian(); - } else { - /* System reset */ - vdev->device_endian = virtio_default_endian(); - } - - if (vdev->vhost_started && k->get_vhost) { - vhost_reset_device(k->get_vhost(vdev)); - } - - if (k->reset) { - k->reset(vdev); - } - - vdev->start_on_kick = false; - vdev->started = false; - vdev->broken = false; - vdev->guest_features = 0; - vdev->queue_sel = 0; - vdev->status = 0; - vdev->disabled = false; - qatomic_set(&vdev->isr, 0); - vdev->config_vector = VIRTIO_NO_VECTOR; - virtio_notify_vector(vdev, vdev->config_vector); - - for(i = 0; i < VIRTIO_QUEUE_MAX; i++) { - __virtio_queue_reset(vdev, i); - } -} - void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr) { if (!vdev->vq[n].vring.num) { @@ -3165,6 +3151,49 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val) return ret; } +void virtio_reset(void *opaque) +{ + VirtIODevice *vdev = opaque; + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + int i; + + virtio_set_status(vdev, 0); + if (current_cpu) { + /* Guest initiated reset */ + vdev->device_endian = virtio_current_cpu_endian(); + } else { + /* System reset */ + vdev->device_endian = virtio_default_endian(); + } + + if (k->get_vhost) { + struct vhost_dev *hdev = k->get_vhost(vdev); + /* Only reset when vhost back-end is connected */ + if (hdev && hdev->vhost_ops) { + vhost_reset_device(hdev); + } + } + + if (k->reset) { + k->reset(vdev); + } + + vdev->start_on_kick = false; + vdev->started = false; + vdev->broken = false; + virtio_set_features_nocheck(vdev, 0); + vdev->queue_sel = 0; + vdev->status = 0; + vdev->disabled = false; + qatomic_set(&vdev->isr, 0); + vdev->config_vector = VIRTIO_NO_VECTOR; + virtio_notify_vector(vdev, vdev->config_vector); + + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + __virtio_queue_reset(vdev, i); + } +} + static void virtio_device_check_notification_compatibility(VirtIODevice *vdev, Error **errp) { @@ -3258,6 +3287,13 @@ virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) return -1; } + if (vdc->pre_load_queues) { + ret = vdc->pre_load_queues(vdev, num); + if (ret) { + return ret; + } + } + for (i = 0; i < num; i++) { vdev->vq[i].vring.num = qemu_get_be32(f); if (k->has_variable_vring_alignment) { @@ -3408,7 +3444,7 @@ void virtio_cleanup(VirtIODevice *vdev) qemu_del_vm_change_state_handler(vdev->vmstate); } -static void virtio_vmstate_change(void *opaque, bool running, RunState state) 
+static int virtio_vmstate_change(void *opaque, bool running, RunState state) { VirtIODevice *vdev = opaque; BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); @@ -3425,8 +3461,12 @@ static void virtio_vmstate_change(void *opaque, bool running, RunState state) } if (!backend_run) { - virtio_set_status(vdev, vdev->status); + int ret = virtio_set_status(vdev, vdev->status); + if (ret) { + return ret; + } } + return 0; } void virtio_instance_init_common(Object *proxy_obj, void *data, @@ -3478,7 +3518,7 @@ void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size) vdev->config = NULL; } vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev), - virtio_vmstate_change, vdev); + NULL, virtio_vmstate_change, vdev); vdev->device_endian = virtio_default_endian(); vdev->use_guest_notifier_mask = true; } @@ -3637,7 +3677,6 @@ static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev, int n) { /* We don't have a reference like avail idx in shared memory */ - return; } static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev, @@ -3662,10 +3701,9 @@ void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n) static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n) { /* used idx was updated through set_last_avail_idx() */ - return; } -static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n) +static void virtio_queue_split_update_used_idx(VirtIODevice *vdev, int n) { RCU_READ_LOCK_GUARD(); if (vdev->vq[n].vring.desc) { @@ -3678,7 +3716,7 @@ void virtio_queue_update_used_idx(VirtIODevice *vdev, int n) if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { return virtio_queue_packed_update_used_idx(vdev, n); } else { - return virtio_split_packed_update_used_idx(vdev, n); + return virtio_queue_split_update_used_idx(vdev, n); } } @@ -4001,13 +4039,12 @@ static void virtio_device_instance_finalize(Object *obj) g_free(vdev->vector_queues); } -static Property virtio_properties[] = { +static const Property virtio_properties[] = { DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features), DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true), DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true), DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice, disable_legacy_check, false), - DEFINE_PROP_END_OF_LIST(), }; static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev) @@ -4130,7 +4167,7 @@ void virtio_device_release_ioeventfd(VirtIODevice *vdev) virtio_bus_release_ioeventfd(vbus); } -static void virtio_device_class_init(ObjectClass *klass, void *data) +static void virtio_device_class_init(ObjectClass *klass, const void *data) { /* Set the default value here. 
*/ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); diff --git a/hw/vmapple/Kconfig b/hw/vmapple/Kconfig new file mode 100644 index 00000000000..2382b297672 --- /dev/null +++ b/hw/vmapple/Kconfig @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +config VMAPPLE_AES + bool + +config VMAPPLE_BDIF + bool + +config VMAPPLE_CFG + bool + +config VMAPPLE_VIRTIO_BLK + bool + +config VMAPPLE + bool + depends on ARM + depends on HVF + default y if ARM + imply PCI_DEVICES + select ARM_GICV3 + select PLATFORM_BUS + select PCI_EXPRESS + select PCI_EXPRESS_GENERIC_BRIDGE + select PL011 # UART + select PL031 # RTC + select PL061 # GPIO + select GPIO_PWR + select PVPANIC_MMIO + select VMAPPLE_AES + select VMAPPLE_BDIF + select VMAPPLE_CFG + select MAC_PVG_MMIO + select VMAPPLE_VIRTIO_BLK diff --git a/hw/vmapple/aes.c b/hw/vmapple/aes.c new file mode 100644 index 00000000000..a4853a98f8c --- /dev/null +++ b/hw/vmapple/aes.c @@ -0,0 +1,581 @@ +/* + * QEMU Apple AES device emulation + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "trace.h" +#include "crypto/hash.h" +#include "crypto/aes.h" +#include "crypto/cipher.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/vmapple/vmapple.h" +#include "migration/vmstate.h" +#include "qemu/cutils.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "system/dma.h" + +OBJECT_DECLARE_SIMPLE_TYPE(AESState, APPLE_AES) + +#define MAX_FIFO_SIZE 9 + +#define CMD_KEY 0x1 +#define CMD_KEY_CONTEXT_SHIFT 27 +#define CMD_KEY_CONTEXT_MASK (0x1 << CMD_KEY_CONTEXT_SHIFT) +#define CMD_KEY_SELECT_MAX_IDX 0x7 +#define CMD_KEY_SELECT_SHIFT 24 +#define CMD_KEY_SELECT_MASK (CMD_KEY_SELECT_MAX_IDX << CMD_KEY_SELECT_SHIFT) +#define CMD_KEY_KEY_LEN_NUM 4u +#define CMD_KEY_KEY_LEN_SHIFT 22 +#define CMD_KEY_KEY_LEN_MASK ((CMD_KEY_KEY_LEN_NUM - 1u) << CMD_KEY_KEY_LEN_SHIFT) +#define CMD_KEY_ENCRYPT_SHIFT 20 +#define CMD_KEY_ENCRYPT_MASK (0x1 << CMD_KEY_ENCRYPT_SHIFT) +#define CMD_KEY_BLOCK_MODE_SHIFT 16 +#define CMD_KEY_BLOCK_MODE_MASK (0x3 << CMD_KEY_BLOCK_MODE_SHIFT) +#define CMD_IV 0x2 +#define CMD_IV_CONTEXT_SHIFT 26 +#define CMD_IV_CONTEXT_MASK (0x3 << CMD_KEY_CONTEXT_SHIFT) +#define CMD_DSB 0x3 +#define CMD_SKG 0x4 +#define CMD_DATA 0x5 +#define CMD_DATA_KEY_CTX_SHIFT 27 +#define CMD_DATA_KEY_CTX_MASK (0x1 << CMD_DATA_KEY_CTX_SHIFT) +#define CMD_DATA_IV_CTX_SHIFT 25 +#define CMD_DATA_IV_CTX_MASK (0x3 << CMD_DATA_IV_CTX_SHIFT) +#define CMD_DATA_LEN_MASK 0xffffff +#define CMD_STORE_IV 0x6 +#define CMD_STORE_IV_ADDR_MASK 0xffffff +#define CMD_WRITE_REG 0x7 +#define CMD_FLAG 0x8 +#define CMD_FLAG_STOP_MASK BIT(26) +#define CMD_FLAG_RAISE_IRQ_MASK BIT(27) +#define CMD_FLAG_INFO_MASK 0xff +#define CMD_MAX 0x10 + +#define CMD_SHIFT 28 + +#define REG_STATUS 0xc +#define REG_STATUS_DMA_READ_RUNNING BIT(0) +#define REG_STATUS_DMA_READ_PENDING BIT(1) +#define REG_STATUS_DMA_WRITE_RUNNING BIT(2) +#define REG_STATUS_DMA_WRITE_PENDING BIT(3) +#define REG_STATUS_BUSY BIT(4) +#define REG_STATUS_EXECUTING BIT(5) +#define REG_STATUS_READY BIT(6) +#define REG_STATUS_TEXT_DPA_SEEDED BIT(7) +#define REG_STATUS_UNWRAP_DPA_SEEDED BIT(8) + +#define REG_IRQ_STATUS 0x18 +#define REG_IRQ_STATUS_INVALID_CMD BIT(2) +#define REG_IRQ_STATUS_FLAG BIT(5) +#define REG_IRQ_ENABLE 0x1c +#define REG_WATERMARK 0x20 +#define 
REG_Q_STATUS 0x24 +#define REG_FLAG_INFO 0x30 +#define REG_FIFO 0x200 + +static const uint32_t key_lens[CMD_KEY_KEY_LEN_NUM] = { + [0] = 16, + [1] = 24, + [2] = 32, + [3] = 64, +}; + +typedef struct Key { + uint32_t key_len; + uint8_t key[32]; +} Key; + +typedef struct IV { + uint32_t iv[4]; +} IV; + +static Key builtin_keys[CMD_KEY_SELECT_MAX_IDX + 1] = { + [1] = { + .key_len = 32, + .key = { 0x1 }, + }, + [2] = { + .key_len = 32, + .key = { 0x2 }, + }, + [3] = { + .key_len = 32, + .key = { 0x3 }, + } +}; + +struct AESState { + SysBusDevice parent_obj; + + qemu_irq irq; + MemoryRegion iomem1; + MemoryRegion iomem2; + AddressSpace *as; + + uint32_t status; + uint32_t q_status; + uint32_t irq_status; + uint32_t irq_enable; + uint32_t watermark; + uint32_t flag_info; + uint32_t fifo[MAX_FIFO_SIZE]; + uint32_t fifo_idx; + Key key[2]; + IV iv[4]; + bool is_encrypt; + QCryptoCipherMode block_mode; +}; + +static void aes_update_irq(AESState *s) +{ + qemu_set_irq(s->irq, !!(s->irq_status & s->irq_enable)); +} + +static uint64_t aes1_read(void *opaque, hwaddr offset, unsigned size) +{ + AESState *s = opaque; + uint64_t res = 0; + + switch (offset) { + case REG_STATUS: + res = s->status; + break; + case REG_IRQ_STATUS: + res = s->irq_status; + break; + case REG_IRQ_ENABLE: + res = s->irq_enable; + break; + case REG_WATERMARK: + res = s->watermark; + break; + case REG_Q_STATUS: + res = s->q_status; + break; + case REG_FLAG_INFO: + res = s->flag_info; + break; + + default: + qemu_log_mask(LOG_UNIMP, "%s: Unknown AES MMIO offset %" PRIx64 "\n", + __func__, offset); + break; + } + + trace_aes_read(offset, res); + + return res; +} + +static void fifo_append(AESState *s, uint64_t val) +{ + if (s->fifo_idx == MAX_FIFO_SIZE) { + /* Exceeded the FIFO. Bail out */ + return; + } + + s->fifo[s->fifo_idx++] = val; +} + +static bool has_payload(AESState *s, uint32_t elems) +{ + return s->fifo_idx >= elems + 1; +} + +static bool cmd_key(AESState *s) +{ + uint32_t cmd = s->fifo[0]; + uint32_t key_select = (cmd & CMD_KEY_SELECT_MASK) >> CMD_KEY_SELECT_SHIFT; + uint32_t ctxt = (cmd & CMD_KEY_CONTEXT_MASK) >> CMD_KEY_CONTEXT_SHIFT; + uint32_t key_len; + + switch ((cmd & CMD_KEY_BLOCK_MODE_MASK) >> CMD_KEY_BLOCK_MODE_SHIFT) { + case 0: + s->block_mode = QCRYPTO_CIPHER_MODE_ECB; + break; + case 1: + s->block_mode = QCRYPTO_CIPHER_MODE_CBC; + break; + default: + return false; + } + + s->is_encrypt = cmd & CMD_KEY_ENCRYPT_MASK; + key_len = key_lens[(cmd & CMD_KEY_KEY_LEN_MASK) >> CMD_KEY_KEY_LEN_SHIFT]; + + if (key_select) { + trace_aes_cmd_key_select_builtin(ctxt, key_select, + s->is_encrypt ? "en" : "de", + QCryptoCipherMode_str(s->block_mode)); + s->key[ctxt] = builtin_keys[key_select]; + } else { + trace_aes_cmd_key_select_new(ctxt, key_len, + s->is_encrypt ? 
"en" : "de", + QCryptoCipherMode_str(s->block_mode)); + if (key_len > sizeof(s->key[ctxt].key)) { + return false; + } + if (!has_payload(s, key_len / sizeof(uint32_t))) { + /* wait for payload */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__); + return false; + } + memcpy(&s->key[ctxt].key, &s->fifo[1], key_len); + s->key[ctxt].key_len = key_len; + } + + return true; +} + +static bool cmd_iv(AESState *s) +{ + uint32_t cmd = s->fifo[0]; + uint32_t ctxt = (cmd & CMD_IV_CONTEXT_MASK) >> CMD_IV_CONTEXT_SHIFT; + + if (!has_payload(s, 4)) { + /* wait for payload */ + return false; + } + memcpy(&s->iv[ctxt].iv, &s->fifo[1], sizeof(s->iv[ctxt].iv)); + trace_aes_cmd_iv(ctxt, s->fifo[1], s->fifo[2], s->fifo[3], s->fifo[4]); + + return true; +} + +static void dump_data(const char *desc, const void *p, size_t len) +{ + static const size_t MAX_LEN = 0x1000; + char hex[MAX_LEN * 2 + 1] = ""; + + if (len > MAX_LEN) { + return; + } + + qemu_hexdump_to_buffer(hex, sizeof(hex), p, len); + trace_aes_dump_data(desc, hex); +} + +static bool cmd_data(AESState *s) +{ + uint32_t cmd = s->fifo[0]; + uint32_t ctxt_iv = 0; + uint32_t ctxt_key = (cmd & CMD_DATA_KEY_CTX_MASK) >> CMD_DATA_KEY_CTX_SHIFT; + uint32_t len = cmd & CMD_DATA_LEN_MASK; + uint64_t src_addr = s->fifo[2]; + uint64_t dst_addr = s->fifo[3]; + QCryptoCipherAlgo alg; + g_autoptr(QCryptoCipher) cipher = NULL; + g_autoptr(GByteArray) src = NULL; + g_autoptr(GByteArray) dst = NULL; + MemTxResult r; + + src_addr |= ((uint64_t)s->fifo[1] << 16) & 0xffff00000000ULL; + dst_addr |= ((uint64_t)s->fifo[1] << 32) & 0xffff00000000ULL; + + trace_aes_cmd_data(ctxt_key, ctxt_iv, src_addr, dst_addr, len); + + if (!has_payload(s, 3)) { + /* wait for payload */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__); + return false; + } + + if (ctxt_key >= ARRAY_SIZE(s->key) || + ctxt_iv >= ARRAY_SIZE(s->iv)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid key or iv\n", __func__); + return false; + } + + src = g_byte_array_sized_new(len); + g_byte_array_set_size(src, len); + dst = g_byte_array_sized_new(len); + g_byte_array_set_size(dst, len); + + r = dma_memory_read(s->as, src_addr, src->data, len, MEMTXATTRS_UNSPECIFIED); + if (r != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA read of %"PRIu32" bytes " + "from 0x%"PRIx64" failed. 
(r=%d)\n", + __func__, len, src_addr, r); + return false; + } + + dump_data("cmd_data(): src_data=", src->data, len); + + switch (s->key[ctxt_key].key_len) { + case 128 / 8: + alg = QCRYPTO_CIPHER_ALGO_AES_128; + break; + case 192 / 8: + alg = QCRYPTO_CIPHER_ALGO_AES_192; + break; + case 256 / 8: + alg = QCRYPTO_CIPHER_ALGO_AES_256; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid key length\n", __func__); + return false; + } + cipher = qcrypto_cipher_new(alg, s->block_mode, + s->key[ctxt_key].key, + s->key[ctxt_key].key_len, NULL); + if (!cipher) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to create cipher object\n", + __func__); + return false; + } + if (s->block_mode != QCRYPTO_CIPHER_MODE_ECB) { + if (qcrypto_cipher_setiv(cipher, (void *)s->iv[ctxt_iv].iv, + sizeof(s->iv[ctxt_iv].iv), NULL) != 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to set IV\n", __func__); + return false; + } + } + if (s->is_encrypt) { + if (qcrypto_cipher_encrypt(cipher, src->data, dst->data, len, NULL) != 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Encryption failed\n", __func__); + return false; + } + } else { + if (qcrypto_cipher_decrypt(cipher, src->data, dst->data, len, NULL) != 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Decryption failed\n", __func__); + return false; + } + } + + dump_data("cmd_data(): dst_data=", dst->data, len); + r = dma_memory_write(s->as, dst_addr, dst->data, len, MEMTXATTRS_UNSPECIFIED); + if (r != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA write of %"PRIu32" bytes " + "to 0x%"PRIx64" failed. (r=%d)\n", + __func__, len, src_addr, r); + return false; + } + + return true; +} + +static bool cmd_store_iv(AESState *s) +{ + uint32_t cmd = s->fifo[0]; + uint32_t ctxt = (cmd & CMD_IV_CONTEXT_MASK) >> CMD_IV_CONTEXT_SHIFT; + uint64_t addr = s->fifo[1]; + MemTxResult dma_result; + + if (!has_payload(s, 1)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No payload\n", __func__); + return false; + } + + if (ctxt >= ARRAY_SIZE(s->iv)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid context. 
ctxt = %u, allowed: 0..%zu\n", + __func__, ctxt, ARRAY_SIZE(s->iv) - 1); + return false; + } + + addr |= ((uint64_t)cmd << 32) & 0xff00000000ULL; + dma_result = dma_memory_write(&address_space_memory, addr, + &s->iv[ctxt].iv, sizeof(s->iv[ctxt].iv), + MEMTXATTRS_UNSPECIFIED); + + trace_aes_cmd_store_iv(ctxt, addr, s->iv[ctxt].iv[0], s->iv[ctxt].iv[1], + s->iv[ctxt].iv[2], s->iv[ctxt].iv[3]); + + return dma_result == MEMTX_OK; +} + +static bool cmd_flag(AESState *s) +{ + uint32_t cmd = s->fifo[0]; + uint32_t raise_irq = cmd & CMD_FLAG_RAISE_IRQ_MASK; + + /* We always process data when it's coming in, so fire an IRQ immediately */ + if (raise_irq) { + s->irq_status |= REG_IRQ_STATUS_FLAG; + } + + s->flag_info = cmd & CMD_FLAG_INFO_MASK; + + trace_aes_cmd_flag(!!raise_irq, s->flag_info); + + return true; +} + +static void fifo_process(AESState *s) +{ + uint32_t cmd = s->fifo[0] >> CMD_SHIFT; + bool success = false; + + if (!s->fifo_idx) { + return; + } + + switch (cmd) { + case CMD_KEY: + success = cmd_key(s); + break; + case CMD_IV: + success = cmd_iv(s); + break; + case CMD_DATA: + success = cmd_data(s); + break; + case CMD_STORE_IV: + success = cmd_store_iv(s); + break; + case CMD_FLAG: + success = cmd_flag(s); + break; + default: + s->irq_status |= REG_IRQ_STATUS_INVALID_CMD; + break; + } + + if (success) { + s->fifo_idx = 0; + } + + trace_aes_fifo_process(cmd, success); +} + +static void aes1_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) +{ + AESState *s = opaque; + + trace_aes_write(offset, val); + + switch (offset) { + case REG_IRQ_STATUS: + s->irq_status &= ~val; + break; + case REG_IRQ_ENABLE: + s->irq_enable = val; + break; + case REG_FIFO: + fifo_append(s, val); + fifo_process(s); + break; + default: + qemu_log_mask(LOG_UNIMP, + "%s: Unknown AES MMIO offset %"PRIx64", data %"PRIx64"\n", + __func__, offset, val); + return; + } + + aes_update_irq(s); +} + +static const MemoryRegionOps aes1_ops = { + .read = aes1_read, + .write = aes1_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static uint64_t aes2_read(void *opaque, hwaddr offset, unsigned size) +{ + uint64_t res = 0; + + switch (offset) { + case 0: + res = 0; + break; + default: + qemu_log_mask(LOG_UNIMP, + "%s: Unknown AES MMIO 2 offset %"PRIx64"\n", + __func__, offset); + break; + } + + trace_aes_2_read(offset, res); + + return res; +} + +static void aes2_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) +{ + trace_aes_2_write(offset, val); + + switch (offset) { + default: + qemu_log_mask(LOG_UNIMP, + "%s: Unknown AES MMIO 2 offset %"PRIx64", data %"PRIx64"\n", + __func__, offset, val); + return; + } +} + +static const MemoryRegionOps aes2_ops = { + .read = aes2_read, + .write = aes2_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void aes_reset(Object *obj, ResetType type) +{ + AESState *s = APPLE_AES(obj); + + s->status = 0x3f80; + s->q_status = 2; + s->irq_status = 0; + s->irq_enable = 0; + s->watermark = 0; +} + +static void aes_init(Object *obj) +{ + AESState *s = APPLE_AES(obj); + + memory_region_init_io(&s->iomem1, obj, &aes1_ops, s, TYPE_APPLE_AES, 0x4000); + memory_region_init_io(&s->iomem2, obj, &aes2_ops, s, TYPE_APPLE_AES, 0x4000); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem1); + 
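(Editor's aside, not part of the patch: a minimal standalone C sketch of how cmd_data() above assembles its 48-bit DMA addresses from the FIFO words that follow the command word; the numeric values are invented for illustration.)

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* fifo[1] carries the high 16 bits of both addresses:
     * bits 31:16 go to the source, bits 15:0 to the destination. */
    uint32_t fifo1 = 0x00120034;
    uint32_t fifo2 = 0x80001000;  /* low 32 bits of the source address */
    uint32_t fifo3 = 0x90002000;  /* low 32 bits of the destination address */

    uint64_t src = fifo2 | (((uint64_t)fifo1 << 16) & 0xffff00000000ULL);
    uint64_t dst = fifo3 | (((uint64_t)fifo1 << 32) & 0xffff00000000ULL);

    /* prints src=0x001280001000 dst=0x003490002000 */
    printf("src=0x%012" PRIx64 " dst=0x%012" PRIx64 "\n", src, dst);
    return 0;
}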
sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem2); + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq); + s->as = &address_space_memory; +} + +static void aes_class_init(ObjectClass *klass, const void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->phases.hold = aes_reset; +} + +static const TypeInfo aes_info = { + .name = TYPE_APPLE_AES, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AESState), + .class_init = aes_class_init, + .instance_init = aes_init, +}; + +static void aes_register_types(void) +{ + type_register_static(&aes_info); +} + +type_init(aes_register_types) diff --git a/hw/vmapple/bdif.c b/hw/vmapple/bdif.c new file mode 100644 index 00000000000..5ccd3745819 --- /dev/null +++ b/hw/vmapple/bdif.c @@ -0,0 +1,274 @@ +/* + * VMApple Backdoor Interface + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" +#include "hw/vmapple/vmapple.h" +#include "hw/sysbus.h" +#include "hw/block/block.h" +#include "qapi/error.h" +#include "system/block-backend.h" +#include "system/dma.h" + +OBJECT_DECLARE_SIMPLE_TYPE(VMAppleBdifState, VMAPPLE_BDIF) + +struct VMAppleBdifState { + SysBusDevice parent_obj; + + BlockBackend *aux; + BlockBackend *root; + MemoryRegion mmio; +}; + +#define VMAPPLE_BDIF_SIZE 0x00200000 + +#define REG_DEVID_MASK 0xffff0000 +#define DEVID_ROOT 0x00000000 +#define DEVID_AUX 0x00010000 +#define DEVID_USB 0x00100000 + +#define REG_STATUS 0x0 +#define REG_STATUS_ACTIVE BIT(0) +#define REG_CFG 0x4 +#define REG_CFG_ACTIVE BIT(1) +#define REG_UNK1 0x8 +#define REG_BUSY 0x10 +#define REG_BUSY_READY BIT(0) +#define REG_UNK2 0x400 +#define REG_CMD 0x408 +#define REG_NEXT_DEVICE 0x420 +#define REG_UNK3 0x434 + +typedef struct VblkSector { + uint32_t pad; + uint32_t pad2; + uint32_t sector; + uint32_t pad3; +} VblkSector; + +typedef struct VblkReqCmd { + uint64_t addr; + uint32_t len; + uint32_t flags; +} VblkReqCmd; + +typedef struct VblkReq { + VblkReqCmd sector; + VblkReqCmd data; + VblkReqCmd retval; +} VblkReq; + +#define VBLK_DATA_FLAGS_READ 0x00030001 +#define VBLK_DATA_FLAGS_WRITE 0x00010001 + +#define VBLK_RET_SUCCESS 0 +#define VBLK_RET_FAILED 1 + +static uint64_t bdif_read(void *opaque, hwaddr offset, unsigned size) +{ + uint64_t ret = -1; + uint64_t devid = offset & REG_DEVID_MASK; + + switch (offset & ~REG_DEVID_MASK) { + case REG_STATUS: + ret = REG_STATUS_ACTIVE; + break; + case REG_CFG: + ret = REG_CFG_ACTIVE; + break; + case REG_UNK1: + ret = 0x420; + break; + case REG_BUSY: + ret = REG_BUSY_READY; + break; + case REG_UNK2: + ret = 0x1; + break; + case REG_UNK3: + ret = 0x0; + break; + case REG_NEXT_DEVICE: + switch (devid) { + case DEVID_ROOT: + ret = 0x8000000; + break; + case DEVID_AUX: + ret = 0x10000; + break; + } + break; + } + + trace_bdif_read(offset, size, ret); + return ret; +} + +static void le2cpu_sector(VblkSector *sector) +{ + sector->sector = le32_to_cpu(sector->sector); +} + +static void le2cpu_reqcmd(VblkReqCmd *cmd) +{ + cmd->addr = le64_to_cpu(cmd->addr); + cmd->len = le32_to_cpu(cmd->len); + cmd->flags = le32_to_cpu(cmd->flags); +} + +static void le2cpu_req(VblkReq *req) +{ + le2cpu_reqcmd(&req->sector); + le2cpu_reqcmd(&req->data); + le2cpu_reqcmd(&req->retval); +} + +static void 
vblk_cmd(uint64_t devid, BlockBackend *blk, uint64_t gp_addr, + uint64_t static_off) +{ + VblkReq req; + VblkSector sector; + uint64_t off = 0; + g_autofree char *buf = NULL; + uint8_t ret = VBLK_RET_FAILED; + int r; + MemTxResult dma_result; + + dma_result = dma_memory_read(&address_space_memory, gp_addr, + &req, sizeof(req), MEMTXATTRS_UNSPECIFIED); + if (dma_result != MEMTX_OK) { + goto out; + } + + le2cpu_req(&req); + + if (req.sector.len != sizeof(sector)) { + goto out; + } + + /* Read the vblk command */ + dma_result = dma_memory_read(&address_space_memory, req.sector.addr, + &sector, sizeof(sector), + MEMTXATTRS_UNSPECIFIED); + if (dma_result != MEMTX_OK) { + goto out; + } + le2cpu_sector(&sector); + + off = sector.sector * 512ULL + static_off; + + /* Sanity check that we're not allocating bogus sizes */ + if (req.data.len > 128 * MiB) { + goto out; + } + + buf = g_malloc0(req.data.len); + switch (req.data.flags) { + case VBLK_DATA_FLAGS_READ: + r = blk_pread(blk, off, req.data.len, buf, 0); + trace_bdif_vblk_read(devid == DEVID_AUX ? "aux" : "root", + req.data.addr, off, req.data.len, r); + if (r < 0) { + goto out; + } + dma_result = dma_memory_write(&address_space_memory, req.data.addr, buf, + req.data.len, MEMTXATTRS_UNSPECIFIED); + if (dma_result == MEMTX_OK) { + ret = VBLK_RET_SUCCESS; + } + break; + case VBLK_DATA_FLAGS_WRITE: + /* Not needed, iBoot only reads */ + break; + default: + break; + } + +out: + dma_memory_write(&address_space_memory, req.retval.addr, &ret, 1, + MEMTXATTRS_UNSPECIFIED); +} + +static void bdif_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + VMAppleBdifState *s = opaque; + uint64_t devid = (offset & REG_DEVID_MASK); + + trace_bdif_write(offset, size, value); + + switch (offset & ~REG_DEVID_MASK) { + case REG_CMD: + switch (devid) { + case DEVID_ROOT: + vblk_cmd(devid, s->root, value, 0x0); + break; + case DEVID_AUX: + vblk_cmd(devid, s->aux, value, 0x0); + break; + } + break; + } +} + +static const MemoryRegionOps bdif_ops = { + .read = bdif_read, + .write = bdif_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static void bdif_init(Object *obj) +{ + VMAppleBdifState *s = VMAPPLE_BDIF(obj); + + memory_region_init_io(&s->mmio, obj, &bdif_ops, obj, + "VMApple Backdoor Interface", VMAPPLE_BDIF_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static const Property bdif_properties[] = { + DEFINE_PROP_DRIVE("aux", VMAppleBdifState, aux), + DEFINE_PROP_DRIVE("root", VMAppleBdifState, root), +}; + +static void bdif_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "VMApple Backdoor Interface"; + device_class_set_props(dc, bdif_properties); +} + +static const TypeInfo bdif_info = { + .name = TYPE_VMAPPLE_BDIF, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(VMAppleBdifState), + .instance_init = bdif_init, + .class_init = bdif_class_init, +}; + +static void bdif_register_types(void) +{ + type_register_static(&bdif_info); +} + +type_init(bdif_register_types) diff --git a/hw/vmapple/cfg.c b/hw/vmapple/cfg.c new file mode 100644 index 00000000000..3d58a29f69d --- /dev/null +++ b/hw/vmapple/cfg.c @@ -0,0 +1,195 @@ +/* + * VMApple Configuration Region + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/vmapple/vmapple.h" +#include "hw/sysbus.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "net/net.h" + +OBJECT_DECLARE_SIMPLE_TYPE(VMAppleCfgState, VMAPPLE_CFG) + +#define VMAPPLE_CFG_SIZE 0x00010000 + +typedef struct VMAppleCfg { + uint32_t version; /* 0x000 */ + uint32_t nr_cpus; /* 0x004 */ + uint32_t unk1; /* 0x008 */ + uint32_t unk2; /* 0x00c */ + uint32_t unk3; /* 0x010 */ + uint32_t unk4; /* 0x014 */ + uint64_t ecid; /* 0x018 */ + uint64_t ram_size; /* 0x020 */ + uint32_t run_installer1; /* 0x028 */ + uint32_t unk5; /* 0x02c */ + uint32_t unk6; /* 0x030 */ + uint32_t run_installer2; /* 0x034 */ + uint32_t rnd; /* 0x038 */ + uint32_t unk7; /* 0x03c */ + MACAddr mac_en0; /* 0x040 */ + uint8_t pad1[2]; + MACAddr mac_en1; /* 0x048 */ + uint8_t pad2[2]; + MACAddr mac_wifi0; /* 0x050 */ + uint8_t pad3[2]; + MACAddr mac_bt0; /* 0x058 */ + uint8_t pad4[2]; + uint8_t reserved[0xa0]; /* 0x060 */ + uint32_t cpu_ids[0x80]; /* 0x100 */ + uint8_t scratch[0x200]; /* 0x180 */ + char serial[32]; /* 0x380 */ + char unk8[32]; /* 0x3a0 */ + char model[32]; /* 0x3c0 */ + uint8_t unk9[32]; /* 0x3e0 */ + uint32_t unk10; /* 0x400 */ + char soc_name[32]; /* 0x404 */ +} VMAppleCfg; + +struct VMAppleCfgState { + SysBusDevice parent_obj; + VMAppleCfg cfg; + + MemoryRegion mem; + char *serial; + char *model; + char *soc_name; +}; + +static void vmapple_cfg_reset(Object *obj, ResetType type) +{ + VMAppleCfgState *s = VMAPPLE_CFG(obj); + VMAppleCfg *cfg; + + cfg = memory_region_get_ram_ptr(&s->mem); + memset(cfg, 0, VMAPPLE_CFG_SIZE); + *cfg = s->cfg; +} + +static bool set_fixlen_property_or_error(char *restrict dst, + const char *restrict src, + size_t dst_size, Error **errp, + const char *property_name) +{ + ERRP_GUARD(); + size_t len; + + len = g_strlcpy(dst, src, dst_size); + if (len < dst_size) { /* len does not count nul terminator */ + return true; + } + + error_setg(errp, "Provided value too long for property '%s'", property_name); + error_append_hint(errp, "length (%zu) exceeds maximum of %zu\n", + len, dst_size - 1); + return false; +} + +#define set_fixlen_property_or_return(dst_array, src, errp, property_name) \ + do { \ + if (!set_fixlen_property_or_error((dst_array), (src), \ + ARRAY_SIZE(dst_array), \ + (errp), (property_name))) { \ + return; \ + } \ + } while (0) + +static void vmapple_cfg_realize(DeviceState *dev, Error **errp) +{ + VMAppleCfgState *s = VMAPPLE_CFG(dev); + uint32_t i; + + if (!s->serial) { + s->serial = g_strdup("1234"); + } + if (!s->model) { + s->model = g_strdup("VM0001"); + } + if (!s->soc_name) { + s->soc_name = g_strdup("Apple M1 (Virtual)"); + } + + set_fixlen_property_or_return(s->cfg.serial, s->serial, errp, "serial"); + set_fixlen_property_or_return(s->cfg.model, s->model, errp, "model"); + set_fixlen_property_or_return(s->cfg.soc_name, s->soc_name, errp, "soc_name"); + set_fixlen_property_or_return(s->cfg.unk8, "D/A", errp, "unk8"); + s->cfg.version = 2; + s->cfg.unk1 = 1; + s->cfg.unk2 = 1; + s->cfg.unk3 = 0x20; + s->cfg.unk4 = 0; + s->cfg.unk5 = 1; + s->cfg.unk6 = 1; + s->cfg.unk7 = 0; + s->cfg.unk10 = 1; + + if (s->cfg.nr_cpus > ARRAY_SIZE(s->cfg.cpu_ids)) { + error_setg(errp, + "Failed to create %u CPUs, vmapple machine supports %zu max", + s->cfg.nr_cpus, ARRAY_SIZE(s->cfg.cpu_ids)); + return; + } + 
for (i = 0; i < s->cfg.nr_cpus; i++) { + s->cfg.cpu_ids[i] = i; + } +} + +static void vmapple_cfg_init(Object *obj) +{ + VMAppleCfgState *s = VMAPPLE_CFG(obj); + + memory_region_init_ram(&s->mem, obj, "VMApple Config", VMAPPLE_CFG_SIZE, + &error_fatal); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mem); +} + +static const Property vmapple_cfg_properties[] = { + DEFINE_PROP_UINT32("nr-cpus", VMAppleCfgState, cfg.nr_cpus, 1), + DEFINE_PROP_UINT64("ecid", VMAppleCfgState, cfg.ecid, 0), + DEFINE_PROP_UINT64("ram-size", VMAppleCfgState, cfg.ram_size, 0), + DEFINE_PROP_UINT32("run_installer1", VMAppleCfgState, cfg.run_installer1, 0), + DEFINE_PROP_UINT32("run_installer2", VMAppleCfgState, cfg.run_installer2, 0), + DEFINE_PROP_UINT32("rnd", VMAppleCfgState, cfg.rnd, 0), + DEFINE_PROP_MACADDR("mac-en0", VMAppleCfgState, cfg.mac_en0), + DEFINE_PROP_MACADDR("mac-en1", VMAppleCfgState, cfg.mac_en1), + DEFINE_PROP_MACADDR("mac-wifi0", VMAppleCfgState, cfg.mac_wifi0), + DEFINE_PROP_MACADDR("mac-bt0", VMAppleCfgState, cfg.mac_bt0), + DEFINE_PROP_STRING("serial", VMAppleCfgState, serial), + DEFINE_PROP_STRING("model", VMAppleCfgState, model), + DEFINE_PROP_STRING("soc_name", VMAppleCfgState, soc_name), +}; + +static void vmapple_cfg_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->realize = vmapple_cfg_realize; + dc->desc = "VMApple Configuration Region"; + device_class_set_props(dc, vmapple_cfg_properties); + rc->phases.hold = vmapple_cfg_reset; +} + +static const TypeInfo vmapple_cfg_info = { + .name = TYPE_VMAPPLE_CFG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(VMAppleCfgState), + .instance_init = vmapple_cfg_init, + .class_init = vmapple_cfg_class_init, +}; + +static void vmapple_cfg_register_types(void) +{ + type_register_static(&vmapple_cfg_info); +} + +type_init(vmapple_cfg_register_types) diff --git a/hw/vmapple/meson.build b/hw/vmapple/meson.build new file mode 100644 index 00000000000..23bc4c999e3 --- /dev/null +++ b/hw/vmapple/meson.build @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +system_ss.add(when: 'CONFIG_VMAPPLE_AES', if_true: files('aes.c')) +system_ss.add(when: 'CONFIG_VMAPPLE_BDIF', if_true: files('bdif.c')) +system_ss.add(when: 'CONFIG_VMAPPLE_CFG', if_true: files('cfg.c')) +system_ss.add(when: 'CONFIG_VMAPPLE_VIRTIO_BLK', if_true: files('virtio-blk.c')) +specific_ss.add(when: 'CONFIG_VMAPPLE', if_true: files('vmapple.c')) diff --git a/hw/vmapple/trace-events b/hw/vmapple/trace-events new file mode 100644 index 00000000000..93380ede145 --- /dev/null +++ b/hw/vmapple/trace-events @@ -0,0 +1,21 @@ +# See docs/devel/tracing.rst for syntax documentation. 
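+# (Editor's note: each definition below is expanded by QEMU's tracetool into
+#  a trace_<name>() helper that the C code calls, e.g. aes.c above calls
+#  trace_aes_read(offset, res); the format string controls how the event is
+#  rendered in the trace log.)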
+# SPDX-License-Identifier: GPL-2.0-or-later + +# aes.c +aes_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 +aes_cmd_key_select_builtin(uint32_t ctx, uint32_t key_id, const char *direction, const char *cipher) "[%d] Selecting builtin key %d to %scrypt with %s" +aes_cmd_key_select_new(uint32_t ctx, uint32_t key_len, const char *direction, const char *cipher) "[%d] Selecting new key size=%d to %scrypt with %s" +aes_cmd_iv(uint32_t ctx, uint32_t iv0, uint32_t iv1, uint32_t iv2, uint32_t iv3) "[%d] 0x%08x 0x%08x 0x%08x 0x%08x" +aes_cmd_data(uint32_t key, uint32_t iv, uint64_t src, uint64_t dst, uint32_t len) "[key=%d iv=%d] src=0x%"PRIx64" dst=0x%"PRIx64" len=0x%x" +aes_cmd_store_iv(uint32_t ctx, uint64_t addr, uint32_t iv0, uint32_t iv1, uint32_t iv2, uint32_t iv3) "[%d] addr=0x%"PRIx64"x -> 0x%08x 0x%08x 0x%08x 0x%08x" +aes_cmd_flag(uint32_t raise, uint32_t flag_info) "raise=%d flag_info=0x%x" +aes_fifo_process(uint32_t cmd, bool success) "cmd=%d success=%d" +aes_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 +aes_2_read(uint64_t offset, uint64_t res) "offset=0x%"PRIx64" res=0x%"PRIx64 +aes_2_write(uint64_t offset, uint64_t val) "offset=0x%"PRIx64" val=0x%"PRIx64 +aes_dump_data(const char *desc, const char *hex) "%s%s" + +# bdif.c +bdif_read(uint64_t offset, uint32_t size, uint64_t value) "offset=0x%"PRIx64" size=0x%x value=0x%"PRIx64 +bdif_write(uint64_t offset, uint32_t size, uint64_t value) "offset=0x%"PRIx64" size=0x%x value=0x%"PRIx64 +bdif_vblk_read(const char *dev, uint64_t addr, uint64_t offset, uint32_t len, int r) "dev=%s addr=0x%"PRIx64" off=0x%"PRIx64" size=0x%x r=%d" diff --git a/hw/vmapple/trace.h b/hw/vmapple/trace.h new file mode 100644 index 00000000000..d099d5ecd9e --- /dev/null +++ b/hw/vmapple/trace.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#include "trace/trace-hw_vmapple.h" diff --git a/hw/vmapple/virtio-blk.c b/hw/vmapple/virtio-blk.c new file mode 100644 index 00000000000..9de9aaae0bf --- /dev/null +++ b/hw/vmapple/virtio-blk.c @@ -0,0 +1,204 @@ +/* + * VMApple specific VirtIO Block implementation + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * VMApple uses almost standard VirtIO Block, but with a few key differences: + * + * - Different PCI device/vendor ID + * - An additional "type" identifier to differentiate AUX and Root volumes + * - An additional BARRIER command + */ + +#include "qemu/osdep.h" +#include "hw/vmapple/vmapple.h" +#include "hw/virtio/virtio-blk.h" +#include "hw/virtio/virtio-pci.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" + +#define TYPE_VMAPPLE_VIRTIO_BLK "vmapple-virtio-blk" +OBJECT_DECLARE_TYPE(VMAppleVirtIOBlk, VMAppleVirtIOBlkClass, VMAPPLE_VIRTIO_BLK) + +typedef struct VMAppleVirtIOBlkClass { + VirtIOBlkClass parent; + + void (*get_config)(VirtIODevice *vdev, uint8_t *config); +} VMAppleVirtIOBlkClass; + +typedef struct VMAppleVirtIOBlk { + VirtIOBlock parent_obj; + + uint32_t apple_type; +} VMAppleVirtIOBlk; + +/* + * vmapple-virtio-blk-pci: This extends VirtioPCIProxy. 
+ */ +OBJECT_DECLARE_SIMPLE_TYPE(VMAppleVirtIOBlkPCI, VMAPPLE_VIRTIO_BLK_PCI) + +#define VIRTIO_BLK_T_APPLE_BARRIER 0x10000 + +static bool vmapple_virtio_blk_handle_unknown_request(VirtIOBlockReq *req, + MultiReqBuffer *mrb, + uint32_t type) +{ + switch (type) { + case VIRTIO_BLK_T_APPLE_BARRIER: + qemu_log_mask(LOG_UNIMP, "%s: Barrier requests are currently no-ops\n", + __func__); + virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); + g_free(req); + return true; + default: + return false; + } +} + +/* + * VMApple virtio-blk uses the same config format as normal virtio, with one + * exception: It adds an "apple type" specifier at the same location that + * the spec reserves for max_secure_erase_sectors. Let's hook into the + * get_config code path here, run it as usual and then patch in the apple type. + */ +static void vmapple_virtio_blk_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VMAppleVirtIOBlk *dev = VMAPPLE_VIRTIO_BLK(vdev); + VMAppleVirtIOBlkClass *vvbk = VMAPPLE_VIRTIO_BLK_GET_CLASS(dev); + struct virtio_blk_config *blkcfg = (struct virtio_blk_config *)config; + + vvbk->get_config(vdev, config); + + g_assert(dev->parent_obj.config_size >= endof(struct virtio_blk_config, zoned)); + + /* Apple abuses the field for max_secure_erase_sectors as type id */ + stl_he_p(&blkcfg->max_secure_erase_sectors, dev->apple_type); +} + +static void vmapple_virtio_blk_class_init(ObjectClass *klass, const void *data) +{ + VirtIOBlkClass *vbk = VIRTIO_BLK_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + VMAppleVirtIOBlkClass *vvbk = VMAPPLE_VIRTIO_BLK_CLASS(klass); + + vbk->handle_unknown_request = vmapple_virtio_blk_handle_unknown_request; + vvbk->get_config = vdc->get_config; + vdc->get_config = vmapple_virtio_blk_get_config; +} + +static const TypeInfo vmapple_virtio_blk_info = { + .name = TYPE_VMAPPLE_VIRTIO_BLK, + .parent = TYPE_VIRTIO_BLK, + .instance_size = sizeof(VMAppleVirtIOBlk), + .class_size = sizeof(VMAppleVirtIOBlkClass), + .class_init = vmapple_virtio_blk_class_init, +}; + +/* PCI Devices */ + +struct VMAppleVirtIOBlkPCI { + VirtIOPCIProxy parent_obj; + + VMAppleVirtIOBlk vdev; + VMAppleVirtioBlkVariant variant; +}; + +static const Property vmapple_virtio_blk_pci_properties[] = { + DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), + DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), + DEFINE_PROP_VMAPPLE_VIRTIO_BLK_VARIANT("variant", VMAppleVirtIOBlkPCI, variant, + VM_APPLE_VIRTIO_BLK_VARIANT_UNSPECIFIED), +}; + +static void vmapple_virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + ERRP_GUARD(); + VMAppleVirtIOBlkPCI *dev = VMAPPLE_VIRTIO_BLK_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + VirtIOBlkConf *conf = &dev->vdev.parent_obj.conf; + + if (dev->variant == VM_APPLE_VIRTIO_BLK_VARIANT_UNSPECIFIED) { + error_setg(errp, "vmapple virtio block device variant unspecified"); + error_append_hint(errp, + "Variant property must be set to 'aux' or 'root'.\n" + "Use a regular virtio-blk-pci device instead when " + "neither is applicable.\n"); + return; + } + + if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) { + conf->num_queues = virtio_pci_optimal_num_queues(0); + } + + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = conf->num_queues + 1; + } + + /* + * We don't support zones, but we need the additional config space size. 
+ * Let's just expose the feature so the rest of the virtio-blk logic + * allocates enough space for us. The guest will ignore zones anyway. + */ + virtio_add_feature(&dev->vdev.parent_obj.host_features, VIRTIO_BLK_F_ZONED); + /* Propagate the apple type down to the virtio-blk device */ + dev->vdev.apple_type = dev->variant; + /* and spawn the virtio-blk device */ + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); + + /* + * The virtio-pci machinery adjusts its vendor/device ID based on whether + * we support modern or legacy virtio. Let's patch it back to the Apple + * identifiers here. + */ + pci_config_set_vendor_id(vpci_dev->pci_dev.config, PCI_VENDOR_ID_APPLE); + pci_config_set_device_id(vpci_dev->pci_dev.config, + PCI_DEVICE_ID_APPLE_VIRTIO_BLK); +} + +static void vmapple_virtio_blk_pci_class_init(ObjectClass *klass, + const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + device_class_set_props(dc, vmapple_virtio_blk_pci_properties); + k->realize = vmapple_virtio_blk_pci_realize; + pcidev_k->vendor_id = PCI_VENDOR_ID_APPLE; + pcidev_k->device_id = PCI_DEVICE_ID_APPLE_VIRTIO_BLK; + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; + pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI; +} + +static void vmapple_virtio_blk_pci_instance_init(Object *obj) +{ + VMAppleVirtIOBlkPCI *dev = VMAPPLE_VIRTIO_BLK_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VMAPPLE_VIRTIO_BLK); +} + +static const VirtioPCIDeviceTypeInfo vmapple_virtio_blk_pci_info = { + .generic_name = TYPE_VMAPPLE_VIRTIO_BLK_PCI, + .instance_size = sizeof(VMAppleVirtIOBlkPCI), + .instance_init = vmapple_virtio_blk_pci_instance_init, + .class_init = vmapple_virtio_blk_pci_class_init, +}; + +static void vmapple_virtio_blk_register_types(void) +{ + type_register_static(&vmapple_virtio_blk_info); + virtio_pci_types_register(&vmapple_virtio_blk_pci_info); +} + +type_init(vmapple_virtio_blk_register_types) diff --git a/hw/vmapple/vmapple.c b/hw/vmapple/vmapple.c new file mode 100644 index 00000000000..16e6110b68f --- /dev/null +++ b/hw/vmapple/vmapple.c @@ -0,0 +1,618 @@ +/* + * VMApple machine emulation + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * VMApple is the device model that the macOS built-in hypervisor called + * "Virtualization.framework" exposes to Apple Silicon macOS guests. The + * machine model in this file implements the same device model in QEMU, but + * does not use any code from Virtualization.Framework. 
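+ *
+ * (Editor's note, illustrative only: as vmapple_firmware_init() and
+ * create_bdif() below show, the firmware image is passed with -bios, the
+ * AUX volume is the first if=pflash drive, and the root volume is either
+ * the second if=pflash drive or, failing that, the first virtio drive.
+ * On an Apple Silicon host an invocation might look roughly like:
+ *   qemu-system-aarch64 -machine vmapple,uuid=<uuid> -accel hvf -m 4G \
+ *     -bios <firmware> -drive file=aux.img,if=pflash,format=raw \
+ *     -drive file=root.img,if=pflash,format=raw
+ * Exact options depend on the rest of the series.)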
+ */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/datadir.h" +#include "qemu/error-report.h" +#include "qemu/guest-random.h" +#include "qemu/help-texts.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/option.h" +#include "qemu/units.h" +#include "monitor/qdev.h" +#include "hw/boards.h" +#include "hw/irq.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "hw/usb.h" +#include "hw/arm/boot.h" +#include "hw/arm/primecell.h" +#include "hw/char/pl011.h" +#include "hw/intc/arm_gic.h" +#include "hw/intc/arm_gicv3_common.h" +#include "hw/misc/pvpanic.h" +#include "hw/pci-host/gpex.h" +#include "hw/usb/hcd-xhci-pci.h" +#include "hw/virtio/virtio-pci.h" +#include "hw/vmapple/vmapple.h" +#include "net/net.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qapi/qapi-visit-common.h" +#include "qobject/qlist.h" +#include "standard-headers/linux/input.h" +#include "system/hvf.h" +#include "system/reset.h" +#include "system/runstate.h" +#include "system/system.h" + +struct VMAppleMachineState { + MachineState parent; + + Notifier machine_done; + struct arm_boot_info bootinfo; + const MemMapEntry *memmap; + const int *irqmap; + DeviceState *gic; + DeviceState *cfg; + DeviceState *pvpanic; + Notifier powerdown_notifier; + PCIBus *bus; + MemoryRegion fw_mr; + MemoryRegion ecam_alias; + uint64_t uuid; +}; + +#define TYPE_VMAPPLE_MACHINE MACHINE_TYPE_NAME("vmapple") +OBJECT_DECLARE_SIMPLE_TYPE(VMAppleMachineState, VMAPPLE_MACHINE) + +/* Number of external interrupt lines to configure the GIC with */ +#define NUM_IRQS 256 + +enum { + VMAPPLE_FIRMWARE, + VMAPPLE_CONFIG, + VMAPPLE_MEM, + VMAPPLE_GIC_DIST, + VMAPPLE_GIC_REDIST, + VMAPPLE_UART, + VMAPPLE_RTC, + VMAPPLE_PCIE, + VMAPPLE_PCIE_MMIO, + VMAPPLE_PCIE_ECAM, + VMAPPLE_GPIO, + VMAPPLE_PVPANIC, + VMAPPLE_APV_GFX, + VMAPPLE_APV_IOSFC, + VMAPPLE_AES_1, + VMAPPLE_AES_2, + VMAPPLE_BDOOR, + VMAPPLE_MEMMAP_LAST, +}; + +static const MemMapEntry memmap[] = { + [VMAPPLE_FIRMWARE] = { 0x00100000, 0x00100000 }, + [VMAPPLE_CONFIG] = { 0x00400000, 0x00010000 }, + + [VMAPPLE_GIC_DIST] = { 0x10000000, 0x00010000 }, + [VMAPPLE_GIC_REDIST] = { 0x10010000, 0x00400000 }, + + [VMAPPLE_UART] = { 0x20010000, 0x00010000 }, + [VMAPPLE_RTC] = { 0x20050000, 0x00001000 }, + [VMAPPLE_GPIO] = { 0x20060000, 0x00001000 }, + [VMAPPLE_PVPANIC] = { 0x20070000, 0x00000002 }, + [VMAPPLE_BDOOR] = { 0x30000000, 0x00200000 }, + [VMAPPLE_APV_GFX] = { 0x30200000, 0x00010000 }, + [VMAPPLE_APV_IOSFC] = { 0x30210000, 0x00010000 }, + [VMAPPLE_AES_1] = { 0x30220000, 0x00004000 }, + [VMAPPLE_AES_2] = { 0x30230000, 0x00004000 }, + [VMAPPLE_PCIE_ECAM] = { 0x40000000, 0x10000000 }, + [VMAPPLE_PCIE_MMIO] = { 0x50000000, 0x1fff0000 }, + + /* Actual RAM size depends on configuration */ + [VMAPPLE_MEM] = { 0x70000000ULL, GiB}, +}; + +static const int irqmap[] = { + [VMAPPLE_UART] = 1, + [VMAPPLE_RTC] = 2, + [VMAPPLE_GPIO] = 0x5, + [VMAPPLE_APV_IOSFC] = 0x10, + [VMAPPLE_APV_GFX] = 0x11, + [VMAPPLE_AES_1] = 0x12, + [VMAPPLE_PCIE] = 0x20, +}; + +#define GPEX_NUM_IRQS 16 + +static void create_bdif(VMAppleMachineState *vms, MemoryRegion *mem) +{ + DeviceState *bdif; + SysBusDevice *bdif_sb; + DriveInfo *di_aux = drive_get(IF_PFLASH, 0, 0); + DriveInfo *di_root = drive_get(IF_PFLASH, 0, 1); + + if (!di_aux) { + error_report("No AUX device. 
Please specify one as pflash drive."); + exit(1); + } + + if (!di_root) { + /* Fall back to the first IF_VIRTIO device as root device */ + di_root = drive_get(IF_VIRTIO, 0, 0); + } + + if (!di_root) { + error_report("No root device. Please specify one as virtio drive."); + exit(1); + } + + /* PV backdoor device */ + bdif = qdev_new(TYPE_VMAPPLE_BDIF); + bdif_sb = SYS_BUS_DEVICE(bdif); + sysbus_mmio_map(bdif_sb, 0, vms->memmap[VMAPPLE_BDOOR].base); + + qdev_prop_set_drive(DEVICE(bdif), "aux", blk_by_legacy_dinfo(di_aux)); + qdev_prop_set_drive(DEVICE(bdif), "root", blk_by_legacy_dinfo(di_root)); + + sysbus_realize_and_unref(bdif_sb, &error_fatal); +} + +static void create_pvpanic(VMAppleMachineState *vms, MemoryRegion *mem) +{ + SysBusDevice *pvpanic; + + vms->pvpanic = qdev_new(TYPE_PVPANIC_MMIO_DEVICE); + pvpanic = SYS_BUS_DEVICE(vms->pvpanic); + sysbus_mmio_map(pvpanic, 0, vms->memmap[VMAPPLE_PVPANIC].base); + + sysbus_realize_and_unref(pvpanic, &error_fatal); +} + +static bool create_cfg(VMAppleMachineState *vms, MemoryRegion *mem, + Error **errp) +{ + ERRP_GUARD(); + SysBusDevice *cfg; + MachineState *machine = MACHINE(vms); + uint32_t rnd = 1; + + vms->cfg = qdev_new(TYPE_VMAPPLE_CFG); + cfg = SYS_BUS_DEVICE(vms->cfg); + sysbus_mmio_map(cfg, 0, vms->memmap[VMAPPLE_CONFIG].base); + + qemu_guest_getrandom_nofail(&rnd, sizeof(rnd)); + + qdev_prop_set_uint32(vms->cfg, "nr-cpus", machine->smp.cpus); + qdev_prop_set_uint64(vms->cfg, "ecid", vms->uuid); + qdev_prop_set_uint64(vms->cfg, "ram-size", machine->ram_size); + qdev_prop_set_uint32(vms->cfg, "rnd", rnd); + + if (!sysbus_realize_and_unref(cfg, errp)) { + error_prepend(errp, "Error creating vmapple cfg device: "); + return false; + } + + return true; +} + +static void create_gfx(VMAppleMachineState *vms, MemoryRegion *mem) +{ + int irq_gfx = vms->irqmap[VMAPPLE_APV_GFX]; + int irq_iosfc = vms->irqmap[VMAPPLE_APV_IOSFC]; + SysBusDevice *gfx; + + gfx = SYS_BUS_DEVICE(qdev_new("apple-gfx-mmio")); + sysbus_mmio_map(gfx, 0, vms->memmap[VMAPPLE_APV_GFX].base); + sysbus_mmio_map(gfx, 1, vms->memmap[VMAPPLE_APV_IOSFC].base); + sysbus_connect_irq(gfx, 0, qdev_get_gpio_in(vms->gic, irq_gfx)); + sysbus_connect_irq(gfx, 1, qdev_get_gpio_in(vms->gic, irq_iosfc)); + sysbus_realize_and_unref(gfx, &error_fatal); +} + +static void create_aes(VMAppleMachineState *vms, MemoryRegion *mem) +{ + int irq = vms->irqmap[VMAPPLE_AES_1]; + SysBusDevice *aes; + + aes = SYS_BUS_DEVICE(qdev_new(TYPE_APPLE_AES)); + sysbus_mmio_map(aes, 0, vms->memmap[VMAPPLE_AES_1].base); + sysbus_mmio_map(aes, 1, vms->memmap[VMAPPLE_AES_2].base); + sysbus_connect_irq(aes, 0, qdev_get_gpio_in(vms->gic, irq)); + sysbus_realize_and_unref(aes, &error_fatal); +} + +static int arm_gic_ppi_index(int cpu_nr, int ppi_index) +{ + return NUM_IRQS + cpu_nr * GIC_INTERNAL + ppi_index; +} + +static void create_gic(VMAppleMachineState *vms, MemoryRegion *mem) +{ + MachineState *ms = MACHINE(vms); + /* We create a standalone GIC */ + SysBusDevice *gicbusdev; + QList *redist_region_count; + int i; + unsigned int smp_cpus = ms->smp.cpus; + + vms->gic = qdev_new(gicv3_class_name()); + qdev_prop_set_uint32(vms->gic, "revision", 3); + qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus); + /* + * Note that the num-irq property counts both internal and external + * interrupts; there are always 32 of the former (mandated by GIC spec). 
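+ * (Editor's illustration: combined with arm_gic_ppi_index() above, a
+ * per-CPU PPI maps to GIC input NUM_IRQS + cpu * 32 + ppi, so with
+ * NUM_IRQS = 256, CPU 1's virtual timer on PPI 27 below is wired to
+ * input 256 + 1 * 32 + 27 = 315.)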
+ */ + qdev_prop_set_uint32(vms->gic, "num-irq", NUM_IRQS + 32); + + uint32_t redist0_capacity = + vms->memmap[VMAPPLE_GIC_REDIST].size / GICV3_REDIST_SIZE; + uint32_t redist0_count = MIN(smp_cpus, redist0_capacity); + + redist_region_count = qlist_new(); + qlist_append_int(redist_region_count, redist0_count); + qdev_prop_set_array(vms->gic, "redist-region-count", redist_region_count); + + gicbusdev = SYS_BUS_DEVICE(vms->gic); + sysbus_realize_and_unref(gicbusdev, &error_fatal); + sysbus_mmio_map(gicbusdev, 0, vms->memmap[VMAPPLE_GIC_DIST].base); + sysbus_mmio_map(gicbusdev, 1, vms->memmap[VMAPPLE_GIC_REDIST].base); + + /* + * Wire the outputs from each CPU's generic timer and the GICv3 + * maintenance interrupt signal to the appropriate GIC PPI inputs, + * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs. + */ + for (i = 0; i < smp_cpus; i++) { + DeviceState *cpudev = DEVICE(qemu_get_cpu(i)); + + /* Map the virt timer to PPI 27 */ + qdev_connect_gpio_out(cpudev, GTIMER_VIRT, + qdev_get_gpio_in(vms->gic, + arm_gic_ppi_index(i, 27))); + + /* Map the GIC IRQ and FIQ lines to CPU */ + sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); + sysbus_connect_irq(gicbusdev, i + smp_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); + } +} + +static void create_uart(const VMAppleMachineState *vms, int uart, + MemoryRegion *mem, Chardev *chr) +{ + hwaddr base = vms->memmap[uart].base; + int irq = vms->irqmap[uart]; + DeviceState *dev = qdev_new(TYPE_PL011); + SysBusDevice *s = SYS_BUS_DEVICE(dev); + + qdev_prop_set_chr(dev, "chardev", chr); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + memory_region_add_subregion(mem, base, + sysbus_mmio_get_region(s, 0)); + sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq)); +} + +static void create_rtc(const VMAppleMachineState *vms) +{ + hwaddr base = vms->memmap[VMAPPLE_RTC].base; + int irq = vms->irqmap[VMAPPLE_RTC]; + + sysbus_create_simple("pl031", base, qdev_get_gpio_in(vms->gic, irq)); +} + +static DeviceState *gpio_key_dev; +static void vmapple_powerdown_req(Notifier *n, void *opaque) +{ + /* use gpio Pin 3 for power button event */ + qemu_set_irq(qdev_get_gpio_in(gpio_key_dev, 0), 1); +} + +static void create_gpio_devices(const VMAppleMachineState *vms, int gpio, + MemoryRegion *mem) +{ + DeviceState *pl061_dev; + hwaddr base = vms->memmap[gpio].base; + int irq = vms->irqmap[gpio]; + SysBusDevice *s; + + pl061_dev = qdev_new("pl061"); + /* Pull lines down to 0 if not driven by the PL061 */ + qdev_prop_set_uint32(pl061_dev, "pullups", 0); + qdev_prop_set_uint32(pl061_dev, "pulldowns", 0xff); + s = SYS_BUS_DEVICE(pl061_dev); + sysbus_realize_and_unref(s, &error_fatal); + memory_region_add_subregion(mem, base, sysbus_mmio_get_region(s, 0)); + sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq)); + gpio_key_dev = sysbus_create_simple("gpio-key", -1, + qdev_get_gpio_in(pl061_dev, 3)); +} + +static void vmapple_firmware_init(VMAppleMachineState *vms, + MemoryRegion *sysmem) +{ + hwaddr size = vms->memmap[VMAPPLE_FIRMWARE].size; + hwaddr base = vms->memmap[VMAPPLE_FIRMWARE].base; + const char *bios_name; + int image_size; + char *fname; + + bios_name = MACHINE(vms)->firmware; + if (!bios_name) { + error_report("No firmware specified"); + exit(1); + } + + fname = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (!fname) { + error_report("Could not find ROM image '%s'", bios_name); + exit(1); + } + + memory_region_init_ram(&vms->fw_mr, NULL, "firmware", size, &error_fatal); + image_size = 
load_image_mr(fname, &vms->fw_mr); + + g_free(fname); + if (image_size < 0) { + error_report("Could not load ROM image '%s'", bios_name); + exit(1); + } + + memory_region_add_subregion(get_system_memory(), base, &vms->fw_mr); +} + +static void create_pcie(VMAppleMachineState *vms) +{ + hwaddr base_mmio = vms->memmap[VMAPPLE_PCIE_MMIO].base; + hwaddr size_mmio = vms->memmap[VMAPPLE_PCIE_MMIO].size; + hwaddr base_ecam = vms->memmap[VMAPPLE_PCIE_ECAM].base; + hwaddr size_ecam = vms->memmap[VMAPPLE_PCIE_ECAM].size; + int irq = vms->irqmap[VMAPPLE_PCIE]; + MemoryRegion *mmio_alias; + MemoryRegion *mmio_reg; + MemoryRegion *ecam_reg; + DeviceState *dev; + int i; + PCIHostState *pci; + DeviceState *usb_controller; + USBBus *usb_bus; + + dev = qdev_new(TYPE_GPEX_HOST); + qdev_prop_set_uint32(dev, "num-irqs", GPEX_NUM_IRQS); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + /* Map only the first size_ecam bytes of ECAM space */ + ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_init_alias(&vms->ecam_alias, OBJECT(dev), "pcie-ecam", + ecam_reg, 0, size_ecam); + memory_region_add_subregion(get_system_memory(), base_ecam, + &vms->ecam_alias); + + /* + * Map the MMIO window from [0x50000000-0x7fff0000] in PCI space into + * system address space at [0x50000000-0x7fff0000]. + */ + mmio_alias = g_new0(MemoryRegion, 1); + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio", + mmio_reg, base_mmio, size_mmio); + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias); + + for (i = 0; i < GPEX_NUM_IRQS; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, + qdev_get_gpio_in(vms->gic, irq + i)); + gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); + } + + pci = PCI_HOST_BRIDGE(dev); + vms->bus = pci->bus; + g_assert(vms->bus); + + while ((dev = qemu_create_nic_device("virtio-net-pci", true, NULL))) { + qdev_realize_and_unref(dev, BUS(vms->bus), &error_fatal); + } + + if (defaults_enabled()) { + usb_controller = qdev_new(TYPE_QEMU_XHCI); + qdev_realize_and_unref(usb_controller, BUS(pci->bus), &error_fatal); + + usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS, + &error_fatal)); + usb_create_simple(usb_bus, "usb-kbd"); + usb_create_simple(usb_bus, "usb-tablet"); + } +} + +static void vmapple_reset(void *opaque) +{ + VMAppleMachineState *vms = opaque; + hwaddr base = vms->memmap[VMAPPLE_FIRMWARE].base; + + cpu_set_pc(first_cpu, base); +} + +static void mach_vmapple_init(MachineState *machine) +{ + VMAppleMachineState *vms = VMAPPLE_MACHINE(machine); + MachineClass *mc = MACHINE_GET_CLASS(machine); + const CPUArchIdList *possible_cpus; + MemoryRegion *sysmem = get_system_memory(); + int n; + unsigned int smp_cpus = machine->smp.cpus; + unsigned int max_cpus = machine->smp.max_cpus; + + vms->memmap = memmap; + machine->usb = true; + + possible_cpus = mc->possible_cpu_arch_ids(machine); + assert(possible_cpus->len == max_cpus); + for (n = 0; n < possible_cpus->len; n++) { + Object *cpu; + CPUState *cs; + + if (n >= smp_cpus) { + break; + } + + cpu = object_new(possible_cpus->cpus[n].type); + object_property_set_int(cpu, "mp-affinity", + possible_cpus->cpus[n].arch_id, &error_fatal); + + cs = CPU(cpu); + cs->cpu_index = n; + + numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpu), + &error_fatal); + + if (object_property_find(cpu, "has_el3")) { + object_property_set_bool(cpu, "has_el3", false, &error_fatal); + } + if (object_property_find(cpu, "has_el2")) { + 
object_property_set_bool(cpu, "has_el2", false, &error_fatal); + } + object_property_set_int(cpu, "psci-conduit", QEMU_PSCI_CONDUIT_HVC, + &error_fatal); + + /* Secondary CPUs start in PSCI powered-down state */ + if (n > 0) { + object_property_set_bool(cpu, "start-powered-off", true, + &error_fatal); + } + + object_property_set_link(cpu, "memory", OBJECT(sysmem), &error_abort); + qdev_realize(DEVICE(cpu), NULL, &error_fatal); + object_unref(cpu); + } + + memory_region_add_subregion(sysmem, vms->memmap[VMAPPLE_MEM].base, + machine->ram); + + create_gic(vms, sysmem); + create_bdif(vms, sysmem); + create_pvpanic(vms, sysmem); + create_aes(vms, sysmem); + create_gfx(vms, sysmem); + create_uart(vms, VMAPPLE_UART, sysmem, serial_hd(0)); + create_rtc(vms); + create_pcie(vms); + + create_gpio_devices(vms, VMAPPLE_GPIO, sysmem); + + vmapple_firmware_init(vms, sysmem); + create_cfg(vms, sysmem, &error_fatal); + + /* connect powerdown request */ + vms->powerdown_notifier.notify = vmapple_powerdown_req; + qemu_register_powerdown_notifier(&vms->powerdown_notifier); + + vms->bootinfo.ram_size = machine->ram_size; + vms->bootinfo.board_id = -1; + vms->bootinfo.loader_start = vms->memmap[VMAPPLE_MEM].base; + vms->bootinfo.skip_dtb_autoload = true; + vms->bootinfo.firmware_loaded = true; + arm_load_kernel(ARM_CPU(first_cpu), machine, &vms->bootinfo); + + qemu_register_reset(vmapple_reset, vms); +} + +static CpuInstanceProperties +vmapple_cpu_index_to_props(MachineState *ms, unsigned cpu_index) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); + + assert(cpu_index < possible_cpus->len); + return possible_cpus->cpus[cpu_index].props; +} + + +static int64_t vmapple_get_default_cpu_node_id(const MachineState *ms, int idx) +{ + return idx % ms->numa_state->num_nodes; +} + +static const CPUArchIdList *vmapple_possible_cpu_arch_ids(MachineState *ms) +{ + int n; + unsigned int max_cpus = ms->smp.max_cpus; + + if (ms->possible_cpus) { + assert(ms->possible_cpus->len == max_cpus); + return ms->possible_cpus; + } + + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + + sizeof(CPUArchId) * max_cpus); + ms->possible_cpus->len = max_cpus; + for (n = 0; n < ms->possible_cpus->len; n++) { + ms->possible_cpus->cpus[n].type = ms->cpu_type; + ms->possible_cpus->cpus[n].arch_id = + arm_build_mp_affinity(n, GICV3_TARGETLIST_BITS); + ms->possible_cpus->cpus[n].props.has_thread_id = true; + ms->possible_cpus->cpus[n].props.thread_id = n; + } + return ms->possible_cpus; +} + +static GlobalProperty vmapple_compat_defaults[] = { + { TYPE_VIRTIO_PCI, "disable-legacy", "on" }, + /* + * macOS XHCI driver attempts to schedule events onto even rings 1 & 2 + * even when (as here) there is no MSI(-X) support. Disabling interrupter + * mapping in the XHCI controller works around the problem. 
+ */ + { TYPE_XHCI_PCI, "conditional-intr-mapping", "on" }, +}; + +static void vmapple_machine_class_init(ObjectClass *oc, const void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->init = mach_vmapple_init; + mc->max_cpus = 32; + mc->block_default_type = IF_VIRTIO; + mc->no_cdrom = 1; + mc->pci_allow_0_address = true; + mc->minimum_page_bits = 12; + mc->possible_cpu_arch_ids = vmapple_possible_cpu_arch_ids; + mc->cpu_index_to_instance_props = vmapple_cpu_index_to_props; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("host"); + mc->get_default_cpu_node_id = vmapple_get_default_cpu_node_id; + mc->default_ram_id = "mach-vmapple.ram"; + mc->desc = "Apple aarch64 Virtual Machine"; + + compat_props_add(mc->compat_props, vmapple_compat_defaults, + G_N_ELEMENTS(vmapple_compat_defaults)); +} + +static void vmapple_instance_init(Object *obj) +{ + VMAppleMachineState *vms = VMAPPLE_MACHINE(obj); + + vms->irqmap = irqmap; + + object_property_add_uint64_ptr(obj, "uuid", &vms->uuid, + OBJ_PROP_FLAG_READWRITE); + object_property_set_description(obj, "uuid", "Machine UUID (SDOM)"); +} + +static const TypeInfo vmapple_machine_info = { + .name = TYPE_VMAPPLE_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(VMAppleMachineState), + .class_init = vmapple_machine_class_init, + .instance_init = vmapple_instance_init, +}; + +static void machvmapple_machine_init(void) +{ + type_register_static(&vmapple_machine_info); +} +type_init(machvmapple_machine_init); + diff --git a/hw/watchdog/allwinner-wdt.c b/hw/watchdog/allwinner-wdt.c index d35711c7c5b..8fcd7766752 100644 --- a/hw/watchdog/allwinner-wdt.c +++ b/hw/watchdog/allwinner-wdt.c @@ -28,7 +28,7 @@ #include "hw/sysbus.h" #include "hw/registerfields.h" #include "hw/watchdog/allwinner-wdt.h" -#include "sysemu/watchdog.h" +#include "system/watchdog.h" #include "migration/vmstate.h" /* WDT registers */ @@ -275,7 +275,7 @@ static void allwinner_wdt_write(void *opaque, hwaddr offset, static const MemoryRegionOps allwinner_wdt_ops = { .read = allwinner_wdt_read, .write = allwinner_wdt_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -348,7 +348,7 @@ static void allwinner_wdt_realize(DeviceState *dev, Error **errp) ptimer_transaction_commit(s->timer); } -static void allwinner_wdt_class_init(ObjectClass *klass, void *data) +static void allwinner_wdt_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ResettableClass *rc = RESETTABLE_CLASS(klass); @@ -358,7 +358,7 @@ static void allwinner_wdt_class_init(ObjectClass *klass, void *data) dc->vmsd = &allwinner_wdt_vmstate; } -static void allwinner_wdt_sun4i_class_init(ObjectClass *klass, void *data) +static void allwinner_wdt_sun4i_class_init(ObjectClass *klass, const void *data) { AwWdtClass *awc = AW_WDT_CLASS(klass); @@ -371,7 +371,7 @@ static void allwinner_wdt_sun4i_class_init(ObjectClass *klass, void *data) awc->get_intv_value = allwinner_wdt_sun4i_get_intv_value; } -static void allwinner_wdt_sun6i_class_init(ObjectClass *klass, void *data) +static void allwinner_wdt_sun6i_class_init(ObjectClass *klass, const void *data) { AwWdtClass *awc = AW_WDT_CLASS(klass); diff --git a/hw/watchdog/cmsdk-apb-watchdog.c b/hw/watchdog/cmsdk-apb-watchdog.c index 3091e5c3d54..6a8d07ca569 100644 --- a/hw/watchdog/cmsdk-apb-watchdog.c +++ b/hw/watchdog/cmsdk-apb-watchdog.c @@ -12,8 +12,8 @@ /* * This is a model of the "APB watchdog" which is part of the Cortex-M * System Design Kit (CMSDK) 
and documented in the Cortex-M System - * Design Kit Technical Reference Manual (ARM DDI0479C): - * https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit + * Design Kit Technical Reference Manual (ARM DDI0479): + * https://developer.arm.com/documentation/ddi0479/ * * We also support the variant of this device found in the TI * Stellaris/Luminary boards and documented in: @@ -25,7 +25,7 @@ #include "trace.h" #include "qapi/error.h" #include "qemu/module.h" -#include "sysemu/watchdog.h" +#include "system/watchdog.h" #include "hw/sysbus.h" #include "hw/irq.h" #include "hw/qdev-properties.h" @@ -196,16 +196,13 @@ static void cmsdk_apb_watchdog_write(void *opaque, hwaddr offset, switch (offset) { case A_WDOGLOAD: - /* - * Reset the load value and the current count, and make sure - * we're counting. - */ + /* Reset the load value and the current count. */ ptimer_transaction_begin(s->timer); ptimer_set_limit(s->timer, value, 1); - ptimer_run(s->timer, 0); ptimer_transaction_commit(s->timer); break; - case A_WDOGCONTROL: + case A_WDOGCONTROL: { + uint32_t prev_control = s->control; if (s->is_luminary && 0 != (R_WDOGCONTROL_INTEN_MASK & s->control)) { /* * The Luminary version of this device ignores writes to @@ -215,8 +212,25 @@ static void cmsdk_apb_watchdog_write(void *opaque, hwaddr offset, break; } s->control = value & R_WDOGCONTROL_VALID_MASK; + if (R_WDOGCONTROL_INTEN_MASK & (s->control ^ prev_control)) { + ptimer_transaction_begin(s->timer); + if (R_WDOGCONTROL_INTEN_MASK & s->control) { + /* + * Set HIGH to enable the counter and the interrupt. Reloads + * the counter from the value in WDOGLOAD when the interrupt + * is enabled, after previously being disabled. + */ + ptimer_set_count(s->timer, ptimer_get_limit(s->timer)); + ptimer_run(s->timer, 0); + } else { + /* Or LOW to disable the counter and interrupt. */ + ptimer_stop(s->timer); + } + ptimer_transaction_commit(s->timer); + } cmsdk_apb_watchdog_update(s); break; + } case A_WDOGINTCLR: s->intstatus = 0; ptimer_transaction_begin(s->timer); @@ -305,8 +319,14 @@ static void cmsdk_apb_watchdog_reset(DeviceState *dev) s->resetstatus = 0; /* Set the limit and the count */ ptimer_transaction_begin(s->timer); + /* + * We need to stop the ptimer before setting its limit reset value. If the + * order is the opposite when the code executes the stop after setting a new + * limit it may want to recalculate the count based on the current time (if + * the timer was currently running) and it won't get the proper reset value. 
+ */ + ptimer_stop(s->timer); ptimer_set_limit(s->timer, 0xffffffff, 1); - ptimer_run(s->timer, 0); ptimer_transaction_commit(s->timer); } @@ -374,13 +394,13 @@ static const VMStateDescription cmsdk_apb_watchdog_vmstate = { } }; -static void cmsdk_apb_watchdog_class_init(ObjectClass *klass, void *data) +static void cmsdk_apb_watchdog_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = cmsdk_apb_watchdog_realize; dc->vmsd = &cmsdk_apb_watchdog_vmstate; - dc->reset = cmsdk_apb_watchdog_reset; + device_class_set_legacy_reset(dc, cmsdk_apb_watchdog_reset); } static const TypeInfo cmsdk_apb_watchdog_info = { diff --git a/hw/watchdog/sbsa_gwdt.c b/hw/watchdog/sbsa_gwdt.c index d437535cc66..ce84849df0e 100644 --- a/hw/watchdog/sbsa_gwdt.c +++ b/hw/watchdog/sbsa_gwdt.c @@ -16,8 +16,8 @@ */ #include "qemu/osdep.h" -#include "sysemu/reset.h" -#include "sysemu/watchdog.h" +#include "system/reset.h" +#include "system/watchdog.h" #include "hw/qdev-properties.h" #include "hw/watchdog/sbsa_gwdt.h" #include "qemu/timer.h" @@ -174,7 +174,6 @@ static void sbsa_gwdt_write(void *opaque, hwaddr offset, uint64_t data, qemu_log_mask(LOG_GUEST_ERROR, "bad address in control frame write :" " 0x%x\n", (int)offset); } - return; } static void wdt_sbsa_gwdt_reset(DeviceState *dev) @@ -262,7 +261,7 @@ static void wdt_sbsa_gwdt_realize(DeviceState *dev, Error **errp) dev); } -static Property wdt_sbsa_gwdt_props[] = { +static const Property wdt_sbsa_gwdt_props[] = { /* * Timer frequency in Hz. This must match the frequency used by * the CPU's generic timer. Default 62.5Hz matches QEMU's legacy @@ -270,15 +269,14 @@ static Property wdt_sbsa_gwdt_props[] = { */ DEFINE_PROP_UINT64("clock-frequency", struct SBSA_GWDTState, freq, 62500000), - DEFINE_PROP_END_OF_LIST(), }; -static void wdt_sbsa_gwdt_class_init(ObjectClass *klass, void *data) +static void wdt_sbsa_gwdt_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = wdt_sbsa_gwdt_realize; - dc->reset = wdt_sbsa_gwdt_reset; + device_class_set_legacy_reset(dc, wdt_sbsa_gwdt_reset); dc->hotpluggable = false; set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); dc->vmsd = &vmstate_sbsa_gwdt; diff --git a/hw/watchdog/spapr_watchdog.c b/hw/watchdog/spapr_watchdog.c index 2bb1d3c5325..5b3f50de3a1 100644 --- a/hw/watchdog/spapr_watchdog.c +++ b/hw/watchdog/spapr_watchdog.c @@ -249,7 +249,7 @@ static void spapr_wdt_realize(DeviceState *dev, Error **errp) &w->leave_others, OBJ_PROP_FLAG_READ); } -static void spapr_wdt_class_init(ObjectClass *oc, void *data) +static void spapr_wdt_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); diff --git a/hw/watchdog/watchdog.c b/hw/watchdog/watchdog.c index 955046161bf..07213739488 100644 --- a/hw/watchdog/watchdog.c +++ b/hw/watchdog/watchdog.c @@ -26,8 +26,8 @@ #include "qapi/error.h" #include "qapi/qapi-commands-run-state.h" #include "qapi/qapi-events-run-state.h" -#include "sysemu/runstate.h" -#include "sysemu/watchdog.h" +#include "system/runstate.h" +#include "system/watchdog.h" #include "hw/nmi.h" #include "qemu/help_option.h" #include "trace.h" @@ -85,7 +85,7 @@ void watchdog_perform_action(void) break; default: - assert(0); + g_assert_not_reached(); } } diff --git a/hw/watchdog/wdt_aspeed.c b/hw/watchdog/wdt_aspeed.c index 75685c56470..30226435efc 100644 --- a/hw/watchdog/wdt_aspeed.c +++ b/hw/watchdog/wdt_aspeed.c @@ -13,7 +13,7 @@ #include "qemu/log.h" #include "qemu/module.h" #include "qemu/timer.h" 
-#include "sysemu/watchdog.h" +#include "system/watchdog.h" #include "hw/qdev-properties.h" #include "hw/sysbus.h" #include "hw/watchdog/wdt_aspeed.h" @@ -51,11 +51,20 @@ #define WDT_TIMEOUT_CLEAR (0x14 / 4) #define WDT_RESTART_MAGIC 0x4755 +#define WDT_SW_RESET_ENABLE 0xAEEDF123 #define AST2600_SCU_RESET_CONTROL1 (0x40 / 4) #define SCU_RESET_CONTROL1 (0x04 / 4) #define SCU_RESET_SDRAM BIT(0) +static bool aspeed_wdt_is_soc_reset_mode(const AspeedWDTState *s) +{ + uint32_t mode; + + mode = extract32(s->regs[WDT_CTRL], 5, 2); + return (mode == WDT_CTRL_RESET_MODE_SOC); +} + static bool aspeed_wdt_is_enabled(const AspeedWDTState *s) { return s->regs[WDT_CTRL] & WDT_CTRL_ENABLE; @@ -199,19 +208,23 @@ static void aspeed_wdt_write(void *opaque, hwaddr offset, uint64_t data, case WDT_TIMEOUT_STATUS: case WDT_TIMEOUT_CLEAR: case WDT_RESET_MASK2: - case WDT_SW_RESET_CTRL: case WDT_SW_RESET_MASK1: case WDT_SW_RESET_MASK2: qemu_log_mask(LOG_UNIMP, "%s: uninmplemented write at offset 0x%" HWADDR_PRIx "\n", __func__, offset); break; + case WDT_SW_RESET_CTRL: + if (aspeed_wdt_is_soc_reset_mode(s) && + (data == WDT_SW_RESET_ENABLE)) { + watchdog_perform_action(); + } + break; default: qemu_log_mask(LOG_GUEST_ERROR, "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", __func__, offset); } - return; } static const VMStateDescription vmstate_aspeed_wdt = { @@ -278,7 +291,8 @@ static void aspeed_wdt_realize(DeviceState *dev, Error **errp) s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, aspeed_wdt_timer_expired, dev); - /* FIXME: This setting should be derived from the SCU hw strapping + /* + * FIXME: This setting should be derived from the SCU hw strapping * register SCU70 */ s->pclk_freq = PCLK_HZ; @@ -288,19 +302,18 @@ static void aspeed_wdt_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->iomem); } -static Property aspeed_wdt_properties[] = { +static const Property aspeed_wdt_properties[] = { DEFINE_PROP_LINK("scu", AspeedWDTState, scu, TYPE_ASPEED_SCU, AspeedSCUState *), - DEFINE_PROP_END_OF_LIST(), }; -static void aspeed_wdt_class_init(ObjectClass *klass, void *data) +static void aspeed_wdt_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->desc = "ASPEED Watchdog Controller"; dc->realize = aspeed_wdt_realize; - dc->reset = aspeed_wdt_reset; + device_class_set_legacy_reset(dc, aspeed_wdt_reset); set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); dc->vmsd = &vmstate_aspeed_wdt; device_class_set_props(dc, aspeed_wdt_properties); @@ -316,7 +329,7 @@ static const TypeInfo aspeed_wdt_info = { .abstract = true, }; -static void aspeed_2400_wdt_class_init(ObjectClass *klass, void *data) +static void aspeed_2400_wdt_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass); @@ -353,7 +366,7 @@ static void aspeed_2500_wdt_reset_pulse(AspeedWDTState *s, uint32_t property) } } -static void aspeed_2500_wdt_class_init(ObjectClass *klass, void *data) +static void aspeed_2500_wdt_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass); @@ -376,7 +389,7 @@ static const TypeInfo aspeed_2500_wdt_info = { .class_init = aspeed_2500_wdt_class_init, }; -static void aspeed_2600_wdt_class_init(ObjectClass *klass, void *data) +static void aspeed_2600_wdt_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass); @@ -399,7 +412,7 @@ 
static const TypeInfo aspeed_2600_wdt_info = { .class_init = aspeed_2600_wdt_class_init, }; -static void aspeed_1030_wdt_class_init(ObjectClass *klass, void *data) +static void aspeed_1030_wdt_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass); @@ -422,7 +435,7 @@ static const TypeInfo aspeed_1030_wdt_info = { .class_init = aspeed_1030_wdt_class_init, }; -static void aspeed_2700_wdt_class_init(ObjectClass *klass, void *data) +static void aspeed_2700_wdt_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass); diff --git a/hw/watchdog/wdt_diag288.c b/hw/watchdog/wdt_diag288.c index 1b73b16fb35..1275353e8e5 100644 --- a/hw/watchdog/wdt_diag288.c +++ b/hw/watchdog/wdt_diag288.c @@ -12,8 +12,8 @@ */ #include "qemu/osdep.h" -#include "sysemu/reset.h" -#include "sysemu/watchdog.h" +#include "system/reset.h" +#include "system/watchdog.h" #include "qemu/timer.h" #include "hw/watchdog/wdt_diag288.h" #include "migration/vmstate.h" @@ -108,14 +108,14 @@ static void wdt_diag288_unrealize(DeviceState *dev) timer_free(diag288->timer); } -static void wdt_diag288_class_init(ObjectClass *klass, void *data) +static void wdt_diag288_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); DIAG288Class *diag288 = DIAG288_CLASS(klass); dc->realize = wdt_diag288_realize; dc->unrealize = wdt_diag288_unrealize; - dc->reset = wdt_diag288_reset; + device_class_set_legacy_reset(dc, wdt_diag288_reset); dc->hotpluggable = false; set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); dc->vmsd = &vmstate_diag288; diff --git a/hw/watchdog/wdt_i6300esb.c b/hw/watchdog/wdt_i6300esb.c index 8bce0509cd5..bb8a2766b69 100644 --- a/hw/watchdog/wdt_i6300esb.c +++ b/hw/watchdog/wdt_i6300esb.c @@ -23,7 +23,7 @@ #include "qemu/module.h" #include "qemu/timer.h" -#include "sysemu/watchdog.h" +#include "system/watchdog.h" #include "hw/pci/pci_device.h" #include "migration/vmstate.h" #include "qom/object.h" @@ -457,7 +457,7 @@ static void i6300esb_exit(PCIDevice *dev) timer_free(d->timer); } -static void i6300esb_class_init(ObjectClass *klass, void *data) +static void i6300esb_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -469,7 +469,7 @@ static void i6300esb_class_init(ObjectClass *klass, void *data) k->vendor_id = PCI_VENDOR_ID_INTEL; k->device_id = PCI_DEVICE_ID_INTEL_ESB_9; k->class_id = PCI_CLASS_SYSTEM_OTHER; - dc->reset = i6300esb_reset; + device_class_set_legacy_reset(dc, i6300esb_reset); dc->vmsd = &vmstate_i6300esb; set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); dc->desc = "Intel 6300ESB"; @@ -480,7 +480,7 @@ static const TypeInfo i6300esb_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(I6300State), .class_init = i6300esb_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/watchdog/wdt_ib700.c b/hw/watchdog/wdt_ib700.c index eea8da60596..51a26a4cbbc 100644 --- a/hw/watchdog/wdt_ib700.c +++ b/hw/watchdog/wdt_ib700.c @@ -22,7 +22,7 @@ #include "qemu/osdep.h" #include "qemu/module.h" #include "qemu/timer.h" -#include "sysemu/watchdog.h" +#include "system/watchdog.h" #include "hw/isa/isa.h" #include "migration/vmstate.h" #include "qom/object.h" @@ -128,12 +128,12 @@ static void wdt_ib700_reset(DeviceState *dev) timer_del(s->timer); } 
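The watchdog hunks above and below all apply the same mechanical conversions: class_init callbacks take a const void *data, legacy dc->reset assignments go through device_class_set_legacy_reset(), and Property arrays become const and drop DEFINE_PROP_END_OF_LIST(). A rough sketch of the resulting shape, using a hypothetical foo_wdt device (FooWdtState, foo_wdt_realize and foo_wdt_reset are illustrative names only, not part of the patch):

static const Property foo_wdt_properties[] = {
    /* No terminating DEFINE_PROP_END_OF_LIST() entry: device_class_set_props()
     * now receives the const array together with its size. */
    DEFINE_PROP_UINT32("timeout-sec", FooWdtState, timeout_sec, 30),
};

static void foo_wdt_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = foo_wdt_realize;
    /* Wraps the old DeviceState reset handler into the Resettable framework,
     * replacing the removed "dc->reset = foo_wdt_reset;" assignment. */
    device_class_set_legacy_reset(dc, foo_wdt_reset);
    device_class_set_props(dc, foo_wdt_properties);
    set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories);
}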
-static void wdt_ib700_class_init(ObjectClass *klass, void *data) +static void wdt_ib700_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = wdt_ib700_realize; - dc->reset = wdt_ib700_reset; + device_class_set_legacy_reset(dc, wdt_ib700_reset); dc->vmsd = &vmstate_ib700; set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); dc->desc = "iBASE 700"; diff --git a/hw/watchdog/wdt_imx2.c b/hw/watchdog/wdt_imx2.c index 6452fc4721d..10151a15d08 100644 --- a/hw/watchdog/wdt_imx2.c +++ b/hw/watchdog/wdt_imx2.c @@ -12,7 +12,7 @@ #include "qemu/osdep.h" #include "qemu/bitops.h" #include "qemu/module.h" -#include "sysemu/watchdog.h" +#include "system/watchdog.h" #include "migration/vmstate.h" #include "hw/qdev-properties.h" @@ -39,7 +39,6 @@ static void imx2_wdt_expired(void *opaque) /* Perform watchdog action if watchdog is enabled */ if (s->wcr & IMX2_WDT_WCR_WDE) { - s->wrsr = IMX2_WDT_WRSR_TOUT; watchdog_perform_action(); } } @@ -282,19 +281,18 @@ static void imx2_wdt_realize(DeviceState *dev, Error **errp) } } -static Property imx2_wdt_properties[] = { +static const Property imx2_wdt_properties[] = { DEFINE_PROP_BOOL("pretimeout-support", IMX2WdtState, pretimeout_support, false), - DEFINE_PROP_END_OF_LIST() }; -static void imx2_wdt_class_init(ObjectClass *klass, void *data) +static void imx2_wdt_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_props(dc, imx2_wdt_properties); dc->realize = imx2_wdt_realize; - dc->reset = imx2_wdt_reset; + device_class_set_legacy_reset(dc, imx2_wdt_reset); dc->vmsd = &vmstate_imx2_wdt; dc->desc = "i.MX2 watchdog timer"; set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); diff --git a/hw/xen/meson.build b/hw/xen/meson.build index d887fa9ba43..a1850e76988 100644 --- a/hw/xen/meson.build +++ b/hw/xen/meson.build @@ -9,12 +9,16 @@ system_ss.add(when: ['CONFIG_XEN_BUS'], if_true: files( system_ss.add(when: ['CONFIG_XEN', xen], if_true: files( 'xen-operations.c', +), +if_false: files( + 'xen_stubs.c', )) xen_specific_ss = ss.source_set() xen_specific_ss.add(files( 'xen-mapcache.c', 'xen-hvm-common.c', + 'xen-pvh-common.c', )) if have_xen_pci_passthrough xen_specific_ss.add(files( diff --git a/hw/xen/trace-events b/hw/xen/trace-events index d1b27f6c11b..b67942d07b4 100644 --- a/hw/xen/trace-events +++ b/hw/xen/trace-events @@ -38,7 +38,7 @@ xen_device_remove_watch(const char *type, char *name, const char *node, const ch xs_node_create(const char *node) "%s" xs_node_destroy(const char *node) "%s" xs_node_vprintf(char *path, char *value) "%s %s" -xs_node_vscanf(char *path, char *value) "%s %s" +xs_node_read(const char *path, const char *value) "%s %s" xs_node_watch(char *path) "%s" xs_node_unwatch(char *path) "%s" @@ -64,6 +64,10 @@ destroy_hvm_domain_cannot_acquire_handle(void) "Cannot acquire xenctrl handle" destroy_hvm_domain_failed_action(const char *action, int sts, char *errno_s) "xc_domain_shutdown failed to issue %s, sts %d, %s" destroy_hvm_domain_action(int xen_domid, const char *action) "Issued domain %d %s" +# xen-pvh-common.c +xen_create_virtio_mmio_devices(int i, int irq, uint64_t base) "Created virtio-mmio device %d: irq %d base 0x%"PRIx64 +xen_enable_tpm(uint64_t addr) "Connected tpmdev at address 0x%"PRIx64 + # xen-mapcache.c xen_map_cache(uint64_t phys_addr) "want 0x%"PRIx64 xen_remap_bucket(uint64_t index) "index 0x%"PRIx64 diff --git a/hw/xen/xen-bus-helper.c b/hw/xen/xen-bus-helper.c index b2b2cc9c5d5..288fad422be 100644 --- a/hw/xen/xen-bus-helper.c 
+++ b/hw/xen/xen-bus-helper.c @@ -105,25 +105,22 @@ int xs_node_vscanf(struct qemu_xs_handle *h, xs_transaction_t tid, const char *node, const char *key, Error **errp, const char *fmt, va_list ap) { - char *path, *value; + char *value; int rc; - path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) : - g_strdup(key); - value = qemu_xen_xs_read(h, tid, path, NULL); - - trace_xs_node_vscanf(path, value); + if (node && strlen(node) != 0) { + value = xs_node_read(h, tid, NULL, errp, "%s/%s", node, key); + } else { + value = xs_node_read(h, tid, NULL, errp, "%s", key); + } if (value) { rc = vsscanf(value, fmt, ap); } else { - error_setg_errno(errp, errno, "failed to read from '%s'", - path); rc = EOF; } free(value); - g_free(path); return rc; } @@ -142,6 +139,28 @@ int xs_node_scanf(struct qemu_xs_handle *h, xs_transaction_t tid, return rc; } +char *xs_node_read(struct qemu_xs_handle *h, xs_transaction_t tid, + unsigned int *len, Error **errp, + const char *path_fmt, ...) +{ + char *path, *value; + va_list ap; + + va_start(ap, path_fmt); + path = g_strdup_vprintf(path_fmt, ap); + va_end(ap); + + value = qemu_xen_xs_read(h, tid, path, len); + trace_xs_node_read(path, value); + if (!value) { + error_setg_errno(errp, errno, "failed to read from '%s'", path); + } + + g_free(path); + + return value; +} + struct qemu_xs_watch *xs_node_watch(struct qemu_xs_handle *h, const char *node, const char *key, xs_watch_fn fn, void *opaque, Error **errp) diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c index 95b207ac8b4..6bd2e546f6b 100644 --- a/hw/xen/xen-bus.c +++ b/hw/xen/xen-bus.c @@ -18,8 +18,8 @@ #include "hw/xen/xen-bus-helper.h" #include "monitor/monitor.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" -#include "sysemu/sysemu.h" +#include "qobject/qdict.h" +#include "system/system.h" #include "net/net.h" #include "trace.h" @@ -156,8 +156,8 @@ static void xen_bus_backend_create(XenBus *xenbus, const char *type, !strcmp(key[i], "hotplug-status")) continue; - if (xs_node_scanf(xenbus->xsh, tid, path, key[i], NULL, "%ms", - &val) == 1) { + val = xs_node_read(xenbus->xsh, tid, NULL, NULL, "%s/%s", path, key[i]); + if (val) { qdict_put_str(opts, key[i], val); free(val); } @@ -353,10 +353,10 @@ static void xen_bus_realize(BusState *bus, Error **errp) xs_node_watch(xenbus->xsh, node, key, xen_bus_backend_changed, xenbus, &local_err); if (local_err) { - /* This need not be treated as a hard error so don't propagate */ - error_reportf_err(local_err, - "failed to set up '%s' enumeration watch: ", - type[i]); + warn_reportf_err(local_err, + "failed to set up '%s' enumeration watch: ", + type[i]); + local_err = NULL; } g_free(node); @@ -380,7 +380,7 @@ static void xen_bus_unplug_request(HotplugHandler *hotplug, xen_device_unplug(xendev, errp); } -static void xen_bus_class_init(ObjectClass *class, void *data) +static void xen_bus_class_init(ObjectClass *class, const void *data) { BusClass *bus_class = BUS_CLASS(class); HotplugHandlerClass *hotplug_class = HOTPLUG_HANDLER_CLASS(class); @@ -399,7 +399,7 @@ static const TypeInfo xen_bus_type_info = { .instance_size = sizeof(XenBus), .class_size = sizeof(XenBusClass), .class_init = xen_bus_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } }, @@ -650,6 +650,16 @@ int xen_device_frontend_scanf(XenDevice *xendev, const char *key, return rc; } +char *xen_device_frontend_read(XenDevice *xendev, const char *key) +{ + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + + 
g_assert(xenbus->xsh); + + return xs_node_read(xenbus->xsh, XBT_NULL, NULL, NULL, "%s/%s", + xendev->frontend_path, key); +} + static void xen_device_frontend_set_state(XenDevice *xendev, enum xenbus_state state, bool publish) @@ -1092,13 +1102,12 @@ static void xen_device_realize(DeviceState *dev, Error **errp) xen_device_unrealize(dev); } -static Property xen_device_props[] = { +static const Property xen_device_props[] = { DEFINE_PROP_UINT16("frontend-id", XenDevice, frontend_id, DOMID_INVALID), - DEFINE_PROP_END_OF_LIST() }; -static void xen_device_class_init(ObjectClass *class, void *data) +static void xen_device_class_init(ObjectClass *class, const void *data) { DeviceClass *dev_class = DEVICE_CLASS(class); diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c index 3a9d6f981b3..78e0bc8f644 100644 --- a/hw/xen/xen-hvm-common.c +++ b/hw/xen/xen-hvm-common.c @@ -1,14 +1,21 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "qemu/error-report.h" #include "qapi/error.h" +#include "exec/target_long.h" #include "exec/target_page.h" #include "trace.h" +#include "hw/hw.h" #include "hw/pci/pci_host.h" #include "hw/xen/xen-hvm-common.h" #include "hw/xen/xen-bus.h" #include "hw/boards.h" #include "hw/xen/arch_hvm.h" +#include "system/runstate.h" +#include "system/system.h" +#include "system/xen.h" +#include "system/xen-mapcache.h" MemoryRegion xen_memory, xen_grants; @@ -667,6 +674,8 @@ static int xen_map_ioreq_server(XenIOState *state) xen_pfn_t ioreq_pfn; xen_pfn_t bufioreq_pfn; evtchn_port_t bufioreq_evtchn; + unsigned long num_frames = 1; + unsigned long frame = 1; int rc; /* @@ -675,65 +684,85 @@ static int xen_map_ioreq_server(XenIOState *state) */ QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0); QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1); + + if (state->has_bufioreq) { + frame = 0; + num_frames = 2; + } state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid, XENMEM_resource_ioreq_server, - state->ioservid, 0, 2, + state->ioservid, + frame, num_frames, &addr, PROT_READ | PROT_WRITE, 0); if (state->fres != NULL) { trace_xen_map_resource_ioreq(state->ioservid, addr); - state->buffered_io_page = addr; - state->shared_page = addr + XC_PAGE_SIZE; + state->shared_page = addr; + if (state->has_bufioreq) { + state->buffered_io_page = addr; + state->shared_page = addr + XC_PAGE_SIZE; + } } else if (errno != EOPNOTSUPP) { error_report("failed to map ioreq server resources: error %d handle=%p", errno, xen_xc); return -1; } - rc = xen_get_ioreq_server_info(xen_domid, state->ioservid, - (state->shared_page == NULL) ? - &ioreq_pfn : NULL, - (state->buffered_io_page == NULL) ? - &bufioreq_pfn : NULL, - &bufioreq_evtchn); - if (rc < 0) { - error_report("failed to get ioreq server info: error %d handle=%p", - errno, xen_xc); - return rc; - } + /* + * If we fail to map the shared page with xenforeignmemory_map_resource() + * or if we're using buffered ioreqs, we need xen_get_ioreq_server_info() + * to provide the addresses to map the shared page and/or to get the + * event-channel port for buffered ioreqs. + */ + if (state->shared_page == NULL || state->has_bufioreq) { + rc = xen_get_ioreq_server_info(xen_domid, state->ioservid, + (state->shared_page == NULL) ? + &ioreq_pfn : NULL, + (state->has_bufioreq && + state->buffered_io_page == NULL) ? 
+ &bufioreq_pfn : NULL, + &bufioreq_evtchn); + if (rc < 0) { + error_report("failed to get ioreq server info: error %d handle=%p", + errno, xen_xc); + return rc; + } - if (state->shared_page == NULL) { - trace_xen_map_ioreq_server_shared_page(ioreq_pfn); + if (state->shared_page == NULL) { + trace_xen_map_ioreq_server_shared_page(ioreq_pfn); - state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid, - PROT_READ | PROT_WRITE, - 1, &ioreq_pfn, NULL); + state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid, + PROT_READ | PROT_WRITE, + 1, &ioreq_pfn, NULL); + } if (state->shared_page == NULL) { error_report("map shared IO page returned error %d handle=%p", errno, xen_xc); } - } - if (state->buffered_io_page == NULL) { - trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn); + if (state->has_bufioreq && state->buffered_io_page == NULL) { + trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn); - state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid, - PROT_READ | PROT_WRITE, - 1, &bufioreq_pfn, - NULL); - if (state->buffered_io_page == NULL) { - error_report("map buffered IO page returned error %d", errno); - return -1; + state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid, + PROT_READ | PROT_WRITE, + 1, &bufioreq_pfn, + NULL); + if (state->buffered_io_page == NULL) { + error_report("map buffered IO page returned error %d", errno); + return -1; + } } } - if (state->shared_page == NULL || state->buffered_io_page == NULL) { + if (state->shared_page == NULL || + (state->has_bufioreq && state->buffered_io_page == NULL)) { return -1; } - trace_xen_map_ioreq_server_buffered_io_evtchn(bufioreq_evtchn); - - state->bufioreq_remote_port = bufioreq_evtchn; + if (state->has_bufioreq) { + trace_xen_map_ioreq_server_buffered_io_evtchn(bufioreq_evtchn); + state->bufioreq_remote_port = bufioreq_evtchn; + } return 0; } @@ -830,14 +859,15 @@ static void xen_do_ioreq_register(XenIOState *state, state->ioreq_local_port[i] = rc; } - rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid, - state->bufioreq_remote_port); - if (rc == -1) { - error_report("buffered evtchn bind error %d", errno); - goto err; + if (state->has_bufioreq) { + rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid, + state->bufioreq_remote_port); + if (rc == -1) { + error_report("buffered evtchn bind error %d", errno); + goto err; + } + state->bufioreq_local_port = rc; } - state->bufioreq_local_port = rc; - /* Init RAM management */ #ifdef XEN_COMPAT_PHYSMAP xen_map_cache_init(xen_phys_offset_to_gaddr, state); @@ -865,6 +895,7 @@ static void xen_do_ioreq_register(XenIOState *state, } void xen_register_ioreq(XenIOState *state, unsigned int max_cpus, + uint8_t handle_bufioreq, const MemoryListener *xen_memory_listener) { int rc; @@ -883,7 +914,8 @@ void xen_register_ioreq(XenIOState *state, unsigned int max_cpus, goto err; } - rc = xen_create_ioreq_server(xen_domid, &state->ioservid); + state->has_bufioreq = handle_bufioreq != HVM_IOREQSRV_BUFIOREQ_OFF; + rc = xen_create_ioreq_server(xen_domid, handle_bufioreq, &state->ioservid); if (!rc) { xen_do_ioreq_register(state, max_cpus, xen_memory_listener); } else { diff --git a/hw/xen/xen-legacy-backend.c b/hw/xen/xen-legacy-backend.c index 5514184f9ca..5ed53f8943e 100644 --- a/hw/xen/xen-legacy-backend.c +++ b/hw/xen/xen-legacy-backend.c @@ -147,24 +147,6 @@ void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr, } } -int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev, - bool to_domain, - 
XenGrantCopySegment segs[], - unsigned int nr_segs) -{ - int rc; - - assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV); - - rc = qemu_xen_gnttab_grant_copy(xendev->gnttabdev, to_domain, xen_domid, - segs, nr_segs, NULL); - if (rc) { - xen_pv_printf(xendev, 0, "xengnttab_grant_copy failed: %s\n", - strerror(-rc)); - } - return rc; -} - /* * get xen backend device, allocate a new one if it doesn't exist. */ @@ -181,7 +163,7 @@ static struct XenLegacyDevice *xen_be_get_xendev(const char *type, int dom, /* init new xendev */ xendev = g_malloc0(ops->size); - object_initialize(&xendev->qdev, ops->size, TYPE_XENBACKEND); + object_initialize(xendev, ops->size, TYPE_XENBACKEND); OBJECT(xendev)->free = g_free; qdev_set_id(DEVICE(xendev), g_strdup_printf("xen-%s-%d", type, dev), &error_fatal); @@ -653,29 +635,22 @@ int xen_be_bind_evtchn(struct XenLegacyDevice *xendev) } -static Property xendev_properties[] = { - DEFINE_PROP_END_OF_LIST(), -}; - -static void xendev_class_init(ObjectClass *klass, void *data) +static void xendev_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - device_class_set_props(dc, xendev_properties); set_bit(DEVICE_CATEGORY_MISC, dc->categories); - /* xen-backend devices can be plugged/unplugged dynamically */ - dc->user_creatable = true; dc->bus_type = TYPE_XENSYSBUS; } static const TypeInfo xendev_type_info = { .name = TYPE_XENBACKEND, - .parent = TYPE_DEVICE, + .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE, .class_init = xendev_class_init, - .instance_size = sizeof(struct XenLegacyDevice), + .instance_size = sizeof(XenLegacyDevice), }; -static void xen_sysbus_class_init(ObjectClass *klass, void *data) +static void xen_sysbus_class_init(ObjectClass *klass, const void *data) { HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); @@ -686,28 +661,15 @@ static const TypeInfo xensysbus_info = { .name = TYPE_XENSYSBUS, .parent = TYPE_BUS, .class_init = xen_sysbus_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { } } }; -static Property xen_sysdev_properties[] = { - {/* end of property list */}, -}; - -static void xen_sysdev_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - device_class_set_props(dc, xen_sysdev_properties); -} - static const TypeInfo xensysdev_info = { .name = TYPE_XENSYSDEV, .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(SysBusDevice), - .class_init = xen_sysdev_class_init, }; static void xenbe_register_types(void) diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c index 18ba7b1d8f5..e31d3797025 100644 --- a/hw/xen/xen-mapcache.c +++ b/hw/xen/xen-mapcache.c @@ -18,8 +18,8 @@ #include "hw/xen/xen_native.h" #include "qemu/bitmap.h" -#include "sysemu/runstate.h" -#include "sysemu/xen-mapcache.h" +#include "system/runstate.h" +#include "system/xen-mapcache.h" #include "trace.h" #include @@ -75,7 +75,8 @@ typedef struct MapCache { } MapCache; static MapCache *mapcache; -static MapCache *mapcache_grants; +static MapCache *mapcache_grants_ro; +static MapCache *mapcache_grants_rw; static xengnttab_handle *xen_region_gnttabdev; static inline void mapcache_lock(MapCache *mc) @@ -176,9 +177,12 @@ void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque) * Grant mappings must use XC_PAGE_SIZE granularity since we can't * map anything beyond the number of pages granted to us. 
*/ - mapcache_grants = xen_map_cache_init_single(f, opaque, - XC_PAGE_SHIFT, - max_mcache_size); + mapcache_grants_ro = xen_map_cache_init_single(f, opaque, + XC_PAGE_SHIFT, + max_mcache_size); + mapcache_grants_rw = xen_map_cache_init_single(f, opaque, + XC_PAGE_SHIFT, + max_mcache_size); setrlimit(RLIMIT_AS, &rlimit_as); } @@ -376,12 +380,12 @@ static uint8_t *xen_map_cache_unlocked(MapCache *mc, entry = &mc->entry[address_index % mc->nr_buckets]; - while (entry && (lock || entry->lock) && entry->vaddr_base && - (entry->paddr_index != address_index || entry->size != cache_size || + while (entry && (!entry->vaddr_base || + entry->paddr_index != address_index || entry->size != cache_size || !test_bits(address_offset >> XC_PAGE_SHIFT, test_bit_size >> XC_PAGE_SHIFT, entry->valid_mapping))) { - if (!free_entry && !entry->lock) { + if (!free_entry && (!entry->lock || !entry->vaddr_base)) { free_entry = entry; free_pentry = pentry; } @@ -456,9 +460,13 @@ uint8_t *xen_map_cache(MemoryRegion *mr, bool is_write) { bool grant = xen_mr_is_grants(mr); - MapCache *mc = grant ? mapcache_grants : mapcache; + MapCache *mc = mapcache; uint8_t *p; + if (grant) { + mc = is_write ? mapcache_grants_rw : mapcache_grants_ro; + } + if (grant && !lock) { /* * Grants are only supported via address_space_map(). Anything @@ -523,7 +531,10 @@ ram_addr_t xen_ram_addr_from_mapcache(void *ptr) addr = xen_ram_addr_from_mapcache_single(mapcache, ptr); if (addr == RAM_ADDR_INVALID) { - addr = xen_ram_addr_from_mapcache_single(mapcache_grants, ptr); + addr = xen_ram_addr_from_mapcache_single(mapcache_grants_ro, ptr); + } + if (addr == RAM_ADDR_INVALID) { + addr = xen_ram_addr_from_mapcache_single(mapcache_grants_rw, ptr); } return addr; @@ -626,7 +637,8 @@ static void xen_invalidate_map_cache_entry_single(MapCache *mc, uint8_t *buffer) static void xen_invalidate_map_cache_entry_all(uint8_t *buffer) { xen_invalidate_map_cache_entry_single(mapcache, buffer); - xen_invalidate_map_cache_entry_single(mapcache_grants, buffer); + xen_invalidate_map_cache_entry_single(mapcache_grants_ro, buffer); + xen_invalidate_map_cache_entry_single(mapcache_grants_rw, buffer); } static void xen_invalidate_map_cache_entry_bh(void *opaque) @@ -700,7 +712,6 @@ void xen_invalidate_map_cache(void) bdrv_drain_all(); xen_invalidate_map_cache_single(mapcache); - xen_invalidate_map_cache_single(mapcache_grants); } static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc, diff --git a/hw/xen/xen-pvh-common.c b/hw/xen/xen-pvh-common.c new file mode 100644 index 00000000000..b93ff80c859 --- /dev/null +++ b/hw/xen/xen-pvh-common.c @@ -0,0 +1,399 @@ +/* + * QEMU Xen PVH machine - common code. + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/units.h" +#include "qapi/visitor.h" +#include "hw/boards.h" +#include "hw/irq.h" +#include "system/tpm.h" +#include "system/tpm_backend.h" +#include "system/runstate.h" +#include "hw/xen/xen-pvh-common.h" +#include "trace.h" + +static const MemoryListener xen_memory_listener = { + .region_add = xen_region_add, + .region_del = xen_region_del, + .log_start = NULL, + .log_stop = NULL, + .log_sync = NULL, + .log_global_start = NULL, + .log_global_stop = NULL, + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, +}; + +static void xen_pvh_init_ram(XenPVHMachineState *s, + MemoryRegion *sysmem) +{ + MachineState *ms = MACHINE(s); + ram_addr_t block_len, ram_size[2]; + + if (ms->ram_size <= s->cfg.ram_low.size) { + ram_size[0] = ms->ram_size; + ram_size[1] = 0; + block_len = s->cfg.ram_low.base + ram_size[0]; + } else { + ram_size[0] = s->cfg.ram_low.size; + ram_size[1] = ms->ram_size - s->cfg.ram_low.size; + block_len = s->cfg.ram_high.base + ram_size[1]; + } + + memory_region_init_ram(&xen_memory, NULL, "xen.ram", block_len, + &error_fatal); + + memory_region_init_alias(&s->ram.low, NULL, "xen.ram.lo", &xen_memory, + s->cfg.ram_low.base, ram_size[0]); + memory_region_add_subregion(sysmem, s->cfg.ram_low.base, &s->ram.low); + if (ram_size[1] > 0) { + memory_region_init_alias(&s->ram.high, NULL, "xen.ram.hi", &xen_memory, + s->cfg.ram_high.base, ram_size[1]); + memory_region_add_subregion(sysmem, s->cfg.ram_high.base, &s->ram.high); + } + + /* Setup support for grants. */ + memory_region_init_ram(&xen_grants, NULL, "xen.grants", block_len, + &error_fatal); + memory_region_add_subregion(sysmem, XEN_GRANT_ADDR_OFF, &xen_grants); +} + +static void xen_set_irq(void *opaque, int irq, int level) +{ + if (xendevicemodel_set_irq_level(xen_dmod, xen_domid, irq, level)) { + error_report("xendevicemodel_set_irq_level failed"); + } +} + +static void xen_create_virtio_mmio_devices(XenPVHMachineState *s) +{ + int i; + + /* + * We create the transports in reverse order. Since qbus_realize() + * prepends (not appends) new child buses, the decrementing loop below will + * create a list of virtio-mmio buses with increasing base addresses. + * + * When a -device option is processed from the command line, + * qbus_find_recursive() picks the next free virtio-mmio bus in forwards + * order. + * + * This is what the Xen tools expect. + */ + for (i = s->cfg.virtio_mmio_num - 1; i >= 0; i--) { + hwaddr base = s->cfg.virtio_mmio.base + i * s->cfg.virtio_mmio.size; + qemu_irq irq = qemu_allocate_irq(xen_set_irq, NULL, + s->cfg.virtio_mmio_irq_base + i); + + sysbus_create_simple("virtio-mmio", base, irq); + + trace_xen_create_virtio_mmio_devices(i, + s->cfg.virtio_mmio_irq_base + i, + base); + } +} + +#ifdef CONFIG_TPM +static void xen_enable_tpm(XenPVHMachineState *s) +{ + Error *errp = NULL; + DeviceState *dev; + SysBusDevice *busdev; + + TPMBackend *be = qemu_find_tpm_be("tpm0"); + if (be == NULL) { + error_report("Couldn't find tmp0 backend"); + return; + } + dev = qdev_new(TYPE_TPM_TIS_SYSBUS); + object_property_set_link(OBJECT(dev), "tpmdev", OBJECT(be), &errp); + object_property_set_str(OBJECT(dev), "tpmdev", be->id, &errp); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, s->cfg.tpm.base); + + trace_xen_enable_tpm(s->cfg.tpm.base); +} +#endif + +/* + * We use the GPEX PCIe controller with its internal INTX PCI interrupt + * swizzling. 
This swizzling is emulated in QEMU and routes all INTX + * interrupts from endpoints down to only 4 INTX interrupts. + * See include/hw/pci/pci.h : pci_swizzle() + */ +static inline void xenpvh_gpex_init(XenPVHMachineState *s, + XenPVHMachineClass *xpc, + MemoryRegion *sysmem) +{ + MemoryRegion *ecam_reg; + MemoryRegion *mmio_reg; + DeviceState *dev; + int i; + + object_initialize_child(OBJECT(s), "gpex", &s->pci.gpex, + TYPE_GPEX_HOST); + dev = DEVICE(&s->pci.gpex); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion(sysmem, s->cfg.pci_ecam.base, ecam_reg); + + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); + + if (s->cfg.pci_mmio.size) { + memory_region_init_alias(&s->pci.mmio_alias, OBJECT(dev), "pcie-mmio", + mmio_reg, + s->cfg.pci_mmio.base, s->cfg.pci_mmio.size); + memory_region_add_subregion(sysmem, s->cfg.pci_mmio.base, + &s->pci.mmio_alias); + } + + if (s->cfg.pci_mmio_high.size) { + memory_region_init_alias(&s->pci.mmio_high_alias, OBJECT(dev), + "pcie-mmio-high", + mmio_reg, s->cfg.pci_mmio_high.base, s->cfg.pci_mmio_high.size); + memory_region_add_subregion(sysmem, s->cfg.pci_mmio_high.base, + &s->pci.mmio_high_alias); + } + + /* + * PVH implementations with PCI enabled must provide set_pci_intx_irq() + * and optionally an implementation of set_pci_link_route(). + */ + assert(xpc->set_pci_intx_irq); + + for (i = 0; i < PCI_NUM_PINS; i++) { + qemu_irq irq = qemu_allocate_irq(xpc->set_pci_intx_irq, s, i); + + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); + gpex_set_irq_num(GPEX_HOST(dev), i, s->cfg.pci_intx_irq_base + i); + if (xpc->set_pci_link_route) { + xpc->set_pci_link_route(i, s->cfg.pci_intx_irq_base + i); + } + } +} + +static void xen_pvh_init(MachineState *ms) +{ + XenPVHMachineState *s = XEN_PVH_MACHINE(ms); + XenPVHMachineClass *xpc = XEN_PVH_MACHINE_GET_CLASS(s); + MemoryRegion *sysmem = get_system_memory(); + + if (ms->ram_size == 0) { + warn_report("%s: ram size not specified. QEMU machine started" + " without IOREQ (no emulated devices including virtio)", + MACHINE_CLASS(object_get_class(OBJECT(ms)))->desc); + return; + } + + xen_pvh_init_ram(s, sysmem); + xen_register_ioreq(&s->ioreq, ms->smp.max_cpus, + xpc->handle_bufioreq, + &xen_memory_listener); + + if (s->cfg.virtio_mmio_num) { + xen_create_virtio_mmio_devices(s); + } + +#ifdef CONFIG_TPM + if (xpc->has_tpm) { + if (s->cfg.tpm.base) { + xen_enable_tpm(s); + } else { + warn_report("tpm-base-addr is not set. TPM will not be enabled"); + } + } +#endif + + /* Non-zero pci-ecam-size enables PCI. */ + if (s->cfg.pci_ecam.size) { + if (s->cfg.pci_ecam.size != 256 * MiB) { + error_report("pci-ecam-size only supports values 0 or 0x10000000"); + exit(EXIT_FAILURE); + } + if (!s->cfg.pci_intx_irq_base) { + error_report("PCI enabled but pci-intx-irq-base not set"); + exit(EXIT_FAILURE); + } + + xenpvh_gpex_init(s, xpc, sysmem); + } + + /* Call the implementation specific init. 
*/ + if (xpc->init) { + xpc->init(ms); + } +} + +#define XEN_PVH_PROP_MEMMAP_SETTER(n, f) \ +static void xen_pvh_set_ ## n ## _ ## f(Object *obj, Visitor *v, \ + const char *name, void *opaque, \ + Error **errp) \ +{ \ + XenPVHMachineState *xp = XEN_PVH_MACHINE(obj); \ + uint64_t value; \ + \ + if (!visit_type_size(v, name, &value, errp)) { \ + return; \ + } \ + xp->cfg.n.f = value; \ +} + +#define XEN_PVH_PROP_MEMMAP_GETTER(n, f) \ +static void xen_pvh_get_ ## n ## _ ## f(Object *obj, Visitor *v, \ + const char *name, void *opaque, \ + Error **errp) \ +{ \ + XenPVHMachineState *xp = XEN_PVH_MACHINE(obj); \ + uint64_t value = xp->cfg.n.f; \ + \ + visit_type_uint64(v, name, &value, errp); \ +} + +#define XEN_PVH_PROP_MEMMAP_BASE(n) \ + XEN_PVH_PROP_MEMMAP_SETTER(n, base) \ + XEN_PVH_PROP_MEMMAP_GETTER(n, base) \ + +#define XEN_PVH_PROP_MEMMAP_SIZE(n) \ + XEN_PVH_PROP_MEMMAP_SETTER(n, size) \ + XEN_PVH_PROP_MEMMAP_GETTER(n, size) + +#define XEN_PVH_PROP_MEMMAP(n) \ + XEN_PVH_PROP_MEMMAP_BASE(n) \ + XEN_PVH_PROP_MEMMAP_SIZE(n) + +XEN_PVH_PROP_MEMMAP(ram_low) +XEN_PVH_PROP_MEMMAP(ram_high) +/* TPM only has a base-addr option. */ +XEN_PVH_PROP_MEMMAP_BASE(tpm) +XEN_PVH_PROP_MEMMAP(virtio_mmio) +XEN_PVH_PROP_MEMMAP(pci_ecam) +XEN_PVH_PROP_MEMMAP(pci_mmio) +XEN_PVH_PROP_MEMMAP(pci_mmio_high) + +static void xen_pvh_set_pci_intx_irq_base(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + XenPVHMachineState *xp = XEN_PVH_MACHINE(obj); + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + xp->cfg.pci_intx_irq_base = value; +} + +static void xen_pvh_get_pci_intx_irq_base(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + XenPVHMachineState *xp = XEN_PVH_MACHINE(obj); + uint32_t value = xp->cfg.pci_intx_irq_base; + + visit_type_uint32(v, name, &value, errp); +} + +void xen_pvh_class_setup_common_props(XenPVHMachineClass *xpc) +{ + ObjectClass *oc = OBJECT_CLASS(xpc); + MachineClass *mc = MACHINE_CLASS(xpc); + +#define OC_MEMMAP_PROP_BASE(c, prop_name, name) \ +do { \ + object_class_property_add(c, prop_name "-base", "uint64_t", \ + xen_pvh_get_ ## name ## _base, \ + xen_pvh_set_ ## name ## _base, NULL, NULL); \ + object_class_property_set_description(oc, prop_name "-base", \ + "Set base address for " prop_name); \ +} while (0) + +#define OC_MEMMAP_PROP_SIZE(c, prop_name, name) \ +do { \ + object_class_property_add(c, prop_name "-size", "uint64_t", \ + xen_pvh_get_ ## name ## _size, \ + xen_pvh_set_ ## name ## _size, NULL, NULL); \ + object_class_property_set_description(oc, prop_name "-size", \ + "Set memory range size for " prop_name); \ +} while (0) + +#define OC_MEMMAP_PROP(c, prop_name, name) \ +do { \ + OC_MEMMAP_PROP_BASE(c, prop_name, name); \ + OC_MEMMAP_PROP_SIZE(c, prop_name, name); \ +} while (0) + + /* + * We provide memmap properties to allow Xen to move things to other + * addresses for example when users need to accomodate the memory-map + * for 1:1 mapped devices/memory. 
+ */ + OC_MEMMAP_PROP(oc, "ram-low", ram_low); + OC_MEMMAP_PROP(oc, "ram-high", ram_high); + + if (xpc->has_virtio_mmio) { + OC_MEMMAP_PROP(oc, "virtio-mmio", virtio_mmio); + } + + if (xpc->has_pci) { + OC_MEMMAP_PROP(oc, "pci-ecam", pci_ecam); + OC_MEMMAP_PROP(oc, "pci-mmio", pci_mmio); + OC_MEMMAP_PROP(oc, "pci-mmio-high", pci_mmio_high); + + object_class_property_add(oc, "pci-intx-irq-base", "uint32_t", + xen_pvh_get_pci_intx_irq_base, + xen_pvh_set_pci_intx_irq_base, + NULL, NULL); + object_class_property_set_description(oc, "pci-intx-irq-base", + "Set PCI INTX interrupt base line."); + } + +#ifdef CONFIG_TPM + if (xpc->has_tpm) { + object_class_property_add(oc, "tpm-base-addr", "uint64_t", + xen_pvh_get_tpm_base, + xen_pvh_set_tpm_base, + NULL, NULL); + object_class_property_set_description(oc, "tpm-base-addr", + "Set Base address for TPM device."); + + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS); + } +#endif +} + +static void xen_pvh_class_init(ObjectClass *oc, const void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->init = xen_pvh_init; + + mc->desc = "Xen PVH machine"; + mc->max_cpus = 1; + mc->default_machine_opts = "accel=xen"; + /* Set to zero to make sure that the real ram size is passed. */ + mc->default_ram_size = 0; +} + +static const TypeInfo xen_pvh_info = { + .name = TYPE_XEN_PVH_MACHINE, + .parent = TYPE_MACHINE, + .abstract = true, + .instance_size = sizeof(XenPVHMachineState), + .class_size = sizeof(XenPVHMachineClass), + .class_init = xen_pvh_class_init, +}; + +static void xen_pvh_register_types(void) +{ + type_register_static(&xen_pvh_info); +} + +type_init(xen_pvh_register_types); diff --git a/hw/xen/xen_devconfig.c b/hw/xen/xen_devconfig.c index 2150869f60c..5698cc7c0eb 100644 --- a/hw/xen/xen_devconfig.c +++ b/hw/xen/xen_devconfig.c @@ -1,8 +1,8 @@ #include "qemu/osdep.h" #include "hw/xen/xen-legacy-backend.h" #include "qemu/option.h" -#include "sysemu/blockdev.h" -#include "sysemu/sysemu.h" +#include "system/blockdev.h" +#include "system/system.h" /* ------------------------------------------------------------- */ @@ -66,11 +66,3 @@ int xen_config_dev_vkbd(int vdev) xen_config_dev_dirs("vkbd", "vkbd", vdev, fe, be, sizeof(fe)); return xen_config_dev_all(fe, be); } - -int xen_config_dev_console(int vdev) -{ - char fe[256], be[256]; - - xen_config_dev_dirs("console", "console", vdev, fe, be, sizeof(fe)); - return xen_config_dev_all(fe, be); -} diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c index 3635d1b39f7..006b5b55f24 100644 --- a/hw/xen/xen_pt.c +++ b/hw/xen/xen_pt.c @@ -54,6 +54,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include #include "hw/pci/pci.h" @@ -766,6 +767,57 @@ static void xen_pt_destroy(PCIDevice *d) { } /* init */ +#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 42000 +static bool xen_pt_need_gsi(void) +{ + FILE *fp; + int len; + /* + * The max length of guest_type is "PVH"+'\n'+'\0', it is 5, + * so here set the length of type to be twice. 
+ */ + char type[10]; + const char *guest_type = "/sys/hypervisor/guest_type"; + + fp = fopen(guest_type, "r"); + if (!fp) { + error_report("Cannot open %s: %s", guest_type, strerror(errno)); + return false; + } + + if (fgets(type, sizeof(type), fp)) { + len = strlen(type); + if (len) { + type[len - 1] = '\0'; + if (!strcmp(type, "PVH")) { + fclose(fp); + return true; + } + } + } + + fclose(fp); + return false; +} + +static int xen_pt_map_pirq_for_gsi(PCIDevice *d, int *pirq) +{ + int gsi; + XenPCIPassthroughState *s = XEN_PT_DEVICE(d); + + gsi = xc_pcidev_get_gsi(xen_xc, + PCI_SBDF(s->real_device.domain, + s->real_device.bus, + s->real_device.dev, + s->real_device.func)); + if (gsi >= 0) { + return xc_physdev_map_pirq_gsi(xen_xc, xen_domid, gsi, pirq); + } + + return gsi; +} +#endif + static void xen_pt_realize(PCIDevice *d, Error **errp) { ERRP_GUARD(); @@ -847,7 +899,16 @@ static void xen_pt_realize(PCIDevice *d, Error **errp) goto out; } +#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 42000 + if (xen_pt_need_gsi()) { + rc = xen_pt_map_pirq_for_gsi(d, &pirq); + } else { + rc = xc_physdev_map_pirq(xen_xc, xen_domid, machine_irq, &pirq); + } +#else rc = xc_physdev_map_pirq(xen_xc, xen_domid, machine_irq, &pirq); +#endif + if (rc < 0) { XEN_PT_ERR(d, "Mapping machine irq %u to pirq %i failed, (err: %d)\n", machine_irq, pirq, errno); @@ -931,10 +992,9 @@ static void xen_pt_unregister_device(PCIDevice *d) xen_pt_destroy(d); } -static Property xen_pci_passthrough_properties[] = { +static const Property xen_pci_passthrough_properties[] = { DEFINE_PROP_PCI_HOST_DEVADDR("hostaddr", XenPCIPassthroughState, hostaddr), DEFINE_PROP_BOOL("permissive", XenPCIPassthroughState, permissive, false), - DEFINE_PROP_END_OF_LIST(), }; static void xen_pci_passthrough_instance_init(Object *obj) @@ -988,7 +1048,7 @@ static void xen_igd_clear_slot(DeviceState *qdev, Error **errp) xpdc->pci_qdev_realize(qdev, errp); } -static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data) +static void xen_pci_passthrough_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -1020,7 +1080,7 @@ static const TypeInfo xen_pci_passthrough_info = { .class_init = xen_pci_passthrough_class_init, .class_size = sizeof(XenPTDeviceClass), .instance_init = xen_pci_passthrough_instance_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { INTERFACE_PCIE_DEVICE }, { }, diff --git a/hw/xen/xen_pt_graphics.c b/hw/xen/xen_pt_graphics.c index 6c2e3f4840f..2c0cec97234 100644 --- a/hw/xen/xen_pt_graphics.c +++ b/hw/xen/xen_pt_graphics.c @@ -347,7 +347,7 @@ static const IGDDeviceIDInfo igd_combo_id_infos[] = { {0x162D, 0x9cc3, 0x03}, /* BDWGT3SRVR, BDWM_w7 */ }; -static void isa_bridge_class_init(ObjectClass *klass, void *data) +static void isa_bridge_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); @@ -363,7 +363,7 @@ static const TypeInfo isa_bridge_info = { .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIDevice), .class_init = isa_bridge_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, }, diff --git a/hw/xen/xen_pvdev.c b/hw/xen/xen_pvdev.c index c5ad71e8dc4..fe95b62d133 100644 --- a/hw/xen/xen_pvdev.c +++ b/hw/xen/xen_pvdev.c @@ -22,6 +22,7 @@ #include "qemu/main-loop.h" #include "hw/qdev-core.h" #include 
"hw/xen/xen-legacy-backend.h" +#include "hw/xen/xen-bus-helper.h" #include "hw/xen/xen_pvdev.h" /* private */ @@ -81,12 +82,9 @@ int xenstore_write_str(const char *base, const char *node, const char *val) char *xenstore_read_str(const char *base, const char *node) { - char abspath[XEN_BUFSIZE]; - unsigned int len; char *str, *ret = NULL; - snprintf(abspath, sizeof(abspath), "%s/%s", base, node); - str = qemu_xen_xs_read(xenstore, 0, abspath, &len); + str = xs_node_read(xenstore, 0, NULL, NULL, "%s/%s", base, node); if (str != NULL) { /* move to qemu-allocated memory to make sure * callers can safely g_free() stuff. */ @@ -275,7 +273,7 @@ void xen_pv_del_xendev(struct XenLegacyDevice *xendev) QTAILQ_REMOVE(&xendevs, xendev, next); - qdev_unplug(&xendev->qdev, NULL); + qdev_unplug(DEVICE(xendev), NULL); } void xen_pv_insert_xendev(struct XenLegacyDevice *xendev) diff --git a/hw/xen/xen_stubs.c b/hw/xen/xen_stubs.c new file mode 100644 index 00000000000..5e565df3929 --- /dev/null +++ b/hw/xen/xen_stubs.c @@ -0,0 +1,51 @@ +/* + * Various stubs for xen functions + * + * Those functions are used only if xen_enabled(). This file is linked only if + * CONFIG_XEN is not set, so they should never be called. + * + * Copyright (c) 2025 Linaro, Ltd. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "system/xen.h" +#include "system/xen-mapcache.h" + +void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) +{ + g_assert_not_reached(); +} + +void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, + struct MemoryRegion *mr, Error **errp) +{ + g_assert_not_reached(); +} + +bool xen_mr_is_memory(MemoryRegion *mr) +{ + g_assert_not_reached(); +} + +void xen_invalidate_map_cache_entry(uint8_t *buffer) +{ + g_assert_not_reached(); +} + +ram_addr_t xen_ram_addr_from_mapcache(void *ptr) +{ + g_assert_not_reached(); +} + +uint8_t *xen_map_cache(MemoryRegion *mr, + hwaddr phys_addr, + hwaddr size, + ram_addr_t ram_addr_offset, + uint8_t lock, + bool dma, + bool is_write) +{ + g_assert_not_reached(); +} diff --git a/hw/xenpv/xen_machine_pv.c b/hw/xenpv/xen_machine_pv.c index 24395f42cbb..99c02492ef9 100644 --- a/hw/xenpv/xen_machine_pv.c +++ b/hw/xenpv/xen_machine_pv.c @@ -27,8 +27,8 @@ #include "hw/boards.h" #include "hw/xen/xen-legacy-backend.h" #include "hw/xen/xen-bus.h" -#include "sysemu/block-backend.h" -#include "sysemu/sysemu.h" +#include "system/block-backend.h" +#include "system/system.h" static void xen_init_pv(MachineState *machine) { diff --git a/hw/xtensa/Kconfig b/hw/xtensa/Kconfig index fc5c785cfac..1f0492d89c0 100644 --- a/hw/xtensa/Kconfig +++ b/hw/xtensa/Kconfig @@ -18,4 +18,4 @@ config XTENSA_XTFPGA select DEVICE_TREE select OPENCORES_ETH select PFLASH_CFI01 - select SERIAL + select SERIAL_MM diff --git a/hw/xtensa/bootparam.h b/hw/xtensa/bootparam.h index f57ff850bcb..4418c78d5bb 100644 --- a/hw/xtensa/bootparam.h +++ b/hw/xtensa/bootparam.h @@ -2,6 +2,7 @@ #define HW_XTENSA_BOOTPARAM_H #include "exec/cpu-common.h" +#include "exec/tswap.h" #define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/ #define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */ diff --git a/hw/xtensa/pic_cpu.c b/hw/xtensa/pic_cpu.c index 8cef88c61bc..e3885316106 100644 --- a/hw/xtensa/pic_cpu.c +++ b/hw/xtensa/pic_cpu.c @@ -27,6 +27,7 @@ #include "qemu/osdep.h" #include "cpu.h" +#include "exec/cpu-interrupt.h" #include "hw/irq.h" #include "qemu/log.h" #include "qemu/timer.h" diff --git a/hw/xtensa/sim.c b/hw/xtensa/sim.c index 
2160e619645..49d17e7bb29 100644 --- a/hw/xtensa/sim.c +++ b/hw/xtensa/sim.c @@ -27,12 +27,12 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "sysemu/reset.h" -#include "sysemu/sysemu.h" +#include "system/reset.h" +#include "system/system.h" #include "hw/boards.h" #include "hw/loader.h" #include "elf.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qemu/error-report.h" #include "xtensa_memory.h" #include "xtensa_sim.h" @@ -100,7 +100,8 @@ void xtensa_sim_load_kernel(XtensaCPU *cpu, MachineState *machine) if (kernel_filename) { uint64_t elf_entry; int success = load_elf(kernel_filename, NULL, translate_phys_addr, cpu, - &elf_entry, NULL, NULL, NULL, TARGET_BIG_ENDIAN, + &elf_entry, NULL, NULL, NULL, + TARGET_BIG_ENDIAN ? ELFDATA2MSB : ELFDATA2LSB, EM_XTENSA, 0, 0); if (success > 0) { diff --git a/hw/xtensa/virt.c b/hw/xtensa/virt.c index 5310a888613..b10866ccd85 100644 --- a/hw/xtensa/virt.c +++ b/hw/xtensa/virt.c @@ -27,13 +27,13 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "hw/boards.h" #include "hw/loader.h" #include "hw/pci-host/gpex.h" #include "net/net.h" #include "elf.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qemu/error-report.h" #include "xtensa_memory.h" #include "xtensa_sim.h" @@ -93,7 +93,7 @@ static void create_pcie(MachineState *ms, CPUXtensaState *env, int irq_base, /* Connect IRQ lines. */ extints = xtensa_get_extints(env); - for (i = 0; i < GPEX_NUM_IRQS; i++) { + for (i = 0; i < PCI_NUM_PINS; i++) { void *q = extints[irq_base + i]; sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, q); diff --git a/hw/xtensa/xtensa_memory.c b/hw/xtensa/xtensa_memory.c index 2c1095f0170..13a6077d868 100644 --- a/hw/xtensa/xtensa_memory.c +++ b/hw/xtensa/xtensa_memory.c @@ -27,7 +27,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qemu/error-report.h" #include "xtensa_memory.h" diff --git a/hw/xtensa/xtfpga.c b/hw/xtensa/xtfpga.c index 955e8867a36..6efffae466b 100644 --- a/hw/xtensa/xtfpga.c +++ b/hw/xtensa/xtfpga.c @@ -29,20 +29,21 @@ #include "qemu/units.h" #include "qapi/error.h" #include "cpu.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/boards.h" #include "hw/loader.h" #include "hw/qdev-properties.h" #include "elf.h" -#include "exec/memory.h" -#include "hw/char/serial.h" +#include "system/memory.h" +#include "exec/tswap.h" +#include "hw/char/serial-mm.h" #include "net/net.h" #include "hw/sysbus.h" #include "hw/block/flash.h" #include "chardev/char.h" -#include "sysemu/device_tree.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" +#include "system/device_tree.h" +#include "system/reset.h" +#include "system/runstate.h" #include "qemu/error-report.h" #include "qemu/option.h" #include "bootparam.h" @@ -397,7 +398,8 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine) uint64_t elf_entry; int success = load_elf(kernel_filename, NULL, translate_phys_addr, cpu, - &elf_entry, NULL, NULL, NULL, TARGET_BIG_ENDIAN, + &elf_entry, NULL, NULL, NULL, + TARGET_BIG_ENDIAN ? 
ELFDATA2MSB : ELFDATA2LSB, EM_XTENSA, 0, 0); if (success > 0) { entry_point = elf_entry; @@ -415,8 +417,7 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine) } } if (entry_point != env->pc) { - uint8_t boot[] = { -#if TARGET_BIG_ENDIAN + uint8_t boot_be[] = { 0x60, 0x00, 0x08, /* j 1f */ 0x00, /* .literal_position */ 0x00, 0x00, 0x00, 0x00, /* .literal entry_pc */ @@ -425,7 +426,8 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine) 0x10, 0xff, 0xfe, /* l32r a0, entry_pc */ 0x12, 0xff, 0xfe, /* l32r a2, entry_a2 */ 0x0a, 0x00, 0x00, /* jx a0 */ -#else + }; + uint8_t boot_le[] = { 0x06, 0x02, 0x00, /* j 1f */ 0x00, /* .literal_position */ 0x00, 0x00, 0x00, 0x00, /* .literal entry_pc */ @@ -434,14 +436,16 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine) 0x01, 0xfe, 0xff, /* l32r a0, entry_pc */ 0x21, 0xfe, 0xff, /* l32r a2, entry_a2 */ 0xa0, 0x00, 0x00, /* jx a0 */ -#endif }; + const size_t boot_sz = TARGET_BIG_ENDIAN ? sizeof(boot_be) + : sizeof(boot_le); + uint8_t *boot = TARGET_BIG_ENDIAN ? boot_be : boot_le; uint32_t entry_pc = tswap32(entry_point); uint32_t entry_a2 = tswap32(tagptr); memcpy(boot + 4, &entry_pc, sizeof(entry_pc)); memcpy(boot + 8, &entry_a2, sizeof(entry_a2)); - cpu_physical_memory_write(env->pc, boot, sizeof(boot)); + cpu_physical_memory_write(env->pc, boot, boot_sz); } } else { if (flash) { @@ -581,7 +585,7 @@ static void xtfpga_kc705_nommu_init(MachineState *machine) xtfpga_init(&kc705_board, machine); } -static void xtfpga_lx60_class_init(ObjectClass *oc, void *data) +static void xtfpga_lx60_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -598,7 +602,7 @@ static const TypeInfo xtfpga_lx60_type = { .class_init = xtfpga_lx60_class_init, }; -static void xtfpga_lx60_nommu_class_init(ObjectClass *oc, void *data) +static void xtfpga_lx60_nommu_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -615,7 +619,7 @@ static const TypeInfo xtfpga_lx60_nommu_type = { .class_init = xtfpga_lx60_nommu_class_init, }; -static void xtfpga_lx200_class_init(ObjectClass *oc, void *data) +static void xtfpga_lx200_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -632,7 +636,7 @@ static const TypeInfo xtfpga_lx200_type = { .class_init = xtfpga_lx200_class_init, }; -static void xtfpga_lx200_nommu_class_init(ObjectClass *oc, void *data) +static void xtfpga_lx200_nommu_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -649,7 +653,7 @@ static const TypeInfo xtfpga_lx200_nommu_type = { .class_init = xtfpga_lx200_nommu_class_init, }; -static void xtfpga_ml605_class_init(ObjectClass *oc, void *data) +static void xtfpga_ml605_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -666,7 +670,7 @@ static const TypeInfo xtfpga_ml605_type = { .class_init = xtfpga_ml605_class_init, }; -static void xtfpga_ml605_nommu_class_init(ObjectClass *oc, void *data) +static void xtfpga_ml605_nommu_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -683,7 +687,7 @@ static const TypeInfo xtfpga_ml605_nommu_type = { .class_init = xtfpga_ml605_nommu_class_init, }; -static void xtfpga_kc705_class_init(ObjectClass *oc, void *data) +static void xtfpga_kc705_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -700,7 +704,7 @@ static const TypeInfo xtfpga_kc705_type = { 
.class_init = xtfpga_kc705_class_init, }; -static void xtfpga_kc705_nommu_class_init(ObjectClass *oc, void *data) +static void xtfpga_kc705_nommu_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); diff --git a/include/accel/accel-cpu-ops.h b/include/accel/accel-cpu-ops.h new file mode 100644 index 00000000000..0674764914f --- /dev/null +++ b/include/accel/accel-cpu-ops.h @@ -0,0 +1,95 @@ +/* + * Accelerator per-vCPU handlers + * + * Copyright 2021 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_ACCEL_CPU_OPS_H +#define QEMU_ACCEL_CPU_OPS_H + +#include "qemu/accel.h" +#include "exec/vaddr.h" +#include "qom/object.h" + +#define ACCEL_OPS_SUFFIX "-ops" +#define TYPE_ACCEL_OPS "accel" ACCEL_OPS_SUFFIX +#define ACCEL_OPS_NAME(name) (name "-" TYPE_ACCEL_OPS) + +DECLARE_CLASS_CHECKERS(AccelOpsClass, ACCEL_OPS, TYPE_ACCEL_OPS) + +/** + * struct AccelOpsClass - accelerator interfaces + * + * This structure is used to abstract accelerator differences from the + * core CPU code. Not all have to be implemented. + */ +struct AccelOpsClass { + /*< private >*/ + ObjectClass parent_class; + /*< public >*/ + + /* initialization function called when accel is chosen */ + void (*ops_init)(AccelClass *ac); + + bool (*cpus_are_resettable)(void); + void (*cpu_reset_hold)(CPUState *cpu); + + void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY NON-NULL */ + void (*kick_vcpu_thread)(CPUState *cpu); + bool (*cpu_thread_is_idle)(CPUState *cpu); + + /** + * synchronize_post_reset: + * synchronize_post_init: + * @cpu: The vCPU to synchronize. + * + * Request to synchronize QEMU vCPU registers to the hardware accelerator + * (QEMU is the reference). + */ + void (*synchronize_post_reset)(CPUState *cpu); + void (*synchronize_post_init)(CPUState *cpu); + /** + * synchronize_state: + * synchronize_pre_loadvm: + * @cpu: The vCPU to synchronize. + * + * Request to synchronize QEMU vCPU registers from the hardware accelerator + * (the hardware accelerator is the reference). + */ + void (*synchronize_state)(CPUState *cpu); + void (*synchronize_pre_loadvm)(CPUState *cpu); + + /* handle_interrupt is mandatory. */ + void (*handle_interrupt)(CPUState *cpu, int mask); + + /* get_vcpu_stats: Append statistics of this @cpu to @buf */ + void (*get_vcpu_stats)(CPUState *cpu, GString *buf); + + /** + * @get_virtual_clock: fetch virtual clock + * @set_virtual_clock: set virtual clock + * + * These allow the timer subsystem to defer to the accelerator to + * fetch time. The set function is needed if the accelerator wants + * to track the changes to time as the timer is warped through + * various timer events. 
+ */ + int64_t (*get_virtual_clock)(void); + void (*set_virtual_clock)(int64_t time); + + int64_t (*get_elapsed_ticks)(void); + + /* gdbstub hooks */ + bool (*supports_guest_debug)(void); + int (*update_guest_debug)(CPUState *cpu); + int (*insert_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len); + int (*remove_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len); + void (*remove_all_breakpoints)(CPUState *cpu); +}; + +void generic_handle_interrupt(CPUState *cpu, int mask); + +#endif /* QEMU_ACCEL_CPU_OPS_H */ diff --git a/include/accel/accel-cpu-target.h b/include/accel/accel-cpu-target.h new file mode 100644 index 00000000000..6feb344e29b --- /dev/null +++ b/include/accel/accel-cpu-target.h @@ -0,0 +1,31 @@ +/* + * Accelerator interface, specializes CPUClass + * This header is used only by target-specific code. + * + * Copyright 2021 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef ACCEL_CPU_TARGET_H +#define ACCEL_CPU_TARGET_H + +/* + * This header is used to define new accelerator-specific target-specific + * accelerator cpu subclasses. + * It uses CPU_RESOLVING_TYPE, so this is clearly target-specific. + * + * Do not try to use for any other purpose than the implementation of new + * subclasses in target/, or the accel implementation itself in accel/ + */ + +#include "qom/object.h" +#include "accel/accel-cpu.h" +#include "cpu.h" + +#define TYPE_ACCEL_CPU "accel-" CPU_RESOLVING_TYPE +#define ACCEL_CPU_NAME(name) (name "-" TYPE_ACCEL_CPU) +DECLARE_CLASS_CHECKERS(AccelCPUClass, ACCEL_CPU, TYPE_ACCEL_CPU) + +#endif /* ACCEL_CPU_H */ diff --git a/include/accel/accel-cpu.h b/include/accel/accel-cpu.h new file mode 100644 index 00000000000..9e7eede7c3c --- /dev/null +++ b/include/accel/accel-cpu.h @@ -0,0 +1,23 @@ +/* + * Accelerator interface, specializes CPUClass + * + * Copyright 2021 SUSE LLC + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef ACCEL_CPU_H +#define ACCEL_CPU_H + +#include "qom/object.h" +#include "hw/core/cpu.h" + +typedef struct AccelCPUClass { + ObjectClass parent_class; + + void (*cpu_class_init)(CPUClass *cc); + void (*cpu_instance_init)(CPUState *cpu); + bool (*cpu_target_realize)(CPUState *cpu, Error **errp); +} AccelCPUClass; + +#endif /* ACCEL_CPU_H */ diff --git a/include/accel/accel-ops.h b/include/accel/accel-ops.h new file mode 100644 index 00000000000..23a8c246e15 --- /dev/null +++ b/include/accel/accel-ops.h @@ -0,0 +1,51 @@ +/* + * Accelerator handlers + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef ACCEL_OPS_H +#define ACCEL_OPS_H + +#include "exec/hwaddr.h" +#include "qemu/accel.h" +#include "qom/object.h" + +struct AccelState { + Object parent_obj; +}; + +struct AccelClass { + ObjectClass parent_class; + + const char *name; + /* Cached by accel_init_ops_interfaces() when created */ + AccelOpsClass *ops; + + int (*init_machine)(AccelState *as, MachineState *ms); + bool (*cpu_common_realize)(CPUState *cpu, Error **errp); + void (*cpu_common_unrealize)(CPUState *cpu); + /* get_stats: Append statistics to @buf */ + void (*get_stats)(AccelState *as, GString *buf); + + /* system related hooks */ + void (*setup_post)(AccelState *as); + void (*pre_resume_vm)(AccelState *as, bool step_pending); + bool (*has_memory)(AccelState *accel, AddressSpace *as, + hwaddr start_addr, hwaddr size); + + /* gdbstub related hooks */ + int (*gdbstub_supported_sstep_flags)(AccelState *as); + + bool *allowed; + /* + * Array 
of global properties that would be applied when specific + * accelerator is chosen. It works like MachineClass.compat_props + * but it's for accelerators not machines. Accelerator-provided + * global properties may be overridden by machine-type + * compat_props or user-provided global properties. + */ + GPtrArray *compat_props; +}; + +#endif /* ACCEL_OPS_H */ diff --git a/include/accel/tcg/cpu-ldst-common.h b/include/accel/tcg/cpu-ldst-common.h new file mode 100644 index 00000000000..8bf17c2fab0 --- /dev/null +++ b/include/accel/tcg/cpu-ldst-common.h @@ -0,0 +1,122 @@ +/* + * Software MMU support + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef ACCEL_TCG_CPU_LDST_COMMON_H +#define ACCEL_TCG_CPU_LDST_COMMON_H + +#ifndef CONFIG_TCG +#error Can only include this header with TCG +#endif + +#include "exec/memopidx.h" +#include "exec/vaddr.h" +#include "exec/mmu-access-type.h" +#include "qemu/int128.h" + +uint8_t cpu_ldb_mmu(CPUArchState *env, vaddr ptr, MemOpIdx oi, uintptr_t ra); +uint16_t cpu_ldw_mmu(CPUArchState *env, vaddr ptr, MemOpIdx oi, uintptr_t ra); +uint32_t cpu_ldl_mmu(CPUArchState *env, vaddr ptr, MemOpIdx oi, uintptr_t ra); +uint64_t cpu_ldq_mmu(CPUArchState *env, vaddr ptr, MemOpIdx oi, uintptr_t ra); +Int128 cpu_ld16_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra); + +void cpu_stb_mmu(CPUArchState *env, vaddr ptr, uint8_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stw_mmu(CPUArchState *env, vaddr ptr, uint16_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stl_mmu(CPUArchState *env, vaddr ptr, uint32_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stq_mmu(CPUArchState *env, vaddr ptr, uint64_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_st16_mmu(CPUArchState *env, vaddr addr, Int128 val, + MemOpIdx oi, uintptr_t ra); + +uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, vaddr addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, vaddr addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, vaddr addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, vaddr addr, + uint64_t cmpv, uint64_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, vaddr addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, vaddr addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, vaddr addr, + uint64_t cmpv, uint64_t newv, + MemOpIdx oi, uintptr_t retaddr); + +#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \ +TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \ + (CPUArchState *env, vaddr addr, TYPE val, \ + MemOpIdx oi, uintptr_t retaddr); + +#ifdef CONFIG_ATOMIC64 +#define GEN_ATOMIC_HELPER_ALL(NAME) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \ + GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \ + GEN_ATOMIC_HELPER(NAME, uint64_t, q_be) +#else +#define GEN_ATOMIC_HELPER_ALL(NAME) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) +#endif + 
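/*
 * For reference, each GEN_ATOMIC_HELPER_ALL() instantiation below only
 * declares prototypes; with CONFIG_ATOMIC64 set, for example,
 * GEN_ATOMIC_HELPER_ALL(fetch_add) expands to declarations such as:
 *
 *   uint32_t cpu_atomic_fetch_addb_mmu(CPUArchState *env, vaddr addr,
 *                                      uint32_t val, MemOpIdx oi,
 *                                      uintptr_t retaddr);
 *   uint32_t cpu_atomic_fetch_addl_le_mmu(CPUArchState *env, vaddr addr,
 *                                         uint32_t val, MemOpIdx oi,
 *                                         uintptr_t retaddr);
 *   uint64_t cpu_atomic_fetch_addq_be_mmu(CPUArchState *env, vaddr addr,
 *                                         uint64_t val, MemOpIdx oi,
 *                                         uintptr_t retaddr);
 *
 * i.e. one helper per size/endianness suffix (b, w_le, w_be, l_le, l_be,
 * q_le, q_be).
 */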
+GEN_ATOMIC_HELPER_ALL(fetch_add) +GEN_ATOMIC_HELPER_ALL(fetch_sub) +GEN_ATOMIC_HELPER_ALL(fetch_and) +GEN_ATOMIC_HELPER_ALL(fetch_or) +GEN_ATOMIC_HELPER_ALL(fetch_xor) +GEN_ATOMIC_HELPER_ALL(fetch_smin) +GEN_ATOMIC_HELPER_ALL(fetch_umin) +GEN_ATOMIC_HELPER_ALL(fetch_smax) +GEN_ATOMIC_HELPER_ALL(fetch_umax) + +GEN_ATOMIC_HELPER_ALL(add_fetch) +GEN_ATOMIC_HELPER_ALL(sub_fetch) +GEN_ATOMIC_HELPER_ALL(and_fetch) +GEN_ATOMIC_HELPER_ALL(or_fetch) +GEN_ATOMIC_HELPER_ALL(xor_fetch) +GEN_ATOMIC_HELPER_ALL(smin_fetch) +GEN_ATOMIC_HELPER_ALL(umin_fetch) +GEN_ATOMIC_HELPER_ALL(smax_fetch) +GEN_ATOMIC_HELPER_ALL(umax_fetch) + +GEN_ATOMIC_HELPER_ALL(xchg) + +#undef GEN_ATOMIC_HELPER_ALL +#undef GEN_ATOMIC_HELPER + +Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, vaddr addr, + Int128 cmpv, Int128 newv, + MemOpIdx oi, uintptr_t retaddr); +Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, vaddr addr, + Int128 cmpv, Int128 newv, + MemOpIdx oi, uintptr_t retaddr); + +uint8_t cpu_ldb_code_mmu(CPUArchState *env, vaddr addr, + MemOpIdx oi, uintptr_t ra); +uint16_t cpu_ldw_code_mmu(CPUArchState *env, vaddr addr, + MemOpIdx oi, uintptr_t ra); +uint32_t cpu_ldl_code_mmu(CPUArchState *env, vaddr addr, + MemOpIdx oi, uintptr_t ra); +uint64_t cpu_ldq_code_mmu(CPUArchState *env, vaddr addr, + MemOpIdx oi, uintptr_t ra); + +#endif /* ACCEL_TCG_CPU_LDST_COMMON_H */ diff --git a/include/accel/tcg/cpu-ldst.h b/include/accel/tcg/cpu-ldst.h new file mode 100644 index 00000000000..0de7f5eaa6b --- /dev/null +++ b/include/accel/tcg/cpu-ldst.h @@ -0,0 +1,505 @@ +/* + * Software MMU support (per-target) + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +/* + * Generate inline load/store functions for all MMU modes (typically + * at least _user and _kernel) as well as _data versions, for all data + * sizes. + * + * Used by target op helpers. + * + * The syntax for the accessors is: + * + * load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr) + * cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr) + * cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr) + * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr) + * + * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val) + * cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr) + * cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr) + * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr) + * + * sign is: + * (empty): for 32 and 64 bit sizes + * u : unsigned + * s : signed + * + * size is: + * b: 8 bits + * w: 16 bits + * l: 32 bits + * q: 64 bits + * + * end is: + * (empty): for target native endian, or for 8 bit access + * _be: for forced big endian + * _le: for forced little endian + * + * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx". + * The "mmuidx" suffix carries an extra mmu_idx argument that specifies + * the index to use; the "data" and "code" suffixes take the index from + * cpu_mmu_index(). 
+ * + * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the + * MemOp including alignment requirements. The alignment will be enforced. + */ +#ifndef ACCEL_TCG_CPU_LDST_H +#define ACCEL_TCG_CPU_LDST_H + +#ifndef CONFIG_TCG +#error Can only include this header with TCG +#endif + +#include "exec/cpu-common.h" +#include "accel/tcg/cpu-ldst-common.h" +#include "accel/tcg/cpu-mmu-index.h" +#include "exec/abi_ptr.h" + +#if defined(CONFIG_USER_ONLY) +#include "user/guest-host.h" +#endif /* CONFIG_USER_ONLY */ + +static inline uint32_t +cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + return cpu_ldb_mmu(env, addr, oi, ra); +} + +static inline int +cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, int mmu_idx, uintptr_t ra) +{ + return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra); +} + +static inline uint32_t +cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); + return cpu_ldw_mmu(env, addr, oi, ra); +} + +static inline int +cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra); +} + +static inline uint32_t +cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); + return cpu_ldl_mmu(env, addr, oi, ra); +} + +static inline uint64_t +cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx); + return cpu_ldq_mmu(env, addr, oi, ra); +} + +static inline uint32_t +cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); + return cpu_ldw_mmu(env, addr, oi, ra); +} + +static inline int +cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra); +} + +static inline uint32_t +cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); + return cpu_ldl_mmu(env, addr, oi, ra); +} + +static inline uint64_t +cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx); + return cpu_ldq_mmu(env, addr, oi, ra); +} + +static inline void +cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + cpu_stb_mmu(env, addr, val, oi, ra); +} + +static inline void +cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); + cpu_stw_mmu(env, addr, val, oi, ra); +} + +static inline void +cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); + cpu_stl_mmu(env, addr, val, oi, ra); +} + +static inline void +cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx); + cpu_stq_mmu(env, addr, val, oi, ra); +} + +static inline void +cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, 
uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); + cpu_stw_mmu(env, addr, val, oi, ra); +} + +static inline void +cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); + cpu_stl_mmu(env, addr, val, oi, ra); +} + +static inline void +cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx); + cpu_stq_mmu(env, addr, val, oi, ra); +} + +/*--------------------------*/ + +static inline uint32_t +cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_ldub_mmuidx_ra(env, addr, mmu_index, ra); +} + +static inline int +cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int8_t)cpu_ldub_data_ra(env, addr, ra); +} + +static inline uint32_t +cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_lduw_be_mmuidx_ra(env, addr, mmu_index, ra); +} + +static inline int +cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int16_t)cpu_lduw_be_data_ra(env, addr, ra); +} + +static inline uint32_t +cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_ldl_be_mmuidx_ra(env, addr, mmu_index, ra); +} + +static inline uint64_t +cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_ldq_be_mmuidx_ra(env, addr, mmu_index, ra); +} + +static inline uint32_t +cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_lduw_le_mmuidx_ra(env, addr, mmu_index, ra); +} + +static inline int +cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int16_t)cpu_lduw_le_data_ra(env, addr, ra); +} + +static inline uint32_t +cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_ldl_le_mmuidx_ra(env, addr, mmu_index, ra); +} + +static inline uint64_t +cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + return cpu_ldq_le_mmuidx_ra(env, addr, mmu_index, ra); +} + +static inline void +cpu_stb_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + cpu_stb_mmuidx_ra(env, addr, val, mmu_index, ra); +} + +static inline void +cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + cpu_stw_be_mmuidx_ra(env, addr, val, mmu_index, ra); +} + +static inline void +cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + cpu_stl_be_mmuidx_ra(env, addr, val, mmu_index, ra); +} + +static inline void +cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr, uint64_t val, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + cpu_stq_be_mmuidx_ra(env, addr, val, mmu_index, ra); +} + +static inline void +cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + 
cpu_stw_le_mmuidx_ra(env, addr, val, mmu_index, ra); +} + +static inline void +cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr, uint32_t val, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + cpu_stl_le_mmuidx_ra(env, addr, val, mmu_index, ra); +} + +static inline void +cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr, uint64_t val, uintptr_t ra) +{ + int mmu_index = cpu_mmu_index(env_cpu(env), false); + cpu_stq_le_mmuidx_ra(env, addr, val, mmu_index, ra); +} + +/*--------------------------*/ + +static inline uint32_t +cpu_ldub_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldub_data_ra(env, addr, 0); +} + +static inline int +cpu_ldsb_data(CPUArchState *env, abi_ptr addr) +{ + return (int8_t)cpu_ldub_data(env, addr); +} + +static inline uint32_t +cpu_lduw_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_lduw_be_data_ra(env, addr, 0); +} + +static inline int +cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr) +{ + return (int16_t)cpu_lduw_be_data(env, addr); +} + +static inline uint32_t +cpu_ldl_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldl_be_data_ra(env, addr, 0); +} + +static inline uint64_t +cpu_ldq_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldq_be_data_ra(env, addr, 0); +} + +static inline uint32_t +cpu_lduw_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_lduw_le_data_ra(env, addr, 0); +} + +static inline int +cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr) +{ + return (int16_t)cpu_lduw_le_data(env, addr); +} + +static inline uint32_t +cpu_ldl_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldl_le_data_ra(env, addr, 0); +} + +static inline uint64_t +cpu_ldq_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldq_le_data_ra(env, addr, 0); +} + +static inline void +cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stb_data_ra(env, addr, val, 0); +} + +static inline void +cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stw_be_data_ra(env, addr, val, 0); +} + +static inline void +cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stl_be_data_ra(env, addr, val, 0); +} + +static inline void +cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val) +{ + cpu_stq_be_data_ra(env, addr, val, 0); +} + +static inline void +cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stw_le_data_ra(env, addr, val, 0); +} + +static inline void +cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stl_le_data_ra(env, addr, val, 0); +} + +static inline void +cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val) +{ + cpu_stq_le_data_ra(env, addr, val, 0); +} + +#if TARGET_BIG_ENDIAN +# define cpu_lduw_data cpu_lduw_be_data +# define cpu_ldsw_data cpu_ldsw_be_data +# define cpu_ldl_data cpu_ldl_be_data +# define cpu_ldq_data cpu_ldq_be_data +# define cpu_lduw_data_ra cpu_lduw_be_data_ra +# define cpu_ldsw_data_ra cpu_ldsw_be_data_ra +# define cpu_ldl_data_ra cpu_ldl_be_data_ra +# define cpu_ldq_data_ra cpu_ldq_be_data_ra +# define cpu_lduw_mmuidx_ra cpu_lduw_be_mmuidx_ra +# define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra +# define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra +# define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra +# define cpu_stw_data cpu_stw_be_data +# define cpu_stl_data cpu_stl_be_data +# define cpu_stq_data cpu_stq_be_data +# define cpu_stw_data_ra cpu_stw_be_data_ra +# define cpu_stl_data_ra cpu_stl_be_data_ra +# define cpu_stq_data_ra cpu_stq_be_data_ra +# define cpu_stw_mmuidx_ra 
cpu_stw_be_mmuidx_ra +# define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra +# define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra +#else +# define cpu_lduw_data cpu_lduw_le_data +# define cpu_ldsw_data cpu_ldsw_le_data +# define cpu_ldl_data cpu_ldl_le_data +# define cpu_ldq_data cpu_ldq_le_data +# define cpu_lduw_data_ra cpu_lduw_le_data_ra +# define cpu_ldsw_data_ra cpu_ldsw_le_data_ra +# define cpu_ldl_data_ra cpu_ldl_le_data_ra +# define cpu_ldq_data_ra cpu_ldq_le_data_ra +# define cpu_lduw_mmuidx_ra cpu_lduw_le_mmuidx_ra +# define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra +# define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra +# define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra +# define cpu_stw_data cpu_stw_le_data +# define cpu_stl_data cpu_stl_le_data +# define cpu_stq_data cpu_stq_le_data +# define cpu_stw_data_ra cpu_stw_le_data_ra +# define cpu_stl_data_ra cpu_stl_le_data_ra +# define cpu_stq_data_ra cpu_stq_le_data_ra +# define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra +# define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra +# define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra +#endif + +static inline uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) +{ + CPUState *cs = env_cpu(env); + MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true)); + return cpu_ldb_code_mmu(env, addr, oi, 0); +} + +static inline uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) +{ + CPUState *cs = env_cpu(env); + MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true)); + return cpu_ldw_code_mmu(env, addr, oi, 0); +} + +static inline uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) +{ + CPUState *cs = env_cpu(env); + MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true)); + return cpu_ldl_code_mmu(env, addr, oi, 0); +} + +static inline uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) +{ + CPUState *cs = env_cpu(env); + MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true)); + return cpu_ldq_code_mmu(env, addr, oi, 0); +} + +#endif /* ACCEL_TCG_CPU_LDST_H */ diff --git a/include/accel/tcg/cpu-mmu-index.h b/include/accel/tcg/cpu-mmu-index.h new file mode 100644 index 00000000000..e681a90844c --- /dev/null +++ b/include/accel/tcg/cpu-mmu-index.h @@ -0,0 +1,42 @@ +/* + * cpu_mmu_index() + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef ACCEL_TCG_CPU_MMU_INDEX_H +#define ACCEL_TCG_CPU_MMU_INDEX_H + +#include "hw/core/cpu.h" +#include "accel/tcg/cpu-ops.h" +#include "tcg/debug-assert.h" +#ifdef COMPILING_PER_TARGET +# ifdef CONFIG_USER_ONLY +# include "cpu.h" +# endif +#endif + +/** + * cpu_mmu_index: + * @env: The cpu environment + * @ifetch: True for code access, false for data access. + * + * Return the core mmu index for the current translation regime. + * This function is used by generic TCG code paths. + */ +static inline int cpu_mmu_index(CPUState *cs, bool ifetch) +{ +#ifdef COMPILING_PER_TARGET +# ifdef CONFIG_USER_ONLY + return MMU_USER_IDX; +# endif +#endif + + int ret = cs->cc->tcg_ops->mmu_index(cs, ifetch); + tcg_debug_assert(ret >= 0 && ret < NB_MMU_MODES); + return ret; +} + +#endif /* ACCEL_TCG_CPU_MMU_INDEX_H */ diff --git a/include/accel/tcg/cpu-ops.h b/include/accel/tcg/cpu-ops.h new file mode 100644 index 00000000000..dd8ea300168 --- /dev/null +++ b/include/accel/tcg/cpu-ops.h @@ -0,0 +1,333 @@ +/* + * TCG CPU-specific operations + * + * Copyright 2021 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef TCG_CPU_OPS_H +#define TCG_CPU_OPS_H + +#include "exec/breakpoint.h" +#include "exec/hwaddr.h" +#include "exec/memattrs.h" +#include "exec/memop.h" +#include "exec/mmu-access-type.h" +#include "exec/vaddr.h" +#include "accel/tcg/tb-cpu-state.h" +#include "tcg/tcg-mo.h" + +struct TCGCPUOps { + /** + * mttcg_supported: multi-threaded TCG is supported + * + * Target (TCG frontend) supports: + * - atomic instructions + * - memory ordering primitives (barriers) + */ + bool mttcg_supported; + + /** + * @precise_smc: Stores which modify code within the current TB force + * the TB to exit; the next executed instruction will see + * the result of the store. + */ + bool precise_smc; + + /** + * @guest_default_memory_order: default barrier that is required + * for the guest memory ordering. + */ + TCGBar guest_default_memory_order; + + /** + * @initialize: Initialize TCG state + * + * Called when the first CPU is realized. + */ + void (*initialize)(void); + /** + * @translate_code: Translate guest instructions to TCGOps + * @cpu: cpu context + * @tb: translation block + * @max_insns: max number of instructions to translate + * @pc: guest virtual program counter address + * @host_pc: host physical program counter address + * + * This function must be provided by the target, which should create + * the target-specific DisasContext, and then invoke translator_loop. + */ + void (*translate_code)(CPUState *cpu, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); + /** + * @get_tb_cpu_state: Extract CPU state for a TCG #TranslationBlock + * + * Fill in all data required to select or compile a TranslationBlock. + */ + TCGTBCPUState (*get_tb_cpu_state)(CPUState *cs); + /** + * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock + * + * This is called when we abandon execution of a TB before starting it, + * and must set all parts of the CPU state which the previous TB in the + * chain may not have updated. + * By default, when this is NULL, a call is made to @set_pc(tb->pc). + * + * If more state needs to be restored, the target must implement a + * function to restore all the state, and register it here. + */ + void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb); + /** + * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn + * + * This is called when we unwind state in the middle of a TB, + * usually before raising an exception. Set all part of the CPU + * state which are tracked insn-by-insn in the target-specific + * arguments to start_insn, passed as @data. + */ + void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb, + const uint64_t *data); + + /** @cpu_exec_enter: Callback for cpu_exec preparation */ + void (*cpu_exec_enter)(CPUState *cpu); + /** @cpu_exec_exit: Callback for cpu_exec cleanup */ + void (*cpu_exec_exit)(CPUState *cpu); + /** @debug_excp_handler: Callback for handling debug exceptions */ + void (*debug_excp_handler)(CPUState *cpu); + + /** @mmu_index: Callback for choosing softmmu mmu index */ + int (*mmu_index)(CPUState *cpu, bool ifetch); + +#ifdef CONFIG_USER_ONLY + /** + * @fake_user_interrupt: Callback for 'fake exception' handling. + * + * Simulate 'fake exception' which will be handled outside the + * cpu execution loop (hack for x86 user mode). 
+ */ + void (*fake_user_interrupt)(CPUState *cpu); + + /** + * record_sigsegv: + * @cpu: cpu context + * @addr: faulting guest address + * @access_type: access was read/write/execute + * @maperr: true for invalid page, false for permission fault + * @ra: host pc for unwinding + * + * We are about to raise SIGSEGV with si_code set for @maperr, + * and si_addr set for @addr. Record anything further needed + * for the signal ucontext_t. + * + * If the emulated kernel does not provide anything to the signal + * handler with anything besides the user context registers, and + * the siginfo_t, then this hook need do nothing and may be omitted. + * Otherwise, record the data and return; the caller will raise + * the signal, unwind the cpu state, and return to the main loop. + * + * If it is simpler to re-use the sysemu tlb_fill code, @ra is provided + * so that a "normal" cpu exception can be raised. In this case, + * the signal must be raised by the architecture cpu_loop. + */ + void (*record_sigsegv)(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra); + /** + * record_sigbus: + * @cpu: cpu context + * @addr: misaligned guest address + * @access_type: access was read/write/execute + * @ra: host pc for unwinding + * + * We are about to raise SIGBUS with si_code BUS_ADRALN, + * and si_addr set for @addr. Record anything further needed + * for the signal ucontext_t. + * + * If the emulated kernel does not provide the signal handler with + * anything besides the user context registers, and the siginfo_t, + * then this hook need do nothing and may be omitted. + * Otherwise, record the data and return; the caller will raise + * the signal, unwind the cpu state, and return to the main loop. + * + * If it is simpler to re-use the sysemu do_unaligned_access code, + * @ra is provided so that a "normal" cpu exception can be raised. + * In this case, the signal must be raised by the architecture cpu_loop. + */ + void (*record_sigbus)(CPUState *cpu, vaddr addr, + MMUAccessType access_type, uintptr_t ra); + + /** + * untagged_addr: Remove an ignored tag from an address + * @cpu: cpu context + * @addr: tagged guest address + */ + vaddr (*untagged_addr)(CPUState *cs, vaddr addr); +#else + /** @do_interrupt: Callback for interrupt handling. */ + void (*do_interrupt)(CPUState *cpu); + /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */ + bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request); + /** @cpu_exec_reset: Callback for reset in cpu_exec. */ + void (*cpu_exec_reset)(CPUState *cpu); + /** + * @cpu_exec_halt: Callback for handling halt in cpu_exec. + * + * The target CPU should do any special processing here that it needs + * to do when the CPU is in the halted state. + * + * Return true to indicate that the CPU should now leave halt, false + * if it should remain in the halted state. (This should generally + * be the same value that cpu_has_work() would return.) + * + * This method must be provided. If the target does not need to + * do anything special for halt, the same function used for its + * SysemuCPUOps::has_work method can be used here, as they have the + * same function signature. 
+ */ + bool (*cpu_exec_halt)(CPUState *cpu); + /** + * @tlb_fill_align: Handle a softmmu tlb miss + * @cpu: cpu context + * @out: output page properties + * @addr: virtual address + * @access_type: read, write or execute + * @mmu_idx: mmu context + * @memop: memory operation for the access + * @size: memory access size, or 0 for whole page + * @probe: test only, no fault + * @ra: host return address for exception unwind + * + * If the access is valid, fill in @out and return true. + * Otherwise if probe is true, return false. + * Otherwise raise an exception and do not return. + * + * The alignment check for the access is deferred to this hook, + * so that the target can determine the priority of any alignment + * fault with respect to other potential faults from paging. + * Zero may be passed for @memop to skip any alignment check + * for non-memory-access operations such as probing. + */ + bool (*tlb_fill_align)(CPUState *cpu, CPUTLBEntryFull *out, vaddr addr, + MMUAccessType access_type, int mmu_idx, + MemOp memop, int size, bool probe, uintptr_t ra); + /** + * @tlb_fill: Handle a softmmu tlb miss + * + * If the access is valid, call tlb_set_page and return true; + * if the access is invalid and probe is true, return false; + * otherwise raise an exception and do not return. + */ + bool (*tlb_fill)(CPUState *cpu, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); + /** + * @pointer_wrap: + * + * We have incremented @base to @result, resulting in a page change. + * For the current cpu state, adjust @result for possible overflow. + */ + vaddr (*pointer_wrap)(CPUState *cpu, int mmu_idx, vaddr result, vaddr base); + /** + * @do_transaction_failed: Callback for handling failed memory transactions + * (ie bus faults or external aborts; not MMU faults) + */ + void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr, + unsigned size, MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr); + /** + * @do_unaligned_access: Callback for unaligned access handling + * The callback must exit via raising an exception. + */ + G_NORETURN void (*do_unaligned_access)(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); + + /** + * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM + */ + vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len); + + /** + * @debug_check_watchpoint: return true if the architectural + * watchpoint whose address has matched should really fire, used by ARM + * and RISC-V + */ + bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp); + + /** + * @debug_check_breakpoint: return true if the architectural + * breakpoint whose PC has matched should really fire. + */ + bool (*debug_check_breakpoint)(CPUState *cpu); + + /** + * @io_recompile_replay_branch: Callback for cpu_io_recompile. + * + * The cpu has been stopped, and cpu_restore_state_from_tb has been + * called. If the faulting instruction is in a delay slot, and the + * target architecture requires re-execution of the branch, then + * adjust the cpu state as required and return true. + */ + bool (*io_recompile_replay_branch)(CPUState *cpu, + const TranslationBlock *tb); + /** + * @need_replay_interrupt: Return %true if @interrupt_request + * needs to be recorded for replay purposes. 
+ */ + bool (*need_replay_interrupt)(int interrupt_request); +#endif /* !CONFIG_USER_ONLY */ +}; + +#if defined(CONFIG_USER_ONLY) + +static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs atr, int fl, uintptr_t ra) +{ +} + +static inline int cpu_watchpoint_address_matches(CPUState *cpu, + vaddr addr, vaddr len) +{ + return 0; +} + +#else + +/** + * cpu_check_watchpoint: + * @cpu: cpu context + * @addr: guest virtual address + * @len: access length + * @attrs: memory access attributes + * @flags: watchpoint access type + * @ra: unwind return address + * + * Check for a watchpoint hit in [addr, addr+len) of the type + * specified by @flags. Exit via exception with a hit. + */ +void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, + MemTxAttrs attrs, int flags, uintptr_t ra); + +/** + * cpu_watchpoint_address_matches: + * @cpu: cpu context + * @addr: guest virtual address + * @len: access length + * + * Return the watchpoint flags that apply to [addr, addr+len). + * If no watchpoint is registered for the range, the result is 0. + */ +int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len); + +/* + * Common pointer_wrap implementations. + */ +vaddr cpu_pointer_wrap_notreached(CPUState *, int, vaddr, vaddr); +vaddr cpu_pointer_wrap_uint32(CPUState *, int, vaddr, vaddr); + +#endif + +#endif /* TCG_CPU_OPS_H */ diff --git a/include/accel/tcg/getpc.h b/include/accel/tcg/getpc.h new file mode 100644 index 00000000000..0fc08addcf3 --- /dev/null +++ b/include/accel/tcg/getpc.h @@ -0,0 +1,20 @@ +/* + * Get host pc for helper unwinding. + * + * Copyright (c) 2003 Fabrice Bellard + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef ACCEL_TCG_GETPC_H +#define ACCEL_TCG_GETPC_H + +/* GETPC is the true target of the return instruction that we'll execute. */ +#ifdef CONFIG_TCG_INTERPRETER +extern __thread uintptr_t tci_tb_ptr; +# define GETPC() tci_tb_ptr +#else +# define GETPC() \ + ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0))) +#endif + +#endif /* ACCEL_TCG_GETPC_H */ diff --git a/include/accel/tcg/helper-retaddr.h b/include/accel/tcg/helper-retaddr.h new file mode 100644 index 00000000000..037fda2b83d --- /dev/null +++ b/include/accel/tcg/helper-retaddr.h @@ -0,0 +1,43 @@ +/* + * Get user helper pc for memory unwinding. + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef ACCEL_TCG_HELPER_RETADDR_H +#define ACCEL_TCG_HELPER_RETADDR_H + +/* + * For user-only, helpers that use guest to host address translation + * must protect the actual host memory access by recording 'retaddr' + * for the signal handler. This is required for a race condition in + * which another thread unmaps the page between a probe and the + * actual access. + */ +#ifdef CONFIG_USER_ONLY +extern __thread uintptr_t helper_retaddr; + +static inline void set_helper_retaddr(uintptr_t ra) +{ + helper_retaddr = ra; + /* + * Ensure that this write is visible to the SIGSEGV handler that + * may be invoked due to a subsequent invalid memory operation. + */ + signal_barrier(); +} + +static inline void clear_helper_retaddr(void) +{ + /* + * Ensure that previous memory operations have succeeded before + * removing the data visible to the signal handler. 
+ */ + signal_barrier(); + helper_retaddr = 0; +} +#else +#define set_helper_retaddr(ra) do { } while (0) +#define clear_helper_retaddr() do { } while (0) +#endif + +#endif /* ACCEL_TCG_HELPER_RETADDR_H */ diff --git a/include/accel/tcg/iommu.h b/include/accel/tcg/iommu.h new file mode 100644 index 00000000000..90cfd6c0ed1 --- /dev/null +++ b/include/accel/tcg/iommu.h @@ -0,0 +1,41 @@ +/* + * TCG IOMMU translations. + * + * Copyright (c) 2003 Fabrice Bellard + * SPDX-License-Identifier: LGPL-2.1-or-later + */ +#ifndef ACCEL_TCG_IOMMU_H +#define ACCEL_TCG_IOMMU_H + +#ifdef CONFIG_USER_ONLY +#error Cannot include accel/tcg/iommu.h from user emulation +#endif + +#include "exec/hwaddr.h" +#include "exec/memattrs.h" + +/** + * iotlb_to_section: + * @cpu: CPU performing the access + * @index: TCG CPU IOTLB entry + * + * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that + * it refers to. @index will have been initially created and returned + * by memory_region_section_get_iotlb(). + */ +MemoryRegionSection *iotlb_to_section(CPUState *cpu, + hwaddr index, MemTxAttrs attrs); + +MemoryRegionSection *address_space_translate_for_iotlb(CPUState *cpu, + int asidx, + hwaddr addr, + hwaddr *xlat, + hwaddr *plen, + MemTxAttrs attrs, + int *prot); + +hwaddr memory_region_section_get_iotlb(CPUState *cpu, + MemoryRegionSection *section); + +#endif + diff --git a/include/accel/tcg/probe.h b/include/accel/tcg/probe.h new file mode 100644 index 00000000000..dd9ecbbdf11 --- /dev/null +++ b/include/accel/tcg/probe.h @@ -0,0 +1,122 @@ +/* + * Probe guest virtual addresses for access permissions. + * + * Copyright (c) 2003 Fabrice Bellard + * SPDX-License-Identifier: LGPL-2.1-or-later + */ +#ifndef ACCEL_TCG_PROBE_H +#define ACCEL_TCG_PROBE_H + +#include "exec/mmu-access-type.h" +#include "exec/vaddr.h" + +/** + * probe_access: + * @env: CPUArchState + * @addr: guest virtual address to look up + * @size: size of the access + * @access_type: read, write or execute permission + * @mmu_idx: MMU index to use for lookup + * @retaddr: return address for unwinding + * + * Look up the guest virtual address @addr. Raise an exception if the + * page does not satisfy @access_type. Raise an exception if the + * access (@addr, @size) hits a watchpoint. For writes, mark a clean + * page as dirty. + * + * Finally, return the host address for a page that is backed by RAM, + * or NULL if the page requires I/O. + */ +void *probe_access(CPUArchState *env, vaddr addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); + +static inline void *probe_write(CPUArchState *env, vaddr addr, int size, + int mmu_idx, uintptr_t retaddr) +{ + return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); +} + +static inline void *probe_read(CPUArchState *env, vaddr addr, int size, + int mmu_idx, uintptr_t retaddr) +{ + return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); +} + +/** + * probe_access_flags: + * @env: CPUArchState + * @addr: guest virtual address to look up + * @size: size of the access + * @access_type: read, write or execute permission + * @mmu_idx: MMU index to use for lookup + * @nonfault: suppress the fault + * @phost: return value for host address + * @retaddr: return address for unwinding + * + * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for + * the page, and storing the host address for RAM in @phost. + * + * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK. 
+ * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags. + * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags. + * For simplicity, all "mmio-like" flags are folded to TLB_MMIO. + */ +int probe_access_flags(CPUArchState *env, vaddr addr, int size, + MMUAccessType access_type, int mmu_idx, + bool nonfault, void **phost, uintptr_t retaddr); + +#ifndef CONFIG_USER_ONLY + +/** + * probe_access_full: + * Like probe_access_flags, except also return into @pfull. + * + * The CPUTLBEntryFull structure returned via @pfull is transient + * and must be consumed or copied immediately, before any further + * access or changes to TLB @mmu_idx. + * + * This function will not fault if @nonfault is set, but will + * return TLB_INVALID_MASK if the page is not mapped, or is not + * accessible with @access_type. + * + * This function will return TLB_MMIO in order to force the access + * to be handled out-of-line if plugins wish to instrument the access. + */ +int probe_access_full(CPUArchState *env, vaddr addr, int size, + MMUAccessType access_type, int mmu_idx, + bool nonfault, void **phost, + CPUTLBEntryFull **pfull, uintptr_t retaddr); + +/** + * probe_access_full_mmu: + * Like probe_access_full, except: + * + * This function is intended to be used for page table accesses by + * the target mmu itself. Since such page walking happens while + * handling another potential mmu fault, this function never raises + * exceptions (akin to @nonfault true for probe_access_full). + * Likewise this function does not trigger plugin instrumentation. + */ +int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size, + MMUAccessType access_type, int mmu_idx, + void **phost, CPUTLBEntryFull **pfull); + +#endif /* !CONFIG_USER_ONLY */ + +/** + * tlb_vaddr_to_host: + * @env: CPUArchState + * @addr: guest virtual address to look up + * @access_type: 0 for read, 1 for write, 2 for execute + * @mmu_idx: MMU index to use for lookup + * + * Look up the specified guest virtual index in the TCG softmmu TLB. + * If we can translate a host virtual address suitable for direct RAM + * access, without causing a guest exception, then return it. + * Otherwise (TLB entry is for an I/O access, guest software + * TLB fill required, etc) return NULL. + */ +void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr, + MMUAccessType access_type, int mmu_idx); + +#endif diff --git a/include/accel/tcg/tb-cpu-state.h b/include/accel/tcg/tb-cpu-state.h new file mode 100644 index 00000000000..8f912900ca6 --- /dev/null +++ b/include/accel/tcg/tb-cpu-state.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Definition of TCGTBCPUState. 
+ */ + +#ifndef EXEC_TB_CPU_STATE_H +#define EXEC_TB_CPU_STATE_H + +#include "exec/vaddr.h" + +typedef struct TCGTBCPUState { + vaddr pc; + uint32_t flags; + uint32_t cflags; + uint64_t cs_base; +} TCGTBCPUState; + +#endif diff --git a/include/block/aio.h b/include/block/aio.h index 4ee81936ed5..99ff48420b1 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -20,6 +20,7 @@ #include "qemu/coroutine-core.h" #include "qemu/queue.h" #include "qemu/event_notifier.h" +#include "qemu/lockcnt.h" #include "qemu/thread.h" #include "qemu/timer.h" #include "block/graph-lock.h" @@ -53,7 +54,7 @@ typedef void QEMUBHFunc(void *opaque); typedef bool AioPollFn(void *opaque); typedef void IOHandler(void *opaque); -struct ThreadPool; +struct ThreadPoolAio; struct LinuxAioState; typedef struct LuringState LuringState; @@ -122,6 +123,10 @@ struct BHListSlice { typedef QSLIST_HEAD(, AioHandler) AioHandlerSList; +typedef struct AioPolledEvent { + int64_t ns; /* current polling time in nanoseconds */ +} AioPolledEvent; + struct AioContext { GSource source; @@ -206,7 +211,7 @@ struct AioContext { /* Thread pool for performing work and receiving completion callbacks. * Has its own locking. */ - struct ThreadPool *thread_pool; + struct ThreadPoolAio *thread_pool; #ifdef CONFIG_LINUX_AIO struct LinuxAioState *linux_aio; @@ -228,7 +233,6 @@ struct AioContext { int poll_disable_cnt; /* Polling mode parameters */ - int64_t poll_ns; /* current polling time in nanoseconds */ int64_t poll_max_ns; /* maximum polling time in nanoseconds */ int64_t poll_grow; /* polling time growth factor */ int64_t poll_shrink; /* polling time shrink factor */ @@ -499,8 +503,8 @@ void aio_set_event_notifier_poll(AioContext *ctx, */ GSource *aio_get_g_source(AioContext *ctx); -/* Return the ThreadPool bound to this AioContext */ -struct ThreadPool *aio_get_thread_pool(AioContext *ctx); +/* Return the ThreadPoolAio bound to this AioContext */ +struct ThreadPoolAio *aio_get_thread_pool(AioContext *ctx); /* Setup the LinuxAioState bound to this AioContext */ struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp); diff --git a/include/block/aio_task.h b/include/block/aio_task.h index 18a9c41f4e6..c81d637617e 100644 --- a/include/block/aio_task.h +++ b/include/block/aio_task.h @@ -40,8 +40,6 @@ void aio_task_pool_free(AioTaskPool *); /* error code of failed task or 0 if all is OK */ int aio_task_pool_status(AioTaskPool *pool); -bool aio_task_pool_empty(AioTaskPool *pool); - /* User provides filled @task, however task->pool will be set automatically */ void coroutine_fn aio_task_pool_start_task(AioTaskPool *pool, AioTask *task); diff --git a/include/block/block-common.h b/include/block/block-common.h index 338fe5ff7a4..c8c626daeaa 100644 --- a/include/block/block-common.h +++ b/include/block/block-common.h @@ -257,6 +257,7 @@ typedef enum { #define BDRV_OPT_AUTO_READ_ONLY "auto-read-only" #define BDRV_OPT_DISCARD "discard" #define BDRV_OPT_FORCE_SHARE "force-share" +#define BDRV_OPT_ACTIVE "active" #define BDRV_SECTOR_BITS 9 @@ -332,6 +333,17 @@ typedef enum { #define BDRV_BLOCK_RECURSE 0x40 #define BDRV_BLOCK_COMPRESSED 0x80 +/* + * Block status hints: the bitwise-or of these flags emphasize what + * the caller hopes to learn, and some drivers may be able to give + * faster answers by doing less work when the hint permits. 
+ */ +#define BDRV_WANT_ZERO BDRV_BLOCK_ZERO +#define BDRV_WANT_OFFSET_VALID BDRV_BLOCK_OFFSET_VALID +#define BDRV_WANT_ALLOCATED BDRV_BLOCK_ALLOCATED +#define BDRV_WANT_PRECISE (BDRV_WANT_ZERO | BDRV_WANT_OFFSET_VALID | \ + BDRV_WANT_OFFSET_VALID) + typedef QTAILQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue; typedef struct BDRVReopenState { @@ -355,7 +367,6 @@ typedef enum BlockOpType { BLOCK_OP_TYPE_CHANGE, BLOCK_OP_TYPE_COMMIT_SOURCE, BLOCK_OP_TYPE_COMMIT_TARGET, - BLOCK_OP_TYPE_DATAPLANE, BLOCK_OP_TYPE_DRIVE_DEL, BLOCK_OP_TYPE_EJECT, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, diff --git a/include/block/block-copy.h b/include/block/block-copy.h index bdc703bacd8..dd5cc82f3b2 100644 --- a/include/block/block-copy.h +++ b/include/block/block-copy.h @@ -28,6 +28,7 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target, BlockDriverState *copy_bitmap_bs, const BdrvDirtyBitmap *bitmap, bool discard_source, + uint64_t min_cluster_size, Error **errp); /* Function should be called prior any actual copy request */ diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h index bd7cecd1cf4..62da83c6165 100644 --- a/include/block/block-global-state.h +++ b/include/block/block-global-state.h @@ -74,13 +74,14 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, int GRAPH_WRLOCK bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, Error **errp); -int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs, - Error **errp); -BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *node_options, - int flags, Error **errp); +int GRAPH_UNLOCKED +bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs, Error **errp); +BlockDriverState * GRAPH_UNLOCKED +bdrv_insert_node(BlockDriverState *bs, QDict *node_options, int flags, + Error **errp); int bdrv_drop_filter(BlockDriverState *bs, Error **errp); -BdrvChild * no_coroutine_fn +BdrvChild * no_coroutine_fn GRAPH_UNLOCKED bdrv_open_child(const char *filename, QDict *options, const char *bdref_key, BlockDriverState *parent, const BdrvChildClass *child_class, BdrvChildRole child_role, bool allow_none, Error **errp); @@ -90,9 +91,10 @@ bdrv_co_open_child(const char *filename, QDict *options, const char *bdref_key, BlockDriverState *parent, const BdrvChildClass *child_class, BdrvChildRole child_role, bool allow_none, Error **errp); -int bdrv_open_file_child(const char *filename, - QDict *options, const char *bdref_key, - BlockDriverState *parent, Error **errp); +int GRAPH_UNLOCKED +bdrv_open_file_child(const char *filename, QDict *options, + const char *bdref_key, BlockDriverState *parent, + Error **errp); BlockDriverState * no_coroutine_fn bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp); @@ -100,11 +102,9 @@ bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp); BlockDriverState * coroutine_fn no_co_wrapper bdrv_co_open_blockdev_ref(BlockdevRef *ref, Error **errp); -int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, - Error **errp); int GRAPH_WRLOCK -bdrv_set_backing_hd_drained(BlockDriverState *bs, BlockDriverState *backing_hd, - Error **errp); +bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, + Error **errp); int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, const char *bdref_key, Error **errp); @@ -123,11 +123,12 @@ BlockDriverState *bdrv_new_open_driver_opts(BlockDriver *drv, Error **errp); BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name, int 
flags, Error **errp); -BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, - BlockDriverState *bs, QDict *options, - bool keep_old_opts); +BlockReopenQueue * GRAPH_UNLOCKED +bdrv_reopen_queue(BlockReopenQueue *bs_queue, BlockDriverState *bs, + QDict *options, bool keep_old_opts); void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue); -int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp); +int GRAPH_UNLOCKED +bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp); int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts, Error **errp); int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only, @@ -143,9 +144,10 @@ int bdrv_commit(BlockDriverState *bs); int GRAPH_RDLOCK bdrv_make_empty(BdrvChild *c, Error **errp); void bdrv_register(BlockDriver *bdrv); -int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, - const char *backing_file_str, - bool backing_mask_protocol); +int GRAPH_UNLOCKED +bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, + const char *backing_file_str, + bool backing_mask_protocol); BlockDriverState * GRAPH_RDLOCK bdrv_find_overlay(BlockDriverState *active, BlockDriverState *bs); @@ -175,21 +177,27 @@ BlockDriverState * GRAPH_RDLOCK check_to_replace_node(BlockDriverState *parent_bs, const char *node_name, Error **errp); + +bool GRAPH_RDLOCK bdrv_is_inactive(BlockDriverState *bs); + int no_coroutine_fn GRAPH_RDLOCK bdrv_activate(BlockDriverState *bs, Error **errp); int coroutine_fn no_co_wrapper_bdrv_rdlock bdrv_co_activate(BlockDriverState *bs, Error **errp); +int no_coroutine_fn GRAPH_RDLOCK +bdrv_inactivate(BlockDriverState *bs, Error **errp); + void bdrv_activate_all(Error **errp); -int bdrv_inactivate_all(void); +int GRAPH_UNLOCKED bdrv_inactivate_all(void); int bdrv_flush_all(void); -void bdrv_close_all(void); -void bdrv_drain_all_begin(void); +void GRAPH_UNLOCKED bdrv_close_all(void); +void GRAPH_UNLOCKED bdrv_drain_all_begin(void); void bdrv_drain_all_begin_nopoll(void); void bdrv_drain_all_end(void); -void bdrv_drain_all(void); +void GRAPH_UNLOCKED bdrv_drain_all(void); void bdrv_aio_cancel(BlockAIOCB *acb); @@ -268,11 +276,16 @@ int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag); int bdrv_debug_resume(BlockDriverState *bs, const char *tag); bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag); -bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx, - GHashTable *visited, Transaction *tran, - Error **errp); -int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, - BdrvChild *ignore_child, Error **errp); +bool GRAPH_RDLOCK +bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx, + GHashTable *visited, Transaction *tran, + Error **errp); +int GRAPH_UNLOCKED +bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, + BdrvChild *ignore_child, Error **errp); +int GRAPH_RDLOCK +bdrv_try_change_aio_context_locked(BlockDriverState *bs, AioContext *ctx, + BdrvChild *ignore_child, Error **errp); int GRAPH_RDLOCK bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz); int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo); diff --git a/include/block/block-io.h b/include/block/block-io.h index b49e0537dd4..4cf83fb3674 100644 --- a/include/block/block-io.h +++ b/include/block/block-io.h @@ -161,6 +161,8 @@ bdrv_is_allocated_above(BlockDriverState *bs, BlockDriverState *base, int coroutine_fn GRAPH_RDLOCK bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, int64_t bytes); +int 
coroutine_fn GRAPH_RDLOCK +bdrv_co_is_all_zeroes(BlockDriverState *bs); int GRAPH_RDLOCK bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg, @@ -429,7 +431,7 @@ bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent, * * This function can be recursive. */ -void bdrv_drained_begin(BlockDriverState *bs); +void GRAPH_UNLOCKED bdrv_drained_begin(BlockDriverState *bs); /** * bdrv_do_drained_begin_quiesce: diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h index ebb4e56a503..034c0634c8d 100644 --- a/include/block/block_int-common.h +++ b/include/block/block_int-common.h @@ -248,7 +248,7 @@ struct BlockDriver { int GRAPH_UNLOCKED_PTR (*bdrv_open)( BlockDriverState *bs, QDict *options, int flags, Error **errp); - void (*bdrv_close)(BlockDriverState *bs); + void GRAPH_UNLOCKED_PTR (*bdrv_close)(BlockDriverState *bs); int coroutine_fn GRAPH_UNLOCKED_PTR (*bdrv_co_create)( BlockdevCreateOptions *opts, Error **errp); @@ -396,9 +396,23 @@ struct BlockDriver { int GRAPH_RDLOCK_PTR (*bdrv_probe_geometry)( BlockDriverState *bs, HDGeometry *geo); + /** + * Hot add a BDS's child. Used in combination with bdrv_del_child, so the + * user can take a child offline when it is broken and take a new child + * online. + * + * All block nodes must be drained. + */ void GRAPH_WRLOCK_PTR (*bdrv_add_child)( BlockDriverState *parent, BlockDriverState *child, Error **errp); + /** + * Hot remove a BDS's child. Used in combination with bdrv_add_child, so the + * user can take a child offline when it is broken and take a new child + * online. + * + * All block nodes must be drained. + */ void GRAPH_WRLOCK_PTR (*bdrv_del_child)( BlockDriverState *parent, BdrvChild *child, Error **errp); @@ -506,10 +520,6 @@ struct BlockDriver { BlockAIOCB * GRAPH_RDLOCK_PTR (*bdrv_aio_flush)( BlockDriverState *bs, BlockCompletionFunc *cb, void *opaque); - BlockAIOCB * GRAPH_RDLOCK_PTR (*bdrv_aio_pdiscard)( - BlockDriverState *bs, int64_t offset, int bytes, - BlockCompletionFunc *cb, void *opaque); - int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_readv)(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov); @@ -608,15 +618,16 @@ struct BlockDriver { * according to the current layer, and should only need to set * BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO, BDRV_BLOCK_OFFSET_VALID, * and/or BDRV_BLOCK_RAW; if the current layer defers to a backing - * layer, the result should be 0 (and not BDRV_BLOCK_ZERO). See - * block.h for the overall meaning of the bits. As a hint, the - * flag want_zero is true if the caller cares more about precise - * mappings (favor accurate _OFFSET_VALID/_ZERO) or false for - * overall allocation (favor larger *pnum, perhaps by reporting - * _DATA instead of _ZERO). The block layer guarantees input - * clamped to bdrv_getlength() and aligned to request_alignment, - * as well as non-NULL pnum, map, and file; in turn, the driver - * must return an error or set pnum to an aligned non-zero value. + * layer, the result should be 0 (and not BDRV_BLOCK_ZERO). The + * caller will synthesize BDRV_BLOCK_ALLOCATED based on the + * non-zero results. See block.h for the overall meaning of the + * bits. As a hint, the flags in @mode may include a bitwise-or + * of BDRV_WANT_ALLOCATED, BDRV_WANT_OFFSET_VALID, or + * BDRV_WANT_ZERO based on what the caller is looking for in the + * results. 
The block layer guarantees input clamped to + * bdrv_getlength() and aligned to request_alignment, as well as + * non-NULL pnum, map, and file; in turn, the driver must return + * an error or set pnum to an aligned non-zero value. * * Note that @bytes is just a hint on how big of a region the * caller wants to inspect. It is not a limit on *pnum. @@ -628,8 +639,8 @@ struct BlockDriver { * to clamping *pnum for return to its caller. */ int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_block_status)( - BlockDriverState *bs, - bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum, + BlockDriverState *bs, unsigned int mode, + int64_t offset, int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file); /* @@ -653,8 +664,8 @@ struct BlockDriver { QEMUIOVector *qiov, size_t qiov_offset); int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_snapshot_block_status)( - BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes, - int64_t *pnum, int64_t *map, BlockDriverState **file); + BlockDriverState *bs, unsigned int mode, int64_t offset, + int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file); int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_pdiscard_snapshot)( BlockDriverState *bs, int64_t offset, int64_t bytes); @@ -986,9 +997,21 @@ struct BdrvChildClass { bool backing_mask_protocol, Error **errp); - bool (*change_aio_ctx)(BdrvChild *child, AioContext *ctx, - GHashTable *visited, Transaction *tran, - Error **errp); + /* + * Notifies the parent that the child is trying to change its AioContext. + * The parent may in turn change the AioContext of other nodes in the same + * transaction. Returns true if the change is possible and the transaction + * can be continued. Returns false and sets @errp if not and the transaction + * must be aborted. + * + * @visited will accumulate all visited BdrvChild objects. The caller is + * responsible for freeing the list afterwards. + * + * Must be called with the affected block nodes drained. + */ + bool GRAPH_RDLOCK_PTR (*change_aio_ctx)(BdrvChild *child, AioContext *ctx, + GHashTable *visited, + Transaction *tran, Error **errp); /* * I/O API functions. These functions are thread-safe. @@ -1230,7 +1253,7 @@ struct BlockDriverState { /* do we need to tell the quest if we have a volatile write cache? */ int enable_write_cache; - /* Accessed with atomic ops. */ + /* Accessed only in the main thread. */ int quiesce_counter; unsigned int write_gen; /* Current data generation */ diff --git a/include/block/block_int-global-state.h b/include/block/block_int-global-state.h index eb2d92a2261..e7c8f1a8567 100644 --- a/include/block/block_int-global-state.h +++ b/include/block/block_int-global-state.h @@ -139,7 +139,7 @@ BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs, * @buf_size: The amount of data that can be in flight at one time. * @mode: Whether to collapse all images in the chain to the target. * @backing_mode: How to establish the target's backing chain after completion. - * @zero_target: Whether the target should be explicitly zero-initialized + * @target_is_zero: Whether the target already is zero-initialized. * @on_source_error: The action to take upon error reading from the source. * @on_target_error: The action to take upon error writing to the target. * @unmap: Whether to unmap target where source sectors only contain zeroes. 
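The bdrv_co_block_status() callback above now receives an `unsigned int mode` built from the BDRV_WANT_* hints instead of the old want_zero boolean, so a driver may answer cheaply when precise zero/offset information is not requested. A minimal sketch, not part of the patch, assuming hypothetical function names (example_precise_status() stands in for a driver's real per-cluster lookup):

    static int coroutine_fn
    example_co_block_status(BlockDriverState *bs, unsigned int mode,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file)
    {
        if (!(mode & BDRV_WANT_ZERO)) {
            /*
             * Caller only needs coarse information: answer for the whole
             * request instead of walking per-cluster metadata.
             */
            *pnum = bytes;
            *map = offset;
            *file = bs;
            return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
        }
        /* Caller asked for precise zero/offset information. */
        return example_precise_status(bs, offset, bytes, pnum, map, file);
    }

A real driver implementation also carries the GRAPH_RDLOCK annotation that the BlockDriver method table requires for this callback.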
@@ -159,7 +159,7 @@ void mirror_start(const char *job_id, BlockDriverState *bs, int creation_flags, int64_t speed, uint32_t granularity, int64_t buf_size, MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, - bool zero_target, + bool target_is_zero, BlockdevOnError on_source_error, BlockdevOnError on_target_error, bool unmap, const char *filter_node_name, @@ -179,6 +179,7 @@ void mirror_start(const char *job_id, BlockDriverState *bs, * all ".has_*" fields are ignored. * @on_source_error: The action to take upon error reading from the source. * @on_target_error: The action to take upon error writing to the target. + * @on_cbw_error: The action to take upon error in copy-before-write operations. * @creation_flags: Flags that control the behavior of the Job lifetime. * See @BlockJobCreateFlags * @cb: Completion function for the job. @@ -198,6 +199,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, BackupPerf *perf, BlockdevOnError on_source_error, BlockdevOnError on_target_error, + OnCbwError on_cbw_error, int creation_flags, BlockCompletionFunc *cb, void *opaque, JobTxn *txn, Error **errp); diff --git a/include/block/block_int-io.h b/include/block/block_int-io.h index 4a7cf2b4fdc..4f94eb3c5a2 100644 --- a/include/block/block_int-io.h +++ b/include/block/block_int-io.h @@ -38,8 +38,8 @@ int coroutine_fn GRAPH_RDLOCK bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset); int coroutine_fn GRAPH_RDLOCK bdrv_co_snapshot_block_status( - BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes, - int64_t *pnum, int64_t *map, BlockDriverState **file); + BlockDriverState *bs, unsigned int mode, int64_t offset, + int64_t bytes, int64_t *pnum, int64_t *map, BlockDriverState **file); int coroutine_fn GRAPH_RDLOCK bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes); diff --git a/include/block/blockjob.h b/include/block/blockjob.h index 7061ab7201a..85284cb25e2 100644 --- a/include/block/blockjob.h +++ b/include/block/blockjob.h @@ -137,6 +137,8 @@ BlockJob *block_job_get_locked(const char *id); * Add @bs to the list of BlockDriverState that are involved in * @job. This means that all operations will be blocked on @bs while * @job exists. + * + * All block nodes must be drained. */ int GRAPH_WRLOCK block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs, @@ -149,7 +151,7 @@ block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs, * Remove all BlockDriverStates from the list of nodes that are involved in the * job. This removes the blockers added with block_job_add_bdrv(). 
*/ -void block_job_remove_all_bdrv(BlockJob *job); +void GRAPH_UNLOCKED block_job_remove_all_bdrv(BlockJob *job); /** * block_job_has_bdrv: diff --git a/include/block/export.h b/include/block/export.h index f2fe0f8078f..4bd9531d4d9 100644 --- a/include/block/export.h +++ b/include/block/export.h @@ -29,6 +29,9 @@ typedef struct BlockExportDriver { */ size_t instance_size; + /* True if the export type supports running on an inactive node */ + bool supports_inactive; + /* Creates and starts a new block export */ int (*create)(BlockExport *, BlockExportOptions *, Error **); diff --git a/include/block/graph-lock.h b/include/block/graph-lock.h index d7545e82d06..95bf5ede40b 100644 --- a/include/block/graph-lock.h +++ b/include/block/graph-lock.h @@ -20,8 +20,6 @@ #ifndef GRAPH_LOCK_H #define GRAPH_LOCK_H -#include "qemu/clang-tsa.h" - /** * Graph Lock API * This API provides a rwlock used to protect block layer @@ -114,10 +112,21 @@ void unregister_aiocontext(AioContext *ctx); void no_coroutine_fn TSA_ACQUIRE(graph_lock) TSA_NO_TSA bdrv_graph_wrlock(void); +/* + * bdrv_graph_wrlock_drained: + * Similar to bdrv_graph_wrlock, but will begin a drained section before + * locking. + */ +void no_coroutine_fn TSA_ACQUIRE(graph_lock) TSA_NO_TSA +bdrv_graph_wrlock_drained(void); + /* * bdrv_graph_wrunlock: * Write finished, reset global has_writer to 0 and restart * all readers that are waiting. + * + * Also ends the drained section if bdrv_graph_wrlock_drained() was used to lock + * the graph. */ void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA bdrv_graph_wrunlock(void); @@ -209,31 +218,38 @@ typedef struct GraphLockable { } GraphLockable; * unlocked. TSA_ASSERT_SHARED() makes sure that the following calls know that * we hold the lock while unlocking is left unchecked. */ -static inline GraphLockable * TSA_ASSERT_SHARED(graph_lock) TSA_NO_TSA coroutine_fn +static inline GraphLockable * TSA_ACQUIRE_SHARED(graph_lock) coroutine_fn graph_lockable_auto_lock(GraphLockable *x) { bdrv_graph_co_rdlock(); return x; } -static inline void TSA_NO_TSA coroutine_fn -graph_lockable_auto_unlock(GraphLockable *x) +static inline void TSA_RELEASE_SHARED(graph_lock) coroutine_fn +graph_lockable_auto_unlock(GraphLockable **x) { bdrv_graph_co_rdunlock(); } -G_DEFINE_AUTOPTR_CLEANUP_FUNC(GraphLockable, graph_lockable_auto_unlock) +#define GRAPH_AUTO_UNLOCK __attribute__((cleanup(graph_lockable_auto_unlock))) +/* + * @var is only used to break the loop after the first iteration. + * @unlock_var can't be unlocked and then set to NULL because TSA wants the lock + * to be held at the start of every iteration of the loop. 
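The WITH_GRAPH_RDLOCK_GUARD_() rewrite that follows combines two C idioms: __attribute__((cleanup)) to run the unlock function when a variable leaves scope, and a one-iteration for loop whose control variable is cleared at the end of the first pass. A self-contained sketch of the same shape with a plain mutex (all names here are illustrative, not QEMU APIs):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

typedef struct Guard { int dummy; } Guard;
static Guard guard_obj;

static Guard *guard_lock(Guard *g)
{
    pthread_mutex_lock(&lock);
    return g;
}

/* Invoked automatically when the attributed variable goes out of scope. */
static void guard_unlock(Guard **g)
{
    pthread_mutex_unlock(&lock);
}

#define AUTO_UNLOCK __attribute__((cleanup(guard_unlock)))

/*
 * Same shape as WITH_GRAPH_RDLOCK_GUARD_(): 'unlock_var' owns the cleanup,
 * 'var' exists only to terminate the loop after one iteration.
 */
#define WITH_LOCK_GUARD_(var)                                        \
    for (Guard *unlock_var AUTO_UNLOCK = guard_lock(&guard_obj),     \
               *var = unlock_var;                                    \
         var;                                                        \
         var = NULL)

int main(void)
{
    WITH_LOCK_GUARD_(g) {
        printf("lock held inside the guarded block\n");
    }
    printf("lock released after the block\n");
    return 0;
}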
+ */ #define WITH_GRAPH_RDLOCK_GUARD_(var) \ - for (g_autoptr(GraphLockable) var = graph_lockable_auto_lock(GML_OBJ_()); \ + for (GraphLockable *unlock_var GRAPH_AUTO_UNLOCK = \ + graph_lockable_auto_lock(GML_OBJ_()), \ + *var = unlock_var; \ var; \ - graph_lockable_auto_unlock(var), var = NULL) + var = NULL) #define WITH_GRAPH_RDLOCK_GUARD() \ WITH_GRAPH_RDLOCK_GUARD_(glue(graph_lockable_auto, __COUNTER__)) #define GRAPH_RDLOCK_GUARD(x) \ - g_autoptr(GraphLockable) \ + GraphLockable * GRAPH_AUTO_UNLOCK \ glue(graph_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \ graph_lockable_auto_lock(GML_OBJ_()) diff --git a/include/block/nbd.h b/include/block/nbd.h index 4e7bd6342f9..92987c76fd6 100644 --- a/include/block/nbd.h +++ b/include/block/nbd.h @@ -33,6 +33,19 @@ typedef struct NBDMetaContexts NBDMetaContexts; extern const BlockExportDriver blk_exp_nbd; +/* + * NBD_DEFAULT_HANDSHAKE_MAX_SECS: Number of seconds in which client must + * succeed at NBD_OPT_GO before being forcefully dropped as too slow. + */ +#define NBD_DEFAULT_HANDSHAKE_MAX_SECS 10 + +/* + * NBD_DEFAULT_MAX_CONNECTIONS: Number of client sockets to allow at + * once; must be large enough to allow a MULTI_CONN-aware client like + * nbdcopy to create its typical number of 8-16 sockets. + */ +#define NBD_DEFAULT_MAX_CONNECTIONS 100 + /* Handshake phase structs - this struct is passed on the wire */ typedef struct NBDOption { @@ -403,18 +416,21 @@ AioContext *nbd_export_aio_context(NBDExport *exp); NBDExport *nbd_export_find(const char *name); void nbd_client_new(QIOChannelSocket *sioc, + uint32_t handshake_max_secs, QCryptoTLSCreds *tlscreds, const char *tlsauthz, - void (*close_fn)(NBDClient *, bool)); + void (*close_fn)(NBDClient *, bool), + void *owner); +void *nbd_client_owner(NBDClient *client); void nbd_client_get(NBDClient *client); void nbd_client_put(NBDClient *client); void nbd_server_is_qemu_nbd(int max_connections); bool nbd_server_is_running(void); int nbd_server_max_connections(void); -void nbd_server_start(SocketAddress *addr, const char *tls_creds, - const char *tls_authz, uint32_t max_connections, - Error **errp); +void nbd_server_start(SocketAddress *addr, uint32_t handshake_max_secs, + const char *tls_creds, const char *tls_authz, + uint32_t max_connections, Error **errp); void nbd_server_start_options(NbdServerOptions *arg, Error **errp); /* nbd_read diff --git a/include/block/nvme.h b/include/block/nvme.h index 5298bc4a286..358e516e38b 100644 --- a/include/block/nvme.h +++ b/include/block/nvme.h @@ -142,9 +142,9 @@ enum NvmeCapMask { ((cap) |= (uint64_t)((val) & CAP_CMBS_MASK) << CAP_CMBS_SHIFT) enum NvmeCapCss { - NVME_CAP_CSS_NVM = 1 << 0, - NVME_CAP_CSS_CSI_SUPP = 1 << 6, - NVME_CAP_CSS_ADMIN_ONLY = 1 << 7, + NVME_CAP_CSS_NCSS = 1 << 0, + NVME_CAP_CSS_IOCSS = 1 << 6, + NVME_CAP_CSS_NOIOCSS = 1 << 7, }; enum NvmeCcShift { @@ -177,7 +177,7 @@ enum NvmeCcMask { enum NvmeCcCss { NVME_CC_CSS_NVM = 0x0, - NVME_CC_CSS_CSI = 0x6, + NVME_CC_CSS_ALL = 0x6, NVME_CC_CSS_ADMIN_ONLY = 0x7, }; @@ -906,8 +906,7 @@ enum NvmeStatusCodes { NVME_SGL_DESCR_TYPE_INVALID = 0x0011, NVME_INVALID_USE_OF_CMB = 0x0012, NVME_INVALID_PRP_OFFSET = 0x0013, - NVME_CMD_SET_CMB_REJECTED = 0x002b, - NVME_INVALID_CMD_SET = 0x002c, + NVME_COMMAND_INTERRUPTED = 0x0021, NVME_FDP_DISABLED = 0x0029, NVME_INVALID_PHID_LIST = 0x002a, NVME_LBA_RANGE = 0x0080, @@ -940,6 +939,10 @@ enum NvmeStatusCodes { NVME_INVALID_SEC_CTRL_STATE = 0x0120, NVME_INVALID_NUM_RESOURCES = 0x0121, NVME_INVALID_RESOURCE_ID = 0x0122, + NVME_IOCS_NOT_SUPPORTED = 0x0129, + 
NVME_IOCS_NOT_ENABLED = 0x012a, + NVME_IOCS_COMBINATION_REJECTED = 0x012b, + NVME_INVALID_IOCS = 0x012c, NVME_CONFLICTING_ATTRS = 0x0180, NVME_INVALID_PROT_INFO = 0x0181, NVME_WRITE_TO_RO = 0x0182, @@ -1015,6 +1018,40 @@ typedef struct QEMU_PACKED NvmeSmartLog { uint8_t reserved2[320]; } NvmeSmartLog; +typedef struct QEMU_PACKED NvmeSmartLogExtended { + uint64_t physical_media_units_written[2]; + uint64_t physical_media_units_read[2]; + uint64_t bad_user_blocks; + uint64_t bad_system_nand_blocks; + uint64_t xor_recovery_count; + uint64_t uncorrectable_read_error_count; + uint64_t soft_ecc_error_count; + uint64_t end2end_correction_counts; + uint8_t system_data_percent_used; + uint8_t refresh_counts[7]; + uint64_t user_data_erase_counts; + uint16_t thermal_throttling_stat_and_count; + uint16_t dssd_spec_version[3]; + uint64_t pcie_correctable_error_count; + uint32_t incomplete_shutdowns; + uint32_t rsvd116; + uint8_t percent_free_blocks; + uint8_t rsvd121[7]; + uint16_t capacity_health; + uint8_t nvme_errata_ver; + uint8_t rsvd131[5]; + uint64_t unaligned_io; + uint64_t security_ver_num; + uint64_t total_nuse; + uint64_t plp_start_count[2]; + uint64_t endurance_estimate[2]; + uint64_t pcie_retraining_count; + uint64_t power_state_change_count; + uint8_t rsvd208[286]; + uint16_t log_page_version; + uint64_t log_page_guid[2]; +} NvmeSmartLogExtended; + #define NVME_SMART_WARN_MAX 6 enum NvmeSmartWarn { NVME_SMART_SPARE = 1 << 0, @@ -1052,6 +1089,12 @@ enum NvmeLogIdentifier { NVME_LOG_FDP_RUH_USAGE = 0x21, NVME_LOG_FDP_STATS = 0x22, NVME_LOG_FDP_EVENTS = 0x23, + NVME_LOG_VENDOR_START = 0xc0, + NVME_LOG_VENDOR_END = 0xff, +}; + +enum NvmeOcpLogIdentifier { + NVME_OCP_EXTENDED_SMART_INFO = 0xc0, }; typedef struct QEMU_PACKED NvmePSD { @@ -1077,6 +1120,7 @@ enum NvmeIdCns { NVME_ID_CNS_CS_NS = 0x05, NVME_ID_CNS_CS_CTRL = 0x06, NVME_ID_CNS_CS_NS_ACTIVE_LIST = 0x07, + NVME_ID_CNS_CS_IND_NS = 0x08, NVME_ID_CNS_NS_PRESENT_LIST = 0x10, NVME_ID_CNS_NS_PRESENT = 0x11, NVME_ID_CNS_NS_ATTACHED_CTRL_LIST = 0x12, @@ -1087,6 +1131,7 @@ enum NvmeIdCns { NVME_ID_CNS_CS_NS_PRESENT_LIST = 0x1a, NVME_ID_CNS_CS_NS_PRESENT = 0x1b, NVME_ID_CNS_IO_COMMAND_SET = 0x1c, + NVME_ID_CNS_CS_IND_NS_ALLOCATED = 0x1f, }; typedef struct QEMU_PACKED NvmeIdCtrl { @@ -1165,6 +1210,8 @@ typedef struct NvmeIdCtrlZoned { uint8_t rsvd1[4095]; } NvmeIdCtrlZoned; +#define NVME_ID_CTRL_NVM_DMRL_MAX 255 + typedef struct NvmeIdCtrlNvm { uint8_t vsl; uint8_t wzsl; @@ -1182,6 +1229,7 @@ enum NvmeIdCtrlOaes { enum NvmeIdCtrlCtratt { NVME_CTRATT_ENDGRPS = 1 << 4, NVME_CTRATT_ELBAS = 1 << 15, + NVME_CTRATT_MEM = 1 << 16, NVME_CTRATT_FDPS = 1 << 19, }; @@ -1189,9 +1237,10 @@ enum NvmeIdCtrlOacs { NVME_OACS_SECURITY = 1 << 0, NVME_OACS_FORMAT = 1 << 1, NVME_OACS_FW = 1 << 2, - NVME_OACS_NS_MGMT = 1 << 3, + NVME_OACS_NMS = 1 << 3, NVME_OACS_DIRECTIVES = 1 << 5, - NVME_OACS_DBBUF = 1 << 8, + NVME_OACS_VMS = 1 << 7, + NVME_OACS_DBCS = 1 << 8, }; enum NvmeIdCtrlOncs { @@ -1285,6 +1334,8 @@ enum NvmeNsAttachmentOperation { #define NVME_ERR_REC_TLER(err_rec) (err_rec & 0xffff) #define NVME_ERR_REC_DULBE(err_rec) (err_rec & 0x10000) +#define NVME_ID_CTRL_CTRATT_MEM(ctratt) (ctratt & NVME_CTRATT_MEM) + enum NvmeFeatureIds { NVME_ARBITRATION = 0x1, NVME_POWER_MANAGEMENT = 0x2, @@ -1413,9 +1464,28 @@ typedef struct QEMU_PACKED NvmeIdNsNvm { uint8_t pic; uint8_t rsvd9[3]; uint32_t elbaf[NVME_MAX_NLBAF]; - uint8_t rsvd268[3828]; + uint32_t npdgl; + uint32_t nprg; + uint32_t npra; + uint32_t nors; + uint32_t npdal; + uint8_t rsvd288[3808]; } NvmeIdNsNvm; 
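NvmeSmartLogExtended above is a fixed 512-byte wire format (the OCP extended SMART information log page), and the patch later extends _nvme_check_size() with a matching QEMU_BUILD_BUG_ON(). The same build-time guard can be written with plain C11; the toy structure below is illustrative, not part of the patch.

#include <stdint.h>

/* Toy packed record standing in for an NVMe wire structure. */
typedef struct __attribute__((packed)) ToyLogPage {
    uint64_t counter;
    uint32_t flags;
    uint8_t  rsvd[4];
} ToyLogPage;

/* Fails the build, rather than a test run, if a spec-defined size drifts. */
_Static_assert(sizeof(ToyLogPage) == 16, "ToyLogPage must stay exactly 16 bytes");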
+typedef struct QEMU_PACKED NvmeIdNsInd { + uint8_t nsfeat; + uint8_t nmic; + uint8_t rescap; + uint8_t fpi; + uint32_t anagrpid; + uint8_t nsattr; + uint8_t rsvd9; + uint16_t nvmsetid; + uint16_t endgrpid; + uint8_t nstat; + uint8_t rsvd15[4081]; +} NvmeIdNsInd; + typedef struct QEMU_PACKED NvmeIdNsDescr { uint8_t nidt; uint8_t nidl; @@ -1436,8 +1506,10 @@ enum NvmeNsIdentifierType { NVME_NIDT_CSI = 0x04, }; -enum NvmeIdNsNmic { - NVME_NMIC_NS_SHARED = 1 << 0, +enum NvmeIdNsIndependent { + NVME_ID_NS_IND_NMIC_SHRNS = 1 << 0, + NVME_ID_NS_IND_NMIC_DISNS = 1 << 1, + NVME_ID_NS_IND_NSTAT_NRDY = 1 << 0, }; enum NvmeCsi { @@ -1515,6 +1587,16 @@ enum NvmeIdNsMc { NVME_ID_NS_MC_SEPARATE = 1 << 1, }; +enum NvmeIdNsNsfeat { + NVME_ID_NS_NSFEAT_THINP = 1 << 0, + NVME_ID_NS_NSFEAT_NSABPNS = 1 << 1, + NVME_ID_NS_NSFEAT_DAE = 1 << 2, + NVME_ID_NS_NSFEAT_UIDREUSE = 1 << 3, + NVME_ID_NS_NSFEAT_OPTPERF_ALL = 3 << 4, + NVME_ID_NS_NSFEAT_MAM = 1 << 6, + NVME_ID_NS_NSFEAT_OPTRPERF = 1 << 7, +}; + #define NVME_ID_NS_DPS_TYPE(dps) (dps & NVME_ID_NS_DPS_TYPE_MASK) enum NvmePIFormat { @@ -1863,6 +1945,7 @@ static inline void _nvme_check_size(void) QEMU_BUILD_BUG_ON(sizeof(NvmeErrorLog) != 64); QEMU_BUILD_BUG_ON(sizeof(NvmeFwSlotInfoLog) != 512); QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLog) != 512); + QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLogExtended) != 512); QEMU_BUILD_BUG_ON(sizeof(NvmeEffectsLog) != 4096); QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096); QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrlZoned) != 4096); @@ -1870,6 +1953,7 @@ static inline void _nvme_check_size(void) QEMU_BUILD_BUG_ON(sizeof(NvmeLBAF) != 4); QEMU_BUILD_BUG_ON(sizeof(NvmeLBAFE) != 16); QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096); + QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsInd) != 4096); QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsNvm) != 4096); QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsZoned) != 4096); QEMU_BUILD_BUG_ON(sizeof(NvmeSglDescriptor) != 16); diff --git a/include/block/qdict.h b/include/block/qdict.h index b4c28d96a9e..53c4df4cb26 100644 --- a/include/block/qdict.h +++ b/include/block/qdict.h @@ -10,7 +10,7 @@ #ifndef BLOCK_QDICT_H #define BLOCK_QDICT_H -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" QObject *qdict_crumple(const QDict *src, Error **errp); void qdict_flatten(QDict *qdict); diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h index 626706827f5..6570244496a 100644 --- a/include/block/raw-aio.h +++ b/include/block/raw-aio.h @@ -17,6 +17,7 @@ #define QEMU_RAW_AIO_H #include "block/aio.h" +#include "block/block-common.h" #include "qemu/iov.h" /* AIO request types */ @@ -58,11 +59,18 @@ void laio_cleanup(LinuxAioState *s); /* laio_co_submit: submit I/O requests in the thread's current AioContext. */ int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov, - int type, uint64_t dev_max_batch); + int type, BdrvRequestFlags flags, + uint64_t dev_max_batch); bool laio_has_fdsync(int); +bool laio_has_fua(void); void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context); void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context); +#else +static inline bool laio_has_fua(void) +{ + return false; +} #endif /* io_uring.c - Linux io_uring implementation */ #ifdef CONFIG_LINUX_IO_URING @@ -71,9 +79,16 @@ void luring_cleanup(LuringState *s); /* luring_co_submit: submit I/O requests in the thread's current AioContext. 
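The BdrvRequestFlags argument added to laio_co_submit() above, together with laio_has_fua() and its no-Linux-AIO stub, lets a caller push BDRV_REQ_FUA down to the backend while keeping the call site free of CONFIG_LINUX_AIO conditionals; the io_uring declarations just below follow the same scheme. A hedged sketch of such a caller; the function name and the flush fallback are illustrative, not code from this patch.

/* Illustrative write path: use native FUA when available, else flush. */
static int coroutine_fn
submit_pwritev_fua(BlockDriverState *bs, int fd, uint64_t offset,
                   QEMUIOVector *qiov)
{
    int ret;

    if (laio_has_fua()) {
        /* Backend honours FUA natively: one submission, no extra flush. */
        return laio_co_submit(fd, offset, qiov, QEMU_AIO_WRITE,
                              BDRV_REQ_FUA, 0);
    }

    /* Fallback: plain write followed by an explicit flush of the node. */
    ret = laio_co_submit(fd, offset, qiov, QEMU_AIO_WRITE, 0, 0);
    if (ret < 0) {
        return ret;
    }
    return bdrv_co_flush(bs);
}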
*/ int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset, - QEMUIOVector *qiov, int type); + QEMUIOVector *qiov, int type, + BdrvRequestFlags flags); void luring_detach_aio_context(LuringState *s, AioContext *old_context); void luring_attach_aio_context(LuringState *s, AioContext *new_context); +bool luring_has_fua(void); +#else +static inline bool luring_has_fua(void) +{ + return false; +} #endif #ifdef _WIN32 diff --git a/include/block/snapshot.h b/include/block/snapshot.h index 304cc6ea61c..2316a43e265 100644 --- a/include/block/snapshot.h +++ b/include/block/snapshot.h @@ -90,9 +90,9 @@ int bdrv_snapshot_load_tmp_by_id_or_name(BlockDriverState *bs, bool bdrv_all_can_snapshot(bool has_devices, strList *devices, Error **errp); -int bdrv_all_delete_snapshot(const char *name, - bool has_devices, strList *devices, - Error **errp); +int GRAPH_UNLOCKED +bdrv_all_delete_snapshot(const char *name, bool has_devices, strList *devices, + Error **errp); int bdrv_all_goto_snapshot(const char *name, bool has_devices, strList *devices, Error **errp); diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h index 948ff5f30c3..dd48cf07e85 100644 --- a/include/block/thread-pool.h +++ b/include/block/thread-pool.h @@ -24,20 +24,70 @@ typedef int ThreadPoolFunc(void *opaque); -typedef struct ThreadPool ThreadPool; +typedef struct ThreadPoolAio ThreadPoolAio; -ThreadPool *thread_pool_new(struct AioContext *ctx); -void thread_pool_free(ThreadPool *pool); +ThreadPoolAio *thread_pool_new_aio(struct AioContext *ctx); +void thread_pool_free_aio(ThreadPoolAio *pool); /* - * thread_pool_submit* API: submit I/O requests in the thread's + * thread_pool_submit_{aio,co} API: submit I/O requests in the thread's * current AioContext. */ BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg, BlockCompletionFunc *cb, void *opaque); int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg); -void thread_pool_submit(ThreadPoolFunc *func, void *arg); +void thread_pool_update_params(ThreadPoolAio *pool, struct AioContext *ctx); + +/* ------------------------------------------- */ +/* Generic thread pool types and methods below */ +typedef struct ThreadPool ThreadPool; + +/* Create a new thread pool. Never returns NULL. */ +ThreadPool *thread_pool_new(void); + +/* + * Free the thread pool. + * Waits for all the previously submitted work to complete before performing + * the actual freeing operation. + */ +void thread_pool_free(ThreadPool *pool); + +/* + * Submit a new work (task) for the pool. + * + * @opaque_destroy is an optional GDestroyNotify for the @opaque argument + * to the work function at @func. + */ +void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, + void *opaque, GDestroyNotify opaque_destroy); + +/* + * Submit a new work (task) for the pool, making sure it starts getting + * processed immediately, launching a new thread for it if necessary. + * + * @opaque_destroy is an optional GDestroyNotify for the @opaque argument + * to the work function at @func. + */ +void thread_pool_submit_immediate(ThreadPool *pool, ThreadPoolFunc *func, + void *opaque, GDestroyNotify opaque_destroy); + +/* + * Wait for all previously submitted work to complete before returning. 
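Taken together, the generic pool primitives declared here suggest a usage pattern like the sketch below; the worker function and its Chunk payload are hypothetical, and only the thread_pool_* calls come from this header.

/* Illustrative fan-out/fan-in over the generic thread pool. */
static int compress_chunk(void *opaque)
{
    struct Chunk *c = opaque;   /* hypothetical task payload */
    /* ... CPU-bound work on c ... */
    return 0;
}

static void compress_all(struct Chunk **chunks, size_t n)
{
    ThreadPool *pool = thread_pool_new();   /* documented never to return NULL */
    size_t i;

    for (i = 0; i < n; i++) {
        /* No destroy notifier: chunk lifetime is managed by the caller. */
        thread_pool_submit(pool, compress_chunk, chunks[i], NULL);
    }

    thread_pool_wait(pool);   /* barrier: all submitted work has finished */
    thread_pool_free(pool);   /* would also wait, had we skipped the barrier */
}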
+ * + * Can be used as a barrier between two sets of tasks executed on a thread + * pool without destroying it or in a performance sensitive path where the + * caller just wants to wait for all tasks to complete while deferring the + * pool free operation for later, less performance sensitive time. + */ +void thread_pool_wait(ThreadPool *pool); -void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx); +/* Set the maximum number of threads in the pool. */ +bool thread_pool_set_max_threads(ThreadPool *pool, int max_threads); + +/* + * Adjust the maximum number of threads in the pool to give each task its + * own thread (exactly one thread per task). + */ +bool thread_pool_adjust_max_threads_to_work(ThreadPool *pool); #endif diff --git a/include/block/ufs.h b/include/block/ufs.h index 92da7a89b9f..a3ee62b027a 100644 --- a/include/block/ufs.h +++ b/include/block/ufs.h @@ -461,7 +461,7 @@ typedef struct Attributes { uint8_t psa_state; uint32_t psa_data_size; uint8_t ref_clk_gating_wait_time; - uint8_t device_case_rough_temperaure; + uint8_t device_case_rough_temperature; uint8_t device_too_high_temp_boundary; uint8_t device_too_low_temp_boundary; uint8_t throttling_status; @@ -763,6 +763,12 @@ typedef struct QEMU_PACKED UtpTaskReqDesc { */ #define UFS_WB_EXCEED_LIFETIME 0x0B +/* + * The range of valid values of the Active ICC attribute + * is from 0x00 to 0x0F. + */ +#define UFS_QUERY_ATTR_ACTIVE_ICC_MAXVALUE 0x0F + /* * In UFS Spec, the Extra Header Segment (EHS) starts from byte 32 in UPIU * request/response packet @@ -1067,6 +1073,11 @@ enum health_desc_param { UFS_HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4, }; +enum { + UFS_DEV_HIGH_TEMP_NOTIF = BIT(4), + UFS_DEV_LOW_TEMP_NOTIF = BIT(5), +}; + /* WriteBooster buffer mode */ enum { UFS_WB_BUF_MODE_LU_DEDICATED = 0x0, @@ -1085,6 +1096,12 @@ enum ufs_lu_wp_type { UFS_LU_PERM_WP = 0x02, }; +/* Exception event mask values */ +enum { + MASK_EE_TOO_HIGH_TEMP = BIT(3), + MASK_EE_TOO_LOW_TEMP = BIT(4), +}; + /* UTP QUERY Transaction Specific Fields OpCode */ enum query_opcode { UFS_UPIU_QUERY_OPCODE_NOP = 0x0, diff --git a/include/chardev/char-fe.h b/include/chardev/char-fe.h index 3310449eaf0..8ef05b3dd09 100644 --- a/include/chardev/char-fe.h +++ b/include/chardev/char-fe.h @@ -20,7 +20,7 @@ struct CharBackend { IOReadHandler *chr_read; BackendChangeHandler *chr_be_change; void *opaque; - int tag; + unsigned int tag; bool fe_is_open; }; diff --git a/include/chardev/char-socket.h b/include/chardev/char-socket.h index 0708ca6fa97..d6d13ad37f9 100644 --- a/include/chardev/char-socket.h +++ b/include/chardev/char-socket.h @@ -74,7 +74,7 @@ struct SocketChardev { bool is_websock; GSource *reconnect_timer; - int64_t reconnect_time; + int64_t reconnect_time_ms; bool connect_err_reported; QIOTask *connect_task; diff --git a/include/chardev/char.h b/include/chardev/char.h index 01df55f9e8c..429852f8d9d 100644 --- a/include/chardev/char.h +++ b/include/chardev/char.h @@ -232,6 +232,7 @@ OBJECT_DECLARE_TYPE(Chardev, ChardevClass, CHARDEV) #define TYPE_CHARDEV_NULL "chardev-null" #define TYPE_CHARDEV_MUX "chardev-mux" +#define TYPE_CHARDEV_HUB "chardev-hub" #define TYPE_CHARDEV_RINGBUF "chardev-ringbuf" #define TYPE_CHARDEV_PTY "chardev-pty" #define TYPE_CHARDEV_CONSOLE "chardev-console" diff --git a/include/crypto/afsplit.h b/include/crypto/afsplit.h index 4894d643308..06f28fe67c8 100644 --- a/include/crypto/afsplit.h +++ b/include/crypto/afsplit.h @@ -46,7 +46,7 @@ * * splitkey = g_new0(uint8_t, nkey * stripes); * * - * if
(qcrypto_afsplit_encode(QCRYPTO_HASH_ALG_SHA256, + * if (qcrypto_afsplit_encode(QCRYPTO_HASH_ALGO_SHA256, * nkey, stripes, * masterkey, splitkey, errp) < 0) { * g_free(splitkey); @@ -71,7 +71,7 @@ * * masterkey = g_new0(uint8_t, nkey); * - * if (qcrypto_afsplit_decode(QCRYPTO_HASH_ALG_SHA256, + * if (qcrypto_afsplit_decode(QCRYPTO_HASH_ALGO_SHA256, * nkey, stripes, * splitkey, masterkey, errp) < 0) { * g_free(splitkey); @@ -102,7 +102,7 @@ * * Returns: 0 on success, -1 on error; */ -int qcrypto_afsplit_encode(QCryptoHashAlgorithm hash, +int qcrypto_afsplit_encode(QCryptoHashAlgo hash, size_t blocklen, uint32_t stripes, const uint8_t *in, @@ -124,7 +124,7 @@ int qcrypto_afsplit_encode(QCryptoHashAlgorithm hash, * * Returns: 0 on success, -1 on error; */ -int qcrypto_afsplit_decode(QCryptoHashAlgorithm hash, +int qcrypto_afsplit_decode(QCryptoHashAlgo hash, size_t blocklen, uint32_t stripes, const uint8_t *in, diff --git a/include/crypto/block.h b/include/crypto/block.h index 5b5d0398006..b013d27f008 100644 --- a/include/crypto/block.h +++ b/include/crypto/block.h @@ -287,7 +287,7 @@ QCryptoIVGen *qcrypto_block_get_ivgen(QCryptoBlock *block); * * Returns: the hash algorithm */ -QCryptoHashAlgorithm qcrypto_block_get_kdf_hash(QCryptoBlock *block); +QCryptoHashAlgo qcrypto_block_get_kdf_hash(QCryptoBlock *block); /** * qcrypto_block_get_payload_offset: diff --git a/include/crypto/cipher.h b/include/crypto/cipher.h index 083e12a7d9e..92939310ef0 100644 --- a/include/crypto/cipher.h +++ b/include/crypto/cipher.h @@ -26,7 +26,7 @@ typedef struct QCryptoCipher QCryptoCipher; typedef struct QCryptoCipherDriver QCryptoCipherDriver; -/* See also "QCryptoCipherAlgorithm" and "QCryptoCipherMode" +/* See also "QCryptoCipherAlgo" and "QCryptoCipherMode" * enums defined in qapi/crypto.json */ /** @@ -50,12 +50,12 @@ typedef struct QCryptoCipherDriver QCryptoCipherDriver; * size_t keylen = 16; * uint8_t iv = ....; * - * if (!qcrypto_cipher_supports(QCRYPTO_CIPHER_ALG_AES_128)) { + * if (!qcrypto_cipher_supports(QCRYPTO_CIPHER_ALGO_AES_128)) { * error_report(errp, "Feature requires AES cipher support"); * return -1; * } * - * cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALG_AES_128, + * cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALGO_AES_128, * QCRYPTO_CIPHER_MODE_CBC, * key, keylen, * errp); @@ -78,7 +78,7 @@ typedef struct QCryptoCipherDriver QCryptoCipherDriver; */ struct QCryptoCipher { - QCryptoCipherAlgorithm alg; + QCryptoCipherAlgo alg; QCryptoCipherMode mode; const QCryptoCipherDriver *driver; }; @@ -93,7 +93,7 @@ struct QCryptoCipher { * * Returns: true if the algorithm is supported, false otherwise */ -bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, +bool qcrypto_cipher_supports(QCryptoCipherAlgo alg, QCryptoCipherMode mode); /** @@ -106,7 +106,7 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, * * Returns: the block size in bytes */ -size_t qcrypto_cipher_get_block_len(QCryptoCipherAlgorithm alg); +size_t qcrypto_cipher_get_block_len(QCryptoCipherAlgo alg); /** @@ -117,7 +117,7 @@ size_t qcrypto_cipher_get_block_len(QCryptoCipherAlgorithm alg); * * Returns: the key size in bytes */ -size_t qcrypto_cipher_get_key_len(QCryptoCipherAlgorithm alg); +size_t qcrypto_cipher_get_key_len(QCryptoCipherAlgo alg); /** @@ -130,7 +130,7 @@ size_t qcrypto_cipher_get_key_len(QCryptoCipherAlgorithm alg); * * Returns: the IV size in bytes, or 0 if no IV is permitted */ -size_t qcrypto_cipher_get_iv_len(QCryptoCipherAlgorithm alg, +size_t qcrypto_cipher_get_iv_len(QCryptoCipherAlgo alg, 
QCryptoCipherMode mode); @@ -156,7 +156,7 @@ size_t qcrypto_cipher_get_iv_len(QCryptoCipherAlgorithm alg, * * Returns: a new cipher object, or NULL on error */ -QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgorithm alg, +QCryptoCipher *qcrypto_cipher_new(QCryptoCipherAlgo alg, QCryptoCipherMode mode, const uint8_t *key, size_t nkey, Error **errp); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 54d87aa2a13..1868d4a0f78 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -1,6 +1,7 @@ /* * QEMU Crypto hash algorithms * + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates * Copyright (c) 2015 Red Hat, Inc. * * This library is free software; you can redistribute it and/or @@ -23,7 +24,23 @@ #include "qapi/qapi-types-crypto.h" -/* See also "QCryptoHashAlgorithm" defined in qapi/crypto.json */ +#define QCRYPTO_HASH_DIGEST_LEN_MD5 16 +#define QCRYPTO_HASH_DIGEST_LEN_SHA1 20 +#define QCRYPTO_HASH_DIGEST_LEN_SHA224 28 +#define QCRYPTO_HASH_DIGEST_LEN_SHA256 32 +#define QCRYPTO_HASH_DIGEST_LEN_SHA384 48 +#define QCRYPTO_HASH_DIGEST_LEN_SHA512 64 +#define QCRYPTO_HASH_DIGEST_LEN_RIPEMD160 20 +#define QCRYPTO_HASH_DIGEST_LEN_SM3 32 + +/* See also "QCryptoHashAlgo" defined in qapi/crypto.json */ + +typedef struct QCryptoHash QCryptoHash; +struct QCryptoHash { + QCryptoHashAlgo alg; + void *opaque; + void *driver; +}; /** * qcrypto_hash_supports: @@ -34,7 +51,7 @@ * * Returns: true if the algorithm is supported, false otherwise */ -gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg); +gboolean qcrypto_hash_supports(QCryptoHashAlgo alg); /** @@ -45,7 +62,7 @@ gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg); * * Returns: the digest length in bytes */ -size_t qcrypto_hash_digest_len(QCryptoHashAlgorithm alg); +size_t qcrypto_hash_digest_len(QCryptoHashAlgo alg); /** * qcrypto_hash_bytesv: @@ -57,15 +74,22 @@ size_t qcrypto_hash_digest_len(QCryptoHashAlgorithm alg); * @errp: pointer to a NULL-initialized error object * * Computes the hash across all the memory regions - * present in @iov. The @result pointer will be - * filled with raw bytes representing the computed - * hash, which will have length @resultlen. The - * memory pointer in @result must be released - * with a call to g_free() when no longer required. + * present in @iov. + * + * If @result_len is set to a non-zero value by the caller, then + * @result must hold a pointer that is @result_len in size, and + * @result_len match the size of the hash output. The digest will + * be written into @result. + * + * If @result_len is set to zero, then this function will allocate + * a buffer to hold the hash output digest, storing a pointer to + * the buffer in @result, and setting @result_len to its size. + * The memory referenced in @result must be released with a call + * to g_free() when no longer required by the caller. * * Returns: 0 on success, -1 on error */ -int qcrypto_hash_bytesv(QCryptoHashAlgorithm alg, +int qcrypto_hash_bytesv(QCryptoHashAlgo alg, const struct iovec *iov, size_t niov, uint8_t **result, @@ -82,15 +106,22 @@ int qcrypto_hash_bytesv(QCryptoHashAlgorithm alg, * @errp: pointer to a NULL-initialized error object * * Computes the hash across all the memory region - * @buf of length @len. The @result pointer will be - * filled with raw bytes representing the computed - * hash, which will have length @resultlen. The - * memory pointer in @result must be released - * with a call to g_free() when no longer required. + * @buf of length @len. 
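The reworked @result/@result_len contract just described admits two calling styles; a hedged sketch of both with the one-shot qcrypto_hash_bytes() helper (buf, len and errp are assumed to be in scope, and the digest length constant comes from this header):

/* Style 1: caller-provided buffer; *result_len must equal the digest size. */
uint8_t digest[QCRYPTO_HASH_DIGEST_LEN_SHA256];
uint8_t *out = digest;
size_t out_len = sizeof(digest);

if (qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_SHA256, buf, len,
                       &out, &out_len, errp) < 0) {
    return -1;
}

/* Style 2: pass zero and let the function allocate; g_free() the result. */
uint8_t *alloced = NULL;
size_t alloced_len = 0;

if (qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_SHA256, buf, len,
                       &alloced, &alloced_len, errp) < 0) {
    return -1;
}
g_free(alloced);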
+ * + * If @result_len is set to a non-zero value by the caller, then + * @result must hold a pointer that is @result_len in size, and + * @result_len match the size of the hash output. The digest will + * be written into @result. + * + * If @result_len is set to zero, then this function will allocate + * a buffer to hold the hash output digest, storing a pointer to + * the buffer in @result, and setting @result_len to its size. + * The memory referenced in @result must be released with a call + * to g_free() when no longer required by the caller. * * Returns: 0 on success, -1 on error */ -int qcrypto_hash_bytes(QCryptoHashAlgorithm alg, +int qcrypto_hash_bytes(QCryptoHashAlgo alg, const char *buf, size_t len, uint8_t **result, @@ -114,12 +145,132 @@ int qcrypto_hash_bytes(QCryptoHashAlgorithm alg, * * Returns: 0 on success, -1 on error */ -int qcrypto_hash_digestv(QCryptoHashAlgorithm alg, +int qcrypto_hash_digestv(QCryptoHashAlgo alg, const struct iovec *iov, size_t niov, char **digest, Error **errp); +/** + * qcrypto_hash_updatev: + * @hash: hash object from qcrypto_hash_new + * @iov: the array of memory regions to hash + * @niov: the length of @iov + * @errp: pointer to a NULL-initialized error object + * + * Updates the given hash object with all the memory regions + * present in @iov. + * + * Returns: 0 on success, -1 on error + */ +int qcrypto_hash_updatev(QCryptoHash *hash, + const struct iovec *iov, + size_t niov, + Error **errp); +/** + * qcrypto_hash_update: + * @hash: hash object from qcrypto_hash_new + * @buf: the memory region to hash + * @len: the length of @buf + * @errp: pointer to a NULL-initialized error object + * + * Updates the given hash object with the data from + * the given buffer. + * + * Returns: 0 on success, -1 on error + */ +int qcrypto_hash_update(QCryptoHash *hash, + const char *buf, + size_t len, + Error **errp); + +/** + * qcrypto_hash_finalize_digest: + * @hash: the hash object to finalize + * @digest: pointer to hold output hash + * @errp: pointer to a NULL-initialized error object + * + * Computes the hash from the given hash object. Hash object + * is expected to have its data updated from the qcrypto_hash_update function. + * The @digest pointer will be filled with the printable hex digest of the + * computed hash, which will be terminated by '\0'. The memory pointer + * in @digest must be released with a call to g_free() when + * no longer required. + * + * Returns: 0 on success, -1 on error + */ +int qcrypto_hash_finalize_digest(QCryptoHash *hash, + char **digest, + Error **errp); + +/** + * qcrypto_hash_finalize_base64: + * @hash_ctx: hash object to finalize + * @base64: pointer to store the hash result in + * @errp: pointer to a NULL-initialized error object + * + * Computes the hash from the given hash object. Hash object + * is expected to have it's data updated from the qcrypto_hash_update function. + * The @base64 pointer will be filled with the base64 encoding of the computed + * hash, which will be terminated by '\0'. The memory pointer in @base64 + * must be released with a call to g_free() when no longer required. + * + * Returns: 0 on success, -1 on error + */ +int qcrypto_hash_finalize_base64(QCryptoHash *hash, + char **base64, + Error **errp); + +/** + * qcrypto_hash_finalize_bytes: + * @hash_ctx: hash object to finalize + * @result: pointer to store the hash result in + * @result_len: Pointer to store the length of the result in + * @errp: pointer to a NULL-initialized error object + * + * Computes the hash from the given hash object. 
Hash object + * is expected to have it's data updated from the qcrypto_hash_update function. + * + * If @result_len is set to a non-zero value by the caller, then + * @result must hold a pointer that is @result_len in size, and + * @result_len match the size of the hash output. The digest will + * be written into @result. + * + * If @result_len is set to zero, then this function will allocate + * a buffer to hold the hash output digest, storing a pointer to + * the buffer in @result, and setting @result_len to its size. + * The memory referenced in @result must be released with a call + * to g_free() when no longer required by the caller. + * + * Returns: 0 on success, -1 on error + */ +int qcrypto_hash_finalize_bytes(QCryptoHash *hash, + uint8_t **result, + size_t *result_len, + Error **errp); + +/** + * qcrypto_hash_new: + * @alg: the hash algorithm + * @errp: pointer to a NULL-initialized error object + * + * Creates a new hashing context for the chosen algorithm for + * usage with qcrypto_hash_update. + * + * Returns: New hash object with the given algorithm, or NULL on error. + */ +QCryptoHash *qcrypto_hash_new(QCryptoHashAlgo alg, Error **errp); + +/** + * qcrypto_hash_free: + * @hash: hash object to free + * + * Frees a hashing context for the chosen algorithm. + */ +void qcrypto_hash_free(QCryptoHash *hash); + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoHash, qcrypto_hash_free) + /** * qcrypto_hash_digest: * @alg: the hash algorithm @@ -137,7 +288,7 @@ int qcrypto_hash_digestv(QCryptoHashAlgorithm alg, * * Returns: 0 on success, -1 on error */ -int qcrypto_hash_digest(QCryptoHashAlgorithm alg, +int qcrypto_hash_digest(QCryptoHashAlgo alg, const char *buf, size_t len, char **digest, @@ -160,7 +311,7 @@ int qcrypto_hash_digest(QCryptoHashAlgorithm alg, * * Returns: 0 on success, -1 on error */ -int qcrypto_hash_base64v(QCryptoHashAlgorithm alg, +int qcrypto_hash_base64v(QCryptoHashAlgo alg, const struct iovec *iov, size_t niov, char **base64, @@ -183,7 +334,7 @@ int qcrypto_hash_base64v(QCryptoHashAlgorithm alg, * * Returns: 0 on success, -1 on error */ -int qcrypto_hash_base64(QCryptoHashAlgorithm alg, +int qcrypto_hash_base64(QCryptoHashAlgo alg, const char *buf, size_t len, char **base64, diff --git a/include/crypto/hmac.h b/include/crypto/hmac.h index ad4d7784161..da8a1e3ceb9 100644 --- a/include/crypto/hmac.h +++ b/include/crypto/hmac.h @@ -16,7 +16,7 @@ typedef struct QCryptoHmac QCryptoHmac; struct QCryptoHmac { - QCryptoHashAlgorithm alg; + QCryptoHashAlgo alg; void *opaque; void *driver; }; @@ -31,7 +31,7 @@ struct QCryptoHmac { * Returns: * true if the algorithm is supported, false otherwise */ -bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg); +bool qcrypto_hmac_supports(QCryptoHashAlgo alg); /** * qcrypto_hmac_new: @@ -52,7 +52,7 @@ bool qcrypto_hmac_supports(QCryptoHashAlgorithm alg); * Returns: * a new hmac object, or NULL on error */ -QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgorithm alg, +QCryptoHmac *qcrypto_hmac_new(QCryptoHashAlgo alg, const uint8_t *key, size_t nkey, Error **errp); @@ -77,11 +77,18 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoHmac, qcrypto_hmac_free) * @errp: pointer to a NULL-initialized error object * * Computes the hmac across all the memory regions - * present in @iov. The @result pointer will be - * filled with raw bytes representing the computed - * hmac, which will have length @resultlen. The - * memory pointer in @result must be released - * with a call to g_free() when no longer required. + * present in @iov. 
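Returning to the streaming interface added in hash.h above, the new object API composes as in this sketch; chunk1/chunk2 and their lengths are assumed inputs, and the fragment is written as if inside a function that returns int and receives errp.

/* Illustrative incremental hashing with the new QCryptoHash object. */
g_autoptr(QCryptoHash) hash = qcrypto_hash_new(QCRYPTO_HASH_ALGO_SHA256, errp);
char *hex = NULL;

if (!hash) {
    return -1;
}

if (qcrypto_hash_update(hash, chunk1, chunk1_len, errp) < 0 ||
    qcrypto_hash_update(hash, chunk2, chunk2_len, errp) < 0) {
    return -1;
}

if (qcrypto_hash_finalize_digest(hash, &hex, errp) < 0) {
    return -1;
}
/* hex is a NUL-terminated printable digest; release it with g_free(). */
g_free(hex);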
+ * + * If @result_len is set to a non-zero value by the caller, then + * @result must hold a pointer that is @result_len in size, and + * @result_len match the size of the hash output. The digest will + * be written into @result. + * + * If @result_len is set to zero, then this function will allocate + * a buffer to hold the hash output digest, storing a pointer to + * the buffer in @result, and setting @result_len to its size. + * The memory referenced in @result must be released with a call + * to g_free() when no longer required by the caller. * * Returns: * 0 on success, -1 on error @@ -103,11 +110,18 @@ int qcrypto_hmac_bytesv(QCryptoHmac *hmac, * @errp: pointer to a NULL-initialized error object * * Computes the hmac across all the memory region - * @buf of length @len. The @result pointer will be - * filled with raw bytes representing the computed - * hmac, which will have length @resultlen. The - * memory pointer in @result must be released - * with a call to g_free() when no longer required. + * @buf of length @len. + * + * If @result_len is set to a non-zero value by the caller, then + * @result must hold a pointer that is @result_len in size, and + * @result_len match the size of the hash output. The digest will + * be written into @result. + * + * If @result_len is set to zero, then this function will allocate + * a buffer to hold the hash output digest, storing a pointer to + * the buffer in @result, and setting @result_len to its size. + * The memory referenced in @result must be released with a call + * to g_free() when no longer required by the caller. * * Returns: * 0 on success, -1 on error diff --git a/include/crypto/ivgen.h b/include/crypto/ivgen.h index a09d5732daf..bfa5d28103f 100644 --- a/include/crypto/ivgen.h +++ b/include/crypto/ivgen.h @@ -44,22 +44,22 @@ * * g_assert((ndata % 512) == 0); * - * QCryptoIVGen *ivgen = qcrypto_ivgen_new(QCRYPTO_IVGEN_ALG_ESSIV, - * QCRYPTO_CIPHER_ALG_AES_128, - * QCRYPTO_HASH_ALG_SHA256, + * QCryptoIVGen *ivgen = qcrypto_ivgen_new(QCRYPTO_IV_GEN_ALGO_ESSIV, + * QCRYPTO_CIPHER_ALGO_AES_128, + * QCRYPTO_HASH_ALGO_SHA256, * key, nkey, errp); * if (!ivgen) { * return -1; * } * - * QCryptoCipher *cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALG_AES_128, + * QCryptoCipher *cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALGO_AES_128, * QCRYPTO_CIPHER_MODE_CBC, * key, nkey, errp); * if (!cipher) { * goto error; * } * - * niv = qcrypto_cipher_get_iv_len(QCRYPTO_CIPHER_ALG_AES_128, + * niv = qcrypto_cipher_get_iv_len(QCRYPTO_CIPHER_ALGO_AES_128, * QCRYPTO_CIPHER_MODE_CBC); * iv = g_new0(uint8_t, niv); * @@ -97,7 +97,7 @@ typedef struct QCryptoIVGen QCryptoIVGen; -/* See also QCryptoIVGenAlgorithm enum in qapi/crypto.json */ +/* See also QCryptoIVGenAlgo enum in qapi/crypto.json */ /** @@ -113,19 +113,19 @@ typedef struct QCryptoIVGen QCryptoIVGen; * are required or not depends on the choice of @alg * requested. * - * - QCRYPTO_IVGEN_ALG_PLAIN + * - QCRYPTO_IV_GEN_ALGO_PLAIN * * The IVs are generated by the 32-bit truncated sector * number. This should never be used for block devices * that are larger than 2^32 sectors in size. * All the other parameters are unused. * - * - QCRYPTO_IVGEN_ALG_PLAIN64 + * - QCRYPTO_IV_GEN_ALGO_PLAIN64 * * The IVs are generated by the 64-bit sector number. * All the other parameters are unused. * - * - QCRYPTO_IVGEN_ALG_ESSIV: + * - QCRYPTO_IV_GEN_ALGO_ESSIV: * * The IVs are generated by encrypting the 64-bit sector * number with a hash of an encryption key. 
The @cipheralg, @@ -133,9 +133,9 @@ typedef struct QCryptoIVGen QCryptoIVGen; * * Returns: a new IV generator, or NULL on error */ -QCryptoIVGen *qcrypto_ivgen_new(QCryptoIVGenAlgorithm alg, - QCryptoCipherAlgorithm cipheralg, - QCryptoHashAlgorithm hash, +QCryptoIVGen *qcrypto_ivgen_new(QCryptoIVGenAlgo alg, + QCryptoCipherAlgo cipheralg, + QCryptoHashAlgo hash, const uint8_t *key, size_t nkey, Error **errp); @@ -167,7 +167,7 @@ int qcrypto_ivgen_calculate(QCryptoIVGen *ivgen, * * Returns: the IV generator algorithm */ -QCryptoIVGenAlgorithm qcrypto_ivgen_get_algorithm(QCryptoIVGen *ivgen); +QCryptoIVGenAlgo qcrypto_ivgen_get_algorithm(QCryptoIVGen *ivgen); /** @@ -179,7 +179,7 @@ QCryptoIVGenAlgorithm qcrypto_ivgen_get_algorithm(QCryptoIVGen *ivgen); * * Returns: the cipher algorithm */ -QCryptoCipherAlgorithm qcrypto_ivgen_get_cipher(QCryptoIVGen *ivgen); +QCryptoCipherAlgo qcrypto_ivgen_get_cipher(QCryptoIVGen *ivgen); /** @@ -191,7 +191,7 @@ QCryptoCipherAlgorithm qcrypto_ivgen_get_cipher(QCryptoIVGen *ivgen); * * Returns: the hash algorithm */ -QCryptoHashAlgorithm qcrypto_ivgen_get_hash(QCryptoIVGen *ivgen); +QCryptoHashAlgo qcrypto_ivgen_get_hash(QCryptoIVGen *ivgen); /** diff --git a/include/crypto/pbkdf.h b/include/crypto/pbkdf.h index 2c31a44a276..cf59fce610e 100644 --- a/include/crypto/pbkdf.h +++ b/include/crypto/pbkdf.h @@ -38,7 +38,7 @@ * .... * * char *password = "a-typical-awful-user-password"; - * size_t nkey = qcrypto_cipher_get_key_len(QCRYPTO_CIPHER_ALG_AES_128); + * size_t nkey = qcrypto_cipher_get_key_len(QCRYPTO_CIPHER_ALGO_AES_128); * uint8_t *salt = g_new0(uint8_t, nkey); * uint8_t *key = g_new0(uint8_t, nkey); * int iterations; @@ -50,7 +50,7 @@ * return -1; * } * - * iterations = qcrypto_pbkdf2_count_iters(QCRYPTO_HASH_ALG_SHA256, + * iterations = qcrypto_pbkdf2_count_iters(QCRYPTO_HASH_ALGO_SHA256, * (const uint8_t *)password, * strlen(password), * salt, nkey, errp); @@ -60,7 +60,7 @@ * return -1; * } * - * if (qcrypto_pbkdf2(QCRYPTO_HASH_ALG_SHA256, + * if (qcrypto_pbkdf2(QCRYPTO_HASH_ALGO_SHA256, * (const uint8_t *)password, strlen(password), * salt, nkey, iterations, key, nkey, errp) < 0) { * g_free(key); @@ -70,7 +70,7 @@ * * g_free(salt); * - * cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALG_AES_128, + * cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALGO_AES_128, * QCRYPTO_CIPHER_MODE_ECB, * key, nkey, errp); * g_free(key); @@ -92,7 +92,7 @@ * * Returns true if supported, false otherwise */ -bool qcrypto_pbkdf2_supports(QCryptoHashAlgorithm hash); +bool qcrypto_pbkdf2_supports(QCryptoHashAlgo hash); /** @@ -119,7 +119,7 @@ bool qcrypto_pbkdf2_supports(QCryptoHashAlgorithm hash); * * Returns: 0 on success, -1 on error */ -int qcrypto_pbkdf2(QCryptoHashAlgorithm hash, +int qcrypto_pbkdf2(QCryptoHashAlgo hash, const uint8_t *key, size_t nkey, const uint8_t *salt, size_t nsalt, uint64_t iterations, @@ -147,7 +147,7 @@ int qcrypto_pbkdf2(QCryptoHashAlgorithm hash, * * Returns: number of iterations in 1 second, -1 on error */ -uint64_t qcrypto_pbkdf2_count_iters(QCryptoHashAlgorithm hash, +uint64_t qcrypto_pbkdf2_count_iters(QCryptoHashAlgo hash, const uint8_t *key, size_t nkey, const uint8_t *salt, size_t nsalt, size_t nout, diff --git a/include/crypto/tlssession.h b/include/crypto/tlssession.h index f694a5c3c55..2f62ce2d67a 100644 --- a/include/crypto/tlssession.h +++ b/include/crypto/tlssession.h @@ -75,12 +75,14 @@ * GINT_TO_POINTER(fd)); * * while (1) { - * if (qcrypto_tls_session_handshake(sess, errp) < 0) { + * int ret = qcrypto_tls_session_handshake(sess, 
errp); + * + * if (ret < 0) { * qcrypto_tls_session_free(sess); * return -1; * } * - * switch(qcrypto_tls_session_get_handshake_status(sess)) { + * switch(ret) { * case QCRYPTO_TLS_HANDSHAKE_COMPLETE: * if (qcrypto_tls_session_check_credentials(sess, errp) < )) { * qcrypto_tls_session_free(sess); @@ -163,6 +165,20 @@ void qcrypto_tls_session_free(QCryptoTLSSession *sess); G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoTLSSession, qcrypto_tls_session_free) +/** + * qcrypto_tls_session_require_thread_safety: + * @sess: the TLS session object + * + * Mark that this TLS session will require thread safety + * for concurrent I/O in both directions. This must be + * called before the handshake is performed. + * + * This will activate a workaround for GNUTLS thread + * safety issues, where appropriate for the negotiated + * TLS session parameters. + */ +void qcrypto_tls_session_require_thread_safety(QCryptoTLSSession *sess); + /** * qcrypto_tls_session_check_credentials: * @sess: the TLS session object @@ -170,7 +186,7 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoTLSSession, qcrypto_tls_session_free) * * Validate the peer's credentials after a successful * TLS handshake. It is an error to call this before - * qcrypto_tls_session_get_handshake_status() returns + * qcrypto_tls_session_handshake() returns * QCRYPTO_TLS_HANDSHAKE_COMPLETE * * Returns 0 if the credentials validated, -1 on error @@ -226,7 +242,7 @@ void qcrypto_tls_session_set_callbacks(QCryptoTLSSession *sess, * registered with qcrypto_tls_session_set_callbacks() * * It is an error to call this before - * qcrypto_tls_session_get_handshake_status() returns + * qcrypto_tls_session_handshake() returns * QCRYPTO_TLS_HANDSHAKE_COMPLETE * * Returns: the number of bytes sent, @@ -256,7 +272,7 @@ ssize_t qcrypto_tls_session_write(QCryptoTLSSession *sess, * opposed to an error. * * It is an error to call this before - * qcrypto_tls_session_get_handshake_status() returns + * qcrypto_tls_session_handshake() returns * QCRYPTO_TLS_HANDSHAKE_COMPLETE * * Returns: the number of bytes received, @@ -289,8 +305,7 @@ size_t qcrypto_tls_session_check_pending(QCryptoTLSSession *sess); * the underlying data channel is non-blocking, then * this method may return control before the handshake * is complete. On non-blocking channels the - * qcrypto_tls_session_get_handshake_status() method - * should be used to determine whether the handshake + * return value determines whether the handshake * has completed, or is waiting to send or receive * data. In the latter cases, the caller should setup * an event loop watch and call this method again @@ -306,22 +321,27 @@ typedef enum { QCRYPTO_TLS_HANDSHAKE_RECVING, } QCryptoTLSSessionHandshakeStatus; +typedef enum { + QCRYPTO_TLS_BYE_COMPLETE, + QCRYPTO_TLS_BYE_SENDING, + QCRYPTO_TLS_BYE_RECVING, +} QCryptoTLSSessionByeStatus; + /** - * qcrypto_tls_session_get_handshake_status: - * @sess: the TLS session object - * - * Check the status of the TLS handshake. This - * is used with non-blocking data channels to - * determine whether the handshake is waiting - * to send or receive further data to/from the - * remote peer. + * qcrypto_tls_session_bye: + * @session: the TLS session object + * @errp: pointer to a NULL-initialized error object * - * Once this returns QCRYPTO_TLS_HANDSHAKE_COMPLETE - * it is permitted to send/receive payload data on - * the channel + * Start, or continue, a TLS termination sequence. If the underlying + * data channel is non-blocking, then this method may return control + * before the termination is complete. 
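qcrypto_tls_session_bye() follows the same convention the patch gives the handshake helper: the return value reports progress rather than requiring a separate status query. A hedged sketch of driving it over a non-blocking channel; the wait_for_fd() helper is hypothetical, and the negative-return-on-error convention is assumed to match qcrypto_tls_session_handshake().

/* Illustrative graceful TLS shutdown loop on a non-blocking channel. */
for (;;) {
    int status = qcrypto_tls_session_bye(sess, errp);

    if (status < 0) {
        return -1;                          /* termination failed */
    }
    if (status == QCRYPTO_TLS_BYE_COMPLETE) {
        break;                              /* close_notify exchanged */
    }

    /* Still in progress: wait until the socket is ready again. */
    wait_for_fd(fd, status == QCRYPTO_TLS_BYE_SENDING ? G_IO_OUT : G_IO_IN);
}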
The return value will indicate + * whether the termination has completed, or is waiting to send or + * receive data. In the latter cases, the caller should setup an event + * loop watch and call this method again once the underlying data + * channel is ready to read or write again. */ -QCryptoTLSSessionHandshakeStatus -qcrypto_tls_session_get_handshake_status(QCryptoTLSSession *sess); +int +qcrypto_tls_session_bye(QCryptoTLSSession *session, Error **errp); /** * qcrypto_tls_session_get_key_size: diff --git a/include/crypto/x509-utils.h b/include/crypto/x509-utils.h new file mode 100644 index 00000000000..1e99661a711 --- /dev/null +++ b/include/crypto/x509-utils.h @@ -0,0 +1,22 @@ +/* + * X.509 certificate related helpers + * + * Copyright (c) 2024 Dorjoy Chowdhury + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#ifndef QCRYPTO_X509_UTILS_H +#define QCRYPTO_X509_UTILS_H + +#include "crypto/hash.h" + +int qcrypto_get_x509_cert_fingerprint(uint8_t *cert, size_t size, + QCryptoHashAlgo hash, + uint8_t *result, + size_t *resultlen, + Error **errp); + +#endif diff --git a/include/disas/capstone.h b/include/disas/capstone.h index a11985151d3..c43033f7f60 100644 --- a/include/disas/capstone.h +++ b/include/disas/capstone.h @@ -4,6 +4,7 @@ #ifdef CONFIG_CAPSTONE #define CAPSTONE_AARCH64_COMPAT_HEADER +#define CAPSTONE_SYSTEMZ_COMPAT_HEADER #include #else diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h index a1d26ce903c..3b50ecfb540 100644 --- a/include/disas/dis-asm.h +++ b/include/disas/dis-asm.h @@ -232,10 +232,6 @@ enum bfd_architecture #define bfd_mach_avrxmega5 105 #define bfd_mach_avrxmega6 106 #define bfd_mach_avrxmega7 107 - bfd_arch_cris, /* Axis CRIS */ -#define bfd_mach_cris_v0_v10 255 -#define bfd_mach_cris_v32 32 -#define bfd_mach_cris_v10_v32 1032 bfd_arch_microblaze, /* Xilinx MicroBlaze. */ bfd_arch_moxie, /* The Moxie core. */ bfd_arch_ia64, /* HP/Intel ia64 */ @@ -448,8 +444,6 @@ int print_insn_w65 (bfd_vma, disassemble_info*); int print_insn_d10v (bfd_vma, disassemble_info*); int print_insn_v850 (bfd_vma, disassemble_info*); int print_insn_tic30 (bfd_vma, disassemble_info*); -int print_insn_crisv32 (bfd_vma, disassemble_info*); -int print_insn_crisv10 (bfd_vma, disassemble_info*); int print_insn_microblaze (bfd_vma, disassemble_info*); int print_insn_ia64 (bfd_vma, disassemble_info*); int print_insn_xtensa (bfd_vma, disassemble_info*); diff --git a/include/exec/confidential-guest-support.h b/include/exec/confidential-guest-support.h deleted file mode 100644 index 02dc4e518f0..00000000000 --- a/include/exec/confidential-guest-support.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * QEMU Confidential Guest support - * This interface describes the common pieces between various - * schemes for protecting guest memory or other state against a - * compromised hypervisor. This includes memory encryption (AMD's - * SEV and Intel's MKTME) or special protection modes (PEF on POWER, - * or PV on s390x). - * - * Copyright Red Hat. - * - * Authors: - * David Gibson - * - * This work is licensed under the terms of the GNU GPL, version 2 or - * later. See the COPYING file in the top-level directory. 
- * - */ -#ifndef QEMU_CONFIDENTIAL_GUEST_SUPPORT_H -#define QEMU_CONFIDENTIAL_GUEST_SUPPORT_H - -#ifndef CONFIG_USER_ONLY - -#include "qom/object.h" - -#define TYPE_CONFIDENTIAL_GUEST_SUPPORT "confidential-guest-support" -OBJECT_DECLARE_TYPE(ConfidentialGuestSupport, - ConfidentialGuestSupportClass, - CONFIDENTIAL_GUEST_SUPPORT) - - -struct ConfidentialGuestSupport { - Object parent; - - /* - * True if the machine should use guest_memfd for RAM. - */ - bool require_guest_memfd; - - /* - * ready: flag set by CGS initialization code once it's ready to - * start executing instructions in a potentially-secure - * guest - * - * The definition here is a bit fuzzy, because this is essentially - * part of a self-sanity-check, rather than a strict mechanism. - * - * It's not feasible to have a single point in the common machine - * init path to configure confidential guest support, because - * different mechanisms have different interdependencies requiring - * initialization in different places, often in arch or machine - * type specific code. It's also usually not possible to check - * for invalid configurations until that initialization code. - * That means it would be very easy to have a bug allowing CGS - * init to be bypassed entirely in certain configurations. - * - * Silently ignoring a requested security feature would be bad, so - * to avoid that we check late in init that this 'ready' flag is - * set if CGS was requested. If the CGS init hasn't happened, and - * so 'ready' is not set, we'll abort. - */ - bool ready; -}; - -typedef struct ConfidentialGuestSupportClass { - ObjectClass parent; - - int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp); - int (*kvm_reset)(ConfidentialGuestSupport *cgs, Error **errp); -} ConfidentialGuestSupportClass; - -static inline int confidential_guest_kvm_init(ConfidentialGuestSupport *cgs, - Error **errp) -{ - ConfidentialGuestSupportClass *klass; - - klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs); - if (klass->kvm_init) { - return klass->kvm_init(cgs, errp); - } - - return 0; -} - -static inline int confidential_guest_kvm_reset(ConfidentialGuestSupport *cgs, - Error **errp) -{ - ConfidentialGuestSupportClass *klass; - - klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs); - if (klass->kvm_reset) { - return klass->kvm_reset(cgs, errp); - } - - return 0; -} - -#endif /* !CONFIG_USER_ONLY */ - -#endif /* QEMU_CONFIDENTIAL_GUEST_SUPPORT_H */ diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h deleted file mode 100644 index 6f09b86e7f3..00000000000 --- a/include/exec/cpu-all.h +++ /dev/null @@ -1,375 +0,0 @@ -/* - * defines common to all virtual CPUs - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
- */ -#ifndef CPU_ALL_H -#define CPU_ALL_H - -#include "exec/page-protection.h" -#include "exec/cpu-common.h" -#include "exec/memory.h" -#include "exec/tswap.h" -#include "hw/core/cpu.h" - -/* some important defines: - * - * HOST_BIG_ENDIAN : whether the host cpu is big endian and - * otherwise little endian. - * - * TARGET_BIG_ENDIAN : same for the target cpu - */ - -#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN -#define BSWAP_NEEDED -#endif - -/* Target-endianness CPU memory access functions. These fit into the - * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h. - */ -#if TARGET_BIG_ENDIAN -#define lduw_p(p) lduw_be_p(p) -#define ldsw_p(p) ldsw_be_p(p) -#define ldl_p(p) ldl_be_p(p) -#define ldq_p(p) ldq_be_p(p) -#define stw_p(p, v) stw_be_p(p, v) -#define stl_p(p, v) stl_be_p(p, v) -#define stq_p(p, v) stq_be_p(p, v) -#define ldn_p(p, sz) ldn_be_p(p, sz) -#define stn_p(p, sz, v) stn_be_p(p, sz, v) -#else -#define lduw_p(p) lduw_le_p(p) -#define ldsw_p(p) ldsw_le_p(p) -#define ldl_p(p) ldl_le_p(p) -#define ldq_p(p) ldq_le_p(p) -#define stw_p(p, v) stw_le_p(p, v) -#define stl_p(p, v) stl_le_p(p, v) -#define stq_p(p, v) stq_le_p(p, v) -#define ldn_p(p, sz) ldn_le_p(p, sz) -#define stn_p(p, sz, v) stn_le_p(p, sz, v) -#endif - -/* MMU memory access macros */ - -#if defined(CONFIG_USER_ONLY) -#include "user/abitypes.h" - -/* - * If non-zero, the guest virtual address space is a contiguous subset - * of the host virtual address space, i.e. '-R reserved_va' is in effect - * either from the command-line or by default. The value is the last - * byte of the guest address space e.g. UINT32_MAX. - * - * If zero, the host and guest virtual address spaces are intermingled. - */ -extern unsigned long reserved_va; - -/* - * Limit the guest addresses as best we can. - * - * When not using -R reserved_va, we cannot really limit the guest - * to less address space than the host. For 32-bit guests, this - * acts as a sanity check that we're not giving the guest an address - * that it cannot even represent. For 64-bit guests... the address - * might not be what the real kernel would give, but it is at least - * representable in the guest. - * - * TODO: Improve address allocation to avoid this problem, and to - * avoid setting bits at the top of guest addresses that might need - * to be used for tags. - */ -#define GUEST_ADDR_MAX_ \ - ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \ - UINT32_MAX : ~0ul) -#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_) - -#else - -#include "exec/hwaddr.h" - -#define SUFFIX -#define ARG1 as -#define ARG1_DECL AddressSpace *as -#define TARGET_ENDIANNESS -#include "exec/memory_ldst.h.inc" - -#define SUFFIX _cached_slow -#define ARG1 cache -#define ARG1_DECL MemoryRegionCache *cache -#define TARGET_ENDIANNESS -#include "exec/memory_ldst.h.inc" - -static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val) -{ - address_space_stl_notdirty(as, addr, val, - MEMTXATTRS_UNSPECIFIED, NULL); -} - -#define SUFFIX -#define ARG1 as -#define ARG1_DECL AddressSpace *as -#define TARGET_ENDIANNESS -#include "exec/memory_ldst_phys.h.inc" - -/* Inline fast path for direct RAM access. 
*/ -#define ENDIANNESS -#include "exec/memory_ldst_cached.h.inc" - -#define SUFFIX _cached -#define ARG1 cache -#define ARG1_DECL MemoryRegionCache *cache -#define TARGET_ENDIANNESS -#include "exec/memory_ldst_phys.h.inc" -#endif - -/* page related stuff */ - -#ifdef TARGET_PAGE_BITS_VARY -# include "exec/page-vary.h" -extern const TargetPageBits target_page; -# ifdef CONFIG_DEBUG_TCG -# define TARGET_PAGE_BITS ({ assert(target_page.decided); \ - target_page.bits; }) -# define TARGET_PAGE_MASK ({ assert(target_page.decided); \ - (target_long)target_page.mask; }) -# else -# define TARGET_PAGE_BITS target_page.bits -# define TARGET_PAGE_MASK ((target_long)target_page.mask) -# endif -# define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK) -#else -# define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS -# define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) -# define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS) -#endif - -#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE) - -#if defined(CONFIG_USER_ONLY) -void page_dump(FILE *f); - -typedef int (*walk_memory_regions_fn)(void *, target_ulong, - target_ulong, unsigned long); -int walk_memory_regions(void *, walk_memory_regions_fn); - -int page_get_flags(target_ulong address); -void page_set_flags(target_ulong start, target_ulong last, int flags); -void page_reset_target_data(target_ulong start, target_ulong last); - -/** - * page_check_range - * @start: first byte of range - * @len: length of range - * @flags: flags required for each page - * - * Return true if every page in [@start, @start+@len) has @flags set. - * Return false if any page is unmapped. Thus testing flags == 0 is - * equivalent to testing for flags == PAGE_VALID. - */ -bool page_check_range(target_ulong start, target_ulong last, int flags); - -/** - * page_check_range_empty: - * @start: first byte of range - * @last: last byte of range - * Context: holding mmap lock - * - * Return true if the entire range [@start, @last] is unmapped. - * The memory lock must be held so that the caller will can ensure - * the result stays true until a new mapping can be installed. - */ -bool page_check_range_empty(target_ulong start, target_ulong last); - -/** - * page_find_range_empty - * @min: first byte of search range - * @max: last byte of search range - * @len: size of the hole required - * @align: alignment of the hole required (power of 2) - * - * If there is a range [x, x+@len) within [@min, @max] such that - * x % @align == 0, then return x. Otherwise return -1. - * The memory lock must be held, as the caller will want to ensure - * the returned range stays empty until a new mapping can be installed. - */ -target_ulong page_find_range_empty(target_ulong min, target_ulong max, - target_ulong len, target_ulong align); - -/** - * page_get_target_data(address) - * @address: guest virtual address - * - * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate - * with the guest page at @address, allocating it if necessary. The - * caller should already have verified that the address is valid. - * - * The memory will be freed when the guest page is deallocated, - * e.g. with the munmap system call. - */ -void *page_get_target_data(target_ulong address) - __attribute__((returns_nonnull)); -#endif - -CPUArchState *cpu_copy(CPUArchState *env); - -/* Flags for use in ENV->INTERRUPT_PENDING. - - The numbers assigned here are non-sequential in order to preserve - binary compatibility with the vmstate dump. 
Bit 0 (0x0001) was - previously used for CPU_INTERRUPT_EXIT, and is cleared when loading - the vmstate dump. */ - -/* External hardware interrupt pending. This is typically used for - interrupts from devices. */ -#define CPU_INTERRUPT_HARD 0x0002 - -/* Exit the current TB. This is typically used when some system-level device - makes some change to the memory mapping. E.g. the a20 line change. */ -#define CPU_INTERRUPT_EXITTB 0x0004 - -/* Halt the CPU. */ -#define CPU_INTERRUPT_HALT 0x0020 - -/* Debug event pending. */ -#define CPU_INTERRUPT_DEBUG 0x0080 - -/* Reset signal. */ -#define CPU_INTERRUPT_RESET 0x0400 - -/* Several target-specific external hardware interrupts. Each target/cpu.h - should define proper names based on these defines. */ -#define CPU_INTERRUPT_TGT_EXT_0 0x0008 -#define CPU_INTERRUPT_TGT_EXT_1 0x0010 -#define CPU_INTERRUPT_TGT_EXT_2 0x0040 -#define CPU_INTERRUPT_TGT_EXT_3 0x0200 -#define CPU_INTERRUPT_TGT_EXT_4 0x1000 - -/* Several target-specific internal interrupts. These differ from the - preceding target-specific interrupts in that they are intended to - originate from within the cpu itself, typically in response to some - instruction being executed. These, therefore, are not masked while - single-stepping within the debugger. */ -#define CPU_INTERRUPT_TGT_INT_0 0x0100 -#define CPU_INTERRUPT_TGT_INT_1 0x0800 -#define CPU_INTERRUPT_TGT_INT_2 0x2000 - -/* First unused bit: 0x4000. */ - -/* The set of all bits that should be masked when single-stepping. */ -#define CPU_INTERRUPT_SSTEP_MASK \ - (CPU_INTERRUPT_HARD \ - | CPU_INTERRUPT_TGT_EXT_0 \ - | CPU_INTERRUPT_TGT_EXT_1 \ - | CPU_INTERRUPT_TGT_EXT_2 \ - | CPU_INTERRUPT_TGT_EXT_3 \ - | CPU_INTERRUPT_TGT_EXT_4) - -#ifdef CONFIG_USER_ONLY - -/* - * Allow some level of source compatibility with softmmu. We do not - * support any of the more exotic features, so only invalid pages may - * be signaled by probe_access_flags(). - */ -#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1)) -#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 2)) -#define TLB_WATCHPOINT 0 - -static inline int cpu_mmu_index(CPUState *cs, bool ifetch) -{ - return MMU_USER_IDX; -} -#else - -/* - * Flags stored in the low bits of the TLB virtual address. - * These are defined so that fast path ram access is all zeros. - * The flags all must be between TARGET_PAGE_BITS and - * maximum address alignment bit. - * - * Use TARGET_PAGE_BITS_MIN so that these bits are constant - * when TARGET_PAGE_BITS_VARY is in effect. - * - * The count, if not the placement of these bits is known - * to tcg/tcg-op-ldst.c, check_max_alignment(). - */ -/* Zero if TLB entry is valid. */ -#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1)) -/* Set if TLB entry references a clean RAM page. The iotlb entry will - contain the page physical address. */ -#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2)) -/* Set if TLB entry is an IO callback. */ -#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3)) -/* Set if TLB entry writes ignored. */ -#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 4)) -/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */ -#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 5)) - -/* - * Use this mask to check interception with an alignment mask - * in a TCG backend. - */ -#define TLB_FLAGS_MASK \ - (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \ - | TLB_FORCE_SLOW | TLB_DISCARD_WRITE) - -/* - * Flags stored in CPUTLBEntryFull.slow_flags[x]. - * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x]. 
- */ -/* Set if TLB entry requires byte swap. */ -#define TLB_BSWAP (1 << 0) -/* Set if TLB entry contains a watchpoint. */ -#define TLB_WATCHPOINT (1 << 1) -/* Set if TLB entry requires aligned accesses. */ -#define TLB_CHECK_ALIGNED (1 << 2) - -#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED) - -/* The two sets of flags must not overlap. */ -QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK); - -/** - * tlb_hit_page: return true if page aligned @addr is a hit against the - * TLB entry @tlb_addr - * - * @addr: virtual address to test (must be page aligned) - * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value) - */ -static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr) -{ - return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)); -} - -/** - * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr - * - * @addr: virtual address to test (need not be page aligned) - * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value) - */ -static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr) -{ - return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK); -} - -#endif /* !CONFIG_USER_ONLY */ - -/* Validate correct placement of CPUArchState. */ -#include "cpu.h" -QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0); -QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState)); - -#endif /* CPU_ALL_H */ diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h index 2e1b499cb71..dd8fe6b8351 100644 --- a/include/exec/cpu-common.h +++ b/include/exec/cpu-common.h @@ -9,9 +9,7 @@ #define CPU_COMMON_H #include "exec/vaddr.h" -#ifndef CONFIG_USER_ONLY #include "exec/hwaddr.h" -#endif #include "hw/core/cpu.h" #include "tcg/debug-assert.h" #include "exec/page-protection.h" @@ -40,20 +38,12 @@ int cpu_get_free_index(void); void tcg_iommu_init_notifier_list(CPUState *cpu); void tcg_iommu_free_notifier_list(CPUState *cpu); -#if !defined(CONFIG_USER_ONLY) - enum device_endian { DEVICE_NATIVE_ENDIAN, DEVICE_BIG_ENDIAN, DEVICE_LITTLE_ENDIAN, }; -#if HOST_BIG_ENDIAN -#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN -#else -#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN -#endif - /* address in the RAM (different from a physical address) */ #if defined(CONFIG_XEN_BACKEND) typedef uint64_t ram_addr_t; @@ -67,7 +57,7 @@ typedef uintptr_t ram_addr_t; /* memory API */ -void qemu_ram_remap(ram_addr_t addr, ram_addr_t length); +void qemu_ram_remap(ram_addr_t addr); /* This should not be used by devices. */ ram_addr_t qemu_ram_addr_from_host(void *ptr); ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr); @@ -95,6 +85,7 @@ void qemu_ram_unset_idstr(RAMBlock *block); const char *qemu_ram_get_idstr(RAMBlock *rb); void *qemu_ram_get_host_addr(RAMBlock *rb); ram_addr_t qemu_ram_get_offset(RAMBlock *rb); +ram_addr_t qemu_ram_get_fd_offset(RAMBlock *rb); ram_addr_t qemu_ram_get_used_length(RAMBlock *rb); ram_addr_t qemu_ram_get_max_length(RAMBlock *rb); bool qemu_ram_is_shared(RAMBlock *rb); @@ -159,6 +150,7 @@ void cpu_physical_memory_unmap(void *buffer, hwaddr len, bool is_write, hwaddr access_len); bool cpu_physical_memory_is_io(hwaddr phys_addr); +bool cpu_physical_memory_is_ram(hwaddr phys_addr); /* Coalesced MMIO regions are areas where write operations can be reordered. * This usually implies that write operations are side-effect free. 
This allows @@ -176,8 +168,6 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length); int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start, size_t length); -#endif - /* Returns: 0 on success, -1 on error */ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, void *ptr, size_t len, bool is_write); @@ -186,12 +176,7 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, void list_cpus(void); #ifdef CONFIG_TCG - -bool tcg_cflags_has(CPUState *cpu, uint32_t flags); -void tcg_cflags_set(CPUState *cpu, uint32_t flags); - -/* current cflags for hashing/comparison */ -uint32_t curr_cflags(CPUState *cpu); +#include "qemu/atomic.h" /** * cpu_unwind_state_data: @@ -199,7 +184,7 @@ uint32_t curr_cflags(CPUState *cpu); * @host_pc: the host pc within the translation * @data: output data * - * Attempt to load the the unwind state for a host pc occurring in + * Attempt to load the unwind state for a host pc occurring in * translated code. If @host_pc is not in translated code, the * function returns false; otherwise @data is loaded. * This is the same unwind info as given to restore_state_to_opc. @@ -218,6 +203,23 @@ bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data); */ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc); +/** + * cpu_loop_exit_requested: + * @cpu: The CPU state to be tested + * + * Indicate if somebody asked for a return of the CPU to the main loop + * (e.g., via cpu_exit() or cpu_interrupt()). + * + * This is helpful for architectures that support interruptible + * instructions. After writing back all state to registers/memory, this + * call can be used to check if it makes sense to return to the main loop + * or to continue executing the interruptible instruction. + */ +static inline bool cpu_loop_exit_requested(CPUState *cpu) +{ + return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0; +} + G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu); G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc); #endif /* CONFIG_TCG */ @@ -239,34 +241,25 @@ static inline ArchCPU *env_archcpu(CPUArchState *env) } /** - * env_cpu(env) + * env_cpu_const(env) * @env: The architecture environment * * Return the CPUState associated with the environment. */ -static inline CPUState *env_cpu(CPUArchState *env) +static inline const CPUState *env_cpu_const(const CPUArchState *env) { return (void *)env - sizeof(CPUState); } -#ifndef CONFIG_USER_ONLY /** - * cpu_mmu_index: - * @env: The cpu environment - * @ifetch: True for code access, false for data access. - * - * Return the core mmu index for the current translation regime. - * This function is used by generic TCG code paths. + * env_cpu(env) + * @env: The architecture environment * - * The user-only version of this function is inline in cpu-all.h, - * where it always returns MMU_USER_IDX. + * Return the CPUState associated with the environment. 
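As an illustration of the cpu_loop_exit_requested() pattern documented just above, here is a minimal sketch of a hypothetical target helper for an interruptible block operation; the helper name and the env->fill_* fields are assumptions, not part of this patch.

void helper_block_fill(CPUArchState *env)
{
    CPUState *cs = env_cpu(env);

    /* env->fill_addr and env->fill_len are hypothetical per-target fields. */
    while (env->fill_len > 0) {
        /* ... store one element, advance env->fill_addr, shrink env->fill_len ... */
        if (cpu_loop_exit_requested(cs)) {
            /*
             * All architectural state is already back in env, so returning
             * to the main loop is safe; the instruction resumes from the
             * saved env->fill_* state on the next TCG entry.
             */
            break;
        }
    }
}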
*/ -static inline int cpu_mmu_index(CPUState *cs, bool ifetch) +static inline CPUState *env_cpu(CPUArchState *env) { - int ret = cs->cc->mmu_index(cs, ifetch); - tcg_debug_assert(ret >= 0 && ret < NB_MMU_MODES); - return ret; + return (CPUState *)env_cpu_const(env); } -#endif /* !CONFIG_USER_ONLY */ #endif /* CPU_COMMON_H */ diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h index 0dbef3010cb..e01acb7c906 100644 --- a/include/exec/cpu-defs.h +++ b/include/exec/cpu-defs.h @@ -23,14 +23,6 @@ #error cpu.h included from common code #endif -#include "qemu/host-utils.h" -#include "qemu/thread.h" -#ifndef CONFIG_USER_ONLY -#include "exec/hwaddr.h" -#endif -#include "exec/memattrs.h" -#include "hw/core/cpu.h" - #include "cpu-param.h" #ifndef TARGET_LONG_BITS @@ -42,42 +34,10 @@ #ifndef TARGET_VIRT_ADDR_SPACE_BITS # error TARGET_VIRT_ADDR_SPACE_BITS must be defined in cpu-param.h #endif -#ifndef TARGET_PAGE_BITS -# ifdef TARGET_PAGE_BITS_VARY -# ifndef TARGET_PAGE_BITS_MIN -# error TARGET_PAGE_BITS_MIN must be defined in cpu-param.h -# endif -# else -# error TARGET_PAGE_BITS must be defined in cpu-param.h -# endif +#if !defined(TARGET_PAGE_BITS) && !defined(TARGET_PAGE_BITS_VARY) +# error TARGET_PAGE_BITS must be defined in cpu-param.h #endif #include "exec/target_long.h" -#if defined(CONFIG_SOFTMMU) && defined(CONFIG_TCG) -#define CPU_TLB_DYN_MIN_BITS 6 -#define CPU_TLB_DYN_DEFAULT_BITS 8 - -# if HOST_LONG_BITS == 32 -/* Make sure we do not require a double-word shift for the TLB load */ -# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS) -# else /* HOST_LONG_BITS == 64 */ -/* - * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) == - * 2**34 == 16G of address space. This is roughly what one would expect a - * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel - * Skylake's Level-2 STLB has 16 1G entries. - * Also, make sure we do not size the TLB past the guest's address space. - */ -# ifdef TARGET_PAGE_BITS_VARY -# define CPU_TLB_DYN_MAX_BITS \ - MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS) -# else -# define CPU_TLB_DYN_MAX_BITS \ - MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS) -# endif -# endif - -#endif /* CONFIG_SOFTMMU && CONFIG_TCG */ - #endif diff --git a/include/exec/cpu-interrupt.h b/include/exec/cpu-interrupt.h new file mode 100644 index 00000000000..40715193ca3 --- /dev/null +++ b/include/exec/cpu-interrupt.h @@ -0,0 +1,70 @@ +/* + * Flags for use with cpu_interrupt() + * + * Copyright (c) 2003 Fabrice Bellard + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef CPU_INTERRUPT_H +#define CPU_INTERRUPT_H + +/* + * The numbers assigned here are non-sequential in order to preserve binary + * compatibility with the vmstate dump. Bit 0 (0x0001) was previously used + * for CPU_INTERRUPT_EXIT, and is cleared when loading the vmstate dump. + */ + +/* + * External hardware interrupt pending. + * This is typically used for interrupts from devices. + */ +#define CPU_INTERRUPT_HARD 0x0002 + +/* + * Exit the current TB. This is typically used when some system-level device + * makes some change to the memory mapping. E.g. the a20 line change. + */ +#define CPU_INTERRUPT_EXITTB 0x0004 + +/* Halt the CPU. */ +#define CPU_INTERRUPT_HALT 0x0020 + +/* Debug event pending. */ +#define CPU_INTERRUPT_DEBUG 0x0080 + +/* Reset signal. */ +#define CPU_INTERRUPT_RESET 0x0400 + +/* + * Several target-specific external hardware interrupts. Each target/cpu.h + * should define proper names based on these defines. 
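For example, a target's cpu.h layers architecture-specific names on top of these generic bits; the names below are purely illustrative and not taken from any particular target.

/* Illustrative only -- each target picks its own names. */
#define CPU_INTERRUPT_IRQ    CPU_INTERRUPT_TGT_EXT_0
#define CPU_INTERRUPT_FIQ    CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_VIRQ   CPU_INTERRUPT_TGT_EXT_2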
+ */ +#define CPU_INTERRUPT_TGT_EXT_0 0x0008 +#define CPU_INTERRUPT_TGT_EXT_1 0x0010 +#define CPU_INTERRUPT_TGT_EXT_2 0x0040 +#define CPU_INTERRUPT_TGT_EXT_3 0x0200 +#define CPU_INTERRUPT_TGT_EXT_4 0x1000 + +/* + * Several target-specific internal interrupts. These differ from the + * preceding target-specific interrupts in that they are intended to + * originate from within the cpu itself, typically in response to some + * instruction being executed. These, therefore, are not masked while + * single-stepping within the debugger. + */ +#define CPU_INTERRUPT_TGT_INT_0 0x0100 +#define CPU_INTERRUPT_TGT_INT_1 0x0800 +#define CPU_INTERRUPT_TGT_INT_2 0x2000 + +/* First unused bit: 0x4000. */ + +/* The set of all bits that should be masked when single-stepping. */ +#define CPU_INTERRUPT_SSTEP_MASK \ + (CPU_INTERRUPT_HARD \ + | CPU_INTERRUPT_TGT_EXT_0 \ + | CPU_INTERRUPT_TGT_EXT_1 \ + | CPU_INTERRUPT_TGT_EXT_2 \ + | CPU_INTERRUPT_TGT_EXT_3 \ + | CPU_INTERRUPT_TGT_EXT_4) + +#endif /* CPU_INTERRUPT_H */ diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h deleted file mode 100644 index dac12bd8eb3..00000000000 --- a/include/exec/cpu_ldst.h +++ /dev/null @@ -1,416 +0,0 @@ -/* - * Software MMU support (per-target) - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - * - */ - -/* - * Generate inline load/store functions for all MMU modes (typically - * at least _user and _kernel) as well as _data versions, for all data - * sizes. - * - * Used by target op helpers. - * - * The syntax for the accessors is: - * - * load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr) - * cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr) - * cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr) - * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr) - * - * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val) - * cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr) - * cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr) - * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr) - * - * sign is: - * (empty): for 32 and 64 bit sizes - * u : unsigned - * s : signed - * - * size is: - * b: 8 bits - * w: 16 bits - * l: 32 bits - * q: 64 bits - * - * end is: - * (empty): for target native endian, or for 8 bit access - * _be: for forced big endian - * _le: for forced little endian - * - * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx". - * The "mmuidx" suffix carries an extra mmu_idx argument that specifies - * the index to use; the "data" and "code" suffixes take the index from - * cpu_mmu_index(). - * - * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the - * MemOp including alignment requirements. The alignment will be enforced. 
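To make the accessor naming scheme above concrete, here is a minimal sketch of two target op helpers using the removed cpu_ldst.h entry points; the helper names are hypothetical.

uint64_t helper_example_load(CPUArchState *env, abi_ptr ptr)
{
    /* ld + unsigned + 64-bit ("q") + little-endian + "data" mmu index,
     * passing the helper's return address for precise unwinding. */
    return cpu_ldq_le_data_ra(env, ptr, GETPC());
}

void helper_example_store(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    /* st + 32-bit ("l") + big-endian + explicit mmu index ("mmuidx"). */
    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    cpu_stl_be_mmuidx_ra(env, ptr, val, mmu_idx, GETPC());
}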
- */ -#ifndef CPU_LDST_H -#define CPU_LDST_H - -#ifndef CONFIG_TCG -#error Can only include this header with TCG -#endif - -#include "exec/memopidx.h" -#include "exec/abi_ptr.h" -#include "exec/mmu-access-type.h" -#include "qemu/int128.h" - -#if defined(CONFIG_USER_ONLY) - -#include "user/guest-base.h" - -#ifndef TARGET_TAGGED_ADDRESSES -static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x) -{ - return x; -} -#endif - -/* All direct uses of g2h and h2g need to go away for usermode softmmu. */ -static inline void *g2h_untagged(abi_ptr x) -{ - return (void *)((uintptr_t)(x) + guest_base); -} - -static inline void *g2h(CPUState *cs, abi_ptr x) -{ - return g2h_untagged(cpu_untagged_addr(cs, x)); -} - -static inline bool guest_addr_valid_untagged(abi_ulong x) -{ - return x <= GUEST_ADDR_MAX; -} - -static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len) -{ - return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1; -} - -#define h2g_valid(x) \ - (HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \ - (uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX) - -#define h2g_nocheck(x) ({ \ - uintptr_t __ret = (uintptr_t)(x) - guest_base; \ - (abi_ptr)__ret; \ -}) - -#define h2g(x) ({ \ - /* Check if given address fits target address space */ \ - assert(h2g_valid(x)); \ - h2g_nocheck(x); \ -}) - -#endif /* CONFIG_USER_ONLY */ - -uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr); -int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr); -uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr); -int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr); -uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr); -uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr); -uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr); -int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr); -uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr); -uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr); - -uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); -int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); -uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); -int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); -uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); -uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); -uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); -int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); -uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); -uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); - -void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val); -void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val); -void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val); -void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val); -void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val); -void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val); -void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val); - -void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t ra); -void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t ra); -void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t ra); -void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr, - uint64_t val, uintptr_t ra); -void 
cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t ra); -void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t ra); -void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr, - uint64_t val, uintptr_t ra); - -uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr, - int mmu_idx, uintptr_t ra); -int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, - int mmu_idx, uintptr_t ra); -uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, - int mmu_idx, uintptr_t ra); -int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, - int mmu_idx, uintptr_t ra); -uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, - int mmu_idx, uintptr_t ra); -uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, - int mmu_idx, uintptr_t ra); -uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, - int mmu_idx, uintptr_t ra); -int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, - int mmu_idx, uintptr_t ra); -uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, - int mmu_idx, uintptr_t ra); -uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, - int mmu_idx, uintptr_t ra); - -void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, - int mmu_idx, uintptr_t ra); -void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, - int mmu_idx, uintptr_t ra); -void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, - int mmu_idx, uintptr_t ra); -void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val, - int mmu_idx, uintptr_t ra); -void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, - int mmu_idx, uintptr_t ra); -void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, - int mmu_idx, uintptr_t ra); -void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val, - int mmu_idx, uintptr_t ra); - -uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra); -uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra); -uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra); -uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra); -Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra); - -void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val, - MemOpIdx oi, uintptr_t ra); -void cpu_stw_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val, - MemOpIdx oi, uintptr_t ra); -void cpu_stl_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val, - MemOpIdx oi, uintptr_t ra); -void cpu_stq_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val, - MemOpIdx oi, uintptr_t ra); -void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val, - MemOpIdx oi, uintptr_t ra); - -uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, abi_ptr addr, - uint32_t cmpv, uint32_t newv, - MemOpIdx oi, uintptr_t retaddr); -uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, abi_ptr addr, - uint32_t cmpv, uint32_t newv, - MemOpIdx oi, uintptr_t retaddr); -uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, abi_ptr addr, - uint32_t cmpv, uint32_t newv, - MemOpIdx oi, uintptr_t retaddr); -uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, abi_ptr addr, - uint64_t cmpv, uint64_t newv, - MemOpIdx oi, uintptr_t retaddr); -uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, abi_ptr addr, - uint32_t cmpv, uint32_t newv, - MemOpIdx oi, uintptr_t retaddr); -uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, abi_ptr addr, - uint32_t cmpv, 
uint32_t newv, - MemOpIdx oi, uintptr_t retaddr); -uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, abi_ptr addr, - uint64_t cmpv, uint64_t newv, - MemOpIdx oi, uintptr_t retaddr); - -#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \ -TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \ - (CPUArchState *env, abi_ptr addr, TYPE val, \ - MemOpIdx oi, uintptr_t retaddr); - -#ifdef CONFIG_ATOMIC64 -#define GEN_ATOMIC_HELPER_ALL(NAME) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \ - GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \ - GEN_ATOMIC_HELPER(NAME, uint64_t, q_be) -#else -#define GEN_ATOMIC_HELPER_ALL(NAME) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) -#endif - -GEN_ATOMIC_HELPER_ALL(fetch_add) -GEN_ATOMIC_HELPER_ALL(fetch_sub) -GEN_ATOMIC_HELPER_ALL(fetch_and) -GEN_ATOMIC_HELPER_ALL(fetch_or) -GEN_ATOMIC_HELPER_ALL(fetch_xor) -GEN_ATOMIC_HELPER_ALL(fetch_smin) -GEN_ATOMIC_HELPER_ALL(fetch_umin) -GEN_ATOMIC_HELPER_ALL(fetch_smax) -GEN_ATOMIC_HELPER_ALL(fetch_umax) - -GEN_ATOMIC_HELPER_ALL(add_fetch) -GEN_ATOMIC_HELPER_ALL(sub_fetch) -GEN_ATOMIC_HELPER_ALL(and_fetch) -GEN_ATOMIC_HELPER_ALL(or_fetch) -GEN_ATOMIC_HELPER_ALL(xor_fetch) -GEN_ATOMIC_HELPER_ALL(smin_fetch) -GEN_ATOMIC_HELPER_ALL(umin_fetch) -GEN_ATOMIC_HELPER_ALL(smax_fetch) -GEN_ATOMIC_HELPER_ALL(umax_fetch) - -GEN_ATOMIC_HELPER_ALL(xchg) - -#undef GEN_ATOMIC_HELPER_ALL -#undef GEN_ATOMIC_HELPER - -Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, abi_ptr addr, - Int128 cmpv, Int128 newv, - MemOpIdx oi, uintptr_t retaddr); -Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, abi_ptr addr, - Int128 cmpv, Int128 newv, - MemOpIdx oi, uintptr_t retaddr); - -#if TARGET_BIG_ENDIAN -# define cpu_lduw_data cpu_lduw_be_data -# define cpu_ldsw_data cpu_ldsw_be_data -# define cpu_ldl_data cpu_ldl_be_data -# define cpu_ldq_data cpu_ldq_be_data -# define cpu_lduw_data_ra cpu_lduw_be_data_ra -# define cpu_ldsw_data_ra cpu_ldsw_be_data_ra -# define cpu_ldl_data_ra cpu_ldl_be_data_ra -# define cpu_ldq_data_ra cpu_ldq_be_data_ra -# define cpu_lduw_mmuidx_ra cpu_lduw_be_mmuidx_ra -# define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra -# define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra -# define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra -# define cpu_stw_data cpu_stw_be_data -# define cpu_stl_data cpu_stl_be_data -# define cpu_stq_data cpu_stq_be_data -# define cpu_stw_data_ra cpu_stw_be_data_ra -# define cpu_stl_data_ra cpu_stl_be_data_ra -# define cpu_stq_data_ra cpu_stq_be_data_ra -# define cpu_stw_mmuidx_ra cpu_stw_be_mmuidx_ra -# define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra -# define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra -#else -# define cpu_lduw_data cpu_lduw_le_data -# define cpu_ldsw_data cpu_ldsw_le_data -# define cpu_ldl_data cpu_ldl_le_data -# define cpu_ldq_data cpu_ldq_le_data -# define cpu_lduw_data_ra cpu_lduw_le_data_ra -# define cpu_ldsw_data_ra cpu_ldsw_le_data_ra -# define cpu_ldl_data_ra cpu_ldl_le_data_ra -# define cpu_ldq_data_ra cpu_ldq_le_data_ra -# define cpu_lduw_mmuidx_ra cpu_lduw_le_mmuidx_ra -# define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra -# define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra -# define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra -# define cpu_stw_data cpu_stw_le_data -# define 
cpu_stl_data cpu_stl_le_data -# define cpu_stq_data cpu_stq_le_data -# define cpu_stw_data_ra cpu_stw_le_data_ra -# define cpu_stl_data_ra cpu_stl_le_data_ra -# define cpu_stq_data_ra cpu_stq_le_data_ra -# define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra -# define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra -# define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra -#endif - -uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra); -uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra); -uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra); -uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr, - MemOpIdx oi, uintptr_t ra); - -uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr); -uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr); -uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr); -uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr); - -/** - * tlb_vaddr_to_host: - * @env: CPUArchState - * @addr: guest virtual address to look up - * @access_type: 0 for read, 1 for write, 2 for execute - * @mmu_idx: MMU index to use for lookup - * - * Look up the specified guest virtual index in the TCG softmmu TLB. - * If we can translate a host virtual address suitable for direct RAM - * access, without causing a guest exception, then return it. - * Otherwise (TLB entry is for an I/O access, guest software - * TLB fill required, etc) return NULL. - */ -#ifdef CONFIG_USER_ONLY -static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, - MMUAccessType access_type, int mmu_idx) -{ - return g2h(env_cpu(env), addr); -} -#else -void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, - MMUAccessType access_type, int mmu_idx); -#endif - -/* - * For user-only, helpers that use guest to host address translation - * must protect the actual host memory access by recording 'retaddr' - * for the signal handler. This is required for a race condition in - * which another thread unmaps the page between a probe and the - * actual access. - */ -#ifdef CONFIG_USER_ONLY -extern __thread uintptr_t helper_retaddr; - -static inline void set_helper_retaddr(uintptr_t ra) -{ - helper_retaddr = ra; - /* - * Ensure that this write is visible to the SIGSEGV handler that - * may be invoked due to a subsequent invalid memory operation. - */ - signal_barrier(); -} - -static inline void clear_helper_retaddr(void) -{ - /* - * Ensure that previous memory operations have succeeded before - * removing the data visible to the signal handler. 
- */ - signal_barrier(); - helper_retaddr = 0; -} -#else -#define set_helper_retaddr(ra) do { } while (0) -#define clear_helper_retaddr() do { } while (0) -#endif - -#endif /* CPU_LDST_H */ diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h index ef18642a329..03ed7e21659 100644 --- a/include/exec/cputlb.h +++ b/include/exec/cputlb.h @@ -21,15 +21,266 @@ #define CPUTLB_H #include "exec/cpu-common.h" +#include "exec/hwaddr.h" +#include "exec/memattrs.h" +#include "exec/vaddr.h" -#ifdef CONFIG_TCG - -#if !defined(CONFIG_USER_ONLY) -/* cputlb.c */ +#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) void tlb_protect_code(ram_addr_t ram_addr); void tlb_unprotect_code(ram_addr_t ram_addr); #endif -#endif /* CONFIG_TCG */ - +#ifndef CONFIG_USER_ONLY +void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length); +void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length); #endif + +/** + * tlb_set_page_full: + * @cpu: CPU context + * @mmu_idx: mmu index of the tlb to modify + * @addr: virtual address of the entry to add + * @full: the details of the tlb entry + * + * Add an entry to @cpu tlb index @mmu_idx. All of the fields of + * @full must be filled, except for xlat_section, and constitute + * the complete description of the translated page. + * + * This is generally called by the target tlb_fill function after + * having performed a successful page table walk to find the physical + * address and attributes for the translation. + * + * At most one entry for a given virtual address is permitted. Only a + * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only + * used by tlb_flush_page. + */ +void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr, + CPUTLBEntryFull *full); + +/** + * tlb_set_page_with_attrs: + * @cpu: CPU to add this TLB entry for + * @addr: virtual address of page to add entry for + * @paddr: physical address of the page + * @attrs: memory transaction attributes + * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits) + * @mmu_idx: MMU index to insert TLB entry for + * @size: size of the page in bytes + * + * Add an entry to this CPU's TLB (a mapping from virtual address + * @addr to physical address @paddr) with the specified memory + * transaction attributes. This is generally called by the target CPU + * specific code after it has been called through the tlb_fill() + * entry point and performed a successful page table walk to find + * the physical address and attributes for the virtual address + * which provoked the TLB miss. + * + * At most one entry for a given virtual address is permitted. Only a + * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only + * used by tlb_flush_page. + */ +void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr, + hwaddr paddr, MemTxAttrs attrs, + int prot, int mmu_idx, vaddr size); + +/** + * tlb_set_page: + * + * This function is equivalent to calling tlb_set_page_with_attrs() + * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided + * as a convenience for CPUs which don't use memory transaction attributes. + */ +void tlb_set_page(CPUState *cpu, vaddr addr, + hwaddr paddr, int prot, + int mmu_idx, vaddr size); + +#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) +/** + * tlb_flush_page: + * @cpu: CPU whose TLB should be flushed + * @addr: virtual address of page to be flushed + * + * Flush one page from the TLB of the specified CPU, for all + * MMU indexes. 
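As a usage sketch for the entry-installation API documented above, the tail of a target's tlb_fill might look like the following; the function and parameter names here are assumptions.

static void example_finish_tlb_fill(CPUState *cs, vaddr addr, hwaddr paddr,
                                    int prot, int mmu_idx)
{
    /* Only one TARGET_PAGE_SIZE region is entered per call; prot carries
     * the PAGE_READ/PAGE_WRITE/PAGE_EXEC bits from the page-table walk. */
    tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
                            paddr & TARGET_PAGE_MASK,
                            MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, TARGET_PAGE_SIZE);
}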
+ */ +void tlb_flush_page(CPUState *cpu, vaddr addr); + +/** + * tlb_flush_page_all_cpus_synced: + * @cpu: src CPU of the flush + * @addr: virtual address of page to be flushed + * + * Flush one page from the TLB of all CPUs, for all + * MMU indexes. + * + * When this function returns, no CPUs will subsequently perform + * translations using the flushed TLBs. + */ +void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr); + +/** + * tlb_flush: + * @cpu: CPU whose TLB should be flushed + * + * Flush the entire TLB for the specified CPU. Most CPU architectures + * allow the implementation to drop entries from the TLB at any time + * so this is generally safe. If more selective flushing is required + * use one of the other functions for efficiency. + */ +void tlb_flush(CPUState *cpu); + +/** + * tlb_flush_all_cpus_synced: + * @cpu: src CPU of the flush + * + * Flush the entire TLB for all CPUs, for all MMU indexes. + * + * When this function returns, no CPUs will subsequently perform + * translations using the flushed TLBs. + */ +void tlb_flush_all_cpus_synced(CPUState *src_cpu); + +/** + * tlb_flush_page_by_mmuidx: + * @cpu: CPU whose TLB should be flushed + * @addr: virtual address of page to be flushed + * @idxmap: bitmap of MMU indexes to flush + * + * Flush one page from the TLB of the specified CPU, for the specified + * MMU indexes. + */ +void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, + uint16_t idxmap); + +/** + * tlb_flush_page_by_mmuidx_all_cpus_synced: + * @cpu: Originating CPU of the flush + * @addr: virtual address of page to be flushed + * @idxmap: bitmap of MMU indexes to flush + * + * Flush one page from the TLB of all CPUs, for the specified + * MMU indexes. + * + * When this function returns, no CPUs will subsequently perform + * translations using the flushed TLBs. + */ +void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, + uint16_t idxmap); + +/** + * tlb_flush_by_mmuidx: + * @cpu: CPU whose TLB should be flushed + * @wait: If true ensure synchronisation by exiting the cpu_loop + * @idxmap: bitmap of MMU indexes to flush + * + * Flush all entries from the TLB of the specified CPU, for the specified + * MMU indexes. + */ +void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap); + +/** + * tlb_flush_by_mmuidx_all_cpus_synced: + * @cpu: Originating CPU of the flush + * @idxmap: bitmap of MMU indexes to flush + * + * Flush all entries from the TLB of all CPUs, for the specified + * MMU indexes. + * + * When this function returns, no CPUs will subsequently perform + * translations using the flushed TLBs. + */ +void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap); + +/** + * tlb_flush_page_bits_by_mmuidx + * @cpu: CPU whose TLB should be flushed + * @addr: virtual address of page to be flushed + * @idxmap: bitmap of mmu indexes to flush + * @bits: number of significant bits in address + * + * Similar to tlb_flush_page_mask, but with a bitmap of indexes. + */ +void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr, + uint16_t idxmap, unsigned bits); + +/* Similarly, with broadcast and syncing. 
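A brief sketch of how a target might use the per-mmuidx flush variants described above; the helper names and the choice of index mask (MMU_USER_IDX is target-defined) are assumptions.

void helper_invalidate_page_local(CPUArchState *env, vaddr addr)
{
    /* Drop the translation from this vCPU only, for one assumed regime. */
    tlb_flush_page_by_mmuidx(env_cpu(env), addr, 1 << MMU_USER_IDX);
}

void helper_invalidate_page_bcast(CPUArchState *env, vaddr addr)
{
    /* Broadcast variant: once it returns, no vCPU can still translate
     * through the flushed entries. */
    tlb_flush_page_by_mmuidx_all_cpus_synced(env_cpu(env), addr,
                                             1 << MMU_USER_IDX);
}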
*/ +void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, + uint16_t idxmap, + unsigned bits); + +/** + * tlb_flush_range_by_mmuidx + * @cpu: CPU whose TLB should be flushed + * @addr: virtual address of the start of the range to be flushed + * @len: length of range to be flushed + * @idxmap: bitmap of mmu indexes to flush + * @bits: number of significant bits in address + * + * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len), + * comparing only the low @bits worth of each virtual page. + */ +void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, + vaddr len, uint16_t idxmap, + unsigned bits); + +/* Similarly, with broadcast and syncing. */ +void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, + vaddr addr, + vaddr len, + uint16_t idxmap, + unsigned bits); +#else +static inline void tlb_flush_page(CPUState *cpu, vaddr addr) +{ +} +static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr) +{ +} +static inline void tlb_flush(CPUState *cpu) +{ +} +static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu) +{ +} +static inline void tlb_flush_page_by_mmuidx(CPUState *cpu, + vaddr addr, uint16_t idxmap) +{ +} + +static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) +{ +} +static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, + vaddr addr, + uint16_t idxmap) +{ +} +static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, + uint16_t idxmap) +{ +} +static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, + vaddr addr, + uint16_t idxmap, + unsigned bits) +{ +} +static inline void +tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, + uint16_t idxmap, unsigned bits) +{ +} +static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, + vaddr len, uint16_t idxmap, + unsigned bits) +{ +} +static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, + vaddr addr, + vaddr len, + uint16_t idxmap, + unsigned bits) +{ +} +#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */ +#endif /* CPUTLB_H */ diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h deleted file mode 100644 index 72240ef4263..00000000000 --- a/include/exec/exec-all.h +++ /dev/null @@ -1,604 +0,0 @@ -/* - * internal execution defines for qemu - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#ifndef EXEC_ALL_H -#define EXEC_ALL_H - -#include "cpu.h" -#if defined(CONFIG_USER_ONLY) -#include "exec/abi_ptr.h" -#include "exec/cpu_ldst.h" -#endif -#include "exec/mmu-access-type.h" -#include "exec/translation-block.h" -#include "qemu/clang-tsa.h" - -/** - * cpu_loop_exit_requested: - * @cpu: The CPU state to be tested - * - * Indicate if somebody asked for a return of the CPU to the main loop - * (e.g., via cpu_exit() or cpu_interrupt()). 
- * - * This is helpful for architectures that support interruptible - * instructions. After writing back all state to registers/memory, this - * call can be used to check if it makes sense to return to the main loop - * or to continue executing the interruptible instruction. - */ -static inline bool cpu_loop_exit_requested(CPUState *cpu) -{ - return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0; -} - -#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG) -/* cputlb.c */ -/** - * tlb_init - initialize a CPU's TLB - * @cpu: CPU whose TLB should be initialized - */ -void tlb_init(CPUState *cpu); -/** - * tlb_destroy - destroy a CPU's TLB - * @cpu: CPU whose TLB should be destroyed - */ -void tlb_destroy(CPUState *cpu); -/** - * tlb_flush_page: - * @cpu: CPU whose TLB should be flushed - * @addr: virtual address of page to be flushed - * - * Flush one page from the TLB of the specified CPU, for all - * MMU indexes. - */ -void tlb_flush_page(CPUState *cpu, vaddr addr); -/** - * tlb_flush_page_all_cpus_synced: - * @cpu: src CPU of the flush - * @addr: virtual address of page to be flushed - * - * Flush one page from the TLB of all CPUs, for all - * MMU indexes. - * - * When this function returns, no CPUs will subsequently perform - * translations using the flushed TLBs. - */ -void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr); -/** - * tlb_flush: - * @cpu: CPU whose TLB should be flushed - * - * Flush the entire TLB for the specified CPU. Most CPU architectures - * allow the implementation to drop entries from the TLB at any time - * so this is generally safe. If more selective flushing is required - * use one of the other functions for efficiency. - */ -void tlb_flush(CPUState *cpu); -/** - * tlb_flush_all_cpus_synced: - * @cpu: src CPU of the flush - * - * Flush the entire TLB for all CPUs, for all MMU indexes. - * - * When this function returns, no CPUs will subsequently perform - * translations using the flushed TLBs. - */ -void tlb_flush_all_cpus_synced(CPUState *src_cpu); -/** - * tlb_flush_page_by_mmuidx: - * @cpu: CPU whose TLB should be flushed - * @addr: virtual address of page to be flushed - * @idxmap: bitmap of MMU indexes to flush - * - * Flush one page from the TLB of the specified CPU, for the specified - * MMU indexes. - */ -void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, - uint16_t idxmap); -/** - * tlb_flush_page_by_mmuidx_all_cpus_synced: - * @cpu: Originating CPU of the flush - * @addr: virtual address of page to be flushed - * @idxmap: bitmap of MMU indexes to flush - * - * Flush one page from the TLB of all CPUs, for the specified - * MMU indexes. - * - * When this function returns, no CPUs will subsequently perform - * translations using the flushed TLBs. - */ -void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, - uint16_t idxmap); -/** - * tlb_flush_by_mmuidx: - * @cpu: CPU whose TLB should be flushed - * @wait: If true ensure synchronisation by exiting the cpu_loop - * @idxmap: bitmap of MMU indexes to flush - * - * Flush all entries from the TLB of the specified CPU, for the specified - * MMU indexes. - */ -void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap); -/** - * tlb_flush_by_mmuidx_all_cpus_synced: - * @cpu: Originating CPU of the flush - * @idxmap: bitmap of MMU indexes to flush - * - * Flush all entries from the TLB of all CPUs, for the specified - * MMU indexes. - * - * When this function returns, no CPUs will subsequently perform - * translations using the flushed TLBs. 
- */ -void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap); - -/** - * tlb_flush_page_bits_by_mmuidx - * @cpu: CPU whose TLB should be flushed - * @addr: virtual address of page to be flushed - * @idxmap: bitmap of mmu indexes to flush - * @bits: number of significant bits in address - * - * Similar to tlb_flush_page_mask, but with a bitmap of indexes. - */ -void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr, - uint16_t idxmap, unsigned bits); - -/* Similarly, with broadcast and syncing. */ -void tlb_flush_page_bits_by_mmuidx_all_cpus_synced - (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits); - -/** - * tlb_flush_range_by_mmuidx - * @cpu: CPU whose TLB should be flushed - * @addr: virtual address of the start of the range to be flushed - * @len: length of range to be flushed - * @idxmap: bitmap of mmu indexes to flush - * @bits: number of significant bits in address - * - * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len), - * comparing only the low @bits worth of each virtual page. - */ -void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, - vaddr len, uint16_t idxmap, - unsigned bits); - -/* Similarly, with broadcast and syncing. */ -void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, - vaddr addr, - vaddr len, - uint16_t idxmap, - unsigned bits); - -/** - * tlb_set_page_full: - * @cpu: CPU context - * @mmu_idx: mmu index of the tlb to modify - * @addr: virtual address of the entry to add - * @full: the details of the tlb entry - * - * Add an entry to @cpu tlb index @mmu_idx. All of the fields of - * @full must be filled, except for xlat_section, and constitute - * the complete description of the translated page. - * - * This is generally called by the target tlb_fill function after - * having performed a successful page table walk to find the physical - * address and attributes for the translation. - * - * At most one entry for a given virtual address is permitted. Only a - * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only - * used by tlb_flush_page. - */ -void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr, - CPUTLBEntryFull *full); - -/** - * tlb_set_page_with_attrs: - * @cpu: CPU to add this TLB entry for - * @addr: virtual address of page to add entry for - * @paddr: physical address of the page - * @attrs: memory transaction attributes - * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits) - * @mmu_idx: MMU index to insert TLB entry for - * @size: size of the page in bytes - * - * Add an entry to this CPU's TLB (a mapping from virtual address - * @addr to physical address @paddr) with the specified memory - * transaction attributes. This is generally called by the target CPU - * specific code after it has been called through the tlb_fill() - * entry point and performed a successful page table walk to find - * the physical address and attributes for the virtual address - * which provoked the TLB miss. - * - * At most one entry for a given virtual address is permitted. Only a - * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only - * used by tlb_flush_page. - */ -void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr, - hwaddr paddr, MemTxAttrs attrs, - int prot, int mmu_idx, vaddr size); -/* tlb_set_page: - * - * This function is equivalent to calling tlb_set_page_with_attrs() - * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided - * as a convenience for CPUs which don't use memory transaction attributes. 
- */ -void tlb_set_page(CPUState *cpu, vaddr addr, - hwaddr paddr, int prot, - int mmu_idx, vaddr size); -#else -static inline void tlb_init(CPUState *cpu) -{ -} -static inline void tlb_destroy(CPUState *cpu) -{ -} -static inline void tlb_flush_page(CPUState *cpu, vaddr addr) -{ -} -static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr) -{ -} -static inline void tlb_flush(CPUState *cpu) -{ -} -static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu) -{ -} -static inline void tlb_flush_page_by_mmuidx(CPUState *cpu, - vaddr addr, uint16_t idxmap) -{ -} - -static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) -{ -} -static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, - vaddr addr, - uint16_t idxmap) -{ -} -static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, - uint16_t idxmap) -{ -} -static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, - vaddr addr, - uint16_t idxmap, - unsigned bits) -{ -} -static inline void -tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, - uint16_t idxmap, unsigned bits) -{ -} -static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, - vaddr len, uint16_t idxmap, - unsigned bits) -{ -} -static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, - vaddr addr, - vaddr len, - uint16_t idxmap, - unsigned bits) -{ -} -#endif - -#if defined(CONFIG_TCG) - -/** - * probe_access: - * @env: CPUArchState - * @addr: guest virtual address to look up - * @size: size of the access - * @access_type: read, write or execute permission - * @mmu_idx: MMU index to use for lookup - * @retaddr: return address for unwinding - * - * Look up the guest virtual address @addr. Raise an exception if the - * page does not satisfy @access_type. Raise an exception if the - * access (@addr, @size) hits a watchpoint. For writes, mark a clean - * page as dirty. - * - * Finally, return the host address for a page that is backed by RAM, - * or NULL if the page requires I/O. - */ -void *probe_access(CPUArchState *env, vaddr addr, int size, - MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); - -static inline void *probe_write(CPUArchState *env, vaddr addr, int size, - int mmu_idx, uintptr_t retaddr) -{ - return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); -} - -static inline void *probe_read(CPUArchState *env, vaddr addr, int size, - int mmu_idx, uintptr_t retaddr) -{ - return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); -} - -/** - * probe_access_flags: - * @env: CPUArchState - * @addr: guest virtual address to look up - * @size: size of the access - * @access_type: read, write or execute permission - * @mmu_idx: MMU index to use for lookup - * @nonfault: suppress the fault - * @phost: return value for host address - * @retaddr: return address for unwinding - * - * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for - * the page, and storing the host address for RAM in @phost. - * - * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK. - * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags. - * Do handle clean pages, so exclude TLB_NOTDIRY from the returned flags. - * For simplicity, all "mmio-like" flags are folded to TLB_MMIO. 
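To illustrate the probe API documented above, a minimal sketch of a hypothetical 16-byte store helper that probes the destination before producing any side effects:

void helper_store_pair(CPUArchState *env, vaddr addr, uint64_t lo, uint64_t hi)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    /* Faults (permissions, unmapped page) are raised here, before any
     * part of the pair has been written. */
    void *host = probe_write(env, addr, 16, mmu_idx, ra);

    if (host) {
        /* Direct RAM: write both halves through the host pointer. */
        stq_le_p(host, lo);
        stq_le_p((char *)host + 8, hi);
    } else {
        /* Page needs I/O handling: fall back to the cpu_st* accessors. */
        cpu_stq_le_data_ra(env, addr, lo, ra);
        cpu_stq_le_data_ra(env, addr + 8, hi, ra);
    }
}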
- */ -int probe_access_flags(CPUArchState *env, vaddr addr, int size, - MMUAccessType access_type, int mmu_idx, - bool nonfault, void **phost, uintptr_t retaddr); - -#ifndef CONFIG_USER_ONLY - -/** - * probe_access_full: - * Like probe_access_flags, except also return into @pfull. - * - * The CPUTLBEntryFull structure returned via @pfull is transient - * and must be consumed or copied immediately, before any further - * access or changes to TLB @mmu_idx. - */ -int probe_access_full(CPUArchState *env, vaddr addr, int size, - MMUAccessType access_type, int mmu_idx, - bool nonfault, void **phost, - CPUTLBEntryFull **pfull, uintptr_t retaddr); - -/** - * probe_access_mmu() - Like probe_access_full except cannot fault and - * doesn't trigger instrumentation. - * - * @env: CPUArchState - * @vaddr: virtual address to probe - * @size: size of the probe - * @access_type: read, write or execute permission - * @mmu_idx: softmmu index - * @phost: ptr to return value host address or NULL - * @pfull: ptr to return value CPUTLBEntryFull structure or NULL - * - * The CPUTLBEntryFull structure returned via @pfull is transient - * and must be consumed or copied immediately, before any further - * access or changes to TLB @mmu_idx. - * - * Returns: TLB flags as per probe_access_flags() - */ -int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size, - MMUAccessType access_type, int mmu_idx, - void **phost, CPUTLBEntryFull **pfull); - -#endif /* !CONFIG_USER_ONLY */ -#endif /* CONFIG_TCG */ - -static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb) -{ -#ifdef CONFIG_USER_ONLY - return tb->itree.start; -#else - return tb->page_addr[0]; -#endif -} - -static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb) -{ -#ifdef CONFIG_USER_ONLY - tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK; - return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next; -#else - return tb->page_addr[1]; -#endif -} - -static inline void tb_set_page_addr0(TranslationBlock *tb, - tb_page_addr_t addr) -{ -#ifdef CONFIG_USER_ONLY - tb->itree.start = addr; - /* - * To begin, we record an interval of one byte. When the translation - * loop encounters a second page, the interval will be extended to - * include the first byte of the second page, which is sufficient to - * allow tb_page_addr1() above to work properly. The final corrected - * interval will be set by tb_page_add() from tb->size before the - * node is added to the interval tree. - */ - tb->itree.last = addr; -#else - tb->page_addr[0] = addr; -#endif -} - -static inline void tb_set_page_addr1(TranslationBlock *tb, - tb_page_addr_t addr) -{ -#ifdef CONFIG_USER_ONLY - /* Extend the interval to the first byte of the second page. See above. */ - tb->itree.last = addr; -#else - tb->page_addr[1] = addr; -#endif -} - -/* TranslationBlock invalidate API */ -void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); -void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last); -void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr); - -/* GETPC is the true target of the return instruction that we'll execute. */ -#if defined(CONFIG_TCG_INTERPRETER) -extern __thread uintptr_t tci_tb_ptr; -# define GETPC() tci_tb_ptr -#else -# define GETPC() \ - ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0))) -#endif - -/* The true return address will often point to a host insn that is part of - the next translated guest insn. Adjust the address backward to point to - the middle of the call insn. 
Subtracting one would do the job except for - several compressed mode architectures (arm, mips) which set the low bit - to indicate the compressed mode; subtracting two works around that. It - is also the case that there are no host isas that contain a call insn - smaller than 4 bytes, so we don't worry about special-casing this. */ -#define GETPC_ADJ 2 - -#if !defined(CONFIG_USER_ONLY) - -/** - * iotlb_to_section: - * @cpu: CPU performing the access - * @index: TCG CPU IOTLB entry - * - * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that - * it refers to. @index will have been initially created and returned - * by memory_region_section_get_iotlb(). - */ -struct MemoryRegionSection *iotlb_to_section(CPUState *cpu, - hwaddr index, MemTxAttrs attrs); -#endif - -/** - * get_page_addr_code_hostp() - * @env: CPUArchState - * @addr: guest virtual address of guest code - * - * See get_page_addr_code() (full-system version) for documentation on the - * return value. - * - * Sets *@hostp (when @hostp is non-NULL) as follows. - * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp - * to the host address where @addr's content is kept. - * - * Note: this function can trigger an exception. - */ -tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr, - void **hostp); - -/** - * get_page_addr_code() - * @env: CPUArchState - * @addr: guest virtual address of guest code - * - * If we cannot translate and execute from the entire RAM page, or if - * the region is not backed by RAM, returns -1. Otherwise, returns the - * ram_addr_t corresponding to the guest code at @addr. - * - * Note: this function can trigger an exception. - */ -static inline tb_page_addr_t get_page_addr_code(CPUArchState *env, - vaddr addr) -{ - return get_page_addr_code_hostp(env, addr, NULL); -} - -#if defined(CONFIG_USER_ONLY) -void TSA_NO_TSA mmap_lock(void); -void TSA_NO_TSA mmap_unlock(void); -bool have_mmap_lock(void); - -static inline void mmap_unlock_guard(void *unused) -{ - mmap_unlock(); -} - -#define WITH_MMAP_LOCK_GUARD() \ - for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard))) \ - = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1) - -/** - * adjust_signal_pc: - * @pc: raw pc from the host signal ucontext_t. - * @is_write: host memory operation was write, or read-modify-write. - * - * Alter @pc as required for unwinding. Return the type of the - * guest memory access -- host reads may be for guest execution. - */ -MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write); - -/** - * handle_sigsegv_accerr_write: - * @cpu: the cpu context - * @old_set: the sigset_t from the signal ucontext_t - * @host_pc: the host pc, adjusted for the signal - * @host_addr: the host address of the fault - * - * Return true if the write fault has been handled, and should be re-tried. - */ -bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set, - uintptr_t host_pc, abi_ptr guest_addr); - -/** - * cpu_loop_exit_sigsegv: - * @cpu: the cpu context - * @addr: the guest address of the fault - * @access_type: access was read/write/execute - * @maperr: true for invalid page, false for permission fault - * @ra: host pc for unwinding - * - * Use the TCGCPUOps hook to record cpu state, do guest operating system - * specific things to raise SIGSEGV, and jump to the main cpu loop. 
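For the user-only mmap locking helpers above, a small sketch of the guard macro in use; the surrounding function and flag choice are assumptions.

static target_ulong example_reserve_range(target_ulong min, target_ulong max,
                                          target_ulong len, target_ulong align)
{
    target_ulong addr;

    WITH_MMAP_LOCK_GUARD() {
        /* page_find_range_empty() must be called with the mmap lock held,
         * and its result only stays valid while the lock is held ... */
        addr = page_find_range_empty(min, max, len, align);
        if (addr != (target_ulong)-1) {
            /* ... so the new mapping is installed before the guard exits. */
            page_set_flags(addr, addr + len - 1, PAGE_VALID | PAGE_READ);
        }
    }
    return addr;
}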
- */ -G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, - MMUAccessType access_type, - bool maperr, uintptr_t ra); - -/** - * cpu_loop_exit_sigbus: - * @cpu: the cpu context - * @addr: the guest address of the alignment fault - * @access_type: access was read/write/execute - * @ra: host pc for unwinding - * - * Use the TCGCPUOps hook to record cpu state, do guest operating system - * specific things to raise SIGBUS, and jump to the main cpu loop. - */ -G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr, - MMUAccessType access_type, - uintptr_t ra); - -#else -static inline void mmap_lock(void) {} -static inline void mmap_unlock(void) {} -#define WITH_MMAP_LOCK_GUARD() - -void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length); -void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length); - -MemoryRegionSection * -address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, - hwaddr *xlat, hwaddr *plen, - MemTxAttrs attrs, int *prot); -hwaddr memory_region_section_get_iotlb(CPUState *cpu, - MemoryRegionSection *section); -#endif - -#endif diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h index d73f424f565..a16c0051ce0 100644 --- a/include/exec/gdbstub.h +++ b/include/exec/gdbstub.h @@ -49,12 +49,18 @@ void gdb_unregister_coprocessor_all(CPUState *cpu); /** * gdbserver_start: start the gdb server * @port_or_device: connection spec for gdb + * @errp: error handle * * For CONFIG_USER this is either a tcp port or a path to a fifo. For * system emulation you can use a full chardev spec for your gdbserver * port. + * + * The error handle should be either &error_fatal (for start-up) or + * &error_warn (for QMP/HMP initiated sessions). + * + * Returns true when server successfully started. */ -int gdbserver_start(const char *port_or_device); +bool gdbserver_start(const char *port_or_device, Error **errp); /** * gdb_feature_builder_init() - Initialize GDBFeatureBuilder. @@ -118,6 +124,20 @@ const GDBFeature *gdb_find_static_feature(const char *xmlname); */ int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); +/** + * gdb_write_register() - Write a register associated with a CPU. + * @cpu: The CPU associated with the register. + * @buf: The buffer that the register contents will be set to. + * @reg: The register's number returned by gdb_find_feature_register(). + * + * The size of @buf must be at least the size of the register being + * written. + * + * Return: The number of written bytes, or 0 if an error occurred (for + * example, an unknown register was provided). 
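+ *
+ * Editor's illustrative sketch (not part of the patch; the CPU pointer
+ * and register number are caller-provided assumptions):
+ *
+ *     uint8_t val[8] = { 0 };
+ *     if (gdb_write_register(cpu, val, reg_num) == 0) {
+ *         error_report("gdb: unknown register %d", reg_num);
+ *     }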
+ */ +int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg); + /** * typedef GDBRegDesc - a register description from gdbstub */ diff --git a/include/exec/helper-head.h.inc b/include/exec/helper-head.h.inc index 5ef467a79d1..5b248fd7138 100644 --- a/include/exec/helper-head.h.inc +++ b/include/exec/helper-head.h.inc @@ -23,6 +23,7 @@ #define dh_alias_ptr ptr #define dh_alias_cptr ptr #define dh_alias_env ptr +#define dh_alias_fpst ptr #define dh_alias_void void #define dh_alias_noreturn noreturn #define dh_alias(t) glue(dh_alias_, t) @@ -39,6 +40,7 @@ #define dh_ctype_ptr void * #define dh_ctype_cptr const void * #define dh_ctype_env CPUArchState * +#define dh_ctype_fpst float_status * #define dh_ctype_void void #define dh_ctype_noreturn G_NORETURN void #define dh_ctype(t) dh_ctype_##t @@ -56,6 +58,17 @@ # define dh_ctype_tl target_ulong #endif /* COMPILING_PER_TARGET */ +#if __SIZEOF_POINTER__ == 4 +# define dh_alias_vaddr i32 +# define dh_typecode_vaddr dh_typecode_i32 +#elif __SIZEOF_POINTER__ == 8 +# define dh_alias_vaddr i64 +# define dh_typecode_vaddr dh_typecode_i64 +#else +# error "sizeof pointer is different from {4,8}" +#endif /* __SIZEOF_POINTER__ */ +# define dh_ctype_vaddr uintptr_t + /* We can't use glue() here because it falls foul of C preprocessor recursive expansion rules. */ #define dh_retvar_decl0_void void @@ -96,6 +109,7 @@ #define dh_typecode_f64 dh_typecode_i64 #define dh_typecode_cptr dh_typecode_ptr #define dh_typecode_env dh_typecode_ptr +#define dh_typecode_fpst dh_typecode_ptr #define dh_typecode(t) dh_typecode_##t #define dh_callflag_i32 0 diff --git a/include/exec/helper-proto-common.h b/include/exec/helper-proto-common.h index 16782ef46c8..76e6c25becb 100644 --- a/include/exec/helper-proto-common.h +++ b/include/exec/helper-proto-common.h @@ -13,4 +13,6 @@ #include "exec/helper-proto.h.inc" #undef HELPER_H +#include "accel/tcg/getpc.h" + #endif /* HELPER_PROTO_COMMON_H */ diff --git a/include/exec/icount.h b/include/exec/icount.h new file mode 100644 index 00000000000..7a26b40084d --- /dev/null +++ b/include/exec/icount.h @@ -0,0 +1,76 @@ +/* + * icount - Instruction Counter API + * CPU timers state API + * + * Copyright 2020 SUSE LLC + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef EXEC_ICOUNT_H +#define EXEC_ICOUNT_H + +/** + * ICountMode: icount enablement state: + * + * @ICOUNT_DISABLED: Disabled - Do not count executed instructions. + * @ICOUNT_PRECISE: Enabled - Fixed conversion of insn to ns via "shift" option + * @ICOUNT_ADAPTATIVE: Enabled - Runtime adaptive algorithm to compute shift + */ +typedef enum { + ICOUNT_DISABLED = 0, + ICOUNT_PRECISE, + ICOUNT_ADAPTATIVE, +} ICountMode; + +#ifdef CONFIG_TCG +extern ICountMode use_icount; +#define icount_enabled() (use_icount) +#else +#define icount_enabled() ICOUNT_DISABLED +#endif + +/* Protect the CONFIG_USER_ONLY test vs poisoning. */ +#if defined(COMPILING_PER_TARGET) || defined(COMPILING_SYSTEM_VS_USER) +# ifdef CONFIG_USER_ONLY +# undef icount_enabled +# define icount_enabled() ICOUNT_DISABLED +# endif +#endif + +/* + * Update the icount with the executed instructions. Called by + * cpus-tcg vCPU thread so the main-loop can see time has moved forward. + */ +void icount_update(CPUState *cpu); + +/* get raw icount value */ +int64_t icount_get_raw(void); + +/* return the virtual CPU time in ns, based on the instruction counter. */ +int64_t icount_get(void); +/* + * convert an instruction counter value to ns, based on the icount shift. 
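+ * (Editor's illustrative sketch, assuming a caller that already holds
+ * a raw instruction count:
+ *     int64_t ns = icount_to_ns(icount_get_raw());
+ * which in precise mode is roughly count << shift.)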
+ * This shift is set as a fixed value with the icount "shift" option + * (precise mode), or it is constantly approximated and corrected at + * runtime in adaptive mode. + */ +int64_t icount_to_ns(int64_t icount); + +/** + * icount_configure: configure the icount options, including "shift" + * @opts: Options to parse + * @errp: pointer to a NULL-initialized error object + * + * Return: true on success, else false setting @errp with error + */ +bool icount_configure(QemuOpts *opts, Error **errp); + +/* used by tcg vcpu thread to calc icount budget */ +int64_t icount_round(int64_t count); + +/* if the CPUs are idle, start accounting real time to virtual clock. */ +void icount_start_warp_timer(void); +void icount_account_warp_timer(void); +void icount_notify_exit(void); + +#endif /* EXEC_ICOUNT_H */ diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h index 14cdd8d5824..52ee9552491 100644 --- a/include/exec/memattrs.h +++ b/include/exec/memattrs.h @@ -23,12 +23,6 @@ * different semantics. */ typedef struct MemTxAttrs { - /* Bus masters which don't specify any attributes will get this - * (via the MEMTXATTRS_UNSPECIFIED constant), so that we can - * distinguish "all attributes deliberately clear" from - * "didn't specify" if necessary. - */ - unsigned int unspecified:1; /* * ARM/AMBA: TrustZone Secure access * x86: System Management Mode access @@ -50,16 +44,40 @@ typedef struct MemTxAttrs { * (see MEMTX_ACCESS_ERROR). */ unsigned int memory:1; + /* Debug access that can even write to ROM. */ + unsigned int debug:1; /* Requester ID (for MSI for example) */ unsigned int requester_id:16; + + /* + * PID (PCI PASID) support: Limited to 8 bits process identifier. + */ + unsigned int pid:8; + + /* PCI - IOMMU operations, see PCIAddressType */ + unsigned int address_type:1; + + /* + * Bus masters which don't specify any attributes will get this + * (via the MEMTXATTRS_UNSPECIFIED constant), so that we can + * distinguish "all attributes deliberately clear" from + * "didn't specify" if necessary. "debug" can be set alongside + * "unspecified". + */ + bool unspecified; + + uint8_t _reserved1; + uint16_t _reserved2; } MemTxAttrs; +QEMU_BUILD_BUG_ON(sizeof(MemTxAttrs) > 8); + /* Bus masters which don't specify any attributes will get this, * which has all attribute bits clear except the topmost one * (so that we can distinguish "all attributes deliberately clear" * from "didn't specify" if necessary). */ -#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = 1 }) +#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = true }) /* New-style MMIO accessors can indicate that the transaction failed. * A zero (MEMTX_OK) response means success; anything else is a failure diff --git a/include/exec/memop.h b/include/exec/memop.h index f881fe7af4e..cf7da3362e8 100644 --- a/include/exec/memop.h +++ b/include/exec/memop.h @@ -91,8 +91,12 @@ typedef enum MemOp { * Depending on alignment, one or both will be single-copy atomic. * This is the atomicity e.g. of Arm FEAT_LSE2 LDP. * MO_ATOM_SUBALIGN: the operation is single-copy atomic by parts - * by the alignment. E.g. if the address is 0 mod 4, then each - * 4-byte subobject is single-copy atomic. + * by the alignment. E.g. if an 8-byte value is accessed at an + * address which is 0 mod 8, then the whole 8-byte access is + * single-copy atomic; otherwise, if it is accessed at 0 mod 4 + * then each 4-byte subobject is single-copy atomic; otherwise + * if it is accessed at 0 mod 2 then the four 2-byte subobjects + * are single-copy atomic. 
* This is the atomicity e.g. of IBM Power. * MO_ATOM_NONE: the operation has no atomicity requirements. * @@ -158,16 +162,57 @@ static inline unsigned memop_size(MemOp op) static inline MemOp size_memop(unsigned size) { #ifdef CONFIG_DEBUG_TCG - /* Power of 2 up to 8. */ - assert((size & (size - 1)) == 0 && size >= 1 && size <= 8); + /* Power of 2 up to 1024 */ + assert(is_power_of_2(size) && size >= 1 && size <= (1 << MO_SIZE)); #endif return (MemOp)ctz32(size); } -/* Big endianness from MemOp. */ -static inline bool memop_big_endian(MemOp op) +/** + * memop_alignment_bits: + * @memop: MemOp value + * + * Extract the alignment size from the memop. + */ +static inline unsigned memop_alignment_bits(MemOp memop) +{ + unsigned a = memop & MO_AMASK; + + if (a == MO_UNALN) { + /* No alignment required. */ + a = 0; + } else if (a == MO_ALIGN) { + /* A natural alignment requirement. */ + a = memop & MO_SIZE; + } else { + /* A specific alignment requirement. */ + a = a >> MO_ASHIFT; + } + return a; +} + +/* + * memop_atomicity_bits: + * @memop: MemOp value + * + * Extract the atomicity size from the memop. + */ +static inline unsigned memop_atomicity_bits(MemOp memop) { - return (op & MO_BSWAP) == MO_BE; + unsigned size = memop & MO_SIZE; + + switch (memop & MO_ATOM_MASK) { + case MO_ATOM_NONE: + size = MO_8; + break; + case MO_ATOM_IFALIGN_PAIR: + case MO_ATOM_WITHIN16_PAIR: + size = size ? size - 1 : 0; + break; + default: + break; + } + return size; } #endif diff --git a/include/exec/memory_ldst.h.inc b/include/exec/memory_ldst.h.inc index 92ad74e9560..7270235c600 100644 --- a/include/exec/memory_ldst.h.inc +++ b/include/exec/memory_ldst.h.inc @@ -19,7 +19,6 @@ * License along with this library; if not, see . */ -#ifdef TARGET_ENDIANNESS uint16_t glue(address_space_lduw, SUFFIX)(ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result); uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL, @@ -34,7 +33,6 @@ void glue(address_space_stl, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result); void glue(address_space_stq, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result); -#else uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL, hwaddr addr, MemTxAttrs attrs, MemTxResult *result); uint16_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL, @@ -63,9 +61,7 @@ void glue(address_space_stq_le, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result); void glue(address_space_stq_be, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result); -#endif #undef ARG1_DECL #undef ARG1 #undef SUFFIX -#undef TARGET_ENDIANNESS diff --git a/include/exec/memory_ldst_phys.h.inc b/include/exec/memory_ldst_phys.h.inc index ecd678610d1..db67de75251 100644 --- a/include/exec/memory_ldst_phys.h.inc +++ b/include/exec/memory_ldst_phys.h.inc @@ -19,7 +19,6 @@ * License along with this library; if not, see . 
*/ -#ifdef TARGET_ENDIANNESS static inline uint16_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr) { return glue(address_space_lduw, SUFFIX)(ARG1, addr, @@ -55,7 +54,7 @@ static inline void glue(stq_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val) glue(address_space_stq, SUFFIX)(ARG1, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } -#else + static inline uint8_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr) { return glue(address_space_ldub, SUFFIX)(ARG1, addr, @@ -139,9 +138,7 @@ static inline void glue(stq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t va glue(address_space_stq_be, SUFFIX)(ARG1, addr, val, MEMTXATTRS_UNSPECIFIED, NULL); } -#endif #undef ARG1_DECL #undef ARG1 #undef SUFFIX -#undef TARGET_ENDIANNESS diff --git a/include/exec/mmap-lock.h b/include/exec/mmap-lock.h new file mode 100644 index 00000000000..50ffdab9c5d --- /dev/null +++ b/include/exec/mmap-lock.h @@ -0,0 +1,33 @@ +/* + * QEMU user-only mmap lock, with stubs for system mode + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ +#ifndef EXEC_MMAP_LOCK_H +#define EXEC_MMAP_LOCK_H + +#ifdef CONFIG_USER_ONLY + +void TSA_NO_TSA mmap_lock(void); +void TSA_NO_TSA mmap_unlock(void); +bool have_mmap_lock(void); + +static inline void mmap_unlock_guard(void *unused) +{ + mmap_unlock(); +} + +#define WITH_MMAP_LOCK_GUARD() \ + for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard))) \ + = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1) + +#else + +static inline void mmap_lock(void) {} +static inline void mmap_unlock(void) {} +#define WITH_MMAP_LOCK_GUARD() + +#endif /* CONFIG_USER_ONLY */ +#endif /* EXEC_MMAP_LOCK_H */ diff --git a/include/exec/page-vary.h b/include/exec/page-vary.h index 54ddde308a1..101c25911c1 100644 --- a/include/exec/page-vary.h +++ b/include/exec/page-vary.h @@ -49,4 +49,13 @@ bool set_preferred_target_page_bits(int bits); */ void finalize_target_page_bits(void); +/** + * migration_legacy_page_bits + * + * For migration compatibility with qemu v2.9, prior to the introduction + * of the configuration/target-page-bits section, return the value of + * TARGET_PAGE_BITS that the target had then. 
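+ *
+ * Editor's illustrative sketch (hypothetical caller in migration code,
+ * not part of the patch):
+ *
+ *     uint64_t legacy_page_size = 1ULL << migration_legacy_page_bits();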
+ */ +int migration_legacy_page_bits(void); + #endif /* EXEC_PAGE_VARY_H */ diff --git a/include/exec/poison.h b/include/exec/poison.h index 792a83f493e..a779adbb7a6 100644 --- a/include/exec/poison.h +++ b/include/exec/poison.h @@ -11,7 +11,6 @@ #pragma GCC poison TARGET_AARCH64 #pragma GCC poison TARGET_ALPHA #pragma GCC poison TARGET_ARM -#pragma GCC poison TARGET_CRIS #pragma GCC poison TARGET_HEXAGON #pragma GCC poison TARGET_HPPA #pragma GCC poison TARGET_LOONGARCH64 @@ -36,35 +35,17 @@ #pragma GCC poison TARGET_HAS_BFLT #pragma GCC poison TARGET_NAME -#pragma GCC poison TARGET_SUPPORTS_MTTCG #pragma GCC poison TARGET_BIG_ENDIAN -#pragma GCC poison BSWAP_NEEDED +#pragma GCC poison TCG_GUEST_DEFAULT_MO #pragma GCC poison TARGET_LONG_BITS #pragma GCC poison TARGET_FMT_lx #pragma GCC poison TARGET_FMT_ld #pragma GCC poison TARGET_FMT_lu -#pragma GCC poison TARGET_PAGE_SIZE -#pragma GCC poison TARGET_PAGE_MASK -#pragma GCC poison TARGET_PAGE_BITS -#pragma GCC poison TARGET_PAGE_ALIGN - -#pragma GCC poison CPU_INTERRUPT_HARD -#pragma GCC poison CPU_INTERRUPT_EXITTB -#pragma GCC poison CPU_INTERRUPT_HALT -#pragma GCC poison CPU_INTERRUPT_DEBUG -#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0 -#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1 -#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2 -#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3 -#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4 -#pragma GCC poison CPU_INTERRUPT_TGT_INT_0 -#pragma GCC poison CPU_INTERRUPT_TGT_INT_1 -#pragma GCC poison CPU_INTERRUPT_TGT_INT_2 +#pragma GCC poison TARGET_PHYS_ADDR_SPACE_BITS #pragma GCC poison CONFIG_ALPHA_DIS -#pragma GCC poison CONFIG_CRIS_DIS #pragma GCC poison CONFIG_HPPA_DIS #pragma GCC poison CONFIG_I386_DIS #pragma GCC poison CONFIG_HEXAGON_DIS @@ -85,4 +66,11 @@ #pragma GCC poison CONFIG_WHPX #pragma GCC poison CONFIG_XEN +#ifndef COMPILING_SYSTEM_VS_USER +#pragma GCC poison CONFIG_USER_ONLY +#pragma GCC poison CONFIG_SOFTMMU +#endif + +#pragma GCC poison KVM_HAVE_MCE_INJECTION + #endif diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h index 2ad2a81accf..d9cfe530bea 100644 --- a/include/exec/ramlist.h +++ b/include/exec/ramlist.h @@ -50,6 +50,7 @@ typedef struct RAMList { /* RCU-enabled, writes protected by the ramlist lock. */ QLIST_HEAD(, RAMBlock) blocks; DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM]; + unsigned int num_dirty_blocks; uint32_t version; QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers; } RAMList; diff --git a/include/exec/target_page.h b/include/exec/target_page.h index 98ffbb5c239..ca0ebbc8bbd 100644 --- a/include/exec/target_page.h +++ b/include/exec/target_page.h @@ -14,10 +14,54 @@ #ifndef EXEC_TARGET_PAGE_H #define EXEC_TARGET_PAGE_H -size_t qemu_target_page_size(void); -int qemu_target_page_mask(void); -int qemu_target_page_bits(void); -int qemu_target_page_bits_min(void); +/* + * If compiling per-target, get the real values. + * For generic code, reuse the mechanism for variable page size. 
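+ *
+ * Editor's illustrative sketch (addr is an assumed guest address;
+ * either build can use the same accessors defined below):
+ *
+ *     size_t psize = qemu_target_page_size();
+ *     uint64_t aligned = TARGET_PAGE_ALIGN(addr);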
+ */ +#ifdef COMPILING_PER_TARGET +#include "cpu-param.h" +#include "exec/target_long.h" +#define TARGET_PAGE_TYPE target_long +#else +#define TARGET_PAGE_BITS_VARY +#define TARGET_PAGE_TYPE int +#endif + +#ifdef TARGET_PAGE_BITS_VARY +# include "exec/page-vary.h" +extern const TargetPageBits target_page; +# ifdef CONFIG_DEBUG_TCG +# define TARGET_PAGE_BITS ({ assert(target_page.decided); \ + target_page.bits; }) +# define TARGET_PAGE_MASK ({ assert(target_page.decided); \ + (TARGET_PAGE_TYPE)target_page.mask; }) +# else +# define TARGET_PAGE_BITS target_page.bits +# define TARGET_PAGE_MASK ((TARGET_PAGE_TYPE)target_page.mask) +# endif +# define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK) +#else +# define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) +# define TARGET_PAGE_MASK ((TARGET_PAGE_TYPE)-1 << TARGET_PAGE_BITS) +#endif + +#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE) + +static inline size_t qemu_target_page_size(void) +{ + return TARGET_PAGE_SIZE; +} + +static inline int qemu_target_page_mask(void) +{ + return TARGET_PAGE_MASK; +} + +static inline int qemu_target_page_bits(void) +{ + return TARGET_PAGE_BITS; +} size_t qemu_target_pages_to_MiB(size_t pages); + #endif diff --git a/include/exec/tlb-common.h b/include/exec/tlb-common.h index dc5a5faa0b3..03b5a8ffc71 100644 --- a/include/exec/tlb-common.h +++ b/include/exec/tlb-common.h @@ -19,14 +19,14 @@ #ifndef EXEC_TLB_COMMON_H #define EXEC_TLB_COMMON_H 1 -#define CPU_TLB_ENTRY_BITS 5 +#define CPU_TLB_ENTRY_BITS (HOST_LONG_BITS == 32 ? 4 : 5) /* Minimalized TLB entry for use by TCG fast path. */ typedef union CPUTLBEntry { struct { - uint64_t addr_read; - uint64_t addr_write; - uint64_t addr_code; + uintptr_t addr_read; + uintptr_t addr_write; + uintptr_t addr_code; /* * Addend to virtual address to get host address. IO accesses * use the corresponding iotlb value. @@ -37,7 +37,7 @@ typedef union CPUTLBEntry { * Padding to get a power of two size, as well as index * access to addr_{read,write,code}. */ - uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)]; + uintptr_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uintptr_t)]; } CPUTLBEntry; QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS)); diff --git a/include/exec/tlb-flags.h b/include/exec/tlb-flags.h new file mode 100644 index 00000000000..357e79095c9 --- /dev/null +++ b/include/exec/tlb-flags.h @@ -0,0 +1,86 @@ +/* + * TLB flags definition + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ +#ifndef TLB_FLAGS_H +#define TLB_FLAGS_H + +/* + * Flags returned for lookup of a TLB virtual address. + */ + +#ifdef CONFIG_USER_ONLY + +/* + * Allow some level of source compatibility with softmmu. + * Invalid is set when the page does not have requested permissions. + * MMIO is set when we want the target helper to use the functional + * interface for load/store so that plugins see the access. 
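+ *
+ * Editor's illustrative sketch (local names are assumptions;
+ * probe_access_flags() is the usual way these flags are obtained):
+ *
+ *     void *host;
+ *     int flags = probe_access_flags(env, addr, size, MMU_DATA_LOAD,
+ *                                    mmu_idx, true, &host, retaddr);
+ *     bool use_slow_path = flags & (TLB_INVALID_MASK | TLB_MMIO);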
+ */ +#define TLB_INVALID_MASK (1 << 0) +#define TLB_MMIO (1 << 1) +#define TLB_WATCHPOINT 0 + +#else + +/* + * Flags stored in CPUTLBEntryFull.slow_flags[x]. + * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x]. + */ + +/* Set if TLB entry requires byte swap. */ +#define TLB_BSWAP (1 << 0) +/* Set if TLB entry contains a watchpoint. */ +#define TLB_WATCHPOINT (1 << 1) +/* Set if TLB entry requires aligned accesses. */ +#define TLB_CHECK_ALIGNED (1 << 2) +/* Set if TLB entry writes ignored. */ +#define TLB_DISCARD_WRITE (1 << 3) +/* Set if TLB entry is an IO callback. */ +#define TLB_MMIO (1 << 4) + +#define TLB_SLOW_FLAGS_MASK \ + (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED | \ + TLB_DISCARD_WRITE | TLB_MMIO) + +/* + * Flags stored in CPUTLBEntry.addr_idx[x]. + * These must be above the largest alignment (64 bytes), + * and below the smallest page size (1024 bytes). + * This leaves bits [9:6] available for use. + */ + +/* Zero if TLB entry is valid. */ +#define TLB_INVALID_MASK (1 << 6) +/* Set if TLB entry references a clean RAM page. */ +#define TLB_NOTDIRTY (1 << 7) +/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */ +#define TLB_FORCE_SLOW (1 << 8) + +/* + * Use this mask to check interception with an alignment mask + * in a TCG backend. + */ +#define TLB_FLAGS_MASK \ + (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_FORCE_SLOW) + +/* The two sets of flags must not overlap. */ +QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK); + +#endif /* !CONFIG_USER_ONLY */ + +#endif /* TLB_FLAGS_H */ diff --git a/include/exec/translate-all.h b/include/exec/translate-all.h deleted file mode 100644 index 85c9460c7ca..00000000000 --- a/include/exec/translate-all.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Translated block handling - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
- */ -#ifndef TRANSLATE_ALL_H -#define TRANSLATE_ALL_H - -#include "exec/exec-all.h" - - -/* translate-all.c */ -void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr); - -#ifdef CONFIG_USER_ONLY -void page_protect(tb_page_addr_t page_addr); -int page_unprotect(target_ulong address, uintptr_t pc); -#endif - -#endif /* TRANSLATE_ALL_H */ diff --git a/include/exec/translation-block.h b/include/exec/translation-block.h index a6d1af6e9be..cdce399ebab 100644 --- a/include/exec/translation-block.h +++ b/include/exec/translation-block.h @@ -7,10 +7,13 @@ #ifndef EXEC_TRANSLATION_BLOCK_H #define EXEC_TRANSLATION_BLOCK_H +#include "qemu/atomic.h" #include "qemu/thread.h" #include "exec/cpu-common.h" +#include "exec/vaddr.h" #ifdef CONFIG_USER_ONLY #include "qemu/interval-tree.h" +#include "exec/target_page.h" #endif /* @@ -152,4 +155,60 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb) return qatomic_read(&tb->cflags); } +bool tcg_cflags_has(CPUState *cpu, uint32_t flags); +void tcg_cflags_set(CPUState *cpu, uint32_t flags); + +static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb) +{ +#ifdef CONFIG_USER_ONLY + return tb->itree.start; +#else + return tb->page_addr[0]; +#endif +} + +static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb) +{ +#ifdef CONFIG_USER_ONLY + tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK; + return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next; +#else + return tb->page_addr[1]; +#endif +} + +static inline void tb_set_page_addr0(TranslationBlock *tb, + tb_page_addr_t addr) +{ +#ifdef CONFIG_USER_ONLY + tb->itree.start = addr; + /* + * To begin, we record an interval of one byte. When the translation + * loop encounters a second page, the interval will be extended to + * include the first byte of the second page, which is sufficient to + * allow tb_page_addr1() above to work properly. The final corrected + * interval will be set by tb_page_add() from tb->size before the + * node is added to the interval tree. + */ + tb->itree.last = addr; +#else + tb->page_addr[0] = addr; +#endif +} + +static inline void tb_set_page_addr1(TranslationBlock *tb, + tb_page_addr_t addr) +{ +#ifdef CONFIG_USER_ONLY + /* Extend the interval to the first byte of the second page. See above. */ + tb->itree.last = addr; +#else + tb->page_addr[1] = addr; +#endif +} + +/* TranslationBlock invalidate API */ +void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start, + tb_page_addr_t last); + #endif /* EXEC_TRANSLATION_BLOCK_H */ diff --git a/include/exec/translator.h b/include/exec/translator.h index 25004dfb76e..3c326555696 100644 --- a/include/exec/translator.h +++ b/include/exec/translator.h @@ -18,23 +18,9 @@ * member in your target-specific DisasContext. */ -#include "qemu/bswap.h" +#include "exec/memop.h" #include "exec/vaddr.h" -/** - * gen_intermediate_code - * @cpu: cpu context - * @tb: translation block - * @max_insns: max number of instructions to translate - * @pc: guest virtual program counter address - * @host_pc: host physical program counter address - * - * This function must be provided by the target, which should create - * the target-specific DisasContext, and then invoke translator_loop. - */ -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc); - /** * DisasJumpType: * @DISAS_NEXT: Next instruction in program order. @@ -71,7 +57,6 @@ typedef enum DisasJumpType { * @is_jmp: What instruction to disassemble next. 
* @num_insns: Number of translated instructions (including current). * @max_insns: Maximum number of instructions to be translated in this TB. - * @singlestep_enabled: "Hardware" single stepping enabled. * @plugin_enabled: TCG plugin enabled in this TB. * @fake_insn: True if translator_fake_ldb used. * @insn_start: The last op emitted by the insn_start hook, @@ -86,9 +71,9 @@ struct DisasContextBase { DisasJumpType is_jmp; int num_insns; int max_insns; - bool singlestep_enabled; bool plugin_enabled; bool fake_insn; + uint8_t code_mmuidx; struct TCGOp *insn_start; void *host_addr[2]; @@ -196,42 +181,53 @@ bool translator_io_start(DisasContextBase *db); */ uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc); -uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc); -uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc); -uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc); +uint16_t translator_lduw_end(CPUArchState *env, DisasContextBase *db, + vaddr pc, MemOp endian); +uint32_t translator_ldl_end(CPUArchState *env, DisasContextBase *db, + vaddr pc, MemOp endian); +uint64_t translator_ldq_end(CPUArchState *env, DisasContextBase *db, + vaddr pc, MemOp endian); + +#ifdef COMPILING_PER_TARGET +static inline uint16_t +translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc) +{ + return translator_lduw_end(env, db, pc, MO_TE); +} + +static inline uint32_t +translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc) +{ + return translator_ldl_end(env, db, pc, MO_TE); +} + +static inline uint64_t +translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc) +{ + return translator_ldq_end(env, db, pc, MO_TE); +} static inline uint16_t translator_lduw_swap(CPUArchState *env, DisasContextBase *db, vaddr pc, bool do_swap) { - uint16_t ret = translator_lduw(env, db, pc); - if (do_swap) { - ret = bswap16(ret); - } - return ret; + return translator_lduw_end(env, db, pc, MO_TE ^ (do_swap * MO_BSWAP)); } static inline uint32_t translator_ldl_swap(CPUArchState *env, DisasContextBase *db, vaddr pc, bool do_swap) { - uint32_t ret = translator_ldl(env, db, pc); - if (do_swap) { - ret = bswap32(ret); - } - return ret; + return translator_ldl_end(env, db, pc, MO_TE ^ (do_swap * MO_BSWAP)); } static inline uint64_t translator_ldq_swap(CPUArchState *env, DisasContextBase *db, vaddr pc, bool do_swap) { - uint64_t ret = translator_ldq(env, db, pc); - if (do_swap) { - ret = bswap64(ret); - } - return ret; + return translator_ldq_end(env, db, pc, MO_TE ^ (do_swap * MO_BSWAP)); } +#endif /* COMPILING_PER_TARGET */ /** * translator_fake_ld - fake instruction load @@ -269,16 +265,15 @@ bool translator_st(const DisasContextBase *db, void *dest, */ size_t translator_st_len(const DisasContextBase *db); -#ifdef COMPILING_PER_TARGET -/* - * Return whether addr is on the same page as where disassembly started. +/** + * translator_is_same_page + * @db: disassembly context + * @addr: virtual address within TB + * + * Return whether @addr is on the same page as where disassembly started. * Translators can use this to enforce the rule that only single-insn * translation blocks are allowed to cross page boundaries. 
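 *
 * Editor's illustrative sketch (hypothetical target translator; dc is
 * the target's DisasContext embedding DisasContextBase as "base"):
 *
 *     if (!translator_is_same_page(&dc->base, dc->base.pc_next)) {
 *         dc->base.is_jmp = DISAS_TOO_MANY;
 *     }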
*/ -static inline bool is_same_page(const DisasContextBase *db, vaddr addr) -{ - return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0; -} -#endif +bool translator_is_same_page(const DisasContextBase *db, vaddr addr); #endif /* EXEC__TRANSLATOR_H */ diff --git a/include/exec/tswap.h b/include/exec/tswap.h index b7a41913475..72219e2c431 100644 --- a/include/exec/tswap.h +++ b/include/exec/tswap.h @@ -9,17 +9,7 @@ #define TSWAP_H #include "qemu/bswap.h" - -/** - * target_words_bigendian: - * Returns true if the (default) endianness of the target is big endian, - * false otherwise. Note that in target-specific code, you can use - * TARGET_BIG_ENDIAN directly instead. On the other hand, common - * code should normally never need to know about the endianness of the - * target, so please do *not* use this function unless you know very well - * what you are doing! - */ -bool target_words_bigendian(void); +#include "qemu/target-info.h" /* * If we're in target-specific code, we can hard-code the swapping @@ -28,7 +18,7 @@ bool target_words_bigendian(void); #ifdef COMPILING_PER_TARGET #define target_needs_bswap() (HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN) #else -#define target_needs_bswap() (target_words_bigendian() != HOST_BIG_ENDIAN) +#define target_needs_bswap() (HOST_BIG_ENDIAN != target_big_endian()) #endif /* COMPILING_PER_TARGET */ static inline uint16_t tswap16(uint16_t s) diff --git a/include/exec/vaddr.h b/include/exec/vaddr.h index b9844afc773..28bec632fbb 100644 --- a/include/exec/vaddr.h +++ b/include/exec/vaddr.h @@ -6,13 +6,15 @@ /** * vaddr: * Type wide enough to contain any #target_ulong virtual address. + * We do not support 64-bit guest on 32-host and detect at configure time. + * Therefore, a host pointer width will always fit a guest pointer. 
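+ *
+ * Editor's illustrative sketch of the format macros (qemu_log() is
+ * QEMU's stock logging helper):
+ *
+ *     vaddr addr = 0x1000;
+ *     qemu_log("access at 0x%" VADDR_PRIx "\n", addr);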
*/ -typedef uint64_t vaddr; -#define VADDR_PRId PRId64 -#define VADDR_PRIu PRIu64 -#define VADDR_PRIo PRIo64 -#define VADDR_PRIx PRIx64 -#define VADDR_PRIX PRIX64 -#define VADDR_MAX UINT64_MAX +typedef uintptr_t vaddr; +#define VADDR_PRId PRIdPTR +#define VADDR_PRIu PRIuPTR +#define VADDR_PRIo PRIoPTR +#define VADDR_PRIx PRIxPTR +#define VADDR_PRIX PRIXPTR +#define VADDR_MAX UINTPTR_MAX #endif diff --git a/include/exec/watchpoint.h b/include/exec/watchpoint.h new file mode 100644 index 00000000000..4b6668826c7 --- /dev/null +++ b/include/exec/watchpoint.h @@ -0,0 +1,41 @@ +/* + * CPU watchpoints + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#ifndef EXEC_WATCHPOINT_H +#define EXEC_WATCHPOINT_H + +#if defined(CONFIG_USER_ONLY) +static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, + int flags, CPUWatchpoint **watchpoint) +{ + return -ENOSYS; +} + +static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, + vaddr len, int flags) +{ + return -ENOSYS; +} + +static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu, + CPUWatchpoint *wp) +{ +} + +static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask) +{ +} +#else +int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, + int flags, CPUWatchpoint **watchpoint); +int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, + vaddr len, int flags); +void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); +void cpu_watchpoint_remove_all(CPUState *cpu, int mask); +#endif + +#endif /* EXEC_WATCHPOINT_H */ diff --git a/include/fpu/softfloat-helpers.h b/include/fpu/softfloat-helpers.h index 94cbe073ec5..90862f5cd22 100644 --- a/include/fpu/softfloat-helpers.h +++ b/include/fpu/softfloat-helpers.h @@ -75,6 +75,36 @@ static inline void set_floatx80_rounding_precision(FloatX80RoundPrec val, status->floatx80_rounding_precision = val; } +static inline void set_floatx80_behaviour(FloatX80Behaviour b, + float_status *status) +{ + status->floatx80_behaviour = b; +} + +static inline void set_float_2nan_prop_rule(Float2NaNPropRule rule, + float_status *status) +{ + status->float_2nan_prop_rule = rule; +} + +static inline void set_float_3nan_prop_rule(Float3NaNPropRule rule, + float_status *status) +{ + status->float_3nan_prop_rule = rule; +} + +static inline void set_float_infzeronan_rule(FloatInfZeroNaNRule rule, + float_status *status) +{ + status->float_infzeronan_rule = rule; +} + +static inline void set_float_default_nan_pattern(uint8_t dnan_pattern, + float_status *status) +{ + status->default_nan_pattern = dnan_pattern; +} + static inline void set_flush_to_zero(bool val, float_status *status) { status->flush_to_zero = val; @@ -85,6 +115,12 @@ static inline void set_flush_inputs_to_zero(bool val, float_status *status) status->flush_inputs_to_zero = val; } +static inline void set_float_ftz_detection(FloatFTZDetection d, + float_status *status) +{ + status->ftz_detection = d; +} + static inline void set_default_nan_mode(bool val, float_status *status) { status->default_nan_mode = val; @@ -95,50 +131,79 @@ static inline void set_snan_bit_is_one(bool val, float_status *status) status->snan_bit_is_one = val; } -static inline void set_use_first_nan(bool val, float_status *status) -{ - status->use_first_nan = val; -} - static inline void set_no_signaling_nans(bool val, float_status *status) { status->no_signaling_nans = val; } -static inline bool get_float_detect_tininess(float_status *status) +static inline bool 
get_float_detect_tininess(const float_status *status) { return status->tininess_before_rounding; } -static inline FloatRoundMode get_float_rounding_mode(float_status *status) +static inline FloatRoundMode get_float_rounding_mode(const float_status *status) { return status->float_rounding_mode; } -static inline int get_float_exception_flags(float_status *status) +static inline int get_float_exception_flags(const float_status *status) { return status->float_exception_flags; } static inline FloatX80RoundPrec -get_floatx80_rounding_precision(float_status *status) +get_floatx80_rounding_precision(const float_status *status) { return status->floatx80_rounding_precision; } -static inline bool get_flush_to_zero(float_status *status) +static inline FloatX80Behaviour +get_floatx80_behaviour(const float_status *status) +{ + return status->floatx80_behaviour; +} + +static inline Float2NaNPropRule +get_float_2nan_prop_rule(const float_status *status) +{ + return status->float_2nan_prop_rule; +} + +static inline Float3NaNPropRule +get_float_3nan_prop_rule(const float_status *status) +{ + return status->float_3nan_prop_rule; +} + +static inline FloatInfZeroNaNRule +get_float_infzeronan_rule(const float_status *status) +{ + return status->float_infzeronan_rule; +} + +static inline uint8_t get_float_default_nan_pattern(const float_status *status) +{ + return status->default_nan_pattern; +} + +static inline bool get_flush_to_zero(const float_status *status) { return status->flush_to_zero; } -static inline bool get_flush_inputs_to_zero(float_status *status) +static inline bool get_flush_inputs_to_zero(const float_status *status) { return status->flush_inputs_to_zero; } -static inline bool get_default_nan_mode(float_status *status) +static inline bool get_default_nan_mode(const float_status *status) { return status->default_nan_mode; } +static inline FloatFTZDetection get_float_ftz_detection(const float_status *status) +{ + return status->ftz_detection; +} + #endif /* SOFTFLOAT_HELPERS_H */ diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h index 0884ec4ef7a..1af2a0cb14b 100644 --- a/include/fpu/softfloat-types.h +++ b/include/fpu/softfloat-types.h @@ -80,6 +80,8 @@ this code that are retained. #ifndef SOFTFLOAT_TYPES_H #define SOFTFLOAT_TYPES_H +#include "hw/registerfields.h" + /* * Software IEC/IEEE floating-point types. */ @@ -138,6 +140,8 @@ typedef enum __attribute__((__packed__)) { float_round_to_odd = 5, /* Not an IEEE rounding mode: round to closest odd, overflow to inf */ float_round_to_odd_inf = 6, + /* Not an IEEE rounding mode: round to nearest even, overflow to max */ + float_round_nearest_even_max = 7, } FloatRoundMode; /* @@ -150,8 +154,10 @@ enum { float_flag_overflow = 0x0004, float_flag_underflow = 0x0008, float_flag_inexact = 0x0010, - float_flag_input_denormal = 0x0020, - float_flag_output_denormal = 0x0040, + /* We flushed an input denormal to 0 (because of flush_inputs_to_zero) */ + float_flag_input_denormal_flushed = 0x0020, + /* We flushed an output denormal to 0 (because of flush_to_zero) */ + float_flag_output_denormal_flushed = 0x0040, float_flag_invalid_isi = 0x0080, /* inf - inf */ float_flag_invalid_imz = 0x0100, /* inf * 0 */ float_flag_invalid_idi = 0x0200, /* inf / inf */ @@ -159,6 +165,13 @@ enum { float_flag_invalid_sqrt = 0x0800, /* sqrt(-x) */ float_flag_invalid_cvti = 0x1000, /* non-nan to integer */ float_flag_invalid_snan = 0x2000, /* any operand was snan */ + /* + * An input was denormal and we used it (without flushing it to zero). 
+ * Not set if we do not actually use the denormal input (e.g. + * because some other input was a NaN, or because the operation + * wasn't actually carried out (divide-by-zero; invalid)) + */ + float_flag_input_denormal_used = 0x4000, }; /* @@ -170,6 +183,193 @@ typedef enum __attribute__((__packed__)) { floatx80_precision_s, } FloatX80RoundPrec; +/* + * 2-input NaN propagation rule. Individual architectures have + * different rules for which input NaN is propagated to the output + * when there is more than one NaN on the input. + * + * If default_nan_mode is enabled then it is valid not to set a + * NaN propagation rule, because the softfloat code guarantees + * not to try to pick a NaN to propagate in default NaN mode. + * When not in default-NaN mode, it is an error for the target + * not to set the rule in float_status, and we will assert if + * we need to handle an input NaN and no rule was selected. + */ +typedef enum __attribute__((__packed__)) { + /* No propagation rule specified */ + float_2nan_prop_none = 0, + /* Prefer SNaN over QNaN, then operand A over B */ + float_2nan_prop_s_ab, + /* Prefer SNaN over QNaN, then operand B over A */ + float_2nan_prop_s_ba, + /* Prefer A over B regardless of SNaN vs QNaN */ + float_2nan_prop_ab, + /* Prefer B over A regardless of SNaN vs QNaN */ + float_2nan_prop_ba, + /* + * This implements x87 NaN propagation rules: + * SNaN + QNaN => return the QNaN + * two SNaNs => return the one with the larger significand, silenced + * two QNaNs => return the one with the larger significand + * SNaN and a non-NaN => return the SNaN, silenced + * QNaN and a non-NaN => return the QNaN + * + * If we get down to comparing significands and they are the same, + * return the NaN with the positive sign bit (if any). + */ + float_2nan_prop_x87, +} Float2NaNPropRule; + +/* + * 3-input NaN propagation rule, for fused multiply-add. Individual + * architectures have different rules for which input NaN is + * propagated to the output when there is more than one NaN on the + * input. + * + * If default_nan_mode is enabled then it is valid not to set a NaN + * propagation rule, because the softfloat code guarantees not to try + * to pick a NaN to propagate in default NaN mode. When not in + * default-NaN mode, it is an error for the target not to set the rule + * in float_status if it uses a muladd, and we will assert if we need + * to handle an input NaN and no rule was selected. + * + * The naming scheme for Float3NaNPropRule values is: + * float_3nan_prop_s_abc: + * = "Prefer SNaN over QNaN, then operand A over B over C" + * float_3nan_prop_abc: + * = "Prefer A over B over C regardless of SNaN vs QNAN" + * + * For QEMU, the multiply-add operation is A * B + C. + */ + +/* + * We set the Float3NaNPropRule enum values up so we can select the + * right value in pickNaNMulAdd in a data driven way. + */ +FIELD(3NAN, 1ST, 0, 2) /* which operand is most preferred ? */ +FIELD(3NAN, 2ND, 2, 2) /* which operand is next most preferred ? */ +FIELD(3NAN, 3RD, 4, 2) /* which operand is least preferred ? */ +FIELD(3NAN, SNAN, 6, 1) /* do we prefer SNaN over QNaN ? 
*/ + +#define PROPRULE(X, Y, Z) \ + ((X << R_3NAN_1ST_SHIFT) | (Y << R_3NAN_2ND_SHIFT) | (Z << R_3NAN_3RD_SHIFT)) + +typedef enum __attribute__((__packed__)) { + float_3nan_prop_none = 0, /* No propagation rule specified */ + float_3nan_prop_abc = PROPRULE(0, 1, 2), + float_3nan_prop_acb = PROPRULE(0, 2, 1), + float_3nan_prop_bac = PROPRULE(1, 0, 2), + float_3nan_prop_bca = PROPRULE(1, 2, 0), + float_3nan_prop_cab = PROPRULE(2, 0, 1), + float_3nan_prop_cba = PROPRULE(2, 1, 0), + float_3nan_prop_s_abc = float_3nan_prop_abc | R_3NAN_SNAN_MASK, + float_3nan_prop_s_acb = float_3nan_prop_acb | R_3NAN_SNAN_MASK, + float_3nan_prop_s_bac = float_3nan_prop_bac | R_3NAN_SNAN_MASK, + float_3nan_prop_s_bca = float_3nan_prop_bca | R_3NAN_SNAN_MASK, + float_3nan_prop_s_cab = float_3nan_prop_cab | R_3NAN_SNAN_MASK, + float_3nan_prop_s_cba = float_3nan_prop_cba | R_3NAN_SNAN_MASK, +} Float3NaNPropRule; + +#undef PROPRULE + +/* + * Rule for result of fused multiply-add 0 * Inf + NaN. + * This must be a NaN, but implementations differ on whether this + * is the input NaN or the default NaN. + * + * You don't need to set this if default_nan_mode is enabled. + * When not in default-NaN mode, it is an error for the target + * not to set the rule in float_status if it uses muladd, and we + * will assert if we need to handle an input NaN and no rule was + * selected. + */ +typedef enum __attribute__((__packed__)) { + /* No propagation rule specified */ + float_infzeronan_none = 0, + /* Result is never the default NaN (so always the input NaN) */ + float_infzeronan_dnan_never = 1, + /* Result is always the default NaN */ + float_infzeronan_dnan_always = 2, + /* Result is the default NaN if the input NaN is quiet */ + float_infzeronan_dnan_if_qnan = 3, + /* + * Don't raise Invalid for 0 * Inf + NaN. Default is to raise. + * IEEE 754-2008 section 7.2 makes it implementation defined whether + * 0 * Inf + QNaN raises Invalid or not. Note that 0 * Inf + SNaN will + * raise the Invalid flag for the SNaN anyway. + * + * This is a flag which can be ORed in with any of the above + * DNaN behaviour options. + */ + float_infzeronan_suppress_invalid = (1 << 7), +} FloatInfZeroNaNRule; + +/* + * When flush_to_zero is set, should we detect denormal results to + * be flushed before or after rounding? For most architectures this + * should be set to match the tininess_before_rounding setting, + * but a few architectures, e.g. MIPS MSA, detect FTZ before + * rounding but tininess after rounding. + * + * This enum is arranged so that the default if the target doesn't + * configure it matches the default for tininess_before_rounding + * (i.e. "after rounding"). + */ +typedef enum __attribute__((__packed__)) { + float_ftz_after_rounding = 0, + float_ftz_before_rounding = 1, +} FloatFTZDetection; + +/* + * floatx80 is primarily used by x86 and m68k, and there are + * differences in the handling, largely related to the explicit + * Integer bit which floatx80 has and the other float formats do not. + * These flag values allow specification of the target's requirements + * and can be ORed together to set floatx80_behaviour. + */ +typedef enum __attribute__((__packed__)) { + /* In the default Infinity value, is the Integer bit 0 ? */ + floatx80_default_inf_int_bit_is_zero = 1, + /* + * Are Pseudo-infinities (Inf with the Integer bit zero) valid? + * If so, floatx80_is_infinity() will return true for them. + * If not, floatx80_invalid_encoding will return false for them, + * and using them as inputs to a float op will raise Invalid. 
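+     *
+     * Editor's illustrative sketch (hypothetical per-target FPU setup;
+     * fp_status stands for the target's float_status instance):
+     *
+     *     set_floatx80_behaviour(floatx80_pseudo_inf_valid |
+     *                            floatx80_unnormal_valid, &fp_status);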
+ */ + floatx80_pseudo_inf_valid = 2, + /* + * Are Pseudo-NaNs (NaNs where the Integer bit is zero) valid? + * If not, floatx80_invalid_encoding() will return false for them, + * and using them as inputs to a float op will raise Invalid. + */ + floatx80_pseudo_nan_valid = 4, + /* + * Are Unnormals (0 < exp < 0x7fff, Integer bit zero) valid? + * If not, floatx80_invalid_encoding() will return false for them, + * and using them as inputs to a float op will raise Invalid. + */ + floatx80_unnormal_valid = 8, + + /* + * If the exponent is 0 and the Integer bit is set, Intel call + * this a "pseudo-denormal"; x86 supports that only on input + * (treating them as denormals by ignoring the Integer bit). + * For m68k, the integer bit is considered validly part of the + * input value when the exponent is 0, and may be 0 or 1, + * giving extra range. They may also be generated as outputs. + * (The m68k manual actually calls these values part of the + * normalized number range, not the denormalized number range.) + * + * By default you get the Intel behaviour where the Integer + * bit is ignored; if this is set then the Integer bit value + * is honoured, m68k-style. + * + * Either way, floatx80_invalid_encoding() will always accept + * pseudo-denormals. + */ + floatx80_pseudo_denormal_valid = 16, +} FloatX80Behaviour; + /* * Floating Point Status. Individual architectures may maintain * several versions of float_status for different functions. The @@ -181,19 +381,34 @@ typedef struct float_status { uint16_t float_exception_flags; FloatRoundMode float_rounding_mode; FloatX80RoundPrec floatx80_rounding_precision; + FloatX80Behaviour floatx80_behaviour; + Float2NaNPropRule float_2nan_prop_rule; + Float3NaNPropRule float_3nan_prop_rule; + FloatInfZeroNaNRule float_infzeronan_rule; bool tininess_before_rounding; - /* should denormalised results go to zero and set the inexact flag? */ + /* should denormalised results go to zero and set output_denormal_flushed? */ bool flush_to_zero; - /* should denormalised inputs go to zero and set the input_denormal flag? */ + /* do we detect and flush denormal results before or after rounding? */ + FloatFTZDetection ftz_detection; + /* should denormalised inputs go to zero and set input_denormal_flushed? */ bool flush_inputs_to_zero; bool default_nan_mode; + /* + * The pattern to use for the default NaN. Here the high bit specifies + * the default NaN's sign bit, and bits 6..0 specify the high bits of the + * fractional part. The low bits of the fractional part are copies of bit 0. + * The exponent of the default NaN is (as for any NaN) always all 1s. + * Note that a value of 0 here is not a valid NaN. The target must set + * this to the correct non-zero value, or we will assert when trying to + * create a default NaN. + */ + uint8_t default_nan_pattern; /* * The flags below are not used on all specializations and may * constant fold away (see snan_bit_is_one()/no_signalling_nans() in * softfloat-specialize.inc.c) */ bool snan_bit_is_one; - bool use_first_nan; bool no_signaling_nans; /* should overflowed results subtract re_bias to its exponent? 
*/ bool rebias_overflow; diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h index eb64075b9cb..c18ab2cb609 100644 --- a/include/fpu/softfloat.h +++ b/include/fpu/softfloat.h @@ -120,14 +120,16 @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status); | Using these differs from negating an input or output before calling | the muladd function in that this means that a NaN doesn't have its | sign bit inverted before it is propagated. -| We also support halving the result before rounding, as a special -| case to support the ARM fused-sqrt-step instruction FRSQRTS. +| +| With float_muladd_suppress_add_product_zero, if A or B is zero +| such that the product is a true zero, then return C without addition. +| This preserves the sign of C when C is +/- 0. Used for Hexagon. *----------------------------------------------------------------------------*/ enum { float_muladd_negate_c = 1, float_muladd_negate_product = 2, float_muladd_negate_result = 4, - float_muladd_halve_result = 8, + float_muladd_suppress_add_product_zero = 8, }; /*---------------------------------------------------------------------------- @@ -238,6 +240,8 @@ float16 float16_add(float16, float16, float_status *status); float16 float16_sub(float16, float16, float_status *status); float16 float16_mul(float16, float16, float_status *status); float16 float16_muladd(float16, float16, float16, int, float_status *status); +float16 float16_muladd_scalbn(float16, float16, float16, + int, int, float_status *status); float16 float16_div(float16, float16, float_status *status); float16 float16_scalbn(float16, int, float_status *status); float16 float16_min(float16, float16, float_status *status); @@ -597,6 +601,8 @@ float32 float32_mul(float32, float32, float_status *status); float32 float32_div(float32, float32, float_status *status); float32 float32_rem(float32, float32, float_status *status); float32 float32_muladd(float32, float32, float32, int, float_status *status); +float32 float32_muladd_scalbn(float32, float32, float32, + int, int, float_status *status); float32 float32_sqrt(float32, float_status *status); float32 float32_exp2(float32, float_status *status); float32 float32_log2(float32, float_status *status); @@ -792,6 +798,8 @@ float64 float64_mul(float64, float64, float_status *status); float64 float64_div(float64, float64, float_status *status); float64 float64_rem(float64, float64, float_status *status); float64 float64_muladd(float64, float64, float64, int, float_status *status); +float64 float64_muladd_scalbn(float64, float64, float64, + int, int, float_status *status); float64 float64_sqrt(float64, float_status *status); float64 float64_log2(float64, float_status *status); FloatRelation float64_compare(float64, float64, float_status *status); @@ -952,7 +960,7 @@ float128 floatx80_to_float128(floatx80, float_status *status); /*---------------------------------------------------------------------------- | The pattern for an extended double-precision inf. *----------------------------------------------------------------------------*/ -extern const floatx80 floatx80_infinity; +floatx80 floatx80_default_inf(bool zSign, float_status *status); /*---------------------------------------------------------------------------- | Software IEC/IEEE extended double-precision operations. 
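// Editor's illustrative sketch (not part of the patch): the removed global
// floatx80_infinity constant is now obtained per float_status, e.g.
//
//     floatx80 inf = floatx80_default_inf(sign_bit, &fp_status);
//
// where sign_bit and fp_status are caller-provided (assumptions).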
@@ -987,14 +995,19 @@ static inline floatx80 floatx80_chs(floatx80 a) return a; } -static inline bool floatx80_is_infinity(floatx80 a) +static inline bool floatx80_is_infinity(floatx80 a, float_status *status) { -#if defined(TARGET_M68K) - return (a.high & 0x7fff) == floatx80_infinity.high && !(a.low << 1); -#else - return (a.high & 0x7fff) == floatx80_infinity.high && - a.low == floatx80_infinity.low; -#endif + /* + * It's target-specific whether the Integer bit is permitted + * to be 0 in a valid Infinity value. (x86 says no, m68k says yes). + */ + bool intbit = a.low >> 63; + + if (!intbit && + !(status->floatx80_behaviour & floatx80_pseudo_inf_valid)) { + return false; + } + return (a.high & 0x7fff) == 0x7fff && !(a.low << 1); } static inline bool floatx80_is_neg(floatx80 a) @@ -1060,41 +1073,45 @@ static inline bool floatx80_unordered_quiet(floatx80 a, floatx80 b, /*---------------------------------------------------------------------------- | Return whether the given value is an invalid floatx80 encoding. -| Invalid floatx80 encodings arise when the integer bit is not set, but -| the exponent is not zero. The only times the integer bit is permitted to -| be zero is in subnormal numbers and the value zero. -| This includes what the Intel software developer's manual calls pseudo-NaNs, -| pseudo-infinities and un-normal numbers. It does not include -| pseudo-denormals, which must still be correctly handled as inputs even -| if they are never generated as outputs. +| Invalid floatx80 encodings may arise when the integer bit is not set +| correctly; this is target-specific. In Intel terminology the +| categories are: +| exp == 0, int = 0, mantissa == 0 : zeroes +| exp == 0, int = 0, mantissa != 0 : denormals +| exp == 0, int = 1 : pseudo-denormals +| 0 < exp < 0x7fff, int = 0 : unnormals +| 0 < exp < 0x7fff, int = 1 : normals +| exp == 0x7fff, int = 0, mantissa == 0 : pseudo-infinities +| exp == 0x7fff, int = 1, mantissa == 0 : infinities +| exp == 0x7fff, int = 0, mantissa != 0 : pseudo-NaNs +| exp == 0x7fff, int = 1, mantissa == 0 : NaNs +| +| The usual IEEE cases of zero, denormal, normal, inf and NaN are always valid. +| x87 permits as input also pseudo-denormals. +| m68k permits all those and also pseudo-infinities, pseudo-NaNs and unnormals. +| +| Since we don't have a target that handles floatx80 but prohibits +| pseudo-denormals in input, we don't currently have a floatx80_behaviour +| flag for that case, but instead always accept it. Conveniently this +| means that all cases with either exponent 0 or the integer bit set are +| valid for all targets. *----------------------------------------------------------------------------*/ -static inline bool floatx80_invalid_encoding(floatx80 a) -{ -#if defined(TARGET_M68K) - /*------------------------------------------------------------------------- - | With m68k, the explicit integer bit can be zero in the case of: - | - zeros (exp == 0, mantissa == 0) - | - denormalized numbers (exp == 0, mantissa != 0) - | - unnormalized numbers (exp != 0, exp < 0x7FFF) - | - infinities (exp == 0x7FFF, mantissa == 0) - | - not-a-numbers (exp == 0x7FFF, mantissa != 0) - | - | For infinities and NaNs, the explicit integer bit can be either one or - | zero. - | - | The IEEE 754 standard does not define a zero integer bit. Such a number - | is an unnormalized number. 
Hardware does not directly support - | denormalized and unnormalized numbers, but implicitly supports them by - | trapping them as unimplemented data types, allowing efficient conversion - | in software. - | - | See "M68000 FAMILY PROGRAMMER’S REFERENCE MANUAL", - | "1.6 FLOATING-POINT DATA TYPES" - *------------------------------------------------------------------------*/ - return false; -#else - return (a.low & (1ULL << 63)) == 0 && (a.high & 0x7FFF) != 0; -#endif +static inline bool floatx80_invalid_encoding(floatx80 a, float_status *s) +{ + if ((a.low >> 63) || (a.high & 0x7fff) == 0) { + /* Anything with the Integer bit set or the exponent 0 is valid */ + return false; + } + + if ((a.high & 0x7fff) == 0x7fff) { + if (a.low) { + return !(s->floatx80_behaviour & floatx80_pseudo_nan_valid); + } else { + return !(s->floatx80_behaviour & floatx80_pseudo_inf_valid); + } + } else { + return !(s->floatx80_behaviour & floatx80_unnormal_valid); + } } #define floatx80_zero make_floatx80(0x0000, 0x0000000000000000LL) diff --git a/include/gdbstub/commands.h b/include/gdbstub/commands.h index 40f0514fe9f..bff3674872e 100644 --- a/include/gdbstub/commands.h +++ b/include/gdbstub/commands.h @@ -1,5 +1,5 @@ #ifndef GDBSTUB_COMMANDS_H -#define GDBSTUB +#define GDBSTUB_COMMANDS_H typedef void (*GdbCmdHandler)(GArray *params, void *user_ctx); diff --git a/include/gdbstub/helpers.h b/include/gdbstub/helpers.h index 26140ef1ac0..b685afac436 100644 --- a/include/gdbstub/helpers.h +++ b/include/gdbstub/helpers.h @@ -16,7 +16,8 @@ #error "gdbstub helpers should only be included by target specific code" #endif -#include "exec/tswap.h" +#include "qemu/bswap.h" +#include "qemu/target-info.h" #include "cpu-param.h" /* @@ -33,40 +34,49 @@ static inline int gdb_get_reg8(GByteArray *buf, uint8_t val) static inline int gdb_get_reg16(GByteArray *buf, uint16_t val) { - uint16_t to_word = tswap16(val); - g_byte_array_append(buf, (uint8_t *) &to_word, 2); + if (target_big_endian()) { + cpu_to_be16s(&val); + } else { + cpu_to_le16s(&val); + } + g_byte_array_append(buf, (uint8_t *) &val, 2); return 2; } static inline int gdb_get_reg32(GByteArray *buf, uint32_t val) { - uint32_t to_long = tswap32(val); - g_byte_array_append(buf, (uint8_t *) &to_long, 4); + if (target_big_endian()) { + cpu_to_be32s(&val); + } else { + cpu_to_le32s(&val); + } + g_byte_array_append(buf, (uint8_t *) &val, 4); return 4; } static inline int gdb_get_reg64(GByteArray *buf, uint64_t val) { - uint64_t to_quad = tswap64(val); - g_byte_array_append(buf, (uint8_t *) &to_quad, 8); + if (target_big_endian()) { + cpu_to_be64s(&val); + } else { + cpu_to_le64s(&val); + } + g_byte_array_append(buf, (uint8_t *) &val, 8); return 8; } static inline int gdb_get_reg128(GByteArray *buf, uint64_t val_hi, uint64_t val_lo) { - uint64_t to_quad; -#if TARGET_BIG_ENDIAN - to_quad = tswap64(val_hi); - g_byte_array_append(buf, (uint8_t *) &to_quad, 8); - to_quad = tswap64(val_lo); - g_byte_array_append(buf, (uint8_t *) &to_quad, 8); -#else - to_quad = tswap64(val_lo); - g_byte_array_append(buf, (uint8_t *) &to_quad, 8); - to_quad = tswap64(val_hi); - g_byte_array_append(buf, (uint8_t *) &to_quad, 8); -#endif + uint64_t tmp[2]; + if (target_big_endian()) { + tmp[0] = cpu_to_be64(val_hi); + tmp[1] = cpu_to_be64(val_lo); + } else { + tmp[0] = cpu_to_le64(val_lo); + tmp[1] = cpu_to_le64(val_hi); + } + g_byte_array_append(buf, (uint8_t *)&tmp, 16); return 16; } @@ -95,9 +105,13 @@ static inline uint8_t *gdb_get_reg_ptr(GByteArray *buf, int len) #if TARGET_LONG_BITS == 64 
#define gdb_get_regl(buf, val) gdb_get_reg64(buf, val) #define ldtul_p(addr) ldq_p(addr) +#define ldtul_le_p(addr) ldq_le_p(addr) +#define ldtul_be_p(addr) ldq_be_p(addr) #else #define gdb_get_regl(buf, val) gdb_get_reg32(buf, val) #define ldtul_p(addr) ldl_p(addr) +#define ldtul_le_p(addr) ldl_le_p(addr) +#define ldtul_be_p(addr) ldl_be_p(addr) #endif #endif /* _GDBSTUB_HELPERS_H_ */ diff --git a/include/gdbstub/syscalls.h b/include/gdbstub/syscalls.h index 54ff7245a11..d63228e96b4 100644 --- a/include/gdbstub/syscalls.h +++ b/include/gdbstub/syscalls.h @@ -3,7 +3,7 @@ * * Copyright (c) 2023 Linaro Ltd * - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #ifndef _SYSCALLS_H_ diff --git a/include/gdbstub/user.h b/include/gdbstub/user.h index 3b8358e3dab..654986d483b 100644 --- a/include/gdbstub/user.h +++ b/include/gdbstub/user.h @@ -3,7 +3,7 @@ * * Copyright (c) 2022 Linaro Ltd * - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #ifndef GDBSTUB_USER_H diff --git a/include/glib-compat.h b/include/glib-compat.h index 86be439ba0e..2e32b90f051 100644 --- a/include/glib-compat.h +++ b/include/glib-compat.h @@ -36,6 +36,13 @@ #include #endif +/* + * These functions perform function pointer casts which can cause function call + * failure on Emscripten. Use g_slist_sort_with_data and g_list_sort_with_data + * instead of these functions. + */ +#pragma GCC poison g_slist_sort g_list_sort + /* * Note that because of the GLIB_VERSION_MAX_ALLOWED constant above, allowing * use of functions from newer GLib via this compat header needs a little diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h index 0e6e82b339f..2e6e3419987 100644 --- a/include/hw/acpi/acpi-defs.h +++ b/include/hw/acpi/acpi-defs.h @@ -112,7 +112,6 @@ typedef struct AcpiSpcrData { uint8_t flow_control; uint8_t terminal_type; uint8_t language; - uint8_t reserved1; uint16_t pci_device_id; /* Must be 0xffff if not PCI device */ uint16_t pci_vendor_id; /* Must be 0xffff if not PCI device */ uint8_t pci_bus; @@ -120,7 +119,11 @@ typedef struct AcpiSpcrData { uint8_t pci_function; uint32_t pci_flags; uint8_t pci_segment; - uint32_t reserved2; + uint32_t uart_clk_freq; + uint32_t precise_baudrate; + uint32_t namespace_string_length; + uint32_t namespace_string_offset; + char namespace_string[]; } AcpiSpcrData; #define ACPI_FADT_ARM_PSCI_COMPLIANT (1 << 0) diff --git a/include/hw/acpi/acpi.h b/include/hw/acpi/acpi.h index e0e51e85b41..4b8ee094c41 100644 --- a/include/hw/acpi/acpi.h +++ b/include/hw/acpi/acpi.h @@ -21,7 +21,7 @@ */ #include "qemu/notify.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/acpi/acpi_dev_interface.h" /* @@ -150,6 +150,9 @@ struct ACPIREGS { Notifier wakeup; }; +/* Return whether ACPI subsystem is built in */ +bool acpi_builtin(void); + /* PM_TMR */ void acpi_pm_tmr_update(ACPIREGS *ar, bool enable); void acpi_pm_tmr_calc_overflow_time(ACPIREGS *ar); diff --git a/include/hw/acpi/acpi_egm_memory.h b/include/hw/acpi/acpi_egm_memory.h new file mode 100644 index 00000000000..113851bebac --- /dev/null +++ b/include/hw/acpi/acpi_egm_memory.h @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. 
All rights reserved + */ + +#ifndef ACPI_EGM_MEMORY_H +#define ACPI_EGM_MEMORY_H + +#include "qom/object_interfaces.h" + +#define TYPE_ACPI_EGM_MEMORY "acpi-egm-memory" + +typedef struct AcpiEgmMemory { + /* private */ + Object parent; + + /* public */ + char *pci_dev; + uint16_t node; +} AcpiEgmMemory; + +void build_acpi_egm_memory_dsdt(Aml *dev, int bus); + +#endif diff --git a/include/hw/acpi/acpi_generic_initiator.h b/include/hw/acpi/acpi_generic_initiator.h deleted file mode 100644 index a304bad73e0..00000000000 --- a/include/hw/acpi/acpi_generic_initiator.h +++ /dev/null @@ -1,47 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved - */ - -#ifndef ACPI_GENERIC_INITIATOR_H -#define ACPI_GENERIC_INITIATOR_H - -#include "qom/object_interfaces.h" - -#define TYPE_ACPI_GENERIC_INITIATOR "acpi-generic-initiator" - -typedef struct AcpiGenericInitiator { - /* private */ - Object parent; - - /* public */ - char *pci_dev; - uint16_t node; -} AcpiGenericInitiator; - -/* - * ACPI 6.3: - * Table 5-81 Flags – Generic Initiator Affinity Structure - */ -typedef enum { - /* - * If clear, the OSPM ignores the contents of the Generic - * Initiator/Port Affinity Structure. This allows system firmware - * to populate the SRAT with a static number of structures, but only - * enable them as necessary. - */ - GEN_AFFINITY_ENABLED = (1 << 0), -} GenericAffinityFlags; - -/* - * ACPI 6.3: - * Table 5-80 Device Handle - PCI - */ -typedef struct PCIDeviceHandle { - uint16_t segment; - uint16_t bdf; -} PCIDeviceHandle; - -void build_srat_generic_pci_initiator(GArray *table_data); - -#endif diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h index a3784155cb3..ad1fa07f0f1 100644 --- a/include/hw/acpi/aml-build.h +++ b/include/hw/acpi/aml-build.h @@ -218,6 +218,7 @@ struct AcpiBuildTables { GArray *tcpalog; GArray *vmgenid; GArray *hardware_errors; + GArray *smmuv3_devs; BIOSLinker *linker; } AcpiBuildTables; @@ -486,6 +487,13 @@ Aml *build_crs(PCIHostState *host, CrsRangeSet *range_set, uint32_t io_offset, void build_srat_memory(GArray *table_data, uint64_t base, uint64_t len, int node, MemoryAffinityFlags flags); +void build_srat_pci_generic_initiator(GArray *table_data, uint32_t node, + uint16_t segment, uint8_t bus, + uint8_t devfn); + +void build_srat_acpi_generic_port(GArray *table_data, uint32_t node, + const char *hid, uint32_t uid); + void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms, const char *oem_id, const char *oem_table_id); @@ -500,5 +508,5 @@ void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog, void build_spcr(GArray *table_data, BIOSLinker *linker, const AcpiSpcrData *f, const uint8_t rev, - const char *oem_id, const char *oem_table_id); + const char *oem_id, const char *oem_table_id, const char *name); #endif diff --git a/include/hw/acpi/generic_event_device.h b/include/hw/acpi/generic_event_device.h index 40af3550b56..2c5b055327a 100644 --- a/include/hw/acpi/generic_event_device.h +++ b/include/hw/acpi/generic_event_device.h @@ -63,12 +63,13 @@ #include "hw/acpi/memory_hotplug.h" #include "hw/acpi/ghes.h" #include "hw/acpi/cpu.h" +#include "hw/acpi/pcihp.h" #include "qom/object.h" #define ACPI_POWER_BUTTON_DEVICE "PWRB" #define TYPE_ACPI_GED "acpi-ged" -OBJECT_DECLARE_SIMPLE_TYPE(AcpiGedState, ACPI_GED) +OBJECT_DECLARE_TYPE(AcpiGedState, AcpiGedClass, ACPI_GED) #define ACPI_GED_EVT_SEL_OFFSET 0x0 #define ACPI_GED_EVT_SEL_LEN 0x4 @@ -81,8 +82,11 @@ 
OBJECT_DECLARE_SIMPLE_TYPE(AcpiGedState, ACPI_GED) /* ACPI_GED_REG_RESET value for reset*/ #define ACPI_GED_RESET_VALUE 0x42 -/* ACPI_GED_REG_SLEEP_CTL.SLP_TYP value for S5 (aka poweroff) */ -#define ACPI_GED_SLP_TYP_S5 0x05 +/* [ACPI 5.0 Chapter 4.8.3.7] Sleep Control and Status Register */ +#define ACPI_GED_SLP_TYP_POS 0x2 /* SLP_TYPx Bit Offset */ +#define ACPI_GED_SLP_TYP_MASK 0x07 /* SLP_TYPx 3-bit mask */ +#define ACPI_GED_SLP_TYP_S5 0x05 /* System _S5 State (Soft Off) */ +#define ACPI_GED_SLP_EN 0x20 /* SLP_EN write-only bit */ #define GED_DEVICE "GED" #define AML_GED_EVT_REG "EREG" @@ -98,6 +102,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(AcpiGedState, ACPI_GED) #define ACPI_GED_PWR_DOWN_EVT 0x2 #define ACPI_GED_NVDIMM_HOTPLUG_EVT 0x4 #define ACPI_GED_CPU_HOTPLUG_EVT 0x8 +#define ACPI_GED_PCI_HOTPLUG_EVT 0x10 typedef struct GEDState { MemoryRegion evt; @@ -105,18 +110,31 @@ typedef struct GEDState { uint32_t sel; } GEDState; +#define ACPI_PCIHP_REGION_NAME "pcihp container" +#define ACPI_MEMHP_REGION_NAME "memhp container" + struct AcpiGedState { SysBusDevice parent_obj; MemHotplugState memhp_state; MemoryRegion container_memhp; CPUHotplugState cpuhp_state; MemoryRegion container_cpuhp; + AcpiPciHpState pcihp_state; + MemoryRegion container_pcihp; GEDState ged_state; uint32_t ged_event_bitmap; qemu_irq irq; AcpiGhesState ghes_state; }; +typedef struct AcpiGedClass { + /* */ + SysBusDeviceClass parent_class; + + /*< public >*/ + ResettablePhases parent_phases; +} AcpiGedClass; + void build_ged_aml(Aml *table, const char* name, HotplugHandler *hotplug_dev, uint32_t ged_irq, AmlRegionSpace rs, hwaddr ged_base); void acpi_dsdt_add_power_button(Aml *scope); diff --git a/include/hw/acpi/ghes.h b/include/hw/acpi/ghes.h index 674f6958e90..578a582203c 100644 --- a/include/hw/acpi/ghes.h +++ b/include/hw/acpi/ghes.h @@ -23,6 +23,7 @@ #define ACPI_GHES_H #include "hw/acpi/bios-linker-loader.h" +#include "qapi/error.h" /* * Values for Hardware Error Notification Type field @@ -59,26 +60,27 @@ enum AcpiGhesNotifyType { enum { ACPI_HEST_SRC_ID_SEA = 0, /* future ids go here */ - ACPI_HEST_SRC_ID_RESERVED, + + ACPI_GHES_ERROR_SOURCE_COUNT }; typedef struct AcpiGhesState { - uint64_t ghes_addr_le; + uint64_t hw_error_le; bool present; /* True if GHES is present at all on this board */ } AcpiGhesState; -void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker); -void acpi_build_hest(GArray *table_data, BIOSLinker *linker, +void acpi_build_hest(GArray *table_data, GArray *hardware_errors, + BIOSLinker *linker, const char *oem_id, const char *oem_table_id); void acpi_ghes_add_fw_cfg(AcpiGhesState *vms, FWCfgState *s, GArray *hardware_errors); -int acpi_ghes_record_errors(uint8_t notify, uint64_t error_physical_addr); +int acpi_ghes_memory_errors(uint16_t source_id, uint64_t error_physical_addr); /** * acpi_ghes_present: Report whether ACPI GHES table is present * * Returns: true if the system has an ACPI GHES table and it is - * safe to call acpi_ghes_record_errors() to record a memory error. + * safe to call acpi_ghes_memory_errors() to record a memory error. 
*/ bool acpi_ghes_present(void); #endif diff --git a/include/hw/acpi/ich9.h b/include/hw/acpi/ich9.h index 2faf7f0caeb..245fe08dc24 100644 --- a/include/hw/acpi/ich9.h +++ b/include/hw/acpi/ich9.h @@ -46,6 +46,7 @@ typedef struct ICH9LPCPMRegs { uint32_t smi_en; uint32_t smi_en_wmask; uint32_t smi_sts; + uint32_t smi_sts_wmask; qemu_irq irq; /* SCI */ @@ -68,6 +69,11 @@ typedef struct ICH9LPCPMRegs { bool smm_compat; bool enable_tco; TCOIORegs tco_regs; + + bool swsmi_timer_enabled; + bool periodic_timer_enabled; + QEMUTimer *swsmi_timer; + QEMUTimer *periodic_timer; } ICH9LPCPMRegs; #define ACPI_PM_PROP_TCO_ENABLED "enable_tco" diff --git a/include/hw/acpi/ich9_tco.h b/include/hw/acpi/ich9_tco.h index 2562a7cf39b..b3c3f69451e 100644 --- a/include/hw/acpi/ich9_tco.h +++ b/include/hw/acpi/ich9_tco.h @@ -10,7 +10,7 @@ #ifndef HW_ACPI_TCO_H #define HW_ACPI_TCO_H -#include "exec/memory.h" +#include "system/memory.h" #include "migration/vmstate.h" /* As per ICH9 spec, the internal timer has an error of ~0.6s on every tick */ diff --git a/include/hw/acpi/ich9_timer.h b/include/hw/acpi/ich9_timer.h new file mode 100644 index 00000000000..5112df43850 --- /dev/null +++ b/include/hw/acpi/ich9_timer.h @@ -0,0 +1,23 @@ +/* + * QEMU ICH9 Timer emulation + * + * Copyright (c) 2024 Dominic Prinz + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef HW_ACPI_ICH9_TIMER_H +#define HW_ACPI_ICH9_TIMER_H + +#include "hw/acpi/ich9.h" + +void ich9_pm_update_swsmi_timer(ICH9LPCPMRegs *pm, bool enable); + +void ich9_pm_swsmi_timer_init(ICH9LPCPMRegs *pm); + +void ich9_pm_update_periodic_timer(ICH9LPCPMRegs *pm, bool enable); + +void ich9_pm_periodic_timer_init(ICH9LPCPMRegs *pm); + +#endif diff --git a/include/hw/acpi/pci.h b/include/hw/acpi/pci.h index 467a99461cd..20b672575fc 100644 --- a/include/hw/acpi/pci.h +++ b/include/hw/acpi/pci.h @@ -36,8 +36,12 @@ typedef struct AcpiMcfgInfo { void build_mcfg(GArray *table_data, BIOSLinker *linker, AcpiMcfgInfo *info, const char *oem_id, const char *oem_table_id); -Aml *aml_pci_device_dsm(void); -void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus); void build_pci_bridge_aml(AcpiDevAmlIf *adev, Aml *scope); + +void build_srat_generic_affinity_structures(GArray *table_data); + +Aml *build_pci_host_bridge_osc_method(bool enable_native_pcie_hotplug); +Aml *build_pci_bridge_edsm(void); + #endif diff --git a/include/hw/acpi/pcihp.h b/include/hw/acpi/pcihp.h index ac21a95913c..ca6a2588258 100644 --- a/include/hw/acpi/pcihp.h +++ b/include/hw/acpi/pcihp.h @@ -3,7 +3,7 @@ * * QEMU supports PCI hotplug via ACPI. This module * implements the interface between QEMU and the ACPI BIOS. - * Interface specification - see docs/specs/acpi_pci_hotplug.txt + * Interface specification - see docs/specs/acpi_pci_hotplug.rst * * Copyright (c) 2013, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com) * Copyright (c) 2006 Fabrice Bellard @@ -28,11 +28,18 @@ #define HW_ACPI_PCIHP_H #include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" #include "hw/hotplug.h" #define ACPI_PCIHP_IO_BASE_PROP "acpi-pcihp-io-base" #define ACPI_PCIHP_IO_LEN_PROP "acpi-pcihp-io-len" +/* PCI Hot-plug registers bases. 
See docs/specs/acpi_pci_hotplug.rst */ +#define ACPI_PCIHP_SEJ_BASE 0x8 +#define ACPI_PCIHP_BNMR_BASE 0x10 + +#define ACPI_PCIHP_SIZE 0x0018 + typedef struct AcpiPciHpPciStatus { uint32_t up; uint32_t down; @@ -55,10 +62,10 @@ typedef struct AcpiPciHpState { bool use_acpi_root_pci_hotplug; } AcpiPciHpState; -void acpi_pcihp_init(Object *owner, AcpiPciHpState *, PCIBus *root, +void acpi_pcihp_init(Object *owner, AcpiPciHpState *, MemoryRegion *io, uint16_t io_base); -bool acpi_pcihp_is_hotpluggbale_bus(AcpiPciHpState *s, BusState *bus); +bool acpi_pcihp_is_hotpluggable_bus(AcpiPciHpState *s, BusState *bus); void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, @@ -69,6 +76,14 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, DeviceState *dev, Error **errp); +void build_acpi_pci_hotplug(Aml *table, AmlRegionSpace rs, uint64_t pcihp_addr); +void build_append_pci_dsm_func0_common(Aml *ctx, Aml *retvar); +void build_append_pcihp_resources(Aml *table, + uint64_t io_addr, uint64_t io_len); +bool build_append_notification_callback(Aml *parent_scope, const PCIBus *bus); + +void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus); + /* Called on reset */ void acpi_pcihp_reset(AcpiPciHpState *s); diff --git a/include/hw/acpi/tpm.h b/include/hw/acpi/tpm.h index 579c45f5baf..9d0fe6f2f96 100644 --- a/include/hw/acpi/tpm.h +++ b/include/hw/acpi/tpm.h @@ -19,7 +19,7 @@ #include "qemu/units.h" #include "hw/registerfields.h" #include "hw/acpi/aml-build.h" -#include "sysemu/tpm.h" +#include "system/tpm.h" #ifdef CONFIG_TPM diff --git a/include/hw/acpi/vmclock.h b/include/hw/acpi/vmclock.h new file mode 100644 index 00000000000..5605605812c --- /dev/null +++ b/include/hw/acpi/vmclock.h @@ -0,0 +1,34 @@ +#ifndef ACPI_VMCLOCK_H +#define ACPI_VMCLOCK_H + +#include "hw/acpi/bios-linker-loader.h" +#include "hw/qdev-core.h" +#include "qemu/uuid.h" +#include "qom/object.h" + +#define TYPE_VMCLOCK "vmclock" + +#define VMCLOCK_ADDR 0xfeffb000 +#define VMCLOCK_SIZE 0x1000 + +OBJECT_DECLARE_SIMPLE_TYPE(VmclockState, VMCLOCK) + +struct vmclock_abi; + +struct VmclockState { + DeviceState parent_obj; + MemoryRegion clk_page; + uint64_t physaddr; + struct vmclock_abi *clk; +}; + +/* returns NULL unless there is exactly one device */ +static inline Object *find_vmclock_dev(void) +{ + return object_resolve_path_type("", TYPE_VMCLOCK, NULL); +} + +void vmclock_build_acpi(VmclockState *vms, GArray *table_data, + BIOSLinker *linker, const char *oem_id); + +#endif diff --git a/include/hw/adc/max111x.h b/include/hw/adc/max111x.h deleted file mode 100644 index beff59c815d..00000000000 --- a/include/hw/adc/max111x.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Maxim MAX1110/1111 ADC chip emulation. - * - * Copyright (c) 2006 Openedhand Ltd. - * Written by Andrzej Zaborowski - * - * This code is licensed under the GNU GPLv2. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#ifndef HW_MISC_MAX111X_H -#define HW_MISC_MAX111X_H - -#include "hw/ssi/ssi.h" -#include "qom/object.h" - -/* - * This is a model of the Maxim MAX1110/1111 ADC chip, which for QEMU - * is an SSI slave device. It has either 4 (max1110) or 8 (max1111) - * 8-bit ADC channels. 
- * - * QEMU interface: - * + GPIO inputs 0..3 (for max1110) or 0..7 (for max1111): set the value - * of each ADC input, as an unsigned 8-bit value - * + GPIO output 0: interrupt line - * + Properties "input0" to "input3" (max1110) or "input0" to "input7" - * (max1111): initial reset values for ADC inputs. - * - * Known bugs: - * + the interrupt line is not correctly implemented, and will never - * be lowered once it has been asserted. - */ -struct MAX111xState { - SSIPeripheral parent_obj; - - qemu_irq interrupt; - /* Values of inputs at system reset (settable by QOM property) */ - uint8_t reset_input[8]; - - uint8_t tb1, rb2, rb3; - int cycle; - - uint8_t input[8]; - int inputs, com; -}; - -#define TYPE_MAX_111X "max111x" - -OBJECT_DECLARE_SIMPLE_TYPE(MAX111xState, MAX_111X) - -#define TYPE_MAX_1110 "max1110" -#define TYPE_MAX_1111 "max1111" - -#endif diff --git a/include/hw/arm/allwinner-a10.h b/include/hw/arm/allwinner-a10.h index 67a9a17b862..445ba1be212 100644 --- a/include/hw/arm/allwinner-a10.h +++ b/include/hw/arm/allwinner-a10.h @@ -12,8 +12,9 @@ #include "hw/misc/allwinner-a10-ccm.h" #include "hw/misc/allwinner-a10-dramc.h" #include "hw/i2c/allwinner-i2c.h" +#include "hw/ssi/allwinner-a10-spi.h" #include "hw/watchdog/allwinner-wdt.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "target/arm/cpu.h" #include "qom/object.h" @@ -40,6 +41,7 @@ struct AwA10State { AllwinnerAHCIState sata; AwSdHostState mmc0; AWI2CState i2c0; + AWA10SPIState spi0; AwRtcState rtc; AwWdtState wdt; MemoryRegion sram_a; diff --git a/include/hw/arm/allwinner-h3.h b/include/hw/arm/allwinner-h3.h index 24ba4e1bf41..db897c86f0b 100644 --- a/include/hw/arm/allwinner-h3.h +++ b/include/hw/arm/allwinner-h3.h @@ -49,7 +49,7 @@ #include "hw/i2c/allwinner-i2c.h" #include "hw/watchdog/allwinner-wdt.h" #include "target/arm/cpu.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" /** * Allwinner H3 device list diff --git a/include/hw/arm/allwinner-r40.h b/include/hw/arm/allwinner-r40.h index 614e74b7ed4..f8a0e94251e 100644 --- a/include/hw/arm/allwinner-r40.h +++ b/include/hw/arm/allwinner-r40.h @@ -35,7 +35,7 @@ #include "hw/usb/hcd-ehci.h" #include "hw/watchdog/allwinner-wdt.h" #include "target/arm/cpu.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" enum { AW_R40_DEV_SRAM_A1, diff --git a/include/hw/arm/aspeed.h b/include/hw/arm/aspeed.h index cbeacb214ca..6c364556565 100644 --- a/include/hw/arm/aspeed.h +++ b/include/hw/arm/aspeed.h @@ -35,10 +35,14 @@ struct AspeedMachineClass { uint32_t hw_strap2; const char *fmc_model; const char *spi_model; + const char *spi2_model; uint32_t num_cs; + uint32_t num_cs2; uint32_t macs_mask; void (*i2c_init)(AspeedMachineState *bmc); uint32_t uart_default; + bool sdhci_wp_inverted; + bool vbootrom; }; diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h index 624d489e0d6..217ef0eafd6 100644 --- a/include/hw/arm/aspeed_soc.h +++ b/include/hw/arm/aspeed_soc.h @@ -39,11 +39,11 @@ #include "hw/misc/unimp.h" #include "hw/misc/aspeed_peci.h" #include "hw/fsi/aspeed_apb2opb.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #include "hw/intc/arm_gicv3.h" -#define ASPEED_SPIS_NUM 2 -#define ASPEED_EHCIS_NUM 2 +#define ASPEED_SPIS_NUM 3 +#define ASPEED_EHCIS_NUM 4 #define ASPEED_WDTS_NUM 8 #define ASPEED_CPUS_NUM 4 #define ASPEED_MACS_NUM 4 @@ -59,6 +59,7 @@ struct AspeedSoCState { MemoryRegion sram; MemoryRegion spi_boot_container; MemoryRegion spi_boot; + MemoryRegion vbootrom; 
AddressSpace dram_as; AspeedRtcState rtc; AspeedTimerCtrlState timerctrl; @@ -90,6 +91,8 @@ struct AspeedSoCState { SerialMM uart[ASPEED_UARTS_NUM]; Clock *sysclk; UnimplementedDeviceState iomem; + UnimplementedDeviceState iomem0; + UnimplementedDeviceState iomem1; UnimplementedDeviceState video; UnimplementedDeviceState emmc_boot_controller; UnimplementedDeviceState dpmcu; @@ -97,6 +100,7 @@ struct AspeedSoCState { UnimplementedDeviceState espi; UnimplementedDeviceState udc; UnimplementedDeviceState sgpiom; + UnimplementedDeviceState ltpi; UnimplementedDeviceState jtag[ASPEED_JTAG_NUM]; AspeedAPB2OPBState fsi[2]; }; @@ -128,7 +132,7 @@ struct Aspeed27x0SoCState { AspeedSoCState parent; ARMCPU cpu[ASPEED_CPUS_NUM]; - AspeedINTCState intc; + AspeedINTCState intc[2]; GICv3State gic; MemoryRegion dram_empty; }; @@ -142,13 +146,36 @@ struct Aspeed10x0SoCState { ARMv7MState armv7m; }; +struct Aspeed27x0SSPSoCState { + AspeedSoCState parent; + AspeedINTCState intc[2]; + UnimplementedDeviceState ipc[2]; + UnimplementedDeviceState scuio; + + ARMv7MState armv7m; +}; + +#define TYPE_ASPEED27X0SSP_SOC "aspeed27x0ssp-soc" +OBJECT_DECLARE_SIMPLE_TYPE(Aspeed27x0SSPSoCState, ASPEED27X0SSP_SOC) + +struct Aspeed27x0TSPSoCState { + AspeedSoCState parent; + AspeedINTCState intc[2]; + UnimplementedDeviceState ipc[2]; + UnimplementedDeviceState scuio; + + ARMv7MState armv7m; +}; + +#define TYPE_ASPEED27X0TSP_SOC "aspeed27x0tsp-soc" +OBJECT_DECLARE_SIMPLE_TYPE(Aspeed27x0TSPSoCState, ASPEED27X0TSP_SOC) + #define TYPE_ASPEED10X0_SOC "aspeed10x0-soc" OBJECT_DECLARE_SIMPLE_TYPE(Aspeed10x0SoCState, ASPEED10X0_SOC) struct AspeedSoCClass { DeviceClass parent_class; - const char *name; /** valid_cpu_types: NULL terminated array of a single CPU type. */ const char * const *valid_cpu_types; uint32_t silicon_rev; @@ -170,8 +197,12 @@ struct AspeedSoCClass { const char *aspeed_soc_cpu_type(AspeedSoCClass *sc); enum { + ASPEED_DEV_VBOOTROM, ASPEED_DEV_SPI_BOOT, ASPEED_DEV_IOMEM, + ASPEED_DEV_IOMEM0, + ASPEED_DEV_IOMEM1, + ASPEED_DEV_LTPI, ASPEED_DEV_UART0, ASPEED_DEV_UART1, ASPEED_DEV_UART2, @@ -193,8 +224,11 @@ enum { ASPEED_DEV_SPI2, ASPEED_DEV_EHCI1, ASPEED_DEV_EHCI2, + ASPEED_DEV_EHCI3, + ASPEED_DEV_EHCI4, ASPEED_DEV_VIC, ASPEED_DEV_INTC, + ASPEED_DEV_INTCIO, ASPEED_DEV_SDMC, ASPEED_DEV_SCU, ASPEED_DEV_ADC, @@ -249,6 +283,8 @@ enum { ASPEED_DEV_SLIIO, ASPEED_GIC_DIST, ASPEED_GIC_REDIST, + ASPEED_DEV_IPC0, + ASPEED_DEV_IPC1, }; qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev); diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h index 80c492d7421..a2e22bda8a5 100644 --- a/include/hw/arm/boot.h +++ b/include/hw/arm/boot.h @@ -132,6 +132,9 @@ struct arm_boot_info { bool secure_board_setup; arm_endianness endianness; + + /* CPU having load the kernel and that should be the first to boot. */ + ARMCPU *primary_cpu; }; /** @@ -160,6 +163,7 @@ AddressSpace *arm_boot_address_space(ARMCPU *cpu, * @binfo: struct describing the boot environment * @addr_limit: upper limit of the available memory area at @addr * @as: address space to load image to + * @cpu: ARM CPU object * * Load a device tree supplied by the machine or by the user with the * '-dtb' command line option, and put it at offset @addr in target @@ -176,7 +180,8 @@ AddressSpace *arm_boot_address_space(ARMCPU *cpu, * Note: Must not be called unless have_dtb(binfo) is true. 
*/ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, - hwaddr addr_limit, AddressSpace *as, MachineState *ms); + hwaddr addr_limit, AddressSpace *as, MachineState *ms, + ARMCPU *cpu); /* Write a secure board setup routine with a dummy handler for SMCs */ void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu, diff --git a/include/hw/arm/bsa.h b/include/hw/arm/bsa.h index 8eaab603c03..13ed2d2ac19 100644 --- a/include/hw/arm/bsa.h +++ b/include/hw/arm/bsa.h @@ -22,6 +22,8 @@ #define QEMU_ARM_BSA_H /* These are architectural INTID values */ +#define ARCH_TIMER_S_EL2_VIRT_IRQ 19 +#define ARCH_TIMER_S_EL2_IRQ 20 #define VIRTUAL_PMU_IRQ 23 #define ARCH_GIC_MAINT_IRQ 25 #define ARCH_TIMER_NS_EL2_IRQ 26 diff --git a/include/hw/arm/fsl-imx25.h b/include/hw/arm/fsl-imx25.h index df2f83980f0..b68d4334a0c 100644 --- a/include/hw/arm/fsl-imx25.h +++ b/include/hw/arm/fsl-imx25.h @@ -29,7 +29,7 @@ #include "hw/sd/sdhci.h" #include "hw/usb/chipidea.h" #include "hw/watchdog/wdt_imx2.h" -#include "exec/memory.h" +#include "system/memory.h" #include "target/arm/cpu.h" #include "qom/object.h" diff --git a/include/hw/arm/fsl-imx31.h b/include/hw/arm/fsl-imx31.h index 40c593a5cf3..41232a22373 100644 --- a/include/hw/arm/fsl-imx31.h +++ b/include/hw/arm/fsl-imx31.h @@ -25,7 +25,7 @@ #include "hw/i2c/imx_i2c.h" #include "hw/gpio/imx_gpio.h" #include "hw/watchdog/wdt_imx2.h" -#include "exec/memory.h" +#include "system/memory.h" #include "target/arm/cpu.h" #include "qom/object.h" diff --git a/include/hw/arm/fsl-imx6.h b/include/hw/arm/fsl-imx6.h index 61c593ffd24..124bbd478fd 100644 --- a/include/hw/arm/fsl-imx6.h +++ b/include/hw/arm/fsl-imx6.h @@ -33,7 +33,8 @@ #include "hw/usb/chipidea.h" #include "hw/usb/imx-usb-phy.h" #include "hw/pci-host/designware.h" -#include "exec/memory.h" +#include "hw/or-irq.h" +#include "system/memory.h" #include "cpu.h" #include "qom/object.h" @@ -73,6 +74,7 @@ struct FslIMX6State { ChipideaState usb[FSL_IMX6_NUM_USBS]; IMXFECState eth; DesignwarePCIEHost pcie; + OrIRQState pcie4_msi_irq; MemoryRegion rom; MemoryRegion caam; MemoryRegion ocram; @@ -457,7 +459,7 @@ struct FslIMX6State { #define FSL_IMX6_PCIE1_IRQ 120 #define FSL_IMX6_PCIE2_IRQ 121 #define FSL_IMX6_PCIE3_IRQ 122 -#define FSL_IMX6_PCIE4_IRQ 123 +#define FSL_IMX6_PCIE4_MSI_IRQ 123 #define FSL_IMX6_DCIC1_IRQ 124 #define FSL_IMX6_DCIC2_IRQ 125 #define FSL_IMX6_MLB150_HIGH_IRQ 126 diff --git a/include/hw/arm/fsl-imx6ul.h b/include/hw/arm/fsl-imx6ul.h index 8277b0e8b2c..4e3209b25b2 100644 --- a/include/hw/arm/fsl-imx6ul.h +++ b/include/hw/arm/fsl-imx6ul.h @@ -33,7 +33,7 @@ #include "hw/net/imx_fec.h" #include "hw/usb/chipidea.h" #include "hw/usb/imx-usb-phy.h" -#include "exec/memory.h" +#include "system/memory.h" #include "cpu.h" #include "qom/object.h" #include "qemu/units.h" diff --git a/include/hw/arm/fsl-imx7.h b/include/hw/arm/fsl-imx7.h index 411fa1c2e37..aa7818c4999 100644 --- a/include/hw/arm/fsl-imx7.h +++ b/include/hw/arm/fsl-imx7.h @@ -36,6 +36,7 @@ #include "hw/net/imx_fec.h" #include "hw/pci-host/designware.h" #include "hw/usb/chipidea.h" +#include "hw/or-irq.h" #include "cpu.h" #include "qom/object.h" #include "qemu/units.h" @@ -85,6 +86,7 @@ struct FslIMX7State { IMX7GPRState gpr; ChipideaState usb[FSL_IMX7_NUM_USBS]; DesignwarePCIEHost pcie; + OrIRQState pcie4_msi_irq; MemoryRegion rom; MemoryRegion caam; MemoryRegion ocram; @@ -428,7 +430,7 @@ enum FslIMX7IRQs { FSL_IMX7_PCI_INTA_IRQ = 125, FSL_IMX7_PCI_INTB_IRQ = 124, FSL_IMX7_PCI_INTC_IRQ = 123, - FSL_IMX7_PCI_INTD_IRQ = 122, + 
FSL_IMX7_PCI_INTD_MSI_IRQ = 122, FSL_IMX7_UART7_IRQ = 126, diff --git a/include/hw/arm/fsl-imx8mp.h b/include/hw/arm/fsl-imx8mp.h new file mode 100644 index 00000000000..d016f7d3371 --- /dev/null +++ b/include/hw/arm/fsl-imx8mp.h @@ -0,0 +1,284 @@ +/* + * i.MX 8M Plus SoC Definitions + * + * Copyright (c) 2024, Bernhard Beschow + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef FSL_IMX8MP_H +#define FSL_IMX8MP_H + +#include "cpu.h" +#include "hw/char/imx_serial.h" +#include "hw/gpio/imx_gpio.h" +#include "hw/i2c/imx_i2c.h" +#include "hw/intc/arm_gicv3_common.h" +#include "hw/misc/imx7_snvs.h" +#include "hw/misc/imx8mp_analog.h" +#include "hw/misc/imx8mp_ccm.h" +#include "hw/net/imx_fec.h" +#include "hw/or-irq.h" +#include "hw/pci-host/designware.h" +#include "hw/pci-host/fsl_imx8m_phy.h" +#include "hw/sd/sdhci.h" +#include "hw/ssi/imx_spi.h" +#include "hw/timer/imx_gpt.h" +#include "hw/usb/hcd-dwc3.h" +#include "hw/watchdog/wdt_imx2.h" +#include "hw/sysbus.h" +#include "qom/object.h" +#include "qemu/units.h" + +#define TYPE_FSL_IMX8MP "fsl-imx8mp" +OBJECT_DECLARE_SIMPLE_TYPE(FslImx8mpState, FSL_IMX8MP) + +#define FSL_IMX8MP_RAM_START 0x40000000 +#define FSL_IMX8MP_RAM_SIZE_MAX (8 * GiB) + +enum FslImx8mpConfiguration { + FSL_IMX8MP_NUM_CPUS = 4, + FSL_IMX8MP_NUM_ECSPIS = 3, + FSL_IMX8MP_NUM_GPIOS = 5, + FSL_IMX8MP_NUM_GPTS = 6, + FSL_IMX8MP_NUM_I2CS = 6, + FSL_IMX8MP_NUM_IRQS = 160, + FSL_IMX8MP_NUM_UARTS = 4, + FSL_IMX8MP_NUM_USBS = 2, + FSL_IMX8MP_NUM_USDHCS = 3, + FSL_IMX8MP_NUM_WDTS = 3, +}; + +struct FslImx8mpState { + SysBusDevice parent_obj; + + ARMCPU cpu[FSL_IMX8MP_NUM_CPUS]; + GICv3State gic; + IMXGPTState gpt[FSL_IMX8MP_NUM_GPTS]; + IMXGPIOState gpio[FSL_IMX8MP_NUM_GPIOS]; + IMX8MPCCMState ccm; + IMX8MPAnalogState analog; + IMX7SNVSState snvs; + IMXSPIState spi[FSL_IMX8MP_NUM_ECSPIS]; + IMXI2CState i2c[FSL_IMX8MP_NUM_I2CS]; + IMXSerialState uart[FSL_IMX8MP_NUM_UARTS]; + IMXFECState enet; + SDHCIState usdhc[FSL_IMX8MP_NUM_USDHCS]; + IMX2WdtState wdt[FSL_IMX8MP_NUM_WDTS]; + USBDWC3 usb[FSL_IMX8MP_NUM_USBS]; + DesignwarePCIEHost pcie; + FslImx8mPciePhyState pcie_phy; + OrIRQState gpt5_gpt6_irq; + MemoryRegion ocram; + + uint32_t phy_num; + bool phy_connected; +}; + +enum FslImx8mpMemoryRegions { + FSL_IMX8MP_A53_DAP, + FSL_IMX8MP_AIPS1_CONFIGURATION, + FSL_IMX8MP_AIPS2_CONFIGURATION, + FSL_IMX8MP_AIPS3_CONFIGURATION, + FSL_IMX8MP_AIPS4_CONFIGURATION, + FSL_IMX8MP_AIPS5_CONFIGURATION, + FSL_IMX8MP_ANA_OSC, + FSL_IMX8MP_ANA_PLL, + FSL_IMX8MP_ANA_TSENSOR, + FSL_IMX8MP_APBH_DMA, + FSL_IMX8MP_ASRC, + FSL_IMX8MP_AUDIO_BLK_CTRL, + FSL_IMX8MP_AUDIO_DSP, + FSL_IMX8MP_AUDIO_XCVR_RX, + FSL_IMX8MP_AUD_IRQ_STEER, + FSL_IMX8MP_BOOT_ROM, + FSL_IMX8MP_BOOT_ROM_PROTECTED, + FSL_IMX8MP_CAAM, + FSL_IMX8MP_CAAM_MEM, + FSL_IMX8MP_CCM, + FSL_IMX8MP_CSU, + FSL_IMX8MP_DDR_BLK_CTRL, + FSL_IMX8MP_DDR_CTL, + FSL_IMX8MP_DDR_PERF_MON, + FSL_IMX8MP_DDR_PHY, + FSL_IMX8MP_DDR_PHY_BROADCAST, + FSL_IMX8MP_ECSPI1, + FSL_IMX8MP_ECSPI2, + FSL_IMX8MP_ECSPI3, + FSL_IMX8MP_EDMA_CHANNELS, + FSL_IMX8MP_EDMA_MANAGEMENT_PAGE, + FSL_IMX8MP_ENET1, + FSL_IMX8MP_ENET2_TSN, + FSL_IMX8MP_FLEXCAN1, + FSL_IMX8MP_FLEXCAN2, + FSL_IMX8MP_GIC_DIST, + FSL_IMX8MP_GIC_REDIST, + FSL_IMX8MP_GPC, + FSL_IMX8MP_GPIO1, + FSL_IMX8MP_GPIO2, + FSL_IMX8MP_GPIO3, + FSL_IMX8MP_GPIO4, + FSL_IMX8MP_GPIO5, + FSL_IMX8MP_GPT1, + FSL_IMX8MP_GPT2, + FSL_IMX8MP_GPT3, + FSL_IMX8MP_GPT4, + FSL_IMX8MP_GPT5, + FSL_IMX8MP_GPT6, + FSL_IMX8MP_GPU2D, + FSL_IMX8MP_GPU3D, + FSL_IMX8MP_HDMI_TX, + FSL_IMX8MP_HDMI_TX_AUDLNK_MSTR, + FSL_IMX8MP_HSIO_BLK_CTL, 
+ FSL_IMX8MP_I2C1, + FSL_IMX8MP_I2C2, + FSL_IMX8MP_I2C3, + FSL_IMX8MP_I2C4, + FSL_IMX8MP_I2C5, + FSL_IMX8MP_I2C6, + FSL_IMX8MP_INTERCONNECT, + FSL_IMX8MP_IOMUXC, + FSL_IMX8MP_IOMUXC_GPR, + FSL_IMX8MP_IPS_DEWARP, + FSL_IMX8MP_ISI, + FSL_IMX8MP_ISP1, + FSL_IMX8MP_ISP2, + FSL_IMX8MP_LCDIF1, + FSL_IMX8MP_LCDIF2, + FSL_IMX8MP_MEDIA_BLK_CTL, + FSL_IMX8MP_MIPI_CSI1, + FSL_IMX8MP_MIPI_CSI2, + FSL_IMX8MP_MIPI_DSI1, + FSL_IMX8MP_MU_1_A, + FSL_IMX8MP_MU_1_B, + FSL_IMX8MP_MU_2_A, + FSL_IMX8MP_MU_2_B, + FSL_IMX8MP_MU_3_A, + FSL_IMX8MP_MU_3_B, + FSL_IMX8MP_NPU, + FSL_IMX8MP_OCOTP_CTRL, + FSL_IMX8MP_OCRAM, + FSL_IMX8MP_OCRAM_S, + FSL_IMX8MP_PCIE1, + FSL_IMX8MP_PCIE1_MEM, + FSL_IMX8MP_PCIE_PHY1, + FSL_IMX8MP_PDM, + FSL_IMX8MP_PERFMON1, + FSL_IMX8MP_PERFMON2, + FSL_IMX8MP_PWM1, + FSL_IMX8MP_PWM2, + FSL_IMX8MP_PWM3, + FSL_IMX8MP_PWM4, + FSL_IMX8MP_QOSC, + FSL_IMX8MP_QSPI, + FSL_IMX8MP_QSPI1_RX_BUFFER, + FSL_IMX8MP_QSPI1_TX_BUFFER, + FSL_IMX8MP_QSPI_MEM, + FSL_IMX8MP_RAM, + FSL_IMX8MP_RDC, + FSL_IMX8MP_SAI1, + FSL_IMX8MP_SAI2, + FSL_IMX8MP_SAI3, + FSL_IMX8MP_SAI5, + FSL_IMX8MP_SAI6, + FSL_IMX8MP_SAI7, + FSL_IMX8MP_SDMA1, + FSL_IMX8MP_SDMA2, + FSL_IMX8MP_SDMA3, + FSL_IMX8MP_SEMAPHORE1, + FSL_IMX8MP_SEMAPHORE2, + FSL_IMX8MP_SEMAPHORE_HS, + FSL_IMX8MP_SNVS_HP, + FSL_IMX8MP_SPBA1, + FSL_IMX8MP_SPBA2, + FSL_IMX8MP_SRC, + FSL_IMX8MP_SYSCNT_CMP, + FSL_IMX8MP_SYSCNT_CTRL, + FSL_IMX8MP_SYSCNT_RD, + FSL_IMX8MP_TCM_DTCM, + FSL_IMX8MP_TCM_ITCM, + FSL_IMX8MP_TZASC, + FSL_IMX8MP_UART1, + FSL_IMX8MP_UART2, + FSL_IMX8MP_UART3, + FSL_IMX8MP_UART4, + FSL_IMX8MP_USB1, + FSL_IMX8MP_USB2, + FSL_IMX8MP_USB1_DEV, + FSL_IMX8MP_USB2_DEV, + FSL_IMX8MP_USB1_OTG, + FSL_IMX8MP_USB2_OTG, + FSL_IMX8MP_USB1_GLUE, + FSL_IMX8MP_USB2_GLUE, + FSL_IMX8MP_USDHC1, + FSL_IMX8MP_USDHC2, + FSL_IMX8MP_USDHC3, + FSL_IMX8MP_VPU, + FSL_IMX8MP_VPU_BLK_CTRL, + FSL_IMX8MP_VPU_G1_DECODER, + FSL_IMX8MP_VPU_G2_DECODER, + FSL_IMX8MP_VPU_VC8000E_ENCODER, + FSL_IMX8MP_WDOG1, + FSL_IMX8MP_WDOG2, + FSL_IMX8MP_WDOG3, +}; + +enum FslImx8mpIrqs { + FSL_IMX8MP_USDHC1_IRQ = 22, + FSL_IMX8MP_USDHC2_IRQ = 23, + FSL_IMX8MP_USDHC3_IRQ = 24, + + FSL_IMX8MP_UART1_IRQ = 26, + FSL_IMX8MP_UART2_IRQ = 27, + FSL_IMX8MP_UART3_IRQ = 28, + FSL_IMX8MP_UART4_IRQ = 29, + FSL_IMX8MP_UART5_IRQ = 30, + FSL_IMX8MP_UART6_IRQ = 16, + + FSL_IMX8MP_ECSPI1_IRQ = 31, + FSL_IMX8MP_ECSPI2_IRQ = 32, + FSL_IMX8MP_ECSPI3_IRQ = 33, + + FSL_IMX8MP_I2C1_IRQ = 35, + FSL_IMX8MP_I2C2_IRQ = 36, + FSL_IMX8MP_I2C3_IRQ = 37, + FSL_IMX8MP_I2C4_IRQ = 38, + + FSL_IMX8MP_USB1_IRQ = 40, + FSL_IMX8MP_USB2_IRQ = 41, + + FSL_IMX8MP_GPT1_IRQ = 55, + FSL_IMX8MP_GPT2_IRQ = 54, + FSL_IMX8MP_GPT3_IRQ = 53, + FSL_IMX8MP_GPT4_IRQ = 52, + FSL_IMX8MP_GPT5_GPT6_IRQ = 51, + + FSL_IMX8MP_GPIO1_LOW_IRQ = 64, + FSL_IMX8MP_GPIO1_HIGH_IRQ = 65, + FSL_IMX8MP_GPIO2_LOW_IRQ = 66, + FSL_IMX8MP_GPIO2_HIGH_IRQ = 67, + FSL_IMX8MP_GPIO3_LOW_IRQ = 68, + FSL_IMX8MP_GPIO3_HIGH_IRQ = 69, + FSL_IMX8MP_GPIO4_LOW_IRQ = 70, + FSL_IMX8MP_GPIO4_HIGH_IRQ = 71, + FSL_IMX8MP_GPIO5_LOW_IRQ = 72, + FSL_IMX8MP_GPIO5_HIGH_IRQ = 73, + + FSL_IMX8MP_I2C5_IRQ = 76, + FSL_IMX8MP_I2C6_IRQ = 77, + + FSL_IMX8MP_WDOG1_IRQ = 78, + FSL_IMX8MP_WDOG2_IRQ = 79, + FSL_IMX8MP_WDOG3_IRQ = 10, + + FSL_IMX8MP_ENET1_MAC_IRQ = 118, + FSL_IMX6_ENET1_MAC_1588_IRQ = 121, + + FSL_IMX8MP_PCI_INTA_IRQ = 126, + FSL_IMX8MP_PCI_INTB_IRQ = 125, + FSL_IMX8MP_PCI_INTC_IRQ = 124, + FSL_IMX8MP_PCI_INTD_IRQ = 123, + FSL_IMX8MP_PCI_MSI_IRQ = 140, +}; + +#endif /* FSL_IMX8MP_H */ diff --git a/include/hw/arm/max78000_soc.h b/include/hw/arm/max78000_soc.h new file mode 100644 index 
00000000000..a203079ee9a --- /dev/null +++ b/include/hw/arm/max78000_soc.h @@ -0,0 +1,50 @@ +/* + * MAX78000 SOC + * + * Copyright (c) 2025 Jackson Donaldson + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_ARM_MAX78000_SOC_H +#define HW_ARM_MAX78000_SOC_H + +#include "hw/or-irq.h" +#include "hw/arm/armv7m.h" +#include "hw/misc/max78000_aes.h" +#include "hw/misc/max78000_gcr.h" +#include "hw/misc/max78000_icc.h" +#include "hw/char/max78000_uart.h" +#include "hw/misc/max78000_trng.h" +#include "qom/object.h" + +#define TYPE_MAX78000_SOC "max78000-soc" +OBJECT_DECLARE_SIMPLE_TYPE(MAX78000State, MAX78000_SOC) + +#define FLASH_BASE_ADDRESS 0x10000000 +#define FLASH_SIZE (512 * 1024) +#define SRAM_BASE_ADDRESS 0x20000000 +#define SRAM_SIZE (128 * 1024) + +/* The MAX78k has 2 instruction caches; only icc0 matters, icc1 is for RISC */ +#define MAX78000_NUM_ICC 2 +#define MAX78000_NUM_UART 3 + +struct MAX78000State { + SysBusDevice parent_obj; + + ARMv7MState armv7m; + + MemoryRegion sram; + MemoryRegion flash; + + Max78000GcrState gcr; + Max78000IccState icc[MAX78000_NUM_ICC]; + Max78000UartState uart[MAX78000_NUM_UART]; + Max78000TrngState trng; + Max78000AesState aes; + + Clock *sysclk; +}; + +#endif diff --git a/include/hw/arm/npcm7xx.h b/include/hw/arm/npcm7xx.h index 4e0d2101885..56536565b74 100644 --- a/include/hw/arm/npcm7xx.h +++ b/include/hw/arm/npcm7xx.h @@ -23,8 +23,8 @@ #include "hw/gpio/npcm7xx_gpio.h" #include "hw/i2c/npcm7xx_smbus.h" #include "hw/mem/npcm7xx_mc.h" -#include "hw/misc/npcm7xx_clk.h" -#include "hw/misc/npcm7xx_gcr.h" +#include "hw/misc/npcm_clk.h" +#include "hw/misc/npcm_gcr.h" #include "hw/misc/npcm7xx_mft.h" #include "hw/misc/npcm7xx_pwm.h" #include "hw/misc/npcm7xx_rng.h" @@ -89,8 +89,8 @@ struct NPCM7xxState { MemoryRegion ram3; MemoryRegion *dram; - NPCM7xxGCRState gcr; - NPCM7xxCLKState clk; + NPCMGCRState gcr; + NPCMCLKState clk; NPCM7xxTimerCtrlState tim[3]; NPCM7xxADCState adc; NPCM7xxPWMState pwm[NPCM7XX_NR_PWM_MODULES]; diff --git a/include/hw/arm/npcm8xx.h b/include/hw/arm/npcm8xx.h new file mode 100644 index 00000000000..a8377db4905 --- /dev/null +++ b/include/hw/arm/npcm8xx.h @@ -0,0 +1,132 @@ +/* + * Nuvoton NPCM8xx SoC family. + * + * Copyright 2022 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ +#ifndef NPCM8XX_H +#define NPCM8XX_H + +#include "hw/adc/npcm7xx_adc.h" +#include "hw/core/split-irq.h" +#include "hw/cpu/cluster.h" +#include "hw/gpio/npcm7xx_gpio.h" +#include "hw/i2c/npcm7xx_smbus.h" +#include "hw/intc/arm_gic_common.h" +#include "hw/mem/npcm7xx_mc.h" +#include "hw/misc/npcm_clk.h" +#include "hw/misc/npcm_gcr.h" +#include "hw/misc/npcm7xx_mft.h" +#include "hw/misc/npcm7xx_pwm.h" +#include "hw/misc/npcm7xx_rng.h" +#include "hw/net/npcm_gmac.h" +#include "hw/net/npcm_pcs.h" +#include "hw/nvram/npcm7xx_otp.h" +#include "hw/sd/npcm7xx_sdhci.h" +#include "hw/timer/npcm7xx_timer.h" +#include "hw/ssi/npcm7xx_fiu.h" +#include "hw/usb/hcd-ehci.h" +#include "hw/usb/hcd-ohci.h" +#include "target/arm/cpu.h" +#include "hw/ssi/npcm_pspi.h" + +#define NPCM8XX_MAX_NUM_CPUS (4) + +/* The first half of the address space is reserved for DDR4 DRAM. */ +#define NPCM8XX_DRAM_BA (0x00000000) +#define NPCM8XX_DRAM_SZ (2 * GiB) + +/* Magic addresses for setting up direct kernel booting and SMP boot stubs. */ +#define NPCM8XX_LOADER_START (0x00000000) /* Start of SDRAM */ +#define NPCM8XX_SMP_LOADER_START (0xffff0000) /* Boot ROM */ +#define NPCM8XX_SMP_BOOTREG_ADDR (0xf080013c) /* GCR.SCRPAD */ +#define NPCM8XX_BOARD_SETUP_ADDR (0xffff1000) /* Boot ROM */ + +#define NPCM8XX_NR_PWM_MODULES 3 + +struct NPCM8xxMachine { + MachineState parent_obj; + + /* + * PWM fan splitter. each splitter connects to one PWM output and + * multiple MFT inputs. + */ + SplitIRQ fan_splitter[NPCM8XX_NR_PWM_MODULES * + NPCM7XX_PWM_PER_MODULE]; +}; + + +struct NPCM8xxMachineClass { + MachineClass parent_class; + + const char *soc_type; +}; + +#define TYPE_NPCM8XX_MACHINE MACHINE_TYPE_NAME("npcm8xx") +OBJECT_DECLARE_TYPE(NPCM8xxMachine, NPCM8xxMachineClass, NPCM8XX_MACHINE) + +struct NPCM8xxState { + DeviceState parent_obj; + + ARMCPU cpu[NPCM8XX_MAX_NUM_CPUS]; + CPUClusterState cpu_cluster; + GICState gic; + + MemoryRegion sram; + MemoryRegion irom; + MemoryRegion ram3; + MemoryRegion *dram; + + NPCMGCRState gcr; + NPCMCLKState clk; + NPCM7xxTimerCtrlState tim[3]; + NPCM7xxADCState adc; + NPCM7xxPWMState pwm[NPCM8XX_NR_PWM_MODULES]; + NPCM7xxMFTState mft[8]; + NPCM7xxOTPState fuse_array; + NPCM7xxMCState mc; + NPCM7xxRNGState rng; + NPCM7xxGPIOState gpio[8]; + NPCM7xxSMBusState smbus[27]; + EHCISysBusState ehci[2]; + OHCISysBusState ohci[2]; + NPCM7xxFIUState fiu[3]; + NPCMGMACState gmac[4]; + NPCMPCSState pcs; + NPCM7xxSDHCIState mmc; + NPCMPSPIState pspi; +}; + +struct NPCM8xxClass { + DeviceClass parent_class; + + /* Bitmask of modules that are permanently disabled on this chip. */ + uint32_t disabled_modules; + /* Number of CPU cores enabled in this SoC class. */ + uint32_t num_cpus; +}; + +#define TYPE_NPCM8XX "npcm8xx" +OBJECT_DECLARE_TYPE(NPCM8xxState, NPCM8xxClass, NPCM8XX) + +/** + * npcm8xx_load_kernel - Loads memory with everything needed to boot + * @machine - The machine containing the SoC to be booted. + * @soc - The SoC containing the CPU to be booted. + * + * This will set up the ARM boot info structure for the specific NPCM8xx + * derivative and call arm_load_kernel() to set up loading of the kernel, etc. + * into memory, if requested by the user. 
+ */ +void npcm8xx_load_kernel(MachineState *machine, NPCM8xxState *soc); + +#endif /* NPCM8XX_H */ diff --git a/include/hw/arm/nrf51_soc.h b/include/hw/arm/nrf51_soc.h index e52a56e75e0..f88ab1b7d3e 100644 --- a/include/hw/arm/nrf51_soc.h +++ b/include/hw/arm/nrf51_soc.h @@ -30,7 +30,7 @@ struct NRF51State { SysBusDevice parent_obj; /*< public >*/ - ARMv7MState cpu; + ARMv7MState armv7m; NRF51UARTState uart; NRF51RNGState rng; diff --git a/include/hw/arm/omap.h b/include/hw/arm/omap.h index 40ee8ea9e56..bdb2e887e47 100644 --- a/include/hw/arm/omap.h +++ b/include/hw/arm/omap.h @@ -20,39 +20,29 @@ #ifndef HW_ARM_OMAP_H #define HW_ARM_OMAP_H -#include "exec/memory.h" -#include "hw/input/tsc2xxx.h" +#include "system/memory.h" #include "target/arm/cpu-qom.h" #include "qemu/log.h" #include "qom/object.h" -# define OMAP_EMIFS_BASE 0x00000000 -# define OMAP2_Q0_BASE 0x00000000 -# define OMAP_CS0_BASE 0x00000000 -# define OMAP_CS1_BASE 0x04000000 -# define OMAP_CS2_BASE 0x08000000 -# define OMAP_CS3_BASE 0x0c000000 -# define OMAP_EMIFF_BASE 0x10000000 -# define OMAP_IMIF_BASE 0x20000000 -# define OMAP_LOCALBUS_BASE 0x30000000 -# define OMAP2_Q1_BASE 0x40000000 -# define OMAP2_L4_BASE 0x48000000 -# define OMAP2_SRAM_BASE 0x40200000 -# define OMAP2_L3_BASE 0x68000000 -# define OMAP2_Q2_BASE 0x80000000 -# define OMAP2_Q3_BASE 0xc0000000 -# define OMAP_MPUI_BASE 0xe1000000 - -# define OMAP730_SRAM_SIZE 0x00032000 -# define OMAP15XX_SRAM_SIZE 0x00030000 -# define OMAP16XX_SRAM_SIZE 0x00004000 -# define OMAP1611_SRAM_SIZE 0x0003e800 -# define OMAP242X_SRAM_SIZE 0x000a0000 -# define OMAP243X_SRAM_SIZE 0x00010000 -# define OMAP_CS0_SIZE 0x04000000 -# define OMAP_CS1_SIZE 0x04000000 -# define OMAP_CS2_SIZE 0x04000000 -# define OMAP_CS3_SIZE 0x04000000 +#define OMAP_EMIFS_BASE 0x00000000 +#define OMAP_CS0_BASE 0x00000000 +#define OMAP_CS1_BASE 0x04000000 +#define OMAP_CS2_BASE 0x08000000 +#define OMAP_CS3_BASE 0x0c000000 +#define OMAP_EMIFF_BASE 0x10000000 +#define OMAP_IMIF_BASE 0x20000000 +#define OMAP_LOCALBUS_BASE 0x30000000 +#define OMAP_MPUI_BASE 0xe1000000 + +#define OMAP730_SRAM_SIZE 0x00032000 +#define OMAP15XX_SRAM_SIZE 0x00030000 +#define OMAP16XX_SRAM_SIZE 0x00004000 +#define OMAP1611_SRAM_SIZE 0x0003e800 +#define OMAP_CS0_SIZE 0x04000000 +#define OMAP_CS1_SIZE 0x04000000 +#define OMAP_CS2_SIZE 0x04000000 +#define OMAP_CS3_SIZE 0x04000000 /* omap_clk.c */ struct omap_mpu_state_s; @@ -69,7 +59,7 @@ int64_t omap_clk_getrate(omap_clk clk); void omap_clk_reparent(omap_clk clk, omap_clk parent); /* omap_intc.c */ -#define TYPE_OMAP_INTC "common-omap-intc" +#define TYPE_OMAP_INTC "omap-intc" typedef struct OMAPIntcState OMAPIntcState; DECLARE_INSTANCE_CHECKER(OMAPIntcState, OMAP_INTC, TYPE_OMAP_INTC) @@ -106,385 +96,241 @@ typedef struct Omap1GpioState Omap1GpioState; DECLARE_INSTANCE_CHECKER(Omap1GpioState, OMAP1_GPIO, TYPE_OMAP1_GPIO) -#define TYPE_OMAP2_GPIO "omap2-gpio" -typedef struct Omap2GpioState Omap2GpioState; -DECLARE_INSTANCE_CHECKER(Omap2GpioState, OMAP2_GPIO, - TYPE_OMAP2_GPIO) - /* TODO: clock framework (see above) */ void omap_gpio_set_clk(Omap1GpioState *gpio, omap_clk clk); -void omap2_gpio_set_iclk(Omap2GpioState *gpio, omap_clk clk); -void omap2_gpio_set_fclk(Omap2GpioState *gpio, uint8_t i, omap_clk clk); - -/* OMAP2 l4 Interconnect */ -struct omap_l4_s; -struct omap_l4_region_s { - hwaddr offset; - size_t size; - int access; -}; -struct omap_l4_agent_info_s { - int ta; - int region; - int regions; - int ta_region; -}; -struct omap_target_agent_s { - MemoryRegion iomem; - struct 
omap_l4_s *bus; - int regions; - const struct omap_l4_region_s *start; - hwaddr base; - uint32_t component; - uint32_t control; - uint32_t status; -}; -struct omap_l4_s *omap_l4_init(MemoryRegion *address_space, - hwaddr base, int ta_num); - -struct omap_target_agent_s; -struct omap_target_agent_s *omap_l4ta_get( - struct omap_l4_s *bus, - const struct omap_l4_region_s *regions, - const struct omap_l4_agent_info_s *agents, - int cs); -hwaddr omap_l4_attach(struct omap_target_agent_s *ta, - int region, MemoryRegion *mr); -hwaddr omap_l4_region_base(struct omap_target_agent_s *ta, - int region); -hwaddr omap_l4_region_size(struct omap_target_agent_s *ta, - int region); - -/* OMAP2 SDRAM controller */ -struct omap_sdrc_s; -struct omap_sdrc_s *omap_sdrc_init(MemoryRegion *sysmem, - hwaddr base); -void omap_sdrc_reset(struct omap_sdrc_s *s); - -/* OMAP2 general purpose memory controller */ -struct omap_gpmc_s; -struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu, - hwaddr base, - qemu_irq irq, qemu_irq drq); -void omap_gpmc_reset(struct omap_gpmc_s *s); -void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem); -void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand); - /* * Common IRQ numbers for level 1 interrupt handler * See /usr/include/asm-arm/arch-omap/irqs.h in Linux. */ -# define OMAP_INT_CAMERA 1 -# define OMAP_INT_FIQ 3 -# define OMAP_INT_RTDX 6 -# define OMAP_INT_DSP_MMU_ABORT 7 -# define OMAP_INT_HOST 8 -# define OMAP_INT_ABORT 9 -# define OMAP_INT_BRIDGE_PRIV 13 -# define OMAP_INT_GPIO_BANK1 14 -# define OMAP_INT_UART3 15 -# define OMAP_INT_TIMER3 16 -# define OMAP_INT_DMA_CH0_6 19 -# define OMAP_INT_DMA_CH1_7 20 -# define OMAP_INT_DMA_CH2_8 21 -# define OMAP_INT_DMA_CH3 22 -# define OMAP_INT_DMA_CH4 23 -# define OMAP_INT_DMA_CH5 24 -# define OMAP_INT_DMA_LCD 25 -# define OMAP_INT_TIMER1 26 -# define OMAP_INT_WD_TIMER 27 -# define OMAP_INT_BRIDGE_PUB 28 -# define OMAP_INT_TIMER2 30 -# define OMAP_INT_LCD_CTRL 31 +#define OMAP_INT_CAMERA 1 +#define OMAP_INT_FIQ 3 +#define OMAP_INT_RTDX 6 +#define OMAP_INT_DSP_MMU_ABORT 7 +#define OMAP_INT_HOST 8 +#define OMAP_INT_ABORT 9 +#define OMAP_INT_BRIDGE_PRIV 13 +#define OMAP_INT_GPIO_BANK1 14 +#define OMAP_INT_UART3 15 +#define OMAP_INT_TIMER3 16 +#define OMAP_INT_DMA_CH0_6 19 +#define OMAP_INT_DMA_CH1_7 20 +#define OMAP_INT_DMA_CH2_8 21 +#define OMAP_INT_DMA_CH3 22 +#define OMAP_INT_DMA_CH4 23 +#define OMAP_INT_DMA_CH5 24 +#define OMAP_INT_DMA_LCD 25 +#define OMAP_INT_TIMER1 26 +#define OMAP_INT_WD_TIMER 27 +#define OMAP_INT_BRIDGE_PUB 28 +#define OMAP_INT_TIMER2 30 +#define OMAP_INT_LCD_CTRL 31 /* * Common OMAP-15xx IRQ numbers for level 1 interrupt handler */ -# define OMAP_INT_15XX_IH2_IRQ 0 -# define OMAP_INT_15XX_LB_MMU 17 -# define OMAP_INT_15XX_LOCAL_BUS 29 +#define OMAP_INT_15XX_IH2_IRQ 0 +#define OMAP_INT_15XX_LB_MMU 17 +#define OMAP_INT_15XX_LOCAL_BUS 29 /* * OMAP-1510 specific IRQ numbers for level 1 interrupt handler */ -# define OMAP_INT_1510_SPI_TX 4 -# define OMAP_INT_1510_SPI_RX 5 -# define OMAP_INT_1510_DSP_MAILBOX1 10 -# define OMAP_INT_1510_DSP_MAILBOX2 11 +#define OMAP_INT_1510_SPI_TX 4 +#define OMAP_INT_1510_SPI_RX 5 +#define OMAP_INT_1510_DSP_MAILBOX1 10 +#define OMAP_INT_1510_DSP_MAILBOX2 11 /* * OMAP-310 specific IRQ numbers for level 1 interrupt handler */ -# define OMAP_INT_310_McBSP2_TX 4 -# define OMAP_INT_310_McBSP2_RX 5 -# define OMAP_INT_310_HSB_MAILBOX1 12 -# define OMAP_INT_310_HSAB_MMU 18 +#define OMAP_INT_310_McBSP2_TX 4 +#define OMAP_INT_310_McBSP2_RX 5 
+#define OMAP_INT_310_HSB_MAILBOX1 12 +#define OMAP_INT_310_HSAB_MMU 18 /* * OMAP-1610 specific IRQ numbers for level 1 interrupt handler */ -# define OMAP_INT_1610_IH2_IRQ 0 -# define OMAP_INT_1610_IH2_FIQ 2 -# define OMAP_INT_1610_McBSP2_TX 4 -# define OMAP_INT_1610_McBSP2_RX 5 -# define OMAP_INT_1610_DSP_MAILBOX1 10 -# define OMAP_INT_1610_DSP_MAILBOX2 11 -# define OMAP_INT_1610_LCD_LINE 12 -# define OMAP_INT_1610_GPTIMER1 17 -# define OMAP_INT_1610_GPTIMER2 18 -# define OMAP_INT_1610_SSR_FIFO_0 29 +#define OMAP_INT_1610_IH2_IRQ 0 +#define OMAP_INT_1610_IH2_FIQ 2 +#define OMAP_INT_1610_McBSP2_TX 4 +#define OMAP_INT_1610_McBSP2_RX 5 +#define OMAP_INT_1610_DSP_MAILBOX1 10 +#define OMAP_INT_1610_DSP_MAILBOX2 11 +#define OMAP_INT_1610_LCD_LINE 12 +#define OMAP_INT_1610_GPTIMER1 17 +#define OMAP_INT_1610_GPTIMER2 18 +#define OMAP_INT_1610_SSR_FIFO_0 29 /* * OMAP-730 specific IRQ numbers for level 1 interrupt handler */ -# define OMAP_INT_730_IH2_FIQ 0 -# define OMAP_INT_730_IH2_IRQ 1 -# define OMAP_INT_730_USB_NON_ISO 2 -# define OMAP_INT_730_USB_ISO 3 -# define OMAP_INT_730_ICR 4 -# define OMAP_INT_730_EAC 5 -# define OMAP_INT_730_GPIO_BANK1 6 -# define OMAP_INT_730_GPIO_BANK2 7 -# define OMAP_INT_730_GPIO_BANK3 8 -# define OMAP_INT_730_McBSP2TX 10 -# define OMAP_INT_730_McBSP2RX 11 -# define OMAP_INT_730_McBSP2RX_OVF 12 -# define OMAP_INT_730_LCD_LINE 14 -# define OMAP_INT_730_GSM_PROTECT 15 -# define OMAP_INT_730_TIMER3 16 -# define OMAP_INT_730_GPIO_BANK5 17 -# define OMAP_INT_730_GPIO_BANK6 18 -# define OMAP_INT_730_SPGIO_WR 29 +#define OMAP_INT_730_IH2_FIQ 0 +#define OMAP_INT_730_IH2_IRQ 1 +#define OMAP_INT_730_USB_NON_ISO 2 +#define OMAP_INT_730_USB_ISO 3 +#define OMAP_INT_730_ICR 4 +#define OMAP_INT_730_EAC 5 +#define OMAP_INT_730_GPIO_BANK1 6 +#define OMAP_INT_730_GPIO_BANK2 7 +#define OMAP_INT_730_GPIO_BANK3 8 +#define OMAP_INT_730_McBSP2TX 10 +#define OMAP_INT_730_McBSP2RX 11 +#define OMAP_INT_730_McBSP2RX_OVF 12 +#define OMAP_INT_730_LCD_LINE 14 +#define OMAP_INT_730_GSM_PROTECT 15 +#define OMAP_INT_730_TIMER3 16 +#define OMAP_INT_730_GPIO_BANK5 17 +#define OMAP_INT_730_GPIO_BANK6 18 +#define OMAP_INT_730_SPGIO_WR 29 /* * Common IRQ numbers for level 2 interrupt handler */ -# define OMAP_INT_KEYBOARD 1 -# define OMAP_INT_uWireTX 2 -# define OMAP_INT_uWireRX 3 -# define OMAP_INT_I2C 4 -# define OMAP_INT_MPUIO 5 -# define OMAP_INT_USB_HHC_1 6 -# define OMAP_INT_McBSP3TX 10 -# define OMAP_INT_McBSP3RX 11 -# define OMAP_INT_McBSP1TX 12 -# define OMAP_INT_McBSP1RX 13 -# define OMAP_INT_UART1 14 -# define OMAP_INT_UART2 15 -# define OMAP_INT_USB_W2FC 20 -# define OMAP_INT_1WIRE 21 -# define OMAP_INT_OS_TIMER 22 -# define OMAP_INT_OQN 23 -# define OMAP_INT_GAUGE_32K 24 -# define OMAP_INT_RTC_TIMER 25 -# define OMAP_INT_RTC_ALARM 26 -# define OMAP_INT_DSP_MMU 28 +#define OMAP_INT_KEYBOARD 1 +#define OMAP_INT_uWireTX 2 +#define OMAP_INT_uWireRX 3 +#define OMAP_INT_I2C 4 +#define OMAP_INT_MPUIO 5 +#define OMAP_INT_USB_HHC_1 6 +#define OMAP_INT_McBSP3TX 10 +#define OMAP_INT_McBSP3RX 11 +#define OMAP_INT_McBSP1TX 12 +#define OMAP_INT_McBSP1RX 13 +#define OMAP_INT_UART1 14 +#define OMAP_INT_UART2 15 +#define OMAP_INT_USB_W2FC 20 +#define OMAP_INT_1WIRE 21 +#define OMAP_INT_OS_TIMER 22 +#define OMAP_INT_OQN 23 +#define OMAP_INT_GAUGE_32K 24 +#define OMAP_INT_RTC_TIMER 25 +#define OMAP_INT_RTC_ALARM 26 +#define OMAP_INT_DSP_MMU 28 /* * OMAP-1510 specific IRQ numbers for level 2 interrupt handler */ -# define OMAP_INT_1510_BT_MCSI1TX 16 -# define OMAP_INT_1510_BT_MCSI1RX 17 -# define 
OMAP_INT_1510_SoSSI_MATCH 19 -# define OMAP_INT_1510_MEM_STICK 27 -# define OMAP_INT_1510_COM_SPI_RO 31 +#define OMAP_INT_1510_BT_MCSI1TX 16 +#define OMAP_INT_1510_BT_MCSI1RX 17 +#define OMAP_INT_1510_SoSSI_MATCH 19 +#define OMAP_INT_1510_MEM_STICK 27 +#define OMAP_INT_1510_COM_SPI_RO 31 /* * OMAP-310 specific IRQ numbers for level 2 interrupt handler */ -# define OMAP_INT_310_FAC 0 -# define OMAP_INT_310_USB_HHC_2 7 -# define OMAP_INT_310_MCSI1_FE 16 -# define OMAP_INT_310_MCSI2_FE 17 -# define OMAP_INT_310_USB_W2FC_ISO 29 -# define OMAP_INT_310_USB_W2FC_NON_ISO 30 -# define OMAP_INT_310_McBSP2RX_OF 31 +#define OMAP_INT_310_FAC 0 +#define OMAP_INT_310_USB_HHC_2 7 +#define OMAP_INT_310_MCSI1_FE 16 +#define OMAP_INT_310_MCSI2_FE 17 +#define OMAP_INT_310_USB_W2FC_ISO 29 +#define OMAP_INT_310_USB_W2FC_NON_ISO 30 +#define OMAP_INT_310_McBSP2RX_OF 31 /* * OMAP-1610 specific IRQ numbers for level 2 interrupt handler */ -# define OMAP_INT_1610_FAC 0 -# define OMAP_INT_1610_USB_HHC_2 7 -# define OMAP_INT_1610_USB_OTG 8 -# define OMAP_INT_1610_SoSSI 9 -# define OMAP_INT_1610_BT_MCSI1TX 16 -# define OMAP_INT_1610_BT_MCSI1RX 17 -# define OMAP_INT_1610_SoSSI_MATCH 19 -# define OMAP_INT_1610_MEM_STICK 27 -# define OMAP_INT_1610_McBSP2RX_OF 31 -# define OMAP_INT_1610_STI 32 -# define OMAP_INT_1610_STI_WAKEUP 33 -# define OMAP_INT_1610_GPTIMER3 34 -# define OMAP_INT_1610_GPTIMER4 35 -# define OMAP_INT_1610_GPTIMER5 36 -# define OMAP_INT_1610_GPTIMER6 37 -# define OMAP_INT_1610_GPTIMER7 38 -# define OMAP_INT_1610_GPTIMER8 39 -# define OMAP_INT_1610_GPIO_BANK2 40 -# define OMAP_INT_1610_GPIO_BANK3 41 -# define OMAP_INT_1610_MMC2 42 -# define OMAP_INT_1610_CF 43 -# define OMAP_INT_1610_WAKE_UP_REQ 46 -# define OMAP_INT_1610_GPIO_BANK4 48 -# define OMAP_INT_1610_SPI 49 -# define OMAP_INT_1610_DMA_CH6 53 -# define OMAP_INT_1610_DMA_CH7 54 -# define OMAP_INT_1610_DMA_CH8 55 -# define OMAP_INT_1610_DMA_CH9 56 -# define OMAP_INT_1610_DMA_CH10 57 -# define OMAP_INT_1610_DMA_CH11 58 -# define OMAP_INT_1610_DMA_CH12 59 -# define OMAP_INT_1610_DMA_CH13 60 -# define OMAP_INT_1610_DMA_CH14 61 -# define OMAP_INT_1610_DMA_CH15 62 -# define OMAP_INT_1610_NAND 63 +#define OMAP_INT_1610_FAC 0 +#define OMAP_INT_1610_USB_HHC_2 7 +#define OMAP_INT_1610_USB_OTG 8 +#define OMAP_INT_1610_SoSSI 9 +#define OMAP_INT_1610_BT_MCSI1TX 16 +#define OMAP_INT_1610_BT_MCSI1RX 17 +#define OMAP_INT_1610_SoSSI_MATCH 19 +#define OMAP_INT_1610_MEM_STICK 27 +#define OMAP_INT_1610_McBSP2RX_OF 31 +#define OMAP_INT_1610_STI 32 +#define OMAP_INT_1610_STI_WAKEUP 33 +#define OMAP_INT_1610_GPTIMER3 34 +#define OMAP_INT_1610_GPTIMER4 35 +#define OMAP_INT_1610_GPTIMER5 36 +#define OMAP_INT_1610_GPTIMER6 37 +#define OMAP_INT_1610_GPTIMER7 38 +#define OMAP_INT_1610_GPTIMER8 39 +#define OMAP_INT_1610_GPIO_BANK2 40 +#define OMAP_INT_1610_GPIO_BANK3 41 +#define OMAP_INT_1610_MMC2 42 +#define OMAP_INT_1610_CF 43 +#define OMAP_INT_1610_WAKE_UP_REQ 46 +#define OMAP_INT_1610_GPIO_BANK4 48 +#define OMAP_INT_1610_SPI 49 +#define OMAP_INT_1610_DMA_CH6 53 +#define OMAP_INT_1610_DMA_CH7 54 +#define OMAP_INT_1610_DMA_CH8 55 +#define OMAP_INT_1610_DMA_CH9 56 +#define OMAP_INT_1610_DMA_CH10 57 +#define OMAP_INT_1610_DMA_CH11 58 +#define OMAP_INT_1610_DMA_CH12 59 +#define OMAP_INT_1610_DMA_CH13 60 +#define OMAP_INT_1610_DMA_CH14 61 +#define OMAP_INT_1610_DMA_CH15 62 +#define OMAP_INT_1610_NAND 63 /* * OMAP-730 specific IRQ numbers for level 2 interrupt handler */ -# define OMAP_INT_730_HW_ERRORS 0 -# define OMAP_INT_730_NFIQ_PWR_FAIL 1 -# define OMAP_INT_730_CFCD 2 -# 
define OMAP_INT_730_CFIREQ 3 -# define OMAP_INT_730_I2C 4 -# define OMAP_INT_730_PCC 5 -# define OMAP_INT_730_MPU_EXT_NIRQ 6 -# define OMAP_INT_730_SPI_100K_1 7 -# define OMAP_INT_730_SYREN_SPI 8 -# define OMAP_INT_730_VLYNQ 9 -# define OMAP_INT_730_GPIO_BANK4 10 -# define OMAP_INT_730_McBSP1TX 11 -# define OMAP_INT_730_McBSP1RX 12 -# define OMAP_INT_730_McBSP1RX_OF 13 -# define OMAP_INT_730_UART_MODEM_IRDA_2 14 -# define OMAP_INT_730_UART_MODEM_1 15 -# define OMAP_INT_730_MCSI 16 -# define OMAP_INT_730_uWireTX 17 -# define OMAP_INT_730_uWireRX 18 -# define OMAP_INT_730_SMC_CD 19 -# define OMAP_INT_730_SMC_IREQ 20 -# define OMAP_INT_730_HDQ_1WIRE 21 -# define OMAP_INT_730_TIMER32K 22 -# define OMAP_INT_730_MMC_SDIO 23 -# define OMAP_INT_730_UPLD 24 -# define OMAP_INT_730_USB_HHC_1 27 -# define OMAP_INT_730_USB_HHC_2 28 -# define OMAP_INT_730_USB_GENI 29 -# define OMAP_INT_730_USB_OTG 30 -# define OMAP_INT_730_CAMERA_IF 31 -# define OMAP_INT_730_RNG 32 -# define OMAP_INT_730_DUAL_MODE_TIMER 33 -# define OMAP_INT_730_DBB_RF_EN 34 -# define OMAP_INT_730_MPUIO_KEYPAD 35 -# define OMAP_INT_730_SHA1_MD5 36 -# define OMAP_INT_730_SPI_100K_2 37 -# define OMAP_INT_730_RNG_IDLE 38 -# define OMAP_INT_730_MPUIO 39 -# define OMAP_INT_730_LLPC_LCD_CTRL_OFF 40 -# define OMAP_INT_730_LLPC_OE_FALLING 41 -# define OMAP_INT_730_LLPC_OE_RISING 42 -# define OMAP_INT_730_LLPC_VSYNC 43 -# define OMAP_INT_730_WAKE_UP_REQ 46 -# define OMAP_INT_730_DMA_CH6 53 -# define OMAP_INT_730_DMA_CH7 54 -# define OMAP_INT_730_DMA_CH8 55 -# define OMAP_INT_730_DMA_CH9 56 -# define OMAP_INT_730_DMA_CH10 57 -# define OMAP_INT_730_DMA_CH11 58 -# define OMAP_INT_730_DMA_CH12 59 -# define OMAP_INT_730_DMA_CH13 60 -# define OMAP_INT_730_DMA_CH14 61 -# define OMAP_INT_730_DMA_CH15 62 -# define OMAP_INT_730_NAND 63 - -/* - * OMAP-24xx common IRQ numbers - */ -# define OMAP_INT_24XX_STI 4 -# define OMAP_INT_24XX_SYS_NIRQ 7 -# define OMAP_INT_24XX_L3_IRQ 10 -# define OMAP_INT_24XX_PRCM_MPU_IRQ 11 -# define OMAP_INT_24XX_SDMA_IRQ0 12 -# define OMAP_INT_24XX_SDMA_IRQ1 13 -# define OMAP_INT_24XX_SDMA_IRQ2 14 -# define OMAP_INT_24XX_SDMA_IRQ3 15 -# define OMAP_INT_243X_MCBSP2_IRQ 16 -# define OMAP_INT_243X_MCBSP3_IRQ 17 -# define OMAP_INT_243X_MCBSP4_IRQ 18 -# define OMAP_INT_243X_MCBSP5_IRQ 19 -# define OMAP_INT_24XX_GPMC_IRQ 20 -# define OMAP_INT_24XX_GUFFAW_IRQ 21 -# define OMAP_INT_24XX_IVA_IRQ 22 -# define OMAP_INT_24XX_EAC_IRQ 23 -# define OMAP_INT_24XX_CAM_IRQ 24 -# define OMAP_INT_24XX_DSS_IRQ 25 -# define OMAP_INT_24XX_MAIL_U0_MPU 26 -# define OMAP_INT_24XX_DSP_UMA 27 -# define OMAP_INT_24XX_DSP_MMU 28 -# define OMAP_INT_24XX_GPIO_BANK1 29 -# define OMAP_INT_24XX_GPIO_BANK2 30 -# define OMAP_INT_24XX_GPIO_BANK3 31 -# define OMAP_INT_24XX_GPIO_BANK4 32 -# define OMAP_INT_243X_GPIO_BANK5 33 -# define OMAP_INT_24XX_MAIL_U3_MPU 34 -# define OMAP_INT_24XX_WDT3 35 -# define OMAP_INT_24XX_WDT4 36 -# define OMAP_INT_24XX_GPTIMER1 37 -# define OMAP_INT_24XX_GPTIMER2 38 -# define OMAP_INT_24XX_GPTIMER3 39 -# define OMAP_INT_24XX_GPTIMER4 40 -# define OMAP_INT_24XX_GPTIMER5 41 -# define OMAP_INT_24XX_GPTIMER6 42 -# define OMAP_INT_24XX_GPTIMER7 43 -# define OMAP_INT_24XX_GPTIMER8 44 -# define OMAP_INT_24XX_GPTIMER9 45 -# define OMAP_INT_24XX_GPTIMER10 46 -# define OMAP_INT_24XX_GPTIMER11 47 -# define OMAP_INT_24XX_GPTIMER12 48 -# define OMAP_INT_24XX_PKA_IRQ 50 -# define OMAP_INT_24XX_SHA1MD5_IRQ 51 -# define OMAP_INT_24XX_RNG_IRQ 52 -# define OMAP_INT_24XX_MG_IRQ 53 -# define OMAP_INT_24XX_I2C1_IRQ 56 -# define OMAP_INT_24XX_I2C2_IRQ 57 -# 
define OMAP_INT_24XX_MCBSP1_IRQ_TX 59 -# define OMAP_INT_24XX_MCBSP1_IRQ_RX 60 -# define OMAP_INT_24XX_MCBSP2_IRQ_TX 62 -# define OMAP_INT_24XX_MCBSP2_IRQ_RX 63 -# define OMAP_INT_243X_MCBSP1_IRQ 64 -# define OMAP_INT_24XX_MCSPI1_IRQ 65 -# define OMAP_INT_24XX_MCSPI2_IRQ 66 -# define OMAP_INT_24XX_SSI1_IRQ0 67 -# define OMAP_INT_24XX_SSI1_IRQ1 68 -# define OMAP_INT_24XX_SSI2_IRQ0 69 -# define OMAP_INT_24XX_SSI2_IRQ1 70 -# define OMAP_INT_24XX_SSI_GDD_IRQ 71 -# define OMAP_INT_24XX_UART1_IRQ 72 -# define OMAP_INT_24XX_UART2_IRQ 73 -# define OMAP_INT_24XX_UART3_IRQ 74 -# define OMAP_INT_24XX_USB_IRQ_GEN 75 -# define OMAP_INT_24XX_USB_IRQ_NISO 76 -# define OMAP_INT_24XX_USB_IRQ_ISO 77 -# define OMAP_INT_24XX_USB_IRQ_HGEN 78 -# define OMAP_INT_24XX_USB_IRQ_HSOF 79 -# define OMAP_INT_24XX_USB_IRQ_OTG 80 -# define OMAP_INT_24XX_VLYNQ_IRQ 81 -# define OMAP_INT_24XX_MMC_IRQ 83 -# define OMAP_INT_24XX_MS_IRQ 84 -# define OMAP_INT_24XX_FAC_IRQ 85 -# define OMAP_INT_24XX_MCSPI3_IRQ 91 -# define OMAP_INT_243X_HS_USB_MC 92 -# define OMAP_INT_243X_HS_USB_DMA 93 -# define OMAP_INT_243X_CARKIT 94 -# define OMAP_INT_34XX_GPTIMER12 95 +#define OMAP_INT_730_HW_ERRORS 0 +#define OMAP_INT_730_NFIQ_PWR_FAIL 1 +#define OMAP_INT_730_CFCD 2 +#define OMAP_INT_730_CFIREQ 3 +#define OMAP_INT_730_I2C 4 +#define OMAP_INT_730_PCC 5 +#define OMAP_INT_730_MPU_EXT_NIRQ 6 +#define OMAP_INT_730_SPI_100K_1 7 +#define OMAP_INT_730_SYREN_SPI 8 +#define OMAP_INT_730_VLYNQ 9 +#define OMAP_INT_730_GPIO_BANK4 10 +#define OMAP_INT_730_McBSP1TX 11 +#define OMAP_INT_730_McBSP1RX 12 +#define OMAP_INT_730_McBSP1RX_OF 13 +#define OMAP_INT_730_UART_MODEM_IRDA_2 14 +#define OMAP_INT_730_UART_MODEM_1 15 +#define OMAP_INT_730_MCSI 16 +#define OMAP_INT_730_uWireTX 17 +#define OMAP_INT_730_uWireRX 18 +#define OMAP_INT_730_SMC_CD 19 +#define OMAP_INT_730_SMC_IREQ 20 +#define OMAP_INT_730_HDQ_1WIRE 21 +#define OMAP_INT_730_TIMER32K 22 +#define OMAP_INT_730_MMC_SDIO 23 +#define OMAP_INT_730_UPLD 24 +#define OMAP_INT_730_USB_HHC_1 27 +#define OMAP_INT_730_USB_HHC_2 28 +#define OMAP_INT_730_USB_GENI 29 +#define OMAP_INT_730_USB_OTG 30 +#define OMAP_INT_730_CAMERA_IF 31 +#define OMAP_INT_730_RNG 32 +#define OMAP_INT_730_DUAL_MODE_TIMER 33 +#define OMAP_INT_730_DBB_RF_EN 34 +#define OMAP_INT_730_MPUIO_KEYPAD 35 +#define OMAP_INT_730_SHA1_MD5 36 +#define OMAP_INT_730_SPI_100K_2 37 +#define OMAP_INT_730_RNG_IDLE 38 +#define OMAP_INT_730_MPUIO 39 +#define OMAP_INT_730_LLPC_LCD_CTRL_OFF 40 +#define OMAP_INT_730_LLPC_OE_FALLING 41 +#define OMAP_INT_730_LLPC_OE_RISING 42 +#define OMAP_INT_730_LLPC_VSYNC 43 +#define OMAP_INT_730_WAKE_UP_REQ 46 +#define OMAP_INT_730_DMA_CH6 53 +#define OMAP_INT_730_DMA_CH7 54 +#define OMAP_INT_730_DMA_CH8 55 +#define OMAP_INT_730_DMA_CH9 56 +#define OMAP_INT_730_DMA_CH10 57 +#define OMAP_INT_730_DMA_CH11 58 +#define OMAP_INT_730_DMA_CH12 59 +#define OMAP_INT_730_DMA_CH13 60 +#define OMAP_INT_730_DMA_CH14 61 +#define OMAP_INT_730_DMA_CH15 62 +#define OMAP_INT_730_NAND 63 /* omap_dma.c */ enum omap_dma_model { omap_dma_3_0, omap_dma_3_1, omap_dma_3_2, - omap_dma_4, }; struct soc_dma_s; @@ -507,9 +353,9 @@ struct dma_irq_map { enum omap_dma_port { emiff = 0, emifs, - imif, /* omap16xx: ocp_t1 */ + imif, /* omap16xx: ocp_t1 */ tipb, - local, /* omap16xx: ocp_t2 */ + local, /* omap16xx: ocp_t2 */ tipb_mpui, __omap_dma_port_last, }; @@ -572,157 +418,71 @@ struct omap_dma_lcd_channel_s { * DMA request numbers for OMAP1 * See /usr/include/asm-arm/arch-omap/dma.h in Linux. 
*/ -# define OMAP_DMA_NO_DEVICE 0 -# define OMAP_DMA_MCSI1_TX 1 -# define OMAP_DMA_MCSI1_RX 2 -# define OMAP_DMA_I2C_RX 3 -# define OMAP_DMA_I2C_TX 4 -# define OMAP_DMA_EXT_NDMA_REQ0 5 -# define OMAP_DMA_EXT_NDMA_REQ1 6 -# define OMAP_DMA_UWIRE_TX 7 -# define OMAP_DMA_MCBSP1_TX 8 -# define OMAP_DMA_MCBSP1_RX 9 -# define OMAP_DMA_MCBSP3_TX 10 -# define OMAP_DMA_MCBSP3_RX 11 -# define OMAP_DMA_UART1_TX 12 -# define OMAP_DMA_UART1_RX 13 -# define OMAP_DMA_UART2_TX 14 -# define OMAP_DMA_UART2_RX 15 -# define OMAP_DMA_MCBSP2_TX 16 -# define OMAP_DMA_MCBSP2_RX 17 -# define OMAP_DMA_UART3_TX 18 -# define OMAP_DMA_UART3_RX 19 -# define OMAP_DMA_CAMERA_IF_RX 20 -# define OMAP_DMA_MMC_TX 21 -# define OMAP_DMA_MMC_RX 22 -# define OMAP_DMA_NAND 23 /* Not in OMAP310 */ -# define OMAP_DMA_IRQ_LCD_LINE 24 /* Not in OMAP310 */ -# define OMAP_DMA_MEMORY_STICK 25 /* Not in OMAP310 */ -# define OMAP_DMA_USB_W2FC_RX0 26 -# define OMAP_DMA_USB_W2FC_RX1 27 -# define OMAP_DMA_USB_W2FC_RX2 28 -# define OMAP_DMA_USB_W2FC_TX0 29 -# define OMAP_DMA_USB_W2FC_TX1 30 -# define OMAP_DMA_USB_W2FC_TX2 31 +#define OMAP_DMA_NO_DEVICE 0 +#define OMAP_DMA_MCSI1_TX 1 +#define OMAP_DMA_MCSI1_RX 2 +#define OMAP_DMA_I2C_RX 3 +#define OMAP_DMA_I2C_TX 4 +#define OMAP_DMA_EXT_NDMA_REQ0 5 +#define OMAP_DMA_EXT_NDMA_REQ1 6 +#define OMAP_DMA_UWIRE_TX 7 +#define OMAP_DMA_MCBSP1_TX 8 +#define OMAP_DMA_MCBSP1_RX 9 +#define OMAP_DMA_MCBSP3_TX 10 +#define OMAP_DMA_MCBSP3_RX 11 +#define OMAP_DMA_UART1_TX 12 +#define OMAP_DMA_UART1_RX 13 +#define OMAP_DMA_UART2_TX 14 +#define OMAP_DMA_UART2_RX 15 +#define OMAP_DMA_MCBSP2_TX 16 +#define OMAP_DMA_MCBSP2_RX 17 +#define OMAP_DMA_UART3_TX 18 +#define OMAP_DMA_UART3_RX 19 +#define OMAP_DMA_CAMERA_IF_RX 20 +#define OMAP_DMA_MMC_TX 21 +#define OMAP_DMA_MMC_RX 22 +#define OMAP_DMA_NAND 23 /* Not in OMAP310 */ +#define OMAP_DMA_IRQ_LCD_LINE 24 /* Not in OMAP310 */ +#define OMAP_DMA_MEMORY_STICK 25 /* Not in OMAP310 */ +#define OMAP_DMA_USB_W2FC_RX0 26 +#define OMAP_DMA_USB_W2FC_RX1 27 +#define OMAP_DMA_USB_W2FC_RX2 28 +#define OMAP_DMA_USB_W2FC_TX0 29 +#define OMAP_DMA_USB_W2FC_TX1 30 +#define OMAP_DMA_USB_W2FC_TX2 31 /* These are only for 1610 */ -# define OMAP_DMA_CRYPTO_DES_IN 32 -# define OMAP_DMA_SPI_TX 33 -# define OMAP_DMA_SPI_RX 34 -# define OMAP_DMA_CRYPTO_HASH 35 -# define OMAP_DMA_CCP_ATTN 36 -# define OMAP_DMA_CCP_FIFO_NOT_EMPTY 37 -# define OMAP_DMA_CMT_APE_TX_CHAN_0 38 -# define OMAP_DMA_CMT_APE_RV_CHAN_0 39 -# define OMAP_DMA_CMT_APE_TX_CHAN_1 40 -# define OMAP_DMA_CMT_APE_RV_CHAN_1 41 -# define OMAP_DMA_CMT_APE_TX_CHAN_2 42 -# define OMAP_DMA_CMT_APE_RV_CHAN_2 43 -# define OMAP_DMA_CMT_APE_TX_CHAN_3 44 -# define OMAP_DMA_CMT_APE_RV_CHAN_3 45 -# define OMAP_DMA_CMT_APE_TX_CHAN_4 46 -# define OMAP_DMA_CMT_APE_RV_CHAN_4 47 -# define OMAP_DMA_CMT_APE_TX_CHAN_5 48 -# define OMAP_DMA_CMT_APE_RV_CHAN_5 49 -# define OMAP_DMA_CMT_APE_TX_CHAN_6 50 -# define OMAP_DMA_CMT_APE_RV_CHAN_6 51 -# define OMAP_DMA_CMT_APE_TX_CHAN_7 52 -# define OMAP_DMA_CMT_APE_RV_CHAN_7 53 -# define OMAP_DMA_MMC2_TX 54 -# define OMAP_DMA_MMC2_RX 55 -# define OMAP_DMA_CRYPTO_DES_OUT 56 - -/* - * DMA request numbers for the OMAP2 - */ -# define OMAP24XX_DMA_NO_DEVICE 0 -# define OMAP24XX_DMA_XTI_DMA 1 /* Not in OMAP2420 */ -# define OMAP24XX_DMA_EXT_DMAREQ0 2 -# define OMAP24XX_DMA_EXT_DMAREQ1 3 -# define OMAP24XX_DMA_GPMC 4 -# define OMAP24XX_DMA_GFX 5 /* Not in OMAP2420 */ -# define OMAP24XX_DMA_DSS 6 -# define OMAP24XX_DMA_VLYNQ_TX 7 /* Not in OMAP2420 */ -# define OMAP24XX_DMA_CWT 8 /* Not in OMAP2420 */ -# define 
OMAP24XX_DMA_AES_TX 9 /* Not in OMAP2420 */ -# define OMAP24XX_DMA_AES_RX 10 /* Not in OMAP2420 */ -# define OMAP24XX_DMA_DES_TX 11 /* Not in OMAP2420 */ -# define OMAP24XX_DMA_DES_RX 12 /* Not in OMAP2420 */ -# define OMAP24XX_DMA_SHA1MD5_RX 13 /* Not in OMAP2420 */ -# define OMAP24XX_DMA_EXT_DMAREQ2 14 -# define OMAP24XX_DMA_EXT_DMAREQ3 15 -# define OMAP24XX_DMA_EXT_DMAREQ4 16 -# define OMAP24XX_DMA_EAC_AC_RD 17 -# define OMAP24XX_DMA_EAC_AC_WR 18 -# define OMAP24XX_DMA_EAC_MD_UL_RD 19 -# define OMAP24XX_DMA_EAC_MD_UL_WR 20 -# define OMAP24XX_DMA_EAC_MD_DL_RD 21 -# define OMAP24XX_DMA_EAC_MD_DL_WR 22 -# define OMAP24XX_DMA_EAC_BT_UL_RD 23 -# define OMAP24XX_DMA_EAC_BT_UL_WR 24 -# define OMAP24XX_DMA_EAC_BT_DL_RD 25 -# define OMAP24XX_DMA_EAC_BT_DL_WR 26 -# define OMAP24XX_DMA_I2C1_TX 27 -# define OMAP24XX_DMA_I2C1_RX 28 -# define OMAP24XX_DMA_I2C2_TX 29 -# define OMAP24XX_DMA_I2C2_RX 30 -# define OMAP24XX_DMA_MCBSP1_TX 31 -# define OMAP24XX_DMA_MCBSP1_RX 32 -# define OMAP24XX_DMA_MCBSP2_TX 33 -# define OMAP24XX_DMA_MCBSP2_RX 34 -# define OMAP24XX_DMA_SPI1_TX0 35 -# define OMAP24XX_DMA_SPI1_RX0 36 -# define OMAP24XX_DMA_SPI1_TX1 37 -# define OMAP24XX_DMA_SPI1_RX1 38 -# define OMAP24XX_DMA_SPI1_TX2 39 -# define OMAP24XX_DMA_SPI1_RX2 40 -# define OMAP24XX_DMA_SPI1_TX3 41 -# define OMAP24XX_DMA_SPI1_RX3 42 -# define OMAP24XX_DMA_SPI2_TX0 43 -# define OMAP24XX_DMA_SPI2_RX0 44 -# define OMAP24XX_DMA_SPI2_TX1 45 -# define OMAP24XX_DMA_SPI2_RX1 46 - -# define OMAP24XX_DMA_UART1_TX 49 -# define OMAP24XX_DMA_UART1_RX 50 -# define OMAP24XX_DMA_UART2_TX 51 -# define OMAP24XX_DMA_UART2_RX 52 -# define OMAP24XX_DMA_UART3_TX 53 -# define OMAP24XX_DMA_UART3_RX 54 -# define OMAP24XX_DMA_USB_W2FC_TX0 55 -# define OMAP24XX_DMA_USB_W2FC_RX0 56 -# define OMAP24XX_DMA_USB_W2FC_TX1 57 -# define OMAP24XX_DMA_USB_W2FC_RX1 58 -# define OMAP24XX_DMA_USB_W2FC_TX2 59 -# define OMAP24XX_DMA_USB_W2FC_RX2 60 -# define OMAP24XX_DMA_MMC1_TX 61 -# define OMAP24XX_DMA_MMC1_RX 62 -# define OMAP24XX_DMA_MS 63 /* Not in OMAP2420 */ -# define OMAP24XX_DMA_EXT_DMAREQ5 64 - -/* omap[123].c */ -/* OMAP2 gp timer */ -struct omap_gp_timer_s; -struct omap_gp_timer_s *omap_gp_timer_init(struct omap_target_agent_s *ta, - qemu_irq irq, omap_clk fclk, omap_clk iclk); -void omap_gp_timer_reset(struct omap_gp_timer_s *s); - -/* OMAP2 sysctimer */ -struct omap_synctimer_s; -struct omap_synctimer_s *omap_synctimer_init(struct omap_target_agent_s *ta, - struct omap_mpu_state_s *mpu, omap_clk fclk, omap_clk iclk); -void omap_synctimer_reset(struct omap_synctimer_s *s); +#define OMAP_DMA_CRYPTO_DES_IN 32 +#define OMAP_DMA_SPI_TX 33 +#define OMAP_DMA_SPI_RX 34 +#define OMAP_DMA_CRYPTO_HASH 35 +#define OMAP_DMA_CCP_ATTN 36 +#define OMAP_DMA_CCP_FIFO_NOT_EMPTY 37 +#define OMAP_DMA_CMT_APE_TX_CHAN_0 38 +#define OMAP_DMA_CMT_APE_RV_CHAN_0 39 +#define OMAP_DMA_CMT_APE_TX_CHAN_1 40 +#define OMAP_DMA_CMT_APE_RV_CHAN_1 41 +#define OMAP_DMA_CMT_APE_TX_CHAN_2 42 +#define OMAP_DMA_CMT_APE_RV_CHAN_2 43 +#define OMAP_DMA_CMT_APE_TX_CHAN_3 44 +#define OMAP_DMA_CMT_APE_RV_CHAN_3 45 +#define OMAP_DMA_CMT_APE_TX_CHAN_4 46 +#define OMAP_DMA_CMT_APE_RV_CHAN_4 47 +#define OMAP_DMA_CMT_APE_TX_CHAN_5 48 +#define OMAP_DMA_CMT_APE_RV_CHAN_5 49 +#define OMAP_DMA_CMT_APE_TX_CHAN_6 50 +#define OMAP_DMA_CMT_APE_RV_CHAN_6 51 +#define OMAP_DMA_CMT_APE_TX_CHAN_7 52 +#define OMAP_DMA_CMT_APE_RV_CHAN_7 53 +#define OMAP_DMA_MMC2_TX 54 +#define OMAP_DMA_MMC2_RX 55 +#define OMAP_DMA_CRYPTO_DES_OUT 56 struct omap_uart_s; struct omap_uart_s *omap_uart_init(hwaddr base, qemu_irq 
irq, omap_clk fclk, omap_clk iclk, qemu_irq txdma, qemu_irq rxdma, const char *label, Chardev *chr); -struct omap_uart_s *omap2_uart_init(MemoryRegion *sysmem, - struct omap_target_agent_s *ta, - qemu_irq irq, omap_clk fclk, omap_clk iclk, - qemu_irq txdma, qemu_irq rxdma, - const char *label, Chardev *chr); void omap_uart_reset(struct omap_uart_s *s); struct omap_mpuio_s; @@ -731,17 +491,6 @@ void omap_mpuio_out_set(struct omap_mpuio_s *s, int line, qemu_irq handler); void omap_mpuio_key(struct omap_mpuio_s *s, int row, int col, int down); struct omap_uwire_s; -void omap_uwire_attach(struct omap_uwire_s *s, - uWireSlave *slave, int chipselect); - -/* OMAP2 spi */ -struct omap_mcspi_s; -struct omap_mcspi_s *omap_mcspi_init(struct omap_target_agent_s *ta, int chnum, - qemu_irq irq, qemu_irq *drq, omap_clk fclk, omap_clk iclk); -void omap_mcspi_attach(struct omap_mcspi_s *s, - uint32_t (*txrx)(void *opaque, uint32_t, int), void *opaque, - int chipselect); -void omap_mcspi_reset(struct omap_mcspi_s *s); struct I2SCodec { void *opaque; @@ -770,9 +519,6 @@ struct I2SCodec { struct omap_mcbsp_s; void omap_mcbsp_i2s_attach(struct omap_mcbsp_s *s, I2SCodec *slave); -void omap_tap_init(struct omap_target_agent_s *ta, - struct omap_mpu_state_s *mpu); - /* omap_lcdc.c */ struct omap_lcd_panel_s; void omap_lcdc_reset(struct omap_lcd_panel_s *s); @@ -782,61 +528,29 @@ struct omap_lcd_panel_s *omap_lcdc_init(MemoryRegion *sysmem, struct omap_dma_lcd_channel_s *dma, omap_clk clk); -/* omap_dss.c */ -struct rfbi_chip_s { - void *opaque; - void (*write)(void *opaque, int dc, uint16_t value); - void (*block)(void *opaque, int dc, void *buf, size_t len, int pitch); - uint16_t (*read)(void *opaque, int dc); -}; -struct omap_dss_s; -void omap_dss_reset(struct omap_dss_s *s); -struct omap_dss_s *omap_dss_init(struct omap_target_agent_s *ta, - MemoryRegion *sysmem, - hwaddr l3_base, - qemu_irq irq, qemu_irq drq, - omap_clk fck1, omap_clk fck2, omap_clk ck54m, - omap_clk ick1, omap_clk ick2); -void omap_rfbi_attach(struct omap_dss_s *s, int cs, struct rfbi_chip_s *chip); - /* omap_mmc.c */ -struct omap_mmc_s; -struct omap_mmc_s *omap_mmc_init(hwaddr base, - MemoryRegion *sysmem, - BlockBackend *blk, - qemu_irq irq, qemu_irq dma[], omap_clk clk); -struct omap_mmc_s *omap2_mmc_init(struct omap_target_agent_s *ta, - BlockBackend *blk, qemu_irq irq, qemu_irq dma[], - omap_clk fclk, omap_clk iclk); -void omap_mmc_reset(struct omap_mmc_s *s); -void omap_mmc_handlers(struct omap_mmc_s *s, qemu_irq ro, qemu_irq cover); -void omap_mmc_enable(struct omap_mmc_s *s, int enable); +#define TYPE_OMAP_MMC "omap-mmc" +OBJECT_DECLARE_SIMPLE_TYPE(OMAPMMCState, OMAP_MMC) + +DeviceState *omap_mmc_init(hwaddr base, + MemoryRegion *sysmem, + qemu_irq irq, qemu_irq dma[], omap_clk clk); +/* TODO: clock framework (see above) */ +void omap_mmc_set_clk(DeviceState *dev, omap_clk clk); + /* omap_i2c.c */ I2CBus *omap_i2c_bus(DeviceState *omap_i2c); -# define cpu_is_omap310(cpu) (cpu->mpu_model == omap310) -# define cpu_is_omap1510(cpu) (cpu->mpu_model == omap1510) -# define cpu_is_omap1610(cpu) (cpu->mpu_model == omap1610) -# define cpu_is_omap1710(cpu) (cpu->mpu_model == omap1710) -# define cpu_is_omap2410(cpu) (cpu->mpu_model == omap2410) -# define cpu_is_omap2420(cpu) (cpu->mpu_model == omap2420) -# define cpu_is_omap2430(cpu) (cpu->mpu_model == omap2430) -# define cpu_is_omap3430(cpu) (cpu->mpu_model == omap3430) -# define cpu_is_omap3630(cpu) (cpu->mpu_model == omap3630) - -# define cpu_is_omap15xx(cpu) \ +#define cpu_is_omap310(cpu) 
(cpu->mpu_model == omap310) +#define cpu_is_omap1510(cpu) (cpu->mpu_model == omap1510) +#define cpu_is_omap1610(cpu) (cpu->mpu_model == omap1610) +#define cpu_is_omap1710(cpu) (cpu->mpu_model == omap1710) + +#define cpu_is_omap15xx(cpu) \ (cpu_is_omap310(cpu) || cpu_is_omap1510(cpu)) -# define cpu_is_omap16xx(cpu) \ +#define cpu_is_omap16xx(cpu) \ (cpu_is_omap1610(cpu) || cpu_is_omap1710(cpu)) -# define cpu_is_omap24xx(cpu) \ - (cpu_is_omap2410(cpu) || cpu_is_omap2420(cpu) || cpu_is_omap2430(cpu)) - -# define cpu_class_omap1(cpu) \ - (cpu_is_omap15xx(cpu) || cpu_is_omap16xx(cpu)) -# define cpu_class_omap2(cpu) cpu_is_omap24xx(cpu) -# define cpu_class_omap3(cpu) \ - (cpu_is_omap3430(cpu) || cpu_is_omap3630(cpu)) struct omap_mpu_state_s { enum omap_mpu_model { @@ -844,13 +558,6 @@ struct omap_mpu_state_s { omap1510, omap1610, omap1710, - omap2410, - omap2420, - omap2422, - omap2423, - omap2430, - omap3430, - omap3630, } mpu_model; ARMCPU *cpu; @@ -897,7 +604,7 @@ struct omap_mpu_state_s { /* MPU public TIPB peripherals */ struct omap_32khz_timer_s *os_timer; - struct omap_mmc_s *mmc; + DeviceState *mmc; struct omap_mpuio_s *mpuio; @@ -960,33 +667,12 @@ struct omap_mpu_state_s { uint16_t dsp_idlect2; uint16_t dsp_rstct2; } clkm; - - /* OMAP2-only peripherals */ - struct omap_l4_s *l4; - - struct omap_gp_timer_s *gptimer[12]; - struct omap_synctimer_s *synctimer; - - struct omap_prcm_s *prcm; - struct omap_sdrc_s *sdrc; - struct omap_gpmc_s *gpmc; - struct omap_sysctl_s *sysc; - - struct omap_mcspi_s *mcspi[2]; - - struct omap_dss_s *dss; - - struct omap_eac_s *eac; }; /* omap1.c */ struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *sdram, const char *core); -/* omap2.c */ -struct omap_mpu_state_s *omap2420_mpu_init(MemoryRegion *sdram, - const char *core); - uint32_t omap_badwidth_read8(void *opaque, hwaddr addr); void omap_badwidth_write8(void *opaque, hwaddr addr, uint32_t value); @@ -999,43 +685,14 @@ void omap_badwidth_write32(void *opaque, hwaddr addr, void omap_mpu_wakeup(void *opaque, int irq, int req); -# define OMAP_BAD_REG(paddr) \ +#define OMAP_BAD_REG(paddr) \ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad register %#08"HWADDR_PRIx"\n", \ __func__, paddr) -# define OMAP_RO_REG(paddr) \ +#define OMAP_RO_REG(paddr) \ qemu_log_mask(LOG_GUEST_ERROR, "%s: Read-only register %#08" \ HWADDR_PRIx "\n", \ __func__, paddr) -/* OMAP-specific Linux bootloader tags for the ATAG_BOARD area - * (Board-specific tags are not here) - */ -#define OMAP_TAG_CLOCK 0x4f01 -#define OMAP_TAG_MMC 0x4f02 -#define OMAP_TAG_SERIAL_CONSOLE 0x4f03 -#define OMAP_TAG_USB 0x4f04 -#define OMAP_TAG_LCD 0x4f05 -#define OMAP_TAG_GPIO_SWITCH 0x4f06 -#define OMAP_TAG_UART 0x4f07 -#define OMAP_TAG_FBMEM 0x4f08 -#define OMAP_TAG_STI_CONSOLE 0x4f09 -#define OMAP_TAG_CAMERA_SENSOR 0x4f0a -#define OMAP_TAG_PARTITION 0x4f0b -#define OMAP_TAG_TEA5761 0x4f10 -#define OMAP_TAG_TMP105 0x4f11 -#define OMAP_TAG_BOOT_REASON 0x4f80 -#define OMAP_TAG_FLASH_PART_STR 0x4f81 -#define OMAP_TAG_VERSION_STR 0x4f82 - -enum { - OMAP_GPIOSW_TYPE_COVER = 0 << 4, - OMAP_GPIOSW_TYPE_CONNECTION = 1 << 4, - OMAP_GPIOSW_TYPE_ACTIVITY = 2 << 4, -}; - -#define OMAP_GPIOSW_INVERTED 0x0001 -#define OMAP_GPIOSW_OUTPUT 0x0002 - -# define OMAP_MPUI_REG_MASK 0x000007ff +#define OMAP_MPUI_REG_MASK 0x000007ff #endif diff --git a/include/hw/arm/pxa.h b/include/hw/arm/pxa.h deleted file mode 100644 index 4c6caee1134..00000000000 --- a/include/hw/arm/pxa.h +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Intel XScale PXA255/270 processor support. 
- * - * Copyright (c) 2006 Openedhand Ltd. - * Written by Andrzej Zaborowski - * - * This code is licensed under the GNU GPL v2. - */ - -#ifndef PXA_H -#define PXA_H - -#include "exec/memory.h" -#include "target/arm/cpu-qom.h" -#include "hw/pcmcia.h" -#include "qom/object.h" - -/* Interrupt numbers */ -# define PXA2XX_PIC_SSP3 0 -# define PXA2XX_PIC_USBH2 2 -# define PXA2XX_PIC_USBH1 3 -# define PXA2XX_PIC_KEYPAD 4 -# define PXA2XX_PIC_PWRI2C 6 -# define PXA25X_PIC_HWUART 7 -# define PXA27X_PIC_OST_4_11 7 -# define PXA2XX_PIC_GPIO_0 8 -# define PXA2XX_PIC_GPIO_1 9 -# define PXA2XX_PIC_GPIO_X 10 -# define PXA2XX_PIC_I2S 13 -# define PXA26X_PIC_ASSP 15 -# define PXA25X_PIC_NSSP 16 -# define PXA27X_PIC_SSP2 16 -# define PXA2XX_PIC_LCD 17 -# define PXA2XX_PIC_I2C 18 -# define PXA2XX_PIC_ICP 19 -# define PXA2XX_PIC_STUART 20 -# define PXA2XX_PIC_BTUART 21 -# define PXA2XX_PIC_FFUART 22 -# define PXA2XX_PIC_MMC 23 -# define PXA2XX_PIC_SSP 24 -# define PXA2XX_PIC_DMA 25 -# define PXA2XX_PIC_OST_0 26 -# define PXA2XX_PIC_RTC1HZ 30 -# define PXA2XX_PIC_RTCALARM 31 - -/* DMA requests */ -# define PXA2XX_RX_RQ_I2S 2 -# define PXA2XX_TX_RQ_I2S 3 -# define PXA2XX_RX_RQ_BTUART 4 -# define PXA2XX_TX_RQ_BTUART 5 -# define PXA2XX_RX_RQ_FFUART 6 -# define PXA2XX_TX_RQ_FFUART 7 -# define PXA2XX_RX_RQ_SSP1 13 -# define PXA2XX_TX_RQ_SSP1 14 -# define PXA2XX_RX_RQ_SSP2 15 -# define PXA2XX_TX_RQ_SSP2 16 -# define PXA2XX_RX_RQ_ICP 17 -# define PXA2XX_TX_RQ_ICP 18 -# define PXA2XX_RX_RQ_STUART 19 -# define PXA2XX_TX_RQ_STUART 20 -# define PXA2XX_RX_RQ_MMCI 21 -# define PXA2XX_TX_RQ_MMCI 22 -# define PXA2XX_USB_RQ(x) ((x) + 24) -# define PXA2XX_RX_RQ_SSP3 66 -# define PXA2XX_TX_RQ_SSP3 67 - -# define PXA2XX_SDRAM_BASE 0xa0000000 -# define PXA2XX_INTERNAL_BASE 0x5c000000 -# define PXA2XX_INTERNAL_SIZE 0x40000 - -/* pxa2xx_pic.c */ -DeviceState *pxa2xx_pic_init(hwaddr base, ARMCPU *cpu); - -/* pxa2xx_gpio.c */ -DeviceState *pxa2xx_gpio_init(hwaddr base, - ARMCPU *cpu, DeviceState *pic, int lines); -void pxa2xx_gpio_read_notifier(DeviceState *dev, qemu_irq handler); - -/* pxa2xx_dma.c */ -DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq); -DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq); - -/* pxa2xx_lcd.c */ -typedef struct PXA2xxLCDState PXA2xxLCDState; -PXA2xxLCDState *pxa2xx_lcdc_init(MemoryRegion *sysmem, - hwaddr base, qemu_irq irq); -void pxa2xx_lcd_vsync_notifier(PXA2xxLCDState *s, qemu_irq handler); - -/* pxa2xx_mmci.c */ -#define TYPE_PXA2XX_MMCI "pxa2xx-mmci" -OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxMMCIState, PXA2XX_MMCI) - -PXA2xxMMCIState *pxa2xx_mmci_init(MemoryRegion *sysmem, - hwaddr base, - qemu_irq irq, qemu_irq rx_dma, qemu_irq tx_dma); -void pxa2xx_mmci_handlers(PXA2xxMMCIState *s, qemu_irq readonly, - qemu_irq coverswitch); - -/* pxa2xx_pcmcia.c */ -#define TYPE_PXA2XX_PCMCIA "pxa2xx-pcmcia" -OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxPCMCIAState, PXA2XX_PCMCIA) - -int pxa2xx_pcmcia_attach(void *opaque, PCMCIACardState *card); -int pxa2xx_pcmcia_detach(void *opaque); -void pxa2xx_pcmcia_set_irq_cb(void *opaque, qemu_irq irq, qemu_irq cd_irq); - -/* pxa2xx_keypad.c */ -struct keymap { - int8_t column; - int8_t row; -}; -typedef struct PXA2xxKeyPadState PXA2xxKeyPadState; -PXA2xxKeyPadState *pxa27x_keypad_init(MemoryRegion *sysmem, - hwaddr base, - qemu_irq irq); -void pxa27x_register_keypad(PXA2xxKeyPadState *kp, - const struct keymap *map, int size); - -/* pxa2xx.c */ -#define TYPE_PXA2XX_I2C "pxa2xx_i2c" -OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxI2CState, PXA2XX_I2C) - -PXA2xxI2CState 
*pxa2xx_i2c_init(hwaddr base, - qemu_irq irq, uint32_t page_size); -I2CBus *pxa2xx_i2c_bus(PXA2xxI2CState *s); - -typedef struct PXA2xxI2SState PXA2xxI2SState; - -#define TYPE_PXA2XX_FIR "pxa2xx-fir" -OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxFIrState, PXA2XX_FIR) - -typedef struct { - ARMCPU *cpu; - DeviceState *pic; - qemu_irq reset; - MemoryRegion sdram; - MemoryRegion internal; - MemoryRegion cm_iomem; - MemoryRegion mm_iomem; - MemoryRegion pm_iomem; - DeviceState *dma; - DeviceState *gpio; - PXA2xxLCDState *lcd; - SSIBus **ssp; - PXA2xxI2CState *i2c[2]; - PXA2xxMMCIState *mmc; - PXA2xxPCMCIAState *pcmcia[2]; - PXA2xxI2SState *i2s; - PXA2xxFIrState *fir; - PXA2xxKeyPadState *kp; - - /* Power management */ - hwaddr pm_base; - uint32_t pm_regs[0x40]; - - /* Clock management */ - hwaddr cm_base; - uint32_t cm_regs[4]; - uint32_t clkcfg; - - /* Memory management */ - hwaddr mm_base; - uint32_t mm_regs[0x1a]; - - /* Performance monitoring */ - uint32_t pmnc; -} PXA2xxState; - -struct PXA2xxI2SState { - MemoryRegion iomem; - qemu_irq irq; - qemu_irq rx_dma; - qemu_irq tx_dma; - void (*data_req)(void *, int, int); - - uint32_t control[2]; - uint32_t status; - uint32_t mask; - uint32_t clk; - - int enable; - int rx_len; - int tx_len; - void (*codec_out)(void *, uint32_t); - uint32_t (*codec_in)(void *); - void *opaque; - - int fifo_len; - uint32_t fifo[16]; -}; - -# define PA_FMT "0x%08lx" - -PXA2xxState *pxa270_init(unsigned int sdram_size, const char *revision); -PXA2xxState *pxa255_init(unsigned int sdram_size); - -#endif /* PXA_H */ diff --git a/include/hw/arm/sharpsl.h b/include/hw/arm/sharpsl.h index e986b28c527..1e3992fcd00 100644 --- a/include/hw/arm/sharpsl.h +++ b/include/hw/arm/sharpsl.h @@ -11,7 +11,7 @@ /* zaurus.c */ -#define SL_PXA_PARAM_BASE 0xa0000a00 +#define SL_PXA_PARAM_BASE 0xa0000a00 void sl_bootparam_write(hwaddr ptr); #endif diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h index d1a4a64551d..75b83b2b4ae 100644 --- a/include/hw/arm/smmu-common.h +++ b/include/hw/arm/smmu-common.h @@ -110,7 +110,6 @@ typedef struct SMMUTransCfg { /* Used by stage-1 only. 
*/ bool aa64; /* arch64 or aarch32 translation table */ bool record_faults; /* record fault events */ - uint64_t ttb; /* TT base address */ uint8_t oas; /* output address width */ uint8_t tbi; /* Top Byte Ignore */ int asid; @@ -143,6 +142,11 @@ typedef struct SMMUIOTLBKey { uint8_t level; } SMMUIOTLBKey; +typedef struct SMMUSIDRange { + uint32_t start; + uint32_t end; +} SMMUSIDRange; + struct SMMUState { /* */ SysBusDevice dev; @@ -157,6 +161,8 @@ struct SMMUState { QLIST_HEAD(, SMMUDevice) devices_with_notifiers; uint8_t bus_num; PCIBus *primary_bus; + bool smmu_per_bus; /* SMMU is specific to the primary_bus */ + const PCIIOMMUOps *iommu_ops; }; struct SMMUBaseClass { @@ -175,6 +181,12 @@ OBJECT_DECLARE_TYPE(SMMUState, SMMUBaseClass, ARM_SMMU) /* Return the SMMUPciBus handle associated to a PCI bus number */ SMMUPciBus *smmu_find_smmu_pcibus(SMMUState *s, uint8_t bus_num); +/* Return the SMMUPciBus handle associated to a PCI bus */ +SMMUPciBus *smmu_get_sbus(SMMUState *s, PCIBus *bus); + +/* Initialize SMMUDevice handle associated to a SMMUPCIBus */ +void smmu_init_sdev(SMMUState *s, SMMUDevice *sdev, PCIBus *bus, int devfn); + /* Return the stream ID of an SMMU device */ static inline uint16_t smmu_get_sid(SMMUDevice *sdev) { @@ -220,6 +232,7 @@ void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova, uint8_t tg, uint64_t num_pages, uint8_t ttl); void smmu_iotlb_inv_ipa(SMMUState *s, int vmid, dma_addr_t ipa, uint8_t tg, uint64_t num_pages, uint8_t ttl); +void smmu_configs_inv_sid_range(SMMUState *s, SMMUSIDRange sid_range); /* Unmap the range of all the notifiers registered to any IOMMU mr */ void smmu_inv_notifiers_all(SMMUState *s); diff --git a/include/hw/arm/smmuv3.h b/include/hw/arm/smmuv3.h index d183a627669..99d40dbb906 100644 --- a/include/hw/arm/smmuv3.h +++ b/include/hw/arm/smmuv3.h @@ -63,6 +63,16 @@ struct SMMUv3State { qemu_irq irq[4]; QemuMutex mutex; char *stage; + + /* SMMU has HW accelerator support for nested S1 + s2 */ + bool accel; + struct SMMUv3AccelState *s_accel; + Error *migration_blocker; + bool ril; + bool ats; + uint8_t oas; + bool pasid; + bool cmdqv; }; typedef enum { diff --git a/include/hw/arm/soc_dma.h b/include/hw/arm/soc_dma.h index e93a7499a80..bcdb91425a5 100644 --- a/include/hw/arm/soc_dma.h +++ b/include/hw/arm/soc_dma.h @@ -54,7 +54,7 @@ struct soc_dma_ch_s { int bytes; /* Initialised by the DMA module, call soc_dma_ch_update after writing. */ enum soc_dma_access_type type[2]; - hwaddr vaddr[2]; /* Updated by .transfer_fn(). */ + hwaddr vaddr[2]; /* Updated by .transfer_fn(). */ /* Private */ void *paddr[2]; soc_dma_io_t io_fn[2]; @@ -70,7 +70,7 @@ struct soc_dma_ch_s { struct soc_dma_s { /* Following fields are set by the SoC DMA module and can be used * by anybody. 
*/ - uint64_t drqbmp; /* Is zeroed by soc_dma_reset() */ + uint64_t drqbmp; /* Is zeroed by soc_dma_reset() */ qemu_irq *drq; void *opaque; int64_t freq; diff --git a/include/hw/arm/stm32f405_soc.h b/include/hw/arm/stm32f405_soc.h index d15c03c4b5d..2eeada64ded 100644 --- a/include/hw/arm/stm32f405_soc.h +++ b/include/hw/arm/stm32f405_soc.h @@ -25,6 +25,7 @@ #ifndef HW_ARM_STM32F405_SOC_H #define HW_ARM_STM32F405_SOC_H +#include "hw/misc/stm32_rcc.h" #include "hw/misc/stm32f4xx_syscfg.h" #include "hw/timer/stm32f2xx_timer.h" #include "hw/char/stm32f2xx_usart.h" @@ -55,6 +56,7 @@ struct STM32F405State { ARMv7MState armv7m; + STM32RccState rcc; STM32F4xxSyscfgState syscfg; STM32F4xxExtiState exti; STM32F2XXUsartState usart[STM_NUM_USARTS]; diff --git a/include/hw/arm/stm32l4x5_soc.h b/include/hw/arm/stm32l4x5_soc.h index c243fb0e7f9..c2fae6e23fe 100644 --- a/include/hw/arm/stm32l4x5_soc.h +++ b/include/hw/arm/stm32l4x5_soc.h @@ -24,7 +24,7 @@ #ifndef HW_ARM_STM32L4x5_SOC_H #define HW_ARM_STM32L4x5_SOC_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/arm/armv7m.h" #include "hw/or-irq.h" #include "hw/misc/stm32l4x5_syscfg.h" diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h index ab961bb6a9b..0963356fc26 100644 --- a/include/hw/arm/virt.h +++ b/include/hw/arm/virt.h @@ -36,17 +36,22 @@ #include "hw/arm/boot.h" #include "hw/arm/bsa.h" #include "hw/block/flash.h" -#include "sysemu/kvm.h" +#include "hw/cxl/cxl.h" +#include "system/kvm.h" #include "hw/intc/arm_gicv3_common.h" #include "qom/object.h" #define NUM_GICV2M_SPIS 64 #define NUM_VIRTIO_TRANSPORTS 32 #define NUM_SMMU_IRQS 4 +#define NUM_SMMU_CMDQV_IRQS 1 /* See Linux kernel arch/arm64/include/asm/pvclock-abi.h */ #define PVTIME_SIZE_PER_CPU 64 +/* GPIO pins */ +#define GPIO_PIN_POWER_BUTTON 3 + enum { VIRT_FLASH, VIRT_MEM, @@ -76,12 +81,14 @@ enum { VIRT_ACPI_GED, VIRT_NVDIMM_ACPI, VIRT_PVTIME, + VIRT_ACPI_PCIHP, VIRT_LOWMEMMAP_LAST, }; /* indices of IO regions located after the RAM */ enum { VIRT_HIGH_GIC_REDIST2 = VIRT_LOWMEMMAP_LAST, + VIRT_CXL_HOST, VIRT_HIGH_PCIE_ECAM, VIRT_HIGH_PCIE_MMIO, }; @@ -114,14 +121,8 @@ typedef enum VirtGICType { struct VirtMachineClass { MachineClass parent; - bool disallow_affinity_adjustment; - bool no_its; bool no_tcg_its; - bool no_pmu; - bool claim_edge_triggered_timers; - bool smbios_old_sys_ver; bool no_highmem_compact; - bool no_highmem_ecam; bool no_ged; /* Machines < 4.2 have no support for ACPI GED device */ bool kvm_no_adjvtime; bool no_kvm_steal_time; @@ -131,6 +132,7 @@ struct VirtMachineClass { bool no_cpu_topology; bool no_tcg_lpa2; bool no_ns_el2_virt_timer_irq; + bool no_nested_smmu; }; struct VirtMachineState { @@ -142,6 +144,7 @@ struct VirtMachineState { bool secure; bool highmem; bool highmem_compact; + bool highmem_cxl; bool highmem_ecam; bool highmem_mmio; bool highmem_redists; @@ -176,6 +179,9 @@ struct VirtMachineState { char *oem_id; char *oem_table_id; bool ns_el2_virt_timer_irq; + CXLState cxl_devices_state; + bool legacy_smmuv3_present; + bool pci_preserve_config; }; #define VIRT_ECAM_ID(high) (high ? 
VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM) diff --git a/include/hw/arm/xen_arch_hvm.h b/include/hw/arm/xen_arch_hvm.h deleted file mode 100644 index 8fd645e7232..00000000000 --- a/include/hw/arm/xen_arch_hvm.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef HW_XEN_ARCH_ARM_HVM_H -#define HW_XEN_ARCH_ARM_HVM_H - -#include -void arch_handle_ioreq(XenIOState *state, ioreq_t *req); -void arch_xen_set_memory(XenIOState *state, - MemoryRegionSection *section, - bool add); -#endif diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h index 025beb5532d..05ed641b6b6 100644 --- a/include/hw/arm/xlnx-versal.h +++ b/include/hw/arm/xlnx-versal.h @@ -78,6 +78,7 @@ struct Versal { struct { PL011State uart[XLNX_VERSAL_NR_UARTS]; CadenceGEMState gem[XLNX_VERSAL_NR_GEMS]; + OrIRQState gem_irq_orgate[XLNX_VERSAL_NR_GEMS]; XlnxZDMA adma[XLNX_VERSAL_NR_ADMAS]; VersalUsb2 usb; CanBusState *canbus[XLNX_VERSAL_NR_CANFD]; diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h index 48f79480921..c137ac59e85 100644 --- a/include/hw/arm/xlnx-zynqmp.h +++ b/include/hw/arm/xlnx-zynqmp.h @@ -116,6 +116,7 @@ struct XlnxZynqMPState { MemoryRegion mr_unimp[XLNX_ZYNQMP_NUM_UNIMP_AREAS]; CadenceGEMState gem[XLNX_ZYNQMP_NUM_GEMS]; + OrIRQState gem_irq_orgate[XLNX_ZYNQMP_NUM_GEMS]; CadenceUARTState uart[XLNX_ZYNQMP_NUM_UARTS]; XlnxZynqMPCANState can[XLNX_ZYNQMP_NUM_CAN]; SysbusAHCIState sata; diff --git a/include/hw/block/flash.h b/include/hw/block/flash.h index 2b5ccd92f46..3671f0174d0 100644 --- a/include/hw/block/flash.h +++ b/include/hw/block/flash.h @@ -44,38 +44,6 @@ PFlashCFI02 *pflash_cfi02_register(hwaddr base, uint16_t unlock_addr1, int be); -/* nand.c */ -DeviceState *nand_init(BlockBackend *blk, int manf_id, int chip_id); -void nand_setpins(DeviceState *dev, uint8_t cle, uint8_t ale, - uint8_t ce, uint8_t wp, uint8_t gnd); -void nand_getpins(DeviceState *dev, int *rb); -void nand_setio(DeviceState *dev, uint32_t value); -uint32_t nand_getio(DeviceState *dev); -uint32_t nand_getbuswidth(DeviceState *dev); - -#define NAND_MFR_TOSHIBA 0x98 -#define NAND_MFR_SAMSUNG 0xec -#define NAND_MFR_FUJITSU 0x04 -#define NAND_MFR_NATIONAL 0x8f -#define NAND_MFR_RENESAS 0x07 -#define NAND_MFR_STMICRO 0x20 -#define NAND_MFR_HYNIX 0xad -#define NAND_MFR_MICRON 0x2c - -/* onenand.c */ -void *onenand_raw_otp(DeviceState *onenand_device); - -/* ecc.c */ -typedef struct { - uint8_t cp; /* Column parity */ - uint16_t lp[2]; /* Line parity */ - uint16_t count; -} ECCState; - -uint8_t ecc_digest(ECCState *s, uint8_t sample); -void ecc_reset(ECCState *s); -extern const VMStateDescription vmstate_ecc_state; - /* m25p80.c */ #define TYPE_M25P80 "m25p80-generic" diff --git a/include/hw/boards.h b/include/hw/boards.h index 48ff6d8b93f..f94713e6e29 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -3,13 +3,14 @@ #ifndef HW_BOARDS_H #define HW_BOARDS_H -#include "exec/memory.h" -#include "sysemu/hostmem.h" -#include "sysemu/blockdev.h" +#include "system/memory.h" +#include "system/hostmem.h" +#include "system/blockdev.h" #include "qapi/qapi-types-machine.h" #include "qemu/module.h" #include "qom/object.h" #include "hw/core/cpu.h" +#include "hw/resettable.h" #define TYPE_MACHINE_SUFFIX "-machine" @@ -43,8 +44,16 @@ void machine_set_cpu_numa_node(MachineState *machine, Error **errp); void machine_parse_smp_config(MachineState *ms, const SMPConfiguration *config, Error **errp); +bool machine_parse_smp_cache(MachineState *ms, + const SmpCachePropertiesList *caches, + Error **errp); unsigned int 
machine_topo_get_cores_per_socket(const MachineState *ms); unsigned int machine_topo_get_threads_per_socket(const MachineState *ms); +CpuTopologyLevel machine_get_cache_topo_level(const MachineState *ms, + CacheLevelAndType cache); +void machine_set_cache_topo_level(MachineState *ms, CacheLevelAndType cache, + CpuTopologyLevel level); +bool machine_check_smp_cache(const MachineState *ms, Error **errp); void machine_memory_devices_init(MachineState *ms, hwaddr base, uint64_t size); /** @@ -145,6 +154,10 @@ typedef struct { * @books_supported - whether books are supported by the machine * @drawers_supported - whether drawers are supported by the machine * @modules_supported - whether modules are supported by the machine + * @cache_supported - whether cache (l1d, l1i, l2 and l3) configurations are + * supported by the machine + * @has_caches - whether cache properties are explicitly specified in the + * user provided smp-cache configuration */ typedef struct { bool prefer_sockets; @@ -154,6 +167,8 @@ typedef struct { bool books_supported; bool drawers_supported; bool modules_supported; + bool cache_supported[CACHE_LEVEL_AND_TYPE__MAX]; + bool has_caches; } SMPCompatProps; /** @@ -215,6 +230,10 @@ typedef struct { * Return the type of KVM corresponding to the kvm-type string option or * computed based on other criteria such as the host kernel capabilities. * kvm-type may be NULL if it is not needed. + * @hvf_get_physical_address_range: + * Returns the physical address range in bits to use for the HVF virtual + * machine based on the current board's memory map. This may be NULL if it + * is not needed. * @numa_mem_supported: * true if '--numa node.mem' option is supported and false otherwise * @hotplug_allowed: @@ -253,9 +272,10 @@ struct MachineClass { const char *deprecation_reason; void (*init)(MachineState *state); - void (*reset)(MachineState *state, ShutdownCause reason); + void (*reset)(MachineState *state, ResetType type); void (*wakeup)(MachineState *state); int (*kvm_type)(MachineState *machine, const char *arg); + int (*hvf_get_physical_address_range)(MachineState *machine); BlockInterfaceType block_default_type; int units_per_default_bus; @@ -266,9 +286,8 @@ no_parallel:1, no_floppy:1, no_cdrom:1, - no_sdcard:1, - pci_allow_0_address:1, - legacy_fw_cfg_order:1; + pci_allow_0_address:1; + bool auto_create_sdcard; bool is_default; const char *default_machine_opts; const char *default_boot_order; @@ -308,6 +327,8 @@ struct MachineClass { int64_t (*get_default_cpu_node_id)(const MachineState *ms, int idx); ram_addr_t (*fixup_ram_size)(ram_addr_t size); uint64_t smbios_memory_device_size; + bool (*create_default_memdev)(MachineState *ms, const char *path, + Error **errp); }; /** @@ -363,6 +384,10 @@ typedef struct CpuTopology { unsigned int max_cpus; } CpuTopology; +typedef struct SmpCache { + SmpCacheProperties props[CACHE_LEVEL_AND_TYPE__MAX]; +} SmpCache; + /** * MachineState: */ @@ -387,6 +412,7 @@ struct MachineState { bool enable_graphics; ConfidentialGuestSupport *cgs; HostMemoryBackend *memdev; + bool aux_ram_share; /* * convenience alias to ram_memdev_id backend memory region * or to numa container memory region @@ -408,13 +434,16 @@ struct MachineState { BootConfiguration boot_config; char *kernel_filename; char *kernel_cmdline; + char *shim_filename; char *initrd_filename; const char *cpu_type; AccelState *accelerator; CPUArchIdList *possible_cpus; CpuTopology smp; + SmpCache smp_cache; struct NVDIMMState *nvdimms_state; struct NumaState *numa_state; + bool 
acpi_spcr_enabled; }; /* @@ -607,7 +636,11 @@ struct MachineState { /* * How many years/major releases for each phase * of the life cycle. Assumes use of versioning - * scheme where major is bumped each year + * scheme where major is bumped each year. + * + * These values must match the ver_machine_deprecation_version + * and ver_machine_deletion_version logic in docs/conf.py and + * the text in docs/about/deprecated.rst */ #define MACHINE_VER_DELETION_MAJOR 6 #define MACHINE_VER_DEPRECATION_MAJOR 3 @@ -621,11 +654,42 @@ struct MachineState { " years old are subject to deletion after " \ stringify(MACHINE_VER_DELETION_MAJOR) " years" -#define _MACHINE_VER_IS_EXPIRED_IMPL(cutoff, major, minor) \ +#define _MACHINE_VER_IS_CURRENT_EXPIRED(cutoff, major, minor) \ (((QEMU_VERSION_MAJOR - major) > cutoff) || \ (((QEMU_VERSION_MAJOR - major) == cutoff) && \ (QEMU_VERSION_MINOR - minor) >= 0)) +#define _MACHINE_VER_IS_NEXT_MINOR_EXPIRED(cutoff, major, minor) \ + (((QEMU_VERSION_MAJOR - major) > cutoff) || \ + (((QEMU_VERSION_MAJOR - major) == cutoff) && \ + ((QEMU_VERSION_MINOR + 1) - minor) >= 0)) + +#define _MACHINE_VER_IS_NEXT_MAJOR_EXPIRED(cutoff, major, minor) \ + ((((QEMU_VERSION_MAJOR + 1) - major) > cutoff) || \ + ((((QEMU_VERSION_MAJOR + 1) - major) == cutoff) && \ + (0 - minor) >= 0)) + +/* + * - The first check applies to formal releases + * - The second check applies to dev snapshots / release candidates + * where the next major version is the same. + * e.g. 9.0.50, 9.1.50, 9.0.90, 9.1.90 + * - The third check applies to dev snapshots / release candidates + * where the next major version will change. + * e.g. 9.2.50, 9.2.90 + * + * NB: this assumes we do 3 minor releases per year, before bumping major, + * and dev snapshots / release candidates are numbered with micro >= 50 + * If this ever changes the logic below will need modifying.... + */ +#define _MACHINE_VER_IS_EXPIRED_IMPL(cutoff, major, minor) \ + ((QEMU_VERSION_MICRO < 50 && \ + _MACHINE_VER_IS_CURRENT_EXPIRED(cutoff, major, minor)) || \ + (QEMU_VERSION_MICRO >= 50 && QEMU_VERSION_MINOR < 2 && \ + _MACHINE_VER_IS_NEXT_MINOR_EXPIRED(cutoff, major, minor)) || \ + (QEMU_VERSION_MICRO >= 50 && QEMU_VERSION_MINOR == 2 && \ + _MACHINE_VER_IS_NEXT_MAJOR_EXPIRED(cutoff, major, minor))) + #define _MACHINE_VER_IS_EXPIRED2(cutoff, major, minor) \ _MACHINE_VER_IS_EXPIRED_IMPL(cutoff, major, minor) #define _MACHINE_VER_IS_EXPIRED3(cutoff, major, minor, micro) \ @@ -690,33 +754,16 @@ struct MachineState { * suitable period of time has passed, it will cause * execution of the method to return, avoiding registration * of the machine - * - * The new deprecation and deletion policy for versioned - * machine types was introduced in QEMU 9.1.0. - * - * Under the new policy a number of old machine types (any - * prior to 2.12) would be liable for immediate deletion - * which would be a violation of our historical deprecation - * and removal policy - * - * Thus deletions are temporarily gated on existance of - * the env variable "QEMU_DELETE_MACHINES" / QEMU version - * number >= 10.1.0. This gate can be deleted in the 10.1.0 - * dev cycle */ #define MACHINE_VER_DELETION(...) 
\ do { \ if (MACHINE_VER_SHOULD_DELETE(__VA_ARGS__)) { \ - if (getenv("QEMU_DELETE_MACHINES") || \ - QEMU_VERSION_MAJOR > 10 || (QEMU_VERSION_MAJOR == 10 && \ - QEMU_VERSION_MINOR >= 1)) { \ - return; \ - } \ + return; \ } \ } while (0) #define DEFINE_MACHINE(namestr, machine_initfn) \ - static void machine_initfn##_class_init(ObjectClass *oc, void *data) \ + static void machine_initfn##_class_init(ObjectClass *oc, const void *data) \ { \ MachineClass *mc = MACHINE_CLASS(oc); \ machine_initfn(mc); \ @@ -732,6 +779,15 @@ struct MachineState { } \ type_init(machine_initfn##_register_types) +extern GlobalProperty hw_compat_10_0[]; +extern const size_t hw_compat_10_0_len; + +extern GlobalProperty hw_compat_9_2[]; +extern const size_t hw_compat_9_2_len; + +extern GlobalProperty hw_compat_9_1[]; +extern const size_t hw_compat_9_1_len; + extern GlobalProperty hw_compat_9_0[]; extern const size_t hw_compat_9_0_len; @@ -807,19 +863,4 @@ extern const size_t hw_compat_2_7_len; extern GlobalProperty hw_compat_2_6[]; extern const size_t hw_compat_2_6_len; -extern GlobalProperty hw_compat_2_5[]; -extern const size_t hw_compat_2_5_len; - -extern GlobalProperty hw_compat_2_4[]; -extern const size_t hw_compat_2_4_len; - -extern GlobalProperty hw_compat_2_3[]; -extern const size_t hw_compat_2_3_len; - -extern GlobalProperty hw_compat_2_2[]; -extern const size_t hw_compat_2_2_len; - -extern GlobalProperty hw_compat_2_1[]; -extern const size_t hw_compat_2_1_len; - #endif diff --git a/include/hw/char/escc.h b/include/hw/char/escc.h index 5669a5b811a..8c4c6a7730c 100644 --- a/include/hw/char/escc.h +++ b/include/hw/char/escc.h @@ -46,6 +46,9 @@ typedef struct ESCCChannelState { uint8_t rx, tx; QemuInputHandlerState *hs; char *sunkbd_layout; + int sunmouse_dx; + int sunmouse_dy; + int sunmouse_buttons; } ESCCChannelState; struct ESCCState { diff --git a/include/hw/char/imx_serial.h b/include/hw/char/imx_serial.h index 65f0e97c76b..90ba3ff18c2 100644 --- a/include/hw/char/imx_serial.h +++ b/include/hw/char/imx_serial.h @@ -109,13 +109,13 @@ struct IMXSerialState { uint32_t ucr1; uint32_t ucr2; uint32_t uts1; + uint32_t ufcr; /* * The registers below are implemented just so that the * guest OS sees what it has written */ uint32_t onems; - uint32_t ufcr; uint32_t ubmr; uint32_t ubrc; uint32_t ucr3; diff --git a/include/hw/char/max78000_uart.h b/include/hw/char/max78000_uart.h new file mode 100644 index 00000000000..cf90d51dbfa --- /dev/null +++ b/include/hw/char/max78000_uart.h @@ -0,0 +1,78 @@ +/* + * MAX78000 UART + * + * Copyright (c) 2025 Jackson Donaldson + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_MAX78000_UART_H +#define HW_MAX78000_UART_H + +#include "hw/sysbus.h" +#include "chardev/char-fe.h" +#include "qemu/fifo8.h" +#include "qom/object.h" + +#define UART_CTRL 0x0 +#define UART_STATUS 0x4 +#define UART_INT_EN 0x8 +#define UART_INT_FL 0xc +#define UART_CLKDIV 0x10 +#define UART_OSR 0x14 +#define UART_TXPEEK 0x18 +#define UART_PNR 0x1c +#define UART_FIFO 0x20 +#define UART_DMA 0x30 +#define UART_WKEN 0x34 +#define UART_WKFL 0x38 + +/* CTRL */ +#define UART_CTF_DIS (1 << 7) +#define UART_FLUSH_TX (1 << 8) +#define UART_FLUSH_RX (1 << 9) +#define UART_BCLKEN (1 << 15) +#define UART_BCLKRDY (1 << 19) + +/* STATUS */ +#define UART_RX_LVL 8 +#define UART_TX_EM (1 << 6) +#define UART_RX_FULL (1 << 5) +#define UART_RX_EM (1 << 4) + +/* PNR (Pin Control Register) */ +#define UART_CTS 1 +#define UART_RTS (1 << 1) + +/* INT_EN / INT_FL */ +#define UART_RX_THD (1 << 4) +#define UART_TX_HE (1 
<< 6) +#define UART_RXBUFLEN 0x100 +#define TYPE_MAX78000_UART "max78000-uart" +OBJECT_DECLARE_SIMPLE_TYPE(Max78000UartState, MAX78000_UART) + +struct Max78000UartState { + SysBusDevice parent_obj; + + MemoryRegion mmio; + + uint32_t ctrl; + uint32_t status; + uint32_t int_en; + uint32_t int_fl; + uint32_t clkdiv; + uint32_t osr; + uint32_t txpeek; + uint32_t pnr; + uint32_t fifo; + uint32_t dma; + uint32_t wken; + uint32_t wkfl; + + Fifo8 rx_fifo; + + CharBackend chr; + qemu_irq irq; +}; +#endif /* HW_MAX78000_UART_H */ diff --git a/include/hw/char/mchp_pfsoc_mmuart.h b/include/hw/char/mchp_pfsoc_mmuart.h index b0e14ca3554..a7b8b1b08b3 100644 --- a/include/hw/char/mchp_pfsoc_mmuart.h +++ b/include/hw/char/mchp_pfsoc_mmuart.h @@ -29,7 +29,7 @@ #define HW_MCHP_PFSOC_MMUART_H #include "hw/sysbus.h" -#include "hw/char/serial.h" +#include "hw/char/serial-mm.h" #define MCHP_PFSOC_MMUART_REG_COUNT 13 diff --git a/include/hw/char/parallel-isa.h b/include/hw/char/parallel-isa.h index 5284b2ffecc..3edaf9dbe49 100644 --- a/include/hw/char/parallel-isa.h +++ b/include/hw/char/parallel-isa.h @@ -12,7 +12,7 @@ #include "parallel.h" -#include "exec/ioport.h" +#include "system/ioport.h" #include "hw/isa/isa.h" #include "qom/object.h" diff --git a/include/hw/char/parallel.h b/include/hw/char/parallel.h index cfb97cc7cc9..7b04478226b 100644 --- a/include/hw/char/parallel.h +++ b/include/hw/char/parallel.h @@ -1,7 +1,7 @@ #ifndef HW_PARALLEL_H #define HW_PARALLEL_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/isa/isa.h" #include "hw/irq.h" #include "chardev/char-fe.h" diff --git a/include/hw/char/pl011.h b/include/hw/char/pl011.h index d8538021323..299ca9b18bb 100644 --- a/include/hw/char/pl011.h +++ b/include/hw/char/pl011.h @@ -32,7 +32,6 @@ struct PL011State { SysBusDevice parent_obj; MemoryRegion iomem; - uint32_t readbuff; uint32_t flags; uint32_t lcr; uint32_t rsr; @@ -53,6 +52,11 @@ struct PL011State { Clock *clk; bool migrate_clk; const unsigned char *id; + /* + * Since some users embed this struct directly, we must + * ensure that the C struct is at least as big as the Rust one. + */ + uint8_t padding_for_rust[16]; }; DeviceState *pl011_create(hwaddr addr, qemu_irq irq, Chardev *chr); diff --git a/include/hw/char/riscv_htif.h b/include/hw/char/riscv_htif.h index df493fdf6bc..ee0ca299024 100644 --- a/include/hw/char/riscv_htif.h +++ b/include/hw/char/riscv_htif.h @@ -22,7 +22,7 @@ #include "chardev/char.h" #include "chardev/char-fe.h" -#include "exec/memory.h" +#include "system/memory.h" #define TYPE_HTIF_UART "riscv.htif.uart" diff --git a/include/hw/char/serial-isa.h b/include/hw/char/serial-isa.h new file mode 100644 index 00000000000..8517afa128a --- /dev/null +++ b/include/hw/char/serial-isa.h @@ -0,0 +1,38 @@ +/* + * QEMU ISA 16550A UART emulation + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2008 Citrix Systems, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef HW_SERIAL_ISA_H +#define HW_SERIAL_ISA_H + +#include "hw/isa/isa.h" + +#define MAX_ISA_SERIAL_PORTS 4 + +#define TYPE_ISA_SERIAL "isa-serial" +void serial_hds_isa_init(ISABus *bus, int from, int to); +void isa_serial_set_iobase(ISADevice *serial, hwaddr iobase); +void isa_serial_set_enabled(ISADevice *serial, bool enabled); + +#endif diff --git a/include/hw/char/serial-mm.h b/include/hw/char/serial-mm.h new file mode 100644 index 00000000000..77abd098e0d --- /dev/null +++ b/include/hw/char/serial-mm.h @@ -0,0 +1,52 @@ +/* + * QEMU 16550A UART emulation + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2008 Citrix Systems, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef HW_SERIAL_MM_H +#define HW_SERIAL_MM_H + +#include "hw/char/serial.h" +#include "system/memory.h" +#include "chardev/char.h" +#include "hw/sysbus.h" +#include "qom/object.h" + +#define TYPE_SERIAL_MM "serial-mm" +OBJECT_DECLARE_SIMPLE_TYPE(SerialMM, SERIAL_MM) + +struct SerialMM { + SysBusDevice parent; + + SerialState serial; + + uint8_t regshift; + uint8_t endianness; +}; + +SerialMM *serial_mm_init(MemoryRegion *address_space, + hwaddr base, int regshift, + qemu_irq irq, int baudbase, + Chardev *chr, enum device_endian end); + +#endif diff --git a/include/hw/char/serial.h b/include/hw/char/serial.h index 6e14099ee7f..4bf90a46f3c 100644 --- a/include/hw/char/serial.h +++ b/include/hw/char/serial.h @@ -27,10 +27,8 @@ #define HW_SERIAL_H #include "chardev/char-fe.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qemu/fifo8.h" -#include "chardev/char.h" -#include "hw/sysbus.h" #include "qom/object.h" #define UART_FIFO_LENGTH 16 /* 16550A Fifo Length */ @@ -81,38 +79,10 @@ struct SerialState { }; typedef struct SerialState SerialState; -struct SerialMM { - SysBusDevice parent; - - SerialState serial; - - uint8_t regshift; - uint8_t endianness; -}; - extern const VMStateDescription vmstate_serial; extern const MemoryRegionOps serial_io_ops; -void serial_set_frequency(SerialState *s, uint32_t frequency); - #define TYPE_SERIAL "serial" OBJECT_DECLARE_SIMPLE_TYPE(SerialState, SERIAL) -#define TYPE_SERIAL_MM "serial-mm" -OBJECT_DECLARE_SIMPLE_TYPE(SerialMM, SERIAL_MM) - -SerialMM *serial_mm_init(MemoryRegion *address_space, - hwaddr base, int regshift, - qemu_irq irq, int baudbase, - Chardev *chr, enum device_endian end); - -/* serial-isa.c */ - -#define MAX_ISA_SERIAL_PORTS 4 - -#define TYPE_ISA_SERIAL "isa-serial" -void serial_hds_isa_init(ISABus *bus, int from, int to); -void isa_serial_set_iobase(ISADevice *serial, hwaddr iobase); -void isa_serial_set_enabled(ISADevice *serial, bool enabled); - #endif diff --git a/include/hw/char/sifive_uart.h b/include/hw/char/sifive_uart.h index 7f6c79f8bdb..0846cf62188 100644 --- a/include/hw/char/sifive_uart.h +++ b/include/hw/char/sifive_uart.h @@ -24,6 +24,7 @@ #include "hw/qdev-properties.h" #include "hw/sysbus.h" #include "qom/object.h" +#include "qemu/fifo8.h" enum { SIFIVE_UART_TXFIFO = 0, @@ -48,9 +49,13 @@ enum { SIFIVE_UART_IP_RXWM = 2 /* Receive watermark interrupt pending */ }; +#define SIFIVE_UART_TXFIFO_FULL 0x80000000 + #define SIFIVE_UART_GET_TXCNT(txctrl) ((txctrl >> 16) & 0x7) #define SIFIVE_UART_GET_RXCNT(rxctrl) ((rxctrl >> 16) & 0x7) + #define SIFIVE_UART_RX_FIFO_SIZE 8 +#define SIFIVE_UART_TX_FIFO_SIZE 8 #define TYPE_SIFIVE_UART "riscv.sifive.uart" OBJECT_DECLARE_SIMPLE_TYPE(SiFiveUARTState, SIFIVE_UART) @@ -63,13 +68,20 @@ struct SiFiveUARTState { qemu_irq irq; MemoryRegion mmio; CharBackend chr; - uint8_t rx_fifo[SIFIVE_UART_RX_FIFO_SIZE]; - uint8_t rx_fifo_len; + + uint32_t txfifo; uint32_t ie; uint32_t ip; uint32_t txctrl; uint32_t rxctrl; uint32_t div; + + uint8_t rx_fifo[SIFIVE_UART_RX_FIFO_SIZE]; + uint8_t rx_fifo_len; + + Fifo8 tx_fifo; + + QEMUTimer *fifo_trigger_handle; }; SiFiveUARTState *sifive_uart_create(MemoryRegion *address_space, hwaddr base, diff --git a/include/hw/clock.h b/include/hw/clock.h index eb58599131c..a279bd4ba5e 100644 --- a/include/hw/clock.h +++ b/include/hw/clock.h @@ -141,14 +141,6 @@ Clock *clock_new(Object *parent, const char *name); void clock_set_callback(Clock *clk, ClockCallback *cb, void *opaque, unsigned int events); -/** - * clock_clear_callback: - * @clk: the 
clock to delete the callback from - * - * Unregister the callback registered with clock_set_callback. - */ -void clock_clear_callback(Clock *clk); - /** * clock_set_source: * @clk: the clock. diff --git a/include/hw/core/accel-cpu.h b/include/hw/core/accel-cpu.h deleted file mode 100644 index 24dad45ab9e..00000000000 --- a/include/hw/core/accel-cpu.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Accelerator interface, specializes CPUClass - * This header is used only by target-specific code. - * - * Copyright 2021 SUSE LLC - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef ACCEL_CPU_H -#define ACCEL_CPU_H - -/* - * This header is used to define new accelerator-specific target-specific - * accelerator cpu subclasses. - * It uses CPU_RESOLVING_TYPE, so this is clearly target-specific. - * - * Do not try to use for any other purpose than the implementation of new - * subclasses in target/, or the accel implementation itself in accel/ - */ - -#define TYPE_ACCEL_CPU "accel-" CPU_RESOLVING_TYPE -#define ACCEL_CPU_NAME(name) (name "-" TYPE_ACCEL_CPU) -typedef struct AccelCPUClass AccelCPUClass; -DECLARE_CLASS_CHECKERS(AccelCPUClass, ACCEL_CPU, TYPE_ACCEL_CPU) - -typedef struct AccelCPUClass { - /*< private >*/ - ObjectClass parent_class; - /*< public >*/ - - void (*cpu_class_init)(CPUClass *cc); - void (*cpu_instance_init)(CPUState *cpu); - bool (*cpu_target_realize)(CPUState *cpu, Error **errp); -} AccelCPUClass; - -#endif /* ACCEL_CPU_H */ diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 1c9c775df65..5eaf41a566f 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -33,6 +33,7 @@ #include "qemu/bitmap.h" #include "qemu/rcu_queue.h" #include "qemu/queue.h" +#include "qemu/lockcnt.h" #include "qemu/thread.h" #include "qom/object.h" @@ -101,11 +102,9 @@ struct SysemuCPUOps; * CPUClass: * @class_by_name: Callback to map -cpu command line model name to an * instantiatable CPU type. + * @list_cpus: list available CPU models and flags. * @parse_features: Callback to parse command line arguments. * @reset_dump_flags: #CPUDumpFlags to use for reset logging. - * @has_work: Callback for checking if there is work to do. - * @mmu_index: Callback for choosing softmmu mmu index; - * may be used internally by memory_rw_debug without TCG. * @memory_rw_debug: Callback for GDB memory access. * @dump_state: Callback for dumping state. * @query_cpu_fast: @@ -123,17 +122,24 @@ struct SysemuCPUOps; * @get_pc: Callback for getting the Program Counter register. * As above, with the semantics of the target architecture. * @gdb_read_register: Callback for letting GDB read a register. + * No more than @gdb_num_core_regs registers can be read. * @gdb_write_register: Callback for letting GDB write a register. + * No more than @gdb_num_core_regs registers can be written. * @gdb_adjust_breakpoint: Callback for adjusting the address of a * breakpoint. Used by AVR to handle a gdb mis-feature with * its Harvard architecture split code and data. * @gdb_num_core_regs: Number of core registers accessible to GDB or 0 to infer * from @gdb_core_xml_file. * @gdb_core_xml_file: File name for core registers GDB XML description. + * @gdb_get_core_xml_file: Optional callback that returns the file name for + * the core registers GDB XML description. The returned value is expected to + * be a simple constant string: the caller will not g_free() it. If this + * is NULL then @gdb_core_xml_file will be used instead. 
* @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop * before the insn which triggers a watchpoint rather than after it. * @gdb_arch_name: Optional callback that returns the architecture name known - * to GDB. The caller must free the returned string with g_free. + * to GDB. The returned value is expected to be a simple constant string: + * the caller will not g_free() it. * @disas_set_info: Setup architecture specific components of disassembly info * @adjust_watchpoint_address: Perform a target-specific adjustment to an * address before attempting to match it against watchpoints. @@ -148,12 +154,11 @@ struct CPUClass { /*< public >*/ ObjectClass *(*class_by_name)(const char *cpu_model); + void (*list_cpus)(void); void (*parse_features)(const char *typename, char *str, Error **errp); - bool (*has_work)(CPUState *cpu); - int (*mmu_index)(CPUState *cpu, bool ifetch); int (*memory_rw_debug)(CPUState *cpu, vaddr addr, - uint8_t *buf, int len, bool is_write); + uint8_t *buf, size_t len, bool is_write); void (*dump_state)(CPUState *cpu, FILE *, int flags); void (*query_cpu_fast)(CPUState *cpu, CpuInfoFast *value); int64_t (*get_arch_id)(CPUState *cpu); @@ -165,6 +170,7 @@ struct CPUClass { const char *gdb_core_xml_file; const gchar * (*gdb_arch_name)(CPUState *cpu); + const char * (*gdb_get_core_xml_file)(CPUState *cpu); void (*disas_set_info)(CPUState *cpu, disassemble_info *info); @@ -205,7 +211,7 @@ struct CPUClass { * so the layout is not as critical as that of CPUTLBEntry. This is * also why we don't want to combine the two structs. */ -typedef struct CPUTLBEntryFull { +struct CPUTLBEntryFull { /* * @xlat_section contains: * - in the lower TARGET_PAGE_BITS, a physical section number @@ -261,7 +267,7 @@ typedef struct CPUTLBEntryFull { bool guarded; } arm; } extra; -} CPUTLBEntryFull; +}; /* * Data elements that are per MMU mode, minus the bits accessed by @@ -350,6 +356,8 @@ typedef union IcountDecr { * from CPUArchState, via small negative offsets. * @can_do_io: True if memory-mapped IO is allowed. * @plugin_mem_cbs: active plugin memory callbacks + * @plugin_mem_value_low: 64 lower bits of latest accessed mem value. + * @plugin_mem_value_high: 64 higher bits of latest accessed mem value. */ typedef struct CPUNegativeOffsetState { CPUTLB tlb; @@ -358,6 +366,9 @@ typedef struct CPUNegativeOffsetState { * The callback pointer are accessed via TCG (see gen_empty_mem_helper). */ GArray *plugin_mem_cbs; + uint64_t plugin_mem_value_low; + uint64_t plugin_mem_value_high; + int32_t plugin_cb_flags; #endif IcountDecr icount_decr; bool can_do_io; @@ -402,7 +413,6 @@ struct qemu_work_item; * Under TCG this value is propagated to @tcg_cflags. * See TranslationBlock::TCG CF_CLUSTER_MASK. * @tcg_cflags: Pre-computed cflags for this cpu. - * @nr_cores: Number of cores within this CPU package. * @nr_threads: Number of threads within this CPU core. * @thread: Host thread details, only live once @created is #true * @sem: WIN32 only semaphore used only for qtest @@ -432,6 +442,7 @@ struct qemu_work_item; * @opaque: User data. * @mem_io_pc: Host Program Counter at which the memory was accessed. * @accel: Pointer to accelerator specific state. + * @vcpu_dirty: Hardware accelerator is not synchronized with QEMU state * @kvm_fd: vCPU file descriptor for KVM. * @work_mutex: Lock to prevent multiple access to @work_list. * @work_list: List of pending asynchronous work. 
@@ -461,7 +472,6 @@ struct CPUState { CPUClass *cc; /*< public >*/ - int nr_cores; int nr_threads; struct QemuThread *thread; @@ -529,7 +539,6 @@ struct CPUState { uint32_t kvm_fetch_index; uint64_t dirty_pages; int kvm_vcpu_stats_fd; - bool vcpu_dirty; /* Use by accel-block: CPU is executing an ioctl() */ QemuLockCnt in_ioctl_lock; @@ -545,6 +554,7 @@ struct CPUState { uint32_t halted; int32_t exception_index; + bool vcpu_dirty; AccelCPUState *accel; /* Used to keep track of an outstanding cpu throttle thread for migration @@ -579,7 +589,7 @@ QEMU_BUILD_BUG_ON(offsetof(CPUState, neg) != static inline CPUArchState *cpu_env(CPUState *cpu) { - /* We validate that CPUArchState follows CPUState in cpu-all.h. */ + /* We validate that CPUArchState follows CPUState in cpu-target.c */ return (CPUArchState *)(cpu + 1); } @@ -594,15 +604,6 @@ extern CPUTailQ cpus_queue; extern __thread CPUState *current_cpu; -/** - * qemu_tcg_mttcg_enabled: - * Check whether we are running MultiThread TCG or not. - * - * Returns: %true if we are in MTTCG mode %false otherwise. - */ -extern bool mttcg_enabled; -#define qemu_tcg_mttcg_enabled() (mttcg_enabled) - /** * cpu_paging_enabled: * @cpu: The CPU whose state is to be inspected. @@ -622,8 +623,6 @@ bool cpu_paging_enabled(const CPUState *cpu); bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, Error **errp); -#if !defined(CONFIG_USER_ONLY) - /** * cpu_write_elf64_note: * @f: pointer to a function that writes memory to a file @@ -673,8 +672,6 @@ int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, */ GuestPanicInformation *cpu_get_crash_info(CPUState *cpu); -#endif /* !CONFIG_USER_ONLY */ - /** * CPUDumpFlags: * @CPU_DUMP_CODE: @@ -698,7 +695,6 @@ enum CPUDumpFlags { */ void cpu_dump_state(CPUState *cpu, FILE *f, int flags); -#ifndef CONFIG_USER_ONLY /** * cpu_get_phys_page_attrs_debug: * @cpu: The CPU to obtain the physical page address for. @@ -745,7 +741,15 @@ int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs); */ bool cpu_virtio_is_big_endian(CPUState *cpu); -#endif /* CONFIG_USER_ONLY */ +/** + * cpu_has_work: + * @cpu: The vCPU to check. + * + * Checks whether the CPU has work to do. + * + * Returns: %true if the CPU has work, %false otherwise. + */ +bool cpu_has_work(CPUState *cpu); /** * cpu_list_add: @@ -811,22 +815,6 @@ CPUState *cpu_create(const char *typename); */ const char *parse_cpu_option(const char *cpu_option); -/** - * cpu_has_work: - * @cpu: The vCPU to check. - * - * Checks whether the CPU has work to do. - * - * Returns: %true if the CPU has work, %false otherwise. - */ -static inline bool cpu_has_work(CPUState *cpu) -{ - CPUClass *cc = CPU_GET_CLASS(cpu); - - g_assert(cc->has_work); - return cc->has_work(cpu); -} - /** * qemu_cpu_is_self: * @cpu: The vCPU to check against. 
@@ -963,9 +951,7 @@ void cpu_interrupt(CPUState *cpu, int mask); */ static inline void cpu_set_pc(CPUState *cpu, vaddr addr) { - CPUClass *cc = CPU_GET_CLASS(cpu); - - cc->set_pc(cpu, addr); + cpu->cc->set_pc(cpu, addr); } /** @@ -1114,36 +1100,6 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask) return false; } -#if defined(CONFIG_USER_ONLY) -static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, - int flags, CPUWatchpoint **watchpoint) -{ - return -ENOSYS; -} - -static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, - vaddr len, int flags) -{ - return -ENOSYS; -} - -static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu, - CPUWatchpoint *wp) -{ -} - -static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask) -{ -} -#else -int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, - int flags, CPUWatchpoint **watchpoint); -int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, - vaddr len, int flags); -void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); -void cpu_watchpoint_remove_all(CPUState *cpu, int mask); -#endif - /** * cpu_get_address_space: * @cpu: CPU to get address space from @@ -1159,31 +1115,23 @@ G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...) /* $(top_srcdir)/cpu.c */ void cpu_class_init_props(DeviceClass *dc); +void cpu_exec_class_post_init(CPUClass *cc); void cpu_exec_initfn(CPUState *cpu); +void cpu_vmstate_register(CPUState *cpu); +void cpu_vmstate_unregister(CPUState *cpu); bool cpu_exec_realizefn(CPUState *cpu, Error **errp); void cpu_exec_unrealizefn(CPUState *cpu); void cpu_exec_reset_hold(CPUState *cpu); -const char *target_name(void); - -#ifdef COMPILING_PER_TARGET - -#ifndef CONFIG_USER_ONLY - extern const VMStateDescription vmstate_cpu_common; -#define VMSTATE_CPU() { \ - .name = "parent_obj", \ - .size = sizeof(CPUState), \ - .vmsd = &vmstate_cpu_common, \ - .flags = VMS_STRUCT, \ - .offset = 0, \ -} -#endif /* !CONFIG_USER_ONLY */ - -#endif /* COMPILING_PER_TARGET */ - #define UNASSIGNED_CPU_INDEX -1 #define UNASSIGNED_CLUSTER_INDEX -1 +enum CacheType { + DATA_CACHE, + INSTRUCTION_CACHE, + UNIFIED_CACHE +}; + #endif diff --git a/include/hw/core/resetcontainer.h b/include/hw/core/resetcontainer.h index 23db0c7a880..daeb18c1ea8 100644 --- a/include/hw/core/resetcontainer.h +++ b/include/hw/core/resetcontainer.h @@ -20,7 +20,7 @@ #include "qom/object.h" #define TYPE_RESETTABLE_CONTAINER "resettable-container" -OBJECT_DECLARE_TYPE(ResettableContainer, ResettableContainerClass, RESETTABLE_CONTAINER) +OBJECT_DECLARE_SIMPLE_TYPE(ResettableContainer, RESETTABLE_CONTAINER) /** * resettable_container_add: Add a resettable object to the container diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h index 24d003fe041..877892373f9 100644 --- a/include/hw/core/sysemu-cpu-ops.h +++ b/include/hw/core/sysemu-cpu-ops.h @@ -7,8 +7,8 @@ * See the COPYING file in the top-level directory. */ -#ifndef SYSEMU_CPU_OPS_H -#define SYSEMU_CPU_OPS_H +#ifndef SYSTEM_CPU_OPS_H +#define SYSTEM_CPU_OPS_H #include "hw/core/cpu.h" @@ -16,6 +16,10 @@ * struct SysemuCPUOps: System operations specific to a CPU class */ typedef struct SysemuCPUOps { + /** + * @has_work: Callback for checking if there is work to do. + */ + bool (*has_work)(CPUState *cpu); /* MANDATORY NON-NULL */ /** * @get_memory_mapping: Callback for obtaining the memory mappings. 
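With has_work dropped from CPUClass and re-homed in SysemuCPUOps as a mandatory hook, the cpu_has_work() declared above now dispatches through the system-emulation ops. A hedged sketch of how a target might wire this up; the names and the interrupt policy are hypothetical, not taken from the series:

    static bool example_cpu_has_work(CPUState *cs)
    {
        /* Minimal policy: runnable when a hard interrupt is pending. */
        return (cs->interrupt_request & CPU_INTERRUPT_HARD) != 0;
    }

    static const struct SysemuCPUOps example_sysemu_ops = {
        .has_work = example_cpu_has_work,   /* MANDATORY NON-NULL */
        /* ... get_phys_page_debug, vmsd, etc. unchanged ... */
    };

    static void example_cpu_class_init(ObjectClass *oc, const void *data)
    {
        CPUClass *cc = CPU_CLASS(oc);

        cc->sysemu_ops = &example_sysemu_ops;
    }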
*/ @@ -89,4 +93,4 @@ typedef struct SysemuCPUOps { } SysemuCPUOps; -#endif /* SYSEMU_CPU_OPS_H */ +#endif /* SYSTEM_CPU_OPS_H */ diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h deleted file mode 100644 index 34318cf0e60..00000000000 --- a/include/hw/core/tcg-cpu-ops.h +++ /dev/null @@ -1,239 +0,0 @@ -/* - * TCG CPU-specific operations - * - * Copyright 2021 SUSE LLC - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef TCG_CPU_OPS_H -#define TCG_CPU_OPS_H - -#include "exec/breakpoint.h" -#include "exec/hwaddr.h" -#include "exec/memattrs.h" -#include "exec/mmu-access-type.h" -#include "exec/vaddr.h" - -struct TCGCPUOps { - /** - * @initialize: Initialize TCG state - * - * Called when the first CPU is realized. - */ - void (*initialize)(void); - /** - * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock - * - * This is called when we abandon execution of a TB before starting it, - * and must set all parts of the CPU state which the previous TB in the - * chain may not have updated. - * By default, when this is NULL, a call is made to @set_pc(tb->pc). - * - * If more state needs to be restored, the target must implement a - * function to restore all the state, and register it here. - */ - void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb); - /** - * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn - * - * This is called when we unwind state in the middle of a TB, - * usually before raising an exception. Set all part of the CPU - * state which are tracked insn-by-insn in the target-specific - * arguments to start_insn, passed as @data. - */ - void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb, - const uint64_t *data); - - /** @cpu_exec_enter: Callback for cpu_exec preparation */ - void (*cpu_exec_enter)(CPUState *cpu); - /** @cpu_exec_exit: Callback for cpu_exec cleanup */ - void (*cpu_exec_exit)(CPUState *cpu); - /** @debug_excp_handler: Callback for handling debug exceptions */ - void (*debug_excp_handler)(CPUState *cpu); - -#ifdef CONFIG_USER_ONLY - /** - * @fake_user_interrupt: Callback for 'fake exception' handling. - * - * Simulate 'fake exception' which will be handled outside the - * cpu execution loop (hack for x86 user mode). - */ - void (*fake_user_interrupt)(CPUState *cpu); - - /** - * record_sigsegv: - * @cpu: cpu context - * @addr: faulting guest address - * @access_type: access was read/write/execute - * @maperr: true for invalid page, false for permission fault - * @ra: host pc for unwinding - * - * We are about to raise SIGSEGV with si_code set for @maperr, - * and si_addr set for @addr. Record anything further needed - * for the signal ucontext_t. - * - * If the emulated kernel does not provide anything to the signal - * handler with anything besides the user context registers, and - * the siginfo_t, then this hook need do nothing and may be omitted. - * Otherwise, record the data and return; the caller will raise - * the signal, unwind the cpu state, and return to the main loop. - * - * If it is simpler to re-use the sysemu tlb_fill code, @ra is provided - * so that a "normal" cpu exception can be raised. In this case, - * the signal must be raised by the architecture cpu_loop. 
- */ - void (*record_sigsegv)(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - bool maperr, uintptr_t ra); - /** - * record_sigbus: - * @cpu: cpu context - * @addr: misaligned guest address - * @access_type: access was read/write/execute - * @ra: host pc for unwinding - * - * We are about to raise SIGBUS with si_code BUS_ADRALN, - * and si_addr set for @addr. Record anything further needed - * for the signal ucontext_t. - * - * If the emulated kernel does not provide the signal handler with - * anything besides the user context registers, and the siginfo_t, - * then this hook need do nothing and may be omitted. - * Otherwise, record the data and return; the caller will raise - * the signal, unwind the cpu state, and return to the main loop. - * - * If it is simpler to re-use the sysemu do_unaligned_access code, - * @ra is provided so that a "normal" cpu exception can be raised. - * In this case, the signal must be raised by the architecture cpu_loop. - */ - void (*record_sigbus)(CPUState *cpu, vaddr addr, - MMUAccessType access_type, uintptr_t ra); -#else - /** @do_interrupt: Callback for interrupt handling. */ - void (*do_interrupt)(CPUState *cpu); - /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */ - bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request); - /** - * @cpu_exec_halt: Callback for handling halt in cpu_exec. - * - * The target CPU should do any special processing here that it needs - * to do when the CPU is in the halted state. - * - * Return true to indicate that the CPU should now leave halt, false - * if it should remain in the halted state. (This should generally - * be the same value that cpu_has_work() would return.) - * - * This method must be provided. If the target does not need to - * do anything special for halt, the same function used for its - * CPUClass::has_work method can be used here, as they have the - * same function signature. - */ - bool (*cpu_exec_halt)(CPUState *cpu); - /** - * @tlb_fill: Handle a softmmu tlb miss - * - * If the access is valid, call tlb_set_page and return true; - * if the access is invalid and probe is true, return false; - * otherwise raise an exception and do not return. - */ - bool (*tlb_fill)(CPUState *cpu, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); - /** - * @do_transaction_failed: Callback for handling failed memory transactions - * (ie bus faults or external aborts; not MMU faults) - */ - void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr, - unsigned size, MMUAccessType access_type, - int mmu_idx, MemTxAttrs attrs, - MemTxResult response, uintptr_t retaddr); - /** - * @do_unaligned_access: Callback for unaligned access handling - * The callback must exit via raising an exception. - */ - G_NORETURN void (*do_unaligned_access)(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); - - /** - * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM - */ - vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len); - - /** - * @debug_check_watchpoint: return true if the architectural - * watchpoint whose address has matched should really fire, used by ARM - * and RISC-V - */ - bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp); - - /** - * @debug_check_breakpoint: return true if the architectural - * breakpoint whose PC has matched should really fire. 
- */ - bool (*debug_check_breakpoint)(CPUState *cpu); - - /** - * @io_recompile_replay_branch: Callback for cpu_io_recompile. - * - * The cpu has been stopped, and cpu_restore_state_from_tb has been - * called. If the faulting instruction is in a delay slot, and the - * target architecture requires re-execution of the branch, then - * adjust the cpu state as required and return true. - */ - bool (*io_recompile_replay_branch)(CPUState *cpu, - const TranslationBlock *tb); - /** - * @need_replay_interrupt: Return %true if @interrupt_request - * needs to be recorded for replay purposes. - */ - bool (*need_replay_interrupt)(int interrupt_request); -#endif /* !CONFIG_USER_ONLY */ -}; - -#if defined(CONFIG_USER_ONLY) - -static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, - MemTxAttrs atr, int fl, uintptr_t ra) -{ -} - -static inline int cpu_watchpoint_address_matches(CPUState *cpu, - vaddr addr, vaddr len) -{ - return 0; -} - -#else - -/** - * cpu_check_watchpoint: - * @cpu: cpu context - * @addr: guest virtual address - * @len: access length - * @attrs: memory access attributes - * @flags: watchpoint access type - * @ra: unwind return address - * - * Check for a watchpoint hit in [addr, addr+len) of the type - * specified by @flags. Exit via exception with a hit. - */ -void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, - MemTxAttrs attrs, int flags, uintptr_t ra); - -/** - * cpu_watchpoint_address_matches: - * @cpu: cpu context - * @addr: guest virtual address - * @len: access length - * - * Return the watchpoint flags that apply to [addr, addr+len). - * If no watchpoint is registered for the range, the result is 0. - */ -int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len); - -#endif - -#endif /* TCG_CPU_OPS_H */ diff --git a/include/hw/cris/etraxfs.h b/include/hw/cris/etraxfs.h deleted file mode 100644 index 012c4e99743..00000000000 --- a/include/hw/cris/etraxfs.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * QEMU ETRAX System Emulator - * - * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -#ifndef HW_ETRAXFS_H -#define HW_ETRAXFS_H - -#include "net/net.h" -#include "hw/cris/etraxfs_dma.h" -#include "hw/qdev-properties.h" -#include "hw/sysbus.h" -#include "qapi/error.h" - -DeviceState *etraxfs_eth_init(hwaddr base, int phyaddr, - struct etraxfs_dma_client *dma_out, - struct etraxfs_dma_client *dma_in); - -static inline DeviceState *etraxfs_ser_create(hwaddr addr, - qemu_irq irq, - Chardev *chr) -{ - DeviceState *dev; - SysBusDevice *s; - - dev = qdev_new("etraxfs-serial"); - s = SYS_BUS_DEVICE(dev); - qdev_prop_set_chr(dev, "chardev", chr); - sysbus_realize_and_unref(s, &error_fatal); - sysbus_mmio_map(s, 0, addr); - sysbus_connect_irq(s, 0, irq); - return dev; -} - -#endif diff --git a/include/hw/cris/etraxfs_dma.h b/include/hw/cris/etraxfs_dma.h deleted file mode 100644 index 095d76b9560..00000000000 --- a/include/hw/cris/etraxfs_dma.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef HW_ETRAXFS_DMA_H -#define HW_ETRAXFS_DMA_H - -#include "exec/hwaddr.h" - -struct dma_context_metadata { - /* data descriptor md */ - uint16_t metadata; -}; - -struct etraxfs_dma_client -{ - /* DMA controller. */ - int channel; - void *ctrl; - - /* client. */ - struct { - int (*push)(void *opaque, unsigned char *buf, - int len, bool eop); - void (*pull)(void *opaque); - void (*metadata_push)(void *opaque, - const struct dma_context_metadata *md); - void *opaque; - } client; -}; - -void *etraxfs_dmac_init(hwaddr base, int nr_channels); -void etraxfs_dmac_connect(void *opaque, int channel, qemu_irq *line, - int input); -void etraxfs_dmac_connect_client(void *opaque, int c, - struct etraxfs_dma_client *cl); -int etraxfs_dmac_input(struct etraxfs_dma_client *client, - void *buf, int len, int eop); - -#endif diff --git a/include/hw/cxl/cxl.h b/include/hw/cxl/cxl.h index 75e47b68644..998f495a984 100644 --- a/include/hw/cxl/cxl.h +++ b/include/hw/cxl/cxl.h @@ -23,10 +23,13 @@ #define CXL_DEVICE_REG_BAR_IDX 2 #define CXL_WINDOW_MAX 10 +#define CXL_NUM_EXTENTS_SUPPORTED 512 typedef struct PXBCXLDev PXBCXLDev; typedef struct CXLFixedWindow { + SysBusDevice parent_obj; + int index; uint64_t size; char **targets; PXBCXLDev *target_hbs[16]; @@ -37,12 +40,13 @@ typedef struct CXLFixedWindow { MemoryRegion mr; hwaddr base; } CXLFixedWindow; +#define TYPE_CXL_FMW "cxl-fmw" +OBJECT_DECLARE_SIMPLE_TYPE(CXLFixedWindow, CXL_FMW) typedef struct CXLState { bool is_enabled; MemoryRegion host_mr; unsigned int next_mr_idx; - GList *fixed_windows; CXLFixedMemoryWindowOptionsList *cfmw_list; } CXLState; diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h index fdd0f4e62b3..89411c80936 100644 --- a/include/hw/cxl/cxl_device.h +++ b/include/hw/cxl/cxl_device.h @@ -133,6 +133,15 @@ typedef enum { CXL_MBOX_MAX = 0x20 } CXLRetCode; +/* r3.2 Section 7.6.7.6.2: Table 7-66: DSMAS Flags Bits */ +typedef enum { + CXL_DSMAS_FLAGS_NONVOLATILE = 2, + CXL_DSMAS_FLAGS_SHARABLE = 3, + CXL_DSMAS_FLAGS_HW_MANAGED_COHERENCY = 4, + CXL_DSMAS_FLAGS_IC_SPECIFIC_DC_MANAGEMENT = 5, + CXL_DSMAS_FLAGS_RDONLY = 6, +} CXLDSMASFlags; + typedef struct CXLCCI CXLCCI; typedef struct cxl_device_state CXLDeviceState; struct cxl_cmd; @@ -176,10 +185,12 @@ typedef struct CXLCCI { uint16_t opcode; uint16_t complete_pct; uint16_t ret_code; /* Current value of retcode */ + bool aborted; uint64_t starttime; /* set by each bg cmd, cleared by the bg_timer when complete */ uint64_t runtime; QEMUTimer *timer; + QemuMutex lock; /* serializes mbox abort vs timer cb */ } bg; /* firmware update */ @@ -201,6 +212,7 @@ typedef struct CXLCCI { 
DeviceState *d; /* Pointer to the device hosting the protocol conversion */ DeviceState *intf; + bool initialized; } CXLCCI; typedef struct cxl_device_state { @@ -264,8 +276,8 @@ void cxl_device_register_block_init(Object *obj, CXLDeviceState *dev, typedef struct CXLType3Dev CXLType3Dev; typedef struct CSWMBCCIDev CSWMBCCIDev; /* Set up default values for the register block */ -void cxl_device_register_init_t3(CXLType3Dev *ct3d); -void cxl_device_register_init_swcci(CSWMBCCIDev *sw); +void cxl_device_register_init_t3(CXLType3Dev *ct3d, int msi_n); +void cxl_device_register_init_swcci(CSWMBCCIDev *sw, int msi_n); /* * CXL r3.1 Section 8.2.8.1: CXL Device Capabilities Array Register @@ -316,6 +328,7 @@ void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max); void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf, DeviceState *d, size_t payload_max); void cxl_init_cci(CXLCCI *cci, size_t payload_max); +void cxl_destroy_cci(CXLCCI *cci); void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256], size_t payload_max); int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd, @@ -463,18 +476,6 @@ typedef struct CXLMemPatrolScrubWriteAttrs { #define CXL_MEMDEV_PS_ENABLE_DEFAULT 0 /* CXL memory device DDR5 ECS control attributes */ -typedef struct CXLMemECSReadAttrs { - uint8_t ecs_log_cap; - uint8_t ecs_cap; - uint16_t ecs_config; - uint8_t ecs_flags; -} QEMU_PACKED CXLMemECSReadAttrs; - -typedef struct CXLMemECSWriteAttrs { - uint8_t ecs_log_cap; - uint16_t ecs_config; -} QEMU_PACKED CXLMemECSWriteAttrs; - #define CXL_ECS_GET_FEATURE_VERSION 0x01 #define CXL_ECS_SET_FEATURE_VERSION 0x01 #define CXL_ECS_LOG_ENTRY_TYPE_DEFAULT 0x01 @@ -483,6 +484,26 @@ typedef struct CXLMemECSWriteAttrs { #define CXL_ECS_MODE_DEFAULT 0 #define CXL_ECS_NUM_MEDIA_FRUS 3 /* Default */ +typedef struct CXLMemECSFRUReadAttrs { + uint8_t ecs_cap; + uint16_t ecs_config; + uint8_t ecs_flags; +} QEMU_PACKED CXLMemECSFRUReadAttrs; + +typedef struct CXLMemECSReadAttrs { + uint8_t ecs_log_cap; + CXLMemECSFRUReadAttrs fru_attrs[CXL_ECS_NUM_MEDIA_FRUS]; +} QEMU_PACKED CXLMemECSReadAttrs; + +typedef struct CXLMemECSFRUWriteAttrs { + uint16_t ecs_config; +} QEMU_PACKED CXLMemECSFRUWriteAttrs; + +typedef struct CXLMemECSWriteAttrs { + uint8_t ecs_log_cap; + CXLMemECSFRUWriteAttrs fru_attrs[CXL_ECS_NUM_MEDIA_FRUS]; +} QEMU_PACKED CXLMemECSWriteAttrs; + #define DCD_MAX_NUM_REGION 8 typedef struct CXLDCExtentRaw { @@ -518,6 +539,14 @@ typedef struct CXLDCRegion { uint32_t dsmadhandle; uint8_t flags; unsigned long *blk_bitmap; + uint64_t supported_blk_size_bitmask; + QemuMutex bitmap_lock; + /* Following bools make up dsmas flags, as defined in the CDAT */ + bool nonvolatile; + bool sharable; + bool hw_managed_coherency; + bool ic_specific_dc_management; + bool rdonly; } CXLDCRegion; typedef struct CXLSetFeatureInfo { @@ -528,6 +557,21 @@ typedef struct CXLSetFeatureInfo { size_t data_size; } CXLSetFeatureInfo; +struct CXLSanitizeInfo; + +typedef struct CXLAlertConfig { + uint8_t valid_alerts; + uint8_t enable_alerts; + uint8_t life_used_crit_alert_thresh; + uint8_t life_used_warn_thresh; + uint16_t over_temp_crit_alert_thresh; + uint16_t under_temp_crit_alert_thresh; + uint16_t over_temp_warn_thresh; + uint16_t under_temp_warn_thresh; + uint16_t cor_vmem_err_warn_thresh; + uint16_t cor_pmem_err_warn_thresh; +} QEMU_PACKED CXLAlertConfig; + struct CXLType3Dev { /* Private */ PCIDevice parent_obj; @@ -549,6 +593,12 @@ struct CXLType3Dev { CXLCCI vdm_fm_owned_ld_mctp_cci; 
CXLCCI ld0_cci; + CXLAlertConfig alert_config; + + /* PCIe link characteristics */ + PCIExpLinkSpeed speed; + PCIExpLinkWidth width; + /* DOE */ DOECap doe_cdat; @@ -571,8 +621,8 @@ struct CXLType3Dev { CXLMemPatrolScrubReadAttrs patrol_scrub_attrs; CXLMemPatrolScrubWriteAttrs patrol_scrub_wr_attrs; /* ECS control attributes */ - CXLMemECSReadAttrs ecs_attrs[CXL_ECS_NUM_MEDIA_FRUS]; - CXLMemECSWriteAttrs ecs_wr_attrs[CXL_ECS_NUM_MEDIA_FRUS]; + CXLMemECSReadAttrs ecs_attrs; + CXLMemECSWriteAttrs ecs_wr_attrs; struct dynamic_capacity { HostMemoryBackend *host_dc; @@ -585,11 +635,14 @@ struct CXLType3Dev { CXLDCExtentList extents; CXLDCExtentGroupList extents_pending; uint32_t total_extent_count; + uint32_t nr_extents_accepted; uint32_t ext_list_gen_seq; uint8_t num_regions; /* 0-8 regions */ CXLDCRegion regions[DCD_MAX_NUM_REGION]; } dc; + + struct CXLSanitizeInfo *media_op_sanitize; }; #define TYPE_CXL_TYPE3 "cxl-type3" @@ -661,11 +714,22 @@ CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group, uint16_t shared_seq); void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list, CXLDCExtentGroup *group); -void cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list); +uint32_t cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list); void ct3_set_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len); void ct3_clear_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len); bool ct3_test_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len); +void cxl_assign_event_header(CXLEventRecordHdr *hdr, + const QemuUUID *uuid, uint32_t flags, + uint8_t length, uint64_t timestamp); +void cxl_create_dc_event_records_for_extents(CXLType3Dev *ct3d, + CXLDCEventType type, + CXLDCExtentRaw extents[], + uint32_t ext_count); +bool cxl_extents_overlaps_dpa_range(CXLDCExtentList *list, + uint64_t dpa, uint64_t len); +bool cxl_extent_groups_overlaps_dpa_range(CXLDCExtentGroupList *list, + uint64_t dpa, uint64_t len); #endif diff --git a/include/hw/cxl/cxl_events.h b/include/hw/cxl/cxl_events.h index 38cadaa0f34..758b075a64b 100644 --- a/include/hw/cxl/cxl_events.h +++ b/include/hw/cxl/cxl_events.h @@ -184,4 +184,19 @@ typedef struct CXLEventDynamicCapacity { uint32_t tags_avail; } QEMU_PACKED CXLEventDynamicCapacity; +/* CXL r3.1 Table 8-50: Dynamic Capacity Event Record */ +static const QemuUUID dynamic_capacity_uuid = { + .data = UUID(0xca95afa7, 0xf183, 0x4018, 0x8c, 0x2f, + 0x95, 0x26, 0x8e, 0x10, 0x1a, 0x2a), +}; + +typedef enum CXLDCEventType { + DC_EVENT_ADD_CAPACITY = 0x0, + DC_EVENT_RELEASE_CAPACITY = 0x1, + DC_EVENT_FORCED_RELEASE_CAPACITY = 0x2, + DC_EVENT_REGION_CONFIG_UPDATED = 0x3, + DC_EVENT_ADD_CAPACITY_RSP = 0x4, + DC_EVENT_CAPACITY_RELEASED = 0x5, +} CXLDCEventType; + #endif /* CXL_EVENTS_H */ diff --git a/include/hw/cxl/cxl_host.h b/include/hw/cxl/cxl_host.h index c9bc9c7c500..cd3c368c86a 100644 --- a/include/hw/cxl/cxl_host.h +++ b/include/hw/cxl/cxl_host.h @@ -14,8 +14,11 @@ #define CXL_HOST_H void cxl_machine_init(Object *obj, CXLState *state); -void cxl_fmws_link_targets(CXLState *stat, Error **errp); +void cxl_fmws_link_targets(Error **errp); void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp); +hwaddr cxl_fmws_set_memmap(hwaddr base, hwaddr max_addr); +void cxl_fmws_update_mmio(void); +GSList *cxl_fmws_get_all_sorted(void); extern const MemoryRegionOps cfmws_ops; diff --git a/include/hw/cxl/cxl_mailbox.h b/include/hw/cxl/cxl_mailbox.h index beb048052e1..a05d7cb5b7d 100644 --- 
a/include/hw/cxl/cxl_mailbox.h +++ b/include/hw/cxl/cxl_mailbox.h @@ -8,11 +8,18 @@ #ifndef CXL_MAILBOX_H #define CXL_MAILBOX_H +#define CXL_MBOX_CONFIG_CHANGE_COLD_RESET (1) #define CXL_MBOX_IMMEDIATE_CONFIG_CHANGE (1 << 1) #define CXL_MBOX_IMMEDIATE_DATA_CHANGE (1 << 2) #define CXL_MBOX_IMMEDIATE_POLICY_CHANGE (1 << 3) #define CXL_MBOX_IMMEDIATE_LOG_CHANGE (1 << 4) #define CXL_MBOX_SECURITY_STATE_CHANGE (1 << 5) #define CXL_MBOX_BACKGROUND_OPERATION (1 << 6) +#define CXL_MBOX_BACKGROUND_OPERATION_ABORT (1 << 7) +#define CXL_MBOX_SECONDARY_MBOX_SUPPORTED (1 << 8) +#define CXL_MBOX_REQUEST_ABORT_BACKGROUND_OP_SUPPORTED (1 << 9) +#define CXL_MBOX_CEL_10_TO_11_VALID (1 << 10) +#define CXL_MBOX_CONFIG_CHANGE_CONV_RESET (1 << 11) +#define CXL_MBOX_CONFIG_CHANGE_CXL_RESET (1 << 12) #endif diff --git a/include/hw/display/blizzard.h b/include/hw/display/blizzard.h deleted file mode 100644 index 5b330188354..00000000000 --- a/include/hw/display/blizzard.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Epson S1D13744/S1D13745 (Blizzard/Hailstorm/Tornado) LCD/TV controller. - * - * Copyright (C) 2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef HW_DISPLAY_BLIZZARD_H -#define HW_DISPLAY_BLIZZARD_H - - -void *s1d13745_init(qemu_irq gpio_int); -void s1d13745_write(void *opaque, int dc, uint16_t value); -void s1d13745_write_block(void *opaque, int dc, - void *buf, size_t len, int pitch); -uint16_t s1d13745_read(void *opaque, int dc); - -#endif diff --git a/include/hw/display/edid.h b/include/hw/display/edid.h index 520f8ec2027..91c0a428af0 100644 --- a/include/hw/display/edid.h +++ b/include/hw/display/edid.h @@ -1,6 +1,8 @@ #ifndef EDID_H #define EDID_H +#define EDID_NAME_MAX_LENGTH 12 + typedef struct qemu_edid_info { const char *vendor; /* http://www.uefi.org/pnp_id_list */ const char *name; diff --git a/include/hw/display/macfb.h b/include/hw/display/macfb.h index 27cebefc9e7..0fae1f33a65 100644 --- a/include/hw/display/macfb.h +++ b/include/hw/display/macfb.h @@ -13,7 +13,7 @@ #ifndef MACFB_H #define MACFB_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/irq.h" #include "hw/nubus/nubus.h" #include "hw/sysbus.h" diff --git a/include/hw/display/ramfb.h b/include/hw/display/ramfb.h index a7e00191445..172aa6dc89e 100644 --- a/include/hw/display/ramfb.h +++ b/include/hw/display/ramfb.h @@ -6,7 +6,7 @@ /* ramfb.c */ typedef struct RAMFBState RAMFBState; void ramfb_display_update(QemuConsole *con, RAMFBState *s); -RAMFBState *ramfb_setup(Error **errp); +RAMFBState *ramfb_setup(bool romfile, Error **errp); extern const VMStateDescription ramfb_vmstate; diff --git a/include/hw/display/tc6393xb.h b/include/hw/display/tc6393xb.h deleted file mode 100644 index f9263bf98a6..00000000000 --- a/include/hw/display/tc6393xb.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Toshiba TC6393XB I/O Controller. - * Found in Sharp Zaurus SL-6000 (tosa) or some - * Toshiba e-Series PDAs. - * - * Copyright (c) 2007 Hervé Poussineau - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- */ - -#ifndef HW_DISPLAY_TC6393XB_H -#define HW_DISPLAY_TC6393XB_H - -typedef struct TC6393xbState TC6393xbState; - -TC6393xbState *tc6393xb_init(struct MemoryRegion *sysmem, - uint32_t base, qemu_irq irq); -qemu_irq tc6393xb_l3v_get(TC6393xbState *s); - -#endif diff --git a/include/hw/dma/i8257.h b/include/hw/dma/i8257.h index 4342e4a91ed..33b6286d5a4 100644 --- a/include/hw/dma/i8257.h +++ b/include/hw/dma/i8257.h @@ -2,7 +2,7 @@ #define HW_I8257_H #include "hw/isa/isa.h" -#include "exec/ioport.h" +#include "system/ioport.h" #include "qom/object.h" #define TYPE_I8257 "i8257" diff --git a/include/hw/dma/xlnx-zdma.h b/include/hw/dma/xlnx-zdma.h index efc75217d59..9c57c499102 100644 --- a/include/hw/dma/xlnx-zdma.h +++ b/include/hw/dma/xlnx-zdma.h @@ -31,7 +31,7 @@ #include "hw/sysbus.h" #include "hw/register.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qom/object.h" #define ZDMA_R_MAX (0x204 / 4) diff --git a/include/hw/dma/xlnx_dpdma.h b/include/hw/dma/xlnx_dpdma.h index 40537a848b4..484b2e377f0 100644 --- a/include/hw/dma/xlnx_dpdma.h +++ b/include/hw/dma/xlnx_dpdma.h @@ -26,8 +26,7 @@ #define XLNX_DPDMA_H #include "hw/sysbus.h" -#include "ui/console.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qom/object.h" #define XLNX_DPDMA_REG_ARRAY_SIZE (0x1000 >> 2) diff --git a/include/hw/fsi/aspeed_apb2opb.h b/include/hw/fsi/aspeed_apb2opb.h index f6a2387abf2..878619eafa7 100644 --- a/include/hw/fsi/aspeed_apb2opb.h +++ b/include/hw/fsi/aspeed_apb2opb.h @@ -8,7 +8,7 @@ #ifndef FSI_ASPEED_APB2OPB_H #define FSI_ASPEED_APB2OPB_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/fsi/fsi-master.h" #include "hw/sysbus.h" diff --git a/include/hw/fsi/cfam.h b/include/hw/fsi/cfam.h index 7abc3b287be..cceb4bd6f18 100644 --- a/include/hw/fsi/cfam.h +++ b/include/hw/fsi/cfam.h @@ -7,7 +7,7 @@ #ifndef FSI_CFAM_H #define FSI_CFAM_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/fsi/fsi.h" #include "hw/fsi/lbus.h" diff --git a/include/hw/fsi/fsi-master.h b/include/hw/fsi/fsi-master.h index 68e5f56db2e..b634ecd3938 100644 --- a/include/hw/fsi/fsi-master.h +++ b/include/hw/fsi/fsi-master.h @@ -7,7 +7,7 @@ #ifndef FSI_FSI_MASTER_H #define FSI_FSI_MASTER_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/qdev-core.h" #include "hw/fsi/fsi.h" #include "hw/fsi/cfam.h" diff --git a/include/hw/fsi/fsi.h b/include/hw/fsi/fsi.h index e00f6ef078c..f34765ed80b 100644 --- a/include/hw/fsi/fsi.h +++ b/include/hw/fsi/fsi.h @@ -7,7 +7,7 @@ #ifndef FSI_FSI_H #define FSI_FSI_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/qdev-core.h" #include "hw/fsi/lbus.h" #include "qemu/bitops.h" diff --git a/include/hw/fsi/lbus.h b/include/hw/fsi/lbus.h index 558268c013f..12519073cd3 100644 --- a/include/hw/fsi/lbus.h +++ b/include/hw/fsi/lbus.h @@ -9,7 +9,7 @@ #include "hw/qdev-core.h" #include "qemu/units.h" -#include "exec/memory.h" +#include "system/memory.h" #define TYPE_FSI_LBUS_DEVICE "fsi.lbus.device" OBJECT_DECLARE_SIMPLE_TYPE(FSILBusDevice, FSI_LBUS_DEVICE) diff --git a/include/hw/gpio/aspeed_gpio.h b/include/hw/gpio/aspeed_gpio.h index 90a12ae3182..e6b2fe71b50 100644 --- a/include/hw/gpio/aspeed_gpio.h +++ b/include/hw/gpio/aspeed_gpio.h @@ -70,12 +70,14 @@ typedef struct AspeedGPIOReg { } AspeedGPIOReg; struct AspeedGPIOClass { - SysBusDevice parent_obj; + SysBusDeviceClass parent_class; const GPIOSetProperties *props; uint32_t nr_gpio_pins; uint32_t nr_gpio_sets; const AspeedGPIOReg *reg_table; unsigned 
reg_table_count; + uint64_t mem_size; + const MemoryRegionOps *reg_ops; }; struct AspeedGPIOState { @@ -88,7 +90,7 @@ struct AspeedGPIOState { qemu_irq irq; qemu_irq gpios[ASPEED_GPIO_MAX_NR_SETS][ASPEED_GPIOS_PER_SET]; -/* Parallel GPIO Registers */ + /* Parallel GPIO Registers */ uint32_t debounce_regs[ASPEED_GPIO_NR_DEBOUNCE_REGS]; struct GPIOSets { uint32_t data_value; /* Reflects pin values */ diff --git a/include/hw/gpio/npcm7xx_gpio.h b/include/hw/gpio/npcm7xx_gpio.h index b1d771bd776..7c0bf61a960 100644 --- a/include/hw/gpio/npcm7xx_gpio.h +++ b/include/hw/gpio/npcm7xx_gpio.h @@ -15,7 +15,7 @@ #ifndef NPCM7XX_GPIO_H #define NPCM7XX_GPIO_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/sysbus.h" /* Number of pins managed by each controller. */ diff --git a/include/hw/hw.h b/include/hw/hw.h index 045c1c8b09b..1b33d12b7fc 100644 --- a/include/hw/hw.h +++ b/include/hw/hw.h @@ -1,10 +1,6 @@ #ifndef QEMU_HW_H #define QEMU_HW_H -#ifdef CONFIG_USER_ONLY -#error Cannot include hw/hw.h from user emulation -#endif - G_NORETURN void hw_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2); #endif diff --git a/include/hw/hyperv/hyperv-proto.h b/include/hw/hyperv/hyperv-proto.h index 4a2297307b0..fffc5ce342f 100644 --- a/include/hw/hyperv/hyperv-proto.h +++ b/include/hw/hyperv/hyperv-proto.h @@ -61,6 +61,18 @@ #define HV_MESSAGE_X64_APIC_EOI 0x80010004 #define HV_MESSAGE_X64_LEGACY_FP_ERROR 0x80010005 +/* + * Hyper-V Synthetic debug options MSR + */ +#define HV_X64_MSR_SYNDBG_CONTROL 0x400000F1 +#define HV_X64_MSR_SYNDBG_STATUS 0x400000F2 +#define HV_X64_MSR_SYNDBG_SEND_BUFFER 0x400000F3 +#define HV_X64_MSR_SYNDBG_RECV_BUFFER 0x400000F4 +#define HV_X64_MSR_SYNDBG_PENDING_BUFFER 0x400000F5 +#define HV_X64_MSR_SYNDBG_OPTIONS 0x400000FF + +#define HV_X64_SYNDBG_OPTION_USE_HCALLS BIT(2) + /* * Message flags */ diff --git a/include/hw/hyperv/hyperv.h b/include/hw/hyperv/hyperv.h index d717b4e13d4..63a8b65278f 100644 --- a/include/hw/hyperv/hyperv.h +++ b/include/hw/hyperv/hyperv.h @@ -10,7 +10,8 @@ #ifndef HW_HYPERV_HYPERV_H #define HW_HYPERV_HYPERV_H -#include "cpu-qom.h" +#include "exec/hwaddr.h" +#include "hw/core/cpu.h" #include "hw/hyperv/hyperv-proto.h" typedef struct HvSintRoute HvSintRoute; diff --git a/include/hw/hyperv/vmbus.h b/include/hw/hyperv/vmbus.h index 5c505852f22..06b948bbb08 100644 --- a/include/hw/hyperv/vmbus.h +++ b/include/hw/hyperv/vmbus.h @@ -10,8 +10,8 @@ #ifndef HW_HYPERV_VMBUS_H #define HW_HYPERV_VMBUS_H -#include "sysemu/sysemu.h" -#include "sysemu/dma.h" +#include "system/system.h" +#include "system/dma.h" #include "hw/qdev-core.h" #include "migration/vmstate.h" #include "hw/hyperv/vmbus-proto.h" diff --git a/include/hw/i2c/aspeed_i2c.h b/include/hw/i2c/aspeed_i2c.h index fad5e9259a5..2daacc10ce0 100644 --- a/include/hw/i2c/aspeed_i2c.h +++ b/include/hw/i2c/aspeed_i2c.h @@ -14,8 +14,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * with this program; if not, see . 
*/ #ifndef ASPEED_I2C_H @@ -31,12 +30,14 @@ #define TYPE_ASPEED_2500_I2C TYPE_ASPEED_I2C "-ast2500" #define TYPE_ASPEED_2600_I2C TYPE_ASPEED_I2C "-ast2600" #define TYPE_ASPEED_1030_I2C TYPE_ASPEED_I2C "-ast1030" +#define TYPE_ASPEED_2700_I2C TYPE_ASPEED_I2C "-ast2700" OBJECT_DECLARE_TYPE(AspeedI2CState, AspeedI2CClass, ASPEED_I2C) #define ASPEED_I2C_NR_BUSSES 16 #define ASPEED_I2C_SHARE_POOL_SIZE 0x800 +#define ASPEED_I2C_BUS_POOL_SIZE 0x20 #define ASPEED_I2C_OLD_NUM_REG 11 -#define ASPEED_I2C_NEW_NUM_REG 22 +#define ASPEED_I2C_NEW_NUM_REG 28 #define A_I2CD_M_STOP_CMD BIT(5) #define A_I2CD_M_RX_CMD BIT(3) @@ -225,6 +226,15 @@ REG32(I2CS_DMA_LEN_STS, 0x4c) FIELD(I2CS_DMA_LEN_STS, TX_LEN, 0, 13) REG32(I2CC_DMA_ADDR, 0x50) REG32(I2CC_DMA_LEN, 0x54) +/* DMA 64bits */ +REG32(I2CM_DMA_TX_ADDR_HI, 0x60) + FIELD(I2CM_DMA_TX_ADDR_HI, ADDR_HI, 0, 7) +REG32(I2CM_DMA_RX_ADDR_HI, 0x64) + FIELD(I2CM_DMA_RX_ADDR_HI, ADDR_HI, 0, 7) +REG32(I2CS_DMA_TX_ADDR_HI, 0x68) + FIELD(I2CS_DMA_TX_ADDR_HI, ADDR_HI, 0, 7) +REG32(I2CS_DMA_RX_ADDR_HI, 0x6c) + FIELD(I2CS_DMA_RX_ADDR_HI, ADDR_HI, 0, 7) struct AspeedI2CState; @@ -239,12 +249,15 @@ struct AspeedI2CBus { I2CSlave *slave; MemoryRegion mr; + MemoryRegion mr_pool; I2CBus *bus; uint8_t id; qemu_irq irq; uint32_t regs[ASPEED_I2C_NEW_NUM_REG]; + uint8_t pool[ASPEED_I2C_BUS_POOL_SIZE]; + uint64_t dma_dram_offset; }; struct AspeedI2CState { @@ -275,15 +288,19 @@ struct AspeedI2CClass { uint8_t num_busses; uint8_t reg_size; + uint32_t reg_gap_size; uint8_t gap; qemu_irq (*bus_get_irq)(AspeedI2CBus *); uint64_t pool_size; hwaddr pool_base; + uint32_t pool_gap_size; uint8_t *(*bus_pool_base)(AspeedI2CBus *); bool check_sram; bool has_dma; + bool has_share_pool; uint64_t mem_size; + bool has_dma64; }; static inline bool aspeed_i2c_is_new_mode(AspeedI2CState *s) @@ -363,14 +380,6 @@ static inline uint32_t aspeed_i2c_bus_dma_len_offset(AspeedI2CBus *bus) return R_I2CD_DMA_LEN; } -static inline uint32_t aspeed_i2c_bus_dma_addr_offset(AspeedI2CBus *bus) -{ - if (aspeed_i2c_is_new_mode(bus->controller)) { - return R_I2CC_DMA_ADDR; - } - return R_I2CD_DMA_ADDR; -} - static inline bool aspeed_i2c_bus_is_master(AspeedI2CBus *bus) { return SHARED_ARRAY_FIELD_EX32(bus->regs, aspeed_i2c_bus_ctrl_offset(bus), diff --git a/include/hw/i2c/npcm7xx_smbus.h b/include/hw/i2c/npcm7xx_smbus.h index dc45963c0e7..9c544c561b7 100644 --- a/include/hw/i2c/npcm7xx_smbus.h +++ b/include/hw/i2c/npcm7xx_smbus.h @@ -16,7 +16,7 @@ #ifndef NPCM7XX_SMBUS_H #define NPCM7XX_SMBUS_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/i2c/i2c.h" #include "hw/irq.h" #include "hw/sysbus.h" diff --git a/include/hw/i2c/pm_smbus.h b/include/hw/i2c/pm_smbus.h index 0d74207efb5..dafe0df4f69 100644 --- a/include/hw/i2c/pm_smbus.h +++ b/include/hw/i2c/pm_smbus.h @@ -1,7 +1,7 @@ #ifndef PM_SMBUS_H #define PM_SMBUS_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/i2c/smbus_master.h" #define PM_SMBUS_MAX_MSG_SIZE 32 diff --git a/include/hw/i386/apic_internal.h b/include/hw/i386/apic_internal.h index d6e85833da5..429278da618 100644 --- a/include/hw/i386/apic_internal.h +++ b/include/hw/i386/apic_internal.h @@ -22,7 +22,7 @@ #define QEMU_APIC_INTERNAL_H #include "cpu.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qemu/timer.h" #include "target/i386/cpu-qom.h" #include "qom/object.h" diff --git a/include/hw/i386/hostmem-epc.h b/include/hw/i386/hostmem-epc.h index 846c7260854..3988deca854 100644 --- a/include/hw/i386/hostmem-epc.h +++ b/include/hw/i386/hostmem-epc.h @@ 
-12,7 +12,7 @@ #ifndef QEMU_HOSTMEM_EPC_H #define QEMU_HOSTMEM_EPC_H -#include "sysemu/hostmem.h" +#include "system/hostmem.h" #define TYPE_MEMORY_BACKEND_EPC "memory-backend-epc" diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h index 1eb05c29fc9..e95477e8554 100644 --- a/include/hw/i386/intel_iommu.h +++ b/include/hw/i386/intel_iommu.h @@ -45,8 +45,9 @@ OBJECT_DECLARE_SIMPLE_TYPE(IntelIOMMUState, INTEL_IOMMU_DEVICE) #define DMAR_REG_SIZE 0x230 #define VTD_HOST_AW_39BIT 39 #define VTD_HOST_AW_48BIT 48 -#define VTD_HOST_ADDRESS_WIDTH VTD_HOST_AW_39BIT +#define VTD_HOST_ADDRESS_WIDTH VTD_HOST_AW_48BIT #define VTD_HAW_MASK(aw) ((1ULL << (aw)) - 1) +#define VTD_MGAW_FROM_CAP(cap) ((cap >> 16) & 0x3fULL) #define DMAR_REPORT_F_INTR (1) @@ -152,9 +153,10 @@ struct VTDIOTLBEntry { uint64_t gfn; uint16_t domain_id; uint32_t pasid; - uint64_t slpte; + uint64_t pte; uint64_t mask; uint8_t access_flags; + uint8_t pgtt; }; /* VT-d Source-ID Qualifier types */ @@ -262,6 +264,7 @@ struct IntelIOMMUState { bool caching_mode; /* RO - is cap CM enabled? */ bool scalable_mode; /* RO - is Scalable Mode supported? */ + bool flts; /* RO - is stage-1 translation supported? */ bool snoop_control; /* RO - is SNP filed supported? */ dma_addr_t root; /* Current root table pointer */ @@ -305,6 +308,10 @@ struct IntelIOMMUState { bool dma_drain; /* Whether DMA r/w draining enabled */ bool dma_translation; /* Whether DMA translation supported */ bool pasid; /* Whether to support PASID */ + bool fs1gp; /* First Stage 1-GByte Page Support */ + + /* Transient Mapping, Reserved(0) since VTD spec revision 3.2 */ + bool stale_tm; /* * Protects IOMMU states in general. Currently it protects the diff --git a/include/hw/i386/microvm.h b/include/hw/i386/microvm.h index fad97a891dc..b9ac34a3ef1 100644 --- a/include/hw/i386/microvm.h +++ b/include/hw/i386/microvm.h @@ -78,6 +78,8 @@ struct MicrovmMachineClass { X86MachineClass parent; HotplugHandler *(*orig_hotplug_handler)(MachineState *machine, DeviceState *dev); + void (*x86_load_linux)(X86MachineState *x86ms, FWCfgState *fw_cfg, + int acpi_data_size, bool pvh_enabled); }; struct MicrovmMachineState { diff --git a/include/hw/i386/nitro_enclave.h b/include/hw/i386/nitro_enclave.h new file mode 100644 index 00000000000..885163ff640 --- /dev/null +++ b/include/hw/i386/nitro_enclave.h @@ -0,0 +1,62 @@ +/* + * AWS nitro-enclave machine + * + * Copyright (c) 2024 Dorjoy Chowdhury + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */ + +#ifndef HW_I386_NITRO_ENCLAVE_H +#define HW_I386_NITRO_ENCLAVE_H + +#include "crypto/hash.h" +#include "hw/i386/microvm.h" +#include "qom/object.h" +#include "hw/virtio/virtio-nsm.h" + +/* Machine type options */ +#define NITRO_ENCLAVE_VSOCK_CHARDEV_ID "vsock" +#define NITRO_ENCLAVE_ID "id" +#define NITRO_ENCLAVE_PARENT_ROLE "parent-role" +#define NITRO_ENCLAVE_PARENT_ID "parent-id" + +struct NitroEnclaveMachineClass { + MicrovmMachineClass parent; + + void (*parent_init)(MachineState *state); + void (*parent_reset)(MachineState *machine, ResetType type); +}; + +struct NitroEnclaveMachineState { + MicrovmMachineState parent; + + /* Machine type options */ + char *vsock; + /* Enclave identifier */ + char *id; + /* Parent instance IAM role ARN */ + char *parent_role; + /* Parent instance identifier */ + char *parent_id; + + /* Machine state */ + VirtIONSM *vnsm; + + /* kernel + ramdisks + cmdline SHA384 hash */ + uint8_t image_hash[QCRYPTO_HASH_DIGEST_LEN_SHA384]; + /* kernel + boot ramdisk + cmdline SHA384 hash */ + uint8_t bootstrap_hash[QCRYPTO_HASH_DIGEST_LEN_SHA384]; + /* application ramdisk(s) SHA384 hash */ + uint8_t app_hash[QCRYPTO_HASH_DIGEST_LEN_SHA384]; + /* certificate fingerprint SHA384 hash */ + uint8_t fingerprint_hash[QCRYPTO_HASH_DIGEST_LEN_SHA384]; + bool signature_found; +}; + +#define TYPE_NITRO_ENCLAVE_MACHINE MACHINE_TYPE_NAME("nitro-enclave") +OBJECT_DECLARE_TYPE(NitroEnclaveMachineState, NitroEnclaveMachineClass, + NITRO_ENCLAVE_MACHINE) + +#endif diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index 4e55d7ef6ea..79b72c54dd3 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -107,7 +107,6 @@ struct PCMachineClass { /* RAM / address space compat: */ bool gigabyte_align; bool has_reserved_memory; - bool broken_reserved_end; bool enforce_amd_1tb_hole; bool isa_bios_alias; @@ -215,6 +214,15 @@ void pc_system_parse_ovmf_flash(uint8_t *flash_ptr, size_t flash_size); /* sgx.c */ void pc_machine_init_sgx_epc(PCMachineState *pcms); +extern GlobalProperty pc_compat_10_0[]; +extern const size_t pc_compat_10_0_len; + +extern GlobalProperty pc_compat_9_2[]; +extern const size_t pc_compat_9_2_len; + +extern GlobalProperty pc_compat_9_1[]; +extern const size_t pc_compat_9_1_len; + extern GlobalProperty pc_compat_9_0[]; extern const size_t pc_compat_9_0_len; @@ -290,17 +298,9 @@ extern const size_t pc_compat_2_7_len; extern GlobalProperty pc_compat_2_6[]; extern const size_t pc_compat_2_6_len; -extern GlobalProperty pc_compat_2_5[]; -extern const size_t pc_compat_2_5_len; - -extern GlobalProperty pc_compat_2_4[]; -extern const size_t pc_compat_2_4_len; - -extern GlobalProperty pc_compat_2_3[]; -extern const size_t pc_compat_2_3_len; - #define DEFINE_PC_MACHINE(suffix, namestr, initfn, optsfn) \ - static void pc_machine_##suffix##_class_init(ObjectClass *oc, void *data) \ + static void pc_machine_##suffix##_class_init(ObjectClass *oc, \ + const void *data) \ { \ MachineClass *mc = MACHINE_CLASS(oc); \ optsfn(mc); \ @@ -313,11 +313,11 @@ extern const size_t pc_compat_2_3_len; }; \ static void pc_machine_init_##suffix(void) \ { \ - type_register(&pc_machine_type_##suffix); \ + type_register_static(&pc_machine_type_##suffix); \ } \ type_init(pc_machine_init_##suffix) -#define DEFINE_PC_VER_MACHINE(namesym, namestr, initfn, ...) \ +#define DEFINE_PC_VER_MACHINE(namesym, namestr, initfn, isdefault, malias, ...) 
\ static void MACHINE_VER_SYM(init, namesym, __VA_ARGS__)( \ MachineState *machine) \ { \ @@ -325,12 +325,14 @@ extern const size_t pc_compat_2_3_len; } \ static void MACHINE_VER_SYM(class_init, namesym, __VA_ARGS__)( \ ObjectClass *oc, \ - void *data) \ + const void *data) \ { \ MachineClass *mc = MACHINE_CLASS(oc); \ MACHINE_VER_SYM(options, namesym, __VA_ARGS__)(mc); \ mc->init = MACHINE_VER_SYM(init, namesym, __VA_ARGS__); \ MACHINE_VER_DEPRECATION(__VA_ARGS__); \ + mc->is_default = isdefault; \ + mc->alias = malias; \ } \ static const TypeInfo MACHINE_VER_SYM(info, namesym, __VA_ARGS__) = \ { \ @@ -341,7 +343,7 @@ extern const size_t pc_compat_2_3_len; static void MACHINE_VER_SYM(register, namesym, __VA_ARGS__)(void) \ { \ MACHINE_VER_DELETION(__VA_ARGS__); \ - type_register(&MACHINE_VER_SYM(info, namesym, __VA_ARGS__)); \ + type_register_static(&MACHINE_VER_SYM(info, namesym, __VA_ARGS__)); \ } \ type_init(MACHINE_VER_SYM(register, namesym, __VA_ARGS__)); diff --git a/include/hw/i386/tdvf.h b/include/hw/i386/tdvf.h new file mode 100644 index 00000000000..e75c8d1acc6 --- /dev/null +++ b/include/hw/i386/tdvf.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2025 Intel Corporation + * Author: Isaku Yamahata + * + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_I386_TDVF_H +#define HW_I386_TDVF_H + +#include "qemu/osdep.h" + +#define TDVF_SECTION_TYPE_BFV 0 +#define TDVF_SECTION_TYPE_CFV 1 +#define TDVF_SECTION_TYPE_TD_HOB 2 +#define TDVF_SECTION_TYPE_TEMP_MEM 3 + +#define TDVF_SECTION_ATTRIBUTES_MR_EXTEND (1U << 0) +#define TDVF_SECTION_ATTRIBUTES_PAGE_AUG (1U << 1) + +typedef struct TdxFirmwareEntry { + uint32_t data_offset; + uint32_t data_len; + uint64_t address; + uint64_t size; + uint32_t type; + uint32_t attributes; + + void *mem_ptr; +} TdxFirmwareEntry; + +typedef struct TdxFirmware { + void *mem_ptr; + + uint32_t nr_entries; + TdxFirmwareEntry *entries; +} TdxFirmware; + +#define for_each_tdx_fw_entry(fw, e) \ + for (e = (fw)->entries; e != (fw)->entries + (fw)->nr_entries; e++) + +int tdvf_parse_metadata(TdxFirmware *fw, void *flash_ptr, int size); + +#endif /* HW_I386_TDVF_H */ diff --git a/include/hw/i386/topology.h b/include/hw/i386/topology.h index dff49fce115..f6380f1ed75 100644 --- a/include/hw/i386/topology.h +++ b/include/hw/i386/topology.h @@ -39,7 +39,7 @@ * CPUID Fn8000_0008_ECX[ApicIdCoreIdSize[3:0]] is set to apicid_core_width(). */ - +#include "qapi/qapi-types-machine-common.h" #include "qemu/bitops.h" /* @@ -62,21 +62,7 @@ typedef struct X86CPUTopoInfo { unsigned threads_per_core; } X86CPUTopoInfo; -/* - * CPUTopoLevel is the general i386 topology hierarchical representation, - * ordered by increasing hierarchical relationship. - * Its enumeration value is not bound to the type value of Intel (CPUID[0x1F]) - * or AMD (CPUID[0x80000026]). - */ -enum CPUTopoLevel { - CPU_TOPO_LEVEL_INVALID, - CPU_TOPO_LEVEL_SMT, - CPU_TOPO_LEVEL_CORE, - CPU_TOPO_LEVEL_MODULE, - CPU_TOPO_LEVEL_DIE, - CPU_TOPO_LEVEL_PACKAGE, - CPU_TOPO_LEVEL_MAX, -}; +#define CPU_TOPOLOGY_LEVEL_INVALID CPU_TOPOLOGY_LEVEL__MAX /* Return the bit width needed for 'count' IDs */ static unsigned apicid_bitwidth_for_count(unsigned count) @@ -135,9 +121,10 @@ static inline unsigned apicid_pkg_offset(X86CPUTopoInfo *topo_info) } /* - * Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID + * Make APIC ID for the CPU based on topology and IDs of each topology level. * - * The caller must make sure core_id < nr_cores and smt_id < nr_threads. 
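The tdvf.h interface introduced above is consumed by parsing the metadata once and then iterating the resulting sections. A hedged usage sketch; the function name, flash_ptr/flash_size and the error handling are hypothetical (error_report() assumes qemu/error-report.h):

    static void example_load_tdvf(void *flash_ptr, int flash_size)
    {
        TdxFirmware fw = { 0 };
        TdxFirmwareEntry *e;

        if (tdvf_parse_metadata(&fw, flash_ptr, flash_size) < 0) {
            error_report("failed to parse TDVF metadata");
            return;
        }

        for_each_tdx_fw_entry(&fw, e) {
            if (e->type == TDVF_SECTION_TYPE_TD_HOB) {
                /* e->address and e->size locate the TD HOB region. */
            }
        }
    }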
+ * The caller must make sure the ID of each level doesn't exceed the width of + * the level. */ static inline apic_id_t x86_apicid_from_topo_ids(X86CPUTopoInfo *topo_info, const X86CPUTopoIDs *topo_ids) @@ -212,8 +199,33 @@ static inline apic_id_t x86_apicid_from_cpu_idx(X86CPUTopoInfo *topo_info, */ static inline bool x86_has_extended_topo(unsigned long *topo_bitmap) { - return test_bit(CPU_TOPO_LEVEL_MODULE, topo_bitmap) || - test_bit(CPU_TOPO_LEVEL_DIE, topo_bitmap); + return test_bit(CPU_TOPOLOGY_LEVEL_MODULE, topo_bitmap) || + test_bit(CPU_TOPOLOGY_LEVEL_DIE, topo_bitmap); +} + +static inline unsigned x86_module_per_pkg(X86CPUTopoInfo *topo_info) +{ + return topo_info->modules_per_die * topo_info->dies_per_pkg; +} + +static inline unsigned x86_cores_per_pkg(X86CPUTopoInfo *topo_info) +{ + return topo_info->cores_per_module * x86_module_per_pkg(topo_info); +} + +static inline unsigned x86_threads_per_pkg(X86CPUTopoInfo *topo_info) +{ + return topo_info->threads_per_core * x86_cores_per_pkg(topo_info); +} + +static inline unsigned x86_threads_per_module(X86CPUTopoInfo *topo_info) +{ + return topo_info->threads_per_core * topo_info->cores_per_module; +} + +static inline unsigned x86_threads_per_die(X86CPUTopoInfo *topo_info) +{ + return x86_threads_per_module(topo_info) * topo_info->modules_per_die; } #endif /* HW_I386_TOPOLOGY_H */ diff --git a/include/hw/i386/x86.h b/include/hw/i386/x86.h index d43cb3908e6..8755cad50a3 100644 --- a/include/hw/i386/x86.h +++ b/include/hw/i386/x86.h @@ -18,22 +18,18 @@ #define HW_I386_X86_H #include "exec/hwaddr.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/boards.h" #include "hw/i386/topology.h" #include "hw/intc/ioapic.h" #include "hw/isa/isa.h" #include "qom/object.h" +#include "system/igvm-cfg.h" struct X86MachineClass { - /*< private >*/ MachineClass parent; - /*< public >*/ - - /* TSC rate migration: */ - bool save_tsc_khz; /* use DMA capable linuxboot option rom */ bool fwcfg_dma_enabled; /* CPU and apic information: */ @@ -97,6 +93,8 @@ struct X86MachineState { * which means no limitation on the guest's bus locks. 
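The per-package topology helpers added above compose multiplicatively. A quick hedged illustration with a made-up topology (2 dies x 2 modules x 4 cores x 2 threads); the values are examples only:

    X86CPUTopoInfo topo = {
        .dies_per_pkg     = 2,
        .modules_per_die  = 2,
        .cores_per_module = 4,
        .threads_per_core = 2,
    };

    unsigned modules = x86_module_per_pkg(&topo);    /* 2 * 2  = 4  */
    unsigned cores   = x86_cores_per_pkg(&topo);     /* 4 * 4  = 16 */
    unsigned threads = x86_threads_per_pkg(&topo);   /* 2 * 16 = 32 */
    unsigned per_die = x86_threads_per_die(&topo);   /* 2 * 4 * 2 = 16 */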
*/ uint64_t bus_lock_ratelimit; + + IgvmCfg *igvm; }; #define X86_MACHINE_SMM "smm" diff --git a/include/hw/i386/xen_arch_hvm.h b/include/hw/i386/xen_arch_hvm.h deleted file mode 100644 index 1000f8f5433..00000000000 --- a/include/hw/i386/xen_arch_hvm.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef HW_XEN_ARCH_I386_HVM_H -#define HW_XEN_ARCH_I386_HVM_H - -#include -#include "hw/xen/xen-hvm-common.h" - -void arch_handle_ioreq(XenIOState *state, ioreq_t *req); -void arch_xen_set_memory(XenIOState *state, - MemoryRegionSection *section, - bool add); -#endif diff --git a/include/hw/ide/ahci-pci.h b/include/hw/ide/ahci-pci.h index c2ee6169625..face1a9a4a4 100644 --- a/include/hw/ide/ahci-pci.h +++ b/include/hw/ide/ahci-pci.h @@ -9,6 +9,7 @@ #include "qom/object.h" #include "hw/ide/ahci.h" #include "hw/pci/pci_device.h" +#include "hw/irq.h" #define TYPE_ICH9_AHCI "ich9-ahci" OBJECT_DECLARE_SIMPLE_TYPE(AHCIPCIState, ICH9_AHCI) @@ -17,6 +18,7 @@ struct AHCIPCIState { PCIDevice parent_obj; AHCIState ahci; + IRQState irq; }; #endif diff --git a/include/hw/ide/ahci.h b/include/hw/ide/ahci.h index ba31e75ff9b..cd07b87811b 100644 --- a/include/hw/ide/ahci.h +++ b/include/hw/ide/ahci.h @@ -24,7 +24,7 @@ #ifndef HW_IDE_AHCI_H #define HW_IDE_AHCI_H -#include "exec/memory.h" +#include "system/memory.h" typedef struct AHCIDevice AHCIDevice; @@ -37,8 +37,6 @@ typedef struct AHCIControlRegs { } AHCIControlRegs; typedef struct AHCIState { - DeviceState *container; - AHCIDevice *dev; AHCIControlRegs control_regs; MemoryRegion mem; diff --git a/include/hw/ide/ide-bus.h b/include/hw/ide/ide-bus.h index 4841a7dcd63..121b455fcdb 100644 --- a/include/hw/ide/ide-bus.h +++ b/include/hw/ide/ide-bus.h @@ -1,7 +1,7 @@ #ifndef HW_IDE_BUS_H #define HW_IDE_BUS_H -#include "exec/ioport.h" +#include "system/ioport.h" #include "hw/ide/ide-dev.h" #include "hw/ide/ide-dma.h" diff --git a/include/hw/ide/ide-dev.h b/include/hw/ide/ide-dev.h index 9a0d71db4e1..92e88687800 100644 --- a/include/hw/ide/ide-dev.h +++ b/include/hw/ide/ide-dev.h @@ -20,7 +20,7 @@ #ifndef IDE_DEV_H #define IDE_DEV_H -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/qdev-properties.h" #include "hw/block/block.h" diff --git a/include/hw/input/lm832x.h b/include/hw/input/lm832x.h deleted file mode 100644 index e0e5d5ef20e..00000000000 --- a/include/hw/input/lm832x.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * National Semiconductor LM8322/8323 GPIO keyboard & PWM chips. - * - * Copyright (C) 2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 or - * (at your option) version 3 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, see . 
- */ - -#ifndef HW_INPUT_LM832X_H -#define HW_INPUT_LM832X_H - -#define TYPE_LM8323 "lm8323" - -void lm832x_key_event(DeviceState *dev, int key, int state); - -#endif diff --git a/include/hw/input/tsc2xxx.h b/include/hw/input/tsc2xxx.h deleted file mode 100644 index 00eca17674e..00000000000 --- a/include/hw/input/tsc2xxx.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * TI touchscreen controller - * - * Copyright (c) 2006 Andrzej Zaborowski - * Copyright (C) 2008 Nokia Corporation - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef HW_INPUT_TSC2XXX_H -#define HW_INPUT_TSC2XXX_H - -typedef struct MouseTransformInfo { - /* Touchscreen resolution */ - int x; - int y; - /* Calibration values as used/generated by tslib */ - int a[7]; -} MouseTransformInfo; - -typedef struct uWireSlave { - uint16_t (*receive)(void *opaque); - void (*send)(void *opaque, uint16_t data); - void *opaque; -} uWireSlave; - -/* tsc210x.c */ -uWireSlave *tsc2102_init(qemu_irq pint); -uWireSlave *tsc2301_init(qemu_irq penirq, qemu_irq kbirq, qemu_irq dav); -I2SCodec *tsc210x_codec(uWireSlave *chip); -uint32_t tsc210x_txrx(void *opaque, uint32_t value, int len); -void tsc210x_set_transform(uWireSlave *chip, const MouseTransformInfo *info); -void tsc210x_key_event(uWireSlave *chip, int key, int down); - -/* tsc2005.c */ -void *tsc2005_init(qemu_irq pintdav); -uint32_t tsc2005_txrx(void *opaque, uint32_t value, int len); -void tsc2005_set_transform(void *opaque, const MouseTransformInfo *info); - -#endif diff --git a/include/hw/intc/arm_gic.h b/include/hw/intc/arm_gic.h index 48f6a51a70a..be923f7ed88 100644 --- a/include/hw/intc/arm_gic.h +++ b/include/hw/intc/arm_gic.h @@ -27,6 +27,9 @@ * implement the security extensions * + QOM property "has-virtualization-extensions": set true if the GIC should * implement the virtualization extensions + * + QOM property "first-cpu-index": index of the first cpu attached to the + * GIC (default 0). The CPUs connected to the GIC are assumed to be + * first-cpu-index, first-cpu-index + 1, ... first-cpu-index + num-cpu - 1. * + unnamed GPIO inputs: (where P is number of SPIs, i.e. num-irq - 32) * [0..P-1] SPIs * [P..P+31] PPIs for CPU 0 diff --git a/include/hw/intc/arm_gic_common.h b/include/hw/intc/arm_gic_common.h index 97fea4102d3..93a3cc2bf81 100644 --- a/include/hw/intc/arm_gic_common.h +++ b/include/hw/intc/arm_gic_common.h @@ -129,6 +129,8 @@ struct GICState { uint32_t num_lrs; uint32_t num_cpu; + /* cpu_index of the first CPU, attached to this GIC. */ + uint32_t first_cpu_index; MemoryRegion iomem; /* Distributor */ /* This is just so we can have an opaque pointer which identifies diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h index cd09bee3bc4..c18503869f9 100644 --- a/include/hw/intc/arm_gicv3_common.h +++ b/include/hw/intc/arm_gicv3_common.h @@ -51,13 +51,13 @@ /* Maximum number of list registers (architectural limit) */ #define GICV3_LR_MAX 16 -/* For some distributor fields we want to model the array of 32-bit +/* + * For some distributor fields we want to model the array of 32-bit * register values which hold various bitmaps corresponding to enabled, - * pending, etc bits. 
These macros and functions facilitate that; the - * APIs are generally modelled on the generic bitmap.h functions - * (which are unsuitable here because they use 'unsigned long' as the - * underlying storage type, which is very awkward when you need to - * access the data as 32-bit values.) + * pending, etc bits. We use the set_bit32() etc family of functions + * from bitops.h for this. For a few cases we need to implement some + * extra operations. + * * Each bitmap contains a bit for each interrupt. Although there is * space for the PPIs and SGIs, those bits (the first 32) are never * used as that state lives in the redistributor. The unused bits are @@ -65,39 +65,13 @@ * avoids bugs where we forget to subtract GIC_INTERNAL from an * interrupt number. */ -#define GICV3_BMP_SIZE DIV_ROUND_UP(GICV3_MAXIRQ, 32) - -#define GIC_DECLARE_BITMAP(name) \ - uint32_t name[GICV3_BMP_SIZE] - -#define GIC_BIT_MASK(nr) (1U << ((nr) % 32)) -#define GIC_BIT_WORD(nr) ((nr) / 32) - -static inline void gic_bmp_set_bit(int nr, uint32_t *addr) -{ - uint32_t mask = GIC_BIT_MASK(nr); - uint32_t *p = addr + GIC_BIT_WORD(nr); - - *p |= mask; -} - -static inline void gic_bmp_clear_bit(int nr, uint32_t *addr) -{ - uint32_t mask = GIC_BIT_MASK(nr); - uint32_t *p = addr + GIC_BIT_WORD(nr); - - *p &= ~mask; -} - -static inline int gic_bmp_test_bit(int nr, const uint32_t *addr) -{ - return 1U & (addr[GIC_BIT_WORD(nr)] >> (nr & 31)); -} +#define GIC_DECLARE_BITMAP(name) DECLARE_BITMAP32(name, GICV3_MAXIRQ) +#define GICV3_BMP_SIZE BITS_TO_U32S(GICV3_MAXIRQ) static inline void gic_bmp_replace_bit(int nr, uint32_t *addr, int val) { - uint32_t mask = GIC_BIT_MASK(nr); - uint32_t *p = addr + GIC_BIT_WORD(nr); + uint32_t mask = BIT32_MASK(nr); + uint32_t *p = addr + BIT32_WORD(nr); *p &= ~mask; *p |= (val & 1U) << (nr % 32); @@ -106,7 +80,7 @@ static inline void gic_bmp_replace_bit(int nr, uint32_t *addr, int val) /* Return a pointer to the 32-bit word containing the specified bit. 
*/ static inline uint32_t *gic_bmp_ptr32(uint32_t *addr, int nr) { - return addr + GIC_BIT_WORD(nr); + return addr + BIT32_WORD(nr); } typedef struct GICv3State GICv3State; @@ -257,6 +231,7 @@ struct GICv3State { uint32_t num_cpu; uint32_t num_irq; uint32_t revision; + uint32_t maint_irq; bool lpi_enable; bool nmi_support; bool security_extn; @@ -301,15 +276,15 @@ struct GICv3State { #define GICV3_BITMAP_ACCESSORS(BMP) \ static inline void gicv3_gicd_##BMP##_set(GICv3State *s, int irq) \ { \ - gic_bmp_set_bit(irq, s->BMP); \ + set_bit32(irq, s->BMP); \ } \ static inline int gicv3_gicd_##BMP##_test(GICv3State *s, int irq) \ { \ - return gic_bmp_test_bit(irq, s->BMP); \ + return test_bit32(irq, s->BMP); \ } \ static inline void gicv3_gicd_##BMP##_clear(GICv3State *s, int irq) \ { \ - gic_bmp_clear_bit(irq, s->BMP); \ + clear_bit32(irq, s->BMP); \ } \ static inline void gicv3_gicd_##BMP##_replace(GICv3State *s, \ int irq, int value) \ diff --git a/include/hw/intc/arm_gicv3_its_common.h b/include/hw/intc/arm_gicv3_its_common.h index 7dc712b38d2..3c7b543b018 100644 --- a/include/hw/intc/arm_gicv3_its_common.h +++ b/include/hw/intc/arm_gicv3_its_common.h @@ -128,7 +128,7 @@ struct GICv3ITSCommonClass { * Return the ITS class name to use depending on whether KVM acceleration * and KVM CAP_SIGNAL_MSI are supported * - * Returns: class name to use or NULL + * Returns: class name to use */ const char *its_class_name(void); diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h index 89fe8aedaa9..7b9964fe7e4 100644 --- a/include/hw/intc/armv7m_nvic.h +++ b/include/hw/intc/armv7m_nvic.h @@ -189,21 +189,7 @@ int armv7m_nvic_raw_execution_priority(NVICState *s); * @secure: the security state to test * This corresponds to the pseudocode IsReqExecPriNeg(). 
*/ -#ifndef CONFIG_USER_ONLY bool armv7m_nvic_neg_prio_requested(NVICState *s, bool secure); -#else -static inline bool armv7m_nvic_neg_prio_requested(NVICState *s, bool secure) -{ - return false; -} -#endif -#ifndef CONFIG_USER_ONLY bool armv7m_nvic_can_take_pending_exception(NVICState *s); -#else -static inline bool armv7m_nvic_can_take_pending_exception(NVICState *s) -{ - return true; -} -#endif #endif diff --git a/include/hw/intc/aspeed_intc.h b/include/hw/intc/aspeed_intc.h index 18cb43476cd..51288384a56 100644 --- a/include/hw/intc/aspeed_intc.h +++ b/include/hw/intc/aspeed_intc.h @@ -14,10 +14,24 @@ #define TYPE_ASPEED_INTC "aspeed.intc" #define TYPE_ASPEED_2700_INTC TYPE_ASPEED_INTC "-ast2700" +#define TYPE_ASPEED_2700_INTCIO TYPE_ASPEED_INTC "io-ast2700" +#define TYPE_ASPEED_2700SSP_INTC TYPE_ASPEED_INTC "-ast2700ssp" +#define TYPE_ASPEED_2700SSP_INTCIO TYPE_ASPEED_INTC "io-ast2700ssp" +#define TYPE_ASPEED_2700TSP_INTC TYPE_ASPEED_INTC "-ast2700tsp" +#define TYPE_ASPEED_2700TSP_INTCIO TYPE_ASPEED_INTC "io-ast2700tsp" + OBJECT_DECLARE_TYPE(AspeedINTCState, AspeedINTCClass, ASPEED_INTC) -#define ASPEED_INTC_NR_REGS (0x2000 >> 2) -#define ASPEED_INTC_NR_INTS 9 +#define ASPEED_INTC_MAX_INPINS 10 +#define ASPEED_INTC_MAX_OUTPINS 19 + +typedef struct AspeedINTCIRQ { + int inpin_idx; + int outpin_idx; + int num_outpins; + uint32_t enable_reg; + uint32_t status_reg; +} AspeedINTCIRQ; struct AspeedINTCState { /*< private >*/ @@ -25,20 +39,29 @@ struct AspeedINTCState { /*< public >*/ MemoryRegion iomem; - uint32_t regs[ASPEED_INTC_NR_REGS]; - OrIRQState orgates[ASPEED_INTC_NR_INTS]; - qemu_irq output_pins[ASPEED_INTC_NR_INTS]; + MemoryRegion iomem_container; + + uint32_t *regs; + OrIRQState orgates[ASPEED_INTC_MAX_INPINS]; + qemu_irq output_pins[ASPEED_INTC_MAX_OUTPINS]; - uint32_t enable[ASPEED_INTC_NR_INTS]; - uint32_t mask[ASPEED_INTC_NR_INTS]; - uint32_t pending[ASPEED_INTC_NR_INTS]; + uint32_t enable[ASPEED_INTC_MAX_INPINS]; + uint32_t mask[ASPEED_INTC_MAX_INPINS]; + uint32_t pending[ASPEED_INTC_MAX_INPINS]; }; struct AspeedINTCClass { SysBusDeviceClass parent_class; uint32_t num_lines; - uint32_t num_ints; + uint32_t num_inpins; + uint32_t num_outpins; + uint64_t mem_size; + uint64_t nr_regs; + uint64_t reg_offset; + const MemoryRegionOps *reg_ops; + const AspeedINTCIRQ *irq_table; + int irq_table_count; }; #endif /* ASPEED_INTC_H */ diff --git a/include/hw/intc/loongarch_extioi.h b/include/hw/intc/loongarch_extioi.h index 626a37dfa1f..4795bdc15f5 100644 --- a/include/hw/intc/loongarch_extioi.h +++ b/include/hw/intc/loongarch_extioi.h @@ -5,85 +5,28 @@ * Copyright (C) 2021 Loongson Technology Corporation Limited */ -#include "hw/sysbus.h" -#include "hw/loongarch/virt.h" - #ifndef LOONGARCH_EXTIOI_H #define LOONGARCH_EXTIOI_H -#define LS3A_INTC_IP 8 -#define EXTIOI_IRQS (256) -#define EXTIOI_IRQS_BITMAP_SIZE (256 / 8) -/* irq from EXTIOI is routed to no more than 4 cpus */ -#define EXTIOI_CPUS (4) -/* map to ipnum per 32 irqs */ -#define EXTIOI_IRQS_IPMAP_SIZE (256 / 32) -#define EXTIOI_IRQS_COREMAP_SIZE 256 -#define EXTIOI_IRQS_NODETYPE_COUNT 16 -#define EXTIOI_IRQS_GROUP_COUNT 8 - -#define APIC_OFFSET 0x400 -#define APIC_BASE (0x1000ULL + APIC_OFFSET) +#include "hw/intc/loongarch_extioi_common.h" -#define EXTIOI_NODETYPE_START (0x4a0 - APIC_OFFSET) -#define EXTIOI_NODETYPE_END (0x4c0 - APIC_OFFSET) -#define EXTIOI_IPMAP_START (0x4c0 - APIC_OFFSET) -#define EXTIOI_IPMAP_END (0x4c8 - APIC_OFFSET) -#define EXTIOI_ENABLE_START (0x600 - APIC_OFFSET) -#define EXTIOI_ENABLE_END (0x620 - 
APIC_OFFSET) -#define EXTIOI_BOUNCE_START (0x680 - APIC_OFFSET) -#define EXTIOI_BOUNCE_END (0x6a0 - APIC_OFFSET) -#define EXTIOI_ISR_START (0x700 - APIC_OFFSET) -#define EXTIOI_ISR_END (0x720 - APIC_OFFSET) -#define EXTIOI_COREISR_START (0x800 - APIC_OFFSET) -#define EXTIOI_COREISR_END (0xB20 - APIC_OFFSET) -#define EXTIOI_COREMAP_START (0xC00 - APIC_OFFSET) -#define EXTIOI_COREMAP_END (0xD00 - APIC_OFFSET) -#define EXTIOI_SIZE 0x800 +#define TYPE_LOONGARCH_EXTIOI "loongarch.extioi" +OBJECT_DECLARE_TYPE(LoongArchExtIOIState, LoongArchExtIOIClass, LOONGARCH_EXTIOI) -#define EXTIOI_VIRT_BASE (0x40000000) -#define EXTIOI_VIRT_SIZE (0x1000) -#define EXTIOI_VIRT_FEATURES (0x0) -#define EXTIOI_HAS_VIRT_EXTENSION (0) -#define EXTIOI_HAS_ENABLE_OPTION (1) -#define EXTIOI_HAS_INT_ENCODE (2) -#define EXTIOI_HAS_CPU_ENCODE (3) -#define EXTIOI_VIRT_HAS_FEATURES (BIT(EXTIOI_HAS_VIRT_EXTENSION) \ - | BIT(EXTIOI_HAS_ENABLE_OPTION) \ - | BIT(EXTIOI_HAS_CPU_ENCODE)) -#define EXTIOI_VIRT_CONFIG (0x4) -#define EXTIOI_ENABLE (1) -#define EXTIOI_ENABLE_INT_ENCODE (2) -#define EXTIOI_ENABLE_CPU_ENCODE (3) -#define EXTIOI_VIRT_COREMAP_START (0x40) -#define EXTIOI_VIRT_COREMAP_END (0x240) +struct LoongArchExtIOIState { + LoongArchExtIOICommonState parent_obj; + int dev_fd; +}; -typedef struct ExtIOICore { - uint32_t coreisr[EXTIOI_IRQS_GROUP_COUNT]; - DECLARE_BITMAP(sw_isr[LS3A_INTC_IP], EXTIOI_IRQS); - qemu_irq parent_irq[LS3A_INTC_IP]; -} ExtIOICore; +struct LoongArchExtIOIClass { + LoongArchExtIOICommonClass parent_class; -#define TYPE_LOONGARCH_EXTIOI "loongarch.extioi" -OBJECT_DECLARE_SIMPLE_TYPE(LoongArchExtIOI, LOONGARCH_EXTIOI) -struct LoongArchExtIOI { - SysBusDevice parent_obj; - uint32_t num_cpu; - uint32_t features; - uint32_t status; - /* hardware state */ - uint32_t nodetype[EXTIOI_IRQS_NODETYPE_COUNT / 2]; - uint32_t bounce[EXTIOI_IRQS_GROUP_COUNT]; - uint32_t isr[EXTIOI_IRQS / 32]; - uint32_t enable[EXTIOI_IRQS / 32]; - uint32_t ipmap[EXTIOI_IRQS_IPMAP_SIZE / 4]; - uint32_t coremap[EXTIOI_IRQS / 4]; - uint32_t sw_pending[EXTIOI_IRQS / 32]; - uint8_t sw_ipmap[EXTIOI_IRQS_IPMAP_SIZE]; - uint8_t sw_coremap[EXTIOI_IRQS]; - qemu_irq irq[EXTIOI_IRQS]; - ExtIOICore *cpu; - MemoryRegion extioi_system_mem; - MemoryRegion virt_extend; + DeviceRealize parent_realize; + ResettablePhases parent_phases; }; + +void kvm_extioi_realize(DeviceState *dev, Error **errp); +int kvm_extioi_get(void *opaque); +int kvm_extioi_put(void *opaque, int version_id); + #endif /* LOONGARCH_EXTIOI_H */ diff --git a/include/hw/intc/loongarch_extioi_common.h b/include/hw/intc/loongarch_extioi_common.h new file mode 100644 index 00000000000..c021ccee0fa --- /dev/null +++ b/include/hw/intc/loongarch_extioi_common.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch 3A5000 ext interrupt controller definitions + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_EXTIOI_COMMON_H +#define LOONGARCH_EXTIOI_COMMON_H + +#include "qom/object.h" +#include "hw/sysbus.h" +#include "hw/loongarch/virt.h" + +#define LS3A_INTC_IP 8 +#define EXTIOI_IRQS (256) +#define EXTIOI_IRQS_BITMAP_SIZE (256 / 8) +/* irq from EXTIOI is routed to no more than 4 cpus */ +#define EXTIOI_CPUS (4) +/* map to ipnum per 32 irqs */ +#define EXTIOI_IRQS_IPMAP_SIZE (256 / 32) +#define EXTIOI_IRQS_COREMAP_SIZE 256 +#define EXTIOI_IRQS_NODETYPE_COUNT 16 +#define EXTIOI_IRQS_GROUP_COUNT 8 + +#define APIC_OFFSET 0x400 +#define APIC_BASE (0x1000ULL + APIC_OFFSET) +#define EXTIOI_NODETYPE_START (0x4a0 - 
APIC_OFFSET) +#define EXTIOI_NODETYPE_END (0x4c0 - APIC_OFFSET) +#define EXTIOI_IPMAP_START (0x4c0 - APIC_OFFSET) +#define EXTIOI_IPMAP_END (0x4c8 - APIC_OFFSET) +#define EXTIOI_ENABLE_START (0x600 - APIC_OFFSET) +#define EXTIOI_ENABLE_END (0x620 - APIC_OFFSET) +#define EXTIOI_BOUNCE_START (0x680 - APIC_OFFSET) +#define EXTIOI_BOUNCE_END (0x6a0 - APIC_OFFSET) +#define EXTIOI_ISR_START (0x700 - APIC_OFFSET) +#define EXTIOI_ISR_END (0x720 - APIC_OFFSET) +#define EXTIOI_COREISR_START (0x800 - APIC_OFFSET) +#define EXTIOI_COREISR_END (0x820 - APIC_OFFSET) +#define EXTIOI_COREMAP_START (0xC00 - APIC_OFFSET) +#define EXTIOI_COREMAP_END (0xD00 - APIC_OFFSET) +#define EXTIOI_SIZE 0x800 + +#define EXTIOI_VIRT_BASE (0x40000000) +#define EXTIOI_VIRT_SIZE (0x1000) +#define EXTIOI_VIRT_FEATURES (0x0) +#define EXTIOI_HAS_VIRT_EXTENSION (0) +#define EXTIOI_HAS_ENABLE_OPTION (1) +#define EXTIOI_HAS_INT_ENCODE (2) +#define EXTIOI_HAS_CPU_ENCODE (3) +#define EXTIOI_VIRT_HAS_FEATURES (BIT(EXTIOI_HAS_VIRT_EXTENSION) \ + | BIT(EXTIOI_HAS_ENABLE_OPTION) \ + | BIT(EXTIOI_HAS_CPU_ENCODE)) +#define EXTIOI_VIRT_CONFIG (0x4) +#define EXTIOI_ENABLE (1) +#define EXTIOI_ENABLE_INT_ENCODE (2) +#define EXTIOI_ENABLE_CPU_ENCODE (3) +#define EXTIOI_VIRT_COREMAP_START (0x40) +#define EXTIOI_VIRT_COREMAP_END (0x240) + +#define TYPE_LOONGARCH_EXTIOI_COMMON "loongarch_extioi_common" +OBJECT_DECLARE_TYPE(LoongArchExtIOICommonState, + LoongArchExtIOICommonClass, LOONGARCH_EXTIOI_COMMON) + +typedef struct ExtIOICore { + uint32_t coreisr[EXTIOI_IRQS_GROUP_COUNT]; + DECLARE_BITMAP(sw_isr[LS3A_INTC_IP], EXTIOI_IRQS); + qemu_irq parent_irq[LS3A_INTC_IP]; + uint64_t arch_id; + CPUState *cpu; +} ExtIOICore; + +struct LoongArchExtIOICommonState { + SysBusDevice parent_obj; + uint32_t num_cpu; + uint32_t features; + uint32_t status; + /* hardware state */ + uint32_t nodetype[EXTIOI_IRQS_NODETYPE_COUNT / 2]; + uint32_t bounce[EXTIOI_IRQS_GROUP_COUNT]; + uint32_t isr[EXTIOI_IRQS / 32]; + uint32_t enable[EXTIOI_IRQS / 32]; + uint32_t ipmap[EXTIOI_IRQS_IPMAP_SIZE / 4]; + uint32_t coremap[EXTIOI_IRQS / 4]; + uint32_t sw_pending[EXTIOI_IRQS / 32]; + uint8_t sw_ipmap[EXTIOI_IRQS_IPMAP_SIZE]; + uint8_t sw_coremap[EXTIOI_IRQS]; + qemu_irq irq[EXTIOI_IRQS]; + ExtIOICore *cpu; + MemoryRegion extioi_system_mem; + MemoryRegion virt_extend; +}; + +struct LoongArchExtIOICommonClass { + SysBusDeviceClass parent_class; + + DeviceRealize parent_realize; + DeviceUnrealize parent_unrealize; + ResettablePhases parent_phases; + int (*pre_save)(void *s); + int (*post_load)(void *s, int version_id); +}; +#endif /* LOONGARCH_EXTIOI_H */ diff --git a/include/hw/intc/loongarch_ipi.h b/include/hw/intc/loongarch_ipi.h new file mode 100644 index 00000000000..5175a6b0044 --- /dev/null +++ b/include/hw/intc/loongarch_ipi.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch IPI interrupt header files + * + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#ifndef HW_LOONGARCH_IPI_H +#define HW_LOONGARCH_IPI_H + +#include "qom/object.h" +#include "hw/intc/loongson_ipi_common.h" + +#define TYPE_LOONGARCH_IPI "loongarch_ipi" +OBJECT_DECLARE_TYPE(LoongarchIPIState, LoongarchIPIClass, LOONGARCH_IPI) + +struct LoongarchIPIState { + LoongsonIPICommonState parent_obj; + int dev_fd; +}; + +struct LoongarchIPIClass { + LoongsonIPICommonClass parent_class; + DeviceRealize parent_realize; + ResettablePhases parent_phases; +}; + +void kvm_ipi_realize(DeviceState *dev, Error **errp); +int kvm_ipi_get(void *opaque); +int 
kvm_ipi_put(void *opaque, int version_id); + +#endif diff --git a/include/hw/intc/loongarch_pch_pic.h b/include/hw/intc/loongarch_pch_pic.h index d5437e88f28..a46b6f8985d 100644 --- a/include/hw/intc/loongarch_pch_pic.h +++ b/include/hw/intc/loongarch_pch_pic.h @@ -5,65 +5,29 @@ * Copyright (c) 2021 Loongson Technology Corporation Limited */ -#include "hw/sysbus.h" +#ifndef HW_LOONGARCH_PCH_PIC_H +#define HW_LOONGARCH_PCH_PIC_H -#define TYPE_LOONGARCH_PCH_PIC "loongarch_pch_pic" -#define PCH_PIC_NAME(name) TYPE_LOONGARCH_PCH_PIC#name -OBJECT_DECLARE_SIMPLE_TYPE(LoongArchPCHPIC, LOONGARCH_PCH_PIC) +#include "hw/intc/loongarch_pic_common.h" -#define PCH_PIC_INT_ID_VAL 0x7000000UL -#define PCH_PIC_INT_ID_VER 0x1UL +#define TYPE_LOONGARCH_PIC "loongarch_pic" +#define PCH_PIC_NAME(name) TYPE_LOONGARCH_PIC#name +OBJECT_DECLARE_TYPE(LoongarchPICState, LoongarchPICClass, LOONGARCH_PIC) -#define PCH_PIC_INT_ID_LO 0x00 -#define PCH_PIC_INT_ID_HI 0x04 -#define PCH_PIC_INT_MASK_LO 0x20 -#define PCH_PIC_INT_MASK_HI 0x24 -#define PCH_PIC_HTMSI_EN_LO 0x40 -#define PCH_PIC_HTMSI_EN_HI 0x44 -#define PCH_PIC_INT_EDGE_LO 0x60 -#define PCH_PIC_INT_EDGE_HI 0x64 -#define PCH_PIC_INT_CLEAR_LO 0x80 -#define PCH_PIC_INT_CLEAR_HI 0x84 -#define PCH_PIC_AUTO_CTRL0_LO 0xc0 -#define PCH_PIC_AUTO_CTRL0_HI 0xc4 -#define PCH_PIC_AUTO_CTRL1_LO 0xe0 -#define PCH_PIC_AUTO_CTRL1_HI 0xe4 -#define PCH_PIC_ROUTE_ENTRY_OFFSET 0x100 -#define PCH_PIC_ROUTE_ENTRY_END 0x13f -#define PCH_PIC_HTMSI_VEC_OFFSET 0x200 -#define PCH_PIC_HTMSI_VEC_END 0x23f -#define PCH_PIC_INT_STATUS_LO 0x3a0 -#define PCH_PIC_INT_STATUS_HI 0x3a4 -#define PCH_PIC_INT_POL_LO 0x3e0 -#define PCH_PIC_INT_POL_HI 0x3e4 - -#define STATUS_LO_START 0 -#define STATUS_HI_START 0x4 -#define POL_LO_START 0x40 -#define POL_HI_START 0x44 -struct LoongArchPCHPIC { - SysBusDevice parent_obj; - qemu_irq parent_irq[64]; - uint64_t int_mask; /*0x020 interrupt mask register*/ - uint64_t htmsi_en; /*0x040 1=msi*/ - uint64_t intedge; /*0x060 edge=1 level =0*/ - uint64_t intclr; /*0x080 for clean edge int,set 1 clean,set 0 is noused*/ - uint64_t auto_crtl0; /*0x0c0*/ - uint64_t auto_crtl1; /*0x0e0*/ - uint64_t last_intirr; /* edge detection */ - uint64_t intirr; /* 0x380 interrupt request register */ - uint64_t intisr; /* 0x3a0 interrupt service register */ - /* - * 0x3e0 interrupt level polarity selection - * register 0 for high level trigger - */ - uint64_t int_polarity; +struct LoongarchPICState { + LoongArchPICCommonState parent_obj; + int dev_fd; +}; - uint8_t route_entry[64]; /*0x100 - 0x138*/ - uint8_t htmsi_vector[64]; /*0x200 - 0x238*/ +struct LoongarchPICClass { + LoongArchPICCommonClass parent_class; - MemoryRegion iomem32_low; - MemoryRegion iomem32_high; - MemoryRegion iomem8; - unsigned int irq_num; + DeviceRealize parent_realize; + ResettablePhases parent_phases; }; + +void kvm_pic_realize(DeviceState *dev, Error **errp); +int kvm_pic_get(void *opaque); +int kvm_pic_put(void *opaque, int version_id); + +#endif /* HW_LOONGARCH_PCH_PIC_H */ diff --git a/include/hw/intc/loongarch_pic_common.h b/include/hw/intc/loongarch_pic_common.h new file mode 100644 index 00000000000..f774c975d40 --- /dev/null +++ b/include/hw/intc/loongarch_pic_common.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch 7A1000 I/O interrupt controller definitions + * Copyright (c) 2024 Loongson Technology Corporation Limited + */ + +#ifndef HW_LOONGARCH_PIC_COMMON_H +#define HW_LOONGARCH_PIC_COMMON_H + +#include "hw/pci-host/ls7a.h" +#include "hw/sysbus.h" + 
+#define PCH_PIC_INT_ID 0x00 +#define PCH_PIC_INT_ID_VAL 0x7 +#define PCH_PIC_INT_ID_VER 0x1 +#define PCH_PIC_INT_MASK 0x20 +#define PCH_PIC_HTMSI_EN 0x40 +#define PCH_PIC_INT_EDGE 0x60 +#define PCH_PIC_INT_CLEAR 0x80 +#define PCH_PIC_AUTO_CTRL0 0xc0 +#define PCH_PIC_AUTO_CTRL1 0xe0 +#define PCH_PIC_ROUTE_ENTRY 0x100 +#define PCH_PIC_ROUTE_ENTRY_END 0x13f +#define PCH_PIC_HTMSI_VEC 0x200 +#define PCH_PIC_HTMSI_VEC_END 0x23f +#define PCH_PIC_INT_REQUEST 0x380 +#define PCH_PIC_INT_STATUS 0x3a0 +#define PCH_PIC_INT_POL 0x3e0 + +#define TYPE_LOONGARCH_PIC_COMMON "loongarch_pic_common" +OBJECT_DECLARE_TYPE(LoongArchPICCommonState, + LoongArchPICCommonClass, LOONGARCH_PIC_COMMON) + +union LoongArchPIC_ID { + struct { + uint8_t _reserved_0[3]; + uint8_t id; + uint8_t version; + uint8_t _reserved_1; + uint8_t irq_num; + uint8_t _reserved_2; + } QEMU_PACKED desc; + uint64_t data; +}; + +struct LoongArchPICCommonState { + SysBusDevice parent_obj; + + qemu_irq parent_irq[64]; + union LoongArchPIC_ID id; /* 0x00 interrupt ID register */ + uint64_t int_mask; /* 0x020 interrupt mask register */ + uint64_t htmsi_en; /* 0x040 1=msi */ + uint64_t intedge; /* 0x060 edge=1 level=0 */ + uint64_t intclr; /* 0x080 clean edge int, set 1 clean, 0 noused */ + uint64_t auto_crtl0; /* 0x0c0 */ + uint64_t auto_crtl1; /* 0x0e0 */ + uint64_t last_intirr; /* edge detection */ + uint64_t intirr; /* 0x380 interrupt request register */ + uint64_t intisr; /* 0x3a0 interrupt service register */ + /* + * 0x3e0 interrupt level polarity selection + * register 0 for high level trigger + */ + uint64_t int_polarity; + + uint8_t route_entry[64]; /* 0x100 - 0x138 */ + uint8_t htmsi_vector[64]; /* 0x200 - 0x238 */ + + MemoryRegion iomem; + unsigned int irq_num; +}; + +struct LoongArchPICCommonClass { + SysBusDeviceClass parent_class; + + DeviceRealize parent_realize; + ResettablePhases parent_phases; + int (*pre_save)(LoongArchPICCommonState *s); + int (*post_load)(LoongArchPICCommonState *s, int version_id); +}; +#endif /* HW_LOONGARCH_PIC_COMMON_H */ diff --git a/include/hw/intc/loongson_ipi.h b/include/hw/intc/loongson_ipi.h index 3f795edbf3c..4e517cc8dc4 100644 --- a/include/hw/intc/loongson_ipi.h +++ b/include/hw/intc/loongson_ipi.h @@ -8,49 +8,24 @@ #ifndef HW_LOONGSON_IPI_H #define HW_LOONGSON_IPI_H +#include "qom/object.h" +#include "hw/intc/loongson_ipi_common.h" #include "hw/sysbus.h" -/* Mainy used by iocsr read and write */ -#define SMP_IPI_MAILBOX 0x1000ULL -#define CORE_STATUS_OFF 0x0 -#define CORE_EN_OFF 0x4 -#define CORE_SET_OFF 0x8 -#define CORE_CLEAR_OFF 0xc -#define CORE_BUF_20 0x20 -#define CORE_BUF_28 0x28 -#define CORE_BUF_30 0x30 -#define CORE_BUF_38 0x38 -#define IOCSR_IPI_SEND 0x40 -#define IOCSR_MAIL_SEND 0x48 -#define IOCSR_ANY_SEND 0x158 - -#define MAIL_SEND_ADDR (SMP_IPI_MAILBOX + IOCSR_MAIL_SEND) -#define MAIL_SEND_OFFSET 0 -#define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND) - -#define IPI_MBX_NUM 4 - #define TYPE_LOONGSON_IPI "loongson_ipi" -OBJECT_DECLARE_SIMPLE_TYPE(LoongsonIPI, LOONGSON_IPI) +OBJECT_DECLARE_TYPE(LoongsonIPIState, LoongsonIPIClass, LOONGSON_IPI) + +struct LoongsonIPIClass { + LoongsonIPICommonClass parent_class; + + DeviceRealize parent_realize; + DeviceUnrealize parent_unrealize; +}; + +struct LoongsonIPIState { + LoongsonIPICommonState parent_obj; -typedef struct IPICore { - LoongsonIPI *ipi; MemoryRegion *ipi_mmio_mem; - uint32_t status; - uint32_t en; - uint32_t set; - uint32_t clear; - /* 64bit buf divide into 2 32bit buf */ - uint32_t buf[IPI_MBX_NUM * 2]; - qemu_irq 
irq; -} IPICore; - -struct LoongsonIPI { - SysBusDevice parent_obj; - MemoryRegion ipi_iocsr_mem; - MemoryRegion ipi64_iocsr_mem; - uint32_t num_cpu; - IPICore *cpu; }; #endif diff --git a/include/hw/intc/loongson_ipi_common.h b/include/hw/intc/loongson_ipi_common.h new file mode 100644 index 00000000000..e58ce2aa1c4 --- /dev/null +++ b/include/hw/intc/loongson_ipi_common.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Loongson ipi interrupt header files + * + * Copyright (C) 2021 Loongson Technology Corporation Limited + */ + +#ifndef HW_LOONGSON_IPI_COMMON_H +#define HW_LOONGSON_IPI_COMMON_H + +#include "qom/object.h" +#include "hw/sysbus.h" +#include "exec/memattrs.h" + +#define IPI_MBX_NUM 4 + +#define TYPE_LOONGSON_IPI_COMMON "loongson_ipi_common" +OBJECT_DECLARE_TYPE(LoongsonIPICommonState, + LoongsonIPICommonClass, LOONGSON_IPI_COMMON) + +typedef struct IPICore { + LoongsonIPICommonState *ipi; + uint32_t status; + uint32_t en; + uint32_t set; + uint32_t clear; + /* 64bit buf divide into 2 32-bit buf */ + uint32_t buf[IPI_MBX_NUM * 2]; + qemu_irq irq; + uint64_t arch_id; + CPUState *cpu; +} IPICore; + +struct LoongsonIPICommonState { + SysBusDevice parent_obj; + + MemoryRegion ipi_iocsr_mem; + MemoryRegion ipi64_iocsr_mem; + uint32_t num_cpu; + IPICore *cpu; +}; + +struct LoongsonIPICommonClass { + SysBusDeviceClass parent_class; + + DeviceRealize parent_realize; + DeviceUnrealize parent_unrealize; + AddressSpace *(*get_iocsr_as)(CPUState *cpu); + int (*cpu_by_arch_id)(LoongsonIPICommonState *lics, int64_t id, + int *index, CPUState **pcs); + int (*pre_save)(void *opaque); + int (*post_load)(void *opaque, int version_id); +}; + +MemTxResult loongson_ipi_core_readl(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs); +MemTxResult loongson_ipi_core_writel(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs); + +/* Mainy used by iocsr read and write */ +#define SMP_IPI_MAILBOX 0x1000ULL + +#define CORE_STATUS_OFF 0x0 +#define CORE_EN_OFF 0x4 +#define CORE_SET_OFF 0x8 +#define CORE_CLEAR_OFF 0xc +#define CORE_BUF_20 0x20 +#define CORE_BUF_28 0x28 +#define CORE_BUF_30 0x30 +#define CORE_BUF_38 0x38 +#define IOCSR_IPI_SEND 0x40 +#define IOCSR_MAIL_SEND 0x48 +#define IOCSR_ANY_SEND 0x158 + +#define MAIL_SEND_ADDR (SMP_IPI_MAILBOX + IOCSR_MAIL_SEND) +#define MAIL_SEND_OFFSET 0 +#define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND) + +#endif diff --git a/include/hw/intc/riscv_aplic.h b/include/hw/intc/riscv_aplic.h index de8532fbc3a..489b9133c26 100644 --- a/include/hw/intc/riscv_aplic.h +++ b/include/hw/intc/riscv_aplic.h @@ -68,9 +68,17 @@ struct RISCVAPLICState { uint32_t num_irqs; bool msimode; bool mmode; + + /* To support KVM aia=aplic-imsic with irqchip split mode */ + bool kvm_splitmode; + uint32_t kvm_msicfgaddr; + uint32_t kvm_msicfgaddrH; }; void riscv_aplic_add_child(DeviceState *parent, DeviceState *child); +bool riscv_is_kvm_aia_aplic_imsic(bool msimode); +bool riscv_use_emulated_aplic(bool msimode); +void riscv_aplic_set_kvm_msicfgaddr(RISCVAPLICState *aplic, hwaddr addr); DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size, uint32_t hartid_base, uint32_t num_harts, uint32_t num_sources, diff --git a/include/hw/iommu.h b/include/hw/iommu.h new file mode 100644 index 00000000000..52e7f0cd963 --- /dev/null +++ b/include/hw/iommu.h @@ -0,0 +1,20 @@ +/* + * General vIOMMU flags + * + * Copyright (C) 2025 Intel Corporation. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_IOMMU_H +#define HW_IOMMU_H + +#include "qemu/bitops.h" + +enum { + /* Nesting parent HWPT will be reused by vIOMMU to create nested HWPT */ + VIOMMU_FLAG_WANT_NESTING_PARENT = BIT_ULL(0), + VIOMMU_FLAG_PASID_SUPPORTED = BIT_ULL(1), +}; + +#endif /* HW_IOMMU_H */ diff --git a/include/hw/ipack/ipack.h b/include/hw/ipack/ipack.h index cbcdda509d3..00f397fd020 100644 --- a/include/hw/ipack/ipack.h +++ b/include/hw/ipack/ipack.h @@ -12,6 +12,7 @@ #define QEMU_IPACK_H #include "hw/qdev-core.h" +#include "hw/irq.h" #include "qom/object.h" @@ -19,10 +20,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(IPackBus, IPACK_BUS) struct IPackBus { - /*< private >*/ BusState parent_obj; - /* All fields are private */ uint8_t n_slots; uint8_t free_slot; qemu_irq_handler set_irq; @@ -58,13 +57,11 @@ struct IPackDeviceClass { }; struct IPackDevice { - /*< private >*/ DeviceState parent_obj; - /*< public >*/ int32_t slot; /* IRQ objects for the IndustryPack INT0# and INT1# */ - qemu_irq *irq; + IRQState irq[2]; }; extern const VMStateDescription vmstate_ipack_device; diff --git a/include/hw/ipmi/ipmi.h b/include/hw/ipmi/ipmi.h index 77a7213ed93..cd581aa1342 100644 --- a/include/hw/ipmi/ipmi.h +++ b/include/hw/ipmi/ipmi.h @@ -25,7 +25,7 @@ #ifndef HW_IPMI_H #define HW_IPMI_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/qdev-core.h" #include "qom/object.h" @@ -41,6 +41,15 @@ enum ipmi_op { IPMI_SEND_NMI }; +/* Channel properties */ +#define IPMI_CHANNEL_IPMB 0x00 +#define IPMI_CHANNEL_SYSTEM 0x0f +#define IPMI_CHANNEL_MEDIUM_IPMB 0x01 +#define IPMI_CHANNEL_MEDIUM_SYSTEM 0x0c +#define IPMI_CHANNEL_PROTOCOL_IPMB 0x01 +#define IPMI_CHANNEL_PROTOCOL_KCS 0x05 +#define IPMI_CHANNEL_PROTOCOL_BT_15 0x08 + #define IPMI_CC_INVALID_CMD 0xc1 #define IPMI_CC_COMMAND_INVALID_FOR_LUN 0xc2 #define IPMI_CC_TIMEOUT 0xc3 @@ -76,6 +85,7 @@ typedef struct IPMIFwInfo { int interface_type; uint8_t ipmi_spec_major_revision; uint8_t ipmi_spec_minor_revision; + uint8_t ipmi_channel_protocol; uint8_t i2c_slave_address; uint32_t uuid; @@ -90,6 +100,11 @@ typedef struct IPMIFwInfo { } memspace; int interrupt_number; + enum { + IPMI_NO_IRQ = 0, + IPMI_ISA_IRQ, + IPMI_PCI_IRQ, + } irq_source; enum { IPMI_LEVEL_IRQ, IPMI_EDGE_IRQ diff --git a/include/hw/irq.h b/include/hw/irq.h index 645b73d2512..b3012237acd 100644 --- a/include/hw/irq.h +++ b/include/hw/irq.h @@ -1,9 +1,20 @@ #ifndef QEMU_IRQ_H #define QEMU_IRQ_H +#include "qom/object.h" + /* Generic IRQ/GPIO pin infrastructure. */ #define TYPE_IRQ "irq" +OBJECT_DECLARE_SIMPLE_TYPE(IRQState, IRQ) + +struct IRQState { + Object parent_obj; + + qemu_irq_handler handler; + void *opaque; + int n; +}; void qemu_set_irq(qemu_irq irq, int level); @@ -23,6 +34,24 @@ static inline void qemu_irq_pulse(qemu_irq irq) qemu_set_irq(irq, 0); } +/* + * Init a single IRQ. The irq is assigned with a handler, an opaque data + * and the interrupt number. + */ +void qemu_init_irq(IRQState *irq, qemu_irq_handler handler, void *opaque, + int n); + +/** + * qemu_init_irqs: Initialize an array of IRQs. + * + * @irq: Array of IRQs to initialize + * @count: number of IRQs to initialize + * @handler: handler to assign to each IRQ + * @opaque: opaque data to pass to @handler + */ +void qemu_init_irqs(IRQState irq[], size_t count, + qemu_irq_handler handler, void *opaque); + /* Returns an array of N IRQs. Each IRQ is assigned the argument handler and * opaque data. 
*/ diff --git a/include/hw/isa/apm.h b/include/hw/isa/apm.h index b6e070c00e9..0834539045a 100644 --- a/include/hw/isa/apm.h +++ b/include/hw/isa/apm.h @@ -1,7 +1,7 @@ #ifndef APM_H #define APM_H -#include "exec/memory.h" +#include "system/memory.h" #define APM_CNT_IOPORT 0xb2 #define ACPI_PORT_SMI_CMD APM_CNT_IOPORT diff --git a/include/hw/isa/isa.h b/include/hw/isa/isa.h index 40d6224a4e6..a82c5f1004f 100644 --- a/include/hw/isa/isa.h +++ b/include/hw/isa/isa.h @@ -3,8 +3,8 @@ /* ISA bus */ -#include "exec/memory.h" -#include "exec/ioport.h" +#include "system/memory.h" +#include "system/ioport.h" #include "hw/qdev-core.h" #include "qom/object.h" diff --git a/include/hw/isa/superio.h b/include/hw/isa/superio.h index 0dc45104d47..14d051348b4 100644 --- a/include/hw/isa/superio.h +++ b/include/hw/isa/superio.h @@ -10,7 +10,7 @@ #ifndef HW_ISA_SUPERIO_H #define HW_ISA_SUPERIO_H -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/isa/isa.h" #include "qom/object.h" diff --git a/include/hw/loader-fit.h b/include/hw/loader-fit.h index 0832e379dc9..9a43490ed63 100644 --- a/include/hw/loader-fit.h +++ b/include/hw/loader-fit.h @@ -30,12 +30,27 @@ struct fit_loader_match { struct fit_loader { const struct fit_loader_match *matches; hwaddr (*addr_to_phys)(void *opaque, uint64_t addr); - const void *(*fdt_filter)(void *opaque, const void *fdt, - const void *match_data, hwaddr *load_addr); + void *(*fdt_filter)(void *opaque, const void *fdt, + const void *match_data, hwaddr *load_addr); const void *(*kernel_filter)(void *opaque, const void *kernel, hwaddr *load_addr, hwaddr *entry_addr); }; -int load_fit(const struct fit_loader *ldr, const char *filename, void *opaque); +/** + * load_fit: load a FIT format image + * @ldr: structure defining board specific properties and hooks + * @filename: image to load + * @pfdt: pointer to update with address of FDT blob + * @opaque: opaque value passed back to the hook functions in @ldr + * Returns: 0 on success, or a negative errno on failure + * + * @pfdt is used to tell the caller about the FDT blob. On return, it + * has been set to point to the FDT blob, and it is now the caller's + * responsibility to free that memory with g_free(). Usually the caller + * will want to pass in &machine->fdt here, to record the FDT blob for + * the dumpdtb option and QMP/HMP commands. + */ +int load_fit(const struct fit_loader *ldr, const char *filename, void **pfdt, + void *opaque); #endif /* HW_LOADER_FIT_H */ diff --git a/include/hw/loader.h b/include/hw/loader.h index 7f6d06b956f..c96b5e141c6 100644 --- a/include/hw/loader.h +++ b/include/hw/loader.h @@ -101,7 +101,7 @@ ssize_t load_image_gzipped_buffer(const char *filename, uint64_t max_sz, * Returns the size of the decompressed payload if decompression was performed * successfully. */ -ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size); +ssize_t unpack_efi_zboot_image(uint8_t **buffer, ssize_t *size); #define ELF_LOAD_FAILED -1 #define ELF_LOAD_NOT_ELF -2 @@ -120,7 +120,7 @@ const char *load_elf_strerror(ssize_t error); * @lowaddr: Populated with lowest loaded address. Ignored if NULL. * @highaddr: Populated with highest loaded address. Ignored if NULL. * @pflags: Populated with ELF processor-specific flags. Ignore if NULL. - * @bigendian: Expected ELF endianness. 0 for LE otherwise BE + * @elf_data_order: Expected ELF endianness (ELFDATA2LSB or ELFDATA2MSB). 
* @elf_machine: Expected ELF machine type * @clear_lsb: Set to mask off LSB of addresses (Some architectures use * this for non-address data) @@ -151,30 +151,18 @@ ssize_t load_elf_ram_sym(const char *filename, uint64_t (*translate_fn)(void *, uint64_t), void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, uint64_t *highaddr, - uint32_t *pflags, int big_endian, int elf_machine, + uint32_t *pflags, int elf_data_order, int elf_machine, int clear_lsb, int data_swab, AddressSpace *as, bool load_rom, symbol_fn_t sym_cb); -/** load_elf_ram: - * Same as load_elf_ram_sym(), but doesn't allow the caller to specify a - * symbol callback function - */ -ssize_t load_elf_ram(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, - uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags, - int big_endian, int elf_machine, int clear_lsb, - int data_swab, AddressSpace *as, bool load_rom); - /** load_elf_as: - * Same as load_elf_ram(), but always loads the elf as ROM + * Same as load_elf_ram_sym(), but always loads the elf as ROM */ ssize_t load_elf_as(const char *filename, uint64_t (*elf_note_fn)(void *, void *, bool), uint64_t (*translate_fn)(void *, uint64_t), void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, + uint64_t *highaddr, uint32_t *pflags, int elf_data_order, int elf_machine, int clear_lsb, int data_swab, AddressSpace *as); @@ -186,7 +174,7 @@ ssize_t load_elf(const char *filename, uint64_t (*elf_note_fn)(void *, void *, bool), uint64_t (*translate_fn)(void *, uint64_t), void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, + uint64_t *highaddr, uint32_t *pflags, int elf_data_order, int elf_machine, int clear_lsb, int data_swab); /** load_elf_hdr: @@ -202,7 +190,7 @@ ssize_t load_elf(const char *filename, void load_elf_hdr(const char *filename, void *hdr, bool *is64, Error **errp); ssize_t load_aout(const char *filename, hwaddr addr, int max_sz, - int bswap_needed, hwaddr target_page_size); + bool big_endian, hwaddr target_page_size); #define LOAD_UIMAGE_LOADADDR_INVALID (-1) @@ -282,8 +270,6 @@ int rom_add_elf_program(const char *name, GMappedFile *mapped_file, void *data, AddressSpace *as); int rom_check_and_register_reset(void); void rom_set_fw(FWCfgState *f); -void rom_set_order_override(int order); -void rom_reset_order_override(void); /** * rom_transaction_begin: diff --git a/include/hw/loongarch/boot.h b/include/hw/loongarch/boot.h index b3b870df1f0..9819f7fbe30 100644 --- a/include/hw/loongarch/boot.h +++ b/include/hw/loongarch/boot.h @@ -102,11 +102,10 @@ struct loongarch_boot_info { const char *kernel_cmdline; const char *initrd_filename; uint64_t a0, a1, a2; + uint64_t initrd_addr; + uint64_t initrd_size; }; -extern struct memmap_entry *memmap_table; -extern unsigned memmap_entries; - struct memmap_entry { uint64_t address; uint64_t length; diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h index 603c1cebdb2..602feab0f0d 100644 --- a/include/hw/loongarch/virt.h +++ b/include/hw/loongarch/virt.h @@ -8,10 +8,8 @@ #ifndef HW_LOONGARCH_H #define HW_LOONGARCH_H -#include "target/loongarch/cpu.h" #include "hw/boards.h" #include "qemu/queue.h" -#include "hw/intc/loongson_ipi.h" #include "hw/block/flash.h" #include "hw/loongarch/boot.h" @@ -32,6 +30,7 @@ #define VIRT_GED_EVT_ADDR 0x100e0000 #define VIRT_GED_MEM_ADDR 
(VIRT_GED_EVT_ADDR + ACPI_GED_EVT_SEL_LEN) #define VIRT_GED_REG_ADDR (VIRT_GED_MEM_ADDR + MEMORY_HOTPLUG_IO_LEN) +#define VIRT_GED_CPUHP_ADDR (VIRT_GED_REG_ADDR + ACPI_GED_REG_COUNT) #define COMMAND_LINE_SIZE 512 @@ -62,9 +61,23 @@ struct LoongArchVirtMachineState { MemoryRegion iocsr_mem; AddressSpace as_iocsr; struct loongarch_boot_info bootinfo; + DeviceState *ipi; + DeviceState *extioi; + struct memmap_entry *memmap_table; + unsigned int memmap_entries; }; #define TYPE_LOONGARCH_VIRT_MACHINE MACHINE_TYPE_NAME("virt") OBJECT_DECLARE_SIMPLE_TYPE(LoongArchVirtMachineState, LOONGARCH_VIRT_MACHINE) -void loongarch_acpi_setup(LoongArchVirtMachineState *lvms); +void virt_acpi_setup(LoongArchVirtMachineState *lvms); +void virt_fdt_setup(LoongArchVirtMachineState *lvms); + +static inline bool virt_is_veiointc_enabled(LoongArchVirtMachineState *lvms) +{ + if (lvms->veiointc == ON_OFF_AUTO_OFF) { + return false; + } + return true; +} + #endif diff --git a/include/hw/m68k/q800.h b/include/hw/m68k/q800.h index 34365c98608..9caaed9692b 100644 --- a/include/hw/m68k/q800.h +++ b/include/hw/m68k/q800.h @@ -26,7 +26,7 @@ #include "hw/boards.h" #include "qom/object.h" #include "target/m68k/cpu-qom.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/m68k/q800-glue.h" #include "hw/misc/mac_via.h" #include "hw/net/dp8393x.h" diff --git a/include/hw/mem/npcm7xx_mc.h b/include/hw/mem/npcm7xx_mc.h index 7ed38be2431..568cc35fdd7 100644 --- a/include/hw/mem/npcm7xx_mc.h +++ b/include/hw/mem/npcm7xx_mc.h @@ -16,7 +16,7 @@ #ifndef NPCM7XX_MC_H #define NPCM7XX_MC_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/sysbus.h" /** diff --git a/include/hw/mem/pc-dimm.h b/include/hw/mem/pc-dimm.h index fe0f3ea963b..e0dbdd43dca 100644 --- a/include/hw/mem/pc-dimm.h +++ b/include/hw/mem/pc-dimm.h @@ -16,7 +16,7 @@ #ifndef QEMU_PC_DIMM_H #define QEMU_PC_DIMM_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/qdev-core.h" #include "qom/object.h" diff --git a/include/hw/mips/cps.h b/include/hw/mips/cps.h index 04d636246ab..05ef9f76b74 100644 --- a/include/hw/mips/cps.h +++ b/include/hw/mips/cps.h @@ -38,6 +38,7 @@ struct MIPSCPSState { uint32_t num_vp; uint32_t num_irq; char *cpu_type; + bool cpu_is_bigendian; MemoryRegion container; MIPSGCRState gcr; diff --git a/include/hw/mips/mips.h b/include/hw/mips/mips.h index 101799f7d3e..1f3672ba5f5 100644 --- a/include/hw/mips/mips.h +++ b/include/hw/mips/mips.h @@ -7,7 +7,7 @@ /* Kernels can be configured with 64KB pages */ #define INITRD_PAGE_SIZE (64 * KiB) -#include "exec/memory.h" +#include "system/memory.h" /* bonito.c */ PCIBus *bonito_init(qemu_irq *pic); diff --git a/include/hw/misc/aspeed_hace.h b/include/hw/misc/aspeed_hace.h index ecb1b67de81..d5d07c6c021 100644 --- a/include/hw/misc/aspeed_hace.h +++ b/include/hw/misc/aspeed_hace.h @@ -1,6 +1,7 @@ /* * ASPEED Hash and Crypto Engine * + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates * Copyright (C) 2021 IBM Corp. 
* * SPDX-License-Identifier: GPL-2.0-or-later @@ -10,16 +11,17 @@ #define ASPEED_HACE_H #include "hw/sysbus.h" +#include "crypto/hash.h" #define TYPE_ASPEED_HACE "aspeed.hace" #define TYPE_ASPEED_AST2400_HACE TYPE_ASPEED_HACE "-ast2400" #define TYPE_ASPEED_AST2500_HACE TYPE_ASPEED_HACE "-ast2500" #define TYPE_ASPEED_AST2600_HACE TYPE_ASPEED_HACE "-ast2600" #define TYPE_ASPEED_AST1030_HACE TYPE_ASPEED_HACE "-ast1030" +#define TYPE_ASPEED_AST2700_HACE TYPE_ASPEED_HACE "-ast2700" OBJECT_DECLARE_TYPE(AspeedHACEState, AspeedHACEClass, ASPEED_HACE) -#define ASPEED_HACE_NR_REGS (0x64 >> 2) #define ASPEED_HACE_MAX_SG 256 /* max number of entries */ struct AspeedHACEState { @@ -28,23 +30,30 @@ struct AspeedHACEState { MemoryRegion iomem; qemu_irq irq; - struct iovec iov_cache[ASPEED_HACE_MAX_SG]; - uint32_t regs[ASPEED_HACE_NR_REGS]; + uint32_t *regs; uint32_t total_req_len; - uint32_t iov_count; MemoryRegion *dram_mr; AddressSpace dram_as; + + QCryptoHash *hash_ctx; }; struct AspeedHACEClass { SysBusDeviceClass parent_class; + const MemoryRegionOps *reg_ops; uint32_t src_mask; uint32_t dest_mask; uint32_t key_mask; uint32_t hash_mask; + uint64_t nr_regs; + bool raise_crypt_interrupt_workaround; + uint32_t src_hi_mask; + uint32_t dest_hi_mask; + uint32_t key_hi_mask; + bool has_dma64; }; #endif /* ASPEED_HACE_H */ diff --git a/include/hw/misc/aspeed_scu.h b/include/hw/misc/aspeed_scu.h index 356be95e458..684b48b7222 100644 --- a/include/hw/misc/aspeed_scu.h +++ b/include/hw/misc/aspeed_scu.h @@ -54,6 +54,8 @@ struct AspeedSCUState { #define AST2700_A0_SILICON_REV 0x06000103U #define AST2720_A0_SILICON_REV 0x06000203U #define AST2750_A0_SILICON_REV 0x06000003U +#define AST2700_A1_SILICON_REV 0x06010103U +#define AST2750_A1_SILICON_REV 0x06010003U #define ASPEED_IS_AST2500(si_rev) ((((si_rev) >> 24) & 0xff) == 0x04) diff --git a/include/hw/misc/auxbus.h b/include/hw/misc/auxbus.h index 03cacdee426..ccd18ce2095 100644 --- a/include/hw/misc/auxbus.h +++ b/include/hw/misc/auxbus.h @@ -25,7 +25,7 @@ #ifndef HW_MISC_AUXBUS_H #define HW_MISC_AUXBUS_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/qdev-core.h" #include "qom/object.h" diff --git a/include/hw/misc/cbus.h b/include/hw/misc/cbus.h deleted file mode 100644 index 5334984020d..00000000000 --- a/include/hw/misc/cbus.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * CBUS three-pin bus and the Retu / Betty / Tahvo / Vilma / Avilma / - * Hinku / Vinku / Ahne / Pihi chips used in various Nokia platforms. - * Based on reverse-engineering of a linux driver. - * - * Copyright (C) 2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- */ - -#ifndef HW_MISC_CBUS_H -#define HW_MISC_CBUS_H - - -typedef struct { - qemu_irq clk; - qemu_irq dat; - qemu_irq sel; -} CBus; - -CBus *cbus_init(qemu_irq dat_out); -void cbus_attach(CBus *bus, void *slave_opaque); - -void *retu_init(qemu_irq irq, int vilma); -void *tahvo_init(qemu_irq irq, int betty); - -void retu_key_event(void *retu, int state); - -#endif diff --git a/include/hw/misc/imx8mp_analog.h b/include/hw/misc/imx8mp_analog.h new file mode 100644 index 00000000000..955f03215a0 --- /dev/null +++ b/include/hw/misc/imx8mp_analog.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2025 Bernhard Beschow + * + * i.MX8MP ANALOG IP block emulation code + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef IMX8MP_ANALOG_H +#define IMX8MP_ANALOG_H + +#include "qom/object.h" +#include "hw/sysbus.h" + +enum IMX8MPAnalogRegisters { + ANALOG_AUDIO_PLL1_GEN_CTRL = 0x000 / 4, + ANALOG_AUDIO_PLL1_FDIV_CTL0 = 0x004 / 4, + ANALOG_AUDIO_PLL1_FDIV_CTL1 = 0x008 / 4, + ANALOG_AUDIO_PLL1_SSCG_CTRL = 0x00c / 4, + ANALOG_AUDIO_PLL1_MNIT_CTRL = 0x010 / 4, + ANALOG_AUDIO_PLL2_GEN_CTRL = 0x014 / 4, + ANALOG_AUDIO_PLL2_FDIV_CTL0 = 0x018 / 4, + ANALOG_AUDIO_PLL2_FDIV_CTL1 = 0x01c / 4, + ANALOG_AUDIO_PLL2_SSCG_CTRL = 0x020 / 4, + ANALOG_AUDIO_PLL2_MNIT_CTRL = 0x024 / 4, + ANALOG_VIDEO_PLL1_GEN_CTRL = 0x028 / 4, + ANALOG_VIDEO_PLL1_FDIV_CTL0 = 0x02c / 4, + ANALOG_VIDEO_PLL1_FDIV_CTL1 = 0x030 / 4, + ANALOG_VIDEO_PLL1_SSCG_CTRL = 0x034 / 4, + ANALOG_VIDEO_PLL1_MNIT_CTRL = 0x038 / 4, + ANALOG_DRAM_PLL_GEN_CTRL = 0x050 / 4, + ANALOG_DRAM_PLL_FDIV_CTL0 = 0x054 / 4, + ANALOG_DRAM_PLL_FDIV_CTL1 = 0x058 / 4, + ANALOG_DRAM_PLL_SSCG_CTRL = 0x05c / 4, + ANALOG_DRAM_PLL_MNIT_CTRL = 0x060 / 4, + ANALOG_GPU_PLL_GEN_CTRL = 0x064 / 4, + ANALOG_GPU_PLL_FDIV_CTL0 = 0x068 / 4, + ANALOG_GPU_PLL_LOCKD_CTRL = 0x06c / 4, + ANALOG_GPU_PLL_MNIT_CTRL = 0x070 / 4, + ANALOG_VPU_PLL_GEN_CTRL = 0x074 / 4, + ANALOG_VPU_PLL_FDIV_CTL0 = 0x078 / 4, + ANALOG_VPU_PLL_LOCKD_CTRL = 0x07c / 4, + ANALOG_VPU_PLL_MNIT_CTRL = 0x080 / 4, + ANALOG_ARM_PLL_GEN_CTRL = 0x084 / 4, + ANALOG_ARM_PLL_FDIV_CTL0 = 0x088 / 4, + ANALOG_ARM_PLL_LOCKD_CTRL = 0x08c / 4, + ANALOG_ARM_PLL_MNIT_CTRL = 0x090 / 4, + ANALOG_SYS_PLL1_GEN_CTRL = 0x094 / 4, + ANALOG_SYS_PLL1_FDIV_CTL0 = 0x098 / 4, + ANALOG_SYS_PLL1_LOCKD_CTRL = 0x09c / 4, + ANALOG_SYS_PLL1_MNIT_CTRL = 0x100 / 4, + ANALOG_SYS_PLL2_GEN_CTRL = 0x104 / 4, + ANALOG_SYS_PLL2_FDIV_CTL0 = 0x108 / 4, + ANALOG_SYS_PLL2_LOCKD_CTRL = 0x10c / 4, + ANALOG_SYS_PLL2_MNIT_CTRL = 0x110 / 4, + ANALOG_SYS_PLL3_GEN_CTRL = 0x114 / 4, + ANALOG_SYS_PLL3_FDIV_CTL0 = 0x118 / 4, + ANALOG_SYS_PLL3_LOCKD_CTRL = 0x11c / 4, + ANALOG_SYS_PLL3_MNIT_CTRL = 0x120 / 4, + ANALOG_OSC_MISC_CFG = 0x124 / 4, + ANALOG_ANAMIX_PLL_MNIT_CTL = 0x128 / 4, + + ANALOG_DIGPROG = 0x800 / 4, + ANALOG_MAX, +}; + +#define TYPE_IMX8MP_ANALOG "imx8mp.analog" +OBJECT_DECLARE_SIMPLE_TYPE(IMX8MPAnalogState, IMX8MP_ANALOG) + +struct IMX8MPAnalogState { + SysBusDevice parent_obj; + + struct { + MemoryRegion container; + MemoryRegion analog; + } mmio; + + uint32_t analog[ANALOG_MAX]; +}; + +#endif /* IMX8MP_ANALOG_H */ diff --git a/include/hw/misc/imx8mp_ccm.h b/include/hw/misc/imx8mp_ccm.h new file mode 100644 index 00000000000..685c8582ff8 --- /dev/null +++ b/include/hw/misc/imx8mp_ccm.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2025 Bernhard Beschow + * + * i.MX 8M Plus CCM IP block emulation code + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef IMX8MP_CCM_H +#define IMX8MP_CCM_H + +#include "hw/misc/imx_ccm.h" +#include "qom/object.h" + +enum 
IMX8MPCCMRegisters { + CCM_MAX = 0xc6fc / sizeof(uint32_t) + 1, +}; + +#define TYPE_IMX8MP_CCM "imx8mp.ccm" +OBJECT_DECLARE_SIMPLE_TYPE(IMX8MPCCMState, IMX8MP_CCM) + +struct IMX8MPCCMState { + IMXCCMState parent_obj; + + MemoryRegion iomem; + + uint32_t ccm[CCM_MAX]; +}; + +#endif /* IMX8MP_CCM_H */ diff --git a/include/hw/misc/ivshmem-flat.h b/include/hw/misc/ivshmem-flat.h new file mode 100644 index 00000000000..3eca99004ef --- /dev/null +++ b/include/hw/misc/ivshmem-flat.h @@ -0,0 +1,86 @@ +/* + * Inter-VM Shared Memory Flat Device + * + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (c) 2023 Linaro Ltd. + * Authors: + * Gustavo Romero + * + */ + +#ifndef IVSHMEM_FLAT_H +#define IVSHMEM_FLAT_H + +#include "qemu/queue.h" +#include "qemu/event_notifier.h" +#include "chardev/char-fe.h" +#include "system/memory.h" +#include "qom/object.h" +#include "hw/sysbus.h" + +#define IVSHMEM_MAX_VECTOR_NUM 64 + +/* + * QEMU interface: + * + QOM property "chardev" is the character device id of the ivshmem server + * socket + * + QOM property "shmem-size" sets the size of the RAM region shared between + * the device and the ivshmem server + * + sysbus MMIO region 0: device I/O mapped registers + * + sysbus MMIO region 1: shared memory with ivshmem server + * + sysbus IRQ 0: single output interrupt + */ + +#define TYPE_IVSHMEM_FLAT "ivshmem-flat" +typedef struct IvshmemFTState IvshmemFTState; + +DECLARE_INSTANCE_CHECKER(IvshmemFTState, IVSHMEM_FLAT, TYPE_IVSHMEM_FLAT) + +/* Ivshmem registers. See docs/specs/ivshmem-spec.rst for details. */ +enum ivshmem_registers { + INTMASK = 0, + INTSTATUS = 4, + IVPOSITION = 8, + DOORBELL = 12, +}; + +typedef struct VectorInfo { + EventNotifier event_notifier; + uint16_t id; +} VectorInfo; + +typedef struct IvshmemPeer { + QTAILQ_ENTRY(IvshmemPeer) next; + VectorInfo vector[IVSHMEM_MAX_VECTOR_NUM]; + int vector_counter; + uint16_t id; +} IvshmemPeer; + +struct IvshmemFTState { + SysBusDevice parent_obj; + + uint64_t msg_buf; + int msg_buffered_bytes; + + QTAILQ_HEAD(, IvshmemPeer) peer; + IvshmemPeer own; + + CharBackend server_chr; + + /* IRQ */ + qemu_irq irq; + + /* I/O registers */ + MemoryRegion iomem; + uint32_t intmask; + uint32_t intstatus; + uint32_t ivposition; + uint32_t doorbell; + + /* Shared memory */ + MemoryRegion shmem; + int shmem_fd; + uint32_t shmem_size; +}; + +#endif /* IVSHMEM_FLAT_H */ diff --git a/include/hw/misc/lasi.h b/include/hw/misc/lasi.h index f01c0f680a4..0bdfb11b501 100644 --- a/include/hw/misc/lasi.h +++ b/include/hw/misc/lasi.h @@ -12,7 +12,7 @@ #ifndef LASI_H #define LASI_H -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/pci/pci_host.h" #include "hw/boards.h" diff --git a/include/hw/misc/mac_via.h b/include/hw/misc/mac_via.h index 63cdcf7c69c..6a152281505 100644 --- a/include/hw/misc/mac_via.h +++ b/include/hw/misc/mac_via.h @@ -9,7 +9,7 @@ #ifndef HW_MISC_MAC_VIA_H #define HW_MISC_MAC_VIA_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/sysbus.h" #include "hw/misc/mos6522.h" #include "hw/input/adb.h" diff --git a/include/hw/misc/max78000_aes.h b/include/hw/misc/max78000_aes.h new file mode 100644 index 00000000000..407c45ef61e --- /dev/null +++ b/include/hw/misc/max78000_aes.h @@ -0,0 +1,68 @@ +/* + * MAX78000 AES + * + * Copyright (c) 2025 Jackson Donaldson + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef HW_MAX78000_AES_H +#define HW_MAX78000_AES_H + +#include "hw/sysbus.h" +#include "crypto/aes.h" +#include "qom/object.h" + +#define 
TYPE_MAX78000_AES "max78000-aes" +OBJECT_DECLARE_SIMPLE_TYPE(Max78000AesState, MAX78000_AES) + +#define CTRL 0 +#define STATUS 4 +#define INTFL 8 +#define INTEN 0xc +#define FIFO 0x10 + +#define KEY_BASE 0x400 +#define KEY_END 0x420 + +/* CTRL */ +#define TYPE (1 << 9 | 1 << 8) +#define KEY_SIZE (1 << 7 | 1 << 6) +#define OUTPUT_FLUSH (1 << 5) +#define INPUT_FLUSH (1 << 4) +#define START (1 << 3) + +#define AES_EN (1 << 0) + +/* STATUS */ +#define OUTPUT_FULL (1 << 4) +#define OUTPUT_EMPTY (1 << 3) +#define INPUT_FULL (1 << 2) +#define INPUT_EMPTY (1 << 1) +#define BUSY (1 << 0) + +/* INTFL*/ +#define DONE (1 << 0) + +struct Max78000AesState { + SysBusDevice parent_obj; + + MemoryRegion mmio; + + uint32_t ctrl; + uint32_t status; + uint32_t intfl; + uint32_t inten; + uint32_t data_index; + uint8_t data[16]; + + uint8_t key[32]; + AES_KEY internal_key; + + uint32_t result_index; + uint8_t result[16]; + + + qemu_irq irq; +}; + +#endif diff --git a/include/hw/misc/max78000_gcr.h b/include/hw/misc/max78000_gcr.h new file mode 100644 index 00000000000..d5858a40f3b --- /dev/null +++ b/include/hw/misc/max78000_gcr.h @@ -0,0 +1,131 @@ +/* + * MAX78000 Global Control Register + * + * Copyright (c) 2025 Jackson Donaldson + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef HW_MAX78000_GCR_H +#define HW_MAX78000_GCR_H + +#include "hw/sysbus.h" +#include "qom/object.h" + +#define TYPE_MAX78000_GCR "max78000-gcr" +OBJECT_DECLARE_SIMPLE_TYPE(Max78000GcrState, MAX78000_GCR) + +#define SYSCTRL 0x0 +#define RST0 0x4 +#define CLKCTRL 0x8 +#define PM 0xc +#define PCLKDIV 0x18 +#define PCLKDIS0 0x24 +#define MEMCTRL 0x28 +#define MEMZ 0x2c +#define SYSST 0x40 +#define RST1 0x44 +#define PCKDIS1 0x48 +#define EVENTEN 0x4c +#define REVISION 0x50 +#define SYSIE 0x54 +#define ECCERR 0x64 +#define ECCED 0x68 +#define ECCIE 0x6c +#define ECCADDR 0x70 + +/* RST0 */ +#define SYSTEM_RESET (1 << 31) +#define PERIPHERAL_RESET (1 << 30) +#define SOFT_RESET (1 << 29) +#define UART2_RESET (1 << 28) + +#define ADC_RESET (1 << 26) +#define CNN_RESET (1 << 25) +#define TRNG_RESET (1 << 24) + +#define RTC_RESET (1 << 17) +#define I2C0_RESET (1 << 16) + +#define SPI1_RESET (1 << 13) +#define UART1_RESET (1 << 12) +#define UART0_RESET (1 << 11) + +#define TMR3_RESET (1 << 8) +#define TMR2_RESET (1 << 7) +#define TMR1_RESET (1 << 6) +#define TMR0_RESET (1 << 5) + +#define GPIO1_RESET (1 << 3) +#define GPIO0_RESET (1 << 2) +#define WDT0_RESET (1 << 1) +#define DMA_RESET (1 << 0) + +/* CLKCTRL */ +#define SYSCLK_RDY (1 << 13) + +/* MEMZ */ +#define ram0 (1 << 0) +#define ram1 (1 << 1) +#define ram2 (1 << 2) +#define ram3 (1 << 3) + +/* RST1 */ +#define CPU1_RESET (1 << 31) + +#define SIMO_RESET (1 << 25) +#define DVS_RESET (1 << 24) + +#define I2C2_RESET (1 << 20) +#define I2S_RESET (1 << 19) + +#define SMPHR_RESET (1 << 16) + +#define SPI0_RESET (1 << 11) +#define AES_RESET (1 << 10) +#define CRC_RESET (1 << 9) + +#define PT_RESET (1 << 1) +#define I2C1_RESET (1 << 0) + + +#define SYSRAM0_START 0x20000000 +#define SYSRAM1_START 0x20008000 +#define SYSRAM2_START 0x20010000 +#define SYSRAM3_START 0x2001C000 + +struct Max78000GcrState { + SysBusDevice parent_obj; + + MemoryRegion mmio; + + uint32_t sysctrl; + uint32_t rst0; + uint32_t clkctrl; + uint32_t pm; + uint32_t pclkdiv; + uint32_t pclkdis0; + uint32_t memctrl; + uint32_t memz; + uint32_t sysst; + uint32_t rst1; + uint32_t pckdis1; + uint32_t eventen; + uint32_t revision; + uint32_t sysie; + uint32_t eccerr; + uint32_t ecced; + uint32_t eccie; + uint32_t eccaddr; + 
+ MemoryRegion *sram; + AddressSpace sram_as; + + DeviceState *uart0; + DeviceState *uart1; + DeviceState *uart2; + DeviceState *trng; + DeviceState *aes; + +}; + +#endif diff --git a/include/hw/misc/max78000_icc.h b/include/hw/misc/max78000_icc.h new file mode 100644 index 00000000000..6fe2bb7a156 --- /dev/null +++ b/include/hw/misc/max78000_icc.h @@ -0,0 +1,33 @@ +/* + * MAX78000 Instruction Cache + * + * Copyright (c) 2025 Jackson Donaldson + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_MAX78000_ICC_H +#define HW_MAX78000_ICC_H + +#include "hw/sysbus.h" +#include "qom/object.h" + +#define TYPE_MAX78000_ICC "max78000-icc" +OBJECT_DECLARE_SIMPLE_TYPE(Max78000IccState, MAX78000_ICC) + +#define ICC_INFO 0x0 +#define ICC_SZ 0x4 +#define ICC_CTRL 0x100 +#define ICC_INVALIDATE 0x700 + +struct Max78000IccState { + SysBusDevice parent_obj; + + MemoryRegion mmio; + + uint32_t info; + uint32_t sz; + uint32_t ctrl; +}; + +#endif diff --git a/include/hw/misc/max78000_trng.h b/include/hw/misc/max78000_trng.h new file mode 100644 index 00000000000..c5a8129b6a0 --- /dev/null +++ b/include/hw/misc/max78000_trng.h @@ -0,0 +1,35 @@ +/* + * MAX78000 True Random Number Generator + * + * Copyright (c) 2025 Jackson Donaldson + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef HW_MAX78000_TRNG_H +#define HW_MAX78000_TRNG_H + +#include "hw/sysbus.h" +#include "qom/object.h" + +#define TYPE_MAX78000_TRNG "max78000-trng" +OBJECT_DECLARE_SIMPLE_TYPE(Max78000TrngState, MAX78000_TRNG) + +#define CTRL 0 +#define STATUS 4 +#define DATA 8 + +#define RND_IE (1 << 1) + +struct Max78000TrngState { + SysBusDevice parent_obj; + + MemoryRegion mmio; + + uint32_t ctrl; + uint32_t status; + uint32_t data; + + qemu_irq irq; +}; + +#endif diff --git a/include/hw/misc/mos6522.h b/include/hw/misc/mos6522.h index fba45668aba..920871a598b 100644 --- a/include/hw/misc/mos6522.h +++ b/include/hw/misc/mos6522.h @@ -154,7 +154,7 @@ struct MOS6522State { OBJECT_DECLARE_TYPE(MOS6522State, MOS6522DeviceClass, MOS6522) struct MOS6522DeviceClass { - DeviceClass parent_class; + SysBusDeviceClass parent_class; ResettablePhases parent_phases; void (*portB_write)(MOS6522State *dev); diff --git a/include/hw/misc/npcm7xx_clk.h b/include/hw/misc/npcm7xx_clk.h deleted file mode 100644 index 5ed4a4672b3..00000000000 --- a/include/hw/misc/npcm7xx_clk.h +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Nuvoton NPCM7xx Clock Control Registers. - * - * Copyright 2020 Google LLC - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - */ -#ifndef NPCM7XX_CLK_H -#define NPCM7XX_CLK_H - -#include "exec/memory.h" -#include "hw/clock.h" -#include "hw/sysbus.h" - -/* - * Number of registers in our device state structure. Don't change this without - * incrementing the version_id in the vmstate. - */ -#define NPCM7XX_CLK_NR_REGS (0x70 / sizeof(uint32_t)) - -#define NPCM7XX_WATCHDOG_RESET_GPIO_IN "npcm7xx-clk-watchdog-reset-gpio-in" - -/* Maximum amount of clock inputs in a SEL module. */ -#define NPCM7XX_CLK_SEL_MAX_INPUT 5 - -/* PLLs in CLK module. 
*/ -typedef enum NPCM7xxClockPLL { - NPCM7XX_CLOCK_PLL0, - NPCM7XX_CLOCK_PLL1, - NPCM7XX_CLOCK_PLL2, - NPCM7XX_CLOCK_PLLG, - NPCM7XX_CLOCK_NR_PLLS, -} NPCM7xxClockPLL; - -/* SEL/MUX in CLK module. */ -typedef enum NPCM7xxClockSEL { - NPCM7XX_CLOCK_PIXCKSEL, - NPCM7XX_CLOCK_MCCKSEL, - NPCM7XX_CLOCK_CPUCKSEL, - NPCM7XX_CLOCK_CLKOUTSEL, - NPCM7XX_CLOCK_UARTCKSEL, - NPCM7XX_CLOCK_TIMCKSEL, - NPCM7XX_CLOCK_SDCKSEL, - NPCM7XX_CLOCK_GFXMSEL, - NPCM7XX_CLOCK_SUCKSEL, - NPCM7XX_CLOCK_NR_SELS, -} NPCM7xxClockSEL; - -/* Dividers in CLK module. */ -typedef enum NPCM7xxClockDivider { - NPCM7XX_CLOCK_PLL1D2, /* PLL1/2 */ - NPCM7XX_CLOCK_PLL2D2, /* PLL2/2 */ - NPCM7XX_CLOCK_MC_DIVIDER, - NPCM7XX_CLOCK_AXI_DIVIDER, - NPCM7XX_CLOCK_AHB_DIVIDER, - NPCM7XX_CLOCK_AHB3_DIVIDER, - NPCM7XX_CLOCK_SPI0_DIVIDER, - NPCM7XX_CLOCK_SPIX_DIVIDER, - NPCM7XX_CLOCK_APB1_DIVIDER, - NPCM7XX_CLOCK_APB2_DIVIDER, - NPCM7XX_CLOCK_APB3_DIVIDER, - NPCM7XX_CLOCK_APB4_DIVIDER, - NPCM7XX_CLOCK_APB5_DIVIDER, - NPCM7XX_CLOCK_CLKOUT_DIVIDER, - NPCM7XX_CLOCK_UART_DIVIDER, - NPCM7XX_CLOCK_TIMER_DIVIDER, - NPCM7XX_CLOCK_ADC_DIVIDER, - NPCM7XX_CLOCK_MMC_DIVIDER, - NPCM7XX_CLOCK_SDHC_DIVIDER, - NPCM7XX_CLOCK_GFXM_DIVIDER, /* divide by 3 */ - NPCM7XX_CLOCK_UTMI_DIVIDER, - NPCM7XX_CLOCK_NR_DIVIDERS, -} NPCM7xxClockConverter; - -typedef struct NPCM7xxCLKState NPCM7xxCLKState; - -/** - * struct NPCM7xxClockPLLState - A PLL module in CLK module. - * @name: The name of the module. - * @clk: The CLK module that owns this module. - * @clock_in: The input clock of this module. - * @clock_out: The output clock of this module. - * @reg: The control registers for this PLL module. - */ -typedef struct NPCM7xxClockPLLState { - DeviceState parent; - - const char *name; - NPCM7xxCLKState *clk; - Clock *clock_in; - Clock *clock_out; - - int reg; -} NPCM7xxClockPLLState; - -/** - * struct NPCM7xxClockSELState - A SEL module in CLK module. - * @name: The name of the module. - * @clk: The CLK module that owns this module. - * @input_size: The size of inputs of this module. - * @clock_in: The input clocks of this module. - * @clock_out: The output clocks of this module. - * @offset: The offset of this module in the control register. - * @len: The length of this module in the control register. - */ -typedef struct NPCM7xxClockSELState { - DeviceState parent; - - const char *name; - NPCM7xxCLKState *clk; - uint8_t input_size; - Clock *clock_in[NPCM7XX_CLK_SEL_MAX_INPUT]; - Clock *clock_out; - - int offset; - int len; -} NPCM7xxClockSELState; - -/** - * struct NPCM7xxClockDividerState - A Divider module in CLK module. - * @name: The name of the module. - * @clk: The CLK module that owns this module. - * @clock_in: The input clock of this module. - * @clock_out: The output clock of this module. - * @divide: The function the divider uses to divide the input. - * @reg: The index of the control register that contains the divisor. - * @offset: The offset of the divisor in the control register. - * @len: The length of the divisor in the control register. 
- * @divisor: The divisor for a constant divisor - */ -typedef struct NPCM7xxClockDividerState { - DeviceState parent; - - const char *name; - NPCM7xxCLKState *clk; - Clock *clock_in; - Clock *clock_out; - - uint32_t (*divide)(struct NPCM7xxClockDividerState *s); - union { - struct { - int reg; - int offset; - int len; - }; - int divisor; - }; -} NPCM7xxClockDividerState; - -struct NPCM7xxCLKState { - SysBusDevice parent; - - MemoryRegion iomem; - - /* Clock converters */ - NPCM7xxClockPLLState plls[NPCM7XX_CLOCK_NR_PLLS]; - NPCM7xxClockSELState sels[NPCM7XX_CLOCK_NR_SELS]; - NPCM7xxClockDividerState dividers[NPCM7XX_CLOCK_NR_DIVIDERS]; - - uint32_t regs[NPCM7XX_CLK_NR_REGS]; - - /* Time reference for SECCNT and CNTR25M, initialized by power on reset */ - int64_t ref_ns; - - /* The incoming reference clock. */ - Clock *clkref; -}; - -#define TYPE_NPCM7XX_CLK "npcm7xx-clk" -OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxCLKState, NPCM7XX_CLK) - -#endif /* NPCM7XX_CLK_H */ diff --git a/include/hw/misc/npcm7xx_gcr.h b/include/hw/misc/npcm7xx_gcr.h deleted file mode 100644 index c0bbdda77e5..00000000000 --- a/include/hw/misc/npcm7xx_gcr.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Nuvoton NPCM7xx System Global Control Registers. - * - * Copyright 2020 Google LLC - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - */ -#ifndef NPCM7XX_GCR_H -#define NPCM7XX_GCR_H - -#include "exec/memory.h" -#include "hw/sysbus.h" - -/* - * NPCM7XX PWRON STRAP bit fields - * 12: SPI0 powered by VSBV3 at 1.8V - * 11: System flash attached to BMC - * 10: BSP alternative pins. - * 9:8: Flash UART command route enabled. - * 7: Security enabled. - * 6: HI-Z state control. - * 5: ECC disabled. - * 4: Reserved - * 3: JTAG2 enabled. - * 2:0: CPU and DRAM clock frequency. - */ -#define NPCM7XX_PWRON_STRAP_SPI0F18 BIT(12) -#define NPCM7XX_PWRON_STRAP_SFAB BIT(11) -#define NPCM7XX_PWRON_STRAP_BSPA BIT(10) -#define NPCM7XX_PWRON_STRAP_FUP(x) ((x) << 8) -#define FUP_NORM_UART2 3 -#define FUP_PROG_UART3 2 -#define FUP_PROG_UART2 1 -#define FUP_NORM_UART3 0 -#define NPCM7XX_PWRON_STRAP_SECEN BIT(7) -#define NPCM7XX_PWRON_STRAP_HIZ BIT(6) -#define NPCM7XX_PWRON_STRAP_ECC BIT(5) -#define NPCM7XX_PWRON_STRAP_RESERVE1 BIT(4) -#define NPCM7XX_PWRON_STRAP_J2EN BIT(3) -#define NPCM7XX_PWRON_STRAP_CKFRQ(x) (x) -#define CKFRQ_SKIPINIT 0x000 -#define CKFRQ_DEFAULT 0x111 - -/* - * Number of registers in our device state structure. Don't change this without - * incrementing the version_id in the vmstate. 
- */ -#define NPCM7XX_GCR_NR_REGS (0x148 / sizeof(uint32_t)) - -struct NPCM7xxGCRState { - SysBusDevice parent; - - MemoryRegion iomem; - - uint32_t regs[NPCM7XX_GCR_NR_REGS]; - - uint32_t reset_pwron; - uint32_t reset_mdlr; - uint32_t reset_intcr3; -}; - -#define TYPE_NPCM7XX_GCR "npcm7xx-gcr" -OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxGCRState, NPCM7XX_GCR) - -#endif /* NPCM7XX_GCR_H */ diff --git a/include/hw/misc/npcm7xx_mft.h b/include/hw/misc/npcm7xx_mft.h index d6384382cea..e4b997a6ad3 100644 --- a/include/hw/misc/npcm7xx_mft.h +++ b/include/hw/misc/npcm7xx_mft.h @@ -16,7 +16,7 @@ #ifndef NPCM7XX_MFT_H #define NPCM7XX_MFT_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/clock.h" #include "hw/irq.h" #include "hw/sysbus.h" diff --git a/include/hw/misc/npcm_clk.h b/include/hw/misc/npcm_clk.h new file mode 100644 index 00000000000..52e972f4604 --- /dev/null +++ b/include/hw/misc/npcm_clk.h @@ -0,0 +1,195 @@ +/* + * Nuvoton NPCM7xx/8xx Clock Control Registers. + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ +#ifndef NPCM_CLK_H +#define NPCM_CLK_H + +#include "system/memory.h" +#include "hw/clock.h" +#include "hw/sysbus.h" + +#define NPCM7XX_CLK_NR_REGS (0x70 / sizeof(uint32_t)) +#define NPCM8XX_CLK_NR_REGS (0xc4 / sizeof(uint32_t)) +/* + * Number of maximum registers in NPCM device state structure. Don't change + * this without incrementing the version_id in the vmstate. + */ +#define NPCM_CLK_MAX_NR_REGS NPCM8XX_CLK_NR_REGS + +#define NPCM7XX_WATCHDOG_RESET_GPIO_IN "npcm7xx-clk-watchdog-reset-gpio-in" + +/* Maximum amount of clock inputs in a SEL module. */ +#define NPCM7XX_CLK_SEL_MAX_INPUT 5 + +/* PLLs in CLK module. */ +typedef enum NPCM7xxClockPLL { + NPCM7XX_CLOCK_PLL0, + NPCM7XX_CLOCK_PLL1, + NPCM7XX_CLOCK_PLL2, + NPCM7XX_CLOCK_PLLG, + NPCM7XX_CLOCK_NR_PLLS, +} NPCM7xxClockPLL; + +/* SEL/MUX in CLK module. */ +typedef enum NPCM7xxClockSEL { + NPCM7XX_CLOCK_PIXCKSEL, + NPCM7XX_CLOCK_MCCKSEL, + NPCM7XX_CLOCK_CPUCKSEL, + NPCM7XX_CLOCK_CLKOUTSEL, + NPCM7XX_CLOCK_UARTCKSEL, + NPCM7XX_CLOCK_TIMCKSEL, + NPCM7XX_CLOCK_SDCKSEL, + NPCM7XX_CLOCK_GFXMSEL, + NPCM7XX_CLOCK_SUCKSEL, + NPCM7XX_CLOCK_NR_SELS, +} NPCM7xxClockSEL; + +/* Dividers in CLK module. 
*/ +typedef enum NPCM7xxClockDivider { + NPCM7XX_CLOCK_PLL1D2, /* PLL1/2 */ + NPCM7XX_CLOCK_PLL2D2, /* PLL2/2 */ + NPCM7XX_CLOCK_MC_DIVIDER, + NPCM7XX_CLOCK_AXI_DIVIDER, + NPCM7XX_CLOCK_AHB_DIVIDER, + NPCM7XX_CLOCK_AHB3_DIVIDER, + NPCM7XX_CLOCK_SPI0_DIVIDER, + NPCM7XX_CLOCK_SPIX_DIVIDER, + NPCM7XX_CLOCK_APB1_DIVIDER, + NPCM7XX_CLOCK_APB2_DIVIDER, + NPCM7XX_CLOCK_APB3_DIVIDER, + NPCM7XX_CLOCK_APB4_DIVIDER, + NPCM7XX_CLOCK_APB5_DIVIDER, + NPCM7XX_CLOCK_CLKOUT_DIVIDER, + NPCM7XX_CLOCK_UART_DIVIDER, + NPCM7XX_CLOCK_TIMER_DIVIDER, + NPCM7XX_CLOCK_ADC_DIVIDER, + NPCM7XX_CLOCK_MMC_DIVIDER, + NPCM7XX_CLOCK_SDHC_DIVIDER, + NPCM7XX_CLOCK_GFXM_DIVIDER, /* divide by 3 */ + NPCM7XX_CLOCK_UTMI_DIVIDER, + NPCM7XX_CLOCK_NR_DIVIDERS, +} NPCM7xxClockConverter; + +typedef struct NPCMCLKState NPCMCLKState; + +/** + * struct NPCM7xxClockPLLState - A PLL module in CLK module. + * @name: The name of the module. + * @clk: The CLK module that owns this module. + * @clock_in: The input clock of this module. + * @clock_out: The output clock of this module. + * @reg: The control registers for this PLL module. + */ +typedef struct NPCM7xxClockPLLState { + DeviceState parent; + + const char *name; + NPCMCLKState *clk; + Clock *clock_in; + Clock *clock_out; + + int reg; +} NPCM7xxClockPLLState; + +/** + * struct NPCM7xxClockSELState - A SEL module in CLK module. + * @name: The name of the module. + * @clk: The CLK module that owns this module. + * @input_size: The size of inputs of this module. + * @clock_in: The input clocks of this module. + * @clock_out: The output clocks of this module. + * @offset: The offset of this module in the control register. + * @len: The length of this module in the control register. + */ +typedef struct NPCM7xxClockSELState { + DeviceState parent; + + const char *name; + NPCMCLKState *clk; + uint8_t input_size; + Clock *clock_in[NPCM7XX_CLK_SEL_MAX_INPUT]; + Clock *clock_out; + + int offset; + int len; +} NPCM7xxClockSELState; + +/** + * struct NPCM7xxClockDividerState - A Divider module in CLK module. + * @name: The name of the module. + * @clk: The CLK module that owns this module. + * @clock_in: The input clock of this module. + * @clock_out: The output clock of this module. + * @divide: The function the divider uses to divide the input. + * @reg: The index of the control register that contains the divisor. + * @offset: The offset of the divisor in the control register. + * @len: The length of the divisor in the control register. + * @divisor: The divisor for a constant divisor + */ +typedef struct NPCM7xxClockDividerState { + DeviceState parent; + + const char *name; + NPCMCLKState *clk; + Clock *clock_in; + Clock *clock_out; + + uint32_t (*divide)(struct NPCM7xxClockDividerState *s); + union { + struct { + int reg; + int offset; + int len; + }; + int divisor; + }; +} NPCM7xxClockDividerState; + +struct NPCMCLKState { + SysBusDevice parent; + + MemoryRegion iomem; + + /* Clock converters */ + /* + * TODO: Implement unique clock converters for NPCM8xx. + * NPCM8xx adds a few more clock outputs. + */ + NPCM7xxClockPLLState plls[NPCM7XX_CLOCK_NR_PLLS]; + NPCM7xxClockSELState sels[NPCM7XX_CLOCK_NR_SELS]; + NPCM7xxClockDividerState dividers[NPCM7XX_CLOCK_NR_DIVIDERS]; + + uint32_t regs[NPCM_CLK_MAX_NR_REGS]; + + /* Time reference for SECCNT and CNTR25M, initialized by power on reset */ + int64_t ref_ns; + + /* The incoming reference clock. 
*/ + Clock *clkref; +}; + +typedef struct NPCMCLKClass { + SysBusDeviceClass parent; + + size_t nr_regs; + const uint32_t *cold_reset_values; +} NPCMCLKClass; + +#define TYPE_NPCM_CLK "npcm-clk" +OBJECT_DECLARE_TYPE(NPCMCLKState, NPCMCLKClass, NPCM_CLK) +#define TYPE_NPCM7XX_CLK "npcm7xx-clk" +#define TYPE_NPCM8XX_CLK "npcm8xx-clk" + +#endif /* NPCM_CLK_H */ diff --git a/include/hw/misc/npcm_gcr.h b/include/hw/misc/npcm_gcr.h new file mode 100644 index 00000000000..702e7fddb1f --- /dev/null +++ b/include/hw/misc/npcm_gcr.h @@ -0,0 +1,86 @@ +/* + * Nuvoton NPCM7xx/8xx System Global Control Registers. + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ +#ifndef NPCM_GCR_H +#define NPCM_GCR_H + +#include "system/memory.h" +#include "hw/sysbus.h" +#include "qom/object.h" + +/* + * NPCM7XX PWRON STRAP bit fields + * 12: SPI0 powered by VSBV3 at 1.8V + * 11: System flash attached to BMC + * 10: BSP alternative pins. + * 9:8: Flash UART command route enabled. + * 7: Security enabled. + * 6: HI-Z state control. + * 5: ECC disabled. + * 4: Reserved + * 3: JTAG2 enabled. + * 2:0: CPU and DRAM clock frequency. + */ +#define NPCM7XX_PWRON_STRAP_SPI0F18 BIT(12) +#define NPCM7XX_PWRON_STRAP_SFAB BIT(11) +#define NPCM7XX_PWRON_STRAP_BSPA BIT(10) +#define NPCM7XX_PWRON_STRAP_FUP(x) ((x) << 8) +#define FUP_NORM_UART2 3 +#define FUP_PROG_UART3 2 +#define FUP_PROG_UART2 1 +#define FUP_NORM_UART3 0 +#define NPCM7XX_PWRON_STRAP_SECEN BIT(7) +#define NPCM7XX_PWRON_STRAP_HIZ BIT(6) +#define NPCM7XX_PWRON_STRAP_ECC BIT(5) +#define NPCM7XX_PWRON_STRAP_RESERVE1 BIT(4) +#define NPCM7XX_PWRON_STRAP_J2EN BIT(3) +#define NPCM7XX_PWRON_STRAP_CKFRQ(x) (x) +#define CKFRQ_SKIPINIT 0x000 +#define CKFRQ_DEFAULT 0x111 + +/* + * Number of registers in our device state structure. Don't change this without + * incrementing the version_id in the vmstate. 
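 *
 * Editor's note: illustrative sketch, not part of this patch. The NPCMCLKClass
 * defined in npcm_clk.h above parameterises the shared CLK model by register
 * count and cold-reset defaults; an NPCM8xx class_init might look roughly like
 * this (the function name and the npcm8xx_cold_reset_values table are
 * hypothetical, they are not declared by the header):
 *
 *     static void npcm8xx_clk_class_init(ObjectClass *klass, void *data)
 *     {
 *         NPCMCLKClass *c = NPCM_CLK_CLASS(klass);
 *
 *         c->nr_regs = NPCM8XX_CLK_NR_REGS;
 *         c->cold_reset_values = npcm8xx_cold_reset_values;
 *     }
 *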
+ */ +#define NPCM_GCR_MAX_NR_REGS NPCM8XX_GCR_NR_REGS +#define NPCM7XX_GCR_NR_REGS (0x148 / sizeof(uint32_t)) +#define NPCM8XX_GCR_NR_REGS (0xf80 / sizeof(uint32_t)) + +typedef struct NPCMGCRState { + SysBusDevice parent; + + MemoryRegion iomem; + + uint32_t regs[NPCM_GCR_MAX_NR_REGS]; + + uint32_t reset_pwron; + uint32_t reset_mdlr; + uint32_t reset_intcr3; + uint32_t reset_scrpad_b; +} NPCMGCRState; + +typedef struct NPCMGCRClass { + SysBusDeviceClass parent; + + size_t nr_regs; + const uint32_t *cold_reset_values; +} NPCMGCRClass; + +#define TYPE_NPCM_GCR "npcm-gcr" +#define TYPE_NPCM7XX_GCR "npcm7xx-gcr" +#define TYPE_NPCM8XX_GCR "npcm8xx-gcr" +OBJECT_DECLARE_TYPE(NPCMGCRState, NPCMGCRClass, NPCM_GCR) + +#endif /* NPCM_GCR_H */ diff --git a/include/hw/misc/pvpanic.h b/include/hw/misc/pvpanic.h index 9a71a5ad0d7..5098693437a 100644 --- a/include/hw/misc/pvpanic.h +++ b/include/hw/misc/pvpanic.h @@ -15,7 +15,7 @@ #ifndef HW_MISC_PVPANIC_H #define HW_MISC_PVPANIC_H -#include "exec/memory.h" +#include "system/memory.h" #include "qom/object.h" #include "standard-headers/misc/pvpanic.h" @@ -26,6 +26,7 @@ #define TYPE_PVPANIC_ISA_DEVICE "pvpanic" #define TYPE_PVPANIC_PCI_DEVICE "pvpanic-pci" +#define TYPE_PVPANIC_MMIO_DEVICE "pvpanic-mmio" #define PVPANIC_IOPORT_PROP "ioport" diff --git a/include/hw/misc/stm32_rcc.h b/include/hw/misc/stm32_rcc.h new file mode 100644 index 00000000000..ffbdf202ea5 --- /dev/null +++ b/include/hw/misc/stm32_rcc.h @@ -0,0 +1,91 @@ +/* + * STM32 RCC (only reset and enable registers are implemented) + * + * Copyright (c) 2024 Román Cárdenas + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef HW_STM32_RCC_H +#define HW_STM32_RCC_H + +#include "hw/sysbus.h" +#include "qom/object.h" + +#define STM32_RCC_CR 0x00 +#define STM32_RCC_PLL_CFGR 0x04 +#define STM32_RCC_CFGR 0x08 +#define STM32_RCC_CIR 0x0C +#define STM32_RCC_AHB1_RSTR 0x10 +#define STM32_RCC_AHB2_RSTR 0x14 +#define STM32_RCC_AHB3_RSTR 0x18 + +#define STM32_RCC_APB1_RSTR 0x20 +#define STM32_RCC_APB2_RSTR 0x24 + +#define STM32_RCC_AHB1_ENR 0x30 +#define STM32_RCC_AHB2_ENR 0x34 +#define STM32_RCC_AHB3_ENR 0x38 + +#define STM32_RCC_APB1_ENR 0x40 +#define STM32_RCC_APB2_ENR 0x44 + +#define STM32_RCC_AHB1_LPENR 0x50 +#define STM32_RCC_AHB2_LPENR 0x54 +#define STM32_RCC_AHB3_LPENR 0x58 + +#define STM32_RCC_APB1_LPENR 0x60 +#define STM32_RCC_APB2_LPENR 0x64 + +#define STM32_RCC_BDCR 0x70 +#define STM32_RCC_CSR 0x74 + +#define STM32_RCC_SSCGR 0x80 +#define STM32_RCC_PLLI2SCFGR 0x84 +#define STM32_RCC_PLLSAI_CFGR 0x88 +#define STM32_RCC_DCKCFGR 0x8C +#define STM32_RCC_CKGATENR 0x90 +#define STM32_RCC_DCKCFGR2 0x94 + +#define STM32_RCC_NREGS ((STM32_RCC_DCKCFGR2 >> 2) + 1) +#define STM32_RCC_PERIPHERAL_SIZE 0x400 +#define STM32_RCC_NIRQS (32 * 5) /* 32 bits per reg, 5 en/rst regs */ + +#define STM32_RCC_GPIO_IRQ_OFFSET 0 + +#define TYPE_STM32_RCC "stm32.rcc" + +typedef struct STM32RccState STM32RccState; + +DECLARE_INSTANCE_CHECKER(STM32RccState, STM32_RCC, TYPE_STM32_RCC) + +#define NUM_GPIO_EVENT_IN_LINES 16 + +struct STM32RccState { + SysBusDevice parent_obj; + + MemoryRegion mmio; + + uint32_t regs[STM32_RCC_NREGS]; + + qemu_irq enable_irq[STM32_RCC_NIRQS]; + qemu_irq reset_irq[STM32_RCC_NIRQS]; +}; + +#endif /* HW_STM32_RCC_H */ diff --git a/include/hw/misc/stm32l4x5_syscfg.h b/include/hw/misc/stm32l4x5_syscfg.h index 23bb5641507..c450df2b9ea 100644 --- a/include/hw/misc/stm32l4x5_syscfg.h +++ b/include/hw/misc/stm32l4x5_syscfg.h @@ -48,6 +48,7 @@ struct Stm32l4x5SyscfgState { uint32_t swpr2; qemu_irq gpio_out[GPIO_NUM_PINS]; + Clock *clk; }; #endif diff --git a/include/hw/misc/vmcoreinfo.h b/include/hw/misc/vmcoreinfo.h index 0b7b55d400a..1aa44771632 100644 --- a/include/hw/misc/vmcoreinfo.h +++ b/include/hw/misc/vmcoreinfo.h @@ -16,10 +16,9 @@ #include "standard-headers/linux/qemu_fw_cfg.h" #include "qom/object.h" -#define VMCOREINFO_DEVICE "vmcoreinfo" +#define TYPE_VMCOREINFO "vmcoreinfo" typedef struct VMCoreInfoState VMCoreInfoState; -DECLARE_INSTANCE_CHECKER(VMCoreInfoState, VMCOREINFO, - VMCOREINFO_DEVICE) +DECLARE_INSTANCE_CHECKER(VMCoreInfoState, VMCOREINFO, TYPE_VMCOREINFO) typedef struct fw_cfg_vmcoreinfo FWCfgVMCoreInfo; @@ -33,7 +32,7 @@ struct VMCoreInfoState { /* returns NULL unless there is exactly one device */ static inline VMCoreInfoState *vmcoreinfo_find(void) { - Object *o = object_resolve_path_type("", VMCOREINFO_DEVICE, NULL); + Object *o = object_resolve_path_type("", TYPE_VMCOREINFO, NULL); return o ? 
VMCOREINFO(o) : NULL; } diff --git a/include/hw/misc/xlnx-versal-trng.h b/include/hw/misc/xlnx-versal-trng.h index 0bcef8a6132..d96f8f9eff3 100644 --- a/include/hw/misc/xlnx-versal-trng.h +++ b/include/hw/misc/xlnx-versal-trng.h @@ -50,6 +50,7 @@ typedef struct XlnxVersalTRng { uint64_t forced_prng_count; uint64_t tst_seed[2]; + RegisterInfoArray *reg_array; uint32_t regs[RMAX_XLNX_VERSAL_TRNG]; RegisterInfo regs_info[RMAX_XLNX_VERSAL_TRNG]; } XlnxVersalTRng; diff --git a/include/hw/net/dp8393x.h b/include/hw/net/dp8393x.h index 4a3f7478be4..24273dc1f43 100644 --- a/include/hw/net/dp8393x.h +++ b/include/hw/net/dp8393x.h @@ -22,7 +22,7 @@ #include "hw/sysbus.h" #include "net/net.h" -#include "exec/memory.h" +#include "system/memory.h" #define SONIC_REG_COUNT 0x40 diff --git a/include/hw/net/imx_fec.h b/include/hw/net/imx_fec.h index 2d13290c787..83b21637eeb 100644 --- a/include/hw/net/imx_fec.h +++ b/include/hw/net/imx_fec.h @@ -31,6 +31,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(IMXFECState, IMX_FEC) #define TYPE_IMX_ENET "imx.enet" #include "hw/sysbus.h" +#include "hw/net/lan9118_phy.h" +#include "hw/irq.h" #include "net/net.h" #define ENET_EIR 1 @@ -264,11 +266,8 @@ struct IMXFECState { uint32_t tx_descriptor[ENET_TX_RING_NUM]; uint32_t tx_ring_num; - uint32_t phy_status; - uint32_t phy_control; - uint32_t phy_advertise; - uint32_t phy_int; - uint32_t phy_int_mask; + Lan9118PhyState mii; + IRQState mii_irq; uint32_t phy_num; bool phy_connected; struct IMXFECState *phy_consumer; diff --git a/include/hw/net/lan9118_phy.h b/include/hw/net/lan9118_phy.h new file mode 100644 index 00000000000..af12fc33d5f --- /dev/null +++ b/include/hw/net/lan9118_phy.h @@ -0,0 +1,37 @@ +/* + * SMSC LAN9118 PHY emulation + * + * Copyright (c) 2009 CodeSourcery, LLC. + * Written by Paul Brook + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef HW_NET_LAN9118_PHY_H +#define HW_NET_LAN9118_PHY_H + +#include "qom/object.h" +#include "hw/sysbus.h" + +#define TYPE_LAN9118_PHY "lan9118-phy" +OBJECT_DECLARE_SIMPLE_TYPE(Lan9118PhyState, LAN9118_PHY) + +typedef struct Lan9118PhyState { + SysBusDevice parent_obj; + + uint16_t status; + uint16_t control; + uint16_t advertise; + uint16_t ints; + uint16_t int_mask; + qemu_irq irq; + bool link_down; +} Lan9118PhyState; + +void lan9118_phy_update_link(Lan9118PhyState *s, bool link_down); +void lan9118_phy_reset(Lan9118PhyState *s); +uint16_t lan9118_phy_read(Lan9118PhyState *s, int reg); +void lan9118_phy_write(Lan9118PhyState *s, int reg, uint16_t val); + +#endif diff --git a/include/hw/net/mii.h b/include/hw/net/mii.h index f7feddac9bd..55bf7c92a1c 100644 --- a/include/hw/net/mii.h +++ b/include/hw/net/mii.h @@ -71,6 +71,7 @@ #define MII_BMSR_JABBER (1 << 1) /* Jabber detected */ #define MII_BMSR_EXTCAP (1 << 0) /* Ext-reg capability */ +#define MII_ANAR_RFAULT (1 << 13) /* Say we can detect faults */ #define MII_ANAR_PAUSE_ASYM (1 << 11) /* Try for asymmetric pause */ #define MII_ANAR_PAUSE (1 << 10) /* Try for pause */ #define MII_ANAR_TXFD (1 << 8) @@ -78,6 +79,7 @@ #define MII_ANAR_10FD (1 << 6) #define MII_ANAR_10 (1 << 5) #define MII_ANAR_CSMACD (1 << 0) +#define MII_ANAR_SELECT (0x001f) /* Selector bits */ #define MII_ANLPAR_ACK (1 << 14) #define MII_ANLPAR_PAUSEASY (1 << 11) /* can pause asymmetrically */ @@ -112,6 +114,10 @@ #define RTL8201CP_PHYID1 0x0000 #define RTL8201CP_PHYID2 0x8201 +/* SMSC LAN9118 */ +#define SMSCLAN9118_PHYID1 0x0007 +#define SMSCLAN9118_PHYID2 0xc0d1 + /* RealTek 8211E */ #define RTL8211E_PHYID1 0x001c #define RTL8211E_PHYID2 0xc915 diff --git a/include/hw/net/msf2-emac.h b/include/hw/net/msf2-emac.h index 846ba6e6dce..b5d9127e46c 100644 --- a/include/hw/net/msf2-emac.h +++ b/include/hw/net/msf2-emac.h @@ -23,7 +23,7 @@ */ #include "hw/sysbus.h" -#include "exec/memory.h" +#include "system/memory.h" #include "net/net.h" #include "net/eth.h" #include "qom/object.h" diff --git a/include/hw/net/npcm_pcs.h b/include/hw/net/npcm_pcs.h new file mode 100644 index 00000000000..d5c481ad70d --- /dev/null +++ b/include/hw/net/npcm_pcs.h @@ -0,0 +1,42 @@ +/* + * Nuvoton NPCM8xx PCS Module + * + * Copyright 2022 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
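 *
 * Editor's note: illustrative sketch, not part of this patch. With the PHY
 * split out into lan9118_phy.h above, a MAC model that embeds Lan9118PhyState
 * (as imx_fec.h now does) can forward its MDIO accesses to the shared helpers;
 * the wrappers below and the "s->mii" / "s->phy_num" fields are hypothetical
 * names for such an embedding:
 *
 *     static uint16_t my_mac_mdio_read(MyMacState *s, int phy_addr, int reg)
 *     {
 *         if (phy_addr != s->phy_num) {
 *             return 0xffff;                // no PHY at that MDIO address
 *         }
 *         return lan9118_phy_read(&s->mii, reg);
 *     }
 *
 *     static void my_mac_mdio_write(MyMacState *s, int phy_addr, int reg,
 *                                   uint16_t val)
 *     {
 *         if (phy_addr == s->phy_num) {
 *             lan9118_phy_write(&s->mii, reg, val);
 *         }
 *     }
 *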
+ */ + +#ifndef NPCM_PCS_H +#define NPCM_PCS_H + +#include "hw/sysbus.h" + +#define NPCM_PCS_NR_SR_CTLS (0x12 / sizeof(uint16_t)) +#define NPCM_PCS_NR_SR_MIIS (0x20 / sizeof(uint16_t)) +#define NPCM_PCS_NR_SR_TIMS (0x22 / sizeof(uint16_t)) +#define NPCM_PCS_NR_VR_MIIS (0x1c6 / sizeof(uint16_t)) + +struct NPCMPCSState { + SysBusDevice parent; + + MemoryRegion iomem; + + uint16_t indirect_access_base; + uint16_t sr_ctl[NPCM_PCS_NR_SR_CTLS]; + uint16_t sr_mii[NPCM_PCS_NR_SR_MIIS]; + uint16_t sr_tim[NPCM_PCS_NR_SR_TIMS]; + uint16_t vr_mii[NPCM_PCS_NR_VR_MIIS]; +}; + +#define TYPE_NPCM_PCS "npcm-pcs" +OBJECT_DECLARE_SIMPLE_TYPE(NPCMPCSState, NPCM_PCS) + +#endif /* NPCM_PCS_H */ diff --git a/include/hw/nubus/nubus.h b/include/hw/nubus/nubus.h index fee79b71d16..7825840dcac 100644 --- a/include/hw/nubus/nubus.h +++ b/include/hw/nubus/nubus.h @@ -11,7 +11,7 @@ #include "hw/qdev-properties.h" #include "hw/sysbus.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "qom/object.h" #include "qemu/units.h" diff --git a/include/hw/nvram/fw_cfg.h b/include/hw/nvram/fw_cfg.h index d173998803c..d41b9328fd1 100644 --- a/include/hw/nvram/fw_cfg.h +++ b/include/hw/nvram/fw_cfg.h @@ -4,7 +4,7 @@ #include "exec/hwaddr.h" #include "standard-headers/linux/qemu_fw_cfg.h" #include "hw/sysbus.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "qom/object.h" #define TYPE_FW_CFG "fw_cfg" @@ -30,8 +30,9 @@ struct FWCfgDataGeneratorClass { * @obj: the object implementing this interface * @errp: pointer to a NULL-initialized error object * - * Returns: reference to a byte array containing the data on success, - * or NULL on error. + * Returns: A byte array containing data to add, or NULL without + * @errp set if no data is required, or NULL with @errp + * set on failure. * * The caller should release the reference when no longer * required. @@ -41,14 +42,6 @@ struct FWCfgDataGeneratorClass { typedef struct fw_cfg_file FWCfgFile; -#define FW_CFG_ORDER_OVERRIDE_VGA 70 -#define FW_CFG_ORDER_OVERRIDE_NIC 80 -#define FW_CFG_ORDER_OVERRIDE_USER 100 -#define FW_CFG_ORDER_OVERRIDE_DEVICE 110 - -void fw_cfg_set_order_override(FWCfgState *fw_cfg, int order); -void fw_cfg_reset_order_override(FWCfgState *fw_cfg); - typedef struct FWCfgFiles { uint32_t count; FWCfgFile f[]; @@ -74,8 +67,6 @@ struct FWCfgState { uint32_t cur_offset; Notifier machine_ready; - int fw_cfg_order_override; - bool dma_enabled; dma_addr_t dma_addr; AddressSpace *dma_as; @@ -291,37 +282,31 @@ void *fw_cfg_modify_file(FWCfgState *s, const char *filename, void *data, size_t len); /** - * fw_cfg_add_from_generator: + * fw_cfg_add_file_from_generator: * @s: fw_cfg device being modified * @filename: name of new fw_cfg file item - * @gen_id: name of object implementing FW_CFG_DATA_GENERATOR interface + * @part: name of object implementing FW_CFG_DATA_GENERATOR interface + * @parent: the object in which to resolve the @part * @errp: pointer to a NULL initialized error object * - * Add a new NAMED fw_cfg item with the content generated from the - * @gen_id object. The data generated by the @gen_id object is copied - * into the data structure of the fw_cfg device. + * If the @part object generates content, add a new NAMED fw_cfg item with it. + * The data generated by the @part object is copied into the data structure of + * the fw_cfg device. 
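 *
 * Editor's note: illustrative sketch, not part of this patch. A board that
 * wants to publish a generator-backed blob resolves the generator object by
 * name under a parent and lets this helper add the file only when content is
 * produced; "my-generator" and "etc/my-table" are made-up names:
 *
 *     Error *err = NULL;
 *
 *     if (!fw_cfg_add_file_from_generator(fw_cfg, object_get_objects_root(),
 *                                         "my-generator", "etc/my-table",
 *                                         &err)) {
 *         error_report_err(err);
 *     }
 *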
* The next available (unused) selector key starting at FW_CFG_FILE_FIRST * will be used; also, a new entry will be added to the file directory * structure residing at key value FW_CFG_FILE_DIR, containing the item name, * data size, and assigned selector key value. * - * Returns: %true on success, %false on error. - */ -bool fw_cfg_add_from_generator(FWCfgState *s, const char *filename, - const char *gen_id, Error **errp); - -/** - * fw_cfg_add_extra_pci_roots: - * @bus: main pci root bus to be scanned from - * @s: fw_cfg device being modified + * If the @part object does not generate content, no fw_cfg item is added. * - * Add a new fw_cfg item... + * Returns: %true on success, %false on error. */ -void fw_cfg_add_extra_pci_roots(PCIBus *bus, FWCfgState *s); +bool fw_cfg_add_file_from_generator(FWCfgState *s, + Object *parent, const char *part, + const char *filename, Error **errp); FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase, AddressSpace *dma_as); -FWCfgState *fw_cfg_init_io(uint32_t iobase); FWCfgState *fw_cfg_init_mem(hwaddr ctl_addr, hwaddr data_addr); FWCfgState *fw_cfg_init_mem_wide(hwaddr ctl_addr, hwaddr data_addr, uint32_t data_width, diff --git a/include/hw/nvram/fw_cfg_acpi.h b/include/hw/nvram/fw_cfg_acpi.h index b39eb0490ff..dfd2a44ef0b 100644 --- a/include/hw/nvram/fw_cfg_acpi.h +++ b/include/hw/nvram/fw_cfg_acpi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * ACPI support for fw_cfg * diff --git a/include/hw/nvram/mac_nvram.h b/include/hw/nvram/mac_nvram.h index 0c4dfaeff60..e9d8398f849 100644 --- a/include/hw/nvram/mac_nvram.h +++ b/include/hw/nvram/mac_nvram.h @@ -26,7 +26,7 @@ #ifndef MAC_NVRAM_H #define MAC_NVRAM_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/sysbus.h" #define MACIO_NVRAM_SIZE 0x2000 diff --git a/include/hw/nvram/npcm7xx_otp.h b/include/hw/nvram/npcm7xx_otp.h index ea4b5d07310..77b05f8b82c 100644 --- a/include/hw/nvram/npcm7xx_otp.h +++ b/include/hw/nvram/npcm7xx_otp.h @@ -16,7 +16,7 @@ #ifndef NPCM7XX_OTP_H #define NPCM7XX_OTP_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/sysbus.h" /* Each OTP module holds 8192 bits of one-time programmable storage */ diff --git a/include/hw/nvram/xlnx-bbram.h b/include/hw/nvram/xlnx-bbram.h index 6fc13f8cc17..58acbe9f51b 100644 --- a/include/hw/nvram/xlnx-bbram.h +++ b/include/hw/nvram/xlnx-bbram.h @@ -26,7 +26,7 @@ #ifndef XLNX_BBRAM_H #define XLNX_BBRAM_H -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "hw/qdev-core.h" #include "hw/irq.h" #include "hw/sysbus.h" @@ -47,6 +47,7 @@ struct XlnxBBRam { bool bbram8_wo; bool blk_ro; + RegisterInfoArray *reg_array; uint32_t regs[RMAX_XLNX_BBRAM]; RegisterInfo regs_info[RMAX_XLNX_BBRAM]; }; diff --git a/include/hw/nvram/xlnx-efuse.h b/include/hw/nvram/xlnx-efuse.h index cff7924106a..ef14fb0528a 100644 --- a/include/hw/nvram/xlnx-efuse.h +++ b/include/hw/nvram/xlnx-efuse.h @@ -27,7 +27,7 @@ #ifndef XLNX_EFUSE_H #define XLNX_EFUSE_H -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "hw/qdev-core.h" #define TYPE_XLNX_EFUSE "xlnx-efuse" diff --git a/include/hw/nvram/xlnx-versal-efuse.h b/include/hw/nvram/xlnx-versal-efuse.h index 86e2261b9a3..afa4f4f9960 100644 --- a/include/hw/nvram/xlnx-versal-efuse.h +++ b/include/hw/nvram/xlnx-versal-efuse.h @@ -44,6 +44,7 @@ struct XlnxVersalEFuseCtrl { void *extra_pg0_lock_spec; /* Opaque property */ uint32_t extra_pg0_lock_n16; + RegisterInfoArray 
*reg_array; uint32_t regs[XLNX_VERSAL_EFUSE_CTRL_R_MAX]; RegisterInfo regs_info[XLNX_VERSAL_EFUSE_CTRL_R_MAX]; }; diff --git a/include/hw/nvram/xlnx-zynqmp-efuse.h b/include/hw/nvram/xlnx-zynqmp-efuse.h index f5beacc2e6a..7fb12df3fbb 100644 --- a/include/hw/nvram/xlnx-zynqmp-efuse.h +++ b/include/hw/nvram/xlnx-zynqmp-efuse.h @@ -37,6 +37,7 @@ struct XlnxZynqMPEFuse { qemu_irq irq; XlnxEFuse *efuse; + RegisterInfoArray *reg_array; uint32_t regs[XLNX_ZYNQMP_EFUSE_R_MAX]; RegisterInfo regs_info[XLNX_ZYNQMP_EFUSE_R_MAX]; }; diff --git a/include/hw/openrisc/boot.h b/include/hw/openrisc/boot.h index 25a313d63a1..9b4d88072c4 100644 --- a/include/hw/openrisc/boot.h +++ b/include/hw/openrisc/boot.h @@ -20,6 +20,7 @@ #define OPENRISC_BOOT_H #include "exec/cpu-defs.h" +#include "hw/boards.h" hwaddr openrisc_load_kernel(ram_addr_t ram_size, const char *kernel_filename, @@ -28,7 +29,7 @@ hwaddr openrisc_load_kernel(ram_addr_t ram_size, hwaddr openrisc_load_initrd(void *fdt, const char *filename, hwaddr load_start, uint64_t mem_size); -uint32_t openrisc_load_fdt(void *fdt, hwaddr load_start, +uint32_t openrisc_load_fdt(MachineState *ms, void *fdt, hwaddr load_start, uint64_t mem_size); #endif /* OPENRISC_BOOT_H */ diff --git a/include/hw/pci-bridge/cxl_upstream_port.h b/include/hw/pci-bridge/cxl_upstream_port.h index 12635139f62..f208397ffe9 100644 --- a/include/hw/pci-bridge/cxl_upstream_port.h +++ b/include/hw/pci-bridge/cxl_upstream_port.h @@ -12,6 +12,10 @@ typedef struct CXLUpstreamPort { /*< public >*/ CXLComponentState cxl_cstate; CXLCCI swcci; + + PCIExpLinkSpeed speed; + PCIExpLinkWidth width; + DOECap doe_cdat; uint64_t sn; } CXLUpstreamPort; diff --git a/include/hw/pci-host/astro.h b/include/hw/pci-host/astro.h index e2966917cd9..832125a05af 100644 --- a/include/hw/pci-host/astro.h +++ b/include/hw/pci-host/astro.h @@ -24,6 +24,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(ElroyState, ELROY_PCI_HOST_BRIDGE) #define LMMIO_DIST_BASE_ADDR 0xf4000000ULL #define LMMIO_DIST_BASE_SIZE 0x4000000ULL +#define LMMIO_DIRECT_RANGES 4 + #define IOS_DIST_BASE_ADDR 0xfffee00000ULL #define IOS_DIST_BASE_SIZE 0x10000ULL @@ -83,9 +85,7 @@ struct AstroState { struct ElroyState *elroy[ELROY_NUM]; MemoryRegion this_mem; - - MemoryRegion pci_mmio; - MemoryRegion pci_io; + MemoryRegion lmmio_direct[LMMIO_DIRECT_RANGES]; IOMMUMemoryRegion iommu; AddressSpace iommu_as; diff --git a/include/hw/pci-host/designware.h b/include/hw/pci-host/designware.h index 908f3d946be..a35a3bd06c8 100644 --- a/include/hw/pci-host/designware.h +++ b/include/hw/pci-host/designware.h @@ -25,13 +25,18 @@ #include "hw/pci/pci_bridge.h" #include "qom/object.h" +#define TYPE_DESIGNWARE_PCIE_ROOT_BUS "designware-pcie-root-BUS" +OBJECT_DECLARE_SIMPLE_TYPE(DesignwarePCIERootBus, DESIGNWARE_PCIE_ROOT_BUS) + #define TYPE_DESIGNWARE_PCIE_HOST "designware-pcie-host" OBJECT_DECLARE_SIMPLE_TYPE(DesignwarePCIEHost, DESIGNWARE_PCIE_HOST) #define TYPE_DESIGNWARE_PCIE_ROOT "designware-pcie-root" OBJECT_DECLARE_SIMPLE_TYPE(DesignwarePCIERoot, DESIGNWARE_PCIE_ROOT) -struct DesignwarePCIERoot; +struct DesignwarePCIERootBus { + PCIBus parent; +}; typedef struct DesignwarePCIEViewport { DesignwarePCIERoot *root; @@ -88,6 +93,7 @@ struct DesignwarePCIEHost { MemoryRegion io; qemu_irq irqs[4]; + qemu_irq msi; } pci; MemoryRegion mmio; diff --git a/include/hw/pci-host/dino.h b/include/hw/pci-host/dino.h index fd7975c7984..5dc8cdf6108 100644 --- a/include/hw/pci-host/dino.h +++ b/include/hw/pci-host/dino.h @@ -109,10 +109,6 @@ static const uint32_t reg800_keep_bits[DINO800_REGS] 
= { struct DinoState { PCIHostState parent_obj; - /* - * PCI_CONFIG_ADDR is parent_obj.config_reg, via pci_host_conf_be_ops, - * so that we can map PCI_CONFIG_DATA to pci_host_data_be_ops. - */ uint32_t config_reg_dino; /* keep original copy, including 2 lowest bits */ uint32_t iar0; diff --git a/include/hw/pci-host/fsl_imx8m_phy.h b/include/hw/pci-host/fsl_imx8m_phy.h new file mode 100644 index 00000000000..5f1b212fd97 --- /dev/null +++ b/include/hw/pci-host/fsl_imx8m_phy.h @@ -0,0 +1,28 @@ +/* + * i.MX8 PCIe PHY emulation + * + * Copyright (c) 2025 Bernhard Beschow + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_PCIHOST_FSLIMX8MPCIEPHY_H +#define HW_PCIHOST_FSLIMX8MPCIEPHY_H + +#include "hw/sysbus.h" +#include "qom/object.h" +#include "system/memory.h" + +#define TYPE_FSL_IMX8M_PCIE_PHY "fsl-imx8m-pcie-phy" +OBJECT_DECLARE_SIMPLE_TYPE(FslImx8mPciePhyState, FSL_IMX8M_PCIE_PHY) + +#define FSL_IMX8M_PCIE_PHY_DATA_SIZE 0x800 + +struct FslImx8mPciePhyState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint8_t data[FSL_IMX8M_PCIE_PHY_DATA_SIZE]; +}; + +#endif diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h index dce883573ba..7eea16e7284 100644 --- a/include/hw/pci-host/gpex.h +++ b/include/hw/pci-host/gpex.h @@ -32,8 +32,6 @@ OBJECT_DECLARE_SIMPLE_TYPE(GPEXHost, GPEX_HOST) #define TYPE_GPEX_ROOT_DEVICE "gpex-root" OBJECT_DECLARE_SIMPLE_TYPE(GPEXRootState, GPEX_ROOT_DEVICE) -#define GPEX_NUM_IRQS 4 - struct GPEXRootState { /*< private >*/ PCIDevice parent_obj; @@ -47,8 +45,11 @@ struct GPEXConfig { MemMapEntry pio; int irq; PCIBus *bus; + bool pci_native_hotplug; + bool preserve_config; }; +typedef struct GPEXIrq GPEXIrq; struct GPEXHost { /*< private >*/ PCIExpressHost parent_obj; @@ -60,8 +61,8 @@ struct GPEXHost { MemoryRegion io_mmio; MemoryRegion io_ioport_window; MemoryRegion io_mmio_window; - qemu_irq irq[GPEX_NUM_IRQS]; - int irq_num[GPEX_NUM_IRQS]; + GPEXIrq *irq; + uint8_t num_irqs; bool allow_unmapped_accesses; diff --git a/include/hw/pci-host/ls7a.h b/include/hw/pci-host/ls7a.h index cd7c9ec7bc2..79d4ea85012 100644 --- a/include/hw/pci-host/ls7a.h +++ b/include/hw/pci-host/ls7a.h @@ -36,17 +36,18 @@ #define VIRT_PCH_PIC_IRQ_NUM 32 #define VIRT_GSI_BASE 64 #define VIRT_DEVICE_IRQS 16 +#define VIRT_UART_COUNT 4 #define VIRT_UART_IRQ (VIRT_GSI_BASE + 2) #define VIRT_UART_BASE 0x1fe001e0 -#define VIRT_UART_SIZE 0X100 -#define VIRT_RTC_IRQ (VIRT_GSI_BASE + 3) +#define VIRT_UART_SIZE 0x100 +#define VIRT_RTC_IRQ (VIRT_GSI_BASE + 6) #define VIRT_MISC_REG_BASE (VIRT_PCH_REG_BASE + 0x00080000) #define VIRT_RTC_REG_BASE (VIRT_MISC_REG_BASE + 0x00050100) #define VIRT_RTC_LEN 0x100 -#define VIRT_SCI_IRQ (VIRT_GSI_BASE + 4) +#define VIRT_SCI_IRQ (VIRT_GSI_BASE + 7) #define VIRT_PLATFORM_BUS_BASEADDRESS 0x16000000 #define VIRT_PLATFORM_BUS_SIZE 0x2000000 #define VIRT_PLATFORM_BUS_NUM_IRQS 2 -#define VIRT_PLATFORM_BUS_IRQ (VIRT_GSI_BASE + 5) +#define VIRT_PLATFORM_BUS_IRQ (VIRT_GSI_BASE + 8) #endif diff --git a/include/hw/pci-host/pam.h b/include/hw/pci-host/pam.h index 005916f826b..44f39081604 100644 --- a/include/hw/pci-host/pam.h +++ b/include/hw/pci-host/pam.h @@ -50,7 +50,7 @@ * 0xf0000 - 0xfffff System BIOS Area Memory Segments */ -#include "exec/memory.h" +#include "system/memory.h" #define SMRAM_C_BASE 0xa0000 #define SMRAM_C_END 0xc0000 diff --git a/include/hw/pci-host/pnv_phb4.h b/include/hw/pci-host/pnv_phb4.h index 8abee78e4d4..8a80c0c667a 100644 --- a/include/hw/pci-host/pnv_phb4.h +++ b/include/hw/pci-host/pnv_phb4.h @@ -13,6 +13,7 @@ 
#include "hw/pci-host/pnv_phb.h" #include "hw/pci/pci_bus.h" #include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_nest_pervasive.h" #include "hw/ppc/xive.h" #include "qom/object.h" @@ -174,6 +175,9 @@ struct PnvPhb4PecState { uint32_t index; uint32_t chip_id; + /* Pervasive chiplet control */ + PnvNestChipletPervasive nest_pervasive; + /* Nest registers, excuding per-stack */ #define PHB4_PEC_NEST_REGS_COUNT 0xf uint64_t nest_regs[PHB4_PEC_NEST_REGS_COUNT]; @@ -196,6 +200,7 @@ struct PnvPhb4PecState { struct PnvPhb4PecClass { DeviceClass parent_class; + uint32_t (*xscom_cplt_base)(PnvPhb4PecState *pec); uint32_t (*xscom_nest_base)(PnvPhb4PecState *pec); uint32_t xscom_nest_size; uint32_t (*xscom_pci_base)(PnvPhb4PecState *pec); diff --git a/include/hw/pci-host/q35.h b/include/hw/pci-host/q35.h index 22fadfa3ed7..ddafc3f2e3d 100644 --- a/include/hw/pci-host/q35.h +++ b/include/hw/pci-host/q35.h @@ -181,8 +181,6 @@ struct Q35PCIHost { #define MCH_PCIE_DEV 1 #define MCH_PCIE_FUNC 0 -uint64_t mch_mcfg_base(void); - /* * Arbitrary but unique BNF number for IOAPIC device. * diff --git a/include/hw/pci-host/remote.h b/include/hw/pci-host/remote.h index 690a01f0fe9..5264c359369 100644 --- a/include/hw/pci-host/remote.h +++ b/include/hw/pci-host/remote.h @@ -11,7 +11,7 @@ #ifndef PCI_HOST_REMOTE_H #define PCI_HOST_REMOTE_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/pci/pcie_host.h" #define TYPE_REMOTE_PCIHOST "remote-pcihost" diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h index 3778aac27b6..0db87f12815 100644 --- a/include/hw/pci-host/spapr.h +++ b/include/hw/pci-host/spapr.h @@ -53,7 +53,6 @@ struct SpaprPhbState { uint32_t index; uint64_t buid; char *dtbusname; - bool dr_enabled; MemoryRegion memspace, iospace; hwaddr mem_win_addr, mem_win_size, mem64_win_addr, mem64_win_size; @@ -84,10 +83,6 @@ struct SpaprPhbState { bool pcie_ecs; /* Allow access to PCIe extended config space? */ /* Fields for migration compatibility hacks */ - bool pre_2_8_migration; - uint32_t mig_liobn; - hwaddr mig_mem_win_addr, mig_mem_win_size; - hwaddr mig_io_win_addr, mig_io_win_size; bool pre_5_1_assoc; }; diff --git a/include/hw/pci/msix.h b/include/hw/pci/msix.h index 0e6f257e451..11ef9454c13 100644 --- a/include/hw/pci/msix.h +++ b/include/hw/pci/msix.h @@ -32,6 +32,7 @@ int msix_present(PCIDevice *dev); bool msix_is_masked(PCIDevice *dev, unsigned vector); void msix_set_pending(PCIDevice *dev, unsigned vector); void msix_clr_pending(PCIDevice *dev, int vector); +int msix_is_pending(PCIDevice *dev, unsigned vector); void msix_vector_use(PCIDevice *dev, unsigned vector); void msix_vector_unuse(PCIDevice *dev, unsigned vector); diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h index eb26cac8109..0d3b3519033 100644 --- a/include/hw/pci/pci.h +++ b/include/hw/pci/pci.h @@ -1,9 +1,9 @@ #ifndef QEMU_PCI_H #define QEMU_PCI_H -#include "exec/memory.h" -#include "sysemu/dma.h" -#include "sysemu/host_iommu_device.h" +#include "system/memory.h" +#include "system/dma.h" +#include "system/host_iommu_device.h" /* PCI includes legacy ISA access. 
*/ #include "hw/isa/isa.h" @@ -16,13 +16,17 @@ extern bool pci_available; #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) #define PCI_FUNC(devfn) ((devfn) & 0x07) -#define PCI_BUILD_BDF(bus, devfn) ((bus << 8) | (devfn)) +#define PCI_BUILD_BDF(bus, devfn) (((bus) << 8) | (devfn)) #define PCI_BDF_TO_DEVFN(x) ((x) & 0xff) #define PCI_BUS_MAX 256 #define PCI_DEVFN_MAX 256 #define PCI_SLOT_MAX 32 #define PCI_FUNC_MAX 8 +#define PCI_SBDF(seg, bus, dev, func) \ + ((((uint32_t)(seg)) << 16) | \ + (PCI_BUILD_BDF(bus, PCI_DEVFN(dev, func)))) + /* Class, Vendor and Device IDs from Linux's pci_ids.h */ #include "hw/pci/pci_ids.h" @@ -116,6 +120,7 @@ extern bool pci_available; #define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011 #define PCI_DEVICE_ID_REDHAT_ACPI_ERST 0x0012 #define PCI_DEVICE_ID_REDHAT_UFS 0x0013 +#define PCI_DEVICE_ID_REDHAT_RISCV_IOMMU 0x0014 #define PCI_DEVICE_ID_REDHAT_QXL 0x0100 #define FMT_PCIBUS PRIx64 @@ -129,6 +134,15 @@ struct PCIHostDeviceAddress { unsigned int function; }; +/* + * Represents the Address Type (AT) field in a PCI request, + * see MemTxAttrs.address_type + */ +typedef enum PCIAddressType { + PCI_AT_UNTRANSLATED = 0, /* Default when no attribute is set */ + PCI_AT_TRANSLATED = 1, +} PCIAddressType; + typedef void PCIConfigWriteFunc(PCIDevice *pci_dev, uint32_t address, uint32_t data, int len); typedef uint32_t PCIConfigReadFunc(PCIDevice *pci_dev, @@ -213,6 +227,12 @@ enum { QEMU_PCIE_ERR_UNC_MASK = (1 << QEMU_PCIE_ERR_UNC_MASK_BITNR), #define QEMU_PCIE_ARI_NEXTFN_1_BITNR 12 QEMU_PCIE_ARI_NEXTFN_1 = (1 << QEMU_PCIE_ARI_NEXTFN_1_BITNR), +#define QEMU_PCIE_EXT_TAG_BITNR 13 + QEMU_PCIE_EXT_TAG = (1 << QEMU_PCIE_EXT_TAG_BITNR), +#define QEMU_PCI_CAP_PM_BITNR 14 + QEMU_PCI_CAP_PM = (1 << QEMU_PCI_CAP_PM_BITNR), +#define QEMU_PCI_SKIP_RESET_ON_CPR_BITNR 15 + QEMU_PCI_SKIP_RESET_ON_CPR = (1 << QEMU_PCI_SKIP_RESET_ON_CPR_BITNR), }; typedef struct PCIINTxRoute { @@ -294,6 +314,9 @@ int pci_bus_get_irq_level(PCIBus *bus, int irq_num); uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus); void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask); void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask); +bool pci_bus_add_fw_cfg_extra_pci_roots(FWCfgState *fw_cfg, + PCIBus *bus, + Error **errp); /* 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD */ static inline int pci_swizzle(int slot, int pin) { @@ -363,6 +386,28 @@ void pci_bus_get_w64_range(PCIBus *bus, Range *range); void pci_device_deassert_intx(PCIDevice *dev); +/* Page Request Interface */ +typedef enum { + IOMMU_PRI_RESP_SUCCESS, + IOMMU_PRI_RESP_INVALID_REQUEST, + IOMMU_PRI_RESP_FAILURE, +} IOMMUPRIResponseCode; + +typedef struct IOMMUPRIResponse { + IOMMUPRIResponseCode response_code; + uint16_t prgi; +} IOMMUPRIResponse; + +struct IOMMUPRINotifier; + +typedef void (*IOMMUPRINotify)(struct IOMMUPRINotifier *notifier, + IOMMUPRIResponse *response); + +typedef struct IOMMUPRINotifier { + IOMMUPRINotify notify; +} IOMMUPRINotifier; + +#define PCI_PRI_PRGI_MASK 0x1ffU /** * struct PCIIOMMUOps: callbacks structure for specific IOMMU handlers @@ -417,12 +462,350 @@ typedef struct PCIIOMMUOps { * @devfn: device and function number of the PCI device. */ void (*unset_iommu_device)(PCIBus *bus, void *opaque, int devfn); + /** + * @get_viommu_flags: get vIOMMU flags + * + * Optional callback, if not implemented, then vIOMMU doesn't support + * exposing flags to other sub-system, e.g., VFIO. 
Each flag can be + * an expectation or request to other sub-system or just a pure vIOMMU + * capability. vIOMMU can choose which flags to expose. + * + * @opaque: the data passed to pci_setup_iommu(). + * + * Returns: 64bit bitmap with each bit represents a flag that vIOMMU + * wants to expose. See VIOMMU_FLAG_* in include/hw/iommu.h for all + * possible flags currently used. These flags are theoretical which + * are only determined by vIOMMU device properties and independent on + * the actual host capabilities they may depend on. + */ + uint64_t (*get_viommu_flags)(void *opaque); + /** + * @get_iotlb_info: get properties required to initialize a device IOTLB. + * + * Callback required if devices are allowed to cache translations. + * + * @opaque: the data passed to pci_setup_iommu(). + * + * @addr_width: the address width of the IOMMU (output parameter). + * + * @min_page_size: the page size of the IOMMU (output parameter). + */ + void (*get_iotlb_info)(void *opaque, uint8_t *addr_width, + uint32_t *min_page_size); + /** + * @init_iotlb_notifier: initialize an IOMMU notifier. + * + * Optional callback. + * + * @bus: the #PCIBus of the PCI device. + * + * @opaque: the data passed to pci_setup_iommu(). + * + * @devfn: device and function number of the PCI device. + * + * @n: the notifier to be initialized. + * + * @fn: the callback to be installed. + * + * @user_opaque: a user pointer that can be used to track a state. + */ + void (*init_iotlb_notifier)(PCIBus *bus, void *opaque, int devfn, + IOMMUNotifier *n, IOMMUNotify fn, + void *user_opaque); + /** + * @register_iotlb_notifier: setup an IOTLB invalidation notifier. + * + * Callback required if devices are allowed to cache translations. + * + * @bus: the #PCIBus of the PCI device. + * + * @opaque: the data passed to pci_setup_iommu(). + * + * @devfn: device and function number of the PCI device. + * + * @pasid: the pasid of the address space to watch. + * + * @n: the notifier to register. + */ + void (*register_iotlb_notifier)(PCIBus *bus, void *opaque, int devfn, + uint32_t pasid, IOMMUNotifier *n); + /** + * @unregister_iotlb_notifier: remove an IOTLB invalidation notifier. + * + * Callback required if devices are allowed to cache translations. + * + * @bus: the #PCIBus of the PCI device. + * + * @opaque: the data passed to pci_setup_iommu(). + * + * @devfn: device and function number of the PCI device. + * + * @pasid: the pasid of the address space to stop watching. + * + * @n: the notifier to unregister. + */ + void (*unregister_iotlb_notifier)(PCIBus *bus, void *opaque, int devfn, + uint32_t pasid, IOMMUNotifier *n); + /** + * @ats_request_translation: issue an ATS request. + * + * Callback required if devices are allowed to use the address + * translation service. + * + * @bus: the #PCIBus of the PCI device. + * + * @opaque: the data passed to pci_setup_iommu(). + * + * @devfn: device and function number of the PCI device. + * + * @pasid: the pasid of the address space to use for the request. + * + * @priv_req: privileged mode bit (PASID TLP). + * + * @exec_req: execute request bit (PASID TLP). + * + * @addr: start address of the memory range to be translated. + * + * @length: length of the memory range in bytes. + * + * @no_write: request a read-only translation (if supported). + * + * @result: buffer in which the TLB entries will be stored. + * + * @result_length: result buffer length. + * + * @err_count: number of untranslated subregions. 
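 *
 * Editor's note: illustrative sketch, not part of this patch. A vIOMMU opts
 * into the new optional hooks simply by filling in the corresponding
 * PCIIOMMUOps members before calling pci_setup_iommu(); the "my_viommu_*"
 * names below are hypothetical:
 *
 *     static uint64_t my_viommu_get_flags(void *opaque)
 *     {
 *         return 0;   // no special capability flags advertised
 *     }
 *
 *     static const PCIIOMMUOps my_viommu_ops = {
 *         .get_address_space = my_viommu_get_address_space, // pre-existing hook
 *         .get_viommu_flags  = my_viommu_get_flags,
 *     };
 *
 *     // at realize time: pci_setup_iommu(bus, &my_viommu_ops, s);
 *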
+ * + * Returns: the number of translations stored in the result buffer, or + * -ENOMEM if the buffer is not large enough. + */ + ssize_t (*ats_request_translation)(PCIBus *bus, void *opaque, int devfn, + uint32_t pasid, bool priv_req, + bool exec_req, hwaddr addr, + size_t length, bool no_write, + IOMMUTLBEntry *result, + size_t result_length, + uint32_t *err_count); + /** + * @pri_register_notifier: setup the PRI completion callback. + * + * Callback required if devices are allowed to use the page request + * interface. + * + * @bus: the #PCIBus of the PCI device. + * + * @opaque: the data passed to pci_setup_iommu(). + * + * @devfn: device and function number of the PCI device. + * + * @pasid: the pasid of the address space to track. + * + * @notifier: the notifier to register. + */ + void (*pri_register_notifier)(PCIBus *bus, void *opaque, int devfn, + uint32_t pasid, IOMMUPRINotifier *notifier); + /** + * @pri_unregister_notifier: remove the PRI completion callback. + * + * Callback required if devices are allowed to use the page request + * interface. + * + * @bus: the #PCIBus of the PCI device. + * + * @opaque: the data passed to pci_setup_iommu(). + * + * @devfn: device and function number of the PCI device. + * + * @pasid: the pasid of the address space to stop tracking. + */ + void (*pri_unregister_notifier)(PCIBus *bus, void *opaque, int devfn, + uint32_t pasid); + /** + * @pri_request_page: issue a PRI request. + * + * Callback required if devices are allowed to use the page request + * interface. + * + * @bus: the #PCIBus of the PCI device. + * + * @opaque: the data passed to pci_setup_iommu(). + * + * @devfn: device and function number of the PCI device. + * + * @pasid: the pasid of the address space to use for the request. + * + * @priv_req: privileged mode bit (PASID TLP). + * + * @exec_req: execute request bit (PASID TLP). + * + * @addr: untranslated address of the requested page. + * + * @lpig: last page in group. + * + * @prgi: page request group index. + * + * @is_read: request read access. + * + * @is_write: request write access. + */ + int (*pri_request_page)(PCIBus *bus, void *opaque, int devfn, + uint32_t pasid, bool priv_req, bool exec_req, + hwaddr addr, bool lpig, uint16_t prgi, bool is_read, + bool is_write); + /** + * @get_msi_address_space: get the address space for MSI doorbell address + * for devices + * + * Optional callback which returns a pointer to an #AddressSpace. This + * is required if MSI doorbell also gets translated through IOMMU(eg: ARM) + * + * @bus: the #PCIBus being accessed. + * + * @opaque: the data passed to pci_setup_iommu(). + * + * @devfn: device and function number + */ + AddressSpace * (*get_msi_address_space)(PCIBus *bus, void *opaque, + int devfn); } PCIIOMMUOps; +bool pci_device_get_iommu_bus_devfn(PCIDevice *dev, PCIBus **piommu_bus, + PCIBus **aliased_bus, int *aliased_devfn); AddressSpace *pci_device_iommu_address_space(PCIDevice *dev); bool pci_device_set_iommu_device(PCIDevice *dev, HostIOMMUDevice *hiod, Error **errp); void pci_device_unset_iommu_device(PCIDevice *dev); +AddressSpace *pci_device_iommu_msi_address_space(PCIDevice *dev); + +/** + * pci_device_get_viommu_flags: get vIOMMU flags. + * + * Returns a 64bit bitmap with each bit represents a vIOMMU exposed + * flags, 0 if vIOMMU doesn't support that. + * + * @dev: PCI device pointer. + */ +uint64_t pci_device_get_viommu_flags(PCIDevice *dev); + +/** + * pci_iommu_get_iotlb_info: get properties required to initialize a + * device IOTLB. 
+ * + * Returns 0 on success, or a negative errno otherwise. + * + * @dev: the device that wants to get the information. + * @addr_width: the address width of the IOMMU (output parameter). + * @min_page_size: the page size of the IOMMU (output parameter). + */ +int pci_iommu_get_iotlb_info(PCIDevice *dev, uint8_t *addr_width, + uint32_t *min_page_size); + +/** + * pci_iommu_init_iotlb_notifier: initialize an IOMMU notifier. + * + * This function is used by devices before registering an IOTLB notifier. + * + * @dev: the device. + * @n: the notifier to be initialized. + * @fn: the callback to be installed. + * @opaque: a user pointer that can be used to track a state. + */ +int pci_iommu_init_iotlb_notifier(PCIDevice *dev, IOMMUNotifier *n, + IOMMUNotify fn, void *opaque); + +/** + * pci_ats_request_translation: perform an ATS request. + * + * Returns the number of translations stored in @result in case of success, + * a negative error code otherwise. + * -ENOMEM is returned when the result buffer is not large enough to store + * all the translations. + * + * @dev: the ATS-capable PCI device. + * @pasid: the pasid of the address space in which the translation will be done. + * @priv_req: privileged mode bit (PASID TLP). + * @exec_req: execute request bit (PASID TLP). + * @addr: start address of the memory range to be translated. + * @length: length of the memory range in bytes. + * @no_write: request a read-only translation (if supported). + * @result: buffer in which the TLB entries will be stored. + * @result_length: result buffer length. + * @err_count: number of untranslated subregions. + */ +ssize_t pci_ats_request_translation(PCIDevice *dev, uint32_t pasid, + bool priv_req, bool exec_req, + hwaddr addr, size_t length, + bool no_write, IOMMUTLBEntry *result, + size_t result_length, + uint32_t *err_count); + +/** + * pci_pri_request_page: perform a PRI request. + * + * Returns 0 if the PRI request has been sent to the guest OS, + * an error code otherwise. + * + * @dev: the PRI-capable PCI device. + * @pasid: the pasid of the address space in which the translation will be done. + * @priv_req: privileged mode bit (PASID TLP). + * @exec_req: execute request bit (PASID TLP). + * @addr: untranslated address of the requested page. + * @lpig: last page in group. + * @prgi: page request group index. + * @is_read: request read access. + * @is_write: request write access. + */ +int pci_pri_request_page(PCIDevice *dev, uint32_t pasid, bool priv_req, + bool exec_req, hwaddr addr, bool lpig, + uint16_t prgi, bool is_read, bool is_write); + +/** + * pci_pri_register_notifier: register the PRI callback for a given address + * space. + * + * Returns 0 on success, an error code otherwise. + * + * @dev: the PRI-capable PCI device. + * @pasid: the pasid of the address space to track. + * @notifier: the notifier to register. + */ +int pci_pri_register_notifier(PCIDevice *dev, uint32_t pasid, + IOMMUPRINotifier *notifier); + +/** + * pci_pri_unregister_notifier: remove the PRI callback from a given address + * space. + * + * @dev: the PRI-capable PCI device. + * @pasid: the pasid of the address space to stop tracking. + */ +void pci_pri_unregister_notifier(PCIDevice *dev, uint32_t pasid); + +/** + * pci_iommu_register_iotlb_notifier: register a notifier for changes to + * IOMMU translation entries in a specific address space. + * + * Returns 0 on success, or a negative errno otherwise. + * + * @dev: the device that wants to get notified. + * @pasid: the pasid of the address space to track. 
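 *
 * Editor's note: illustrative sketch, not part of this patch. An ATS-capable
 * device model can pre-fill its device IOTLB with pci_ats_request_translation()
 * declared above; it assumes a PCIDevice *dev and an iova/len range from the
 * device's DMA engine, and the buffer size, PASID 0 and caching step are only
 * placeholders:
 *
 *     IOMMUTLBEntry entries[8];
 *     uint32_t err_count = 0;
 *     ssize_t n = pci_ats_request_translation(dev, 0, false, false,
 *                                             iova, len, false,
 *                                             entries, ARRAY_SIZE(entries),
 *                                             &err_count);
 *     if (n == -ENOMEM) {
 *         // retry with a larger result buffer
 *     } else if (n >= 0) {
 *         // insert the n returned entries into the device IOTLB cache
 *     }
 *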
+ * @n: the notifier to register. + */ +int pci_iommu_register_iotlb_notifier(PCIDevice *dev, uint32_t pasid, + IOMMUNotifier *n); + +/** + * pci_iommu_unregister_iotlb_notifier: unregister a notifier that has been + * registerd with pci_iommu_register_iotlb_notifier. + * + * Returns 0 on success, or a negative errno otherwise. + * + * @dev: the device that wants to stop notifications. + * @pasid: the pasid of the address space to stop tracking. + * @n: the notifier to unregister. + */ +int pci_iommu_unregister_iotlb_notifier(PCIDevice *dev, uint32_t pasid, + IOMMUNotifier *n); /** * pci_setup_iommu: Initialize specific IOMMU handlers for a PCIBus @@ -435,6 +818,8 @@ void pci_device_unset_iommu_device(PCIDevice *dev); */ void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque); +void pci_setup_iommu_per_bus(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque); + pcibus_t pci_bar_address(PCIDevice *d, int reg, uint8_t type, pcibus_t size); @@ -656,6 +1041,7 @@ void lsi53c8xx_handle_legacy_cmdline(DeviceState *lsi_dev); qemu_irq pci_allocate_irq(PCIDevice *pci_dev); void pci_set_irq(PCIDevice *pci_dev, int level); +int pci_irq_disabled(PCIDevice *d); static inline void pci_irq_assert(PCIDevice *pci_dev) { @@ -667,17 +1053,9 @@ static inline void pci_irq_deassert(PCIDevice *pci_dev) pci_set_irq(pci_dev, 0); } -/* - * FIXME: PCI does not work this way. - * All the callers to this method should be fixed. - */ -static inline void pci_irq_pulse(PCIDevice *pci_dev) -{ - pci_irq_assert(pci_dev); - pci_irq_deassert(pci_dev); -} - MSIMessage pci_get_msi_message(PCIDevice *dev, int vector); +void pci_set_enabled(PCIDevice *pci_dev, bool state); void pci_set_power(PCIDevice *pci_dev, bool state); +int pci_pm_init(PCIDevice *pci_dev, uint8_t offset, Error **errp); #endif diff --git a/include/hw/pci/pci_bridge.h b/include/hw/pci/pci_bridge.h index 5cd452115ad..b61360b9007 100644 --- a/include/hw/pci/pci_bridge.h +++ b/include/hw/pci/pci_bridge.h @@ -14,8 +14,8 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * along with this program; if not, see + * . 
* * split out pci bus specific stuff from pci.[hc] to pci_bridge.[hc] * Copyright (c) 2009 Isaku Yamahata @@ -72,6 +72,8 @@ struct PCIBridge { */ MemoryRegion address_space_mem; MemoryRegion address_space_io; + AddressSpace as_mem; + AddressSpace as_io; PCIBridgeWindows windows; @@ -102,6 +104,9 @@ typedef struct PXBPCIEDev { PXBDev parent_obj; } PXBPCIEDev; +#define TYPE_PXB_PCIE_BUS "pxb-pcie-bus" +#define TYPE_PXB_CXL_BUS "pxb-cxl-bus" +#define TYPE_PXB_PCIE_DEV "pxb-pcie" #define TYPE_PXB_DEV "pxb" OBJECT_DECLARE_SIMPLE_TYPE(PXBDev, PXB_DEV) diff --git a/include/hw/pci/pci_bus.h b/include/hw/pci/pci_bus.h index 22613125462..c7384467888 100644 --- a/include/hw/pci/pci_bus.h +++ b/include/hw/pci/pci_bus.h @@ -35,6 +35,7 @@ struct PCIBus { enum PCIBusFlags flags; const PCIIOMMUOps *iommu_ops; void *iommu_opaque; + bool iommu_per_bus; uint8_t devfn_min; uint32_t slot_reserved_mask; pci_set_irq_fn set_irq; diff --git a/include/hw/pci/pci_device.h b/include/hw/pci/pci_device.h index 15694f24894..eee03385686 100644 --- a/include/hw/pci/pci_device.h +++ b/include/hw/pci/pci_device.h @@ -38,6 +38,8 @@ struct PCIDeviceClass { uint16_t subsystem_id; /* only for header type = 0 */ const char *romfile; /* rom bar */ + + bool sriov_vf_user_creatable; }; enum PCIReqIDType { @@ -57,7 +59,7 @@ typedef struct PCIReqIDCache PCIReqIDCache; struct PCIDevice { DeviceState qdev; bool partially_hotplugged; - bool has_power; + bool enabled; /* PCI config space */ uint8_t *config; @@ -88,6 +90,7 @@ struct PCIDevice { char name[64]; PCIIORegion io_regions[PCI_NUM_REGIONS]; AddressSpace bus_master_as; + bool is_master; MemoryRegion bus_master_container_region; MemoryRegion bus_master_enable_region; @@ -105,6 +108,9 @@ struct PCIDevice { /* Capability bits */ uint32_t cap_present; + /* Offset of PM capability in config space */ + uint8_t pm_cap; + /* Offset of MSI-X capability in config space */ uint8_t msix_cap; @@ -148,7 +154,7 @@ struct PCIDevice { uint32_t romsize; bool has_rom; MemoryRegion rom; - uint32_t rom_bar; + int32_t rom_bar; /* INTx routing notifier */ PCIINTxRoutingNotifier intx_routing_notifier; @@ -167,6 +173,15 @@ struct PCIDevice { /* ID of standby device in net_failover pair */ char *failover_pair_id; uint32_t acpi_index; + + /* + * Indirect DMA region bounce buffer size as configured for the device. This + * is a configuration parameter that is reflected into bus_master_as when + * realizing the device. 
+ */ + uint32_t max_bounce_buffer_size; + + char *sriov_pf; }; static inline int pci_intx(PCIDevice *pci_dev) @@ -199,7 +214,7 @@ static inline int pci_is_express_downstream_port(const PCIDevice *d) static inline int pci_is_vf(const PCIDevice *d) { - return d->exp.sriov_vf.pf != NULL; + return d->sriov_pf || d->exp.sriov_vf.pf != NULL; } static inline uint32_t pci_config_size(const PCIDevice *d) diff --git a/include/hw/pci/pci_host.h b/include/hw/pci/pci_host.h index e52d8ec2cdc..954dd446fa4 100644 --- a/include/hw/pci/pci_host.h +++ b/include/hw/pci/pci_host.h @@ -68,6 +68,5 @@ uint32_t pci_data_read(PCIBus *s, uint32_t addr, unsigned len); extern const MemoryRegionOps pci_host_conf_le_ops; extern const MemoryRegionOps pci_host_conf_be_ops; extern const MemoryRegionOps pci_host_data_le_ops; -extern const MemoryRegionOps pci_host_data_be_ops; #endif /* PCI_HOST_H */ diff --git a/include/hw/pci/pci_ids.h b/include/hw/pci/pci_ids.h index f1a53fea8d6..33e2898be95 100644 --- a/include/hw/pci/pci_ids.h +++ b/include/hw/pci/pci_ids.h @@ -191,6 +191,7 @@ #define PCI_DEVICE_ID_APPLE_UNI_N_AGP 0x0020 #define PCI_DEVICE_ID_APPLE_U3_AGP 0x004b #define PCI_DEVICE_ID_APPLE_UNI_N_GMAC 0x0021 +#define PCI_DEVICE_ID_APPLE_VIRTIO_BLK 0x1a00 #define PCI_VENDOR_ID_SUN 0x108e #define PCI_DEVICE_ID_SUN_EBUS 0x1000 diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h index 5eddb909769..ff6ce08e135 100644 --- a/include/hw/pci/pcie.h +++ b/include/hw/pci/pcie.h @@ -58,8 +58,6 @@ typedef enum { struct PCIExpressDevice { /* Offset of express capability in config space */ uint8_t exp_cap; - /* Offset of Power Management capability in config space */ - uint8_t pm_cap; /* SLOT */ bool hpev_notified; /* Logical AND of conditions for hot plug event. @@ -72,8 +70,10 @@ struct PCIExpressDevice { uint16_t aer_cap; PCIEAERLog aer_log; - /* Offset of ATS capability in config space */ + /* Offset of ATS, PRI and PASID capabilities in config space */ uint16_t ats_cap; + uint16_t pasid_cap; + uint16_t pri_cap; /* ACS */ uint16_t acs_cap; @@ -141,6 +141,8 @@ void pcie_acs_reset(PCIDevice *dev); void pcie_ari_init(PCIDevice *dev, uint16_t offset); void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num); void pcie_ats_init(PCIDevice *dev, uint16_t offset, bool aligned); +void pcie_cap_fill_link_ep_usp(PCIDevice *dev, PCIExpLinkWidth width, + PCIExpLinkSpeed speed); void pcie_cap_slot_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); @@ -150,4 +152,13 @@ void pcie_cap_slot_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); + +void pcie_pasid_init(PCIDevice *dev, uint16_t offset, uint8_t pasid_width, + bool exec_perm, bool priv_mod); +void pcie_pri_init(PCIDevice *dev, uint16_t offset, uint32_t outstanding_pr_cap, + bool prg_response_pasid_req); + +bool pcie_pri_enabled(const PCIDevice *dev); +bool pcie_pasid_enabled(const PCIDevice *dev); +bool pcie_ats_enabled(const PCIDevice *dev); #endif /* QEMU_PCIE_H */ diff --git a/include/hw/pci/pcie_host.h b/include/hw/pci/pcie_host.h index 82d92177da9..f09de76bfe6 100644 --- a/include/hw/pci/pcie_host.h +++ b/include/hw/pci/pcie_host.h @@ -22,7 +22,7 @@ #define PCIE_HOST_H #include "hw/pci/pci_host.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qom/object.h" #define TYPE_PCIE_HOST_BRIDGE "pcie-host-bridge" diff --git a/include/hw/pci/pcie_port.h b/include/hw/pci/pcie_port.h index 
90e6cf45b87..7cd7af8cfa4 100644 --- a/include/hw/pci/pcie_port.h +++ b/include/hw/pci/pcie_port.h @@ -72,7 +72,6 @@ struct PCIESlot { }; void pcie_chassis_create(uint8_t chassis_number); -PCIESlot *pcie_chassis_find_slot(uint8_t chassis, uint16_t slot); int pcie_chassis_add_slot(struct PCIESlot *slot); void pcie_chassis_del_slot(PCIESlot *s); diff --git a/include/hw/pci/pcie_regs.h b/include/hw/pci/pcie_regs.h index 9d3b6868dce..33a22229fe2 100644 --- a/include/hw/pci/pcie_regs.h +++ b/include/hw/pci/pcie_regs.h @@ -86,6 +86,14 @@ typedef enum PCIExpLinkWidth { #define PCI_ARI_VER 1 #define PCI_ARI_SIZEOF 8 +/* PASID */ +#define PCI_PASID_VER 1 +#define PCI_EXT_CAP_PASID_MAX_WIDTH 20 +#define PCI_PASID_CAP_WIDTH_SHIFT 8 + +/* PRI */ +#define PCI_PRI_VER 1 + /* AER */ #define PCI_ERR_VER 2 #define PCI_ERR_SIZEOF 0x48 diff --git a/include/hw/pci/pcie_sriov.h b/include/hw/pci/pcie_sriov.h index 450cbef6c20..aeaa38cf345 100644 --- a/include/hw/pci/pcie_sriov.h +++ b/include/hw/pci/pcie_sriov.h @@ -16,10 +16,9 @@ #include "hw/pci/pci.h" typedef struct PCIESriovPF { - uint16_t num_vfs; /* Number of virtual functions created */ uint8_t vf_bar_type[PCI_NUM_REGIONS]; /* Store type for each VF bar */ - const char *vfname; /* Reference to the device type used for the VFs */ PCIDevice **vf; /* Pointer to an array of num_vfs VF devices */ + bool vf_user_created; /* If VFs are created by user */ } PCIESriovPF; typedef struct PCIESriovVF { @@ -27,10 +26,11 @@ typedef struct PCIESriovVF { uint16_t vf_number; /* Logical VF number of this function */ } PCIESriovVF; -void pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset, +bool pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset, const char *vfname, uint16_t vf_dev_id, uint16_t init_vfs, uint16_t total_vfs, - uint16_t vf_offset, uint16_t vf_stride); + uint16_t vf_offset, uint16_t vf_stride, + Error **errp); void pcie_sriov_pf_exit(PCIDevice *dev); /* Set up a VF bar in the SR/IOV bar area */ @@ -41,6 +41,26 @@ void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num, void pcie_sriov_vf_register_bar(PCIDevice *dev, int region_num, MemoryRegion *memory); +/** + * pcie_sriov_pf_init_from_user_created_vfs() - Initialize PF with user-created + * VFs, adding ARI to PF + * @dev: A PCIe device being realized. + * @offset: The offset of the SR-IOV capability. + * @errp: pointer to Error*, to store an error if it happens. + * + * Initializes a PF with user-created VFs, adding the ARI extended capability to + * the PF. The VFs should call pcie_ari_init() to form an ARI device. + * + * Return: The size of added capabilities. 0 if the user did not create VFs. + * -1 if failed. 
+ */ +int16_t pcie_sriov_pf_init_from_user_created_vfs(PCIDevice *dev, + uint16_t offset, + Error **errp); + +bool pcie_sriov_register_device(PCIDevice *dev, Error **errp); +void pcie_sriov_unregister_device(PCIDevice *dev); + /* * Default (minimal) page size support values * as required by the SR/IOV standard: @@ -58,6 +78,8 @@ void pcie_sriov_pf_add_sup_pgsize(PCIDevice *dev, uint16_t opt_sup_pgsize); void pcie_sriov_config_write(PCIDevice *dev, uint32_t address, uint32_t val, int len); +void pcie_sriov_pf_post_load(PCIDevice *dev); + /* Reset SR/IOV */ void pcie_sriov_pf_reset(PCIDevice *dev); diff --git a/include/hw/pci/shpc.h b/include/hw/pci/shpc.h index a0789df1538..ad1089567a0 100644 --- a/include/hw/pci/shpc.h +++ b/include/hw/pci/shpc.h @@ -1,7 +1,7 @@ #ifndef SHPC_H #define SHPC_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/hotplug.h" #include "hw/pci/pci_device.h" #include "migration/vmstate.h" diff --git a/include/hw/pcmcia.h b/include/hw/pcmcia.h deleted file mode 100644 index ab268027511..00000000000 --- a/include/hw/pcmcia.h +++ /dev/null @@ -1,66 +0,0 @@ -#ifndef HW_PCMCIA_H -#define HW_PCMCIA_H - -/* PCMCIA/Cardbus */ - -#include "hw/qdev-core.h" -#include "qom/object.h" - -typedef struct PCMCIASocket { - qemu_irq irq; - bool attached; -} PCMCIASocket; - -#define TYPE_PCMCIA_CARD "pcmcia-card" -OBJECT_DECLARE_TYPE(PCMCIACardState, PCMCIACardClass, PCMCIA_CARD) - -struct PCMCIACardState { - /*< private >*/ - DeviceState parent_obj; - /*< public >*/ - - PCMCIASocket *slot; -}; - -struct PCMCIACardClass { - /*< private >*/ - DeviceClass parent_class; - /*< public >*/ - - int (*attach)(PCMCIACardState *state); - int (*detach)(PCMCIACardState *state); - - const uint8_t *cis; - int cis_len; - - /* Only valid if attached */ - uint8_t (*attr_read)(PCMCIACardState *card, uint32_t address); - void (*attr_write)(PCMCIACardState *card, uint32_t address, uint8_t value); - uint16_t (*common_read)(PCMCIACardState *card, uint32_t address); - void (*common_write)(PCMCIACardState *card, - uint32_t address, uint16_t value); - uint16_t (*io_read)(PCMCIACardState *card, uint32_t address); - void (*io_write)(PCMCIACardState *card, uint32_t address, uint16_t value); -}; - -#define CISTPL_DEVICE 0x01 /* 5V Device Information Tuple */ -#define CISTPL_NO_LINK 0x14 /* No Link Tuple */ -#define CISTPL_VERS_1 0x15 /* Level 1 Version Tuple */ -#define CISTPL_JEDEC_C 0x18 /* JEDEC ID Tuple */ -#define CISTPL_JEDEC_A 0x19 /* JEDEC ID Tuple */ -#define CISTPL_CONFIG 0x1a /* Configuration Tuple */ -#define CISTPL_CFTABLE_ENTRY 0x1b /* 16-bit PCCard Configuration */ -#define CISTPL_DEVICE_OC 0x1c /* Additional Device Information */ -#define CISTPL_DEVICE_OA 0x1d /* Additional Device Information */ -#define CISTPL_DEVICE_GEO 0x1e /* Additional Device Information */ -#define CISTPL_DEVICE_GEO_A 0x1f /* Additional Device Information */ -#define CISTPL_MANFID 0x20 /* Manufacture ID Tuple */ -#define CISTPL_FUNCID 0x21 /* Function ID Tuple */ -#define CISTPL_FUNCE 0x22 /* Function Extension Tuple */ -#define CISTPL_END 0xff /* Tuple End */ -#define CISTPL_ENDMARK 0xff - -/* dscm1xxxx.c */ -PCMCIACardState *dscm1xxxx_init(DriveInfo *bdrv); - -#endif diff --git a/include/hw/ppc/mac_dbdma.h b/include/hw/ppc/mac_dbdma.h index 4a3f644516b..896ee4a2b17 100644 --- a/include/hw/ppc/mac_dbdma.h +++ b/include/hw/ppc/mac_dbdma.h @@ -23,9 +23,9 @@ #ifndef HW_MAC_DBDMA_H #define HW_MAC_DBDMA_H -#include "exec/memory.h" +#include "system/memory.h" #include "qemu/iov.h" -#include "sysemu/dma.h" 
+#include "system/dma.h" #include "hw/sysbus.h" #include "qom/object.h" @@ -44,10 +44,6 @@ struct DBDMA_io { DBDMA_end dma_end; /* DMA is in progress, don't start another one */ bool processing; - /* DMA request */ - void *dma_mem; - dma_addr_t dma_len; - DMADirection dir; }; /* diff --git a/include/hw/ppc/pnv.h b/include/hw/ppc/pnv.h index fcb6699150c..d8fca079f2f 100644 --- a/include/hw/ppc/pnv.h +++ b/include/hw/ppc/pnv.h @@ -205,9 +205,8 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor); #define PNV9_OCC_SENSOR_BASE(chip) (PNV9_OCC_COMMON_AREA_BASE + \ PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id)) -#define PNV9_HOMER_SIZE 0x0000000000400000ull #define PNV9_HOMER_BASE(chip) \ - (0x203ffd800000ull + ((uint64_t)(chip)->chip_id) * PNV9_HOMER_SIZE) + (0x203ffd800000ull + ((uint64_t)(chip)->chip_id) * PNV_HOMER_SIZE) /* * POWER10 MMIO base addresses - 16TB stride per chip @@ -250,8 +249,7 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor); #define PNV10_OCC_SENSOR_BASE(chip) (PNV10_OCC_COMMON_AREA_BASE + \ PNV_OCC_SENSOR_DATA_BLOCK_BASE((chip)->chip_id)) -#define PNV10_HOMER_SIZE 0x0000000000400000ull #define PNV10_HOMER_BASE(chip) \ - (0x300ffd800000ll + ((uint64_t)(chip)->chip_id) * PNV10_HOMER_SIZE) + (0x300ffd800000ll + ((uint64_t)(chip)->chip_id) * PNV_HOMER_SIZE) #endif /* PPC_PNV_H */ diff --git a/include/hw/ppc/pnv_homer.h b/include/hw/ppc/pnv_homer.h index b1c5d498dc5..a6f2710fa16 100644 --- a/include/hw/ppc/pnv_homer.h +++ b/include/hw/ppc/pnv_homer.h @@ -41,19 +41,21 @@ struct PnvHomer { PnvChip *chip; MemoryRegion pba_regs; - MemoryRegion regs; + MemoryRegion mem; + hwaddr base; }; struct PnvHomerClass { DeviceClass parent_class; + /* Get base address of HOMER memory */ + hwaddr (*get_base)(PnvChip *chip); + /* Size of HOMER memory */ + int size; + int pba_size; const MemoryRegionOps *pba_ops; - int homer_size; - const MemoryRegionOps *homer_ops; - - hwaddr core_max_base; }; #endif /* PPC_PNV_HOMER_H */ diff --git a/include/hw/ppc/pnv_lpc.h b/include/hw/ppc/pnv_lpc.h index 174add4c53b..266d56214f8 100644 --- a/include/hw/ppc/pnv_lpc.h +++ b/include/hw/ppc/pnv_lpc.h @@ -20,7 +20,7 @@ #ifndef PPC_PNV_LPC_H #define PPC_PNV_LPC_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/ppc/pnv.h" #include "hw/qdev-core.h" #include "hw/isa/isa.h" /* For ISA_NUM_IRQS */ diff --git a/include/hw/ppc/pnv_occ.h b/include/hw/ppc/pnv_occ.h index df321244e3b..013ea2e53e3 100644 --- a/include/hw/ppc/pnv_occ.h +++ b/include/hw/ppc/pnv_occ.h @@ -20,7 +20,7 @@ #ifndef PPC_PNV_OCC_H #define PPC_PNV_OCC_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/qdev-core.h" #define TYPE_PNV_OCC "pnv-occ" @@ -41,11 +41,17 @@ DECLARE_INSTANCE_CHECKER(PnvOCC, PNV10_OCC, TYPE_PNV10_OCC) struct PnvOCC { DeviceState xd; + /* OCC dynamic model is driven by this timer. 
*/ + QEMUTimer state_machine_timer; + /* OCC Misc interrupt */ uint64_t occmisc; qemu_irq psi_irq; + /* OCCs operate on regions of HOMER memory */ + PnvHomer *homer; + MemoryRegion xscom_regs; MemoryRegion sram_regs; }; @@ -53,6 +59,9 @@ struct PnvOCC { struct PnvOCCClass { DeviceClass parent_class; + hwaddr opal_shared_memory_offset; /* offset in HOMER */ + uint8_t opal_shared_memory_version; + int xscom_size; const MemoryRegionOps *xscom_ops; }; diff --git a/include/hw/ppc/pnv_pnor.h b/include/hw/ppc/pnv_pnor.h index 2e37ac88bf1..b44cafe918d 100644 --- a/include/hw/ppc/pnv_pnor.h +++ b/include/hw/ppc/pnv_pnor.h @@ -13,9 +13,11 @@ #include "hw/sysbus.h" /* - * PNOR offset on the LPC FW address space + * PNOR offset on the LPC FW address space. For now this should be 0 because + * skiboot 7.1 has a bug where IDSEL > 0 (LPC FW address > 256MB) access is + * not performed correctly. */ -#define PNOR_SPI_OFFSET 0x0c000000UL +#define PNOR_SPI_OFFSET 0x00000000UL #define TYPE_PNV_PNOR "pnv-pnor" OBJECT_DECLARE_SIMPLE_TYPE(PnvPnor, PNV_PNOR) @@ -26,6 +28,7 @@ struct PnvPnor { BlockBackend *blk; uint8_t *storage; + uint32_t lpc_address; /* Offset within LPC FW space */ int64_t size; MemoryRegion mmio; }; diff --git a/include/hw/ppc/pnv_sbe.h b/include/hw/ppc/pnv_sbe.h index b6b378ad14c..48a8b86a80b 100644 --- a/include/hw/ppc/pnv_sbe.h +++ b/include/hw/ppc/pnv_sbe.h @@ -20,7 +20,7 @@ #ifndef PPC_PNV_SBE_H #define PPC_PNV_SBE_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/qdev-core.h" #define TYPE_PNV_SBE "pnv-sbe" diff --git a/include/hw/ppc/pnv_xscom.h b/include/hw/ppc/pnv_xscom.h index 648388a5995..b14549db703 100644 --- a/include/hw/ppc/pnv_xscom.h +++ b/include/hw/ppc/pnv_xscom.h @@ -20,7 +20,7 @@ #ifndef PPC_PNV_XSCOM_H #define PPC_PNV_XSCOM_H -#include "exec/memory.h" +#include "system/memory.h" typedef struct PnvXScomInterface PnvXScomInterface; typedef struct PnvChip PnvChip; @@ -126,6 +126,8 @@ struct PnvXScomInterfaceClass { #define PNV9_XSCOM_PEC_PCI_BASE 0xd010800 #define PNV9_XSCOM_PEC_PCI_SIZE 0x200 +#define PNV9_XSCOM_PEC_NEST_CPLT_BASE 0x0d000000 + /* XSCOM PCI "pass-through" window to PHB SCOM */ #define PNV9_XSCOM_PEC_PCI_STK0 0x100 #define PNV9_XSCOM_PEC_PCI_STK1 0x140 @@ -197,6 +199,8 @@ struct PnvXScomInterfaceClass { #define PNV10_XSCOM_PEC_NEST_BASE 0x3011800 /* index goes downwards ... */ #define PNV10_XSCOM_PEC_NEST_SIZE 0x100 +#define PNV10_XSCOM_PEC_NEST_CPLT_BASE 0x08000000 + #define PNV10_XSCOM_PEC_PCI_BASE 0x8010800 /* index goes upwards ... 
*/ #define PNV10_XSCOM_PEC_PCI_SIZE 0x200 diff --git a/include/hw/ppc/ppc.h b/include/hw/ppc/ppc.h index d5d119ea7f5..8a14d623f8c 100644 --- a/include/hw/ppc/ppc.h +++ b/include/hw/ppc/ppc.h @@ -116,6 +116,13 @@ enum { #define PPC_SERIAL_MM_BAUDBASE 399193 +#ifndef CONFIG_USER_ONLY +void booke206_set_tlb(ppcmas_tlb_t *tlb, target_ulong va, hwaddr pa, + hwaddr len); +void booke_set_tlb(ppcemb_tlb_t *tlb, target_ulong va, hwaddr pa, + target_ulong size); +#endif + /* ppc_booke.c */ void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags); #endif diff --git a/include/hw/ppc/ppc4xx.h b/include/hw/ppc/ppc4xx.h index 1bd9b8821b0..2e94b006730 100644 --- a/include/hw/ppc/ppc4xx.h +++ b/include/hw/ppc/ppc4xx.h @@ -26,7 +26,7 @@ #define PPC4XX_H #include "hw/ppc/ppc.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/sysbus.h" /* diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index f6de3e99721..39bd5bd5ed3 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -2,7 +2,7 @@ #define HW_SPAPR_H #include "qemu/units.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/boards.h" #include "hw/ppc/spapr_drc.h" #include "hw/mem/pc-dimm.h" @@ -83,8 +83,10 @@ typedef enum { #define SPAPR_CAP_AIL_MODE_3 0x0C /* Nested PAPR */ #define SPAPR_CAP_NESTED_PAPR 0x0D +/* DAWR1 */ +#define SPAPR_CAP_DAWR1 0x0E /* Num Caps */ -#define SPAPR_CAP_NUM (SPAPR_CAP_NESTED_PAPR + 1) +#define SPAPR_CAP_NUM (SPAPR_CAP_DAWR1 + 1) /* * Capability Values @@ -141,11 +143,8 @@ struct SpaprMachineClass { MachineClass parent_class; /*< public >*/ - bool dr_lmb_enabled; /* enable dynamic-reconfig/hotplug of LMBs */ bool dr_phb_enabled; /* enable dynamic-reconfig/hotplug of PHBs */ bool update_dt_enabled; /* enable KVMPPC_H_UPDATE_DT */ - bool use_ohci_by_default; /* use USB-OHCI instead of XHCI */ - bool pre_2_10_has_unused_icps; bool legacy_irq_allocation; uint32_t nr_xirqs; bool broken_host_serial_model; /* present real host info to the guest */ @@ -204,6 +203,7 @@ struct SpaprMachineState { uint32_t fdt_initial_size; void *fdt_blob; uint8_t fdt_rng_seed[32]; + uint64_t hashpkey_val; long kernel_size; bool kernel_le; uint64_t kernel_addr; @@ -409,6 +409,7 @@ struct SpaprMachineState { #define H_SET_MODE_RESOURCE_SET_DAWR0 2 #define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3 #define H_SET_MODE_RESOURCE_LE 4 +#define H_SET_MODE_RESOURCE_SET_DAWR1 5 /* Flags for H_SET_MODE_RESOURCE_LE */ #define H_SET_MODE_ENDIAN_BIG 0 @@ -1006,6 +1007,7 @@ extern const VMStateDescription vmstate_spapr_cap_fwnmi; extern const VMStateDescription vmstate_spapr_cap_rpt_invalidate; extern const VMStateDescription vmstate_spapr_cap_ail_mode_3; extern const VMStateDescription vmstate_spapr_wdt; +extern const VMStateDescription vmstate_spapr_cap_dawr1; static inline uint8_t spapr_get_cap(SpaprMachineState *spapr, int cap) { diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h index 69a52e39b85..68f70834832 100644 --- a/include/hw/ppc/spapr_cpu_core.h +++ b/include/hw/ppc/spapr_cpu_core.h @@ -28,7 +28,6 @@ struct SpaprCpuCore { /*< public >*/ PowerPCCPU **threads; int node_id; - bool pre_3_0_migration; /* older machine don't know about SpaprCpuState */ }; struct SpaprCpuCoreClass { diff --git a/include/hw/ppc/spapr_drc.h b/include/hw/ppc/spapr_drc.h index 02a63b36666..9ff42909c93 100644 --- a/include/hw/ppc/spapr_drc.h +++ b/include/hw/ppc/spapr_drc.h @@ -15,7 +15,7 @@ #include <libfdt.h> #include "qom/object.h" -#include "sysemu/runstate.h" +#include "system/runstate.h"
#include "hw/qdev-core.h" #include "qapi/error.h" diff --git a/include/hw/ppc/spapr_nested.h b/include/hw/ppc/spapr_nested.h index 93ef14adcc5..f7be0d5a955 100644 --- a/include/hw/ppc/spapr_nested.h +++ b/include/hw/ppc/spapr_nested.h @@ -11,7 +11,13 @@ #define GSB_TB_OFFSET 0x0004 /* Timebase Offset */ #define GSB_PART_SCOPED_PAGETBL 0x0005 /* Partition Scoped Page Table */ #define GSB_PROCESS_TBL 0x0006 /* Process Table */ - /* RESERVED 0x0007 - 0x0BFF */ + /* RESERVED 0x0007 - 0x07FF */ +#define GSB_L0_GUEST_HEAP_INUSE 0x0800 /* Guest Management Heap Size */ +#define GSB_L0_GUEST_HEAP_MAX 0x0801 /* Guest Management Heap Max Size */ +#define GSB_L0_GUEST_PGTABLE_SIZE_INUSE 0x0802 /* Guest Pagetable Size */ +#define GSB_L0_GUEST_PGTABLE_SIZE_MAX 0x0803 /* Guest Pagetable Max Size */ +#define GSB_L0_GUEST_PGTABLE_RECLAIMED 0x0804 /* Pagetable Reclaim in bytes */ + /* RESERVED 0x0805 - 0xBFF */ #define GSB_VCPU_IN_BUFFER 0x0C00 /* Run VCPU Input Buffer */ #define GSB_VCPU_OUT_BUFFER 0x0C01 /* Run VCPU Out Buffer */ #define GSB_VCPU_VPA 0x0C02 /* HRA to Guest VCPU VPA */ @@ -99,7 +105,8 @@ #define GSB_VCPU_SPR_HASHKEYR 0x1050 #define GSB_VCPU_SPR_HASHPKEYR 0x1051 #define GSB_VCPU_SPR_CTRL 0x1052 - /* RESERVED 0x1053 - 0x1FFF */ +#define GSB_VCPU_SPR_DPDES 0x1053 + /* RESERVED 0x1054 - 0x1FFF */ #define GSB_VCPU_SPR_CR 0x2000 #define GSB_VCPU_SPR_PIDR 0x2001 #define GSB_VCPU_SPR_DSISR 0x2002 @@ -195,6 +202,38 @@ typedef struct SpaprMachineStateNested { #define NESTED_API_PAPR 2 bool capabilities_set; uint32_t pvr_base; + + /** + * l0_guest_heap_inuse: The currently used bytes in the Hypervisor's Guest + * Management Space associated with the Host Partition. + **/ + uint64_t l0_guest_heap_inuse; + + /** + * host_heap_max: The maximum bytes available in the Hypervisor's Guest + * Management Space associated with the Host Partition. + **/ + uint64_t l0_guest_heap_max; + + /** + * host_pagetable: The currently used bytes in the Hypervisor's Guest + * Page Table Management Space associated with the Host Partition. + **/ + uint64_t l0_guest_pgtable_size_inuse; + + /** + * host_pagetable_max: The maximum bytes available in the Hypervisor's Guest + * Page Table Management Space associated with the Host Partition. + **/ + uint64_t l0_guest_pgtable_size_max; + + /** + * host_pagetable_reclaim: The amount of space in bytes that has been + * reclaimed due to overcommit in the Hypervisor's Guest Page Table + * Management Space associated with the Host Partition. 
+ **/ + uint64_t l0_guest_pgtable_reclaimed; + GHashTable *guests; } SpaprMachineStateNested; @@ -210,11 +249,14 @@ typedef struct SpaprMachineStateNestedGuest { #define H_GUEST_CAPABILITIES_COPY_MEM 0x8000000000000000 #define H_GUEST_CAPABILITIES_P9_MODE 0x4000000000000000 #define H_GUEST_CAPABILITIES_P10_MODE 0x2000000000000000 -#define H_GUEST_CAP_VALID_MASK (H_GUEST_CAPABILITIES_P10_MODE | \ +#define H_GUEST_CAPABILITIES_P11_MODE 0x1000000000000000 +#define H_GUEST_CAP_VALID_MASK (H_GUEST_CAPABILITIES_P11_MODE | \ + H_GUEST_CAPABILITIES_P10_MODE | \ H_GUEST_CAPABILITIES_P9_MODE) #define H_GUEST_CAP_COPY_MEM_BMAP 0 #define H_GUEST_CAP_P9_MODE_BMAP 1 #define H_GUEST_CAP_P10_MODE_BMAP 2 +#define H_GUEST_CAP_P11_MODE_BMAP 3 #define PAPR_NESTED_GUEST_MAX 4096 #define H_GUEST_DELETE_ALL_FLAG 0x8000000000000000ULL #define PAPR_NESTED_GUEST_VCPU_MAX 2048 @@ -225,9 +267,15 @@ typedef struct SpaprMachineStateNestedGuest { #define HVMASK_HDEXCR 0x00000000FFFFFFFF #define HVMASK_TB_OFFSET 0x000000FFFFFFFFFF #define GSB_MAX_BUF_SIZE (1024 * 1024) -#define H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE 0x8000000000000000 -#define GUEST_STATE_REQUEST_GUEST_WIDE 0x1 -#define GUEST_STATE_REQUEST_SET 0x2 +#define H_GUEST_GET_STATE_FLAGS_MASK 0xC000000000000000ULL +#define H_GUEST_SET_STATE_FLAGS_MASK 0x8000000000000000ULL +#define H_GUEST_SET_STATE_FLAGS_GUEST_WIDE 0x8000000000000000ULL +#define H_GUEST_GET_STATE_FLAGS_GUEST_WIDE 0x8000000000000000ULL +#define H_GUEST_GET_STATE_FLAGS_HOST_WIDE 0x4000000000000000ULL + +#define GUEST_STATE_REQUEST_GUEST_WIDE 0x1 +#define GUEST_STATE_REQUEST_HOST_WIDE 0x2 +#define GUEST_STATE_REQUEST_SET 0x4 /* * As per ISA v3.1B, following bits are reserved: @@ -247,6 +295,15 @@ typedef struct SpaprMachineStateNestedGuest { .copy = (c) \ } +#define GSBE_NESTED_MACHINE_DW(i, f) { \ + .id = (i), \ + .size = 8, \ + .location = get_machine_ptr, \ + .offset = offsetof(struct SpaprMachineStateNested, f), \ + .copy = copy_state_8to8, \ + .mask = HVMASK_DEFAULT \ +} + #define GSBE_NESTED(i, sz, f, c) { \ .id = (i), \ .size = (sz), \ @@ -505,9 +562,11 @@ struct guest_state_element_type { uint16_t id; int size; #define GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE 0x1 -#define GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY 0x2 +#define GUEST_STATE_ELEMENT_TYPE_FLAG_HOST_WIDE 0x2 +#define GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY 0x4 uint16_t flags; - void *(*location)(SpaprMachineStateNestedGuest *, target_ulong); + void *(*location)(struct SpaprMachineState *, SpaprMachineStateNestedGuest *, + target_ulong); size_t offset; void (*copy)(void *, void *, bool); uint64_t mask; diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h index 7eae1a48478..b8de4b06fb1 100644 --- a/include/hw/ppc/spapr_vio.h +++ b/include/hw/ppc/spapr_vio.h @@ -23,7 +23,7 @@ */ #include "hw/ppc/spapr.h" -#include "sysemu/dma.h" +#include "system/dma.h" #include "hw/irq.h" #include "qom/object.h" diff --git a/include/hw/ppc/vof.h b/include/hw/ppc/vof.h index d3f293da8b7..3a0fbffe54c 100644 --- a/include/hw/ppc/vof.h +++ b/include/hw/ppc/vof.h @@ -7,8 +7,8 @@ #define HW_VOF_H #include "qom/object.h" -#include "exec/address-spaces.h" -#include "exec/memory.h" +#include "system/address-spaces.h" +#include "system/memory.h" #include "exec/cpu-defs.h" typedef struct Vof { diff --git a/include/hw/ppc/xics.h b/include/hw/ppc/xics.h index e94d53405f5..097fcdf00f9 100644 --- a/include/hw/ppc/xics.h +++ b/include/hw/ppc/xics.h @@ -28,7 +28,7 @@ #ifndef XICS_H #define XICS_H -#include "exec/memory.h" +#include "system/memory.h" 
#include "hw/qdev-core.h" #include "qom/object.h" diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 28c181faa29..b7ca8544e43 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -130,17 +130,15 @@ * TCTX Thread interrupt Context * * - * Copyright (c) 2017-2018, IBM Corporation. - * - * This code is licensed under the GPL version 2 or later. See the - * COPYING file in the top-level directory. + * Copyright (c) 2017-2024, IBM Corporation. * + * SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef PPC_XIVE_H #define PPC_XIVE_H -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "hw/sysbus.h" #include "hw/ppc/xive_regs.h" #include "qom/object.h" @@ -218,7 +216,7 @@ static inline bool xive_source_esb_has_2page(XiveSource *xsrc) xsrc->esb_shift == XIVE_ESB_4K_2PAGE; } -static inline size_t xive_source_esb_len(XiveSource *xsrc) +static inline uint64_t xive_source_esb_len(XiveSource *xsrc) { return (1ull << xsrc->esb_shift) * xsrc->nr_irqs; } @@ -367,6 +365,11 @@ static inline uint32_t xive_tctx_word2(uint8_t *ring) return *((uint32_t *) &ring[TM_WORD2]); } +bool xive_ring_valid(XiveTCTX *tctx, uint8_t ring); +bool xive_nsr_indicates_exception(uint8_t ring, uint8_t nsr); +bool xive_nsr_indicates_group_exception(uint8_t ring, uint8_t nsr); +uint8_t xive_nsr_exception_ring(uint8_t ring, uint8_t nsr); + /* * XIVE Router */ @@ -423,7 +426,9 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas); typedef struct XiveTCTXMatch { XiveTCTX *tctx; + int count; uint8_t ring; + bool precluded; } XiveTCTXMatch; #define TYPE_XIVE_PRESENTER "xive-presenter" @@ -437,22 +442,29 @@ DECLARE_CLASS_CHECKERS(XivePresenterClass, XIVE_PRESENTER, struct XivePresenterClass { InterfaceClass parent; - int (*match_nvt)(XivePresenter *xptr, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool cam_ignore, uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match); + bool (*match_nvt)(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match); bool (*in_kernel)(const XivePresenter *xptr); uint32_t (*get_config)(XivePresenter *xptr); + int (*broadcast)(XivePresenter *xptr, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority); }; int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, uint8_t format, uint8_t nvt_blk, uint32_t nvt_idx, bool cam_ignore, uint32_t logic_serv); -bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool cam_ignore, uint8_t priority, - uint32_t logic_serv); +bool xive_presenter_match(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match); + +uint32_t xive_get_vpgroup_size(uint32_t nvp_index); +uint8_t xive_get_group_level(bool crowd, bool ignore, + uint32_t nvp_blk, uint32_t nvp_index); /* * XIVE Fabric (Interface between Interrupt Controller and Machine) @@ -467,10 +479,12 @@ DECLARE_CLASS_CHECKERS(XiveFabricClass, XIVE_FABRIC, struct XiveFabricClass { InterfaceClass parent; - int (*match_nvt)(XiveFabric *xfb, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool cam_ignore, uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match); + bool (*match_nvt)(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match); + int 
(*broadcast)(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority); }; /* @@ -510,8 +524,23 @@ static inline uint8_t xive_priority_to_ipb(uint8_t priority) 0 : 1 << (XIVE_PRIORITY_MAX - priority); } +static inline uint8_t xive_priority_to_pipr(uint8_t priority) +{ + return priority > XIVE_PRIORITY_MAX ? 0xFF : priority; +} + +/* + * Convert an Interrupt Pending Buffer (IPB) register to a Pending + * Interrupt Priority Register (PIPR), which contains the priority of + * the most favored pending notification. + */ +static inline uint8_t xive_ipb_to_pipr(uint8_t ibp) +{ + return ibp ? clz32((uint32_t)ibp << 24) : 0xff; +} + /* - * XIVE Thread Interrupt Management Aera (TIMA) + * XIVE Thread Interrupt Management Area (TIMA) * * This region gives access to the registers of the thread interrupt * management context. It is four page wide, each page providing a @@ -523,6 +552,30 @@ static inline uint8_t xive_priority_to_ipb(uint8_t priority) #define XIVE_TM_OS_PAGE 0x2 #define XIVE_TM_USER_PAGE 0x3 +/* + * The TCTX (TIMA) has 4 rings (phys, pool, os, user), but only signals + * (raises an interrupt on) the CPU from 3 of them. Phys and pool both + * cause a hypervisor privileged interrupt so interrupts presented on + * those rings signal using the phys ring. This helper returns the signal + * regs from the given ring. + */ +static inline uint8_t *xive_tctx_signal_regs(XiveTCTX *tctx, uint8_t ring) +{ + /* + * This is a good point to add invariants to ensure nothing has tried to + * signal using the POOL ring. + */ + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0); + + if (ring == TM_QW2_HV_POOL) { + /* POOL and PHYS rings share the signal regs (PIPR, NSR, CPPR) */ + ring = TM_QW3_HV_PHYS; + } + return &tctx->regs[ring]; +} + void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size); uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, @@ -532,8 +585,12 @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf); Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp); void xive_tctx_reset(XiveTCTX *tctx); void xive_tctx_destroy(XiveTCTX *tctx); -void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb); -void xive_tctx_reset_os_signal(XiveTCTX *tctx); +void xive_tctx_pipr_set(XiveTCTX *tctx, uint8_t ring, uint8_t priority, + uint8_t group_level); +void xive_tctx_pipr_present(XiveTCTX *tctx, uint8_t ring, uint8_t priority, + uint8_t group_level); +void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring); +uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring); /* * KVM XIVE device helpers diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h index ab68f8d157f..f4437e2c79a 100644 --- a/include/hw/ppc/xive2.h +++ b/include/hw/ppc/xive2.h @@ -1,11 +1,9 @@ /* * QEMU PowerPC XIVE2 interrupt controller model (POWER10) * - * Copyright (c) 2019-2022, IBM Corporation. - * - * This code is licensed under the GPL version 2 or later. See the - * COPYING file in the top-level directory. + * Copyright (c) 2019-2024, IBM Corporation. 
* + * SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef PPC_XIVE2_H @@ -31,9 +29,11 @@ OBJECT_DECLARE_TYPE(Xive2Router, Xive2RouterClass, XIVE2_ROUTER); * Configuration flags */ -#define XIVE2_GEN1_TIMA_OS 0x00000001 -#define XIVE2_VP_SAVE_RESTORE 0x00000002 -#define XIVE2_THREADID_8BITS 0x00000004 +#define XIVE2_GEN1_TIMA_OS 0x00000001 +#define XIVE2_VP_SAVE_RESTORE 0x00000002 +#define XIVE2_THREADID_8BITS 0x00000004 +#define XIVE2_EN_VP_GRP_PRIORITY 0x00000008 +#define XIVE2_VP_INT_PRIO 0x00000030 typedef struct Xive2RouterClass { SysBusDeviceClass parent; @@ -53,6 +53,12 @@ typedef struct Xive2RouterClass { Xive2Nvp *nvp); int (*write_nvp)(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, Xive2Nvp *nvp, uint8_t word_number); + int (*get_nvgc)(Xive2Router *xrtr, bool crowd, + uint8_t nvgc_blk, uint32_t nvgc_idx, + Xive2Nvgc *nvgc); + int (*write_nvgc)(Xive2Router *xrtr, bool crowd, + uint8_t nvgc_blk, uint32_t nvgc_idx, + Xive2Nvgc *nvgc); uint8_t (*get_block_id)(Xive2Router *xrtr); uint32_t (*get_config)(Xive2Router *xrtr); } Xive2RouterClass; @@ -67,9 +73,16 @@ int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, Xive2Nvp *nvp); int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, Xive2Nvp *nvp, uint8_t word_number); +int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd, + uint8_t nvgc_blk, uint32_t nvgc_idx, + Xive2Nvgc *nvgc); +int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd, + uint8_t nvgc_blk, uint32_t nvgc_idx, + Xive2Nvgc *nvgc); uint32_t xive2_router_get_config(Xive2Router *xrtr); void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked); +void xive2_notify(Xive2Router *xrtr, uint32_t lisn, bool pq_checked); /* * XIVE2 Presenter (POWER10) @@ -78,7 +91,17 @@ void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked); int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, uint8_t format, uint8_t nvt_blk, uint32_t nvt_idx, - bool cam_ignore, uint32_t logic_serv); + bool crowd, bool cam_ignore, + uint32_t logic_serv); + +uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr, + uint8_t blk, uint32_t idx, + uint16_t offset); + +uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr, + bool crowd, + uint8_t blk, uint32_t idx, + uint16_t offset, uint16_t val); /* * XIVE2 END ESBs (POWER10) @@ -103,9 +126,32 @@ typedef struct Xive2EndSource { * XIVE2 Thread Interrupt Management Area (POWER10) */ +void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); +void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); +void xive2_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size); uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, unsigned size); - +void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); +bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority); +void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority); +void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); +void xive2_tm_push_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); +uint64_t 
xive2_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size); +void xive2_tm_push_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); +uint64_t xive2_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size); +void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); +void xive2_tm_ack_os_el(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); #endif /* PPC_XIVE2_H */ diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h index 4349d009d00..2a3e60abadb 100644 --- a/include/hw/ppc/xive2_regs.h +++ b/include/hw/ppc/xive2_regs.h @@ -1,10 +1,9 @@ /* * QEMU PowerPC XIVE2 internal structure definitions (POWER10) * - * Copyright (c) 2019-2022, IBM Corporation. + * Copyright (c) 2019-2024, IBM Corporation. * - * This code is licensed under the GPL version 2 or later. See the - * COPYING file in the top-level directory. + * SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef PPC_XIVE2_REGS_H @@ -19,16 +18,18 @@ * mode (P10), the CAM line is slightly different as the VP space was * increased. */ -#define TM2_QW0W2_VU PPC_BIT32(0) +#define TM2_W2_VALID PPC_BIT32(0) +#define TM2_W2_HW PPC_BIT32(1) +#define TM2_QW0W2_VU TM2_W2_VALID #define TM2_QW0W2_LOGIC_SERV PPC_BITMASK32(4, 31) -#define TM2_QW1W2_VO PPC_BIT32(0) -#define TM2_QW1W2_HO PPC_BIT32(1) +#define TM2_QW1W2_VO TM2_W2_VALID +#define TM2_QW1W2_HO TM2_W2_HW #define TM2_QW1W2_OS_CAM PPC_BITMASK32(4, 31) -#define TM2_QW2W2_VP PPC_BIT32(0) -#define TM2_QW2W2_HP PPC_BIT32(1) +#define TM2_QW2W2_VP TM2_W2_VALID +#define TM2_QW2W2_HP TM2_W2_HW #define TM2_QW2W2_POOL_CAM PPC_BITMASK32(4, 31) -#define TM2_QW3W2_VT PPC_BIT32(0) -#define TM2_QW3W2_HT PPC_BIT32(1) +#define TM2_QW3W2_VT TM2_W2_VALID +#define TM2_QW3W2_HT TM2_W2_HW #define TM2_QW3W2_LP PPC_BIT32(6) #define TM2_QW3W2_LE PPC_BIT32(7) @@ -38,15 +39,18 @@ typedef struct Xive2Eas { uint64_t w; -#define EAS2_VALID PPC_BIT(0) -#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */ -#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */ -#define EAS2_MASKED PPC_BIT(32) /* Masked */ -#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */ +#define EAS2_VALID PPC_BIT(0) +#define EAS2_QOS PPC_BIT(1, 2) /* Quality of Service(unimp) */ +#define EAS2_RESUME PPC_BIT(3) /* END Resume(unimp) */ +#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */ +#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */ +#define EAS2_MASKED PPC_BIT(32) /* Masked */ +#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */ } Xive2Eas; #define xive2_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS2_VALID) #define xive2_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS2_MASKED) +#define xive2_eas_is_resume(eas) (be64_to_cpu((eas)->w) & EAS2_RESUME) void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf); @@ -86,6 +90,7 @@ typedef struct Xive2End { #define END2_W2_EQ_ADDR_HI PPC_BITMASK32(8, 31) uint32_t w3; #define END2_W3_EQ_ADDR_LO PPC_BITMASK32(0, 24) +#define END2_W3_CL PPC_BIT32(27) #define END2_W3_QSIZE PPC_BITMASK32(28, 31) uint32_t w4; #define END2_W4_END_BLOCK PPC_BITMASK32(4, 7) @@ -150,7 +155,12 @@ typedef struct Xive2Nvp { uint32_t w0; #define NVP2_W0_VALID PPC_BIT32(0) #define NVP2_W0_HW PPC_BIT32(7) +#define NVP2_W0_L PPC_BIT32(8) +#define NVP2_W0_G PPC_BIT32(9) +#define NVP2_W0_T PPC_BIT32(10) +#define NVP2_W0_P 
PPC_BIT32(11) #define NVP2_W0_ESC_END PPC_BIT32(25) /* 'N' bit 0:ESB 1:END */ +#define NVP2_W0_PGOFIRST PPC_BITMASK32(26, 31) uint32_t w1; #define NVP2_W1_CO PPC_BIT32(13) #define NVP2_W1_CO_PRIV PPC_BITMASK32(14, 15) @@ -160,6 +170,8 @@ typedef struct Xive2Nvp { #define NVP2_W2_CPPR PPC_BITMASK32(0, 7) #define NVP2_W2_IPB PPC_BITMASK32(8, 15) #define NVP2_W2_LSMFB PPC_BITMASK32(16, 23) +#define NVP2_W2_T PPC_BIT32(27) +#define NVP2_W2_LGS PPC_BITMASK32(28, 31) uint32_t w3; uint32_t w4; #define NVP2_W4_ESC_ESB_BLOCK PPC_BITMASK32(0, 3) /* N:0 */ @@ -171,7 +183,9 @@ typedef struct Xive2Nvp { #define NVP2_W5_VP_END_BLOCK PPC_BITMASK32(4, 7) #define NVP2_W5_VP_END_INDEX PPC_BITMASK32(8, 31) uint32_t w6; +#define NVP2_W6_REPORTING_LINE PPC_BITMASK32(4, 31) uint32_t w7; +#define NVP2_W7_REPORTING_LINE PPC_BITMASK32(0, 23) } Xive2Nvp; #define xive2_nvp_is_valid(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_VALID) @@ -196,9 +210,9 @@ static inline uint32_t xive2_nvp_idx(uint32_t cam_line) return cam_line & ((1 << XIVE2_NVP_SHIFT) - 1); } -static inline uint32_t xive2_nvp_blk(uint32_t cam_line) +static inline uint8_t xive2_nvp_blk(uint32_t cam_line) { - return (cam_line >> XIVE2_NVP_SHIFT) & 0xf; + return (uint8_t)((cam_line >> XIVE2_NVP_SHIFT) & 0xf); } void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf); @@ -209,7 +223,11 @@ void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf); typedef struct Xive2Nvgc { uint32_t w0; #define NVGC2_W0_VALID PPC_BIT32(0) +#define NVGC2_W0_PGONEXT PPC_BITMASK32(26, 31) uint32_t w1; +#define NVGC2_W1_PSIZE PPC_BITMASK32(0, 1) +#define NVGC2_W1_END_BLK PPC_BITMASK32(4, 7) +#define NVGC2_W1_END_IDX PPC_BITMASK32(8, 31) uint32_t w2; uint32_t w3; uint32_t w4; @@ -218,4 +236,16 @@ typedef struct Xive2Nvgc { uint32_t w7; } Xive2Nvgc; +#define xive2_nvgc_is_valid(nvgc) (be32_to_cpu((nvgc)->w0) & NVGC2_W0_VALID) + +void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, + GString *buf); + +#define NVx_BACKLOG_OP PPC_BITMASK(52, 53) +#define NVx_BACKLOG_PRIO PPC_BITMASK(57, 59) + +/* split the 6-bit crowd/group level */ +#define NVx_CROWD_LVL(level) ((level >> 4) & 0b11) +#define NVx_GROUP_LVL(level) (level & 0b1111) + #endif /* PPC_XIVE2_REGS_H */ diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h index b9db7abc2e9..54bc6c53b46 100644 --- a/include/hw/ppc/xive_regs.h +++ b/include/hw/ppc/xive_regs.h @@ -7,10 +7,9 @@ * access to the different fields. * * - * Copyright (c) 2016-2018, IBM Corporation. + * Copyright (c) 2016-2024, IBM Corporation. * - * This code is licensed under the GPL version 2 or later. See the - * COPYING file in the top-level directory. 
+ * SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef PPC_XIVE_REGS_H @@ -77,8 +76,11 @@ #define TM_LSMFB 0x3 /* - + + + */ #define TM_ACK_CNT 0x4 /* - + - - */ #define TM_INC 0x5 /* - + - + */ +#define TM_LGS 0x5 /* + + + + */ /* Rename P10 */ #define TM_AGE 0x6 /* - + - + */ +#define TM_T 0x6 /* - + - + */ /* Rename P10 */ #define TM_PIPR 0x7 /* - + - + */ +#define TM_OGEN 0xF /* - + - - */ /* P10 only */ #define TM_WORD0 0x0 #define TM_WORD1 0x4 @@ -98,6 +100,7 @@ #define TM_QW3W2_LP PPC_BIT32(6) #define TM_QW3W2_LE PPC_BIT32(7) #define TM_QW3W2_T PPC_BIT32(31) +#define TM_QW3B8_VT PPC_BIT8(0) /* * In addition to normal loads to "peek" and writes (only when invalid) @@ -114,26 +117,42 @@ * Then we have all these "special" CI ops at these offset that trigger * all sorts of side effects: */ -#define TM_SPC_ACK_EBB 0x800 /* Load8 ack EBB to reg*/ -#define TM_SPC_ACK_OS_REG 0x810 /* Load16 ack OS irq to reg */ +#define TM_SPC_ACK_EBB 0x800 /* Load8 ack EBB to reg */ +#define TM_SPC_ACK_OS_REG 0x810 /* Load16 ack OS irq to reg */ #define TM_SPC_PUSH_USR_CTX 0x808 /* Store32 Push/Validate user context */ -#define TM_SPC_PULL_USR_CTX 0x808 /* Load32 Pull/Invalidate user - * context */ -#define TM_SPC_SET_OS_PENDING 0x812 /* Store8 Set OS irq pending bit */ -#define TM_SPC_PULL_OS_CTX 0x818 /* Load32/Load64 Pull/Invalidate OS - * context to reg */ -#define TM_SPC_PULL_POOL_CTX 0x828 /* Load32/Load64 Pull/Invalidate Pool - * context to reg*/ -#define TM_SPC_ACK_HV_REG 0x830 /* Load16 ack HV irq to reg */ -#define TM_SPC_PULL_USR_CTX_OL 0xc08 /* Store8 Pull/Inval usr ctx to odd - * line */ -#define TM_SPC_ACK_OS_EL 0xc10 /* Store8 ack OS irq to even line */ -#define TM_SPC_ACK_HV_POOL_EL 0xc20 /* Store8 ack HV evt pool to even - * line */ -#define TM_SPC_ACK_HV_EL 0xc30 /* Store8 ack HV irq to even line */ +#define TM_SPC_PULL_USR_CTX 0x808 /* Load32 Pull/Invalidate user */ + /* context */ +#define TM_SPC_SET_OS_PENDING 0x812 /* Store8 Set OS irq pending bit */ +#define TM_SPC_PULL_OS_CTX_G2 0x810 /* Load32/Load64 Pull/Invalidate OS */ + /* context to reg */ +#define TM_SPC_PULL_OS_CTX 0x818 /* Load32/Load64 Pull/Invalidate OS */ + /* context to reg */ +#define TM_SPC_PULL_POOL_CTX_G2 0x820 /* Load32/Load64 Pull/Invalidate Pool */ + /* context to reg */ +#define TM_SPC_PULL_POOL_CTX 0x828 /* Load32/Load64 Pull/Invalidate Pool */ + /* context to reg */ +#define TM_SPC_ACK_HV_REG 0x830 /* Load16 ack HV irq to reg */ +#define TM_SPC_PULL_PHYS_CTX_G2 0x830 /* Load32 Pull phys ctx to reg */ +#define TM_SPC_PULL_PHYS_CTX 0x838 /* Load8 Pull phys ctx to reg */ +#define TM_SPC_PULL_USR_CTX_OL 0xc08 /* Store8 Pull/Inval usr ctx to odd */ + /* line */ +#define TM_SPC_ACK_OS_EL 0xc10 /* Store8 ack OS irq to even line */ +#define TM_SPC_PULL_OS_CTX_OL 0xc18 /* Pull/Invalidate OS context to */ + /* odd Thread reporting line */ +#define TM_SPC_ACK_HV_POOL_EL 0xc20 /* Store8 ack HV evt pool to even */ + /* line */ +#define TM_SPC_ACK_HV_EL 0xc30 /* Store8 ack HV irq to even line */ +#define TM_SPC_PULL_PHYS_CTX_OL 0xc38 /* Pull phys ctx to odd cache line */ /* XXX more... */ -/* NSR fields for the various QW ack types */ +/* + * NSR fields for the various QW ack types + * + * P10 has an extra bit in QW3 for the group level instead of the + * reserved 'i' bit. 
Since it is not used and we don't support group + * interrupts on P9, we use the P10 definition for the group level so + * that we can have common macros for the NSR + */ #define TM_QW0_NSR_EB PPC_BIT8(0) #define TM_QW1_NSR_EO PPC_BIT8(0) #define TM_QW3_NSR_HE PPC_BITMASK8(0, 1) @@ -141,8 +160,15 @@ #define TM_QW3_NSR_HE_POOL 1 #define TM_QW3_NSR_HE_PHYS 2 #define TM_QW3_NSR_HE_LSI 3 -#define TM_QW3_NSR_I PPC_BIT8(2) -#define TM_QW3_NSR_GRP_LVL PPC_BIT8(3, 7) +#define TM_NSR_GRP_LVL PPC_BITMASK8(2, 7) +/* + * On P10, the format of the 6-bit group level is: 2 bits for the + * crowd size and 4 bits for the group size. Since group/crowd size is + * always a power of 2, we encode the log. For example, group_level=4 + * means crowd size = 0 and group size = 16 (2^4) + * Same encoding is used in the NVP and NVGC structures for + * PGoFirst and PGoNext fields + */ /* * EAS (Event Assignment Structure) diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index 77bfcbdf732..530f3da7021 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -95,6 +95,7 @@ typedef void (*DeviceUnrealize)(DeviceState *dev); typedef void (*DeviceReset)(DeviceState *dev); typedef void (*BusRealize)(BusState *bus, Error **errp); typedef void (*BusUnrealize)(BusState *bus); +typedef int (*DeviceSyncConfig)(DeviceState *dev, Error **errp); /** * struct DeviceClass - The base class for all devices. @@ -103,6 +104,9 @@ typedef void (*BusUnrealize)(BusState *bus); * property is changed to %true. * @unrealize: Callback function invoked when the #DeviceState:realized * property is changed to %false. + * @sync_config: Callback function invoked when QMP command device-sync-config + * is called. Should synchronize device configuration from host to guest part + * and notify the guest about the change. * @hotpluggable: indicates if #DeviceClass is hotpluggable, available * as readonly "hotpluggable" property of #DeviceState instance * @@ -132,7 +136,13 @@ struct DeviceClass { * ensures a compile-time error if someone attempts to assign * dc->props directly. */ - Property *props_; + const Property *props_; + + /** + * @props_count_: number of elements in @props_; should only be + * assigned by using device_class_set_props(). + */ + uint16_t props_count_; /** * @user_creatable: Can user instantiate with -device / device_add? @@ -152,16 +162,17 @@ struct DeviceClass { /* callbacks */ /** - * @reset: deprecated device reset method pointer + * @legacy_reset: deprecated device reset method pointer * * Modern code should use the ResettableClass interface to * implement a multi-phase reset. * * TODO: remove once every reset callback is unused */ - DeviceReset reset; + DeviceReset legacy_reset; DeviceRealize realize; DeviceUnrealize unrealize; + DeviceSyncConfig sync_config; /** * @vmsd: device state serialisation description for @@ -237,10 +248,6 @@ struct DeviceState { * @pending_deleted_expires_ms: optional timeout for deletion events */ int64_t pending_deleted_expires_ms; - /** - * @opts: QDict of options for the device - */ - QDict *opts; /** * @hotplugged: was device added after PHASE_MACHINE_READY? 
*/ @@ -533,7 +540,8 @@ void qdev_set_legacy_instance_id(DeviceState *dev, int alias_id, int required_for_version); HotplugHandler *qdev_get_bus_hotplug_handler(DeviceState *dev); HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev); -bool qdev_hotplug_allowed(DeviceState *dev, Error **errp); +bool qdev_hotplug_allowed(DeviceState *dev, BusState *bus, Error **errp); +bool qdev_hotunplug_allowed(DeviceState *dev, Error **errp); /** * qdev_get_hotplug_handler() - Get handler responsible for device wiring @@ -547,6 +555,7 @@ bool qdev_hotplug_allowed(DeviceState *dev, Error **errp); */ HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev); void qdev_unplug(DeviceState *dev, Error **errp); +int qdev_sync_config(DeviceState *dev, Error **errp); void qdev_simple_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); void qdev_machine_creation_done(void); @@ -929,29 +938,38 @@ char *qdev_get_own_fw_dev_path_from_handler(BusState *bus, DeviceState *dev); /** * device_class_set_props(): add a set of properties to an device * @dc: the parent DeviceClass all devices inherit - * @props: an array of properties, terminate by DEFINE_PROP_END_OF_LIST() + * @props: an array of properties * * This will add a set of properties to the object. It will fault if * you attempt to add an existing property defined by a parent class. * To modify an inherited property you need to use???? + * + * Validate that @props has at least one Property. + * Validate that @props is an array, not a pointer, via ARRAY_SIZE. + * Validate that the array does not have a legacy terminator at compile-time; + * requires -O2 and the array to be const. */ -void device_class_set_props(DeviceClass *dc, Property *props); +#define device_class_set_props(dc, props) \ + do { \ + QEMU_BUILD_BUG_ON(sizeof(props) == 0); \ + size_t props_count_ = ARRAY_SIZE(props); \ + if ((props)[props_count_ - 1].name == NULL) { \ + qemu_build_not_reached(); \ + } \ + device_class_set_props_n((dc), (props), props_count_); \ + } while (0) /** - * device_class_set_parent_reset() - legacy set device reset handlers - * @dc: device class - * @dev_reset: function pointer to reset handler - * @parent_reset: function pointer to parents reset handler - * - * Modern code should use the ResettableClass interface to - * implement a multi-phase reset instead. + * device_class_set_props_n(): add a set of properties to an device + * @dc: the parent DeviceClass all devices inherit + * @props: an array of properties + * @n: ARRAY_SIZE(@props) * - * TODO: remove the function when DeviceClass's reset method - * is not used anymore. + * This will add a set of properties to the object. It will fault if + * you attempt to add an existing property defined by a parent class. + * To modify an inherited property you need to use???? */ -void device_class_set_parent_reset(DeviceClass *dc, - DeviceReset dev_reset, - DeviceReset *parent_reset); +void device_class_set_props_n(DeviceClass *dc, const Property *props, size_t n); /** * device_class_set_parent_realize() - set up for chaining realize fns @@ -969,6 +987,19 @@ void device_class_set_parent_realize(DeviceClass *dc, DeviceRealize dev_realize, DeviceRealize *parent_realize); +/** + * device_class_set_legacy_reset(): set the DeviceClass::reset method + * @dc: The device class + * @dev_reset: the reset function + * + * This function sets the DeviceClass::reset method. 
This is widely + * used in existing code, but new code should prefer to use the + * Resettable API as documented in docs/devel/reset.rst. + * In addition, devices which need to chain to their parent class's + * reset methods or which need to be subclassed must use Resettable. + */ +void device_class_set_legacy_reset(DeviceClass *dc, + DeviceReset dev_reset); /** * device_class_set_parent_unrealize() - set up for chaining unrealize fns @@ -993,6 +1024,26 @@ const char *qdev_fw_name(DeviceState *dev); void qdev_assert_realized_properly(void); Object *qdev_get_machine(void); +/** + * qdev_create_fake_machine(): Create a fake machine container. + * + * .. note:: + * This function is a kludge for user emulation (USER_ONLY) + * because when thread (TYPE_CPU) are realized, qdev_realize() + * access a machine container. + */ +void qdev_create_fake_machine(void); + +/** + * machine_get_container: + * @name: The name of container to lookup + * + * Get a container of the machine (QOM path "/machine/NAME"). + * + * Returns: the machine container object. + */ +Object *machine_get_container(const char *name); + /** * qdev_get_human_name() - Return a human-readable name for a device * @dev: The device. Must be a valid and non-NULL pointer. diff --git a/include/hw/qdev-properties-system.h b/include/hw/qdev-properties-system.h index 438f65389f7..9601a11a090 100644 --- a/include/hw/qdev-properties-system.h +++ b/include/hw/qdev-properties-system.h @@ -3,6 +3,9 @@ #include "hw/qdev-properties.h" +bool qdev_prop_sanitize_s390x_loadparm(uint8_t *loadparm, const char *str, + Error **errp); + extern const PropertyInfo qdev_prop_chr; extern const PropertyInfo qdev_prop_macaddr; extern const PropertyInfo qdev_prop_reserved_region; @@ -27,6 +30,9 @@ extern const PropertyInfo qdev_prop_pcie_link_speed; extern const PropertyInfo qdev_prop_pcie_link_width; extern const PropertyInfo qdev_prop_cpus390entitlement; extern const PropertyInfo qdev_prop_iothread_vq_mapping_list; +extern const PropertyInfo qdev_prop_endian_mode; +extern const PropertyInfo qdev_prop_vmapple_virtio_blk_variant; +extern const PropertyInfo qdev_prop_virtio_gpu_output_list; #define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t) @@ -88,10 +94,25 @@ extern const PropertyInfo qdev_prop_iothread_vq_mapping_list; #define DEFINE_PROP_CPUS390ENTITLEMENT(_n, _s, _f, _d) \ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_cpus390entitlement, \ - CpuS390Entitlement) + S390CpuEntitlement) #define DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST(_name, _state, _field) \ DEFINE_PROP(_name, _state, _field, qdev_prop_iothread_vq_mapping_list, \ IOThreadVirtQueueMappingList *) +#define DEFINE_PROP_ENDIAN(_name, _state, _field, _default) \ + DEFINE_PROP_UNSIGNED(_name, _state, _field, _default, \ + qdev_prop_endian_mode, EndianMode) +#define DEFINE_PROP_ENDIAN_NODEFAULT(_name, _state, _field) \ + DEFINE_PROP_ENDIAN(_name, _state, _field, ENDIAN_MODE_UNSPECIFIED) + +#define DEFINE_PROP_VMAPPLE_VIRTIO_BLK_VARIANT(_name, _state, _fld, _default) \ + DEFINE_PROP_UNSIGNED(_name, _state, _fld, _default, \ + qdev_prop_vmapple_virtio_blk_variant, \ + VMAppleVirtioBlkVariant) + +#define DEFINE_PROP_VIRTIO_GPU_OUTPUT_LIST(_name, _state, _field) \ + DEFINE_PROP(_name, _state, _field, qdev_prop_virtio_gpu_output_list, \ + VirtIOGPUOutputList *) + #endif diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h index 09aa04ca1e2..0197aa4995e 100644 --- a/include/hw/qdev-properties.h +++ b/include/hw/qdev-properties.h @@ 
-16,43 +16,54 @@ struct Property { const char *name; const PropertyInfo *info; ptrdiff_t offset; - uint8_t bitnr; + const char *link_type; uint64_t bitmask; - bool set_default; union { int64_t i; uint64_t u; } defval; - int arrayoffset; const PropertyInfo *arrayinfo; + int arrayoffset; int arrayfieldsize; - const char *link_type; + uint8_t bitnr; + bool set_default; }; struct PropertyInfo { - const char *name; + const char *type; const char *description; const QEnumLookup *enum_table; bool realized_set_allowed; /* allow setting property on realized device */ - int (*print)(Object *obj, Property *prop, char *dest, size_t len); + int (*print)(Object *obj, const Property *prop, char *dest, size_t len); void (*set_default_value)(ObjectProperty *op, const Property *prop); ObjectProperty *(*create)(ObjectClass *oc, const char *name, - Property *prop); + const Property *prop); ObjectPropertyAccessor *get; ObjectPropertyAccessor *set; ObjectPropertyRelease *release; }; +/** + * struct OnOffAutoBit64 - OnOffAuto storage with 64 elements. + * @on_bits: Bitmap of elements with "on". + * @auto_bits: Bitmap of elements with "auto". + */ +typedef struct OnOffAutoBit64 { + uint64_t on_bits; + uint64_t auto_bits; +} OnOffAutoBit64; + /*** qdev-properties.c ***/ extern const PropertyInfo qdev_prop_bit; extern const PropertyInfo qdev_prop_bit64; +extern const PropertyInfo qdev_prop_on_off_auto_bit64; extern const PropertyInfo qdev_prop_bool; -extern const PropertyInfo qdev_prop_enum; extern const PropertyInfo qdev_prop_uint8; extern const PropertyInfo qdev_prop_uint16; extern const PropertyInfo qdev_prop_uint32; +extern const PropertyInfo qdev_prop_usize; extern const PropertyInfo qdev_prop_int32; extern const PropertyInfo qdev_prop_uint64; extern const PropertyInfo qdev_prop_uint64_checkmask; @@ -100,6 +111,13 @@ extern const PropertyInfo qdev_prop_link; .set_default = true, \ .defval.u = (bool)_defval) +#define DEFINE_PROP_ON_OFF_AUTO_BIT64(_name, _state, _field, _bit, _defval) \ + DEFINE_PROP(_name, _state, _field, qdev_prop_on_off_auto_bit64, \ + OnOffAutoBit64, \ + .bitnr = (_bit), \ + .set_default = true, \ + .defval.i = (OnOffAuto)_defval) + #define DEFINE_PROP_BOOL(_name, _state, _field, _defval) \ DEFINE_PROP(_name, _state, _field, qdev_prop_bool, bool, \ .set_default = true, \ @@ -171,9 +189,6 @@ extern const PropertyInfo qdev_prop_link; #define DEFINE_PROP_SIZE32(_n, _s, _f, _d) \ DEFINE_PROP_UNSIGNED(_n, _s, _f, _d, qdev_prop_size32, uint32_t) -#define DEFINE_PROP_END_OF_LIST() \ - {} - /* * Set properties between creation and realization. * @@ -204,7 +219,7 @@ void qdev_prop_set_enum(DeviceState *dev, const char *name, int value); /* Takes ownership of @values */ void qdev_prop_set_array(DeviceState *dev, const char *name, QList *values); -void *object_field_prop_ptr(Object *obj, Property *prop); +void *object_field_prop_ptr(Object *obj, const Property *prop); void qdev_prop_register_global(GlobalProperty *prop); const GlobalProperty *qdev_find_global_prop(Object *obj, @@ -223,7 +238,7 @@ void error_set_from_qdev_prop_error(Error **errp, int ret, Object *obj, * On error, store error in @errp. Static properties access data in a struct. * The type of the QOM property is derived from prop->info. 
*/ -void qdev_property_add_static(DeviceState *dev, Property *prop); +void qdev_property_add_static(DeviceState *dev, const Property *prop); /** * qdev_alias_all_properties: Create aliases on source for all target properties diff --git a/include/hw/register.h b/include/hw/register.h index 6a076cfcdf0..a913c52aee5 100644 --- a/include/hw/register.h +++ b/include/hw/register.h @@ -12,7 +12,7 @@ #define REGISTER_H #include "hw/qdev-core.h" -#include "exec/memory.h" +#include "system/memory.h" #include "hw/registerfields.h" #include "qom/object.h" diff --git a/include/hw/remote/iohub.h b/include/hw/remote/iohub.h index 6a8444f9a97..09ee6c77b68 100644 --- a/include/hw/remote/iohub.h +++ b/include/hw/remote/iohub.h @@ -37,6 +37,5 @@ void remote_iohub_set_irq(void *opaque, int pirq, int level); void process_set_irqfd_msg(PCIDevice *pci_dev, MPQemuMsg *msg); void remote_iohub_init(RemoteIOHubState *iohub); -void remote_iohub_finalize(RemoteIOHubState *iohub); #endif diff --git a/include/hw/remote/proxy-memory-listener.h b/include/hw/remote/proxy-memory-listener.h index c4f3efb9285..ec516d82676 100644 --- a/include/hw/remote/proxy-memory-listener.h +++ b/include/hw/remote/proxy-memory-listener.h @@ -9,7 +9,7 @@ #ifndef PROXY_MEMORY_LISTENER_H #define PROXY_MEMORY_LISTENER_H -#include "exec/memory.h" +#include "system/memory.h" #include "io/channel.h" typedef struct ProxyMemoryListener { diff --git a/include/hw/resettable.h b/include/hw/resettable.h index 7e249deb8b5..fd862f1e9f1 100644 --- a/include/hw/resettable.h +++ b/include/hw/resettable.h @@ -29,6 +29,7 @@ typedef struct ResettableState ResettableState; * Types of reset. * * + Cold: reset resulting from a power cycle of the object. + * + Wakeup: reset resulting from a wake-up from a suspended state. * * TODO: Support has to be added to handle more types. In particular, * ResettableState structure needs to be expanded. @@ -36,6 +37,9 @@ typedef struct ResettableState ResettableState; typedef enum ResetType { RESET_TYPE_COLD, RESET_TYPE_SNAPSHOT_LOAD, + RESET_TYPE_WAKEUP, + RESET_TYPE_S390_CPU_INITIAL, + RESET_TYPE_S390_CPU_NORMAL, } ResetType; /* @@ -88,14 +92,6 @@ typedef enum ResetType { * @get_state: Mandatory method which must return a pointer to a * ResettableState. * - * @get_transitional_function: transitional method to handle Resettable objects - * not yet fully moved to this interface. It will be removed as soon as it is - * not needed anymore. This method is optional and may return a pointer to a - * function to be used instead of the phases. If the method exists and returns - * a non-NULL function pointer then that function is executed as a replacement - * of the 'hold' phase method taking the object as argument. The two other phase - * methods are not executed. - * * @child_foreach: Executes a given callback on every Resettable child. 
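With the transitional hook removed, a device that still relied on it is expected to implement the Resettable phases directly. A minimal sketch, assuming the usual ResettableClass::phases member and an invented device type:

static void demo_reset_hold(Object *obj, ResetType type)
{
    /* put the device into its post-reset state */
}

static void demo_class_init(ObjectClass *oc, void *data)
{
    ResettableClass *rc = RESETTABLE_CLASS(oc);

    rc->phases.hold = demo_reset_hold;
}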
Child * in this context means a child in the qbus tree, so the children of a qbus * are the devices on it, and the children of a device are all the buses it @@ -107,8 +103,6 @@ typedef void (*ResettableEnterPhase)(Object *obj, ResetType type); typedef void (*ResettableHoldPhase)(Object *obj, ResetType type); typedef void (*ResettableExitPhase)(Object *obj, ResetType type); typedef ResettableState * (*ResettableGetState)(Object *obj); -typedef void (*ResettableTrFunction)(Object *obj); -typedef ResettableTrFunction (*ResettableGetTrFunction)(Object *obj); typedef void (*ResettableChildCallback)(Object *, void *opaque, ResetType type); typedef void (*ResettableChildForeach)(Object *obj, @@ -128,9 +122,6 @@ struct ResettableClass { /* State access method */ ResettableGetState get_state; - /* Transitional method for legacy reset compatibility */ - ResettableGetTrFunction get_transitional_function; - /* Hierarchy handling method */ ResettableChildForeach child_foreach; }; diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h index a2e4ae9cb02..7d59b2e6c63 100644 --- a/include/hw/riscv/boot.h +++ b/include/hw/riscv/boot.h @@ -27,36 +27,50 @@ #define RISCV32_BIOS_BIN "opensbi-riscv32-generic-fw_dynamic.bin" #define RISCV64_BIOS_BIN "opensbi-riscv64-generic-fw_dynamic.bin" +typedef struct RISCVBootInfo { + ssize_t kernel_size; + hwaddr image_low_addr; + hwaddr image_high_addr; + + hwaddr initrd_start; + ssize_t initrd_size; + + bool is_32bit; +} RISCVBootInfo; + bool riscv_is_32bit(RISCVHartArrayState *harts); char *riscv_plic_hart_config_string(int hart_count); -target_ulong riscv_calc_kernel_start_addr(RISCVHartArrayState *harts, +void riscv_boot_info_init(RISCVBootInfo *info, RISCVHartArrayState *harts); +target_ulong riscv_calc_kernel_start_addr(RISCVBootInfo *info, target_ulong firmware_end_addr); target_ulong riscv_find_and_load_firmware(MachineState *machine, const char *default_machine_firmware, - hwaddr firmware_load_addr, + hwaddr *firmware_load_addr, symbol_fn_t sym_cb); const char *riscv_default_firmware_name(RISCVHartArrayState *harts); char *riscv_find_firmware(const char *firmware_filename, const char *default_machine_firmware); target_ulong riscv_load_firmware(const char *firmware_filename, - hwaddr firmware_load_addr, + hwaddr *firmware_load_addr, symbol_fn_t sym_cb); -target_ulong riscv_load_kernel(MachineState *machine, - RISCVHartArrayState *harts, - target_ulong firmware_end_addr, - bool load_initrd, - symbol_fn_t sym_cb); -uint64_t riscv_compute_fdt_addr(hwaddr dram_start, uint64_t dram_size, - MachineState *ms); +void riscv_load_kernel(MachineState *machine, + RISCVBootInfo *info, + target_ulong kernel_start_addr, + bool load_initrd, + symbol_fn_t sym_cb); +uint64_t riscv_compute_fdt_addr(hwaddr dram_base, hwaddr dram_size, + MachineState *ms, RISCVBootInfo *info); void riscv_load_fdt(hwaddr fdt_addr, void *fdt); void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts, hwaddr saddr, hwaddr rom_base, hwaddr rom_size, uint64_t kernel_entry, uint64_t fdt_load_addr); -void riscv_rom_copy_firmware_info(MachineState *machine, hwaddr rom_base, +void riscv_rom_copy_firmware_info(MachineState *machine, + RISCVHartArrayState *harts, + hwaddr rom_base, hwaddr rom_size, uint32_t reset_vec_size, uint64_t kernel_entry); diff --git a/include/hw/riscv/boot_opensbi.h b/include/hw/riscv/boot_opensbi.h index 1b749663dca..18664a174b5 100644 --- a/include/hw/riscv/boot_opensbi.h +++ b/include/hw/riscv/boot_opensbi.h @@ -58,4 +58,33 @@ struct fw_dynamic_info { 
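The reworked boot interface threads a RISCVBootInfo through the loader helpers. A sketch of how a board might use it; the function name and DRAM base are invented, the prototypes follow the declarations above:

static void demo_riscv_board_boot(MachineState *machine,
                                  RISCVHartArrayState *harts)
{
    RISCVBootInfo binfo;
    hwaddr fw_load_addr = 0x80000000;   /* example DRAM base */
    target_ulong fw_end, kernel_start;

    riscv_boot_info_init(&binfo, harts);
    fw_end = riscv_find_and_load_firmware(machine, RISCV64_BIOS_BIN,
                                          &fw_load_addr, NULL);
    kernel_start = riscv_calc_kernel_start_addr(&binfo, fw_end);
    riscv_load_kernel(machine, &binfo, kernel_start,
                      true /* load_initrd */, NULL);
}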
target_long boot_hart; }; +/** Representation dynamic info passed by previous booting stage */ +struct fw_dynamic_info32 { + /** Info magic */ + int32_t magic; + /** Info version */ + int32_t version; + /** Next booting stage address */ + int32_t next_addr; + /** Next booting stage mode */ + int32_t next_mode; + /** Options for OpenSBI library */ + int32_t options; + /** + * Preferred boot HART id + * + * It is possible that the previous booting stage uses same link + * address as the FW_DYNAMIC firmware. In this case, the relocation + * lottery mechanism can potentially overwrite the previous booting + * stage while other HARTs are still running in the previous booting + * stage leading to boot-time crash. To avoid this boot-time crash, + * the previous booting stage can specify last HART that will jump + * to the FW_DYNAMIC firmware as the preferred boot HART. + * + * To avoid specifying a preferred boot HART, the previous booting + * stage can set it to -1UL which will force the FW_DYNAMIC firmware + * to use the relocation lottery mechanism. + */ + int32_t boot_hart; +}; #endif diff --git a/include/hw/riscv/iommu.h b/include/hw/riscv/iommu.h new file mode 100644 index 00000000000..8a8acfc3f07 --- /dev/null +++ b/include/hw/riscv/iommu.h @@ -0,0 +1,42 @@ +/* + * QEMU emulation of an RISC-V IOMMU + * + * Copyright (C) 2022-2023 Rivos Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
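For illustration, a 32-bit dynamic info block might be populated roughly as follows. The magic, version and next_mode values are placeholders for the constants defined by the OpenSBI fw_dynamic ABI; only boot_hart = -1 (relocation lottery) is taken from the comment above.

static struct fw_dynamic_info32 demo_dinfo = {
    .magic     = 0x4942534f,    /* placeholder for the fw_dynamic magic */
    .version   = 2,             /* placeholder ABI version */
    .next_addr = 0x80200000,    /* next stage (e.g. kernel) entry point */
    .next_mode = 1,             /* placeholder: supervisor mode */
    .options   = 0,
    .boot_hart = -1,            /* let FW_DYNAMIC run the relocation lottery */
};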
+ */ + +#ifndef HW_RISCV_IOMMU_H +#define HW_RISCV_IOMMU_H + +#include "qemu/osdep.h" +#include "qom/object.h" + +#define TYPE_RISCV_IOMMU "riscv-iommu" +OBJECT_DECLARE_SIMPLE_TYPE(RISCVIOMMUState, RISCV_IOMMU) +typedef struct RISCVIOMMUState RISCVIOMMUState; + +#define TYPE_RISCV_IOMMU_MEMORY_REGION "riscv-iommu-mr" +typedef struct RISCVIOMMUSpace RISCVIOMMUSpace; + +#define TYPE_RISCV_IOMMU_PCI "riscv-iommu-pci" +OBJECT_DECLARE_SIMPLE_TYPE(RISCVIOMMUStatePci, RISCV_IOMMU_PCI) +typedef struct RISCVIOMMUStatePci RISCVIOMMUStatePci; + +#define TYPE_RISCV_IOMMU_SYS "riscv-iommu-device" +OBJECT_DECLARE_SIMPLE_TYPE(RISCVIOMMUStateSys, RISCV_IOMMU_SYS) +typedef struct RISCVIOMMUStateSys RISCVIOMMUStateSys; + +#define FDT_IRQ_TYPE_EDGE_LOW 1 + +#endif diff --git a/include/hw/riscv/microchip_pfsoc.h b/include/hw/riscv/microchip_pfsoc.h index daef086da60..7ca9b976c10 100644 --- a/include/hw/riscv/microchip_pfsoc.h +++ b/include/hw/riscv/microchip_pfsoc.h @@ -67,6 +67,7 @@ typedef struct MicrochipIcicleKitState { MachineState parent_obj; /*< public >*/ + uint32_t clint_timebase_freq; MicrochipPFSoCState soc; } MicrochipIcicleKitState; diff --git a/include/hw/riscv/numa.h b/include/hw/riscv/numa.h index 8f5280211d1..147f01619b9 100644 --- a/include/hw/riscv/numa.h +++ b/include/hw/riscv/numa.h @@ -21,7 +21,7 @@ #include "hw/boards.h" #include "hw/sysbus.h" -#include "sysemu/numa.h" +#include "system/numa.h" /** * riscv_socket_count: diff --git a/include/hw/riscv/riscv_hart.h b/include/hw/riscv/riscv_hart.h index 912b4a2682d..a6ed73a1956 100644 --- a/include/hw/riscv/riscv_hart.h +++ b/include/hw/riscv/riscv_hart.h @@ -38,6 +38,10 @@ struct RISCVHartArrayState { uint32_t hartid_base; char *cpu_type; uint64_t resetvec; + uint32_t num_rnmi_irqvec; + uint64_t *rnmi_irqvec; + uint32_t num_rnmi_excpvec; + uint64_t *rnmi_excpvec; RISCVCPU *harts; }; diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h index c0dc41ff9a1..7b4c2c8b7de 100644 --- a/include/hw/riscv/virt.h +++ b/include/hw/riscv/virt.h @@ -62,6 +62,8 @@ struct RISCVVirtState { OnOffAuto acpi; const MemMapEntry *memmap; struct GPEXHost *gpex_host; + OnOffAuto iommu_sys; + uint16_t pci_iommu_bdf; }; enum { @@ -84,7 +86,8 @@ enum { VIRT_PCIE_MMIO, VIRT_PCIE_PIO, VIRT_PLATFORM_BUS, - VIRT_PCIE_ECAM + VIRT_PCIE_ECAM, + VIRT_IOMMU_SYS, }; enum { @@ -93,6 +96,7 @@ enum { VIRTIO_IRQ = 1, /* 1 to 8 */ VIRTIO_COUNT = 8, PCIE_IRQ = 0x20, /* 32 to 35 */ + IOMMU_SYS_IRQ = 0x24, /* 36-39 */ VIRT_PLATFORM_BUS_IRQ = 64, /* 64 to 95 */ }; @@ -129,6 +133,7 @@ enum { 1 + FDT_APLIC_INT_CELLS) bool virt_is_acpi_enabled(RISCVVirtState *s); +bool virt_is_iommu_sys_enabled(RISCVVirtState *s); void virt_acpi_setup(RISCVVirtState *vms); uint32_t imsic_num_bits(uint32_t count); diff --git a/include/hw/riscv/xiangshan_kmh.h b/include/hw/riscv/xiangshan_kmh.h new file mode 100644 index 00000000000..c5dc6b1a9a7 --- /dev/null +++ b/include/hw/riscv/xiangshan_kmh.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * QEMU RISC-V Board Compatible with the Xiangshan Kunminghu + * FPGA prototype platform + * + * Copyright (c) 2025 Beijing Institute of Open Source Chip (BOSC) + * + */ + +#ifndef HW_XIANGSHAN_KMH_H +#define HW_XIANGSHAN_KMH_H + +#include "hw/boards.h" +#include "hw/riscv/riscv_hart.h" + +#define XIANGSHAN_KMH_MAX_CPUS 16 + +typedef struct XiangshanKmhSoCState { + /*< private >*/ + DeviceState parent_obj; + + /*< public >*/ + RISCVHartArrayState cpus; + DeviceState *irqchip; + MemoryRegion rom; +} XiangshanKmhSoCState; + +#define 
TYPE_XIANGSHAN_KMH_SOC "xiangshan.kunminghu.soc" +DECLARE_INSTANCE_CHECKER(XiangshanKmhSoCState, XIANGSHAN_KMH_SOC, + TYPE_XIANGSHAN_KMH_SOC) + +typedef struct XiangshanKmhState { + /*< private >*/ + MachineState parent_obj; + + /*< public >*/ + XiangshanKmhSoCState soc; +} XiangshanKmhState; + +#define TYPE_XIANGSHAN_KMH_MACHINE MACHINE_TYPE_NAME("xiangshan-kunminghu") +DECLARE_INSTANCE_CHECKER(XiangshanKmhState, XIANGSHAN_KMH_MACHINE, + TYPE_XIANGSHAN_KMH_MACHINE) + +enum { + XIANGSHAN_KMH_ROM, + XIANGSHAN_KMH_UART0, + XIANGSHAN_KMH_CLINT, + XIANGSHAN_KMH_APLIC_M, + XIANGSHAN_KMH_APLIC_S, + XIANGSHAN_KMH_IMSIC_M, + XIANGSHAN_KMH_IMSIC_S, + XIANGSHAN_KMH_DRAM, +}; + +enum { + XIANGSHAN_KMH_UART0_IRQ = 10, +}; + +/* Indicating Timebase-freq (1MHZ) */ +#define XIANGSHAN_KMH_CLINT_TIMEBASE_FREQ 1000000 + +#define XIANGSHAN_KMH_IMSIC_NUM_IDS 255 +#define XIANGSHAN_KMH_IMSIC_NUM_GUESTS 7 +#define XIANGSHAN_KMH_IMSIC_GUEST_BITS 3 + +#define XIANGSHAN_KMH_APLIC_NUM_SOURCES 96 + +#endif diff --git a/include/hw/s390x/ap-bridge.h b/include/hw/s390x/ap-bridge.h index 470e439a98e..7efc52928db 100644 --- a/include/hw/s390x/ap-bridge.h +++ b/include/hw/s390x/ap-bridge.h @@ -16,4 +16,43 @@ void s390_init_ap(void); +typedef struct ChscSeiNt0Res { + uint16_t length; + uint16_t code; + uint8_t reserved1; + uint16_t reserved2; + uint8_t nt; +#define PENDING_EVENT_INFO_BITMASK 0x80; + uint8_t flags; + uint8_t reserved3; + uint8_t rs; + uint8_t cc; +} QEMU_PACKED ChscSeiNt0Res; + +#define NT0_RES_RESPONSE_CODE 1 +#define NT0_RES_NT_DEFAULT 0 +#define NT0_RES_RS_AP_CHANGE 5 +#define NT0_RES_CC_AP_CHANGE 3 + +#define EVENT_INFORMATION_NOT_STORED 1 +#define EVENT_INFORMATION_STORED 0 + +/** + * ap_chsc_sei_nt0_get_event - Retrieve the next pending AP config + * change event + * @res: Pointer to a ChscSeiNt0Res struct to be filled with event + * data + * + * This function checks for any pending AP config change events and, + * if present, populates the provided response structure with the + * appropriate SEI NT0 fields. 
+ * + * Return: + * EVENT_INFORMATION_STORED - An event was available and written to @res + * EVENT_INFORMATION_NOT_STORED - No event was available + */ +int ap_chsc_sei_nt0_get_event(void *res); + +bool ap_chsc_sei_nt0_have_event(void); + #endif diff --git a/include/hw/s390x/cpu-topology.h b/include/hw/s390x/cpu-topology.h index c064f427e94..d5e9aa43f8f 100644 --- a/include/hw/s390x/cpu-topology.h +++ b/include/hw/s390x/cpu-topology.h @@ -13,7 +13,7 @@ #include "qemu/queue.h" #include "hw/boards.h" -#include "qapi/qapi-types-machine-target.h" +#include "qapi/qapi-types-machine-s390x.h" #define S390_TOPOLOGY_CPU_IFL 0x03 @@ -37,7 +37,7 @@ typedef struct S390TopologyEntry { typedef struct S390Topology { uint8_t *cores_per_socket; - CpuS390Polarization polarization; + S390CpuPolarization polarization; } S390Topology; typedef QTAILQ_HEAD(, S390TopologyEntry) S390TopologyList; @@ -57,7 +57,7 @@ static inline void s390_topology_setup_cpu(MachineState *ms, static inline void s390_topology_reset(void) { /* Unreachable, CPU topology not implemented for TCG */ - assert(false); + g_assert_not_reached(); } #endif diff --git a/include/hw/s390x/css-bridge.h b/include/hw/s390x/css-bridge.h index deb606d71f1..4f874ed781b 100644 --- a/include/hw/s390x/css-bridge.h +++ b/include/hw/s390x/css-bridge.h @@ -19,7 +19,6 @@ /* virtual css bridge */ struct VirtualCssBridge { SysBusDevice sysbus_dev; - bool css_dev_path; }; #define TYPE_VIRTUAL_CSS_BRIDGE "virtual-css-bridge" diff --git a/include/hw/s390x/css.h b/include/hw/s390x/css.h index 8289e458370..0b0400a9d4f 100644 --- a/include/hw/s390x/css.h +++ b/include/hw/s390x/css.h @@ -15,7 +15,7 @@ #include "hw/s390x/adapter.h" #include "hw/s390x/s390_flic.h" #include "hw/s390x/ioinst.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "target/s390x/cpu-qom.h" /* Channel subsystem constants. */ @@ -238,7 +238,6 @@ uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc); void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable, uint8_t flags, Error **errp); -#ifndef CONFIG_USER_ONLY SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid); bool css_subch_visible(SubchDev *sch); @@ -262,7 +261,6 @@ int css_enable_mss(void); IOInstEnding css_do_rsch(SubchDev *sch); int css_do_rchp(uint8_t cssid, uint8_t chpid); bool css_present(uint8_t cssid); -#endif extern const PropertyInfo css_devid_ro_propinfo; @@ -333,10 +331,4 @@ static inline int ccw_dstream_read_buf(CcwDataStream *cds, void *buff, int len) #define ccw_dstream_read(cds, v) ccw_dstream_read_buf((cds), &(v), sizeof(v)) #define ccw_dstream_write(cds, v) ccw_dstream_write_buf((cds), &(v), sizeof(v)) -/** - * true if (vmstate based) migration of the channel subsystem - * is enabled, false if it is disabled. 
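The expected caller pattern for the new AP event helpers declared in ap-bridge.h above is roughly the following; the wrapper function is invented for the example.

static void demo_handle_ap_config_event(void)
{
    ChscSeiNt0Res res;

    if (ap_chsc_sei_nt0_have_event() &&
        ap_chsc_sei_nt0_get_event(&res) == EVENT_INFORMATION_STORED) {
        /* res now carries the SEI NT0 fields for one pending event */
    }
}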
- */ -extern bool css_migration_enabled; - #endif diff --git a/include/hw/s390x/event-facility.h b/include/hw/s390x/event-facility.h index ff874e792d3..eac7a511003 100644 --- a/include/hw/s390x/event-facility.h +++ b/include/hw/s390x/event-facility.h @@ -25,6 +25,7 @@ #define SCLP_EVENT_MESSAGE 0x02 #define SCLP_EVENT_CONFIG_MGT_DATA 0x04 #define SCLP_EVENT_PMSGCMD 0x09 +#define SCLP_EVENT_CTRL_PGM_ID 0x0b #define SCLP_EVENT_ASCII_CONSOLE_DATA 0x1a #define SCLP_EVENT_SIGNAL_QUIESCE 0x1d @@ -35,6 +36,7 @@ #define SCLP_EVENT_MASK_MSG SCLP_EVMASK(SCLP_EVENT_MESSAGE) #define SCLP_EVENT_MASK_CONFIG_MGT_DATA SCLP_EVMASK(SCLP_EVENT_CONFIG_MGT_DATA) #define SCLP_EVENT_MASK_PMSGCMD SCLP_EVMASK(SCLP_EVENT_PMSGCMD) +#define SCLP_EVENT_MASK_CTRL_PGM_ID SCLP_EVMASK(SCLP_EVENT_CTRL_PGM_ID) #define SCLP_EVENT_MASK_MSG_ASCII SCLP_EVMASK(SCLP_EVENT_ASCII_CONSOLE_DATA) #define SCLP_EVENT_MASK_SIGNAL_QUIESCE SCLP_EVMASK(SCLP_EVENT_SIGNAL_QUIESCE) @@ -191,6 +193,21 @@ struct SCLPEventClass { bool (*can_handle_event)(uint8_t type); }; +#define TYPE_SCLP_EVENT_CPI "sclpcpi" +typedef struct SCLPEventCPIClass SCLPEventCPIClass; +typedef struct SCLPEventCPI SCLPEventCPI; +OBJECT_DECLARE_TYPE(SCLPEventCPI, SCLPEventCPIClass, + SCLP_EVENT_CPI) + +struct SCLPEventCPI { + SCLPEvent event; + uint8_t system_type[8]; + uint8_t system_name[8]; + uint64_t system_level; + uint8_t sysplex_name[8]; + uint64_t timestamp; +}; + #define TYPE_SCLP_EVENT_FACILITY "s390-sclp-event-facility" typedef struct SCLPEventFacility SCLPEventFacility; typedef struct SCLPEventFacilityClass SCLPEventFacilityClass; diff --git a/include/hw/s390x/ipl/qipl.h b/include/hw/s390x/ipl/qipl.h new file mode 100644 index 00000000000..6824391111c --- /dev/null +++ b/include/hw/s390x/ipl/qipl.h @@ -0,0 +1,127 @@ +/* + * S/390 boot structures + * + * Copyright 2024 IBM Corp. + * Author(s): Jared Rossi + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#ifndef S390X_QIPL_H +#define S390X_QIPL_H + +/* Boot Menu flags */ +#define QIPL_FLAG_BM_OPTS_CMD 0x80 +#define QIPL_FLAG_BM_OPTS_ZIPL 0x40 + +#define QIPL_ADDRESS 0xcc +#define LOADPARM_LEN 8 +#define NO_LOADPARM "\0\0\0\0\0\0\0\0" + +/* + * The QEMU IPL Parameters will be stored at absolute address + * 204 (0xcc) which means it is 32-bit word aligned but not + * double-word aligned. Placement of 64-bit data fields in this + * area must account for their alignment needs. + * The total size of the struct must never exceed 28 bytes. 
+ */ +struct QemuIplParameters { + uint8_t qipl_flags; + uint8_t index; + uint8_t reserved1[2]; + uint64_t reserved2; + uint32_t boot_menu_timeout; + uint8_t reserved3[2]; + uint16_t chain_len; + uint64_t next_iplb; +} QEMU_PACKED; +typedef struct QemuIplParameters QemuIplParameters; + +struct IPLBlockPVComp { + uint64_t tweak_pref; + uint64_t addr; + uint64_t size; +} QEMU_PACKED; +typedef struct IPLBlockPVComp IPLBlockPVComp; + +struct IPLBlockPV { + uint8_t reserved18[87]; /* 0x18 */ + uint8_t version; /* 0x6f */ + uint32_t reserved70; /* 0x70 */ + uint32_t num_comp; /* 0x74 */ + uint64_t pv_header_addr; /* 0x78 */ + uint64_t pv_header_len; /* 0x80 */ + struct IPLBlockPVComp components[0]; +} QEMU_PACKED; +typedef struct IPLBlockPV IPLBlockPV; + +struct IplBlockCcw { + uint8_t reserved0[85]; + uint8_t ssid; + uint16_t devno; + uint8_t vm_flags; + uint8_t reserved3[3]; + uint32_t vm_parm_len; + uint8_t nss_name[8]; + uint8_t vm_parm[64]; + uint8_t reserved4[8]; +} QEMU_PACKED; +typedef struct IplBlockCcw IplBlockCcw; + +struct IplBlockFcp { + uint8_t reserved1[305 - 1]; + uint8_t opt; + uint8_t reserved2[3]; + uint16_t reserved3; + uint16_t devno; + uint8_t reserved4[4]; + uint64_t wwpn; + uint64_t lun; + uint32_t bootprog; + uint8_t reserved5[12]; + uint64_t br_lba; + uint32_t scp_data_len; + uint8_t reserved6[260]; + uint8_t scp_data[0]; +} QEMU_PACKED; +typedef struct IplBlockFcp IplBlockFcp; + +struct IplBlockQemuScsi { + uint32_t lun; + uint16_t target; + uint16_t channel; + uint8_t reserved0[77]; + uint8_t ssid; + uint16_t devno; +} QEMU_PACKED; +typedef struct IplBlockQemuScsi IplBlockQemuScsi; + +union IplParameterBlock { + struct { + uint32_t len; + uint8_t reserved0[3]; + uint8_t version; + uint32_t blk0_len; + uint8_t pbt; + uint8_t flags; + uint16_t reserved01; + uint8_t loadparm[LOADPARM_LEN]; + union { + IplBlockCcw ccw; + IplBlockFcp fcp; + IPLBlockPV pv; + IplBlockQemuScsi scsi; + }; + } QEMU_PACKED; + struct { + uint8_t reserved1[110]; + uint16_t devno; + uint8_t reserved2[88]; + uint8_t reserved_ext[4096 - 200]; + } QEMU_PACKED; +} QEMU_PACKED; +typedef union IplParameterBlock IplParameterBlock; + +#endif diff --git a/include/hw/s390x/s390-pci-bus.h b/include/hw/s390x/s390-pci-bus.h index 2c43ea123f0..04944d4fed7 100644 --- a/include/hw/s390x/s390-pci-bus.h +++ b/include/hw/s390x/s390-pci-bus.h @@ -277,6 +277,7 @@ struct S390PCIIOMMU { AddressSpace as; MemoryRegion mr; IOMMUMemoryRegion iommu_mr; + MemoryRegion *dm_mr; bool enabled; uint64_t g_iota; uint64_t pba; @@ -362,6 +363,7 @@ struct S390PCIBusDevice { bool interp; bool forwarding_assist; bool aif; + bool rtr_avail; QTAILQ_ENTRY(S390PCIBusDevice) link; }; @@ -389,6 +391,7 @@ int pci_chsc_sei_nt2_have_event(void); void s390_pci_sclp_configure(SCCB *sccb); void s390_pci_sclp_deconfigure(SCCB *sccb); void s390_pci_iommu_enable(S390PCIIOMMU *iommu); +void s390_pci_iommu_direct_map_enable(S390PCIIOMMU *iommu); void s390_pci_iommu_disable(S390PCIIOMMU *iommu); void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid, uint64_t faddr, uint32_t e); diff --git a/include/hw/s390x/s390-pci-clp.h b/include/hw/s390x/s390-pci-clp.h index 03b7f9ba5f7..6a635d693ba 100644 --- a/include/hw/s390x/s390-pci-clp.h +++ b/include/hw/s390x/s390-pci-clp.h @@ -158,6 +158,7 @@ typedef struct ClpRspQueryPciGrp { #define CLP_RSP_QPCIG_MASK_NOI 0xfff uint16_t i; uint8_t version; +#define CLP_RSP_QPCIG_MASK_RTR 0x20 #define CLP_RSP_QPCIG_MASK_FRAME 0x2 #define CLP_RSP_QPCIG_MASK_REFRESH 0x1 uint8_t fr; diff --git 
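Since the comment above caps QemuIplParameters at 28 bytes so that it fits at absolute address 0xcc, a build-time guard is one way to keep that invariant. This sketch assumes QEMU's existing QEMU_BUILD_BUG_ON() macro and is not part of the patch:

QEMU_BUILD_BUG_ON(sizeof(QemuIplParameters) > 28);
QEMU_BUILD_BUG_ON(QIPL_ADDRESS % 4 != 0);   /* 32-bit word aligned, per the comment */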
a/include/hw/s390x/s390-pci-inst.h b/include/hw/s390x/s390-pci-inst.h index a55c448aad2..5cb8da540be 100644 --- a/include/hw/s390x/s390-pci-inst.h +++ b/include/hw/s390x/s390-pci-inst.h @@ -15,7 +15,7 @@ #define HW_S390_PCI_INST_H #include "s390-pci-bus.h" -#include "sysemu/dma.h" +#include "system/dma.h" /* Load/Store status codes */ #define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4 diff --git a/include/hw/s390x/s390-virtio-ccw.h b/include/hw/s390x/s390-virtio-ccw.h index 996864a34e2..526078a4e2b 100644 --- a/include/hw/s390x/s390-virtio-ccw.h +++ b/include/hw/s390x/s390-virtio-ccw.h @@ -29,10 +29,19 @@ struct S390CcwMachineState { bool dea_key_wrap; bool pv; uint8_t loadparm[8]; + uint64_t memory_limit; + uint64_t max_pagesize; SCLPDevice *sclp; }; +static inline uint64_t s390_get_memory_limit(S390CcwMachineState *s390ms) +{ + /* We expect to be called only after the limit was set. */ + assert(s390ms->memory_limit); + return s390ms->memory_limit; +} + #define S390_PTF_REASON_NONE (0x00 << 8) #define S390_PTF_REASON_DONE (0x01 << 8) #define S390_PTF_REASON_BUSY (0x02 << 8) @@ -44,17 +53,8 @@ struct S390CcwMachineClass { MachineClass parent_class; /*< public >*/ - bool ri_allowed; - bool cpu_model_allowed; - bool hpage_1m_allowed; int max_threads; + bool use_cpi; }; -/* runtime-instrumentation allowed by the machine */ -bool ri_allowed(void); -/* cpu model allowed by the machine */ -bool cpu_model_allowed(void); -/* 1M huge page mappings allowed by the machine */ -bool hpage_1m_allowed(void); - #endif diff --git a/include/hw/s390x/s390_flic.h b/include/hw/s390x/s390_flic.h index 4d66c5e42ef..91edaaca409 100644 --- a/include/hw/s390x/s390_flic.h +++ b/include/hw/s390x/s390_flic.h @@ -41,10 +41,7 @@ OBJECT_DECLARE_TYPE(S390FLICState, S390FLICStateClass, struct S390FLICState { SysBusDevice parent_obj; - /* to limit AdapterRoutes.num_routes for compat */ - uint32_t adapter_routes_max_batch; bool ais_supported; - bool migration_enabled; }; diff --git a/include/hw/s390x/storage-attributes.h b/include/hw/s390x/storage-attributes.h index 8921a04d514..b5c6d8fa552 100644 --- a/include/hw/s390x/storage-attributes.h +++ b/include/hw/s390x/storage-attributes.h @@ -25,7 +25,6 @@ OBJECT_DECLARE_TYPE(S390StAttribState, S390StAttribClass, S390_STATTRIB) struct S390StAttribState { DeviceState parent_obj; uint64_t migration_cur_gfn; - bool migration_enabled; }; diff --git a/include/hw/s390x/storage-keys.h b/include/hw/s390x/storage-keys.h index 976ffb20396..ac303001f57 100644 --- a/include/hw/s390x/storage-keys.h +++ b/include/hw/s390x/storage-keys.h @@ -21,8 +21,6 @@ OBJECT_DECLARE_TYPE(S390SKeysState, S390SKeysClass, S390_SKEYS) struct S390SKeysState { DeviceState parent_obj; - bool migration_enabled; - }; @@ -124,7 +122,23 @@ int s390_skeys_set(S390SKeysState *ks, uint64_t start_gfn, S390SKeysState *s390_get_skeys_device(void); +void s390_qmp_dump_skeys(const char *filename, Error **errp); void hmp_dump_skeys(Monitor *mon, const QDict *qdict); void hmp_info_skeys(Monitor *mon, const QDict *qdict); +#define TYPE_DUMP_SKEYS_INTERFACE "dump-skeys-interface" + +typedef struct DumpSKeysInterface DumpSKeysInterface; +DECLARE_CLASS_CHECKERS(DumpSKeysInterface, DUMP_SKEYS_INTERFACE, + TYPE_DUMP_SKEYS_INTERFACE) + +struct DumpSKeysInterface { + InterfaceClass parent_class; + + /** + * @qmp_dump_skeys: Callback to dump guest's storage keys to @filename. 
+ */ + void (*qmp_dump_skeys)(const char *filename, Error **errp); +}; + #endif /* S390_STORAGE_KEYS_H */ diff --git a/include/hw/s390x/vfio-ccw.h b/include/hw/s390x/vfio-ccw.h index 4209d27657c..1e0922dca11 100644 --- a/include/hw/s390x/vfio-ccw.h +++ b/include/hw/s390x/vfio-ccw.h @@ -14,7 +14,7 @@ #ifndef HW_VFIO_CCW_H #define HW_VFIO_CCW_H -#include "hw/vfio/vfio-common.h" +#include "hw/vfio/vfio-device.h" #include "hw/s390x/s390-ccw.h" #include "hw/s390x/ccw-device.h" #include "qom/object.h" diff --git a/include/hw/scsi/esp.h b/include/hw/scsi/esp.h index 533d856aa34..3526bad7464 100644 --- a/include/hw/scsi/esp.h +++ b/include/hw/scsi/esp.h @@ -14,7 +14,11 @@ typedef void (*ESPDMAMemoryReadWriteFunc)(void *opaque, uint8_t *buf, int len); #define ESP_FIFO_SZ 16 #define ESP_CMDFIFO_SZ 32 -typedef struct ESPState ESPState; +enum ESPASCMode { + ESP_ASC_MODE_DIS = 0, /* Disconnected */ + ESP_ASC_MODE_INI = 1, /* Initiator */ + ESP_ASC_MODE_TGT = 2 /* Target */ +}; #define TYPE_ESP "esp" OBJECT_DECLARE_SIMPLE_TYPE(ESPState, ESP) @@ -40,6 +44,7 @@ struct ESPState { uint8_t cmdfifo_cdb_offset; uint8_t lun; uint32_t do_cmd; + uint8_t asc_mode; bool data_ready; int dma_enabled; @@ -106,6 +111,13 @@ struct SysBusESPState { #define CMD_DMA 0x80 #define CMD_CMD 0x7f +#define CMD_GRP_MASK 0x70 + +#define CMD_GRP_MISC 0x00 +#define CMD_GRP_INIT 0x01 +#define CMD_GRP_TRGT 0x02 +#define CMD_GRP_DISC 0x04 + #define CMD_NOP 0x00 #define CMD_FLUSH 0x01 #define CMD_RESET 0x02 @@ -140,6 +152,7 @@ struct SysBusESPState { #define INTR_FC 0x08 #define INTR_BS 0x10 #define INTR_DC 0x20 +#define INTR_IL 0x40 #define INTR_RST 0x80 #define SEQ_0 0x0 diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h index c3d5e17e385..90ee192b4d4 100644 --- a/include/hw/scsi/scsi.h +++ b/include/hw/scsi/scsi.h @@ -24,6 +24,7 @@ struct SCSIRequest { SCSIBus *bus; SCSIDevice *dev; const SCSIReqOps *ops; + AioContext *ctx; uint32_t refcount; uint32_t tag; uint32_t lun; @@ -48,6 +49,8 @@ struct SCSIRequest { bool dma_started; BlockAIOCB *aiocb; QEMUSGList *sg; + + /* Protected by SCSIDevice->requests_lock */ QTAILQ_ENTRY(SCSIRequest) next; }; @@ -76,10 +79,7 @@ struct SCSIDevice uint8_t sense[SCSI_SENSE_BUF_SIZE]; uint32_t sense_len; - /* - * The requests list is only accessed from the AioContext that executes - * requests or from the main loop when IOThread processing is stopped. 
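A class that wants to provide the DumpSKeysInterface introduced in storage-keys.h above would wire up the callback along these lines; type and function names are invented, and the DUMP_SKEYS_INTERFACE_CLASS() checker is assumed to be generated by DECLARE_CLASS_CHECKERS.

static void demo_qmp_dump_skeys(const char *filename, Error **errp)
{
    /* write the guest's storage keys to @filename */
}

static void demo_skeys_class_init(ObjectClass *oc, void *data)
{
    DumpSKeysInterface *dsi = DUMP_SKEYS_INTERFACE_CLASS(oc);

    dsi->qmp_dump_skeys = demo_qmp_dump_skeys;
}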
- */ + QemuMutex requests_lock; /* protects the requests list */ QTAILQ_HEAD(, SCSIRequest) requests; uint32_t channel; diff --git a/include/hw/sd/aspeed_sdhci.h b/include/hw/sd/aspeed_sdhci.h index 057bc5f3d13..4ef17704711 100644 --- a/include/hw/sd/aspeed_sdhci.h +++ b/include/hw/sd/aspeed_sdhci.h @@ -13,9 +13,12 @@ #include "qom/object.h" #define TYPE_ASPEED_SDHCI "aspeed.sdhci" -OBJECT_DECLARE_SIMPLE_TYPE(AspeedSDHCIState, ASPEED_SDHCI) +#define TYPE_ASPEED_2400_SDHCI TYPE_ASPEED_SDHCI "-ast2400" +#define TYPE_ASPEED_2500_SDHCI TYPE_ASPEED_SDHCI "-ast2500" +#define TYPE_ASPEED_2600_SDHCI TYPE_ASPEED_SDHCI "-ast2600" +#define TYPE_ASPEED_2700_SDHCI TYPE_ASPEED_SDHCI "-ast2700" +OBJECT_DECLARE_TYPE(AspeedSDHCIState, AspeedSDHCIClass, ASPEED_SDHCI) -#define ASPEED_SDHCI_CAPABILITIES 0x01E80080 #define ASPEED_SDHCI_NUM_SLOTS 2 #define ASPEED_SDHCI_NUM_REGS (ASPEED_SDHCI_REG_SIZE / sizeof(uint32_t)) #define ASPEED_SDHCI_REG_SIZE 0x100 @@ -32,4 +35,10 @@ struct AspeedSDHCIState { uint32_t regs[ASPEED_SDHCI_NUM_REGS]; }; +struct AspeedSDHCIClass { + SysBusDeviceClass parent_class; + + uint64_t capareg; +}; + #endif /* ASPEED_SDHCI_H */ diff --git a/include/hw/sd/sd.h b/include/hw/sd/sd.h index d35a839f5ef..55d363f58fb 100644 --- a/include/hw/sd/sd.h +++ b/include/hw/sd/sd.h @@ -75,14 +75,6 @@ typedef enum { UHS_III = 3, /* currently not supported */ } sd_uhs_mode_t; -typedef enum { - sd_spi, - sd_bc, /* broadcast -- no response */ - sd_bcr, /* broadcast with response */ - sd_ac, /* addressed -- no data transfer */ - sd_adtc, /* addressed with data transfer */ -} sd_cmd_type_t; - typedef struct { uint8_t cmd; uint32_t arg; @@ -104,7 +96,17 @@ struct SDCardClass { DeviceClass parent_class; /*< public >*/ - int (*do_command)(SDState *sd, SDRequest *req, uint8_t *response); + /** + * Process a SD command request. + * @sd: card + * @req: command request + * @resp: buffer to receive the command response + * @respsz: size of @resp buffer + * + * Return: size of the response + */ + size_t (*do_command)(SDState *sd, SDRequest *req, + uint8_t *resp, size_t respsz); /** * Write a byte to a SD card. * @sd: card @@ -127,7 +129,6 @@ struct SDCardClass { void (*set_voltage)(SDState *sd, uint16_t millivolts); uint8_t (*get_dat_lines)(SDState *sd); bool (*get_cmd_line)(SDState *sd); - void (*enable)(SDState *sd, bool enable); bool (*get_inserted)(SDState *sd); bool (*get_readonly)(SDState *sd); void (*set_cid)(SDState *sd); @@ -162,7 +163,16 @@ struct SDBusClass { void sdbus_set_voltage(SDBus *sdbus, uint16_t millivolts); uint8_t sdbus_get_dat_lines(SDBus *sdbus); bool sdbus_get_cmd_line(SDBus *sdbus); -int sdbus_do_command(SDBus *sd, SDRequest *req, uint8_t *response); +/** + * sdbus_do_command: Process a SD command request + * @sd: card + * @req: command request + * @resp: buffer to receive the command response + * @respsz: size of @resp buffer + * + * Return: size of the response + */ +size_t sdbus_do_command(SDBus *sd, SDRequest *req, uint8_t *resp, size_t respsz); /** * Write a byte to a SD bus. * @sd: bus diff --git a/include/hw/sd/sdcard_legacy.h b/include/hw/sd/sdcard_legacy.h deleted file mode 100644 index 0dc38895551..00000000000 --- a/include/hw/sd/sdcard_legacy.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * SD Memory Card emulation (deprecated legacy API) - * - * Copyright (c) 2006 Andrzej Zaborowski - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. 
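With the reworked SD command API above, controllers pass an explicit response buffer size and get the response length back. A sketch of a caller; the command choice and buffer size are arbitrary:

static void demo_send_cmd0(SDBus *sdbus)
{
    SDRequest req = { .cmd = 0, .arg = 0 };
    uint8_t resp[16];
    size_t rsplen;

    rsplen = sdbus_do_command(sdbus, &req, resp, sizeof(resp));
    if (rsplen == 0) {
        /* broadcast commands such as CMD0 return no response */
    }
}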
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef HW_SDCARD_LEGACY_H -#define HW_SDCARD_LEGACY_H - -#include "hw/sd/sd.h" - -/* Legacy functions to be used only by non-qdevified callers */ -SDState *sd_init(BlockBackend *blk, bool is_spi); -int sd_do_command(SDState *card, SDRequest *request, uint8_t *response); -void sd_write_byte(SDState *card, uint8_t value); -uint8_t sd_read_byte(SDState *card); -void sd_set_cb(SDState *card, qemu_irq readonly, qemu_irq insert); - -/* sd_enable should not be used -- it is only used on the nseries boards, - * where it is part of a broken implementation of the MMC card slot switch - * (there should be two card slots which are multiplexed to a single MMC - * controller, but instead we model it with one card and controller and - * disable the card when the second slot is selected, so it looks like the - * second slot is always empty). - */ -void sd_enable(SDState *card, bool enable); - -#endif /* HW_SDCARD_LEGACY_H */ diff --git a/include/hw/sd/sdhci.h b/include/hw/sd/sdhci.h index 6cd2822f1d1..51fb30ea528 100644 --- a/include/hw/sd/sdhci.h +++ b/include/hw/sd/sdhci.h @@ -100,11 +100,16 @@ struct SDHCIState { uint8_t sd_spec_version; uint8_t uhs_mode; uint8_t vendor; /* For vendor specific functionality */ + /* + * Write Protect pin default active low for detecting SD card + * to be protected. Set wp_inverted to invert the signal. 
+ */ + bool wp_inverted; }; typedef struct SDHCIState SDHCIState; #define SDHCI_VENDOR_NONE 0 -#define SDHCI_VENDOR_IMX 1 +#define SDHCI_VENDOR_FSL 2 /* * Controller does not provide transfer-complete interrupt when not diff --git a/include/hw/sh4/sh.h b/include/hw/sh4/sh.h index ec716cdd458..c82feef8d0a 100644 --- a/include/hw/sh4/sh.h +++ b/include/hw/sh4/sh.h @@ -38,29 +38,10 @@ struct SH7750State; struct SH7750State *sh7750_init(SuperHCPU *cpu, MemoryRegion *sysmem); -typedef struct { - /* The callback will be triggered if any of the designated lines change */ - uint16_t portamask_trigger; - uint16_t portbmask_trigger; - /* Return 0 if no action was taken */ - int (*port_change_cb) (uint16_t porta, uint16_t portb, - uint16_t *periph_pdtra, - uint16_t *periph_portdira, - uint16_t *periph_pdtrb, - uint16_t *periph_portdirb); -} sh7750_io_device; - -int sh7750_register_io_device(struct SH7750State *s, - sh7750_io_device *device); - -/* sh_serial.c */ #define TYPE_SH_SERIAL "sh-serial" #define SH_SERIAL_FEAT_SCIF (1 << 0) /* sh7750.c */ qemu_irq sh7750_irl(struct SH7750State *s); -/* tc58128.c */ -int tc58128_init(struct SH7750State *s, const char *zone1, const char *zone2); - #endif diff --git a/include/hw/sh4/sh_intc.h b/include/hw/sh4/sh_intc.h index f62d5c5e136..94f183121eb 100644 --- a/include/hw/sh4/sh_intc.h +++ b/include/hw/sh4/sh_intc.h @@ -1,7 +1,7 @@ #ifndef SH_INTC_H #define SH_INTC_H -#include "exec/memory.h" +#include "system/memory.h" typedef unsigned char intc_enum; diff --git a/include/hw/southbridge/ich9.h b/include/hw/southbridge/ich9.h index fd01649d04b..1e231e89c92 100644 --- a/include/hw/southbridge/ich9.h +++ b/include/hw/southbridge/ich9.h @@ -7,7 +7,7 @@ #include "hw/pci/pci.h" #include "hw/pci/pci_device.h" #include "hw/rtc/mc146818rtc.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qemu/notify.h" #include "qom/object.h" @@ -196,8 +196,12 @@ struct ICH9LPCState { #define ICH9_PMIO_GPE0_LEN 16 #define ICH9_PMIO_SMI_EN 0x30 #define ICH9_PMIO_SMI_EN_APMC_EN (1 << 5) +#define ICH9_PMIO_SMI_EN_SWSMI_EN (1 << 6) #define ICH9_PMIO_SMI_EN_TCO_EN (1 << 13) +#define ICH9_PMIO_SMI_EN_PERIODIC_EN (1 << 14) #define ICH9_PMIO_SMI_STS 0x34 +#define ICH9_PMIO_SMI_STS_SWSMI_STS (1 << 6) +#define ICH9_PMIO_SMI_STS_PERIODIC_STS (1 << 14) #define ICH9_PMIO_TCO_RLD 0x60 #define ICH9_PMIO_TCO_LEN 32 diff --git a/include/hw/ssi/allwinner-a10-spi.h b/include/hw/ssi/allwinner-a10-spi.h new file mode 100644 index 00000000000..da46e29a278 --- /dev/null +++ b/include/hw/ssi/allwinner-a10-spi.h @@ -0,0 +1,57 @@ +/* + * Allwinner SPI Bus Serial Interface registers definition + * + * Copyright (C) 2024 Strahinja Jankovic. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef ALLWINNER_A10_SPI_H +#define ALLWINNER_A10_SPI_H + +#include "hw/ssi/ssi.h" +#include "hw/sysbus.h" +#include "qemu/fifo8.h" +#include "qom/object.h" + +/** Size of register I/O address space used by SPI device */ +#define AW_A10_SPI_IOSIZE (0x1000) + +/** Total number of known registers */ +#define AW_A10_SPI_REGS_NUM (AW_A10_SPI_IOSIZE / sizeof(uint32_t)) +#define AW_A10_SPI_FIFO_SIZE (64) +#define AW_A10_SPI_CS_LINES_NR (4) + +#define TYPE_AW_A10_SPI "allwinner.spi" +OBJECT_DECLARE_SIMPLE_TYPE(AWA10SPIState, AW_A10_SPI) + +struct AWA10SPIState { + /*< private >*/ + SysBusDevice parent_obj; + + /*< public >*/ + MemoryRegion iomem; + SSIBus *bus; + qemu_irq irq; + qemu_irq cs_lines[AW_A10_SPI_CS_LINES_NR]; + + uint32_t regs[AW_A10_SPI_REGS_NUM]; + + Fifo8 rx_fifo; + Fifo8 tx_fifo; +}; + +#endif /* ALLWINNER_A10_SPI_H */ diff --git a/include/hw/ssi/aspeed_smc.h b/include/hw/ssi/aspeed_smc.h index 234dca32b01..25b95e74060 100644 --- a/include/hw/ssi/aspeed_smc.h +++ b/include/hw/ssi/aspeed_smc.h @@ -82,6 +82,7 @@ struct AspeedSMCState { uint8_t snoop_index; uint8_t snoop_dummies; + bool unselect; }; typedef struct AspeedSegments { diff --git a/include/hw/ssi/npcm7xx_fiu.h b/include/hw/ssi/npcm7xx_fiu.h index a3a17042896..7ebd422ca6c 100644 --- a/include/hw/ssi/npcm7xx_fiu.h +++ b/include/hw/ssi/npcm7xx_fiu.h @@ -60,6 +60,7 @@ struct NPCM7xxFIUState { int32_t cs_count; int32_t active_cs; qemu_irq *cs_lines; + uint64_t flash_size; NPCM7xxFIUFlash *flash; SSIBus *spi; diff --git a/include/hw/ssi/pnv_spi.h b/include/hw/ssi/pnv_spi.h index 8815f67d453..c591a0663da 100644 --- a/include/hw/ssi/pnv_spi.h +++ b/include/hw/ssi/pnv_spi.h @@ -23,6 +23,7 @@ #include "hw/ssi/ssi.h" #include "hw/sysbus.h" +#include "qemu/fifo8.h" #define TYPE_PNV_SPI "pnv-spi" OBJECT_DECLARE_SIMPLE_TYPE(PnvSpi, PNV_SPI) @@ -30,15 +31,19 @@ OBJECT_DECLARE_SIMPLE_TYPE(PnvSpi, PNV_SPI) #define PNV_SPI_REG_SIZE 8 #define PNV_SPI_REGS 7 -#define TYPE_PNV_SPI_BUS "pnv-spi-bus" +#define TYPE_PNV_SPI_BUS "spi" typedef struct PnvSpi { SysBusDevice parent_obj; SSIBus *ssi_bus; qemu_irq *cs_line; MemoryRegion xscom_spic_regs; + Fifo8 tx_fifo; + Fifo8 rx_fifo; + uint8_t fail_count; /* RDR Match failure counter */ /* SPI object number */ uint32_t spic_num; + uint32_t chip_id; uint8_t transfer_len; uint8_t responder_select; /* To verify if shift_n1 happens prior to shift_n2 */ diff --git a/include/hw/sysbus.h b/include/hw/sysbus.h index 3cb29a480eb..18fde8a7b48 100644 --- a/include/hw/sysbus.h +++ b/include/hw/sysbus.h @@ -4,7 +4,7 @@ /* Devices attached directly to the main system bus. 
*/ #include "hw/qdev-core.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qom/object.h" #define QDEV_MAX_MMIO 32 @@ -19,6 +19,8 @@ DECLARE_INSTANCE_CHECKER(BusState, SYSTEM_BUS, OBJECT_DECLARE_TYPE(SysBusDevice, SysBusDeviceClass, SYS_BUS_DEVICE) +#define TYPE_DYNAMIC_SYS_BUS_DEVICE "dynamic-sysbus-device" + /** * SysBusDeviceClass: * @@ -80,9 +82,9 @@ void sysbus_connect_irq(SysBusDevice *dev, int n, qemu_irq irq); bool sysbus_is_irq_connected(SysBusDevice *dev, int n); qemu_irq sysbus_get_connected_irq(SysBusDevice *dev, int n); void sysbus_mmio_map(SysBusDevice *dev, int n, hwaddr addr); +int sysbus_mmio_map_name(SysBusDevice *dev, const char*name, hwaddr addr); void sysbus_mmio_map_overlap(SysBusDevice *dev, int n, hwaddr addr, int priority); -void sysbus_mmio_unmap(SysBusDevice *dev, int n); bool sysbus_realize(SysBusDevice *dev, Error **errp); bool sysbus_realize_and_unref(SysBusDevice *dev, Error **errp); diff --git a/include/hw/timer/aspeed_timer.h b/include/hw/timer/aspeed_timer.h index 07dc6b6f2cb..a850625a055 100644 --- a/include/hw/timer/aspeed_timer.h +++ b/include/hw/timer/aspeed_timer.h @@ -16,8 +16,7 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * with this program; if not, see . */ #ifndef ASPEED_TIMER_H #define ASPEED_TIMER_H @@ -32,6 +31,7 @@ OBJECT_DECLARE_TYPE(AspeedTimerCtrlState, AspeedTimerClass, ASPEED_TIMER) #define TYPE_ASPEED_2500_TIMER TYPE_ASPEED_TIMER "-ast2500" #define TYPE_ASPEED_2600_TIMER TYPE_ASPEED_TIMER "-ast2600" #define TYPE_ASPEED_1030_TIMER TYPE_ASPEED_TIMER "-ast1030" +#define TYPE_ASPEED_2700_TIMER TYPE_ASPEED_TIMER "-ast2700" #define ASPEED_TIMER_NR_TIMERS 8 diff --git a/include/hw/timer/hpet.h b/include/hw/timer/hpet.h index d17a8d43199..c2656f7f0be 100644 --- a/include/hw/timer/hpet.h +++ b/include/hw/timer/hpet.h @@ -58,7 +58,6 @@ #define HPET_TN_CFG_WRITE_MASK 0x7f4e #define HPET_TN_INT_ROUTE_SHIFT 9 #define HPET_TN_INT_ROUTE_CAP_SHIFT 32 -#define HPET_TN_CFG_BITS_READONLY_OR_RESERVED 0xffff80b1U struct hpet_fw_entry { @@ -74,7 +73,7 @@ struct hpet_fw_config struct hpet_fw_entry hpet[8]; } QEMU_PACKED; -extern struct hpet_fw_config hpet_cfg; +extern struct hpet_fw_config hpet_fw_cfg; #define TYPE_HPET "hpet" diff --git a/include/hw/timer/imx_gpt.h b/include/hw/timer/imx_gpt.h index 5a1230da35e..5488f7e4df5 100644 --- a/include/hw/timer/imx_gpt.h +++ b/include/hw/timer/imx_gpt.h @@ -80,6 +80,7 @@ #define TYPE_IMX6_GPT "imx6.gpt" #define TYPE_IMX6UL_GPT "imx6ul.gpt" #define TYPE_IMX7_GPT "imx7.gpt" +#define TYPE_IMX8MP_GPT "imx8mp.gpt" #define TYPE_IMX_GPT TYPE_IMX25_GPT diff --git a/include/hw/timer/npcm7xx_timer.h b/include/hw/timer/npcm7xx_timer.h index d45c051b56a..e287375dce6 100644 --- a/include/hw/timer/npcm7xx_timer.h +++ b/include/hw/timer/npcm7xx_timer.h @@ -16,7 +16,7 @@ #ifndef NPCM7XX_TIMER_H #define NPCM7XX_TIMER_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/sysbus.h" #include "qemu/timer.h" diff --git a/include/hw/tricore/triboard.h b/include/hw/tricore/triboard.h index 4fdd2d7d97b..ca49a0c7520 100644 --- a/include/hw/tricore/triboard.h +++ b/include/hw/tricore/triboard.h @@ -20,8 +20,8 @@ #include "qapi/error.h" #include "hw/boards.h" -#include "sysemu/sysemu.h" -#include "exec/address-spaces.h" +#include "system/system.h" +#include "system/address-spaces.h" #include 
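The new sysbus_mmio_map_name() helper maps a region by name rather than by index. A sketch of a caller, assuming the function returns a negative value when no region with that name exists; the region name and address are invented.

static void demo_map_regs(SysBusDevice *sbd)
{
    if (sysbus_mmio_map_name(sbd, "regs", 0x10000000) < 0) {
        /* no MMIO region named "regs" on this device */
    }
}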
"qom/object.h" #include "hw/tricore/tc27x_soc.h" diff --git a/include/hw/tricore/tricore.h b/include/hw/tricore/tricore.h index c19ed3f013e..4ffc0fe1d65 100644 --- a/include/hw/tricore/tricore.h +++ b/include/hw/tricore/tricore.h @@ -1,7 +1,7 @@ #ifndef HW_TRICORE_H #define HW_TRICORE_H -#include "exec/memory.h" +#include "system/memory.h" struct tricore_boot_info { uint64_t ram_size; diff --git a/include/hw/uefi/hardware-info.h b/include/hw/uefi/hardware-info.h new file mode 100644 index 00000000000..94c38cff200 --- /dev/null +++ b/include/hw/uefi/hardware-info.h @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * pass hardware information to uefi + * + * see OvmfPkg/Library/HardwareInfoLib/ in edk2 + */ +#ifndef QEMU_UEFI_HARDWARE_INFO_H +#define QEMU_UEFI_HARDWARE_INFO_H + +/* data structures */ + +typedef enum { + HardwareInfoTypeUndefined = 0, + HardwareInfoTypeHostBridge = 1, + HardwareInfoQemuUefiVars = 2, +} HARDWARE_INFO_TYPE; + +typedef struct { + union { + uint64_t uint64; + HARDWARE_INFO_TYPE value; + } type; + uint64_t size; +} HARDWARE_INFO_HEADER; + +typedef struct { + uint64_t mmio_address; +} HARDWARE_INFO_SIMPLE_DEVICE; + +/* qemu functions */ + +void hardware_info_register(HARDWARE_INFO_TYPE type, void *info, uint64_t size); + +#endif /* QEMU_UEFI_HARDWARE_INFO_H */ diff --git a/include/hw/uefi/var-service-api.h b/include/hw/uefi/var-service-api.h new file mode 100644 index 00000000000..0d71638f3ef --- /dev/null +++ b/include/hw/uefi/var-service-api.h @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi-vars device - API of the virtual device for guest/host communication. + */ +#ifndef QEMU_UEFI_VAR_SERVICE_API_H +#define QEMU_UEFI_VAR_SERVICE_API_H + +/* qom: device names */ +#define TYPE_UEFI_VARS_X64 "uefi-vars-x64" +#define TYPE_UEFI_VARS_SYSBUS "uefi-vars-sysbus" + +/* sysbus: fdt node path */ +#define UEFI_VARS_FDT_NODE "qemu-uefi-vars" +#define UEFI_VARS_FDT_COMPAT "qemu,uefi-vars" + +/* registers */ +#define UEFI_VARS_REG_MAGIC 0x00 /* 16 bit */ +#define UEFI_VARS_REG_CMD_STS 0x02 /* 16 bit */ +#define UEFI_VARS_REG_BUFFER_SIZE 0x04 /* 32 bit */ +#define UEFI_VARS_REG_DMA_BUFFER_ADDR_LO 0x08 /* 32 bit */ +#define UEFI_VARS_REG_DMA_BUFFER_ADDR_HI 0x0c /* 32 bit */ +#define UEFI_VARS_REG_PIO_BUFFER_TRANSFER 0x10 /* 8-64 bit */ +#define UEFI_VARS_REG_PIO_BUFFER_CRC32C 0x18 /* 32 bit (read-only) */ +#define UEFI_VARS_REG_FLAGS 0x1c /* 32 bit */ +#define UEFI_VARS_REGS_SIZE 0x20 + +/* flags register */ +#define UEFI_VARS_FLAG_USE_PIO (1 << 0) + +/* magic value */ +#define UEFI_VARS_MAGIC_VALUE 0xef1 + +/* command values */ +#define UEFI_VARS_CMD_RESET 0x01 +#define UEFI_VARS_CMD_DMA_MM 0x02 +#define UEFI_VARS_CMD_PIO_MM 0x03 +#define UEFI_VARS_CMD_PIO_ZERO_OFFSET 0x04 + +/* status values */ +#define UEFI_VARS_STS_SUCCESS 0x00 +#define UEFI_VARS_STS_BUSY 0x01 +#define UEFI_VARS_STS_ERR_UNKNOWN 0x10 +#define UEFI_VARS_STS_ERR_NOT_SUPPORTED 0x11 +#define UEFI_VARS_STS_ERR_BAD_BUFFER_SIZE 0x12 + + +#endif /* QEMU_UEFI_VAR_SERVICE_API_H */ diff --git a/include/hw/uefi/var-service-edk2.h b/include/hw/uefi/var-service-edk2.h new file mode 100644 index 00000000000..c743a8df948 --- /dev/null +++ b/include/hw/uefi/var-service-edk2.h @@ -0,0 +1,227 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi-vars device - structs and defines from edk2 + * + * Note: The edk2 UINTN type has been mapped to uint64_t, + * so the structs are compatible with 64bit edk2 builds. 
+ */ +#ifndef QEMU_UEFI_VAR_SERVICE_EDK2_H +#define QEMU_UEFI_VAR_SERVICE_EDK2_H + +#include "qemu/uuid.h" + +#define MAX_BIT 0x8000000000000000ULL +#define ENCODE_ERROR(StatusCode) (MAX_BIT | (StatusCode)) +#define EFI_SUCCESS 0 +#define EFI_INVALID_PARAMETER ENCODE_ERROR(2) +#define EFI_UNSUPPORTED ENCODE_ERROR(3) +#define EFI_BAD_BUFFER_SIZE ENCODE_ERROR(4) +#define EFI_BUFFER_TOO_SMALL ENCODE_ERROR(5) +#define EFI_WRITE_PROTECTED ENCODE_ERROR(8) +#define EFI_OUT_OF_RESOURCES ENCODE_ERROR(9) +#define EFI_NOT_FOUND ENCODE_ERROR(14) +#define EFI_ACCESS_DENIED ENCODE_ERROR(15) +#define EFI_ALREADY_STARTED ENCODE_ERROR(20) +#define EFI_SECURITY_VIOLATION ENCODE_ERROR(26) + +#define EFI_VARIABLE_NON_VOLATILE 0x01 +#define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x02 +#define EFI_VARIABLE_RUNTIME_ACCESS 0x04 +#define EFI_VARIABLE_HARDWARE_ERROR_RECORD 0x08 +#define EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS 0x10 /* deprecated */ +#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x20 +#define EFI_VARIABLE_APPEND_WRITE 0x40 + +/* SecureBootEnable */ +#define SECURE_BOOT_ENABLE 1 +#define SECURE_BOOT_DISABLE 0 + +/* SecureBoot */ +#define SECURE_BOOT_MODE_ENABLE 1 +#define SECURE_BOOT_MODE_DISABLE 0 + +/* CustomMode */ +#define CUSTOM_SECURE_BOOT_MODE 1 +#define STANDARD_SECURE_BOOT_MODE 0 + +/* SetupMode */ +#define SETUP_MODE 1 +#define USER_MODE 0 + +typedef uint64_t efi_status; +typedef struct mm_header mm_header; + +/* EFI_MM_COMMUNICATE_HEADER */ +struct mm_header { + QemuUUID guid; + uint64_t length; +}; + +/* --- EfiSmmVariableProtocol ---------------------------------------- */ + +#define SMM_VARIABLE_FUNCTION_GET_VARIABLE 1 +#define SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME 2 +#define SMM_VARIABLE_FUNCTION_SET_VARIABLE 3 +#define SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO 4 +#define SMM_VARIABLE_FUNCTION_READY_TO_BOOT 5 +#define SMM_VARIABLE_FUNCTION_EXIT_BOOT_SERVICE 6 +#define SMM_VARIABLE_FUNCTION_LOCK_VARIABLE 8 +#define SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE 11 + +typedef struct mm_variable mm_variable; +typedef struct mm_variable_access mm_variable_access; +typedef struct mm_next_variable mm_next_variable; +typedef struct mm_next_variable mm_lock_variable; +typedef struct mm_variable_info mm_variable_info; +typedef struct mm_get_payload_size mm_get_payload_size; + +/* SMM_VARIABLE_COMMUNICATE_HEADER */ +struct mm_variable { + uint64_t function; + uint64_t status; +}; + +/* SMM_VARIABLE_COMMUNICATE_ACCESS_VARIABLE */ +struct QEMU_PACKED mm_variable_access { + QemuUUID guid; + uint64_t data_size; + uint64_t name_size; + uint32_t attributes; + /* Name */ + /* Data */ +}; + +/* SMM_VARIABLE_COMMUNICATE_GET_NEXT_VARIABLE_NAME */ +struct mm_next_variable { + QemuUUID guid; + uint64_t name_size; + /* Name */ +}; + +/* SMM_VARIABLE_COMMUNICATE_QUERY_VARIABLE_INFO */ +struct QEMU_PACKED mm_variable_info { + uint64_t max_storage_size; + uint64_t free_storage_size; + uint64_t max_variable_size; + uint32_t attributes; +}; + +/* SMM_VARIABLE_COMMUNICATE_GET_PAYLOAD_SIZE */ +struct mm_get_payload_size { + uint64_t payload_size; +}; + +/* --- VarCheckPolicyLibMmiHandler ----------------------------------- */ + +#define VAR_CHECK_POLICY_COMMAND_DISABLE 0x01 +#define VAR_CHECK_POLICY_COMMAND_IS_ENABLED 0x02 +#define VAR_CHECK_POLICY_COMMAND_REGISTER 0x03 +#define VAR_CHECK_POLICY_COMMAND_DUMP 0x04 +#define VAR_CHECK_POLICY_COMMAND_LOCK 0x05 + +typedef struct mm_check_policy mm_check_policy; +typedef struct mm_check_policy_is_enabled mm_check_policy_is_enabled; +typedef struct 
mm_check_policy_dump_params mm_check_policy_dump_params; + +/* VAR_CHECK_POLICY_COMM_HEADER */ +struct QEMU_PACKED mm_check_policy { + uint32_t signature; + uint32_t revision; + uint32_t command; + uint64_t result; +}; + +/* VAR_CHECK_POLICY_COMM_IS_ENABLED_PARAMS */ +struct QEMU_PACKED mm_check_policy_is_enabled { + uint8_t state; +}; + +/* VAR_CHECK_POLICY_COMM_DUMP_PARAMS */ +struct QEMU_PACKED mm_check_policy_dump_params { + uint32_t page_requested; + uint32_t total_size; + uint32_t page_size; + uint8_t has_more; +}; + +/* --- Edk2VariablePolicyProtocol ------------------------------------ */ + +#define VARIABLE_POLICY_ENTRY_REVISION 0x00010000 + +#define VARIABLE_POLICY_TYPE_NO_LOCK 0 +#define VARIABLE_POLICY_TYPE_LOCK_NOW 1 +#define VARIABLE_POLICY_TYPE_LOCK_ON_CREATE 2 +#define VARIABLE_POLICY_TYPE_LOCK_ON_VAR_STATE 3 + +typedef struct variable_policy_entry variable_policy_entry; +typedef struct variable_lock_on_var_state variable_lock_on_var_state; + +/* VARIABLE_POLICY_ENTRY */ +struct variable_policy_entry { + uint32_t version; + uint16_t size; + uint16_t offset_to_name; + QemuUUID namespace; + uint32_t min_size; + uint32_t max_size; + uint32_t attributes_must_have; + uint32_t attributes_cant_have; + uint8_t lock_policy_type; + uint8_t padding[3]; + /* LockPolicy */ + /* Name */ +}; + +/* VARIABLE_LOCK_ON_VAR_STATE_POLICY */ +struct variable_lock_on_var_state { + QemuUUID namespace; + uint8_t value; + uint8_t padding; + /* Name */ +}; + +/* --- variable authentication --------------------------------------- */ + +#define WIN_CERT_TYPE_EFI_GUID 0x0EF1 + +typedef struct efi_time efi_time; +typedef struct efi_siglist efi_siglist; +typedef struct variable_auth_2 variable_auth_2; + +/* EFI_TIME */ +struct efi_time { + uint16_t year; + uint8_t month; + uint8_t day; + uint8_t hour; + uint8_t minute; + uint8_t second; + uint8_t pad1; + uint32_t nanosecond; + int16_t timezone; + uint8_t daylight; + uint8_t pad2; +}; + +/* EFI_SIGNATURE_LIST */ +struct efi_siglist { + QemuUUID guid_type; + uint32_t siglist_size; + uint32_t header_size; + uint32_t sig_size; +}; + +/* EFI_VARIABLE_AUTHENTICATION_2 */ +struct variable_auth_2 { + struct efi_time timestamp; + + /* WIN_CERTIFICATE_UEFI_GUID */ + uint32_t hdr_length; + uint16_t hdr_revision; + uint16_t hdr_cert_type; + QemuUUID guid_cert_type; + uint8_t cert_data[]; +}; + +#endif /* QEMU_UEFI_VAR_SERVICE_EDK2_H */ diff --git a/include/hw/uefi/var-service.h b/include/hw/uefi/var-service.h new file mode 100644 index 00000000000..f7ceac4ce24 --- /dev/null +++ b/include/hw/uefi/var-service.h @@ -0,0 +1,191 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * uefi-vars device - state struct and function prototypes + */ +#ifndef QEMU_UEFI_VAR_SERVICE_H +#define QEMU_UEFI_VAR_SERVICE_H + +#include "qemu/uuid.h" +#include "qemu/queue.h" + +#include "hw/uefi/var-service-edk2.h" + +#define MAX_BUFFER_SIZE (64 * 1024) + +typedef struct uefi_variable uefi_variable; +typedef struct uefi_var_policy uefi_var_policy; +typedef struct uefi_vars_state uefi_vars_state; + +typedef struct uefi_vars_cert uefi_vars_cert; +typedef struct uefi_vars_hash uefi_vars_hash; +typedef struct uefi_vars_siglist uefi_vars_siglist; + +struct uefi_variable { + QemuUUID guid; + uint16_t *name; + uint32_t name_size; + uint32_t attributes; + void *data; + uint32_t data_size; + efi_time time; + void *digest; + uint32_t digest_size; + QTAILQ_ENTRY(uefi_variable) next; +}; + +struct uefi_var_policy { + variable_policy_entry *entry; + uint32_t entry_size; + uint16_t *name; + 
uint32_t name_size; + + /* number of hashmarks (wildcard character) in name */ + uint32_t hashmarks; + + QTAILQ_ENTRY(uefi_var_policy) next; +}; + +struct uefi_vars_state { + MemoryRegion mr; + uint16_t sts; + uint32_t buf_size; + uint32_t buf_addr_lo; + uint32_t buf_addr_hi; + uint8_t *buffer; + QTAILQ_HEAD(, uefi_variable) variables; + QTAILQ_HEAD(, uefi_var_policy) var_policies; + + /* pio transfer buffer */ + uint32_t pio_xfer_offset; + uint8_t *pio_xfer_buffer; + + /* boot phases */ + bool end_of_dxe; + bool ready_to_boot; + bool exit_boot_service; + bool policy_locked; + + /* storage accounting */ + uint64_t max_storage; + uint64_t used_storage; + + /* config options */ + char *jsonfile; + int jsonfd; + bool force_secure_boot; + bool disable_custom_mode; + bool use_pio; +}; + +struct uefi_vars_cert { + QTAILQ_ENTRY(uefi_vars_cert) next; + QemuUUID owner; + uint64_t size; + uint8_t data[]; +}; + +struct uefi_vars_hash { + QTAILQ_ENTRY(uefi_vars_hash) next; + QemuUUID owner; + uint8_t data[]; +}; + +struct uefi_vars_siglist { + QTAILQ_HEAD(, uefi_vars_cert) x509; + QTAILQ_HEAD(, uefi_vars_hash) sha256; +}; + +/* vars-service-guid.c */ +extern const QemuUUID EfiGlobalVariable; +extern const QemuUUID EfiImageSecurityDatabase; +extern const QemuUUID EfiCustomModeEnable; +extern const QemuUUID EfiSecureBootEnableDisable; + +extern const QemuUUID EfiCertSha256Guid; +extern const QemuUUID EfiCertSha384Guid; +extern const QemuUUID EfiCertSha512Guid; +extern const QemuUUID EfiCertRsa2048Guid; +extern const QemuUUID EfiCertX509Guid; +extern const QemuUUID EfiCertTypePkcs7Guid; + +extern const QemuUUID EfiSmmVariableProtocolGuid; +extern const QemuUUID VarCheckPolicyLibMmiHandlerGuid; + +extern const QemuUUID EfiEndOfDxeEventGroupGuid; +extern const QemuUUID EfiEventReadyToBootGuid; +extern const QemuUUID EfiEventExitBootServicesGuid; + +/* vars-service-utils.c */ +gboolean uefi_str_is_valid(const uint16_t *str, size_t len, + gboolean must_be_null_terminated); +size_t uefi_strlen(const uint16_t *str, size_t len); +gboolean uefi_str_equal_ex(const uint16_t *a, size_t alen, + const uint16_t *b, size_t blen, + gboolean wildcards_in_a); +gboolean uefi_str_equal(const uint16_t *a, size_t alen, + const uint16_t *b, size_t blen); +char *uefi_ucs2_to_ascii(const uint16_t *ucs2, uint64_t ucs2_size); +int uefi_time_compare(efi_time *a, efi_time *b); +void uefi_trace_variable(const char *action, QemuUUID guid, + const uint16_t *name, uint64_t name_size); +void uefi_trace_status(const char *action, efi_status status); + +/* vars-service-core.c */ +extern const VMStateDescription vmstate_uefi_vars; +void uefi_vars_init(Object *obj, uefi_vars_state *uv); +void uefi_vars_realize(uefi_vars_state *uv, Error **errp); +void uefi_vars_hard_reset(uefi_vars_state *uv); + +/* vars-service-json.c */ +void uefi_vars_json_init(uefi_vars_state *uv, Error **errp); +void uefi_vars_json_save(uefi_vars_state *uv); +void uefi_vars_json_load(uefi_vars_state *uv, Error **errp); + +/* vars-service-vars.c */ +extern const VMStateDescription vmstate_uefi_variable; +uefi_variable *uefi_vars_find_variable(uefi_vars_state *uv, QemuUUID guid, + const uint16_t *name, + uint64_t name_size); +void uefi_vars_set_variable(uefi_vars_state *uv, QemuUUID guid, + const uint16_t *name, uint64_t name_size, + uint32_t attributes, + void *data, uint64_t data_size); +void uefi_vars_clear_volatile(uefi_vars_state *uv); +void uefi_vars_clear_all(uefi_vars_state *uv); +void uefi_vars_update_storage(uefi_vars_state *uv); +uint32_t 
uefi_vars_mm_vars_proto(uefi_vars_state *uv); + +/* vars-service-auth.c */ +bool uefi_vars_is_sb_pk(uefi_variable *var); +bool uefi_vars_is_sb_any(uefi_variable *var); +efi_status uefi_vars_check_auth_2(uefi_vars_state *uv, uefi_variable *var, + mm_variable_access *va, void *data); +efi_status uefi_vars_check_secure_boot(uefi_vars_state *uv, uefi_variable *var); +void uefi_vars_auth_init(uefi_vars_state *uv); + +/* vars-service-pkcs7.c */ +efi_status uefi_vars_check_pkcs7_2(uefi_variable *siglist, + void **digest, uint32_t *digest_size, + mm_variable_access *va, void *data); + +/* vars-service-siglist.c */ +void uefi_vars_siglist_init(uefi_vars_siglist *siglist); +void uefi_vars_siglist_free(uefi_vars_siglist *siglist); +void uefi_vars_siglist_parse(uefi_vars_siglist *siglist, + void *data, uint64_t size); +uint64_t uefi_vars_siglist_blob_size(uefi_vars_siglist *siglist); +void uefi_vars_siglist_blob_generate(uefi_vars_siglist *siglist, + void *data, uint64_t size); + +/* vars-service-policy.c */ +extern const VMStateDescription vmstate_uefi_var_policy; +efi_status uefi_vars_policy_check(uefi_vars_state *uv, + uefi_variable *var, + gboolean is_newvar); +void uefi_vars_policies_clear(uefi_vars_state *uv); +uefi_var_policy *uefi_vars_add_policy(uefi_vars_state *uv, + variable_policy_entry *pe); +uint32_t uefi_vars_mm_check_policy_proto(uefi_vars_state *uv); + +#endif /* QEMU_UEFI_VAR_SERVICE_H */ diff --git a/include/hw/usb.h b/include/hw/usb.h index d46d96779ad..26a9f3ecdee 100644 --- a/include/hw/usb.h +++ b/include/hw/usb.h @@ -25,7 +25,7 @@ * THE SOFTWARE. */ -#include "exec/memory.h" +#include "system/memory.h" #include "hw/qdev-core.h" #include "qemu/iov.h" #include "qemu/queue.h" @@ -579,16 +579,6 @@ void usb_pcap_init(FILE *fp); void usb_pcap_ctrl(USBPacket *p, bool setup); void usb_pcap_data(USBPacket *p, bool setup); -static inline USBDevice *usb_new(const char *name) -{ - return USB_DEVICE(qdev_new(name)); -} - -static inline USBDevice *usb_try_new(const char *name) -{ - return USB_DEVICE(qdev_try_new(name)); -} - static inline bool usb_realize_and_unref(USBDevice *dev, USBBus *bus, Error **errp) { return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp); @@ -596,7 +586,7 @@ static inline bool usb_realize_and_unref(USBDevice *dev, USBBus *bus, Error **er static inline USBDevice *usb_create_simple(USBBus *bus, const char *name) { - USBDevice *dev = usb_new(name); + USBDevice *dev = USB_DEVICE(qdev_new(name)); usb_realize_and_unref(dev, bus, &error_abort); return dev; diff --git a/include/hw/usb/dwc2-regs.h b/include/hw/usb/dwc2-regs.h index 0bf3f2aa17f..b8b42665431 100644 --- a/include/hw/usb/dwc2-regs.h +++ b/include/hw/usb/dwc2-regs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */ /* * Imported from the Linux kernel file drivers/usb/dwc2/hw.h, commit * a89bae709b3492b478480a2c9734e7e9393b279c ("usb: dwc2: Move @@ -838,7 +838,7 @@ struct dwc2_dma_desc { uint32_t status; uint32_t buf; -} __packed; +} QEMU_PACKED; /* Host Mode DMA descriptor status quadlet */ diff --git a/include/hw/usb/hcd-dwc3.h b/include/hw/usb/hcd-dwc3.h index f752a27e940..dbdf12b21d7 100644 --- a/include/hw/usb/hcd-dwc3.h +++ b/include/hw/usb/hcd-dwc3.h @@ -35,7 +35,7 @@ #define USB_DWC3(obj) \ OBJECT_CHECK(USBDWC3, (obj), TYPE_USB_DWC3) -#define USB_DWC3_R_MAX ((0x530 / 4) + 1) +#define USB_DWC3_R_MAX (0x600 / 4) #define DWC3_SIZE 0x10000 typedef struct USBDWC3 { diff --git a/include/hw/usb/hcd-musb.h 
b/include/hw/usb/hcd-musb.h deleted file mode 100644 index 4d4b1ec0fc2..00000000000 --- a/include/hw/usb/hcd-musb.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * "Inventra" High-speed Dual-Role Controller (MUSB-HDRC), Mentor Graphics, - * USB2.0 OTG compliant core used in various chips. - * - * Only host-mode and non-DMA accesses are currently supported. - * - * Copyright (C) 2008 Nokia Corporation - * Written by Andrzej Zaborowski - * - * SPDX-License-Identifier: GPL-2.0-or-later - */ - -#ifndef HW_USB_HCD_MUSB_H -#define HW_USB_HCD_MUSB_H - -#include "exec/hwaddr.h" - -enum musb_irq_source_e { - musb_irq_suspend = 0, - musb_irq_resume, - musb_irq_rst_babble, - musb_irq_sof, - musb_irq_connect, - musb_irq_disconnect, - musb_irq_vbus_request, - musb_irq_vbus_error, - musb_irq_rx, - musb_irq_tx, - musb_set_vbus, - musb_set_session, - /* Add new interrupts here */ - musb_irq_max /* total number of interrupts defined */ -}; - -/* TODO convert hcd-musb to QOM/qdev and remove MUSBReadFunc/MUSBWriteFunc */ -typedef void MUSBWriteFunc(void *opaque, hwaddr addr, uint32_t value); -typedef uint32_t MUSBReadFunc(void *opaque, hwaddr addr); -extern MUSBReadFunc * const musb_read[]; -extern MUSBWriteFunc * const musb_write[]; - -typedef struct MUSBState MUSBState; - -MUSBState *musb_init(DeviceState *parent_device, int gpio_base); -void musb_reset(MUSBState *s); -uint32_t musb_core_intr_get(MUSBState *s); -void musb_core_intr_clear(MUSBState *s, uint32_t mask); -void musb_set_size(MUSBState *s, int epnum, int size, int is_tx); - -#endif diff --git a/include/hw/usb/uhci-regs.h b/include/hw/usb/uhci-regs.h index fd45d29db0f..5b81714e5c2 100644 --- a/include/hw/usb/uhci-regs.h +++ b/include/hw/usb/uhci-regs.h @@ -1,6 +1,17 @@ #ifndef HW_USB_UHCI_REGS_H #define HW_USB_UHCI_REGS_H +#define UHCI_USBCMD 0 +#define UHCI_USBSTS 2 +#define UHCI_USBINTR 4 +#define UHCI_USBFRNUM 6 +#define UHCI_USBFLBASEADD 8 +#define UHCI_USBSOF 0x0c +#define UHCI_USBPORTSC1 0x10 +#define UHCI_USBPORTSC2 0x12 +#define UHCI_USBPORTSC3 0x14 +#define UHCI_USBPORTSC4 0x16 + #define UHCI_CMD_FGR (1 << 4) #define UHCI_CMD_EGSM (1 << 3) #define UHCI_CMD_GRESET (1 << 2) diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h deleted file mode 100644 index fed499b199f..00000000000 --- a/include/hw/vfio/vfio-common.h +++ /dev/null @@ -1,311 +0,0 @@ -/* - * common header for vfio based device assignment support - * - * Copyright Red Hat, Inc. 2012 - * - * Authors: - * Alex Williamson - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - * Based on qemu-kvm device-assignment: - * Adapted for KVM by Qumranet. 
- * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com) - * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com) - * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com) - * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com) - * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com) - */ - -#ifndef HW_VFIO_VFIO_COMMON_H -#define HW_VFIO_VFIO_COMMON_H - -#include "exec/memory.h" -#include "qemu/queue.h" -#include "qemu/notify.h" -#include "ui/console.h" -#include "hw/display/ramfb.h" -#ifdef CONFIG_LINUX -#include -#endif -#include "sysemu/sysemu.h" -#include "hw/vfio/vfio-container-base.h" -#include "sysemu/host_iommu_device.h" -#include "sysemu/iommufd.h" - -#define VFIO_MSG_PREFIX "vfio %s: " - -enum { - VFIO_DEVICE_TYPE_PCI = 0, - VFIO_DEVICE_TYPE_PLATFORM = 1, - VFIO_DEVICE_TYPE_CCW = 2, - VFIO_DEVICE_TYPE_AP = 3, -}; - -typedef struct VFIOMmap { - MemoryRegion mem; - void *mmap; - off_t offset; - size_t size; -} VFIOMmap; - -typedef struct VFIORegion { - struct VFIODevice *vbasedev; - off_t fd_offset; /* offset of region within device fd */ - MemoryRegion *mem; /* slow, read/write access */ - size_t size; - uint32_t flags; /* VFIO region flags (rd/wr/mmap) */ - uint32_t nr_mmaps; - VFIOMmap *mmaps; - uint8_t nr; /* cache the region number for debug */ -} VFIORegion; - -typedef struct VFIOMigration { - struct VFIODevice *vbasedev; - VMChangeStateEntry *vm_state; - NotifierWithReturn migration_state; - uint32_t device_state; - int data_fd; - void *data_buffer; - size_t data_buffer_size; - uint64_t mig_flags; - uint64_t precopy_init_size; - uint64_t precopy_dirty_size; - bool initial_data_sent; -} VFIOMigration; - -struct VFIOGroup; - -typedef struct VFIOContainer { - VFIOContainerBase bcontainer; - int fd; /* /dev/vfio/vfio, empowered by the attached groups */ - unsigned iommu_type; - QLIST_HEAD(, VFIOGroup) group_list; -} VFIOContainer; - -OBJECT_DECLARE_SIMPLE_TYPE(VFIOContainer, VFIO_IOMMU_LEGACY); - -typedef struct VFIOHostDMAWindow { - hwaddr min_iova; - hwaddr max_iova; - uint64_t iova_pgsizes; - QLIST_ENTRY(VFIOHostDMAWindow) hostwin_next; -} VFIOHostDMAWindow; - -typedef struct IOMMUFDBackend IOMMUFDBackend; - -typedef struct VFIOIOASHwpt { - uint32_t hwpt_id; - uint32_t hwpt_flags; - QLIST_HEAD(, VFIODevice) device_list; - QLIST_ENTRY(VFIOIOASHwpt) next; -} VFIOIOASHwpt; - -typedef struct VFIOIOMMUFDContainer { - VFIOContainerBase bcontainer; - IOMMUFDBackend *be; - uint32_t ioas_id; - QLIST_HEAD(, VFIOIOASHwpt) hwpt_list; -} VFIOIOMMUFDContainer; - -OBJECT_DECLARE_SIMPLE_TYPE(VFIOIOMMUFDContainer, VFIO_IOMMU_IOMMUFD); - -typedef struct VFIODeviceOps VFIODeviceOps; - -typedef struct VFIODevice { - QLIST_ENTRY(VFIODevice) next; - QLIST_ENTRY(VFIODevice) container_next; - QLIST_ENTRY(VFIODevice) global_next; - struct VFIOGroup *group; - VFIOContainerBase *bcontainer; - char *sysfsdev; - char *name; - DeviceState *dev; - int fd; - int type; - bool mdev; - bool reset_works; - bool needs_reset; - bool no_mmap; - bool ram_block_discard_allowed; - OnOffAuto enable_migration; - bool migration_events; - VFIODeviceOps *ops; - unsigned int num_irqs; - unsigned int num_regions; - unsigned int flags; - VFIOMigration *migration; - Error *migration_blocker; - OnOffAuto pre_copy_dirty_page_tracking; - OnOffAuto device_dirty_page_tracking; - bool dirty_pages_supported; - bool dirty_tracking; - bool iommu_dirty_tracking; - HostIOMMUDevice *hiod; - int devid; - IOMMUFDBackend *iommufd; - VFIOIOASHwpt *hwpt; - QLIST_ENTRY(VFIODevice) hwpt_next; -} 
VFIODevice; - -struct VFIODeviceOps { - void (*vfio_compute_needs_reset)(VFIODevice *vdev); - int (*vfio_hot_reset_multi)(VFIODevice *vdev); - void (*vfio_eoi)(VFIODevice *vdev); - Object *(*vfio_get_object)(VFIODevice *vdev); - - /** - * @vfio_save_config - * - * Save device config state - * - * @vdev: #VFIODevice for which to save the config - * @f: #QEMUFile where to send the data - * @errp: pointer to Error*, to store an error if it happens. - * - * Returns zero to indicate success and negative for error - */ - int (*vfio_save_config)(VFIODevice *vdev, QEMUFile *f, Error **errp); - - /** - * @vfio_load_config - * - * Load device config state - * - * @vdev: #VFIODevice for which to load the config - * @f: #QEMUFile where to get the data - * - * Returns zero to indicate success and negative for error - */ - int (*vfio_load_config)(VFIODevice *vdev, QEMUFile *f); -}; - -typedef struct VFIOGroup { - int fd; - int groupid; - VFIOContainer *container; - QLIST_HEAD(, VFIODevice) device_list; - QLIST_ENTRY(VFIOGroup) next; - QLIST_ENTRY(VFIOGroup) container_next; - bool ram_block_discard_allowed; -} VFIOGroup; - -#define TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO TYPE_HOST_IOMMU_DEVICE "-legacy-vfio" -#define TYPE_HOST_IOMMU_DEVICE_IOMMUFD_VFIO \ - TYPE_HOST_IOMMU_DEVICE_IOMMUFD "-vfio" - -typedef struct VFIODMABuf { - QemuDmaBuf *buf; - uint32_t pos_x, pos_y, pos_updates; - uint32_t hot_x, hot_y, hot_updates; - int dmabuf_id; - QTAILQ_ENTRY(VFIODMABuf) next; -} VFIODMABuf; - -typedef struct VFIODisplay { - QemuConsole *con; - RAMFBState *ramfb; - struct vfio_region_info *edid_info; - struct vfio_region_gfx_edid *edid_regs; - uint8_t *edid_blob; - QEMUTimer *edid_link_timer; - struct { - VFIORegion buffer; - DisplaySurface *surface; - } region; - struct { - QTAILQ_HEAD(, VFIODMABuf) bufs; - VFIODMABuf *primary; - VFIODMABuf *cursor; - } dmabuf; -} VFIODisplay; - -VFIOAddressSpace *vfio_get_address_space(AddressSpace *as); -void vfio_put_address_space(VFIOAddressSpace *space); -void vfio_address_space_insert(VFIOAddressSpace *space, - VFIOContainerBase *bcontainer); - -void vfio_disable_irqindex(VFIODevice *vbasedev, int index); -void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index); -void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index); -bool vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex, - int action, int fd, Error **errp); -void vfio_region_write(void *opaque, hwaddr addr, - uint64_t data, unsigned size); -uint64_t vfio_region_read(void *opaque, - hwaddr addr, unsigned size); -int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region, - int index, const char *name); -int vfio_region_mmap(VFIORegion *region); -void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled); -void vfio_region_unmap(VFIORegion *region); -void vfio_region_exit(VFIORegion *region); -void vfio_region_finalize(VFIORegion *region); -void vfio_reset_handler(void *opaque); -struct vfio_device_info *vfio_get_device_info(int fd); -bool vfio_device_is_mdev(VFIODevice *vbasedev); -bool vfio_device_hiod_realize(VFIODevice *vbasedev, Error **errp); -bool vfio_attach_device(char *name, VFIODevice *vbasedev, - AddressSpace *as, Error **errp); -void vfio_detach_device(VFIODevice *vbasedev); - -int vfio_kvm_device_add_fd(int fd, Error **errp); -int vfio_kvm_device_del_fd(int fd, Error **errp); - -bool vfio_cpr_register_container(VFIOContainerBase *bcontainer, Error **errp); -void vfio_cpr_unregister_container(VFIOContainerBase *bcontainer); - -extern const 
MemoryRegionOps vfio_region_ops; -typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList; -typedef QLIST_HEAD(VFIODeviceList, VFIODevice) VFIODeviceList; -extern VFIOGroupList vfio_group_list; -extern VFIODeviceList vfio_device_list; -extern const MemoryListener vfio_memory_listener; -extern int vfio_kvm_device_fd; - -bool vfio_mig_active(void); -int vfio_block_multiple_devices_migration(VFIODevice *vbasedev, Error **errp); -void vfio_unblock_multiple_devices_migration(void); -bool vfio_viommu_preset(VFIODevice *vbasedev); -int64_t vfio_mig_bytes_transferred(void); -void vfio_reset_bytes_transferred(void); -bool vfio_device_state_is_running(VFIODevice *vbasedev); -bool vfio_device_state_is_precopy(VFIODevice *vbasedev); - -#ifdef CONFIG_LINUX -int vfio_get_region_info(VFIODevice *vbasedev, int index, - struct vfio_region_info **info); -int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type, - uint32_t subtype, struct vfio_region_info **info); -bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type); -struct vfio_info_cap_header * -vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id); -bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info, - unsigned int *avail); -struct vfio_info_cap_header * -vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id); -struct vfio_info_cap_header * -vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id); -#endif - -bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp); -void vfio_migration_exit(VFIODevice *vbasedev); - -int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size); -bool -vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer); -bool -vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer); -int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer, - VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp); -int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova, - uint64_t size, ram_addr_t ram_addr, Error **errp); - -/* Returns 0 on success, or a negative errno. 
*/ -bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp); -void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp); -void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops, - DeviceState *dev, bool ram_discard); -int vfio_device_get_aw_bits(VFIODevice *vdev); -#endif /* HW_VFIO_VFIO_COMMON_H */ diff --git a/include/hw/vfio/vfio-container-base.h b/include/hw/vfio/vfio-container-base.h index 62a8b60d87d..bded6e993ff 100644 --- a/include/hw/vfio/vfio-container-base.h +++ b/include/hw/vfio/vfio-container-base.h @@ -13,7 +13,7 @@ #ifndef HW_VFIO_VFIO_CONTAINER_BASE_H #define HW_VFIO_VFIO_CONTAINER_BASE_H -#include "exec/memory.h" +#include "system/memory.h" typedef struct VFIODevice VFIODevice; typedef struct VFIOIOMMUClass VFIOIOMMUClass; @@ -44,6 +44,7 @@ typedef struct VFIOContainerBase { unsigned long pgsizes; unsigned int dma_max_mappings; bool dirty_pages_supported; + bool dirty_pages_started; /* Protected by BQL */ QLIST_HEAD(, VFIOGuestIOMMU) giommu_list; QLIST_HEAD(, VFIORamDiscardListener) vrdl_list; QLIST_ENTRY(VFIOContainerBase) next; @@ -70,12 +71,17 @@ typedef struct VFIORamDiscardListener { QLIST_ENTRY(VFIORamDiscardListener) next; } VFIORamDiscardListener; +VFIOAddressSpace *vfio_address_space_get(AddressSpace *as); +void vfio_address_space_put(VFIOAddressSpace *space); +void vfio_address_space_insert(VFIOAddressSpace *space, + VFIOContainerBase *bcontainer); + int vfio_container_dma_map(VFIOContainerBase *bcontainer, hwaddr iova, ram_addr_t size, - void *vaddr, bool readonly); + void *vaddr, bool readonly, MemoryRegion *mr); int vfio_container_dma_unmap(VFIOContainerBase *bcontainer, hwaddr iova, ram_addr_t size, - IOMMUTLBEntry *iotlb); + IOMMUTLBEntry *iotlb, bool unmap_all); bool vfio_container_add_section_window(VFIOContainerBase *bcontainer, MemoryRegionSection *section, Error **errp); @@ -83,8 +89,12 @@ void vfio_container_del_section_window(VFIOContainerBase *bcontainer, MemoryRegionSection *section); int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer, bool start, Error **errp); +bool vfio_container_dirty_tracking_is_started( + const VFIOContainerBase *bcontainer); +bool vfio_container_devices_dirty_tracking_is_supported( + const VFIOContainerBase *bcontainer); int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer, - VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp); + uint64_t iova, uint64_t size, ram_addr_t ram_addr, Error **errp); GList *vfio_container_get_iova_ranges(const VFIOContainerBase *bcontainer); @@ -99,25 +109,121 @@ vfio_container_get_page_size_mask(const VFIOContainerBase *bcontainer) #define TYPE_VFIO_IOMMU_LEGACY TYPE_VFIO_IOMMU "-legacy" #define TYPE_VFIO_IOMMU_SPAPR TYPE_VFIO_IOMMU "-spapr" #define TYPE_VFIO_IOMMU_IOMMUFD TYPE_VFIO_IOMMU "-iommufd" +#define TYPE_VFIO_IOMMU_USER TYPE_VFIO_IOMMU "-user" OBJECT_DECLARE_TYPE(VFIOContainerBase, VFIOIOMMUClass, VFIO_IOMMU) struct VFIOIOMMUClass { ObjectClass parent_class; - /* Properties */ - const char *hiod_typename; - - /* basic feature */ + /** + * @setup + * + * Perform basic setup of the container, including configuring IOMMU + * capabilities, IOVA ranges, supported page sizes, etc. + * + * @bcontainer: #VFIOContainerBase + * @errp: pointer to Error*, to store an error if it happens. + * + * Returns true to indicate success and false for error. 
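The container DMA entry points above now take the backing MemoryRegion on map and an unmap_all flag on unmap. A minimal caller sketch, assuming the usual memory API helpers (memory_region_get_ram_ptr(), int128_get64()) and that a zero iova/size pair is acceptable when unmap_all is set; the example_* helpers are illustrative only, not part of this patch:

static int example_map_section(VFIOContainerBase *bcontainer,
                               MemoryRegionSection *section)
{
    hwaddr iova = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    void *vaddr = memory_region_get_ram_ptr(section->mr) +
                  section->offset_within_region;

    /* the MemoryRegion is now passed down so backends can reach fds etc. */
    return vfio_container_dma_map(bcontainer, iova, size, vaddr,
                                  section->readonly, section->mr);
}

static void example_unmap_everything(VFIOContainerBase *bcontainer)
{
    /* assumption: iova/size are not meaningful when unmap_all is true */
    vfio_container_dma_unmap(bcontainer, 0, 0, NULL, true);
}
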
+ */ bool (*setup)(VFIOContainerBase *bcontainer, Error **errp); + + /** + * @listener_begin + * + * Called at the beginning of an address space update transaction. + * See #MemoryListener. + * + * @bcontainer: #VFIOContainerBase + */ + void (*listener_begin)(VFIOContainerBase *bcontainer); + + /** + * @listener_commit + * + * Called at the end of an address space update transaction, + * See #MemoryListener. + * + * @bcontainer: #VFIOContainerBase + */ + void (*listener_commit)(VFIOContainerBase *bcontainer); + + /** + * @dma_map + * + * Map an address range into the container. Note that the memory region is + * referenced within an RCU read lock region across this call. + * + * @bcontainer: #VFIOContainerBase to use + * @iova: start address to map + * @size: size of the range to map + * @vaddr: process virtual address of mapping + * @readonly: true if mapping should be readonly + * @mr: the memory region for this mapping + * + * Returns 0 to indicate success and -errno otherwise. + */ int (*dma_map)(const VFIOContainerBase *bcontainer, hwaddr iova, ram_addr_t size, - void *vaddr, bool readonly); + void *vaddr, bool readonly, MemoryRegion *mr); + /** + * @dma_map_file + * + * Map a file range for the container. + * + * @bcontainer: #VFIOContainerBase to use for map + * @iova: start address to map + * @size: size of the range to map + * @fd: descriptor of the file to map + * @start: starting file offset of the range to map + * @readonly: map read only if true + */ + int (*dma_map_file)(const VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + int fd, unsigned long start, bool readonly); + /** + * @dma_unmap + * + * Unmap an address range from the container. + * + * @bcontainer: #VFIOContainerBase to use for unmap + * @iova: start address to unmap + * @size: size of the range to unmap + * @iotlb: The IOMMU TLB mapping entry (or NULL) + * @unmap_all: if set, unmap the entire address space + * + * Returns 0 to indicate success and -errno otherwise. + */ int (*dma_unmap)(const VFIOContainerBase *bcontainer, hwaddr iova, ram_addr_t size, - IOMMUTLBEntry *iotlb); + IOMMUTLBEntry *iotlb, bool unmap_all); + + + /** + * @attach_device + * + * Associate the given device with a container and do some related + * initialization of the device context. + * + * @name: name of the device + * @vbasedev: the device + * @as: address space to use + * @errp: pointer to Error*, to store an error if it happens. + * + * Returns true to indicate success and false for error. + */ bool (*attach_device)(const char *name, VFIODevice *vbasedev, AddressSpace *as, Error **errp); + + /* + * @detach_device + * + * Detach the given device from its container and clean up any necessary + * state. + * + * @vbasedev: the device to disassociate + */ void (*detach_device)(VFIODevice *vbasedev); /* migration feature */ @@ -132,7 +238,7 @@ struct VFIOIOMMUClass { * @start: indicates whether to start or stop dirty pages tracking * @errp: pointer to Error*, to store an error if it happens. * - * Returns zero to indicate success and negative for error + * Returns zero to indicate success and negative for error. */ int (*set_dirty_page_tracking)(const VFIOContainerBase *bcontainer, bool start, Error **errp); @@ -147,7 +253,7 @@ struct VFIOIOMMUClass { * @size: size of iova range * @errp: pointer to Error*, to store an error if it happens. * - * Returns zero to indicate success and negative for error + * Returns zero to indicate success and negative for error. 
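For the dirty-tracking hooks documented here, a hedged sketch of the caller-side sequence built from the prototypes above (start tracking if needed, then report a range); example_sync_dirty_range is a hypothetical helper name:

static int example_sync_dirty_range(VFIOContainerBase *bcontainer,
                                    uint64_t iova, uint64_t size,
                                    ram_addr_t ram_addr, Error **errp)
{
    int ret;

    if (!vfio_container_dirty_tracking_is_started(bcontainer)) {
        ret = vfio_container_set_dirty_page_tracking(bcontainer, true, errp);
        if (ret) {
            return ret;
        }
    }

    /*
     * Per the reworked prototype, dirty state is reported against ram_addr
     * rather than filled into a caller-provided VFIOBitmap.
     */
    return vfio_container_query_dirty_bitmap(bcontainer, iova, size,
                                             ram_addr, errp);
}
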
*/ int (*query_dirty_bitmap)(const VFIOContainerBase *bcontainer, VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp); @@ -162,4 +268,11 @@ struct VFIOIOMMUClass { MemoryRegionSection *section); void (*release)(VFIOContainerBase *bcontainer); }; + +VFIORamDiscardListener *vfio_find_ram_discard_listener( + VFIOContainerBase *bcontainer, MemoryRegionSection *section); + +void vfio_container_region_add(VFIOContainerBase *bcontainer, + MemoryRegionSection *section, bool cpr_remap); + #endif /* HW_VFIO_VFIO_CONTAINER_BASE_H */ diff --git a/include/hw/vfio/vfio-container.h b/include/hw/vfio/vfio-container.h new file mode 100644 index 00000000000..21e5807e48e --- /dev/null +++ b/include/hw/vfio/vfio-container.h @@ -0,0 +1,38 @@ +/* + * VFIO container + * + * Copyright Red Hat, Inc. 2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_CONTAINER_H +#define HW_VFIO_CONTAINER_H + +#include "hw/vfio/vfio-container-base.h" +#include "hw/vfio/vfio-cpr.h" + +typedef struct VFIOContainer VFIOContainer; +typedef struct VFIODevice VFIODevice; + +typedef struct VFIOGroup { + int fd; + int groupid; + VFIOContainer *container; + QLIST_HEAD(, VFIODevice) device_list; + QLIST_ENTRY(VFIOGroup) next; + QLIST_ENTRY(VFIOGroup) container_next; + bool ram_block_discard_allowed; +} VFIOGroup; + +typedef struct VFIOContainer { + VFIOContainerBase bcontainer; + int fd; /* /dev/vfio/vfio, empowered by the attached groups */ + unsigned iommu_type; + QLIST_HEAD(, VFIOGroup) group_list; + VFIOContainerCPR cpr; +} VFIOContainer; + +OBJECT_DECLARE_SIMPLE_TYPE(VFIOContainer, VFIO_IOMMU_LEGACY); + +#endif /* HW_VFIO_CONTAINER_H */ diff --git a/include/hw/vfio/vfio-cpr.h b/include/hw/vfio/vfio-cpr.h new file mode 100644 index 00000000000..d37daffbc5b --- /dev/null +++ b/include/hw/vfio/vfio-cpr.h @@ -0,0 +1,87 @@ +/* + * VFIO CPR + * + * Copyright (c) 2025 Oracle and/or its affiliates. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_VFIO_CPR_H +#define HW_VFIO_VFIO_CPR_H + +#include "migration/misc.h" +#include "system/memory.h" + +struct VFIOContainer; +struct VFIOContainerBase; +struct VFIOGroup; +struct VFIODevice; +struct VFIOPCIDevice; +struct VFIOIOMMUFDContainer; +struct IOMMUFDBackend; + +typedef int (*dma_map_fn)(const struct VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, void *vaddr, + bool readonly, MemoryRegion *mr); + +typedef struct VFIOContainerCPR { + Error *blocker; + bool vaddr_unmapped; + NotifierWithReturn transfer_notifier; + MemoryListener remap_listener; +} VFIOContainerCPR; + +typedef struct VFIODeviceCPR { + Error *mdev_blocker; + Error *id_blocker; + uint32_t hwpt_id; + uint32_t ioas_id; +} VFIODeviceCPR; + +typedef struct VFIOPCICPR { + NotifierWithReturn transfer_notifier; +} VFIOPCICPR; + +bool vfio_legacy_cpr_register_container(struct VFIOContainer *container, + Error **errp); +void vfio_legacy_cpr_unregister_container(struct VFIOContainer *container); + +int vfio_cpr_reboot_notifier(NotifierWithReturn *notifier, MigrationEvent *e, + Error **errp); + +bool vfio_iommufd_cpr_register_container(struct VFIOIOMMUFDContainer *container, + Error **errp); +void vfio_iommufd_cpr_unregister_container( + struct VFIOIOMMUFDContainer *container); +bool vfio_iommufd_cpr_register_iommufd(struct IOMMUFDBackend *be, Error **errp); +void vfio_iommufd_cpr_unregister_iommufd(struct IOMMUFDBackend *be); +void vfio_iommufd_cpr_register_device(struct VFIODevice *vbasedev); +void vfio_iommufd_cpr_unregister_device(struct VFIODevice *vbasedev); +void vfio_cpr_load_device(struct VFIODevice *vbasedev); + +int vfio_cpr_group_get_device_fd(int d, const char *name); + +bool vfio_cpr_container_match(struct VFIOContainer *container, + struct VFIOGroup *group, int fd); + +void vfio_cpr_giommu_remap(struct VFIOContainerBase *bcontainer, + MemoryRegionSection *section); + +bool vfio_cpr_ram_discard_register_listener( + struct VFIOContainerBase *bcontainer, MemoryRegionSection *section); + +void vfio_cpr_save_vector_fd(struct VFIOPCIDevice *vdev, const char *name, + int nr, int fd); +int vfio_cpr_load_vector_fd(struct VFIOPCIDevice *vdev, const char *name, + int nr); +void vfio_cpr_delete_vector_fd(struct VFIOPCIDevice *vdev, const char *name, + int nr); + +extern const VMStateDescription vfio_cpr_pci_vmstate; +extern const VMStateDescription vmstate_cpr_vfio_devices; + +void vfio_cpr_add_kvm_notifier(void); +void vfio_cpr_pci_register_device(struct VFIOPCIDevice *vdev); +void vfio_cpr_pci_unregister_device(struct VFIOPCIDevice *vdev); + +#endif /* HW_VFIO_VFIO_CPR_H */ diff --git a/include/hw/vfio/vfio-device.h b/include/hw/vfio/vfio-device.h new file mode 100644 index 00000000000..df5ee5b41ba --- /dev/null +++ b/include/hw/vfio/vfio-device.h @@ -0,0 +1,293 @@ +/* + * VFIO Device interface + * + * Copyright Red Hat, Inc. 2012 + * + * Authors: + * Alex Williamson + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Based on qemu-kvm device-assignment: + * Adapted for KVM by Qumranet. 
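vfio-cpr.h above declares per-vector fd save/load/delete helpers. A sketch of how a device might reuse a preserved eventfd after a CPR restart, assuming vfio_cpr_load_vector_fd() returns a negative value when nothing was saved (that behaviour, the eventfd flags and the example_* name are assumptions, not taken from the patch):

#include <errno.h>
#include <sys/eventfd.h>

static int example_restore_or_create_vector_fd(struct VFIOPCIDevice *vdev,
                                               const char *name, int nr)
{
    /* after a CPR restart an fd may already have been preserved for us */
    int fd = vfio_cpr_load_vector_fd(vdev, name, nr);

    if (fd < 0) {
        fd = eventfd(0, EFD_CLOEXEC);
        if (fd < 0) {
            return -errno;
        }
        /* remember it so the next incarnation can pick it up again */
        vfio_cpr_save_vector_fd(vdev, name, nr, fd);
    }
    return fd;
}
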
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com) + * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com) + * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com) + * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com) + * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com) + */ + +#ifndef HW_VFIO_VFIO_COMMON_H +#define HW_VFIO_VFIO_COMMON_H + +#include "system/memory.h" +#include "qemu/queue.h" +#ifdef CONFIG_LINUX +#include +#endif +#include "system/system.h" +#include "hw/vfio/vfio-container-base.h" +#include "hw/vfio/vfio-cpr.h" +#include "system/host_iommu_device.h" +#include "system/iommufd.h" + +#define VFIO_MSG_PREFIX "vfio %s: " + +enum { + VFIO_DEVICE_TYPE_PCI = 0, + VFIO_DEVICE_TYPE_PLATFORM = 1, + VFIO_DEVICE_TYPE_CCW = 2, + VFIO_DEVICE_TYPE_AP = 3, +}; + +typedef struct VFIODeviceOps VFIODeviceOps; +typedef struct VFIODeviceIOOps VFIODeviceIOOps; +typedef struct VFIOMigration VFIOMigration; + +typedef struct IOMMUFDBackend IOMMUFDBackend; +typedef struct VFIOIOASHwpt VFIOIOASHwpt; +typedef struct VFIOUserProxy VFIOUserProxy; + +typedef struct VFIODevice { + QLIST_ENTRY(VFIODevice) next; + QLIST_ENTRY(VFIODevice) container_next; + QLIST_ENTRY(VFIODevice) global_next; + struct VFIOGroup *group; + VFIOContainerBase *bcontainer; + char *sysfsdev; + char *name; + DeviceState *dev; + int fd; + int type; + bool mdev; + bool reset_works; + bool needs_reset; + bool no_mmap; + bool ram_block_discard_allowed; + OnOffAuto enable_migration; + OnOffAuto migration_multifd_transfer; + OnOffAuto migration_load_config_after_iter; + uint64_t migration_max_queued_buffers_size; + bool migration_events; + bool use_region_fds; + VFIODeviceOps *ops; + VFIODeviceIOOps *io_ops; + unsigned int num_irqs; + unsigned int num_regions; + unsigned int flags; + VFIOMigration *migration; + Error *migration_blocker; + OnOffAuto pre_copy_dirty_page_tracking; + OnOffAuto device_dirty_page_tracking; + bool dirty_pages_supported; + bool dirty_tracking; /* Protected by BQL */ + bool iommu_dirty_tracking; + HostIOMMUDevice *hiod; + int devid; + IOMMUFDBackend *iommufd; + VFIOIOASHwpt *hwpt; + QLIST_ENTRY(VFIODevice) hwpt_next; + struct vfio_region_info **reginfo; + int *region_fds; + VFIODeviceCPR cpr; + VFIOUserProxy *proxy; +} VFIODevice; + +struct VFIODeviceOps { + void (*vfio_compute_needs_reset)(VFIODevice *vdev); + int (*vfio_hot_reset_multi)(VFIODevice *vdev); + void (*vfio_eoi)(VFIODevice *vdev); + Object *(*vfio_get_object)(VFIODevice *vdev); + + /** + * @vfio_save_config + * + * Save device config state + * + * @vdev: #VFIODevice for which to save the config + * @f: #QEMUFile where to send the data + * @errp: pointer to Error*, to store an error if it happens. + * + * Returns zero to indicate success and negative for error + */ + int (*vfio_save_config)(VFIODevice *vdev, QEMUFile *f, Error **errp); + + /** + * @vfio_load_config + * + * Load device config state + * + * @vdev: #VFIODevice for which to load the config + * @f: #QEMUFile where to get the data + * + * Returns zero to indicate success and negative for error + */ + int (*vfio_load_config)(VFIODevice *vdev, QEMUFile *f); +}; + +/* + * Given a return value of either a short number of bytes read or -errno, + * construct a meaningful error message. + */ +#define strreaderror(ret) \ + (ret < 0 ? strerror(-ret) : "short read") + +/* + * Given a return value of either a short number of bytes written or -errno, + * construct a meaningful error message. + */ +#define strwriteerror(ret) \ + (ret < 0 ? 
strerror(-ret) : "short write") + +void vfio_device_irq_disable(VFIODevice *vbasedev, int index); +void vfio_device_irq_unmask(VFIODevice *vbasedev, int index); +void vfio_device_irq_mask(VFIODevice *vbasedev, int index); +bool vfio_device_irq_set_signaling(VFIODevice *vbasedev, int index, int subindex, + int action, int fd, Error **errp); + +void vfio_device_reset_handler(void *opaque); +bool vfio_device_is_mdev(VFIODevice *vbasedev); +bool vfio_device_hiod_create_and_realize(VFIODevice *vbasedev, + const char *typename, Error **errp); +bool vfio_device_attach(char *name, VFIODevice *vbasedev, + AddressSpace *as, Error **errp); +bool vfio_device_attach_by_iommu_type(const char *iommu_type, char *name, + VFIODevice *vbasedev, AddressSpace *as, + Error **errp); +void vfio_device_detach(VFIODevice *vbasedev); +VFIODevice *vfio_get_vfio_device(Object *obj); + +typedef QLIST_HEAD(VFIODeviceList, VFIODevice) VFIODeviceList; +extern VFIODeviceList vfio_device_list; + +#ifdef CONFIG_LINUX +/* + * How devices communicate with the server. The default option is through + * ioctl() to the kernel VFIO driver, but vfio-user can use a socket to a remote + * process. + */ +struct VFIODeviceIOOps { + /** + * @device_feature + * + * Fill in feature info for the given device. + * + * @vdev: #VFIODevice to use + * @feat: feature information to fill in + * + * Returns 0 on success or -errno. + */ + int (*device_feature)(VFIODevice *vdev, struct vfio_device_feature *feat); + + /** + * @get_region_info + * + * Get the information for a given region on the device. + * + * @vdev: #VFIODevice to use + * @info: set @info->index to the region index to look up; the rest of the + * struct will be filled in on success + * @fd: pointer to the fd for the region; will be -1 if not found + * + * Returns 0 on success or -errno. + */ + int (*get_region_info)(VFIODevice *vdev, + struct vfio_region_info *info, int *fd); + + /** + * @get_irq_info + * + * @vdev: #VFIODevice to use + * @irq: set @irq->index to the IRQ index to look up; the rest of the struct + * will be filled in on success + * + * Returns 0 on success or -errno. + */ + int (*get_irq_info)(VFIODevice *vdev, struct vfio_irq_info *irq); + + /** + * @set_irqs + * + * Configure IRQs. + * + * @vdev: #VFIODevice to use + * @irqs: IRQ configuration as defined by VFIO docs. + * + * Returns 0 on success or -errno. + */ + int (*set_irqs)(VFIODevice *vdev, struct vfio_irq_set *irqs); + + /** + * @region_read + * + * Read part of a region. + * + * @vdev: #VFIODevice to use + * @nr: region index + * @off: offset within the region + * @size: size in bytes to read + * @data: buffer to read into + * + * Returns number of bytes read on success or -errno. + */ + int (*region_read)(VFIODevice *vdev, uint8_t nr, off_t off, uint32_t size, + void *data); + + /** + * @region_write + * + * Write part of a region. + * + * @vdev: #VFIODevice to use + * @nr: region index + * @off: offset within the region + * @size: size in bytes to write + * @data: buffer to write from + * @post: true if this is a posted write + * + * Returns number of bytes write on success or -errno. 
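The strreaderror()/strwriteerror() helpers and the VFIODeviceIOOps callbacks above combine naturally in error reporting. A small sketch (example_read_u32 is a hypothetical wrapper; it only relies on the io_ops and name fields shown in VFIODevice):

static int example_read_u32(VFIODevice *vbasedev, uint8_t nr, off_t off,
                            uint32_t *val, Error **errp)
{
    int ret = vbasedev->io_ops->region_read(vbasedev, nr, off,
                                            sizeof(*val), val);

    if (ret != (int)sizeof(*val)) {
        /* strreaderror() turns -errno into a message, short reads into text */
        error_setg(errp, VFIO_MSG_PREFIX "region %u read failed at 0x%lx: %s",
                   vbasedev->name, nr, (unsigned long)off, strreaderror(ret));
        return ret < 0 ? ret : -EIO;
    }
    return 0;
}
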
+ */ + int (*region_write)(VFIODevice *vdev, uint8_t nr, off_t off, uint32_t size, + void *data, bool post); +}; + +void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainerBase *bcontainer, + struct vfio_device_info *info); + +void vfio_device_unprepare(VFIODevice *vbasedev); + +bool vfio_device_get_viommu_flags_want_nesting(VFIODevice *vbasedev); + +int vfio_device_get_region_info(VFIODevice *vbasedev, int index, + struct vfio_region_info **info); +int vfio_device_get_region_info_type(VFIODevice *vbasedev, uint32_t type, + uint32_t subtype, struct vfio_region_info **info); + +/** + * Return the fd for mapping this region. This is either the device's fd (for + * e.g. kernel vfio), or a per-region fd (for vfio-user). + * + * @vbasedev: #VFIODevice to use + * @index: region index + * + * Returns the fd. + */ +int vfio_device_get_region_fd(VFIODevice *vbasedev, int index); + +bool vfio_device_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type); + +int vfio_device_get_irq_info(VFIODevice *vbasedev, int index, + struct vfio_irq_info *info); +#endif + +/* Returns 0 on success, or a negative errno. */ +bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp); +void vfio_device_free_name(VFIODevice *vbasedev); +void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp); +void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops, + DeviceState *dev, bool ram_discard); +int vfio_device_get_aw_bits(VFIODevice *vdev); + +void vfio_kvm_device_close(void); +#endif /* HW_VFIO_VFIO_COMMON_H */ diff --git a/include/hw/vfio/vfio-migration.h b/include/hw/vfio/vfio-migration.h new file mode 100644 index 00000000000..0d4ecd33d5d --- /dev/null +++ b/include/hw/vfio/vfio-migration.h @@ -0,0 +1,16 @@ +/* + * VFIO migration interface + * + * Copyright Red Hat, Inc. 2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_VFIO_MIGRATION_H +#define HW_VFIO_VFIO_MIGRATION_H + +bool vfio_migration_active(void); +int64_t vfio_migration_bytes_transferred(void); +void vfio_migration_reset_bytes_transferred(void); + +#endif /* HW_VFIO_VFIO_MIGRATION_H */ diff --git a/include/hw/vfio/vfio-platform.h b/include/hw/vfio/vfio-platform.h index c414c3dffcc..256d8500b70 100644 --- a/include/hw/vfio/vfio-platform.h +++ b/include/hw/vfio/vfio-platform.h @@ -17,7 +17,7 @@ #define HW_VFIO_VFIO_PLATFORM_H #include "hw/sysbus.h" -#include "hw/vfio/vfio-common.h" +#include "hw/vfio/vfio-device.h" #include "qemu/event_notifier.h" #include "qemu/queue.h" #include "qom/object.h" @@ -47,6 +47,8 @@ typedef struct VFIOINTp { /* function type for user side eventfd handler */ typedef void (*eventfd_user_side_handler_t)(VFIOINTp *intp); +typedef struct VFIORegion VFIORegion; + struct VFIOPlatformDevice { SysBusDevice sbdev; VFIODevice vbasedev; /* not a QOM object */ diff --git a/include/hw/vfio/vfio-region.h b/include/hw/vfio/vfio-region.h new file mode 100644 index 00000000000..ede6e0c8f99 --- /dev/null +++ b/include/hw/vfio/vfio-region.h @@ -0,0 +1,48 @@ +/* + * VFIO region + * + * Copyright Red Hat, Inc. 
2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_REGION_H +#define HW_VFIO_REGION_H + +#include "system/memory.h" + +typedef struct VFIOMmap { + MemoryRegion mem; + void *mmap; + off_t offset; + size_t size; +} VFIOMmap; + +typedef struct VFIODevice VFIODevice; + +typedef struct VFIORegion { + struct VFIODevice *vbasedev; + off_t fd_offset; /* offset of region within device fd */ + MemoryRegion *mem; /* slow, read/write access */ + size_t size; + uint32_t flags; /* VFIO region flags (rd/wr/mmap) */ + uint32_t nr_mmaps; + VFIOMmap *mmaps; + uint8_t nr; /* cache the region number for debug */ + bool post_wr; /* writes can be posted */ +} VFIORegion; + + +void vfio_region_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size); +uint64_t vfio_region_read(void *opaque, + hwaddr addr, unsigned size); +int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region, + int index, const char *name); +int vfio_region_mmap(VFIORegion *region); +void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled); +void vfio_region_unmap(VFIORegion *region); +void vfio_region_exit(VFIORegion *region); +void vfio_region_finalize(VFIORegion *region); + +#endif /* HW_VFIO_REGION_H */ diff --git a/include/hw/virtio/cbor-helpers.h b/include/hw/virtio/cbor-helpers.h new file mode 100644 index 00000000000..f25fd481add --- /dev/null +++ b/include/hw/virtio/cbor-helpers.h @@ -0,0 +1,45 @@ +/* + * QEMU CBOR helpers + * + * Copyright (c) 2024 Dorjoy Chowdhury + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#ifndef QEMU_VIRTIO_CBOR_HELPERS_H +#define QEMU_VIRTIO_CBOR_HELPERS_H + +#include + +bool qemu_cbor_map_add(cbor_item_t *map, cbor_item_t *key, cbor_item_t *value); + +bool qemu_cbor_array_push(cbor_item_t *array, cbor_item_t *value); + +bool qemu_cbor_add_bool_to_map(cbor_item_t *map, const char *key, bool value); + +bool qemu_cbor_add_uint8_to_map(cbor_item_t *map, const char *key, + uint8_t value); + +bool qemu_cbor_add_map_to_map(cbor_item_t *map, const char *key, + size_t nested_map_size, + cbor_item_t **nested_map); + +bool qemu_cbor_add_bytestring_to_map(cbor_item_t *map, const char *key, + uint8_t *arr, size_t len); + +bool qemu_cbor_add_null_to_map(cbor_item_t *map, const char *key); + +bool qemu_cbor_add_string_to_map(cbor_item_t *map, const char *key, + const char *value); + +bool qemu_cbor_add_uint8_array_to_map(cbor_item_t *map, const char *key, + uint8_t *arr, size_t len); + +bool qemu_cbor_add_uint8_key_bytestring_to_map(cbor_item_t *map, uint8_t key, + uint8_t *buf, size_t len); + +bool qemu_cbor_add_uint64_to_map(cbor_item_t *map, const char *key, + uint64_t value); +#endif diff --git a/include/hw/virtio/iothread-vq-mapping.h b/include/hw/virtio/iothread-vq-mapping.h new file mode 100644 index 00000000000..57335c3703c --- /dev/null +++ b/include/hw/virtio/iothread-vq-mapping.h @@ -0,0 +1,45 @@ +/* + * IOThread Virtqueue Mapping + * + * Copyright Red Hat, Inc + * + * SPDX-License-Identifier: GPL-2.0-only + */ + +#ifndef HW_VIRTIO_IOTHREAD_VQ_MAPPING_H +#define HW_VIRTIO_IOTHREAD_VQ_MAPPING_H + +#include "qapi/error.h" +#include "qapi/qapi-types-virtio.h" + +/** + * iothread_vq_mapping_apply: + * @list: The mapping of virtqueues to IOThreads. + * @vq_aio_context: The array of AioContext pointers to fill in. + * @num_queues: The length of @vq_aio_context. 
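A short sketch of the qemu_cbor_* map helpers declared above, paired with the libcbor calls they are meant to complement (cbor_new_definite_map(), cbor_serialize_alloc(), cbor_decref()); the map layout and example_build_status_map are invented for illustration:

static bool example_build_status_map(unsigned char **out, size_t *out_len)
{
    bool ok = false;
    cbor_item_t *map = cbor_new_definite_map(2);   /* two entries below */

    if (!map) {
        return false;
    }
    if (!qemu_cbor_add_string_to_map(map, "status", "ok") ||
        !qemu_cbor_add_uint8_to_map(map, "version", 1)) {
        goto out;
    }
    /* serialize into a freshly allocated buffer owned by the caller */
    ok = cbor_serialize_alloc(map, out, out_len) != 0;
out:
    cbor_decref(&map);
    return ok;
}
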
+ * @errp: If an error occurs, a pointer to the area to store the error. + * + * Fill in the AioContext for each virtqueue in the @vq_aio_context array given + * the iothread-vq-mapping parameter in @list. + * + * iothread_vq_mapping_cleanup() must be called to free IOThread object + * references after this function returns success. + * + * Returns: %true on success, %false on failure. + **/ +bool iothread_vq_mapping_apply( + IOThreadVirtQueueMappingList *list, + AioContext **vq_aio_context, + uint16_t num_queues, + Error **errp); + +/** + * iothread_vq_mapping_cleanup: + * @list: The mapping of virtqueues to IOThreads. + * + * Release IOThread object references that were acquired by + * iothread_vq_mapping_apply(). + */ +void iothread_vq_mapping_cleanup(IOThreadVirtQueueMappingList *list); + +#endif /* HW_VIRTIO_IOTHREAD_VQ_MAPPING_H */ diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h index 70c2e8ffeee..d6df209a2f0 100644 --- a/include/hw/virtio/vhost-backend.h +++ b/include/hw/virtio/vhost-backend.h @@ -11,7 +11,7 @@ #ifndef VHOST_BACKEND_H #define VHOST_BACKEND_H -#include "exec/memory.h" +#include "system/memory.h" typedef enum VhostBackendType { VHOST_BACKEND_TYPE_NONE = 0, diff --git a/include/hw/virtio/vhost-scsi-common.h b/include/hw/virtio/vhost-scsi-common.h index c5d2c094550..d54d9c916f2 100644 --- a/include/hw/virtio/vhost-scsi-common.h +++ b/include/hw/virtio/vhost-scsi-common.h @@ -40,7 +40,7 @@ struct VHostSCSICommon { }; int vhost_scsi_common_start(VHostSCSICommon *vsc, Error **errp); -void vhost_scsi_common_stop(VHostSCSICommon *vsc); +int vhost_scsi_common_stop(VHostSCSICommon *vsc); char *vhost_scsi_common_get_fw_dev_path(FWPathProvider *p, BusState *bus, DeviceState *dev); void vhost_scsi_common_set_config(VirtIODevice *vdev, const uint8_t *config); diff --git a/include/hw/virtio/vhost-user-blk.h b/include/hw/virtio/vhost-user-blk.h index ea085ee1ed9..a10f7856725 100644 --- a/include/hw/virtio/vhost-user-blk.h +++ b/include/hw/virtio/vhost-user-blk.h @@ -50,6 +50,8 @@ struct VHostUserBlk { bool connected; /* vhost_user_blk_start/vhost_user_blk_stop */ bool started_vu; + + bool skip_get_vring_base_on_force_shutdown; }; #endif diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h index 324cd8663ab..9a3f238b43c 100644 --- a/include/hw/virtio/vhost-user.h +++ b/include/hw/virtio/vhost-user.h @@ -54,6 +54,7 @@ typedef struct VhostUserHostNotifier { void *addr; void *unmap_addr; int idx; + bool destroy; } VhostUserHostNotifier; /** diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h index 0a9575b469b..449bf5c840c 100644 --- a/include/hw/virtio/vhost-vdpa.h +++ b/include/hw/virtio/vhost-vdpa.h @@ -43,7 +43,21 @@ typedef struct vhost_vdpa_shared { struct vhost_vdpa_iova_range iova_range; QLIST_HEAD(, vdpa_iommu) iommu_list; - /* IOVA mapping used by the Shadow Virtqueue */ + /* + * IOVA mapping used by the Shadow Virtqueue + * + * It is shared among all ASID for simplicity, whether CVQ shares ASID with + * guest or not: + * - Memory listener need access to guest's memory addresses allocated in + * the IOVA tree. + * - There should be plenty of IOVA address space for both ASID not to + * worry about collisions between them. Guest's translations are still + * validated with virtio virtqueue_pop so there is no risk for the guest + * to access memory that it shouldn't. + * + * To allocate a iova tree per ASID is doable but it complicates the code + * and it is not worth it for the moment. 
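Returning to the iothread-vq-mapping helpers declared in iothread-vq-mapping.h above, a hedged sketch of the realize/unrealize pairing (the example_* wrappers and the g_new0() allocation strategy are illustrative only):

static bool example_realize_vq_mapping(IOThreadVirtQueueMappingList *list,
                                       uint16_t num_queues,
                                       AioContext ***vq_ctx_out, Error **errp)
{
    AioContext **vq_ctx = g_new0(AioContext *, num_queues);

    if (!iothread_vq_mapping_apply(list, vq_ctx, num_queues, errp)) {
        g_free(vq_ctx);
        return false;
    }
    *vq_ctx_out = vq_ctx;
    return true;
}

static void example_unrealize_vq_mapping(IOThreadVirtQueueMappingList *list,
                                         AioContext **vq_ctx)
{
    /* drop the IOThread references taken by iothread_vq_mapping_apply() */
    iothread_vq_mapping_cleanup(list);
    g_free(vq_ctx);
}
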
+ */ VhostIOVATree *iova_tree; /* Copy of backend features */ @@ -51,6 +65,12 @@ typedef struct vhost_vdpa_shared { bool iotlb_batch_begin_sent; + /* + * The memory listener has been registered, so DMA maps have been sent to + * the device. + */ + bool listener_registered; + /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */ bool shadow_data; diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h index 75a74e8a995..01bf6062af5 100644 --- a/include/hw/virtio/vhost-vsock-common.h +++ b/include/hw/virtio/vhost-vsock-common.h @@ -42,7 +42,7 @@ struct VHostVSockCommon { }; int vhost_vsock_common_start(VirtIODevice *vdev); -void vhost_vsock_common_stop(VirtIODevice *vdev); +int vhost_vsock_common_stop(VirtIODevice *vdev); int vhost_vsock_common_pre_save(void *opaque); int vhost_vsock_common_post_load(void *opaque, int version_id); void vhost_vsock_common_realize(VirtIODevice *vdev); diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h index d75faf46e9a..66be6afc884 100644 --- a/include/hw/virtio/vhost.h +++ b/include/hw/virtio/vhost.h @@ -1,9 +1,10 @@ #ifndef VHOST_H #define VHOST_H +#include "net/vhost_net.h" #include "hw/virtio/vhost-backend.h" #include "hw/virtio/virtio.h" -#include "exec/memory.h" +#include "system/memory.h" #define VHOST_F_DEVICE_IOTLB 63 #define VHOST_USER_F_PROTOCOL_FEATURES 30 @@ -143,6 +144,10 @@ struct vhost_net { struct vhost_dev dev; struct vhost_virtqueue vqs[2]; int backend; + const int *feature_bits; + int max_tx_queue_size; + SaveAcketFeatures *save_acked_features; + bool is_vhost_user; NetClientState *nc; }; @@ -171,6 +176,10 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, */ void vhost_dev_cleanup(struct vhost_dev *hdev); +void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev, + VirtIODevice *vdev, + unsigned int nvqs); + /** * vhost_dev_enable_notifiers() - enable event notifiers * @hdev: common vhost_dev structure @@ -228,8 +237,25 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings); * Stop the vhost device. After the device is stopped the notifiers * can be disabled (@vhost_dev_disable_notifiers) and the device can * be torn down (@vhost_dev_cleanup). + * + * Return: 0 on success, != 0 on error when stopping dev. + */ +int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings); + +/** + * vhost_dev_force_stop() - force stop the vhost device + * @hdev: common vhost_dev structure + * @vdev: the VirtIODevice structure + * @vrings: true to have vrings disabled in this call + * + * Force stop the vhost device. After the device is stopped the notifiers + * can be disabled (@vhost_dev_disable_notifiers) and the device can + * be torn down (@vhost_dev_cleanup). Unlike @vhost_dev_stop, this doesn't + * attempt to flush in-flight backend requests by skipping GET_VRING_BASE + * entirely. 
*/ -void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings); +int vhost_dev_force_stop(struct vhost_dev *hdev, VirtIODevice *vdev, + bool vrings); /** * DOC: vhost device configuration handling @@ -329,13 +355,11 @@ int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write); int vhost_virtqueue_start(struct vhost_dev *dev, struct VirtIODevice *vdev, struct vhost_virtqueue *vq, unsigned idx); -void vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev, - struct vhost_virtqueue *vq, unsigned idx); +int vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev, + struct vhost_virtqueue *vq, unsigned idx); void vhost_dev_reset_inflight(struct vhost_inflight *inflight); void vhost_dev_free_inflight(struct vhost_inflight *inflight); -void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f); -int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f); int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev); int vhost_dev_set_inflight(struct vhost_dev *dev, struct vhost_inflight *inflight); @@ -363,7 +387,14 @@ static inline int vhost_reset_device(struct vhost_dev *hdev) * Returns true if the device supports these commands, and false if it * does not. */ +#ifdef CONFIG_VHOST bool vhost_supports_device_state(struct vhost_dev *dev); +#else +static inline bool vhost_supports_device_state(struct vhost_dev *dev) +{ + return false; +} +#endif /** * vhost_set_device_state_fd(): Begin transfer of internal state from/to @@ -446,7 +477,15 @@ int vhost_check_device_state(struct vhost_dev *dev, Error **errp); * * Returns 0 on success, and -errno otherwise. */ +#ifdef CONFIG_VHOST int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp); +#else +static inline int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, + Error **errp) +{ + return -ENOSYS; +} +#endif /** * vhost_load_backend_state(): High-level function to load a vhost @@ -463,6 +502,14 @@ int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp); * * Returns 0 on success, and -errno otherwise. 
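vhost_dev_stop() now returns a status and vhost_dev_force_stop() skips GET_VRING_BASE. A minimal sketch of a caller choosing between them and propagating the result (example_stop_vhost is a hypothetical helper; disabling notifiers afterwards follows the documented stop/teardown order):

static int example_stop_vhost(struct vhost_dev *hdev, VirtIODevice *vdev,
                              bool force)
{
    int ret;

    if (force) {
        /* no GET_VRING_BASE: in-flight backend requests are not flushed */
        ret = vhost_dev_force_stop(hdev, vdev, true);
    } else {
        ret = vhost_dev_stop(hdev, vdev, true);
    }

    /* notifiers can be disabled once the device is stopped */
    vhost_dev_disable_notifiers(hdev, vdev);
    return ret;
}
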
*/ +#ifdef CONFIG_VHOST int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp); +#else +static inline int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f, + Error **errp) +{ + return -ENOSYS; +} +#endif #endif diff --git a/include/hw/virtio/virtio-acpi.h b/include/hw/virtio/virtio-acpi.h index cace2a315f4..cdfbd943ae8 100644 --- a/include/hw/virtio/virtio-acpi.h +++ b/include/hw/virtio/virtio-acpi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * ACPI support for virtio */ diff --git a/include/hw/virtio/virtio-balloon.h b/include/hw/virtio/virtio-balloon.h index 5139cf8ab68..0456c211c6e 100644 --- a/include/hw/virtio/virtio-balloon.h +++ b/include/hw/virtio/virtio-balloon.h @@ -16,8 +16,9 @@ #define QEMU_VIRTIO_BALLOON_H #include "standard-headers/linux/virtio_balloon.h" +#include "hw/resettable.h" #include "hw/virtio/virtio.h" -#include "sysemu/iothread.h" +#include "system/iothread.h" #include "qom/object.h" #define TYPE_VIRTIO_BALLOON "virtio-balloon-device" @@ -71,6 +72,9 @@ struct VirtIOBalloon { bool qemu_4_0_config_size; uint32_t poison_val; + + /* State of the resettable container */ + ResettableState reset_state; }; #endif diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h index 5c14110c4b1..3d8dee7ec15 100644 --- a/include/hw/virtio/virtio-blk.h +++ b/include/hw/virtio/virtio-blk.h @@ -17,14 +17,14 @@ #include "standard-headers/linux/virtio_blk.h" #include "hw/virtio/virtio.h" #include "hw/block/block.h" -#include "sysemu/iothread.h" -#include "sysemu/block-backend.h" -#include "sysemu/block-ram-registrar.h" +#include "system/iothread.h" +#include "system/block-backend.h" +#include "system/block-ram-registrar.h" #include "qom/object.h" #include "qapi/qapi-types-virtio.h" #define TYPE_VIRTIO_BLK "virtio-blk-device" -OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlock, VIRTIO_BLK) +OBJECT_DECLARE_TYPE(VirtIOBlock, VirtIOBlkClass, VIRTIO_BLK) /* This is the last element of the write scatter-gather list */ struct virtio_blk_inhdr @@ -100,6 +100,15 @@ typedef struct MultiReqBuffer { bool is_write; } MultiReqBuffer; +typedef struct VirtIOBlkClass { + /*< private >*/ + VirtioDeviceClass parent; + /*< public >*/ + bool (*handle_unknown_request)(VirtIOBlockReq *req, MultiReqBuffer *mrb, + uint32_t type); +} VirtIOBlkClass; + void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq); +void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status); #endif diff --git a/include/hw/virtio/virtio-crypto.h b/include/hw/virtio/virtio-crypto.h index 348749f5d5c..2d56513693e 100644 --- a/include/hw/virtio/virtio-crypto.h +++ b/include/hw/virtio/virtio-crypto.h @@ -16,8 +16,8 @@ #include "standard-headers/linux/virtio_crypto.h" #include "hw/virtio/virtio.h" -#include "sysemu/iothread.h" -#include "sysemu/cryptodev.h" +#include "system/iothread.h" +#include "system/cryptodev.h" #include "qom/object.h" diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h index 7a59379f5a7..9f16f89a36d 100644 --- a/include/hw/virtio/virtio-gpu.h +++ b/include/hw/virtio/virtio-gpu.h @@ -19,7 +19,8 @@ #include "ui/console.h" #include "hw/virtio/virtio.h" #include "qemu/log.h" -#include "sysemu/vhost-user-backend.h" +#include "system/vhost-user-backend.h" +#include "qapi/qapi-types-virtio.h" #include "standard-headers/linux/virtio_gpu.h" #include "standard-headers/linux/virtio_ids.h" @@ -51,9 +52,7 @@ struct virtio_gpu_simple_resource { unsigned int iov_cnt; uint32_t 
scanout_bitmask; pixman_image_t *image; -#ifdef WIN32 - HANDLE handle; -#endif + qemu_pixman_shareable share_handle; uint64_t hostmem; uint64_t blob_size; @@ -99,6 +98,8 @@ enum virtio_gpu_base_conf_flags { VIRTIO_GPU_FLAG_BLOB_ENABLED, VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED, VIRTIO_GPU_FLAG_RUTABAGA_ENABLED, + VIRTIO_GPU_FLAG_VENUS_ENABLED, + VIRTIO_GPU_FLAG_RESOURCE_UUID_ENABLED, }; #define virtio_gpu_virgl_enabled(_cfg) \ @@ -115,8 +116,12 @@ enum virtio_gpu_base_conf_flags { (_cfg.flags & (1 << VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED)) #define virtio_gpu_rutabaga_enabled(_cfg) \ (_cfg.flags & (1 << VIRTIO_GPU_FLAG_RUTABAGA_ENABLED)) +#define virtio_gpu_resource_uuid_enabled(_cfg) \ + (_cfg.flags & (1 << VIRTIO_GPU_FLAG_RESOURCE_UUID_ENABLED)) #define virtio_gpu_hostmem_enabled(_cfg) \ (_cfg.hostmem > 0) +#define virtio_gpu_venus_enabled(_cfg) \ + (_cfg.flags & (1 << VIRTIO_GPU_FLAG_VENUS_ENABLED)) struct virtio_gpu_base_conf { uint32_t max_outputs; @@ -124,6 +129,7 @@ struct virtio_gpu_base_conf { uint32_t xres; uint32_t yres; uint64_t hostmem; + VirtIOGPUOutputList *outputs; }; struct virtio_gpu_ctrl_command { @@ -163,6 +169,7 @@ struct VirtIOGPUBaseClass { #define VIRTIO_GPU_BASE_PROPERTIES(_state, _conf) \ DEFINE_PROP_UINT32("max_outputs", _state, _conf.max_outputs, 1), \ + DEFINE_PROP_VIRTIO_GPU_OUTPUT_LIST("outputs", _state, _conf.outputs), \ DEFINE_PROP_BIT("edid", _state, _conf.flags, \ VIRTIO_GPU_FLAG_EDID_ENABLED, true), \ DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1280), \ @@ -196,8 +203,6 @@ struct VirtIOGPU { uint64_t hostmem; bool processing_cmdq; - QEMUTimer *fence_poll; - QEMUTimer *print_stats; uint32_t inflight; struct { @@ -211,6 +216,8 @@ struct VirtIOGPU { QTAILQ_HEAD(, VGPUDMABuf) bufs; VGPUDMABuf *primary[VIRTIO_GPU_MAX_SCANOUTS]; } dmabuf; + + GArray *capset_ids; }; struct VirtIOGPUClass { @@ -226,11 +233,23 @@ struct VirtIOGPUClass { Error **errp); }; +/* VirtIOGPUGL renderer states */ +typedef enum { + RS_START, /* starting state */ + RS_INIT_FAILED, /* failed initialisation */ + RS_INITED, /* initialised and working */ + RS_RESET, /* inited and reset pending, moves to start after reset */ +} RenderState; + struct VirtIOGPUGL { struct VirtIOGPU parent_obj; - bool renderer_inited; - bool renderer_reset; + RenderState renderer_state; + + QEMUTimer *fence_poll; + QEMUTimer *print_stats; + + QEMUBH *cmdq_resume_bh; }; struct VhostUserGPU { @@ -320,6 +339,21 @@ void virtio_gpu_update_cursor_data(VirtIOGPU *g, struct virtio_gpu_scanout *s, uint32_t resource_id); +/** + * virtio_gpu_scanout_blob_to_fb() - fill out fb based on scanout data + * fb: the frame-buffer descriptor to fill out + * ss: the scanout blob data + * blob_size: size of scanout blob data + * + * This will check we have enough space for the frame taking into + * account that stride. 
+ * + * Returns true on success, otherwise logs guest error and returns false + */ +bool virtio_gpu_scanout_blob_to_fb(struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_set_scanout_blob *ss, + uint64_t blob_size); + /* virtio-gpu-udmabuf.c */ bool virtio_gpu_have_udmabuf(void); void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res); @@ -330,6 +364,13 @@ int virtio_gpu_update_dmabuf(VirtIOGPU *g, struct virtio_gpu_framebuffer *fb, struct virtio_gpu_rect *r); +void virtio_gpu_update_scanout(VirtIOGPU *g, + uint32_t scanout_id, + struct virtio_gpu_simple_resource *res, + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_rect *r); +void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id); + /* virtio-gpu-3d.c */ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd); @@ -337,6 +378,6 @@ void virtio_gpu_virgl_fence_poll(VirtIOGPU *g); void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g); void virtio_gpu_virgl_reset(VirtIOGPU *g); int virtio_gpu_virgl_init(VirtIOGPU *g); -int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g); +GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g); #endif diff --git a/include/hw/virtio/virtio-input.h b/include/hw/virtio/virtio-input.h index e69c0aeca38..e097b0b5217 100644 --- a/include/hw/virtio/virtio-input.h +++ b/include/hw/virtio/virtio-input.h @@ -4,7 +4,7 @@ #include "hw/virtio/vhost-user.h" #include "hw/virtio/vhost-user-base.h" #include "ui/input.h" -#include "sysemu/vhost-user-backend.h" +#include "system/vhost-user-backend.h" /* ----------------------------------------------------------------- */ /* virtio input protocol */ diff --git a/include/hw/virtio/virtio-iommu.h b/include/hw/virtio/virtio-iommu.h index 7db4210b160..3b86050f2cf 100644 --- a/include/hw/virtio/virtio-iommu.h +++ b/include/hw/virtio/virtio-iommu.h @@ -25,7 +25,7 @@ #include "hw/pci/pci.h" #include "qom/object.h" #include "qapi/qapi-types-virtio.h" -#include "sysemu/host_iommu_device.h" +#include "system/host_iommu_device.h" #define TYPE_VIRTIO_IOMMU "virtio-iommu-device" #define TYPE_VIRTIO_IOMMU_PCI "virtio-iommu-pci" diff --git a/include/hw/virtio/virtio-mem.h b/include/hw/virtio/virtio-mem.h index 5f5b02b8f93..e0ab31b45a4 100644 --- a/include/hw/virtio/virtio-mem.h +++ b/include/hw/virtio/virtio-mem.h @@ -14,9 +14,10 @@ #define HW_VIRTIO_MEM_H #include "standard-headers/linux/virtio_mem.h" +#include "hw/resettable.h" #include "hw/virtio/virtio.h" #include "qapi/qapi-types-misc.h" -#include "sysemu/hostmem.h" +#include "system/hostmem.h" #include "qom/object.h" #define TYPE_VIRTIO_MEM "virtio-mem" @@ -24,6 +25,10 @@ OBJECT_DECLARE_TYPE(VirtIOMEM, VirtIOMEMClass, VIRTIO_MEM) +#define TYPE_VIRTIO_MEM_SYSTEM_RESET "virtio-mem-system-reset" + +OBJECT_DECLARE_SIMPLE_TYPE(VirtioMemSystemReset, VIRTIO_MEM_SYSTEM_RESET) + #define VIRTIO_MEM_MEMDEV_PROP "memdev" #define VIRTIO_MEM_NODE_PROP "node" #define VIRTIO_MEM_SIZE_PROP "size" @@ -115,11 +120,21 @@ struct VirtIOMEM { /* listeners to notify on plug/unplug activity. */ QLIST_HEAD(, RamDiscardListener) rdl_list; + + /* Catch system resets -> qemu_devices_reset() only. 
*/ + VirtioMemSystemReset *system_reset; +}; + +struct VirtioMemSystemReset { + Object parent; + + ResettableState reset_state; + VirtIOMEM *vmem; }; struct VirtIOMEMClass { /* private */ - VirtIODevice parent; + VirtioDeviceClass parent_class; /* public */ void (*fill_device_info)(const VirtIOMEM *vmen, VirtioMEMDeviceInfo *vi); diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h index 060c23c04d2..73fdefc0dcb 100644 --- a/include/hw/virtio/virtio-net.h +++ b/include/hw/virtio/virtio-net.h @@ -102,7 +102,7 @@ typedef struct VirtioNetRscStat { /* Rsc unit general info used to checking if can coalescing */ typedef struct VirtioNetRscUnit { void *ip; /* ip header */ - uint16_t *ip_plen; /* data len pointer in ip header field */ + void *ip_plen; /* pointer to unaligned uint16_t data len in ip header */ struct tcp_header *tcp; /* tcp header */ uint16_t tcp_hdrlen; /* tcp header len */ uint16_t payload; /* pure payload without virtio/eth/ip/tcp */ @@ -144,7 +144,11 @@ typedef struct VirtioNetRssData { bool enabled_software_rss; bool redirect; bool populate_hash; - uint32_t hash_types; + bool peer_hash_available; + uint32_t runtime_hash_types; + uint32_t supported_hash_types; + uint32_t peer_hash_types; + OnOffAutoBit64 specified_hash_types; uint8_t key[VIRTIO_NET_RSS_MAX_KEY_SIZE]; uint16_t indirections_len; uint16_t *indirections_table; diff --git a/include/hw/virtio/virtio-nsm.h b/include/hw/virtio/virtio-nsm.h new file mode 100644 index 00000000000..57ddbbbf3f5 --- /dev/null +++ b/include/hw/virtio/virtio-nsm.h @@ -0,0 +1,49 @@ +/* + * AWS Nitro Secure Module (NSM) device + * + * Copyright (c) 2024 Dorjoy Chowdhury + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#ifndef QEMU_VIRTIO_NSM_H +#define QEMU_VIRTIO_NSM_H + +#include "crypto/hash.h" +#include "hw/virtio/virtio.h" +#include "qom/object.h" + +#define NSM_MAX_PCRS 32 + +#define TYPE_VIRTIO_NSM "virtio-nsm-device" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIONSM, VIRTIO_NSM) +#define VIRTIO_NSM_GET_PARENT_CLASS(obj) \ + OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_NSM) + +struct PCRInfo { + bool locked; + uint8_t data[QCRYPTO_HASH_DIGEST_LEN_SHA384]; +}; + +struct VirtIONSM { + VirtIODevice parent_obj; + + /* Only one vq - guest puts request and response buffers on it */ + VirtQueue *vq; + + /* NSM State */ + uint16_t max_pcrs; + struct PCRInfo pcrs[NSM_MAX_PCRS]; + char *digest; + char *module_id; + uint8_t version_major; + uint8_t version_minor; + uint8_t version_patch; + + bool (*extend_pcr)(VirtIONSM *vnsm, int ind, uint8_t *data, uint16_t len); + void (*lock_pcr)(VirtIONSM *vnsm, int ind); +}; + +#endif diff --git a/include/hw/virtio/virtio-pci.h b/include/hw/virtio/virtio-pci.h index 9e67ba38c74..eab5394898d 100644 --- a/include/hw/virtio/virtio-pci.h +++ b/include/hw/virtio/virtio-pci.h @@ -32,9 +32,7 @@ DECLARE_OBJ_CHECKERS(VirtioPCIBusState, VirtioPCIBusClass, enum { VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, - VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, - VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, VIRTIO_PCI_FLAG_ATS_BIT, VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, @@ -54,12 +52,6 @@ enum { * vcpu thread using ioeventfd for some devices. 
*/ #define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT) -/* virtio version flags */ -#define VIRTIO_PCI_FLAG_DISABLE_PCIE (1 << VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT) - -/* migrate extra state */ -#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA (1 << VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT) - /* have pio notification for modern device ? */ #define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY \ (1 << VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT) @@ -147,11 +139,15 @@ struct VirtIOPCIProxy { }; MemoryRegion modern_bar; MemoryRegion io_bar; + /* address space for VirtIOPCIRegions */ + AddressSpace modern_cfg_mem_as; + AddressSpace modern_cfg_io_as; uint32_t legacy_io_bar_idx; uint32_t msix_bar_idx; uint32_t modern_io_bar_idx; uint32_t modern_mem_bar_idx; int config_cap; + uint16_t last_pcie_cap_offset; uint32_t flags; bool disable_modern; bool ignore_backend_features; @@ -252,8 +248,8 @@ typedef struct VirtioPCIDeviceTypeInfo { size_t class_size; void (*instance_init)(Object *obj); void (*instance_finalize)(Object *obj); - void (*class_init)(ObjectClass *klass, void *data); - InterfaceInfo *interfaces; + void (*class_init)(ObjectClass *klass, const void *data); + const InterfaceInfo *interfaces; } VirtioPCIDeviceTypeInfo; /* Register virtio-pci type(s). @t must be static. */ diff --git a/include/hw/virtio/virtio-pmem.h b/include/hw/virtio/virtio-pmem.h index fc4fd1f7fe8..9cce600d0b2 100644 --- a/include/hw/virtio/virtio-pmem.h +++ b/include/hw/virtio/virtio-pmem.h @@ -36,7 +36,7 @@ struct VirtIOPMEM { struct VirtIOPMEMClass { /* private */ - VirtIODevice parent; + VirtioDeviceClass parent_class; /* public */ void (*fill_device_info)(const VirtIOPMEM *pmem, VirtioPMEMDeviceInfo *vi); diff --git a/include/hw/virtio/virtio-rng.h b/include/hw/virtio/virtio-rng.h index 82734255d99..7e6d27f9f08 100644 --- a/include/hw/virtio/virtio-rng.h +++ b/include/hw/virtio/virtio-rng.h @@ -13,7 +13,7 @@ #define QEMU_VIRTIO_RNG_H #include "hw/virtio/virtio.h" -#include "sysemu/rng.h" +#include "system/rng.h" #include "standard-headers/linux/virtio_rng.h" #include "qom/object.h" diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h index 7be01059185..31e852ed6c4 100644 --- a/include/hw/virtio/virtio-scsi.h +++ b/include/hw/virtio/virtio-scsi.h @@ -22,7 +22,8 @@ #include "hw/virtio/virtio.h" #include "hw/scsi/scsi.h" #include "chardev/char-fe.h" -#include "sysemu/iothread.h" +#include "qapi/qapi-types-virtio.h" +#include "system/iothread.h" #define TYPE_VIRTIO_SCSI_COMMON "virtio-scsi-common" OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSCSICommon, VIRTIO_SCSI_COMMON) @@ -60,6 +61,7 @@ struct VirtIOSCSIConf { CharBackend chardev; uint32_t boot_tpgt; IOThread *iothread; + IOThreadVirtQueueMappingList *iothread_vq_mapping_list; }; struct VirtIOSCSI; @@ -82,18 +84,14 @@ struct VirtIOSCSI { SCSIBus bus; int resetting; /* written from main loop thread, read from any thread */ + + QemuMutex event_lock; /* protects event_vq and events_dropped */ bool events_dropped; - /* - * TMFs deferred to main loop BH. These fields are protected by - * tmf_bh_lock. 
- */ - QemuMutex tmf_bh_lock; - QEMUBH *tmf_bh; - QTAILQ_HEAD(, VirtIOSCSIReq) tmf_bh_list; + QemuMutex ctrl_lock; /* protects ctrl_vq */ /* Fields for dataplane below */ - AioContext *ctx; /* one iothread per virtio-scsi-pci for now */ + AioContext **vq_aio_context; /* per-virtqueue AioContext pointer */ bool dataplane_started; bool dataplane_starting; @@ -111,6 +109,7 @@ void virtio_scsi_common_realize(DeviceState *dev, void virtio_scsi_common_unrealize(DeviceState *dev); void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp); +void virtio_scsi_dataplane_cleanup(VirtIOSCSI *s); int virtio_scsi_dataplane_start(VirtIODevice *s); void virtio_scsi_dataplane_stop(VirtIODevice *s); diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h index 0fcbc5c0c6f..c594764f23f 100644 --- a/include/hw/virtio/virtio.h +++ b/include/hw/virtio/virtio.h @@ -14,7 +14,7 @@ #ifndef QEMU_VIRTIO_H #define QEMU_VIRTIO_H -#include "exec/memory.h" +#include "system/memory.h" #include "hw/qdev-core.h" #include "net/net.h" #include "migration/vmstate.h" @@ -186,7 +186,7 @@ struct VirtioDeviceClass { void (*get_config)(VirtIODevice *vdev, uint8_t *config); void (*set_config)(VirtIODevice *vdev, const uint8_t *config); void (*reset)(VirtIODevice *vdev); - void (*set_status)(VirtIODevice *vdev, uint8_t val); + int (*set_status)(VirtIODevice *vdev, uint8_t val); /* Device must validate queue_index. */ void (*queue_reset)(VirtIODevice *vdev, uint32_t queue_index); /* Device must validate queue_index. */ @@ -210,6 +210,14 @@ struct VirtioDeviceClass { void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask); int (*start_ioeventfd)(VirtIODevice *vdev); void (*stop_ioeventfd)(VirtIODevice *vdev); + /* + * Called before loading queues. + * If the number of queues change at runtime, use @n to know the + * number and add or remove queues accordingly. + * Note that this function is called in the middle of loading vmsd; + * no assumption should be made on states being loaded from vmsd. + */ + int (*pre_load_queues)(VirtIODevice *vdev, uint32_t n); /* Saving and loading of a device; trying to deprecate save/load * use vmsd for new devices. */ @@ -223,6 +231,7 @@ struct VirtioDeviceClass { int (*post_load)(VirtIODevice *vdev); const VMStateDescription *vmsd; bool (*primary_unplug_pending)(void *opaque); + /* May be called even when vdev->vhost_started is false */ struct vhost_dev *(*get_vhost)(VirtIODevice *vdev); void (*toggle_device_iotlb)(VirtIODevice *vdev); }; diff --git a/include/hw/vmapple/vmapple.h b/include/hw/vmapple/vmapple.h new file mode 100644 index 00000000000..9c1ad1bd8c3 --- /dev/null +++ b/include/hw/vmapple/vmapple.h @@ -0,0 +1,23 @@ +/* + * Devices specific to the VMApple machine type + * + * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VMAPPLE_VMAPPLE_H +#define HW_VMAPPLE_VMAPPLE_H + +#define TYPE_APPLE_AES "apple-aes" + +#define TYPE_VMAPPLE_BDIF "vmapple-bdif" + +#define TYPE_VMAPPLE_CFG "vmapple-cfg" + +#define TYPE_VMAPPLE_VIRTIO_BLK_PCI "vmapple-virtio-blk-pci" + +#endif /* HW_VMAPPLE_VMAPPLE_H */ diff --git a/include/hw/xen/arch_hvm.h b/include/hw/xen/arch_hvm.h index c7c515220d2..8bacaa4ec41 100644 --- a/include/hw/xen/arch_hvm.h +++ b/include/hw/xen/arch_hvm.h @@ -1,5 +1,11 @@ -#if defined(TARGET_I386) || defined(TARGET_X86_64) -#include "hw/i386/xen_arch_hvm.h" -#elif defined(TARGET_ARM) || defined(TARGET_ARM_64) -#include "hw/arm/xen_arch_hvm.h" +#ifndef HW_XEN_ARCH_HVM_H +#define HW_XEN_ARCH_HVM_H + +#include +#include "hw/xen/xen-hvm-common.h" + +void arch_handle_ioreq(XenIOState *state, ioreq_t *req); +void arch_xen_set_memory(XenIOState *state, + MemoryRegionSection *section, + bool add); #endif diff --git a/include/hw/xen/interface/io/blkif.h b/include/hw/xen/interface/io/blkif.h index 22f1eef0c0c..c5527999d1b 100644 --- a/include/hw/xen/interface/io/blkif.h +++ b/include/hw/xen/interface/io/blkif.h @@ -324,7 +324,7 @@ * access (even when it should be read-only). If the frontend hits the * maximum number of allowed persistently mapped grants, it can fallback * to non persistent mode. This will cause a performance degradation, - * since the the backend driver will still try to map those grants + * since the backend driver will still try to map those grants * persistently. Since the persistent grants protocol is compatible with * the previous protocol, a frontend driver can choose to work in * persistent mode even when the backend doesn't support it. diff --git a/include/hw/xen/xen-block.h b/include/hw/xen/xen-block.h index d692ea75802..449a7f75fb5 100644 --- a/include/hw/xen/xen-block.h +++ b/include/hw/xen/xen-block.h @@ -11,7 +11,7 @@ #include "hw/xen/xen-bus.h" #include "hw/block/block.h" #include "hw/block/dataplane/xen-block.h" -#include "sysemu/iothread.h" +#include "system/iothread.h" #include "qom/object.h" typedef enum XenBlockVdevType { diff --git a/include/hw/xen/xen-bus-helper.h b/include/hw/xen/xen-bus-helper.h index d8dcc2f0107..e9911115b32 100644 --- a/include/hw/xen/xen-bus-helper.h +++ b/include/hw/xen/xen-bus-helper.h @@ -38,6 +38,15 @@ int xs_node_scanf(struct qemu_xs_handle *h, xs_transaction_t tid, const char *fmt, ...) G_GNUC_SCANF(6, 7); +/* + * Unlike other functions here, the printf-formatted path_fmt is for + * the XenStore path, not the contents of the node. + */ +char *xs_node_read(struct qemu_xs_handle *h, xs_transaction_t tid, + unsigned int *len, Error **errp, + const char *path_fmt, ...) + G_GNUC_PRINTF(5, 6); + /* Watch node/key unless node is empty, in which case watch key */ struct qemu_xs_watch *xs_node_watch(struct qemu_xs_handle *h, const char *node, const char *key, xs_watch_fn fn, diff --git a/include/hw/xen/xen-bus.h b/include/hw/xen/xen-bus.h index 38d40afa379..bdbf1ed6fd0 100644 --- a/include/hw/xen/xen-bus.h +++ b/include/hw/xen/xen-bus.h @@ -8,9 +8,10 @@ #ifndef HW_XEN_BUS_H #define HW_XEN_BUS_H +#include "hw/qdev-core.h" #include "hw/xen/xen_backend_ops.h" -#include "hw/sysbus.h" #include "qemu/notify.h" +#include "qemu/queue.h" #include "qom/object.h" typedef struct XenEventChannel XenEventChannel; @@ -91,6 +92,7 @@ void xen_device_frontend_printf(XenDevice *xendev, const char *key, int xen_device_frontend_scanf(XenDevice *xendev, const char *key, const char *fmt, ...) 
G_GNUC_SCANF(3, 4); +char *xen_device_frontend_read(XenDevice *xendev, const char *key); void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs, Error **errp); diff --git a/include/hw/xen/xen-hvm-common.h b/include/hw/xen/xen-hvm-common.h index 3d796235dc1..19df5600a39 100644 --- a/include/hw/xen/xen-hvm-common.h +++ b/include/hw/xen/xen-hvm-common.h @@ -1,18 +1,10 @@ #ifndef HW_XEN_HVM_COMMON_H #define HW_XEN_HVM_COMMON_H -#include "qemu/units.h" - -#include "cpu.h" -#include "hw/pci/pci.h" -#include "hw/hw.h" +#include "qemu/queue.h" +#include "exec/hwaddr.h" #include "hw/xen/xen_native.h" -#include "hw/xen/xen-legacy-backend.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" -#include "sysemu/xen.h" -#include "sysemu/xen-mapcache.h" -#include "qemu/error-report.h" +#include "hw/xen/xen_backend_ops.h" #include extern MemoryRegion xen_memory; @@ -81,6 +73,8 @@ typedef struct XenIOState { QLIST_HEAD(, XenPciDevice) dev_list; DeviceListener device_listener; + bool has_bufioreq; + Notifier exit; } XenIOState; @@ -95,6 +89,7 @@ void xen_device_unrealize(DeviceListener *listener, DeviceState *dev); void xen_hvm_change_state_handler(void *opaque, bool running, RunState rstate); void xen_register_ioreq(XenIOState *state, unsigned int max_cpus, + uint8_t handle_bufioreq, const MemoryListener *xen_memory_listener); void cpu_ioreq_pio(ioreq_t *req); diff --git a/include/hw/xen/xen-legacy-backend.h b/include/hw/xen/xen-legacy-backend.h index 943732b8d15..2d0cbfecade 100644 --- a/include/hw/xen/xen-legacy-backend.h +++ b/include/hw/xen/xen-legacy-backend.h @@ -3,7 +3,6 @@ #include "hw/xen/xen_backend_ops.h" #include "hw/xen/xen_pvdev.h" -#include "net/net.h" #include "qom/object.h" #define TYPE_XENSYSDEV "xen-sysdev" @@ -50,10 +49,6 @@ void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs, void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr, uint32_t *refs, unsigned int nr_refs); -int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev, - bool to_domain, XenGrantCopySegment segs[], - unsigned int nr_segs); - static inline void *xen_be_map_grant_ref(struct XenLegacyDevice *xendev, uint32_t ref, int prot) { @@ -70,6 +65,5 @@ static inline void xen_be_unmap_grant_ref(struct XenLegacyDevice *xendev, void xen_config_cleanup(void); int xen_config_dev_vfb(int vdev, const char *type); int xen_config_dev_vkbd(int vdev); -int xen_config_dev_console(int vdev); #endif /* HW_XEN_LEGACY_BACKEND_H */ diff --git a/include/hw/xen/xen-pvh-common.h b/include/hw/xen/xen-pvh-common.h new file mode 100644 index 00000000000..5db83d88ecc --- /dev/null +++ b/include/hw/xen/xen-pvh-common.h @@ -0,0 +1,91 @@ +/* + * QEMU Xen PVH machine - common code. + * + * Copyright (c) 2024 Advanced Micro Devices, Inc. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef XEN_PVH_COMMON_H__ +#define XEN_PVH_COMMON_H__ + +#include "system/memory.h" +#include "qom/object.h" +#include "hw/boards.h" +#include "hw/pci-host/gpex.h" +#include "hw/xen/xen-hvm-common.h" + +#define TYPE_XEN_PVH_MACHINE MACHINE_TYPE_NAME("xen-pvh-base") +OBJECT_DECLARE_TYPE(XenPVHMachineState, XenPVHMachineClass, + XEN_PVH_MACHINE) + +struct XenPVHMachineClass { + MachineClass parent; + + /* PVH implementation specific init. */ + void (*init)(MachineState *state); + + /* + * set_pci_intx_irq - Deliver INTX irqs to the guest. + * + * @opaque: pointer to XenPVHMachineState. + * @irq: IRQ after swizzling, between 0-3. + * @level: IRQ level. 
+ */ + void (*set_pci_intx_irq)(void *opaque, int irq, int level); + + /* + * set_pci_link_route: - optional implementation call to setup + * routing between INTX IRQ (0 - 3) and GSI's. + * + * @line: line the INTx line (0 => A .. 3 => B) + * @irq: GSI + */ + int (*set_pci_link_route)(uint8_t line, uint8_t irq); + + /* Allow implementations to optionally enable buffered ioreqs. */ + uint8_t handle_bufioreq; + + /* + * Each implementation can optionally enable features that it + * supports and are known to work. + */ + bool has_pci; + bool has_tpm; + bool has_virtio_mmio; +}; + +struct XenPVHMachineState { + /*< private >*/ + MachineState parent; + + XenIOState ioreq; + + struct { + MemoryRegion low; + MemoryRegion high; + } ram; + + struct { + GPEXHost gpex; + MemoryRegion mmio_alias; + MemoryRegion mmio_high_alias; + } pci; + + struct { + MemMapEntry ram_low, ram_high; + MemMapEntry tpm; + + /* Virtio-mmio */ + MemMapEntry virtio_mmio; + uint32_t virtio_mmio_num; + uint32_t virtio_mmio_irq_base; + + /* PCI */ + MemMapEntry pci_ecam, pci_mmio, pci_mmio_high; + uint32_t pci_intx_irq_base; + } cfg; +}; + +void xen_pvh_class_setup_common_props(XenPVHMachineClass *xpc); +#endif diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h index ecb89ecfc14..e94c6e5a318 100644 --- a/include/hw/xen/xen.h +++ b/include/hw/xen/xen.h @@ -24,8 +24,6 @@ #define __XEN_INTERFACE_VERSION__ 0x00040e00 #endif -#include "exec/cpu-common.h" - /* xen-machine.c */ enum xen_mode { XEN_DISABLED = 0, /* xen support disabled (default) */ diff --git a/include/hw/xen/xen_native.h b/include/hw/xen/xen_native.h index 1a5ad693a4d..5caf91a6162 100644 --- a/include/hw/xen/xen_native.h +++ b/include/hw/xen/xen_native.h @@ -464,10 +464,11 @@ static inline void xen_unmap_pcidev(domid_t dom, } static inline int xen_create_ioreq_server(domid_t dom, + int handle_bufioreq, ioservid_t *ioservid) { int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom, - HVM_IOREQSRV_BUFIOREQ_ATOMIC, + handle_bufioreq, ioservid); if (rc == 0) { diff --git a/include/hw/xen/xen_pvdev.h b/include/hw/xen/xen_pvdev.h index 0c984440476..629bec90d09 100644 --- a/include/hw/xen/xen_pvdev.h +++ b/include/hw/xen/xen_pvdev.h @@ -1,7 +1,7 @@ #ifndef QEMU_HW_XEN_PVDEV_H #define QEMU_HW_XEN_PVDEV_H -#include "hw/qdev-core.h" +#include "hw/sysbus.h" #include "hw/xen/xen_backend_ops.h" /* ------------------------------------------------------------- */ @@ -32,7 +32,8 @@ struct XenDevOps { }; struct XenLegacyDevice { - DeviceState qdev; + SysBusDevice parent_obj; + const char *type; int dom; int dev; diff --git a/include/hw/xtensa/mx_pic.h b/include/hw/xtensa/mx_pic.h index 500424c8d35..cd316d86eb6 100644 --- a/include/hw/xtensa/mx_pic.h +++ b/include/hw/xtensa/mx_pic.h @@ -28,7 +28,7 @@ #ifndef XTENSA_MX_PIC_H #define XTENSA_MX_PIC_H -#include "exec/memory.h" +#include "system/memory.h" struct XtensaMxPic; typedef struct XtensaMxPic XtensaMxPic; diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h index ab15577d384..a88cf8b3a9f 100644 --- a/include/io/channel-socket.h +++ b/include/io/channel-socket.h @@ -261,5 +261,18 @@ QIOChannelSocket * qio_channel_socket_accept(QIOChannelSocket *ioc, Error **errp); +/** + * qio_channel_socket_set_send_buffer: + * @ioc: the socket channel object + * @size: buffer size + * @errp: pointer to a NULL-initialized error object + * + * Set the underlying socket send buffer size. + * + * Retruns: 0 on success, or -1 on error. 
+ */ +int qio_channel_socket_set_send_buffer(QIOChannelSocket *ioc, + size_t size, + Error **errp); #endif /* QIO_CHANNEL_SOCKET_H */ diff --git a/include/io/channel-tls.h b/include/io/channel-tls.h index 26c67f17e2d..7e9023570d8 100644 --- a/include/io/channel-tls.h +++ b/include/io/channel-tls.h @@ -49,8 +49,20 @@ struct QIOChannelTLS { QCryptoTLSSession *session; QIOChannelShutdown shutdown; guint hs_ioc_tag; + guint bye_ioc_tag; }; +/** + * qio_channel_tls_bye: + * @ioc: the TLS channel object + * @errp: pointer to a NULL-initialized error object + * + * Perform the TLS session termination. This method will return + * immediately and the termination will continue in the background, + * provided the main loop is running. + */ +void qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp); + /** * qio_channel_tls_new_server: * @master: the underlying channel object diff --git a/include/io/channel.h b/include/io/channel.h index bdf0bca92ae..234e5db70dd 100644 --- a/include/io/channel.h +++ b/include/io/channel.h @@ -35,6 +35,7 @@ OBJECT_DECLARE_TYPE(QIOChannel, QIOChannelClass, #define QIO_CHANNEL_WRITE_FLAG_ZERO_COPY 0x1 #define QIO_CHANNEL_READ_FLAG_MSG_PEEK 0x1 +#define QIO_CHANNEL_READ_FLAG_RELAXED_EOF 0x2 typedef enum QIOChannelFeature QIOChannelFeature; @@ -45,6 +46,7 @@ enum QIOChannelFeature { QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY, QIO_CHANNEL_FEATURE_READ_MSG_PEEK, QIO_CHANNEL_FEATURE_SEEKABLE, + QIO_CHANNEL_FEATURE_CONCURRENT_IO, }; @@ -885,6 +887,7 @@ void qio_channel_set_aio_fd_handler(QIOChannel *ioc, * @niov: the length of the @iov array * @fds: an array of file handles to read * @nfds: number of file handles in @fds + * @flags: read flags (QIO_CHANNEL_READ_FLAG_*) * @errp: pointer to a NULL-initialized error object * * @@ -903,6 +906,7 @@ int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc, const struct iovec *iov, size_t niov, int **fds, size_t *nfds, + int flags, Error **errp); /** diff --git a/include/libdecnumber/dconfig.h b/include/libdecnumber/dconfig.h index 2bc0ba7f144..e67ecc1b5f6 100644 --- a/include/libdecnumber/dconfig.h +++ b/include/libdecnumber/dconfig.h @@ -23,9 +23,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with GCC; see the file COPYING. If not, see + . */ #if HOST_BIG_ENDIAN diff --git a/include/libdecnumber/decContext.h b/include/libdecnumber/decContext.h index cea6e4279ec..5bb64e13325 100644 --- a/include/libdecnumber/decContext.h +++ b/include/libdecnumber/decContext.h @@ -24,9 +24,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with GCC; see the file COPYING. If not, see + . */ /* ------------------------------------------------------------------ */ /* Decimal Context module header */ diff --git a/include/libdecnumber/decDPD.h b/include/libdecnumber/decDPD.h index 26a21ec8ed9..8eb455277be 100644 --- a/include/libdecnumber/decDPD.h +++ b/include/libdecnumber/decDPD.h @@ -24,9 +24,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. 
*/ + along with GCC; see the file COPYING. If not, see + . */ /* ------------------------------------------------------------------------ */ /* Binary Coded Decimal and Densely Packed Decimal conversion lookup tables */ diff --git a/include/libdecnumber/decNumber.h b/include/libdecnumber/decNumber.h index 41bc2a0d36f..bf37af83e6b 100644 --- a/include/libdecnumber/decNumber.h +++ b/include/libdecnumber/decNumber.h @@ -24,9 +24,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with GCC; see the file COPYING. If not, see + . */ /* ------------------------------------------------------------------ */ /* Decimal Number arithmetic module header */ diff --git a/include/libdecnumber/decNumberLocal.h b/include/libdecnumber/decNumberLocal.h index 6198ca85930..0959f6606bb 100644 --- a/include/libdecnumber/decNumberLocal.h +++ b/include/libdecnumber/decNumberLocal.h @@ -24,9 +24,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with GCC; see the file COPYING. If not, see + . */ /* ------------------------------------------------------------------ */ /* decNumber package local type, tuning, and macro definitions */ diff --git a/include/libdecnumber/dpd/decimal128.h b/include/libdecnumber/dpd/decimal128.h index aff261e5566..c57180baf8b 100644 --- a/include/libdecnumber/dpd/decimal128.h +++ b/include/libdecnumber/dpd/decimal128.h @@ -24,9 +24,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with GCC; see the file COPYING. If not, see + . */ /* ------------------------------------------------------------------ */ /* Decimal 128-bit format module header */ diff --git a/include/libdecnumber/dpd/decimal128Local.h b/include/libdecnumber/dpd/decimal128Local.h index 97654277193..2948ab2534e 100644 --- a/include/libdecnumber/dpd/decimal128Local.h +++ b/include/libdecnumber/dpd/decimal128Local.h @@ -23,9 +23,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with GCC; see the file COPYING. If not, see + . */ #if !defined(DECIMAL128LOCAL) diff --git a/include/libdecnumber/dpd/decimal32.h b/include/libdecnumber/dpd/decimal32.h index 6cb9e436204..9a179334975 100644 --- a/include/libdecnumber/dpd/decimal32.h +++ b/include/libdecnumber/dpd/decimal32.h @@ -24,9 +24,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with GCC; see the file COPYING. If not, see + . 
*/ /* ------------------------------------------------------------------ */ /* Decimal 32-bit format module header */ diff --git a/include/libdecnumber/dpd/decimal64.h b/include/libdecnumber/dpd/decimal64.h index f29e57064d7..5c3d0bb43cf 100644 --- a/include/libdecnumber/dpd/decimal64.h +++ b/include/libdecnumber/dpd/decimal64.h @@ -24,9 +24,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with GCC; see the file COPYING. If not, see + . */ /* ------------------------------------------------------------------ */ /* Decimal 64-bit format module header */ diff --git a/include/migration/client-options.h b/include/migration/client-options.h index 59f4b55cf4f..289c9d77622 100644 --- a/include/migration/client-options.h +++ b/include/migration/client-options.h @@ -10,6 +10,10 @@ #ifndef QEMU_MIGRATION_CLIENT_OPTIONS_H #define QEMU_MIGRATION_CLIENT_OPTIONS_H + +/* properties */ +bool migrate_send_switchover_start(void); + /* capabilities */ bool migrate_background_snapshot(void); diff --git a/include/migration/cpr.h b/include/migration/cpr.h new file mode 100644 index 00000000000..3fc19a74efd --- /dev/null +++ b/include/migration/cpr.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2021, 2024 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef MIGRATION_CPR_H +#define MIGRATION_CPR_H + +#include "qapi/qapi-types-migration.h" +#include "qemu/queue.h" + +#define MIG_MODE_NONE -1 + +#define QEMU_CPR_FILE_MAGIC 0x51435052 +#define QEMU_CPR_FILE_VERSION 0x00000001 +#define CPR_STATE "CprState" + +typedef QLIST_HEAD(CprFdList, CprFd) CprFdList; +typedef QLIST_HEAD(CprVFIODeviceList, CprVFIODevice) CprVFIODeviceList; + +typedef struct CprState { + CprFdList fds; + CprVFIODeviceList vfio_devices; +} CprState; + +extern CprState cpr_state; + +void cpr_save_fd(const char *name, int id, int fd); +void cpr_delete_fd(const char *name, int id); +int cpr_find_fd(const char *name, int id); +void cpr_resave_fd(const char *name, int id, int fd); +int cpr_open_fd(const char *path, int flags, const char *name, int id, + Error **errp); + +MigMode cpr_get_incoming_mode(void); +void cpr_set_incoming_mode(MigMode mode); +bool cpr_is_incoming(void); + +int cpr_state_save(MigrationChannel *channel, Error **errp); +int cpr_state_load(MigrationChannel *channel, Error **errp); +void cpr_state_close(void); +struct QIOChannel *cpr_state_ioc(void); + +bool cpr_incoming_needed(void *opaque); +int cpr_get_fd_param(const char *name, const char *fdname, int index, + Error **errp); + +QEMUFile *cpr_transfer_output(MigrationChannel *channel, Error **errp); +QEMUFile *cpr_transfer_input(MigrationChannel *channel, Error **errp); + +#endif diff --git a/include/migration/misc.h b/include/migration/misc.h index bfadc5613ba..a261f99d890 100644 --- a/include/migration/misc.h +++ b/include/migration/misc.h @@ -39,25 +39,25 @@ void precopy_add_notifier(NotifierWithReturn *n); void precopy_remove_notifier(NotifierWithReturn *n); int precopy_notify(PrecopyNotifyReason reason, Error **errp); -void ram_mig_init(void); void qemu_guest_free_page_hint(void *addr, size_t len); bool migrate_ram_is_ignored(RAMBlock *block); /* migration/block.c */ AnnounceParameters *migrate_announce_params(void); + /* migration/savevm.c */ void 
dump_vmstate_json_to_file(FILE *out_fp); +void qemu_loadvm_start_load_thread(MigrationLoadThread function, + void *opaque); /* migration/migration.c */ void migration_object_init(void); void migration_shutdown(void); -bool migration_is_idle(void); -bool migration_is_active(void); -bool migration_is_device(void); + +bool migration_is_running(void); bool migration_thread_is_self(void); -bool migration_is_setup_or_active(void); typedef enum MigrationEventType { MIG_EVENT_PRECOPY_SETUP, @@ -96,7 +96,6 @@ void migration_add_notifier_mode(NotifierWithReturn *notify, MigrationNotifyFunc func, MigMode mode); void migration_remove_notifier(NotifierWithReturn *notify); -bool migration_is_running(void); void migration_file_set_error(int ret, Error *err); /* True if incoming migration entered POSTCOPY_INCOMING_DISCARD */ @@ -108,7 +107,37 @@ bool migration_incoming_postcopy_advised(void); /* True if background snapshot is active */ bool migration_in_bg_snapshot(void); -/* migration/block-dirty-bitmap.c */ -void dirty_bitmap_mig_init(void); +/* Wrapper for block active/inactive operations */ +bool migration_block_activate(Error **errp); +bool migration_block_inactivate(void); + +/* True if @uri starts with a syntactically valid URI prefix */ +bool migrate_is_uri(const char *uri); + +/* Parse @uri and return @channel, returning true on success */ +bool migrate_uri_parse(const char *uri, MigrationChannel **channel, + Error **errp); + +/* migration/multifd-device-state.c */ +typedef struct SaveCompletePrecopyThreadData { + SaveCompletePrecopyThreadHandler hdlr; + char *idstr; + uint32_t instance_id; + void *handler_opaque; +} SaveCompletePrecopyThreadData; + +bool multifd_queue_device_state(char *idstr, uint32_t instance_id, + char *data, size_t len); +bool multifd_device_state_supported(void); + +void +multifd_spawn_device_state_save_thread(SaveCompletePrecopyThreadHandler hdlr, + char *idstr, uint32_t instance_id, + void *opaque); + +bool multifd_device_state_save_thread_should_exit(void); + +void multifd_abort_device_state_save_threads(void); +bool multifd_join_device_state_save_threads(void); #endif diff --git a/include/migration/register.h b/include/migration/register.h index f60e797894e..ae79794cdd4 100644 --- a/include/migration/register.h +++ b/include/migration/register.h @@ -69,39 +69,52 @@ typedef struct SaveVMHandlers { /** * @save_cleanup * - * Uninitializes the data structures on the source + * Uninitializes the data structures on the source. + * Note that this handler can be called even if save_setup + * wasn't called earlier. * * @opaque: data pointer passed to register_savevm_live() */ void (*save_cleanup)(void *opaque); /** - * @save_live_complete_postcopy + * @save_complete * - * Called at the end of postcopy for all postcopyable devices. + * Transmits the last section for the device containing any + * remaining data at the end phase of migration. + * + * For precopy, this will be invoked _during_ the switchover phase + * after source VM is stopped. + * + * For postcopy, this will be invoked _after_ the switchover phase + * (except some very unusual cases, like PMEM ramblocks), while + * destination VM can be running. 
* * @f: QEMUFile where to send the data * @opaque: data pointer passed to register_savevm_live() * * Returns zero to indicate success and negative for error */ - int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque); + int (*save_complete)(QEMUFile *f, void *opaque); /** - * @save_live_complete_precopy - * - * Transmits the last section for the device containing any - * remaining data at the end of a precopy phase. When postcopy is - * enabled, devices that support postcopy will skip this step, - * where the final data will be flushed at the end of postcopy via - * @save_live_complete_postcopy instead. - * - * @f: QEMUFile where to send the data - * @opaque: data pointer passed to register_savevm_live() + * @save_complete_precopy_thread (invoked in a separate thread) + * + * Called at the end of a precopy phase from a separate worker thread + * in configurations where multifd device state transfer is supported + * in order to perform asynchronous transmission of the remaining data in + * parallel with @save_complete handlers. + * When postcopy is enabled, devices that support postcopy will skip this + * step. + * + * @d: a #SaveCompletePrecopyThreadData containing parameters that the + * handler may need, including this device section idstr and instance_id, + * and opaque data pointer passed to register_savevm_live(). + * @errp: pointer to Error*, to store an error if it happens. * - * Returns zero to indicate success and negative for error + * Returns true to indicate success and false for errors. */ - int (*save_live_complete_precopy)(QEMUFile *f, void *opaque); + SaveCompletePrecopyThreadHandler save_complete_precopy_thread; /* This runs both outside and inside the BQL. */ @@ -168,6 +181,21 @@ typedef struct SaveVMHandlers { /* This runs outside the BQL! */ + /** + * @save_postcopy_prepare + * + * This hook will be invoked on the source side right before switching + * to postcopy (before VM stopped). + * + * @f: QEMUFile where to send the data + * @opaque: Data pointer passed to register_savevm_live() + * @errp: Error** used to report error message + * + * Returns: true if succeeded, false if error occured. When false is + * returned, @errp must be set. + */ + bool (*save_postcopy_prepare)(QEMUFile *f, void *opaque, Error **errp); + /** * @state_pending_estimate * @@ -227,6 +255,21 @@ typedef struct SaveVMHandlers { */ int (*load_state)(QEMUFile *f, void *opaque, int version_id); + /** + * @load_state_buffer (invoked outside the BQL) + * + * Load device state buffer provided to qemu_loadvm_load_state_buffer(). + * + * @opaque: data pointer passed to register_savevm_live() + * @buf: the data buffer to load + * @len: the data length in buffer + * @errp: pointer to Error*, to store an error if it happens. + * + * Returns true to indicate success and false for errors. + */ + bool (*load_state_buffer)(void *opaque, char *buf, size_t len, + Error **errp); + /** * @load_setup * @@ -244,6 +287,8 @@ typedef struct SaveVMHandlers { * @load_cleanup * * Uninitializes the data structures on the destination. + * Note that this handler can be called even if load_setup + * wasn't called earlier. * * @opaque: data pointer passed to register_savevm_live() * @@ -275,6 +320,18 @@ typedef struct SaveVMHandlers { * otherwise */ bool (*switchover_ack_needed)(void *opaque); + + /** + * @switchover_start + * + * Notifies that the switchover has started. Called only on + * the destination. 
+ * + * @opaque: data pointer passed to register_savevm_live() + * + * Returns zero to indicate success and negative for error + */ + int (*switchover_start)(void *opaque); } SaveVMHandlers; /** diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h index f313f2f4086..1ff7bd9ac42 100644 --- a/include/migration/vmstate.h +++ b/include/migration/vmstate.h @@ -155,7 +155,11 @@ enum VMStateFlags { }; typedef enum { - MIG_PRI_DEFAULT = 0, + MIG_PRI_UNINITIALIZED = 0, /* An uninitialized priority field maps to */ + /* MIG_PRI_DEFAULT in save_state_priority */ + + MIG_PRI_LOW, /* Must happen after default */ + MIG_PRI_DEFAULT, MIG_PRI_IOMMU, /* Must happen before PCI devices */ MIG_PRI_PCI_BUS, /* Must happen before IOMMU */ MIG_PRI_VIRTIO_MEM, /* Must happen before IOMMU */ @@ -230,6 +234,7 @@ extern const VMStateInfo vmstate_info_uint8; extern const VMStateInfo vmstate_info_uint16; extern const VMStateInfo vmstate_info_uint32; extern const VMStateInfo vmstate_info_uint64; +extern const VMStateInfo vmstate_info_fd; /** Put this in the stream when migrating a null pointer.*/ #define VMS_NULLPTR_MARKER (0x30U) /* '0' */ @@ -902,6 +907,9 @@ extern const VMStateInfo vmstate_info_qlist; #define VMSTATE_UINT64_V(_f, _s, _v) \ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint64, uint64_t) +#define VMSTATE_FD_V(_f, _s, _v) \ + VMSTATE_SINGLE(_f, _s, _v, vmstate_info_fd, int32_t) + #ifdef CONFIG_LINUX #define VMSTATE_U8_V(_f, _s, _v) \ @@ -936,6 +944,9 @@ extern const VMStateInfo vmstate_info_qlist; #define VMSTATE_UINT64(_f, _s) \ VMSTATE_UINT64_V(_f, _s, 0) +#define VMSTATE_FD(_f, _s) \ + VMSTATE_FD_V(_f, _s, 0) + #ifdef CONFIG_LINUX #define VMSTATE_U8(_f, _s) \ @@ -1009,6 +1020,8 @@ extern const VMStateInfo vmstate_info_qlist; #define VMSTATE_UINT64_TEST(_f, _s, _t) \ VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_info_uint64, uint64_t) +#define VMSTATE_FD_TEST(_f, _s, _t) \ + VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_info_fd, int32_t) #define VMSTATE_TIMER_PTR_TEST(_f, _s, _test) \ VMSTATE_POINTER_TEST(_f, _s, _test, vmstate_info_timer, QEMUTimer *) diff --git a/include/net/checksum.h b/include/net/checksum.h index 7dec37e56c7..188e4cca0b7 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -30,7 +30,7 @@ uint32_t net_checksum_add_cont(int len, uint8_t *buf, int seq); uint16_t net_checksum_finish(uint32_t sum); uint16_t net_checksum_tcpudp(uint16_t length, uint16_t proto, uint8_t *addrs, uint8_t *buf); -void net_checksum_calculate(uint8_t *data, int length, int csum_flag); +void net_checksum_calculate(void *data, int length, int csum_flag); static inline uint32_t net_checksum_add(int len, uint8_t *buf) diff --git a/include/net/eth.h b/include/net/eth.h index 3b80b6e07f3..14c34f530fe 100644 --- a/include/net/eth.h +++ b/include/net/eth.h @@ -56,7 +56,7 @@ struct ip_header { uint8_t ip_p; /* protocol */ uint16_t ip_sum; /* checksum */ uint32_t ip_src, ip_dst; /* source and destination address */ -}; +} QEMU_PACKED; typedef struct tcp_header { uint16_t th_sport; /* source port */ diff --git a/include/net/net.h b/include/net/net.h index c8f679761bf..84ee18e0f97 100644 --- a/include/net/net.h +++ b/include/net/net.h @@ -60,6 +60,7 @@ typedef bool (HasVnetHdrLen)(NetClientState *, int); typedef void (SetOffload)(NetClientState *, int, int, int, int, int, int, int); typedef int (GetVnetHdrLen)(NetClientState *); typedef void (SetVnetHdrLen)(NetClientState *, int); +typedef bool (GetVnetHashSupportedTypes)(NetClientState *, uint32_t *); typedef int (SetVnetLE)(NetClientState *, bool); 
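A minimal sketch of how the new VMSTATE_FD macro introduced in the migration/vmstate.h hunk above might appear in a device's migration description. Only the macro itself comes from this patch; the device struct, field name and VMStateDescription below are hypothetical, and the field is declared int32_t to match the type the macro serialises.

/*
 * Illustrative only: a made-up device state that migrates a file
 * descriptor field using the new VMSTATE_FD macro.
 */
#include "qemu/osdep.h"
#include "migration/vmstate.h"

typedef struct DemoState {
    int32_t shared_fd;              /* hypothetical fd serialised via vmstate_info_fd */
} DemoState;

static const VMStateDescription vmstate_demo = {
    .name = "demo-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_FD(shared_fd, DemoState),
        VMSTATE_END_OF_LIST()
    },
};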
typedef int (SetVnetBE)(NetClientState *, bool); typedef struct SocketReadState SocketReadState; @@ -67,6 +68,7 @@ typedef void (SocketReadStateFinalize)(SocketReadState *rs); typedef void (NetAnnounce)(NetClientState *); typedef bool (SetSteeringEBPF)(NetClientState *, int); typedef bool (NetCheckPeerType)(NetClientState *, ObjectClass *, Error **); +typedef struct vhost_net *(GetVHostNet)(NetClientState *nc); typedef struct NetClientInfo { NetClientDriver type; @@ -89,9 +91,11 @@ typedef struct NetClientInfo { SetVnetHdrLen *set_vnet_hdr_len; SetVnetLE *set_vnet_le; SetVnetBE *set_vnet_be; + GetVnetHashSupportedTypes *get_vnet_hash_supported_types; NetAnnounce *announce; SetSteeringEBPF *set_steering_ebpf; NetCheckPeerType *check_peer_type; + GetVHostNet *get_vhost_net; } NetClientInfo; struct NetClientState { @@ -172,9 +176,6 @@ ssize_t qemu_sendv_packet_async(NetClientState *nc, const struct iovec *iov, int iovcnt, NetPacketSent *sent_cb); ssize_t qemu_send_packet(NetClientState *nc, const uint8_t *buf, int size); ssize_t qemu_receive_packet(NetClientState *nc, const uint8_t *buf, int size); -ssize_t qemu_receive_packet_iov(NetClientState *nc, - const struct iovec *iov, - int iovcnt); ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size); ssize_t qemu_send_packet_async(NetClientState *nc, const uint8_t *buf, int size, NetPacketSent *sent_cb); @@ -192,6 +193,7 @@ void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6, int ecn, int ufo, int uso4, int uso6); int qemu_get_vnet_hdr_len(NetClientState *nc); void qemu_set_vnet_hdr_len(NetClientState *nc, int len); +bool qemu_get_vnet_hash_supported_types(NetClientState *nc, uint32_t *types); int qemu_set_vnet_le(NetClientState *nc, bool is_le); int qemu_set_vnet_be(NetClientState *nc, bool is_be); void qemu_macaddr_default_if_unset(MACAddr *macaddr); @@ -301,13 +303,13 @@ void net_client_parse(QemuOptsList *opts_list, const char *optstr); void show_netdevs(void); void net_init_clients(void); void net_check_clients(void); +void net_client_set_link(NetClientState **ncs, int queues, bool up); void net_cleanup(void); void hmp_host_net_add(Monitor *mon, const QDict *qdict); void hmp_host_net_remove(Monitor *mon, const QDict *qdict); void netdev_add(QemuOpts *opts, Error **errp); int net_hub_id_for_client(NetClientState *nc, int *id); -NetClientState *net_hub_port_find(int hub_id); #define DEFAULT_NETWORK_SCRIPT CONFIG_SYSCONFDIR "/qemu-ifup" #define DEFAULT_NETWORK_DOWN_SCRIPT CONFIG_SYSCONFDIR "/qemu-ifdown" diff --git a/include/net/queue.h b/include/net/queue.h index 9f2f289d771..2e686b1b61d 100644 --- a/include/net/queue.h +++ b/include/net/queue.h @@ -59,10 +59,6 @@ ssize_t qemu_net_queue_receive(NetQueue *queue, const uint8_t *data, size_t size); -ssize_t qemu_net_queue_receive_iov(NetQueue *queue, - const struct iovec *iov, - int iovcnt); - ssize_t qemu_net_queue_send(NetQueue *queue, NetClientState *sender, unsigned flags, diff --git a/include/net/tap.h b/include/net/tap.h index 5d585515f9e..6f34f13eae4 100644 --- a/include/net/tap.h +++ b/include/net/tap.h @@ -33,7 +33,4 @@ int tap_disable(NetClientState *nc); int tap_get_fd(NetClientState *nc); -struct vhost_net; -struct vhost_net *tap_get_vhost_net(NetClientState *nc); - #endif /* QEMU_NET_TAP_H */ diff --git a/include/net/vhost-user.h b/include/net/vhost-user.h deleted file mode 100644 index 35bf6197098..00000000000 --- a/include/net/vhost-user.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * vhost-user.h - * - * Copyright (c) 2013 Virtual Open Systems 
Sarl. - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#ifndef VHOST_USER_H -#define VHOST_USER_H - -struct vhost_net; -struct vhost_net *vhost_user_get_vhost_net(NetClientState *nc); -uint64_t vhost_user_get_acked_features(NetClientState *nc); -void vhost_user_save_acked_features(NetClientState *nc); - -#endif /* VHOST_USER_H */ diff --git a/include/net/vhost-vdpa.h b/include/net/vhost-vdpa.h index b81f9a6f2a0..f8d7d6c9045 100644 --- a/include/net/vhost-vdpa.h +++ b/include/net/vhost-vdpa.h @@ -14,8 +14,4 @@ #define TYPE_VHOST_VDPA "vhost-vdpa" -struct vhost_net *vhost_vdpa_get_vhost_net(NetClientState *nc); - -extern const int vdpa_feature_bits[]; - #endif /* VHOST_VDPA_H */ diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h index c6a5361a2ae..879781dad7b 100644 --- a/include/net/vhost_net.h +++ b/include/net/vhost_net.h @@ -7,11 +7,19 @@ struct vhost_net; typedef struct vhost_net VHostNetState; +typedef uint64_t (GetAckedFeatures)(NetClientState *nc); +typedef void (SaveAcketFeatures)(NetClientState *nc); + typedef struct VhostNetOptions { VhostBackendType backend_type; NetClientState *net_backend; uint32_t busyloop_timeout; unsigned int nvqs; + const int *feature_bits; + int max_tx_queue_size; + bool is_vhost_user; + GetAckedFeatures *get_acked_features; + SaveAcketFeatures *save_acked_features; void *opaque; } VhostNetOptions; @@ -41,7 +49,7 @@ void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev, bool mask); int vhost_net_notify_migration_done(VHostNetState *net, char* mac_addr); VHostNetState *get_vhost_net(NetClientState *nc); -int vhost_set_vring_enable(NetClientState * nc, int enable); +int vhost_net_set_vring_enable(NetClientState *nc, int enable); uint64_t vhost_net_get_acked_features(VHostNetState *net); diff --git a/include/qapi/compat-policy.h b/include/qapi/compat-policy.h index 8b7b25c0b5a..ea65e10744a 100644 --- a/include/qapi/compat-policy.h +++ b/include/qapi/compat-policy.h @@ -18,7 +18,7 @@ extern CompatPolicy compat_policy; -bool compat_policy_input_ok(unsigned special_features, +bool compat_policy_input_ok(uint64_t features, const CompatPolicy *policy, ErrorClass error_class, const char *kind, const char *name, diff --git a/include/qapi/error-internal.h b/include/qapi/error-internal.h new file mode 100644 index 00000000000..ff18a2086d2 --- /dev/null +++ b/include/qapi/error-internal.h @@ -0,0 +1,35 @@ +/* + * QEMU Error Objects - struct definition + * + * Copyright IBM, Corp. 2011 + * Copyright (C) 2011-2015 Red Hat, Inc. + * + * Authors: + * Anthony Liguori + * Markus Armbruster , + * + * This work is licensed under the terms of the GNU LGPL, version 2. See + * the COPYING.LIB file in the top-level directory. + */ + +#ifndef QAPI_ERROR_INTERNAL_H + +struct Error +{ + char *msg; + ErrorClass err_class; + + /* Used for error_abort only, may be NULL. */ + const char *func; + + /* + * src might be NUL-terminated or not. If it is, src_len is negative. + * If it is not, src_len is the length. + */ + const char *src; + int src_len; + int line; + GString *hint; +}; + +#endif diff --git a/include/qapi/error.h b/include/qapi/error.h index 71f8fb2c50e..41e38163804 100644 --- a/include/qapi/error.h +++ b/include/qapi/error.h @@ -437,6 +437,8 @@ Error *error_copy(const Error *err); */ void error_free(Error *err); +G_DEFINE_AUTOPTR_CLEANUP_FUNC(Error, error_free) + /* * Convenience function to assert that *@errp is set, then silently free it. 
*/ @@ -466,6 +468,18 @@ void warn_reportf_err(Error *err, const char *fmt, ...) void error_reportf_err(Error *err, const char *fmt, ...) G_GNUC_PRINTF(2, 3); +/* + * Similar to warn_report_err(), except it prints the message just once. + * Return true when it prints, false otherwise. + */ +bool warn_report_err_once_cond(bool *printed, Error *err); + +#define warn_report_err_once(err) \ + ({ \ + static bool print_once_; \ + warn_report_err_once_cond(&print_once_, err); \ + }) + /* * Just like error_setg(), except you get to specify the error class. * Note: use of error classes other than ERROR_CLASS_GENERIC_ERROR is diff --git a/include/qapi/qmp-registry.h b/include/qapi/qmp-registry.h new file mode 100644 index 00000000000..e0ee1ad3ac6 --- /dev/null +++ b/include/qapi/qmp-registry.h @@ -0,0 +1,67 @@ +/* + * Core Definitions for QAPI/QMP Dispatch + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QAPI_QMP_DISPATCH_H +#define QAPI_QMP_DISPATCH_H + +#include "monitor/monitor.h" +#include "qemu/queue.h" + +typedef void (QmpCommandFunc)(QDict *, QObject **, Error **); + +typedef enum QmpCommandOptions +{ + QCO_NO_SUCCESS_RESP = (1U << 0), + QCO_ALLOW_OOB = (1U << 1), + QCO_ALLOW_PRECONFIG = (1U << 2), + QCO_COROUTINE = (1U << 3), +} QmpCommandOptions; + +typedef struct QmpCommand +{ + const char *name; + /* Runs in coroutine context if QCO_COROUTINE is set */ + QmpCommandFunc *fn; + QmpCommandOptions options; + uint64_t features; + QTAILQ_ENTRY(QmpCommand) node; + bool enabled; + const char *disable_reason; +} QmpCommand; + +typedef QTAILQ_HEAD(QmpCommandList, QmpCommand) QmpCommandList; + +void qmp_register_command(QmpCommandList *cmds, const char *name, + QmpCommandFunc *fn, QmpCommandOptions options, + uint64_t features); +const QmpCommand *qmp_find_command(const QmpCommandList *cmds, + const char *name); +void qmp_disable_command(QmpCommandList *cmds, const char *name, + const char *err_msg); +void qmp_enable_command(QmpCommandList *cmds, const char *name); + +bool qmp_command_is_enabled(const QmpCommand *cmd); +bool qmp_command_available(const QmpCommand *cmd, Error **errp); +const char *qmp_command_name(const QmpCommand *cmd); +bool qmp_has_success_response(const QmpCommand *cmd); +QDict *qmp_error_response(Error *err); +QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *request, + bool allow_oob, Monitor *cur_mon); +bool qmp_is_oob(const QDict *dict); + +typedef void (*qmp_cmd_callback_fn)(const QmpCommand *cmd, void *opaque); + +void qmp_for_each_command(const QmpCommandList *cmds, qmp_cmd_callback_fn fn, + void *opaque); + +#endif diff --git a/include/qapi/qmp/dispatch.h b/include/qapi/qmp/dispatch.h deleted file mode 100644 index f2e956813a1..00000000000 --- a/include/qapi/qmp/dispatch.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Core Definitions for QAPI/QMP Dispatch - * - * Copyright IBM, Corp. 2011 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. - * See the COPYING.LIB file in the top-level directory. 
- * - */ - -#ifndef QAPI_QMP_DISPATCH_H -#define QAPI_QMP_DISPATCH_H - -#include "monitor/monitor.h" -#include "qemu/queue.h" - -typedef void (QmpCommandFunc)(QDict *, QObject **, Error **); - -typedef enum QmpCommandOptions -{ - QCO_NO_SUCCESS_RESP = (1U << 0), - QCO_ALLOW_OOB = (1U << 1), - QCO_ALLOW_PRECONFIG = (1U << 2), - QCO_COROUTINE = (1U << 3), -} QmpCommandOptions; - -typedef struct QmpCommand -{ - const char *name; - /* Runs in coroutine context if QCO_COROUTINE is set */ - QmpCommandFunc *fn; - QmpCommandOptions options; - unsigned special_features; - QTAILQ_ENTRY(QmpCommand) node; - bool enabled; - const char *disable_reason; -} QmpCommand; - -typedef QTAILQ_HEAD(QmpCommandList, QmpCommand) QmpCommandList; - -void qmp_register_command(QmpCommandList *cmds, const char *name, - QmpCommandFunc *fn, QmpCommandOptions options, - unsigned special_features); -const QmpCommand *qmp_find_command(const QmpCommandList *cmds, - const char *name); -void qmp_disable_command(QmpCommandList *cmds, const char *name, - const char *err_msg); -void qmp_enable_command(QmpCommandList *cmds, const char *name); - -bool qmp_command_is_enabled(const QmpCommand *cmd); -bool qmp_command_available(const QmpCommand *cmd, Error **errp); -const char *qmp_command_name(const QmpCommand *cmd); -bool qmp_has_success_response(const QmpCommand *cmd); -QDict *qmp_error_response(Error *err); -QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *request, - bool allow_oob, Monitor *cur_mon); -bool qmp_is_oob(const QDict *dict); - -typedef void (*qmp_cmd_callback_fn)(const QmpCommand *cmd, void *opaque); - -void qmp_for_each_command(const QmpCommandList *cmds, qmp_cmd_callback_fn fn, - void *opaque); - -#endif diff --git a/include/qapi/qmp/qerror.h b/include/qapi/qmp/qerror.h index 38e89762b3a..d1db6f18cd6 100644 --- a/include/qapi/qmp/qerror.h +++ b/include/qapi/qmp/qerror.h @@ -23,10 +23,4 @@ #define QERR_MISSING_PARAMETER \ "Parameter '%s' is missing" -#define QERR_PROPERTY_VALUE_OUT_OF_RANGE \ - "Property %s.%s doesn't take value %" PRId64 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" - -#define QERR_UNSUPPORTED \ - "this feature or command is not currently supported" - #endif /* QERROR_H */ diff --git a/include/qapi/util.h b/include/qapi/util.h index b8254247b8d..29bc4eb865a 100644 --- a/include/qapi/util.h +++ b/include/qapi/util.h @@ -18,7 +18,7 @@ typedef enum { typedef struct QEnumLookup { const char *const *array; - const unsigned char *const special_features; + const uint64_t *const features; const int size; } QEnumLookup; diff --git a/include/qapi/visitor-impl.h b/include/qapi/visitor-impl.h index 2badec5ba46..7beb0dbfa5d 100644 --- a/include/qapi/visitor-impl.h +++ b/include/qapi/visitor-impl.h @@ -115,11 +115,11 @@ struct Visitor /* Optional */ bool (*policy_reject)(Visitor *v, const char *name, - unsigned special_features, Error **errp); + uint64_t features, Error **errp); /* Optional */ bool (*policy_skip)(Visitor *v, const char *name, - unsigned special_features); + uint64_t features); /* Must be set */ VisitorType type; diff --git a/include/qapi/visitor.h b/include/qapi/visitor.h index 27b85d4700f..f6a9b0743f7 100644 --- a/include/qapi/visitor.h +++ b/include/qapi/visitor.h @@ -463,29 +463,29 @@ bool visit_optional(Visitor *v, const char *name, bool *present); /* * Should we reject member @name due to policy? * - * @special_features is the member's special features encoded as a - * bitset of QapiSpecialFeature. 
+ * @features is the member's special features encoded as a + * bitset of QapiFeature. * * @name must not be NULL. This function is only useful between * visit_start_struct() and visit_end_struct(), since only objects * have deprecated members. */ bool visit_policy_reject(Visitor *v, const char *name, - unsigned special_features, Error **errp); + uint64_t features, Error **errp); /* * * Should we skip member @name due to policy? * - * @special_features is the member's special features encoded as a - * bitset of QapiSpecialFeature. + * @features is the member's special features encoded as a + * bitset of QapiFeature. * * @name must not be NULL. This function is only useful between * visit_start_struct() and visit_end_struct(), since only objects * have deprecated members. */ bool visit_policy_skip(Visitor *v, const char *name, - unsigned special_features); + uint64_t features); /* * Set policy for handling deprecated management interfaces. diff --git a/include/qemu-main.h b/include/qemu-main.h index 940960a7dbc..2ee83bedff3 100644 --- a/include/qemu-main.h +++ b/include/qemu-main.h @@ -5,7 +5,19 @@ #ifndef QEMU_MAIN_H #define QEMU_MAIN_H -int qemu_default_main(void); +/* + * The function to run on the main (initial) thread of the process. + * NULL means QEMU's main event loop. + * When non-NULL, QEMU's main event loop will run on a purposely created + * thread, after which the provided function pointer will be invoked on + * the initial thread. + * This is useful on platforms which treat the main thread as special + * (macOS/Darwin) and/or require all UI API calls to occur from the main + * thread. Those platforms can initialise it to a specific function, + * while UI implementations may reset it to NULL during their init if they + * will handle system and UI events on the main thread via QEMU's own main + * event loop. + */ extern int (*qemu_main)(void); #endif /* QEMU_MAIN_H */ diff --git a/include/qemu/accel.h b/include/qemu/accel.h index 972a849a2b9..d3638c7bfda 100644 --- a/include/qemu/accel.h +++ b/include/qemu/accel.h @@ -26,39 +26,8 @@ #include "qom/object.h" #include "exec/hwaddr.h" -struct AccelState { - /*< private >*/ - Object parent_obj; -}; - -typedef struct AccelClass { - /*< private >*/ - ObjectClass parent_class; - /*< public >*/ - - const char *name; - int (*init_machine)(MachineState *ms); -#ifndef CONFIG_USER_ONLY - void (*setup_post)(MachineState *ms, AccelState *accel); - bool (*has_memory)(MachineState *ms, AddressSpace *as, - hwaddr start_addr, hwaddr size); -#endif - bool (*cpu_common_realize)(CPUState *cpu, Error **errp); - void (*cpu_common_unrealize)(CPUState *cpu); - - /* gdbstub related hooks */ - int (*gdbstub_supported_sstep_flags)(void); - - bool *allowed; - /* - * Array of global properties that would be applied when specific - * accelerator is chosen. It works like MachineClass.compat_props - * but it's for accelerators not machines. Accelerator-provided - * global properties may be overridden by machine-type - * compat_props or user-provided global properties. 
- */ - GPtrArray *compat_props; -} AccelClass; +typedef struct AccelState AccelState; +typedef struct AccelClass AccelClass; #define TYPE_ACCEL "accel" @@ -78,12 +47,12 @@ const char *current_accel_name(void); void accel_init_interfaces(AccelClass *ac); -#ifndef CONFIG_USER_ONLY int accel_init_machine(AccelState *accel, MachineState *ms); /* Called just before os_setup_post (ie just before drop OS privs) */ void accel_setup_post(MachineState *ms); -#endif /* !CONFIG_USER_ONLY */ + +void accel_pre_resume(MachineState *ms, bool step_pending); /** * accel_cpu_instance_init: diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h index dc4118ddd9e..f80cba24cf7 100644 --- a/include/qemu/atomic.h +++ b/include/qemu/atomic.h @@ -56,25 +56,13 @@ */ #define signal_barrier() __atomic_signal_fence(__ATOMIC_SEQ_CST) -/* Sanity check that the size of an atomic operation isn't "overly large". +/* + * Sanity check that the size of an atomic operation isn't "overly large". * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not * want to use them because we ought not need them, and this lets us do a * bit of sanity checking that other 32-bit hosts might build. - * - * That said, we have a problem on 64-bit ILP32 hosts in that in order to - * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS. - * We'd prefer not want to pull in everything else TCG related, so handle - * those few cases by hand. - * - * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for - * Sparc we always force the use of sparcv9 in configure. MIPS n32 (ILP32) & - * n64 (LP64) ABIs are both detected using __mips64. */ -#if defined(__x86_64__) || defined(__sparc__) || defined(__mips64) -# define ATOMIC_REG_SIZE 8 -#else -# define ATOMIC_REG_SIZE sizeof(void *) -#endif +#define ATOMIC_REG_SIZE sizeof(void *) /* Weak atomic operations prevent the compiler moving other * loads/stores past the atomic operation load/store. However there is @@ -128,7 +116,7 @@ _val; \ }) #define qatomic_rcu_read(ptr) \ - qatomic_rcu_read_internal((ptr), MAKE_IDENTFIER(_val)) + qatomic_rcu_read_internal((ptr), MAKE_IDENTIFIER(_val)) #define qatomic_rcu_set(ptr, i) do { \ qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \ diff --git a/include/qemu/atomic128.h b/include/qemu/atomic128.h index 88af6d4ea3f..31e5c48d8fa 100644 --- a/include/qemu/atomic128.h +++ b/include/qemu/atomic128.h @@ -13,6 +13,7 @@ #ifndef QEMU_ATOMIC128_H #define QEMU_ATOMIC128_H +#include "qemu/atomic.h" #include "qemu/int128.h" /* @@ -58,7 +59,7 @@ * Therefore, special case each platform. */ -#include "host/atomic128-cas.h" -#include "host/atomic128-ldst.h" +#include "host/atomic128-cas.h.inc" +#include "host/atomic128-ldst.h.inc" #endif /* QEMU_ATOMIC128_H */ diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h index 1cf288445f3..0044333cb51 100644 --- a/include/qemu/bitmap.h +++ b/include/qemu/bitmap.h @@ -69,6 +69,14 @@ #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] +/* + * This is for use with the bit32 versions of set_bit() etc; + * we don't currently support the full range of bitmap operations + * on bitmaps backed by an array of uint32_t. 
+ */ +#define DECLARE_BITMAP32(name, bits) \ + uint32_t name[BITS_TO_U32S(bits)] + #define small_nbits(nbits) \ ((nbits) <= BITS_PER_LONG) diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h index 2c0a2fe7512..c7b838a6283 100644 --- a/include/qemu/bitops.h +++ b/include/qemu/bitops.h @@ -18,16 +18,47 @@ #define BITS_PER_BYTE CHAR_BIT #define BITS_PER_LONG (sizeof (unsigned long) * BITS_PER_BYTE) +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) +#define BITS_TO_U32S(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(uint32_t)) #define BIT(nr) (1UL << (nr)) #define BIT_ULL(nr) (1ULL << (nr)) -#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) -#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) -#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) #define MAKE_64BIT_MASK(shift, length) \ (((~0ULL) >> (64 - (length))) << (shift)) +/** + * DOC: Functions operating on arrays of bits + * + * We provide a set of functions which work on arbitrary-length arrays of + * bits. These come in several flavours which vary in what the type of the + * underlying storage for the bits is: + * + * - Bits stored in an array of 'unsigned long': set_bit(), clear_bit(), etc + * - Bits stored in an array of 'uint32_t': set_bit32(), clear_bit32(), etc + * + * Because the 'unsigned long' type has a size which varies between + * host systems, the versions using 'uint32_t' are often preferable. + * This is particularly the case in a device model where there may + * be some guest-visible register view of the bit array. + * + * We do not currently implement uint32_t versions of find_last_bit(), + * find_next_bit(), find_next_zero_bit(), find_first_bit() or + * find_first_zero_bit(), because we haven't yet needed them. If you + * need them you should implement them similarly to the 'unsigned long' + * versions. + * + * You can declare a bitmap to be used with these functions via the + * DECLARE_BITMAP and DECLARE_BITMAP32 macros in bitmap.h. 
+ */ + +/** + * DOC: 'unsigned long' bit array APIs + */ + +#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) + /** * set_bit - Set a bit in memory * @nr: the bit to set @@ -224,6 +255,141 @@ static inline unsigned long find_first_zero_bit(const unsigned long *addr, return find_next_zero_bit(addr, size, 0); } +/** + * DOC: 'uint32_t' bit array APIs + */ + +#define BIT32_MASK(nr) (1UL << ((nr) % 32)) +#define BIT32_WORD(nr) ((nr) / 32) + +/** + * set_bit32 - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + */ +static inline void set_bit32(long nr, uint32_t *addr) +{ + uint32_t mask = BIT32_MASK(nr); + uint32_t *p = addr + BIT32_WORD(nr); + + *p |= mask; +} + +/** + * set_bit32_atomic - Set a bit in memory atomically + * @nr: the bit to set + * @addr: the address to start counting from + */ +static inline void set_bit32_atomic(long nr, uint32_t *addr) +{ + uint32_t mask = BIT32_MASK(nr); + uint32_t *p = addr + BIT32_WORD(nr); + + qatomic_or(p, mask); +} + +/** + * clear_bit32 - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + */ +static inline void clear_bit32(long nr, uint32_t *addr) +{ + uint32_t mask = BIT32_MASK(nr); + uint32_t *p = addr + BIT32_WORD(nr); + + *p &= ~mask; +} + +/** + * clear_bit32_atomic - Clears a bit in memory atomically + * @nr: Bit to clear + * @addr: Address to start counting from + */ +static inline void clear_bit32_atomic(long nr, uint32_t *addr) +{ + uint32_t mask = BIT32_MASK(nr); + uint32_t *p = addr + BIT32_WORD(nr); + + return qatomic_and(p, ~mask); +} + +/** + * change_bit32 - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + */ +static inline void change_bit32(long nr, uint32_t *addr) +{ + uint32_t mask = BIT32_MASK(nr); + uint32_t *p = addr + BIT32_WORD(nr); + + *p ^= mask; +} + +/** + * test_and_set_bit32 - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + */ +static inline int test_and_set_bit32(long nr, uint32_t *addr) +{ + uint32_t mask = BIT32_MASK(nr); + uint32_t *p = addr + BIT32_WORD(nr); + uint32_t old = *p; + + *p = old | mask; + return (old & mask) != 0; +} + +/** + * test_and_clear_bit32 - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + */ +static inline int test_and_clear_bit32(long nr, uint32_t *addr) +{ + uint32_t mask = BIT32_MASK(nr); + uint32_t *p = addr + BIT32_WORD(nr); + uint32_t old = *p; + + *p = old & ~mask; + return (old & mask) != 0; +} + +/** + * test_and_change_bit32 - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + */ +static inline int test_and_change_bit32(long nr, uint32_t *addr) +{ + uint32_t mask = BIT32_MASK(nr); + uint32_t *p = addr + BIT32_WORD(nr); + uint32_t old = *p; + + *p = old ^ mask; + return (old & mask) != 0; +} + +/** + * test_bit32 - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline int test_bit32(long nr, const uint32_t *addr) +{ + return 1U & (addr[BIT32_WORD(nr)] >> (nr & 31)); +} + +/** + * DOC: Miscellaneous bit operations on single values + * + * These functions are a collection of useful operations + * (rotations, bit extract, bit deposit, etc) on single + * integer values. 
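Illustrative usage sketch of the new uint32_t bit-array helpers declared above (DECLARE_BITMAP32(), set_bit32(), test_bit32(), test_and_clear_bit32()); the device and register names are hypothetical, following the documented use case of a guest-visible register view of the bit array.

    #include "qemu/osdep.h"
    #include "qemu/bitmap.h"
    #include "qemu/bitops.h"

    #define MYDEV_NUM_IRQS 64

    typedef struct MyDevState {
        /* Guest-visible interrupt status, stored as uint32_t words. */
        DECLARE_BITMAP32(irq_status, MYDEV_NUM_IRQS);
    } MyDevState;

    static void mydev_raise_irq(MyDevState *s, int n)
    {
        set_bit32(n, s->irq_status);
    }

    static bool mydev_irq_pending(MyDevState *s, int n)
    {
        return test_bit32(n, s->irq_status);
    }

    static void mydev_ack_irq(MyDevState *s, int n)
    {
        if (test_and_clear_bit32(n, s->irq_status)) {
            /* ... deassert the interrupt line ... */
        }
    }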
+ */ + /** * rol8 - rotate an 8-bit value left * @word: value to rotate diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h index ad22910a5d1..39ba64046a6 100644 --- a/include/qemu/bswap.h +++ b/include/qemu/bswap.h @@ -1,6 +1,8 @@ #ifndef BSWAP_H #define BSWAP_H +#include "qemu/target-info.h" + #undef bswap16 #define bswap16(_x) __builtin_bswap16(_x) #undef bswap32 @@ -140,6 +142,8 @@ CPU_CONVERT(le, 16, uint16_t) CPU_CONVERT(le, 32, uint32_t) CPU_CONVERT(le, 64, uint64_t) +#undef CPU_CONVERT + /* * Same as cpu_to_le{16,32,64}, except that gcc will figure the result is * a compile-time constant if you pass in a constant. So this can be @@ -203,9 +207,6 @@ CPU_CONVERT(le, 64, uint64_t) * te : target endian * (except for byte accesses, which have no endian infix). * - * The target endian accessors are obviously only available to source - * files which are built per-target; they are defined in cpu-all.h. - * * In all cases these functions take a host pointer. * For accessors that take a guest address rather than a * host address, see the cpu_{ld,st}_* accessors defined in @@ -433,4 +434,75 @@ DO_STN_LDN_P(be) #undef le_bswaps #undef be_bswaps + +/* Return ld{word}_{le,be}_p following target endianness. */ +#define LOAD_IMPL(word, args...) \ +do { \ + if (target_big_endian()) { \ + return glue(glue(ld, word), _be_p)(args); \ + } else { \ + return glue(glue(ld, word), _le_p)(args); \ + } \ +} while (0) + +static inline int lduw_p(const void *ptr) +{ + LOAD_IMPL(uw, ptr); +} + +static inline int ldsw_p(const void *ptr) +{ + LOAD_IMPL(sw, ptr); +} + +static inline int ldl_p(const void *ptr) +{ + LOAD_IMPL(l, ptr); +} + +static inline uint64_t ldq_p(const void *ptr) +{ + LOAD_IMPL(q, ptr); +} + +static inline uint64_t ldn_p(const void *ptr, int sz) +{ + LOAD_IMPL(n, ptr, sz); +} + +#undef LOAD_IMPL + +/* Call st{word}_{le,be}_p following target endianness. */ +#define STORE_IMPL(word, args...) \ +do { \ + if (target_big_endian()) { \ + glue(glue(st, word), _be_p)(args); \ + } else { \ + glue(glue(st, word), _le_p)(args); \ + } \ +} while (0) + + +static inline void stw_p(void *ptr, uint16_t v) +{ + STORE_IMPL(w, ptr, v); +} + +static inline void stl_p(void *ptr, uint32_t v) +{ + STORE_IMPL(l, ptr, v); +} + +static inline void stq_p(void *ptr, uint64_t v) +{ + STORE_IMPL(q, ptr, v); +} + +static inline void stn_p(void *ptr, int sz, uint64_t v) +{ + STORE_IMPL(n, ptr, sz, v); +} + +#undef STORE_IMPL + #endif /* BSWAP_H */ diff --git a/include/qemu/cacheflush.h b/include/qemu/cacheflush.h index ae20bcda733..76eb55d818d 100644 --- a/include/qemu/cacheflush.h +++ b/include/qemu/cacheflush.h @@ -26,6 +26,13 @@ static inline void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len) /* icache is coherent and does not require flushing. */ } +#elif defined(EMSCRIPTEN) + +static inline void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len) +{ + /* Wasm doesn't have executable region of memory. 
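Illustrative usage sketch of the target-endian accessors now provided directly by qemu/bswap.h (lduw_p(), stl_p(), etc.), which pick the little- or big-endian variant at run time via target_big_endian(); the function below is hypothetical.

    #include "qemu/osdep.h"
    #include "qemu/bswap.h"

    /* Store then load in target byte order, whatever the current target is. */
    static uint16_t roundtrip_example(void *buf)
    {
        stl_p(buf, 0x11223344);   /* dispatches to stl_le_p() or stl_be_p() */
        return lduw_p(buf);       /* likewise for the 16-bit load           */
    }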
*/ +} + #else void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len); diff --git a/include/qemu/clang-tsa.h b/include/qemu/clang-tsa.h deleted file mode 100644 index ba06fb8c924..00000000000 --- a/include/qemu/clang-tsa.h +++ /dev/null @@ -1,114 +0,0 @@ -#ifndef CLANG_TSA_H -#define CLANG_TSA_H - -/* - * Copyright 2018 Jarkko Hietaniemi - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without - * limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* http://clang.llvm.org/docs/ThreadSafetyAnalysis.html - * - * TSA is available since clang 3.6-ish. - */ -#ifdef __clang__ -# define TSA(x) __attribute__((x)) -#else -# define TSA(x) /* No TSA, make TSA attributes no-ops. */ -#endif - -/* TSA_CAPABILITY() is used to annotate typedefs: - * - * typedef pthread_mutex_t TSA_CAPABILITY("mutex") tsa_mutex; - */ -#define TSA_CAPABILITY(x) TSA(capability(x)) - -/* TSA_GUARDED_BY() is used to annotate global variables, - * the data is guarded: - * - * Foo foo TSA_GUARDED_BY(mutex); - */ -#define TSA_GUARDED_BY(x) TSA(guarded_by(x)) - -/* TSA_PT_GUARDED_BY() is used to annotate global pointers, the data - * behind the pointer is guarded. - * - * Foo* ptr TSA_PT_GUARDED_BY(mutex); - */ -#define TSA_PT_GUARDED_BY(x) TSA(pt_guarded_by(x)) - -/* The TSA_REQUIRES() is used to annotate functions: the caller of the - * function MUST hold the resource, the function will NOT release it. - * - * More than one mutex may be specified, comma-separated. - * - * void Foo(void) TSA_REQUIRES(mutex); - */ -#define TSA_REQUIRES(...) TSA(requires_capability(__VA_ARGS__)) -#define TSA_REQUIRES_SHARED(...) TSA(requires_shared_capability(__VA_ARGS__)) - -/* TSA_EXCLUDES() is used to annotate functions: the caller of the - * function MUST NOT hold resource, the function first acquires the - * resource, and then releases it. - * - * More than one mutex may be specified, comma-separated. - * - * void Foo(void) TSA_EXCLUDES(mutex); - */ -#define TSA_EXCLUDES(...) TSA(locks_excluded(__VA_ARGS__)) - -/* TSA_ACQUIRE() is used to annotate functions: the caller of the - * function MUST NOT hold the resource, the function will acquire the - * resource, but NOT release it. - * - * More than one mutex may be specified, comma-separated. - * - * void Foo(void) TSA_ACQUIRE(mutex); - */ -#define TSA_ACQUIRE(...) TSA(acquire_capability(__VA_ARGS__)) -#define TSA_ACQUIRE_SHARED(...) 
TSA(acquire_shared_capability(__VA_ARGS__)) - -/* TSA_RELEASE() is used to annotate functions: the caller of the - * function MUST hold the resource, but the function will then release it. - * - * More than one mutex may be specified, comma-separated. - * - * void Foo(void) TSA_RELEASE(mutex); - */ -#define TSA_RELEASE(...) TSA(release_capability(__VA_ARGS__)) -#define TSA_RELEASE_SHARED(...) TSA(release_shared_capability(__VA_ARGS__)) - -/* TSA_NO_TSA is used to annotate functions. Use only when you need to. - * - * void Foo(void) TSA_NO_TSA; - */ -#define TSA_NO_TSA TSA(no_thread_safety_analysis) - -/* - * TSA_ASSERT() is used to annotate functions: This function will assert that - * the lock is held. When it returns, the caller of the function is assumed to - * already hold the resource. - * - * More than one mutex may be specified, comma-separated. - */ -#define TSA_ASSERT(...) TSA(assert_capability(__VA_ARGS__)) -#define TSA_ASSERT_SHARED(...) TSA(assert_shared_capability(__VA_ARGS__)) - -#endif /* #ifndef CLANG_TSA_H */ diff --git a/include/qemu/co-shared-resource.h b/include/qemu/co-shared-resource.h index 78ca5850f8f..41be1a81312 100644 --- a/include/qemu/co-shared-resource.h +++ b/include/qemu/co-shared-resource.h @@ -44,13 +44,6 @@ SharedResource *shres_create(uint64_t total); */ void shres_destroy(SharedResource *s); -/* - * Try to allocate an amount of @n. Return true on success, and false - * if there is too little left of the collective resource to fulfill - * the request. - */ -bool co_try_get_from_shres(SharedResource *s, uint64_t n); - /* * Allocate an amount of @n, and, if necessary, yield until * that becomes possible. diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h index 554c5ce7df7..1c2b673c058 100644 --- a/include/qemu/compiler.h +++ b/include/qemu/compiler.h @@ -22,12 +22,7 @@ #define QEMU_EXTERN_C extern #endif -#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__)) -# define QEMU_PACKED __attribute__((gcc_struct, packed)) -#else -# define QEMU_PACKED __attribute__((packed)) -#endif - +#define QEMU_PACKED __attribute__((packed)) #define QEMU_ALIGNED(X) __attribute__((aligned(X))) #ifndef glue @@ -38,7 +33,7 @@ #endif /* Expands into an identifier stemN, where N is another number each time */ -#define MAKE_IDENTFIER(stem) glue(stem, __COUNTER__) +#define MAKE_IDENTIFIER(stem) glue(stem, __COUNTER__) #ifndef likely #define likely(x) __builtin_expect(!!(x), 1) @@ -187,19 +182,6 @@ #define QEMU_DISABLE_CFI #endif -/* - * Apple clang version 14 has a bug in its __builtin_subcll(); define - * BUILTIN_SUBCLL_BROKEN for the offending versions so we can avoid it. - * When a version of Apple clang which has this bug fixed is released - * we can add an upper bound to this check. - * See https://gitlab.com/qemu-project/qemu/-/issues/1631 - * and https://gitlab.com/qemu-project/qemu/-/issues/1659 for details. - * The bug never made it into any upstream LLVM releases, only Apple ones. - */ -#if defined(__apple_build_version__) && __clang_major__ >= 14 -#define BUILTIN_SUBCLL_BROKEN -#endif - #if __has_attribute(annotate) #define QEMU_ANNOTATE(x) __attribute__((annotate(x))) #else @@ -212,6 +194,122 @@ # define QEMU_USED #endif +/* + * Disable -ftrivial-auto-var-init on a local variable. + * + * Use this in cases where there a method in the device I/O path (or other + * important hot paths), that has large variables on the stack. A rule of + * thumb is that "large" means a method with 4kb data in the local stack + * frame. 
Any variables which are KB in size, should be annotated with this + * attribute, to pre-emptively eliminate any potential overhead from the + * compiler's implicit zero'ing of memory. + * + * Given that this turns off a security hardening feature, when using this + * to flag variables, it is important that the code is double-checked to + * ensure there is no possible use of uninitialized data in the method. + */ +#if __has_attribute(uninitialized) +# define QEMU_UNINITIALIZED __attribute__((uninitialized)) +#else +# define QEMU_UNINITIALIZED +#endif + +/* + * http://clang.llvm.org/docs/ThreadSafetyAnalysis.html + * + * TSA is available since clang 3.6-ish. + */ +#ifdef __clang__ +# define TSA(x) __attribute__((x)) +#else +# define TSA(x) /* No TSA, make TSA attributes no-ops. */ +#endif + +/* + * TSA_CAPABILITY() is used to annotate typedefs: + * + * typedef pthread_mutex_t TSA_CAPABILITY("mutex") tsa_mutex; + */ +#define TSA_CAPABILITY(x) TSA(capability(x)) + +/* + * TSA_GUARDED_BY() is used to annotate global variables, + * the data is guarded: + * + * Foo foo TSA_GUARDED_BY(mutex); + */ +#define TSA_GUARDED_BY(x) TSA(guarded_by(x)) + +/* + * TSA_PT_GUARDED_BY() is used to annotate global pointers, the data + * behind the pointer is guarded. + * + * Foo* ptr TSA_PT_GUARDED_BY(mutex); + */ +#define TSA_PT_GUARDED_BY(x) TSA(pt_guarded_by(x)) + +/* + * The TSA_REQUIRES() is used to annotate functions: the caller of the + * function MUST hold the resource, the function will NOT release it. + * + * More than one mutex may be specified, comma-separated. + * + * void Foo(void) TSA_REQUIRES(mutex); + */ +#define TSA_REQUIRES(...) TSA(requires_capability(__VA_ARGS__)) +#define TSA_REQUIRES_SHARED(...) TSA(requires_shared_capability(__VA_ARGS__)) + +/* + * TSA_EXCLUDES() is used to annotate functions: the caller of the + * function MUST NOT hold resource, the function first acquires the + * resource, and then releases it. + * + * More than one mutex may be specified, comma-separated. + * + * void Foo(void) TSA_EXCLUDES(mutex); + */ +#define TSA_EXCLUDES(...) TSA(locks_excluded(__VA_ARGS__)) + +/* + * TSA_ACQUIRE() is used to annotate functions: the caller of the + * function MUST NOT hold the resource, the function will acquire the + * resource, but NOT release it. + * + * More than one mutex may be specified, comma-separated. + * + * void Foo(void) TSA_ACQUIRE(mutex); + */ +#define TSA_ACQUIRE(...) TSA(acquire_capability(__VA_ARGS__)) +#define TSA_ACQUIRE_SHARED(...) TSA(acquire_shared_capability(__VA_ARGS__)) + +/* + * TSA_RELEASE() is used to annotate functions: the caller of the + * function MUST hold the resource, but the function will then release it. + * + * More than one mutex may be specified, comma-separated. + * + * void Foo(void) TSA_RELEASE(mutex); + */ +#define TSA_RELEASE(...) TSA(release_capability(__VA_ARGS__)) +#define TSA_RELEASE_SHARED(...) TSA(release_shared_capability(__VA_ARGS__)) + +/* + * TSA_NO_TSA is used to annotate functions. Use only when you need to. + * + * void Foo(void) TSA_NO_TSA; + */ +#define TSA_NO_TSA TSA(no_thread_safety_analysis) + +/* + * TSA_ASSERT() is used to annotate functions: This function will assert that + * the lock is held. When it returns, the caller of the function is assumed to + * already hold the resource. + * + * More than one mutex may be specified, comma-separated. + */ +#define TSA_ASSERT(...) TSA(assert_capability(__VA_ARGS__)) +#define TSA_ASSERT_SHARED(...) 
TSA(assert_shared_capability(__VA_ARGS__)) + /* * Ugly CPP trick that is like "defined FOO", but also works in C * code. Useful to replace #ifdef with "if" statements; assumes diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h index ff3084538b8..e545bbf620f 100644 --- a/include/qemu/coroutine.h +++ b/include/qemu/coroutine.h @@ -16,6 +16,7 @@ #define QEMU_COROUTINE_H #include "qemu/coroutine-core.h" +#include "qemu/atomic.h" #include "qemu/queue.h" #include "qemu/timer.h" diff --git a/include/qemu/crc-ccitt.h b/include/qemu/crc-ccitt.h index 8918dafe078..ce28e297209 100644 --- a/include/qemu/crc-ccitt.h +++ b/include/qemu/crc-ccitt.h @@ -8,7 +8,7 @@ * * From Linux kernel v5.10 include/linux/crc-ccitt.h * - * SPDX-License-Identifier: GPL-2.0 + * SPDX-License-Identifier: GPL-2.0-only */ #ifndef CRC_CCITT_H diff --git a/include/qemu/cutils.h b/include/qemu/cutils.h index da15547bfbd..36c68ce86c5 100644 --- a/include/qemu/cutils.h +++ b/include/qemu/cutils.h @@ -241,13 +241,10 @@ int uleb128_decode_small(const uint8_t *in, uint32_t *n); int qemu_pstrcmp0(const char **str1, const char **str2); /* Find program directory, and save it for later usage with - * qemu_get_exec_dir(). + * get_relocated_path(). * Try OS specific API first, if not working, parse from argv0. */ void qemu_init_exec_dir(const char *argv0); -/* Get the saved exec dir. */ -const char *qemu_get_exec_dir(void); - /** * get_relocated_path: * @dir: the directory (typically a `CONFIG_*DIR` variable) to be relocated. @@ -305,4 +302,19 @@ GString *qemu_hexdump_line(GString *str, const void *buf, size_t len, void qemu_hexdump(FILE *fp, const char *prefix, const void *bufptr, size_t size); +/** + * qemu_hexdump_to_buffer: + * @buffer: output string buffer + * @buffer_size: amount of available space in buffer. Must be at least + * data_size*2+1. + * @data: input bytes + * @data_size: number of bytes in data + * + * Converts the @data_size bytes in @data into hex digit pairs, writing them to + * @buffer. Finally, a nul terminating character is written; @buffer therefore + * needs space for (data_size*2+1) chars. + */ +void qemu_hexdump_to_buffer(char *restrict buffer, size_t buffer_size, + const uint8_t *restrict data, size_t data_size); + #endif diff --git a/include/qemu/datadir.h b/include/qemu/datadir.h index 21f9097f582..cca32af3008 100644 --- a/include/qemu/datadir.h +++ b/include/qemu/datadir.h @@ -1,11 +1,16 @@ #ifndef QEMU_DATADIR_H #define QEMU_DATADIR_H -#define QEMU_FILE_TYPE_BIOS 0 -#define QEMU_FILE_TYPE_KEYMAP 1 +typedef enum { + QEMU_FILE_TYPE_BIOS, + QEMU_FILE_TYPE_DTB, + QEMU_FILE_TYPE_KEYMAP, +} QemuFileType; + /** * qemu_find_file: * @type: QEMU_FILE_TYPE_BIOS (for BIOS, VGA BIOS) + * QEMU_FILE_TYPE_DTB (for device tree blobs) * or QEMU_FILE_TYPE_KEYMAP (for keymaps). * @name: Relative or absolute file name * @@ -20,7 +25,7 @@ * * Returns: a path that can access @name, or NULL if no matching file exists. 
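Illustrative usage sketch of the QEMU_UNINITIALIZED annotation added to qemu/compiler.h above; the device function and the DMA/processing helpers are hypothetical. The point is that a large hot-path stack buffer which is always fully written before being read can opt out of the compiler's implicit zero-initialisation.

    #include "qemu/osdep.h"

    static void mydev_handle_request(void *opaque, uint64_t addr, size_t len)
    {
        /* Large scratch buffer on a hot I/O path; written before any read. */
        QEMU_UNINITIALIZED uint8_t bounce[8192];

        len = MIN(len, sizeof(bounce));
        /* ... DMA-read 'len' bytes at 'addr' into bounce, then process ... */
    }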
*/ -char *qemu_find_file(int type, const char *name); +char *qemu_find_file(QemuFileType type, const char *name); void qemu_add_default_firmwarepath(void); void qemu_add_data_dir(char *path); void qemu_list_data_dirs(void); diff --git a/include/qemu/envlist.h b/include/qemu/envlist.h index 6006dfae44c..b2883f66599 100644 --- a/include/qemu/envlist.h +++ b/include/qemu/envlist.h @@ -7,8 +7,6 @@ envlist_t *envlist_create(void); void envlist_free(envlist_t *); int envlist_setenv(envlist_t *, const char *); int envlist_unsetenv(envlist_t *, const char *); -int envlist_parse_set(envlist_t *, const char *); -int envlist_parse_unset(envlist_t *, const char *); char **envlist_to_environ(const envlist_t *, size_t *); #endif /* ENVLIST_H */ diff --git a/include/qemu/fifo8.h b/include/qemu/fifo8.h index d1d06754d84..4f768d4ee38 100644 --- a/include/qemu/fifo8.h +++ b/include/qemu/fifo8.h @@ -62,6 +62,17 @@ void fifo8_push_all(Fifo8 *fifo, const uint8_t *data, uint32_t num); */ uint8_t fifo8_pop(Fifo8 *fifo); +/** + * fifo8_peek: + * @fifo: fifo to peek from + * + * Peek the data byte at the current head of the FIFO. Clients are responsible + * for checking for emptyness using fifo8_is_empty(). + * + * Returns: The peeked data byte. + */ +uint8_t fifo8_peek(Fifo8 *fifo); + /** * fifo8_pop_buf: * @fifo: FIFO to pop from @@ -76,6 +87,20 @@ uint8_t fifo8_pop(Fifo8 *fifo); */ uint32_t fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen); +/** + * fifo8_peek_buf: + * @fifo: FIFO to read from + * @dest: the buffer to write the data into (can be NULL) + * @destlen: size of @dest and maximum number of bytes to peek + * + * Peek a number of elements from the FIFO up to a maximum of @destlen. + * The peeked data is copied into the @dest buffer. + * Care is taken when the data wraps around in the ring buffer. + * + * Returns: number of bytes peeked. + */ +uint32_t fifo8_peek_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen); + /** * fifo8_pop_bufptr: * @fifo: FIFO to pop from diff --git a/include/qemu/futex.h b/include/qemu/futex.h index 91ae88966e1..607613eec83 100644 --- a/include/qemu/futex.h +++ b/include/qemu/futex.h @@ -1,5 +1,5 @@ /* - * Wrappers around Linux futex syscall + * Wrappers around Linux futex syscall and similar * * Copyright Red Hat, Inc. 2017 * @@ -11,17 +11,35 @@ * */ +/* + * Note that a wake-up can also be caused by common futex usage patterns in + * unrelated code that happened to have previously used the futex word's + * memory location (e.g., typical futex-based implementations of Pthreads + * mutexes can cause this under some conditions). Therefore, qemu_futex_wait() + * callers should always conservatively assume that it is a spurious wake-up, + * and use the futex word's value (i.e., the user-space synchronization scheme) + * to decide whether to continue to block or not. + */ + #ifndef QEMU_FUTEX_H #define QEMU_FUTEX_H +#define HAVE_FUTEX + +#ifdef CONFIG_LINUX #include #include #define qemu_futex(...) 
syscall(__NR_futex, __VA_ARGS__) -static inline void qemu_futex_wake(void *f, int n) +static inline void qemu_futex_wake_all(void *f) { - qemu_futex(f, FUTEX_WAKE, n, NULL, NULL, 0); + qemu_futex(f, FUTEX_WAKE, INT_MAX, NULL, NULL, 0); +} + +static inline void qemu_futex_wake_single(void *f) +{ + qemu_futex(f, FUTEX_WAKE, 1, NULL, NULL, 0); } static inline void qemu_futex_wait(void *f, unsigned val) @@ -37,5 +55,25 @@ static inline void qemu_futex_wait(void *f, unsigned val) } } } +#elif defined(CONFIG_WIN32) +#include + +static inline void qemu_futex_wake_all(void *f) +{ + WakeByAddressAll(f); +} + +static inline void qemu_futex_wake_single(void *f) +{ + WakeByAddressSingle(f); +} + +static inline void qemu_futex_wait(void *f, unsigned val) +{ + WaitOnAddress(f, &val, sizeof(val), INFINITE); +} +#else +#undef HAVE_FUTEX +#endif #endif /* QEMU_FUTEX_H */ diff --git a/include/qemu/help-texts.h b/include/qemu/help-texts.h index 353ab2ad8b0..bc8fab91697 100644 --- a/include/qemu/help-texts.h +++ b/include/qemu/help-texts.h @@ -2,7 +2,7 @@ #define QEMU_HELP_TEXTS_H /* Copyright string for -version arguments, About dialogs, etc */ -#define QEMU_COPYRIGHT "Copyright (c) 2003-2024 " \ +#define QEMU_COPYRIGHT "Copyright (c) 2003-2025 " \ "Fabrice Bellard and the QEMU Project developers" /* Bug reporting information for --help arguments, About dialogs, etc */ diff --git a/include/qemu/host-pci-mmio.h b/include/qemu/host-pci-mmio.h new file mode 100644 index 00000000000..a8ed9938ac5 --- /dev/null +++ b/include/qemu/host-pci-mmio.h @@ -0,0 +1,136 @@ +/* + * API for host PCI MMIO accesses (e.g. Linux VFIO BARs) + * + * Copyright 2025 IBM Corp. + * Author(s): Farhan Ali + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HOST_PCI_MMIO_H +#define HOST_PCI_MMIO_H + +#include "qemu/bswap.h" +#include "qemu/s390x_pci_mmio.h" + +static inline uint8_t host_pci_ldub_p(const void *ioaddr) +{ + uint8_t ret = 0; +#ifdef __s390x__ + ret = s390x_pci_mmio_read_8(ioaddr); +#else + ret = ldub_p(ioaddr); +#endif + + return ret; +} + +static inline uint16_t host_pci_lduw_le_p(const void *ioaddr) +{ + uint16_t ret = 0; +#ifdef __s390x__ + ret = le16_to_cpu(s390x_pci_mmio_read_16(ioaddr)); +#else + ret = lduw_le_p(ioaddr); +#endif + + return ret; +} + +static inline uint32_t host_pci_ldl_le_p(const void *ioaddr) +{ + uint32_t ret = 0; +#ifdef __s390x__ + ret = le32_to_cpu(s390x_pci_mmio_read_32(ioaddr)); +#else + ret = ldl_le_p(ioaddr); +#endif + + return ret; +} + +static inline uint64_t host_pci_ldq_le_p(const void *ioaddr) +{ + uint64_t ret = 0; +#ifdef __s390x__ + ret = le64_to_cpu(s390x_pci_mmio_read_64(ioaddr)); +#else + ret = ldq_le_p(ioaddr); +#endif + + return ret; +} + +static inline void host_pci_stb_p(void *ioaddr, uint8_t val) +{ +#ifdef __s390x__ + s390x_pci_mmio_write_8(ioaddr, val); +#else + stb_p(ioaddr, val); +#endif +} + +static inline void host_pci_stw_le_p(void *ioaddr, uint16_t val) +{ +#ifdef __s390x__ + s390x_pci_mmio_write_16(ioaddr, cpu_to_le16(val)); +#else + stw_le_p(ioaddr, val); +#endif +} + +static inline void host_pci_stl_le_p(void *ioaddr, uint32_t val) +{ +#ifdef __s390x__ + s390x_pci_mmio_write_32(ioaddr, cpu_to_le32(val)); +#else + stl_le_p(ioaddr, val); +#endif +} + +static inline void host_pci_stq_le_p(void *ioaddr, uint64_t val) +{ +#ifdef __s390x__ + s390x_pci_mmio_write_64(ioaddr, cpu_to_le64(val)); +#else + stq_le_p(ioaddr, val); +#endif +} + +static inline uint64_t host_pci_ldn_le_p(const void *ioaddr, int sz) +{ + switch (sz) { + case 1: + return 
host_pci_ldub_p(ioaddr); + case 2: + return host_pci_lduw_le_p(ioaddr); + case 4: + return host_pci_ldl_le_p(ioaddr); + case 8: + return host_pci_ldq_le_p(ioaddr); + default: + g_assert_not_reached(); + } +} + +static inline void host_pci_stn_le_p(void *ioaddr, int sz, uint64_t v) +{ + switch (sz) { + case 1: + host_pci_stb_p(ioaddr, v); + break; + case 2: + host_pci_stw_le_p(ioaddr, v); + break; + case 4: + host_pci_stl_le_p(ioaddr, v); + break; + case 8: + host_pci_stq_le_p(ioaddr, v); + break; + default: + g_assert_not_reached(); + } +} + +#endif diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h index ead97d354d6..dd558589cb5 100644 --- a/include/qemu/host-utils.h +++ b/include/qemu/host-utils.h @@ -313,6 +313,15 @@ static inline int ctpop8(uint8_t val) return __builtin_popcount(val); } +/* + * parity8 - return the parity (1 = odd) of an 8-bit value. + * @val: The value to search + */ +static inline int parity8(uint8_t val) +{ + return __builtin_parity(val); +} + /** * ctpop16 - count the population of one bits in a 16-bit value. * @val: The value to search @@ -668,7 +677,7 @@ static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry) */ static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow) { -#if __has_builtin(__builtin_subcll) && !defined(BUILTIN_SUBCLL_BROKEN) +#if __has_builtin(__builtin_subcll) unsigned long long b = *pborrow; x = __builtin_subcll(x, y, b, &b); *pborrow = b & 1; diff --git a/include/qemu/iov.h b/include/qemu/iov.h index 63a1c01965d..9535673c13d 100644 --- a/include/qemu/iov.h +++ b/include/qemu/iov.h @@ -1,6 +1,7 @@ /* * Helpers for using (partial) iovecs. * + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates * Copyright (C) 2010 Red Hat, Inc. * * Author(s): @@ -30,7 +31,7 @@ size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt); * only part of data will be copied, up to the end of the iovec. * Number of bytes actually copied will be returned, which is * min(bytes, iov_size(iov)-offset) - * `Offset' must point to the inside of iovec. + * Returns 0 when `offset' points to the outside of iovec. */ size_t iov_from_buf_full(const struct iovec *iov, unsigned int iov_cnt, size_t offset, const void *buf, size_t bytes); @@ -66,15 +67,42 @@ iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt, /** * Set data bytes pointed out by iovec `iov' of size `iov_cnt' elements, * starting at byte offset `start', to value `fillc', repeating it - * `bytes' number of times. `Offset' must point to the inside of iovec. + * `bytes' number of times. * If `bytes' is large enough, only last bytes portion of iovec, * up to the end of it, will be filled with the specified value. * Function return actual number of bytes processed, which is * min(size, iov_size(iov) - offset). + * Returns 0 when `offset' points to the outside of iovec. */ size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt, size_t offset, int fillc, size_t bytes); +/* + * Send/recv data from/to iovec buffers directly, with the provided + * socket flags. + * + * `offset' bytes in the beginning of iovec buffer are skipped and + * next `bytes' bytes are used, which must be within data of iovec. 
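Illustrative usage sketch of the futex wrappers above: because qemu_futex_wait() may return spuriously, callers re-check the futex word in a loop, exactly as the header comment requires. The event structure and field names are hypothetical.

    #include "qemu/osdep.h"
    #include "qemu/atomic.h"
    #include "qemu/futex.h"

    #ifdef HAVE_FUTEX
    typedef struct { unsigned value; } MyEvent;   /* 0 = clear, 1 = set */

    static void my_event_wait(MyEvent *ev)
    {
        while (qatomic_read(&ev->value) == 0) {
            /* May wake spuriously; the loop re-checks the word. */
            qemu_futex_wait(&ev->value, 0);
        }
    }

    static void my_event_set(MyEvent *ev)
    {
        qatomic_set(&ev->value, 1);
        qemu_futex_wake_all(&ev->value);
    }
    #endif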
+ * + * r = iov_send_recv_with_flags(sockfd, sockflags, iov, iovcnt, + * offset, bytes, true); + * + * is logically equivalent to + * + * char *buf = malloc(bytes); + * iov_to_buf(iov, iovcnt, offset, buf, bytes); + * r = send(sockfd, buf, bytes, sockflags); + * free(buf); + * + * For iov_send_recv_with_flags() _whole_ area being sent or received + * should be within the iovec, not only beginning of it. + */ +ssize_t iov_send_recv_with_flags(int sockfd, int sockflags, + const struct iovec *iov, + unsigned iov_cnt, size_t offset, + size_t bytes, + bool do_send); + /* * Send/recv data from/to iovec buffers directly * diff --git a/include/qemu/iova-tree.h b/include/qemu/iova-tree.h index 2a10a7052e4..14e82a22d58 100644 --- a/include/qemu/iova-tree.h +++ b/include/qemu/iova-tree.h @@ -23,7 +23,7 @@ * for the thread safety issue. */ -#include "exec/memory.h" +#include "system/memory.h" #include "exec/hwaddr.h" #define IOVA_OK (0) @@ -40,6 +40,28 @@ typedef struct DMAMap { } QEMU_PACKED DMAMap; typedef gboolean (*iova_tree_iterator)(DMAMap *map); +/** + * gpa_tree_new: + * + * Create a new GPA->IOVA tree. + * + * Returns: the tree point on success, or NULL otherwise. + */ +IOVATree *gpa_tree_new(void); + +/** + * gpa_tree_insert: + * + * @tree: The GPA->IOVA tree we're inserting the mapping to + * @map: The GPA->IOVA mapping to insert + * + * Inserts a GPA range to the GPA->IOVA tree. If there are overlapped + * ranges, IOVA_ERR_OVERLAP will be returned. + * + * Return: 0 if successful, < 0 otherwise. + */ +int gpa_tree_insert(IOVATree *tree, const DMAMap *map); + /** * iova_tree_new: * @@ -111,31 +133,6 @@ const DMAMap *iova_tree_find(const IOVATree *tree, const DMAMap *map); */ const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map); -/** - * iova_tree_find_address: - * - * @tree: the iova tree to search from - * @iova: the iova address to find - * - * Similar to iova_tree_find(), but it tries to find mapping with - * range iova=iova & size=0. - * - * Return: same as iova_tree_find(). - */ -const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova); - -/** - * iova_tree_foreach: - * - * @tree: the iova tree to iterate on - * @iterator: the iterator for the mappings, return true to stop - * - * Iterate over the iova tree. - * - * Return: 1 if found any overlap, 0 if not, <0 if error. - */ -void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator); - /** * iova_tree_alloc_map: * diff --git a/include/qemu/job.h b/include/qemu/job.h index 2b873f25768..ead31578d3d 100644 --- a/include/qemu/job.h +++ b/include/qemu/job.h @@ -263,7 +263,7 @@ struct JobDriver { * This callback will not be invoked if the job has already failed. * If it fails, abort and then clean will be called. */ - int (*prepare)(Job *job); + int GRAPH_UNLOCKED_PTR (*prepare)(Job *job); /** * If the callback is not NULL, it will be invoked when all the jobs @@ -283,7 +283,7 @@ struct JobDriver { * All jobs will complete with a call to either .commit() or .abort() but * never both. */ - void (*abort)(Job *job); + void GRAPH_UNLOCKED_PTR (*abort)(Job *job); /** * If the callback is not NULL, it will be invoked after a call to either @@ -545,6 +545,9 @@ bool job_is_ready(Job *job); /* Same as job_is_ready(), but called with job lock held. */ bool job_is_ready_locked(Job *job); +/** Returns whether the job is paused. Called with job_mutex *not* held. */ +bool job_is_paused(Job *job); + /** * Request @job to pause at the next pause point. Must be paired with * job_resume(). 
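Illustrative usage sketch of the host PCI MMIO helpers from the new qemu/host-pci-mmio.h above, which transparently fall back to the s390x PCI MMIO syscall wrappers where required; the BAR pointer and register offset are hypothetical (for example, a VFIO region mapping).

    #include "qemu/osdep.h"
    #include "qemu/host-pci-mmio.h"

    /* Read-modify-write a little-endian 32-bit register in a mapped BAR. */
    static void set_ctrl_bits(void *bar, size_t reg_off, uint32_t bits)
    {
        uint8_t *reg = (uint8_t *)bar + reg_off;
        uint32_t val = host_pci_ldl_le_p(reg);

        host_pci_stl_le_p(reg, val | bits);
    }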
If the job is supposed to be resumed by user action, call diff --git a/include/qemu/lockcnt.h b/include/qemu/lockcnt.h new file mode 100644 index 00000000000..5a2800e3f18 --- /dev/null +++ b/include/qemu/lockcnt.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QemuLockCnt implementation + * + * Copyright Red Hat, Inc. 2017 + * + * Author: + * Paolo Bonzini + * + */ + +#ifndef QEMU_LOCKCNT_H +#define QEMU_LOCKCNT_H + +#include "qemu/thread.h" + +typedef struct QemuLockCnt QemuLockCnt; + +struct QemuLockCnt { +#ifndef HAVE_FUTEX + QemuMutex mutex; +#endif + unsigned count; +}; + +/** + * qemu_lockcnt_init: initialize a QemuLockcnt + * @lockcnt: the lockcnt to initialize + * + * Initialize lockcnt's counter to zero and prepare its mutex + * for usage. + */ +void qemu_lockcnt_init(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_destroy: destroy a QemuLockcnt + * @lockcnt: the lockcnt to destruct + * + * Destroy lockcnt's mutex. + */ +void qemu_lockcnt_destroy(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_inc: increment a QemuLockCnt's counter + * @lockcnt: the lockcnt to operate on + * + * If the lockcnt's count is zero, wait for critical sections + * to finish and increment lockcnt's count to 1. If the count + * is not zero, just increment it. + * + * Because this function can wait on the mutex, it must not be + * called while the lockcnt's mutex is held by the current thread. + * For the same reason, qemu_lockcnt_inc can also contribute to + * AB-BA deadlocks. This is a sample deadlock scenario:: + * + * thread 1 thread 2 + * ------------------------------------------------------- + * qemu_lockcnt_lock(&lc1); + * qemu_lockcnt_lock(&lc2); + * qemu_lockcnt_inc(&lc2); + * qemu_lockcnt_inc(&lc1); + */ +void qemu_lockcnt_inc(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_dec: decrement a QemuLockCnt's counter + * @lockcnt: the lockcnt to operate on + */ +void qemu_lockcnt_dec(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and + * possibly lock it. + * @lockcnt: the lockcnt to operate on + * + * Decrement lockcnt's count. If the new count is zero, lock + * the mutex and return true. Otherwise, return false. + */ +bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and + * lock it. + * @lockcnt: the lockcnt to operate on + * + * If the count is 1, decrement the count to zero, lock + * the mutex and return true. Otherwise, return false. + */ +bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_lock: lock a QemuLockCnt's mutex. + * @lockcnt: the lockcnt to operate on + * + * Remember that concurrent visits are not blocked unless the count is + * also zero. You can use qemu_lockcnt_count to check for this inside a + * critical section. + */ +void qemu_lockcnt_lock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_unlock: release a QemuLockCnt's mutex. + * @lockcnt: the lockcnt to operate on. + */ +void qemu_lockcnt_unlock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt. + * @lockcnt: the lockcnt to operate on. + * + * This is the same as + * + * qemu_lockcnt_unlock(lockcnt); + * qemu_lockcnt_inc(lockcnt); + * + * but more efficient. + */ +void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt); + +/** + * qemu_lockcnt_count: query a LockCnt's count. + * @lockcnt: the lockcnt to query. + * + * Note that the count can change at any time. 
Still, while the + * lockcnt is locked, one can usefully check whether the count + * is non-zero. + */ +unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt); + +#endif diff --git a/include/qemu/log.h b/include/qemu/log.h index e10e24cd4fc..aae72985f0d 100644 --- a/include/qemu/log.h +++ b/include/qemu/log.h @@ -37,6 +37,7 @@ bool qemu_log_separate(void); #define LOG_PER_THREAD (1 << 20) #define CPU_LOG_TB_VPU (1 << 21) #define LOG_TB_OP_PLUGIN (1 << 22) +#define LOG_INVALID_MEM (1 << 23) /* Lock/unlock output. */ @@ -83,6 +84,8 @@ typedef struct QEMULogItem { extern const QEMULogItem qemu_log_items[]; +ssize_t rust_fwrite(const void *ptr, size_t size, size_t nmemb, FILE *stream); + bool qemu_set_log(int log_flags, Error **errp); bool qemu_set_log_filename(const char *filename, Error **errp); bool qemu_set_log_filename_flags(const char *name, int flags, Error **errp); diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h index 5764db157c9..4e2436b1968 100644 --- a/include/qemu/main-loop.h +++ b/include/qemu/main-loop.h @@ -27,7 +27,7 @@ #include "block/aio.h" #include "qom/object.h" -#include "sysemu/event-loop-base.h" +#include "system/event-loop-base.h" #define SIG_IPI SIGUSR1 @@ -247,6 +247,14 @@ void event_notifier_set_handler(EventNotifier *e, GSource *iohandler_get_g_source(void); AioContext *iohandler_get_aio_context(void); +/** + * rust_bql_mock_lock: + * + * Called from Rust doctests to make bql_lock() return true. + * Do not touch. + */ +void rust_bql_mock_lock(void); + /** * bql_locked: Return lock status of the Big QEMU Lock (BQL) * @@ -262,6 +270,21 @@ AioContext *iohandler_get_aio_context(void); */ bool bql_locked(void); +/** + * bql_block: Allow/deny releasing the BQL + * + * The Big QEMU Lock (BQL) is used to provide interior mutability to + * Rust code, but this only works if other threads cannot run while + * the Rust code has an active borrow. This is because C code in + * other threads could come in and mutate data under the Rust code's + * feet. + * + * @increase: Whether to increase or decrease the blocking counter. + * Releasing the BQL while the counter is nonzero triggers + * an assertion failure. + */ +void bql_block_unlock(bool increase); + /** * qemu_in_main_thread: return whether it's possible to safely access * the global state of the block layer. diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h index 720ed21a7e4..96fe51bc390 100644 --- a/include/qemu/osdep.h +++ b/include/qemu/osdep.h @@ -8,7 +8,7 @@ * To avoid getting into possible circular include dependencies, this * file should not include any other QEMU headers, with the exceptions * of config-host.h, config-target.h, qemu/compiler.h, - * sysemu/os-posix.h, sysemu/os-win32.h, glib-compat.h and + * system/os-posix.h, system/os-win32.h, system/os-wasm.h, glib-compat.h and * qemu/typedefs.h, all of which are doing a similar job to this file * and are under similar constraints. * @@ -128,7 +128,7 @@ QEMU_EXTERN_C int daemon(int, int); #include #include #include -/* setjmp must be declared before sysemu/os-win32.h +/* setjmp must be declared before system/os-win32.h * because it is redefined there. 
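Illustrative usage sketch of the QemuLockCnt API declared in the new qemu/lockcnt.h above: readers only take a reference around their visit, while a writer locks the mutex and checks the count before freeing anything; the handler list itself is hypothetical.

    #include "qemu/osdep.h"
    #include "qemu/lockcnt.h"

    static QemuLockCnt handlers_lockcnt;

    /* Reader: walk the list while holding a reference, not the mutex. */
    static void visit_handlers(void)
    {
        qemu_lockcnt_inc(&handlers_lockcnt);
        /* ... iterate over the handler list ... */
        qemu_lockcnt_dec(&handlers_lockcnt);
    }

    /* Writer: freeing entries is only safe when no readers are in flight. */
    static void remove_handler(void)
    {
        qemu_lockcnt_lock(&handlers_lockcnt);
        if (qemu_lockcnt_count(&handlers_lockcnt) == 0) {
            /* ... unlink and free the handler ... */
        }
        qemu_lockcnt_unlock(&handlers_lockcnt);
    }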
*/ #include #include @@ -161,11 +161,15 @@ QEMU_EXTERN_C int daemon(int, int); #include "glib-compat.h" #ifdef _WIN32 -#include "sysemu/os-win32.h" +#include "system/os-win32.h" #endif -#ifdef CONFIG_POSIX -#include "sysemu/os-posix.h" +#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN) +#include "system/os-posix.h" +#endif + +#if defined(EMSCRIPTEN) +#include "system/os-wasm.h" #endif #ifdef __cplusplus @@ -297,6 +301,10 @@ void QEMU_ERROR("code path is reachable") #error building with G_DISABLE_ASSERT is not supported #endif +#ifndef OFF_MAX +#define OFF_MAX (sizeof (off_t) == 8 ? INT64_MAX : INT32_MAX) +#endif + #ifndef O_LARGEFILE #define O_LARGEFILE 0 #endif @@ -399,7 +407,7 @@ void QEMU_ERROR("code path is reachable") }) #undef MIN #define MIN(a, b) \ - MIN_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b)) + MIN_INTERNAL((a), (b), MAKE_IDENTIFIER(_a), MAKE_IDENTIFIER(_b)) #define MAX_INTERNAL(a, b, _a, _b) \ ({ \ @@ -408,7 +416,7 @@ void QEMU_ERROR("code path is reachable") }) #undef MAX #define MAX(a, b) \ - MAX_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b)) + MAX_INTERNAL((a), (b), MAKE_IDENTIFIER(_a), MAKE_IDENTIFIER(_b)) #ifdef __COVERITY__ # define MIN_CONST(a, b) ((a) < (b) ? (a) : (b)) @@ -440,7 +448,7 @@ void QEMU_ERROR("code path is reachable") _a == 0 ? _b : (_b == 0 || _b > _a) ? _a : _b; \ }) #define MIN_NON_ZERO(a, b) \ - MIN_NON_ZERO_INTERNAL((a), (b), MAKE_IDENTFIER(_a), MAKE_IDENTFIER(_b)) + MIN_NON_ZERO_INTERNAL((a), (b), MAKE_IDENTIFIER(_a), MAKE_IDENTIFIER(_b)) /* * Round number down to multiple. Safe when m is not a power of 2 (see @@ -505,6 +513,7 @@ int qemu_daemon(int nochdir, int noclose); void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared, bool noreserve); void qemu_anon_ram_free(void *ptr, size_t size); +int qemu_shm_alloc(size_t size, Error **errp); #ifdef _WIN32 #define HAVE_CHARDEV_SERIAL 1 @@ -626,6 +635,15 @@ bool qemu_write_pidfile(const char *pidfile, Error **errp); int qemu_get_thread_id(void); +/** + * qemu_kill_thread: + * @tid: thread id. + * @sig: host signal. + * + * Send @sig to one of QEMU's own threads with identifier @tid. + */ +int qemu_kill_thread(int tid, int sig); + #ifndef CONFIG_IOVEC struct iovec { void *iov_base; @@ -757,6 +775,17 @@ static inline void qemu_reset_optind(void) int qemu_fdatasync(int fd); +/** + * qemu_close_all_open_fd: + * + * Close all open file descriptors except the ones supplied in the @skip array + * + * @skip: ordered array of distinct file descriptors that should not be closed + * if any, or NULL. + * @nskip: number of entries in the @skip array or 0 if @skip is NULL. + */ +void qemu_close_all_open_fd(const int *skip, unsigned int nskip); + /** * Sync changes made to the memory mapped file back to the backing * storage. 
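Illustrative usage sketch of qemu_close_all_open_fd() declared in the osdep.h hunk above; the post-fork helper and pipe descriptor are hypothetical. The skip array must contain distinct descriptors in ascending order, which holds here as long as the pipe fd is greater than 2.

    #include "qemu/osdep.h"

    static void child_setup(int status_pipe_fd)
    {
        /* Keep stdio and the status pipe open, close everything else. */
        int skip[] = { 0, 1, 2, status_pipe_fd };

        qemu_close_all_open_fd(skip, ARRAY_SIZE(skip));
    }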
For POSIX compliant systems this will fallback diff --git a/include/qemu/plugin-memory.h b/include/qemu/plugin-memory.h index 71c1123308e..6065ec7aaf7 100644 --- a/include/qemu/plugin-memory.h +++ b/include/qemu/plugin-memory.h @@ -9,7 +9,6 @@ #ifndef PLUGIN_MEMORY_H #define PLUGIN_MEMORY_H -#include "exec/cpu-defs.h" #include "exec/hwaddr.h" struct qemu_plugin_hwaddr { diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h index af5f9db4692..f355c7cb8ab 100644 --- a/include/qemu/plugin.h +++ b/include/qemu/plugin.h @@ -167,6 +167,8 @@ qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret); void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, + uint64_t value_low, + uint64_t value_high, MemOpIdx oi, enum qemu_plugin_mem_rw rw); void qemu_plugin_flush_cb(void); @@ -207,6 +209,21 @@ void qemu_plugin_user_prefork_lock(void); */ void qemu_plugin_user_postfork(bool is_child); +enum qemu_plugin_cb_flags tcg_call_to_qemu_plugin_cb_flags(int flags); + +static inline void qemu_plugin_set_cb_flags(CPUState *cpu, + enum qemu_plugin_cb_flags flags) +{ + assert(cpu); + cpu->neg.plugin_cb_flags = flags; +} + +static inline enum qemu_plugin_cb_flags qemu_plugin_get_cb_flags(void) +{ + assert(current_cpu); + return current_cpu->neg.plugin_cb_flags; +} + #else /* !CONFIG_PLUGIN */ static inline void qemu_plugin_add_opts(void) @@ -251,6 +268,8 @@ void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret) { } static inline void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, + uint64_t value_low, + uint64_t value_high, MemOpIdx oi, enum qemu_plugin_mem_rw rw) { } diff --git a/include/qemu/pmem.h b/include/qemu/pmem.h index d2d7ad085cc..e12a67ba2c0 100644 --- a/include/qemu/pmem.h +++ b/include/qemu/pmem.h @@ -22,7 +22,6 @@ pmem_memcpy_persist(void *pmemdest, const void *src, size_t len) /* If 'pmem' option is 'on', we should always have libpmem support, or qemu will report a error and exit, never come here. */ g_assert_not_reached(); - return NULL; } static inline void diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h index c71c705b699..c450106af18 100644 --- a/include/qemu/qemu-plugin.h +++ b/include/qemu/qemu-plugin.h @@ -57,11 +57,26 @@ typedef uint64_t qemu_plugin_id_t; * - Remove qemu_plugin_register_vcpu_{tb, insn, mem}_exec_inline. * Those functions are replaced by *_per_vcpu variants, which guarantee * thread-safety for operations. + * + * version 3: + * - modified arguments and return value of qemu_plugin_insn_data to copy + * the data into a user-provided buffer instead of returning a pointer + * to the data. + * + * version 4: + * - added qemu_plugin_read_memory_vaddr + * + * version 5: + * - added qemu_plugin_write_memory_vaddr + * - added qemu_plugin_read_memory_hwaddr + * - added qemu_plugin_write_memory_hwaddr + * - added qemu_plugin_write_register + * - added qemu_plugin_translate_vaddr */ extern QEMU_PLUGIN_EXPORT int qemu_plugin_version; -#define QEMU_PLUGIN_VERSION 3 +#define QEMU_PLUGIN_VERSION 5 /** * struct qemu_info_t - system information for plugins @@ -246,9 +261,6 @@ typedef struct { * @QEMU_PLUGIN_CB_NO_REGS: callback does not access the CPU's regs * @QEMU_PLUGIN_CB_R_REGS: callback reads the CPU's regs * @QEMU_PLUGIN_CB_RW_REGS: callback reads and writes the CPU's regs - * - * Note: currently QEMU_PLUGIN_CB_RW_REGS is unused, plugins cannot change - * system register state. 
*/ enum qemu_plugin_cb_flags { QEMU_PLUGIN_CB_NO_REGS, @@ -262,6 +274,29 @@ enum qemu_plugin_mem_rw { QEMU_PLUGIN_MEM_RW, }; +enum qemu_plugin_mem_value_type { + QEMU_PLUGIN_MEM_VALUE_U8, + QEMU_PLUGIN_MEM_VALUE_U16, + QEMU_PLUGIN_MEM_VALUE_U32, + QEMU_PLUGIN_MEM_VALUE_U64, + QEMU_PLUGIN_MEM_VALUE_U128, +}; + +/* typedef qemu_plugin_mem_value - value accessed during a load/store */ +typedef struct { + enum qemu_plugin_mem_value_type type; + union { + uint8_t u8; + uint16_t u16; + uint32_t u32; + uint64_t u64; + struct { + uint64_t low; + uint64_t high; + } u128; + } data; +} qemu_plugin_mem_value; + /** * enum qemu_plugin_cond - condition to enable callback * @@ -551,6 +586,15 @@ bool qemu_plugin_mem_is_big_endian(qemu_plugin_meminfo_t info); QEMU_PLUGIN_API bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info); +/** + * qemu_plugin_mem_get_value() - return last value loaded/stored + * @info: opaque memory transaction handle + * + * Returns: memory value + */ +QEMU_PLUGIN_API +qemu_plugin_mem_value qemu_plugin_mem_get_value(qemu_plugin_meminfo_t info); + /** * qemu_plugin_get_hwaddr() - return handle for memory operation * @info: opaque memory info structure @@ -763,6 +807,7 @@ void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id, qemu_plugin_udata_cb_t cb, void *userdata); /* returns how many vcpus were started at this point */ +QEMU_PLUGIN_API int qemu_plugin_num_vcpus(void); /** @@ -830,7 +875,8 @@ struct qemu_plugin_register; /** * typedef qemu_plugin_reg_descriptor - register descriptions * - * @handle: opaque handle for retrieving value with qemu_plugin_read_register + * @handle: opaque handle for retrieving value with qemu_plugin_read_register or + * writing value with qemu_plugin_write_register * @name: register name * @feature: optional feature descriptor, can be NULL */ @@ -859,7 +905,12 @@ GArray *qemu_plugin_get_registers(void); * @buf: A GByteArray for the data owned by the plugin * * This function is only available in a context that register read access is - * explicitly requested via the QEMU_PLUGIN_CB_R_REGS flag. + * explicitly requested via the QEMU_PLUGIN_CB_R_REGS flag, if called inside a + * callback that can be registered with a qemu_plugin_cb_flags argument. This + * function can also be used in any callback context that does not use a flags + * argument, such as in a callback registered with + * qemu_plugin_register_vcpu_init_cb(), except for callbacks registered with + * qemu_plugin_register_atexit_cb() and qemu_plugin_register_flush_cb(). * * Returns the size of the read register. The content of @buf is in target byte * order. On failure returns -1. @@ -868,6 +919,166 @@ QEMU_PLUGIN_API int qemu_plugin_read_register(struct qemu_plugin_register *handle, GByteArray *buf); +/** + * qemu_plugin_write_register() - write register for current vCPU + * + * @handle: a @qemu_plugin_reg_handle handle + * @buf: A GByteArray for the data owned by the plugin + * + * This function is only available in a context that register read access is + * explicitly requested via the QEMU_PLUGIN_CB_RW_REGS flag, if called inside a + * callback that can be registered with a qemu_plugin_cb_flags argument. This + * function can also be used in any callback context that does not use a flags + * argument, such as in a callback registered with + * qemu_plugin_register_vcpu_init_cb(), except for callbacks registered with + * qemu_plugin_register_atexit_cb() and qemu_plugin_register_flush_cb(). + * + * The size of @buf must be at least the size of the requested register. 
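Illustrative sketch of a TCG plugin memory callback using the new qemu_plugin_mem_get_value() and qemu_plugin_mem_value declared above; only two of the value widths are handled here for brevity, and the callback would be registered with qemu_plugin_register_vcpu_mem_cb() as usual.

    #include <qemu-plugin.h>
    #include <inttypes.h>
    #include <stdio.h>

    static void vcpu_mem_cb(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
                            uint64_t vaddr, void *udata)
    {
        qemu_plugin_mem_value v = qemu_plugin_mem_get_value(info);

        switch (v.type) {
        case QEMU_PLUGIN_MEM_VALUE_U8:
            printf("mem access at 0x%" PRIx64 ": 0x%02x\n", vaddr, v.data.u8);
            break;
        case QEMU_PLUGIN_MEM_VALUE_U64:
            printf("mem access at 0x%" PRIx64 ": 0x%016" PRIx64 "\n",
                   vaddr, v.data.u64);
            break;
        default:
            break;  /* u16/u32/u128 handled analogously */
        }
    }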
+ * Attempting to write a register with @buf smaller than the register size + * will result in a crash or other undesired behavior. + * + * Returns the number of bytes written. On failure returns 0. + */ +QEMU_PLUGIN_API +int qemu_plugin_write_register(struct qemu_plugin_register *handle, + GByteArray *buf); + +/** + * qemu_plugin_read_memory_vaddr() - read from memory using a virtual address + * + * @addr: A virtual address to read from + * @data: A byte array to store data into + * @len: The number of bytes to read, starting from @addr + * + * @len bytes of data is read starting at @addr and stored into @data. If @data + * is not large enough to hold @len bytes, it will be expanded to the necessary + * size, reallocating if necessary. @len must be greater than 0. + * + * This function does not ensure writes are flushed prior to reading, so + * callers should take care when calling this function in plugin callbacks to + * avoid attempting to read data which may not yet be written and should use + * the memory callback API instead. + * + * Returns true on success and false on failure. + */ +QEMU_PLUGIN_API +bool qemu_plugin_read_memory_vaddr(uint64_t addr, + GByteArray *data, size_t len); + +/** + * qemu_plugin_write_memory_vaddr() - write to memory using a virtual address + * + * @addr: A virtual address to write to + * @data: A byte array containing the data to write + * + * The contents of @data will be written to memory starting at the virtual + * address @addr. + * + * This function does not guarantee consistency of writes, nor does it ensure + * that pending writes are flushed either before or after the write takes place, + * so callers should take care to only call this function in vCPU context (i.e. + * in callbacks) and avoid depending on the existence of data written using this + * function which may be overwritten afterward. + * + * Returns true on success and false on failure. + */ +QEMU_PLUGIN_API +bool qemu_plugin_write_memory_vaddr(uint64_t addr, + GByteArray *data); + +/** + * enum qemu_plugin_hwaddr_operation_result - result of a memory operation + * + * @QEMU_PLUGIN_HWADDR_OPERATION_OK: hwaddr operation succeeded + * @QEMU_PLUGIN_HWADDR_OPERATION_ERROR: unexpected error occurred + * @QEMU_PLUGIN_HWADDR_OPERATION_DEVICE_ERROR: error in memory device + * @QEMU_PLUGIN_HWADDR_OPERATION_ACCESS_DENIED: permission error + * @QEMU_PLUGIN_HWADDR_OPERATION_INVALID_ADDRESS: address was invalid + * @QEMU_PLUGIN_HWADDR_OPERATION_INVALID_ADDRESS_SPACE: invalid address space + */ +enum qemu_plugin_hwaddr_operation_result { + QEMU_PLUGIN_HWADDR_OPERATION_OK, + QEMU_PLUGIN_HWADDR_OPERATION_ERROR, + QEMU_PLUGIN_HWADDR_OPERATION_DEVICE_ERROR, + QEMU_PLUGIN_HWADDR_OPERATION_ACCESS_DENIED, + QEMU_PLUGIN_HWADDR_OPERATION_INVALID_ADDRESS, + QEMU_PLUGIN_HWADDR_OPERATION_INVALID_ADDRESS_SPACE, +}; + +/** + * qemu_plugin_read_memory_hwaddr() - read from memory using a hardware address + * + * @addr: The physical address to read from + * @data: A byte array to store data into + * @len: The number of bytes to read, starting from @addr + * + * @len bytes of data is read from the current memory space for the current + * vCPU starting at @addr and stored into @data. If @data is not large enough to + * hold @len bytes, it will be expanded to the necessary size, reallocating if + * necessary. @len must be greater than 0. 
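Illustrative sketch of the new virtual-address plugin memory API declared above: read a few guest bytes from vCPU context and, if that succeeds, write them back (possibly modified). The helper name is hypothetical.

    #include <qemu-plugin.h>
    #include <glib.h>

    static void patch_guest_bytes(uint64_t vaddr)
    {
        GByteArray *data = g_byte_array_new();

        if (qemu_plugin_read_memory_vaddr(vaddr, data, 4)) {
            /* ... inspect or modify data->data[0..3] here ... */
            qemu_plugin_write_memory_vaddr(vaddr, data);
        }
        g_byte_array_free(data, TRUE);
    }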
+ * + * This function does not ensure writes are flushed prior to reading, so + * callers should take care when calling this function in plugin callbacks to + * avoid attempting to read data which may not yet be written and should use + * the memory callback API instead. + * + * This function is only valid for softmmu targets. + * + * Returns a qemu_plugin_hwaddr_operation_result indicating the result of the + * operation. + */ +QEMU_PLUGIN_API +enum qemu_plugin_hwaddr_operation_result +qemu_plugin_read_memory_hwaddr(uint64_t addr, GByteArray *data, size_t len); + +/** + * qemu_plugin_write_memory_hwaddr() - write to memory using a hardware address + * + * @addr: A physical address to write to + * @data: A byte array containing the data to write + * + * The contents of @data will be written to memory starting at the hardware + * address @addr in the current address space for the current vCPU. + * + * This function does not guarantee consistency of writes, nor does it ensure + * that pending writes are flushed either before or after the write takes place, + * so callers should take care when calling this function in plugin callbacks to + * avoid depending on the existence of data written using this function which + * may be overwritten afterward. In addition, this function requires that the + * pages containing the address are not locked. Practically, this means that you + * should not write instruction memory in a current translation block inside a + * callback registered with qemu_plugin_register_vcpu_tb_trans_cb. + * + * You can, for example, write instruction memory in a current translation block + * in a callback registered with qemu_plugin_register_vcpu_tb_exec_cb, although + * be aware that the write will not be flushed until after the translation block + * has finished executing. In general, this function should be used to write + * data memory or to patch code at a known address, not in a current translation + * block. + * + * This function is only valid for softmmu targets. + * + * Returns a qemu_plugin_hwaddr_operation_result indicating the result of the + * operation. + */ +QEMU_PLUGIN_API +enum qemu_plugin_hwaddr_operation_result +qemu_plugin_write_memory_hwaddr(uint64_t addr, GByteArray *data); + +/** + * qemu_plugin_translate_vaddr() - translate virtual address for current vCPU + * + * @vaddr: virtual address to translate + * @hwaddr: pointer to store the physical address + * + * This function is only valid in vCPU context (i.e. in callbacks) and is only + * valid for softmmu targets. + * + * Returns true on success and false on failure. + */ +QEMU_PLUGIN_API +bool qemu_plugin_translate_vaddr(uint64_t vaddr, uint64_t *hwaddr); + /** * qemu_plugin_scoreboard_new() - alloc a new scoreboard * diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h index fea058aa9f8..020dbe4d8b7 100644 --- a/include/qemu/rcu.h +++ b/include/qemu/rcu.h @@ -17,8 +17,8 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * License along with this library; if not, see + * . * * IBM's contributions to this file may be relicensed under LGPLv2 or later. */ diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h index 4e6298d4730..bfd5900fda0 100644 --- a/include/qemu/rcu_queue.h +++ b/include/qemu/rcu_queue.h @@ -17,8 +17,8 @@ * Lesser General Public License for more details. 
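Taken together, the hwaddr helpers above can be used roughly as follows from a vCPU callback (a sketch only; error handling is abbreviated and 'vaddr' is a stand-in value):

    uint64_t hw;
    if (qemu_plugin_translate_vaddr(vaddr, &hw)) {
        GByteArray *buf = g_byte_array_new();
        enum qemu_plugin_hwaddr_operation_result res =
            qemu_plugin_read_memory_hwaddr(hw, buf, 8);
        if (res != QEMU_PLUGIN_HWADDR_OPERATION_OK) {
            /* e.g. QEMU_PLUGIN_HWADDR_OPERATION_INVALID_ADDRESS */
        }
        g_byte_array_free(buf, TRUE);
    }
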
* * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * License along with this library; if not, see + * . * * Copyright (c) 2013 Mike D. Day, IBM Corporation. * diff --git a/include/qemu/reserved-region.h b/include/qemu/reserved-region.h index 8e6f0a97e22..9026cf08fdf 100644 --- a/include/qemu/reserved-region.h +++ b/include/qemu/reserved-region.h @@ -20,7 +20,7 @@ #ifndef QEMU_RESERVED_REGION_H #define QEMU_RESERVED_REGION_H -#include "exec/memory.h" +#include "system/memory.h" /* * Insert a new region into a sorted list of reserved regions. In case diff --git a/include/qemu/s390x_pci_mmio.h b/include/qemu/s390x_pci_mmio.h new file mode 100644 index 00000000000..c5f63ecefa1 --- /dev/null +++ b/include/qemu/s390x_pci_mmio.h @@ -0,0 +1,24 @@ +/* + * s390x PCI MMIO definitions + * + * Copyright 2025 IBM Corp. + * Author(s): Farhan Ali + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef S390X_PCI_MMIO_H +#define S390X_PCI_MMIO_H + +#ifdef __s390x__ +uint8_t s390x_pci_mmio_read_8(const void *ioaddr); +uint16_t s390x_pci_mmio_read_16(const void *ioaddr); +uint32_t s390x_pci_mmio_read_32(const void *ioaddr); +uint64_t s390x_pci_mmio_read_64(const void *ioaddr); + +void s390x_pci_mmio_write_8(void *ioaddr, uint8_t val); +void s390x_pci_mmio_write_16(void *ioaddr, uint16_t val); +void s390x_pci_mmio_write_32(void *ioaddr, uint32_t val); +void s390x_pci_mmio_write_64(void *ioaddr, uint64_t val); +#endif /* __s390x__ */ + +#endif /* S390X_PCI_MMIO_H */ diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h index d935fd80da8..c562690d893 100644 --- a/include/qemu/sockets.h +++ b/include/qemu/sockets.h @@ -61,7 +61,6 @@ int socket_set_fast_reuse(int fd); int inet_ai_family_from_address(InetSocketAddress *addr, Error **errp); int inet_parse(InetSocketAddress *addr, const char *str, Error **errp); -int inet_connect(const char *str, Error **errp); int inet_connect_saddr(InetSocketAddress *saddr, Error **errp); NetworkAddressFamily inet_netfamily(int family); @@ -117,21 +116,6 @@ socket_sockaddr_to_address(struct sockaddr_storage *sa, */ SocketAddress *socket_local_address(int fd, Error **errp); -/** - * socket_remote_address: - * @fd: the socket file handle - * @errp: pointer to uninitialized error object - * - * Get the string representation of the remote socket - * address. A pointer to the allocated address information - * struct will be returned, which the caller is required to - * release with a call qapi_free_SocketAddress() when no - * longer required. 
- * - * Returns: the socket address struct, or NULL on error - */ -SocketAddress *socket_remote_address(int fd, Error **errp); - /** * socket_address_flatten: * @addr: the socket address to flatten diff --git a/include/qemu/target-info-impl.h b/include/qemu/target-info-impl.h new file mode 100644 index 00000000000..17887f64e26 --- /dev/null +++ b/include/qemu/target-info-impl.h @@ -0,0 +1,36 @@ +/* + * QEMU TargetInfo structure definition + * + * Copyright (c) Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef QEMU_TARGET_INFO_IMPL_H +#define QEMU_TARGET_INFO_IMPL_H + +#include "qapi/qapi-types-machine.h" + +typedef struct TargetInfo { + /* runtime equivalent of TARGET_NAME definition */ + const char *target_name; + /* related to TARGET_ARCH definition */ + SysEmuTarget target_arch; + /* runtime equivalent of TARGET_LONG_BITS definition */ + unsigned long_bits; + /* runtime equivalent of CPU_RESOLVING_TYPE definition */ + const char *cpu_type; + /* QOM typename machines for this binary must implement */ + const char *machine_typename; + /* related to TARGET_BIG_ENDIAN definition */ + EndianMode endianness; +} TargetInfo; + +/** + * target_info: + * + * Returns: The TargetInfo structure definition for this target binary. + */ +const TargetInfo *target_info(void); + +#endif diff --git a/include/qemu/target-info-qapi.h b/include/qemu/target-info-qapi.h new file mode 100644 index 00000000000..d5ce0523238 --- /dev/null +++ b/include/qemu/target-info-qapi.h @@ -0,0 +1,29 @@ +/* + * QEMU target info API (returning QAPI types) + * + * Copyright (c) Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef QEMU_TARGET_INFO_EXTRA_H +#define QEMU_TARGET_INFO_EXTRA_H + +#include "qapi/qapi-types-common.h" +#include "qapi/qapi-types-machine.h" + +/** + * target_arch: + * + * Returns: QAPI SysEmuTarget enum (e.g. SYS_EMU_TARGET_X86_64). + */ +SysEmuTarget target_arch(void); + +/** + * target_endian_mode: + * + * Returns: QAPI EndianMode enum (e.g. ENDIAN_MODE_LITTLE). + */ +EndianMode target_endian_mode(void); + +#endif diff --git a/include/qemu/target-info.h b/include/qemu/target-info.h new file mode 100644 index 00000000000..abcf25db6fa --- /dev/null +++ b/include/qemu/target-info.h @@ -0,0 +1,53 @@ +/* + * QEMU target info API (returning native types) + * + * Copyright (c) Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef QEMU_TARGET_INFO_H +#define QEMU_TARGET_INFO_H + +/** + * target_name: + * + * Returns: Canonical target name (i.e. "i386"). + */ +const char *target_name(void); + +/** + * target_long_bits: + * + * Returns: number of bits in a long type for this target (i.e. 64). + */ +unsigned target_long_bits(void); + +/** + * target_machine_typename: + * + * Returns: Name of the QOM interface implemented by machines + * usable on this target binary. + */ +const char *target_machine_typename(void); + +/** + * target_cpu_type: + * + * Returns: target CPU base QOM type name (i.e. TYPE_X86_CPU). + */ +const char *target_cpu_type(void); + +/** + * target_big_endian: + * + * Returns: %true if the (default) endianness of the target is big endian, + * %false otherwise. + * + * Common code should normally never need to know about the endianness of + * the target, so please do *not* use this function unless you know very + * well what you are doing! 
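A small sketch of how the runtime target-info accessors declared in qemu/target-info.h might be used from target-independent code; the g_message() call and the osdep.h include are just the usual QEMU conventions, not part of this API.

    #include "qemu/osdep.h"
    #include "qemu/target-info.h"

    static void report_target(void)
    {
        g_message("%s target: %u-bit longs, cpu type %s, machine interface %s",
                  target_name(), target_long_bits(), target_cpu_type(),
                  target_machine_typename());
    }
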
+ */ +bool target_big_endian(void); + +#endif diff --git a/include/qemu/thread-posix.h b/include/qemu/thread-posix.h index 5f2f3d1386b..758808b705e 100644 --- a/include/qemu/thread-posix.h +++ b/include/qemu/thread-posix.h @@ -32,15 +32,6 @@ struct QemuSemaphore { unsigned int count; }; -struct QemuEvent { -#ifndef __linux__ - pthread_mutex_t lock; - pthread_cond_t cond; -#endif - unsigned value; - bool initialized; -}; - struct QemuThread { pthread_t thread; }; diff --git a/include/qemu/thread-win32.h b/include/qemu/thread-win32.h index d95af4498fc..da9e7322990 100644 --- a/include/qemu/thread-win32.h +++ b/include/qemu/thread-win32.h @@ -28,12 +28,6 @@ struct QemuSemaphore { bool initialized; }; -struct QemuEvent { - int value; - HANDLE event; - bool initialized; -}; - typedef struct QemuThreadData QemuThreadData; struct QemuThread { QemuThreadData *data; diff --git a/include/qemu/thread.h b/include/qemu/thread.h index fb74e21c08a..f0302ed01fd 100644 --- a/include/qemu/thread.h +++ b/include/qemu/thread.h @@ -3,14 +3,32 @@ #include "qemu/processor.h" #include "qemu/atomic.h" -#include "qemu/clang-tsa.h" +#include "qemu/futex.h" typedef struct QemuCond QemuCond; typedef struct QemuSemaphore QemuSemaphore; -typedef struct QemuEvent QemuEvent; typedef struct QemuLockCnt QemuLockCnt; typedef struct QemuThread QemuThread; +/* + * QemuEvent + * ========= + * + * QemuEvent is an implementation of Win32 manual-reset event object. + * For details, refer to: + * https://learn.microsoft.com/en-us/windows/win32/sync/using-event-objects + * + * QemuEvent is more lightweight than QemuSemaphore when HAVE_FUTEX is defined. + */ +typedef struct QemuEvent { +#ifndef HAVE_FUTEX + pthread_mutex_t lock; + pthread_cond_t cond; +#endif + unsigned value; + bool initialized; +} QemuEvent; + #ifdef _WIN32 #include "qemu/thread-win32.h" #else @@ -293,115 +311,4 @@ static inline void qemu_spin_unlock(QemuSpin *spin) #endif } -struct QemuLockCnt { -#ifndef CONFIG_LINUX - QemuMutex mutex; -#endif - unsigned count; -}; - -/** - * qemu_lockcnt_init: initialize a QemuLockcnt - * @lockcnt: the lockcnt to initialize - * - * Initialize lockcnt's counter to zero and prepare its mutex - * for usage. - */ -void qemu_lockcnt_init(QemuLockCnt *lockcnt); - -/** - * qemu_lockcnt_destroy: destroy a QemuLockcnt - * @lockcnt: the lockcnt to destruct - * - * Destroy lockcnt's mutex. - */ -void qemu_lockcnt_destroy(QemuLockCnt *lockcnt); - -/** - * qemu_lockcnt_inc: increment a QemuLockCnt's counter - * @lockcnt: the lockcnt to operate on - * - * If the lockcnt's count is zero, wait for critical sections - * to finish and increment lockcnt's count to 1. If the count - * is not zero, just increment it. - * - * Because this function can wait on the mutex, it must not be - * called while the lockcnt's mutex is held by the current thread. - * For the same reason, qemu_lockcnt_inc can also contribute to - * AB-BA deadlocks. This is a sample deadlock scenario: - * - * thread 1 thread 2 - * ------------------------------------------------------- - * qemu_lockcnt_lock(&lc1); - * qemu_lockcnt_lock(&lc2); - * qemu_lockcnt_inc(&lc2); - * qemu_lockcnt_inc(&lc1); - */ -void qemu_lockcnt_inc(QemuLockCnt *lockcnt); - -/** - * qemu_lockcnt_dec: decrement a QemuLockCnt's counter - * @lockcnt: the lockcnt to operate on - */ -void qemu_lockcnt_dec(QemuLockCnt *lockcnt); - -/** - * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and - * possibly lock it. - * @lockcnt: the lockcnt to operate on - * - * Decrement lockcnt's count. 
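The QemuEvent now declared directly in qemu/thread.h keeps its manual-reset semantics; a minimal usage sketch with the existing qemu-thread helpers:

    QemuEvent ev;

    qemu_event_init(&ev, false);   /* start in the cleared state */
    /* ... some other thread eventually calls qemu_event_set(&ev) ... */
    qemu_event_wait(&ev);          /* blocks until the event is set */
    qemu_event_reset(&ev);         /* manual reset, as with Win32 events */
    qemu_event_destroy(&ev);
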
If the new count is zero, lock - * the mutex and return true. Otherwise, return false. - */ -bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt); - -/** - * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and - * lock it. - * @lockcnt: the lockcnt to operate on - * - * If the count is 1, decrement the count to zero, lock - * the mutex and return true. Otherwise, return false. - */ -bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt); - -/** - * qemu_lockcnt_lock: lock a QemuLockCnt's mutex. - * @lockcnt: the lockcnt to operate on - * - * Remember that concurrent visits are not blocked unless the count is - * also zero. You can use qemu_lockcnt_count to check for this inside a - * critical section. - */ -void qemu_lockcnt_lock(QemuLockCnt *lockcnt); - -/** - * qemu_lockcnt_unlock: release a QemuLockCnt's mutex. - * @lockcnt: the lockcnt to operate on. - */ -void qemu_lockcnt_unlock(QemuLockCnt *lockcnt); - -/** - * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt. - * @lockcnt: the lockcnt to operate on. - * - * This is the same as - * - * qemu_lockcnt_unlock(lockcnt); - * qemu_lockcnt_inc(lockcnt); - * - * but more efficient. - */ -void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt); - -/** - * qemu_lockcnt_count: query a LockCnt's count. - * @lockcnt: the lockcnt to query. - * - * Note that the count can change at any time. Still, while the - * lockcnt is locked, one can usefully check whether the count - * is non-zero. - */ -unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt); - #endif diff --git a/include/qemu/timed-average.h b/include/qemu/timed-average.h index 08245e7a109..dfd8d653fa7 100644 --- a/include/qemu/timed-average.h +++ b/include/qemu/timed-average.h @@ -8,10 +8,12 @@ * Benoît Canet * Alberto Garcia * + * SPDX-License-Identifier: GPL-2.0-or-later + * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or - * (at your option) version 3 or any later version. + * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/include/qemu/timer.h b/include/qemu/timer.h index fa56ec9481d..abd2204f3be 100644 --- a/include/qemu/timer.h +++ b/include/qemu/timer.h @@ -190,16 +190,6 @@ bool qemu_clock_use_for_deadline(QEMUClockType type); */ int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask); -/** - * qemu_clock_get_main_loop_timerlist: - * @type: the clock type - * - * Return the default timer list associated with a clock. - * - * Returns: the default timer list - */ -QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type); - /** * qemu_clock_nofify: * @type: the clock type @@ -326,17 +316,6 @@ bool timerlist_expired(QEMUTimerList *timer_list); */ int64_t timerlist_deadline_ns(QEMUTimerList *timer_list); -/** - * timerlist_get_clock: - * @timer_list: the timer list to operate on - * - * Determine the clock type associated with a timer list. - * - * Returns: the clock type associated with the - * timer list. - */ -QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list); - /** * timerlist_run_timers: * @timer_list: the timer list to use @@ -528,6 +507,8 @@ static inline void timer_init_ms(QEMUTimer *ts, QEMUClockType type, * with an AioContext---each of them runs its timer callbacks in its own * AioContext thread. 
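The timer documentation added in this hunk spells out the ownership rule for timer_new_*(); a short sketch of the intended pattern ('my_cb' is a hypothetical callback):

    static void my_cb(void *opaque)
    {
        /* runs when the timer expires */
    }

    QEMUTimer *t = timer_new_ns(QEMU_CLOCK_VIRTUAL, my_cb, NULL);
    timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1000000);
    /* ... */
    timer_free(t);   /* timers returned by timer_new_*() must be freed */
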
* + * The timer returned must be freed using timer_free(). + * * Returns: a pointer to the timer */ static inline QEMUTimer *timer_new_full(QEMUTimerListGroup *timer_list_group, @@ -551,6 +532,8 @@ static inline QEMUTimer *timer_new_full(QEMUTimerListGroup *timer_list_group, * and associate it with the default timer list for the clock type @type. * See timer_new_full for details. * + * The timer returned must be freed using timer_free(). + * * Returns: a pointer to the timer */ static inline QEMUTimer *timer_new(QEMUClockType type, int scale, @@ -569,6 +552,8 @@ static inline QEMUTimer *timer_new(QEMUClockType type, int scale, * associated with the clock. * See timer_new_full for details. * + * The timer returned must be freed using timer_free(). + * * Returns: a pointer to the newly created timer */ static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb, @@ -587,6 +572,8 @@ static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb, * associated with the clock. * See timer_new_full for details. * + * The timer returned must be freed using timer_free(). + * * Returns: a pointer to the newly created timer */ static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb, @@ -605,6 +592,8 @@ static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb, * associated with the clock. * See timer_new_full for details. * + * The timer returned must be freed using timer_free(). + * * Returns: a pointer to the newly created timer */ static inline QEMUTimer *timer_new_ms(QEMUClockType type, QEMUTimerCB *cb, diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h index 9d222dc3762..4a94af9665a 100644 --- a/include/qemu/typedefs.h +++ b/include/qemu/typedefs.h @@ -22,6 +22,7 @@ * Please keep this list in case-insensitive alphabetical order. 
*/ typedef struct AccelCPUState AccelCPUState; +typedef struct AccelOpsClass AccelOpsClass; typedef struct AccelState AccelState; typedef struct AddressSpace AddressSpace; typedef struct AioContext AioContext; @@ -40,6 +41,7 @@ typedef struct ConfidentialGuestSupport ConfidentialGuestSupport; typedef struct CPUArchState CPUArchState; typedef struct CPUPluginState CPUPluginState; typedef struct CPUState CPUState; +typedef struct CPUTLBEntryFull CPUTLBEntryFull; typedef struct DeviceState DeviceState; typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot; typedef struct DisasContextBase DisasContextBase; @@ -107,6 +109,7 @@ typedef struct QString QString; typedef struct RAMBlock RAMBlock; typedef struct Range Range; typedef struct ReservedRegion ReservedRegion; +typedef struct SaveCompletePrecopyThreadData SaveCompletePrecopyThreadData; typedef struct SHPCDevice SHPCDevice; typedef struct SSIBus SSIBus; typedef struct TCGCPUOps TCGCPUOps; @@ -130,5 +133,9 @@ typedef struct IRQState *qemu_irq; * Function types */ typedef void (*qemu_irq_handler)(void *opaque, int n, int level); +typedef bool (*MigrationLoadThread)(void *opaque, bool *should_quit, + Error **errp); +typedef bool (*SaveCompletePrecopyThreadHandler)(SaveCompletePrecopyThreadData *d, + Error **errp); #endif /* QEMU_TYPEDEFS_H */ diff --git a/include/qemu/userfaultfd.h b/include/qemu/userfaultfd.h index 18a4314212d..a1979308d7e 100644 --- a/include/qemu/userfaultfd.h +++ b/include/qemu/userfaultfd.h @@ -39,7 +39,6 @@ int uffd_copy_page(int uffd_fd, void *dst_addr, void *src_addr, int uffd_zero_page(int uffd_fd, void *addr, uint64_t length, bool dont_wake); int uffd_wakeup(int uffd_fd, void *addr, uint64_t length); int uffd_read_events(int uffd_fd, struct uffd_msg *msgs, int count); -bool uffd_poll_events(int uffd_fd, int tmo); #endif /* CONFIG_LINUX */ diff --git a/include/qapi/qmp/json-parser.h b/include/qobject/json-parser.h similarity index 100% rename from include/qapi/qmp/json-parser.h rename to include/qobject/json-parser.h diff --git a/include/qapi/qmp/json-writer.h b/include/qobject/json-writer.h similarity index 100% rename from include/qapi/qmp/json-writer.h rename to include/qobject/json-writer.h diff --git a/include/qapi/qmp/qbool.h b/include/qobject/qbool.h similarity index 94% rename from include/qapi/qmp/qbool.h rename to include/qobject/qbool.h index 0d09726939b..b348e17867b 100644 --- a/include/qapi/qmp/qbool.h +++ b/include/qobject/qbool.h @@ -14,7 +14,7 @@ #ifndef QBOOL_H #define QBOOL_H -#include "qapi/qmp/qobject.h" +#include "qobject/qobject.h" struct QBool { struct QObjectBase_ base; diff --git a/include/qapi/qmp/qdict.h b/include/qobject/qdict.h similarity index 98% rename from include/qapi/qmp/qdict.h rename to include/qobject/qdict.h index 82e90fc0722..903e6e5462f 100644 --- a/include/qapi/qmp/qdict.h +++ b/include/qobject/qdict.h @@ -13,7 +13,7 @@ #ifndef QDICT_H #define QDICT_H -#include "qapi/qmp/qobject.h" +#include "qobject/qobject.h" #include "qemu/queue.h" #define QDICT_BUCKET_MAX 512 diff --git a/include/qapi/qmp/qjson.h b/include/qobject/qjson.h similarity index 100% rename from include/qapi/qmp/qjson.h rename to include/qobject/qjson.h diff --git a/include/qapi/qmp/qlist.h b/include/qobject/qlist.h similarity index 98% rename from include/qapi/qmp/qlist.h rename to include/qobject/qlist.h index e4e985d4356..0377bf824ec 100644 --- a/include/qapi/qmp/qlist.h +++ b/include/qobject/qlist.h @@ -13,7 +13,7 @@ #ifndef QLIST_H #define QLIST_H -#include "qapi/qmp/qobject.h" +#include "qobject/qobject.h" 
#include "qemu/queue.h" typedef struct QListEntry { diff --git a/include/qapi/qmp/qlit.h b/include/qobject/qlit.h similarity index 100% rename from include/qapi/qmp/qlit.h rename to include/qobject/qlit.h diff --git a/include/qapi/qmp/qnull.h b/include/qobject/qnull.h similarity index 94% rename from include/qapi/qmp/qnull.h rename to include/qobject/qnull.h index 7feb7c7d830..4423836a0c6 100644 --- a/include/qapi/qmp/qnull.h +++ b/include/qobject/qnull.h @@ -13,7 +13,7 @@ #ifndef QNULL_H #define QNULL_H -#include "qapi/qmp/qobject.h" +#include "qobject/qobject.h" struct QNull { struct QObjectBase_ base; diff --git a/include/qapi/qmp/qnum.h b/include/qobject/qnum.h similarity index 98% rename from include/qapi/qmp/qnum.h rename to include/qobject/qnum.h index e86788dd2e3..1ce24b36681 100644 --- a/include/qapi/qmp/qnum.h +++ b/include/qobject/qnum.h @@ -15,7 +15,7 @@ #ifndef QNUM_H #define QNUM_H -#include "qapi/qmp/qobject.h" +#include "qobject/qobject.h" typedef enum { QNUM_I64, diff --git a/include/qapi/qmp/qobject.h b/include/qobject/qobject.h similarity index 97% rename from include/qapi/qmp/qobject.h rename to include/qobject/qobject.h index 89b97d88bca..a6244d0ce00 100644 --- a/include/qapi/qmp/qobject.h +++ b/include/qobject/qobject.h @@ -34,7 +34,7 @@ #include "qapi/qapi-builtin-types.h" -/* Not for use outside include/qapi/qmp/ */ +/* Not for use outside include/qobject/ */ struct QObjectBase_ { QType type; size_t refcnt; @@ -54,7 +54,7 @@ struct QObject { typeof(obj) _obj = (obj); \ _obj ? container_of(&_obj->base, QObject, base) : NULL; \ }) -#define QOBJECT(obj) QOBJECT_INTERNAL((obj), MAKE_IDENTFIER(_obj)) +#define QOBJECT(obj) QOBJECT_INTERNAL((obj), MAKE_IDENTIFIER(_obj)) /* Required for qobject_to() */ #define QTYPE_CAST_TO_QNull QTYPE_QNULL diff --git a/include/qapi/qmp/qstring.h b/include/qobject/qstring.h similarity index 96% rename from include/qapi/qmp/qstring.h rename to include/qobject/qstring.h index 318d815d6a4..1e2abe4032d 100644 --- a/include/qapi/qmp/qstring.h +++ b/include/qobject/qstring.h @@ -13,7 +13,7 @@ #ifndef QSTRING_H #define QSTRING_H -#include "qapi/qmp/qobject.h" +#include "qobject/qobject.h" struct QString { struct QObjectBase_ base; diff --git a/include/qom/object.h b/include/qom/object.h index 13d3a655ddf..26df6137b91 100644 --- a/include/qom/object.h +++ b/include/qom/object.h @@ -26,6 +26,7 @@ typedef struct InterfaceClass InterfaceClass; typedef struct InterfaceInfo InterfaceInfo; #define TYPE_OBJECT "object" +#define TYPE_CONTAINER "container" typedef struct ObjectProperty ObjectProperty; @@ -279,7 +280,7 @@ struct Object static void \ module_obj_name##_finalize(Object *obj); \ static void \ - module_obj_name##_class_init(ObjectClass *oc, void *data); \ + module_obj_name##_class_init(ObjectClass *oc, const void *data); \ static void \ module_obj_name##_init(Object *obj); \ \ @@ -293,7 +294,7 @@ struct Object .class_size = CLASS_SIZE, \ .class_init = module_obj_name##_class_init, \ .abstract = ABSTRACT, \ - .interfaces = (InterfaceInfo[]) { __VA_ARGS__ } , \ + .interfaces = (const InterfaceInfo[]) { __VA_ARGS__ } , \ }; \ \ static void \ @@ -444,7 +445,8 @@ struct Object * class will have already been initialized so the type is only responsible * for initializing its own members. * @instance_post_init: This function is called to finish initialization of - * an object, after all @instance_init functions were called. + * an object, after all @instance_init functions were called, as well as + * @instance_post_init functions for the parent classes. 
* @instance_finalize: This function is called during object destruction. This * is called before the parent @instance_finalize function has been called. * An object should only free the members that are unique to its type in this @@ -485,11 +487,11 @@ struct TypeInfo bool abstract; size_t class_size; - void (*class_init)(ObjectClass *klass, void *data); - void (*class_base_init)(ObjectClass *klass, void *data); - void *class_data; + void (*class_init)(ObjectClass *klass, const void *data); + void (*class_base_init)(ObjectClass *klass, const void *data); + const void *class_data; - InterfaceInfo *interfaces; + const InterfaceInfo *interfaces; }; /** @@ -572,12 +574,15 @@ struct InterfaceInfo { * * The class for all interfaces. Subclasses of this class should only add * virtual methods. + * + * Note that most of the fields of ObjectClass are unused (all except + * "type", in fact). They are only present in InterfaceClass to allow + * @object_class_dynamic_cast to work with both regular classes and interfaces. */ struct InterfaceClass { ObjectClass parent_class; /* private: */ - ObjectClass *concrete_class; Type interface_type; }; @@ -880,24 +885,10 @@ const char *object_get_typename(const Object *obj); * type_register_static: * @info: The #TypeInfo of the new type. * - * @info and all of the strings it points to should exist for the life time - * that the type is registered. - * * Returns: the new #Type. */ Type type_register_static(const TypeInfo *info); -/** - * type_register: - * @info: The #TypeInfo of the new type - * - * Unlike type_register_static(), this call does not require @info or its - * string members to continue to exist after the call returns. - * - * Returns: the new #Type. - */ -Type type_register(const TypeInfo *info); - /** * type_register_static_array: * @infos: The array of the new type #TypeInfo structures. @@ -1523,6 +1514,16 @@ const char *object_property_get_type(Object *obj, const char *name, */ Object *object_get_root(void); +/** + * object_get_container: + * @name: the name of container to lookup + * + * Lookup a root level container. + * + * Returns: the container with @name. + */ +Object *object_get_container(const char *name); + /** * object_get_objects_root: @@ -1569,8 +1570,8 @@ char *object_get_canonical_path(const Object *obj); /** * object_resolve_path: * @path: the path to resolve - * @ambiguous: returns true if the path resolution failed because of an - * ambiguous match + * @ambiguous: (out) (optional): location to store whether the lookup failed + * because it was ambiguous, or %NULL. Set to %false on success. * * There are two types of supported paths--absolute paths and partial paths. * @@ -1587,7 +1588,7 @@ char *object_get_canonical_path(const Object *obj); * only one match is found. If more than one match is found, a flag is * returned to indicate that the match was ambiguous. * - * Returns: The matched object or NULL on path lookup failure. + * Returns: The matched object or %NULL on path lookup failure. */ Object *object_resolve_path(const char *path, bool *ambiguous); @@ -1595,10 +1596,10 @@ Object *object_resolve_path(const char *path, bool *ambiguous); * object_resolve_path_type: * @path: the path to resolve * @typename: the type to look for. - * @ambiguous: returns true if the path resolution failed because of an - * ambiguous match + * @ambiguous: (out) (optional): location to store whether the lookup failed + * because it was ambiguous, or %NULL. Set to %false on success. * - * This is similar to object_resolve_path. 
However, when looking for a + * This is similar to object_resolve_path(). However, when looking for a * partial path only matches that implement the given type are considered. * This restricts the search and avoids spuriously flagging matches as * ambiguous. @@ -2020,25 +2021,18 @@ int object_child_foreach(Object *obj, int (*fn)(Object *child, void *opaque), int object_child_foreach_recursive(Object *obj, int (*fn)(Object *child, void *opaque), void *opaque); -/** - * container_get: - * @root: root of the #path, e.g., object_get_root() - * @path: path to the container - * - * Return a container object whose path is @path. Create more containers - * along the path if necessary. - * - * Returns: the container object. - */ -Object *container_get(Object *root, const char *path); /** - * object_type_get_instance_size: - * @typename: Name of the Type whose instance_size is required + * object_property_add_new_container: + * @obj: the parent object + * @name: the name of the parent object's property to add + * + * Add a newly created container object to a parent object. * - * Returns the instance_size of the given @typename. + * Returns: the newly created container object. Its reference count is 1, + * and the reference is owned by the parent object. */ -size_t object_type_get_instance_size(const char *typename); +Object *object_property_add_new_container(Object *obj, const char *name); /** * object_property_help: diff --git a/include/semihosting/console.h b/include/semihosting/console.h index bd78e5f03fc..1c12e178ee3 100644 --- a/include/semihosting/console.h +++ b/include/semihosting/console.h @@ -9,8 +9,6 @@ #ifndef SEMIHOST_CONSOLE_H #define SEMIHOST_CONSOLE_H -#include "cpu.h" - /** * qemu_semihosting_console_read: * @cs: CPUState diff --git a/include/semihosting/semihost.h b/include/semihosting/semihost.h index 97d2a2ba996..b03e6375787 100644 --- a/include/semihosting/semihost.h +++ b/include/semihosting/semihost.h @@ -26,32 +26,6 @@ typedef enum SemihostingTarget { SEMIHOSTING_TARGET_GDB } SemihostingTarget; -#ifdef CONFIG_USER_ONLY -static inline bool semihosting_enabled(bool is_user) -{ - return true; -} - -static inline SemihostingTarget semihosting_get_target(void) -{ - return SEMIHOSTING_TARGET_AUTO; -} - -static inline const char *semihosting_get_arg(int i) -{ - return NULL; -} - -static inline int semihosting_get_argc(void) -{ - return 0; -} - -static inline const char *semihosting_get_cmdline(void) -{ - return NULL; -} -#else /* !CONFIG_USER_ONLY */ /** * semihosting_enabled: * @is_user: true if guest code is in usermode (i.e. not privileged) @@ -59,17 +33,18 @@ static inline const char *semihosting_get_cmdline(void) * Return true if guest code is allowed to make semihosting calls. 
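A sketch of the new container helpers (object_get_container() and object_property_add_new_container()) that replace the old container_get() pattern; the container names used here are stand-ins:

    Object *parent = object_get_container("chardevs");   /* root-level container */
    Object *sub = object_property_add_new_container(parent, "my-group");
    /* 'sub' is owned by its parent; no extra unref is needed here */
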
*/ bool semihosting_enabled(bool is_user); + SemihostingTarget semihosting_get_target(void); const char *semihosting_get_arg(int i); int semihosting_get_argc(void); const char *semihosting_get_cmdline(void); void semihosting_arg_fallback(const char *file, const char *cmd); + /* for vl.c hooks */ void qemu_semihosting_enable(void); int qemu_semihosting_config_options(const char *optstr); void qemu_semihosting_chardev_init(void); void qemu_semihosting_console_init(Chardev *); -#endif /* CONFIG_USER_ONLY */ void qemu_semihosting_guestfd_init(void); #endif /* SEMIHOST_H */ diff --git a/include/semihosting/syscalls.h b/include/semihosting/syscalls.h index b5937c619a6..6627c45fb28 100644 --- a/include/semihosting/syscalls.h +++ b/include/semihosting/syscalls.h @@ -9,6 +9,7 @@ #ifndef SEMIHOSTING_SYSCALLS_H #define SEMIHOSTING_SYSCALLS_H +#include "exec/cpu-defs.h" #include "gdbstub/syscalls.h" /* diff --git a/include/semihosting/uaccess.h b/include/semihosting/uaccess.h index c2fa5a655de..2093a498277 100644 --- a/include/semihosting/uaccess.h +++ b/include/semihosting/uaccess.h @@ -15,52 +15,107 @@ #endif #include "exec/cpu-common.h" -#include "exec/cpu-defs.h" #include "exec/tswap.h" #include "exec/page-protection.h" +#include "exec/vaddr.h" +/** + * get_user_u64: + * + * Returns: 0 on success, -1 on error. + */ #define get_user_u64(val, addr) \ ({ uint64_t val_ = 0; \ int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \ &val_, sizeof(val_), 0); \ (val) = tswap64(val_); ret_; }) +/** + * get_user_u32: + * + * Returns: 0 on success, -1 on error. + */ #define get_user_u32(val, addr) \ ({ uint32_t val_ = 0; \ int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \ &val_, sizeof(val_), 0); \ (val) = tswap32(val_); ret_; }) +/** + * get_user_u8: + * + * Returns: 0 on success, -1 on error. + */ #define get_user_u8(val, addr) \ ({ uint8_t val_ = 0; \ int ret_ = cpu_memory_rw_debug(env_cpu(env), (addr), \ &val_, sizeof(val_), 0); \ (val) = val_; ret_; }) +/** + * get_user_ual: + * + * Returns: 0 on success, -1 on error. + */ #define get_user_ual(arg, p) get_user_u32(arg, p) +/** + * put_user_u64: + * + * Returns: 0 on success, -1 on error. + */ #define put_user_u64(val, addr) \ ({ uint64_t val_ = tswap64(val); \ cpu_memory_rw_debug(env_cpu(env), (addr), &val_, sizeof(val_), 1); }) +/** + * put_user_u32: + * + * Returns: 0 on success, -1 on error. + */ #define put_user_u32(val, addr) \ ({ uint32_t val_ = tswap32(val); \ cpu_memory_rw_debug(env_cpu(env), (addr), &val_, sizeof(val_), 1); }) +/** + * put_user_ual: + * + * Returns: 0 on success, -1 on error. + */ #define put_user_ual(arg, p) put_user_u32(arg, p) -void *uaccess_lock_user(CPUArchState *env, target_ulong addr, - target_ulong len, bool copy); +/** + * uaccess_lock_user: + * + * The returned pointer should be freed using uaccess_unlock_user(). + */ +void *uaccess_lock_user(CPUArchState *env, vaddr addr, + size_t len, bool copy); +/** + * lock_user: + * + * The returned pointer should be freed using unlock_user(). + */ #define lock_user(type, p, len, copy) uaccess_lock_user(env, p, len, copy) -char *uaccess_lock_user_string(CPUArchState *env, target_ulong addr); +/** + * uaccess_lock_user_string: + * + * The returned string should be freed using uaccess_unlock_user(). + */ +char *uaccess_lock_user_string(CPUArchState *env, vaddr addr); +/** + * uaccess_lock_user_string: + * + * The returned string should be freed using unlock_user(). 
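These uaccess helpers follow the usual lock/unlock pairing; a hedged sketch of how a semihosting call might consume a guest argument block (assumes a CPUArchState *env is in scope, as the macros require; 'args' is a stand-in guest address):

    vaddr args;         /* guest address of an argument block */
    uint32_t str_ptr;   /* guest pointer read out of the block */

    if (get_user_u32(str_ptr, args) == 0) {
        char *s = lock_user_string(str_ptr);
        if (s) {
            /* use the NUL-terminated host copy of the guest string */
            unlock_user(s, str_ptr, 0);
        }
    }
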
+ */ #define lock_user_string(p) uaccess_lock_user_string(env, p) void uaccess_unlock_user(CPUArchState *env, void *p, - target_ulong addr, target_ulong len); + vaddr addr, size_t len); #define unlock_user(s, args, len) uaccess_unlock_user(env, s, args, len) -ssize_t uaccess_strlen_user(CPUArchState *env, target_ulong addr); +ssize_t uaccess_strlen_user(CPUArchState *env, vaddr addr); #define target_strlen(p) uaccess_strlen_user(env, p) #endif /* SEMIHOSTING_SOFTMMU_UACCESS_H */ diff --git a/include/standard-headers/asm-x86/setup_data.h b/include/standard-headers/asm-x86/setup_data.h index 09355f54c55..2e446c1d858 100644 --- a/include/standard-headers/asm-x86/setup_data.h +++ b/include/standard-headers/asm-x86/setup_data.h @@ -13,12 +13,13 @@ #define SETUP_CC_BLOB 7 #define SETUP_IMA 8 #define SETUP_RNG_SEED 9 -#define SETUP_ENUM_MAX SETUP_RNG_SEED +#define SETUP_KEXEC_KHO 10 +#define SETUP_ENUM_MAX SETUP_KEXEC_KHO #define SETUP_INDIRECT (1<<31) #define SETUP_TYPE_MAX (SETUP_ENUM_MAX | SETUP_INDIRECT) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include "standard-headers/linux/types.h" @@ -78,6 +79,16 @@ struct ima_setup_data { uint64_t size; } QEMU_PACKED; -#endif /* __ASSEMBLY__ */ +/* + * Locations of kexec handover metadata + */ +struct kho_data { + uint64_t fdt_addr; + uint64_t fdt_size; + uint64_t scratch_addr; + uint64_t scratch_size; +} QEMU_PACKED; + +#endif /* __ASSEMBLER__ */ #endif /* _ASM_X86_SETUP_DATA_H */ diff --git a/include/standard-headers/drm/drm_fourcc.h b/include/standard-headers/drm/drm_fourcc.h index b72917073d8..c8309d378bf 100644 --- a/include/standard-headers/drm/drm_fourcc.h +++ b/include/standard-headers/drm/drm_fourcc.h @@ -420,6 +420,8 @@ extern "C" { #define DRM_FORMAT_MOD_VENDOR_ARM 0x08 #define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09 #define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a +#define DRM_FORMAT_MOD_VENDOR_MTK 0x0b +#define DRM_FORMAT_MOD_VENDOR_APPLE 0x0c /* add more to the end as needed */ @@ -701,6 +703,31 @@ extern "C" { */ #define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15) +/* + * Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression + * on integrated graphics + * + * The main surface is Tile 4 and at plane index 0. For semi-planar formats + * like NV12, the Y and UV planes are Tile 4 and are located at plane indices + * 0 and 1, respectively. The CCS for all planes are stored outside of the + * GEM object in a reserved memory area dedicated for the storage of the + * CCS data for all compressible GEM objects. + */ +#define I915_FORMAT_MOD_4_TILED_LNL_CCS fourcc_mod_code(INTEL, 16) + +/* + * Intel Color Control Surfaces (CCS) for graphics ver. 20 unified compression + * on discrete graphics + * + * The main surface is Tile 4 and at plane index 0. For semi-planar formats + * like NV12, the Y and UV planes are Tile 4 and are located at plane indices + * 0 and 1, respectively. The CCS for all planes are stored outside of the + * GEM object in a reserved memory area dedicated for the storage of the + * CCS data for all compressible GEM objects. 
The GEM object must be stored in + * contiguous memory with a size aligned to 64KB + */ +#define I915_FORMAT_MOD_4_TILED_BMG_CCS fourcc_mod_code(INTEL, 17) + /* * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks * @@ -1427,6 +1454,90 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier) */ #define AMLOGIC_FBC_OPTION_MEM_SAVING (1ULL << 0) +/* MediaTek modifiers + * Bits Parameter Notes + * ----- ------------------------ --------------------------------------------- + * 7: 0 TILE LAYOUT Values are MTK_FMT_MOD_TILE_* + * 15: 8 COMPRESSION Values are MTK_FMT_MOD_COMPRESS_* + * 23:16 10 BIT LAYOUT Values are MTK_FMT_MOD_10BIT_LAYOUT_* + * + */ + +#define DRM_FORMAT_MOD_MTK(__flags) fourcc_mod_code(MTK, __flags) + +/* + * MediaTek Tiled Modifier + * The lowest 8 bits of the modifier is used to specify the tiling + * layout. Only the 16L_32S tiling is used for now, but we define an + * "untiled" version and leave room for future expansion. + */ +#define MTK_FMT_MOD_TILE_MASK 0xf +#define MTK_FMT_MOD_TILE_NONE 0x0 +#define MTK_FMT_MOD_TILE_16L32S 0x1 + +/* + * Bits 8-15 specify compression options + */ +#define MTK_FMT_MOD_COMPRESS_MASK (0xf << 8) +#define MTK_FMT_MOD_COMPRESS_NONE (0x0 << 8) +#define MTK_FMT_MOD_COMPRESS_V1 (0x1 << 8) + +/* + * Bits 16-23 specify how the bits of 10 bit formats are + * stored out in memory + */ +#define MTK_FMT_MOD_10BIT_LAYOUT_MASK (0xf << 16) +#define MTK_FMT_MOD_10BIT_LAYOUT_PACKED (0x0 << 16) +#define MTK_FMT_MOD_10BIT_LAYOUT_LSBTILED (0x1 << 16) +#define MTK_FMT_MOD_10BIT_LAYOUT_LSBRASTER (0x2 << 16) + +/* alias for the most common tiling format */ +#define DRM_FORMAT_MOD_MTK_16L_32S_TILE DRM_FORMAT_MOD_MTK(MTK_FMT_MOD_TILE_16L32S) + +/* + * Apple GPU-tiled layouts. + * + * Apple GPUs support nonlinear tilings with optional lossless compression. + * + * GPU-tiled images are divided into 16KiB tiles: + * + * Bytes per pixel Tile size + * --------------- --------- + * 1 128x128 + * 2 128x64 + * 4 64x64 + * 8 64x32 + * 16 32x32 + * + * Tiles are raster-order. Pixels within a tile are interleaved (Morton order). + * + * Compressed images pad the body to 128-bytes and are immediately followed by a + * metadata section. The metadata section rounds the image dimensions to + * powers-of-two and contains 8 bytes for each 16x16 compression subtile. + * Subtiles are interleaved (Morton order). + * + * All images are 128-byte aligned. + * + * These layouts fundamentally do not have meaningful strides. No matter how we + * specify strides for these layouts, userspace unaware of Apple image layouts + * will be unable to use correctly the specified stride for any purpose. + * Userspace aware of the image layouts do not use strides. The most "correct" + * convention would be setting the image stride to 0. Unfortunately, some + * software assumes the stride is at least (width * bytes per pixel). We + * therefore require that stride equals (width * bytes per pixel). Since the + * stride is arbitrary here, we pick the simplest convention. + * + * Although containing two sections, compressed image layouts are treated in + * software as a single plane. This is modelled after AFBC, a similar + * scheme. Attempting to separate the sections to be "explicit" in DRM would + * only generate more confusion, as software does not treat the image this way. 
+ * + * For detailed information on the hardware image layouts, see + * https://docs.mesa3d.org/drivers/asahi.html#image-layouts + */ +#define DRM_FORMAT_MOD_APPLE_GPU_TILED fourcc_mod_code(APPLE, 1) +#define DRM_FORMAT_MOD_APPLE_GPU_TILED_COMPRESSED fourcc_mod_code(APPLE, 2) + /* * AMD modifiers * @@ -1475,6 +1586,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier) #define AMD_FMT_MOD_TILE_VER_GFX10 2 #define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3 #define AMD_FMT_MOD_TILE_VER_GFX11 4 +#define AMD_FMT_MOD_TILE_VER_GFX12 5 /* * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical @@ -1485,13 +1597,31 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier) /* * 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has * GFX9 as canonical version. + * + * 64K_D_2D on GFX12 is identical to 64K_D on GFX11. */ #define AMD_FMT_MOD_TILE_GFX9_64K_D 10 +#define AMD_FMT_MOD_TILE_GFX9_4K_D_X 22 #define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25 #define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26 #define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27 #define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31 +/* Gfx12 swizzle modes: + * 0 - LINEAR + * 1 - 256B_2D - 2D block dimensions + * 2 - 4KB_2D + * 3 - 64KB_2D + * 4 - 256KB_2D + * 5 - 4KB_3D - 3D block dimensions + * 6 - 64KB_3D + * 7 - 256KB_3D + */ +#define AMD_FMT_MOD_TILE_GFX12_256B_2D 1 +#define AMD_FMT_MOD_TILE_GFX12_4K_2D 2 +#define AMD_FMT_MOD_TILE_GFX12_64K_2D 3 +#define AMD_FMT_MOD_TILE_GFX12_256K_2D 4 + #define AMD_FMT_MOD_DCC_BLOCK_64B 0 #define AMD_FMT_MOD_DCC_BLOCK_128B 1 #define AMD_FMT_MOD_DCC_BLOCK_256B 2 diff --git a/include/standard-headers/linux/const.h b/include/standard-headers/linux/const.h index 1eb84b5087f..95ede233420 100644 --- a/include/standard-headers/linux/const.h +++ b/include/standard-headers/linux/const.h @@ -28,6 +28,23 @@ #define _BITUL(x) (_UL(1) << (x)) #define _BITULL(x) (_ULL(1) << (x)) +#if !defined(__ASSEMBLY__) +/* + * Missing __asm__ support + * + * __BIT128() would not work in the __asm__ code, as it shifts an + * 'unsigned __int128' data type as direct representation of + * 128 bit constants is not supported in the gcc compiler, as + * they get silently truncated. + * + * TODO: Please revisit this implementation when gcc compiler + * starts representing 128 bit constants directly like long + * and unsigned long etc. Subsequently drop the comment for + * GENMASK_U128() which would then start supporting __asm__ code. 
+ */ +#define _BIT128(x) ((unsigned __int128)(1) << (x)) +#endif + #define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1) #define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) diff --git a/include/standard-headers/linux/ethtool.h b/include/standard-headers/linux/ethtool.h index b0b4b68410f..cef0d207a62 100644 --- a/include/standard-headers/linux/ethtool.h +++ b/include/standard-headers/linux/ethtool.h @@ -681,6 +681,8 @@ enum ethtool_link_ext_substate_module { * @ETH_SS_STATS_ETH_MAC: names of IEEE 802.3 MAC statistics * @ETH_SS_STATS_ETH_CTRL: names of IEEE 802.3 MAC Control statistics * @ETH_SS_STATS_RMON: names of RMON statistics + * @ETH_SS_STATS_PHY: names of PHY(dev) statistics + * @ETH_SS_TS_FLAGS: hardware timestamping flags * * @ETH_SS_COUNT: number of defined string sets */ @@ -706,6 +708,8 @@ enum ethtool_stringset { ETH_SS_STATS_ETH_MAC, ETH_SS_STATS_ETH_CTRL, ETH_SS_STATS_RMON, + ETH_SS_STATS_PHY, + ETH_SS_TS_FLAGS, /* add new constants above here */ ETH_SS_COUNT @@ -752,6 +756,197 @@ enum ethtool_module_power_mode { ETHTOOL_MODULE_POWER_MODE_HIGH, }; +/** + * enum ethtool_c33_pse_ext_state - groups of PSE extended states + * functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION: Group of error_condition states + * @ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID: Group of mr_mps_valid states + * @ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE: Group of mr_pse_enable states + * @ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED: Group of option_detect_ted + * states + * @ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM: Group of option_vport_lim states + * @ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED: Group of ovld_detected states + * @ETHTOOL_C33_PSE_EXT_STATE_PD_DLL_POWER_TYPE: Group of pd_dll_power_type + * states + * @ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE: Group of power_not_available + * states + * @ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED: Group of short_detected states + */ +enum ethtool_c33_pse_ext_state { + ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION = 1, + ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID, + ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE, + ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED, + ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM, + ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED, + ETHTOOL_C33_PSE_EXT_STATE_PD_DLL_POWER_TYPE, + ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE, + ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED, +}; + +/** + * enum ethtool_c33_pse_ext_substate_mr_mps_valid - mr_mps_valid states + * functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_DETECTED_UNDERLOAD: Underload + * state + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_CONNECTION_OPEN: Port is not + * connected + * + * The PSE monitors either the DC or AC Maintain Power Signature + * (MPS, see 33.2.9.1). This variable indicates the presence or absence of + * a valid MPS. + */ +enum ethtool_c33_pse_ext_substate_mr_mps_valid { + ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_DETECTED_UNDERLOAD = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_CONNECTION_OPEN, +}; + +/** + * enum ethtool_c33_pse_ext_substate_error_condition - error_condition states + * functions. 
IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT: Non-existing + * port number + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT: Undefined port + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT: Internal + * hardware fault + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_COMM_ERROR_AFTER_FORCE_ON: + * Communication error after force on + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS: Unknown + * port status + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_TURN_OFF: Host + * crash turn off + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_FORCE_SHUTDOWN: + * Host crash force shutdown + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE: Configuration + * change + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP: Over + * temperature detected + * + * error_condition is a variable indicating the status of + * implementation-specific fault conditions or optionally other system faults + * that prevent the PSE from meeting the specifications in Table 33–11 and that + * require the PSE not to source power. These error conditions are different + * from those monitored by the state diagrams in Figure 33–10. + */ +enum ethtool_c33_pse_ext_substate_error_condition { + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_COMM_ERROR_AFTER_FORCE_ON, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_TURN_OFF, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_FORCE_SHUTDOWN, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP, +}; + +/** + * enum ethtool_c33_pse_ext_substate_mr_pse_enable - mr_pse_enable states + * functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE: Disable + * pin active + * + * mr_pse_enable is control variable that selects PSE operation and test + * functions. + */ +enum ethtool_c33_pse_ext_substate_mr_pse_enable { + ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE = 1, +}; + +/** + * enum ethtool_c33_pse_ext_substate_option_detect_ted - option_detect_ted + * states functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS: Detection + * in process + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR: + * Connection check error + * + * option_detect_ted is a variable indicating if detection can be performed + * by the PSE during the ted_timer interval. + */ +enum ethtool_c33_pse_ext_substate_option_detect_ted { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR, +}; + +/** + * enum ethtool_c33_pse_ext_substate_option_vport_lim - option_vport_lim states + * functions. 
IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE: Main supply + * voltage is high + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE: Main supply + * voltage is low + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION: Voltage + * injection into the port + * + * option_vport_lim is an optional variable indicates if VPSE is out of the + * operating range during normal operating state. + */ +enum ethtool_c33_pse_ext_substate_option_vport_lim { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION, +}; + +/** + * enum ethtool_c33_pse_ext_substate_ovld_detected - ovld_detected states + * functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD: Overload state + * + * ovld_detected is a variable indicating if the PSE output current has been + * in an overload condition (see 33.2.7.6) for at least TCUT of a one-second + * sliding time. + */ +enum ethtool_c33_pse_ext_substate_ovld_detected { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD = 1, +}; + +/** + * enum ethtool_c33_pse_ext_substate_power_not_available - power_not_available + * states functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED: Power + * budget exceeded for the controller + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET: + * Configured port power limit exceeded controller power budget + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT: + * Power request from PD exceeds port limit + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT: Power + * denied due to Hardware power limit + * + * power_not_available is a variable that is asserted in an + * implementation-dependent manner when the PSE is no longer capable of + * sourcing sufficient power to support the attached PD. Sufficient power + * is defined by classification; see 33.2.6. + */ +enum ethtool_c33_pse_ext_substate_power_not_available { + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT, +}; + +/** + * enum ethtool_c33_pse_ext_substate_short_detected - short_detected states + * functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION: Short + * condition was detected + * + * short_detected is a variable indicating if the PSE output current has been + * in a short circuit condition for TLIM within a sliding window (see 33.2.7.7). + */ +enum ethtool_c33_pse_ext_substate_short_detected { + ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION = 1, +}; + /** * enum ethtool_pse_types - Types of PSE controller. * @ETHTOOL_PSE_UNKNOWN: Type of PSE controller is unknown @@ -877,6 +1072,24 @@ enum ethtool_mm_verify_status { ETHTOOL_MM_VERIFY_STATUS_DISABLED, }; +/** + * enum ethtool_module_fw_flash_status - plug-in module firmware flashing status + * @ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED: The firmware flashing process has + * started. + * @ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS: The firmware flashing process + * is in progress. 
+ * @ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED: The firmware flashing process was + * completed successfully. + * @ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR: The firmware flashing process was + * stopped due to an error. + */ +enum ethtool_module_fw_flash_status { + ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED = 1, + ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS, + ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED, + ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR, +}; + /** * struct ethtool_gstrings - string set for data tagging * @cmd: Command number = %ETHTOOL_GSTRINGS @@ -1845,6 +2058,25 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99, ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100, ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101, + ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT = 102, + ETHTOOL_LINK_MODE_200000baseCR_Full_BIT = 103, + ETHTOOL_LINK_MODE_200000baseKR_Full_BIT = 104, + ETHTOOL_LINK_MODE_200000baseDR_Full_BIT = 105, + ETHTOOL_LINK_MODE_200000baseDR_2_Full_BIT = 106, + ETHTOOL_LINK_MODE_200000baseSR_Full_BIT = 107, + ETHTOOL_LINK_MODE_200000baseVR_Full_BIT = 108, + ETHTOOL_LINK_MODE_400000baseCR2_Full_BIT = 109, + ETHTOOL_LINK_MODE_400000baseKR2_Full_BIT = 110, + ETHTOOL_LINK_MODE_400000baseDR2_Full_BIT = 111, + ETHTOOL_LINK_MODE_400000baseDR2_2_Full_BIT = 112, + ETHTOOL_LINK_MODE_400000baseSR2_Full_BIT = 113, + ETHTOOL_LINK_MODE_400000baseVR2_Full_BIT = 114, + ETHTOOL_LINK_MODE_800000baseCR4_Full_BIT = 115, + ETHTOOL_LINK_MODE_800000baseKR4_Full_BIT = 116, + ETHTOOL_LINK_MODE_800000baseDR4_Full_BIT = 117, + ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT = 118, + ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT = 119, + ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT = 120, /* must be last entry */ __ETHTOOL_LINK_MODE_MASK_NBITS @@ -2057,73 +2289,81 @@ static inline int ethtool_validate_duplex(uint8_t duplex) * be exploited to reduce the RSS queue spread. */ #define RXH_XFRM_SYM_XOR (1 << 0) +/* Similar to SYM_XOR, except that one copy of the XOR'ed fields is replaced by + * an OR of the same fields + */ +#define RXH_XFRM_SYM_OR_XOR (1 << 1) #define RXH_XFRM_NO_CHANGE 0xff -/* L2-L4 network traffic flow types */ -#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */ -#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */ -#define SCTP_V4_FLOW 0x03 /* hash or spec (sctp_ip4_spec) */ -#define AH_ESP_V4_FLOW 0x04 /* hash only */ -#define TCP_V6_FLOW 0x05 /* hash or spec (tcp_ip6_spec; nfc only) */ -#define UDP_V6_FLOW 0x06 /* hash or spec (udp_ip6_spec; nfc only) */ -#define SCTP_V6_FLOW 0x07 /* hash or spec (sctp_ip6_spec; nfc only) */ -#define AH_ESP_V6_FLOW 0x08 /* hash only */ -#define AH_V4_FLOW 0x09 /* hash or spec (ah_ip4_spec) */ -#define ESP_V4_FLOW 0x0a /* hash or spec (esp_ip4_spec) */ -#define AH_V6_FLOW 0x0b /* hash or spec (ah_ip6_spec; nfc only) */ -#define ESP_V6_FLOW 0x0c /* hash or spec (esp_ip6_spec; nfc only) */ -#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ -#define IP_USER_FLOW IPV4_USER_FLOW -#define IPV6_USER_FLOW 0x0e /* spec only (usr_ip6_spec; nfc only) */ -#define IPV4_FLOW 0x10 /* hash only */ -#define IPV6_FLOW 0x11 /* hash only */ -#define ETHER_FLOW 0x12 /* spec only (ether_spec) */ - -/* Used for GTP-U IPv4 and IPv6. - * The format of GTP packets only includes - * elements such as TEID and GTP version. - * It is primarily intended for data communication of the UE. - */ -#define GTPU_V4_FLOW 0x13 /* hash only */ -#define GTPU_V6_FLOW 0x14 /* hash only */ - -/* Use for GTP-C IPv4 and v6. - * The format of these GTP packets does not include TEID. 
- * Primarily expected to be used for communication - * to create sessions for UE data communication, - * commonly referred to as CSR (Create Session Request). - */ -#define GTPC_V4_FLOW 0x15 /* hash only */ -#define GTPC_V6_FLOW 0x16 /* hash only */ - -/* Use for GTP-C IPv4 and v6. - * Unlike GTPC_V4_FLOW, the format of these GTP packets includes TEID. - * After session creation, it becomes this packet. - * This is mainly used for requests to realize UE handover. - */ -#define GTPC_TEID_V4_FLOW 0x17 /* hash only */ -#define GTPC_TEID_V6_FLOW 0x18 /* hash only */ - -/* Use for GTP-U and extended headers for the PSC (PDU Session Container). - * The format of these GTP packets includes TEID and QFI. - * In 5G communication using UPF (User Plane Function), - * data communication with this extended header is performed. - */ -#define GTPU_EH_V4_FLOW 0x19 /* hash only */ -#define GTPU_EH_V6_FLOW 0x1a /* hash only */ - -/* Use for GTP-U IPv4 and v6 PSC (PDU Session Container) extended headers. - * This differs from GTPU_EH_V(4|6)_FLOW in that it is distinguished by - * UL/DL included in the PSC. - * There are differences in the data included based on Downlink/Uplink, - * and can be used to distinguish packets. - * The functions described so far are useful when you want to - * handle communication from the mobile network in UPF, PGW, etc. - */ -#define GTPU_UL_V4_FLOW 0x1b /* hash only */ -#define GTPU_UL_V6_FLOW 0x1c /* hash only */ -#define GTPU_DL_V4_FLOW 0x1d /* hash only */ -#define GTPU_DL_V6_FLOW 0x1e /* hash only */ +enum { + /* L2-L4 network traffic flow types */ + TCP_V4_FLOW = 0x01, /* hash or spec (tcp_ip4_spec) */ + UDP_V4_FLOW = 0x02, /* hash or spec (udp_ip4_spec) */ + SCTP_V4_FLOW = 0x03, /* hash or spec (sctp_ip4_spec) */ + AH_ESP_V4_FLOW = 0x04, /* hash only */ + TCP_V6_FLOW = 0x05, /* hash or spec (tcp_ip6_spec; nfc only) */ + UDP_V6_FLOW = 0x06, /* hash or spec (udp_ip6_spec; nfc only) */ + SCTP_V6_FLOW = 0x07, /* hash or spec (sctp_ip6_spec; nfc only) */ + AH_ESP_V6_FLOW = 0x08, /* hash only */ + AH_V4_FLOW = 0x09, /* hash or spec (ah_ip4_spec) */ + ESP_V4_FLOW = 0x0a, /* hash or spec (esp_ip4_spec) */ + AH_V6_FLOW = 0x0b, /* hash or spec (ah_ip6_spec; nfc only) */ + ESP_V6_FLOW = 0x0c, /* hash or spec (esp_ip6_spec; nfc only) */ + IPV4_USER_FLOW = 0x0d, /* spec only (usr_ip4_spec) */ + IP_USER_FLOW = IPV4_USER_FLOW, + IPV6_USER_FLOW = 0x0e, /* spec only (usr_ip6_spec; nfc only) */ + IPV4_FLOW = 0x10, /* hash only */ + IPV6_FLOW = 0x11, /* hash only */ + ETHER_FLOW = 0x12, /* spec only (ether_spec) */ + + /* Used for GTP-U IPv4 and IPv6. + * The format of GTP packets only includes + * elements such as TEID and GTP version. + * It is primarily intended for data communication of the UE. + */ + GTPU_V4_FLOW = 0x13, /* hash only */ + GTPU_V6_FLOW = 0x14, /* hash only */ + + /* Use for GTP-C IPv4 and v6. + * The format of these GTP packets does not include TEID. + * Primarily expected to be used for communication + * to create sessions for UE data communication, + * commonly referred to as CSR (Create Session Request). + */ + GTPC_V4_FLOW = 0x15, /* hash only */ + GTPC_V6_FLOW = 0x16, /* hash only */ + + /* Use for GTP-C IPv4 and v6. + * Unlike GTPC_V4_FLOW, the format of these GTP packets includes TEID. + * After session creation, it becomes this packet. + * This is mainly used for requests to realize UE handover. 
+ */ + GTPC_TEID_V4_FLOW = 0x17, /* hash only */ + GTPC_TEID_V6_FLOW = 0x18, /* hash only */ + + /* Use for GTP-U and extended headers for the PSC (PDU Session Container). + * The format of these GTP packets includes TEID and QFI. + * In 5G communication using UPF (User Plane Function), + * data communication with this extended header is performed. + */ + GTPU_EH_V4_FLOW = 0x19, /* hash only */ + GTPU_EH_V6_FLOW = 0x1a, /* hash only */ + + /* Use for GTP-U IPv4 and v6 PSC (PDU Session Container) extended headers. + * This differs from GTPU_EH_V(4|6)_FLOW in that it is distinguished by + * UL/DL included in the PSC. + * There are differences in the data included based on Downlink/Uplink, + * and can be used to distinguish packets. + * The functions described so far are useful when you want to + * handle communication from the mobile network in UPF, PGW, etc. + */ + GTPU_UL_V4_FLOW = 0x1b, /* hash only */ + GTPU_UL_V6_FLOW = 0x1c, /* hash only */ + GTPU_DL_V4_FLOW = 0x1d, /* hash only */ + GTPU_DL_V6_FLOW = 0x1e, /* hash only */ + + __FLOW_TYPE_COUNT, +}; /* Flag to enable additional fields in struct ethtool_rx_flow_spec */ #define FLOW_EXT 0x80000000 @@ -2316,6 +2556,11 @@ struct ethtool_link_settings { uint8_t master_slave_state; uint8_t rate_matching; uint32_t reserved[7]; + /* Linux builds with -Wflex-array-member-not-at-end but does + * not use the "link_mode_masks" member. Leave it defined for + * userspace for now, and when userspace wants to start using + * -Wfamnae, we'll need a new solution. + */ uint32_t link_mode_masks[]; /* layout of link_mode_masks fields: * uint32_t map_supported[link_mode_masks_nwords]; @@ -2323,4 +2568,20 @@ struct ethtool_link_settings { * uint32_t map_lp_advertising[link_mode_masks_nwords]; */ }; + +/** + * enum phy_upstream - Represents the upstream component a given PHY device + * is connected to, as in what is on the other end of the MII bus. Most PHYs + * will be attached to an Ethernet MAC controller, but in some cases, there's + * an intermediate PHY used as a media-converter, which will driver another + * MII interface as its output. 
+ * @PHY_UPSTREAM_MAC: Upstream component is a MAC (a switch port, + * or ethernet controller) + * @PHY_UPSTREAM_PHY: Upstream component is a PHY (likely a media converter) + */ +enum phy_upstream { + PHY_UPSTREAM_MAC, + PHY_UPSTREAM_PHY, +}; + #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/standard-headers/linux/fuse.h b/include/standard-headers/linux/fuse.h index bac9dbc49f8..d8b2fd67e16 100644 --- a/include/standard-headers/linux/fuse.h +++ b/include/standard-headers/linux/fuse.h @@ -217,6 +217,24 @@ * - add backing_id to fuse_open_out, add FOPEN_PASSTHROUGH open flag * - add FUSE_NO_EXPORT_SUPPORT init flag * - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag + * + * 7.41 + * - add FUSE_ALLOW_IDMAP + * 7.42 + * - Add FUSE_OVER_IO_URING and all other io-uring related flags and data + * structures: + * - struct fuse_uring_ent_in_out + * - struct fuse_uring_req_header + * - struct fuse_uring_cmd_req + * - FUSE_URING_IN_OUT_HEADER_SZ + * - FUSE_URING_OP_IN_OUT_SZ + * - enum fuse_uring_cmd + * + * 7.43 + * - add FUSE_REQUEST_TIMEOUT + * + * 7.44 + * - add FUSE_NOTIFY_INC_EPOCH */ #ifndef _LINUX_FUSE_H @@ -248,7 +266,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 40 +#define FUSE_KERNEL_MINOR_VERSION 44 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -417,6 +435,10 @@ struct fuse_file_lock { * FUSE_NO_EXPORT_SUPPORT: explicitly disable export support * FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit * of the request ID indicates resend requests + * FUSE_ALLOW_IDMAP: allow creation of idmapped mounts + * FUSE_OVER_IO_URING: Indicate that client supports io-uring + * FUSE_REQUEST_TIMEOUT: kernel supports timing out requests. + * init_out.request_timeout contains the timeout (in secs) */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -459,9 +481,11 @@ struct fuse_file_lock { #define FUSE_PASSTHROUGH (1ULL << 37) #define FUSE_NO_EXPORT_SUPPORT (1ULL << 38) #define FUSE_HAS_RESEND (1ULL << 39) - /* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ #define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP +#define FUSE_ALLOW_IDMAP (1ULL << 40) +#define FUSE_OVER_IO_URING (1ULL << 41) +#define FUSE_REQUEST_TIMEOUT (1ULL << 42) /** * CUSE INIT request/reply flags @@ -646,6 +670,7 @@ enum fuse_notify_code { FUSE_NOTIFY_RETRIEVE = 5, FUSE_NOTIFY_DELETE = 6, FUSE_NOTIFY_RESEND = 7, + FUSE_NOTIFY_INC_EPOCH = 8, FUSE_NOTIFY_CODE_MAX, }; @@ -889,7 +914,8 @@ struct fuse_init_out { uint16_t map_alignment; uint32_t flags2; uint32_t max_stack_depth; - uint32_t unused[6]; + uint16_t request_timeout; + uint16_t unused[11]; }; #define CUSE_INIT_INFO_MAX 4096 @@ -980,6 +1006,21 @@ struct fuse_fallocate_in { */ #define FUSE_UNIQUE_RESEND (1ULL << 63) +/** + * This value will be set by the kernel to + * (struct fuse_in_header).{uid,gid} fields in + * case when: + * - fuse daemon enabled FUSE_ALLOW_IDMAP + * - idmapping information is not available and uid/gid + * can not be mapped in accordance with an idmapping. + * + * Note: an idmapping information always available + * for inode creation operations like: + * FUSE_MKNOD, FUSE_SYMLINK, FUSE_MKDIR, FUSE_TMPFILE, + * FUSE_CREATE and FUSE_RENAME2 (with RENAME_WHITEOUT). 
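+ *
+ * For example, a daemon that enabled FUSE_ALLOW_IDMAP might treat a request
+ * whose in_header.uid (or in_header.gid) equals FUSE_INVALID_UIDGID as
+ * "caller identity could not be mapped" and fall back to checks that do not
+ * depend on the caller's uid/gid, rather than interpreting it as a literal
+ * id of 0xffffffff.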
+ */ +#define FUSE_INVALID_UIDGID ((uint32_t)(-1)) + struct fuse_in_header { uint32_t len; uint32_t opcode; @@ -1182,4 +1223,67 @@ struct fuse_supp_groups { uint32_t groups[]; }; +/** + * Size of the ring buffer header + */ +#define FUSE_URING_IN_OUT_HEADER_SZ 128 +#define FUSE_URING_OP_IN_OUT_SZ 128 + +/* Used as part of the fuse_uring_req_header */ +struct fuse_uring_ent_in_out { + uint64_t flags; + + /* + * commit ID to be used in a reply to a ring request (see also + * struct fuse_uring_cmd_req) + */ + uint64_t commit_id; + + /* size of user payload buffer */ + uint32_t payload_sz; + uint32_t padding; + + uint64_t reserved; +}; + +/** + * Header for all fuse-io-uring requests + */ +struct fuse_uring_req_header { + /* struct fuse_in_header / struct fuse_out_header */ + char in_out[FUSE_URING_IN_OUT_HEADER_SZ]; + + /* per op code header */ + char op_in[FUSE_URING_OP_IN_OUT_SZ]; + + struct fuse_uring_ent_in_out ring_ent_in_out; +}; + +/** + * sqe commands to the kernel + */ +enum fuse_uring_cmd { + FUSE_IO_URING_CMD_INVALID = 0, + + /* register the request buffer and fetch a fuse request */ + FUSE_IO_URING_CMD_REGISTER = 1, + + /* commit fuse request result and fetch next request */ + FUSE_IO_URING_CMD_COMMIT_AND_FETCH = 2, +}; + +/** + * In the 80B command area of the SQE. + */ +struct fuse_uring_cmd_req { + uint64_t flags; + + /* entry identifier for commits */ + uint64_t commit_id; + + /* queue the command is for (queue index) */ + uint16_t qid; + uint8_t padding[6]; +}; + #endif /* _LINUX_FUSE_H */ diff --git a/include/standard-headers/linux/input-event-codes.h b/include/standard-headers/linux/input-event-codes.h index 2221b0c3834..a82ff795e06 100644 --- a/include/standard-headers/linux/input-event-codes.h +++ b/include/standard-headers/linux/input-event-codes.h @@ -519,6 +519,7 @@ #define KEY_NOTIFICATION_CENTER 0x1bc /* Show/hide the notification center */ #define KEY_PICKUP_PHONE 0x1bd /* Answer incoming call */ #define KEY_HANGUP_PHONE 0x1be /* Decline incoming call */ +#define KEY_LINK_PHONE 0x1bf /* AL Phone Syncing */ #define KEY_DEL_EOL 0x1c0 #define KEY_DEL_EOS 0x1c1 @@ -618,6 +619,8 @@ #define KEY_CAMERA_ACCESS_ENABLE 0x24b /* Enables programmatic access to camera devices. (HUTRR72) */ #define KEY_CAMERA_ACCESS_DISABLE 0x24c /* Disables programmatic access to camera devices. (HUTRR72) */ #define KEY_CAMERA_ACCESS_TOGGLE 0x24d /* Toggles the current state of the camera access control. 
(HUTRR72) */ +#define KEY_ACCESSIBILITY 0x24e /* Toggles the system bound accessibility UI/command (HUTRR116) */ +#define KEY_DO_NOT_DISTURB 0x24f /* Toggles the system-wide "Do Not Disturb" control (HUTRR94)*/ #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ @@ -922,7 +925,8 @@ #define SW_MUTE_DEVICE 0x0e /* set = device disabled */ #define SW_PEN_INSERTED 0x0f /* set = pen inserted */ #define SW_MACHINE_COVER 0x10 /* set = cover closed */ -#define SW_MAX_ 0x10 +#define SW_USB_INSERT 0x11 /* set = USB audio device connected */ +#define SW_MAX_ 0x11 #define SW_CNT (SW_MAX_+1) /* diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h index 94c00996e63..a3a3e942ded 100644 --- a/include/standard-headers/linux/pci_regs.h +++ b/include/standard-headers/linux/pci_regs.h @@ -340,7 +340,8 @@ #define PCI_MSIX_ENTRY_UPPER_ADDR 0x4 /* Message Upper Address */ #define PCI_MSIX_ENTRY_DATA 0x8 /* Message Data */ #define PCI_MSIX_ENTRY_VECTOR_CTRL 0xc /* Vector Control */ -#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001 +#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001 /* Mask Bit */ +#define PCI_MSIX_ENTRY_CTRL_ST 0xffff0000 /* Steering Tag */ /* CompactPCI Hotswap Register */ @@ -485,6 +486,7 @@ #define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ #define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ #define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */ +#define PCI_EXP_FLAGS_FLIT 0x8000 /* Flit Mode Supported */ #define PCI_EXP_DEVCAP 0x04 /* Device capabilities */ #define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */ #define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */ @@ -532,7 +534,7 @@ #define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */ #define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1 12 /* v1 endpoints without link end here */ #define PCI_EXP_LNKCAP 0x0c /* Link Capabilities */ -#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */ +#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Max Link Speed (prior to PCIe r3.0: Supported Link Speeds) */ #define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ #define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ #define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ @@ -634,9 +636,11 @@ #define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */ #define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */ #define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */ -#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */ +#define PCI_EXP_RTCTL_RRS_SVE 0x0010 /* Config RRS Software Visibility Enable */ +#define PCI_EXP_RTCTL_CRSSVE PCI_EXP_RTCTL_RRS_SVE /* compatibility */ #define PCI_EXP_RTCAP 0x1e /* Root Capabilities */ -#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */ +#define PCI_EXP_RTCAP_RRS_SV 0x0001 /* Config RRS Software Visibility */ +#define PCI_EXP_RTCAP_CRSVIS PCI_EXP_RTCAP_RRS_SV /* compatibility */ #define PCI_EXP_RTSTA 0x20 /* Root Status */ #define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */ #define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */ @@ -657,10 +661,12 @@ #define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* 64b AtomicOp completion */ #define PCI_EXP_DEVCAP2_ATOMIC_COMP128 0x00000200 /* 128b AtomicOp completion */ #define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */ +#define 
PCI_EXP_DEVCAP2_TPH_COMP_MASK 0x00003000 /* TPH completer support */ #define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */ #define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */ #define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */ #define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */ +#define PCI_EXP_DEVCAP2_EE_PREFIX_MAX 0x00c00000 /* Max End-End TLP Prefixes */ #define PCI_EXP_DEVCTL2 0x28 /* Device Control 2 */ #define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */ #define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */ @@ -676,6 +682,7 @@ #define PCI_EXP_DEVSTA2 0x2a /* Device Status 2 */ #define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 0x2c /* end of v2 EPs w/o link */ #define PCI_EXP_LNKCAP2 0x2c /* Link Capabilities 2 */ +#define PCI_EXP_LNKCAP2_SLS 0x000000fe /* Supported Link Speeds Vector */ #define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ #define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ #define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ @@ -740,9 +747,11 @@ #define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */ #define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */ #define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */ +#define PCI_EXT_CAP_ID_NPEM 0x29 /* Native PCIe Enclosure Management */ #define PCI_EXT_CAP_ID_PL_32GT 0x2A /* Physical Layer 32.0 GT/s */ #define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */ -#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE +#define PCI_EXT_CAP_ID_PL_64GT 0x31 /* Physical Layer 64.0 GT/s */ +#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PL_64GT #define PCI_EXT_CAP_DSN_SIZEOF 12 #define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40 @@ -783,10 +792,13 @@ /* Same bits as above */ #define PCI_ERR_CAP 0x18 /* Advanced Error Capabilities & Ctrl*/ #define PCI_ERR_CAP_FEP(x) ((x) & 0x1f) /* First Error Pointer */ -#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */ -#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */ -#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */ -#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */ +#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */ +#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */ +#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */ +#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */ +#define PCI_ERR_CAP_PREFIX_LOG_PRESENT 0x00000800 /* TLP Prefix Log Present */ +#define PCI_ERR_CAP_TLP_LOG_FLIT 0x00040000 /* TLP was logged in Flit Mode */ +#define PCI_ERR_CAP_TLP_LOG_SIZE 0x00f80000 /* Logged TLP Size (only in Flit mode) */ #define PCI_ERR_HEADER_LOG 0x1c /* Header Log Register (16 bytes) */ #define PCI_ERR_ROOT_COMMAND 0x2c /* Root Error Command */ #define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */ @@ -802,6 +814,7 @@ #define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */ #define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */ #define PCI_ERR_ROOT_ERR_SRC 0x34 /* Error Source Identification */ +#define PCI_ERR_PREFIX_LOG 0x38 /* TLP Prefix LOG Register (up to 16 bytes) */ /* Virtual Channel */ #define PCI_VC_PORT_CAP1 0x04 @@ -995,9 +1008,6 @@ #define PCI_ACS_CTRL 0x06 /* ACS Control Register */ #define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */ -#define PCI_VSEC_HDR 4 /* extended cap - vendor-specific */ 
-#define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */ - /* SATA capability */ #define PCI_SATA_REGS 4 /* SATA REGs specifier */ #define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */ @@ -1007,7 +1017,7 @@ /* Resizable BARs */ #define PCI_REBAR_CAP 4 /* capability register */ -#define PCI_REBAR_CAP_SIZES 0x00FFFFF0 /* supported BAR sizes */ +#define PCI_REBAR_CAP_SIZES 0xFFFFFFF0 /* supported BAR sizes */ #define PCI_REBAR_CTRL 8 /* control register */ #define PCI_REBAR_CTRL_BAR_IDX 0x00000007 /* BAR index */ #define PCI_REBAR_CTRL_NBAR_MASK 0x000000E0 /* # of resizable BARs */ @@ -1020,15 +1030,34 @@ #define PCI_DPA_CAP_SUBSTATE_MASK 0x1F /* # substates - 1 */ #define PCI_DPA_BASE_SIZEOF 16 /* size with 0 substates */ +/* TPH Completer Support */ +#define PCI_EXP_DEVCAP2_TPH_COMP_NONE 0x0 /* None */ +#define PCI_EXP_DEVCAP2_TPH_COMP_TPH_ONLY 0x1 /* TPH only */ +#define PCI_EXP_DEVCAP2_TPH_COMP_EXT_TPH 0x3 /* TPH and Extended TPH */ + /* TPH Requester */ #define PCI_TPH_CAP 4 /* capability register */ -#define PCI_TPH_CAP_LOC_MASK 0x600 /* location mask */ -#define PCI_TPH_LOC_NONE 0x000 /* no location */ -#define PCI_TPH_LOC_CAP 0x200 /* in capability */ -#define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */ -#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* ST table mask */ -#define PCI_TPH_CAP_ST_SHIFT 16 /* ST table shift */ -#define PCI_TPH_BASE_SIZEOF 0xc /* size with no ST table */ +#define PCI_TPH_CAP_ST_NS 0x00000001 /* No ST Mode Supported */ +#define PCI_TPH_CAP_ST_IV 0x00000002 /* Interrupt Vector Mode Supported */ +#define PCI_TPH_CAP_ST_DS 0x00000004 /* Device Specific Mode Supported */ +#define PCI_TPH_CAP_EXT_TPH 0x00000100 /* Ext TPH Requester Supported */ +#define PCI_TPH_CAP_LOC_MASK 0x00000600 /* ST Table Location */ +#define PCI_TPH_LOC_NONE 0x00000000 /* Not present */ +#define PCI_TPH_LOC_CAP 0x00000200 /* In capability */ +#define PCI_TPH_LOC_MSIX 0x00000400 /* In MSI-X */ +#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* ST Table Size */ +#define PCI_TPH_CAP_ST_SHIFT 16 /* ST Table Size shift */ +#define PCI_TPH_BASE_SIZEOF 0xc /* Size with no ST table */ + +#define PCI_TPH_CTRL 8 /* control register */ +#define PCI_TPH_CTRL_MODE_SEL_MASK 0x00000007 /* ST Mode Select */ +#define PCI_TPH_ST_NS_MODE 0x0 /* No ST Mode */ +#define PCI_TPH_ST_IV_MODE 0x1 /* Interrupt Vector Mode */ +#define PCI_TPH_ST_DS_MODE 0x2 /* Device Specific Mode */ +#define PCI_TPH_CTRL_REQ_EN_MASK 0x00000300 /* TPH Requester Enable */ +#define PCI_TPH_REQ_DISABLE 0x0 /* No TPH requests allowed */ +#define PCI_TPH_REQ_TPH_ONLY 0x1 /* TPH only requests allowed */ +#define PCI_TPH_REQ_EXT_TPH 0x3 /* Extended TPH requests allowed */ /* Downstream Port Containment */ #define PCI_EXP_DPC_CAP 0x04 /* DPC Capability */ @@ -1036,8 +1065,9 @@ #define PCI_EXP_DPC_CAP_RP_EXT 0x0020 /* Root Port Extensions */ #define PCI_EXP_DPC_CAP_POISONED_TLP 0x0040 /* Poisoned TLP Egress Blocking Supported */ #define PCI_EXP_DPC_CAP_SW_TRIGGER 0x0080 /* Software Triggering Supported */ -#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size */ +#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size [3:0] */ #define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */ +#define PCI_EXP_DPC_RP_PIO_LOG_SIZE4 0x2000 /* RP PIO Log Size [4] */ #define PCI_EXP_DPC_CTL 0x06 /* DPC control */ #define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */ @@ -1115,12 +1145,55 @@ #define PCI_DLF_CAP 0x04 /* Capabilities Register */ #define PCI_DLF_EXCHANGE_ENABLE 0x80000000 /* Data 
Link Feature Exchange Enable */ +/* Secondary PCIe Capability 8.0 GT/s */ +#define PCI_SECPCI_LE_CTRL 0x0c /* Lane Equalization Control Register */ + /* Physical Layer 16.0 GT/s */ #define PCI_PL_16GT_LE_CTRL 0x20 /* Lane Equalization Control Register */ #define PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK 0x0000000F #define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0 #define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4 +/* Physical Layer 32.0 GT/s */ +#define PCI_PL_32GT_LE_CTRL 0x20 /* Lane Equalization Control Register */ + +/* Physical Layer 64.0 GT/s */ +#define PCI_PL_64GT_LE_CTRL 0x20 /* Lane Equalization Control Register */ + +/* Native PCIe Enclosure Management */ +#define PCI_NPEM_CAP 0x04 /* NPEM capability register */ +#define PCI_NPEM_CAP_CAPABLE 0x00000001 /* NPEM Capable */ + +#define PCI_NPEM_CTRL 0x08 /* NPEM control register */ +#define PCI_NPEM_CTRL_ENABLE 0x00000001 /* NPEM Enable */ + +/* + * Native PCIe Enclosure Management indication bits and Reset command bit + * are corresponding for capability and control registers. + */ +#define PCI_NPEM_CMD_RESET 0x00000002 /* Reset Command */ +#define PCI_NPEM_IND_OK 0x00000004 /* OK */ +#define PCI_NPEM_IND_LOCATE 0x00000008 /* Locate */ +#define PCI_NPEM_IND_FAIL 0x00000010 /* Fail */ +#define PCI_NPEM_IND_REBUILD 0x00000020 /* Rebuild */ +#define PCI_NPEM_IND_PFA 0x00000040 /* Predicted Failure Analysis */ +#define PCI_NPEM_IND_HOTSPARE 0x00000080 /* Hot Spare */ +#define PCI_NPEM_IND_ICA 0x00000100 /* In Critical Array */ +#define PCI_NPEM_IND_IFA 0x00000200 /* In Failed Array */ +#define PCI_NPEM_IND_IDT 0x00000400 /* Device Type */ +#define PCI_NPEM_IND_DISABLED 0x00000800 /* Disabled */ +#define PCI_NPEM_IND_SPEC_0 0x01000000 +#define PCI_NPEM_IND_SPEC_1 0x02000000 +#define PCI_NPEM_IND_SPEC_2 0x04000000 +#define PCI_NPEM_IND_SPEC_3 0x08000000 +#define PCI_NPEM_IND_SPEC_4 0x10000000 +#define PCI_NPEM_IND_SPEC_5 0x20000000 +#define PCI_NPEM_IND_SPEC_6 0x40000000 +#define PCI_NPEM_IND_SPEC_7 0x80000000 + +#define PCI_NPEM_STATUS 0x0c /* NPEM status register */ +#define PCI_NPEM_STATUS_CC 0x00000001 /* Command Completed */ + /* Data Object Exchange */ #define PCI_DOE_CAP 0x04 /* DOE Capabilities Register */ #define PCI_DOE_CAP_INT_SUP 0x00000001 /* Interrupt Support */ @@ -1146,9 +1219,12 @@ #define PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX 0x000000ff #define PCI_DOE_DATA_OBJECT_DISC_REQ_3_VER 0x0000ff00 #define PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID 0x0000ffff -#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL 0x00ff0000 +#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_TYPE 0x00ff0000 #define PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX 0xff000000 +/* Deprecated old name, replaced with PCI_DOE_DATA_OBJECT_DISC_RSP_3_TYPE */ +#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL PCI_DOE_DATA_OBJECT_DISC_RSP_3_TYPE + /* Compute Express Link (CXL r3.1, sec 8.1.5) */ #define PCI_DVSEC_CXL_PORT 3 #define PCI_DVSEC_CXL_PORT_CTL 0x0c diff --git a/include/standard-headers/linux/virtio_balloon.h b/include/standard-headers/linux/virtio_balloon.h index f343bfefd82..3121cd2e0e1 100644 --- a/include/standard-headers/linux/virtio_balloon.h +++ b/include/standard-headers/linux/virtio_balloon.h @@ -71,7 +71,13 @@ struct virtio_balloon_config { #define VIRTIO_BALLOON_S_CACHES 7 /* Disk caches */ #define VIRTIO_BALLOON_S_HTLB_PGALLOC 8 /* Hugetlb page allocations */ #define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */ -#define VIRTIO_BALLOON_S_NR 10 +#define VIRTIO_BALLOON_S_OOM_KILL 10 /* OOM killer invocations */ +#define 
VIRTIO_BALLOON_S_ALLOC_STALL 11 /* Stall count of memory allocatoin */ +#define VIRTIO_BALLOON_S_ASYNC_SCAN 12 /* Amount of memory scanned asynchronously */ +#define VIRTIO_BALLOON_S_DIRECT_SCAN 13 /* Amount of memory scanned directly */ +#define VIRTIO_BALLOON_S_ASYNC_RECLAIM 14 /* Amount of memory reclaimed asynchronously */ +#define VIRTIO_BALLOON_S_DIRECT_RECLAIM 15 /* Amount of memory reclaimed directly */ +#define VIRTIO_BALLOON_S_NR 16 #define VIRTIO_BALLOON_S_NAMES_WITH_PREFIX(VIRTIO_BALLOON_S_NAMES_prefix) { \ VIRTIO_BALLOON_S_NAMES_prefix "swap-in", \ @@ -83,7 +89,13 @@ struct virtio_balloon_config { VIRTIO_BALLOON_S_NAMES_prefix "available-memory", \ VIRTIO_BALLOON_S_NAMES_prefix "disk-caches", \ VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-allocations", \ - VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures" \ + VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures", \ + VIRTIO_BALLOON_S_NAMES_prefix "oom-kills", \ + VIRTIO_BALLOON_S_NAMES_prefix "alloc-stalls", \ + VIRTIO_BALLOON_S_NAMES_prefix "async-scans", \ + VIRTIO_BALLOON_S_NAMES_prefix "direct-scans", \ + VIRTIO_BALLOON_S_NAMES_prefix "async-reclaims", \ + VIRTIO_BALLOON_S_NAMES_prefix "direct-reclaims" \ } #define VIRTIO_BALLOON_S_NAMES VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("") diff --git a/include/standard-headers/linux/virtio_crypto.h b/include/standard-headers/linux/virtio_crypto.h index 68066dafb6d..4d350ae5957 100644 --- a/include/standard-headers/linux/virtio_crypto.h +++ b/include/standard-headers/linux/virtio_crypto.h @@ -329,6 +329,7 @@ struct virtio_crypto_op_header { VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00) #define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01) + /* akcipher sign/verify opcodes are deprecated */ #define VIRTIO_CRYPTO_AKCIPHER_SIGN \ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02) #define VIRTIO_CRYPTO_AKCIPHER_VERIFY \ diff --git a/include/standard-headers/linux/virtio_gpu.h b/include/standard-headers/linux/virtio_gpu.h index 2db643ed8fb..00cd3f04af9 100644 --- a/include/standard-headers/linux/virtio_gpu.h +++ b/include/standard-headers/linux/virtio_gpu.h @@ -309,8 +309,10 @@ struct virtio_gpu_cmd_submit { #define VIRTIO_GPU_CAPSET_VIRGL 1 #define VIRTIO_GPU_CAPSET_VIRGL2 2 -/* 3 is reserved for gfxstream */ +#define VIRTIO_GPU_CAPSET_GFXSTREAM_VULKAN 3 #define VIRTIO_GPU_CAPSET_VENUS 4 +#define VIRTIO_GPU_CAPSET_CROSS_DOMAIN 5 +#define VIRTIO_GPU_CAPSET_DRM 6 /* VIRTIO_GPU_CMD_GET_CAPSET_INFO */ struct virtio_gpu_get_capset_info { diff --git a/include/standard-headers/linux/virtio_net.h b/include/standard-headers/linux/virtio_net.h index fc594fe5fcb..982e854f14e 100644 --- a/include/standard-headers/linux/virtio_net.h +++ b/include/standard-headers/linux/virtio_net.h @@ -327,6 +327,19 @@ struct virtio_net_rss_config { uint8_t hash_key_data[/* hash_key_length */]; }; +struct virtio_net_rss_config_hdr { + uint32_t hash_types; + uint16_t indirection_table_mask; + uint16_t unclassified_queue; + uint16_t indirection_table[/* 1 + indirection_table_mask */]; +}; + +struct virtio_net_rss_config_trailer { + uint16_t max_tx_vq; + uint8_t hash_key_length; + uint8_t hash_key_data[/* hash_key_length */]; +}; + #define VIRTIO_NET_CTRL_MQ_RSS_CONFIG 1 /* diff --git a/include/standard-headers/linux/virtio_pci.h b/include/standard-headers/linux/virtio_pci.h index 4010216103e..09e964e6eee 100644 --- a/include/standard-headers/linux/virtio_pci.h +++ b/include/standard-headers/linux/virtio_pci.h @@ -40,6 +40,7 @@ #define _LINUX_VIRTIO_PCI_H #include 
"standard-headers/linux/types.h" +#include "standard-headers/linux/kernel.h" #ifndef VIRTIO_PCI_NO_LEGACY @@ -115,6 +116,8 @@ #define VIRTIO_PCI_CAP_PCI_CFG 5 /* Additional shared memory capability */ #define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8 +/* PCI vendor data configuration */ +#define VIRTIO_PCI_CAP_VENDOR_CFG 9 /* This is the PCI capability header: */ struct virtio_pci_cap { @@ -129,6 +132,18 @@ struct virtio_pci_cap { uint32_t length; /* Length of the structure, in bytes. */ }; +/* This is the PCI vendor data capability header: */ +struct virtio_pci_vndr_data { + uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */ + uint8_t cap_next; /* Generic PCI field: next ptr. */ + uint8_t cap_len; /* Generic PCI field: capability length */ + uint8_t cfg_type; /* Identifies the structure. */ + uint16_t vendor_id; /* Identifies the vendor-specific format. */ + /* For Vendor Definition */ + /* Pads structure to a multiple of 4 bytes */ + /* Reads must not have side effects */ +}; + struct virtio_pci_cap64 { struct virtio_pci_cap cap; uint32_t offset_hi; /* Most sig 32 bits of offset */ @@ -231,6 +246,7 @@ struct virtio_pci_cfg_cap { #define VIRTIO_ADMIN_CMD_LIST_USE 0x1 /* Admin command group type. */ +#define VIRTIO_ADMIN_GROUP_TYPE_SELF 0x0 #define VIRTIO_ADMIN_GROUP_TYPE_SRIOV 0x1 /* Transitional device admin command. */ @@ -240,6 +256,17 @@ struct virtio_pci_cfg_cap { #define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5 #define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6 +/* Device parts access commands. */ +#define VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY 0x7 +#define VIRTIO_ADMIN_CMD_DEVICE_CAP_GET 0x8 +#define VIRTIO_ADMIN_CMD_DRIVER_CAP_SET 0x9 +#define VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE 0xa +#define VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY 0xd +#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET 0xe +#define VIRTIO_ADMIN_CMD_DEV_PARTS_GET 0xf +#define VIRTIO_ADMIN_CMD_DEV_PARTS_SET 0x10 +#define VIRTIO_ADMIN_CMD_DEV_MODE_SET 0x11 + struct virtio_admin_cmd_hdr { uint16_t opcode; /* @@ -286,4 +313,123 @@ struct virtio_admin_cmd_notify_info_result { struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO]; }; +#define VIRTIO_DEV_PARTS_CAP 0x0000 + +struct virtio_dev_parts_cap { + uint8_t get_parts_resource_objects_limit; + uint8_t set_parts_resource_objects_limit; +}; + +#define MAX_CAP_ID __KERNEL_DIV_ROUND_UP(VIRTIO_DEV_PARTS_CAP + 1, 64) + +struct virtio_admin_cmd_query_cap_id_result { + uint64_t supported_caps[MAX_CAP_ID]; +}; + +struct virtio_admin_cmd_cap_get_data { + uint16_t id; + uint8_t reserved[6]; +}; + +struct virtio_admin_cmd_cap_set_data { + uint16_t id; + uint8_t reserved[6]; + uint8_t cap_specific_data[]; +}; + +struct virtio_admin_cmd_resource_obj_cmd_hdr { + uint16_t type; + uint8_t reserved[2]; + uint32_t id; /* Indicates unique resource object id per resource object type */ +}; + +struct virtio_admin_cmd_resource_obj_create_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + uint64_t flags; + uint8_t resource_obj_specific_data[]; +}; + +#define VIRTIO_RESOURCE_OBJ_DEV_PARTS 0 + +#define VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET 0 +#define VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET 1 + +struct virtio_resource_obj_dev_parts { + uint8_t type; + uint8_t reserved[7]; +}; + +#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE 0 +#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_COUNT 1 +#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_LIST 2 + +struct virtio_admin_cmd_dev_parts_metadata_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + uint8_t type; 
+ uint8_t reserved[7]; +}; + +#define VIRTIO_DEV_PART_F_OPTIONAL 0 + +struct virtio_dev_part_hdr { + uint16_t part_type; + uint8_t flags; + uint8_t reserved; + union { + struct { + uint32_t offset; + uint32_t reserved; + } pci_common_cfg; + struct { + uint16_t index; + uint8_t reserved[6]; + } vq_index; + } selector; + uint32_t length; +}; + +struct virtio_dev_part { + struct virtio_dev_part_hdr hdr; + uint8_t value[]; +}; + +struct virtio_admin_cmd_dev_parts_metadata_result { + union { + struct { + uint32_t size; + uint32_t reserved; + } parts_size; + struct { + uint32_t count; + uint32_t reserved; + } hdr_list_count; + struct { + uint32_t count; + uint32_t reserved; + struct virtio_dev_part_hdr hdrs[]; + } hdr_list; + }; +}; + +#define VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_SELECTED 0 +#define VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL 1 + +struct virtio_admin_cmd_dev_parts_get_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + uint8_t type; + uint8_t reserved[7]; + struct virtio_dev_part_hdr hdr_list[]; +}; + +struct virtio_admin_cmd_dev_parts_set_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + struct virtio_dev_part parts[]; +}; + +#define VIRTIO_ADMIN_CMD_DEV_MODE_F_STOPPED 0 + +struct virtio_admin_cmd_dev_mode_set_data { + uint8_t flags; +}; + #endif diff --git a/include/standard-headers/linux/virtio_snd.h b/include/standard-headers/linux/virtio_snd.h index 860f12e0a4e..160d57899f8 100644 --- a/include/standard-headers/linux/virtio_snd.h +++ b/include/standard-headers/linux/virtio_snd.h @@ -25,7 +25,7 @@ struct virtio_snd_config { uint32_t streams; /* # of available channel maps */ uint32_t chmaps; - /* # of available control elements */ + /* # of available control elements (if VIRTIO_SND_F_CTLS) */ uint32_t controls; }; diff --git a/include/standard-headers/linux/vmclock-abi.h b/include/standard-headers/linux/vmclock-abi.h new file mode 100644 index 00000000000..15b0316cb4c --- /dev/null +++ b/include/standard-headers/linux/vmclock-abi.h @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ + +/* + * This structure provides a vDSO-style clock to VM guests, exposing the + * relationship (or lack thereof) between the CPU clock (TSC, timebase, arch + * counter, etc.) and real time. It is designed to address the problem of + * live migration, which other clock enlightenments do not. + * + * When a guest is live migrated, this affects the clock in two ways. + * + * First, even between identical hosts the actual frequency of the underlying + * counter will change within the tolerances of its specification (typically + * ±50PPM, or 4 seconds a day). This frequency also varies over time on the + * same host, but can be tracked by NTP as it generally varies slowly. With + * live migration there is a step change in the frequency, with no warning. + * + * Second, there may be a step change in the value of the counter itself, as + * its accuracy is limited by the precision of the NTP synchronization on the + * source and destination hosts. + * + * So any calibration (NTP, PTP, etc.) which the guest has done on the source + * host before migration is invalid, and needs to be redone on the new host. + * + * In its most basic mode, this structure provides only an indication to the + * guest that live migration has occurred. This allows the guest to know that + * its clock is invalid and take remedial action. For applications that need + * reliable accurate timestamps (e.g. 
distributed databases), the structure + * can be mapped all the way to userspace. This allows the application to see + * directly for itself that the clock is disrupted and take appropriate + * action, even when using a vDSO-style method to get the time instead of a + * system call. + * + * In its more advanced mode. this structure can also be used to expose the + * precise relationship of the CPU counter to real time, as calibrated by the + * host. This means that userspace applications can have accurate time + * immediately after live migration, rather than having to pause operations + * and wait for NTP to recover. This mode does, of course, rely on the + * counter being reliable and consistent across CPUs. + * + * Note that this must be true UTC, never with smeared leap seconds. If a + * guest wishes to construct a smeared clock, it can do so. Presenting a + * smeared clock through this interface would be problematic because it + * actually messes with the apparent counter *period*. A linear smearing + * of 1 ms per second would effectively tweak the counter period by 1000PPM + * at the start/end of the smearing period, while a sinusoidal smear would + * basically be impossible to represent. + * + * This structure is offered with the intent that it be adopted into the + * nascent virtio-rtc standard, as a virtio-rtc that does not address the live + * migration problem seems a little less than fit for purpose. For that + * reason, certain fields use precisely the same numeric definitions as in + * the virtio-rtc proposal. The structure can also be exposed through an ACPI + * device with the CID "VMCLOCK", modelled on the "VMGENID" device except for + * the fact that it uses a real _CRS to convey the address of the structure + * (which should be a full page, to allow for mapping directly to userspace). + */ + +#ifndef __VMCLOCK_ABI_H__ +#define __VMCLOCK_ABI_H__ + +#include "standard-headers/linux/types.h" + +struct vmclock_abi { + /* CONSTANT FIELDS */ + uint32_t magic; +#define VMCLOCK_MAGIC 0x4b4c4356 /* "VCLK" */ + uint32_t size; /* Size of region containing this structure */ + uint16_t version; /* 1 */ + uint8_t counter_id; /* Matches VIRTIO_RTC_COUNTER_xxx except INVALID */ +#define VMCLOCK_COUNTER_ARM_VCNT 0 +#define VMCLOCK_COUNTER_X86_TSC 1 +#define VMCLOCK_COUNTER_INVALID 0xff + uint8_t time_type; /* Matches VIRTIO_RTC_TYPE_xxx */ +#define VMCLOCK_TIME_UTC 0 /* Since 1970-01-01 00:00:00z */ +#define VMCLOCK_TIME_TAI 1 /* Since 1970-01-01 00:00:00z */ +#define VMCLOCK_TIME_MONOTONIC 2 /* Since undefined epoch */ +#define VMCLOCK_TIME_INVALID_SMEARED 3 /* Not supported */ +#define VMCLOCK_TIME_INVALID_MAYBE_SMEARED 4 /* Not supported */ + + /* NON-CONSTANT FIELDS PROTECTED BY SEQCOUNT LOCK */ + uint32_t seq_count; /* Low bit means an update is in progress */ + /* + * This field changes to another non-repeating value when the CPU + * counter is disrupted, for example on live migration. This lets + * the guest know that it should discard any calibration it has + * performed of the counter against external sources (NTP/PTP/etc.). + */ + uint64_t disruption_marker; + uint64_t flags; + /* Indicates that the tai_offset_sec field is valid */ +#define VMCLOCK_FLAG_TAI_OFFSET_VALID (1 << 0) + /* + * Optionally used to notify guests of pending maintenance events. + * A guest which provides latency-sensitive services may wish to + * remove itself from service if an event is coming up. Two flags + * indicate the approximate imminence of the event. 
+ */ +#define VMCLOCK_FLAG_DISRUPTION_SOON (1 << 1) /* About a day */ +#define VMCLOCK_FLAG_DISRUPTION_IMMINENT (1 << 2) /* About an hour */ +#define VMCLOCK_FLAG_PERIOD_ESTERROR_VALID (1 << 3) +#define VMCLOCK_FLAG_PERIOD_MAXERROR_VALID (1 << 4) +#define VMCLOCK_FLAG_TIME_ESTERROR_VALID (1 << 5) +#define VMCLOCK_FLAG_TIME_MAXERROR_VALID (1 << 6) + /* + * If the MONOTONIC flag is set then (other than leap seconds) it is + * guaranteed that the time calculated according this structure at + * any given moment shall never appear to be later than the time + * calculated via the structure at any *later* moment. + * + * In particular, a timestamp based on a counter reading taken + * immediately after setting the low bit of seq_count (and the + * associated memory barrier), using the previously-valid time and + * period fields, shall never be later than a timestamp based on + * a counter reading taken immediately before *clearing* the low + * bit again after the update, using the about-to-be-valid fields. + */ +#define VMCLOCK_FLAG_TIME_MONOTONIC (1 << 7) + + uint8_t pad[2]; + uint8_t clock_status; +#define VMCLOCK_STATUS_UNKNOWN 0 +#define VMCLOCK_STATUS_INITIALIZING 1 +#define VMCLOCK_STATUS_SYNCHRONIZED 2 +#define VMCLOCK_STATUS_FREERUNNING 3 +#define VMCLOCK_STATUS_UNRELIABLE 4 + + /* + * The time exposed through this device is never smeared. This field + * corresponds to the 'subtype' field in virtio-rtc, which indicates + * the smearing method. However in this case it provides a *hint* to + * the guest operating system, such that *if* the guest OS wants to + * provide its users with an alternative clock which does not follow + * UTC, it may do so in a fashion consistent with the other systems + * in the nearby environment. + */ + uint8_t leap_second_smearing_hint; /* Matches VIRTIO_RTC_SUBTYPE_xxx */ +#define VMCLOCK_SMEARING_STRICT 0 +#define VMCLOCK_SMEARING_NOON_LINEAR 1 +#define VMCLOCK_SMEARING_UTC_SLS 2 + uint16_t tai_offset_sec; /* Actually two's complement signed */ + uint8_t leap_indicator; + /* + * This field is based on the VIRTIO_RTC_LEAP_xxx values as defined + * in the current draft of virtio-rtc, but since smearing cannot be + * used with the shared memory device, some values are not used. + * + * The _POST_POS and _POST_NEG values allow the guest to perform + * its own smearing during the day or so after a leap second when + * such smearing may need to continue being applied for a leap + * second which is now theoretically "historical". + */ +#define VMCLOCK_LEAP_NONE 0x00 /* No known nearby leap second */ +#define VMCLOCK_LEAP_PRE_POS 0x01 /* Positive leap second at EOM */ +#define VMCLOCK_LEAP_PRE_NEG 0x02 /* Negative leap second at EOM */ +#define VMCLOCK_LEAP_POS 0x03 /* Set during 23:59:60 second */ +#define VMCLOCK_LEAP_POST_POS 0x04 +#define VMCLOCK_LEAP_POST_NEG 0x05 + + /* Bit shift for counter_period_frac_sec and its error rate */ + uint8_t counter_period_shift; + /* + * Paired values of counter and UTC at a given point in time. + */ + uint64_t counter_value; + /* + * Counter period, and error margin of same. The unit of these + * fields is 1/2^(64 + counter_period_shift) of a second. + */ + uint64_t counter_period_frac_sec; + uint64_t counter_period_esterror_rate_frac_sec; + uint64_t counter_period_maxerror_rate_frac_sec; + + /* + * Time according to time_type field above. 
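+ *
+ * As a rough sketch of how a reader might combine these fields (assuming a
+ * consistent snapshot captured under the seq_count protocol above, a fresh
+ * reading counter_now of the counter named by counter_id, and a 64x64
+ * multiplication widened to 128 bits):
+ *
+ *   delta = counter_now - counter_value;
+ *   frac  = time_frac_sec +
+ *           ((delta * counter_period_frac_sec) >> counter_period_shift);
+ *   now   = time_sec plus any carry out of frac, with frac itself still in
+ *           units of 1/2^64 of a second.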
+ */ + uint64_t time_sec; /* Seconds since time_type epoch */ + uint64_t time_frac_sec; /* Units of 1/2^64 of a second */ + uint64_t time_esterror_nanosec; + uint64_t time_maxerror_nanosec; +}; + +#endif /* __VMCLOCK_ABI_H__ */ diff --git a/include/standard-headers/uefi/uefi.h b/include/standard-headers/uefi/uefi.h new file mode 100644 index 00000000000..5256349ec0b --- /dev/null +++ b/include/standard-headers/uefi/uefi.h @@ -0,0 +1,187 @@ +/* + * Copyright (C) 2025 Intel Corporation + * + * Author: Isaku Yamahata + * + * Xiaoyao Li + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_I386_UEFI_H +#define HW_I386_UEFI_H + +/***************************************************************************/ +/* + * basic EFI definitions + * supplemented with UEFI Specification Version 2.8 (Errata A) + * released February 2020 + */ +/* UEFI integer is little endian */ + +typedef struct { + uint32_t Data1; + uint16_t Data2; + uint16_t Data3; + uint8_t Data4[8]; +} EFI_GUID; + +typedef enum { + EfiReservedMemoryType, + EfiLoaderCode, + EfiLoaderData, + EfiBootServicesCode, + EfiBootServicesData, + EfiRuntimeServicesCode, + EfiRuntimeServicesData, + EfiConventionalMemory, + EfiUnusableMemory, + EfiACPIReclaimMemory, + EfiACPIMemoryNVS, + EfiMemoryMappedIO, + EfiMemoryMappedIOPortSpace, + EfiPalCode, + EfiPersistentMemory, + EfiUnacceptedMemoryType, + EfiMaxMemoryType +} EFI_MEMORY_TYPE; + +#define EFI_HOB_HANDOFF_TABLE_VERSION 0x0009 + +#define EFI_HOB_TYPE_HANDOFF 0x0001 +#define EFI_HOB_TYPE_MEMORY_ALLOCATION 0x0002 +#define EFI_HOB_TYPE_RESOURCE_DESCRIPTOR 0x0003 +#define EFI_HOB_TYPE_GUID_EXTENSION 0x0004 +#define EFI_HOB_TYPE_FV 0x0005 +#define EFI_HOB_TYPE_CPU 0x0006 +#define EFI_HOB_TYPE_MEMORY_POOL 0x0007 +#define EFI_HOB_TYPE_FV2 0x0009 +#define EFI_HOB_TYPE_LOAD_PEIM_UNUSED 0x000A +#define EFI_HOB_TYPE_UEFI_CAPSULE 0x000B +#define EFI_HOB_TYPE_FV3 0x000C +#define EFI_HOB_TYPE_UNUSED 0xFFFE +#define EFI_HOB_TYPE_END_OF_HOB_LIST 0xFFFF + +typedef struct { + uint16_t HobType; + uint16_t HobLength; + uint32_t Reserved; +} EFI_HOB_GENERIC_HEADER; + +typedef uint64_t EFI_PHYSICAL_ADDRESS; +typedef uint32_t EFI_BOOT_MODE; + +typedef struct { + EFI_HOB_GENERIC_HEADER Header; + uint32_t Version; + EFI_BOOT_MODE BootMode; + EFI_PHYSICAL_ADDRESS EfiMemoryTop; + EFI_PHYSICAL_ADDRESS EfiMemoryBottom; + EFI_PHYSICAL_ADDRESS EfiFreeMemoryTop; + EFI_PHYSICAL_ADDRESS EfiFreeMemoryBottom; + EFI_PHYSICAL_ADDRESS EfiEndOfHobList; +} EFI_HOB_HANDOFF_INFO_TABLE; + +#define EFI_RESOURCE_SYSTEM_MEMORY 0x00000000 +#define EFI_RESOURCE_MEMORY_MAPPED_IO 0x00000001 +#define EFI_RESOURCE_IO 0x00000002 +#define EFI_RESOURCE_FIRMWARE_DEVICE 0x00000003 +#define EFI_RESOURCE_MEMORY_MAPPED_IO_PORT 0x00000004 +#define EFI_RESOURCE_MEMORY_RESERVED 0x00000005 +#define EFI_RESOURCE_IO_RESERVED 0x00000006 +#define EFI_RESOURCE_MEMORY_UNACCEPTED 0x00000007 +#define EFI_RESOURCE_MAX_MEMORY_TYPE 0x00000008 + +#define EFI_RESOURCE_ATTRIBUTE_PRESENT 0x00000001 +#define EFI_RESOURCE_ATTRIBUTE_INITIALIZED 0x00000002 +#define EFI_RESOURCE_ATTRIBUTE_TESTED 0x00000004 +#define EFI_RESOURCE_ATTRIBUTE_SINGLE_BIT_ECC 0x00000008 +#define EFI_RESOURCE_ATTRIBUTE_MULTIPLE_BIT_ECC 0x00000010 +#define EFI_RESOURCE_ATTRIBUTE_ECC_RESERVED_1 0x00000020 +#define EFI_RESOURCE_ATTRIBUTE_ECC_RESERVED_2 0x00000040 +#define EFI_RESOURCE_ATTRIBUTE_READ_PROTECTED 0x00000080 +#define EFI_RESOURCE_ATTRIBUTE_WRITE_PROTECTED 0x00000100 +#define EFI_RESOURCE_ATTRIBUTE_EXECUTION_PROTECTED 0x00000200 +#define EFI_RESOURCE_ATTRIBUTE_UNCACHEABLE 
0x00000400 +#define EFI_RESOURCE_ATTRIBUTE_WRITE_COMBINEABLE 0x00000800 +#define EFI_RESOURCE_ATTRIBUTE_WRITE_THROUGH_CACHEABLE 0x00001000 +#define EFI_RESOURCE_ATTRIBUTE_WRITE_BACK_CACHEABLE 0x00002000 +#define EFI_RESOURCE_ATTRIBUTE_16_BIT_IO 0x00004000 +#define EFI_RESOURCE_ATTRIBUTE_32_BIT_IO 0x00008000 +#define EFI_RESOURCE_ATTRIBUTE_64_BIT_IO 0x00010000 +#define EFI_RESOURCE_ATTRIBUTE_UNCACHED_EXPORTED 0x00020000 +#define EFI_RESOURCE_ATTRIBUTE_READ_ONLY_PROTECTED 0x00040000 +#define EFI_RESOURCE_ATTRIBUTE_READ_ONLY_PROTECTABLE 0x00080000 +#define EFI_RESOURCE_ATTRIBUTE_READ_PROTECTABLE 0x00100000 +#define EFI_RESOURCE_ATTRIBUTE_WRITE_PROTECTABLE 0x00200000 +#define EFI_RESOURCE_ATTRIBUTE_EXECUTION_PROTECTABLE 0x00400000 +#define EFI_RESOURCE_ATTRIBUTE_PERSISTENT 0x00800000 +#define EFI_RESOURCE_ATTRIBUTE_PERSISTABLE 0x01000000 +#define EFI_RESOURCE_ATTRIBUTE_MORE_RELIABLE 0x02000000 + +typedef uint32_t EFI_RESOURCE_TYPE; +typedef uint32_t EFI_RESOURCE_ATTRIBUTE_TYPE; + +typedef struct { + EFI_HOB_GENERIC_HEADER Header; + EFI_GUID Owner; + EFI_RESOURCE_TYPE ResourceType; + EFI_RESOURCE_ATTRIBUTE_TYPE ResourceAttribute; + EFI_PHYSICAL_ADDRESS PhysicalStart; + uint64_t ResourceLength; +} EFI_HOB_RESOURCE_DESCRIPTOR; + +typedef struct { + EFI_HOB_GENERIC_HEADER Header; + EFI_GUID Name; + + /* guid specific data follows */ +} EFI_HOB_GUID_TYPE; + +typedef struct { + EFI_HOB_GENERIC_HEADER Header; + EFI_PHYSICAL_ADDRESS BaseAddress; + uint64_t Length; +} EFI_HOB_FIRMWARE_VOLUME; + +typedef struct { + EFI_HOB_GENERIC_HEADER Header; + EFI_PHYSICAL_ADDRESS BaseAddress; + uint64_t Length; + EFI_GUID FvName; + EFI_GUID FileName; +} EFI_HOB_FIRMWARE_VOLUME2; + +typedef struct { + EFI_HOB_GENERIC_HEADER Header; + EFI_PHYSICAL_ADDRESS BaseAddress; + uint64_t Length; + uint32_t AuthenticationStatus; + bool ExtractedFv; + EFI_GUID FvName; + EFI_GUID FileName; +} EFI_HOB_FIRMWARE_VOLUME3; + +typedef struct { + EFI_HOB_GENERIC_HEADER Header; + uint8_t SizeOfMemorySpace; + uint8_t SizeOfIoSpace; + uint8_t Reserved[6]; +} EFI_HOB_CPU; + +typedef struct { + EFI_HOB_GENERIC_HEADER Header; +} EFI_HOB_MEMORY_POOL; + +typedef struct { + EFI_HOB_GENERIC_HEADER Header; + + EFI_PHYSICAL_ADDRESS BaseAddress; + uint64_t Length; +} EFI_HOB_UEFI_CAPSULE; + +#define EFI_HOB_OWNER_ZERO \ + ((EFI_GUID){ 0x00000000, 0x0000, 0x0000, \ + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }) + +#endif diff --git a/include/sysemu/accel-ops.h b/include/sysemu/accel-ops.h deleted file mode 100644 index a0886722305..00000000000 --- a/include/sysemu/accel-ops.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Accelerator OPS, used for cpus.c module - * - * Copyright 2021 SUSE LLC - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef ACCEL_OPS_H -#define ACCEL_OPS_H - -#include "exec/cpu-common.h" -#include "qom/object.h" - -#define ACCEL_OPS_SUFFIX "-ops" -#define TYPE_ACCEL_OPS "accel" ACCEL_OPS_SUFFIX -#define ACCEL_OPS_NAME(name) (name "-" TYPE_ACCEL_OPS) - -typedef struct AccelOpsClass AccelOpsClass; -DECLARE_CLASS_CHECKERS(AccelOpsClass, ACCEL_OPS, TYPE_ACCEL_OPS) - -/** - * struct AccelOpsClass - accelerator interfaces - * - * This structure is used to abstract accelerator differences from the - * core CPU code. Not all have to be implemented. 
- */ -struct AccelOpsClass { - /*< private >*/ - ObjectClass parent_class; - /*< public >*/ - - /* initialization function called when accel is chosen */ - void (*ops_init)(AccelOpsClass *ops); - - bool (*cpus_are_resettable)(void); - void (*cpu_reset_hold)(CPUState *cpu); - - void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY NON-NULL */ - void (*kick_vcpu_thread)(CPUState *cpu); - bool (*cpu_thread_is_idle)(CPUState *cpu); - - void (*synchronize_post_reset)(CPUState *cpu); - void (*synchronize_post_init)(CPUState *cpu); - void (*synchronize_state)(CPUState *cpu); - void (*synchronize_pre_loadvm)(CPUState *cpu); - void (*synchronize_pre_resume)(bool step_pending); - - void (*handle_interrupt)(CPUState *cpu, int mask); - - /** - * @get_virtual_clock: fetch virtual clock - * @set_virtual_clock: set virtual clock - * - * These allow the timer subsystem to defer to the accelerator to - * fetch time. The set function is needed if the accelerator wants - * to track the changes to time as the timer is warped through - * various timer events. - */ - int64_t (*get_virtual_clock)(void); - void (*set_virtual_clock)(int64_t time); - - int64_t (*get_elapsed_ticks)(void); - - /* gdbstub hooks */ - bool (*supports_guest_debug)(void); - int (*update_guest_debug)(CPUState *cpu); - int (*insert_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len); - int (*remove_breakpoint)(CPUState *cpu, int type, vaddr addr, vaddr len); - void (*remove_all_breakpoints)(CPUState *cpu); -}; - -#endif /* ACCEL_OPS_H */ diff --git a/include/sysemu/cpu-timers.h b/include/sysemu/cpu-timers.h deleted file mode 100644 index 7bfa960fbd6..00000000000 --- a/include/sysemu/cpu-timers.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * CPU timers state API - * - * Copyright 2020 SUSE LLC - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ -#ifndef SYSEMU_CPU_TIMERS_H -#define SYSEMU_CPU_TIMERS_H - -#include "qemu/timer.h" - -/* init the whole cpu timers API, including icount, ticks, and cpu_throttle */ -void cpu_timers_init(void); - -/* icount - Instruction Counter API */ - -/** - * ICountMode: icount enablement state: - * - * @ICOUNT_DISABLED: Disabled - Do not count executed instructions. - * @ICOUNT_PRECISE: Enabled - Fixed conversion of insn to ns via "shift" option - * @ICOUNT_ADAPTATIVE: Enabled - Runtime adaptive algorithm to compute shift - */ -typedef enum { - ICOUNT_DISABLED = 0, - ICOUNT_PRECISE, - ICOUNT_ADAPTATIVE, -} ICountMode; - -#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) -extern ICountMode use_icount; -#define icount_enabled() (use_icount) -#else -#define icount_enabled() ICOUNT_DISABLED -#endif - -/* - * Update the icount with the executed instructions. Called by - * cpus-tcg vCPU thread so the main-loop can see time has moved forward. - */ -void icount_update(CPUState *cpu); - -/* get raw icount value */ -int64_t icount_get_raw(void); - -/* return the virtual CPU time in ns, based on the instruction counter. */ -int64_t icount_get(void); -/* - * convert an instruction counter value to ns, based on the icount shift. - * This shift is set as a fixed value with the icount "shift" option - * (precise mode), or it is constantly approximated and corrected at - * runtime in adaptive mode. 
- */ -int64_t icount_to_ns(int64_t icount); - -/** - * icount_configure: configure the icount options, including "shift" - * @opts: Options to parse - * @errp: pointer to a NULL-initialized error object - * - * Return: true on success, else false setting @errp with error - */ -bool icount_configure(QemuOpts *opts, Error **errp); - -/* used by tcg vcpu thread to calc icount budget */ -int64_t icount_round(int64_t count); - -/* if the CPUs are idle, start accounting real time to virtual clock. */ -void icount_start_warp_timer(void); -void icount_account_warp_timer(void); -void icount_notify_exit(void); - -/* - * CPU Ticks and Clock - */ - -/* Caller must hold BQL */ -void cpu_enable_ticks(void); -/* Caller must hold BQL */ -void cpu_disable_ticks(void); - -/* - * return the time elapsed in VM between vm_start and vm_stop. - * cpu_get_ticks() uses units of the host CPU cycle counter. - */ -int64_t cpu_get_ticks(void); - -/* - * Returns the monotonic time elapsed in VM, i.e., - * the time between vm_start and vm_stop - */ -int64_t cpu_get_clock(void); - -void qemu_timer_notify_cb(void *opaque, QEMUClockType type); - -/* get/set VIRTUAL clock and VM elapsed ticks via the cpus accel interface */ -int64_t cpus_get_virtual_clock(void); -void cpus_set_virtual_clock(int64_t new_time); -int64_t cpus_get_elapsed_ticks(void); - -#endif /* SYSEMU_CPU_TIMERS_H */ diff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h deleted file mode 100644 index 730f927f034..00000000000 --- a/include/sysemu/hvf.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * QEMU Hypervisor.framework (HVF) support - * - * Copyright Google Inc., 2017 - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -/* header to be included in non-HVF-specific code */ - -#ifndef HVF_H -#define HVF_H - -#include "qemu/accel.h" -#include "qom/object.h" - -#ifdef COMPILING_PER_TARGET -#include "cpu.h" - -#ifdef CONFIG_HVF -extern bool hvf_allowed; -#define hvf_enabled() (hvf_allowed) -#else /* !CONFIG_HVF */ -#define hvf_enabled() 0 -#endif /* !CONFIG_HVF */ - -#endif /* COMPILING_PER_TARGET */ - -#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf") - -typedef struct HVFState HVFState; -DECLARE_INSTANCE_CHECKER(HVFState, HVF_STATE, - TYPE_HVF_ACCEL) - -#ifdef COMPILING_PER_TARGET -struct hvf_sw_breakpoint { - vaddr pc; - vaddr saved_insn; - int use_count; - QTAILQ_ENTRY(hvf_sw_breakpoint) entry; -}; - -struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, - vaddr pc); -int hvf_sw_breakpoints_active(CPUState *cpu); - -int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp); -int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp); -int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type); -int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type); -void hvf_arch_remove_all_hw_breakpoints(void); - -/* - * hvf_update_guest_debug: - * @cs: CPUState for the CPU to update - * - * Update guest to enable or disable debugging. Per-arch specifics will be - * handled by calling down to hvf_arch_update_guest_debug. - */ -int hvf_update_guest_debug(CPUState *cpu); -void hvf_arch_update_guest_debug(CPUState *cpu); - -/* - * Return whether the guest supports debugging. 
- */ -bool hvf_arch_supports_guest_debug(void); -#endif /* COMPILING_PER_TARGET */ - -#endif diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h deleted file mode 100644 index 5b28d17ba1f..00000000000 --- a/include/sysemu/hvf_int.h +++ /dev/null @@ -1,76 +0,0 @@ -/* - * QEMU Hypervisor.framework (HVF) support - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -/* header to be included in HVF-specific code */ - -#ifndef HVF_INT_H -#define HVF_INT_H - -#ifdef __aarch64__ -#include -typedef hv_vcpu_t hvf_vcpuid; -#else -#include -typedef hv_vcpuid_t hvf_vcpuid; -#endif - -/* hvf_slot flags */ -#define HVF_SLOT_LOG (1 << 0) - -typedef struct hvf_slot { - uint64_t start; - uint64_t size; - uint8_t *mem; - int slot_id; - uint32_t flags; - MemoryRegion *region; -} hvf_slot; - -typedef struct hvf_vcpu_caps { - uint64_t vmx_cap_pinbased; - uint64_t vmx_cap_procbased; - uint64_t vmx_cap_procbased2; - uint64_t vmx_cap_entry; - uint64_t vmx_cap_exit; - uint64_t vmx_cap_preemption_timer; -} hvf_vcpu_caps; - -struct HVFState { - AccelState parent; - hvf_slot slots[32]; - int num_slots; - - hvf_vcpu_caps *hvf_caps; - uint64_t vtimer_offset; - QTAILQ_HEAD(, hvf_sw_breakpoint) hvf_sw_breakpoints; -}; -extern HVFState *hvf_state; - -struct AccelCPUState { - hvf_vcpuid fd; - void *exit; - bool vtimer_masked; - sigset_t unblock_ipi_mask; - bool guest_debug_enabled; - bool dirty; -}; - -void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line, - const char *exp); -#define assert_hvf_ok(EX) assert_hvf_ok_impl((EX), __FILE__, __LINE__, #EX) -const char *hvf_return_string(hv_return_t ret); -int hvf_arch_init(void); -int hvf_arch_init_vcpu(CPUState *cpu); -void hvf_arch_vcpu_destroy(CPUState *cpu); -int hvf_vcpu_exec(CPUState *); -hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t); -int hvf_put_registers(CPUState *); -int hvf_get_registers(CPUState *); -void hvf_kick_vcpu_thread(CPUState *cpu); - -#endif diff --git a/include/sysemu/hw_accel.h b/include/sysemu/hw_accel.h deleted file mode 100644 index c71b77e71f3..00000000000 --- a/include/sysemu/hw_accel.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * QEMU Hardware accelerators support - * - * Copyright 2016 Google, Inc. - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#ifndef QEMU_HW_ACCEL_H -#define QEMU_HW_ACCEL_H - -#include "hw/core/cpu.h" -#include "sysemu/kvm.h" -#include "sysemu/hvf.h" -#include "sysemu/whpx.h" -#include "sysemu/nvmm.h" - -void cpu_synchronize_state(CPUState *cpu); -void cpu_synchronize_post_reset(CPUState *cpu); -void cpu_synchronize_post_init(CPUState *cpu); -void cpu_synchronize_pre_loadvm(CPUState *cpu); - -#endif /* QEMU_HW_ACCEL_H */ diff --git a/include/sysemu/iommufd.h b/include/sysemu/iommufd.h deleted file mode 100644 index 4c4886c7787..00000000000 --- a/include/sysemu/iommufd.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * iommufd container backend declaration - * - * Copyright (C) 2024 Intel Corporation. - * Copyright Red Hat, Inc. 
2024 - * - * Authors: Yi Liu - * Eric Auger - * Zhenzhong Duan - * - * SPDX-License-Identifier: GPL-2.0-or-later - */ - -#ifndef SYSEMU_IOMMUFD_H -#define SYSEMU_IOMMUFD_H - -#include "qom/object.h" -#include "exec/hwaddr.h" -#include "exec/cpu-common.h" -#include "sysemu/host_iommu_device.h" - -#define TYPE_IOMMUFD_BACKEND "iommufd" -OBJECT_DECLARE_TYPE(IOMMUFDBackend, IOMMUFDBackendClass, IOMMUFD_BACKEND) - -struct IOMMUFDBackendClass { - ObjectClass parent_class; -}; - -struct IOMMUFDBackend { - Object parent; - - /*< protected >*/ - int fd; /* /dev/iommu file descriptor */ - bool owned; /* is the /dev/iommu opened internally */ - uint32_t users; - - /*< public >*/ -}; - -bool iommufd_backend_connect(IOMMUFDBackend *be, Error **errp); -void iommufd_backend_disconnect(IOMMUFDBackend *be); - -bool iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id, - Error **errp); -void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id); -int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova, - ram_addr_t size, void *vaddr, bool readonly); -int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id, - hwaddr iova, ram_addr_t size); -bool iommufd_backend_get_device_info(IOMMUFDBackend *be, uint32_t devid, - uint32_t *type, void *data, uint32_t len, - uint64_t *caps, Error **errp); -bool iommufd_backend_alloc_hwpt(IOMMUFDBackend *be, uint32_t dev_id, - uint32_t pt_id, uint32_t flags, - uint32_t data_type, uint32_t data_len, - void *data_ptr, uint32_t *out_hwpt, - Error **errp); -bool iommufd_backend_set_dirty_tracking(IOMMUFDBackend *be, uint32_t hwpt_id, - bool start, Error **errp); -bool iommufd_backend_get_dirty_bitmap(IOMMUFDBackend *be, uint32_t hwpt_id, - uint64_t iova, ram_addr_t size, - uint64_t page_size, uint64_t *data, - Error **errp); - -#define TYPE_HOST_IOMMU_DEVICE_IOMMUFD TYPE_HOST_IOMMU_DEVICE "-iommufd" -#endif diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h deleted file mode 100644 index 9cf14ca3d5b..00000000000 --- a/include/sysemu/kvm.h +++ /dev/null @@ -1,581 +0,0 @@ -/* - * QEMU KVM support - * - * Copyright IBM, Corp. 2008 - * - * Authors: - * Anthony Liguori - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -/* header to be included in non-KVM-specific code */ - -#ifndef QEMU_KVM_H -#define QEMU_KVM_H - -#include "exec/memattrs.h" -#include "qemu/accel.h" -#include "qom/object.h" - -#ifdef COMPILING_PER_TARGET -# ifdef CONFIG_KVM -# include -# define CONFIG_KVM_IS_POSSIBLE -# endif -#else -# define CONFIG_KVM_IS_POSSIBLE -#endif - -#ifdef CONFIG_KVM_IS_POSSIBLE - -extern bool kvm_allowed; -extern bool kvm_kernel_irqchip; -extern bool kvm_split_irqchip; -extern bool kvm_async_interrupts_allowed; -extern bool kvm_halt_in_kernel_allowed; -extern bool kvm_resamplefds_allowed; -extern bool kvm_msi_via_irqfd_allowed; -extern bool kvm_gsi_routing_allowed; -extern bool kvm_gsi_direct_mapping; -extern bool kvm_readonly_mem_allowed; -extern bool kvm_msi_use_devid; - -#define kvm_enabled() (kvm_allowed) -/** - * kvm_irqchip_in_kernel: - * - * Returns: true if an in-kernel irqchip was created. - * What this actually means is architecture and machine model - * specific: on PC, for instance, it means that the LAPIC - * is in kernel. This function should never be used from generic - * target-independent code: use one of the following functions or - * some other specific check instead. 
- */ -#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip) - -/** - * kvm_irqchip_is_split: - * - * Returns: true if the irqchip implementation is split between - * user and kernel space. The details are architecture and - * machine specific. On PC, it means that the PIC, IOAPIC, and - * PIT are in user space while the LAPIC is in the kernel. - */ -#define kvm_irqchip_is_split() (kvm_split_irqchip) - -/** - * kvm_async_interrupts_enabled: - * - * Returns: true if we can deliver interrupts to KVM - * asynchronously (ie by ioctl from any thread at any time) - * rather than having to do interrupt delivery synchronously - * (where the vcpu must be stopped at a suitable point first). - */ -#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed) - -/** - * kvm_halt_in_kernel - * - * Returns: true if halted cpus should still get a KVM_RUN ioctl to run - * inside of kernel space. This only works if MP state is implemented. - */ -#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed) - -/** - * kvm_irqfds_enabled: - * - * Returns: true if we can use irqfds to inject interrupts into - * a KVM CPU (ie the kernel supports irqfds and we are running - * with a configuration where it is meaningful to use them). - * - * Always available if running with in-kernel irqchip. - */ -#define kvm_irqfds_enabled() kvm_irqchip_in_kernel() - -/** - * kvm_resamplefds_enabled: - * - * Returns: true if we can use resamplefds to inject interrupts into - * a KVM CPU (ie the kernel supports resamplefds and we are running - * with a configuration where it is meaningful to use them). - */ -#define kvm_resamplefds_enabled() (kvm_resamplefds_allowed) - -/** - * kvm_msi_via_irqfd_enabled: - * - * Returns: true if we can route a PCI MSI (Message Signaled Interrupt) - * to a KVM CPU via an irqfd. This requires that the kernel supports - * this and that we're running in a configuration that permits it. - */ -#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed) - -/** - * kvm_gsi_routing_enabled: - * - * Returns: true if GSI routing is enabled (ie the kernel supports - * it and we're running in a configuration that permits it). - */ -#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed) - -/** - * kvm_gsi_direct_mapping: - * - * Returns: true if GSI direct mapping is enabled. - */ -#define kvm_gsi_direct_mapping() (kvm_gsi_direct_mapping) - -/** - * kvm_readonly_mem_enabled: - * - * Returns: true if KVM readonly memory is enabled (ie the kernel - * supports it and we're running in a configuration that permits it). - */ -#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed) - -/** - * kvm_msi_devid_required: - * Returns: true if KVM requires a device id to be provided while - * defining an MSI routing entry. 
- */ -#define kvm_msi_devid_required() (kvm_msi_use_devid) - -#else - -#define kvm_enabled() (0) -#define kvm_irqchip_in_kernel() (false) -#define kvm_irqchip_is_split() (false) -#define kvm_async_interrupts_enabled() (false) -#define kvm_halt_in_kernel() (false) -#define kvm_irqfds_enabled() (false) -#define kvm_resamplefds_enabled() (false) -#define kvm_msi_via_irqfd_enabled() (false) -#define kvm_gsi_routing_allowed() (false) -#define kvm_gsi_direct_mapping() (false) -#define kvm_readonly_mem_enabled() (false) -#define kvm_msi_devid_required() (false) - -#endif /* CONFIG_KVM_IS_POSSIBLE */ - -struct kvm_run; -struct kvm_irq_routing_entry; - -typedef struct KVMCapabilityInfo { - const char *name; - int value; -} KVMCapabilityInfo; - -#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP } -#define KVM_CAP_LAST_INFO { NULL, 0 } - -struct KVMState; - -#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm") -typedef struct KVMState KVMState; -DECLARE_INSTANCE_CHECKER(KVMState, KVM_STATE, - TYPE_KVM_ACCEL) - -extern KVMState *kvm_state; -typedef struct Notifier Notifier; - -typedef struct KVMRouteChange { - KVMState *s; - int changes; -} KVMRouteChange; - -/* external API */ - -unsigned int kvm_get_max_memslots(void); -unsigned int kvm_get_free_memslots(void); -bool kvm_has_sync_mmu(void); -int kvm_has_vcpu_events(void); -int kvm_max_nested_state_length(void); -int kvm_has_gsi_routing(void); - -/** - * kvm_arm_supports_user_irq - * - * Not all KVM implementations support notifications for kernel generated - * interrupt events to user space. This function indicates whether the current - * KVM implementation does support them. - * - * Returns: true if KVM supports using kernel generated IRQs from user space - */ -bool kvm_arm_supports_user_irq(void); - - -int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr); -int kvm_on_sigbus(int code, void *addr); - -#ifdef COMPILING_PER_TARGET -#include "cpu.h" - -void kvm_flush_coalesced_mmio_buffer(void); - -/** - * kvm_update_guest_debug(): ensure KVM debug structures updated - * @cs: the CPUState for this cpu - * @reinject_trap: KVM trap injection control - * - * There are usually per-arch specifics which will be handled by - * calling down to kvm_arch_update_guest_debug after the generic - * fields have been set. 
- */ -#ifdef TARGET_KVM_HAVE_GUEST_DEBUG -int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap); -#else -static inline int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) -{ - return -EINVAL; -} -#endif - -/* internal API */ - -int kvm_ioctl(KVMState *s, int type, ...); - -int kvm_vm_ioctl(KVMState *s, int type, ...); - -int kvm_vcpu_ioctl(CPUState *cpu, int type, ...); - -/** - * kvm_device_ioctl - call an ioctl on a kvm device - * @fd: The KVM device file descriptor as returned from KVM_CREATE_DEVICE - * @type: The device-ctrl ioctl number - * - * Returns: -errno on error, nonnegative on success - */ -int kvm_device_ioctl(int fd, int type, ...); - -/** - * kvm_vm_check_attr - check for existence of a specific vm attribute - * @s: The KVMState pointer - * @group: the group - * @attr: the attribute of that group to query for - * - * Returns: 1 if the attribute exists - * 0 if the attribute either does not exist or if the vm device - * interface is unavailable - */ -int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr); - -/** - * kvm_device_check_attr - check for existence of a specific device attribute - * @fd: The device file descriptor - * @group: the group - * @attr: the attribute of that group to query for - * - * Returns: 1 if the attribute exists - * 0 if the attribute either does not exist or if the vm device - * interface is unavailable - */ -int kvm_device_check_attr(int fd, uint32_t group, uint64_t attr); - -/** - * kvm_device_access - set or get value of a specific device attribute - * @fd: The device file descriptor - * @group: the group - * @attr: the attribute of that group to set or get - * @val: pointer to a storage area for the value - * @write: true for set and false for get operation - * @errp: error object handle - * - * Returns: 0 on success - * < 0 on error - * Use kvm_device_check_attr() in order to check for the availability - * of optional attributes. - */ -int kvm_device_access(int fd, int group, uint64_t attr, - void *val, bool write, Error **errp); - -/** - * kvm_create_device - create a KVM device for the device control API - * @KVMState: The KVMState pointer - * @type: The KVM device type (see Documentation/virtual/kvm/devices in the - * kernel source) - * @test: If true, only test if device can be created, but don't actually - * create the device. - * - * Returns: -errno on error, nonnegative on success: @test ? 0 : device fd; - */ -int kvm_create_device(KVMState *s, uint64_t type, bool test); - -/** - * kvm_device_supported - probe whether KVM supports specific device - * - * @vmfd: The fd handler for VM - * @type: type of device - * - * @return: true if supported, otherwise false. - */ -bool kvm_device_supported(int vmfd, uint64_t type); - -/** - * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU - * @cpu: QOM CPUState object for which KVM vCPU has to be fetched/created. - * - * @returns: 0 when success, errno (<0) when failed. - */ -int kvm_create_vcpu(CPUState *cpu); - -/** - * kvm_park_vcpu - Park QEMU KVM vCPU context - * @cpu: QOM CPUState object for which QEMU KVM vCPU context has to be parked. 
- * - * @returns: none - */ -void kvm_park_vcpu(CPUState *cpu); - -/** - * kvm_unpark_vcpu - unpark QEMU KVM vCPU context - * @s: KVM State - * @vcpu_id: Architecture vCPU ID of the parked vCPU - * - * @returns: KVM fd - */ -int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id); - -/** - * kvm_create_and_park_vcpu - Create and park a KVM vCPU - * @cpu: QOM CPUState object for which KVM vCPU has to be created and parked. - * - * @returns: 0 when success, errno (<0) when failed. - */ -int kvm_create_and_park_vcpu(CPUState *cpu); - -/* Arch specific hooks */ - -extern const KVMCapabilityInfo kvm_arch_required_capabilities[]; - -void kvm_arch_accel_class_init(ObjectClass *oc); - -void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run); -MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run); - -int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run); - -int kvm_arch_process_async_events(CPUState *cpu); - -int kvm_arch_get_registers(CPUState *cpu); - -/* state subset only touched by the VCPU itself during runtime */ -#define KVM_PUT_RUNTIME_STATE 1 -/* state subset modified during VCPU reset */ -#define KVM_PUT_RESET_STATE 2 -/* full state set, modified during initialization or on vmload */ -#define KVM_PUT_FULL_STATE 3 - -int kvm_arch_put_registers(CPUState *cpu, int level); - -int kvm_arch_get_default_type(MachineState *ms); - -int kvm_arch_init(MachineState *ms, KVMState *s); - -int kvm_arch_init_vcpu(CPUState *cpu); -int kvm_arch_destroy_vcpu(CPUState *cpu); - -bool kvm_vcpu_id_is_valid(int vcpu_id); - -/* Returns VCPU ID to be used on KVM_CREATE_VCPU ioctl() */ -unsigned long kvm_arch_vcpu_id(CPUState *cpu); - -#ifdef KVM_HAVE_MCE_INJECTION -void kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr); -#endif - -void kvm_arch_init_irq_routing(KVMState *s); - -int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, - uint64_t address, uint32_t data, PCIDevice *dev); - -/* Notify arch about newly added MSI routes */ -int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, - int vector, PCIDevice *dev); -/* Notify arch about released MSI routes */ -int kvm_arch_release_virq_post(int virq); - -int kvm_arch_msi_data_to_gsi(uint32_t data); - -int kvm_set_irq(KVMState *s, int irq, int level); -int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg); - -void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin); - -void kvm_irqchip_add_change_notifier(Notifier *n); -void kvm_irqchip_remove_change_notifier(Notifier *n); -void kvm_irqchip_change_notify(void); - -struct kvm_guest_debug; -struct kvm_debug_exit_arch; - -struct kvm_sw_breakpoint { - vaddr pc; - vaddr saved_insn; - int use_count; - QTAILQ_ENTRY(kvm_sw_breakpoint) entry; -}; - -struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, - vaddr pc); - -int kvm_sw_breakpoints_active(CPUState *cpu); - -int kvm_arch_insert_sw_breakpoint(CPUState *cpu, - struct kvm_sw_breakpoint *bp); -int kvm_arch_remove_sw_breakpoint(CPUState *cpu, - struct kvm_sw_breakpoint *bp); -int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type); -int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type); -void kvm_arch_remove_all_hw_breakpoints(void); - -void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg); - -bool kvm_arch_stop_on_emulation_error(CPUState *cpu); - -int kvm_check_extension(KVMState *s, unsigned int extension); - -int kvm_vm_check_extension(KVMState *s, unsigned int extension); - -#define kvm_vm_enable_cap(s, capability, cap_flags, ...) 
\ - ({ \ - struct kvm_enable_cap cap = { \ - .cap = capability, \ - .flags = cap_flags, \ - }; \ - uint64_t args_tmp[] = { __VA_ARGS__ }; \ - size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \ - memcpy(cap.args, args_tmp, n * sizeof(cap.args[0])); \ - kvm_vm_ioctl(s, KVM_ENABLE_CAP, &cap); \ - }) - -#define kvm_vcpu_enable_cap(cpu, capability, cap_flags, ...) \ - ({ \ - struct kvm_enable_cap cap = { \ - .cap = capability, \ - .flags = cap_flags, \ - }; \ - uint64_t args_tmp[] = { __VA_ARGS__ }; \ - size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \ - memcpy(cap.args, args_tmp, n * sizeof(cap.args[0])); \ - kvm_vcpu_ioctl(cpu, KVM_ENABLE_CAP, &cap); \ - }) - -void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len); - -int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr, - hwaddr *phys_addr); - -#endif /* COMPILING_PER_TARGET */ - -void kvm_cpu_synchronize_state(CPUState *cpu); - -void kvm_init_cpu_signals(CPUState *cpu); - -/** - * kvm_irqchip_add_msi_route - Add MSI route for specific vector - * @c: KVMRouteChange instance. - * @vector: which vector to add. This can be either MSI/MSIX - * vector. The function will automatically detect whether - * MSI/MSIX is enabled, and fetch corresponding MSI - * message. - * @dev: Owner PCI device to add the route. If @dev is specified - * as @NULL, an empty MSI message will be inited. - * @return: virq (>=0) when success, errno (<0) when failed. - */ -int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev); -int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, - PCIDevice *dev); -void kvm_irqchip_commit_routes(KVMState *s); - -static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s) -{ - return (KVMRouteChange) { .s = s, .changes = 0 }; -} - -static inline void kvm_irqchip_commit_route_changes(KVMRouteChange *c) -{ - if (c->changes) { - kvm_irqchip_commit_routes(c->s); - c->changes = 0; - } -} - -int kvm_irqchip_get_virq(KVMState *s); -void kvm_irqchip_release_virq(KVMState *s, int virq); - -void kvm_add_routing_entry(KVMState *s, - struct kvm_irq_routing_entry *entry); - -int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, - EventNotifier *rn, int virq); -int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, - int virq); -int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, - EventNotifier *rn, qemu_irq irq); -int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, - qemu_irq irq); -void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi); -void kvm_init_irq_routing(KVMState *s); - -bool kvm_kernel_irqchip_allowed(void); -bool kvm_kernel_irqchip_required(void); -bool kvm_kernel_irqchip_split(void); - -/** - * kvm_arch_irqchip_create: - * @KVMState: The KVMState pointer - * - * Allow architectures to create an in-kernel irq chip themselves. - * - * Returns: < 0: error - * 0: irq chip was not created - * > 0: irq chip was created - */ -int kvm_arch_irqchip_create(KVMState *s); - -/** - * kvm_set_one_reg - set a register value in KVM via KVM_SET_ONE_REG ioctl - * @id: The register ID - * @source: The pointer to the value to be set. It must point to a variable - * of the correct type/size for the register being accessed. - * - * Returns: 0 on success, or a negative errno on failure. 
- */ -int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source); - -/** - * kvm_get_one_reg - get a register value from KVM via KVM_GET_ONE_REG ioctl - * @id: The register ID - * @target: The pointer where the value is to be stored. It must point to a - * variable of the correct type/size for the register being accessed. - * - * Returns: 0 on success, or a negative errno on failure. - */ -int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target); - -/* Notify resamplefd for EOI of specific interrupts. */ -void kvm_resample_fd_notify(int gsi); - -bool kvm_dirty_ring_enabled(void); - -uint32_t kvm_dirty_ring_size(void); - -void kvm_mark_guest_state_protected(void); - -/** - * kvm_hwpoisoned_mem - indicate if there is any hwpoisoned page - * reported for the VM. - */ -bool kvm_hwpoisoned_mem(void); - -int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp); - -int kvm_set_memory_attributes_private(hwaddr start, uint64_t size); -int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size); - -int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private); - -#endif diff --git a/include/sysemu/nvmm.h b/include/sysemu/nvmm.h deleted file mode 100644 index 6971ddb3a5a..00000000000 --- a/include/sysemu/nvmm.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2018-2019 Maxime Villard, All rights reserved. - * - * NetBSD Virtual Machine Monitor (NVMM) accelerator support. - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -/* header to be included in non-NVMM-specific code */ - -#ifndef QEMU_NVMM_H -#define QEMU_NVMM_H - -#ifdef COMPILING_PER_TARGET - -#ifdef CONFIG_NVMM - -int nvmm_enabled(void); - -#else /* CONFIG_NVMM */ - -#define nvmm_enabled() (0) - -#endif /* CONFIG_NVMM */ - -#endif /* COMPILING_PER_TARGET */ - -#endif /* QEMU_NVMM_H */ diff --git a/include/sysemu/runstate.h b/include/sysemu/runstate.h deleted file mode 100644 index e210a37abf0..00000000000 --- a/include/sysemu/runstate.h +++ /dev/null @@ -1,111 +0,0 @@ -#ifndef SYSEMU_RUNSTATE_H -#define SYSEMU_RUNSTATE_H - -#include "qapi/qapi-types-run-state.h" -#include "qemu/notify.h" - -bool runstate_check(RunState state); -void runstate_set(RunState new_state); -RunState runstate_get(void); -bool runstate_is_running(void); -bool runstate_needs_reset(void); - -typedef void VMChangeStateHandler(void *opaque, bool running, RunState state); - -VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb, - void *opaque); -VMChangeStateEntry *qemu_add_vm_change_state_handler_prio( - VMChangeStateHandler *cb, void *opaque, int priority); -VMChangeStateEntry * -qemu_add_vm_change_state_handler_prio_full(VMChangeStateHandler *cb, - VMChangeStateHandler *prepare_cb, - void *opaque, int priority); -VMChangeStateEntry *qdev_add_vm_change_state_handler(DeviceState *dev, - VMChangeStateHandler *cb, - void *opaque); -VMChangeStateEntry *qdev_add_vm_change_state_handler_full( - DeviceState *dev, VMChangeStateHandler *cb, - VMChangeStateHandler *prepare_cb, void *opaque); -void qemu_del_vm_change_state_handler(VMChangeStateEntry *e); -/** - * vm_state_notify: Notify the state of the VM - * - * @running: whether the VM is running or not. - * @state: the #RunState of the VM. 
- */ -void vm_state_notify(bool running, RunState state); - -static inline bool shutdown_caused_by_guest(ShutdownCause cause) -{ - return cause >= SHUTDOWN_CAUSE_GUEST_SHUTDOWN; -} - -/* - * In a "live" state, the vcpu clock is ticking, and the runstate notifiers - * think we are running. - */ -static inline bool runstate_is_live(RunState state) -{ - return state == RUN_STATE_RUNNING || state == RUN_STATE_SUSPENDED; -} - -void vm_start(void); - -/** - * vm_prepare_start: Prepare for starting/resuming the VM - * - * @step_pending: whether any of the CPUs is about to be single-stepped by gdb - */ -int vm_prepare_start(bool step_pending); - -/** - * vm_resume: If @state is a live state, start the vm and set the state, - * else just set the state. - * - * @state: the state to restore - */ -void vm_resume(RunState state); - -int vm_stop(RunState state); -int vm_stop_force_state(RunState state); -int vm_shutdown(void); -void vm_set_suspended(bool suspended); -bool vm_get_suspended(void); - -typedef enum WakeupReason { - /* Always keep QEMU_WAKEUP_REASON_NONE = 0 */ - QEMU_WAKEUP_REASON_NONE = 0, - QEMU_WAKEUP_REASON_RTC, - QEMU_WAKEUP_REASON_PMTIMER, - QEMU_WAKEUP_REASON_OTHER, -} WakeupReason; - -void qemu_system_reset_request(ShutdownCause reason); -void qemu_system_suspend_request(void); -void qemu_register_suspend_notifier(Notifier *notifier); -bool qemu_wakeup_suspend_enabled(void); -void qemu_system_wakeup_request(WakeupReason reason, Error **errp); -void qemu_system_wakeup_enable(WakeupReason reason, bool enabled); -void qemu_register_wakeup_notifier(Notifier *notifier); -void qemu_register_wakeup_support(void); -void qemu_system_shutdown_request_with_code(ShutdownCause reason, - int exit_code); -void qemu_system_shutdown_request(ShutdownCause reason); -void qemu_system_powerdown_request(void); -void qemu_register_powerdown_notifier(Notifier *notifier); -void qemu_register_shutdown_notifier(Notifier *notifier); -void qemu_system_debug_request(void); -void qemu_system_vmstop_request(RunState reason); -void qemu_system_vmstop_request_prepare(void); -bool qemu_vmstop_requested(RunState *r); -ShutdownCause qemu_shutdown_requested_get(void); -ShutdownCause qemu_reset_requested_get(void); -void qemu_system_killed(int signal, pid_t pid); -void qemu_system_reset(ShutdownCause reason); -void qemu_system_guest_panicked(GuestPanicInformation *info); -void qemu_system_guest_crashloaded(GuestPanicInformation *info); -void qemu_system_guest_pvshutdown(void); -bool qemu_system_dump_in_progress(void); - -#endif - diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h deleted file mode 100644 index 5b4397eeb80..00000000000 --- a/include/sysemu/sysemu.h +++ /dev/null @@ -1,115 +0,0 @@ -#ifndef SYSEMU_H -#define SYSEMU_H -/* Misc. things related to the system emulator. 
*/ - -#include "qemu/timer.h" -#include "qemu/notify.h" -#include "qemu/uuid.h" - -/* vl.c */ - -extern int only_migratable; -extern const char *qemu_name; -extern QemuUUID qemu_uuid; -extern bool qemu_uuid_set; - -const char *qemu_get_vm_name(void); - -void qemu_add_exit_notifier(Notifier *notify); -void qemu_remove_exit_notifier(Notifier *notify); - -void qemu_add_machine_init_done_notifier(Notifier *notify); -void qemu_remove_machine_init_done_notifier(Notifier *notify); - -void configure_rtc(QemuOpts *opts); - -void qemu_init_subsystems(void); - -extern int autostart; - -typedef enum { - VGA_NONE, VGA_STD, VGA_CIRRUS, VGA_VMWARE, VGA_XENFB, VGA_QXL, - VGA_TCX, VGA_CG3, VGA_DEVICE, VGA_VIRTIO, - VGA_TYPE_MAX, -} VGAInterfaceType; - -extern int vga_interface_type; -extern bool vga_interface_created; - -extern int graphic_width; -extern int graphic_height; -extern int graphic_depth; -extern int display_opengl; -extern const char *keyboard_layout; -extern int graphic_rotate; -extern int old_param; -extern uint8_t *boot_splash_filedata; -extern bool enable_mlock; -extern bool enable_cpu_pm; -extern QEMUClockType rtc_clock; - -#define MAX_OPTION_ROMS 16 -typedef struct QEMUOptionRom { - const char *name; - int32_t bootindex; -} QEMUOptionRom; -extern QEMUOptionRom option_rom[MAX_OPTION_ROMS]; -extern int nb_option_roms; - -#define MAX_PROM_ENVS 128 -extern const char *prom_envs[MAX_PROM_ENVS]; -extern unsigned int nb_prom_envs; - -/* serial ports */ - -/* Return the Chardev for serial port i, or NULL if none */ -Chardev *serial_hd(int i); - -/* parallel ports */ - -#define MAX_PARALLEL_PORTS 3 - -extern Chardev *parallel_hds[MAX_PARALLEL_PORTS]; - -void add_boot_device_path(int32_t bootindex, DeviceState *dev, - const char *suffix); -char *get_boot_devices_list(size_t *size); - -DeviceState *get_boot_device(uint32_t position); -void check_boot_index(int32_t bootindex, Error **errp); -void del_boot_device_path(DeviceState *dev, const char *suffix); -void device_add_bootindex_property(Object *obj, int32_t *bootindex, - const char *name, const char *suffix, - DeviceState *dev); -void restore_boot_order(void *opaque); -void validate_bootdevices(const char *devices, Error **errp); -void add_boot_device_lchs(DeviceState *dev, const char *suffix, - uint32_t lcyls, uint32_t lheads, uint32_t lsecs); -void del_boot_device_lchs(DeviceState *dev, const char *suffix); -char *get_boot_devices_lchs_list(size_t *size); - -/* handler to set the boot_device order for a specific type of MachineClass */ -typedef void QEMUBootSetHandler(void *opaque, const char *boot_order, - Error **errp); -void qemu_register_boot_set(QEMUBootSetHandler *func, void *opaque); -void qemu_boot_set(const char *boot_order, Error **errp); - -bool defaults_enabled(void); - -void qemu_init(int argc, char **argv); -int qemu_main_loop(void); -void qemu_cleanup(int); - -extern QemuOptsList qemu_legacy_drive_opts; -extern QemuOptsList qemu_common_drive_opts; -extern QemuOptsList qemu_drive_opts; -extern QemuOptsList bdrv_runtime_opts; -extern QemuOptsList qemu_chardev_opts; -extern QemuOptsList qemu_device_opts; -extern QemuOptsList qemu_netdev_opts; -extern QemuOptsList qemu_nic_opts; -extern QemuOptsList qemu_net_opts; -extern QemuOptsList qemu_global_opts; -extern QemuOptsList qemu_semihosting_config_opts; - -#endif diff --git a/include/sysemu/tcg.h b/include/sysemu/tcg.h deleted file mode 100644 index 5e2ca9aab3d..00000000000 --- a/include/sysemu/tcg.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * QEMU TCG support - * - * This work is 
licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -/* header to be included in non-TCG-specific code */ - -#ifndef SYSEMU_TCG_H -#define SYSEMU_TCG_H - -#ifdef CONFIG_TCG -extern bool tcg_allowed; -#define tcg_enabled() (tcg_allowed) -#else -#define tcg_enabled() 0 -#endif - -#endif diff --git a/include/sysemu/whpx.h b/include/sysemu/whpx.h deleted file mode 100644 index 00ff409b682..00000000000 --- a/include/sysemu/whpx.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * QEMU Windows Hypervisor Platform accelerator (WHPX) support - * - * Copyright Microsoft, Corp. 2017 - * - * Authors: - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -/* header to be included in non-WHPX-specific code */ - -#ifndef QEMU_WHPX_H -#define QEMU_WHPX_H - -#ifdef COMPILING_PER_TARGET - -#ifdef CONFIG_WHPX - -int whpx_enabled(void); -bool whpx_apic_in_platform(void); - -#else /* CONFIG_WHPX */ - -#define whpx_enabled() (0) -#define whpx_apic_in_platform() (0) - -#endif /* CONFIG_WHPX */ - -#endif /* COMPILING_PER_TARGET */ - -#endif /* QEMU_WHPX_H */ diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h deleted file mode 100644 index b5e3ea1bc02..00000000000 --- a/include/sysemu/xen-mapcache.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (C) 2011 Citrix Ltd. - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - */ - -#ifndef XEN_MAPCACHE_H -#define XEN_MAPCACHE_H - -#include "exec/cpu-common.h" -#include "sysemu/xen.h" - -typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr phys_offset, - ram_addr_t size); -#ifdef CONFIG_XEN_IS_POSSIBLE - -void xen_map_cache_init(phys_offset_to_gaddr_t f, - void *opaque); -uint8_t *xen_map_cache(MemoryRegion *mr, hwaddr phys_addr, hwaddr size, - ram_addr_t ram_addr_offset, - uint8_t lock, bool dma, - bool is_write); -ram_addr_t xen_ram_addr_from_mapcache(void *ptr); -void xen_invalidate_map_cache_entry(uint8_t *buffer); -void xen_invalidate_map_cache(void); -uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr, - hwaddr new_phys_addr, - hwaddr size); -#else - -static inline void xen_map_cache_init(phys_offset_to_gaddr_t f, - void *opaque) -{ -} - -static inline uint8_t *xen_map_cache(MemoryRegion *mr, - hwaddr phys_addr, - hwaddr size, - ram_addr_t ram_addr_offset, - uint8_t lock, - bool dma, - bool is_write) -{ - abort(); -} - -static inline ram_addr_t xen_ram_addr_from_mapcache(void *ptr) -{ - abort(); -} - -static inline void xen_invalidate_map_cache_entry(uint8_t *buffer) -{ -} - -static inline void xen_invalidate_map_cache(void) -{ -} - -static inline uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr, - hwaddr new_phys_addr, - hwaddr size) -{ - abort(); -} - -#endif - -#endif /* XEN_MAPCACHE_H */ diff --git a/include/sysemu/xen.h b/include/sysemu/xen.h deleted file mode 100644 index d70eacfbe28..00000000000 --- a/include/sysemu/xen.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * QEMU Xen support - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- */ - -/* header to be included in non-Xen-specific code */ - -#ifndef SYSEMU_XEN_H -#define SYSEMU_XEN_H - -#ifdef CONFIG_USER_ONLY -#error Cannot include sysemu/xen.h from user emulation -#endif - -#include "exec/cpu-common.h" - -#ifdef COMPILING_PER_TARGET -# ifdef CONFIG_XEN -# define CONFIG_XEN_IS_POSSIBLE -# endif -#else -# define CONFIG_XEN_IS_POSSIBLE -#endif /* COMPILING_PER_TARGET */ - -#ifdef CONFIG_XEN_IS_POSSIBLE - -extern bool xen_allowed; - -#define xen_enabled() (xen_allowed) - -void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length); -void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, - struct MemoryRegion *mr, Error **errp); - -#else /* !CONFIG_XEN_IS_POSSIBLE */ - -#define xen_enabled() 0 -static inline void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) -{ - /* nothing */ -} -static inline void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, - MemoryRegion *mr, Error **errp) -{ - g_assert_not_reached(); -} - -#endif /* CONFIG_XEN_IS_POSSIBLE */ - -bool xen_mr_is_memory(MemoryRegion *mr); -bool xen_mr_is_grants(MemoryRegion *mr); -#endif diff --git a/include/sysemu/accel-blocker.h b/include/system/accel-blocker.h similarity index 98% rename from include/sysemu/accel-blocker.h rename to include/system/accel-blocker.h index f07f3683585..e10099d6a9a 100644 --- a/include/sysemu/accel-blocker.h +++ b/include/system/accel-blocker.h @@ -14,7 +14,7 @@ #ifndef ACCEL_BLOCKER_H #define ACCEL_BLOCKER_H -#include "sysemu/cpus.h" +#include "system/cpus.h" void accel_blocker_init(void); diff --git a/include/exec/address-spaces.h b/include/system/address-spaces.h similarity index 89% rename from include/exec/address-spaces.h rename to include/system/address-spaces.h index 0d0aa61d689..72d17afb0ff 100644 --- a/include/exec/address-spaces.h +++ b/include/system/address-spaces.h @@ -11,16 +11,14 @@ * */ -#ifndef EXEC_ADDRESS_SPACES_H -#define EXEC_ADDRESS_SPACES_H +#ifndef SYSTEM_ADDRESS_SPACES_H +#define SYSTEM_ADDRESS_SPACES_H /* * Internal interfaces between memory.c/exec.c/vl.c. Do not #include unless * you're one of them. */ -#ifndef CONFIG_USER_ONLY - /* Get the root memory region. This interface should only be used temporarily * until a proper bus interface is available. 
*/ @@ -35,5 +33,3 @@ extern AddressSpace address_space_memory; extern AddressSpace address_space_io; #endif - -#endif diff --git a/include/sysemu/arch_init.h b/include/system/arch_init.h similarity index 87% rename from include/sysemu/arch_init.h rename to include/system/arch_init.h index 8d041aa84eb..51e24c3091e 100644 --- a/include/sysemu/arch_init.h +++ b/include/system/arch_init.h @@ -6,7 +6,6 @@ enum { QEMU_ARCH_ALL = -1, QEMU_ARCH_ALPHA = (1 << 0), QEMU_ARCH_ARM = (1 << 1), - QEMU_ARCH_CRIS = (1 << 2), QEMU_ARCH_I386 = (1 << 3), QEMU_ARCH_M68K = (1 << 4), QEMU_ARCH_MICROBLAZE = (1 << 6), @@ -26,8 +25,6 @@ enum { QEMU_ARCH_LOONGARCH = (1 << 23), }; -extern const uint32_t arch_type; - -void qemu_init_arch_modules(void); +bool qemu_arch_available(unsigned qemu_arch_mask); #endif diff --git a/include/sysemu/balloon.h b/include/system/balloon.h similarity index 100% rename from include/sysemu/balloon.h rename to include/system/balloon.h diff --git a/include/sysemu/block-backend-common.h b/include/system/block-backend-common.h similarity index 100% rename from include/sysemu/block-backend-common.h rename to include/system/block-backend-common.h diff --git a/include/sysemu/block-backend-global-state.h b/include/system/block-backend-global-state.h similarity index 88% rename from include/sysemu/block-backend-global-state.h rename to include/system/block-backend-global-state.h index 49c12b0fa93..c3849640df9 100644 --- a/include/sysemu/block-backend-global-state.h +++ b/include/system/block-backend-global-state.h @@ -54,9 +54,8 @@ bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp); void monitor_remove_blk(BlockBackend *blk); BlockBackendPublic *blk_get_public(BlockBackend *blk); -BlockBackend *blk_by_public(BlockBackendPublic *public); -void blk_remove_bs(BlockBackend *blk); +void GRAPH_UNLOCKED blk_remove_bs(BlockBackend *blk); int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp); int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp); bool GRAPH_RDLOCK bdrv_has_blk(BlockDriverState *bs); @@ -67,7 +66,6 @@ void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm); void blk_iostatus_enable(BlockBackend *blk); BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk); -void blk_iostatus_disable(BlockBackend *blk); void blk_iostatus_reset(BlockBackend *blk); int blk_attach_dev(BlockBackend *blk, DeviceState *dev); void blk_detach_dev(BlockBackend *blk, DeviceState *dev); @@ -76,24 +74,18 @@ BlockBackend *blk_by_dev(void *dev); BlockBackend *blk_by_qdev_id(const char *id, Error **errp); void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque); -void blk_activate(BlockBackend *blk, Error **errp); - int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags); void blk_aio_cancel(BlockAIOCB *acb); int blk_commit_all(void); bool blk_in_drain(BlockBackend *blk); -void blk_drain(BlockBackend *blk); -void blk_drain_all(void); +void GRAPH_UNLOCKED blk_drain(BlockBackend *blk); +void GRAPH_UNLOCKED blk_drain_all(void); void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error, BlockdevOnError on_write_error); bool blk_supports_write_perm(BlockBackend *blk); bool blk_is_sg(BlockBackend *blk); void blk_set_enable_write_cache(BlockBackend *blk, bool wce); int blk_get_flags(BlockBackend *blk); -bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp); -void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason); -void blk_op_block_all(BlockBackend *blk, Error *reason); 
-void blk_op_unblock_all(BlockBackend *blk, Error *reason); int blk_set_aio_context(BlockBackend *blk, AioContext *new_context, Error **errp); void blk_add_aio_context_notifier(BlockBackend *blk, @@ -105,7 +97,6 @@ void blk_remove_aio_context_notifier(BlockBackend *blk, void (*detach_aio_context)(void *), void *opaque); void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify); -void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify); BlockBackendRootState *blk_get_root_state(BlockBackend *blk); void blk_update_root_state(BlockBackend *blk); bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk); @@ -118,7 +109,7 @@ int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz); int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo); void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg); -void blk_io_limits_disable(BlockBackend *blk); +void GRAPH_UNLOCKED blk_io_limits_disable(BlockBackend *blk); void blk_io_limits_enable(BlockBackend *blk, const char *group); void blk_io_limits_update_group(BlockBackend *blk, const char *group); void blk_set_force_allow_inactivate(BlockBackend *blk); diff --git a/include/sysemu/block-backend-io.h b/include/system/block-backend-io.h similarity index 98% rename from include/sysemu/block-backend-io.h rename to include/system/block-backend-io.h index d174275a5cb..ba8dfcc7d04 100644 --- a/include/sysemu/block-backend-io.h +++ b/include/system/block-backend-io.h @@ -32,6 +32,13 @@ void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow); void blk_set_disable_request_queuing(BlockBackend *blk, bool disable); bool blk_iostatus_is_enabled(const BlockBackend *blk); +/* + * Return the qdev ID, or if no ID is assigned the QOM path, + * of the block device attached to the BlockBackend. + * + * The caller is responsible for releasing the value returned + * with g_free() after use. + */ char *blk_get_attached_dev_id(BlockBackend *blk); BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset, diff --git a/include/sysemu/block-backend.h b/include/system/block-backend.h similarity index 100% rename from include/sysemu/block-backend.h rename to include/system/block-backend.h diff --git a/include/sysemu/block-ram-registrar.h b/include/system/block-ram-registrar.h similarity index 100% rename from include/sysemu/block-ram-registrar.h rename to include/system/block-ram-registrar.h diff --git a/include/sysemu/blockdev.h b/include/system/blockdev.h similarity index 100% rename from include/sysemu/blockdev.h rename to include/system/blockdev.h diff --git a/include/system/confidential-guest-support.h b/include/system/confidential-guest-support.h new file mode 100644 index 00000000000..0cc8b26e644 --- /dev/null +++ b/include/system/confidential-guest-support.h @@ -0,0 +1,183 @@ +/* + * QEMU Confidential Guest support + * This interface describes the common pieces between various + * schemes for protecting guest memory or other state against a + * compromised hypervisor. This includes memory encryption (AMD's + * SEV and Intel's MKTME) or special protection modes (PEF on POWER, + * or PV on s390x). + * + * Copyright Red Hat. + * + * Authors: + * David Gibson + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. 
+ * + */ +#ifndef QEMU_CONFIDENTIAL_GUEST_SUPPORT_H +#define QEMU_CONFIDENTIAL_GUEST_SUPPORT_H + +#include "qom/object.h" +#include "exec/hwaddr.h" + +#define TYPE_CONFIDENTIAL_GUEST_SUPPORT "confidential-guest-support" +OBJECT_DECLARE_TYPE(ConfidentialGuestSupport, + ConfidentialGuestSupportClass, + CONFIDENTIAL_GUEST_SUPPORT) + + +typedef enum ConfidentialGuestPlatformType { + CGS_PLATFORM_SEV, + CGS_PLATFORM_SEV_ES, + CGS_PLATFORM_SEV_SNP, +} ConfidentialGuestPlatformType; + +typedef enum ConfidentialGuestMemoryType { + CGS_MEM_RAM, + CGS_MEM_RESERVED, + CGS_MEM_ACPI, + CGS_MEM_NVS, + CGS_MEM_UNUSABLE, +} ConfidentialGuestMemoryType; + +typedef struct ConfidentialGuestMemoryMapEntry { + uint64_t gpa; + uint64_t size; + ConfidentialGuestMemoryType type; +} ConfidentialGuestMemoryMapEntry; + +typedef enum ConfidentialGuestPageType { + CGS_PAGE_TYPE_NORMAL, + CGS_PAGE_TYPE_VMSA, + CGS_PAGE_TYPE_ZERO, + CGS_PAGE_TYPE_UNMEASURED, + CGS_PAGE_TYPE_SECRETS, + CGS_PAGE_TYPE_CPUID, + CGS_PAGE_TYPE_REQUIRED_MEMORY, +} ConfidentialGuestPageType; + +typedef enum ConfidentialGuestPolicyType { + GUEST_POLICY_SEV, +} ConfidentialGuestPolicyType; + +struct ConfidentialGuestSupport { + Object parent; + + /* + * True if the machine should use guest_memfd for RAM. + */ + bool require_guest_memfd; + + /* + * ready: flag set by CGS initialization code once it's ready to + * start executing instructions in a potentially-secure + * guest + * + * The definition here is a bit fuzzy, because this is essentially + * part of a self-sanity-check, rather than a strict mechanism. + * + * It's not feasible to have a single point in the common machine + * init path to configure confidential guest support, because + * different mechanisms have different interdependencies requiring + * initialization in different places, often in arch or machine + * type specific code. It's also usually not possible to check + * for invalid configurations until that initialization code. + * That means it would be very easy to have a bug allowing CGS + * init to be bypassed entirely in certain configurations. + * + * Silently ignoring a requested security feature would be bad, so + * to avoid that we check late in init that this 'ready' flag is + * set if CGS was requested. If the CGS init hasn't happened, and + * so 'ready' is not set, we'll abort. + */ + bool ready; +}; + +typedef struct ConfidentialGuestSupportClass { + ObjectClass parent; + + int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp); + int (*kvm_reset)(ConfidentialGuestSupport *cgs, Error **errp); + + /* + * Check to see if this confidential guest supports a particular + * platform or configuration. + * + * Return true if supported or false if not supported. + */ + bool (*check_support)(ConfidentialGuestPlatformType platform, + uint16_t platform_version, uint8_t highest_vtl, + uint64_t shared_gpa_boundary); + + /* + * Configure part of the state of a guest for a particular set of data, page + * type and gpa. This can be used for example to pre-populate and measure + * guest memory contents, define private ranges or set the initial CPU state + * for one or more CPUs. + * + * If memory_type is CGS_PAGE_TYPE_VMSA then ptr points to the initial CPU + * context for a virtual CPU. The format of the data depends on the type of + * confidential virtual machine. For example, for SEV-ES ptr will point to a + * vmcb_save_area structure that should be copied into guest memory at the + * address specified in gpa. 
The cpu_index parameter contains the index of + * the CPU the VMSA applies to. + */ + int (*set_guest_state)(hwaddr gpa, uint8_t *ptr, uint64_t len, + ConfidentialGuestPageType memory_type, + uint16_t cpu_index, Error **errp); + + /* + * Set the guest policy. The policy can be used to configure the + * confidential platform, such as if debug is enabled or not and can contain + * information about expected launch measurements, signed verification of + * guest configuration and other platform data. + * + * The format of the policy data is specific to each platform. For example, + * SEV-SNP uses a policy bitfield in the 'policy' argument and provides an + * ID block and ID authentication in the 'policy_data' parameters. The type + * of policy data is identified by the 'policy_type' argument. + */ + int (*set_guest_policy)(ConfidentialGuestPolicyType policy_type, + uint64_t policy, + void *policy_data1, uint32_t policy_data1_size, + void *policy_data2, uint32_t policy_data2_size, + Error **errp); + + /* + * Iterate the system memory map, getting the entry with the given index + * that can be populated into guest memory. + * + * Returns 0 for ok, 1 if the index is out of range and -1 on error. + */ + int (*get_mem_map_entry)(int index, ConfidentialGuestMemoryMapEntry *entry, + Error **errp); +} ConfidentialGuestSupportClass; + +static inline int confidential_guest_kvm_init(ConfidentialGuestSupport *cgs, + Error **errp) +{ + ConfidentialGuestSupportClass *klass; + + klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs); + if (klass->kvm_init) { + return klass->kvm_init(cgs, errp); + } + + return 0; +} + +static inline int confidential_guest_kvm_reset(ConfidentialGuestSupport *cgs, + Error **errp) +{ + ConfidentialGuestSupportClass *klass; + + klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs); + if (klass->kvm_reset) { + return klass->kvm_reset(cgs, errp); + } + + return 0; +} + +#endif /* QEMU_CONFIDENTIAL_GUEST_SUPPORT_H */ diff --git a/include/sysemu/cpu-throttle.h b/include/system/cpu-throttle.h similarity index 83% rename from include/sysemu/cpu-throttle.h rename to include/system/cpu-throttle.h index d65bdef6d03..44bf6a53893 100644 --- a/include/sysemu/cpu-throttle.h +++ b/include/system/cpu-throttle.h @@ -16,8 +16,8 @@ * */ -#ifndef SYSEMU_CPU_THROTTLE_H -#define SYSEMU_CPU_THROTTLE_H +#ifndef SYSTEM_CPU_THROTTLE_H +#define SYSTEM_CPU_THROTTLE_H #include "qemu/timer.h" @@ -65,4 +65,18 @@ bool cpu_throttle_active(void); */ int cpu_throttle_get_percentage(void); -#endif /* SYSEMU_CPU_THROTTLE_H */ +/** + * cpu_throttle_dirty_sync_timer_tick: + * + * Dirty sync timer hook. + */ +void cpu_throttle_dirty_sync_timer_tick(void *opaque); + +/** + * cpu_throttle_dirty_sync_timer: + * + * Start or stop the dirty sync timer. + */ +void cpu_throttle_dirty_sync_timer(bool enable); + +#endif /* SYSTEM_CPU_THROTTLE_H */ diff --git a/include/sysemu/cpu-timers-internal.h b/include/system/cpu-timers-internal.h similarity index 100% rename from include/sysemu/cpu-timers-internal.h rename to include/system/cpu-timers-internal.h diff --git a/include/system/cpu-timers.h b/include/system/cpu-timers.h new file mode 100644 index 00000000000..a1abed0d7a0 --- /dev/null +++ b/include/system/cpu-timers.h @@ -0,0 +1,46 @@ +/* + * CPU timers state API + * + * Copyright 2020 SUSE LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ +#ifndef SYSTEM_CPU_TIMERS_H +#define SYSTEM_CPU_TIMERS_H + +#include "qemu/timer.h" + +/* init the whole cpu timers API, including icount, ticks, and cpu_throttle */ +void cpu_timers_init(void); + +/* + * CPU Ticks and Clock + */ + +/* Caller must hold BQL */ +void cpu_enable_ticks(void); +/* Caller must hold BQL */ +void cpu_disable_ticks(void); + +/* + * return the time elapsed in VM between vm_start and vm_stop. + * cpu_get_ticks() uses units of the host CPU cycle counter. + */ +int64_t cpu_get_ticks(void); + +/* + * Returns the monotonic time elapsed in VM, i.e., + * the time between vm_start and vm_stop + */ +int64_t cpu_get_clock(void); + +void qemu_timer_notify_cb(void *opaque, QEMUClockType type); + +/* get/set VIRTUAL clock and VM elapsed ticks via the cpus accel interface */ +int64_t cpus_get_virtual_clock(void); +void cpus_set_virtual_clock(int64_t new_time); +int64_t cpus_get_elapsed_ticks(void); + +#endif /* SYSTEM_CPU_TIMERS_H */ diff --git a/include/sysemu/cpus.h b/include/system/cpus.h similarity index 86% rename from include/sysemu/cpus.h rename to include/system/cpus.h index b4a566cfe75..69be6a77a75 100644 --- a/include/sysemu/cpus.h +++ b/include/system/cpus.h @@ -1,19 +1,12 @@ #ifndef QEMU_CPUS_H #define QEMU_CPUS_H -#include "sysemu/accel-ops.h" - /* register accel-specific operations */ void cpus_register_accel(const AccelOpsClass *i); /* return registers ops */ const AccelOpsClass *cpus_get_accel(void); -/* accel/dummy-cpus.c */ - -/* Create a dummy vcpu for AccelOpsClass->create_vcpu_thread */ -void dummy_start_vcpu_thread(CPUState *); - /* interface available for cpus accelerator threads */ /* For temporary buffers for forming a name */ @@ -38,8 +31,6 @@ void resume_all_vcpus(void); void pause_all_vcpus(void); void cpu_stop_current(void); -extern int icount_align_option; - /* Unblock cpu */ void qemu_cpu_kick_self(void); diff --git a/include/sysemu/cryptodev-vhost-user.h b/include/system/cryptodev-vhost-user.h similarity index 97% rename from include/sysemu/cryptodev-vhost-user.h rename to include/system/cryptodev-vhost-user.h index 60710502c2d..5138c146fa2 100644 --- a/include/sysemu/cryptodev-vhost-user.h +++ b/include/system/cryptodev-vhost-user.h @@ -24,7 +24,7 @@ #ifndef CRYPTODEV_VHOST_USER_H #define CRYPTODEV_VHOST_USER_H -#include "sysemu/cryptodev-vhost.h" +#include "system/cryptodev-vhost.h" #define VHOST_USER_MAX_AUTH_KEY_LEN 512 #define VHOST_USER_MAX_CIPHER_KEY_LEN 64 diff --git a/include/sysemu/cryptodev-vhost.h b/include/system/cryptodev-vhost.h similarity index 99% rename from include/sysemu/cryptodev-vhost.h rename to include/system/cryptodev-vhost.h index 4c3c22acae5..b0bb09e70a3 100644 --- a/include/sysemu/cryptodev-vhost.h +++ b/include/system/cryptodev-vhost.h @@ -28,7 +28,7 @@ #include "hw/virtio/vhost-backend.h" #include "chardev/char.h" -#include "sysemu/cryptodev.h" +#include "system/cryptodev.h" typedef struct CryptoDevBackendVhostOptions { diff --git a/include/sysemu/cryptodev.h b/include/system/cryptodev.h similarity index 99% rename from include/sysemu/cryptodev.h rename to include/system/cryptodev.h index 96d3998b935..b20822df0dc 100644 --- a/include/sysemu/cryptodev.h +++ b/include/system/cryptodev.h @@ -178,7 +178,7 @@ typedef struct CryptoDevBackendAsymOpInfo { typedef void (*CryptoDevCompletionFunc) (void *opaque, int ret); typedef struct CryptoDevBackendOpInfo { - QCryptodevBackendAlgType algtype; + QCryptodevBackendAlgoType algtype; uint32_t op_code; uint32_t queue_index; CryptoDevCompletionFunc cb; diff --git 
a/include/sysemu/device_tree.h b/include/system/device_tree.h similarity index 99% rename from include/sysemu/device_tree.h rename to include/system/device_tree.h index eb601522f88..49d8482ed4e 100644 --- a/include/sysemu/device_tree.h +++ b/include/system/device_tree.h @@ -133,8 +133,6 @@ int qemu_fdt_add_path(void *fdt, const char *path); sizeof(qdt_tmp)); \ } while (0) -void qemu_fdt_dumpdtb(void *fdt, int size); - /** * qemu_fdt_setprop_sized_cells_from_array: * @fdt: device tree blob diff --git a/include/sysemu/dirtylimit.h b/include/system/dirtylimit.h similarity index 100% rename from include/sysemu/dirtylimit.h rename to include/system/dirtylimit.h diff --git a/include/sysemu/dirtyrate.h b/include/system/dirtyrate.h similarity index 100% rename from include/sysemu/dirtyrate.h rename to include/system/dirtyrate.h diff --git a/include/sysemu/dma.h b/include/system/dma.h similarity index 95% rename from include/sysemu/dma.h rename to include/system/dma.h index a1ac5bc1b54..82e7ad54374 100644 --- a/include/sysemu/dma.h +++ b/include/system/dma.h @@ -10,8 +10,8 @@ #ifndef DMA_H #define DMA_H -#include "exec/memory.h" -#include "exec/address-spaces.h" +#include "system/memory.h" +#include "system/address-spaces.h" #include "block/block.h" #include "block/accounting.h" @@ -152,7 +152,7 @@ static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr, } /** - * address_space_write: Write to address space from DMA controller. + * dma_memory_write: Write to address space from DMA controller. * * Return a MemTxResult indicating whether the operation succeeded * or failed (eg unassigned memory, device rejected the transaction, @@ -189,7 +189,7 @@ MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len, MemTxAttrs attrs); /** - * address_space_map: Map a physical memory region into a host virtual address. + * dma_memory_map: Map a physical memory region into a host virtual address. * * May map a subset of the requested range, given by and returned in @plen. * May return %NULL and set *@plen to zero(0), if resources needed to perform @@ -216,16 +216,15 @@ static inline void *dma_memory_map(AddressSpace *as, } /** - * address_space_unmap: Unmaps a memory region previously mapped - * by dma_memory_map() + * dma_memory_unmap: Unmaps a memory region previously mapped by dma_memory_map() * * Will also mark the memory as dirty if @dir == %DMA_DIRECTION_FROM_DEVICE. * @access_len gives the amount of memory that was actually read or written * by the caller. 
* * @as: #AddressSpace used - * @buffer: host pointer as returned by address_space_map() - * @len: buffer length as returned by address_space_map() + * @buffer: host pointer as returned by dma_memory_map() + * @len: buffer length as returned by dma_memory_map() * @dir: indicates the transfer direction * @access_len: amount of data actually transferred */ @@ -291,8 +290,7 @@ typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov, BlockCompletionFunc *cb, void *cb_opaque, void *opaque); -BlockAIOCB *dma_blk_io(AioContext *ctx, - QEMUSGList *sg, uint64_t offset, uint32_t align, +BlockAIOCB *dma_blk_io(QEMUSGList *sg, uint64_t offset, uint32_t align, DMAIOFunc *io_func, void *io_func_opaque, BlockCompletionFunc *cb, void *opaque, DMADirection dir); BlockAIOCB *dma_blk_read(BlockBackend *blk, diff --git a/include/sysemu/dump-arch.h b/include/system/dump-arch.h similarity index 100% rename from include/sysemu/dump-arch.h rename to include/system/dump-arch.h diff --git a/include/sysemu/dump.h b/include/system/dump.h similarity index 99% rename from include/sysemu/dump.h rename to include/system/dump.h index d702854853f..607bd7b220e 100644 --- a/include/sysemu/dump.h +++ b/include/system/dump.h @@ -39,8 +39,8 @@ #define DUMP_LEVEL (1) #define DISKDUMP_HEADER_BLOCKS (1) -#include "sysemu/dump-arch.h" -#include "sysemu/memory_mapping.h" +#include "system/dump-arch.h" +#include "system/memory_mapping.h" typedef struct QEMU_PACKED MakedumpfileHeader { char signature[16]; /* = "makedumpfile" */ diff --git a/include/sysemu/event-loop-base.h b/include/system/event-loop-base.h similarity index 100% rename from include/sysemu/event-loop-base.h rename to include/system/event-loop-base.h diff --git a/include/sysemu/host_iommu_device.h b/include/system/host_iommu_device.h similarity index 75% rename from include/sysemu/host_iommu_device.h rename to include/system/host_iommu_device.h index 809cced4ba5..3773c549779 100644 --- a/include/sysemu/host_iommu_device.h +++ b/include/system/host_iommu_device.h @@ -14,6 +14,13 @@ #include "qom/object.h" #include "qapi/error.h" +#ifdef CONFIG_LINUX +#include "linux/iommufd.h" + +typedef union VendorCaps { + struct iommu_hw_info_vtd vtd; + struct iommu_hw_info_arm_smmuv3 smmuv3; +} VendorCaps; /** * struct HostIOMMUDeviceCaps - Define host IOMMU device capabilities. @@ -22,11 +29,19 @@ * * @hw_caps: host platform IOMMU capabilities (e.g. on IOMMUFD this represents * the @out_capabilities value returned from IOMMU_GET_HW_INFO ioctl) + * @max_pasid_log2: width of PASIDs supported by host IOMMU device + * + * @vendor_caps: host platform IOMMU vendor specific capabilities (e.g. on + * IOMMUFD this represents a user-space buffer filled by kernel + * with host IOMMU @type specific hardware information data) */ typedef struct HostIOMMUDeviceCaps { uint32_t type; uint64_t hw_caps; + uint8_t max_pasid_log2; + VendorCaps vendor_caps; } HostIOMMUDeviceCaps; +#endif #define TYPE_HOST_IOMMU_DEVICE "host-iommu-device" OBJECT_DECLARE_TYPE(HostIOMMUDevice, HostIOMMUDeviceClass, HOST_IOMMU_DEVICE) @@ -38,7 +53,9 @@ struct HostIOMMUDevice { void *agent; /* pointer to agent device, ie. VFIO or VDPA device */ PCIBus *aliased_bus; int aliased_devfn; +#ifdef CONFIG_LINUX HostIOMMUDeviceCaps caps; +#endif }; /** @@ -98,6 +115,18 @@ struct HostIOMMUDeviceClass { * @hiod: handle to the host IOMMU device */ uint64_t (*get_page_size_mask)(HostIOMMUDevice *hiod); + /** + * @get_pasid: Get PASID support information along this + * @hiod Host IOMMU device + * Optional callback. 
If not implemented, PASID not supported + * + * @hiod: handle to the host IOMMU device + * + * @out_hw_caps: Output the generic iommu capability info which includes + * device PASID CAP info + * Returns the width of PASIDs. Zero means no PASID support + */ + uint8_t (*get_pasid)(HostIOMMUDevice *hiod, uint64_t *out_hw_caps); }; /* diff --git a/include/sysemu/hostmem.h b/include/system/hostmem.h similarity index 91% rename from include/sysemu/hostmem.h rename to include/system/hostmem.h index de47ae59e4b..88fa791ac78 100644 --- a/include/sysemu/hostmem.h +++ b/include/system/hostmem.h @@ -10,13 +10,13 @@ * See the COPYING file in the top-level directory. */ -#ifndef SYSEMU_HOSTMEM_H -#define SYSEMU_HOSTMEM_H +#ifndef SYSTEM_HOSTMEM_H +#define SYSTEM_HOSTMEM_H -#include "sysemu/numa.h" +#include "system/numa.h" #include "qapi/qapi-types-machine.h" #include "qom/object.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qemu/bitmap.h" #include "qemu/thread-context.h" @@ -39,6 +39,8 @@ OBJECT_DECLARE_TYPE(HostMemoryBackend, HostMemoryBackendClass, */ #define TYPE_MEMORY_BACKEND_FILE "memory-backend-file" +#define TYPE_MEMORY_BACKEND_MEMFD "memory-backend-memfd" + /** * HostMemoryBackendClass: @@ -91,4 +93,7 @@ bool host_memory_backend_is_mapped(HostMemoryBackend *backend); size_t host_memory_backend_pagesize(HostMemoryBackend *memdev); char *host_memory_backend_get_name(HostMemoryBackend *backend); +long qemu_minrampagesize(void); +long qemu_maxrampagesize(void); + #endif diff --git a/include/system/hvf.h b/include/system/hvf.h new file mode 100644 index 00000000000..d3dcf088b3f --- /dev/null +++ b/include/system/hvf.h @@ -0,0 +1,39 @@ +/* + * QEMU Hypervisor.framework (HVF) support + * + * Copyright Google Inc., 2017 + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +/* header to be included in non-HVF-specific code */ + +#ifndef HVF_H +#define HVF_H + +#include "qemu/accel.h" + +#ifdef COMPILING_PER_TARGET +# ifdef CONFIG_HVF +# define CONFIG_HVF_IS_POSSIBLE +# endif /* !CONFIG_HVF */ +#else +# define CONFIG_HVF_IS_POSSIBLE +#endif /* COMPILING_PER_TARGET */ + +#ifdef CONFIG_HVF_IS_POSSIBLE +extern bool hvf_allowed; +#define hvf_enabled() (hvf_allowed) +#else /* !CONFIG_HVF_IS_POSSIBLE */ +#define hvf_enabled() 0 +#endif /* !CONFIG_HVF_IS_POSSIBLE */ + +#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf") + +typedef struct HVFState HVFState; +DECLARE_INSTANCE_CHECKER(HVFState, HVF_STATE, + TYPE_HVF_ACCEL) + +#endif diff --git a/include/system/hvf_int.h b/include/system/hvf_int.h new file mode 100644 index 00000000000..a3b06a3e75b --- /dev/null +++ b/include/system/hvf_int.h @@ -0,0 +1,114 @@ +/* + * QEMU Hypervisor.framework (HVF) support + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +/* header to be included in HVF-specific code */ + +#ifndef HVF_INT_H +#define HVF_INT_H + +#include "qemu/queue.h" +#include "exec/vaddr.h" +#include "qom/object.h" +#include "accel/accel-ops.h" + +#ifdef __aarch64__ +#include +typedef hv_vcpu_t hvf_vcpuid; +#else +#include +typedef hv_vcpuid_t hvf_vcpuid; +#endif + +/* hvf_slot flags */ +#define HVF_SLOT_LOG (1 << 0) + +typedef struct hvf_slot { + uint64_t start; + uint64_t size; + uint8_t *mem; + int slot_id; + uint32_t flags; + MemoryRegion *region; +} hvf_slot; + +typedef struct hvf_vcpu_caps { + uint64_t vmx_cap_pinbased; + uint64_t vmx_cap_procbased; + uint64_t vmx_cap_procbased2; + uint64_t vmx_cap_entry; + uint64_t vmx_cap_exit; + uint64_t vmx_cap_preemption_timer; +} hvf_vcpu_caps; + +struct HVFState { + AccelState parent_obj; + + hvf_slot slots[32]; + int num_slots; + + hvf_vcpu_caps *hvf_caps; + uint64_t vtimer_offset; + QTAILQ_HEAD(, hvf_sw_breakpoint) hvf_sw_breakpoints; +}; +extern HVFState *hvf_state; + +struct AccelCPUState { + hvf_vcpuid fd; + void *exit; + bool vtimer_masked; + sigset_t unblock_ipi_mask; + bool guest_debug_enabled; +}; + +void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line, + const char *exp); +#define assert_hvf_ok(EX) assert_hvf_ok_impl((EX), __FILE__, __LINE__, #EX) +const char *hvf_return_string(hv_return_t ret); +int hvf_arch_init(void); +hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range); +int hvf_arch_init_vcpu(CPUState *cpu); +void hvf_arch_vcpu_destroy(CPUState *cpu); +int hvf_vcpu_exec(CPUState *); +hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t); +int hvf_put_registers(CPUState *); +int hvf_get_registers(CPUState *); +void hvf_kick_vcpu_thread(CPUState *cpu); + +struct hvf_sw_breakpoint { + vaddr pc; + vaddr saved_insn; + int use_count; + QTAILQ_ENTRY(hvf_sw_breakpoint) entry; +}; + +struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, + vaddr pc); +int hvf_sw_breakpoints_active(CPUState *cpu); + +int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp); +int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp); +int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type); +int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type); +void hvf_arch_remove_all_hw_breakpoints(void); + +/* + * hvf_update_guest_debug: + * @cs: CPUState for the CPU to update + * + * Update guest to enable or disable debugging. Per-arch specifics will be + * handled by calling down to hvf_arch_update_guest_debug. + */ +int hvf_update_guest_debug(CPUState *cpu); +void hvf_arch_update_guest_debug(CPUState *cpu); + +/* + * Return whether the guest supports debugging. + */ +bool hvf_arch_supports_guest_debug(void); + +#endif diff --git a/include/system/hw_accel.h b/include/system/hw_accel.h new file mode 100644 index 00000000000..fa9228d5d2d --- /dev/null +++ b/include/system/hw_accel.h @@ -0,0 +1,42 @@ +/* + * QEMU Hardware accelerators support + * + * Copyright 2016 Google, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_HW_ACCEL_H +#define QEMU_HW_ACCEL_H + +#include "hw/core/cpu.h" +#include "system/kvm.h" +#include "system/hvf.h" +#include "system/whpx.h" +#include "system/nvmm.h" + +/** + * cpu_synchronize_state: + * cpu_synchronize_pre_loadvm: + * @cpu: The vCPU to synchronize. 
+ * + * Request to synchronize QEMU vCPU registers from the hardware accelerator + * (the hardware accelerator is the reference). + */ +void cpu_synchronize_state(CPUState *cpu); +void cpu_synchronize_pre_loadvm(CPUState *cpu); + +/** + * cpu_synchronize_post_reset: + * cpu_synchronize_post_init: + * @cpu: The vCPU to synchronize. + * + * Request to synchronize QEMU vCPU registers to the hardware accelerator + * (QEMU is the reference). + */ +void cpu_synchronize_post_reset(CPUState *cpu); +void cpu_synchronize_post_init(CPUState *cpu); + +#endif /* QEMU_HW_ACCEL_H */ diff --git a/include/system/igvm-cfg.h b/include/system/igvm-cfg.h new file mode 100644 index 00000000000..944f23a814d --- /dev/null +++ b/include/system/igvm-cfg.h @@ -0,0 +1,49 @@ +/* + * QEMU IGVM interface + * + * Copyright (C) 2024 SUSE + * + * Authors: + * Roy Hopkins + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef QEMU_IGVM_CFG_H +#define QEMU_IGVM_CFG_H + +#include "qom/object.h" + +typedef struct IgvmCfg { + ObjectClass parent_class; + + /* + * filename: Filename that specifies a file that contains the configuration + * of the guest in Independent Guest Virtual Machine (IGVM) + * format. + */ + char *filename; +} IgvmCfg; + +typedef struct IgvmCfgClass { + ObjectClass parent_class; + + /* + * If an IGVM filename has been specified then process the IGVM file. + * Performs a no-op if no filename has been specified. + * If onlyVpContext is true then only the IGVM_VHT_VP_CONTEXT entries + * in the IGVM file will be processed, allowing information about the + * CPU state to be determined before processing the entire file. + * + * Returns 0 for ok and -1 on error. + */ + int (*process)(IgvmCfg *cfg, ConfidentialGuestSupport *cgs, + bool onlyVpContext, Error **errp); + +} IgvmCfgClass; + +#define TYPE_IGVM_CFG "igvm-cfg" + +OBJECT_DECLARE_TYPE(IgvmCfg, IgvmCfgClass, IGVM_CFG) + +#endif diff --git a/include/system/iommufd.h b/include/system/iommufd.h new file mode 100644 index 00000000000..e5419dfd57e --- /dev/null +++ b/include/system/iommufd.h @@ -0,0 +1,179 @@ +/* + * iommufd container backend declaration + * + * Copyright (C) 2024 Intel Corporation. + * Copyright Red Hat, Inc. 2024 + * + * Authors: Yi Liu + * Eric Auger + * Zhenzhong Duan + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef SYSTEM_IOMMUFD_H +#define SYSTEM_IOMMUFD_H + +#include "qom/object.h" +#include "exec/hwaddr.h" +#include "exec/cpu-common.h" +#include "system/host_iommu_device.h" + +#define TYPE_IOMMUFD_BACKEND "iommufd" +OBJECT_DECLARE_TYPE(IOMMUFDBackend, IOMMUFDBackendClass, IOMMUFD_BACKEND) + +struct IOMMUFDBackendClass { + ObjectClass parent_class; +}; + +struct IOMMUFDBackend { + Object parent; + + /*< protected >*/ + int fd; /* /dev/iommu file descriptor */ + bool owned; /* is the /dev/iommu opened internally */ + Error *cpr_blocker;/* set if be does not support CPR */ + uint32_t users; + + /*< public >*/ +}; + +/* + * Virtual IOMMU object that respresents physical IOMMU's virtualization + * support + */ +typedef struct IOMMUFDViommu { + IOMMUFDBackend *iommufd; + uint32_t s2_hwpt_id; /* Id of stage 2 HWPT */ + uint32_t viommu_id; /* virtual IOMMU ID of allocated object */ +} IOMMUFDViommu; + +/* + * Virtual device object for a physical device bind to a vIOMMU. 
+ */ +typedef struct IOMMUFDVdev { + uint32_t vdev_id; /* Virtual device ID */ + uint32_t dev_id; /* Physical device ID */ +} IOMMUFDVdev; + +typedef struct IOMMUFDVeventq { + IOMMUFDViommu *viommu; + uint32_t veventq_id; + uint32_t veventq_fd; +} IOMMUFDVeventq; + +typedef struct IOMMUFDHWqueue { + IOMMUFDViommu *viommu; + uint32_t hw_queue_id; +} IOMMUFDHWqueue; + +bool iommufd_backend_connect(IOMMUFDBackend *be, Error **errp); +void iommufd_backend_disconnect(IOMMUFDBackend *be); + +bool iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id, + Error **errp); +void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id); +int iommufd_backend_map_file_dma(IOMMUFDBackend *be, uint32_t ioas_id, + hwaddr iova, ram_addr_t size, int fd, + unsigned long start, bool readonly); +int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova, + ram_addr_t size, void *vaddr, bool readonly); +int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id, + hwaddr iova, ram_addr_t size); +bool iommufd_backend_get_device_info(IOMMUFDBackend *be, uint32_t devid, + uint32_t *type, void *data, uint32_t len, + uint64_t *caps, uint8_t *pasid_log2, + Error **errp); +bool iommufd_backend_alloc_hwpt(IOMMUFDBackend *be, uint32_t dev_id, + uint32_t pt_id, uint32_t flags, + uint32_t data_type, uint32_t data_len, + void *data_ptr, uint32_t *out_hwpt, + Error **errp); +bool iommufd_backend_alloc_viommu(IOMMUFDBackend *be, uint32_t dev_id, + uint32_t viommu_type, uint32_t hwpt_id, + void *data_ptr, uint32_t len, + uint32_t *out_hwpt, Error **errp); + +bool iommufd_backend_alloc_vdev(IOMMUFDBackend *be, uint32_t dev_id, + uint32_t viommu_id, uint64_t virt_id, + uint32_t *out_vdev_id, Error **errp); + +bool iommufd_backend_alloc_veventq(IOMMUFDBackend *be, uint32_t viommu_id, + uint32_t type, uint32_t depth, + uint32_t *out_veventq_id, + uint32_t *out_veventq_fd, Error **errp); + +bool iommufd_backend_alloc_hw_queue(IOMMUFDBackend *be, uint32_t viommu_id, + uint32_t data_type, uint32_t index, + uint64_t addr, uint64_t length, + uint32_t *out_hw_queue_id, Error **errp); + +bool iommufd_backend_viommu_mmap(IOMMUFDBackend *be, uint32_t viommu_id, + uint64_t size, off_t offset, void **ptr); + +bool iommufd_backend_set_dirty_tracking(IOMMUFDBackend *be, uint32_t hwpt_id, + bool start, Error **errp); +bool iommufd_backend_get_dirty_bitmap(IOMMUFDBackend *be, uint32_t hwpt_id, + uint64_t iova, ram_addr_t size, + uint64_t page_size, uint64_t *data, + Error **errp); +bool iommufd_backend_invalidate_cache(IOMMUFDBackend *be, uint32_t id, + uint32_t data_type, uint32_t entry_len, + uint32_t *entry_num, void *data, + Error **errp); + +bool iommufd_change_process_capable(IOMMUFDBackend *be); +bool iommufd_change_process(IOMMUFDBackend *be, Error **errp); + +#define TYPE_HOST_IOMMU_DEVICE_IOMMUFD TYPE_HOST_IOMMU_DEVICE "-iommufd" +OBJECT_DECLARE_TYPE(HostIOMMUDeviceIOMMUFD, HostIOMMUDeviceIOMMUFDClass, + HOST_IOMMU_DEVICE_IOMMUFD) + +/* Overload of the host IOMMU device for the iommufd backend */ +struct HostIOMMUDeviceIOMMUFD { + HostIOMMUDevice parent_obj; + + IOMMUFDBackend *iommufd; + uint32_t devid; + uint32_t hwpt_id; +}; + +struct HostIOMMUDeviceIOMMUFDClass { + HostIOMMUDeviceClass parent_class; + + /** + * @attach_hwpt: attach host IOMMU device to IOMMUFD hardware page table. + * VFIO and VDPA device can have different implementation. + * + * Mandatory callback. + * + * @idev: host IOMMU device backed by IOMMUFD backend. + * + * @hwpt_id: ID of IOMMUFD hardware page table. 
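/*
 * Illustrative sketch, not part of the patch: chaining the iommufd backend
 * entry points declared above to map one host buffer into a freshly
 * allocated IOAS. example_map_buffer(), its cleanup policy and the
 * assumption that iommufd_backend_map_dma() returns a negative errno on
 * failure are all hypothetical.
 */
static bool example_map_buffer(IOMMUFDBackend *be, void *buf, ram_addr_t size,
                               hwaddr iova, Error **errp)
{
    uint32_t ioas_id;
    int ret;

    if (!iommufd_backend_connect(be, errp)) {
        return false;
    }
    if (!iommufd_backend_alloc_ioas(be, &ioas_id, errp)) {
        iommufd_backend_disconnect(be);
        return false;
    }
    ret = iommufd_backend_map_dma(be, ioas_id, iova, size, buf, false);
    if (ret) {
        error_setg_errno(errp, -ret, "iommufd DMA map failed");
        iommufd_backend_free_id(be, ioas_id);
        iommufd_backend_disconnect(be);
        return false;
    }
    return true;
}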
+ * + * @errp: pass an Error out when attachment fails. + * + * Returns: true on success, false on failure. + */ + bool (*attach_hwpt)(HostIOMMUDeviceIOMMUFD *idev, uint32_t hwpt_id, + Error **errp); + /** + * @detach_hwpt: detach host IOMMU device from IOMMUFD hardware page table. + * VFIO and VDPA device can have different implementation. + * + * Mandatory callback. + * + * @idev: host IOMMU device backed by IOMMUFD backend. + * + * @errp: pass an Error out when attachment fails. + * + * Returns: true on success, false on failure. + */ + bool (*detach_hwpt)(HostIOMMUDeviceIOMMUFD *idev, Error **errp); +}; + +bool host_iommu_device_iommufd_attach_hwpt(HostIOMMUDeviceIOMMUFD *idev, + uint32_t hwpt_id, Error **errp); +bool host_iommu_device_iommufd_detach_hwpt(HostIOMMUDeviceIOMMUFD *idev, + Error **errp); +#endif diff --git a/include/exec/ioport.h b/include/system/ioport.h similarity index 96% rename from include/exec/ioport.h rename to include/system/ioport.h index 4397f12f932..780ea5a6768 100644 --- a/include/exec/ioport.h +++ b/include/system/ioport.h @@ -21,10 +21,10 @@ * IO ports API */ -#ifndef IOPORT_H -#define IOPORT_H +#ifndef SYSTEM_IOPORT_H +#define SYSTEM_IOPORT_H -#include "exec/memory.h" +#include "system/memory.h" #define MAX_IOPORTS (64 * 1024) #define IOPORTS_MASK (MAX_IOPORTS - 1) @@ -39,9 +39,7 @@ typedef struct MemoryRegionPortio { #define PORTIO_END_OF_LIST() { } -#ifndef CONFIG_USER_ONLY extern const MemoryRegionOps unassigned_io_ops; -#endif void cpu_outb(uint32_t addr, uint8_t val); void cpu_outw(uint32_t addr, uint16_t val); diff --git a/include/sysemu/iothread.h b/include/system/iothread.h similarity index 97% rename from include/sysemu/iothread.h rename to include/system/iothread.h index 2102a90eca6..d95c17a6456 100644 --- a/include/sysemu/iothread.h +++ b/include/system/iothread.h @@ -17,7 +17,7 @@ #include "block/aio.h" #include "qemu/thread.h" #include "qom/object.h" -#include "sysemu/event-loop-base.h" +#include "system/event-loop-base.h" #define TYPE_IOTHREAD "iothread" diff --git a/include/system/kvm.h b/include/system/kvm.h new file mode 100644 index 00000000000..3c7d3147366 --- /dev/null +++ b/include/system/kvm.h @@ -0,0 +1,582 @@ +/* + * QEMU KVM support + * + * Copyright IBM, Corp. 2008 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +/* header to be included in non-KVM-specific code */ + +#ifndef QEMU_KVM_H +#define QEMU_KVM_H + +#include "exec/memattrs.h" +#include "qemu/accel.h" +#include "qom/object.h" + +#ifdef COMPILING_PER_TARGET +# ifdef CONFIG_KVM +# include +# define CONFIG_KVM_IS_POSSIBLE +# endif +#else +# define CONFIG_KVM_IS_POSSIBLE +#endif + +#ifdef CONFIG_KVM_IS_POSSIBLE + +extern bool kvm_allowed; +extern bool kvm_kernel_irqchip; +extern bool kvm_split_irqchip; +extern bool kvm_async_interrupts_allowed; +extern bool kvm_halt_in_kernel_allowed; +extern bool kvm_resamplefds_allowed; +extern bool kvm_msi_via_irqfd_allowed; +extern bool kvm_gsi_routing_allowed; +extern bool kvm_gsi_direct_mapping; +extern bool kvm_readonly_mem_allowed; +extern bool kvm_msi_use_devid; +extern bool kvm_pre_fault_memory_supported; + +#define kvm_enabled() (kvm_allowed) +/** + * kvm_irqchip_in_kernel: + * + * Returns: true if an in-kernel irqchip was created. + * What this actually means is architecture and machine model + * specific: on PC, for instance, it means that the LAPIC + * is in kernel. 
This function should never be used from generic + * target-independent code: use one of the following functions or + * some other specific check instead. + */ +#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip) + +/** + * kvm_irqchip_is_split: + * + * Returns: true if the irqchip implementation is split between + * user and kernel space. The details are architecture and + * machine specific. On PC, it means that the PIC, IOAPIC, and + * PIT are in user space while the LAPIC is in the kernel. + */ +#define kvm_irqchip_is_split() (kvm_split_irqchip) + +/** + * kvm_async_interrupts_enabled: + * + * Returns: true if we can deliver interrupts to KVM + * asynchronously (ie by ioctl from any thread at any time) + * rather than having to do interrupt delivery synchronously + * (where the vcpu must be stopped at a suitable point first). + */ +#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed) + +/** + * kvm_halt_in_kernel + * + * Returns: true if halted cpus should still get a KVM_RUN ioctl to run + * inside of kernel space. This only works if MP state is implemented. + */ +#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed) + +/** + * kvm_irqfds_enabled: + * + * Returns: true if we can use irqfds to inject interrupts into + * a KVM CPU (ie the kernel supports irqfds and we are running + * with a configuration where it is meaningful to use them). + * + * Always available if running with in-kernel irqchip. + */ +#define kvm_irqfds_enabled() kvm_irqchip_in_kernel() + +/** + * kvm_resamplefds_enabled: + * + * Returns: true if we can use resamplefds to inject interrupts into + * a KVM CPU (ie the kernel supports resamplefds and we are running + * with a configuration where it is meaningful to use them). + */ +#define kvm_resamplefds_enabled() (kvm_resamplefds_allowed) + +/** + * kvm_msi_via_irqfd_enabled: + * + * Returns: true if we can route a PCI MSI (Message Signaled Interrupt) + * to a KVM CPU via an irqfd. This requires that the kernel supports + * this and that we're running in a configuration that permits it. + */ +#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed) + +/** + * kvm_gsi_routing_enabled: + * + * Returns: true if GSI routing is enabled (ie the kernel supports + * it and we're running in a configuration that permits it). + */ +#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed) + +/** + * kvm_gsi_direct_mapping: + * + * Returns: true if GSI direct mapping is enabled. + */ +#define kvm_gsi_direct_mapping() (kvm_gsi_direct_mapping) + +/** + * kvm_readonly_mem_enabled: + * + * Returns: true if KVM readonly memory is enabled (ie the kernel + * supports it and we're running in a configuration that permits it). + */ +#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed) + +/** + * kvm_msi_devid_required: + * Returns: true if KVM requires a device id to be provided while + * defining an MSI routing entry. 
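/*
 * Illustrative sketch, not part of the patch: the usual pattern for device
 * code choosing an MSI delivery path based on the predicates above. The
 * function name and the fallback comments are hypothetical.
 */
static void example_choose_msi_path(void)
{
    if (!kvm_enabled()) {
        /* Another accelerator: deliver MSIs entirely in user space. */
        return;
    }
    if (kvm_irqchip_in_kernel() && kvm_msi_via_irqfd_enabled()) {
        /* Fast path: route the vector through an irqfd. */
    } else {
        /* Fall back to injecting each message with kvm_irqchip_send_msi(). */
    }
}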
+ */ +#define kvm_msi_devid_required() (kvm_msi_use_devid) + +#else + +#define kvm_enabled() (0) +#define kvm_irqchip_in_kernel() (false) +#define kvm_irqchip_is_split() (false) +#define kvm_async_interrupts_enabled() (false) +#define kvm_halt_in_kernel() (false) +#define kvm_irqfds_enabled() (false) +#define kvm_resamplefds_enabled() (false) +#define kvm_msi_via_irqfd_enabled() (false) +#define kvm_gsi_routing_allowed() (false) +#define kvm_gsi_direct_mapping() (false) +#define kvm_readonly_mem_enabled() (false) +#define kvm_msi_devid_required() (false) + +#endif /* CONFIG_KVM_IS_POSSIBLE */ + +struct kvm_run; +struct kvm_irq_routing_entry; + +typedef struct KVMCapabilityInfo { + const char *name; + int value; +} KVMCapabilityInfo; + +#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP } +#define KVM_CAP_LAST_INFO { NULL, 0 } + +struct KVMState; + +#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm") +typedef struct KVMState KVMState; +DECLARE_INSTANCE_CHECKER(KVMState, KVM_STATE, + TYPE_KVM_ACCEL) + +extern KVMState *kvm_state; +typedef struct Notifier Notifier; + +typedef struct KVMRouteChange { + KVMState *s; + int changes; +} KVMRouteChange; + +/* external API */ + +unsigned int kvm_get_max_memslots(void); +unsigned int kvm_get_free_memslots(void); +bool kvm_has_sync_mmu(void); +int kvm_has_vcpu_events(void); +int kvm_max_nested_state_length(void); +int kvm_has_gsi_routing(void); +void kvm_close(void); + +/** + * kvm_arm_supports_user_irq + * + * Not all KVM implementations support notifications for kernel generated + * interrupt events to user space. This function indicates whether the current + * KVM implementation does support them. + * + * Returns: true if KVM supports using kernel generated IRQs from user space + */ +bool kvm_arm_supports_user_irq(void); + + +int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr); +int kvm_on_sigbus(int code, void *addr); + +int kvm_check_extension(KVMState *s, unsigned int extension); + +int kvm_vm_ioctl(KVMState *s, unsigned long type, ...); + +void kvm_flush_coalesced_mmio_buffer(void); + +#ifdef COMPILING_PER_TARGET +#include "cpu.h" + +/** + * kvm_update_guest_debug(): ensure KVM debug structures updated + * @cs: the CPUState for this cpu + * @reinject_trap: KVM trap injection control + * + * There are usually per-arch specifics which will be handled by + * calling down to kvm_arch_update_guest_debug after the generic + * fields have been set. 
+ */ +#ifdef TARGET_KVM_HAVE_GUEST_DEBUG +int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap); +#else +static inline int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) +{ + return -EINVAL; +} +#endif + +/* internal API */ + +int kvm_ioctl(KVMState *s, unsigned long type, ...); + +int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...); + +/** + * kvm_device_ioctl - call an ioctl on a kvm device + * @fd: The KVM device file descriptor as returned from KVM_CREATE_DEVICE + * @type: The device-ctrl ioctl number + * + * Returns: -errno on error, nonnegative on success + */ +int kvm_device_ioctl(int fd, unsigned long type, ...); + +/** + * kvm_vm_check_attr - check for existence of a specific vm attribute + * @s: The KVMState pointer + * @group: the group + * @attr: the attribute of that group to query for + * + * Returns: 1 if the attribute exists + * 0 if the attribute either does not exist or if the vm device + * interface is unavailable + */ +int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr); + +/** + * kvm_device_check_attr - check for existence of a specific device attribute + * @fd: The device file descriptor + * @group: the group + * @attr: the attribute of that group to query for + * + * Returns: 1 if the attribute exists + * 0 if the attribute either does not exist or if the vm device + * interface is unavailable + */ +int kvm_device_check_attr(int fd, uint32_t group, uint64_t attr); + +/** + * kvm_device_access - set or get value of a specific device attribute + * @fd: The device file descriptor + * @group: the group + * @attr: the attribute of that group to set or get + * @val: pointer to a storage area for the value + * @write: true for set and false for get operation + * @errp: error object handle + * + * Returns: 0 on success + * < 0 on error + * Use kvm_device_check_attr() in order to check for the availability + * of optional attributes. + */ +int kvm_device_access(int fd, int group, uint64_t attr, + void *val, bool write, Error **errp); + +/** + * kvm_create_device - create a KVM device for the device control API + * @KVMState: The KVMState pointer + * @type: The KVM device type (see Documentation/virtual/kvm/devices in the + * kernel source) + * @test: If true, only test if device can be created, but don't actually + * create the device. + * + * Returns: -errno on error, nonnegative on success: @test ? 0 : device fd; + */ +int kvm_create_device(KVMState *s, uint64_t type, bool test); + +/** + * kvm_device_supported - probe whether KVM supports specific device + * + * @vmfd: The fd handler for VM + * @type: type of device + * + * @return: true if supported, otherwise false. + */ +bool kvm_device_supported(int vmfd, uint64_t type); + +/** + * kvm_park_vcpu - Park QEMU KVM vCPU context + * @cpu: QOM CPUState object for which QEMU KVM vCPU context has to be parked. + * + * @returns: none + */ +void kvm_park_vcpu(CPUState *cpu); + +/** + * kvm_unpark_vcpu - unpark QEMU KVM vCPU context + * @s: KVM State + * @vcpu_id: Architecture vCPU ID of the parked vCPU + * + * @returns: KVM fd + */ +int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id); + +/** + * kvm_create_and_park_vcpu - Create and park a KVM vCPU + * @cpu: QOM CPUState object for which KVM vCPU has to be created and parked. + * + * @returns: 0 when success, errno (<0) when failed. 
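/*
 * Illustrative sketch, not part of the patch: combining kvm_create_device(),
 * kvm_device_check_attr() and kvm_device_access() as documented above. The
 * device type, the group/attr values and the wrapper name are placeholders.
 */
static int example_set_device_attr(KVMState *s, uint64_t dev_type,
                                   uint32_t group, uint64_t attr,
                                   uint64_t value, Error **errp)
{
    int fd = kvm_create_device(s, dev_type, false);

    if (fd < 0) {
        error_setg_errno(errp, -fd, "cannot create KVM device");
        return fd;
    }
    if (!kvm_device_check_attr(fd, group, attr)) {
        error_setg(errp, "device attribute not supported");
        return -ENOTSUP;
    }
    return kvm_device_access(fd, group, attr, &value, true, errp);
}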
+ */ +int kvm_create_and_park_vcpu(CPUState *cpu); + +/* Arch specific hooks */ + +extern const KVMCapabilityInfo kvm_arch_required_capabilities[]; + +void kvm_arch_accel_class_init(ObjectClass *oc); + +void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run); +MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run); + +int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run); + +int kvm_arch_process_async_events(CPUState *cpu); + +int kvm_arch_get_registers(CPUState *cpu, Error **errp); + +/* state subset only touched by the VCPU itself during runtime */ +#define KVM_PUT_RUNTIME_STATE 1 +/* state subset modified during VCPU reset */ +#define KVM_PUT_RESET_STATE 2 +/* full state set, modified during initialization or on vmload */ +#define KVM_PUT_FULL_STATE 3 + +int kvm_arch_put_registers(CPUState *cpu, int level, Error **errp); + +int kvm_arch_get_default_type(MachineState *ms); + +int kvm_arch_init(MachineState *ms, KVMState *s); + +int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp); +int kvm_arch_init_vcpu(CPUState *cpu); +int kvm_arch_destroy_vcpu(CPUState *cpu); + +#ifdef TARGET_KVM_HAVE_RESET_PARKED_VCPU +void kvm_arch_reset_parked_vcpu(unsigned long vcpu_id, int kvm_fd); +#else +static inline void kvm_arch_reset_parked_vcpu(unsigned long vcpu_id, int kvm_fd) +{ +} +#endif + +bool kvm_vcpu_id_is_valid(int vcpu_id); + +/* Returns VCPU ID to be used on KVM_CREATE_VCPU ioctl() */ +unsigned long kvm_arch_vcpu_id(CPUState *cpu); + +void kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr); + +void kvm_arch_init_irq_routing(KVMState *s); + +int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, + uint64_t address, uint32_t data, PCIDevice *dev); + +/* Notify arch about newly added MSI routes */ +int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, + int vector, PCIDevice *dev); +/* Notify arch about released MSI routes */ +int kvm_arch_release_virq_post(int virq); + +int kvm_arch_msi_data_to_gsi(uint32_t data); + +int kvm_set_irq(KVMState *s, int irq, int level); +int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg); + +void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin); + +void kvm_irqchip_add_change_notifier(Notifier *n); +void kvm_irqchip_remove_change_notifier(Notifier *n); +void kvm_irqchip_change_notify(void); + +struct kvm_guest_debug; +struct kvm_debug_exit_arch; + +struct kvm_sw_breakpoint { + vaddr pc; + vaddr saved_insn; + int use_count; + QTAILQ_ENTRY(kvm_sw_breakpoint) entry; +}; + +struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, + vaddr pc); + +int kvm_sw_breakpoints_active(CPUState *cpu); + +int kvm_arch_insert_sw_breakpoint(CPUState *cpu, + struct kvm_sw_breakpoint *bp); +int kvm_arch_remove_sw_breakpoint(CPUState *cpu, + struct kvm_sw_breakpoint *bp); +int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type); +int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type); +void kvm_arch_remove_all_hw_breakpoints(void); + +void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg); + +bool kvm_arch_stop_on_emulation_error(CPUState *cpu); + +int kvm_vm_check_extension(KVMState *s, unsigned int extension); + +#define kvm_vm_enable_cap(s, capability, cap_flags, ...) 
\ + ({ \ + struct kvm_enable_cap cap = { \ + .cap = capability, \ + .flags = cap_flags, \ + }; \ + uint64_t args_tmp[] = { __VA_ARGS__ }; \ + size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \ + memcpy(cap.args, args_tmp, n * sizeof(cap.args[0])); \ + kvm_vm_ioctl(s, KVM_ENABLE_CAP, &cap); \ + }) + +#define kvm_vcpu_enable_cap(cpu, capability, cap_flags, ...) \ + ({ \ + struct kvm_enable_cap cap = { \ + .cap = capability, \ + .flags = cap_flags, \ + }; \ + uint64_t args_tmp[] = { __VA_ARGS__ }; \ + size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \ + memcpy(cap.args, args_tmp, n * sizeof(cap.args[0])); \ + kvm_vcpu_ioctl(cpu, KVM_ENABLE_CAP, &cap); \ + }) + +void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len); + +int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr, + hwaddr *phys_addr); + +#endif /* COMPILING_PER_TARGET */ + +void kvm_cpu_synchronize_state(CPUState *cpu); + +void kvm_init_cpu_signals(CPUState *cpu); + +/** + * kvm_irqchip_add_msi_route - Add MSI route for specific vector + * @c: KVMRouteChange instance. + * @vector: which vector to add. This can be either MSI/MSIX + * vector. The function will automatically detect whether + * MSI/MSIX is enabled, and fetch corresponding MSI + * message. + * @dev: Owner PCI device to add the route. If @dev is specified + * as @NULL, an empty MSI message will be inited. + * @return: virq (>=0) when success, errno (<0) when failed. + */ +int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev); +int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, + PCIDevice *dev); +void kvm_irqchip_commit_routes(KVMState *s); + +static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s) +{ + return (KVMRouteChange) { .s = s, .changes = 0 }; +} + +static inline void kvm_irqchip_commit_route_changes(KVMRouteChange *c) +{ + if (c->changes) { + kvm_irqchip_commit_routes(c->s); + c->changes = 0; + } +} + +int kvm_irqchip_get_virq(KVMState *s); +void kvm_irqchip_release_virq(KVMState *s, int virq); + +void kvm_add_routing_entry(KVMState *s, + struct kvm_irq_routing_entry *entry); + +int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, + EventNotifier *rn, int virq); +int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, + int virq); +int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, + EventNotifier *rn, qemu_irq irq); +int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, + qemu_irq irq); +void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi); +void kvm_init_irq_routing(KVMState *s); + +bool kvm_kernel_irqchip_allowed(void); +bool kvm_kernel_irqchip_required(void); +bool kvm_kernel_irqchip_split(void); + +/** + * kvm_arch_irqchip_create: + * @KVMState: The KVMState pointer + * + * Allow architectures to create an in-kernel irq chip themselves. + * + * Returns: < 0: error + * 0: irq chip was not created + * > 0: irq chip was created + */ +int kvm_arch_irqchip_create(KVMState *s); + +/** + * kvm_set_one_reg - set a register value in KVM via KVM_SET_ONE_REG ioctl + * @id: The register ID + * @source: The pointer to the value to be set. It must point to a variable + * of the correct type/size for the register being accessed. + * + * Returns: 0 on success, or a negative errno on failure. 
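/*
 * Illustrative sketch, not part of the patch: allocating an MSI route inside
 * a route-change transaction using the helpers above. 'vector' and 'dev'
 * come from the calling device model; the wrapper name is hypothetical.
 */
static int example_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
{
    KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
    int virq = kvm_irqchip_add_msi_route(&c, vector, dev);

    if (virq < 0) {
        return virq;
    }
    kvm_irqchip_commit_route_changes(&c);
    return virq;
}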
+ */ +int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source); + +/** + * kvm_get_one_reg - get a register value from KVM via KVM_GET_ONE_REG ioctl + * @id: The register ID + * @target: The pointer where the value is to be stored. It must point to a + * variable of the correct type/size for the register being accessed. + * + * Returns: 0 on success, or a negative errno on failure. + */ +int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target); + +/* Notify resamplefd for EOI of specific interrupts. */ +void kvm_resample_fd_notify(int gsi); + +bool kvm_dirty_ring_enabled(void); + +uint32_t kvm_dirty_ring_size(void); + +void kvm_mark_guest_state_protected(void); + +/** + * kvm_hwpoisoned_mem - indicate if there is any hwpoisoned page + * reported for the VM. + */ +bool kvm_hwpoisoned_mem(void); + +int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp); + +int kvm_set_memory_attributes_private(hwaddr start, uint64_t size); +int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size); + +int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private); + +#endif diff --git a/include/sysemu/kvm_int.h b/include/system/kvm_int.h similarity index 83% rename from include/sysemu/kvm_int.h rename to include/system/kvm_int.h index 1d8fb1473bd..9247493b029 100644 --- a/include/sysemu/kvm_int.h +++ b/include/system/kvm_int.h @@ -9,11 +9,12 @@ #ifndef QEMU_KVM_INT_H #define QEMU_KVM_INT_H -#include "exec/memory.h" +#include "system/memory.h" #include "qapi/qapi-types-common.h" #include "qemu/accel.h" #include "qemu/queue.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" +#include "accel/accel-ops.h" #include "hw/boards.h" #include "hw/i386/topology.h" #include "io/channel-socket.h" @@ -45,7 +46,8 @@ typedef struct KVMMemoryUpdate { typedef struct KVMMemoryListener { MemoryListener listener; KVMSlot *slots; - unsigned int nr_used_slots; + unsigned int nr_slots_used; + unsigned int nr_slots_allocated; int as_id; QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_add; QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_del; @@ -102,8 +104,8 @@ struct KVMDirtyRingReaper { struct KVMState { AccelState parent_obj; - - int nr_slots; + /* Max number of KVM slots supported */ + int nr_slots_max; int fd; int vmfd; int coalesced_mmio; @@ -122,10 +124,19 @@ struct KVMState bool sync_mmu; bool guest_state_protected; uint64_t manual_dirty_log_protect; - /* The man page (and posix) say ioctl numbers are signed int, but - * they're not. Linux, glibc and *BSD all treat ioctl numbers as - * unsigned, and treating them as signed here can break things */ - unsigned irq_set_ioctl; + /* + * Older POSIX says that ioctl numbers are signed int, but in + * practice they are not. (Newer POSIX doesn't specify ioctl + * at all.) Linux, glibc and *BSD all treat ioctl numbers as + * unsigned, and real-world ioctl values like KVM_GET_XSAVE have + * bit 31 set, which means that passing them via an 'int' will + * result in sign-extension when they get converted back to the + * 'unsigned long' which the ioctl() prototype uses. Luckily Linux + * always treats the argument as an unsigned 32-bit int, so any + * possible sign-extension is deliberately ignored, but for + * consistency we keep to the same type that glibc is using. 
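/*
 * Illustrative sketch, not part of the patch: a read-modify-write of a single
 * register through the KVM_GET/SET_ONE_REG wrappers declared above. The
 * register ID is a placeholder for a real architecture-specific constant,
 * and the register is assumed to be 64 bits wide.
 */
static int example_bump_reg(CPUState *cs, uint64_t reg_id)
{
    uint64_t val;
    int ret = kvm_get_one_reg(cs, reg_id, &val);

    if (ret) {
        return ret;
    }
    val += 1;
    return kvm_set_one_reg(cs, reg_id, &val);
}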
+ */ + unsigned long irq_set_ioctl; unsigned int sigmask_len; GHashTable *gsimap; #ifdef KVM_CAP_IRQ_ROUTING diff --git a/include/sysemu/kvm_xen.h b/include/system/kvm_xen.h similarity index 93% rename from include/sysemu/kvm_xen.h rename to include/system/kvm_xen.h index 961c702c4e7..7d0e69f1334 100644 --- a/include/sysemu/kvm_xen.h +++ b/include/system/kvm_xen.h @@ -9,8 +9,8 @@ * */ -#ifndef QEMU_SYSEMU_KVM_XEN_H -#define QEMU_SYSEMU_KVM_XEN_H +#ifndef QEMU_SYSTEM_KVM_XEN_H +#define QEMU_SYSTEM_KVM_XEN_H /* The KVM API uses these to indicate "no GPA" or "no GFN" */ #define INVALID_GPA UINT64_MAX @@ -41,4 +41,4 @@ uint16_t kvm_xen_get_evtchn_max_pirq(void); #define XEN_SPECIAL_PFN(x) ((XEN_SPECIAL_AREA_ADDR >> TARGET_PAGE_BITS) + \ XEN_SPECIALPAGE_##x) -#endif /* QEMU_SYSEMU_KVM_XEN_H */ +#endif /* QEMU_SYSTEM_KVM_XEN_H */ diff --git a/include/exec/memory.h b/include/system/memory.h similarity index 93% rename from include/exec/memory.h rename to include/system/memory.h index 02f7528ec06..e2cd6ed1261 100644 --- a/include/exec/memory.h +++ b/include/system/memory.h @@ -11,10 +11,8 @@ * */ -#ifndef MEMORY_H -#define MEMORY_H - -#ifndef CONFIG_USER_ONLY +#ifndef SYSTEM_MEMORY_H +#define SYSTEM_MEMORY_H #include "exec/cpu-common.h" #include "exec/hwaddr.h" @@ -110,15 +108,34 @@ struct MemoryRegionSection { typedef struct IOMMUTLBEntry IOMMUTLBEntry; -/* See address_space_translate: bit 0 is read, bit 1 is write. */ +/* + * See address_space_translate: + * - bit 0 : read + * - bit 1 : write + * - bit 2 : exec + * - bit 3 : priv + * - bit 4 : global + * - bit 5 : untranslated only + */ typedef enum { IOMMU_NONE = 0, IOMMU_RO = 1, IOMMU_WO = 2, IOMMU_RW = 3, + IOMMU_EXEC = 4, + IOMMU_PRIV = 8, + IOMMU_GLOBAL = 16, + IOMMU_UNTRANSLATED_ONLY = 32, } IOMMUAccessFlags; -#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0)) +#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | \ + ((w) ? IOMMU_WO : 0)) +#define IOMMU_ACCESS_FLAG_FULL(r, w, x, p, g, uo) \ + (IOMMU_ACCESS_FLAG(r, w) | \ + ((x) ? IOMMU_EXEC : 0) | \ + ((p) ? IOMMU_PRIV : 0) | \ + ((g) ? IOMMU_GLOBAL : 0) | \ + ((uo) ? IOMMU_UNTRANSLATED_ONLY : 0)) struct IOMMUTLBEntry { AddressSpace *target_as; @@ -126,6 +143,7 @@ struct IOMMUTLBEntry { hwaddr translated_addr; hwaddr addr_mask; /* 0xfff = 4k translation */ IOMMUAccessFlags perm; + uint32_t pasid; }; /* @@ -184,6 +202,7 @@ struct IOMMUNotifier { hwaddr start; hwaddr end; int iommu_idx; + void *opaque; QLIST_ENTRY(IOMMUNotifier) node; }; typedef struct IOMMUNotifier IOMMUNotifier; @@ -246,6 +265,16 @@ typedef struct IOMMUTLBEvent { /* RAM can be private that has kvm guest memfd backend */ #define RAM_GUEST_MEMFD (1 << 12) +/* + * In RAMBlock creation functions, if MAP_SHARED is 0 in the flags parameter, + * the implementation may still create a shared mapping if other conditions + * require it. Callers who specifically want a private mapping, eg objects + * specified by the user, must pass RAM_PRIVATE. + * After RAMBlock creation, MAP_SHARED in the block's flags indicates whether + * the block is shared or private, and MAP_PRIVATE is omitted. 
+ */ +#define RAM_PRIVATE (1 << 13) + static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn, IOMMUNotifierFlag flags, hwaddr start, hwaddr end, @@ -566,8 +595,20 @@ static inline void ram_discard_listener_init(RamDiscardListener *rdl, rdl->double_discard_supported = double_discard_supported; } -typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque); -typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque); +/** + * typedef ReplayRamDiscardState: + * + * The callback handler for #RamDiscardManagerClass.replay_populated/ + * #RamDiscardManagerClass.replay_discarded to invoke on populated/discarded + * parts. + * + * @section: the #MemoryRegionSection of populated/discarded part + * @opaque: pointer to forward to the callback + * + * Returns 0 on success, or a negative error if failed. + */ +typedef int (*ReplayRamDiscardState)(MemoryRegionSection *section, + void *opaque); /* * RamDiscardManagerClass: @@ -641,36 +682,38 @@ struct RamDiscardManagerClass { /** * @replay_populated: * - * Call the #ReplayRamPopulate callback for all populated parts within the - * #MemoryRegionSection via the #RamDiscardManager. + * Call the #ReplayRamDiscardState callback for all populated parts within + * the #MemoryRegionSection via the #RamDiscardManager. * * In case any call fails, no further calls are made. * * @rdm: the #RamDiscardManager * @section: the #MemoryRegionSection - * @replay_fn: the #ReplayRamPopulate callback + * @replay_fn: the #ReplayRamDiscardState callback * @opaque: pointer to forward to the callback * * Returns 0 on success, or a negative error if any notification failed. */ int (*replay_populated)(const RamDiscardManager *rdm, MemoryRegionSection *section, - ReplayRamPopulate replay_fn, void *opaque); + ReplayRamDiscardState replay_fn, void *opaque); /** * @replay_discarded: * - * Call the #ReplayRamDiscard callback for all discarded parts within the - * #MemoryRegionSection via the #RamDiscardManager. + * Call the #ReplayRamDiscardState callback for all discarded parts within + * the #MemoryRegionSection via the #RamDiscardManager. * * @rdm: the #RamDiscardManager * @section: the #MemoryRegionSection - * @replay_fn: the #ReplayRamDiscard callback + * @replay_fn: the #ReplayRamDiscardState callback * @opaque: pointer to forward to the callback + * + * Returns 0 on success, or a negative error if any notification failed. */ - void (*replay_discarded)(const RamDiscardManager *rdm, - MemoryRegionSection *section, - ReplayRamDiscard replay_fn, void *opaque); + int (*replay_discarded)(const RamDiscardManager *rdm, + MemoryRegionSection *section, + ReplayRamDiscardState replay_fn, void *opaque); /** * @register_listener: @@ -711,15 +754,41 @@ uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm, bool ram_discard_manager_is_populated(const RamDiscardManager *rdm, const MemoryRegionSection *section); +/** + * ram_discard_manager_replay_populated: + * + * A wrapper to call the #RamDiscardManagerClass.replay_populated callback + * of the #RamDiscardManager. + * + * @rdm: the #RamDiscardManager + * @section: the #MemoryRegionSection + * @replay_fn: the #ReplayRamDiscardState callback + * @opaque: pointer to forward to the callback + * + * Returns 0 on success, or a negative error if any notification failed. 
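/*
 * Illustrative sketch, not part of the patch: a ReplayRamDiscardState
 * callback that sums up the populated size, driven through the
 * replay_populated wrapper documented above. 'rdm' and 'section' are
 * assumed to be provided by the caller.
 */
static int example_count_populated(MemoryRegionSection *section, void *opaque)
{
    uint64_t *total = opaque;

    *total += int128_get64(section->size);
    return 0;
}

/* ...in the caller... */
uint64_t populated = 0;

if (ram_discard_manager_replay_populated(rdm, section,
                                         example_count_populated,
                                         &populated)) {
    /* a callback returned an error */
}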
+ */ int ram_discard_manager_replay_populated(const RamDiscardManager *rdm, MemoryRegionSection *section, - ReplayRamPopulate replay_fn, + ReplayRamDiscardState replay_fn, void *opaque); -void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm, - MemoryRegionSection *section, - ReplayRamDiscard replay_fn, - void *opaque); +/** + * ram_discard_manager_replay_discarded: + * + * A wrapper to call the #RamDiscardManagerClass.replay_discarded callback + * of the #RamDiscardManager. + * + * @rdm: the #RamDiscardManager + * @section: the #MemoryRegionSection + * @replay_fn: the #ReplayRamDiscardState callback + * @opaque: pointer to forward to the callback + * + * Returns 0 on success, or a negative error if any notification failed. + */ +int ram_discard_manager_replay_discarded(const RamDiscardManager *rdm, + MemoryRegionSection *section, + ReplayRamDiscardState replay_fn, + void *opaque); void ram_discard_manager_register_listener(RamDiscardManager *rdm, RamDiscardListener *rdl, @@ -729,21 +798,20 @@ void ram_discard_manager_unregister_listener(RamDiscardManager *rdm, RamDiscardListener *rdl); /** - * memory_get_xlat_addr: Extract addresses from a TLB entry + * memory_translate_iotlb: Extract addresses from a TLB entry. + * Called with rcu_read_lock held. * * @iotlb: pointer to an #IOMMUTLBEntry - * @vaddr: virtual address - * @ram_addr: RAM address - * @read_only: indicates if writes are allowed - * @mr_has_discard_manager: indicates memory is controlled by a - * RamDiscardManager + * @xlat_p: return the offset of the entry from the start of the returned + * MemoryRegion. * @errp: pointer to Error*, to store an error if it happens. * - * Return: true on success, else false setting @errp with error. + * Return: On success, return the MemoryRegion containing the @iotlb translated + * addr. The MemoryRegion must not be accessed after rcu_read_unlock. + * On failure, return NULL, setting @errp with error. */ -bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, - ram_addr_t *ram_addr, bool *read_only, - bool *mr_has_discard_manager, Error **errp); +MemoryRegion *memory_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p, + Error **errp); typedef struct CoalescedMemoryRange CoalescedMemoryRange; typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd; @@ -784,7 +852,6 @@ struct MemoryRegion { bool terminates; bool ram_device; bool enabled; - bool warning_printed; /* For reservations */ uint8_t vga_logging_count; MemoryRegion *alias; hwaddr alias_offset; @@ -1084,13 +1151,7 @@ typedef struct AddressSpaceMapClient { QLIST_ENTRY(AddressSpaceMapClient) link; } AddressSpaceMapClient; -typedef struct { - MemoryRegion *mr; - void *buffer; - hwaddr addr; - hwaddr len; - bool in_use; -} BounceBuffer; +#define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096) /** * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects @@ -1110,8 +1171,17 @@ struct AddressSpace { QTAILQ_HEAD(, MemoryListener) listeners; QTAILQ_ENTRY(AddressSpace) address_spaces_link; - /* Bounce buffer to use for this address space. */ - BounceBuffer bounce; + /* + * Maximum DMA bounce buffer size used for indirect memory map requests. + * This limits the total size of bounce buffer allocations made for + * DMA requests to indirect memory regions within this AddressSpace. DMA + * requests that exceed the limit (e.g. due to overly large requested size + * or concurrent DMA requests having claimed too much buffer space) will be + * rejected and left to the caller to handle. 
+ */ + size_t max_bounce_buffer_size; + /* Total size of bounce buffers currently allocated, atomically accessed */ + size_t bounce_buffer_size; /* List of callbacks to invoke when buffers free up */ QemuMutex map_client_list_lock; QLIST_HEAD(, AddressSpaceMapClient) map_client_list; @@ -1191,7 +1261,7 @@ static inline bool MemoryRegionSection_eq(MemoryRegionSection *a, MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s); /** - * memory_region_section_new_copy: Free a copied memory region section + * memory_region_section_free_copy: Free a copied memory region section * * Free a copy of a memory section created via memory_region_section_new_copy(). * properly dropping references on all relevant members. @@ -1200,6 +1270,36 @@ MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s); */ void memory_region_section_free_copy(MemoryRegionSection *s); +/** + * memory_region_section_intersect_range: Adjust the memory section to cover + * the intersection with the given range. + * + * @s: the #MemoryRegionSection to be adjusted + * @offset: the offset of the given range in the memory region + * @size: the size of the given range + * + * Returns false if the intersection is empty, otherwise returns true. + */ +static inline bool memory_region_section_intersect_range(MemoryRegionSection *s, + uint64_t offset, + uint64_t size) +{ + uint64_t start = MAX(s->offset_within_region, offset); + Int128 end = int128_min(int128_add(int128_make64(s->offset_within_region), + s->size), + int128_add(int128_make64(offset), + int128_make64(size))); + + if (int128_le(end, int128_make64(start))) { + return false; + } + + s->offset_within_address_space += start - s->offset_within_region; + s->offset_within_region = start; + s->size = int128_sub(end, int128_make64(start)); + return true; +} + /** * memory_region_init: Initialize a memory region * @@ -1852,7 +1952,7 @@ void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n); * memory_region_unregister_iommu_notifier: unregister a notifier for * changes to IOMMU translation entries. * - * @mr: the memory region which was observed and for which notity_stopped() + * @mr: the memory region which was observed and for which notify_stopped() * needs to be called * @n: the notifier to be removed. */ @@ -2150,7 +2250,7 @@ void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size); * only useful on RAM regions. * * @mr: the region being updated. - * @readonly: whether rhe region is to be ROM or RAM. + * @readonly: whether the region is to be ROM or RAM. */ void memory_region_set_readonly(MemoryRegion *mr, bool readonly); @@ -2161,7 +2261,7 @@ void memory_region_set_readonly(MemoryRegion *mr, bool readonly); * only useful on RAM regions. * * @mr: the region being updated. - * @nonvolatile: whether rhe region is to be non-volatile. + * @nonvolatile: whether the region is to be non-volatile. */ void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile); @@ -2458,13 +2558,13 @@ static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr) * * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion * that does not cover RAM, or a #MemoryRegion that already has a - * #RamDiscardManager assigned. + * #RamDiscardManager assigned. Return 0 if the rdm is set successfully. 
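/*
 * Illustrative sketch, not part of the patch: clipping a copy of a section to
 * an (offset, size) window with the new memory_region_section_intersect_range()
 * helper above before notifying a consumer. 'section', 'offset', 'size' and
 * notify_populate() are hypothetical caller-side names.
 */
MemoryRegionSection tmp = *section;

if (memory_region_section_intersect_range(&tmp, offset, size)) {
    /* tmp now covers only the part overlapping [offset, offset + size). */
    notify_populate(&tmp);
}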
* * @mr: the #MemoryRegion * @rdm: #RamDiscardManager to set */ -void memory_region_set_ram_discard_manager(MemoryRegion *mr, - RamDiscardManager *rdm); +int memory_region_set_ram_discard_manager(MemoryRegion *mr, + RamDiscardManager *rdm); /** * memory_region_find: translate an address/size relative to a @@ -2507,7 +2607,7 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr, void memory_global_dirty_log_sync(bool last_stage); /** - * memory_global_dirty_log_sync: synchronize the dirty log for all memory + * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory * * Synchronizes the vCPUs with a thread that is reading the dirty bitmap. * This function must be called after the dirty log bitmap is cleared, and @@ -2720,6 +2820,12 @@ MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, #define ARG1_DECL AddressSpace *as #include "exec/memory_ldst.h.inc" +static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val) +{ + address_space_stl_notdirty(as, addr, val, + MEMTXATTRS_UNSPECIFIED, NULL); +} + #define SUFFIX #define ARG1 as #define ARG1_DECL AddressSpace *as @@ -2786,6 +2892,9 @@ static inline void address_space_stb_cached(MemoryRegionCache *cache, } } +#define ENDIANNESS +#include "exec/memory_ldst_cached.h.inc" + #define ENDIANNESS _le #include "exec/memory_ldst_cached.h.inc" @@ -2983,15 +3092,34 @@ MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache, int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr); bool prepare_mmio_access(MemoryRegion *mr); -static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) +static inline bool memory_region_supports_direct_access(MemoryRegion *mr) { - if (is_write) { - return memory_region_is_ram(mr) && !mr->readonly && - !mr->rom_device && !memory_region_is_ram_device(mr); - } else { - return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) || - memory_region_is_romd(mr); + /* ROM DEVICE regions only allow direct access if in ROMD mode. */ + if (memory_region_is_romd(mr)) { + return true; } + if (!memory_region_is_ram(mr)) { + return false; + } + /* + * RAM DEVICE regions can be accessed directly using memcpy, but it might + * be MMIO and access using mempy can be wrong (e.g., using instructions not + * intended for MMIO access). So we treat this as IO. + */ + return !memory_region_is_ram_device(mr); +} + +static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write, + MemTxAttrs attrs) +{ + if (!memory_region_supports_direct_access(mr)) { + return false; + } + /* Debug access can write to ROM. */ + if (is_write && !attrs.debug) { + return !mr->readonly && !mr->rom_device; + } + return true; } /** @@ -3024,7 +3152,7 @@ MemTxResult address_space_read(AddressSpace *as, hwaddr addr, fv = address_space_to_flatview(as); l = len; mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); - if (len == l && memory_access_is_direct(mr, false)) { + if (len == l && memory_access_is_direct(mr, false, attrs)) { ptr = qemu_map_ram_ptr(mr->ram_block, addr1); memcpy(buf, ptr, len); } else { @@ -3097,26 +3225,6 @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr, MemTxResult address_space_set(AddressSpace *as, hwaddr addr, uint8_t c, hwaddr len, MemTxAttrs attrs); -#ifdef COMPILING_PER_TARGET -/* enum device_endian to MemOp. 
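/*
 * Illustrative sketch, not part of the patch: guarding a memcpy-style fast
 * path with the new memory_region_supports_direct_access() helper above.
 * 'mr', 'buf', 'addr1' and 'len' are placeholders, as is the slow-path
 * comment.
 */
if (memory_region_supports_direct_access(mr)) {
    /* Safe to access the region's RAM contents directly. */
    memcpy(buf, qemu_map_ram_ptr(mr->ram_block, addr1), len);
} else {
    /* Go through the MMIO access path instead. */
}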
*/ -static inline MemOp devend_memop(enum device_endian end) -{ - QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN && - DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN); - -#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN - /* Swap if non-host endianness or native (target) endianness */ - return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP; -#else - const int non_host_endianness = - DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN; - - /* In this case, native (target) endianness needs no swap. */ - return (end == non_host_endianness) ? MO_BSWAP : 0; -#endif -} -#endif /* COMPILING_PER_TARGET */ - /* * Inhibit technologies that require discarding of pages in RAM blocks, e.g., * to manage the actual amount of memory consumed by the VM (then, the memory @@ -3172,6 +3280,7 @@ bool ram_block_discard_is_disabled(void); */ bool ram_block_discard_is_required(void); -#endif +void ram_block_add_cpr_blocker(RAMBlock *rb, Error **errp); +void ram_block_del_cpr_blocker(RAMBlock *rb); #endif diff --git a/include/sysemu/memory_mapping.h b/include/system/memory_mapping.h similarity index 100% rename from include/sysemu/memory_mapping.h rename to include/system/memory_mapping.h diff --git a/include/sysemu/numa.h b/include/system/numa.h similarity index 97% rename from include/sysemu/numa.h rename to include/system/numa.h index 04676141470..1044b0eb6e9 100644 --- a/include/sysemu/numa.h +++ b/include/system/numa.h @@ -1,9 +1,8 @@ -#ifndef SYSEMU_NUMA_H -#define SYSEMU_NUMA_H +#ifndef SYSTEM_NUMA_H +#define SYSTEM_NUMA_H #include "qemu/bitmap.h" #include "qapi/qapi-types-machine.h" -#include "exec/cpu-common.h" struct CPUArchId; diff --git a/include/system/nvmm.h b/include/system/nvmm.h new file mode 100644 index 00000000000..7390def9adb --- /dev/null +++ b/include/system/nvmm.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018-2019 Maxime Villard, All rights reserved. + * + * NetBSD Virtual Machine Monitor (NVMM) accelerator support. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +/* header to be included in non-NVMM-specific code */ + +#ifndef QEMU_NVMM_H +#define QEMU_NVMM_H + +#ifdef COMPILING_PER_TARGET +# ifdef CONFIG_NVMM +# define CONFIG_NVMM_IS_POSSIBLE +# endif /* !CONFIG_NVMM */ +#else +# define CONFIG_NVMM_IS_POSSIBLE +#endif /* COMPILING_PER_TARGET */ + +#ifdef CONFIG_NVMM_IS_POSSIBLE +extern bool nvmm_allowed; +#define nvmm_enabled() (nvmm_allowed) +#else /* !CONFIG_NVMM_IS_POSSIBLE */ +#define nvmm_enabled() 0 +#endif /* !CONFIG_NVMM_IS_POSSIBLE */ + +#endif /* QEMU_NVMM_H */ diff --git a/include/sysemu/os-posix.h b/include/system/os-posix.h similarity index 99% rename from include/sysemu/os-posix.h rename to include/system/os-posix.h index b881ac6c6f7..ce5b3bccf8d 100644 --- a/include/sysemu/os-posix.h +++ b/include/system/os-posix.h @@ -53,7 +53,7 @@ bool os_set_runas(const char *user_id); void os_set_chroot(const char *path); void os_setup_limits(void); void os_setup_post(void); -int os_mlock(void); +int os_mlock(bool on_fault); /** * qemu_alloc_stack: diff --git a/include/system/os-wasm.h b/include/system/os-wasm.h new file mode 100644 index 00000000000..3abb3aaa03b --- /dev/null +++ b/include/system/os-wasm.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: MIT */ +/* + * posix specific declarations forked from os-posix.h, removing functions not + * working on Emscripten + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2010 Jes Sorensen + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef QEMU_OS_WASM_H +#define QEMU_OS_WASM_H + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SYSMACROS +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +void os_set_line_buffering(void); +void os_setup_early_signal_handling(void); +void os_set_proc_name(const char *s); +void os_setup_signal_handling(void); +void os_setup_limits(void); +void os_setup_post(void); +int os_mlock(bool on_fault); +static inline int os_set_daemonize(bool d) +{ + return -1; +}; +bool is_daemonized(void); +static inline void os_daemonize(void) {} + +/** + * qemu_alloc_stack: + * @sz: pointer to a size_t holding the requested usable stack size + * + * Allocate memory that can be used as a stack, for instance for + * coroutines. If the memory cannot be allocated, this function + * will abort (like g_malloc()). This function also inserts an + * additional guard page to catch a potential stack overflow. 
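/*
 * Illustrative sketch, not part of the patch: allocating and releasing a
 * stack with the qemu_alloc_stack()/qemu_free_stack() helpers declared in
 * this header. The requested size is an example value; qemu_alloc_stack()
 * adjusts sz for guard-page and alignment requirements.
 */
size_t sz = 64 * 1024;
void *stack = qemu_alloc_stack(&sz);

/* ...use [stack, stack + sz) as a coroutine or signal stack... */

qemu_free_stack(stack, sz);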
+ * Note that the memory required for the guard page and alignment + * and minimal stack size restrictions will increase the value of sz. + * + * The allocated stack must be freed with qemu_free_stack(). + * + * Returns: pointer to (the lowest address of) the stack memory. + */ +void *qemu_alloc_stack(size_t *sz); + +/** + * qemu_free_stack: + * @stack: stack to free + * @sz: size of stack in bytes + * + * Free a stack allocated via qemu_alloc_stack(). Note that sz must + * be exactly the adjusted stack size returned by qemu_alloc_stack. + */ +void qemu_free_stack(void *stack, size_t sz); + +/* POSIX and Mingw32 differ in the name of the stdio lock functions. */ + +static inline void qemu_flockfile(FILE *f) +{ + flockfile(f); +} + +static inline void qemu_funlockfile(FILE *f) +{ + funlockfile(f); +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/sysemu/os-win32.h b/include/system/os-win32.h similarity index 99% rename from include/sysemu/os-win32.h rename to include/system/os-win32.h index b82a5d3ad93..3aa6cee4c23 100644 --- a/include/sysemu/os-win32.h +++ b/include/system/os-win32.h @@ -123,14 +123,13 @@ static inline bool is_daemonized(void) return false; } -static inline int os_mlock(void) +static inline int os_mlock(bool on_fault G_GNUC_UNUSED) { return -ENOSYS; } static inline void os_setup_limits(void) { - return; } #define fsync _commit diff --git a/include/sysemu/qtest.h b/include/system/qtest.h similarity index 92% rename from include/sysemu/qtest.h rename to include/system/qtest.h index c161d751650..84b1f8c6ee4 100644 --- a/include/sysemu/qtest.h +++ b/include/system/qtest.h @@ -23,8 +23,6 @@ static inline bool qtest_enabled(void) return qtest_allowed; } -#ifndef CONFIG_USER_ONLY -void qtest_send_prefix(CharBackend *chr); void G_GNUC_PRINTF(2, 3) qtest_sendf(CharBackend *chr, const char *fmt, ...); void qtest_set_command_cb(bool (*pc_cb)(CharBackend *chr, gchar **words)); bool qtest_driver(void); @@ -34,6 +32,5 @@ void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error ** void qtest_server_set_send_handler(void (*send)(void *, const char *), void *opaque); void qtest_server_inproc_recv(void *opaque, const char *buf); -#endif #endif diff --git a/include/exec/ram_addr.h b/include/system/ram_addr.h similarity index 94% rename from include/exec/ram_addr.h rename to include/system/ram_addr.h index 891c44cf2d9..15a1b1a4fa2 100644 --- a/include/exec/ram_addr.h +++ b/include/system/ram_addr.h @@ -16,18 +16,21 @@ * The functions declared here will be removed soon. 
*/ -#ifndef RAM_ADDR_H -#define RAM_ADDR_H +#ifndef SYSTEM_RAM_ADDR_H +#define SYSTEM_RAM_ADDR_H -#ifndef CONFIG_USER_ONLY -#include "cpu.h" -#include "sysemu/xen.h" -#include "sysemu/tcg.h" +#include "system/xen.h" +#include "system/tcg.h" +#include "exec/cputlb.h" #include "exec/ramlist.h" -#include "exec/ramblock.h" -#include "exec/exec-all.h" +#include "system/ramblock.h" +#include "system/memory.h" +#include "exec/target_page.h" #include "qemu/rcu.h" +#include "exec/hwaddr.h" +#include "exec/cpu-common.h" + extern uint64_t total_dirty_pages; /** @@ -98,9 +101,6 @@ static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr, bool ramblock_is_pmem(RAMBlock *rb); -long qemu_minrampagesize(void); -long qemu_maxrampagesize(void); - /** * qemu_ram_alloc_from_file, * qemu_ram_alloc_from_fd: Allocate a ram block from the specified backing @@ -108,23 +108,30 @@ long qemu_maxrampagesize(void); * * Parameters: * @size: the size in bytes of the ram block + * @max_size: the maximum size of the block after resizing * @mr: the memory region where the ram block is + * @resized: callback after calls to qemu_ram_resize * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM, * RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY, * RAM_READONLY_FD, RAM_GUEST_MEMFD * @mem_path or @fd: specify the backing file or device * @offset: Offset into target file + * @grow: extend file if necessary (but an empty file is always extended). * @errp: pointer to Error*, to store an error if it happens * * Return: * On success, return a pointer to the ram block. * On failure, return NULL. */ +typedef void (*qemu_ram_resize_cb)(const char *, uint64_t length, void *host); + RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, uint32_t ram_flags, const char *mem_path, off_t offset, Error **errp); -RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, +RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, ram_addr_t max_size, + qemu_ram_resize_cb resized, MemoryRegion *mr, uint32_t ram_flags, int fd, off_t offset, + bool grow, Error **errp); RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, @@ -132,9 +139,7 @@ RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, MemoryRegion *mr, Error **errp); RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size, - void (*resized)(const char*, - uint64_t length, - void *host), + qemu_ram_resize_cb resized, MemoryRegion *mr, Error **errp); void qemu_ram_free(RAMBlock *block); @@ -332,7 +337,9 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start, } } - xen_hvm_modified_memory(start, length); + if (xen_enabled()) { + xen_hvm_modified_memory(start, length); + } } #if !defined(_WIN32) @@ -408,7 +415,9 @@ uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, } } - xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS); + if (xen_enabled()) { + xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS); + } } else { uint8_t clients = tcg_enabled() ? 
DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE; @@ -548,5 +557,5 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, return num_dirty; } -#endif + #endif diff --git a/include/exec/ramblock.h b/include/system/ramblock.h similarity index 78% rename from include/exec/ramblock.h rename to include/system/ramblock.h index 0babd105c02..87e847e184a 100644 --- a/include/exec/ramblock.h +++ b/include/system/ramblock.h @@ -16,13 +16,16 @@ * The functions declared here will be removed soon. */ -#ifndef QEMU_EXEC_RAMBLOCK_H -#define QEMU_EXEC_RAMBLOCK_H +#ifndef SYSTEM_RAMBLOCK_H +#define SYSTEM_RAMBLOCK_H -#ifndef CONFIG_USER_ONLY -#include "cpu-common.h" +#include "exec/cpu-common.h" #include "qemu/rcu.h" #include "exec/ramlist.h" +#include "system/hostmem.h" + +#define TYPE_RAM_BLOCK_ATTRIBUTES "ram-block-attributes" +OBJECT_DECLARE_SIMPLE_TYPE(RamBlockAttributes, RAM_BLOCK_ATTRIBUTES) struct RAMBlock { struct rcu_head rcu; @@ -39,9 +42,11 @@ struct RAMBlock { /* RCU-enabled, writes protected by the ramlist lock */ QLIST_ENTRY(RAMBlock) next; QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers; + Error *cpr_blocker; int fd; uint64_t fd_offset; int guest_memfd; + RamBlockAttributes *attributes; size_t page_size; /* dirty bitmap used during migration */ unsigned long *bmap; @@ -90,5 +95,22 @@ struct RAMBlock { */ ram_addr_t postcopy_length; }; -#endif + +struct RamBlockAttributes { + Object parent; + + RAMBlock *ram_block; + + /* 1-setting of the bitmap represents ram is populated (shared) */ + unsigned bitmap_size; + unsigned long *bitmap; + + QLIST_HEAD(, RamDiscardListener) rdl_list; +}; + +RamBlockAttributes *ram_block_attributes_create(RAMBlock *ram_block); +void ram_block_attributes_destroy(RamBlockAttributes *attr); +int ram_block_attributes_state_change(RamBlockAttributes *attr, uint64_t offset, + uint64_t size, bool to_discard); + #endif diff --git a/include/sysemu/replay.h b/include/system/replay.h similarity index 95% rename from include/sysemu/replay.h rename to include/system/replay.h index f229b2109c9..1c87c97fdd0 100644 --- a/include/sysemu/replay.h +++ b/include/system/replay.h @@ -8,12 +8,8 @@ * See the COPYING file in the top-level directory. * */ -#ifndef SYSEMU_REPLAY_H -#define SYSEMU_REPLAY_H - -#ifdef CONFIG_USER_ONLY -#error Cannot include this header from user emulation -#endif +#ifndef SYSTEM_REPLAY_H +#define SYSTEM_REPLAY_H #include "exec/replay-core.h" #include "qapi/qapi-types-misc.h" @@ -73,11 +69,6 @@ int replay_get_instructions(void); /*! Updates instructions counter in replay mode. */ void replay_account_executed_instructions(void); -/** - * replay_can_wait: check if we should pause for wait-io - */ -bool replay_can_wait(void); - /* Processing clocks and other time sources */ /*! Save the specified clock */ @@ -122,8 +113,6 @@ void replay_async_events(void); /* Asynchronous events queue */ -/*! Disables storing events in the queue */ -void replay_disable_events(void); /*! Enables storing events in the queue */ void replay_enable_events(void); /*! Returns true when saving events is enabled */ diff --git a/include/sysemu/reset.h b/include/system/reset.h similarity index 96% rename from include/sysemu/reset.h rename to include/system/reset.h index ae436044a92..97131d94cfc 100644 --- a/include/sysemu/reset.h +++ b/include/system/reset.h @@ -24,9 +24,10 @@ * THE SOFTWARE. 
*/ -#ifndef QEMU_SYSEMU_RESET_H -#define QEMU_SYSEMU_RESET_H +#ifndef QEMU_SYSTEM_RESET_H +#define QEMU_SYSTEM_RESET_H +#include "hw/resettable.h" #include "qapi/qapi-events-run-state.h" typedef void QEMUResetHandler(void *opaque); @@ -110,7 +111,7 @@ void qemu_unregister_reset(QEMUResetHandler *func, void *opaque); /** * qemu_devices_reset: Perform a complete system reset - * @reason: reason for the reset + * @reason: type of the reset * * This function performs the low-level work needed to do a complete reset * of the system (calling all the callbacks registered with @@ -121,6 +122,6 @@ void qemu_unregister_reset(QEMUResetHandler *func, void *opaque); * If you want to trigger a system reset from, for instance, a device * model, don't use this function. Use qemu_system_reset_request(). */ -void qemu_devices_reset(ShutdownCause reason); +void qemu_devices_reset(ResetType type); #endif diff --git a/include/sysemu/rng-random.h b/include/system/rng-random.h similarity index 100% rename from include/sysemu/rng-random.h rename to include/system/rng-random.h diff --git a/include/sysemu/rng.h b/include/system/rng.h similarity index 100% rename from include/sysemu/rng.h rename to include/system/rng.h diff --git a/include/sysemu/rtc.h b/include/system/rtc.h similarity index 98% rename from include/sysemu/rtc.h rename to include/system/rtc.h index 0fc8ad6fdf1..cde83fab15f 100644 --- a/include/sysemu/rtc.h +++ b/include/system/rtc.h @@ -22,8 +22,8 @@ * THE SOFTWARE. */ -#ifndef SYSEMU_RTC_H -#define SYSEMU_RTC_H +#ifndef SYSTEM_RTC_H +#define SYSTEM_RTC_H /** * qemu_get_timedate: Get the current RTC time diff --git a/include/sysemu/runstate-action.h b/include/system/runstate-action.h similarity index 100% rename from include/sysemu/runstate-action.h rename to include/system/runstate-action.h diff --git a/include/system/runstate.h b/include/system/runstate.h new file mode 100644 index 00000000000..929379adae4 --- /dev/null +++ b/include/system/runstate.h @@ -0,0 +1,160 @@ +#ifndef SYSTEM_RUNSTATE_H +#define SYSTEM_RUNSTATE_H + +#include "qapi/qapi-types-run-state.h" +#include "qemu/notify.h" + +bool runstate_check(RunState state); +void runstate_set(RunState new_state); +RunState runstate_get(void); +bool runstate_is_running(void); +bool runstate_needs_reset(void); +void runstate_replay_enable(void); + +typedef void VMChangeStateHandler(void *opaque, bool running, RunState state); +typedef int VMChangeStateHandlerWithRet(void *opaque, bool running, RunState state); + +/** + * qemu_add_vm_change_state_handler: + * @cb: the callback to invoke + * @opaque: user data passed to the callback + * + * Register a callback function that is invoked when the vm starts or stops + * running. + * + * Returns: an entry to be freed using qemu_del_vm_change_state_handler() + */ +VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb, + void *opaque); +/** + * qemu_add_vm_change_state_handler_prio: + * @cb: the callback to invoke + * @opaque: user data passed to the callback + * @priority: low priorities execute first when the vm runs and the reverse is + * true when the vm stops + * + * Register a callback function that is invoked when the vm starts or stops + * running. 
+ * + * Returns: an entry to be freed using qemu_del_vm_change_state_handler() + */ +VMChangeStateEntry *qemu_add_vm_change_state_handler_prio( + VMChangeStateHandler *cb, void *opaque, int priority); +VMChangeStateEntry * +/** + * qemu_add_vm_change_state_handler_prio_full: + * @cb: the main callback to invoke + * @prepare_cb: a callback to invoke before the main callback + * @cb_ret: the main callback to invoke with return value + * @opaque: user data passed to the callbacks + * @priority: low priorities execute first when the vm runs and the reverse is + * true when the vm stops + * + * Register a main callback function and an optional prepare callback function + * that are invoked when the vm starts or stops running. The main callback and + * the prepare callback are called in two separate phases: First all prepare + * callbacks are called and only then all main callbacks are called. As its + * name suggests, the prepare callback can be used to do some preparatory work + * before invoking the main callback. + * + * Returns: an entry to be freed using qemu_del_vm_change_state_handler() + */ +qemu_add_vm_change_state_handler_prio_full(VMChangeStateHandler *cb, + VMChangeStateHandler *prepare_cb, + VMChangeStateHandlerWithRet *cb_ret, + void *opaque, int priority); +VMChangeStateEntry *qdev_add_vm_change_state_handler(DeviceState *dev, + VMChangeStateHandler *cb, + VMChangeStateHandlerWithRet *cb_ret, + void *opaque); +VMChangeStateEntry *qdev_add_vm_change_state_handler_full( + DeviceState *dev, VMChangeStateHandler *cb, VMChangeStateHandler *prepare_cb, + VMChangeStateHandlerWithRet *cb_ret, void *opaque); +void qemu_del_vm_change_state_handler(VMChangeStateEntry *e); +/** + * vm_state_notify: Notify the state of the VM + * + * @running: whether the VM is running or not. + * @state: the #RunState of the VM. + * + * Return the result of the callback which has return value. + * If no callback has return value, still return 0 and the + * upper layer should not do additional processing. + */ +int vm_state_notify(bool running, RunState state); + +static inline bool shutdown_caused_by_guest(ShutdownCause cause) +{ + return cause >= SHUTDOWN_CAUSE_GUEST_SHUTDOWN; +} + +/* + * In a "live" state, the vcpu clock is ticking, and the runstate notifiers + * think we are running. + */ +static inline bool runstate_is_live(RunState state) +{ + return state == RUN_STATE_RUNNING || state == RUN_STATE_SUSPENDED; +} + +void vm_start(void); + +/** + * vm_prepare_start: Prepare for starting/resuming the VM + * + * @step_pending: whether any of the CPUs is about to be single-stepped by gdb + */ +int vm_prepare_start(bool step_pending); + +/** + * vm_resume: If @state is a live state, start the vm and set the state, + * else just set the state. 
+ * + * @state: the state to restore + */ +void vm_resume(RunState state); + +int vm_stop(RunState state); +int vm_stop_force_state(RunState state); +int vm_shutdown(void); +void vm_set_suspended(bool suspended); +bool vm_get_suspended(void); + +typedef enum WakeupReason { + /* Always keep QEMU_WAKEUP_REASON_NONE = 0 */ + QEMU_WAKEUP_REASON_NONE = 0, + QEMU_WAKEUP_REASON_RTC, + QEMU_WAKEUP_REASON_PMTIMER, + QEMU_WAKEUP_REASON_OTHER, +} WakeupReason; + +void qemu_system_reset_request(ShutdownCause reason); +void qemu_system_suspend_request(void); +void qemu_register_suspend_notifier(Notifier *notifier); +bool qemu_wakeup_suspend_enabled(void); +void qemu_system_wakeup_request(WakeupReason reason, Error **errp); +void qemu_system_wakeup_enable(WakeupReason reason, bool enabled); +void qemu_register_wakeup_notifier(Notifier *notifier); +void qemu_register_wakeup_support(void); +void qemu_system_shutdown_request_with_code(ShutdownCause reason, + int exit_code); +void qemu_system_shutdown_request(ShutdownCause reason); +void qemu_system_powerdown_request(void); +void qemu_register_powerdown_notifier(Notifier *notifier); +void qemu_register_shutdown_notifier(Notifier *notifier); +void qemu_system_debug_request(void); +void qemu_system_vmstop_request(RunState reason); +void qemu_system_vmstop_request_prepare(void); +bool qemu_vmstop_requested(RunState *r); +ShutdownCause qemu_shutdown_requested_get(void); +bool qemu_force_shutdown_requested(void); +ShutdownCause qemu_reset_requested_get(void); +void qemu_system_killed(int signal, pid_t pid); +void qemu_system_reset(ShutdownCause reason); +void qemu_system_guest_panicked(GuestPanicInformation *info); +void qemu_system_guest_crashloaded(GuestPanicInformation *info); +void qemu_system_guest_pvshutdown(void); +bool qemu_system_dump_in_progress(void); + +#endif + diff --git a/include/sysemu/seccomp.h b/include/system/seccomp.h similarity index 100% rename from include/sysemu/seccomp.h rename to include/system/seccomp.h diff --git a/include/sysemu/spdm-socket.h b/include/system/spdm-socket.h similarity index 100% rename from include/sysemu/spdm-socket.h rename to include/system/spdm-socket.h diff --git a/include/sysemu/stats.h b/include/system/stats.h similarity index 100% rename from include/sysemu/stats.h rename to include/system/stats.h diff --git a/include/system/system.h b/include/system/system.h new file mode 100644 index 00000000000..a7effe7dfd8 --- /dev/null +++ b/include/system/system.h @@ -0,0 +1,125 @@ +#ifndef SYSTEM_H +#define SYSTEM_H +/* Misc. things related to the system emulator. */ + +#include "qemu/timer.h" +#include "qemu/notify.h" +#include "qemu/uuid.h" + +/* vl.c */ + +extern int only_migratable; +extern const char *qemu_name; +extern QemuUUID qemu_uuid; +extern bool qemu_uuid_set; + +const char *qemu_get_vm_name(void); + +/* Exit notifiers will run with BQL held. 
*/ +void qemu_add_exit_notifier(Notifier *notify); +void qemu_remove_exit_notifier(Notifier *notify); + +void qemu_add_machine_init_done_notifier(Notifier *notify); +void qemu_remove_machine_init_done_notifier(Notifier *notify); + +void configure_rtc(QemuOpts *opts); + +void qemu_init_subsystems(void); + +extern int autostart; + +typedef enum { + VGA_NONE, VGA_STD, VGA_CIRRUS, VGA_VMWARE, VGA_XENFB, VGA_QXL, + VGA_TCX, VGA_CG3, VGA_DEVICE, VGA_VIRTIO, + VGA_TYPE_MAX, +} VGAInterfaceType; + +extern int vga_interface_type; +extern bool vga_interface_created; + +extern int graphic_width; +extern int graphic_height; +extern int graphic_depth; +extern int display_opengl; +extern const char *keyboard_layout; +extern int old_param; +extern uint8_t *boot_splash_filedata; +extern bool enable_cpu_pm; +extern QEMUClockType rtc_clock; + +typedef enum { + MLOCK_OFF = 0, + MLOCK_ON, + MLOCK_ON_FAULT, +} MlockState; + +bool should_mlock(MlockState); +bool is_mlock_on_fault(MlockState); + +extern MlockState mlock_state; + +#define MAX_OPTION_ROMS 16 +typedef struct QEMUOptionRom { + const char *name; + int32_t bootindex; +} QEMUOptionRom; +extern QEMUOptionRom option_rom[MAX_OPTION_ROMS]; +extern int nb_option_roms; + +#define MAX_PROM_ENVS 128 +extern const char *prom_envs[MAX_PROM_ENVS]; +extern unsigned int nb_prom_envs; + +/* serial ports */ + +/* Return the Chardev for serial port i, or NULL if none */ +Chardev *serial_hd(int i); + +/* parallel ports */ + +#define MAX_PARALLEL_PORTS 3 + +extern Chardev *parallel_hds[MAX_PARALLEL_PORTS]; + +void add_boot_device_path(int32_t bootindex, DeviceState *dev, + const char *suffix); +char *get_boot_devices_list(size_t *size); + +DeviceState *get_boot_device(uint32_t position); +void check_boot_index(int32_t bootindex, Error **errp); +void del_boot_device_path(DeviceState *dev, const char *suffix); +void device_add_bootindex_property(Object *obj, int32_t *bootindex, + const char *name, const char *suffix, + DeviceState *dev); +void restore_boot_order(void *opaque); +void validate_bootdevices(const char *devices, Error **errp); +void add_boot_device_lchs(DeviceState *dev, const char *suffix, + uint32_t lcyls, uint32_t lheads, uint32_t lsecs); +void del_boot_device_lchs(DeviceState *dev, const char *suffix); +char *get_boot_devices_lchs_list(size_t *size); + +/* handler to set the boot_device order for a specific type of MachineClass */ +typedef void QEMUBootSetHandler(void *opaque, const char *boot_order, + Error **errp); +void qemu_register_boot_set(QEMUBootSetHandler *func, void *opaque); +void qemu_boot_set(const char *boot_order, Error **errp); + +bool defaults_enabled(void); + +void qemu_init(int argc, char **argv); +int qemu_main_loop(void); +void qemu_cleanup(int); + +extern QemuOptsList qemu_legacy_drive_opts; +extern QemuOptsList qemu_common_drive_opts; +extern QemuOptsList qemu_drive_opts; +extern QemuOptsList bdrv_runtime_opts; +extern QemuOptsList qemu_chardev_opts; +extern QemuOptsList qemu_device_opts; +extern QemuOptsList qemu_netdev_opts; +extern QemuOptsList qemu_nic_opts; +extern QemuOptsList qemu_net_opts; +extern QemuOptsList qemu_global_opts; +extern QemuOptsList qemu_semihosting_config_opts; + +#endif diff --git a/include/system/tcg.h b/include/system/tcg.h new file mode 100644 index 00000000000..7622dcea302 --- /dev/null +++ b/include/system/tcg.h @@ -0,0 +1,28 @@ +/* + * QEMU TCG support + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +/* header to be included in non-TCG-specific code */ + +#ifndef SYSTEM_TCG_H +#define SYSTEM_TCG_H + +#ifdef CONFIG_TCG +extern bool tcg_allowed; +#define tcg_enabled() (tcg_allowed) +#else +#define tcg_enabled() 0 +#endif + +/** + * qemu_tcg_mttcg_enabled: + * Check whether we are running MultiThread TCG or not. + * + * Returns: %true if we are in MTTCG mode %false otherwise. + */ +bool qemu_tcg_mttcg_enabled(void); + +#endif diff --git a/include/sysemu/tpm.h b/include/system/tpm.h similarity index 100% rename from include/sysemu/tpm.h rename to include/system/tpm.h diff --git a/include/sysemu/tpm_backend.h b/include/system/tpm_backend.h similarity index 99% rename from include/sysemu/tpm_backend.h rename to include/system/tpm_backend.h index 7fabafefee1..01b11f629c4 100644 --- a/include/sysemu/tpm_backend.h +++ b/include/system/tpm_backend.h @@ -15,7 +15,7 @@ #include "qom/object.h" #include "qemu/option.h" -#include "sysemu/tpm.h" +#include "system/tpm.h" #include "qapi/error.h" #ifdef CONFIG_TPM diff --git a/include/sysemu/tpm_util.h b/include/system/tpm_util.h similarity index 94% rename from include/sysemu/tpm_util.h rename to include/system/tpm_util.h index 08f05172a71..18586932257 100644 --- a/include/sysemu/tpm_util.h +++ b/include/system/tpm_util.h @@ -19,10 +19,10 @@ * License along with this library; if not, see */ -#ifndef SYSEMU_TPM_UTIL_H -#define SYSEMU_TPM_UTIL_H +#ifndef SYSTEM_TPM_UTIL_H +#define SYSTEM_TPM_UTIL_H -#include "sysemu/tpm.h" +#include "system/tpm.h" #include "qemu/bswap.h" void tpm_util_write_fatal_error_response(uint8_t *out, uint32_t out_len); @@ -69,4 +69,4 @@ static inline void tpm_cmd_set_error(void *b, uint32_t error) void tpm_util_show_buffer(const unsigned char *buffer, size_t buffer_size, const char *string); -#endif /* SYSEMU_TPM_UTIL_H */ +#endif /* SYSTEM_TPM_UTIL_H */ diff --git a/include/sysemu/vhost-user-backend.h b/include/system/vhost-user-backend.h similarity index 93% rename from include/sysemu/vhost-user-backend.h rename to include/system/vhost-user-backend.h index 327b0b84f1f..5634ebdb2eb 100644 --- a/include/sysemu/vhost-user-backend.h +++ b/include/system/vhost-user-backend.h @@ -13,7 +13,7 @@ #define QEMU_VHOST_USER_BACKEND_H #include "qom/object.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qemu/option.h" #include "qemu/bitmap.h" #include "hw/virtio/vhost.h" @@ -43,6 +43,6 @@ struct VhostUserBackend { int vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev, unsigned nvqs, Error **errp); void vhost_user_backend_start(VhostUserBackend *b); -void vhost_user_backend_stop(VhostUserBackend *b); +int vhost_user_backend_stop(VhostUserBackend *b); #endif diff --git a/include/sysemu/watchdog.h b/include/system/watchdog.h similarity index 100% rename from include/sysemu/watchdog.h rename to include/system/watchdog.h diff --git a/include/system/whpx.h b/include/system/whpx.h new file mode 100644 index 00000000000..00f6a3e5236 --- /dev/null +++ b/include/system/whpx.h @@ -0,0 +1,35 @@ +/* + * QEMU Windows Hypervisor Platform accelerator (WHPX) support + * + * Copyright Microsoft, Corp. 2017 + * + * Authors: + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +/* header to be included in non-WHPX-specific code */ + +#ifndef QEMU_WHPX_H +#define QEMU_WHPX_H + +#ifdef COMPILING_PER_TARGET +# ifdef CONFIG_WHPX +# define CONFIG_WHPX_IS_POSSIBLE +# endif /* !CONFIG_WHPX */ +#else +# define CONFIG_WHPX_IS_POSSIBLE +#endif /* COMPILING_PER_TARGET */ + +#ifdef CONFIG_WHPX_IS_POSSIBLE +extern bool whpx_allowed; +#define whpx_enabled() (whpx_allowed) +bool whpx_apic_in_platform(void); +#else /* !CONFIG_WHPX_IS_POSSIBLE */ +#define whpx_enabled() 0 +#define whpx_apic_in_platform() (0) +#endif /* !CONFIG_WHPX_IS_POSSIBLE */ + +#endif /* QEMU_WHPX_H */ diff --git a/include/system/xen-mapcache.h b/include/system/xen-mapcache.h new file mode 100644 index 00000000000..bb454a7c96c --- /dev/null +++ b/include/system/xen-mapcache.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2011 Citrix Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef XEN_MAPCACHE_H +#define XEN_MAPCACHE_H + +#include "exec/cpu-common.h" +#include "system/xen.h" + +typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr phys_offset, + ram_addr_t size); +void xen_map_cache_init(phys_offset_to_gaddr_t f, + void *opaque); +uint8_t *xen_map_cache(MemoryRegion *mr, hwaddr phys_addr, hwaddr size, + ram_addr_t ram_addr_offset, + uint8_t lock, bool dma, + bool is_write); +ram_addr_t xen_ram_addr_from_mapcache(void *ptr); +void xen_invalidate_map_cache_entry(uint8_t *buffer); +void xen_invalidate_map_cache(void); +uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr, + hwaddr new_phys_addr, + hwaddr size); + +#endif /* XEN_MAPCACHE_H */ diff --git a/include/system/xen.h b/include/system/xen.h new file mode 100644 index 00000000000..c2f283d1c26 --- /dev/null +++ b/include/system/xen.h @@ -0,0 +1,35 @@ +/* + * QEMU Xen support + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +/* header to be included in non-Xen-specific code */ + +#ifndef SYSTEM_XEN_H +#define SYSTEM_XEN_H + +#include "exec/cpu-common.h" + +#ifdef COMPILING_PER_TARGET +# ifdef CONFIG_XEN +# define CONFIG_XEN_IS_POSSIBLE +# endif +#else +# define CONFIG_XEN_IS_POSSIBLE +#endif /* COMPILING_PER_TARGET */ + +#ifdef CONFIG_XEN_IS_POSSIBLE +extern bool xen_allowed; +#define xen_enabled() (xen_allowed) +#else /* !CONFIG_XEN_IS_POSSIBLE */ +#define xen_enabled() 0 +#endif /* CONFIG_XEN_IS_POSSIBLE */ + +void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length); +void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, + struct MemoryRegion *mr, Error **errp); +bool xen_mr_is_memory(MemoryRegion *mr); +bool xen_mr_is_grants(MemoryRegion *mr); +#endif diff --git a/include/tcg/insn-start-words.h b/include/tcg/insn-start-words.h index 50c18bd326d..c52aec50a70 100644 --- a/include/tcg/insn-start-words.h +++ b/include/tcg/insn-start-words.h @@ -1,17 +1,12 @@ /* SPDX-License-Identifier: MIT */ /* - * Define TARGET_INSN_START_WORDS + * Define INSN_START_WORDS * Copyright (c) 2008 Fabrice Bellard */ -#ifndef TARGET_INSN_START_WORDS +#ifndef TCG_INSN_START_WORDS +#define TCG_INSN_START_WORDS -#include "cpu.h" +#define INSN_START_WORDS 3 -#ifndef TARGET_INSN_START_EXTRA_WORDS -# define TARGET_INSN_START_WORDS 1 -#else -# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS) -#endif - -#endif /* TARGET_INSN_START_WORDS */ +#endif /* TCG_INSN_START_WORDS */ diff --git a/include/tcg/oversized-guest.h b/include/tcg/oversized-guest.h deleted file mode 100644 index 641b9749ffc..00000000000 --- a/include/tcg/oversized-guest.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Define TCG_OVERSIZED_GUEST - * Copyright (c) 2008 Fabrice Bellard - */ - -#ifndef EXEC_TCG_OVERSIZED_GUEST_H -#define EXEC_TCG_OVERSIZED_GUEST_H - -#include "tcg-target-reg-bits.h" -#include "cpu-param.h" - -/* - * Oversized TCG guests make things like MTTCG hard - * as we can't use atomics for cputlb updates. - */ -#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS -#define TCG_OVERSIZED_GUEST 1 -#else -#define TCG_OVERSIZED_GUEST 0 -#endif - -#endif diff --git a/include/tcg/startup.h b/include/tcg/startup.h index f71305765c0..95f574af2be 100644 --- a/include/tcg/startup.h +++ b/include/tcg/startup.h @@ -29,12 +29,12 @@ * tcg_init: Initialize the TCG runtime * @tb_size: translation buffer size * @splitwx: use separate rw and rx mappings - * @max_cpus: number of vcpus in system mode + * @max_threads: number of vcpu threads in system mode * * Allocate and initialize TCG resources, especially the JIT buffer. - * In user-only mode, @max_cpus is unused. + * In user-only mode, @max_threads is unused. 
*/ -void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus); +void tcg_init(size_t tb_size, int splitwx, unsigned max_threads); /** * tcg_register_thread: Register this thread with the TCG runtime diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h index 009e2778c5f..e1071adebf2 100644 --- a/include/tcg/tcg-op-common.h +++ b/include/tcg/tcg-op-common.h @@ -14,6 +14,7 @@ TCGv_i32 tcg_constant_i32(int32_t val); TCGv_i64 tcg_constant_i64(int64_t val); +TCGv_vaddr tcg_constant_vaddr(uintptr_t val); TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val); TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val); @@ -135,6 +136,8 @@ void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh); void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh); +void tcg_gen_addcio_i32(TCGv_i32 r, TCGv_i32 co, + TCGv_i32 a, TCGv_i32 b, TCGv_i32 ci); void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2); void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2); @@ -238,6 +241,8 @@ void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh); void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh); +void tcg_gen_addcio_i64(TCGv_i64 r, TCGv_i64 co, + TCGv_i64 a, TCGv_i64 b, TCGv_i64 ci); void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2); void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2); diff --git a/include/tcg/tcg-op-gvec-common.h b/include/tcg/tcg-op-gvec-common.h index 65553f5f971..ea0c87f4db6 100644 --- a/include/tcg/tcg-op-gvec-common.h +++ b/include/tcg/tcg-op-gvec-common.h @@ -227,25 +227,66 @@ typedef struct { bool prefer_i64; } GVecGen4i; +/* Expand (dbase+dofs) = op(abase+aofs), length @oprsz, clearing to @maxsz. */ +void tcg_gen_gvec_2_var(TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz, const GVecGen2 *op); +/* Similarly, expand (env+dofs) = op(env+aofs). */ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs, - uint32_t oprsz, uint32_t maxsz, const GVecGen2 *); + uint32_t oprsz, uint32_t maxsz, const GVecGen2 *op); +/* Similarly, expand (env+dofs) = op(env+aofs, c). */ void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz, - uint32_t maxsz, int64_t c, const GVecGen2i *); + uint32_t maxsz, int64_t c, const GVecGen2i *op); +/* Similarly, expand (env+dofs) = op(env+aofs, s). */ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz, - uint32_t maxsz, TCGv_i64 c, const GVecGen2s *); + uint32_t maxsz, TCGv_i64 c, const GVecGen2s *op); + +/* + * Expand (dbase+dofs) = op(abase+aofs, bbase+bofs), + * length @oprsz, clearing to @maxsz. + */ +void tcg_gen_gvec_3_var(TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + TCGv_ptr bbase, uint32_t bofs, + uint32_t oprsz, uint32_t maxsz, const GVecGen3 *op); +/* Similarly, expand (env+dofs) = op(env+aofs, env+bofs). 
*/ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs, - uint32_t oprsz, uint32_t maxsz, const GVecGen3 *); + uint32_t oprsz, uint32_t maxsz, const GVecGen3 *op); + +/* + * Depending on op->load_dest and op->write_aofs, expand + * (env+dofs) = op(env+aofs, env+bofs, c) + * or + * (env+dofs) = op(env+dofs, env+aofs, env+bofs, c) + * or + * (env+dofs), (env+aofs) = op(env+aofs, env+bofs, c) + * or + * (env+dofs), (env+aofs) = op(env+dofs, env+aofs, env+bofs, c) + */ void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz, int64_t c, - const GVecGen3i *); + const GVecGen3i *op); + +/* + * Depending on op->write_aofs, expand + * (env+dofs) = op(env+aofs, env+bofs, env+cofs) + * or + * (env+dofs), (env+aofs) = op(env+aofs, env+bofs, env+cofs) + */ void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t oprsz, uint32_t maxsz, const GVecGen4 *); + +/* Expand (env+dofs) = op(env+aofs, env+bofs, env+cofs, c). */ void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t oprsz, uint32_t maxsz, int64_t c, const GVecGen4i *); /* Expand a specific vector operation. */ +void tcg_gen_gvec_mov_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz); + void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs, @@ -255,6 +296,15 @@ void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs, void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_add_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + TCGv_ptr bbase, uint32_t bofs, + uint32_t oprsz, uint32_t maxsz); +void tcg_gen_gvec_sub_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + TCGv_ptr bbase, uint32_t bofs, + uint32_t oprsz, uint32_t maxsz); + void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs, @@ -336,6 +386,9 @@ void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t s, void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t s, uint32_t m, TCGv_i64); +void tcg_gen_gvec_dup_imm_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs, + uint32_t oprsz, uint32_t maxsz, uint64_t imm); + void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs, int64_t shift, uint32_t oprsz, uint32_t maxsz); void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs, diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h index a02850583bd..c912578fdd2 100644 --- a/include/tcg/tcg-op.h +++ b/include/tcg/tcg-op.h @@ -9,6 +9,8 @@ #define TCG_TCG_OP_H #include "tcg/tcg-op-common.h" +#include "tcg/insn-start-words.h" +#include "exec/target_long.h" #ifndef TARGET_LONG_BITS #error must include QEMU headers @@ -22,24 +24,34 @@ # error #endif -#ifndef TARGET_INSN_START_EXTRA_WORDS +#if INSN_START_WORDS != 3 +# error Mismatch with insn-start-words.h +#endif + +#if TARGET_INSN_START_EXTRA_WORDS == 0 static inline void tcg_gen_insn_start(target_ulong pc) { - TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 64 / TCG_TARGET_REG_BITS); + TCGOp *op = tcg_emit_op(INDEX_op_insn_start, + INSN_START_WORDS * 64 / TCG_TARGET_REG_BITS); tcg_set_insn_start_param(op, 0, pc); + tcg_set_insn_start_param(op, 1, 0); + 
tcg_set_insn_start_param(op, 2, 0); } #elif TARGET_INSN_START_EXTRA_WORDS == 1 static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1) { - TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 2 * 64 / TCG_TARGET_REG_BITS); + TCGOp *op = tcg_emit_op(INDEX_op_insn_start, + INSN_START_WORDS * 64 / TCG_TARGET_REG_BITS); tcg_set_insn_start_param(op, 0, pc); tcg_set_insn_start_param(op, 1, a1); + tcg_set_insn_start_param(op, 2, 0); } #elif TARGET_INSN_START_EXTRA_WORDS == 2 static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1, target_ulong a2) { - TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 3 * 64 / TCG_TARGET_REG_BITS); + TCGOp *op = tcg_emit_op(INDEX_op_insn_start, + INSN_START_WORDS * 64 / TCG_TARGET_REG_BITS); tcg_set_insn_start_param(op, 0, pc); tcg_set_insn_start_param(op, 1, a1); tcg_set_insn_start_param(op, 2, a2); @@ -252,6 +264,7 @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64) #define tcg_gen_movcond_tl tcg_gen_movcond_i64 #define tcg_gen_add2_tl tcg_gen_add2_i64 #define tcg_gen_sub2_tl tcg_gen_sub2_i64 +#define tcg_gen_addcio_tl tcg_gen_addcio_i64 #define tcg_gen_mulu2_tl tcg_gen_mulu2_i64 #define tcg_gen_muls2_tl tcg_gen_muls2_i64 #define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i64 @@ -370,6 +383,7 @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64) #define tcg_gen_movcond_tl tcg_gen_movcond_i32 #define tcg_gen_add2_tl tcg_gen_add2_i32 #define tcg_gen_sub2_tl tcg_gen_sub2_i32 +#define tcg_gen_addcio_tl tcg_gen_addcio_i32 #define tcg_gen_mulu2_tl tcg_gen_mulu2_i32 #define tcg_gen_muls2_tl tcg_gen_muls2_i32 #define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i32 diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h index 546eb49c11c..e988edd93ab 100644 --- a/include/tcg/tcg-opc.h +++ b/include/tcg/tcg-opc.h @@ -33,286 +33,160 @@ DEF(set_label, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT) /* variable number of parameters */ DEF(call, 0, 0, 3, TCG_OPF_CALL_CLOBBER | TCG_OPF_NOT_PRESENT) -DEF(br, 0, 0, 1, TCG_OPF_BB_END) - -#define IMPL(X) (__builtin_constant_p(X) && (X) <= 0 ? 
TCG_OPF_NOT_PRESENT : 0) -#if TCG_TARGET_REG_BITS == 32 -# define IMPL64 TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT -#else -# define IMPL64 TCG_OPF_64BIT -#endif - -DEF(mb, 0, 0, 1, 0) - -DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT) -DEF(setcond_i32, 1, 2, 1, 0) -DEF(negsetcond_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_negsetcond_i32)) -DEF(movcond_i32, 1, 4, 1, 0) -/* load/store */ -DEF(ld8u_i32, 1, 1, 1, 0) -DEF(ld8s_i32, 1, 1, 1, 0) -DEF(ld16u_i32, 1, 1, 1, 0) -DEF(ld16s_i32, 1, 1, 1, 0) -DEF(ld_i32, 1, 1, 1, 0) -DEF(st8_i32, 0, 2, 1, 0) -DEF(st16_i32, 0, 2, 1, 0) -DEF(st_i32, 0, 2, 1, 0) -/* arith */ -DEF(add_i32, 1, 2, 0, 0) -DEF(sub_i32, 1, 2, 0, 0) -DEF(mul_i32, 1, 2, 0, 0) -DEF(div_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32)) -DEF(divu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32)) -DEF(rem_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32)) -DEF(remu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32)) -DEF(div2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32)) -DEF(divu2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32)) -DEF(and_i32, 1, 2, 0, 0) -DEF(or_i32, 1, 2, 0, 0) -DEF(xor_i32, 1, 2, 0, 0) -/* shifts/rotates */ -DEF(shl_i32, 1, 2, 0, 0) -DEF(shr_i32, 1, 2, 0, 0) -DEF(sar_i32, 1, 2, 0, 0) -DEF(rotl_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32)) -DEF(rotr_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32)) -DEF(deposit_i32, 1, 2, 2, IMPL(TCG_TARGET_HAS_deposit_i32)) -DEF(extract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_extract_i32)) -DEF(sextract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_sextract_i32)) -DEF(extract2_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_extract2_i32)) - -DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH) - -DEF(add2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_add2_i32)) -DEF(sub2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_sub2_i32)) -DEF(mulu2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_mulu2_i32)) -DEF(muls2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_muls2_i32)) -DEF(muluh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i32)) -DEF(mulsh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i32)) -DEF(brcond2_i32, 0, 4, 2, - TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL(TCG_TARGET_REG_BITS == 32)) -DEF(setcond2_i32, 1, 4, 1, IMPL(TCG_TARGET_REG_BITS == 32)) - -DEF(ext8s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8s_i32)) -DEF(ext16s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16s_i32)) -DEF(ext8u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8u_i32)) -DEF(ext16u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16u_i32)) -DEF(bswap16_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap16_i32)) -DEF(bswap32_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap32_i32)) -DEF(not_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_not_i32)) -DEF(neg_i32, 1, 1, 0, 0) -DEF(andc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_andc_i32)) -DEF(orc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_orc_i32)) -DEF(eqv_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_eqv_i32)) -DEF(nand_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nand_i32)) -DEF(nor_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nor_i32)) -DEF(clz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_clz_i32)) -DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32)) -DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32)) - -DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT) -DEF(setcond_i64, 1, 2, 1, IMPL64) -DEF(negsetcond_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_negsetcond_i64)) -DEF(movcond_i64, 1, 4, 1, IMPL64) -/* load/store */ -DEF(ld8u_i64, 1, 1, 1, IMPL64) -DEF(ld8s_i64, 1, 1, 1, IMPL64) -DEF(ld16u_i64, 1, 1, 1, IMPL64) -DEF(ld16s_i64, 1, 1, 1, IMPL64) -DEF(ld32u_i64, 1, 1, 1, IMPL64) -DEF(ld32s_i64, 1, 1, 1, IMPL64) -DEF(ld_i64, 1, 1, 1, IMPL64) -DEF(st8_i64, 0, 2, 1, IMPL64) -DEF(st16_i64, 0, 2, 1, IMPL64) -DEF(st32_i64, 0, 2, 1, IMPL64) -DEF(st_i64, 0, 2, 1, IMPL64) -/* arith 
*/ -DEF(add_i64, 1, 2, 0, IMPL64) -DEF(sub_i64, 1, 2, 0, IMPL64) -DEF(mul_i64, 1, 2, 0, IMPL64) -DEF(div_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64)) -DEF(divu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64)) -DEF(rem_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64)) -DEF(remu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64)) -DEF(div2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64)) -DEF(divu2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64)) -DEF(and_i64, 1, 2, 0, IMPL64) -DEF(or_i64, 1, 2, 0, IMPL64) -DEF(xor_i64, 1, 2, 0, IMPL64) -/* shifts/rotates */ -DEF(shl_i64, 1, 2, 0, IMPL64) -DEF(shr_i64, 1, 2, 0, IMPL64) -DEF(sar_i64, 1, 2, 0, IMPL64) -DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64)) -DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64)) -DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64)) -DEF(extract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_extract_i64)) -DEF(sextract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_sextract_i64)) -DEF(extract2_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_extract2_i64)) +DEF(br, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT) +DEF(brcond, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | TCG_OPF_INT) + +DEF(mb, 0, 0, 1, TCG_OPF_NOT_PRESENT) + +DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT) + +DEF(add, 1, 2, 0, TCG_OPF_INT) +DEF(and, 1, 2, 0, TCG_OPF_INT) +DEF(andc, 1, 2, 0, TCG_OPF_INT) +DEF(bswap16, 1, 1, 1, TCG_OPF_INT) +DEF(bswap32, 1, 1, 1, TCG_OPF_INT) +DEF(bswap64, 1, 1, 1, TCG_OPF_INT) +DEF(clz, 1, 2, 0, TCG_OPF_INT) +DEF(ctpop, 1, 1, 0, TCG_OPF_INT) +DEF(ctz, 1, 2, 0, TCG_OPF_INT) +DEF(deposit, 1, 2, 2, TCG_OPF_INT) +DEF(divs, 1, 2, 0, TCG_OPF_INT) +DEF(divs2, 2, 3, 0, TCG_OPF_INT) +DEF(divu, 1, 2, 0, TCG_OPF_INT) +DEF(divu2, 2, 3, 0, TCG_OPF_INT) +DEF(eqv, 1, 2, 0, TCG_OPF_INT) +DEF(extract, 1, 1, 2, TCG_OPF_INT) +DEF(extract2, 1, 2, 1, TCG_OPF_INT) +DEF(ld8u, 1, 1, 1, TCG_OPF_INT) +DEF(ld8s, 1, 1, 1, TCG_OPF_INT) +DEF(ld16u, 1, 1, 1, TCG_OPF_INT) +DEF(ld16s, 1, 1, 1, TCG_OPF_INT) +DEF(ld32u, 1, 1, 1, TCG_OPF_INT) +DEF(ld32s, 1, 1, 1, TCG_OPF_INT) +DEF(ld, 1, 1, 1, TCG_OPF_INT) +DEF(movcond, 1, 4, 1, TCG_OPF_INT) +DEF(mul, 1, 2, 0, TCG_OPF_INT) +DEF(muls2, 2, 2, 0, TCG_OPF_INT) +DEF(mulsh, 1, 2, 0, TCG_OPF_INT) +DEF(mulu2, 2, 2, 0, TCG_OPF_INT) +DEF(muluh, 1, 2, 0, TCG_OPF_INT) +DEF(nand, 1, 2, 0, TCG_OPF_INT) +DEF(neg, 1, 1, 0, TCG_OPF_INT) +DEF(negsetcond, 1, 2, 1, TCG_OPF_INT) +DEF(nor, 1, 2, 0, TCG_OPF_INT) +DEF(not, 1, 1, 0, TCG_OPF_INT) +DEF(or, 1, 2, 0, TCG_OPF_INT) +DEF(orc, 1, 2, 0, TCG_OPF_INT) +DEF(rems, 1, 2, 0, TCG_OPF_INT) +DEF(remu, 1, 2, 0, TCG_OPF_INT) +DEF(rotl, 1, 2, 0, TCG_OPF_INT) +DEF(rotr, 1, 2, 0, TCG_OPF_INT) +DEF(sar, 1, 2, 0, TCG_OPF_INT) +DEF(setcond, 1, 2, 1, TCG_OPF_INT) +DEF(sextract, 1, 1, 2, TCG_OPF_INT) +DEF(shl, 1, 2, 0, TCG_OPF_INT) +DEF(shr, 1, 2, 0, TCG_OPF_INT) +DEF(st8, 0, 2, 1, TCG_OPF_INT) +DEF(st16, 0, 2, 1, TCG_OPF_INT) +DEF(st32, 0, 2, 1, TCG_OPF_INT) +DEF(st, 0, 2, 1, TCG_OPF_INT) +DEF(sub, 1, 2, 0, TCG_OPF_INT) +DEF(xor, 1, 2, 0, TCG_OPF_INT) + +DEF(addco, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_OUT) +DEF(addc1o, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_OUT) +DEF(addci, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN) +DEF(addcio, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN | TCG_OPF_CARRY_OUT) + +DEF(subbo, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_OUT) +DEF(subb1o, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_OUT) +DEF(subbi, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN) +DEF(subbio, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN | TCG_OPF_CARRY_OUT) + 
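/*
 * Illustrative sketch added by the editor, not part of the patch: the
 * carry-aware opcodes introduced above (addco/addci/addcio and the
 * subbo/subbi/subbio family) are reached from front ends through helpers
 * such as tcg_gen_addcio_i32/_i64, declared in tcg-op-common.h earlier in
 * this series.  Assuming the operand order shown there (result, carry-out,
 * a, b, carry-in), a guest add-with-carry can be emitted as a single op
 * instead of an open-coded add2/setcond sequence.  The helper name below
 * is hypothetical and exists only for illustration.
 */
static inline void gen_adc_i64_example(TCGv_i64 dst, TCGv_i64 carry_out,
                                       TCGv_i64 a, TCGv_i64 b,
                                       TCGv_i64 carry_in)
{
    /* dst = a + b + carry_in; carry_out receives the carry of the full sum */
    tcg_gen_addcio_i64(dst, carry_out, a, b, carry_in);
}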
+DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH) +DEF(setcond2_i32, 1, 4, 1, 0) /* size changing ops */ -DEF(ext_i32_i64, 1, 1, 0, IMPL64) -DEF(extu_i32_i64, 1, 1, 0, IMPL64) -DEF(extrl_i64_i32, 1, 1, 0, - IMPL(TCG_TARGET_HAS_extr_i64_i32) - | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0)) -DEF(extrh_i64_i32, 1, 1, 0, - IMPL(TCG_TARGET_HAS_extr_i64_i32) - | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0)) - -DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL64) -DEF(ext8s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8s_i64)) -DEF(ext16s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16s_i64)) -DEF(ext32s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32s_i64)) -DEF(ext8u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8u_i64)) -DEF(ext16u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16u_i64)) -DEF(ext32u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32u_i64)) -DEF(bswap16_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap16_i64)) -DEF(bswap32_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64)) -DEF(bswap64_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64)) -DEF(not_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_not_i64)) -DEF(neg_i64, 1, 1, 0, IMPL64) -DEF(andc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_andc_i64)) -DEF(orc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_orc_i64)) -DEF(eqv_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_eqv_i64)) -DEF(nand_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nand_i64)) -DEF(nor_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nor_i64)) -DEF(clz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_clz_i64)) -DEF(ctz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctz_i64)) -DEF(ctpop_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctpop_i64)) - -DEF(add2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_add2_i64)) -DEF(sub2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_sub2_i64)) -DEF(mulu2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulu2_i64)) -DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64)) -DEF(muluh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muluh_i64)) -DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64)) +DEF(ext_i32_i64, 1, 1, 0, 0) +DEF(extu_i32_i64, 1, 1, 0, 0) +DEF(extrl_i64_i32, 1, 1, 0, 0) +DEF(extrh_i64_i32, 1, 1, 0, 0) #define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2) -/* There are tcg_ctx->insn_start_words here, not just one. */ -DEF(insn_start, 0, 0, DATA64_ARGS, TCG_OPF_NOT_PRESENT) +DEF(insn_start, 0, 0, DATA64_ARGS * INSN_START_WORDS, TCG_OPF_NOT_PRESENT) -DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) -DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) +DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT) +DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT) DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) DEF(plugin_cb, 0, 0, 1, TCG_OPF_NOT_PRESENT) DEF(plugin_mem_cb, 0, 1, 1, TCG_OPF_NOT_PRESENT) -/* Replicate ld/st ops for 32 and 64-bit guest addresses. 
*/ -DEF(qemu_ld_a32_i32, 1, 1, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) -DEF(qemu_st_a32_i32, 0, 1 + 1, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) -DEF(qemu_ld_a32_i64, DATA64_ARGS, 1, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) -DEF(qemu_st_a32_i64, 0, DATA64_ARGS + 1, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) - -DEF(qemu_ld_a64_i32, 1, DATA64_ARGS, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) -DEF(qemu_st_a64_i32, 0, 1 + DATA64_ARGS, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) -DEF(qemu_ld_a64_i64, DATA64_ARGS, DATA64_ARGS, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) -DEF(qemu_st_a64_i64, 0, DATA64_ARGS + DATA64_ARGS, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) - -/* Only used by i386 to cope with stupid register constraints. */ -DEF(qemu_st8_a32_i32, 0, 1 + 1, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | - IMPL(TCG_TARGET_HAS_qemu_st8_i32)) -DEF(qemu_st8_a64_i32, 0, 1 + DATA64_ARGS, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | - IMPL(TCG_TARGET_HAS_qemu_st8_i32)) - -/* Only for 64-bit hosts at the moment. */ -DEF(qemu_ld_a32_i128, 2, 1, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | - IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) -DEF(qemu_ld_a64_i128, 2, 1, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | - IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) -DEF(qemu_st_a32_i128, 0, 3, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | - IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) -DEF(qemu_st_a64_i128, 0, 3, 1, - TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT | - IMPL(TCG_TARGET_HAS_qemu_ldst_i128)) +DEF(qemu_ld, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT) +DEF(qemu_st, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT) +DEF(qemu_ld2, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT) +DEF(qemu_st2, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT) /* Host vector support. 
*/ -#define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec) - DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT) -DEF(dup_vec, 1, 1, 0, IMPLVEC) -DEF(dup2_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_REG_BITS == 32)) - -DEF(ld_vec, 1, 1, 1, IMPLVEC) -DEF(st_vec, 0, 2, 1, IMPLVEC) -DEF(dupm_vec, 1, 1, 1, IMPLVEC) - -DEF(add_vec, 1, 2, 0, IMPLVEC) -DEF(sub_vec, 1, 2, 0, IMPLVEC) -DEF(mul_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_mul_vec)) -DEF(neg_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_neg_vec)) -DEF(abs_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_abs_vec)) -DEF(ssadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec)) -DEF(usadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec)) -DEF(sssub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec)) -DEF(ussub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec)) -DEF(smin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec)) -DEF(umin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec)) -DEF(smax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec)) -DEF(umax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec)) - -DEF(and_vec, 1, 2, 0, IMPLVEC) -DEF(or_vec, 1, 2, 0, IMPLVEC) -DEF(xor_vec, 1, 2, 0, IMPLVEC) -DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec)) -DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec)) -DEF(nand_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nand_vec)) -DEF(nor_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nor_vec)) -DEF(eqv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_eqv_vec)) -DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec)) - -DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec)) -DEF(shri_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec)) -DEF(sari_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec)) -DEF(rotli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_roti_vec)) - -DEF(shls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec)) -DEF(shrs_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec)) -DEF(sars_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec)) -DEF(rotls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rots_vec)) - -DEF(shlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec)) -DEF(shrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec)) -DEF(sarv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec)) -DEF(rotlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec)) -DEF(rotrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec)) - -DEF(cmp_vec, 1, 2, 1, IMPLVEC) - -DEF(bitsel_vec, 1, 3, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_bitsel_vec)) -DEF(cmpsel_vec, 1, 4, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_cmpsel_vec)) +DEF(dup_vec, 1, 1, 0, TCG_OPF_VECTOR) +DEF(dup2_vec, 1, 2, 0, TCG_OPF_VECTOR) + +DEF(ld_vec, 1, 1, 1, TCG_OPF_VECTOR) +DEF(st_vec, 0, 2, 1, TCG_OPF_VECTOR) +DEF(dupm_vec, 1, 1, 1, TCG_OPF_VECTOR) + +DEF(add_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(sub_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(mul_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(neg_vec, 1, 1, 0, TCG_OPF_VECTOR) +DEF(abs_vec, 1, 1, 0, TCG_OPF_VECTOR) +DEF(ssadd_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(usadd_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(sssub_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(ussub_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(smin_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(umin_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(smax_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(umax_vec, 1, 2, 0, TCG_OPF_VECTOR) + +DEF(and_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(or_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(xor_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(andc_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(orc_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(nand_vec, 1, 2, 0, 
TCG_OPF_VECTOR) +DEF(nor_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(eqv_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(not_vec, 1, 1, 0, TCG_OPF_VECTOR) + +DEF(shli_vec, 1, 1, 1, TCG_OPF_VECTOR) +DEF(shri_vec, 1, 1, 1, TCG_OPF_VECTOR) +DEF(sari_vec, 1, 1, 1, TCG_OPF_VECTOR) +DEF(rotli_vec, 1, 1, 1, TCG_OPF_VECTOR) + +DEF(shls_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(shrs_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(sars_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(rotls_vec, 1, 2, 0, TCG_OPF_VECTOR) + +DEF(shlv_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(shrv_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(sarv_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(rotlv_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(rotrv_vec, 1, 2, 0, TCG_OPF_VECTOR) + +DEF(cmp_vec, 1, 2, 1, TCG_OPF_VECTOR) + +DEF(bitsel_vec, 1, 3, 0, TCG_OPF_VECTOR) +DEF(cmpsel_vec, 1, 4, 1, TCG_OPF_VECTOR) DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT) -#if TCG_TARGET_MAYBE_vec -#include "tcg-target.opc.h" -#endif - -#ifdef TCG_TARGET_INTERPRETER -/* These opcodes are only for use between the tci generator and interpreter. */ -DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT) -DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT) -#endif +#include "tcg-target-opc.h.inc" #undef DATA64_ARGS -#undef IMPL -#undef IMPL64 -#undef IMPLVEC #undef DEF diff --git a/include/tcg/tcg-temp-internal.h b/include/tcg/tcg-temp-internal.h index 44192c55a9d..98f91e68b70 100644 --- a/include/tcg/tcg-temp-internal.h +++ b/include/tcg/tcg-temp-internal.h @@ -42,4 +42,10 @@ TCGv_i64 tcg_temp_ebb_new_i64(void); TCGv_ptr tcg_temp_ebb_new_ptr(void); TCGv_i128 tcg_temp_ebb_new_i128(void); +/* Forget all freed EBB temps, so that new allocations produce new temps. */ +static inline void tcg_temp_ebb_reset_freed(TCGContext *s) +{ + memset(s->free_temps, 0, sizeof(s->free_temps)); +} + #endif /* TCG_TEMP_FREE_H */ diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index 21d58847412..a6d9aa50d47 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -34,6 +34,7 @@ #include "tcg-target-reg-bits.h" #include "tcg-target.h" #include "tcg/tcg-cond.h" +#include "tcg/insn-start-words.h" #include "tcg/debug-assert.h" /* XXX: make safe guess about sizes */ @@ -64,111 +65,6 @@ typedef uint64_t TCGRegSet; #error unsupported #endif -#if TCG_TARGET_REG_BITS == 32 -/* Turn some undef macros into false macros. */ -#define TCG_TARGET_HAS_extr_i64_i32 0 -#define TCG_TARGET_HAS_div_i64 0 -#define TCG_TARGET_HAS_rem_i64 0 -#define TCG_TARGET_HAS_div2_i64 0 -#define TCG_TARGET_HAS_rot_i64 0 -#define TCG_TARGET_HAS_ext8s_i64 0 -#define TCG_TARGET_HAS_ext16s_i64 0 -#define TCG_TARGET_HAS_ext32s_i64 0 -#define TCG_TARGET_HAS_ext8u_i64 0 -#define TCG_TARGET_HAS_ext16u_i64 0 -#define TCG_TARGET_HAS_ext32u_i64 0 -#define TCG_TARGET_HAS_bswap16_i64 0 -#define TCG_TARGET_HAS_bswap32_i64 0 -#define TCG_TARGET_HAS_bswap64_i64 0 -#define TCG_TARGET_HAS_not_i64 0 -#define TCG_TARGET_HAS_andc_i64 0 -#define TCG_TARGET_HAS_orc_i64 0 -#define TCG_TARGET_HAS_eqv_i64 0 -#define TCG_TARGET_HAS_nand_i64 0 -#define TCG_TARGET_HAS_nor_i64 0 -#define TCG_TARGET_HAS_clz_i64 0 -#define TCG_TARGET_HAS_ctz_i64 0 -#define TCG_TARGET_HAS_ctpop_i64 0 -#define TCG_TARGET_HAS_deposit_i64 0 -#define TCG_TARGET_HAS_extract_i64 0 -#define TCG_TARGET_HAS_sextract_i64 0 -#define TCG_TARGET_HAS_extract2_i64 0 -#define TCG_TARGET_HAS_negsetcond_i64 0 -#define TCG_TARGET_HAS_add2_i64 0 -#define TCG_TARGET_HAS_sub2_i64 0 -#define TCG_TARGET_HAS_mulu2_i64 0 -#define TCG_TARGET_HAS_muls2_i64 0 -#define TCG_TARGET_HAS_muluh_i64 0 -#define TCG_TARGET_HAS_mulsh_i64 0 -/* Turn some undef macros into true macros. 
*/ -#define TCG_TARGET_HAS_add2_i32 1 -#define TCG_TARGET_HAS_sub2_i32 1 -#endif - -#ifndef TCG_TARGET_deposit_i32_valid -#define TCG_TARGET_deposit_i32_valid(ofs, len) 1 -#endif -#ifndef TCG_TARGET_deposit_i64_valid -#define TCG_TARGET_deposit_i64_valid(ofs, len) 1 -#endif -#ifndef TCG_TARGET_extract_i32_valid -#define TCG_TARGET_extract_i32_valid(ofs, len) 1 -#endif -#ifndef TCG_TARGET_extract_i64_valid -#define TCG_TARGET_extract_i64_valid(ofs, len) 1 -#endif - -/* Only one of DIV or DIV2 should be defined. */ -#if defined(TCG_TARGET_HAS_div_i32) -#define TCG_TARGET_HAS_div2_i32 0 -#elif defined(TCG_TARGET_HAS_div2_i32) -#define TCG_TARGET_HAS_div_i32 0 -#define TCG_TARGET_HAS_rem_i32 0 -#endif -#if defined(TCG_TARGET_HAS_div_i64) -#define TCG_TARGET_HAS_div2_i64 0 -#elif defined(TCG_TARGET_HAS_div2_i64) -#define TCG_TARGET_HAS_div_i64 0 -#define TCG_TARGET_HAS_rem_i64 0 -#endif - -#if !defined(TCG_TARGET_HAS_v64) \ - && !defined(TCG_TARGET_HAS_v128) \ - && !defined(TCG_TARGET_HAS_v256) -#define TCG_TARGET_MAYBE_vec 0 -#define TCG_TARGET_HAS_abs_vec 0 -#define TCG_TARGET_HAS_neg_vec 0 -#define TCG_TARGET_HAS_not_vec 0 -#define TCG_TARGET_HAS_andc_vec 0 -#define TCG_TARGET_HAS_orc_vec 0 -#define TCG_TARGET_HAS_nand_vec 0 -#define TCG_TARGET_HAS_nor_vec 0 -#define TCG_TARGET_HAS_eqv_vec 0 -#define TCG_TARGET_HAS_roti_vec 0 -#define TCG_TARGET_HAS_rots_vec 0 -#define TCG_TARGET_HAS_rotv_vec 0 -#define TCG_TARGET_HAS_shi_vec 0 -#define TCG_TARGET_HAS_shs_vec 0 -#define TCG_TARGET_HAS_shv_vec 0 -#define TCG_TARGET_HAS_mul_vec 0 -#define TCG_TARGET_HAS_sat_vec 0 -#define TCG_TARGET_HAS_minmax_vec 0 -#define TCG_TARGET_HAS_bitsel_vec 0 -#define TCG_TARGET_HAS_cmpsel_vec 0 -#define TCG_TARGET_HAS_tst_vec 0 -#else -#define TCG_TARGET_MAYBE_vec 1 -#endif -#ifndef TCG_TARGET_HAS_v64 -#define TCG_TARGET_HAS_v64 0 -#endif -#ifndef TCG_TARGET_HAS_v128 -#define TCG_TARGET_HAS_v128 0 -#endif -#ifndef TCG_TARGET_HAS_v256 -#define TCG_TARGET_HAS_v256 0 -#endif - typedef enum TCGOpcode { #define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name, #include "tcg/tcg-opc.h" @@ -281,29 +177,6 @@ static inline int tcg_type_size(TCGType t) return 4 << i; } -/** - * get_alignment_bits - * @memop: MemOp value - * - * Extract the alignment size from the memop. - */ -static inline unsigned get_alignment_bits(MemOp memop) -{ - unsigned a = memop & MO_AMASK; - - if (a == MO_UNALN) { - /* No alignment required. */ - a = 0; - } else if (a == MO_ALIGN) { - /* A natural alignment requirement. */ - a = memop & MO_SIZE; - } else { - /* A specific alignment requirement. */ - a = a >> MO_ASHIFT; - } - return a; -} - typedef tcg_target_ulong TCGArg; /* Define type and accessor macros for TCG variables. @@ -316,6 +189,7 @@ typedef tcg_target_ulong TCGArg; * TCGv_i64 : 64 bit integer type * TCGv_i128 : 128 bit integer type * TCGv_ptr : a host pointer type + * TCGv_vaddr: an integer type wide enough to hold a target pointer type * TCGv_vec : a host vector type; the exact size is not exposed to the CPU front-end code. * TCGv : an integer type the same size as target_ulong @@ -344,6 +218,14 @@ typedef struct TCGv_ptr_d *TCGv_ptr; typedef struct TCGv_vec_d *TCGv_vec; typedef TCGv_ptr TCGv_env; +#if __SIZEOF_POINTER__ == 4 +typedef TCGv_i32 TCGv_vaddr; +#elif __SIZEOF_POINTER__ == 8 +typedef TCGv_i64 TCGv_vaddr; +#else +# error "sizeof pointer is different from {4,8}" +#endif /* __SIZEOF_POINTER__ */ + /* call flags */ /* Helper does not read globals (either directly or through an exception). 
It implies TCG_CALL_NO_WRITE_GLOBALS. */ @@ -462,7 +344,8 @@ struct TCGOp { #define TCGOP_CALLI(X) (X)->param1 #define TCGOP_CALLO(X) (X)->param2 -#define TCGOP_VECL(X) (X)->param1 +#define TCGOP_TYPE(X) (X)->param1 +#define TCGOP_FLAGS(X) (X)->param2 #define TCGOP_VECE(X) (X)->param2 /* Make sure operands fit in the bitfields above. */ @@ -474,7 +357,7 @@ static inline TCGRegSet output_pref(const TCGOp *op, unsigned i) } struct TCGContext { - uint8_t *pool_cur, *pool_end; + uintptr_t pool_cur, pool_end; TCGPool *pool_first, *pool_current, *pool_first_large; int nb_labels; int nb_globals; @@ -482,11 +365,6 @@ struct TCGContext { int nb_indirects; int nb_ops; TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */ - - int page_mask; - uint8_t page_bits; - uint8_t tlb_dyn_max_bits; - uint8_t insn_start_words; TCGBar guest_mo; TCGRegSet reserved_regs; @@ -520,12 +398,8 @@ struct TCGContext { CPUState *cpu; /* *_trans */ /* These structures are private to tcg-target.c.inc. */ -#ifdef TCG_TARGET_NEED_LDST_LABELS QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels; -#endif -#ifdef TCG_TARGET_NEED_POOL_LABELS struct TCGLabelPoolData *pool_labels; -#endif TCGLabel *exitreq_label; @@ -544,6 +418,17 @@ struct TCGContext { struct qemu_plugin_insn *plugin_insn; #endif + /* For host-specific values. */ +#ifdef __riscv + MemOp riscv_cur_vsew; + TCGType riscv_cur_type; +#endif + /* + * During the tcg_reg_alloc_op loop, we are within a sequence of + * carry-using opcodes like addco+addci. + */ + bool carry_live; + GHashTable *const_table[TCG_TYPE_COUNT]; TCGTempSet free_temps[TCG_TYPE_COUNT]; TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */ @@ -697,23 +582,29 @@ static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t) return (TCGv_ptr)temp_tcgv_i32(t); } +static inline TCGv_vaddr temp_tcgv_vaddr(TCGTemp *t) +{ + return (TCGv_vaddr)temp_tcgv_i32(t); +} + static inline TCGv_vec temp_tcgv_vec(TCGTemp *t) { return (TCGv_vec)temp_tcgv_i32(t); } -static inline TCGArg tcg_get_insn_param(TCGOp *op, int arg) +static inline TCGArg tcg_get_insn_param(TCGOp *op, unsigned arg) { return op->args[arg]; } -static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v) +static inline void tcg_set_insn_param(TCGOp *op, unsigned arg, TCGArg v) { op->args[arg] = v; } -static inline uint64_t tcg_get_insn_start_param(TCGOp *op, int arg) +static inline uint64_t tcg_get_insn_start_param(TCGOp *op, unsigned arg) { + tcg_debug_assert(arg < INSN_START_WORDS); if (TCG_TARGET_REG_BITS == 64) { return tcg_get_insn_param(op, arg); } else { @@ -722,8 +613,9 @@ static inline uint64_t tcg_get_insn_start_param(TCGOp *op, int arg) } } -static inline void tcg_set_insn_start_param(TCGOp *op, int arg, uint64_t v) +static inline void tcg_set_insn_start_param(TCGOp *op, unsigned arg, uint64_t v) { + tcg_debug_assert(arg < INSN_START_WORDS); if (TCG_TARGET_REG_BITS == 64) { tcg_set_insn_param(op, arg, v); } else { @@ -763,17 +655,58 @@ void tcg_region_reset_all(void); size_t tcg_code_size(void); size_t tcg_code_capacity(void); +/** + * tcg_tb_insert: + * @tb: translation block to insert + * + * Insert @tb into the region trees. + */ void tcg_tb_insert(TranslationBlock *tb); + +/** + * tcg_tb_remove: + * @tb: translation block to remove + * + * Remove @tb from the region trees. + */ void tcg_tb_remove(TranslationBlock *tb); + +/** + * tcg_tb_lookup: + * @tc_ptr: host PC to look up + * + * Look up a translation block inside the region trees by @tc_ptr. 
This is + * useful for exception handling, but must not be used for the purposes of + * executing the returned translation block. See struct tb_tc for more + * information. + * + * Returns: a translation block previously inserted into the region trees, + * such that @tc_ptr points anywhere inside the code generated for it, or + * NULL. + */ TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr); + +/** + * tcg_tb_foreach: + * @func: callback + * @user_data: opaque value to pass to @callback + * + * Call @func for each translation block inserted into the region trees. + */ void tcg_tb_foreach(GTraverseFunc func, gpointer user_data); + +/** + * tcg_nb_tbs: + * + * Returns: the number of translation blocks inserted into the region trees. + */ size_t tcg_nb_tbs(void); /* user-mode: Called with mmap_lock held. */ static inline void *tcg_malloc(int size) { TCGContext *s = tcg_ctx; - uint8_t *ptr, *ptr_end; + uintptr_t ptr, ptr_end; /* ??? This is a weak placeholder for minimum malloc alignment. */ size = QEMU_ALIGN_UP(size, 8); @@ -784,7 +717,7 @@ static inline void *tcg_malloc(int size) return tcg_malloc_internal(tcg_ctx, size); } else { s->pool_cur = ptr_end; - return ptr; + return (void *)ptr; } } @@ -797,7 +730,8 @@ void tb_target_set_jmp_target(const TranslationBlock *, int, void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size); -#define TCG_CT_CONST 1 /* any constant of register size */ +#define TCG_CT_CONST 1 /* any constant of register size */ +#define TCG_CT_REG_ZERO 2 /* zero, in TCG_REG_ZERO */ typedef struct TCGArgConstraint { unsigned ct : 16; @@ -824,33 +758,42 @@ enum { /* Instruction has side effects: it cannot be removed if its outputs are not used, and might trigger exceptions. */ TCG_OPF_SIDE_EFFECTS = 0x08, - /* Instruction operands are 64-bits (otherwise 32-bits). */ - TCG_OPF_64BIT = 0x10, + /* Instruction operands may be I32 or I64 */ + TCG_OPF_INT = 0x10, /* Instruction is optional and not implemented by the host, or insn is generic and should not be implemented by the host. */ TCG_OPF_NOT_PRESENT = 0x20, /* Instruction operands are vectors. */ TCG_OPF_VECTOR = 0x40, /* Instruction is a conditional branch. */ - TCG_OPF_COND_BRANCH = 0x80 + TCG_OPF_COND_BRANCH = 0x80, + /* Instruction produces carry out. */ + TCG_OPF_CARRY_OUT = 0x100, + /* Instruction consumes carry in. */ + TCG_OPF_CARRY_IN = 0x200, }; typedef struct TCGOpDef { const char *name; uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args; - uint8_t flags; - TCGArgConstraint *args_ct; + uint16_t flags; } TCGOpDef; -extern TCGOpDef tcg_op_defs[]; +extern const TCGOpDef tcg_op_defs[]; extern const size_t tcg_op_defs_max; -typedef struct TCGTargetOpDef { - TCGOpcode op; - const char *args_ct_str[TCG_MAX_OP_ARGS]; -} TCGTargetOpDef; - -bool tcg_op_supported(TCGOpcode op); +/* + * tcg_op_supported: + * Query if @op, for @type and @flags, is supported by the host + * on which we are currently executing. + */ +bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags); +/* + * tcg_op_deposit_valid: + * Query if a deposit into (ofs, len) is supported for @type by + * the host on which we are currently executing. 
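/*
 * A minimal sketch of how the per-type tcg_op_supported() query declared
 * just above might be used, assuming the INDEX_op_* names generated from
 * tcg-opc.h.  Illustrative only; the real call sites are in tcg/tcg.c and
 * tcg/tcg-op*.c and may pass different flags.
 */
#include "tcg/tcg.h"

static bool can_emit_vector_add(TCGType vec_type)
{
    /* 0 is passed for the flags purely for illustration. */
    return tcg_op_supported(INDEX_op_add_vec, vec_type, 0);
}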
+ */ +bool tcg_op_deposit_valid(TCGType type, unsigned ofs, unsigned len); void tcg_gen_call0(void *func, TCGHelperInfo *, TCGTemp *ret); void tcg_gen_call1(void *func, TCGHelperInfo *, TCGTemp *ret, TCGTemp *); @@ -871,10 +814,6 @@ void tcg_gen_call7(void *func, TCGHelperInfo *, TCGTemp *ret, TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs); void tcg_op_remove(TCGContext *s, TCGOp *op); -TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, - TCGOpcode opc, unsigned nargs); -TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, - TCGOpcode opc, unsigned nargs); /** * tcg_remove_ops_after: @@ -1033,17 +972,10 @@ extern tcg_prologue_fn *tcg_qemu_tb_exec; void tcg_register_jit(const void *buf, size_t buf_size); -#if TCG_TARGET_MAYBE_vec /* Return zero if the tuple (opc, type, vece) is unsupportable; return > 0 if it is directly supportable; return < 0 if we must call tcg_expand_vec_op. */ int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned); -#else -static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve) -{ - return 0; -} -#endif /* Expand the tuple (opc, type, vece) on the given arguments. */ void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...); @@ -1073,5 +1005,7 @@ static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n) bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned); void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs); +/* tcg_dump_stats: Append TCG statistics to @buf */ +void tcg_dump_stats(GString *buf); #endif /* TCG_H */ diff --git a/include/ui/clipboard.h b/include/ui/clipboard.h index ab6acdbd8a4..62a96ce9ff5 100644 --- a/include/ui/clipboard.h +++ b/include/ui/clipboard.h @@ -2,6 +2,7 @@ #define QEMU_CLIPBOARD_H #include "qemu/notify.h" +#include "migration/vmstate.h" /** * DOC: Introduction @@ -25,6 +26,9 @@ typedef enum QemuClipboardSelection QemuClipboardSelection; typedef struct QemuClipboardPeer QemuClipboardPeer; typedef struct QemuClipboardNotify QemuClipboardNotify; typedef struct QemuClipboardInfo QemuClipboardInfo; +typedef struct QemuClipboardContent QemuClipboardContent; + +extern const VMStateDescription vmstate_cbinfo; /** * enum QemuClipboardType @@ -97,6 +101,24 @@ struct QemuClipboardNotify { }; }; + +/** + * struct QemuClipboardContent + * + * @available: whether the data is available + * @requested: whether the data was requested + * @size: the size of the @data + * @data: the clipboard data + * + * Clipboard content. 
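/*
 * A hedged sketch of filling one slot of the QemuClipboardContent struct
 * defined just below, with an opaque payload; field meanings follow the
 * comment above.  The helper name is made up -- real producers live in
 * ui/clipboard.c and the UI front ends.
 */
#include <glib.h>
#include "ui/clipboard.h"

static void clipboard_content_set(QemuClipboardContent *c,
                                  const void *data, uint32_t size)
{
    c->available = true;
    c->requested = false;
    c->size = size;
    c->data = g_memdup2(data, size);   /* owned copy of the payload */
}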
+ */ +struct QemuClipboardContent { + bool available; + bool requested; + uint32_t size; + void *data; +}; + /** * struct QemuClipboardInfo * @@ -112,15 +134,10 @@ struct QemuClipboardNotify { struct QemuClipboardInfo { uint32_t refcount; QemuClipboardPeer *owner; - QemuClipboardSelection selection; + int selection; /* QemuClipboardSelection */ bool has_serial; uint32_t serial; - struct { - bool available; - bool requested; - size_t size; - void *data; - } types[QEMU_CLIPBOARD_TYPE__COUNT]; + QemuClipboardContent types[QEMU_CLIPBOARD_TYPE__COUNT]; }; /** diff --git a/include/ui/console.h b/include/ui/console.h index fa986ab97e3..98feaa58bdd 100644 --- a/include/ui/console.h +++ b/include/ui/console.h @@ -70,8 +70,6 @@ typedef struct QEMUPutMouseEntry QEMUPutMouseEntry; typedef struct QEMUPutKbdEntry QEMUPutKbdEntry; typedef struct QEMUPutLEDEntry QEMUPutLEDEntry; -QEMUPutKbdEntry *qemu_add_kbd_event_handler(QEMUPutKBDEvent *func, - void *opaque); QEMUPutMouseEntry *qemu_add_mouse_event_handler(QEMUPutMouseEvent *func, void *opaque, int absolute, const char *name); @@ -175,7 +173,6 @@ int cursor_get_mono_bpl(QEMUCursor *c); void cursor_set_mono(QEMUCursor *c, uint32_t foreground, uint32_t background, uint8_t *image, int transparent, uint8_t *mask); -void cursor_get_mono_image(QEMUCursor *c, int foreground, uint8_t *mask); void cursor_get_mono_mask(QEMUCursor *c, int transparent, uint8_t *mask); typedef void *QEMUGLContext; @@ -425,6 +422,9 @@ bool console_gl_check_format(DisplayChangeListener *dcl, pixman_format_code_t format); void surface_gl_create_texture(QemuGLShader *gls, DisplaySurface *surface); +bool surface_gl_create_texture_from_fd(DisplaySurface *surface, + int fd, GLuint *texture, + GLuint *mem_obj); void surface_gl_update_texture(QemuGLShader *gls, DisplaySurface *surface, int x, int y, int w, int h); diff --git a/include/ui/dmabuf.h b/include/ui/dmabuf.h index dc74ba895a5..3decdca4979 100644 --- a/include/ui/dmabuf.h +++ b/include/ui/dmabuf.h @@ -10,24 +10,29 @@ #ifndef DMABUF_H #define DMABUF_H +#define DMABUF_MAX_PLANES 4 + typedef struct QemuDmaBuf QemuDmaBuf; QemuDmaBuf *qemu_dmabuf_new(uint32_t width, uint32_t height, - uint32_t stride, uint32_t x, - uint32_t y, uint32_t backing_width, - uint32_t backing_height, uint32_t fourcc, - uint64_t modifier, int dmabuf_fd, + const uint32_t *offset, const uint32_t *stride, + uint32_t x, uint32_t y, + uint32_t backing_width, uint32_t backing_height, + uint32_t fourcc, uint64_t modifier, + const int32_t *dmabuf_fd, uint32_t num_planes, bool allow_fences, bool y0_top); void qemu_dmabuf_free(QemuDmaBuf *dmabuf); G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuDmaBuf, qemu_dmabuf_free); -int qemu_dmabuf_get_fd(QemuDmaBuf *dmabuf); -int qemu_dmabuf_dup_fd(QemuDmaBuf *dmabuf); +const int *qemu_dmabuf_get_fds(QemuDmaBuf *dmabuf, int *nfds); +void qemu_dmabuf_dup_fds(QemuDmaBuf *dmabuf, int *fds, int nfds); void qemu_dmabuf_close(QemuDmaBuf *dmabuf); uint32_t qemu_dmabuf_get_width(QemuDmaBuf *dmabuf); uint32_t qemu_dmabuf_get_height(QemuDmaBuf *dmabuf); -uint32_t qemu_dmabuf_get_stride(QemuDmaBuf *dmabuf); +const uint32_t *qemu_dmabuf_get_offsets(QemuDmaBuf *dmabuf, int *noffsets); +const uint32_t *qemu_dmabuf_get_strides(QemuDmaBuf *dmabuf, int *nstrides); +uint32_t qemu_dmabuf_get_num_planes(QemuDmaBuf *dmabuf); uint32_t qemu_dmabuf_get_fourcc(QemuDmaBuf *dmabuf); uint64_t qemu_dmabuf_get_modifier(QemuDmaBuf *dmabuf); uint32_t qemu_dmabuf_get_texture(QemuDmaBuf *dmabuf); @@ -44,6 +49,5 @@ void qemu_dmabuf_set_texture(QemuDmaBuf *dmabuf, uint32_t 
texture); void qemu_dmabuf_set_fence_fd(QemuDmaBuf *dmabuf, int32_t fence_fd); void qemu_dmabuf_set_sync(QemuDmaBuf *dmabuf, void *sync); void qemu_dmabuf_set_draw_submitted(QemuDmaBuf *dmabuf, bool draw_submitted); -void qemu_dmabuf_set_fd(QemuDmaBuf *dmabuf, int32_t fd); #endif diff --git a/include/ui/egl-helpers.h b/include/ui/egl-helpers.h index 4b8c0d2281c..acf993fcf52 100644 --- a/include/ui/egl-helpers.h +++ b/include/ui/egl-helpers.h @@ -17,6 +17,8 @@ extern bool qemu_egl_angle_d3d; typedef struct egl_fb { int width; int height; + int x; + int y; GLuint texture; GLuint framebuffer; bool delete_texture; @@ -26,7 +28,7 @@ typedef struct egl_fb { #define EGL_FB_INIT { 0, } void egl_fb_destroy(egl_fb *fb); -void egl_fb_setup_default(egl_fb *fb, int width, int height); +void egl_fb_setup_default(egl_fb *fb, int width, int height, int x, int y); void egl_fb_setup_for_tex(egl_fb *fb, int width, int height, GLuint texture, bool delete); void egl_fb_setup_new_tex(egl_fb *fb, int width, int height); @@ -46,8 +48,9 @@ extern int qemu_egl_rn_fd; extern struct gbm_device *qemu_egl_rn_gbm_dev; int egl_rendernode_init(const char *rendernode, DisplayGLMode mode); -int egl_get_fd_for_texture(uint32_t tex_id, EGLint *stride, EGLint *fourcc, - EGLuint64KHR *modifier); +bool egl_dmabuf_export_texture(uint32_t tex_id, int *fd, EGLint *offset, + EGLint *stride, EGLint *fourcc, int *num_planes, + EGLuint64KHR *modifier); void egl_dmabuf_import_texture(QemuDmaBuf *dmabuf); void egl_dmabuf_release_texture(QemuDmaBuf *dmabuf); diff --git a/include/ui/gtk.h b/include/ui/gtk.h index aa3d6370299..3e6ce3cb48c 100644 --- a/include/ui/gtk.h +++ b/include/ui/gtk.h @@ -41,6 +41,7 @@ typedef struct VirtualGfxConsole { DisplaySurface *ds; pixman_image_t *convert; cairo_surface_t *surface; + double preferred_scale; double scale_x; double scale_y; #if defined(CONFIG_OPENGL) @@ -140,6 +141,7 @@ struct GtkDisplayState { GdkCursor *null_cursor; Notifier mouse_mode_notifier; gboolean free_scale; + gboolean keep_aspect_ratio; bool external_pause_update; @@ -224,4 +226,6 @@ int gd_gl_area_make_current(DisplayGLCtx *dgc, /* gtk-clipboard.c */ void gd_clipboard_init(GtkDisplayState *gd); +void gd_update_scale(VirtualConsole *vc, int ww, int wh, int fbw, int fbh); + #endif /* UI_GTK_H */ diff --git a/include/ui/qemu-pixman.h b/include/ui/qemu-pixman.h index ef13a8210cc..2ca0ed7029c 100644 --- a/include/ui/qemu-pixman.h +++ b/include/ui/qemu-pixman.h @@ -12,6 +12,8 @@ #include "pixman-minimal.h" #endif +#include "qapi/error.h" + /* * pixman image formats are defined to be native endian, * that means host byte order on qemu. 
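/*
 * A hedged sketch of the per-plane qemu_dmabuf_new() signature shown in
 * the ui/dmabuf.h hunk above, for the common single-plane case.  All
 * concrete values and the use of -1 for unused plane slots are
 * assumptions for illustration.
 */
#include "ui/dmabuf.h"

static QemuDmaBuf *new_single_plane_dmabuf(int fd, uint32_t width,
                                           uint32_t height, uint32_t stride,
                                           uint32_t fourcc, uint64_t modifier)
{
    uint32_t offsets[DMABUF_MAX_PLANES] = { 0 };
    uint32_t strides[DMABUF_MAX_PLANES] = { stride };
    int32_t fds[DMABUF_MAX_PLANES] = { fd, -1, -1, -1 };

    return qemu_dmabuf_new(width, height, offsets, strides,
                           0, 0,                  /* x, y */
                           width, height,         /* backing size */
                           fourcc, modifier,
                           fds, 1,                /* one plane */
                           false,                 /* allow_fences */
                           false);                /* y0_top */
}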
So we go define @@ -73,12 +75,12 @@ PixelFormat qemu_pixelformat_from_pixman(pixman_format_code_t format); pixman_format_code_t qemu_default_pixman_format(int bpp, bool native_endian); pixman_format_code_t qemu_drm_format_to_pixman(uint32_t drm_format); uint32_t qemu_pixman_to_drm_format(pixman_format_code_t pixman); -int qemu_pixman_get_type(int rshift, int gshift, int bshift); +int qemu_pixman_get_type(int rshift, int gshift, int bshift, int endian); bool qemu_pixman_check_format(DisplayChangeListener *dcl, pixman_format_code_t format); #ifdef CONFIG_PIXMAN -pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf); +pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf, int endian); pixman_image_t *qemu_pixman_linebuf_create(pixman_format_code_t format, int width); void qemu_pixman_linebuf_fill(pixman_image_t *linebuf, pixman_image_t *fb, @@ -97,6 +99,28 @@ void qemu_pixman_glyph_render(pixman_image_t *glyph, void qemu_pixman_image_unref(pixman_image_t *image); +#ifdef WIN32 +typedef HANDLE qemu_pixman_shareable; +#define SHAREABLE_NONE (NULL) +#define SHAREABLE_TO_PTR(handle) (handle) +#define PTR_TO_SHAREABLE(ptr) (ptr) +#else +typedef int qemu_pixman_shareable; +#define SHAREABLE_NONE (-1) +#define SHAREABLE_TO_PTR(handle) GINT_TO_POINTER(handle) +#define PTR_TO_SHAREABLE(ptr) GPOINTER_TO_INT(ptr) +#endif + +bool qemu_pixman_image_new_shareable( + pixman_image_t **image, + qemu_pixman_shareable *handle, + const char *name, + pixman_format_code_t format, + int width, + int height, + int rowstride_bytes, + Error **errp); + G_DEFINE_AUTOPTR_CLEANUP_FUNC(pixman_image_t, qemu_pixman_image_unref) #endif /* QEMU_PIXMAN_H */ diff --git a/include/ui/sdl2.h b/include/ui/sdl2.h index e3acc7c82a7..dbe6e3d9739 100644 --- a/include/ui/sdl2.h +++ b/include/ui/sdl2.h @@ -42,6 +42,7 @@ struct sdl2_console { int updates; int idle_counter; int ignore_hotkeys; + bool gui_keysym; SDL_GLContext winctx; QKbdState *kbd; #ifdef CONFIG_OPENGL @@ -60,6 +61,7 @@ void sdl2_poll_events(struct sdl2_console *scon); void sdl2_process_key(struct sdl2_console *scon, SDL_KeyboardEvent *ev); +void sdl2_release_modifiers(struct sdl2_console *scon); void sdl2_2d_update(DisplayChangeListener *dcl, int x, int y, int w, int h); diff --git a/include/ui/spice-display.h b/include/ui/spice-display.h index e1a9b361852..690ece73801 100644 --- a/include/ui/spice-display.h +++ b/include/ui/spice-display.h @@ -132,6 +132,9 @@ struct SimpleSpiceDisplay { egl_fb guest_fb; egl_fb blit_fb; egl_fb cursor_fb; + bool backing_y_0_top; + bool blit_scanout_texture; + bool new_scanout_texture; bool have_hot; #endif }; @@ -151,6 +154,8 @@ struct SimpleSpiceCursor { }; extern bool spice_opengl; +extern bool spice_remote_client; +extern int spice_max_refresh_rate; int qemu_spice_rect_is_empty(const QXLRect* r); void qemu_spice_rect_union(QXLRect *dest, const QXLRect *r); diff --git a/include/ui/surface.h b/include/ui/surface.h index 345b19169d2..006b1986bb9 100644 --- a/include/ui/surface.h +++ b/include/ui/surface.h @@ -22,11 +22,10 @@ typedef struct DisplaySurface { GLenum glformat; GLenum gltype; GLuint texture; + GLuint mem_obj; #endif -#ifdef WIN32 - HANDLE handle; - uint32_t handle_offset; -#endif + qemu_pixman_shareable share_handle; + uint32_t share_handle_offset; } DisplaySurface; PixelFormat qemu_default_pixelformat(int bpp); @@ -37,10 +36,10 @@ DisplaySurface *qemu_create_displaysurface_from(int width, int height, DisplaySurface *qemu_create_displaysurface_pixman(pixman_image_t *image); DisplaySurface 
*qemu_create_placeholder_surface(int w, int h, const char *msg); -#ifdef WIN32 -void qemu_displaysurface_win32_set_handle(DisplaySurface *surface, - HANDLE h, uint32_t offset); -#endif + +void qemu_displaysurface_set_share_handle(DisplaySurface *surface, + qemu_pixman_shareable handle, + uint32_t offset); DisplaySurface *qemu_create_displaysurface(int width, int height); void qemu_free_displaysurface(DisplaySurface *surface); diff --git a/include/user/abitypes.h b/include/user/abitypes.h index 5c9a9556315..be7a8765238 100644 --- a/include/user/abitypes.h +++ b/include/user/abitypes.h @@ -6,7 +6,6 @@ #endif #include "exec/cpu-defs.h" -#include "exec/tswap.h" #include "user/tswap-target.h" #ifdef TARGET_ABI32 @@ -21,13 +20,6 @@ #define ABI_LLONG_ALIGNMENT 2 #endif -#ifdef TARGET_CRIS -#define ABI_SHORT_ALIGNMENT 1 -#define ABI_INT_ALIGNMENT 1 -#define ABI_LONG_ALIGNMENT 1 -#define ABI_LLONG_ALIGNMENT 1 -#endif - #if (defined(TARGET_I386) && !defined(TARGET_X86_64)) \ || defined(TARGET_SH4) \ || defined(TARGET_OPENRISC) \ diff --git a/include/user/cpu_loop.h b/include/user/cpu_loop.h new file mode 100644 index 00000000000..ad8a1d711f0 --- /dev/null +++ b/include/user/cpu_loop.h @@ -0,0 +1,88 @@ +/* + * qemu user cpu loop + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef USER_CPU_LOOP_H +#define USER_CPU_LOOP_H + +#include "exec/vaddr.h" +#include "exec/mmu-access-type.h" + + +/** + * adjust_signal_pc: + * @pc: raw pc from the host signal ucontext_t. + * @is_write: host memory operation was write, or read-modify-write. + * + * Alter @pc as required for unwinding. Return the type of the + * guest memory access -- host reads may be for guest execution. + */ +MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write); + +/** + * handle_sigsegv_accerr_write: + * @cpu: the cpu context + * @old_set: the sigset_t from the signal ucontext_t + * @host_pc: the host pc, adjusted for the signal + * @host_addr: the host address of the fault + * + * Return true if the write fault has been handled, and should be re-tried. + */ +bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set, + uintptr_t host_pc, vaddr guest_addr); + +/** + * cpu_loop_exit_sigsegv: + * @cpu: the cpu context + * @addr: the guest address of the fault + * @access_type: access was read/write/execute + * @maperr: true for invalid page, false for permission fault + * @ra: host pc for unwinding + * + * Use the TCGCPUOps hook to record cpu state, do guest operating system + * specific things to raise SIGSEGV, and jump to the main cpu loop. 
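/*
 * A hedged sketch of the user-mode fault path these hooks are meant for,
 * assuming the host signal handler has already extracted the pc, the
 * fault kind and the guest address.  Treating every unhandled fault as
 * maperr is a simplification; the authoritative logic lives in the
 * per-OS user-exec code.
 */
#include <signal.h>
#include "user/cpu_loop.h"

static void demo_handle_sigsegv(CPUState *cpu, sigset_t *old_set,
                                uintptr_t host_pc, bool is_write,
                                vaddr guest_addr)
{
    MMUAccessType access_type = adjust_signal_pc(&host_pc, is_write);

    if (is_write &&
        handle_sigsegv_accerr_write(cpu, old_set, host_pc, guest_addr)) {
        return;  /* the write fault was handled; retry the access */
    }
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, true, host_pc);
}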
+ */ +G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra); + +/** + * cpu_loop_exit_sigbus: + * @cpu: the cpu context + * @addr: the guest address of the alignment fault + * @access_type: access was read/write/execute + * @ra: host pc for unwinding + * + * Use the TCGCPUOps hook to record cpu state, do guest operating system + * specific things to raise SIGBUS, and jump to the main cpu loop. + */ +G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + uintptr_t ra); + +G_NORETURN void cpu_loop(CPUArchState *env); + +void target_exception_dump(CPUArchState *env, const char *fmt, int code); +#define EXCP_DUMP(env, fmt, code) \ + target_exception_dump(env, fmt, code) + +typedef struct target_pt_regs target_pt_regs; + +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs); + +#endif diff --git a/include/user/guest-host.h b/include/user/guest-host.h new file mode 100644 index 00000000000..8f7ef758962 --- /dev/null +++ b/include/user/guest-host.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * guest <-> host helpers. + * + * Copyright (c) 2003 Fabrice Bellard + */ + +#ifndef USER_GUEST_HOST_H +#define USER_GUEST_HOST_H + +#include "exec/vaddr.h" +#include "user/guest-base.h" +#include "accel/tcg/cpu-ops.h" + +/* + * If non-zero, the guest virtual address space is a contiguous subset + * of the host virtual address space, i.e. '-R reserved_va' is in effect + * either from the command-line or by default. The value is the last + * byte of the guest address space e.g. UINT32_MAX. + * + * If zero, the host and guest virtual address spaces are intermingled. + */ +extern unsigned long reserved_va; + +/* + * The last byte of the guest address space. + * If reserved_va is non-zero, guest_addr_max matches. + * If reserved_va is zero, guest_addr_max equals the full guest space. + */ +extern unsigned long guest_addr_max; + +static inline vaddr cpu_untagged_addr(CPUState *cs, vaddr x) +{ + const TCGCPUOps *tcg_ops = cs->cc->tcg_ops; + if (tcg_ops->untagged_addr) { + return tcg_ops->untagged_addr(cs, x); + } + return x; +} + +/* All direct uses of g2h and h2g need to go away for usermode softmmu. */ +static inline void *g2h_untagged(vaddr x) +{ + return (void *)((uintptr_t)(x) + guest_base); +} + +static inline void *g2h(CPUState *cs, vaddr x) +{ + return g2h_untagged(cpu_untagged_addr(cs, x)); +} + +static inline bool guest_addr_valid_untagged(vaddr x) +{ + return x <= guest_addr_max; +} + +static inline bool guest_range_valid_untagged(vaddr start, vaddr len) +{ + return len - 1 <= guest_addr_max && start <= guest_addr_max - len + 1; +} + +#define h2g_valid(x) \ + ((uintptr_t)(x) - guest_base <= guest_addr_max) + +#define h2g_nocheck(x) ({ \ + uintptr_t __ret = (uintptr_t)(x) - guest_base; \ + (vaddr)__ret; \ +}) + +#define h2g(x) ({ \ + /* Check if given address fits target address space */ \ + assert(h2g_valid(x)); \ + h2g_nocheck(x); \ +}) + +#endif diff --git a/include/user/mmap.h b/include/user/mmap.h new file mode 100644 index 00000000000..4d5e9aac70a --- /dev/null +++ b/include/user/mmap.h @@ -0,0 +1,32 @@ +/* + * MMAP declarations for QEMU user emulation + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef USER_MMAP_H +#define USER_MMAP_H + +#include "user/abitypes.h" + +/* + * mmap_next_start: The base address for the next mmap without hint, + * increased after each successful map, starting at task_unmapped_base. 
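/*
 * A minimal sketch using the user/guest-host.h helpers declared above:
 * validate a guest range and translate it to a host pointer.  Purely
 * illustrative; the helper name is made up.
 */
#include "user/guest-host.h"

static void *demo_guest_range_to_host(CPUState *cs, vaddr guest_va, vaddr len)
{
    if (!guest_range_valid_untagged(guest_va, len)) {
        return NULL;              /* range exceeds guest_addr_max */
    }
    return g2h(cs, guest_va);     /* guest_base-relative host pointer */
}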
+ * This is an optimization within QEMU and not part of ADDR_COMPAT_LAYOUT. + */ +extern abi_ulong mmap_next_start; + +int target_mprotect(abi_ulong start, abi_ulong len, int prot); + +abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, + int flags, int fd, off_t offset); +int target_munmap(abi_ulong start, abi_ulong len); +abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, + abi_ulong new_size, unsigned long flags, + abi_ulong new_addr); + +abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong alignment); + +void TSA_NO_TSA mmap_fork_start(void); +void TSA_NO_TSA mmap_fork_end(int child); + +#endif diff --git a/include/user/page-protection.h b/include/user/page-protection.h new file mode 100644 index 00000000000..4bde664e4a7 --- /dev/null +++ b/include/user/page-protection.h @@ -0,0 +1,96 @@ +/* + * QEMU page protection declarations. + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: LGPL-2.1+ + */ +#ifndef USER_PAGE_PROTECTION_H +#define USER_PAGE_PROTECTION_H + +#ifndef CONFIG_USER_ONLY +#error Cannot include this header from system emulation +#endif + +#include "exec/vaddr.h" +#include "exec/translation-block.h" + +int page_unprotect(CPUState *cpu, tb_page_addr_t address, uintptr_t pc); + +int page_get_flags(vaddr address); + +/** + * page_set_flags: + * @start: first byte of range + * @last: last byte of range + * @flags: flags to set + * Context: holding mmap lock + * + * Modify the flags of a page and invalidate the code if necessary. + * The flag PAGE_WRITE_ORG is positioned automatically depending + * on PAGE_WRITE. The mmap_lock should already be held. + */ +void page_set_flags(vaddr start, vaddr last, int flags); + +void page_reset_target_data(vaddr start, vaddr last); + +/** + * page_check_range + * @start: first byte of range + * @len: length of range + * @flags: flags required for each page + * + * Return true if every page in [@start, @start+@len) has @flags set. + * Return false if any page is unmapped. Thus testing flags == 0 is + * equivalent to testing for flags == PAGE_VALID. + */ +bool page_check_range(vaddr start, vaddr last, int flags); + +/** + * page_check_range_empty: + * @start: first byte of range + * @last: last byte of range + * Context: holding mmap lock + * + * Return true if the entire range [@start, @last] is unmapped. + * The memory lock must be held so that the caller will can ensure + * the result stays true until a new mapping can be installed. + */ +bool page_check_range_empty(vaddr start, vaddr last); + +/** + * page_find_range_empty + * @min: first byte of search range + * @max: last byte of search range + * @len: size of the hole required + * @align: alignment of the hole required (power of 2) + * + * If there is a range [x, x+@len) within [@min, @max] such that + * x % @align == 0, then return x. Otherwise return -1. + * The memory lock must be held, as the caller will want to ensure + * the returned range stays empty until a new mapping can be installed. + */ +vaddr page_find_range_empty(vaddr min, vaddr max, vaddr len, vaddr align); + +/** + * page_get_target_data + * @address: guest virtual address + * @size: per-page size + * + * Return @size bytes of out-of-band data to associate + * with the guest page at @address, allocating it if necessary. The + * caller should already have verified that the address is valid. + * The value of @size must be the same for every call. + * + * The memory will be freed when the guest page is deallocated, + * e.g. with the munmap system call. 
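/*
 * A hedged sketch combining the page-protection declarations above:
 * probe for a free, aligned hole and confirm it is still empty.  The
 * mmap lock is assumed to be held for the whole sequence; the helper
 * name is illustrative.
 */
#include <glib.h>
#include "user/page-protection.h"

static vaddr demo_find_aligned_hole(vaddr min, vaddr max, vaddr len,
                                    vaddr align)
{
    vaddr start = page_find_range_empty(min, max, len, align);

    if (start != -1) {
        /* Stays true only while the mmap lock is held. */
        g_assert(page_check_range_empty(start, start + len - 1));
    }
    return start;
}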
+ */ +__attribute__((returns_nonnull)) +void *page_get_target_data(vaddr address, size_t size); + +typedef int (*walk_memory_regions_fn)(void *, vaddr, vaddr, int); +int walk_memory_regions(void *, walk_memory_regions_fn); + +void page_dump(FILE *f); + +#endif diff --git a/include/user/signal.h b/include/user/signal.h new file mode 100644 index 00000000000..7fa33b05d91 --- /dev/null +++ b/include/user/signal.h @@ -0,0 +1,25 @@ +/* + * Signal-related declarations. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef USER_SIGNAL_H +#define USER_SIGNAL_H + +#ifndef CONFIG_USER_ONLY +#error Cannot include this header from system emulation +#endif + +/** + * target_to_host_signal: + * @sig: target signal. + * + * On success, return the host signal between 0 (inclusive) and NSIG + * (exclusive) corresponding to the target signal @sig. Return any other value + * on failure. + */ +int target_to_host_signal(int sig); + +extern int host_interrupt_signal; + +#endif diff --git a/io/channel-buffer.c b/io/channel-buffer.c index 8096180f855..189fa67ac21 100644 --- a/io/channel-buffer.c +++ b/io/channel-buffer.c @@ -225,7 +225,7 @@ static GSource *qio_channel_buffer_create_watch(QIOChannel *ioc, static void qio_channel_buffer_class_init(ObjectClass *klass, - void *class_data G_GNUC_UNUSED) + const void *class_data G_GNUC_UNUSED) { QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass); diff --git a/io/channel-command.c b/io/channel-command.c index 6d5f64e146d..8966dd3a2ba 100644 --- a/io/channel-command.c +++ b/io/channel-command.c @@ -358,7 +358,7 @@ static GSource *qio_channel_command_create_watch(QIOChannel *ioc, static void qio_channel_command_class_init(ObjectClass *klass, - void *class_data G_GNUC_UNUSED) + const void *class_data G_GNUC_UNUSED) { QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass); diff --git a/io/channel-file.c b/io/channel-file.c index 2ea8d083600..ca3f180cc2f 100644 --- a/io/channel-file.c +++ b/io/channel-file.c @@ -290,7 +290,7 @@ static GSource *qio_channel_file_create_watch(QIOChannel *ioc, } static void qio_channel_file_class_init(ObjectClass *klass, - void *class_data G_GNUC_UNUSED) + const void *class_data G_GNUC_UNUSED) { QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass); diff --git a/io/channel-null.c b/io/channel-null.c index ef995863483..49f1c80a54e 100644 --- a/io/channel-null.c +++ b/io/channel-null.c @@ -207,7 +207,7 @@ qio_channel_null_create_watch(QIOChannel *ioc, static void qio_channel_null_class_init(ObjectClass *klass, - void *class_data G_GNUC_UNUSED) + const void *class_data G_GNUC_UNUSED) { QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass); diff --git a/io/channel-socket.c b/io/channel-socket.c index 608bcf066ec..3b7ca924ff3 100644 --- a/io/channel-socket.c +++ b/io/channel-socket.c @@ -78,6 +78,17 @@ qio_channel_socket_new(void) return sioc; } +int qio_channel_socket_set_send_buffer(QIOChannelSocket *ioc, + size_t size, + Error **errp) +{ + if (setsockopt(ioc->fd, SOL_SOCKET, SO_SNDBUF, &size, sizeof(size)) < 0) { + error_setg_errno(errp, errno, "Unable to set socket send buffer size"); + return -1; + } + + return 0; +} static int qio_channel_socket_set_fd(QIOChannelSocket *sioc, @@ -949,7 +960,7 @@ static GSource *qio_channel_socket_create_watch(QIOChannel *ioc, } static void qio_channel_socket_class_init(ObjectClass *klass, - void *class_data G_GNUC_UNUSED) + const void *class_data G_GNUC_UNUSED) { QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass); diff --git a/io/channel-tls.c b/io/channel-tls.c index aab630e5ae3..a8248a9216b 
100644 --- a/io/channel-tls.c +++ b/io/channel-tls.c @@ -162,16 +162,17 @@ static void qio_channel_tls_handshake_task(QIOChannelTLS *ioc, GMainContext *context) { Error *err = NULL; - QCryptoTLSSessionHandshakeStatus status; + int status; - if (qcrypto_tls_session_handshake(ioc->session, &err) < 0) { + status = qcrypto_tls_session_handshake(ioc->session, &err); + + if (status < 0) { trace_qio_channel_tls_handshake_fail(ioc); qio_task_set_error(task, err); qio_task_complete(task); return; } - status = qcrypto_tls_session_get_handshake_status(ioc->session); if (status == QCRYPTO_TLS_HANDSHAKE_COMPLETE) { trace_qio_channel_tls_handshake_complete(ioc); if (qcrypto_tls_session_check_credentials(ioc->session, @@ -240,6 +241,11 @@ void qio_channel_tls_handshake(QIOChannelTLS *ioc, { QIOTask *task; + if (qio_channel_has_feature(QIO_CHANNEL(ioc), + QIO_CHANNEL_FEATURE_CONCURRENT_IO)) { + qcrypto_tls_session_require_thread_safety(ioc->session); + } + task = qio_task_new(OBJECT(ioc), func, opaque, destroy); @@ -247,6 +253,85 @@ void qio_channel_tls_handshake(QIOChannelTLS *ioc, qio_channel_tls_handshake_task(ioc, task, context); } +static gboolean qio_channel_tls_bye_io(QIOChannel *ioc, GIOCondition condition, + gpointer user_data); + +static void qio_channel_tls_bye_task(QIOChannelTLS *ioc, QIOTask *task, + GMainContext *context) +{ + GIOCondition condition; + QIOChannelTLSData *data; + int status; + Error *err = NULL; + + status = qcrypto_tls_session_bye(ioc->session, &err); + + if (status < 0) { + trace_qio_channel_tls_bye_fail(ioc); + qio_task_set_error(task, err); + qio_task_complete(task); + return; + } + + if (status == QCRYPTO_TLS_BYE_COMPLETE) { + qio_task_complete(task); + return; + } + + data = g_new0(typeof(*data), 1); + data->task = task; + data->context = context; + + if (context) { + g_main_context_ref(context); + } + + if (status == QCRYPTO_TLS_BYE_SENDING) { + condition = G_IO_OUT; + } else { + condition = G_IO_IN; + } + + trace_qio_channel_tls_bye_pending(ioc, status); + ioc->bye_ioc_tag = qio_channel_add_watch_full(ioc->master, condition, + qio_channel_tls_bye_io, + data, NULL, context); +} + + +static gboolean qio_channel_tls_bye_io(QIOChannel *ioc, GIOCondition condition, + gpointer user_data) +{ + QIOChannelTLSData *data = user_data; + QIOTask *task = data->task; + GMainContext *context = data->context; + QIOChannelTLS *tioc = QIO_CHANNEL_TLS(qio_task_get_source(task)); + + tioc->bye_ioc_tag = 0; + g_free(data); + qio_channel_tls_bye_task(tioc, task, context); + + if (context) { + g_main_context_unref(context); + } + + return FALSE; +} + +static void propagate_error(QIOTask *task, gpointer opaque) +{ + qio_task_propagate_error(task, opaque); +} + +void qio_channel_tls_bye(QIOChannelTLS *ioc, Error **errp) +{ + QIOTask *task; + + task = qio_task_new(OBJECT(ioc), propagate_error, errp, NULL); + + trace_qio_channel_tls_bye_start(ioc); + qio_channel_tls_bye_task(ioc, task, NULL); +} static void qio_channel_tls_init(Object *obj G_GNUC_UNUSED) { @@ -279,6 +364,7 @@ static ssize_t qio_channel_tls_readv(QIOChannel *ioc, tioc->session, iov[i].iov_base, iov[i].iov_len, + flags & QIO_CHANNEL_READ_FLAG_RELAXED_EOF || qatomic_load_acquire(&tioc->shutdown) & QIO_CHANNEL_SHUTDOWN_READ, errp); if (ret == QCRYPTO_TLS_SESSION_ERR_BLOCK) { @@ -379,6 +465,11 @@ static int qio_channel_tls_close(QIOChannel *ioc, g_clear_handle_id(&tioc->hs_ioc_tag, g_source_remove); } + if (tioc->bye_ioc_tag) { + trace_qio_channel_tls_bye_cancel(ioc); + g_clear_handle_id(&tioc->bye_ioc_tag, g_source_remove); + } + 
return qio_channel_close(tioc->master, errp); } @@ -475,7 +566,7 @@ qio_channel_tls_get_session(QIOChannelTLS *ioc) } static void qio_channel_tls_class_init(ObjectClass *klass, - void *class_data G_GNUC_UNUSED) + const void *class_data G_GNUC_UNUSED) { QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass); diff --git a/io/channel-websock.c b/io/channel-websock.c index de39f0d182d..08ddb274f0c 100644 --- a/io/channel-websock.c +++ b/io/channel-websock.c @@ -351,7 +351,7 @@ static void qio_channel_websock_handshake_send_res_ok(QIOChannelWebsock *ioc, QIO_CHANNEL_WEBSOCK_GUID_LEN + 1); /* hash and encode it */ - if (qcrypto_hash_base64(QCRYPTO_HASH_ALG_SHA1, + if (qcrypto_hash_base64(QCRYPTO_HASH_ALGO_SHA1, combined_key, QIO_CHANNEL_WEBSOCK_CLIENT_KEY_LEN + QIO_CHANNEL_WEBSOCK_GUID_LEN, @@ -1308,7 +1308,7 @@ static GSource *qio_channel_websock_create_watch(QIOChannel *ioc, } static void qio_channel_websock_class_init(ObjectClass *klass, - void *class_data G_GNUC_UNUSED) + const void *class_data G_GNUC_UNUSED) { QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass); diff --git a/io/channel.c b/io/channel.c index e3f17c24a00..ebd93227651 100644 --- a/io/channel.c +++ b/io/channel.c @@ -115,7 +115,8 @@ int coroutine_mixed_fn qio_channel_readv_all_eof(QIOChannel *ioc, size_t niov, Error **errp) { - return qio_channel_readv_full_all_eof(ioc, iov, niov, NULL, NULL, errp); + return qio_channel_readv_full_all_eof(ioc, iov, niov, NULL, NULL, 0, + errp); } int coroutine_mixed_fn qio_channel_readv_all(QIOChannel *ioc, @@ -130,6 +131,7 @@ int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc, const struct iovec *iov, size_t niov, int **fds, size_t *nfds, + int flags, Error **errp) { int ret = -1; @@ -155,7 +157,7 @@ int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc, while ((nlocal_iov > 0) || local_fds) { ssize_t len; len = qio_channel_readv_full(ioc, local_iov, nlocal_iov, local_fds, - local_nfds, 0, errp); + local_nfds, flags, errp); if (len == QIO_CHANNEL_ERR_BLOCK) { if (qemu_in_coroutine()) { qio_channel_yield(ioc, G_IO_IN); @@ -222,7 +224,8 @@ int coroutine_mixed_fn qio_channel_readv_full_all(QIOChannel *ioc, int **fds, size_t *nfds, Error **errp) { - int ret = qio_channel_readv_full_all_eof(ioc, iov, niov, fds, nfds, errp); + int ret = qio_channel_readv_full_all_eof(ioc, iov, niov, fds, nfds, 0, + errp); if (ret == 0) { error_setg(errp, "Unexpected end-of-file before all data were read"); diff --git a/io/dns-resolver.c b/io/dns-resolver.c index 53b0e8407a9..3712438f829 100644 --- a/io/dns-resolver.c +++ b/io/dns-resolver.c @@ -111,22 +111,11 @@ static int qio_dns_resolver_lookup_sync_inet(QIODNSResolver *resolver, uaddr, INET6_ADDRSTRLEN, uport, 32, NI_NUMERICHOST | NI_NUMERICSERV); - newaddr->u.inet = (InetSocketAddress){ - .host = g_strdup(uaddr), - .port = g_strdup(uport), - .has_numeric = true, - .numeric = true, - .has_to = iaddr->has_to, - .to = iaddr->to, - .has_ipv4 = iaddr->has_ipv4, - .ipv4 = iaddr->ipv4, - .has_ipv6 = iaddr->has_ipv6, - .ipv6 = iaddr->ipv6, -#ifdef HAVE_IPPROTO_MPTCP - .has_mptcp = iaddr->has_mptcp, - .mptcp = iaddr->mptcp, -#endif - }; + newaddr->u.inet = *iaddr; + newaddr->u.inet.host = g_strdup(uaddr), + newaddr->u.inet.port = g_strdup(uport), + newaddr->u.inet.has_numeric = true, + newaddr->u.inet.numeric = true, (*addrs)[i] = newaddr; } diff --git a/io/trace-events b/io/trace-events index d4c0f84a9a2..dc3a63ba1fb 100644 --- a/io/trace-events +++ b/io/trace-events @@ -44,6 +44,11 @@ qio_channel_tls_handshake_pending(void *ioc, int 
status) "TLS handshake pending qio_channel_tls_handshake_fail(void *ioc) "TLS handshake fail ioc=%p" qio_channel_tls_handshake_complete(void *ioc) "TLS handshake complete ioc=%p" qio_channel_tls_handshake_cancel(void *ioc) "TLS handshake cancel ioc=%p" +qio_channel_tls_bye_start(void *ioc) "TLS termination start ioc=%p" +qio_channel_tls_bye_pending(void *ioc, int status) "TLS termination pending ioc=%p status=%d" +qio_channel_tls_bye_fail(void *ioc) "TLS termination fail ioc=%p" +qio_channel_tls_bye_complete(void *ioc) "TLS termination complete ioc=%p" +qio_channel_tls_bye_cancel(void *ioc) "TLS termination cancel ioc=%p" qio_channel_tls_credentials_allow(void *ioc) "TLS credentials allow ioc=%p" qio_channel_tls_credentials_deny(void *ioc) "TLS credentials deny ioc=%p" diff --git a/iothread.c b/iothread.c index e1e9e047365..8810376dcea 100644 --- a/iothread.c +++ b/iothread.c @@ -17,8 +17,8 @@ #include "qemu/module.h" #include "block/aio.h" #include "block/block.h" -#include "sysemu/event-loop-base.h" -#include "sysemu/iothread.h" +#include "system/event-loop-base.h" +#include "system/iothread.h" #include "qapi/error.h" #include "qapi/qapi-commands-misc.h" #include "qemu/error-report.h" @@ -292,7 +292,7 @@ static void iothread_set_poll_param(Object *obj, Visitor *v, } } -static void iothread_class_init(ObjectClass *klass, void *class_data) +static void iothread_class_init(ObjectClass *klass, const void *class_data) { EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(klass); diff --git a/job.c b/job.c index 660ce22c56b..0653bc2ba69 100644 --- a/job.c +++ b/job.c @@ -251,6 +251,12 @@ bool job_is_cancelled_locked(Job *job) return job->force_cancel; } +bool job_is_paused(Job *job) +{ + JOB_LOCK_GUARD(); + return job->paused; +} + bool job_is_cancelled(Job *job) { JOB_LOCK_GUARD(); diff --git a/libdecnumber/decContext.c b/libdecnumber/decContext.c index 1956edf0a7a..d99b08026c2 100644 --- a/libdecnumber/decContext.c +++ b/libdecnumber/decContext.c @@ -24,9 +24,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with GCC; see the file COPYING. If not, see + . */ /* ------------------------------------------------------------------ */ /* Decimal Context module */ diff --git a/libdecnumber/decNumber.c b/libdecnumber/decNumber.c index 31282adafdc..4b57d8a6fe2 100644 --- a/libdecnumber/decNumber.c +++ b/libdecnumber/decNumber.c @@ -24,9 +24,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with GCC; see the file COPYING. If not, see + . */ /* ------------------------------------------------------------------ */ /* Decimal Number arithmetic module */ diff --git a/libdecnumber/dpd/decimal128.c b/libdecnumber/dpd/decimal128.c index ca4764e5471..1064fb25e01 100644 --- a/libdecnumber/dpd/decimal128.c +++ b/libdecnumber/dpd/decimal128.c @@ -24,9 +24,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with GCC; see the file COPYING. If not, see + . 
*/ /* ------------------------------------------------------------------ */ /* Decimal 128-bit format module */ diff --git a/libdecnumber/dpd/decimal32.c b/libdecnumber/dpd/decimal32.c index 53f29789d75..34ff0fe9599 100644 --- a/libdecnumber/dpd/decimal32.c +++ b/libdecnumber/dpd/decimal32.c @@ -24,9 +24,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with GCC; see the file COPYING. If not, see + . */ /* ------------------------------------------------------------------ */ /* Decimal 32-bit format module */ diff --git a/libdecnumber/dpd/decimal64.c b/libdecnumber/dpd/decimal64.c index 290dbe81771..11e0674fa7c 100644 --- a/libdecnumber/dpd/decimal64.c +++ b/libdecnumber/dpd/decimal64.c @@ -24,9 +24,8 @@ for more details. You should have received a copy of the GNU General Public License - along with GCC; see the file COPYING. If not, write to the Free - Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with GCC; see the file COPYING. If not, see + . */ /* ------------------------------------------------------------------ */ /* Decimal 64-bit format module */ diff --git a/linux-headers/asm-arm/bitsperlong.h b/linux-headers/asm-arm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b..00000000000 --- a/linux-headers/asm-arm/bitsperlong.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/linux-headers/asm-arm/kvm.h b/linux-headers/asm-arm/kvm.h deleted file mode 100644 index 0db5644e27a..00000000000 --- a/linux-headers/asm-arm/kvm.h +++ /dev/null @@ -1,312 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ - -#ifndef __ARM_KVM_H__ -#define __ARM_KVM_H__ - -#include -#include -#include - -#define __KVM_HAVE_GUEST_DEBUG -#define __KVM_HAVE_IRQ_LINE -#define __KVM_HAVE_READONLY_MEM -#define __KVM_HAVE_VCPU_EVENTS - -#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 - -#define KVM_REG_SIZE(id) \ - (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) - -/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */ -#define KVM_ARM_SVC_sp svc_regs[0] -#define KVM_ARM_SVC_lr svc_regs[1] -#define KVM_ARM_SVC_spsr svc_regs[2] -#define KVM_ARM_ABT_sp abt_regs[0] -#define KVM_ARM_ABT_lr abt_regs[1] -#define KVM_ARM_ABT_spsr abt_regs[2] -#define KVM_ARM_UND_sp und_regs[0] -#define KVM_ARM_UND_lr und_regs[1] -#define KVM_ARM_UND_spsr und_regs[2] -#define KVM_ARM_IRQ_sp irq_regs[0] -#define KVM_ARM_IRQ_lr irq_regs[1] -#define KVM_ARM_IRQ_spsr irq_regs[2] - -/* Valid only for fiq_regs in struct kvm_regs */ -#define KVM_ARM_FIQ_r8 fiq_regs[0] -#define KVM_ARM_FIQ_r9 fiq_regs[1] -#define KVM_ARM_FIQ_r10 fiq_regs[2] -#define KVM_ARM_FIQ_fp fiq_regs[3] -#define KVM_ARM_FIQ_ip fiq_regs[4] -#define KVM_ARM_FIQ_sp fiq_regs[5] -#define KVM_ARM_FIQ_lr fiq_regs[6] -#define KVM_ARM_FIQ_spsr fiq_regs[7] - -struct kvm_regs { - struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */ - unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ - unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ - unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */ - unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ - unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ -}; - -/* Supported Processor Types */ -#define KVM_ARM_TARGET_CORTEX_A15 0 -#define KVM_ARM_TARGET_CORTEX_A7 1 -#define KVM_ARM_NUM_TARGETS 2 - -/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ -#define KVM_ARM_DEVICE_TYPE_SHIFT 0 -#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT) -#define KVM_ARM_DEVICE_ID_SHIFT 16 -#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT) - -/* Supported device IDs */ -#define KVM_ARM_DEVICE_VGIC_V2 0 - -/* Supported VGIC address types */ -#define KVM_VGIC_V2_ADDR_TYPE_DIST 0 -#define KVM_VGIC_V2_ADDR_TYPE_CPU 1 - -#define KVM_VGIC_V2_DIST_SIZE 0x1000 -#define KVM_VGIC_V2_CPU_SIZE 0x2000 - -/* Supported VGICv3 address types */ -#define KVM_VGIC_V3_ADDR_TYPE_DIST 2 -#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 -#define KVM_VGIC_ITS_ADDR_TYPE 4 -#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5 - -#define KVM_VGIC_V3_DIST_SIZE SZ_64K -#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) -#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K) - -#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ -#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */ - -struct kvm_vcpu_init { - __u32 target; - __u32 features[7]; -}; - -struct kvm_sregs { -}; - -struct kvm_fpu { -}; - -struct kvm_guest_debug_arch { -}; - -struct kvm_debug_exit_arch { -}; - -struct kvm_sync_regs { - /* Used with KVM_CAP_ARM_USER_IRQ */ - __u64 device_irq_level; -}; - -struct kvm_arch_memory_slot { -}; - -/* for KVM_GET/SET_VCPU_EVENTS */ -struct kvm_vcpu_events { - struct { - __u8 serror_pending; - __u8 serror_has_esr; - __u8 ext_dabt_pending; - /* Align it to 8 bytes */ - __u8 pad[5]; - __u64 serror_esr; - } exception; - __u32 reserved[12]; -}; - -/* If you need to interpret the index values, here is the key: */ -#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 -#define KVM_REG_ARM_COPROC_SHIFT 16 -#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007 -#define KVM_REG_ARM_32_OPC2_SHIFT 0 -#define 
KVM_REG_ARM_OPC1_MASK 0x0000000000000078 -#define KVM_REG_ARM_OPC1_SHIFT 3 -#define KVM_REG_ARM_CRM_MASK 0x0000000000000780 -#define KVM_REG_ARM_CRM_SHIFT 7 -#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 -#define KVM_REG_ARM_32_CRN_SHIFT 11 -/* - * For KVM currently all guest registers are nonsecure, but we reserve a bit - * in the encoding to distinguish secure from nonsecure for AArch32 system - * registers that are banked by security. This is 1 for the secure banked - * register, and 0 for the nonsecure banked register or if the register is - * not banked by security. - */ -#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000 -#define KVM_REG_ARM_SECURE_SHIFT 28 - -#define ARM_CP15_REG_SHIFT_MASK(x,n) \ - (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) - -#define __ARM_CP15_REG(op1,crn,crm,op2) \ - (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \ - ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \ - ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \ - ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \ - ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2)) - -#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32) - -#define __ARM_CP15_REG64(op1,crm) \ - (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64) -#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__) - -/* PL1 Physical Timer Registers */ -#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1) -#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14) -#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14) - -/* Virtual Timer Registers */ -#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1) -#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14) -#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14) - -/* Normal registers are mapped as coprocessor 16. */ -#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) - -/* Some registers need more space to represent values. */ -#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00 -#define KVM_REG_ARM_DEMUX_ID_SHIFT 8 -#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT) -#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF -#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0 - -/* VFP registers: we could overload CP10 like ARM does, but that's ugly. */ -#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF -#define KVM_REG_ARM_VFP_BASE_REG 0x0 -#define KVM_REG_ARM_VFP_FPSID 0x1000 -#define KVM_REG_ARM_VFP_FPSCR 0x1001 -#define KVM_REG_ARM_VFP_MVFR1 0x1006 -#define KVM_REG_ARM_VFP_MVFR0 0x1007 -#define KVM_REG_ARM_VFP_FPEXC 0x1008 -#define KVM_REG_ARM_VFP_FPINST 0x1009 -#define KVM_REG_ARM_VFP_FPINST2 0x100A - -/* KVM-as-firmware specific pseudo-registers */ -#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ - KVM_REG_ARM_FW | ((r) & 0xffff)) -#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1) - /* Higher values mean better protection. */ -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2) - /* Higher values mean better protection. 
*/ -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4) - -/* Device Control API: ARM VGIC */ -#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 -#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 -#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 -#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 -#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) -#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32 -#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \ - (0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT) -#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 -#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) -#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff) -#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3 -#define KVM_DEV_ARM_VGIC_GRP_CTRL 4 -#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 -#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 -#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 -#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8 -#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 -#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ - (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) -#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff -#define VGIC_LEVEL_INFO_LINE_LEVEL 0 - -/* Device Control API on vcpu fd */ -#define KVM_ARM_VCPU_PMU_V3_CTRL 0 -#define KVM_ARM_VCPU_PMU_V3_IRQ 0 -#define KVM_ARM_VCPU_PMU_V3_INIT 1 -#define KVM_ARM_VCPU_TIMER_CTRL 1 -#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 -#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 - -#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 -#define KVM_DEV_ARM_ITS_SAVE_TABLES 1 -#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 -#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 -#define KVM_DEV_ARM_ITS_CTRL_RESET 4 - -/* KVM_IRQ_LINE irq field index values */ -#define KVM_ARM_IRQ_VCPU2_SHIFT 28 -#define KVM_ARM_IRQ_VCPU2_MASK 0xf -#define KVM_ARM_IRQ_TYPE_SHIFT 24 -#define KVM_ARM_IRQ_TYPE_MASK 0xf -#define KVM_ARM_IRQ_VCPU_SHIFT 16 -#define KVM_ARM_IRQ_VCPU_MASK 0xff -#define KVM_ARM_IRQ_NUM_SHIFT 0 -#define KVM_ARM_IRQ_NUM_MASK 0xffff - -/* irq_type field */ -#define KVM_ARM_IRQ_TYPE_CPU 0 -#define KVM_ARM_IRQ_TYPE_SPI 1 -#define KVM_ARM_IRQ_TYPE_PPI 2 - -/* out-of-kernel GIC cpu interrupt injection irq_number field */ -#define KVM_ARM_IRQ_CPU_IRQ 0 -#define KVM_ARM_IRQ_CPU_FIQ 1 - -/* - * This used to hold the highest supported SPI, but it is now obsolete - * and only here to provide source code level compatibility with older - * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS. - */ -#define KVM_ARM_IRQ_GIC_MAX 127 - -/* One single KVM irqchip, ie. 
the VGIC */ -#define KVM_NR_IRQCHIPS 1 - -/* PSCI interface */ -#define KVM_PSCI_FN_BASE 0x95c1ba5e -#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) - -#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0) -#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1) -#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) -#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) - -#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS -#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED -#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS -#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED - -#endif /* __ARM_KVM_H__ */ diff --git a/linux-headers/asm-arm/mman.h b/linux-headers/asm-arm/mman.h deleted file mode 100644 index 41f99c573b9..00000000000 --- a/linux-headers/asm-arm/mman.h +++ /dev/null @@ -1,4 +0,0 @@ -#include - -#define arch_mmap_check(addr, len, flags) \ - (((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0) diff --git a/linux-headers/asm-arm/unistd-common.h b/linux-headers/asm-arm/unistd-common.h deleted file mode 100644 index 57cd1f21dbd..00000000000 --- a/linux-headers/asm-arm/unistd-common.h +++ /dev/null @@ -1,397 +0,0 @@ -#ifndef _ASM_ARM_UNISTD_COMMON_H -#define _ASM_ARM_UNISTD_COMMON_H 1 - -#define __NR_restart_syscall (__NR_SYSCALL_BASE + 0) -#define __NR_exit (__NR_SYSCALL_BASE + 1) -#define __NR_fork (__NR_SYSCALL_BASE + 2) -#define __NR_read (__NR_SYSCALL_BASE + 3) -#define __NR_write (__NR_SYSCALL_BASE + 4) -#define __NR_open (__NR_SYSCALL_BASE + 5) -#define __NR_close (__NR_SYSCALL_BASE + 6) -#define __NR_creat (__NR_SYSCALL_BASE + 8) -#define __NR_link (__NR_SYSCALL_BASE + 9) -#define __NR_unlink (__NR_SYSCALL_BASE + 10) -#define __NR_execve (__NR_SYSCALL_BASE + 11) -#define __NR_chdir (__NR_SYSCALL_BASE + 12) -#define __NR_mknod (__NR_SYSCALL_BASE + 14) -#define __NR_chmod (__NR_SYSCALL_BASE + 15) -#define __NR_lchown (__NR_SYSCALL_BASE + 16) -#define __NR_lseek (__NR_SYSCALL_BASE + 19) -#define __NR_getpid (__NR_SYSCALL_BASE + 20) -#define __NR_mount (__NR_SYSCALL_BASE + 21) -#define __NR_setuid (__NR_SYSCALL_BASE + 23) -#define __NR_getuid (__NR_SYSCALL_BASE + 24) -#define __NR_ptrace (__NR_SYSCALL_BASE + 26) -#define __NR_pause (__NR_SYSCALL_BASE + 29) -#define __NR_access (__NR_SYSCALL_BASE + 33) -#define __NR_nice (__NR_SYSCALL_BASE + 34) -#define __NR_sync (__NR_SYSCALL_BASE + 36) -#define __NR_kill (__NR_SYSCALL_BASE + 37) -#define __NR_rename (__NR_SYSCALL_BASE + 38) -#define __NR_mkdir (__NR_SYSCALL_BASE + 39) -#define __NR_rmdir (__NR_SYSCALL_BASE + 40) -#define __NR_dup (__NR_SYSCALL_BASE + 41) -#define __NR_pipe (__NR_SYSCALL_BASE + 42) -#define __NR_times (__NR_SYSCALL_BASE + 43) -#define __NR_brk (__NR_SYSCALL_BASE + 45) -#define __NR_setgid (__NR_SYSCALL_BASE + 46) -#define __NR_getgid (__NR_SYSCALL_BASE + 47) -#define __NR_geteuid (__NR_SYSCALL_BASE + 49) -#define __NR_getegid (__NR_SYSCALL_BASE + 50) -#define __NR_acct (__NR_SYSCALL_BASE + 51) -#define __NR_umount2 (__NR_SYSCALL_BASE + 52) -#define __NR_ioctl (__NR_SYSCALL_BASE + 54) -#define __NR_fcntl (__NR_SYSCALL_BASE + 55) -#define __NR_setpgid (__NR_SYSCALL_BASE + 57) -#define __NR_umask (__NR_SYSCALL_BASE + 60) -#define __NR_chroot (__NR_SYSCALL_BASE + 61) -#define __NR_ustat (__NR_SYSCALL_BASE + 62) -#define __NR_dup2 (__NR_SYSCALL_BASE + 63) -#define __NR_getppid (__NR_SYSCALL_BASE + 64) -#define __NR_getpgrp (__NR_SYSCALL_BASE + 65) -#define __NR_setsid (__NR_SYSCALL_BASE + 66) -#define __NR_sigaction (__NR_SYSCALL_BASE + 67) -#define __NR_setreuid (__NR_SYSCALL_BASE + 70) -#define __NR_setregid (__NR_SYSCALL_BASE + 71) -#define 
__NR_sigsuspend (__NR_SYSCALL_BASE + 72) -#define __NR_sigpending (__NR_SYSCALL_BASE + 73) -#define __NR_sethostname (__NR_SYSCALL_BASE + 74) -#define __NR_setrlimit (__NR_SYSCALL_BASE + 75) -#define __NR_getrusage (__NR_SYSCALL_BASE + 77) -#define __NR_gettimeofday (__NR_SYSCALL_BASE + 78) -#define __NR_settimeofday (__NR_SYSCALL_BASE + 79) -#define __NR_getgroups (__NR_SYSCALL_BASE + 80) -#define __NR_setgroups (__NR_SYSCALL_BASE + 81) -#define __NR_symlink (__NR_SYSCALL_BASE + 83) -#define __NR_readlink (__NR_SYSCALL_BASE + 85) -#define __NR_uselib (__NR_SYSCALL_BASE + 86) -#define __NR_swapon (__NR_SYSCALL_BASE + 87) -#define __NR_reboot (__NR_SYSCALL_BASE + 88) -#define __NR_munmap (__NR_SYSCALL_BASE + 91) -#define __NR_truncate (__NR_SYSCALL_BASE + 92) -#define __NR_ftruncate (__NR_SYSCALL_BASE + 93) -#define __NR_fchmod (__NR_SYSCALL_BASE + 94) -#define __NR_fchown (__NR_SYSCALL_BASE + 95) -#define __NR_getpriority (__NR_SYSCALL_BASE + 96) -#define __NR_setpriority (__NR_SYSCALL_BASE + 97) -#define __NR_statfs (__NR_SYSCALL_BASE + 99) -#define __NR_fstatfs (__NR_SYSCALL_BASE + 100) -#define __NR_syslog (__NR_SYSCALL_BASE + 103) -#define __NR_setitimer (__NR_SYSCALL_BASE + 104) -#define __NR_getitimer (__NR_SYSCALL_BASE + 105) -#define __NR_stat (__NR_SYSCALL_BASE + 106) -#define __NR_lstat (__NR_SYSCALL_BASE + 107) -#define __NR_fstat (__NR_SYSCALL_BASE + 108) -#define __NR_vhangup (__NR_SYSCALL_BASE + 111) -#define __NR_wait4 (__NR_SYSCALL_BASE + 114) -#define __NR_swapoff (__NR_SYSCALL_BASE + 115) -#define __NR_sysinfo (__NR_SYSCALL_BASE + 116) -#define __NR_fsync (__NR_SYSCALL_BASE + 118) -#define __NR_sigreturn (__NR_SYSCALL_BASE + 119) -#define __NR_clone (__NR_SYSCALL_BASE + 120) -#define __NR_setdomainname (__NR_SYSCALL_BASE + 121) -#define __NR_uname (__NR_SYSCALL_BASE + 122) -#define __NR_adjtimex (__NR_SYSCALL_BASE + 124) -#define __NR_mprotect (__NR_SYSCALL_BASE + 125) -#define __NR_sigprocmask (__NR_SYSCALL_BASE + 126) -#define __NR_init_module (__NR_SYSCALL_BASE + 128) -#define __NR_delete_module (__NR_SYSCALL_BASE + 129) -#define __NR_quotactl (__NR_SYSCALL_BASE + 131) -#define __NR_getpgid (__NR_SYSCALL_BASE + 132) -#define __NR_fchdir (__NR_SYSCALL_BASE + 133) -#define __NR_bdflush (__NR_SYSCALL_BASE + 134) -#define __NR_sysfs (__NR_SYSCALL_BASE + 135) -#define __NR_personality (__NR_SYSCALL_BASE + 136) -#define __NR_setfsuid (__NR_SYSCALL_BASE + 138) -#define __NR_setfsgid (__NR_SYSCALL_BASE + 139) -#define __NR__llseek (__NR_SYSCALL_BASE + 140) -#define __NR_getdents (__NR_SYSCALL_BASE + 141) -#define __NR__newselect (__NR_SYSCALL_BASE + 142) -#define __NR_flock (__NR_SYSCALL_BASE + 143) -#define __NR_msync (__NR_SYSCALL_BASE + 144) -#define __NR_readv (__NR_SYSCALL_BASE + 145) -#define __NR_writev (__NR_SYSCALL_BASE + 146) -#define __NR_getsid (__NR_SYSCALL_BASE + 147) -#define __NR_fdatasync (__NR_SYSCALL_BASE + 148) -#define __NR__sysctl (__NR_SYSCALL_BASE + 149) -#define __NR_mlock (__NR_SYSCALL_BASE + 150) -#define __NR_munlock (__NR_SYSCALL_BASE + 151) -#define __NR_mlockall (__NR_SYSCALL_BASE + 152) -#define __NR_munlockall (__NR_SYSCALL_BASE + 153) -#define __NR_sched_setparam (__NR_SYSCALL_BASE + 154) -#define __NR_sched_getparam (__NR_SYSCALL_BASE + 155) -#define __NR_sched_setscheduler (__NR_SYSCALL_BASE + 156) -#define __NR_sched_getscheduler (__NR_SYSCALL_BASE + 157) -#define __NR_sched_yield (__NR_SYSCALL_BASE + 158) -#define __NR_sched_get_priority_max (__NR_SYSCALL_BASE + 159) -#define __NR_sched_get_priority_min (__NR_SYSCALL_BASE + 160) -#define 
__NR_sched_rr_get_interval (__NR_SYSCALL_BASE + 161) -#define __NR_nanosleep (__NR_SYSCALL_BASE + 162) -#define __NR_mremap (__NR_SYSCALL_BASE + 163) -#define __NR_setresuid (__NR_SYSCALL_BASE + 164) -#define __NR_getresuid (__NR_SYSCALL_BASE + 165) -#define __NR_poll (__NR_SYSCALL_BASE + 168) -#define __NR_nfsservctl (__NR_SYSCALL_BASE + 169) -#define __NR_setresgid (__NR_SYSCALL_BASE + 170) -#define __NR_getresgid (__NR_SYSCALL_BASE + 171) -#define __NR_prctl (__NR_SYSCALL_BASE + 172) -#define __NR_rt_sigreturn (__NR_SYSCALL_BASE + 173) -#define __NR_rt_sigaction (__NR_SYSCALL_BASE + 174) -#define __NR_rt_sigprocmask (__NR_SYSCALL_BASE + 175) -#define __NR_rt_sigpending (__NR_SYSCALL_BASE + 176) -#define __NR_rt_sigtimedwait (__NR_SYSCALL_BASE + 177) -#define __NR_rt_sigqueueinfo (__NR_SYSCALL_BASE + 178) -#define __NR_rt_sigsuspend (__NR_SYSCALL_BASE + 179) -#define __NR_pread64 (__NR_SYSCALL_BASE + 180) -#define __NR_pwrite64 (__NR_SYSCALL_BASE + 181) -#define __NR_chown (__NR_SYSCALL_BASE + 182) -#define __NR_getcwd (__NR_SYSCALL_BASE + 183) -#define __NR_capget (__NR_SYSCALL_BASE + 184) -#define __NR_capset (__NR_SYSCALL_BASE + 185) -#define __NR_sigaltstack (__NR_SYSCALL_BASE + 186) -#define __NR_sendfile (__NR_SYSCALL_BASE + 187) -#define __NR_vfork (__NR_SYSCALL_BASE + 190) -#define __NR_ugetrlimit (__NR_SYSCALL_BASE + 191) -#define __NR_mmap2 (__NR_SYSCALL_BASE + 192) -#define __NR_truncate64 (__NR_SYSCALL_BASE + 193) -#define __NR_ftruncate64 (__NR_SYSCALL_BASE + 194) -#define __NR_stat64 (__NR_SYSCALL_BASE + 195) -#define __NR_lstat64 (__NR_SYSCALL_BASE + 196) -#define __NR_fstat64 (__NR_SYSCALL_BASE + 197) -#define __NR_lchown32 (__NR_SYSCALL_BASE + 198) -#define __NR_getuid32 (__NR_SYSCALL_BASE + 199) -#define __NR_getgid32 (__NR_SYSCALL_BASE + 200) -#define __NR_geteuid32 (__NR_SYSCALL_BASE + 201) -#define __NR_getegid32 (__NR_SYSCALL_BASE + 202) -#define __NR_setreuid32 (__NR_SYSCALL_BASE + 203) -#define __NR_setregid32 (__NR_SYSCALL_BASE + 204) -#define __NR_getgroups32 (__NR_SYSCALL_BASE + 205) -#define __NR_setgroups32 (__NR_SYSCALL_BASE + 206) -#define __NR_fchown32 (__NR_SYSCALL_BASE + 207) -#define __NR_setresuid32 (__NR_SYSCALL_BASE + 208) -#define __NR_getresuid32 (__NR_SYSCALL_BASE + 209) -#define __NR_setresgid32 (__NR_SYSCALL_BASE + 210) -#define __NR_getresgid32 (__NR_SYSCALL_BASE + 211) -#define __NR_chown32 (__NR_SYSCALL_BASE + 212) -#define __NR_setuid32 (__NR_SYSCALL_BASE + 213) -#define __NR_setgid32 (__NR_SYSCALL_BASE + 214) -#define __NR_setfsuid32 (__NR_SYSCALL_BASE + 215) -#define __NR_setfsgid32 (__NR_SYSCALL_BASE + 216) -#define __NR_getdents64 (__NR_SYSCALL_BASE + 217) -#define __NR_pivot_root (__NR_SYSCALL_BASE + 218) -#define __NR_mincore (__NR_SYSCALL_BASE + 219) -#define __NR_madvise (__NR_SYSCALL_BASE + 220) -#define __NR_fcntl64 (__NR_SYSCALL_BASE + 221) -#define __NR_gettid (__NR_SYSCALL_BASE + 224) -#define __NR_readahead (__NR_SYSCALL_BASE + 225) -#define __NR_setxattr (__NR_SYSCALL_BASE + 226) -#define __NR_lsetxattr (__NR_SYSCALL_BASE + 227) -#define __NR_fsetxattr (__NR_SYSCALL_BASE + 228) -#define __NR_getxattr (__NR_SYSCALL_BASE + 229) -#define __NR_lgetxattr (__NR_SYSCALL_BASE + 230) -#define __NR_fgetxattr (__NR_SYSCALL_BASE + 231) -#define __NR_listxattr (__NR_SYSCALL_BASE + 232) -#define __NR_llistxattr (__NR_SYSCALL_BASE + 233) -#define __NR_flistxattr (__NR_SYSCALL_BASE + 234) -#define __NR_removexattr (__NR_SYSCALL_BASE + 235) -#define __NR_lremovexattr (__NR_SYSCALL_BASE + 236) -#define __NR_fremovexattr (__NR_SYSCALL_BASE + 
237) -#define __NR_tkill (__NR_SYSCALL_BASE + 238) -#define __NR_sendfile64 (__NR_SYSCALL_BASE + 239) -#define __NR_futex (__NR_SYSCALL_BASE + 240) -#define __NR_sched_setaffinity (__NR_SYSCALL_BASE + 241) -#define __NR_sched_getaffinity (__NR_SYSCALL_BASE + 242) -#define __NR_io_setup (__NR_SYSCALL_BASE + 243) -#define __NR_io_destroy (__NR_SYSCALL_BASE + 244) -#define __NR_io_getevents (__NR_SYSCALL_BASE + 245) -#define __NR_io_submit (__NR_SYSCALL_BASE + 246) -#define __NR_io_cancel (__NR_SYSCALL_BASE + 247) -#define __NR_exit_group (__NR_SYSCALL_BASE + 248) -#define __NR_lookup_dcookie (__NR_SYSCALL_BASE + 249) -#define __NR_epoll_create (__NR_SYSCALL_BASE + 250) -#define __NR_epoll_ctl (__NR_SYSCALL_BASE + 251) -#define __NR_epoll_wait (__NR_SYSCALL_BASE + 252) -#define __NR_remap_file_pages (__NR_SYSCALL_BASE + 253) -#define __NR_set_tid_address (__NR_SYSCALL_BASE + 256) -#define __NR_timer_create (__NR_SYSCALL_BASE + 257) -#define __NR_timer_settime (__NR_SYSCALL_BASE + 258) -#define __NR_timer_gettime (__NR_SYSCALL_BASE + 259) -#define __NR_timer_getoverrun (__NR_SYSCALL_BASE + 260) -#define __NR_timer_delete (__NR_SYSCALL_BASE + 261) -#define __NR_clock_settime (__NR_SYSCALL_BASE + 262) -#define __NR_clock_gettime (__NR_SYSCALL_BASE + 263) -#define __NR_clock_getres (__NR_SYSCALL_BASE + 264) -#define __NR_clock_nanosleep (__NR_SYSCALL_BASE + 265) -#define __NR_statfs64 (__NR_SYSCALL_BASE + 266) -#define __NR_fstatfs64 (__NR_SYSCALL_BASE + 267) -#define __NR_tgkill (__NR_SYSCALL_BASE + 268) -#define __NR_utimes (__NR_SYSCALL_BASE + 269) -#define __NR_arm_fadvise64_64 (__NR_SYSCALL_BASE + 270) -#define __NR_pciconfig_iobase (__NR_SYSCALL_BASE + 271) -#define __NR_pciconfig_read (__NR_SYSCALL_BASE + 272) -#define __NR_pciconfig_write (__NR_SYSCALL_BASE + 273) -#define __NR_mq_open (__NR_SYSCALL_BASE + 274) -#define __NR_mq_unlink (__NR_SYSCALL_BASE + 275) -#define __NR_mq_timedsend (__NR_SYSCALL_BASE + 276) -#define __NR_mq_timedreceive (__NR_SYSCALL_BASE + 277) -#define __NR_mq_notify (__NR_SYSCALL_BASE + 278) -#define __NR_mq_getsetattr (__NR_SYSCALL_BASE + 279) -#define __NR_waitid (__NR_SYSCALL_BASE + 280) -#define __NR_socket (__NR_SYSCALL_BASE + 281) -#define __NR_bind (__NR_SYSCALL_BASE + 282) -#define __NR_connect (__NR_SYSCALL_BASE + 283) -#define __NR_listen (__NR_SYSCALL_BASE + 284) -#define __NR_accept (__NR_SYSCALL_BASE + 285) -#define __NR_getsockname (__NR_SYSCALL_BASE + 286) -#define __NR_getpeername (__NR_SYSCALL_BASE + 287) -#define __NR_socketpair (__NR_SYSCALL_BASE + 288) -#define __NR_send (__NR_SYSCALL_BASE + 289) -#define __NR_sendto (__NR_SYSCALL_BASE + 290) -#define __NR_recv (__NR_SYSCALL_BASE + 291) -#define __NR_recvfrom (__NR_SYSCALL_BASE + 292) -#define __NR_shutdown (__NR_SYSCALL_BASE + 293) -#define __NR_setsockopt (__NR_SYSCALL_BASE + 294) -#define __NR_getsockopt (__NR_SYSCALL_BASE + 295) -#define __NR_sendmsg (__NR_SYSCALL_BASE + 296) -#define __NR_recvmsg (__NR_SYSCALL_BASE + 297) -#define __NR_semop (__NR_SYSCALL_BASE + 298) -#define __NR_semget (__NR_SYSCALL_BASE + 299) -#define __NR_semctl (__NR_SYSCALL_BASE + 300) -#define __NR_msgsnd (__NR_SYSCALL_BASE + 301) -#define __NR_msgrcv (__NR_SYSCALL_BASE + 302) -#define __NR_msgget (__NR_SYSCALL_BASE + 303) -#define __NR_msgctl (__NR_SYSCALL_BASE + 304) -#define __NR_shmat (__NR_SYSCALL_BASE + 305) -#define __NR_shmdt (__NR_SYSCALL_BASE + 306) -#define __NR_shmget (__NR_SYSCALL_BASE + 307) -#define __NR_shmctl (__NR_SYSCALL_BASE + 308) -#define __NR_add_key (__NR_SYSCALL_BASE + 309) -#define 
__NR_request_key (__NR_SYSCALL_BASE + 310) -#define __NR_keyctl (__NR_SYSCALL_BASE + 311) -#define __NR_semtimedop (__NR_SYSCALL_BASE + 312) -#define __NR_vserver (__NR_SYSCALL_BASE + 313) -#define __NR_ioprio_set (__NR_SYSCALL_BASE + 314) -#define __NR_ioprio_get (__NR_SYSCALL_BASE + 315) -#define __NR_inotify_init (__NR_SYSCALL_BASE + 316) -#define __NR_inotify_add_watch (__NR_SYSCALL_BASE + 317) -#define __NR_inotify_rm_watch (__NR_SYSCALL_BASE + 318) -#define __NR_mbind (__NR_SYSCALL_BASE + 319) -#define __NR_get_mempolicy (__NR_SYSCALL_BASE + 320) -#define __NR_set_mempolicy (__NR_SYSCALL_BASE + 321) -#define __NR_openat (__NR_SYSCALL_BASE + 322) -#define __NR_mkdirat (__NR_SYSCALL_BASE + 323) -#define __NR_mknodat (__NR_SYSCALL_BASE + 324) -#define __NR_fchownat (__NR_SYSCALL_BASE + 325) -#define __NR_futimesat (__NR_SYSCALL_BASE + 326) -#define __NR_fstatat64 (__NR_SYSCALL_BASE + 327) -#define __NR_unlinkat (__NR_SYSCALL_BASE + 328) -#define __NR_renameat (__NR_SYSCALL_BASE + 329) -#define __NR_linkat (__NR_SYSCALL_BASE + 330) -#define __NR_symlinkat (__NR_SYSCALL_BASE + 331) -#define __NR_readlinkat (__NR_SYSCALL_BASE + 332) -#define __NR_fchmodat (__NR_SYSCALL_BASE + 333) -#define __NR_faccessat (__NR_SYSCALL_BASE + 334) -#define __NR_pselect6 (__NR_SYSCALL_BASE + 335) -#define __NR_ppoll (__NR_SYSCALL_BASE + 336) -#define __NR_unshare (__NR_SYSCALL_BASE + 337) -#define __NR_set_robust_list (__NR_SYSCALL_BASE + 338) -#define __NR_get_robust_list (__NR_SYSCALL_BASE + 339) -#define __NR_splice (__NR_SYSCALL_BASE + 340) -#define __NR_arm_sync_file_range (__NR_SYSCALL_BASE + 341) -#define __NR_tee (__NR_SYSCALL_BASE + 342) -#define __NR_vmsplice (__NR_SYSCALL_BASE + 343) -#define __NR_move_pages (__NR_SYSCALL_BASE + 344) -#define __NR_getcpu (__NR_SYSCALL_BASE + 345) -#define __NR_epoll_pwait (__NR_SYSCALL_BASE + 346) -#define __NR_kexec_load (__NR_SYSCALL_BASE + 347) -#define __NR_utimensat (__NR_SYSCALL_BASE + 348) -#define __NR_signalfd (__NR_SYSCALL_BASE + 349) -#define __NR_timerfd_create (__NR_SYSCALL_BASE + 350) -#define __NR_eventfd (__NR_SYSCALL_BASE + 351) -#define __NR_fallocate (__NR_SYSCALL_BASE + 352) -#define __NR_timerfd_settime (__NR_SYSCALL_BASE + 353) -#define __NR_timerfd_gettime (__NR_SYSCALL_BASE + 354) -#define __NR_signalfd4 (__NR_SYSCALL_BASE + 355) -#define __NR_eventfd2 (__NR_SYSCALL_BASE + 356) -#define __NR_epoll_create1 (__NR_SYSCALL_BASE + 357) -#define __NR_dup3 (__NR_SYSCALL_BASE + 358) -#define __NR_pipe2 (__NR_SYSCALL_BASE + 359) -#define __NR_inotify_init1 (__NR_SYSCALL_BASE + 360) -#define __NR_preadv (__NR_SYSCALL_BASE + 361) -#define __NR_pwritev (__NR_SYSCALL_BASE + 362) -#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE + 363) -#define __NR_perf_event_open (__NR_SYSCALL_BASE + 364) -#define __NR_recvmmsg (__NR_SYSCALL_BASE + 365) -#define __NR_accept4 (__NR_SYSCALL_BASE + 366) -#define __NR_fanotify_init (__NR_SYSCALL_BASE + 367) -#define __NR_fanotify_mark (__NR_SYSCALL_BASE + 368) -#define __NR_prlimit64 (__NR_SYSCALL_BASE + 369) -#define __NR_name_to_handle_at (__NR_SYSCALL_BASE + 370) -#define __NR_open_by_handle_at (__NR_SYSCALL_BASE + 371) -#define __NR_clock_adjtime (__NR_SYSCALL_BASE + 372) -#define __NR_syncfs (__NR_SYSCALL_BASE + 373) -#define __NR_sendmmsg (__NR_SYSCALL_BASE + 374) -#define __NR_setns (__NR_SYSCALL_BASE + 375) -#define __NR_process_vm_readv (__NR_SYSCALL_BASE + 376) -#define __NR_process_vm_writev (__NR_SYSCALL_BASE + 377) -#define __NR_kcmp (__NR_SYSCALL_BASE + 378) -#define __NR_finit_module (__NR_SYSCALL_BASE + 
379) -#define __NR_sched_setattr (__NR_SYSCALL_BASE + 380) -#define __NR_sched_getattr (__NR_SYSCALL_BASE + 381) -#define __NR_renameat2 (__NR_SYSCALL_BASE + 382) -#define __NR_seccomp (__NR_SYSCALL_BASE + 383) -#define __NR_getrandom (__NR_SYSCALL_BASE + 384) -#define __NR_memfd_create (__NR_SYSCALL_BASE + 385) -#define __NR_bpf (__NR_SYSCALL_BASE + 386) -#define __NR_execveat (__NR_SYSCALL_BASE + 387) -#define __NR_userfaultfd (__NR_SYSCALL_BASE + 388) -#define __NR_membarrier (__NR_SYSCALL_BASE + 389) -#define __NR_mlock2 (__NR_SYSCALL_BASE + 390) -#define __NR_copy_file_range (__NR_SYSCALL_BASE + 391) -#define __NR_preadv2 (__NR_SYSCALL_BASE + 392) -#define __NR_pwritev2 (__NR_SYSCALL_BASE + 393) -#define __NR_pkey_mprotect (__NR_SYSCALL_BASE + 394) -#define __NR_pkey_alloc (__NR_SYSCALL_BASE + 395) -#define __NR_pkey_free (__NR_SYSCALL_BASE + 396) -#define __NR_statx (__NR_SYSCALL_BASE + 397) -#define __NR_rseq (__NR_SYSCALL_BASE + 398) -#define __NR_io_pgetevents (__NR_SYSCALL_BASE + 399) -#define __NR_migrate_pages (__NR_SYSCALL_BASE + 400) -#define __NR_kexec_file_load (__NR_SYSCALL_BASE + 401) -#define __NR_clock_gettime64 (__NR_SYSCALL_BASE + 403) -#define __NR_clock_settime64 (__NR_SYSCALL_BASE + 404) -#define __NR_clock_adjtime64 (__NR_SYSCALL_BASE + 405) -#define __NR_clock_getres_time64 (__NR_SYSCALL_BASE + 406) -#define __NR_clock_nanosleep_time64 (__NR_SYSCALL_BASE + 407) -#define __NR_timer_gettime64 (__NR_SYSCALL_BASE + 408) -#define __NR_timer_settime64 (__NR_SYSCALL_BASE + 409) -#define __NR_timerfd_gettime64 (__NR_SYSCALL_BASE + 410) -#define __NR_timerfd_settime64 (__NR_SYSCALL_BASE + 411) -#define __NR_utimensat_time64 (__NR_SYSCALL_BASE + 412) -#define __NR_pselect6_time64 (__NR_SYSCALL_BASE + 413) -#define __NR_ppoll_time64 (__NR_SYSCALL_BASE + 414) -#define __NR_io_pgetevents_time64 (__NR_SYSCALL_BASE + 416) -#define __NR_recvmmsg_time64 (__NR_SYSCALL_BASE + 417) -#define __NR_mq_timedsend_time64 (__NR_SYSCALL_BASE + 418) -#define __NR_mq_timedreceive_time64 (__NR_SYSCALL_BASE + 419) -#define __NR_semtimedop_time64 (__NR_SYSCALL_BASE + 420) -#define __NR_rt_sigtimedwait_time64 (__NR_SYSCALL_BASE + 421) -#define __NR_futex_time64 (__NR_SYSCALL_BASE + 422) -#define __NR_sched_rr_get_interval_time64 (__NR_SYSCALL_BASE + 423) -#define __NR_pidfd_send_signal (__NR_SYSCALL_BASE + 424) -#define __NR_io_uring_setup (__NR_SYSCALL_BASE + 425) -#define __NR_io_uring_enter (__NR_SYSCALL_BASE + 426) -#define __NR_io_uring_register (__NR_SYSCALL_BASE + 427) -#define __NR_open_tree (__NR_SYSCALL_BASE + 428) -#define __NR_move_mount (__NR_SYSCALL_BASE + 429) -#define __NR_fsopen (__NR_SYSCALL_BASE + 430) -#define __NR_fsconfig (__NR_SYSCALL_BASE + 431) -#define __NR_fsmount (__NR_SYSCALL_BASE + 432) -#define __NR_fspick (__NR_SYSCALL_BASE + 433) -#define __NR_pidfd_open (__NR_SYSCALL_BASE + 434) -#define __NR_clone3 (__NR_SYSCALL_BASE + 435) -#define __NR_openat2 (__NR_SYSCALL_BASE + 437) -#define __NR_pidfd_getfd (__NR_SYSCALL_BASE + 438) -#define __NR_faccessat2 (__NR_SYSCALL_BASE + 439) - -#endif /* _ASM_ARM_UNISTD_COMMON_H */ diff --git a/linux-headers/asm-arm/unistd-eabi.h b/linux-headers/asm-arm/unistd-eabi.h deleted file mode 100644 index 266f1fcdfb3..00000000000 --- a/linux-headers/asm-arm/unistd-eabi.h +++ /dev/null @@ -1,5 +0,0 @@ -#ifndef _ASM_ARM_UNISTD_EABI_H -#define _ASM_ARM_UNISTD_EABI_H 1 - - -#endif /* _ASM_ARM_UNISTD_EABI_H */ diff --git a/linux-headers/asm-arm/unistd-oabi.h b/linux-headers/asm-arm/unistd-oabi.h deleted file mode 100644 index 
47d9afb96d5..00000000000 --- a/linux-headers/asm-arm/unistd-oabi.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _ASM_ARM_UNISTD_OABI_H -#define _ASM_ARM_UNISTD_OABI_H 1 - -#define __NR_time (__NR_SYSCALL_BASE + 13) -#define __NR_umount (__NR_SYSCALL_BASE + 22) -#define __NR_stime (__NR_SYSCALL_BASE + 25) -#define __NR_alarm (__NR_SYSCALL_BASE + 27) -#define __NR_utime (__NR_SYSCALL_BASE + 30) -#define __NR_getrlimit (__NR_SYSCALL_BASE + 76) -#define __NR_select (__NR_SYSCALL_BASE + 82) -#define __NR_readdir (__NR_SYSCALL_BASE + 89) -#define __NR_mmap (__NR_SYSCALL_BASE + 90) -#define __NR_socketcall (__NR_SYSCALL_BASE + 102) -#define __NR_syscall (__NR_SYSCALL_BASE + 113) -#define __NR_ipc (__NR_SYSCALL_BASE + 117) - -#endif /* _ASM_ARM_UNISTD_OABI_H */ diff --git a/linux-headers/asm-arm/unistd.h b/linux-headers/asm-arm/unistd.h deleted file mode 100644 index 18b0825885e..00000000000 --- a/linux-headers/asm-arm/unistd.h +++ /dev/null @@ -1,41 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * arch/arm/include/asm/unistd.h - * - * Copyright (C) 2001-2005 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Please forward _all_ changes to this file to rmk@arm.linux.org.uk, - * no matter what the change is. Thanks! - */ -#ifndef __ASM_ARM_UNISTD_H -#define __ASM_ARM_UNISTD_H - -#define __NR_OABI_SYSCALL_BASE 0x900000 - -#if defined(__thumb__) || defined(__ARM_EABI__) -#define __NR_SYSCALL_BASE 0 -#include -#else -#define __NR_SYSCALL_BASE __NR_OABI_SYSCALL_BASE -#include -#endif - -#include -#define __NR_sync_file_range2 __NR_arm_sync_file_range - -/* - * The following SWIs are ARM private. 
- */ -#define __ARM_NR_BASE (__NR_SYSCALL_BASE+0x0f0000) -#define __ARM_NR_breakpoint (__ARM_NR_BASE+1) -#define __ARM_NR_cacheflush (__ARM_NR_BASE+2) -#define __ARM_NR_usr26 (__ARM_NR_BASE+3) -#define __ARM_NR_usr32 (__ARM_NR_BASE+4) -#define __ARM_NR_set_tls (__ARM_NR_BASE+5) -#define __ARM_NR_get_tls (__ARM_NR_BASE+6) - -#endif /* __ASM_ARM_UNISTD_H */ diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h index 2af9931ae98..f4d9baafa1c 100644 --- a/linux-headers/asm-arm64/kvm.h +++ b/linux-headers/asm-arm64/kvm.h @@ -43,9 +43,6 @@ #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_DIRTY_LOG_PAGE_OFFSET 64 -#define KVM_REG_SIZE(id) \ - (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) - struct kvm_regs { struct user_pt_regs regs; /* sp = sp_el0 */ @@ -108,6 +105,7 @@ struct kvm_regs { #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ #define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */ +#define KVM_ARM_VCPU_HAS_EL2_E2H0 8 /* Limit NV support to E2H RES0 */ struct kvm_vcpu_init { __u32 target; @@ -368,6 +366,7 @@ enum { KVM_REG_ARM_STD_HYP_BIT_PV_TIME = 0, }; +/* Vendor hyper call function numbers 0-63 */ #define KVM_REG_ARM_VENDOR_HYP_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(2) enum { @@ -375,6 +374,14 @@ enum { KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1, }; +/* Vendor hyper call function numbers 64-127 */ +#define KVM_REG_ARM_VENDOR_HYP_BMAP_2 KVM_REG_ARM_FW_FEAT_BMAP_REG(3) + +enum { + KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_VER = 0, + KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_CPUS = 1, +}; + /* Device Control API on vm fd */ #define KVM_ARM_VM_SMCCC_CTRL 0 #define KVM_ARM_VM_SMCCC_FILTER 0 @@ -397,6 +404,7 @@ enum { #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 #define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8 +#define KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ 9 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) @@ -411,10 +419,11 @@ enum { /* Device Control API on vcpu fd */ #define KVM_ARM_VCPU_PMU_V3_CTRL 0 -#define KVM_ARM_VCPU_PMU_V3_IRQ 0 -#define KVM_ARM_VCPU_PMU_V3_INIT 1 -#define KVM_ARM_VCPU_PMU_V3_FILTER 2 -#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3 +#define KVM_ARM_VCPU_PMU_V3_IRQ 0 +#define KVM_ARM_VCPU_PMU_V3_INIT 1 +#define KVM_ARM_VCPU_PMU_V3_FILTER 2 +#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3 +#define KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS 4 #define KVM_ARM_VCPU_TIMER_CTRL 1 #define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 @@ -473,6 +482,12 @@ enum { */ #define KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 (1ULL << 0) +/* + * Shutdown caused by a PSCI v1.3 SYSTEM_OFF2 call. + * Valid only when the system event has a type of KVM_SYSTEM_EVENT_SHUTDOWN. + */ +#define KVM_SYSTEM_EVENT_SHUTDOWN_FLAG_PSCI_OFF2 (1ULL << 0) + /* run->fail_entry.hardware_entry_failure_reason codes. 
*/ #define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED (1ULL << 0) diff --git a/linux-headers/asm-arm64/mman.h b/linux-headers/asm-arm64/mman.h index d0dbfe95878..7b500a3a7be 100644 --- a/linux-headers/asm-arm64/mman.h +++ b/linux-headers/asm-arm64/mman.h @@ -7,4 +7,13 @@ #define PROT_BTI 0x10 /* BTI guarded page */ #define PROT_MTE 0x20 /* Normal Tagged mapping */ +/* Override any generic PKEY permission defines */ +#define PKEY_DISABLE_EXECUTE 0x4 +#define PKEY_DISABLE_READ 0x8 +#undef PKEY_ACCESS_MASK +#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\ + PKEY_DISABLE_WRITE |\ + PKEY_DISABLE_READ |\ + PKEY_DISABLE_EXECUTE) + #endif /* ! _UAPI__ASM_MMAN_H */ diff --git a/linux-headers/asm-arm64/unistd.h b/linux-headers/asm-arm64/unistd.h index ce2ee8f1e36..df36f23876e 100644 --- a/linux-headers/asm-arm64/unistd.h +++ b/linux-headers/asm-arm64/unistd.h @@ -1,25 +1,2 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#define __ARCH_WANT_RENAMEAT -#define __ARCH_WANT_NEW_STAT -#define __ARCH_WANT_SET_GET_RLIMIT -#define __ARCH_WANT_TIME32_SYSCALLS -#define __ARCH_WANT_SYS_CLONE3 -#define __ARCH_WANT_MEMFD_SECRET - -#include +#include diff --git a/linux-headers/asm-arm64/unistd_64.h b/linux-headers/asm-arm64/unistd_64.h new file mode 100644 index 00000000000..ee9aaebdf32 --- /dev/null +++ b/linux-headers/asm-arm64/unistd_64.h @@ -0,0 +1,329 @@ +#ifndef _ASM_UNISTD_64_H +#define _ASM_UNISTD_64_H + +#define __NR_io_setup 0 +#define __NR_io_destroy 1 +#define __NR_io_submit 2 +#define __NR_io_cancel 3 +#define __NR_io_getevents 4 +#define __NR_setxattr 5 +#define __NR_lsetxattr 6 +#define __NR_fsetxattr 7 +#define __NR_getxattr 8 +#define __NR_lgetxattr 9 +#define __NR_fgetxattr 10 +#define __NR_listxattr 11 +#define __NR_llistxattr 12 +#define __NR_flistxattr 13 +#define __NR_removexattr 14 +#define __NR_lremovexattr 15 +#define __NR_fremovexattr 16 +#define __NR_getcwd 17 +#define __NR_lookup_dcookie 18 +#define __NR_eventfd2 19 +#define __NR_epoll_create1 20 +#define __NR_epoll_ctl 21 +#define __NR_epoll_pwait 22 +#define __NR_dup 23 +#define __NR_dup3 24 +#define __NR_fcntl 25 +#define __NR_inotify_init1 26 +#define __NR_inotify_add_watch 27 +#define __NR_inotify_rm_watch 28 +#define __NR_ioctl 29 +#define __NR_ioprio_set 30 +#define __NR_ioprio_get 31 +#define __NR_flock 32 +#define __NR_mknodat 33 +#define __NR_mkdirat 34 +#define __NR_unlinkat 35 +#define __NR_symlinkat 36 +#define __NR_linkat 37 +#define __NR_renameat 38 +#define __NR_umount2 39 +#define __NR_mount 40 +#define __NR_pivot_root 41 +#define __NR_nfsservctl 42 +#define __NR_statfs 43 +#define __NR_fstatfs 44 +#define __NR_truncate 45 +#define __NR_ftruncate 46 +#define __NR_fallocate 47 +#define __NR_faccessat 48 +#define __NR_chdir 49 +#define __NR_fchdir 50 +#define __NR_chroot 51 +#define __NR_fchmod 52 +#define __NR_fchmodat 53 +#define __NR_fchownat 54 +#define __NR_fchown 55 +#define __NR_openat 56 +#define __NR_close 
57 +#define __NR_vhangup 58 +#define __NR_pipe2 59 +#define __NR_quotactl 60 +#define __NR_getdents64 61 +#define __NR_lseek 62 +#define __NR_read 63 +#define __NR_write 64 +#define __NR_readv 65 +#define __NR_writev 66 +#define __NR_pread64 67 +#define __NR_pwrite64 68 +#define __NR_preadv 69 +#define __NR_pwritev 70 +#define __NR_sendfile 71 +#define __NR_pselect6 72 +#define __NR_ppoll 73 +#define __NR_signalfd4 74 +#define __NR_vmsplice 75 +#define __NR_splice 76 +#define __NR_tee 77 +#define __NR_readlinkat 78 +#define __NR_newfstatat 79 +#define __NR_fstat 80 +#define __NR_sync 81 +#define __NR_fsync 82 +#define __NR_fdatasync 83 +#define __NR_sync_file_range 84 +#define __NR_timerfd_create 85 +#define __NR_timerfd_settime 86 +#define __NR_timerfd_gettime 87 +#define __NR_utimensat 88 +#define __NR_acct 89 +#define __NR_capget 90 +#define __NR_capset 91 +#define __NR_personality 92 +#define __NR_exit 93 +#define __NR_exit_group 94 +#define __NR_waitid 95 +#define __NR_set_tid_address 96 +#define __NR_unshare 97 +#define __NR_futex 98 +#define __NR_set_robust_list 99 +#define __NR_get_robust_list 100 +#define __NR_nanosleep 101 +#define __NR_getitimer 102 +#define __NR_setitimer 103 +#define __NR_kexec_load 104 +#define __NR_init_module 105 +#define __NR_delete_module 106 +#define __NR_timer_create 107 +#define __NR_timer_gettime 108 +#define __NR_timer_getoverrun 109 +#define __NR_timer_settime 110 +#define __NR_timer_delete 111 +#define __NR_clock_settime 112 +#define __NR_clock_gettime 113 +#define __NR_clock_getres 114 +#define __NR_clock_nanosleep 115 +#define __NR_syslog 116 +#define __NR_ptrace 117 +#define __NR_sched_setparam 118 +#define __NR_sched_setscheduler 119 +#define __NR_sched_getscheduler 120 +#define __NR_sched_getparam 121 +#define __NR_sched_setaffinity 122 +#define __NR_sched_getaffinity 123 +#define __NR_sched_yield 124 +#define __NR_sched_get_priority_max 125 +#define __NR_sched_get_priority_min 126 +#define __NR_sched_rr_get_interval 127 +#define __NR_restart_syscall 128 +#define __NR_kill 129 +#define __NR_tkill 130 +#define __NR_tgkill 131 +#define __NR_sigaltstack 132 +#define __NR_rt_sigsuspend 133 +#define __NR_rt_sigaction 134 +#define __NR_rt_sigprocmask 135 +#define __NR_rt_sigpending 136 +#define __NR_rt_sigtimedwait 137 +#define __NR_rt_sigqueueinfo 138 +#define __NR_rt_sigreturn 139 +#define __NR_setpriority 140 +#define __NR_getpriority 141 +#define __NR_reboot 142 +#define __NR_setregid 143 +#define __NR_setgid 144 +#define __NR_setreuid 145 +#define __NR_setuid 146 +#define __NR_setresuid 147 +#define __NR_getresuid 148 +#define __NR_setresgid 149 +#define __NR_getresgid 150 +#define __NR_setfsuid 151 +#define __NR_setfsgid 152 +#define __NR_times 153 +#define __NR_setpgid 154 +#define __NR_getpgid 155 +#define __NR_getsid 156 +#define __NR_setsid 157 +#define __NR_getgroups 158 +#define __NR_setgroups 159 +#define __NR_uname 160 +#define __NR_sethostname 161 +#define __NR_setdomainname 162 +#define __NR_getrlimit 163 +#define __NR_setrlimit 164 +#define __NR_getrusage 165 +#define __NR_umask 166 +#define __NR_prctl 167 +#define __NR_getcpu 168 +#define __NR_gettimeofday 169 +#define __NR_settimeofday 170 +#define __NR_adjtimex 171 +#define __NR_getpid 172 +#define __NR_getppid 173 +#define __NR_getuid 174 +#define __NR_geteuid 175 +#define __NR_getgid 176 +#define __NR_getegid 177 +#define __NR_gettid 178 +#define __NR_sysinfo 179 +#define __NR_mq_open 180 +#define __NR_mq_unlink 181 +#define __NR_mq_timedsend 182 +#define __NR_mq_timedreceive 
183 +#define __NR_mq_notify 184 +#define __NR_mq_getsetattr 185 +#define __NR_msgget 186 +#define __NR_msgctl 187 +#define __NR_msgrcv 188 +#define __NR_msgsnd 189 +#define __NR_semget 190 +#define __NR_semctl 191 +#define __NR_semtimedop 192 +#define __NR_semop 193 +#define __NR_shmget 194 +#define __NR_shmctl 195 +#define __NR_shmat 196 +#define __NR_shmdt 197 +#define __NR_socket 198 +#define __NR_socketpair 199 +#define __NR_bind 200 +#define __NR_listen 201 +#define __NR_accept 202 +#define __NR_connect 203 +#define __NR_getsockname 204 +#define __NR_getpeername 205 +#define __NR_sendto 206 +#define __NR_recvfrom 207 +#define __NR_setsockopt 208 +#define __NR_getsockopt 209 +#define __NR_shutdown 210 +#define __NR_sendmsg 211 +#define __NR_recvmsg 212 +#define __NR_readahead 213 +#define __NR_brk 214 +#define __NR_munmap 215 +#define __NR_mremap 216 +#define __NR_add_key 217 +#define __NR_request_key 218 +#define __NR_keyctl 219 +#define __NR_clone 220 +#define __NR_execve 221 +#define __NR_mmap 222 +#define __NR_fadvise64 223 +#define __NR_swapon 224 +#define __NR_swapoff 225 +#define __NR_mprotect 226 +#define __NR_msync 227 +#define __NR_mlock 228 +#define __NR_munlock 229 +#define __NR_mlockall 230 +#define __NR_munlockall 231 +#define __NR_mincore 232 +#define __NR_madvise 233 +#define __NR_remap_file_pages 234 +#define __NR_mbind 235 +#define __NR_get_mempolicy 236 +#define __NR_set_mempolicy 237 +#define __NR_migrate_pages 238 +#define __NR_move_pages 239 +#define __NR_rt_tgsigqueueinfo 240 +#define __NR_perf_event_open 241 +#define __NR_accept4 242 +#define __NR_recvmmsg 243 +#define __NR_wait4 260 +#define __NR_prlimit64 261 +#define __NR_fanotify_init 262 +#define __NR_fanotify_mark 263 +#define __NR_name_to_handle_at 264 +#define __NR_open_by_handle_at 265 +#define __NR_clock_adjtime 266 +#define __NR_syncfs 267 +#define __NR_setns 268 +#define __NR_sendmmsg 269 +#define __NR_process_vm_readv 270 +#define __NR_process_vm_writev 271 +#define __NR_kcmp 272 +#define __NR_finit_module 273 +#define __NR_sched_setattr 274 +#define __NR_sched_getattr 275 +#define __NR_renameat2 276 +#define __NR_seccomp 277 +#define __NR_getrandom 278 +#define __NR_memfd_create 279 +#define __NR_bpf 280 +#define __NR_execveat 281 +#define __NR_userfaultfd 282 +#define __NR_membarrier 283 +#define __NR_mlock2 284 +#define __NR_copy_file_range 285 +#define __NR_preadv2 286 +#define __NR_pwritev2 287 +#define __NR_pkey_mprotect 288 +#define __NR_pkey_alloc 289 +#define __NR_pkey_free 290 +#define __NR_statx 291 +#define __NR_io_pgetevents 292 +#define __NR_rseq 293 +#define __NR_kexec_file_load 294 +#define __NR_pidfd_send_signal 424 +#define __NR_io_uring_setup 425 +#define __NR_io_uring_enter 426 +#define __NR_io_uring_register 427 +#define __NR_open_tree 428 +#define __NR_move_mount 429 +#define __NR_fsopen 430 +#define __NR_fsconfig 431 +#define __NR_fsmount 432 +#define __NR_fspick 433 +#define __NR_pidfd_open 434 +#define __NR_clone3 435 +#define __NR_close_range 436 +#define __NR_openat2 437 +#define __NR_pidfd_getfd 438 +#define __NR_faccessat2 439 +#define __NR_process_madvise 440 +#define __NR_epoll_pwait2 441 +#define __NR_mount_setattr 442 +#define __NR_quotactl_fd 443 +#define __NR_landlock_create_ruleset 444 +#define __NR_landlock_add_rule 445 +#define __NR_landlock_restrict_self 446 +#define __NR_memfd_secret 447 +#define __NR_process_mrelease 448 +#define __NR_futex_waitv 449 +#define __NR_set_mempolicy_home_node 450 +#define __NR_cachestat 451 +#define __NR_fchmodat2 452 +#define 
__NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 +#define __NR_statmount 457 +#define __NR_listmount 458 +#define __NR_lsm_get_self_attr 459 +#define __NR_lsm_set_self_attr 460 +#define __NR_lsm_list_modules 461 +#define __NR_mseal 462 +#define __NR_setxattrat 463 +#define __NR_getxattrat 464 +#define __NR_listxattrat 465 +#define __NR_removexattrat 466 +#define __NR_open_tree_attr 467 + + +#endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-generic/mman-common.h b/linux-headers/asm-generic/mman-common.h index 6ce1f1ceb43..ef1c27fa3c5 100644 --- a/linux-headers/asm-generic/mman-common.h +++ b/linux-headers/asm-generic/mman-common.h @@ -79,9 +79,13 @@ #define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */ +#define MADV_GUARD_INSTALL 102 /* fatal signal on access to range */ +#define MADV_GUARD_REMOVE 103 /* unguard range */ + /* compatibility flags */ #define MAP_FILE 0 +#define PKEY_UNRESTRICTED 0x0 #define PKEY_DISABLE_ACCESS 0x1 #define PKEY_DISABLE_WRITE 0x2 #define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\ diff --git a/linux-headers/asm-generic/mman.h b/linux-headers/asm-generic/mman.h index 57e8195d0b5..5e3d61ddbd8 100644 --- a/linux-headers/asm-generic/mman.h +++ b/linux-headers/asm-generic/mman.h @@ -19,4 +19,8 @@ #define MCL_FUTURE 2 /* lock all future mappings */ #define MCL_ONFAULT 4 /* lock all pages that are faulted in */ +#define SHADOW_STACK_SET_TOKEN (1ULL << 0) /* Set up a restore token in the shadow stack */ +#define SHADOW_STACK_SET_MARKER (1ULL << 1) /* Set up a top of stack marker in the shadow stack */ + + #endif /* __ASM_GENERIC_MMAN_H */ diff --git a/linux-headers/asm-generic/unistd.h b/linux-headers/asm-generic/unistd.h index d983c48a3b6..2892a45023a 100644 --- a/linux-headers/asm-generic/unistd.h +++ b/linux-headers/asm-generic/unistd.h @@ -737,7 +737,7 @@ __SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64) #define __NR_ppoll_time64 414 __SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64) #define __NR_io_pgetevents_time64 416 -__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents) +__SC_COMP(__NR_io_pgetevents_time64, sys_io_pgetevents, compat_sys_io_pgetevents_time64) #define __NR_recvmmsg_time64 417 __SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64) #define __NR_mq_timedsend_time64 418 @@ -776,12 +776,8 @@ __SYSCALL(__NR_fsmount, sys_fsmount) __SYSCALL(__NR_fspick, sys_fspick) #define __NR_pidfd_open 434 __SYSCALL(__NR_pidfd_open, sys_pidfd_open) - -#ifdef __ARCH_WANT_SYS_CLONE3 #define __NR_clone3 435 __SYSCALL(__NR_clone3, sys_clone3) -#endif - #define __NR_close_range 436 __SYSCALL(__NR_close_range, sys_close_range) #define __NR_openat2 437 @@ -845,8 +841,19 @@ __SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules) #define __NR_mseal 462 __SYSCALL(__NR_mseal, sys_mseal) +#define __NR_setxattrat 463 +__SYSCALL(__NR_setxattrat, sys_setxattrat) +#define __NR_getxattrat 464 +__SYSCALL(__NR_getxattrat, sys_getxattrat) +#define __NR_listxattrat 465 +__SYSCALL(__NR_listxattrat, sys_listxattrat) +#define __NR_removexattrat 466 +__SYSCALL(__NR_removexattrat, sys_removexattrat) +#define __NR_open_tree_attr 467 +__SYSCALL(__NR_open_tree_attr, sys_open_tree_attr) + #undef __NR_syscalls -#define __NR_syscalls 463 +#define __NR_syscalls 468 /* * 32 bit systems traditionally used different diff --git a/linux-headers/asm-loongarch/kvm.h b/linux-headers/asm-loongarch/kvm.h index f9abef38231..5f354f5c684 100644 --- 
a/linux-headers/asm-loongarch/kvm.h +++ b/linux-headers/asm-loongarch/kvm.h @@ -8,6 +8,8 @@ #include +#define __KVM_HAVE_IRQ_LINE + /* * KVM LoongArch specific structures and definitions. * @@ -64,6 +66,7 @@ struct kvm_fpu { #define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL) #define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL) #define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL) +#define KVM_REG_LOONGARCH_LBT (KVM_REG_LOONGARCH | 0x50000ULL) #define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL) #define KVM_CSR_IDX_MASK 0x7fff #define KVM_CPUCFG_IDX_MASK 0x7fff @@ -77,11 +80,34 @@ struct kvm_fpu { /* Debugging: Special instruction for software breakpoint */ #define KVM_REG_LOONGARCH_DEBUG_INST (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3) +/* LBT registers */ +#define KVM_REG_LOONGARCH_LBT_SCR0 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_LOONGARCH_LBT_SCR1 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 2) +#define KVM_REG_LOONGARCH_LBT_SCR2 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 3) +#define KVM_REG_LOONGARCH_LBT_SCR3 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 4) +#define KVM_REG_LOONGARCH_LBT_EFLAGS (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 5) +#define KVM_REG_LOONGARCH_LBT_FTOP (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 6) + #define LOONGARCH_REG_SHIFT 3 #define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) #define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) #define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) + +/* Device Control API on vm fd */ +#define KVM_LOONGARCH_VM_FEAT_CTRL 0 +#define KVM_LOONGARCH_VM_FEAT_LSX 0 +#define KVM_LOONGARCH_VM_FEAT_LASX 1 +#define KVM_LOONGARCH_VM_FEAT_X86BT 2 +#define KVM_LOONGARCH_VM_FEAT_ARMBT 3 +#define KVM_LOONGARCH_VM_FEAT_MIPSBT 4 +#define KVM_LOONGARCH_VM_FEAT_PMU 5 +#define KVM_LOONGARCH_VM_FEAT_PV_IPI 6 +#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7 + +/* Device Control API on vcpu fd */ #define KVM_LOONGARCH_VCPU_CPUCFG 0 +#define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1 +#define KVM_LOONGARCH_VCPU_PVTIME_GPA 0 struct kvm_debug_exit_arch { }; @@ -108,4 +134,22 @@ struct kvm_iocsr_entry { #define KVM_IRQCHIP_NUM_PINS 64 #define KVM_MAX_CORES 256 +#define KVM_DEV_LOONGARCH_IPI_GRP_REGS 0x40000001 + +#define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000002 + +#define KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS 0x40000003 +#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU 0x0 +#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE 0x1 +#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE 0x2 + +#define KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL 0x40000004 +#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU 0x0 +#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE 0x1 +#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED 0x3 + +#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS 0x40000005 +#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL 0x40000006 +#define KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT 0 + #endif /* __UAPI_ASM_LOONGARCH_KVM_H */ diff --git a/linux-headers/asm-loongarch/kvm_para.h b/linux-headers/asm-loongarch/kvm_para.h new file mode 100644 index 00000000000..fd7f40713d4 --- /dev/null +++ b/linux-headers/asm-loongarch/kvm_para.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_KVM_PARA_H +#define _ASM_KVM_PARA_H + +#include + +/* + * CPUCFG index area: 0x40000000 -- 0x400000ff + * SW emulation for KVM hypervirsor + */ +#define CPUCFG_KVM_BASE 0x40000000 +#define CPUCFG_KVM_SIZE 0x100 +#define 
CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0) +#define KVM_SIGNATURE "KVM\0" +#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) +#define KVM_FEATURE_IPI 1 +#define KVM_FEATURE_STEAL_TIME 2 +/* BIT 24 - 31 are features configurable by user space vmm */ +#define KVM_FEATURE_VIRT_EXTIOI 24 +#define KVM_FEATURE_USER_HCALL 25 + +#endif /* _ASM_KVM_PARA_H */ diff --git a/linux-headers/asm-loongarch/unistd.h b/linux-headers/asm-loongarch/unistd.h index fcb668984f0..1f01980f9c9 100644 --- a/linux-headers/asm-loongarch/unistd.h +++ b/linux-headers/asm-loongarch/unistd.h @@ -1,5 +1,3 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#define __ARCH_WANT_SYS_CLONE -#define __ARCH_WANT_SYS_CLONE3 -#include +#include diff --git a/linux-headers/asm-loongarch/unistd_64.h b/linux-headers/asm-loongarch/unistd_64.h new file mode 100644 index 00000000000..50d22df8f7c --- /dev/null +++ b/linux-headers/asm-loongarch/unistd_64.h @@ -0,0 +1,325 @@ +#ifndef _ASM_UNISTD_64_H +#define _ASM_UNISTD_64_H + +#define __NR_io_setup 0 +#define __NR_io_destroy 1 +#define __NR_io_submit 2 +#define __NR_io_cancel 3 +#define __NR_io_getevents 4 +#define __NR_setxattr 5 +#define __NR_lsetxattr 6 +#define __NR_fsetxattr 7 +#define __NR_getxattr 8 +#define __NR_lgetxattr 9 +#define __NR_fgetxattr 10 +#define __NR_listxattr 11 +#define __NR_llistxattr 12 +#define __NR_flistxattr 13 +#define __NR_removexattr 14 +#define __NR_lremovexattr 15 +#define __NR_fremovexattr 16 +#define __NR_getcwd 17 +#define __NR_lookup_dcookie 18 +#define __NR_eventfd2 19 +#define __NR_epoll_create1 20 +#define __NR_epoll_ctl 21 +#define __NR_epoll_pwait 22 +#define __NR_dup 23 +#define __NR_dup3 24 +#define __NR_fcntl 25 +#define __NR_inotify_init1 26 +#define __NR_inotify_add_watch 27 +#define __NR_inotify_rm_watch 28 +#define __NR_ioctl 29 +#define __NR_ioprio_set 30 +#define __NR_ioprio_get 31 +#define __NR_flock 32 +#define __NR_mknodat 33 +#define __NR_mkdirat 34 +#define __NR_unlinkat 35 +#define __NR_symlinkat 36 +#define __NR_linkat 37 +#define __NR_umount2 39 +#define __NR_mount 40 +#define __NR_pivot_root 41 +#define __NR_nfsservctl 42 +#define __NR_statfs 43 +#define __NR_fstatfs 44 +#define __NR_truncate 45 +#define __NR_ftruncate 46 +#define __NR_fallocate 47 +#define __NR_faccessat 48 +#define __NR_chdir 49 +#define __NR_fchdir 50 +#define __NR_chroot 51 +#define __NR_fchmod 52 +#define __NR_fchmodat 53 +#define __NR_fchownat 54 +#define __NR_fchown 55 +#define __NR_openat 56 +#define __NR_close 57 +#define __NR_vhangup 58 +#define __NR_pipe2 59 +#define __NR_quotactl 60 +#define __NR_getdents64 61 +#define __NR_lseek 62 +#define __NR_read 63 +#define __NR_write 64 +#define __NR_readv 65 +#define __NR_writev 66 +#define __NR_pread64 67 +#define __NR_pwrite64 68 +#define __NR_preadv 69 +#define __NR_pwritev 70 +#define __NR_sendfile 71 +#define __NR_pselect6 72 +#define __NR_ppoll 73 +#define __NR_signalfd4 74 +#define __NR_vmsplice 75 +#define __NR_splice 76 +#define __NR_tee 77 +#define __NR_readlinkat 78 +#define __NR_newfstatat 79 +#define __NR_fstat 80 +#define __NR_sync 81 +#define __NR_fsync 82 +#define __NR_fdatasync 83 +#define __NR_sync_file_range 84 +#define __NR_timerfd_create 85 +#define __NR_timerfd_settime 86 +#define __NR_timerfd_gettime 87 +#define __NR_utimensat 88 +#define __NR_acct 89 +#define __NR_capget 90 +#define __NR_capset 91 +#define __NR_personality 92 +#define __NR_exit 93 +#define __NR_exit_group 94 +#define __NR_waitid 95 +#define __NR_set_tid_address 96 +#define __NR_unshare 97 +#define __NR_futex 98 
+#define __NR_set_robust_list 99 +#define __NR_get_robust_list 100 +#define __NR_nanosleep 101 +#define __NR_getitimer 102 +#define __NR_setitimer 103 +#define __NR_kexec_load 104 +#define __NR_init_module 105 +#define __NR_delete_module 106 +#define __NR_timer_create 107 +#define __NR_timer_gettime 108 +#define __NR_timer_getoverrun 109 +#define __NR_timer_settime 110 +#define __NR_timer_delete 111 +#define __NR_clock_settime 112 +#define __NR_clock_gettime 113 +#define __NR_clock_getres 114 +#define __NR_clock_nanosleep 115 +#define __NR_syslog 116 +#define __NR_ptrace 117 +#define __NR_sched_setparam 118 +#define __NR_sched_setscheduler 119 +#define __NR_sched_getscheduler 120 +#define __NR_sched_getparam 121 +#define __NR_sched_setaffinity 122 +#define __NR_sched_getaffinity 123 +#define __NR_sched_yield 124 +#define __NR_sched_get_priority_max 125 +#define __NR_sched_get_priority_min 126 +#define __NR_sched_rr_get_interval 127 +#define __NR_restart_syscall 128 +#define __NR_kill 129 +#define __NR_tkill 130 +#define __NR_tgkill 131 +#define __NR_sigaltstack 132 +#define __NR_rt_sigsuspend 133 +#define __NR_rt_sigaction 134 +#define __NR_rt_sigprocmask 135 +#define __NR_rt_sigpending 136 +#define __NR_rt_sigtimedwait 137 +#define __NR_rt_sigqueueinfo 138 +#define __NR_rt_sigreturn 139 +#define __NR_setpriority 140 +#define __NR_getpriority 141 +#define __NR_reboot 142 +#define __NR_setregid 143 +#define __NR_setgid 144 +#define __NR_setreuid 145 +#define __NR_setuid 146 +#define __NR_setresuid 147 +#define __NR_getresuid 148 +#define __NR_setresgid 149 +#define __NR_getresgid 150 +#define __NR_setfsuid 151 +#define __NR_setfsgid 152 +#define __NR_times 153 +#define __NR_setpgid 154 +#define __NR_getpgid 155 +#define __NR_getsid 156 +#define __NR_setsid 157 +#define __NR_getgroups 158 +#define __NR_setgroups 159 +#define __NR_uname 160 +#define __NR_sethostname 161 +#define __NR_setdomainname 162 +#define __NR_getrusage 165 +#define __NR_umask 166 +#define __NR_prctl 167 +#define __NR_getcpu 168 +#define __NR_gettimeofday 169 +#define __NR_settimeofday 170 +#define __NR_adjtimex 171 +#define __NR_getpid 172 +#define __NR_getppid 173 +#define __NR_getuid 174 +#define __NR_geteuid 175 +#define __NR_getgid 176 +#define __NR_getegid 177 +#define __NR_gettid 178 +#define __NR_sysinfo 179 +#define __NR_mq_open 180 +#define __NR_mq_unlink 181 +#define __NR_mq_timedsend 182 +#define __NR_mq_timedreceive 183 +#define __NR_mq_notify 184 +#define __NR_mq_getsetattr 185 +#define __NR_msgget 186 +#define __NR_msgctl 187 +#define __NR_msgrcv 188 +#define __NR_msgsnd 189 +#define __NR_semget 190 +#define __NR_semctl 191 +#define __NR_semtimedop 192 +#define __NR_semop 193 +#define __NR_shmget 194 +#define __NR_shmctl 195 +#define __NR_shmat 196 +#define __NR_shmdt 197 +#define __NR_socket 198 +#define __NR_socketpair 199 +#define __NR_bind 200 +#define __NR_listen 201 +#define __NR_accept 202 +#define __NR_connect 203 +#define __NR_getsockname 204 +#define __NR_getpeername 205 +#define __NR_sendto 206 +#define __NR_recvfrom 207 +#define __NR_setsockopt 208 +#define __NR_getsockopt 209 +#define __NR_shutdown 210 +#define __NR_sendmsg 211 +#define __NR_recvmsg 212 +#define __NR_readahead 213 +#define __NR_brk 214 +#define __NR_munmap 215 +#define __NR_mremap 216 +#define __NR_add_key 217 +#define __NR_request_key 218 +#define __NR_keyctl 219 +#define __NR_clone 220 +#define __NR_execve 221 +#define __NR_mmap 222 +#define __NR_fadvise64 223 +#define __NR_swapon 224 +#define __NR_swapoff 225 +#define 
__NR_mprotect 226 +#define __NR_msync 227 +#define __NR_mlock 228 +#define __NR_munlock 229 +#define __NR_mlockall 230 +#define __NR_munlockall 231 +#define __NR_mincore 232 +#define __NR_madvise 233 +#define __NR_remap_file_pages 234 +#define __NR_mbind 235 +#define __NR_get_mempolicy 236 +#define __NR_set_mempolicy 237 +#define __NR_migrate_pages 238 +#define __NR_move_pages 239 +#define __NR_rt_tgsigqueueinfo 240 +#define __NR_perf_event_open 241 +#define __NR_accept4 242 +#define __NR_recvmmsg 243 +#define __NR_wait4 260 +#define __NR_prlimit64 261 +#define __NR_fanotify_init 262 +#define __NR_fanotify_mark 263 +#define __NR_name_to_handle_at 264 +#define __NR_open_by_handle_at 265 +#define __NR_clock_adjtime 266 +#define __NR_syncfs 267 +#define __NR_setns 268 +#define __NR_sendmmsg 269 +#define __NR_process_vm_readv 270 +#define __NR_process_vm_writev 271 +#define __NR_kcmp 272 +#define __NR_finit_module 273 +#define __NR_sched_setattr 274 +#define __NR_sched_getattr 275 +#define __NR_renameat2 276 +#define __NR_seccomp 277 +#define __NR_getrandom 278 +#define __NR_memfd_create 279 +#define __NR_bpf 280 +#define __NR_execveat 281 +#define __NR_userfaultfd 282 +#define __NR_membarrier 283 +#define __NR_mlock2 284 +#define __NR_copy_file_range 285 +#define __NR_preadv2 286 +#define __NR_pwritev2 287 +#define __NR_pkey_mprotect 288 +#define __NR_pkey_alloc 289 +#define __NR_pkey_free 290 +#define __NR_statx 291 +#define __NR_io_pgetevents 292 +#define __NR_rseq 293 +#define __NR_kexec_file_load 294 +#define __NR_pidfd_send_signal 424 +#define __NR_io_uring_setup 425 +#define __NR_io_uring_enter 426 +#define __NR_io_uring_register 427 +#define __NR_open_tree 428 +#define __NR_move_mount 429 +#define __NR_fsopen 430 +#define __NR_fsconfig 431 +#define __NR_fsmount 432 +#define __NR_fspick 433 +#define __NR_pidfd_open 434 +#define __NR_clone3 435 +#define __NR_close_range 436 +#define __NR_openat2 437 +#define __NR_pidfd_getfd 438 +#define __NR_faccessat2 439 +#define __NR_process_madvise 440 +#define __NR_epoll_pwait2 441 +#define __NR_mount_setattr 442 +#define __NR_quotactl_fd 443 +#define __NR_landlock_create_ruleset 444 +#define __NR_landlock_add_rule 445 +#define __NR_landlock_restrict_self 446 +#define __NR_process_mrelease 448 +#define __NR_futex_waitv 449 +#define __NR_set_mempolicy_home_node 450 +#define __NR_cachestat 451 +#define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 +#define __NR_statmount 457 +#define __NR_listmount 458 +#define __NR_lsm_get_self_attr 459 +#define __NR_lsm_set_self_attr 460 +#define __NR_lsm_list_modules 461 +#define __NR_mseal 462 +#define __NR_setxattrat 463 +#define __NR_getxattrat 464 +#define __NR_listxattrat 465 +#define __NR_removexattrat 466 +#define __NR_open_tree_attr 467 + + +#endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-mips/mman.h b/linux-headers/asm-mips/mman.h index 9c48d9a21aa..b700dae28c4 100644 --- a/linux-headers/asm-mips/mman.h +++ b/linux-headers/asm-mips/mman.h @@ -105,6 +105,9 @@ #define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */ +#define MADV_GUARD_INSTALL 102 /* fatal signal on access to range */ +#define MADV_GUARD_REMOVE 103 /* unguard range */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/linux-headers/asm-mips/unistd_n32.h b/linux-headers/asm-mips/unistd_n32.h index fc93b3be309..bdcc2f460ba 100644 --- a/linux-headers/asm-mips/unistd_n32.h +++ b/linux-headers/asm-mips/unistd_n32.h @@ 
-391,5 +391,10 @@ #define __NR_lsm_set_self_attr (__NR_Linux + 460) #define __NR_lsm_list_modules (__NR_Linux + 461) #define __NR_mseal (__NR_Linux + 462) +#define __NR_setxattrat (__NR_Linux + 463) +#define __NR_getxattrat (__NR_Linux + 464) +#define __NR_listxattrat (__NR_Linux + 465) +#define __NR_removexattrat (__NR_Linux + 466) +#define __NR_open_tree_attr (__NR_Linux + 467) #endif /* _ASM_UNISTD_N32_H */ diff --git a/linux-headers/asm-mips/unistd_n64.h b/linux-headers/asm-mips/unistd_n64.h index e72a3eb2c93..3b6b0193b69 100644 --- a/linux-headers/asm-mips/unistd_n64.h +++ b/linux-headers/asm-mips/unistd_n64.h @@ -367,5 +367,10 @@ #define __NR_lsm_set_self_attr (__NR_Linux + 460) #define __NR_lsm_list_modules (__NR_Linux + 461) #define __NR_mseal (__NR_Linux + 462) +#define __NR_setxattrat (__NR_Linux + 463) +#define __NR_getxattrat (__NR_Linux + 464) +#define __NR_listxattrat (__NR_Linux + 465) +#define __NR_removexattrat (__NR_Linux + 466) +#define __NR_open_tree_attr (__NR_Linux + 467) #endif /* _ASM_UNISTD_N64_H */ diff --git a/linux-headers/asm-mips/unistd_o32.h b/linux-headers/asm-mips/unistd_o32.h index b86eb0786c7..4609a4b4d38 100644 --- a/linux-headers/asm-mips/unistd_o32.h +++ b/linux-headers/asm-mips/unistd_o32.h @@ -437,5 +437,10 @@ #define __NR_lsm_set_self_attr (__NR_Linux + 460) #define __NR_lsm_list_modules (__NR_Linux + 461) #define __NR_mseal (__NR_Linux + 462) +#define __NR_setxattrat (__NR_Linux + 463) +#define __NR_getxattrat (__NR_Linux + 464) +#define __NR_listxattrat (__NR_Linux + 465) +#define __NR_removexattrat (__NR_Linux + 466) +#define __NR_open_tree_attr (__NR_Linux + 467) #endif /* _ASM_UNISTD_O32_H */ diff --git a/linux-headers/asm-powerpc/unistd_32.h b/linux-headers/asm-powerpc/unistd_32.h index 28627b6546f..5d38a427e0a 100644 --- a/linux-headers/asm-powerpc/unistd_32.h +++ b/linux-headers/asm-powerpc/unistd_32.h @@ -444,6 +444,11 @@ #define __NR_lsm_set_self_attr 460 #define __NR_lsm_list_modules 461 #define __NR_mseal 462 +#define __NR_setxattrat 463 +#define __NR_getxattrat 464 +#define __NR_listxattrat 465 +#define __NR_removexattrat 466 +#define __NR_open_tree_attr 467 #endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-powerpc/unistd_64.h b/linux-headers/asm-powerpc/unistd_64.h index 1fc42a83004..860a488e4d1 100644 --- a/linux-headers/asm-powerpc/unistd_64.h +++ b/linux-headers/asm-powerpc/unistd_64.h @@ -416,6 +416,11 @@ #define __NR_lsm_set_self_attr 460 #define __NR_lsm_list_modules 461 #define __NR_mseal 462 +#define __NR_setxattrat 463 +#define __NR_getxattrat 464 +#define __NR_listxattrat 465 +#define __NR_removexattrat 466 +#define __NR_open_tree_attr 467 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-riscv/kvm.h b/linux-headers/asm-riscv/kvm.h index e878e7cc397..5f59fd226cc 100644 --- a/linux-headers/asm-riscv/kvm.h +++ b/linux-headers/asm-riscv/kvm.h @@ -168,6 +168,22 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_ZTSO, KVM_RISCV_ISA_EXT_ZACAS, KVM_RISCV_ISA_EXT_SSCOFPMF, + KVM_RISCV_ISA_EXT_ZIMOP, + KVM_RISCV_ISA_EXT_ZCA, + KVM_RISCV_ISA_EXT_ZCB, + KVM_RISCV_ISA_EXT_ZCD, + KVM_RISCV_ISA_EXT_ZCF, + KVM_RISCV_ISA_EXT_ZCMOP, + KVM_RISCV_ISA_EXT_ZAWRS, + KVM_RISCV_ISA_EXT_SMNPM, + KVM_RISCV_ISA_EXT_SSNPM, + KVM_RISCV_ISA_EXT_SVADE, + KVM_RISCV_ISA_EXT_SVADU, + KVM_RISCV_ISA_EXT_SVVPTC, + KVM_RISCV_ISA_EXT_ZABHA, + KVM_RISCV_ISA_EXT_ZICCRSE, + KVM_RISCV_ISA_EXT_ZAAMO, + KVM_RISCV_ISA_EXT_ZALRSC, KVM_RISCV_ISA_EXT_MAX, }; @@ -187,6 +203,7 @@ enum KVM_RISCV_SBI_EXT_ID { KVM_RISCV_SBI_EXT_VENDOR, KVM_RISCV_SBI_EXT_DBCN, 
KVM_RISCV_SBI_EXT_STA, + KVM_RISCV_SBI_EXT_SUSP, KVM_RISCV_SBI_EXT_MAX, }; @@ -200,9 +217,6 @@ struct kvm_riscv_sbi_sta { #define KVM_RISCV_TIMER_STATE_OFF 0 #define KVM_RISCV_TIMER_STATE_ON 1 -#define KVM_REG_SIZE(id) \ - (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) - /* If you need to interpret the index values, here is the key: */ #define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000 #define KVM_REG_RISCV_TYPE_SHIFT 24 diff --git a/linux-headers/asm-riscv/unistd.h b/linux-headers/asm-riscv/unistd.h index 950ab3fd440..81896bbbf72 100644 --- a/linux-headers/asm-riscv/unistd.h +++ b/linux-headers/asm-riscv/unistd.h @@ -14,41 +14,10 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ +#include -#if defined(__LP64__) && !defined(__SYSCALL_COMPAT) -#define __ARCH_WANT_NEW_STAT -#define __ARCH_WANT_SET_GET_RLIMIT -#endif /* __LP64__ */ - -#define __ARCH_WANT_SYS_CLONE3 -#define __ARCH_WANT_MEMFD_SECRET - -#include - -/* - * Allows the instruction cache to be flushed from userspace. Despite RISC-V - * having a direct 'fence.i' instruction available to userspace (which we - * can't trap!), that's not actually viable when running on Linux because the - * kernel might schedule a process on another hart. There is no way for - * userspace to handle this without invoking the kernel (as it doesn't know the - * thread->hart mappings), so we've defined a RISC-V specific system call to - * flush the instruction cache. - * - * __NR_riscv_flush_icache is defined to flush the instruction cache over an - * address range, with the flush applying to either all threads or just the - * caller. We don't currently do anything with the address range, that's just - * in there for forwards compatibility. - */ -#ifndef __NR_riscv_flush_icache -#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15) -#endif -__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache) - -/* - * Allows userspace to query the kernel for CPU architecture and - * microarchitecture details across a given set of CPUs. 
- */ -#ifndef __NR_riscv_hwprobe -#define __NR_riscv_hwprobe (__NR_arch_specific_syscall + 14) +#if __BITS_PER_LONG == 64 +#include +#else +#include #endif -__SYSCALL(__NR_riscv_hwprobe, sys_riscv_hwprobe) diff --git a/linux-headers/asm-riscv/unistd_32.h b/linux-headers/asm-riscv/unistd_32.h new file mode 100644 index 00000000000..a5e769f1d99 --- /dev/null +++ b/linux-headers/asm-riscv/unistd_32.h @@ -0,0 +1,320 @@ +#ifndef _ASM_UNISTD_32_H +#define _ASM_UNISTD_32_H + +#define __NR_io_setup 0 +#define __NR_io_destroy 1 +#define __NR_io_submit 2 +#define __NR_io_cancel 3 +#define __NR_setxattr 5 +#define __NR_lsetxattr 6 +#define __NR_fsetxattr 7 +#define __NR_getxattr 8 +#define __NR_lgetxattr 9 +#define __NR_fgetxattr 10 +#define __NR_listxattr 11 +#define __NR_llistxattr 12 +#define __NR_flistxattr 13 +#define __NR_removexattr 14 +#define __NR_lremovexattr 15 +#define __NR_fremovexattr 16 +#define __NR_getcwd 17 +#define __NR_lookup_dcookie 18 +#define __NR_eventfd2 19 +#define __NR_epoll_create1 20 +#define __NR_epoll_ctl 21 +#define __NR_epoll_pwait 22 +#define __NR_dup 23 +#define __NR_dup3 24 +#define __NR_fcntl64 25 +#define __NR_inotify_init1 26 +#define __NR_inotify_add_watch 27 +#define __NR_inotify_rm_watch 28 +#define __NR_ioctl 29 +#define __NR_ioprio_set 30 +#define __NR_ioprio_get 31 +#define __NR_flock 32 +#define __NR_mknodat 33 +#define __NR_mkdirat 34 +#define __NR_unlinkat 35 +#define __NR_symlinkat 36 +#define __NR_linkat 37 +#define __NR_umount2 39 +#define __NR_mount 40 +#define __NR_pivot_root 41 +#define __NR_nfsservctl 42 +#define __NR_statfs64 43 +#define __NR_fstatfs64 44 +#define __NR_truncate64 45 +#define __NR_ftruncate64 46 +#define __NR_fallocate 47 +#define __NR_faccessat 48 +#define __NR_chdir 49 +#define __NR_fchdir 50 +#define __NR_chroot 51 +#define __NR_fchmod 52 +#define __NR_fchmodat 53 +#define __NR_fchownat 54 +#define __NR_fchown 55 +#define __NR_openat 56 +#define __NR_close 57 +#define __NR_vhangup 58 +#define __NR_pipe2 59 +#define __NR_quotactl 60 +#define __NR_getdents64 61 +#define __NR_llseek 62 +#define __NR_read 63 +#define __NR_write 64 +#define __NR_readv 65 +#define __NR_writev 66 +#define __NR_pread64 67 +#define __NR_pwrite64 68 +#define __NR_preadv 69 +#define __NR_pwritev 70 +#define __NR_sendfile64 71 +#define __NR_signalfd4 74 +#define __NR_vmsplice 75 +#define __NR_splice 76 +#define __NR_tee 77 +#define __NR_readlinkat 78 +#define __NR_sync 81 +#define __NR_fsync 82 +#define __NR_fdatasync 83 +#define __NR_sync_file_range 84 +#define __NR_timerfd_create 85 +#define __NR_acct 89 +#define __NR_capget 90 +#define __NR_capset 91 +#define __NR_personality 92 +#define __NR_exit 93 +#define __NR_exit_group 94 +#define __NR_waitid 95 +#define __NR_set_tid_address 96 +#define __NR_unshare 97 +#define __NR_set_robust_list 99 +#define __NR_get_robust_list 100 +#define __NR_getitimer 102 +#define __NR_setitimer 103 +#define __NR_kexec_load 104 +#define __NR_init_module 105 +#define __NR_delete_module 106 +#define __NR_timer_create 107 +#define __NR_timer_getoverrun 109 +#define __NR_timer_delete 111 +#define __NR_syslog 116 +#define __NR_ptrace 117 +#define __NR_sched_setparam 118 +#define __NR_sched_setscheduler 119 +#define __NR_sched_getscheduler 120 +#define __NR_sched_getparam 121 +#define __NR_sched_setaffinity 122 +#define __NR_sched_getaffinity 123 +#define __NR_sched_yield 124 +#define __NR_sched_get_priority_max 125 +#define __NR_sched_get_priority_min 126 +#define __NR_restart_syscall 128 +#define __NR_kill 129 +#define 
__NR_tkill 130 +#define __NR_tgkill 131 +#define __NR_sigaltstack 132 +#define __NR_rt_sigsuspend 133 +#define __NR_rt_sigaction 134 +#define __NR_rt_sigprocmask 135 +#define __NR_rt_sigpending 136 +#define __NR_rt_sigqueueinfo 138 +#define __NR_rt_sigreturn 139 +#define __NR_setpriority 140 +#define __NR_getpriority 141 +#define __NR_reboot 142 +#define __NR_setregid 143 +#define __NR_setgid 144 +#define __NR_setreuid 145 +#define __NR_setuid 146 +#define __NR_setresuid 147 +#define __NR_getresuid 148 +#define __NR_setresgid 149 +#define __NR_getresgid 150 +#define __NR_setfsuid 151 +#define __NR_setfsgid 152 +#define __NR_times 153 +#define __NR_setpgid 154 +#define __NR_getpgid 155 +#define __NR_getsid 156 +#define __NR_setsid 157 +#define __NR_getgroups 158 +#define __NR_setgroups 159 +#define __NR_uname 160 +#define __NR_sethostname 161 +#define __NR_setdomainname 162 +#define __NR_getrusage 165 +#define __NR_umask 166 +#define __NR_prctl 167 +#define __NR_getcpu 168 +#define __NR_getpid 172 +#define __NR_getppid 173 +#define __NR_getuid 174 +#define __NR_geteuid 175 +#define __NR_getgid 176 +#define __NR_getegid 177 +#define __NR_gettid 178 +#define __NR_sysinfo 179 +#define __NR_mq_open 180 +#define __NR_mq_unlink 181 +#define __NR_mq_notify 184 +#define __NR_mq_getsetattr 185 +#define __NR_msgget 186 +#define __NR_msgctl 187 +#define __NR_msgrcv 188 +#define __NR_msgsnd 189 +#define __NR_semget 190 +#define __NR_semctl 191 +#define __NR_semop 193 +#define __NR_shmget 194 +#define __NR_shmctl 195 +#define __NR_shmat 196 +#define __NR_shmdt 197 +#define __NR_socket 198 +#define __NR_socketpair 199 +#define __NR_bind 200 +#define __NR_listen 201 +#define __NR_accept 202 +#define __NR_connect 203 +#define __NR_getsockname 204 +#define __NR_getpeername 205 +#define __NR_sendto 206 +#define __NR_recvfrom 207 +#define __NR_setsockopt 208 +#define __NR_getsockopt 209 +#define __NR_shutdown 210 +#define __NR_sendmsg 211 +#define __NR_recvmsg 212 +#define __NR_readahead 213 +#define __NR_brk 214 +#define __NR_munmap 215 +#define __NR_mremap 216 +#define __NR_add_key 217 +#define __NR_request_key 218 +#define __NR_keyctl 219 +#define __NR_clone 220 +#define __NR_execve 221 +#define __NR_mmap2 222 +#define __NR_fadvise64_64 223 +#define __NR_swapon 224 +#define __NR_swapoff 225 +#define __NR_mprotect 226 +#define __NR_msync 227 +#define __NR_mlock 228 +#define __NR_munlock 229 +#define __NR_mlockall 230 +#define __NR_munlockall 231 +#define __NR_mincore 232 +#define __NR_madvise 233 +#define __NR_remap_file_pages 234 +#define __NR_mbind 235 +#define __NR_get_mempolicy 236 +#define __NR_set_mempolicy 237 +#define __NR_migrate_pages 238 +#define __NR_move_pages 239 +#define __NR_rt_tgsigqueueinfo 240 +#define __NR_perf_event_open 241 +#define __NR_accept4 242 +#define __NR_riscv_hwprobe 258 +#define __NR_riscv_flush_icache 259 +#define __NR_prlimit64 261 +#define __NR_fanotify_init 262 +#define __NR_fanotify_mark 263 +#define __NR_name_to_handle_at 264 +#define __NR_open_by_handle_at 265 +#define __NR_syncfs 267 +#define __NR_setns 268 +#define __NR_sendmmsg 269 +#define __NR_process_vm_readv 270 +#define __NR_process_vm_writev 271 +#define __NR_kcmp 272 +#define __NR_finit_module 273 +#define __NR_sched_setattr 274 +#define __NR_sched_getattr 275 +#define __NR_renameat2 276 +#define __NR_seccomp 277 +#define __NR_getrandom 278 +#define __NR_memfd_create 279 +#define __NR_bpf 280 +#define __NR_execveat 281 +#define __NR_userfaultfd 282 +#define __NR_membarrier 283 +#define __NR_mlock2 284 
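Editorial note, not part of the imported header: the values in this new rv32 table are the numbers the kernel expects in register a7 at the ecall, and from C they can be exercised through the generic syscall(2) wrapper without any libc-specific stub. A minimal sketch using __NR_gettid (178 in the table above), which takes no arguments:

#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
    /* Invoke the syscall by number; no dedicated libc wrapper is required. */
    long tid = syscall(__NR_gettid);
    printf("tid=%ld\n", tid);
    return 0;
}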
+#define __NR_copy_file_range 285 +#define __NR_preadv2 286 +#define __NR_pwritev2 287 +#define __NR_pkey_mprotect 288 +#define __NR_pkey_alloc 289 +#define __NR_pkey_free 290 +#define __NR_statx 291 +#define __NR_rseq 293 +#define __NR_kexec_file_load 294 +#define __NR_clock_gettime64 403 +#define __NR_clock_settime64 404 +#define __NR_clock_adjtime64 405 +#define __NR_clock_getres_time64 406 +#define __NR_clock_nanosleep_time64 407 +#define __NR_timer_gettime64 408 +#define __NR_timer_settime64 409 +#define __NR_timerfd_gettime64 410 +#define __NR_timerfd_settime64 411 +#define __NR_utimensat_time64 412 +#define __NR_pselect6_time64 413 +#define __NR_ppoll_time64 414 +#define __NR_io_pgetevents_time64 416 +#define __NR_recvmmsg_time64 417 +#define __NR_mq_timedsend_time64 418 +#define __NR_mq_timedreceive_time64 419 +#define __NR_semtimedop_time64 420 +#define __NR_rt_sigtimedwait_time64 421 +#define __NR_futex_time64 422 +#define __NR_sched_rr_get_interval_time64 423 +#define __NR_pidfd_send_signal 424 +#define __NR_io_uring_setup 425 +#define __NR_io_uring_enter 426 +#define __NR_io_uring_register 427 +#define __NR_open_tree 428 +#define __NR_move_mount 429 +#define __NR_fsopen 430 +#define __NR_fsconfig 431 +#define __NR_fsmount 432 +#define __NR_fspick 433 +#define __NR_pidfd_open 434 +#define __NR_clone3 435 +#define __NR_close_range 436 +#define __NR_openat2 437 +#define __NR_pidfd_getfd 438 +#define __NR_faccessat2 439 +#define __NR_process_madvise 440 +#define __NR_epoll_pwait2 441 +#define __NR_mount_setattr 442 +#define __NR_quotactl_fd 443 +#define __NR_landlock_create_ruleset 444 +#define __NR_landlock_add_rule 445 +#define __NR_landlock_restrict_self 446 +#define __NR_memfd_secret 447 +#define __NR_process_mrelease 448 +#define __NR_futex_waitv 449 +#define __NR_set_mempolicy_home_node 450 +#define __NR_cachestat 451 +#define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 +#define __NR_statmount 457 +#define __NR_listmount 458 +#define __NR_lsm_get_self_attr 459 +#define __NR_lsm_set_self_attr 460 +#define __NR_lsm_list_modules 461 +#define __NR_mseal 462 +#define __NR_setxattrat 463 +#define __NR_getxattrat 464 +#define __NR_listxattrat 465 +#define __NR_removexattrat 466 +#define __NR_open_tree_attr 467 + + +#endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-riscv/unistd_64.h b/linux-headers/asm-riscv/unistd_64.h new file mode 100644 index 00000000000..8df4d64841c --- /dev/null +++ b/linux-headers/asm-riscv/unistd_64.h @@ -0,0 +1,330 @@ +#ifndef _ASM_UNISTD_64_H +#define _ASM_UNISTD_64_H + +#define __NR_io_setup 0 +#define __NR_io_destroy 1 +#define __NR_io_submit 2 +#define __NR_io_cancel 3 +#define __NR_io_getevents 4 +#define __NR_setxattr 5 +#define __NR_lsetxattr 6 +#define __NR_fsetxattr 7 +#define __NR_getxattr 8 +#define __NR_lgetxattr 9 +#define __NR_fgetxattr 10 +#define __NR_listxattr 11 +#define __NR_llistxattr 12 +#define __NR_flistxattr 13 +#define __NR_removexattr 14 +#define __NR_lremovexattr 15 +#define __NR_fremovexattr 16 +#define __NR_getcwd 17 +#define __NR_lookup_dcookie 18 +#define __NR_eventfd2 19 +#define __NR_epoll_create1 20 +#define __NR_epoll_ctl 21 +#define __NR_epoll_pwait 22 +#define __NR_dup 23 +#define __NR_dup3 24 +#define __NR_fcntl 25 +#define __NR_inotify_init1 26 +#define __NR_inotify_add_watch 27 +#define __NR_inotify_rm_watch 28 +#define __NR_ioctl 29 +#define __NR_ioprio_set 30 +#define __NR_ioprio_get 31 +#define __NR_flock 
32 +#define __NR_mknodat 33 +#define __NR_mkdirat 34 +#define __NR_unlinkat 35 +#define __NR_symlinkat 36 +#define __NR_linkat 37 +#define __NR_umount2 39 +#define __NR_mount 40 +#define __NR_pivot_root 41 +#define __NR_nfsservctl 42 +#define __NR_statfs 43 +#define __NR_fstatfs 44 +#define __NR_truncate 45 +#define __NR_ftruncate 46 +#define __NR_fallocate 47 +#define __NR_faccessat 48 +#define __NR_chdir 49 +#define __NR_fchdir 50 +#define __NR_chroot 51 +#define __NR_fchmod 52 +#define __NR_fchmodat 53 +#define __NR_fchownat 54 +#define __NR_fchown 55 +#define __NR_openat 56 +#define __NR_close 57 +#define __NR_vhangup 58 +#define __NR_pipe2 59 +#define __NR_quotactl 60 +#define __NR_getdents64 61 +#define __NR_lseek 62 +#define __NR_read 63 +#define __NR_write 64 +#define __NR_readv 65 +#define __NR_writev 66 +#define __NR_pread64 67 +#define __NR_pwrite64 68 +#define __NR_preadv 69 +#define __NR_pwritev 70 +#define __NR_sendfile 71 +#define __NR_pselect6 72 +#define __NR_ppoll 73 +#define __NR_signalfd4 74 +#define __NR_vmsplice 75 +#define __NR_splice 76 +#define __NR_tee 77 +#define __NR_readlinkat 78 +#define __NR_newfstatat 79 +#define __NR_fstat 80 +#define __NR_sync 81 +#define __NR_fsync 82 +#define __NR_fdatasync 83 +#define __NR_sync_file_range 84 +#define __NR_timerfd_create 85 +#define __NR_timerfd_settime 86 +#define __NR_timerfd_gettime 87 +#define __NR_utimensat 88 +#define __NR_acct 89 +#define __NR_capget 90 +#define __NR_capset 91 +#define __NR_personality 92 +#define __NR_exit 93 +#define __NR_exit_group 94 +#define __NR_waitid 95 +#define __NR_set_tid_address 96 +#define __NR_unshare 97 +#define __NR_futex 98 +#define __NR_set_robust_list 99 +#define __NR_get_robust_list 100 +#define __NR_nanosleep 101 +#define __NR_getitimer 102 +#define __NR_setitimer 103 +#define __NR_kexec_load 104 +#define __NR_init_module 105 +#define __NR_delete_module 106 +#define __NR_timer_create 107 +#define __NR_timer_gettime 108 +#define __NR_timer_getoverrun 109 +#define __NR_timer_settime 110 +#define __NR_timer_delete 111 +#define __NR_clock_settime 112 +#define __NR_clock_gettime 113 +#define __NR_clock_getres 114 +#define __NR_clock_nanosleep 115 +#define __NR_syslog 116 +#define __NR_ptrace 117 +#define __NR_sched_setparam 118 +#define __NR_sched_setscheduler 119 +#define __NR_sched_getscheduler 120 +#define __NR_sched_getparam 121 +#define __NR_sched_setaffinity 122 +#define __NR_sched_getaffinity 123 +#define __NR_sched_yield 124 +#define __NR_sched_get_priority_max 125 +#define __NR_sched_get_priority_min 126 +#define __NR_sched_rr_get_interval 127 +#define __NR_restart_syscall 128 +#define __NR_kill 129 +#define __NR_tkill 130 +#define __NR_tgkill 131 +#define __NR_sigaltstack 132 +#define __NR_rt_sigsuspend 133 +#define __NR_rt_sigaction 134 +#define __NR_rt_sigprocmask 135 +#define __NR_rt_sigpending 136 +#define __NR_rt_sigtimedwait 137 +#define __NR_rt_sigqueueinfo 138 +#define __NR_rt_sigreturn 139 +#define __NR_setpriority 140 +#define __NR_getpriority 141 +#define __NR_reboot 142 +#define __NR_setregid 143 +#define __NR_setgid 144 +#define __NR_setreuid 145 +#define __NR_setuid 146 +#define __NR_setresuid 147 +#define __NR_getresuid 148 +#define __NR_setresgid 149 +#define __NR_getresgid 150 +#define __NR_setfsuid 151 +#define __NR_setfsgid 152 +#define __NR_times 153 +#define __NR_setpgid 154 +#define __NR_getpgid 155 +#define __NR_getsid 156 +#define __NR_setsid 157 +#define __NR_getgroups 158 +#define __NR_setgroups 159 +#define __NR_uname 160 +#define 
__NR_sethostname 161 +#define __NR_setdomainname 162 +#define __NR_getrlimit 163 +#define __NR_setrlimit 164 +#define __NR_getrusage 165 +#define __NR_umask 166 +#define __NR_prctl 167 +#define __NR_getcpu 168 +#define __NR_gettimeofday 169 +#define __NR_settimeofday 170 +#define __NR_adjtimex 171 +#define __NR_getpid 172 +#define __NR_getppid 173 +#define __NR_getuid 174 +#define __NR_geteuid 175 +#define __NR_getgid 176 +#define __NR_getegid 177 +#define __NR_gettid 178 +#define __NR_sysinfo 179 +#define __NR_mq_open 180 +#define __NR_mq_unlink 181 +#define __NR_mq_timedsend 182 +#define __NR_mq_timedreceive 183 +#define __NR_mq_notify 184 +#define __NR_mq_getsetattr 185 +#define __NR_msgget 186 +#define __NR_msgctl 187 +#define __NR_msgrcv 188 +#define __NR_msgsnd 189 +#define __NR_semget 190 +#define __NR_semctl 191 +#define __NR_semtimedop 192 +#define __NR_semop 193 +#define __NR_shmget 194 +#define __NR_shmctl 195 +#define __NR_shmat 196 +#define __NR_shmdt 197 +#define __NR_socket 198 +#define __NR_socketpair 199 +#define __NR_bind 200 +#define __NR_listen 201 +#define __NR_accept 202 +#define __NR_connect 203 +#define __NR_getsockname 204 +#define __NR_getpeername 205 +#define __NR_sendto 206 +#define __NR_recvfrom 207 +#define __NR_setsockopt 208 +#define __NR_getsockopt 209 +#define __NR_shutdown 210 +#define __NR_sendmsg 211 +#define __NR_recvmsg 212 +#define __NR_readahead 213 +#define __NR_brk 214 +#define __NR_munmap 215 +#define __NR_mremap 216 +#define __NR_add_key 217 +#define __NR_request_key 218 +#define __NR_keyctl 219 +#define __NR_clone 220 +#define __NR_execve 221 +#define __NR_mmap 222 +#define __NR_fadvise64 223 +#define __NR_swapon 224 +#define __NR_swapoff 225 +#define __NR_mprotect 226 +#define __NR_msync 227 +#define __NR_mlock 228 +#define __NR_munlock 229 +#define __NR_mlockall 230 +#define __NR_munlockall 231 +#define __NR_mincore 232 +#define __NR_madvise 233 +#define __NR_remap_file_pages 234 +#define __NR_mbind 235 +#define __NR_get_mempolicy 236 +#define __NR_set_mempolicy 237 +#define __NR_migrate_pages 238 +#define __NR_move_pages 239 +#define __NR_rt_tgsigqueueinfo 240 +#define __NR_perf_event_open 241 +#define __NR_accept4 242 +#define __NR_recvmmsg 243 +#define __NR_riscv_hwprobe 258 +#define __NR_riscv_flush_icache 259 +#define __NR_wait4 260 +#define __NR_prlimit64 261 +#define __NR_fanotify_init 262 +#define __NR_fanotify_mark 263 +#define __NR_name_to_handle_at 264 +#define __NR_open_by_handle_at 265 +#define __NR_clock_adjtime 266 +#define __NR_syncfs 267 +#define __NR_setns 268 +#define __NR_sendmmsg 269 +#define __NR_process_vm_readv 270 +#define __NR_process_vm_writev 271 +#define __NR_kcmp 272 +#define __NR_finit_module 273 +#define __NR_sched_setattr 274 +#define __NR_sched_getattr 275 +#define __NR_renameat2 276 +#define __NR_seccomp 277 +#define __NR_getrandom 278 +#define __NR_memfd_create 279 +#define __NR_bpf 280 +#define __NR_execveat 281 +#define __NR_userfaultfd 282 +#define __NR_membarrier 283 +#define __NR_mlock2 284 +#define __NR_copy_file_range 285 +#define __NR_preadv2 286 +#define __NR_pwritev2 287 +#define __NR_pkey_mprotect 288 +#define __NR_pkey_alloc 289 +#define __NR_pkey_free 290 +#define __NR_statx 291 +#define __NR_io_pgetevents 292 +#define __NR_rseq 293 +#define __NR_kexec_file_load 294 +#define __NR_pidfd_send_signal 424 +#define __NR_io_uring_setup 425 +#define __NR_io_uring_enter 426 +#define __NR_io_uring_register 427 +#define __NR_open_tree 428 +#define __NR_move_mount 429 +#define __NR_fsopen 430 +#define 
__NR_fsconfig 431 +#define __NR_fsmount 432 +#define __NR_fspick 433 +#define __NR_pidfd_open 434 +#define __NR_clone3 435 +#define __NR_close_range 436 +#define __NR_openat2 437 +#define __NR_pidfd_getfd 438 +#define __NR_faccessat2 439 +#define __NR_process_madvise 440 +#define __NR_epoll_pwait2 441 +#define __NR_mount_setattr 442 +#define __NR_quotactl_fd 443 +#define __NR_landlock_create_ruleset 444 +#define __NR_landlock_add_rule 445 +#define __NR_landlock_restrict_self 446 +#define __NR_memfd_secret 447 +#define __NR_process_mrelease 448 +#define __NR_futex_waitv 449 +#define __NR_set_mempolicy_home_node 450 +#define __NR_cachestat 451 +#define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 +#define __NR_statmount 457 +#define __NR_listmount 458 +#define __NR_lsm_get_self_attr 459 +#define __NR_lsm_set_self_attr 460 +#define __NR_lsm_list_modules 461 +#define __NR_mseal 462 +#define __NR_setxattrat 463 +#define __NR_getxattrat 464 +#define __NR_listxattrat 465 +#define __NR_removexattrat 466 +#define __NR_open_tree_attr 467 + + +#endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-s390/kvm.h b/linux-headers/asm-s390/kvm.h index 684c4e1205d..ab5a6bce590 100644 --- a/linux-headers/asm-s390/kvm.h +++ b/linux-headers/asm-s390/kvm.h @@ -469,7 +469,8 @@ struct kvm_s390_vm_cpu_subfunc { __u8 kdsa[16]; /* with MSA9 */ __u8 sortl[32]; /* with STFLE.150 */ __u8 dfltcc[32]; /* with STFLE.151 */ - __u8 reserved[1728]; + __u8 pfcr[16]; /* with STFLE.201 */ + __u8 reserved[1712]; }; #define KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST 6 diff --git a/linux-headers/asm-s390/unistd_32.h b/linux-headers/asm-s390/unistd_32.h index 7706c21b87e..85eedbd18e3 100644 --- a/linux-headers/asm-s390/unistd_32.h +++ b/linux-headers/asm-s390/unistd_32.h @@ -435,5 +435,10 @@ #define __NR_lsm_set_self_attr 460 #define __NR_lsm_list_modules 461 #define __NR_mseal 462 +#define __NR_setxattrat 463 +#define __NR_getxattrat 464 +#define __NR_listxattrat 465 +#define __NR_removexattrat 466 +#define __NR_open_tree_attr 467 #endif /* _ASM_S390_UNISTD_32_H */ diff --git a/linux-headers/asm-s390/unistd_64.h b/linux-headers/asm-s390/unistd_64.h index 62082d592d3..c03b1b97010 100644 --- a/linux-headers/asm-s390/unistd_64.h +++ b/linux-headers/asm-s390/unistd_64.h @@ -383,5 +383,10 @@ #define __NR_lsm_set_self_attr 460 #define __NR_lsm_list_modules 461 #define __NR_mseal 462 +#define __NR_setxattrat 463 +#define __NR_getxattrat 464 +#define __NR_listxattrat 465 +#define __NR_removexattrat 466 +#define __NR_open_tree_attr 467 #endif /* _ASM_S390_UNISTD_64_H */ diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h index 1c8f9182348..f0c1a730d9c 100644 --- a/linux-headers/asm-x86/kvm.h +++ b/linux-headers/asm-x86/kvm.h @@ -106,6 +106,7 @@ struct kvm_ioapic_state { #define KVM_RUN_X86_SMM (1 << 0) #define KVM_RUN_X86_BUS_LOCK (1 << 1) +#define KVM_RUN_X86_GUEST_MODE (1 << 2) /* for KVM_GET_REGS and KVM_SET_REGS */ struct kvm_regs { @@ -436,6 +437,9 @@ struct kvm_sync_regs { #define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4) #define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5) #define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6) +#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7) +#define KVM_X86_QUIRK_STUFF_FEATURE_MSRS (1 << 8) +#define KVM_X86_QUIRK_IGNORE_GUEST_PAT (1 << 9) #define KVM_STATE_NESTED_FORMAT_VMX 0 #define KVM_STATE_NESTED_FORMAT_SVM 1 @@ -554,6 +558,9 @@ struct kvm_x86_mce { #define 
KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7) #define KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA (1 << 8) +#define KVM_XEN_MSR_MIN_INDEX 0x40000000u +#define KVM_XEN_MSR_MAX_INDEX 0x4fffffffu + struct kvm_xen_hvm_config { __u32 flags; __u32 msr; @@ -836,6 +843,7 @@ struct kvm_sev_snp_launch_start { }; /* Kept in sync with firmware values for simplicity. */ +#define KVM_SEV_PAGE_TYPE_INVALID 0x0 #define KVM_SEV_SNP_PAGE_TYPE_NORMAL 0x1 #define KVM_SEV_SNP_PAGE_TYPE_ZERO 0x3 #define KVM_SEV_SNP_PAGE_TYPE_UNMEASURED 0x4 @@ -920,5 +928,82 @@ struct kvm_hyperv_eventfd { #define KVM_X86_SEV_VM 2 #define KVM_X86_SEV_ES_VM 3 #define KVM_X86_SNP_VM 4 +#define KVM_X86_TDX_VM 5 + +/* Trust Domain eXtension sub-ioctl() commands. */ +enum kvm_tdx_cmd_id { + KVM_TDX_CAPABILITIES = 0, + KVM_TDX_INIT_VM, + KVM_TDX_INIT_VCPU, + KVM_TDX_INIT_MEM_REGION, + KVM_TDX_FINALIZE_VM, + KVM_TDX_GET_CPUID, + + KVM_TDX_CMD_NR_MAX, +}; + +struct kvm_tdx_cmd { + /* enum kvm_tdx_cmd_id */ + __u32 id; + /* flags for sub-commend. If sub-command doesn't use this, set zero. */ + __u32 flags; + /* + * data for each sub-command. An immediate or a pointer to the actual + * data in process virtual address. If sub-command doesn't use it, + * set zero. + */ + __u64 data; + /* + * Auxiliary error code. The sub-command may return TDX SEAMCALL + * status code in addition to -Exxx. + */ + __u64 hw_error; +}; + +struct kvm_tdx_capabilities { + __u64 supported_attrs; + __u64 supported_xfam; + + __u64 kernel_tdvmcallinfo_1_r11; + __u64 user_tdvmcallinfo_1_r11; + __u64 kernel_tdvmcallinfo_1_r12; + __u64 user_tdvmcallinfo_1_r12; + + __u64 reserved[250]; + + /* Configurable CPUID bits for userspace */ + struct kvm_cpuid2 cpuid; +}; + +struct kvm_tdx_init_vm { + __u64 attributes; + __u64 xfam; + __u64 mrconfigid[6]; /* sha384 digest */ + __u64 mrowner[6]; /* sha384 digest */ + __u64 mrownerconfig[6]; /* sha384 digest */ + + /* The total space for TD_PARAMS before the CPUIDs is 256 bytes */ + __u64 reserved[12]; + + /* + * Call KVM_TDX_INIT_VM before vcpu creation, thus before + * KVM_SET_CPUID2. + * This configuration supersedes KVM_SET_CPUID2s for VCPUs because the + * TDX module directly virtualizes those CPUIDs without VMM. The user + * space VMM, e.g. qemu, should make KVM_SET_CPUID2 consistent with + * those values. If it doesn't, KVM may have wrong idea of vCPUIDs of + * the guest, and KVM may wrongly emulate CPUIDs or MSRs that the TDX + * module doesn't virtualize. 
+ */ + struct kvm_cpuid2 cpuid; +}; + +#define KVM_TDX_MEASURE_MEMORY_REGION _BITULL(0) + +struct kvm_tdx_init_mem_region { + __u64 source_addr; + __u64 gpa; + __u64 nr_pages; +}; #endif /* _ASM_X86_KVM_H */ diff --git a/linux-headers/asm-x86/mman.h b/linux-headers/asm-x86/mman.h index 46cdc941f95..ac1e6277212 100644 --- a/linux-headers/asm-x86/mman.h +++ b/linux-headers/asm-x86/mman.h @@ -5,9 +5,6 @@ #define MAP_32BIT 0x40 /* only give out 32bit addresses */ #define MAP_ABOVE4G 0x80 /* only map above 4GB */ -/* Flags for map_shadow_stack(2) */ -#define SHADOW_STACK_SET_TOKEN (1ULL << 0) /* Set up a restore token in the shadow stack */ - #include #endif /* _ASM_X86_MMAN_H */ diff --git a/linux-headers/asm-x86/unistd_32.h b/linux-headers/asm-x86/unistd_32.h index fb7b8b169bf..491d6b4eb68 100644 --- a/linux-headers/asm-x86/unistd_32.h +++ b/linux-headers/asm-x86/unistd_32.h @@ -453,6 +453,11 @@ #define __NR_lsm_set_self_attr 460 #define __NR_lsm_list_modules 461 #define __NR_mseal 462 +#define __NR_setxattrat 463 +#define __NR_getxattrat 464 +#define __NR_listxattrat 465 +#define __NR_removexattrat 466 +#define __NR_open_tree_attr 467 #endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-x86/unistd_64.h b/linux-headers/asm-x86/unistd_64.h index da439afee1c..7cf88bf9bda 100644 --- a/linux-headers/asm-x86/unistd_64.h +++ b/linux-headers/asm-x86/unistd_64.h @@ -336,6 +336,7 @@ #define __NR_statx 332 #define __NR_io_pgetevents 333 #define __NR_rseq 334 +#define __NR_uretprobe 335 #define __NR_pidfd_send_signal 424 #define __NR_io_uring_setup 425 #define __NR_io_uring_enter 426 @@ -375,6 +376,11 @@ #define __NR_lsm_set_self_attr 460 #define __NR_lsm_list_modules 461 #define __NR_mseal 462 +#define __NR_setxattrat 463 +#define __NR_getxattrat 464 +#define __NR_listxattrat 465 +#define __NR_removexattrat 466 +#define __NR_open_tree_attr 467 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-x86/unistd_x32.h b/linux-headers/asm-x86/unistd_x32.h index 4fcb607c724..82959111e6a 100644 --- a/linux-headers/asm-x86/unistd_x32.h +++ b/linux-headers/asm-x86/unistd_x32.h @@ -289,6 +289,7 @@ #define __NR_statx (__X32_SYSCALL_BIT + 332) #define __NR_io_pgetevents (__X32_SYSCALL_BIT + 333) #define __NR_rseq (__X32_SYSCALL_BIT + 334) +#define __NR_uretprobe (__X32_SYSCALL_BIT + 335) #define __NR_pidfd_send_signal (__X32_SYSCALL_BIT + 424) #define __NR_io_uring_setup (__X32_SYSCALL_BIT + 425) #define __NR_io_uring_enter (__X32_SYSCALL_BIT + 426) @@ -328,6 +329,11 @@ #define __NR_lsm_set_self_attr (__X32_SYSCALL_BIT + 460) #define __NR_lsm_list_modules (__X32_SYSCALL_BIT + 461) #define __NR_mseal (__X32_SYSCALL_BIT + 462) +#define __NR_setxattrat (__X32_SYSCALL_BIT + 463) +#define __NR_getxattrat (__X32_SYSCALL_BIT + 464) +#define __NR_listxattrat (__X32_SYSCALL_BIT + 465) +#define __NR_removexattrat (__X32_SYSCALL_BIT + 466) +#define __NR_open_tree_attr (__X32_SYSCALL_BIT + 467) #define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512) #define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513) #define __NR_ioctl (__X32_SYSCALL_BIT + 514) diff --git a/linux-headers/linux/bits.h b/linux-headers/linux/bits.h index d9897771be8..9243f389751 100644 --- a/linux-headers/linux/bits.h +++ b/linux-headers/linux/bits.h @@ -4,12 +4,11 @@ #ifndef _LINUX_BITS_H #define _LINUX_BITS_H -#define __GENMASK(h, l) \ - (((~_UL(0)) - (_UL(1) << (l)) + 1) & \ - (~_UL(0) >> (__BITS_PER_LONG - 1 - (h)))) +#define __GENMASK(h, l) (((~_UL(0)) << (l)) & (~_UL(0) >> (__BITS_PER_LONG - 1 - (h)))) -#define __GENMASK_ULL(h, l) \ - 
(((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \ - (~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h)))) +#define __GENMASK_ULL(h, l) (((~_ULL(0)) << (l)) & (~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h)))) + +#define __GENMASK_U128(h, l) \ + ((_BIT128((h)) << 1) - (_BIT128(l))) #endif /* _LINUX_BITS_H */ diff --git a/linux-headers/linux/const.h b/linux-headers/linux/const.h index 1eb84b5087f..95ede233420 100644 --- a/linux-headers/linux/const.h +++ b/linux-headers/linux/const.h @@ -28,6 +28,23 @@ #define _BITUL(x) (_UL(1) << (x)) #define _BITULL(x) (_ULL(1) << (x)) +#if !defined(__ASSEMBLY__) +/* + * Missing __asm__ support + * + * __BIT128() would not work in the __asm__ code, as it shifts an + * 'unsigned __int128' data type as direct representation of + * 128 bit constants is not supported in the gcc compiler, as + * they get silently truncated. + * + * TODO: Please revisit this implementation when gcc compiler + * starts representing 128 bit constants directly like long + * and unsigned long etc. Subsequently drop the comment for + * GENMASK_U128() which would then start supporting __asm__ code. + */ +#define _BIT128(x) ((unsigned __int128)(1) << (x)) +#endif + #define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1) #define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) diff --git a/linux-headers/linux/egm.h b/linux-headers/linux/egm.h new file mode 100644 index 00000000000..aed9c528bd5 --- /dev/null +++ b/linux-headers/linux/egm.h @@ -0,0 +1,20 @@ +#ifndef EGM_H +#define EGM_H + +#define EGM_TYPE ('E') + +struct egm_bad_pages_info { + __aligned_u64 offset; + __aligned_u64 size; +}; + +struct egm_bad_pages_list { + __u32 argsz; + /* out */ + __u32 count; + /* out */ + struct egm_bad_pages_info bad_pages[]; +}; +#define EGM_BAD_PAGES_LIST _IO(EGM_TYPE, 100) + +#endif /* EGM_H */ diff --git a/linux-headers/linux/iommufd.h b/linux-headers/linux/iommufd.h index 72e8f4b9dd0..2bbbca4a1d5 100644 --- a/linux-headers/linux/iommufd.h +++ b/linux-headers/linux/iommufd.h @@ -4,8 +4,8 @@ #ifndef _IOMMUFD_H #define _IOMMUFD_H -#include #include +#include #define IOMMUFD_TYPE (';') @@ -37,19 +37,26 @@ enum { IOMMUFD_CMD_BASE = 0x80, IOMMUFD_CMD_DESTROY = IOMMUFD_CMD_BASE, - IOMMUFD_CMD_IOAS_ALLOC, - IOMMUFD_CMD_IOAS_ALLOW_IOVAS, - IOMMUFD_CMD_IOAS_COPY, - IOMMUFD_CMD_IOAS_IOVA_RANGES, - IOMMUFD_CMD_IOAS_MAP, - IOMMUFD_CMD_IOAS_UNMAP, - IOMMUFD_CMD_OPTION, - IOMMUFD_CMD_VFIO_IOAS, - IOMMUFD_CMD_HWPT_ALLOC, - IOMMUFD_CMD_GET_HW_INFO, - IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING, - IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP, - IOMMUFD_CMD_HWPT_INVALIDATE, + IOMMUFD_CMD_IOAS_ALLOC = 0x81, + IOMMUFD_CMD_IOAS_ALLOW_IOVAS = 0x82, + IOMMUFD_CMD_IOAS_COPY = 0x83, + IOMMUFD_CMD_IOAS_IOVA_RANGES = 0x84, + IOMMUFD_CMD_IOAS_MAP = 0x85, + IOMMUFD_CMD_IOAS_UNMAP = 0x86, + IOMMUFD_CMD_OPTION = 0x87, + IOMMUFD_CMD_VFIO_IOAS = 0x88, + IOMMUFD_CMD_HWPT_ALLOC = 0x89, + IOMMUFD_CMD_GET_HW_INFO = 0x8a, + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING = 0x8b, + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP = 0x8c, + IOMMUFD_CMD_HWPT_INVALIDATE = 0x8d, + IOMMUFD_CMD_FAULT_QUEUE_ALLOC = 0x8e, + IOMMUFD_CMD_IOAS_MAP_FILE = 0x8f, + IOMMUFD_CMD_VIOMMU_ALLOC = 0x90, + IOMMUFD_CMD_VDEVICE_ALLOC = 0x91, + IOMMUFD_CMD_IOAS_CHANGE_PROCESS = 0x92, + IOMMUFD_CMD_VEVENTQ_ALLOC = 0x93, + IOMMUFD_CMD_HW_QUEUE_ALLOC = 0x94, }; /** @@ -212,6 +219,30 @@ struct iommu_ioas_map { }; #define IOMMU_IOAS_MAP _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_MAP) +/** + * struct iommu_ioas_map_file - ioctl(IOMMU_IOAS_MAP_FILE) + * @size: sizeof(struct iommu_ioas_map_file) + * @flags: 
same as for iommu_ioas_map + * @ioas_id: same as for iommu_ioas_map + * @fd: the memfd to map + * @start: byte offset from start of file to map from + * @length: same as for iommu_ioas_map + * @iova: same as for iommu_ioas_map + * + * Set an IOVA mapping from a memfd file. All other arguments and semantics + * match those of IOMMU_IOAS_MAP. + */ +struct iommu_ioas_map_file { + __u32 size; + __u32 flags; + __u32 ioas_id; + __s32 fd; + __aligned_u64 start; + __aligned_u64 length; + __aligned_u64 iova; +}; +#define IOMMU_IOAS_MAP_FILE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_MAP_FILE) + /** * struct iommu_ioas_copy - ioctl(IOMMU_IOAS_COPY) * @size: sizeof(struct iommu_ioas_copy) @@ -268,7 +299,7 @@ struct iommu_ioas_unmap { * ioctl(IOMMU_OPTION_HUGE_PAGES) * @IOMMU_OPTION_RLIMIT_MODE: * Change how RLIMIT_MEMLOCK accounting works. The caller must have privilege - * to invoke this. Value 0 (default) is user based accouting, 1 uses process + * to invoke this. Value 0 (default) is user based accounting, 1 uses process * based accounting. Global option, object_id must be 0 * @IOMMU_OPTION_HUGE_PAGES: * Value 1 (default) allows contiguous pages to be combined when generating @@ -356,10 +387,24 @@ struct iommu_vfio_ioas { * the parent HWPT in a nesting configuration. * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is * enforced on device attachment + * @IOMMU_HWPT_FAULT_ID_VALID: The fault_id field of hwpt allocation data is + * valid. + * @IOMMU_HWPT_ALLOC_PASID: Requests a domain that can be used with PASID. The + * domain can be attached to any PASID on the device. + * Any domain attached to the non-PASID part of the + * device must also be flagged, otherwise attaching a + * PASID will blocked. + * For the user that wants to attach PASID, ioas is + * not recommended for both the non-PASID part + * and PASID part of the device. + * If IOMMU does not support PASID it will return + * error (-EOPNOTSUPP). */ enum iommufd_hwpt_alloc_flags { IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1, + IOMMU_HWPT_FAULT_ID_VALID = 1 << 2, + IOMMU_HWPT_ALLOC_PASID = 1 << 3, }; /** @@ -390,14 +435,36 @@ struct iommu_hwpt_vtd_s1 { __u32 __reserved; }; +/** + * struct iommu_hwpt_arm_smmuv3 - ARM SMMUv3 nested STE + * (IOMMU_HWPT_DATA_ARM_SMMUV3) + * + * @ste: The first two double words of the user space Stream Table Entry for + * the translation. Must be little-endian. + * Allowed fields: (Refer to "5.2 Stream Table Entry" in SMMUv3 HW Spec) + * - word-0: V, Cfg, S1Fmt, S1ContextPtr, S1CDMax + * - word-1: EATS, S1DSS, S1CIR, S1COR, S1CSH, S1STALLD + * + * -EIO will be returned if @ste is not legal or contains any non-allowed field. + * Cfg can be used to select a S1, Bypass or Abort configuration. A Bypass + * nested domain will translate the same as the nesting parent. The S1 will + * install a Context Descriptor Table pointing at userspace memory translated + * by the nesting parent. 
+ */ +struct iommu_hwpt_arm_smmuv3 { + __aligned_le64 ste[2]; +}; + /** * enum iommu_hwpt_data_type - IOMMU HWPT Data Type * @IOMMU_HWPT_DATA_NONE: no data * @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table + * @IOMMU_HWPT_DATA_ARM_SMMUV3: ARM SMMUv3 Context Descriptor Table */ enum iommu_hwpt_data_type { - IOMMU_HWPT_DATA_NONE, - IOMMU_HWPT_DATA_VTD_S1, + IOMMU_HWPT_DATA_NONE = 0, + IOMMU_HWPT_DATA_VTD_S1 = 1, + IOMMU_HWPT_DATA_ARM_SMMUV3 = 2, }; /** @@ -405,12 +472,15 @@ enum iommu_hwpt_data_type { * @size: sizeof(struct iommu_hwpt_alloc) * @flags: Combination of enum iommufd_hwpt_alloc_flags * @dev_id: The device to allocate this HWPT for - * @pt_id: The IOAS or HWPT to connect this HWPT to + * @pt_id: The IOAS or HWPT or vIOMMU to connect this HWPT to * @out_hwpt_id: The ID of the new HWPT * @__reserved: Must be 0 * @data_type: One of enum iommu_hwpt_data_type * @data_len: Length of the type specific data * @data_uptr: User pointer to the type specific data + * @fault_id: The ID of IOMMUFD_FAULT object. Valid only if flags field of + * IOMMU_HWPT_FAULT_ID_VALID is set. + * @__reserved2: Padding to 64-bit alignment. Must be 0. * * Explicitly allocate a hardware page table object. This is the same object * type that is returned by iommufd_device_attach() and represents the @@ -421,11 +491,13 @@ enum iommu_hwpt_data_type { * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags. * - * A user-managed nested HWPT will be created from a given parent HWPT via - * @pt_id, in which the parent HWPT must be allocated previously via the - * same ioctl from a given IOAS (@pt_id). In this case, the @data_type - * must be set to a pre-defined type corresponding to an I/O page table - * type supported by the underlying IOMMU hardware. + * A user-managed nested HWPT will be created from a given vIOMMU (wrapping a + * parent HWPT) or a parent HWPT via @pt_id, in which the parent HWPT must be + * allocated previously via the same ioctl from a given IOAS (@pt_id). In this + * case, the @data_type must be set to a pre-defined type corresponding to an + * I/O page table type supported by the underlying IOMMU hardware. The device + * via @dev_id and the vIOMMU via @pt_id must be associated to the same IOMMU + * instance. * * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr @@ -441,6 +513,8 @@ struct iommu_hwpt_alloc { __u32 data_type; __u32 data_len; __aligned_u64 data_uptr; + __u32 fault_id; + __u32 __reserved2; }; #define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC) @@ -475,15 +549,86 @@ struct iommu_hw_info_vtd { __aligned_u64 ecap_reg; }; +/** + * struct iommu_hw_info_arm_smmuv3 - ARM SMMUv3 hardware information + * (IOMMU_HW_INFO_TYPE_ARM_SMMUV3) + * + * @flags: Must be set to 0 + * @__reserved: Must be 0 + * @idr: Implemented features for ARM SMMU Non-secure programming interface + * @iidr: Information about the implementation and implementer of ARM SMMU, + * and architecture version supported + * @aidr: ARM SMMU architecture version + * + * For the details of @idr, @iidr and @aidr, please refer to the chapters + * from 6.3.1 to 6.3.6 in the SMMUv3 Spec. + * + * This reports the raw HW capability, and not all bits are meaningful to be + * read by userspace. 
Only the following fields should be used: + * + * idr[0]: ST_LEVEL, TERM_MODEL, STALL_MODEL, TTENDIAN , CD2L, ASID16, TTF + * idr[1]: SIDSIZE, SSIDSIZE + * idr[3]: BBML, RIL + * idr[5]: VAX, GRAN64K, GRAN16K, GRAN4K + * + * - S1P should be assumed to be true if a NESTED HWPT can be created + * - VFIO/iommufd only support platforms with COHACC, it should be assumed to be + * true. + * - ATS is a per-device property. If the VMM describes any devices as ATS + * capable in ACPI/DT it should set the corresponding idr. + * + * This list may expand in future (eg E0PD, AIE, PBHA, D128, DS etc). It is + * important that VMMs do not read bits outside the list to allow for + * compatibility with future kernels. Several features in the SMMUv3 + * architecture are not currently supported by the kernel for nesting: HTTU, + * BTM, MPAM and others. + */ +struct iommu_hw_info_arm_smmuv3 { + __u32 flags; + __u32 __reserved; + __u32 idr[6]; + __u32 iidr; + __u32 aidr; +}; + +/** + * struct iommu_hw_info_tegra241_cmdqv - NVIDIA Tegra241 CMDQV Hardware + * Information (IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV) + * + * @flags: Must be 0 + * @version: Version number for the CMDQ-V HW for PARAM bits[03:00] + * @log2vcmdqs: Log2 of the total number of VCMDQs for PARAM bits[07:04] + * @log2vsids: Log2 of the total number of SID replacements for PARAM bits[15:12] + * @__reserved: Must be 0 + * + * VMM can use these fields directly in its emulated global PARAM register. Note + * that only one Virtual Interface (VINTF) should be exposed to a VM, i.e. PARAM + * bits[11:08] should be set to 0 for log2 of the total number of VINTFs. + */ +struct iommu_hw_info_tegra241_cmdqv { + __u32 flags; + __u8 version; + __u8 log2vcmdqs; + __u8 log2vsids; + __u8 __reserved; +}; + /** * enum iommu_hw_info_type - IOMMU Hardware Info Types - * @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware + * @IOMMU_HW_INFO_TYPE_NONE: Output by the drivers that do not report hardware * info + * @IOMMU_HW_INFO_TYPE_DEFAULT: Input to request for a default type * @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type + * @IOMMU_HW_INFO_TYPE_ARM_SMMUV3: ARM SMMUv3 iommu info type + * @IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM + * SMMUv3) info type */ enum iommu_hw_info_type { - IOMMU_HW_INFO_TYPE_NONE, - IOMMU_HW_INFO_TYPE_INTEL_VTD, + IOMMU_HW_INFO_TYPE_NONE = 0, + IOMMU_HW_INFO_TYPE_DEFAULT = 0, + IOMMU_HW_INFO_TYPE_INTEL_VTD = 1, + IOMMU_HW_INFO_TYPE_ARM_SMMUV3 = 2, + IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV = 3, }; /** @@ -495,9 +640,26 @@ enum iommu_hw_info_type { * IOMMU_HWPT_GET_DIRTY_BITMAP * IOMMU_HWPT_SET_DIRTY_TRACKING * + * @IOMMU_HW_CAP_PCI_PASID_EXEC: Execute Permission Supported, user ignores it + * when the struct + * iommu_hw_info::out_max_pasid_log2 is zero. + * @IOMMU_HW_CAP_PCI_PASID_PRIV: Privileged Mode Supported, user ignores it + * when the struct + * iommu_hw_info::out_max_pasid_log2 is zero. 
*/ enum iommufd_hw_capabilities { IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0, + IOMMU_HW_CAP_PCI_PASID_EXEC = 1 << 1, + IOMMU_HW_CAP_PCI_PASID_PRIV = 1 << 2, +}; + +/** + * enum iommufd_hw_info_flags - Flags for iommu_hw_info + * @IOMMU_HW_INFO_FLAG_INPUT_TYPE: If set, @in_data_type carries an input type + * for user space to request for a specific info + */ +enum iommufd_hw_info_flags { + IOMMU_HW_INFO_FLAG_INPUT_TYPE = 1 << 0, }; /** @@ -509,10 +671,19 @@ enum iommufd_hw_capabilities { * data that kernel supports * @data_uptr: User pointer to a user-space buffer used by the kernel to fill * the iommu type specific hardware information data + * @in_data_type: This shares the same field with @out_data_type, making it be + * a bidirectional field. When IOMMU_HW_INFO_FLAG_INPUT_TYPE is + * set, an input type carried via this @in_data_type field will + * be valid, requesting for the info data to the given type. If + * IOMMU_HW_INFO_FLAG_INPUT_TYPE is unset, any input value will + * be seen as IOMMU_HW_INFO_TYPE_DEFAULT * @out_data_type: Output the iommu hardware info type as defined in the enum * iommu_hw_info_type. * @out_capabilities: Output the generic iommu capability info type as defined * in the enum iommu_hw_capabilities. + * @out_max_pasid_log2: Output the width of PASIDs. 0 means no PASID support. + * PCI devices turn to out_capabilities to check if the + * specific capabilities is supported or not. * @__reserved: Must be 0 * * Query an iommu type specific hardware information data from an iommu behind @@ -535,8 +706,12 @@ struct iommu_hw_info { __u32 dev_id; __u32 data_len; __aligned_u64 data_uptr; - __u32 out_data_type; - __u32 __reserved; + union { + __u32 in_data_type; + __u32 out_data_type; + }; + __u8 out_max_pasid_log2; + __u8 __reserved[3]; __aligned_u64 out_capabilities; }; #define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO) @@ -618,9 +793,11 @@ struct iommu_hwpt_get_dirty_bitmap { * enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation * Data Type * @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1 + * @IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3: Invalidation data for ARM SMMUv3 */ enum iommu_hwpt_invalidate_data_type { - IOMMU_HWPT_INVALIDATE_DATA_VTD_S1, + IOMMU_HWPT_INVALIDATE_DATA_VTD_S1 = 0, + IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3 = 1, }; /** @@ -659,10 +836,32 @@ struct iommu_hwpt_vtd_s1_invalidate { __u32 __reserved; }; +/** + * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation + * (IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3) + * @cmd: 128-bit cache invalidation command that runs in SMMU CMDQ. + * Must be little-endian. + * + * Supported command list only when passing in a vIOMMU via @hwpt_id: + * CMDQ_OP_TLBI_NSNH_ALL + * CMDQ_OP_TLBI_NH_VA + * CMDQ_OP_TLBI_NH_VAA + * CMDQ_OP_TLBI_NH_ALL + * CMDQ_OP_TLBI_NH_ASID + * CMDQ_OP_ATC_INV + * CMDQ_OP_CFGI_CD + * CMDQ_OP_CFGI_CD_ALL + * + * -EIO will be returned if the command is not supported. + */ +struct iommu_viommu_arm_smmuv3_invalidate { + __aligned_le64 cmd[2]; +}; + /** * struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE) * @size: sizeof(struct iommu_hwpt_invalidate) - * @hwpt_id: ID of a nested HWPT for cache invalidation + * @hwpt_id: ID of a nested HWPT or a vIOMMU, for cache invalidation * @data_uptr: User pointer to an array of driver-specific cache invalidation * data. 
* @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data @@ -673,8 +872,11 @@ struct iommu_hwpt_vtd_s1_invalidate { * Output the number of requests successfully handled by kernel. * @__reserved: Must be 0. * - * Invalidate the iommu cache for user-managed page table. Modifications on a - * user-managed page table should be followed by this operation to sync cache. + * Invalidate iommu cache for user-managed page table or vIOMMU. Modifications + * on a user-managed page table should be followed by this operation, if a HWPT + * is passed in via @hwpt_id. Other caches, such as device cache or descriptor + * cache can be flushed if a vIOMMU is passed in via the @hwpt_id field. + * * Each ioctl can support one or more cache invalidation requests in the array * that has a total size of @entry_len * @entry_num. * @@ -692,4 +894,394 @@ struct iommu_hwpt_invalidate { __u32 __reserved; }; #define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE) + +/** + * enum iommu_hwpt_pgfault_flags - flags for struct iommu_hwpt_pgfault + * @IOMMU_PGFAULT_FLAGS_PASID_VALID: The pasid field of the fault data is + * valid. + * @IOMMU_PGFAULT_FLAGS_LAST_PAGE: It's the last fault of a fault group. + */ +enum iommu_hwpt_pgfault_flags { + IOMMU_PGFAULT_FLAGS_PASID_VALID = (1 << 0), + IOMMU_PGFAULT_FLAGS_LAST_PAGE = (1 << 1), +}; + +/** + * enum iommu_hwpt_pgfault_perm - perm bits for struct iommu_hwpt_pgfault + * @IOMMU_PGFAULT_PERM_READ: request for read permission + * @IOMMU_PGFAULT_PERM_WRITE: request for write permission + * @IOMMU_PGFAULT_PERM_EXEC: (PCIE 10.4.1) request with a PASID that has the + * Execute Requested bit set in PASID TLP Prefix. + * @IOMMU_PGFAULT_PERM_PRIV: (PCIE 10.4.1) request with a PASID that has the + * Privileged Mode Requested bit set in PASID TLP + * Prefix. + */ +enum iommu_hwpt_pgfault_perm { + IOMMU_PGFAULT_PERM_READ = (1 << 0), + IOMMU_PGFAULT_PERM_WRITE = (1 << 1), + IOMMU_PGFAULT_PERM_EXEC = (1 << 2), + IOMMU_PGFAULT_PERM_PRIV = (1 << 3), +}; + +/** + * struct iommu_hwpt_pgfault - iommu page fault data + * @flags: Combination of enum iommu_hwpt_pgfault_flags + * @dev_id: id of the originated device + * @pasid: Process Address Space ID + * @grpid: Page Request Group Index + * @perm: Combination of enum iommu_hwpt_pgfault_perm + * @__reserved: Must be 0. + * @addr: Fault address + * @length: a hint of how much data the requestor is expecting to fetch. For + * example, if the PRI initiator knows it is going to do a 10MB + * transfer, it could fill in 10MB and the OS could pre-fault in + * 10MB of IOVA. It's default to 0 if there's no such hint. + * @cookie: kernel-managed cookie identifying a group of fault messages. The + * cookie number encoded in the last page fault of the group should + * be echoed back in the response message. + */ +struct iommu_hwpt_pgfault { + __u32 flags; + __u32 dev_id; + __u32 pasid; + __u32 grpid; + __u32 perm; + __u32 __reserved; + __aligned_u64 addr; + __u32 length; + __u32 cookie; +}; + +/** + * enum iommufd_page_response_code - Return status of fault handlers + * @IOMMUFD_PAGE_RESP_SUCCESS: Fault has been handled and the page tables + * populated, retry the access. This is the + * "Success" defined in PCI 10.4.2.1. + * @IOMMUFD_PAGE_RESP_INVALID: Could not handle this fault, don't retry the + * access. This is the "Invalid Request" in PCI + * 10.4.2.1. 
+ */ +enum iommufd_page_response_code { + IOMMUFD_PAGE_RESP_SUCCESS = 0, + IOMMUFD_PAGE_RESP_INVALID = 1, +}; + +/** + * struct iommu_hwpt_page_response - IOMMU page fault response + * @cookie: The kernel-managed cookie reported in the fault message. + * @code: One of response code in enum iommufd_page_response_code. + */ +struct iommu_hwpt_page_response { + __u32 cookie; + __u32 code; +}; + +/** + * struct iommu_fault_alloc - ioctl(IOMMU_FAULT_QUEUE_ALLOC) + * @size: sizeof(struct iommu_fault_alloc) + * @flags: Must be 0 + * @out_fault_id: The ID of the new FAULT + * @out_fault_fd: The fd of the new FAULT + * + * Explicitly allocate a fault handling object. + */ +struct iommu_fault_alloc { + __u32 size; + __u32 flags; + __u32 out_fault_id; + __u32 out_fault_fd; +}; +#define IOMMU_FAULT_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_FAULT_QUEUE_ALLOC) + +/** + * enum iommu_viommu_type - Virtual IOMMU Type + * @IOMMU_VIOMMU_TYPE_DEFAULT: Reserved for future use + * @IOMMU_VIOMMU_TYPE_ARM_SMMUV3: ARM SMMUv3 driver specific type + * @IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM + * SMMUv3) enabled ARM SMMUv3 type + */ +enum iommu_viommu_type { + IOMMU_VIOMMU_TYPE_DEFAULT = 0, + IOMMU_VIOMMU_TYPE_ARM_SMMUV3 = 1, + IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV = 2, +}; + +/** + * struct iommu_viommu_tegra241_cmdqv - NVIDIA Tegra241 CMDQV Virtual Interface + * (IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV) + * @out_vintf_mmap_offset: mmap offset argument for VINTF's page0 + * @out_vintf_mmap_length: mmap length argument for VINTF's page0 + * + * Both @out_vintf_mmap_offset and @out_vintf_mmap_length are reported by kernel + * for user space to mmap the VINTF page0 from the host physical address space + * to the guest physical address space so that a guest kernel can directly R/W + * access to the VINTF page0 in order to control its virtual command queues. + */ +struct iommu_viommu_tegra241_cmdqv { + __aligned_u64 out_vintf_mmap_offset; + __aligned_u64 out_vintf_mmap_length; +}; + +/** + * struct iommu_viommu_alloc - ioctl(IOMMU_VIOMMU_ALLOC) + * @size: sizeof(struct iommu_viommu_alloc) + * @flags: Must be 0 + * @type: Type of the virtual IOMMU. Must be defined in enum iommu_viommu_type + * @dev_id: The device's physical IOMMU will be used to back the virtual IOMMU + * @hwpt_id: ID of a nesting parent HWPT to associate to + * @out_viommu_id: Output virtual IOMMU ID for the allocated object + * @data_len: Length of the type specific data + * @__reserved: Must be 0 + * @data_uptr: User pointer to a driver-specific virtual IOMMU data + * + * Allocate a virtual IOMMU object, representing the underlying physical IOMMU's + * virtualization support that is a security-isolated slice of the real IOMMU HW + * that is unique to a specific VM. Operations global to the IOMMU are connected + * to the vIOMMU, such as: + * - Security namespace for guest owned ID, e.g. guest-controlled cache tags + * - Non-device-affiliated event reporting, e.g. invalidation queue errors + * - Access to a sharable nesting parent pagetable across physical IOMMUs + * - Virtualization of various platforms IDs, e.g. 
RIDs and others + * - Delivery of paravirtualized invalidation + * - Direct assigned invalidation queues + * - Direct assigned interrupts + */ +struct iommu_viommu_alloc { + __u32 size; + __u32 flags; + __u32 type; + __u32 dev_id; + __u32 hwpt_id; + __u32 out_viommu_id; + __u32 data_len; + __u32 __reserved; + __aligned_u64 data_uptr; +}; +#define IOMMU_VIOMMU_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIOMMU_ALLOC) + +/** + * struct iommu_vdevice_alloc - ioctl(IOMMU_VDEVICE_ALLOC) + * @size: sizeof(struct iommu_vdevice_alloc) + * @viommu_id: vIOMMU ID to associate with the virtual device + * @dev_id: The physical device to allocate a virtual instance on the vIOMMU + * @out_vdevice_id: Object handle for the vDevice. Pass to IOMMU_DESTORY + * @virt_id: Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID + * of AMD IOMMU, and vRID of Intel VT-d + * + * Allocate a virtual device instance (for a physical device) against a vIOMMU. + * This instance holds the device's information (related to its vIOMMU) in a VM. + */ +struct iommu_vdevice_alloc { + __u32 size; + __u32 viommu_id; + __u32 dev_id; + __u32 out_vdevice_id; + __aligned_u64 virt_id; +}; +#define IOMMU_VDEVICE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VDEVICE_ALLOC) + +/** + * struct iommu_ioas_change_process - ioctl(VFIO_IOAS_CHANGE_PROCESS) + * @size: sizeof(struct iommu_ioas_change_process) + * @__reserved: Must be 0 + * + * This transfers pinned memory counts for every memory map in every IOAS + * in the context to the current process. This only supports maps created + * with IOMMU_IOAS_MAP_FILE, and returns EINVAL if other maps are present. + * If the ioctl returns a failure status, then nothing is changed. + * + * This API is useful for transferring operation of a device from one process + * to another, such as during userland live update. + */ +struct iommu_ioas_change_process { + __u32 size; + __u32 __reserved; +}; + +#define IOMMU_IOAS_CHANGE_PROCESS \ + _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_CHANGE_PROCESS) + +/** + * enum iommu_veventq_flag - flag for struct iommufd_vevent_header + * @IOMMU_VEVENTQ_FLAG_LOST_EVENTS: vEVENTQ has lost vEVENTs + */ +enum iommu_veventq_flag { + IOMMU_VEVENTQ_FLAG_LOST_EVENTS = (1U << 0), +}; + +/** + * struct iommufd_vevent_header - Virtual Event Header for a vEVENTQ Status + * @flags: Combination of enum iommu_veventq_flag + * @sequence: The sequence index of a vEVENT in the vEVENTQ, with a range of + * [0, INT_MAX] where the following index of INT_MAX is 0 + * + * Each iommufd_vevent_header reports a sequence index of the following vEVENT: + * + * +----------------------+-------+----------------------+-------+---+-------+ + * | header0 {sequence=0} | data0 | header1 {sequence=1} | data1 |...| dataN | + * +----------------------+-------+----------------------+-------+---+-------+ + * + * And this sequence index is expected to be monotonic to the sequence index of + * the previous vEVENT. If two adjacent sequence indexes has a delta larger than + * 1, it means that delta - 1 number of vEVENTs has lost, e.g. two lost vEVENTs: + * + * +-----+----------------------+-------+----------------------+-------+-----+ + * | ... | header3 {sequence=3} | data3 | header6 {sequence=6} | data6 | ... 
| + * +-----+----------------------+-------+----------------------+-------+-----+ + * + * If a vEVENT lost at the tail of the vEVENTQ and there is no following vEVENT + * providing the next sequence index, an IOMMU_VEVENTQ_FLAG_LOST_EVENTS header + * would be added to the tail, and no data would follow this header: + * + * +--+----------------------+-------+-----------------------------------------+ + * |..| header3 {sequence=3} | data3 | header4 {flags=LOST_EVENTS, sequence=4} | + * +--+----------------------+-------+-----------------------------------------+ + */ +struct iommufd_vevent_header { + __u32 flags; + __u32 sequence; +}; + +/** + * enum iommu_veventq_type - Virtual Event Queue Type + * @IOMMU_VEVENTQ_TYPE_DEFAULT: Reserved for future use + * @IOMMU_VEVENTQ_TYPE_ARM_SMMUV3: ARM SMMUv3 Virtual Event Queue + * @IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV Extension IRQ + */ +enum iommu_veventq_type { + IOMMU_VEVENTQ_TYPE_DEFAULT = 0, + IOMMU_VEVENTQ_TYPE_ARM_SMMUV3 = 1, + IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV = 2, +}; + +/** + * struct iommu_vevent_arm_smmuv3 - ARM SMMUv3 Virtual Event + * (IOMMU_VEVENTQ_TYPE_ARM_SMMUV3) + * @evt: 256-bit ARM SMMUv3 Event record, little-endian. + * Reported event records: (Refer to "7.3 Event records" in SMMUv3 HW Spec) + * - 0x04 C_BAD_STE + * - 0x06 F_STREAM_DISABLED + * - 0x08 C_BAD_SUBSTREAMID + * - 0x0a C_BAD_CD + * - 0x10 F_TRANSLATION + * - 0x11 F_ADDR_SIZE + * - 0x12 F_ACCESS + * - 0x13 F_PERMISSION + * + * StreamID field reports a virtual device ID. To receive a virtual event for a + * device, a vDEVICE must be allocated via IOMMU_VDEVICE_ALLOC. + */ +struct iommu_vevent_arm_smmuv3 { + __aligned_le64 evt[4]; +}; + +/** + * struct iommu_vevent_tegra241_cmdqv - Tegra241 CMDQV IRQ + * (IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV) + * @lvcmdq_err_map: 128-bit logical vcmdq error map, little-endian. + * (Refer to register LVCMDQ_ERR_MAPs per VINTF ) + * + * The 128-bit register value from HW exclusively reflect the error bits for a + * Virtual Interface represented by a vIOMMU object. Read and report directly. + */ +struct iommu_vevent_tegra241_cmdqv { + __aligned_le64 lvcmdq_err_map[2]; +}; + +/** + * struct iommu_veventq_alloc - ioctl(IOMMU_VEVENTQ_ALLOC) + * @size: sizeof(struct iommu_veventq_alloc) + * @flags: Must be 0 + * @viommu_id: virtual IOMMU ID to associate the vEVENTQ with + * @type: Type of the vEVENTQ. Must be defined in enum iommu_veventq_type + * @veventq_depth: Maximum number of events in the vEVENTQ + * @out_veventq_id: The ID of the new vEVENTQ + * @out_veventq_fd: The fd of the new vEVENTQ. User space must close the + * successfully returned fd after using it + * @__reserved: Must be 0 + * + * Explicitly allocate a virtual event queue interface for a vIOMMU. A vIOMMU + * can have multiple FDs for different types, but is confined to one per @type. + * User space should open the @out_veventq_fd to read vEVENTs out of a vEVENTQ, + * if there are vEVENTs available. A vEVENTQ will lose events due to overflow, + * if the number of the vEVENTs hits @veventq_depth. + * + * Each vEVENT in a vEVENTQ encloses a struct iommufd_vevent_header followed by + * a type-specific data structure, in a normal case: + * + * +-+---------+-------+---------+-------+-----+---------+-------+-+ + * | | header0 | data0 | header1 | data1 | ... | headerN | dataN | | + * +-+---------+-------+---------+-------+-----+---------+-------+-+ + * + * unless a tailing IOMMU_VEVENTQ_FLAG_LOST_EVENTS header is logged (refer to + * struct iommufd_vevent_header). 
+ */ +struct iommu_veventq_alloc { + __u32 size; + __u32 flags; + __u32 viommu_id; + __u32 type; + __u32 veventq_depth; + __u32 out_veventq_id; + __u32 out_veventq_fd; + __u32 __reserved; +}; +#define IOMMU_VEVENTQ_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VEVENTQ_ALLOC) + +/** + * enum iommu_hw_queue_type - HW Queue Type + * @IOMMU_HW_QUEUE_TYPE_DEFAULT: Reserved for future use + * @IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM + * SMMUv3) Virtual Command Queue (VCMDQ) + */ +enum iommu_hw_queue_type { + IOMMU_HW_QUEUE_TYPE_DEFAULT = 0, + /* + * TEGRA241_CMDQV requirements (otherwise, allocation will fail) + * - alloc starts from the lowest @index=0 in ascending order + * - destroy starts from the last allocated @index in descending order + * - @base_addr must be aligned to @length in bytes and mapped in IOAS + * - @length must be a power of 2, with a minimum 32 bytes and a maximum + * 2 ^ idr[1].CMDQS * 16 bytes (use GET_HW_INFO call to read idr[1] + * from struct iommu_hw_info_arm_smmuv3) + * - suggest to back the queue memory with contiguous physical pages or + * a single huge page with alignment of the queue size, and limit the + * emulated vSMMU's IDR1.CMDQS to log2(huge page size / 16 bytes) + */ + IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV = 1, +}; + +/** + * struct iommu_hw_queue_alloc - ioctl(IOMMU_HW_QUEUE_ALLOC) + * @size: sizeof(struct iommu_hw_queue_alloc) + * @flags: Must be 0 + * @viommu_id: Virtual IOMMU ID to associate the HW queue with + * @type: One of enum iommu_hw_queue_type + * @index: The logical index to the HW queue per virtual IOMMU for a multi-queue + * model + * @out_hw_queue_id: The ID of the new HW queue + * @nesting_parent_iova: Base address of the queue memory in the guest physical + * address space + * @length: Length of the queue memory + * + * Allocate a HW queue object for a vIOMMU-specific HW-accelerated queue, which + * allows HW to access a guest queue memory described using @nesting_parent_iova + * and @length. + * + * A vIOMMU can allocate multiple queues, but it must use a different @index per + * type to separate each allocation, e.g:: + * + * Type1 HW queue0, Type1 HW queue1, Type2 HW queue0, ... + */ +struct iommu_hw_queue_alloc { + __u32 size; + __u32 flags; + __u32 viommu_id; + __u32 type; + __u32 index; + __u32 out_hw_queue_id; + __aligned_u64 nesting_parent_iova; + __aligned_u64 length; +}; +#define IOMMU_HW_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HW_QUEUE_ALLOC) #endif diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index c93876ca0ba..32c5885a3c2 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -178,6 +178,7 @@ struct kvm_xen_exit { #define KVM_EXIT_NOTIFY 37 #define KVM_EXIT_LOONGARCH_IOCSR 38 #define KVM_EXIT_MEMORY_FAULT 39 +#define KVM_EXIT_TDX 40 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -192,11 +193,20 @@ struct kvm_xen_exit { /* Flags that describe what fields in emulation_failure hold valid data. */ #define KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES (1ULL << 0) +/* + * struct kvm_run can be modified by userspace at any time, so KVM must be + * careful to avoid TOCTOU bugs. In order to protect KVM, HINT_UNSAFE_IN_KVM() + * renames fields in struct kvm_run from to __unsafe when + * compiled into the kernel, ensuring that any use within KVM is obvious and + * gets extra scrutiny. 
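Returning to the iommufd HW queue interface defined above (before the kvm.h hunk): a hedged sketch of how a VMM might hand a guest's Tegra241 VCMDQ ring to the kernel. viommu_id and guest_vcmdq_gpa are placeholders for values obtained during vIOMMU setup and from the guest's queue-base programming; the comments restate the TEGRA241_CMDQV constraints listed in the enum.

    /* Placeholders: iommufd, viommu_id, guest_vcmdq_gpa come from earlier setup. */
    struct iommu_hw_queue_alloc hwq = {
        .size = sizeof(hwq),
        .viommu_id = viommu_id,
        .type = IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV,
        .index = 0,                             /* allocate in ascending order from 0 */
        .nesting_parent_iova = guest_vcmdq_gpa, /* aligned to .length, mapped in the IOAS */
        .length = 0x10000,                      /* power of 2, within the advertised CMDQS limit */
    };

    if (ioctl(iommufd, IOMMU_HW_QUEUE_ALLOC, &hwq))
        err(1, "IOMMU_HW_QUEUE_ALLOC");
    /* hwq.out_hw_queue_id now names the new HW queue object. */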
+ */ +#define HINT_UNSAFE_IN_KVM(_symbol) _symbol + /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { /* in */ __u8 request_interrupt_window; - __u8 immediate_exit; + __u8 HINT_UNSAFE_IN_KVM(immediate_exit); __u8 padding1[6]; /* out */ @@ -360,6 +370,7 @@ struct kvm_run { #define KVM_SYSTEM_EVENT_WAKEUP 4 #define KVM_SYSTEM_EVENT_SUSPEND 5 #define KVM_SYSTEM_EVENT_SEV_TERM 6 +#define KVM_SYSTEM_EVENT_TDX_FATAL 7 __u32 type; __u32 ndata; union { @@ -429,6 +440,31 @@ struct kvm_run { __u64 gpa; __u64 size; } memory_fault; + /* KVM_EXIT_TDX */ + struct { + __u64 flags; + __u64 nr; + union { + struct { + __u64 ret; + __u64 data[5]; + } unknown; + struct { + __u64 ret; + __u64 gpa; + __u64 size; + } get_quote; + struct { + __u64 ret; + __u64 leaf; + __u64 r11, r12, r13, r14; + } get_tdvmcall_info; + struct { + __u64 ret; + __u64 vector; + } setup_event_notify; + }; + } tdx; /* Fix the size of the union. */ char padding[256]; }; @@ -600,10 +636,6 @@ struct kvm_ioeventfd { #define KVM_X86_DISABLE_EXITS_HLT (1 << 1) #define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2) #define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3) -#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \ - KVM_X86_DISABLE_EXITS_HLT | \ - KVM_X86_DISABLE_EXITS_PAUSE | \ - KVM_X86_DISABLE_EXITS_CSTATE) /* for KVM_ENABLE_CAP */ struct kvm_enable_cap { @@ -913,6 +945,13 @@ struct kvm_enable_cap { #define KVM_CAP_MEMORY_ATTRIBUTES 233 #define KVM_CAP_GUEST_MEMFD 234 #define KVM_CAP_VM_TYPES 235 +#define KVM_CAP_PRE_FAULT_MEMORY 236 +#define KVM_CAP_X86_APIC_BUS_CYCLES_NS 237 +#define KVM_CAP_X86_GUEST_MODE 238 +#define KVM_CAP_ARM_WRITABLE_IMP_ID_REGS 239 +#define KVM_CAP_ARM_EL2 240 +#define KVM_CAP_ARM_EL2_E2H0 241 +#define KVM_CAP_RISCV_MP_STATE_RESET 242 struct kvm_irq_routing_irqchip { __u32 irqchip; @@ -1050,6 +1089,10 @@ struct kvm_dirty_tlb { #define KVM_REG_SIZE_SHIFT 52 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL + +#define KVM_REG_SIZE(id) \ + (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) + #define KVM_REG_SIZE_U8 0x0000000000000000ULL #define KVM_REG_SIZE_U16 0x0010000000000000ULL #define KVM_REG_SIZE_U32 0x0020000000000000ULL @@ -1138,7 +1181,15 @@ enum kvm_device_type { #define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_RISCV_AIA, #define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA + KVM_DEV_TYPE_LOONGARCH_IPI, +#define KVM_DEV_TYPE_LOONGARCH_IPI KVM_DEV_TYPE_LOONGARCH_IPI + KVM_DEV_TYPE_LOONGARCH_EIOINTC, +#define KVM_DEV_TYPE_LOONGARCH_EIOINTC KVM_DEV_TYPE_LOONGARCH_EIOINTC + KVM_DEV_TYPE_LOONGARCH_PCHPIC, +#define KVM_DEV_TYPE_LOONGARCH_PCHPIC KVM_DEV_TYPE_LOONGARCH_PCHPIC + KVM_DEV_TYPE_MAX, + }; struct kvm_vfio_spapr_tce { @@ -1544,4 +1595,13 @@ struct kvm_create_guest_memfd { __u64 reserved[6]; }; +#define KVM_PRE_FAULT_MEMORY _IOWR(KVMIO, 0xd5, struct kvm_pre_fault_memory) + +struct kvm_pre_fault_memory { + __u64 gpa; + __u64 size; + __u64 flags; + __u64 padding[5]; +}; + #endif /* __LINUX_KVM_H */ diff --git a/linux-headers/linux/mman.h b/linux-headers/linux/mman.h index 4e8cb607805..2b83059586e 100644 --- a/linux-headers/linux/mman.h +++ b/linux-headers/linux/mman.h @@ -17,6 +17,7 @@ #define MAP_SHARED 0x01 /* Share changes */ #define MAP_PRIVATE 0x02 /* Changes are private */ #define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */ +#define MAP_DROPPABLE 0x08 /* Zero memory under memory pressure. 
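Stepping back to the new KVM_PRE_FAULT_MEMORY ioctl above: a hedged sketch of pre-populating a guest-physical range, assuming the upstream-documented convention that this is a vCPU ioctl and that the kernel advances gpa and shrinks size on partial completion. vcpu_fd, base and len are placeholders, and the range is assumed to be backed by an existing memslot.

    /* Placeholders: vcpu_fd, base, len. Error handling as in the earlier sketch. */
    struct kvm_pre_fault_memory pf = {
        .gpa = base,
        .size = len,
    };

    while (pf.size) {
        if (ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &pf) && errno != EINTR)
            err(1, "KVM_PRE_FAULT_MEMORY");
        /* On partial completion pf.gpa/pf.size describe the remainder,
         * so retrying continues where the previous call stopped. */
    }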
*/ /* * Huge page size encoding when MAP_HUGETLB is specified, and a huge page diff --git a/linux-headers/linux/psci.h b/linux-headers/linux/psci.h index 74f3cb5007c..a982afd4987 100644 --- a/linux-headers/linux/psci.h +++ b/linux-headers/linux/psci.h @@ -59,6 +59,7 @@ #define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18) #define PSCI_1_1_FN_MEM_PROTECT PSCI_0_2_FN(19) #define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN(20) +#define PSCI_1_3_FN_SYSTEM_OFF2 PSCI_0_2_FN(21) #define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND PSCI_0_2_FN64(12) #define PSCI_1_0_FN64_NODE_HW_STATE PSCI_0_2_FN64(13) @@ -68,6 +69,7 @@ #define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18) #define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN64(20) +#define PSCI_1_3_FN64_SYSTEM_OFF2 PSCI_0_2_FN64(21) /* PSCI v0.2 power state encoding for CPU_SUSPEND function */ #define PSCI_0_2_POWER_STATE_ID_MASK 0xffff @@ -100,6 +102,9 @@ #define PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET 0 #define PSCI_1_1_RESET_TYPE_VENDOR_START 0x80000000U +/* PSCI v1.3 hibernate type for SYSTEM_OFF2 */ +#define PSCI_1_3_OFF_TYPE_HIBERNATE_OFF BIT(0) + /* PSCI version decoding (independent of PSCI version) */ #define PSCI_VERSION_MAJOR_SHIFT 16 #define PSCI_VERSION_MINOR_MASK \ diff --git a/linux-headers/linux/psp-sev.h b/linux-headers/linux/psp-sev.h index c3046c6bfff..113c4ceb784 100644 --- a/linux-headers/linux/psp-sev.h +++ b/linux-headers/linux/psp-sev.h @@ -31,6 +31,7 @@ enum { SNP_PLATFORM_STATUS, SNP_COMMIT, SNP_SET_CONFIG, + SNP_VLEK_LOAD, SEV_MAX, }; @@ -50,6 +51,7 @@ typedef enum { SEV_RET_INVALID_PLATFORM_STATE, SEV_RET_INVALID_GUEST_STATE, SEV_RET_INAVLID_CONFIG, + SEV_RET_INVALID_CONFIG = SEV_RET_INAVLID_CONFIG, SEV_RET_INVALID_LEN, SEV_RET_ALREADY_OWNED, SEV_RET_INVALID_CERTIFICATE, @@ -71,13 +73,20 @@ typedef enum { SEV_RET_INVALID_PARAM, SEV_RET_RESOURCE_LIMIT, SEV_RET_SECURE_DATA_INVALID, - SEV_RET_INVALID_KEY = 0x27, - SEV_RET_INVALID_PAGE_SIZE, - SEV_RET_INVALID_PAGE_STATE, - SEV_RET_INVALID_MDATA_ENTRY, - SEV_RET_INVALID_PAGE_OWNER, - SEV_RET_INVALID_PAGE_AEAD_OFLOW, - SEV_RET_RMP_INIT_REQUIRED, + SEV_RET_INVALID_PAGE_SIZE = 0x0019, + SEV_RET_INVALID_PAGE_STATE = 0x001A, + SEV_RET_INVALID_MDATA_ENTRY = 0x001B, + SEV_RET_INVALID_PAGE_OWNER = 0x001C, + SEV_RET_AEAD_OFLOW = 0x001D, + SEV_RET_EXIT_RING_BUFFER = 0x001F, + SEV_RET_RMP_INIT_REQUIRED = 0x0020, + SEV_RET_BAD_SVN = 0x0021, + SEV_RET_BAD_VERSION = 0x0022, + SEV_RET_SHUTDOWN_REQUIRED = 0x0023, + SEV_RET_UPDATE_FAILED = 0x0024, + SEV_RET_RESTORE_REQUIRED = 0x0025, + SEV_RET_RMP_INITIALIZATION_FAILED = 0x0026, + SEV_RET_INVALID_KEY = 0x0027, SEV_RET_MAX, } sev_ret_code; @@ -214,6 +223,32 @@ struct sev_user_data_snp_config { __u8 rsvd1[52]; } __attribute__((packed)); +/** + * struct sev_data_snp_vlek_load - SNP_VLEK_LOAD structure + * + * @len: length of the command buffer read by the PSP + * @vlek_wrapped_version: version of wrapped VLEK hashstick (Must be 0h) + * @rsvd: reserved + * @vlek_wrapped_address: address of a wrapped VLEK hashstick + * (struct sev_user_data_snp_wrapped_vlek_hashstick) + */ +struct sev_user_data_snp_vlek_load { + __u32 len; /* In */ + __u8 vlek_wrapped_version; /* In */ + __u8 rsvd[3]; /* In */ + __u64 vlek_wrapped_address; /* In */ +} __attribute__((packed)); + +/** + * struct sev_user_data_snp_vlek_wrapped_vlek_hashstick - Wrapped VLEK data + * + * @data: Opaque data provided by AMD KDS (as described in SEV-SNP Firmware ABI + * 1.54, SNP_VLEK_LOAD) + */ +struct sev_user_data_snp_wrapped_vlek_hashstick { + __u8 data[432]; /* In */ +} 
__attribute__((packed)); + /** * struct sev_issue_cmd - SEV ioctl parameters * diff --git a/linux-headers/linux/stddef.h b/linux-headers/linux/stddef.h index 96aa341942b..e1fcfcf3b33 100644 --- a/linux-headers/linux/stddef.h +++ b/linux-headers/linux/stddef.h @@ -8,6 +8,13 @@ #define __always_inline __inline__ #endif +/* Not all C++ standards support type declarations inside an anonymous union */ +#ifndef __cplusplus +#define __struct_group_tag(TAG) TAG +#else +#define __struct_group_tag(TAG) +#endif + /** * __struct_group() - Create a mirrored named and anonyomous struct * @@ -20,13 +27,13 @@ * and size: one anonymous and one named. The former's members can be used * normally without sub-struct naming, and the latter can be used to * reason about the start, end, and size of the group of struct members. - * The named struct can also be explicitly tagged for layer reuse, as well - * as both having struct attributes appended. + * The named struct can also be explicitly tagged for layer reuse (C only), + * as well as both having struct attributes appended. */ #define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \ union { \ struct { MEMBERS } ATTRS; \ - struct TAG { MEMBERS } ATTRS NAME; \ + struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \ } ATTRS #ifdef __cplusplus @@ -63,4 +70,6 @@ #define __counted_by_be(m) #endif +#define __kernel_nonstring + #endif /* _LINUX_STDDEF_H */ diff --git a/linux-headers/linux/vduse.h b/linux-headers/linux/vduse.h index 6d2ca064b5d..f46269af349 100644 --- a/linux-headers/linux/vduse.h +++ b/linux-headers/linux/vduse.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ #ifndef _VDUSE_H_ #define _VDUSE_H_ diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h index b4be37b2255..79bf8c0cc5e 100644 --- a/linux-headers/linux/vfio.h +++ b/linux-headers/linux/vfio.h @@ -35,7 +35,7 @@ #define VFIO_EEH 5 /* Two-stage IOMMU */ -#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */ +#define __VFIO_RESERVED_TYPE1_NESTING_IOMMU 6 /* Implies v2 */ #define VFIO_SPAPR_TCE_v2_IOMMU 7 @@ -671,6 +671,7 @@ enum { */ enum { VFIO_AP_REQ_IRQ_INDEX, + VFIO_AP_CFG_CHG_IRQ_INDEX, VFIO_AP_NUM_IRQS }; @@ -931,29 +932,34 @@ struct vfio_device_bind_iommufd { * VFIO_DEVICE_ATTACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 19, * struct vfio_device_attach_iommufd_pt) * @argsz: User filled size of this data. - * @flags: Must be 0. + * @flags: Flags for attach. * @pt_id: Input the target id which can represent an ioas or a hwpt * allocated via iommufd subsystem. * Output the input ioas id or the attached hwpt id which could * be the specified hwpt itself or a hwpt automatically created * for the specified ioas by kernel during the attachment. + * @pasid: The pasid to be attached, only meaningful when + * VFIO_DEVICE_ATTACH_PASID is set in @flags * * Associate the device with an address space within the bound iommufd. * Undo by VFIO_DEVICE_DETACH_IOMMUFD_PT or device fd close. This is only * allowed on cdev fds. * - * If a vfio device is currently attached to a valid hw_pagetable, without doing - * a VFIO_DEVICE_DETACH_IOMMUFD_PT, a second VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl - * passing in another hw_pagetable (hwpt) id is allowed. This action, also known - * as a hw_pagetable replacement, will replace the device's currently attached - * hw_pagetable with a new hw_pagetable corresponding to the given pt_id. 
+ * If a vfio device or a pasid of this device is currently attached to a valid + * hw_pagetable (hwpt), without doing a VFIO_DEVICE_DETACH_IOMMUFD_PT, a second + * VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl passing in another hwpt id is allowed. + * This action, also known as a hw_pagetable replacement, will replace the + * currently attached hwpt of the device or the pasid of this device with a new + * hwpt corresponding to the given pt_id. * * Return: 0 on success, -errno on failure. */ struct vfio_device_attach_iommufd_pt { __u32 argsz; __u32 flags; +#define VFIO_DEVICE_ATTACH_PASID (1 << 0) __u32 pt_id; + __u32 pasid; }; #define VFIO_DEVICE_ATTACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 19) @@ -962,17 +968,21 @@ struct vfio_device_attach_iommufd_pt { * VFIO_DEVICE_DETACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 20, * struct vfio_device_detach_iommufd_pt) * @argsz: User filled size of this data. - * @flags: Must be 0. + * @flags: Flags for detach. + * @pasid: The pasid to be detached, only meaningful when + * VFIO_DEVICE_DETACH_PASID is set in @flags * - * Remove the association of the device and its current associated address - * space. After it, the device should be in a blocking DMA state. This is only - * allowed on cdev fds. + * Remove the association of the device or a pasid of the device and its current + * associated address space. After it, the device or the pasid should be in a + * blocking DMA state. This is only allowed on cdev fds. * * Return: 0 on success, -errno on failure. */ struct vfio_device_detach_iommufd_pt { __u32 argsz; __u32 flags; +#define VFIO_DEVICE_DETACH_PASID (1 << 0) + __u32 pasid; }; #define VFIO_DEVICE_DETACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 20) diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h index b95dd84eef2..d4b3e2ae131 100644 --- a/linux-headers/linux/vhost.h +++ b/linux-headers/linux/vhost.h @@ -28,10 +28,10 @@ /* Set current process as the (exclusive) owner of this file descriptor. This * must be called before any other vhost command. Further calls to - * VHOST_OWNER_SET fail until VHOST_OWNER_RESET is called. */ + * VHOST_SET_OWNER fail until VHOST_RESET_OWNER is called. */ #define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01) /* Give up ownership, and reset the device to default values. - * Allows subsequent call to VHOST_OWNER_SET to succeed. */ + * Allows subsequent call to VHOST_SET_OWNER to succeed. 
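To make the new PASID attach/detach paths in vfio.h above concrete, a sketch of attaching one PASID of a device to a hw_pagetable through its cdev fd and later detaching it. device_fd, hwpt_id and the PASID value 1 are placeholders for state established earlier via iommufd and VFIO_DEVICE_BIND_IOMMUFD.

    /* Placeholders: device_fd (VFIO cdev fd), hwpt_id (from iommufd), PASID 1. */
    struct vfio_device_attach_iommufd_pt attach = {
        .argsz = sizeof(attach),
        .flags = VFIO_DEVICE_ATTACH_PASID,
        .pt_id = hwpt_id,
        .pasid = 1,
    };
    struct vfio_device_detach_iommufd_pt detach = {
        .argsz = sizeof(detach),
        .flags = VFIO_DEVICE_DETACH_PASID,
        .pasid = 1,
    };

    if (ioctl(device_fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach))
        err(1, "VFIO_DEVICE_ATTACH_IOMMUFD_PT");
    /* ... DMA tagged with that PASID now translates through hwpt_id ... */
    if (ioctl(device_fd, VFIO_DEVICE_DETACH_IOMMUFD_PT, &detach))
        err(1, "VFIO_DEVICE_DETACH_IOMMUFD_PT");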
*/ #define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02) /* Set up/modify memory layout */ diff --git a/linux-user/aarch64/Makefile.vdso b/linux-user/aarch64/Makefile.vdso index 599958116b6..c33a679c0f2 100644 --- a/linux-user/aarch64/Makefile.vdso +++ b/linux-user/aarch64/Makefile.vdso @@ -5,8 +5,9 @@ VPATH += $(SUBDIR) all: $(SUBDIR)/vdso-be.so $(SUBDIR)/vdso-le.so -LDFLAGS = -nostdlib -shared -Wl,-h,linux-vdso.so.1 -Wl,--build-id=sha1 \ - -Wl,--hash-style=both -Wl,-T,$(SUBDIR)/vdso.ld +LDFLAGS = -nostdlib -shared -Wl,-h,linux-vdso.so.1 \ + -Wl,--build-id=sha1 -Wl,--hash-style=both \ + -Wl,-z,max-page-size=4096 -Wl,-T,$(SUBDIR)/vdso.ld $(SUBDIR)/vdso-be.so: vdso.S vdso.ld $(CC) -o $@ $(LDFLAGS) -mbig-endian $< diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c index 71cdc8be50c..fea43cefa6b 100644 --- a/linux-user/aarch64/cpu_loop.c +++ b/linux-user/aarch64/cpu_loop.c @@ -20,61 +20,13 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" #include "qemu/guest-random.h" #include "semihosting/common-semi.h" #include "target/arm/syndrome.h" #include "target/arm/cpu-features.h" -#define get_user_code_u32(x, gaddr, env) \ - ({ abi_long __r = get_user_u32((x), (gaddr)); \ - if (!__r && bswap_code(arm_sctlr_b(env))) { \ - (x) = bswap32(x); \ - } \ - __r; \ - }) - -#define get_user_code_u16(x, gaddr, env) \ - ({ abi_long __r = get_user_u16((x), (gaddr)); \ - if (!__r && bswap_code(arm_sctlr_b(env))) { \ - (x) = bswap16(x); \ - } \ - __r; \ - }) - -#define get_user_data_u32(x, gaddr, env) \ - ({ abi_long __r = get_user_u32((x), (gaddr)); \ - if (!__r && arm_cpu_bswap_data(env)) { \ - (x) = bswap32(x); \ - } \ - __r; \ - }) - -#define get_user_data_u16(x, gaddr, env) \ - ({ abi_long __r = get_user_u16((x), (gaddr)); \ - if (!__r && arm_cpu_bswap_data(env)) { \ - (x) = bswap16(x); \ - } \ - __r; \ - }) - -#define put_user_data_u32(x, gaddr, env) \ - ({ typeof(x) __x = (x); \ - if (arm_cpu_bswap_data(env)) { \ - __x = bswap32(__x); \ - } \ - put_user_u32(__x, (gaddr)); \ - }) - -#define put_user_data_u16(x, gaddr, env) \ - ({ typeof(x) __x = (x); \ - if (arm_cpu_bswap_data(env)) { \ - __x = bswap16(__x); \ - } \ - put_user_u16(__x, (gaddr)); \ - }) - /* AArch64 main loop */ void cpu_loop(CPUARMState *env) { @@ -185,7 +137,7 @@ void cpu_loop(CPUARMState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { ARMCPU *cpu = env_archcpu(env); CPUState *cs = env_cpu(env); diff --git a/linux-user/aarch64/meson.build b/linux-user/aarch64/meson.build index f75bb3cd752..f25a67a21ef 100644 --- a/linux-user/aarch64/meson.build +++ b/linux-user/aarch64/meson.build @@ -11,3 +11,9 @@ vdso_le_inc = gen_vdso.process('vdso-le.so', linux_user_ss.add(when: 'TARGET_AARCH64', if_true: [vdso_be_inc, vdso_le_inc]) linux_user_ss.add(when: 'TARGET_AARCH64', if_true: [files('mte_user_helper.c')]) + +syscall_nr_generators += { + 'aarch64': generator(sh, + arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ], + output: '@BASENAME@_nr.h') +} diff --git a/linux-user/aarch64/mte_user_helper.h b/linux-user/aarch64/mte_user_helper.h index 8685e5175a2..0c53abda222 100644 --- a/linux-user/aarch64/mte_user_helper.h +++ b/linux-user/aarch64/mte_user_helper.h @@ -9,6 +9,8 @@ #ifndef AARCH64_MTE_USER_HELPER_H #define AARCH64_MTE USER_HELPER_H +#include "user/abitypes.h" + 
#ifndef PR_MTE_TCF_SHIFT # define PR_MTE_TCF_SHIFT 1 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT) diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c index bc7a13800da..668353bbda4 100644 --- a/linux-user/aarch64/signal.c +++ b/linux-user/aarch64/signal.c @@ -121,6 +121,30 @@ struct target_za_context { #define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \ TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES) +#define TARGET_TPIDR2_MAGIC 0x54504902 + +struct target_tpidr2_context { + struct target_aarch64_ctx head; + uint64_t tpidr2; +}; + +#define TARGET_ZT_MAGIC 0x5a544e01 + +struct target_zt_context { + struct target_aarch64_ctx head; + uint16_t nregs; + uint16_t reserved[3]; + /* ZTn register data immediately follows */ +}; + +#define TARGET_ZT_SIG_REG_BYTES (512 / 8) +#define TARGET_ZT_SIG_REGS_SIZE(n) (TARGET_ZT_SIG_REG_BYTES * (n)) +#define TARGET_ZT_SIG_CONTEXT_SIZE(n) (sizeof(struct target_zt_context) + \ + TARGET_ZT_SIG_REGS_SIZE(n)) +#define TARGET_ZT_SIG_REGS_OFFSET sizeof(struct target_zt_context) +QEMU_BUILD_BUG_ON(TARGET_ZT_SIG_REG_BYTES != \ + sizeof_field(CPUARMState, za_state.zt0)); + struct target_rt_sigframe { struct target_siginfo info; struct target_ucontext uc; @@ -248,11 +272,41 @@ static void target_setup_za_record(struct target_za_context *za, for (i = 0; i < vl; ++i) { uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i); for (j = 0; j < vq * 2; ++j) { - __put_user_e(env->zarray[i].d[j], z + j, le); + __put_user_e(env->za_state.za[i].d[j], z + j, le); } } } +static void target_setup_tpidr2_record(struct target_tpidr2_context *tpidr2, + CPUARMState *env) +{ + __put_user(TARGET_TPIDR2_MAGIC, &tpidr2->head.magic); + __put_user(sizeof(struct target_tpidr2_context), &tpidr2->head.size); + __put_user(env->cp15.tpidr2_el0, &tpidr2->tpidr2); +} + +static void target_setup_zt_record(struct target_zt_context *zt, + CPUARMState *env, int size) +{ + uint64_t *z; + + memset(zt, 0, sizeof(*zt)); + __put_user(TARGET_ZT_MAGIC, &zt->head.magic); + __put_user(size, &zt->head.size); + /* + * The record format allows for multiple ZT regs, but + * currently there is only one, ZT0. 
+ */ + __put_user(1, &zt->nregs); + assert(size == TARGET_ZT_SIG_CONTEXT_SIZE(1)); + + /* ZT0 is the same byte-stream format as SVE regs and ZA */ + z = (void *)zt + TARGET_ZT_SIG_REGS_OFFSET; + for (int i = 0; i < ARRAY_SIZE(env->za_state.zt0); i++) { + __put_user_e(env->za_state.zt0[i], z + i, le); + } +} + static void target_restore_general_frame(CPUARMState *env, struct target_rt_sigframe *sf) { @@ -397,12 +451,42 @@ static bool target_restore_za_record(CPUARMState *env, for (i = 0; i < vl; ++i) { uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i); for (j = 0; j < vq * 2; ++j) { - __get_user_e(env->zarray[i].d[j], z + j, le); + __get_user_e(env->za_state.za[i].d[j], z + j, le); } } return true; } +static void target_restore_tpidr2_record(CPUARMState *env, + struct target_tpidr2_context *tpidr2) +{ + __get_user(env->cp15.tpidr2_el0, &tpidr2->tpidr2); +} + +static bool target_restore_zt_record(CPUARMState *env, + struct target_zt_context *zt, int size, + int svcr) +{ + uint16_t nregs; + uint64_t *z; + + if (!(FIELD_EX64(svcr, SVCR, ZA))) { + return false; + } + + __get_user(nregs, &zt->nregs); + + if (nregs != 1) { + return false; + } + + z = (void *)zt + TARGET_ZT_SIG_REGS_OFFSET; + for (int i = 0; i < ARRAY_SIZE(env->za_state.zt0); i++) { + __get_user_e(env->za_state.zt0[i], z + i, le); + } + return true; +} + static int target_restore_sigframe(CPUARMState *env, struct target_rt_sigframe *sf) { @@ -410,10 +494,13 @@ static int target_restore_sigframe(CPUARMState *env, struct target_fpsimd_context *fpsimd = NULL; struct target_sve_context *sve = NULL; struct target_za_context *za = NULL; + struct target_tpidr2_context *tpidr2 = NULL; + struct target_zt_context *zt = NULL; uint64_t extra_datap = 0; bool used_extra = false; int sve_size = 0; int za_size = 0; + int zt_size = 0; int svcr = 0; target_restore_general_frame(env, sf); @@ -460,6 +547,23 @@ static int target_restore_sigframe(CPUARMState *env, za_size = size; break; + case TARGET_TPIDR2_MAGIC: + if (tpidr2 || size != sizeof(struct target_tpidr2_context) || + !cpu_isar_feature(aa64_sme, env_archcpu(env))) { + goto err; + } + tpidr2 = (struct target_tpidr2_context *)ctx; + break; + + case TARGET_ZT_MAGIC: + if (zt || size != TARGET_ZT_SIG_CONTEXT_SIZE(1) || + !cpu_isar_feature(aa64_sme2, env_archcpu(env))) { + goto err; + } + zt = (struct target_zt_context *)ctx; + zt_size = size; + break; + case TARGET_EXTRA_MAGIC: if (extra || size != sizeof(struct target_extra_context)) { goto err; @@ -497,6 +601,16 @@ static int target_restore_sigframe(CPUARMState *env, if (za && !target_restore_za_record(env, za, za_size, &svcr)) { goto err; } + if (tpidr2) { + target_restore_tpidr2_record(env, tpidr2); + } + /* + * NB that we must restore ZT after ZA so the check that there's + * no ZT record if SVCR.ZA is 0 gets the right value of SVCR. 
+ */ + if (zt && !target_restore_zt_record(env, zt, zt_size, svcr)) { + goto err; + } if (env->svcr != svcr) { env->svcr = svcr; arm_rebuild_hflags(env); @@ -568,8 +682,9 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, .total_size = offsetof(struct target_rt_sigframe, uc.tuc_mcontext.__reserved), }; - int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0; - int sve_size = 0, za_size = 0; + int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0, tpidr2_ofs = 0; + int zt_ofs = 0; + int sve_size = 0, za_size = 0, tpidr2_size = 0, zt_size = 0; struct target_rt_sigframe *frame; struct target_rt_frame_record *fr; abi_ulong frame_addr, return_addr; @@ -585,6 +700,8 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, sve_ofs = alloc_sigframe_space(sve_size, &layout); } if (cpu_isar_feature(aa64_sme, env_archcpu(env))) { + tpidr2_size = sizeof(struct target_tpidr2_context); + tpidr2_ofs = alloc_sigframe_space(tpidr2_size, &layout); /* ZA state needs saving only if it is enabled. */ if (FIELD_EX64(env->svcr, SVCR, ZA)) { za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env)); @@ -593,6 +710,12 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, } za_ofs = alloc_sigframe_space(za_size, &layout); } + if (cpu_isar_feature(aa64_sme2, env_archcpu(env)) && + FIELD_EX64(env->svcr, SVCR, ZA)) { + /* If SME ZA storage is enabled, we must also save SME2 ZT0 */ + zt_size = TARGET_ZT_SIG_CONTEXT_SIZE(1); + zt_ofs = alloc_sigframe_space(zt_size, &layout); + } if (layout.extra_ofs) { /* Reserve space for the extra end marker. The standard end marker @@ -644,6 +767,12 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, if (za_ofs) { target_setup_za_record((void *)frame + za_ofs, env, za_size); } + if (tpidr2_ofs) { + target_setup_tpidr2_record((void *)frame + tpidr2_ofs, env); + } + if (zt_ofs) { + target_setup_zt_record((void *)frame + zt_ofs, env, zt_size); + } /* Set up the stack frame for unwinding. */ fr = (void *)frame + fr_ofs; @@ -666,8 +795,12 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, env->btype = 2; } - /* Invoke the signal handler with both SM and ZA disabled. */ + /* + * Invoke the signal handler with a clean SME state: both SM and ZA + * disabled and TPIDR2_EL0 cleared. + */ aarch64_set_svcr(env, 0, R_SVCR_SM_MASK | R_SVCR_ZA_MASK); + env->cp15.tpidr2_el0 = 0; if (info) { frame->info = *info; diff --git a/linux-user/aarch64/syscall_64.tbl b/linux-user/aarch64/syscall_64.tbl new file mode 100644 index 00000000000..845e24eb372 --- /dev/null +++ b/linux-user/aarch64/syscall_64.tbl @@ -0,0 +1,405 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# This file contains the system call numbers for all of the +# more recently added architectures. +# +# As a basic principle, no duplication of functionality +# should be added, e.g. we don't use lseek when llseek +# is present. New architectures should use this file +# and implement the less feature-full calls in user space. 
+# +0 common io_setup sys_io_setup compat_sys_io_setup +1 common io_destroy sys_io_destroy +2 common io_submit sys_io_submit compat_sys_io_submit +3 common io_cancel sys_io_cancel +4 time32 io_getevents sys_io_getevents_time32 +4 64 io_getevents sys_io_getevents +5 common setxattr sys_setxattr +6 common lsetxattr sys_lsetxattr +7 common fsetxattr sys_fsetxattr +8 common getxattr sys_getxattr +9 common lgetxattr sys_lgetxattr +10 common fgetxattr sys_fgetxattr +11 common listxattr sys_listxattr +12 common llistxattr sys_llistxattr +13 common flistxattr sys_flistxattr +14 common removexattr sys_removexattr +15 common lremovexattr sys_lremovexattr +16 common fremovexattr sys_fremovexattr +17 common getcwd sys_getcwd +18 common lookup_dcookie sys_ni_syscall +19 common eventfd2 sys_eventfd2 +20 common epoll_create1 sys_epoll_create1 +21 common epoll_ctl sys_epoll_ctl +22 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait +23 common dup sys_dup +24 common dup3 sys_dup3 +25 32 fcntl64 sys_fcntl64 compat_sys_fcntl64 +25 64 fcntl sys_fcntl +26 common inotify_init1 sys_inotify_init1 +27 common inotify_add_watch sys_inotify_add_watch +28 common inotify_rm_watch sys_inotify_rm_watch +29 common ioctl sys_ioctl compat_sys_ioctl +30 common ioprio_set sys_ioprio_set +31 common ioprio_get sys_ioprio_get +32 common flock sys_flock +33 common mknodat sys_mknodat +34 common mkdirat sys_mkdirat +35 common unlinkat sys_unlinkat +36 common symlinkat sys_symlinkat +37 common linkat sys_linkat +# renameat is superseded with flags by renameat2 +38 renameat renameat sys_renameat +39 common umount2 sys_umount +40 common mount sys_mount +41 common pivot_root sys_pivot_root +42 common nfsservctl sys_ni_syscall +43 32 statfs64 sys_statfs64 compat_sys_statfs64 +43 64 statfs sys_statfs +44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 +44 64 fstatfs sys_fstatfs +45 32 truncate64 sys_truncate64 compat_sys_truncate64 +45 64 truncate sys_truncate +46 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +46 64 ftruncate sys_ftruncate +47 common fallocate sys_fallocate compat_sys_fallocate +48 common faccessat sys_faccessat +49 common chdir sys_chdir +50 common fchdir sys_fchdir +51 common chroot sys_chroot +52 common fchmod sys_fchmod +53 common fchmodat sys_fchmodat +54 common fchownat sys_fchownat +55 common fchown sys_fchown +56 common openat sys_openat +57 common close sys_close +58 common vhangup sys_vhangup +59 common pipe2 sys_pipe2 +60 common quotactl sys_quotactl +61 common getdents64 sys_getdents64 +62 32 llseek sys_llseek +62 64 lseek sys_lseek +63 common read sys_read +64 common write sys_write +65 common readv sys_readv sys_readv +66 common writev sys_writev sys_writev +67 common pread64 sys_pread64 compat_sys_pread64 +68 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +69 common preadv sys_preadv compat_sys_preadv +70 common pwritev sys_pwritev compat_sys_pwritev +71 32 sendfile64 sys_sendfile64 +71 64 sendfile sys_sendfile64 +72 time32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32 +72 64 pselect6 sys_pselect6 +73 time32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32 +73 64 ppoll sys_ppoll +74 common signalfd4 sys_signalfd4 compat_sys_signalfd4 +75 common vmsplice sys_vmsplice +76 common splice sys_splice +77 common tee sys_tee +78 common readlinkat sys_readlinkat +79 stat64 fstatat64 sys_fstatat64 +79 64 newfstatat sys_newfstatat +80 stat64 fstat64 sys_fstat64 +80 64 fstat sys_newfstat +81 common sync sys_sync +82 common fsync sys_fsync +83 common fdatasync sys_fdatasync +84 common 
sync_file_range sys_sync_file_range compat_sys_sync_file_range +85 common timerfd_create sys_timerfd_create +86 time32 timerfd_settime sys_timerfd_settime32 +86 64 timerfd_settime sys_timerfd_settime +87 time32 timerfd_gettime sys_timerfd_gettime32 +87 64 timerfd_gettime sys_timerfd_gettime +88 time32 utimensat sys_utimensat_time32 +88 64 utimensat sys_utimensat +89 common acct sys_acct +90 common capget sys_capget +91 common capset sys_capset +92 common personality sys_personality +93 common exit sys_exit +94 common exit_group sys_exit_group +95 common waitid sys_waitid compat_sys_waitid +96 common set_tid_address sys_set_tid_address +97 common unshare sys_unshare +98 time32 futex sys_futex_time32 +98 64 futex sys_futex +99 common set_robust_list sys_set_robust_list compat_sys_set_robust_list +100 common get_robust_list sys_get_robust_list compat_sys_get_robust_list +101 time32 nanosleep sys_nanosleep_time32 +101 64 nanosleep sys_nanosleep +102 common getitimer sys_getitimer compat_sys_getitimer +103 common setitimer sys_setitimer compat_sys_setitimer +104 common kexec_load sys_kexec_load compat_sys_kexec_load +105 common init_module sys_init_module +106 common delete_module sys_delete_module +107 common timer_create sys_timer_create compat_sys_timer_create +108 time32 timer_gettime sys_timer_gettime32 +108 64 timer_gettime sys_timer_gettime +109 common timer_getoverrun sys_timer_getoverrun +110 time32 timer_settime sys_timer_settime32 +110 64 timer_settime sys_timer_settime +111 common timer_delete sys_timer_delete +112 time32 clock_settime sys_clock_settime32 +112 64 clock_settime sys_clock_settime +113 time32 clock_gettime sys_clock_gettime32 +113 64 clock_gettime sys_clock_gettime +114 time32 clock_getres sys_clock_getres_time32 +114 64 clock_getres sys_clock_getres +115 time32 clock_nanosleep sys_clock_nanosleep_time32 +115 64 clock_nanosleep sys_clock_nanosleep +116 common syslog sys_syslog +117 common ptrace sys_ptrace compat_sys_ptrace +118 common sched_setparam sys_sched_setparam +119 common sched_setscheduler sys_sched_setscheduler +120 common sched_getscheduler sys_sched_getscheduler +121 common sched_getparam sys_sched_getparam +122 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity +123 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity +124 common sched_yield sys_sched_yield +125 common sched_get_priority_max sys_sched_get_priority_max +126 common sched_get_priority_min sys_sched_get_priority_min +127 time32 sched_rr_get_interval sys_sched_rr_get_interval_time32 +127 64 sched_rr_get_interval sys_sched_rr_get_interval +128 common restart_syscall sys_restart_syscall +129 common kill sys_kill +130 common tkill sys_tkill +131 common tgkill sys_tgkill +132 common sigaltstack sys_sigaltstack compat_sys_sigaltstack +133 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend +134 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction +135 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask +136 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending +137 time32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32 +137 64 rt_sigtimedwait sys_rt_sigtimedwait +138 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo +139 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn +140 common setpriority sys_setpriority +141 common getpriority sys_getpriority +142 common reboot sys_reboot +143 common setregid sys_setregid +144 common setgid sys_setgid +145 
common setreuid sys_setreuid +146 common setuid sys_setuid +147 common setresuid sys_setresuid +148 common getresuid sys_getresuid +149 common setresgid sys_setresgid +150 common getresgid sys_getresgid +151 common setfsuid sys_setfsuid +152 common setfsgid sys_setfsgid +153 common times sys_times compat_sys_times +154 common setpgid sys_setpgid +155 common getpgid sys_getpgid +156 common getsid sys_getsid +157 common setsid sys_setsid +158 common getgroups sys_getgroups +159 common setgroups sys_setgroups +160 common uname sys_newuname +161 common sethostname sys_sethostname +162 common setdomainname sys_setdomainname +# getrlimit and setrlimit are superseded with prlimit64 +163 rlimit getrlimit sys_getrlimit compat_sys_getrlimit +164 rlimit setrlimit sys_setrlimit compat_sys_setrlimit +165 common getrusage sys_getrusage compat_sys_getrusage +166 common umask sys_umask +167 common prctl sys_prctl +168 common getcpu sys_getcpu +169 time32 gettimeofday sys_gettimeofday compat_sys_gettimeofday +169 64 gettimeofday sys_gettimeofday +170 time32 settimeofday sys_settimeofday compat_sys_settimeofday +170 64 settimeofday sys_settimeofday +171 time32 adjtimex sys_adjtimex_time32 +171 64 adjtimex sys_adjtimex +172 common getpid sys_getpid +173 common getppid sys_getppid +174 common getuid sys_getuid +175 common geteuid sys_geteuid +176 common getgid sys_getgid +177 common getegid sys_getegid +178 common gettid sys_gettid +179 common sysinfo sys_sysinfo compat_sys_sysinfo +180 common mq_open sys_mq_open compat_sys_mq_open +181 common mq_unlink sys_mq_unlink +182 time32 mq_timedsend sys_mq_timedsend_time32 +182 64 mq_timedsend sys_mq_timedsend +183 time32 mq_timedreceive sys_mq_timedreceive_time32 +183 64 mq_timedreceive sys_mq_timedreceive +184 common mq_notify sys_mq_notify compat_sys_mq_notify +185 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr +186 common msgget sys_msgget +187 common msgctl sys_msgctl compat_sys_msgctl +188 common msgrcv sys_msgrcv compat_sys_msgrcv +189 common msgsnd sys_msgsnd compat_sys_msgsnd +190 common semget sys_semget +191 common semctl sys_semctl compat_sys_semctl +192 time32 semtimedop sys_semtimedop_time32 +192 64 semtimedop sys_semtimedop +193 common semop sys_semop +194 common shmget sys_shmget +195 common shmctl sys_shmctl compat_sys_shmctl +196 common shmat sys_shmat compat_sys_shmat +197 common shmdt sys_shmdt +198 common socket sys_socket +199 common socketpair sys_socketpair +200 common bind sys_bind +201 common listen sys_listen +202 common accept sys_accept +203 common connect sys_connect +204 common getsockname sys_getsockname +205 common getpeername sys_getpeername +206 common sendto sys_sendto +207 common recvfrom sys_recvfrom compat_sys_recvfrom +208 common setsockopt sys_setsockopt sys_setsockopt +209 common getsockopt sys_getsockopt sys_getsockopt +210 common shutdown sys_shutdown +211 common sendmsg sys_sendmsg compat_sys_sendmsg +212 common recvmsg sys_recvmsg compat_sys_recvmsg +213 common readahead sys_readahead compat_sys_readahead +214 common brk sys_brk +215 common munmap sys_munmap +216 common mremap sys_mremap +217 common add_key sys_add_key +218 common request_key sys_request_key +219 common keyctl sys_keyctl compat_sys_keyctl +220 common clone sys_clone +221 common execve sys_execve compat_sys_execve +222 32 mmap2 sys_mmap2 +222 64 mmap sys_mmap +223 32 fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64 +223 64 fadvise64 sys_fadvise64_64 +224 common swapon sys_swapon +225 common swapoff sys_swapoff +226 common mprotect 
sys_mprotect +227 common msync sys_msync +228 common mlock sys_mlock +229 common munlock sys_munlock +230 common mlockall sys_mlockall +231 common munlockall sys_munlockall +232 common mincore sys_mincore +233 common madvise sys_madvise +234 common remap_file_pages sys_remap_file_pages +235 common mbind sys_mbind +236 common get_mempolicy sys_get_mempolicy +237 common set_mempolicy sys_set_mempolicy +238 common migrate_pages sys_migrate_pages +239 common move_pages sys_move_pages +240 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo +241 common perf_event_open sys_perf_event_open +242 common accept4 sys_accept4 +243 time32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 +243 64 recvmmsg sys_recvmmsg +# Architectures may provide up to 16 syscalls of their own between 244 and 259 +244 arc cacheflush sys_cacheflush +245 arc arc_settls sys_arc_settls +246 arc arc_gettls sys_arc_gettls +247 arc sysfs sys_sysfs +248 arc arc_usr_cmpxchg sys_arc_usr_cmpxchg + +244 csky set_thread_area sys_set_thread_area +245 csky cacheflush sys_cacheflush + +244 nios2 cacheflush sys_cacheflush + +244 or1k or1k_atomic sys_or1k_atomic + +258 riscv riscv_hwprobe sys_riscv_hwprobe +259 riscv riscv_flush_icache sys_riscv_flush_icache + +260 time32 wait4 sys_wait4 compat_sys_wait4 +260 64 wait4 sys_wait4 +261 common prlimit64 sys_prlimit64 +262 common fanotify_init sys_fanotify_init +263 common fanotify_mark sys_fanotify_mark +264 common name_to_handle_at sys_name_to_handle_at +265 common open_by_handle_at sys_open_by_handle_at +266 time32 clock_adjtime sys_clock_adjtime32 +266 64 clock_adjtime sys_clock_adjtime +267 common syncfs sys_syncfs +268 common setns sys_setns +269 common sendmmsg sys_sendmmsg compat_sys_sendmmsg +270 common process_vm_readv sys_process_vm_readv +271 common process_vm_writev sys_process_vm_writev +272 common kcmp sys_kcmp +273 common finit_module sys_finit_module +274 common sched_setattr sys_sched_setattr +275 common sched_getattr sys_sched_getattr +276 common renameat2 sys_renameat2 +277 common seccomp sys_seccomp +278 common getrandom sys_getrandom +279 common memfd_create sys_memfd_create +280 common bpf sys_bpf +281 common execveat sys_execveat compat_sys_execveat +282 common userfaultfd sys_userfaultfd +283 common membarrier sys_membarrier +284 common mlock2 sys_mlock2 +285 common copy_file_range sys_copy_file_range +286 common preadv2 sys_preadv2 compat_sys_preadv2 +287 common pwritev2 sys_pwritev2 compat_sys_pwritev2 +288 common pkey_mprotect sys_pkey_mprotect +289 common pkey_alloc sys_pkey_alloc +290 common pkey_free sys_pkey_free +291 common statx sys_statx +292 time32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents +292 64 io_pgetevents sys_io_pgetevents +293 common rseq sys_rseq +294 common kexec_file_load sys_kexec_file_load +# 295 through 402 are unassigned to sync up with generic numbers don't use +403 32 clock_gettime64 sys_clock_gettime +404 32 clock_settime64 sys_clock_settime +405 32 clock_adjtime64 sys_clock_adjtime +406 32 clock_getres_time64 sys_clock_getres +407 32 clock_nanosleep_time64 sys_clock_nanosleep +408 32 timer_gettime64 sys_timer_gettime +409 32 timer_settime64 sys_timer_settime +410 32 timerfd_gettime64 sys_timerfd_gettime +411 32 timerfd_settime64 sys_timerfd_settime +412 32 utimensat_time64 sys_utimensat +413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 +414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 +416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 +417 32 
recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 +418 32 mq_timedsend_time64 sys_mq_timedsend +419 32 mq_timedreceive_time64 sys_mq_timedreceive +420 32 semtimedop_time64 sys_semtimedop +421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 +422 32 futex_time64 sys_futex +423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval +424 common pidfd_send_signal sys_pidfd_send_signal +425 common io_uring_setup sys_io_uring_setup +426 common io_uring_enter sys_io_uring_enter +427 common io_uring_register sys_io_uring_register +428 common open_tree sys_open_tree +429 common move_mount sys_move_mount +430 common fsopen sys_fsopen +431 common fsconfig sys_fsconfig +432 common fsmount sys_fsmount +433 common fspick sys_fspick +434 common pidfd_open sys_pidfd_open +435 common clone3 sys_clone3 +436 common close_range sys_close_range +437 common openat2 sys_openat2 +438 common pidfd_getfd sys_pidfd_getfd +439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise +441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr +443 common quotactl_fd sys_quotactl_fd +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self +447 memfd_secret memfd_secret sys_memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/linux-user/aarch64/syscall_nr.h b/linux-user/aarch64/syscall_nr.h index 12ef002d60f..760302cb3ef 100644 --- a/linux-user/aarch64/syscall_nr.h +++ b/linux-user/aarch64/syscall_nr.h @@ -1,313 +1 @@ -/* - * This file contains the system call numbers. - * Do not modify. 
- * This file is generated by scripts/gensyscalls.sh - */ -#ifndef LINUX_USER_AARCH64_SYSCALL_NR_H -#define LINUX_USER_AARCH64_SYSCALL_NR_H - -#define TARGET_NR_io_setup 0 -#define TARGET_NR_io_destroy 1 -#define TARGET_NR_io_submit 2 -#define TARGET_NR_io_cancel 3 -#define TARGET_NR_io_getevents 4 -#define TARGET_NR_setxattr 5 -#define TARGET_NR_lsetxattr 6 -#define TARGET_NR_fsetxattr 7 -#define TARGET_NR_getxattr 8 -#define TARGET_NR_lgetxattr 9 -#define TARGET_NR_fgetxattr 10 -#define TARGET_NR_listxattr 11 -#define TARGET_NR_llistxattr 12 -#define TARGET_NR_flistxattr 13 -#define TARGET_NR_removexattr 14 -#define TARGET_NR_lremovexattr 15 -#define TARGET_NR_fremovexattr 16 -#define TARGET_NR_getcwd 17 -#define TARGET_NR_lookup_dcookie 18 -#define TARGET_NR_eventfd2 19 -#define TARGET_NR_epoll_create1 20 -#define TARGET_NR_epoll_ctl 21 -#define TARGET_NR_epoll_pwait 22 -#define TARGET_NR_dup 23 -#define TARGET_NR_dup3 24 -#define TARGET_NR_fcntl 25 -#define TARGET_NR_inotify_init1 26 -#define TARGET_NR_inotify_add_watch 27 -#define TARGET_NR_inotify_rm_watch 28 -#define TARGET_NR_ioctl 29 -#define TARGET_NR_ioprio_set 30 -#define TARGET_NR_ioprio_get 31 -#define TARGET_NR_flock 32 -#define TARGET_NR_mknodat 33 -#define TARGET_NR_mkdirat 34 -#define TARGET_NR_unlinkat 35 -#define TARGET_NR_symlinkat 36 -#define TARGET_NR_linkat 37 -#define TARGET_NR_renameat 38 -#define TARGET_NR_umount2 39 -#define TARGET_NR_mount 40 -#define TARGET_NR_pivot_root 41 -#define TARGET_NR_nfsservctl 42 -#define TARGET_NR_statfs 43 -#define TARGET_NR_fstatfs 44 -#define TARGET_NR_truncate 45 -#define TARGET_NR_ftruncate 46 -#define TARGET_NR_fallocate 47 -#define TARGET_NR_faccessat 48 -#define TARGET_NR_chdir 49 -#define TARGET_NR_fchdir 50 -#define TARGET_NR_chroot 51 -#define TARGET_NR_fchmod 52 -#define TARGET_NR_fchmodat 53 -#define TARGET_NR_fchownat 54 -#define TARGET_NR_fchown 55 -#define TARGET_NR_openat 56 -#define TARGET_NR_close 57 -#define TARGET_NR_vhangup 58 -#define TARGET_NR_pipe2 59 -#define TARGET_NR_quotactl 60 -#define TARGET_NR_getdents64 61 -#define TARGET_NR_lseek 62 -#define TARGET_NR_read 63 -#define TARGET_NR_write 64 -#define TARGET_NR_readv 65 -#define TARGET_NR_writev 66 -#define TARGET_NR_pread64 67 -#define TARGET_NR_pwrite64 68 -#define TARGET_NR_preadv 69 -#define TARGET_NR_pwritev 70 -#define TARGET_NR_sendfile 71 -#define TARGET_NR_pselect6 72 -#define TARGET_NR_ppoll 73 -#define TARGET_NR_signalfd4 74 -#define TARGET_NR_vmsplice 75 -#define TARGET_NR_splice 76 -#define TARGET_NR_tee 77 -#define TARGET_NR_readlinkat 78 -#define TARGET_NR_newfstatat 79 -#define TARGET_NR_fstat 80 -#define TARGET_NR_sync 81 -#define TARGET_NR_fsync 82 -#define TARGET_NR_fdatasync 83 -#define TARGET_NR_sync_file_range 84 -#define TARGET_NR_timerfd_create 85 -#define TARGET_NR_timerfd_settime 86 -#define TARGET_NR_timerfd_gettime 87 -#define TARGET_NR_utimensat 88 -#define TARGET_NR_acct 89 -#define TARGET_NR_capget 90 -#define TARGET_NR_capset 91 -#define TARGET_NR_personality 92 -#define TARGET_NR_exit 93 -#define TARGET_NR_exit_group 94 -#define TARGET_NR_waitid 95 -#define TARGET_NR_set_tid_address 96 -#define TARGET_NR_unshare 97 -#define TARGET_NR_futex 98 -#define TARGET_NR_set_robust_list 99 -#define TARGET_NR_get_robust_list 100 -#define TARGET_NR_nanosleep 101 -#define TARGET_NR_getitimer 102 -#define TARGET_NR_setitimer 103 -#define TARGET_NR_kexec_load 104 -#define TARGET_NR_init_module 105 -#define TARGET_NR_delete_module 106 -#define TARGET_NR_timer_create 107 -#define 
TARGET_NR_timer_gettime 108 -#define TARGET_NR_timer_getoverrun 109 -#define TARGET_NR_timer_settime 110 -#define TARGET_NR_timer_delete 111 -#define TARGET_NR_clock_settime 112 -#define TARGET_NR_clock_gettime 113 -#define TARGET_NR_clock_getres 114 -#define TARGET_NR_clock_nanosleep 115 -#define TARGET_NR_syslog 116 -#define TARGET_NR_ptrace 117 -#define TARGET_NR_sched_setparam 118 -#define TARGET_NR_sched_setscheduler 119 -#define TARGET_NR_sched_getscheduler 120 -#define TARGET_NR_sched_getparam 121 -#define TARGET_NR_sched_setaffinity 122 -#define TARGET_NR_sched_getaffinity 123 -#define TARGET_NR_sched_yield 124 -#define TARGET_NR_sched_get_priority_max 125 -#define TARGET_NR_sched_get_priority_min 126 -#define TARGET_NR_sched_rr_get_interval 127 -#define TARGET_NR_restart_syscall 128 -#define TARGET_NR_kill 129 -#define TARGET_NR_tkill 130 -#define TARGET_NR_tgkill 131 -#define TARGET_NR_sigaltstack 132 -#define TARGET_NR_rt_sigsuspend 133 -#define TARGET_NR_rt_sigaction 134 -#define TARGET_NR_rt_sigprocmask 135 -#define TARGET_NR_rt_sigpending 136 -#define TARGET_NR_rt_sigtimedwait 137 -#define TARGET_NR_rt_sigqueueinfo 138 -#define TARGET_NR_rt_sigreturn 139 -#define TARGET_NR_setpriority 140 -#define TARGET_NR_getpriority 141 -#define TARGET_NR_reboot 142 -#define TARGET_NR_setregid 143 -#define TARGET_NR_setgid 144 -#define TARGET_NR_setreuid 145 -#define TARGET_NR_setuid 146 -#define TARGET_NR_setresuid 147 -#define TARGET_NR_getresuid 148 -#define TARGET_NR_setresgid 149 -#define TARGET_NR_getresgid 150 -#define TARGET_NR_setfsuid 151 -#define TARGET_NR_setfsgid 152 -#define TARGET_NR_times 153 -#define TARGET_NR_setpgid 154 -#define TARGET_NR_getpgid 155 -#define TARGET_NR_getsid 156 -#define TARGET_NR_setsid 157 -#define TARGET_NR_getgroups 158 -#define TARGET_NR_setgroups 159 -#define TARGET_NR_uname 160 -#define TARGET_NR_sethostname 161 -#define TARGET_NR_setdomainname 162 -#define TARGET_NR_getrlimit 163 -#define TARGET_NR_setrlimit 164 -#define TARGET_NR_getrusage 165 -#define TARGET_NR_umask 166 -#define TARGET_NR_prctl 167 -#define TARGET_NR_getcpu 168 -#define TARGET_NR_gettimeofday 169 -#define TARGET_NR_settimeofday 170 -#define TARGET_NR_adjtimex 171 -#define TARGET_NR_getpid 172 -#define TARGET_NR_getppid 173 -#define TARGET_NR_getuid 174 -#define TARGET_NR_geteuid 175 -#define TARGET_NR_getgid 176 -#define TARGET_NR_getegid 177 -#define TARGET_NR_gettid 178 -#define TARGET_NR_sysinfo 179 -#define TARGET_NR_mq_open 180 -#define TARGET_NR_mq_unlink 181 -#define TARGET_NR_mq_timedsend 182 -#define TARGET_NR_mq_timedreceive 183 -#define TARGET_NR_mq_notify 184 -#define TARGET_NR_mq_getsetattr 185 -#define TARGET_NR_msgget 186 -#define TARGET_NR_msgctl 187 -#define TARGET_NR_msgrcv 188 -#define TARGET_NR_msgsnd 189 -#define TARGET_NR_semget 190 -#define TARGET_NR_semctl 191 -#define TARGET_NR_semtimedop 192 -#define TARGET_NR_semop 193 -#define TARGET_NR_shmget 194 -#define TARGET_NR_shmctl 195 -#define TARGET_NR_shmat 196 -#define TARGET_NR_shmdt 197 -#define TARGET_NR_socket 198 -#define TARGET_NR_socketpair 199 -#define TARGET_NR_bind 200 -#define TARGET_NR_listen 201 -#define TARGET_NR_accept 202 -#define TARGET_NR_connect 203 -#define TARGET_NR_getsockname 204 -#define TARGET_NR_getpeername 205 -#define TARGET_NR_sendto 206 -#define TARGET_NR_recvfrom 207 -#define TARGET_NR_setsockopt 208 -#define TARGET_NR_getsockopt 209 -#define TARGET_NR_shutdown 210 -#define TARGET_NR_sendmsg 211 -#define TARGET_NR_recvmsg 212 -#define TARGET_NR_readahead 213 -#define 
TARGET_NR_brk 214 -#define TARGET_NR_munmap 215 -#define TARGET_NR_mremap 216 -#define TARGET_NR_add_key 217 -#define TARGET_NR_request_key 218 -#define TARGET_NR_keyctl 219 -#define TARGET_NR_clone 220 -#define TARGET_NR_execve 221 -#define TARGET_NR_mmap 222 -#define TARGET_NR_fadvise64 223 -#define TARGET_NR_swapon 224 -#define TARGET_NR_swapoff 225 -#define TARGET_NR_mprotect 226 -#define TARGET_NR_msync 227 -#define TARGET_NR_mlock 228 -#define TARGET_NR_munlock 229 -#define TARGET_NR_mlockall 230 -#define TARGET_NR_munlockall 231 -#define TARGET_NR_mincore 232 -#define TARGET_NR_madvise 233 -#define TARGET_NR_remap_file_pages 234 -#define TARGET_NR_mbind 235 -#define TARGET_NR_get_mempolicy 236 -#define TARGET_NR_set_mempolicy 237 -#define TARGET_NR_migrate_pages 238 -#define TARGET_NR_move_pages 239 -#define TARGET_NR_rt_tgsigqueueinfo 240 -#define TARGET_NR_perf_event_open 241 -#define TARGET_NR_accept4 242 -#define TARGET_NR_recvmmsg 243 -#define TARGET_NR_arch_specific_syscall 244 -#define TARGET_NR_wait4 260 -#define TARGET_NR_prlimit64 261 -#define TARGET_NR_fanotify_init 262 -#define TARGET_NR_fanotify_mark 263 -#define TARGET_NR_name_to_handle_at 264 -#define TARGET_NR_open_by_handle_at 265 -#define TARGET_NR_clock_adjtime 266 -#define TARGET_NR_syncfs 267 -#define TARGET_NR_setns 268 -#define TARGET_NR_sendmmsg 269 -#define TARGET_NR_process_vm_readv 270 -#define TARGET_NR_process_vm_writev 271 -#define TARGET_NR_kcmp 272 -#define TARGET_NR_finit_module 273 -#define TARGET_NR_sched_setattr 274 -#define TARGET_NR_sched_getattr 275 -#define TARGET_NR_renameat2 276 -#define TARGET_NR_seccomp 277 -#define TARGET_NR_getrandom 278 -#define TARGET_NR_memfd_create 279 -#define TARGET_NR_bpf 280 -#define TARGET_NR_execveat 281 -#define TARGET_NR_userfaultfd 282 -#define TARGET_NR_membarrier 283 -#define TARGET_NR_mlock2 284 -#define TARGET_NR_copy_file_range 285 -#define TARGET_NR_preadv2 286 -#define TARGET_NR_pwritev2 287 -#define TARGET_NR_pkey_mprotect 288 -#define TARGET_NR_pkey_alloc 289 -#define TARGET_NR_pkey_free 290 -#define TARGET_NR_statx 291 -#define TARGET_NR_io_pgetevents 292 -#define TARGET_NR_rseq 293 -#define TARGET_NR_kexec_file_load 294 -#define TARGET_NR_pidfd_send_signal 424 -#define TARGET_NR_io_uring_setup 425 -#define TARGET_NR_io_uring_enter 426 -#define TARGET_NR_io_uring_register 427 -#define TARGET_NR_open_tree 428 -#define TARGET_NR_move_mount 429 -#define TARGET_NR_fsopen 430 -#define TARGET_NR_fsconfig 431 -#define TARGET_NR_fsmount 432 -#define TARGET_NR_fspick 433 -#define TARGET_NR_pidfd_open 434 -#define TARGET_NR_clone3 435 -#define TARGET_NR_close_range 436 -#define TARGET_NR_openat2 437 -#define TARGET_NR_pidfd_getfd 438 -#define TARGET_NR_faccessat2 439 -#define TARGET_NR_process_madvise 440 -#define TARGET_NR_epoll_pwait2 441 -#define TARGET_NR_mount_setattr 442 -#define TARGET_NR_landlock_create_ruleset 444 -#define TARGET_NR_landlock_add_rule 445 -#define TARGET_NR_landlock_restrict_self 446 -#define TARGET_NR_syscalls 447 - -#endif /* LINUX_USER_AARCH64_SYSCALL_NR_H */ +#include "syscall_64_nr.h" diff --git a/linux-user/aarch64/syscallhdr.sh b/linux-user/aarch64/syscallhdr.sh new file mode 100644 index 00000000000..dd6b586b1b7 --- /dev/null +++ b/linux-user/aarch64/syscallhdr.sh @@ -0,0 +1,28 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +prefix="$4" +offset="$5" + +fileguard=LINUX_USER_AARCH64_`basename "$out" | sed \ + -e 
'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + echo "#ifndef ${fileguard}" + echo "#define ${fileguard} 1" + echo "" + + while read nr abi name entry compat; do + if [ -z "$offset" ]; then + echo "#define TARGET_NR_${prefix}${name} $nr" + else + echo "#define TARGET_NR_${prefix}${name} ($offset + $nr)" + fi + done + + echo "" + echo "#endif /* ${fileguard} */" +) > "$out" diff --git a/linux-user/aarch64/target_signal.h b/linux-user/aarch64/target_signal.h index 40e399d9908..6f66a50bfd2 100644 --- a/linux-user/aarch64/target_signal.h +++ b/linux-user/aarch64/target_signal.h @@ -3,6 +3,8 @@ #include "../generic/signal.h" +#define TARGET_SA_RESTORER 0x04000000 + #define TARGET_SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */ #define TARGET_SEGV_MTESERR 9 /* Synchronous ARM MTE exception */ diff --git a/linux-user/aarch64/vdso-be.so b/linux-user/aarch64/vdso-be.so index 808206ade82..d43c3b19cdf 100755 Binary files a/linux-user/aarch64/vdso-be.so and b/linux-user/aarch64/vdso-be.so differ diff --git a/linux-user/aarch64/vdso-le.so b/linux-user/aarch64/vdso-le.so index 941aaf29931..aaedc9d85e5 100755 Binary files a/linux-user/aarch64/vdso-le.so and b/linux-user/aarch64/vdso-le.so differ diff --git a/linux-user/alpha/cpu_loop.c b/linux-user/alpha/cpu_loop.c index 2ea039aa71f..80ad536c5f2 100644 --- a/linux-user/alpha/cpu_loop.c +++ b/linux-user/alpha/cpu_loop.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" void cpu_loop(CPUAlphaState *env) @@ -173,7 +173,7 @@ void cpu_loop(CPUAlphaState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { int i; diff --git a/linux-user/alpha/syscall.tbl b/linux-user/alpha/syscall.tbl index 3000a2e8ee2..54ee7aaa24f 100644 --- a/linux-user/alpha/syscall.tbl +++ b/linux-user/alpha/syscall.tbl @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note # # system call numbers and entry vectors for alpha # @@ -125,8 +125,8 @@ 116 common osf_gettimeofday sys_osf_gettimeofday 117 common osf_getrusage sys_osf_getrusage 118 common getsockopt sys_getsockopt -120 common readv sys_osf_readv -121 common writev sys_osf_writev +120 common readv sys_readv +121 common writev sys_writev 122 common osf_settimeofday sys_osf_settimeofday 123 common fchown sys_fchown 124 common fchmod sys_fchmod @@ -230,7 +230,7 @@ 259 common osf_swapctl sys_ni_syscall 260 common osf_memcntl sys_ni_syscall 261 common osf_fdatasync sys_ni_syscall -300 common bdflush sys_bdflush +300 common bdflush sys_ni_syscall 301 common sethae sys_sethae 302 common mount sys_mount 303 common old_adjtimex sys_old_adjtimex @@ -334,7 +334,7 @@ 401 common io_submit sys_io_submit 402 common io_cancel sys_io_cancel 405 common exit_group sys_exit_group -406 common lookup_dcookie sys_lookup_dcookie +406 common lookup_dcookie sys_ni_syscall 407 common epoll_create sys_epoll_create 408 common epoll_ctl sys_epoll_ctl 409 common epoll_wait sys_epoll_wait @@ -474,7 +474,7 @@ 542 common fsmount sys_fsmount 543 common fspick sys_fspick 544 common pidfd_open sys_pidfd_open -# 545 reserved for clone3 +545 common clone3 alpha_clone3 546 common close_range sys_close_range 547 common openat2 sys_openat2 548 
common pidfd_getfd sys_pidfd_getfd @@ -482,7 +482,23 @@ 550 common process_madvise sys_process_madvise 551 common epoll_pwait2 sys_epoll_pwait2 552 common mount_setattr sys_mount_setattr -# 553 reserved for quotactl_path +553 common quotactl_fd sys_quotactl_fd 554 common landlock_create_ruleset sys_landlock_create_ruleset 555 common landlock_add_rule sys_landlock_add_rule 556 common landlock_restrict_self sys_landlock_restrict_self +# 557 reserved for memfd_secret +558 common process_mrelease sys_process_mrelease +559 common futex_waitv sys_futex_waitv +560 common set_mempolicy_home_node sys_ni_syscall +561 common cachestat sys_cachestat +562 common fchmodat2 sys_fchmodat2 +563 common map_shadow_stack sys_map_shadow_stack +564 common futex_wake sys_futex_wake +565 common futex_wait sys_futex_wait +566 common futex_requeue sys_futex_requeue +567 common statmount sys_statmount +568 common listmount sys_listmount +569 common lsm_get_self_attr sys_lsm_get_self_attr +570 common lsm_set_self_attr sys_lsm_set_self_attr +571 common lsm_list_modules sys_lsm_list_modules +572 common mseal sys_mseal diff --git a/linux-user/alpha/syscallhdr.sh b/linux-user/alpha/syscallhdr.sh index 55cafe6abf9..6da0c957e21 100644 --- a/linux-user/alpha/syscallhdr.sh +++ b/linux-user/alpha/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/alpha/target_proc.h b/linux-user/alpha/target_proc.h index dac37dffc9d..da437ee0e56 100644 --- a/linux-user/alpha/target_proc.h +++ b/linux-user/alpha/target_proc.h @@ -15,7 +15,7 @@ static int open_cpuinfo(CPUArchState *cpu_env, int fd) const char *p, *q; int t; - p = object_class_get_name(OBJECT_CLASS(CPU_GET_CLASS(env_cpu(cpu_env)))); + p = object_class_get_name(OBJECT_CLASS(env_cpu(cpu_env)->cc)); q = strchr(p, '-'); t = q - p; assert(t < sizeof(model)); diff --git a/linux-user/arm/Makefile.vdso b/linux-user/arm/Makefile.vdso index 2d098a57483..ede489e2363 100644 --- a/linux-user/arm/Makefile.vdso +++ b/linux-user/arm/Makefile.vdso @@ -3,15 +3,18 @@ include $(BUILD_DIR)/tests/tcg/arm-linux-user/config-target.mak SUBDIR = $(SRC_PATH)/linux-user/arm VPATH += $(SUBDIR) -all: $(SUBDIR)/vdso-be.so $(SUBDIR)/vdso-le.so +all: $(SUBDIR)/vdso-be8.so $(SUBDIR)/vdso-be32.so $(SUBDIR)/vdso-le.so # Adding -use-blx disables unneeded interworking without actually using blx. 
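The syscall-number headers that used to be maintained by hand (such as the aarch64 list deleted above) are now generated from the kernel-style syscall.tbl files shown in this series: each row carries the syscall number, the ABI, the name and the entry point, and the per-architecture syscallhdr.sh script turns every matching row into a TARGET_NR_ define. As a sketch of the expected output — the input rows below are illustrative, and the generated file name (assumed here to be syscall_64_nr.h, as the new #include in the aarch64 header suggests) is picked by build rules outside these hunks — running the aarch64 script with an empty prefix and offset would produce:

    /*
     * Hypothetical input rows in syscall.tbl format (number, ABI, name, entry):
     *   0  common  io_setup    sys_io_setup
     *   1  common  io_destroy  sys_io_destroy
     * syscallhdr.sh output for out=syscall_64_nr.h, prefix="" and offset="":
     */
    #ifndef LINUX_USER_AARCH64_SYSCALL_64_NR_H
    #define LINUX_USER_AARCH64_SYSCALL_64_NR_H 1

    #define TARGET_NR_io_setup 0
    #define TARGET_NR_io_destroy 1

    #endif /* LINUX_USER_AARCH64_SYSCALL_64_NR_H */

When a non-empty offset is passed to the script, each define becomes ($offset + $nr) instead of the bare number.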
-LDFLAGS = -nostdlib -shared -Wl,-use-blx \ +LDFLAGS = -nostdlib -shared -Wl,-use-blx -Wl,-z,max-page-size=4096 \ -Wl,-h,linux-vdso.so.1 -Wl,--build-id=sha1 \ -Wl,--hash-style=both -Wl,-T,$(SUBDIR)/vdso.ld -$(SUBDIR)/vdso-be.so: vdso.S vdso.ld vdso-asmoffset.h - $(CC) -o $@ $(LDFLAGS) -mbig-endian $< +$(SUBDIR)/vdso-be8.so: vdso.S vdso.ld vdso-asmoffset.h + $(CC) -o $@ $(LDFLAGS) -mbig-endian -mbe8 $< + +$(SUBDIR)/vdso-be32.so: vdso.S vdso.ld vdso-asmoffset.h + $(CC) -o $@ $(LDFLAGS) -mbig-endian -mbe32 $< $(SUBDIR)/vdso-le.so: vdso.S vdso.ld vdso-asmoffset.h $(CC) -o $@ $(LDFLAGS) -mlittle-endian $< diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c index ec665862d93..33f63951a95 100644 --- a/linux-user/arm/cpu_loop.c +++ b/linux-user/arm/cpu_loop.c @@ -21,10 +21,12 @@ #include "qemu.h" #include "user-internals.h" #include "elf.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" #include "semihosting/common-semi.h" #include "exec/page-protection.h" +#include "exec/mmap-lock.h" +#include "user/page-protection.h" #include "target/arm/syndrome.h" #define get_user_code_u32(x, gaddr, env) \ @@ -35,45 +37,10 @@ __r; \ }) -#define get_user_code_u16(x, gaddr, env) \ - ({ abi_long __r = get_user_u16((x), (gaddr)); \ - if (!__r && bswap_code(arm_sctlr_b(env))) { \ - (x) = bswap16(x); \ - } \ - __r; \ - }) - -#define get_user_data_u32(x, gaddr, env) \ - ({ abi_long __r = get_user_u32((x), (gaddr)); \ - if (!__r && arm_cpu_bswap_data(env)) { \ - (x) = bswap32(x); \ - } \ - __r; \ - }) - -#define get_user_data_u16(x, gaddr, env) \ - ({ abi_long __r = get_user_u16((x), (gaddr)); \ - if (!__r && arm_cpu_bswap_data(env)) { \ - (x) = bswap16(x); \ - } \ - __r; \ - }) - -#define put_user_data_u32(x, gaddr, env) \ - ({ typeof(x) __x = (x); \ - if (arm_cpu_bswap_data(env)) { \ - __x = bswap32(__x); \ - } \ - put_user_u32(__x, (gaddr)); \ - }) - -#define put_user_data_u16(x, gaddr, env) \ - ({ typeof(x) __x = (x); \ - if (arm_cpu_bswap_data(env)) { \ - __x = bswap16(__x); \ - } \ - put_user_u16(__x, (gaddr)); \ - }) +/* + * Note that if we need to do data accesses here, they should do a + * bswap if arm_cpu_bswap_data() returns true. + */ /* * Similar to code in accel/tcg/user-exec.c, but outside the execution loop. @@ -396,6 +363,7 @@ void cpu_loop(CPUARMState *env) switch (n) { case ARM_NR_cacheflush: /* nop */ + env->regs[0] = 0; break; case ARM_NR_set_tls: cpu_set_tls(env, env->regs[0]); @@ -512,7 +480,7 @@ void cpu_loop(CPUARMState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { CPUState *cpu = env_cpu(env); TaskState *ts = get_task_state(cpu); diff --git a/linux-user/arm/meson.build b/linux-user/arm/meson.build index c4bb9af5b85..348ffb810d7 100644 --- a/linux-user/arm/meson.build +++ b/linux-user/arm/meson.build @@ -10,10 +10,17 @@ syscall_nr_generators += { # is always true as far as source_set.apply() is concerned. Always build # both header files and include the right one via #if. 
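On 32-bit Arm there are two incompatible big-endian layouts, and this patch starts shipping a vDSO for each: BE8 (the EABI 4+ form, where data is big-endian but instructions remain little-endian in memory) and BE32 (the legacy form, big-endian throughout), built with -mbe8 and -mbe32 in the Makefile.vdso rules above. The loader then picks the matching image per guest binary from its ELF header flags; the selection added to elfload.c later in this patch amounts to the sketch below (the helper name is illustrative, while the flag test and the image-info symbols are the ones the patch introduces):

    /*
     * Choose between the two big-endian Arm vDSO images from the guest
     * binary's ELF flags: BE8 requires EABI version >= 4 plus the
     * EF_ARM_BE8 flag; any other big-endian binary gets the BE32 image.
     */
    static const VdsoImageInfo *pick_arm_be_vdso(uint32_t elf_flags)
    {
        if (EF_ARM_EABI_VERSION(elf_flags) >= EF_ARM_EABI_VER4 &&
            (elf_flags & EF_ARM_BE8)) {
            return &vdso_be8_image_info;
        }
        return &vdso_be32_image_info;
    }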
-vdso_be_inc = gen_vdso.process('vdso-be.so', - extra_args: ['-s', 'sigreturn_codes']) +vdso_be8_inc = gen_vdso.process('vdso-be8.so', + extra_args: ['-s', 'sigreturn_codes', + '-p', 'vdso_be8']) + +vdso_be32_inc = gen_vdso.process('vdso-be32.so', + extra_args: ['-s', 'sigreturn_codes', + '-p', 'vdso_be32']) vdso_le_inc = gen_vdso.process('vdso-le.so', extra_args: ['-s', 'sigreturn_codes']) -linux_user_ss.add(when: 'TARGET_ARM', if_true: [vdso_be_inc, vdso_le_inc]) +linux_user_ss.add(when: 'TARGET_ARM', if_true: [ + vdso_be8_inc, vdso_be32_inc, vdso_le_inc +]) diff --git a/linux-user/arm/nwfpe/fpa11.c b/linux-user/arm/nwfpe/fpa11.c index 9a93610d245..0f1afbd91df 100644 --- a/linux-user/arm/nwfpe/fpa11.c +++ b/linux-user/arm/nwfpe/fpa11.c @@ -51,6 +51,29 @@ void resetFPA11(void) #ifdef MAINTAIN_FPCR fpa11->fpcr = MASK_RESET; #endif + + /* + * Real FPA11 hardware does not handle NaNs, but always takes an + * exception for them to be software-emulated (ARM7500FE datasheet + * section 10.4). There is no documented architectural requirement + * for NaN propagation rules and it will depend on how the OS + * level software emulation opted to do it. We here use prop_s_ab + * which matches the later VFP hardware choice and how QEMU's + * fpa11 emulation has worked in the past. The real Linux kernel + * does something slightly different: arch/arm/nwfpe/softfloat-specialize + * propagateFloat64NaN() has the curious behaviour that it prefers + * the QNaN over the SNaN, but if both are QNaN it picks A and + * if both are SNaN it picks B. In theory we could add this as + * a NaN propagation rule, but in practice FPA11 emulation is so + * close to totally dead that it's not worth trying to match it at + * this late date. + */ + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &fpa11->fp_status); + /* + * Use the same default NaN value as Arm VFP. This doesn't match + * the Linux kernel's nwfpe emulation, which uses an all-1s value. 
+ */ + set_float_default_nan_pattern(0b01000000, &fpa11->fp_status); } void SetRoundingMode(const unsigned int opcode) diff --git a/linux-user/arm/syscall.tbl b/linux-user/arm/syscall.tbl index 28e03b5fec0..23c98203c40 100644 --- a/linux-user/arm/syscall.tbl +++ b/linux-user/arm/syscall.tbl @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note # # Linux system call numbers and entry vectors # @@ -147,7 +148,7 @@ 131 common quotactl sys_quotactl 132 common getpgid sys_getpgid 133 common fchdir sys_fchdir -134 common bdflush sys_bdflush +134 common bdflush sys_ni_syscall 135 common sysfs sys_sysfs 136 common personality sys_personality # 137 was sys_afs_syscall @@ -263,10 +264,10 @@ 246 common io_submit sys_io_submit 247 common io_cancel sys_io_cancel 248 common exit_group sys_exit_group -249 common lookup_dcookie sys_lookup_dcookie +249 common lookup_dcookie sys_ni_syscall 250 common epoll_create sys_epoll_create 251 common epoll_ctl sys_epoll_ctl sys_oabi_epoll_ctl -252 common epoll_wait sys_epoll_wait sys_oabi_epoll_wait +252 common epoll_wait sys_epoll_wait 253 common remap_file_pages sys_remap_file_pages # 254 for set_thread_area # 255 for get_thread_area @@ -456,7 +457,23 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 common quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/linux-user/arm/syscallhdr.sh b/linux-user/arm/syscallhdr.sh index 4c952b2cfb2..692fd6a76e6 100644 --- a/linux-user/arm/syscallhdr.sh +++ b/linux-user/arm/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/arm/target_signal.h b/linux-user/arm/target_signal.h index 0e6351d9f78..ff1810b1fe0 100644 --- a/linux-user/arm/target_signal.h +++ b/linux-user/arm/target_signal.h @@ -3,6 +3,8 @@ #include "../generic/signal.h" +#define TARGET_SA_RESTORER 0x04000000 + #define TARGET_ARCH_HAS_SETUP_FRAME #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 diff --git a/linux-user/arm/vdso-be.so b/linux-user/arm/vdso-be.so deleted file mode 100755 index 69cafbb956e..00000000000 Binary files a/linux-user/arm/vdso-be.so and /dev/null differ diff --git a/linux-user/arm/vdso-be32.so b/linux-user/arm/vdso-be32.so new file mode 100755 index 00000000000..b896d3d545e Binary files /dev/null and b/linux-user/arm/vdso-be32.so differ diff --git a/linux-user/arm/vdso-be8.so b/linux-user/arm/vdso-be8.so new file mode 100755 index 00000000000..784b7bdb2a9 Binary files /dev/null and b/linux-user/arm/vdso-be8.so differ diff --git a/linux-user/arm/vdso-le.so 
b/linux-user/arm/vdso-le.so index ad05a125187..38d3d510473 100755 Binary files a/linux-user/arm/vdso-le.so and b/linux-user/arm/vdso-le.so differ diff --git a/linux-user/cpu_loop-common.h b/linux-user/cpu_loop-common.h deleted file mode 100644 index e644d2ef909..00000000000 --- a/linux-user/cpu_loop-common.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * qemu user cpu loop - * - * Copyright (c) 2003-2008 Fabrice Bellard - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ - -#ifndef CPU_LOOP_COMMON_H -#define CPU_LOOP_COMMON_H - -#include "exec/log.h" -#include "special-errno.h" - -void target_exception_dump(CPUArchState *env, const char *fmt, int code); -#define EXCP_DUMP(env, fmt, code) \ - target_exception_dump(env, fmt, code) - -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs); -#endif diff --git a/linux-user/cris/cpu_loop.c b/linux-user/cris/cpu_loop.c deleted file mode 100644 index 04c9086b6dc..00000000000 --- a/linux-user/cris/cpu_loop.c +++ /dev/null @@ -1,95 +0,0 @@ -/* - * qemu user cpu loop - * - * Copyright (c) 2003-2008 Fabrice Bellard - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . 
- */ - -#include "qemu/osdep.h" -#include "qemu.h" -#include "user-internals.h" -#include "cpu_loop-common.h" -#include "signal-common.h" - -void cpu_loop(CPUCRISState *env) -{ - CPUState *cs = env_cpu(env); - int trapnr, ret; - - while (1) { - cpu_exec_start(cs); - trapnr = cpu_exec(cs); - cpu_exec_end(cs); - process_queued_cpu_work(cs); - - switch (trapnr) { - case EXCP_INTERRUPT: - /* just indicate that signals should be handled asap */ - break; - case EXCP_BREAK: - ret = do_syscall(env, - env->regs[9], - env->regs[10], - env->regs[11], - env->regs[12], - env->regs[13], - env->pregs[7], - env->pregs[11], - 0, 0); - if (ret == -QEMU_ERESTARTSYS) { - env->pc -= 2; - } else if (ret != -QEMU_ESIGRETURN) { - env->regs[10] = ret; - } - break; - case EXCP_DEBUG: - force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); - break; - case EXCP_ATOMIC: - cpu_exec_step_atomic(cs); - break; - default: - fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr); - cpu_dump_state(cs, stderr, 0); - exit(EXIT_FAILURE); - } - process_pending_signals (env); - } -} - -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) -{ - CPUState *cpu = env_cpu(env); - TaskState *ts = get_task_state(cpu); - struct image_info *info = ts->info; - - env->regs[0] = regs->r0; - env->regs[1] = regs->r1; - env->regs[2] = regs->r2; - env->regs[3] = regs->r3; - env->regs[4] = regs->r4; - env->regs[5] = regs->r5; - env->regs[6] = regs->r6; - env->regs[7] = regs->r7; - env->regs[8] = regs->r8; - env->regs[9] = regs->r9; - env->regs[10] = regs->r10; - env->regs[11] = regs->r11; - env->regs[12] = regs->r12; - env->regs[13] = regs->r13; - env->regs[14] = info->start_stack; - env->regs[15] = regs->acr; - env->pc = regs->erp; -} diff --git a/linux-user/cris/signal.c b/linux-user/cris/signal.c deleted file mode 100644 index 10948bcf30c..00000000000 --- a/linux-user/cris/signal.c +++ /dev/null @@ -1,194 +0,0 @@ -/* - * Emulation of Linux signals - * - * Copyright (c) 2003 Fabrice Bellard - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ -#include "qemu/osdep.h" -#include "qemu.h" -#include "user-internals.h" -#include "signal-common.h" -#include "linux-user/trace.h" - -struct target_sigcontext { - struct target_pt_regs regs; /* needs to be first */ - uint32_t oldmask; - uint32_t usp; /* usp before stacking this gunk on it */ -}; - -/* Signal frames. */ -struct target_signal_frame { - struct target_sigcontext sc; - uint32_t extramask[TARGET_NSIG_WORDS - 1]; - uint16_t retcode[4]; /* Trampoline code. 
*/ -}; - -static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) -{ - __put_user(env->regs[0], &sc->regs.r0); - __put_user(env->regs[1], &sc->regs.r1); - __put_user(env->regs[2], &sc->regs.r2); - __put_user(env->regs[3], &sc->regs.r3); - __put_user(env->regs[4], &sc->regs.r4); - __put_user(env->regs[5], &sc->regs.r5); - __put_user(env->regs[6], &sc->regs.r6); - __put_user(env->regs[7], &sc->regs.r7); - __put_user(env->regs[8], &sc->regs.r8); - __put_user(env->regs[9], &sc->regs.r9); - __put_user(env->regs[10], &sc->regs.r10); - __put_user(env->regs[11], &sc->regs.r11); - __put_user(env->regs[12], &sc->regs.r12); - __put_user(env->regs[13], &sc->regs.r13); - __put_user(env->regs[14], &sc->usp); - __put_user(env->regs[15], &sc->regs.acr); - __put_user(env->pregs[PR_MOF], &sc->regs.mof); - __put_user(env->pregs[PR_SRP], &sc->regs.srp); - __put_user(env->pc, &sc->regs.erp); -} - -static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) -{ - __get_user(env->regs[0], &sc->regs.r0); - __get_user(env->regs[1], &sc->regs.r1); - __get_user(env->regs[2], &sc->regs.r2); - __get_user(env->regs[3], &sc->regs.r3); - __get_user(env->regs[4], &sc->regs.r4); - __get_user(env->regs[5], &sc->regs.r5); - __get_user(env->regs[6], &sc->regs.r6); - __get_user(env->regs[7], &sc->regs.r7); - __get_user(env->regs[8], &sc->regs.r8); - __get_user(env->regs[9], &sc->regs.r9); - __get_user(env->regs[10], &sc->regs.r10); - __get_user(env->regs[11], &sc->regs.r11); - __get_user(env->regs[12], &sc->regs.r12); - __get_user(env->regs[13], &sc->regs.r13); - __get_user(env->regs[14], &sc->usp); - __get_user(env->regs[15], &sc->regs.acr); - __get_user(env->pregs[PR_MOF], &sc->regs.mof); - __get_user(env->pregs[PR_SRP], &sc->regs.srp); - __get_user(env->pc, &sc->regs.erp); -} - -static abi_ulong get_sigframe(CPUCRISState *env, int framesize) -{ - abi_ulong sp; - /* Align the stack downwards to 4. */ - sp = (env->regs[R_SP] & ~3); - return sp - framesize; -} - -static void setup_sigreturn(uint16_t *retcode) -{ - /* This is movu.w __NR_sigreturn, r9; break 13; */ - __put_user(0x9c5f, retcode + 0); - __put_user(TARGET_NR_sigreturn, retcode + 1); - __put_user(0xe93d, retcode + 2); -} - -void setup_frame(int sig, struct target_sigaction *ka, - target_sigset_t *set, CPUCRISState *env) -{ - struct target_signal_frame *frame; - abi_ulong frame_addr; - int i; - - frame_addr = get_sigframe(env, sizeof *frame); - trace_user_setup_frame(env, frame_addr); - if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) - goto badframe; - - /* - * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't - * use this trampoline anymore but it sets it up for GDB. - */ - setup_sigreturn(frame->retcode); - - /* Save the mask. */ - __put_user(set->sig[0], &frame->sc.oldmask); - - for(i = 1; i < TARGET_NSIG_WORDS; i++) { - __put_user(set->sig[i], &frame->extramask[i - 1]); - } - - setup_sigcontext(&frame->sc, env); - - /* Move the stack and setup the arguments for the handler. */ - env->regs[R_SP] = frame_addr; - env->regs[10] = sig; - env->pc = (unsigned long) ka->_sa_handler; - /* Link SRP so the guest returns through the trampoline. 
*/ - env->pregs[PR_SRP] = default_sigreturn; - - unlock_user_struct(frame, frame_addr, 1); - return; -badframe: - force_sigsegv(sig); -} - -void setup_rt_frame(int sig, struct target_sigaction *ka, - target_siginfo_t *info, - target_sigset_t *set, CPUCRISState *env) -{ - qemu_log_mask(LOG_UNIMP, "setup_rt_frame: not implemented\n"); -} - -long do_sigreturn(CPUCRISState *env) -{ - struct target_signal_frame *frame; - abi_ulong frame_addr; - target_sigset_t target_set; - sigset_t set; - int i; - - frame_addr = env->regs[R_SP]; - trace_user_do_sigreturn(env, frame_addr); - /* Make sure the guest isn't playing games. */ - if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) { - goto badframe; - } - - /* Restore blocked signals */ - __get_user(target_set.sig[0], &frame->sc.oldmask); - for(i = 1; i < TARGET_NSIG_WORDS; i++) { - __get_user(target_set.sig[i], &frame->extramask[i - 1]); - } - target_to_host_sigset_internal(&set, &target_set); - set_sigmask(&set); - - restore_sigcontext(&frame->sc, env); - unlock_user_struct(frame, frame_addr, 0); - return -QEMU_ESIGRETURN; -badframe: - force_sig(TARGET_SIGSEGV); - return -QEMU_ESIGRETURN; -} - -long do_rt_sigreturn(CPUCRISState *env) -{ - trace_user_do_rt_sigreturn(env, 0); - qemu_log_mask(LOG_UNIMP, "do_rt_sigreturn: not implemented\n"); - return -TARGET_ENOSYS; -} - -void setup_sigtramp(abi_ulong sigtramp_page) -{ - uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6, 0); - assert(tramp != NULL); - - default_sigreturn = sigtramp_page; - setup_sigreturn(tramp); - - unlock_user(tramp, sigtramp_page, 6); -} diff --git a/linux-user/cris/sockbits.h b/linux-user/cris/sockbits.h deleted file mode 100644 index 0e4c8f012d7..00000000000 --- a/linux-user/cris/sockbits.h +++ /dev/null @@ -1 +0,0 @@ -#include "../generic/sockbits.h" diff --git a/linux-user/cris/syscall_nr.h b/linux-user/cris/syscall_nr.h deleted file mode 100644 index 4b6cf65c42b..00000000000 --- a/linux-user/cris/syscall_nr.h +++ /dev/null @@ -1,367 +0,0 @@ -/* - * This file contains the system call numbers, and stub macros for libc. 
- */ - -#ifndef LINUX_USER_CRIS_SYSCALL_NR_H -#define LINUX_USER_CRIS_SYSCALL_NR_H - -#define TARGET_NR_restart_syscall 0 -#define TARGET_NR_exit 1 -#define TARGET_NR_fork 2 -#define TARGET_NR_read 3 -#define TARGET_NR_write 4 -#define TARGET_NR_open 5 -#define TARGET_NR_close 6 -#define TARGET_NR_waitpid 7 -#define TARGET_NR_creat 8 -#define TARGET_NR_link 9 -#define TARGET_NR_unlink 10 -#define TARGET_NR_execve 11 -#define TARGET_NR_chdir 12 -#define TARGET_NR_time 13 -#define TARGET_NR_mknod 14 -#define TARGET_NR_chmod 15 -#define TARGET_NR_lchown 16 -#define TARGET_NR_break 17 -#define TARGET_NR_oldstat 18 -#define TARGET_NR_lseek 19 -#define TARGET_NR_getpid 20 -#define TARGET_NR_mount 21 -#define TARGET_NR_umount 22 -#define TARGET_NR_setuid 23 -#define TARGET_NR_getuid 24 -#define TARGET_NR_stime 25 -#define TARGET_NR_ptrace 26 -#define TARGET_NR_alarm 27 -#define TARGET_NR_oldfstat 28 -#define TARGET_NR_pause 29 -#define TARGET_NR_utime 30 -#define TARGET_NR_stty 31 -#define TARGET_NR_gtty 32 -#define TARGET_NR_access 33 -#define TARGET_NR_nice 34 -#define TARGET_NR_ftime 35 -#define TARGET_NR_sync 36 -#define TARGET_NR_kill 37 -#define TARGET_NR_rename 38 -#define TARGET_NR_mkdir 39 -#define TARGET_NR_rmdir 40 -#define TARGET_NR_dup 41 -#define TARGET_NR_pipe 42 -#define TARGET_NR_times 43 -#define TARGET_NR_prof 44 -#define TARGET_NR_brk 45 -#define TARGET_NR_setgid 46 -#define TARGET_NR_getgid 47 -#define TARGET_NR_signal 48 -#define TARGET_NR_geteuid 49 -#define TARGET_NR_getegid 50 -#define TARGET_NR_acct 51 -#define TARGET_NR_umount2 52 -#define TARGET_NR_lock 53 -#define TARGET_NR_ioctl 54 -#define TARGET_NR_fcntl 55 -#define TARGET_NR_mpx 56 -#define TARGET_NR_setpgid 57 -#define TARGET_NR_ulimit 58 -#define TARGET_NR_oldolduname 59 -#define TARGET_NR_umask 60 -#define TARGET_NR_chroot 61 -#define TARGET_NR_ustat 62 -#define TARGET_NR_dup2 63 -#define TARGET_NR_getppid 64 -#define TARGET_NR_getpgrp 65 -#define TARGET_NR_setsid 66 -#define TARGET_NR_sigaction 67 -#define TARGET_NR_sgetmask 68 -#define TARGET_NR_ssetmask 69 -#define TARGET_NR_setreuid 70 -#define TARGET_NR_setregid 71 -#define TARGET_NR_sigsuspend 72 -#define TARGET_NR_sigpending 73 -#define TARGET_NR_sethostname 74 -#define TARGET_NR_setrlimit 75 -#define TARGET_NR_getrlimit 76 -#define TARGET_NR_getrusage 77 -#define TARGET_NR_gettimeofday 78 -#define TARGET_NR_settimeofday 79 -#define TARGET_NR_getgroups 80 -#define TARGET_NR_setgroups 81 -#define TARGET_NR_select 82 -#define TARGET_NR_symlink 83 -#define TARGET_NR_oldlstat 84 -#define TARGET_NR_readlink 85 -#define TARGET_NR_uselib 86 -#define TARGET_NR_swapon 87 -#define TARGET_NR_reboot 88 -#define TARGET_NR_readdir 89 -#define TARGET_NR_mmap 90 -#define TARGET_NR_munmap 91 -#define TARGET_NR_truncate 92 -#define TARGET_NR_ftruncate 93 -#define TARGET_NR_fchmod 94 -#define TARGET_NR_fchown 95 -#define TARGET_NR_getpriority 96 -#define TARGET_NR_setpriority 97 -#define TARGET_NR_profil 98 -#define TARGET_NR_statfs 99 -#define TARGET_NR_fstatfs 100 -#define TARGET_NR_ioperm 101 -#define TARGET_NR_socketcall 102 -#define TARGET_NR_syslog 103 -#define TARGET_NR_setitimer 104 -#define TARGET_NR_getitimer 105 -#define TARGET_NR_stat 106 -#define TARGET_NR_lstat 107 -#define TARGET_NR_fstat 108 -#define TARGET_NR_olduname 109 -#define TARGET_NR_iopl 110 -#define TARGET_NR_vhangup 111 -#define TARGET_NR_idle 112 -#define TARGET_NR_vm86 113 -#define TARGET_NR_wait4 114 -#define TARGET_NR_swapoff 115 -#define TARGET_NR_sysinfo 116 -#define TARGET_NR_ipc 117 
-#define TARGET_NR_fsync 118 -#define TARGET_NR_sigreturn 119 -#define TARGET_NR_clone 120 -#define TARGET_NR_setdomainname 121 -#define TARGET_NR_uname 122 -#define TARGET_NR_modify_ldt 123 -#define TARGET_NR_adjtimex 124 -#define TARGET_NR_mprotect 125 -#define TARGET_NR_sigprocmask 126 -#define TARGET_NR_create_module 127 -#define TARGET_NR_init_module 128 -#define TARGET_NR_delete_module 129 -#define TARGET_NR_get_kernel_syms 130 -#define TARGET_NR_quotactl 131 -#define TARGET_NR_getpgid 132 -#define TARGET_NR_fchdir 133 -#define TARGET_NR_bdflush 134 -#define TARGET_NR_sysfs 135 -#define TARGET_NR_personality 136 -#define TARGET_NR_afs_syscall 137 /* Syscall for Andrew File System */ -#define TARGET_NR_setfsuid 138 -#define TARGET_NR_setfsgid 139 -#define TARGET_NR__llseek 140 -#define TARGET_NR_getdents 141 -#define TARGET_NR__newselect 142 -#define TARGET_NR_flock 143 -#define TARGET_NR_msync 144 -#define TARGET_NR_readv 145 -#define TARGET_NR_writev 146 -#define TARGET_NR_getsid 147 -#define TARGET_NR_fdatasync 148 -#define TARGET_NR__sysctl 149 -#define TARGET_NR_mlock 150 -#define TARGET_NR_munlock 151 -#define TARGET_NR_mlockall 152 -#define TARGET_NR_munlockall 153 -#define TARGET_NR_sched_setparam 154 -#define TARGET_NR_sched_getparam 155 -#define TARGET_NR_sched_setscheduler 156 -#define TARGET_NR_sched_getscheduler 157 -#define TARGET_NR_sched_yield 158 -#define TARGET_NR_sched_get_priority_max 159 -#define TARGET_NR_sched_get_priority_min 160 -#define TARGET_NR_sched_rr_get_interval 161 -#define TARGET_NR_nanosleep 162 -#define TARGET_NR_mremap 163 -#define TARGET_NR_setresuid 164 -#define TARGET_NR_getresuid 165 - -#define TARGET_NR_query_module 167 -#define TARGET_NR_poll 168 -#define TARGET_NR_nfsservctl 169 -#define TARGET_NR_setresgid 170 -#define TARGET_NR_getresgid 171 -#define TARGET_NR_prctl 172 -#define TARGET_NR_rt_sigreturn 173 -#define TARGET_NR_rt_sigaction 174 -#define TARGET_NR_rt_sigprocmask 175 -#define TARGET_NR_rt_sigpending 176 -#define TARGET_NR_rt_sigtimedwait 177 -#define TARGET_NR_rt_sigqueueinfo 178 -#define TARGET_NR_rt_sigsuspend 179 -#define TARGET_NR_pread64 180 -#define TARGET_NR_pwrite64 181 -#define TARGET_NR_chown 182 -#define TARGET_NR_getcwd 183 -#define TARGET_NR_capget 184 -#define TARGET_NR_capset 185 -#define TARGET_NR_sigaltstack 186 -#define TARGET_NR_sendfile 187 -#define TARGET_NR_getpmsg 188 /* some people actually want streams */ -#define TARGET_NR_putpmsg 189 /* some people actually want streams */ -#define TARGET_NR_vfork 190 -#define TARGET_NR_ugetrlimit 191 /* SuS compliant getrlimit */ -#define TARGET_NR_mmap2 192 -#define TARGET_NR_truncate64 193 -#define TARGET_NR_ftruncate64 194 -#define TARGET_NR_stat64 195 -#define TARGET_NR_lstat64 196 -#define TARGET_NR_fstat64 197 -#define TARGET_NR_lchown32 198 -#define TARGET_NR_getuid32 199 -#define TARGET_NR_getgid32 200 -#define TARGET_NR_geteuid32 201 -#define TARGET_NR_getegid32 202 -#define TARGET_NR_setreuid32 203 -#define TARGET_NR_setregid32 204 -#define TARGET_NR_getgroups32 205 -#define TARGET_NR_setgroups32 206 -#define TARGET_NR_fchown32 207 -#define TARGET_NR_setresuid32 208 -#define TARGET_NR_getresuid32 209 -#define TARGET_NR_setresgid32 210 -#define TARGET_NR_getresgid32 211 -#define TARGET_NR_chown32 212 -#define TARGET_NR_setuid32 213 -#define TARGET_NR_setgid32 214 -#define TARGET_NR_setfsuid32 215 -#define TARGET_NR_setfsgid32 216 -#define TARGET_NR_pivot_root 217 -#define TARGET_NR_mincore 218 -#define TARGET_NR_madvise 219 -#define TARGET_NR_getdents64 220 
-#define TARGET_NR_fcntl64 221 -/* 223 is unused */ -#define TARGET_NR_gettid 224 -#define TARGET_NR_readahead 225 -#define TARGET_NR_setxattr 226 -#define TARGET_NR_lsetxattr 227 -#define TARGET_NR_fsetxattr 228 -#define TARGET_NR_getxattr 229 -#define TARGET_NR_lgetxattr 230 -#define TARGET_NR_fgetxattr 231 -#define TARGET_NR_listxattr 232 -#define TARGET_NR_llistxattr 233 -#define TARGET_NR_flistxattr 234 -#define TARGET_NR_removexattr 235 -#define TARGET_NR_lremovexattr 236 -#define TARGET_NR_fremovexattr 237 -#define TARGET_NR_tkill 238 -#define TARGET_NR_sendfile64 239 -#define TARGET_NR_futex 240 -#define TARGET_NR_sched_setaffinity 241 -#define TARGET_NR_sched_getaffinity 242 -#define TARGET_NR_set_thread_area 243 -#define TARGET_NR_get_thread_area 244 -#define TARGET_NR_io_setup 245 -#define TARGET_NR_io_destroy 246 -#define TARGET_NR_io_getevents 247 -#define TARGET_NR_io_submit 248 -#define TARGET_NR_io_cancel 249 -#define TARGET_NR_fadvise64 250 -#define TARGET_NR_exit_group 252 -#define TARGET_NR_lookup_dcookie 253 -#define TARGET_NR_epoll_create 254 -#define TARGET_NR_epoll_ctl 255 -#define TARGET_NR_epoll_wait 256 -#define TARGET_NR_remap_file_pages 257 -#define TARGET_NR_set_tid_address 258 -#define TARGET_NR_timer_create 259 -#define TARGET_NR_timer_settime (TARGET_NR_timer_create+1) -#define TARGET_NR_timer_gettime (TARGET_NR_timer_create+2) -#define TARGET_NR_timer_getoverrun (TARGET_NR_timer_create+3) -#define TARGET_NR_timer_delete (TARGET_NR_timer_create+4) -#define TARGET_NR_clock_settime (TARGET_NR_timer_create+5) -#define TARGET_NR_clock_gettime (TARGET_NR_timer_create+6) -#define TARGET_NR_clock_getres (TARGET_NR_timer_create+7) -#define TARGET_NR_clock_nanosleep (TARGET_NR_timer_create+8) -#define TARGET_NR_statfs64 268 -#define TARGET_NR_fstatfs64 269 -#define TARGET_NR_tgkill 270 -#define TARGET_NR_utimes 271 -#define TARGET_NR_fadvise64_64 272 -#define TARGET_NR_vserver 273 -#define TARGET_NR_mbind 274 -#define TARGET_NR_get_mempolicy 275 -#define TARGET_NR_set_mempolicy 276 -#define TARGET_NR_mq_open 277 -#define TARGET_NR_mq_unlink (TARGET_NR_mq_open+1) -#define TARGET_NR_mq_timedsend (TARGET_NR_mq_open+2) -#define TARGET_NR_mq_timedreceive (TARGET_NR_mq_open+3) -#define TARGET_NR_mq_notify (TARGET_NR_mq_open+4) -#define TARGET_NR_mq_getsetattr (TARGET_NR_mq_open+5) -#define TARGET_NR_kexec_load 283 -#define TARGET_NR_waitid 284 -/* #define TARGET_NR_sys_setaltroot 285 */ -#define TARGET_NR_add_key 286 -#define TARGET_NR_request_key 287 -#define TARGET_NR_keyctl 288 -#define TARGET_NR_ioprio_set 289 -#define TARGET_NR_ioprio_get 290 -#define TARGET_NR_inotify_init 291 -#define TARGET_NR_inotify_add_watch 292 -#define TARGET_NR_inotify_rm_watch 293 -#define TARGET_NR_migrate_pages 294 -#define TARGET_NR_openat 295 -#define TARGET_NR_mkdirat 296 -#define TARGET_NR_mknodat 297 -#define TARGET_NR_fchownat 298 -#define TARGET_NR_futimesat 299 -#define TARGET_NR_fstatat64 300 -#define TARGET_NR_unlinkat 301 -#define TARGET_NR_renameat 302 -#define TARGET_NR_linkat 303 -#define TARGET_NR_symlinkat 304 -#define TARGET_NR_readlinkat 305 -#define TARGET_NR_fchmodat 306 -#define TARGET_NR_faccessat 307 -#define TARGET_NR_pselect6 308 -#define TARGET_NR_ppoll 309 -#define TARGET_NR_unshare 310 -#define TARGET_NR_set_robust_list 311 -#define TARGET_NR_get_robust_list 312 -#define TARGET_NR_splice 313 -#define TARGET_NR_sync_file_range 314 -#define TARGET_NR_tee 315 -#define TARGET_NR_vmsplice 316 -#define TARGET_NR_move_pages 317 -#define TARGET_NR_getcpu 318 -#define 
TARGET_NR_epoll_pwait 319 -#define TARGET_NR_utimensat 320 -#define TARGET_NR_signalfd 321 -#define TARGET_NR_timerfd_create 322 -#define TARGET_NR_eventfd 323 -#define TARGET_NR_fallocate 324 -#define TARGET_NR_timerfd_settime 325 -#define TARGET_NR_timerfd_gettime 326 -#define TARGET_NR_signalfd4 327 -#define TARGET_NR_eventfd2 328 -#define TARGET_NR_epoll_create1 329 -#define TARGET_NR_dup3 330 -#define TARGET_NR_pipe2 331 -#define TARGET_NR_inotify_init1 332 -#define TARGET_NR_preadv 333 -#define TARGET_NR_pwritev 334 -#define TARGET_NR_setns 335 -#define TARGET_NR_name_to_handle_at 336 -#define TARGET_NR_open_by_handle_at 337 -#define TARGET_NR_rt_tgsigqueueinfo 338 -#define TARGET_NR_perf_event_open 339 -#define TARGET_NR_recvmmsg 340 -#define TARGET_NR_accept4 341 -#define TARGET_NR_fanotify_init 342 -#define TARGET_NR_fanotify_mark 343 -#define TARGET_NR_prlimit64 344 -#define TARGET_NR_clock_adjtime 345 -#define TARGET_NR_syncfs 346 -#define TARGET_NR_sendmmsg 347 -#define TARGET_NR_process_vm_readv 348 -#define TARGET_NR_process_vm_writev 349 -#define TARGET_NR_kcmp 350 -#define TARGET_NR_finit_module 351 -#define TARGET_NR_sched_setattr 352 -#define TARGET_NR_sched_getattr 353 -#define TARGET_NR_renameat2 354 -#define TARGET_NR_seccomp 355 -#define TARGET_NR_getrandom 356 -#define TARGET_NR_memfd_create 357 -#define TARGET_NR_bpf 358 -#define TARGET_NR_execveat 359 - -#endif diff --git a/linux-user/cris/target_cpu.h b/linux-user/cris/target_cpu.h deleted file mode 100644 index 7f6cade7b65..00000000000 --- a/linux-user/cris/target_cpu.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * CRIS specific CPU ABI and functions for linux-user - * - * Copyright (c) 2007 AXIS Communications AB - * Written by Edgar E. Iglesias - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#ifndef CRIS_TARGET_CPU_H -#define CRIS_TARGET_CPU_H - -static inline void cpu_clone_regs_child(CPUCRISState *env, target_ulong newsp, - unsigned flags) -{ - if (newsp) { - env->regs[14] = newsp; - } - env->regs[10] = 0; -} - -static inline void cpu_clone_regs_parent(CPUCRISState *env, unsigned flags) -{ -} - -static inline void cpu_set_tls(CPUCRISState *env, target_ulong newtls) -{ - env->pregs[PR_PID] = (env->pregs[PR_PID] & 0xff) | newtls; -} - -static inline abi_ulong get_sp_from_cpustate(CPUCRISState *state) -{ - return state->regs[14]; -} -#endif diff --git a/linux-user/cris/target_elf.h b/linux-user/cris/target_elf.h deleted file mode 100644 index 99eb4ec7046..00000000000 --- a/linux-user/cris/target_elf.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation, or (at your option) any - * later version. See the COPYING file in the top-level directory. 
- */ - -#ifndef CRIS_TARGET_ELF_H -#define CRIS_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - return "any"; -} -#endif diff --git a/linux-user/cris/target_errno_defs.h b/linux-user/cris/target_errno_defs.h deleted file mode 100644 index 1cf43b17a50..00000000000 --- a/linux-user/cris/target_errno_defs.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef CRIS_TARGET_ERRNO_DEFS_H -#define CRIS_TARGET_ERRNO_DEFS_H - -/* Target uses generic errno */ -#include "../generic/target_errno_defs.h" - -#endif diff --git a/linux-user/cris/target_fcntl.h b/linux-user/cris/target_fcntl.h deleted file mode 100644 index df0aceea344..00000000000 --- a/linux-user/cris/target_fcntl.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation, or (at your option) any - * later version. See the COPYING file in the top-level directory. - */ - -#ifndef CRIS_TARGET_FCNTL_H -#define CRIS_TARGET_FCNTL_H -#include "../generic/fcntl.h" -#endif diff --git a/linux-user/cris/target_mman.h b/linux-user/cris/target_mman.h deleted file mode 100644 index 9ace8ac2925..00000000000 --- a/linux-user/cris/target_mman.h +++ /dev/null @@ -1,13 +0,0 @@ -/* - * arch/cris/include/asm/processor.h: - * TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) - * - * arch/cris/include/arch-v32/arch/processor.h - * TASK_SIZE 0xb0000000 - */ -#define TASK_UNMAPPED_BASE TARGET_PAGE_ALIGN(0xb0000000 / 3) - -/* arch/cris/include/uapi/asm/elf.h */ -#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE * 2) - -#include "../generic/target_mman.h" diff --git a/linux-user/cris/target_prctl.h b/linux-user/cris/target_prctl.h deleted file mode 100644 index eb53b31ad55..00000000000 --- a/linux-user/cris/target_prctl.h +++ /dev/null @@ -1 +0,0 @@ -/* No special prctl support required. 
*/ diff --git a/linux-user/cris/target_proc.h b/linux-user/cris/target_proc.h deleted file mode 100644 index 43fe29ca723..00000000000 --- a/linux-user/cris/target_proc.h +++ /dev/null @@ -1 +0,0 @@ -/* No target-specific /proc support */ diff --git a/linux-user/cris/target_resource.h b/linux-user/cris/target_resource.h deleted file mode 100644 index 227259594c0..00000000000 --- a/linux-user/cris/target_resource.h +++ /dev/null @@ -1 +0,0 @@ -#include "../generic/target_resource.h" diff --git a/linux-user/cris/target_signal.h b/linux-user/cris/target_signal.h deleted file mode 100644 index ab0653fcdc0..00000000000 --- a/linux-user/cris/target_signal.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef CRIS_TARGET_SIGNAL_H -#define CRIS_TARGET_SIGNAL_H - -#include "../generic/signal.h" - -#define TARGET_ARCH_HAS_SETUP_FRAME -#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 - -#endif /* CRIS_TARGET_SIGNAL_H */ diff --git a/linux-user/cris/target_structs.h b/linux-user/cris/target_structs.h deleted file mode 100644 index 3a06f373c35..00000000000 --- a/linux-user/cris/target_structs.h +++ /dev/null @@ -1 +0,0 @@ -#include "../generic/target_structs.h" diff --git a/linux-user/cris/target_syscall.h b/linux-user/cris/target_syscall.h deleted file mode 100644 index 0b5ebf1f028..00000000000 --- a/linux-user/cris/target_syscall.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef CRIS_TARGET_SYSCALL_H -#define CRIS_TARGET_SYSCALL_H - -#define UNAME_MACHINE "cris" -#define UNAME_MINIMUM_RELEASE "2.6.32" - -/* pt_regs not only specifies the format in the user-struct during - * ptrace but is also the frame format used in the kernel prologue/epilogues - * themselves - */ - -struct target_pt_regs { - unsigned long orig_r10; - /* pushed by movem r13, [sp] in SAVE_ALL. */ - unsigned long r0; - unsigned long r1; - unsigned long r2; - unsigned long r3; - unsigned long r4; - unsigned long r5; - unsigned long r6; - unsigned long r7; - unsigned long r8; - unsigned long r9; - unsigned long r10; - unsigned long r11; - unsigned long r12; - unsigned long r13; - unsigned long acr; - unsigned long srs; - unsigned long mof; - unsigned long spc; - unsigned long ccs; - unsigned long srp; - unsigned long erp; /* This is actually the debugged process's PC */ - /* For debugging purposes; saved only when needed. 
*/ - unsigned long exs; - unsigned long eda; -}; - -#define TARGET_CLONE_BACKWARDS2 -#define TARGET_MCL_CURRENT 1 -#define TARGET_MCL_FUTURE 2 -#define TARGET_MCL_ONFAULT 4 - -#endif diff --git a/linux-user/cris/termbits.h b/linux-user/cris/termbits.h deleted file mode 100644 index 0c8d8fc0516..00000000000 --- a/linux-user/cris/termbits.h +++ /dev/null @@ -1,225 +0,0 @@ -/* from asm/termbits.h */ - -#ifndef LINUX_USER_CRIS_TERMBITS_H -#define LINUX_USER_CRIS_TERMBITS_H - -#define TARGET_NCCS 19 - -typedef unsigned char target_cc_t; /* cc_t */ -typedef unsigned int target_speed_t; /* speed_t */ -typedef unsigned int target_tcflag_t; /* tcflag_t */ - -struct target_termios { - target_tcflag_t c_iflag; /* input mode flags */ - target_tcflag_t c_oflag; /* output mode flags */ - target_tcflag_t c_cflag; /* control mode flags */ - target_tcflag_t c_lflag; /* local mode flags */ - target_cc_t c_line; /* line discipline */ - target_cc_t c_cc[TARGET_NCCS]; /* control characters */ -}; - -/* c_iflag bits */ -#define TARGET_IGNBRK 0000001 -#define TARGET_BRKINT 0000002 -#define TARGET_IGNPAR 0000004 -#define TARGET_PARMRK 0000010 -#define TARGET_INPCK 0000020 -#define TARGET_ISTRIP 0000040 -#define TARGET_INLCR 0000100 -#define TARGET_IGNCR 0000200 -#define TARGET_ICRNL 0000400 -#define TARGET_IUCLC 0001000 -#define TARGET_IXON 0002000 -#define TARGET_IXANY 0004000 -#define TARGET_IXOFF 0010000 -#define TARGET_IMAXBEL 0020000 -#define TARGET_IUTF8 0040000 - -/* c_oflag bits */ -#define TARGET_OPOST 0000001 -#define TARGET_OLCUC 0000002 -#define TARGET_ONLCR 0000004 -#define TARGET_OCRNL 0000010 -#define TARGET_ONOCR 0000020 -#define TARGET_ONLRET 0000040 -#define TARGET_OFILL 0000100 -#define TARGET_OFDEL 0000200 -#define TARGET_NLDLY 0000400 -#define TARGET_NL0 0000000 -#define TARGET_NL1 0000400 -#define TARGET_CRDLY 0003000 -#define TARGET_CR0 0000000 -#define TARGET_CR1 0001000 -#define TARGET_CR2 0002000 -#define TARGET_CR3 0003000 -#define TARGET_TABDLY 0014000 -#define TARGET_TAB0 0000000 -#define TARGET_TAB1 0004000 -#define TARGET_TAB2 0010000 -#define TARGET_TAB3 0014000 -#define TARGET_XTABS 0014000 -#define TARGET_BSDLY 0020000 -#define TARGET_BS0 0000000 -#define TARGET_BS1 0020000 -#define TARGET_VTDLY 0040000 -#define TARGET_VT0 0000000 -#define TARGET_VT1 0040000 -#define TARGET_FFDLY 0100000 -#define TARGET_FF0 0000000 -#define TARGET_FF1 0100000 - -/* c_cflag bit meaning */ -#define TARGET_CBAUD 0010017 -#define TARGET_B0 0000000 /* hang up */ -#define TARGET_B50 0000001 -#define TARGET_B75 0000002 -#define TARGET_B110 0000003 -#define TARGET_B134 0000004 -#define TARGET_B150 0000005 -#define TARGET_B200 0000006 -#define TARGET_B300 0000007 -#define TARGET_B600 0000010 -#define TARGET_B1200 0000011 -#define TARGET_B1800 0000012 -#define TARGET_B2400 0000013 -#define TARGET_B4800 0000014 -#define TARGET_B9600 0000015 -#define TARGET_B19200 0000016 -#define TARGET_B38400 0000017 -#define TARGET_EXTA B19200 -#define TARGET_EXTB B38400 -#define TARGET_CSIZE 0000060 -#define TARGET_CS5 0000000 -#define TARGET_CS6 0000020 -#define TARGET_CS7 0000040 -#define TARGET_CS8 0000060 -#define TARGET_CSTOPB 0000100 -#define TARGET_CREAD 0000200 -#define TARGET_PARENB 0000400 -#define TARGET_PARODD 0001000 -#define TARGET_HUPCL 0002000 -#define TARGET_CLOCAL 0004000 -#define TARGET_CBAUDEX 0010000 -#define TARGET_B57600 0010001 -#define TARGET_B115200 0010002 -#define TARGET_B230400 0010003 -#define TARGET_B460800 0010004 -#define TARGET_CIBAUD 002003600000 /* input baud rate (not used) */ 
-#define TARGET_CRTSCTS 020000000000 /* flow control */ - -/* c_lflag bits */ -#define TARGET_ISIG 0000001 -#define TARGET_ICANON 0000002 -#define TARGET_XCASE 0000004 -#define TARGET_ECHO 0000010 -#define TARGET_ECHOE 0000020 -#define TARGET_ECHOK 0000040 -#define TARGET_ECHONL 0000100 -#define TARGET_NOFLSH 0000200 -#define TARGET_TOSTOP 0000400 -#define TARGET_ECHOCTL 0001000 -#define TARGET_ECHOPRT 0002000 -#define TARGET_ECHOKE 0004000 -#define TARGET_FLUSHO 0010000 -#define TARGET_PENDIN 0040000 -#define TARGET_IEXTEN 0100000 -#define TARGET_EXTPROC 0200000 - -/* c_cc character offsets */ -#define TARGET_VINTR 0 -#define TARGET_VQUIT 1 -#define TARGET_VERASE 2 -#define TARGET_VKILL 3 -#define TARGET_VEOF 4 -#define TARGET_VTIME 5 -#define TARGET_VMIN 6 -#define TARGET_VSWTC 7 -#define TARGET_VSTART 8 -#define TARGET_VSTOP 9 -#define TARGET_VSUSP 10 -#define TARGET_VEOL 11 -#define TARGET_VREPRINT 12 -#define TARGET_VDISCARD 13 -#define TARGET_VWERASE 14 -#define TARGET_VLNEXT 15 -#define TARGET_VEOL2 16 - -/* ioctls */ - -#define TARGET_TCGETS 0x5401 -#define TARGET_TCSETS 0x5402 -#define TARGET_TCSETSW 0x5403 -#define TARGET_TCSETSF 0x5404 -#define TARGET_TCGETA 0x5405 -#define TARGET_TCSETA 0x5406 -#define TARGET_TCSETAW 0x5407 -#define TARGET_TCSETAF 0x5408 -#define TARGET_TCSBRK 0x5409 -#define TARGET_TCXONC 0x540A -#define TARGET_TCFLSH 0x540B - -#define TARGET_TIOCEXCL 0x540C -#define TARGET_TIOCNXCL 0x540D -#define TARGET_TIOCSCTTY 0x540E -#define TARGET_TIOCGPGRP 0x540F -#define TARGET_TIOCSPGRP 0x5410 -#define TARGET_TIOCOUTQ 0x5411 -#define TARGET_TIOCSTI 0x5412 -#define TARGET_TIOCGWINSZ 0x5413 -#define TARGET_TIOCSWINSZ 0x5414 -#define TARGET_TIOCMGET 0x5415 -#define TARGET_TIOCMBIS 0x5416 -#define TARGET_TIOCMBIC 0x5417 -#define TARGET_TIOCMSET 0x5418 -#define TARGET_TIOCGSOFTCAR 0x5419 -#define TARGET_TIOCSSOFTCAR 0x541A -#define TARGET_FIONREAD 0x541B -#define TARGET_TIOCINQ TARGET_FIONREAD -#define TARGET_TIOCLINUX 0x541C -#define TARGET_TIOCCONS 0x541D -#define TARGET_TIOCGSERIAL 0x541E -#define TARGET_TIOCSSERIAL 0x541F -#define TARGET_TIOCPKT 0x5420 -#define TARGET_FIONBIO 0x5421 -#define TARGET_TIOCNOTTY 0x5422 -#define TARGET_TIOCSETD 0x5423 -#define TARGET_TIOCGETD 0x5424 -#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ -#define TARGET_TIOCTTYGSTRUCT 0x5426 /* For debugging only */ -#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */ -#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */ -#define TARGET_TIOCGSID 0x5429 /* Return the session ID of FD */ -#define TARGET_TIOCGPTN TARGET_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ -#define TARGET_TIOCSPTLCK TARGET_IOW('T',0x31, int) /* Lock/unlock Pty */ -#define TARGET_TIOCGPTPEER TARGET_IO('T', 0x41) /* Safely open the slave */ - -#define TARGET_FIONCLEX 0x5450 /* these numbers need to be adjusted. 
*/ -#define TARGET_FIOCLEX 0x5451 -#define TARGET_FIOASYNC 0x5452 -#define TARGET_TIOCSERCONFIG 0x5453 -#define TARGET_TIOCSERGWILD 0x5454 -#define TARGET_TIOCSERSWILD 0x5455 -#define TARGET_TIOCGLCKTRMIOS 0x5456 -#define TARGET_TIOCSLCKTRMIOS 0x5457 -#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */ -#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */ -#define TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */ -#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */ - -#define TARGET_TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ -#define TARGET_TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ -#define TARGET_TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ -#define TARGET_TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ - -/* Used for packet mode */ -#define TARGET_TIOCPKT_DATA 0 -#define TARGET_TIOCPKT_FLUSHREAD 1 -#define TARGET_TIOCPKT_FLUSHWRITE 2 -#define TARGET_TIOCPKT_STOP 4 -#define TARGET_TIOCPKT_START 8 -#define TARGET_TIOCPKT_NOSTOP 16 -#define TARGET_TIOCPKT_DOSTOP 32 - -#define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */ - -#endif diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 0d4dc1f6d15..ea214105ff8 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -8,7 +8,11 @@ #include "qemu.h" #include "user/tswap-target.h" +#include "user/page-protection.h" #include "exec/page-protection.h" +#include "exec/mmap-lock.h" +#include "exec/translation-block.h" +#include "exec/tswap.h" #include "user/guest-base.h" #include "user-internals.h" #include "signal-common.h" @@ -203,7 +207,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en (*regs)[12] = tswapreg(env->regs[R_EDX]); (*regs)[13] = tswapreg(env->regs[R_ESI]); (*regs)[14] = tswapreg(env->regs[R_EDI]); - (*regs)[15] = tswapreg(env->regs[R_EAX]); /* XXX */ + (*regs)[15] = tswapreg(get_task_state(env_cpu_const(env))->orig_ax); (*regs)[16] = tswapreg(env->eip); (*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff); (*regs)[18] = tswapreg(env->eflags); @@ -306,7 +310,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en (*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff); (*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff); (*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff); - (*regs)[11] = tswapreg(env->regs[R_EAX]); /* XXX */ + (*regs)[11] = tswapreg(get_task_state(env_cpu_const(env))->orig_ax); (*regs)[12] = tswapreg(env->eip); (*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff); (*regs)[14] = tswapreg(env->eflags); @@ -659,6 +663,23 @@ static const char *get_elf_platform(void) #undef END } +#if TARGET_BIG_ENDIAN +#include "elf.h" +#include "vdso-be8.c.inc" +#include "vdso-be32.c.inc" + +static const VdsoImageInfo *vdso_image_info(uint32_t elf_flags) +{ + return (EF_ARM_EABI_VERSION(elf_flags) >= EF_ARM_EABI_VER4 + && (elf_flags & EF_ARM_BE8) + ? 
&vdso_be8_image_info + : &vdso_be32_image_info); +} +#define vdso_image_info vdso_image_info +#else +# define VDSO_HEADER "vdso-le.c.inc" +#endif + #else /* 64 bit ARM definitions */ @@ -730,7 +751,23 @@ enum { ARM_HWCAP_A64_SSBS = 1 << 28, ARM_HWCAP_A64_SB = 1 << 29, ARM_HWCAP_A64_PACA = 1 << 30, - ARM_HWCAP_A64_PACG = 1UL << 31, + ARM_HWCAP_A64_PACG = 1ULL << 31, + ARM_HWCAP_A64_GCS = 1ULL << 32, + ARM_HWCAP_A64_CMPBR = 1ULL << 33, + ARM_HWCAP_A64_FPRCVT = 1ULL << 34, + ARM_HWCAP_A64_F8MM8 = 1ULL << 35, + ARM_HWCAP_A64_F8MM4 = 1ULL << 36, + ARM_HWCAP_A64_SVE_F16MM = 1ULL << 37, + ARM_HWCAP_A64_SVE_ELTPERM = 1ULL << 38, + ARM_HWCAP_A64_SVE_AES2 = 1ULL << 39, + ARM_HWCAP_A64_SVE_BFSCALE = 1ULL << 40, + ARM_HWCAP_A64_SVE2P2 = 1ULL << 41, + ARM_HWCAP_A64_SME2P2 = 1ULL << 42, + ARM_HWCAP_A64_SME_SBITPERM = 1ULL << 43, + ARM_HWCAP_A64_SME_AES = 1ULL << 44, + ARM_HWCAP_A64_SME_SFEXPA = 1ULL << 45, + ARM_HWCAP_A64_SME_STMOP = 1ULL << 46, + ARM_HWCAP_A64_SME_SMOP4 = 1ULL << 47, ARM_HWCAP2_A64_DCPODP = 1 << 0, ARM_HWCAP2_A64_SVE2 = 1 << 1, @@ -777,6 +814,25 @@ enum { ARM_HWCAP2_A64_SME_F16F16 = 1ULL << 42, ARM_HWCAP2_A64_MOPS = 1ULL << 43, ARM_HWCAP2_A64_HBC = 1ULL << 44, + ARM_HWCAP2_A64_SVE_B16B16 = 1ULL << 45, + ARM_HWCAP2_A64_LRCPC3 = 1ULL << 46, + ARM_HWCAP2_A64_LSE128 = 1ULL << 47, + ARM_HWCAP2_A64_FPMR = 1ULL << 48, + ARM_HWCAP2_A64_LUT = 1ULL << 49, + ARM_HWCAP2_A64_FAMINMAX = 1ULL << 50, + ARM_HWCAP2_A64_F8CVT = 1ULL << 51, + ARM_HWCAP2_A64_F8FMA = 1ULL << 52, + ARM_HWCAP2_A64_F8DP4 = 1ULL << 53, + ARM_HWCAP2_A64_F8DP2 = 1ULL << 54, + ARM_HWCAP2_A64_F8E4M3 = 1ULL << 55, + ARM_HWCAP2_A64_F8E5M2 = 1ULL << 56, + ARM_HWCAP2_A64_SME_LUTV2 = 1ULL << 57, + ARM_HWCAP2_A64_SME_F8F16 = 1ULL << 58, + ARM_HWCAP2_A64_SME_F8F32 = 1ULL << 59, + ARM_HWCAP2_A64_SME_SF8FMA = 1ULL << 60, + ARM_HWCAP2_A64_SME_SF8DP4 = 1ULL << 61, + ARM_HWCAP2_A64_SME_SF8DP2 = 1ULL << 62, + ARM_HWCAP2_A64_POE = 1ULL << 63, }; #define ELF_HWCAP get_elf_hwcap() @@ -859,13 +915,21 @@ uint64_t get_elf_hwcap2(void) GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64); GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC); GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS); + GET_FEATURE_ID(aa64_sve2p1, ARM_HWCAP2_A64_SVE2P1); + GET_FEATURE_ID(aa64_sme2, (ARM_HWCAP2_A64_SME2 | + ARM_HWCAP2_A64_SME_I16I32 | + ARM_HWCAP2_A64_SME_BI32I32)); + GET_FEATURE_ID(aa64_sme2p1, ARM_HWCAP2_A64_SME2P1); + GET_FEATURE_ID(aa64_sme_b16b16, ARM_HWCAP2_A64_SME_B16B16); + GET_FEATURE_ID(aa64_sme_f16f16, ARM_HWCAP2_A64_SME_F16F16); + GET_FEATURE_ID(aa64_sve_b16b16, ARM_HWCAP2_A64_SVE_B16B16); return hwcaps; } const char *elf_hwcap_str(uint32_t bit) { - static const char *hwcap_str[] = { + static const char * const hwcap_str[] = { [__builtin_ctz(ARM_HWCAP_A64_FP )] = "fp", [__builtin_ctz(ARM_HWCAP_A64_ASIMD )] = "asimd", [__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm", @@ -898,6 +962,22 @@ const char *elf_hwcap_str(uint32_t bit) [__builtin_ctz(ARM_HWCAP_A64_SB )] = "sb", [__builtin_ctz(ARM_HWCAP_A64_PACA )] = "paca", [__builtin_ctz(ARM_HWCAP_A64_PACG )] = "pacg", + [__builtin_ctzll(ARM_HWCAP_A64_GCS )] = "gcs", + [__builtin_ctzll(ARM_HWCAP_A64_CMPBR )] = "cmpbr", + [__builtin_ctzll(ARM_HWCAP_A64_FPRCVT)] = "fprcvt", + [__builtin_ctzll(ARM_HWCAP_A64_F8MM8 )] = "f8mm8", + [__builtin_ctzll(ARM_HWCAP_A64_F8MM4 )] = "f8mm4", + [__builtin_ctzll(ARM_HWCAP_A64_SVE_F16MM)] = "svef16mm", + [__builtin_ctzll(ARM_HWCAP_A64_SVE_ELTPERM)] = "sveeltperm", + [__builtin_ctzll(ARM_HWCAP_A64_SVE_AES2)] = "sveaes2", + [__builtin_ctzll(ARM_HWCAP_A64_SVE_BFSCALE)] = "svebfscale", + 
[__builtin_ctzll(ARM_HWCAP_A64_SVE2P2)] = "sve2p2", + [__builtin_ctzll(ARM_HWCAP_A64_SME2P2)] = "sme2p2", + [__builtin_ctzll(ARM_HWCAP_A64_SME_SBITPERM)] = "smesbitperm", + [__builtin_ctzll(ARM_HWCAP_A64_SME_AES)] = "smeaes", + [__builtin_ctzll(ARM_HWCAP_A64_SME_SFEXPA)] = "smesfexpa", + [__builtin_ctzll(ARM_HWCAP_A64_SME_STMOP)] = "smestmop", + [__builtin_ctzll(ARM_HWCAP_A64_SME_SMOP4)] = "smesmop4", }; return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; @@ -905,7 +985,7 @@ const char *elf_hwcap_str(uint32_t bit) const char *elf_hwcap2_str(uint32_t bit) { - static const char *hwcap_str[] = { + static const char * const hwcap_str[] = { [__builtin_ctz(ARM_HWCAP2_A64_DCPODP )] = "dcpodp", [__builtin_ctz(ARM_HWCAP2_A64_SVE2 )] = "sve2", [__builtin_ctz(ARM_HWCAP2_A64_SVEAES )] = "sveaes", @@ -951,6 +1031,24 @@ const char *elf_hwcap2_str(uint32_t bit) [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16", [__builtin_ctzll(ARM_HWCAP2_A64_MOPS )] = "mops", [__builtin_ctzll(ARM_HWCAP2_A64_HBC )] = "hbc", + [__builtin_ctzll(ARM_HWCAP2_A64_SVE_B16B16 )] = "sveb16b16", + [__builtin_ctzll(ARM_HWCAP2_A64_LRCPC3 )] = "lrcpc3", + [__builtin_ctzll(ARM_HWCAP2_A64_LSE128 )] = "lse128", + [__builtin_ctzll(ARM_HWCAP2_A64_FPMR )] = "fpmr", + [__builtin_ctzll(ARM_HWCAP2_A64_LUT )] = "lut", + [__builtin_ctzll(ARM_HWCAP2_A64_FAMINMAX )] = "faminmax", + [__builtin_ctzll(ARM_HWCAP2_A64_F8CVT )] = "f8cvt", + [__builtin_ctzll(ARM_HWCAP2_A64_F8FMA )] = "f8fma", + [__builtin_ctzll(ARM_HWCAP2_A64_F8DP4 )] = "f8dp4", + [__builtin_ctzll(ARM_HWCAP2_A64_F8DP2 )] = "f8dp2", + [__builtin_ctzll(ARM_HWCAP2_A64_F8E4M3 )] = "f8e4m3", + [__builtin_ctzll(ARM_HWCAP2_A64_F8E5M2 )] = "f8e5m2", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_LUTV2 )] = "smelutv2", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_F8F16 )] = "smef8f16", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_F8F32 )] = "smef8f32", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_SF8DP4 )] = "smesf8dp4", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_SF8DP2 )] = "smesf8dp2", + [__builtin_ctzll(ARM_HWCAP2_A64_POE )] = "poe", }; return bit < ARRAY_SIZE(hwcap_str) ? 
hwcap_str[bit] : NULL; @@ -958,14 +1056,14 @@ const char *elf_hwcap2_str(uint32_t bit) #undef GET_FEATURE_ID -#endif /* not TARGET_AARCH64 */ - #if TARGET_BIG_ENDIAN # define VDSO_HEADER "vdso-be.c.inc" #else # define VDSO_HEADER "vdso-le.c.inc" #endif +#endif /* not TARGET_AARCH64 */ + #endif /* TARGET_ARM */ #ifdef TARGET_SPARC @@ -1647,21 +1745,6 @@ static uint32_t get_elf_hwcap(void) #endif -#ifdef TARGET_CRIS - -#define ELF_CLASS ELFCLASS32 -#define ELF_ARCH EM_CRIS - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - regs->erp = infop->entry; -} - -#define ELF_EXEC_PAGESIZE 8192 - -#endif - #ifdef TARGET_M68K #define ELF_CLASS ELFCLASS32 @@ -2117,9 +2200,12 @@ static inline void memcpy_fromfs(void * to, const void * from, unsigned long n) memcpy(to, from, n); } -#ifdef BSWAP_NEEDED static void bswap_ehdr(struct elfhdr *ehdr) { + if (!target_needs_bswap()) { + return; + } + bswap16s(&ehdr->e_type); /* Object file type */ bswap16s(&ehdr->e_machine); /* Architecture */ bswap32s(&ehdr->e_version); /* Object file version */ @@ -2137,8 +2223,11 @@ static void bswap_ehdr(struct elfhdr *ehdr) static void bswap_phdr(struct elf_phdr *phdr, int phnum) { - int i; - for (i = 0; i < phnum; ++i, ++phdr) { + if (!target_needs_bswap()) { + return; + } + + for (int i = 0; i < phnum; ++i, ++phdr) { bswap32s(&phdr->p_type); /* Segment type */ bswap32s(&phdr->p_flags); /* Segment flags */ bswaptls(&phdr->p_offset); /* Segment file offset */ @@ -2152,8 +2241,11 @@ static void bswap_phdr(struct elf_phdr *phdr, int phnum) static void bswap_shdr(struct elf_shdr *shdr, int shnum) { - int i; - for (i = 0; i < shnum; ++i, ++shdr) { + if (!target_needs_bswap()) { + return; + } + + for (int i = 0; i < shnum; ++i, ++shdr) { bswap32s(&shdr->sh_name); bswap32s(&shdr->sh_type); bswaptls(&shdr->sh_flags); @@ -2169,6 +2261,10 @@ static void bswap_shdr(struct elf_shdr *shdr, int shnum) static void bswap_sym(struct elf_sym *sym) { + if (!target_needs_bswap()) { + return; + } + bswap32s(&sym->st_name); bswaptls(&sym->st_value); bswaptls(&sym->st_size); @@ -2178,6 +2274,10 @@ static void bswap_sym(struct elf_sym *sym) #ifdef TARGET_MIPS static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { + if (!target_needs_bswap()) { + return; + } + bswap16s(&abiflags->version); bswap32s(&abiflags->ases); bswap32s(&abiflags->isa_ext); @@ -2185,15 +2285,6 @@ static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) bswap32s(&abiflags->flags2); } #endif -#else -static inline void bswap_ehdr(struct elfhdr *ehdr) { } -static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { } -static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { } -static inline void bswap_sym(struct elf_sym *sym) { } -#ifdef TARGET_MIPS -static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { } -#endif -#endif #ifdef USE_ELF_CORE_DUMP static int elf_core_dump(int, const CPUArchState *); @@ -2913,7 +3004,7 @@ static uintptr_t pgb_try_itree(const PGBAddrs *ga, uintptr_t base, static uintptr_t pgb_find_itree(const PGBAddrs *ga, IntervalTreeRoot *root, uintptr_t align, uintptr_t brk) { - uintptr_t last = mmap_min_addr; + uintptr_t last = sizeof(uintptr_t) == 4 ? MiB : GiB; uintptr_t base, skip; while (true) { @@ -3136,29 +3227,29 @@ static bool parse_elf_properties(const ImageSource *src, } /* - * The contents of a valid PT_GNU_PROPERTY is a sequence - * of uint32_t -- swap them all now. + * The contents of a valid PT_GNU_PROPERTY is a sequence of uint32_t. 
+ * Swap most of them now, beyond the header and namesz. */ -#ifdef BSWAP_NEEDED - for (int i = 0; i < n / 4; i++) { - bswap32s(note.data + i); + if (target_needs_bswap()) { + for (int i = 4; i < n / 4; i++) { + bswap32s(note.data + i); + } } -#endif /* * Note that nhdr is 3 words, and that the "name" described by namesz * immediately follows nhdr and is thus at the 4th word. Further, all * of the inputs to the kernel's round_up are multiples of 4. */ - if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 || - note.nhdr.n_namesz != NOTE_NAME_SZ || + if (tswap32(note.nhdr.n_type) != NT_GNU_PROPERTY_TYPE_0 || + tswap32(note.nhdr.n_namesz) != NOTE_NAME_SZ || note.data[3] != GNU0_MAGIC) { error_setg(errp, "Invalid note in PT_GNU_PROPERTY"); return false; } off = sizeof(note.nhdr) + NOTE_NAME_SZ; - datasz = note.nhdr.n_descsz + off; + datasz = tswap32(note.nhdr.n_descsz) + off; if (datasz > n) { error_setg(errp, "Invalid note size in PT_GNU_PROPERTY"); return false; @@ -3194,7 +3285,8 @@ static void load_elf_image(const char *image_name, const ImageSource *src, char **pinterp_name) { g_autofree struct elf_phdr *phdr = NULL; - abi_ulong load_addr, load_bias, loaddr, hiaddr, error; + abi_ulong load_addr, load_bias, loaddr, hiaddr, error, align; + size_t reserve_size, align_size; int i, prot_exec; Error *err = NULL; @@ -3234,7 +3326,7 @@ static void load_elf_image(const char *image_name, const ImageSource *src, * amount of memory to handle that. Locate the interpreter, if any. */ loaddr = -1, hiaddr = 0; - info->alignment = 0; + align = 0; info->exec_stack = EXSTACK_DEFAULT; for (i = 0; i < ehdr->e_phnum; ++i) { struct elf_phdr *eppnt = phdr + i; @@ -3248,7 +3340,7 @@ static void load_elf_image(const char *image_name, const ImageSource *src, hiaddr = a; } ++info->nsegs; - info->alignment |= eppnt->p_align; + align |= eppnt->p_align; } else if (eppnt->p_type == PT_INTERP && pinterp_name) { g_autofree char *interp_name = NULL; @@ -3278,6 +3370,8 @@ static void load_elf_image(const char *image_name, const ImageSource *src, load_addr = loaddr; + align = pow2ceil(align); + if (pinterp_name != NULL) { if (ehdr->e_type == ET_EXEC) { /* @@ -3286,8 +3380,6 @@ static void load_elf_image(const char *image_name, const ImageSource *src, */ probe_guest_base(image_name, loaddr, hiaddr); } else { - abi_ulong align; - /* * The binary is dynamic, but we still need to * select guest_base. In this case we pass a size. @@ -3305,10 +3397,7 @@ static void load_elf_image(const char *image_name, const ImageSource *src, * Since we do not have complete control over the guest * address space, we prefer the kernel to choose some address * rather than force the use of LOAD_ADDR via MAP_FIXED. - * But without MAP_FIXED we cannot guarantee alignment, - * only suggest it. */ - align = pow2ceil(info->alignment); if (align) { load_addr &= -align; } @@ -3332,13 +3421,35 @@ static void load_elf_image(const char *image_name, const ImageSource *src, * In both cases, we will overwrite pages in this range with mappings * from the executable. */ - load_addr = target_mmap(load_addr, (size_t)hiaddr - loaddr + 1, PROT_NONE, + reserve_size = (size_t)hiaddr - loaddr + 1; + align_size = reserve_size; + + if (ehdr->e_type != ET_EXEC && align > qemu_real_host_page_size()) { + align_size += align - 1; + } + + load_addr = target_mmap(load_addr, align_size, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | (ehdr->e_type == ET_EXEC ? 
MAP_FIXED_NOREPLACE : 0), -1, 0); if (load_addr == -1) { goto exit_mmap; } + + if (align_size != reserve_size) { + abi_ulong align_addr = ROUND_UP(load_addr, align); + abi_ulong align_end = TARGET_PAGE_ALIGN(align_addr + reserve_size); + abi_ulong load_end = TARGET_PAGE_ALIGN(load_addr + align_size); + + if (align_addr != load_addr) { + target_munmap(load_addr, align_addr - load_addr); + } + if (align_end != load_end) { + target_munmap(align_end, load_end - align_end); + } + load_addr = align_addr; + } + load_bias = load_addr - loaddr; if (elf_is_fdpic(ehdr)) { @@ -3519,12 +3630,14 @@ static void load_elf_interp(const char *filename, struct image_info *info, load_elf_image(filename, &src, info, &ehdr, NULL); } +#ifndef vdso_image_info #ifdef VDSO_HEADER #include VDSO_HEADER -#define vdso_image_info() &vdso_image_info +#define vdso_image_info(flags) &vdso_image_info #else -#define vdso_image_info() NULL -#endif +#define vdso_image_info(flags) NULL +#endif /* VDSO_HEADER */ +#endif /* vdso_image_info */ static void load_elf_vdso(struct image_info *info, const VdsoImageInfo *vdso) { @@ -3855,7 +3968,7 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) * Load a vdso if available, which will amongst other things contain the * signal trampolines. Otherwise, allocate a separate page for them. */ - const VdsoImageInfo *vdso = vdso_image_info(); + const VdsoImageInfo *vdso = vdso_image_info(info->elf_flags); if (vdso) { load_elf_vdso(&vdso_info, vdso); info->vdso = vdso_info.load_bias; @@ -3894,7 +4007,6 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) } #ifdef USE_ELF_CORE_DUMP -#include "exec/translate-all.h" /* * Definitions to generate Intel SVR4-like core files. @@ -3974,9 +4086,12 @@ struct target_elf_prpsinfo { char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ }; -#ifdef BSWAP_NEEDED static void bswap_prstatus(struct target_elf_prstatus *prstatus) { + if (!target_needs_bswap()) { + return; + } + prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo); prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code); prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno); @@ -3994,6 +4109,10 @@ static void bswap_prstatus(struct target_elf_prstatus *prstatus) static void bswap_psinfo(struct target_elf_prpsinfo *psinfo) { + if (!target_needs_bswap()) { + return; + } + psinfo->pr_flag = tswapal(psinfo->pr_flag); psinfo->pr_uid = tswap16(psinfo->pr_uid); psinfo->pr_gid = tswap16(psinfo->pr_gid); @@ -4005,21 +4124,19 @@ static void bswap_psinfo(struct target_elf_prpsinfo *psinfo) static void bswap_note(struct elf_note *en) { + if (!target_needs_bswap()) { + return; + } + bswap32s(&en->n_namesz); bswap32s(&en->n_descsz); bswap32s(&en->n_type); } -#else -static inline void bswap_prstatus(struct target_elf_prstatus *p) { } -static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {} -static inline void bswap_note(struct elf_note *en) { } -#endif /* BSWAP_NEEDED */ /* * Calculate file (dump) size of given memory region. */ -static size_t vma_dump_size(target_ulong start, target_ulong end, - unsigned long flags) +static size_t vma_dump_size(vaddr start, vaddr end, int flags) { /* The area must be readable. 
*/ if (!(flags & PAGE_READ)) { @@ -4102,8 +4219,7 @@ static void fill_elf_note_phdr(struct elf_phdr *phdr, size_t sz, off_t offset) bswap_phdr(phdr, 1); } -static void fill_prstatus_note(void *data, const TaskState *ts, - CPUState *cpu, int signr) +static void fill_prstatus_note(void *data, CPUState *cpu, int signr) { /* * Because note memory is only aligned to 4, and target_elf_prstatus @@ -4113,7 +4229,7 @@ static void fill_prstatus_note(void *data, const TaskState *ts, struct target_elf_prstatus prstatus = { .pr_info.si_signo = signr, .pr_cursig = signr, - .pr_pid = ts->ts_tid, + .pr_pid = get_task_state(cpu)->ts_tid, .pr_ppid = getppid(), .pr_pgrp = getpgrp(), .pr_sid = getsid(0), @@ -4213,14 +4329,14 @@ static int dump_write(int fd, const void *ptr, size_t size) return (0); } -static int wmr_page_unprotect_regions(void *opaque, target_ulong start, - target_ulong end, unsigned long flags) +static int wmr_page_unprotect_regions(void *opaque, vaddr start, + vaddr end, int flags) { if ((flags & (PAGE_WRITE | PAGE_WRITE_ORG)) == PAGE_WRITE_ORG) { size_t step = MAX(TARGET_PAGE_SIZE, qemu_real_host_page_size()); while (1) { - page_unprotect(start, 0); + page_unprotect(NULL, start, 0); if (end - start <= step) { break; } @@ -4235,8 +4351,8 @@ typedef struct { size_t size; } CountAndSizeRegions; -static int wmr_count_and_size_regions(void *opaque, target_ulong start, - target_ulong end, unsigned long flags) +static int wmr_count_and_size_regions(void *opaque, vaddr start, + vaddr end, int flags) { CountAndSizeRegions *css = opaque; @@ -4250,8 +4366,8 @@ typedef struct { off_t offset; } FillRegionPhdr; -static int wmr_fill_region_phdr(void *opaque, target_ulong start, - target_ulong end, unsigned long flags) +static int wmr_fill_region_phdr(void *opaque, vaddr start, + vaddr end, int flags) { FillRegionPhdr *d = opaque; struct elf_phdr *phdr = d->phdr; @@ -4273,8 +4389,8 @@ static int wmr_fill_region_phdr(void *opaque, target_ulong start, return 0; } -static int wmr_write_region(void *opaque, target_ulong start, - target_ulong end, unsigned long flags) +static int wmr_write_region(void *opaque, vaddr start, + vaddr end, int flags) { int fd = *(int *)opaque; size_t size = vma_dump_size(start, end, flags); @@ -4330,7 +4446,7 @@ static int wmr_write_region(void *opaque, target_ulong start, */ static int elf_core_dump(int signr, const CPUArchState *env) { - const CPUState *cpu = env_cpu((CPUArchState *)env); + const CPUState *cpu = env_cpu_const(env); const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu); struct rlimit dumpsize; CountAndSizeRegions css; @@ -4428,8 +4544,7 @@ static int elf_core_dump(int signr, const CPUArchState *env) CPU_FOREACH(cpu_iter) { dptr = fill_note(&hptr, NT_PRSTATUS, "CORE", sizeof(struct target_elf_prstatus)); - fill_prstatus_note(dptr, ts, cpu_iter, - cpu_iter == cpu ? signr : 0); + fill_prstatus_note(dptr, cpu_iter, cpu_iter == cpu ? 
signr : 0); } if (dump_write(fd, header, data_offset) < 0) { diff --git a/linux-user/fd-trans.c b/linux-user/fd-trans.c index c04a97c73a3..f83d1f79d51 100644 --- a/linux-user/fd-trans.c +++ b/linux-user/fd-trans.c @@ -25,12 +25,32 @@ #ifdef CONFIG_RTNETLINK #include #include +#include #endif #include "qemu.h" #include "user-internals.h" #include "fd-trans.h" #include "signal-common.h" +#define NDM_RTA(r) ((struct rtattr*)(((char*)(r)) + \ + NLMSG_ALIGN(sizeof(struct ndmsg)))) + +enum { + QEMU_IFA_UNSPEC, + QEMU_IFA_ADDRESS, + QEMU_IFA_LOCAL, + QEMU_IFA_LABEL, + QEMU_IFA_BROADCAST, + QEMU_IFA_ANYCAST, + QEMU_IFA_CACHEINFO, + QEMU_IFA_MULTICAST, + QEMU_IFA_FLAGS, + QEMU_IFA_RT_PRIORITY, + QEMU_IFA_TARGET_NETNSID, + QEMU_IFA_PROTO, + QEMU__IFA__MAX, +}; + enum { QEMU_IFLA_BR_UNSPEC, QEMU_IFLA_BR_FORWARD_DELAY, @@ -141,6 +161,14 @@ enum { QEMU_IFLA_PROTO_DOWN_REASON, QEMU_IFLA_PARENT_DEV_NAME, QEMU_IFLA_PARENT_DEV_BUS_NAME, + QEMU_IFLA_GRO_MAX_SIZE, + QEMU_IFLA_TSO_MAX_SIZE, + QEMU_IFLA_TSO_MAX_SEGS, + QEMU_IFLA_ALLMULTI, + QEMU_IFLA_DEVLINK_PORT, + QEMU_IFLA_GSO_IPV4_MAX_SIZE, + QEMU_IFLA_GRO_IPV4_MAX_SIZE, + QEMU_IFLA_DPLL_PIN, QEMU___IFLA_MAX }; @@ -982,6 +1010,22 @@ static abi_long host_to_target_data_vfinfo_nlattr(struct nlattr *nlattr, return 0; } +static abi_long host_to_target_data_prop_nlattr(struct nlattr *nlattr, + void *context) +{ + switch (nlattr->nla_type) { + /* string */ + case QEMU_IFLA_ALT_IFNAME: + break; + default: + qemu_log_mask(LOG_UNIMP, "Unknown host PROP type: %d\n", + nlattr->nla_type); + break; + } + return 0; +} + + static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr) { uint32_t *u32; @@ -990,7 +1034,7 @@ static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr) struct rtnl_link_ifmap *map; struct linkinfo_context li_context; - switch (rtattr->rta_type) { + switch (rtattr->rta_type & NLA_TYPE_MASK) { /* binary stream */ case QEMU_IFLA_ADDRESS: case QEMU_IFLA_BROADCAST: @@ -1028,6 +1072,12 @@ static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr) case QEMU_IFLA_CARRIER_DOWN_COUNT: case QEMU_IFLA_MIN_MTU: case QEMU_IFLA_MAX_MTU: + case QEMU_IFLA_GRO_MAX_SIZE: + case QEMU_IFLA_TSO_MAX_SIZE: + case QEMU_IFLA_TSO_MAX_SEGS: + case QEMU_IFLA_ALLMULTI: + case QEMU_IFLA_GSO_IPV4_MAX_SIZE: + case QEMU_IFLA_GRO_IPV4_MAX_SIZE: u32 = RTA_DATA(rtattr); *u32 = tswap32(*u32); break; @@ -1123,6 +1173,10 @@ static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr) return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len, NULL, host_to_target_data_vfinfo_nlattr); + case QEMU_IFLA_PROP_LIST: + return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len, + NULL, + host_to_target_data_prop_nlattr); default: qemu_log_mask(LOG_UNIMP, "Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type); @@ -1138,20 +1192,21 @@ static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr) switch (rtattr->rta_type) { /* binary: depends on family type */ - case IFA_ADDRESS: - case IFA_LOCAL: + case QEMU_IFA_ADDRESS: + case QEMU_IFA_LOCAL: + case QEMU_IFA_PROTO: break; /* string */ - case IFA_LABEL: + case QEMU_IFA_LABEL: break; /* u32 */ - case IFA_FLAGS: - case IFA_BROADCAST: + case QEMU_IFA_FLAGS: + case QEMU_IFA_BROADCAST: u32 = RTA_DATA(rtattr); *u32 = tswap32(*u32); break; /* struct ifa_cacheinfo */ - case IFA_CACHEINFO: + case QEMU_IFA_CACHEINFO: ci = RTA_DATA(rtattr); ci->ifa_prefered = tswap32(ci->ifa_prefered); ci->ifa_valid = tswap32(ci->ifa_valid); @@ -1209,6 +1264,35 @@ static abi_long 
host_to_target_data_route_rtattr(struct rtattr *rtattr) return 0; } +static abi_long host_to_target_data_neigh_rtattr(struct rtattr *rtattr) +{ + struct nda_cacheinfo *ndac; + uint32_t *u32; + + switch (rtattr->rta_type) { + case NDA_UNSPEC: + case NDA_DST: + case NDA_LLADDR: + break; + case NDA_PROBES: + u32 = RTA_DATA(rtattr); + *u32 = tswap32(*u32); + break; + case NDA_CACHEINFO: + ndac = RTA_DATA(rtattr); + ndac->ndm_confirmed = tswap32(ndac->ndm_confirmed); + ndac->ndm_used = tswap32(ndac->ndm_used); + ndac->ndm_updated = tswap32(ndac->ndm_updated); + ndac->ndm_refcnt = tswap32(ndac->ndm_refcnt); + break; + default: + qemu_log_mask(LOG_UNIMP, "Unknown host to target NEIGH type: %d\n", + rtattr->rta_type); + break; + } + return 0; +} + static abi_long host_to_target_link_rtattr(struct rtattr *rtattr, uint32_t rtattr_len) { @@ -1230,12 +1314,20 @@ static abi_long host_to_target_route_rtattr(struct rtattr *rtattr, host_to_target_data_route_rtattr); } +static abi_long host_to_target_neigh_rtattr(struct rtattr *rtattr, + uint32_t rtattr_len) +{ + return host_to_target_for_each_rtattr(rtattr, rtattr_len, + host_to_target_data_neigh_rtattr); +} + static abi_long host_to_target_data_route(struct nlmsghdr *nlh) { uint32_t nlmsg_len; struct ifinfomsg *ifi; struct ifaddrmsg *ifa; struct rtmsg *rtm; + struct ndmsg *ndm; nlmsg_len = nlh->nlmsg_len; switch (nlh->nlmsg_type) { @@ -1262,6 +1354,17 @@ static abi_long host_to_target_data_route(struct nlmsghdr *nlh) nlmsg_len - NLMSG_LENGTH(sizeof(*ifa))); } break; + case RTM_NEWNEIGH: + case RTM_DELNEIGH: + case RTM_GETNEIGH: + if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ndm))) { + ndm = NLMSG_DATA(nlh); + ndm->ndm_ifindex = tswap32(ndm->ndm_ifindex); + ndm->ndm_state = tswap16(ndm->ndm_state); + host_to_target_neigh_rtattr(NDM_RTA(ndm), + nlmsg_len - NLMSG_LENGTH(sizeof(*ndm))); + } + break; case RTM_NEWROUTE: case RTM_DELROUTE: case RTM_GETROUTE: @@ -1398,8 +1501,8 @@ static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr) { switch (rtattr->rta_type) { /* binary: depends on family type */ - case IFA_LOCAL: - case IFA_ADDRESS: + case QEMU_IFA_LOCAL: + case QEMU_IFA_ADDRESS: break; default: qemu_log_mask(LOG_UNIMP, "Unknown target IFA type: %d\n", @@ -1409,6 +1512,35 @@ static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr) return 0; } +static abi_long target_to_host_data_neigh_rtattr(struct rtattr *rtattr) +{ + struct nda_cacheinfo *ndac; + uint32_t *u32; + + switch (rtattr->rta_type) { + case NDA_UNSPEC: + case NDA_DST: + case NDA_LLADDR: + break; + case NDA_PROBES: + u32 = RTA_DATA(rtattr); + *u32 = tswap32(*u32); + break; + case NDA_CACHEINFO: + ndac = RTA_DATA(rtattr); + ndac->ndm_confirmed = tswap32(ndac->ndm_confirmed); + ndac->ndm_used = tswap32(ndac->ndm_used); + ndac->ndm_updated = tswap32(ndac->ndm_updated); + ndac->ndm_refcnt = tswap32(ndac->ndm_refcnt); + break; + default: + qemu_log_mask(LOG_UNIMP, "Unknown target NEIGH type: %d\n", + rtattr->rta_type); + break; + } + return 0; +} + static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr) { uint32_t *u32; @@ -1447,6 +1579,13 @@ static void target_to_host_addr_rtattr(struct rtattr *rtattr, target_to_host_data_addr_rtattr); } +static void target_to_host_neigh_rtattr(struct rtattr *rtattr, + uint32_t rtattr_len) +{ + target_to_host_for_each_rtattr(rtattr, rtattr_len, + target_to_host_data_neigh_rtattr); +} + static void target_to_host_route_rtattr(struct rtattr *rtattr, uint32_t rtattr_len) { @@ -1459,6 +1598,7 @@ static abi_long 
target_to_host_data_route(struct nlmsghdr *nlh) struct ifinfomsg *ifi; struct ifaddrmsg *ifa; struct rtmsg *rtm; + struct ndmsg *ndm; switch (nlh->nlmsg_type) { case RTM_NEWLINK: @@ -1485,6 +1625,17 @@ static abi_long target_to_host_data_route(struct nlmsghdr *nlh) NLMSG_LENGTH(sizeof(*ifa))); } break; + case RTM_NEWNEIGH: + case RTM_DELNEIGH: + case RTM_GETNEIGH: + if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ndm))) { + ndm = NLMSG_DATA(nlh); + ndm->ndm_ifindex = tswap32(ndm->ndm_ifindex); + ndm->ndm_state = tswap16(ndm->ndm_state); + target_to_host_neigh_rtattr(NDM_RTA(ndm), nlh->nlmsg_len - + NLMSG_LENGTH(sizeof(*ndm))); + } + break; case RTM_NEWROUTE: case RTM_DELROUTE: case RTM_GETROUTE: diff --git a/linux-user/fd-trans.h b/linux-user/fd-trans.h index 910faaf237c..e14f96059c5 100644 --- a/linux-user/fd-trans.h +++ b/linux-user/fd-trans.h @@ -36,6 +36,16 @@ static inline void fd_trans_init(void) qemu_mutex_init(&target_fd_trans_lock); } +static inline void fd_trans_prefork(void) +{ + qemu_mutex_lock(&target_fd_trans_lock); +} + +static inline void fd_trans_postfork(void) +{ + qemu_mutex_unlock(&target_fd_trans_lock); +} + static inline TargetFdDataFunc fd_trans_target_to_host_data(int fd) { if (fd < 0) { diff --git a/linux-user/flatload.c b/linux-user/flatload.c index 04d8138d12e..4beb3ed1b96 100644 --- a/linux-user/flatload.c +++ b/linux-user/flatload.c @@ -34,6 +34,8 @@ #include "qemu/osdep.h" #include "qemu.h" +#include "exec/page-protection.h" +#include "exec/mmap-lock.h" #include "user-internals.h" #include "loader.h" #include "user-mmap.h" @@ -487,7 +489,10 @@ int load_flt_binary(struct linux_binprm *bprm, struct image_info *info) stack_len += (bprm->envc + 1) * 4; /* the envp array */ + mmap_lock(); res = load_flat_file(bprm, libinfo, 0, &stack_len); + mmap_unlock(); + if (is_error(res)) { return res; } diff --git a/linux-user/gen-vdso-elfn.c.inc b/linux-user/gen-vdso-elfn.c.inc index 95856eb839b..b47019e136c 100644 --- a/linux-user/gen-vdso-elfn.c.inc +++ b/linux-user/gen-vdso-elfn.c.inc @@ -68,28 +68,45 @@ static void elfN(search_symtab)(ElfN(Shdr) *shdr, unsigned sym_idx, void *buf, bool need_bswap) { unsigned str_idx = shdr[sym_idx].sh_link; - ElfN(Sym) *sym = buf + shdr[sym_idx].sh_offset; - unsigned sym_n = shdr[sym_idx].sh_size / sizeof(*sym); + ElfN(Sym) *target_sym = buf + shdr[sym_idx].sh_offset; + unsigned sym_n = shdr[sym_idx].sh_size / sizeof(*target_sym); const char *str = buf + shdr[str_idx].sh_offset; for (unsigned i = 0; i < sym_n; ++i) { const char *name; + ElfN(Sym) sym; + memcpy(&sym, &target_sym[i], sizeof(sym)); if (need_bswap) { - elfN(bswap_sym)(sym + i); + elfN(bswap_sym)(&sym); } - name = str + sym[i].st_name; + name = str + sym.st_name; if (sigreturn_sym && strcmp(sigreturn_sym, name) == 0) { - sigreturn_addr = sym[i].st_value; + sigreturn_addr = sym.st_value; } if (rt_sigreturn_sym && strcmp(rt_sigreturn_sym, name) == 0) { - rt_sigreturn_addr = sym[i].st_value; + rt_sigreturn_addr = sym.st_value; } } } -static void elfN(process)(FILE *outf, void *buf, bool need_bswap) +static void elfN(bswap_ps_hdrs)(ElfN(Ehdr) *ehdr) +{ + ElfN(Phdr) *phdr = (void *)ehdr + ehdr->e_phoff; + ElfN(Shdr) *shdr = (void *)ehdr + ehdr->e_shoff; + ElfN(Half) i; + + for (i = 0; i < ehdr->e_phnum; ++i) { + elfN(bswap_phdr)(&phdr[i]); + } + + for (i = 0; i < ehdr->e_shnum; ++i) { + elfN(bswap_shdr)(&shdr[i]); + } +} + +static void elfN(process)(FILE *outf, void *buf, long len, bool need_bswap) { ElfN(Ehdr) *ehdr = buf; ElfN(Phdr) *phdr; @@ -103,24 +120,14 @@ static void 
elfN(process)(FILE *outf, void *buf, bool need_bswap) int errors = 0; if (need_bswap) { - elfN(bswap_ehdr)(ehdr); + elfN(bswap_ehdr)(buf); + elfN(bswap_ps_hdrs)(buf); } phnum = ehdr->e_phnum; phdr = buf + ehdr->e_phoff; - if (need_bswap) { - for (unsigned i = 0; i < phnum; ++i) { - elfN(bswap_phdr)(phdr + i); - } - } - shnum = ehdr->e_shnum; shdr = buf + ehdr->e_shoff; - if (need_bswap) { - for (unsigned i = 0; i < shnum; ++i) { - elfN(bswap_shdr)(shdr + i); - } - } for (unsigned i = 0; i < shnum; ++i) { switch (shdr[i].sh_type) { case SHT_SYMTAB: @@ -154,7 +161,24 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) fprintf(stderr, "LOAD segment not loaded at address 0\n"); errors++; } - first_segsz = phdr[i].p_filesz; + /* + * Extend the program header to cover the entire VDSO, so that + * load_elf_vdso() loads everything, including section headers. + * + * Require that there is no .bss, since it would break this + * approach. + */ + if (phdr[i].p_filesz != phdr[i].p_memsz) { + fprintf(stderr, "LOAD segment's filesz and memsz differ\n"); + errors++; + } + if (phdr[i].p_filesz > len) { + fprintf(stderr, "LOAD segment is larger than the whole VDSO\n"); + errors++; + } + phdr[i].p_filesz = len; + phdr[i].p_memsz = len; + first_segsz = len; if (first_segsz < ehdr->e_phoff + phnum * sizeof(*phdr)) { fprintf(stderr, "LOAD segment does not cover PHDRs\n"); errors++; @@ -197,17 +221,24 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) output_reloc(outf, buf, &phdr[i].p_paddr); } + /* Relocate the section headers. */ + for (unsigned i = 0; i < shnum; ++i) { + output_reloc(outf, buf, &shdr[i].sh_addr); + } + /* Relocate the DYNAMIC entries. */ if (dynamic_addr) { - ElfN(Dyn) *dyn = buf + dynamic_ofs; - __typeof(dyn->d_tag) tag; + ElfN(Dyn) *target_dyn = buf + dynamic_ofs; + __typeof(((ElfN(Dyn) *)target_dyn)->d_tag) tag; do { + ElfN(Dyn) dyn; + memcpy(&dyn, target_dyn, sizeof(dyn)); if (need_bswap) { - elfN(bswap_dyn)(dyn); + elfN(bswap_dyn)(&dyn); } - tag = dyn->d_tag; + tag = dyn.d_tag; switch (tag) { case DT_HASH: @@ -218,7 +249,7 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) case DT_PLTGOT: case DT_ADDRRNGLO ... DT_ADDRRNGHI: /* These entries store an address in the entry. */ - output_reloc(outf, buf, &dyn->d_un.d_val); + output_reloc(outf, buf, &target_dyn->d_un.d_val); break; case DT_NULL: @@ -235,7 +266,7 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) break; case DT_SYMENT: - if (dyn->d_un.d_val != sizeof(ElfN(Sym))) { + if (dyn.d_un.d_val != sizeof(ElfN(Sym))) { fprintf(stderr, "VDSO has incorrect dynamic symbol size\n"); errors++; } @@ -251,7 +282,7 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) * ??? The RISC-V toolchain will emit these even when there * are no relocations. Validate zeros. */ - if (dyn->d_un.d_val != 0) { + if (dyn.d_un.d_val != 0) { fprintf(stderr, "VDSO has dynamic relocations\n"); errors++; } @@ -287,7 +318,7 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) errors++; break; } - dyn++; + target_dyn++; } while (tag != DT_NULL); if (errors) { exit(EXIT_FAILURE); @@ -296,11 +327,11 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) /* Relocate the dynamic symbol table. 
*/ if (dynsym_idx) { - ElfN(Sym) *sym = buf + shdr[dynsym_idx].sh_offset; - unsigned sym_n = shdr[dynsym_idx].sh_size / sizeof(*sym); + ElfN(Sym) *target_sym = buf + shdr[dynsym_idx].sh_offset; + unsigned sym_n = shdr[dynsym_idx].sh_size / sizeof(*target_sym); for (unsigned i = 0; i < sym_n; ++i) { - output_reloc(outf, buf, &sym[i].st_value); + output_reloc(outf, buf, &target_sym[i].st_value); } } @@ -311,4 +342,9 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) if (symtab_idx) { elfN(search_symtab)(shdr, symtab_idx, buf, need_bswap); } + + if (need_bswap) { + elfN(bswap_ps_hdrs)(buf); + elfN(bswap_ehdr)(buf); + } } diff --git a/linux-user/gen-vdso.c b/linux-user/gen-vdso.c index 31e333be802..aeaa927db8f 100644 --- a/linux-user/gen-vdso.c +++ b/linux-user/gen-vdso.c @@ -56,13 +56,14 @@ static unsigned rt_sigreturn_addr; int main(int argc, char **argv) { - FILE *inf, *outf; + FILE *inf = NULL, *outf = NULL; long total_len; const char *prefix = "vdso"; const char *inf_name; const char *outf_name = NULL; - unsigned char *buf; + unsigned char *buf = NULL; bool need_bswap; + int ret = EXIT_FAILURE; while (1) { int opt = getopt(argc, argv, "o:p:r:s:"); @@ -112,9 +113,21 @@ int main(int argc, char **argv) * We expect the vdso to be small, on the order of one page, * therefore we do not expect a partial read. */ - fseek(inf, 0, SEEK_END); + if (fseek(inf, 0, SEEK_END) < 0) { + goto perror_inf; + } total_len = ftell(inf); - fseek(inf, 0, SEEK_SET); + if (total_len < 0) { + goto perror_inf; + } + if (fseek(inf, 0, SEEK_SET) < 0) { + goto perror_inf; + } + + if (total_len < EI_NIDENT) { + fprintf(stderr, "%s: file too small (truncated?)\n", inf_name); + return EXIT_FAILURE; + } buf = malloc(total_len); if (buf == NULL) { @@ -129,24 +142,6 @@ int main(int argc, char **argv) fprintf(stderr, "%s: incomplete read\n", inf_name); return EXIT_FAILURE; } - fclose(inf); - - /* - * Write out the vdso image now, before we make local changes. - */ - - fprintf(outf, - "/* Automatically generated from linux-user/gen-vdso.c. */\n" - "\n" - "static const uint8_t %s_image[] = {", - prefix); - for (long i = 0; i < total_len; ++i) { - if (i % 12 == 0) { - fputs("\n ", outf); - } - fprintf(outf, " 0x%02x,", buf[i]); - } - fprintf(outf, "\n};\n\n"); /* * Identify which elf flavor we're processing. @@ -179,14 +174,17 @@ int main(int argc, char **argv) * Output relocation addresses as we go. */ - fprintf(outf, "static const unsigned %s_relocs[] = {\n", prefix); + fprintf(outf, + "/* Automatically generated by linux-user/gen-vdso.c. */\n" + "\n" + "static const unsigned %s_relocs[] = {\n", prefix); switch (buf[EI_CLASS]) { case ELFCLASS32: - elf32_process(outf, buf, need_bswap); + elf32_process(outf, buf, total_len, need_bswap); break; case ELFCLASS64: - elf64_process(outf, buf, need_bswap); + elf64_process(outf, buf, total_len, need_bswap); break; default: fprintf(stderr, "%s: invalid elf EI_CLASS (%u)\n", @@ -196,6 +194,20 @@ int main(int argc, char **argv) fprintf(outf, "};\n\n"); /* end vdso_relocs. */ + /* + * Write out the vdso image now, after we made local changes. 
+ */ + fprintf(outf, + "static const uint8_t %s_image[] = {", + prefix); + for (long i = 0; i < total_len; ++i) { + if (i % 12 == 0) { + fputs("\n ", outf); + } + fprintf(outf, " 0x%02x,", buf[i]); + } + fprintf(outf, "\n};\n\n"); + fprintf(outf, "static const VdsoImageInfo %s_image_info = {\n", prefix); fprintf(outf, " .image = %s_image,\n", prefix); fprintf(outf, " .relocs = %s_relocs,\n", prefix); @@ -205,19 +217,24 @@ int main(int argc, char **argv) fprintf(outf, " .rt_sigreturn_ofs = 0x%x,\n", rt_sigreturn_addr); fprintf(outf, "};\n"); - /* - * Everything should have gone well. - */ - if (fclose(outf)) { - goto perror_outf; + ret = EXIT_SUCCESS; + + cleanup: + free(buf); + + if (outf && fclose(outf) != 0) { + ret = EXIT_FAILURE; + } + if (inf && fclose(inf) != 0) { + ret = EXIT_FAILURE; } - return EXIT_SUCCESS; + return ret; perror_inf: perror(inf_name); - return EXIT_FAILURE; + goto cleanup; perror_outf: perror(outf_name); - return EXIT_FAILURE; + goto cleanup; } diff --git a/linux-user/generic/signal.h b/linux-user/generic/signal.h index 6fd05b77bb3..b34740258ed 100644 --- a/linux-user/generic/signal.h +++ b/linux-user/generic/signal.h @@ -15,7 +15,6 @@ #define TARGET_SA_RESTART 0x10000000 #define TARGET_SA_NODEFER 0x40000000 #define TARGET_SA_RESETHAND 0x80000000 -#define TARGET_SA_RESTORER 0x04000000 #define TARGET_SIGHUP 1 #define TARGET_SIGINT 2 diff --git a/linux-user/hexagon/cpu_loop.c b/linux-user/hexagon/cpu_loop.c index d41159e52ad..e18a0183b50 100644 --- a/linux-user/hexagon/cpu_loop.c +++ b/linux-user/hexagon/cpu_loop.c @@ -21,7 +21,7 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" #include "internal.h" @@ -42,7 +42,7 @@ void cpu_loop(CPUHexagonState *env) case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; - case HEX_EXCP_TRAP0: + case HEX_EVENT_TRAP0: syscallnum = env->gpr[6]; env->gpr[HEX_REG_PC] += 4; ret = do_syscall(env, @@ -60,7 +60,7 @@ void cpu_loop(CPUHexagonState *env) env->gpr[0] = ret; } break; - case HEX_EXCP_PC_NOT_ALIGNED: + case HEX_CAUSE_PC_NOT_ALIGNED: force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, env->gpr[HEX_REG_R31]); break; @@ -79,7 +79,7 @@ void cpu_loop(CPUHexagonState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { env->gpr[HEX_REG_PC] = regs->sepc; env->gpr[HEX_REG_SP] = regs->sp; diff --git a/linux-user/hexagon/meson.build b/linux-user/hexagon/meson.build new file mode 100644 index 00000000000..d203c3ec929 --- /dev/null +++ b/linux-user/hexagon/meson.build @@ -0,0 +1,6 @@ + +syscall_nr_generators += { + 'hexagon': generator(sh, + arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ], + output: '@BASENAME@_nr.h') +} diff --git a/linux-user/hexagon/syscall.tbl b/linux-user/hexagon/syscall.tbl new file mode 100644 index 00000000000..845e24eb372 --- /dev/null +++ b/linux-user/hexagon/syscall.tbl @@ -0,0 +1,405 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# This file contains the system call numbers for all of the +# more recently added architectures. +# +# As a basic principle, no duplication of functionality +# should be added, e.g. we don't use lseek when llseek +# is present. New architectures should use this file +# and implement the less feature-full calls in user space. 
+# +0 common io_setup sys_io_setup compat_sys_io_setup +1 common io_destroy sys_io_destroy +2 common io_submit sys_io_submit compat_sys_io_submit +3 common io_cancel sys_io_cancel +4 time32 io_getevents sys_io_getevents_time32 +4 64 io_getevents sys_io_getevents +5 common setxattr sys_setxattr +6 common lsetxattr sys_lsetxattr +7 common fsetxattr sys_fsetxattr +8 common getxattr sys_getxattr +9 common lgetxattr sys_lgetxattr +10 common fgetxattr sys_fgetxattr +11 common listxattr sys_listxattr +12 common llistxattr sys_llistxattr +13 common flistxattr sys_flistxattr +14 common removexattr sys_removexattr +15 common lremovexattr sys_lremovexattr +16 common fremovexattr sys_fremovexattr +17 common getcwd sys_getcwd +18 common lookup_dcookie sys_ni_syscall +19 common eventfd2 sys_eventfd2 +20 common epoll_create1 sys_epoll_create1 +21 common epoll_ctl sys_epoll_ctl +22 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait +23 common dup sys_dup +24 common dup3 sys_dup3 +25 32 fcntl64 sys_fcntl64 compat_sys_fcntl64 +25 64 fcntl sys_fcntl +26 common inotify_init1 sys_inotify_init1 +27 common inotify_add_watch sys_inotify_add_watch +28 common inotify_rm_watch sys_inotify_rm_watch +29 common ioctl sys_ioctl compat_sys_ioctl +30 common ioprio_set sys_ioprio_set +31 common ioprio_get sys_ioprio_get +32 common flock sys_flock +33 common mknodat sys_mknodat +34 common mkdirat sys_mkdirat +35 common unlinkat sys_unlinkat +36 common symlinkat sys_symlinkat +37 common linkat sys_linkat +# renameat is superseded with flags by renameat2 +38 renameat renameat sys_renameat +39 common umount2 sys_umount +40 common mount sys_mount +41 common pivot_root sys_pivot_root +42 common nfsservctl sys_ni_syscall +43 32 statfs64 sys_statfs64 compat_sys_statfs64 +43 64 statfs sys_statfs +44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 +44 64 fstatfs sys_fstatfs +45 32 truncate64 sys_truncate64 compat_sys_truncate64 +45 64 truncate sys_truncate +46 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +46 64 ftruncate sys_ftruncate +47 common fallocate sys_fallocate compat_sys_fallocate +48 common faccessat sys_faccessat +49 common chdir sys_chdir +50 common fchdir sys_fchdir +51 common chroot sys_chroot +52 common fchmod sys_fchmod +53 common fchmodat sys_fchmodat +54 common fchownat sys_fchownat +55 common fchown sys_fchown +56 common openat sys_openat +57 common close sys_close +58 common vhangup sys_vhangup +59 common pipe2 sys_pipe2 +60 common quotactl sys_quotactl +61 common getdents64 sys_getdents64 +62 32 llseek sys_llseek +62 64 lseek sys_lseek +63 common read sys_read +64 common write sys_write +65 common readv sys_readv sys_readv +66 common writev sys_writev sys_writev +67 common pread64 sys_pread64 compat_sys_pread64 +68 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +69 common preadv sys_preadv compat_sys_preadv +70 common pwritev sys_pwritev compat_sys_pwritev +71 32 sendfile64 sys_sendfile64 +71 64 sendfile sys_sendfile64 +72 time32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32 +72 64 pselect6 sys_pselect6 +73 time32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32 +73 64 ppoll sys_ppoll +74 common signalfd4 sys_signalfd4 compat_sys_signalfd4 +75 common vmsplice sys_vmsplice +76 common splice sys_splice +77 common tee sys_tee +78 common readlinkat sys_readlinkat +79 stat64 fstatat64 sys_fstatat64 +79 64 newfstatat sys_newfstatat +80 stat64 fstat64 sys_fstat64 +80 64 fstat sys_newfstat +81 common sync sys_sync +82 common fsync sys_fsync +83 common fdatasync sys_fdatasync +84 common 
sync_file_range sys_sync_file_range compat_sys_sync_file_range +85 common timerfd_create sys_timerfd_create +86 time32 timerfd_settime sys_timerfd_settime32 +86 64 timerfd_settime sys_timerfd_settime +87 time32 timerfd_gettime sys_timerfd_gettime32 +87 64 timerfd_gettime sys_timerfd_gettime +88 time32 utimensat sys_utimensat_time32 +88 64 utimensat sys_utimensat +89 common acct sys_acct +90 common capget sys_capget +91 common capset sys_capset +92 common personality sys_personality +93 common exit sys_exit +94 common exit_group sys_exit_group +95 common waitid sys_waitid compat_sys_waitid +96 common set_tid_address sys_set_tid_address +97 common unshare sys_unshare +98 time32 futex sys_futex_time32 +98 64 futex sys_futex +99 common set_robust_list sys_set_robust_list compat_sys_set_robust_list +100 common get_robust_list sys_get_robust_list compat_sys_get_robust_list +101 time32 nanosleep sys_nanosleep_time32 +101 64 nanosleep sys_nanosleep +102 common getitimer sys_getitimer compat_sys_getitimer +103 common setitimer sys_setitimer compat_sys_setitimer +104 common kexec_load sys_kexec_load compat_sys_kexec_load +105 common init_module sys_init_module +106 common delete_module sys_delete_module +107 common timer_create sys_timer_create compat_sys_timer_create +108 time32 timer_gettime sys_timer_gettime32 +108 64 timer_gettime sys_timer_gettime +109 common timer_getoverrun sys_timer_getoverrun +110 time32 timer_settime sys_timer_settime32 +110 64 timer_settime sys_timer_settime +111 common timer_delete sys_timer_delete +112 time32 clock_settime sys_clock_settime32 +112 64 clock_settime sys_clock_settime +113 time32 clock_gettime sys_clock_gettime32 +113 64 clock_gettime sys_clock_gettime +114 time32 clock_getres sys_clock_getres_time32 +114 64 clock_getres sys_clock_getres +115 time32 clock_nanosleep sys_clock_nanosleep_time32 +115 64 clock_nanosleep sys_clock_nanosleep +116 common syslog sys_syslog +117 common ptrace sys_ptrace compat_sys_ptrace +118 common sched_setparam sys_sched_setparam +119 common sched_setscheduler sys_sched_setscheduler +120 common sched_getscheduler sys_sched_getscheduler +121 common sched_getparam sys_sched_getparam +122 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity +123 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity +124 common sched_yield sys_sched_yield +125 common sched_get_priority_max sys_sched_get_priority_max +126 common sched_get_priority_min sys_sched_get_priority_min +127 time32 sched_rr_get_interval sys_sched_rr_get_interval_time32 +127 64 sched_rr_get_interval sys_sched_rr_get_interval +128 common restart_syscall sys_restart_syscall +129 common kill sys_kill +130 common tkill sys_tkill +131 common tgkill sys_tgkill +132 common sigaltstack sys_sigaltstack compat_sys_sigaltstack +133 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend +134 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction +135 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask +136 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending +137 time32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32 +137 64 rt_sigtimedwait sys_rt_sigtimedwait +138 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo +139 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn +140 common setpriority sys_setpriority +141 common getpriority sys_getpriority +142 common reboot sys_reboot +143 common setregid sys_setregid +144 common setgid sys_setgid +145 
common setreuid sys_setreuid +146 common setuid sys_setuid +147 common setresuid sys_setresuid +148 common getresuid sys_getresuid +149 common setresgid sys_setresgid +150 common getresgid sys_getresgid +151 common setfsuid sys_setfsuid +152 common setfsgid sys_setfsgid +153 common times sys_times compat_sys_times +154 common setpgid sys_setpgid +155 common getpgid sys_getpgid +156 common getsid sys_getsid +157 common setsid sys_setsid +158 common getgroups sys_getgroups +159 common setgroups sys_setgroups +160 common uname sys_newuname +161 common sethostname sys_sethostname +162 common setdomainname sys_setdomainname +# getrlimit and setrlimit are superseded with prlimit64 +163 rlimit getrlimit sys_getrlimit compat_sys_getrlimit +164 rlimit setrlimit sys_setrlimit compat_sys_setrlimit +165 common getrusage sys_getrusage compat_sys_getrusage +166 common umask sys_umask +167 common prctl sys_prctl +168 common getcpu sys_getcpu +169 time32 gettimeofday sys_gettimeofday compat_sys_gettimeofday +169 64 gettimeofday sys_gettimeofday +170 time32 settimeofday sys_settimeofday compat_sys_settimeofday +170 64 settimeofday sys_settimeofday +171 time32 adjtimex sys_adjtimex_time32 +171 64 adjtimex sys_adjtimex +172 common getpid sys_getpid +173 common getppid sys_getppid +174 common getuid sys_getuid +175 common geteuid sys_geteuid +176 common getgid sys_getgid +177 common getegid sys_getegid +178 common gettid sys_gettid +179 common sysinfo sys_sysinfo compat_sys_sysinfo +180 common mq_open sys_mq_open compat_sys_mq_open +181 common mq_unlink sys_mq_unlink +182 time32 mq_timedsend sys_mq_timedsend_time32 +182 64 mq_timedsend sys_mq_timedsend +183 time32 mq_timedreceive sys_mq_timedreceive_time32 +183 64 mq_timedreceive sys_mq_timedreceive +184 common mq_notify sys_mq_notify compat_sys_mq_notify +185 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr +186 common msgget sys_msgget +187 common msgctl sys_msgctl compat_sys_msgctl +188 common msgrcv sys_msgrcv compat_sys_msgrcv +189 common msgsnd sys_msgsnd compat_sys_msgsnd +190 common semget sys_semget +191 common semctl sys_semctl compat_sys_semctl +192 time32 semtimedop sys_semtimedop_time32 +192 64 semtimedop sys_semtimedop +193 common semop sys_semop +194 common shmget sys_shmget +195 common shmctl sys_shmctl compat_sys_shmctl +196 common shmat sys_shmat compat_sys_shmat +197 common shmdt sys_shmdt +198 common socket sys_socket +199 common socketpair sys_socketpair +200 common bind sys_bind +201 common listen sys_listen +202 common accept sys_accept +203 common connect sys_connect +204 common getsockname sys_getsockname +205 common getpeername sys_getpeername +206 common sendto sys_sendto +207 common recvfrom sys_recvfrom compat_sys_recvfrom +208 common setsockopt sys_setsockopt sys_setsockopt +209 common getsockopt sys_getsockopt sys_getsockopt +210 common shutdown sys_shutdown +211 common sendmsg sys_sendmsg compat_sys_sendmsg +212 common recvmsg sys_recvmsg compat_sys_recvmsg +213 common readahead sys_readahead compat_sys_readahead +214 common brk sys_brk +215 common munmap sys_munmap +216 common mremap sys_mremap +217 common add_key sys_add_key +218 common request_key sys_request_key +219 common keyctl sys_keyctl compat_sys_keyctl +220 common clone sys_clone +221 common execve sys_execve compat_sys_execve +222 32 mmap2 sys_mmap2 +222 64 mmap sys_mmap +223 32 fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64 +223 64 fadvise64 sys_fadvise64_64 +224 common swapon sys_swapon +225 common swapoff sys_swapoff +226 common mprotect 
sys_mprotect +227 common msync sys_msync +228 common mlock sys_mlock +229 common munlock sys_munlock +230 common mlockall sys_mlockall +231 common munlockall sys_munlockall +232 common mincore sys_mincore +233 common madvise sys_madvise +234 common remap_file_pages sys_remap_file_pages +235 common mbind sys_mbind +236 common get_mempolicy sys_get_mempolicy +237 common set_mempolicy sys_set_mempolicy +238 common migrate_pages sys_migrate_pages +239 common move_pages sys_move_pages +240 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo +241 common perf_event_open sys_perf_event_open +242 common accept4 sys_accept4 +243 time32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 +243 64 recvmmsg sys_recvmmsg +# Architectures may provide up to 16 syscalls of their own between 244 and 259 +244 arc cacheflush sys_cacheflush +245 arc arc_settls sys_arc_settls +246 arc arc_gettls sys_arc_gettls +247 arc sysfs sys_sysfs +248 arc arc_usr_cmpxchg sys_arc_usr_cmpxchg + +244 csky set_thread_area sys_set_thread_area +245 csky cacheflush sys_cacheflush + +244 nios2 cacheflush sys_cacheflush + +244 or1k or1k_atomic sys_or1k_atomic + +258 riscv riscv_hwprobe sys_riscv_hwprobe +259 riscv riscv_flush_icache sys_riscv_flush_icache + +260 time32 wait4 sys_wait4 compat_sys_wait4 +260 64 wait4 sys_wait4 +261 common prlimit64 sys_prlimit64 +262 common fanotify_init sys_fanotify_init +263 common fanotify_mark sys_fanotify_mark +264 common name_to_handle_at sys_name_to_handle_at +265 common open_by_handle_at sys_open_by_handle_at +266 time32 clock_adjtime sys_clock_adjtime32 +266 64 clock_adjtime sys_clock_adjtime +267 common syncfs sys_syncfs +268 common setns sys_setns +269 common sendmmsg sys_sendmmsg compat_sys_sendmmsg +270 common process_vm_readv sys_process_vm_readv +271 common process_vm_writev sys_process_vm_writev +272 common kcmp sys_kcmp +273 common finit_module sys_finit_module +274 common sched_setattr sys_sched_setattr +275 common sched_getattr sys_sched_getattr +276 common renameat2 sys_renameat2 +277 common seccomp sys_seccomp +278 common getrandom sys_getrandom +279 common memfd_create sys_memfd_create +280 common bpf sys_bpf +281 common execveat sys_execveat compat_sys_execveat +282 common userfaultfd sys_userfaultfd +283 common membarrier sys_membarrier +284 common mlock2 sys_mlock2 +285 common copy_file_range sys_copy_file_range +286 common preadv2 sys_preadv2 compat_sys_preadv2 +287 common pwritev2 sys_pwritev2 compat_sys_pwritev2 +288 common pkey_mprotect sys_pkey_mprotect +289 common pkey_alloc sys_pkey_alloc +290 common pkey_free sys_pkey_free +291 common statx sys_statx +292 time32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents +292 64 io_pgetevents sys_io_pgetevents +293 common rseq sys_rseq +294 common kexec_file_load sys_kexec_file_load +# 295 through 402 are unassigned to sync up with generic numbers don't use +403 32 clock_gettime64 sys_clock_gettime +404 32 clock_settime64 sys_clock_settime +405 32 clock_adjtime64 sys_clock_adjtime +406 32 clock_getres_time64 sys_clock_getres +407 32 clock_nanosleep_time64 sys_clock_nanosleep +408 32 timer_gettime64 sys_timer_gettime +409 32 timer_settime64 sys_timer_settime +410 32 timerfd_gettime64 sys_timerfd_gettime +411 32 timerfd_settime64 sys_timerfd_settime +412 32 utimensat_time64 sys_utimensat +413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 +414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 +416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 +417 32 
recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 +418 32 mq_timedsend_time64 sys_mq_timedsend +419 32 mq_timedreceive_time64 sys_mq_timedreceive +420 32 semtimedop_time64 sys_semtimedop +421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 +422 32 futex_time64 sys_futex +423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval +424 common pidfd_send_signal sys_pidfd_send_signal +425 common io_uring_setup sys_io_uring_setup +426 common io_uring_enter sys_io_uring_enter +427 common io_uring_register sys_io_uring_register +428 common open_tree sys_open_tree +429 common move_mount sys_move_mount +430 common fsopen sys_fsopen +431 common fsconfig sys_fsconfig +432 common fsmount sys_fsmount +433 common fspick sys_fspick +434 common pidfd_open sys_pidfd_open +435 common clone3 sys_clone3 +436 common close_range sys_close_range +437 common openat2 sys_openat2 +438 common pidfd_getfd sys_pidfd_getfd +439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise +441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr +443 common quotactl_fd sys_quotactl_fd +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self +447 memfd_secret memfd_secret sys_memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/linux-user/hexagon/syscall_nr.h b/linux-user/hexagon/syscall_nr.h deleted file mode 100644 index b047dbbf6df..00000000000 --- a/linux-user/hexagon/syscall_nr.h +++ /dev/null @@ -1,332 +0,0 @@ -/* - * This file contains the system call numbers. - * Do not modify. 
- * This file is generated by scripts/gensyscalls.sh - */ -#ifndef LINUX_USER_HEXAGON_SYSCALL_NR_H -#define LINUX_USER_HEXAGON_SYSCALL_NR_H - -#define TARGET_NR_io_setup 0 -#define TARGET_NR_io_destroy 1 -#define TARGET_NR_io_submit 2 -#define TARGET_NR_io_cancel 3 -#define TARGET_NR_io_getevents 4 -#define TARGET_NR_setxattr 5 -#define TARGET_NR_lsetxattr 6 -#define TARGET_NR_fsetxattr 7 -#define TARGET_NR_getxattr 8 -#define TARGET_NR_lgetxattr 9 -#define TARGET_NR_fgetxattr 10 -#define TARGET_NR_listxattr 11 -#define TARGET_NR_llistxattr 12 -#define TARGET_NR_flistxattr 13 -#define TARGET_NR_removexattr 14 -#define TARGET_NR_lremovexattr 15 -#define TARGET_NR_fremovexattr 16 -#define TARGET_NR_getcwd 17 -#define TARGET_NR_lookup_dcookie 18 -#define TARGET_NR_eventfd2 19 -#define TARGET_NR_epoll_create1 20 -#define TARGET_NR_epoll_ctl 21 -#define TARGET_NR_epoll_pwait 22 -#define TARGET_NR_dup 23 -#define TARGET_NR_dup3 24 -#define TARGET_NR_fcntl64 25 -#define TARGET_NR_inotify_init1 26 -#define TARGET_NR_inotify_add_watch 27 -#define TARGET_NR_inotify_rm_watch 28 -#define TARGET_NR_ioctl 29 -#define TARGET_NR_ioprio_set 30 -#define TARGET_NR_ioprio_get 31 -#define TARGET_NR_flock 32 -#define TARGET_NR_mknodat 33 -#define TARGET_NR_mkdirat 34 -#define TARGET_NR_unlinkat 35 -#define TARGET_NR_symlinkat 36 -#define TARGET_NR_linkat 37 -#define TARGET_NR_renameat 38 -#define TARGET_NR_umount2 39 -#define TARGET_NR_mount 40 -#define TARGET_NR_pivot_root 41 -#define TARGET_NR_nfsservctl 42 -#define TARGET_NR_statfs64 43 -#define TARGET_NR_fstatfs64 44 -#define TARGET_NR_truncate64 45 -#define TARGET_NR_ftruncate64 46 -#define TARGET_NR_fallocate 47 -#define TARGET_NR_faccessat 48 -#define TARGET_NR_chdir 49 -#define TARGET_NR_fchdir 50 -#define TARGET_NR_chroot 51 -#define TARGET_NR_fchmod 52 -#define TARGET_NR_fchmodat 53 -#define TARGET_NR_fchownat 54 -#define TARGET_NR_fchown 55 -#define TARGET_NR_openat 56 -#define TARGET_NR_close 57 -#define TARGET_NR_vhangup 58 -#define TARGET_NR_pipe2 59 -#define TARGET_NR_quotactl 60 -#define TARGET_NR_getdents64 61 -#define TARGET_NR_llseek 62 -#define TARGET_NR_read 63 -#define TARGET_NR_write 64 -#define TARGET_NR_readv 65 -#define TARGET_NR_writev 66 -#define TARGET_NR_pread64 67 -#define TARGET_NR_pwrite64 68 -#define TARGET_NR_preadv 69 -#define TARGET_NR_pwritev 70 -#define TARGET_NR_sendfile64 71 -#define TARGET_NR_pselect6 72 -#define TARGET_NR_ppoll 73 -#define TARGET_NR_signalfd4 74 -#define TARGET_NR_vmsplice 75 -#define TARGET_NR_splice 76 -#define TARGET_NR_tee 77 -#define TARGET_NR_readlinkat 78 -#define TARGET_NR_fstatat64 79 -#define TARGET_NR_fstat64 80 -#define TARGET_NR_sync 81 -#define TARGET_NR_fsync 82 -#define TARGET_NR_fdatasync 83 -#define TARGET_NR_sync_file_range 84 -#define TARGET_NR_timerfd_create 85 -#define TARGET_NR_timerfd_settime 86 -#define TARGET_NR_timerfd_gettime 87 -#define TARGET_NR_utimensat 88 -#define TARGET_NR_acct 89 -#define TARGET_NR_capget 90 -#define TARGET_NR_capset 91 -#define TARGET_NR_personality 92 -#define TARGET_NR_exit 93 -#define TARGET_NR_exit_group 94 -#define TARGET_NR_waitid 95 -#define TARGET_NR_set_tid_address 96 -#define TARGET_NR_unshare 97 -#define TARGET_NR_futex 98 -#define TARGET_NR_set_robust_list 99 -#define TARGET_NR_get_robust_list 100 -#define TARGET_NR_nanosleep 101 -#define TARGET_NR_getitimer 102 -#define TARGET_NR_setitimer 103 -#define TARGET_NR_kexec_load 104 -#define TARGET_NR_init_module 105 -#define TARGET_NR_delete_module 106 -#define TARGET_NR_timer_create 107 
-#define TARGET_NR_timer_gettime 108 -#define TARGET_NR_timer_getoverrun 109 -#define TARGET_NR_timer_settime 110 -#define TARGET_NR_timer_delete 111 -#define TARGET_NR_clock_settime 112 -#define TARGET_NR_clock_gettime 113 -#define TARGET_NR_clock_getres 114 -#define TARGET_NR_clock_nanosleep 115 -#define TARGET_NR_syslog 116 -#define TARGET_NR_ptrace 117 -#define TARGET_NR_sched_setparam 118 -#define TARGET_NR_sched_setscheduler 119 -#define TARGET_NR_sched_getscheduler 120 -#define TARGET_NR_sched_getparam 121 -#define TARGET_NR_sched_setaffinity 122 -#define TARGET_NR_sched_getaffinity 123 -#define TARGET_NR_sched_yield 124 -#define TARGET_NR_sched_get_priority_max 125 -#define TARGET_NR_sched_get_priority_min 126 -#define TARGET_NR_sched_rr_get_interval 127 -#define TARGET_NR_restart_syscall 128 -#define TARGET_NR_kill 129 -#define TARGET_NR_tkill 130 -#define TARGET_NR_tgkill 131 -#define TARGET_NR_sigaltstack 132 -#define TARGET_NR_rt_sigsuspend 133 -#define TARGET_NR_rt_sigaction 134 -#define TARGET_NR_rt_sigprocmask 135 -#define TARGET_NR_rt_sigpending 136 -#define TARGET_NR_rt_sigtimedwait 137 -#define TARGET_NR_rt_sigqueueinfo 138 -#define TARGET_NR_rt_sigreturn 139 -#define TARGET_NR_setpriority 140 -#define TARGET_NR_getpriority 141 -#define TARGET_NR_reboot 142 -#define TARGET_NR_setregid 143 -#define TARGET_NR_setgid 144 -#define TARGET_NR_setreuid 145 -#define TARGET_NR_setuid 146 -#define TARGET_NR_setresuid 147 -#define TARGET_NR_getresuid 148 -#define TARGET_NR_setresgid 149 -#define TARGET_NR_getresgid 150 -#define TARGET_NR_setfsuid 151 -#define TARGET_NR_setfsgid 152 -#define TARGET_NR_times 153 -#define TARGET_NR_setpgid 154 -#define TARGET_NR_getpgid 155 -#define TARGET_NR_getsid 156 -#define TARGET_NR_setsid 157 -#define TARGET_NR_getgroups 158 -#define TARGET_NR_setgroups 159 -#define TARGET_NR_uname 160 -#define TARGET_NR_sethostname 161 -#define TARGET_NR_setdomainname 162 -#define TARGET_NR_getrlimit 163 -#define TARGET_NR_setrlimit 164 -#define TARGET_NR_getrusage 165 -#define TARGET_NR_umask 166 -#define TARGET_NR_prctl 167 -#define TARGET_NR_getcpu 168 -#define TARGET_NR_gettimeofday 169 -#define TARGET_NR_settimeofday 170 -#define TARGET_NR_adjtimex 171 -#define TARGET_NR_getpid 172 -#define TARGET_NR_getppid 173 -#define TARGET_NR_getuid 174 -#define TARGET_NR_geteuid 175 -#define TARGET_NR_getgid 176 -#define TARGET_NR_getegid 177 -#define TARGET_NR_gettid 178 -#define TARGET_NR_sysinfo 179 -#define TARGET_NR_mq_open 180 -#define TARGET_NR_mq_unlink 181 -#define TARGET_NR_mq_timedsend 182 -#define TARGET_NR_mq_timedreceive 183 -#define TARGET_NR_mq_notify 184 -#define TARGET_NR_mq_getsetattr 185 -#define TARGET_NR_msgget 186 -#define TARGET_NR_msgctl 187 -#define TARGET_NR_msgrcv 188 -#define TARGET_NR_msgsnd 189 -#define TARGET_NR_semget 190 -#define TARGET_NR_semctl 191 -#define TARGET_NR_semtimedop 192 -#define TARGET_NR_semop 193 -#define TARGET_NR_shmget 194 -#define TARGET_NR_shmctl 195 -#define TARGET_NR_shmat 196 -#define TARGET_NR_shmdt 197 -#define TARGET_NR_socket 198 -#define TARGET_NR_socketpair 199 -#define TARGET_NR_bind 200 -#define TARGET_NR_listen 201 -#define TARGET_NR_accept 202 -#define TARGET_NR_connect 203 -#define TARGET_NR_getsockname 204 -#define TARGET_NR_getpeername 205 -#define TARGET_NR_sendto 206 -#define TARGET_NR_recvfrom 207 -#define TARGET_NR_setsockopt 208 -#define TARGET_NR_getsockopt 209 -#define TARGET_NR_shutdown 210 -#define TARGET_NR_sendmsg 211 -#define TARGET_NR_recvmsg 212 -#define TARGET_NR_readahead 213 
-#define TARGET_NR_brk 214 -#define TARGET_NR_munmap 215 -#define TARGET_NR_mremap 216 -#define TARGET_NR_add_key 217 -#define TARGET_NR_request_key 218 -#define TARGET_NR_keyctl 219 -#define TARGET_NR_clone 220 -#define TARGET_NR_execve 221 -#define TARGET_NR_mmap2 222 -#define TARGET_NR_fadvise64_64 223 -#define TARGET_NR_swapon 224 -#define TARGET_NR_swapoff 225 -#define TARGET_NR_mprotect 226 -#define TARGET_NR_msync 227 -#define TARGET_NR_mlock 228 -#define TARGET_NR_munlock 229 -#define TARGET_NR_mlockall 230 -#define TARGET_NR_munlockall 231 -#define TARGET_NR_mincore 232 -#define TARGET_NR_madvise 233 -#define TARGET_NR_remap_file_pages 234 -#define TARGET_NR_mbind 235 -#define TARGET_NR_get_mempolicy 236 -#define TARGET_NR_set_mempolicy 237 -#define TARGET_NR_migrate_pages 238 -#define TARGET_NR_move_pages 239 -#define TARGET_NR_rt_tgsigqueueinfo 240 -#define TARGET_NR_perf_event_open 241 -#define TARGET_NR_accept4 242 -#define TARGET_NR_recvmmsg 243 -#define TARGET_NR_arch_specific_syscall 244 -#define TARGET_NR_wait4 260 -#define TARGET_NR_prlimit64 261 -#define TARGET_NR_fanotify_init 262 -#define TARGET_NR_fanotify_mark 263 -#define TARGET_NR_name_to_handle_at 264 -#define TARGET_NR_open_by_handle_at 265 -#define TARGET_NR_clock_adjtime 266 -#define TARGET_NR_syncfs 267 -#define TARGET_NR_setns 268 -#define TARGET_NR_sendmmsg 269 -#define TARGET_NR_process_vm_readv 270 -#define TARGET_NR_process_vm_writev 271 -#define TARGET_NR_kcmp 272 -#define TARGET_NR_finit_module 273 -#define TARGET_NR_sched_setattr 274 -#define TARGET_NR_sched_getattr 275 -#define TARGET_NR_renameat2 276 -#define TARGET_NR_seccomp 277 -#define TARGET_NR_getrandom 278 -#define TARGET_NR_memfd_create 279 -#define TARGET_NR_bpf 280 -#define TARGET_NR_execveat 281 -#define TARGET_NR_userfaultfd 282 -#define TARGET_NR_membarrier 283 -#define TARGET_NR_mlock2 284 -#define TARGET_NR_copy_file_range 285 -#define TARGET_NR_preadv2 286 -#define TARGET_NR_pwritev2 287 -#define TARGET_NR_pkey_mprotect 288 -#define TARGET_NR_pkey_alloc 289 -#define TARGET_NR_pkey_free 290 -#define TARGET_NR_statx 291 -#define TARGET_NR_io_pgetevents 292 -#define TARGET_NR_rseq 293 -#define TARGET_NR_kexec_file_load 294 -#define TARGET_NR_clock_gettime64 403 -#define TARGET_NR_clock_settime64 404 -#define TARGET_NR_clock_adjtime64 405 -#define TARGET_NR_clock_getres_time64 406 -#define TARGET_NR_clock_nanosleep_time64 407 -#define TARGET_NR_timer_gettime64 408 -#define TARGET_NR_timer_settime64 409 -#define TARGET_NR_timerfd_gettime64 410 -#define TARGET_NR_timerfd_settime64 411 -#define TARGET_NR_utimensat_time64 412 -#define TARGET_NR_pselect6_time64 413 -#define TARGET_NR_ppoll_time64 414 -#define TARGET_NR_io_pgetevents_time64 416 -#define TARGET_NR_recvmmsg_time64 417 -#define TARGET_NR_mq_timedsend_time64 418 -#define TARGET_NR_mq_timedreceive_time64 419 -#define TARGET_NR_semtimedop_time64 420 -#define TARGET_NR_rt_sigtimedwait_time64 421 -#define TARGET_NR_futex_time64 422 -#define TARGET_NR_sched_rr_get_interval_time64 423 -#define TARGET_NR_pidfd_send_signal 424 -#define TARGET_NR_io_uring_setup 425 -#define TARGET_NR_io_uring_enter 426 -#define TARGET_NR_io_uring_register 427 -#define TARGET_NR_open_tree 428 -#define TARGET_NR_move_mount 429 -#define TARGET_NR_fsopen 430 -#define TARGET_NR_fsconfig 431 -#define TARGET_NR_fsmount 432 -#define TARGET_NR_fspick 433 -#define TARGET_NR_pidfd_open 434 -#define TARGET_NR_close_range 436 -#define TARGET_NR_openat2 437 -#define TARGET_NR_pidfd_getfd 438 -#define TARGET_NR_faccessat2 
439 -#define TARGET_NR_process_madvise 440 -#define TARGET_NR_epoll_pwait2 441 -#define TARGET_NR_mount_setattr 442 -#define TARGET_NR_landlock_create_ruleset 444 -#define TARGET_NR_landlock_add_rule 445 -#define TARGET_NR_landlock_restrict_self 446 -#define TARGET_NR_syscalls 447 - -#endif /* LINUX_USER_HEXAGON_SYSCALL_NR_H */ diff --git a/linux-user/hexagon/syscallhdr.sh b/linux-user/hexagon/syscallhdr.sh new file mode 100644 index 00000000000..ed605c038ee --- /dev/null +++ b/linux-user/hexagon/syscallhdr.sh @@ -0,0 +1,28 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +prefix="$4" +offset="$5" + +fileguard=LINUX_USER_HEXAGON_`basename "$out" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + echo "#ifndef ${fileguard}" + echo "#define ${fileguard} 1" + echo "" + + while read nr abi name entry compat ; do + if [ -z "$offset" ]; then + echo "#define TARGET_NR_${prefix}${name} $nr" + else + echo "#define TARGET_NR_${prefix}${name} ($offset + $nr)" + fi + done + + echo "" + echo "#endif /* ${fileguard} */" +) > "$out" diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c index bc093b8fe8b..9abaad5ef81 100644 --- a/linux-user/hppa/cpu_loop.c +++ b/linux-user/hppa/cpu_loop.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" static abi_ulong hppa_lws(CPUHPPAState *env) @@ -99,6 +99,8 @@ static abi_ulong hppa_lws(CPUHPPAState *env) #endif } break; + default: + g_assert_not_reached(); } break; } @@ -110,7 +112,7 @@ static abi_ulong hppa_lws(CPUHPPAState *env) void cpu_loop(CPUHPPAState *env) { CPUState *cs = env_cpu(env); - abi_ulong ret; + abi_ulong ret, si_code = 0; int trapnr; while (1) { @@ -167,7 +169,15 @@ void cpu_loop(CPUHPPAState *env) force_sig_fault(TARGET_SIGFPE, TARGET_FPE_CONDTRAP, env->iaoq_f); break; case EXCP_ASSIST: - force_sig_fault(TARGET_SIGFPE, 0, env->iaoq_f); + #define set_si_code(mask, val) \ + if (env->fr[0] & mask) { si_code = val; } + set_si_code(R_FPSR_FLG_I_MASK, TARGET_FPE_FLTRES); + set_si_code(R_FPSR_FLG_U_MASK, TARGET_FPE_FLTUND); + set_si_code(R_FPSR_FLG_O_MASK, TARGET_FPE_FLTOVF); + set_si_code(R_FPSR_FLG_Z_MASK, TARGET_FPE_FLTDIV); + set_si_code(R_FPSR_FLG_V_MASK, TARGET_FPE_FLTINV); + #undef set_si_code + force_sig_fault(TARGET_SIGFPE, si_code, env->iaoq_f); break; case EXCP_BREAK: force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->iaoq_f); @@ -186,7 +196,7 @@ void cpu_loop(CPUHPPAState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { int i; for (i = 1; i < 32; i++) { diff --git a/linux-user/hppa/syscall.tbl b/linux-user/hppa/syscall.tbl index aabc37f8cae..647f08e67ac 100644 --- a/linux-user/hppa/syscall.tbl +++ b/linux-user/hppa/syscall.tbl @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note # # system call numbers and entry vectors for parisc # @@ -108,7 +108,7 @@ 95 common fchown sys_fchown 96 common getpriority sys_getpriority 97 common setpriority sys_setpriority -98 common recv sys_recv +98 common recv sys_recv compat_sys_recv 99 common statfs sys_statfs compat_sys_statfs 100 common fstatfs sys_fstatfs compat_sys_fstatfs 101 common 
stat64 sys_stat64 @@ -131,11 +131,11 @@ 116 common sysinfo sys_sysinfo compat_sys_sysinfo 117 common shutdown sys_shutdown 118 common fsync sys_fsync -119 common madvise sys_madvise +119 common madvise parisc_madvise 120 common clone sys_clone_wrapper 121 common setdomainname sys_setdomainname 122 common sendfile sys_sendfile compat_sys_sendfile -123 common recvfrom sys_recvfrom +123 common recvfrom sys_recvfrom compat_sys_recvfrom 124 32 adjtimex sys_adjtimex_time32 124 64 adjtimex sys_adjtimex 125 common mprotect sys_mprotect @@ -147,7 +147,7 @@ 131 common quotactl sys_quotactl 132 common getpgid sys_getpgid 133 common fchdir sys_fchdir -134 common bdflush sys_bdflush +134 common bdflush sys_ni_syscall 135 common sysfs sys_sysfs 136 32 personality parisc_personality 136 64 personality sys_personality @@ -245,7 +245,7 @@ # 220 was alloc_hugepages # 221 was free_hugepages 222 common exit_group sys_exit_group -223 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +223 common lookup_dcookie sys_ni_syscall 224 common epoll_create sys_epoll_create 225 common epoll_ctl sys_epoll_ctl 226 common epoll_wait sys_epoll_wait @@ -292,9 +292,9 @@ 258 32 clock_nanosleep sys_clock_nanosleep_time32 258 64 clock_nanosleep sys_clock_nanosleep 259 common tgkill sys_tgkill -260 common mbind sys_mbind compat_sys_mbind -261 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy -262 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +260 common mbind sys_mbind +261 common get_mempolicy sys_get_mempolicy +262 common set_mempolicy sys_set_mempolicy # 263 was vserver 264 common add_key sys_add_key 265 common request_key sys_request_key @@ -331,7 +331,7 @@ 292 64 sync_file_range sys_sync_file_range 293 common tee sys_tee 294 common vmsplice sys_vmsplice -295 common move_pages sys_move_pages compat_sys_move_pages +295 common move_pages sys_move_pages 296 common getcpu sys_getcpu 297 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait 298 common statfs64 sys_statfs64 compat_sys_statfs64 @@ -364,7 +364,7 @@ 320 common accept4 sys_accept4 321 common prlimit64 sys_prlimit64 322 common fanotify_init sys_fanotify_init -323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark +323 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark 324 32 clock_adjtime sys_clock_adjtime32 324 64 clock_adjtime sys_clock_adjtime 325 common name_to_handle_at sys_name_to_handle_at @@ -400,6 +400,7 @@ 353 common pkey_free sys_pkey_free 354 common rseq sys_rseq 355 common kexec_file_load sys_kexec_file_load sys_kexec_file_load +356 common cacheflush sys_cacheflush # up to 402 is unassigned and reserved for arch specific syscalls 403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime 404 32 clock_settime64 sys_clock_settime sys_clock_settime @@ -413,7 +414,7 @@ 412 32 utimensat_time64 sys_utimensat sys_utimensat 413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 -416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend 419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive @@ -440,7 +441,23 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 common 
quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/linux-user/hppa/syscallhdr.sh b/linux-user/hppa/syscallhdr.sh index ac91a957621..bf1c1d4f30b 100644 --- a/linux-user/hppa/syscallhdr.sh +++ b/linux-user/hppa/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c index 92beb6830cc..d96d5553faf 100644 --- a/linux-user/i386/cpu_loop.c +++ b/linux-user/i386/cpu_loop.c @@ -21,7 +21,7 @@ #include "qemu.h" #include "qemu/timer.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" #include "user-mmap.h" @@ -172,6 +172,7 @@ static void emulate_vsyscall(CPUX86State *env) /* * Perform the syscall. None of the vsyscalls should need restarting. */ + get_task_state(env_cpu(env))->orig_ax = syscall; ret = do_syscall(env, syscall, env->regs[R_EDI], env->regs[R_ESI], env->regs[R_EDX], env->regs[10], env->regs[8], env->regs[9], 0, 0); @@ -221,6 +222,7 @@ void cpu_loop(CPUX86State *env) case EXCP_SYSCALL: #endif /* linux syscall from int $0x80 */ + get_task_state(cs)->orig_ax = env->regs[R_EAX]; ret = do_syscall(env, env->regs[R_EAX], env->regs[R_EBX], @@ -239,6 +241,7 @@ void cpu_loop(CPUX86State *env) #ifdef TARGET_X86_64 case EXCP_SYSCALL: /* linux syscall from syscall instruction. 
 */ + get_task_state(cs)->orig_ax = env->regs[R_EAX]; ret = do_syscall(env, env->regs[R_EAX], env->regs[R_EDI], @@ -328,7 +331,7 @@ static void target_cpu_free(void *obj) g_free(obj); } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { CPUState *cpu = env_cpu(env); bool is64 = (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) != 0; diff --git a/linux-user/i386/signal.c b/linux-user/i386/signal.c index cb90711834f..0f11dba831f 100644 --- a/linux-user/i386/signal.c +++ b/linux-user/i386/signal.c @@ -754,8 +754,8 @@ static bool restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc) env->eip = tswapl(sc->rip); #endif - cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3); - cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3); + cpu_x86_load_seg(env, R_CS, lduw_le_p(&sc->cs) | 3); + cpu_x86_load_seg(env, R_SS, lduw_le_p(&sc->ss) | 3); tmpflags = tswapl(sc->eflags); env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5); diff --git a/linux-user/i386/syscall_32.tbl b/linux-user/i386/syscall_32.tbl index 4bbc267fb36..534c74b14fa 100644 --- a/linux-user/i386/syscall_32.tbl +++ b/linux-user/i386/syscall_32.tbl @@ -1,8 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note # # 32-bit system call numbers and entry vectors # # The format is: -# <number> <abi> <name> <entry point> <compat entry point> +# <number> <abi> <name> <entry point> [<compat entry point> [noreturn]] # # The __ia32_sys and __ia32_compat_sys stubs are created on-the-fly for # sys_*() system calls and compat_sys_*() compat system calls if @@ -12,7 +13,7 @@ # The abi is always "i386" for this file. # 0 i386 restart_syscall sys_restart_syscall -1 i386 exit sys_exit +1 i386 exit sys_exit - noreturn 2 i386 fork sys_fork 3 i386 read sys_read 4 i386 write sys_write @@ -145,7 +146,7 @@ 131 i386 quotactl sys_quotactl 132 i386 getpgid sys_getpgid 133 i386 fchdir sys_fchdir -134 i386 bdflush sys_bdflush +134 i386 bdflush sys_ni_syscall 135 i386 sysfs sys_sysfs 136 i386 personality sys_personality 137 i386 afs_syscall @@ -263,8 +264,8 @@ 249 i386 io_cancel sys_io_cancel 250 i386 fadvise64 sys_ia32_fadvise64 # 251 is available for reuse (was briefly sys_set_zone_reclaim) -252 i386 exit_group sys_exit_group -253 i386 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +252 i386 exit_group sys_exit_group - noreturn +253 i386 lookup_dcookie 254 i386 epoll_create sys_epoll_create 255 i386 epoll_ctl sys_epoll_ctl 256 i386 epoll_wait sys_epoll_wait @@ -286,7 +287,7 @@ 272 i386 fadvise64_64 sys_ia32_fadvise64_64 273 i386 vserver 274 i386 mbind sys_mbind -275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy +275 i386 get_mempolicy sys_get_mempolicy 276 i386 set_mempolicy sys_set_mempolicy 277 i386 mq_open sys_mq_open compat_sys_mq_open 278 i386 mq_unlink sys_mq_unlink @@ -328,7 +329,7 @@ 314 i386 sync_file_range sys_ia32_sync_file_range 315 i386 tee sys_tee 316 i386 vmsplice sys_vmsplice -317 i386 move_pages sys_move_pages compat_sys_move_pages +317 i386 move_pages sys_move_pages 318 i386 getcpu sys_getcpu 319 i386 epoll_pwait sys_epoll_pwait 320 i386 utimensat sys_utimensat_time32 @@ -420,7 +421,7 @@ 412 i386 utimensat_time64 sys_utimensat 413 i386 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 414 i386 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 -416 i386 io_pgetevents_time64 sys_io_pgetevents +416 i386 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 417 i386 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 418 i386 mq_timedsend_time64 sys_mq_timedsend 419 i386 
mq_timedreceive_time64 sys_mq_timedreceive @@ -447,7 +448,23 @@ 440 i386 process_madvise sys_process_madvise 441 i386 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 i386 mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 i386 quotactl_fd sys_quotactl_fd 444 i386 landlock_create_ruleset sys_landlock_create_ruleset 445 i386 landlock_add_rule sys_landlock_add_rule 446 i386 landlock_restrict_self sys_landlock_restrict_self +447 i386 memfd_secret sys_memfd_secret +448 i386 process_mrelease sys_process_mrelease +449 i386 futex_waitv sys_futex_waitv +450 i386 set_mempolicy_home_node sys_set_mempolicy_home_node +451 i386 cachestat sys_cachestat +452 i386 fchmodat2 sys_fchmodat2 +453 i386 map_shadow_stack sys_map_shadow_stack +454 i386 futex_wake sys_futex_wake +455 i386 futex_wait sys_futex_wait +456 i386 futex_requeue sys_futex_requeue +457 i386 statmount sys_statmount +458 i386 listmount sys_listmount +459 i386 lsm_get_self_attr sys_lsm_get_self_attr +460 i386 lsm_set_self_attr sys_lsm_set_self_attr +461 i386 lsm_list_modules sys_lsm_list_modules +462 i386 mseal sys_mseal diff --git a/linux-user/i386/syscallhdr.sh b/linux-user/i386/syscallhdr.sh index b2eca96db7e..938a793d2a8 100644 --- a/linux-user/i386/syscallhdr.sh +++ b/linux-user/i386/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/i386/target_signal.h b/linux-user/i386/target_signal.h index 9315cba241c..eee792ef637 100644 --- a/linux-user/i386/target_signal.h +++ b/linux-user/i386/target_signal.h @@ -3,6 +3,8 @@ #include "../generic/signal.h" +#define TARGET_SA_RESTORER 0x04000000 + #define TARGET_ARCH_HAS_SETUP_FRAME #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 diff --git a/linux-user/loongarch64/Makefile.vdso b/linux-user/loongarch64/Makefile.vdso index 369de13344a..1d760b1e47e 100644 --- a/linux-user/loongarch64/Makefile.vdso +++ b/linux-user/loongarch64/Makefile.vdso @@ -8,4 +8,5 @@ all: $(SUBDIR)/vdso.so $(SUBDIR)/vdso.so: vdso.S vdso.ld vdso-asmoffset.h $(CC) -o $@ -nostdlib -shared -fpic -Wl,-h,linux-vdso.so.1 \ -Wl,--build-id=sha1 -Wl,--hash-style=both \ - -Wl,--no-warn-rwx-segments -Wl,-T,$(SUBDIR)/vdso.ld $< + -Wl,--no-warn-rwx-segments -Wl,-z,max-page-size=4096 \ + -Wl,-T,$(SUBDIR)/vdso.ld $< diff --git a/linux-user/loongarch64/cpu_loop.c b/linux-user/loongarch64/cpu_loop.c index 73d7b6796a4..ec8a06c88c7 100644 --- a/linux-user/loongarch64/cpu_loop.c +++ b/linux-user/loongarch64/cpu_loop.c @@ -8,9 +8,15 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" +/* Break codes */ +enum { + BRK_OVERFLOW = 6, + BRK_DIVZERO = 7 +}; + void cpu_loop(CPULoongArchState *env) { CPUState *cs = env_cpu(env); @@ -66,9 +72,26 @@ void cpu_loop(CPULoongArchState *env) force_sig_fault(TARGET_SIGFPE, si_code, env->pc); break; case EXCP_DEBUG: - case EXCCODE_BRK: force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); break; + case EXCCODE_BRK: + { + unsigned int opcode; + + get_user_u32(opcode, env->pc); + + switch (opcode & 0x7fff) { + case BRK_OVERFLOW: + force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTOVF, env->pc); + break; + case BRK_DIVZERO: + force_sig_fault(TARGET_SIGFPE, TARGET_FPE_INTDIV, env->pc); + break; + default: + force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->pc); + } + } + break; case EXCCODE_BCE: force_sig_fault(TARGET_SIGSYS, TARGET_SI_KERNEL, env->pc); break; @@ -97,7 +120,7 @@ void 
cpu_loop(CPULoongArchState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { int i; diff --git a/linux-user/loongarch64/meson.build b/linux-user/loongarch64/meson.build index 17896535f0f..64cb537bf9e 100644 --- a/linux-user/loongarch64/meson.build +++ b/linux-user/loongarch64/meson.build @@ -2,3 +2,10 @@ vdso_inc = gen_vdso.process('vdso.so', extra_args: ['-r', '__vdso_rt_sigreturn']) linux_user_ss.add(when: 'TARGET_LOONGARCH64', if_true: vdso_inc) + + +syscall_nr_generators += { + 'loongarch64': generator(sh, + arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ], + output: '@BASENAME@_nr.h') +} diff --git a/linux-user/loongarch64/syscall.tbl b/linux-user/loongarch64/syscall.tbl new file mode 100644 index 00000000000..845e24eb372 --- /dev/null +++ b/linux-user/loongarch64/syscall.tbl @@ -0,0 +1,405 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# This file contains the system call numbers for all of the +# more recently added architectures. +# +# As a basic principle, no duplication of functionality +# should be added, e.g. we don't use lseek when llseek +# is present. New architectures should use this file +# and implement the less feature-full calls in user space. +# +0 common io_setup sys_io_setup compat_sys_io_setup +1 common io_destroy sys_io_destroy +2 common io_submit sys_io_submit compat_sys_io_submit +3 common io_cancel sys_io_cancel +4 time32 io_getevents sys_io_getevents_time32 +4 64 io_getevents sys_io_getevents +5 common setxattr sys_setxattr +6 common lsetxattr sys_lsetxattr +7 common fsetxattr sys_fsetxattr +8 common getxattr sys_getxattr +9 common lgetxattr sys_lgetxattr +10 common fgetxattr sys_fgetxattr +11 common listxattr sys_listxattr +12 common llistxattr sys_llistxattr +13 common flistxattr sys_flistxattr +14 common removexattr sys_removexattr +15 common lremovexattr sys_lremovexattr +16 common fremovexattr sys_fremovexattr +17 common getcwd sys_getcwd +18 common lookup_dcookie sys_ni_syscall +19 common eventfd2 sys_eventfd2 +20 common epoll_create1 sys_epoll_create1 +21 common epoll_ctl sys_epoll_ctl +22 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait +23 common dup sys_dup +24 common dup3 sys_dup3 +25 32 fcntl64 sys_fcntl64 compat_sys_fcntl64 +25 64 fcntl sys_fcntl +26 common inotify_init1 sys_inotify_init1 +27 common inotify_add_watch sys_inotify_add_watch +28 common inotify_rm_watch sys_inotify_rm_watch +29 common ioctl sys_ioctl compat_sys_ioctl +30 common ioprio_set sys_ioprio_set +31 common ioprio_get sys_ioprio_get +32 common flock sys_flock +33 common mknodat sys_mknodat +34 common mkdirat sys_mkdirat +35 common unlinkat sys_unlinkat +36 common symlinkat sys_symlinkat +37 common linkat sys_linkat +# renameat is superseded with flags by renameat2 +38 renameat renameat sys_renameat +39 common umount2 sys_umount +40 common mount sys_mount +41 common pivot_root sys_pivot_root +42 common nfsservctl sys_ni_syscall +43 32 statfs64 sys_statfs64 compat_sys_statfs64 +43 64 statfs sys_statfs +44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 +44 64 fstatfs sys_fstatfs +45 32 truncate64 sys_truncate64 compat_sys_truncate64 +45 64 truncate sys_truncate +46 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +46 64 ftruncate sys_ftruncate +47 common fallocate sys_fallocate compat_sys_fallocate +48 common faccessat sys_faccessat +49 common chdir sys_chdir +50 common fchdir sys_fchdir +51 
common chroot sys_chroot +52 common fchmod sys_fchmod +53 common fchmodat sys_fchmodat +54 common fchownat sys_fchownat +55 common fchown sys_fchown +56 common openat sys_openat +57 common close sys_close +58 common vhangup sys_vhangup +59 common pipe2 sys_pipe2 +60 common quotactl sys_quotactl +61 common getdents64 sys_getdents64 +62 32 llseek sys_llseek +62 64 lseek sys_lseek +63 common read sys_read +64 common write sys_write +65 common readv sys_readv sys_readv +66 common writev sys_writev sys_writev +67 common pread64 sys_pread64 compat_sys_pread64 +68 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +69 common preadv sys_preadv compat_sys_preadv +70 common pwritev sys_pwritev compat_sys_pwritev +71 32 sendfile64 sys_sendfile64 +71 64 sendfile sys_sendfile64 +72 time32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32 +72 64 pselect6 sys_pselect6 +73 time32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32 +73 64 ppoll sys_ppoll +74 common signalfd4 sys_signalfd4 compat_sys_signalfd4 +75 common vmsplice sys_vmsplice +76 common splice sys_splice +77 common tee sys_tee +78 common readlinkat sys_readlinkat +79 stat64 fstatat64 sys_fstatat64 +79 64 newfstatat sys_newfstatat +80 stat64 fstat64 sys_fstat64 +80 64 fstat sys_newfstat +81 common sync sys_sync +82 common fsync sys_fsync +83 common fdatasync sys_fdatasync +84 common sync_file_range sys_sync_file_range compat_sys_sync_file_range +85 common timerfd_create sys_timerfd_create +86 time32 timerfd_settime sys_timerfd_settime32 +86 64 timerfd_settime sys_timerfd_settime +87 time32 timerfd_gettime sys_timerfd_gettime32 +87 64 timerfd_gettime sys_timerfd_gettime +88 time32 utimensat sys_utimensat_time32 +88 64 utimensat sys_utimensat +89 common acct sys_acct +90 common capget sys_capget +91 common capset sys_capset +92 common personality sys_personality +93 common exit sys_exit +94 common exit_group sys_exit_group +95 common waitid sys_waitid compat_sys_waitid +96 common set_tid_address sys_set_tid_address +97 common unshare sys_unshare +98 time32 futex sys_futex_time32 +98 64 futex sys_futex +99 common set_robust_list sys_set_robust_list compat_sys_set_robust_list +100 common get_robust_list sys_get_robust_list compat_sys_get_robust_list +101 time32 nanosleep sys_nanosleep_time32 +101 64 nanosleep sys_nanosleep +102 common getitimer sys_getitimer compat_sys_getitimer +103 common setitimer sys_setitimer compat_sys_setitimer +104 common kexec_load sys_kexec_load compat_sys_kexec_load +105 common init_module sys_init_module +106 common delete_module sys_delete_module +107 common timer_create sys_timer_create compat_sys_timer_create +108 time32 timer_gettime sys_timer_gettime32 +108 64 timer_gettime sys_timer_gettime +109 common timer_getoverrun sys_timer_getoverrun +110 time32 timer_settime sys_timer_settime32 +110 64 timer_settime sys_timer_settime +111 common timer_delete sys_timer_delete +112 time32 clock_settime sys_clock_settime32 +112 64 clock_settime sys_clock_settime +113 time32 clock_gettime sys_clock_gettime32 +113 64 clock_gettime sys_clock_gettime +114 time32 clock_getres sys_clock_getres_time32 +114 64 clock_getres sys_clock_getres +115 time32 clock_nanosleep sys_clock_nanosleep_time32 +115 64 clock_nanosleep sys_clock_nanosleep +116 common syslog sys_syslog +117 common ptrace sys_ptrace compat_sys_ptrace +118 common sched_setparam sys_sched_setparam +119 common sched_setscheduler sys_sched_setscheduler +120 common sched_getscheduler sys_sched_getscheduler +121 common sched_getparam sys_sched_getparam +122 common 
sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity +123 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity +124 common sched_yield sys_sched_yield +125 common sched_get_priority_max sys_sched_get_priority_max +126 common sched_get_priority_min sys_sched_get_priority_min +127 time32 sched_rr_get_interval sys_sched_rr_get_interval_time32 +127 64 sched_rr_get_interval sys_sched_rr_get_interval +128 common restart_syscall sys_restart_syscall +129 common kill sys_kill +130 common tkill sys_tkill +131 common tgkill sys_tgkill +132 common sigaltstack sys_sigaltstack compat_sys_sigaltstack +133 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend +134 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction +135 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask +136 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending +137 time32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32 +137 64 rt_sigtimedwait sys_rt_sigtimedwait +138 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo +139 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn +140 common setpriority sys_setpriority +141 common getpriority sys_getpriority +142 common reboot sys_reboot +143 common setregid sys_setregid +144 common setgid sys_setgid +145 common setreuid sys_setreuid +146 common setuid sys_setuid +147 common setresuid sys_setresuid +148 common getresuid sys_getresuid +149 common setresgid sys_setresgid +150 common getresgid sys_getresgid +151 common setfsuid sys_setfsuid +152 common setfsgid sys_setfsgid +153 common times sys_times compat_sys_times +154 common setpgid sys_setpgid +155 common getpgid sys_getpgid +156 common getsid sys_getsid +157 common setsid sys_setsid +158 common getgroups sys_getgroups +159 common setgroups sys_setgroups +160 common uname sys_newuname +161 common sethostname sys_sethostname +162 common setdomainname sys_setdomainname +# getrlimit and setrlimit are superseded with prlimit64 +163 rlimit getrlimit sys_getrlimit compat_sys_getrlimit +164 rlimit setrlimit sys_setrlimit compat_sys_setrlimit +165 common getrusage sys_getrusage compat_sys_getrusage +166 common umask sys_umask +167 common prctl sys_prctl +168 common getcpu sys_getcpu +169 time32 gettimeofday sys_gettimeofday compat_sys_gettimeofday +169 64 gettimeofday sys_gettimeofday +170 time32 settimeofday sys_settimeofday compat_sys_settimeofday +170 64 settimeofday sys_settimeofday +171 time32 adjtimex sys_adjtimex_time32 +171 64 adjtimex sys_adjtimex +172 common getpid sys_getpid +173 common getppid sys_getppid +174 common getuid sys_getuid +175 common geteuid sys_geteuid +176 common getgid sys_getgid +177 common getegid sys_getegid +178 common gettid sys_gettid +179 common sysinfo sys_sysinfo compat_sys_sysinfo +180 common mq_open sys_mq_open compat_sys_mq_open +181 common mq_unlink sys_mq_unlink +182 time32 mq_timedsend sys_mq_timedsend_time32 +182 64 mq_timedsend sys_mq_timedsend +183 time32 mq_timedreceive sys_mq_timedreceive_time32 +183 64 mq_timedreceive sys_mq_timedreceive +184 common mq_notify sys_mq_notify compat_sys_mq_notify +185 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr +186 common msgget sys_msgget +187 common msgctl sys_msgctl compat_sys_msgctl +188 common msgrcv sys_msgrcv compat_sys_msgrcv +189 common msgsnd sys_msgsnd compat_sys_msgsnd +190 common semget sys_semget +191 common semctl sys_semctl compat_sys_semctl +192 time32 semtimedop sys_semtimedop_time32 +192 64 
semtimedop sys_semtimedop +193 common semop sys_semop +194 common shmget sys_shmget +195 common shmctl sys_shmctl compat_sys_shmctl +196 common shmat sys_shmat compat_sys_shmat +197 common shmdt sys_shmdt +198 common socket sys_socket +199 common socketpair sys_socketpair +200 common bind sys_bind +201 common listen sys_listen +202 common accept sys_accept +203 common connect sys_connect +204 common getsockname sys_getsockname +205 common getpeername sys_getpeername +206 common sendto sys_sendto +207 common recvfrom sys_recvfrom compat_sys_recvfrom +208 common setsockopt sys_setsockopt sys_setsockopt +209 common getsockopt sys_getsockopt sys_getsockopt +210 common shutdown sys_shutdown +211 common sendmsg sys_sendmsg compat_sys_sendmsg +212 common recvmsg sys_recvmsg compat_sys_recvmsg +213 common readahead sys_readahead compat_sys_readahead +214 common brk sys_brk +215 common munmap sys_munmap +216 common mremap sys_mremap +217 common add_key sys_add_key +218 common request_key sys_request_key +219 common keyctl sys_keyctl compat_sys_keyctl +220 common clone sys_clone +221 common execve sys_execve compat_sys_execve +222 32 mmap2 sys_mmap2 +222 64 mmap sys_mmap +223 32 fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64 +223 64 fadvise64 sys_fadvise64_64 +224 common swapon sys_swapon +225 common swapoff sys_swapoff +226 common mprotect sys_mprotect +227 common msync sys_msync +228 common mlock sys_mlock +229 common munlock sys_munlock +230 common mlockall sys_mlockall +231 common munlockall sys_munlockall +232 common mincore sys_mincore +233 common madvise sys_madvise +234 common remap_file_pages sys_remap_file_pages +235 common mbind sys_mbind +236 common get_mempolicy sys_get_mempolicy +237 common set_mempolicy sys_set_mempolicy +238 common migrate_pages sys_migrate_pages +239 common move_pages sys_move_pages +240 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo +241 common perf_event_open sys_perf_event_open +242 common accept4 sys_accept4 +243 time32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 +243 64 recvmmsg sys_recvmmsg +# Architectures may provide up to 16 syscalls of their own between 244 and 259 +244 arc cacheflush sys_cacheflush +245 arc arc_settls sys_arc_settls +246 arc arc_gettls sys_arc_gettls +247 arc sysfs sys_sysfs +248 arc arc_usr_cmpxchg sys_arc_usr_cmpxchg + +244 csky set_thread_area sys_set_thread_area +245 csky cacheflush sys_cacheflush + +244 nios2 cacheflush sys_cacheflush + +244 or1k or1k_atomic sys_or1k_atomic + +258 riscv riscv_hwprobe sys_riscv_hwprobe +259 riscv riscv_flush_icache sys_riscv_flush_icache + +260 time32 wait4 sys_wait4 compat_sys_wait4 +260 64 wait4 sys_wait4 +261 common prlimit64 sys_prlimit64 +262 common fanotify_init sys_fanotify_init +263 common fanotify_mark sys_fanotify_mark +264 common name_to_handle_at sys_name_to_handle_at +265 common open_by_handle_at sys_open_by_handle_at +266 time32 clock_adjtime sys_clock_adjtime32 +266 64 clock_adjtime sys_clock_adjtime +267 common syncfs sys_syncfs +268 common setns sys_setns +269 common sendmmsg sys_sendmmsg compat_sys_sendmmsg +270 common process_vm_readv sys_process_vm_readv +271 common process_vm_writev sys_process_vm_writev +272 common kcmp sys_kcmp +273 common finit_module sys_finit_module +274 common sched_setattr sys_sched_setattr +275 common sched_getattr sys_sched_getattr +276 common renameat2 sys_renameat2 +277 common seccomp sys_seccomp +278 common getrandom sys_getrandom +279 common memfd_create sys_memfd_create +280 common bpf sys_bpf +281 
common execveat sys_execveat compat_sys_execveat +282 common userfaultfd sys_userfaultfd +283 common membarrier sys_membarrier +284 common mlock2 sys_mlock2 +285 common copy_file_range sys_copy_file_range +286 common preadv2 sys_preadv2 compat_sys_preadv2 +287 common pwritev2 sys_pwritev2 compat_sys_pwritev2 +288 common pkey_mprotect sys_pkey_mprotect +289 common pkey_alloc sys_pkey_alloc +290 common pkey_free sys_pkey_free +291 common statx sys_statx +292 time32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents +292 64 io_pgetevents sys_io_pgetevents +293 common rseq sys_rseq +294 common kexec_file_load sys_kexec_file_load +# 295 through 402 are unassigned to sync up with generic numbers don't use +403 32 clock_gettime64 sys_clock_gettime +404 32 clock_settime64 sys_clock_settime +405 32 clock_adjtime64 sys_clock_adjtime +406 32 clock_getres_time64 sys_clock_getres +407 32 clock_nanosleep_time64 sys_clock_nanosleep +408 32 timer_gettime64 sys_timer_gettime +409 32 timer_settime64 sys_timer_settime +410 32 timerfd_gettime64 sys_timerfd_gettime +411 32 timerfd_settime64 sys_timerfd_settime +412 32 utimensat_time64 sys_utimensat +413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 +414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 +416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 +417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 +418 32 mq_timedsend_time64 sys_mq_timedsend +419 32 mq_timedreceive_time64 sys_mq_timedreceive +420 32 semtimedop_time64 sys_semtimedop +421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 +422 32 futex_time64 sys_futex +423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval +424 common pidfd_send_signal sys_pidfd_send_signal +425 common io_uring_setup sys_io_uring_setup +426 common io_uring_enter sys_io_uring_enter +427 common io_uring_register sys_io_uring_register +428 common open_tree sys_open_tree +429 common move_mount sys_move_mount +430 common fsopen sys_fsopen +431 common fsconfig sys_fsconfig +432 common fsmount sys_fsmount +433 common fspick sys_fspick +434 common pidfd_open sys_pidfd_open +435 common clone3 sys_clone3 +436 common close_range sys_close_range +437 common openat2 sys_openat2 +438 common pidfd_getfd sys_pidfd_getfd +439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise +441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr +443 common quotactl_fd sys_quotactl_fd +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self +447 memfd_secret memfd_secret sys_memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/linux-user/loongarch64/syscall_nr.h b/linux-user/loongarch64/syscall_nr.h deleted file mode 100644 index 
be00915adf2..00000000000 --- a/linux-user/loongarch64/syscall_nr.h +++ /dev/null @@ -1,312 +0,0 @@ -/* - * This file contains the system call numbers. - * Do not modify. - * This file is generated by scripts/gensyscalls.sh - */ -#ifndef LINUX_USER_LOONGARCH_SYSCALL_NR_H -#define LINUX_USER_LOONGARCH_SYSCALL_NR_H - -#define TARGET_NR_io_setup 0 -#define TARGET_NR_io_destroy 1 -#define TARGET_NR_io_submit 2 -#define TARGET_NR_io_cancel 3 -#define TARGET_NR_io_getevents 4 -#define TARGET_NR_setxattr 5 -#define TARGET_NR_lsetxattr 6 -#define TARGET_NR_fsetxattr 7 -#define TARGET_NR_getxattr 8 -#define TARGET_NR_lgetxattr 9 -#define TARGET_NR_fgetxattr 10 -#define TARGET_NR_listxattr 11 -#define TARGET_NR_llistxattr 12 -#define TARGET_NR_flistxattr 13 -#define TARGET_NR_removexattr 14 -#define TARGET_NR_lremovexattr 15 -#define TARGET_NR_fremovexattr 16 -#define TARGET_NR_getcwd 17 -#define TARGET_NR_lookup_dcookie 18 -#define TARGET_NR_eventfd2 19 -#define TARGET_NR_epoll_create1 20 -#define TARGET_NR_epoll_ctl 21 -#define TARGET_NR_epoll_pwait 22 -#define TARGET_NR_dup 23 -#define TARGET_NR_dup3 24 -#define TARGET_NR_fcntl 25 -#define TARGET_NR_inotify_init1 26 -#define TARGET_NR_inotify_add_watch 27 -#define TARGET_NR_inotify_rm_watch 28 -#define TARGET_NR_ioctl 29 -#define TARGET_NR_ioprio_set 30 -#define TARGET_NR_ioprio_get 31 -#define TARGET_NR_flock 32 -#define TARGET_NR_mknodat 33 -#define TARGET_NR_mkdirat 34 -#define TARGET_NR_unlinkat 35 -#define TARGET_NR_symlinkat 36 -#define TARGET_NR_linkat 37 -#define TARGET_NR_umount2 39 -#define TARGET_NR_mount 40 -#define TARGET_NR_pivot_root 41 -#define TARGET_NR_nfsservctl 42 -#define TARGET_NR_statfs 43 -#define TARGET_NR_fstatfs 44 -#define TARGET_NR_truncate 45 -#define TARGET_NR_ftruncate 46 -#define TARGET_NR_fallocate 47 -#define TARGET_NR_faccessat 48 -#define TARGET_NR_chdir 49 -#define TARGET_NR_fchdir 50 -#define TARGET_NR_chroot 51 -#define TARGET_NR_fchmod 52 -#define TARGET_NR_fchmodat 53 -#define TARGET_NR_fchownat 54 -#define TARGET_NR_fchown 55 -#define TARGET_NR_openat 56 -#define TARGET_NR_close 57 -#define TARGET_NR_vhangup 58 -#define TARGET_NR_pipe2 59 -#define TARGET_NR_quotactl 60 -#define TARGET_NR_getdents64 61 -#define TARGET_NR_lseek 62 -#define TARGET_NR_read 63 -#define TARGET_NR_write 64 -#define TARGET_NR_readv 65 -#define TARGET_NR_writev 66 -#define TARGET_NR_pread64 67 -#define TARGET_NR_pwrite64 68 -#define TARGET_NR_preadv 69 -#define TARGET_NR_pwritev 70 -#define TARGET_NR_sendfile 71 -#define TARGET_NR_pselect6 72 -#define TARGET_NR_ppoll 73 -#define TARGET_NR_signalfd4 74 -#define TARGET_NR_vmsplice 75 -#define TARGET_NR_splice 76 -#define TARGET_NR_tee 77 -#define TARGET_NR_readlinkat 78 -#define TARGET_NR_sync 81 -#define TARGET_NR_fsync 82 -#define TARGET_NR_fdatasync 83 -#define TARGET_NR_sync_file_range 84 -#define TARGET_NR_timerfd_create 85 -#define TARGET_NR_timerfd_settime 86 -#define TARGET_NR_timerfd_gettime 87 -#define TARGET_NR_utimensat 88 -#define TARGET_NR_acct 89 -#define TARGET_NR_capget 90 -#define TARGET_NR_capset 91 -#define TARGET_NR_personality 92 -#define TARGET_NR_exit 93 -#define TARGET_NR_exit_group 94 -#define TARGET_NR_waitid 95 -#define TARGET_NR_set_tid_address 96 -#define TARGET_NR_unshare 97 -#define TARGET_NR_futex 98 -#define TARGET_NR_set_robust_list 99 -#define TARGET_NR_get_robust_list 100 -#define TARGET_NR_nanosleep 101 -#define TARGET_NR_getitimer 102 -#define TARGET_NR_setitimer 103 -#define TARGET_NR_kexec_load 104 -#define TARGET_NR_init_module 105 -#define 
TARGET_NR_delete_module 106 -#define TARGET_NR_timer_create 107 -#define TARGET_NR_timer_gettime 108 -#define TARGET_NR_timer_getoverrun 109 -#define TARGET_NR_timer_settime 110 -#define TARGET_NR_timer_delete 111 -#define TARGET_NR_clock_settime 112 -#define TARGET_NR_clock_gettime 113 -#define TARGET_NR_clock_getres 114 -#define TARGET_NR_clock_nanosleep 115 -#define TARGET_NR_syslog 116 -#define TARGET_NR_ptrace 117 -#define TARGET_NR_sched_setparam 118 -#define TARGET_NR_sched_setscheduler 119 -#define TARGET_NR_sched_getscheduler 120 -#define TARGET_NR_sched_getparam 121 -#define TARGET_NR_sched_setaffinity 122 -#define TARGET_NR_sched_getaffinity 123 -#define TARGET_NR_sched_yield 124 -#define TARGET_NR_sched_get_priority_max 125 -#define TARGET_NR_sched_get_priority_min 126 -#define TARGET_NR_sched_rr_get_interval 127 -#define TARGET_NR_restart_syscall 128 -#define TARGET_NR_kill 129 -#define TARGET_NR_tkill 130 -#define TARGET_NR_tgkill 131 -#define TARGET_NR_sigaltstack 132 -#define TARGET_NR_rt_sigsuspend 133 -#define TARGET_NR_rt_sigaction 134 -#define TARGET_NR_rt_sigprocmask 135 -#define TARGET_NR_rt_sigpending 136 -#define TARGET_NR_rt_sigtimedwait 137 -#define TARGET_NR_rt_sigqueueinfo 138 -#define TARGET_NR_rt_sigreturn 139 -#define TARGET_NR_setpriority 140 -#define TARGET_NR_getpriority 141 -#define TARGET_NR_reboot 142 -#define TARGET_NR_setregid 143 -#define TARGET_NR_setgid 144 -#define TARGET_NR_setreuid 145 -#define TARGET_NR_setuid 146 -#define TARGET_NR_setresuid 147 -#define TARGET_NR_getresuid 148 -#define TARGET_NR_setresgid 149 -#define TARGET_NR_getresgid 150 -#define TARGET_NR_setfsuid 151 -#define TARGET_NR_setfsgid 152 -#define TARGET_NR_times 153 -#define TARGET_NR_setpgid 154 -#define TARGET_NR_getpgid 155 -#define TARGET_NR_getsid 156 -#define TARGET_NR_setsid 157 -#define TARGET_NR_getgroups 158 -#define TARGET_NR_setgroups 159 -#define TARGET_NR_uname 160 -#define TARGET_NR_sethostname 161 -#define TARGET_NR_setdomainname 162 -#define TARGET_NR_getrusage 165 -#define TARGET_NR_umask 166 -#define TARGET_NR_prctl 167 -#define TARGET_NR_getcpu 168 -#define TARGET_NR_gettimeofday 169 -#define TARGET_NR_settimeofday 170 -#define TARGET_NR_adjtimex 171 -#define TARGET_NR_getpid 172 -#define TARGET_NR_getppid 173 -#define TARGET_NR_getuid 174 -#define TARGET_NR_geteuid 175 -#define TARGET_NR_getgid 176 -#define TARGET_NR_getegid 177 -#define TARGET_NR_gettid 178 -#define TARGET_NR_sysinfo 179 -#define TARGET_NR_mq_open 180 -#define TARGET_NR_mq_unlink 181 -#define TARGET_NR_mq_timedsend 182 -#define TARGET_NR_mq_timedreceive 183 -#define TARGET_NR_mq_notify 184 -#define TARGET_NR_mq_getsetattr 185 -#define TARGET_NR_msgget 186 -#define TARGET_NR_msgctl 187 -#define TARGET_NR_msgrcv 188 -#define TARGET_NR_msgsnd 189 -#define TARGET_NR_semget 190 -#define TARGET_NR_semctl 191 -#define TARGET_NR_semtimedop 192 -#define TARGET_NR_semop 193 -#define TARGET_NR_shmget 194 -#define TARGET_NR_shmctl 195 -#define TARGET_NR_shmat 196 -#define TARGET_NR_shmdt 197 -#define TARGET_NR_socket 198 -#define TARGET_NR_socketpair 199 -#define TARGET_NR_bind 200 -#define TARGET_NR_listen 201 -#define TARGET_NR_accept 202 -#define TARGET_NR_connect 203 -#define TARGET_NR_getsockname 204 -#define TARGET_NR_getpeername 205 -#define TARGET_NR_sendto 206 -#define TARGET_NR_recvfrom 207 -#define TARGET_NR_setsockopt 208 -#define TARGET_NR_getsockopt 209 -#define TARGET_NR_shutdown 210 -#define TARGET_NR_sendmsg 211 -#define TARGET_NR_recvmsg 212 -#define TARGET_NR_readahead 213 
-#define TARGET_NR_brk 214 -#define TARGET_NR_munmap 215 -#define TARGET_NR_mremap 216 -#define TARGET_NR_add_key 217 -#define TARGET_NR_request_key 218 -#define TARGET_NR_keyctl 219 -#define TARGET_NR_clone 220 -#define TARGET_NR_execve 221 -#define TARGET_NR_mmap 222 -#define TARGET_NR_fadvise64 223 -#define TARGET_NR_swapon 224 -#define TARGET_NR_swapoff 225 -#define TARGET_NR_mprotect 226 -#define TARGET_NR_msync 227 -#define TARGET_NR_mlock 228 -#define TARGET_NR_munlock 229 -#define TARGET_NR_mlockall 230 -#define TARGET_NR_munlockall 231 -#define TARGET_NR_mincore 232 -#define TARGET_NR_madvise 233 -#define TARGET_NR_remap_file_pages 234 -#define TARGET_NR_mbind 235 -#define TARGET_NR_get_mempolicy 236 -#define TARGET_NR_set_mempolicy 237 -#define TARGET_NR_migrate_pages 238 -#define TARGET_NR_move_pages 239 -#define TARGET_NR_rt_tgsigqueueinfo 240 -#define TARGET_NR_perf_event_open 241 -#define TARGET_NR_accept4 242 -#define TARGET_NR_recvmmsg 243 -#define TARGET_NR_arch_specific_syscall 244 -#define TARGET_NR_wait4 260 -#define TARGET_NR_prlimit64 261 -#define TARGET_NR_fanotify_init 262 -#define TARGET_NR_fanotify_mark 263 -#define TARGET_NR_name_to_handle_at 264 -#define TARGET_NR_open_by_handle_at 265 -#define TARGET_NR_clock_adjtime 266 -#define TARGET_NR_syncfs 267 -#define TARGET_NR_setns 268 -#define TARGET_NR_sendmmsg 269 -#define TARGET_NR_process_vm_readv 270 -#define TARGET_NR_process_vm_writev 271 -#define TARGET_NR_kcmp 272 -#define TARGET_NR_finit_module 273 -#define TARGET_NR_sched_setattr 274 -#define TARGET_NR_sched_getattr 275 -#define TARGET_NR_renameat2 276 -#define TARGET_NR_seccomp 277 -#define TARGET_NR_getrandom 278 -#define TARGET_NR_memfd_create 279 -#define TARGET_NR_bpf 280 -#define TARGET_NR_execveat 281 -#define TARGET_NR_userfaultfd 282 -#define TARGET_NR_membarrier 283 -#define TARGET_NR_mlock2 284 -#define TARGET_NR_copy_file_range 285 -#define TARGET_NR_preadv2 286 -#define TARGET_NR_pwritev2 287 -#define TARGET_NR_pkey_mprotect 288 -#define TARGET_NR_pkey_alloc 289 -#define TARGET_NR_pkey_free 290 -#define TARGET_NR_statx 291 -#define TARGET_NR_io_pgetevents 292 -#define TARGET_NR_rseq 293 -#define TARGET_NR_kexec_file_load 294 -#define TARGET_NR_pidfd_send_signal 424 -#define TARGET_NR_io_uring_setup 425 -#define TARGET_NR_io_uring_enter 426 -#define TARGET_NR_io_uring_register 427 -#define TARGET_NR_open_tree 428 -#define TARGET_NR_move_mount 429 -#define TARGET_NR_fsopen 430 -#define TARGET_NR_fsconfig 431 -#define TARGET_NR_fsmount 432 -#define TARGET_NR_fspick 433 -#define TARGET_NR_pidfd_open 434 -#define TARGET_NR_clone3 435 -#define TARGET_NR_close_range 436 -#define TARGET_NR_openat2 437 -#define TARGET_NR_pidfd_getfd 438 -#define TARGET_NR_faccessat2 439 -#define TARGET_NR_process_madvise 440 -#define TARGET_NR_epoll_pwait2 441 -#define TARGET_NR_mount_setattr 442 -#define TARGET_NR_quotactl_fd 443 -#define TARGET_NR_landlock_create_ruleset 444 -#define TARGET_NR_landlock_add_rule 445 -#define TARGET_NR_landlock_restrict_self 446 -#define TARGET_NR_process_mrelease 448 -#define TARGET_NR_futex_waitv 449 -#define TARGET_NR_set_mempolicy_home_node 450 -#define TARGET_NR_syscalls 451 - -#endif /* LINUX_USER_LOONGARCH_SYSCALL_NR_H */ diff --git a/linux-user/loongarch64/syscallhdr.sh b/linux-user/loongarch64/syscallhdr.sh new file mode 100644 index 00000000000..3d8a993b424 --- /dev/null +++ b/linux-user/loongarch64/syscallhdr.sh @@ -0,0 +1,28 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr 
',' '|'` +prefix="$4" +offset="$5" + +fileguard=LINUX_USER_LOONGARCH64_`basename "$out" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + echo "#ifndef ${fileguard}" + echo "#define ${fileguard} 1" + echo "" + + while read nr abi name entry compat ; do + if [ -z "$offset" ]; then + echo "#define TARGET_NR_${prefix}${name} $nr" + else + echo "#define TARGET_NR_${prefix}${name} ($offset + $nr)" + fi + done + + echo "" + echo "#endif /* ${fileguard} */" +) > "$out" diff --git a/linux-user/loongarch64/vdso.so b/linux-user/loongarch64/vdso.so index bfaa26f2bfe..7c2de6c50e7 100755 Binary files a/linux-user/loongarch64/vdso.so and b/linux-user/loongarch64/vdso.so differ diff --git a/linux-user/m68k/cpu_loop.c b/linux-user/m68k/cpu_loop.c index f79b8e4ab05..5da91b997ae 100644 --- a/linux-user/m68k/cpu_loop.c +++ b/linux-user/m68k/cpu_loop.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" void cpu_loop(CPUM68KState *env) @@ -92,7 +92,7 @@ void cpu_loop(CPUM68KState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { CPUState *cpu = env_cpu(env); TaskState *ts = get_task_state(cpu); diff --git a/linux-user/m68k/syscall.tbl b/linux-user/m68k/syscall.tbl index 79c2d24c89d..b6094f8933b 100644 --- a/linux-user/m68k/syscall.tbl +++ b/linux-user/m68k/syscall.tbl @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note # # system call numbers and entry vectors for m68k # @@ -141,7 +141,7 @@ 131 common quotactl sys_quotactl 132 common getpgid sys_getpgid 133 common fchdir sys_fchdir -134 common bdflush sys_bdflush +134 common bdflush sys_ni_syscall 135 common sysfs sys_sysfs 136 common personality sys_personality # 137 was afs_syscall @@ -255,7 +255,7 @@ 245 common io_cancel sys_io_cancel 246 common fadvise64 sys_fadvise64 247 common exit_group sys_exit_group -248 common lookup_dcookie sys_lookup_dcookie +248 common lookup_dcookie sys_ni_syscall 249 common epoll_create sys_epoll_create 250 common epoll_ctl sys_epoll_ctl 251 common epoll_wait sys_epoll_wait @@ -442,7 +442,23 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 common quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/linux-user/m68k/syscallhdr.sh 
b/linux-user/m68k/syscallhdr.sh index eeb4d01d34d..39b11dd05e2 100644 --- a/linux-user/m68k/syscallhdr.sh +++ b/linux-user/m68k/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/m68k/target_signal.h b/linux-user/m68k/target_signal.h index 6e0f4b74e39..b05b9303b0d 100644 --- a/linux-user/m68k/target_signal.h +++ b/linux-user/m68k/target_signal.h @@ -3,6 +3,7 @@ #include "../generic/signal.h" +#define TARGET_ARCH_HAS_SA_RESTORER 1 #define TARGET_ARCH_HAS_SETUP_FRAME #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 diff --git a/linux-user/main.c b/linux-user/main.c index 8143a0d4b02..68972f00a15 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -39,9 +39,10 @@ #include "qemu/module.h" #include "qemu/plugin.h" #include "user/guest-base.h" -#include "exec/exec-all.h" +#include "user/page-protection.h" #include "exec/gdbstub.h" #include "gdbstub/user.h" +#include "accel/accel-ops.h" #include "tcg/startup.h" #include "qemu/timer.h" #include "qemu/envlist.h" @@ -49,7 +50,7 @@ #include "elf.h" #include "trace/control.h" #include "target_elf.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "crypto/init.h" #include "fd-trans.h" #include "signal-common.h" @@ -71,6 +72,7 @@ char *exec_path; char real_exec_path[PATH_MAX]; static bool opt_one_insn_per_tb; +static unsigned long opt_tb_size; static const char *argv0; static const char *gdbstub; static envlist_t *envlist; @@ -121,6 +123,7 @@ static const char *last_log_filename; #endif unsigned long reserved_va; +unsigned long guest_addr_max; static void usage(int exitcode); @@ -147,12 +150,14 @@ void fork_start(void) cpu_list_lock(); qemu_plugin_user_prefork_lock(); gdbserver_fork_start(); + fd_trans_prefork(); } void fork_end(pid_t pid) { bool child = pid == 0; + fd_trans_postfork(); qemu_plugin_user_postfork(child); mmap_fork_end(child); if (child) { @@ -412,11 +417,25 @@ static void handle_arg_reserved_va(const char *arg) reserved_va = val ? 
val - 1 : 0; } +static const char *rtsig_map = CONFIG_QEMU_RTSIG_MAP; + +static void handle_arg_rtsig_map(const char *arg) +{ + rtsig_map = arg; +} + static void handle_arg_one_insn_per_tb(const char *arg) { opt_one_insn_per_tb = true; } +static void handle_arg_tb_size(const char *arg) +{ + if (qemu_strtoul(arg, NULL, 0, &opt_tb_size)) { + usage(EXIT_FAILURE); + } +} + static void handle_arg_strace(const char *arg) { enable_strace = true; @@ -494,6 +513,9 @@ static const struct qemu_argument arg_table[] = { "address", "set guest_base address to 'address'"}, {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va, "size", "reserve 'size' bytes for guest virtual address space"}, + {"t", "QEMU_RTSIG_MAP", true, handle_arg_rtsig_map, + "tsig hsig n[,...]", + "map target rt signals [tsig,tsig+n) to [hsig,hsig+n]"}, {"d", "QEMU_LOG", true, handle_arg_log, "item[,...]", "enable logging of specified items " "(use '-d help' for a list of items)"}, @@ -506,6 +528,8 @@ static const struct qemu_argument arg_table[] = { {"one-insn-per-tb", "QEMU_ONE_INSN_PER_TB", false, handle_arg_one_insn_per_tb, "", "run with one guest instruction per emulated TB"}, + {"tb-size", "QEMU_TB_SIZE", true, handle_arg_tb_size, + "size", "TCG translation block cache size"}, {"strace", "QEMU_STRACE", false, handle_arg_strace, "", "log system calls"}, {"seed", "QEMU_RAND_SEED", true, handle_arg_seed, @@ -797,7 +821,9 @@ int main(int argc, char **argv, char **envp) accel_init_interfaces(ac); object_property_set_bool(OBJECT(accel), "one-insn-per-tb", opt_one_insn_per_tb, &error_abort); - ac->init_machine(NULL); + object_property_set_int(OBJECT(accel), "tb-size", + opt_tb_size, &error_abort); + ac->init_machine(accel, NULL); } /* @@ -836,6 +862,13 @@ int main(int argc, char **argv, char **envp) /* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */ reserved_va = max_reserved_va; } + if (reserved_va != 0) { + guest_addr_max = reserved_va; + } else if (MIN(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) { + guest_addr_max = UINT32_MAX; + } else { + guest_addr_max = ~0ul; + } /* * Temporarily disable @@ -1002,7 +1035,7 @@ int main(int argc, char **argv, char **envp) target_set_brk(info->brk); syscall_init(); - signal_init(); + signal_init(rtsig_map); /* Now that we've loaded the binary, GUEST_BASE is fixed. 
Delay generating the prologue until now so that the prologue can take @@ -1012,12 +1045,7 @@ int main(int argc, char **argv, char **envp) target_cpu_copy_regs(env, regs); if (gdbstub) { - if (gdbserver_start(gdbstub) < 0) { - fprintf(stderr, "qemu: could not open gdbserver on %s\n", - gdbstub); - exit(EXIT_FAILURE); - } - gdb_handlesig(cpu, 0, NULL, NULL, 0); + gdbserver_start(gdbstub, &error_fatal); } #ifdef CONFIG_SEMIHOSTING diff --git a/linux-user/meson.build b/linux-user/meson.build index bc41e8c3bca..efca8433698 100644 --- a/linux-user/meson.build +++ b/linux-user/meson.build @@ -28,6 +28,10 @@ linux_user_ss.add(when: 'TARGET_HAS_BFLT', if_true: files('flatload.c')) linux_user_ss.add(when: 'TARGET_I386', if_true: files('vm86.c')) linux_user_ss.add(when: 'CONFIG_ARM_COMPATIBLE_SEMIHOSTING', if_true: files('semihost.c')) +if get_option('plugins') + linux_user_ss.add(files('plugin-api.c')) +endif + syscall_nr_generators = {} gen_vdso_exe = executable('gen-vdso', 'gen-vdso.c', @@ -38,6 +42,7 @@ gen_vdso = generator(gen_vdso_exe, output: '@BASENAME@.c.inc', subdir('aarch64') subdir('alpha') subdir('arm') +subdir('hexagon') subdir('hppa') subdir('i386') subdir('loongarch64') @@ -45,6 +50,7 @@ subdir('m68k') subdir('microblaze') subdir('mips64') subdir('mips') +subdir('openrisc') subdir('ppc') subdir('riscv') subdir('s390x') diff --git a/linux-user/microblaze/cpu_loop.c b/linux-user/microblaze/cpu_loop.c index 212e62d0a62..87236c166f2 100644 --- a/linux-user/microblaze/cpu_loop.c +++ b/linux-user/microblaze/cpu_loop.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" void cpu_loop(CPUMBState *env) @@ -127,7 +127,7 @@ void cpu_loop(CPUMBState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { env->regs[0] = regs->r0; env->regs[1] = regs->r1; diff --git a/linux-user/microblaze/syscall.tbl b/linux-user/microblaze/syscall.tbl index b11395a20c2..e3b643870e8 100644 --- a/linux-user/microblaze/syscall.tbl +++ b/linux-user/microblaze/syscall.tbl @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note # # system call numbers and entry vectors for microblaze # @@ -141,7 +141,7 @@ 131 common quotactl sys_quotactl 132 common getpgid sys_getpgid 133 common fchdir sys_fchdir -134 common bdflush sys_bdflush +134 common bdflush sys_ni_syscall 135 common sysfs sys_sysfs 136 common personality sys_personality 137 common afs_syscall sys_ni_syscall @@ -260,7 +260,7 @@ 250 common fadvise64 sys_fadvise64 # 251 is available for reuse (was briefly sys_set_zone_reclaim) 252 common exit_group sys_exit_group -253 common lookup_dcookie sys_lookup_dcookie +253 common lookup_dcookie sys_ni_syscall 254 common epoll_create sys_epoll_create 255 common epoll_ctl sys_epoll_ctl 256 common epoll_wait sys_epoll_wait @@ -448,7 +448,23 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 common quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv 
sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/linux-user/microblaze/syscallhdr.sh b/linux-user/microblaze/syscallhdr.sh index f55dce8a624..b42b6691546 100644 --- a/linux-user/microblaze/syscallhdr.sh +++ b/linux-user/microblaze/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/microblaze/target_signal.h b/linux-user/microblaze/target_signal.h index 7dc5c45f00a..ffe4442213a 100644 --- a/linux-user/microblaze/target_signal.h +++ b/linux-user/microblaze/target_signal.h @@ -3,6 +3,8 @@ #include "../generic/signal.h" +#define TARGET_SA_RESTORER 0x04000000 + #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 #endif /* MICROBLAZE_TARGET_SIGNAL_H */ diff --git a/linux-user/mips/cpu_loop.c b/linux-user/mips/cpu_loop.c index 462387a0737..6405806eb02 100644 --- a/linux-user/mips/cpu_loop.c +++ b/linux-user/mips/cpu_loop.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" #include "elf.h" #include "internal.h" @@ -211,7 +211,7 @@ void cpu_loop(CPUMIPSState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { CPUState *cpu = env_cpu(env); TaskState *ts = get_task_state(cpu); diff --git a/linux-user/mips/syscall-args-o32.c.inc b/linux-user/mips/syscall-args-o32.c.inc index a6a2c5c566c..780c0a8a495 100644 --- a/linux-user/mips/syscall-args-o32.c.inc +++ b/linux-user/mips/syscall-args-o32.c.inc @@ -441,3 +441,23 @@ [ 440] = 5, /* process_madvise */ [ 441] = 6, /* epoll_pwait2 */ [ 442] = 5, /* mount_setattr */ + [ 443] = 4, /* quotactl_fd */ + [ 444] = 3, /* landlock_create_ruleset */ + [ 445] = 4, /* landlock_add_rule */ + [ 446] = 2, /* landlock_restrict_self */ + [ 447] = 1, /* memfd_secret */ + [ 448] = 2, /* process_mrelease */ + [ 449] = 5, /* futex_waitv */ + [ 450] = 4, /* set_mempolicy_home_node */ + [ 451] = 4, /* cachestat */ + [ 452] = 4, /* fchmodat2 */ + [ 453] = 3, /* map_shadow_stack */ + [ 454] = 4, /* futex_wake */ + [ 455] = 6, /* futex_wait */ + [ 456] = 4, /* futex_requeue */ + [ 457] = 4, /* statmount */ + [ 458] = 4, /* listmount */ + [ 459] = 4, /* lsm_get_self_attr */ + [ 460] = 4, /* lsm_set_self_attr */ + [ 461] = 3, /* lsm_list_modules */ + [ 462] = 3, /* mseal */ diff --git a/linux-user/mips/syscall_o32.tbl b/linux-user/mips/syscall_o32.tbl index d560c467a8c..360055c626b 100644 --- a/linux-user/mips/syscall_o32.tbl +++ b/linux-user/mips/syscall_o32.tbl @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note # # system call numbers and entry vectors for mips # @@ -27,7 +27,7 @@ 17 o32 break sys_ni_syscall # 18 was sys_stat 18 o32 unused18 sys_ni_syscall -19 o32 lseek sys_lseek +19 o32 lseek sys_lseek compat_sys_lseek 20 o32 getpid sys_getpid 21 
o32 mount sys_mount 22 o32 umount sys_oldumount @@ -145,7 +145,7 @@ 131 o32 quotactl sys_quotactl 132 o32 getpgid sys_getpgid 133 o32 fchdir sys_fchdir -134 o32 bdflush sys_bdflush +134 o32 bdflush sys_ni_syscall 135 o32 sysfs sys_sysfs 136 o32 personality sys_personality sys_32_personality 137 o32 afs_syscall sys_ni_syscall @@ -258,7 +258,7 @@ 244 o32 io_submit sys_io_submit compat_sys_io_submit 245 o32 io_cancel sys_io_cancel 246 o32 exit_group sys_exit_group -247 o32 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +247 o32 lookup_dcookie sys_ni_syscall 248 o32 epoll_create sys_epoll_create 249 o32 epoll_ctl sys_epoll_ctl 250 o32 epoll_wait sys_epoll_wait @@ -279,9 +279,9 @@ 265 o32 clock_nanosleep sys_clock_nanosleep_time32 266 o32 tgkill sys_tgkill 267 o32 utimes sys_utimes_time32 -268 o32 mbind sys_mbind compat_sys_mbind -269 o32 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy -270 o32 set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +268 o32 mbind sys_mbind +269 o32 get_mempolicy sys_get_mempolicy +270 o32 set_mempolicy sys_set_mempolicy 271 o32 mq_open sys_mq_open compat_sys_mq_open 272 o32 mq_unlink sys_mq_unlink 273 o32 mq_timedsend sys_mq_timedsend_time32 @@ -298,7 +298,7 @@ 284 o32 inotify_init sys_inotify_init 285 o32 inotify_add_watch sys_inotify_add_watch 286 o32 inotify_rm_watch sys_inotify_rm_watch -287 o32 migrate_pages sys_migrate_pages compat_sys_migrate_pages +287 o32 migrate_pages sys_migrate_pages 288 o32 openat sys_openat compat_sys_openat 289 o32 mkdirat sys_mkdirat 290 o32 mknodat sys_mknodat @@ -319,7 +319,7 @@ 305 o32 sync_file_range sys_sync_file_range sys32_sync_file_range 306 o32 tee sys_tee 307 o32 vmsplice sys_vmsplice -308 o32 move_pages sys_move_pages compat_sys_move_pages +308 o32 move_pages sys_move_pages 309 o32 set_robust_list sys_set_robust_list compat_sys_set_robust_list 310 o32 get_robust_list sys_get_robust_list compat_sys_get_robust_list 311 o32 kexec_load sys_kexec_load compat_sys_kexec_load @@ -403,7 +403,7 @@ 412 o32 utimensat_time64 sys_utimensat sys_utimensat 413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 -416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +416 o32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend 419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive @@ -430,7 +430,23 @@ 440 o32 process_madvise sys_process_madvise 441 o32 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 o32 mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 o32 quotactl_fd sys_quotactl_fd 444 o32 landlock_create_ruleset sys_landlock_create_ruleset 445 o32 landlock_add_rule sys_landlock_add_rule 446 o32 landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 o32 process_mrelease sys_process_mrelease +449 o32 futex_waitv sys_futex_waitv +450 o32 set_mempolicy_home_node sys_set_mempolicy_home_node +451 o32 cachestat sys_cachestat +452 o32 fchmodat2 sys_fchmodat2 +453 o32 map_shadow_stack sys_map_shadow_stack +454 o32 futex_wake sys_futex_wake +455 o32 futex_wait sys_futex_wait +456 o32 futex_requeue sys_futex_requeue +457 o32 statmount sys_statmount +458 o32 listmount sys_listmount +459 o32 lsm_get_self_attr sys_lsm_get_self_attr +460 o32 lsm_set_self_attr sys_lsm_set_self_attr +461 o32 lsm_list_modules sys_lsm_list_modules +462 
o32 mseal sys_mseal diff --git a/linux-user/mips/syscallhdr.sh b/linux-user/mips/syscallhdr.sh index 761e3e47dda..cd7043ef5a8 100644 --- a/linux-user/mips/syscallhdr.sh +++ b/linux-user/mips/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/mips/target_elf.h b/linux-user/mips/target_elf.h index b965e86b2bf..71a32315a85 100644 --- a/linux-user/mips/target_elf.h +++ b/linux-user/mips/target_elf.h @@ -12,9 +12,6 @@ static inline const char *cpu_get_model(uint32_t eflags) if ((eflags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) { return "mips32r6-generic"; } - if ((eflags & EF_MIPS_MACH) == EF_MIPS_MACH_5900) { - return "R5900"; - } if (eflags & EF_MIPS_NAN2008) { return "P5600"; } diff --git a/linux-user/mips/target_signal.h b/linux-user/mips/target_signal.h index fa542c1f4e2..4481426b99f 100644 --- a/linux-user/mips/target_signal.h +++ b/linux-user/mips/target_signal.h @@ -64,7 +64,6 @@ typedef struct target_sigaltstack { #define TARGET_SA_NODEFER 0x40000000 #define TARGET_SA_RESTART 0x10000000 #define TARGET_SA_RESETHAND 0x80000000 -#define TARGET_SA_RESTORER 0x04000000 /* Only for O32 */ #define TARGET_MINSIGSTKSZ 2048 diff --git a/linux-user/mips64/syscall_n32.tbl b/linux-user/mips64/syscall_n32.tbl index 9220909526f..793eca6635c 100644 --- a/linux-user/mips64/syscall_n32.tbl +++ b/linux-user/mips64/syscall_n32.tbl @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note # # system call numbers and entry vectors for mips # @@ -214,7 +214,7 @@ 203 n32 io_submit compat_sys_io_submit 204 n32 io_cancel sys_io_cancel 205 n32 exit_group sys_exit_group -206 n32 lookup_dcookie sys_lookup_dcookie +206 n32 lookup_dcookie sys_ni_syscall 207 n32 epoll_create sys_epoll_create 208 n32 epoll_ctl sys_epoll_ctl 209 n32 epoll_wait sys_epoll_wait @@ -239,9 +239,9 @@ 228 n32 clock_nanosleep sys_clock_nanosleep_time32 229 n32 tgkill sys_tgkill 230 n32 utimes sys_utimes_time32 -231 n32 mbind compat_sys_mbind -232 n32 get_mempolicy compat_sys_get_mempolicy -233 n32 set_mempolicy compat_sys_set_mempolicy +231 n32 mbind sys_mbind +232 n32 get_mempolicy sys_get_mempolicy +233 n32 set_mempolicy sys_set_mempolicy 234 n32 mq_open compat_sys_mq_open 235 n32 mq_unlink sys_mq_unlink 236 n32 mq_timedsend sys_mq_timedsend_time32 @@ -258,7 +258,7 @@ 247 n32 inotify_init sys_inotify_init 248 n32 inotify_add_watch sys_inotify_add_watch 249 n32 inotify_rm_watch sys_inotify_rm_watch -250 n32 migrate_pages compat_sys_migrate_pages +250 n32 migrate_pages sys_migrate_pages 251 n32 openat sys_openat 252 n32 mkdirat sys_mkdirat 253 n32 mknodat sys_mknodat @@ -279,7 +279,7 @@ 268 n32 sync_file_range sys_sync_file_range 269 n32 tee sys_tee 270 n32 vmsplice sys_vmsplice -271 n32 move_pages compat_sys_move_pages +271 n32 move_pages sys_move_pages 272 n32 set_robust_list compat_sys_set_robust_list 273 n32 get_robust_list compat_sys_get_robust_list 274 n32 kexec_load compat_sys_kexec_load @@ -354,7 +354,7 @@ 412 n32 utimensat_time64 sys_utimensat 413 n32 pselect6_time64 compat_sys_pselect6_time64 414 n32 ppoll_time64 compat_sys_ppoll_time64 -416 n32 io_pgetevents_time64 sys_io_pgetevents +416 n32 io_pgetevents_time64 compat_sys_io_pgetevents_time64 417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64 418 n32 mq_timedsend_time64 sys_mq_timedsend 419 n32 mq_timedreceive_time64 sys_mq_timedreceive @@ -381,7 +381,23 @@ 440 n32 process_madvise sys_process_madvise 
441 n32 epoll_pwait2 compat_sys_epoll_pwait2 442 n32 mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 n32 quotactl_fd sys_quotactl_fd 444 n32 landlock_create_ruleset sys_landlock_create_ruleset 445 n32 landlock_add_rule sys_landlock_add_rule 446 n32 landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 n32 process_mrelease sys_process_mrelease +449 n32 futex_waitv sys_futex_waitv +450 n32 set_mempolicy_home_node sys_set_mempolicy_home_node +451 n32 cachestat sys_cachestat +452 n32 fchmodat2 sys_fchmodat2 +453 n32 map_shadow_stack sys_map_shadow_stack +454 n32 futex_wake sys_futex_wake +455 n32 futex_wait sys_futex_wait +456 n32 futex_requeue sys_futex_requeue +457 n32 statmount sys_statmount +458 n32 listmount sys_listmount +459 n32 lsm_get_self_attr sys_lsm_get_self_attr +460 n32 lsm_set_self_attr sys_lsm_set_self_attr +461 n32 lsm_list_modules sys_lsm_list_modules +462 n32 mseal sys_mseal diff --git a/linux-user/mips64/syscall_n64.tbl b/linux-user/mips64/syscall_n64.tbl index 9cd1c34f31b..ebff531accb 100644 --- a/linux-user/mips64/syscall_n64.tbl +++ b/linux-user/mips64/syscall_n64.tbl @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note # # system call numbers and entry vectors for mips # @@ -214,7 +214,7 @@ 203 n64 io_submit sys_io_submit 204 n64 io_cancel sys_io_cancel 205 n64 exit_group sys_exit_group -206 n64 lookup_dcookie sys_lookup_dcookie +206 n64 lookup_dcookie sys_ni_syscall 207 n64 epoll_create sys_epoll_create 208 n64 epoll_ctl sys_epoll_ctl 209 n64 epoll_wait sys_epoll_wait @@ -357,7 +357,23 @@ 440 n64 process_madvise sys_process_madvise 441 n64 epoll_pwait2 sys_epoll_pwait2 442 n64 mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 n64 quotactl_fd sys_quotactl_fd 444 n64 landlock_create_ruleset sys_landlock_create_ruleset 445 n64 landlock_add_rule sys_landlock_add_rule 446 n64 landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 n64 process_mrelease sys_process_mrelease +449 n64 futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 n64 cachestat sys_cachestat +452 n64 fchmodat2 sys_fchmodat2 +453 n64 map_shadow_stack sys_map_shadow_stack +454 n64 futex_wake sys_futex_wake +455 n64 futex_wait sys_futex_wait +456 n64 futex_requeue sys_futex_requeue +457 n64 statmount sys_statmount +458 n64 listmount sys_listmount +459 n64 lsm_get_self_attr sys_lsm_get_self_attr +460 n64 lsm_set_self_attr sys_lsm_set_self_attr +461 n64 lsm_list_modules sys_lsm_list_modules +462 n64 mseal sys_mseal diff --git a/linux-user/mips64/syscallhdr.sh b/linux-user/mips64/syscallhdr.sh index ed5a45165a4..a4339b2041b 100644 --- a/linux-user/mips64/syscallhdr.sh +++ b/linux-user/mips64/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/mips64/target_elf.h b/linux-user/mips64/target_elf.h index 5f2f2df29f7..502af9d2781 100644 --- a/linux-user/mips64/target_elf.h +++ b/linux-user/mips64/target_elf.h @@ -9,11 +9,27 @@ #define MIPS64_TARGET_ELF_H static inline const char *cpu_get_model(uint32_t eflags) { - if ((eflags & EF_MIPS_ARCH) == EF_MIPS_ARCH_64R6) { - return "I6400"; + switch (eflags & EF_MIPS_MACH) { + case EF_MIPS_MACH_OCTEON: + case EF_MIPS_MACH_OCTEON2: + case EF_MIPS_MACH_OCTEON3: + return "Octeon68XX"; + case EF_MIPS_MACH_LS2E: + return "Loongson-2E"; + 
case EF_MIPS_MACH_LS2F: + return "Loongson-2F"; + case EF_MIPS_MACH_LS3A: + return "Loongson-3A1000"; + default: + break; } - if ((eflags & EF_MIPS_MACH) == EF_MIPS_MACH_5900) { - return "R5900"; + switch (eflags & EF_MIPS_ARCH) { + case EF_MIPS_ARCH_64R6: + return "I6400"; + case EF_MIPS_ARCH_64R2: + return "MIPS64R2-generic"; + default: + break; } return "5KEf"; } diff --git a/linux-user/mmap.c b/linux-user/mmap.c index 4d09a72fadc..002e1e668e6 100644 --- a/linux-user/mmap.c +++ b/linux-user/mmap.c @@ -21,7 +21,11 @@ #include "trace.h" #include "exec/log.h" #include "exec/page-protection.h" +#include "exec/mmap-lock.h" +#include "exec/tb-flush.h" +#include "exec/translation-block.h" #include "qemu.h" +#include "user/page-protection.h" #include "user-internals.h" #include "user-mmap.h" #include "target_mman.h" @@ -283,6 +287,40 @@ static int do_munmap(void *addr, size_t len) return munmap(addr, len); } +/* + * Perform a pread on behalf of target_mmap. We can reach EOF, we can be + * interrupted by signals, and in general there's no good error return path. + * If @zero, zero the rest of the block at EOF. + * Return true on success. + */ +static bool mmap_pread(int fd, void *p, size_t len, off_t offset, bool zero) +{ + while (1) { + ssize_t r = pread(fd, p, len, offset); + + if (likely(r == len)) { + /* Complete */ + return true; + } + if (r == 0) { + /* EOF */ + if (zero) { + memset(p, 0, len); + } + return true; + } + if (r > 0) { + /* Short read */ + p += r; + len -= r; + offset += r; + } else if (errno != EINTR) { + /* Error */ + return false; + } + } +} + /* * Map an incomplete host page. * @@ -357,10 +395,9 @@ static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last, /* Read or zero the new guest pages. */ if (flags & MAP_ANONYMOUS) { memset(g2h_untagged(start), 0, last - start + 1); - } else { - if (pread(fd, g2h_untagged(start), last - start + 1, offset) == -1) { - return false; - } + } else if (!mmap_pread(fd, g2h_untagged(start), last - start + 1, + offset, true)) { + return false; } /* Put final protection */ @@ -560,9 +597,13 @@ static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len, int host_prot, int flags, int page_flags, int fd, off_t offset) { - void *p, *want_p = g2h_untagged(start); + void *p, *want_p = NULL; abi_ulong last; + if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) { + want_p = g2h_untagged(start); + } + p = mmap(want_p, len, host_prot, flags, fd, offset); if (p == MAP_FAILED) { return -1; @@ -604,17 +645,21 @@ static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len, * * However, this case is rather common with executable images, * so the workaround is important for even trivial tests, whereas - * the mmap of of a file being extended is less common. + * the mmap of a file being extended is less common. 
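As an illustration of the pattern (this sketch is not part of the patch): the mmap_pread() helper added to linux-user/mmap.c wraps pread() in a retry loop, where EINTR restarts the read, a short read advances the buffer and offset and continues, and EOF either zero-fills the remainder (when @zero is set, as the mmap_frag() caller requests) or simply succeeds. A minimal standalone sketch of the same idea, using a made-up helper name read_full() and the zero-fill behaviour:

    #include <errno.h>
    #include <stdbool.h>
    #include <string.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Read exactly 'len' bytes at 'offset', retrying on EINTR and short
     * reads; on EOF, zero-fill the remainder and still report success. */
    static bool read_full(int fd, void *buf, size_t len, off_t offset)
    {
        char *p = buf;

        while (len > 0) {
            ssize_t r = pread(fd, p, len, offset);

            if (r > 0) {                /* full or short read: consume it */
                p += r;
                len -= r;
                offset += r;
            } else if (r == 0) {        /* EOF: like mmap_pread(..., zero=true) */
                memset(p, 0, len);
                return true;
            } else if (errno != EINTR) {
                return false;           /* hard error; EINTR just retries */
            }
        }
        return true;
    }

The motivation is visible in the hunks above: the old code either ignored a short read entirely (mmap_frag() only checked for -1) or failed outright on one (mmap_h_gt_g(), per its removed TODO), whereas the loop lets both callers handle short reads, EINTR and EOF consistently.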
*/ static abi_long mmap_h_lt_g(abi_ulong start, abi_ulong len, int host_prot, int mmap_flags, int page_flags, int fd, off_t offset, int host_page_size) { - void *p, *want_p = g2h_untagged(start); + void *p, *want_p = NULL; off_t fileend_adj = 0; int flags = mmap_flags; abi_ulong last, pass_last; + if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) { + want_p = g2h_untagged(start); + } + if (!(flags & MAP_ANONYMOUS)) { struct stat sb; @@ -740,12 +785,16 @@ static abi_long mmap_h_gt_g(abi_ulong start, abi_ulong len, int flags, int page_flags, int fd, off_t offset, int host_page_size) { - void *p, *want_p = g2h_untagged(start); + void *p, *want_p = NULL; off_t host_offset = offset & -host_page_size; abi_ulong last, real_start, real_last; bool misaligned_offset = false; size_t host_len; + if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) { + want_p = g2h_untagged(start); + } + if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) { /* * Adjust the offset to something representable on the host. @@ -841,8 +890,7 @@ static abi_long mmap_h_gt_g(abi_ulong start, abi_ulong len, } if (misaligned_offset) { - /* TODO: The read could be short. */ - if (pread(fd, p, host_len, offset + real_start - start) != host_len) { + if (!mmap_pread(fd, p, host_len, offset + real_start - start, false)) { do_munmap(p, host_len); return -1; } diff --git a/linux-user/openrisc/cpu_loop.c b/linux-user/openrisc/cpu_loop.c index a7aa586c8f9..306b4f8eb43 100644 --- a/linux-user/openrisc/cpu_loop.c +++ b/linux-user/openrisc/cpu_loop.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" void cpu_loop(CPUOpenRISCState *env) @@ -83,7 +83,7 @@ void cpu_loop(CPUOpenRISCState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { int i; diff --git a/linux-user/openrisc/meson.build b/linux-user/openrisc/meson.build new file mode 100644 index 00000000000..273e7a0c38a --- /dev/null +++ b/linux-user/openrisc/meson.build @@ -0,0 +1,5 @@ +syscall_nr_generators += { + 'openrisc': generator(sh, + arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ], + output: '@BASENAME@_nr.h') +} diff --git a/linux-user/openrisc/syscall.tbl b/linux-user/openrisc/syscall.tbl new file mode 100644 index 00000000000..845e24eb372 --- /dev/null +++ b/linux-user/openrisc/syscall.tbl @@ -0,0 +1,405 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# This file contains the system call numbers for all of the +# more recently added architectures. +# +# As a basic principle, no duplication of functionality +# should be added, e.g. we don't use lseek when llseek +# is present. New architectures should use this file +# and implement the less feature-full calls in user space. 
+# +0 common io_setup sys_io_setup compat_sys_io_setup +1 common io_destroy sys_io_destroy +2 common io_submit sys_io_submit compat_sys_io_submit +3 common io_cancel sys_io_cancel +4 time32 io_getevents sys_io_getevents_time32 +4 64 io_getevents sys_io_getevents +5 common setxattr sys_setxattr +6 common lsetxattr sys_lsetxattr +7 common fsetxattr sys_fsetxattr +8 common getxattr sys_getxattr +9 common lgetxattr sys_lgetxattr +10 common fgetxattr sys_fgetxattr +11 common listxattr sys_listxattr +12 common llistxattr sys_llistxattr +13 common flistxattr sys_flistxattr +14 common removexattr sys_removexattr +15 common lremovexattr sys_lremovexattr +16 common fremovexattr sys_fremovexattr +17 common getcwd sys_getcwd +18 common lookup_dcookie sys_ni_syscall +19 common eventfd2 sys_eventfd2 +20 common epoll_create1 sys_epoll_create1 +21 common epoll_ctl sys_epoll_ctl +22 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait +23 common dup sys_dup +24 common dup3 sys_dup3 +25 32 fcntl64 sys_fcntl64 compat_sys_fcntl64 +25 64 fcntl sys_fcntl +26 common inotify_init1 sys_inotify_init1 +27 common inotify_add_watch sys_inotify_add_watch +28 common inotify_rm_watch sys_inotify_rm_watch +29 common ioctl sys_ioctl compat_sys_ioctl +30 common ioprio_set sys_ioprio_set +31 common ioprio_get sys_ioprio_get +32 common flock sys_flock +33 common mknodat sys_mknodat +34 common mkdirat sys_mkdirat +35 common unlinkat sys_unlinkat +36 common symlinkat sys_symlinkat +37 common linkat sys_linkat +# renameat is superseded with flags by renameat2 +38 renameat renameat sys_renameat +39 common umount2 sys_umount +40 common mount sys_mount +41 common pivot_root sys_pivot_root +42 common nfsservctl sys_ni_syscall +43 32 statfs64 sys_statfs64 compat_sys_statfs64 +43 64 statfs sys_statfs +44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 +44 64 fstatfs sys_fstatfs +45 32 truncate64 sys_truncate64 compat_sys_truncate64 +45 64 truncate sys_truncate +46 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +46 64 ftruncate sys_ftruncate +47 common fallocate sys_fallocate compat_sys_fallocate +48 common faccessat sys_faccessat +49 common chdir sys_chdir +50 common fchdir sys_fchdir +51 common chroot sys_chroot +52 common fchmod sys_fchmod +53 common fchmodat sys_fchmodat +54 common fchownat sys_fchownat +55 common fchown sys_fchown +56 common openat sys_openat +57 common close sys_close +58 common vhangup sys_vhangup +59 common pipe2 sys_pipe2 +60 common quotactl sys_quotactl +61 common getdents64 sys_getdents64 +62 32 llseek sys_llseek +62 64 lseek sys_lseek +63 common read sys_read +64 common write sys_write +65 common readv sys_readv sys_readv +66 common writev sys_writev sys_writev +67 common pread64 sys_pread64 compat_sys_pread64 +68 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +69 common preadv sys_preadv compat_sys_preadv +70 common pwritev sys_pwritev compat_sys_pwritev +71 32 sendfile64 sys_sendfile64 +71 64 sendfile sys_sendfile64 +72 time32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32 +72 64 pselect6 sys_pselect6 +73 time32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32 +73 64 ppoll sys_ppoll +74 common signalfd4 sys_signalfd4 compat_sys_signalfd4 +75 common vmsplice sys_vmsplice +76 common splice sys_splice +77 common tee sys_tee +78 common readlinkat sys_readlinkat +79 stat64 fstatat64 sys_fstatat64 +79 64 newfstatat sys_newfstatat +80 stat64 fstat64 sys_fstat64 +80 64 fstat sys_newfstat +81 common sync sys_sync +82 common fsync sys_fsync +83 common fdatasync sys_fdatasync +84 common 
sync_file_range sys_sync_file_range compat_sys_sync_file_range +85 common timerfd_create sys_timerfd_create +86 time32 timerfd_settime sys_timerfd_settime32 +86 64 timerfd_settime sys_timerfd_settime +87 time32 timerfd_gettime sys_timerfd_gettime32 +87 64 timerfd_gettime sys_timerfd_gettime +88 time32 utimensat sys_utimensat_time32 +88 64 utimensat sys_utimensat +89 common acct sys_acct +90 common capget sys_capget +91 common capset sys_capset +92 common personality sys_personality +93 common exit sys_exit +94 common exit_group sys_exit_group +95 common waitid sys_waitid compat_sys_waitid +96 common set_tid_address sys_set_tid_address +97 common unshare sys_unshare +98 time32 futex sys_futex_time32 +98 64 futex sys_futex +99 common set_robust_list sys_set_robust_list compat_sys_set_robust_list +100 common get_robust_list sys_get_robust_list compat_sys_get_robust_list +101 time32 nanosleep sys_nanosleep_time32 +101 64 nanosleep sys_nanosleep +102 common getitimer sys_getitimer compat_sys_getitimer +103 common setitimer sys_setitimer compat_sys_setitimer +104 common kexec_load sys_kexec_load compat_sys_kexec_load +105 common init_module sys_init_module +106 common delete_module sys_delete_module +107 common timer_create sys_timer_create compat_sys_timer_create +108 time32 timer_gettime sys_timer_gettime32 +108 64 timer_gettime sys_timer_gettime +109 common timer_getoverrun sys_timer_getoverrun +110 time32 timer_settime sys_timer_settime32 +110 64 timer_settime sys_timer_settime +111 common timer_delete sys_timer_delete +112 time32 clock_settime sys_clock_settime32 +112 64 clock_settime sys_clock_settime +113 time32 clock_gettime sys_clock_gettime32 +113 64 clock_gettime sys_clock_gettime +114 time32 clock_getres sys_clock_getres_time32 +114 64 clock_getres sys_clock_getres +115 time32 clock_nanosleep sys_clock_nanosleep_time32 +115 64 clock_nanosleep sys_clock_nanosleep +116 common syslog sys_syslog +117 common ptrace sys_ptrace compat_sys_ptrace +118 common sched_setparam sys_sched_setparam +119 common sched_setscheduler sys_sched_setscheduler +120 common sched_getscheduler sys_sched_getscheduler +121 common sched_getparam sys_sched_getparam +122 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity +123 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity +124 common sched_yield sys_sched_yield +125 common sched_get_priority_max sys_sched_get_priority_max +126 common sched_get_priority_min sys_sched_get_priority_min +127 time32 sched_rr_get_interval sys_sched_rr_get_interval_time32 +127 64 sched_rr_get_interval sys_sched_rr_get_interval +128 common restart_syscall sys_restart_syscall +129 common kill sys_kill +130 common tkill sys_tkill +131 common tgkill sys_tgkill +132 common sigaltstack sys_sigaltstack compat_sys_sigaltstack +133 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend +134 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction +135 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask +136 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending +137 time32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32 +137 64 rt_sigtimedwait sys_rt_sigtimedwait +138 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo +139 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn +140 common setpriority sys_setpriority +141 common getpriority sys_getpriority +142 common reboot sys_reboot +143 common setregid sys_setregid +144 common setgid sys_setgid +145 
common setreuid sys_setreuid +146 common setuid sys_setuid +147 common setresuid sys_setresuid +148 common getresuid sys_getresuid +149 common setresgid sys_setresgid +150 common getresgid sys_getresgid +151 common setfsuid sys_setfsuid +152 common setfsgid sys_setfsgid +153 common times sys_times compat_sys_times +154 common setpgid sys_setpgid +155 common getpgid sys_getpgid +156 common getsid sys_getsid +157 common setsid sys_setsid +158 common getgroups sys_getgroups +159 common setgroups sys_setgroups +160 common uname sys_newuname +161 common sethostname sys_sethostname +162 common setdomainname sys_setdomainname +# getrlimit and setrlimit are superseded with prlimit64 +163 rlimit getrlimit sys_getrlimit compat_sys_getrlimit +164 rlimit setrlimit sys_setrlimit compat_sys_setrlimit +165 common getrusage sys_getrusage compat_sys_getrusage +166 common umask sys_umask +167 common prctl sys_prctl +168 common getcpu sys_getcpu +169 time32 gettimeofday sys_gettimeofday compat_sys_gettimeofday +169 64 gettimeofday sys_gettimeofday +170 time32 settimeofday sys_settimeofday compat_sys_settimeofday +170 64 settimeofday sys_settimeofday +171 time32 adjtimex sys_adjtimex_time32 +171 64 adjtimex sys_adjtimex +172 common getpid sys_getpid +173 common getppid sys_getppid +174 common getuid sys_getuid +175 common geteuid sys_geteuid +176 common getgid sys_getgid +177 common getegid sys_getegid +178 common gettid sys_gettid +179 common sysinfo sys_sysinfo compat_sys_sysinfo +180 common mq_open sys_mq_open compat_sys_mq_open +181 common mq_unlink sys_mq_unlink +182 time32 mq_timedsend sys_mq_timedsend_time32 +182 64 mq_timedsend sys_mq_timedsend +183 time32 mq_timedreceive sys_mq_timedreceive_time32 +183 64 mq_timedreceive sys_mq_timedreceive +184 common mq_notify sys_mq_notify compat_sys_mq_notify +185 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr +186 common msgget sys_msgget +187 common msgctl sys_msgctl compat_sys_msgctl +188 common msgrcv sys_msgrcv compat_sys_msgrcv +189 common msgsnd sys_msgsnd compat_sys_msgsnd +190 common semget sys_semget +191 common semctl sys_semctl compat_sys_semctl +192 time32 semtimedop sys_semtimedop_time32 +192 64 semtimedop sys_semtimedop +193 common semop sys_semop +194 common shmget sys_shmget +195 common shmctl sys_shmctl compat_sys_shmctl +196 common shmat sys_shmat compat_sys_shmat +197 common shmdt sys_shmdt +198 common socket sys_socket +199 common socketpair sys_socketpair +200 common bind sys_bind +201 common listen sys_listen +202 common accept sys_accept +203 common connect sys_connect +204 common getsockname sys_getsockname +205 common getpeername sys_getpeername +206 common sendto sys_sendto +207 common recvfrom sys_recvfrom compat_sys_recvfrom +208 common setsockopt sys_setsockopt sys_setsockopt +209 common getsockopt sys_getsockopt sys_getsockopt +210 common shutdown sys_shutdown +211 common sendmsg sys_sendmsg compat_sys_sendmsg +212 common recvmsg sys_recvmsg compat_sys_recvmsg +213 common readahead sys_readahead compat_sys_readahead +214 common brk sys_brk +215 common munmap sys_munmap +216 common mremap sys_mremap +217 common add_key sys_add_key +218 common request_key sys_request_key +219 common keyctl sys_keyctl compat_sys_keyctl +220 common clone sys_clone +221 common execve sys_execve compat_sys_execve +222 32 mmap2 sys_mmap2 +222 64 mmap sys_mmap +223 32 fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64 +223 64 fadvise64 sys_fadvise64_64 +224 common swapon sys_swapon +225 common swapoff sys_swapoff +226 common mprotect 
sys_mprotect +227 common msync sys_msync +228 common mlock sys_mlock +229 common munlock sys_munlock +230 common mlockall sys_mlockall +231 common munlockall sys_munlockall +232 common mincore sys_mincore +233 common madvise sys_madvise +234 common remap_file_pages sys_remap_file_pages +235 common mbind sys_mbind +236 common get_mempolicy sys_get_mempolicy +237 common set_mempolicy sys_set_mempolicy +238 common migrate_pages sys_migrate_pages +239 common move_pages sys_move_pages +240 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo +241 common perf_event_open sys_perf_event_open +242 common accept4 sys_accept4 +243 time32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 +243 64 recvmmsg sys_recvmmsg +# Architectures may provide up to 16 syscalls of their own between 244 and 259 +244 arc cacheflush sys_cacheflush +245 arc arc_settls sys_arc_settls +246 arc arc_gettls sys_arc_gettls +247 arc sysfs sys_sysfs +248 arc arc_usr_cmpxchg sys_arc_usr_cmpxchg + +244 csky set_thread_area sys_set_thread_area +245 csky cacheflush sys_cacheflush + +244 nios2 cacheflush sys_cacheflush + +244 or1k or1k_atomic sys_or1k_atomic + +258 riscv riscv_hwprobe sys_riscv_hwprobe +259 riscv riscv_flush_icache sys_riscv_flush_icache + +260 time32 wait4 sys_wait4 compat_sys_wait4 +260 64 wait4 sys_wait4 +261 common prlimit64 sys_prlimit64 +262 common fanotify_init sys_fanotify_init +263 common fanotify_mark sys_fanotify_mark +264 common name_to_handle_at sys_name_to_handle_at +265 common open_by_handle_at sys_open_by_handle_at +266 time32 clock_adjtime sys_clock_adjtime32 +266 64 clock_adjtime sys_clock_adjtime +267 common syncfs sys_syncfs +268 common setns sys_setns +269 common sendmmsg sys_sendmmsg compat_sys_sendmmsg +270 common process_vm_readv sys_process_vm_readv +271 common process_vm_writev sys_process_vm_writev +272 common kcmp sys_kcmp +273 common finit_module sys_finit_module +274 common sched_setattr sys_sched_setattr +275 common sched_getattr sys_sched_getattr +276 common renameat2 sys_renameat2 +277 common seccomp sys_seccomp +278 common getrandom sys_getrandom +279 common memfd_create sys_memfd_create +280 common bpf sys_bpf +281 common execveat sys_execveat compat_sys_execveat +282 common userfaultfd sys_userfaultfd +283 common membarrier sys_membarrier +284 common mlock2 sys_mlock2 +285 common copy_file_range sys_copy_file_range +286 common preadv2 sys_preadv2 compat_sys_preadv2 +287 common pwritev2 sys_pwritev2 compat_sys_pwritev2 +288 common pkey_mprotect sys_pkey_mprotect +289 common pkey_alloc sys_pkey_alloc +290 common pkey_free sys_pkey_free +291 common statx sys_statx +292 time32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents +292 64 io_pgetevents sys_io_pgetevents +293 common rseq sys_rseq +294 common kexec_file_load sys_kexec_file_load +# 295 through 402 are unassigned to sync up with generic numbers don't use +403 32 clock_gettime64 sys_clock_gettime +404 32 clock_settime64 sys_clock_settime +405 32 clock_adjtime64 sys_clock_adjtime +406 32 clock_getres_time64 sys_clock_getres +407 32 clock_nanosleep_time64 sys_clock_nanosleep +408 32 timer_gettime64 sys_timer_gettime +409 32 timer_settime64 sys_timer_settime +410 32 timerfd_gettime64 sys_timerfd_gettime +411 32 timerfd_settime64 sys_timerfd_settime +412 32 utimensat_time64 sys_utimensat +413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 +414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 +416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 +417 32 
recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 +418 32 mq_timedsend_time64 sys_mq_timedsend +419 32 mq_timedreceive_time64 sys_mq_timedreceive +420 32 semtimedop_time64 sys_semtimedop +421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 +422 32 futex_time64 sys_futex +423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval +424 common pidfd_send_signal sys_pidfd_send_signal +425 common io_uring_setup sys_io_uring_setup +426 common io_uring_enter sys_io_uring_enter +427 common io_uring_register sys_io_uring_register +428 common open_tree sys_open_tree +429 common move_mount sys_move_mount +430 common fsopen sys_fsopen +431 common fsconfig sys_fsconfig +432 common fsmount sys_fsmount +433 common fspick sys_fspick +434 common pidfd_open sys_pidfd_open +435 common clone3 sys_clone3 +436 common close_range sys_close_range +437 common openat2 sys_openat2 +438 common pidfd_getfd sys_pidfd_getfd +439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise +441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr +443 common quotactl_fd sys_quotactl_fd +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self +447 memfd_secret memfd_secret sys_memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/linux-user/openrisc/syscall_nr.h b/linux-user/openrisc/syscall_nr.h deleted file mode 100644 index f7faddb54c5..00000000000 --- a/linux-user/openrisc/syscall_nr.h +++ /dev/null @@ -1,334 +0,0 @@ -/* - * This file contains the system call numbers. - * Do not modify. 
- * This file is generated by scripts/gensyscalls.sh - */ -#ifndef LINUX_USER_OPENRISC_SYSCALL_NR_H -#define LINUX_USER_OPENRISC_SYSCALL_NR_H - -#define TARGET_NR_io_setup 0 -#define TARGET_NR_or1k_atomic TARGET_NR_arch_specific_syscall -#define TARGET_NR_io_destroy 1 -#define TARGET_NR_io_submit 2 -#define TARGET_NR_io_cancel 3 -#define TARGET_NR_io_getevents 4 -#define TARGET_NR_setxattr 5 -#define TARGET_NR_lsetxattr 6 -#define TARGET_NR_fsetxattr 7 -#define TARGET_NR_getxattr 8 -#define TARGET_NR_lgetxattr 9 -#define TARGET_NR_fgetxattr 10 -#define TARGET_NR_listxattr 11 -#define TARGET_NR_llistxattr 12 -#define TARGET_NR_flistxattr 13 -#define TARGET_NR_removexattr 14 -#define TARGET_NR_lremovexattr 15 -#define TARGET_NR_fremovexattr 16 -#define TARGET_NR_getcwd 17 -#define TARGET_NR_lookup_dcookie 18 -#define TARGET_NR_eventfd2 19 -#define TARGET_NR_epoll_create1 20 -#define TARGET_NR_epoll_ctl 21 -#define TARGET_NR_epoll_pwait 22 -#define TARGET_NR_dup 23 -#define TARGET_NR_dup3 24 -#define TARGET_NR_fcntl64 25 -#define TARGET_NR_inotify_init1 26 -#define TARGET_NR_inotify_add_watch 27 -#define TARGET_NR_inotify_rm_watch 28 -#define TARGET_NR_ioctl 29 -#define TARGET_NR_ioprio_set 30 -#define TARGET_NR_ioprio_get 31 -#define TARGET_NR_flock 32 -#define TARGET_NR_mknodat 33 -#define TARGET_NR_mkdirat 34 -#define TARGET_NR_unlinkat 35 -#define TARGET_NR_symlinkat 36 -#define TARGET_NR_linkat 37 -#define TARGET_NR_renameat 38 -#define TARGET_NR_umount2 39 -#define TARGET_NR_mount 40 -#define TARGET_NR_pivot_root 41 -#define TARGET_NR_nfsservctl 42 -#define TARGET_NR_statfs64 43 -#define TARGET_NR_fstatfs64 44 -#define TARGET_NR_truncate64 45 -#define TARGET_NR_ftruncate64 46 -#define TARGET_NR_fallocate 47 -#define TARGET_NR_faccessat 48 -#define TARGET_NR_chdir 49 -#define TARGET_NR_fchdir 50 -#define TARGET_NR_chroot 51 -#define TARGET_NR_fchmod 52 -#define TARGET_NR_fchmodat 53 -#define TARGET_NR_fchownat 54 -#define TARGET_NR_fchown 55 -#define TARGET_NR_openat 56 -#define TARGET_NR_close 57 -#define TARGET_NR_vhangup 58 -#define TARGET_NR_pipe2 59 -#define TARGET_NR_quotactl 60 -#define TARGET_NR_getdents64 61 -#define TARGET_NR_llseek 62 -#define TARGET_NR_read 63 -#define TARGET_NR_write 64 -#define TARGET_NR_readv 65 -#define TARGET_NR_writev 66 -#define TARGET_NR_pread64 67 -#define TARGET_NR_pwrite64 68 -#define TARGET_NR_preadv 69 -#define TARGET_NR_pwritev 70 -#define TARGET_NR_sendfile64 71 -#define TARGET_NR_pselect6 72 -#define TARGET_NR_ppoll 73 -#define TARGET_NR_signalfd4 74 -#define TARGET_NR_vmsplice 75 -#define TARGET_NR_splice 76 -#define TARGET_NR_tee 77 -#define TARGET_NR_readlinkat 78 -#define TARGET_NR_fstatat64 79 -#define TARGET_NR_fstat64 80 -#define TARGET_NR_sync 81 -#define TARGET_NR_fsync 82 -#define TARGET_NR_fdatasync 83 -#define TARGET_NR_sync_file_range 84 -#define TARGET_NR_timerfd_create 85 -#define TARGET_NR_timerfd_settime 86 -#define TARGET_NR_timerfd_gettime 87 -#define TARGET_NR_utimensat 88 -#define TARGET_NR_acct 89 -#define TARGET_NR_capget 90 -#define TARGET_NR_capset 91 -#define TARGET_NR_personality 92 -#define TARGET_NR_exit 93 -#define TARGET_NR_exit_group 94 -#define TARGET_NR_waitid 95 -#define TARGET_NR_set_tid_address 96 -#define TARGET_NR_unshare 97 -#define TARGET_NR_futex 98 -#define TARGET_NR_set_robust_list 99 -#define TARGET_NR_get_robust_list 100 -#define TARGET_NR_nanosleep 101 -#define TARGET_NR_getitimer 102 -#define TARGET_NR_setitimer 103 -#define TARGET_NR_kexec_load 104 -#define TARGET_NR_init_module 105 -#define 
TARGET_NR_delete_module 106 -#define TARGET_NR_timer_create 107 -#define TARGET_NR_timer_gettime 108 -#define TARGET_NR_timer_getoverrun 109 -#define TARGET_NR_timer_settime 110 -#define TARGET_NR_timer_delete 111 -#define TARGET_NR_clock_settime 112 -#define TARGET_NR_clock_gettime 113 -#define TARGET_NR_clock_getres 114 -#define TARGET_NR_clock_nanosleep 115 -#define TARGET_NR_syslog 116 -#define TARGET_NR_ptrace 117 -#define TARGET_NR_sched_setparam 118 -#define TARGET_NR_sched_setscheduler 119 -#define TARGET_NR_sched_getscheduler 120 -#define TARGET_NR_sched_getparam 121 -#define TARGET_NR_sched_setaffinity 122 -#define TARGET_NR_sched_getaffinity 123 -#define TARGET_NR_sched_yield 124 -#define TARGET_NR_sched_get_priority_max 125 -#define TARGET_NR_sched_get_priority_min 126 -#define TARGET_NR_sched_rr_get_interval 127 -#define TARGET_NR_restart_syscall 128 -#define TARGET_NR_kill 129 -#define TARGET_NR_tkill 130 -#define TARGET_NR_tgkill 131 -#define TARGET_NR_sigaltstack 132 -#define TARGET_NR_rt_sigsuspend 133 -#define TARGET_NR_rt_sigaction 134 -#define TARGET_NR_rt_sigprocmask 135 -#define TARGET_NR_rt_sigpending 136 -#define TARGET_NR_rt_sigtimedwait 137 -#define TARGET_NR_rt_sigqueueinfo 138 -#define TARGET_NR_rt_sigreturn 139 -#define TARGET_NR_setpriority 140 -#define TARGET_NR_getpriority 141 -#define TARGET_NR_reboot 142 -#define TARGET_NR_setregid 143 -#define TARGET_NR_setgid 144 -#define TARGET_NR_setreuid 145 -#define TARGET_NR_setuid 146 -#define TARGET_NR_setresuid 147 -#define TARGET_NR_getresuid 148 -#define TARGET_NR_setresgid 149 -#define TARGET_NR_getresgid 150 -#define TARGET_NR_setfsuid 151 -#define TARGET_NR_setfsgid 152 -#define TARGET_NR_times 153 -#define TARGET_NR_setpgid 154 -#define TARGET_NR_getpgid 155 -#define TARGET_NR_getsid 156 -#define TARGET_NR_setsid 157 -#define TARGET_NR_getgroups 158 -#define TARGET_NR_setgroups 159 -#define TARGET_NR_uname 160 -#define TARGET_NR_sethostname 161 -#define TARGET_NR_setdomainname 162 -#define TARGET_NR_getrlimit 163 -#define TARGET_NR_setrlimit 164 -#define TARGET_NR_getrusage 165 -#define TARGET_NR_umask 166 -#define TARGET_NR_prctl 167 -#define TARGET_NR_getcpu 168 -#define TARGET_NR_gettimeofday 169 -#define TARGET_NR_settimeofday 170 -#define TARGET_NR_adjtimex 171 -#define TARGET_NR_getpid 172 -#define TARGET_NR_getppid 173 -#define TARGET_NR_getuid 174 -#define TARGET_NR_geteuid 175 -#define TARGET_NR_getgid 176 -#define TARGET_NR_getegid 177 -#define TARGET_NR_gettid 178 -#define TARGET_NR_sysinfo 179 -#define TARGET_NR_mq_open 180 -#define TARGET_NR_mq_unlink 181 -#define TARGET_NR_mq_timedsend 182 -#define TARGET_NR_mq_timedreceive 183 -#define TARGET_NR_mq_notify 184 -#define TARGET_NR_mq_getsetattr 185 -#define TARGET_NR_msgget 186 -#define TARGET_NR_msgctl 187 -#define TARGET_NR_msgrcv 188 -#define TARGET_NR_msgsnd 189 -#define TARGET_NR_semget 190 -#define TARGET_NR_semctl 191 -#define TARGET_NR_semtimedop 192 -#define TARGET_NR_semop 193 -#define TARGET_NR_shmget 194 -#define TARGET_NR_shmctl 195 -#define TARGET_NR_shmat 196 -#define TARGET_NR_shmdt 197 -#define TARGET_NR_socket 198 -#define TARGET_NR_socketpair 199 -#define TARGET_NR_bind 200 -#define TARGET_NR_listen 201 -#define TARGET_NR_accept 202 -#define TARGET_NR_connect 203 -#define TARGET_NR_getsockname 204 -#define TARGET_NR_getpeername 205 -#define TARGET_NR_sendto 206 -#define TARGET_NR_recvfrom 207 -#define TARGET_NR_setsockopt 208 -#define TARGET_NR_getsockopt 209 -#define TARGET_NR_shutdown 210 -#define TARGET_NR_sendmsg 211 
-#define TARGET_NR_recvmsg 212 -#define TARGET_NR_readahead 213 -#define TARGET_NR_brk 214 -#define TARGET_NR_munmap 215 -#define TARGET_NR_mremap 216 -#define TARGET_NR_add_key 217 -#define TARGET_NR_request_key 218 -#define TARGET_NR_keyctl 219 -#define TARGET_NR_clone 220 -#define TARGET_NR_execve 221 -#define TARGET_NR_mmap2 222 -#define TARGET_NR_fadvise64_64 223 -#define TARGET_NR_swapon 224 -#define TARGET_NR_swapoff 225 -#define TARGET_NR_mprotect 226 -#define TARGET_NR_msync 227 -#define TARGET_NR_mlock 228 -#define TARGET_NR_munlock 229 -#define TARGET_NR_mlockall 230 -#define TARGET_NR_munlockall 231 -#define TARGET_NR_mincore 232 -#define TARGET_NR_madvise 233 -#define TARGET_NR_remap_file_pages 234 -#define TARGET_NR_mbind 235 -#define TARGET_NR_get_mempolicy 236 -#define TARGET_NR_set_mempolicy 237 -#define TARGET_NR_migrate_pages 238 -#define TARGET_NR_move_pages 239 -#define TARGET_NR_rt_tgsigqueueinfo 240 -#define TARGET_NR_perf_event_open 241 -#define TARGET_NR_accept4 242 -#define TARGET_NR_recvmmsg 243 -#define TARGET_NR_arch_specific_syscall 244 -#define TARGET_NR_wait4 260 -#define TARGET_NR_prlimit64 261 -#define TARGET_NR_fanotify_init 262 -#define TARGET_NR_fanotify_mark 263 -#define TARGET_NR_name_to_handle_at 264 -#define TARGET_NR_open_by_handle_at 265 -#define TARGET_NR_clock_adjtime 266 -#define TARGET_NR_syncfs 267 -#define TARGET_NR_setns 268 -#define TARGET_NR_sendmmsg 269 -#define TARGET_NR_process_vm_readv 270 -#define TARGET_NR_process_vm_writev 271 -#define TARGET_NR_kcmp 272 -#define TARGET_NR_finit_module 273 -#define TARGET_NR_sched_setattr 274 -#define TARGET_NR_sched_getattr 275 -#define TARGET_NR_renameat2 276 -#define TARGET_NR_seccomp 277 -#define TARGET_NR_getrandom 278 -#define TARGET_NR_memfd_create 279 -#define TARGET_NR_bpf 280 -#define TARGET_NR_execveat 281 -#define TARGET_NR_userfaultfd 282 -#define TARGET_NR_membarrier 283 -#define TARGET_NR_mlock2 284 -#define TARGET_NR_copy_file_range 285 -#define TARGET_NR_preadv2 286 -#define TARGET_NR_pwritev2 287 -#define TARGET_NR_pkey_mprotect 288 -#define TARGET_NR_pkey_alloc 289 -#define TARGET_NR_pkey_free 290 -#define TARGET_NR_statx 291 -#define TARGET_NR_io_pgetevents 292 -#define TARGET_NR_rseq 293 -#define TARGET_NR_kexec_file_load 294 -#define TARGET_NR_clock_gettime64 403 -#define TARGET_NR_clock_settime64 404 -#define TARGET_NR_clock_adjtime64 405 -#define TARGET_NR_clock_getres_time64 406 -#define TARGET_NR_clock_nanosleep_time64 407 -#define TARGET_NR_timer_gettime64 408 -#define TARGET_NR_timer_settime64 409 -#define TARGET_NR_timerfd_gettime64 410 -#define TARGET_NR_timerfd_settime64 411 -#define TARGET_NR_utimensat_time64 412 -#define TARGET_NR_pselect6_time64 413 -#define TARGET_NR_ppoll_time64 414 -#define TARGET_NR_io_pgetevents_time64 416 -#define TARGET_NR_recvmmsg_time64 417 -#define TARGET_NR_mq_timedsend_time64 418 -#define TARGET_NR_mq_timedreceive_time64 419 -#define TARGET_NR_semtimedop_time64 420 -#define TARGET_NR_rt_sigtimedwait_time64 421 -#define TARGET_NR_futex_time64 422 -#define TARGET_NR_sched_rr_get_interval_time64 423 -#define TARGET_NR_pidfd_send_signal 424 -#define TARGET_NR_io_uring_setup 425 -#define TARGET_NR_io_uring_enter 426 -#define TARGET_NR_io_uring_register 427 -#define TARGET_NR_open_tree 428 -#define TARGET_NR_move_mount 429 -#define TARGET_NR_fsopen 430 -#define TARGET_NR_fsconfig 431 -#define TARGET_NR_fsmount 432 -#define TARGET_NR_fspick 433 -#define TARGET_NR_pidfd_open 434 -#define TARGET_NR_clone3 435 -#define TARGET_NR_close_range 436 
-#define TARGET_NR_openat2 437 -#define TARGET_NR_pidfd_getfd 438 -#define TARGET_NR_faccessat2 439 -#define TARGET_NR_process_madvise 440 -#define TARGET_NR_epoll_pwait2 441 -#define TARGET_NR_mount_setattr 442 -#define TARGET_NR_landlock_create_ruleset 444 -#define TARGET_NR_landlock_add_rule 445 -#define TARGET_NR_landlock_restrict_self 446 -#define TARGET_NR_syscalls 447 - -#endif /* LINUX_USER_OPENRISC_SYSCALL_NR_H */ diff --git a/linux-user/openrisc/syscallhdr.sh b/linux-user/openrisc/syscallhdr.sh new file mode 100644 index 00000000000..047e9f77c75 --- /dev/null +++ b/linux-user/openrisc/syscallhdr.sh @@ -0,0 +1,28 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +prefix="$4" +offset="$5" + +fileguard=LINUX_USER_OPENRISC_`basename "$out" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + echo "#ifndef ${fileguard}" + echo "#define ${fileguard} 1" + echo "" + + while read nr abi name entry ; do + if [ -z "$offset" ]; then + echo "#define TARGET_NR_${prefix}${name} $nr" + else + echo "#define TARGET_NR_${prefix}${name} ($offset + $nr)" + fi + done + + echo "" + echo "#endif /* ${fileguard} */" +) > "$out" diff --git a/linux-user/plugin-api.c b/linux-user/plugin-api.c new file mode 100644 index 00000000000..8d6fbb60e0c --- /dev/null +++ b/linux-user/plugin-api.c @@ -0,0 +1,16 @@ +/* + * QEMU Plugin API - linux-user-mode only implementations + * + * Common user-mode only APIs are in plugins/api-user. These helpers + * are only specific to linux-user. + * + * Copyright (C) 2017, Emilio G. Cota + * Copyright (C) 2019-2025, Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "common-user/plugin-api.c.inc" diff --git a/linux-user/ppc/Makefile.vdso b/linux-user/ppc/Makefile.vdso index 3ca3c6b83ea..e2b8facbb50 100644 --- a/linux-user/ppc/Makefile.vdso +++ b/linux-user/ppc/Makefile.vdso @@ -6,9 +6,11 @@ VPATH += $(SUBDIR) all: $(SUBDIR)/vdso-32.so $(SUBDIR)/vdso-64.so $(SUBDIR)/vdso-64le.so LDFLAGS32 = -nostdlib -shared -Wl,-T,$(SUBDIR)/vdso-32.ld \ - -Wl,-h,linux-vdso32.so.1 -Wl,--hash-style=both -Wl,--build-id=sha1 + -Wl,-h,linux-vdso32.so.1 -Wl,--hash-style=both \ + -Wl,--build-id=sha1 -Wl,-z,max-page-size=4096 LDFLAGS64 = -nostdlib -shared -Wl,-T,$(SUBDIR)/vdso-64.ld \ - -Wl,-h,linux-vdso64.so.1 -Wl,--hash-style=both -Wl,--build-id=sha1 + -Wl,-h,linux-vdso64.so.1 -Wl,--hash-style=both \ + -Wl,--build-id=sha1 -Wl,-z,max-page-size=4096 $(SUBDIR)/vdso-32.so: vdso.S vdso-32.ld vdso-asmoffset.h $(CC) -o $@ $(LDFLAGS32) -m32 $< diff --git a/linux-user/ppc/cpu_loop.c b/linux-user/ppc/cpu_loop.c index 02204ad8beb..2a0efaffcd6 100644 --- a/linux-user/ppc/cpu_loop.c +++ b/linux-user/ppc/cpu_loop.c @@ -21,7 +21,7 @@ #include "qemu.h" #include "qemu/timer.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env) @@ -378,7 +378,7 @@ void cpu_loop(CPUPPCState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { int i; diff --git a/linux-user/ppc/signal.c b/linux-user/ppc/signal.c index a1d8c0bccc1..24e5a02a782 100644 --- a/linux-user/ppc/signal.c +++ b/linux-user/ppc/signal.c @@ -628,7 +628,7 @@ static int 
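For reference, the syscallhdr.sh script just added turns the openrisc syscall.tbl above into a header of TARGET_NR_* defines wrapped in an include guard derived from the output file name, replacing the hand-maintained syscall_nr.h that this patch deletes. Assuming an output name of syscall_nr.h, an abi list along the lines of "common,or1k", an empty prefix and no offset (the real arguments come from @EXTRA_ARGS@ in the meson generator, not shown here), the generated file would start roughly like:

    #ifndef LINUX_USER_OPENRISC_SYSCALL_NR_H
    #define LINUX_USER_OPENRISC_SYSCALL_NR_H 1

    #define TARGET_NR_io_setup 0
    #define TARGET_NR_io_destroy 1
    #define TARGET_NR_io_submit 2
    #define TARGET_NR_io_cancel 3

and continue with one define per matching table row (e.g. TARGET_NR_or1k_atomic 244 for the or1k-specific entry) before the closing #endif.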
do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig) if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) return 1; - target_to_host_sigset_internal(&blocked, &set); + target_to_host_sigset(&blocked, &set); set_sigmask(&blocked); restore_user_regs(env, mcp, sig); diff --git a/linux-user/ppc/syscall.tbl b/linux-user/ppc/syscall.tbl index 8f052ff4058..4b428a43ccd 100644 --- a/linux-user/ppc/syscall.tbl +++ b/linux-user/ppc/syscall.tbl @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note # # system call numbers and entry vectors for powerpc # @@ -110,7 +110,7 @@ 79 common settimeofday sys_settimeofday compat_sys_settimeofday 80 common getgroups sys_getgroups 81 common setgroups sys_setgroups -82 32 select ppc_select sys_ni_syscall +82 32 select sys_old_select compat_sys_old_select 82 64 select sys_ni_syscall 82 spu select sys_ni_syscall 83 common symlink sys_symlink @@ -176,11 +176,11 @@ 131 nospu quotactl sys_quotactl 132 common getpgid sys_getpgid 133 common fchdir sys_fchdir -134 common bdflush sys_bdflush +134 common bdflush sys_ni_syscall 135 common sysfs sys_sysfs -136 32 personality sys_personality ppc64_personality -136 64 personality ppc64_personality -136 spu personality ppc64_personality +136 32 personality sys_personality compat_sys_ppc64_personality +136 64 personality sys_ppc64_personality +136 spu personality sys_ppc64_personality 137 common afs_syscall sys_ni_syscall 138 common setfsuid sys_setfsuid 139 common setfsgid sys_setfsgid @@ -228,8 +228,12 @@ 176 64 rt_sigtimedwait sys_rt_sigtimedwait 177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend -179 common pread64 sys_pread64 compat_sys_pread64 -180 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +179 32 pread64 sys_ppc_pread64 compat_sys_ppc_pread64 +179 64 pread64 sys_pread64 +179 spu pread64 sys_pread64 +180 32 pwrite64 sys_ppc_pwrite64 compat_sys_ppc_pwrite64 +180 64 pwrite64 sys_pwrite64 +180 spu pwrite64 sys_pwrite64 181 common chown sys_chown 182 common getcwd sys_getcwd 183 common capget sys_capget @@ -242,10 +246,12 @@ 188 common putpmsg sys_ni_syscall 189 nospu vfork sys_vfork 190 common ugetrlimit sys_getrlimit compat_sys_getrlimit -191 common readahead sys_readahead compat_sys_readahead +191 32 readahead sys_ppc_readahead compat_sys_ppc_readahead +191 64 readahead sys_readahead +191 spu readahead sys_readahead 192 32 mmap2 sys_mmap2 compat_sys_mmap2 -193 32 truncate64 sys_truncate64 compat_sys_truncate64 -194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +193 32 truncate64 sys_ppc_truncate64 compat_sys_ppc_truncate64 +194 32 ftruncate64 sys_ppc_ftruncate64 compat_sys_ppc_ftruncate64 195 32 stat64 sys_stat64 196 32 lstat64 sys_lstat64 197 32 fstat64 sys_fstat64 @@ -288,9 +294,11 @@ 230 common io_submit sys_io_submit compat_sys_io_submit 231 common io_cancel sys_io_cancel 232 nospu set_tid_address sys_set_tid_address -233 common fadvise64 sys_fadvise64 ppc32_fadvise64 +233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64 +233 64 fadvise64 sys_fadvise64 +233 spu fadvise64 sys_fadvise64 234 nospu exit_group sys_exit_group -235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +235 nospu lookup_dcookie sys_ni_syscall 236 common epoll_create sys_epoll_create 237 common epoll_ctl sys_epoll_ctl 238 common epoll_wait sys_epoll_wait @@ -323,17 +331,17 @@ 251 spu utimes sys_utimes 252 common 
statfs64 sys_statfs64 compat_sys_statfs64 253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 -254 32 fadvise64_64 ppc_fadvise64_64 +254 32 fadvise64_64 sys_ppc_fadvise64_64 254 spu fadvise64_64 sys_ni_syscall 255 common rtas sys_rtas 256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall 256 64 sys_debug_setcontext sys_ni_syscall 256 spu sys_debug_setcontext sys_ni_syscall # 257 reserved for vserver -258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages -259 nospu mbind sys_mbind compat_sys_mbind -260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy -261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +258 nospu migrate_pages sys_migrate_pages +259 nospu mbind sys_mbind +260 nospu get_mempolicy sys_get_mempolicy +261 nospu set_mempolicy sys_set_mempolicy 262 nospu mq_open sys_mq_open compat_sys_mq_open 263 nospu mq_unlink sys_mq_unlink 264 32 mq_timedsend sys_mq_timedsend_time32 @@ -381,7 +389,7 @@ 298 common faccessat sys_faccessat 299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list 300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list -301 common move_pages sys_move_pages compat_sys_move_pages +301 common move_pages sys_move_pages 302 common getcpu sys_getcpu 303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait 304 32 utimensat sys_utimensat_time32 @@ -390,8 +398,11 @@ 305 common signalfd sys_signalfd compat_sys_signalfd 306 common timerfd_create sys_timerfd_create 307 common eventfd sys_eventfd -308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2 -309 nospu fallocate sys_fallocate compat_sys_fallocate +308 32 sync_file_range2 sys_ppc_sync_file_range2 compat_sys_ppc_sync_file_range2 +308 64 sync_file_range2 sys_sync_file_range2 +308 spu sync_file_range2 sys_sync_file_range2 +309 32 fallocate sys_ppc_fallocate compat_sys_fallocate +309 64 fallocate sys_fallocate 310 nospu subpage_prot sys_subpage_prot 311 32 timerfd_settime sys_timerfd_settime32 311 64 timerfd_settime sys_timerfd_settime @@ -495,7 +506,7 @@ 412 32 utimensat_time64 sys_utimensat sys_utimensat 413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 -416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend 419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive @@ -522,7 +533,23 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 common quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 nospu set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_ni_syscall +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr 
+460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/linux-user/ppc/syscallhdr.sh b/linux-user/ppc/syscallhdr.sh index 6c44e0eaad2..6e8b93d673c 100644 --- a/linux-user/ppc/syscallhdr.sh +++ b/linux-user/ppc/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/ppc/target_signal.h b/linux-user/ppc/target_signal.h index 5be24e152b7..53fae473f3c 100644 --- a/linux-user/ppc/target_signal.h +++ b/linux-user/ppc/target_signal.h @@ -3,6 +3,8 @@ #include "../generic/signal.h" +#define TARGET_SA_RESTORER 0x04000000 + #if !defined(TARGET_PPC64) #define TARGET_ARCH_HAS_SETUP_FRAME #endif diff --git a/linux-user/ppc/vdso-32.so b/linux-user/ppc/vdso-32.so index b19baafb0d3..0dc55e0dddf 100755 Binary files a/linux-user/ppc/vdso-32.so and b/linux-user/ppc/vdso-32.so differ diff --git a/linux-user/ppc/vdso-64.so b/linux-user/ppc/vdso-64.so index 913c831b381..ac1ab2582e4 100755 Binary files a/linux-user/ppc/vdso-64.so and b/linux-user/ppc/vdso-64.so differ diff --git a/linux-user/ppc/vdso-64le.so b/linux-user/ppc/vdso-64le.so index 258a03b807c..424abb4290b 100755 Binary files a/linux-user/ppc/vdso-64le.so and b/linux-user/ppc/vdso-64le.so differ diff --git a/linux-user/qemu.h b/linux-user/qemu.h index 2e90a971753..0b19fa43e65 100644 --- a/linux-user/qemu.h +++ b/linux-user/qemu.h @@ -2,9 +2,10 @@ #define QEMU_H #include "cpu.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "user/abitypes.h" +#include "user/page-protection.h" #include "syscall_defs.h" #include "target_syscall.h" @@ -44,7 +45,6 @@ struct image_info { abi_ulong file_string; uint32_t elf_flags; int personality; - abi_ulong alignment; bool exec_stack; /* Generic semihosting knows about these pointers. */ @@ -113,6 +113,10 @@ struct TaskState { struct target_vm86plus_struct vm86plus; uint32_t v86flags; uint32_t v86mask; +#endif +#if defined(TARGET_I386) + /* Last syscall number. */ + target_ulong orig_ax; #endif abi_ulong child_tidptr; #ifdef TARGET_M68K @@ -313,6 +317,15 @@ static inline bool access_ok(CPUState *cpu, int type, int copy_from_user(void *hptr, abi_ulong gaddr, ssize_t len); int copy_to_user(abi_ulong gaddr, void *hptr, ssize_t len); +/* + * copy_struct_from_user() copies a target struct to a host struct, in + * a way that guarantees backwards-compatibility for struct syscall + * arguments. + * + * Similar to kernels uaccess.h:copy_struct_from_user() + */ +int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize); + /* Functions for accessing guest memory. The tget and tput functions read/write single values, byteswapping as necessary. The lock_user function gets a pointer to a contiguous area of guest memory, but does not perform @@ -349,4 +362,7 @@ void *lock_user_string(abi_ulong guest_addr); #define unlock_user_struct(host_ptr, guest_addr, copy) \ unlock_user(host_ptr, guest_addr, (copy) ? 
sizeof(*host_ptr) : 0) +/* Clone cpu state */ +CPUArchState *cpu_copy(CPUArchState *env); + #endif /* QEMU_H */ diff --git a/linux-user/riscv/cpu_loop.c b/linux-user/riscv/cpu_loop.c index 52c49c2e426..3ac8bbfec1f 100644 --- a/linux-user/riscv/cpu_loop.c +++ b/linux-user/riscv/cpu_loop.c @@ -21,7 +21,7 @@ #include "qemu/error-report.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" #include "elf.h" #include "semihosting/common-semi.h" @@ -47,7 +47,7 @@ void cpu_loop(CPURISCVState *env) break; case RISCV_EXCP_U_ECALL: env->pc += 4; - if (env->gpr[xA7] == TARGET_NR_arch_specific_syscall + 15) { + if (env->gpr[xA7] == TARGET_NR_riscv_flush_icache) { /* riscv_flush_icache_syscall is a no-op in QEMU as self-modifying code is automatically detected */ ret = 0; @@ -94,7 +94,7 @@ void cpu_loop(CPURISCVState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { CPUState *cpu = env_cpu(env); TaskState *ts = get_task_state(cpu); diff --git a/linux-user/riscv/meson.build b/linux-user/riscv/meson.build index beb989a7caa..b2e7df0f4f6 100644 --- a/linux-user/riscv/meson.build +++ b/linux-user/riscv/meson.build @@ -5,3 +5,9 @@ vdso_64_inc = gen_vdso.process('vdso-64.so', linux_user_ss.add(when: 'TARGET_RISCV32', if_true: vdso_32_inc) linux_user_ss.add(when: 'TARGET_RISCV64', if_true: vdso_64_inc) + +syscall_nr_generators += { + 'riscv': generator(sh, + arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ], + output: '@BASENAME@_nr.h') +} diff --git a/linux-user/riscv/syscall.tbl b/linux-user/riscv/syscall.tbl new file mode 100644 index 00000000000..845e24eb372 --- /dev/null +++ b/linux-user/riscv/syscall.tbl @@ -0,0 +1,405 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# This file contains the system call numbers for all of the +# more recently added architectures. +# +# As a basic principle, no duplication of functionality +# should be added, e.g. we don't use lseek when llseek +# is present. New architectures should use this file +# and implement the less feature-full calls in user space. 
+# +0 common io_setup sys_io_setup compat_sys_io_setup +1 common io_destroy sys_io_destroy +2 common io_submit sys_io_submit compat_sys_io_submit +3 common io_cancel sys_io_cancel +4 time32 io_getevents sys_io_getevents_time32 +4 64 io_getevents sys_io_getevents +5 common setxattr sys_setxattr +6 common lsetxattr sys_lsetxattr +7 common fsetxattr sys_fsetxattr +8 common getxattr sys_getxattr +9 common lgetxattr sys_lgetxattr +10 common fgetxattr sys_fgetxattr +11 common listxattr sys_listxattr +12 common llistxattr sys_llistxattr +13 common flistxattr sys_flistxattr +14 common removexattr sys_removexattr +15 common lremovexattr sys_lremovexattr +16 common fremovexattr sys_fremovexattr +17 common getcwd sys_getcwd +18 common lookup_dcookie sys_ni_syscall +19 common eventfd2 sys_eventfd2 +20 common epoll_create1 sys_epoll_create1 +21 common epoll_ctl sys_epoll_ctl +22 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait +23 common dup sys_dup +24 common dup3 sys_dup3 +25 32 fcntl64 sys_fcntl64 compat_sys_fcntl64 +25 64 fcntl sys_fcntl +26 common inotify_init1 sys_inotify_init1 +27 common inotify_add_watch sys_inotify_add_watch +28 common inotify_rm_watch sys_inotify_rm_watch +29 common ioctl sys_ioctl compat_sys_ioctl +30 common ioprio_set sys_ioprio_set +31 common ioprio_get sys_ioprio_get +32 common flock sys_flock +33 common mknodat sys_mknodat +34 common mkdirat sys_mkdirat +35 common unlinkat sys_unlinkat +36 common symlinkat sys_symlinkat +37 common linkat sys_linkat +# renameat is superseded with flags by renameat2 +38 renameat renameat sys_renameat +39 common umount2 sys_umount +40 common mount sys_mount +41 common pivot_root sys_pivot_root +42 common nfsservctl sys_ni_syscall +43 32 statfs64 sys_statfs64 compat_sys_statfs64 +43 64 statfs sys_statfs +44 32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 +44 64 fstatfs sys_fstatfs +45 32 truncate64 sys_truncate64 compat_sys_truncate64 +45 64 truncate sys_truncate +46 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +46 64 ftruncate sys_ftruncate +47 common fallocate sys_fallocate compat_sys_fallocate +48 common faccessat sys_faccessat +49 common chdir sys_chdir +50 common fchdir sys_fchdir +51 common chroot sys_chroot +52 common fchmod sys_fchmod +53 common fchmodat sys_fchmodat +54 common fchownat sys_fchownat +55 common fchown sys_fchown +56 common openat sys_openat +57 common close sys_close +58 common vhangup sys_vhangup +59 common pipe2 sys_pipe2 +60 common quotactl sys_quotactl +61 common getdents64 sys_getdents64 +62 32 llseek sys_llseek +62 64 lseek sys_lseek +63 common read sys_read +64 common write sys_write +65 common readv sys_readv sys_readv +66 common writev sys_writev sys_writev +67 common pread64 sys_pread64 compat_sys_pread64 +68 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +69 common preadv sys_preadv compat_sys_preadv +70 common pwritev sys_pwritev compat_sys_pwritev +71 32 sendfile64 sys_sendfile64 +71 64 sendfile sys_sendfile64 +72 time32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32 +72 64 pselect6 sys_pselect6 +73 time32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32 +73 64 ppoll sys_ppoll +74 common signalfd4 sys_signalfd4 compat_sys_signalfd4 +75 common vmsplice sys_vmsplice +76 common splice sys_splice +77 common tee sys_tee +78 common readlinkat sys_readlinkat +79 stat64 fstatat64 sys_fstatat64 +79 64 newfstatat sys_newfstatat +80 stat64 fstat64 sys_fstat64 +80 64 fstat sys_newfstat +81 common sync sys_sync +82 common fsync sys_fsync +83 common fdatasync sys_fdatasync +84 common 
sync_file_range sys_sync_file_range compat_sys_sync_file_range +85 common timerfd_create sys_timerfd_create +86 time32 timerfd_settime sys_timerfd_settime32 +86 64 timerfd_settime sys_timerfd_settime +87 time32 timerfd_gettime sys_timerfd_gettime32 +87 64 timerfd_gettime sys_timerfd_gettime +88 time32 utimensat sys_utimensat_time32 +88 64 utimensat sys_utimensat +89 common acct sys_acct +90 common capget sys_capget +91 common capset sys_capset +92 common personality sys_personality +93 common exit sys_exit +94 common exit_group sys_exit_group +95 common waitid sys_waitid compat_sys_waitid +96 common set_tid_address sys_set_tid_address +97 common unshare sys_unshare +98 time32 futex sys_futex_time32 +98 64 futex sys_futex +99 common set_robust_list sys_set_robust_list compat_sys_set_robust_list +100 common get_robust_list sys_get_robust_list compat_sys_get_robust_list +101 time32 nanosleep sys_nanosleep_time32 +101 64 nanosleep sys_nanosleep +102 common getitimer sys_getitimer compat_sys_getitimer +103 common setitimer sys_setitimer compat_sys_setitimer +104 common kexec_load sys_kexec_load compat_sys_kexec_load +105 common init_module sys_init_module +106 common delete_module sys_delete_module +107 common timer_create sys_timer_create compat_sys_timer_create +108 time32 timer_gettime sys_timer_gettime32 +108 64 timer_gettime sys_timer_gettime +109 common timer_getoverrun sys_timer_getoverrun +110 time32 timer_settime sys_timer_settime32 +110 64 timer_settime sys_timer_settime +111 common timer_delete sys_timer_delete +112 time32 clock_settime sys_clock_settime32 +112 64 clock_settime sys_clock_settime +113 time32 clock_gettime sys_clock_gettime32 +113 64 clock_gettime sys_clock_gettime +114 time32 clock_getres sys_clock_getres_time32 +114 64 clock_getres sys_clock_getres +115 time32 clock_nanosleep sys_clock_nanosleep_time32 +115 64 clock_nanosleep sys_clock_nanosleep +116 common syslog sys_syslog +117 common ptrace sys_ptrace compat_sys_ptrace +118 common sched_setparam sys_sched_setparam +119 common sched_setscheduler sys_sched_setscheduler +120 common sched_getscheduler sys_sched_getscheduler +121 common sched_getparam sys_sched_getparam +122 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity +123 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity +124 common sched_yield sys_sched_yield +125 common sched_get_priority_max sys_sched_get_priority_max +126 common sched_get_priority_min sys_sched_get_priority_min +127 time32 sched_rr_get_interval sys_sched_rr_get_interval_time32 +127 64 sched_rr_get_interval sys_sched_rr_get_interval +128 common restart_syscall sys_restart_syscall +129 common kill sys_kill +130 common tkill sys_tkill +131 common tgkill sys_tgkill +132 common sigaltstack sys_sigaltstack compat_sys_sigaltstack +133 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend +134 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction +135 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask +136 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending +137 time32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32 +137 64 rt_sigtimedwait sys_rt_sigtimedwait +138 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo +139 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn +140 common setpriority sys_setpriority +141 common getpriority sys_getpriority +142 common reboot sys_reboot +143 common setregid sys_setregid +144 common setgid sys_setgid +145 
common setreuid sys_setreuid +146 common setuid sys_setuid +147 common setresuid sys_setresuid +148 common getresuid sys_getresuid +149 common setresgid sys_setresgid +150 common getresgid sys_getresgid +151 common setfsuid sys_setfsuid +152 common setfsgid sys_setfsgid +153 common times sys_times compat_sys_times +154 common setpgid sys_setpgid +155 common getpgid sys_getpgid +156 common getsid sys_getsid +157 common setsid sys_setsid +158 common getgroups sys_getgroups +159 common setgroups sys_setgroups +160 common uname sys_newuname +161 common sethostname sys_sethostname +162 common setdomainname sys_setdomainname +# getrlimit and setrlimit are superseded with prlimit64 +163 rlimit getrlimit sys_getrlimit compat_sys_getrlimit +164 rlimit setrlimit sys_setrlimit compat_sys_setrlimit +165 common getrusage sys_getrusage compat_sys_getrusage +166 common umask sys_umask +167 common prctl sys_prctl +168 common getcpu sys_getcpu +169 time32 gettimeofday sys_gettimeofday compat_sys_gettimeofday +169 64 gettimeofday sys_gettimeofday +170 time32 settimeofday sys_settimeofday compat_sys_settimeofday +170 64 settimeofday sys_settimeofday +171 time32 adjtimex sys_adjtimex_time32 +171 64 adjtimex sys_adjtimex +172 common getpid sys_getpid +173 common getppid sys_getppid +174 common getuid sys_getuid +175 common geteuid sys_geteuid +176 common getgid sys_getgid +177 common getegid sys_getegid +178 common gettid sys_gettid +179 common sysinfo sys_sysinfo compat_sys_sysinfo +180 common mq_open sys_mq_open compat_sys_mq_open +181 common mq_unlink sys_mq_unlink +182 time32 mq_timedsend sys_mq_timedsend_time32 +182 64 mq_timedsend sys_mq_timedsend +183 time32 mq_timedreceive sys_mq_timedreceive_time32 +183 64 mq_timedreceive sys_mq_timedreceive +184 common mq_notify sys_mq_notify compat_sys_mq_notify +185 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr +186 common msgget sys_msgget +187 common msgctl sys_msgctl compat_sys_msgctl +188 common msgrcv sys_msgrcv compat_sys_msgrcv +189 common msgsnd sys_msgsnd compat_sys_msgsnd +190 common semget sys_semget +191 common semctl sys_semctl compat_sys_semctl +192 time32 semtimedop sys_semtimedop_time32 +192 64 semtimedop sys_semtimedop +193 common semop sys_semop +194 common shmget sys_shmget +195 common shmctl sys_shmctl compat_sys_shmctl +196 common shmat sys_shmat compat_sys_shmat +197 common shmdt sys_shmdt +198 common socket sys_socket +199 common socketpair sys_socketpair +200 common bind sys_bind +201 common listen sys_listen +202 common accept sys_accept +203 common connect sys_connect +204 common getsockname sys_getsockname +205 common getpeername sys_getpeername +206 common sendto sys_sendto +207 common recvfrom sys_recvfrom compat_sys_recvfrom +208 common setsockopt sys_setsockopt sys_setsockopt +209 common getsockopt sys_getsockopt sys_getsockopt +210 common shutdown sys_shutdown +211 common sendmsg sys_sendmsg compat_sys_sendmsg +212 common recvmsg sys_recvmsg compat_sys_recvmsg +213 common readahead sys_readahead compat_sys_readahead +214 common brk sys_brk +215 common munmap sys_munmap +216 common mremap sys_mremap +217 common add_key sys_add_key +218 common request_key sys_request_key +219 common keyctl sys_keyctl compat_sys_keyctl +220 common clone sys_clone +221 common execve sys_execve compat_sys_execve +222 32 mmap2 sys_mmap2 +222 64 mmap sys_mmap +223 32 fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64 +223 64 fadvise64 sys_fadvise64_64 +224 common swapon sys_swapon +225 common swapoff sys_swapoff +226 common mprotect 
sys_mprotect +227 common msync sys_msync +228 common mlock sys_mlock +229 common munlock sys_munlock +230 common mlockall sys_mlockall +231 common munlockall sys_munlockall +232 common mincore sys_mincore +233 common madvise sys_madvise +234 common remap_file_pages sys_remap_file_pages +235 common mbind sys_mbind +236 common get_mempolicy sys_get_mempolicy +237 common set_mempolicy sys_set_mempolicy +238 common migrate_pages sys_migrate_pages +239 common move_pages sys_move_pages +240 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo +241 common perf_event_open sys_perf_event_open +242 common accept4 sys_accept4 +243 time32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 +243 64 recvmmsg sys_recvmmsg +# Architectures may provide up to 16 syscalls of their own between 244 and 259 +244 arc cacheflush sys_cacheflush +245 arc arc_settls sys_arc_settls +246 arc arc_gettls sys_arc_gettls +247 arc sysfs sys_sysfs +248 arc arc_usr_cmpxchg sys_arc_usr_cmpxchg + +244 csky set_thread_area sys_set_thread_area +245 csky cacheflush sys_cacheflush + +244 nios2 cacheflush sys_cacheflush + +244 or1k or1k_atomic sys_or1k_atomic + +258 riscv riscv_hwprobe sys_riscv_hwprobe +259 riscv riscv_flush_icache sys_riscv_flush_icache + +260 time32 wait4 sys_wait4 compat_sys_wait4 +260 64 wait4 sys_wait4 +261 common prlimit64 sys_prlimit64 +262 common fanotify_init sys_fanotify_init +263 common fanotify_mark sys_fanotify_mark +264 common name_to_handle_at sys_name_to_handle_at +265 common open_by_handle_at sys_open_by_handle_at +266 time32 clock_adjtime sys_clock_adjtime32 +266 64 clock_adjtime sys_clock_adjtime +267 common syncfs sys_syncfs +268 common setns sys_setns +269 common sendmmsg sys_sendmmsg compat_sys_sendmmsg +270 common process_vm_readv sys_process_vm_readv +271 common process_vm_writev sys_process_vm_writev +272 common kcmp sys_kcmp +273 common finit_module sys_finit_module +274 common sched_setattr sys_sched_setattr +275 common sched_getattr sys_sched_getattr +276 common renameat2 sys_renameat2 +277 common seccomp sys_seccomp +278 common getrandom sys_getrandom +279 common memfd_create sys_memfd_create +280 common bpf sys_bpf +281 common execveat sys_execveat compat_sys_execveat +282 common userfaultfd sys_userfaultfd +283 common membarrier sys_membarrier +284 common mlock2 sys_mlock2 +285 common copy_file_range sys_copy_file_range +286 common preadv2 sys_preadv2 compat_sys_preadv2 +287 common pwritev2 sys_pwritev2 compat_sys_pwritev2 +288 common pkey_mprotect sys_pkey_mprotect +289 common pkey_alloc sys_pkey_alloc +290 common pkey_free sys_pkey_free +291 common statx sys_statx +292 time32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents +292 64 io_pgetevents sys_io_pgetevents +293 common rseq sys_rseq +294 common kexec_file_load sys_kexec_file_load +# 295 through 402 are unassigned to sync up with generic numbers don't use +403 32 clock_gettime64 sys_clock_gettime +404 32 clock_settime64 sys_clock_settime +405 32 clock_adjtime64 sys_clock_adjtime +406 32 clock_getres_time64 sys_clock_getres +407 32 clock_nanosleep_time64 sys_clock_nanosleep +408 32 timer_gettime64 sys_timer_gettime +409 32 timer_settime64 sys_timer_settime +410 32 timerfd_gettime64 sys_timerfd_gettime +411 32 timerfd_settime64 sys_timerfd_settime +412 32 utimensat_time64 sys_utimensat +413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 +414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 +416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 +417 32 
recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 +418 32 mq_timedsend_time64 sys_mq_timedsend +419 32 mq_timedreceive_time64 sys_mq_timedreceive +420 32 semtimedop_time64 sys_semtimedop +421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 +422 32 futex_time64 sys_futex +423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval +424 common pidfd_send_signal sys_pidfd_send_signal +425 common io_uring_setup sys_io_uring_setup +426 common io_uring_enter sys_io_uring_enter +427 common io_uring_register sys_io_uring_register +428 common open_tree sys_open_tree +429 common move_mount sys_move_mount +430 common fsopen sys_fsopen +431 common fsconfig sys_fsconfig +432 common fsmount sys_fsmount +433 common fspick sys_fspick +434 common pidfd_open sys_pidfd_open +435 common clone3 sys_clone3 +436 common close_range sys_close_range +437 common openat2 sys_openat2 +438 common pidfd_getfd sys_pidfd_getfd +439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise +441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr +443 common quotactl_fd sys_quotactl_fd +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self +447 memfd_secret memfd_secret sys_memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/linux-user/riscv/syscall32_nr.h b/linux-user/riscv/syscall32_nr.h deleted file mode 100644 index 412e58e5b2f..00000000000 --- a/linux-user/riscv/syscall32_nr.h +++ /dev/null @@ -1,308 +0,0 @@ -/* - * This file contains the system call numbers. - * Do not modify. 
- * This file is generated by scripts/gensyscalls.sh - */ -#ifndef LINUX_USER_RISCV_SYSCALL32_NR_H -#define LINUX_USER_RISCV_SYSCALL32_NR_H - -#define TARGET_NR_io_setup 0 -#define TARGET_NR_io_destroy 1 -#define TARGET_NR_io_submit 2 -#define TARGET_NR_io_cancel 3 -#define TARGET_NR_setxattr 5 -#define TARGET_NR_lsetxattr 6 -#define TARGET_NR_fsetxattr 7 -#define TARGET_NR_getxattr 8 -#define TARGET_NR_lgetxattr 9 -#define TARGET_NR_fgetxattr 10 -#define TARGET_NR_listxattr 11 -#define TARGET_NR_llistxattr 12 -#define TARGET_NR_flistxattr 13 -#define TARGET_NR_removexattr 14 -#define TARGET_NR_lremovexattr 15 -#define TARGET_NR_fremovexattr 16 -#define TARGET_NR_getcwd 17 -#define TARGET_NR_lookup_dcookie 18 -#define TARGET_NR_eventfd2 19 -#define TARGET_NR_epoll_create1 20 -#define TARGET_NR_epoll_ctl 21 -#define TARGET_NR_epoll_pwait 22 -#define TARGET_NR_dup 23 -#define TARGET_NR_dup3 24 -#define TARGET_NR_fcntl64 25 -#define TARGET_NR_inotify_init1 26 -#define TARGET_NR_inotify_add_watch 27 -#define TARGET_NR_inotify_rm_watch 28 -#define TARGET_NR_ioctl 29 -#define TARGET_NR_ioprio_set 30 -#define TARGET_NR_ioprio_get 31 -#define TARGET_NR_flock 32 -#define TARGET_NR_mknodat 33 -#define TARGET_NR_mkdirat 34 -#define TARGET_NR_unlinkat 35 -#define TARGET_NR_symlinkat 36 -#define TARGET_NR_linkat 37 -#define TARGET_NR_umount2 39 -#define TARGET_NR_mount 40 -#define TARGET_NR_pivot_root 41 -#define TARGET_NR_nfsservctl 42 -#define TARGET_NR_statfs64 43 -#define TARGET_NR_fstatfs64 44 -#define TARGET_NR_truncate64 45 -#define TARGET_NR_ftruncate64 46 -#define TARGET_NR_fallocate 47 -#define TARGET_NR_faccessat 48 -#define TARGET_NR_chdir 49 -#define TARGET_NR_fchdir 50 -#define TARGET_NR_chroot 51 -#define TARGET_NR_fchmod 52 -#define TARGET_NR_fchmodat 53 -#define TARGET_NR_fchownat 54 -#define TARGET_NR_fchown 55 -#define TARGET_NR_openat 56 -#define TARGET_NR_close 57 -#define TARGET_NR_vhangup 58 -#define TARGET_NR_pipe2 59 -#define TARGET_NR_quotactl 60 -#define TARGET_NR_getdents64 61 -#define TARGET_NR_llseek 62 -#define TARGET_NR_read 63 -#define TARGET_NR_write 64 -#define TARGET_NR_readv 65 -#define TARGET_NR_writev 66 -#define TARGET_NR_pread64 67 -#define TARGET_NR_pwrite64 68 -#define TARGET_NR_preadv 69 -#define TARGET_NR_pwritev 70 -#define TARGET_NR_sendfile64 71 -#define TARGET_NR_signalfd4 74 -#define TARGET_NR_vmsplice 75 -#define TARGET_NR_splice 76 -#define TARGET_NR_tee 77 -#define TARGET_NR_readlinkat 78 -#define TARGET_NR_fstatat64 79 -#define TARGET_NR_fstat64 80 -#define TARGET_NR_sync 81 -#define TARGET_NR_fsync 82 -#define TARGET_NR_fdatasync 83 -#define TARGET_NR_sync_file_range 84 -#define TARGET_NR_timerfd_create 85 -#define TARGET_NR_acct 89 -#define TARGET_NR_capget 90 -#define TARGET_NR_capset 91 -#define TARGET_NR_personality 92 -#define TARGET_NR_exit 93 -#define TARGET_NR_exit_group 94 -#define TARGET_NR_waitid 95 -#define TARGET_NR_set_tid_address 96 -#define TARGET_NR_unshare 97 -#define TARGET_NR_set_robust_list 99 -#define TARGET_NR_get_robust_list 100 -#define TARGET_NR_getitimer 102 -#define TARGET_NR_setitimer 103 -#define TARGET_NR_kexec_load 104 -#define TARGET_NR_init_module 105 -#define TARGET_NR_delete_module 106 -#define TARGET_NR_timer_create 107 -#define TARGET_NR_timer_getoverrun 109 -#define TARGET_NR_timer_delete 111 -#define TARGET_NR_syslog 116 -#define TARGET_NR_ptrace 117 -#define TARGET_NR_sched_setparam 118 -#define TARGET_NR_sched_setscheduler 119 -#define TARGET_NR_sched_getscheduler 120 -#define TARGET_NR_sched_getparam 121 
-#define TARGET_NR_sched_setaffinity 122 -#define TARGET_NR_sched_getaffinity 123 -#define TARGET_NR_sched_yield 124 -#define TARGET_NR_sched_get_priority_max 125 -#define TARGET_NR_sched_get_priority_min 126 -#define TARGET_NR_restart_syscall 128 -#define TARGET_NR_kill 129 -#define TARGET_NR_tkill 130 -#define TARGET_NR_tgkill 131 -#define TARGET_NR_sigaltstack 132 -#define TARGET_NR_rt_sigsuspend 133 -#define TARGET_NR_rt_sigaction 134 -#define TARGET_NR_rt_sigprocmask 135 -#define TARGET_NR_rt_sigpending 136 -#define TARGET_NR_rt_sigqueueinfo 138 -#define TARGET_NR_rt_sigreturn 139 -#define TARGET_NR_setpriority 140 -#define TARGET_NR_getpriority 141 -#define TARGET_NR_reboot 142 -#define TARGET_NR_setregid 143 -#define TARGET_NR_setgid 144 -#define TARGET_NR_setreuid 145 -#define TARGET_NR_setuid 146 -#define TARGET_NR_setresuid 147 -#define TARGET_NR_getresuid 148 -#define TARGET_NR_setresgid 149 -#define TARGET_NR_getresgid 150 -#define TARGET_NR_setfsuid 151 -#define TARGET_NR_setfsgid 152 -#define TARGET_NR_times 153 -#define TARGET_NR_setpgid 154 -#define TARGET_NR_getpgid 155 -#define TARGET_NR_getsid 156 -#define TARGET_NR_setsid 157 -#define TARGET_NR_getgroups 158 -#define TARGET_NR_setgroups 159 -#define TARGET_NR_uname 160 -#define TARGET_NR_sethostname 161 -#define TARGET_NR_setdomainname 162 -#define TARGET_NR_getrlimit 163 -#define TARGET_NR_setrlimit 164 -#define TARGET_NR_getrusage 165 -#define TARGET_NR_umask 166 -#define TARGET_NR_prctl 167 -#define TARGET_NR_getcpu 168 -#define TARGET_NR_getpid 172 -#define TARGET_NR_getppid 173 -#define TARGET_NR_getuid 174 -#define TARGET_NR_geteuid 175 -#define TARGET_NR_getgid 176 -#define TARGET_NR_getegid 177 -#define TARGET_NR_gettid 178 -#define TARGET_NR_sysinfo 179 -#define TARGET_NR_mq_open 180 -#define TARGET_NR_mq_unlink 181 -#define TARGET_NR_mq_notify 184 -#define TARGET_NR_mq_getsetattr 185 -#define TARGET_NR_msgget 186 -#define TARGET_NR_msgctl 187 -#define TARGET_NR_msgrcv 188 -#define TARGET_NR_msgsnd 189 -#define TARGET_NR_semget 190 -#define TARGET_NR_semctl 191 -#define TARGET_NR_semop 193 -#define TARGET_NR_shmget 194 -#define TARGET_NR_shmctl 195 -#define TARGET_NR_shmat 196 -#define TARGET_NR_shmdt 197 -#define TARGET_NR_socket 198 -#define TARGET_NR_socketpair 199 -#define TARGET_NR_bind 200 -#define TARGET_NR_listen 201 -#define TARGET_NR_accept 202 -#define TARGET_NR_connect 203 -#define TARGET_NR_getsockname 204 -#define TARGET_NR_getpeername 205 -#define TARGET_NR_sendto 206 -#define TARGET_NR_recvfrom 207 -#define TARGET_NR_setsockopt 208 -#define TARGET_NR_getsockopt 209 -#define TARGET_NR_shutdown 210 -#define TARGET_NR_sendmsg 211 -#define TARGET_NR_recvmsg 212 -#define TARGET_NR_readahead 213 -#define TARGET_NR_brk 214 -#define TARGET_NR_munmap 215 -#define TARGET_NR_mremap 216 -#define TARGET_NR_add_key 217 -#define TARGET_NR_request_key 218 -#define TARGET_NR_keyctl 219 -#define TARGET_NR_clone 220 -#define TARGET_NR_execve 221 -#define TARGET_NR_mmap2 222 -#define TARGET_NR_fadvise64_64 223 -#define TARGET_NR_swapon 224 -#define TARGET_NR_swapoff 225 -#define TARGET_NR_mprotect 226 -#define TARGET_NR_msync 227 -#define TARGET_NR_mlock 228 -#define TARGET_NR_munlock 229 -#define TARGET_NR_mlockall 230 -#define TARGET_NR_munlockall 231 -#define TARGET_NR_mincore 232 -#define TARGET_NR_madvise 233 -#define TARGET_NR_remap_file_pages 234 -#define TARGET_NR_mbind 235 -#define TARGET_NR_get_mempolicy 236 -#define TARGET_NR_set_mempolicy 237 -#define TARGET_NR_migrate_pages 238 -#define 
TARGET_NR_move_pages 239 -#define TARGET_NR_rt_tgsigqueueinfo 240 -#define TARGET_NR_perf_event_open 241 -#define TARGET_NR_accept4 242 -#define TARGET_NR_arch_specific_syscall 244 -#define TARGET_NR_riscv_flush_icache (TARGET_NR_arch_specific_syscall + 15) -#define TARGET_NR_riscv_hwprobe (TARGET_NR_arch_specific_syscall + 14) -#define TARGET_NR_prlimit64 261 -#define TARGET_NR_fanotify_init 262 -#define TARGET_NR_fanotify_mark 263 -#define TARGET_NR_name_to_handle_at 264 -#define TARGET_NR_open_by_handle_at 265 -#define TARGET_NR_syncfs 267 -#define TARGET_NR_setns 268 -#define TARGET_NR_sendmmsg 269 -#define TARGET_NR_process_vm_readv 270 -#define TARGET_NR_process_vm_writev 271 -#define TARGET_NR_kcmp 272 -#define TARGET_NR_finit_module 273 -#define TARGET_NR_sched_setattr 274 -#define TARGET_NR_sched_getattr 275 -#define TARGET_NR_renameat2 276 -#define TARGET_NR_seccomp 277 -#define TARGET_NR_getrandom 278 -#define TARGET_NR_memfd_create 279 -#define TARGET_NR_bpf 280 -#define TARGET_NR_execveat 281 -#define TARGET_NR_userfaultfd 282 -#define TARGET_NR_membarrier 283 -#define TARGET_NR_mlock2 284 -#define TARGET_NR_copy_file_range 285 -#define TARGET_NR_preadv2 286 -#define TARGET_NR_pwritev2 287 -#define TARGET_NR_pkey_mprotect 288 -#define TARGET_NR_pkey_alloc 289 -#define TARGET_NR_pkey_free 290 -#define TARGET_NR_statx 291 -#define TARGET_NR_rseq 293 -#define TARGET_NR_kexec_file_load 294 -#define TARGET_NR_clock_gettime64 403 -#define TARGET_NR_clock_settime64 404 -#define TARGET_NR_clock_adjtime64 405 -#define TARGET_NR_clock_getres_time64 406 -#define TARGET_NR_clock_nanosleep_time64 407 -#define TARGET_NR_timer_gettime64 408 -#define TARGET_NR_timer_settime64 409 -#define TARGET_NR_timerfd_gettime64 410 -#define TARGET_NR_timerfd_settime64 411 -#define TARGET_NR_utimensat_time64 412 -#define TARGET_NR_pselect6_time64 413 -#define TARGET_NR_ppoll_time64 414 -#define TARGET_NR_io_pgetevents_time64 416 -#define TARGET_NR_recvmmsg_time64 417 -#define TARGET_NR_mq_timedsend_time64 418 -#define TARGET_NR_mq_timedreceive_time64 419 -#define TARGET_NR_semtimedop_time64 420 -#define TARGET_NR_rt_sigtimedwait_time64 421 -#define TARGET_NR_futex_time64 422 -#define TARGET_NR_sched_rr_get_interval_time64 423 -#define TARGET_NR_pidfd_send_signal 424 -#define TARGET_NR_io_uring_setup 425 -#define TARGET_NR_io_uring_enter 426 -#define TARGET_NR_io_uring_register 427 -#define TARGET_NR_open_tree 428 -#define TARGET_NR_move_mount 429 -#define TARGET_NR_fsopen 430 -#define TARGET_NR_fsconfig 431 -#define TARGET_NR_fsmount 432 -#define TARGET_NR_fspick 433 -#define TARGET_NR_pidfd_open 434 -#define TARGET_NR_clone3 435 -#define TARGET_NR_close_range 436 -#define TARGET_NR_openat2 437 -#define TARGET_NR_pidfd_getfd 438 -#define TARGET_NR_faccessat2 439 -#define TARGET_NR_process_madvise 440 -#define TARGET_NR_epoll_pwait2 441 -#define TARGET_NR_mount_setattr 442 -#define TARGET_NR_landlock_create_ruleset 444 -#define TARGET_NR_landlock_add_rule 445 -#define TARGET_NR_landlock_restrict_self 446 -#define TARGET_NR_syscalls 447 - -#endif /* LINUX_USER_RISCV_SYSCALL32_NR_H */ diff --git a/linux-user/riscv/syscall64_nr.h b/linux-user/riscv/syscall64_nr.h deleted file mode 100644 index 29e1eb20753..00000000000 --- a/linux-user/riscv/syscall64_nr.h +++ /dev/null @@ -1,314 +0,0 @@ -/* - * This file contains the system call numbers. - * Do not modify. 
- * This file is generated by scripts/gensyscalls.sh - */ -#ifndef LINUX_USER_RISCV_SYSCALL64_NR_H -#define LINUX_USER_RISCV_SYSCALL64_NR_H - -#define TARGET_NR_io_setup 0 -#define TARGET_NR_io_destroy 1 -#define TARGET_NR_io_submit 2 -#define TARGET_NR_io_cancel 3 -#define TARGET_NR_io_getevents 4 -#define TARGET_NR_setxattr 5 -#define TARGET_NR_lsetxattr 6 -#define TARGET_NR_fsetxattr 7 -#define TARGET_NR_getxattr 8 -#define TARGET_NR_lgetxattr 9 -#define TARGET_NR_fgetxattr 10 -#define TARGET_NR_listxattr 11 -#define TARGET_NR_llistxattr 12 -#define TARGET_NR_flistxattr 13 -#define TARGET_NR_removexattr 14 -#define TARGET_NR_lremovexattr 15 -#define TARGET_NR_fremovexattr 16 -#define TARGET_NR_getcwd 17 -#define TARGET_NR_lookup_dcookie 18 -#define TARGET_NR_eventfd2 19 -#define TARGET_NR_epoll_create1 20 -#define TARGET_NR_epoll_ctl 21 -#define TARGET_NR_epoll_pwait 22 -#define TARGET_NR_dup 23 -#define TARGET_NR_dup3 24 -#define TARGET_NR_fcntl 25 -#define TARGET_NR_inotify_init1 26 -#define TARGET_NR_inotify_add_watch 27 -#define TARGET_NR_inotify_rm_watch 28 -#define TARGET_NR_ioctl 29 -#define TARGET_NR_ioprio_set 30 -#define TARGET_NR_ioprio_get 31 -#define TARGET_NR_flock 32 -#define TARGET_NR_mknodat 33 -#define TARGET_NR_mkdirat 34 -#define TARGET_NR_unlinkat 35 -#define TARGET_NR_symlinkat 36 -#define TARGET_NR_linkat 37 -#define TARGET_NR_umount2 39 -#define TARGET_NR_mount 40 -#define TARGET_NR_pivot_root 41 -#define TARGET_NR_nfsservctl 42 -#define TARGET_NR_statfs 43 -#define TARGET_NR_fstatfs 44 -#define TARGET_NR_truncate 45 -#define TARGET_NR_ftruncate 46 -#define TARGET_NR_fallocate 47 -#define TARGET_NR_faccessat 48 -#define TARGET_NR_chdir 49 -#define TARGET_NR_fchdir 50 -#define TARGET_NR_chroot 51 -#define TARGET_NR_fchmod 52 -#define TARGET_NR_fchmodat 53 -#define TARGET_NR_fchownat 54 -#define TARGET_NR_fchown 55 -#define TARGET_NR_openat 56 -#define TARGET_NR_close 57 -#define TARGET_NR_vhangup 58 -#define TARGET_NR_pipe2 59 -#define TARGET_NR_quotactl 60 -#define TARGET_NR_getdents64 61 -#define TARGET_NR_lseek 62 -#define TARGET_NR_read 63 -#define TARGET_NR_write 64 -#define TARGET_NR_readv 65 -#define TARGET_NR_writev 66 -#define TARGET_NR_pread64 67 -#define TARGET_NR_pwrite64 68 -#define TARGET_NR_preadv 69 -#define TARGET_NR_pwritev 70 -#define TARGET_NR_sendfile 71 -#define TARGET_NR_pselect6 72 -#define TARGET_NR_ppoll 73 -#define TARGET_NR_signalfd4 74 -#define TARGET_NR_vmsplice 75 -#define TARGET_NR_splice 76 -#define TARGET_NR_tee 77 -#define TARGET_NR_readlinkat 78 -#define TARGET_NR_newfstatat 79 -#define TARGET_NR_fstat 80 -#define TARGET_NR_sync 81 -#define TARGET_NR_fsync 82 -#define TARGET_NR_fdatasync 83 -#define TARGET_NR_sync_file_range 84 -#define TARGET_NR_timerfd_create 85 -#define TARGET_NR_timerfd_settime 86 -#define TARGET_NR_timerfd_gettime 87 -#define TARGET_NR_utimensat 88 -#define TARGET_NR_acct 89 -#define TARGET_NR_capget 90 -#define TARGET_NR_capset 91 -#define TARGET_NR_personality 92 -#define TARGET_NR_exit 93 -#define TARGET_NR_exit_group 94 -#define TARGET_NR_waitid 95 -#define TARGET_NR_set_tid_address 96 -#define TARGET_NR_unshare 97 -#define TARGET_NR_futex 98 -#define TARGET_NR_set_robust_list 99 -#define TARGET_NR_get_robust_list 100 -#define TARGET_NR_nanosleep 101 -#define TARGET_NR_getitimer 102 -#define TARGET_NR_setitimer 103 -#define TARGET_NR_kexec_load 104 -#define TARGET_NR_init_module 105 -#define TARGET_NR_delete_module 106 -#define TARGET_NR_timer_create 107 -#define TARGET_NR_timer_gettime 108 -#define 
TARGET_NR_timer_getoverrun 109 -#define TARGET_NR_timer_settime 110 -#define TARGET_NR_timer_delete 111 -#define TARGET_NR_clock_settime 112 -#define TARGET_NR_clock_gettime 113 -#define TARGET_NR_clock_getres 114 -#define TARGET_NR_clock_nanosleep 115 -#define TARGET_NR_syslog 116 -#define TARGET_NR_ptrace 117 -#define TARGET_NR_sched_setparam 118 -#define TARGET_NR_sched_setscheduler 119 -#define TARGET_NR_sched_getscheduler 120 -#define TARGET_NR_sched_getparam 121 -#define TARGET_NR_sched_setaffinity 122 -#define TARGET_NR_sched_getaffinity 123 -#define TARGET_NR_sched_yield 124 -#define TARGET_NR_sched_get_priority_max 125 -#define TARGET_NR_sched_get_priority_min 126 -#define TARGET_NR_sched_rr_get_interval 127 -#define TARGET_NR_restart_syscall 128 -#define TARGET_NR_kill 129 -#define TARGET_NR_tkill 130 -#define TARGET_NR_tgkill 131 -#define TARGET_NR_sigaltstack 132 -#define TARGET_NR_rt_sigsuspend 133 -#define TARGET_NR_rt_sigaction 134 -#define TARGET_NR_rt_sigprocmask 135 -#define TARGET_NR_rt_sigpending 136 -#define TARGET_NR_rt_sigtimedwait 137 -#define TARGET_NR_rt_sigqueueinfo 138 -#define TARGET_NR_rt_sigreturn 139 -#define TARGET_NR_setpriority 140 -#define TARGET_NR_getpriority 141 -#define TARGET_NR_reboot 142 -#define TARGET_NR_setregid 143 -#define TARGET_NR_setgid 144 -#define TARGET_NR_setreuid 145 -#define TARGET_NR_setuid 146 -#define TARGET_NR_setresuid 147 -#define TARGET_NR_getresuid 148 -#define TARGET_NR_setresgid 149 -#define TARGET_NR_getresgid 150 -#define TARGET_NR_setfsuid 151 -#define TARGET_NR_setfsgid 152 -#define TARGET_NR_times 153 -#define TARGET_NR_setpgid 154 -#define TARGET_NR_getpgid 155 -#define TARGET_NR_getsid 156 -#define TARGET_NR_setsid 157 -#define TARGET_NR_getgroups 158 -#define TARGET_NR_setgroups 159 -#define TARGET_NR_uname 160 -#define TARGET_NR_sethostname 161 -#define TARGET_NR_setdomainname 162 -#define TARGET_NR_getrlimit 163 -#define TARGET_NR_setrlimit 164 -#define TARGET_NR_getrusage 165 -#define TARGET_NR_umask 166 -#define TARGET_NR_prctl 167 -#define TARGET_NR_getcpu 168 -#define TARGET_NR_gettimeofday 169 -#define TARGET_NR_settimeofday 170 -#define TARGET_NR_adjtimex 171 -#define TARGET_NR_getpid 172 -#define TARGET_NR_getppid 173 -#define TARGET_NR_getuid 174 -#define TARGET_NR_geteuid 175 -#define TARGET_NR_getgid 176 -#define TARGET_NR_getegid 177 -#define TARGET_NR_gettid 178 -#define TARGET_NR_sysinfo 179 -#define TARGET_NR_mq_open 180 -#define TARGET_NR_mq_unlink 181 -#define TARGET_NR_mq_timedsend 182 -#define TARGET_NR_mq_timedreceive 183 -#define TARGET_NR_mq_notify 184 -#define TARGET_NR_mq_getsetattr 185 -#define TARGET_NR_msgget 186 -#define TARGET_NR_msgctl 187 -#define TARGET_NR_msgrcv 188 -#define TARGET_NR_msgsnd 189 -#define TARGET_NR_semget 190 -#define TARGET_NR_semctl 191 -#define TARGET_NR_semtimedop 192 -#define TARGET_NR_semop 193 -#define TARGET_NR_shmget 194 -#define TARGET_NR_shmctl 195 -#define TARGET_NR_shmat 196 -#define TARGET_NR_shmdt 197 -#define TARGET_NR_socket 198 -#define TARGET_NR_socketpair 199 -#define TARGET_NR_bind 200 -#define TARGET_NR_listen 201 -#define TARGET_NR_accept 202 -#define TARGET_NR_connect 203 -#define TARGET_NR_getsockname 204 -#define TARGET_NR_getpeername 205 -#define TARGET_NR_sendto 206 -#define TARGET_NR_recvfrom 207 -#define TARGET_NR_setsockopt 208 -#define TARGET_NR_getsockopt 209 -#define TARGET_NR_shutdown 210 -#define TARGET_NR_sendmsg 211 -#define TARGET_NR_recvmsg 212 -#define TARGET_NR_readahead 213 -#define TARGET_NR_brk 214 -#define 
TARGET_NR_munmap 215 -#define TARGET_NR_mremap 216 -#define TARGET_NR_add_key 217 -#define TARGET_NR_request_key 218 -#define TARGET_NR_keyctl 219 -#define TARGET_NR_clone 220 -#define TARGET_NR_execve 221 -#define TARGET_NR_mmap 222 -#define TARGET_NR_fadvise64 223 -#define TARGET_NR_swapon 224 -#define TARGET_NR_swapoff 225 -#define TARGET_NR_mprotect 226 -#define TARGET_NR_msync 227 -#define TARGET_NR_mlock 228 -#define TARGET_NR_munlock 229 -#define TARGET_NR_mlockall 230 -#define TARGET_NR_munlockall 231 -#define TARGET_NR_mincore 232 -#define TARGET_NR_madvise 233 -#define TARGET_NR_remap_file_pages 234 -#define TARGET_NR_mbind 235 -#define TARGET_NR_get_mempolicy 236 -#define TARGET_NR_set_mempolicy 237 -#define TARGET_NR_migrate_pages 238 -#define TARGET_NR_move_pages 239 -#define TARGET_NR_rt_tgsigqueueinfo 240 -#define TARGET_NR_perf_event_open 241 -#define TARGET_NR_accept4 242 -#define TARGET_NR_recvmmsg 243 -#define TARGET_NR_arch_specific_syscall 244 -#define TARGET_NR_riscv_flush_icache (TARGET_NR_arch_specific_syscall + 15) -#define TARGET_NR_riscv_hwprobe (TARGET_NR_arch_specific_syscall + 14) -#define TARGET_NR_wait4 260 -#define TARGET_NR_prlimit64 261 -#define TARGET_NR_fanotify_init 262 -#define TARGET_NR_fanotify_mark 263 -#define TARGET_NR_name_to_handle_at 264 -#define TARGET_NR_open_by_handle_at 265 -#define TARGET_NR_clock_adjtime 266 -#define TARGET_NR_syncfs 267 -#define TARGET_NR_setns 268 -#define TARGET_NR_sendmmsg 269 -#define TARGET_NR_process_vm_readv 270 -#define TARGET_NR_process_vm_writev 271 -#define TARGET_NR_kcmp 272 -#define TARGET_NR_finit_module 273 -#define TARGET_NR_sched_setattr 274 -#define TARGET_NR_sched_getattr 275 -#define TARGET_NR_renameat2 276 -#define TARGET_NR_seccomp 277 -#define TARGET_NR_getrandom 278 -#define TARGET_NR_memfd_create 279 -#define TARGET_NR_bpf 280 -#define TARGET_NR_execveat 281 -#define TARGET_NR_userfaultfd 282 -#define TARGET_NR_membarrier 283 -#define TARGET_NR_mlock2 284 -#define TARGET_NR_copy_file_range 285 -#define TARGET_NR_preadv2 286 -#define TARGET_NR_pwritev2 287 -#define TARGET_NR_pkey_mprotect 288 -#define TARGET_NR_pkey_alloc 289 -#define TARGET_NR_pkey_free 290 -#define TARGET_NR_statx 291 -#define TARGET_NR_io_pgetevents 292 -#define TARGET_NR_rseq 293 -#define TARGET_NR_kexec_file_load 294 -#define TARGET_NR_pidfd_send_signal 424 -#define TARGET_NR_io_uring_setup 425 -#define TARGET_NR_io_uring_enter 426 -#define TARGET_NR_io_uring_register 427 -#define TARGET_NR_open_tree 428 -#define TARGET_NR_move_mount 429 -#define TARGET_NR_fsopen 430 -#define TARGET_NR_fsconfig 431 -#define TARGET_NR_fsmount 432 -#define TARGET_NR_fspick 433 -#define TARGET_NR_pidfd_open 434 -#define TARGET_NR_clone3 435 -#define TARGET_NR_close_range 436 -#define TARGET_NR_openat2 437 -#define TARGET_NR_pidfd_getfd 438 -#define TARGET_NR_faccessat2 439 -#define TARGET_NR_process_madvise 440 -#define TARGET_NR_epoll_pwait2 441 -#define TARGET_NR_mount_setattr 442 -#define TARGET_NR_landlock_create_ruleset 444 -#define TARGET_NR_landlock_add_rule 445 -#define TARGET_NR_landlock_restrict_self 446 -#define TARGET_NR_syscalls 447 - -#endif /* LINUX_USER_RISCV_SYSCALL64_NR_H */ diff --git a/linux-user/riscv/syscall_nr.h b/linux-user/riscv/syscall_nr.h deleted file mode 100644 index 0a5a2f2fb16..00000000000 --- a/linux-user/riscv/syscall_nr.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Syscall numbers from asm-generic, common for most - * of recently-added arches including RISC-V. 
- */ - -#ifndef LINUX_USER_RISCV_SYSCALL_NR_H -#define LINUX_USER_RISCV_SYSCALL_NR_H - -#ifdef TARGET_RISCV32 -# include "syscall32_nr.h" -#else -# include "syscall64_nr.h" -#endif - -#endif diff --git a/linux-user/riscv/syscallhdr.sh b/linux-user/riscv/syscallhdr.sh new file mode 100644 index 00000000000..4069dc59b68 --- /dev/null +++ b/linux-user/riscv/syscallhdr.sh @@ -0,0 +1,28 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +prefix="$4" +offset="$5" + +fileguard=LINUX_USER_X86_64_`basename "$out" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + echo "#ifndef ${fileguard}" + echo "#define ${fileguard} 1" + echo "" + + while read nr abi name entry compat ; do + if [ -z "$offset" ]; then + echo "#define TARGET_NR_${prefix}${name} $nr" + else + echo "#define TARGET_NR_${prefix}${name} ($offset + $nr)" + fi + done + + echo "" + echo "#endif /* ${fileguard} */" +) > "$out" diff --git a/linux-user/s390x/cpu_loop.c b/linux-user/s390x/cpu_loop.c index 8b7ac2879ef..c9124444ed2 100644 --- a/linux-user/s390x/cpu_loop.c +++ b/linux-user/s390x/cpu_loop.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" @@ -180,7 +180,7 @@ void cpu_loop(CPUS390XState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { int i; for (i = 0; i < 16; i++) { diff --git a/linux-user/s390x/syscall.tbl b/linux-user/s390x/syscall.tbl index 0690263df1d..8e0d1f1c2aa 100644 --- a/linux-user/s390x/syscall.tbl +++ b/linux-user/s390x/syscall.tbl @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note # # System call table for s390 # @@ -100,7 +100,7 @@ 106 common stat sys_newstat compat_sys_newstat 107 common lstat sys_newlstat compat_sys_newlstat 108 common fstat sys_newfstat compat_sys_newfstat -110 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +110 common lookup_dcookie - - 111 common vhangup sys_vhangup sys_vhangup 112 common idle - - 114 common wait4 sys_wait4 compat_sys_wait4 @@ -122,7 +122,7 @@ 131 common quotactl sys_quotactl sys_quotactl 132 common getpgid sys_getpgid sys_getpgid 133 common fchdir sys_fchdir sys_fchdir -134 common bdflush sys_bdflush sys_bdflush +134 common bdflush sys_ni_syscall sys_ni_syscall 135 common sysfs sys_sysfs sys_sysfs 136 common personality sys_s390_personality sys_s390_personality 137 common afs_syscall - - @@ -274,9 +274,9 @@ 265 common statfs64 sys_statfs64 compat_sys_statfs64 266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 267 common remap_file_pages sys_remap_file_pages sys_remap_file_pages -268 common mbind sys_mbind compat_sys_mbind -269 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy -270 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +268 common mbind sys_mbind sys_mbind +269 common get_mempolicy sys_get_mempolicy sys_get_mempolicy +270 common set_mempolicy sys_set_mempolicy sys_set_mempolicy 271 common mq_open sys_mq_open compat_sys_mq_open 272 common mq_unlink sys_mq_unlink sys_mq_unlink 273 common mq_timedsend sys_mq_timedsend sys_mq_timedsend_time32 @@ -293,7 +293,7 @@ 284 common inotify_init sys_inotify_init 
sys_inotify_init 285 common inotify_add_watch sys_inotify_add_watch sys_inotify_add_watch 286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch -287 common migrate_pages sys_migrate_pages compat_sys_migrate_pages +287 common migrate_pages sys_migrate_pages sys_migrate_pages 288 common openat sys_openat compat_sys_openat 289 common mkdirat sys_mkdirat sys_mkdirat 290 common mknodat sys_mknodat sys_mknodat @@ -317,7 +317,7 @@ 307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range 308 common tee sys_tee sys_tee 309 common vmsplice sys_vmsplice sys_vmsplice -310 common move_pages sys_move_pages compat_sys_move_pages +310 common move_pages sys_move_pages sys_move_pages 311 common getcpu sys_getcpu sys_getcpu 312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait 313 common utimes sys_utimes sys_utimes_time32 @@ -418,7 +418,7 @@ 412 32 utimensat_time64 - sys_utimensat 413 32 pselect6_time64 - compat_sys_pselect6_time64 414 32 ppoll_time64 - compat_sys_ppoll_time64 -416 32 io_pgetevents_time64 - sys_io_pgetevents +416 32 io_pgetevents_time64 - compat_sys_io_pgetevents_time64 417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64 418 32 mq_timedsend_time64 - sys_mq_timedsend 419 32 mq_timedreceive_time64 - sys_mq_timedreceive @@ -445,7 +445,23 @@ 440 common process_madvise sys_process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 common quotactl_fd sys_quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self +447 common memfd_secret sys_memfd_secret sys_memfd_secret +448 common process_mrelease sys_process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue sys_futex_requeue +457 common statmount sys_statmount sys_statmount +458 common listmount sys_listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal sys_mseal diff --git a/linux-user/s390x/syscallhdr.sh b/linux-user/s390x/syscallhdr.sh index 85a99c48dee..ac22d422b0b 100755 --- a/linux-user/s390x/syscallhdr.sh +++ b/linux-user/s390x/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/s390x/target_signal.h b/linux-user/s390x/target_signal.h index 41e0e34a55d..738e0673f4a 100644 --- a/linux-user/s390x/target_signal.h +++ b/linux-user/s390x/target_signal.h @@ -3,6 +3,8 @@ #include "../generic/signal.h" +#define TARGET_SA_RESTORER 0x04000000 + #define TARGET_ARCH_HAS_SETUP_FRAME #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 diff --git a/linux-user/sh4/cpu_loop.c b/linux-user/sh4/cpu_loop.c index 
c805f9db110..ee9eff3428a 100644 --- a/linux-user/sh4/cpu_loop.c +++ b/linux-user/sh4/cpu_loop.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" void cpu_loop(CPUSH4State *env) @@ -81,7 +81,7 @@ void cpu_loop(CPUSH4State *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { int i; diff --git a/linux-user/sh4/syscall.tbl b/linux-user/sh4/syscall.tbl index 0b91499ebdc..cf4ec0493d0 100644 --- a/linux-user/sh4/syscall.tbl +++ b/linux-user/sh4/syscall.tbl @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note # # system call numbers and entry vectors for sh # @@ -141,7 +141,7 @@ 131 common quotactl sys_quotactl 132 common getpgid sys_getpgid 133 common fchdir sys_fchdir -134 common bdflush sys_bdflush +134 common bdflush sys_ni_syscall 135 common sysfs sys_sysfs 136 common personality sys_personality # 137 was afs_syscall @@ -260,7 +260,7 @@ 250 common fadvise64 sys_fadvise64 # 251 is unused 252 common exit_group sys_exit_group -253 common lookup_dcookie sys_lookup_dcookie +253 common lookup_dcookie sys_ni_syscall 254 common epoll_create sys_epoll_create 255 common epoll_ctl sys_epoll_ctl 256 common epoll_wait sys_epoll_wait @@ -321,7 +321,7 @@ 311 common set_robust_list sys_set_robust_list 312 common get_robust_list sys_get_robust_list 313 common splice sys_splice -314 common sync_file_range sys_sync_file_range +314 common sync_file_range sys_sh_sync_file_range6 315 common tee sys_tee 316 common vmsplice sys_vmsplice 317 common move_pages sys_move_pages @@ -395,6 +395,7 @@ 385 common pkey_alloc sys_pkey_alloc 386 common pkey_free sys_pkey_free 387 common rseq sys_rseq +388 common sync_file_range2 sys_sync_file_range2 # room for arch specific syscalls 393 common semget sys_semget 394 common semctl sys_semctl @@ -445,7 +446,23 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 common quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/linux-user/sh4/syscallhdr.sh b/linux-user/sh4/syscallhdr.sh index 080790556ad..cb3a5de7110 100644 --- a/linux-user/sh4/syscallhdr.sh +++ b/linux-user/sh4/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/sh4/target_signal.h b/linux-user/sh4/target_signal.h index eee6a1a7cda..0bde417fd1f 100644 --- 
a/linux-user/sh4/target_signal.h +++ b/linux-user/sh4/target_signal.h @@ -3,6 +3,8 @@ #include "../generic/signal.h" +#define TARGET_SA_RESTORER 0x04000000 + #define TARGET_ARCH_HAS_SETUP_FRAME #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 diff --git a/linux-user/signal-common.h b/linux-user/signal-common.h index f4cbe6185e1..196d2406f86 100644 --- a/linux-user/signal-common.h +++ b/linux-user/signal-common.h @@ -56,12 +56,11 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUArchState *env); void process_pending_signals(CPUArchState *cpu_env); -void signal_init(void); +void signal_init(const char *rtsig_map); void queue_signal(CPUArchState *env, int sig, int si_type, target_siginfo_t *info); void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info); void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo); -int target_to_host_signal(int sig); int host_to_target_signal(int sig); long do_sigreturn(CPUArchState *env); long do_rt_sigreturn(CPUArchState *env); diff --git a/linux-user/signal.c b/linux-user/signal.c index 63ac2df53b7..cd0e7398aa4 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -18,9 +18,10 @@ */ #include "qemu/osdep.h" #include "qemu/bitops.h" +#include "qemu/cutils.h" #include "gdbstub/user.h" #include "exec/page-protection.h" -#include "hw/core/tcg-cpu-ops.h" +#include "accel/tcg/cpu-ops.h" #include #include @@ -32,7 +33,10 @@ #include "trace.h" #include "signal-common.h" #include "host-signal.h" +#include "user/cpu_loop.h" +#include "user/page-protection.h" #include "user/safe-syscall.h" +#include "user/signal.h" #include "tcg/tcg.h" /* target_siginfo_t must fit in gdbstub's siginfo save area. */ @@ -513,20 +517,83 @@ static int core_dump_signal(int sig) } } -static void signal_table_init(void) +int host_interrupt_signal; + +static void signal_table_init(const char *rtsig_map) { int hsig, tsig, count; + if (rtsig_map) { + /* + * Map host RT signals to target RT signals according to the + * user-provided specification. + */ + const char *s = rtsig_map; + + while (true) { + int i; + + if (qemu_strtoi(s, &s, 10, &tsig) || *s++ != ' ') { + fprintf(stderr, "Malformed target signal in QEMU_RTSIG_MAP\n"); + exit(EXIT_FAILURE); + } + if (qemu_strtoi(s, &s, 10, &hsig) || *s++ != ' ') { + fprintf(stderr, "Malformed host signal in QEMU_RTSIG_MAP\n"); + exit(EXIT_FAILURE); + } + if (qemu_strtoi(s, &s, 10, &count) || (*s && *s != ',')) { + fprintf(stderr, "Malformed signal count in QEMU_RTSIG_MAP\n"); + exit(EXIT_FAILURE); + } + + for (i = 0; i < count; i++, tsig++, hsig++) { + if (tsig < TARGET_SIGRTMIN || tsig > TARGET_NSIG) { + fprintf(stderr, "%d is not a target rt signal\n", tsig); + exit(EXIT_FAILURE); + } + if (hsig < SIGRTMIN || hsig > SIGRTMAX) { + fprintf(stderr, "%d is not a host rt signal\n", hsig); + exit(EXIT_FAILURE); + } + if (host_to_target_signal_table[hsig]) { + fprintf(stderr, "%d already maps %d\n", + hsig, host_to_target_signal_table[hsig]); + exit(EXIT_FAILURE); + } + host_to_target_signal_table[hsig] = tsig; + } + + if (*s) { + s++; + } else { + break; + } + } + } else { + /* + * Default host-to-target RT signal mapping. + * + * Signals are supported starting from TARGET_SIGRTMIN and going up + * until we run out of host realtime signals. Glibc uses the lower 2 + * RT signals and (hopefully) nobody uses the upper ones. + * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32). 
+ * To fix this properly we would need to do manual signal delivery + * multiplexed over a single host signal. + * Attempts for configure "missing" signals via sigaction will be + * silently ignored. + * + * Reserve two signals for internal usage (see below). + */ + + hsig = SIGRTMIN + 2; + for (tsig = TARGET_SIGRTMIN; + hsig <= SIGRTMAX && tsig <= TARGET_NSIG; + hsig++, tsig++) { + host_to_target_signal_table[hsig] = tsig; + } + } + /* - * Signals are supported starting from TARGET_SIGRTMIN and going up - * until we run out of host realtime signals. Glibc uses the lower 2 - * RT signals and (hopefully) nobody uses the upper ones. - * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32). - * To fix this properly we would need to do manual signal delivery - * multiplexed over a single host signal. - * Attempts for configure "missing" signals via sigaction will be - * silently ignored. - * * Remap the target SIGABRT, so that we can distinguish host abort * from guest abort. When the guest registers a signal handler or * calls raise(SIGABRT), the host will raise SIG_RTn. If the guest @@ -536,21 +603,32 @@ static void signal_table_init(void) * parent sees the correct mapping from wait status. */ - hsig = SIGRTMIN; host_to_target_signal_table[SIGABRT] = 0; - host_to_target_signal_table[hsig++] = TARGET_SIGABRT; - - for (tsig = TARGET_SIGRTMIN; - hsig <= SIGRTMAX && tsig <= TARGET_NSIG; - hsig++, tsig++) { - host_to_target_signal_table[hsig] = tsig; + for (hsig = SIGRTMIN; hsig <= SIGRTMAX; hsig++) { + if (!host_to_target_signal_table[hsig]) { + if (host_interrupt_signal) { + host_to_target_signal_table[hsig] = TARGET_SIGABRT; + break; + } else { + host_interrupt_signal = hsig; + } + } + } + if (hsig > SIGRTMAX) { + fprintf(stderr, + "No rt signals left for interrupt and SIGABRT mapping\n"); + exit(EXIT_FAILURE); } /* Invert the mapping that has already been assigned. */ for (hsig = 1; hsig < _NSIG; hsig++) { tsig = host_to_target_signal_table[hsig]; if (tsig) { - assert(target_to_host_signal_table[tsig] == 0); + if (target_to_host_signal_table[tsig]) { + fprintf(stderr, "%d is already mapped to %d\n", + tsig, target_to_host_signal_table[tsig]); + exit(EXIT_FAILURE); + } target_to_host_signal_table[tsig] = hsig; } } @@ -573,13 +651,13 @@ static void signal_table_init(void) trace_signal_table_init(count); } -void signal_init(void) +void signal_init(const char *rtsig_map) { TaskState *ts = get_task_state(thread_cpu); struct sigaction act, oact; /* initialize signal conversion tables */ - signal_table_init(); + signal_table_init(rtsig_map); /* Set the signal mask from the host mask. */ sigprocmask(0, 0, &ts->signal_mask); @@ -618,6 +696,8 @@ void signal_init(void) } sigact_table[tsig - 1]._sa_handler = thand; } + + sigaction(host_interrupt_signal, &act, NULL); } /* Force a synchronously taken signal. 
The kernel force_sig() function @@ -670,10 +750,10 @@ void force_sigsegv(int oldsig) } #endif -void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, +void cpu_loop_exit_sigsegv(CPUState *cpu, vaddr addr, MMUAccessType access_type, bool maperr, uintptr_t ra) { - const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; if (tcg_ops->record_sigsegv) { tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra); @@ -686,10 +766,10 @@ void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, cpu_loop_exit_restore(cpu, ra); } -void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr, +void cpu_loop_exit_sigbus(CPUState *cpu, vaddr addr, MMUAccessType access_type, uintptr_t ra) { - const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; if (tcg_ops->record_sigbus) { tcg_ops->record_sigbus(cpu, addr, access_type, ra); @@ -965,6 +1045,12 @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc) bool sync_sig = false; void *sigmask; + if (host_sig == host_interrupt_signal) { + ts->signal_pending = 1; + cpu_exit(thread_cpu); + return; + } + /* * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special * handling wrt signal blocking and unwinding. Non-spoofed SIGILL, diff --git a/linux-user/sparc/cpu_loop.c b/linux-user/sparc/cpu_loop.c index 50424a54df5..68f1e8ecd43 100644 --- a/linux-user/sparc/cpu_loop.c +++ b/linux-user/sparc/cpu_loop.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" #define SPARC64_STACK_BIAS 2047 @@ -357,7 +357,7 @@ void cpu_loop (CPUSPARCState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { int i; env->pc = regs->pc; diff --git a/linux-user/sparc/syscall.tbl b/linux-user/sparc/syscall.tbl index e34cc30ef22..3bc83783b78 100644 --- a/linux-user/sparc/syscall.tbl +++ b/linux-user/sparc/syscall.tbl @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note # # system call numbers and entry vectors for sparc # @@ -117,7 +117,7 @@ 90 common dup2 sys_dup2 91 32 setfsuid32 sys_setfsuid 92 common fcntl sys_fcntl compat_sys_fcntl -93 common select sys_select +93 common select sys_select compat_sys_select 94 32 setfsgid32 sys_setfsgid 95 common fsync sys_fsync 96 common setpriority sys_setpriority @@ -155,7 +155,7 @@ 123 32 fchown sys_fchown16 123 64 fchown sys_fchown 124 common fchmod sys_fchmod -125 common recvfrom sys_recvfrom +125 common recvfrom sys_recvfrom compat_sys_recvfrom 126 32 setreuid sys_setreuid16 126 64 setreuid sys_setreuid 127 32 setregid sys_setregid16 @@ -247,9 +247,9 @@ 204 32 readdir sys_old_readdir compat_sys_old_readdir 204 64 readdir sys_nis_syscall 205 common readahead sys_readahead compat_sys_readahead -206 common socketcall sys_socketcall sys32_socketcall +206 common socketcall sys_socketcall compat_sys_socketcall 207 common syslog sys_syslog -208 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +208 common lookup_dcookie sys_ni_syscall 209 common fadvise64 sys_fadvise64 compat_sys_fadvise64 210 common fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64 211 common tgkill sys_tgkill @@ -270,7 +270,7 @@ 222 common delete_module sys_delete_module 223 common get_kernel_syms sys_ni_syscall 224 common getpgid 
sys_getpgid -225 common bdflush sys_bdflush +225 common bdflush sys_ni_syscall 226 common sysfs sys_sysfs 227 common afs_syscall sys_nis_syscall 228 common setfsuid sys_setfsuid16 @@ -365,12 +365,12 @@ 299 common unshare sys_unshare 300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list 301 common get_robust_list sys_get_robust_list compat_sys_get_robust_list -302 common migrate_pages sys_migrate_pages compat_sys_migrate_pages -303 common mbind sys_mbind compat_sys_mbind -304 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy -305 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy +302 common migrate_pages sys_migrate_pages +303 common mbind sys_mbind +304 common get_mempolicy sys_get_mempolicy +305 common set_mempolicy sys_set_mempolicy 306 common kexec_load sys_kexec_load compat_sys_kexec_load -307 common move_pages sys_move_pages compat_sys_move_pages +307 common move_pages sys_move_pages 308 common getcpu sys_getcpu 309 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait 310 32 utimensat sys_utimensat_time32 @@ -461,7 +461,7 @@ 412 32 utimensat_time64 sys_utimensat sys_utimensat 413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 -416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend 419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive @@ -488,7 +488,23 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 common quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/linux-user/sparc/syscallhdr.sh b/linux-user/sparc/syscallhdr.sh index 34a99dc8326..938a02bb481 100644 --- a/linux-user/sparc/syscallhdr.sh +++ b/linux-user/sparc/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/sparc/target_proc.h b/linux-user/sparc/target_proc.h index 3bb3134a477..744fa107309 100644 --- a/linux-user/sparc/target_proc.h +++ b/linux-user/sparc/target_proc.h @@ -8,7 +8,25 @@ static int open_cpuinfo(CPUArchState *cpu_env, int fd) { - dprintf(fd, "type\t\t: sun4u\n"); + int i, num_cpus; + const char *cpu_type; + + num_cpus = sysconf(_SC_NPROCESSORS_ONLN); + if (cpu_env->def.features & CPU_FEATURE_HYPV) { + cpu_type = "sun4v"; + } else { + cpu_type = "sun4u"; + } + + dprintf(fd, "cpu\t\t: %s 
(QEMU)\n", cpu_env->def.name); + dprintf(fd, "type\t\t: %s\n", cpu_type); + dprintf(fd, "ncpus probed\t: %d\n", num_cpus); + dprintf(fd, "ncpus active\t: %d\n", num_cpus); + dprintf(fd, "State:\n"); + for (i = 0; i < num_cpus; i++) { + dprintf(fd, "CPU%d:\t\t: online\n", i); + } + return 0; } #define HAVE_ARCH_PROC_CPUINFO diff --git a/linux-user/strace.c b/linux-user/strace.c index b4d1098170e..3b744ccd4a7 100644 --- a/linux-user/strace.c +++ b/linux-user/strace.c @@ -13,6 +13,9 @@ #include #include #include +#ifdef HAVE_OPENAT2_H +#include +#endif #include #include "qemu.h" #include "user-internals.h" @@ -158,19 +161,20 @@ static const char * const target_signal_name[] = { }; static void -print_signal(abi_ulong arg, int last) +print_signal_1(abi_ulong arg) { - const char *signal_name = NULL; - if (arg < ARRAY_SIZE(target_signal_name)) { - signal_name = target_signal_name[arg]; + qemu_log("%s", target_signal_name[arg]); + } else { + qemu_log(TARGET_ABI_FMT_lu, arg); } +} - if (signal_name == NULL) { - print_raw_param("%ld", arg, last); - return; - } - qemu_log("%s%s", signal_name, get_comma(last)); +static void +print_signal(abi_ulong arg, int last) +{ + print_signal_1(arg); + qemu_log("%s", get_comma(last)); } static void print_si_code(int arg) @@ -373,7 +377,7 @@ print_sockaddr(abi_ulong addr, abi_long addrlen, int last) un->sun_path[i]; i++) { qemu_log("%c", un->sun_path[i]); } - qemu_log("\"}"); + qemu_log("\"},"); break; } case AF_INET: { @@ -383,7 +387,7 @@ print_sockaddr(abi_ulong addr, abi_long addrlen, int last) ntohs(in->sin_port)); qemu_log("sin_addr=inet_addr(\"%d.%d.%d.%d\")", c[0], c[1], c[2], c[3]); - qemu_log("}"); + qemu_log("},"); break; } case AF_PACKET: { @@ -414,12 +418,12 @@ print_sockaddr(abi_ulong addr, abi_long addrlen, int last) } qemu_log(",sll_addr=%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]); - qemu_log("}"); + qemu_log("},"); break; } case AF_NETLINK: { struct target_sockaddr_nl *nl = (struct target_sockaddr_nl *)sa; - qemu_log("{nl_family=AF_NETLINK,nl_pid=%u,nl_groups=%u}", + qemu_log("{nl_family=AF_NETLINK,nl_pid=%u,nl_groups=%u},", tswap32(nl->nl_pid), tswap32(nl->nl_groups)); break; } @@ -429,14 +433,14 @@ print_sockaddr(abi_ulong addr, abi_long addrlen, int last) qemu_log("%02x, ", sa->sa_data[i]); } qemu_log("%02x}", sa->sa_data[i]); - qemu_log("}"); + qemu_log("},"); break; } unlock_user(sa, addr, 0); } else { - print_raw_param("0x"TARGET_ABI_FMT_lx, addr, 0); + print_pointer(addr, 0); } - qemu_log(", "TARGET_ABI_FMT_ld"%s", addrlen, get_comma(last)); + qemu_log(TARGET_ABI_FMT_ld"%s", addrlen, get_comma(last)); } static void @@ -715,6 +719,51 @@ print_ipc(CPUArchState *cpu_env, const struct syscallname *name, } #endif +#ifdef TARGET_NR_rt_sigprocmask +static void print_target_sigset_t_1(target_sigset_t *set, int last) +{ + bool first = true; + int i, sig = 1; + + qemu_log("["); + for (i = 0; i < TARGET_NSIG_WORDS; i++) { + abi_ulong bits = 0; + int j; + + __get_user(bits, &set->sig[i]); + for (j = 0; j < sizeof(bits) * 8; j++) { + if (bits & ((abi_ulong)1 << j)) { + if (first) { + first = false; + } else { + qemu_log(" "); + } + print_signal_1(sig); + } + sig++; + } + } + qemu_log("]%s", get_comma(last)); +} + +static void print_target_sigset_t(abi_ulong addr, abi_ulong size, int last) +{ + if (addr && size == sizeof(target_sigset_t)) { + target_sigset_t *set; + + set = lock_user(VERIFY_READ, addr, sizeof(target_sigset_t), 1); + if (set) { + print_target_sigset_t_1(set, last); + unlock_user(set, addr, 0); + 
} else { + print_pointer(addr, last); + } + } else { + print_pointer(addr, last); + } +} +#endif + /* * Variants for the return value output function */ @@ -1063,6 +1112,18 @@ UNUSED static const struct flags open_flags[] = { FLAG_END, }; +UNUSED static const struct flags openat2_resolve_flags[] = { +#ifdef HAVE_OPENAT2_H + FLAG_GENERIC(RESOLVE_NO_XDEV), + FLAG_GENERIC(RESOLVE_NO_MAGICLINKS), + FLAG_GENERIC(RESOLVE_NO_SYMLINKS), + FLAG_GENERIC(RESOLVE_BENEATH), + FLAG_GENERIC(RESOLVE_IN_ROOT), + FLAG_GENERIC(RESOLVE_CACHED), +#endif + FLAG_END, +}; + UNUSED static const struct flags mount_flags[] = { #ifdef MS_BIND FLAG_GENERIC(MS_BIND), @@ -1655,6 +1716,13 @@ print_buf(abi_long addr, abi_long len, int last) } } +static void +print_buf_len(abi_long addr, abi_long len, int last) +{ + print_buf(addr, len, 0); + print_raw_param(TARGET_ABI_FMT_ld, len, last); +} + /* * Prints out raw parameter using given format. Caller needs * to do byte swapping if needed. @@ -2742,8 +2810,7 @@ static void do_print_sendrecv(const char *name, abi_long arg1) qemu_log("%s(", name); print_sockfd(sockfd, 0); - print_buf(msg, len, 0); - print_raw_param(TARGET_ABI_FMT_ld, len, 0); + print_buf_len(msg, len, 0); print_flags(msg_flags, flags, 1); qemu_log(")"); } @@ -2761,8 +2828,7 @@ static void do_print_msgaddr(const char *name, abi_long arg1) qemu_log("%s(", name); print_sockfd(sockfd, 0); - print_buf(msg, len, 0); - print_raw_param(TARGET_ABI_FMT_ld, len, 0); + print_buf_len(msg, len, 0); print_flags(msg_flags, flags, 0); print_sockaddr(addr, addrlen, 0); qemu_log(")"); @@ -3122,6 +3188,38 @@ print_bind(CPUArchState *cpu_env, const struct syscallname *name, } #endif +#ifdef TARGET_NR_recvfrom +static void +print_recvfrom(CPUArchState *cpu_env, const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + print_syscall_prologue(name); + print_sockfd(arg0, 0); + print_pointer(arg1, 0); /* output */ + print_raw_param(TARGET_ABI_FMT_ld, arg2, 0); + print_flags(msg_flags, arg3, 0); + print_pointer(arg4, 0); /* output */ + print_pointer(arg5, 1); /* in/out */ + print_syscall_epilogue(name); +} +#endif + +#ifdef TARGET_NR_sendto +static void +print_sendto(CPUArchState *cpu_env, const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + print_syscall_prologue(name); + print_sockfd(arg0, 0); + print_buf_len(arg1, arg2, 0); + print_flags(msg_flags, arg3, 0); + print_sockaddr(arg4, arg5, 1); + print_syscall_epilogue(name); +} +#endif + #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) || \ defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) static void @@ -3260,11 +3358,29 @@ print_rt_sigprocmask(CPUArchState *cpu_env, const struct syscallname *name, case TARGET_SIG_SETMASK: how = "SIG_SETMASK"; break; } qemu_log("%s,", how); - print_pointer(arg1, 0); + print_target_sigset_t(arg1, arg3, 0); print_pointer(arg2, 0); print_raw_param("%u", arg3, 1); print_syscall_epilogue(name); } + +static void +print_rt_sigprocmask_ret(CPUArchState *cpu_env, const struct syscallname *name, + abi_long ret, abi_long arg0, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, + abi_long arg5) +{ + if (!print_syscall_err(ret)) { + qemu_log(TARGET_ABI_FMT_ld, ret); + if (arg2) { + qemu_log(" (oldset="); + print_target_sigset_t(arg2, arg3, 1); + qemu_log(")"); + } + } + + qemu_log("\n"); +} #endif #ifdef TARGET_NR_rt_sigqueueinfo @@ -3483,6 +3599,38 @@ print_openat(CPUArchState *cpu_env, const 
struct syscallname *name, } #endif +#ifdef TARGET_NR_openat2 +static void +print_openat2(CPUArchState *cpu_env, const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + struct open_how_ver0 how; + + print_syscall_prologue(name); + print_at_dirfd(arg0, 0); + print_string(arg1, 0); + + if ((abi_ulong)arg3 >= sizeof(struct target_open_how_ver0) && + copy_struct_from_user(&how, sizeof(how), arg2, arg3) == 0) { + how.flags = tswap64(how.flags); + how.mode = tswap64(how.mode); + how.resolve = tswap64(how.resolve); + qemu_log("{"); + print_open_flags(how.flags, 0); + if (how.flags & TARGET_O_CREAT) { + print_file_mode(how.mode, 0); + } + print_flags(openat2_resolve_flags, how.resolve, 1); + qemu_log("},"); + } else { + print_pointer(arg2, 0); + } + print_raw_param(TARGET_ABI_FMT_lu, arg3, 1); + print_syscall_epilogue(name); +} +#endif + #ifdef TARGET_NR_pidfd_send_signal static void print_pidfd_send_signal(CPUArchState *cpu_env, const struct syscallname *name, @@ -3823,7 +3971,7 @@ print_mmap(CPUArchState *cpu_env, const struct syscallname *name, { return print_mmap_both(cpu_env, name, arg0, arg1, arg2, arg3, arg4, arg5, -#if defined(TARGET_NR_mmap2) +#ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP true #else false @@ -4168,6 +4316,63 @@ print_ioctl(CPUArchState *cpu_env, const struct syscallname *name, } #endif +#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid) +static void print_wstatus(int wstatus) +{ + if (WIFSIGNALED(wstatus)) { + qemu_log("{WIFSIGNALED(s) && WTERMSIG(s) == "); + print_signal(WTERMSIG(wstatus), 1); + if (WCOREDUMP(wstatus)) { + qemu_log(" && WCOREDUMP(s)"); + } + qemu_log("}"); + } else if (WIFEXITED(wstatus)) { + qemu_log("{WIFEXITED(s) && WEXITSTATUS(s) == %d}", + WEXITSTATUS(wstatus)); + } else { + print_number(wstatus, 1); + } +} + +static void print_ret_wstatus(abi_long ret, abi_long wstatus_addr) +{ + int wstatus; + + if (!print_syscall_err(ret) + && wstatus_addr + && get_user_s32(wstatus, wstatus_addr)) { + qemu_log(TARGET_ABI_FMT_ld " (wstatus=", ret); + print_wstatus(wstatus); + qemu_log(")"); + } + qemu_log("\n"); +} +#endif + +#ifdef TARGET_NR_wait4 +static void +print_syscall_ret_wait4(CPUArchState *cpu_env, + const struct syscallname *name, + abi_long ret, abi_long arg0, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, + abi_long arg5) +{ + print_ret_wstatus(ret, arg1); +} +#endif + +#ifdef TARGET_NR_waitpid +static void +print_syscall_ret_waitpid(CPUArchState *cpu_env, + const struct syscallname *name, + abi_long ret, abi_long arg0, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, + abi_long arg5) +{ + print_ret_wstatus(ret, arg1); +} +#endif + /* * An array of all of the syscalls we know about */ @@ -4196,7 +4401,7 @@ print_syscall(CPUArchState *cpu_env, int num, if (!f) { return; } - fprintf(f, "%d ", getpid()); + fprintf(f, "%d ", get_task_state(env_cpu(cpu_env))->ts_tid); for (i = 0; i < nsyscalls; i++) { if (scnames[i].nr == num) { diff --git a/linux-user/strace.list b/linux-user/strace.list index dfd4237d14e..ab818352a90 100644 --- a/linux-user/strace.list +++ b/linux-user/strace.list @@ -715,6 +715,9 @@ #ifdef TARGET_NR_openat { TARGET_NR_openat, "openat" , NULL, print_openat, NULL }, #endif +#ifdef TARGET_NR_openat2 +{ TARGET_NR_openat2, "openat2" , NULL, print_openat2, NULL }, +#endif #ifdef TARGET_NR_osf_adjtime { TARGET_NR_osf_adjtime, "osf_adjtime" , NULL, NULL, NULL }, #endif @@ -1135,7 +1138,7 @@ { TARGET_NR_recv, "recv" , "%s(%d,%p,%u,%d)", NULL, 
NULL }, #endif #ifdef TARGET_NR_recvfrom -{ TARGET_NR_recvfrom, "recvfrom" , NULL, NULL, NULL }, +{ TARGET_NR_recvfrom, "recvfrom" , NULL, print_recvfrom, NULL }, #endif #ifdef TARGET_NR_recvmmsg { TARGET_NR_recvmmsg, "recvmmsg" , NULL, NULL, NULL }, @@ -1186,7 +1189,8 @@ { TARGET_NR_rt_sigpending, "rt_sigpending" , NULL, NULL, NULL }, #endif #ifdef TARGET_NR_rt_sigprocmask -{ TARGET_NR_rt_sigprocmask, "rt_sigprocmask" , NULL, print_rt_sigprocmask, NULL }, +{ TARGET_NR_rt_sigprocmask, "rt_sigprocmask" , NULL, print_rt_sigprocmask, + print_rt_sigprocmask_ret }, #endif #ifdef TARGET_NR_rt_sigqueueinfo { TARGET_NR_rt_sigqueueinfo, "rt_sigqueueinfo" , NULL, print_rt_sigqueueinfo, NULL }, @@ -1285,7 +1289,7 @@ { TARGET_NR_sendmsg, "sendmsg" , NULL, NULL, NULL }, #endif #ifdef TARGET_NR_sendto -{ TARGET_NR_sendto, "sendto" , NULL, NULL, NULL }, +{ TARGET_NR_sendto, "sendto" , NULL, print_sendto, NULL }, #endif #ifdef TARGET_NR_setdomainname { TARGET_NR_setdomainname, "setdomainname" , NULL, NULL, NULL }, @@ -1659,13 +1663,15 @@ { TARGET_NR_vserver, "vserver" , NULL, NULL, NULL }, #endif #ifdef TARGET_NR_wait4 -{ TARGET_NR_wait4, "wait4" , "%s(%d,%p,%d,%p)", NULL, NULL }, +{ TARGET_NR_wait4, "wait4" , "%s(%d,%p,%d,%p)", NULL, + print_syscall_ret_wait4 }, #endif #ifdef TARGET_NR_waitid { TARGET_NR_waitid, "waitid" , "%s(%#x,%d,%p,%#x)", NULL, NULL }, #endif #ifdef TARGET_NR_waitpid -{ TARGET_NR_waitpid, "waitpid" , "%s(%d,%p,%#x)", NULL, NULL }, +{ TARGET_NR_waitpid, "waitpid", "%s(%d,%p,%#x)", NULL, + print_syscall_ret_waitpid }, #endif #ifdef TARGET_NR_write { TARGET_NR_write, "write" , "%s(%d,%#x,%d)", NULL, NULL }, @@ -1710,3 +1716,6 @@ { TARGET_NR_clock_gettime64, "clock_gettime64" , NULL, print_clock_gettime64, print_syscall_ret_clock_gettime64 }, #endif +#ifdef TARGET_NR_riscv_hwprobe +{ TARGET_NR_riscv_hwprobe, "riscv_hwprobe" , "%s(%p,%d,%d,%d,%d,%d)", NULL, NULL }, +#endif diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 9d5415674db..91360a072c7 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -26,6 +26,9 @@ #include "tcg/startup.h" #include "target_mman.h" #include "exec/page-protection.h" +#include "exec/mmap-lock.h" +#include "exec/tb-flush.h" +#include "exec/translation-block.h" #include #include #include @@ -54,7 +57,6 @@ #include #include #include -//#include #include #include #include @@ -136,14 +138,16 @@ #include "signal-common.h" #include "loader.h" #include "user-mmap.h" +#include "user/page-protection.h" #include "user/safe-syscall.h" +#include "user/signal.h" #include "qemu/guest-random.h" #include "qemu/selfmap.h" #include "user/syscall-trace.h" #include "special-errno.h" #include "qapi/error.h" #include "fd-trans.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #ifndef CLONE_IO #define CLONE_IO 0x80000000 /* Clone io context */ @@ -359,7 +363,8 @@ _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, #define __NR_sys_sched_setaffinity __NR_sched_setaffinity _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, unsigned long *, user_mask_ptr); -/* sched_attr is not defined in glibc */ +/* sched_attr is not defined in glibc < 2.41 */ +#ifndef SCHED_ATTR_SIZE_VER0 struct sched_attr { uint32_t size; uint32_t sched_policy; @@ -372,6 +377,7 @@ struct sched_attr { uint32_t sched_util_min; uint32_t sched_util_max; }; +#endif #define __NR_sys_sched_getattr __NR_sched_getattr _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr, unsigned int, size, unsigned int, flags); @@ -602,6 +608,33 @@ 
static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize) return 1; } +/* + * Copies a target struct to a host struct, in a way that guarantees + * backwards-compatibility for struct syscall arguments. + * + * Similar to kernels uaccess.h:copy_struct_from_user() + */ +int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize) +{ + size_t size = MIN(ksize, usize); + size_t rest = MAX(ksize, usize) - size; + + /* Deal with trailing bytes. */ + if (usize < ksize) { + memset(dst + size, 0, rest); + } else if (usize > ksize) { + int ret = check_zeroed_user(src, ksize, usize); + if (ret <= 0) { + return ret ?: -TARGET_E2BIG; + } + } + /* Copy the interoperable parts of the struct. */ + if (copy_from_user(dst, src, size)) { + return -TARGET_EFAULT; + } + return 0; +} + #define safe_syscall0(type, name) \ static type safe_##name(void) \ { \ @@ -653,6 +686,10 @@ safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count) safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count) safe_syscall4(int, openat, int, dirfd, const char *, pathname, \ int, flags, mode_t, mode) + +safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \ + const struct open_how_ver0 *, how, size_t, size) + #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid) safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \ struct rusage *, rusage) @@ -753,16 +790,18 @@ safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff, int, outfd, loff_t *, poutoff, size_t, length, unsigned int, flags) #endif +#if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2) +safe_syscall4(int, fchmodat2, int, dfd, const char *, filename, + unsigned short, mode, unsigned int, flags) +#endif /* We do ioctl like this rather than via safe_syscall3 to preserve the * "third argument might be integer or pointer or not present" behaviour of * the libc function. */ #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__) -/* Similarly for fcntl. Note that callers must always: - * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK - * use the flock64 struct rather than unsuffixed flock - * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts. +/* Similarly for fcntl. Since we always build with LFS enabled, + * we should be using the 64-bit structures automatically. */ #ifdef __NR_fcntl64 #define safe_fcntl(...) 
safe_syscall(__NR_fcntl64, __VA_ARGS__) @@ -1797,7 +1836,7 @@ static inline abi_long target_to_host_cmsg(struct msghdr *msgh, *dst = tswap32(*dst); } } else { - qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n", + qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type); memcpy(data, target_data, len); } @@ -1968,6 +2007,16 @@ static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, (void *) &errh->offender, sizeof(errh->offender)); break; } + case IP_PKTINFO: + { + struct in_pktinfo *pkti = data; + struct target_in_pktinfo *target_pi = target_data; + + __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex); + target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr; + target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr; + break; + } default: goto unimplemented; } @@ -2019,7 +2068,7 @@ static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, default: unimplemented: - qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n", + qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type); memcpy(target_data, data, MIN(len, tgt_len)); if (tgt_len > len) { @@ -2090,16 +2139,23 @@ static abi_long do_setsockopt(int sockfd, int level, int optname, } ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); break; + case IP_MULTICAST_IF: case IP_ADD_MEMBERSHIP: case IP_DROP_MEMBERSHIP: { struct ip_mreqn ip_mreq; struct target_ip_mreqn *target_smreqn; + int min_size; QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) != sizeof(struct target_ip_mreq)); - if (optlen < sizeof (struct target_ip_mreq) || + if (optname == IP_MULTICAST_IF) { + min_size = sizeof(struct in_addr); + } else { + min_size = sizeof(struct target_ip_mreq); + } + if (optlen < min_size || optlen > sizeof (struct target_ip_mreqn)) { return -TARGET_EINVAL; } @@ -2109,13 +2165,14 @@ static abi_long do_setsockopt(int sockfd, int level, int optname, return -TARGET_EFAULT; } ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr; - ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr; - if (optlen == sizeof(struct target_ip_mreqn)) { - ip_mreq.imr_ifindex = tswapal(target_smreqn->imr_ifindex); - optlen = sizeof(struct ip_mreqn); + if (optlen >= sizeof(struct target_ip_mreq)) { + ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr; + if (optlen >= sizeof(struct target_ip_mreqn)) { + __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex); + optlen = sizeof(struct ip_mreqn); + } } unlock_user(target_smreqn, optval_addr, 0); - ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen)); break; } @@ -6690,10 +6747,9 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, int pid_child = ret; pid_fd = pidfd_open(pid_child, 0); if (pid_fd >= 0) { - fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL) - | FD_CLOEXEC); + qemu_set_cloexec(pid_fd); } else { - pid_fd = 0; + pid_fd = 0; } #endif put_user_u32(pid_fd, parent_tidptr); @@ -6722,13 +6778,13 @@ static int target_to_host_fcntl_cmd(int cmd) ret = cmd; break; case TARGET_F_GETLK: - ret = F_GETLK64; + ret = F_GETLK; break; case TARGET_F_SETLK: - ret = F_SETLK64; + ret = F_SETLK; break; case TARGET_F_SETLKW: - ret = F_SETLKW64; + ret = F_SETLKW; break; case TARGET_F_GETOWN: ret = F_GETOWN; @@ -6744,13 +6800,13 @@ static int target_to_host_fcntl_cmd(int cmd) break; #if TARGET_ABI_BITS == 32 case TARGET_F_GETLK64: - ret = F_GETLK64; + ret = F_GETLK; break; case TARGET_F_SETLK64: 
- ret = F_SETLK64; + ret = F_SETLK; break; case TARGET_F_SETLKW64: - ret = F_SETLKW64; + ret = F_SETLKW; break; #endif case TARGET_F_SETLEASE: @@ -6804,8 +6860,8 @@ static int target_to_host_fcntl_cmd(int cmd) * them to 5, 6 and 7 before making the syscall(). Since we make the * syscall directly, adjust to what is supported by the kernel. */ - if (ret >= F_GETLK64 && ret <= F_SETLKW64) { - ret -= F_GETLK64 - 5; + if (ret >= F_GETLK && ret <= F_SETLKW) { + ret -= F_GETLK - 5; } #endif @@ -6838,7 +6894,7 @@ static int host_to_target_flock(int type) return type; } -static inline abi_long copy_from_user_flock(struct flock64 *fl, +static inline abi_long copy_from_user_flock(struct flock *fl, abi_ulong target_flock_addr) { struct target_flock *target_fl; @@ -6863,7 +6919,7 @@ static inline abi_long copy_from_user_flock(struct flock64 *fl, } static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr, - const struct flock64 *fl) + const struct flock *fl) { struct target_flock *target_fl; short l_type; @@ -6882,8 +6938,8 @@ static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr, return 0; } -typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr); -typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl); +typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr); +typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl); #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32 struct target_oabi_flock64 { @@ -6894,7 +6950,7 @@ struct target_oabi_flock64 { abi_int l_pid; } QEMU_PACKED; -static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl, +static inline abi_long copy_from_user_oabi_flock64(struct flock *fl, abi_ulong target_flock_addr) { struct target_oabi_flock64 *target_fl; @@ -6919,7 +6975,7 @@ static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl, } static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr, - const struct flock64 *fl) + const struct flock *fl) { struct target_oabi_flock64 *target_fl; short l_type; @@ -6939,7 +6995,7 @@ static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr, } #endif -static inline abi_long copy_from_user_flock64(struct flock64 *fl, +static inline abi_long copy_from_user_flock64(struct flock *fl, abi_ulong target_flock_addr) { struct target_flock64 *target_fl; @@ -6964,7 +7020,7 @@ static inline abi_long copy_from_user_flock64(struct flock64 *fl, } static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr, - const struct flock64 *fl) + const struct flock *fl) { struct target_flock64 *target_fl; short l_type; @@ -6985,7 +7041,7 @@ static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr, static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) { - struct flock64 fl64; + struct flock fl; #ifdef F_GETOWN_EX struct f_owner_ex fox; struct target_f_owner_ex *target_fox; @@ -6998,45 +7054,45 @@ static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) switch(cmd) { case TARGET_F_GETLK: - ret = copy_from_user_flock(&fl64, arg); + ret = copy_from_user_flock(&fl, arg); if (ret) { return ret; } - ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); + ret = get_errno(safe_fcntl(fd, host_cmd, &fl)); if (ret == 0) { - ret = copy_to_user_flock(arg, &fl64); + ret = copy_to_user_flock(arg, &fl); } break; case TARGET_F_SETLK: case TARGET_F_SETLKW: - ret = copy_from_user_flock(&fl64, arg); + ret = copy_from_user_flock(&fl, arg); if (ret) { return ret; } - ret = get_errno(safe_fcntl(fd, 
host_cmd, &fl64)); + ret = get_errno(safe_fcntl(fd, host_cmd, &fl)); break; case TARGET_F_GETLK64: case TARGET_F_OFD_GETLK: - ret = copy_from_user_flock64(&fl64, arg); + ret = copy_from_user_flock64(&fl, arg); if (ret) { return ret; } - ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); + ret = get_errno(safe_fcntl(fd, host_cmd, &fl)); if (ret == 0) { - ret = copy_to_user_flock64(arg, &fl64); + ret = copy_to_user_flock64(arg, &fl); } break; case TARGET_F_SETLK64: case TARGET_F_SETLKW64: case TARGET_F_OFD_SETLK: case TARGET_F_OFD_SETLKW: - ret = copy_from_user_flock64(&fl64, arg); + ret = copy_from_user_flock64(&fl, arg); if (ret) { return ret; } - ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); + ret = get_errno(safe_fcntl(fd, host_cmd, &fl)); break; case TARGET_F_GETFL: @@ -7205,12 +7261,24 @@ static inline int tswapid(int id) #else #define __NR_sys_setgroups __NR_setgroups #endif +#ifdef __NR_sys_setreuid32 +#define __NR_sys_setreuid __NR_setreuid32 +#else +#define __NR_sys_setreuid __NR_setreuid +#endif +#ifdef __NR_sys_setregid32 +#define __NR_sys_setregid __NR_setregid32 +#else +#define __NR_sys_setregid __NR_setregid +#endif _syscall1(int, sys_setuid, uid_t, uid) _syscall1(int, sys_setgid, gid_t, gid) _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist) +_syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid); +_syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid); void syscall_init(void) { @@ -7267,7 +7335,7 @@ static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1 arg2 = arg3; arg3 = arg4; } - return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); + return get_errno(truncate(arg1, target_offset64(arg2, arg3))); } #endif @@ -7281,7 +7349,7 @@ static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1, arg2 = arg3; arg3 = arg4; } - return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); + return get_errno(ftruncate(arg1, target_offset64(arg2, arg3))); } #endif @@ -8070,8 +8138,8 @@ static void open_self_maps_4(const struct open_self_maps_data *d, * Callback for walk_memory_regions, when read_self_maps() fails. * Proceed without the benefit of host /proc/self/maps cross-check. */ -static int open_self_maps_3(void *opaque, target_ulong guest_start, - target_ulong guest_end, unsigned long flags) +static int open_self_maps_3(void *opaque, vaddr guest_start, + vaddr guest_end, int flags) { static const MapInfo mi = { .is_priv = true }; @@ -8082,8 +8150,8 @@ static int open_self_maps_3(void *opaque, target_ulong guest_start, /* * Callback for walk_memory_regions, when read_self_maps() succeeds. 
*/ -static int open_self_maps_2(void *opaque, target_ulong guest_start, - target_ulong guest_end, unsigned long flags) +static int open_self_maps_2(void *opaque, vaddr guest_start, + vaddr guest_end, int flags) { const struct open_self_maps_data *d = opaque; uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start); @@ -8122,17 +8190,19 @@ static int open_self_maps_1(CPUArchState *env, int fd, bool smaps) { struct open_self_maps_data d = { .ts = get_task_state(env_cpu(env)), - .host_maps = read_self_maps(), .fd = fd, .smaps = smaps }; + mmap_lock(); + d.host_maps = read_self_maps(); if (d.host_maps) { walk_memory_regions(&d, open_self_maps_2); free_self_maps(d.host_maps); } else { walk_memory_regions(&d, open_self_maps_3); } + mmap_unlock(); return 0; } @@ -8168,6 +8238,9 @@ static int open_self_stat(CPUArchState *cpu_env, int fd) } else if (i == 3) { /* ppid */ g_string_printf(buf, FMT_pid " ", getppid()); + } else if (i == 4) { + /* pgid */ + g_string_printf(buf, FMT_pid " ", getpgrp()); } else if (i == 19) { /* num_threads */ int cpus = 0; @@ -8334,8 +8407,9 @@ static int open_net_route(CPUArchState *cpu_env, int fd) } #endif -int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname, - int flags, mode_t mode, bool safe) +static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd, + const char *fname, int flags, mode_t mode, + int openat2_resolve, bool safe) { g_autofree char *proc_name = NULL; const char *pathname; @@ -8372,6 +8446,12 @@ int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname, } if (is_proc_myself(pathname, "exe")) { + /* Honor openat2 resolve flags */ + if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) || + (openat2_resolve & RESOLVE_NO_SYMLINKS)) { + errno = ELOOP; + return -1; + } if (safe) { return safe_openat(dirfd, exec_path, flags, mode); } else { @@ -8418,6 +8498,17 @@ int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname, return fd; } + return -2; +} + +int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, + int flags, mode_t mode, bool safe) +{ + int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe); + if (fd > -2) { + return fd; + } + if (safe) { return safe_openat(dirfd, path(pathname), flags, mode); } else { @@ -8425,6 +8516,49 @@ int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname, } } + +static int do_openat2(CPUArchState *cpu_env, abi_long dirfd, + abi_ptr guest_pathname, abi_ptr guest_open_how, + abi_ulong guest_size) +{ + struct open_how_ver0 how = {0}; + char *pathname; + int ret; + + if (guest_size < sizeof(struct target_open_how_ver0)) { + return -TARGET_EINVAL; + } + ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size); + if (ret) { + if (ret == -TARGET_E2BIG) { + qemu_log_mask(LOG_UNIMP, + "Unimplemented openat2 open_how size: " + TARGET_ABI_FMT_lu "\n", guest_size); + } + return ret; + } + pathname = lock_user_string(guest_pathname); + if (!pathname) { + return -TARGET_EFAULT; + } + + how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl); + how.mode = tswap64(how.mode); + how.resolve = tswap64(how.resolve); + int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode, + how.resolve, true); + if (fd > -2) { + ret = get_errno(fd); + } else { + ret = get_errno(safe_openat2(dirfd, pathname, &how, + sizeof(struct open_how_ver0))); + } + + fd_trans_unregister(ret); + unlock_user(pathname, guest_pathname, 0); + return ret; +} + ssize_t do_guest_readlink(const char *pathname, char *buf, 
size_t bufsiz) { ssize_t ret; @@ -8666,7 +8800,7 @@ static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count) void *tdirp; int hlen, hoff, toff; int hreclen, treclen; - off64_t prev_diroff = 0; + off_t prev_diroff = 0; hdirp = g_try_malloc(count); if (!hdirp) { @@ -8719,7 +8853,7 @@ static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count) * Return what we have, resetting the file pointer to the * location of the first record not returned. */ - lseek64(dirfd, prev_diroff, SEEK_SET); + lseek(dirfd, prev_diroff, SEEK_SET); break; } @@ -8753,7 +8887,7 @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count) void *tdirp; int hlen, hoff, toff; int hreclen, treclen; - off64_t prev_diroff = 0; + off_t prev_diroff = 0; hdirp = g_try_malloc(count); if (!hdirp) { @@ -8795,7 +8929,7 @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count) * Return what we have, resetting the file pointer to the * location of the first record not returned. */ - lseek64(dirfd, prev_diroff, SEEK_SET); + lseek(dirfd, prev_diroff, SEEK_SET); break; } @@ -8853,7 +8987,7 @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count) #define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28) #define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29) #define RISCV_HWPROBE_EXT_ZVFH (1 << 30) -#define RISCV_HWPROBE_EXT_ZVFHMIN (1 << 31) +#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31) #define RISCV_HWPROBE_EXT_ZFA (1ULL << 32) #define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33) #define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34) @@ -8992,35 +9126,38 @@ static void risc_hwprobe_fill_pairs(CPURISCVState *env, } } -static int cpu_set_valid(abi_long arg3, abi_long arg4) +/* + * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT. + * If the cpumast_t has no bits set: -EINVAL. + * Otherwise the cpumask_t contains some bit set: 0. + * Unlike the kernel, we do not mask cpumask_t by the set of online cpus, + * nor bound the search by cpumask_size(). + */ +static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus) { - int ret, i, tmp; - size_t host_mask_size, target_mask_size; - unsigned long *host_mask; - - /* - * cpu_set_t represent CPU masks as bit masks of type unsigned long *. - * arg3 contains the cpu count. - */ - tmp = (8 * sizeof(abi_ulong)); - target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong); - host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) & - ~(sizeof(*host_mask) - 1); - - host_mask = alloca(host_mask_size); - - ret = target_to_host_cpu_mask(host_mask, host_mask_size, - arg4, target_mask_size); - if (ret != 0) { - return ret; - } + unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1); + int ret = -TARGET_EFAULT; - for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) { - if (host_mask[i] != 0) { - return 0; + if (p) { + ret = -TARGET_EINVAL; + /* + * Since we only care about the empty/non-empty state of the cpumask_t + * not the individual bits, we do not need to repartition the bits + * from target abi_ulong to host unsigned long. + * + * Note that the kernel does not round up cpusetsize to a multiple of + * sizeof(abi_ulong). After bounding cpusetsize by cpumask_size(), + * it copies exactly cpusetsize bytes into a zeroed buffer. 
+ */ + for (abi_ulong i = 0; i < cpusetsize; ++i) { + if (p[i]) { + ret = 0; + break; + } } + unlock_user(p, target_cpus, 0); } - return -TARGET_EINVAL; + return ret; } static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1, @@ -9037,7 +9174,7 @@ static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1, /* check cpu_set */ if (arg3 != 0) { - ret = cpu_set_valid(arg3, arg4); + ret = nonempty_cpu_set(arg3, arg4); if (ret != 0) { return ret; } @@ -9197,6 +9334,9 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, fd_trans_unregister(ret); unlock_user(p, arg2, 0); return ret; + case TARGET_NR_openat2: + ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4); + return ret; #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) case TARGET_NR_name_to_handle_at: ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); @@ -10482,10 +10622,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, return ret; #ifdef TARGET_NR_mmap case TARGET_NR_mmap: -#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ - (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ - defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ - || defined(TARGET_S390X) +#ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP { abi_ulong *v; abi_ulong v1, v2, v3, v4, v5, v6; @@ -10579,6 +10716,15 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, ret = get_errno(fchmodat(arg1, p, arg3, 0)); unlock_user(p, arg2, 0); return ret; +#endif +#if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2) + case TARGET_NR_fchmodat2: + if (!(p = lock_user_string(arg2))) { + return -TARGET_EFAULT; + } + ret = get_errno(safe_fchmodat2(arg1, p, arg3, arg4)); + unlock_user(p, arg2, 0); + return ret; #endif case TARGET_NR_getpriority: /* Note that negative values are valid for getpriority, so we must @@ -11496,10 +11642,14 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, case TARGET_NR_nanosleep: { struct timespec req, rem; - target_to_host_timespec(&req, arg1); + if (target_to_host_timespec(&req, arg1)) { + return -TARGET_EFAULT; + } ret = get_errno(safe_nanosleep(&req, &rem)); if (is_error(ret) && arg2) { - host_to_target_timespec(arg2, &rem); + if (host_to_target_timespec(arg2, &rem)) { + return -TARGET_EFAULT; + } } } return ret; @@ -11526,7 +11676,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, return -TARGET_EFAULT; } } - ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); + ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5))); unlock_user(p, arg2, ret); return ret; case TARGET_NR_pwrite64: @@ -11543,7 +11693,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, return -TARGET_EFAULT; } } - ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); + ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5))); unlock_user(p, arg2, 0); return ret; #endif @@ -11838,9 +11988,9 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, return get_errno(high2lowgid(getegid())); #endif case TARGET_NR_setreuid: - return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); + return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2))); case TARGET_NR_setregid: - return get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); + return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2))); case TARGET_NR_getgroups: { /* the same code as for 
TARGET_NR_getgroups32 */ int gidsetsize = arg1; @@ -12170,11 +12320,11 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, #endif #ifdef TARGET_NR_setreuid32 case TARGET_NR_setreuid32: - return get_errno(setreuid(arg1, arg2)); + return get_errno(sys_setreuid(arg1, arg2)); #endif #ifdef TARGET_NR_setregid32 case TARGET_NR_setregid32: - return get_errno(setregid(arg1, arg2)); + return get_errno(sys_setregid(arg1, arg2)); #endif #ifdef TARGET_NR_getgroups32 case TARGET_NR_getgroups32: @@ -12403,7 +12553,7 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, case TARGET_NR_fcntl64: { int cmd; - struct flock64 fl; + struct flock fl; from_flock64_fn *copyfrom = copy_from_user_flock64; to_flock64_fn *copyto = copy_to_user_flock64; @@ -12638,14 +12788,6 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, #if defined(TARGET_MIPS) cpu_env->active_tc.CP0_UserLocal = arg1; return 0; -#elif defined(TARGET_CRIS) - if (arg1 & 0xff) - ret = -TARGET_EINVAL; - else { - cpu_env->pregs[PR_PID] = arg1; - ret = 0; - } - return ret; #elif defined(TARGET_I386) && defined(TARGET_ABI32) return do_set_thread_area(cpu_env, arg1); #elif defined(TARGET_M68K) diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h index a00b617cae8..df26a2d28f2 100644 --- a/linux-user/syscall_defs.h +++ b/linux-user/syscall_defs.h @@ -62,7 +62,7 @@ #if (defined(TARGET_I386) && defined(TARGET_ABI32)) \ || (defined(TARGET_ARM) && defined(TARGET_ABI32)) \ || (defined(TARGET_SPARC) && defined(TARGET_ABI32)) \ - || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS) + || defined(TARGET_M68K) || defined(TARGET_SH4) /* 16 bit uid wrappers emulation */ #define USE_UID16 #define target_id uint16_t @@ -71,7 +71,7 @@ #endif #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SH4) \ - || defined(TARGET_M68K) || defined(TARGET_CRIS) \ + || defined(TARGET_M68K) \ || defined(TARGET_S390X) || defined(TARGET_OPENRISC) \ || defined(TARGET_RISCV) \ || defined(TARGET_XTENSA) || defined(TARGET_LOONGARCH64) @@ -462,7 +462,7 @@ typedef struct { abi_ulong sig[TARGET_NSIG_WORDS]; } target_sigset_t; -#ifdef BSWAP_NEEDED +#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN static inline void tswap_sigset(target_sigset_t *d, const target_sigset_t *s) { int i; @@ -515,10 +515,6 @@ struct target_sigaction { abi_ulong _sa_handler; #endif target_sigset_t sa_mask; -#ifdef TARGET_ARCH_HAS_SA_RESTORER - /* ??? This is always present, but ignored unless O32. */ - abi_ulong sa_restorer; -#endif }; #else struct target_old_sigaction { @@ -1234,8 +1230,7 @@ struct target_winsize { #include "target_mman.h" #if (defined(TARGET_I386) && defined(TARGET_ABI32)) \ - || (defined(TARGET_ARM) && defined(TARGET_ABI32)) \ - || defined(TARGET_CRIS) + || (defined(TARGET_ARM) && defined(TARGET_ABI32)) #define TARGET_STAT_HAVE_NSEC struct target_stat { abi_ushort st_dev; @@ -1976,7 +1971,7 @@ struct target_stat64 { }; #elif defined(TARGET_OPENRISC) \ - || defined(TARGET_RISCV) || defined(TARGET_HEXAGON) + || defined(TARGET_RISCV) || defined(TARGET_HEXAGON) || defined(TARGET_LOONGARCH) /* These are the asm-generic versions of the stat and stat64 structures */ @@ -2086,11 +2081,6 @@ struct target_stat64 { abi_uint target_st_ctime_nsec; abi_ullong st_ino; }; - -#elif defined(TARGET_LOONGARCH64) - -/* LoongArch no newfstatat/fstat syscall. 
*/ - #else #error unsupported CPU #endif @@ -2628,6 +2618,12 @@ struct target_ucred { abi_uint gid; }; +struct target_in_pktinfo { + abi_int ipi_ifindex; + struct target_in_addr ipi_spec_dst; + struct target_in_addr ipi_addr; +}; + typedef abi_int target_timer_t; #define TARGET_SIGEV_MAX_SIZE 64 @@ -2754,4 +2750,29 @@ struct target_sched_param { abi_int sched_priority; }; +/* from kernel's include/uapi/linux/openat2.h */ +struct open_how_ver0 { + uint64_t flags; + uint64_t mode; + uint64_t resolve; +}; +struct target_open_how_ver0 { + abi_ullong flags; + abi_ullong mode; + abi_ullong resolve; +}; +#ifndef RESOLVE_NO_MAGICLINKS +#define RESOLVE_NO_MAGICLINKS 0x02 +#endif +#ifndef RESOLVE_NO_SYMLINKS +#define RESOLVE_NO_SYMLINKS 0x04 +#endif + +#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ + (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ + defined(TARGET_M68K) || defined(TARGET_MICROBLAZE) || \ + defined(TARGET_S390X) +#define TARGET_ARCH_WANT_SYS_OLD_MMAP +#endif + #endif diff --git a/linux-user/user-internals.h b/linux-user/user-internals.h index 5c7f173ceb4..691b9a1775f 100644 --- a/linux-user/user-internals.h +++ b/linux-user/user-internals.h @@ -19,8 +19,6 @@ #define LINUX_USER_USER_INTERNALS_H #include "user/thunk.h" -#include "exec/exec-all.h" -#include "exec/tb-flush.h" #include "qemu/log.h" extern char *exec_path; @@ -65,7 +63,6 @@ abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1, abi_long arg5, abi_long arg6, abi_long arg7, abi_long arg8); extern __thread CPUState *thread_cpu; -G_NORETURN void cpu_loop(CPUArchState *env); abi_long get_errno(abi_long ret); const char *target_strerror(int err); int get_osversion(void); @@ -102,7 +99,6 @@ int host_to_target_waitstatus(int status); /* vm86.c */ void save_v86_state(CPUX86State *env); void handle_vm86_trap(CPUX86State *env, int trapno); -void handle_vm86_fault(CPUX86State *env); int do_vm86(CPUX86State *env, long subfunction, abi_ulong v86_addr); #elif defined(TARGET_SPARC64) void sparc64_set_context(CPUSPARCState *env); diff --git a/linux-user/user-mmap.h b/linux-user/user-mmap.h index b94bcdcf83c..dfc4477a720 100644 --- a/linux-user/user-mmap.h +++ b/linux-user/user-mmap.h @@ -18,6 +18,8 @@ #ifndef LINUX_USER_USER_MMAP_H #define LINUX_USER_USER_MMAP_H +#include "user/mmap.h" + /* * Guest parameters for the ADDR_COMPAT_LAYOUT personality * (at present this is the only layout supported by QEMU). @@ -39,24 +41,7 @@ extern abi_ulong task_unmapped_base; extern abi_ulong elf_et_dyn_base; -/* - * mmap_next_start: The base address for the next mmap without hint, - * increased after each successful map, starting at task_unmapped_base. - * This is an optimization within QEMU and not part of ADDR_COMPAT_LAYOUT. 
- */ -extern abi_ulong mmap_next_start; - -int target_mprotect(abi_ulong start, abi_ulong len, int prot); -abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, - int flags, int fd, off_t offset); -int target_munmap(abi_ulong start, abi_ulong len); -abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, - abi_ulong new_size, unsigned long flags, - abi_ulong new_addr); abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice); -abi_ulong mmap_find_vma(abi_ulong, abi_ulong, abi_ulong); -void mmap_fork_start(void); -void mmap_fork_end(int child); abi_ulong target_shmat(CPUArchState *cpu_env, int shmid, abi_ulong shmaddr, int shmflg); diff --git a/linux-user/vm86.c b/linux-user/vm86.c index 9f512a2242b..5091d53fb84 100644 --- a/linux-user/vm86.c +++ b/linux-user/vm86.c @@ -47,30 +47,6 @@ static inline void vm_putw(CPUX86State *env, uint32_t segptr, cpu_stw_data(env, segptr + (reg16 & 0xffff), val); } -static inline void vm_putl(CPUX86State *env, uint32_t segptr, - unsigned int reg16, unsigned int val) -{ - cpu_stl_data(env, segptr + (reg16 & 0xffff), val); -} - -static inline unsigned int vm_getb(CPUX86State *env, - uint32_t segptr, unsigned int reg16) -{ - return cpu_ldub_data(env, segptr + (reg16 & 0xffff)); -} - -static inline unsigned int vm_getw(CPUX86State *env, - uint32_t segptr, unsigned int reg16) -{ - return cpu_lduw_data(env, segptr + (reg16 & 0xffff)); -} - -static inline unsigned int vm_getl(CPUX86State *env, - uint32_t segptr, unsigned int reg16) -{ - return cpu_ldl_data(env, segptr + (reg16 & 0xffff)); -} - void save_v86_state(CPUX86State *env) { CPUState *cs = env_cpu(env); @@ -131,19 +107,6 @@ static inline void return_to_32bit(CPUX86State *env, int retval) env->regs[R_EAX] = retval; } -static inline int set_IF(CPUX86State *env) -{ - CPUState *cs = env_cpu(env); - TaskState *ts = get_task_state(cs); - - ts->v86flags |= VIF_MASK; - if (ts->v86flags & VIP_MASK) { - return_to_32bit(env, TARGET_VM86_STI); - return 1; - } - return 0; -} - static inline void clear_IF(CPUX86State *env) { CPUState *cs = env_cpu(env); @@ -162,34 +125,6 @@ static inline void clear_AC(CPUX86State *env) env->eflags &= ~AC_MASK; } -static inline int set_vflags_long(unsigned long eflags, CPUX86State *env) -{ - CPUState *cs = env_cpu(env); - TaskState *ts = get_task_state(cs); - - set_flags(ts->v86flags, eflags, ts->v86mask); - set_flags(env->eflags, eflags, SAFE_MASK); - if (eflags & IF_MASK) - return set_IF(env); - else - clear_IF(env); - return 0; -} - -static inline int set_vflags_short(unsigned short flags, CPUX86State *env) -{ - CPUState *cs = env_cpu(env); - TaskState *ts = get_task_state(cs); - - set_flags(ts->v86flags, flags, ts->v86mask & 0xffff); - set_flags(env->eflags, flags, SAFE_MASK); - if (flags & IF_MASK) - return set_IF(env); - else - clear_IF(env); - return 0; -} - static inline unsigned int get_vflags(CPUX86State *env) { CPUState *cs = env_cpu(env); @@ -255,142 +190,6 @@ void handle_vm86_trap(CPUX86State *env, int trapno) } } -#define CHECK_IF_IN_TRAP() \ - if ((ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) && \ - (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_TFpendig)) \ - newflags |= TF_MASK - -#define VM86_FAULT_RETURN \ - if ((ts->vm86plus.vm86plus.flags & TARGET_force_return_for_pic) && \ - (ts->v86flags & (IF_MASK | VIF_MASK))) \ - return_to_32bit(env, TARGET_VM86_PICRETURN); \ - return - -void handle_vm86_fault(CPUX86State *env) -{ - CPUState *cs = env_cpu(env); - TaskState *ts = get_task_state(cs); - uint32_t csp, ssp; - unsigned int ip, sp, 
newflags, newip, newcs, opcode, intno; - int data32, pref_done; - - csp = env->segs[R_CS].selector << 4; - ip = env->eip & 0xffff; - - ssp = env->segs[R_SS].selector << 4; - sp = env->regs[R_ESP] & 0xffff; - - LOG_VM86("VM86 exception %04x:%08x\n", - env->segs[R_CS].selector, env->eip); - - data32 = 0; - pref_done = 0; - do { - opcode = vm_getb(env, csp, ip); - ADD16(ip, 1); - switch (opcode) { - case 0x66: /* 32-bit data */ data32=1; break; - case 0x67: /* 32-bit address */ break; - case 0x2e: /* CS */ break; - case 0x3e: /* DS */ break; - case 0x26: /* ES */ break; - case 0x36: /* SS */ break; - case 0x65: /* GS */ break; - case 0x64: /* FS */ break; - case 0xf2: /* repnz */ break; - case 0xf3: /* rep */ break; - default: pref_done = 1; - } - } while (!pref_done); - - /* VM86 mode */ - switch(opcode) { - case 0x9c: /* pushf */ - if (data32) { - vm_putl(env, ssp, sp - 4, get_vflags(env)); - ADD16(env->regs[R_ESP], -4); - } else { - vm_putw(env, ssp, sp - 2, get_vflags(env)); - ADD16(env->regs[R_ESP], -2); - } - env->eip = ip; - VM86_FAULT_RETURN; - - case 0x9d: /* popf */ - if (data32) { - newflags = vm_getl(env, ssp, sp); - ADD16(env->regs[R_ESP], 4); - } else { - newflags = vm_getw(env, ssp, sp); - ADD16(env->regs[R_ESP], 2); - } - env->eip = ip; - CHECK_IF_IN_TRAP(); - if (data32) { - if (set_vflags_long(newflags, env)) - return; - } else { - if (set_vflags_short(newflags, env)) - return; - } - VM86_FAULT_RETURN; - - case 0xcd: /* int */ - intno = vm_getb(env, csp, ip); - ADD16(ip, 1); - env->eip = ip; - if (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) { - if ( (ts->vm86plus.vm86plus.vm86dbg_intxxtab[intno >> 3] >> - (intno &7)) & 1) { - return_to_32bit(env, TARGET_VM86_INTx + (intno << 8)); - return; - } - } - do_int(env, intno); - break; - - case 0xcf: /* iret */ - if (data32) { - newip = vm_getl(env, ssp, sp) & 0xffff; - newcs = vm_getl(env, ssp, sp + 4) & 0xffff; - newflags = vm_getl(env, ssp, sp + 8); - ADD16(env->regs[R_ESP], 12); - } else { - newip = vm_getw(env, ssp, sp); - newcs = vm_getw(env, ssp, sp + 2); - newflags = vm_getw(env, ssp, sp + 4); - ADD16(env->regs[R_ESP], 6); - } - env->eip = newip; - cpu_x86_load_seg(env, R_CS, newcs); - CHECK_IF_IN_TRAP(); - if (data32) { - if (set_vflags_long(newflags, env)) - return; - } else { - if (set_vflags_short(newflags, env)) - return; - } - VM86_FAULT_RETURN; - - case 0xfa: /* cli */ - env->eip = ip; - clear_IF(env); - VM86_FAULT_RETURN; - - case 0xfb: /* sti */ - env->eip = ip; - if (set_IF(env)) - return; - VM86_FAULT_RETURN; - - default: - /* real VM86 GPF exception */ - return_to_32bit(env, TARGET_VM86_UNKNOWN); - break; - } -} - int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr) { CPUState *cs = env_cpu(env); diff --git a/linux-user/x86_64/syscall_64.tbl b/linux-user/x86_64/syscall_64.tbl index ce18119ea0d..7093ee21c0d 100644 --- a/linux-user/x86_64/syscall_64.tbl +++ b/linux-user/x86_64/syscall_64.tbl @@ -1,8 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note # # 64-bit system call numbers and entry vectors # # The format is: -# +# [ [noreturn]] # # The __x64_sys_*() stubs are created on-the-fly for sys_*() system calls # @@ -68,7 +69,7 @@ 57 common fork sys_fork 58 common vfork sys_vfork 59 64 execve sys_execve -60 common exit sys_exit +60 common exit sys_exit - noreturn 61 common wait4 sys_wait4 62 common kill sys_kill 63 common uname sys_newuname @@ -220,7 +221,7 @@ 209 64 io_submit sys_io_submit 210 common io_cancel sys_io_cancel 211 64 get_thread_area -212 common lookup_dcookie 
sys_lookup_dcookie +212 common lookup_dcookie 213 common epoll_create sys_epoll_create 214 64 epoll_ctl_old 215 64 epoll_wait_old @@ -239,7 +240,7 @@ 228 common clock_gettime sys_clock_gettime 229 common clock_getres sys_clock_getres 230 common clock_nanosleep sys_clock_nanosleep -231 common exit_group sys_exit_group +231 common exit_group sys_exit_group - noreturn 232 common epoll_wait sys_epoll_wait 233 common epoll_ctl sys_epoll_ctl 234 common tgkill sys_tgkill @@ -343,6 +344,7 @@ 332 common statx sys_statx 333 common io_pgetevents sys_io_pgetevents 334 common rseq sys_rseq +335 common uretprobe sys_uretprobe # don't use numbers 387 through 423, add new calls after the last # 'common' entry 424 common pidfd_send_signal sys_pidfd_send_signal @@ -364,10 +366,26 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 common quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +447 common memfd_secret sys_memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal # # Due to a historical design error, certain syscalls are numbered differently @@ -396,7 +414,7 @@ 530 x32 set_robust_list compat_sys_set_robust_list 531 x32 get_robust_list compat_sys_get_robust_list 532 x32 vmsplice sys_vmsplice -533 x32 move_pages compat_sys_move_pages +533 x32 move_pages sys_move_pages 534 x32 preadv compat_sys_preadv64 535 x32 pwritev compat_sys_pwritev64 536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo diff --git a/linux-user/x86_64/syscallhdr.sh b/linux-user/x86_64/syscallhdr.sh index 182be52a74f..988256b6c63 100644 --- a/linux-user/x86_64/syscallhdr.sh +++ b/linux-user/x86_64/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/x86_64/target_signal.h b/linux-user/x86_64/target_signal.h index 9d9717406f3..0af100c6611 100644 --- a/linux-user/x86_64/target_signal.h +++ b/linux-user/x86_64/target_signal.h @@ -3,6 +3,8 @@ #include "../generic/signal.h" +#define TARGET_SA_RESTORER 0x04000000 + /* For x86_64, use of SA_RESTORER is mandatory. 
*/ #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0 diff --git a/linux-user/xtensa/cpu_loop.c b/linux-user/xtensa/cpu_loop.c index d51ce053926..c0fcf743e70 100644 --- a/linux-user/xtensa/cpu_loop.c +++ b/linux-user/xtensa/cpu_loop.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu.h" #include "user-internals.h" -#include "cpu_loop-common.h" +#include "user/cpu_loop.h" #include "signal-common.h" static void xtensa_rfw(CPUXtensaState *env) @@ -238,7 +238,7 @@ void cpu_loop(CPUXtensaState *env) } } -void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) { int i; for (i = 0; i < 16; ++i) { diff --git a/linux-user/xtensa/signal.c b/linux-user/xtensa/signal.c index 6514b8dd57f..ef8b0c3a279 100644 --- a/linux-user/xtensa/signal.c +++ b/linux-user/xtensa/signal.c @@ -241,7 +241,6 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, give_sigsegv: force_sigsegv(sig); - return; } static void restore_sigcontext(CPUXtensaState *env, diff --git a/linux-user/xtensa/syscall.tbl b/linux-user/xtensa/syscall.tbl index fd2f30227d9..735a89b3bd8 100644 --- a/linux-user/xtensa/syscall.tbl +++ b/linux-user/xtensa/syscall.tbl @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note # # system call numbers and entry vectors for xtensa # @@ -223,7 +223,7 @@ # 205 was old nfsservctl 205 common nfsservctl sys_ni_syscall 206 common _sysctl sys_ni_syscall -207 common bdflush sys_bdflush +207 common bdflush sys_ni_syscall 208 common uname sys_newuname 209 common sysinfo sys_sysinfo 210 common init_module sys_init_module @@ -273,7 +273,7 @@ 252 common timer_getoverrun sys_timer_getoverrun # System 253 common reserved253 sys_ni_syscall -254 common lookup_dcookie sys_lookup_dcookie +254 common lookup_dcookie sys_ni_syscall 255 common available255 sys_ni_syscall 256 common add_key sys_add_key 257 common request_key sys_request_key @@ -413,7 +413,23 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 common quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/linux-user/xtensa/syscallhdr.sh b/linux-user/xtensa/syscallhdr.sh index eef0644c943..dc787fbbfec 100644 --- a/linux-user/xtensa/syscallhdr.sh +++ b/linux-user/xtensa/syscallhdr.sh @@ -1,5 +1,5 @@ #!/bin/sh -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only in="$1" out="$2" diff --git a/linux-user/xtensa/target_signal.h b/linux-user/xtensa/target_signal.h index e4b1bea5cb5..8a198bf8ac8 100644 --- 
a/linux-user/xtensa/target_signal.h +++ b/linux-user/xtensa/target_signal.h @@ -3,6 +3,8 @@ #include "../generic/signal.h" +#define TARGET_SA_RESTORER 0x04000000 + #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 #endif diff --git a/meson.build b/meson.build index 97f63aa86c3..50c774a1955 100644 --- a/meson.build +++ b/meson.build @@ -1,11 +1,16 @@ -project('qemu', ['c'], meson_version: '>=1.1.0', +project('qemu', ['c'], meson_version: '>=1.5.0', default_options: ['warning_level=1', 'c_std=gnu11', 'cpp_std=gnu++11', 'b_colorout=auto', 'b_staticpic=false', 'stdsplit=false', 'optimization=2', 'b_pie=true'], version: files('VERSION')) -add_test_setup('quick', exclude_suites: ['slow', 'thorough'], is_default: true) -add_test_setup('slow', exclude_suites: ['thorough'], env: ['G_TEST_SLOW=1', 'SPEED=slow']) -add_test_setup('thorough', env: ['G_TEST_SLOW=1', 'SPEED=thorough']) +meson.add_devenv({ 'MESON_BUILD_ROOT' : meson.project_build_root() }) + +add_test_setup('quick', exclude_suites: ['slow', 'thorough'], is_default: true, + env: ['RUST_BACKTRACE=1']) +add_test_setup('slow', exclude_suites: ['thorough'], + env: ['G_TEST_SLOW=1', 'SPEED=slow', 'RUST_BACKTRACE=1']) +add_test_setup('thorough', + env: ['G_TEST_SLOW=1', 'SPEED=thorough', 'RUST_BACKTRACE=1']) meson.add_postconf_script(find_program('scripts/symlink-install-tree.py')) @@ -15,6 +20,7 @@ meson.add_postconf_script(find_program('scripts/symlink-install-tree.py')) not_found = dependency('', required: false) keyval = import('keyval') +rust = import('rust') ss = import('sourceset') fs = import('fs') @@ -44,14 +50,25 @@ genh = [] qapi_trace_events = [] bsd_oses = ['gnu/kfreebsd', 'freebsd', 'netbsd', 'openbsd', 'dragonfly', 'darwin'] -supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux'] +supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux', 'emscripten'] supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv32', 'riscv64', 'x86', 'x86_64', - 'arm', 'aarch64', 'loongarch64', 'mips', 'mips64', 'sparc64'] + 'arm', 'aarch64', 'loongarch64', 'mips', 'mips64', 'sparc64', 'wasm32'] cpu = host_machine.cpu_family() target_dirs = config_host['TARGET_DIRS'].split() +# type of binaries to build +have_linux_user = false +have_bsd_user = false +have_system = false +foreach target : target_dirs + have_linux_user = have_linux_user or target.endswith('linux-user') + have_bsd_user = have_bsd_user or target.endswith('bsd-user') + have_system = have_system or target.endswith('-softmmu') +endforeach +have_user = have_linux_user or have_bsd_user + ############ # Programs # ############ @@ -71,6 +88,66 @@ if host_os == 'darwin' and \ objc = meson.get_compiler('objc') endif +have_rust = add_languages('rust', native: false, + required: get_option('rust').disable_auto_if(not have_system)) +have_rust = have_rust and add_languages('rust', native: true, + required: get_option('rust').disable_auto_if(not have_system)) +if have_rust + rustc = meson.get_compiler('rust') + if rustc.version().version_compare('<1.77.0') + if get_option('rust').enabled() + error('rustc version ' + rustc.version() + ' is unsupported. 
Please upgrade to at least 1.77.0') + else + warning('rustc version ' + rustc.version() + ' is unsupported, disabling Rust compilation.') + message('Please upgrade to at least 1.77.0 to use Rust.') + have_rust = false + endif + endif +endif + +if have_rust + rustdoc = find_program('rustdoc', required: get_option('rust')) + bindgen = find_program('bindgen', required: get_option('rust')) + if not bindgen.found() or bindgen.version().version_compare('<0.60.0') + if get_option('rust').enabled() + error('bindgen version ' + bindgen.version() + ' is unsupported. You can install a new version with "cargo install bindgen-cli"') + else + if bindgen.found() + warning('bindgen version ' + bindgen.version() + ' is unsupported, disabling Rust compilation.') + else + warning('bindgen not found, disabling Rust compilation.') + endif + message('To use Rust you can install a new version with "cargo install bindgen-cli"') + have_rust = false + endif + endif +endif + +if have_rust + rustc_args = [find_program('scripts/rust/rustc_args.py'), + '--rustc-version', rustc.version(), + '--workspace', meson.project_source_root() / 'rust'] + if get_option('strict_rust_lints') + rustc_args += ['--strict-lints'] + endif + + rustfmt = find_program('rustfmt', required: false) + + rustc_lint_args = run_command(rustc_args, '--lints', + capture: true, check: true).stdout().strip().splitlines() + + # Apart from procedural macros, our Rust executables will often link + # with C code, so include all the libraries that C code needs. This + # is safe; https://github.com/rust-lang/rust/pull/54675 says that + # passing -nodefaultlibs to the linker "was more ideological to + # start with than anything". + add_project_arguments(rustc_lint_args + + ['--cfg', 'MESON', '-C', 'default-linker-libraries'], + native: false, language: 'rust') + add_project_arguments(rustc_lint_args + ['--cfg', 'MESON'], + native: true, language: 'rust') +endif + dtrace = not_found stap = not_found if 'dtrace' in get_option('trace_backends') @@ -93,7 +170,7 @@ else iasl = find_program(get_option('iasl'), required: true) endif -edk2_targets = [ 'arm-softmmu', 'aarch64-softmmu', 'i386-softmmu', 'x86_64-softmmu', 'riscv64-softmmu' ] +edk2_targets = [ 'arm-softmmu', 'aarch64-softmmu', 'i386-softmmu', 'x86_64-softmmu', 'riscv64-softmmu', 'loongarch64-softmmu' ] unpack_edk2_blobs = false foreach target : edk2_targets if target in target_dirs @@ -171,16 +248,7 @@ have_vhost_net_vdpa = have_vhost_vdpa and get_option('vhost_net').allowed() have_vhost_net_kernel = have_vhost_kernel and get_option('vhost_net').allowed() have_vhost_net = have_vhost_net_kernel or have_vhost_net_user or have_vhost_net_vdpa -# type of binaries to build -have_linux_user = false -have_bsd_user = false -have_system = false -foreach target : target_dirs - have_linux_user = have_linux_user or target.endswith('linux-user') - have_bsd_user = have_bsd_user or target.endswith('bsd-user') - have_system = have_system or target.endswith('-softmmu') -endforeach -have_user = have_linux_user or have_bsd_user +have_tcg = get_option('tcg').allowed() and (have_system or have_user) have_tools = get_option('tools') \ .disable_auto_if(not have_system) \ @@ -215,30 +283,41 @@ else host_arch = cpu endif -if cpu in ['x86', 'x86_64'] +if cpu == 'x86' + kvm_targets = ['i386-softmmu'] +elif cpu == 'x86_64' kvm_targets = ['i386-softmmu', 'x86_64-softmmu'] elif cpu == 'aarch64' kvm_targets = ['aarch64-softmmu'] elif cpu == 's390x' kvm_targets = ['s390x-softmmu'] -elif cpu in ['ppc', 'ppc64'] +elif cpu == 'ppc' + 
kvm_targets = ['ppc-softmmu'] +elif cpu == 'ppc64' kvm_targets = ['ppc-softmmu', 'ppc64-softmmu'] -elif cpu in ['mips', 'mips64'] +elif cpu == 'mips' + kvm_targets = ['mips-softmmu', 'mipsel-softmmu'] +elif cpu == 'mips64' kvm_targets = ['mips-softmmu', 'mipsel-softmmu', 'mips64-softmmu', 'mips64el-softmmu'] -elif cpu in ['riscv32'] +elif cpu == 'riscv32' kvm_targets = ['riscv32-softmmu'] -elif cpu in ['riscv64'] +elif cpu == 'riscv64' kvm_targets = ['riscv64-softmmu'] -elif cpu in ['loongarch64'] +elif cpu == 'loongarch64' kvm_targets = ['loongarch64-softmmu'] else kvm_targets = [] endif accelerator_targets = { 'CONFIG_KVM': kvm_targets } -if cpu in ['x86', 'x86_64'] +if cpu == 'x86' + xen_targets = ['i386-softmmu'] +elif cpu == 'x86_64' xen_targets = ['i386-softmmu', 'x86_64-softmmu'] -elif cpu in ['arm', 'aarch64'] +elif cpu == 'arm' + # i386 emulator provides xenpv machine type for multiple architectures + xen_targets = ['i386-softmmu'] +elif cpu == 'aarch64' # i386 emulator provides xenpv machine type for multiple architectures xen_targets = ['i386-softmmu', 'x86_64-softmmu', 'aarch64-softmmu'] else @@ -246,13 +325,11 @@ else endif accelerator_targets += { 'CONFIG_XEN': xen_targets } -if cpu in ['aarch64'] +if cpu == 'aarch64' accelerator_targets += { 'CONFIG_HVF': ['aarch64-softmmu'] } -endif - -if cpu in ['x86', 'x86_64'] +elif cpu == 'x86_64' accelerator_targets += { 'CONFIG_HVF': ['x86_64-softmmu'], 'CONFIG_NVMM': ['i386-softmmu', 'x86_64-softmmu'], @@ -260,12 +337,6 @@ if cpu in ['x86', 'x86_64'] } endif -modular_tcg = [] -# Darwin does not support references to thread-local variables in modules -if host_os != 'darwin' - modular_tcg = ['i386-softmmu', 'x86_64-softmmu'] -endif - ################## # Compiler flags # ################## @@ -276,8 +347,8 @@ foreach lang : all_languages # ok elif compiler.get_id() == 'clang' and compiler.compiles(''' #ifdef __apple_build_version__ - # if __clang_major__ < 12 || (__clang_major__ == 12 && __clang_minor__ < 0) - # error You need at least XCode Clang v12.0 to compile QEMU + # if __clang_major__ < 15 || (__clang_major__ == 15 && __clang_minor__ < 0) + # error You need at least XCode Clang v15.0 to compile QEMU # endif #else # if __clang_major__ < 10 || (__clang_major__ == 10 && __clang_minor__ < 0) @@ -285,8 +356,10 @@ foreach lang : all_languages # endif #endif''') # ok + elif compiler.get_id() == 'emscripten' + # ok else - error('You either need GCC v7.4 or Clang v10.0 (or XCode Clang v12.0) to compile QEMU') + error('You either need GCC v7.4 or Clang v10.0 (or XCode Clang v15.0) to compile QEMU') endif endforeach @@ -315,8 +388,17 @@ elif host_os == 'sunos' qemu_common_flags += '-D__EXTENSIONS__' elif host_os == 'haiku' qemu_common_flags += ['-DB_USE_POSITIVE_POSIX_ERRORS', '-D_BSD_SOURCE', '-fPIC'] +elif host_os == 'windows' + # plugins use delaylib, and clang needs to be used with lld to make it work. + if compiler.get_id() == 'clang' and compiler.get_linker_id() != 'ld.lld' + error('On windows, you need to use lld with clang - use msys2 clang64/clangarm64 env') + endif endif +# Choose instruction set (currently x86-only) + +qemu_isa_flags = [] + # __sync_fetch_and_and requires at least -march=i486. 
Many toolchains # use i686 as default anyway, but for those that don't, an explicit # specification is necessary @@ -333,7 +415,7 @@ if host_arch == 'i386' and not cc.links(''' sfaa(&val); return val; }''') - qemu_common_flags = ['-march=i486'] + qemu_common_flags + qemu_isa_flags += ['-march=i486'] endif # Pick x86-64 baseline version @@ -349,29 +431,31 @@ if host_arch in ['i386', 'x86_64'] else # present on basically all processors but technically not part of # x86-64-v1, so only include -mneeded for x86-64 version 2 and above - qemu_common_flags = ['-mcx16'] + qemu_common_flags + qemu_isa_flags += ['-mcx16'] endif endif if get_option('x86_version') >= '2' - qemu_common_flags = ['-mpopcnt'] + qemu_common_flags - qemu_common_flags = cc.get_supported_arguments('-mneeded') + qemu_common_flags + qemu_isa_flags += ['-mpopcnt'] + qemu_isa_flags += cc.get_supported_arguments('-mneeded') endif if get_option('x86_version') >= '3' - qemu_common_flags = ['-mmovbe', '-mabm', '-mbmi1', '-mbmi2', '-mfma', '-mf16c'] + qemu_common_flags + qemu_isa_flags += ['-mmovbe', '-mabm', '-mbmi', '-mbmi2', '-mfma', '-mf16c'] endif # add required vector instruction set (each level implies those below) if get_option('x86_version') == '1' - qemu_common_flags = ['-msse2'] + qemu_common_flags + qemu_isa_flags += ['-msse2'] elif get_option('x86_version') == '2' - qemu_common_flags = ['-msse4.2'] + qemu_common_flags + qemu_isa_flags += ['-msse4.2'] elif get_option('x86_version') == '3' - qemu_common_flags = ['-mavx2'] + qemu_common_flags + qemu_isa_flags += ['-mavx2'] elif get_option('x86_version') == '4' - qemu_common_flags = ['-mavx512f', '-mavx512bw', '-mavx512cd', '-mavx512dq', '-mavx512vl'] + qemu_common_flags + qemu_isa_flags += ['-mavx512f', '-mavx512bw', '-mavx512cd', '-mavx512dq', '-mavx512vl'] endif endif +qemu_common_flags = qemu_isa_flags + qemu_common_flags + if get_option('prefer_static') qemu_ldflags += get_option('b_pie') ? '-static-pie' : '-static' endif @@ -391,7 +475,10 @@ endif # instead, we can't add -no-pie because it overrides -shared: the linker then # tries to build an executable instead of a shared library and fails. So # don't add -no-pie anywhere and cross fingers. :( -if not get_option('b_pie') +# +# Emscripten doesn't support -no-pie but meson can't catch the compiler +# warning. So explicitly omit the flag for Emscripten. 
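A note on the qemu_isa_flags block above: each x86_version level only appends compiler flags, so the whole build is allowed to assume the corresponding instruction-set baseline. As a minimal sketch (assuming an x86-64-v2 baseline, i.e. x86_version >= '2'; the file and function names are invented for illustration), -mpopcnt is the kind of flag that lets the compiler lower a builtin to a single instruction:

    /* popcnt_demo.c -- illustrative only, not part of the tree.
     * With -mpopcnt (added above for x86_version >= '2') the compiler can
     * emit a single POPCNT instruction for __builtin_popcountll() instead
     * of a bit-twiddling fallback.
     * Build: cc -O2 -mpopcnt popcnt_demo.c */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t mask = 0xf0f0f0f0f0f0f0f0ull;
        printf("set bits: %d\n", __builtin_popcountll(mask));
        return 0;
    }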
+if not get_option('b_pie') and host_os != 'emscripten' qemu_common_flags += cc.get_supported_arguments('-fno-pie', '-no-pie') endif @@ -435,6 +522,8 @@ ucontext_probe = ''' supported_backends = [] if host_os == 'windows' supported_backends += ['windows'] +elif host_os == 'emscripten' + supported_backends += ['wasm'] else if host_os != 'darwin' and cc.links(ucontext_probe) supported_backends += ['ucontext'] @@ -474,24 +563,38 @@ if get_option('safe_stack') and coroutine_backend != 'ucontext' error('SafeStack is only supported with the ucontext coroutine backend') endif -if get_option('sanitizers') +if get_option('asan') if cc.has_argument('-fsanitize=address') qemu_cflags = ['-fsanitize=address'] + qemu_cflags qemu_ldflags = ['-fsanitize=address'] + qemu_ldflags + else + error('Your compiler does not support -fsanitize=address') endif +endif - # Detect static linking issue with ubsan - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84285 +if get_option('ubsan') + # Detect static linking issue with ubsan: + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84285 if cc.links('int main(int argc, char **argv) { return argc + 1; }', args: [qemu_ldflags, '-fsanitize=undefined']) - qemu_cflags = ['-fsanitize=undefined'] + qemu_cflags - qemu_ldflags = ['-fsanitize=undefined'] + qemu_ldflags + qemu_cflags += ['-fsanitize=undefined'] + qemu_ldflags += ['-fsanitize=undefined'] + + # Suppress undefined behaviour from function call to mismatched type. + # In addition, tcg prologue does not emit function type prefix + # required by function call sanitizer. + if cc.has_argument('-fno-sanitize=function') + qemu_cflags += ['-fno-sanitize=function'] + endif + else + error('Your compiler does not support -fsanitize=undefined') endif endif # Thread sanitizer is, for now, much noisier than the other sanitizers; # keep it separate until that is not the case. if get_option('tsan') - if get_option('sanitizers') + if get_option('asan') or get_option('ubsan') error('TSAN is not supported with other sanitizers') endif if not cc.has_function('__tsan_create_fiber', @@ -499,7 +602,15 @@ if get_option('tsan') prefix: '#include ') error('Cannot enable TSAN due to missing fiber annotation interface') endif - qemu_cflags = ['-fsanitize=thread'] + qemu_cflags + tsan_warn_suppress = [] + # gcc (>=11) will report constructions not supported by tsan: + # "error: ‘atomic_thread_fence’ is not supported with ‘-fsanitize=thread’" + # https://gcc.gnu.org/gcc-11/changes.html + # However, clang does not support this warning and this triggers an error. 
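For the -fno-sanitize=function exception above: UBSan's function sanitizer instruments indirect calls and reports when the function pointer's type does not match the callee, and the comment notes that the TCG prologue does not emit the type prefix the check needs. A hedged stand-alone illustration of the pattern being suppressed (names invented, not QEMU code):

    /* ubsan_function_demo.c -- illustrative only.
     * Built with -fsanitize=undefined and *without* -fno-sanitize=function,
     * the mismatched-type indirect call below is flagged at runtime. */
    #include <stdio.h>

    typedef void (*handler_fn)(void);        /* type the caller expects */

    static int real_handler(int x)           /* actual signature differs */
    {
        return x + 1;
    }

    int main(void)
    {
        handler_fn fn = (handler_fn)(void *)real_handler;
        fn();                                /* call through mismatched type */
        printf("done\n");
        return 0;
    }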
+ if cc.has_argument('-Wno-tsan') + tsan_warn_suppress = ['-Wno-tsan'] + endif + qemu_cflags = ['-fsanitize=thread'] + tsan_warn_suppress + qemu_cflags qemu_ldflags = ['-fsanitize=thread'] + qemu_ldflags endif @@ -649,7 +760,19 @@ warn_flags = [ ] if host_os != 'darwin' - warn_flags += ['-Wthread-safety'] + tsa_has_cleanup = cc.compiles(''' + struct __attribute__((capability("mutex"))) mutex {}; + void lock(struct mutex *m) __attribute__((acquire_capability(m))); + void unlock(struct mutex *m) __attribute__((release_capability(m))); + + void test(void) { + struct mutex __attribute__((cleanup(unlock))) m; + lock(&m); + } + ''', args: ['-Wthread-safety', '-Werror']) + if tsa_has_cleanup + warn_flags += ['-Wthread-safety'] + endif endif # Set up C++ compiler flags @@ -710,17 +833,22 @@ socket = [] version_res = [] coref = [] iokit = [] +pvg = not_found emulator_link_args = [] midl = not_found widl = not_found pathcch = not_found +synchronization = not_found host_dsosuf = '.so' if host_os == 'windows' midl = find_program('midl', required: false) widl = find_program('widl', required: false) - pathcch = cc.find_library('pathcch') - socket = cc.find_library('ws2_32') - winmm = cc.find_library('winmm') + + # MinGW uses lowercase for library names + pathcch = cc.find_library('pathcch', required: true) + synchronization = cc.find_library('synchronization', required: true) + socket = cc.find_library('ws2_32', required: true) + winmm = cc.find_library('winmm', required: true) win = import('windows') version_res = win.compile_resources('version.rc', @@ -731,6 +859,8 @@ elif host_os == 'darwin' coref = dependency('appleframeworks', modules: 'CoreFoundation') iokit = dependency('appleframeworks', modules: 'IOKit', required: false) host_dsosuf = '.dylib' + pvg = dependency('appleframeworks', modules: ['ParavirtualizedGraphics', 'Metal'], + required: get_option('pvg')) elif host_os == 'sunos' socket = [cc.find_library('socket'), cc.find_library('nsl'), @@ -740,7 +870,7 @@ elif host_os == 'haiku' cc.find_library('network'), cc.find_library('bsd')] elif host_os == 'openbsd' - if get_option('tcg').allowed() and target_dirs.length() > 0 + if have_tcg # Disable OpenBSD W^X if available emulator_link_args = cc.get_supported_link_arguments('-Wl,-z,wxneeded') endif @@ -781,11 +911,15 @@ if host_os == 'netbsd' endif tcg_arch = host_arch -if get_option('tcg').allowed() +if have_tcg if host_arch == 'unknown' if not get_option('tcg_interpreter') error('Unsupported CPU @0@, try --enable-tcg-interpreter'.format(cpu)) endif + elif host_arch == 'wasm32' + if not get_option('tcg_interpreter') + error('WebAssembly host requires --enable-tcg-interpreter') + endif elif get_option('tcg_interpreter') warning('Use of the TCG interpreter is not recommended on this host') warning('architecture. There is a native TCG execution backend available') @@ -904,7 +1038,9 @@ have_xen_pci_passthrough = get_option('xen_pci_passthrough') \ ################ # When bumping glib minimum version, please check also whether to increase -# the _WIN32_WINNT setting in osdep.h according to the value from glib +# the _WIN32_WINNT setting in osdep.h according to the value from glib. +# You should also check if any of the glib.version() checks +# below can also be removed. 
glib_req_ver = '>=2.66.0' glib_pc = dependency('glib-2.0', version: glib_req_ver, required: true, method: 'pkg-config') @@ -954,6 +1090,9 @@ glib = declare_dependency(dependencies: [glib_pc, gmodule], # TODO: remove this check and the corresponding workaround (qtree) when # the minimum supported glib is >= 2.75.3 glib_has_gslice = glib.version().version_compare('<2.75.3') +# Check whether glib has the aligned_alloc family of functions. +# +glib_has_aligned_alloc = glib.version().version_compare('>=2.72.0') # override glib dep to include the above refinements meson.override_dependency('glib-2.0', glib) @@ -981,7 +1120,7 @@ if not get_option('gio').auto() or have_system gio = not_found endif if gio.found() - gdbus_codegen = find_program(gio.get_variable('gdbus_codegen'), + gdbus_codegen = find_program('gdbus-codegen', required: get_option('gio')) gio_unix = dependency('gio-unix-2.0', required: get_option('gio'), method: 'pkg-config') @@ -1033,7 +1172,7 @@ endif libnfs = not_found if not get_option('libnfs').auto() or have_block - libnfs = dependency('libnfs', version: '>=1.9.3', + libnfs = dependency('libnfs', version: ['>=1.9.3', '<6.0.0'], required: get_option('libnfs'), method: 'pkg-config') endif @@ -1146,6 +1285,10 @@ if not get_option('slirp').auto() or have_system endif endif +enable_passt = get_option('passt') \ + .require(host_os == 'linux', error_message: 'passt is supported only on Linux') \ + .allowed() + vde = not_found if not get_option('vde').auto() or have_system or have_tools vde = cc.find_library('vdeplug', has_headers: ['libvdeplug.h'], @@ -1245,6 +1388,14 @@ if not get_option('uadk').auto() or have_system uadk = declare_dependency(dependencies: [libwd, libwd_comp]) endif endif + +qatzip = not_found +if not get_option('qatzip').auto() or have_system + qatzip = dependency('qatzip', version: '>=1.1.2', + required: get_option('qatzip'), + method: 'pkg-config') +endif + virgl = not_found have_vhost_user_gpu = have_tools and host_os == 'linux' and pixman.found() @@ -1277,6 +1428,12 @@ if host_os == 'linux' and (have_system or have_tools) method: 'pkg-config', required: get_option('libudev')) endif +igvm = not_found +if not get_option('igvm').auto() or have_system + igvm = dependency('igvm', version: '>= 0.3.0', + method: 'pkg-config', + required: get_option('igvm')) +endif mpathlibs = [libudev] mpathpersist = not_found @@ -1335,7 +1492,7 @@ iconv = not_found curses = not_found if have_system and get_option('curses').allowed() curses_test = ''' - #if defined(__APPLE__) || defined(__OpenBSD__) + #ifdef __APPLE__ #define _XOPEN_SOURCE_EXTENDED 1 #endif #include @@ -1429,9 +1586,11 @@ if not get_option('brlapi').auto() or have_system brlapi = cc.find_library('brlapi', has_headers: ['brlapi.h'], required: get_option('brlapi')) if brlapi.found() and not cc.links(''' - #include - #include - int main(void) { return brlapi__openConnection (NULL, NULL, NULL); }''', dependencies: brlapi) + #include + #include + int main(void) { + return brlapi__openConnection(NULL, NULL, NULL) == BRLAPI_INVALID_FILE_DESCRIPTOR; + }''', dependencies: brlapi) brlapi = not_found if get_option('brlapi').enabled() error('could not link brlapi') @@ -1644,8 +1803,15 @@ if (have_system or have_tools) and (virgl.found() or opengl.found()) endif have_vhost_user_gpu = have_vhost_user_gpu and virgl.found() and opengl.found() and gbm.found() +libcbor = not_found +if not get_option('libcbor').auto() or have_system + libcbor = dependency('libcbor', version: '>=0.7.0', + required: get_option('libcbor')) +endif + gnutls = 
not_found gnutls_crypto = not_found +gnutls_bug1717_workaround = false if get_option('gnutls').enabled() or (get_option('gnutls').auto() and have_system) # For general TLS support our min gnutls matches # that implied by our platform support matrix @@ -1671,6 +1837,12 @@ if get_option('gnutls').enabled() or (get_option('gnutls').auto() and have_syste method: 'pkg-config', required: get_option('gnutls')) endif + + #if gnutls.found() and not get_option('gnutls-bug1717-workaround').disabled() + # XXX: when bug 1717 is resolved, add logic to probe for + # the GNUTLS fixed version number to handle the 'auto' case + # gnutls_bug1717_workaround = true + #endif endif # We prefer use of gnutls for crypto, unless the options @@ -1682,6 +1854,7 @@ gcrypt = not_found nettle = not_found hogweed = not_found crypto_sm4 = not_found +crypto_sm3 = not_found xts = 'none' if get_option('nettle').enabled() and get_option('gcrypt').enabled() @@ -1717,6 +1890,17 @@ if not gnutls_crypto.found() }''', dependencies: gcrypt) crypto_sm4 = not_found endif + crypto_sm3 = gcrypt + # SM3 ALG is available in libgcrypt >= 1.9 + if gcrypt.found() and not cc.links(''' + #include + int main(void) { + gcry_md_hd_t handler; + gcry_md_open(&handler, GCRY_MD_SM3, 0); + return 0; + }''', dependencies: gcrypt) + crypto_sm3 = not_found + endif endif if (not get_option('nettle').auto() or have_system) and not gcrypt.found() nettle = dependency('nettle', version: '>=3.4', @@ -1737,6 +1921,31 @@ if not gnutls_crypto.found() }''', dependencies: nettle) crypto_sm4 = not_found endif + crypto_sm3 = nettle + # SM3 ALG is available in nettle >= 3.8 + if nettle.found() and not cc.links(''' + #include + #include + int main(void) { + struct sm3_ctx ctx; + struct hmac_sm3_ctx hmac_ctx; + unsigned char data[64] = {0}; + unsigned char output[32]; + + // SM3 hash function test + sm3_init(&ctx); + sm3_update(&ctx, 64, data); + sm3_digest(&ctx, 32, data); + + // HMAC-SM3 test + hmac_sm3_set_key(&hmac_ctx, 32, data); + hmac_sm3_update(&hmac_ctx, 64, data); + hmac_sm3_digest(&hmac_ctx, 32, output); + + return 0; + }''', dependencies: nettle) + crypto_sm3 = not_found + endif endif endif @@ -2013,19 +2222,23 @@ if not has_malloc_trim and get_option('malloc_trim').enabled() endif endif -gnu_source_prefix = ''' +osdep_prefix = ''' #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif -''' -# Check whether the glibc provides STATX_BASIC_STATS - -has_statx = cc.has_header_symbol('sys/stat.h', 'STATX_BASIC_STATS', prefix: gnu_source_prefix) - -# Check whether statx() provides mount ID information + #include + #include -has_statx_mnt_id = cc.has_header_symbol('sys/stat.h', 'STATX_MNT_ID', prefix: gnu_source_prefix) + #include + #include + /* Put unistd.h before time.h as that triggers localtime_r/gmtime_r + * function availability on recentish Mingw-w64 platforms. 
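The crypto_sm3 probes above only establish whether the selected crypto library can provide SM3 (libgcrypt >= 1.9 or nettle >= 3.8); CONFIG_CRYPTO_SM3 then records the result. A minimal stand-alone sketch of the libgcrypt side, assuming gcrypt is the backend in use (demo code, not QEMU's crypto API):

    /* sm3_demo.c -- illustrative only; link with -lgcrypt.
     * GCRY_MD_SM3 is the algorithm identifier the probe above tries to open. */
    #include <gcrypt.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned char digest[32];            /* SM3 produces a 256-bit digest */
        const char msg[] = "abc";

        gcry_md_hash_buffer(GCRY_MD_SM3, digest, msg, sizeof(msg) - 1);

        for (int i = 0; i < (int)sizeof(digest); i++) {
            printf("%02x", digest[i]);
        }
        printf("\n");
        return 0;
    }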
*/ + #include + #include + #include + #include +''' have_vhost_user_blk_server = get_option('vhost_user_blk_server') \ .require(host_os == 'linux', @@ -2100,8 +2313,14 @@ endif # libxdp libxdp = not_found if not get_option('af_xdp').auto() or have_system - libxdp = dependency('libxdp', required: get_option('af_xdp'), - version: '>=1.4.0', method: 'pkg-config') + if libbpf.found() + libxdp = dependency('libxdp', required: get_option('af_xdp'), + version: '>=1.4.0', method: 'pkg-config') + else + if get_option('af_xdp').enabled() + error('libxdp requested, but libbpf is not available') + endif + endif endif # libdw @@ -2119,6 +2338,7 @@ endif config_host_data = configuration_data() +config_host_data.set('CONFIG_HAVE_RUST', have_rust) audio_drivers_selected = [] if have_system audio_drivers_available = { @@ -2180,13 +2400,6 @@ have_virtfs = get_option('virtfs') \ .disable_auto_if(not have_tools and not have_system) \ .allowed() -have_virtfs_proxy_helper = get_option('virtfs_proxy_helper') \ - .require(host_os != 'darwin', error_message: 'the virtfs proxy helper is incompatible with macOS') \ - .require(have_virtfs, error_message: 'the virtfs proxy helper requires that virtfs is enabled') \ - .disable_auto_if(not have_tools) \ - .require(libcap_ng.found(), error_message: 'the virtfs proxy helper requires libcap-ng') \ - .allowed() - qga_fsfreeze = false qga_fstrim = false if host_os == 'linux' @@ -2293,6 +2506,8 @@ config_host_data.set('CONFIG_BLKIO', blkio.found()) if blkio.found() config_host_data.set('CONFIG_BLKIO_VHOST_VDPA_FD', blkio.version().version_compare('>=1.3.0')) + config_host_data.set('CONFIG_BLKIO_WRITE_ZEROS_FUA', + blkio.version().version_compare('>=1.4.0')) endif config_host_data.set('CONFIG_CURL', curl.found()) config_host_data.set('CONFIG_CURSES', curses.found()) @@ -2342,10 +2557,11 @@ if seccomp.found() config_host_data.set('CONFIG_SECCOMP_SYSRAWRC', seccomp_has_sysrawrc) endif config_host_data.set('CONFIG_PIXMAN', pixman.found()) +config_host_data.set('CONFIG_PASST', enable_passt) config_host_data.set('CONFIG_SLIRP', slirp.found()) config_host_data.set('CONFIG_SNAPPY', snappy.found()) config_host_data.set('CONFIG_SOLARIS', host_os == 'sunos') -if get_option('tcg').allowed() +if have_tcg config_host_data.set('CONFIG_TCG', 1) config_host_data.set('CONFIG_TCG_INTERPRETER', tcg_arch == 'tci') endif @@ -2369,10 +2585,7 @@ config_host_data.set('CONFIG_VNC', vnc.found()) config_host_data.set('CONFIG_VNC_JPEG', jpeg.found()) config_host_data.set('CONFIG_VNC_SASL', sasl.found()) if virgl.found() - config_host_data.set('HAVE_VIRGL_D3D_INFO_EXT', - cc.has_member('struct virgl_renderer_resource_info_ext', 'd3d_tex2d', - prefix: '#include ', - dependencies: virgl)) + config_host_data.set('VIRGL_VERSION_MAJOR', virgl.version().split('.')[0]) endif config_host_data.set('CONFIG_VIRTFS', have_virtfs) config_host_data.set('CONFIG_VTE', vte.found()) @@ -2381,18 +2594,19 @@ config_host_data.set('CONFIG_KEYUTILS', keyutils.found()) config_host_data.set('CONFIG_GETTID', has_gettid) config_host_data.set('CONFIG_GNUTLS', gnutls.found()) config_host_data.set('CONFIG_GNUTLS_CRYPTO', gnutls_crypto.found()) +config_host_data.set('CONFIG_GNUTLS_BUG1717_WORKAROUND', gnutls_bug1717_workaround) config_host_data.set('CONFIG_TASN1', tasn1.found()) config_host_data.set('CONFIG_GCRYPT', gcrypt.found()) config_host_data.set('CONFIG_NETTLE', nettle.found()) config_host_data.set('CONFIG_CRYPTO_SM4', crypto_sm4.found()) +config_host_data.set('CONFIG_CRYPTO_SM3', crypto_sm3.found()) 
config_host_data.set('CONFIG_HOGWEED', hogweed.found()) config_host_data.set('CONFIG_QEMU_PRIVATE_XTS', xts == 'private') config_host_data.set('CONFIG_MALLOC_TRIM', has_malloc_trim) -config_host_data.set('CONFIG_STATX', has_statx) -config_host_data.set('CONFIG_STATX_MNT_ID', has_statx_mnt_id) config_host_data.set('CONFIG_ZSTD', zstd.found()) config_host_data.set('CONFIG_QPL', qpl.found()) config_host_data.set('CONFIG_UADK', uadk.found()) +config_host_data.set('CONFIG_QATZIP', qatzip.found()) config_host_data.set('CONFIG_FUSE', fuse.found()) config_host_data.set('CONFIG_FUSE_LSEEK', fuse_lseek.found()) config_host_data.set('CONFIG_SPICE_PROTOCOL', spice_protocol.found()) @@ -2408,6 +2622,7 @@ config_host_data.set('CONFIG_CFI', get_option('cfi')) config_host_data.set('CONFIG_SELINUX', selinux.found()) config_host_data.set('CONFIG_XEN_BACKEND', xen.found()) config_host_data.set('CONFIG_LIBDW', libdw.found()) +config_host_data.set('CONFIG_IGVM', igvm.found()) if xen.found() # protect from xen.version() having less than three components xen_version = xen.version().split('.') + ['0', '0'] @@ -2443,9 +2658,20 @@ config_host_data.set('CONFIG_FSTRIM', qga_fstrim) # has_header config_host_data.set('CONFIG_EPOLL', cc.has_header('sys/epoll.h')) config_host_data.set('CONFIG_LINUX_MAGIC_H', cc.has_header('linux/magic.h')) -config_host_data.set('CONFIG_VALGRIND_H', cc.has_header('valgrind/valgrind.h')) +valgrind = false +if get_option('valgrind').allowed() + if cc.has_header('valgrind/valgrind.h') + valgrind = true + else + if get_option('valgrind').enabled() + error('valgrind requested but valgrind.h not found') + endif + endif +endif +config_host_data.set('CONFIG_VALGRIND_H', valgrind) config_host_data.set('HAVE_BTRFS_H', cc.has_header('linux/btrfs.h')) config_host_data.set('HAVE_DRM_H', cc.has_header('libdrm/drm.h')) +config_host_data.set('HAVE_OPENAT2_H', cc.has_header('linux/openat2.h')) config_host_data.set('HAVE_PTY_H', cc.has_header('pty.h')) config_host_data.set('HAVE_SYS_DISK_H', cc.has_header('sys/disk.h')) config_host_data.set('HAVE_SYS_IOCCOM_H', cc.has_header('sys/ioccom.h')) @@ -2461,7 +2687,6 @@ config_host_data.set('CONFIG_CLOCK_ADJTIME', cc.has_function('clock_adjtime')) config_host_data.set('CONFIG_DUP3', cc.has_function('dup3')) config_host_data.set('CONFIG_FALLOCATE', cc.has_function('fallocate')) config_host_data.set('CONFIG_POSIX_FALLOCATE', cc.has_function('posix_fallocate')) -config_host_data.set('CONFIG_GETCPU', cc.has_function('getcpu', prefix: gnu_source_prefix)) config_host_data.set('CONFIG_SCHED_GETCPU', cc.has_function('sched_getcpu', prefix: '#include ')) # Note that we need to specify prefix: here to avoid incorrectly # thinking that Windows has posix_memalign() @@ -2477,11 +2702,13 @@ config_host_data.set('CONFIG_SETNS', cc.has_function('setns') and cc.has_functio config_host_data.set('CONFIG_SYNCFS', cc.has_function('syncfs')) config_host_data.set('CONFIG_SYNC_FILE_RANGE', cc.has_function('sync_file_range')) config_host_data.set('CONFIG_TIMERFD', cc.has_function('timerfd_create')) +config_host_data.set('CONFIG_GETLOADAVG', cc.has_function('getloadavg')) config_host_data.set('HAVE_COPY_FILE_RANGE', cc.has_function('copy_file_range')) config_host_data.set('HAVE_GETIFADDRS', cc.has_function('getifaddrs')) config_host_data.set('HAVE_GLIB_WITH_SLICE_ALLOCATOR', glib_has_gslice) +config_host_data.set('HAVE_GLIB_WITH_ALIGNED_ALLOC', glib_has_aligned_alloc) config_host_data.set('HAVE_OPENPTY', cc.has_function('openpty', dependencies: util)) 
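Each config_host_data.set() above ends up as a preprocessor macro in the generated config-host.h, so consumers simply guard on it. A hedged sketch of that pattern using CONFIG_GETLOADAVG, which is added above; the helper and its fallback are invented for illustration:

    /* loadavg_demo.c -- illustrative only.  In a real build the macro comes
     * from config-host.h; here it can be faked on the command line:
     *   cc -DCONFIG_GETLOADAVG loadavg_demo.c */
    #include <stdio.h>
    #include <stdlib.h>

    static double host_load_average(void)
    {
    #ifdef CONFIG_GETLOADAVG
        double load[1];
        if (getloadavg(load, 1) == 1) {      /* probed by cc.has_function() */
            return load[0];
        }
    #endif
        return -1.0;                         /* feature absent on this host */
    }

    int main(void)
    {
        printf("1-minute load average: %.2f\n", host_load_average());
        return 0;
    }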
-config_host_data.set('HAVE_STRCHRNUL', cc.has_function('strchrnul')) +config_host_data.set('HAVE_STRCHRNUL', cc.has_function('strchrnul', prefix: osdep_prefix)) config_host_data.set('HAVE_SYSTEM_FUNCTION', cc.has_function('system', prefix: '#include ')) if rbd.found() config_host_data.set('HAVE_RBD_NAMESPACE_EXISTS', @@ -2497,7 +2724,7 @@ if rdma.found() endif have_asan_fiber = false -if get_option('sanitizers') and \ +if get_option('asan') and \ not cc.has_function('__sanitizer_start_switch_fiber', args: '-fsanitize=address', prefix: '#include ') @@ -2537,6 +2764,8 @@ config_host_data.set('CONFIG_FALLOCATE_ZERO_RANGE', config_host_data.set('CONFIG_FIEMAP', cc.has_header('linux/fiemap.h') and cc.has_header_symbol('linux/fs.h', 'FS_IOC_FIEMAP')) +config_host_data.set('CONFIG_GETCPU', + cc.has_header_symbol('sched.h', 'getcpu', prefix: osdep_prefix)) config_host_data.set('CONFIG_GETRANDOM', cc.has_function('getrandom') and cc.has_header_symbol('sys/random.h', 'GRND_NONBLOCK')) @@ -2550,6 +2779,44 @@ config_host_data.set('HAVE_OPTRESET', cc.has_header_symbol('getopt.h', 'optreset')) config_host_data.set('HAVE_IPPROTO_MPTCP', cc.has_header_symbol('netinet/in.h', 'IPPROTO_MPTCP')) +if libaio.found() + config_host_data.set('HAVE_IO_PREP_PWRITEV2', + cc.has_header_symbol('libaio.h', 'io_prep_pwritev2')) +endif +if linux_io_uring.found() + config_host_data.set('HAVE_IO_URING_PREP_WRITEV2', + cc.has_header_symbol('liburing.h', 'io_uring_prep_writev2')) +endif +config_host_data.set('HAVE_TCP_KEEPCNT', + cc.has_header_symbol('netinet/tcp.h', 'TCP_KEEPCNT') or + cc.compiles(''' + #include + #ifndef TCP_KEEPCNT + #error + #endif + int main(void) { return 0; }''', + name: 'Win32 TCP_KEEPCNT')) +# On Darwin TCP_KEEPIDLE is available under different name, TCP_KEEPALIVE. +# https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/bsd/man/man4/tcp.4#L172 +config_host_data.set('HAVE_TCP_KEEPIDLE', + cc.has_header_symbol('netinet/tcp.h', 'TCP_KEEPIDLE') or + cc.has_header_symbol('netinet/tcp.h', 'TCP_KEEPALIVE') or + cc.compiles(''' + #include + #ifndef TCP_KEEPIDLE + #error + #endif + int main(void) { return 0; }''', + name: 'Win32 TCP_KEEPIDLE')) +config_host_data.set('HAVE_TCP_KEEPINTVL', + cc.has_header_symbol('netinet/tcp.h', 'TCP_KEEPINTVL') or + cc.compiles(''' + #include + #ifndef TCP_KEEPINTVL + #error + #endif + int main(void) { return 0; }''', + name: 'Win32 TCP_KEEPINTVL')) # has_member config_host_data.set('HAVE_SIGEV_NOTIFY_THREAD_ID', @@ -2573,8 +2840,7 @@ config_host_data.set('HAVE_UTMPX', config_host_data.set('CONFIG_EVENTFD', cc.links(''' #include int main(void) { return eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); }''')) -config_host_data.set('CONFIG_FDATASYNC', cc.links(gnu_source_prefix + ''' - #include +config_host_data.set('CONFIG_FDATASYNC', cc.links(osdep_prefix + ''' int main(void) { #if defined(_POSIX_SYNCHRONIZED_IO) && _POSIX_SYNCHRONIZED_IO > 0 return fdatasync(0); @@ -2583,10 +2849,8 @@ config_host_data.set('CONFIG_FDATASYNC', cc.links(gnu_source_prefix + ''' #endif }''')) -has_madvise = cc.links(gnu_source_prefix + ''' - #include +has_madvise = cc.links(osdep_prefix + ''' #include - #include int main(void) { return madvise(NULL, 0, MADV_DONTNEED); }''') missing_madvise_proto = false if has_madvise @@ -2596,21 +2860,18 @@ if has_madvise # missing-prototype case, we try again with a definitely-bogus prototype. # This will only compile if the system headers don't provide the prototype; # otherwise the conflicting prototypes will cause a compiler error. 
- missing_madvise_proto = cc.links(gnu_source_prefix + ''' - #include + missing_madvise_proto = cc.links(osdep_prefix + '''> #include - #include extern int madvise(int); int main(void) { return madvise(0); }''') endif config_host_data.set('CONFIG_MADVISE', has_madvise) config_host_data.set('HAVE_MADVISE_WITHOUT_PROTOTYPE', missing_madvise_proto) -config_host_data.set('CONFIG_MEMFD', cc.links(gnu_source_prefix + ''' +config_host_data.set('CONFIG_MEMFD', cc.links(osdep_prefix + ''' #include int main(void) { return memfd_create("foo", MFD_ALLOW_SEALING); }''')) -config_host_data.set('CONFIG_OPEN_BY_HANDLE', cc.links(gnu_source_prefix + ''' - #include +config_host_data.set('CONFIG_OPEN_BY_HANDLE', cc.links(osdep_prefix + ''' #if !defined(AT_EMPTY_PATH) # error missing definition #else @@ -2621,13 +2882,12 @@ config_host_data.set('CONFIG_OPEN_BY_HANDLE', cc.links(gnu_source_prefix + ''' # i.e. errno is set and -1 is returned. That's not really how POSIX defines the # function. On the flip side, it has madvise() which is preferred anyways. if host_os != 'darwin' - config_host_data.set('CONFIG_POSIX_MADVISE', cc.links(gnu_source_prefix + ''' + config_host_data.set('CONFIG_POSIX_MADVISE', cc.links(osdep_prefix + ''' #include - #include int main(void) { return posix_madvise(NULL, 0, POSIX_MADV_DONTNEED); }''')) endif -config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_W_TID', cc.links(gnu_source_prefix + ''' +config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_W_TID', cc.links(osdep_prefix + ''' #include static void *f(void *p) { return NULL; } @@ -2638,7 +2898,7 @@ config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_W_TID', cc.links(gnu_source_pref pthread_setname_np(thread, "QEMU"); return 0; }''', dependencies: threads)) -config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_WO_TID', cc.links(gnu_source_prefix + ''' +config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_WO_TID', cc.links(osdep_prefix + ''' #include static void *f(void *p) { pthread_setname_np("QEMU"); return NULL; } @@ -2648,7 +2908,7 @@ config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_WO_TID', cc.links(gnu_source_pre pthread_create(&thread, 0, f, 0); return 0; }''', dependencies: threads)) -config_host_data.set('CONFIG_PTHREAD_SET_NAME_NP', cc.links(gnu_source_prefix + ''' +config_host_data.set('CONFIG_PTHREAD_SET_NAME_NP', cc.links(osdep_prefix + ''' #include #include @@ -2660,9 +2920,8 @@ config_host_data.set('CONFIG_PTHREAD_SET_NAME_NP', cc.links(gnu_source_prefix + pthread_set_name_np(thread, "QEMU"); return 0; }''', dependencies: threads)) -config_host_data.set('CONFIG_PTHREAD_CONDATTR_SETCLOCK', cc.links(gnu_source_prefix + ''' +config_host_data.set('CONFIG_PTHREAD_CONDATTR_SETCLOCK', cc.links(osdep_prefix + ''' #include - #include int main(void) { @@ -2671,7 +2930,7 @@ config_host_data.set('CONFIG_PTHREAD_CONDATTR_SETCLOCK', cc.links(gnu_source_pre pthread_condattr_setclock(&attr, CLOCK_MONOTONIC); return 0; }''', dependencies: threads)) -config_host_data.set('CONFIG_PTHREAD_AFFINITY_NP', cc.links(gnu_source_prefix + ''' +config_host_data.set('CONFIG_PTHREAD_AFFINITY_NP', cc.links(osdep_prefix + ''' #include static void *f(void *p) { return NULL; } @@ -2688,15 +2947,10 @@ config_host_data.set('CONFIG_PTHREAD_AFFINITY_NP', cc.links(gnu_source_prefix + CPU_FREE(cpuset); return 0; }''', dependencies: threads)) -config_host_data.set('CONFIG_SIGNALFD', cc.links(gnu_source_prefix + ''' +config_host_data.set('CONFIG_SIGNALFD', cc.links(osdep_prefix + ''' #include - #include int main(void) { return signalfd(-1, NULL, SFD_CLOEXEC); }''')) 
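The CONFIG_PTHREAD_SETNAME_NP_W_TID / _WO_TID pair of link tests above exists because pthread_setname_np() has two incompatible signatures: glibc takes a thread handle plus a name, while the macOS variant names only the calling thread. A sketch of how the two resulting macros would typically be consumed (illustrative wrapper, not QEMU's actual helper):

    /* thread_name_demo.c -- illustrative only; compile with -pthread and
     * define one of the CONFIG_* macros by hand to select a branch. */
    #include <pthread.h>

    static void name_current_thread(const char *name)
    {
    #if defined(CONFIG_PTHREAD_SETNAME_NP_W_TID)
        pthread_setname_np(pthread_self(), name);   /* glibc-style */
    #elif defined(CONFIG_PTHREAD_SETNAME_NP_WO_TID)
        pthread_setname_np(name);                    /* macOS-style */
    #else
        (void)name;                                  /* not supported */
    #endif
    }

    int main(void)
    {
        name_current_thread("demo-thread");
        return 0;
    }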
-config_host_data.set('CONFIG_SPLICE', cc.links(gnu_source_prefix + ''' - #include - #include - #include - +config_host_data.set('CONFIG_SPLICE', cc.links(osdep_prefix + ''' int main(void) { int len, fd = 0; @@ -2705,16 +2959,22 @@ config_host_data.set('CONFIG_SPLICE', cc.links(gnu_source_prefix + ''' return 0; }''')) -config_host_data.set('HAVE_MLOCKALL', cc.links(gnu_source_prefix + ''' +config_host_data.set('HAVE_MLOCKALL', cc.links(osdep_prefix + ''' #include int main(void) { return mlockall(MCL_FUTURE); }''')) +config_host_data.set('HAVE_MLOCK_ONFAULT', cc.links(osdep_prefix + ''' + #include + int main(void) { + return mlockall(MCL_FUTURE | MCL_ONFAULT); + }''')) + have_l2tpv3 = false if get_option('l2tpv3').allowed() and have_system have_l2tpv3 = cc.has_type('struct mmsghdr', - prefix: gnu_source_prefix + ''' + prefix: osdep_prefix + ''' #include #include ''') endif @@ -2770,9 +3030,11 @@ config_host_data.set('CONFIG_ATOMIC64', cc.links(''' __atomic_exchange_n(&x, y, __ATOMIC_RELAXED); __atomic_fetch_add(&x, y, __ATOMIC_RELAXED); return 0; - }''')) + }''', args: qemu_isa_flags)) -has_int128_type = cc.compiles(''' +# has_int128_type is set to false on Emscripten to avoid errors by libffi +# during runtime. +has_int128_type = host_os != 'emscripten' and cc.compiles(''' __int128_t a; __uint128_t b; int main(void) { b = a; }''') @@ -2804,7 +3066,7 @@ if has_int128_type __atomic_compare_exchange_n(&p[4], &p[5], p[6], 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); return 0; }''' - has_atomic128 = cc.links(atomic_test_128) + has_atomic128 = cc.links(atomic_test_128, args: qemu_isa_flags) config_host_data.set('CONFIG_ATOMIC128', has_atomic128) @@ -2813,7 +3075,8 @@ if has_int128_type # without optimization enabled. Try again with optimizations locally # enabled for the function. 
See # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389 - has_atomic128_opt = cc.links('__attribute__((optimize("O1")))' + atomic_test_128) + has_atomic128_opt = cc.links('__attribute__((optimize("O1")))' + atomic_test_128, + args: qemu_isa_flags) config_host_data.set('CONFIG_ATOMIC128_OPT', has_atomic128_opt) if not has_atomic128_opt @@ -2824,18 +3087,18 @@ if has_int128_type __sync_val_compare_and_swap_16(&x, y, x); return 0; } - ''')) + ''', args: qemu_isa_flags)) endif endif endif -config_host_data.set('CONFIG_GETAUXVAL', cc.links(gnu_source_prefix + ''' +config_host_data.set('CONFIG_GETAUXVAL', cc.links(osdep_prefix + ''' #include int main(void) { return getauxval(AT_HWCAP) == 0; }''')) -config_host_data.set('CONFIG_ELF_AUX_INFO', cc.links(gnu_source_prefix + ''' +config_host_data.set('CONFIG_ELF_AUX_INFO', cc.links(osdep_prefix + ''' #include int main(void) { unsigned long hwcap = 0; @@ -2893,22 +3156,16 @@ config_host_data.set('CONFIG_ASM_HWPROBE_H', cc.has_header_symbol('asm/hwprobe.h', 'RISCV_HWPROBE_EXT_ZBA')) -config_host_data.set('CONFIG_AVX2_OPT', get_option('avx2') \ - .require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX2') \ - .require(cc.links(''' - #include +if have_cpuid_h + have_avx2 = cc.links(''' #include static int __attribute__((target("avx2"))) bar(void *a) { __m256i x = *(__m256i *)a; return _mm256_testz_si256(x, x); } int main(int argc, char *argv[]) { return bar(argv[argc - 1]); } - '''), error_message: 'AVX2 not available').allowed()) - -config_host_data.set('CONFIG_AVX512BW_OPT', get_option('avx512bw') \ - .require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX512BW') \ - .require(cc.links(''' - #include + ''') + have_avx512bw = cc.links(''' #include static int __attribute__((target("avx512bw"))) bar(void *a) { __m512i *x = a; @@ -2916,7 +3173,21 @@ config_host_data.set('CONFIG_AVX512BW_OPT', get_option('avx512bw') \ return res[1]; } int main(int argc, char *argv[]) { return bar(argv[0]); } - '''), error_message: 'AVX512BW not available').allowed()) + ''') + if get_option('x86_version') >= '3' and not have_avx2 + error('Cannot enable AVX optimizations due to missing intrinsics') + elif get_option('x86_version') >= '4' and not have_avx512bw + error('Cannot enable AVX512 optimizations due to missing intrinsics') + endif +else + have_avx2 = false + have_avx512bw = false + if get_option('x86_version') >= '3' + error('Cannot enable AVX optimizations due to missing cpuid.h') + endif +endif +config_host_data.set('CONFIG_AVX2_OPT', have_avx2) +config_host_data.set('CONFIG_AVX512BW_OPT', have_avx512bw) # For both AArch64 and AArch32, detect if builtins are available. config_host_data.set('CONFIG_ARM_AES_BUILTIN', cc.compiles(''' @@ -2948,9 +3219,7 @@ config_host_data.set('CONFIG_MEMBARRIER', get_option('membarrier') \ .allowed()) have_afalg = get_option('crypto_afalg') \ - .require(cc.compiles(gnu_source_prefix + ''' - #include - #include + .require(cc.compiles(osdep_prefix + ''' #include #include int main(void) { @@ -2989,6 +3258,11 @@ if host_os == 'windows' }''', name: '_lock_file and _unlock_file')) endif +if spice.found() + config_host_data.set('HAVE_SPICE_QXL_GL_SCANOUT2', + cc.has_function('spice_qxl_gl_scanout2', dependencies: spice)) +endif + if host_os == 'windows' mingw_has_setjmp_longjmp = cc.links(''' #include @@ -3010,6 +3284,9 @@ if host_os == 'windows' endif endif +# Detect host pointer size for the target configuration loop. 
+host_long_bits = cc.sizeof('void *') * 8 + ######################## # Target configuration # ######################## @@ -3022,11 +3299,11 @@ config_devices_mak_list = [] config_devices_h = {} config_target_h = {} config_target_mak = {} +config_base_arch_mak = {} disassemblers = { 'alpha' : ['CONFIG_ALPHA_DIS'], 'avr' : ['CONFIG_AVR_DIS'], - 'cris' : ['CONFIG_CRIS_DIS'], 'hexagon' : ['CONFIG_HEXAGON_DIS'], 'hppa' : ['CONFIG_HPPA_DIS'], 'i386' : ['CONFIG_I386_DIS'], @@ -3053,6 +3330,8 @@ host_kconfig = \ (spice.found() ? ['CONFIG_SPICE=y'] : []) + \ (have_ivshmem ? ['CONFIG_IVSHMEM=y'] : []) + \ (opengl.found() ? ['CONFIG_OPENGL=y'] : []) + \ + (libcbor.found() ? ['CONFIG_LIBCBOR=y'] : []) + \ + (gnutls.found() ? ['CONFIG_GNUTLS=y'] : []) + \ (x11.found() ? ['CONFIG_X11=y'] : []) + \ (fdt.found() ? ['CONFIG_FDT=y'] : []) + \ (have_vhost_user ? ['CONFIG_VHOST_USER=y'] : []) + \ @@ -3062,7 +3341,8 @@ host_kconfig = \ (host_os == 'linux' ? ['CONFIG_LINUX=y'] : []) + \ (multiprocess_allowed ? ['CONFIG_MULTIPROCESS_ALLOWED=y'] : []) + \ (vfio_user_server_allowed ? ['CONFIG_VFIO_USER_SERVER_ALLOWED=y'] : []) + \ - (hv_balloon ? ['CONFIG_HV_BALLOON_POSSIBLE=y'] : []) + (hv_balloon ? ['CONFIG_HV_BALLOON_POSSIBLE=y'] : []) + \ + (have_rust ? ['CONFIG_HAVE_RUST=y'] : []) ignored = [ 'TARGET_XML_FILES', 'TARGET_ABI_DIR', 'TARGET_ARCH' ] @@ -3095,20 +3375,22 @@ foreach target : target_dirs config_target += { 'CONFIG_USER_ONLY': 'y', 'CONFIG_QEMU_INTERP_PREFIX': - get_option('interp_prefix').replace('%M', config_target['TARGET_NAME']) + get_option('interp_prefix').replace('%M', config_target['TARGET_NAME']), + 'CONFIG_QEMU_RTSIG_MAP': get_option('rtsig_map'), } endif + config_target += keyval.load('configs/targets' / target + '.mak') + target_kconfig = [] foreach sym: accelerators + # Disallow 64-bit on 32-bit emulation and virtualization + if host_long_bits < config_target['TARGET_LONG_BITS'].to_int() + continue + endif if sym == 'CONFIG_TCG' or target in accelerator_targets.get(sym, []) config_target += { sym: 'y' } config_all_accel += { sym: 'y' } - if target in modular_tcg - config_target += { 'CONFIG_TCG_MODULAR': 'y' } - else - config_target += { 'CONFIG_TCG_BUILTIN': 'y' } - endif target_kconfig += [ sym + '=y' ] endif endforeach @@ -3119,9 +3401,6 @@ foreach target : target_dirs error('No accelerator available for target @0@'.format(target)) endif - config_target += keyval.load('configs/targets' / target + '.mak') - config_target += { 'TARGET_' + config_target['TARGET_ARCH'].to_upper(): 'y' } - if 'TARGET_NEED_FDT' in config_target and not fdt.found() if default_targets warning('Disabling ' + target + ' due to missing libfdt') @@ -3134,6 +3413,7 @@ foreach target : target_dirs actual_target_dirs += target # Add default keys + config_target += { 'TARGET_' + config_target['TARGET_ARCH'].to_upper(): 'y' } if 'TARGET_BASE_ARCH' not in config_target config_target += {'TARGET_BASE_ARCH': config_target['TARGET_ARCH']} endif @@ -3181,6 +3461,12 @@ foreach target : target_dirs target_kconfig += 'CONFIG_' + config_target['TARGET_ARCH'].to_upper() + '=y' target_kconfig += 'CONFIG_TARGET_BIG_ENDIAN=' + config_target['TARGET_BIG_ENDIAN'] + # PVG is not cross-architecture. 
Use accelerator_targets as a proxy to + # figure out which target can support PVG on this host + if pvg.found() and target in accelerator_targets.get('CONFIG_HVF', []) + target_kconfig += 'CONFIG_MAC_PVG=y' + endif + config_input = meson.get_external_property(target, 'default') config_devices_mak = target + '-config-devices.mak' config_devices_mak = configure_file( @@ -3205,6 +3491,11 @@ foreach target : target_dirs config_all_devices += config_devices endif config_target_mak += {target: config_target} + + # build a merged config for all targets with the same TARGET_BASE_ARCH + target_base_arch = config_target['TARGET_BASE_ARCH'] + config_base_arch = config_base_arch_mak.get(target_base_arch, {}) + config_target + config_base_arch_mak += {target_base_arch: config_base_arch} endforeach target_dirs = actual_target_dirs @@ -3250,7 +3541,8 @@ endif # Generated sources # ##################### -genh += configure_file(output: 'config-host.h', configuration: config_host_data) +config_host_h = configure_file(output: 'config-host.h', configuration: config_host_data) +genh += config_host_h hxtool = find_program('scripts/hxtool') shaderinclude = find_program('scripts/shaderinclude.py') @@ -3268,6 +3560,7 @@ qapi_gen_depends = [ meson.current_source_dir() / 'scripts/qapi/__init__.py', meson.current_source_dir() / 'scripts/qapi/schema.py', meson.current_source_dir() / 'scripts/qapi/source.py', meson.current_source_dir() / 'scripts/qapi/types.py', + meson.current_source_dir() / 'scripts/qapi/features.py', meson.current_source_dir() / 'scripts/qapi/visit.py', meson.current_source_dir() / 'scripts/qapi-gen.py' ] @@ -3359,6 +3652,7 @@ if have_block endif if have_system trace_events_subdirs += [ + 'accel/hvf', 'accel/kvm', 'audio', 'backends', @@ -3397,19 +3691,24 @@ if have_system 'hw/pci-host', 'hw/ppc', 'hw/rtc', + 'hw/riscv', 'hw/s390x', 'hw/scsi', 'hw/sd', + 'hw/sensor', 'hw/sh4', 'hw/sparc', 'hw/sparc64', 'hw/ssi', 'hw/timer', 'hw/tpm', + 'hw/uefi', 'hw/ufs', 'hw/usb', 'hw/vfio', + 'hw/vfio-user', 'hw/virtio', + 'hw/vmapple', 'hw/watchdog', 'hw/xen', 'hw/gpio', @@ -3456,6 +3755,7 @@ qom_ss = ss.source_set() system_ss = ss.source_set() specific_fuzz_ss = ss.source_set() specific_ss = ss.source_set() +rust_devices_ss = ss.source_set() stub_ss = ss.source_set() trace_ss = ss.source_set() user_ss = ss.source_set() @@ -3463,14 +3763,17 @@ util_ss = ss.source_set() # accel modules qtest_module_ss = ss.source_set() -tcg_module_ss = ss.source_set() modules = {} target_modules = {} +plugin_modules = [] hw_arch = {} target_arch = {} target_system_arch = {} target_user_arch = {} +hw_common_arch = {} +target_common_arch = {} +target_common_system_arch = {} # NOTE: the trace/ subdirectory needs the qapi_trace_events variable # that is filled in by qapi/. 
@@ -3517,9 +3820,13 @@ libqemuutil = static_library('qemuutil', build_by_default: false, sources: util_ss.sources() + stub_ss.sources() + genh, dependencies: [util_ss.dependencies(), libm, threads, glib, socket, malloc]) +qemuutil_deps = [event_loop_base] +if host_os != 'windows' + qemuutil_deps += [rt] +endif qemuutil = declare_dependency(link_with: libqemuutil, sources: genh + version_res, - dependencies: [event_loop_base]) + dependencies: qemuutil_deps) if have_system or have_user decodetree = generator(find_program('scripts/decodetree.py'), @@ -3561,6 +3868,8 @@ if have_block # os-win32.c does not if host_os == 'windows' system_ss.add(files('os-win32.c')) + elif host_os == 'emscripten' + blockdev_ss.add(files('os-wasm.c')) else blockdev_ss.add(files('os-posix.c')) endif @@ -3591,6 +3900,9 @@ endif common_ss.add(pagevary) specific_ss.add(files('page-target.c', 'page-vary-target.c')) +common_ss.add(files('target-info.c')) +specific_ss.add(files('target-info-stub.c')) + subdir('backends') subdir('disas') subdir('migration') @@ -3605,6 +3917,10 @@ subdir('accel') subdir('plugins') subdir('ebpf') +if 'CONFIG_TCG' in config_all_accel + subdir('contrib/plugins') +endif + common_user_inc = [] subdir('common-user') @@ -3616,11 +3932,7 @@ subdir('tests/qtest/libqos') subdir('tests/qtest/fuzz') # accel modules -tcg_real_module_ss = ss.source_set() -tcg_real_module_ss.add_all(when: 'CONFIG_TCG_MODULAR', if_true: tcg_module_ss) -specific_ss.add_all(when: 'CONFIG_TCG_BUILTIN', if_true: tcg_module_ss) -target_modules += { 'accel' : { 'qtest': qtest_module_ss, - 'tcg': tcg_real_module_ss }} +target_modules += { 'accel' : { 'qtest': qtest_module_ss }} ############################################## # Internal static_libraries and dependencies # @@ -3656,16 +3968,11 @@ foreach d, list : modules install: true, install_dir: qemu_moddir) if module_ss.sources() != [] - # FIXME: Should use sl.extract_all_objects(recursive: true) as - # input. Sources can be used multiple times but objects are - # unique when it comes to lookup in compile_commands.json. - # Depnds on a mesion version with - # https://github.com/mesonbuild/meson/pull/8900 modinfo_files += custom_target(d + '-' + m + '.modinfo', output: d + '-' + m + '.modinfo', - input: module_ss.sources() + genh, + input: sl.extract_all_objects(recursive: true), capture: true, - command: [modinfo_collect, module_ss.sources()]) + command: [modinfo_collect, '@INPUT@']) endif else if d == 'block' @@ -3704,12 +4011,11 @@ foreach d, list : target_modules dependencies: target_module_ss.dependencies(), install: true, install_dir: qemu_moddir) - # FIXME: Should use sl.extract_all_objects(recursive: true) too. 
modinfo_files += custom_target(module_name + '.modinfo', output: module_name + '.modinfo', - input: target_module_ss.sources() + genh, + input: sl.extract_all_objects(recursive: true), capture: true, - command: [modinfo_collect, '--target', target, target_module_ss.sources()]) + command: [modinfo_collect, '--target', target, '@INPUT@']) endif endif endforeach @@ -3814,7 +4120,7 @@ libchardev = static_library('chardev', chardev_ss.sources() + genh, build_by_default: false) chardev = declare_dependency(objects: libchardev.extract_all_objects(recursive: false), - dependencies: chardev_ss.dependencies()) + dependencies: [chardev_ss.dependencies(), io]) hwcore_ss = hwcore_ss.apply({}) libhwcore = static_library('hwcore', sources: hwcore_ss.sources() + genh, @@ -3829,8 +4135,20 @@ common_ss.add(hwcore) system_ss.add(authz, blockdev, chardev, crypto, io, qmp) common_ss.add(qom, qemuutil) -common_ss.add_all(when: 'CONFIG_SYSTEM_ONLY', if_true: [system_ss]) -common_ss.add_all(when: 'CONFIG_USER_ONLY', if_true: user_ss) +libuser = static_library('user', + user_ss.all_sources() + genh, + c_args: ['-DCONFIG_USER_ONLY', + '-DCOMPILING_SYSTEM_VS_USER'], + include_directories: common_user_inc, + dependencies: user_ss.all_dependencies(), + build_by_default: false) + +libsystem = static_library('system', + system_ss.all_sources() + genh, + c_args: ['-DCONFIG_SOFTMMU', + '-DCOMPILING_SYSTEM_VS_USER'], + dependencies: system_ss.all_dependencies(), + build_by_default: false) # Note that this library is never used directly (only through extract_objects) # and is not built by default; therefore, source files not used by the build @@ -3838,11 +4156,114 @@ common_ss.add_all(when: 'CONFIG_USER_ONLY', if_true: user_ss) common_all = static_library('common', build_by_default: false, sources: common_ss.all_sources() + genh, - include_directories: common_user_inc, implicit_include_directories: false, dependencies: common_ss.all_dependencies()) +# construct common libraries per base architecture +target_common_arch_libs = {} +target_common_system_arch_libs = {} +foreach target_base_arch, config_base_arch : config_base_arch_mak + target_inc = [include_directories('target' / target_base_arch)] + inc = [common_user_inc + target_inc] + + target_common = common_ss.apply(config_base_arch, strict: false) + target_system = system_ss.apply(config_base_arch, strict: false) + target_user = user_ss.apply(config_base_arch, strict: false) + common_deps = [] + system_deps = [] + user_deps = [] + foreach dep: target_common.dependencies() + common_deps += dep.partial_dependency(compile_args: true, includes: true) + endforeach + foreach dep: target_system.dependencies() + system_deps += dep.partial_dependency(compile_args: true, includes: true) + endforeach + foreach dep: target_user.dependencies() + user_deps += dep.partial_dependency(compile_args: true, includes: true) + endforeach + + # prevent common code to access cpu compile time definition, + # but still allow access to cpu.h + target_c_args = ['-DCPU_DEFS_H'] + target_system_c_args = target_c_args + ['-DCOMPILING_SYSTEM_VS_USER', '-DCONFIG_SOFTMMU'] + + if target_base_arch in target_common_arch + src = target_common_arch[target_base_arch] + lib = static_library( + 'common_' + target_base_arch, + build_by_default: false, + sources: src.all_sources() + genh, + include_directories: inc, + c_args: target_c_args, + dependencies: src.all_dependencies() + common_deps + + system_deps + user_deps) + target_common_arch_libs += {target_base_arch: lib} + endif + + # merge hw_common_arch in 
target_common_system_arch + if target_base_arch in hw_common_arch + hw_src = hw_common_arch[target_base_arch] + if target_base_arch in target_common_system_arch + target_common_system_arch[target_base_arch].add_all(hw_src) + else + target_common_system_arch += {target_base_arch: hw_src} + endif + endif + + if target_base_arch in target_common_system_arch + src = target_common_system_arch[target_base_arch] + lib = static_library( + 'system_' + target_base_arch, + build_by_default: false, + sources: src.all_sources() + genh, + include_directories: inc, + c_args: target_system_c_args, + dependencies: src.all_dependencies() + common_deps + system_deps) + target_common_system_arch_libs += {target_base_arch: lib} + endif +endforeach + +if have_rust + bindings_incdir = include_directories('.', 'include') + # We would like to use --generate-cstr, but it is only available + # starting with bindgen 0.66.0. The oldest supported versions + # is 0.60.x (Debian 12 has 0.60.1) which introduces --allowlist-file. + bindgen_args_common = [ + '--disable-header-comment', + '--raw-line', '// @generated', + '--ctypes-prefix', 'std::os::raw', + '--generate-block', + '--impl-debug', + '--no-doc-comments', + '--with-derive-default', + '--no-layout-tests', + '--no-prepend-enum-name', + '--allowlist-file', meson.project_source_root() + '/include/.*', + '--allowlist-file', meson.project_source_root() + '/.*', + '--allowlist-file', meson.project_build_root() + '/.*' + ] + if not rustfmt.found() + if bindgen.version().version_compare('<0.65.0') + bindgen_args_common += ['--no-rustfmt-bindings'] + else + bindgen_args_common += ['--formatter', 'none'] + endif + endif + if bindgen.version().version_compare('>=0.66.0') + bindgen_args_common += ['--rust-target', '1.59'] + endif + if bindgen.version().version_compare('<0.61.0') + # default in 0.61+ + bindgen_args_common += ['--size_t-is-usize'] + else + bindgen_args_common += ['--merge-extern-blocks'] + endif + subdir('rust') +endif + + feature_to_c = find_program('scripts/feature_to_c.py') +rust_root_crate = find_program('scripts/rust/rust_root_crate.sh') if host_os == 'darwin' entitlement = find_program('scripts/entitlement.sh') @@ -3858,7 +4279,7 @@ foreach target : target_dirs arch_deps = [] c_args = ['-DCOMPILING_PER_TARGET', '-DCONFIG_TARGET="@0@-config-target.h"'.format(target), - '-DCONFIG_DEVICES="@0@-config-devices.h"'.format(target)] + ] link_args = emulator_link_args target_inc = [include_directories('target' / config_target['TARGET_BASE_ARCH'])] @@ -3878,6 +4299,7 @@ foreach target : target_dirs arch_deps += hw.dependencies() endif + c_args += ['-DCONFIG_DEVICES="@0@-config-devices.h"'.format(target)] arch_srcs += config_devices_h[target] link_args += ['@block.syms', '@qemu.syms'] else @@ -3928,13 +4350,57 @@ foreach target : target_dirs arch_deps += t.dependencies() target_common = common_ss.apply(config_target, strict: false) - objects = common_all.extract_objects(target_common.sources()) + objects = [common_all.extract_objects(target_common.sources())] arch_deps += target_common.dependencies() + if target_base_arch in target_common_arch_libs + src = target_common_arch[target_base_arch].apply(config_target, strict: false) + lib = target_common_arch_libs[target_base_arch] + objects += lib.extract_objects(src.sources()) + arch_deps += src.dependencies() + endif + if target_type == 'system' + src = system_ss.apply(config_target, strict: false) + objects += libsystem.extract_objects(src.sources()) + arch_deps += src.dependencies() + endif + if target_type == 
'user' + src = user_ss.apply(config_target, strict: false) + objects += libuser.extract_objects(src.sources()) + arch_deps += src.dependencies() + endif + if target_type == 'system' and target_base_arch in target_common_system_arch_libs + src = target_common_system_arch[target_base_arch].apply(config_target, strict: false) + lib = target_common_system_arch_libs[target_base_arch] + objects += lib.extract_objects(src.sources()) + arch_deps += src.dependencies() + endif target_specific = specific_ss.apply(config_target, strict: false) arch_srcs += target_specific.sources() arch_deps += target_specific.dependencies() + if have_rust and target_type == 'system' + target_rust = rust_devices_ss.apply(config_target, strict: false) + crates = [] + foreach dep : target_rust.dependencies() + crates += dep.get_variable('crate') + endforeach + if crates.length() > 0 + rlib_rs = custom_target('rust_' + target.underscorify() + '.rs', + output: 'rust_' + target.underscorify() + '.rs', + command: [rust_root_crate, crates], + capture: true, + build_by_default: true, + build_always_stale: true) + rlib = static_library('rust_' + target.underscorify(), + structured_sources([], {'.': rlib_rs}), + dependencies: target_rust.dependencies(), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'c') + arch_deps += declare_dependency(link_whole: [rlib]) + endif + endif + # allow using headers from the dependencies but do not include the sources, # because this emulator only needs those in "objects". For external # dependencies, the full dependency is included below in the executable. @@ -3956,14 +4422,14 @@ foreach target : target_dirs 'name': 'qemu-system-' + target_name, 'win_subsystem': 'console', 'sources': files('system/main.c'), - 'dependencies': [] + 'dependencies': [sdl] }] if host_os == 'windows' and (sdl.found() or gtk.found()) execs += [{ 'name': 'qemu-system-' + target_name + 'w', 'win_subsystem': 'windows', 'sources': files('system/main.c'), - 'dependencies': [] + 'dependencies': [sdl] }] endif if get_option('fuzzing') @@ -4077,7 +4543,7 @@ if have_tools subdir('contrib/elf2dmp') executable('qemu-edid', files('qemu-edid.c', 'hw/display/edid-generate.c'), - dependencies: qemuutil, + dependencies: [qemuutil, rt], install: true) if have_vhost_user @@ -4142,7 +4608,11 @@ subdir('scripts') subdir('tools') subdir('pc-bios') subdir('docs') -subdir('tests') +# Tests are disabled on emscripten because they rely on host features that aren't +# supported by emscripten (e.g. fork and unix socket). 
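
The per-base-arch libraries earlier in this meson.build hunk are compiled with '-DCPU_DEFS_H' so that code shared across all targets of one architecture cannot see per-CPU compile-time definitions while still being allowed to include cpu.h. A minimal sketch of the trick, assuming the flag names the include guard of cpu-defs.h; the definition shown in the body is illustrative only:

    /* simplified view of a guarded per-target header */
    #ifndef CPU_DEFS_H
    #define CPU_DEFS_H
    /* target-specific compile-time definitions, e.g. the target word size */
    #endif

    /*
     * Passing -DCPU_DEFS_H on the compiler command line pre-defines the
     * guard, so the body above is skipped when building the common_<arch>
     * and system_<arch> libraries, and only the target-neutral parts of
     * cpu.h remain visible to them.
     */
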
+if host_os != 'emscripten' + subdir('tests') +endif if gtk.found() subdir('po') endif @@ -4245,7 +4715,6 @@ summary_info += {'Trace backends': ','.join(get_option('trace_backends'))} if 'simple' in get_option('trace_backends') summary_info += {'Trace output file': get_option('trace_file') + '-'} endif -summary_info += {'D-Bus display': dbus_display} summary_info += {'QOM debugging': get_option('qom_cast_debug')} summary_info += {'Relocatable install': get_option('relocatable')} summary_info += {'vhost-kernel support': have_vhost_kernel} @@ -4273,6 +4742,15 @@ if 'objc' in all_languages else summary_info += {'Objective-C compiler': false} endif +summary_info += {'Rust support': have_rust} +if have_rust + summary_info += {'Rust target': config_host['RUST_TARGET_TRIPLE']} + summary_info += {'rustc': ' '.join(rustc.cmd_array())} + summary_info += {'rustc version': rustc.version()} + summary_info += {'rustdoc': rustdoc} + summary_info += {'bindgen': bindgen.full_path()} + summary_info += {'bindgen version': bindgen.version()} +endif option_cflags = (get_option('debug') ? ['-g'] : []) if get_option('optimization') != 'plain' option_cflags += ['-O' + get_option('optimization')] @@ -4378,7 +4856,6 @@ if have_block summary_info += {'Block whitelist (ro)': get_option('block_drv_ro_whitelist')} summary_info += {'Use block whitelist in tools': get_option('block_drv_whitelist_in_tools')} summary_info += {'VirtFS (9P) support': have_virtfs} - summary_info += {'VirtFS (9P) Proxy Helper support (deprecated)': have_virtfs_proxy_helper} summary_info += {'replication support': config_host_data.get('CONFIG_REPLICATION')} summary_info += {'bochs support': get_option('bochs').allowed()} summary_info += {'cloop support': get_option('cloop').allowed()} @@ -4402,6 +4879,7 @@ summary_info += {'TLS priority': get_option('tls_priority')} summary_info += {'GNUTLS support': gnutls} if gnutls.found() summary_info += {' GNUTLS crypto': gnutls_crypto.found()} + summary_info += {' GNUTLS bug 1717 workaround': gnutls_bug1717_workaround } endif summary_info += {'libgcrypt': gcrypt} summary_info += {'nettle': nettle} @@ -4409,6 +4887,7 @@ if nettle.found() summary_info += {' XTS': xts != 'private'} endif summary_info += {'SM4 ALG support': crypto_sm4} +summary_info += {'SM3 ALG support': crypto_sm3} summary_info += {'AF_ALG support': have_afalg} summary_info += {'rng-none': get_option('rng_none')} summary_info += {'Linux keyring': have_keyring} @@ -4420,6 +4899,7 @@ summary_info = {} if host_os == 'darwin' summary_info += {'Cocoa support': cocoa} endif +summary_info += {'D-Bus display': dbus_display} summary_info += {'SDL support': sdl} summary_info += {'SDL image support': sdl_image} summary_info += {'GTK support': gtk} @@ -4469,6 +4949,7 @@ if host_os == 'darwin' summary_info += {'vmnet.framework support': vmnet} endif summary_info += {'AF_XDP support': libxdp} +summary_info += {'passt support': enable_passt} summary_info += {'slirp support': slirp} summary_info += {'vde support': vde} summary_info += {'netmap support': have_netmap} @@ -4508,6 +4989,7 @@ summary_info += {'seccomp support': seccomp} summary_info += {'GlusterFS support': glusterfs} summary_info += {'hv-balloon support': hv_balloon} summary_info += {'TPM support': have_tpm} +summary_info += {'IGVM support': igvm} summary_info += {'libssh support': libssh} summary_info += {'lzo support': lzo} summary_info += {'snappy support': snappy} @@ -4516,10 +4998,12 @@ summary_info += {'lzfse support': liblzfse} summary_info += {'zstd support': zstd} summary_info += 
{'Query Processing Library support': qpl} summary_info += {'UADK Library support': uadk} +summary_info += {'qatzip support': qatzip} summary_info += {'NUMA host support': numa} summary_info += {'capstone': capstone} summary_info += {'libpmem support': libpmem} summary_info += {'libdaxctl support': libdaxctl} +summary_info += {'libcbor support': libcbor} summary_info += {'libudev': libudev} # Dummy dependency, keep .found() summary_info += {'FUSE lseek': fuse_lseek.found()} @@ -4528,6 +5012,10 @@ summary_info += {'libdw': libdw} if host_os == 'freebsd' summary_info += {'libinotify-kqueue': inotify} endif +if host_os == 'darwin' + summary_info += {'ParavirtualizedGraphics support': pvg} +endif +summary_info += {'valgrind': valgrind} summary(summary_info, bool_yn: true, section: 'Dependencies') if host_arch == 'unknown' @@ -4539,11 +5027,17 @@ if host_arch == 'unknown' message('compile or work on this host CPU. You can help by volunteering') message('to maintain it and providing a build host for our continuous') message('integration setup.') - if get_option('tcg').allowed() and target_dirs.length() > 0 + if have_tcg message() message('configure has succeeded and you can continue to build, but') message('QEMU will use a slow interpreter to emulate the target CPU.') endif +elif host_long_bits < 64 + message() + warning('DEPRECATED HOST CPU') + message() + message('Support for 32-bit CPU host architecture ' + cpu + ' is going') + message('to be dropped in a future QEMU release.') endif if not supported_oses.contains(host_os) diff --git a/meson_options.txt b/meson_options.txt index 0269fa0f16e..fff1521e580 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -27,12 +27,14 @@ option('block_drv_ro_whitelist', type : 'string', value : '', description: 'set block driver read-only whitelist (by default affects only QEMU, not tools like qemu-img)') option('interp_prefix', type : 'string', value : '/usr/gnemul/qemu-%M', description: 'where to find shared libraries etc., use %M for cpu name') +option('rtsig_map', type : 'string', value : 'NULL', + description: 'default value of QEMU_RTSIG_MAP') option('fuzzing_engine', type : 'string', value : '', description: 'fuzzing engine library for OSS-Fuzz') option('trace_file', type: 'string', value: 'trace', description: 'Trace file prefix for simple backend') option('coroutine_backend', type: 'combo', - choices: ['ucontext', 'sigaltstack', 'windows', 'auto'], + choices: ['ucontext', 'sigaltstack', 'windows', 'wasm', 'auto'], value: 'auto', description: 'coroutine backend to use') # Everything else can be set via --enable/--disable-* option @@ -91,8 +93,10 @@ option('tcg_interpreter', type: 'boolean', value: false, description: 'TCG with bytecode interpreter (slow)') option('safe_stack', type: 'boolean', value: false, description: 'SafeStack Stack Smash Protection (requires clang/llvm and coroutine backend ucontext)') -option('sanitizers', type: 'boolean', value: false, - description: 'enable default sanitizers') +option('asan', type: 'boolean', value: false, + description: 'enable address sanitizer') +option('ubsan', type: 'boolean', value: false, + description: 'enable undefined behaviour sanitizer') option('tsan', type: 'boolean', value: false, description: 'enable thread sanitizer') option('stack_protector', type: 'feature', value: 'auto', @@ -111,16 +115,16 @@ option('dbus_display', type: 'feature', value: 'auto', description: '-display dbus support') option('tpm', type : 'feature', value : 'auto', description: 'TPM support') +option('valgrind', type : 
'feature', value: 'auto', + description: 'valgrind debug support for coroutine stacks') +option('igvm', type: 'feature', value: 'auto', + description: 'Independent Guest Virtual Machine (IGVM) file support') # Do not enable it by default even for Mingw32, because it doesn't # work on Wine. option('membarrier', type: 'feature', value: 'disabled', description: 'membarrier system call (for Linux 4.14+ or Windows') -option('avx2', type: 'feature', value: 'auto', - description: 'AVX2 optimizations') -option('avx512bw', type: 'feature', value: 'auto', - description: 'AVX512BW optimizations') option('keyring', type: 'feature', value: 'auto', description: 'Linux keyring support') option('libkeyutils', type: 'feature', value: 'auto', @@ -166,6 +170,8 @@ option('iconv', type : 'feature', value : 'auto', description: 'Font glyph conversion support') option('curses', type : 'feature', value : 'auto', description: 'curses UI') +option('libcbor', type : 'feature', value : 'auto', + description: 'libcbor support') option('gnutls', type : 'feature', value : 'auto', description: 'GNUTLS cryptography support') option('nettle', type : 'feature', value : 'auto', @@ -192,6 +198,8 @@ option('lzfse', type : 'feature', value : 'auto', description: 'lzfse support for DMG images') option('lzo', type : 'feature', value : 'auto', description: 'lzo compression support') +option('pvg', type: 'feature', value: 'auto', + description: 'macOS paravirtualized graphics support') option('rbd', type : 'feature', value : 'auto', description: 'Ceph block device driver') option('opengl', type : 'feature', value : 'auto', @@ -228,6 +236,8 @@ option('pixman', type : 'feature', value : 'auto', description: 'pixman support') option('slirp', type: 'feature', value: 'auto', description: 'libslirp user mode network backend support') +option('passt', type: 'feature', value: 'auto', + description: 'passt network backend support') option('vde', type : 'feature', value : 'auto', description: 'vde network backend support') option('vmnet', type : 'feature', value : 'auto', @@ -261,6 +271,8 @@ option('qpl', type : 'feature', value : 'auto', description: 'Query Processing Library support') option('uadk', type : 'feature', value : 'auto', description: 'UADK Library support') +option('qatzip', type: 'feature', value: 'auto', + description: 'QATzip compression support') option('fuse', type: 'feature', value: 'auto', description: 'FUSE block device export') option('fuse_lseek', type : 'feature', value : 'auto', @@ -301,8 +313,6 @@ option('vhost_user_blk_server', type: 'feature', value: 'auto', description: 'build vhost-user-blk server') option('virtfs', type: 'feature', value: 'auto', description: 'virtio-9p support') -option('virtfs_proxy_helper', type: 'feature', value: 'auto', - description: 'virtio-9p proxy helper support') option('libvduse', type: 'feature', value: 'auto', description: 'build VDUSE Library') option('vduse_blk_export', type: 'feature', value: 'auto', @@ -371,3 +381,8 @@ option('hexagon_idef_parser', type : 'boolean', value : true, option('x86_version', type : 'combo', choices : ['0', '1', '2', '3', '4'], value: '1', description: 'tweak required x86_64 architecture version beyond compiler default') + +option('rust', type: 'feature', value: 'disabled', + description: 'Rust support') +option('strict_rust_lints', type: 'boolean', value: false, + description: 'Enable stricter set of Rust warnings') diff --git a/migration/block-active.c b/migration/block-active.c new file mode 100644 index 00000000000..40e986aadea --- /dev/null +++ 
b/migration/block-active.c @@ -0,0 +1,48 @@ +/* + * Block activation tracking for migration purpose + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Copyright (C) 2024 Red Hat, Inc. + */ +#include "qemu/osdep.h" +#include "block/block.h" +#include "qapi/error.h" +#include "migration/migration.h" +#include "qemu/error-report.h" +#include "trace.h" + +bool migration_block_activate(Error **errp) +{ + ERRP_GUARD(); + + assert(bql_locked()); + + trace_migration_block_activation("active"); + + bdrv_activate_all(errp); + if (*errp) { + error_report_err(error_copy(*errp)); + return false; + } + + return true; +} + +bool migration_block_inactivate(void) +{ + int ret; + + assert(bql_locked()); + + trace_migration_block_activation("inactive"); + + ret = bdrv_inactivate_all(); + if (ret) { + error_report("%s: bdrv_inactivate_all() failed: %d", + __func__, ret); + return false; + } + + return true; +} diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c index a7d55048c23..a061aad8177 100644 --- a/migration/block-dirty-bitmap.c +++ b/migration/block-dirty-bitmap.c @@ -62,8 +62,8 @@ #include "block/block.h" #include "block/block_int.h" #include "block/dirty-bitmap.h" -#include "sysemu/block-backend.h" -#include "sysemu/runstate.h" +#include "system/block-backend.h" +#include "system/runstate.h" #include "qemu/main-loop.h" #include "qemu/error-report.h" #include "migration/misc.h" @@ -1248,8 +1248,7 @@ static bool dirty_bitmap_has_postcopy(void *opaque) static SaveVMHandlers savevm_dirty_bitmap_handlers = { .save_setup = dirty_bitmap_save_setup, - .save_live_complete_postcopy = dirty_bitmap_save_complete, - .save_live_complete_precopy = dirty_bitmap_save_complete, + .save_complete = dirty_bitmap_save_complete, .has_postcopy = dirty_bitmap_has_postcopy, .state_pending_exact = dirty_bitmap_state_pending, .state_pending_estimate = dirty_bitmap_state_pending, diff --git a/migration/channel-block.c b/migration/channel-block.c index fff8d870942..97de5a691b5 100644 --- a/migration/channel-block.c +++ b/migration/channel-block.c @@ -123,7 +123,7 @@ qio_channel_block_seek(QIOChannel *ioc, bioc->offset = offset; break; case SEEK_CUR: - bioc->offset += whence; + bioc->offset += offset; break; case SEEK_END: error_setg(errp, "Size of VMstate region is unknown"); @@ -170,7 +170,7 @@ qio_channel_block_set_aio_fd_handler(QIOChannel *ioc, static void qio_channel_block_class_init(ObjectClass *klass, - void *class_data G_GNUC_UNUSED) + const void *class_data G_GNUC_UNUSED) { QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass); diff --git a/migration/channel.c b/migration/channel.c index f9de064f3b1..a547b1fbfeb 100644 --- a/migration/channel.c +++ b/migration/channel.c @@ -33,6 +33,7 @@ void migration_channel_process_incoming(QIOChannel *ioc) { MigrationState *s = migrate_get_current(); + MigrationIncomingState *mis = migration_incoming_get_current(); Error *local_err = NULL; trace_migration_set_incoming_channel( @@ -47,6 +48,10 @@ void migration_channel_process_incoming(QIOChannel *ioc) if (local_err) { error_report_err(local_err); + migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED); + if (mis->exit_on_error) { + exit(EXIT_FAILURE); + } } } @@ -74,7 +79,7 @@ void migration_channel_connect(MigrationState *s, if (!error) { /* tls_channel_connect will call back to this * function after the TLS handshake, - * so we mustn't call migrate_fd_connect until then + * so we mustn't call migration_connect until then */ return; @@ -89,7 +94,7 @@ void 
migration_channel_connect(MigrationState *s, qemu_mutex_unlock(&s->qemu_file_lock); } } - migrate_fd_connect(s, error); + migration_connect(s, error); error_free(error); } diff --git a/migration/colo.c b/migration/colo.c index 64494902211..e0f713c837f 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qapi/error.h" #include "qapi/qapi-commands-migration.h" #include "migration.h" @@ -30,8 +30,8 @@ #include "net/colo.h" #include "block/block.h" #include "qapi/qapi-events-migration.h" -#include "sysemu/cpus.h" -#include "sysemu/runstate.h" +#include "system/cpus.h" +#include "system/runstate.h" #include "net/filter.h" #include "options.h" @@ -146,7 +146,7 @@ static void secondary_vm_do_failover(void) return; } /* Notify COLO incoming thread that failover work is finished */ - qemu_sem_post(&mis->colo_incoming_sem); + qemu_event_set(&mis->colo_incoming_event); /* For Secondary VM, jump to incoming co */ if (mis->colo_incoming_co) { @@ -195,7 +195,7 @@ static void primary_vm_do_failover(void) } /* Notify COLO thread that failover work is finished */ - qemu_sem_post(&s->colo_exit_sem); + qemu_event_set(&s->colo_exit_event); } COLOMode get_colo_mode(void) @@ -452,6 +452,9 @@ static int colo_do_checkpoint_transaction(MigrationState *s, bql_unlock(); goto out; } + + qemu_savevm_maybe_send_switchover_start(s->to_dst_file); + /* Note: device state is saved into buffer */ ret = qemu_save_device_state(fb); @@ -617,8 +620,8 @@ static void colo_process_checkpoint(MigrationState *s) } /* Hope this not to be too long to wait here */ - qemu_sem_wait(&s->colo_exit_sem); - qemu_sem_destroy(&s->colo_exit_sem); + qemu_event_wait(&s->colo_exit_event); + qemu_event_destroy(&s->colo_exit_event); /* * It is safe to unregister notifier after failover finished. 
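
The colo.c hunks above replace the one-shot COLO exit/failover semaphores with QemuEvent, which matches the signal-once/wait-once pattern better than a counting semaphore. A minimal sketch of the QemuEvent lifecycle as used in these paths, with hypothetical names:

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    /* one-shot completion flag, mirroring colo_exit_event */
    static QemuEvent done_event;

    static void setup(void)
    {
        qemu_event_init(&done_event, false);   /* start unsignalled */
    }

    static void signal_done(void)
    {
        qemu_event_set(&done_event);           /* idempotent, unlike sem_post */
    }

    static void wait_and_teardown(void)
    {
        qemu_event_wait(&done_event);
        qemu_event_destroy(&done_event);
    }

Setting an already-set event is a no-op, so the failover paths shown above can signal unconditionally without skewing a counter the way repeated qemu_sem_post() calls would.
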
@@ -648,7 +651,7 @@ void migrate_start_colo_process(MigrationState *s) s->colo_delay_timer = timer_new_ms(QEMU_CLOCK_HOST, colo_checkpoint_notify_timer, NULL); - qemu_sem_init(&s->colo_exit_sem, 0); + qemu_event_init(&s->colo_exit_event, false); colo_process_checkpoint(s); bql_lock(); } @@ -805,11 +808,11 @@ void colo_shutdown(void) case COLO_MODE_PRIMARY: s = migrate_get_current(); qemu_event_set(&s->colo_checkpoint_event); - qemu_sem_post(&s->colo_exit_sem); + qemu_event_set(&s->colo_exit_event); break; case COLO_MODE_SECONDARY: mis = migration_incoming_get_current(); - qemu_sem_post(&mis->colo_incoming_sem); + qemu_event_set(&mis->colo_incoming_event); break; default: break; @@ -824,7 +827,7 @@ static void *colo_process_incoming_thread(void *opaque) Error *local_err = NULL; rcu_register_thread(); - qemu_sem_init(&mis->colo_incoming_sem, 0); + qemu_event_init(&mis->colo_incoming_event, false); migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_COLO); @@ -836,7 +839,7 @@ static void *colo_process_incoming_thread(void *opaque) /* Make sure all file formats throw away their mutable metadata */ bql_lock(); - bdrv_activate_all(&local_err); + migration_block_activate(&local_err); bql_unlock(); if (local_err) { error_report_err(local_err); @@ -920,8 +923,8 @@ static void *colo_process_incoming_thread(void *opaque) } /* Hope this not to be too long to loop here */ - qemu_sem_wait(&mis->colo_incoming_sem); - qemu_sem_destroy(&mis->colo_incoming_sem); + qemu_event_wait(&mis->colo_incoming_event); + qemu_event_destroy(&mis->colo_incoming_event); rcu_unregister_thread(); return NULL; @@ -935,7 +938,8 @@ void coroutine_fn colo_incoming_co(void) assert(bql_locked()); assert(migration_incoming_colo_enabled()); - qemu_thread_create(&th, "mig/dst/colo", colo_process_incoming_thread, + qemu_thread_create(&th, MIGRATION_THREAD_DST_COLO, + colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE); mis->colo_incoming_co = qemu_coroutine_self(); diff --git a/migration/cpr-transfer.c b/migration/cpr-transfer.c new file mode 100644 index 00000000000..00371d17c3a --- /dev/null +++ b/migration/cpr-transfer.c @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2022, 2024 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "io/channel-file.h" +#include "io/channel-socket.h" +#include "io/net-listener.h" +#include "migration/cpr.h" +#include "migration/migration.h" +#include "migration/savevm.h" +#include "migration/qemu-file.h" +#include "migration/vmstate.h" +#include "trace.h" + +QEMUFile *cpr_transfer_output(MigrationChannel *channel, Error **errp) +{ + MigrationAddress *addr = channel->addr; + + if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET && + addr->u.socket.type == SOCKET_ADDRESS_TYPE_UNIX) { + + g_autoptr(QIOChannelSocket) sioc = qio_channel_socket_new(); + QIOChannel *ioc = QIO_CHANNEL(sioc); + SocketAddress *saddr = &addr->u.socket; + + if (qio_channel_socket_connect_sync(sioc, saddr, errp) < 0) { + return NULL; + } + trace_cpr_transfer_output(addr->u.socket.u.q_unix.path); + qio_channel_set_name(ioc, "cpr-out"); + return qemu_file_new_output(ioc); + + } else { + error_setg(errp, "bad cpr channel address; must be unix"); + return NULL; + } +} + +QEMUFile *cpr_transfer_input(MigrationChannel *channel, Error **errp) +{ + MigrationAddress *addr = channel->addr; + + if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET && + (addr->u.socket.type == SOCKET_ADDRESS_TYPE_UNIX || + addr->u.socket.type == SOCKET_ADDRESS_TYPE_FD)) { + + g_autoptr(QIOChannelSocket) sioc = NULL; + SocketAddress *saddr = &addr->u.socket; + g_autoptr(QIONetListener) listener = qio_net_listener_new(); + QIOChannel *ioc; + + qio_net_listener_set_name(listener, "cpr-socket-listener"); + if (qio_net_listener_open_sync(listener, saddr, 1, errp) < 0) { + return NULL; + } + + sioc = qio_net_listener_wait_client(listener); + ioc = QIO_CHANNEL(sioc); + trace_cpr_transfer_input( + addr->u.socket.type == SOCKET_ADDRESS_TYPE_UNIX ? + addr->u.socket.u.q_unix.path : addr->u.socket.u.fd.str); + qio_channel_set_name(ioc, "cpr-in"); + return qemu_file_new_input(ioc); + + } else { + error_setg(errp, "bad cpr channel socket type; must be unix"); + return NULL; + } +} diff --git a/migration/cpr.c b/migration/cpr.c new file mode 100644 index 00000000000..42ad0b0d500 --- /dev/null +++ b/migration/cpr.c @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2021-2024 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/vfio/vfio-device.h" +#include "migration/cpr.h" +#include "migration/misc.h" +#include "migration/options.h" +#include "migration/qemu-file.h" +#include "migration/savevm.h" +#include "migration/vmstate.h" +#include "monitor/monitor.h" +#include "system/runstate.h" +#include "trace.h" + +/*************************************************************************/ +/* cpr state container for all information to be saved. 
*/ + +CprState cpr_state; + +/****************************************************************************/ + +typedef struct CprFd { + char *name; + unsigned int namelen; + int id; + int fd; + QLIST_ENTRY(CprFd) next; +} CprFd; + +static const VMStateDescription vmstate_cpr_fd = { + .name = "cpr fd", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(namelen, CprFd), + VMSTATE_VBUFFER_ALLOC_UINT32(name, CprFd, 0, NULL, namelen), + VMSTATE_INT32(id, CprFd), + VMSTATE_FD(fd, CprFd), + VMSTATE_END_OF_LIST() + } +}; + +void cpr_save_fd(const char *name, int id, int fd) +{ + CprFd *elem = g_new0(CprFd, 1); + + trace_cpr_save_fd(name, id, fd); + elem->name = g_strdup(name); + elem->namelen = strlen(name) + 1; + elem->id = id; + elem->fd = fd; + QLIST_INSERT_HEAD(&cpr_state.fds, elem, next); +} + +static CprFd *find_fd(CprFdList *head, const char *name, int id) +{ + CprFd *elem; + + QLIST_FOREACH(elem, head, next) { + if (!strcmp(elem->name, name) && elem->id == id) { + return elem; + } + } + return NULL; +} + +void cpr_delete_fd(const char *name, int id) +{ + CprFd *elem = find_fd(&cpr_state.fds, name, id); + + if (elem) { + QLIST_REMOVE(elem, next); + g_free(elem->name); + g_free(elem); + } + + trace_cpr_delete_fd(name, id); +} + +int cpr_find_fd(const char *name, int id) +{ + CprFd *elem = find_fd(&cpr_state.fds, name, id); + int fd = elem ? elem->fd : -1; + + trace_cpr_find_fd(name, id, fd); + return fd; +} + +void cpr_resave_fd(const char *name, int id, int fd) +{ + CprFd *elem = find_fd(&cpr_state.fds, name, id); + int old_fd = elem ? elem->fd : -1; + + if (old_fd < 0) { + cpr_save_fd(name, id, fd); + } else if (old_fd != fd) { + error_setg(&error_fatal, + "internal error: cpr fd '%s' id %d value %d " + "already saved with a different value %d", + name, id, fd, old_fd); + } +} + +int cpr_open_fd(const char *path, int flags, const char *name, int id, + Error **errp) +{ + int fd = cpr_find_fd(name, id); + + if (fd < 0) { + fd = qemu_open(path, flags, errp); + if (fd >= 0) { + cpr_save_fd(name, id, fd); + } + } + return fd; +} + +/*************************************************************************/ +static const VMStateDescription vmstate_cpr_state = { + .name = CPR_STATE, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_QLIST_V(fds, CprState, 1, vmstate_cpr_fd, CprFd, next), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * const []) { + &vmstate_cpr_vfio_devices, + NULL + } +}; +/*************************************************************************/ + +static QEMUFile *cpr_state_file; + +QIOChannel *cpr_state_ioc(void) +{ + return qemu_file_get_ioc(cpr_state_file); +} + +static MigMode incoming_mode = MIG_MODE_NONE; + +MigMode cpr_get_incoming_mode(void) +{ + return incoming_mode; +} + +void cpr_set_incoming_mode(MigMode mode) +{ + incoming_mode = mode; +} + +bool cpr_is_incoming(void) +{ + return incoming_mode != MIG_MODE_NONE; +} + +int cpr_state_save(MigrationChannel *channel, Error **errp) +{ + int ret; + QEMUFile *f; + MigMode mode = migrate_mode(); + + trace_cpr_state_save(MigMode_str(mode)); + + if (mode == MIG_MODE_CPR_TRANSFER) { + g_assert(channel); + f = cpr_transfer_output(channel, errp); + } else { + return 0; + } + if (!f) { + return -1; + } + + qemu_put_be32(f, QEMU_CPR_FILE_MAGIC); + qemu_put_be32(f, QEMU_CPR_FILE_VERSION); + + ret = vmstate_save_state(f, &vmstate_cpr_state, &cpr_state, 0); + if (ret) { + error_setg(errp, "vmstate_save_state error %d", ret); + 
qemu_fclose(f); + return ret; + } + + /* + * Close the socket only partially so we can later detect when the other + * end closes by getting a HUP event. + */ + qemu_fflush(f); + qio_channel_shutdown(qemu_file_get_ioc(f), QIO_CHANNEL_SHUTDOWN_WRITE, + NULL); + cpr_state_file = f; + return 0; +} + +int cpr_state_load(MigrationChannel *channel, Error **errp) +{ + int ret; + uint32_t v; + QEMUFile *f; + MigMode mode = 0; + + if (channel) { + mode = MIG_MODE_CPR_TRANSFER; + cpr_set_incoming_mode(mode); + f = cpr_transfer_input(channel, errp); + } else { + return 0; + } + if (!f) { + return -1; + } + + trace_cpr_state_load(MigMode_str(mode)); + + v = qemu_get_be32(f); + if (v != QEMU_CPR_FILE_MAGIC) { + error_setg(errp, "Not a migration stream (bad magic %x)", v); + qemu_fclose(f); + return -EINVAL; + } + v = qemu_get_be32(f); + if (v != QEMU_CPR_FILE_VERSION) { + error_setg(errp, "Unsupported migration stream version %d", v); + qemu_fclose(f); + return -ENOTSUP; + } + + ret = vmstate_load_state(f, &vmstate_cpr_state, &cpr_state, 1); + if (ret) { + error_setg(errp, "vmstate_load_state error %d", ret); + qemu_fclose(f); + return ret; + } + + /* + * Let the caller decide when to close the socket (and generate a HUP event + * for the sending side). + */ + cpr_state_file = f; + + return ret; +} + +void cpr_state_close(void) +{ + if (cpr_state_file) { + qemu_fclose(cpr_state_file); + cpr_state_file = NULL; + } +} + +bool cpr_incoming_needed(void *opaque) +{ + MigMode mode = migrate_mode(); + return mode == MIG_MODE_CPR_TRANSFER; +} + +/* + * cpr_get_fd_param: find a descriptor and return its value. + * + * @name: CPR name for the descriptor + * @fdname: An integer-valued string, or a name passed to a getfd command + * @index: CPR index of the descriptor + * @errp: returned error message + * + * If CPR is not being performed, then use @fdname to find the fd. + * If CPR is being performed, then ignore @fdname, and look for @name + * and @index in CPR state. + * + * On success returns the fd value, else returns -1. + */ +int cpr_get_fd_param(const char *name, const char *fdname, int index, + Error **errp) +{ + ERRP_GUARD(); + int fd; + + if (cpr_is_incoming()) { + fd = cpr_find_fd(name, index); + if (fd < 0) { + error_setg(errp, "cannot find saved value for fd %s", fdname); + } + } else { + fd = monitor_fd_param(monitor_cur(), fdname, errp); + if (fd >= 0) { + cpr_save_fd(name, index, fd); + } else { + error_prepend(errp, "Could not parse object fd %s:", fdname); + } + } + return fd; +} diff --git a/migration/cpu-throttle.c b/migration/cpu-throttle.c new file mode 100644 index 00000000000..0642e6bdeaf --- /dev/null +++ b/migration/cpu-throttle.c @@ -0,0 +1,199 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/thread.h" +#include "hw/core/cpu.h" +#include "qemu/main-loop.h" +#include "system/cpus.h" +#include "system/cpu-throttle.h" +#include "migration.h" +#include "migration-stats.h" +#include "trace.h" + +/* vcpu throttling controls */ +static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer; +static unsigned int throttle_percentage; +static bool throttle_dirty_sync_timer_active; +static uint64_t throttle_dirty_sync_count_prev; + +#define CPU_THROTTLE_PCT_MIN 1 +#define CPU_THROTTLE_PCT_MAX 99 +#define CPU_THROTTLE_TIMESLICE_NS 10000000 + +/* Making sure RAMBlock dirty bitmap is synchronized every five seconds */ +#define CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS 5000 + +static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque) +{ + double pct; + double throttle_ratio; + int64_t sleeptime_ns, endtime_ns; + + if (!cpu_throttle_get_percentage()) { + return; + } + + pct = (double)cpu_throttle_get_percentage() / 100; + throttle_ratio = pct / (1 - pct); + /* Add 1ns to fix double's rounding error (like 0.9999999...) */ + sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1); + endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns; + while (sleeptime_ns > 0 && !cpu->stop) { + if (sleeptime_ns > SCALE_MS) { + qemu_cond_timedwait_bql(cpu->halt_cond, + sleeptime_ns / SCALE_MS); + } else { + bql_unlock(); + g_usleep(sleeptime_ns / SCALE_US); + bql_lock(); + } + sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME); + } + qatomic_set(&cpu->throttle_thread_scheduled, 0); +} + +static void cpu_throttle_timer_tick(void *opaque) +{ + CPUState *cpu; + double pct; + + /* Stop the timer if needed */ + if (!cpu_throttle_get_percentage()) { + return; + } + CPU_FOREACH(cpu) { + if (!qatomic_xchg(&cpu->throttle_thread_scheduled, 1)) { + async_run_on_cpu(cpu, cpu_throttle_thread, + RUN_ON_CPU_NULL); + } + } + + pct = (double)cpu_throttle_get_percentage() / 100; + timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) + + CPU_THROTTLE_TIMESLICE_NS / (1 - pct)); +} + +void cpu_throttle_set(int new_throttle_pct) +{ + /* + * boolean to store whether throttle is already active or not, + * before modifying throttle_percentage + */ + bool throttle_active = cpu_throttle_active(); + + trace_cpu_throttle_set(new_throttle_pct); + + /* Ensure throttle percentage is within valid range */ + new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX); + new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN); + + qatomic_set(&throttle_percentage, new_throttle_pct); + + if (!throttle_active) { + cpu_throttle_timer_tick(NULL); + } +} + +void cpu_throttle_stop(void) +{ + qatomic_set(&throttle_percentage, 0); + cpu_throttle_dirty_sync_timer(false); +} + +bool cpu_throttle_active(void) +{ + return (cpu_throttle_get_percentage() != 0); +} + +int cpu_throttle_get_percentage(void) +{ + return qatomic_read(&throttle_percentage); +} + +void cpu_throttle_dirty_sync_timer_tick(void *opaque) +{ + uint64_t sync_cnt = 
stat64_get(&mig_stats.dirty_sync_count); + + /* + * The first iteration copies all memory anyhow and has no + * effect on guest performance, therefore omit it to avoid + * paying extra for the sync penalty. + */ + if (sync_cnt <= 1) { + goto end; + } + + if (sync_cnt == throttle_dirty_sync_count_prev) { + trace_cpu_throttle_dirty_sync(); + WITH_RCU_READ_LOCK_GUARD() { + migration_bitmap_sync_precopy(false); + } + } + +end: + throttle_dirty_sync_count_prev = stat64_get(&mig_stats.dirty_sync_count); + + timer_mod(throttle_dirty_sync_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + + CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS); +} + +static bool cpu_throttle_dirty_sync_active(void) +{ + return qatomic_read(&throttle_dirty_sync_timer_active); +} + +void cpu_throttle_dirty_sync_timer(bool enable) +{ + assert(throttle_dirty_sync_timer); + + if (enable) { + if (!cpu_throttle_dirty_sync_active()) { + /* + * Always reset the dirty sync count cache, in case migration + * was cancelled once. + */ + throttle_dirty_sync_count_prev = 0; + timer_mod(throttle_dirty_sync_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + + CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS); + qatomic_set(&throttle_dirty_sync_timer_active, 1); + } + } else { + if (cpu_throttle_dirty_sync_active()) { + timer_del(throttle_dirty_sync_timer); + qatomic_set(&throttle_dirty_sync_timer_active, 0); + } + } +} + +void cpu_throttle_init(void) +{ + throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT, + cpu_throttle_timer_tick, NULL); + throttle_dirty_sync_timer = + timer_new_ms(QEMU_CLOCK_VIRTUAL_RT, + cpu_throttle_dirty_sync_timer_tick, NULL); +} diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c index 1d9db812990..986624c79a1 100644 --- a/migration/dirtyrate.c +++ b/migration/dirtyrate.c @@ -14,7 +14,7 @@ #include "qemu/error-report.h" #include "hw/core/cpu.h" #include "qapi/error.h" -#include "exec/ramblock.h" +#include "system/ramblock.h" #include "exec/target_page.h" #include "qemu/rcu_queue.h" #include "qemu/main-loop.h" @@ -24,11 +24,12 @@ #include "dirtyrate.h" #include "monitor/hmp.h" #include "monitor/monitor.h" -#include "qapi/qmp/qdict.h" -#include "sysemu/kvm.h" -#include "sysemu/runstate.h" -#include "exec/memory.h" +#include "qobject/qdict.h" +#include "system/kvm.h" +#include "system/runstate.h" +#include "system/memory.h" #include "qemu/xxhash.h" +#include "migration.h" /* * total_dirty_pages is procted by BQL and is used @@ -149,12 +150,12 @@ int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms, unsigned int flag, bool one_shot) { - DirtyPageRecord *records; + DirtyPageRecord *records = NULL; int64_t init_time_ms; int64_t duration; int64_t dirtyrate; int i = 0; - unsigned int gen_id; + unsigned int gen_id = 0; retry: init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); @@ -228,8 +229,7 @@ static int time_unit_to_power(TimeUnit time_unit) case TIME_UNIT_MILLISECOND: return -3; default: - assert(false); /* unreachable */ - return 0; + g_assert_not_reached(); } } @@ -437,6 +437,7 @@ static void get_ramblock_dirty_info(RAMBlock *block, struct DirtyRateConfig *config) { uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes; + gsize len; /* Right shift 30 bits to calc ramblock size in GB */ info->sample_pages_count = (qemu_ram_get_used_length(block) * @@ -445,7 +446,9 @@ static void get_ramblock_dirty_info(RAMBlock *block, info->ramblock_pages = qemu_ram_get_used_length(block) >> qemu_target_page_bits(); info->ramblock_addr = qemu_ram_get_host_addr(block); - strcpy(info->idstr, qemu_ram_get_idstr(block)); + 
len = g_strlcpy(info->idstr, qemu_ram_get_idstr(block), + sizeof(info->idstr)); + g_assert(len < sizeof(info->idstr)); } static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count) @@ -840,8 +843,9 @@ void qmp_calc_dirty_rate(int64_t calc_time, init_dirtyrate_stat(config); - qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread, - (void *)&config, QEMU_THREAD_DETACHED); + qemu_thread_create(&thread, MIGRATION_THREAD_DIRTY_RATE, + get_dirtyrate_thread, (void *)&config, + QEMU_THREAD_DETACHED); } diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h index 869c0609412..35225c36b69 100644 --- a/migration/dirtyrate.h +++ b/migration/dirtyrate.h @@ -13,7 +13,7 @@ #ifndef QEMU_MIGRATION_DIRTYRATE_H #define QEMU_MIGRATION_DIRTYRATE_H -#include "sysemu/dirtyrate.h" +#include "system/dirtyrate.h" /* * Sample 512 pages per GB as default. diff --git a/migration/fd.c b/migration/fd.c index aab5189eac5..9bf9be6acb2 100644 --- a/migration/fd.c +++ b/migration/fd.c @@ -25,6 +25,29 @@ #include "io/channel-util.h" #include "trace.h" +static bool fd_is_pipe(int fd) +{ + struct stat statbuf; + + if (fstat(fd, &statbuf) == -1) { + return false; + } + + return S_ISFIFO(statbuf.st_mode); +} + +static bool migration_fd_valid(int fd) +{ + if (fd_is_socket(fd)) { + return true; + } + + if (fd_is_pipe(fd)) { + return true; + } + + return false; +} void fd_start_outgoing_migration(MigrationState *s, const char *fdname, Error **errp) { @@ -34,7 +57,7 @@ void fd_start_outgoing_migration(MigrationState *s, const char *fdname, Error ** return; } - if (!fd_is_socket(fd)) { + if (!migration_fd_valid(fd)) { warn_report("fd: migration to a file is deprecated." " Use file: instead."); } @@ -68,7 +91,7 @@ void fd_start_incoming_migration(const char *fdname, Error **errp) return; } - if (!fd_is_socket(fd)) { + if (!migration_fd_valid(fd)) { warn_report("fd: migration to a file is deprecated." 
" Use file: instead."); } diff --git a/migration/file.c b/migration/file.c index 6451a21c866..bb8031e3c77 100644 --- a/migration/file.c +++ b/migration/file.c @@ -6,7 +6,7 @@ */ #include "qemu/osdep.h" -#include "exec/ramblock.h" +#include "system/ramblock.h" #include "qemu/cutils.h" #include "qemu/error-report.h" #include "qapi/error.h" @@ -196,12 +196,13 @@ void file_start_incoming_migration(FileMigrationArgs *file_args, Error **errp) } int file_write_ramblock_iov(QIOChannel *ioc, const struct iovec *iov, - int niov, RAMBlock *block, Error **errp) + int niov, MultiFDPages_t *pages, Error **errp) { ssize_t ret = 0; int i, slice_idx, slice_num; uintptr_t base, next, offset; size_t len; + RAMBlock *block = pages->block; slice_idx = 0; slice_num = 1; diff --git a/migration/file.h b/migration/file.h index 9f71e87f743..1a1115f7f10 100644 --- a/migration/file.h +++ b/migration/file.h @@ -21,6 +21,6 @@ int file_parse_offset(char *filespec, uint64_t *offsetp, Error **errp); void file_cleanup_outgoing_migration(void); bool file_send_channel_create(gpointer opaque, Error **errp); int file_write_ramblock_iov(QIOChannel *ioc, const struct iovec *iov, - int niov, RAMBlock *block, Error **errp); + int niov, MultiFDPages_t *pages, Error **errp); int multifd_file_recv_data(MultiFDRecvParams *p, Error **errp); #endif diff --git a/migration/global_state.c b/migration/global_state.c index 3a9796cae28..c1f90fce0f9 100644 --- a/migration/global_state.c +++ b/migration/global_state.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" #include "qemu/error-report.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "qapi/error.h" #include "migration.h" #include "migration/global_state.h" diff --git a/migration/meson.build b/migration/meson.build index 5ce2acb41e5..276da3be5a3 100644 --- a/migration/meson.build +++ b/migration/meson.build @@ -11,8 +11,12 @@ migration_files = files( system_ss.add(files( 'block-dirty-bitmap.c', + 'block-active.c', 'channel.c', 'channel-block.c', + 'cpr.c', + 'cpr-transfer.c', + 'cpu-throttle.c', 'dirtyrate.c', 'exec.c', 'fd.c', @@ -21,6 +25,8 @@ system_ss.add(files( 'migration-hmp-cmds.c', 'migration.c', 'multifd.c', + 'multifd-device-state.c', + 'multifd-nocomp.c', 'multifd-zlib.c', 'multifd-zero-page.c', 'options.c', @@ -41,7 +47,8 @@ system_ss.add(when: rdma, if_true: files('rdma.c')) system_ss.add(when: zstd, if_true: files('multifd-zstd.c')) system_ss.add(when: qpl, if_true: files('multifd-qpl.c')) system_ss.add(when: uadk, if_true: files('multifd-uadk.c')) +system_ss.add(when: qatzip, if_true: files('multifd-qatzip.c')) specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_true: files('ram.c', - 'target.c')) + 'vfio.c')) diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c index 7d608d26e19..0fc21f06473 100644 --- a/migration/migration-hmp-cmds.c +++ b/migration/migration-hmp-cmds.c @@ -21,15 +21,15 @@ #include "qapi/error.h" #include "qapi/qapi-commands-migration.h" #include "qapi/qapi-visit-migration.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/string-input-visitor.h" #include "qapi/string-output-visitor.h" #include "qemu/cutils.h" #include "qemu/error-report.h" #include "qemu/sockets.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "ui/qemu-spice.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "options.h" #include "migration.h" @@ -37,27 +37,108 @@ static void migration_global_dump(Monitor *mon) { MigrationState *ms = migrate_get_current(); - 
monitor_printf(mon, "globals:\n"); - monitor_printf(mon, "store-global-state: %s\n", + monitor_printf(mon, "Globals:\n"); + monitor_printf(mon, " store-global-state: %s\n", ms->store_global_state ? "on" : "off"); - monitor_printf(mon, "only-migratable: %s\n", + monitor_printf(mon, " only-migratable: %s\n", only_migratable ? "on" : "off"); - monitor_printf(mon, "send-configuration: %s\n", + monitor_printf(mon, " send-configuration: %s\n", ms->send_configuration ? "on" : "off"); - monitor_printf(mon, "send-section-footer: %s\n", + monitor_printf(mon, " send-section-footer: %s\n", ms->send_section_footer ? "on" : "off"); - monitor_printf(mon, "clear-bitmap-shift: %u\n", + monitor_printf(mon, " send-switchover-start: %s\n", + ms->send_switchover_start ? "on" : "off"); + monitor_printf(mon, " clear-bitmap-shift: %u\n", ms->clear_bitmap_shift); } +static const gchar *format_time_str(uint64_t us) +{ + const char *units[] = {"us", "ms", "sec"}; + int index = 0; + + while (us >= 1000 && index + 1 < ARRAY_SIZE(units)) { + us /= 1000; + index++; + } + + return g_strdup_printf("%"PRIu64" %s", us, units[index]); +} + +static void migration_dump_blocktime(Monitor *mon, MigrationInfo *info) +{ + if (info->has_postcopy_blocktime) { + monitor_printf(mon, "Postcopy Blocktime (ms): %" PRIu32 "\n", + info->postcopy_blocktime); + } + + if (info->has_postcopy_vcpu_blocktime) { + uint32List *item = info->postcopy_vcpu_blocktime; + const char *sep = ""; + int count = 0; + + monitor_printf(mon, "Postcopy vCPU Blocktime (ms):\n ["); + + while (item) { + monitor_printf(mon, "%s%"PRIu32, sep, item->value); + item = item->next; + /* Each line 10 vcpu results, newline if there's more */ + sep = ((++count % 10 == 0) && item) ? ",\n " : ", "; + } + monitor_printf(mon, "]\n"); + } + + if (info->has_postcopy_latency) { + monitor_printf(mon, "Postcopy Latency (ns): %" PRIu64 "\n", + info->postcopy_latency); + } + + if (info->has_postcopy_non_vcpu_latency) { + monitor_printf(mon, "Postcopy non-vCPU Latencies (ns): %" PRIu64 "\n", + info->postcopy_non_vcpu_latency); + } + + if (info->has_postcopy_vcpu_latency) { + uint64List *item = info->postcopy_vcpu_latency; + const char *sep = ""; + int count = 0; + + monitor_printf(mon, "Postcopy vCPU Latencies (ns):\n ["); + + while (item) { + monitor_printf(mon, "%s%"PRIu64, sep, item->value); + item = item->next; + /* Each line 10 vcpu results, newline if there's more */ + sep = ((++count % 10 == 0) && item) ? 
",\n " : ", "; + } + monitor_printf(mon, "]\n"); + } + + if (info->has_postcopy_latency_dist) { + uint64List *item = info->postcopy_latency_dist; + int count = 0; + + monitor_printf(mon, "Postcopy Latency Distribution:\n"); + + while (item) { + g_autofree const gchar *from = format_time_str(1UL << count); + g_autofree const gchar *to = format_time_str(1UL << (count + 1)); + + monitor_printf(mon, " [ %8s - %8s ]: %10"PRIu64"\n", + from, to, item->value); + item = item->next; + count++; + } + } +} + void hmp_info_migrate(Monitor *mon, const QDict *qdict) { + bool show_all = qdict_get_try_bool(qdict, "all", false); MigrationInfo *info; info = qmp_query_migrate(NULL); - migration_global_dump(mon); - if (info->blocked_reasons) { strList *reasons = info->blocked_reasons; monitor_printf(mon, "Outgoing migration blocked:\n"); @@ -68,147 +149,143 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) } if (info->has_status) { - monitor_printf(mon, "Migration status: %s", + monitor_printf(mon, "Status: \t\t%s", MigrationStatus_str(info->status)); - if (info->status == MIGRATION_STATUS_FAILED && info->error_desc) { + if ((info->status == MIGRATION_STATUS_FAILED || + info->status == MIGRATION_STATUS_POSTCOPY_PAUSED) && + info->error_desc) { monitor_printf(mon, " (%s)\n", info->error_desc); } else { monitor_printf(mon, "\n"); } - monitor_printf(mon, "total time: %" PRIu64 " ms\n", - info->total_time); - if (info->has_expected_downtime) { - monitor_printf(mon, "expected downtime: %" PRIu64 " ms\n", - info->expected_downtime); - } - if (info->has_downtime) { - monitor_printf(mon, "downtime: %" PRIu64 " ms\n", - info->downtime); + if (info->total_time) { + monitor_printf(mon, "Time (ms): \t\ttotal=%" PRIu64, + info->total_time); + if (info->has_setup_time) { + monitor_printf(mon, ", setup=%" PRIu64, + info->setup_time); + } + if (info->has_expected_downtime) { + monitor_printf(mon, ", exp_down=%" PRIu64, + info->expected_downtime); + } + if (info->has_downtime) { + monitor_printf(mon, ", down=%" PRIu64, + info->downtime); + } + monitor_printf(mon, "\n"); } - if (info->has_setup_time) { - monitor_printf(mon, "setup: %" PRIu64 " ms\n", - info->setup_time); + } + + if (info->has_socket_address) { + SocketAddressList *addr; + + monitor_printf(mon, "Sockets: [\n"); + + for (addr = info->socket_address; addr; addr = addr->next) { + char *s = socket_uri(addr->value); + monitor_printf(mon, "\t%s\n", s); + g_free(s); } + monitor_printf(mon, "]\n"); } if (info->ram) { - monitor_printf(mon, "transferred ram: %" PRIu64 " kbytes\n", - info->ram->transferred >> 10); - monitor_printf(mon, "throughput: %0.2f mbps\n", + g_autofree char *str_psize = size_to_str(info->ram->page_size); + g_autofree char *str_total = size_to_str(info->ram->total); + g_autofree char *str_transferred = size_to_str(info->ram->transferred); + g_autofree char *str_remaining = size_to_str(info->ram->remaining); + g_autofree char *str_precopy = size_to_str(info->ram->precopy_bytes); + g_autofree char *str_multifd = size_to_str(info->ram->multifd_bytes); + g_autofree char *str_postcopy = size_to_str(info->ram->postcopy_bytes); + + monitor_printf(mon, "RAM info:\n"); + monitor_printf(mon, " Throughput (Mbps): \t%0.2f\n", info->ram->mbps); - monitor_printf(mon, "remaining ram: %" PRIu64 " kbytes\n", - info->ram->remaining >> 10); - monitor_printf(mon, "total ram: %" PRIu64 " kbytes\n", - info->ram->total >> 10); - monitor_printf(mon, "duplicate: %" PRIu64 " pages\n", - info->ram->duplicate); - monitor_printf(mon, "normal: %" PRIu64 " pages\n", - 
info->ram->normal); - monitor_printf(mon, "normal bytes: %" PRIu64 " kbytes\n", - info->ram->normal_bytes >> 10); - monitor_printf(mon, "dirty sync count: %" PRIu64 "\n", - info->ram->dirty_sync_count); - monitor_printf(mon, "page size: %" PRIu64 " kbytes\n", - info->ram->page_size >> 10); - monitor_printf(mon, "multifd bytes: %" PRIu64 " kbytes\n", - info->ram->multifd_bytes >> 10); - monitor_printf(mon, "pages-per-second: %" PRIu64 "\n", - info->ram->pages_per_second); + monitor_printf(mon, " Sizes: \t\tpagesize=%s, total=%s\n", + str_psize, str_total); + monitor_printf(mon, " Transfers: \t\ttransferred=%s, remain=%s\n", + str_transferred, str_remaining); + monitor_printf(mon, " Channels: \t\tprecopy=%s, " + "multifd=%s, postcopy=%s", + str_precopy, str_multifd, str_postcopy); + + if (info->vfio) { + g_autofree char *str_vfio = size_to_str(info->vfio->transferred); + + monitor_printf(mon, ", vfio=%s", str_vfio); + } + monitor_printf(mon, "\n"); + monitor_printf(mon, " Page Types: \tnormal=%" PRIu64 + ", zero=%" PRIu64 "\n", + info->ram->normal, info->ram->duplicate); + monitor_printf(mon, " Page Rates (pps): \ttransfer=%" PRIu64, + info->ram->pages_per_second); if (info->ram->dirty_pages_rate) { - monitor_printf(mon, "dirty pages rate: %" PRIu64 " pages\n", + monitor_printf(mon, ", dirty=%" PRIu64, info->ram->dirty_pages_rate); } + monitor_printf(mon, "\n"); + + monitor_printf(mon, " Others: \t\tdirty_syncs=%" PRIu64, + info->ram->dirty_sync_count); if (info->ram->postcopy_requests) { - monitor_printf(mon, "postcopy request count: %" PRIu64 "\n", + monitor_printf(mon, ", postcopy_req=%" PRIu64, info->ram->postcopy_requests); } - if (info->ram->precopy_bytes) { - monitor_printf(mon, "precopy ram: %" PRIu64 " kbytes\n", - info->ram->precopy_bytes >> 10); - } if (info->ram->downtime_bytes) { - monitor_printf(mon, "downtime ram: %" PRIu64 " kbytes\n", - info->ram->downtime_bytes >> 10); - } - if (info->ram->postcopy_bytes) { - monitor_printf(mon, "postcopy ram: %" PRIu64 " kbytes\n", - info->ram->postcopy_bytes >> 10); + monitor_printf(mon, ", downtime_bytes=%" PRIu64, + info->ram->downtime_bytes); } if (info->ram->dirty_sync_missed_zero_copy) { - monitor_printf(mon, - "Zero-copy-send fallbacks happened: %" PRIu64 " times\n", + monitor_printf(mon, ", zerocopy_fallbacks=%" PRIu64, info->ram->dirty_sync_missed_zero_copy); } + monitor_printf(mon, "\n"); + } + + if (!show_all) { + goto out; } + migration_global_dump(mon); + if (info->xbzrle_cache) { - monitor_printf(mon, "cache size: %" PRIu64 " bytes\n", - info->xbzrle_cache->cache_size); - monitor_printf(mon, "xbzrle transferred: %" PRIu64 " kbytes\n", - info->xbzrle_cache->bytes >> 10); - monitor_printf(mon, "xbzrle pages: %" PRIu64 " pages\n", - info->xbzrle_cache->pages); - monitor_printf(mon, "xbzrle cache miss: %" PRIu64 " pages\n", - info->xbzrle_cache->cache_miss); - monitor_printf(mon, "xbzrle cache miss rate: %0.2f\n", - info->xbzrle_cache->cache_miss_rate); - monitor_printf(mon, "xbzrle encoding rate: %0.2f\n", - info->xbzrle_cache->encoding_rate); - monitor_printf(mon, "xbzrle overflow: %" PRIu64 "\n", + monitor_printf(mon, "XBZRLE: size=%" PRIu64 + ", transferred=%" PRIu64 + ", pages=%" PRIu64 + ", miss=%" PRIu64 "\n" + " miss_rate=%0.2f" + ", encode_rate=%0.2f" + ", overflow=%" PRIu64 "\n", + info->xbzrle_cache->cache_size, + info->xbzrle_cache->bytes, + info->xbzrle_cache->pages, + info->xbzrle_cache->cache_miss, + info->xbzrle_cache->cache_miss_rate, + info->xbzrle_cache->encoding_rate, info->xbzrle_cache->overflow); } if 
(info->has_cpu_throttle_percentage) { - monitor_printf(mon, "cpu throttle percentage: %" PRIu64 "\n", + monitor_printf(mon, "CPU Throttle (%%): %" PRIu64 "\n", info->cpu_throttle_percentage); } if (info->has_dirty_limit_throttle_time_per_round) { - monitor_printf(mon, "dirty-limit throttle time: %" PRIu64 " us\n", + monitor_printf(mon, "Dirty-limit Throttle (us): %" PRIu64 "\n", info->dirty_limit_throttle_time_per_round); } if (info->has_dirty_limit_ring_full_time) { - monitor_printf(mon, "dirty-limit ring full time: %" PRIu64 " us\n", + monitor_printf(mon, "Dirty-limit Ring Full (us): %" PRIu64 "\n", info->dirty_limit_ring_full_time); } - if (info->has_postcopy_blocktime) { - monitor_printf(mon, "postcopy blocktime: %u\n", - info->postcopy_blocktime); - } - - if (info->has_postcopy_vcpu_blocktime) { - Visitor *v; - char *str; - v = string_output_visitor_new(false, &str); - visit_type_uint32List(v, NULL, &info->postcopy_vcpu_blocktime, - &error_abort); - visit_complete(v, &str); - monitor_printf(mon, "postcopy vcpu blocktime: %s\n", str); - g_free(str); - visit_free(v); - } - if (info->has_socket_address) { - SocketAddressList *addr; - - monitor_printf(mon, "socket address: [\n"); - - for (addr = info->socket_address; addr; addr = addr->next) { - char *s = socket_uri(addr->value); - monitor_printf(mon, "\t%s\n", s); - g_free(s); - } - monitor_printf(mon, "]\n"); - } - - if (info->vfio) { - monitor_printf(mon, "vfio device transferred: %" PRIu64 " kbytes\n", - info->vfio->transferred >> 10); - } - + migration_dump_blocktime(mon, info); +out: qapi_free_MigrationInfo(info); } @@ -576,6 +653,10 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) p->has_multifd_zlib_level = true; visit_type_uint8(v, param, &p->multifd_zlib_level, &err); break; + case MIGRATION_PARAMETER_MULTIFD_QATZIP_LEVEL: + p->has_multifd_qatzip_level = true; + visit_type_uint8(v, param, &p->multifd_qatzip_level, &err); + break; case MIGRATION_PARAMETER_MULTIFD_ZSTD_LEVEL: p->has_multifd_zstd_level = true; visit_type_uint8(v, param, &p->multifd_zstd_level, &err); @@ -636,7 +717,7 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) visit_type_bool(v, param, &p->direct_io, &err); break; default: - assert(0); + g_assert_not_reached(); } if (err) { diff --git a/migration/migration.c b/migration/migration.c index 3dea06d5773..10c216d25de 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -14,6 +14,7 @@ */ #include "qemu/osdep.h" +#include "qemu/ctype.h" #include "qemu/cutils.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" @@ -22,11 +23,12 @@ #include "fd.h" #include "file.h" #include "socket.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" -#include "sysemu/cpu-throttle.h" +#include "system/runstate.h" +#include "system/system.h" +#include "system/cpu-throttle.h" #include "rdma.h" #include "ram.h" +#include "migration/cpr.h" #include "migration/global_state.h" #include "migration/misc.h" #include "migration.h" @@ -43,7 +45,7 @@ #include "qapi/qapi-commands-migration.h" #include "qapi/qapi-events-migration.h" #include "qapi/qmp/qerror.h" -#include "qapi/qmp/qnull.h" +#include "qobject/qnull.h" #include "qemu/rcu.h" #include "postcopy-ram.h" #include "qemu/thread.h" @@ -59,13 +61,13 @@ #include "multifd.h" #include "threadinfo.h" #include "qemu/yank.h" -#include "sysemu/cpus.h" +#include "system/cpus.h" #include "yank_functions.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" #include "options.h" -#include "sysemu/dirtylimit.h" +#include 
"system/dirtylimit.h" #include "qemu/sockets.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #define NOTIFIER_ELEM_INIT(array, elem) \ [elem] = NOTIFIER_WITH_RETURN_LIST_INITIALIZER((array)[elem]) @@ -75,6 +77,7 @@ static NotifierWithReturnList migration_state_notifiers[] = { NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_NORMAL), NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_REBOOT), + NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_TRANSFER), }; /* Messages sent on the return path from destination to source */ @@ -92,6 +95,9 @@ enum mig_rp_message_type { MIG_RP_MSG_MAX }; +/* Migration channel types */ +enum { CH_MAIN, CH_MULTIFD, CH_POSTCOPY }; + /* When we add fault tolerance, we could have several migrations at once. For now we don't need to add dynamic creation of migration */ @@ -102,12 +108,10 @@ static MigrationIncomingState *current_incoming; static GSList *migration_blockers[MIG_MODE__MAX]; static bool migration_object_check(MigrationState *ms, Error **errp); -static int migration_maybe_pause(MigrationState *s, - int *current_active_state, - int new_state); -static void migrate_fd_cancel(MigrationState *s); +static bool migration_switchover_start(MigrationState *s, Error **errp); static bool close_return_path_on_source(MigrationState *s); static void migration_completion_end(MigrationState *s); +static void migrate_hup_delete(MigrationState *s); static void migration_downtime_start(MigrationState *s) { @@ -115,6 +119,27 @@ static void migration_downtime_start(MigrationState *s) s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); } +/* + * This is unfortunate: incoming migration actually needs the outgoing + * migration state (MigrationState) to be there too, e.g. to query + * capabilities, parameters, using locks, setup errors, etc. + * + * NOTE: when calling this, making sure current_migration exists and not + * been freed yet! Otherwise trying to access the refcount is already + * an use-after-free itself.. + * + * TODO: Move shared part of incoming / outgoing out into separate object. + * Then this is not needed. + */ +static void migrate_incoming_ref_outgoing_state(void) +{ + object_ref(migrate_get_current()); +} +static void migrate_incoming_unref_outgoing_state(void) +{ + object_unref(migrate_get_current()); +} + static void migration_downtime_end(MigrationState *s) { int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); @@ -125,9 +150,19 @@ static void migration_downtime_end(MigrationState *s) */ if (!s->downtime) { s->downtime = now - s->downtime_start; + trace_vmstate_downtime_checkpoint("src-downtime-end"); } +} - trace_vmstate_downtime_checkpoint("src-downtime-end"); +static void precopy_notify_complete(void) +{ + Error *local_err = NULL; + + if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) { + error_report_err(local_err); + } + + trace_migration_precopy_complete(); } static bool migration_needs_multiple_sockets(void) @@ -135,6 +170,21 @@ static bool migration_needs_multiple_sockets(void) return migrate_multifd() || migrate_postcopy_preempt(); } +static RunState migration_get_target_runstate(void) +{ + /* + * When the global state is not migrated, it means we don't know the + * runstate of the src QEMU. We don't have much choice but assuming + * the VM is running. NOTE: this is pretty rare case, so far only Xen + * uses it. 
+ */ + if (!global_state_received()) { + return RUN_STATE_RUNNING; + } + + return global_state_get_runstate(); +} + static bool transport_supports_multi_channels(MigrationAddress *addr) { if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) { @@ -203,9 +253,33 @@ migration_channels_and_transport_compatible(MigrationAddress *addr, return false; } + if (migrate_mode() == MIG_MODE_CPR_TRANSFER && + addr->transport == MIGRATION_ADDRESS_TYPE_FILE) { + error_setg(errp, "Migration requires streamable transport (eg unix)"); + return false; + } + return true; } +static bool +migration_capabilities_and_transport_compatible(MigrationAddress *addr, + Error **errp) +{ + if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) { + return migrate_rdma_caps_check(migrate_get_current()->capabilities, + errp); + } + + return true; +} + +static bool migration_transport_compatible(MigrationAddress *addr, Error **errp) +{ + return migration_channels_and_transport_compatible(addr, errp) && + migration_capabilities_and_transport_compatible(addr, errp); +} + static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp) { uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp; @@ -263,6 +337,9 @@ void migration_object_init(void) ram_mig_init(); dirty_bitmap_mig_init(); + + /* Initialize cpu throttle timers */ + cpu_throttle_init(); } typedef struct { @@ -306,17 +383,6 @@ void migration_bh_schedule(QEMUBHFunc *cb, void *opaque) qemu_bh_schedule(bh); } -void migration_cancel(const Error *error) -{ - if (error) { - migrate_set_error(current_migration, error); - } - if (migrate_dirty_limit()) { - qmp_cancel_vcpu_dirty_limit(false, -1, NULL); - } - migrate_fd_cancel(current_migration); -} - void migration_shutdown(void) { /* @@ -329,7 +395,7 @@ void migration_shutdown(void) * Cancel the current migration - that will (eventually) * stop the migration using this structure */ - migration_cancel(NULL); + migration_cancel(); object_unref(OBJECT(current_migration)); /* @@ -379,6 +445,24 @@ void migration_incoming_state_destroy(void) multifd_recv_cleanup(); + /* + * RAM state cleanup needs to happen after multifd cleanup, because + * multifd threads can use some of its states (receivedmap). + * The VFIO load_cleanup() implementation is BQL-sensitive. It requires + * BQL must NOT be taken when recycling load threads, so that it won't + * block the load threads from making progress on address space + * modification operations. + * + * To make it work, we could try to not take BQL for all load_cleanup(), + * or conditionally unlock BQL only if bql_locked() in VFIO. + * + * Since most existing call sites take BQL for load_cleanup(), make + * it simple by taking BQL always as the rule, so that VFIO can unlock + * BQL and retake unconditionally. 
+ */ + assert(bql_locked()); + qemu_loadvm_state_cleanup(mis); + if (mis->to_src_file) { /* Tell source that we are done */ migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0); @@ -410,6 +494,7 @@ void migration_incoming_state_destroy(void) mis->postcopy_qemufile_dst = NULL; } + cpr_set_incoming_mode(MIG_MODE_NONE); yank_unregister_instance(MIGRATION_YANK_INSTANCE); } @@ -491,22 +576,27 @@ int migrate_send_rp_message_req_pages(MigrationIncomingState *mis, } int migrate_send_rp_req_pages(MigrationIncomingState *mis, - RAMBlock *rb, ram_addr_t start, uint64_t haddr) + RAMBlock *rb, ram_addr_t start, uint64_t haddr, + uint32_t tid) { void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb)); bool received = false; WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) { received = ramblock_recv_bitmap_test_byte_offset(rb, start); - if (!received && !g_tree_lookup(mis->page_requested, aligned)) { - /* - * The page has not been received, and it's not yet in the page - * request list. Queue it. Set the value of element to 1, so that - * things like g_tree_lookup() will return TRUE (1) when found. - */ - g_tree_insert(mis->page_requested, aligned, (gpointer)1); - qatomic_inc(&mis->page_requested_count); - trace_postcopy_page_req_add(aligned, mis->page_requested_count); + if (!received) { + if (!g_tree_lookup(mis->page_requested, aligned)) { + /* + * The page has not been received, and it's not yet in the + * page request list. Queue it. Set the value of element + * to 1, so that things like g_tree_lookup() will return + * TRUE (1) when found. + */ + g_tree_insert(mis->page_requested, aligned, (gpointer)1); + qatomic_inc(&mis->page_requested_count); + trace_postcopy_page_req_add(aligned, mis->page_requested_count); + } + mark_postcopy_blocktime_begin(haddr, tid, rb); } } @@ -563,6 +653,16 @@ void migrate_add_address(SocketAddress *address) QAPI_CLONE(SocketAddress, address)); } +bool migrate_is_uri(const char *uri) +{ + while (*uri && *uri != ':') { + if (!qemu_isalpha(*uri++)) { + return false; + } + } + return *uri == ':'; +} + bool migrate_uri_parse(const char *uri, MigrationChannel **channel, Error **errp) { @@ -660,7 +760,8 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels, if (channels) { /* To verify that Migrate channel list has only item */ if (channels->next) { - error_setg(errp, "Channel list has more than one entries"); + error_setg(errp, "Channel list must have only one entry, " + "for type 'main'"); return; } addr = channels->value->addr; @@ -675,7 +776,7 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels, } /* transport mechanism not suitable for migration? 
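The new migrate_is_uri() helper above treats a string as a URI only when every character before the first ':' is alphabetic, so a bare filename or socket path is not mistaken for a scheme. A minimal standalone sketch of the same rule, using plain ctype instead of QEMU's qemu_isalpha() (names here are illustrative only):

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

/* Same rule as migrate_is_uri(): everything before ':' must be alphabetic. */
static bool looks_like_uri(const char *s)
{
    while (*s && *s != ':') {
        if (!isalpha((unsigned char)*s++)) {
            return false;
        }
    }
    return *s == ':';
}

int main(void)
{
    printf("%d\n", looks_like_uri("tcp:0.0.0.0:4444"));  /* 1: has a scheme prefix */
    printf("%d\n", looks_like_uri("/tmp/vm.mig"));       /* 0: plain file path     */
    printf("%d\n", looks_like_uri("unix"));              /* 0: no ':' at all       */
    return 0;
}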
*/ - if (!migration_channels_and_transport_compatible(addr, errp)) { + if (!migration_transport_compatible(addr, errp)) { return; } @@ -694,14 +795,6 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels, } #ifdef CONFIG_RDMA } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) { - if (migrate_xbzrle()) { - error_setg(errp, "RDMA and XBZRLE can't be used together"); - return; - } - if (migrate_multifd()) { - error_setg(errp, "RDMA and multifd can't be used together"); - return; - } rdma_start_incoming_migration(&addr->u.rdma, errp); #endif } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) { @@ -711,34 +804,17 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels, } else { error_setg(errp, "unknown migration protocol: %s", uri); } + + /* Close cpr socket to tell source that we are listening */ + cpr_state_close(); } static void process_incoming_migration_bh(void *opaque) { - Error *local_err = NULL; MigrationIncomingState *mis = opaque; trace_vmstate_downtime_checkpoint("dst-precopy-bh-enter"); - /* If capability late_block_activate is set: - * Only fire up the block code now if we're going to restart the - * VM, else 'cont' will do it. - * This causes file locking to happen; so we don't want it to happen - * unless we really are starting the VM. - */ - if (!migrate_late_block_activate() || - (autostart && (!global_state_received() || - runstate_is_live(global_state_get_runstate())))) { - /* Make sure all file formats throw away their mutable metadata. - * If we get an error here, just don't restart the VM yet. */ - bdrv_activate_all(&local_err); - if (local_err) { - error_report_err(local_err); - local_err = NULL; - autostart = false; - } - } - /* * This must happen after all error conditions are dealt with and * we're sure the VM is going to be running on this host. @@ -751,10 +827,23 @@ static void process_incoming_migration_bh(void *opaque) dirty_bitmap_mig_before_vm_start(); - if (!global_state_received() || - runstate_is_live(global_state_get_runstate())) { + if (runstate_is_live(migration_get_target_runstate())) { if (autostart) { - vm_start(); + /* + * Block activation is always delayed until VM starts, either + * here (which means we need to start the dest VM right now..), + * or until qmp_cont() later. + * + * We used to have cap 'late-block-activate' but now we do this + * unconditionally, as it has no harm but only benefit. E.g., + * it's not part of migration ABI on the time of disk activation. + * + * Make sure all file formats throw away their mutable + * metadata. If error, don't restart the VM yet. + */ + if (migration_block_activate(NULL)) { + vm_start(); + } } else { runstate_set(RUN_STATE_PAUSED); } @@ -813,7 +902,7 @@ process_incoming_migration_co(void *opaque) * postcopy thread. 
*/ trace_process_incoming_migration_co_postcopy_end_main(); - return; + goto out; } /* Else if something went wrong then just fall out of the normal exit */ } @@ -829,7 +918,8 @@ process_incoming_migration_co(void *opaque) } migration_bh_schedule(process_incoming_migration_bh, mis); - return; + goto out; + fail: migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_FAILED); @@ -846,6 +936,9 @@ process_incoming_migration_co(void *opaque) exit(EXIT_FAILURE); } +out: + /* Pairs with the refcount taken in qmp_migrate_incoming() */ + migrate_incoming_unref_outgoing_state(); } /** @@ -856,9 +949,8 @@ static void migration_incoming_setup(QEMUFile *f) { MigrationIncomingState *mis = migration_incoming_get_current(); - if (!mis->from_src_file) { - mis->from_src_file = f; - } + assert(!mis->from_src_file); + mis->from_src_file = f; qemu_file_set_blocking(f, false); } @@ -910,28 +1002,19 @@ void migration_fd_process_incoming(QEMUFile *f) migration_incoming_process(); } -/* - * Returns true when we want to start a new incoming migration process, - * false otherwise. - */ -static bool migration_should_start_incoming(bool main_channel) +static bool migration_has_main_and_multifd_channels(void) { - /* Multifd doesn't start unless all channels are established */ - if (migrate_multifd()) { - return migration_has_all_channels(); + MigrationIncomingState *mis = migration_incoming_get_current(); + if (!mis->from_src_file) { + /* main channel not established */ + return false; } - /* Preempt channel only starts when the main channel is created */ - if (migrate_postcopy_preempt()) { - return main_channel; + if (migrate_multifd() && !multifd_recv_all_channels_created()) { + return false; } - /* - * For all the rest types of migration, we should only reach here when - * it's the main channel that's being created, and we should always - * proceed with this channel. - */ - assert(main_channel); + /* main and all multifd channels are established */ return true; } @@ -940,59 +1023,81 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) MigrationIncomingState *mis = migration_incoming_get_current(); Error *local_err = NULL; QEMUFile *f; - bool default_channel = true; + uint8_t channel; uint32_t channel_magic = 0; int ret = 0; - if (migrate_multifd() && !migrate_mapped_ram() && - !migrate_postcopy_ram() && - qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) { - /* - * With multiple channels, it is possible that we receive channels - * out of order on destination side, causing incorrect mapping of - * source channels on destination side. Check channel MAGIC to - * decide type of channel. Please note this is best effort, postcopy - * preempt channel does not send any magic number so avoid it for - * postcopy live migration. Also tls live migration already does - * tls handshake while initializing main channel so with tls this - * issue is not possible. - */ - ret = migration_channel_read_peek(ioc, (void *)&channel_magic, - sizeof(channel_magic), errp); + if (!migration_has_main_and_multifd_channels()) { + if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) { + /* + * With multiple channels, it is possible that we receive channels + * out of order on destination side, causing incorrect mapping of + * source channels on destination side. Check channel MAGIC to + * decide type of channel. Please note this is best effort, + * postcopy preempt channel does not send any magic number so + * avoid it for postcopy live migration. 
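As the comment above explains, the destination peeks at the first four bytes of a new connection so an out-of-order multifd channel is not mistaken for the main channel; the actual classification into CH_MAIN/CH_MULTIFD follows just below. A rough sketch of the peek-and-classify idea on a raw POSIX socket (QEMU itself goes through QIOChannel and migration_channel_read_peek(); both magic constants here are placeholders and error handling is trimmed):

#include <arpa/inet.h>
#include <stdint.h>
#include <sys/socket.h>

#define VM_FILE_MAGIC_EX 0x5145564d  /* "QEVM"-style main-channel magic (illustrative) */
#define MULTIFD_MAGIC_EX 0x11223344  /* placeholder; the real value lives in multifd.h */

enum { EX_CH_MAIN, EX_CH_MULTIFD, EX_CH_UNKNOWN };

static int classify_channel(int fd)
{
    uint32_t magic;

    /* MSG_PEEK leaves the bytes in the buffer for whoever reads the stream next. */
    if (recv(fd, &magic, sizeof(magic), MSG_PEEK) != sizeof(magic)) {
        return EX_CH_UNKNOWN;
    }

    magic = ntohl(magic);
    if (magic == VM_FILE_MAGIC_EX) {
        return EX_CH_MAIN;
    }
    if (magic == MULTIFD_MAGIC_EX) {
        return EX_CH_MULTIFD;
    }
    return EX_CH_UNKNOWN;
}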
Also tls live migration + * already does tls handshake while initializing main channel so + * with tls this issue is not possible. + */ + ret = migration_channel_read_peek(ioc, (void *)&channel_magic, + sizeof(channel_magic), errp); + if (ret != 0) { + return; + } - if (ret != 0) { + channel_magic = be32_to_cpu(channel_magic); + if (channel_magic == QEMU_VM_FILE_MAGIC) { + channel = CH_MAIN; + } else if (channel_magic == MULTIFD_MAGIC) { + assert(migrate_multifd()); + channel = CH_MULTIFD; + } else if (!mis->from_src_file && + mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) { + /* reconnect main channel for postcopy recovery */ + channel = CH_MAIN; + } else { + error_setg(errp, "unknown channel magic: %u", channel_magic); + return; + } + } else if (mis->from_src_file && migrate_multifd()) { + /* + * Non-peekable channels like tls/file are processed as + * multifd channels when multifd is enabled. + */ + channel = CH_MULTIFD; + } else if (!mis->from_src_file) { + channel = CH_MAIN; + } else { + error_setg(errp, "non-peekable channel used without multifd"); return; } - - default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC)); } else { - default_channel = !mis->from_src_file; + assert(migrate_postcopy_preempt()); + channel = CH_POSTCOPY; } if (multifd_recv_setup(errp) != 0) { return; } - if (default_channel) { + if (channel == CH_MAIN) { f = qemu_file_new_input(ioc); migration_incoming_setup(f); - } else { + } else if (channel == CH_MULTIFD) { /* Multiple connections */ - assert(migration_needs_multiple_sockets()); - if (migrate_multifd()) { - multifd_recv_new_channel(ioc, &local_err); - } else { - assert(migrate_postcopy_preempt()); - f = qemu_file_new_input(ioc); - postcopy_preempt_new_channel(mis, f); - } + multifd_recv_new_channel(ioc, &local_err); if (local_err) { error_propagate(errp, local_err); return; } + } else if (channel == CH_POSTCOPY) { + assert(!mis->postcopy_qemufile_dst); + f = qemu_file_new_input(ioc); + postcopy_preempt_new_channel(mis, f); + return; } - if (migration_should_start_incoming(default_channel)) { + if (migration_has_main_and_multifd_channels()) { /* If it's a recovery, we're done */ if (postcopy_try_recover()) { return; @@ -1009,18 +1114,13 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) */ bool migration_has_all_channels(void) { - MigrationIncomingState *mis = migration_incoming_get_current(); - - if (!mis->from_src_file) { + if (!migration_has_main_and_multifd_channels()) { return false; } - if (migrate_multifd()) { - return multifd_recv_all_channels_created(); - } - - if (migrate_postcopy_preempt()) { - return mis->postcopy_qemufile_dst != NULL; + MigrationIncomingState *mis = migration_incoming_get_current(); + if (migrate_postcopy_preempt() && !mis->postcopy_qemufile_dst) { + return false; } return true; @@ -1105,14 +1205,14 @@ void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value) migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf); } -/* - * Return true if we're already in the middle of a migration - * (i.e. 
any of the active or setup states) - */ -bool migration_is_setup_or_active(void) +bool migration_is_running(void) { MigrationState *s = current_migration; + if (!s) { + return false; + } + switch (s->state) { case MIGRATION_STATUS_ACTIVE: case MIGRATION_STATUS_POSTCOPY_ACTIVE: @@ -1123,36 +1223,20 @@ bool migration_is_setup_or_active(void) case MIGRATION_STATUS_PRE_SWITCHOVER: case MIGRATION_STATUS_DEVICE: case MIGRATION_STATUS_WAIT_UNPLUG: + case MIGRATION_STATUS_CANCELLING: case MIGRATION_STATUS_COLO: return true; - default: return false; - } } -bool migration_is_running(void) +static bool migration_is_active(void) { MigrationState *s = current_migration; - switch (s->state) { - case MIGRATION_STATUS_ACTIVE: - case MIGRATION_STATUS_POSTCOPY_ACTIVE: - case MIGRATION_STATUS_POSTCOPY_PAUSED: - case MIGRATION_STATUS_POSTCOPY_RECOVER_SETUP: - case MIGRATION_STATUS_POSTCOPY_RECOVER: - case MIGRATION_STATUS_SETUP: - case MIGRATION_STATUS_PRE_SWITCHOVER: - case MIGRATION_STATUS_DEVICE: - case MIGRATION_STATUS_WAIT_UNPLUG: - case MIGRATION_STATUS_CANCELLING: - return true; - - default: - return false; - - } + return (s->state == MIGRATION_STATUS_ACTIVE || + s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); } static bool migrate_show_downtime(MigrationState *s) @@ -1397,39 +1481,52 @@ void migrate_set_state(MigrationStatus *state, MigrationStatus old_state, } } -static void migrate_fd_cleanup(MigrationState *s) +static void migration_cleanup_json_writer(MigrationState *s) +{ + g_clear_pointer(&s->vmdesc, json_writer_free); +} + +static void migration_cleanup(MigrationState *s) { MigrationEventType type; + QEMUFile *tmp = NULL; + + trace_migration_cleanup(); + + migration_cleanup_json_writer(s); g_free(s->hostname); s->hostname = NULL; - json_writer_free(s->vmdesc); - s->vmdesc = NULL; qemu_savevm_state_cleanup(); + cpr_state_close(); + migrate_hup_delete(s); close_return_path_on_source(s); - if (s->to_dst_file) { - QEMUFile *tmp; - - trace_migrate_fd_cleanup(); + if (s->migration_thread_running) { bql_unlock(); - if (s->migration_thread_running) { - qemu_thread_join(&s->thread); - s->migration_thread_running = false; - } + qemu_thread_join(&s->thread); + s->migration_thread_running = false; bql_lock(); + } - multifd_send_shutdown(); - qemu_mutex_lock(&s->qemu_file_lock); + WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) { + /* + * Close the file handle without the lock to make sure the critical + * section won't block for long. + */ tmp = s->to_dst_file; s->to_dst_file = NULL; - qemu_mutex_unlock(&s->qemu_file_lock); + } + + if (tmp) { /* - * Close the file handle without the lock to make sure the - * critical section won't block for long. + * We only need to shutdown multifd if tmp!=NULL, because if + * tmp==NULL, it means the main channel isn't established, while + * multifd is only setup after that (in migration_thread()). 
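migration_cleanup() above now detaches to_dst_file inside WITH_QEMU_LOCK_GUARD() but performs the actual close after the lock is released, keeping the critical section short. The same detach-then-close pattern in portable C with pthreads (a sketch with made-up names, not QEMU's locking API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t file_lock = PTHREAD_MUTEX_INITIALIZER;
static FILE *to_dst_file;

static void cleanup_channel(void)
{
    FILE *tmp;

    /* Only the pointer swap happens under the lock... */
    pthread_mutex_lock(&file_lock);
    tmp = to_dst_file;
    to_dst_file = NULL;
    pthread_mutex_unlock(&file_lock);

    /* ...the potentially slow close runs with the lock released. */
    if (tmp) {
        fclose(tmp);
    }
}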
*/ + multifd_send_shutdown(); migration_ioc_unregister_yank_from_file(tmp); qemu_fclose(tmp); } @@ -1451,9 +1548,9 @@ static void migrate_fd_cleanup(MigrationState *s) yank_unregister_instance(MIGRATION_YANK_INSTANCE); } -static void migrate_fd_cleanup_bh(void *opaque) +static void migration_cleanup_bh(void *opaque) { - migrate_fd_cleanup(opaque); + migration_cleanup(opaque); } void migrate_set_error(MigrationState *s, const Error *error) @@ -1483,7 +1580,7 @@ static void migrate_error_free(MigrationState *s) } } -static void migrate_fd_error(MigrationState *s, const Error *error) +static void migration_connect_set_error(MigrationState *s, const Error *error) { MigrationStatus current = s->state; MigrationStatus next; @@ -1512,11 +1609,17 @@ static void migrate_fd_error(MigrationState *s, const Error *error) migrate_set_error(s, error); } -static void migrate_fd_cancel(MigrationState *s) +void migration_cancel(void) { + MigrationState *s = migrate_get_current(); int old_state ; + bool setup = (s->state == MIGRATION_STATUS_SETUP); - trace_migrate_fd_cancel(); + trace_migration_cancel(); + + if (migrate_dirty_limit()) { + qmp_cancel_vcpu_dirty_limit(false, -1, NULL); + } WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) { if (s->rp_state.from_dst_file) { @@ -1532,7 +1635,7 @@ static void migrate_fd_cancel(MigrationState *s) } /* If the migration is paused, kick it out of the pause */ if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) { - qemu_sem_post(&s->pause_sem); + qemu_event_set(&s->pause_event); } migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING); } while (s->state != MIGRATION_STATUS_CANCELLING); @@ -1549,15 +1652,16 @@ static void migrate_fd_cancel(MigrationState *s) } } } - if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) { - Error *local_err = NULL; - bdrv_activate_all(&local_err); - if (local_err) { - error_report_err(local_err); - } else { - s->block_inactive = false; - } + /* + * If qmp_migrate_finish has not been called, then there is no path that + * will complete the cancellation. Do it now. 
+ */ + if (setup && !s->to_dst_file) { + migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING, + MIGRATION_STATUS_CANCELLED); + cpr_state_close(); + migrate_hup_delete(s); } } @@ -1644,42 +1748,7 @@ bool migration_incoming_postcopy_advised(void) bool migration_in_bg_snapshot(void) { - return migrate_background_snapshot() && - migration_is_setup_or_active(); -} - -bool migration_is_idle(void) -{ - MigrationState *s = current_migration; - - if (!s) { - return true; - } - - switch (s->state) { - case MIGRATION_STATUS_NONE: - case MIGRATION_STATUS_CANCELLED: - case MIGRATION_STATUS_COMPLETED: - case MIGRATION_STATUS_FAILED: - return true; - default: - return false; - } -} - -bool migration_is_active(void) -{ - MigrationState *s = current_migration; - - return (s->state == MIGRATION_STATUS_ACTIVE || - s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); -} - -bool migration_is_device(void) -{ - MigrationState *s = current_migration; - - return s->state == MIGRATION_STATUS_DEVICE; + return migrate_background_snapshot() && migration_is_running(); } bool migration_thread_is_self(void) @@ -1691,7 +1760,9 @@ bool migration_thread_is_self(void) bool migrate_mode_is_cpr(MigrationState *s) { - return s->parameters.mode == MIG_MODE_CPR_REBOOT; + MigMode mode = s->parameters.mode; + return mode == MIG_MODE_CPR_REBOOT || + mode == MIG_MODE_CPR_TRANSFER; } int migrate_init(MigrationState *s, Error **errp) @@ -1720,7 +1791,10 @@ int migrate_init(MigrationState *s, Error **errp) s->migration_thread_running = false; error_free(s->error); s->error = NULL; - s->vmdesc = NULL; + + if (should_send_vmdesc()) { + s->vmdesc = json_writer_new(false); + } migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); @@ -1745,7 +1819,7 @@ static bool is_busy(Error **reasonp, Error **errp) ERRP_GUARD(); /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */ - if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) { + if (runstate_check(RUN_STATE_SAVE_VM) || migration_is_running()) { error_propagate_prepend(errp, *reasonp, "disallowing migration blocker " "(migration/snapshot in progress) for: "); @@ -1877,6 +1951,17 @@ void qmp_migrate_incoming(const char *uri, bool has_channels, return; } + /* + * Making sure MigrationState is available until incoming migration + * completes. + * + * NOTE: QEMU _might_ leak this refcount in some failure paths, but + * that's OK. This is the minimum change we need to at least making + * sure success case is clean on the refcount. We can try harder to + * make it accurate for any kind of failures, but it might be an + * overkill and doesn't bring us much benefit. 
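The comment above describes why qmp_migrate_incoming() pins the outgoing MigrationState for the whole incoming run and only releases it when process_incoming_migration_co() finishes. The general take-early/release-late refcount pattern, reduced to a GLib atomic counter (a sketch assuming a GLib environment; SharedState and the helpers are made up):

#include <glib.h>

typedef struct {
    gint refcount;
    /* ... state shared by the incoming and outgoing sides ... */
} SharedState;

static SharedState *shared_state_ref(SharedState *s)
{
    g_atomic_int_inc(&s->refcount);
    return s;
}

static void shared_state_unref(SharedState *s)
{
    /* Last reference gone: only now is it safe to free. */
    if (g_atomic_int_dec_and_test(&s->refcount)) {
        g_free(s);
    }
}

static void long_running_op(SharedState *s)
{
    shared_state_ref(s);    /* pin before kicking off work that may outlive the caller */
    /* ... do the work ... */
    shared_state_unref(s);  /* release on every exit path */
}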
+ */ + migrate_incoming_ref_outgoing_state(); once = false; } @@ -2066,6 +2151,40 @@ static bool migrate_prepare(MigrationState *s, bool resume, Error **errp) return true; } +static void qmp_migrate_finish(MigrationAddress *addr, bool resume_requested, + Error **errp); + +static void migrate_hup_add(MigrationState *s, QIOChannel *ioc, GSourceFunc cb, + void *opaque) +{ + s->hup_source = qio_channel_create_watch(ioc, G_IO_HUP); + g_source_set_callback(s->hup_source, cb, opaque, NULL); + g_source_attach(s->hup_source, NULL); +} + +static void migrate_hup_delete(MigrationState *s) +{ + if (s->hup_source) { + g_source_destroy(s->hup_source); + g_source_unref(s->hup_source); + s->hup_source = NULL; + } +} + +static gboolean qmp_migrate_finish_cb(QIOChannel *channel, + GIOCondition cond, + void *opaque) +{ + MigrationAddress *addr = opaque; + + qmp_migrate_finish(addr, false, NULL); + + cpr_state_close(); + migrate_hup_delete(migrate_get_current()); + qapi_free_MigrationAddress(addr); + return G_SOURCE_REMOVE; +} + void qmp_migrate(const char *uri, bool has_channels, MigrationChannelList *channels, bool has_detach, bool detach, bool has_resume, bool resume, Error **errp) @@ -2075,6 +2194,8 @@ void qmp_migrate(const char *uri, bool has_channels, MigrationState *s = migrate_get_current(); g_autoptr(MigrationChannel) channel = NULL; MigrationAddress *addr = NULL; + MigrationChannel *channelv[MIGRATION_CHANNEL_TYPE__MAX] = { NULL }; + MigrationChannel *cpr_channel = NULL; /* * Having preliminary checks for uri and channel @@ -2085,12 +2206,22 @@ void qmp_migrate(const char *uri, bool has_channels, } if (channels) { - /* To verify that Migrate channel list has only item */ - if (channels->next) { - error_setg(errp, "Channel list has more than one entries"); + for ( ; channels; channels = channels->next) { + MigrationChannelType type = channels->value->channel_type; + + if (channelv[type]) { + error_setg(errp, "Channel list has more than one %s entry", + MigrationChannelType_str(type)); + return; + } + channelv[type] = channels->value; + } + cpr_channel = channelv[MIGRATION_CHANNEL_TYPE_CPR]; + addr = channelv[MIGRATION_CHANNEL_TYPE_MAIN]->addr; + if (!addr) { + error_setg(errp, "Channel list has no main entry"); return; } - addr = channels->value->addr; } if (uri) { @@ -2102,7 +2233,12 @@ void qmp_migrate(const char *uri, bool has_channels, } /* transport mechanism not suitable for migration? */ - if (!migration_channels_and_transport_compatible(addr, errp)) { + if (!migration_transport_compatible(addr, errp)) { + return; + } + + if (s->parameters.mode == MIG_MODE_CPR_TRANSFER && !cpr_channel) { + error_setg(errp, "missing 'cpr' migration channel"); return; } @@ -2112,6 +2248,41 @@ void qmp_migrate(const char *uri, bool has_channels, return; } + if (cpr_state_save(cpr_channel, &local_err)) { + goto out; + } + + /* + * For cpr-transfer, the target may not be listening yet on the migration + * channel, because first it must finish cpr_load_state. The target tells + * us it is listening by closing the cpr-state socket. Wait for that HUP + * event before connecting in qmp_migrate_finish. + * + * The HUP could occur because the target fails while reading CPR state, + * in which case the target will not listen for the incoming migration + * connection, so qmp_migrate_finish will fail to connect, and then recover. 
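For cpr-transfer, the comment above notes that the source must not connect until the target signals readiness by closing the cpr-state socket, which migrate_hup_add() turns into a G_IO_HUP watch. A minimal GLib sketch of waiting for a hang-up on a descriptor (plain GIOChannel instead of QEMU's QIOChannel; the function names are invented for the example):

#include <glib.h>

static gboolean on_peer_closed(GIOChannel *ch, GIOCondition cond, gpointer user_data)
{
    /* Peer closed its end: safe to start the real connection now. */
    g_main_loop_quit(user_data);
    return G_SOURCE_REMOVE;
}

static void wait_for_peer_hup(int fd)
{
    GMainLoop *loop = g_main_loop_new(NULL, FALSE);
    GIOChannel *ch = g_io_channel_unix_new(fd);

    g_io_add_watch(ch, G_IO_HUP, on_peer_closed, loop);
    g_main_loop_run(loop);

    g_io_channel_unref(ch);
    g_main_loop_unref(loop);
}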
+ */ + if (s->parameters.mode == MIG_MODE_CPR_TRANSFER) { + migrate_hup_add(s, cpr_state_ioc(), (GSourceFunc)qmp_migrate_finish_cb, + QAPI_CLONE(MigrationAddress, addr)); + + } else { + qmp_migrate_finish(addr, resume_requested, errp); + } + +out: + if (local_err) { + migration_connect_set_error(s, local_err); + error_propagate(errp, local_err); + } +} + +static void qmp_migrate_finish(MigrationAddress *addr, bool resume_requested, + Error **errp) +{ + MigrationState *s = migrate_get_current(); + Error *local_err = NULL; + if (!resume_requested) { if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { return; @@ -2146,7 +2317,7 @@ void qmp_migrate(const char *uri, bool has_channels, if (!resume_requested) { yank_unregister_instance(MIGRATION_YANK_INSTANCE); } - migrate_fd_error(s, local_err); + migration_connect_set_error(s, local_err); error_propagate(errp, local_err); return; } @@ -2154,7 +2325,18 @@ void qmp_migrate(const char *uri, bool has_channels, void qmp_migrate_cancel(Error **errp) { - migration_cancel(NULL); + /* + * After postcopy migration has started, the source machine is not + * recoverable in case of a migration error. This also means the + * cancel command cannot be used as cancel should allow the + * machine to continue operation. + */ + if (migration_in_postcopy()) { + error_setg(errp, "Postcopy migration in progress, cannot cancel."); + return; + } + + migration_cancel(); } void qmp_migrate_continue(MigrationStatus state, Error **errp) @@ -2165,7 +2347,7 @@ void qmp_migrate_continue(MigrationStatus state, Error **errp) MigrationStatus_str(s->state)); return; } - qemu_sem_post(&s->pause_sem); + qemu_event_set(&s->pause_event); } int migration_rp_wait(MigrationState *s) @@ -2273,7 +2455,7 @@ static bool migrate_handle_rp_resume_ack(MigrationState *s, */ static void migration_release_dst_files(MigrationState *ms) { - QEMUFile *file; + QEMUFile *file = NULL; WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) { /* @@ -2318,7 +2500,7 @@ static void *source_return_path_thread(void *opaque) trace_source_return_path_thread_entry(); rcu_register_thread(); - while (migration_is_setup_or_active()) { + while (migration_is_running()) { trace_source_return_path_thread_loop_top(); header_type = qemu_get_be16(rp); @@ -2473,7 +2655,7 @@ static int open_return_path_on_source(MigrationState *ms) trace_open_return_path_on_source(); - qemu_thread_create(&ms->rp_state.rp_thread, "mig/src/rp-thr", + qemu_thread_create(&ms->rp_state.rp_thread, MIGRATION_THREAD_SRC_RETURN, source_return_path_thread, ms, QEMU_THREAD_JOINABLE); ms->rp_state.rp_thread_created = true; @@ -2528,23 +2710,30 @@ static int postcopy_start(MigrationState *ms, Error **errp) int ret; QIOChannelBuffer *bioc; QEMUFile *fb; - uint64_t bandwidth = migrate_max_postcopy_bandwidth(); - bool restart_block = false; - int cur_state = MIGRATION_STATUS_ACTIVE; + + /* + * Now we're 100% sure to switch to postcopy, so JSON writer won't be + * useful anymore. Free the resources early if it is there. Clearing + * the vmdesc also means any follow up vmstate_save()s will start to + * skip all JSON operations, which can shrink postcopy downtime. 
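Freeing the vmdesc JSON writer at postcopy_start(), as described above, relies on the free-and-NULL idiom: migration_cleanup_json_writer() is a g_clear_pointer() call, so later vmstate_save() invocations see a NULL writer and skip the JSON bookkeeping. The idiom in isolation (a sketch assuming GLib; JsonishWriter is a stand-in type):

#include <glib.h>

typedef struct {
    GString *buf;
} JsonishWriter;   /* stand-in for the real JSON writer */

static void jsonish_writer_free(JsonishWriter *w)
{
    if (w) {
        g_string_free(w->buf, TRUE);
        g_free(w);
    }
}

/* Frees *writer (if set) and resets it to NULL in one step, so every
 * later "if (writer) ..." check cheaply turns into a no-op. */
static void drop_writer_early(JsonishWriter **writer)
{
    g_clear_pointer(writer, jsonish_writer_free);
}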
+ */ + migration_cleanup_json_writer(ms); if (migrate_postcopy_preempt()) { migration_wait_main_channel(ms); if (postcopy_preempt_establish_channel(ms)) { - migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); + if (ms->state != MIGRATION_STATUS_CANCELLING) { + migrate_set_state(&ms->state, ms->state, + MIGRATION_STATUS_FAILED); + } error_setg(errp, "%s: Failed to establish preempt channel", __func__); return -1; } } - if (!migrate_pause_before_switchover()) { - migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE, - MIGRATION_STATUS_POSTCOPY_ACTIVE); + if (!qemu_savevm_state_postcopy_prepare(ms->to_dst_file, errp)) { + return -1; } trace_postcopy_start(); @@ -2557,27 +2746,19 @@ static int postcopy_start(MigrationState *ms, Error **errp) goto fail; } - ret = migration_maybe_pause(ms, &cur_state, - MIGRATION_STATUS_POSTCOPY_ACTIVE); - if (ret < 0) { - error_setg_errno(errp, -ret, "%s: Failed in migration_maybe_pause()", - __func__); - goto fail; - } - - ret = bdrv_inactivate_all(); - if (ret < 0) { - error_setg_errno(errp, -ret, "%s: Failed in bdrv_inactivate_all()", - __func__); + if (!migration_switchover_start(ms, errp)) { goto fail; } - restart_block = true; /* * Cause any non-postcopiable, but iterative devices to * send out their final data. */ - qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false); + ret = qemu_savevm_state_complete_precopy_iterable(ms->to_dst_file, true); + if (ret) { + error_setg(errp, "Postcopy save non-postcopiable iterables failed"); + goto fail; + } /* * in Finish migrate and with the io-lock held everything should @@ -2589,12 +2770,6 @@ static int postcopy_start(MigrationState *ms, Error **errp) ram_postcopy_send_discard_bitmap(ms); } - /* - * send rest of state - note things that are doing postcopy - * will notice we're in POSTCOPY_ACTIVE and not actually - * wrap their state up here - */ - migration_rate_set(bandwidth); if (migrate_postcopy_ram()) { /* Ping just for debugging, helps line traces up */ qemu_savevm_send_ping(ms->to_dst_file, 2); @@ -2622,7 +2797,12 @@ static int postcopy_start(MigrationState *ms, Error **errp) */ qemu_savevm_send_postcopy_listen(fb); - qemu_savevm_state_complete_precopy(fb, false, false); + ret = qemu_savevm_state_complete_precopy_non_iterable(fb, true); + if (ret) { + error_setg(errp, "Postcopy save non-iterable device states failed"); + goto fail_closefb; + } + if (migrate_postcopy_ram()) { qemu_savevm_send_ping(fb, 3); } @@ -2641,8 +2821,6 @@ static int postcopy_start(MigrationState *ms, Error **errp) goto fail_closefb; } - restart_block = false; - /* Now send that blob */ if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) { error_setg(errp, "%s: Failed to send packaged data", __func__); @@ -2658,8 +2836,6 @@ static int postcopy_start(MigrationState *ms, Error **errp) migration_downtime_end(ms); - bql_unlock(); - if (migrate_postcopy_ram()) { /* * Although this ping is just for debug, it could potentially be @@ -2675,11 +2851,22 @@ static int postcopy_start(MigrationState *ms, Error **errp) ret = qemu_file_get_error(ms->to_dst_file); if (ret) { error_setg_errno(errp, -ret, "postcopy_start: Migration stream error"); - bql_lock(); goto fail; } trace_postcopy_preempt_enabled(migrate_postcopy_preempt()); + /* + * Now postcopy officially started, switch to postcopy bandwidth that + * user specified. 
+ */ + migration_rate_set(migrate_max_postcopy_bandwidth()); + + /* Now, switchover looks all fine, switching to postcopy-active */ + migrate_set_state(&ms->state, MIGRATION_STATUS_DEVICE, + MIGRATION_STATUS_POSTCOPY_ACTIVE); + + bql_unlock(); + return ret; fail_closefb: @@ -2687,67 +2874,104 @@ static int postcopy_start(MigrationState *ms, Error **errp) fail: migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, MIGRATION_STATUS_FAILED); - if (restart_block) { - /* A failure happened early enough that we know the destination hasn't - * accessed block devices, so we're safe to recover. - */ - Error *local_err = NULL; - - bdrv_activate_all(&local_err); - if (local_err) { - error_report_err(local_err); - } - } + migration_block_activate(NULL); migration_call_notifiers(ms, MIG_EVENT_PRECOPY_FAILED, NULL); bql_unlock(); return -1; } /** - * migration_maybe_pause: Pause if required to by - * migrate_pause_before_switchover called with the BQL locked - * Returns: 0 on success + * @migration_switchover_prepare: Start VM switchover procedure + * + * @s: The migration state object pointer + * + * Prepares for the switchover, depending on "pause-before-switchover" + * capability. + * + * If cap set, state machine goes like: + * [postcopy-]active -> pre-switchover -> device + * + * If cap not set: + * [postcopy-]active -> device + * + * Returns: true on success, false on interruptions. */ -static int migration_maybe_pause(MigrationState *s, - int *current_active_state, - int new_state) +static bool migration_switchover_prepare(MigrationState *s) { + /* Concurrent cancellation? Quit */ + if (s->state == MIGRATION_STATUS_CANCELLING) { + return false; + } + + /* + * No matter precopy or postcopy, since we still hold BQL it must not + * change concurrently to CANCELLING, so it must be either ACTIVE or + * POSTCOPY_ACTIVE. + */ + assert(migration_is_active()); + + /* If the pre stage not requested, directly switch to DEVICE */ if (!migrate_pause_before_switchover()) { - return 0; + migrate_set_state(&s->state, s->state, MIGRATION_STATUS_DEVICE); + return true; } - /* Since leaving this state is not atomic with posting the semaphore + /* + * Since leaving this state is not atomic with setting the event * it's possible that someone could have issued multiple migrate_continue - * and the semaphore is incorrectly positive at this point; - * the docs say it's undefined to reinit a semaphore that's already - * init'd, so use timedwait to eat up any existing posts. + * and the event is incorrectly set at this point so reset it. */ - while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) { - /* This block intentionally left blank */ - } + qemu_event_reset(&s->pause_event); + + /* Update [POSTCOPY_]ACTIVE to PRE_SWITCHOVER */ + migrate_set_state(&s->state, s->state, MIGRATION_STATUS_PRE_SWITCHOVER); + bql_unlock(); + + qemu_event_wait(&s->pause_event); + bql_lock(); /* - * If the migration is cancelled when it is in the completion phase, - * the migration state is set to MIGRATION_STATUS_CANCELLING. - * So we don't need to wait a semaphore, otherwise we would always - * wait for the 'pause_sem' semaphore. + * After BQL released and retaken, the state can be CANCELLING if it + * happend during sem_wait().. Only change the state if it's still + * pre-switchover. 
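migration_switchover_prepare() above swaps the old pause semaphore for an event: reset before publishing PRE_SWITCHOVER, wait with the BQL dropped, and let migrate_continue/cancel set it, which removes the old timedwait loop that drained stale semaphore posts. A portable sketch of the same reset/wait/set semantics built from a mutex and condition variable (QemuEvent itself is QEMU-internal):

#include <pthread.h>
#include <stdbool.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    bool set;
} Event;

static void event_reset(Event *e)
{
    pthread_mutex_lock(&e->lock);
    e->set = false;                   /* stale set() calls from earlier rounds vanish here */
    pthread_mutex_unlock(&e->lock);
}

static void event_set(Event *e)
{
    pthread_mutex_lock(&e->lock);
    e->set = true;                    /* level-triggered: stays set until the next reset */
    pthread_cond_broadcast(&e->cond);
    pthread_mutex_unlock(&e->lock);
}

static void event_wait(Event *e)
{
    pthread_mutex_lock(&e->lock);
    while (!e->set) {                 /* unlike a semaphore, repeated set() calls don't pile up */
        pthread_cond_wait(&e->cond, &e->lock);
    }
    pthread_mutex_unlock(&e->lock);
}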
*/ - if (s->state != MIGRATION_STATUS_CANCELLING) { - bql_unlock(); - migrate_set_state(&s->state, *current_active_state, - MIGRATION_STATUS_PRE_SWITCHOVER); - qemu_sem_wait(&s->pause_sem); - migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER, - new_state); - *current_active_state = new_state; - bql_lock(); + migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER, + MIGRATION_STATUS_DEVICE); + + return s->state == MIGRATION_STATUS_DEVICE; +} + +static bool migration_switchover_start(MigrationState *s, Error **errp) +{ + ERRP_GUARD(); + + if (!migration_switchover_prepare(s)) { + error_setg(errp, "Switchover is interrupted"); + return false; + } + + /* Inactivate disks except in COLO */ + if (!migrate_colo()) { + /* + * Inactivate before sending QEMU_VM_EOF so that the + * bdrv_activate_all() on the other end won't fail. + */ + if (!migration_block_inactivate()) { + error_setg(errp, "Block inactivate failed during switchover"); + return false; + } } - return s->state == new_state ? 0 : -EINVAL; + migration_rate_set(RATE_LIMIT_DISABLED); + + precopy_notify_complete(); + + qemu_savevm_maybe_send_switchover_start(s->to_dst_file); + + return true; } -static int migration_completion_precopy(MigrationState *s, - int *current_active_state) +static int migration_completion_precopy(MigrationState *s) { int ret; @@ -2760,20 +2984,12 @@ static int migration_completion_precopy(MigrationState *s, } } - ret = migration_maybe_pause(s, current_active_state, - MIGRATION_STATUS_DEVICE); - if (ret < 0) { + if (!migration_switchover_start(s, NULL)) { + ret = -EFAULT; goto out_unlock; } - /* - * Inactivate disks except in COLO, and track that we have done so in order - * to remember to reactivate them if migration fails or is cancelled. - */ - s->block_inactive = !migrate_colo(); - migration_rate_set(RATE_LIMIT_DISABLED); - ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, - s->block_inactive); + ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false); out_unlock: bql_unlock(); return ret; @@ -2798,31 +3014,6 @@ static void migration_completion_postcopy(MigrationState *s) trace_migration_completion_postcopy_end_after_complete(); } -static void migration_completion_failed(MigrationState *s, - int current_active_state) -{ - if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE || - s->state == MIGRATION_STATUS_DEVICE)) { - /* - * If not doing postcopy, vm_start() will be called: let's - * regain control on images. - */ - Error *local_err = NULL; - - bql_lock(); - bdrv_activate_all(&local_err); - if (local_err) { - error_report_err(local_err); - } else { - s->block_inactive = false; - } - bql_unlock(); - } - - migrate_set_state(&s->state, current_active_state, - MIGRATION_STATUS_FAILED); -} - /** * migration_completion: Used by migration_thread when there's not much left. * The caller 'breaks' the loop when this returns. 
@@ -2832,11 +3023,10 @@ static void migration_completion_failed(MigrationState *s, static void migration_completion(MigrationState *s) { int ret = 0; - int current_active_state = s->state; Error *local_err = NULL; if (s->state == MIGRATION_STATUS_ACTIVE) { - ret = migration_completion_precopy(s, ¤t_active_state); + ret = migration_completion_precopy(s); } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { migration_completion_postcopy(s); } else { @@ -2876,7 +3066,9 @@ static void migration_completion(MigrationState *s) error_free(local_err); } - migration_completion_failed(s, current_active_state); + if (s->state != MIGRATION_STATUS_CANCELLING) { + migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED); + } } /** @@ -2899,7 +3091,7 @@ static void bg_migration_completion(MigrationState *s) qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage); qemu_fflush(s->to_dst_file); } else if (s->state == MIGRATION_STATUS_CANCELLING) { - goto fail; + return; } if (qemu_file_get_error(s->to_dst_file)) { @@ -3249,33 +3441,60 @@ static MigIterateState migration_iteration_run(MigrationState *s) Error *local_err = NULL; bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE; bool can_switchover = migration_can_switchover(s); + bool complete_ready; + /* Fast path - get the estimated amount of pending data */ qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy); pending_size = must_precopy + can_postcopy; trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy); - if (pending_size < s->threshold_size) { - qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy); - pending_size = must_precopy + can_postcopy; - trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy); + if (in_postcopy) { + /* + * Iterate in postcopy until all pending data flushed. Note that + * postcopy completion doesn't rely on can_switchover, because when + * POSTCOPY_ACTIVE it means switchover already happened. + */ + complete_ready = !pending_size; + } else { + /* + * Exact pending reporting is only needed for precopy. Taking RAM + * as example, there'll be no extra dirty information after + * postcopy started, so ESTIMATE should always match with EXACT + * during postcopy phase. + */ + if (pending_size < s->threshold_size) { + qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy); + pending_size = must_precopy + can_postcopy; + trace_migrate_pending_exact(pending_size, must_precopy, + can_postcopy); + } + + /* Should we switch to postcopy now? 
*/ + if (must_precopy <= s->threshold_size && + can_switchover && qatomic_read(&s->start_postcopy)) { + if (postcopy_start(s, &local_err)) { + migrate_set_error(s, local_err); + error_report_err(local_err); + } + return MIG_ITERATE_SKIP; + } + + /* + * For precopy, migration can complete only if: + * + * (1) Switchover is acknowledged by destination + * (2) Pending size is no more than the threshold specified + * (which was calculated from expected downtime) + */ + complete_ready = can_switchover && (pending_size <= s->threshold_size); } - if ((!pending_size || pending_size < s->threshold_size) && can_switchover) { + if (complete_ready) { trace_migration_thread_low_pending(pending_size); migration_completion(s); return MIG_ITERATE_BREAK; } - /* Still a significant amount to transfer */ - if (!in_postcopy && must_precopy <= s->threshold_size && can_switchover && - qatomic_read(&s->start_postcopy)) { - if (postcopy_start(s, &local_err)) { - migrate_set_error(s, local_err); - error_report_err(local_err); - } - return MIG_ITERATE_SKIP; - } - /* Just another iteration step */ qemu_savevm_state_iterate(s->to_dst_file, in_postcopy); return MIG_ITERATE_RESUME; @@ -3283,10 +3502,17 @@ static MigIterateState migration_iteration_run(MigrationState *s) static void migration_iteration_finish(MigrationState *s) { - /* If we enabled cpu throttling for auto-converge, turn it off. */ - cpu_throttle_stop(); - bql_lock(); + + /* + * If we enabled cpu throttling for auto-converge, turn it off. + * Stopping CPU throttle should be serialized by BQL to avoid + * racing for the throttle_dirty_sync_timer. + */ + if (migrate_auto_converge()) { + cpu_throttle_stop(); + } + switch (s->state) { case MIGRATION_STATUS_COMPLETED: runstate_set(RUN_STATE_POSTMIGRATE); @@ -3299,6 +3525,11 @@ static void migration_iteration_finish(MigrationState *s) case MIGRATION_STATUS_FAILED: case MIGRATION_STATUS_CANCELLED: case MIGRATION_STATUS_CANCELLING: + /* + * Re-activate the block drives if they're inactivated. Note, COLO + * shouldn't use block_active at all, so it should be no-op there. 
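The reworked migration_iteration_run() above makes the completion rule explicit: postcopy finishes only when nothing is pending, while precopy additionally requires the destination to have acked switchover and the pending data to fit under the downtime-derived threshold. The decision reduced to a pure helper (names are local to this sketch):

#include <stdbool.h>
#include <stdint.h>

static bool iteration_complete_ready(bool in_postcopy, bool can_switchover,
                                     uint64_t pending_size, uint64_t threshold_size)
{
    if (in_postcopy) {
        /* Switchover already happened; just drain whatever is left. */
        return pending_size == 0;
    }

    /* Precopy: destination must have acked switchover, and the remainder
     * must be small enough to transfer within the allowed downtime. */
    return can_switchover && pending_size <= threshold_size;
}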
+ */ + migration_block_activate(NULL); if (runstate_is_live(s->vm_old_state)) { if (!runstate_check(RUN_STATE_SHUTDOWN)) { vm_start(); @@ -3316,7 +3547,7 @@ static void migration_iteration_finish(MigrationState *s) break; } - migration_bh_schedule(migrate_fd_cleanup_bh, s); + migration_bh_schedule(migration_cleanup_bh, s); bql_unlock(); } @@ -3344,7 +3575,7 @@ static void bg_migration_iteration_finish(MigrationState *s) break; } - migration_bh_schedule(migrate_fd_cleanup_bh, s); + migration_bh_schedule(migration_cleanup_bh, s); bql_unlock(); } @@ -3462,11 +3693,11 @@ static void *migration_thread(void *opaque) Error *local_err = NULL; int ret; - thread = migration_threads_add("live_migration", qemu_get_thread_id()); + thread = migration_threads_add(MIGRATION_THREAD_SRC_MAIN, + qemu_get_thread_id()); rcu_register_thread(); - object_ref(OBJECT(s)); update_iteration_initial_status(s); if (!multifd_send_setup()) { @@ -3503,6 +3734,11 @@ static void *migration_thread(void *opaque) qemu_savevm_send_colo_enable(s->to_dst_file); } + if (migrate_auto_converge()) { + /* Start RAMBlock dirty bitmap sync timer */ + cpu_throttle_dirty_sync_timer(true); + } + bql_lock(); ret = qemu_savevm_state_setup(s->to_dst_file, &local_err); bql_unlock(); @@ -3599,7 +3835,6 @@ static void *bg_migration_thread(void *opaque) int ret; rcu_register_thread(); - object_ref(OBJECT(s)); migration_rate_set(RATE_LIMIT_DISABLED); @@ -3657,12 +3892,8 @@ static void *bg_migration_thread(void *opaque) if (migration_stop_vm(s, RUN_STATE_PAUSED)) { goto fail; } - /* - * Put vCPUs in sync with shadow context structures, then - * save their state to channel-buffer along with devices. - */ - cpu_synchronize_all_states(); - if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) { + + if (qemu_savevm_state_complete_precopy_non_iterable(fb, false)) { goto fail; } /* @@ -3688,9 +3919,8 @@ static void *bg_migration_thread(void *opaque) while (migration_is_active()) { MigIterateState iter_state = bg_migration_iteration_run(s); - if (iter_state == MIG_ITERATE_SKIP) { - continue; - } else if (iter_state == MIG_ITERATE_BREAK) { + + if (iter_state == MIG_ITERATE_BREAK) { break; } @@ -3726,7 +3956,7 @@ static void *bg_migration_thread(void *opaque) return NULL; } -void migrate_fd_connect(MigrationState *s, Error *error_in) +void migration_connect(MigrationState *s, Error *error_in) { Error *local_err = NULL; uint64_t rate_limit; @@ -3736,24 +3966,24 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) /* * If there's a previous error, free it and prepare for another one. * Meanwhile if migration completes successfully, there won't have an error - * dumped when calling migrate_fd_cleanup(). + * dumped when calling migration_cleanup(). */ migrate_error_free(s); s->expected_downtime = migrate_downtime_limit(); if (error_in) { - migrate_fd_error(s, error_in); + migration_connect_set_error(s, error_in); if (resume) { /* * Don't do cleanup for resume if channel is invalid, but only dump * the error. We wait for another channel connect from the user. * The error_report still gives HMP user a hint on what failed. - * It's normally done in migrate_fd_cleanup(), but call it here + * It's normally done in migration_cleanup(), but call it here * explicitly. 
*/ error_report_err(error_copy(s->error)); } else { - migrate_fd_cleanup(s); + migration_cleanup(s); } return; } @@ -3811,11 +4041,19 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) } } + /* + * Take a refcount to make sure the migration object won't get freed by + * the main thread already in migration_shutdown(). + * + * The refcount will be released at the end of the thread function. + */ + object_ref(OBJECT(s)); + if (migrate_background_snapshot()) { - qemu_thread_create(&s->thread, "mig/snapshot", + qemu_thread_create(&s->thread, MIGRATION_THREAD_SNAPSHOT, bg_migration_thread, s, QEMU_THREAD_JOINABLE); } else { - qemu_thread_create(&s->thread, "mig/src/main", + qemu_thread_create(&s->thread, MIGRATION_THREAD_SRC_MAIN, migration_thread, s, QEMU_THREAD_JOINABLE); } s->migration_thread_running = true; @@ -3823,17 +4061,20 @@ void migrate_fd_connect(MigrationState *s, Error *error_in) fail: migrate_set_error(s, local_err); - migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED); + if (s->state != MIGRATION_STATUS_CANCELLING) { + migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED); + } error_report_err(local_err); - migrate_fd_cleanup(s); + migration_cleanup(s); } -static void migration_class_init(ObjectClass *klass, void *data) +static void migration_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->user_creatable = false; - device_class_set_props(dc, migration_properties); + device_class_set_props_n(dc, migration_properties, + migration_properties_count); } static void migration_instance_finalize(Object *obj) @@ -3844,7 +4085,7 @@ static void migration_instance_finalize(Object *obj) qemu_mutex_destroy(&ms->qemu_file_lock); qemu_sem_destroy(&ms->wait_unplug_sem); qemu_sem_destroy(&ms->rate_limit_sem); - qemu_sem_destroy(&ms->pause_sem); + qemu_event_destroy(&ms->pause_event); qemu_sem_destroy(&ms->postcopy_pause_sem); qemu_sem_destroy(&ms->rp_state.rp_sem); qemu_sem_destroy(&ms->rp_state.rp_pong_acks); @@ -3859,7 +4100,7 @@ static void migration_instance_init(Object *obj) ms->state = MIGRATION_STATUS_NONE; ms->mbps = -1; ms->pages_per_second = -1; - qemu_sem_init(&ms->pause_sem, 0); + qemu_event_init(&ms->pause_event, false); qemu_mutex_init(&ms->error_mutex); migrate_params_init(&ms->parameters); diff --git a/migration/migration.h b/migration/migration.h index 38aa1402d51..01329bf8248 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -17,7 +17,7 @@ #include "exec/cpu-common.h" #include "hw/qdev-core.h" #include "qapi/qapi-types-migration.h" -#include "qapi/qmp/json-writer.h" +#include "qobject/json-writer.h" #include "qemu/thread.h" #include "qemu/coroutine.h" #include "io/channel.h" @@ -25,10 +25,25 @@ #include "net/announce.h" #include "qom/object.h" #include "postcopy-ram.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "migration/misc.h" +#define MIGRATION_THREAD_SNAPSHOT "mig/snapshot" +#define MIGRATION_THREAD_DIRTY_RATE "mig/dirtyrate" + +#define MIGRATION_THREAD_SRC_MAIN "mig/src/main" +#define MIGRATION_THREAD_SRC_MULTIFD "mig/src/send_%d" +#define MIGRATION_THREAD_SRC_RETURN "mig/src/return" +#define MIGRATION_THREAD_SRC_TLS "mig/src/tls" + +#define MIGRATION_THREAD_DST_COLO "mig/dst/colo" +#define MIGRATION_THREAD_DST_MULTIFD "mig/dst/recv_%d" +#define MIGRATION_THREAD_DST_FAULT "mig/dst/fault" +#define MIGRATION_THREAD_DST_LISTEN "mig/dst/listen" +#define MIGRATION_THREAD_DST_PREEMPT "mig/dst/preempt" + struct PostcopyBlocktimeContext; +typedef struct 
ThreadPool ThreadPool; #define MIGRATION_RESUME_ACK_VALUE (1) @@ -83,9 +98,9 @@ struct MigrationIncomingState { void (*transport_cleanup)(void *data); /* * Used to sync thread creations. Note that we can't create threads in - * parallel with this sem. + * parallel with this event. */ - QemuSemaphore thread_sync_sem; + QemuEvent thread_sync_event; /* * Free at the start of the main state load, set as the main thread finishes * loading state. @@ -171,7 +186,11 @@ struct MigrationIncomingState { /* The coroutine we should enter (back) after failover */ Coroutine *colo_incoming_co; - QemuSemaphore colo_incoming_sem; + QemuEvent colo_incoming_event; + + /* Optional load threads pool and its thread exit request flag */ + ThreadPool *load_threads; + bool load_threads_abort; /* * PostcopyBlocktimeContext to keep information for postcopy @@ -356,17 +375,14 @@ struct MigrationState { /* Flag set once the migration thread is running (and needs joining) */ bool migration_thread_running; - /* Flag set once the migration thread called bdrv_inactivate_all */ - bool block_inactive; - /* Migration is waiting for guest to unplug device */ QemuSemaphore wait_unplug_sem; /* Migration is paused due to pause-before-switchover */ - QemuSemaphore pause_sem; + QemuEvent pause_event; - /* The semaphore is used to notify COLO thread that failover is finished */ - QemuSemaphore colo_exit_sem; + /* The event is used to notify COLO thread that failover is finished */ + QemuEvent colo_exit_event; /* The event is used to notify COLO thread to do checkpoint */ QemuEvent colo_checkpoint_event; @@ -389,6 +405,8 @@ struct MigrationState { bool send_configuration; /* Whether we send section footer during migration */ bool send_section_footer; + /* Whether we send switchover start notification during migration */ + bool send_switchover_start; /* Needed by postcopy-pause state */ QemuSemaphore postcopy_pause_sem; @@ -432,6 +450,39 @@ struct MigrationState { * Default value is false. (since 8.1) */ bool multifd_flush_after_each_section; + + /* + * This variable only makes sense when set on the machine that is + * the destination of a multifd migration with TLS enabled. It + * affects the behavior of the last send->recv iteration with + * regards to termination of the TLS session. + * + * When set: + * + * - the destination QEMU instance can expect to never get a + * GNUTLS_E_PREMATURE_TERMINATION error. Manifested as the error + * message: "The TLS connection was non-properly terminated". + * + * When clear: + * + * - the destination QEMU instance can expect to see a + * GNUTLS_E_PREMATURE_TERMINATION error in any multifd channel + * whenever the last recv() call of that channel happens after + * the source QEMU instance has already issued shutdown() on the + * channel. + * + * Commit 637280aeb2 (since 9.1) introduced a side effect that + * causes the destination instance to not be affected by the + * premature termination, while commit 1d457daf86 (since 10.0) + * causes the premature termination condition to be once again + * reachable. + * + * NOTE: Regardless of the state of this option, a premature + * termination of the TLS connection might happen due to error at + * any moment prior to the last send->recv iteration. + */ + bool multifd_clean_tls_termination; + /* * This decides the size of guest memory chunk that will be used * to track dirty bitmap clearing. 
The size of memory chunk will @@ -457,6 +508,8 @@ struct MigrationState { bool switchover_acked; /* Is this a rdma migration */ bool rdma_migration; + + GSource *hup_source; }; void migrate_set_state(MigrationStatus *state, MigrationStatus old_state, @@ -471,7 +524,7 @@ bool migration_has_all_channels(void); void migrate_set_error(MigrationState *s, const Error *error); bool migrate_has_error(MigrationState *s); -void migrate_fd_connect(MigrationState *s, Error *error_in); +void migration_connect(MigrationState *s, Error *error_in); int migration_call_notifiers(MigrationState *s, MigrationEventType type, Error **errp); @@ -493,7 +546,7 @@ void migrate_send_rp_shut(MigrationIncomingState *mis, void migrate_send_rp_pong(MigrationIncomingState *mis, uint32_t value); int migrate_send_rp_req_pages(MigrationIncomingState *mis, RAMBlock *rb, - ram_addr_t start, uint64_t haddr); + ram_addr_t start, uint64_t haddr, uint32_t tid); int migrate_send_rp_message_req_pages(MigrationIncomingState *mis, RAMBlock *rb, ram_addr_t start); void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis, @@ -508,8 +561,6 @@ bool check_dirty_bitmap_mig_alias_map(const BitmapMigrationNodeAliasList *bbm, Error **errp); void migrate_add_address(SocketAddress *address); -bool migrate_uri_parse(const char *uri, MigrationChannel **channel, - Error **errp); int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque); #define qemu_ram_foreach_block \ @@ -519,7 +570,7 @@ void migration_make_urgent_request(void); void migration_consume_urgent_request(void); bool migration_rate_limit(void); void migration_bh_schedule(QEMUBHFunc *cb, void *opaque); -void migration_cancel(const Error *error); +void migration_cancel(void); void migration_populate_vfio_info(MigrationInfo *info); void migration_reset_vfio_bytes_transferred(void); @@ -537,4 +588,10 @@ int migration_rp_wait(MigrationState *s); */ void migration_rp_kick(MigrationState *s); +void migration_bitmap_sync_precopy(bool last_stage); + +/* migration/block-dirty-bitmap.c */ +void dirty_bitmap_mig_init(void); +bool should_send_vmdesc(void); + #endif diff --git a/migration/multifd-device-state.c b/migration/multifd-device-state.c new file mode 100644 index 00000000000..fce64f00b08 --- /dev/null +++ b/migration/multifd-device-state.c @@ -0,0 +1,212 @@ +/* + * Multifd device state migration + * + * Copyright (C) 2024,2025 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/lockable.h" +#include "block/thread-pool.h" +#include "migration.h" +#include "migration/misc.h" +#include "multifd.h" +#include "options.h" + +static struct { + QemuMutex queue_job_mutex; + + MultiFDSendData *send_data; + + ThreadPool *threads; + bool threads_abort; +} *multifd_send_device_state; + +void multifd_device_state_send_setup(void) +{ + assert(!multifd_send_device_state); + multifd_send_device_state = g_malloc(sizeof(*multifd_send_device_state)); + + qemu_mutex_init(&multifd_send_device_state->queue_job_mutex); + + multifd_send_device_state->send_data = multifd_send_data_alloc(); + + multifd_send_device_state->threads = thread_pool_new(); + multifd_send_device_state->threads_abort = false; +} + +void multifd_device_state_send_cleanup(void) +{ + g_clear_pointer(&multifd_send_device_state->threads, thread_pool_free); + g_clear_pointer(&multifd_send_device_state->send_data, + multifd_send_data_free); + + qemu_mutex_destroy(&multifd_send_device_state->queue_job_mutex); + + g_clear_pointer(&multifd_send_device_state, g_free); +} + +void multifd_send_data_clear_device_state(MultiFDDeviceState_t *device_state) +{ + g_clear_pointer(&device_state->idstr, g_free); + g_clear_pointer(&device_state->buf, g_free); +} + +static void multifd_device_state_fill_packet(MultiFDSendParams *p) +{ + MultiFDDeviceState_t *device_state = &p->data->u.device_state; + MultiFDPacketDeviceState_t *packet = p->packet_device_state; + + packet->hdr.flags = cpu_to_be32(p->flags); + strncpy(packet->idstr, device_state->idstr, sizeof(packet->idstr) - 1); + packet->idstr[sizeof(packet->idstr) - 1] = 0; + packet->instance_id = cpu_to_be32(device_state->instance_id); + packet->next_packet_size = cpu_to_be32(p->next_packet_size); +} + +static void multifd_prepare_header_device_state(MultiFDSendParams *p) +{ + p->iov[0].iov_len = sizeof(*p->packet_device_state); + p->iov[0].iov_base = p->packet_device_state; + p->iovs_num++; +} + +void multifd_device_state_send_prepare(MultiFDSendParams *p) +{ + MultiFDDeviceState_t *device_state = &p->data->u.device_state; + + assert(multifd_payload_device_state(p->data)); + + multifd_prepare_header_device_state(p); + + assert(!(p->flags & MULTIFD_FLAG_SYNC)); + + p->next_packet_size = device_state->buf_len; + if (p->next_packet_size > 0) { + p->iov[p->iovs_num].iov_base = device_state->buf; + p->iov[p->iovs_num].iov_len = p->next_packet_size; + p->iovs_num++; + } + + p->flags |= MULTIFD_FLAG_NOCOMP | MULTIFD_FLAG_DEVICE_STATE; + + multifd_device_state_fill_packet(p); +} + +bool multifd_queue_device_state(char *idstr, uint32_t instance_id, + char *data, size_t len) +{ + /* Device state submissions can come from multiple threads */ + QEMU_LOCK_GUARD(&multifd_send_device_state->queue_job_mutex); + MultiFDDeviceState_t *device_state; + + assert(multifd_payload_empty(multifd_send_device_state->send_data)); + + multifd_set_payload_type(multifd_send_device_state->send_data, + MULTIFD_PAYLOAD_DEVICE_STATE); + device_state = &multifd_send_device_state->send_data->u.device_state; + device_state->idstr = g_strdup(idstr); + device_state->instance_id = instance_id; + device_state->buf = g_memdup2(data, len); + device_state->buf_len = len; + + if (!multifd_send(&multifd_send_device_state->send_data)) { + multifd_send_data_clear(multifd_send_device_state->send_data); + return false; + } + + return true; +} + +bool multifd_device_state_supported(void) +{ + return 
migrate_multifd() && !migrate_mapped_ram() && + migrate_multifd_compression() == MULTIFD_COMPRESSION_NONE; +} + +static void multifd_device_state_save_thread_data_free(void *opaque) +{ + SaveCompletePrecopyThreadData *data = opaque; + + g_clear_pointer(&data->idstr, g_free); + g_free(data); +} + +static int multifd_device_state_save_thread(void *opaque) +{ + SaveCompletePrecopyThreadData *data = opaque; + g_autoptr(Error) local_err = NULL; + + if (!data->hdlr(data, &local_err)) { + MigrationState *s = migrate_get_current(); + + /* + * Can't call abort_device_state_save_threads() here since new + * save threads could still be in process of being launched + * (if, for example, the very first save thread launched exited + * with an error very quickly). + */ + + assert(local_err); + + /* + * In case of multiple save threads failing which thread error + * return we end setting is purely arbitrary. + */ + migrate_set_error(s, local_err); + } + + return 0; +} + +bool multifd_device_state_save_thread_should_exit(void) +{ + return qatomic_read(&multifd_send_device_state->threads_abort); +} + +void +multifd_spawn_device_state_save_thread(SaveCompletePrecopyThreadHandler hdlr, + char *idstr, uint32_t instance_id, + void *opaque) +{ + SaveCompletePrecopyThreadData *data; + + assert(multifd_device_state_supported()); + assert(multifd_send_device_state); + + assert(!qatomic_read(&multifd_send_device_state->threads_abort)); + + data = g_new(SaveCompletePrecopyThreadData, 1); + data->hdlr = hdlr; + data->idstr = g_strdup(idstr); + data->instance_id = instance_id; + data->handler_opaque = opaque; + + thread_pool_submit_immediate(multifd_send_device_state->threads, + multifd_device_state_save_thread, + data, + multifd_device_state_save_thread_data_free); +} + +void multifd_abort_device_state_save_threads(void) +{ + assert(multifd_device_state_supported()); + + qatomic_set(&multifd_send_device_state->threads_abort, true); +} + +bool multifd_join_device_state_save_threads(void) +{ + MigrationState *s = migrate_get_current(); + + assert(multifd_device_state_supported()); + + thread_pool_wait(multifd_send_device_state->threads); + + return !migrate_has_error(s); +} diff --git a/migration/multifd-nocomp.c b/migration/multifd-nocomp.c new file mode 100644 index 00000000000..b48eae3d861 --- /dev/null +++ b/migration/multifd-nocomp.c @@ -0,0 +1,468 @@ +/* + * Multifd RAM migration without compression + * + * Copyright (c) 2019-2020 Red Hat Inc + * + * Authors: + * Juan Quintela + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
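The helpers above (multifd_queue_device_state(), multifd_spawn_device_state_save_thread() and multifd_device_state_save_thread_should_exit()) are the consumer-facing side of the new device-state multifd path. The sketch below shows how a device's save_complete_precopy_thread handler might use them; it is illustrative only and not part of the patch. MyDevState, my_dev_read_next_chunk() and MY_DEV_CHUNK_SIZE are hypothetical, and the usual QEMU migration headers are assumed.

/* Illustrative sketch, not part of this patch. */
static bool my_dev_save_complete_precopy_thread(SaveCompletePrecopyThreadData *d,
                                                Error **errp)
{
    MyDevState *dev = d->handler_opaque;          /* hypothetical device type */
    g_autofree char *buf = g_malloc(MY_DEV_CHUNK_SIZE);
    size_t len;

    while ((len = my_dev_read_next_chunk(dev, buf, MY_DEV_CHUNK_SIZE)) > 0) {
        /* Stop early if another save thread failed or migration is aborting */
        if (multifd_device_state_save_thread_should_exit()) {
            return true;
        }
        /* Hand the chunk to an idle multifd channel as a device-state packet */
        if (!multifd_queue_device_state(d->idstr, d->instance_id, buf, len)) {
            error_setg(errp, "%s: failed to queue device state", d->idstr);
            return false;
        }
    }
    return true;
}

The device would arrange for this handler to run by calling multifd_spawn_device_state_save_thread(my_dev_save_complete_precopy_thread, idstr, instance_id, dev) from its own save-completion path, after checking multifd_device_state_supported().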
+ */ + +#include "qemu/osdep.h" +#include "system/ramblock.h" +#include "exec/target_page.h" +#include "file.h" +#include "migration-stats.h" +#include "multifd.h" +#include "options.h" +#include "migration.h" +#include "qapi/error.h" +#include "qemu/cutils.h" +#include "qemu/error-report.h" +#include "trace.h" +#include "qemu-file.h" + +static MultiFDSendData *multifd_ram_send; + +void multifd_ram_payload_alloc(MultiFDPages_t *pages) +{ + pages->offset = g_new0(ram_addr_t, multifd_ram_page_count()); +} + +void multifd_ram_payload_free(MultiFDPages_t *pages) +{ + g_clear_pointer(&pages->offset, g_free); +} + +void multifd_ram_save_setup(void) +{ + multifd_ram_send = multifd_send_data_alloc(); +} + +void multifd_ram_save_cleanup(void) +{ + g_clear_pointer(&multifd_ram_send, multifd_send_data_free); +} + +static void multifd_set_file_bitmap(MultiFDSendParams *p) +{ + MultiFDPages_t *pages = &p->data->u.ram; + + assert(pages->block); + + for (int i = 0; i < pages->normal_num; i++) { + ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], true); + } + + for (int i = pages->normal_num; i < pages->num; i++) { + ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], false); + } +} + +static int multifd_nocomp_send_setup(MultiFDSendParams *p, Error **errp) +{ + uint32_t page_count = multifd_ram_page_count(); + + if (migrate_zero_copy_send()) { + p->write_flags |= QIO_CHANNEL_WRITE_FLAG_ZERO_COPY; + } + + if (!migrate_mapped_ram()) { + /* We need one extra place for the packet header */ + p->iov = g_new0(struct iovec, page_count + 1); + } else { + p->iov = g_new0(struct iovec, page_count); + } + + return 0; +} + +static void multifd_nocomp_send_cleanup(MultiFDSendParams *p, Error **errp) +{ + g_free(p->iov); + p->iov = NULL; +} + +static void multifd_ram_prepare_header(MultiFDSendParams *p) +{ + p->iov[0].iov_len = p->packet_len; + p->iov[0].iov_base = p->packet; + p->iovs_num++; +} + +static void multifd_send_prepare_iovs(MultiFDSendParams *p) +{ + MultiFDPages_t *pages = &p->data->u.ram; + uint32_t page_size = multifd_ram_page_size(); + + for (int i = 0; i < pages->normal_num; i++) { + p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i]; + p->iov[p->iovs_num].iov_len = page_size; + p->iovs_num++; + } + + p->next_packet_size = pages->normal_num * page_size; +} + +static int multifd_nocomp_send_prepare(MultiFDSendParams *p, Error **errp) +{ + bool use_zero_copy_send = migrate_zero_copy_send(); + int ret; + + multifd_send_zero_page_detect(p); + + if (migrate_mapped_ram()) { + multifd_send_prepare_iovs(p); + multifd_set_file_bitmap(p); + + return 0; + } + + if (!use_zero_copy_send) { + /* + * Only !zerocopy needs the header in IOV; zerocopy will + * send it separately. 
+ */ + multifd_ram_prepare_header(p); + } + + multifd_send_prepare_iovs(p); + p->flags |= MULTIFD_FLAG_NOCOMP; + + multifd_send_fill_packet(p); + + if (use_zero_copy_send) { + /* Send header first, without zerocopy */ + ret = qio_channel_write_all(p->c, (void *)p->packet, + p->packet_len, errp); + if (ret != 0) { + return -1; + } + + stat64_add(&mig_stats.multifd_bytes, p->packet_len); + } + + return 0; +} + +static int multifd_nocomp_recv_setup(MultiFDRecvParams *p, Error **errp) +{ + p->iov = g_new0(struct iovec, multifd_ram_page_count()); + return 0; +} + +static void multifd_nocomp_recv_cleanup(MultiFDRecvParams *p) +{ + g_free(p->iov); + p->iov = NULL; +} + +static int multifd_nocomp_recv(MultiFDRecvParams *p, Error **errp) +{ + uint32_t flags; + + if (migrate_mapped_ram()) { + return multifd_file_recv_data(p, errp); + } + + flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; + + if (flags != MULTIFD_FLAG_NOCOMP) { + error_setg(errp, "multifd %u: flags received %x flags expected %x", + p->id, flags, MULTIFD_FLAG_NOCOMP); + return -1; + } + + multifd_recv_zero_page_process(p); + + if (!p->normal_num) { + return 0; + } + + for (int i = 0; i < p->normal_num; i++) { + p->iov[i].iov_base = p->host + p->normal[i]; + p->iov[i].iov_len = multifd_ram_page_size(); + ramblock_recv_bitmap_set_offset(p->block, p->normal[i]); + } + return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp); +} + +static void multifd_pages_reset(MultiFDPages_t *pages) +{ + /* + * We don't need to touch offset[] array, because it will be + * overwritten later when reused. + */ + pages->num = 0; + pages->normal_num = 0; + pages->block = NULL; +} + +void multifd_ram_fill_packet(MultiFDSendParams *p) +{ + MultiFDPacket_t *packet = p->packet; + MultiFDPages_t *pages = &p->data->u.ram; + uint32_t zero_num = pages->num - pages->normal_num; + + packet->pages_alloc = cpu_to_be32(multifd_ram_page_count()); + packet->normal_pages = cpu_to_be32(pages->normal_num); + packet->zero_pages = cpu_to_be32(zero_num); + + if (pages->block) { + pstrcpy(packet->ramblock, sizeof(packet->ramblock), + pages->block->idstr); + } + + for (int i = 0; i < pages->num; i++) { + /* there are architectures where ram_addr_t is 32 bit */ + uint64_t temp = pages->offset[i]; + + packet->offset[i] = cpu_to_be64(temp); + } + + trace_multifd_send_ram_fill(p->id, pages->normal_num, + zero_num); +} + +int multifd_ram_unfill_packet(MultiFDRecvParams *p, Error **errp) +{ + MultiFDPacket_t *packet = p->packet; + uint32_t page_count = multifd_ram_page_count(); + uint32_t page_size = multifd_ram_page_size(); + uint32_t pages_per_packet = be32_to_cpu(packet->pages_alloc); + int i; + + if (pages_per_packet > page_count) { + error_setg(errp, "multifd: received packet with %u pages, expected %u", + pages_per_packet, page_count); + return -1; + } + + p->normal_num = be32_to_cpu(packet->normal_pages); + if (p->normal_num > pages_per_packet) { + error_setg(errp, "multifd: received packet with %u non-zero pages, " + "which exceeds maximum expected pages %u", + p->normal_num, pages_per_packet); + return -1; + } + + p->zero_num = be32_to_cpu(packet->zero_pages); + if (p->zero_num > pages_per_packet - p->normal_num) { + error_setg(errp, + "multifd: received packet with %u zero pages, expected maximum %u", + p->zero_num, pages_per_packet - p->normal_num); + return -1; + } + + if (p->normal_num == 0 && p->zero_num == 0) { + return 0; + } + + /* make sure that ramblock is 0 terminated */ + packet->ramblock[255] = 0; + p->block = qemu_ram_block_by_name(packet->ramblock); + if 
(!p->block) { + error_setg(errp, "multifd: unknown ram block %s", + packet->ramblock); + return -1; + } + + p->host = p->block->host; + for (i = 0; i < p->normal_num; i++) { + uint64_t offset = be64_to_cpu(packet->offset[i]); + + if (offset > (p->block->used_length - page_size)) { + error_setg(errp, "multifd: offset too long %" PRIu64 + " (max " RAM_ADDR_FMT ")", + offset, p->block->used_length); + return -1; + } + p->normal[i] = offset; + } + + for (i = 0; i < p->zero_num; i++) { + uint64_t offset = be64_to_cpu(packet->offset[p->normal_num + i]); + + if (offset > (p->block->used_length - page_size)) { + error_setg(errp, "multifd: offset too long %" PRIu64 + " (max " RAM_ADDR_FMT ")", + offset, p->block->used_length); + return -1; + } + p->zero[i] = offset; + } + + return 0; +} + +static inline bool multifd_queue_empty(MultiFDPages_t *pages) +{ + return pages->num == 0; +} + +static inline bool multifd_queue_full(MultiFDPages_t *pages) +{ + return pages->num == multifd_ram_page_count(); +} + +static inline void multifd_enqueue(MultiFDPages_t *pages, ram_addr_t offset) +{ + pages->offset[pages->num++] = offset; +} + +/* Returns true if enqueue successful, false otherwise */ +bool multifd_queue_page(RAMBlock *block, ram_addr_t offset) +{ + MultiFDPages_t *pages; + +retry: + pages = &multifd_ram_send->u.ram; + + if (multifd_payload_empty(multifd_ram_send)) { + multifd_pages_reset(pages); + multifd_set_payload_type(multifd_ram_send, MULTIFD_PAYLOAD_RAM); + } + + /* If the queue is empty, we can already enqueue now */ + if (multifd_queue_empty(pages)) { + pages->block = block; + multifd_enqueue(pages, offset); + return true; + } + + /* + * Not empty, meanwhile we need a flush. It can because of either: + * + * (1) The page is not on the same ramblock of previous ones, or, + * (2) The queue is full. + * + * After flush, always retry. + */ + if (pages->block != block || multifd_queue_full(pages)) { + if (!multifd_send(&multifd_ram_send)) { + return false; + } + goto retry; + } + + /* Not empty, and we still have space, do it! */ + multifd_enqueue(pages, offset); + return true; +} + +/* + * We have two modes for multifd flushes: + * + * - Per-section mode: this is the legacy way to flush, it requires one + * MULTIFD_FLAG_SYNC message for each RAM_SAVE_FLAG_EOS. + * + * - Per-round mode: this is the modern way to flush, it requires one + * MULTIFD_FLAG_SYNC message only for each round of RAM scan. Normally + * it's paired with a new RAM_SAVE_FLAG_MULTIFD_FLUSH message in network + * based migrations. + * + * One thing to mention is mapped-ram always use the modern way to sync. + */ + +/* Do we need a per-section multifd flush (legacy way)? */ +bool multifd_ram_sync_per_section(void) +{ + if (!migrate_multifd()) { + return false; + } + + if (migrate_mapped_ram()) { + return false; + } + + return migrate_multifd_flush_after_each_section(); +} + +/* Do we need a per-round multifd flush (modern way)? */ +bool multifd_ram_sync_per_round(void) +{ + if (!migrate_multifd()) { + return false; + } + + if (migrate_mapped_ram()) { + return true; + } + + return !migrate_multifd_flush_after_each_section(); +} + +int multifd_ram_flush_and_sync(QEMUFile *f) +{ + MultiFDSyncReq req; + int ret; + + if (!migrate_multifd() || migration_in_postcopy()) { + return 0; + } + + if (!multifd_payload_empty(multifd_ram_send)) { + if (!multifd_send(&multifd_ram_send)) { + error_report("%s: multifd_send fail", __func__); + return -1; + } + } + + /* File migrations only need to sync with threads */ + req = migrate_mapped_ram() ? 
MULTIFD_SYNC_LOCAL : MULTIFD_SYNC_ALL; + + ret = multifd_send_sync_main(req); + if (ret) { + return ret; + } + + /* If we don't need to sync with remote at all, nothing else to do */ + if (req == MULTIFD_SYNC_LOCAL) { + return 0; + } + + /* + * Old QEMUs don't understand RAM_SAVE_FLAG_MULTIFD_FLUSH, it relies + * on RAM_SAVE_FLAG_EOS instead. + */ + if (migrate_multifd_flush_after_each_section()) { + return 0; + } + + qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); + qemu_fflush(f); + + return 0; +} + +bool multifd_send_prepare_common(MultiFDSendParams *p) +{ + MultiFDPages_t *pages = &p->data->u.ram; + multifd_ram_prepare_header(p); + multifd_send_zero_page_detect(p); + + if (!pages->normal_num) { + p->next_packet_size = 0; + return false; + } + + return true; +} + +static const MultiFDMethods multifd_nocomp_ops = { + .send_setup = multifd_nocomp_send_setup, + .send_cleanup = multifd_nocomp_send_cleanup, + .send_prepare = multifd_nocomp_send_prepare, + .recv_setup = multifd_nocomp_recv_setup, + .recv_cleanup = multifd_nocomp_recv_cleanup, + .recv = multifd_nocomp_recv +}; + +static void multifd_nocomp_register(void) +{ + multifd_register_ops(MULTIFD_COMPRESSION_NONE, &multifd_nocomp_ops); +} + +migration_init(multifd_nocomp_register); diff --git a/migration/multifd-qatzip.c b/migration/multifd-qatzip.c new file mode 100644 index 00000000000..7419e5dc0d0 --- /dev/null +++ b/migration/multifd-qatzip.c @@ -0,0 +1,395 @@ +/* + * Multifd QATzip compression implementation + * + * Copyright (c) Bytedance + * + * Authors: + * Bryan Zhang + * Hao Xiang + * Yichen Wang + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "system/ramblock.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qapi/qapi-types-migration.h" +#include "options.h" +#include "multifd.h" +#include + +typedef struct { + /* + * Unique session for use with QATzip API + */ + QzSession_T sess; + + /* + * For compression: Buffer for pages to compress + * For decompression: Buffer for data to decompress + */ + uint8_t *in_buf; + uint32_t in_len; + + /* + * For compression: Output buffer of compressed data + * For decompression: Output buffer of decompressed data + */ + uint8_t *out_buf; + uint32_t out_len; +} QatzipData; + +/** + * qatzip_send_setup: Set up QATzip session and private buffers. + * + * @param p Multifd channel params + * @param errp Pointer to error, which will be set in case of error + * @return 0 on success, -1 on error (and *errp will be set) + */ +static int qatzip_send_setup(MultiFDSendParams *p, Error **errp) +{ + QatzipData *q; + QzSessionParamsDeflate_T params; + const char *err_msg; + int ret; + + q = g_new0(QatzipData, 1); + p->compress_data = q; + /* We need one extra place for the packet header */ + p->iov = g_new0(struct iovec, 2); + + /* + * Initialize QAT device with software fallback by default. This allows + * QATzip to use CPU path when QAT hardware reaches maximum throughput. + */ + ret = qzInit(&q->sess, true); + if (ret != QZ_OK && ret != QZ_DUPLICATE) { + err_msg = "qzInit failed"; + goto err; + } + + ret = qzGetDefaultsDeflate(¶ms); + if (ret != QZ_OK) { + err_msg = "qzGetDefaultsDeflate failed"; + goto err; + } + + /* Make sure to use configured QATzip compression level. 
*/ + params.common_params.comp_lvl = migrate_multifd_qatzip_level(); + ret = qzSetupSessionDeflate(&q->sess, ¶ms); + if (ret != QZ_OK && ret != QZ_DUPLICATE) { + err_msg = "qzSetupSessionDeflate failed"; + goto err; + } + + if (MULTIFD_PACKET_SIZE > UINT32_MAX) { + err_msg = "packet size too large for QAT"; + goto err; + } + + q->in_len = MULTIFD_PACKET_SIZE; + /* + * PINNED_MEM is an enum from qatzip headers, which means to use + * kzalloc_node() to allocate memory for QAT DMA purposes. When QAT device + * is not available or software fallback is used, the malloc flag needs to + * be set as COMMON_MEM. + */ + q->in_buf = qzMalloc(q->in_len, 0, PINNED_MEM); + if (!q->in_buf) { + q->in_buf = qzMalloc(q->in_len, 0, COMMON_MEM); + if (!q->in_buf) { + err_msg = "qzMalloc failed"; + goto err; + } + } + + q->out_len = qzMaxCompressedLength(MULTIFD_PACKET_SIZE, &q->sess); + q->out_buf = qzMalloc(q->out_len, 0, PINNED_MEM); + if (!q->out_buf) { + q->out_buf = qzMalloc(q->out_len, 0, COMMON_MEM); + if (!q->out_buf) { + err_msg = "qzMalloc failed"; + goto err; + } + } + + return 0; + +err: + error_setg(errp, "multifd %u: [sender] %s", p->id, err_msg); + return -1; +} + +/** + * qatzip_send_cleanup: Tear down QATzip session and release private buffers. + * + * @param p Multifd channel params + * @param errp Pointer to error, which will be set in case of error + * @return None + */ +static void qatzip_send_cleanup(MultiFDSendParams *p, Error **errp) +{ + QatzipData *q = p->compress_data; + + if (q) { + if (q->in_buf) { + qzFree(q->in_buf); + } + if (q->out_buf) { + qzFree(q->out_buf); + } + (void)qzTeardownSession(&q->sess); + (void)qzClose(&q->sess); + g_free(q); + } + + g_free(p->iov); + p->iov = NULL; + p->compress_data = NULL; +} + +/** + * qatzip_send_prepare: Compress pages and update IO channel info. + * + * @param p Multifd channel params + * @param errp Pointer to error, which will be set in case of error + * @return 0 on success, -1 on error (and *errp will be set) + */ +static int qatzip_send_prepare(MultiFDSendParams *p, Error **errp) +{ + uint32_t page_size = multifd_ram_page_size(); + MultiFDPages_t *pages = &p->data->u.ram; + QatzipData *q = p->compress_data; + int ret; + unsigned int in_len, out_len; + + if (!multifd_send_prepare_common(p)) { + goto out; + } + + /* + * Unlike other multifd compression implementations, we use a non-streaming + * API and place all the data into one buffer, rather than sending each + * page to the compression API at a time. Based on initial benchmarks, the + * non-streaming API outperforms the streaming API. Plus, the logic in QEMU + * is friendly to using the non-streaming API anyway. If either of these + * statements becomes no longer true, we can revisit adding a streaming + * implementation. 
+ */ + for (int i = 0; i < pages->normal_num; i++) { + memcpy(q->in_buf + (i * page_size), + pages->block->host + pages->offset[i], + page_size); + } + + in_len = pages->normal_num * page_size; + if (in_len > q->in_len) { + error_setg(errp, "multifd %u: unexpectedly large input", p->id); + return -1; + } + out_len = q->out_len; + + ret = qzCompress(&q->sess, q->in_buf, &in_len, q->out_buf, &out_len, 1); + if (ret != QZ_OK) { + error_setg(errp, "multifd %u: QATzip returned %d instead of QZ_OK", + p->id, ret); + return -1; + } + if (in_len != pages->normal_num * page_size) { + error_setg(errp, "multifd %u: QATzip failed to compress all input", + p->id); + return -1; + } + + p->iov[p->iovs_num].iov_base = q->out_buf; + p->iov[p->iovs_num].iov_len = out_len; + p->iovs_num++; + p->next_packet_size = out_len; + +out: + p->flags |= MULTIFD_FLAG_QATZIP; + multifd_send_fill_packet(p); + return 0; +} + +/** + * qatzip_recv_setup: Set up QATzip session and allocate private buffers. + * + * @param p Multifd channel params + * @param errp Pointer to error, which will be set in case of error + * @return 0 on success, -1 on error (and *errp will be set) + */ +static int qatzip_recv_setup(MultiFDRecvParams *p, Error **errp) +{ + QatzipData *q; + QzSessionParamsDeflate_T params; + const char *err_msg; + int ret; + + q = g_new0(QatzipData, 1); + p->compress_data = q; + + /* + * Initialize QAT device with software fallback by default. This allows + * QATzip to use CPU path when QAT hardware reaches maximum throughput. + */ + ret = qzInit(&q->sess, true); + if (ret != QZ_OK && ret != QZ_DUPLICATE) { + err_msg = "qzInit failed"; + goto err; + } + + ret = qzGetDefaultsDeflate(¶ms); + if (ret != QZ_OK) { + err_msg = "qzGetDefaultsDeflate failed"; + goto err; + } + + ret = qzSetupSessionDeflate(&q->sess, ¶ms); + if (ret != QZ_OK && ret != QZ_DUPLICATE) { + err_msg = "qzSetupSessionDeflate failed"; + goto err; + } + + /* + * Reserve extra spaces for the incoming packets. Current implementation + * doesn't send uncompressed pages in case the compression gets too big. + */ + q->in_len = MULTIFD_PACKET_SIZE * 2; + /* + * PINNED_MEM is an enum from qatzip headers, which means to use + * kzalloc_node() to allocate memory for QAT DMA purposes. When QAT device + * is not available or software fallback is used, the malloc flag needs to + * be set as COMMON_MEM. + */ + q->in_buf = qzMalloc(q->in_len, 0, PINNED_MEM); + if (!q->in_buf) { + q->in_buf = qzMalloc(q->in_len, 0, COMMON_MEM); + if (!q->in_buf) { + err_msg = "qzMalloc failed"; + goto err; + } + } + + q->out_len = MULTIFD_PACKET_SIZE; + q->out_buf = qzMalloc(q->out_len, 0, PINNED_MEM); + if (!q->out_buf) { + q->out_buf = qzMalloc(q->out_len, 0, COMMON_MEM); + if (!q->out_buf) { + err_msg = "qzMalloc failed"; + goto err; + } + } + + return 0; + +err: + error_setg(errp, "multifd %u: [receiver] %s", p->id, err_msg); + return -1; +} + +/** + * qatzip_recv_cleanup: Tear down QATzip session and release private buffers. + * + * @param p Multifd channel params + * @return None + */ +static void qatzip_recv_cleanup(MultiFDRecvParams *p) +{ + QatzipData *q = p->compress_data; + + if (q) { + if (q->in_buf) { + qzFree(q->in_buf); + } + if (q->out_buf) { + qzFree(q->out_buf); + } + (void)qzTeardownSession(&q->sess); + (void)qzClose(&q->sess); + g_free(q); + } + p->compress_data = NULL; +} + + +/** + * qatzip_recv: Decompress pages and copy them to the appropriate + * locations. 
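For readers unfamiliar with the QATzip library calls used in the hooks above, the standalone sketch below follows the same call sequence: qzInit() with software fallback, qzGetDefaultsDeflate() plus qzSetupSessionDeflate(), one batched qzCompress() over the whole buffer, and the matching qzDecompress(). It is illustrative only and not part of the patch; it assumes the qatzip headers and library are installed, uses plain buffers rather than qzMalloc() pinned memory, and elides most error handling.

/* Illustrative sketch, not part of this patch. */
#include <qatzip.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    QzSession_T sess = {0};
    QzSessionParamsDeflate_T params;
    static unsigned char in[64 * 1024], out[128 * 1024], back[64 * 1024];
    unsigned int in_len = sizeof(in), out_len = sizeof(out), back_len = sizeof(back);
    unsigned int comp_len;
    int rc;

    memset(in, 'A', sizeof(in));

    /* Initialize with software fallback, as the setup hooks above do */
    rc = qzInit(&sess, 1);
    if (rc != QZ_OK && rc != QZ_DUPLICATE) {
        return 1;
    }
    if (qzGetDefaultsDeflate(&params) != QZ_OK) {
        return 1;
    }
    rc = qzSetupSessionDeflate(&sess, &params);
    if (rc != QZ_OK && rc != QZ_DUPLICATE) {
        return 1;
    }

    /* One batched compress call over the whole input (last == 1) */
    if (qzCompress(&sess, in, &in_len, out, &out_len, 1) != QZ_OK) {
        return 1;
    }
    comp_len = out_len;
    if (qzDecompress(&sess, out, &comp_len, back, &back_len) != QZ_OK) {
        return 1;
    }
    printf("compressed %u bytes to %u, decompressed back to %u bytes\n",
           in_len, out_len, back_len);

    qzTeardownSession(&sess);
    qzClose(&sess);
    return 0;
}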
+ * + * @param p Multifd channel params + * @param errp Pointer to error, which will be set in case of error + * @return 0 on success, -1 on error (and *errp will be set) + */ +static int qatzip_recv(MultiFDRecvParams *p, Error **errp) +{ + QatzipData *q = p->compress_data; + int ret; + unsigned int in_len, out_len; + uint32_t in_size = p->next_packet_size; + uint32_t page_size = multifd_ram_page_size(); + uint32_t expected_size = p->normal_num * page_size; + uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; + + if (in_size > q->in_len) { + error_setg(errp, "multifd %u: received unexpectedly large packet", + p->id); + return -1; + } + + if (flags != MULTIFD_FLAG_QATZIP) { + error_setg(errp, "multifd %u: flags received %x flags expected %x", + p->id, flags, MULTIFD_FLAG_QATZIP); + return -1; + } + + multifd_recv_zero_page_process(p); + if (!p->normal_num) { + assert(in_size == 0); + return 0; + } + + ret = qio_channel_read_all(p->c, (void *)q->in_buf, in_size, errp); + if (ret != 0) { + return ret; + } + + in_len = in_size; + out_len = q->out_len; + ret = qzDecompress(&q->sess, q->in_buf, &in_len, q->out_buf, &out_len); + if (ret != QZ_OK) { + error_setg(errp, "multifd %u: qzDecompress failed", p->id); + return -1; + } + if (out_len != expected_size) { + error_setg(errp, "multifd %u: packet size received %u size expected %u", + p->id, out_len, expected_size); + return -1; + } + + /* Copy each page to its appropriate location. */ + for (int i = 0; i < p->normal_num; i++) { + memcpy(p->host + p->normal[i], q->out_buf + page_size * i, page_size); + ramblock_recv_bitmap_set_offset(p->block, p->normal[i]); + } + return 0; +} + +static MultiFDMethods multifd_qatzip_ops = { + .send_setup = qatzip_send_setup, + .send_cleanup = qatzip_send_cleanup, + .send_prepare = qatzip_send_prepare, + .recv_setup = qatzip_recv_setup, + .recv_cleanup = qatzip_recv_cleanup, + .recv = qatzip_recv +}; + +static void multifd_qatzip_register(void) +{ + multifd_register_ops(MULTIFD_COMPRESSION_QATZIP, &multifd_qatzip_ops); +} + +migration_init(multifd_qatzip_register); diff --git a/migration/multifd-qpl.c b/migration/multifd-qpl.c index 9265098ee79..52902eb00cc 100644 --- a/migration/multifd-qpl.c +++ b/migration/multifd-qpl.c @@ -14,7 +14,7 @@ #include "qemu/module.h" #include "qapi/error.h" #include "qapi/qapi-types-migration.h" -#include "exec/ramblock.h" +#include "system/ramblock.h" #include "multifd.h" #include "qpl/qpl.h" @@ -220,21 +220,13 @@ static void multifd_qpl_deinit(QplData *qpl) } } -/** - * multifd_qpl_send_setup: set up send side - * - * Set up the channel with QPL compression. - * - * Returns 0 on success or -1 on error - * - * @p: Params for the channel being used - * @errp: pointer to an error - */ static int multifd_qpl_send_setup(MultiFDSendParams *p, Error **errp) { QplData *qpl; + uint32_t page_size = multifd_ram_page_size(); + uint32_t page_count = multifd_ram_page_count(); - qpl = multifd_qpl_init(p->page_count, p->page_size, errp); + qpl = multifd_qpl_init(page_count, page_size, errp); if (!qpl) { return -1; } @@ -245,18 +237,10 @@ static int multifd_qpl_send_setup(MultiFDSendParams *p, Error **errp) * additional two IOVs are used to store packet header and compressed data * length */ - p->iov = g_new0(struct iovec, p->page_count + 2); + p->iov = g_new0(struct iovec, page_count + 2); return 0; } -/** - * multifd_qpl_send_cleanup: clean up send side - * - * Close the channel and free memory. 
- * - * @p: Params for the channel being used - * @errp: pointer to an error - */ static void multifd_qpl_send_cleanup(MultiFDSendParams *p, Error **errp) { multifd_qpl_deinit(p->compress_data); @@ -404,13 +388,14 @@ static bool multifd_qpl_submit_job(qpl_job *job) static void multifd_qpl_compress_pages_slow_path(MultiFDSendParams *p) { QplData *qpl = p->compress_data; - uint32_t size = p->page_size; + MultiFDPages_t *pages = &p->data->u.ram; + uint32_t size = multifd_ram_page_size(); qpl_job *job = qpl->sw_job; uint8_t *zbuf = qpl->zbuf; uint8_t *buf; - for (int i = 0; i < p->pages->normal_num; i++) { - buf = p->pages->block->host + p->pages->offset[i]; + for (int i = 0; i < pages->normal_num; i++) { + buf = pages->block->host + pages->offset[i]; multifd_qpl_prepare_comp_job(job, buf, zbuf, size); if (qpl_execute_job(job) == QPL_STS_OK) { multifd_qpl_fill_packet(i, p, zbuf, job->total_out); @@ -434,8 +419,8 @@ static void multifd_qpl_compress_pages_slow_path(MultiFDSendParams *p) static void multifd_qpl_compress_pages(MultiFDSendParams *p) { QplData *qpl = p->compress_data; - MultiFDPages_t *pages = p->pages; - uint32_t size = p->page_size; + MultiFDPages_t *pages = &p->data->u.ram; + uint32_t size = multifd_ram_page_size(); QplHwJob *hw_job; uint8_t *buf; uint8_t *zbuf; @@ -484,20 +469,10 @@ static void multifd_qpl_compress_pages(MultiFDSendParams *p) } } -/** - * multifd_qpl_send_prepare: prepare data to be able to send - * - * Create a compressed buffer with all the pages that we are going to - * send. - * - * Returns 0 on success or -1 on error - * - * @p: Params for the channel being used - * @errp: pointer to an error - */ static int multifd_qpl_send_prepare(MultiFDSendParams *p, Error **errp) { QplData *qpl = p->compress_data; + MultiFDPages_t *pages = &p->data->u.ram; uint32_t len = 0; if (!multifd_send_prepare_common(p)) { @@ -505,7 +480,7 @@ static int multifd_qpl_send_prepare(MultiFDSendParams *p, Error **errp) } /* The first IOV is used to store the compressed page lengths */ - len = p->pages->normal_num * sizeof(uint32_t); + len = pages->normal_num * sizeof(uint32_t); multifd_qpl_fill_iov(p, (uint8_t *) qpl->zlen, len); if (qpl->hw_avail) { multifd_qpl_compress_pages(p); @@ -519,21 +494,13 @@ static int multifd_qpl_send_prepare(MultiFDSendParams *p, Error **errp) return 0; } -/** - * multifd_qpl_recv_setup: set up receive side - * - * Create the compressed channel and buffer. - * - * Returns 0 on success or -1 on error - * - * @p: Params for the channel being used - * @errp: pointer to an error - */ static int multifd_qpl_recv_setup(MultiFDRecvParams *p, Error **errp) { QplData *qpl; + uint32_t page_size = multifd_ram_page_size(); + uint32_t page_count = multifd_ram_page_count(); - qpl = multifd_qpl_init(p->page_count, p->page_size, errp); + qpl = multifd_qpl_init(page_count, page_size, errp); if (!qpl) { return -1; } @@ -541,13 +508,6 @@ static int multifd_qpl_recv_setup(MultiFDRecvParams *p, Error **errp) return 0; } -/** - * multifd_qpl_recv_cleanup: set up receive side - * - * Close the channel and free memory. 
- * - * @p: Params for the channel being used - */ static void multifd_qpl_recv_cleanup(MultiFDRecvParams *p) { multifd_qpl_deinit(p->compress_data); @@ -600,7 +560,7 @@ static int multifd_qpl_decompress_pages_slow_path(MultiFDRecvParams *p, Error **errp) { QplData *qpl = p->compress_data; - uint32_t size = p->page_size; + uint32_t size = multifd_ram_page_size(); qpl_job *job = qpl->sw_job; uint8_t *zbuf = qpl->zbuf; uint8_t *addr; @@ -638,7 +598,7 @@ static int multifd_qpl_decompress_pages_slow_path(MultiFDRecvParams *p, static int multifd_qpl_decompress_pages(MultiFDRecvParams *p, Error **errp) { QplData *qpl = p->compress_data; - uint32_t size = p->page_size; + uint32_t size = multifd_ram_page_size(); uint8_t *zbuf = qpl->zbuf; uint8_t *addr; uint32_t len; @@ -688,17 +648,6 @@ static int multifd_qpl_decompress_pages(MultiFDRecvParams *p, Error **errp) } return 0; } -/** - * multifd_qpl_recv: read the data from the channel into actual pages - * - * Read the compressed buffer, and uncompress it into the actual - * pages. - * - * Returns 0 on success or -1 on error - * - * @p: Params for the channel being used - * @errp: pointer to an error - */ static int multifd_qpl_recv(MultiFDRecvParams *p, Error **errp) { QplData *qpl = p->compress_data; @@ -728,8 +677,9 @@ static int multifd_qpl_recv(MultiFDRecvParams *p, Error **errp) } for (int i = 0; i < p->normal_num; i++) { qpl->zlen[i] = be32_to_cpu(qpl->zlen[i]); - assert(qpl->zlen[i] <= p->page_size); + assert(qpl->zlen[i] <= multifd_ram_page_size()); zbuf_len += qpl->zlen[i]; + ramblock_recv_bitmap_set_offset(p->block, p->normal[i]); } /* read compressed pages */ @@ -745,7 +695,7 @@ static int multifd_qpl_recv(MultiFDRecvParams *p, Error **errp) return multifd_qpl_decompress_pages_slow_path(p, errp); } -static MultiFDMethods multifd_qpl_ops = { +static const MultiFDMethods multifd_qpl_ops = { .send_setup = multifd_qpl_send_setup, .send_cleanup = multifd_qpl_send_cleanup, .send_prepare = multifd_qpl_send_prepare, diff --git a/migration/multifd-uadk.c b/migration/multifd-uadk.c index d12353fb218..fd7cd9b5e8e 100644 --- a/migration/multifd-uadk.c +++ b/migration/multifd-uadk.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "qemu/module.h" #include "qapi/error.h" -#include "exec/ramblock.h" +#include "system/ramblock.h" #include "migration.h" #include "multifd.h" #include "options.h" @@ -103,19 +103,13 @@ static void multifd_uadk_uninit_sess(struct wd_data *wd) g_free(wd); } -/** - * multifd_uadk_send_setup: setup send side - * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ static int multifd_uadk_send_setup(MultiFDSendParams *p, Error **errp) { struct wd_data *wd; + uint32_t page_size = multifd_ram_page_size(); + uint32_t page_count = multifd_ram_page_count(); - wd = multifd_uadk_init_sess(p->page_count, p->page_size, true, errp); + wd = multifd_uadk_init_sess(page_count, page_size, true, errp); if (!wd) { return -1; } @@ -128,24 +122,18 @@ static int multifd_uadk_send_setup(MultiFDSendParams *p, Error **errp) * length */ - p->iov = g_new0(struct iovec, p->page_count + 2); + p->iov = g_new0(struct iovec, page_count + 2); return 0; } -/** - * multifd_uadk_send_cleanup: cleanup send side - * - * Close the channel and return memory. 
- * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ static void multifd_uadk_send_cleanup(MultiFDSendParams *p, Error **errp) { struct wd_data *wd = p->compress_data; multifd_uadk_uninit_sess(wd); p->compress_data = NULL; + g_free(p->iov); + p->iov = NULL; } static inline void prepare_next_iov(MultiFDSendParams *p, void *base, @@ -157,40 +145,31 @@ static inline void prepare_next_iov(MultiFDSendParams *p, void *base, p->iovs_num++; } -/** - * multifd_uadk_send_prepare: prepare data to be able to send - * - * Create a compressed buffer with all the pages that we are going to - * send. - * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ static int multifd_uadk_send_prepare(MultiFDSendParams *p, Error **errp) { struct wd_data *uadk_data = p->compress_data; uint32_t hdr_size; + uint32_t page_size = multifd_ram_page_size(); uint8_t *buf = uadk_data->buf; int ret = 0; + MultiFDPages_t *pages = &p->data->u.ram; if (!multifd_send_prepare_common(p)) { goto out; } - hdr_size = p->pages->normal_num * sizeof(uint32_t); + hdr_size = pages->normal_num * sizeof(uint32_t); /* prepare the header that stores the lengths of all compressed data */ prepare_next_iov(p, uadk_data->buf_hdr, hdr_size); - for (int i = 0; i < p->pages->normal_num; i++) { + for (int i = 0; i < pages->normal_num; i++) { struct wd_comp_req creq = { .op_type = WD_DIR_COMPRESS, - .src = p->pages->block->host + p->pages->offset[i], - .src_len = p->page_size, + .src = pages->block->host + pages->offset[i], + .src_len = page_size, .dst = buf, /* Set dst_len to double the src in case compressed out >= page_size */ - .dst_len = p->page_size * 2, + .dst_len = page_size * 2, }; if (uadk_data->handle) { @@ -200,7 +179,7 @@ static int multifd_uadk_send_prepare(MultiFDSendParams *p, Error **errp) p->id, ret, creq.status); return -1; } - if (creq.dst_len < p->page_size) { + if (creq.dst_len < page_size) { uadk_data->buf_hdr[i] = cpu_to_be32(creq.dst_len); prepare_next_iov(p, buf, creq.dst_len); buf += creq.dst_len; @@ -212,11 +191,11 @@ static int multifd_uadk_send_prepare(MultiFDSendParams *p, Error **errp) * than page_size as well because at the receive end we can skip the * decompression. But it is tricky to find the right number here. */ - if (!uadk_data->handle || creq.dst_len >= p->page_size) { - uadk_data->buf_hdr[i] = cpu_to_be32(p->page_size); - prepare_next_iov(p, p->pages->block->host + p->pages->offset[i], - p->page_size); - buf += p->page_size; + if (!uadk_data->handle || creq.dst_len >= page_size) { + uadk_data->buf_hdr[i] = cpu_to_be32(page_size); + prepare_next_iov(p, pages->block->host + pages->offset[i], + page_size); + buf += page_size; } } out: @@ -225,21 +204,13 @@ static int multifd_uadk_send_prepare(MultiFDSendParams *p, Error **errp) return 0; } -/** - * multifd_uadk_recv_setup: setup receive side - * - * Create the compressed channel and buffer. 
- * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ static int multifd_uadk_recv_setup(MultiFDRecvParams *p, Error **errp) { struct wd_data *wd; + uint32_t page_size = multifd_ram_page_size(); + uint32_t page_count = multifd_ram_page_count(); - wd = multifd_uadk_init_sess(p->page_count, p->page_size, false, errp); + wd = multifd_uadk_init_sess(page_count, page_size, false, errp); if (!wd) { return -1; } @@ -247,13 +218,6 @@ static int multifd_uadk_recv_setup(MultiFDRecvParams *p, Error **errp) return 0; } -/** - * multifd_uadk_recv_cleanup: cleanup receive side - * - * Close the channel and return memory. - * - * @p: Params for the channel that we are using - */ static void multifd_uadk_recv_cleanup(MultiFDRecvParams *p) { struct wd_data *wd = p->compress_data; @@ -262,17 +226,6 @@ static void multifd_uadk_recv_cleanup(MultiFDRecvParams *p) p->compress_data = NULL; } -/** - * multifd_uadk_recv: read the data from the channel into actual pages - * - * Read the compressed buffer, and uncompress it into the actual - * pages. - * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp) { struct wd_data *uadk_data = p->compress_data; @@ -280,6 +233,7 @@ static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp) uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; uint32_t hdr_len = p->normal_num * sizeof(uint32_t); uint32_t data_len = 0; + uint32_t page_size = multifd_ram_page_size(); uint8_t *buf = uadk_data->buf; int ret = 0; @@ -306,7 +260,7 @@ static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp) for (int i = 0; i < p->normal_num; i++) { uadk_data->buf_hdr[i] = be32_to_cpu(uadk_data->buf_hdr[i]); data_len += uadk_data->buf_hdr[i]; - assert(uadk_data->buf_hdr[i] <= p->page_size); + assert(uadk_data->buf_hdr[i] <= page_size); } /* read compressed data */ @@ -322,12 +276,12 @@ static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp) .src = buf, .src_len = uadk_data->buf_hdr[i], .dst = p->host + p->normal[i], - .dst_len = p->page_size, + .dst_len = page_size, }; - if (uadk_data->buf_hdr[i] == p->page_size) { - memcpy(p->host + p->normal[i], buf, p->page_size); - buf += p->page_size; + if (uadk_data->buf_hdr[i] == page_size) { + memcpy(p->host + p->normal[i], buf, page_size); + buf += page_size; continue; } @@ -343,7 +297,7 @@ static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp) p->id, ret, creq.status); return -1; } - if (creq.dst_len != p->page_size) { + if (creq.dst_len != page_size) { error_setg(errp, "multifd %u: decompressed length error", p->id); return -1; } @@ -353,7 +307,7 @@ static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp) return 0; } -static MultiFDMethods multifd_uadk_ops = { +static const MultiFDMethods multifd_uadk_ops = { .send_setup = multifd_uadk_send_setup, .send_cleanup = multifd_uadk_send_cleanup, .send_prepare = multifd_uadk_send_prepare, diff --git a/migration/multifd-zero-page.c b/migration/multifd-zero-page.c index e1b8370f88e..4cde8681594 100644 --- a/migration/multifd-zero-page.c +++ b/migration/multifd-zero-page.c @@ -12,8 +12,9 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" -#include "exec/ramblock.h" +#include "system/ramblock.h" #include "migration.h" +#include "migration-stats.h" #include "multifd.h" #include "options.h" #include "ram.h" @@ -46,14 +47,14 @@ static void 
swap_page_offset(ram_addr_t *pages_offset, int a, int b) */ void multifd_send_zero_page_detect(MultiFDSendParams *p) { - MultiFDPages_t *pages = p->pages; + MultiFDPages_t *pages = &p->data->u.ram; RAMBlock *rb = pages->block; int i = 0; int j = pages->num - 1; if (!multifd_zero_page_enabled()) { pages->normal_num = pages->num; - return; + goto out; } /* @@ -63,7 +64,7 @@ void multifd_send_zero_page_detect(MultiFDSendParams *p) while (i <= j) { uint64_t offset = pages->offset[i]; - if (!buffer_is_zero(rb->host + offset, p->page_size)) { + if (!buffer_is_zero(rb->host + offset, multifd_ram_page_size())) { i++; continue; } @@ -74,15 +75,37 @@ void multifd_send_zero_page_detect(MultiFDSendParams *p) } pages->normal_num = i; + +out: + stat64_add(&mig_stats.normal_pages, pages->normal_num); + stat64_add(&mig_stats.zero_pages, pages->num - pages->normal_num); } void multifd_recv_zero_page_process(MultiFDRecvParams *p) { for (int i = 0; i < p->zero_num; i++) { void *page = p->host + p->zero[i]; - if (ramblock_recv_bitmap_test_byte_offset(p->block, p->zero[i])) { - memset(page, 0, p->page_size); - } else { + bool received = + ramblock_recv_bitmap_test_byte_offset(p->block, p->zero[i]); + + /* + * During multifd migration zero page is written to the memory + * only if it is migrated more than once. + * + * It becomes a problem when both multifd & postcopy options are + * enabled. If the zero page which was skipped during multifd phase, + * is accessed during the postcopy phase of the migration, a page + * fault occurs. But this page fault is not served because the + * 'receivedmap' says the zero page is already received. Thus the + * thread accessing that page may hang. + * + * When postcopy is enabled, always write the zero page as and when + * it is migrated. + */ + if (migrate_postcopy_ram() || received) { + memset(page, 0, multifd_ram_page_size()); + } + if (!received) { ramblock_recv_bitmap_set_offset(p->block, p->zero[i]); } } diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c index 2ced69487e5..8820b2a787c 100644 --- a/migration/multifd-zlib.c +++ b/migration/multifd-zlib.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include #include "qemu/rcu.h" -#include "exec/ramblock.h" +#include "system/ramblock.h" #include "exec/target_page.h" #include "qapi/error.h" #include "migration.h" @@ -34,17 +34,7 @@ struct zlib_data { /* Multifd zlib compression */ -/** - * zlib_send_setup: setup send side - * - * Setup each channel with zlib compression. - * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static int zlib_send_setup(MultiFDSendParams *p, Error **errp) +static int multifd_zlib_send_setup(MultiFDSendParams *p, Error **errp) { struct zlib_data *z = g_new0(struct zlib_data, 1); z_stream *zs = &z->zs; @@ -86,15 +76,7 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp) return -1; } -/** - * zlib_send_cleanup: cleanup send side - * - * Close the channel and return memory. - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp) +static void multifd_zlib_send_cleanup(MultiFDSendParams *p, Error **errp) { struct zlib_data *z = p->compress_data; @@ -110,23 +92,13 @@ static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp) p->iov = NULL; } -/** - * zlib_send_prepare: prepare date to be able to send - * - * Create a compressed buffer with all the pages that we are going to - * send. 
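Stepping back to the zero-page changes in multifd-zero-page.c above: multifd_send_zero_page_detect() partitions the offset[] array in place, keeping offsets of non-zero pages at the front, swapping zero-page offsets to the tail, and recording the split point in normal_num. The standalone sketch below reproduces that partition scheme outside QEMU; it is illustrative only and not part of the patch, and it approximates buffer_is_zero() with a memcmp() against an all-zero page.

/* Illustrative sketch, not part of this patch. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096

static bool page_is_zero(const uint8_t *page)
{
    static const uint8_t zero_page[PAGE_SIZE];
    return memcmp(page, zero_page, PAGE_SIZE) == 0;
}

/*
 * Partition offset[0..num) so that offsets of non-zero pages come first
 * and offsets of zero pages end up at the tail.
 * Returns the number of non-zero ("normal") pages, i.e. normal_num.
 */
static int partition_zero_pages(const uint8_t *ram, size_t *offset, int num)
{
    int i = 0;
    int j = num - 1;

    while (i <= j) {
        if (!page_is_zero(ram + offset[i])) {
            i++;
            continue;
        }
        /* offset[i] is a zero page: swap it with the current tail slot */
        size_t tmp = offset[i];
        offset[i] = offset[j];
        offset[j] = tmp;
        j--;
    }
    return i;
}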
- * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static int zlib_send_prepare(MultiFDSendParams *p, Error **errp) +static int multifd_zlib_send_prepare(MultiFDSendParams *p, Error **errp) { - MultiFDPages_t *pages = p->pages; + MultiFDPages_t *pages = &p->data->u.ram; struct zlib_data *z = p->compress_data; z_stream *zs = &z->zs; uint32_t out_size = 0; + uint32_t page_size = multifd_ram_page_size(); int ret; uint32_t i; @@ -147,8 +119,8 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp) * with compression. zlib does not guarantee that this is safe, * therefore copy the page before calling deflate(). */ - memcpy(z->buf, p->pages->block->host + pages->offset[i], p->page_size); - zs->avail_in = p->page_size; + memcpy(z->buf, pages->block->host + pages->offset[i], page_size); + zs->avail_in = page_size; zs->next_in = z->buf; zs->avail_out = available; @@ -188,17 +160,7 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp) return 0; } -/** - * zlib_recv_setup: setup receive side - * - * Create the compressed channel and buffer. - * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp) +static int multifd_zlib_recv_setup(MultiFDRecvParams *p, Error **errp) { struct zlib_data *z = g_new0(struct zlib_data, 1); z_stream *zs = &z->zs; @@ -224,14 +186,7 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp) return 0; } -/** - * zlib_recv_cleanup: setup receive side - * - * For no compression this function does nothing. - * - * @p: Params for the channel that we are using - */ -static void zlib_recv_cleanup(MultiFDRecvParams *p) +static void multifd_zlib_recv_cleanup(MultiFDRecvParams *p) { struct zlib_data *z = p->compress_data; @@ -242,25 +197,15 @@ static void zlib_recv_cleanup(MultiFDRecvParams *p) p->compress_data = NULL; } -/** - * zlib_recv: read the data from the channel into actual pages - * - * Read the compressed buffer, and uncompress it into the actual - * pages. 
- * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static int zlib_recv(MultiFDRecvParams *p, Error **errp) +static int multifd_zlib_recv(MultiFDRecvParams *p, Error **errp) { struct zlib_data *z = p->compress_data; z_stream *zs = &z->zs; uint32_t in_size = p->next_packet_size; /* we measure the change of total_out */ uint32_t out_size = zs->total_out; - uint32_t expected_size = p->normal_num * p->page_size; + uint32_t page_size = multifd_ram_page_size(); + uint32_t expected_size = p->normal_num * page_size; uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; int ret; int i; @@ -296,7 +241,7 @@ static int zlib_recv(MultiFDRecvParams *p, Error **errp) flush = Z_SYNC_FLUSH; } - zs->avail_out = p->page_size; + zs->avail_out = page_size; zs->next_out = p->host + p->normal[i]; /* @@ -310,8 +255,8 @@ static int zlib_recv(MultiFDRecvParams *p, Error **errp) do { ret = inflate(zs, flush); } while (ret == Z_OK && zs->avail_in - && (zs->total_out - start) < p->page_size); - if (ret == Z_OK && (zs->total_out - start) < p->page_size) { + && (zs->total_out - start) < page_size); + if (ret == Z_OK && (zs->total_out - start) < page_size) { error_setg(errp, "multifd %u: inflate generated too few output", p->id); return -1; @@ -332,13 +277,13 @@ static int zlib_recv(MultiFDRecvParams *p, Error **errp) return 0; } -static MultiFDMethods multifd_zlib_ops = { - .send_setup = zlib_send_setup, - .send_cleanup = zlib_send_cleanup, - .send_prepare = zlib_send_prepare, - .recv_setup = zlib_recv_setup, - .recv_cleanup = zlib_recv_cleanup, - .recv = zlib_recv +static const MultiFDMethods multifd_zlib_ops = { + .send_setup = multifd_zlib_send_setup, + .send_cleanup = multifd_zlib_send_cleanup, + .send_prepare = multifd_zlib_send_prepare, + .recv_setup = multifd_zlib_recv_setup, + .recv_cleanup = multifd_zlib_recv_cleanup, + .recv = multifd_zlib_recv }; static void multifd_zlib_register(void) diff --git a/migration/multifd-zstd.c b/migration/multifd-zstd.c index ca17b7e310f..3c2dcf76b0f 100644 --- a/migration/multifd-zstd.c +++ b/migration/multifd-zstd.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include #include "qemu/rcu.h" -#include "exec/ramblock.h" +#include "system/ramblock.h" #include "exec/target_page.h" #include "qapi/error.h" #include "migration.h" @@ -37,17 +37,7 @@ struct zstd_data { /* Multifd zstd compression */ -/** - * zstd_send_setup: setup send side - * - * Setup each channel with zstd compression. - * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static int zstd_send_setup(MultiFDSendParams *p, Error **errp) +static int multifd_zstd_send_setup(MultiFDSendParams *p, Error **errp) { struct zstd_data *z = g_new0(struct zstd_data, 1); int res; @@ -83,15 +73,7 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp) return 0; } -/** - * zstd_send_cleanup: cleanup send side - * - * Close the channel and return memory. 
- * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static void zstd_send_cleanup(MultiFDSendParams *p, Error **errp) +static void multifd_zstd_send_cleanup(MultiFDSendParams *p, Error **errp) { struct zstd_data *z = p->compress_data; @@ -106,20 +88,9 @@ static void zstd_send_cleanup(MultiFDSendParams *p, Error **errp) p->iov = NULL; } -/** - * zstd_send_prepare: prepare date to be able to send - * - * Create a compressed buffer with all the pages that we are going to - * send. - * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static int zstd_send_prepare(MultiFDSendParams *p, Error **errp) +static int multifd_zstd_send_prepare(MultiFDSendParams *p, Error **errp) { - MultiFDPages_t *pages = p->pages; + MultiFDPages_t *pages = &p->data->u.ram; struct zstd_data *z = p->compress_data; int ret; uint32_t i; @@ -138,8 +109,8 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp) if (i == pages->normal_num - 1) { flush = ZSTD_e_flush; } - z->in.src = p->pages->block->host + pages->offset[i]; - z->in.size = p->page_size; + z->in.src = pages->block->host + pages->offset[i]; + z->in.size = multifd_ram_page_size(); z->in.pos = 0; /* @@ -152,9 +123,9 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp) */ do { ret = ZSTD_compressStream2(z->zcs, &z->out, &z->in, flush); - } while (ret > 0 && (z->in.size - z->in.pos > 0) - && (z->out.size - z->out.pos > 0)); - if (ret > 0 && (z->in.size - z->in.pos > 0)) { + } while (ret > 0 && (z->in.size > z->in.pos) + && (z->out.size > z->out.pos)); + if (ret > 0 && (z->in.size > z->in.pos)) { error_setg(errp, "multifd %u: compressStream buffer too small", p->id); return -1; @@ -176,17 +147,7 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp) return 0; } -/** - * zstd_recv_setup: setup receive side - * - * Create the compressed channel and buffer. - * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp) +static int multifd_zstd_recv_setup(MultiFDRecvParams *p, Error **errp) { struct zstd_data *z = g_new0(struct zstd_data, 1); int ret; @@ -220,14 +181,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp) return 0; } -/** - * zstd_recv_cleanup: setup receive side - * - * For no compression this function does nothing. - * - * @p: Params for the channel that we are using - */ -static void zstd_recv_cleanup(MultiFDRecvParams *p) +static void multifd_zstd_recv_cleanup(MultiFDRecvParams *p) { struct zstd_data *z = p->compress_data; @@ -239,22 +193,12 @@ static void zstd_recv_cleanup(MultiFDRecvParams *p) p->compress_data = NULL; } -/** - * zstd_recv: read the data from the channel into actual pages - * - * Read the compressed buffer, and uncompress it into the actual - * pages. 
- * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static int zstd_recv(MultiFDRecvParams *p, Error **errp) +static int multifd_zstd_recv(MultiFDRecvParams *p, Error **errp) { uint32_t in_size = p->next_packet_size; uint32_t out_size = 0; - uint32_t expected_size = p->normal_num * p->page_size; + uint32_t page_size = multifd_ram_page_size(); + uint32_t expected_size = p->normal_num * page_size; uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; struct zstd_data *z = p->compress_data; int ret; @@ -286,7 +230,7 @@ static int zstd_recv(MultiFDRecvParams *p, Error **errp) for (i = 0; i < p->normal_num; i++) { ramblock_recv_bitmap_set_offset(p->block, p->normal[i]); z->out.dst = p->host + p->normal[i]; - z->out.size = p->page_size; + z->out.size = page_size; z->out.pos = 0; /* @@ -299,9 +243,9 @@ static int zstd_recv(MultiFDRecvParams *p, Error **errp) */ do { ret = ZSTD_decompressStream(z->zds, &z->out, &z->in); - } while (ret > 0 && (z->in.size - z->in.pos > 0) - && (z->out.pos < p->page_size)); - if (ret > 0 && (z->out.pos < p->page_size)) { + } while (ret > 0 && (z->in.size > z->in.pos) + && (z->out.pos < page_size)); + if (ret > 0 && (z->out.pos < page_size)) { error_setg(errp, "multifd %u: decompressStream buffer too small", p->id); return -1; @@ -321,13 +265,13 @@ static int zstd_recv(MultiFDRecvParams *p, Error **errp) return 0; } -static MultiFDMethods multifd_zstd_ops = { - .send_setup = zstd_send_setup, - .send_cleanup = zstd_send_cleanup, - .send_prepare = zstd_send_prepare, - .recv_setup = zstd_recv_setup, - .recv_cleanup = zstd_recv_cleanup, - .recv = zstd_recv +static const MultiFDMethods multifd_zstd_ops = { + .send_setup = multifd_zstd_send_setup, + .send_cleanup = multifd_zstd_send_cleanup, + .send_prepare = multifd_zstd_send_prepare, + .recv_setup = multifd_zstd_recv_setup, + .recv_cleanup = multifd_zstd_recv_cleanup, + .recv = multifd_zstd_recv }; static void multifd_zstd_register(void) diff --git a/migration/multifd.c b/migration/multifd.c index 552f9723c82..b2557788555 100644 --- a/migration/multifd.c +++ b/migration/multifd.c @@ -12,15 +12,18 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" +#include "qemu/iov.h" #include "qemu/rcu.h" #include "exec/target_page.h" -#include "sysemu/sysemu.h" -#include "exec/ramblock.h" +#include "system/system.h" +#include "system/ramblock.h" #include "qemu/error-report.h" #include "qapi/error.h" #include "file.h" +#include "migration/misc.h" #include "migration.h" #include "migration-stats.h" +#include "savevm.h" #include "socket.h" #include "tls.h" #include "qemu-file.h" @@ -33,11 +36,6 @@ #include "io/channel-socket.h" #include "yank_functions.h" -/* Multiple fd's */ - -#define MULTIFD_MAGIC 0x11223344U -#define MULTIFD_VERSION 1 - typedef struct { uint32_t magic; uint32_t version; @@ -49,8 +47,10 @@ typedef struct { struct { MultiFDSendParams *params; - /* array of pages to sent */ - MultiFDPages_t *pages; + + /* multifd_send() body is not thread safe, needs serialization */ + QemuMutex multifd_send_mutex; + /* * Global number of generated multifd packets. 
* @@ -78,7 +78,7 @@ struct { */ int exiting; /* multifd ops */ - MultiFDMethods *ops; + const MultiFDMethods *ops; } *multifd_send_state; struct { @@ -95,236 +95,70 @@ struct { uint64_t packet_num; int exiting; /* multifd ops */ - MultiFDMethods *ops; + const MultiFDMethods *ops; } *multifd_recv_state; -static bool multifd_use_packets(void) +MultiFDSendData *multifd_send_data_alloc(void) { - return !migrate_mapped_ram(); -} + MultiFDSendData *new = g_new0(MultiFDSendData, 1); -void multifd_send_channel_created(void) -{ - qemu_sem_post(&multifd_send_state->channels_created); -} - -static void multifd_set_file_bitmap(MultiFDSendParams *p) -{ - MultiFDPages_t *pages = p->pages; + multifd_ram_payload_alloc(&new->u.ram); + /* Device state allocates its payload on-demand */ - assert(pages->block); - - for (int i = 0; i < p->pages->normal_num; i++) { - ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], true); - } - - for (int i = p->pages->normal_num; i < p->pages->num; i++) { - ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], false); - } + return new; } -/* Multifd without compression */ - -/** - * nocomp_send_setup: setup send side - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static int nocomp_send_setup(MultiFDSendParams *p, Error **errp) +void multifd_send_data_clear(MultiFDSendData *data) { - if (migrate_zero_copy_send()) { - p->write_flags |= QIO_CHANNEL_WRITE_FLAG_ZERO_COPY; - } - - if (multifd_use_packets()) { - /* We need one extra place for the packet header */ - p->iov = g_new0(struct iovec, p->page_count + 1); - } else { - p->iov = g_new0(struct iovec, p->page_count); + if (multifd_payload_empty(data)) { + return; } - return 0; -} - -/** - * nocomp_send_cleanup: cleanup send side - * - * For no compression this function does nothing. - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp) -{ - g_free(p->iov); - p->iov = NULL; - return; -} - -static void multifd_send_prepare_iovs(MultiFDSendParams *p) -{ - MultiFDPages_t *pages = p->pages; - - for (int i = 0; i < pages->normal_num; i++) { - p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i]; - p->iov[p->iovs_num].iov_len = p->page_size; - p->iovs_num++; + switch (data->type) { + case MULTIFD_PAYLOAD_DEVICE_STATE: + multifd_send_data_clear_device_state(&data->u.device_state); + break; + default: + /* Nothing to do */ + break; } - p->next_packet_size = pages->normal_num * p->page_size; + data->type = MULTIFD_PAYLOAD_NONE; } -/** - * nocomp_send_prepare: prepare date to be able to send - * - * For no compression we just have to calculate the size of the - * packet. - * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp) +void multifd_send_data_free(MultiFDSendData *data) { - bool use_zero_copy_send = migrate_zero_copy_send(); - int ret; - - multifd_send_zero_page_detect(p); - - if (!multifd_use_packets()) { - multifd_send_prepare_iovs(p); - multifd_set_file_bitmap(p); - - return 0; - } - - if (!use_zero_copy_send) { - /* - * Only !zerocopy needs the header in IOV; zerocopy will - * send it separately. 
- */ - multifd_send_prepare_header(p); + if (!data) { + return; } - multifd_send_prepare_iovs(p); - p->flags |= MULTIFD_FLAG_NOCOMP; - - multifd_send_fill_packet(p); + /* This also free's device state payload */ + multifd_send_data_clear(data); - if (use_zero_copy_send) { - /* Send header first, without zerocopy */ - ret = qio_channel_write_all(p->c, (void *)p->packet, - p->packet_len, errp); - if (ret != 0) { - return -1; - } - } + multifd_ram_payload_free(&data->u.ram); - return 0; + g_free(data); } -/** - * nocomp_recv_setup: setup receive side - * - * For no compression this function does nothing. - * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static int nocomp_recv_setup(MultiFDRecvParams *p, Error **errp) -{ - p->iov = g_new0(struct iovec, p->page_count); - return 0; -} - -/** - * nocomp_recv_cleanup: setup receive side - * - * For no compression this function does nothing. - * - * @p: Params for the channel that we are using - */ -static void nocomp_recv_cleanup(MultiFDRecvParams *p) +static bool multifd_use_packets(void) { - g_free(p->iov); - p->iov = NULL; + return !migrate_mapped_ram(); } -/** - * nocomp_recv: read the data from the channel - * - * For no compression we just need to read things into the correct place. - * - * Returns 0 for success or -1 for error - * - * @p: Params for the channel that we are using - * @errp: pointer to an error - */ -static int nocomp_recv(MultiFDRecvParams *p, Error **errp) +void multifd_send_channel_created(void) { - uint32_t flags; - - if (!multifd_use_packets()) { - return multifd_file_recv_data(p, errp); - } - - flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK; - - if (flags != MULTIFD_FLAG_NOCOMP) { - error_setg(errp, "multifd %u: flags received %x flags expected %x", - p->id, flags, MULTIFD_FLAG_NOCOMP); - return -1; - } - - multifd_recv_zero_page_process(p); - - if (!p->normal_num) { - return 0; - } - - for (int i = 0; i < p->normal_num; i++) { - p->iov[i].iov_base = p->host + p->normal[i]; - p->iov[i].iov_len = p->page_size; - ramblock_recv_bitmap_set_offset(p->block, p->normal[i]); - } - return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp); + qemu_sem_post(&multifd_send_state->channels_created); } -static MultiFDMethods multifd_nocomp_ops = { - .send_setup = nocomp_send_setup, - .send_cleanup = nocomp_send_cleanup, - .send_prepare = nocomp_send_prepare, - .recv_setup = nocomp_recv_setup, - .recv_cleanup = nocomp_recv_cleanup, - .recv = nocomp_recv -}; - -static MultiFDMethods *multifd_ops[MULTIFD_COMPRESSION__MAX] = { - [MULTIFD_COMPRESSION_NONE] = &multifd_nocomp_ops, -}; +static const MultiFDMethods *multifd_ops[MULTIFD_COMPRESSION__MAX] = {}; -void multifd_register_ops(int method, MultiFDMethods *ops) +void multifd_register_ops(int method, const MultiFDMethods *ops) { - assert(0 < method && method < MULTIFD_COMPRESSION__MAX); + assert(0 <= method && method < MULTIFD_COMPRESSION__MAX); + assert(!multifd_ops[method]); multifd_ops[method] = ops; } -/* Reset a MultiFDPages_t* object for the next use */ -static void multifd_pages_reset(MultiFDPages_t *pages) -{ - /* - * We don't need to touch offset[] array, because it will be - * overwritten later when reused. 
- */ - pages->num = 0; - pages->normal_num = 0; - pages->block = NULL; -} - static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp) { MultiFDInit_t msg = {}; @@ -389,160 +223,95 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp) return msg.id; } -static MultiFDPages_t *multifd_pages_init(uint32_t n) -{ - MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1); - - pages->allocated = n; - pages->offset = g_new0(ram_addr_t, n); - - return pages; -} - -static void multifd_pages_clear(MultiFDPages_t *pages) -{ - multifd_pages_reset(pages); - pages->allocated = 0; - g_free(pages->offset); - pages->offset = NULL; - g_free(pages); -} - +/* Fills a RAM multifd packet */ void multifd_send_fill_packet(MultiFDSendParams *p) { MultiFDPacket_t *packet = p->packet; - MultiFDPages_t *pages = p->pages; uint64_t packet_num; - uint32_t zero_num = pages->num - pages->normal_num; - int i; + bool sync_packet = p->flags & MULTIFD_FLAG_SYNC; - packet->flags = cpu_to_be32(p->flags); - packet->pages_alloc = cpu_to_be32(p->pages->allocated); - packet->normal_pages = cpu_to_be32(pages->normal_num); - packet->zero_pages = cpu_to_be32(zero_num); + memset(packet, 0, p->packet_len); + + packet->hdr.magic = cpu_to_be32(MULTIFD_MAGIC); + packet->hdr.version = cpu_to_be32(MULTIFD_VERSION); + + packet->hdr.flags = cpu_to_be32(p->flags); packet->next_packet_size = cpu_to_be32(p->next_packet_size); packet_num = qatomic_fetch_inc(&multifd_send_state->packet_num); packet->packet_num = cpu_to_be64(packet_num); - if (pages->block) { - strncpy(packet->ramblock, pages->block->idstr, 256); - } - - for (i = 0; i < pages->num; i++) { - /* there are architectures where ram_addr_t is 32 bit */ - uint64_t temp = pages->offset[i]; + p->packets_sent++; - packet->offset[i] = cpu_to_be64(temp); + if (!sync_packet) { + multifd_ram_fill_packet(p); } - p->packets_sent++; - p->total_normal_pages += pages->normal_num; - p->total_zero_pages += zero_num; - - trace_multifd_send(p->id, packet_num, pages->normal_num, zero_num, - p->flags, p->next_packet_size); + trace_multifd_send_fill(p->id, packet_num, + p->flags, p->next_packet_size); } -static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) +static int multifd_recv_unfill_packet_header(MultiFDRecvParams *p, + const MultiFDPacketHdr_t *hdr, + Error **errp) { - MultiFDPacket_t *packet = p->packet; - int i; + uint32_t magic = be32_to_cpu(hdr->magic); + uint32_t version = be32_to_cpu(hdr->version); - packet->magic = be32_to_cpu(packet->magic); - if (packet->magic != MULTIFD_MAGIC) { - error_setg(errp, "multifd: received packet " - "magic %x and expected magic %x", - packet->magic, MULTIFD_MAGIC); + if (magic != MULTIFD_MAGIC) { + error_setg(errp, "multifd: received packet magic %x, expected %x", + magic, MULTIFD_MAGIC); return -1; } - packet->version = be32_to_cpu(packet->version); - if (packet->version != MULTIFD_VERSION) { - error_setg(errp, "multifd: received packet " - "version %u and expected version %u", - packet->version, MULTIFD_VERSION); + if (version != MULTIFD_VERSION) { + error_setg(errp, "multifd: received packet version %u, expected %u", + version, MULTIFD_VERSION); return -1; } - p->flags = be32_to_cpu(packet->flags); - - packet->pages_alloc = be32_to_cpu(packet->pages_alloc); - /* - * If we received a packet that is 100 times bigger than expected - * just stop migration. It is a magic number. 
- */ - if (packet->pages_alloc > p->page_count) { - error_setg(errp, "multifd: received packet " - "with size %u and expected a size of %u", - packet->pages_alloc, p->page_count) ; - return -1; - } + p->flags = be32_to_cpu(hdr->flags); - p->normal_num = be32_to_cpu(packet->normal_pages); - if (p->normal_num > packet->pages_alloc) { - error_setg(errp, "multifd: received packet " - "with %u normal pages and expected maximum pages are %u", - p->normal_num, packet->pages_alloc) ; - return -1; - } + return 0; +} - p->zero_num = be32_to_cpu(packet->zero_pages); - if (p->zero_num > packet->pages_alloc - p->normal_num) { - error_setg(errp, "multifd: received packet " - "with %u zero pages and expected maximum zero pages are %u", - p->zero_num, packet->pages_alloc - p->normal_num) ; - return -1; - } +static int multifd_recv_unfill_packet_device_state(MultiFDRecvParams *p, + Error **errp) +{ + MultiFDPacketDeviceState_t *packet = p->packet_dev_state; + packet->instance_id = be32_to_cpu(packet->instance_id); p->next_packet_size = be32_to_cpu(packet->next_packet_size); - p->packet_num = be64_to_cpu(packet->packet_num); - p->packets_recved++; - p->total_normal_pages += p->normal_num; - p->total_zero_pages += p->zero_num; - trace_multifd_recv(p->id, p->packet_num, p->normal_num, p->zero_num, - p->flags, p->next_packet_size); + return 0; +} - if (p->normal_num == 0 && p->zero_num == 0) { - return 0; - } +static int multifd_recv_unfill_packet_ram(MultiFDRecvParams *p, Error **errp) +{ + const MultiFDPacket_t *packet = p->packet; + int ret = 0; - /* make sure that ramblock is 0 terminated */ - packet->ramblock[255] = 0; - p->block = qemu_ram_block_by_name(packet->ramblock); - if (!p->block) { - error_setg(errp, "multifd: unknown ram block %s", - packet->ramblock); - return -1; - } + p->next_packet_size = be32_to_cpu(packet->next_packet_size); + p->packet_num = be64_to_cpu(packet->packet_num); - p->host = p->block->host; - for (i = 0; i < p->normal_num; i++) { - uint64_t offset = be64_to_cpu(packet->offset[i]); + /* Always unfill, old QEMUs (<9.0) send data along with SYNC */ + ret = multifd_ram_unfill_packet(p, errp); - if (offset > (p->block->used_length - p->page_size)) { - error_setg(errp, "multifd: offset too long %" PRIu64 - " (max " RAM_ADDR_FMT ")", - offset, p->block->used_length); - return -1; - } - p->normal[i] = offset; - } + trace_multifd_recv_unfill(p->id, p->packet_num, p->flags, + p->next_packet_size); - for (i = 0; i < p->zero_num; i++) { - uint64_t offset = be64_to_cpu(packet->offset[p->normal_num + i]); + return ret; +} - if (offset > (p->block->used_length - p->page_size)) { - error_setg(errp, "multifd: offset too long %" PRIu64 - " (max " RAM_ADDR_FMT ")", - offset, p->block->used_length); - return -1; - } - p->zero[i] = offset; +static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp) +{ + p->packets_recved++; + + if (p->flags & MULTIFD_FLAG_DEVICE_STATE) { + return multifd_recv_unfill_packet_device_state(p, errp); } - return 0; + return multifd_recv_unfill_packet_ram(p, errp); } static bool multifd_send_should_exit(void) @@ -568,35 +337,32 @@ static void multifd_send_kick_main(MultiFDSendParams *p) } /* - * How we use multifd_send_state->pages and channel->pages? + * multifd_send() works by exchanging the MultiFDSendData object + * provided by the caller with an unused MultiFDSendData object from + * the next channel that is found to be idle. * - * We create a pages for each channel, and a main one. 
Each time that - * we need to send a batch of pages we interchange the ones between - * multifd_send_state and the channel that is sending it. There are - * two reasons for that: - * - to not have to do so many mallocs during migration - * - to make easier to know what to free at the end of migration + * The channel owns the data until it finishes transmitting and the + * caller owns the empty object until it fills it with data and calls + * this function again. No locking necessary. * - * This way we always know who is the owner of each "pages" struct, - * and we don't need any locking. It belongs to the migration thread - * or to the channel thread. Switching is safe because the migration - * thread is using the channel mutex when changing it, and the channel - * have to had finish with its own, otherwise pending_job can't be - * false. + * Switching is safe because both the migration thread and the channel + * thread have barriers in place to serialize access. * * Returns true if succeed, false otherwise. */ -static bool multifd_send_pages(void) +bool multifd_send(MultiFDSendData **send_data) { int i; static int next_channel; MultiFDSendParams *p = NULL; /* make happy gcc */ - MultiFDPages_t *pages = multifd_send_state->pages; + MultiFDSendData *tmp; if (multifd_send_should_exit()) { return false; } + QEMU_LOCK_GUARD(&multifd_send_state->multifd_send_mutex); + /* We wait here, until at least one channel is ready */ qemu_sem_wait(&multifd_send_state->channels_ready); @@ -626,66 +392,24 @@ static bool multifd_send_pages(void) * qatomic_store_release() in multifd_send_thread(). */ smp_mb_acquire(); - assert(!p->pages->num); - multifd_send_state->pages = p->pages; - p->pages = pages; - /* - * Making sure p->pages is setup before marking pending_job=true. Pairs - * with the qatomic_load_acquire() in multifd_send_thread(). - */ - qatomic_store_release(&p->pending_job, true); - qemu_sem_post(&p->sem); - return true; -} - -static inline bool multifd_queue_empty(MultiFDPages_t *pages) -{ - return pages->num == 0; -} - -static inline bool multifd_queue_full(MultiFDPages_t *pages) -{ - return pages->num == pages->allocated; -} + assert(multifd_payload_empty(p->data)); -static inline void multifd_enqueue(MultiFDPages_t *pages, ram_addr_t offset) -{ - pages->offset[pages->num++] = offset; -} - -/* Returns true if enqueue successful, false otherwise */ -bool multifd_queue_page(RAMBlock *block, ram_addr_t offset) -{ - MultiFDPages_t *pages; - -retry: - pages = multifd_send_state->pages; - - /* If the queue is empty, we can already enqueue now */ - if (multifd_queue_empty(pages)) { - pages->block = block; - multifd_enqueue(pages, offset); - return true; - } + /* + * Swap the pointers. The channel gets the client data for + * transferring and the client gets back an unused data slot. + */ + tmp = *send_data; + *send_data = p->data; + p->data = tmp; /* - * Not empty, meanwhile we need a flush. It can because of either: - * - * (1) The page is not on the same ramblock of previous ones, or, - * (2) The queue is full. - * - * After flush, always retry. + * Making sure p->data is setup before marking pending_job=true. Pairs + * with the qatomic_load_acquire() in multifd_send_thread(). */ - if (pages->block != block || multifd_queue_full(pages)) { - if (!multifd_send_pages()) { - return false; - } - goto retry; - } + qatomic_store_release(&p->pending_job, true); + qemu_sem_post(&p->sem); - /* Not empty, and we still have space, do it! 
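The swap in multifd_send() above is the whole synchronization story on the fast path: the caller and the idle channel exchange MultiFDSendData pointers, and the only ordering required is the release store on pending_job paired with the acquire load in the sender thread. A self-contained sketch of that idiom with C11 atomics (hypothetical *_model names; QEMU itself uses qatomic_store_release()/qatomic_load_acquire()):

#include <stdatomic.h>
#include <stdbool.h>

struct payload_model { int filled; };

struct channel_model {
    struct payload_model *data;      /* owned by the caller while pending_job is false */
    atomic_bool pending_job;
};

/* Caller side: hand a full payload to an idle channel, take its empty one back. */
static void send_model(struct channel_model *ch, struct payload_model **caller_data)
{
    struct payload_model *tmp = *caller_data;

    *caller_data = ch->data;         /* caller now holds the empty object */
    ch->data = tmp;                  /* channel now owns the full object */

    /* Publish ch->data before flagging work; pairs with the acquire load below. */
    atomic_store_explicit(&ch->pending_job, true, memory_order_release);
}

/* Sender thread side: only touch ch->data after the acquire load sees the flag. */
static bool channel_poll_model(struct channel_model *ch)
{
    if (!atomic_load_explicit(&ch->pending_job, memory_order_acquire)) {
        return false;
    }
    ch->data->filled = 0;            /* ... transmit and clear the payload ... */
    /* Publish the cleared payload before handing the slot back to the caller. */
    atomic_store_explicit(&ch->pending_job, false, memory_order_release);
    return true;
}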
*/ - multifd_enqueue(pages, offset); return true; } @@ -775,7 +499,7 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp) * channels have no I/O handler callback registered when reaching * here, because migration thread will wait for all multifd channel * establishments to complete during setup. Since - * migrate_fd_cleanup() will be scheduled in main thread too, all + * migration_cleanup() will be scheduled in main thread too, all * previous callbacks should guarantee to be completed when * reaching here. See multifd_send_state.channels_created and its * usage. In the future, we could replace this with an assert @@ -790,12 +514,13 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp) qemu_sem_destroy(&p->sem_sync); g_free(p->name); p->name = NULL; - multifd_pages_clear(p->pages); - p->pages = NULL; + g_clear_pointer(&p->data, multifd_send_data_free); p->packet_len = 0; + g_clear_pointer(&p->packet_device_state, g_free); g_free(p->packet); p->packet = NULL; multifd_send_state->ops->send_cleanup(p, errp); + assert(!p->iov); return *errp == NULL; } @@ -804,12 +529,12 @@ static void multifd_send_cleanup_state(void) { file_cleanup_outgoing_migration(); socket_cleanup_outgoing_migration(); + multifd_device_state_send_cleanup(); qemu_sem_destroy(&multifd_send_state->channels_created); qemu_sem_destroy(&multifd_send_state->channels_ready); + qemu_mutex_destroy(&multifd_send_state->multifd_send_mutex); g_free(multifd_send_state->params); multifd_send_state->params = NULL; - multifd_pages_clear(multifd_send_state->pages); - multifd_send_state->pages = NULL; g_free(multifd_send_state); multifd_send_state = NULL; } @@ -822,6 +547,36 @@ void multifd_send_shutdown(void) return; } + for (i = 0; i < migrate_multifd_channels(); i++) { + MultiFDSendParams *p = &multifd_send_state->params[i]; + + /* thread_created implies the TLS handshake has succeeded */ + if (p->tls_thread_created && p->thread_created) { + Error *local_err = NULL; + /* + * The destination expects the TLS session to always be + * properly terminated. This helps to detect a premature + * termination in the middle of the stream. Note that + * older QEMUs always break the connection on the source + * and the destination always sees + * GNUTLS_E_PREMATURE_TERMINATION. + */ + migration_tls_channel_end(p->c, &local_err); + + /* + * The above can return an error in case the migration has + * already failed. If the migration succeeded, errors are + * not expected but there's no need to kill the source. + */ + if (local_err && !migration_has_failed(migrate_get_current())) { + warn_report( + "multifd_send_%d: Failed to terminate TLS connection: %s", + p->id, error_get_pretty(local_err)); + break; + } + } + } + multifd_send_terminate_threads(); for (i = 0; i < migrate_multifd_channels(); i++) { @@ -854,20 +609,12 @@ static int multifd_zero_copy_flush(QIOChannel *c) return ret; } -int multifd_send_sync_main(void) +int multifd_send_sync_main(MultiFDSyncReq req) { int i; bool flush_zero_copy; - if (!migrate_multifd()) { - return 0; - } - if (multifd_send_state->pages->num) { - if (!multifd_send_pages()) { - error_report("%s: multifd_send_pages fail", __func__); - return -1; - } - } + assert(req != MULTIFD_SYNC_NONE); flush_zero_copy = migrate_zero_copy_send(); @@ -884,8 +631,8 @@ int multifd_send_sync_main(void) * We should be the only user so far, so not possible to be set by * others concurrently. 
*/ - assert(qatomic_read(&p->pending_sync) == false); - qatomic_set(&p->pending_sync, true); + assert(qatomic_read(&p->pending_sync) == MULTIFD_SYNC_NONE); + qatomic_set(&p->pending_sync, req); qemu_sem_post(&p->sem); } for (i = 0; i < migrate_multifd_channels(); i++) { @@ -937,26 +684,46 @@ static void *multifd_send_thread(void *opaque) } /* - * Read pending_job flag before p->pages. Pairs with the - * qatomic_store_release() in multifd_send_pages(). + * Read pending_job flag before p->data. Pairs with the + * qatomic_store_release() in multifd_send(). */ if (qatomic_load_acquire(&p->pending_job)) { - MultiFDPages_t *pages = p->pages; + bool is_device_state = multifd_payload_device_state(p->data); + size_t total_size; + int write_flags_masked = 0; + p->flags = 0; p->iovs_num = 0; - assert(pages->num); + assert(!multifd_payload_empty(p->data)); - ret = multifd_send_state->ops->send_prepare(p, &local_err); - if (ret != 0) { - break; + if (is_device_state) { + multifd_device_state_send_prepare(p); + + /* Device state packets cannot be sent via zerocopy */ + write_flags_masked |= QIO_CHANNEL_WRITE_FLAG_ZERO_COPY; + } else { + ret = multifd_send_state->ops->send_prepare(p, &local_err); + if (ret != 0) { + break; + } } + /* + * The packet header in the zerocopy RAM case is accounted for + * in multifd_nocomp_send_prepare() - where it is actually + * being sent. + */ + total_size = iov_size(p->iov, p->iovs_num); + if (migrate_mapped_ram()) { + assert(!is_device_state); + ret = file_write_ramblock_iov(p->c, p->iov, p->iovs_num, - p->pages->block, &local_err); + &p->data->u.ram, &local_err); } else { ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, - NULL, 0, p->write_flags, + NULL, 0, + p->write_flags & ~write_flags_masked, &local_err); } @@ -964,29 +731,29 @@ static void *multifd_send_thread(void *opaque) break; } - stat64_add(&mig_stats.multifd_bytes, - p->next_packet_size + p->packet_len); - stat64_add(&mig_stats.normal_pages, pages->normal_num); - stat64_add(&mig_stats.zero_pages, pages->num - pages->normal_num); + stat64_add(&mig_stats.multifd_bytes, total_size); - multifd_pages_reset(p->pages); p->next_packet_size = 0; + multifd_send_data_clear(p->data); /* - * Making sure p->pages is published before saying "we're + * Making sure p->data is published before saying "we're * free". Pairs with the smp_mb_acquire() in - * multifd_send_pages(). + * multifd_send(). */ qatomic_store_release(&p->pending_job, false); } else { + MultiFDSyncReq req = qatomic_read(&p->pending_sync); + /* * If not a normal job, must be a sync request. Note that * pending_sync is a standalone flag (unlike pending_job), so * it doesn't require explicit memory barriers. 
*/ - assert(qatomic_read(&p->pending_sync)); + assert(req != MULTIFD_SYNC_NONE); - if (use_packets) { + /* Only push the SYNC message if it involves a remote sync */ + if (req == MULTIFD_SYNC_ALL) { p->flags = MULTIFD_FLAG_SYNC; multifd_send_fill_packet(p); ret = qio_channel_write_all(p->c, (void *)p->packet, @@ -996,10 +763,9 @@ static void *multifd_send_thread(void *opaque) } /* p->next_packet_size will always be zero for a SYNC packet */ stat64_add(&mig_stats.multifd_bytes, p->packet_len); - p->flags = 0; } - qatomic_set(&p->pending_sync, false); + qatomic_set(&p->pending_sync, MULTIFD_SYNC_NONE); qemu_sem_post(&p->sem_sync); } } @@ -1015,8 +781,7 @@ static void *multifd_send_thread(void *opaque) rcu_unregister_thread(); migration_threads_remove(thread); - trace_multifd_send_thread_end(p->id, p->packets_sent, p->total_normal_pages, - p->total_zero_pages); + trace_multifd_send_thread_end(p->id, p->packets_sent); return NULL; } @@ -1069,7 +834,7 @@ static bool multifd_tls_channel_connect(MultiFDSendParams *p, args->p = p; p->tls_thread_created = true; - qemu_thread_create(&p->tls_thread, "mig/src/tls", + qemu_thread_create(&p->tls_thread, MIGRATION_THREAD_SRC_TLS, multifd_tls_handshake_thread, args, QEMU_THREAD_JOINABLE); return true; @@ -1157,7 +922,7 @@ bool multifd_send_setup(void) { MigrationState *s = migrate_get_current(); int thread_count, ret = 0; - uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size(); + uint32_t page_count = multifd_ram_page_count(); bool use_packets = multifd_use_packets(); uint8_t i; @@ -1168,7 +933,7 @@ bool multifd_send_setup(void) thread_count = migrate_multifd_channels(); multifd_send_state = g_malloc0(sizeof(*multifd_send_state)); multifd_send_state->params = g_new0(MultiFDSendParams, thread_count); - multifd_send_state->pages = multifd_pages_init(page_count); + qemu_mutex_init(&multifd_send_state->multifd_send_mutex); qemu_sem_init(&multifd_send_state->channels_created, 0); qemu_sem_init(&multifd_send_state->channels_ready, 0); qatomic_set(&multifd_send_state->exiting, 0); @@ -1181,18 +946,17 @@ bool multifd_send_setup(void) qemu_sem_init(&p->sem, 0); qemu_sem_init(&p->sem_sync, 0); p->id = i; - p->pages = multifd_pages_init(page_count); + p->data = multifd_send_data_alloc(); if (use_packets) { p->packet_len = sizeof(MultiFDPacket_t) + sizeof(uint64_t) * page_count; p->packet = g_malloc0(p->packet_len); - p->packet->magic = cpu_to_be32(MULTIFD_MAGIC); - p->packet->version = cpu_to_be32(MULTIFD_VERSION); + p->packet_device_state = g_malloc0(sizeof(*p->packet_device_state)); + p->packet_device_state->hdr.magic = cpu_to_be32(MULTIFD_MAGIC); + p->packet_device_state->hdr.version = cpu_to_be32(MULTIFD_VERSION); } - p->name = g_strdup_printf("mig/src/send_%d", i); - p->page_size = qemu_target_page_size(); - p->page_count = page_count; + p->name = g_strdup_printf(MIGRATION_THREAD_SRC_MULTIFD, i); p->write_flags = 0; if (!multifd_new_send_channel_create(p, &local_err)) { @@ -1223,8 +987,11 @@ bool multifd_send_setup(void) migrate_set_error(s, local_err); goto err; } + assert(p->iov); } + multifd_device_state_send_setup(); + return true; err: @@ -1357,11 +1124,14 @@ static void multifd_recv_cleanup_channel(MultiFDRecvParams *p) qemu_mutex_destroy(&p->mutex); qemu_sem_destroy(&p->sem_sync); qemu_sem_destroy(&p->sem); + g_free(p->data); + p->data = NULL; g_free(p->name); p->name = NULL; p->packet_len = 0; g_free(p->packet); p->packet = NULL; + g_clear_pointer(&p->packet_dev_state, g_free); g_free(p->normal); p->normal = NULL; g_free(p->zero); @@ 
-1463,8 +1233,37 @@ void multifd_recv_sync_main(void) trace_multifd_recv_sync_main(multifd_recv_state->packet_num); } +static int multifd_device_state_recv(MultiFDRecvParams *p, Error **errp) +{ + g_autofree char *dev_state_buf = NULL; + int ret; + + dev_state_buf = g_malloc(p->next_packet_size); + + ret = qio_channel_read_all(p->c, dev_state_buf, p->next_packet_size, errp); + if (ret != 0) { + return ret; + } + + if (p->packet_dev_state->idstr[sizeof(p->packet_dev_state->idstr) - 1] + != 0) { + error_setg(errp, "unterminated multifd device state idstr"); + return -1; + } + + if (!qemu_loadvm_load_state_buffer(p->packet_dev_state->idstr, + p->packet_dev_state->instance_id, + dev_state_buf, p->next_packet_size, + errp)) { + ret = -1; + } + + return ret; +} + static void *multifd_recv_thread(void *opaque) { + MigrationState *s = migrate_get_current(); MultiFDRecvParams *p = opaque; Error *local_err = NULL; bool use_packets = multifd_use_packets(); @@ -1473,19 +1272,65 @@ static void *multifd_recv_thread(void *opaque) trace_multifd_recv_thread_start(p->id); rcu_register_thread(); + if (!s->multifd_clean_tls_termination) { + p->read_flags = QIO_CHANNEL_READ_FLAG_RELAXED_EOF; + } + while (true) { + MultiFDPacketHdr_t hdr; uint32_t flags = 0; + bool is_device_state = false; bool has_data = false; + uint8_t *pkt_buf; + size_t pkt_len; + p->normal_num = 0; if (use_packets) { + struct iovec iov = { + .iov_base = (void *)&hdr, + .iov_len = sizeof(hdr) + }; + if (multifd_recv_should_exit()) { break; } - ret = qio_channel_read_all_eof(p->c, (void *)p->packet, - p->packet_len, &local_err); - if (ret == 0 || ret == -1) { /* 0: EOF -1: Error */ + ret = qio_channel_readv_full_all_eof(p->c, &iov, 1, NULL, NULL, + p->read_flags, &local_err); + if (!ret) { + /* EOF */ + assert(!local_err); + break; + } + + if (ret == -1) { + break; + } + + ret = multifd_recv_unfill_packet_header(p, &hdr, &local_err); + if (ret) { + break; + } + + is_device_state = p->flags & MULTIFD_FLAG_DEVICE_STATE; + if (is_device_state) { + pkt_buf = (uint8_t *)p->packet_dev_state + sizeof(hdr); + pkt_len = sizeof(*p->packet_dev_state) - sizeof(hdr); + } else { + pkt_buf = (uint8_t *)p->packet + sizeof(hdr); + pkt_len = p->packet_len - sizeof(hdr); + } + + ret = qio_channel_read_all_eof(p->c, (char *)pkt_buf, pkt_len, + &local_err); + if (!ret) { + /* EOF */ + error_setg(&local_err, "multifd: unexpected EOF after packet header"); + break; + } + + if (ret == -1) { break; } @@ -1499,7 +1344,18 @@ static void *multifd_recv_thread(void *opaque) flags = p->flags; /* recv methods don't know how to handle the SYNC flag */ p->flags &= ~MULTIFD_FLAG_SYNC; - has_data = p->normal_num || p->zero_num; + + if (is_device_state) { + has_data = p->next_packet_size > 0; + } else { + /* + * Even if it's a SYNC packet, this needs to be set + * because older QEMUs (<9.0) still send data along with + * the SYNC packet. + */ + has_data = p->normal_num || p->zero_num; + } + qemu_mutex_unlock(&p->mutex); } else { /* @@ -1528,19 +1384,40 @@ static void *multifd_recv_thread(void *opaque) } if (has_data) { - ret = multifd_recv_state->ops->recv(p, &local_err); + /* + * multifd thread should not be active and receive data + * when migration is in the Postcopy phase. Two threads + * writing the same memory area could easily corrupt + * the guest state. 
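The receive loop above now reads in two phases: first the fixed MultiFDPacketHdr_t, whose magic, version and flags are validated, then the remainder of either a RAM packet or a device-state packet depending on MULTIFD_FLAG_DEVICE_STATE. A self-contained sketch of that dispatch over a plain file descriptor (hypothetical *_model names and a read_full_model() helper standing in for the QIOChannel calls; the constants match the values defined in multifd.h later in this patch):

#include <stdint.h>
#include <stddef.h>
#include <unistd.h>
#include <arpa/inet.h>   /* ntohl() for the big-endian wire fields */

#define MAGIC_MODEL        0x11223344u
#define FLAG_DEVICE_STATE  (32u << 1)    /* matches MULTIFD_FLAG_DEVICE_STATE */

struct hdr_model {
    uint32_t magic;
    uint32_t version;
    uint32_t flags;
} __attribute__((packed));

/* Read exactly @len bytes or fail; stands in for qio_channel_read_all_eof(). */
static int read_full_model(int fd, void *buf, size_t len)
{
    size_t done = 0;

    while (done < len) {
        ssize_t r = read(fd, (char *)buf + done, len - done);
        if (r <= 0) {
            return -1;
        }
        done += r;
    }
    return 0;
}

static int recv_packet_model(int fd, void *ram_pkt, size_t ram_len,
                             void *dev_pkt, size_t dev_len)
{
    struct hdr_model hdr;

    if (read_full_model(fd, &hdr, sizeof(hdr))) {
        return -1;
    }
    if (ntohl(hdr.magic) != MAGIC_MODEL || ntohl(hdr.version) != 1) {
        return -1;                   /* bad magic or version: stop the stream */
    }

    /* The header is shared; its flag decides which packet body follows it. */
    if (ntohl(hdr.flags) & FLAG_DEVICE_STATE) {
        return read_full_model(fd, (char *)dev_pkt + sizeof(hdr),
                               dev_len - sizeof(hdr));
    }
    return read_full_model(fd, (char *)ram_pkt + sizeof(hdr),
                           ram_len - sizeof(hdr));
}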
+ */ + assert(!migration_in_postcopy()); + if (is_device_state) { + assert(use_packets); + ret = multifd_device_state_recv(p, &local_err); + } else { + ret = multifd_recv_state->ops->recv(p, &local_err); + } if (ret != 0) { break; } + } else if (is_device_state) { + error_setg(&local_err, + "multifd: received empty device state packet"); + break; } if (use_packets) { if (flags & MULTIFD_FLAG_SYNC) { + if (is_device_state) { + error_setg(&local_err, + "multifd: received SYNC device state packet"); + break; + } + qemu_sem_post(&multifd_recv_state->sem_sync); qemu_sem_wait(&p->sem_sync); } } else { - p->total_normal_pages += p->data->size / qemu_target_page_size(); p->data->size = 0; /* * Order data->size update before clearing @@ -1557,9 +1434,7 @@ static void *multifd_recv_thread(void *opaque) } rcu_unregister_thread(); - trace_multifd_recv_thread_end(p->id, p->packets_recved, - p->total_normal_pages, - p->total_zero_pages); + trace_multifd_recv_thread_end(p->id, p->packets_recved); return NULL; } @@ -1567,7 +1442,7 @@ static void *multifd_recv_thread(void *opaque) int multifd_recv_setup(Error **errp) { int thread_count; - uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size(); + uint32_t page_count = multifd_ram_page_count(); bool use_packets = multifd_use_packets(); uint8_t i; @@ -1607,12 +1482,11 @@ int multifd_recv_setup(Error **errp) p->packet_len = sizeof(MultiFDPacket_t) + sizeof(uint64_t) * page_count; p->packet = g_malloc0(p->packet_len); + p->packet_dev_state = g_malloc0(sizeof(*p->packet_dev_state)); } - p->name = g_strdup_printf("mig/dst/recv_%d", i); + p->name = g_strdup_printf(MIGRATION_THREAD_DST_MULTIFD, i); p->normal = g_new0(ram_addr_t, page_count); p->zero = g_new0(ram_addr_t, page_count); - p->page_count = page_count; - p->page_size = qemu_target_page_size(); } for (i = 0; i < thread_count; i++) { @@ -1685,17 +1559,3 @@ void multifd_recv_new_channel(QIOChannel *ioc, Error **errp) QEMU_THREAD_JOINABLE); qatomic_inc(&multifd_recv_state->count); } - -bool multifd_send_prepare_common(MultiFDSendParams *p) -{ - multifd_send_zero_page_detect(p); - - if (!p->pages->normal_num) { - p->next_packet_size = 0; - return false; - } - - multifd_send_prepare_header(p); - - return true; -} diff --git a/migration/multifd.h b/migration/multifd.h index 0ecd6f47d73..9b6d81e7ede 100644 --- a/migration/multifd.h +++ b/migration/multifd.h @@ -13,9 +13,27 @@ #ifndef QEMU_MIGRATION_MULTIFD_H #define QEMU_MIGRATION_MULTIFD_H +#include "exec/target_page.h" #include "ram.h" typedef struct MultiFDRecvData MultiFDRecvData; +typedef struct MultiFDSendData MultiFDSendData; + +typedef enum { + /* No sync request */ + MULTIFD_SYNC_NONE = 0, + /* Sync locally on the sender threads without pushing messages */ + MULTIFD_SYNC_LOCAL, + /* + * Sync not only on the sender threads, but also push MULTIFD_FLAG_SYNC + * message to the wire for each iochannel (which is for a remote sync). + * + * When remote sync is used, need to be paired with a follow up + * RAM_SAVE_FLAG_EOS / RAM_SAVE_FLAG_MULTIFD_FLUSH message on the main + * channel. 
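In other words, MULTIFD_SYNC_LOCAL only drains and synchronizes the sender threads, while MULTIFD_SYNC_ALL additionally pushes a MULTIFD_FLAG_SYNC packet on every channel and therefore has to be paired with a flush marker on the main stream. A rough sketch of the calling convention this implies (the real helper is multifd_ram_flush_and_sync(), declared further down in this header; the wrapper names here are hypothetical):

/* Remote sync: every channel thread emits MULTIFD_FLAG_SYNC on the wire... */
static int flush_and_sync_model(QEMUFile *f)
{
    if (multifd_send_sync_main(MULTIFD_SYNC_ALL) < 0) {
        return -1;
    }
    /* ...and it must be paired with the flush marker on the main channel. */
    qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
    return 0;
}

/* Local sync: just wait for the sender threads, nothing goes on the wire. */
static int local_sync_model(void)
{
    return multifd_send_sync_main(MULTIFD_SYNC_LOCAL);
}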
+ */ + MULTIFD_SYNC_ALL, +} MultiFDSyncReq; bool multifd_send_setup(void); void multifd_send_shutdown(void); @@ -26,22 +44,34 @@ void multifd_recv_shutdown(void); bool multifd_recv_all_channels_created(void); void multifd_recv_new_channel(QIOChannel *ioc, Error **errp); void multifd_recv_sync_main(void); -int multifd_send_sync_main(void); +int multifd_send_sync_main(MultiFDSyncReq req); bool multifd_queue_page(RAMBlock *block, ram_addr_t offset); bool multifd_recv(void); MultiFDRecvData *multifd_get_recv_data(void); +/* Multiple fd's */ + +#define MULTIFD_MAGIC 0x11223344U +#define MULTIFD_VERSION 1 + /* Multifd Compression flags */ #define MULTIFD_FLAG_SYNC (1 << 0) -/* We reserve 4 bits for compression methods */ -#define MULTIFD_FLAG_COMPRESSION_MASK (0xf << 1) +/* We reserve 5 bits for compression methods */ +#define MULTIFD_FLAG_COMPRESSION_MASK (0x1f << 1) /* we need to be compatible. Before compression value was 0 */ #define MULTIFD_FLAG_NOCOMP (0 << 1) #define MULTIFD_FLAG_ZLIB (1 << 1) #define MULTIFD_FLAG_ZSTD (2 << 1) #define MULTIFD_FLAG_QPL (4 << 1) #define MULTIFD_FLAG_UADK (8 << 1) +#define MULTIFD_FLAG_QATZIP (16 << 1) + +/* + * If set it means that this packet contains device state + * (MultiFDPacketDeviceState_t), not RAM data (MultiFDPacket_t). + */ +#define MULTIFD_FLAG_DEVICE_STATE (32 << 1) /* This value needs to be a multiple of qemu_target_page_size() */ #define MULTIFD_PACKET_SIZE (512 * 1024) @@ -50,6 +80,11 @@ typedef struct { uint32_t magic; uint32_t version; uint32_t flags; +} __attribute__((packed)) MultiFDPacketHdr_t; + +typedef struct { + MultiFDPacketHdr_t hdr; + /* maximum number of allocated pages */ uint32_t pages_alloc; /* non zero pages */ @@ -70,16 +105,28 @@ typedef struct { uint64_t offset[]; } __attribute__((packed)) MultiFDPacket_t; +typedef struct { + MultiFDPacketHdr_t hdr; + + char idstr[256]; + uint32_t instance_id; + + /* size of the next packet that contains the actual data */ + uint32_t next_packet_size; +} __attribute__((packed)) MultiFDPacketDeviceState_t; + typedef struct { /* number of used pages */ uint32_t num; /* number of normal pages */ uint32_t normal_num; - /* number of allocated pages */ - uint32_t allocated; - /* offset of each page */ - ram_addr_t *offset; + /* + * Pointer to the ramblock. NOTE: it's caller's responsibility to make + * sure the pointer is always valid! 
+ */ RAMBlock *block; + /* offset array of each page, managed by multifd */ + ram_addr_t *offset; } MultiFDPages_t; struct MultiFDRecvData { @@ -89,6 +136,48 @@ struct MultiFDRecvData { off_t file_offset; }; +typedef struct { + char *idstr; + uint32_t instance_id; + char *buf; + size_t buf_len; +} MultiFDDeviceState_t; + +typedef enum { + MULTIFD_PAYLOAD_NONE, + MULTIFD_PAYLOAD_RAM, + MULTIFD_PAYLOAD_DEVICE_STATE, +} MultiFDPayloadType; + +typedef struct MultiFDPayload { + MultiFDPages_t ram; + MultiFDDeviceState_t device_state; +} MultiFDPayload; + +struct MultiFDSendData { + MultiFDPayloadType type; + MultiFDPayload u; +}; + +static inline bool multifd_payload_empty(MultiFDSendData *data) +{ + return data->type == MULTIFD_PAYLOAD_NONE; +} + +static inline bool multifd_payload_device_state(MultiFDSendData *data) +{ + return data->type == MULTIFD_PAYLOAD_DEVICE_STATE; +} + +static inline void multifd_set_payload_type(MultiFDSendData *data, + MultiFDPayloadType type) +{ + assert(multifd_payload_empty(data)); + assert(type != MULTIFD_PAYLOAD_NONE); + + data->type = type; +} + typedef struct { /* Fields are only written at creating/deletion time */ /* No lock required for them, they are read only */ @@ -106,10 +195,6 @@ typedef struct { QIOChannel *c; /* packet allocated len */ uint32_t packet_len; - /* guest page size */ - uint32_t page_size; - /* number of pages in a full packet */ - uint32_t page_count; /* multifd flags for sending ram */ int write_flags; @@ -121,7 +206,7 @@ typedef struct { /* multifd flags for each packet */ uint32_t flags; /* - * The sender thread has work to do if either of below boolean is set. + * The sender thread has work to do if either of below field is set. * * @pending_job: a job is pending * @pending_sync: a sync request is pending @@ -130,26 +215,19 @@ typedef struct { * cleared by the multifd sender threads. */ bool pending_job; - bool pending_sync; - /* array of pages to sent. - * The owner of 'pages' depends of 'pending_job' value: - * pending_job == 0 -> migration_thread can use it. - * pending_job != 0 -> multifd_channel can use it. - */ - MultiFDPages_t *pages; + MultiFDSyncReq pending_sync; + + MultiFDSendData *data; /* thread local variables. No locking required */ - /* pointer to the packet */ + /* pointers to the possible packet types */ MultiFDPacket_t *packet; + MultiFDPacketDeviceState_t *packet_device_state; /* size of the next packet that contains pages */ uint32_t next_packet_size; /* packets sent through this channel */ uint64_t packets_sent; - /* non zero pages sent through this channel */ - uint64_t total_normal_pages; - /* zero pages sent through this channel */ - uint64_t total_zero_pages; /* buffers to send */ struct iovec *iov; /* number of iovs used */ @@ -173,10 +251,6 @@ typedef struct { QIOChannel *c; /* packet allocated len */ uint32_t packet_len; - /* guest page size */ - uint32_t page_size; - /* number of pages in a full packet */ - uint32_t page_count; /* syncs main thread and channels */ QemuSemaphore sem_sync; @@ -196,8 +270,9 @@ typedef struct { /* thread local variables. 
No locking required */ - /* pointer to the packet */ + /* pointers to the possible packet types */ MultiFDPacket_t *packet; + MultiFDPacketDeviceState_t *packet_dev_state; /* size of the next packet that contains pages */ uint32_t next_packet_size; /* packets received through this channel */ @@ -206,10 +281,6 @@ typedef struct { RAMBlock *block; /* ramblock host address */ uint8_t *host; - /* non zero pages recv through this channel */ - uint64_t total_normal_pages; - /* zero pages recv through this channel */ - uint64_t total_zero_pages; /* buffers to recv */ struct iovec *iov; /* Pages that are not zero */ @@ -222,36 +293,126 @@ typedef struct { uint32_t zero_num; /* used for de-compression methods */ void *compress_data; + /* Flags for the QIOChannel */ + int read_flags; } MultiFDRecvParams; typedef struct { - /* Setup for sending side */ + /* + * The send_setup, send_cleanup, send_prepare are only called on + * the QEMU instance at the migration source. + */ + + /* + * Setup for sending side. Called once per channel during channel + * setup phase. + * + * Must allocate p->iov. If packets are in use (default), one + * extra iovec must be allocated for the packet header. Any memory + * allocated in this hook must be released at send_cleanup. + * + * p->write_flags may be used for passing flags to the QIOChannel. + * + * p->compression_data may be used by compression methods to store + * compression data. + */ int (*send_setup)(MultiFDSendParams *p, Error **errp); - /* Cleanup for sending side */ + + /* + * Cleanup for sending side. Called once per channel during + * channel cleanup phase. + */ void (*send_cleanup)(MultiFDSendParams *p, Error **errp); - /* Prepare the send packet */ + + /* + * Prepare the send packet. Called as a result of multifd_send() + * on the client side, with p pointing to the MultiFDSendParams of + * a channel that is currently idle. + * + * Must populate p->iov with the data to be sent, increment + * p->iovs_num to match the amount of iovecs used and set + * p->next_packet_size with the amount of data currently present + * in p->iov. + * + * Must indicate whether this is a compression packet by setting + * p->flags. + * + * As a last step, if packets are in use (default), must prepare + * the packet by calling multifd_send_fill_packet(). + */ int (*send_prepare)(MultiFDSendParams *p, Error **errp); - /* Setup for receiving side */ + + /* + * The recv_setup, recv_cleanup, recv are only called on the QEMU + * instance at the migration destination. + */ + + /* + * Setup for receiving side. Called once per channel during + * channel setup phase. May be empty. + * + * May allocate data structures for the receiving of data. May use + * p->iov. Compression methods may use p->compress_data. + */ int (*recv_setup)(MultiFDRecvParams *p, Error **errp); - /* Cleanup for receiving side */ + + /* + * Cleanup for receiving side. Called once per channel during + * channel cleanup phase. May be empty. + */ void (*recv_cleanup)(MultiFDRecvParams *p); - /* Read all data */ + + /* + * Data receive method. Called as a result of multifd_recv() on + * the client side, with p pointing to the MultiFDRecvParams of a + * channel that is currently idle. Only called if there is data + * available to receive. + * + * Must validate p->flags according to what was set at + * send_prepare. + * + * Must read the data from the QIOChannel p->c. 
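Taken together, these hooks let a compression backend plug into multifd without touching the generic code. A minimal skeleton of what such a backend registers (hypothetical "mymeth" names; the real examples are multifd-zlib.c, multifd-zstd.c and friends, and error handling plus the receive hooks are omitted for brevity):

static int mymeth_send_setup(MultiFDSendParams *p, Error **errp)
{
    /* One extra iovec is reserved for the packet header. */
    p->iov = g_new0(struct iovec, multifd_ram_page_count() + 1);
    return 0;
}

static void mymeth_send_cleanup(MultiFDSendParams *p, Error **errp)
{
    g_free(p->iov);
    p->iov = NULL;                   /* the generic cleanup asserts !p->iov */
}

static int mymeth_send_prepare(MultiFDSendParams *p, Error **errp)
{
    if (multifd_send_prepare_common(p)) {
        /* compress the normal pages into p->iov and set p->next_packet_size */
    }
    /* A real backend sets its own MULTIFD_FLAG_* compression bit in p->flags. */
    multifd_send_fill_packet(p);
    return 0;
}

static const MultiFDMethods mymeth_ops = {
    .send_setup   = mymeth_send_setup,
    .send_cleanup = mymeth_send_cleanup,
    .send_prepare = mymeth_send_prepare,
    /* .recv_setup / .recv_cleanup / .recv are required as well */
};

static void mymeth_register(void)
{
    /*
     * MULTIFD_COMPRESSION_MYMETH is hypothetical: a real backend needs its
     * own value in the MultiFDCompression QAPI enum.
     */
    multifd_register_ops(MULTIFD_COMPRESSION_MYMETH, &mymeth_ops);
}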
+ */ int (*recv)(MultiFDRecvParams *p, Error **errp); } MultiFDMethods; -void multifd_register_ops(int method, MultiFDMethods *ops); +void multifd_register_ops(int method, const MultiFDMethods *ops); void multifd_send_fill_packet(MultiFDSendParams *p); bool multifd_send_prepare_common(MultiFDSendParams *p); void multifd_send_zero_page_detect(MultiFDSendParams *p); void multifd_recv_zero_page_process(MultiFDRecvParams *p); -static inline void multifd_send_prepare_header(MultiFDSendParams *p) +void multifd_channel_connect(MultiFDSendParams *p, QIOChannel *ioc); +bool multifd_send(MultiFDSendData **send_data); +MultiFDSendData *multifd_send_data_alloc(void); +void multifd_send_data_clear(MultiFDSendData *data); +void multifd_send_data_free(MultiFDSendData *data); + +static inline uint32_t multifd_ram_page_size(void) { - p->iov[0].iov_len = p->packet_len; - p->iov[0].iov_base = p->packet; - p->iovs_num++; + return qemu_target_page_size(); } -void multifd_channel_connect(MultiFDSendParams *p, QIOChannel *ioc); +static inline uint32_t multifd_ram_page_count(void) +{ + return MULTIFD_PACKET_SIZE / qemu_target_page_size(); +} + +void multifd_ram_save_setup(void); +void multifd_ram_save_cleanup(void); +int multifd_ram_flush_and_sync(QEMUFile *f); +bool multifd_ram_sync_per_round(void); +bool multifd_ram_sync_per_section(void); +void multifd_ram_payload_alloc(MultiFDPages_t *pages); +void multifd_ram_payload_free(MultiFDPages_t *pages); +void multifd_ram_fill_packet(MultiFDSendParams *p); +int multifd_ram_unfill_packet(MultiFDRecvParams *p, Error **errp); + +void multifd_send_data_clear_device_state(MultiFDDeviceState_t *device_state); + +void multifd_device_state_send_setup(void); +void multifd_device_state_send_cleanup(void); + +void multifd_device_state_send_prepare(MultiFDSendParams *p); #endif diff --git a/migration/options.c b/migration/options.c index 645f55003da..4e923a2e072 100644 --- a/migration/options.c +++ b/migration/options.c @@ -19,16 +19,17 @@ #include "qapi/qapi-commands-migration.h" #include "qapi/qapi-visit-migration.h" #include "qapi/qmp/qerror.h" -#include "qapi/qmp/qnull.h" -#include "sysemu/runstate.h" +#include "qobject/qnull.h" +#include "system/runstate.h" #include "migration/colo.h" +#include "migration/cpr.h" #include "migration/misc.h" #include "migration.h" #include "migration-stats.h" #include "qemu-file.h" #include "ram.h" #include "options.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" /* Maximum migrate downtime set to 2000 seconds */ #define MAX_MIGRATE_DOWNTIME_SECONDS 2000 @@ -55,6 +56,13 @@ #define DEFAULT_MIGRATE_MULTIFD_COMPRESSION MULTIFD_COMPRESSION_NONE /* 0: means nocompress, 1: best speed, ... 9: best compress ratio */ #define DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL 1 +/* + * 1: best speed, ... 9: best compress ratio + * There is some nuance here. Refer to QATzip documentation to understand + * the mapping of QATzip levels to standard deflate levels. + */ +#define DEFAULT_MIGRATE_MULTIFD_QATZIP_LEVEL 1 + /* 0: means nocompress, 1: best speed, ... 
20: best compress ratio */ #define DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL 1 @@ -78,19 +86,23 @@ #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT_PERIOD 1000 /* milliseconds */ #define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT 1 /* MB/s */ -Property migration_properties[] = { +const Property migration_properties[] = { DEFINE_PROP_BOOL("store-global-state", MigrationState, store_global_state, true), DEFINE_PROP_BOOL("send-configuration", MigrationState, send_configuration, true), DEFINE_PROP_BOOL("send-section-footer", MigrationState, send_section_footer, true), + DEFINE_PROP_BOOL("send-switchover-start", MigrationState, + send_switchover_start, true), DEFINE_PROP_BOOL("multifd-flush-after-each-section", MigrationState, multifd_flush_after_each_section, false), DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState, clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT), DEFINE_PROP_BOOL("x-preempt-pre-7-2", MigrationState, preempt_pre_7_2, false), + DEFINE_PROP_BOOL("multifd-clean-tls-termination", MigrationState, + multifd_clean_tls_termination, true), /* Migration parameters */ DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState, @@ -123,6 +135,9 @@ Property migration_properties[] = { DEFINE_PROP_UINT8("multifd-zlib-level", MigrationState, parameters.multifd_zlib_level, DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL), + DEFINE_PROP_UINT8("multifd-qatzip-level", MigrationState, + parameters.multifd_qatzip_level, + DEFAULT_MIGRATE_MULTIFD_QATZIP_LEVEL), DEFINE_PROP_UINT8("multifd-zstd-level", MigrationState, parameters.multifd_zstd_level, DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL), @@ -172,6 +187,8 @@ Property migration_properties[] = { DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM), DEFINE_PROP_MIG_CAP("x-postcopy-preempt", MIGRATION_CAPABILITY_POSTCOPY_PREEMPT), + DEFINE_PROP_MIG_CAP("postcopy-blocktime", + MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME), DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO), DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM), DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH), @@ -186,8 +203,8 @@ Property migration_properties[] = { MIGRATION_CAPABILITY_SWITCHOVER_ACK), DEFINE_PROP_MIG_CAP("x-dirty-limit", MIGRATION_CAPABILITY_DIRTY_LIMIT), DEFINE_PROP_MIG_CAP("mapped-ram", MIGRATION_CAPABILITY_MAPPED_RAM), - DEFINE_PROP_END_OF_LIST(), }; +const size_t migration_properties_count = ARRAY_SIZE(migration_properties); bool migrate_auto_converge(void) { @@ -196,6 +213,13 @@ bool migrate_auto_converge(void) return s->capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE]; } +bool migrate_send_switchover_start(void) +{ + MigrationState *s = migrate_get_current(); + + return s->send_switchover_start; +} + bool migrate_background_snapshot(void) { MigrationState *s = migrate_get_current(); @@ -329,13 +353,6 @@ bool migrate_xbzrle(void) return s->capabilities[MIGRATION_CAPABILITY_XBZRLE]; } -bool migrate_zero_blocks(void) -{ - MigrationState *s = migrate_get_current(); - - return s->capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS]; -} - bool migrate_zero_copy_send(void) { MigrationState *s = migrate_get_current(); @@ -433,6 +450,24 @@ static bool migrate_incoming_started(void) return !!migration_incoming_get_current()->transport_data; } +bool migrate_rdma_caps_check(bool *caps, Error **errp) +{ + if (caps[MIGRATION_CAPABILITY_XBZRLE]) { + error_setg(errp, "RDMA and XBZRLE can't be used together"); + return false; + } + if (caps[MIGRATION_CAPABILITY_MULTIFD]) { + error_setg(errp, "RDMA and multifd can't be used together"); + return false; + } 
+ if (caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) { + error_setg(errp, "RDMA and postcopy-ram can't be used together"); + return false; + } + + return true; +} + /** * @migration_caps_check - check capability compatibility * @@ -447,6 +482,10 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp) ERRP_GUARD(); MigrationIncomingState *mis = migration_incoming_get_current(); + if (new_caps[MIGRATION_CAPABILITY_ZERO_BLOCKS]) { + warn_report("zero-blocks capability is deprecated"); + } + #ifndef CONFIG_REPLICATION if (new_caps[MIGRATION_CAPABILITY_X_COLO]) { error_setg(errp, "QEMU compiled without replication module" @@ -472,11 +511,6 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp) error_setg(errp, "Postcopy is not compatible with ignore-shared"); return false; } - - if (new_caps[MIGRATION_CAPABILITY_MULTIFD]) { - error_setg(errp, "Postcopy is not yet compatible with multifd"); - return false; - } } if (new_caps[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) { @@ -536,7 +570,7 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp) return false; } - if (migrate_incoming_started()) { + if (!migrate_postcopy_preempt() && migrate_incoming_started()) { error_setg(errp, "Postcopy preempt must be set before incoming starts"); return false; @@ -544,7 +578,7 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp) } if (new_caps[MIGRATION_CAPABILITY_MULTIFD]) { - if (migrate_incoming_started()) { + if (!migrate_multifd() && migrate_incoming_started()) { error_setg(errp, "Multifd must be set before incoming starts"); return false; } @@ -592,26 +626,13 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp) } } - return true; -} - -bool migrate_cap_set(int cap, bool value, Error **errp) -{ - MigrationState *s = migrate_get_current(); - bool new_caps[MIGRATION_CAPABILITY__MAX]; - - if (migration_is_running()) { - error_setg(errp, "There's a migration process in progress"); - return false; - } - - memcpy(new_caps, s->capabilities, sizeof(new_caps)); - new_caps[cap] = value; - - if (!migrate_caps_check(s->capabilities, new_caps, errp)) { + /* + * On destination side, check the cases that capability is being set + * after incoming thread has started. 
+ */ + if (migrate_rdma() && !migrate_rdma_caps_check(new_caps, errp)) { return false; } - s->capabilities[cap] = value; return true; } @@ -758,8 +779,11 @@ uint64_t migrate_max_postcopy_bandwidth(void) MigMode migrate_mode(void) { - MigrationState *s = migrate_get_current(); - MigMode mode = s->parameters.mode; + MigMode mode = cpr_get_incoming_mode(); + + if (mode == MIG_MODE_NONE) { + mode = migrate_get_current()->parameters.mode; + } assert(mode >= 0 && mode < MIG_MODE__MAX); return mode; @@ -787,6 +811,13 @@ int migrate_multifd_zlib_level(void) return s->parameters.multifd_zlib_level; } +int migrate_multifd_qatzip_level(void) +{ + MigrationState *s = migrate_get_current(); + + return s->parameters.multifd_qatzip_level; +} + int migrate_multifd_zstd_level(void) { MigrationState *s = migrate_get_current(); @@ -892,6 +923,8 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp) params->multifd_compression = s->parameters.multifd_compression; params->has_multifd_zlib_level = true; params->multifd_zlib_level = s->parameters.multifd_zlib_level; + params->has_multifd_qatzip_level = true; + params->multifd_qatzip_level = s->parameters.multifd_qatzip_level; params->has_multifd_zstd_level = true; params->multifd_zstd_level = s->parameters.multifd_zstd_level; params->has_xbzrle_cache_size = true; @@ -946,6 +979,7 @@ void migrate_params_init(MigrationParameters *params) params->has_multifd_channels = true; params->has_multifd_compression = true; params->has_multifd_zlib_level = true; + params->has_multifd_qatzip_level = true; params->has_multifd_zstd_level = true; params->has_xbzrle_cache_size = true; params->has_max_postcopy_bandwidth = true; @@ -1038,6 +1072,14 @@ bool migrate_params_check(MigrationParameters *params, Error **errp) return false; } + if (params->has_multifd_qatzip_level && + ((params->multifd_qatzip_level > 9) || + (params->multifd_qatzip_level < 1))) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_qatzip_level", + "a value between 1 and 9"); + return false; + } + if (params->has_multifd_zstd_level && (params->multifd_zstd_level > 20)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zstd_level", @@ -1173,6 +1215,11 @@ static void migrate_params_test_apply(MigrateSetParameters *params, dest->tls_hostname = params->tls_hostname->u.s; } + if (params->tls_authz) { + assert(params->tls_authz->type == QTYPE_QSTRING); + dest->tls_authz = params->tls_authz->u.s; + } + if (params->has_max_bandwidth) { dest->max_bandwidth = params->max_bandwidth; } @@ -1195,6 +1242,9 @@ static void migrate_params_test_apply(MigrateSetParameters *params, if (params->has_multifd_compression) { dest->multifd_compression = params->multifd_compression; } + if (params->has_multifd_qatzip_level) { + dest->multifd_qatzip_level = params->multifd_qatzip_level; + } if (params->has_multifd_zlib_level) { dest->multifd_zlib_level = params->multifd_zlib_level; } @@ -1315,6 +1365,9 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp) if (params->has_multifd_compression) { s->parameters.multifd_compression = params->multifd_compression; } + if (params->has_multifd_qatzip_level) { + s->parameters.multifd_qatzip_level = params->multifd_qatzip_level; + } if (params->has_multifd_zlib_level) { s->parameters.multifd_zlib_level = params->multifd_zlib_level; } diff --git a/migration/options.h b/migration/options.h index a2397026db7..82d839709e7 100644 --- a/migration/options.h +++ b/migration/options.h @@ -20,7 +20,8 @@ /* migration properties */ -extern Property 
migration_properties[]; +extern const Property migration_properties[]; +extern const size_t migration_properties_count; /* capabilities */ @@ -40,7 +41,6 @@ bool migrate_release_ram(void); bool migrate_return_path(void); bool migrate_validate_uuid(void); bool migrate_xbzrle(void); -bool migrate_zero_blocks(void); bool migrate_zero_copy_send(void); /* @@ -57,8 +57,8 @@ bool migrate_tls(void); /* capabilities helpers */ +bool migrate_rdma_caps_check(bool *caps, Error **errp); bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp); -bool migrate_cap_set(int cap, bool value, Error **errp); /* parameters */ @@ -78,6 +78,7 @@ uint64_t migrate_max_postcopy_bandwidth(void); int migrate_multifd_channels(void); MultiFDCompression migrate_multifd_compression(void); int migrate_multifd_zlib_level(void); +int migrate_multifd_qatzip_level(void); int migrate_multifd_zstd_level(void); uint8_t migrate_throttle_trigger_threshold(void); const char *migrate_tls_authz(void); diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index 1c374b7ea1e..45af9a361e8 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -27,11 +27,11 @@ #include "qapi/error.h" #include "qemu/notify.h" #include "qemu/rcu.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qemu/error-report.h" #include "trace.h" #include "hw/boards.h" -#include "exec/ramblock.h" +#include "system/ramblock.h" #include "socket.h" #include "yank_functions.h" #include "tls.h" @@ -90,10 +90,10 @@ void postcopy_thread_create(MigrationIncomingState *mis, QemuThread *thread, const char *name, void *(*fn)(void *), int joinable) { - qemu_sem_init(&mis->thread_sync_sem, 0); + qemu_event_init(&mis->thread_sync_event, false); qemu_thread_create(thread, name, fn, mis, joinable); - qemu_sem_wait(&mis->thread_sync_sem); - qemu_sem_destroy(&mis->thread_sync_sem); + qemu_event_wait(&mis->thread_sync_event); + qemu_event_destroy(&mis->thread_sync_event); } /* Postcopy needs to detect accesses to pages that haven't yet been copied @@ -110,19 +110,104 @@ void postcopy_thread_create(MigrationIncomingState *mis, #include #include +/* + * Here we use 24 buckets, which means the last bucket will cover [2^24 us, + * 2^25 us) ~= [16, 32) seconds. It should be far enough to record even + * extreme (perf-wise broken) 1G pages moving over, which can sometimes + * take a few seconds due to various reasons. Anything more than that + * might be unsensible to account anymore. + */ +#define BLOCKTIME_LATENCY_BUCKET_N (24) + +/* All the time records are in unit of nanoseconds */ typedef struct PostcopyBlocktimeContext { - /* time when page fault initiated per vCPU */ - uint32_t *page_fault_vcpu_time; - /* page address per vCPU */ - uintptr_t *vcpu_addr; - uint32_t total_blocktime; /* blocktime per vCPU */ - uint32_t *vcpu_blocktime; + uint64_t *vcpu_blocktime_total; + /* count of faults per vCPU */ + uint64_t *vcpu_faults_count; + /* + * count of currently blocked faults per vCPU. + * + * NOTE: Normally there should only be one fault in-progress per vCPU + * thread, so logically it _seems_ vcpu_faults_count[] for any vCPU + * should be either zero or one. However, there can be reasons we see + * >1 faults on the same vCPU thread. + * + * CASE (1): since the process to resolve faults (ioctl(UFFDIO_COPY), + * for example) is done before taking the mutex that protects the + * blocktime context, it can happen that we read more than one faulted + * addresses per vCPU. 
+ * + * One example when we can see >1 faulted addresses for one vCPU: + * + * vcpu1 thread fault thread resolve thread + * ============ ============ ============== + * + * faulted on addr1 + * read uffd msg (addr1) + * MUTEX_LOCK + * add entry (cpu1, addr1) + * MUTEX_UNLOCK + * request remote fault (addr1) + * resolve fault (addr1) + * addr1 resolved, continue.. + * faulted on addr2 + * read uffd msg (addr2) + * MUTEX_LOCK + * add entry (cpu1, addr2) <--------------- [A] + * MUTEX_UNLOCK + * MUTEX_LOCK + * remove entry (cpu1, addr1) + * MUTEX_UNLOCK + * + * In above case, we may see (cpu1, addr1) and (cpu1, addr2) entries to + * appear together at [A], when it gets the lock before the resolve + * thread. Use this counter to maintain such case, and only when it + * reaches zero we know the vCPU is not blocked anymore. + * + * CASE (2): theoretically (the author admit to not have verified + * this..), one vCPU thread can also generate more than one userfaultfd + * message on the same address. It can happen e.g. for whatever reason + * the fault got retried before a resolution arrives. In that extremely + * rare case, we could also see two (cpu1, addr1) entries. + * + * In all cases, be prepared with such re-entrancies with this array. + * + * Using uint8_t should be far enough for now. For example, when + * there're only one resolve thread (postcopy ram listening thread), + * the max (concurrent fault entries) should be two. + */ + uint8_t *vcpu_faults_current; + /* + * The hash that contains addr1->[(cpu1,ts1),(cpu2,ts2) ...] mappings. + * Each of the entry is a tuple of (CPU index, fault timestamp) showing + * that a fault was requested. + */ + GHashTable *vcpu_addr_hash; + /* + * Each bucket stores the count of faults that were resolved within the + * bucket window [2^N us, 2^(N+1) us). + */ + uint64_t latency_buckets[BLOCKTIME_LATENCY_BUCKET_N]; + /* total blocktime when all vCPUs are stopped */ + uint64_t total_blocktime; /* point in time when last page fault was initiated */ - uint32_t last_begin; + uint64_t last_begin; /* number of vCPU are suspended */ int smp_cpus_down; - uint64_t start_time; + + /* + * Fast path for looking up vcpu_index from tid. NOTE: this result + * only reflects the vcpu setup when postcopy is running. It may not + * always match with the current vcpu setup because vcpus can be hot + * attached/detached after migration completes. However this should be + * stable when blocktime is using the structure. + */ + GHashTable *tid_to_vcpu_hash; + /* Count of non-vCPU faults. This is only for debugging purpose. */ + uint64_t non_vcpu_faults; + /* total blocktime when a non-vCPU thread is stopped */ + uint64_t non_vcpu_blocktime_total; /* * Handler for exit event, necessary for @@ -131,11 +216,41 @@ typedef struct PostcopyBlocktimeContext { Notifier exit_notifier; } PostcopyBlocktimeContext; +typedef struct { + /* The time the fault was triggered */ + uint64_t fault_time; + /* + * The vCPU index that was blocked, when cpu==-1, it means it's a + * fault from non-vCPU threads. 
+ */ + int cpu; +} BlocktimeVCPUEntry; + +/* Alloc an entry to record a vCPU fault */ +static BlocktimeVCPUEntry * +blocktime_vcpu_entry_alloc(int cpu, uint64_t fault_time) +{ + BlocktimeVCPUEntry *entry = g_new(BlocktimeVCPUEntry, 1); + + entry->fault_time = fault_time; + entry->cpu = cpu; + + return entry; +} + +/* Free a @GList of @BlocktimeVCPUEntry */ +static void blocktime_vcpu_list_free(gpointer data) +{ + g_list_free_full(data, g_free); +} + static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx) { - g_free(ctx->page_fault_vcpu_time); - g_free(ctx->vcpu_addr); - g_free(ctx->vcpu_blocktime); + g_hash_table_destroy(ctx->tid_to_vcpu_hash); + g_hash_table_destroy(ctx->vcpu_addr_hash); + g_free(ctx->vcpu_blocktime_total); + g_free(ctx->vcpu_faults_count); + g_free(ctx->vcpu_faults_current); g_free(ctx); } @@ -146,32 +261,65 @@ static void migration_exit_cb(Notifier *n, void *data) destroy_blocktime_context(ctx); } +static GHashTable *blocktime_init_tid_to_vcpu_hash(void) +{ + /* + * TID as an unsigned int can be directly used as the key. However, + * CPU index can NOT be directly used as value, because CPU index can + * be 0, which means NULL. Then when lookup we can never know whether + * it's 0 or "not found". Hence use an indirection for CPU index. + */ + GHashTable *table = g_hash_table_new_full(g_direct_hash, g_direct_equal, + NULL, g_free); + CPUState *cpu; + + /* + * Initialize the tid->cpu_id mapping for lookups. The caller needs to + * make sure when reaching here the CPU topology is frozen and will be + * stable for the whole blocktime trapping period. + */ + CPU_FOREACH(cpu) { + int *value = g_new(int, 1); + + *value = cpu->cpu_index; + g_hash_table_insert(table, + GUINT_TO_POINTER((uint32_t)cpu->thread_id), + value); + trace_postcopy_blocktime_tid_cpu_map(cpu->cpu_index, cpu->thread_id); + } + + return table; +} + static struct PostcopyBlocktimeContext *blocktime_context_new(void) { MachineState *ms = MACHINE(qdev_get_machine()); unsigned int smp_cpus = ms->smp.cpus; PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1); - ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus); - ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus); - ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus); - ctx->exit_notifier.notify = migration_exit_cb; - ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); - qemu_add_exit_notifier(&ctx->exit_notifier); - return ctx; -} + /* Initialize all counters to be zeros */ + memset(ctx->latency_buckets, 0, sizeof(ctx->latency_buckets)); -static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx) -{ - MachineState *ms = MACHINE(qdev_get_machine()); - uint32List *list = NULL; - int i; + ctx->vcpu_blocktime_total = g_new0(uint64_t, smp_cpus); + ctx->vcpu_faults_count = g_new0(uint64_t, smp_cpus); + ctx->vcpu_faults_current = g_new0(uint8_t, smp_cpus); + ctx->tid_to_vcpu_hash = blocktime_init_tid_to_vcpu_hash(); - for (i = ms->smp.cpus - 1; i >= 0; i--) { - QAPI_LIST_PREPEND(list, ctx->vcpu_blocktime[i]); - } + /* + * The key (host virtual addresses) will always be gpointer-sized on + * either 32bits or 64bits systems, so it'll fit as a direct key. + * + * The value will be a list of BlocktimeVCPUEntry entries. 
+ */ + ctx->vcpu_addr_hash = g_hash_table_new_full(g_direct_hash, + g_direct_equal, + NULL, + blocktime_vcpu_list_free); + + ctx->exit_notifier.notify = migration_exit_cb; + qemu_add_exit_notifier(&ctx->exit_notifier); - return list; + return ctx; } /* @@ -185,18 +333,64 @@ void fill_destination_postcopy_migration_info(MigrationInfo *info) { MigrationIncomingState *mis = migration_incoming_get_current(); PostcopyBlocktimeContext *bc = mis->blocktime_ctx; + MachineState *ms = MACHINE(qdev_get_machine()); + uint64_t latency_total = 0, faults = 0; + uint32List *list_blocktime = NULL; + uint64List *list_latency = NULL; + uint64List *latency_buckets = NULL; + int i; if (!bc) { return; } + for (i = ms->smp.cpus - 1; i >= 0; i--) { + uint64_t latency, total, count; + + /* Convert ns -> ms */ + QAPI_LIST_PREPEND(list_blocktime, + (uint32_t)(bc->vcpu_blocktime_total[i] / SCALE_MS)); + + /* The rest in nanoseconds */ + total = bc->vcpu_blocktime_total[i]; + latency_total += total; + count = bc->vcpu_faults_count[i]; + faults += count; + + if (count) { + latency = total / count; + } else { + /* No fault detected */ + latency = 0; + } + + QAPI_LIST_PREPEND(list_latency, latency); + } + + for (i = BLOCKTIME_LATENCY_BUCKET_N - 1; i >= 0; i--) { + QAPI_LIST_PREPEND(latency_buckets, bc->latency_buckets[i]); + } + + latency_total += bc->non_vcpu_blocktime_total; + faults += bc->non_vcpu_faults; + + info->has_postcopy_non_vcpu_latency = true; + info->postcopy_non_vcpu_latency = bc->non_vcpu_faults ? + (bc->non_vcpu_blocktime_total / bc->non_vcpu_faults) : 0; info->has_postcopy_blocktime = true; - info->postcopy_blocktime = bc->total_blocktime; + /* Convert ns -> ms */ + info->postcopy_blocktime = (uint32_t)(bc->total_blocktime / SCALE_MS); info->has_postcopy_vcpu_blocktime = true; - info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc); + info->postcopy_vcpu_blocktime = list_blocktime; + info->has_postcopy_latency = true; + info->postcopy_latency = faults ? (latency_total / faults) : 0; + info->has_postcopy_vcpu_latency = true; + info->postcopy_vcpu_latency = list_latency; + info->has_postcopy_latency_dist = true; + info->postcopy_latency_dist = latency_buckets; } -static uint32_t get_postcopy_total_blocktime(void) +static uint64_t get_postcopy_total_blocktime(void) { MigrationIncomingState *mis = migration_incoming_get_current(); PostcopyBlocktimeContext *bc = mis->blocktime_ctx; @@ -300,13 +494,13 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis, } #ifdef UFFD_FEATURE_THREAD_ID + /* + * Postcopy blocktime conditionally needs THREAD_ID feature (introduced + * to Linux in 2017). Always try to enable it when QEMU is compiled + * with such environment. 
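/*
 * A minimal sketch of where the faulting thread id comes from once
 * UFFD_FEATURE_THREAD_ID has been negotiated (assumes
 * <linux/userfaultfd.h>); this tid is what blocktime later maps back to
 * a vCPU index via the tid_to_vcpu_hash:
 */
static uint32_t fault_msg_get_tid(const struct uffd_msg *msg)
{
    if (msg->event != UFFD_EVENT_PAGEFAULT) {
        return 0;   /* no thread id to account for non-fault events */
    }

    return msg->arg.pagefault.feat.ptid;
}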
+ */ if (UFFD_FEATURE_THREAD_ID & supported_features) { asked_features |= UFFD_FEATURE_THREAD_ID; - if (migrate_postcopy_blocktime()) { - if (!mis->blocktime_ctx) { - mis->blocktime_ctx = blocktime_context_new(); - } - } } #endif @@ -651,8 +845,8 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) mis->have_fault_thread = false; } - if (enable_mlock) { - if (os_mlock() < 0) { + if (should_mlock(mlock_state)) { + if (os_mlock(is_mlock_on_fault(mlock_state)) < 0) { error_report("mlock: %s", strerror(errno)); /* * It doesn't feel right to fail at this point, we have a valid @@ -746,22 +940,18 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd, RAMBlock *rb) { size_t pagesize = qemu_ram_pagesize(rb); - struct uffdio_range range; - int ret; trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb)); - range.start = ROUND_DOWN(client_addr, pagesize); - range.len = pagesize; - ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range); - if (ret) { - error_report("%s: Failed to wake: %zx in %s (%s)", - __func__, (size_t)client_addr, qemu_ram_get_idstr(rb), - strerror(errno)); - } - return ret; + return uffd_wakeup(pcfd->fd, + (void *)(uintptr_t)ROUND_DOWN(client_addr, pagesize), + pagesize); } +/* + * NOTE: @tid is only used when postcopy-blocktime feature is enabled, and + * also optional: when zero is provided, the fault accounting will be ignored. + */ static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb, - ram_addr_t start, uint64_t haddr) + ram_addr_t start, uint64_t haddr, uint32_t tid) { void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb)); @@ -780,7 +970,7 @@ static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb, return received ? 0 : postcopy_place_page_zero(mis, aligned, rb); } - return migrate_send_rp_req_pages(mis, rb, start, haddr); + return migrate_send_rp_req_pages(mis, rb, start, haddr, tid); } /* @@ -801,83 +991,204 @@ int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb, qemu_ram_get_idstr(rb), rb_offset); return postcopy_wake_shared(pcfd, client_addr, rb); } - postcopy_request_page(mis, rb, aligned_rbo, client_addr); + /* TODO: support blocktime tracking */ + postcopy_request_page(mis, rb, aligned_rbo, client_addr, 0); return 0; } -static int get_mem_fault_cpu_index(uint32_t pid) +static int blocktime_get_vcpu(PostcopyBlocktimeContext *ctx, uint32_t tid) { - CPUState *cpu_iter; + int *found; - CPU_FOREACH(cpu_iter) { - if (cpu_iter->thread_id == pid) { - trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid); - return cpu_iter->cpu_index; - } + found = g_hash_table_lookup(ctx->tid_to_vcpu_hash, GUINT_TO_POINTER(tid)); + if (!found) { + /* + * NOTE: this is possible, because QEMU's non-vCPU threads can + * also access a missing page. Or, when KVM async pf is enabled, a + * fault can even happen from a kworker.. + */ + return -1; } - trace_get_mem_fault_cpu_index(-1, pid); - return -1; + + return *found; } -static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc) +static uint64_t get_current_ns(void) { - int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - - dc->start_time; - return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX; + return (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_REALTIME); } /* - * This function is being called when pagefault occurs. It - * tracks down vCPU blocking time. + * Inject an (cpu, fault_time) entry into the database, using addr as key. + * When cpu==-1, it means it's a non-vCPU fault. 
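/*
 * Behaviour sketch of the injection described above (addresses, vCPU
 * indexes and timestamps are purely illustrative):
 *
 *   inject(addr1, cpu=0,  t0):  addr1 -> [ (0, t0) ]
 *   inject(addr1, cpu=3,  t1):  addr1 -> [ (3, t1), (0, t0) ]   (prepend)
 *   inject(addr2, cpu=-1, t2):  addr2 -> [ (-1, t2) ]           (non-vCPU)
 *
 * When addr1 is finally copied in, the whole list hanging off addr1 is
 * walked once for the accounting and then dropped.
 */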
+ */ +static void blocktime_fault_inject(PostcopyBlocktimeContext *ctx, + uintptr_t addr, int cpu, uint64_t time) +{ + BlocktimeVCPUEntry *entry = blocktime_vcpu_entry_alloc(cpu, time); + GHashTable *table = ctx->vcpu_addr_hash; + gpointer key = (gpointer)addr; + GList *head, *list; + gboolean result; + + head = g_hash_table_lookup(table, key); + if (head) { + /* + * If existed, steal the @head for list operation rather than + * freeing it, making sure steal succeeded. + */ + result = g_hash_table_steal(table, key); + assert(result == TRUE); + } + + /* + * Now the key is guaranteed to be absent. Two cases: + * + * (1) There's no existing entry, list contains the only one. Insert. + * (2) There're existing entries, after stealing we own it, prepend the + * result and re-insert. + */ + list = g_list_prepend(head, entry); + g_hash_table_insert(table, key, list); + + trace_postcopy_blocktime_begin(addr, time, cpu, !!head); +} + +/* + * This function is being called when pagefault occurs. It tracks down vCPU + * blocking time. It's protected by @page_request_mutex. * * @addr: faulted host virtual address * @ptid: faulted process thread id * @rb: ramblock appropriate to addr */ -static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid, - RAMBlock *rb) +void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid, + RAMBlock *rb) { - int cpu, already_received; + int cpu; MigrationIncomingState *mis = migration_incoming_get_current(); PostcopyBlocktimeContext *dc = mis->blocktime_ctx; - uint32_t low_time_offset; + uint64_t current; if (!dc || ptid == 0) { return; } - cpu = get_mem_fault_cpu_index(ptid); - if (cpu < 0) { - return; + + /* + * The caller should only inject a blocktime entry when the page is + * yet missing. + */ + assert(!ramblock_recv_bitmap_test(rb, (void *)addr)); + + current = get_current_ns(); + cpu = blocktime_get_vcpu(dc, ptid); + + if (cpu >= 0) { + /* How many faults on this vCPU in total? */ + dc->vcpu_faults_count[cpu]++; + + /* + * Account how many concurrent faults on this vCPU we trapped. See + * comments above vcpu_faults_current[] on why it can be more than one. + */ + if (dc->vcpu_faults_current[cpu]++ == 0) { + dc->smp_cpus_down++; + /* + * We use last_begin to cover (1) the 1st fault on this specific + * vCPU, but meanwhile (2) the last vCPU that got blocked. It's + * only used to calculate system-wide blocktime. + */ + dc->last_begin = current; + } + + /* Making sure it won't overflow - it really should never! */ + assert(dc->vcpu_faults_current[cpu] <= 255); + } else { + /* + * For non-vCPU thread faults, we don't care about tid or cpu index + * or time the thread is blocked (e.g., a kworker trying to help + * KVM when async_pf=on is OK to be blocked and not affect guest + * responsiveness), but we care about latency. Track it with + * cpu=-1. + * + * Note that this will NOT affect blocktime reports on vCPU being + * blocked, but only about system-wide latency reports. + */ + dc->non_vcpu_faults++; } - low_time_offset = get_low_time_offset(dc); - if (dc->vcpu_addr[cpu] == 0) { - qatomic_inc(&dc->smp_cpus_down); + blocktime_fault_inject(dc, addr, cpu, current); +} + +static void blocktime_latency_account(PostcopyBlocktimeContext *ctx, + uint64_t time_us) +{ + /* + * Convert time (in us) to bucket index it belongs. Take extra caution + * of time_us==0 even if normally rare - when happens put into bucket 0. + */ + int index = time_us ? 
(63 - clz64(time_us)) : 0; + + assert(index >= 0); + + /* If it's too large, put into top bucket */ + if (index >= BLOCKTIME_LATENCY_BUCKET_N) { + index = BLOCKTIME_LATENCY_BUCKET_N - 1; } - qatomic_xchg(&dc->last_begin, low_time_offset); - qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset); - qatomic_xchg(&dc->vcpu_addr[cpu], addr); + ctx->latency_buckets[index]++; +} + +typedef struct { + PostcopyBlocktimeContext *ctx; + uint64_t current; + int affected_cpus; + int affected_non_cpus; +} BlockTimeVCPUIter; + +static void blocktime_cpu_list_iter_fn(gpointer data, gpointer user_data) +{ + BlockTimeVCPUIter *iter = user_data; + PostcopyBlocktimeContext *ctx = iter->ctx; + BlocktimeVCPUEntry *entry = data; + uint64_t time_passed; + int cpu = entry->cpu; /* - * check it here, not at the beginning of the function, - * due to, check could occur early than bitmap_set in - * qemu_ufd_copy_ioctl + * Time should never go back.. so when the fault is resolved it must be + * later than when it was faulted. */ - already_received = ramblock_recv_bitmap_test(rb, (void *)addr); - if (already_received) { - qatomic_xchg(&dc->vcpu_addr[cpu], 0); - qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0); - qatomic_dec(&dc->smp_cpus_down); + assert(iter->current >= entry->fault_time); + time_passed = iter->current - entry->fault_time; + + /* Latency buckets are in microseconds */ + blocktime_latency_account(ctx, time_passed / SCALE_US); + + if (cpu >= 0) { + /* + * If we resolved all pending faults on one vCPU due to this page + * resolution, take a note. + */ + if (--ctx->vcpu_faults_current[cpu] == 0) { + ctx->vcpu_blocktime_total[cpu] += time_passed; + iter->affected_cpus += 1; + } + trace_postcopy_blocktime_end_one(cpu, ctx->vcpu_faults_current[cpu]); + } else { + iter->affected_non_cpus++; + ctx->non_vcpu_blocktime_total += time_passed; + /* + * We do not maintain how many pending non-vCPU faults because we + * do not care about blocktime, only latency. + */ + trace_postcopy_blocktime_end_one(-1, 0); } - trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu], - cpu, already_received); } /* - * This function just provide calculated blocktime per cpu and trace it. - * Total blocktime is calculated in mark_postcopy_blocktime_end. - * + * This function just provide calculated blocktime per cpu and trace it. + * Total blocktime is calculated in mark_postcopy_blocktime_end. It's + * protected by @page_request_mutex. 
* * Assume we have 3 CPU * @@ -907,48 +1218,45 @@ static void mark_postcopy_blocktime_end(uintptr_t addr) PostcopyBlocktimeContext *dc = mis->blocktime_ctx; MachineState *ms = MACHINE(qdev_get_machine()); unsigned int smp_cpus = ms->smp.cpus; - int i, affected_cpu = 0; - bool vcpu_total_blocktime = false; - uint32_t read_vcpu_time, low_time_offset; + BlockTimeVCPUIter iter = { + .current = get_current_ns(), + .affected_cpus = 0, + .affected_non_cpus = 0, + .ctx = dc, + }; + gpointer key = (gpointer)addr; + GHashTable *table; + GList *list; if (!dc) { return; } - low_time_offset = get_low_time_offset(dc); - /* lookup cpu, to clear it, - * that algorithm looks straightforward, but it's not - * optimal, more optimal algorithm is keeping tree or hash - * where key is address value is a list of */ - for (i = 0; i < smp_cpus; i++) { - uint32_t vcpu_blocktime = 0; - - read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0); - if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr || - read_vcpu_time == 0) { - continue; - } - qatomic_xchg(&dc->vcpu_addr[i], 0); - vcpu_blocktime = low_time_offset - read_vcpu_time; - affected_cpu += 1; - /* we need to know is that mark_postcopy_end was due to - * faulted page, another possible case it's prefetched - * page and in that case we shouldn't be here */ - if (!vcpu_total_blocktime && - qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) { - vcpu_total_blocktime = true; - } - /* continue cycle, due to one page could affect several vCPUs */ - dc->vcpu_blocktime[i] += vcpu_blocktime; + table = dc->vcpu_addr_hash; + /* the address wasn't tracked at all? */ + list = g_hash_table_lookup(table, key); + if (!list) { + return; } - qatomic_sub(&dc->smp_cpus_down, affected_cpu); - if (vcpu_total_blocktime) { - dc->total_blocktime += low_time_offset - qatomic_fetch_add( - &dc->last_begin, 0); + /* + * Loop over the set of vCPUs that got blocked on this addr, do the + * blocktime accounting. After that, remove the whole list. + */ + g_list_foreach(list, blocktime_cpu_list_iter_fn, &iter); + g_hash_table_remove(table, key); + + /* + * If all vCPUs used to be down, and copying this page would free some + * vCPUs, then the system-level blocktime ends here. 
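/*
 * A small worked example of the system-wide accounting above, assuming a
 * 2-vCPU guest (all times illustrative):
 *
 *   t=100us  vCPU0 faults                 smp_cpus_down = 1
 *   t=180us  vCPU1 faults (last one up)   smp_cpus_down = 2, last_begin = 180
 *   t=250us  vCPU1's page arrives         all vCPUs were down, so
 *                                         total_blocktime += 250 - 180
 *   t=300us  vCPU0's page arrives         not all vCPUs were down anymore,
 *                                         only vcpu_blocktime_total[0] grows
 */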
+ */ + if (dc->smp_cpus_down == smp_cpus && iter.affected_cpus) { + dc->total_blocktime += iter.current - dc->last_begin; } - trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime, - affected_cpu); + dc->smp_cpus_down -= iter.affected_cpus; + + trace_postcopy_blocktime_end(addr, iter.current, iter.affected_cpus, + iter.affected_non_cpus); } static void postcopy_pause_fault_thread(MigrationIncomingState *mis) @@ -972,7 +1280,7 @@ static void *postcopy_ram_fault_thread(void *opaque) trace_postcopy_ram_fault_thread_entry(); rcu_register_thread(); mis->last_rb = NULL; /* last RAMBlock we sent part of */ - qemu_sem_post(&mis->thread_sync_sem); + qemu_event_set(&mis->thread_sync_event); struct pollfd *pfd; size_t pfd_len = 2 + mis->postcopy_remote_fds->len; @@ -1076,17 +1384,14 @@ static void *postcopy_ram_fault_thread(void *opaque) qemu_ram_get_idstr(rb), rb_offset, msg.arg.pagefault.feat.ptid); - mark_postcopy_blocktime_begin( - (uintptr_t)(msg.arg.pagefault.address), - msg.arg.pagefault.feat.ptid, rb); - retry: /* * Send the request to the source - we want to request one * of our host page sizes (which is >= TPS) */ ret = postcopy_request_page(mis, rb, rb_offset, - msg.arg.pagefault.address); + msg.arg.pagefault.address, + msg.arg.pagefault.feat.ptid); if (ret) { /* May be network failure, try to wait for recovery */ postcopy_pause_fault_thread(mis); @@ -1229,6 +1534,11 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis) return -1; } + if (migrate_postcopy_blocktime()) { + assert(mis->blocktime_ctx == NULL); + mis->blocktime_ctx = blocktime_context_new(); + } + /* Now an eventfd we use to tell the fault-thread to quit */ mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC); if (mis->userfault_event_fd == -1) { @@ -1238,7 +1548,8 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis) return -1; } - postcopy_thread_create(mis, &mis->fault_thread, "mig/dst/fault", + postcopy_thread_create(mis, &mis->fault_thread, + MIGRATION_THREAD_DST_FAULT, postcopy_ram_fault_thread, QEMU_THREAD_JOINABLE); mis->have_fault_thread = true; @@ -1258,7 +1569,8 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis) * This thread needs to be created after the temp pages because * it'll fetch RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately. 
*/ - postcopy_thread_create(mis, &mis->postcopy_prio_thread, "mig/dst/preempt", + postcopy_thread_create(mis, &mis->postcopy_prio_thread, + MIGRATION_THREAD_DST_PREEMPT, postcopy_preempt_thread, QEMU_THREAD_JOINABLE); mis->preempt_thread_status = PREEMPT_THREAD_CREATED; } @@ -1275,18 +1587,10 @@ static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr, int ret; if (from_addr) { - struct uffdio_copy copy_struct; - copy_struct.dst = (uint64_t)(uintptr_t)host_addr; - copy_struct.src = (uint64_t)(uintptr_t)from_addr; - copy_struct.len = pagesize; - copy_struct.mode = 0; - ret = ioctl(userfault_fd, UFFDIO_COPY, ©_struct); + ret = uffd_copy_page(userfault_fd, host_addr, from_addr, pagesize, + false); } else { - struct uffdio_zeropage zero_struct; - zero_struct.range.start = (uint64_t)(uintptr_t)host_addr; - zero_struct.range.len = pagesize; - zero_struct.mode = 0; - ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct); + ret = uffd_zero_page(userfault_fd, host_addr, pagesize, false); } if (!ret) { qemu_mutex_lock(&mis->page_request_mutex); @@ -1313,8 +1617,8 @@ static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr, qemu_cond_signal(&mis->page_request_cond); } } - qemu_mutex_unlock(&mis->page_request_mutex); mark_postcopy_blocktime_end((uintptr_t)host_addr); + qemu_mutex_unlock(&mis->page_request_mutex); } return ret; } @@ -1343,18 +1647,16 @@ int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from, RAMBlock *rb) { size_t pagesize = qemu_ram_pagesize(rb); + int e; /* copy also acks to the kernel waking the stalled thread up * TODO: We can inhibit that ack and only do it if it was requested * which would be slightly cheaper, but we'd have to be careful * of the order of updating our page state. */ - if (qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb)) { - int e = errno; - error_report("%s: %s copy host: %p from: %p (size: %zd)", - __func__, strerror(e), host, from, pagesize); - - return -e; + e = qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb); + if (e) { + return e; } trace_postcopy_place_page(host); @@ -1376,12 +1678,10 @@ int postcopy_place_page_zero(MigrationIncomingState *mis, void *host, * but it's not available for everything (e.g. 
hugetlbpages) */ if (qemu_ram_is_uf_zeroable(rb)) { - if (qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb)) { - int e = errno; - error_report("%s: %s zero host: %p", - __func__, strerror(e), host); - - return -e; + int e; + e = qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb); + if (e) { + return e; } return postcopy_notify_shared_wake(rb, qemu_ram_block_host_offset(rb, @@ -1411,49 +1711,47 @@ int postcopy_ram_incoming_init(MigrationIncomingState *mis) int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) { - assert(0); - return -1; + g_assert_not_reached(); } int postcopy_ram_prepare_discard(MigrationIncomingState *mis) { - assert(0); - return -1; + g_assert_not_reached(); } int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb, uint64_t client_addr, uint64_t rb_offset) { - assert(0); - return -1; + g_assert_not_reached(); } int postcopy_ram_incoming_setup(MigrationIncomingState *mis) { - assert(0); - return -1; + g_assert_not_reached(); } int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from, RAMBlock *rb) { - assert(0); - return -1; + g_assert_not_reached(); } int postcopy_place_page_zero(MigrationIncomingState *mis, void *host, RAMBlock *rb) { - assert(0); - return -1; + g_assert_not_reached(); } int postcopy_wake_shared(struct PostCopyFD *pcfd, uint64_t client_addr, RAMBlock *rb) { - assert(0); - return -1; + g_assert_not_reached(); +} + +void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid, + RAMBlock *rb) +{ } #endif @@ -1741,7 +2039,7 @@ void *postcopy_preempt_thread(void *opaque) rcu_register_thread(); - qemu_sem_post(&mis->thread_sync_sem); + qemu_event_set(&mis->thread_sync_event); /* * The preempt channel is established in asynchronous way. Wait diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h index a6df1b2811b..3852141d7e3 100644 --- a/migration/postcopy-ram.h +++ b/migration/postcopy-ram.h @@ -196,5 +196,7 @@ void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file); void postcopy_preempt_setup(MigrationState *s); int postcopy_preempt_establish_channel(MigrationState *s); bool postcopy_is_paused(MigrationStatus status); +void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid, + RAMBlock *rb); #endif diff --git a/migration/qemu-file.c b/migration/qemu-file.c index b6d2f588bd7..b6ac190034f 100644 --- a/migration/qemu-file.c +++ b/migration/qemu-file.c @@ -37,6 +37,11 @@ #define IO_BUF_SIZE 32768 #define MAX_IOV_SIZE MIN_CONST(IOV_MAX, 64) +typedef struct FdEntry { + QTAILQ_ENTRY(FdEntry) entry; + int fd; +} FdEntry; + struct QEMUFile { QIOChannel *ioc; bool is_writable; @@ -51,6 +56,9 @@ struct QEMUFile { int last_error; Error *last_error_obj; + + bool can_pass_fd; + QTAILQ_HEAD(, FdEntry) fds; }; /* @@ -109,6 +117,8 @@ static QEMUFile *qemu_file_new_impl(QIOChannel *ioc, bool is_writable) object_ref(ioc); f->ioc = ioc; f->is_writable = is_writable; + f->can_pass_fd = qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS); + QTAILQ_INIT(&f->fds); return f; } @@ -310,6 +320,10 @@ static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f) int len; int pending; Error *local_error = NULL; + g_autofree int *fds = NULL; + size_t nfd = 0; + int **pfds = f->can_pass_fd ? &fds : NULL; + size_t *pnfd = f->can_pass_fd ? 
&nfd : NULL; assert(!qemu_file_is_writable(f)); @@ -325,10 +339,9 @@ static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f) } do { - len = qio_channel_read(f->ioc, - (char *)f->buf + pending, - IO_BUF_SIZE - pending, - &local_error); + struct iovec iov = { f->buf + pending, IO_BUF_SIZE - pending }; + len = qio_channel_readv_full(f->ioc, &iov, 1, pfds, pnfd, 0, + &local_error); if (len == QIO_CHANNEL_ERR_BLOCK) { if (qemu_in_coroutine()) { qio_channel_yield(f->ioc, G_IO_IN); @@ -348,9 +361,66 @@ static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f) qemu_file_set_error_obj(f, len, local_error); } + for (int i = 0; i < nfd; i++) { + FdEntry *fde = g_new0(FdEntry, 1); + fde->fd = fds[i]; + QTAILQ_INSERT_TAIL(&f->fds, fde, entry); + } + return len; } +int qemu_file_put_fd(QEMUFile *f, int fd) +{ + int ret = 0; + QIOChannel *ioc = qemu_file_get_ioc(f); + Error *err = NULL; + struct iovec iov = { (void *)" ", 1 }; + + /* + * Send a dummy byte so qemu_fill_buffer on the receiving side does not + * fail with a len=0 error. Flush first to maintain ordering wrt other + * data. + */ + + qemu_fflush(f); + if (qio_channel_writev_full(ioc, &iov, 1, &fd, 1, 0, &err) < 1) { + error_report_err(error_copy(err)); + qemu_file_set_error_obj(f, -EIO, err); + ret = -1; + } + trace_qemu_file_put_fd(f->ioc->name, fd, ret); + return ret; +} + +int qemu_file_get_fd(QEMUFile *f) +{ + int fd = -1; + FdEntry *fde; + + if (!f->can_pass_fd) { + Error *err = NULL; + error_setg(&err, "%s does not support fd passing", f->ioc->name); + error_report_err(error_copy(err)); + qemu_file_set_error_obj(f, -EIO, err); + goto out; + } + + /* Force the dummy byte and its fd passenger to appear. */ + qemu_peek_byte(f, 0); + + fde = QTAILQ_FIRST(&f->fds); + if (fde) { + qemu_get_byte(f); /* Drop the dummy byte */ + fd = fde->fd; + QTAILQ_REMOVE(&f->fds, fde, entry); + g_free(fde); + } +out: + trace_qemu_file_get_fd(f->ioc->name, fd); + return fd; +} + /** Closes the file * * Returns negative error value if any error happened on previous operations or @@ -361,11 +431,17 @@ static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f) */ int qemu_fclose(QEMUFile *f) { + FdEntry *fde, *next; int ret = qemu_fflush(f); int ret2 = qio_channel_close(f->ioc, NULL); if (ret >= 0) { ret = ret2; } + QTAILQ_FOREACH_SAFE(fde, &f->fds, entry, next) { + warn_report("qemu_fclose: received fd %d was never claimed", fde->fd); + close(fde->fd); + g_free(fde); + } g_clear_pointer(&f->ioc, object_unref); error_free(f->last_error_obj); g_free(f); @@ -485,8 +561,6 @@ void qemu_put_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen, } stat64_add(&mig_stats.qemu_file_transferred, buflen); - - return; } diff --git a/migration/qemu-file.h b/migration/qemu-file.h index 11c2120edd7..f5b9f430e04 100644 --- a/migration/qemu-file.h +++ b/migration/qemu-file.h @@ -33,6 +33,8 @@ QEMUFile *qemu_file_new_input(QIOChannel *ioc); QEMUFile *qemu_file_new_output(QIOChannel *ioc); int qemu_fclose(QEMUFile *f); +G_DEFINE_AUTOPTR_CLEANUP_FUNC(QEMUFile, qemu_fclose) + /* * qemu_file_transferred: * @@ -79,5 +81,7 @@ size_t qemu_get_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen, off_t pos); QIOChannel *qemu_file_get_ioc(QEMUFile *file); +int qemu_file_put_fd(QEMUFile *f, int fd); +int qemu_file_get_fd(QEMUFile *f); #endif diff --git a/migration/ram.c b/migration/ram.c index edec1a2d073..7208bc114fb 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -48,19 +48,19 @@ #include "qapi/qapi-commands-migration.h" #include "qapi/qmp/qerror.h" 
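/*
 * A usage sketch for the fd-passing helpers introduced above, assuming a
 * QEMUFile backed by a channel with QIO_CHANNEL_FEATURE_FD_PASS (e.g. a
 * UNIX socket); the wrapper names are illustrative:
 */
static int send_memfd_to_peer(QEMUFile *f, int memfd)
{
    /* queues a dummy byte carrying @memfd as ancillary data */
    return qemu_file_put_fd(f, memfd);
}

static int recv_memfd_from_peer(QEMUFile *f)
{
    int fd = qemu_file_get_fd(f);   /* consumes the dummy byte, too */

    /* fd < 0: either the channel cannot pass fds or none was queued */
    return fd;
}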
#include "trace.h" -#include "exec/ram_addr.h" +#include "system/ram_addr.h" #include "exec/target_page.h" #include "qemu/rcu_queue.h" #include "migration/colo.h" -#include "sysemu/cpu-throttle.h" +#include "system/cpu-throttle.h" #include "savevm.h" #include "qemu/iov.h" #include "multifd.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "rdma.h" #include "options.h" -#include "sysemu/dirtylimit.h" -#include "sysemu/kvm.h" +#include "system/dirtylimit.h" +#include "system/kvm.h" #include "hw/boards.h" /* for machine_dump_guest_core() */ @@ -71,27 +71,6 @@ /***********************************************************/ /* ram save/restore */ -/* - * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it - * worked for pages that were filled with the same char. We switched - * it to only search for the zero value. And to avoid confusion with - * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it. - * - * RAM_SAVE_FLAG_FULL was obsoleted in 2009. - * - * RAM_SAVE_FLAG_COMPRESS_PAGE (0x100) was removed in QEMU 9.1. - */ -#define RAM_SAVE_FLAG_FULL 0x01 -#define RAM_SAVE_FLAG_ZERO 0x02 -#define RAM_SAVE_FLAG_MEM_SIZE 0x04 -#define RAM_SAVE_FLAG_PAGE 0x08 -#define RAM_SAVE_FLAG_EOS 0x10 -#define RAM_SAVE_FLAG_CONTINUE 0x20 -#define RAM_SAVE_FLAG_XBZRLE 0x40 -/* 0x80 is reserved in rdma.h for RAM_SAVE_FLAG_HOOK */ -#define RAM_SAVE_FLAG_MULTIFD_FLUSH 0x200 -/* We can't use any flag that is bigger than 0x200 */ - /* * mapped-ram migration supports O_DIRECT, so we need to make sure the * userspace buffer, the IO operation size and the file offset are @@ -112,6 +91,36 @@ XBZRLECacheStats xbzrle_counters; +/* + * This structure locates a specific location of a guest page. In QEMU, + * it's described in a tuple of (ramblock, offset). + */ +struct PageLocation { + RAMBlock *block; + unsigned long offset; +}; +typedef struct PageLocation PageLocation; + +/** + * PageLocationHint: describes a hint to a page location + * + * @valid set if the hint is vaild and to be consumed + * @location: the hint content + * + * In postcopy preempt mode, the urgent channel may provide hints to the + * background channel, so that QEMU source can try to migrate whatever is + * right after the requested urgent pages. + * + * This is based on the assumption that the VM (already running on the + * destination side) tends to access the memory with spatial locality. + * This is also the default behavior of vanilla postcopy (preempt off). + */ +struct PageLocationHint { + bool valid; + PageLocation location; +}; +typedef struct PageLocationHint PageLocationHint; + /* used by the search for pages to send */ struct PageSearchStatus { /* The migration channel used for a specific host page */ @@ -216,7 +225,9 @@ static bool postcopy_preempt_active(void) bool migrate_ram_is_ignored(RAMBlock *block) { + MigMode mode = migrate_mode(); return !qemu_ram_is_migratable(block) || + mode == MIG_MODE_CPR_TRANSFER || (migrate_ignore_shared() && qemu_ram_is_shared(block) && qemu_ram_is_named_file(block)); } @@ -414,6 +425,13 @@ struct RAMState { * RAM migration. */ unsigned int postcopy_bmap_sync_requested; + /* + * Page hint during postcopy when preempt mode is on. Return path + * thread sets it, while background migration thread consumes it. + * + * Protected by @bitmap_mutex. 
+ */ + PageLocationHint page_hint; }; typedef struct RAMState RAMState; @@ -467,13 +485,6 @@ void ram_transferred_add(uint64_t bytes) } } -struct MigrationOps { - int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss); -}; -typedef struct MigrationOps MigrationOps; - -MigrationOps *migration_ops; - static int ram_save_host_page_urgent(PageSearchStatus *pss); /* NOTE: page is the PFN not real ram_addr_t. */ @@ -820,14 +831,24 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs, bool ret; /* - * Clear dirty bitmap if needed. This _must_ be called before we - * send any of the page in the chunk because we need to make sure - * we can capture further page content changes when we sync dirty - * log the next time. So as long as we are going to send any of - * the page in the chunk we clear the remote dirty bitmap for all. - * Clearing it earlier won't be a problem, but too late will. + * During the last stage (after source VM stopped), resetting the write + * protections isn't needed as we know there will be either (1) no + * further writes if migration will complete, or (2) migration fails + * at last then tracking isn't needed either. + * + * Do the same for postcopy due to the same reason. */ - migration_clear_memory_region_dirty_bitmap(rb, page); + if (!rs->last_stage && !migration_in_postcopy()) { + /* + * Clear dirty bitmap if needed. This _must_ be called before we + * send any of the page in the chunk because we need to make sure + * we can capture further page content changes when we sync dirty + * log the next time. So as long as we are going to send any of + * the page in the chunk we clear the remote dirty bitmap for all. + * Clearing it earlier won't be a problem, but too late will. + */ + migration_clear_memory_region_dirty_bitmap(rb, page); + } ret = test_and_clear_bit(page, rb->bmap); if (ret) { @@ -837,8 +858,8 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs, return ret; } -static void dirty_bitmap_clear_section(MemoryRegionSection *section, - void *opaque) +static int dirty_bitmap_clear_section(MemoryRegionSection *section, + void *opaque) { const hwaddr offset = section->offset_within_region; const hwaddr size = int128_get64(section->size); @@ -857,6 +878,7 @@ static void dirty_bitmap_clear_section(MemoryRegionSection *section, } *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages); bitmap_clear(rb->bmap, start, npages); + return 0; } /* @@ -1088,9 +1110,10 @@ static void migration_bitmap_sync(RAMState *rs, bool last_stage) } } -static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage) +void migration_bitmap_sync_precopy(bool last_stage) { Error *local_err = NULL; + assert(ram_state); /* * The current notifier usage is just an optimization to migration, so we @@ -1101,7 +1124,7 @@ static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage) local_err = NULL; } - migration_bitmap_sync(rs, last_stage); + migration_bitmap_sync(ram_state, last_stage); if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) { error_report_err(local_err); @@ -1168,32 +1191,6 @@ static int save_zero_page(RAMState *rs, PageSearchStatus *pss, return len; } -/* - * @pages: the number of pages written by the control path, - * < 0 - error - * > 0 - number of pages written - * - * Return true if the pages has been saved, otherwise false is returned. 
- */ -static bool control_save_page(PageSearchStatus *pss, - ram_addr_t offset, int *pages) -{ - int ret; - - ret = rdma_control_save_page(pss->pss_channel, pss->block->offset, offset, - TARGET_PAGE_SIZE); - if (ret == RAM_SAVE_CONTROL_NOT_SUPP) { - return false; - } - - if (ret == RAM_SAVE_CONTROL_DELAYED) { - *pages = 1; - return true; - } - *pages = ret; - return true; -} - /* * directly send the page to the stream * @@ -1322,19 +1319,12 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss) pss->page = 0; pss->block = QLIST_NEXT_RCU(pss->block, next); if (!pss->block) { - if (migrate_multifd() && - (!migrate_multifd_flush_after_each_section() || - migrate_mapped_ram())) { + if (multifd_ram_sync_per_round()) { QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel; - int ret = multifd_send_sync_main(); + int ret = multifd_ram_flush_and_sync(f); if (ret < 0) { return ret; } - - if (!migrate_mapped_ram()) { - qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); - qemu_fflush(f); - } } /* Hit the end of the list */ @@ -1765,19 +1755,17 @@ bool ram_write_tracking_available(void) bool ram_write_tracking_compatible(void) { - assert(0); - return false; + g_assert_not_reached(); } int ram_write_tracking_start(void) { - assert(0); - return -1; + g_assert_not_reached(); } void ram_write_tracking_stop(void) { - assert(0); + g_assert_not_reached(); } #endif /* defined(__linux__) */ @@ -1795,7 +1783,7 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss) { RAMBlock *block; ram_addr_t offset; - bool dirty; + bool dirty = false; do { block = unqueue_page(rs, &offset); @@ -1987,53 +1975,40 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len, } /** - * ram_save_target_page_legacy: save one target page - * - * Returns the number of pages written + * ram_save_target_page: save one target page to the precopy thread + * OR to multifd workers. * * @rs: current RAM state * @pss: data about the page we want to send */ -static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss) +static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss) { ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; int res; - if (control_save_page(pss, offset, &res)) { - return res; - } + /* Hand over to RDMA first */ + if (migrate_rdma()) { + res = rdma_control_save_page(pss->pss_channel, pss->block->offset, + offset, TARGET_PAGE_SIZE); - if (save_zero_page(rs, pss, offset)) { - return 1; + if (res == RAM_SAVE_CONTROL_DELAYED) { + res = 1; + } + return res; } - return ram_save_page(rs, pss); -} - -/** - * ram_save_target_page_multifd: send one target page to multifd workers - * - * Returns 1 if the page was queued, -1 otherwise. - * - * @rs: current RAM state - * @pss: data about the page we want to send - */ -static int ram_save_target_page_multifd(RAMState *rs, PageSearchStatus *pss) -{ - RAMBlock *block = pss->block; - ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; - - /* - * While using multifd live migration, we still need to handle zero - * page checking on the migration main thread. 
- */ - if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) { + if (!migrate_multifd() + || migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) { if (save_zero_page(rs, pss, offset)) { return 1; } } - return ram_save_multifd_page(block, offset); + if (migrate_multifd() && !migration_in_postcopy()) { + return ram_save_multifd_page(pss->block, offset); + } + + return ram_save_page(rs, pss); } /* Should be called before sending a host page */ @@ -2091,6 +2066,21 @@ static void pss_host_page_finish(PageSearchStatus *pss) pss->host_page_start = pss->host_page_end = 0; } +static void ram_page_hint_update(RAMState *rs, PageSearchStatus *pss) +{ + PageLocationHint *hint = &rs->page_hint; + + /* If there's a pending hint not consumed, don't bother */ + if (hint->valid) { + return; + } + + /* Provide a hint to the background stream otherwise */ + hint->location.block = pss->block; + hint->location.offset = pss->page; + hint->valid = true; +} + /* * Send an urgent host page specified by `pss'. Need to be called with * bitmap_mutex held. @@ -2122,7 +2112,7 @@ static int ram_save_host_page_urgent(PageSearchStatus *pss) if (page_dirty) { /* Be strict to return code; it must be 1, or what else? */ - if (migration_ops->ram_save_target_page(rs, pss) != 1) { + if (ram_save_target_page(rs, pss) != 1) { error_report_once("%s: ram_save_target_page failed", __func__); ret = -1; goto out; @@ -2136,6 +2126,7 @@ static int ram_save_host_page_urgent(PageSearchStatus *pss) /* For urgent requests, flush immediately if sent */ if (sent) { qemu_fflush(pss->pss_channel); + ram_page_hint_update(rs, pss); } return ret; } @@ -2191,7 +2182,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) if (preempt_active) { qemu_mutex_unlock(&rs->bitmap_mutex); } - tmppages = migration_ops->ram_save_target_page(rs, pss); + tmppages = ram_save_target_page(rs, pss); if (tmppages >= 0) { pages += tmppages; /* @@ -2223,6 +2214,30 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) return (res < 0 ? 
res : pages); } +static bool ram_page_hint_valid(RAMState *rs) +{ + /* There's only page hint during postcopy preempt mode */ + if (!postcopy_preempt_active()) { + return false; + } + + return rs->page_hint.valid; +} + +static void ram_page_hint_collect(RAMState *rs, RAMBlock **block, + unsigned long *page) +{ + PageLocationHint *hint = &rs->page_hint; + + assert(hint->valid); + + *block = hint->location.block; + *page = hint->location.offset; + + /* Mark the hint consumed */ + hint->valid = false; +} + /** * ram_find_and_save_block: finds a dirty page and sends it to f * @@ -2239,6 +2254,8 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) static int ram_find_and_save_block(RAMState *rs) { PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY]; + unsigned long next_page; + RAMBlock *next_block; int pages = 0; /* No dirty page as there is zero RAM */ @@ -2258,22 +2275,31 @@ static int ram_find_and_save_block(RAMState *rs) rs->last_page = 0; } - pss_init(pss, rs->last_seen_block, rs->last_page); + if (ram_page_hint_valid(rs)) { + ram_page_hint_collect(rs, &next_block, &next_page); + } else { + next_block = rs->last_seen_block; + next_page = rs->last_page; + } + + pss_init(pss, next_block, next_page); while (true){ if (!get_queued_page(rs, pss)) { /* priority queue empty, so just search for something dirty */ int res = find_dirty_block(rs, pss); - if (res != PAGE_DIRTY_FOUND) { - if (res == PAGE_ALL_CLEAN) { - break; - } else if (res == PAGE_TRY_AGAIN) { - continue; - } else if (res < 0) { - pages = res; - break; - } + + if (res == PAGE_ALL_CLEAN) { + break; + } else if (res == PAGE_TRY_AGAIN) { + continue; + } else if (res < 0) { + pages = res; + break; } + + /* Otherwise we must have a dirty page to move */ + assert(res == PAGE_DIRTY_FOUND); } pages = ram_save_host_page(rs, pss); if (pages) { @@ -2387,9 +2413,15 @@ static void ram_save_cleanup(void *opaque) ram_bitmaps_destroy(); xbzrle_cleanup(); + multifd_ram_save_cleanup(); ram_state_cleanup(rsp); - g_free(migration_ops); - migration_ops = NULL; +} + +static void ram_page_hint_reset(PageLocationHint *hint) +{ + hint->location.block = NULL; + hint->location.offset = 0; + hint->valid = false; } static void ram_state_reset(RAMState *rs) @@ -2404,6 +2436,8 @@ static void ram_state_reset(RAMState *rs) rs->last_page = 0; rs->last_version = ram_list.version; rs->xbzrle_started = false; + + ram_page_hint_reset(&rs->page_hint); } #define MAX_WAIT 50 /* ms, half buffered_file limit */ @@ -2783,7 +2817,7 @@ static bool ram_init_bitmaps(RAMState *rs, Error **errp) if (!ret) { goto out_unlock; } - migration_bitmap_sync_precopy(rs, false); + migration_bitmap_sync_precopy(false); } } out_unlock: @@ -2860,7 +2894,7 @@ void qemu_guest_free_page_hint(void *addr, size_t len) size_t used_len, start, npages; /* This function is currently expected to be used during live migration */ - if (!migration_is_setup_or_active()) { + if (!migration_is_running()) { return; } @@ -3055,27 +3089,43 @@ static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp) return ret; } - migration_ops = g_malloc0(sizeof(MigrationOps)); - if (migrate_multifd()) { - migration_ops->ram_save_target_page = ram_save_target_page_multifd; - } else { - migration_ops->ram_save_target_page = ram_save_target_page_legacy; + multifd_ram_save_setup(); } + /* + * This operation is unfortunate.. 
+ * + * For legacy QEMUs using per-section sync + * ======================================= + * + * This must exist because the EOS below requires the SYNC messages + * per-channel to work. + * + * For modern QEMUs using per-round sync + * ===================================== + * + * Logically such sync is not needed, and recv threads should not run + * until setup ready (using things like channels_ready on src). Then + * we should be all fine. + * + * However even if we add channels_ready to recv side in new QEMUs, old + * QEMU won't have them so this sync will still be needed to make sure + * multifd recv threads won't start processing guest pages early before + * ram_load_setup() is properly done. + * + * Let's stick with this. Fortunately the overhead is low to sync + * during setup because the VM is running, so at least it's not + * accounted as part of downtime. + */ bql_unlock(); - ret = multifd_send_sync_main(); + ret = multifd_ram_flush_and_sync(f); bql_lock(); if (ret < 0) { error_setg(errp, "%s: multifd synchronization failed", __func__); return ret; } - if (migrate_multifd() && !migrate_multifd_flush_after_each_section() - && !migrate_mapped_ram()) { - qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); - } - qemu_put_be64(f, RAM_SAVE_FLAG_EOS); ret = qemu_fflush(f); if (ret < 0) { @@ -3207,11 +3257,9 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) } out: - if (ret >= 0 - && migration_is_setup_or_active()) { - if (migrate_multifd() && migrate_multifd_flush_after_each_section() && - !migrate_mapped_ram()) { - ret = multifd_send_sync_main(); + if (ret >= 0 && migration_is_running()) { + if (multifd_ram_sync_per_section()) { + ret = multifd_ram_flush_and_sync(f); if (ret < 0) { return ret; } @@ -3244,11 +3292,13 @@ static int ram_save_complete(QEMUFile *f, void *opaque) RAMState *rs = *temp; int ret = 0; + trace_ram_save_complete(rs->migration_dirty_pages, 0); + rs->last_stage = !migration_in_colo_state(); WITH_RCU_READ_LOCK_GUARD() { if (!migration_in_postcopy()) { - migration_bitmap_sync_precopy(rs, true); + migration_bitmap_sync_precopy(true); } ret = rdma_registration_start(f, RAM_CONTROL_FINISH); @@ -3283,9 +3333,15 @@ static int ram_save_complete(QEMUFile *f, void *opaque) } } - ret = multifd_send_sync_main(); - if (ret < 0) { - return ret; + if (multifd_ram_sync_per_section()) { + /* + * Only the old dest QEMU will need this sync, because each EOS + * will require one SYNC message on each channel. 
+ */ + ret = multifd_ram_flush_and_sync(f); + if (ret < 0) { + return ret; + } } if (migrate_mapped_ram()) { @@ -3301,6 +3357,9 @@ static int ram_save_complete(QEMUFile *f, void *opaque) } qemu_put_be64(f, RAM_SAVE_FLAG_EOS); + + trace_ram_save_complete(rs->migration_dirty_pages, 1); + return qemu_fflush(f); } @@ -3330,7 +3389,7 @@ static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy, if (!migration_in_postcopy()) { bql_lock(); WITH_RCU_READ_LOCK_GUARD() { - migration_bitmap_sync_precopy(rs, false); + migration_bitmap_sync_precopy(false); } bql_unlock(); } @@ -3631,7 +3690,9 @@ static int ram_load_cleanup(void *opaque) RAMBlock *rb; RAMBLOCK_FOREACH_NOT_IGNORED(rb) { - qemu_ram_block_writeback(rb); + if (memory_region_is_nonvolatile(rb->mr)) { + qemu_ram_block_writeback(rb); + } } xbzrle_load_cleanup(); @@ -3796,15 +3857,7 @@ int ram_load_postcopy(QEMUFile *f, int channel) TARGET_PAGE_SIZE); } break; - case RAM_SAVE_FLAG_MULTIFD_FLUSH: - multifd_recv_sync_main(); - break; case RAM_SAVE_FLAG_EOS: - /* normal exit */ - if (migrate_multifd() && - migrate_multifd_flush_after_each_section()) { - multifd_recv_sync_main(); - } break; default: error_report("Unknown combination of migration flags: 0x%x" @@ -4004,8 +4057,6 @@ static void parse_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block, /* Skip pages array */ qemu_set_offset(f, block->pages_offset + length, SEEK_SET); - - return; } static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length) @@ -4294,6 +4345,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id) * it will be necessary to reduce the granularity of this * critical section. */ + trace_ram_load_start(); WITH_RCU_READ_LOCK_GUARD() { if (postcopy_running) { /* @@ -4460,6 +4512,42 @@ static int ram_resume_prepare(MigrationState *s, void *opaque) return 0; } +static bool ram_save_postcopy_prepare(QEMUFile *f, void *opaque, Error **errp) +{ + int ret; + + if (migrate_multifd()) { + /* + * When multifd is enabled, source QEMU needs to make sure all the + * pages queued before postcopy starts have been flushed. + * + * The load of these pages must happen before switching to postcopy. + * It's because loading of guest pages (so far) in multifd recv + * threads is still non-atomic, so the load cannot happen with vCPUs + * running on the destination side. + * + * This flush and sync will guarantee that those pages are loaded + * _before_ postcopy starts on the destination. The rationale is, + * this happens before VM stops (and before source QEMU sends all + * the rest of the postcopy messages). So when the destination QEMU + * receives the postcopy messages, it must have received the sync + * message on the main channel (either RAM_SAVE_FLAG_MULTIFD_FLUSH, + * or RAM_SAVE_FLAG_EOS), and such message would guarantee that + * all previous guest pages queued in the multifd channels are + * completely loaded. 
+ */ + ret = multifd_ram_flush_and_sync(f); + if (ret < 0) { + error_setg(errp, "%s: multifd flush and sync failed", __func__); + return false; + } + } + + qemu_put_be64(f, RAM_SAVE_FLAG_EOS); + + return true; +} + void postcopy_preempt_shutdown_file(MigrationState *s) { qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS); @@ -4469,8 +4557,7 @@ void postcopy_preempt_shutdown_file(MigrationState *s) static SaveVMHandlers savevm_ram_handlers = { .save_setup = ram_save_setup, .save_live_iterate = ram_save_iterate, - .save_live_complete_postcopy = ram_save_complete, - .save_live_complete_precopy = ram_save_complete, + .save_complete = ram_save_complete, .has_postcopy = ram_has_postcopy, .state_pending_exact = ram_state_pending_exact, .state_pending_estimate = ram_state_pending_estimate, @@ -4479,6 +4566,7 @@ static SaveVMHandlers savevm_ram_handlers = { .load_setup = ram_load_setup, .load_cleanup = ram_load_cleanup, .resume_prepare = ram_resume_prepare, + .save_postcopy_prepare = ram_save_postcopy_prepare, }; static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host, @@ -4498,7 +4586,7 @@ static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host, return; } - if (!migration_is_idle()) { + if (migration_is_running()) { /* * Precopy code on the source cannot deal with the size of RAM blocks * changing at random points in time - especially after sending the @@ -4506,8 +4594,10 @@ static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host, * Abort and indicate a proper reason. */ error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr); - migration_cancel(err); + migrate_set_error(migrate_get_current(), err); error_free(err); + + migration_cancel(); } switch (ps) { diff --git a/migration/ram.h b/migration/ram.h index bc0318b8346..921c39a2c5c 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -33,6 +33,34 @@ #include "exec/cpu-common.h" #include "io/channel.h" +/* + * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it + * worked for pages that were filled with the same char. We switched + * it to only search for the zero value. And to avoid confusion with + * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it. + * + * RAM_SAVE_FLAG_FULL (0x01) was obsoleted in 2009. + * + * RAM_SAVE_FLAG_COMPRESS_PAGE (0x100) was removed in QEMU 9.1. + * + * RAM_SAVE_FLAG_HOOK is only used in RDMA. Whenever this is found in the + * data stream, the flags will be passed to rdma functions in the + * incoming-migration side. + * + * We can't use any flag that is bigger than 0x200, because the flags are + * always assumed to be encoded in a ramblock address offset, which is + * multiple of PAGE_SIZE. Here it means QEMU supports migration with any + * architecture that has PAGE_SIZE>=1K (0x400). + */ +#define RAM_SAVE_FLAG_ZERO 0x002 +#define RAM_SAVE_FLAG_MEM_SIZE 0x004 +#define RAM_SAVE_FLAG_PAGE 0x008 +#define RAM_SAVE_FLAG_EOS 0x010 +#define RAM_SAVE_FLAG_CONTINUE 0x020 +#define RAM_SAVE_FLAG_XBZRLE 0x040 +#define RAM_SAVE_FLAG_HOOK 0x080 +#define RAM_SAVE_FLAG_MULTIFD_FLUSH 0x200 + extern XBZRLECacheStats xbzrle_counters; /* Should be holding either ram_list.mutex, or the RCU lock. 
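/*
 * A minimal sketch of the encoding constraint described above: the flags
 * share a 64-bit word with a page-aligned ramblock offset, so they must
 * fit below the smallest supported target page size (0x400).  Helper
 * names are illustrative:
 */
static inline uint64_t ram_wire_encode(uint64_t page_aligned_offset,
                                       uint64_t flags)
{
    /* low bits of a page-aligned offset are zero, so the flags fit there */
    return page_aligned_offset | flags;
}

static inline void ram_wire_decode(uint64_t wire, uint64_t page_size,
                                   uint64_t *offset, uint64_t *flags)
{
    *flags = wire & (page_size - 1);
    *offset = wire & ~(page_size - 1);
}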
*/ @@ -44,6 +72,7 @@ extern XBZRLECacheStats xbzrle_counters; INTERNAL_RAMBLOCK_FOREACH(block) \ if (!qemu_ram_is_migratable(block)) {} else +void ram_mig_init(void); int xbzrle_cache_resize(uint64_t new_size, Error **errp); uint64_t ram_bytes_remaining(void); uint64_t ram_bytes_total(void); diff --git a/migration/rdma.c b/migration/rdma.c index 855753c6719..2d839fce6c4 100644 --- a/migration/rdma.c +++ b/migration/rdma.c @@ -30,7 +30,7 @@ #include "qemu/sockets.h" #include "qemu/bitmap.h" #include "qemu/coroutine.h" -#include "exec/memory.h" +#include "system/memory.h" #include #include #include @@ -767,149 +767,6 @@ static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id) trace_qemu_rdma_dump_gid(who, sgid, dgid); } -/* - * As of now, IPv6 over RoCE / iWARP is not supported by linux. - * We will try the next addrinfo struct, and fail if there are - * no other valid addresses to bind against. - * - * If user is listening on '[::]', then we will not have a opened a device - * yet and have no way of verifying if the device is RoCE or not. - * - * In this case, the source VM will throw an error for ALL types of - * connections (both IPv4 and IPv6) if the destination machine does not have - * a regular infiniband network available for use. - * - * The only way to guarantee that an error is thrown for broken kernels is - * for the management software to choose a *specific* interface at bind time - * and validate what time of hardware it is. - * - * Unfortunately, this puts the user in a fix: - * - * If the source VM connects with an IPv4 address without knowing that the - * destination has bound to '[::]' the migration will unconditionally fail - * unless the management software is explicitly listening on the IPv4 - * address while using a RoCE-based device. - * - * If the source VM connects with an IPv6 address, then we're OK because we can - * throw an error on the source (and similarly on the destination). - * - * But in mixed environments, this will be broken for a while until it is fixed - * inside linux. - * - * We do provide a *tiny* bit of help in this function: We can list all of the - * devices in the system and check to see if all the devices are RoCE or - * Infiniband. - * - * If we detect that we have a *pure* RoCE environment, then we can safely - * thrown an error even if the management software has specified '[::]' as the - * bind address. - * - * However, if there is are multiple hetergeneous devices, then we cannot make - * this assumption and the user just has to be sure they know what they are - * doing. - * - * Patches are being reviewed on linux-rdma. - */ -static int qemu_rdma_broken_ipv6_kernel(struct ibv_context *verbs, Error **errp) -{ - /* This bug only exists in linux, to our knowledge. */ -#ifdef CONFIG_LINUX - struct ibv_port_attr port_attr; - - /* - * Verbs are only NULL if management has bound to '[::]'. - * - * Let's iterate through all the devices and see if there any pure IB - * devices (non-ethernet). - * - * If not, then we can safely proceed with the migration. - * Otherwise, there are no guarantees until the bug is fixed in linux. - */ - if (!verbs) { - int num_devices; - struct ibv_device **dev_list = ibv_get_device_list(&num_devices); - bool roce_found = false; - bool ib_found = false; - - for (int x = 0; x < num_devices; x++) { - verbs = ibv_open_device(dev_list[x]); - /* - * ibv_open_device() is not documented to set errno. If - * it does, it's somebody else's doc bug. If it doesn't, - * the use of errno below is wrong. 
- * TODO Find out whether ibv_open_device() sets errno. - */ - if (!verbs) { - if (errno == EPERM) { - continue; - } else { - error_setg_errno(errp, errno, - "could not open RDMA device context"); - return -1; - } - } - - if (ibv_query_port(verbs, 1, &port_attr)) { - ibv_close_device(verbs); - error_setg(errp, - "RDMA ERROR: Could not query initial IB port"); - return -1; - } - - if (port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND) { - ib_found = true; - } else if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) { - roce_found = true; - } - - ibv_close_device(verbs); - - } - - if (roce_found) { - if (ib_found) { - warn_report("migrations may fail:" - " IPv6 over RoCE / iWARP in linux" - " is broken. But since you appear to have a" - " mixed RoCE / IB environment, be sure to only" - " migrate over the IB fabric until the kernel " - " fixes the bug."); - } else { - error_setg(errp, "RDMA ERROR: " - "You only have RoCE / iWARP devices in your systems" - " and your management software has specified '[::]'" - ", but IPv6 over RoCE / iWARP is not supported in Linux."); - return -1; - } - } - - return 0; - } - - /* - * If we have a verbs context, that means that some other than '[::]' was - * used by the management software for binding. In which case we can - * actually warn the user about a potentially broken kernel. - */ - - /* IB ports start with 1, not 0 */ - if (ibv_query_port(verbs, 1, &port_attr)) { - error_setg(errp, "RDMA ERROR: Could not query initial IB port"); - return -1; - } - - if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) { - error_setg(errp, "RDMA ERROR: " - "Linux kernel's RoCE / iWARP does not support IPv6 " - "(but patches on linux-rdma in progress)"); - return -1; - } - -#endif - - return 0; -} - /* * Figure out which RDMA device corresponds to the requested IP hostname * Also create the initial connection manager identifiers for opening @@ -917,7 +774,6 @@ static int qemu_rdma_broken_ipv6_kernel(struct ibv_context *verbs, Error **errp) */ static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp) { - Error *err = NULL; int ret; struct rdma_addrinfo *res; char port_str[16]; @@ -953,9 +809,8 @@ static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp) goto err_resolve_get_addr; } - /* Try all addresses, saving the first error in @err */ + /* Try all addresses, exit loop on first success of resolving address */ for (struct rdma_addrinfo *e = res; e != NULL; e = e->ai_next) { - Error **local_errp = err ? 
NULL : &err; inet_ntop(e->ai_family, &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip); @@ -964,25 +819,12 @@ static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp) ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr, RDMA_RESOLVE_TIMEOUT_MS); if (ret >= 0) { - if (e->ai_family == AF_INET6) { - ret = qemu_rdma_broken_ipv6_kernel(rdma->cm_id->verbs, - local_errp); - if (ret < 0) { - continue; - } - } - error_free(err); goto route; } } rdma_freeaddrinfo(res); - if (err) { - error_propagate(errp, err); - } else { - error_setg(errp, "RDMA ERROR: could not resolve address %s", - rdma->host); - } + error_setg(errp, "RDMA ERROR: could not resolve address %s", rdma->host); goto err_resolve_get_addr; route: @@ -2611,7 +2453,6 @@ static int qemu_rdma_connect(RDMAContext *rdma, bool return_path, static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp) { - Error *err = NULL; int ret; struct rdma_cm_id *listen_id; char ip[40] = "unknown"; @@ -2661,9 +2502,8 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp) goto err_dest_init_bind_addr; } - /* Try all addresses, saving the first error in @err */ + /* Try all addresses */ for (e = res; e != NULL; e = e->ai_next) { - Error **local_errp = err ? NULL : &err; inet_ntop(e->ai_family, &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip); @@ -2672,24 +2512,12 @@ static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp) if (ret < 0) { continue; } - if (e->ai_family == AF_INET6) { - ret = qemu_rdma_broken_ipv6_kernel(listen_id->verbs, - local_errp); - if (ret < 0) { - continue; - } - } - error_free(err); break; } rdma_freeaddrinfo(res); if (!e) { - if (err) { - error_propagate(errp, err); - } else { - error_setg(errp, "RDMA ERROR: Error: could not rdma_bind_addr!"); - } + error_setg(errp, "RDMA ERROR: Error: could not rdma_bind_addr!"); goto err_dest_init_bind_addr; } @@ -3284,14 +3112,11 @@ static int qemu_rdma_save_page(QEMUFile *f, ram_addr_t block_offset, int rdma_control_save_page(QEMUFile *f, ram_addr_t block_offset, ram_addr_t offset, size_t size) { - if (!migrate_rdma() || migration_in_postcopy()) { - return RAM_SAVE_CONTROL_NOT_SUPP; - } + assert(migrate_rdma()); int ret = qemu_rdma_save_page(f, block_offset, offset, size); - if (ret != RAM_SAVE_CONTROL_DELAYED && - ret != RAM_SAVE_CONTROL_NOT_SUPP) { + if (ret != RAM_SAVE_CONTROL_DELAYED) { if (ret < 0) { qemu_file_set_error(f, ret); } @@ -3829,7 +3654,7 @@ int rdma_block_notification_handle(QEMUFile *f, const char *name) int rdma_registration_start(QEMUFile *f, uint64_t flags) { - if (!migrate_rdma() || migration_in_postcopy()) { + if (!migrate_rdma()) { return 0; } @@ -3861,7 +3686,7 @@ int rdma_registration_stop(QEMUFile *f, uint64_t flags) RDMAControlHeader head = { .len = 0, .repeat = 1 }; int ret; - if (!migrate_rdma() || migration_in_postcopy()) { + if (!migrate_rdma()) { return 0; } @@ -3985,7 +3810,7 @@ static void qio_channel_rdma_finalize(Object *obj) } static void qio_channel_rdma_class_init(ObjectClass *klass, - void *class_data G_GNUC_UNUSED) + const void *class_data G_GNUC_UNUSED) { QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass); @@ -4174,7 +3999,7 @@ void rdma_start_outgoing_migration(void *opaque, s->to_dst_file = rdma_new_output(rdma); s->rdma_migration = true; - migrate_fd_connect(s, NULL); + migration_connect(s, NULL); return; return_path_err: qemu_rdma_cleanup(rdma); diff --git a/migration/rdma.h b/migration/rdma.h index a8d27f33b8f..f74f16a4599 100644 --- a/migration/rdma.h +++ b/migration/rdma.h 
@@ -19,7 +19,7 @@ #ifndef QEMU_MIGRATION_RDMA_H #define QEMU_MIGRATION_RDMA_H -#include "exec/memory.h" +#include "system/memory.h" void rdma_start_outgoing_migration(void *opaque, InetSocketAddress *host_port, Error **errp); @@ -33,14 +33,6 @@ void rdma_start_incoming_migration(InetSocketAddress *host_port, Error **errp); #define RAM_CONTROL_ROUND 1 #define RAM_CONTROL_FINISH 3 -/* - * Whenever this is found in the data stream, the flags - * will be passed to rdma functions in the incoming-migration - * side. - */ -#define RAM_SAVE_FLAG_HOOK 0x80 - -#define RAM_SAVE_CONTROL_NOT_SUPP -1000 #define RAM_SAVE_CONTROL_DELAYED -2000 #ifdef CONFIG_RDMA @@ -63,7 +55,7 @@ static inline int rdma_control_save_page(QEMUFile *f, ram_addr_t block_offset, ram_addr_t offset, size_t size) { - return RAM_SAVE_CONTROL_NOT_SUPP; + g_assert_not_reached(); } #endif #endif diff --git a/migration/savevm.c b/migration/savevm.c index 85958d7b09c..fabbeb296ae 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -37,6 +37,7 @@ #include "migration/register.h" #include "migration/global_state.h" #include "migration/channel-block.h" +#include "multifd.h" #include "ram.h" #include "qemu-file.h" #include "savevm.h" @@ -46,27 +47,29 @@ #include "qapi/clone-visitor.h" #include "qapi/qapi-builtin-visit.h" #include "qemu/error-report.h" -#include "sysemu/cpus.h" -#include "exec/memory.h" +#include "system/cpus.h" +#include "system/memory.h" #include "exec/target_page.h" +#include "exec/page-vary.h" #include "trace.h" #include "qemu/iov.h" #include "qemu/job.h" #include "qemu/main-loop.h" #include "block/snapshot.h" +#include "block/thread-pool.h" #include "qemu/cutils.h" #include "io/channel-buffer.h" #include "io/channel-file.h" -#include "sysemu/replay.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" -#include "sysemu/xen.h" +#include "system/replay.h" +#include "system/runstate.h" +#include "system/system.h" +#include "system/xen.h" #include "migration/colo.h" #include "qemu/bitmap.h" #include "net/announce.h" #include "qemu/yank.h" #include "yank_functions.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" #include "options.h" const unsigned int postcopy_ram_discard_version; @@ -90,6 +93,7 @@ enum qemu_vm_cmd { MIG_CMD_ENABLE_COLO, /* Enable COLO */ MIG_CMD_POSTCOPY_RESUME, /* resume postcopy on dest */ MIG_CMD_RECV_BITMAP, /* Request for recved bitmap on dst */ + MIG_CMD_SWITCHOVER_START, /* Switchover start notification */ MIG_CMD_MAX }; @@ -109,6 +113,7 @@ static struct mig_cmd_args { [MIG_CMD_POSTCOPY_RESUME] = { .len = 0, .name = "POSTCOPY_RESUME" }, [MIG_CMD_PACKAGED] = { .len = 4, .name = "PACKAGED" }, [MIG_CMD_RECV_BITMAP] = { .len = -1, .name = "RECV_BITMAP" }, + [MIG_CMD_SWITCHOVER_START] = { .len = 0, .name = "SWITCHOVER_START" }, [MIG_CMD_MAX] = { .len = -1, .name = "MAX" }, }; @@ -129,6 +134,35 @@ static struct mig_cmd_args { * generic extendable format with an exception for two old entities. 
*/ +/***********************************************************/ +/* Optional load threads pool support */ + +static void qemu_loadvm_thread_pool_create(MigrationIncomingState *mis) +{ + assert(!mis->load_threads); + mis->load_threads = thread_pool_new(); + mis->load_threads_abort = false; +} + +static void qemu_loadvm_thread_pool_destroy(MigrationIncomingState *mis) +{ + qatomic_set(&mis->load_threads_abort, true); + + bql_unlock(); /* Load threads might be waiting for BQL */ + g_clear_pointer(&mis->load_threads, thread_pool_free); + bql_lock(); +} + +static bool qemu_loadvm_thread_pool_wait(MigrationState *s, + MigrationIncomingState *mis) +{ + bql_unlock(); /* Let load threads do work requiring BQL */ + thread_pool_wait(mis->load_threads); + bql_lock(); + + return !migrate_has_error(s); +} + /***********************************************************/ /* savevm/loadvm support */ @@ -232,7 +266,7 @@ typedef struct SaveState { static SaveState savevm_state = { .handlers = QTAILQ_HEAD_INITIALIZER(savevm_state.handlers), - .handler_pri_head = { [MIG_PRI_DEFAULT ... MIG_PRI_MAX] = NULL }, + .handler_pri_head = { [0 ... MIG_PRI_MAX] = NULL }, .global_section_id = 0, }; @@ -306,7 +340,7 @@ static int configuration_pre_load(void *opaque) * predates the variable-target-page-bits support and is using the * minimum possible value for this CPU. */ - state->target_page_bits = qemu_target_page_bits_min(); + state->target_page_bits = migration_legacy_page_bits(); return 0; } @@ -429,8 +463,7 @@ static const VMStateInfo vmstate_info_capability = { */ static bool vmstate_target_page_bits_needed(void *opaque) { - return qemu_target_page_bits() - > qemu_target_page_bits_min(); + return qemu_target_page_bits() > migration_legacy_page_bits(); } static const VMStateDescription vmstate_target_page_bits = { @@ -704,7 +737,7 @@ static int calculate_compat_instance_id(const char *idstr) static inline MigrationPriority save_state_priority(SaveStateEntry *se) { - if (se->vmsd) { + if (se->vmsd && se->vmsd->priority) { return se->vmsd->priority; } return MIG_PRI_DEFAULT; @@ -860,25 +893,6 @@ static void vmstate_check(const VMStateDescription *vmsd) } } -/* - * See comment in hw/intc/xics.c:icp_realize() - * - * This function can be removed when - * pre_2_10_vmstate_register_dummy_icp() is removed. 
- */ -int vmstate_replace_hack_for_ppc(VMStateIf *obj, int instance_id, - const VMStateDescription *vmsd, - void *opaque) -{ - SaveStateEntry *se = find_se(vmsd->name, instance_id); - - if (se) { - savevm_state_handler_remove(se); - g_free(se->compat); - g_free(se); - } - return vmstate_register(obj, instance_id, vmsd, opaque); -} int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id, const VMStateDescription *vmsd, @@ -1220,6 +1234,19 @@ void qemu_savevm_send_recv_bitmap(QEMUFile *f, char *block_name) qemu_savevm_command_send(f, MIG_CMD_RECV_BITMAP, len + 1, (uint8_t *)buf); } +static void qemu_savevm_send_switchover_start(QEMUFile *f) +{ + trace_savevm_send_switchover_start(); + qemu_savevm_command_send(f, MIG_CMD_SWITCHOVER_START, 0, NULL); +} + +void qemu_savevm_maybe_send_switchover_start(QEMUFile *f) +{ + if (migrate_send_switchover_start()) { + qemu_savevm_send_switchover_start(f); + } +} + bool qemu_savevm_state_blocked(Error **errp) { SaveStateEntry *se; @@ -1250,8 +1277,7 @@ void qemu_savevm_non_migratable_list(strList **reasons) void qemu_savevm_state_header(QEMUFile *f) { MigrationState *s = migrate_get_current(); - - s->vmdesc = json_writer_new(false); + JSONWriter *vmdesc = s->vmdesc; trace_savevm_state_header(); qemu_put_be32(f, QEMU_VM_FILE_MAGIC); @@ -1260,16 +1286,21 @@ void qemu_savevm_state_header(QEMUFile *f) if (s->send_configuration) { qemu_put_byte(f, QEMU_VM_CONFIGURATION); - /* - * This starts the main json object and is paired with the - * json_writer_end_object in - * qemu_savevm_state_complete_precopy_non_iterable - */ - json_writer_start_object(s->vmdesc, NULL); + if (vmdesc) { + /* + * This starts the main json object and is paired with the + * json_writer_end_object in + * qemu_savevm_state_complete_precopy_non_iterable + */ + json_writer_start_object(vmdesc, NULL); + json_writer_start_object(vmdesc, "configuration"); + } - json_writer_start_object(s->vmdesc, "configuration"); - vmstate_save_state(f, &vmstate_configuration, &savevm_state, s->vmdesc); - json_writer_end_object(s->vmdesc); + vmstate_save_state(f, &vmstate_configuration, &savevm_state, vmdesc); + + if (vmdesc) { + json_writer_end_object(vmdesc); + } } } @@ -1315,16 +1346,19 @@ int qemu_savevm_state_setup(QEMUFile *f, Error **errp) { ERRP_GUARD(); MigrationState *ms = migrate_get_current(); + JSONWriter *vmdesc = ms->vmdesc; SaveStateEntry *se; int ret = 0; - json_writer_int64(ms->vmdesc, "page_size", qemu_target_page_size()); - json_writer_start_array(ms->vmdesc, "devices"); + if (vmdesc) { + json_writer_int64(vmdesc, "page_size", qemu_target_page_size()); + json_writer_start_array(vmdesc, "devices"); + } trace_savevm_state_setup(); QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { if (se->vmsd && se->vmsd->early_setup) { - ret = vmstate_save(f, se, ms->vmdesc, errp); + ret = vmstate_save(f, se, vmdesc, errp); if (ret) { migrate_set_error(ms, *errp); qemu_file_set_error(f, ret); @@ -1443,44 +1477,61 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) return all_finished; } -static bool should_send_vmdesc(void) +bool should_send_vmdesc(void) { MachineState *machine = MACHINE(qdev_get_machine()); - bool in_postcopy = migration_in_postcopy(); - return !machine->suppress_vmdesc && !in_postcopy; + + return !machine->suppress_vmdesc; +} + +static bool qemu_savevm_complete_exists(SaveStateEntry *se) +{ + return se->ops && se->ops->save_complete; } /* - * Calls the save_live_complete_postcopy methods - * causing the last few pages to be sent immediately and doing any 
associated - * cleanup. + * Invoke the ->save_complete() if necessary. + * Returns: 0 if skip the current SE or succeeded, <0 if error happened. + */ +static int qemu_savevm_complete(SaveStateEntry *se, QEMUFile *f) +{ + int ret; + + if (se->ops->is_active) { + if (!se->ops->is_active(se->opaque)) { + return 0; + } + } + + trace_savevm_section_start(se->idstr, se->section_id); + save_section_header(f, se, QEMU_VM_SECTION_END); + ret = se->ops->save_complete(f, se->opaque); + trace_savevm_section_end(se->idstr, se->section_id, ret); + save_section_footer(f, se); + + if (ret < 0) { + qemu_file_set_error(f, ret); + } + + return ret; +} + +/* + * Complete saving any postcopy-able devices. + * * Note postcopy also calls qemu_savevm_state_complete_precopy to complete * all the other devices, but that happens at the point we switch to postcopy. */ void qemu_savevm_state_complete_postcopy(QEMUFile *f) { SaveStateEntry *se; - int ret; QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { - if (!se->ops || !se->ops->save_live_complete_postcopy) { + if (!qemu_savevm_complete_exists(se)) { continue; } - if (se->ops->is_active) { - if (!se->ops->is_active(se->opaque)) { - continue; - } - } - trace_savevm_section_start(se->idstr, se->section_id); - /* Section type */ - qemu_put_byte(f, QEMU_VM_SECTION_END); - qemu_put_be32(f, se->section_id); - ret = se->ops->save_live_complete_postcopy(f, se->opaque); - trace_savevm_section_end(se->idstr, se->section_id, ret); - save_section_footer(f, se); - if (ret < 0) { - qemu_file_set_error(f, ret); + if (qemu_savevm_complete(se, f) < 0) { return; } } @@ -1489,18 +1540,13 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f) qemu_fflush(f); } -static -int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy) +bool qemu_savevm_state_postcopy_prepare(QEMUFile *f, Error **errp) { - int64_t start_ts_each, end_ts_each; SaveStateEntry *se; - int ret; + bool ret; QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { - if (!se->ops || - (in_postcopy && se->ops->has_postcopy && - se->ops->has_postcopy(se->opaque)) || - !se->ops->save_live_complete_precopy) { + if (!se->ops || !se->ops->save_postcopy_prepare) { continue; } @@ -1510,31 +1556,96 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy) } } - start_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME); trace_savevm_section_start(se->idstr, se->section_id); - save_section_header(f, se, QEMU_VM_SECTION_END); + save_section_header(f, se, QEMU_VM_SECTION_PART); + ret = se->ops->save_postcopy_prepare(f, se->opaque, errp); + save_section_footer(f, se); - ret = se->ops->save_live_complete_precopy(f, se->opaque); trace_savevm_section_end(se->idstr, se->section_id, ret); - save_section_footer(f, se); - if (ret < 0) { - qemu_file_set_error(f, ret); - return -1; + + if (!ret) { + assert(*errp); + return false; + } + } + + return true; +} + +int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy) +{ + int64_t start_ts_each, end_ts_each; + SaveStateEntry *se; + bool multifd_device_state = multifd_device_state_supported(); + + if (multifd_device_state) { + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { + SaveCompletePrecopyThreadHandler hdlr; + + if (!se->ops || (in_postcopy && se->ops->has_postcopy && + se->ops->has_postcopy(se->opaque)) || + !se->ops->save_complete_precopy_thread) { + continue; + } + + hdlr = se->ops->save_complete_precopy_thread; + multifd_spawn_device_state_save_thread(hdlr, + se->idstr, se->instance_id, + se->opaque); + } + } + + 
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { + if (!qemu_savevm_complete_exists(se)) { + continue; + } + + if (in_postcopy && se->ops->has_postcopy && + se->ops->has_postcopy(se->opaque)) { + /* + * If postcopy will start soon, and if the SE supports + * postcopy, then we can skip the SE for the postcopy phase. + */ + continue; + } + + start_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME); + if (qemu_savevm_complete(se, f) < 0) { + goto ret_fail_abort_threads; } end_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME); + trace_vmstate_downtime_save("iterable", se->idstr, se->instance_id, end_ts_each - start_ts_each); } + if (multifd_device_state) { + if (migrate_has_error(migrate_get_current())) { + multifd_abort_device_state_save_threads(); + } + + if (!multifd_join_device_state_save_threads()) { + qemu_file_set_error(f, -EINVAL); + return -1; + } + } + trace_vmstate_downtime_checkpoint("src-iterable-saved"); return 0; + +ret_fail_abort_threads: + if (multifd_device_state) { + multifd_abort_device_state_save_threads(); + multifd_join_device_state_save_threads(); + } + + return -1; } int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, - bool in_postcopy, - bool inactivate_disks) + bool in_postcopy) { MigrationState *ms = migrate_get_current(); int64_t start_ts_each, end_ts_each; @@ -1544,6 +1655,9 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, Error *local_err = NULL; int ret; + /* Making sure cpu states are synchronized before saving non-iterable */ + cpu_synchronize_all_states(); + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { if (se->vmsd && se->vmsd->early_setup) { /* Already saved during qemu_savevm_state_setup(). */ @@ -1565,76 +1679,42 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, end_ts_each - start_ts_each); } - if (inactivate_disks) { - /* Inactivate before sending QEMU_VM_EOF so that the - * bdrv_activate_all() on the other end won't fail. */ - ret = bdrv_inactivate_all(); - if (ret) { - error_setg(&local_err, "%s: bdrv_inactivate_all() failed (%d)", - __func__, ret); - migrate_set_error(ms, local_err); - error_report_err(local_err); - qemu_file_set_error(f, ret); - return ret; - } - } if (!in_postcopy) { /* Postcopy stream will still be going */ qemu_put_byte(f, QEMU_VM_EOF); - } - json_writer_end_array(vmdesc); - json_writer_end_object(vmdesc); - vmdesc_len = strlen(json_writer_get(vmdesc)); + if (vmdesc) { + json_writer_end_array(vmdesc); + json_writer_end_object(vmdesc); + vmdesc_len = strlen(json_writer_get(vmdesc)); - if (should_send_vmdesc()) { - qemu_put_byte(f, QEMU_VM_VMDESCRIPTION); - qemu_put_be32(f, vmdesc_len); - qemu_put_buffer(f, (uint8_t *)json_writer_get(vmdesc), vmdesc_len); + qemu_put_byte(f, QEMU_VM_VMDESCRIPTION); + qemu_put_be32(f, vmdesc_len); + qemu_put_buffer(f, (uint8_t *)json_writer_get(vmdesc), vmdesc_len); + } } - /* Free it now to detect any inconsistencies. 
*/ - json_writer_free(vmdesc); - ms->vmdesc = NULL; - trace_vmstate_downtime_checkpoint("src-non-iterable-saved"); return 0; } -int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only, - bool inactivate_disks) +int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only) { int ret; - Error *local_err = NULL; - bool in_postcopy = migration_in_postcopy(); - if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) { - error_report_err(local_err); + ret = qemu_savevm_state_complete_precopy_iterable(f, false); + if (ret) { + return ret; } - trace_savevm_state_complete_precopy(); - - cpu_synchronize_all_states(); - - if (!in_postcopy || iterable_only) { - ret = qemu_savevm_state_complete_precopy_iterable(f, in_postcopy); + if (!iterable_only) { + ret = qemu_savevm_state_complete_precopy_non_iterable(f, false); if (ret) { return ret; } } - if (iterable_only) { - goto flush; - } - - ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy, - inactivate_disks); - if (ret) { - return ret; - } - -flush: return qemu_fflush(f); } @@ -1732,7 +1812,8 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp) ret = qemu_file_get_error(f); if (ret == 0) { - qemu_savevm_state_complete_precopy(f, false, false); + qemu_savevm_maybe_send_switchover_start(f); + qemu_savevm_state_complete_precopy(f, false); ret = qemu_file_get_error(f); } if (ret != 0) { @@ -1758,7 +1839,7 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp) void qemu_savevm_live_state(QEMUFile *f) { /* save QEMU_VM_SECTION_END section */ - qemu_savevm_state_complete_precopy(f, true, false); + qemu_savevm_state_complete_precopy(f, true); qemu_put_byte(f, QEMU_VM_EOF); } @@ -2006,7 +2087,7 @@ static void *postcopy_ram_listen_thread(void *opaque) migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_POSTCOPY_ACTIVE); - qemu_sem_post(&mis->thread_sync_sem); + qemu_event_set(&mis->thread_sync_event); trace_postcopy_ram_listen_thread_start(); rcu_register_thread(); @@ -2015,6 +2096,8 @@ static void *postcopy_ram_listen_thread(void *opaque) * in qemu_file, and thus we must be blocking now. */ qemu_file_set_blocking(f, true); + + /* TODO: sanity check that only postcopiable data will be loaded here */ load_res = qemu_loadvm_state_main(f, mis); /* @@ -2075,8 +2158,9 @@ static void *postcopy_ram_listen_thread(void *opaque) * (If something broke then qemu will have to exit anyway since it's * got a bad migration state). */ + bql_lock(); migration_incoming_state_destroy(); - qemu_loadvm_state_cleanup(); + bql_unlock(); rcu_unregister_thread(); mis->have_listen_thread = false; @@ -2131,7 +2215,8 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis) } mis->have_listen_thread = true; - postcopy_thread_create(mis, &mis->listen_thread, "mig/dst/listen", + postcopy_thread_create(mis, &mis->listen_thread, + MIGRATION_THREAD_DST_LISTEN, postcopy_ram_listen_thread, QEMU_THREAD_DETACHED); trace_loadvm_postcopy_handle_listen("return"); @@ -2140,7 +2225,6 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis) static void loadvm_postcopy_handle_run_bh(void *opaque) { - Error *local_err = NULL; MigrationIncomingState *mis = opaque; trace_vmstate_downtime_checkpoint("dst-postcopy-bh-enter"); @@ -2156,22 +2240,20 @@ static void loadvm_postcopy_handle_run_bh(void *opaque) trace_vmstate_downtime_checkpoint("dst-postcopy-bh-announced"); - /* Make sure all file formats throw away their mutable metadata. - * If we get an error here, just don't restart the VM yet. 
*/ - bdrv_activate_all(&local_err); - if (local_err) { - error_report_err(local_err); - local_err = NULL; - autostart = false; - } - - trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cache-invalidated"); - dirty_bitmap_mig_before_vm_start(); if (autostart) { - /* Hold onto your hats, starting the CPU */ - vm_start(); + /* + * Make sure all file formats throw away their mutable metadata. + * If we get an error here, just don't restart the VM yet. + */ + bool success = migration_block_activate(NULL); + + trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cache-invalidated"); + + if (success) { + vm_start(); + } } else { /* leave it paused and let management decide when to start the CPU */ runstate_set(RUN_STATE_PAUSED); @@ -2431,6 +2513,26 @@ static int loadvm_process_enable_colo(MigrationIncomingState *mis) return ret; } +static int loadvm_postcopy_handle_switchover_start(void) +{ + SaveStateEntry *se; + + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { + int ret; + + if (!se->ops || !se->ops->switchover_start) { + continue; + } + + ret = se->ops->switchover_start(se->opaque); + if (ret < 0) { + return ret; + } + } + + return 0; +} + /* * Process an incoming 'QEMU_VM_COMMAND' * 0 just a normal return @@ -2529,6 +2631,9 @@ static int loadvm_process_command(QEMUFile *f) case MIG_CMD_ENABLE_COLO: return loadvm_process_enable_colo(mis); + + case MIG_CMD_SWITCHOVER_START: + return loadvm_postcopy_handle_switchover_start(); } return 0; @@ -2578,8 +2683,7 @@ static bool check_section_footer(QEMUFile *f, SaveStateEntry *se) } static int -qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis, - uint8_t type) +qemu_loadvm_section_start_full(QEMUFile *f, uint8_t type) { bool trace_downtime = (type == QEMU_VM_SECTION_FULL); uint32_t instance_id, version_id, section_id; @@ -2657,8 +2761,7 @@ qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis, } static int -qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis, - uint8_t type) +qemu_loadvm_section_part_end(QEMUFile *f, uint8_t type) { bool trace_downtime = (type == QEMU_VM_SECTION_END); int64_t start_ts, end_ts; @@ -2734,13 +2837,11 @@ static int qemu_loadvm_state_header(QEMUFile *f) if (migrate_get_current()->send_configuration) { if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) { error_report("Configuration section missing"); - qemu_loadvm_state_cleanup(); return -EINVAL; } ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0); if (ret) { - qemu_loadvm_state_cleanup(); return ret; } } @@ -2792,16 +2893,68 @@ static int qemu_loadvm_state_setup(QEMUFile *f, Error **errp) return 0; } -void qemu_loadvm_state_cleanup(void) +struct LoadThreadData { + MigrationLoadThread function; + void *opaque; +}; + +static int qemu_loadvm_load_thread(void *thread_opaque) +{ + struct LoadThreadData *data = thread_opaque; + MigrationIncomingState *mis = migration_incoming_get_current(); + g_autoptr(Error) local_err = NULL; + + if (!data->function(data->opaque, &mis->load_threads_abort, &local_err)) { + MigrationState *s = migrate_get_current(); + + /* + * Can't set load_threads_abort here since processing of main migration + * channel data could still be happening, resulting in launching of new + * load threads. + */ + + assert(local_err); + + /* + * In case of multiple load threads failing which thread error + * return we end setting is purely arbitrary. 
+ */ + migrate_set_error(s, local_err); + } + + return 0; +} + +void qemu_loadvm_start_load_thread(MigrationLoadThread function, + void *opaque) +{ + MigrationIncomingState *mis = migration_incoming_get_current(); + struct LoadThreadData *data; + + /* We only set it from this thread so it's okay to read it directly */ + assert(!mis->load_threads_abort); + + data = g_new(struct LoadThreadData, 1); + data->function = function; + data->opaque = opaque; + + thread_pool_submit_immediate(mis->load_threads, qemu_loadvm_load_thread, + data, g_free); +} + +void qemu_loadvm_state_cleanup(MigrationIncomingState *mis) { SaveStateEntry *se; trace_loadvm_state_cleanup(); + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { if (se->ops && se->ops->load_cleanup) { se->ops->load_cleanup(se->opaque); } } + + qemu_loadvm_thread_pool_destroy(mis); } /* Return true if we should continue the migration, or false. */ @@ -2893,14 +3046,14 @@ int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis) switch (section_type) { case QEMU_VM_SECTION_START: case QEMU_VM_SECTION_FULL: - ret = qemu_loadvm_section_start_full(f, mis, section_type); + ret = qemu_loadvm_section_start_full(f, section_type); if (ret < 0) { goto out; } break; case QEMU_VM_SECTION_PART: case QEMU_VM_SECTION_END: - ret = qemu_loadvm_section_part_end(f, mis, section_type); + ret = qemu_loadvm_section_part_end(f, section_type); if (ret < 0) { goto out; } @@ -2952,6 +3105,7 @@ int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis) int qemu_loadvm_state(QEMUFile *f) { + MigrationState *s = migrate_get_current(); MigrationIncomingState *mis = migration_incoming_get_current(); Error *local_err = NULL; int ret; @@ -2961,6 +3115,8 @@ int qemu_loadvm_state(QEMUFile *f) return -EINVAL; } + qemu_loadvm_thread_pool_create(mis); + ret = qemu_loadvm_state_header(f); if (ret) { return ret; @@ -2983,13 +3139,27 @@ int qemu_loadvm_state(QEMUFile *f) trace_qemu_loadvm_state_post_main(ret); if (mis->have_listen_thread) { - /* Listen thread still going, can't clean up yet */ + /* + * Postcopy listen thread still going, don't synchronize the + * cpus yet. 
+ */ return ret; } + /* When reaching here, it must be precopy */ if (ret == 0) { - ret = qemu_file_get_error(f); + if (migrate_has_error(migrate_get_current()) || + !qemu_loadvm_thread_pool_wait(s, mis)) { + ret = -EINVAL; + } else { + ret = qemu_file_get_error(f); + } } + /* + * Set this flag unconditionally so we'll catch further attempts to + * start additional threads via an appropriate assert() + */ + qatomic_set(&mis->load_threads_abort, true); /* * Try to read in the VMDESC section as well, so that dumping tools that @@ -3026,7 +3196,6 @@ int qemu_loadvm_state(QEMUFile *f) } } - qemu_loadvm_state_cleanup(); cpu_synchronize_all_post_init(); return ret; @@ -3066,6 +3235,29 @@ int qemu_loadvm_approve_switchover(void) return migrate_send_rp_switchover_ack(mis); } +bool qemu_loadvm_load_state_buffer(const char *idstr, uint32_t instance_id, + char *buf, size_t len, Error **errp) +{ + SaveStateEntry *se; + + se = find_se(idstr, instance_id); + if (!se) { + error_setg(errp, + "Unknown idstr %s or instance id %u for load state buffer", + idstr, instance_id); + return false; + } + + if (!se->ops || !se->ops->load_state_buffer) { + error_setg(errp, + "idstr %s / instance %u has no load state buffer operation", + idstr, instance_id); + return false; + } + + return se->ops->load_state_buffer(se->opaque, buf, len, errp); +} + bool save_snapshot(const char *name, bool overwrite, const char *vmstate, bool has_devices, strList *devices, Error **errp) { @@ -3213,11 +3405,7 @@ void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live, * side of the migration take control of the images. */ if (live && !saved_vm_running) { - ret = bdrv_inactivate_all(); - if (ret) { - error_setg(errp, "%s: bdrv_inactivate_all() failed (%d)", - __func__, ret); - } + migration_block_inactivate(); } } @@ -3288,6 +3476,7 @@ bool load_snapshot(const char *name, const char *vmstate, /* Don't even try to load empty VM states */ ret = bdrv_snapshot_find(bs_vm_state, &sn, name); if (ret < 0) { + error_setg(errp, "Snapshot can not be found"); return false; } else if (sn.vm_state_size == 0) { error_setg(errp, "This is a disk-only snapshot. 
Revert to it " @@ -3367,12 +3556,14 @@ void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev) qemu_ram_set_idstr(mr->ram_block, memory_region_name(mr), dev); qemu_ram_set_migratable(mr->ram_block); + ram_block_add_cpr_blocker(mr->ram_block, &error_fatal); } void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev) { qemu_ram_unset_idstr(mr->ram_block); qemu_ram_unset_migratable(mr->ram_block); + ram_block_del_cpr_blocker(mr->ram_block); } void vmstate_register_ram_global(MemoryRegion *mr) diff --git a/migration/savevm.h b/migration/savevm.h index 9ec96a995c9..2d5e9c71668 100644 --- a/migration/savevm.h +++ b/migration/savevm.h @@ -39,12 +39,13 @@ void qemu_savevm_state_header(QEMUFile *f); int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy); void qemu_savevm_state_cleanup(void); void qemu_savevm_state_complete_postcopy(QEMUFile *f); -int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only, - bool inactivate_disks); +int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only); void qemu_savevm_state_pending_exact(uint64_t *must_precopy, uint64_t *can_postcopy); void qemu_savevm_state_pending_estimate(uint64_t *must_precopy, uint64_t *can_postcopy); +int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy); +bool qemu_savevm_state_postcopy_prepare(QEMUFile *f, Error **errp); void qemu_savevm_send_ping(QEMUFile *f, uint32_t value); void qemu_savevm_send_open_return_path(QEMUFile *f); int qemu_savevm_send_packaged(QEMUFile *f, const uint8_t *buf, size_t len); @@ -53,6 +54,7 @@ void qemu_savevm_send_postcopy_listen(QEMUFile *f); void qemu_savevm_send_postcopy_run(QEMUFile *f); void qemu_savevm_send_postcopy_resume(QEMUFile *f); void qemu_savevm_send_recv_bitmap(QEMUFile *f, char *block_name); +void qemu_savevm_maybe_send_switchover_start(QEMUFile *f); void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name, uint16_t len, @@ -63,11 +65,14 @@ void qemu_savevm_live_state(QEMUFile *f); int qemu_save_device_state(QEMUFile *f); int qemu_loadvm_state(QEMUFile *f); -void qemu_loadvm_state_cleanup(void); +void qemu_loadvm_state_cleanup(MigrationIncomingState *mis); int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis); int qemu_load_device_state(QEMUFile *f); int qemu_loadvm_approve_switchover(void); int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, - bool in_postcopy, bool inactivate_disks); + bool in_postcopy); + +bool qemu_loadvm_load_state_buffer(const char *idstr, uint32_t instance_id, + char *buf, size_t len, Error **errp); #endif diff --git a/migration/socket.c b/migration/socket.c index 9ab89b1e089..5ec65b8c039 100644 --- a/migration/socket.c +++ b/migration/socket.c @@ -42,24 +42,6 @@ void socket_send_channel_create(QIOTaskFunc f, void *data) f, data, NULL, NULL); } -QIOChannel *socket_send_channel_create_sync(Error **errp) -{ - QIOChannelSocket *sioc = qio_channel_socket_new(); - - if (!outgoing_args.saddr) { - object_unref(OBJECT(sioc)); - error_setg(errp, "Initial sock address not set!"); - return NULL; - } - - if (qio_channel_socket_connect_sync(sioc, outgoing_args.saddr, errp) < 0) { - object_unref(OBJECT(sioc)); - return NULL; - } - - return QIO_CHANNEL(sioc); -} - struct SocketConnectData { MigrationState *s; char *hostname; diff --git a/migration/socket.h b/migration/socket.h index 46c233ecd29..04ebbe95a13 100644 --- a/migration/socket.h +++ b/migration/socket.h @@ -22,7 +22,6 @@ #include "qemu/sockets.h" void socket_send_channel_create(QIOTaskFunc f, void *data); 
-QIOChannel *socket_send_channel_create_sync(Error **errp); void socket_start_incoming_migration(SocketAddress *saddr, Error **errp); diff --git a/migration/target.c b/migration/target.c deleted file mode 100644 index a6ffa9a5ce3..00000000000 --- a/migration/target.c +++ /dev/null @@ -1,38 +0,0 @@ -/* - * QEMU live migration - functions that need to be compiled target-specific - * - * This work is licensed under the terms of the GNU GPL, version 2 - * or (at your option) any later version. - */ - -#include "qemu/osdep.h" -#include "qapi/qapi-types-migration.h" -#include "migration.h" -#include CONFIG_DEVICES - -#ifdef CONFIG_VFIO -#include "hw/vfio/vfio-common.h" -#endif - -#ifdef CONFIG_VFIO -void migration_populate_vfio_info(MigrationInfo *info) -{ - if (vfio_mig_active()) { - info->vfio = g_malloc0(sizeof(*info->vfio)); - info->vfio->transferred = vfio_mig_bytes_transferred(); - } -} - -void migration_reset_vfio_bytes_transferred(void) -{ - vfio_reset_bytes_transferred(); -} -#else -void migration_populate_vfio_info(MigrationInfo *info) -{ -} - -void migration_reset_vfio_bytes_transferred(void) -{ -} -#endif diff --git a/migration/tls.c b/migration/tls.c index fa03d9136ca..284a6194b2b 100644 --- a/migration/tls.c +++ b/migration/tls.c @@ -90,6 +90,10 @@ void migration_tls_channel_process_incoming(MigrationState *s, trace_migration_tls_incoming_handshake_start(); qio_channel_set_name(QIO_CHANNEL(tioc), "migration-tls-incoming"); + if (migrate_postcopy_ram() || migrate_return_path()) { + qio_channel_set_feature(QIO_CHANNEL(tioc), + QIO_CHANNEL_FEATURE_CONCURRENT_IO); + } qio_channel_tls_handshake(tioc, migration_tls_incoming_handshake, NULL, @@ -149,6 +153,11 @@ void migration_tls_channel_connect(MigrationState *s, s->hostname = g_strdup(hostname); trace_migration_tls_outgoing_handshake_start(hostname); qio_channel_set_name(QIO_CHANNEL(tioc), "migration-tls-outgoing"); + + if (migrate_postcopy_ram() || migrate_return_path()) { + qio_channel_set_feature(QIO_CHANNEL(tioc), + QIO_CHANNEL_FEATURE_CONCURRENT_IO); + } qio_channel_tls_handshake(tioc, migration_tls_outgoing_handshake, s, @@ -156,6 +165,11 @@ void migration_tls_channel_connect(MigrationState *s, NULL); } +void migration_tls_channel_end(QIOChannel *ioc, Error **errp) +{ + qio_channel_tls_bye(QIO_CHANNEL_TLS(ioc), errp); +} + bool migrate_channel_requires_tls_upgrade(QIOChannel *ioc) { if (!migrate_tls()) { diff --git a/migration/tls.h b/migration/tls.h index 5797d153cb0..58b25e12281 100644 --- a/migration/tls.h +++ b/migration/tls.h @@ -36,7 +36,7 @@ void migration_tls_channel_connect(MigrationState *s, QIOChannel *ioc, const char *hostname, Error **errp); - +void migration_tls_channel_end(QIOChannel *ioc, Error **errp); /* Whether the QIO channel requires further TLS handshake? 
*/ bool migrate_channel_requires_tls_upgrade(QIOChannel *ioc); diff --git a/migration/trace-events b/migration/trace-events index 0b7c3324fb5..706db97def9 100644 --- a/migration/trace-events +++ b/migration/trace-events @@ -39,12 +39,12 @@ savevm_send_postcopy_run(void) "" savevm_send_postcopy_resume(void) "" savevm_send_colo_enable(void) "" savevm_send_recv_bitmap(char *name) "%s" +savevm_send_switchover_start(void) "" savevm_state_setup(void) "" savevm_state_resume_prepare(void) "" savevm_state_header(void) "" savevm_state_iterate(void) "" savevm_state_cleanup(void) "" -savevm_state_complete_precopy(void) "" vmstate_save(const char *idstr, const char *vmsd_name) "%s, %s" vmstate_load(const char *idstr, const char *vmsd_name) "%s, %s" vmstate_downtime_save(const char *type, const char *idstr, uint32_t instance_id, int64_t downtime) "type=%s idstr=%s instance_id=%d downtime=%"PRIi64 @@ -88,6 +88,8 @@ put_qlist_end(const char *field_name, const char *vmsd_name) "%s(%s)" # qemu-file.c qemu_file_fclose(void) "" +qemu_file_put_fd(const char *name, int fd, int ret) "ioc %s, fd %d -> status %d" +qemu_file_get_fd(const char *name, int fd) "ioc %s -> fd %d" # ram.c get_queued_page(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/0x%" PRIx64 " page_abs=0x%lx" @@ -103,6 +105,7 @@ ram_load_postcopy_loop(int channel, uint64_t addr, int flags) "chan=%d addr=0x%" ram_postcopy_send_discard_bitmap(void) "" ram_save_page(const char *rbname, uint64_t offset, void *host) "%s: offset: 0x%" PRIx64 " host: %p" ram_save_queue_pages(const char *rbname, size_t start, size_t len) "%s: start: 0x%zx len: 0x%zx" +ram_save_complete(uint64_t dirty_pages, int done) "dirty=%" PRIu64 ", done=%d" ram_dirty_bitmap_request(char *str) "%s" ram_dirty_bitmap_reload_begin(char *str) "%s" ram_dirty_bitmap_reload_complete(char *str) "%s" @@ -115,6 +118,7 @@ colo_flush_ram_cache_end(void) "" save_xbzrle_page_skipping(void) "" save_xbzrle_page_overflow(void) "" ram_save_iterate_big_wait(uint64_t milliconds, int iterations) "big wait: %" PRIu64 " milliseconds, %d iterations" +ram_load_start(void) "" ram_load_complete(int ret, uint64_t seq_iter) "exit_code %d seq iteration %" PRIu64 ram_write_tracking_ramblock_start(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu" ram_write_tracking_ramblock_stop(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu" @@ -128,21 +132,22 @@ postcopy_preempt_reset_channel(void) "" # multifd.c multifd_new_send_channel_async(uint8_t id) "channel %u" multifd_new_send_channel_async_error(uint8_t id, void *err) "channel=%u err=%p" -multifd_recv(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t zero, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u zero pages %u flags 0x%x next packet size %u" +multifd_recv_unfill(uint8_t id, uint64_t packet_num, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " flags 0x%x next packet size %u" multifd_recv_new_channel(uint8_t id) "channel %u" multifd_recv_sync_main(long packet_num) "packet num %ld" multifd_recv_sync_main_signal(uint8_t id) "channel %u" multifd_recv_sync_main_wait(uint8_t id) "iter %u" multifd_recv_terminate_threads(bool error) "error %d" -multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages, uint64_t zero_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 " zero pages %" PRIu64 +multifd_recv_thread_end(uint8_t 
id, uint64_t packets) "channel %u packets %" PRIu64 multifd_recv_thread_start(uint8_t id) "%u" -multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal_pages, uint32_t zero_pages, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u zero pages %u flags 0x%x next packet size %u" +multifd_send_fill(uint8_t id, uint64_t packet_num, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " flags 0x%x next packet size %u" +multifd_send_ram_fill(uint8_t id, uint32_t normal, uint32_t zero) "channel %u normal pages %u zero pages %u" multifd_send_error(uint8_t id) "channel %u" multifd_send_sync_main(long packet_num) "packet num %ld" multifd_send_sync_main_signal(uint8_t id) "channel %u" multifd_send_sync_main_wait(uint8_t id) "channel %u" multifd_send_terminate_threads(void) "" -multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages, uint64_t zero_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 " zero pages %" PRIu64 +multifd_send_thread_end(uint8_t id, uint64_t packets) "channel %u packets %" PRIu64 multifd_send_thread_start(uint8_t id) "%u" multifd_tls_outgoing_handshake_start(void *ioc, void *tioc, const char *hostname) "ioc=%p tioc=%p hostname=%s" multifd_tls_outgoing_handshake_error(void *ioc, const char *err) "ioc=%p err=%s" @@ -151,9 +156,9 @@ multifd_set_outgoing_channel(void *ioc, const char *ioctype, const char *hostnam # migration.c migrate_set_state(const char *new_state) "new state %s" -migrate_fd_cleanup(void) "" +migration_cleanup(void) "" migrate_error(const char *error_desc) "error=%s" -migrate_fd_cancel(void) "" +migration_cancel(void) "" migrate_handle_rp_req_pages(const char *rbname, size_t start, size_t len) "in %s at 0x%zx len 0x%zx" migrate_pending_exact(uint64_t size, uint64_t pre, uint64_t post) "exact pending size %" PRIu64 " (pre = %" PRIu64 " post=%" PRIu64 ")" migrate_pending_estimate(uint64_t size, uint64_t pre, uint64_t post) "estimate pending size %" PRIu64 " (pre = %" PRIu64 " post=%" PRIu64 ")" @@ -191,6 +196,7 @@ migrate_transferred(uint64_t transferred, uint64_t time_spent, uint64_t bandwidt process_incoming_migration_co_end(int ret, int ps) "ret=%d postcopy-state=%d" process_incoming_migration_co_postcopy_end_main(void) "" postcopy_preempt_enabled(bool value) "%d" +migration_precopy_complete(void) "" # migration-stats migration_transferred_bytes(uint64_t qemu_file, uint64_t multifd, uint64_t rdma) "qemu_file %" PRIu64 " multifd %" PRIu64 " RDMA %" PRIu64 @@ -279,8 +285,6 @@ postcopy_nhp_range(const char *ramblock, void *host_addr, size_t offset, size_t postcopy_place_page(void *host_addr) "host=%p" postcopy_place_page_zero(void *host_addr) "host=%p" postcopy_ram_enable_notify(void) "" -mark_postcopy_blocktime_begin(uint64_t addr, void *dd, uint32_t time, int cpu, int received) "addr: 0x%" PRIx64 ", dd: %p, time: %u, cpu: %d, already_received: %d" -mark_postcopy_blocktime_end(uint64_t addr, void *dd, uint32_t time, int affected_cpu) "addr: 0x%" PRIx64 ", dd: %p, time: %u, affected_cpu: %d" postcopy_pause_fault_thread(void) "" postcopy_pause_fault_thread_continued(void) "" postcopy_pause_fast_load(void) "" @@ -304,8 +308,10 @@ postcopy_preempt_tls_handshake(void) "" postcopy_preempt_new_channel(void) "" postcopy_preempt_thread_entry(void) "" postcopy_preempt_thread_exit(void) "" - -get_mem_fault_cpu_index(int cpu, uint32_t pid) "cpu: %d, pid: %u" +postcopy_blocktime_tid_cpu_map(int cpu, uint32_t tid) "cpu: %d, tid: %u" +postcopy_blocktime_begin(uint64_t addr, 
uint64_t time, int cpu, bool exists) "addr: 0x%" PRIx64 ", time: %" PRIu64 ", cpu: %d, exist: %d" +postcopy_blocktime_end(uint64_t addr, uint64_t time, int affected_cpu, int affected_non_cpus) "addr: 0x%" PRIx64 ", time: %" PRIu64 ", affected_cpus: %d, affected_non_cpus: %d" +postcopy_blocktime_end_one(int cpu, uint8_t left_faults) "cpu: %d, left_faults: %" PRIu8 # exec.c migration_exec_outgoing(const char *cmd) "cmd=%s" @@ -340,6 +346,15 @@ colo_receive_message(const char *msg) "Receive '%s' message" # colo-failover.c colo_failover_set_state(const char *new_state) "new state %s" +# cpr.c +cpr_save_fd(const char *name, int id, int fd) "%s, id %d, fd %d" +cpr_delete_fd(const char *name, int id) "%s, id %d" +cpr_find_fd(const char *name, int id, int fd) "%s, id %d returns %d" +cpr_state_save(const char *mode) "%s mode" +cpr_state_load(const char *mode) "%s mode" +cpr_transfer_input(const char *path) "%s" +cpr_transfer_output(const char *path) "%s" + # block-dirty-bitmap.c send_bitmap_header_enter(void) "" send_bitmap_bits(uint32_t flags, uint64_t start_sector, uint32_t nr_sectors, uint64_t data_size) "flags: 0x%x, start_sector: %" PRIu64 ", nr_sectors: %" PRIu32 ", data_size: %" PRIu64 @@ -377,3 +392,10 @@ migration_block_progression(unsigned percent) "Completed %u%%" # page_cache.c migration_pagecache_init(int64_t max_num_items) "Setting cache buckets to %" PRId64 migration_pagecache_insert(void) "Error allocating page" + +# cpu-throttle.c +cpu_throttle_set(int new_throttle_pct) "set guest CPU throttled by %d%%" +cpu_throttle_dirty_sync(void) "" + +# block-active.c +migration_block_activation(const char *name) "%s" diff --git a/migration/vfio.c b/migration/vfio.c new file mode 100644 index 00000000000..0b64e49ef06 --- /dev/null +++ b/migration/vfio.c @@ -0,0 +1,38 @@ +/* + * QEMU live migration - VFIO + * + * This work is licensed under the terms of the GNU GPL, version 2 + * or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/qapi-types-migration.h" +#include "migration.h" +#include CONFIG_DEVICES + +#ifdef CONFIG_VFIO +#include "hw/vfio/vfio-migration.h" +#endif + +#ifdef CONFIG_VFIO +void migration_populate_vfio_info(MigrationInfo *info) +{ + if (vfio_migration_active()) { + info->vfio = g_malloc0(sizeof(*info->vfio)); + info->vfio->transferred = vfio_migration_bytes_transferred(); + } +} + +void migration_reset_vfio_bytes_transferred(void) +{ + vfio_migration_reset_bytes_transferred(); +} +#else +void migration_populate_vfio_info(MigrationInfo *info) +{ +} + +void migration_reset_vfio_bytes_transferred(void) +{ +} +#endif diff --git a/migration/vmstate-types.c b/migration/vmstate-types.c index e83bfccb9ec..741a588b7e1 100644 --- a/migration/vmstate-types.c +++ b/migration/vmstate-types.c @@ -15,6 +15,7 @@ #include "qemu-file.h" #include "migration.h" #include "migration/vmstate.h" +#include "migration/client-options.h" #include "qemu/error-report.h" #include "qemu/queue.h" #include "trace.h" @@ -314,6 +315,29 @@ const VMStateInfo vmstate_info_uint64 = { .put = put_uint64, }; +/* File descriptor communicated via SCM_RIGHTS */ + +static int get_fd(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + int32_t *v = pv; + *v = qemu_file_get_fd(f); + return 0; +} + +static int put_fd(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + int32_t *v = pv; + return qemu_file_put_fd(f, *v); +} + +const VMStateInfo vmstate_info_fd = { + .name = "fd", + .get = get_fd, + .put = put_fd, +}; + static int get_nullptr(QEMUFile *f, void *pv, size_t size, const VMStateField *field) @@ -338,7 +362,7 @@ static int put_nullptr(QEMUFile *f, void *pv, size_t size, } const VMStateInfo vmstate_info_nullptr = { - .name = "uint64", + .name = "nullptr", .get = get_nullptr, .put = put_nullptr, }; diff --git a/migration/vmstate.c b/migration/vmstate.c index ff5d589a6d0..5feaa3244d2 100644 --- a/migration/vmstate.c +++ b/migration/vmstate.c @@ -15,14 +15,15 @@ #include "migration/vmstate.h" #include "savevm.h" #include "qapi/error.h" -#include "qapi/qmp/json-writer.h" +#include "qobject/json-writer.h" #include "qemu-file.h" #include "qemu/bitops.h" #include "qemu/error-report.h" #include "trace.h" static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, - void *opaque, JSONWriter *vmdesc); + void *opaque, JSONWriter *vmdesc, + Error **errp); static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd, void *opaque); @@ -50,6 +51,36 @@ vmstate_field_exists(const VMStateDescription *vmsd, const VMStateField *field, return result; } +/* + * Create a fake nullptr field when there's a NULL pointer detected in the + * array of a VMS_ARRAY_OF_POINTER VMSD field. It's needed because we + * can't dereference the NULL pointer. + */ +static const VMStateField * +vmsd_create_fake_nullptr_field(const VMStateField *field) +{ + VMStateField *fake = g_new0(VMStateField, 1); + + /* It can only happen on an array of pointers! */ + assert(field->flags & VMS_ARRAY_OF_POINTER); + + /* Some of fake's properties should match the original's */ + fake->name = field->name; + fake->version_id = field->version_id; + + /* Do not need "field_exists" check as it always exists (which is null) */ + fake->field_exists = NULL; + + /* See vmstate_info_nullptr - use 1 byte to represent nullptr */ + fake->size = 1; + fake->info = &vmstate_info_nullptr; + fake->flags = VMS_SINGLE; + + /* All the rest fields shouldn't matter.. 
*/ + + return (const VMStateField *)fake; +} + static int vmstate_n_elems(void *opaque, const VMStateField *field) { int n_elems = 1; @@ -142,23 +173,39 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd, } for (i = 0; i < n_elems; i++) { void *curr_elem = first_elem + size * i; + const VMStateField *inner_field; if (field->flags & VMS_ARRAY_OF_POINTER) { curr_elem = *(void **)curr_elem; } + if (!curr_elem && size) { - /* if null pointer check placeholder and do not follow */ - assert(field->flags & VMS_ARRAY_OF_POINTER); - ret = vmstate_info_nullptr.get(f, curr_elem, size, NULL); - } else if (field->flags & VMS_STRUCT) { - ret = vmstate_load_state(f, field->vmsd, curr_elem, - field->vmsd->version_id); - } else if (field->flags & VMS_VSTRUCT) { - ret = vmstate_load_state(f, field->vmsd, curr_elem, - field->struct_version_id); + /* + * If null pointer found (which should only happen in + * an array of pointers), use null placeholder and do + * not follow. + */ + inner_field = vmsd_create_fake_nullptr_field(field); + } else { + inner_field = field; + } + + if (inner_field->flags & VMS_STRUCT) { + ret = vmstate_load_state(f, inner_field->vmsd, curr_elem, + inner_field->vmsd->version_id); + } else if (inner_field->flags & VMS_VSTRUCT) { + ret = vmstate_load_state(f, inner_field->vmsd, curr_elem, + inner_field->struct_version_id); } else { - ret = field->info->get(f, curr_elem, size, field); + ret = inner_field->info->get(f, curr_elem, size, + inner_field); + } + + /* If we used a fake temp field.. free it now */ + if (inner_field != field) { + g_clear_pointer((gpointer *)&inner_field, g_free); } + if (ret >= 0) { ret = qemu_file_get_error(f); } @@ -310,7 +357,7 @@ static void vmsd_desc_field_start(const VMStateDescription *vmsd, static void vmsd_desc_field_end(const VMStateDescription *vmsd, JSONWriter *vmdesc, - const VMStateField *field, size_t size, int i) + const VMStateField *field, size_t size) { if (!vmdesc) { return; @@ -378,37 +425,91 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, int size = vmstate_size(opaque, field); uint64_t old_offset, written_bytes; JSONWriter *vmdesc_loop = vmdesc; + bool is_prev_null = false; trace_vmstate_save_state_loop(vmsd->name, field->name, n_elems); if (field->flags & VMS_POINTER) { first_elem = *(void **)first_elem; assert(first_elem || !n_elems || !size); } + for (i = 0; i < n_elems; i++) { void *curr_elem = first_elem + size * i; + const VMStateField *inner_field; + bool is_null; + int max_elems = n_elems - i; - vmsd_desc_field_start(vmsd, vmdesc_loop, field, i, n_elems); old_offset = qemu_file_transferred(f); if (field->flags & VMS_ARRAY_OF_POINTER) { assert(curr_elem); curr_elem = *(void **)curr_elem; } + if (!curr_elem && size) { - /* if null pointer write placeholder and do not follow */ - assert(field->flags & VMS_ARRAY_OF_POINTER); - ret = vmstate_info_nullptr.put(f, curr_elem, size, NULL, - NULL); - } else if (field->flags & VMS_STRUCT) { - ret = vmstate_save_state(f, field->vmsd, curr_elem, - vmdesc_loop); - } else if (field->flags & VMS_VSTRUCT) { - ret = vmstate_save_state_v(f, field->vmsd, curr_elem, - vmdesc_loop, - field->struct_version_id, errp); + /* + * If null pointer found (which should only happen in + * an array of pointers), use null placeholder and do + * not follow. + */ + inner_field = vmsd_create_fake_nullptr_field(field); + is_null = true; + } else { + inner_field = field; + is_null = false; + } + + /* + * This logic only matters when dumping VM Desc. 
+ * + * Due to the fake nullptr handling above, if there's mixed + * null/non-null data, it doesn't make sense to emit a + * compressed array representation spanning the entire array + * because the field types will be different (e.g. struct + * vs. nullptr). Search ahead for the next null/non-null element + * and start a new compressed array if found. + */ + if (vmdesc && (field->flags & VMS_ARRAY_OF_POINTER) && + is_null != is_prev_null) { + + is_prev_null = is_null; + vmdesc_loop = vmdesc; + + for (int j = i + 1; j < n_elems; j++) { + void *elem = *(void **)(first_elem + size * j); + bool elem_is_null = !elem && size; + + if (is_null != elem_is_null) { + max_elems = j - i; + break; + } + } + } + + vmsd_desc_field_start(vmsd, vmdesc_loop, inner_field, + i, max_elems); + + if (inner_field->flags & VMS_STRUCT) { + ret = vmstate_save_state(f, inner_field->vmsd, + curr_elem, vmdesc_loop); + } else if (inner_field->flags & VMS_VSTRUCT) { + ret = vmstate_save_state_v(f, inner_field->vmsd, + curr_elem, vmdesc_loop, + inner_field->struct_version_id, + errp); } else { - ret = field->info->put(f, curr_elem, size, field, - vmdesc_loop); + ret = inner_field->info->put(f, curr_elem, size, + inner_field, vmdesc_loop); + } + + written_bytes = qemu_file_transferred(f) - old_offset; + vmsd_desc_field_end(vmsd, vmdesc_loop, inner_field, + written_bytes); + + /* If we used a fake temp field.. free it now */ + if (is_null) { + g_clear_pointer((gpointer *)&inner_field, g_free); } + if (ret) { error_setg(errp, "Save of field %s/%s failed", vmsd->name, field->name); @@ -418,9 +519,6 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, return ret; } - written_bytes = qemu_file_transferred(f) - old_offset; - vmsd_desc_field_end(vmsd, vmdesc_loop, field, written_bytes, i); - /* Compressed arrays only care about the first element */ if (vmdesc_loop && vmsd_can_compress(field)) { vmdesc_loop = NULL; @@ -441,12 +539,13 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, json_writer_end_array(vmdesc); } - ret = vmstate_subsection_save(f, vmsd, opaque, vmdesc); + ret = vmstate_subsection_save(f, vmsd, opaque, vmdesc, errp); if (vmsd->post_save) { int ps_ret = vmsd->post_save(opaque); - if (!ret) { + if (!ret && ps_ret) { ret = ps_ret; + error_setg(errp, "post-save failed: %s", vmsd->name); } } return ret; @@ -518,7 +617,8 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd, } static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, - void *opaque, JSONWriter *vmdesc) + void *opaque, JSONWriter *vmdesc, + Error **errp) { const VMStateDescription * const *sub = vmsd->subsections; bool vmdesc_has_subsections = false; @@ -546,7 +646,7 @@ static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, qemu_put_byte(f, len); qemu_put_buffer(f, (uint8_t *)vmsdsub->name, len); qemu_put_be32(f, vmsdsub->version_id); - ret = vmstate_save_state(f, vmsdsub, opaque, vmdesc); + ret = vmstate_save_state_with_err(f, vmsdsub, opaque, vmdesc, errp); if (ret) { return ret; } diff --git a/monitor/fds.c b/monitor/fds.c index b5416b5b5dc..cc35d2ec334 100644 --- a/monitor/fds.c +++ b/monitor/fds.c @@ -29,7 +29,7 @@ #include "qapi/qmp/qerror.h" #include "qemu/ctype.h" #include "qemu/cutils.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" /* file descriptors passed via SCM_RIGHTS */ typedef struct mon_fd_t mon_fd_t; diff --git a/monitor/hmp-cmds-target.c b/monitor/hmp-cmds-target.c index ff01cf9d8db..e9820611466 100644 
--- a/monitor/hmp-cmds-target.c +++ b/monitor/hmp-cmds-target.c @@ -24,13 +24,14 @@ #include "qemu/osdep.h" #include "disas/disas.h" -#include "exec/address-spaces.h" -#include "exec/memory.h" +#include "system/address-spaces.h" +#include "system/memory.h" #include "monitor/hmp-target.h" #include "monitor/monitor-internal.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" -#include "sysemu/hw_accel.h" +#include "qobject/qdict.h" +#include "system/hw_accel.h" +#include "exec/target_page.h" /* Set the current CPU defined by the user. Callers must hold BQL. */ int monitor_set_cpu(Monitor *mon, int cpu_index) @@ -101,7 +102,7 @@ void hmp_info_registers(Monitor *mon, const QDict *qdict) if (all_cpus) { CPU_FOREACH(cs) { monitor_printf(mon, "\nCPU#%d\n", cs->cpu_index); - cpu_dump_state(cs, NULL, CPU_DUMP_FPU); + cpu_dump_state(cs, NULL, CPU_DUMP_FPU | CPU_DUMP_VPU); } } else { cs = vcpu >= 0 ? qemu_get_cpu(vcpu) : mon_get_cpu(mon); @@ -116,7 +117,7 @@ void hmp_info_registers(Monitor *mon, const QDict *qdict) } monitor_printf(mon, "\nCPU#%d\n", cs->cpu_index); - cpu_dump_state(cs, NULL, CPU_DUMP_FPU); + cpu_dump_state(cs, NULL, CPU_DUMP_FPU | CPU_DUMP_VPU); } } @@ -301,7 +302,6 @@ void hmp_gpa2hva(Monitor *mon, const QDict *qdict) void hmp_gva2gpa(Monitor *mon, const QDict *qdict) { target_ulong addr = qdict_get_int(qdict, "addr"); - MemTxAttrs attrs; CPUState *cs = mon_get_cpu(mon); hwaddr gpa; @@ -310,7 +310,7 @@ void hmp_gva2gpa(Monitor *mon, const QDict *qdict) return; } - gpa = cpu_get_phys_page_attrs_debug(cs, addr & TARGET_PAGE_MASK, &attrs); + gpa = cpu_get_phys_page_debug(cs, addr & TARGET_PAGE_MASK); if (gpa == -1) { monitor_printf(mon, "Unmapped\n"); } else { diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c index f601d06ab89..74a0f56566e 100644 --- a/monitor/hmp-cmds.c +++ b/monitor/hmp-cmds.c @@ -14,8 +14,8 @@ */ #include "qemu/osdep.h" -#include "exec/address-spaces.h" -#include "exec/ioport.h" +#include "system/address-spaces.h" +#include "system/ioport.h" #include "exec/gdbstub.h" #include "gdbstub/enums.h" #include "monitor/hmp.h" @@ -25,10 +25,10 @@ #include "qapi/qapi-commands-control.h" #include "qapi/qapi-commands-machine.h" #include "qapi/qapi-commands-misc.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/cutils.h" #include "qemu/log.h" -#include "sysemu/sysemu.h" +#include "system/system.h" bool hmp_handle_error(Monitor *mon, Error *err) { @@ -285,7 +285,7 @@ void hmp_gdbserver(Monitor *mon, const QDict *qdict) device = "tcp::" DEFAULT_GDBSTUB_PORT; } - if (gdbserver_start(device) < 0) { + if (!gdbserver_start(device, &error_warn)) { monitor_printf(mon, "Could not open gdbserver on device '%s'\n", device); } else if (strcmp(device, "none") == 0) { @@ -431,6 +431,6 @@ void hmp_dumpdtb(Monitor *mon, const QDict *qdict) return; } - monitor_printf(mon, "dtb dumped to %s", filename); + monitor_printf(mon, "DTB dumped to '%s'\n", filename); } #endif diff --git a/monitor/hmp-target.c b/monitor/hmp-target.c index 1eb72ac1bf5..37dfd7fd4c6 100644 --- a/monitor/hmp-target.c +++ b/monitor/hmp-target.c @@ -26,7 +26,7 @@ #include "monitor-internal.h" #include "monitor/qdev.h" #include "net/slirp.h" -#include "sysemu/device_tree.h" +#include "system/device_tree.h" #include "monitor/hmp-target.h" #include "monitor/hmp.h" #include "block/block-hmp-cmds.h" diff --git a/monitor/hmp.c b/monitor/hmp.c index 460e8832f69..34e2b8f748b 100644 --- a/monitor/hmp.c +++ b/monitor/hmp.c @@ -27,15 +27,15 @@ #include "hw/qdev-core.h" #include "monitor-internal.h" 
#include "monitor/hmp.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qnum.h" +#include "qobject/qdict.h" +#include "qobject/qnum.h" #include "qemu/config-file.h" #include "qemu/ctype.h" #include "qemu/cutils.h" #include "qemu/log.h" #include "qemu/option.h" #include "qemu/units.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "trace.h" static void monitor_command_cb(void *opaque, const char *cmdline, diff --git a/monitor/monitor-internal.h b/monitor/monitor-internal.h index cb628f681df..5676eb334e8 100644 --- a/monitor/monitor-internal.h +++ b/monitor/monitor-internal.h @@ -28,10 +28,10 @@ #include "chardev/char-fe.h" #include "monitor/monitor.h" #include "qapi/qapi-types-control.h" -#include "qapi/qmp/dispatch.h" -#include "qapi/qmp/json-parser.h" +#include "qapi/qmp-registry.h" +#include "qobject/json-parser.h" #include "qemu/readline.h" -#include "sysemu/iothread.h" +#include "system/iothread.h" /* * Supported types: diff --git a/monitor/monitor.c b/monitor/monitor.c index db52a9c7efb..c5a5d308774 100644 --- a/monitor/monitor.c +++ b/monitor/monitor.c @@ -28,10 +28,10 @@ #include "qapi/opts-visitor.h" #include "qapi/qapi-emit-events.h" #include "qapi/qapi-visit-control.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/error-report.h" #include "qemu/option.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" #include "trace.h" /* @@ -308,6 +308,7 @@ int error_printf_unless_qmp(const char *fmt, ...) static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = { /* Limit guest-triggerable events to 1 per second */ [QAPI_EVENT_RTC_CHANGE] = { 1000 * SCALE_MS }, + [QAPI_EVENT_BLOCK_IO_ERROR] = { 1000 * SCALE_MS }, [QAPI_EVENT_WATCHDOG] = { 1000 * SCALE_MS }, [QAPI_EVENT_BALLOON_CHANGE] = { 1000 * SCALE_MS }, [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS }, @@ -493,7 +494,8 @@ static unsigned int qapi_event_throttle_hash(const void *key) hash += g_str_hash(qdict_get_str(evstate->data, "node-name")); } - if (evstate->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) { + if (evstate->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE || + evstate->event == QAPI_EVENT_BLOCK_IO_ERROR) { hash += g_str_hash(qdict_get_str(evstate->data, "qom-path")); } @@ -519,7 +521,8 @@ static gboolean qapi_event_throttle_equal(const void *a, const void *b) qdict_get_str(evb->data, "node-name")); } - if (eva->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) { + if (eva->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE || + eva->event == QAPI_EVENT_BLOCK_IO_ERROR) { return !strcmp(qdict_get_str(eva->data, "qom-path"), qdict_get_str(evb->data, "qom-path")); } diff --git a/monitor/qemu-config-qmp.c b/monitor/qemu-config-qmp.c index 24477a0e448..9a3b183602d 100644 --- a/monitor/qemu-config-qmp.c +++ b/monitor/qemu-config-qmp.c @@ -2,7 +2,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qapi/qapi-commands-misc.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qlist.h" #include "qemu/option.h" #include "qemu/config-file.h" #include "hw/boards.h" diff --git a/monitor/qmp-cmds-control.c b/monitor/qmp-cmds-control.c index f21506efa58..150ca9f5cb6 100644 --- a/monitor/qmp-cmds-control.c +++ b/monitor/qmp-cmds-control.c @@ -1,5 +1,5 @@ /* - * QMP commands related to the monitor (common to sysemu and tools) + * QMP commands related to the monitor (common to system and tools) * * Copyright (c) 2003-2004 Fabrice Bellard * diff --git a/monitor/qmp-cmds.c b/monitor/qmp-cmds.c index f84a0dc523f..1ca44fbd72f 100644 --- a/monitor/qmp-cmds.c +++ 
b/monitor/qmp-cmds.c @@ -18,11 +18,11 @@ #include "monitor-internal.h" #include "monitor/qdev.h" #include "monitor/qmp-helpers.h" -#include "sysemu/sysemu.h" -#include "sysemu/kvm.h" -#include "sysemu/runstate.h" -#include "sysemu/runstate-action.h" -#include "sysemu/block-backend.h" +#include "system/system.h" +#include "system/kvm.h" +#include "system/runstate.h" +#include "system/runstate-action.h" +#include "system/block-backend.h" #include "qapi/error.h" #include "qapi/qapi-init-commands.h" #include "qapi/qapi-commands-control.h" @@ -31,6 +31,7 @@ #include "qapi/type-helpers.h" #include "hw/mem/memory-device.h" #include "hw/intc/intc.h" +#include "migration/misc.h" NameInfo *qmp_query_name(Error **errp) { @@ -96,21 +97,18 @@ void qmp_cont(Error **errp) } } - /* Continuing after completed migration. Images have been inactivated to - * allow the destination to take control. Need to get control back now. - * - * If there are no inactive block nodes (e.g. because the VM was just - * paused rather than completing a migration), bdrv_inactivate_all() simply - * doesn't do anything. */ - bdrv_activate_all(&local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - if (runstate_check(RUN_STATE_INMIGRATE)) { autostart = 1; } else { + /* + * Continuing after completed migration. Images have been + * inactivated to allow the destination to take control. Need to + * get control back now. + */ + if (!migration_block_activate(&local_err)) { + error_propagate(errp, local_err); + return; + } vm_start(); } } diff --git a/monitor/qmp.c b/monitor/qmp.c index 5e538f34c0d..cb99a12d941 100644 --- a/monitor/qmp.c +++ b/monitor/qmp.c @@ -28,9 +28,9 @@ #include "monitor-internal.h" #include "qapi/error.h" #include "qapi/qapi-commands-control.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qdict.h" +#include "qobject/qjson.h" +#include "qobject/qlist.h" #include "trace.h" /* @@ -356,7 +356,8 @@ void qmp_dispatcher_co_wake(void) /* Write request before reading qmp_dispatcher_co_busy. */ smp_mb__before_rmw(); - if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) { + if (!qatomic_xchg(&qmp_dispatcher_co_busy, true) && + qatomic_read(&qmp_dispatcher_co)) { aio_co_wake(qmp_dispatcher_co); } } diff --git a/nbd/client-connection.c b/nbd/client-connection.c index f9da67c87e3..79ea97e4cc1 100644 --- a/nbd/client-connection.c +++ b/nbd/client-connection.c @@ -31,6 +31,8 @@ #include "qapi/clone-visitor.h" #include "qemu/coroutine.h" +#include "nbd/nbd-internal.h" + struct NBDClientConnection { /* Initialization constants, never change */ SocketAddress *saddr; /* address to connect to */ @@ -140,6 +142,7 @@ static int nbd_connect(QIOChannelSocket *sioc, SocketAddress *addr, return ret; } + nbd_set_socket_send_buffer(sioc); qio_channel_set_delay(QIO_CHANNEL(sioc), false); if (!info) { @@ -410,7 +413,7 @@ nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info, */ void nbd_co_establish_connection_cancel(NBDClientConnection *conn) { - Coroutine *wait_co; + Coroutine *wait_co = NULL; WITH_QEMU_LOCK_GUARD(&conn->mutex) { wait_co = g_steal_pointer(&conn->wait_co); diff --git a/nbd/common.c b/nbd/common.c index 589a748cfe6..2a133a66c39 100644 --- a/nbd/common.c +++ b/nbd/common.c @@ -18,6 +18,9 @@ #include "qemu/osdep.h" #include "trace.h" +#include "io/channel-socket.h" +#include "qapi/error.h" +#include "qemu/units.h" #include "nbd-internal.h" /* Discard length bytes from channel. 
Return -errno on failure and 0 on @@ -264,3 +267,26 @@ const char *nbd_mode_lookup(NBDMode mode) return ""; } } + +/* + * Testing shows that 2m send buffer is optimal. Changing the receive buffer + * size has no effect on performance. + * On Linux we need to increase net.core.wmem_max to make this effective. + */ +#if defined(__APPLE__) || defined(__linux__) +#define UNIX_STREAM_SOCKET_SEND_BUFFER_SIZE (2 * MiB) +#endif + +void nbd_set_socket_send_buffer(QIOChannelSocket *sioc) +{ +#ifdef UNIX_STREAM_SOCKET_SEND_BUFFER_SIZE + if (sioc->localAddr.ss_family == AF_UNIX) { + size_t size = UNIX_STREAM_SOCKET_SEND_BUFFER_SIZE; + Error *errp = NULL; + + if (qio_channel_socket_set_send_buffer(sioc, size, &errp) < 0) { + warn_report_err(errp); + } + } +#endif /* UNIX_STREAM_SOCKET_SEND_BUFFER_SIZE */ +} diff --git a/nbd/nbd-internal.h b/nbd/nbd-internal.h index 91895106a95..6bafeef5ddc 100644 --- a/nbd/nbd-internal.h +++ b/nbd/nbd-internal.h @@ -10,7 +10,7 @@ #ifndef NBD_INTERNAL_H #define NBD_INTERNAL_H #include "block/nbd.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "io/channel-tls.h" #include "qemu/iov.h" @@ -74,4 +74,9 @@ static inline int nbd_write(QIOChannel *ioc, const void *buffer, size_t size, int nbd_drop(QIOChannel *ioc, size_t size, Error **errp); +/* nbd_set_socket_send_buffer + * Set the socket send buffer size for optimal performance. + */ +void nbd_set_socket_send_buffer(QIOChannelSocket *sioc); + #endif diff --git a/nbd/server.c b/nbd/server.c index 892797bb111..d242be98115 100644 --- a/nbd/server.c +++ b/nbd/server.c @@ -124,12 +124,14 @@ struct NBDMetaContexts { struct NBDClient { int refcount; /* atomic */ void (*close_fn)(NBDClient *client, bool negotiated); + void *owner; QemuMutex lock; NBDExport *exp; QCryptoTLSCreds *tlscreds; char *tlsauthz; + uint32_t handshake_max_secs; QIOChannelSocket *sioc; /* The underlying data channel */ QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */ @@ -1148,8 +1150,8 @@ nbd_negotiate_meta_queries(NBDClient *client, Error **errp) * Return: * -errno on error, errp is set * 0 on successful negotiation, errp is not set - * 1 if client sent NBD_OPT_ABORT, i.e. on valid disconnect, - * errp is not set + * 1 if client sent NBD_OPT_ABORT (i.e. on valid disconnect) or never + * wrote anything (i.e. port probe); errp is not set */ static coroutine_fn int nbd_negotiate_options(NBDClient *client, Error **errp) @@ -1173,8 +1175,13 @@ nbd_negotiate_options(NBDClient *client, Error **errp) ... Rest of request */ - if (nbd_read32(client->ioc, &flags, "flags", errp) < 0) { - return -EIO; + /* + * Intentionally ignore errors on this first read - we do not want + * to be noisy about a mere port probe, but only for clients that + * start talking the protocol and then quit abruptly. + */ + if (nbd_read32(client->ioc, &flags, "flags", NULL) < 0) { + return 1; } client->mode = NBD_MODE_EXPORT_NAME; trace_nbd_negotiate_options_flags(flags); @@ -1381,8 +1388,8 @@ nbd_negotiate_options(NBDClient *client, Error **errp) * Return: * -errno on error, errp is set * 0 on successful negotiation, errp is not set - * 1 if client sent NBD_OPT_ABORT, i.e. on valid disconnect, - * errp is not set + * 1 if client sent NBD_OPT_ABORT (i.e. on valid disconnect) or never + * wrote anything (i.e. 
port probe); errp is not set */ static coroutine_fn int nbd_negotiate(NBDClient *client, Error **errp) { @@ -1413,9 +1420,12 @@ static coroutine_fn int nbd_negotiate(NBDClient *client, Error **errp) stq_be_p(buf + 8, NBD_OPTS_MAGIC); stw_be_p(buf + 16, NBD_FLAG_FIXED_NEWSTYLE | NBD_FLAG_NO_ZEROES); - if (nbd_write(client->ioc, buf, 18, errp) < 0) { - error_prepend(errp, "write failed: "); - return -EINVAL; + /* + * Be silent about failure to write our greeting: there is nothing + * wrong with a client testing if our port is alive. + */ + if (nbd_write(client->ioc, buf, 18, NULL) < 0) { + return 1; } ret = nbd_negotiate_options(client, errp); if (ret != 0) { @@ -1972,7 +1982,7 @@ static void nbd_export_request_shutdown(BlockExport *blk_exp) blk_exp_ref(&exp->common); /* - * TODO: Should we expand QMP NbdServerRemoveNode enum to allow a + * TODO: Should we expand QMP BlockExportRemoveMode enum to allow a * close mode that stops advertising the export to new clients but * still permits existing clients to run to completion? Because of * that possibility, nbd_export_close() can be called more than @@ -2016,6 +2026,7 @@ static void nbd_export_delete(BlockExport *blk_exp) const BlockExportDriver blk_exp_nbd = { .type = BLOCK_EXPORT_TYPE_NBD, .instance_size = sizeof(NBDExport), + .supports_inactive = true, .create = nbd_export_create, .delete = nbd_export_delete, .request_shutdown = nbd_export_request_shutdown, @@ -2910,6 +2921,22 @@ static coroutine_fn int nbd_handle_request(NBDClient *client, NBDExport *exp = client->exp; char *msg; size_t i; + bool inactive; + + WITH_GRAPH_RDLOCK_GUARD() { + inactive = bdrv_is_inactive(blk_bs(exp->common.blk)); + if (inactive) { + switch (request->type) { + case NBD_CMD_READ: + /* These commands are allowed on inactive nodes */ + break; + default: + /* Return an error for the rest */ + return nbd_send_generic_reply(client, request, -EPERM, + "export is inactive", errp); + } + } + } switch (request->type) { case NBD_CMD_CACHE: @@ -3184,35 +3211,65 @@ static void nbd_client_receive_next_request(NBDClient *client) } } +static void nbd_handshake_timer_cb(void *opaque) +{ + QIOChannel *ioc = opaque; + + trace_nbd_handshake_timer_cb(); + qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); +} + static coroutine_fn void nbd_co_client_start(void *opaque) { NBDClient *client = opaque; Error *local_err = NULL; + QEMUTimer *handshake_timer = NULL; qemu_co_mutex_init(&client->send_lock); + /* + * Create a timer to bound the time spent in negotiation. If the + * timer expires, it is likely nbd_negotiate will fail because the + * socket was shutdown. + */ + if (client->handshake_max_secs > 0) { + handshake_timer = aio_timer_new(qemu_get_aio_context(), + QEMU_CLOCK_REALTIME, + SCALE_NS, + nbd_handshake_timer_cb, + client->sioc); + timer_mod(handshake_timer, + qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + + client->handshake_max_secs * NANOSECONDS_PER_SECOND); + } + if (nbd_negotiate(client, &local_err)) { if (local_err) { error_report_err(local_err); } + timer_free(handshake_timer); client_close(client, false); return; } + timer_free(handshake_timer); WITH_QEMU_LOCK_GUARD(&client->lock) { nbd_client_receive_next_request(client); } } /* - * Create a new client listener using the given channel @sioc. + * Create a new client listener using the given channel @sioc and @owner. * Begin servicing it in a coroutine. When the connection closes, call - * @close_fn with an indication of whether the client completed negotiation. 
+ * @close_fn with an indication of whether the client completed negotiation + * within @handshake_max_secs seconds (0 for unbounded). */ void nbd_client_new(QIOChannelSocket *sioc, + uint32_t handshake_max_secs, QCryptoTLSCreds *tlscreds, const char *tlsauthz, - void (*close_fn)(NBDClient *, bool)) + void (*close_fn)(NBDClient *, bool), + void *owner) { NBDClient *client; Coroutine *co; @@ -3225,13 +3282,23 @@ void nbd_client_new(QIOChannelSocket *sioc, object_ref(OBJECT(client->tlscreds)); } client->tlsauthz = g_strdup(tlsauthz); + client->handshake_max_secs = handshake_max_secs; client->sioc = sioc; qio_channel_set_delay(QIO_CHANNEL(sioc), false); object_ref(OBJECT(client->sioc)); client->ioc = QIO_CHANNEL(sioc); object_ref(OBJECT(client->ioc)); client->close_fn = close_fn; + client->owner = owner; + + nbd_set_socket_send_buffer(sioc); co = qemu_coroutine_create(nbd_co_client_start, client); qemu_coroutine_enter(co); } + +void * +nbd_client_owner(NBDClient *client) +{ + return client->owner; +} diff --git a/nbd/trace-events b/nbd/trace-events index 00ae3216a11..cbd0a4ab7e4 100644 --- a/nbd/trace-events +++ b/nbd/trace-events @@ -76,6 +76,7 @@ nbd_co_receive_request_payload_received(uint64_t cookie, uint64_t len) "Payload nbd_co_receive_ext_payload_compliance(uint64_t from, uint64_t len) "client sent non-compliant write without payload flag: from=0x%" PRIx64 ", len=0x%" PRIx64 nbd_co_receive_align_compliance(const char *op, uint64_t from, uint64_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx64 ", align=0x%" PRIx32 nbd_trip(void) "Reading request" +nbd_handshake_timer_cb(void) "client took too long to negotiate" # client-connection.c nbd_connect_thread_sleep(uint64_t timeout) "timeout %" PRIu64 diff --git a/net/af-xdp.c b/net/af-xdp.c index 01c5fb914ec..14f302ea215 100644 --- a/net/af-xdp.c +++ b/net/af-xdp.c @@ -49,9 +49,12 @@ typedef struct AFXDPState { char *buffer; struct xsk_umem *umem; - uint32_t n_queues; uint32_t xdp_flags; bool inhibit; + + char *map_path; + int map_fd; + uint32_t map_start_index; } AFXDPState; #define AF_XDP_BATCH_SIZE 64 @@ -261,6 +264,7 @@ static void af_xdp_send(void *opaque) static void af_xdp_cleanup(NetClientState *nc) { AFXDPState *s = DO_UPCAST(AFXDPState, nc, nc); + int idx; qemu_purge_queued_packets(nc); @@ -275,13 +279,17 @@ static void af_xdp_cleanup(NetClientState *nc) qemu_vfree(s->buffer); s->buffer = NULL; - /* Remove the program if it's the last open queue. */ - if (!s->inhibit && nc->queue_index == s->n_queues - 1 && s->xdp_flags - && bpf_xdp_detach(s->ifindex, s->xdp_flags, NULL) != 0) { - fprintf(stderr, - "af-xdp: unable to remove XDP program from '%s', ifindex: %d\n", - s->ifname, s->ifindex); + if (s->map_fd >= 0) { + idx = nc->queue_index + s->map_start_index; + if (bpf_map_delete_elem(s->map_fd, &idx)) { + fprintf(stderr, "af-xdp: unable to remove AF_XDP socket from map" + " %s\n", s->map_path); + } + close(s->map_fd); + s->map_fd = -1; } + g_free(s->map_path); + s->map_path = NULL; } static int af_xdp_umem_create(AFXDPState *s, int sock_fd, Error **errp) @@ -323,7 +331,7 @@ static int af_xdp_umem_create(AFXDPState *s, int sock_fd, Error **errp) s->pool = g_new(uint64_t, n_descs); /* Fill the pool in the opposite order, because it's a LIFO queue. 
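(Note on the loop-bound change just below: the pool is allocated a few lines above with g_new(uint64_t, n_descs), so its valid indices run from 0 to n_descs - 1. The previous loop started at i = n_descs and therefore wrote one slot past the end of the array on its first iteration; starting the descending fill at n_descs - 1 keeps every store in bounds while still populating the LIFO queue in reverse order.)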
*/ - for (i = n_descs; i >= 0; i--) { + for (i = n_descs - 1; i >= 0; i--) { s->pool[i] = i * XSK_UMEM__DEFAULT_FRAME_SIZE; } s->n_pool = n_descs; @@ -345,7 +353,6 @@ static int af_xdp_socket_create(AFXDPState *s, }; int queue_id, error = 0; - s->inhibit = opts->has_inhibit && opts->inhibit; if (s->inhibit) { cfg.libxdp_flags |= XSK_LIBXDP_FLAGS__INHIBIT_PROG_LOAD; } @@ -396,6 +403,35 @@ static int af_xdp_socket_create(AFXDPState *s, return 0; } +static int af_xdp_update_xsk_map(AFXDPState *s, Error **errp) +{ + int xsk_fd, idx, error = 0; + + if (!s->map_path) { + return 0; + } + + s->map_fd = bpf_obj_get(s->map_path); + if (s->map_fd < 0) { + error = errno; + } else { + xsk_fd = xsk_socket__fd(s->xsk); + idx = s->nc.queue_index + s->map_start_index; + if (bpf_map_update_elem(s->map_fd, &idx, &xsk_fd, 0)) { + error = errno; + } + } + + if (error) { + error_setg_errno(errp, error, + "failed to insert AF_XDP socket into map %s", + s->map_path); + return -1; + } + + return 0; +} + /* NetClientInfo methods. */ static NetClientInfo net_af_xdp_info = { .type = NET_CLIENT_DRIVER_AF_XDP, @@ -444,12 +480,14 @@ int net_init_af_xdp(const Netdev *netdev, { const NetdevAFXDPOptions *opts = &netdev->u.af_xdp; NetClientState *nc, *nc0 = NULL; + int32_t map_start_index; unsigned int ifindex; uint32_t prog_id = 0; g_autofree int *sock_fds = NULL; int64_t i, queues; Error *err = NULL; AFXDPState *s; + bool inhibit; ifindex = if_nametoindex(opts->ifname); if (!ifindex) { @@ -465,8 +503,28 @@ int net_init_af_xdp(const Netdev *netdev, return -1; } - if ((opts->has_inhibit && opts->inhibit) != !!opts->sock_fds) { - error_setg(errp, "'inhibit=on' requires 'sock-fds' and vice versa"); + inhibit = opts->has_inhibit && opts->inhibit; + if (inhibit && !opts->sock_fds && !opts->map_path) { + error_setg(errp, "'inhibit=on' requires 'sock-fds' or 'map-path'"); + return -1; + } + if (!inhibit && (opts->sock_fds || opts->map_path)) { + error_setg(errp, "'sock-fds' and 'map-path' require 'inhibit=on'"); + return -1; + } + if (opts->sock_fds && opts->map_path) { + error_setg(errp, "'sock-fds' and 'map-path' are mutually exclusive"); + return -1; + } + if (!opts->map_path && opts->has_map_start_index) { + error_setg(errp, "'map-start-index' requires 'map-path'"); + return -1; + } + + map_start_index = opts->has_map_start_index ? opts->map_start_index : 0; + if (map_start_index < 0) { + error_setg(errp, "'map-start-index' cannot be negative (%d)", + map_start_index); return -1; } @@ -490,21 +548,23 @@ int net_init_af_xdp(const Netdev *netdev, pstrcpy(s->ifname, sizeof(s->ifname), opts->ifname); s->ifindex = ifindex; - s->n_queues = queues; + s->inhibit = inhibit; + + s->map_path = g_strdup(opts->map_path); + s->map_start_index = map_start_index; + s->map_fd = -1; - if (af_xdp_umem_create(s, sock_fds ? sock_fds[i] : -1, errp) - || af_xdp_socket_create(s, opts, errp)) { - /* Make sure the XDP program will be removed. */ - s->n_queues = i; - error_propagate(errp, err); + if (af_xdp_umem_create(s, sock_fds ? 
sock_fds[i] : -1, &err) || + af_xdp_socket_create(s, opts, &err) || + af_xdp_update_xsk_map(s, &err)) { goto err; } } - if (nc0) { + if (nc0 && !inhibit) { s = DO_UPCAST(AFXDPState, nc, nc0); if (bpf_xdp_query_id(s->ifindex, s->xdp_flags, &prog_id) || !prog_id) { - error_setg_errno(errp, errno, + error_setg_errno(&err, errno, "no XDP program loaded on '%s', ifindex: %d", s->ifname, s->ifindex); goto err; @@ -518,6 +578,7 @@ int net_init_af_xdp(const Netdev *netdev, err: if (nc0) { qemu_del_net_client(nc0); + error_propagate(errp, err); } return -1; diff --git a/net/can/can_core.c b/net/can/can_core.c index 0115d787941..77fe2b8ba48 100644 --- a/net/can/can_core.c +++ b/net/can/can_core.c @@ -149,7 +149,7 @@ static bool can_bus_can_be_deleted(UserCreatable *uc) } static void can_bus_class_init(ObjectClass *klass, - void *class_data G_GNUC_UNUSED) + const void *class_data G_GNUC_UNUSED) { UserCreatableClass *uc_klass = USER_CREATABLE_CLASS(klass); @@ -162,7 +162,7 @@ static const TypeInfo can_bus_info = { .instance_size = sizeof(CanBusState), .instance_init = can_bus_instance_init, .class_init = can_bus_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/net/can/can_host.c b/net/can/can_host.c index b2fe553f91a..3f9bb33e474 100644 --- a/net/can/can_host.c +++ b/net/can/can_host.c @@ -72,7 +72,7 @@ static void can_host_complete(UserCreatable *uc, Error **errp) } static void can_host_class_init(ObjectClass *klass, - void *class_data G_GNUC_UNUSED) + const void *class_data G_GNUC_UNUSED) { UserCreatableClass *uc_klass = USER_CREATABLE_CLASS(klass); @@ -92,7 +92,7 @@ static const TypeInfo can_host_info = { .class_size = sizeof(CanHostClass), .abstract = true, .class_init = can_host_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/net/can/can_socketcan.c b/net/can/can_socketcan.c index c1a1ad05636..8a57ae07178 100644 --- a/net/can/can_socketcan.c +++ b/net/can/can_socketcan.c @@ -308,7 +308,7 @@ static void can_host_socketcan_instance_init(Object *obj) } static void can_host_socketcan_class_init(ObjectClass *klass, - void *class_data G_GNUC_UNUSED) + const void *class_data G_GNUC_UNUSED) { CanHostClass *chc = CAN_HOST_CLASS(klass); diff --git a/net/checksum.c b/net/checksum.c index 1a957e4c0b1..537457d89d0 100644 --- a/net/checksum.c +++ b/net/checksum.c @@ -57,7 +57,7 @@ uint16_t net_checksum_tcpudp(uint16_t length, uint16_t proto, return net_checksum_finish(sum); } -void net_checksum_calculate(uint8_t *data, int length, int csum_flag) +void net_checksum_calculate(void *data, int length, int csum_flag) { int mac_hdr_len, ip_len; struct ip_header *ip; @@ -101,7 +101,7 @@ void net_checksum_calculate(uint8_t *data, int length, int csum_flag) return; } - ip = (struct ip_header *)(data + mac_hdr_len); + ip = (struct ip_header *)((uint8_t *)data + mac_hdr_len); if (IP_HEADER_VERSION(ip) != IP_HEADER_VERSION_4) { return; /* not IPv4 */ diff --git a/net/clients.h b/net/clients.h index be53794582c..e786ab42035 100644 --- a/net/clients.h +++ b/net/clients.h @@ -29,6 +29,10 @@ int net_init_dump(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp); +#ifdef CONFIG_PASST +int net_init_passt(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp); +#endif #ifdef CONFIG_SLIRP int net_init_slirp(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp); diff --git a/net/colo-compare.c 
b/net/colo-compare.c index c4ad0ab71fa..0e1844ee4cb 100644 --- a/net/colo-compare.c +++ b/net/colo-compare.c @@ -25,7 +25,7 @@ #include "chardev/char-fe.h" #include "qemu/sockets.h" #include "colo.h" -#include "sysemu/iothread.h" +#include "system/iothread.h" #include "net/colo-compare.h" #include "migration/colo.h" #include "util.h" @@ -412,8 +412,7 @@ static void colo_compare_tcp(CompareState *s, Connection *conn) * can ensure that the packet's payload is acknowledged by * primary and secondary. */ - uint32_t min_ack = conn->pack - conn->sack > 0 ? - conn->sack : conn->pack; + uint32_t min_ack = MIN(conn->pack, conn->sack); pri: if (g_queue_is_empty(&conn->primary_list)) { @@ -1329,8 +1328,6 @@ static void colo_compare_complete(UserCreatable *uc, Error **errp) } QTAILQ_INSERT_TAIL(&net_compares, s, next); qemu_mutex_unlock(&colo_compare_mutex); - - return; } static void colo_flush_packets(void *opaque, void *user_data) @@ -1355,7 +1352,7 @@ static void colo_flush_packets(void *opaque, void *user_data) } } -static void colo_compare_class_init(ObjectClass *oc, void *data) +static void colo_compare_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); @@ -1479,7 +1476,7 @@ static const TypeInfo colo_compare_info = { .instance_finalize = colo_compare_finalize, .class_size = sizeof(CompareClass), .class_init = colo_compare_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/net/dump.c b/net/dump.c index 956e34a123c..581234b7751 100644 --- a/net/dump.c +++ b/net/dump.c @@ -32,7 +32,7 @@ #include "qapi/visitor.h" #include "net/filter.h" #include "qom/object.h" -#include "sysemu/rtc.h" +#include "system/rtc.h" typedef struct DumpState { int64_t start_ts; @@ -155,7 +155,8 @@ static ssize_t filter_dump_receive_iov(NetFilterState *nf, NetClientState *sndr, { NetFilterDumpState *nfds = FILTER_DUMP(nf); - dump_receive_iov(&nfds->ds, iov, iovcnt, qemu_get_vnet_hdr_len(nf->netdev)); + dump_receive_iov(&nfds->ds, iov, iovcnt, flags & QEMU_NET_PACKET_FLAG_RAW ? 
+ 0 : qemu_get_vnet_hdr_len(nf->netdev)); return 0; } @@ -233,7 +234,7 @@ static void filter_dump_instance_finalize(Object *obj) g_free(nfds->filename); } -static void filter_dump_class_init(ObjectClass *oc, void *data) +static void filter_dump_class_init(ObjectClass *oc, const void *data) { NetFilterClass *nfc = NETFILTER_CLASS(oc); diff --git a/net/filter-buffer.c b/net/filter-buffer.c index 283dc9cbe6b..a36be31dc8e 100644 --- a/net/filter-buffer.c +++ b/net/filter-buffer.c @@ -172,7 +172,7 @@ static void filter_buffer_set_interval(Object *obj, Visitor *v, s->interval = value; } -static void filter_buffer_class_init(ObjectClass *oc, void *data) +static void filter_buffer_class_init(ObjectClass *oc, const void *data) { NetFilterClass *nfc = NETFILTER_CLASS(oc); diff --git a/net/filter-mirror.c b/net/filter-mirror.c index 34a63b5dbbf..27734c91a71 100644 --- a/net/filter-mirror.c +++ b/net/filter-mirror.c @@ -410,7 +410,7 @@ static void filter_redirector_set_vnet_hdr(Object *obj, s->vnet_hdr = value; } -static void filter_mirror_class_init(ObjectClass *oc, void *data) +static void filter_mirror_class_init(ObjectClass *oc, const void *data) { NetFilterClass *nfc = NETFILTER_CLASS(oc); @@ -425,7 +425,7 @@ static void filter_mirror_class_init(ObjectClass *oc, void *data) nfc->receive_iov = filter_mirror_receive_iov; } -static void filter_redirector_class_init(ObjectClass *oc, void *data) +static void filter_redirector_class_init(ObjectClass *oc, const void *data) { NetFilterClass *nfc = NETFILTER_CLASS(oc); diff --git a/net/filter-replay.c b/net/filter-replay.c index 54690676ef0..451663c5a56 100644 --- a/net/filter-replay.c +++ b/net/filter-replay.c @@ -17,7 +17,7 @@ #include "qemu/timer.h" #include "qapi/visitor.h" #include "net/filter.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "qom/object.h" #define TYPE_FILTER_REPLAY "filter-replay" @@ -65,7 +65,7 @@ static void filter_replay_instance_finalize(Object *obj) replay_unregister_net(nfrs->rns); } -static void filter_replay_class_init(ObjectClass *oc, void *data) +static void filter_replay_class_init(ObjectClass *oc, const void *data) { NetFilterClass *nfc = NETFILTER_CLASS(oc); diff --git a/net/filter-rewriter.c b/net/filter-rewriter.c index c18c4c20190..cdf85aa5eed 100644 --- a/net/filter-rewriter.c +++ b/net/filter-rewriter.c @@ -411,7 +411,7 @@ static void filter_rewriter_init(Object *obj) s->failover_mode = FAILOVER_MODE_OFF; } -static void colo_rewriter_class_init(ObjectClass *oc, void *data) +static void colo_rewriter_class_init(ObjectClass *oc, const void *data) { NetFilterClass *nfc = NETFILTER_CLASS(oc); diff --git a/net/filter.c b/net/filter.c index 33359087711..c7cc6615dc9 100644 --- a/net/filter.c +++ b/net/filter.c @@ -333,7 +333,7 @@ static void default_handle_event(NetFilterState *nf, int event, Error **errp) } } -static void netfilter_class_init(ObjectClass *oc, void *data) +static void netfilter_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); NetFilterClass *nfc = NETFILTER_CLASS(oc); @@ -363,7 +363,7 @@ static const TypeInfo netfilter_info = { .instance_size = sizeof(NetFilterState), .instance_init = netfilter_init, .instance_finalize = netfilter_finalize, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/net/hub.c b/net/hub.c index 4c8a469a50a..e3b58b1c4f8 100644 --- a/net/hub.c +++ b/net/hub.c @@ -20,7 +20,7 @@ #include "hub.h" #include "qemu/iov.h" #include "qemu/error-report.h" 
-#include "sysemu/qtest.h" +#include "system/qtest.h" /* * A hub broadcasts incoming packets to all its ports except the source port. @@ -193,31 +193,6 @@ NetClientState *net_hub_add_port(int hub_id, const char *name, return &port->nc; } -/** - * Find a available port on a hub; otherwise create one new port - */ -NetClientState *net_hub_port_find(int hub_id) -{ - NetHub *hub; - NetHubPort *port; - NetClientState *nc; - - QLIST_FOREACH(hub, &hubs, next) { - if (hub->id == hub_id) { - QLIST_FOREACH(port, &hub->ports, next) { - nc = port->nc.peer; - if (!nc) { - return &(port->nc); - } - } - break; - } - } - - nc = net_hub_add_port(hub_id, NULL, NULL); - return nc; -} - /** * Print hub configuration */ @@ -310,6 +285,9 @@ void net_hub_check_clients(void) case NET_CLIENT_DRIVER_NIC: has_nic = 1; break; +#ifdef CONFIG_PASST + case NET_CLIENT_DRIVER_PASST: +#endif case NET_CLIENT_DRIVER_USER: case NET_CLIENT_DRIVER_TAP: case NET_CLIENT_DRIVER_SOCKET: diff --git a/net/meson.build b/net/meson.build index e0cd71470e0..da6ea635e95 100644 --- a/net/meson.build +++ b/net/meson.build @@ -1,6 +1,7 @@ system_ss.add(files( 'announce.c', 'checksum.c', + 'dgram.c', 'dump.c', 'eth.c', 'filter-buffer.c', @@ -12,7 +13,7 @@ system_ss.add(files( 'queue.c', 'socket.c', 'stream.c', - 'dgram.c', + 'stream_data.c', 'util.c', )) @@ -33,13 +34,16 @@ system_ss.add(when: 'CONFIG_TCG', if_true: files('filter-replay.c')) if have_l2tpv3 system_ss.add(files('l2tpv3.c')) endif +if enable_passt + system_ss.add(files('passt.c')) +endif system_ss.add(when: slirp, if_true: files('slirp.c')) system_ss.add(when: vde, if_true: files('vde.c')) if have_netmap system_ss.add(files('netmap.c')) endif -system_ss.add(when: libxdp, if_true: files('af-xdp.c')) +system_ss.add(when: [libxdp, libbpf], if_true: files('af-xdp.c')) if have_vhost_net_user system_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost-user.c'), if_false: files('vhost-user-stub.c')) diff --git a/net/net-hmp-cmds.c b/net/net-hmp-cmds.c index 41d326bf5f1..e7c55d27876 100644 --- a/net/net-hmp-cmds.c +++ b/net/net-hmp-cmds.c @@ -22,7 +22,7 @@ #include "qapi/clone-visitor.h" #include "qapi/qapi-commands-net.h" #include "qapi/qapi-visit-net.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/config-file.h" #include "qemu/help_option.h" #include "qemu/option.h" diff --git a/net/net.c b/net/net.c index 2eb8bc9c0b9..da275db86ec 100644 --- a/net/net.c +++ b/net/net.c @@ -36,7 +36,7 @@ #include "qemu/help_option.h" #include "qapi/qapi-commands-net.h" #include "qapi/qapi-visit-net.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/qmp/qerror.h" #include "qemu/error-report.h" #include "qemu/sockets.h" @@ -51,7 +51,7 @@ #include "qemu/keyval.h" #include "qapi/error.h" #include "qapi/opts-visitor.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "net/colo-compare.h" #include "net/filter.h" #include "qapi/string-output-visitor.h" @@ -381,9 +381,12 @@ NetClientState *qemu_get_peer(NetClientState *nc, int queue_index) return ncs->peer; } -static void qemu_cleanup_net_client(NetClientState *nc) +static void qemu_cleanup_net_client(NetClientState *nc, + bool remove_from_net_clients) { - QTAILQ_REMOVE(&net_clients, nc, next); + if (remove_from_net_clients) { + QTAILQ_REMOVE(&net_clients, nc, next); + } if (nc->info->cleanup) { nc->info->cleanup(nc); @@ -425,7 +428,13 @@ void qemu_del_net_client(NetClientState *nc) object_unparent(OBJECT(nf)); } - /* If there is a peer NIC, delete and cleanup client, but do not free. 
*/ + /* + * If there is a peer NIC, transfer ownership to it. Delete the client + * from net_client list but do not cleanup nor free. This way NIC can + * still access to members of the backend. + * + * The cleanup and free will be done when the NIC is free. + */ if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) { NICState *nic = qemu_get_nic(nc->peer); if (nic->peer_deleted) { @@ -435,21 +444,18 @@ void qemu_del_net_client(NetClientState *nc) for (i = 0; i < queues; i++) { ncs[i]->peer->link_down = true; + QTAILQ_REMOVE(&net_clients, ncs[i], next); } if (nc->peer->info->link_status_changed) { nc->peer->info->link_status_changed(nc->peer); } - for (i = 0; i < queues; i++) { - qemu_cleanup_net_client(ncs[i]); - } - return; } for (i = 0; i < queues; i++) { - qemu_cleanup_net_client(ncs[i]); + qemu_cleanup_net_client(ncs[i], true); qemu_free_net_client(ncs[i]); } } @@ -462,8 +468,12 @@ void qemu_del_nic(NICState *nic) for (i = 0; i < queues; i++) { NetClientState *nc = qemu_get_subqueue(nic, i); - /* If this is a peer NIC and peer has already been deleted, free it now. */ + /* + * If this is a peer NIC and peer has already been deleted, clean it up + * and free it now. + */ if (nic->peer_deleted) { + qemu_cleanup_net_client(nc->peer, false); qemu_free_net_client(nc->peer); } else if (nc->peer) { /* if there are RX packets pending, complete them */ @@ -474,7 +484,7 @@ void qemu_del_nic(NICState *nic) for (i = queues - 1; i >= 0; i--) { NetClientState *nc = qemu_get_subqueue(nic, i); - qemu_cleanup_net_client(nc); + qemu_cleanup_net_client(nc, true); qemu_free_net_client(nc); } @@ -542,6 +552,10 @@ void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6, int qemu_get_vnet_hdr_len(NetClientState *nc) { + if (!nc) { + return 0; + } + return nc->vnet_hdr_len; } @@ -559,6 +573,15 @@ void qemu_set_vnet_hdr_len(NetClientState *nc, int len) nc->info->set_vnet_hdr_len(nc, len); } +bool qemu_get_vnet_hash_supported_types(NetClientState *nc, uint32_t *types) +{ + if (!nc || !nc->info->get_vnet_hash_supported_types) { + return false; + } + + return nc->info->get_vnet_hash_supported_types(nc, types); +} + int qemu_set_vnet_le(NetClientState *nc, bool is_le) { #if HOST_BIG_ENDIAN @@ -750,16 +773,6 @@ ssize_t qemu_receive_packet(NetClientState *nc, const uint8_t *buf, int size) return qemu_net_queue_receive(nc->incoming_queue, buf, size); } -ssize_t qemu_receive_packet_iov(NetClientState *nc, const struct iovec *iov, - int iovcnt) -{ - if (!qemu_can_receive_packet(nc)) { - return 0; - } - - return qemu_net_queue_receive_iov(nc->incoming_queue, iov, iovcnt); -} - ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size) { return qemu_send_packet_async_with_flags(nc, QEMU_NET_PACKET_FLAG_RAW, @@ -828,6 +841,7 @@ static ssize_t qemu_deliver_packet_iov(NetClientState *sender, iov_copy[0].iov_len = nc->vnet_hdr_len; memcpy(&iov_copy[1], iov, iovcnt * sizeof(*iov)); iov = iov_copy; + iovcnt++; } if (nc->info->receive_iov) { @@ -1243,6 +1257,9 @@ static int (* const net_client_init_fun[NET_CLIENT_DRIVER__MAX])( const char *name, NetClientState *peer, Error **errp) = { [NET_CLIENT_DRIVER_NIC] = net_init_nic, +#ifdef CONFIG_PASST + [NET_CLIENT_DRIVER_PASST] = net_init_passt, +#endif #ifdef CONFIG_SLIRP [NET_CLIENT_DRIVER_USER] = net_init_slirp, #endif @@ -1348,6 +1365,7 @@ void show_netdevs(void) "dgram", "hubport", "tap", + "passt", #ifdef CONFIG_SLIRP "user", #endif @@ -1596,21 +1614,11 @@ void colo_notify_filters_event(int event, Error **errp) } } -void 
qmp_set_link(const char *name, bool up, Error **errp) +void net_client_set_link(NetClientState **ncs, int queues, bool up) { - NetClientState *ncs[MAX_QUEUE_NUM]; NetClientState *nc; - int queues, i; - - queues = qemu_find_net_clients_except(name, ncs, - NET_CLIENT_DRIVER__MAX, - MAX_QUEUE_NUM); + int i; - if (queues == 0) { - error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, - "Device '%s' not found", name); - return; - } nc = ncs[0]; for (i = 0; i < queues; i++) { @@ -1641,6 +1649,24 @@ void qmp_set_link(const char *name, bool up, Error **errp) } } +void qmp_set_link(const char *name, bool up, Error **errp) +{ + NetClientState *ncs[MAX_QUEUE_NUM]; + int queues; + + queues = qemu_find_net_clients_except(name, ncs, + NET_CLIENT_DRIVER__MAX, + MAX_QUEUE_NUM); + + if (queues == 0) { + error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, + "Device '%s' not found", name); + return; + } + + net_client_set_link(ncs, queues, up); +} + static void net_vm_change_state_handler(void *opaque, bool running, RunState state) { @@ -1683,6 +1709,9 @@ void net_cleanup(void) * of the latest NET_CLIENT_DRIVER_NIC, and operate on *p as we walk * the list. * + * However, the NIC may have peers that trust to be clean beyond this + * point. For example, if they have been removed with device_del. + * * The 'nc' variable isn't part of the list traversal; it's purely * for convenience as too much '(*p)->' has a tendency to make the * readers' eyes bleed. @@ -1690,6 +1719,17 @@ void net_cleanup(void) while (*p) { nc = *p; if (nc->info->type == NET_CLIENT_DRIVER_NIC) { + NICState *nic = qemu_get_nic(nc); + + if (nic->peer_deleted) { + int queues = MAX(nic->conf->peers.queues, 1); + + for (int i = 0; i < queues; i++) { + nc = qemu_get_subqueue(nic, i); + qemu_cleanup_net_client(nc->peer, false); + } + } + /* Skip NET_CLIENT_DRIVER_NIC entries */ p = &QTAILQ_NEXT(nc, next); } else { @@ -1737,7 +1777,7 @@ void net_check_clients(void) static int net_init_client(void *dummy, QemuOpts *opts, Error **errp) { - const char *model = qemu_opt_get_del(opts, "model"); + const char *model = qemu_opt_get(opts, "model"); if (is_nic_model_help_option(model)) { return 0; diff --git a/net/passt.c b/net/passt.c new file mode 100644 index 00000000000..32ecffb763b --- /dev/null +++ b/net/passt.c @@ -0,0 +1,745 @@ +/* + * passt network backend + * + * Copyright Red Hat + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include +#include "qemu/error-report.h" +#include +#include "net/net.h" +#include "clients.h" +#include "qapi/error.h" +#include "io/net-listener.h" +#include "chardev/char-fe.h" +#include "net/vhost_net.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/vhost-user.h" +#include "standard-headers/linux/virtio_net.h" +#include "stream_data.h" + +#ifdef CONFIG_VHOST_USER +static const int user_feature_bits[] = { + VIRTIO_F_NOTIFY_ON_EMPTY, + VIRTIO_F_NOTIFICATION_DATA, + VIRTIO_RING_F_INDIRECT_DESC, + VIRTIO_RING_F_EVENT_IDX, + + VIRTIO_F_ANY_LAYOUT, + VIRTIO_F_VERSION_1, + VIRTIO_NET_F_CSUM, + VIRTIO_NET_F_GUEST_CSUM, + VIRTIO_NET_F_GSO, + VIRTIO_NET_F_GUEST_TSO4, + VIRTIO_NET_F_GUEST_TSO6, + VIRTIO_NET_F_GUEST_ECN, + VIRTIO_NET_F_GUEST_UFO, + VIRTIO_NET_F_HOST_TSO4, + VIRTIO_NET_F_HOST_TSO6, + VIRTIO_NET_F_HOST_ECN, + VIRTIO_NET_F_HOST_UFO, + VIRTIO_NET_F_MRG_RXBUF, + VIRTIO_NET_F_MTU, + VIRTIO_F_IOMMU_PLATFORM, + VIRTIO_F_RING_PACKED, + VIRTIO_F_RING_RESET, + VIRTIO_F_IN_ORDER, + VIRTIO_NET_F_RSS, + VIRTIO_NET_F_RSC_EXT, + VIRTIO_NET_F_HASH_REPORT, + VIRTIO_NET_F_GUEST_USO4, + VIRTIO_NET_F_GUEST_USO6, + 
VIRTIO_NET_F_HOST_USO, + + /* This bit implies RARP isn't sent by QEMU out of band */ + VIRTIO_NET_F_GUEST_ANNOUNCE, + + VIRTIO_NET_F_MQ, + + VHOST_INVALID_FEATURE_BIT +}; +#endif + +typedef struct NetPasstState { + NetStreamData data; + GPtrArray *args; + gchar *pidfile; + pid_t pid; +#ifdef CONFIG_VHOST_USER + /* vhost user */ + VhostUserState *vhost_user; + VHostNetState *vhost_net; + CharBackend vhost_chr; + guint vhost_watch; + uint64_t acked_features; + bool started; +#endif +} NetPasstState; + +static int net_passt_stream_start(NetPasstState *s, Error **errp); + +static void net_passt_cleanup(NetClientState *nc) +{ + NetPasstState *s = DO_UPCAST(NetPasstState, data.nc, nc); + +#ifdef CONFIG_VHOST_USER + if (s->vhost_net) { + vhost_net_cleanup(s->vhost_net); + g_free(s->vhost_net); + s->vhost_net = NULL; + } + if (s->vhost_watch) { + g_source_remove(s->vhost_watch); + s->vhost_watch = 0; + } + qemu_chr_fe_deinit(&s->vhost_chr, true); + if (s->vhost_user) { + vhost_user_cleanup(s->vhost_user); + g_free(s->vhost_user); + s->vhost_user = NULL; + } +#endif + + kill(s->pid, SIGTERM); + if (g_remove(s->pidfile) != 0) { + warn_report("Failed to remove passt pidfile %s: %s", + s->pidfile, strerror(errno)); + } + g_free(s->pidfile); + g_ptr_array_free(s->args, TRUE); +} + +static ssize_t net_passt_receive(NetClientState *nc, const uint8_t *buf, + size_t size) +{ + NetStreamData *d = DO_UPCAST(NetStreamData, nc, nc); + + return net_stream_data_receive(d, buf, size); +} + +static gboolean net_passt_send(QIOChannel *ioc, GIOCondition condition, + gpointer data) +{ + if (net_stream_data_send(ioc, condition, data) == G_SOURCE_REMOVE) { + NetPasstState *s = DO_UPCAST(NetPasstState, data, data); + Error *error = NULL; + + /* we need to restart passt */ + kill(s->pid, SIGTERM); + if (net_passt_stream_start(s, &error) == -1) { + error_report_err(error); + } + + return G_SOURCE_REMOVE; + } + + return G_SOURCE_CONTINUE; +} + +#ifdef CONFIG_VHOST_USER +static int passt_set_vnet_endianness(NetClientState *nc, bool enable) +{ + assert(nc->info->type == NET_CLIENT_DRIVER_PASST); + + return 0; +} + +static bool passt_has_vnet_hdr(NetClientState *nc) +{ + NetPasstState *s = DO_UPCAST(NetPasstState, data.nc, nc); + + assert(nc->info->type == NET_CLIENT_DRIVER_PASST); + + return s->vhost_user != NULL; +} + +static bool passt_has_ufo(NetClientState *nc) +{ + NetPasstState *s = DO_UPCAST(NetPasstState, data.nc, nc); + + assert(nc->info->type == NET_CLIENT_DRIVER_PASST); + + return s->vhost_user != NULL; +} + +static bool passt_check_peer_type(NetClientState *nc, ObjectClass *oc, + Error **errp) +{ + NetPasstState *s = DO_UPCAST(NetPasstState, data.nc, nc); + const char *driver = object_class_get_name(oc); + + assert(nc->info->type == NET_CLIENT_DRIVER_PASST); + + if (s->vhost_user == NULL) { + return true; + } + + if (!g_str_has_prefix(driver, "virtio-net-")) { + error_setg(errp, "vhost-user requires frontend driver virtio-net-*"); + return false; + } + + return true; +} + +static struct vhost_net *passt_get_vhost_net(NetClientState *nc) +{ + NetPasstState *s = DO_UPCAST(NetPasstState, data.nc, nc); + + assert(nc->info->type == NET_CLIENT_DRIVER_PASST); + + return s->vhost_net; +} + +static uint64_t passt_get_acked_features(NetClientState *nc) +{ + NetPasstState *s = DO_UPCAST(NetPasstState, data.nc, nc); + + assert(nc->info->type == NET_CLIENT_DRIVER_PASST); + + return s->acked_features; +} + +static void passt_save_acked_features(NetClientState *nc) +{ + NetPasstState *s = DO_UPCAST(NetPasstState, data.nc, nc); 
+ + assert(nc->info->type == NET_CLIENT_DRIVER_PASST); + + if (s->vhost_net) { + uint64_t features = vhost_net_get_acked_features(s->vhost_net); + if (features) { + s->acked_features = features; + } + } +} +#endif + +static NetClientInfo net_passt_info = { + .type = NET_CLIENT_DRIVER_PASST, + .size = sizeof(NetPasstState), + .receive = net_passt_receive, + .cleanup = net_passt_cleanup, +#ifdef CONFIG_VHOST_USER + .has_vnet_hdr = passt_has_vnet_hdr, + .has_ufo = passt_has_ufo, + .set_vnet_be = passt_set_vnet_endianness, + .set_vnet_le = passt_set_vnet_endianness, + .check_peer_type = passt_check_peer_type, + .get_vhost_net = passt_get_vhost_net, +#endif +}; + +static void net_passt_client_connected(QIOTask *task, gpointer opaque) +{ + NetPasstState *s = opaque; + + if (net_stream_data_client_connected(task, &s->data) == 0) { + qemu_set_info_str(&s->data.nc, "stream,connected to pid %d", s->pid); + } +} + +static int net_passt_start_daemon(NetPasstState *s, int sock, Error **errp) +{ + g_autoptr(GSubprocess) daemon = NULL; + g_autofree gchar *contents = NULL; + g_autoptr(GError) error = NULL; + GSubprocessLauncher *launcher; + + qemu_set_info_str(&s->data.nc, "launching passt"); + + launcher = g_subprocess_launcher_new(G_SUBPROCESS_FLAGS_NONE); + g_subprocess_launcher_take_fd(launcher, sock, 3); + + daemon = g_subprocess_launcher_spawnv(launcher, + (const gchar *const *)s->args->pdata, + &error); + g_object_unref(launcher); + + if (!daemon) { + error_setg(errp, "Error creating daemon: %s", error->message); + return -1; + } + + if (!g_subprocess_wait(daemon, NULL, &error)) { + error_setg(errp, "Error waiting for daemon: %s", error->message); + return -1; + } + + if (g_subprocess_get_if_exited(daemon) && + g_subprocess_get_exit_status(daemon)) { + return -1; + } + + if (!g_file_get_contents(s->pidfile, &contents, NULL, &error)) { + error_setg(errp, "Cannot read passt pid: %s", error->message); + return -1; + } + + s->pid = (pid_t)g_ascii_strtoll(contents, NULL, 10); + if (s->pid <= 0) { + error_setg(errp, "File '%s' did not contain a valid PID.", s->pidfile); + return -1; + } + + return 0; +} + +static int net_passt_stream_start(NetPasstState *s, Error **errp) +{ + QIOChannelSocket *sioc; + SocketAddress *addr; + int sv[2]; + + if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { + error_setg_errno(errp, errno, "socketpair() failed"); + return -1; + } + + /* connect to passt */ + qemu_set_info_str(&s->data.nc, "connecting to passt"); + + /* create socket channel */ + sioc = qio_channel_socket_new(); + s->data.ioc = QIO_CHANNEL(sioc); + s->data.nc.link_down = true; + s->data.send = net_passt_send; + + addr = g_new0(SocketAddress, 1); + addr->type = SOCKET_ADDRESS_TYPE_FD; + addr->u.fd.str = g_strdup_printf("%d", sv[0]); + + qio_channel_socket_connect_async(sioc, addr, + net_passt_client_connected, s, + NULL, NULL); + + qapi_free_SocketAddress(addr); + + /* start passt */ + if (net_passt_start_daemon(s, sv[1], errp) == -1) { + close(sv[0]); + close(sv[1]); + return -1; + } + close(sv[1]); + + return 0; +} + +#ifdef CONFIG_VHOST_USER +static gboolean passt_vhost_user_watch(void *do_not_use, GIOCondition cond, + void *opaque) +{ + NetPasstState *s = opaque; + + qemu_chr_fe_disconnect(&s->vhost_chr); + + return G_SOURCE_CONTINUE; +} + +static void passt_vhost_user_event(void *opaque, QEMUChrEvent event); + +static void chr_closed_bh(void *opaque) +{ + NetPasstState *s = opaque; + + passt_save_acked_features(&s->data.nc); + + net_client_set_link(&(NetClientState *){ &s->data.nc }, 1, false); + + 
qemu_chr_fe_set_handlers(&s->vhost_chr, NULL, NULL, passt_vhost_user_event, + NULL, s, NULL, true); +} + +static void passt_vhost_user_stop(NetPasstState *s) +{ + passt_save_acked_features(&s->data.nc); + vhost_net_cleanup(s->vhost_net); +} + +static int passt_vhost_user_start(NetPasstState *s, VhostUserState *be) +{ + struct vhost_net *net = NULL; + VhostNetOptions options; + + options.backend_type = VHOST_BACKEND_TYPE_USER; + options.net_backend = &s->data.nc; + options.opaque = be; + options.busyloop_timeout = 0; + options.nvqs = 2; + options.feature_bits = user_feature_bits; + options.max_tx_queue_size = VIRTQUEUE_MAX_SIZE; + options.get_acked_features = passt_get_acked_features; + options.save_acked_features = passt_save_acked_features; + options.is_vhost_user = true; + + net = vhost_net_init(&options); + if (!net) { + error_report("failed to init passt vhost_net"); + passt_vhost_user_stop(s); + return -1; + } + + if (s->vhost_net) { + vhost_net_cleanup(s->vhost_net); + g_free(s->vhost_net); + } + s->vhost_net = net; + + return 0; +} + +static void passt_vhost_user_event(void *opaque, QEMUChrEvent event) +{ + NetPasstState *s = opaque; + + switch (event) { + case CHR_EVENT_OPENED: + if (passt_vhost_user_start(s, s->vhost_user) < 0) { + qemu_chr_fe_disconnect(&s->vhost_chr); + return; + } + s->vhost_watch = qemu_chr_fe_add_watch(&s->vhost_chr, G_IO_HUP, + passt_vhost_user_watch, s); + net_client_set_link(&(NetClientState *){ &s->data.nc }, 1, true); + s->started = true; + break; + case CHR_EVENT_CLOSED: + if (s->vhost_watch) { + AioContext *ctx = qemu_get_current_aio_context(); + + g_source_remove(s->vhost_watch); + s->vhost_watch = 0; + qemu_chr_fe_set_handlers(&s->vhost_chr, NULL, NULL, NULL, NULL, + NULL, NULL, false); + + aio_bh_schedule_oneshot(ctx, chr_closed_bh, s); + } + break; + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static int net_passt_vhost_user_init(NetPasstState *s, Error **errp) +{ + Chardev *chr; + int sv[2]; + + if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { + error_setg_errno(errp, errno, "socketpair() failed"); + return -1; + } + + /* connect to passt */ + qemu_set_info_str(&s->data.nc, "connecting to passt"); + + /* create chardev */ + + chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET)); + if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) { + object_unref(OBJECT(chr)); + error_setg(errp, "Failed to make socket chardev"); + goto err; + } + + s->vhost_user = g_new0(struct VhostUserState, 1); + if (!qemu_chr_fe_init(&s->vhost_chr, chr, errp) || + !vhost_user_init(s->vhost_user, &s->vhost_chr, errp)) { + goto err; + } + + /* start passt */ + if (net_passt_start_daemon(s, sv[1], errp) == -1) { + goto err; + } + + do { + if (qemu_chr_fe_wait_connected(&s->vhost_chr, errp) < 0) { + goto err; + } + + qemu_chr_fe_set_handlers(&s->vhost_chr, NULL, NULL, + passt_vhost_user_event, NULL, s, NULL, + true); + } while (!s->started); + + qemu_set_info_str(&s->data.nc, "vhost-user,connected to pid %d", s->pid); + + close(sv[1]); + return 0; +err: + close(sv[0]); + close(sv[1]); + + return -1; +} +#else +static int net_passt_vhost_user_init(NetPasstState *s, Error **errp) +{ + error_setg(errp, "vhost-user support has not been built"); + + return -1; +} +#endif + +static GPtrArray *net_passt_decode_args(const NetDevPasstOptions *passt, + gchar *pidfile, Error **errp) +{ + GPtrArray *args = g_ptr_array_new_with_free_func(g_free); + + if (passt->path) { + g_ptr_array_add(args, g_strdup(passt->path)); + } else { + 
g_ptr_array_add(args, g_strdup("passt")); + } + + if (passt->has_vhost_user && passt->vhost_user) { + g_ptr_array_add(args, g_strdup("--vhost-user")); + } + + /* by default, be quiet */ + if (!passt->has_quiet || passt->quiet) { + g_ptr_array_add(args, g_strdup("--quiet")); + } + + if (passt->has_mtu) { + g_ptr_array_add(args, g_strdup("--mtu")); + g_ptr_array_add(args, g_strdup_printf("%"PRId64, passt->mtu)); + } + + if (passt->address) { + g_ptr_array_add(args, g_strdup("--address")); + g_ptr_array_add(args, g_strdup(passt->address)); + } + + if (passt->netmask) { + g_ptr_array_add(args, g_strdup("--netmask")); + g_ptr_array_add(args, g_strdup(passt->netmask)); + } + + if (passt->mac) { + g_ptr_array_add(args, g_strdup("--mac-addr")); + g_ptr_array_add(args, g_strdup(passt->mac)); + } + + if (passt->gateway) { + g_ptr_array_add(args, g_strdup("--gateway")); + g_ptr_array_add(args, g_strdup(passt->gateway)); + } + + if (passt->interface) { + g_ptr_array_add(args, g_strdup("--interface")); + g_ptr_array_add(args, g_strdup(passt->interface)); + } + + if (passt->outbound) { + g_ptr_array_add(args, g_strdup("--outbound")); + g_ptr_array_add(args, g_strdup(passt->outbound)); + } + + if (passt->outbound_if4) { + g_ptr_array_add(args, g_strdup("--outbound-if4")); + g_ptr_array_add(args, g_strdup(passt->outbound_if4)); + } + + if (passt->outbound_if6) { + g_ptr_array_add(args, g_strdup("--outbound-if6")); + g_ptr_array_add(args, g_strdup(passt->outbound_if6)); + } + + if (passt->dns) { + g_ptr_array_add(args, g_strdup("--dns")); + g_ptr_array_add(args, g_strdup(passt->dns)); + } + if (passt->fqdn) { + g_ptr_array_add(args, g_strdup("--fqdn")); + g_ptr_array_add(args, g_strdup(passt->fqdn)); + } + + if (passt->has_dhcp_dns && !passt->dhcp_dns) { + g_ptr_array_add(args, g_strdup("--no-dhcp-dns")); + } + + if (passt->has_dhcp_search && !passt->dhcp_search) { + g_ptr_array_add(args, g_strdup("--no-dhcp-search")); + } + + if (passt->map_host_loopback) { + g_ptr_array_add(args, g_strdup("--map-host-loopback")); + g_ptr_array_add(args, g_strdup(passt->map_host_loopback)); + } + + if (passt->map_guest_addr) { + g_ptr_array_add(args, g_strdup("--map-guest-addr")); + g_ptr_array_add(args, g_strdup(passt->map_guest_addr)); + } + + if (passt->dns_forward) { + g_ptr_array_add(args, g_strdup("--dns-forward")); + g_ptr_array_add(args, g_strdup(passt->dns_forward)); + } + + if (passt->dns_host) { + g_ptr_array_add(args, g_strdup("--dns-host")); + g_ptr_array_add(args, g_strdup(passt->dns_host)); + } + + if (passt->has_tcp && !passt->tcp) { + g_ptr_array_add(args, g_strdup("--no-tcp")); + } + + if (passt->has_udp && !passt->udp) { + g_ptr_array_add(args, g_strdup("--no-udp")); + } + + if (passt->has_icmp && !passt->icmp) { + g_ptr_array_add(args, g_strdup("--no-icmp")); + } + + if (passt->has_dhcp && !passt->dhcp) { + g_ptr_array_add(args, g_strdup("--no-dhcp")); + } + + if (passt->has_ndp && !passt->ndp) { + g_ptr_array_add(args, g_strdup("--no-ndp")); + } + if (passt->has_dhcpv6 && !passt->dhcpv6) { + g_ptr_array_add(args, g_strdup("--no-dhcpv6")); + } + + if (passt->has_ra && !passt->ra) { + g_ptr_array_add(args, g_strdup("--no-ra")); + } + + if (passt->has_freebind && passt->freebind) { + g_ptr_array_add(args, g_strdup("--freebind")); + } + + if (passt->has_ipv4 && !passt->ipv4) { + g_ptr_array_add(args, g_strdup("--ipv6-only")); + } + + if (passt->has_ipv6 && !passt->ipv6) { + g_ptr_array_add(args, g_strdup("--ipv4-only")); + } + + if (passt->has_search && passt->search) { + const StringList *list = 
passt->search; + GString *domains = g_string_new(list->value->str); + + list = list->next; + while (list) { + g_string_append(domains, " "); + g_string_append(domains, list->value->str); + list = list->next; + } + + g_ptr_array_add(args, g_strdup("--search")); + g_ptr_array_add(args, g_string_free(domains, FALSE)); + } + + if (passt->has_tcp_ports && passt->tcp_ports) { + const StringList *list = passt->tcp_ports; + GString *tcp_ports = g_string_new(list->value->str); + + list = list->next; + while (list) { + g_string_append(tcp_ports, ","); + g_string_append(tcp_ports, list->value->str); + list = list->next; + } + + g_ptr_array_add(args, g_strdup("--tcp-ports")); + g_ptr_array_add(args, g_string_free(tcp_ports, FALSE)); + } + + if (passt->has_udp_ports && passt->udp_ports) { + const StringList *list = passt->udp_ports; + GString *udp_ports = g_string_new(list->value->str); + + list = list->next; + while (list) { + g_string_append(udp_ports, ","); + g_string_append(udp_ports, list->value->str); + list = list->next; + } + + g_ptr_array_add(args, g_strdup("--udp-ports")); + g_ptr_array_add(args, g_string_free(udp_ports, FALSE)); + } + + if (passt->has_param && passt->param) { + const StringList *list = passt->param; + + while (list) { + g_ptr_array_add(args, g_strdup(list->value->str)); + list = list->next; + } + } + + /* provide a pid file to be able to kil passt on exit */ + g_ptr_array_add(args, g_strdup("--pid")); + g_ptr_array_add(args, g_strdup(pidfile)); + + /* g_subprocess_launcher_take_fd() will set the socket on fd 3 */ + g_ptr_array_add(args, g_strdup("--fd")); + g_ptr_array_add(args, g_strdup("3")); + + g_ptr_array_add(args, NULL); + + return args; +} + +int net_init_passt(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp) +{ + g_autoptr(GError) error = NULL; + NetClientState *nc; + NetPasstState *s; + GPtrArray *args; + gchar *pidfile; + int pidfd; + + assert(netdev->type == NET_CLIENT_DRIVER_PASST); + + pidfd = g_file_open_tmp("passt-XXXXXX.pid", &pidfile, &error); + if (pidfd == -1) { + error_setg(errp, "Failed to create temporary file: %s", error->message); + return -1; + } + close(pidfd); + + args = net_passt_decode_args(&netdev->u.passt, pidfile, errp); + if (args == NULL) { + g_free(pidfile); + return -1; + } + + nc = qemu_new_net_client(&net_passt_info, peer, "passt", name); + s = DO_UPCAST(NetPasstState, data.nc, nc); + + s->args = args; + s->pidfile = pidfile; + + if (netdev->u.passt.has_vhost_user && netdev->u.passt.vhost_user) { + if (net_passt_vhost_user_init(s, errp) == -1) { + qemu_del_net_client(nc); + return -1; + } + + return 0; + } + + if (net_passt_stream_start(s, errp) == -1) { + qemu_del_net_client(nc); + return -1; + } + + return 0; +} diff --git a/net/queue.c b/net/queue.c index c872d51df8b..fb33856c188 100644 --- a/net/queue.c +++ b/net/queue.c @@ -193,17 +193,6 @@ ssize_t qemu_net_queue_receive(NetQueue *queue, return qemu_net_queue_deliver(queue, NULL, 0, data, size); } -ssize_t qemu_net_queue_receive_iov(NetQueue *queue, - const struct iovec *iov, - int iovcnt) -{ - if (queue->delivering) { - return 0; - } - - return qemu_net_queue_deliver_iov(queue, NULL, 0, iov, iovcnt); -} - ssize_t qemu_net_queue_send(NetQueue *queue, NetClientState *sender, unsigned flags, diff --git a/net/slirp.c b/net/slirp.c index eb9a456ed49..9657e86a841 100644 --- a/net/slirp.c +++ b/net/slirp.c @@ -40,10 +40,10 @@ #include "qemu/sockets.h" #include #include "chardev/char-fe.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include 
"qemu/cutils.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "util.h" #include "migration/register.h" #include "migration/vmstate.h" @@ -247,7 +247,14 @@ static void net_slirp_timer_mod(void *timer, int64_t expire_timer, timer_mod(&t->timer, expire_timer); } -static void net_slirp_register_poll_fd(int fd, void *opaque) +#if !SLIRP_CHECK_VERSION(4, 9, 0) +# define slirp_os_socket int +# define slirp_pollfds_fill_socket slirp_pollfds_fill +# define register_poll_socket register_poll_fd +# define unregister_poll_socket unregister_poll_fd +#endif + +static void net_slirp_register_poll_sock(slirp_os_socket fd, void *opaque) { #ifdef WIN32 AioContext *ctxt = qemu_get_aio_context(); @@ -260,7 +267,7 @@ static void net_slirp_register_poll_fd(int fd, void *opaque) #endif } -static void net_slirp_unregister_poll_fd(int fd, void *opaque) +static void net_slirp_unregister_poll_sock(slirp_os_socket fd, void *opaque) { #ifdef WIN32 if (WSAEventSelect(fd, NULL, 0) != 0) { @@ -286,8 +293,8 @@ static const SlirpCb slirp_cb = { #endif .timer_free = net_slirp_timer_free, .timer_mod = net_slirp_timer_mod, - .register_poll_fd = net_slirp_register_poll_fd, - .unregister_poll_fd = net_slirp_unregister_poll_fd, + .register_poll_socket = net_slirp_register_poll_sock, + .unregister_poll_socket = net_slirp_unregister_poll_sock, .notify = net_slirp_notify, }; @@ -314,7 +321,7 @@ static int slirp_poll_to_gio(int events) return ret; } -static int net_slirp_add_poll(int fd, int events, void *opaque) +static int net_slirp_add_poll(slirp_os_socket fd, int events, void *opaque) { GArray *pollfds = opaque; GPollFD pfd = { @@ -363,8 +370,8 @@ static void net_slirp_poll_notify(Notifier *notifier, void *data) switch (poll->state) { case MAIN_LOOP_POLL_FILL: - slirp_pollfds_fill(s->slirp, &poll->timeout, - net_slirp_add_poll, poll->pollfds); + slirp_pollfds_fill_socket(s->slirp, &poll->timeout, + net_slirp_add_poll, poll->pollfds); break; case MAIN_LOOP_POLL_OK: case MAIN_LOOP_POLL_ERR: @@ -629,7 +636,9 @@ static int net_slirp_init(NetClientState *peer, const char *model, s = DO_UPCAST(SlirpState, nc, nc); - cfg.version = SLIRP_CHECK_VERSION(4,7,0) ? 4 : 1; + cfg.version = + SLIRP_CHECK_VERSION(4, 9, 0) ? 6 : + SLIRP_CHECK_VERSION(4, 7, 0) ? 
4 : 1; cfg.restricted = restricted; cfg.in_enabled = ipv4; cfg.vnetwork = net; diff --git a/net/socket.c b/net/socket.c index 8e3702e1f3a..784dda686f5 100644 --- a/net/socket.c +++ b/net/socket.c @@ -157,7 +157,7 @@ static void net_socket_send(void *opaque) NetSocketState *s = opaque; int size; int ret; - uint8_t buf1[NET_BUFSIZE]; + QEMU_UNINITIALIZED uint8_t buf1[NET_BUFSIZE]; const uint8_t *buf; size = recv(s->fd, buf1, sizeof(buf1), 0); diff --git a/net/stream.c b/net/stream.c index 97e6ec6679e..d893f02cabe 100644 --- a/net/stream.c +++ b/net/stream.c @@ -27,173 +27,50 @@ #include "net/net.h" #include "clients.h" -#include "monitor/monitor.h" #include "qapi/error.h" -#include "qemu/error-report.h" -#include "qemu/option.h" -#include "qemu/sockets.h" -#include "qemu/iov.h" -#include "qemu/main-loop.h" -#include "qemu/cutils.h" -#include "io/channel.h" -#include "io/channel-socket.h" #include "io/net-listener.h" #include "qapi/qapi-events-net.h" #include "qapi/qapi-visit-sockets.h" #include "qapi/clone-visitor.h" +#include "stream_data.h" + typedef struct NetStreamState { - NetClientState nc; - QIOChannel *listen_ioc; - QIONetListener *listener; - QIOChannel *ioc; - guint ioc_read_tag; - guint ioc_write_tag; - SocketReadState rs; - unsigned int send_index; /* number of bytes sent*/ - uint32_t reconnect; + NetStreamData data; + uint32_t reconnect_ms; guint timer_tag; SocketAddress *addr; } NetStreamState; -static void net_stream_listen(QIONetListener *listener, - QIOChannelSocket *cioc, - void *opaque); static void net_stream_arm_reconnect(NetStreamState *s); -static gboolean net_stream_writable(QIOChannel *ioc, - GIOCondition condition, - gpointer data) -{ - NetStreamState *s = data; - - s->ioc_write_tag = 0; - - qemu_flush_queued_packets(&s->nc); - - return G_SOURCE_REMOVE; -} - static ssize_t net_stream_receive(NetClientState *nc, const uint8_t *buf, size_t size) { - NetStreamState *s = DO_UPCAST(NetStreamState, nc, nc); - uint32_t len = htonl(size); - struct iovec iov[] = { - { - .iov_base = &len, - .iov_len = sizeof(len), - }, { - .iov_base = (void *)buf, - .iov_len = size, - }, - }; - struct iovec local_iov[2]; - unsigned int nlocal_iov; - size_t remaining; - ssize_t ret; - - remaining = iov_size(iov, 2) - s->send_index; - nlocal_iov = iov_copy(local_iov, 2, iov, 2, s->send_index, remaining); - ret = qio_channel_writev(s->ioc, local_iov, nlocal_iov, NULL); - if (ret == QIO_CHANNEL_ERR_BLOCK) { - ret = 0; /* handled further down */ - } - if (ret == -1) { - s->send_index = 0; - return -errno; - } - if (ret < (ssize_t)remaining) { - s->send_index += ret; - s->ioc_write_tag = qio_channel_add_watch(s->ioc, G_IO_OUT, - net_stream_writable, s, NULL); - return 0; - } - s->send_index = 0; - return size; -} + NetStreamData *d = DO_UPCAST(NetStreamData, nc, nc); -static gboolean net_stream_send(QIOChannel *ioc, - GIOCondition condition, - gpointer data); - -static void net_stream_send_completed(NetClientState *nc, ssize_t len) -{ - NetStreamState *s = DO_UPCAST(NetStreamState, nc, nc); - - if (!s->ioc_read_tag) { - s->ioc_read_tag = qio_channel_add_watch(s->ioc, G_IO_IN, - net_stream_send, s, NULL); - } -} - -static void net_stream_rs_finalize(SocketReadState *rs) -{ - NetStreamState *s = container_of(rs, NetStreamState, rs); - - if (qemu_send_packet_async(&s->nc, rs->buf, - rs->packet_len, - net_stream_send_completed) == 0) { - if (s->ioc_read_tag) { - g_source_remove(s->ioc_read_tag); - s->ioc_read_tag = 0; - } - } + return net_stream_data_receive(d, buf, size); } static gboolean 
net_stream_send(QIOChannel *ioc, GIOCondition condition, gpointer data) { - NetStreamState *s = data; - int size; - int ret; - char buf1[NET_BUFSIZE]; - const char *buf; - - size = qio_channel_read(s->ioc, buf1, sizeof(buf1), NULL); - if (size < 0) { - if (errno != EWOULDBLOCK) { - goto eoc; - } - } else if (size == 0) { - /* end of connection */ - eoc: - s->ioc_read_tag = 0; - if (s->ioc_write_tag) { - g_source_remove(s->ioc_write_tag); - s->ioc_write_tag = 0; - } - if (s->listener) { - qemu_set_info_str(&s->nc, "listening"); - qio_net_listener_set_client_func(s->listener, net_stream_listen, - s, NULL); - } - object_unref(OBJECT(s->ioc)); - s->ioc = NULL; + if (net_stream_data_send(ioc, condition, data) == G_SOURCE_REMOVE) { + NetStreamState *s = DO_UPCAST(NetStreamState, data, data); - net_socket_rs_init(&s->rs, net_stream_rs_finalize, false); - s->nc.link_down = true; - - qapi_event_send_netdev_stream_disconnected(s->nc.name); + qapi_event_send_netdev_stream_disconnected(s->data.nc.name); net_stream_arm_reconnect(s); return G_SOURCE_REMOVE; } - buf = buf1; - - ret = net_fill_rstate(&s->rs, (const uint8_t *)buf, size); - - if (ret == -1) { - goto eoc; - } return G_SOURCE_CONTINUE; } static void net_stream_cleanup(NetClientState *nc) { - NetStreamState *s = DO_UPCAST(NetStreamState, nc, nc); + NetStreamState *s = DO_UPCAST(NetStreamState, data.nc, nc); if (s->timer_tag) { g_source_remove(s->timer_tag); s->timer_tag = 0; @@ -202,28 +79,28 @@ static void net_stream_cleanup(NetClientState *nc) qapi_free_SocketAddress(s->addr); s->addr = NULL; } - if (s->ioc) { - if (QIO_CHANNEL_SOCKET(s->ioc)->fd != -1) { - if (s->ioc_read_tag) { - g_source_remove(s->ioc_read_tag); - s->ioc_read_tag = 0; + if (s->data.ioc) { + if (QIO_CHANNEL_SOCKET(s->data.ioc)->fd != -1) { + if (s->data.ioc_read_tag) { + g_source_remove(s->data.ioc_read_tag); + s->data.ioc_read_tag = 0; } - if (s->ioc_write_tag) { - g_source_remove(s->ioc_write_tag); - s->ioc_write_tag = 0; + if (s->data.ioc_write_tag) { + g_source_remove(s->data.ioc_write_tag); + s->data.ioc_write_tag = 0; } } - object_unref(OBJECT(s->ioc)); - s->ioc = NULL; + object_unref(OBJECT(s->data.ioc)); + s->data.ioc = NULL; } - if (s->listen_ioc) { - if (s->listener) { - qio_net_listener_disconnect(s->listener); - object_unref(OBJECT(s->listener)); - s->listener = NULL; + if (s->data.listen_ioc) { + if (s->data.listener) { + qio_net_listener_disconnect(s->data.listener); + object_unref(OBJECT(s->data.listener)); + s->data.listener = NULL; } - object_unref(OBJECT(s->listen_ioc)); - s->listen_ioc = NULL; + object_unref(OBJECT(s->data.listen_ioc)); + s->data.listen_ioc = NULL; } } @@ -235,23 +112,13 @@ static NetClientInfo net_stream_info = { }; static void net_stream_listen(QIONetListener *listener, - QIOChannelSocket *cioc, - void *opaque) + QIOChannelSocket *cioc, gpointer data) { - NetStreamState *s = opaque; + NetStreamData *d = data; SocketAddress *addr; char *uri; - object_ref(OBJECT(cioc)); - - qio_net_listener_set_client_func(s->listener, NULL, s, NULL); - - s->ioc = QIO_CHANNEL(cioc); - qio_channel_set_name(s->ioc, "stream-server"); - s->nc.link_down = false; - - s->ioc_read_tag = qio_channel_add_watch(s->ioc, G_IO_IN, net_stream_send, - s, NULL); + net_stream_data_listen(listener, cioc, data); if (cioc->localAddr.ss_family == AF_UNIX) { addr = qio_channel_socket_get_local_address(cioc, NULL); @@ -260,22 +127,22 @@ static void net_stream_listen(QIONetListener *listener, } g_assert(addr != NULL); uri = socket_uri(addr); - qemu_set_info_str(&s->nc, "%s", 
uri); + qemu_set_info_str(&d->nc, "%s", uri); g_free(uri); - qapi_event_send_netdev_stream_connected(s->nc.name, addr); + qapi_event_send_netdev_stream_connected(d->nc.name, addr); qapi_free_SocketAddress(addr); } static void net_stream_server_listening(QIOTask *task, gpointer opaque) { - NetStreamState *s = opaque; - QIOChannelSocket *listen_sioc = QIO_CHANNEL_SOCKET(s->listen_ioc); + NetStreamData *d = opaque; + QIOChannelSocket *listen_sioc = QIO_CHANNEL_SOCKET(d->listen_ioc); SocketAddress *addr; int ret; Error *err = NULL; if (qio_task_propagate_error(task, &err)) { - qemu_set_info_str(&s->nc, "error: %s", error_get_pretty(err)); + qemu_set_info_str(&d->nc, "error: %s", error_get_pretty(err)); error_free(err); return; } @@ -284,20 +151,21 @@ static void net_stream_server_listening(QIOTask *task, gpointer opaque) g_assert(addr != NULL); ret = qemu_socket_try_set_nonblock(listen_sioc->fd); if (addr->type == SOCKET_ADDRESS_TYPE_FD && ret < 0) { - qemu_set_info_str(&s->nc, "can't use file descriptor %s (errno %d)", + qemu_set_info_str(&d->nc, "can't use file descriptor %s (errno %d)", addr->u.fd.str, -ret); return; } g_assert(ret == 0); qapi_free_SocketAddress(addr); - s->nc.link_down = true; - s->listener = qio_net_listener_new(); + d->nc.link_down = true; + d->listener = qio_net_listener_new(); - qemu_set_info_str(&s->nc, "listening"); - net_socket_rs_init(&s->rs, net_stream_rs_finalize, false); - qio_net_listener_set_client_func(s->listener, net_stream_listen, s, NULL); - qio_net_listener_add(s->listener, listen_sioc); + qemu_set_info_str(&d->nc, "listening"); + net_socket_rs_init(&d->rs, net_stream_data_rs_finalize, false); + qio_net_listener_set_client_func(d->listener, d->listen, d, + NULL); + qio_net_listener_add(d->listener, listen_sioc); } static int net_stream_server_init(NetClientState *peer, @@ -307,16 +175,18 @@ static int net_stream_server_init(NetClientState *peer, Error **errp) { NetClientState *nc; - NetStreamState *s; + NetStreamData *d; QIOChannelSocket *listen_sioc = qio_channel_socket_new(); nc = qemu_new_net_client(&net_stream_info, peer, model, name); - s = DO_UPCAST(NetStreamState, nc, nc); - qemu_set_info_str(&s->nc, "initializing"); + d = DO_UPCAST(NetStreamData, nc, nc); + d->send = net_stream_send; + d->listen = net_stream_listen; + qemu_set_info_str(&d->nc, "initializing"); - s->listen_ioc = QIO_CHANNEL(listen_sioc); + d->listen_ioc = QIO_CHANNEL(listen_sioc); qio_channel_socket_listen_async(listen_sioc, addr, 0, - net_stream_server_listening, s, + net_stream_server_listening, d, NULL, NULL); return 0; @@ -325,49 +195,23 @@ static int net_stream_server_init(NetClientState *peer, static void net_stream_client_connected(QIOTask *task, gpointer opaque) { NetStreamState *s = opaque; - QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(s->ioc); + NetStreamData *d = &s->data; + QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(d->ioc); SocketAddress *addr; gchar *uri; - int ret; - Error *err = NULL; - if (qio_task_propagate_error(task, &err)) { - qemu_set_info_str(&s->nc, "error: %s", error_get_pretty(err)); - error_free(err); - goto error; + if (net_stream_data_client_connected(task, d) == -1) { + net_stream_arm_reconnect(s); + return; } addr = qio_channel_socket_get_remote_address(sioc, NULL); g_assert(addr != NULL); uri = socket_uri(addr); - qemu_set_info_str(&s->nc, "%s", uri); + qemu_set_info_str(&d->nc, "%s", uri); g_free(uri); - - ret = qemu_socket_try_set_nonblock(sioc->fd); - if (addr->type == SOCKET_ADDRESS_TYPE_FD && ret < 0) { - qemu_set_info_str(&s->nc, "can't use 
file descriptor %s (errno %d)", - addr->u.fd.str, -ret); - qapi_free_SocketAddress(addr); - goto error; - } - g_assert(ret == 0); - - net_socket_rs_init(&s->rs, net_stream_rs_finalize, false); - - /* Disable Nagle algorithm on TCP sockets to reduce latency */ - qio_channel_set_delay(s->ioc, false); - - s->ioc_read_tag = qio_channel_add_watch(s->ioc, G_IO_IN, net_stream_send, - s, NULL); - s->nc.link_down = false; - qapi_event_send_netdev_stream_connected(s->nc.name, addr); + qapi_event_send_netdev_stream_connected(d->nc.name, addr); qapi_free_SocketAddress(addr); - - return; -error: - object_unref(OBJECT(s->ioc)); - s->ioc = NULL; - net_stream_arm_reconnect(s); } static gboolean net_stream_reconnect(gpointer data) @@ -378,7 +222,7 @@ static gboolean net_stream_reconnect(gpointer data) s->timer_tag = 0; sioc = qio_channel_socket_new(); - s->ioc = QIO_CHANNEL(sioc); + s->data.ioc = QIO_CHANNEL(sioc); qio_channel_socket_connect_async(sioc, s->addr, net_stream_client_connected, s, NULL, NULL); @@ -387,10 +231,9 @@ static gboolean net_stream_reconnect(gpointer data) static void net_stream_arm_reconnect(NetStreamState *s) { - if (s->reconnect && s->timer_tag == 0) { - qemu_set_info_str(&s->nc, "connecting"); - s->timer_tag = g_timeout_add_seconds(s->reconnect, - net_stream_reconnect, s); + if (s->reconnect_ms && s->timer_tag == 0) { + qemu_set_info_str(&s->data.nc, "connecting"); + s->timer_tag = g_timeout_add(s->reconnect_ms, net_stream_reconnect, s); } } @@ -398,7 +241,7 @@ static int net_stream_client_init(NetClientState *peer, const char *model, const char *name, SocketAddress *addr, - uint32_t reconnect, + uint32_t reconnect_ms, Error **errp) { NetStreamState *s; @@ -406,14 +249,16 @@ static int net_stream_client_init(NetClientState *peer, QIOChannelSocket *sioc = qio_channel_socket_new(); nc = qemu_new_net_client(&net_stream_info, peer, model, name); - s = DO_UPCAST(NetStreamState, nc, nc); - qemu_set_info_str(&s->nc, "connecting"); + s = DO_UPCAST(NetStreamState, data.nc, nc); + qemu_set_info_str(&s->data.nc, "connecting"); - s->ioc = QIO_CHANNEL(sioc); - s->nc.link_down = true; + s->data.ioc = QIO_CHANNEL(sioc); + s->data.nc.link_down = true; + s->data.send = net_stream_send; + s->data.listen = net_stream_listen; - s->reconnect = reconnect; - if (reconnect) { + s->reconnect_ms = reconnect_ms; + if (reconnect_ms) { s->addr = QAPI_CLONE(SocketAddress, addr); } qio_channel_socket_connect_async(sioc, addr, @@ -432,13 +277,24 @@ int net_init_stream(const Netdev *netdev, const char *name, sock = &netdev->u.stream; if (!sock->has_server || !sock->server) { + uint32_t reconnect_ms = 0; + + if (sock->has_reconnect && sock->has_reconnect_ms) { + error_setg(errp, "'reconnect' and 'reconnect-ms' are mutually " + "exclusive"); + return -1; + } else if (sock->has_reconnect_ms) { + reconnect_ms = sock->reconnect_ms; + } else if (sock->has_reconnect) { + reconnect_ms = sock->reconnect * 1000u; + } + return net_stream_client_init(peer, "stream", name, sock->addr, - sock->has_reconnect ? 
sock->reconnect : 0, - errp); + reconnect_ms, errp); } - if (sock->has_reconnect) { - error_setg(errp, "'reconnect' option is incompatible with " - "socket in server mode"); + if (sock->has_reconnect || sock->has_reconnect_ms) { + error_setg(errp, "'reconnect' and 'reconnect-ms' options are " + "incompatible with socket in server mode"); return -1; } return net_stream_server_init(peer, "stream", name, sock->addr, errp); diff --git a/net/stream_data.c b/net/stream_data.c new file mode 100644 index 00000000000..5af27e0d1d6 --- /dev/null +++ b/net/stream_data.c @@ -0,0 +1,193 @@ +/* + * net stream generic functions + * + * Copyright Red Hat + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/iov.h" +#include "qapi/error.h" +#include "net/net.h" +#include "io/channel.h" +#include "io/net-listener.h" + +#include "stream_data.h" + +static gboolean net_stream_data_writable(QIOChannel *ioc, + GIOCondition condition, gpointer data) +{ + NetStreamData *d = data; + + d->ioc_write_tag = 0; + + qemu_flush_queued_packets(&d->nc); + + return G_SOURCE_REMOVE; +} + +ssize_t net_stream_data_receive(NetStreamData *d, const uint8_t *buf, + size_t size) +{ + uint32_t len = htonl(size); + struct iovec iov[] = { + { + .iov_base = &len, + .iov_len = sizeof(len), + }, { + .iov_base = (void *)buf, + .iov_len = size, + }, + }; + struct iovec local_iov[2]; + unsigned int nlocal_iov; + size_t remaining; + ssize_t ret; + + remaining = iov_size(iov, 2) - d->send_index; + nlocal_iov = iov_copy(local_iov, 2, iov, 2, d->send_index, remaining); + ret = qio_channel_writev(d->ioc, local_iov, nlocal_iov, NULL); + if (ret == QIO_CHANNEL_ERR_BLOCK) { + ret = 0; /* handled further down */ + } + if (ret == -1) { + d->send_index = 0; + return -errno; + } + if (ret < (ssize_t)remaining) { + d->send_index += ret; + d->ioc_write_tag = qio_channel_add_watch(d->ioc, G_IO_OUT, + net_stream_data_writable, d, + NULL); + return 0; + } + d->send_index = 0; + return size; +} + +static void net_stream_data_send_completed(NetClientState *nc, ssize_t len) +{ + NetStreamData *d = DO_UPCAST(NetStreamData, nc, nc); + + if (!d->ioc_read_tag) { + d->ioc_read_tag = qio_channel_add_watch(d->ioc, G_IO_IN, d->send, d, + NULL); + } +} + +void net_stream_data_rs_finalize(SocketReadState *rs) +{ + NetStreamData *d = container_of(rs, NetStreamData, rs); + + if (qemu_send_packet_async(&d->nc, rs->buf, + rs->packet_len, + net_stream_data_send_completed) == 0) { + if (d->ioc_read_tag) { + g_source_remove(d->ioc_read_tag); + d->ioc_read_tag = 0; + } + } +} + +gboolean net_stream_data_send(QIOChannel *ioc, GIOCondition condition, + NetStreamData *d) +{ + int size; + int ret; + QEMU_UNINITIALIZED char buf1[NET_BUFSIZE]; + const char *buf; + + size = qio_channel_read(d->ioc, buf1, sizeof(buf1), NULL); + if (size < 0) { + if (errno != EWOULDBLOCK) { + goto eoc; + } + } else if (size == 0) { + /* end of connection */ + eoc: + d->ioc_read_tag = 0; + if (d->ioc_write_tag) { + g_source_remove(d->ioc_write_tag); + d->ioc_write_tag = 0; + } + if (d->listener) { + qemu_set_info_str(&d->nc, "listening"); + qio_net_listener_set_client_func(d->listener, + d->listen, d, NULL); + } + object_unref(OBJECT(d->ioc)); + d->ioc = NULL; + + net_socket_rs_init(&d->rs, net_stream_data_rs_finalize, false); + d->nc.link_down = true; + + return G_SOURCE_REMOVE; + } + buf = buf1; + + ret = net_fill_rstate(&d->rs, (const uint8_t *)buf, size); + + if (ret == -1) { + goto eoc; + } + + return G_SOURCE_CONTINUE; +} + +void 
net_stream_data_listen(QIONetListener *listener, + QIOChannelSocket *cioc, + NetStreamData *d) +{ + object_ref(OBJECT(cioc)); + + qio_net_listener_set_client_func(d->listener, NULL, d, NULL); + + d->ioc = QIO_CHANNEL(cioc); + qio_channel_set_name(d->ioc, "stream-server"); + d->nc.link_down = false; + + d->ioc_read_tag = qio_channel_add_watch(d->ioc, G_IO_IN, d->send, d, NULL); +} + +int net_stream_data_client_connected(QIOTask *task, NetStreamData *d) +{ + QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(d->ioc); + SocketAddress *addr; + int ret; + Error *err = NULL; + + if (qio_task_propagate_error(task, &err)) { + qemu_set_info_str(&d->nc, "error: %s", error_get_pretty(err)); + error_free(err); + goto error; + } + + addr = qio_channel_socket_get_remote_address(sioc, NULL); + g_assert(addr != NULL); + + ret = qemu_socket_try_set_nonblock(sioc->fd); + if (addr->type == SOCKET_ADDRESS_TYPE_FD && ret < 0) { + qemu_set_info_str(&d->nc, "can't use file descriptor %s (errno %d)", + addr->u.fd.str, -ret); + qapi_free_SocketAddress(addr); + goto error; + } + g_assert(ret == 0); + qapi_free_SocketAddress(addr); + + net_socket_rs_init(&d->rs, net_stream_data_rs_finalize, false); + + /* Disable Nagle algorithm on TCP sockets to reduce latency */ + qio_channel_set_delay(d->ioc, false); + + d->ioc_read_tag = qio_channel_add_watch(d->ioc, G_IO_IN, d->send, d, NULL); + d->nc.link_down = false; + + return 0; +error: + object_unref(OBJECT(d->ioc)); + d->ioc = NULL; + + return -1; +} diff --git a/net/stream_data.h b/net/stream_data.h new file mode 100644 index 00000000000..b868625665a --- /dev/null +++ b/net/stream_data.h @@ -0,0 +1,31 @@ +/* + * net stream generic functions + * + * Copyright Red Hat + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +typedef struct NetStreamData { + NetClientState nc; + QIOChannel *ioc; + guint ioc_read_tag; + guint ioc_write_tag; + SocketReadState rs; + unsigned int send_index; /* number of bytes sent*/ + QIOChannelFunc send; + /* server data */ + QIOChannel *listen_ioc; + QIONetListener *listener; + QIONetListenerClientFunc listen; +} NetStreamData; + +ssize_t net_stream_data_receive(NetStreamData *d, const uint8_t *buf, + size_t size); +void net_stream_data_rs_finalize(SocketReadState *rs); +gboolean net_stream_data_send(QIOChannel *ioc, GIOCondition condition, + NetStreamData *d); +int net_stream_data_client_connected(QIOTask *task, NetStreamData *d); +void net_stream_data_listen(QIONetListener *listener, + QIOChannelSocket *cioc, + NetStreamData *d); diff --git a/net/tap-linux.c b/net/tap-linux.c index 1226d5fda2d..22ec2f45d2b 100644 --- a/net/tap-linux.c +++ b/net/tap-linux.c @@ -45,10 +45,21 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, int len = sizeof(struct virtio_net_hdr); unsigned int features; - fd = RETRY_ON_EINTR(open(PATH_NET_TUN, O_RDWR)); + + ret = if_nametoindex(ifname); + if (ret) { + g_autofree char *file = g_strdup_printf("/dev/tap%d", ret); + fd = open(file, O_RDWR); + } else { + fd = -1; + } + if (fd < 0) { - error_setg_errno(errp, errno, "could not open %s", PATH_NET_TUN); - return -1; + fd = RETRY_ON_EINTR(open(PATH_NET_TUN, O_RDWR)); + if (fd < 0) { + error_setg_errno(errp, errno, "could not open %s", PATH_NET_TUN); + return -1; + } } memset(&ifr, 0, sizeof(ifr)); ifr.ifr_flags = IFF_TAP | IFF_NO_PI; diff --git a/net/tap-win32.c b/net/tap-win32.c index 7edbd716337..38baf90e0b3 100644 --- a/net/tap-win32.c +++ b/net/tap-win32.c @@ -214,7 +214,7 @@ static int is_tap_win32_dev(const char *guid) for (;;) { char enum_name[256]; - 
char unit_string[256]; + g_autofree char *unit_string = NULL; HKEY unit_key; char component_id_string[] = "ComponentId"; char component_id[256]; @@ -239,8 +239,7 @@ static int is_tap_win32_dev(const char *guid) return FALSE; } - snprintf (unit_string, sizeof(unit_string), "%s\\%s", - ADAPTER_KEY, enum_name); + unit_string = g_strdup_printf("%s\\%s", ADAPTER_KEY, enum_name); status = RegOpenKeyEx( HKEY_LOCAL_MACHINE, @@ -315,7 +314,7 @@ static int get_device_guid( while (!stop) { char enum_name[256]; - char connection_string[256]; + g_autofree char *connection_string = NULL; HKEY connection_key; char name_data[256]; DWORD name_type; @@ -338,9 +337,7 @@ static int get_device_guid( return -1; } - snprintf(connection_string, - sizeof(connection_string), - "%s\\%s\\Connection", + connection_string = g_strdup_printf("%s\\%s\\Connection", NETWORK_CONNECTIONS_KEY, enum_name); status = RegOpenKeyEx( @@ -595,7 +592,7 @@ static void tap_win32_free_buffer(tap_win32_overlapped_t *overlapped, static int tap_win32_open(tap_win32_overlapped_t **phandle, const char *preferred_name) { - char device_path[256]; + g_autofree char *device_path = NULL; char device_guid[0x100]; int rc; HANDLE handle; @@ -617,7 +614,7 @@ static int tap_win32_open(tap_win32_overlapped_t **phandle, if (rc) return -1; - snprintf (device_path, sizeof(device_path), "%s%s%s", + device_path = g_strdup_printf("%s%s%s", USERMODEDEVICEDIR, device_guid, TAPSUFFIX); @@ -707,11 +704,6 @@ static void tap_win32_send(void *opaque) } } -struct vhost_net *tap_get_vhost_net(NetClientState *nc) -{ - return NULL; -} - static NetClientInfo net_tap_win32_info = { .type = NET_CLIENT_DRIVER_TAP, .size = sizeof(TAPState), diff --git a/net/tap.c b/net/tap.c index 51f7aec39d9..f7df702f978 100644 --- a/net/tap.c +++ b/net/tap.c @@ -36,17 +36,35 @@ #include "net/net.h" #include "clients.h" #include "monitor/monitor.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qapi/error.h" #include "qemu/cutils.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/sockets.h" +#include "hw/virtio/vhost.h" #include "net/tap.h" #include "net/vhost_net.h" +static const int kernel_feature_bits[] = { + VIRTIO_F_NOTIFY_ON_EMPTY, + VIRTIO_RING_F_INDIRECT_DESC, + VIRTIO_RING_F_EVENT_IDX, + VIRTIO_NET_F_MRG_RXBUF, + VIRTIO_F_VERSION_1, + VIRTIO_NET_F_MTU, + VIRTIO_F_IOMMU_PLATFORM, + VIRTIO_F_RING_PACKED, + VIRTIO_F_RING_RESET, + VIRTIO_F_IN_ORDER, + VIRTIO_F_NOTIFICATION_DATA, + VIRTIO_NET_F_RSC_EXT, + VIRTIO_NET_F_HASH_REPORT, + VHOST_INVALID_FEATURE_BIT +}; + typedef struct TAPState { NetClientState nc; int fd; @@ -172,6 +190,11 @@ static void tap_send(void *opaque) break; } + if (s->host_vnet_hdr_len && size <= s->host_vnet_hdr_len) { + /* Invalid packet */ + break; + } + if (s->host_vnet_hdr_len && !s->using_vnet_hdr) { buf += s->host_vnet_hdr_len; size -= s->host_vnet_hdr_len; @@ -329,6 +352,18 @@ int tap_get_fd(NetClientState *nc) return s->fd; } +/* + * tap_get_vhost_net() can return NULL if a tap net-device backend is + * created with 'vhost=off' option, 'vhostforce=off' or no vhost or + * vhostforce or vhostfd options at all. Please see net_init_tap_one(). 
+ */ +static VHostNetState *tap_get_vhost_net(NetClientState *nc) +{ + TAPState *s = DO_UPCAST(TAPState, nc, nc); + assert(nc->info->type == NET_CLIENT_DRIVER_TAP); + return s->vhost_net; +} + /* fd support */ static NetClientInfo net_tap_info = { @@ -347,6 +382,7 @@ static NetClientInfo net_tap_info = { .set_vnet_le = tap_set_vnet_le, .set_vnet_be = tap_set_vnet_be, .set_steering_ebpf = tap_set_steering_ebpf, + .get_vhost_net = tap_get_vhost_net, }; static TAPState *net_tap_fd_init(NetClientState *peer, @@ -385,6 +421,24 @@ static TAPState *net_tap_fd_init(NetClientState *peer, return s; } +static void close_all_fds_after_fork(int excluded_fd) +{ + const int skip_fd[] = {STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO, + excluded_fd}; + unsigned int nskip = ARRAY_SIZE(skip_fd); + + /* + * skip_fd must be an ordered array of distinct fds, exclude + * excluded_fd if already included in the [STDIN_FILENO - STDERR_FILENO] + * range + */ + if (excluded_fd <= STDERR_FILENO) { + nskip--; + } + + qemu_close_all_open_fd(skip_fd, nskip); +} + static void launch_script(const char *setup_script, const char *ifname, int fd, Error **errp) { @@ -400,13 +454,7 @@ static void launch_script(const char *setup_script, const char *ifname, return; } if (pid == 0) { - int open_max = sysconf(_SC_OPEN_MAX), i; - - for (i = 3; i < open_max; i++) { - if (i != fd) { - close(i); - } - } + close_all_fds_after_fork(fd); parg = args; *parg++ = (char *)setup_script; *parg++ = (char *)ifname; @@ -490,17 +538,11 @@ static int net_bridge_run_helper(const char *helper, const char *bridge, return -1; } if (pid == 0) { - int open_max = sysconf(_SC_OPEN_MAX), i; char *fd_buf = NULL; char *br_buf = NULL; char *helper_cmd = NULL; - for (i = 3; i < open_max; i++) { - if (i != sv[1]) { - close(i); - } - } - + close_all_fds_after_fork(sv[1]); fd_buf = g_strdup_printf("%s%d", "--fd=", sv[1]); if (strrchr(helper, ' ') || strrchr(helper, '\t')) { @@ -706,6 +748,11 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, } options.opaque = (void *)(uintptr_t)vhostfd; options.nvqs = 2; + options.feature_bits = kernel_feature_bits; + options.get_acked_features = NULL; + options.save_acked_features = NULL; + options.max_tx_queue_size = 0; + options.is_vhost_user = false; s->vhost_net = vhost_net_init(&options); if (!s->vhost_net) { @@ -848,8 +895,8 @@ int net_init_tap(const Netdev *netdev, const char *name, goto free_fail; } - ret = g_unix_set_fd_nonblocking(fd, true, NULL); - if (!ret) { + if (!g_unix_set_fd_nonblocking(fd, true, NULL)) { + ret = -1; error_setg_errno(errp, errno, "%s: Can't use file descriptor %d", name, fd); goto free_fail; @@ -974,13 +1021,6 @@ int net_init_tap(const Netdev *netdev, const char *name, return 0; } -VHostNetState *tap_get_vhost_net(NetClientState *nc) -{ - TAPState *s = DO_UPCAST(TAPState, nc, nc); - assert(nc->info->type == NET_CLIENT_DRIVER_TAP); - return s->vhost_net; -} - int tap_enable(NetClientState *nc) { TAPState *s = DO_UPCAST(TAPState, nc, nc); diff --git a/net/vhost-user-stub.c b/net/vhost-user-stub.c index 52ab4e13f12..283dee87db2 100644 --- a/net/vhost-user-stub.c +++ b/net/vhost-user-stub.c @@ -11,7 +11,6 @@ #include "qemu/osdep.h" #include "clients.h" #include "net/vhost_net.h" -#include "net/vhost-user.h" #include "qemu/error-report.h" #include "qapi/error.h" diff --git a/net/vhost-user.c b/net/vhost-user.c index 12555518e83..8b96157145a 100644 --- a/net/vhost-user.c +++ b/net/vhost-user.c @@ -11,16 +11,58 @@ #include "qemu/osdep.h" #include "clients.h" #include 
"net/vhost_net.h" -#include "net/vhost-user.h" +#include "hw/virtio/vhost.h" #include "hw/virtio/vhost-user.h" +#include "standard-headers/linux/virtio_net.h" #include "chardev/char-fe.h" #include "qapi/error.h" #include "qapi/qapi-commands-net.h" +#include "qapi/qapi-events-net.h" #include "qemu/config-file.h" #include "qemu/error-report.h" #include "qemu/option.h" #include "trace.h" +static const int user_feature_bits[] = { + VIRTIO_F_NOTIFY_ON_EMPTY, + VIRTIO_F_NOTIFICATION_DATA, + VIRTIO_RING_F_INDIRECT_DESC, + VIRTIO_RING_F_EVENT_IDX, + + VIRTIO_F_ANY_LAYOUT, + VIRTIO_F_VERSION_1, + VIRTIO_NET_F_CSUM, + VIRTIO_NET_F_GUEST_CSUM, + VIRTIO_NET_F_GSO, + VIRTIO_NET_F_GUEST_TSO4, + VIRTIO_NET_F_GUEST_TSO6, + VIRTIO_NET_F_GUEST_ECN, + VIRTIO_NET_F_GUEST_UFO, + VIRTIO_NET_F_HOST_TSO4, + VIRTIO_NET_F_HOST_TSO6, + VIRTIO_NET_F_HOST_ECN, + VIRTIO_NET_F_HOST_UFO, + VIRTIO_NET_F_MRG_RXBUF, + VIRTIO_NET_F_MTU, + VIRTIO_F_IOMMU_PLATFORM, + VIRTIO_F_RING_PACKED, + VIRTIO_F_RING_RESET, + VIRTIO_F_IN_ORDER, + VIRTIO_NET_F_RSS, + VIRTIO_NET_F_RSC_EXT, + VIRTIO_NET_F_HASH_REPORT, + VIRTIO_NET_F_GUEST_USO4, + VIRTIO_NET_F_GUEST_USO6, + VIRTIO_NET_F_HOST_USO, + + /* This bit implies RARP isn't sent by QEMU out of band */ + VIRTIO_NET_F_GUEST_ANNOUNCE, + + VIRTIO_NET_F_MQ, + + VHOST_INVALID_FEATURE_BIT +}; + typedef struct NetVhostUserState { NetClientState nc; CharBackend chr; /* only queue index 0 */ @@ -31,21 +73,21 @@ typedef struct NetVhostUserState { bool started; } NetVhostUserState; -VHostNetState *vhost_user_get_vhost_net(NetClientState *nc) +static struct vhost_net *vhost_user_get_vhost_net(NetClientState *nc) { NetVhostUserState *s = DO_UPCAST(NetVhostUserState, nc, nc); assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER); return s->vhost_net; } -uint64_t vhost_user_get_acked_features(NetClientState *nc) +static uint64_t vhost_user_get_acked_features(NetClientState *nc) { NetVhostUserState *s = DO_UPCAST(NetVhostUserState, nc, nc); assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER); return s->acked_features; } -void vhost_user_save_acked_features(NetClientState *nc) +static void vhost_user_save_acked_features(NetClientState *nc) { NetVhostUserState *s; @@ -95,6 +137,12 @@ static int vhost_user_start(int queues, NetClientState *ncs[], options.opaque = be; options.busyloop_timeout = 0; options.nvqs = 2; + options.feature_bits = user_feature_bits; + options.max_tx_queue_size = VIRTQUEUE_MAX_SIZE; + options.get_acked_features = vhost_user_get_acked_features; + options.save_acked_features = vhost_user_save_acked_features; + options.is_vhost_user = true; + net = vhost_net_init(&options); if (!net) { error_report("failed to init vhost_net for queue %d", i); @@ -230,6 +278,7 @@ static NetClientInfo net_vhost_user_info = { .set_vnet_be = vhost_user_set_vnet_endianness, .set_vnet_le = vhost_user_set_vnet_endianness, .check_peer_type = vhost_user_check_peer_type, + .get_vhost_net = vhost_user_get_vhost_net, }; static gboolean net_vhost_user_watch(void *do_not_use, GIOCondition cond, @@ -249,7 +298,6 @@ static void chr_closed_bh(void *opaque) const char *name = opaque; NetClientState *ncs[MAX_QUEUE_NUM]; NetVhostUserState *s; - Error *err = NULL; int queues, i; queues = qemu_find_net_clients_except(name, ncs, @@ -263,14 +311,12 @@ static void chr_closed_bh(void *opaque) vhost_user_save_acked_features(ncs[i]); } - qmp_set_link(name, false, &err); + net_client_set_link(ncs, queues, false); qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, net_vhost_user_event, NULL, opaque, NULL, true); - if (err) { - 
error_report_err(err); - } + qapi_event_send_netdev_vhost_user_disconnected(name); } static void net_vhost_user_event(void *opaque, QEMUChrEvent event) @@ -279,7 +325,6 @@ static void net_vhost_user_event(void *opaque, QEMUChrEvent event) NetClientState *ncs[MAX_QUEUE_NUM]; NetVhostUserState *s; Chardev *chr; - Error *err = NULL; int queues; queues = qemu_find_net_clients_except(name, ncs, @@ -298,8 +343,9 @@ static void net_vhost_user_event(void *opaque, QEMUChrEvent event) } s->watch = qemu_chr_fe_add_watch(&s->chr, G_IO_HUP, net_vhost_user_watch, s); - qmp_set_link(name, true, &err); + net_client_set_link(ncs, queues, true); s->started = true; + qapi_event_send_netdev_vhost_user_connected(name, chr->label); break; case CHR_EVENT_CLOSED: /* a close event may happen during a read/write, but vhost @@ -324,10 +370,6 @@ static void net_vhost_user_event(void *opaque, QEMUChrEvent event) /* Ignore */ break; } - - if (err) { - error_report_err(err); - } } static int net_vhost_user_init(NetClientState *peer, const char *device, diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c index 03457ead663..74d26a94972 100644 --- a/net/vhost-vdpa.c +++ b/net/vhost-vdpa.c @@ -55,7 +55,7 @@ typedef struct VhostVDPAState { * with the exception of VHOST_INVALID_FEATURE_BIT, * which should always be the last entry. */ -const int vdpa_feature_bits[] = { +static const int vdpa_feature_bits[] = { VIRTIO_F_ANY_LAYOUT, VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_NOTIFY_ON_EMPTY, @@ -88,6 +88,7 @@ const int vdpa_feature_bits[] = { VIRTIO_NET_F_MQ, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_MTU, + VIRTIO_NET_F_RSC_EXT, VIRTIO_NET_F_RSS, VIRTIO_NET_F_STATUS, VIRTIO_RING_F_EVENT_IDX, @@ -131,7 +132,7 @@ static const uint64_t vdpa_svq_device_features = #define VHOST_VDPA_NET_CVQ_ASID 1 -VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc) +static struct vhost_net *vhost_vdpa_get_vhost_net(NetClientState *nc) { VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); @@ -200,6 +201,11 @@ static int vhost_vdpa_add(NetClientState *ncs, void *be, options.opaque = be; options.busyloop_timeout = 0; options.nvqs = nvqs; + options.feature_bits = vdpa_feature_bits; + options.get_acked_features = NULL; + options.save_acked_features = NULL; + options.max_tx_queue_size = VIRTQUEUE_MAX_SIZE; + options.is_vhost_user = false; net = vhost_net_init(&options); if (!net) { @@ -223,14 +229,6 @@ static void vhost_vdpa_cleanup(NetClientState *nc) { VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); - /* - * If a peer NIC is attached, do not cleanup anything. - * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup() - * when the guest is shutting down. 
- */ - if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) { - return; - } munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len()); munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len()); if (s->vhost_net) { @@ -242,18 +240,39 @@ static void vhost_vdpa_cleanup(NetClientState *nc) return; } qemu_close(s->vhost_vdpa.shared->device_fd); + g_clear_pointer(&s->vhost_vdpa.shared->iova_tree, vhost_iova_tree_delete); g_free(s->vhost_vdpa.shared); } -/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */ -static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd) +static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc) { + assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); + return true; } -static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc) +static bool vhost_vdpa_get_vnet_hash_supported_types(NetClientState *nc, + uint32_t *types) { assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); + VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); + uint64_t features = s->vhost_vdpa.dev->features; + int fd = s->vhost_vdpa.shared->device_fd; + struct { + struct vhost_vdpa_config hdr; + uint32_t supported_hash_types; + } config; + + if (!virtio_has_feature(features, VIRTIO_NET_F_HASH_REPORT) && + !virtio_has_feature(features, VIRTIO_NET_F_RSS)) { + return false; + } + + config.hdr.off = offsetof(struct virtio_net_config, supported_hash_types); + config.hdr.len = sizeof(config.supported_hash_types); + + assert(!ioctl(fd, VHOST_VDPA_GET_CONFIG, &config)); + *types = le32_to_cpu(config.supported_hash_types); return true; } @@ -269,6 +288,18 @@ static bool vhost_vdpa_has_ufo(NetClientState *nc) } +/* + * FIXME: vhost_vdpa doesn't have an API to "set h/w endianness". But it's + * reasonable to assume that h/w is LE by default, because LE is what + * virtio 1.0 and later ask for. So, this function just says "yes, the h/w is + * LE". Otherwise, on a BE machine, higher-level code would mistakely think + * the h/w is BE and can't support VDPA for a virtio 1.0 client. 
+ */ +static int vhost_vdpa_set_vnet_le(NetClientState *nc, bool enable) +{ + return 0; +} + static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc, Error **errp) { @@ -357,14 +388,8 @@ static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier, static void vhost_vdpa_net_data_start_first(VhostVDPAState *s) { - struct vhost_vdpa *v = &s->vhost_vdpa; - migration_add_notifier(&s->migration_state, vdpa_net_migration_state_notifier); - if (v->shadow_vqs_enabled) { - v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first, - v->shared->iova_range.last); - } } static int vhost_vdpa_net_data_start(NetClientState *nc) @@ -374,8 +399,7 @@ static int vhost_vdpa_net_data_start(NetClientState *nc) assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); - if (s->always_svq || - migration_is_setup_or_active()) { + if (s->always_svq || migration_is_running()) { v->shadow_vqs_enabled = true; } else { v->shadow_vqs_enabled = false; @@ -412,19 +436,12 @@ static int vhost_vdpa_net_data_load(NetClientState *nc) static void vhost_vdpa_net_client_stop(NetClientState *nc) { VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); - struct vhost_dev *dev; assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); if (s->vhost_vdpa.index == 0) { migration_remove_notifier(&s->migration_state); } - - dev = s->vhost_vdpa.dev; - if (dev->vq_index + dev->nvqs == dev->vq_index_end) { - g_clear_pointer(&s->vhost_vdpa.shared->iova_tree, - vhost_iova_tree_delete); - } } static NetClientInfo net_vhost_vdpa_info = { @@ -436,9 +453,11 @@ static NetClientInfo net_vhost_vdpa_info = { .stop = vhost_vdpa_net_client_stop, .cleanup = vhost_vdpa_cleanup, .has_vnet_hdr = vhost_vdpa_has_vnet_hdr, + .get_vnet_hash_supported_types = vhost_vdpa_get_vnet_hash_supported_types, .has_ufo = vhost_vdpa_has_ufo, + .set_vnet_le = vhost_vdpa_set_vnet_le, .check_peer_type = vhost_vdpa_check_peer_type, - .set_steering_ebpf = vhost_vdpa_set_steering_ebpf, + .get_vhost_net = vhost_vdpa_get_vhost_net, }; static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index, @@ -510,14 +529,20 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size, bool write) { DMAMap map = {}; + hwaddr taddr = (hwaddr)(uintptr_t)buf; int r; - map.translated_addr = (hwaddr)(uintptr_t)buf; map.size = size - 1; map.perm = write ? IOMMU_RW : IOMMU_RO, - r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map); + r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map, taddr); if (unlikely(r != IOVA_OK)) { error_report("Cannot map injected element"); + + if (map.translated_addr == taddr) { + error_report("Insertion to IOVA->HVA tree failed"); + /* Remove the mapping from the IOVA-only tree */ + goto dma_map_err; + } return r; } @@ -589,24 +614,6 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc) return 0; } - /* - * If other vhost_vdpa already have an iova_tree, reuse it for simplicity, - * whether CVQ shares ASID with guest or not, because: - * - Memory listener need access to guest's memory addresses allocated in - * the IOVA tree. - * - There should be plenty of IOVA address space for both ASID not to - * worry about collisions between them. Guest's translations are still - * validated with virtio virtqueue_pop so there is no risk for the guest - * to access memory that it shouldn't. - * - * To allocate a iova tree per ASID is doable but it complicates the code - * and it is not worth it for the moment. 
- */ - if (!v->shared->iova_tree) { - v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first, - v->shared->iova_range.last); - } - r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len(), false); if (unlikely(r < 0)) { @@ -643,7 +650,7 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0); int r; - r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL); + r = vhost_svq_add(svq, out_sg, out_num, NULL, in_sg, in_num, NULL, NULL); if (unlikely(r != 0)) { if (unlikely(r == -ENOSPC)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n", @@ -857,13 +864,13 @@ static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n, * configuration only at live migration. */ if (!n->rss_data.enabled || - n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) { + n->rss_data.runtime_hash_types == VIRTIO_NET_HASH_REPORT_NONE) { return 0; } table = g_malloc_n(n->rss_data.indirections_len, sizeof(n->rss_data.indirections_table[0])); - cfg.hash_types = cpu_to_le32(n->rss_data.hash_types); + cfg.hash_types = cpu_to_le32(n->rss_data.runtime_hash_types); if (do_rss) { /* @@ -1303,9 +1310,10 @@ static NetClientInfo net_vhost_vdpa_cvq_info = { .stop = vhost_vdpa_net_cvq_stop, .cleanup = vhost_vdpa_cleanup, .has_vnet_hdr = vhost_vdpa_has_vnet_hdr, + .get_vnet_hash_supported_types = vhost_vdpa_get_vnet_hash_supported_types, .has_ufo = vhost_vdpa_has_ufo, .check_peer_type = vhost_vdpa_check_peer_type, - .set_steering_ebpf = vhost_vdpa_set_steering_ebpf, + .get_vhost_net = vhost_vdpa_get_vhost_net, }; /* @@ -1715,6 +1723,8 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, s->vhost_vdpa.shared->device_fd = vdpa_device_fd; s->vhost_vdpa.shared->iova_range = iova_range; s->vhost_vdpa.shared->shadow_data = svq; + s->vhost_vdpa.shared->iova_tree = vhost_iova_tree_new(iova_range.first, + iova_range.last); } else if (!is_datapath) { s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(), PROT_READ | PROT_WRITE, @@ -1830,9 +1840,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features, &has_cvq, errp); - if (queue_pairs < 0) { - qemu_close(vdpa_device_fd); - return queue_pairs; + if (queue_pairs <= 0) { + goto err; } r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range); diff --git a/net/vmnet-common.m b/net/vmnet-common.m index 30c4e53c136..ab33ce2b0c2 100644 --- a/net/vmnet-common.m +++ b/net/vmnet-common.m @@ -17,7 +17,8 @@ #include "clients.h" #include "qemu/error-report.h" #include "qapi/error.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" +#include "net/eth.h" #include #include @@ -93,7 +94,7 @@ ssize_t vmnet_receive_common(NetClientState *nc, if_status = vmnet_write(s->vmnet_if, &packet, &pkt_cnt); if (if_status != VMNET_SUCCESS) { - error_report("vmnet: write error: %s\n", + error_report("vmnet: write error: %s", vmnet_status_map_str(if_status)); return -1; } @@ -147,10 +148,26 @@ static int vmnet_read_packets(VmnetState *s) */ static void vmnet_write_packets_to_qemu(VmnetState *s) { + uint8_t *pkt; + size_t pktsz; + uint8_t min_pkt[ETH_ZLEN]; + size_t min_pktsz; + ssize_t size; + while (s->packets_send_current_pos < s->packets_send_end_pos) { - ssize_t size = qemu_send_packet_async(&s->nc, - s->iov_buf[s->packets_send_current_pos].iov_base, - s->packets_buf[s->packets_send_current_pos].vm_pkt_size, + pkt = 
s->iov_buf[s->packets_send_current_pos].iov_base; + pktsz = s->packets_buf[s->packets_send_current_pos].vm_pkt_size; + + if (net_peer_needs_padding(&s->nc)) { + min_pktsz = sizeof(min_pkt); + + if (eth_pad_short_frame(min_pkt, &min_pktsz, pkt, pktsz)) { + pkt = min_pkt; + pktsz = min_pktsz; + } + } + + size = qemu_send_packet_async(&s->nc, pkt, pktsz, vmnet_send_completed); if (size == 0) { diff --git a/os-posix.c b/os-posix.c index 43f9a43f3fe..52925c23d3d 100644 --- a/os-posix.c +++ b/os-posix.c @@ -32,7 +32,7 @@ #include "qemu/error-report.h" #include "qemu/log.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "qemu/cutils.h" #ifdef CONFIG_LINUX @@ -327,18 +327,29 @@ void os_set_line_buffering(void) setvbuf(stdout, NULL, _IOLBF, 0); } -int os_mlock(void) +int os_mlock(bool on_fault) { #ifdef HAVE_MLOCKALL int ret = 0; + int flags = MCL_CURRENT | MCL_FUTURE; - ret = mlockall(MCL_CURRENT | MCL_FUTURE); + if (on_fault) { +#ifdef HAVE_MLOCK_ONFAULT + flags |= MCL_ONFAULT; +#else + error_report("mlockall: on_fault not supported"); + return -EINVAL; +#endif + } + + ret = mlockall(flags); if (ret < 0) { error_report("mlockall: %s", strerror(errno)); } return ret; #else + (void)on_fault; return -ENOSYS; #endif } diff --git a/os-wasm.c b/os-wasm.c new file mode 100644 index 00000000000..d240c180c5f --- /dev/null +++ b/os-wasm.c @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: MIT */ +/* + * os-wasm.c + * Forked from os-posix.c, removing functions not working on Emscripten + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2010 Red Hat, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include +#include +#include +#include +#include + +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "system/runstate.h" +#include "qemu/cutils.h" + +void os_setup_post(void){} +void os_set_line_buffering(void) +{ + setvbuf(stdout, NULL, _IOLBF, 0); +} +void os_setup_early_signal_handling(void) +{ + struct sigaction act; + sigfillset(&act.sa_mask); + act.sa_flags = 0; + act.sa_handler = SIG_IGN; + sigaction(SIGPIPE, &act, NULL); +} +void os_set_proc_name(const char *s) +{ + error_report("Change of process name not supported by your OS"); + exit(1); +} +static void termsig_handler(int signal, siginfo_t *info, void *c) +{ + qemu_system_killed(info->si_signo, info->si_pid); +} + +void os_setup_signal_handling(void) +{ + struct sigaction act; + + memset(&act, 0, sizeof(act)); + act.sa_sigaction = termsig_handler; + act.sa_flags = SA_SIGINFO; + sigaction(SIGINT, &act, NULL); + sigaction(SIGHUP, &act, NULL); + sigaction(SIGTERM, &act, NULL); +} +void os_setup_limits(void) +{ + struct rlimit nofile; + + if (getrlimit(RLIMIT_NOFILE, &nofile) < 0) { + warn_report("unable to query NOFILE limit: %s", strerror(errno)); + return; + } + + if (nofile.rlim_cur == nofile.rlim_max) { + return; + } + + nofile.rlim_cur = nofile.rlim_max; + + if (setrlimit(RLIMIT_NOFILE, &nofile) < 0) { + warn_report("unable to set NOFILE limit: %s", strerror(errno)); + return; + } +} +int os_mlock(bool on_fault) +{ +#ifdef HAVE_MLOCKALL + int ret = 0; + int flags = MCL_CURRENT | MCL_FUTURE; + + if (on_fault) { +#ifdef HAVE_MLOCK_ONFAULT + flags |= MCL_ONFAULT; +#else + error_report("mlockall: on_fault not supported"); + return -EINVAL; +#endif + } + + ret = mlockall(flags); + if (ret < 0) { + error_report("mlockall: %s", strerror(errno)); + } + + return ret; +#else + (void)on_fault; + return -ENOSYS; +#endif +} diff --git a/os-win32.c b/os-win32.c index 725ad652e8b..c1bff808b4b 100644 --- a/os-win32.c +++ b/os-win32.c @@ -26,7 +26,7 @@ #include "qemu/osdep.h" #include #include -#include "sysemu/runstate.h" +#include "system/runstate.h" static BOOL WINAPI qemu_ctrl_handler(DWORD type) { diff --git a/page-target.c b/page-target.c index 82211c85937..8fcd5443b52 100644 --- a/page-target.c +++ b/page-target.c @@ -8,29 +8,6 @@ #include "qemu/osdep.h" #include "exec/target_page.h" -#include "exec/cpu-defs.h" -#include "cpu.h" -#include "exec/cpu-all.h" - -size_t qemu_target_page_size(void) -{ - return TARGET_PAGE_SIZE; -} - -int qemu_target_page_mask(void) -{ - return TARGET_PAGE_MASK; -} - -int qemu_target_page_bits(void) -{ - return TARGET_PAGE_BITS; -} - -int qemu_target_page_bits_min(void) -{ - return TARGET_PAGE_BITS_MIN; -} /* Convert target pages to MiB (2**20). */ size_t qemu_target_pages_to_MiB(size_t pages) diff --git a/page-vary-target.c b/page-vary-target.c index 343b4adb95a..49a32b4fe51 100644 --- a/page-vary-target.c +++ b/page-vary-target.c @@ -21,12 +21,47 @@ #include "qemu/osdep.h" #include "exec/page-vary.h" -#include "exec/exec-all.h" +#include "exec/target_page.h" -bool set_preferred_target_page_bits(int bits) + +/* + * For system mode, the minimum comes from the number of bits + * required for maximum alignment (6) and the number of bits + * required for TLB_FLAGS_MASK (3). + * + * For user mode, TARGET_PAGE_BITS_VARY is a hack to allow the target + * page size to match the host page size. Mostly, this reduces the + * ordinary target page size to run on a host with 4KiB pages (i.e. x86). 
+ * There is no true minimum required by the implementation, but keep the + * same minimum as for system mode for sanity. + * See linux-user/mmap.c, mmap_h_lt_g and mmap_h_gt_g. + */ +#define TARGET_PAGE_BITS_MIN 9 + +#ifndef TARGET_PAGE_BITS_VARY +QEMU_BUILD_BUG_ON(TARGET_PAGE_BITS < TARGET_PAGE_BITS_MIN); +#endif + +#ifndef CONFIG_USER_ONLY +#include "exec/tlb-flags.h" + +QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & ((1u < TARGET_PAGE_BITS_MIN) - 1)); + +int migration_legacy_page_bits(void) { #ifdef TARGET_PAGE_BITS_VARY + QEMU_BUILD_BUG_ON(TARGET_PAGE_BITS_LEGACY < TARGET_PAGE_BITS_MIN); + return TARGET_PAGE_BITS_LEGACY; +#else + return TARGET_PAGE_BITS; +#endif +} +#endif + +bool set_preferred_target_page_bits(int bits) +{ assert(bits >= TARGET_PAGE_BITS_MIN); +#ifdef TARGET_PAGE_BITS_VARY return set_preferred_target_page_bits_common(bits); #else return true; @@ -35,7 +70,12 @@ bool set_preferred_target_page_bits(int bits) void finalize_target_page_bits(void) { -#ifdef TARGET_PAGE_BITS_VARY - finalize_target_page_bits_common(TARGET_PAGE_BITS_MIN); +#ifndef TARGET_PAGE_BITS_VARY + finalize_target_page_bits_common(TARGET_PAGE_BITS); +#elif defined(CONFIG_USER_ONLY) + assert(target_page.bits != 0); + finalize_target_page_bits_common(target_page.bits); +#else + finalize_target_page_bits_common(TARGET_PAGE_BITS_LEGACY); #endif } diff --git a/pc-bios/README b/pc-bios/README index 7ffb2f43a46..d009c378953 100644 --- a/pc-bios/README +++ b/pc-bios/README @@ -13,8 +13,8 @@ - SLOF (Slimline Open Firmware) is a free IEEE 1275 Open Firmware implementation for certain IBM POWER hardware. The sources are at - https://github.com/aik/SLOF, and the image currently in qemu is - built from git tag qemu-slof-20230918. + https://gitlab.com/slof/slof, and the image currently in qemu is + built from git tag qemu-slof-20241106. - VOF (Virtual Open Firmware) is a minimalistic firmware to work with -machine pseries,x-vof=on. When enabled, the firmware acts as a slim shim and @@ -43,6 +43,19 @@ run an hypervisor OS or simply a host OS on the "baremetal" platform, also known as the PowerNV (Non-Virtualized) platform. +- pnv-pnor.bin is a non-volatile RAM image used by PowerNV, which stores + NVRAM BIOS settings among other things. This image was created with the + following command (the ffspart tool can be found in the skiboot source tree): + + ffspart -s 0x1000 -c 34 -i pnv-pnor.in -p pnv-pnor.bin + + Where pnv-pnor.in contains the two lines (no leading whitespace): + + NVRAM,0x01000,0x00020000,,,/dev/zero + VERSION,0x21000,0x00001000,,,/dev/zero + + skiboot is then booted once to format the NVRAM partition. + - QemuMacDrivers (https://github.com/ozbenh/QemuMacDrivers) is a project to provide virtualised drivers for PPC MacOS guests. @@ -70,10 +83,16 @@ source code also contains code reused from other projects described here: https://github.com/riscv/opensbi/blob/master/ThirdPartyNotices.md. -- npcm7xx_bootrom.bin is a simplified, free (Apache 2.0) boot ROM for Nuvoton - NPCM7xx BMC devices. It currently implements the bare minimum to load, parse, - initialize and run boot images stored in SPI flash, but may grow more - features over time as needed. The source code is available at: +- npcm{7xx,8xx}_bootrom.bin is a simplified, free (Apache 2.0) boot ROM for + Nuvoton NPCM7xx/8xx BMC devices. It currently implements the bare minimum to + load, parse, initialize and run boot images stored in SPI flash, but may grow + more features over time as needed. 
The source code is available at: + https://github.com/google/vbootrom + +- ast27x0_bootrom.bin is a simplified, free (Apache 2.0) boot ROM for + ASPEED AST27x0 BMC SOC. It currently implements the bare minimum to + load, parse, initialize and run boot images stored in SPI flash, but may grow + more features over time as needed. The source code is available at: https://github.com/google/vbootrom - hppa-firmware.img (32-bit) and hppa-firmware64.img (64-bit) are firmware diff --git a/pc-bios/ast27x0_bootrom.bin b/pc-bios/ast27x0_bootrom.bin new file mode 100644 index 00000000000..a4c94d64da5 Binary files /dev/null and b/pc-bios/ast27x0_bootrom.bin differ diff --git a/pc-bios/bios-256k.bin b/pc-bios/bios-256k.bin index 48c3707d55d..509f3988e49 100644 Binary files a/pc-bios/bios-256k.bin and b/pc-bios/bios-256k.bin differ diff --git a/pc-bios/bios-microvm.bin b/pc-bios/bios-microvm.bin index c98351e7345..4870015d9ca 100644 Binary files a/pc-bios/bios-microvm.bin and b/pc-bios/bios-microvm.bin differ diff --git a/pc-bios/bios.bin b/pc-bios/bios.bin index 7e2d062af67..4b81a969a9d 100644 Binary files a/pc-bios/bios.bin and b/pc-bios/bios.bin differ diff --git a/pc-bios/descriptors/60-edk2-loongarch64.json b/pc-bios/descriptors/60-edk2-loongarch64.json new file mode 100644 index 00000000000..f174a1fc9b2 --- /dev/null +++ b/pc-bios/descriptors/60-edk2-loongarch64.json @@ -0,0 +1,31 @@ +{ + "description": "UEFI firmware for loongarch64", + "interface-types": [ + "uefi" + ], + "mapping": { + "device": "flash", + "executable": { + "filename": "@DATADIR@/edk2-loongarch64-code.fd", + "format": "raw" + }, + "nvram-template": { + "filename": "@DATADIR@/edk2-loongarch64-vars.fd", + "format": "raw" + } + }, + "targets": [ + { + "architecture": "loongarch64", + "machines": [ + "virt*" + ] + } + ], + "features": [ + + ], + "tags": [ + + ] +} diff --git a/pc-bios/descriptors/60-edk2-riscv64.json b/pc-bios/descriptors/60-edk2-riscv64.json new file mode 100644 index 00000000000..14811ca307f --- /dev/null +++ b/pc-bios/descriptors/60-edk2-riscv64.json @@ -0,0 +1,31 @@ +{ + "description": "UEFI firmware for riscv64", + "interface-types": [ + "uefi" + ], + "mapping": { + "device": "flash", + "executable": { + "filename": "@DATADIR@/edk2-riscv-code.fd", + "format": "raw" + }, + "nvram-template": { + "filename": "@DATADIR@/edk2-riscv-vars.fd", + "format": "raw" + } + }, + "targets": [ + { + "architecture": "riscv64", + "machines": [ + "virt*" + ] + } + ], + "features": [ + + ], + "tags": [ + + ] +} diff --git a/pc-bios/descriptors/60-edk2-x86_64.json b/pc-bios/descriptors/60-edk2-x86_64.json index 968cb65cf97..4599c63f14e 100644 --- a/pc-bios/descriptors/60-edk2-x86_64.json +++ b/pc-bios/descriptors/60-edk2-x86_64.json @@ -26,6 +26,7 @@ "features": [ "acpi-s3", "amd-sev", + "amd-sev-es", "verbose-dynamic" ], "tags": [ diff --git a/pc-bios/descriptors/meson.build b/pc-bios/descriptors/meson.build index 66f85d01c42..cdd0be01a35 100644 --- a/pc-bios/descriptors/meson.build +++ b/pc-bios/descriptors/meson.build @@ -5,7 +5,9 @@ if unpack_edk2_blobs and get_option('install_blobs') '60-edk2-aarch64.json', '60-edk2-arm.json', '60-edk2-i386.json', - '60-edk2-x86_64.json' + '60-edk2-x86_64.json', + '60-edk2-loongarch64.json', + '60-edk2-riscv64.json' ] configure_file(input: files(f), output: f, diff --git a/pc-bios/bamboo.dtb b/pc-bios/dtb/bamboo.dtb similarity index 100% rename from pc-bios/bamboo.dtb rename to pc-bios/dtb/bamboo.dtb diff --git a/pc-bios/bamboo.dts b/pc-bios/dtb/bamboo.dts similarity index 100% rename from 
pc-bios/bamboo.dts rename to pc-bios/dtb/bamboo.dts diff --git a/pc-bios/canyonlands.dtb b/pc-bios/dtb/canyonlands.dtb similarity index 100% rename from pc-bios/canyonlands.dtb rename to pc-bios/dtb/canyonlands.dtb diff --git a/pc-bios/canyonlands.dts b/pc-bios/dtb/canyonlands.dts similarity index 100% rename from pc-bios/canyonlands.dts rename to pc-bios/dtb/canyonlands.dts diff --git a/pc-bios/dtb/meson.build b/pc-bios/dtb/meson.build new file mode 100644 index 00000000000..969f7c95ef1 --- /dev/null +++ b/pc-bios/dtb/meson.build @@ -0,0 +1,23 @@ +dtbs = [ + 'bamboo.dtb', + 'canyonlands.dtb', + 'petalogix-ml605.dtb', + 'petalogix-s3adsp1800.dtb', +] + +dtc = disabler() # Force use of pre-built DTB files +if dtc.found() + foreach out : dtbs + f = fs.replace_suffix(out, '.dts') + custom_target(out, + build_by_default: have_system, + input: files(f), + output: out, + install: get_option('install_blobs'), + install_dir: qemu_datadir / 'dtb', + command: [ dtc, '-q', '-I', 'dts', '-O', 'dtb', + '-o', '@OUTPUT@', '@INPUT0@' ]) + endforeach +else + install_data(dtbs, install_dir: qemu_datadir / 'dtb') +endif diff --git a/pc-bios/petalogix-ml605.dtb b/pc-bios/dtb/petalogix-ml605.dtb similarity index 100% rename from pc-bios/petalogix-ml605.dtb rename to pc-bios/dtb/petalogix-ml605.dtb diff --git a/pc-bios/petalogix-ml605.dts b/pc-bios/dtb/petalogix-ml605.dts similarity index 100% rename from pc-bios/petalogix-ml605.dts rename to pc-bios/dtb/petalogix-ml605.dts diff --git a/pc-bios/petalogix-s3adsp1800.dtb b/pc-bios/dtb/petalogix-s3adsp1800.dtb similarity index 100% rename from pc-bios/petalogix-s3adsp1800.dtb rename to pc-bios/dtb/petalogix-s3adsp1800.dtb diff --git a/pc-bios/petalogix-s3adsp1800.dts b/pc-bios/dtb/petalogix-s3adsp1800.dts similarity index 100% rename from pc-bios/petalogix-s3adsp1800.dts rename to pc-bios/dtb/petalogix-s3adsp1800.dts diff --git a/pc-bios/edk2-aarch64-code.fd.bz2 b/pc-bios/edk2-aarch64-code.fd.bz2 index e763982db4d..2ce728c9ed6 100644 Binary files a/pc-bios/edk2-aarch64-code.fd.bz2 and b/pc-bios/edk2-aarch64-code.fd.bz2 differ diff --git a/pc-bios/edk2-arm-code.fd.bz2 b/pc-bios/edk2-arm-code.fd.bz2 index 329646dafa9..9b98490a801 100644 Binary files a/pc-bios/edk2-arm-code.fd.bz2 and b/pc-bios/edk2-arm-code.fd.bz2 differ diff --git a/pc-bios/edk2-i386-code.fd.bz2 b/pc-bios/edk2-i386-code.fd.bz2 index 271ce659b64..50c9869960b 100644 Binary files a/pc-bios/edk2-i386-code.fd.bz2 and b/pc-bios/edk2-i386-code.fd.bz2 differ diff --git a/pc-bios/edk2-i386-secure-code.fd.bz2 b/pc-bios/edk2-i386-secure-code.fd.bz2 index 00335cde4b1..d58c16fb04b 100644 Binary files a/pc-bios/edk2-i386-secure-code.fd.bz2 and b/pc-bios/edk2-i386-secure-code.fd.bz2 differ diff --git a/pc-bios/edk2-loongarch64-code.fd.bz2 b/pc-bios/edk2-loongarch64-code.fd.bz2 new file mode 100644 index 00000000000..ba12bc9a066 Binary files /dev/null and b/pc-bios/edk2-loongarch64-code.fd.bz2 differ diff --git a/pc-bios/edk2-loongarch64-vars.fd.bz2 b/pc-bios/edk2-loongarch64-vars.fd.bz2 new file mode 100644 index 00000000000..8a13571e28a Binary files /dev/null and b/pc-bios/edk2-loongarch64-vars.fd.bz2 differ diff --git a/pc-bios/edk2-riscv-code.fd.bz2 b/pc-bios/edk2-riscv-code.fd.bz2 index f3a98d6ed82..f4e243d996f 100644 Binary files a/pc-bios/edk2-riscv-code.fd.bz2 and b/pc-bios/edk2-riscv-code.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-code.fd.bz2 b/pc-bios/edk2-x86_64-code.fd.bz2 index a1a8c05a1b6..cf043fcb629 100644 Binary files a/pc-bios/edk2-x86_64-code.fd.bz2 and b/pc-bios/edk2-x86_64-code.fd.bz2 
differ diff --git a/pc-bios/edk2-x86_64-microvm.fd.bz2 b/pc-bios/edk2-x86_64-microvm.fd.bz2 index 6b7cd544a43..c2b04f82c69 100644 Binary files a/pc-bios/edk2-x86_64-microvm.fd.bz2 and b/pc-bios/edk2-x86_64-microvm.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-secure-code.fd.bz2 b/pc-bios/edk2-x86_64-secure-code.fd.bz2 index ef40a8bfd65..50f5b3694dd 100644 Binary files a/pc-bios/edk2-x86_64-secure-code.fd.bz2 and b/pc-bios/edk2-x86_64-secure-code.fd.bz2 differ diff --git a/pc-bios/hppa-firmware.img b/pc-bios/hppa-firmware.img old mode 100755 new mode 100644 index e065e48ded2..d5f6f2fcaf5 Binary files a/pc-bios/hppa-firmware.img and b/pc-bios/hppa-firmware.img differ diff --git a/pc-bios/hppa-firmware64.img b/pc-bios/hppa-firmware64.img old mode 100755 new mode 100644 index 7f6d837f0da..577b0a1b3f9 Binary files a/pc-bios/hppa-firmware64.img and b/pc-bios/hppa-firmware64.img differ diff --git a/pc-bios/keymaps/meson.build b/pc-bios/keymaps/meson.build index 0bd8ce00775..a79a09b2765 100644 --- a/pc-bios/keymaps/meson.build +++ b/pc-bios/keymaps/meson.build @@ -39,19 +39,18 @@ else native_qemu_keymap = qemu_keymap endif +keymap_targets = [] if native_qemu_keymap.found() - t = [] foreach km, args: keymaps # generate with qemu-kvm - t += custom_target(km, - build_by_default: true, - output: km, - command: [native_qemu_keymap, '-f', '@OUTPUT@', args.split()], - install: have_system, - install_dir: qemu_datadir / 'keymaps') + keymap_targets += custom_target(km, + build_by_default: true, + output: km, + command: [native_qemu_keymap, '-f', '@OUTPUT@', args.split()], + install: have_system, + install_dir: qemu_datadir / 'keymaps') endforeach - - alias_target('update-keymaps', t) + alias_target('update-keymaps', keymap_targets) else install_data(keymaps.keys(), install_dir: qemu_datadir / 'keymaps') endif diff --git a/pc-bios/meson.build b/pc-bios/meson.build index 8602b45b9b3..3c41620044a 100644 --- a/pc-bios/meson.build +++ b/pc-bios/meson.build @@ -11,6 +11,8 @@ if unpack_edk2_blobs 'edk2-i386-vars.fd', 'edk2-x86_64-code.fd', 'edk2-x86_64-secure-code.fd', + 'edk2-loongarch64-code.fd', + 'edk2-loongarch64-vars.fd', ] foreach f : fds @@ -26,6 +28,7 @@ if unpack_edk2_blobs endif blobs = [ + 'ast27x0_bootrom.bin', 'bios.bin', 'bios-256k.bin', 'bios-microvm.bin', @@ -66,9 +69,9 @@ blobs = [ 'kvmvapic.bin', 'pvh.bin', 's390-ccw.img', - 's390-netboot.img', 'slof.bin', 'skiboot.lid', + 'pnv-pnor.bin', 'palcode-clipper', 'u-boot.e500', 'u-boot-sam460-20100605.bin', @@ -79,34 +82,15 @@ blobs = [ 'opensbi-riscv32-generic-fw_dynamic.bin', 'opensbi-riscv64-generic-fw_dynamic.bin', 'npcm7xx_bootrom.bin', + 'npcm8xx_bootrom.bin', 'vof.bin', 'vof-nvram.bin', ] -dtc = find_program('dtc', required: false) -foreach f : [ - 'bamboo.dts', - 'canyonlands.dts', - 'petalogix-s3adsp1800.dts', - 'petalogix-ml605.dts', -] - out = fs.replace_suffix(f, '.dtb') - if dtc.found() - custom_target(f, - build_by_default: have_system, - input: files(f), - output: out, - install: get_option('install_blobs'), - install_dir: qemu_datadir, - command: [ dtc, '-I', 'dts', '-O', 'dtb', '-o', '@OUTPUT@', '@INPUT0@' ]) - else - blobs += out - endif -endforeach - if get_option('install_blobs') - install_data(blobs, install_dir: qemu_datadir) + install_data(blobs, install_dir: qemu_datadir, install_mode: 'rw-r--r--') endif subdir('descriptors') +subdir('dtb') subdir('keymaps') diff --git a/pc-bios/npcm7xx_bootrom.bin b/pc-bios/npcm7xx_bootrom.bin index 38f89d1b97b..92282892b70 100644 Binary files a/pc-bios/npcm7xx_bootrom.bin and 
b/pc-bios/npcm7xx_bootrom.bin differ diff --git a/pc-bios/npcm8xx_bootrom.bin b/pc-bios/npcm8xx_bootrom.bin new file mode 100644 index 00000000000..45fb40fb598 Binary files /dev/null and b/pc-bios/npcm8xx_bootrom.bin differ diff --git a/pc-bios/openbios-ppc b/pc-bios/openbios-ppc index 4af60026b40..6f472d4b11a 100644 Binary files a/pc-bios/openbios-ppc and b/pc-bios/openbios-ppc differ diff --git a/pc-bios/openbios-sparc32 b/pc-bios/openbios-sparc32 index 41b6a607fb6..96792482607 100644 Binary files a/pc-bios/openbios-sparc32 and b/pc-bios/openbios-sparc32 differ diff --git a/pc-bios/openbios-sparc64 b/pc-bios/openbios-sparc64 index 902b4b35080..0a13453efa0 100644 Binary files a/pc-bios/openbios-sparc64 and b/pc-bios/openbios-sparc64 differ diff --git a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin index 7ec260ff404..b2e740010b2 100644 Binary files a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin and b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin differ diff --git a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin index 090c0cf6ac9..018b4731a71 100644 Binary files a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin and b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin differ diff --git a/pc-bios/pnv-pnor.bin b/pc-bios/pnv-pnor.bin new file mode 100644 index 00000000000..3e6f7001440 Binary files /dev/null and b/pc-bios/pnv-pnor.bin differ diff --git a/pc-bios/s390-ccw.img b/pc-bios/s390-ccw.img index f0d9ef6d4d8..ff60978d28c 100644 Binary files a/pc-bios/s390-ccw.img and b/pc-bios/s390-ccw.img differ diff --git a/pc-bios/s390-ccw/Makefile b/pc-bios/s390-ccw/Makefile index 6207911b53a..a0f24c94a87 100644 --- a/pc-bios/s390-ccw/Makefile +++ b/pc-bios/s390-ccw/Makefile @@ -3,7 +3,8 @@ all: build-all @true include config-host.mak -CFLAGS = -O2 -g +CFLAGS = -O2 -g -I $(SRC_PATH)/../../include/hw/s390x/ipl +LDFLAGS ?= MAKEFLAGS += -rR GIT_SUBMODULES = roms/SLOF @@ -32,15 +33,21 @@ QEMU_DGFLAGS = -MMD -MP -MT $@ -MF $(@D)/$(*F).d .PHONY : all clean build-all distclean -OBJECTS = start.o main.o bootmap.o jump2ipl.o sclp.o menu.o \ - virtio.o virtio-scsi.o virtio-blkdev.o libc.o cio.o dasd-ipl.o +OBJECTS = start.o main.o bootmap.o jump2ipl.o sclp.o menu.o netmain.o \ + virtio.o virtio-net.o virtio-scsi.o virtio-blkdev.o cio.o dasd-ipl.o + +SLOF_DIR := $(SRC_PATH)/../../roms/SLOF + +LIBC_INC := -nostdinc -I$(SLOF_DIR)/lib/libc/include +LIBNET_INC := -I$(SLOF_DIR)/lib/libnet EXTRA_CFLAGS += -Wall EXTRA_CFLAGS += -ffreestanding -fno-delete-null-pointer-checks -fno-common -fPIE EXTRA_CFLAGS += -fwrapv -fno-strict-aliasing -fno-asynchronous-unwind-tables EXTRA_CFLAGS += -msoft-float EXTRA_CFLAGS += -std=gnu99 -LDFLAGS += -Wl,-pie -nostdlib -z noexecstack +EXTRA_CFLAGS += $(LIBC_INC) $(LIBNET_INC) +EXTRA_LDFLAGS += -static-pie -nostdlib -z noexecstack -z text cc-test = $(CC) -Werror $1 -c -o /dev/null -xc /dev/null >/dev/null 2>/dev/null cc-option = if $(call cc-test, $1); then \ @@ -55,19 +62,64 @@ config-cc.mak: Makefile $(call cc-option,-march=z900,-march=z10)) 3> config-cc.mak -include config-cc.mak -build-all: s390-ccw.img s390-netboot.img +# libc files: + +LIBC_CFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \ + -MMD -MP -MT $@ -MF $(@:%.o=%.d) + +CTYPE_OBJS = isdigit.o isxdigit.o toupper.o +%.o : $(SLOF_DIR)/lib/libc/ctype/%.c + $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) + +STRING_OBJS = strcat.o strchr.o strrchr.o strcpy.o strlen.o strncpy.o \ + strcmp.o strncmp.o 
strcasecmp.o strncasecmp.o strstr.o \ + memset.o memcpy.o memmove.o memcmp.o +%.o : $(SLOF_DIR)/lib/libc/string/%.c + $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) + +STDLIB_OBJS = atoi.o atol.o strtoul.o strtol.o rand.o malloc.o free.o +%.o : $(SLOF_DIR)/lib/libc/stdlib/%.c + $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) + +STDIO_OBJS = sprintf.o snprintf.o vfprintf.o vsnprintf.o vsprintf.o fprintf.o \ + printf.o putc.o puts.o putchar.o stdchnls.o fileno.o +%.o : $(SLOF_DIR)/lib/libc/stdio/%.c + $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) + +sbrk.o: $(SLOF_DIR)/slof/sbrk.c + $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) + +LIBCOBJS := $(STRING_OBJS) $(CTYPE_OBJS) $(STDLIB_OBJS) $(STDIO_OBJS) sbrk.o -s390-ccw.elf: $(OBJECTS) - $(call quiet-command,$(CC) $(LDFLAGS) -o $@ $(OBJECTS),Linking) +libc.a: $(LIBCOBJS) + $(call quiet-command,$(AR) -rc $@ $^,Creating static library) + +# libnet files: + +LIBNETOBJS := args.o dhcp.o dns.o icmpv6.o ipv6.o tcp.o udp.o bootp.o \ + dhcpv6.o ethernet.o ipv4.o ndp.o tftp.o pxelinux.o +LIBNETCFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \ + -DDHCPARCH=0x1F -MMD -MP -MT $@ -MF $(@:%.o=%.d) + +%.o : $(SLOF_DIR)/lib/libnet/%.c + $(call quiet-command,$(CC) $(LIBNETCFLAGS) -c -o $@ $<,Compiling) + +libnet.a: $(LIBNETOBJS) + $(call quiet-command,$(AR) -rc $@ $^,Creating static library) + +# Main targets: + +build-all: s390-ccw.img + +s390-ccw.elf: $(OBJECTS) libnet.a libc.a + $(call quiet-command,$(CC) $(EXTRA_LDFLAGS) $(LDFLAGS) -o $@ $^,Linking) s390-ccw.img: s390-ccw.elf $(call quiet-command,$(STRIP) --strip-unneeded $< -o $@,Stripping $< into) $(OBJECTS): Makefile -include $(SRC_PATH)/netboot.mak - -ALL_OBJS = $(sort $(OBJECTS) $(NETOBJS) $(LIBCOBJS) $(LIBNETOBJS)) +ALL_OBJS = $(sort $(OBJECTS) $(LIBCOBJS) $(LIBNETOBJS)) -include $(ALL_OBJS:%.o=%.d) clean: diff --git a/pc-bios/s390-ccw/bootmap.c b/pc-bios/s390-ccw/bootmap.c index a2137449dc3..0f8baa01985 100644 --- a/pc-bios/s390-ccw/bootmap.c +++ b/pc-bios/s390-ccw/bootmap.c @@ -8,7 +8,8 @@ * directory. */ -#include "libc.h" +#include +#include #include "s390-ccw.h" #include "s390-arch.h" #include "bootmap.h" @@ -21,7 +22,7 @@ #ifdef DEBUG_FALLBACK #define dputs(txt) \ - do { sclp_print("zipl: " txt); } while (0) + do { printf("zipl: " txt); } while (0) #else #define dputs(fmt, ...) 
\ do { } while (0) @@ -61,15 +62,34 @@ static void *s2_prev_blk = _s2; static void *s2_cur_blk = _s2 + MAX_SECTOR_SIZE; static void *s2_next_blk = _s2 + MAX_SECTOR_SIZE * 2; -static inline void verify_boot_info(BootInfo *bip) +static inline int verify_boot_info(BootInfo *bip) { - IPL_assert(magic_match(bip->magic, ZIPL_MAGIC), "No zIPL sig in BootInfo"); - IPL_assert(bip->version == BOOT_INFO_VERSION, "Wrong zIPL version"); - IPL_assert(bip->bp_type == BOOT_INFO_BP_TYPE_IPL, "DASD is not for IPL"); - IPL_assert(bip->dev_type == BOOT_INFO_DEV_TYPE_ECKD, "DASD is not ECKD"); - IPL_assert(bip->flags == BOOT_INFO_FLAGS_ARCH, "Not for this arch"); - IPL_assert(block_size_ok(bip->bp.ipl.bm_ptr.eckd.bptr.size), - "Bad block size in zIPL section of the 1st record."); + if (!magic_match(bip->magic, ZIPL_MAGIC)) { + puts("No zIPL sig in BootInfo"); + return -EINVAL; + } + if (bip->version != BOOT_INFO_VERSION) { + puts("Wrong zIPL version"); + return -EINVAL; + } + if (bip->bp_type != BOOT_INFO_BP_TYPE_IPL) { + puts("DASD is not for IPL"); + return -ENODEV; + } + if (bip->dev_type != BOOT_INFO_DEV_TYPE_ECKD) { + puts("DASD is not ECKD"); + return -ENODEV; + } + if (bip->flags != BOOT_INFO_FLAGS_ARCH) { + puts("Not for this arch"); + return -EINVAL; + } + if (!block_size_ok(bip->bp.ipl.bm_ptr.eckd.bptr.size)) { + puts("Bad block size in zIPL section of 1st record"); + return -EINVAL; + } + + return 0; } static void eckd_format_chs(ExtEckdBlockPtr *ptr, bool ldipl, @@ -144,14 +164,17 @@ static block_number_t load_eckd_segments(block_number_t blk, bool ldipl, bool more_data; memset(_bprs, FREE_SPACE_FILLER, sizeof(_bprs)); - read_block(blk, bprs, "BPRS read failed"); + if (virtio_read(blk, bprs)) { + puts("BPRS read failed"); + return ERROR_BLOCK_NR; + } do { more_data = false; for (j = 0;; j++) { block_nr = gen_eckd_block_num(&bprs[j].xeckd, ldipl); if (is_null_block_number(block_nr)) { /* end of chunk */ - break; + return NULL_BLOCK_NR; } /* we need the updated blockno for the next indirect entry @@ -162,15 +185,20 @@ static block_number_t load_eckd_segments(block_number_t blk, bool ldipl, } /* List directed pointer does not store block size */ - IPL_assert(ldipl || block_size_ok(bprs[j].xeckd.bptr.size), - "bad chunk block size"); + if (!ldipl && !block_size_ok(bprs[j].xeckd.bptr.size)) { + puts("Bad chunk block size"); + return ERROR_BLOCK_NR; + } if (!eckd_valid_address(&bprs[j].xeckd, ldipl)) { /* * If an invalid address is found during LD-IPL then break and - * retry as CCW + * retry as CCW-IPL, otherwise abort on error */ - IPL_assert(ldipl, "bad chunk ECKD addr"); + if (!ldipl) { + puts("Bad chunk ECKD address"); + return ERROR_BLOCK_NR; + } break; } @@ -188,7 +216,10 @@ static block_number_t load_eckd_segments(block_number_t blk, bool ldipl, * I.e. the next ptr must point to the unused memory area */ memset(_bprs, FREE_SPACE_FILLER, sizeof(_bprs)); - read_block(block_nr, bprs, "BPRS continuation read failed"); + if (virtio_read(block_nr, bprs)) { + puts("BPRS continuation read failed"); + return ERROR_BLOCK_NR; + } more_data = true; break; } @@ -197,7 +228,10 @@ static block_number_t load_eckd_segments(block_number_t blk, bool ldipl, * to memory (address). 
*/ rc = virtio_read_many(block_nr, (void *)(*address), count + 1); - IPL_assert(rc == 0, "code chunk read failed"); + if (rc != 0) { + puts("Code chunk read failed"); + return ERROR_BLOCK_NR; + } *address += (count + 1) * virtio_get_block_size(); } @@ -231,7 +265,10 @@ static int eckd_get_boot_menu_index(block_number_t s1b_block_nr) /* Get Stage1b data */ memset(sec, FREE_SPACE_FILLER, sizeof(sec)); - read_block(s1b_block_nr, s1b, "Cannot read stage1b boot loader"); + if (virtio_read(s1b_block_nr, s1b)) { + puts("Cannot read stage1b boot loader"); + return -EIO; + } memset(_s2, FREE_SPACE_FILLER, sizeof(_s2)); @@ -243,7 +280,10 @@ static int eckd_get_boot_menu_index(block_number_t s1b_block_nr) break; } - read_block(cur_block_nr, s2_cur_blk, "Cannot read stage2 boot loader"); + if (virtio_read(cur_block_nr, s2_cur_blk)) { + puts("Cannot read stage2 boot loader"); + return -EIO; + } if (find_zipl_boot_menu_banner(&banner_offset)) { /* @@ -251,8 +291,10 @@ static int eckd_get_boot_menu_index(block_number_t s1b_block_nr) * possibility of menu data spanning multiple blocks. */ if (prev_block_nr) { - read_block(prev_block_nr, s2_prev_blk, - "Cannot read stage2 boot loader"); + if (virtio_read(prev_block_nr, s2_prev_blk)) { + puts("Cannot read stage2 boot loader"); + return -EIO; + } } if (i + 1 < STAGE2_BLK_CNT_MAX) { @@ -260,8 +302,10 @@ static int eckd_get_boot_menu_index(block_number_t s1b_block_nr) } if (next_block_nr && !is_null_block_number(next_block_nr)) { - read_block(next_block_nr, s2_next_blk, - "Cannot read stage2 boot loader"); + if (virtio_read(next_block_nr, s2_next_blk)) { + puts("Cannot read stage2 boot loader"); + return -EIO; + } } return menu_get_zipl_boot_index(s2_cur_blk + banner_offset); @@ -270,11 +314,11 @@ static int eckd_get_boot_menu_index(block_number_t s1b_block_nr) prev_block_nr = cur_block_nr; } - sclp_print("No zipl boot menu data found. Booting default entry."); + printf("No zipl boot menu data found. Booting default entry."); return 0; } -static void run_eckd_boot_script(block_number_t bmt_block_nr, +static int run_eckd_boot_script(block_number_t bmt_block_nr, block_number_t s1b_block_nr) { int i; @@ -291,17 +335,27 @@ static void run_eckd_boot_script(block_number_t bmt_block_nr, } debug_print_int("loadparm", loadparm); - IPL_assert(loadparm < MAX_BOOT_ENTRIES, "loadparm value greater than" - " maximum number of boot entries allowed"); + if (loadparm >= MAX_BOOT_ENTRIES) { + panic("loadparm value greater than max number of boot entries allowed"); + } memset(sec, FREE_SPACE_FILLER, sizeof(sec)); - read_block(bmt_block_nr, sec, "Cannot read Boot Map Table"); + if (virtio_read(bmt_block_nr, sec)) { + puts("Cannot read Boot Map Table"); + return -EIO; + } block_nr = gen_eckd_block_num(&bmt->entry[loadparm].xeckd, ldipl); - IPL_assert(block_nr != -1, "Cannot find Boot Map Table Entry"); + if (block_nr == NULL_BLOCK_NR) { + printf("The requested boot entry (%d) is invalid\n", loadparm); + panic("Invalid loadparm"); + } memset(sec, FREE_SPACE_FILLER, sizeof(sec)); - read_block(block_nr, sec, "Cannot read Boot Map Script"); + if (virtio_read(block_nr, sec)) { + puts("Cannot read Boot Map Script"); + return -EIO; + } for (i = 0; bms->entry[i].type == BOOT_SCRIPT_LOAD || bms->entry[i].type == BOOT_SCRIPT_SIGNATURE; i++) { @@ -316,21 +370,27 @@ static void run_eckd_boot_script(block_number_t bmt_block_nr, do { block_nr = load_eckd_segments(block_nr, ldipl, &address); - } while (block_nr != -1); + if (block_nr == ERROR_BLOCK_NR) { + return ldipl ? 
0 : -EIO; + } + } while (block_nr != NULL_BLOCK_NR); } if (ldipl && bms->entry[i].type != BOOT_SCRIPT_EXEC) { /* Abort LD-IPL and retry as CCW-IPL */ - return; + return 0; } - IPL_assert(bms->entry[i].type == BOOT_SCRIPT_EXEC, - "Unknown script entry type"); - write_reset_psw(bms->entry[i].address.load_address); /* no return */ - jump_to_IPL_code(0); /* no return */ + if (bms->entry[i].type != BOOT_SCRIPT_EXEC) { + puts("Unknown script entry type"); + return -EINVAL; + } + write_reset_psw(bms->entry[i].address.load_address); + jump_to_IPL_code(0); + return -1; } -static void ipl_eckd_cdl(void) +static int ipl_eckd_cdl(void) { XEckdMbr *mbr; EckdCdlIpl2 *ipl2 = (void *)sec; @@ -338,23 +398,26 @@ static void ipl_eckd_cdl(void) block_number_t bmt_block_nr, s1b_block_nr; /* we have just read the block #0 and recognized it as "IPL1" */ - sclp_print("CDL\n"); + puts("CDL"); memset(sec, FREE_SPACE_FILLER, sizeof(sec)); - read_block(1, ipl2, "Cannot read IPL2 record at block 1"); + if (virtio_read(1, ipl2)) { + puts("Cannot read IPL2 record at block 1"); + return -EIO; + } mbr = &ipl2->mbr; if (!magic_match(mbr, ZIPL_MAGIC)) { - sclp_print("No zIPL section in IPL2 record.\n"); - return; + puts("No zIPL section in IPL2 record."); + return 0; } if (!block_size_ok(mbr->blockptr.xeckd.bptr.size)) { - sclp_print("Bad block size in zIPL section of IPL2 record.\n"); - return; + puts("Bad block size in zIPL section of IPL2 record."); + return 0; } if (mbr->dev_type != DEV_TYPE_ECKD) { - sclp_print("Non-ECKD device type in zIPL section of IPL2 record.\n"); - return; + puts("Non-ECKD device type in zIPL section of IPL2 record."); + return 0; } /* save pointer to Boot Map Table */ @@ -364,19 +427,21 @@ static void ipl_eckd_cdl(void) s1b_block_nr = eckd_block_num(&ipl2->stage1.seek[0].chs); memset(sec, FREE_SPACE_FILLER, sizeof(sec)); - read_block(2, vlbl, "Cannot read Volume Label at block 2"); + if (virtio_read(2, vlbl)) { + puts("Cannot read Volume Label at block 2"); + return -EIO; + } if (!magic_match(vlbl->key, VOL1_MAGIC)) { - sclp_print("Invalid magic of volume label block.\n"); - return; + puts("Invalid magic of volume label block."); + return 0; } if (!magic_match(vlbl->f.key, VOL1_MAGIC)) { - sclp_print("Invalid magic of volser block.\n"); - return; + puts("Invalid magic of volser block."); + return 0; } print_volser(vlbl->f.volser); - run_eckd_boot_script(bmt_block_nr, s1b_block_nr); - /* no return */ + return run_eckd_boot_script(bmt_block_nr, s1b_block_nr); } static void print_eckd_ldl_msg(ECKD_IPL_mode_t mode) @@ -384,8 +449,8 @@ static void print_eckd_ldl_msg(ECKD_IPL_mode_t mode) LDL_VTOC *vlbl = (void *)sec; /* already read, 3rd block */ char msg[4] = { '?', '.', '\n', '\0' }; - sclp_print((mode == ECKD_CMS) ? "CMS" : "LDL"); - sclp_print(" version "); + printf((mode == ECKD_CMS) ? 
"CMS" : "LDL"); + printf(" version "); switch (vlbl->LDL_version) { case LDL1_VERSION: msg[0] = '1'; @@ -398,11 +463,11 @@ static void print_eckd_ldl_msg(ECKD_IPL_mode_t mode) msg[1] = '?'; break; } - sclp_print(msg); + printf("%s", msg); print_volser(vlbl->volser); } -static void ipl_eckd_ldl(ECKD_IPL_mode_t mode) +static int ipl_eckd_ldl(ECKD_IPL_mode_t mode) { block_number_t bmt_block_nr, s1b_block_nr; EckdLdlIpl1 *ipl1 = (void *)sec; @@ -414,12 +479,15 @@ static void ipl_eckd_ldl(ECKD_IPL_mode_t mode) /* DO NOT read BootMap pointer (only one, xECKD) at block #2 */ memset(sec, FREE_SPACE_FILLER, sizeof(sec)); - read_block(0, sec, "Cannot read block 0 to grab boot info."); + if (virtio_read(0, sec)) { + puts("Cannot read block 0 to grab boot info."); + return -EIO; + } if (mode == ECKD_LDL_UNLABELED) { if (!magic_match(ipl1->bip.magic, ZIPL_MAGIC)) { - return; /* not applicable layout */ + return 0; /* not applicable layout */ } - sclp_print("unlabeled LDL.\n"); + puts("unlabeled LDL."); } verify_boot_info(&ipl1->bip); @@ -429,8 +497,7 @@ static void ipl_eckd_ldl(ECKD_IPL_mode_t mode) /* save pointer to Stage1b Data */ s1b_block_nr = eckd_block_num(&ipl1->stage1.seek[0].chs); - run_eckd_boot_script(bmt_block_nr, s1b_block_nr); - /* no return */ + return run_eckd_boot_script(bmt_block_nr, s1b_block_nr); } static block_number_t eckd_find_bmt(ExtEckdBlockPtr *ptr) @@ -440,7 +507,10 @@ static block_number_t eckd_find_bmt(ExtEckdBlockPtr *ptr) BootRecord *br; blockno = gen_eckd_block_num(ptr, 0); - read_block(blockno, tmp_sec, "Cannot read boot record"); + if (virtio_read(blockno, tmp_sec)) { + puts("Cannot read boot record"); + return ERROR_BLOCK_NR; + } br = (BootRecord *)tmp_sec; if (!magic_match(br->magic, ZIPL_MAGIC)) { /* If the boot record is invalid, return and try CCW-IPL instead */ @@ -466,10 +536,10 @@ static void print_eckd_msg(void) *p-- = ' '; } } - sclp_print(msg); + printf("%s", msg); } -static void ipl_eckd(void) +static int ipl_eckd(void) { IplVolumeLabel *vlbl = (void *)sec; LDL_VTOC *vtoc = (void *)sec; @@ -479,7 +549,10 @@ static void ipl_eckd(void) /* Block 2 can contain either the CDL VOL1 label or the LDL VTOC */ memset(sec, FREE_SPACE_FILLER, sizeof(sec)); - read_block(2, vlbl, "Cannot read block 2"); + if (virtio_read(2, vlbl)) { + puts("Cannot read block 2"); + return -EIO; + } /* * First check for a list-directed-format pointer which would @@ -487,43 +560,60 @@ static void ipl_eckd(void) */ if (eckd_valid_address((ExtEckdBlockPtr *)&vlbl->f.br, 0)) { ldipl_bmt = eckd_find_bmt((ExtEckdBlockPtr *)&vlbl->f.br); - if (ldipl_bmt) { - sclp_print("List-Directed\n"); - /* LD-IPL does not use the S1B bock, just make it NULL */ - run_eckd_boot_script(ldipl_bmt, NULL_BLOCK_NR); - /* Only return in error, retry as CCW-IPL */ - sclp_print("Retrying IPL "); + switch (ldipl_bmt) { + case ERROR_BLOCK_NR: + return -EIO; + case NULL_BLOCK_NR: + break; /* Invalid BMT but the device may still boot with CCW-IPL */ + default: + puts("List-Directed"); + /* + * LD-IPL does not use the S1B bock, just make it NULL_BLOCK_NR. + * In some failure cases retry IPL before aborting. 
+ */ + if (run_eckd_boot_script(ldipl_bmt, NULL_BLOCK_NR)) { + return -EIO; + } + /* Non-fatal error, retry as CCW-IPL */ + printf("Retrying IPL "); print_eckd_msg(); } memset(sec, FREE_SPACE_FILLER, sizeof(sec)); - read_block(2, vtoc, "Cannot read block 2"); + if (virtio_read(2, vtoc)) { + puts("Cannot read block 2"); + return -EIO; + } } /* Not list-directed */ if (magic_match(vtoc->magic, VOL1_MAGIC)) { - ipl_eckd_cdl(); /* may return in error */ + if (ipl_eckd_cdl()) { + return -1; + } } if (magic_match(vtoc->magic, CMS1_MAGIC)) { - ipl_eckd_ldl(ECKD_CMS); /* no return */ + return ipl_eckd_ldl(ECKD_CMS); } if (magic_match(vtoc->magic, LNX1_MAGIC)) { - ipl_eckd_ldl(ECKD_LDL); /* no return */ + return ipl_eckd_ldl(ECKD_LDL); } - ipl_eckd_ldl(ECKD_LDL_UNLABELED); /* it still may return */ + if (ipl_eckd_ldl(ECKD_LDL_UNLABELED)) { + return -1; + } /* * Ok, it is not a LDL by any means. * It still might be a CDL with zero record keys for IPL1 and IPL2 */ - ipl_eckd_cdl(); + return ipl_eckd_cdl(); } /*********************************************************************** * IPL a SCSI disk */ -static void zipl_load_segment(ComponentEntry *entry) +static int zipl_load_segment(ComponentEntry *entry) { const int max_entries = (MAX_SECTOR_SIZE / sizeof(ScsiBlockPtr)); ScsiBlockPtr *bprs = (void *)sec; @@ -543,7 +633,10 @@ static void zipl_load_segment(ComponentEntry *entry) do { memset(bprs, FREE_SPACE_FILLER, bprs_size); fill_hex_val(blk_no, &blockno, sizeof(blockno)); - read_block(blockno, bprs, err_msg); + if (virtio_read(blockno, bprs)) { + puts(err_msg); + return -EIO; + } for (i = 0;; i++) { uint64_t *cur_desc = (void *)&bprs[i]; @@ -571,23 +664,37 @@ static void zipl_load_segment(ComponentEntry *entry) } address = virtio_load_direct(cur_desc[0], cur_desc[1], 0, (void *)address); - IPL_assert(address != -1, "zIPL load segment failed"); + if (!address) { + puts("zIPL load segment failed"); + return -EIO; + } } } while (blockno); + + return 0; } /* Run a zipl program */ -static void zipl_run(ScsiBlockPtr *pte) +static int zipl_run(ScsiBlockPtr *pte) { ComponentHeader *header; ComponentEntry *entry; uint8_t tmp_sec[MAX_SECTOR_SIZE]; - read_block(pte->blockno, tmp_sec, "Cannot read header"); + if (virtio_read(pte->blockno, tmp_sec)) { + puts("Cannot read header"); + return -EIO; + } header = (ComponentHeader *)tmp_sec; - IPL_assert(magic_match(tmp_sec, ZIPL_MAGIC), "No zIPL magic in header"); - IPL_assert(header->type == ZIPL_COMP_HEADER_IPL, "Bad header type"); + if (!magic_match(tmp_sec, ZIPL_MAGIC)) { + puts("No zIPL magic in header"); + return -EINVAL; + } + if (header->type != ZIPL_COMP_HEADER_IPL) { + puts("Bad header type"); + return -EINVAL; + } dputs("start loading images\n"); @@ -602,22 +709,30 @@ static void zipl_run(ScsiBlockPtr *pte) continue; } - zipl_load_segment(entry); + if (zipl_load_segment(entry)) { + return -1; + } entry++; - IPL_assert((uint8_t *)(&entry[1]) <= (tmp_sec + MAX_SECTOR_SIZE), - "Wrong entry value"); + if ((uint8_t *)(&entry[1]) > (tmp_sec + MAX_SECTOR_SIZE)) { + puts("Wrong entry value"); + return -EINVAL; + } } - IPL_assert(entry->component_type == ZIPL_COMP_ENTRY_EXEC, "No EXEC entry"); + if (entry->component_type != ZIPL_COMP_ENTRY_EXEC) { + puts("No EXEC entry"); + return -EINVAL; + } /* should not return */ write_reset_psw(entry->compdat.load_psw); jump_to_IPL_code(0); + return -1; } -static void ipl_scsi(void) +static int ipl_scsi(void) { ScsiMbr *mbr = (void *)sec; int program_table_entries = 0; @@ -628,22 +743,34 @@ static void ipl_scsi(void) /* Grab 
the MBR */ memset(sec, FREE_SPACE_FILLER, sizeof(sec)); - read_block(0, mbr, "Cannot read block 0"); + if (virtio_read(0, mbr)) { + puts("Cannot read block 0"); + return -EIO; + } if (!magic_match(mbr->magic, ZIPL_MAGIC)) { - return; + return 0; } - sclp_print("Using SCSI scheme.\n"); + puts("Using SCSI scheme."); debug_print_int("MBR Version", mbr->version_id); IPL_check(mbr->version_id == 1, "Unknown MBR layout version, assuming version 1"); debug_print_int("program table", mbr->pt.blockno); - IPL_assert(mbr->pt.blockno, "No Program Table"); + if (!mbr->pt.blockno) { + puts("No Program Table"); + return -EINVAL; + } /* Parse the program table */ - read_block(mbr->pt.blockno, sec, "Error reading Program Table"); - IPL_assert(magic_match(sec, ZIPL_MAGIC), "No zIPL magic in PT"); + if (virtio_read(mbr->pt.blockno, sec)) { + puts("Error reading Program Table"); + return -EIO; + } + if (!magic_match(sec, ZIPL_MAGIC)) { + puts("No zIPL magic in Program Table"); + return -EINVAL; + } for (i = 0; i < MAX_BOOT_ENTRIES; i++) { if (prog_table->entry[i].scsi.blockno) { @@ -653,17 +780,26 @@ static void ipl_scsi(void) } debug_print_int("program table entries", program_table_entries); - IPL_assert(program_table_entries != 0, "Empty Program Table"); + if (program_table_entries == 0) { + puts("Empty Program Table"); + return -EINVAL; + } if (menu_is_enabled_enum()) { loadparm = menu_get_enum_boot_index(valid_entries); } debug_print_int("loadparm", loadparm); - IPL_assert(loadparm < MAX_BOOT_ENTRIES, "loadparm value greater than" - " maximum number of boot entries allowed"); + if (loadparm >= MAX_BOOT_ENTRIES) { + panic("loadparm value greater than max number of boot entries allowed"); + } + + if (!valid_entries[loadparm]) { + printf("The requested boot entry (%d) is invalid\n", loadparm); + panic("Invalid loadparm"); + } - zipl_run(&prog_table->entry[loadparm].scsi); /* no return */ + return zipl_run(&prog_table->entry[loadparm].scsi); } /*********************************************************************** @@ -677,8 +813,10 @@ static bool is_iso_bc_entry_compatible(IsoBcSection *s) if (s->unused || !s->sector_count) { return false; } - read_iso_sector(bswap32(s->load_rba), magic_sec, - "Failed to read image sector 0"); + if (virtio_read(bswap32(s->load_rba), magic_sec)) { + puts("Failed to read image sector 0"); + return false; + } /* Checking bytes 8 - 32 for S390 Linux magic */ return !memcmp(magic_sec + 8, linux_s390_magic, 24); @@ -691,28 +829,35 @@ static uint32_t sec_offset[ISO9660_MAX_DIR_DEPTH]; /* Remained directory space in bytes */ static uint32_t dir_rem[ISO9660_MAX_DIR_DEPTH]; -static inline uint32_t iso_get_file_size(uint32_t load_rba) +static inline long iso_get_file_size(uint32_t load_rba) { IsoVolDesc *vd = (IsoVolDesc *)sec; IsoDirHdr *cur_record = &vd->vd.primary.rootdir; uint8_t *temp = sec + ISO_SECTOR_SIZE; int level = 0; - read_iso_sector(ISO_PRIMARY_VD_SECTOR, sec, - "Failed to read ISO primary descriptor"); + if (virtio_read(ISO_PRIMARY_VD_SECTOR, sec)) { + puts("Failed to read ISO primary descriptor"); + return -EIO; + } + sec_loc[0] = iso_733_to_u32(cur_record->ext_loc); dir_rem[0] = 0; sec_offset[0] = 0; while (level >= 0) { - IPL_assert(sec_offset[level] <= ISO_SECTOR_SIZE, - "Directory tree structure violation"); + if (sec_offset[level] > ISO_SECTOR_SIZE) { + puts("Directory tree structure violation"); + return -EIO; + } cur_record = (IsoDirHdr *)(temp + sec_offset[level]); if (sec_offset[level] == 0) { - read_iso_sector(sec_loc[level], temp, - "Failed to read ISO 
directory"); + if (virtio_read(sec_loc[level], temp)) { + puts("Failed to read ISO directory"); + return -EIO; + } if (dir_rem[level] == 0) { /* Skip self and parent records */ dir_rem[level] = iso_733_to_u32(cur_record->data_len) - @@ -743,7 +888,7 @@ static inline uint32_t iso_get_file_size(uint32_t load_rba) if (cur_record->file_flags & 0x2) { /* Subdirectory */ if (level == ISO9660_MAX_DIR_DEPTH - 1) { - sclp_print("ISO-9660 directory depth limit exceeded\n"); + puts("ISO-9660 directory depth limit exceeded"); } else { level++; sec_loc[level] = iso_733_to_u32(cur_record->ext_loc); @@ -757,8 +902,10 @@ static inline uint32_t iso_get_file_size(uint32_t load_rba) if (dir_rem[level] == 0) { /* Nothing remaining */ level--; - read_iso_sector(sec_loc[level], temp, - "Failed to read ISO directory"); + if (virtio_read(sec_loc[level], temp)) { + puts("Failed to read ISO directory"); + return -EIO; + } } } @@ -773,19 +920,24 @@ static void load_iso_bc_entry(IsoBcSection *load) * is padded and ISO_SECTOR_SIZE bytes aligned */ uint32_t blks_to_load = bswap16(s.sector_count) >> ET_SECTOR_SHIFT; - uint32_t real_size = iso_get_file_size(bswap32(s.load_rba)); + long real_size = iso_get_file_size(bswap32(s.load_rba)); - if (real_size) { + if (real_size > 0) { /* Round up blocks to load */ blks_to_load = (real_size + ISO_SECTOR_SIZE - 1) / ISO_SECTOR_SIZE; - sclp_print("ISO boot image size verified\n"); + puts("ISO boot image size verified"); } else { - sclp_print("ISO boot image size could not be verified\n"); + puts("ISO boot image size could not be verified"); + if (real_size < 0) { + return; + } } - read_iso_boot_image(bswap32(s.load_rba), + if (read_iso_boot_image(bswap32(s.load_rba), (void *)((uint64_t)bswap16(s.load_segment)), - blks_to_load); + blks_to_load)) { + return; + } jump_to_low_kernel(); } @@ -808,17 +960,18 @@ static uint32_t find_iso_bc(void) return bswap32(et->bc_offset); } } - read_iso_sector(block_num++, sec, - "Failed to read ISO volume descriptor"); + if (virtio_read(block_num++, sec)) { + puts("Failed to read ISO volume descriptor"); + return 0; + } } return 0; } -static IsoBcSection *find_iso_bc_entry(void) +static IsoBcSection *find_iso_bc_entry(uint32_t offset) { IsoBcEntry *e = (IsoBcEntry *)sec; - uint32_t offset = find_iso_bc(); int i; unsigned int loadparm = get_loadparm_index(); @@ -826,11 +979,13 @@ static IsoBcSection *find_iso_bc_entry(void) return NULL; } - read_iso_sector(offset, sec, "Failed to read El Torito boot catalog"); + if (virtio_read(offset, sec)) { + puts("Failed to read El Torito boot catalog"); + return NULL; + } if (!is_iso_bc_valid(e)) { /* The validation entry is mandatory */ - panic("No valid boot catalog found!\n"); return NULL; } @@ -850,19 +1005,25 @@ static IsoBcSection *find_iso_bc_entry(void) } } - panic("No suitable boot entry found on ISO-9660 media!\n"); - return NULL; } -static void ipl_iso_el_torito(void) +static int ipl_iso_el_torito(void) { - IsoBcSection *s = find_iso_bc_entry(); + uint32_t offset = find_iso_bc(); + if (!offset) { + return 0; + } + + IsoBcSection *s = find_iso_bc_entry(offset); if (s) { - load_iso_bc_entry(s); - /* no return */ + load_iso_bc_entry(s); /* only return in error */ + return -1; } + + puts("No suitable boot entry found on ISO-9660 media!"); + return -EIO; } /** @@ -884,7 +1045,7 @@ static bool has_iso_signature(void) * Bus specific IPL sequences */ -static void zipl_load_vblk(void) +static int zipl_load_vblk(void) { int blksize = virtio_get_block_size(); @@ -892,26 +1053,30 @@ static void 
zipl_load_vblk(void) if (blksize != VIRTIO_ISO_BLOCK_SIZE) { virtio_assume_iso9660(); } - ipl_iso_el_torito(); + if (ipl_iso_el_torito()) { + return 0; + } } if (blksize != VIRTIO_DASD_DEFAULT_BLOCK_SIZE) { - sclp_print("Using guessed DASD geometry.\n"); + puts("Using guessed DASD geometry."); virtio_assume_eckd(); } - ipl_eckd(); + return ipl_eckd(); } -static void zipl_load_vscsi(void) +static int zipl_load_vscsi(void) { if (virtio_get_block_size() == VIRTIO_ISO_BLOCK_SIZE) { /* Is it an ISO image in non-CD drive? */ - ipl_iso_el_torito(); + if (ipl_iso_el_torito()) { + return 0; + } } - sclp_print("Using guessed DASD geometry.\n"); + puts("Using guessed DASD geometry."); virtio_assume_eckd(); - ipl_eckd(); + return ipl_eckd(); } /*********************************************************************** @@ -924,14 +1089,20 @@ void zipl_load(void) if (vdev->is_cdrom) { ipl_iso_el_torito(); - panic("\n! Cannot IPL this ISO image !\n"); + puts("Failed to IPL this ISO image!"); + return; } if (virtio_get_device_type() == VIRTIO_ID_NET) { - jump_to_IPL_code(vdev->netboot_start_addr); + netmain(); + puts("Failed to IPL from this network!"); + return; } - ipl_scsi(); + if (ipl_scsi()) { + puts("Failed to IPL from this SCSI device!"); + return; + } switch (virtio_get_device_type()) { case VIRTIO_ID_BLOCK: @@ -941,8 +1112,9 @@ void zipl_load(void) zipl_load_vscsi(); break; default: - panic("\n! Unknown IPL device type !\n"); + puts("Unknown IPL device type!"); + return; } - sclp_print("zIPL load failed.\n"); + puts("zIPL load failed!"); } diff --git a/pc-bios/s390-ccw/bootmap.h b/pc-bios/s390-ccw/bootmap.h index d4690a88c28..95943441d3d 100644 --- a/pc-bios/s390-ccw/bootmap.h +++ b/pc-bios/s390-ccw/bootmap.h @@ -16,6 +16,7 @@ typedef uint64_t block_number_t; #define NULL_BLOCK_NR 0xffffffffffffffffULL +#define ERROR_BLOCK_NR 0xfffffffffffffffeULL #define FREE_SPACE_FILLER '\xAA' @@ -336,9 +337,7 @@ static inline void print_volser(const void *volser) ebcdic_to_ascii((char *)volser, ascii, 6); ascii[6] = '\0'; - sclp_print("VOLSER=["); - sclp_print(ascii); - sclp_print("]\n"); + printf("VOLSER=[%s]\n", ascii); } static inline bool unused_space(const void *p, size_t size) @@ -387,17 +386,14 @@ static inline uint32_t iso_733_to_u32(uint64_t x) #define ISO_PRIMARY_VD_SECTOR 16 -static inline void read_iso_sector(uint32_t block_offset, void *buf, - const char *errmsg) -{ - IPL_assert(virtio_read_many(block_offset, buf, 1) == 0, errmsg); -} - -static inline void read_iso_boot_image(uint32_t block_offset, void *load_addr, +static inline int read_iso_boot_image(uint32_t block_offset, void *load_addr, uint32_t blks_to_load) { - IPL_assert(virtio_read_many(block_offset, load_addr, blks_to_load) == 0, - "Failed to read boot image!"); + if (virtio_read_many(block_offset, load_addr, blks_to_load)) { + puts("Failed to read boot image!"); + return -1; + } + return 0; } #define ISO9660_MAX_DIR_DEPTH 8 diff --git a/pc-bios/s390-ccw/cio.c b/pc-bios/s390-ccw/cio.c index 83ca27ab41d..5d543da73fc 100644 --- a/pc-bios/s390-ccw/cio.c +++ b/pc-bios/s390-ccw/cio.c @@ -11,7 +11,8 @@ * directory. 
*/ -#include "libc.h" +#include +#include #include "s390-ccw.h" #include "s390-arch.h" #include "helper.h" @@ -58,7 +59,8 @@ uint16_t cu_type(SubChannelId schid) }; if (do_cio(schid, CU_TYPE_UNKNOWN, ptr2u32(&sense_id_ccw), CCW_FMT1)) { - panic("Failed to run SenseID CCw\n"); + puts("Failed to run SenseID CCW"); + return CU_TYPE_UNKNOWN; } return sense_data.cu_type; @@ -90,9 +92,9 @@ static void print_eckd_dasd_sense_data(SenseDataEckdDasd *sd) char msgline[512]; if (sd->config_info & 0x8000) { - sclp_print("Eckd Dasd Sense Data (fmt 24-bytes):\n"); + puts("Eckd Dasd Sense Data (fmt 24-bytes):"); } else { - sclp_print("Eckd Dasd Sense Data (fmt 32-bytes):\n"); + puts("Eckd Dasd Sense Data (fmt 32-bytes):"); } strcat(msgline, " Sense Condition Flags :"); @@ -158,22 +160,21 @@ static void print_eckd_dasd_sense_data(SenseDataEckdDasd *sd) if (sd->status[1] & SNS_STAT2_IMPRECISE_END) { strcat(msgline, " [Imprecise-End]"); } - strcat(msgline, "\n"); - sclp_print(msgline); - - print_int(" Residual Count =", sd->res_count); - print_int(" Phys Drive ID =", sd->phys_drive_id); - print_int(" low cyl address =", sd->low_cyl_addr); - print_int(" head addr & hi cyl =", sd->head_high_cyl_addr); - print_int(" format/message =", sd->fmt_msg); - print_int(" fmt-dependent[0-7] =", sd->fmt_dependent_info[0]); - print_int(" fmt-dependent[8-15]=", sd->fmt_dependent_info[1]); - print_int(" prog action code =", sd->program_action_code); - print_int(" Configuration info =", sd->config_info); - print_int(" mcode / hi-cyl =", sd->mcode_hicyl); - print_int(" cyl & head addr [0]=", sd->cyl_head_addr[0]); - print_int(" cyl & head addr [1]=", sd->cyl_head_addr[1]); - print_int(" cyl & head addr [2]=", sd->cyl_head_addr[2]); + puts(msgline); + + printf(" Residual Count = 0x%X\n", sd->res_count); + printf(" Phys Drive ID = 0x%X\n", sd->phys_drive_id); + printf(" low cyl address = 0x%X\n", sd->low_cyl_addr); + printf(" head addr & hi cyl = 0x%X\n", sd->head_high_cyl_addr); + printf(" format/message = 0x%X\n", sd->fmt_msg); + printf(" fmt-dependent[0-7] = 0x%llX\n", sd->fmt_dependent_info[0]); + printf(" fmt-dependent[8-15]= 0x%llX\n", sd->fmt_dependent_info[1]); + printf(" prog action code = 0x%X\n", sd->program_action_code); + printf(" Configuration info = 0x%X\n", sd->config_info); + printf(" mcode / hi-cyl = 0x%X\n", sd->mcode_hicyl); + printf(" cyl & head addr [0]= 0x%X\n", sd->cyl_head_addr[0]); + printf(" cyl & head addr [1]= 0x%X\n", sd->cyl_head_addr[1]); + printf(" cyl & head addr [2]= 0x%X\n", sd->cyl_head_addr[2]); } static void print_irb_err(Irb *irb) @@ -182,7 +183,7 @@ static void print_irb_err(Irb *irb) uint64_t prev_ccw = *(uint64_t *)u32toptr(irb->scsw.cpa - 8); char msgline[256]; - sclp_print("Interrupt Response Block Data:\n"); + puts("Interrupt Response Block Data:"); strcat(msgline, " Function Ctrl :"); if (irb->scsw.ctrl & SCSW_FCTL_START_FUNC) { @@ -194,8 +195,7 @@ static void print_irb_err(Irb *irb) if (irb->scsw.ctrl & SCSW_FCTL_CLEAR_FUNC) { strcat(msgline, " [Clear]"); } - strcat(msgline, "\n"); - sclp_print(msgline); + puts(msgline); msgline[0] = '\0'; strcat(msgline, " Activity Ctrl :"); @@ -220,8 +220,7 @@ static void print_irb_err(Irb *irb) if (irb->scsw.ctrl & SCSW_ACTL_SUSPENDED) { strcat(msgline, " [Suspended]"); } - strcat(msgline, "\n"); - sclp_print(msgline); + puts(msgline); msgline[0] = '\0'; strcat(msgline, " Status Ctrl :"); @@ -240,9 +239,7 @@ static void print_irb_err(Irb *irb) if (irb->scsw.ctrl & SCSW_SCTL_STATUS_PEND) { strcat(msgline, " [Status-Pending]"); } - - 
strcat(msgline, "\n"); - sclp_print(msgline); + puts(msgline); msgline[0] = '\0'; strcat(msgline, " Device Status :"); @@ -270,8 +267,7 @@ static void print_irb_err(Irb *irb) if (irb->scsw.dstat & SCSW_DSTAT_UEXCP) { strcat(msgline, " [Unit-Exception]"); } - strcat(msgline, "\n"); - sclp_print(msgline); + puts(msgline); msgline[0] = '\0'; strcat(msgline, " Channel Status :"); @@ -299,12 +295,11 @@ static void print_irb_err(Irb *irb) if (irb->scsw.cstat & SCSW_CSTAT_CHAINCHK) { strcat(msgline, " [Chaining-Check]"); } - strcat(msgline, "\n"); - sclp_print(msgline); + puts(msgline); - print_int(" cpa=", irb->scsw.cpa); - print_int(" prev_ccw=", prev_ccw); - print_int(" this_ccw=", this_ccw); + printf(" cpa= 0x%X\n", irb->scsw.cpa); + printf(" prev_ccw= 0x%llX\n", prev_ccw); + printf(" this_ccw= 0x%llX\n", this_ccw); } /* @@ -341,7 +336,7 @@ static int __do_cio(SubChannelId schid, uint32_t ccw_addr, int fmt, Irb *irb) return -1; } if (rc) { - print_int("ssch failed with cc=", rc); + printf("ssch failed with cc= 0x%x\n", rc); return rc; } @@ -350,7 +345,7 @@ static int __do_cio(SubChannelId schid, uint32_t ccw_addr, int fmt, Irb *irb) /* collect status */ rc = tsch(schid, irb); if (rc) { - print_int("tsch failed with cc=", rc); + printf("tsch failed with cc= 0x%X\n", rc); } return rc; @@ -406,12 +401,12 @@ int do_cio(SubChannelId schid, uint16_t cutype, uint32_t ccw_addr, int fmt) continue; } - sclp_print("cio device error\n"); - print_int(" ssid ", schid.ssid); - print_int(" cssid ", schid.cssid); - print_int(" sch_no", schid.sch_no); - print_int(" ctrl-unit type", cutype); - sclp_print("\n"); + printf("cio device error\n"); + printf(" ssid 0x%X\n", schid.ssid); + printf(" cssid 0x%X\n", schid.cssid); + printf(" sch_no 0x%X\n", schid.sch_no); + printf(" ctrl-unit type 0x%X\n", cutype); + printf("\n"); print_irb_err(&irb); if (cutype == CU_TYPE_DASD_3990 || cutype == CU_TYPE_DASD_2107 || cutype == CU_TYPE_UNKNOWN) { diff --git a/pc-bios/s390-ccw/cio.h b/pc-bios/s390-ccw/cio.h index 8b18153deb4..6a5e86ba01f 100644 --- a/pc-bios/s390-ccw/cio.h +++ b/pc-bios/s390-ccw/cio.h @@ -361,6 +361,8 @@ typedef struct CcwSearchIdData { uint8_t record; } __attribute__((packed)) CcwSearchIdData; +extern SubChannelId net_schid; + int enable_mss_facility(void); void enable_subchannel(SubChannelId schid); uint16_t cu_type(SubChannelId schid); diff --git a/pc-bios/s390-ccw/dasd-ipl.c b/pc-bios/s390-ccw/dasd-ipl.c index 254bb1a15e3..babece95ea2 100644 --- a/pc-bios/s390-ccw/dasd-ipl.c +++ b/pc-bios/s390-ccw/dasd-ipl.c @@ -8,7 +8,8 @@ * directory. */ -#include "libc.h" +#include +#include #include "s390-ccw.h" #include "s390-arch.h" #include "dasd-ipl.h" @@ -82,7 +83,7 @@ static int run_dynamic_ccw_program(SubChannelId schid, uint16_t cutype, do { has_next = dynamic_cp_fixup(cpa, &next_cpa); - print_int("executing ccw chain at ", cpa); + printf("executing ccw chain at 0x%X\n", cpa); enable_prefixing(); rc = do_cio(schid, cutype, cpa, CCW_FMT0); disable_prefixing(); @@ -110,38 +111,29 @@ static void make_readipl(void) ccwIplRead->count = 0x18; /* Read 0x18 bytes of data */ } -static void run_readipl(SubChannelId schid, uint16_t cutype) +static int run_readipl(SubChannelId schid, uint16_t cutype) { - if (do_cio(schid, cutype, 0x00, CCW_FMT0)) { - panic("dasd-ipl: Failed to run Read IPL channel program\n"); - } + return do_cio(schid, cutype, 0x00, CCW_FMT0); } /* * The architecture states that IPL1 data should consist of a psw followed by * format-0 READ and TIC CCWs. Let's sanity check. 
*/ -static void check_ipl1(void) +static bool check_ipl1(void) { Ccw0 *ccwread = (Ccw0 *)0x08; Ccw0 *ccwtic = (Ccw0 *)0x10; - if (ccwread->cmd_code != CCW_CMD_DASD_READ || - ccwtic->cmd_code != CCW_CMD_TIC) { - panic("dasd-ipl: IPL1 data invalid. Is this disk really bootable?\n"); - } + return (ccwread->cmd_code == CCW_CMD_DASD_READ && + ccwtic->cmd_code == CCW_CMD_TIC); } -static void check_ipl2(uint32_t ipl2_addr) +static bool check_ipl2(uint32_t ipl2_addr) { Ccw0 *ccw = u32toptr(ipl2_addr); - if (ipl2_addr == 0x00) { - panic("IPL2 address invalid. Is this disk really bootable?\n"); - } - if (ccw->cmd_code == 0x00) { - panic("IPL2 ccw data invalid. Is this disk really bootable?\n"); - } + return (ipl2_addr != 0x00 && ccw->cmd_code != 0x00); } static uint32_t read_ipl2_addr(void) @@ -187,52 +179,67 @@ static void ipl1_fixup(void) ccwSearchTic->cda = ptr2u32(ccwSearchID); } -static void run_ipl1(SubChannelId schid, uint16_t cutype) +static int run_ipl1(SubChannelId schid, uint16_t cutype) { uint32_t startAddr = 0x08; - if (do_cio(schid, cutype, startAddr, CCW_FMT0)) { - panic("dasd-ipl: Failed to run IPL1 channel program\n"); - } + return do_cio(schid, cutype, startAddr, CCW_FMT0); } -static void run_ipl2(SubChannelId schid, uint16_t cutype, uint32_t addr) +static int run_ipl2(SubChannelId schid, uint16_t cutype, uint32_t addr) { - if (run_dynamic_ccw_program(schid, cutype, addr)) { - panic("dasd-ipl: Failed to run IPL2 channel program\n"); - } + return run_dynamic_ccw_program(schid, cutype, addr); } /* * Limitations in vfio-ccw support complicate the IPL process. Details can * be found in docs/devel/s390-dasd-ipl.rst */ -void dasd_ipl(SubChannelId schid, uint16_t cutype) +int dasd_ipl(SubChannelId schid, uint16_t cutype) { PSWLegacy *pswl = (PSWLegacy *) 0x00; uint32_t ipl2_addr; /* Construct Read IPL CCW and run it to read IPL1 from boot disk */ make_readipl(); - run_readipl(schid, cutype); + if (run_readipl(schid, cutype)) { + puts("Failed to run Read IPL channel program"); + return -EIO; + } + ipl2_addr = read_ipl2_addr(); - check_ipl1(); + + if (!check_ipl1()) { + puts("IPL1 invalid for DASD-IPL"); + return -EINVAL; + } /* * Fixup IPL1 channel program to account for vfio-ccw limitations, then run * it to read IPL2 channel program from boot disk. */ ipl1_fixup(); - run_ipl1(schid, cutype); - check_ipl2(ipl2_addr); + if (run_ipl1(schid, cutype)) { + puts("Failed to run IPL1 channel program"); + return -EIO; + } + + if (!check_ipl2(ipl2_addr)) { + puts("IPL2 invalid for DASD-IPL"); + return -EINVAL; + } /* * Run IPL2 channel program to read operating system code from boot disk */ - run_ipl2(schid, cutype, ipl2_addr); + if (run_ipl2(schid, cutype, ipl2_addr)) { + puts("Failed to run IPL2 channel program"); + return -EIO; + } /* Transfer control to the guest operating system */ pswl->mask |= PSW_MASK_EAMODE; /* Force z-mode */ pswl->addr |= PSW_MASK_BAMODE; /* ... 
*/ jump_to_low_kernel(); + return -1; } diff --git a/pc-bios/s390-ccw/dasd-ipl.h b/pc-bios/s390-ccw/dasd-ipl.h index c3948289062..eb1898c84af 100644 --- a/pc-bios/s390-ccw/dasd-ipl.h +++ b/pc-bios/s390-ccw/dasd-ipl.h @@ -11,6 +11,6 @@ #ifndef DASD_IPL_H #define DASD_IPL_H -void dasd_ipl(SubChannelId schid, uint16_t cutype); +int dasd_ipl(SubChannelId schid, uint16_t cutype); #endif /* DASD_IPL_H */ diff --git a/pc-bios/s390-ccw/iplb.h b/pc-bios/s390-ccw/iplb.h index cb6ac8a880a..08f259ff319 100644 --- a/pc-bios/s390-ccw/iplb.h +++ b/pc-bios/s390-ccw/iplb.h @@ -12,88 +12,16 @@ #ifndef IPLB_H #define IPLB_H -#define LOADPARM_LEN 8 - -struct IplBlockCcw { - uint8_t reserved0[85]; - uint8_t ssid; - uint16_t devno; - uint8_t vm_flags; - uint8_t reserved3[3]; - uint32_t vm_parm_len; - uint8_t nss_name[8]; - uint8_t vm_parm[64]; - uint8_t reserved4[8]; -} __attribute__ ((packed)); -typedef struct IplBlockCcw IplBlockCcw; - -struct IplBlockFcp { - uint8_t reserved1[305 - 1]; - uint8_t opt; - uint8_t reserved2[3]; - uint16_t reserved3; - uint16_t devno; - uint8_t reserved4[4]; - uint64_t wwpn; - uint64_t lun; - uint32_t bootprog; - uint8_t reserved5[12]; - uint64_t br_lba; - uint32_t scp_data_len; - uint8_t reserved6[260]; - uint8_t scp_data[]; -} __attribute__ ((packed)); -typedef struct IplBlockFcp IplBlockFcp; - -struct IplBlockQemuScsi { - uint32_t lun; - uint16_t target; - uint16_t channel; - uint8_t reserved0[77]; - uint8_t ssid; - uint16_t devno; -} __attribute__ ((packed)); -typedef struct IplBlockQemuScsi IplBlockQemuScsi; - -struct IplParameterBlock { - uint32_t len; - uint8_t reserved0[3]; - uint8_t version; - uint32_t blk0_len; - uint8_t pbt; - uint8_t flags; - uint16_t reserved01; - uint8_t loadparm[LOADPARM_LEN]; - union { - IplBlockCcw ccw; - IplBlockFcp fcp; - IplBlockQemuScsi scsi; - }; -} __attribute__ ((packed)); -typedef struct IplParameterBlock IplParameterBlock; +#ifndef QEMU_PACKED +#define QEMU_PACKED __attribute__((packed)) +#endif -extern IplParameterBlock iplb __attribute__((__aligned__(PAGE_SIZE))); - -#define QIPL_ADDRESS 0xcc - -/* Boot Menu flags */ -#define QIPL_FLAG_BM_OPTS_CMD 0x80 -#define QIPL_FLAG_BM_OPTS_ZIPL 0x40 - -/* - * This definition must be kept in sync with the definition - * in hw/s390x/ipl.h - */ -struct QemuIplParameters { - uint8_t qipl_flags; - uint8_t reserved1[3]; - uint64_t netboot_start_addr; - uint32_t boot_menu_timeout; - uint8_t reserved2[12]; -} __attribute__ ((packed)); -typedef struct QemuIplParameters QemuIplParameters; +#include +#include extern QemuIplParameters qipl; +extern IplParameterBlock iplb __attribute__((__aligned__(PAGE_SIZE))); +extern bool have_iplb; #define S390_IPL_TYPE_FCP 0x00 #define S390_IPL_TYPE_CCW 0x02 @@ -123,4 +51,26 @@ static inline bool set_iplb(IplParameterBlock *iplb) return manage_iplb(iplb, false); } +/* + * The IPL started on the device, but failed in some way. If the IPLB chain + * still has more devices left to try, use the next device in order. 
+ */ +static inline bool load_next_iplb(void) +{ + IplParameterBlock *next_iplb; + + if (qipl.chain_len < 1) { + return false; + } + + qipl.index++; + next_iplb = (IplParameterBlock *) qipl.next_iplb; + memcpy(&iplb, next_iplb, sizeof(IplParameterBlock)); + + qipl.chain_len--; + qipl.next_iplb = qipl.next_iplb + sizeof(IplParameterBlock); + + return true; +} + #endif /* IPLB_H */ diff --git a/pc-bios/s390-ccw/jump2ipl.c b/pc-bios/s390-ccw/jump2ipl.c index 78f5f46533a..86321d0f46c 100644 --- a/pc-bios/s390-ccw/jump2ipl.c +++ b/pc-bios/s390-ccw/jump2ipl.c @@ -6,7 +6,8 @@ * directory. */ -#include "libc.h" +#include +#include #include "s390-ccw.h" #include "s390-arch.h" @@ -32,16 +33,22 @@ static void jump_to_IPL_addr(void) /* should not return */ } -void jump_to_IPL_code(uint64_t address) +int jump_to_IPL_code(uint64_t address) { /* store the subsystem information _after_ the bootmap was loaded */ write_subsystem_identification(); write_iplb_location(); - /* prevent unknown IPL types in the guest */ + /* + * The IPLB for QEMU SCSI type devices must be rebuilt during re-ipl. The + * iplb.devno is set to the boot position of the target SCSI device. + */ if (iplb.pbt == S390_IPL_TYPE_QEMU_SCSI) { - iplb.pbt = S390_IPL_TYPE_CCW; - set_iplb(&iplb); + iplb.devno = qipl.index; + } + + if (have_iplb && !set_iplb(&iplb)) { + panic("Failed to set IPLB"); } /* @@ -57,7 +64,7 @@ void jump_to_IPL_code(uint64_t address) debug_print_int("set IPL addr to", address ?: *reset_psw & PSW_MASK_SHORT_ADDR); /* Ensure the guest output starts fresh */ - sclp_print("\n"); + printf("\n"); /* * HACK ALERT. @@ -67,7 +74,8 @@ void jump_to_IPL_code(uint64_t address) asm volatile("lghi %%r1,1\n\t" "diag %%r1,%%r1,0x308\n\t" : : : "1", "memory"); - panic("\n! IPL returns !\n"); + puts("IPL code jump failed"); + return -1; } void jump_to_low_kernel(void) diff --git a/pc-bios/s390-ccw/libc.c b/pc-bios/s390-ccw/libc.c deleted file mode 100644 index 3187923950e..00000000000 --- a/pc-bios/s390-ccw/libc.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * libc-style definitions and functions - * - * Copyright 2018 IBM Corp. - * Author(s): Collin L. Walling - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include "libc.h" -#include "s390-ccw.h" - -/** - * atoui: - * @str: the string to be converted. - * - * Given a string @str, convert it to an integer. Leading spaces are - * ignored. Any other non-numerical value will terminate the conversion - * and return 0. This function only handles numbers between 0 and - * UINT64_MAX inclusive. - * - * Returns: an integer converted from the string @str, or the number 0 - * if an error occurred. - */ -uint64_t atoui(const char *str) -{ - int val = 0; - - if (!str || !str[0]) { - return 0; - } - - while (*str == ' ') { - str++; - } - - while (*str) { - if (!isdigit(*(unsigned char *)str)) { - break; - } - val = val * 10 + *str - '0'; - str++; - } - - return val; -} - -/** - * uitoa: - * @num: an integer (base 10) to be converted. - * @str: a pointer to a string to store the conversion. - * @len: the length of the passed string. - * - * Given an integer @num, convert it to a string. The string @str must be - * allocated beforehand. The resulting string will be null terminated and - * returned. This function only handles numbers between 0 and UINT64_MAX - * inclusive. 
- * - * Returns: the string @str of the converted integer @num - */ -char *uitoa(uint64_t num, char *str, size_t len) -{ - long num_idx = 1; /* account for NUL */ - uint64_t tmp = num; - - IPL_assert(str != NULL, "uitoa: no space allocated to store string"); - - /* Count indices of num */ - while ((tmp /= 10) != 0) { - num_idx++; - } - - /* Check if we have enough space for num and NUL */ - IPL_assert(len > num_idx, "uitoa: array too small for conversion"); - - str[num_idx--] = '\0'; - - /* Convert int to string */ - while (num_idx >= 0) { - str[num_idx--] = num % 10 + '0'; - num /= 10; - } - - return str; -} diff --git a/pc-bios/s390-ccw/libc.h b/pc-bios/s390-ccw/libc.h deleted file mode 100644 index bcdc45732dd..00000000000 --- a/pc-bios/s390-ccw/libc.h +++ /dev/null @@ -1,89 +0,0 @@ -/* - * libc-style definitions and functions - * - * Copyright (c) 2013 Alexander Graf - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifndef S390_CCW_LIBC_H -#define S390_CCW_LIBC_H - -typedef unsigned long size_t; -typedef int bool; -typedef unsigned char uint8_t; -typedef unsigned short uint16_t; -typedef unsigned int uint32_t; -typedef unsigned long long uint64_t; - -static inline void *memset(void *s, int c, size_t n) -{ - size_t i; - unsigned char *p = s; - - for (i = 0; i < n; i++) { - p[i] = c; - } - - return s; -} - -static inline void *memcpy(void *s1, const void *s2, size_t n) -{ - uint8_t *dest = s1; - const uint8_t *src = s2; - size_t i; - - for (i = 0; i < n; i++) { - dest[i] = src[i]; - } - - return s1; -} - -static inline int memcmp(const void *s1, const void *s2, size_t n) -{ - size_t i; - const uint8_t *p1 = s1, *p2 = s2; - - for (i = 0; i < n; i++) { - if (p1[i] != p2[i]) { - return p1[i] > p2[i] ? 1 : -1; - } - } - - return 0; -} - -static inline size_t strlen(const char *str) -{ - size_t i; - for (i = 0; *str; i++) { - str++; - } - return i; -} - -static inline char *strcat(char *dest, const char *src) -{ - int i; - char *dest_end = dest + strlen(dest); - - for (i = 0; i <= strlen(src); i++) { - dest_end[i] = src[i]; - } - return dest; -} - -static inline int isdigit(int c) -{ - return (c >= '0') && (c <= '9'); -} - -uint64_t atoui(const char *str); -char *uitoa(uint64_t num, char *str, size_t len); - -#endif diff --git a/pc-bios/s390-ccw/main.c b/pc-bios/s390-ccw/main.c index 55067980982..76bf743900c 100644 --- a/pc-bios/s390-ccw/main.c +++ b/pc-bios/s390-ccw/main.c @@ -8,7 +8,9 @@ * directory. 
*/ -#include "libc.h" +#include +#include +#include #include "helper.h" #include "s390-arch.h" #include "s390-ccw.h" @@ -21,7 +23,7 @@ static SubChannelId blk_schid = { .one = 1 }; static char loadparm_str[LOADPARM_LEN + 1]; QemuIplParameters qipl; IplParameterBlock iplb __attribute__((__aligned__(PAGE_SIZE))); -static bool have_iplb; +bool have_iplb; static uint16_t cutype; LowCore *lowcore; /* Yes, this *is* a pointer to address 0 */ @@ -36,8 +38,13 @@ LowCore *lowcore; /* Yes, this *is* a pointer to address 0 */ */ void write_subsystem_identification(void) { - lowcore->subchannel_id = blk_schid.sch_id; - lowcore->subchannel_nr = blk_schid.sch_no; + if (cutype == CU_TYPE_VIRTIO && virtio_get_device_type() == VIRTIO_ID_NET) { + lowcore->subchannel_id = net_schid.sch_id; + lowcore->subchannel_nr = net_schid.sch_no; + } else { + lowcore->subchannel_id = blk_schid.sch_id; + lowcore->subchannel_nr = blk_schid.sch_no; + } lowcore->io_int_parm = 0; } @@ -48,9 +55,15 @@ void write_iplb_location(void) } } +static void copy_qipl(void) +{ + QemuIplParameters *early_qipl = (QemuIplParameters *)QIPL_ADDRESS; + memcpy(&qipl, early_qipl, sizeof(QemuIplParameters)); +} + unsigned int get_loadparm_index(void) { - return atoui(loadparm_str); + return atoi(loadparm_str); } static int is_dev_possibly_bootable(int dev_no, int sch_no) @@ -70,6 +83,9 @@ static int is_dev_possibly_bootable(int dev_no, int sch_no) enable_subchannel(blk_schid); cutype = cu_type(blk_schid); + if (cutype == CU_TYPE_UNKNOWN) { + return -EIO; + } /* * Note: we always have to run virtio_is_supported() here to make @@ -142,6 +158,7 @@ static void menu_setup(void) /* If loadparm was set to any other value, then do not enable menu */ if (memcmp(loadparm_str, LOADPARM_EMPTY, LOADPARM_LEN) != 0) { + menu_set_parms(qipl.qipl_flags & ~BOOT_MENU_FLAG_MASK, 0); return; } @@ -174,26 +191,34 @@ static void boot_setup(void) { char lpmsg[] = "LOADPARM=[________]\n"; - sclp_get_loadparm_ascii(loadparm_str); + if (have_iplb && memcmp(iplb.loadparm, NO_LOADPARM, LOADPARM_LEN) != 0) { + ebcdic_to_ascii((char *) iplb.loadparm, loadparm_str, LOADPARM_LEN); + } else { + sclp_get_loadparm_ascii(loadparm_str); + } + + if (have_iplb) { + menu_setup(); + } + memcpy(lpmsg + 10, loadparm_str, 8); - sclp_print(lpmsg); + puts(lpmsg); /* * Clear out any potential S390EP magic (see jump_to_low_kernel()), * so we don't taint our decision-making process during a reboot. */ memset((char *)S390EP, 0, 6); - - have_iplb = store_iplb(&iplb); } -static void find_boot_device(void) +static bool find_boot_device(void) { VDev *vdev = virtio_get_device(); - bool found; + bool found = false; switch (iplb.pbt) { case S390_IPL_TYPE_CCW: + vdev->scsi_device_selected = false; debug_print_int("device no. 
", iplb.ccw.devno); blk_schid.ssid = iplb.ccw.ssid & 0x3; debug_print_int("ssid ", blk_schid.ssid); @@ -208,28 +233,21 @@ static void find_boot_device(void) found = find_subch(iplb.scsi.devno); break; default: - panic("List-directed IPL not supported yet!\n"); + puts("Unsupported IPLB"); } - IPL_assert(found, "Boot device not found\n"); + return found; } static int virtio_setup(void) { VDev *vdev = virtio_get_device(); - QemuIplParameters *early_qipl = (QemuIplParameters *)QIPL_ADDRESS; + vdev->is_cdrom = false; int ret; - memcpy(&qipl, early_qipl, sizeof(QemuIplParameters)); - - if (have_iplb) { - menu_setup(); - } - switch (vdev->senseid.cu_model) { case VIRTIO_ID_NET: - sclp_print("Network boot device detected\n"); - vdev->netboot_start_addr = qipl.netboot_start_addr; + puts("Network boot device detected"); return 0; case VIRTIO_ID_BLOCK: ret = virtio_blk_setup_device(blk_schid); @@ -238,11 +256,13 @@ static int virtio_setup(void) ret = virtio_scsi_setup_device(blk_schid); break; default: - panic("\n! No IPL device available !\n"); + puts("\n! No IPL device available !\n"); + return -1; } - if (!ret) { - IPL_assert(virtio_ipl_disk_is_valid(), "No valid IPL device detected"); + if (!ret && !virtio_ipl_disk_is_valid()) { + puts("No valid IPL device detected"); + return -ENODEV; } return ret; @@ -253,16 +273,15 @@ static void ipl_boot_device(void) switch (cutype) { case CU_TYPE_DASD_3990: case CU_TYPE_DASD_2107: - dasd_ipl(blk_schid, cutype); /* no return */ + dasd_ipl(blk_schid, cutype); break; case CU_TYPE_VIRTIO: if (virtio_setup() == 0) { - zipl_load(); /* Only returns in case of errors */ + zipl_load(); } break; default: - print_int("Attempting to boot from unexpected device type", cutype); - panic("\nBoot failed.\n"); + printf("Attempting to boot from unexpected device type 0x%X\n", cutype); } } @@ -287,20 +306,28 @@ static void probe_boot_device(void) } } - sclp_print("Could not find a suitable boot device (none specified)\n"); + puts("Could not find a suitable boot device (none specified)"); } void main(void) { + copy_qipl(); sclp_setup(); css_setup(); - boot_setup(); - if (have_iplb) { - find_boot_device(); - ipl_boot_device(); - } else { + have_iplb = store_iplb(&iplb); + if (!have_iplb) { + boot_setup(); probe_boot_device(); } - panic("Failed to load OS from hard disk\n"); + while (have_iplb) { + boot_setup(); + if (have_iplb && find_boot_device()) { + ipl_boot_device(); + } + have_iplb = load_next_iplb(); + } + + panic("No suitable device for IPL. Halting..."); + } diff --git a/pc-bios/s390-ccw/menu.c b/pc-bios/s390-ccw/menu.c index d601952d3e1..eeaff78f870 100644 --- a/pc-bios/s390-ccw/menu.c +++ b/pc-bios/s390-ccw/menu.c @@ -9,7 +9,10 @@ * directory. 
*/ -#include "libc.h" +#include +#include +#include +#include #include "s390-ccw.h" #include "sclp.h" #include "s390-time.h" @@ -93,7 +96,7 @@ static int read_prompt(char *buf, size_t len) case KEYCODE_BACKSP: if (idx > 0) { buf[--idx] = 0; - sclp_print("\b \b"); + printf("\b \b"); } continue; case KEYCODE_ENTER: @@ -103,7 +106,7 @@ static int read_prompt(char *buf, size_t len) /* Echo input and add to buffer */ if (idx < len) { buf[idx++] = inp[0]; - sclp_print(inp); + printf("%s", inp); } } } @@ -140,30 +143,26 @@ static int get_index(void) } } - return atoui(buf); + return atoi(buf); } static void boot_menu_prompt(bool retry) { - char tmp[11]; - if (retry) { - sclp_print("\nError: undefined configuration" + printf("\nError: undefined configuration" "\nPlease choose:\n"); } else if (timeout > 0) { - sclp_print("Please choose (default will boot in "); - sclp_print(uitoa(timeout / 1000, tmp, sizeof(tmp))); - sclp_print(" seconds):\n"); + printf("Please choose (default will boot in %d seconds):\n", + (int)(timeout / 1000)); } else { - sclp_print("Please choose:\n"); + puts("Please choose:"); } } -static int get_boot_index(bool *valid_entries) +int menu_get_boot_index(bool *valid_entries) { int boot_index; bool retry = false; - char tmp[5]; do { boot_menu_prompt(retry); @@ -172,8 +171,7 @@ static int get_boot_index(bool *valid_entries) } while (boot_index < 0 || boot_index >= MAX_BOOT_ENTRIES || !valid_entries[boot_index]); - sclp_print("\nBooting entry #"); - sclp_print(uitoa(boot_index, tmp, sizeof(tmp))); + printf("\nBooting entry #%d", boot_index); return boot_index; } @@ -187,9 +185,9 @@ static int zipl_print_entry(const char *data, size_t len) buf[len] = '\n'; buf[len + 1] = '\0'; - sclp_print(buf); + printf("%s", buf); - return buf[0] == ' ' ? atoui(buf + 1) : atoui(buf); + return buf[0] == ' ' ? 
atoi(buf + 1) : atoi(buf); } int menu_get_zipl_boot_index(const char *menu_data) @@ -209,7 +207,7 @@ int menu_get_zipl_boot_index(const char *menu_data) } /* Print banner */ - sclp_print("s390-ccw zIPL Boot Menu\n\n"); + puts("s390-ccw zIPL Boot Menu\n"); menu_data += strlen(menu_data) + 1; /* Print entries */ @@ -221,38 +219,35 @@ int menu_get_zipl_boot_index(const char *menu_data) valid_entries[entry] = true; if (entry == 0) { - sclp_print("\n"); + printf("\n"); } } - sclp_print("\n"); - return get_boot_index(valid_entries); + printf("\n"); + return menu_get_boot_index(valid_entries); } int menu_get_enum_boot_index(bool *valid_entries) { - char tmp[3]; int i; - sclp_print("s390-ccw Enumerated Boot Menu.\n\n"); + puts("s390-ccw Enumerated Boot Menu.\n"); for (i = 0; i < MAX_BOOT_ENTRIES; i++) { if (valid_entries[i]) { if (i < 10) { - sclp_print(" "); + printf(" "); } - sclp_print("["); - sclp_print(uitoa(i, tmp, sizeof(tmp))); - sclp_print("]"); + printf("[%d]", i); if (i == 0) { - sclp_print(" default\n"); + printf(" default\n"); } - sclp_print("\n"); + printf("\n"); } } - sclp_print("\n"); - return get_boot_index(valid_entries); + printf("\n"); + return menu_get_boot_index(valid_entries); } void menu_set_parms(uint8_t boot_menu_flag, uint32_t boot_menu_timeout) diff --git a/pc-bios/s390-ccw/netboot.mak b/pc-bios/s390-ccw/netboot.mak deleted file mode 100644 index 046aa35587a..00000000000 --- a/pc-bios/s390-ccw/netboot.mak +++ /dev/null @@ -1,62 +0,0 @@ - -SLOF_DIR := $(SRC_PATH)/../../roms/SLOF - -NETOBJS := start.o sclp.o cio.o virtio.o virtio-net.o jump2ipl.o netmain.o - -LIBC_INC := -nostdinc -I$(SLOF_DIR)/lib/libc/include -LIBNET_INC := -I$(SLOF_DIR)/lib/libnet - -NETLDFLAGS := $(LDFLAGS) -Wl,-Ttext=0x7800000 - -$(NETOBJS): EXTRA_CFLAGS += $(LIBC_INC) $(LIBNET_INC) - -s390-netboot.elf: $(NETOBJS) libnet.a libc.a - $(call quiet-command,$(CC) $(NETLDFLAGS) -o $@ $^,Linking) - -s390-netboot.img: s390-netboot.elf - $(call quiet-command,$(STRIP) --strip-unneeded $< -o $@,Stripping $< into) - -# libc files: - -LIBC_CFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \ - -MMD -MP -MT $@ -MF $(@:%.o=%.d) - -CTYPE_OBJS = isdigit.o isxdigit.o toupper.o -%.o : $(SLOF_DIR)/lib/libc/ctype/%.c - $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) - -STRING_OBJS = strcat.o strchr.o strrchr.o strcpy.o strlen.o strncpy.o \ - strcmp.o strncmp.o strcasecmp.o strncasecmp.o strstr.o \ - memset.o memcpy.o memmove.o memcmp.o -%.o : $(SLOF_DIR)/lib/libc/string/%.c - $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) - -STDLIB_OBJS = atoi.o atol.o strtoul.o strtol.o rand.o malloc.o free.o -%.o : $(SLOF_DIR)/lib/libc/stdlib/%.c - $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) - -STDIO_OBJS = sprintf.o snprintf.o vfprintf.o vsnprintf.o vsprintf.o fprintf.o \ - printf.o putc.o puts.o putchar.o stdchnls.o fileno.o -%.o : $(SLOF_DIR)/lib/libc/stdio/%.c - $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) - -sbrk.o: $(SLOF_DIR)/slof/sbrk.c - $(call quiet-command,$(CC) $(LIBC_CFLAGS) -c -o $@ $<,Compiling) - -LIBCOBJS := $(STRING_OBJS) $(CTYPE_OBJS) $(STDLIB_OBJS) $(STDIO_OBJS) sbrk.o - -libc.a: $(LIBCOBJS) - $(call quiet-command,$(AR) -rc $@ $^,Creating static library) - -# libnet files: - -LIBNETOBJS := args.o dhcp.o dns.o icmpv6.o ipv6.o tcp.o udp.o bootp.o \ - dhcpv6.o ethernet.o ipv4.o ndp.o tftp.o pxelinux.o -LIBNETCFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \ - -DDHCPARCH=0x1F -MMD -MP -MT $@ -MF $(@:%.o=%.d) - -%.o : 
$(SLOF_DIR)/lib/libnet/%.c - $(call quiet-command,$(CC) $(LIBNETCFLAGS) -c -o $@ $<,Compiling) - -libnet.a: $(LIBNETOBJS) - $(call quiet-command,$(AR) -rc $@ $^,Creating static library) diff --git a/pc-bios/s390-ccw/netmain.c b/pc-bios/s390-ccw/netmain.c index 5cd619b2d60..a9521dff416 100644 --- a/pc-bios/s390-ccw/netmain.c +++ b/pc-bios/s390-ccw/netmain.c @@ -41,7 +41,6 @@ #define DEFAULT_TFTP_RETRIES 20 extern char _start[]; -void write_iplb_location(void) {} #define KERNEL_ADDR ((void *)0L) #define KERNEL_MAX_SIZE ((long)_start) @@ -50,10 +49,9 @@ void write_iplb_location(void) {} /* STSI 3.2.2 offset of first vmdb + offset of uuid inside vmdb */ #define STSI322_VMDB_UUID_OFFSET ((8 + 12) * 4) -IplParameterBlock iplb __attribute__((aligned(PAGE_SIZE))); static char cfgbuf[2048]; -static SubChannelId net_schid = { .one = 1 }; +SubChannelId net_schid = { .one = 1 }; static uint8_t mac[6]; static uint64_t dest_timer; @@ -155,19 +153,10 @@ static int tftp_load(filename_ip_t *fnip, void *buffer, int len) return rc; } -static int net_init(filename_ip_t *fn_ip) +static int net_init_ip(filename_ip_t *fn_ip) { int rc; - memset(fn_ip, 0, sizeof(filename_ip_t)); - - rc = virtio_net_init(mac); - if (rc < 0) { - puts("Could not initialize network device"); - return -101; - } - fn_ip->fd = rc; - printf(" Using MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); @@ -179,6 +168,14 @@ static int net_init(filename_ip_t *fn_ip) if (fn_ip->ip_version == 4) { set_ipv4_address(fn_ip->own_ip); } + } else if (rc == -2) { + printf("ARP request to TFTP server (%d.%d.%d.%d) failed\n", + (fn_ip->server_ip >> 24) & 0xFF, (fn_ip->server_ip >> 16) & 0xFF, + (fn_ip->server_ip >> 8) & 0xFF, fn_ip->server_ip & 0xFF); + return -102; + } else if (rc == -4 || rc == -3) { + puts("Can't obtain TFTP server IP address"); + return -107; } else { puts("Could not get IP address"); return -101; @@ -194,17 +191,6 @@ static int net_init(filename_ip_t *fn_ip) printf(" Using IPv6 address: %s\n", ip6_str); } - if (rc == -2) { - printf("ARP request to TFTP server (%d.%d.%d.%d) failed\n", - (fn_ip->server_ip >> 24) & 0xFF, (fn_ip->server_ip >> 16) & 0xFF, - (fn_ip->server_ip >> 8) & 0xFF, fn_ip->server_ip & 0xFF); - return -102; - } - if (rc == -4 || rc == -3) { - puts("Can't obtain TFTP server IP address"); - return -107; - } - printf(" Using TFTP server: "); if (fn_ip->ip_version == 4) { printf("%d.%d.%d.%d\n", @@ -223,11 +209,33 @@ static int net_init(filename_ip_t *fn_ip) return rc; } +static int net_init(filename_ip_t *fn_ip) +{ + int rc; + + memset(fn_ip, 0, sizeof(filename_ip_t)); + + rc = virtio_net_init(mac); + if (rc < 0) { + puts("Could not initialize network device"); + return -101; + } + fn_ip->fd = rc; + + rc = net_init_ip(fn_ip); + if (rc < 0) { + virtio_net_deinit(); + } + + return rc; +} + static void net_release(filename_ip_t *fn_ip) { if (fn_ip->ip_version == 4) { dhcp_send_release(fn_ip->fd); } + virtio_net_deinit(); } /** @@ -293,7 +301,7 @@ static int load_kernel_with_initrd(filename_ip_t *fn_ip, printf("Loading pxelinux.cfg entry '%s'\n", entry->label); if (!entry->kernel) { - printf("Kernel entry is missing!\n"); + puts("Kernel entry is missing!\n"); return -1; } @@ -324,22 +332,64 @@ static int load_kernel_with_initrd(filename_ip_t *fn_ip, return rc; } -#define MAX_PXELINUX_ENTRIES 16 +static int net_boot_menu(int num_ent, int def_ent, + struct pl_cfg_entry *entries) +{ + bool valid_entries[MAX_BOOT_ENTRIES] = { false }; + int idx; + + puts("\ns390-ccw pxelinux.cfg boot 
menu:\n"); + printf(" [0] default (%d)\n", def_ent + 1); + valid_entries[0] = true; + + for (idx = 1; idx <= num_ent; idx++) { + printf(" [%d] %s\n", idx, entries[idx - 1].label); + valid_entries[idx] = true; + } + putchar('\n'); + + idx = menu_get_boot_index(valid_entries); + putchar('\n'); + + return idx; +} + +static int net_select_and_load_kernel(filename_ip_t *fn_ip, + int num_ent, int selected, + struct pl_cfg_entry *entries) +{ + unsigned int loadparm = get_loadparm_index(); + + if (num_ent <= 0) { + return -1; + } + + if (menu_is_enabled_enum() && num_ent > 1) { + loadparm = net_boot_menu(num_ent, selected, entries); + } + + IPL_assert(loadparm <= num_ent, + "loadparm is set to an entry that is not available in the " + "pxelinux.cfg file!"); + + if (loadparm > 0) { + selected = loadparm - 1; + } + + return load_kernel_with_initrd(fn_ip, &entries[selected]); +} static int net_try_pxelinux_cfg(filename_ip_t *fn_ip) { - struct pl_cfg_entry entries[MAX_PXELINUX_ENTRIES]; + struct pl_cfg_entry entries[MAX_BOOT_ENTRIES]; int num_ent, def_ent = 0; num_ent = pxelinux_load_parse_cfg(fn_ip, mac, get_uuid(), DEFAULT_TFTP_RETRIES, cfgbuf, sizeof(cfgbuf), - entries, MAX_PXELINUX_ENTRIES, &def_ent); - if (num_ent > 0) { - return load_kernel_with_initrd(fn_ip, &entries[def_ent]); - } + entries, MAX_BOOT_ENTRIES, &def_ent); - return -1; + return net_select_and_load_kernel(fn_ip, num_ent, def_ent, entries); } /** @@ -420,15 +470,13 @@ static int net_try_direct_tftp_load(filename_ip_t *fn_ip) * a magic comment string. */ if (!strncasecmp("# pxelinux", cfgbuf, 10)) { - struct pl_cfg_entry entries[MAX_PXELINUX_ENTRIES]; + struct pl_cfg_entry entries[MAX_BOOT_ENTRIES]; int num_ent, def_ent = 0; num_ent = pxelinux_parse_cfg(cfgbuf, sizeof(cfgbuf), entries, - MAX_PXELINUX_ENTRIES, &def_ent); - if (num_ent <= 0) { - return -1; - } - return load_kernel_with_initrd(fn_ip, &entries[def_ent]); + MAX_BOOT_ENTRIES, &def_ent); + return net_select_and_load_kernel(fn_ip, num_ent, def_ent, + entries); } } @@ -438,15 +486,6 @@ static int net_try_direct_tftp_load(filename_ip_t *fn_ip) return rc; } -void write_subsystem_identification(void) -{ - SubChannelId *schid = (SubChannelId *) 184; - uint32_t *zeroes = (uint32_t *) 188; - - *schid = net_schid; - *zeroes = 0; -} - static bool find_net_dev(Schib *schib, int dev_no) { int i, r; @@ -475,7 +514,7 @@ static bool find_net_dev(Schib *schib, int dev_no) return false; } -static void virtio_setup(void) +static bool virtio_setup(void) { Schib schib; int ssid; @@ -489,7 +528,7 @@ static void virtio_setup(void) */ enable_mss_facility(); - if (store_iplb(&iplb)) { + if (have_iplb || store_iplb(&iplb)) { IPL_assert(iplb.pbt == S390_IPL_TYPE_CCW, "IPL_TYPE_CCW expected"); dev_no = iplb.ccw.devno; debug_print_int("device no. ", dev_no); @@ -506,22 +545,26 @@ static void virtio_setup(void) } } - IPL_assert(found, "No virtio net device found"); + return found; } -void main(void) +int netmain(void) { filename_ip_t fn_ip; int rc, fnlen; sclp_setup(); - sclp_print("Network boot starting...\n"); + puts("Network boot starting..."); - virtio_setup(); + if (!virtio_setup()) { + puts("No virtio net device found."); + return -1; + } rc = net_init(&fn_ip); if (rc) { - panic("Network initialization failed. 
Halting.\n"); + puts("Network initialization failed."); + return -1; } fnlen = strlen(fn_ip.filename); @@ -535,9 +578,10 @@ void main(void) net_release(&fn_ip); if (rc > 0) { - sclp_print("Network loading done, starting kernel...\n"); + puts("Network loading done, starting kernel..."); jump_to_low_kernel(); } - panic("Failed to load OS from network\n"); + puts("Failed to load OS from network."); + return -1; } diff --git a/pc-bios/s390-ccw/s390-ccw.h b/pc-bios/s390-ccw/s390-ccw.h index c977a52b501..b1dc35cdedf 100644 --- a/pc-bios/s390-ccw/s390-ccw.h +++ b/pc-bios/s390-ccw/s390-ccw.h @@ -13,6 +13,11 @@ /* #define DEBUG */ +#include +#include +#include +#include + typedef unsigned char u8; typedef unsigned short u16; typedef unsigned int u32; @@ -25,10 +30,8 @@ typedef unsigned long long u64; #define EIO 1 #define EBUSY 2 #define ENODEV 3 +#define EINVAL 4 -#ifndef NULL -#define NULL 0 -#endif #ifndef MIN #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #endif @@ -53,6 +56,9 @@ void write_iplb_location(void); unsigned int get_loadparm_index(void); void main(void); +/* netmain.c */ +int netmain(void); + /* sclp.c */ void sclp_print(const char *string); void sclp_set_write_mask(uint32_t receive_mask, uint32_t send_mask); @@ -72,7 +78,7 @@ void zipl_load(void); /* jump2ipl.c */ void write_reset_psw(uint64_t psw); -void jump_to_IPL_code(uint64_t address); +int jump_to_IPL_code(uint64_t address); void jump_to_low_kernel(void); /* menu.c */ @@ -81,13 +87,14 @@ int menu_get_zipl_boot_index(const char *menu_data); bool menu_is_enabled_zipl(void); int menu_get_enum_boot_index(bool *valid_entries); bool menu_is_enabled_enum(void); +int menu_get_boot_index(bool *valid_entries); #define MAX_BOOT_ENTRIES 31 __attribute__ ((__noreturn__)) static inline void panic(const char *string) { - sclp_print(string); + printf("ERROR: %s\n ", string); disabled_wait(); } @@ -109,20 +116,10 @@ static inline void fill_hex_val(char *out, void *ptr, unsigned size) } } -static inline void print_int(const char *desc, u64 addr) -{ - char out[] = ": 0xffffffffffffffff\n"; - - fill_hex_val(&out[4], &addr, sizeof(addr)); - - sclp_print(desc); - sclp_print(out); -} - static inline void debug_print_int(const char *desc, u64 addr) { #ifdef DEBUG - print_int(desc, addr); + printf("%s 0x%X\n", desc, addr); #endif } @@ -147,18 +144,14 @@ static inline void debug_print_addr(const char *desc, void *p) static inline void IPL_assert(bool term, const char *message) { if (!term) { - sclp_print("\n! "); - sclp_print(message); - panic(" !\n"); /* no return */ + panic(message); /* no return */ } } static inline void IPL_check(bool term, const char *message) { if (!term) { - sclp_print("\n! WARNING: "); - sclp_print(message); - sclp_print(" !\n"); + printf("WARNING: %s\n", message); } } diff --git a/pc-bios/s390-ccw/sclp.c b/pc-bios/s390-ccw/sclp.c index 7251f9af4df..4a07de018d2 100644 --- a/pc-bios/s390-ccw/sclp.c +++ b/pc-bios/s390-ccw/sclp.c @@ -8,7 +8,7 @@ * directory. 
*/ -#include "libc.h" +#include #include "s390-ccw.h" #include "sclp.h" @@ -101,11 +101,6 @@ long write(int fd, const void *str, size_t len) return len; } -void sclp_print(const char *str) -{ - write(1, str, strlen(str)); -} - void sclp_get_loadparm_ascii(char *loadparm) { diff --git a/pc-bios/s390-ccw/start.S b/pc-bios/s390-ccw/start.S index 061b06591cf..b70213e4124 100644 --- a/pc-bios/s390-ccw/start.S +++ b/pc-bios/s390-ccw/start.S @@ -112,9 +112,7 @@ io_new_code: lctlg %c6,%c6,0(%r15) br %r14 - .align 8 -bss_start_literal: - .quad __bss_start + .balign 8 disabled_wait_psw: .quad 0x0002000180000000,0x0000000000000000 enabled_wait_psw: @@ -124,8 +122,13 @@ external_new_mask: io_new_mask: .quad 0x0000000180000000 +.data + .balign 8 +bss_start_literal: + .quad __bss_start + .bss - .align 8 + .balign 8 stack: .space STACK_SIZE .size stack,STACK_SIZE diff --git a/pc-bios/s390-ccw/virtio-blkdev.c b/pc-bios/s390-ccw/virtio-blkdev.c index a81207b52eb..7b2d1e20f4d 100644 --- a/pc-bios/s390-ccw/virtio-blkdev.c +++ b/pc-bios/s390-ccw/virtio-blkdev.c @@ -8,7 +8,7 @@ * directory. */ -#include "libc.h" +#include #include "s390-ccw.h" #include "virtio.h" #include "virtio-scsi.h" @@ -59,7 +59,7 @@ int virtio_read_many(unsigned long sector, void *load_addr, int sec_num) case VIRTIO_ID_SCSI: return virtio_scsi_read_many(vdev, sector, load_addr, sec_num); } - panic("\n! No readable IPL device !\n"); + return -1; } @@ -73,13 +73,13 @@ unsigned long virtio_load_direct(unsigned long rec_list1, unsigned long rec_list unsigned long addr = (unsigned long)load_addr; if (sec_len != virtio_get_block_size()) { - return -1; + return 0; } - sclp_print("."); + printf("."); status = virtio_read_many(sec, (void *)addr, sec_num); if (status) { - panic("I/O Error"); + return 0; } addr += sec_num * virtio_get_block_size(); @@ -230,7 +230,7 @@ int virtio_blk_setup_device(SubChannelId schid) vdev->schid = schid; virtio_setup_ccw(vdev); - sclp_print("Using virtio-blk.\n"); + puts("Using virtio-blk."); return 0; } diff --git a/pc-bios/s390-ccw/virtio-net.c b/pc-bios/s390-ccw/virtio-net.c index 2fcb0a58c5b..301445bf978 100644 --- a/pc-bios/s390-ccw/virtio-net.c +++ b/pc-bios/s390-ccw/virtio-net.c @@ -51,11 +51,16 @@ int virtio_net_init(void *mac_addr) void *buf; int i; + rx_last_idx = 0; + vdev->guest_features[0] = VIRTIO_NET_F_MAC_BIT; virtio_setup_ccw(vdev); - IPL_assert(vdev->guest_features[0] & VIRTIO_NET_F_MAC_BIT, - "virtio-net device does not support the MAC address feature"); + if (!(vdev->guest_features[0] & VIRTIO_NET_F_MAC_BIT)) { + puts("virtio-net device does not support the MAC address feature"); + return -1; + } + memcpy(mac_addr, vdev->config.net.mac, ETH_ALEN); for (i = 0; i < 64; i++) { @@ -135,3 +140,8 @@ int recv(int fd, void *buf, int maxlen, int flags) return len; } + +void virtio_net_deinit(void) +{ + virtio_reset(virtio_get_device()); +} diff --git a/pc-bios/s390-ccw/virtio-scsi.c b/pc-bios/s390-ccw/virtio-scsi.c index d1a84b937c8..71db75ce7b4 100644 --- a/pc-bios/s390-ccw/virtio-scsi.c +++ b/pc-bios/s390-ccw/virtio-scsi.c @@ -9,7 +9,8 @@ * directory. */ -#include "libc.h" +#include +#include #include "s390-ccw.h" #include "virtio.h" #include "scsi.h" @@ -25,20 +26,22 @@ static uint8_t scsi_inquiry_std_response[256]; static ScsiInquiryEvpdPages scsi_inquiry_evpd_pages_response; static ScsiInquiryEvpdBl scsi_inquiry_evpd_bl_response; -static inline void vs_assert(bool term, const char **msgs) +static inline bool vs_assert(bool term, const char **msgs) { if (!term) { int i = 0; - sclp_print("\n! 
"); + printf("\n! "); while (msgs[i]) { - sclp_print(msgs[i++]); + printf("%s", msgs[i++]); } - panic(" !\n"); + puts(" !"); } + + return term; } -static void virtio_scsi_verify_response(VirtioScsiCmdResp *resp, +static bool virtio_scsi_verify_response(VirtioScsiCmdResp *resp, const char *title) { const char *mr[] = { @@ -55,8 +58,8 @@ static void virtio_scsi_verify_response(VirtioScsiCmdResp *resp, 0 }; - vs_assert(resp->response == VIRTIO_SCSI_S_OK, mr); - vs_assert(resp->status == CDB_STATUS_GOOD, ms); + return vs_assert(resp->response == VIRTIO_SCSI_S_OK, mr) && + vs_assert(resp->status == CDB_STATUS_GOOD, ms); } static void prepare_request(VDev *vdev, const void *cdb, int cdb_size, @@ -77,24 +80,31 @@ static void prepare_request(VDev *vdev, const void *cdb, int cdb_size, } } -static inline void vs_io_assert(bool term, const char *msg) +static inline bool vs_io_assert(bool term, const char *msg) { - if (!term) { - virtio_scsi_verify_response(&resp, msg); + if (!term && !virtio_scsi_verify_response(&resp, msg)) { + return false; } + + return true; } -static void vs_run(const char *title, VirtioCmd *cmd, VDev *vdev, +static int vs_run(const char *title, VirtioCmd *cmd, VDev *vdev, const void *cdb, int cdb_size, void *data, uint32_t data_size) { prepare_request(vdev, cdb, cdb_size, data, data_size); - vs_io_assert(virtio_run(vdev, VR_REQUEST, cmd) == 0, title); + if (!vs_io_assert(virtio_run(vdev, VR_REQUEST, cmd) == 0, title)) { + puts(title); + return -EIO; + } + + return 0; } /* SCSI protocol implementation routines */ -static bool scsi_inquiry(VDev *vdev, uint8_t evpd, uint8_t page, +static int scsi_inquiry(VDev *vdev, uint8_t evpd, uint8_t page, void *data, uint32_t data_size) { ScsiCdbInquiry cdb = { @@ -109,12 +119,13 @@ static bool scsi_inquiry(VDev *vdev, uint8_t evpd, uint8_t page, { data, data_size, VRING_DESC_F_WRITE }, }; - vs_run("inquiry", inquiry, vdev, &cdb, sizeof(cdb), data, data_size); + int ret = vs_run("inquiry", inquiry, + vdev, &cdb, sizeof(cdb), data, data_size); - return virtio_scsi_response_ok(&resp); + return ret ? ret : virtio_scsi_response_ok(&resp); } -static bool scsi_test_unit_ready(VDev *vdev) +static int scsi_test_unit_ready(VDev *vdev) { ScsiCdbTestUnitReady cdb = { .command = 0x00, @@ -130,7 +141,7 @@ static bool scsi_test_unit_ready(VDev *vdev) return virtio_scsi_response_ok(&resp); } -static bool scsi_report_luns(VDev *vdev, void *data, uint32_t data_size) +static int scsi_report_luns(VDev *vdev, void *data, uint32_t data_size) { ScsiCdbReportLuns cdb = { .command = 0xa0, @@ -143,13 +154,13 @@ static bool scsi_report_luns(VDev *vdev, void *data, uint32_t data_size) { data, data_size, VRING_DESC_F_WRITE }, }; - vs_run("report luns", report_luns, + int ret = vs_run("report luns", report_luns, vdev, &cdb, sizeof(cdb), data, data_size); - return virtio_scsi_response_ok(&resp); + return ret ? ret : virtio_scsi_response_ok(&resp); } -static bool scsi_read_10(VDev *vdev, +static int scsi_read_10(VDev *vdev, unsigned long sector, int sectors, void *data, unsigned int data_size) { @@ -167,12 +178,13 @@ static bool scsi_read_10(VDev *vdev, debug_print_int("read_10 sector", sector); debug_print_int("read_10 sectors", sectors); - vs_run("read(10)", read_10, vdev, &cdb, sizeof(cdb), data, data_size); + int ret = vs_run("read(10)", read_10, + vdev, &cdb, sizeof(cdb), data, data_size); - return virtio_scsi_response_ok(&resp); + return ret ? 
ret : virtio_scsi_response_ok(&resp); } -static bool scsi_read_capacity(VDev *vdev, +static int scsi_read_capacity(VDev *vdev, void *data, uint32_t data_size) { ScsiCdbReadCapacity16 cdb = { @@ -186,10 +198,10 @@ static bool scsi_read_capacity(VDev *vdev, { data, data_size, VRING_DESC_F_WRITE }, }; - vs_run("read capacity", read_capacity_16, + int ret = vs_run("read capacity", read_capacity_16, vdev, &cdb, sizeof(cdb), data, data_size); - return virtio_scsi_response_ok(&resp); + return ret ? ret : virtio_scsi_response_ok(&resp); } /* virtio-scsi routines */ @@ -206,7 +218,7 @@ static int virtio_scsi_locate_device(VDev *vdev) static uint8_t data[16 + 8 * 63]; ScsiLunReport *r = (void *) data; ScsiDevice *sdev = vdev->scsi_device; - int i, luns; + int i, ret, luns; /* QEMU has hardcoded channel #0 in many places. * If this hardcoded value is ever changed, we'll need to add code for @@ -232,15 +244,23 @@ static int virtio_scsi_locate_device(VDev *vdev) sdev->channel = channel; sdev->target = target; sdev->lun = 0; /* LUN has to be 0 for REPORT LUNS */ - if (!scsi_report_luns(vdev, data, sizeof(data))) { + ret = scsi_report_luns(vdev, data, sizeof(data)); + if (ret < 0) { + return ret; + } + + else if (ret == 0) { if (resp.response == VIRTIO_SCSI_S_BAD_TARGET) { continue; } - print_int("target", target); - virtio_scsi_verify_response(&resp, "SCSI cannot report LUNs"); + printf("target 0x%X\n", target); + if (!virtio_scsi_verify_response(&resp, "SCSI cannot report LUNs")) { + return -EIO; + } } + if (r->lun_list_len == 0) { - print_int("no LUNs for target", target); + printf("no LUNs for target 0x%X\n", target); continue; } luns = r->lun_list_len / 8; @@ -264,7 +284,7 @@ static int virtio_scsi_locate_device(VDev *vdev) } } - sclp_print("Warning: Could not locate a usable virtio-scsi device\n"); + puts("Warning: Could not locate a usable virtio-scsi device"); return -ENODEV; } @@ -282,7 +302,9 @@ int virtio_scsi_read_many(VDev *vdev, data_size = sector_count * virtio_get_block_size() * f; if (!scsi_read_10(vdev, sector * f, sector_count * f, load_addr, data_size)) { - virtio_scsi_verify_response(&resp, "virtio-scsi:read_many"); + if (!virtio_scsi_verify_response(&resp, "virtio-scsi:read_many")) { + return -1; + } } load_addr += data_size; sector += sector_count; @@ -351,11 +373,16 @@ static int virtio_scsi_setup(VDev *vdev) uint8_t code = resp.sense[0] & SCSI_SENSE_CODE_MASK; uint8_t sense_key = resp.sense[2] & SCSI_SENSE_KEY_MASK; - IPL_assert(resp.sense_len != 0, "virtio-scsi:setup: no SENSE data"); + if (resp.sense_len == 0) { + puts("virtio-scsi: setup: no SENSE data"); + return -EINVAL; + } - IPL_assert(retry_test_unit_ready && code == 0x70 && - sense_key == SCSI_SENSE_KEY_UNIT_ATTENTION, - "virtio-scsi:setup: cannot retry"); + if (!retry_test_unit_ready || code != 0x70 || + sense_key != SCSI_SENSE_KEY_UNIT_ATTENTION) { + puts("virtio-scsi:setup: cannot retry"); + return -EIO; + } /* retry on CHECK_CONDITION/UNIT_ATTENTION as it * may not designate a real error, but it may be @@ -366,30 +393,40 @@ static int virtio_scsi_setup(VDev *vdev) continue; } - virtio_scsi_verify_response(&resp, "virtio-scsi:setup"); + if (!virtio_scsi_verify_response(&resp, "virtio-scsi:setup")) { + return -1; + } } /* read and cache SCSI INQUIRY response */ - if (!scsi_inquiry(vdev, + ret = scsi_inquiry(vdev, SCSI_INQUIRY_STANDARD, SCSI_INQUIRY_STANDARD_NONE, scsi_inquiry_std_response, - sizeof(scsi_inquiry_std_response))) { - virtio_scsi_verify_response(&resp, "virtio-scsi:setup:inquiry"); + 
sizeof(scsi_inquiry_std_response)); + if (ret < 1) { + if (ret != 0 || !virtio_scsi_verify_response(&resp, + "virtio-scsi:setup:inquiry")) { + return -1; + } } if (virtio_scsi_inquiry_response_is_cdrom(scsi_inquiry_std_response)) { - sclp_print("SCSI CD-ROM detected.\n"); + puts("SCSI CD-ROM detected."); vdev->is_cdrom = true; vdev->scsi_block_size = VIRTIO_ISO_BLOCK_SIZE; } - if (!scsi_inquiry(vdev, + ret = scsi_inquiry(vdev, SCSI_INQUIRY_EVPD, SCSI_INQUIRY_EVPD_SUPPORTED_PAGES, evpd, - sizeof(*evpd))) { - virtio_scsi_verify_response(&resp, "virtio-scsi:setup:supported_pages"); + sizeof(*evpd)); + if (ret < 1) { + if (ret != 0 || !virtio_scsi_verify_response(&resp, + "virtio-scsi:setup:supported_pages")) { + return -1; + } } debug_print_int("EVPD length", evpd->page_length); @@ -401,12 +438,16 @@ static int virtio_scsi_setup(VDev *vdev) continue; } - if (!scsi_inquiry(vdev, + ret = scsi_inquiry(vdev, SCSI_INQUIRY_EVPD, SCSI_INQUIRY_EVPD_BLOCK_LIMITS, evpd_bl, - sizeof(*evpd_bl))) { - virtio_scsi_verify_response(&resp, "virtio-scsi:setup:blocklimits"); + sizeof(*evpd_bl)); + if (ret < 1) { + if (ret != 0 || !virtio_scsi_verify_response(&resp, + "virtio-scsi:setup:blocklimits")) { + return -1; + } } debug_print_int("max transfer", evpd_bl->max_transfer); @@ -422,8 +463,12 @@ static int virtio_scsi_setup(VDev *vdev) vdev->max_transfer = MIN_NON_ZERO(VIRTIO_SCSI_MAX_SECTORS, vdev->max_transfer); - if (!scsi_read_capacity(vdev, data, data_size)) { - virtio_scsi_verify_response(&resp, "virtio-scsi:setup:read_capacity"); + ret = scsi_read_capacity(vdev, data, data_size); + if (ret < 1) { + if (ret != 0 || !virtio_scsi_verify_response(&resp, + "virtio-scsi:setup:read_capacity")) { + return -1; + } } scsi_parse_capacity_report(data, &vdev->scsi_last_block, (uint32_t *) &vdev->scsi_block_size); @@ -438,12 +483,17 @@ int virtio_scsi_setup_device(SubChannelId schid) vdev->schid = schid; virtio_setup_ccw(vdev); - IPL_assert(vdev->config.scsi.sense_size == VIRTIO_SCSI_SENSE_SIZE, - "Config: sense size mismatch"); - IPL_assert(vdev->config.scsi.cdb_size == VIRTIO_SCSI_CDB_SIZE, - "Config: CDB size mismatch"); + if (vdev->config.scsi.sense_size != VIRTIO_SCSI_SENSE_SIZE) { + puts("Config: sense size mismatch"); + return -EINVAL; + } + + if (vdev->config.scsi.cdb_size != VIRTIO_SCSI_CDB_SIZE) { + puts("Config: CDB size mismatch"); + return -EINVAL; + } - sclp_print("Using virtio-scsi.\n"); + puts("Using virtio-scsi."); return virtio_scsi_setup(vdev); } diff --git a/pc-bios/s390-ccw/virtio.c b/pc-bios/s390-ccw/virtio.c index 5edd058d880..cd6c99c7e32 100644 --- a/pc-bios/s390-ccw/virtio.c +++ b/pc-bios/s390-ccw/virtio.c @@ -8,7 +8,7 @@ * directory. */ -#include "libc.h" +#include #include "s390-ccw.h" #include "cio.h" #include "virtio.h" @@ -217,26 +217,36 @@ int virtio_run(VDev *vdev, int vqid, VirtioCmd *cmd) return 0; } -void virtio_setup_ccw(VDev *vdev) +int virtio_reset(VDev *vdev) { - int i, rc, cfg_size = 0; + return run_ccw(vdev, CCW_CMD_VDEV_RESET, NULL, 0, false); +} + +int virtio_setup_ccw(VDev *vdev) +{ + int i, cfg_size = 0; uint8_t status; struct VirtioFeatureDesc { uint32_t features; uint8_t index; } __attribute__((packed)) feats; - IPL_assert(virtio_is_supported(vdev->schid), "PE"); + if (!virtio_is_supported(vdev->schid)) { + puts("Virtio unsupported for this device ID"); + return -ENODEV; + } /* device ID has been established now */ vdev->config.blk.blk_size = 0; /* mark "illegal" - setup started... 
*/ vdev->guessed_disk_nature = VIRTIO_GDN_NONE; - run_ccw(vdev, CCW_CMD_VDEV_RESET, NULL, 0, false); + virtio_reset(vdev); status = VIRTIO_CONFIG_S_ACKNOWLEDGE; - rc = run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false); - IPL_assert(rc == 0, "Could not write ACKNOWLEDGE status to host"); + if (run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false)) { + puts("Could not write ACKNOWLEDGE status to host"); + return -EIO; + } switch (vdev->senseid.cu_model) { case VIRTIO_ID_NET: @@ -255,27 +265,37 @@ void virtio_setup_ccw(VDev *vdev) cfg_size = sizeof(vdev->config.scsi); break; default: - panic("Unsupported virtio device\n"); + puts("Unsupported virtio device"); + return -ENODEV; } status |= VIRTIO_CONFIG_S_DRIVER; - rc = run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false); - IPL_assert(rc == 0, "Could not write DRIVER status to host"); + if (run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false)) { + puts("Could not write DRIVER status to host"); + return -EIO; + } /* Feature negotiation */ for (i = 0; i < ARRAY_SIZE(vdev->guest_features); i++) { feats.features = 0; feats.index = i; - rc = run_ccw(vdev, CCW_CMD_READ_FEAT, &feats, sizeof(feats), false); - IPL_assert(rc == 0, "Could not get features bits"); + if (run_ccw(vdev, CCW_CMD_READ_FEAT, &feats, sizeof(feats), false)) { + puts("Could not get features bits"); + return -EIO; + } + vdev->guest_features[i] &= bswap32(feats.features); feats.features = bswap32(vdev->guest_features[i]); - rc = run_ccw(vdev, CCW_CMD_WRITE_FEAT, &feats, sizeof(feats), false); - IPL_assert(rc == 0, "Could not set features bits"); + if (run_ccw(vdev, CCW_CMD_WRITE_FEAT, &feats, sizeof(feats), false)) { + puts("Could not set features bits"); + return -EIO; + } } - rc = run_ccw(vdev, CCW_CMD_READ_CONF, &vdev->config, cfg_size, false); - IPL_assert(rc == 0, "Could not get virtio device configuration"); + if (run_ccw(vdev, CCW_CMD_READ_CONF, &vdev->config, cfg_size, false)) { + puts("Could not get virtio device configuration"); + return -EIO; + } for (i = 0; i < vdev->nr_vqs; i++) { VqInfo info = { @@ -289,19 +309,27 @@ void virtio_setup_ccw(VDev *vdev) .num = 0, }; - rc = run_ccw(vdev, CCW_CMD_READ_VQ_CONF, &config, sizeof(config), false); - IPL_assert(rc == 0, "Could not get virtio device VQ configuration"); + if (run_ccw(vdev, CCW_CMD_READ_VQ_CONF, &config, sizeof(config), + false)) { + puts("Could not get virtio device VQ config"); + return -EIO; + } info.num = config.num; vring_init(&vdev->vrings[i], &info); vdev->vrings[i].schid = vdev->schid; - IPL_assert( - run_ccw(vdev, CCW_CMD_SET_VQ, &info, sizeof(info), false) == 0, - "Cannot set VQ info"); + if (run_ccw(vdev, CCW_CMD_SET_VQ, &info, sizeof(info), false)) { + puts("Cannot set VQ info"); + return -EIO; + } } status |= VIRTIO_CONFIG_S_DRIVER_OK; - rc = run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false); - IPL_assert(rc == 0, "Could not write DRIVER_OK status to host"); + if (run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false)) { + puts("Could not write DRIVER_OK status to host"); + return -EIO; + } + + return 0; } bool virtio_is_supported(SubChannelId schid) diff --git a/pc-bios/s390-ccw/virtio.h b/pc-bios/s390-ccw/virtio.h index 85bd9d1695c..5c5e808a500 100644 --- a/pc-bios/s390-ccw/virtio.h +++ b/pc-bios/s390-ccw/virtio.h @@ -253,7 +253,6 @@ struct VDev { uint8_t scsi_dev_heads; bool scsi_device_selected; ScsiDevice selected_scsi_device; - uint64_t netboot_start_addr; uint32_t max_transfer; uint32_t 
guest_features[2]; }; @@ -275,8 +274,10 @@ void vring_send_buf(VRing *vr, void *p, int len, int flags); int vr_poll(VRing *vr); int vring_wait_reply(void); int virtio_run(VDev *vdev, int vqid, VirtioCmd *cmd); -void virtio_setup_ccw(VDev *vdev); +int virtio_reset(VDev *vdev); +int virtio_setup_ccw(VDev *vdev); int virtio_net_init(void *mac_addr); +void virtio_net_deinit(void); #endif /* VIRTIO_H */ diff --git a/pc-bios/s390-netboot.img b/pc-bios/s390-netboot.img deleted file mode 100644 index 6908e49f068..00000000000 Binary files a/pc-bios/s390-netboot.img and /dev/null differ diff --git a/pc-bios/skiboot.lid b/pc-bios/skiboot.lid index 906bd512717..ffc77ee1112 100644 Binary files a/pc-bios/skiboot.lid and b/pc-bios/skiboot.lid differ diff --git a/pc-bios/slof.bin b/pc-bios/slof.bin index 27fed09f49a..4314e17b9dd 100644 Binary files a/pc-bios/slof.bin and b/pc-bios/slof.bin differ diff --git a/pc-bios/vgabios-ati.bin b/pc-bios/vgabios-ati.bin index e10cd263b81..011359e7c85 100644 Binary files a/pc-bios/vgabios-ati.bin and b/pc-bios/vgabios-ati.bin differ diff --git a/pc-bios/vgabios-bochs-display.bin b/pc-bios/vgabios-bochs-display.bin index 416036d9868..1d233afe69c 100644 Binary files a/pc-bios/vgabios-bochs-display.bin and b/pc-bios/vgabios-bochs-display.bin differ diff --git a/pc-bios/vgabios-cirrus.bin b/pc-bios/vgabios-cirrus.bin index 4ffaa43d6f8..f7b06f214c4 100644 Binary files a/pc-bios/vgabios-cirrus.bin and b/pc-bios/vgabios-cirrus.bin differ diff --git a/pc-bios/vgabios-qxl.bin b/pc-bios/vgabios-qxl.bin index 1b7a3831ef9..50dfeb208a1 100644 Binary files a/pc-bios/vgabios-qxl.bin and b/pc-bios/vgabios-qxl.bin differ diff --git a/pc-bios/vgabios-ramfb.bin b/pc-bios/vgabios-ramfb.bin index dba6cb8e322..b72279fcf33 100644 Binary files a/pc-bios/vgabios-ramfb.bin and b/pc-bios/vgabios-ramfb.bin differ diff --git a/pc-bios/vgabios-stdvga.bin b/pc-bios/vgabios-stdvga.bin index 0d541c5530d..5b48ca860af 100644 Binary files a/pc-bios/vgabios-stdvga.bin and b/pc-bios/vgabios-stdvga.bin differ diff --git a/pc-bios/vgabios-virtio.bin b/pc-bios/vgabios-virtio.bin index 2ce35576243..f580c33794a 100644 Binary files a/pc-bios/vgabios-virtio.bin and b/pc-bios/vgabios-virtio.bin differ diff --git a/pc-bios/vgabios-vmware.bin b/pc-bios/vgabios-vmware.bin index b7cab15593e..03b6dbd77d2 100644 Binary files a/pc-bios/vgabios-vmware.bin and b/pc-bios/vgabios-vmware.bin differ diff --git a/pc-bios/vgabios.bin b/pc-bios/vgabios.bin index ee748f6599d..3f71aae1715 100644 Binary files a/pc-bios/vgabios.bin and b/pc-bios/vgabios.bin differ diff --git a/plugins/api-system.c b/plugins/api-system.c new file mode 100644 index 00000000000..cc190b167ea --- /dev/null +++ b/plugins/api-system.c @@ -0,0 +1,131 @@ +/* + * QEMU Plugin API - System specific implementations + * + * This provides the APIs that have a specific system implementation + * or are only relevant to system-mode. + * + * Copyright (C) 2017, Emilio G. Cota + * Copyright (C) 2019-2025, Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qapi/error.h" +#include "migration/blocker.h" +#include "hw/boards.h" +#include "qemu/plugin-memory.h" +#include "qemu/plugin.h" + +/* + * In system mode we cannot trace the binary being executed so the + * helpers all return NULL/0. 
+ */ +const char *qemu_plugin_path_to_binary(void) +{ + return NULL; +} + +uint64_t qemu_plugin_start_code(void) +{ + return 0; +} + +uint64_t qemu_plugin_end_code(void) +{ + return 0; +} + +uint64_t qemu_plugin_entry_code(void) +{ + return 0; +} + +/* + * Virtual Memory queries + */ + +static __thread struct qemu_plugin_hwaddr hwaddr_info; + +struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info, + uint64_t vaddr) +{ + CPUState *cpu = current_cpu; + unsigned int mmu_idx = get_mmuidx(info); + enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info); + hwaddr_info.is_store = (rw & QEMU_PLUGIN_MEM_W) != 0; + + assert(mmu_idx < NB_MMU_MODES); + + if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx, + hwaddr_info.is_store, &hwaddr_info)) { + error_report("invalid use of qemu_plugin_get_hwaddr"); + return NULL; + } + + return &hwaddr_info; +} + +bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr) +{ + return haddr->is_io; +} + +uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr) +{ + if (haddr) { + return haddr->phys_addr; + } + return 0; +} + +const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h) +{ + if (h && h->is_io) { + MemoryRegion *mr = h->mr; + if (!mr->name) { + unsigned maddr = (uintptr_t)mr; + g_autofree char *temp = g_strdup_printf("anon%08x", maddr); + return g_intern_string(temp); + } else { + return g_intern_string(mr->name); + } + } else { + return g_intern_static_string("RAM"); + } +} + +/* + * Time control + */ +static bool has_control; +static Error *migration_blocker; + +const void *qemu_plugin_request_time_control(void) +{ + if (!has_control) { + has_control = true; + error_setg(&migration_blocker, + "TCG plugin time control does not support migration"); + migrate_add_blocker(&migration_blocker, NULL); + return &has_control; + } + return NULL; +} + +static void advance_virtual_time__async(CPUState *cpu, run_on_cpu_data data) +{ + int64_t new_time = data.host_ulong; + qemu_clock_advance_virtual_time(new_time); +} + +void qemu_plugin_update_ns(const void *handle, int64_t new_time) +{ + if (handle == &has_control) { + /* Need to execute out of cpu_exec, so bql can be locked. */ + async_run_on_cpu(current_cpu, + advance_virtual_time__async, + RUN_ON_CPU_HOST_ULONG(new_time)); + } +} diff --git a/plugins/api-user.c b/plugins/api-user.c new file mode 100644 index 00000000000..28704a89e89 --- /dev/null +++ b/plugins/api-user.c @@ -0,0 +1,57 @@ +/* + * QEMU Plugin API - user-mode only implementations + * + * This provides the APIs that have a user-mode specific + * implementations or are only relevant to user-mode. + * + * Copyright (C) 2017, Emilio G. Cota + * Copyright (C) 2019-2025, Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/plugin.h" +#include "exec/log.h" + +/* + * Virtual Memory queries - these are all NOPs for user-mode which + * only ever has visibility of virtual addresses. 
+ */ + +struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info, + uint64_t vaddr) +{ + return NULL; +} + +bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr) +{ + return false; +} + +uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr) +{ + return 0; +} + +const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h) +{ + return g_intern_static_string("Invalid"); +} + +/* + * Time control - for user mode the only real time is wall clock time + * so realistically all you can do in user mode is slow down execution + * which doesn't require the ability to mess with the clock. + */ + +const void *qemu_plugin_request_time_control(void) +{ + return NULL; +} + +void qemu_plugin_update_ns(const void *handle, int64_t new_time) +{ + qemu_log_mask(LOG_UNIMP, "user-mode can't control time"); +} diff --git a/plugins/api.c b/plugins/api.c index 2ff13d09de6..eac04cc1f6b 100644 --- a/plugins/api.c +++ b/plugins/api.c @@ -39,25 +39,14 @@ #include "qemu/main-loop.h" #include "qemu/plugin.h" #include "qemu/log.h" -#include "qemu/timer.h" +#include "system/memory.h" #include "tcg/tcg.h" -#include "exec/exec-all.h" #include "exec/gdbstub.h" +#include "exec/target_page.h" +#include "exec/translation-block.h" #include "exec/translator.h" #include "disas/disas.h" #include "plugin.h" -#ifndef CONFIG_USER_ONLY -#include "qapi/error.h" -#include "migration/blocker.h" -#include "exec/ram_addr.h" -#include "qemu/plugin-memory.h" -#include "hw/boards.h" -#else -#include "qemu.h" -#ifdef CONFIG_LINUX -#include "loader.h" -#endif -#endif /* Uninstall and Reset handlers */ @@ -249,12 +238,10 @@ uint64_t qemu_plugin_tb_vaddr(const struct qemu_plugin_tb *tb) struct qemu_plugin_insn * qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx) { - struct qemu_plugin_insn *insn; if (unlikely(idx >= tb->n)) { return NULL; } - insn = g_ptr_array_index(tb->insns, idx); - return insn; + return g_ptr_array_index(tb->insns, idx); } /* @@ -286,7 +273,7 @@ uint64_t qemu_plugin_insn_vaddr(const struct qemu_plugin_insn *insn) void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn) { const DisasContextBase *db = tcg_ctx->plugin_db; - vaddr page0_last = db->pc_first | ~TARGET_PAGE_MASK; + vaddr page0_last = db->pc_first | ~qemu_target_page_mask(); if (db->fake_insn) { return NULL; @@ -351,74 +338,37 @@ bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info) return get_plugin_meminfo_rw(info) & QEMU_PLUGIN_MEM_W; } -/* - * Virtual Memory queries - */ - -#ifdef CONFIG_SOFTMMU -static __thread struct qemu_plugin_hwaddr hwaddr_info; -#endif - -struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info, - uint64_t vaddr) -{ -#ifdef CONFIG_SOFTMMU - CPUState *cpu = current_cpu; - unsigned int mmu_idx = get_mmuidx(info); - enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info); - hwaddr_info.is_store = (rw & QEMU_PLUGIN_MEM_W) != 0; - - assert(mmu_idx < NB_MMU_MODES); - - if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx, - hwaddr_info.is_store, &hwaddr_info)) { - error_report("invalid use of qemu_plugin_get_hwaddr"); - return NULL; - } - - return &hwaddr_info; -#else - return NULL; -#endif -} - -bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr) -{ -#ifdef CONFIG_SOFTMMU - return haddr->is_io; -#else - return false; -#endif -} - -uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr) -{ -#ifdef CONFIG_SOFTMMU - if (haddr) { - return haddr->phys_addr; +qemu_plugin_mem_value 
qemu_plugin_mem_get_value(qemu_plugin_meminfo_t info) +{ + uint64_t low = current_cpu->neg.plugin_mem_value_low; + qemu_plugin_mem_value value; + + switch (qemu_plugin_mem_size_shift(info)) { + case 0: + value.type = QEMU_PLUGIN_MEM_VALUE_U8; + value.data.u8 = (uint8_t)low; + break; + case 1: + value.type = QEMU_PLUGIN_MEM_VALUE_U16; + value.data.u16 = (uint16_t)low; + break; + case 2: + value.type = QEMU_PLUGIN_MEM_VALUE_U32; + value.data.u32 = (uint32_t)low; + break; + case 3: + value.type = QEMU_PLUGIN_MEM_VALUE_U64; + value.data.u64 = low; + break; + case 4: + value.type = QEMU_PLUGIN_MEM_VALUE_U128; + value.data.u128.low = low; + value.data.u128.high = current_cpu->neg.plugin_mem_value_high; + break; + default: + g_assert_not_reached(); } -#endif - return 0; -} - -const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h) -{ -#ifdef CONFIG_SOFTMMU - if (h && h->is_io) { - MemoryRegion *mr = h->mr; - if (!mr->name) { - unsigned maddr = (uintptr_t)mr; - g_autofree char *temp = g_strdup_printf("anon%08x", maddr); - return g_intern_string(temp); - } else { - return g_intern_string(mr->name); - } - } else { - return g_intern_static_string("RAM"); - } -#else - return g_intern_static_string("Invalid"); -#endif + return value; } int qemu_plugin_num_vcpus(void) @@ -439,49 +389,6 @@ bool qemu_plugin_bool_parse(const char *name, const char *value, bool *ret) return name && value && qapi_bool_parse(name, value, ret, NULL); } -/* - * Binary path, start and end locations - */ -const char *qemu_plugin_path_to_binary(void) -{ - char *path = NULL; -#ifdef CONFIG_USER_ONLY - TaskState *ts = get_task_state(current_cpu); - path = g_strdup(ts->bprm->filename); -#endif - return path; -} - -uint64_t qemu_plugin_start_code(void) -{ - uint64_t start = 0; -#ifdef CONFIG_USER_ONLY - TaskState *ts = get_task_state(current_cpu); - start = ts->info->start_code; -#endif - return start; -} - -uint64_t qemu_plugin_end_code(void) -{ - uint64_t end = 0; -#ifdef CONFIG_USER_ONLY - TaskState *ts = get_task_state(current_cpu); - end = ts->info->end_code; -#endif - return end; -} - -uint64_t qemu_plugin_entry_code(void) -{ - uint64_t entry = 0; -#ifdef CONFIG_USER_ONLY - TaskState *ts = get_task_state(current_cpu); - entry = ts->info->entry; -#endif - return entry; -} - /* * Create register handles. 
* @@ -531,9 +438,159 @@ int qemu_plugin_read_register(struct qemu_plugin_register *reg, GByteArray *buf) { g_assert(current_cpu); + if (qemu_plugin_get_cb_flags() == QEMU_PLUGIN_CB_NO_REGS) { + return -1; + } + return gdb_read_register(current_cpu, buf, GPOINTER_TO_INT(reg) - 1); } +int qemu_plugin_write_register(struct qemu_plugin_register *reg, + GByteArray *buf) +{ + g_assert(current_cpu); + + if (buf->len == 0 || qemu_plugin_get_cb_flags() != QEMU_PLUGIN_CB_RW_REGS) { + return -1; + } + + return gdb_write_register(current_cpu, buf->data, GPOINTER_TO_INT(reg) - 1); +} + +bool qemu_plugin_read_memory_vaddr(uint64_t addr, GByteArray *data, size_t len) +{ + g_assert(current_cpu); + + if (len == 0) { + return false; + } + + g_byte_array_set_size(data, len); + + int result = cpu_memory_rw_debug(current_cpu, addr, data->data, + data->len, false); + + if (result < 0) { + return false; + } + + return true; +} + +bool qemu_plugin_write_memory_vaddr(uint64_t addr, GByteArray *data) +{ + g_assert(current_cpu); + + if (data->len == 0) { + return false; + } + + int result = cpu_memory_rw_debug(current_cpu, addr, data->data, + data->len, true); + + if (result < 0) { + return false; + } + + return true; +} + +enum qemu_plugin_hwaddr_operation_result +qemu_plugin_read_memory_hwaddr(hwaddr addr, GByteArray *data, size_t len) +{ +#ifdef CONFIG_SOFTMMU + if (len == 0) { + return QEMU_PLUGIN_HWADDR_OPERATION_ERROR; + } + + g_assert(current_cpu); + + + int as_idx = cpu_asidx_from_attrs(current_cpu, MEMTXATTRS_UNSPECIFIED); + AddressSpace *as = cpu_get_address_space(current_cpu, as_idx); + + if (as == NULL) { + return QEMU_PLUGIN_HWADDR_OPERATION_INVALID_ADDRESS_SPACE; + } + + g_byte_array_set_size(data, len); + MemTxResult res = address_space_rw(as, addr, + MEMTXATTRS_UNSPECIFIED, data->data, + data->len, false); + + switch (res) { + case MEMTX_OK: + return QEMU_PLUGIN_HWADDR_OPERATION_OK; + case MEMTX_ERROR: + return QEMU_PLUGIN_HWADDR_OPERATION_DEVICE_ERROR; + case MEMTX_DECODE_ERROR: + return QEMU_PLUGIN_HWADDR_OPERATION_INVALID_ADDRESS; + case MEMTX_ACCESS_ERROR: + return QEMU_PLUGIN_HWADDR_OPERATION_ACCESS_DENIED; + default: + return QEMU_PLUGIN_HWADDR_OPERATION_ERROR; + } +#else + return QEMU_PLUGIN_HWADDR_OPERATION_ERROR; +#endif +} + +enum qemu_plugin_hwaddr_operation_result +qemu_plugin_write_memory_hwaddr(hwaddr addr, GByteArray *data) +{ +#ifdef CONFIG_SOFTMMU + if (data->len == 0) { + return QEMU_PLUGIN_HWADDR_OPERATION_ERROR; + } + + g_assert(current_cpu); + + int as_idx = cpu_asidx_from_attrs(current_cpu, MEMTXATTRS_UNSPECIFIED); + AddressSpace *as = cpu_get_address_space(current_cpu, as_idx); + + if (as == NULL) { + return QEMU_PLUGIN_HWADDR_OPERATION_INVALID_ADDRESS_SPACE; + } + + MemTxResult res = address_space_rw(as, addr, + MEMTXATTRS_UNSPECIFIED, data->data, + data->len, true); + switch (res) { + case MEMTX_OK: + return QEMU_PLUGIN_HWADDR_OPERATION_OK; + case MEMTX_ERROR: + return QEMU_PLUGIN_HWADDR_OPERATION_DEVICE_ERROR; + case MEMTX_DECODE_ERROR: + return QEMU_PLUGIN_HWADDR_OPERATION_INVALID_ADDRESS; + case MEMTX_ACCESS_ERROR: + return QEMU_PLUGIN_HWADDR_OPERATION_ACCESS_DENIED; + default: + return QEMU_PLUGIN_HWADDR_OPERATION_ERROR; + } +#else + return QEMU_PLUGIN_HWADDR_OPERATION_ERROR; +#endif +} + +bool qemu_plugin_translate_vaddr(uint64_t vaddr, uint64_t *hwaddr) +{ +#ifdef CONFIG_SOFTMMU + g_assert(current_cpu); + + uint64_t res = cpu_get_phys_page_debug(current_cpu, vaddr); + + if (res == (uint64_t)-1) { + return false; + } + + *hwaddr = res | (vaddr & ~TARGET_PAGE_MASK); + + 
return true; +#else + return false; +#endif +} + struct qemu_plugin_scoreboard *qemu_plugin_scoreboard_new(size_t element_size) { return plugin_scoreboard_new(element_size); @@ -587,44 +644,3 @@ uint64_t qemu_plugin_u64_sum(qemu_plugin_u64 entry) return total; } -/* - * Time control - */ -static bool has_control; -#ifdef CONFIG_SOFTMMU -static Error *migration_blocker; -#endif - -const void *qemu_plugin_request_time_control(void) -{ - if (!has_control) { - has_control = true; -#ifdef CONFIG_SOFTMMU - error_setg(&migration_blocker, - "TCG plugin time control does not support migration"); - migrate_add_blocker(&migration_blocker, NULL); -#endif - return &has_control; - } - return NULL; -} - -#ifdef CONFIG_SOFTMMU -static void advance_virtual_time__async(CPUState *cpu, run_on_cpu_data data) -{ - int64_t new_time = data.host_ulong; - qemu_clock_advance_virtual_time(new_time); -} -#endif - -void qemu_plugin_update_ns(const void *handle, int64_t new_time) -{ -#ifdef CONFIG_SOFTMMU - if (handle == &has_control) { - /* Need to execute out of cpu_exec, so bql can be locked. */ - async_run_on_cpu(current_cpu, - advance_virtual_time__async, - RUN_ON_CPU_HOST_ULONG(new_time)); - } -#endif -} diff --git a/plugins/core.c b/plugins/core.c index 12c67b4b4eb..c6e9ef14784 100644 --- a/plugins/core.c +++ b/plugins/core.c @@ -12,22 +12,15 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ #include "qemu/osdep.h" -#include "qemu/error-report.h" -#include "qemu/config-file.h" -#include "qapi/error.h" #include "qemu/lockable.h" #include "qemu/option.h" #include "qemu/plugin.h" +#include "qemu/qemu-plugin.h" #include "qemu/queue.h" #include "qemu/rcu_queue.h" -#include "qemu/xxhash.h" #include "qemu/rcu.h" -#include "hw/core/cpu.h" - -#include "exec/exec-all.h" #include "exec/tb-flush.h" -#include "tcg/tcg.h" -#include "tcg/tcg-op.h" +#include "tcg/tcg-op-common.h" #include "plugin.h" struct qemu_plugin_cb { @@ -214,30 +207,49 @@ CPUPluginState *qemu_plugin_create_vcpu_state(void) static void plugin_grow_scoreboards__locked(CPUState *cpu) { - if (cpu->cpu_index < plugin.scoreboard_alloc_size) { + size_t scoreboard_size = plugin.scoreboard_alloc_size; + bool need_realloc = false; + + if (cpu->cpu_index < scoreboard_size) { return; } - bool need_realloc = FALSE; - while (cpu->cpu_index >= plugin.scoreboard_alloc_size) { - plugin.scoreboard_alloc_size *= 2; - need_realloc = TRUE; + while (cpu->cpu_index >= scoreboard_size) { + scoreboard_size *= 2; + need_realloc = true; } + if (!need_realloc) { + return; + } - if (!need_realloc || QLIST_EMPTY(&plugin.scoreboards)) { - /* nothing to do, we just updated sizes for future scoreboards */ + if (QLIST_EMPTY(&plugin.scoreboards)) { + /* just update size for future scoreboards */ + plugin.scoreboard_alloc_size = scoreboard_size; return; } + /* + * A scoreboard creation/deletion might be in progress. If a new vcpu is + * initialized at the same time, we are safe, as the new + * plugin.scoreboard_alloc_size was not yet written. + */ + qemu_rec_mutex_unlock(&plugin.lock); + /* cpus must be stopped, as tb might still use an existing scoreboard. */ start_exclusive(); - struct qemu_plugin_scoreboard *score; - QLIST_FOREACH(score, &plugin.scoreboards, entry) { - g_array_set_size(score->data, plugin.scoreboard_alloc_size); + /* re-acquire lock */ + qemu_rec_mutex_lock(&plugin.lock); + /* in case another vcpu is created between unlock and exclusive section. 
*/ + if (scoreboard_size > plugin.scoreboard_alloc_size) { + struct qemu_plugin_scoreboard *score; + QLIST_FOREACH(score, &plugin.scoreboards, entry) { + g_array_set_size(score->data, scoreboard_size); + } + plugin.scoreboard_alloc_size = scoreboard_size; + /* force all tb to be flushed, as scoreboard pointers were changed. */ + tb_flush(cpu); } - /* force all tb to be flushed, as scoreboard pointers were changed. */ - tb_flush(cpu); end_exclusive(); } @@ -255,7 +267,9 @@ static void qemu_plugin_vcpu_init__async(CPUState *cpu, run_on_cpu_data unused) plugin_grow_scoreboards__locked(cpu); qemu_rec_mutex_unlock(&plugin.lock); + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS); plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT); + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS); } void qemu_plugin_vcpu_init_hook(CPUState *cpu) @@ -268,7 +282,9 @@ void qemu_plugin_vcpu_exit_hook(CPUState *cpu) { bool success; + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS); plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT); + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS); assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX); qemu_rec_mutex_lock(&plugin.lock); @@ -356,6 +372,7 @@ void plugin_register_dyn_cb__udata(GArray **arr, static TCGHelperInfo info[3] = { [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG, [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG, + [QEMU_PLUGIN_CB_RW_REGS].flags = 0, /* * Match qemu_plugin_vcpu_udata_cb_t: * void (*)(uint32_t, void *) @@ -385,6 +402,7 @@ void plugin_register_dyn_cond_cb__udata(GArray **arr, static TCGHelperInfo info[3] = { [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG, [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG, + [QEMU_PLUGIN_CB_RW_REGS].flags = 0, /* * Match qemu_plugin_vcpu_udata_cb_t: * void (*)(uint32_t, void *) @@ -423,6 +441,7 @@ void plugin_register_vcpu_mem_cb(GArray **arr, static TCGHelperInfo info[3] = { [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG, [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG, + [QEMU_PLUGIN_CB_RW_REGS].flags = 0, /* * Match qemu_plugin_vcpu_mem_cb_t: * void (*)(uint32_t, qemu_plugin_meminfo_t, uint64_t, void *) @@ -462,7 +481,9 @@ void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb) QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans; + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS); func(cb->ctx->id, tb); + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS); } } @@ -487,7 +508,9 @@ qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2, QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall; + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS); func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8); + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS); } } @@ -509,7 +532,9 @@ void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret) QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret; + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS); func(cb->ctx->id, cpu->cpu_index, num, ret); + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS); } } @@ -517,14 +542,18 @@ void qemu_plugin_vcpu_idle_cb(CPUState *cpu) { /* idle and resume cb may be called before init, ignore in this case */ if (cpu->cpu_index < plugin.num_vcpus) { + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS); 
plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE); + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS); } } void qemu_plugin_vcpu_resume_cb(CPUState *cpu) { if (cpu->cpu_index < plugin.num_vcpus) { + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_RW_REGS); plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME); + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS); } } @@ -583,6 +612,8 @@ void exec_inline_op(enum plugin_dyn_cb_type type, } void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, + uint64_t value_low, + uint64_t value_high, MemOpIdx oi, enum qemu_plugin_mem_rw rw) { GArray *arr = cpu->neg.plugin_mem_cbs; @@ -591,6 +622,10 @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, if (arr == NULL) { return; } + + cpu->neg.plugin_mem_value_low = value_low; + cpu->neg.plugin_mem_value_high = value_high; + for (i = 0; i < arr->len; i++) { struct qemu_plugin_dyn_cb *cb = &g_array_index(arr, struct qemu_plugin_dyn_cb, i); @@ -598,9 +633,13 @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, switch (cb->type) { case PLUGIN_CB_MEM_REGULAR: if (rw & cb->regular.rw) { + qemu_plugin_set_cb_flags(cpu, + tcg_call_to_qemu_plugin_cb_flags(cb->regular.info->flags)); + cb->regular.f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw), vaddr, cb->regular.userp); + qemu_plugin_set_cb_flags(cpu, QEMU_PLUGIN_CB_NO_REGS); } break; case PLUGIN_CB_INLINE_ADD_U64: @@ -743,3 +782,14 @@ void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score) g_array_free(score->data, TRUE); g_free(score); } + +enum qemu_plugin_cb_flags tcg_call_to_qemu_plugin_cb_flags(int flags) +{ + if (flags & TCG_CALL_NO_RWG) { + return QEMU_PLUGIN_CB_NO_REGS; + } else if (flags & TCG_CALL_NO_WG) { + return QEMU_PLUGIN_CB_R_REGS; + } else { + return QEMU_PLUGIN_CB_RW_REGS; + } +} diff --git a/plugins/loader.c b/plugins/loader.c index ebc01da9c6e..8f0d75c9049 100644 --- a/plugins/loader.c +++ b/plugins/loader.c @@ -29,11 +29,8 @@ #include "qemu/xxhash.h" #include "qemu/plugin.h" #include "qemu/memalign.h" -#include "hw/core/cpu.h" +#include "qemu/target-info.h" #include "exec/tb-flush.h" -#ifndef CONFIG_USER_ONLY -#include "hw/boards.h" -#endif #include "plugin.h" @@ -128,7 +125,7 @@ static int plugin_add(void *opaque, const char *name, const char *value, /* Will treat arg="argname" as "argname=on" */ fullarg = g_strdup_printf("%s=%s", value, "on"); } else { - fullarg = g_strdup_printf("%s", value); + fullarg = g_strdup(value); } warn_report("using 'arg=%s' is deprecated", value); error_printf("Please use '%s' directly\n", fullarg); @@ -297,17 +294,11 @@ int qemu_plugin_load_list(QemuPluginList *head, Error **errp) struct qemu_plugin_desc *desc, *next; g_autofree qemu_info_t *info = g_new0(qemu_info_t, 1); - info->target_name = TARGET_NAME; + info->target_name = target_name(); info->version.min = QEMU_PLUGIN_MIN_VERSION; info->version.cur = QEMU_PLUGIN_VERSION; -#ifndef CONFIG_USER_ONLY - MachineState *ms = MACHINE(qdev_get_machine()); - info->system_emulation = true; - info->system.smp_vcpus = ms->smp.cpus; - info->system.max_vcpus = ms->smp.max_cpus; -#else - info->system_emulation = false; -#endif + + qemu_plugin_fillin_mode_info(info); QTAILQ_FOREACH_SAFE(desc, head, entry, next) { int err; @@ -379,7 +370,7 @@ static void plugin_reset_destroy(struct qemu_plugin_reset_data *data) { qemu_rec_mutex_lock(&plugin.lock); plugin_reset_destroy__locked(data); - qemu_rec_mutex_lock(&plugin.lock); + qemu_rec_mutex_unlock(&plugin.lock); } static void plugin_flush_destroy(CPUState *cpu, run_on_cpu_data 
arg) diff --git a/plugins/meson.build b/plugins/meson.build index 18a0303bff9..62c991d87fc 100644 --- a/plugins/meson.build +++ b/plugins/meson.build @@ -1,40 +1,68 @@ +if not get_option('plugins') + subdir_done() +endif + +qemu_plugin_symbols = configure_file( + input: files('../include/qemu/qemu-plugin.h'), + output: 'qemu-plugin.symbols', + capture: true, + command: [files('../scripts/qemu-plugin-symbols.py'), '@INPUT@']) + # Modules need more symbols than just those in plugins/qemu-plugins.symbols if not enable_modules if host_os == 'darwin' configure_file( - input: files('qemu-plugins.symbols'), + input: qemu_plugin_symbols, output: 'qemu-plugins-ld64.symbols', capture: true, command: ['sed', '-ne', 's/^[[:space:]]*\\(qemu_.*\\);/_\\1/p', '@INPUT@']) emulator_link_args += ['-Wl,-exported_symbols_list,plugins/qemu-plugins-ld64.symbols'] + elif host_os == 'windows' and meson.get_compiler('c').get_id() == 'clang' + # LLVM/lld does not support exporting specific symbols. However, it works + # out of the box with dllexport/dllimport attribute we set in the code. else - emulator_link_args += ['-Xlinker', '--dynamic-list=' + (meson.project_source_root() / 'plugins/qemu-plugins.symbols')] + emulator_link_args += ['-Xlinker', '--dynamic-list=' + qemu_plugin_symbols.full_path()] endif endif -if get_option('plugins') - if host_os == 'windows' - dlltool = find_program('dlltool', required: true) +if host_os == 'windows' + # Generate a .lib file for plugins to link against. + # First, create a .def file listing all the symbols a plugin should expect to have + # available in qemu + win32_plugin_def = configure_file( + input: qemu_plugin_symbols, + output: 'qemu_plugin_api.def', + capture: true, + command: [python, '-c', 'import fileinput, re; print("EXPORTS", end=""); [print(re.sub(r"[{};]", "", line), end="") for line in fileinput.input()]', '@INPUT@']) - # Generate a .lib file for plugins to link against. - # First, create a .def file listing all the symbols a plugin should expect to have - # available in qemu - win32_plugin_def = configure_file( - input: files('qemu-plugins.symbols'), - output: 'qemu_plugin_api.def', - capture: true, - command: ['sed', '-e', '0,/^/s//EXPORTS/; s/[{};]//g', '@INPUT@']) - # then use dlltool to assemble a delaylib. - win32_qemu_plugin_api_lib = configure_file( - input: win32_plugin_def, - output: 'libqemu_plugin_api.a', - command: [dlltool, '--input-def', '@INPUT@', - '--output-delaylib', '@OUTPUT@', '--dllname', 'qemu.exe'] - ) + # then use dlltool to assemble a delaylib. + # The delaylib will have an "imaginary" name (qemu.exe), that is used by the + # linker file we add with plugins (win32_linker.c) to identify that we want + # to find missing symbols in current program. + win32_qemu_plugin_api_link_flags = ['-Lplugins', '-lqemu_plugin_api'] + if meson.get_compiler('c').get_id() == 'clang' + # With LLVM/lld, delaylib is specified at link time (-delayload) + dlltool = find_program('llvm-dlltool', required: true) + dlltool_cmd = [dlltool, '-d', '@INPUT@', '-l', '@OUTPUT@', '-D', 'qemu.exe'] + win32_qemu_plugin_api_link_flags += ['-Wl,-delayload=qemu.exe'] + else + # With gcc/ld, delay lib is built with a specific delay parameter. 
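As an aside, the .def generation that these meson rules drive through an inline "python -c" one-liner can be sketched as a standalone script. This is only an illustration of the transformation (the generated qemu-plugin.symbols linker-script list in, an EXPORTS-style .def out, consumed by dlltool or llvm-dlltool); it is not part of the patch, and the command-line handling is an assumption.

    #!/usr/bin/env python3
    # Sketch of the inline "python -c" helper above: convert the generated
    # qemu-plugin.symbols linker script (a "{ sym; sym; ... };" list) into
    # the EXPORTS format that dlltool/llvm-dlltool expect in a .def file.
    import re
    import sys


    def symbols_to_def(symbols_text: str) -> str:
        lines = ["EXPORTS"]
        for line in symbols_text.splitlines():
            # Strip the "{", "}" and ";" punctuation; keep the symbol names.
            cleaned = re.sub(r"[{};]", "", line).strip()
            if cleaned:
                lines.append("    " + cleaned)
        return "\n".join(lines) + "\n"


    if __name__ == "__main__":
        text = sys.stdin.read() if len(sys.argv) == 1 else open(sys.argv[1]).read()
        sys.stdout.write(symbols_to_def(text))
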
+ dlltool = find_program('dlltool', required: true) + dlltool_cmd = [dlltool, '--input-def', '@INPUT@', + '--output-delaylib', '@OUTPUT@', '--dllname', 'qemu.exe'] endif - specific_ss.add(files( - 'loader.c', - 'core.c', - 'api.c', - )) + win32_qemu_plugin_api_lib = configure_file( + input: win32_plugin_def, + output: 'libqemu_plugin_api.a', + command: dlltool_cmd + ) endif + +user_ss.add(files('user.c', 'api-user.c')) +system_ss.add(files('system.c', 'api-system.c')) + +user_ss.add(files('api.c', 'core.c')) +system_ss.add(files('api.c', 'core.c')) + +common_ss.add(files('loader.c')) + diff --git a/plugins/plugin.h b/plugins/plugin.h index 30e2299a54d..6fbc443b96c 100644 --- a/plugins/plugin.h +++ b/plugins/plugin.h @@ -13,6 +13,7 @@ #define PLUGIN_H #include +#include "qemu/queue.h" #include "qemu/qht.h" #define QEMU_PLUGIN_MIN_VERSION 2 @@ -118,4 +119,10 @@ struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size); void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score); +/** + * qemu_plugin_fillin_mode_info() - populate mode specific info + * info: pointer to qemu_info_t structure + */ +void qemu_plugin_fillin_mode_info(qemu_info_t *info); + #endif /* PLUGIN_H */ diff --git a/plugins/qemu-plugins.symbols b/plugins/qemu-plugins.symbols deleted file mode 100644 index ca773d8d9fe..00000000000 --- a/plugins/qemu-plugins.symbols +++ /dev/null @@ -1,57 +0,0 @@ -{ - qemu_plugin_bool_parse; - qemu_plugin_end_code; - qemu_plugin_entry_code; - qemu_plugin_get_hwaddr; - qemu_plugin_get_registers; - qemu_plugin_hwaddr_device_name; - qemu_plugin_hwaddr_is_io; - qemu_plugin_hwaddr_phys_addr; - qemu_plugin_insn_data; - qemu_plugin_insn_disas; - qemu_plugin_insn_haddr; - qemu_plugin_insn_size; - qemu_plugin_insn_symbol; - qemu_plugin_insn_vaddr; - qemu_plugin_mem_is_big_endian; - qemu_plugin_mem_is_sign_extended; - qemu_plugin_mem_is_store; - qemu_plugin_mem_size_shift; - qemu_plugin_num_vcpus; - qemu_plugin_outs; - qemu_plugin_path_to_binary; - qemu_plugin_read_register; - qemu_plugin_register_atexit_cb; - qemu_plugin_register_flush_cb; - qemu_plugin_register_vcpu_exit_cb; - qemu_plugin_register_vcpu_idle_cb; - qemu_plugin_register_vcpu_init_cb; - qemu_plugin_register_vcpu_insn_exec_cb; - qemu_plugin_register_vcpu_insn_exec_cond_cb; - qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu; - qemu_plugin_register_vcpu_mem_cb; - qemu_plugin_register_vcpu_mem_inline_per_vcpu; - qemu_plugin_register_vcpu_resume_cb; - qemu_plugin_register_vcpu_syscall_cb; - qemu_plugin_register_vcpu_syscall_ret_cb; - qemu_plugin_register_vcpu_tb_exec_cb; - qemu_plugin_register_vcpu_tb_exec_cond_cb; - qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu; - qemu_plugin_register_vcpu_tb_trans_cb; - qemu_plugin_request_time_control; - qemu_plugin_reset; - qemu_plugin_scoreboard_free; - qemu_plugin_scoreboard_find; - qemu_plugin_scoreboard_new; - qemu_plugin_start_code; - qemu_plugin_tb_get_insn; - qemu_plugin_tb_n_insns; - qemu_plugin_tb_vaddr; - qemu_plugin_u64_add; - qemu_plugin_u64_get; - qemu_plugin_u64_set; - qemu_plugin_u64_sum; - qemu_plugin_uninstall; - qemu_plugin_update_ns; - qemu_plugin_vcpu_for_each; -}; diff --git a/plugins/system.c b/plugins/system.c new file mode 100644 index 00000000000..b3ecc33ba52 --- /dev/null +++ b/plugins/system.c @@ -0,0 +1,24 @@ +/* + * QEMU Plugin system-emulation helpers + * + * Helpers that are specific to system emulation. + * + * Copyright (C) 2017, Emilio G. 
Cota + * Copyright (C) 2019-2025, Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/plugin.h" +#include "hw/boards.h" + +#include "plugin.h" + +void qemu_plugin_fillin_mode_info(qemu_info_t *info) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + info->system_emulation = true; + info->system.smp_vcpus = ms->smp.cpus; + info->system.max_vcpus = ms->smp.max_cpus; +} diff --git a/plugins/user.c b/plugins/user.c new file mode 100644 index 00000000000..250d5425020 --- /dev/null +++ b/plugins/user.c @@ -0,0 +1,19 @@ +/* + * QEMU Plugin user-mode helpers + * + * Helpers that are specific to user-mode. + * + * Copyright (C) 2017, Emilio G. Cota + * Copyright (C) 2019-2025, Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/plugin.h" +#include "plugin.h" + +void qemu_plugin_fillin_mode_info(qemu_info_t *info) +{ + info->system_emulation = false; +} diff --git a/po/it.po b/po/it.po index c6d95172070..363b9bddf2f 100644 --- a/po/it.po +++ b/po/it.po @@ -65,7 +65,7 @@ msgid "Detach Tab" msgstr "_Sposta in una nuova finestra" msgid "Show Menubar" -msgstr "" +msgstr "Mostra _barra dei menu" msgid "_Machine" msgstr "_Macchina virtuale" diff --git a/python/Makefile b/python/Makefile index 1fa4ba2498e..32aedce4137 100644 --- a/python/Makefile +++ b/python/Makefile @@ -9,13 +9,13 @@ help: @echo "make check-minreqs:" @echo " Run tests in the minreqs virtual environment." @echo " These tests use the oldest dependencies." - @echo " Requires: Python 3.8" - @echo " Hint (Fedora): 'sudo dnf install python3.8'" + @echo " Requires: Python 3.9" + @echo " Hint (Fedora): 'sudo dnf install python3.9'" @echo "" @echo "make check-tox:" @echo " Run tests against multiple python versions." @echo " These tests use the newest dependencies." - @echo " Requires: Python 3.8 - 3.11, and tox." + @echo " Requires: Python 3.9 - 3.11, and tox." @echo " Hint (Fedora): 'sudo dnf install python3-tox python3.11'" @echo " The variable QEMU_TOX_EXTRA_ARGS can be use to pass extra" @echo " arguments to tox". @@ -59,7 +59,7 @@ PIP_INSTALL = pip install --disable-pip-version-check min-venv: $(QEMU_MINVENV_DIR) $(QEMU_MINVENV_DIR)/bin/activate $(QEMU_MINVENV_DIR) $(QEMU_MINVENV_DIR)/bin/activate: setup.cfg tests/minreqs.txt @echo "VENV $(QEMU_MINVENV_DIR)" - @python3.8 -m venv $(QEMU_MINVENV_DIR) + @python3.9 -m venv $(QEMU_MINVENV_DIR) @( \ echo "ACTIVATE $(QEMU_MINVENV_DIR)"; \ . $(QEMU_MINVENV_DIR)/bin/activate; \ @@ -68,7 +68,7 @@ $(QEMU_MINVENV_DIR) $(QEMU_MINVENV_DIR)/bin/activate: setup.cfg tests/minreqs.tx echo "INSTALL -r tests/minreqs.txt $(QEMU_MINVENV_DIR)";\ $(PIP_INSTALL) -r tests/minreqs.txt 1>/dev/null; \ echo "INSTALL -e qemu $(QEMU_MINVENV_DIR)"; \ - $(PIP_INSTALL) -e . 1>/dev/null; \ + PIP_CONFIG_SETTINGS="editable_mode=compat" $(PIP_INSTALL) -e . 
1>/dev/null; \ ) @touch $(QEMU_MINVENV_DIR) @@ -103,7 +103,7 @@ check-dev: dev-venv .PHONY: develop develop: - $(PIP_INSTALL) -e .[devel] + PIP_CONFIG_SETTINGS="editable_mode=compat" $(PIP_INSTALL) -e .[devel] .PHONY: check check: diff --git a/python/qemu/utils/__init__.py b/python/qemu/utils/__init__.py index 017cfdcda75..be5daa83634 100644 --- a/python/qemu/utils/__init__.py +++ b/python/qemu/utils/__init__.py @@ -23,13 +23,19 @@ from typing import Optional # pylint: disable=import-error -from .accel import kvm_available, list_accel, tcg_available +from .accel import ( + hvf_available, + kvm_available, + list_accel, + tcg_available, +) __all__ = ( 'VerboseProcessError', 'add_visual_margin', 'get_info_usernet_hostfwd_port', + 'hvf_available', 'kvm_available', 'list_accel', 'tcg_available', diff --git a/python/qemu/utils/accel.py b/python/qemu/utils/accel.py index 386ff640ca8..f915b646692 100644 --- a/python/qemu/utils/accel.py +++ b/python/qemu/utils/accel.py @@ -82,3 +82,12 @@ def tcg_available(qemu_bin: str) -> bool: @param qemu_bin (str): path to the QEMU binary """ return 'tcg' in list_accel(qemu_bin) + + +def hvf_available(qemu_bin: str) -> bool: + """ + Check if HVF is available. + + @param qemu_bin (str): path to the QEMU binary + """ + return 'hvf' in list_accel(qemu_bin) diff --git a/python/qemu/utils/qom.py b/python/qemu/utils/qom.py index 426a0f245f2..05e5f141791 100644 --- a/python/qemu/utils/qom.py +++ b/python/qemu/utils/qom.py @@ -31,8 +31,7 @@ ## import argparse - -from qemu.qmp import ExecuteError +from typing import List from .qom_common import QOMCommand @@ -224,28 +223,34 @@ def __init__(self, args: argparse.Namespace): super().__init__(args) self.path = args.path - def _list_node(self, path: str) -> None: - print(path) - items = self.qom_list(path) - for item in items: - if item.child: - continue - try: - rsp = self.qmp.cmd('qom-get', path=path, - property=item.name) - print(f" {item.name}: {rsp} ({item.type})") - except ExecuteError as err: - print(f" {item.name}: ({item.type})") - print('') - for item in items: - if not item.child: - continue + def _list_nodes(self, paths: List[str]) -> None: + all_paths_props = self.qom_list_get(paths) + i = 0 + + for props in all_paths_props: + path = paths[i] + i = i + 1 + print(path) if path == '/': path = '' - self._list_node(f"{path}/{item.name}") + newpaths = [] + + for item in props.properties: + if item.child: + newpaths += [f"{path}/{item.name}"] + else: + value = item.value + if value is None: + value = "" + print(f" {item.name}: {value} ({item.type})") + + print('') + + if newpaths: + self._list_nodes(newpaths) def run(self) -> int: - self._list_node(self.path) + self._list_nodes([self.path]) return 0 diff --git a/python/qemu/utils/qom_common.py b/python/qemu/utils/qom_common.py index dd2c8b1908c..ab21a4d364a 100644 --- a/python/qemu/utils/qom_common.py +++ b/python/qemu/utils/qom_common.py @@ -65,6 +65,52 @@ def link(self) -> bool: return self.type.startswith('link<') +class ObjectPropertyValue: + """ + Represents a property return from e.g. qom-tree-get + """ + def __init__(self, name: str, type_: str, value: object): + self.name = name + self.type = type_ + self.value = value + + @classmethod + def make(cls, value: Dict[str, Any]) -> 'ObjectPropertyValue': + """ + Build an ObjectPropertyValue from a Dict with an unknown shape. 
+ """ + assert value.keys() >= {'name', 'type'} + assert value.keys() <= {'name', 'type', 'value'} + return cls(value['name'], value['type'], value.get('value')) + + @property + def child(self) -> bool: + """Is this property a child property?""" + return self.type.startswith('child<') + + +class ObjectPropertiesValues: + """ + Represents the return type from e.g. qom-list-get + """ + # pylint: disable=too-few-public-methods + + def __init__(self, properties: List[ObjectPropertyValue]) -> None: + self.properties = properties + + @classmethod + def make(cls, value: Dict[str, Any]) -> 'ObjectPropertiesValues': + """ + Build an ObjectPropertiesValues from a Dict with an unknown shape. + """ + assert value.keys() == {'properties'} + props = [ObjectPropertyValue(item['name'], + item['type'], + item.get('value')) + for item in value['properties']] + return cls(props) + + CommandT = TypeVar('CommandT', bound='QOMCommand') @@ -145,6 +191,15 @@ def qom_list(self, path: str) -> List[ObjectPropertyInfo]: assert isinstance(rsp, list) return [ObjectPropertyInfo.make(x) for x in rsp] + def qom_list_get(self, paths: List[str]) -> List[ObjectPropertiesValues]: + """ + :return: a strongly typed list from the 'qom-list-get' command. + """ + rsp = self.qmp.cmd('qom-list-get', paths=paths) + # qom-list-get returns List[ObjectPropertiesValues] + assert isinstance(rsp, list) + return [ObjectPropertiesValues.make(x) for x in rsp] + @classmethod def command_runner( cls: Type[CommandT], diff --git a/python/scripts/mkvenv.py b/python/scripts/mkvenv.py index f2526af0a04..f102527c4de 100644 --- a/python/scripts/mkvenv.py +++ b/python/scripts/mkvenv.py @@ -84,6 +84,7 @@ Sequence, Tuple, Union, + cast, ) import venv @@ -94,17 +95,39 @@ HAVE_DISTLIB = True try: import distlib.scripts - import distlib.version except ImportError: try: # Reach into pip's cookie jar. pylint and flake8 don't understand # that these imports will be used via distlib.xxx. from pip._vendor import distlib import pip._vendor.distlib.scripts # noqa, pylint: disable=unused-import - import pip._vendor.distlib.version # noqa, pylint: disable=unused-import except ImportError: HAVE_DISTLIB = False +# pip 25.2 does not vendor distlib.version, but it uses vendored +# packaging.version +HAVE_DISTLIB_VERSION = True +try: + import distlib.version # pylint: disable=ungrouped-imports +except ImportError: + try: + # pylint: disable=unused-import,ungrouped-imports + import pip._vendor.distlib.version # noqa + except ImportError: + HAVE_DISTLIB_VERSION = False + +HAVE_PACKAGING_VERSION = True +try: + # Do not bother importing non-vendored packaging, because it is not + # in stdlib. + from pip._vendor import packaging + # pylint: disable=unused-import + import pip._vendor.packaging.requirements # noqa + import pip._vendor.packaging.version # noqa +except ImportError: + HAVE_PACKAGING_VERSION = False + + # Try to load tomllib, with a fallback to tomli. # HAVE_TOMLLIB is checked below, just-in-time, so that mkvenv does not fail # outside the venv or before a potential call to ensurepip in checkpip(). 
@@ -133,6 +156,39 @@ class Ouch(RuntimeError): """An Exception class we can't confuse with a builtin.""" +class Matcher: + """Compatibility appliance for version/requirement string parsing.""" + def __init__(self, name_and_constraint: str): + """Create a matcher from a requirement-like string.""" + if HAVE_DISTLIB_VERSION: + self._m = distlib.version.LegacyMatcher(name_and_constraint) + elif HAVE_PACKAGING_VERSION: + self._m = packaging.requirements.Requirement(name_and_constraint) + else: + raise Ouch("found neither distlib.version nor packaging.version") + self.name = self._m.name + + def match(self, version_str: str) -> bool: + """Return True if `version` satisfies the stored constraint.""" + if HAVE_DISTLIB_VERSION: + return cast( + bool, + self._m.match(distlib.version.LegacyVersion(version_str)) + ) + + assert HAVE_PACKAGING_VERSION + return cast( + bool, + self._m.specifier.contains( + packaging.version.Version(version_str), prereleases=True + ) + ) + + def __repr__(self) -> str: + """Stable debug representation delegated to the backend.""" + return repr(self._m) + + class QemuEnvBuilder(venv.EnvBuilder): """ An extension of venv.EnvBuilder for building QEMU's configure-time venv. @@ -379,6 +435,9 @@ def make_venv( # pylint: disable=too-many-arguments try: builder.create(str(env_dir)) except SystemExit as exc: + # pylint 3.3 bug: + # pylint: disable=raising-non-exception, raise-missing-from + # Some versions of the venv module raise SystemExit; *nasty*! # We want the exception that prompted it. It might be a subprocess # error that has output we *really* want to see. @@ -666,7 +725,7 @@ def _do_ensure( canary = None for name, info in group.items(): constraint = _make_version_constraint(info, False) - matcher = distlib.version.LegacyMatcher(name + constraint) + matcher = Matcher(name + constraint) print(f"mkvenv: checking for {matcher}", file=sys.stderr) dist: Optional[Distribution] = None @@ -680,7 +739,7 @@ def _do_ensure( # Always pass installed package to pip, so that they can be # updated if the requested version changes or not _is_system_package(dist) - or not matcher.match(distlib.version.LegacyVersion(dist.version)) + or not matcher.match(dist.version) ): absent.append(name + _make_version_constraint(info, True)) if len(absent) == 1: diff --git a/python/scripts/vendor.py b/python/scripts/vendor.py index 07aff97ccad..b47db00743a 100755 --- a/python/scripts/vendor.py +++ b/python/scripts/vendor.py @@ -41,8 +41,8 @@ def main() -> int: parser.parse_args() packages = { - "meson==1.2.3": - "4533a43c34548edd1f63a276a42690fce15bde9409bcf20c4b8fa3d7e4d7cac1", + "meson==1.8.1": + "374bbf71247e629475fc10b0bd2ef66fc418c2d8f4890572f74de0f97d0d42da", } vendor_dir = Path(__file__, "..", "..", "wheels").resolve() diff --git a/python/setup.cfg b/python/setup.cfg index 3b4e2cc5501..d7f5dc7bafe 100644 --- a/python/setup.cfg +++ b/python/setup.cfg @@ -14,7 +14,6 @@ classifiers = Natural Language :: English Operating System :: OS Independent Programming Language :: Python :: 3 :: Only - Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 @@ -23,7 +22,7 @@ classifiers = Typing :: Typed [options] -python_requires = >= 3.8 +python_requires = >= 3.9 packages = qemu.qmp qemu.machine @@ -47,6 +46,7 @@ devel = urwid >= 2.1.2 urwid-readline >= 0.13 Pygments >= 2.9.0 + sphinx >= 3.4.3 # Provides qom-fuse functionality fuse = @@ -78,8 +78,7 @@ exclude = __pycache__, [mypy] strict = True -python_version = 
3.8 -warn_unused_configs = True +python_version = 3.9 namespace_packages = True warn_unused_ignores = False @@ -142,6 +141,7 @@ ignore_missing_imports = True disable=consider-using-f-string, consider-using-with, too-many-arguments, + too-many-positional-arguments, too-many-function-args, # mypy handles this with less false positives. too-many-instance-attributes, no-member, # mypy also handles this better. @@ -185,7 +185,7 @@ multi_line_output=3 # of python available on your system to run this test. [tox:tox] -envlist = py38, py39, py310, py311, py312, py313 +envlist = py39, py310, py311, py312, py313 skip_missing_interpreters = true [testenv] diff --git a/python/tests/minreqs.txt b/python/tests/minreqs.txt index a3f423efd84..cd2e2a81c3d 100644 --- a/python/tests/minreqs.txt +++ b/python/tests/minreqs.txt @@ -1,5 +1,5 @@ # This file lists the ***oldest possible dependencies*** needed to run -# "make check" successfully under ***Python 3.8***. It is used primarily +# "make check" successfully under ***Python 3.9***. It is used primarily # by GitLab CI to ensure that our stated minimum versions in setup.cfg # are truthful and regularly validated. # @@ -11,6 +11,15 @@ # When adding new dependencies, pin the very oldest non-yanked version # on PyPI that allows the test suite to pass. +# For some reason, the presence of packaging==14.0 below requires us to +# also pin setuptools to version 70 or below. Otherwise, the +# installation of the QEMU package itself fails, failing to find +# setuptools. +setuptools<=70 + +# Dependencies for qapidoc/qapi_domain et al +sphinx==3.4.3 + # Dependencies for the TUI addon (Required for successful linting) urwid==2.1.2 urwid-readline==0.13 @@ -38,10 +47,32 @@ pyflakes==2.5.0 # Transitive mypy dependencies mypy-extensions==1.0.0 +tomli==1.1.0 typing-extensions==4.7.1 # Transitive pylint dependencies astroid==2.15.4 +dill==0.2 lazy-object-proxy==1.4.0 +platformdirs==2.2.0 toml==0.10.0 +tomlkit==0.10.1 wrapt==1.14.0 + +# Transitive sphinx dependencies +Jinja2==2.7 +MarkupSafe==1.1.0 +alabaster==0.7.1 +babel==1.3 +docutils==0.12 +imagesize==0.5.0 +packaging==14.0 +pytz==2011b0 +requests==2.5.0 +snowballstemmer==1.1 +sphinxcontrib-applehelp==1.0.0 +sphinxcontrib-devhelp==1.0.0 +sphinxcontrib-htmlhelp==1.0.0 +sphinxcontrib-jsmath==1.0.0 +sphinxcontrib-qthelp==1.0.0 +sphinxcontrib-serializinghtml==1.0.0 diff --git a/python/tests/qapi-flake8.sh b/python/tests/qapi-flake8.sh new file mode 100755 index 00000000000..c69f9ea2e00 --- /dev/null +++ b/python/tests/qapi-flake8.sh @@ -0,0 +1,6 @@ +#!/bin/sh -e +# SPDX-License-Identifier: GPL-2.0-or-later + +python3 -m flake8 ../scripts/qapi/ \ + ../docs/sphinx/qapidoc.py \ + ../docs/sphinx/qapi_domain.py diff --git a/python/tests/qapi-isort.sh b/python/tests/qapi-isort.sh new file mode 100755 index 00000000000..067c16d5d94 --- /dev/null +++ b/python/tests/qapi-isort.sh @@ -0,0 +1,8 @@ +#!/bin/sh -e +# SPDX-License-Identifier: GPL-2.0-or-later + +python3 -m isort --sp . -c ../scripts/qapi/ +# Force isort to recognize "compat" as a local module and not third-party +python3 -m isort --sp . 
-c -p compat \ + ../docs/sphinx/qapi_domain.py \ + ../docs/sphinx/qapidoc.py diff --git a/python/tests/qapi-mypy.sh b/python/tests/qapi-mypy.sh new file mode 100755 index 00000000000..363dbaf8c06 --- /dev/null +++ b/python/tests/qapi-mypy.sh @@ -0,0 +1,4 @@ +#!/bin/sh -e +# SPDX-License-Identifier: GPL-2.0-or-later + +python3 -m mypy ../scripts/qapi diff --git a/python/tests/qapi-pylint.sh b/python/tests/qapi-pylint.sh new file mode 100755 index 00000000000..8767d9d2a2d --- /dev/null +++ b/python/tests/qapi-pylint.sh @@ -0,0 +1,8 @@ +#!/bin/sh -e +# SPDX-License-Identifier: GPL-2.0-or-later + +SETUPTOOLS_USE_DISTUTILS=stdlib python3 -m pylint \ + --rcfile=../scripts/qapi/pylintrc \ + ../scripts/qapi/ \ + ../docs/sphinx/qapidoc.py \ + ../docs/sphinx/qapi_domain.py diff --git a/python/wheels/meson-1.2.3-py3-none-any.whl b/python/wheels/meson-1.2.3-py3-none-any.whl deleted file mode 100644 index a8b84e5f114..00000000000 Binary files a/python/wheels/meson-1.2.3-py3-none-any.whl and /dev/null differ diff --git a/python/wheels/meson-1.8.1-py3-none-any.whl b/python/wheels/meson-1.8.1-py3-none-any.whl new file mode 100644 index 00000000000..a885f0e18ce Binary files /dev/null and b/python/wheels/meson-1.8.1-py3-none-any.whl differ diff --git a/python/wheels/pycotap-1.3.1-py3-none-any.whl b/python/wheels/pycotap-1.3.1-py3-none-any.whl new file mode 100644 index 00000000000..9c2c7d25936 Binary files /dev/null and b/python/wheels/pycotap-1.3.1-py3-none-any.whl differ diff --git a/pythondeps.toml b/pythondeps.toml index f6e590fdd86..d0f52b14f79 100644 --- a/pythondeps.toml +++ b/pythondeps.toml @@ -19,16 +19,17 @@ [meson] # The install key should match the version in python/wheels/ -meson = { accepted = ">=1.1.0", installed = "1.2.3", canary = "meson" } +meson = { accepted = ">=1.5.0", installed = "1.8.1", canary = "meson" } +pycotap = { accepted = ">=1.1.0", installed = "1.3.1" } + +[meson-rust] +# The install key should match the version in python/wheels/ +meson = { accepted = ">=1.8.1", installed = "1.8.1", canary = "meson" } [docs] # Please keep the installed versions in sync with docs/requirements.txt -sphinx = { accepted = ">=3.4.3", installed = "5.3.0", canary = "sphinx-build" } -sphinx_rtd_theme = { accepted = ">=0.5", installed = "1.1.1" } +sphinx = { accepted = ">=3.4.3", installed = "6.2.1", canary = "sphinx-build" } +sphinx_rtd_theme = { accepted = ">=0.5", installed = "1.2.2" } -[avocado] -# Note that qemu.git/python/ is always implicitly installed. -# Prefer an LTS version when updating the accepted versions of -# avocado-framework, for example right now the limit is 92.x. 
-avocado-framework = { accepted = "(>=88.1, <93.0)", installed = "88.1", canary = "avocado" } -pycdlib = { accepted = ">=1.11.0" } +[testdeps] +qemu.qmp = { accepted = ">=0.0.3", installed = "0.0.3" } diff --git a/qapi/accelerator.json b/qapi/accelerator.json new file mode 100644 index 00000000000..fb28c8d920a --- /dev/null +++ b/qapi/accelerator.json @@ -0,0 +1,56 @@ +# -*- Mode: Python -*- +# vim: filetype=python +# +# SPDX-License-Identifier: GPL-2.0-or-later + +## +# ************ +# Accelerators +# ************ +## + +{ 'include': 'common.json' } + +## +# @KvmInfo: +# +# Information about support for KVM acceleration +# +# @enabled: true if KVM acceleration is active +# +# @present: true if KVM acceleration is built into this executable +# +# Since: 0.14 +## +{ 'struct': 'KvmInfo', 'data': {'enabled': 'bool', 'present': 'bool'} } + +## +# @query-kvm: +# +# Return information about KVM acceleration +# +# Since: 0.14 +# +# .. qmp-example:: +# +# -> { "execute": "query-kvm" } +# <- { "return": { "enabled": true, "present": true } } +## +{ 'command': 'query-kvm', 'returns': 'KvmInfo' } + +## +# @x-accel-stats: +# +# Query accelerator statistics +# +# Features: +# +# @unstable: This command is meant for debugging. +# +# Returns: accelerator statistics +# +# Since: 10.1 +## +{ 'command': 'x-accel-stats', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } diff --git a/qapi/acpi.json b/qapi/acpi.json index 045dab6228b..906b3687a55 100644 --- a/qapi/acpi.json +++ b/qapi/acpi.json @@ -6,7 +6,9 @@ # SPDX-License-Identifier: GPL-2.0-or-later ## -# = ACPI +# **** +# ACPI +# **** ## ## @@ -80,7 +82,7 @@ ## # @ACPIOSTInfo: # -# OSPM Status Indication for a device For description of possible +# OSPM Status Indication for a device. For description of possible # values of @source and @status fields see "_OST (OSPM Status # Indication)" chapter of ACPI5.0 spec. # @@ -106,7 +108,7 @@ ## # @query-acpi-ospm-status: # -# Return a list of ACPIOSTInfo for devices that support status +# Return a list of `ACPIOSTInfo` for devices that support status # reporting via ACPI _OST method. # # Since: 2.1 diff --git a/qapi/audio.json b/qapi/audio.json index 519697c0cd8..53142080f72 100644 --- a/qapi/audio.json +++ b/qapi/audio.json @@ -7,7 +7,9 @@ # See the COPYING file in the top-level directory. ## -# = Audio +# ***** +# Audio +# ***** ## ## @@ -65,6 +67,26 @@ '*in': 'AudiodevPerDirectionOptions', '*out': 'AudiodevPerDirectionOptions' } } +## +# @AudiodevDBusOptions: +# +# Options of the D-Bus audio backend. +# +# @in: options of the capture stream +# +# @out: options of the playback stream +# +# @nsamples: set the number of samples per read/write calls (default to 480, +# 10ms at 48kHz). +# +# Since: 10.0 +## +{ 'struct': 'AudiodevDBusOptions', + 'data': { + '*in': 'AudiodevPerDirectionOptions', + '*out': 'AudiodevPerDirectionOptions', + '*nsamples': 'uint32'} } + ## # @AudiodevAlsaPerDirectionOptions: # @@ -76,7 +98,7 @@ # @period-length: the period length in microseconds # # @try-poll: attempt to use poll mode, falling back to non-polling -# access on failure (default true) +# access on failure (default false) # # Since: 4.0 ## @@ -289,9 +311,9 @@ # # @name: name of the sink/source to use # -# @stream-name: name of the PulseAudio stream created by qemu. Can be +# @stream-name: name of the PulseAudio stream created by QEMU. 
Can be # used to identify the stream in PulseAudio when you create -# multiple PulseAudio devices or run multiple qemu instances +# multiple PulseAudio devices or run multiple QEMU instances # (default: audiodev's id, since 4.2) # # @latency: latency you want PulseAudio to achieve in microseconds @@ -333,9 +355,9 @@ # # @name: name of the sink/source to use # -# @stream-name: name of the PipeWire stream created by qemu. Can be +# @stream-name: name of the PipeWire stream created by QEMU. Can be # used to identify the stream in PipeWire when you create multiple -# PipeWire devices or run multiple qemu instances (default: +# PipeWire devices or run multiple QEMU instances (default: # audiodev's id) # # @latency: latency you want PipeWire to achieve in microseconds @@ -490,7 +512,7 @@ 'if': 'CONFIG_AUDIO_ALSA' }, 'coreaudio': { 'type': 'AudiodevCoreaudioOptions', 'if': 'CONFIG_AUDIO_COREAUDIO' }, - 'dbus': { 'type': 'AudiodevGenericOptions', + 'dbus': { 'type': 'AudiodevDBusOptions', 'if': 'CONFIG_DBUS_DISPLAY' }, 'dsound': { 'type': 'AudiodevDsoundOptions', 'if': 'CONFIG_AUDIO_DSOUND' }, @@ -513,9 +535,7 @@ ## # @query-audiodevs: # -# Returns information about audiodev configuration -# -# Returns: array of @Audiodev +# Return information about audiodev configuration # # Since: 8.0 ## diff --git a/qapi/authz.json b/qapi/authz.json index 7fc6e3032ea..bc1123cc053 100644 --- a/qapi/authz.json +++ b/qapi/authz.json @@ -2,7 +2,9 @@ # vim: filetype=python ## -# = User authorization +# ****************** +# User authorization +# ****************** ## ## @@ -75,7 +77,7 @@ # Properties for authz-listfile objects. # # @filename: File name to load the configuration from. The file must -# contain valid JSON for AuthZListProperties. +# contain valid JSON for `AuthZListProperties`. # # @refresh: If true, inotify is used to monitor the file, # automatically reloading changes. If an error occurs during diff --git a/qapi/block-core.json b/qapi/block-core.json index f400b334c84..dc6eb4ae23d 100644 --- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -2,7 +2,8 @@ # vim: filetype=python ## -# == Block core (VM unrelated) +# Block core (VM unrelated) +# ========================= ## { 'include': 'common.json' } @@ -31,8 +32,8 @@ # @icount: Current instruction count. Appears when execution # record/replay is enabled. Used for "time-traveling" to match # the moment in the recorded execution with the snapshots. This -# counter may be obtained through @query-replay command (since -# 5.2) +# counter may be obtained through `query-replay` command +# (since 5.2) # # Since: 1.3 ## @@ -158,7 +159,14 @@ ## # @ImageInfoSpecificRbd: # -# @encryption-format: Image encryption format +# @encryption-format: Image encryption format. If encryption is enabled for the +# image (see encrypted in BlockNodeInfo), this is the actual format in which the +# image is accessed. If encryption is not enabled, this is the result of +# probing when the image was opened, to give a suggestion which encryption +# format could be enabled. Note that probing results can be changed by the +# guest by writing a (possibly partial) encryption format header to the +# image, so don't treat this information as trusted if the guest is not +# trusted. 
# # Since: 6.1 ## @@ -223,7 +231,7 @@ { 'struct': 'ImageInfoSpecificLUKSWrapper', 'data': { 'data': 'QCryptoBlockInfoLUKS' } } # If we need to add block driver specific parameters for -# LUKS in future, then we'll subclass QCryptoBlockInfoLUKS +# LUKS in future, then we'll subclass `QCryptoBlockInfoLUKS` # to define a ImageInfoSpecificLUKS ## @@ -332,7 +340,7 @@ # node, annotated with information about that node in relation to its # parent. # -# @name: Child name of the root node in the BlockGraphInfo struct, in +# @name: Child name of the root node in the `BlockGraphInfo` struct, in # its role as the child of some undescribed parent node # # @info: Block graph information starting at this node @@ -349,7 +357,7 @@ # @BlockGraphInfo: # # Information about all nodes in a block (sub)graph in the form of -# BlockNodeInfo data. The base BlockNodeInfo struct contains the +# `BlockNodeInfo` data. The base `BlockNodeInfo` struct contains the # information for the (sub)graph's root node. # # @children: Array of links to this node's child nodes' information @@ -461,6 +469,19 @@ 'direct': 'bool', 'no-flush': 'bool' } } +## +# @BlockdevChild: +# +# @child: The name of the child, for example 'file' or 'backing'. +# +# @node-name: The name of the child's block driver node. +# +# Since: 10.1 +## +{ 'struct': 'BlockdevChild', + 'data': { 'child': 'str', + 'node-name': 'str' } } + ## # @BlockDeviceInfo: # @@ -486,6 +507,12 @@ # @backing_file_depth: number of files in the backing file chain # (since: 1.2) # +# @children: Information about child block nodes. (since: 10.1) +# +# @active: true if the backend is active; typical cases for inactive backends +# are on the migration source instance after migration completes and on the +# destination before it completes. (since: 10.0) +# # @encrypted: true if the backing device is encrypted # # @detect_zeroes: detect and optimize zero writes (Since 2.1) @@ -506,11 +533,11 @@ # # @bps_max: total throughput limit during bursts, in bytes (Since 1.7) # -# @bps_rd_max: read throughput limit during bursts, in bytes (Since -# 1.7) +# @bps_rd_max: read throughput limit during bursts, in bytes +# (Since 1.7) # -# @bps_wr_max: write throughput limit during bursts, in bytes (Since -# 1.7) +# @bps_wr_max: write throughput limit during bursts, in bytes +# (Since 1.7) # # @iops_max: total I/O operations per second during bursts, in bytes # (Since 1.7) @@ -554,9 +581,10 @@ # Since: 0.14 ## { 'struct': 'BlockDeviceInfo', - 'data': { 'file': 'str', '*node-name': 'str', 'ro': 'bool', 'drv': 'str', + 'data': { 'file': 'str', 'node-name': 'str', 'ro': 'bool', 'drv': 'str', '*backing_file': 'str', 'backing_file_depth': 'int', - 'encrypted': 'bool', + 'children': ['BlockdevChild'], + 'active': 'bool', 'encrypted': 'bool', 'detect_zeroes': 'BlockdevDetectZeroesOptions', 'bps': 'int', 'bps_rd': 'int', 'bps_wr': 'int', 'iops': 'int', 'iops_rd': 'int', 'iops_wr': 'int', @@ -610,7 +638,7 @@ # @inconsistent: true if this is a persistent bitmap that was # improperly stored. Implies @persistent to be true; @recording # and @busy to be false. This bitmap cannot be used. To remove -# it, use @block-dirty-bitmap-remove. (Since 4.0) +# it, use `block-dirty-bitmap-remove`. (Since 4.0) # # Since: 1.3 ## @@ -705,12 +733,12 @@ # @tray_open: True if the device's tray is open (only present if it # has a tray) # -# @io-status: @BlockDeviceIoStatus. Only present if the device +# @io-status: `BlockDeviceIoStatus`. 
Only present if the device # supports it and the VM is configured to stop on errors # (supported device models: virtio-blk, IDE, SCSI except # scsi-generic) # -# @inserted: @BlockDeviceInfo describing the device if media is +# @inserted: `BlockDeviceInfo` describing the device if media is # present # # Since: 0.14 @@ -757,9 +785,9 @@ ## # @query-block: # -# Get a list of BlockInfo for all virtual block devices. +# Get a list of `BlockInfo` for all virtual block devices. # -# Returns: a list of @BlockInfo describing each virtual block device. +# Returns: a list describing each virtual block device. # Filter nodes that were created implicitly are skipped over. # # Since: 0.14 @@ -947,11 +975,11 @@ # @unmap_operations: The number of unmap operations performed by the # device (Since 4.2) # -# @rd_total_time_ns: Total time spent on reads in nanoseconds (since -# 0.15). +# @rd_total_time_ns: Total time spent on reads in nanoseconds +# (since 0.15) # -# @wr_total_time_ns: Total time spent on writes in nanoseconds (since -# 0.15). +# @wr_total_time_ns: Total time spent on writes in nanoseconds +# (since 0.15) # # @zone_append_total_time_ns: Total time spent on zone append writes # in nanoseconds (since 8.1) @@ -1022,14 +1050,14 @@ # @timed_stats: Statistics specific to the set of previously defined # intervals of time (Since 2.5) # -# @rd_latency_histogram: @BlockLatencyHistogramInfo. (Since 4.0) +# @rd_latency_histogram: `BlockLatencyHistogramInfo`. (Since 4.0) # -# @wr_latency_histogram: @BlockLatencyHistogramInfo. (Since 4.0) +# @wr_latency_histogram: `BlockLatencyHistogramInfo`. (Since 4.0) # -# @zone_append_latency_histogram: @BlockLatencyHistogramInfo. +# @zone_append_latency_histogram: `BlockLatencyHistogramInfo`. # (since 8.1) # -# @flush_latency_histogram: @BlockLatencyHistogramInfo. (Since 4.0) +# @flush_latency_histogram: `BlockLatencyHistogramInfo`. (Since 4.0) # # Since: 0.14 ## @@ -1130,7 +1158,7 @@ # @qdev: The qdev ID, or if no ID is assigned, the QOM path of the # block device. (since 3.0) # -# @stats: A @BlockDeviceStats for the device. +# @stats: A `BlockDeviceStats` for the device. # # @driver-specific: Optional driver-specific stats. (Since 4.2) # @@ -1154,17 +1182,17 @@ ## # @query-blockstats: # -# Query the @BlockStats for all virtual block devices. +# Query the `BlockStats` for all virtual block devices. # # @query-nodes: If true, the command will query all the block nodes # that have a node name, in a list which will include "parent" -# information, but not "backing". If false or omitted, the +# information, but not "backing". If false or omitted, the # behavior is as before - query all the device backends, -# recursively including their "parent" and "backing". Filter nodes -# that were created implicitly are skipped over in this mode. -# (Since 2.3) +# recursively including their "parent" and "backing". Filter +# nodes that were created implicitly are skipped over in this +# mode. (Since 2.3) # -# Returns: A list of @BlockStats for each virtual block devices. +# Returns: A list of statistics for each virtual block device. # # Since: 0.14 # @@ -1285,8 +1313,8 @@ # @report: for guest operations, report the error to the guest; for # jobs, cancel the job # -# @ignore: ignore the error, only report a QMP event (BLOCK_IO_ERROR -# or BLOCK_JOB_ERROR). The backup, mirror and commit block jobs +# @ignore: ignore the error, only report a QMP event (`BLOCK_IO_ERROR` +# or `BLOCK_JOB_ERROR`). 
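Stepping back to the query-block additions above (the now-mandatory node-name plus the new children and active members of BlockDeviceInfo), a client-side sketch of walking such a reply might look as follows. The reply shape follows BlockInfo/BlockDeviceInfo/BlockdevChild as documented in this patch; the concrete device and node names are invented for illustration.

    # Sketch: walk a hypothetical query-block reply and print the per-node
    # information added in this patch ("children" and "active").
    from typing import Any, Dict, List


    def describe_block_devices(reply: List[Dict[str, Any]]) -> None:
        for blk in reply:
            inserted = blk.get("inserted")
            if inserted is None:
                print(f"{blk['device']}: no medium")
                continue
            state = "active" if inserted["active"] else "inactive"
            print(f"{blk['device']}: node {inserted['node-name']} "
                  f"({inserted['drv']}, {state}, ro={inserted['ro']})")
            for child in inserted["children"]:
                print(f"    child '{child['child']}' -> {child['node-name']}")


    describe_block_devices([
        {"device": "virtio0", "inserted": {
            "node-name": "fmt0", "drv": "qcow2", "ro": False, "active": True,
            "children": [
                {"child": "file", "node-name": "file0"},
            ]}},
    ])
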
The backup, mirror and commit block jobs # retry the failing request later and may still complete # successfully. The stream block job continues to stream and will # complete with an error. @@ -1318,8 +1346,8 @@ # @incremental: only copy data described by the dirty bitmap. # (since: 2.4) # -# @bitmap: only copy data described by the dirty bitmap. (since: 4.2) -# Behavior on completion is determined by the BitmapSyncMode. +# @bitmap: only copy data described by the dirty bitmap. Behavior on +# completion is determined by the `BitmapSyncMode`. (since: 4.2) # # Since: 1.3 ## @@ -1333,7 +1361,7 @@ # bitmap when used for data copy operations. # # @on-success: The bitmap is only synced when the operation is -# successful. This is the behavior always used for 'INCREMENTAL' +# successful. This is the behavior always used for incremental # backups. # # @never: The bitmap is never synchronized with the operation, and is @@ -1413,8 +1441,8 @@ # @auto-finalize: Job will finalize itself when PENDING, moving to the # CONCLUDED state. (since 2.12) # -# @auto-dismiss: Job will dismiss itself when CONCLUDED, moving to the -# NULL state and disappearing from the query list. (since 2.12) +# @auto-dismiss: Job will dismiss itself when CONCLUDED, and +# disappear. (since 2.12) # # @error: Error information if the job did not complete successfully. # Not set if the job completed successfully. (since 2.12.1) @@ -1436,7 +1464,7 @@ # # Return information about long-running block device operations. # -# Returns: a list of @BlockJobInfo for each active block job +# Returns: a list of job info for each active block job # # Since: 1.1 ## @@ -1498,15 +1526,15 @@ # # @device: the name of the device to take a snapshot of. # -# @node-name: graph node name to generate the snapshot from (Since -# 2.0) +# @node-name: graph node name to generate the snapshot from +# (Since 2.0) # # @snapshot-file: the target of the new overlay image. If the file # exists, or if it is a device, the overlay will be created in the # existing file/device. Otherwise, a new file will be created. # -# @snapshot-node-name: the graph node name of the new image (Since -# 2.0) +# @snapshot-node-name: the graph node name of the new image +# (Since 2.0) # # @format: the format of the overlay image, default is 'qcow2'. # @@ -1526,7 +1554,7 @@ # @overlay: reference to the existing block device that will become # the overlay of @node, as part of taking the snapshot. It must # not have a current backing file (this can be achieved by passing -# "backing": null to blockdev-add). +# "backing": null to `blockdev-add`). # # Since: 2.5 ## @@ -1551,11 +1579,16 @@ # it should not be less than job cluster size which is calculated # as maximum of target image cluster size and 64k. Default 0. # +# @min-cluster-size: Minimum size of blocks used by copy-before-write +# and background copy operations. Has to be a power of 2. No +# effect if smaller than the maximum of the target's cluster size +# and 64 KiB. Default 0. (Since 9.2) +# # Since: 6.0 ## { 'struct': 'BackupPerf', - 'data': { '*use-copy-range': 'bool', - '*max-workers': 'int', '*max-chunk': 'int64' } } + 'data': { '*use-copy-range': 'bool', '*max-workers': 'int', + '*max-chunk': 'int64', '*min-cluster-size': 'size' } } ## # @BackupCommon: @@ -1574,36 +1607,38 @@ # for unlimited. # # @bitmap: The name of a dirty bitmap to use. Must be present if sync -# is "bitmap" or "incremental". Can be present if sync is "full" +# is "bitmap" or "incremental". Can be present if sync is "full" # or "top". 
Must not be present otherwise. -# (Since 2.4 (drive-backup), 3.1 (blockdev-backup)) +# (Since 2.4 (`drive-backup`), 3.1 (`blockdev-backup`)) # # @bitmap-mode: Specifies the type of data the bitmap should contain # after the operation concludes. Must be present if a bitmap was -# provided, Must NOT be present otherwise. (Since 4.2) +# provided, must **not** be present otherwise. (Since 4.2) # # @compress: true to compress data, if the target format supports it. # (default: false) (since 2.8) # # @on-source-error: the action to take on an error on the source, # default 'report'. 'stop' and 'enospc' can only be used if the -# block device supports io-status (see BlockInfo). +# block device supports io-status (see `BlockInfo`). # # @on-target-error: the action to take on an error on the target, # default 'report' (no limitations, since this applies to a # different block device than @device). # +# @on-cbw-error: policy defining behavior on I/O errors in +# copy-before-write jobs; defaults to break-guest-write. (Since 10.1) +# # @auto-finalize: When false, this job will wait in a PENDING state -# after it has finished its work, waiting for @block-job-finalize -# before making any block graph changes. When true, this job will +# after it has finished its work, waiting for `job-finalize` before +# making any block graph changes. When true, this job will # automatically perform its abort or commit actions. Defaults to # true. (Since 2.12) # # @auto-dismiss: When false, this job will wait in a CONCLUDED state # after it has completely ceased all work, and awaits -# @block-job-dismiss. When true, this job will automatically -# disappear from the query list without user intervention. -# Defaults to true. (Since 2.12) +# `job-dismiss`. When true, this job will automatically disappear +# without user intervention. Defaults to true. (Since 2.12) # # @filter-node-name: the node name that should be assigned to the # filter driver that the backup job inserts into the graph above @@ -1619,9 +1654,9 @@ # # @unstable: Member @x-perf is experimental. # -# .. note:: @on-source-error and @on-target-error only affect background -# I/O. If an error occurs during a guest write request, the device's -# rerror/werror actions will be used. +# .. note:: @on-source-error and @on-target-error only affect +# background I/O. If an error occurs during a guest write request, +# the device's rerror/werror actions will be used. # # Since: 4.2 ## @@ -1632,6 +1667,7 @@ '*compress': 'bool', '*on-source-error': 'BlockdevOnError', '*on-target-error': 'BlockdevOnError', + '*on-cbw-error': 'OnCbwError', '*auto-finalize': 'bool', '*auto-dismiss': 'bool', '*filter-node-name': 'str', '*discard-source': 'bool', @@ -1699,7 +1735,7 @@ # Takes a snapshot of a block device. # # Take a snapshot, by installing 'node' as the backing image of -# 'overlay'. Additionally, if 'node' is associated with a block +# 'overlay'. Additionally, if 'node' is associated with a block # device, the block device changes to using 'overlay' as its new # active image. # @@ -1707,7 +1743,7 @@ # # @allow-write-only-overlay: If present, the check whether this # operation is safe was relaxed so that it can be used to change -# backing file of a destination of a blockdev-mirror. (since 5.0) +# backing file of a destination of a `blockdev-mirror`. (since 5.0) # # Since: 2.5 # @@ -1738,7 +1774,7 @@ # Change the backing file in the image file metadata. 
This does not # cause QEMU to reopen the image file to reparse the backing filename # (it may, however, perform a reopen to change permissions from r/o -> -# r/w -> r/o, if needed). The new backing file string is written into +# r/w -> r/o, if needed). The new backing file string is written into # the image file metadata, and the QEMU internal strings are updated. # # @image-node-name: The name of the block driver state node of the @@ -1772,8 +1808,7 @@ # If top == base, that is an error. If top has no overlays on top of # it, or if it is in use by a writer, the job will not be completed by # itself. The user needs to complete the job with the -# block-job-complete command after getting the ready event. (Since -# 2.0) +# `job-complete` command after getting the ready event. (Since 2.0) # # If the base image is smaller than top, then the base image will be # resized to be the same size as top. If top is smaller than the base @@ -1827,7 +1862,7 @@ # @speed: the maximum speed, in bytes per second # # @on-error: the action to take on an error. 'ignore' means that the -# request should be retried. (default: report; Since: 5.0) +# request should be retried. (default: report; since: 5.0) # # @filter-node-name: the node name that should be assigned to the # filter driver that the commit job inserts into the graph above @@ -1835,16 +1870,15 @@ # autogenerated. (Since: 2.9) # # @auto-finalize: When false, this job will wait in a PENDING state -# after it has finished its work, waiting for @block-job-finalize -# before making any block graph changes. When true, this job will +# after it has finished its work, waiting for `job-finalize` before +# making any block graph changes. When true, this job will # automatically perform its abort or commit actions. Defaults to # true. (Since 3.1) # # @auto-dismiss: When false, this job will wait in a CONCLUDED state # after it has completely ceased all work, and awaits -# @block-job-dismiss. When true, this job will automatically -# disappear from the query list without user intervention. -# Defaults to true. (Since 3.1) +# `job-dismiss`. When true, this job will automatically disappear +# without user intervention. Defaults to true. (Since 3.1) # # Features: # @@ -1853,7 +1887,6 @@ # # Errors: # - If @device does not exist, DeviceNotFound -# - Any other error returns a GenericError. # # Since: 1.3 # @@ -1880,14 +1913,14 @@ # @drive-backup: # # Start a point-in-time copy of a block device to a new destination. -# The status of ongoing drive-backup operations can be checked with -# query-block-jobs where the BlockJobInfo.type field has the value -# 'backup'. The operation can be stopped before it has completed using -# the block-job-cancel command. +# The status of ongoing `drive-backup` operations can be checked with +# `query-block-jobs` where the `BlockJobInfo`.type field has the value +# 'backup'. The operation can be stopped before it has completed +# using the `job-cancel` or `block-job-cancel` command. # # Features: # -# @deprecated: This command is deprecated. Use @blockdev-backup +# @deprecated: This command is deprecated. Use `blockdev-backup` # instead. # # Errors: @@ -1911,10 +1944,10 @@ # @blockdev-backup: # # Start a point-in-time copy of a block device to a new destination. -# The status of ongoing blockdev-backup operations can be checked with -# query-block-jobs where the BlockJobInfo.type field has the value -# 'backup'. The operation can be stopped before it has completed using -# the block-job-cancel command. 
+# The status of ongoing `blockdev-backup` operations can be checked with +# `query-block-jobs` where the `BlockJobInfo`.type field has the value +# 'backup'. The operation can be stopped before it has completed +# using the `job-cancel` or `block-job-cancel` command. # # Errors: # - If @device is not a valid block device, DeviceNotFound @@ -1941,8 +1974,6 @@ # @flat: Omit the nested data about backing image ("backing-image" # key) if true. Default is false (Since 5.0) # -# Returns: the list of BlockDeviceInfo -# # Since: 2.0 # # .. qmp-example:: @@ -2017,8 +2048,8 @@ # @XDbgBlockGraphNode: # # @id: Block graph node identifier. This @id is generated only for -# x-debug-query-block-graph and does not relate to any other -# identifiers in Qemu. +# `x-debug-query-block-graph` and does not relate to any other +# identifiers in QEMU. # # @type: Type of graph node. Can be one of block-backend, block-job # or block-driver-state. @@ -2066,7 +2097,7 @@ ## # @XDbgBlockGraphEdge: # -# Block Graph edge description for x-debug-query-block-graph. +# Block Graph edge description for `x-debug-query-block-graph`. # # @parent: parent id # @@ -2157,8 +2188,8 @@ # @format: the format of the new destination, default is to probe if # @mode is 'existing', else the format of the source # -# @node-name: the new block driver state node name in the graph (Since -# 2.1) +# @node-name: the new block driver state node name in the graph +# (Since 2.1) # # @replaces: with sync=full graph node name to be replaced by the new # image when a whole image copy is done. This can be used to @@ -2184,7 +2215,7 @@ # # @on-source-error: the action to take on an error on the source, # default 'report'. 'stop' and 'enospc' can only be used if the -# block device supports io-status (see BlockInfo). +# block device supports io-status (see `BlockInfo`). # # @on-target-error: the action to take on an error on the target, # default 'report' (no limitations, since this applies to a @@ -2200,16 +2231,15 @@ # 'background' (Since: 3.0) # # @auto-finalize: When false, this job will wait in a PENDING state -# after it has finished its work, waiting for @block-job-finalize -# before making any block graph changes. When true, this job will +# after it has finished its work, waiting for `job-finalize` before +# making any block graph changes. When true, this job will # automatically perform its abort or commit actions. Defaults to # true. (Since 3.1) # # @auto-dismiss: When false, this job will wait in a CONCLUDED state # after it has completely ceased all work, and awaits -# @block-job-dismiss. When true, this job will automatically -# disappear from the query list without user intervention. -# Defaults to true. (Since 3.1) +# `job-dismiss`. When true, this job will automatically disappear +# without user intervention. Defaults to true. (Since 3.1) # # Since: 1.3 ## @@ -2243,16 +2273,16 @@ # @name: name of the dirty bitmap (must be less than 1024 bytes) # # @granularity: the bitmap granularity, default is 64k for -# block-dirty-bitmap-add +# `block-dirty-bitmap-add` # # @persistent: the bitmap is persistent, i.e. it will be saved to the # corresponding block device image file on its close. For now # only Qcow2 disks support persistent bitmaps. Default is false -# for block-dirty-bitmap-add. (Since: 2.10) +# for `block-dirty-bitmap-add`. (Since: 2.10) # # @disabled: the bitmap is created in the disabled state, which means # that it will not track drive changes. The bitmap may be enabled -# with block-dirty-bitmap-enable. Default is false. 
(Since: 4.0) +# with `block-dirty-bitmap-enable`. Default is false. (Since: 4.0) # # Since: 2.4 ## @@ -2282,7 +2312,7 @@ # @target: name of the destination dirty bitmap # # @bitmaps: name(s) of the source dirty bitmap(s) at @node and/or -# fully specified BlockDirtyBitmap elements. The latter are +# fully specified `BlockDirtyBitmap` elements. The latter are # supported since 4.1. # # Since: 4.0 @@ -2299,7 +2329,7 @@ # # Errors: # - If @node is not a valid block device or node, DeviceNotFound -# - If @name is already taken, GenericError with an explanation +# - If @name is already taken, GenericError # # Since: 2.4 # @@ -2317,12 +2347,12 @@ # @block-dirty-bitmap-remove: # # Stop write tracking and remove the dirty bitmap that was created -# with block-dirty-bitmap-add. If the bitmap is persistent, remove it +# with `block-dirty-bitmap-add`. If the bitmap is persistent, remove it # from its storage too. # # Errors: # - If @node is not a valid block device or node, DeviceNotFound -# - If @name is not found, GenericError with an explanation +# - If @name is not found, GenericError # - if @name is frozen by an operation, GenericError # # Since: 2.4 @@ -2346,7 +2376,7 @@ # # Errors: # - If @node is not a valid block device, DeviceNotFound -# - If @name is not found, GenericError with an explanation +# - If @name is not found, GenericError # # Since: 2.4 # @@ -2367,7 +2397,7 @@ # # Errors: # - If @node is not a valid block device, DeviceNotFound -# - If @name is not found, GenericError with an explanation +# - If @name is not found, GenericError # # Since: 4.0 # @@ -2388,7 +2418,7 @@ # # Errors: # - If @node is not a valid block device, DeviceNotFound -# - If @name is not found, GenericError with an explanation +# - If @name is not found, GenericError # # Since: 4.0 # @@ -2456,13 +2486,9 @@ # # @unstable: This command is meant for debugging. # -# Returns: -# BlockDirtyBitmapSha256 -# # Errors: # - If @node is not a valid block device, DeviceNotFound # - If @name is not found or if hashing has failed, GenericError -# with an explanation # # Since: 2.10 ## @@ -2505,7 +2531,7 @@ # # @on-source-error: the action to take on an error on the source, # default 'report'. 'stop' and 'enospc' can only be used if the -# block device supports io-status (see BlockInfo). +# block device supports io-status (see `BlockInfo`). # # @on-target-error: the action to take on an error on the target, # default 'report' (no limitations, since this applies to a @@ -2520,16 +2546,20 @@ # 'background' (Since: 3.0) # # @auto-finalize: When false, this job will wait in a PENDING state -# after it has finished its work, waiting for @block-job-finalize -# before making any block graph changes. When true, this job will +# after it has finished its work, waiting for `job-finalize` before +# making any block graph changes. When true, this job will # automatically perform its abort or commit actions. Defaults to # true. (Since 3.1) # # @auto-dismiss: When false, this job will wait in a CONCLUDED state # after it has completely ceased all work, and awaits -# @block-job-dismiss. When true, this job will automatically -# disappear from the query list without user intervention. -# Defaults to true. (Since 3.1) +# `job-dismiss`. When true, this job will automatically disappear +# without user intervention. Defaults to true. (Since 3.1) +# +# @target-is-zero: Assume the destination reads as all zeroes before +# the mirror started. Setting this to true can speed up the +# mirror. 
Setting this to true when the destination is not +# actually all zero can corrupt the destination. (Since 10.1) # # Since: 2.6 # @@ -2550,7 +2580,8 @@ '*on-target-error': 'BlockdevOnError', '*filter-node-name': 'str', '*copy-mode': 'MirrorCopyMode', - '*auto-finalize': 'bool', '*auto-dismiss': 'bool' }, + '*auto-finalize': 'bool', '*auto-dismiss': 'bool', + '*target-is-zero': 'bool'}, 'allow-preconfig': true } ## @@ -2576,11 +2607,11 @@ # # @bps_max: total throughput limit during bursts, in bytes (Since 1.7) # -# @bps_rd_max: read throughput limit during bursts, in bytes (Since -# 1.7) +# @bps_rd_max: read throughput limit during bursts, in bytes +# (Since 1.7) # -# @bps_wr_max: write throughput limit during bursts, in bytes (Since -# 1.7) +# @bps_wr_max: write throughput limit during bursts, in bytes +# (Since 1.7) # # @iops_max: total I/O operations per second during bursts, in bytes # (Since 1.7) @@ -2650,7 +2681,7 @@ # @iops-total-max: I/O operations burst # # @iops-total-max-length: length of the iops-total-max burst period, -# in seconds It must only be set if @iops-total-max is set as +# in seconds. It must only be set if @iops-total-max is set as # well. # # @iops-read: limit read operations per second @@ -2658,14 +2689,14 @@ # @iops-read-max: I/O operations read burst # # @iops-read-max-length: length of the iops-read-max burst period, in -# seconds It must only be set if @iops-read-max is set as well. +# seconds. It must only be set if @iops-read-max is set as well. # # @iops-write: limit write operations per second # # @iops-write-max: I/O operations write burst # # @iops-write-max-length: length of the iops-write-max burst period, -# in seconds It must only be set if @iops-write-max is set as +# in seconds. It must only be set if @iops-write-max is set as # well. # # @bps-total: limit total bytes per second @@ -2680,14 +2711,14 @@ # @bps-read-max: total bytes read burst # # @bps-read-max-length: length of the bps-read-max burst period, in -# seconds It must only be set if @bps-read-max is set as well. +# seconds. It must only be set if @bps-read-max is set as well. # # @bps-write: limit write bytes per second # # @bps-write-max: total bytes write burst # # @bps-write-max-length: length of the bps-write-max burst period, in -# seconds It must only be set if @bps-write-max is set as well. +# seconds. It must only be set if @bps-write-max is set as well. # # @iops-size: when limiting by iops max size of an I/O in bytes # @@ -2770,14 +2801,14 @@ # The block streaming operation is performed in the background until # the entire backing file has been copied. This command returns # immediately once streaming has started. The status of ongoing block -# streaming operations can be checked with query-block-jobs. The +# streaming operations can be checked with `query-block-jobs`. The # operation can be stopped before it has completed using the -# block-job-cancel command. +# `job-cancel` or `block-job-cancel` command. # # The node that receives the data is called the top image, can be # located in any part of the chain (but always above the base image; # see below) and can be specified using its device or node name. -# Earlier qemu versions only allowed 'device' to name the top level +# Earlier QEMU versions only allowed 'device' to name the top level # node; presence of the 'base-node' parameter during introspection can # be used as a witness of the enhanced semantics of 'device'. # @@ -2791,9 +2822,9 @@ # will be the new backing file. 
# # On successful completion the image file is updated to drop the -# backing file and the BLOCK_JOB_COMPLETED event is emitted. +# backing file and the `BLOCK_JOB_COMPLETED` event is emitted. # -# In case @device is a filter node, block-stream modifies the first +# In case @device is a filter node, `block-stream` modifies the first # non-filter overlay node below it to point to the new backing node # instead of modifying @device itself. # @@ -2832,9 +2863,9 @@ # # @speed: the maximum speed, in bytes per second # -# @on-error: the action to take on an error (default report). 'stop' +# @on-error: the action to take on an error (default report). 'stop' # and 'enospc' can only be used if the block device supports -# io-status (see BlockInfo). (Since 1.3) +# io-status (see `BlockInfo`). (Since 1.3) # # @filter-node-name: the node name that should be assigned to the # filter driver that the stream job inserts into the graph above @@ -2842,16 +2873,15 @@ # autogenerated. (Since: 6.0) # # @auto-finalize: When false, this job will wait in a PENDING state -# after it has finished its work, waiting for @block-job-finalize -# before making any block graph changes. When true, this job will +# after it has finished its work, waiting for `job-finalize` before +# making any block graph changes. When true, this job will # automatically perform its abort or commit actions. Defaults to # true. (Since 3.1) # # @auto-dismiss: When false, this job will wait in a CONCLUDED state # after it has completely ceased all work, and awaits -# @block-job-dismiss. When true, this job will automatically -# disappear from the query list without user intervention. -# Defaults to true. (Since 3.1) +# `job-dismiss`. When true, this job will automatically disappear +# without user intervention. Defaults to true. (Since 3.1) # # Errors: # - If @device does not exist, DeviceNotFound. @@ -2911,13 +2941,13 @@ # command if no operation is in progress. # # The operation will cancel as soon as possible and then emit the -# BLOCK_JOB_CANCELLED event. Before that happens the job is still -# visible when enumerated using query-block-jobs. +# `BLOCK_JOB_CANCELLED` event. Before that happens the job is still +# visible when enumerated using `query-block-jobs`. # -# Note that if you issue 'block-job-cancel' after 'drive-mirror' has -# indicated (via the event BLOCK_JOB_READY) that the source and +# Note that if you issue `block-job-cancel` after `drive-mirror` has +# indicated (via the event `BLOCK_JOB_READY`) that the source and # destination are synchronized, then the event triggered by this -# command changes to BLOCK_JOB_COMPLETED, to indicate that the +# command changes to `BLOCK_JOB_COMPLETED`, to indicate that the # mirroring has ended and the destination now has a point-in-time copy # tied to the time of the cancellation. # @@ -2931,7 +2961,7 @@ # values. # # @force: If true, and the job has already emitted the event -# BLOCK_JOB_READY, abandon the job immediately (even if it is +# `BLOCK_JOB_READY`, abandon the job immediately (even if it is # paused) instead of waiting for the destination to complete its # final synchronization (since 1.3) # @@ -2949,18 +2979,24 @@ # # Pause an active background block operation. # -# This command returns immediately after marking the active background -# block operation for pausing. It is an error to call this command if -# no operation is in progress or if the job is already paused. +# This command returns immediately after marking the active job for +# pausing. 
Pausing an already paused job is an error. +# +# The job will pause as soon as possible, which means transitioning +# into the PAUSED state if it was RUNNING, or into STANDBY if it was +# READY. The corresponding `JOB_STATUS_CHANGE` event will be emitted. # -# The operation will pause as soon as possible. No event is emitted -# when the operation is actually paused. Cancelling a paused job -# automatically resumes it. +# Cancelling a paused job automatically resumes it. # # @device: The job identifier. This used to be a device name (hence # the name of the parameter), but since QEMU 2.7 it can have other # values. # +# Features: +# +# @deprecated: This command is deprecated. Use `job-pause` +# instead. +# # Errors: # - If no background operation is active on this device, # DeviceNotActive @@ -2968,6 +3004,7 @@ # Since: 1.3 ## { 'command': 'block-job-pause', 'data': { 'device': 'str' }, + 'features': ['deprecated'], 'allow-preconfig': true } ## @@ -2975,9 +3012,8 @@ # # Resume an active background block operation. # -# This command returns immediately after resuming a paused background -# block operation. It is an error to call this command if no -# operation is in progress or if the job is not paused. +# This command returns immediately after resuming a paused job. +# Resuming an already running job is an error. # # This command also clears the error status of the job. # @@ -2985,6 +3021,11 @@ # the name of the parameter), but since QEMU 2.7 it can have other # values. # +# Features: +# +# @deprecated: This command is deprecated. Use `job-resume` +# instead. +# # Errors: # - If no background operation is active on this device, # DeviceNotActive @@ -2992,30 +3033,39 @@ # Since: 1.3 ## { 'command': 'block-job-resume', 'data': { 'device': 'str' }, + 'features': ['deprecated'], 'allow-preconfig': true } ## # @block-job-complete: # -# Manually trigger completion of an active background block operation. -# This is supported for drive mirroring, where it also switches the -# device to write to the target path only. The ability to complete is -# signaled with a BLOCK_JOB_READY event. +# Manually trigger completion of an active job in the READY or STANDBY +# state. Completing the job in any other state is an error. +# +# This is supported only for drive mirroring, where it also switches +# the device to write to the target path only. Note that drive +# mirroring includes `drive-mirror`, `blockdev-mirror` and `block-commit` +# job (only in case of "active commit", when the node being commited +# is used by the guest). The ability to complete is signaled with a +# `BLOCK_JOB_READY` event. # # This command completes an active background block operation # synchronously. The ordering of this command's return with the -# BLOCK_JOB_COMPLETED event is not defined. Note that if an I/O error +# `BLOCK_JOB_COMPLETED` event is not defined. Note that if an I/O error # occurs during the processing of this command: 1) the command itself # will fail; 2) the error will be processed according to the # rerror/werror arguments that were specified when starting the # operation. # -# A cancelled or paused job cannot be completed. -# # @device: The job identifier. This used to be a device name (hence # the name of the parameter), but since QEMU 2.7 it can have other # values. # +# Features: +# +# @deprecated: This command is deprecated. Use `job-complete` +# instead. 
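A minimal sketch of the replacement commands recommended above, assuming a running job whose identifier is the hypothetical "job0" (the generic job-* commands take the job id rather than a device name):

    -> { "execute": "job-pause", "arguments": { "id": "job0" } }
    <- { "return": {} }

    -> { "execute": "job-resume", "arguments": { "id": "job0" } }
    <- { "return": {} }

The corresponding JOB_STATUS_CHANGE events report the RUNNING/PAUSED (or READY/STANDBY) transitions described above.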
+# # Errors: # - If no background operation is active on this device, # DeviceNotActive @@ -3023,43 +3073,64 @@ # Since: 1.3 ## { 'command': 'block-job-complete', 'data': { 'device': 'str' }, + 'features': ['deprecated'], 'allow-preconfig': true } ## # @block-job-dismiss: # -# For jobs that have already concluded, remove them from the -# block-job-query list. This command only needs to be run for jobs -# which were started with QEMU 2.12+ job lifetime management -# semantics. +# Deletes a job that is in the CONCLUDED state. This command only +# needs to be run explicitly for jobs that don't have automatic +# dismiss enabled. In turn, automatic dismiss may be enabled only +# for jobs that have @auto-dismiss option, which are `drive-backup`, +# `blockdev-backup`, `drive-mirror`, `blockdev-mirror`, `block-commit` and +# `block-stream`. @auto-dismiss is enabled by default for these +# jobs. # # This command will refuse to operate on any job that has not yet -# reached its terminal state, JOB_STATUS_CONCLUDED. For jobs that make -# use of the BLOCK_JOB_READY event, block-job-cancel or -# block-job-complete will still need to be used as appropriate. +# reached its terminal state, CONCLUDED. For jobs that make use of +# the `BLOCK_JOB_READY` event, `job-cancel`, `block-job-cancel` or +# `job-complete` will still need to be used as appropriate. # # @id: The job identifier. # +# Features: +# +# @deprecated: This command is deprecated. Use `job-dismiss` +# instead. +# # Since: 2.12 ## { 'command': 'block-job-dismiss', 'data': { 'id': 'str' }, + 'features': ['deprecated'], 'allow-preconfig': true } ## # @block-job-finalize: # -# Once a job that has manual=true reaches the pending state, it can be -# instructed to finalize any graph changes and do any necessary -# cleanup via this command. For jobs in a transaction, instructing -# one job to finalize will force ALL jobs in the transaction to -# finalize, so it is only necessary to instruct a single member job to -# finalize. +# Instructs all jobs in a transaction (or a single job if it is not +# part of any transaction) to finalize any graph changes and do any +# necessary cleanup. This command requires that all involved jobs are +# in the PENDING state. +# +# For jobs in a transaction, instructing one job to finalize will +# force ALL jobs in the transaction to finalize, so it is only +# necessary to instruct a single member job to finalize. +# +# The command is applicable only to jobs which have @auto-finalize option +# and only when this option is set to false. # # @id: The job identifier. # +# Features: +# +# @deprecated: This command is deprecated. Use `job-finalize` +# instead. +# # Since: 2.12 ## { 'command': 'block-job-finalize', 'data': { 'id': 'str' }, + 'features': ['deprecated'], 'allow-preconfig': true } ## @@ -3125,7 +3196,7 @@ # @on: Enabled # # @unmap: Enabled and even try to unmap blocks if possible. This -# requires also that @BlockdevDiscardOptions is set to unmap for +# requires also that `BlockdevDiscardOptions` is set to unmap for # this device. # # Since: 2.1 @@ -3138,7 +3209,7 @@ # # Selects the AIO backend to handle I/O requests # -# @threads: Use qemu's thread pool +# @threads: Use QEMU's thread pool # # @native: Use native AIO backend (only Linux and Windows) # @@ -3187,12 +3258,18 @@ # # @snapshot-access: Since 7.0 # +# Features: +# +# @deprecated: Member @gluster is deprecated because GlusterFS +# development ceased. 
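For a job created with @auto-finalize and @auto-dismiss set to false, the manual end-of-life sequence described above might look like this (a sketch; "backup0" is a hypothetical job id):

    -> { "execute": "job-finalize", "arguments": { "id": "backup0" } }
    <- { "return": {} }

    -> { "execute": "job-dismiss", "arguments": { "id": "backup0" } }
    <- { "return": {} }

job-finalize is issued once the job reaches PENDING, and job-dismiss once it has reached CONCLUDED.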
+# # Since: 2.9 ## { 'enum': 'BlockdevDriver', 'data': [ 'blkdebug', 'blklogwrites', 'blkreplay', 'blkverify', 'bochs', 'cloop', 'compress', 'copy-before-write', 'copy-on-read', 'dmg', - 'file', 'snapshot-access', 'ftp', 'ftps', 'gluster', + 'file', 'snapshot-access', 'ftp', 'ftps', + {'name': 'gluster', 'features': [ 'deprecated' ] }, {'name': 'host_cdrom', 'if': 'HAVE_HOST_BLOCK_DEVICE' }, {'name': 'host_device', 'if': 'HAVE_HOST_BLOCK_DEVICE' }, 'http', 'https', @@ -3351,8 +3428,8 @@ # Driver specific block device options for LUKS. # # @key-secret: the ID of a QCryptoSecret object providing the -# decryption key (since 2.6). Mandatory except when doing a -# metadata-only probe of the image. +# decryption key. Mandatory except when doing a metadata-only +# probe of the image. (since 2.6) # # @header: block device holding a detached LUKS header. (since 9.0) # @@ -3591,8 +3668,8 @@ # this feature. (since 2.5) # # @encrypt: Image decryption options. Mandatory for encrypted images, -# except when doing a metadata-only probe of the image. (since -# 2.10) +# except when doing a metadata-only probe of the image. +# (since 2.10) # # @data-file: reference to or definition of the external data file. # This may only be specified for images that require an external @@ -3746,7 +3823,7 @@ # # Since: 4.1 ## -{ 'enum': 'BlkdebugIOType', 'prefix': 'BLKDEBUG_IO_TYPE', +{ 'enum': 'BlkdebugIOType', 'data': [ 'read', 'write', 'write-zeroes', 'discard', 'flush', 'block-status' ] } @@ -4050,6 +4127,7 @@ # @path: path to the vhost-vdpa character device. # # Features: +# # @fdset: Member @path supports the special "/dev/fdset/N" path # (since 8.1) # @@ -4162,7 +4240,7 @@ ## { 'struct': 'RbdEncryptionCreateOptionsLUKSBase', 'base': 'RbdEncryptionOptionsLUKSBase', - 'data': { '*cipher-alg': 'QCryptoCipherAlgorithm' } } + 'data': { '*cipher-alg': 'QCryptoCipherAlgo' } } ## # @RbdEncryptionOptionsLUKS: @@ -4261,8 +4339,8 @@ # @user: Ceph id name. # # @auth-client-required: Acceptable authentication modes. This maps -# to Ceph configuration option "auth_client_required". (Since -# 3.0) +# to Ceph configuration option "auth_client_required". +# (Since 3.0) # # @key-secret: ID of a QCryptoSecret object providing a key for cephx # authentication. This maps to Ceph configuration option "key". @@ -4427,7 +4505,7 @@ # curl backend. URLs must start with "http://". # # @cookie: List of cookies to set; format is "name1=content1; -# name2=content2;" as explained by CURLOPT_COOKIE(3). Defaults to +# name2=content2;" as explained by CURLOPT_COOKIE(3). Defaults to # no cookies. # # @cookie-secret: ID of a QCryptoSecret object providing the cookie @@ -4447,7 +4525,7 @@ # curl backend. URLs must start with "https://". # # @cookie: List of cookies to set; format is "name1=content1; -# name2=content2;" as explained by CURLOPT_COOKIE(3). Defaults to +# name2=content2;" as explained by CURLOPT_COOKIE(3). Defaults to # no cookies. # # @sslverify: Whether to verify the SSL certificate's validity @@ -4516,8 +4594,8 @@ # error. During the first @reconnect-delay seconds, all requests # are paused and will be rerun on a successful reconnect. After # that time, any delayed requests and all future requests before a -# successful reconnect will immediately fail. Default 0 (Since -# 4.2) +# successful reconnect will immediately fail. Default 0 +# (Since 4.2) # # @open-timeout: In seconds. If zero, the nbd driver tries the # connection only once, and fails to open if the connection fails. 
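As a sketch of how the NBD reconnect options documented above might be used, the following blockdev-add creates an NBD client node (host, port, export name and node name are hypothetical; the member names follow the documentation above):

    -> { "execute": "blockdev-add",
         "arguments": { "driver": "nbd",
                        "node-name": "nbd0",
                        "server": { "type": "inet",
                                    "host": "198.51.100.5",
                                    "port": "10809" },
                        "export": "drive0",
                        "reconnect-delay": 10,
                        "open-timeout": 5 } }
    <- { "return": {} }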
@@ -4638,12 +4716,18 @@ # @on-cbw-error parameter will decide how this failure is handled. # Default 0. (Since 7.1) # +# @min-cluster-size: Minimum size of blocks used by copy-before-write +# operations. Has to be a power of 2. No effect if smaller than +# the maximum of the target's cluster size and 64 KiB. Default 0. +# (Since 9.2) +# # Since: 6.2 ## { 'struct': 'BlockdevOptionsCbw', 'base': 'BlockdevOptionsGenericFormat', 'data': { 'target': 'BlockdevRef', '*bitmap': 'BlockDirtyBitmap', - '*on-cbw-error': 'OnCbwError', '*cbw-timeout': 'uint32' } } + '*on-cbw-error': 'OnCbwError', '*cbw-timeout': 'uint32', + '*min-cluster-size': 'size' } } ## # @BlockdevOptions: @@ -4653,18 +4737,23 @@ # # @driver: block driver name # -# @node-name: the node name of the new node (Since 2.0). This option -# is required on the top level of blockdev-add. Valid node names -# start with an alphabetic character and may contain only -# alphanumeric characters, '-', '.' and '_'. Their maximum length -# is 31 characters. +# @node-name: the node name of the new node. This option is required +# on the top level of `blockdev-add`. Valid node names start with +# an alphabetic character and may contain only alphanumeric +# characters, '-', '.' and '_'. Their maximum length is 31 +# characters. (Since 2.0) # # @discard: discard-related options (default: ignore) # # @cache: cache-related options # +# @active: whether the block node should be activated (default: true). +# Having inactive block nodes is useful primarily for migration because it +# allows opening an image on the destination while the source is still +# holding locks for it. (Since 10.0) +# # @read-only: whether the block device should be read-only (default: -# false). Note that some block drivers support only read-only +# false). Note that some block drivers support only read-only # access, either generally or in certain configurations. In this # case, the default value does not work and the option must be # specified explicitly. @@ -4689,6 +4778,7 @@ '*node-name': 'str', '*discard': 'BlockdevDiscardOptions', '*cache': 'BlockdevCacheOptions', + '*active': 'bool', '*read-only': 'bool', '*auto-read-only': 'bool', '*force-share': 'bool', @@ -4852,7 +4942,7 @@ # cancelled. # # The command receives a list of block devices to reopen. For each -# one of them, the top-level @node-name option (from BlockdevOptions) +# one of them, the top-level @node-name option (from `BlockdevOptions`) # must be specified and is used to select the block device to be # reopened. Other @node-name options must be either omitted or set to # the current name of the appropriate node. This command won't change @@ -4861,7 +4951,7 @@ # In the case of options that refer to child nodes, the behavior of # this command depends on the value: # -# 1) A set of options (BlockdevOptions): the child is reopened with +# 1) A set of options (`BlockdevOptions`): the child is reopened with # the specified set of options. # # 2) A reference to the current child: the child is reopened using @@ -4870,12 +4960,12 @@ # 3) A reference to a different node: the current child is replaced # with the specified one. # -# 4) NULL: the current child (if any) is detached. +# 4) null: the current child (if any) is detached. # # Options (1) and (2) are supported in all cases. Option (3) is # supported for @file and @backing, and option (4) for @backing only. 
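The new @active member documented above can be combined with an ordinary blockdev-add; a minimal sketch for migration-style handover (the file name and node names are hypothetical):

    -> { "execute": "blockdev-add",
         "arguments": { "driver": "qcow2",
                        "node-name": "disk0",
                        "active": false,
                        "file": { "driver": "file",
                                  "filename": "/images/disk.qcow2" } } }
    <- { "return": {} }

The node can later be activated with blockdev-set-active, as shown in the example below.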
# -# Unlike with blockdev-add, the @backing option must always be present +# Unlike with `blockdev-add`, the @backing option must always be present # unless the node being reopened does not have a backing file and its # image does not have a default backing file name as part of its # metadata. @@ -4889,7 +4979,7 @@ ## # @blockdev-del: # -# Deletes a block device that has been added using blockdev-add. The +# Deletes a block device that has been added using `blockdev-add`. The # command will fail if the node is attached to a device or is # otherwise being used. # @@ -4919,6 +5009,38 @@ { 'command': 'blockdev-del', 'data': { 'node-name': 'str' }, 'allow-preconfig': true } +## +# @blockdev-set-active: +# +# Activate or inactivate a block device. Use this to manage the handover of +# block devices on migration with qemu-storage-daemon. +# +# Activating a node automatically activates all of its child nodes first. +# Inactivating a node automatically inactivates any of its child nodes that are +# not in use by a still active node. +# +# @node-name: Name of the graph node to activate or inactivate. By default, all +# nodes are affected by the operation. +# +# @active: true if the nodes should be active when the command returns success, +# false if they should be inactive. +# +# Since: 10.0 +# +# .. qmp-example:: +# +# -> { "execute": "blockdev-set-active", +# "arguments": { +# "node-name": "node0", +# "active": false +# } +# } +# <- { "return": {} } +## +{ 'command': 'blockdev-set-active', + 'data': { '*node-name': 'str', 'active': 'bool' }, + 'allow-preconfig': true } + ## # @BlockdevCreateOptionsFile: # @@ -5048,10 +5170,10 @@ ## # @BlockdevQcow2Version: # -# @v2: The original QCOW2 format as introduced in qemu 0.10 (version +# @v2: The original QCOW2 format as introduced in QEMU 0.10 (version # 2) # -# @v3: The extended QCOW2 format as introduced in qemu 1.1 (version 3) +# @v3: The extended QCOW2 format as introduced in QEMU 1.1 (version 3) # # Since: 2.12 ## @@ -5231,8 +5353,8 @@ # monolithcFlat, twoGbMaxExtentSparse and twoGbMaxExtentFlat # formats. For monolithicFlat, only one entry is required; for # twoGbMaxExtent* formats, the number of entries required is -# calculated as extent_number = virtual_size / 2GB. Providing more -# extents than will be used is an error. +# calculated as extent_number = virtual_size / 2GB. Providing +# more extents than will be used is an error. # # @subformat: The subformat of the VMDK image. Default: # "monolithicSparse". @@ -5244,7 +5366,7 @@ # Default: ide. # # @hwversion: Hardware version. The meaningful options are "4" or -# "6". Default: "4". +# "6". Default: "4". # # @toolsversion: VMware guest tools version. Default: "2147483647" # (Since 6.2) @@ -5412,7 +5534,7 @@ # @blockdev-create: # # Starts a job to create an image format on a given node. The job is -# automatically finalized, but a manual job-dismiss is required. +# automatically finalized, but a manual `job-dismiss` is required. # # @job-id: Identifier for the newly created job. # @@ -5440,7 +5562,7 @@ ## # @BlockdevAmendOptionsQcow2: # -# Driver specific image amend options for qcow2. For now, only +# Driver specific image amend options for qcow2. For now, only # encryption options can be amended # # @encrypt: Encryption options to be amended @@ -5471,8 +5593,8 @@ # @x-blockdev-amend: # # Starts a job to amend format specific options of an existing open -# block device The job is automatically finalized, but a manual -# job-dismiss is required. +# block device. 
The job is automatically finalized, but a manual +# `job-dismiss` is required. # # @job-id: Identifier for the newly created job. # @@ -5480,7 +5602,7 @@ # # @options: Options (driver specific) # -# @force: Allow unsafe operations, format specific For luks that +# @force: Allow unsafe operations, format specific. For luks that # allows erase of the last active keyslot (permanent loss of # data), and replacement of an active keyslot (possible loss of # data if IO error happens) @@ -5541,10 +5663,10 @@ # # @fatal: if set, the image is marked corrupt and therefore unusable # after this event and must be repaired (Since 2.2; before, every -# BLOCK_IMAGE_CORRUPTED event was fatal) +# `BLOCK_IMAGE_CORRUPTED` event was fatal) # -# .. note:: If action is "stop", a STOP event will eventually follow the -# BLOCK_IO_ERROR event. +# .. note:: If action is "stop", a `STOP` event will eventually follow +# the `BLOCK_IO_ERROR` event. # # .. qmp-example:: # @@ -5568,6 +5690,8 @@ # # Emitted when a disk I/O error occurs # +# @qom-path: path to the device object in the QOM tree (since 9.2) +# # @device: device name. This is always present for compatibility # reasons, but it can be empty ("") if the image does not have a # device name associated. @@ -5583,22 +5707,25 @@ # # @nospace: true if I/O error was caused due to a no-space condition. # This key is only present if query-block's io-status is present, -# please see query-block documentation for more information +# please see `query-block` documentation for more information # (since: 2.2) # # @reason: human readable string describing the error cause. (This # field is a debugging aid for humans, it should not be parsed by # applications) (since: 2.2) # -# .. note:: If action is "stop", a STOP event will eventually follow the -# BLOCK_IO_ERROR event. +# .. note:: If action is "stop", a `STOP` event will eventually follow +# the `BLOCK_IO_ERROR` event. +# +# .. note:: This event is rate-limited. # # Since: 0.13 # # .. qmp-example:: # # <- { "event": "BLOCK_IO_ERROR", -# "data": { "device": "ide0-hd1", +# "data": { "qom-path": "/machine/unattached/device[0]", +# "device": "ide0-hd1", # "node-name": "#block212", # "operation": "write", # "action": "stop", @@ -5606,7 +5733,7 @@ # "timestamp": { "seconds": 1265044230, "microseconds": 450486 } } ## { 'event': 'BLOCK_IO_ERROR', - 'data': { 'device': 'str', '*node-name': 'str', + 'data': { 'qom-path': 'str', 'device': 'str', '*node-name': 'str', 'operation': 'IoOperationType', 'action': 'BlockErrorAction', '*nospace': 'bool', 'reason': 'str' } } @@ -5730,7 +5857,7 @@ # @speed: rate limit, bytes per second # # .. note:: The "ready to complete" status is always reset by a -# @BLOCK_JOB_ERROR event. +# `BLOCK_JOB_ERROR` event. # # Since: 1.3 # @@ -5752,7 +5879,7 @@ # @BLOCK_JOB_PENDING: # # Emitted when a block job is awaiting explicit authorization to -# finalize graph changes via @block-job-finalize. If this job is part +# finalize graph changes via `job-finalize`. If this job is part # of a transaction, it will not emit this event until the transaction # has converged first. # @@ -5801,7 +5928,7 @@ # configured write threshold. For thin-provisioned devices, this # means the device should be extended to avoid pausing for disk # exhaustion. The event is one shot. Once triggered, it needs to be -# re-registered with another block-set-write-threshold command. +# re-registered with another `block-set-write-threshold` command. # # @node-name: graph node name on which the threshold was exceeded. 
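Since the BLOCK_WRITE_THRESHOLD event described above is one-shot, a management application typically re-arms it after handling it; a sketch of the re-registration (the node name and threshold value are hypothetical):

    -> { "execute": "block-set-write-threshold",
         "arguments": { "node-name": "disk0",
                        "write-threshold": 21474836480 } }
    <- { "return": {} }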
# @@ -5850,35 +5977,31 @@ ## # @x-blockdev-change: # -# Dynamically reconfigure the block driver state graph. It can be -# used to add, remove, insert or replace a graph node. Currently only -# the Quorum driver implements this feature to add or remove its -# child. This is useful to fix a broken quorum child. +# Dynamically reconfigure the block driver state graph. # -# If @node is specified, it will be inserted under @parent. @child -# may not be specified in this case. If both @parent and @child are -# specified but @node is not, @child will be detached from @parent. +# Currently only supports adding and deleting quorum children. A +# child will be added at the end of the list of children. Its +# contents *must* be consistent with the other childrens' contents. +# Deleting a child that is not last in the list of children is +# problematic, because it "renumbers" the children following it. # # @parent: the id or name of the parent node. # -# @child: the name of a child under the given parent node. +# @child: the name of a child to be deleted. Mutually exclusive with +# @node. # -# @node: the name of the node that will be added. +# @node: the name of the node to be added. Mutually exclusive with +# @child. # # Features: # -# @unstable: This command is experimental, and its API is not stable. -# It does not support all kinds of operations, all kinds of -# children, nor all block drivers. +# @unstable: This command is experimental. # -# FIXME Removing children from a quorum node means introducing +# TODO: Removing children from a quorum node means introducing # gaps in the child indices. This cannot be represented in the -# 'children' list of BlockdevOptionsQuorum, as returned by +# 'children' list of `BlockdevOptionsQuorum`, as returned by # .bdrv_refresh_filename(). # -# Warning: The data in a new quorum child MUST be consistent with -# that of the rest of the array. -# # Since: 2.7 # # .. qmp-example:: @@ -6046,10 +6169,6 @@ # # @name: the name of the internal snapshot to be created # -# .. note:: In a transaction, if @name is empty or any snapshot matching -# @name exists, the operation will fail. Only some image formats -# support it; for example, qcow2, and rbd. -# # Since: 1.7 ## { 'struct': 'BlockdevSnapshotInternal', @@ -6070,6 +6189,9 @@ # - If the format of the image used does not support it, # GenericError # +# .. note:: Only some image formats such as qcow2 and rbd support +# internal snapshots. +# # Since: 1.7 # # .. qmp-example:: @@ -6090,7 +6212,7 @@ # Synchronously delete an internal snapshot of a block device, when # the format of the image used support it. The snapshot is identified # by name or id or both. One of the name or id is required. Return -# SnapshotInfo for the successfully deleted snapshot. +# `SnapshotInfo` for the successfully deleted snapshot. 
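A minimal sketch of the internal-snapshot commands discussed above, for an image format that supports them such as qcow2 (the device and snapshot names are hypothetical):

    -> { "execute": "blockdev-snapshot-internal-sync",
         "arguments": { "device": "scratch0",
                        "name": "snapshot1" } }
    <- { "return": {} }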
# # @device: the device name or node-name of a root node to delete the # snapshot from @@ -6099,9 +6221,6 @@ # # @name: optional the snapshot's name to be deleted # -# Returns: -# SnapshotInfo -# # Errors: # - If @device is not a valid block device, GenericError # - If snapshot not found, GenericError diff --git a/qapi/block-export.json b/qapi/block-export.json index 3919a2d5b9d..6878b89dcf8 100644 --- a/qapi/block-export.json +++ b/qapi/block-export.json @@ -2,20 +2,19 @@ # vim: filetype=python ## -# == Block device exports +# Block device exports +# ==================== ## { 'include': 'sockets.json' } { 'include': 'block-core.json' } ## -# @NbdServerOptions: -# -# Keep this type consistent with the nbd-server-start arguments. The -# only intended difference is using SocketAddress instead of -# SocketAddressLegacy. +# @NbdServerOptionsBase: # -# @addr: Address on which to listen. +# @handshake-max-seconds: Time limit, in seconds, at which a client +# that has not completed the negotiation handshake will be +# disconnected, or 0 for no limit (since 10.0; default: 10). # # @tls-creds: ID of the TLS credentials object (since 2.6). # @@ -28,42 +27,47 @@ # @max-connections: The maximum number of connections to allow at the # same time, 0 for unlimited. Setting this to 1 also stops the # server from advertising multiple client support (since 5.2; -# default: 0) -# -# Since: 4.2 +# default: 100). ## -{ 'struct': 'NbdServerOptions', - 'data': { 'addr': 'SocketAddress', +{ 'struct': 'NbdServerOptionsBase', + 'data': { '*handshake-max-seconds': 'uint32', '*tls-creds': 'str', '*tls-authz': 'str', '*max-connections': 'uint32' } } ## -# @nbd-server-start: -# -# Start an NBD server listening on the given host and port. Block -# devices can then be exported using @nbd-server-add. The NBD server -# will present them as named exports; for example, another QEMU -# instance could refer to them as "nbd:HOST:PORT:exportname=NAME". +# @NbdServerOptions: # -# Keep this type consistent with the NbdServerOptions type. The only -# intended difference is using SocketAddressLegacy instead of -# SocketAddress. +# Keep this type consistent with the `NbdServerOptionsLegacy` type. The +# only intended difference is using `SocketAddress` instead of +# `SocketAddressLegacy`. # -# @addr: Address on which to listen. +# @addr: Address on which to listen (since 4.2). +## +{ 'struct': 'NbdServerOptions', + 'base': 'NbdServerOptionsBase', + 'data': { 'addr': 'SocketAddress' } } + +## +# @NbdServerOptionsLegacy: # -# @tls-creds: ID of the TLS credentials object (since 2.6). +# Keep this type consistent with the `NbdServerOptions` type. The only +# intended difference is using `SocketAddressLegacy` instead of +# `SocketAddress`. # -# @tls-authz: ID of the QAuthZ authorization object used to validate -# the client's x509 distinguished name. This object is is only -# resolved at time of use, so can be deleted and recreated on the -# fly while the NBD server is active. If missing, it will default -# to denying access (since 4.0). +# @addr: Address on which to listen (since 1.3). +## +{ 'struct': 'NbdServerOptionsLegacy', + 'base': 'NbdServerOptionsBase', + 'data': { 'addr': 'SocketAddressLegacy' } } + +## +# @nbd-server-start: # -# @max-connections: The maximum number of connections to allow at the -# same time, 0 for unlimited. Setting this to 1 also stops the -# server from advertising multiple client support (since 5.2; -# default: 0). +# Start an NBD server listening on the given host and port. 
Block +# devices can then be exported using `nbd-server-add`. The NBD server +# will present them as named exports; for example, another QEMU +# instance could refer to them as "nbd:HOST:PORT:exportname=NAME". # # Errors: # - if the server is already running @@ -71,17 +75,14 @@ # Since: 1.3 ## { 'command': 'nbd-server-start', - 'data': { 'addr': 'SocketAddressLegacy', - '*tls-creds': 'str', - '*tls-authz': 'str', - '*max-connections': 'uint32' }, + 'data': 'NbdServerOptionsLegacy', 'allow-preconfig': true } ## # @BlockExportOptionsNbdBase: # -# An NBD block export (common options shared between nbd-server-add -# and the NBD branch of block-export-add). +# An NBD block export (common options shared between `nbd-server-add` +# and the NBD branch of `block-export-add`). # # @name: Export name. If unspecified, the @device parameter is used # as the export name. (Since 2.12) @@ -98,7 +99,7 @@ # @BlockExportOptionsNbd: # # An NBD block export (distinct options used in the NBD branch of -# block-export-add). +# `block-export-add`). # # @bitmaps: Also export each of the named dirty bitmaps reachable from # @device, so the NBD client can use NBD_OPT_SET_META_CONTEXT with @@ -124,7 +125,7 @@ # A vhost-user-blk block export. # # @addr: The vhost-user socket on which to listen. Both 'unix' and -# 'fd' SocketAddress types are supported. Passed fds must be UNIX +# 'fd' `SocketAddress` types are supported. Passed fds must be UNIX # domain sockets. # # @logical-block-size: Logical block size in bytes. Defaults to 512 @@ -163,16 +164,16 @@ # Options for exporting a block graph node on some (file) mountpoint # as a raw image. # -# @mountpoint: Path on which to export the block device via FUSE. This -# must point to an existing regular file. +# @mountpoint: Path on which to export the block device via FUSE. +# This must point to an existing regular file. # # @growable: Whether writes beyond the EOF should grow the block node # accordingly. (default: false) # -# @allow-other: If this is off, only qemu's user is allowed access to +# @allow-other: If this is off, only QEMU's user is allowed access to # this export. That cannot be changed even with chmod or chown. # Enabling this option will allow other users access to the export -# with the FUSE mount option "allow_other". Note that using +# with the FUSE mount option "allow_other". Note that using # allow_other as a non-root user requires user_allow_other to be # enabled in the global fuse.conf configuration file. In auto # mode (the default), the FUSE export driver will first attempt to @@ -199,7 +200,7 @@ # @queue-size: the size of virtqueue. Defaults to 256. # # @logical-block-size: Logical block size in bytes. Range [512, -# PAGE_SIZE] and must be power of 2. Defaults to 512 bytes. +# PAGE_SIZE] and must be power of 2. Defaults to 512 bytes. # # @serial: the serial number of virtio block device. Defaults to # empty string. @@ -216,7 +217,7 @@ ## # @NbdServerAddOptions: # -# An NBD block export, per legacy nbd-server-add command. +# An NBD block export, per legacy `nbd-server-add` command. # # @device: The device name or node name of the node to be exported # @@ -245,7 +246,7 @@ # # Features: # -# @deprecated: This command is deprecated. Use @block-export-add +# @deprecated: This command is deprecated. Use `block-export-add` # instead. # # Errors: @@ -288,12 +289,12 @@ # # @name: Block export id. # -# @mode: Mode of command operation. See @BlockExportRemoveMode +# @mode: Mode of command operation. See `BlockExportRemoveMode` # description. 
Default is 'safe'. # # Features: # -# @deprecated: This command is deprecated. Use @block-export-del +# @deprecated: This command is deprecated. Use `block-export-del` # instead. # # Errors: @@ -312,7 +313,7 @@ # @nbd-server-stop: # # Stop QEMU's embedded NBD server, and unregister all devices -# previously added via @nbd-server-add. +# previously added via `nbd-server-add`. # # Since: 1.3 ## @@ -372,6 +373,13 @@ # cannot be moved to the iothread. The default is false. # (since: 5.2) # +# @allow-inactive: If true, the export allows the exported node to be inactive. +# If it is created for an inactive block node, the node remains inactive. If +# the export type doesn't support running on an inactive node, an error is +# returned. If false, inactive block nodes are automatically activated before +# creating the export and trying to inactivate them later fails. +# (since: 10.0; default: false) +# # Since: 4.2 ## { 'union': 'BlockExportOptions', @@ -381,7 +389,8 @@ '*iothread': 'str', 'node-name': 'str', '*writable': 'bool', - '*writethrough': 'bool' }, + '*writethrough': 'bool', + '*allow-inactive': 'bool' }, 'discriminator': 'type', 'data': { 'nbd': 'BlockExportOptionsNbd', @@ -413,7 +422,7 @@ # # @id: Block export id. # -# @mode: Mode of command operation. See @BlockExportRemoveMode +# @mode: Mode of command operation. See `BlockExportRemoveMode` # description. Default is 'safe'. # # Errors: @@ -451,7 +460,7 @@ # @node-name: The node name of the block node that is exported # # @shutting-down: True if the export is shutting down (e.g. after a -# block-export-del command, but before the shutdown has completed) +# `block-export-del` command, but before the shutdown has completed) # # Since: 5.2 ## @@ -464,7 +473,7 @@ ## # @query-block-exports: # -# Returns: A list of BlockExportInfo describing all block exports +# Returns: A list describing all block exports # # Since: 5.2 ## diff --git a/qapi/block.json b/qapi/block.json index ce9490a3678..46955bbb3e3 100644 --- a/qapi/block.json +++ b/qapi/block.json @@ -2,13 +2,16 @@ # vim: filetype=python ## -# = Block devices +# ************* +# Block devices +# ************* ## { 'include': 'block-core.json' } ## -# == Additional block stuff (VM related) +# Additional block stuff (VM related) +# =================================== ## ## @@ -48,7 +51,7 @@ ## # @FloppyDriveType: # -# Type of Floppy drive to be emulated by the Floppy Disk Controller. +# Type of floppy drive to be emulated by the Floppy Disk Controller. # # @144: 1.44MB 3.5" drive # @@ -83,10 +86,10 @@ ## # @query-pr-managers: # -# Returns a list of information about each persistent reservation +# Return a list of information about each persistent reservation # manager. # -# Returns: a list of @PRManagerInfo for each persistent reservation +# Returns: a list of manager info for each persistent reservation # manager # # Since: 3.0 @@ -137,7 +140,7 @@ # # If the tray was already open before, this will be a no-op. # -# Once the tray opens, a DEVICE_TRAY_MOVED event is emitted. There +# Once the tray opens, a `DEVICE_TRAY_MOVED` event is emitted. There # are cases in which no such event will be generated, these include: # # - if the guest has locked the tray, @force is false and the guest @@ -296,7 +299,7 @@ # @BlockdevChangeReadOnlyMode: # # Specifies the new read-only mode of a block device subject to the -# @blockdev-change-medium command. +# `blockdev-change-medium` command. 
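The new @allow-inactive member documented above is passed along with the usual block-export-add arguments; a sketch for an NBD export of a possibly inactive node (the export id and node name are hypothetical):

    -> { "execute": "block-export-add",
         "arguments": { "type": "nbd",
                        "id": "export0",
                        "node-name": "disk0",
                        "writable": false,
                        "allow-inactive": true } }
    <- { "return": {} }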
# # @retain: Retains the current read-only mode # @@ -314,9 +317,9 @@ # # Changes the medium inserted into a block device by ejecting the # current medium and loading a new image file which is inserted as the -# new medium (this command combines blockdev-open-tray, -# blockdev-remove-medium, blockdev-insert-medium and -# blockdev-close-tray). +# new medium (this command combines `blockdev-open-tray`, +# `blockdev-remove-medium`, `blockdev-insert-medium` and +# `blockdev-close-tray`). # # @device: Block device name # @@ -331,7 +334,7 @@ # to 'retain' # # @force: if false (the default), an eject request through -# blockdev-open-tray will be sent to the guest if it has locked +# `blockdev-open-tray` will be sent to the guest if it has locked # the tray (and the tray will not be opened immediately); if true, # the tray will be opened regardless of whether it is locked. # (since 7.1) @@ -454,8 +457,8 @@ # different group. In this case the limits specified in the # parameters will be applied to the new group only. # -# I/O limits can be disabled by setting all of them to 0. In this case -# the device will be removed from its group and the rest of its +# I/O limits can be disabled by setting all of them to 0. In this +# case the device will be removed from its group and the rest of its # members will not be affected. The 'group' parameter is ignored. # # Errors: @@ -519,10 +522,10 @@ # @id: The name or QOM path of the guest device. # # @boundaries: list of interval boundary values (see description in -# BlockLatencyHistogramInfo definition). If specified, all latency -# histograms are removed, and empty ones created for all io types -# with intervals corresponding to @boundaries (except for io -# types, for which specific boundaries are set through the +# `BlockLatencyHistogramInfo` definition). If specified, all +# latency histograms are removed, and empty ones created for all +# io types with intervals corresponding to @boundaries (except for +# io types, for which specific boundaries are set through the # following parameters). # # @boundaries-read: list of interval boundary values for read latency diff --git a/qapi/char.json b/qapi/char.json index 5e4aeb9799d..f0a53f742c8 100644 --- a/qapi/char.json +++ b/qapi/char.json @@ -3,7 +3,9 @@ # ## -# = Character devices +# ***************** +# Character devices +# ***************** ## { 'include': 'sockets.json' } @@ -34,9 +36,7 @@ ## # @query-chardev: # -# Returns information about current character devices. -# -# Returns: a list of @ChardevInfo +# Return information about current character devices. # # Since: 0.14 # @@ -80,9 +80,7 @@ ## # @query-chardev-backends: # -# Returns information about character device backends. -# -# Returns: a list of @ChardevBackendInfo +# Return information about character device backends. # # Since: 2.0 # @@ -258,7 +256,7 @@ # @server: create server socket (default: true) # # @wait: wait for incoming connection on server sockets (default: -# false). Silently ignored with server: false. This use is +# false). Silently ignored with server: false. This use is # deprecated. # # @nodelay: set TCP_NODELAY socket option (default: false) @@ -273,7 +271,19 @@ # # @reconnect: For a client socket, if a socket is disconnected, then # attempt a reconnect after the given number of seconds. Setting -# this to zero disables this function. (default: 0) (Since: 2.2) +# this to zero disables this function. The use of this member is +# deprecated, use @reconnect-ms instead. 
(default: 0) (Since: 2.2) +# +# @reconnect-ms: For a client socket, if a socket is disconnected, +# then attempt a reconnect after the given number of milliseconds. +# Setting this to zero disables this function. This member is +# mutually exclusive with @reconnect. +# (default: 0) (Since: 9.2) +# +# Features: +# +# @deprecated: Member @reconnect is deprecated. Use @reconnect-ms +# instead. # # Since: 1.4 ## @@ -287,7 +297,8 @@ '*telnet': 'bool', '*tn3270': 'bool', '*websocket': 'bool', - '*reconnect': 'int' }, + '*reconnect': { 'type': 'int', 'features': [ 'deprecated' ] }, + '*reconnect-ms': 'int' }, 'base': 'ChardevCommon' } ## @@ -319,13 +330,26 @@ 'data': { 'chardev': 'str' }, 'base': 'ChardevCommon' } +## +# @ChardevHub: +# +# Configuration info for hub chardevs. +# +# @chardevs: IDs to be added to this hub (maximum 4 devices). +# +# Since: 10.0 +## +{ 'struct': 'ChardevHub', + 'data': { 'chardevs': ['str'] }, + 'base': 'ChardevCommon' } + ## # @ChardevStdio: # # Configuration info for stdio chardevs. # # @signal: Allow signals (such as SIGINT triggered by ^C) be delivered -# to qemu. Default: true. +# to QEMU. Default: true. # # Since: 1.5 ## @@ -388,9 +412,9 @@ # # @rows: console height, in chars # -# .. note:: The options are only effective when the VNC or SDL graphical -# display backend is active. They are ignored with the GTK, Spice, -# VNC and D-Bus display backends. +# .. note:: The options are only effective when the VNC or SDL +# graphical display backend is active. They are ignored with the +# GTK, Spice, VNC and D-Bus display backends. # # Since: 1.5 ## @@ -417,7 +441,7 @@ ## # @ChardevQemuVDAgent: # -# Configuration info for qemu vdagent implementation. +# Configuration info for QEMU vdagent implementation. # # @mouse: enable/disable mouse, default is enabled. # @@ -431,40 +455,68 @@ 'base': 'ChardevCommon', 'if': 'CONFIG_SPICE_PROTOCOL' } +## +# @ChardevPty: +# +# Configuration info for pty implementation. 
+# +# @path: optional path to create a symbolic link that points to the +# allocated PTY +# +# Since: 9.2 +## +{ 'struct': 'ChardevPty', + 'data': { '*path': 'str' }, + 'base': 'ChardevCommon' } + ## # @ChardevBackendKind: # -# @pipe: Since 1.5 +# @file: regular files # -# @udp: Since 1.5 +# @serial: serial host device # -# @mux: Since 1.5 +# @parallel: parallel host device # -# @msmouse: Since 1.5 +# @pipe: pipes (since 1.5) # -# @wctablet: Since 2.9 +# @socket: stream socket # -# @braille: Since 1.5 +# @udp: datagram socket (since 1.5) # -# @testdev: Since 2.2 +# @pty: pseudo-terminal # -# @stdio: Since 1.5 +# @null: provides no input, throws away output # -# @console: Since 1.5 +# @mux: (since 1.5) # -# @spicevmc: Since 1.5 +# @hub: (since 10.0) # -# @spiceport: Since 1.5 +# @msmouse: emulated Microsoft serial mouse (since 1.5) # -# @qemu-vdagent: Since 6.1 +# @wctablet: emulated Wacom Penpartner serial tablet (since 2.9) # -# @dbus: Since 7.0 +# @braille: Baum Braille device (since 1.5) # -# @vc: v1.5 +# @testdev: device for test-suite control (since 2.2) # -# @ringbuf: Since 1.6 +# @stdio: standard I/O (since 1.5) # -# @memory: Since 1.5 +# @console: Windows console (since 1.5) +# +# @spicevmc: spice vm channel (since 1.5) +# +# @spiceport: Spice port channel (since 1.5) +# +# @qemu-vdagent: Spice vdagent (since 6.1) +# +# @dbus: D-Bus channel (since 7.0) +# +# @vc: virtual console (since 1.5) +# +# @ringbuf: memory ring buffer (since 1.6) +# +# @memory: synonym for @ringbuf (since 1.5) # # Features: # @@ -482,6 +534,7 @@ 'pty', 'null', 'mux', + 'hub', 'msmouse', 'wctablet', { 'name': 'braille', 'if': 'CONFIG_BRLAPI' }, @@ -556,6 +609,16 @@ { 'struct': 'ChardevMuxWrapper', 'data': { 'data': 'ChardevMux' } } +## +# @ChardevHubWrapper: +# +# @data: Configuration info for hub chardevs +# +# Since: 10.0 +## +{ 'struct': 'ChardevHubWrapper', + 'data': { 'data': 'ChardevHub' } } + ## # @ChardevStdioWrapper: # @@ -591,7 +654,7 @@ ## # @ChardevQemuVDAgentWrapper: # -# @data: Configuration info for qemu vdagent implementation +# @data: Configuration info for QEMU vdagent implementation # # Since: 6.1 ## @@ -630,6 +693,17 @@ { 'struct': 'ChardevRingbufWrapper', 'data': { 'data': 'ChardevRingbuf' } } + +## +# @ChardevPtyWrapper: +# +# @data: Configuration info for pty chardevs +# +# Since: 9.2 +## +{ 'struct': 'ChardevPtyWrapper', + 'data': { 'data': 'ChardevPty' } } + ## # @ChardevBackend: # @@ -650,9 +724,10 @@ 'pipe': 'ChardevHostdevWrapper', 'socket': 'ChardevSocketWrapper', 'udp': 'ChardevUdpWrapper', - 'pty': 'ChardevCommonWrapper', + 'pty': 'ChardevPtyWrapper', 'null': 'ChardevCommonWrapper', 'mux': 'ChardevMuxWrapper', + 'hub': 'ChardevHubWrapper', 'msmouse': 'ChardevCommonWrapper', 'wctablet': 'ChardevCommonWrapper', 'braille': { 'type': 'ChardevCommonWrapper', @@ -695,8 +770,6 @@ # # @backend: backend type and parameters # -# Returns: ChardevReturn. -# # Since: 1.4 # # .. qmp-example:: @@ -735,8 +808,6 @@ # # @backend: new backend type and parameters # -# Returns: ChardevReturn. -# # Since: 2.10 # # .. qmp-example:: diff --git a/qapi/common.json b/qapi/common.json index 7558ce5430d..af7e3d618a7 100644 --- a/qapi/common.json +++ b/qapi/common.json @@ -2,7 +2,9 @@ # vim: filetype=python ## -# = Common data types +# ***************** +# Common data types +# ***************** ## ## @@ -183,7 +185,19 @@ ## # @GrabToggleKeys: # -# Keys to toggle input-linux between host and guest. +# Key combinations to toggle input-linux between host and guest. 
+# +# @ctrl-ctrl: left and right control key +# +# @alt-alt: left and right alt key +# +# @shift-shift: left and right shift key +# +# @meta-meta: left and right meta key +# +# @scrolllock: scroll lock key +# +# @ctrl-scrolllock: either control key and scroll lock key # # Since: 4.0 ## @@ -200,3 +214,17 @@ ## { 'struct': 'HumanReadableText', 'data': { 'human-readable-text': 'str' } } + +## +# @EndianMode: +# +# @unspecified: Endianness not specified +# +# @little: Little endianness +# +# @big: Big endianness +# +# Since: 10.0 +## +{ 'enum': 'EndianMode', + 'data': [ 'unspecified', 'little', 'big' ] } diff --git a/qapi/compat.json b/qapi/compat.json index 42034d9368c..90b8d51cf27 100644 --- a/qapi/compat.json +++ b/qapi/compat.json @@ -2,7 +2,9 @@ # vim: filetype=python ## -# = Compatibility policy +# ******************** +# Compatibility policy +# ******************** ## ## diff --git a/qapi/control.json b/qapi/control.json index 950443df9d4..9a5302193d6 100644 --- a/qapi/control.json +++ b/qapi/control.json @@ -3,7 +3,9 @@ # ## -# = QMP monitor control +# ******************* +# QMP monitor control +# ******************* ## ## @@ -11,7 +13,7 @@ # # Enable QMP capabilities. # -# @enable: An optional list of QMPCapability values to enable. The +# @enable: An optional list of `QMPCapability` values to enable. The # client must not enable any capability that is not mentioned in # the QMP greeting message. If the field is not provided, it # means no QMP capabilities will be enabled. (since 2.12) @@ -22,13 +24,14 @@ # "arguments": { "enable": [ "oob" ] } } # <- { "return": {} } # -# .. note:: This command is valid exactly when first connecting: it must -# be issued before any other command will be accepted, and will fail -# once the monitor is accepting other commands. -# (see :doc:`/interop/qmp-spec`) +# .. note:: This command is valid exactly when first connecting: it +# must be issued before any other command will be accepted, and +# will fail once the monitor is accepting other commands. (see +# :doc:`/interop/qmp-spec`) # -# .. note:: The QMP client needs to explicitly enable QMP capabilities, -# otherwise all the QMP capabilities will be turned off by default. +# .. note:: The QMP client needs to explicitly enable QMP +# capabilities, otherwise all the QMP capabilities will be turned +# off by default. # # Since: 0.13 ## @@ -90,10 +93,9 @@ ## # @query-version: # -# Returns the current version of QEMU. +# Return the current version of QEMU. # -# Returns: A @VersionInfo object describing the current version of -# QEMU. +# Returns: An object describing the current version of QEMU. # # Since: 0.14 # @@ -130,7 +132,7 @@ # # Return a list of supported QMP commands by this server # -# Returns: A list of @CommandInfo for all supported commands +# Returns: A list of all supported commands # # Since: 0.14 # @@ -150,7 +152,6 @@ # } # # This example has been shortened as the real response is too long. -# ## { 'command': 'query-commands', 'returns': ['CommandInfo'], 'allow-preconfig': true } @@ -158,10 +159,11 @@ ## # @quit: # -# This command will cause the QEMU process to exit gracefully. While -# every attempt is made to send the QMP response before terminating, -# this is not guaranteed. When using this interface, a premature EOF -# would not be unexpected. +# Request graceful QEMU process termination. +# +# While every attempt is made to send the QMP response before +# terminating, this is not guaranteed. When using this interface, a +# premature EOF would not be unexpected. 
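A sketch of how the new hub chardev backend introduced above might be instantiated with chardev-add, aggregating two existing chardevs (all ids are hypothetical):

    -> { "execute": "chardev-add",
         "arguments": { "id": "hub0",
                        "backend": { "type": "hub",
                                     "data": { "chardevs": [ "chr0", "chr1" ] } } } }
    <- { "return": {} }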
# # Since: 0.14 # diff --git a/qapi/crypto.json b/qapi/crypto.json index 39b191e8a28..ab6eda4c2fb 100644 --- a/qapi/crypto.json +++ b/qapi/crypto.json @@ -3,7 +3,9 @@ # ## -# = Cryptography +# ************ +# Cryptography +# ************ ## ## @@ -20,7 +22,6 @@ # Since: 2.5 ## { 'enum': 'QCryptoTLSCredsEndpoint', - 'prefix': 'QCRYPTO_TLS_CREDS_ENDPOINT', 'data': ['client', 'server']} ## @@ -36,21 +37,20 @@ # Since: 2.6 ## { 'enum': 'QCryptoSecretFormat', - 'prefix': 'QCRYPTO_SECRET_FORMAT', 'data': ['raw', 'base64']} ## -# @QCryptoHashAlgorithm: +# @QCryptoHashAlgo: # # The supported algorithms for computing content digests # -# @md5: MD5. Should not be used in any new code, legacy compat only +# @md5: MD5. Should not be used in any new code, legacy compat only # -# @sha1: SHA-1. Should not be used in any new code, legacy compat only +# @sha1: SHA-1. Should not be used in any new code, legacy compat only # # @sha224: SHA-224. (since 2.7) # -# @sha256: SHA-256. Current recommended strong hash. +# @sha256: SHA-256. Current recommended strong hash. # # @sha384: SHA-384. (since 2.7) # @@ -58,14 +58,15 @@ # # @ripemd160: RIPEMD-160. (since 2.7) # +# @sm3: SM3. (since 9.2.0) +# # Since: 2.6 ## -{ 'enum': 'QCryptoHashAlgorithm', - 'prefix': 'QCRYPTO_HASH_ALG', - 'data': ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', 'ripemd160']} +{ 'enum': 'QCryptoHashAlgo', + 'data': ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', 'ripemd160', 'sm3']} ## -# @QCryptoCipherAlgorithm: +# @QCryptoCipherAlgo: # # The supported algorithms for content encryption ciphers # @@ -98,8 +99,7 @@ # # Since: 2.6 ## -{ 'enum': 'QCryptoCipherAlgorithm', - 'prefix': 'QCRYPTO_CIPHER_ALG', +{ 'enum': 'QCryptoCipherAlgo', 'data': ['aes-128', 'aes-192', 'aes-256', 'des', '3des', 'cast5-128', @@ -123,11 +123,10 @@ # Since: 2.6 ## { 'enum': 'QCryptoCipherMode', - 'prefix': 'QCRYPTO_CIPHER_MODE', 'data': ['ecb', 'cbc', 'xts', 'ctr']} ## -# @QCryptoIVGenAlgorithm: +# @QCryptoIVGenAlgo: # # The supported algorithms for generating initialization vectors for # full disk encryption. The 'plain' generator should not be used for @@ -143,8 +142,7 @@ # # Since: 2.6 ## -{ 'enum': 'QCryptoIVGenAlgorithm', - 'prefix': 'QCRYPTO_IVGEN_ALG', +{ 'enum': 'QCryptoIVGenAlgo', 'data': ['plain', 'plain64', 'essiv']} ## @@ -160,7 +158,6 @@ # Since: 2.6 ## { 'enum': 'QCryptoBlockFormat', -# 'prefix': 'QCRYPTO_BLOCK_FORMAT', 'data': ['qcow', 'luks']} ## @@ -208,19 +205,19 @@ # # The options that apply to LUKS encryption format initialization # -# @cipher-alg: the cipher algorithm for data encryption Currently +# @cipher-alg: the cipher algorithm for data encryption. Currently # defaults to 'aes-256'. # -# @cipher-mode: the cipher mode for data encryption Currently defaults -# to 'xts' +# @cipher-mode: the cipher mode for data encryption. Currently +# defaults to 'xts' # -# @ivgen-alg: the initialization vector generator Currently defaults +# @ivgen-alg: the initialization vector generator. Currently defaults # to 'plain64' # -# @ivgen-hash-alg: the initialization vector generator hash Currently -# defaults to 'sha256' +# @ivgen-hash-alg: the initialization vector generator hash. +# Currently defaults to 'sha256' # -# @hash-alg: the master key hash algorithm Currently defaults to +# @hash-alg: the master key hash algorithm. 
Currently defaults to # 'sha256' # # @iter-time: number of milliseconds to spend in PBKDF passphrase @@ -230,11 +227,11 @@ ## { 'struct': 'QCryptoBlockCreateOptionsLUKS', 'base': 'QCryptoBlockOptionsLUKS', - 'data': { '*cipher-alg': 'QCryptoCipherAlgorithm', + 'data': { '*cipher-alg': 'QCryptoCipherAlgo', '*cipher-mode': 'QCryptoCipherMode', - '*ivgen-alg': 'QCryptoIVGenAlgorithm', - '*ivgen-hash-alg': 'QCryptoHashAlgorithm', - '*hash-alg': 'QCryptoHashAlgorithm', + '*ivgen-alg': 'QCryptoIVGenAlgo', + '*ivgen-hash-alg': 'QCryptoHashAlgo', + '*hash-alg': 'QCryptoHashAlgo', '*iter-time': 'int' }} ## @@ -327,11 +324,11 @@ # Since: 2.7 ## { 'struct': 'QCryptoBlockInfoLUKS', - 'data': {'cipher-alg': 'QCryptoCipherAlgorithm', + 'data': {'cipher-alg': 'QCryptoCipherAlgo', 'cipher-mode': 'QCryptoCipherMode', - 'ivgen-alg': 'QCryptoIVGenAlgorithm', - '*ivgen-hash-alg': 'QCryptoHashAlgorithm', - 'hash-alg': 'QCryptoHashAlgorithm', + 'ivgen-alg': 'QCryptoIVGenAlgo', + '*ivgen-hash-alg': 'QCryptoHashAlgo', + 'hash-alg': 'QCryptoHashAlgo', 'detached-header': 'bool', 'payload-offset': 'int', 'master-key-iters': 'int', @@ -376,11 +373,11 @@ # @new-secret: The ID of a QCryptoSecret object providing the password # to be written into added active keyslots # -# @old-secret: Optional (for deactivation only) If given will +# @old-secret: Optional (for deactivation only). If given will # deactivate all keyslots that match password located in # QCryptoSecret with this ID # -# @iter-time: Optional (for activation only) Number of milliseconds to +# @iter-time: Optional (for activation only). Number of milliseconds to # spend in PBKDF passphrase processing for the newly activated # keyslot. Currently defaults to 2000. # @@ -426,11 +423,6 @@ # # Properties for objects of classes derived from secret-common. # -# @loaded: if true, the secret is loaded immediately when applying -# this option and will probably fail when processing the next -# option. Don't use; only provided for compatibility. -# (default: false) -# # @format: the data format that the secret is provided in # (default: raw) # @@ -440,19 +432,13 @@ # # @iv: the random initialization vector used for encryption of this # particular secret. Should be a base64 encrypted string of the -# 16-byte IV. Mandatory if @keyid is given. Ignored if @keyid is +# 16-byte IV. Mandatory if @keyid is given. Ignored if @keyid is # absent. # -# Features: -# -# @deprecated: Member @loaded is deprecated. Setting true doesn't -# make sense, and false is already the default. -# # Since: 2.6 ## { 'struct': 'SecretCommonProperties', - 'data': { '*loaded': { 'type': 'bool', 'features': ['deprecated'] }, - '*format': 'QCryptoSecretFormat', + 'data': { '*format': 'QCryptoSecretFormat', '*keyid': 'str', '*iv': 'str' } } @@ -519,58 +505,32 @@ # # Properties for tls-creds-anon objects. # -# @loaded: if true, the credentials are loaded immediately when -# applying this option and will ignore options that are processed -# later. Don't use; only provided for compatibility. -# (default: false) -# -# Features: -# -# @deprecated: Member @loaded is deprecated. Setting true doesn't -# make sense, and false is already the default. -# # Since: 2.5 ## { 'struct': 'TlsCredsAnonProperties', 'base': 'TlsCredsProperties', - 'data': { '*loaded': { 'type': 'bool', 'features': ['deprecated'] } } } + 'data': { } } ## # @TlsCredsPskProperties: # # Properties for tls-creds-psk objects. 
# -# @loaded: if true, the credentials are loaded immediately when -# applying this option and will ignore options that are processed -# later. Don't use; only provided for compatibility. -# (default: false) -# # @username: the username which will be sent to the server. For # clients only. If absent, "qemu" is sent and the property will # read back as an empty string. # -# Features: -# -# @deprecated: Member @loaded is deprecated. Setting true doesn't -# make sense, and false is already the default. -# # Since: 3.0 ## { 'struct': 'TlsCredsPskProperties', 'base': 'TlsCredsProperties', - 'data': { '*loaded': { 'type': 'bool', 'features': ['deprecated'] }, - '*username': 'str' } } + 'data': { '*username': 'str' } } ## # @TlsCredsX509Properties: # # Properties for tls-creds-x509 objects. # -# @loaded: if true, the credentials are loaded immediately when -# applying this option and will ignore options that are processed -# later. Don't use; only provided for compatibility. -# (default: false) -# # @sanity-check: if true, perform some sanity checks before using the # credentials (default: true) # @@ -580,20 +540,14 @@ # provides the ID of a previously created secret object containing # the password for decryption. # -# Features: -# -# @deprecated: Member @loaded is deprecated. Setting true doesn't -# make sense, and false is already the default. -# # Since: 2.5 ## { 'struct': 'TlsCredsX509Properties', 'base': 'TlsCredsProperties', - 'data': { '*loaded': { 'type': 'bool', 'features': ['deprecated'] }, - '*sanity-check': 'bool', + 'data': { '*sanity-check': 'bool', '*passwordid': 'str' } } ## -# @QCryptoAkCipherAlgorithm: +# @QCryptoAkCipherAlgo: # # The supported algorithms for asymmetric encryption ciphers # @@ -601,8 +555,7 @@ # # Since: 7.1 ## -{ 'enum': 'QCryptoAkCipherAlgorithm', - 'prefix': 'QCRYPTO_AKCIPHER_ALG', +{ 'enum': 'QCryptoAkCipherAlgo', 'data': ['rsa']} ## @@ -610,14 +563,17 @@ # # The type of asymmetric keys. # +# @public: public key +# +# @private: private key +# # Since: 7.1 ## { 'enum': 'QCryptoAkCipherKeyType', - 'prefix': 'QCRYPTO_AKCIPHER_KEY_TYPE', 'data': ['public', 'private']} ## -# @QCryptoRSAPaddingAlgorithm: +# @QCryptoRSAPaddingAlgo: # # The padding algorithm for RSA. # @@ -627,8 +583,7 @@ # # Since: 7.1 ## -{ 'enum': 'QCryptoRSAPaddingAlgorithm', - 'prefix': 'QCRYPTO_RSA_PADDING_ALG', +{ 'enum': 'QCryptoRSAPaddingAlgo', 'data': ['raw', 'pkcs1']} ## @@ -636,15 +591,15 @@ # # Specific parameters for RSA algorithm. # -# @hash-alg: QCryptoHashAlgorithm +# @hash-alg: `QCryptoHashAlgo` # -# @padding-alg: QCryptoRSAPaddingAlgorithm +# @padding-alg: `QCryptoRSAPaddingAlgo` # # Since: 7.1 ## { 'struct': 'QCryptoAkCipherOptionsRSA', - 'data': { 'hash-alg':'QCryptoHashAlgorithm', - 'padding-alg': 'QCryptoRSAPaddingAlgorithm'}} + 'data': { 'hash-alg':'QCryptoHashAlgo', + 'padding-alg': 'QCryptoRSAPaddingAlgo'}} ## # @QCryptoAkCipherOptions: @@ -657,6 +612,6 @@ # Since: 7.1 ## { 'union': 'QCryptoAkCipherOptions', - 'base': { 'alg': 'QCryptoAkCipherAlgorithm' }, + 'base': { 'alg': 'QCryptoAkCipherAlgo' }, 'discriminator': 'alg', 'data': { 'rsa': 'QCryptoAkCipherOptionsRSA' }} diff --git a/qapi/cryptodev.json b/qapi/cryptodev.json index 68289f49840..eb309c22f86 100644 --- a/qapi/cryptodev.json +++ b/qapi/cryptodev.json @@ -5,22 +5,23 @@ # See the COPYING file in the top-level directory. 
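The QCryptoAkCipherOptions union defined above is a flat union: the base member @alg is the discriminator, and the members of the selected branch (here QCryptoAkCipherOptionsRSA) appear at the same JSON nesting level as @alg. A minimal sketch of the resulting wire representation, with illustrative (not default) values::

    import json

    # Flat-union layout: the discriminator and the branch's members side by side.
    akcipher_opts = {
        "alg": "rsa",            # QCryptoAkCipherAlgo
        "hash-alg": "sha256",    # QCryptoHashAlgo ('sm3' is also accepted since 9.2.0)
        "padding-alg": "pkcs1",  # QCryptoRSAPaddingAlgo
    }
    print(json.dumps(akcipher_opts, indent=2))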
## -# = Cryptography devices +# ******************** +# Cryptography devices +# ******************** ## ## -# @QCryptodevBackendAlgType: +# @QCryptodevBackendAlgoType: # # The supported algorithm types of a crypto device. # # @sym: symmetric encryption # -# @asym: asymmetric Encryption +# @asym: asymmetric encryption # # Since: 8.0 ## -{ 'enum': 'QCryptodevBackendAlgType', - 'prefix': 'QCRYPTODEV_BACKEND_ALG', +{ 'enum': 'QCryptodevBackendAlgoType', 'data': ['sym', 'asym']} ## @@ -28,10 +29,19 @@ # # The supported service types of a crypto device. # +# @cipher: Symmetric Key Cipher service +# +# @hash: Hash service +# +# @mac: Message Authentication Codes service +# +# @aead: Authenticated Encryption with Associated Data service +# +# @akcipher: Asymmetric Key Cipher service +# # Since: 8.0 ## { 'enum': 'QCryptodevBackendServiceType', - 'prefix': 'QCRYPTODEV_BACKEND_SERVICE', 'data': ['cipher', 'hash', 'mac', 'aead', 'akcipher']} ## @@ -48,7 +58,6 @@ # Since: 8.0 ## { 'enum': 'QCryptodevBackendType', - 'prefix': 'QCRYPTODEV_BACKEND_TYPE', 'data': ['builtin', 'vhost-user', 'lkcf']} ## @@ -87,9 +96,7 @@ ## # @query-cryptodev: # -# Returns information about current crypto devices. -# -# Returns: a list of @QCryptodevInfo +# Return information about current crypto devices. # # Since: 8.0 ## diff --git a/qapi/cxl.json b/qapi/cxl.json index bdfac67c473..52cc5d4f336 100644 --- a/qapi/cxl.json +++ b/qapi/cxl.json @@ -2,7 +2,9 @@ # vim: filetype=python ## -# = CXL devices +# *********** +# CXL devices +# *********** ## ## @@ -117,7 +119,7 @@ # @nibble-mask: Identifies one or more nibbles that the error affects # # @bank-group: Bank group of the memory event location, incorporating -# a number of Banks. +# a number of banks. # # @bank: Bank of the memory event location. A single bank is accessed # per read or write of the memory. @@ -326,6 +328,9 @@ # @crc-threshold: Component specific and applicable to 68 byte Flit # mode only. # +# @retry-threshold: Retry threshold hit in the Local Retry State +# Machine, 68B Flits only. +# # @cache-poison-received: Received poison from a peer on CXL.cache. # # @mem-poison-received: Received poison from a peer on CXL.mem @@ -369,8 +374,8 @@ # of memory by Device Physical Address within a single Dynamic # Capacity Region on a CXL Type 3 Device. # -# @offset: The offset (in bytes) to the start of the region -# where the extent belongs to. +# @offset: The offset (in bytes) to the start of the region where the +# extent belongs to. # # @len: The length of the extent in bytes. # @@ -404,16 +409,16 @@ # # @enable-shared-access: Capacity has already been allocated to a # different host using free, contiguous or prescriptive policy -# with a known tag. This policy then instructs the device to -# make the capacity with the specified tag available to an -# additional host. Capacity is implicit as it matches that -# already associated with the tag. Note that the extent list -# (and hence Device Physical Addresses) used are per host, so -# a device may use different representations on each host. -# The ordering of the extents provided to each host is indicated -# to the host using per extent sequence numbers generated by -# the device. Has a similar meaning for temporal sharing, but -# in that case there may be only one host involved. +# with a known tag. This policy then instructs the device to make +# the capacity with the specified tag available to an additional +# host. Capacity is implicit as it matches that already +# associated with the tag. 
Note that the extent list (and hence +# Device Physical Addresses) used are per host, so a device may +# use different representations on each host. The ordering of the +# extents provided to each host is indicated to the host using per +# extent sequence numbers generated by the device. Has a similar +# meaning for temporal sharing, but in that case there may be only +# one host involved. # # Since: 9.1 ## @@ -429,7 +434,7 @@ # # Initiate adding dynamic capacity extents to a host. This simulates # operations defined in Compute Express Link (CXL) Specification, -# Revision 3.1, Section 7.6.7.6.5. Note that, currently, establishing +# Revision 3.1, Section 7.6.7.6.5. Note that, currently, establishing # success or failure of the full Add Dynamic Capacity flow requires # out of band communication with the OS of the CXL host. # @@ -457,7 +462,7 @@ # # @unstable: For now this command is subject to change. # -# Since : 9.1 +# Since: 9.1 ## { 'command': 'cxl-add-dynamic-capacity', 'data': { 'path': 'str', @@ -495,7 +500,7 @@ # # Initiate release of dynamic capacity extents from a host. This # simulates operations defined in Compute Express Link (CXL) -# Specification, Revision 3.1, Section 7.6.7.6.6. Note that, +# Specification, Revision 3.1, Section 7.6.7.6.6. Note that, # currently, success or failure of the full Release Dynamic Capacity # flow requires out of band communication with the OS of the CXL host. # @@ -514,13 +519,13 @@ # from the host. Instead, the host immediately looses access to # the released capacity. # -# @sanitize-on-release: Bit[5] of the "Flags" field in Compute -# Express Link (CXL) Specification, Revision 3.1, Table 7-71. -# When set, the device should sanitize all released capacity as -# a result of this request. This ensures that all user data -# and metadata is made permanently unavailable by whatever -# means is appropriate for the media type. Note that changing -# encryption keys is not sufficient. +# @sanitize-on-release: Bit[5] of the "Flags" field in Compute Express +# Link (CXL) Specification, Revision 3.1, Table 7-71. When set, +# the device should sanitize all released capacity as a result of +# this request. This ensures that all user data and metadata is +# made permanently unavailable by whatever means is appropriate +# for the media type. Note that changing encryption keys is not +# sufficient. # # @region: The "Region Number" field as defined in Compute Express # Link Specification, Revision 3.1, Table 7-71. Valid range @@ -536,7 +541,7 @@ # # @unstable: For now this command is subject to change. # -# Since : 9.1 +# Since: 9.1 ## { 'command': 'cxl-release-dynamic-capacity', 'data': { 'path': 'str', diff --git a/qapi/dump.json b/qapi/dump.json index d8145dad979..726b5208703 100644 --- a/qapi/dump.json +++ b/qapi/dump.json @@ -5,7 +5,9 @@ # See the COPYING file in the top-level directory. ## -# = Dump guest memory +# ***************** +# Dump guest memory +# ***************** ## ## @@ -54,9 +56,9 @@ # @paging: if true, do paging to get guest's memory mapping. This # allows using gdb to process the core file. # -# IMPORTANT: this option can make QEMU allocate several gigabytes -# of RAM. This can happen for a large guest, or a malicious guest -# pretending to be large. +# **Important**: this option can make QEMU allocate several +# gigabytes of RAM. This can happen for a large guest, or a +# malicious guest pretending to be large. 
# # Also, paging=true has the following limitations: # @@ -77,7 +79,7 @@ # # @detach: if true, QMP will return immediately rather than waiting # for the dump to finish. The user can track progress using -# "query-dump". (since 2.6). +# `query-dump`. (since 2.6). # # @begin: if specified, the starting physical address. # @@ -110,7 +112,7 @@ # # Describe the status of a long-running background guest memory dump. # -# @none: no dump-guest-memory has started yet. +# @none: no `dump-guest-memory` has started yet. # # @active: there is one dump running in background. # @@ -126,9 +128,9 @@ ## # @DumpQueryResult: # -# The result format for 'query-dump'. +# The result format for `query-dump`. # -# @status: enum of @DumpStatus, which shows current dump status +# @status: enum of `DumpStatus`, which shows current dump status # # @completed: bytes written in latest dump (uncompressed) # @@ -146,7 +148,7 @@ # # Query latest dump status. # -# Returns: A @DumpStatus object showing the dump status. +# Returns: An object showing the dump status. # # Since: 2.6 # @@ -184,7 +186,7 @@ ## # @DumpGuestMemoryCapability: # -# @formats: the available formats for dump-guest-memory +# @formats: the available formats for `dump-guest-memory` # # Since: 2.0 ## @@ -195,10 +197,9 @@ ## # @query-dump-guest-memory-capability: # -# Returns the available formats for dump-guest-memory +# Return the available formats for `dump-guest-memory` # -# Returns: A @DumpGuestMemoryCapability object listing available -# formats for dump-guest-memory +# Returns: An object listing available formats for `dump-guest-memory` # # Since: 2.0 # diff --git a/qapi/ebpf.json b/qapi/ebpf.json index e500b5a744e..f0257955fa2 100644 --- a/qapi/ebpf.json +++ b/qapi/ebpf.json @@ -5,10 +5,12 @@ # See the COPYING file in the top-level directory. ## -# = eBPF Objects +# ************ +# eBPF Objects +# ************ # # eBPF object is an ELF binary that contains the eBPF program and eBPF -# map description(BTF). Overall, eBPF object should contain the +# map description(BTF). Overall, eBPF object should contain the # program and enough metadata to create/load eBPF with libbpf. As the # eBPF maps/program should correspond to QEMU, the eBPF can't be used # from different QEMU build. @@ -32,7 +34,7 @@ ## # @EbpfProgramID: # -# The eBPF programs that can be gotten with request-ebpf. +# The eBPF programs that can be gotten with `request-ebpf`. # # @rss: Receive side scaling, technology that allows steering traffic # between queues by calculation hash. Users may set up diff --git a/qapi/error.json b/qapi/error.json index 135c1e82319..54cb02fb880 100644 --- a/qapi/error.json +++ b/qapi/error.json @@ -2,7 +2,9 @@ # vim: filetype=python ## -# = QMP errors +# ********** +# QMP errors +# ********** ## ## diff --git a/qapi/introspect.json b/qapi/introspect.json index b15052ec21a..53100714a80 100644 --- a/qapi/introspect.json +++ b/qapi/introspect.json @@ -10,40 +10,42 @@ # See the COPYING file in the top-level directory. ## -# = QMP introspection +# ***************** +# QMP introspection +# ***************** ## ## # @query-qmp-schema: # -# Command query-qmp-schema exposes the QMP wire ABI as an array of -# SchemaInfo. This lets QMP clients figure out what commands and +# Command `query-qmp-schema` exposes the QMP wire ABI as an array of +# `SchemaInfo`. This lets QMP clients figure out what commands and # events are available in this QEMU, and their parameters and results. 
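To illustrate how a client typically consumes query-qmp-schema, here is a minimal sketch of a QMP session written against Python's standard library only. The socket path and the way QEMU is started (``-qmp unix:...,server=on,wait=off``) are assumptions made for the sketch, not something this patch defines::

    import json
    import socket

    # Assumption: QEMU was started with something like
    #   -qmp unix:/tmp/qmp.sock,server=on,wait=off
    SOCK_PATH = "/tmp/qmp.sock"

    def qmp(chan, cmd, **arguments):
        """Send one QMP command and return the 'return' member of the reply."""
        req = {"execute": cmd}
        if arguments:
            req["arguments"] = arguments
        chan.write(json.dumps(req) + "\n")
        chan.flush()
        while True:
            reply = json.loads(chan.readline())
            if "return" in reply:
                return reply["return"]
            if "error" in reply:
                raise RuntimeError(reply["error"]["desc"])
            # Anything else is an asynchronous event; skip it for this sketch.

    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
        sock.connect(SOCK_PATH)
        chan = sock.makefile("rw", encoding="utf-8")
        json.loads(chan.readline())        # greeting: {"QMP": {...}}
        qmp(chan, "qmp_capabilities")      # leave capabilities negotiation mode
        schema = qmp(chan, "query-qmp-schema")
        counts = {}
        for entity in schema:              # each element is a SchemaInfo
            counts[entity["meta-type"]] = counts.get(entity["meta-type"], 0) + 1
        print(counts)                      # e.g. counts of 'command', 'event', 'object', ...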
# -# However, the SchemaInfo can't reflect all the rules and restrictions +# However, the `SchemaInfo` can't reflect all the rules and restrictions # that apply to QMP. It's interface introspection (figuring out # what's there), not interface specification. The specification is in # the QAPI schema. # # Furthermore, while we strive to keep the QMP wire format -# backwards-compatible across qemu versions, the introspection output +# backwards-compatible across QEMU versions, the introspection output # is not guaranteed to have the same stability. For example, one -# version of qemu may list an object member as an optional +# version of QEMU may list an object member as an optional # non-variant, while another lists the same member only through the # object's variants; or the type of a member may change from a generic # string into a specific enum or from one specific type into an # alternate that includes the original type alongside something else. # -# Returns: array of @SchemaInfo, where each element describes an -# entity in the ABI: command, event, type, ... +# Returns: an array where each element describes an entity in the ABI: +# command, event, type, ... # -# The order of the various SchemaInfo is unspecified; however, all +# The order of the various elements is unspecified; however, all # names are guaranteed to be unique (no name will be duplicated # with different meta-types). # # .. note:: The QAPI schema is also used to help define *internal* -# interfaces, by defining QAPI types. These are not part of the QMP -# wire ABI, and therefore not returned by this command. +# interfaces, by defining QAPI types. These are not part of the +# QMP wire ABI, and therefore not returned by this command. # # Since: 2.5 ## @@ -54,7 +56,7 @@ ## # @SchemaMetaType: # -# This is a @SchemaInfo's meta type, i.e. the kind of entity it +# This is a `SchemaInfo`'s meta type, i.e. the kind of entity it # describes. # # @builtin: a predefined type such as 'int' or 'bool'. @@ -80,7 +82,7 @@ ## # @SchemaInfo: # -# @name: the entity's name, inherited from @base. The SchemaInfo is +# @name: the entity's name, inherited from @base. The `SchemaInfo` is # always referenced by this name. Commands and events have the # name defined in the QAPI schema. Unlike command and event # names, type names are not part of the wire ABI. Consequently, @@ -111,7 +113,7 @@ ## # @SchemaInfoBuiltin: # -# Additional SchemaInfo members for meta-type 'builtin'. +# Additional `SchemaInfo` members for meta-type 'builtin'. # # @json-type: the JSON type used for this type on the wire. # @@ -127,6 +129,22 @@ # section 1, plus 'int' (split off 'number'), plus the obvious top # type 'value'. # +# @string: JSON string +# +# @number: JSON number +# +# @int: JSON number that is an integer +# +# @boolean: literal ``false`` or ``true`` +# +# @null: literal ``null`` +# +# @object: JSON object +# +# @array: JSON array +# +# @value: any JSON value +# # Since: 2.5 ## { 'enum': 'JSONType', @@ -136,10 +154,10 @@ ## # @SchemaInfoEnum: # -# Additional SchemaInfo members for meta-type 'enum'. +# Additional `SchemaInfo` members for meta-type 'enum'. # -# @members: the enum type's members, in no particular order (since -# 6.2). +# @members: the enum type's members, in no particular order. +# (since 6.2) # # @values: the enumeration type's member names, in no particular # order. Redundant with @members. Just for backward @@ -176,7 +194,7 @@ ## # @SchemaInfoArray: # -# Additional SchemaInfo members for meta-type 'array'. 
+# Additional `SchemaInfo` members for meta-type 'array'. # # @element-type: the array type's element type. # @@ -190,7 +208,7 @@ ## # @SchemaInfoObject: # -# Additional SchemaInfo members for meta-type 'object'. +# Additional `SchemaInfo` members for meta-type 'object'. # # @members: the object type's (non-variant) members, in no particular # order. @@ -255,7 +273,7 @@ ## # @SchemaInfoAlternate: # -# Additional SchemaInfo members for meta-type 'alternate'. +# Additional `SchemaInfo` members for meta-type 'alternate'. # # @members: the alternate type's members, in no particular order. The # members' wire encoding is distinct, see @@ -283,7 +301,7 @@ ## # @SchemaInfoCommand: # -# Additional SchemaInfo members for meta-type 'command'. +# Additional `SchemaInfo` members for meta-type 'command'. # # @arg-type: the name of the object type that provides the command's # parameters. @@ -305,7 +323,7 @@ ## # @SchemaInfoEvent: # -# Additional SchemaInfo members for meta-type 'event'. +# Additional `SchemaInfo` members for meta-type 'event'. # # @arg-type: the name of the object type that provides the event's # parameters. diff --git a/qapi/job.json b/qapi/job.json index b3957207a45..8b08350af2e 100644 --- a/qapi/job.json +++ b/qapi/job.json @@ -2,7 +2,9 @@ # vim: filetype=python ## -# = Background jobs +# *************** +# Background jobs +# *************** ## ## @@ -10,26 +12,26 @@ # # Type of a background job. # -# @commit: block commit job type, see "block-commit" +# @commit: block commit job type, see `block-commit` # -# @stream: block stream job type, see "block-stream" +# @stream: block stream job type, see `block-stream` # -# @mirror: drive mirror job type, see "drive-mirror" +# @mirror: drive mirror job type, see `drive-mirror` # -# @backup: drive backup job type, see "drive-backup" +# @backup: drive backup job type, see `drive-backup` # -# @create: image creation job type, see "blockdev-create" (since 3.0) +# @create: image creation job type, see `blockdev-create` (since 3.0) # -# @amend: image options amend job type, see "x-blockdev-amend" (since -# 5.1) +# @amend: image options amend job type, see `x-blockdev-amend` +# (since 5.1) # -# @snapshot-load: snapshot load job type, see "snapshot-load" (since -# 6.0) +# @snapshot-load: snapshot load job type, see `snapshot-load` +# (since 6.0) # -# @snapshot-save: snapshot save job type, see "snapshot-save" (since -# 6.0) +# @snapshot-save: snapshot save job type, see `snapshot-save` +# (since 6.0) # -# @snapshot-delete: snapshot delete job type, see "snapshot-delete" +# @snapshot-delete: snapshot delete job type, see `snapshot-delete` # (since 6.0) # # Since: 1.7 @@ -65,7 +67,7 @@ # # @pending: The job has finished its work, but has finalization steps # that it needs to make prior to completing. These changes will -# require manual intervention via @job-finalize if auto-finalize +# require manual intervention via `job-finalize` if auto-finalize # was set to false. These pending changes may still fail. # # @aborting: The job is in the process of being aborted, and will @@ -74,8 +76,8 @@ # process. # # @concluded: The job has finished all work. If auto-dismiss was set -# to false, the job will remain in the query list until it is -# dismissed via @job-dismiss. +# to false, the job will remain in this state until it is +# dismissed via `job-dismiss`. # # @null: The job is in the process of being dismantled. This state # should not ever be visible externally. @@ -91,21 +93,21 @@ # # Represents command verbs that can be applied to a job. 
# -# @cancel: see @job-cancel +# @cancel: see `job-cancel` # -# @pause: see @job-pause +# @pause: see `job-pause` # -# @resume: see @job-resume +# @resume: see `job-resume` # -# @set-speed: see @block-job-set-speed +# @set-speed: see `block-job-set-speed` # -# @complete: see @job-complete +# @complete: see `job-complete` # -# @dismiss: see @job-dismiss +# @dismiss: see `job-dismiss` # -# @finalize: see @job-finalize +# @finalize: see `job-finalize` # -# @change: see @block-job-change (since 8.2) +# @change: see `block-job-change` (since 8.2) # # Since: 2.12 ## @@ -138,7 +140,7 @@ # # The job will pause as soon as possible, which means transitioning # into the PAUSED state if it was RUNNING, or into STANDBY if it was -# READY. The corresponding JOB_STATUS_CHANGE event will be emitted. +# READY. The corresponding `JOB_STATUS_CHANGE` event will be emitted. # # Cancelling a paused job automatically resumes it. # @@ -156,6 +158,9 @@ # This command returns immediately after resuming a paused job. # Resuming an already running job is an error. # +# This command also clears the error status for block-jobs (stream, +# commit, mirror, backup). +# # @id: The job identifier. # # Since: 3.0 @@ -170,7 +175,7 @@ # cancellation. # # The job will cancel as soon as possible and then emit a -# JOB_STATUS_CHANGE event. Usually, the status will change to +# `JOB_STATUS_CHANGE` event. Usually, the status will change to # ABORTING, but it is possible that a job successfully completes (e.g. # because it was almost done and there was no opportunity to cancel # earlier than completing the job) and transitions to PENDING instead. @@ -184,7 +189,23 @@ ## # @job-complete: # -# Manually trigger completion of an active job in the READY state. +# Manually trigger completion of an active job in the READY or STANDBY +# state. Completing the job in any other state is an error. +# +# This is supported only for drive mirroring, where it also switches +# the device to write to the target path only. Note that drive +# mirroring includes `drive-mirror`, `blockdev-mirror` and `block-commit` +# job (only in case of "active commit", when the node being commited +# is used by the guest). The ability to complete is signaled with a +# `BLOCK_JOB_READY` event. +# +# This command completes an active background block operation +# synchronously. The ordering of this command's return with the +# `BLOCK_JOB_COMPLETED` event is not defined. Note that if an I/O error +# occurs during the processing of this command: 1) the command itself +# will fail; 2) the error will be processed according to the +# rerror/werror arguments that were specified when starting the +# operation. # # @id: The job identifier. # @@ -197,12 +218,16 @@ # # Deletes a job that is in the CONCLUDED state. This command only # needs to be run explicitly for jobs that don't have automatic -# dismiss enabled. +# dismiss enabled. In turn, automatic dismiss may be enabled only +# for jobs that have @auto-dismiss option, which are `drive-backup`, +# `blockdev-backup`, `drive-mirror`, `blockdev-mirror`, `block-commit` and +# `block-stream`. @auto-dismiss is enabled by default for these +# jobs. # # This command will refuse to operate on any job that has not yet -# reached its terminal state, JOB_STATUS_CONCLUDED. For jobs that make -# use of JOB_READY event, job-cancel or job-complete will still need -# to be used as appropriate. +# reached its terminal state, CONCLUDED. 
For jobs that make use of +# the JOB_READY event, `job-cancel` or `job-complete` will still need to +# be used as appropriate. # # @id: The job identifier. # @@ -222,6 +247,9 @@ # force ALL jobs in the transaction to finalize, so it is only # necessary to instruct a single member job to finalize. # +# The command is applicable only to jobs which have @auto-finalize option +# and only when this option is set to false. +# # @id: The identifier of any job in the transaction, or of a job that # is not part of any transaction. # @@ -269,7 +297,7 @@ # # Return information about jobs. # -# Returns: a list with a @JobInfo for each active job +# Returns: a list with info for each active job # # Since: 3.0 ## diff --git a/qapi/machine-common.json b/qapi/machine-common.json index fa6bd71d128..ed3d20a2fbd 100644 --- a/qapi/machine-common.json +++ b/qapi/machine-common.json @@ -5,17 +5,110 @@ # See the COPYING file in the top-level directory. ## -# = Machines S390 data types +# ******************** +# Common machine types +# ******************** ## ## -# @CpuS390Entitlement: +# @S390CpuEntitlement: # # An enumeration of CPU entitlements that can be assumed by a virtual # S390 CPU # # Since: 8.2 ## -{ 'enum': 'CpuS390Entitlement', - 'prefix': 'S390_CPU_ENTITLEMENT', +{ 'enum': 'S390CpuEntitlement', 'data': [ 'auto', 'low', 'medium', 'high' ] } + +## +# @CpuTopologyLevel: +# +# An enumeration of CPU topology levels. +# +# @thread: thread level, which would also be called SMT level or +# logical processor level. The @threads option in +# `SMPConfiguration` is used to configure the topology of this +# level. +# +# @core: core level. The @cores option in `SMPConfiguration` is used +# to configure the topology of this level. +# +# @module: module level. The @modules option in `SMPConfiguration` is +# used to configure the topology of this level. +# +# @cluster: cluster level. The @clusters option in `SMPConfiguration` +# is used to configure the topology of this level. +# +# @die: die level. The @dies option in `SMPConfiguration` is used to +# configure the topology of this level. +# +# @socket: socket level, which would also be called package level. +# The @sockets option in `SMPConfiguration` is used to configure +# the topology of this level. +# +# @book: book level. The @books option in `SMPConfiguration` is used +# to configure the topology of this level. +# +# @drawer: drawer level. The @drawers option in `SMPConfiguration` is +# used to configure the topology of this level. +# +# @default: default level. Some architectures will have default +# topology settings (e.g., cache topology), and this special +# level means following the architecture-specific settings. +# +# Since: 9.2 +## +{ 'enum': 'CpuTopologyLevel', + 'data': [ 'thread', 'core', 'module', 'cluster', 'die', + 'socket', 'book', 'drawer', 'default' ] } + +## +# @CacheLevelAndType: +# +# Caches a system may have. The enumeration value here is the +# combination of cache level and cache type. +# +# @l1d: L1 data cache. +# +# @l1i: L1 instruction cache. +# +# @l2: L2 (unified) cache. +# +# @l3: L3 (unified) cache +# +# Since: 9.2 +## +{ 'enum': 'CacheLevelAndType', + 'data': [ 'l1d', 'l1i', 'l2', 'l3' ] } + +## +# @SmpCacheProperties: +# +# Cache information for SMP system. +# +# @cache: Cache name, which is the combination of cache level +# and cache type. +# +# @topology: Cache topology level. It accepts the CPU topology +# enumeration as the parameter, i.e., CPUs in the same +# topology container share the same cache. 
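For readers picturing how the cache list carried by the wrapper type defined just below is spelled out, here is an illustrative sketch of an SmpCachePropertiesWrapper value; the particular cache-to-topology assignments are invented, the point is only the shape (one SmpCacheProperties entry per cache)::

    import json

    smp_cache = {
        "caches": [                                   # SmpCachePropertiesWrapper.caches
            {"cache": "l1d", "topology": "core"},     # per-core L1 data cache
            {"cache": "l1i", "topology": "core"},     # per-core L1 instruction cache
            {"cache": "l2",  "topology": "module"},   # L2 shared within a module
            {"cache": "l3",  "topology": "die"},      # L3 shared by the whole die
        ]
    }
    print(json.dumps(smp_cache, indent=2))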
+# +# Since: 9.2 +## +{ 'struct': 'SmpCacheProperties', + 'data': { + 'cache': 'CacheLevelAndType', + 'topology': 'CpuTopologyLevel' } } + +## +# @SmpCachePropertiesWrapper: +# +# List wrapper of `SmpCacheProperties`. +# +# @caches: the list of `SmpCacheProperties`. +# +# Since 9.2 +## +{ 'struct': 'SmpCachePropertiesWrapper', + 'data': { 'caches': ['SmpCacheProperties'] } } diff --git a/qapi/machine-s390x.json b/qapi/machine-s390x.json new file mode 100644 index 00000000000..966dbd61d2e --- /dev/null +++ b/qapi/machine-s390x.json @@ -0,0 +1,121 @@ +# -*- Mode: Python -*- +# vim: filetype=python +# +# SPDX-License-Identifier: GPL-2.0-or-later +# This work is licensed under the terms of the GNU GPL, version 2 or later. +# See the COPYING file in the top-level directory. + +{ 'include': 'machine-common.json' } + +## +# @S390CpuPolarization: +# +# An enumeration of CPU polarization that can be assumed by a virtual +# S390 CPU +# +# Since: 8.2 +## +{ 'enum': 'S390CpuPolarization', + 'data': [ 'horizontal', 'vertical' ] +} + +## +# @set-cpu-topology: +# +# Modify the topology by moving the CPU inside the topology tree, or +# by changing a modifier attribute of a CPU. Absent values will not +# be modified. +# +# @core-id: the vCPU ID to be moved +# +# @socket-id: destination socket to move the vCPU to +# +# @book-id: destination book to move the vCPU to +# +# @drawer-id: destination drawer to move the vCPU to +# +# @entitlement: entitlement to set +# +# @dedicated: whether the provisioning of real to virtual CPU is +# dedicated +# +# Features: +# +# @unstable: This command is experimental. +# +# Since: 8.2 +## +{ 'command': 'set-cpu-topology', + 'data': { + 'core-id': 'uint16', + '*socket-id': 'uint16', + '*book-id': 'uint16', + '*drawer-id': 'uint16', + '*entitlement': 'S390CpuEntitlement', + '*dedicated': 'bool' + }, + 'features': [ 'unstable' ] +} + +## +# @CPU_POLARIZATION_CHANGE: +# +# Emitted when the guest asks to change the polarization. +# +# The guest can tell the host (via the PTF instruction) whether the +# CPUs should be provisioned using horizontal or vertical +# polarization. +# +# On horizontal polarization the host is expected to provision all +# vCPUs equally. +# +# On vertical polarization the host can provision each vCPU +# differently. The guest will get information on the details of the +# provisioning the next time it uses the STSI(15) instruction. +# +# @polarization: polarization specified by the guest +# +# Features: +# +# @unstable: This event is experimental. +# +# Since: 8.2 +# +# .. qmp-example:: +# +# <- { "event": "CPU_POLARIZATION_CHANGE", +# "data": { "polarization": "horizontal" }, +# "timestamp": { "seconds": 1401385907, "microseconds": 422329 } } +## +{ 'event': 'CPU_POLARIZATION_CHANGE', + 'data': { 'polarization': 'S390CpuPolarization' }, + 'features': [ 'unstable' ] +} + +## +# @CpuPolarizationInfo: +# +# The result of a CPU polarization query. +# +# @polarization: the CPU polarization +# +# Since: 8.2 +## +{ 'struct': 'CpuPolarizationInfo', + 'data': { 'polarization': 'S390CpuPolarization' } +} + +## +# @query-s390x-cpu-polarization: +# +# Features: +# +# @unstable: This command is experimental. 
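As a sketch of how the polarization interfaces above fit together: the guest's PTF request surfaces as a CPU_POLARIZATION_CHANGE event, and query-s390x-cpu-polarization (documented next) reads the current state back. The concrete values below are invented for illustration::

    import json

    event = {                                          # delivered asynchronously to QMP clients
        "event": "CPU_POLARIZATION_CHANGE",
        "data": {"polarization": "vertical"},          # S390CpuPolarization
        "timestamp": {"seconds": 1401385907, "microseconds": 422329},
    }
    query = {"execute": "query-s390x-cpu-polarization"}
    reply = {"return": {"polarization": "vertical"}}   # CpuPolarizationInfo
    for msg in (event, query, reply):
        print(json.dumps(msg))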
+# +# Returns: the machine's CPU polarization +# +# Since: 8.2 +## +{ 'command': 'query-s390x-cpu-polarization', 'returns': 'CpuPolarizationInfo', + 'features': [ 'unstable' ] +} diff --git a/qapi/machine-target.json b/qapi/machine-target.json deleted file mode 100644 index 00bbecc905a..00000000000 --- a/qapi/machine-target.json +++ /dev/null @@ -1,522 +0,0 @@ -# -*- Mode: Python -*- -# vim: filetype=python -# -# This work is licensed under the terms of the GNU GPL, version 2 or later. -# See the COPYING file in the top-level directory. - -{ 'include': 'machine-common.json' } - -## -# @CpuModelInfo: -# -# Virtual CPU model. -# -# A CPU model consists of the name of a CPU definition, to which delta -# changes are applied (e.g. features added/removed). Most magic values -# that an architecture might require should be hidden behind the name. -# However, if required, architectures can expose relevant properties. -# -# @name: the name of the CPU definition the model is based on -# -# @props: a dictionary of QOM properties to be applied -# -# Since: 2.8 -## -{ 'struct': 'CpuModelInfo', - 'data': { 'name': 'str', - '*props': 'any' } } - -## -# @CpuModelExpansionType: -# -# An enumeration of CPU model expansion types. -# -# @static: Expand to a static CPU model, a combination of a static -# base model name and property delta changes. As the static base -# model will never change, the expanded CPU model will be the -# same, independent of QEMU version, machine type, machine -# options, and accelerator options. Therefore, the resulting -# model can be used by tooling without having to specify a -# compatibility machine - e.g. when displaying the "host" model. -# The @static CPU models are migration-safe. -# -# @full: Expand all properties. The produced model is not guaranteed -# to be migration-safe, but allows tooling to get an insight and -# work with model details. -# -# .. note:: When a non-migration-safe CPU model is expanded in static -# mode, some features enabled by the CPU model may be omitted, -# because they can't be implemented by a static CPU model definition -# (e.g. cache info passthrough and PMU passthrough in x86). If you -# need an accurate representation of the features enabled by a -# non-migration-safe CPU model, use @full. If you need a static -# representation that will keep ABI compatibility even when changing -# QEMU version or machine-type, use @static (but keep in mind that -# some features may be omitted). -# -# Since: 2.8 -## -{ 'enum': 'CpuModelExpansionType', - 'data': [ 'static', 'full' ] } - -## -# @CpuModelCompareResult: -# -# An enumeration of CPU model comparison results. The result is -# usually calculated using e.g. CPU features or CPU generations. -# -# @incompatible: If model A is incompatible to model B, model A is not -# guaranteed to run where model B runs and the other way around. -# -# @identical: If model A is identical to model B, model A is -# guaranteed to run where model B runs and the other way around. -# -# @superset: If model A is a superset of model B, model B is -# guaranteed to run where model A runs. There are no guarantees -# about the other way. -# -# @subset: If model A is a subset of model B, model A is guaranteed to -# run where model B runs. There are no guarantees about the other -# way. -# -# Since: 2.8 -## -{ 'enum': 'CpuModelCompareResult', - 'data': [ 'incompatible', 'identical', 'superset', 'subset' ] } - -## -# @CpuModelBaselineInfo: -# -# The result of a CPU model baseline. -# -# @model: the baselined CpuModelInfo. 
-# -# Since: 2.8 -## -{ 'struct': 'CpuModelBaselineInfo', - 'data': { 'model': 'CpuModelInfo' }, - 'if': 'TARGET_S390X' } - -## -# @CpuModelCompareInfo: -# -# The result of a CPU model comparison. -# -# @result: The result of the compare operation. -# -# @responsible-properties: List of properties that led to the -# comparison result not being identical. -# -# @responsible-properties is a list of QOM property names that led to -# both CPUs not being detected as identical. For identical models, -# this list is empty. If a QOM property is read-only, that means -# there's no known way to make the CPU models identical. If the -# special property name "type" is included, the models are by -# definition not identical and cannot be made identical. -# -# Since: 2.8 -## -{ 'struct': 'CpuModelCompareInfo', - 'data': { 'result': 'CpuModelCompareResult', - 'responsible-properties': ['str'] }, - 'if': 'TARGET_S390X' } - -## -# @query-cpu-model-comparison: -# -# Compares two CPU models, @modela and @modelb, returning how they -# compare in a specific configuration. The results indicates how -# both models compare regarding runnability. This result can be -# used by tooling to make decisions if a certain CPU model will -# run in a certain configuration or if a compatible CPU model has -# to be created by baselining. -# -# Usually, a CPU model is compared against the maximum possible CPU -# model of a certain configuration (e.g. the "host" model for KVM). -# If that CPU model is identical or a subset, it will run in that -# configuration. -# -# The result returned by this command may be affected by: -# -# * QEMU version: CPU models may look different depending on the QEMU -# version. (Except for CPU models reported as "static" in -# query-cpu-definitions.) -# * machine-type: CPU model may look different depending on the -# machine-type. (Except for CPU models reported as "static" in -# query-cpu-definitions.) -# * machine options (including accelerator): in some architectures, -# CPU models may look different depending on machine and accelerator -# options. (Except for CPU models reported as "static" in -# query-cpu-definitions.) -# * "-cpu" arguments and global properties: arguments to the -cpu -# option and global properties may affect expansion of CPU models. -# Using query-cpu-model-expansion while using these is not advised. -# -# Some architectures may not support comparing CPU models. s390x -# supports comparing CPU models. -# -# @modela: description of the first CPU model to compare, referred to as -# "model A" in CpuModelCompareResult -# -# @modelb: description of the second CPU model to compare, referred to as -# "model B" in CpuModelCompareResult -# -# Returns: a CpuModelCompareInfo describing how both CPU models -# compare -# -# Errors: -# - if comparing CPU models is not supported -# - if a model cannot be used -# - if a model contains an unknown cpu definition name, unknown -# properties or properties with wrong types. -# -# .. note:: This command isn't specific to s390x, but is only -# implemented on this architecture currently. -# -# Since: 2.8 -## -{ 'command': 'query-cpu-model-comparison', - 'data': { 'modela': 'CpuModelInfo', 'modelb': 'CpuModelInfo' }, - 'returns': 'CpuModelCompareInfo', - 'if': 'TARGET_S390X' } - -## -# @query-cpu-model-baseline: -# -# Baseline two CPU models, @modela and @modelb, creating a compatible -# third model. The created model will always be a static, -# migration-safe CPU model (see "static" CPU model expansion for details). 
-# -# This interface can be used by tooling to create a compatible CPU -# model out two CPU models. The created CPU model will be identical -# to or a subset of both CPU models when comparing them. Therefore, -# the created CPU model is guaranteed to run where the given CPU -# models run. -# -# The result returned by this command may be affected by: -# -# * QEMU version: CPU models may look different depending on the QEMU -# version. (Except for CPU models reported as "static" in -# query-cpu-definitions.) -# * machine-type: CPU model may look different depending on the -# machine-type. (Except for CPU models reported as "static" in -# query-cpu-definitions.) -# * machine options (including accelerator): in some architectures, -# CPU models may look different depending on machine and accelerator -# options. (Except for CPU models reported as "static" in -# query-cpu-definitions.) -# * "-cpu" arguments and global properties: arguments to the -cpu -# option and global properties may affect expansion of CPU models. -# Using query-cpu-model-expansion while using these is not advised. -# -# Some architectures may not support baselining CPU models. s390x -# supports baselining CPU models. -# -# @modela: description of the first CPU model to baseline -# -# @modelb: description of the second CPU model to baseline -# -# Returns: a CpuModelBaselineInfo describing the baselined CPU model -# -# Errors: -# - if baselining CPU models is not supported -# - if a model cannot be used -# - if a model contains an unknown cpu definition name, unknown -# properties or properties with wrong types. -# -# .. note:: This command isn't specific to s390x, but is only -# implemented on this architecture currently. -# -# Since: 2.8 -## -{ 'command': 'query-cpu-model-baseline', - 'data': { 'modela': 'CpuModelInfo', - 'modelb': 'CpuModelInfo' }, - 'returns': 'CpuModelBaselineInfo', - 'if': 'TARGET_S390X' } - -## -# @CpuModelExpansionInfo: -# -# The result of a cpu model expansion. -# -# @model: the expanded CpuModelInfo. -# -# @deprecated-props: a list of properties that are flagged as deprecated -# by the CPU vendor. The list depends on the CpuModelExpansionType: -# "static" properties are a subset of the enabled-properties for -# the expanded model; "full" properties are a set of properties -# that are deprecated across all models for the architecture. -# (since: 9.1). -# -# Since: 2.8 -## -{ 'struct': 'CpuModelExpansionInfo', - 'data': { 'model': 'CpuModelInfo', - 'deprecated-props' : { 'type': ['str'], - 'if': 'TARGET_S390X' } }, - 'if': { 'any': [ 'TARGET_S390X', - 'TARGET_I386', - 'TARGET_ARM', - 'TARGET_LOONGARCH64', - 'TARGET_RISCV' ] } } - -## -# @query-cpu-model-expansion: -# -# Expands a given CPU model, @model, (or a combination of CPU model + -# additional options) to different granularities, specified by -# @type, allowing tooling to get an understanding what a specific -# CPU model looks like in QEMU under a certain configuration. -# -# This interface can be used to query the "host" CPU model. -# -# The data returned by this command may be affected by: -# -# * QEMU version: CPU models may look different depending on the QEMU -# version. (Except for CPU models reported as "static" in -# query-cpu-definitions.) -# * machine-type: CPU model may look different depending on the -# machine-type. (Except for CPU models reported as "static" in -# query-cpu-definitions.) 
-# * machine options (including accelerator): in some architectures, -# CPU models may look different depending on machine and accelerator -# options. (Except for CPU models reported as "static" in -# query-cpu-definitions.) -# * "-cpu" arguments and global properties: arguments to the -cpu -# option and global properties may affect expansion of CPU models. -# Using query-cpu-model-expansion while using these is not advised. -# -# Some architectures may not support all expansion types. s390x -# supports "full" and "static". Arm only supports "full". -# -# @model: description of the CPU model to expand -# -# @type: expansion type, specifying how to expand the CPU model -# -# Returns: a CpuModelExpansionInfo describing the expanded CPU model -# -# Errors: -# - if expanding CPU models is not supported -# - if the model cannot be expanded -# - if the model contains an unknown CPU definition name, unknown -# properties or properties with a wrong type -# - if an expansion type is not supported -# -# Since: 2.8 -## -{ 'command': 'query-cpu-model-expansion', - 'data': { 'type': 'CpuModelExpansionType', - 'model': 'CpuModelInfo' }, - 'returns': 'CpuModelExpansionInfo', - 'if': { 'any': [ 'TARGET_S390X', - 'TARGET_I386', - 'TARGET_ARM', - 'TARGET_LOONGARCH64', - 'TARGET_RISCV' ] } } - -## -# @CpuDefinitionInfo: -# -# Virtual CPU definition. -# -# @name: the name of the CPU definition -# -# @migration-safe: whether a CPU definition can be safely used for -# migration in combination with a QEMU compatibility machine when -# migrating between different QEMU versions and between hosts with -# different sets of (hardware or software) capabilities. If not -# provided, information is not available and callers should not -# assume the CPU definition to be migration-safe. (since 2.8) -# -# @static: whether a CPU definition is static and will not change -# depending on QEMU version, machine type, machine options and -# accelerator options. A static model is always migration-safe. -# (since 2.8) -# -# @unavailable-features: List of properties that prevent the CPU model -# from running in the current host. (since 2.8) -# -# @typename: Type name that can be used as argument to -# @device-list-properties, to introspect properties configurable -# using -cpu or -global. (since 2.9) -# -# @alias-of: Name of CPU model this model is an alias for. The target -# of the CPU model alias may change depending on the machine type. -# Management software is supposed to translate CPU model aliases -# in the VM configuration, because aliases may stop being -# migration-safe in the future (since 4.1) -# -# @deprecated: If true, this CPU model is deprecated and may be -# removed in in some future version of QEMU according to the QEMU -# deprecation policy. (since 5.2) -# -# @unavailable-features is a list of QOM property names that represent -# CPU model attributes that prevent the CPU from running. If the QOM -# property is read-only, that means there's no known way to make the -# CPU model run in the current host. Implementations that choose not -# to provide specific information return the property name "type". If -# the property is read-write, it means that it MAY be possible to run -# the CPU model in the current host if that property is changed. -# Management software can use it as hints to suggest or choose an -# alternative for the user, or just to generate meaningful error -# messages explaining why the CPU model can't be used. 
If -# @unavailable-features is an empty list, the CPU model is runnable -# using the current host and machine-type. If @unavailable-features -# is not present, runnability information for the CPU is not -# available. -# -# Since: 1.2 -## -{ 'struct': 'CpuDefinitionInfo', - 'data': { 'name': 'str', - '*migration-safe': 'bool', - 'static': 'bool', - '*unavailable-features': [ 'str' ], - 'typename': 'str', - '*alias-of' : 'str', - 'deprecated' : 'bool' }, - 'if': { 'any': [ 'TARGET_PPC', - 'TARGET_ARM', - 'TARGET_I386', - 'TARGET_S390X', - 'TARGET_MIPS', - 'TARGET_LOONGARCH64', - 'TARGET_RISCV' ] } } - -## -# @query-cpu-definitions: -# -# Return a list of supported virtual CPU definitions -# -# Returns: a list of CpuDefinitionInfo -# -# Since: 1.2 -## -{ 'command': 'query-cpu-definitions', 'returns': ['CpuDefinitionInfo'], - 'if': { 'any': [ 'TARGET_PPC', - 'TARGET_ARM', - 'TARGET_I386', - 'TARGET_S390X', - 'TARGET_MIPS', - 'TARGET_LOONGARCH64', - 'TARGET_RISCV' ] } } - -## -# @CpuS390Polarization: -# -# An enumeration of CPU polarization that can be assumed by a virtual -# S390 CPU -# -# Since: 8.2 -## -{ 'enum': 'CpuS390Polarization', - 'prefix': 'S390_CPU_POLARIZATION', - 'data': [ 'horizontal', 'vertical' ], - 'if': 'TARGET_S390X' -} - -## -# @set-cpu-topology: -# -# Modify the topology by moving the CPU inside the topology tree, or -# by changing a modifier attribute of a CPU. Absent values will not -# be modified. -# -# @core-id: the vCPU ID to be moved -# -# @socket-id: destination socket to move the vCPU to -# -# @book-id: destination book to move the vCPU to -# -# @drawer-id: destination drawer to move the vCPU to -# -# @entitlement: entitlement to set -# -# @dedicated: whether the provisioning of real to virtual CPU is -# dedicated -# -# Features: -# -# @unstable: This command is experimental. -# -# Since: 8.2 -## -{ 'command': 'set-cpu-topology', - 'data': { - 'core-id': 'uint16', - '*socket-id': 'uint16', - '*book-id': 'uint16', - '*drawer-id': 'uint16', - '*entitlement': 'CpuS390Entitlement', - '*dedicated': 'bool' - }, - 'features': [ 'unstable' ], - 'if': { 'all': [ 'TARGET_S390X' , 'CONFIG_KVM' ] } -} - -## -# @CPU_POLARIZATION_CHANGE: -# -# Emitted when the guest asks to change the polarization. -# -# The guest can tell the host (via the PTF instruction) whether the -# CPUs should be provisioned using horizontal or vertical -# polarization. -# -# On horizontal polarization the host is expected to provision all -# vCPUs equally. -# -# On vertical polarization the host can provision each vCPU -# differently. The guest will get information on the details of the -# provisioning the next time it uses the STSI(15) instruction. -# -# @polarization: polarization specified by the guest -# -# Features: -# -# @unstable: This event is experimental. -# -# Since: 8.2 -# -# .. qmp-example:: -# -# <- { "event": "CPU_POLARIZATION_CHANGE", -# "data": { "polarization": "horizontal" }, -# "timestamp": { "seconds": 1401385907, "microseconds": 422329 } } -## -{ 'event': 'CPU_POLARIZATION_CHANGE', - 'data': { 'polarization': 'CpuS390Polarization' }, - 'features': [ 'unstable' ], - 'if': { 'all': [ 'TARGET_S390X', 'CONFIG_KVM' ] } -} - -## -# @CpuPolarizationInfo: -# -# The result of a CPU polarization query. 
-# -# @polarization: the CPU polarization -# -# Since: 8.2 -## -{ 'struct': 'CpuPolarizationInfo', - 'data': { 'polarization': 'CpuS390Polarization' }, - 'if': { 'all': [ 'TARGET_S390X', 'CONFIG_KVM' ] } -} - -## -# @query-s390x-cpu-polarization: -# -# Features: -# -# @unstable: This command is experimental. -# -# Returns: the machine's CPU polarization -# -# Since: 8.2 -## -{ 'command': 'query-s390x-cpu-polarization', 'returns': 'CpuPolarizationInfo', - 'features': [ 'unstable' ], - 'if': { 'all': [ 'TARGET_S390X', 'CONFIG_KVM' ] } -} diff --git a/qapi/machine.json b/qapi/machine.json index fcfd249e2d6..038eab281c7 100644 --- a/qapi/machine.json +++ b/qapi/machine.json @@ -5,7 +5,9 @@ # See the COPYING file in the top-level directory. ## -# = Machines +# ******** +# Machines +# ******** ## { 'include': 'common.json' } @@ -27,13 +29,13 @@ # @loongarch64: since 7.1 # # .. note:: The resulting QMP strings can be appended to the -# "qemu-system-" prefix to produce the corresponding QEMU executable -# name. This is true even for "qemu-system-x86_64". +# "qemu-system-" prefix to produce the corresponding QEMU +# executable name. This is true even for "qemu-system-x86_64". # # Since: 3.0 ## { 'enum' : 'SysEmuTarget', - 'data' : [ 'aarch64', 'alpha', 'arm', 'avr', 'cris', 'hppa', 'i386', + 'data' : [ 'aarch64', 'alpha', 'arm', 'avr', 'hppa', 'i386', 'loongarch64', 'm68k', 'microblaze', 'microblazeel', 'mips', 'mips64', 'mips64el', 'mipsel', 'or1k', 'ppc', 'ppc64', 'riscv32', 'riscv64', 'rx', 's390x', 'sh4', @@ -41,15 +43,14 @@ 'x86_64', 'xtensa', 'xtensaeb' ] } ## -# @CpuS390State: +# @S390CpuState: # # An enumeration of cpu states that can be assumed by a virtual S390 # CPU # # Since: 2.12 ## -{ 'enum': 'CpuS390State', - 'prefix': 'S390_CPU_STATE', +{ 'enum': 'S390CpuState', 'data': [ 'uninitialized', 'stopped', 'check-stop', 'operating', 'load' ] } ## @@ -66,9 +67,9 @@ # Since: 2.12 ## { 'struct': 'CpuInfoS390', - 'data': { 'cpu-state': 'CpuS390State', + 'data': { 'cpu-state': 'S390CpuState', '*dedicated': 'bool', - '*entitlement': 'CpuS390Entitlement' } } + '*entitlement': 'S390CpuEntitlement' } } ## # @CpuInfoFast: @@ -77,6 +78,8 @@ # # @cpu-index: index of the virtual CPU # +# @qom-type: QOM type name of the CPU (since 10.1) +# # @qom-path: path to the CPU object in the QOM tree # # @thread-id: ID of the underlying host thread @@ -90,6 +93,7 @@ ## { 'union' : 'CpuInfoFast', 'base' : { 'cpu-index' : 'int', + 'qom-type' : 'str', 'qom-path' : 'str', 'thread-id' : 'int', '*props' : 'CpuInstanceProperties', @@ -100,9 +104,7 @@ ## # @query-cpus-fast: # -# Returns information about all virtual CPUs. -# -# Returns: list of @CpuInfoFast +# Return information about all virtual CPUs. # # Since: 2.12 # @@ -183,13 +185,13 @@ # @default-cpu-type: default CPU model typename if none is requested # via the -cpu argument. (since 4.2) # -# @default-ram-id: the default ID of initial RAM memory backend (since -# 5.2) +# @default-ram-id: the default ID of initial RAM memory backend +# (since 5.2) # # @acpi: machine type supports ACPI (since 8.0) # # @compat-props: The machine type's compatibility properties. Only -# present when query-machines argument @compat-props is true. +# present when `query-machines` argument @compat-props is true. # (since 9.1) # # Features: @@ -219,8 +221,6 @@ # # @unstable: Argument @compat-props is experimental. # -# Returns: a list of MachineInfo -# # Since: 1.2 # # .. qmp-example:: @@ -269,22 +269,20 @@ # # Return information on the current virtual machine. 
# -# Returns: CurrentMachineParams -# # Since: 4.0 ## { 'command': 'query-current-machine', 'returns': 'CurrentMachineParams' } ## -# @TargetInfo: +# @QemuTargetInfo: # -# Information describing the QEMU target. +# Information on the target configuration built into the QEMU binary. # # @arch: the target architecture # # Since: 1.2 ## -{ 'struct': 'TargetInfo', +{ 'struct': 'QemuTargetInfo', 'data': { 'arch': 'SysEmuTarget' } } ## @@ -292,11 +290,9 @@ # # Return information about the target for this QEMU # -# Returns: TargetInfo -# # Since: 1.2 ## -{ 'command': 'query-target', 'returns': 'TargetInfo' } +{ 'command': 'query-target', 'returns': 'QemuTargetInfo' } ## # @UuidInfo: @@ -317,8 +313,6 @@ # # Query the guest UUID information. # -# Returns: The @UuidInfo for the guest -# # Since: 0.14 # # .. qmp-example:: @@ -371,8 +365,8 @@ # # .. note:: A guest may or may not respond to this command. This # command returning does not indicate that a guest has accepted the -# request or that it has shut down. Many guests will respond to this -# command by prompting the user in some way. +# request or that it has shut down. Many guests will respond to +# this command by prompting the user in some way. # # .. qmp-example:: # @@ -386,7 +380,7 @@ # # Wake up guest from suspend. If the guest has wake-up from suspend # support enabled (wakeup-suspend-support flag from -# query-current-machine), wake-up guest from suspend if the guest is +# `query-current-machine`), wake-up guest from suspend if the guest is # in SUSPENDED state. Return an error otherwise. # # Since: 1.1 @@ -437,7 +431,7 @@ # @inject-nmi: # # Injects a Non-Maskable Interrupt into the default CPU (x86/s390) or -# all CPUs (ppc64). The command fails when the guest doesn't support +# all CPUs (ppc64). The command fails when the guest doesn't support # injecting. # # Since: 0.14 @@ -452,35 +446,6 @@ ## { 'command': 'inject-nmi' } -## -# @KvmInfo: -# -# Information about support for KVM acceleration -# -# @enabled: true if KVM acceleration is active -# -# @present: true if KVM acceleration is built into this executable -# -# Since: 0.14 -## -{ 'struct': 'KvmInfo', 'data': {'enabled': 'bool', 'present': 'bool'} } - -## -# @query-kvm: -# -# Returns information about KVM acceleration -# -# Returns: @KvmInfo -# -# Since: 0.14 -# -# .. qmp-example:: -# -# -> { "execute": "query-kvm" } -# <- { "return": { "enabled": true, "present": true } } -## -{ 'command': 'query-kvm', 'returns': 'KvmInfo' } - ## # @NumaOptionsType: # @@ -600,7 +565,7 @@ # # List of CXL Fixed Memory Windows. # -# @cxl-fmw: List of CXLFixedMemoryWindowOptions +# @cxl-fmw: List of `CXLFixedMemoryWindowOptions` # # Since: 7.1 ## @@ -655,10 +620,10 @@ ## # @NumaCpuOptions: # -# Option "-numa cpu" overrides default cpu to node mapping. It -# accepts the same set of cpu properties as returned by -# query-hotpluggable-cpus[].props, where node-id could be used to -# override default node mapping. +# Option "-numa cpu" overrides default cpu to node mapping. It accepts +# the same set of cpu properties as returned by +# `query-hotpluggable-cpus[].props `, where +# node-id could be used to override default node mapping. # # Since: 2.10 ## @@ -672,7 +637,7 @@ # The memory hierarchy in the System Locality Latency and Bandwidth # Information Structure of HMAT (Heterogeneous Memory Attribute Table) # -# For more information about @HmatLBMemoryHierarchy, see chapter +# For more information about `HmatLBMemoryHierarchy`, see chapter # 5.2.27.4: Table 5-146: Field "Flags" of ACPI 6.3 spec. 
# # @memory: the structure represents the memory performance @@ -694,8 +659,8 @@ # Data type in the System Locality Latency and Bandwidth Information # Structure of HMAT (Heterogeneous Memory Attribute Table) # -# For more information about @HmatLBDataType, see chapter 5.2.27.4: -# Table 5-146: Field "Data Type" of ACPI 6.3 spec. +# For more information about `HmatLBDataType`, see chapter 5.2.27.4: +# Table 5-146: Field "Data Type" of ACPI 6.3 spec. # # @access-latency: access latency (nanoseconds) # @@ -721,7 +686,7 @@ # Set the system locality latency and bandwidth information between # Initiator and Target proximity Domains. # -# For more information about @NumaHmatLBOptions, see chapter 5.2.27.4: +# For more information about `NumaHmatLBOptions`, see chapter 5.2.27.4: # Table 5-146 of ACPI 6.3 spec. # # @initiator: the Initiator Proximity Domain. @@ -757,7 +722,7 @@ # Cache associativity in the Memory Side Cache Information Structure # of HMAT # -# For more information of @HmatCacheAssociativity, see chapter +# For more information of `HmatCacheAssociativity`, see chapter # 5.2.27.5: Table 5-147 of ACPI 6.3 spec. # # @none: None (no memory side cache in this proximity domain, or cache @@ -778,7 +743,7 @@ # Cache write policy in the Memory Side Cache Information Structure of # HMAT # -# For more information of @HmatCacheWritePolicy, see chapter 5.2.27.5: +# For more information of `HmatCacheWritePolicy`, see chapter 5.2.27.5: # Table 5-147: Field "Cache Attributes" of ACPI 6.3 spec. # # @none: None (no memory side cache in this proximity domain, or cache @@ -798,7 +763,7 @@ # # Set the memory side cache information for a given memory domain. # -# For more information of @NumaHmatCacheOptions, see chapter 5.2.27.5: +# For more information of `NumaHmatCacheOptions`, see chapter 5.2.27.5: # Table 5-147: Field "Cache Attributes" of ACPI 6.3 spec. # # @node-id: the memory proximity domain to which the memory belongs. @@ -812,7 +777,7 @@ # # @policy: the write policy, none/write-back/write-through. # -# @line: the cache Line size in bytes. +# @line: the cache line size in bytes. # # Since: 5.0 ## @@ -852,7 +817,11 @@ # <- { "return": {} } ## { 'command': 'memsave', - 'data': {'val': 'int', 'size': 'int', 'filename': 'str', '*cpu-index': 'int'} } + 'data': { + 'val': 'uint64', + 'size': 'size', + 'filename': 'str', + '*cpu-index': 'int' } } ## # @pmemsave: @@ -878,7 +847,10 @@ # <- { "return": {} } ## { 'command': 'pmemsave', - 'data': {'val': 'int', 'size': 'int', 'filename': 'str'} } + 'data': { + 'val': 'uint64', + 'size': 'size', + 'filename': 'str' } } ## # @Memdev: @@ -924,9 +896,7 @@ ## # @query-memdev: # -# Returns information for all memory backends. -# -# Returns: a list of @Memdev. +# Return information for all memory backends. 
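The memsave/pmemsave hunks above tighten the argument types to 'uint64' and 'size'; from a client's point of view the requests remain plain JSON integers. An illustrative memsave request (address, length and path are invented for the sketch)::

    import json

    req = {
        "execute": "memsave",
        "arguments": {
            "val": 0x7F0000001000,             # guest virtual address to start from (now 'uint64')
            "size": 4096,                      # number of bytes to save (now 'size')
            "filename": "/tmp/virtual-mem-dump",
            "cpu-index": 0,                    # optional: which vCPU's address space to use
        },
    }
    print(json.dumps(req))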
# # Since: 2.1 # @@ -970,7 +940,7 @@ # # The ids other than the node-id specify the position of the CPU # within the CPU topology (as defined by the machine property "smp", -# thus see also type @SMPConfiguration) +# thus see also type `SMPConfiguration`) # # @node-id: NUMA node ID the CPU belongs to # @@ -988,8 +958,8 @@ # @cluster-id: cluster number within the parent container the CPU # belongs to (since 7.1) # -# @module-id: module number within the parent container the CPU belongs -# to (since 9.1) +# @module-id: module number within the parent container the CPU +# belongs to (since 9.1) # # @core-id: core number within the parent container the CPU belongs to # @@ -998,7 +968,7 @@ # Since: 2.7 ## { 'struct': 'CpuInstanceProperties', - # Keep these in sync with the properties device_add accepts + # Keep these in sync with the properties `device_add` accepts 'data': { '*node-id': 'int', '*drawer-id': 'int', '*book-id': 'int', @@ -1014,19 +984,19 @@ ## # @HotpluggableCPU: # -# @type: CPU object type for usage with device_add command +# @type: CPU object type for usage with `device_add` command # # @props: list of properties to pass for hotplugging a CPU with -# device_add +# `device_add` # -# @vcpus-count: number of logical VCPU threads @HotpluggableCPU +# @vcpus-count: number of logical VCPU threads `HotpluggableCPU` # provides # # @qom-path: link to existing CPU object if CPU is present or omitted # if CPU is not present. # # .. note:: Management should be prepared to pass through additional -# properties with device_add. +# properties with `device_add`. # # Since: 2.7 ## @@ -1043,8 +1013,6 @@ # # TODO: Better documentation; currently there is none. # -# Returns: a list of HotpluggableCPU objects. -# # Since: 2.7 # # .. qmp-example:: @@ -1083,7 +1051,7 @@ # :annotated: # # For s390x-virtio-ccw machine type started with -# ``-smp 1,maxcpus=2 -cpu qemu`` (Since: 2.11):: +# ``-smp 1,maxcpus=2 -cpu qemu``:: # # -> { "execute": "query-hotpluggable-cpus" } # <- {"return": [ @@ -1132,8 +1100,8 @@ # - If no balloon device is present, DeviceNotActive # # .. note:: This command just issues a request to the guest. When it -# returns, the balloon size may not have changed. A guest can change -# the balloon size independent of this command. +# returns, the balloon size may not have changed. A guest can +# change the balloon size independent of this command. # # Since: 0.14 # @@ -1154,7 +1122,7 @@ # # Information about the guest balloon device. # -# @actual: the logical size of the VM in bytes Formula used: +# @actual: the logical size of the VM in bytes. Formula used: # logical_vm_size = vm_ram_size - balloon_size # # Since: 0.14 @@ -1166,9 +1134,6 @@ # # Return information about the balloon device. # -# Returns: -# @BalloonInfo -# # Errors: # - If the balloon driver is enabled but not functional because # the KVM kernel module cannot support it, KVMMissingCap @@ -1190,10 +1155,10 @@ # @BALLOON_CHANGE: # # Emitted when the guest changes the actual BALLOON level. This value -# is equivalent to the @actual field return by the 'query-balloon' +# is equivalent to the @actual field return by the `query-balloon` # command # -# @actual: the logical size of the VM in bytes Formula used: +# @actual: the logical size of the VM in bytes. Formula used: # logical_vm_size = vm_ram_size - balloon_size # # .. note:: This event is rate-limited. 
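A minimal balloon request matching the note above (the value is illustrative; it asks the guest to target roughly 512 MiB of RAM, and the guest may or may not comply):

    -> { "execute": "balloon", "arguments": { "value": 536870912 } }
    <- { "return": {} }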
@@ -1229,12 +1194,9 @@ ## # @query-hv-balloon-status-report: # -# Returns the hv-balloon driver data contained in the last received +# Return the hv-balloon driver data contained in the last received # "STATUS" message from the guest. # -# Returns: -# @HvBalloonInfo -# # Errors: # - If no hv-balloon device is present, guest memory status # reporting is not enabled or no guest memory status report @@ -1295,6 +1257,8 @@ # Return the amount of initially allocated and present hotpluggable # (if enabled) memory in bytes. # +# TODO: This line is a hack to separate the example from the body +# # .. qmp-example:: # # -> { "execute": "query-memory-size-summary" } @@ -1659,8 +1623,8 @@ # The members other than @cpus and @maxcpus define a topology of # containers. # -# The ordering from highest/coarsest to lowest/finest is: -# @drawers, @books, @sockets, @dies, @clusters, @cores, @threads. +# The ordering from highest/coarsest to lowest/finest is: @drawers, +# @books, @sockets, @dies, @clusters, @cores, @threads. # # Different architectures support different subsets of topology # containers. @@ -1755,24 +1719,6 @@ 'returns': 'HumanReadableText', 'features': [ 'unstable' ] } -## -# @x-query-opcount: -# -# Query TCG opcode counters -# -# Features: -# -# @unstable: This command is meant for debugging. -# -# Returns: TCG opcode counters -# -# Since: 6.2 -## -{ 'command': 'x-query-opcount', - 'returns': 'HumanReadableText', - 'if': 'CONFIG_TCG', - 'features': [ 'unstable' ] } - ## # @x-query-ramblock: # @@ -1892,3 +1838,382 @@ { 'command': 'x-query-interrupt-controllers', 'returns': 'HumanReadableText', 'features': [ 'unstable' ]} + +## +# @dump-skeys: +# +# Dump the storage keys for an s390x guest +# +# @filename: the path to the file to dump to +# +# Since: 2.5 +# +# .. qmp-example:: +# +# -> { "execute": "dump-skeys", +# "arguments": { "filename": "/tmp/skeys" } } +# <- { "return": {} } +## +{ 'command': 'dump-skeys', + 'data': { 'filename': 'str' } } + +## +# @CpuModelInfo: +# +# Virtual CPU model. +# +# A CPU model consists of the name of a CPU definition, to which delta +# changes are applied (e.g. features added/removed). Most magic +# values that an architecture might require should be hidden behind +# the name. However, if required, architectures can expose relevant +# properties. +# +# @name: the name of the CPU definition the model is based on +# +# @props: a dictionary of QOM properties to be applied +# +# Since: 2.8 +## +{ 'struct': 'CpuModelInfo', + 'data': { 'name': 'str', + '*props': 'any' } } + +## +# @CpuModelExpansionType: +# +# An enumeration of CPU model expansion types. +# +# @static: Expand to a static CPU model, a combination of a static +# base model name and property delta changes. As the static base +# model will never change, the expanded CPU model will be the +# same, independent of QEMU version, machine type, machine +# options, and accelerator options. Therefore, the resulting +# model can be used by tooling without having to specify a +# compatibility machine - e.g. when displaying the "host" model. +# The @static CPU models are migration-safe. +# +# @full: Expand all properties. The produced model is not guaranteed +# to be migration-safe, but allows tooling to get an insight and +# work with model details. +# +# .. note:: When a non-migration-safe CPU model is expanded in static +# mode, some features enabled by the CPU model may be omitted, +# because they can't be implemented by a static CPU model +# definition (e.g. 
cache info passthrough and PMU passthrough in +# x86). If you need an accurate representation of the features +# enabled by a non-migration-safe CPU model, use @full. If you +# need a static representation that will keep ABI compatibility +# even when changing QEMU version or machine-type, use @static (but +# keep in mind that some features may be omitted). +# +# Since: 2.8 +## +{ 'enum': 'CpuModelExpansionType', + 'data': [ 'static', 'full' ] } + +## +# @CpuModelCompareResult: +# +# An enumeration of CPU model comparison results. The result is +# usually calculated using e.g. CPU features or CPU generations. +# +# @incompatible: If model A is incompatible to model B, model A is not +# guaranteed to run where model B runs and the other way around. +# +# @identical: If model A is identical to model B, model A is +# guaranteed to run where model B runs and the other way around. +# +# @superset: If model A is a superset of model B, model B is +# guaranteed to run where model A runs. There are no guarantees +# about the other way. +# +# @subset: If model A is a subset of model B, model A is guaranteed to +# run where model B runs. There are no guarantees about the other +# way. +# +# Since: 2.8 +## +{ 'enum': 'CpuModelCompareResult', + 'data': [ 'incompatible', 'identical', 'superset', 'subset' ] } + +## +# @CpuModelBaselineInfo: +# +# The result of a CPU model baseline. +# +# @model: the baselined `CpuModelInfo`. +# +# Since: 2.8 +## +{ 'struct': 'CpuModelBaselineInfo', + 'data': { 'model': 'CpuModelInfo' } } + +## +# @CpuModelCompareInfo: +# +# The result of a CPU model comparison. +# +# @result: The result of the compare operation. +# +# @responsible-properties: List of properties that led to the +# comparison result not being identical. +# +# @responsible-properties is a list of QOM property names that led to +# both CPUs not being detected as identical. For identical models, +# this list is empty. If a QOM property is read-only, that means +# there's no known way to make the CPU models identical. If the +# special property name "type" is included, the models are by +# definition not identical and cannot be made identical. +# +# Since: 2.8 +## +{ 'struct': 'CpuModelCompareInfo', + 'data': { 'result': 'CpuModelCompareResult', + 'responsible-properties': ['str'] } } + +## +# @query-cpu-model-comparison: +# +# Compares two CPU models, @modela and @modelb, returning how they +# compare in a specific configuration. The results indicates how +# both models compare regarding runnability. This result can be +# used by tooling to make decisions if a certain CPU model will +# run in a certain configuration or if a compatible CPU model has +# to be created by baselining. +# +# Usually, a CPU model is compared against the maximum possible CPU +# model of a certain configuration (e.g. the "host" model for KVM). +# If that CPU model is identical or a subset, it will run in that +# configuration. +# +# The result returned by this command may be affected by: +# +# * QEMU version: CPU models may look different depending on the QEMU +# version. (Except for CPU models reported as "static" in +# `query-cpu-definitions`.) +# * machine-type: CPU model may look different depending on the +# machine-type. (Except for CPU models reported as "static" in +# `query-cpu-definitions`.) +# * machine options (including accelerator): in some architectures, +# CPU models may look different depending on machine and accelerator +# options. (Except for CPU models reported as "static" in +# `query-cpu-definitions`.) 
+# * "-cpu" arguments and global properties: arguments to the -cpu +# option and global properties may affect expansion of CPU models. +# Using `query-cpu-model-expansion` while using these is not advised. +# +# Some architectures may not support comparing CPU models. s390x +# supports comparing CPU models. +# +# @modela: description of the first CPU model to compare, referred to +# as "model A" in `CpuModelCompareResult` +# +# @modelb: description of the second CPU model to compare, referred to +# as "model B" in `CpuModelCompareResult` +# +# Returns: a `CpuModelCompareInfo` describing how both CPU models +# compare +# +# Errors: +# - if comparing CPU models is not supported by the target +# - if a model cannot be used +# - if a model contains an unknown cpu definition name, unknown +# properties or properties with wrong types. +# +# Since: 2.8 +## +{ 'command': 'query-cpu-model-comparison', + 'data': { 'modela': 'CpuModelInfo', 'modelb': 'CpuModelInfo' }, + 'returns': 'CpuModelCompareInfo' } + +## +# @query-cpu-model-baseline: +# +# Baseline two CPU models, @modela and @modelb, creating a compatible +# third model. The created model will always be a static, +# migration-safe CPU model (see "static" CPU model expansion for +# details). +# +# This interface can be used by tooling to create a compatible CPU +# model out two CPU models. The created CPU model will be identical +# to or a subset of both CPU models when comparing them. Therefore, +# the created CPU model is guaranteed to run where the given CPU +# models run. +# +# The result returned by this command may be affected by: +# +# * QEMU version: CPU models may look different depending on the QEMU +# version. (Except for CPU models reported as "static" in +# `query-cpu-definitions`.) +# * machine-type: CPU model may look different depending on the +# machine-type. (Except for CPU models reported as "static" in +# `query-cpu-definitions`.) +# * machine options (including accelerator): in some architectures, +# CPU models may look different depending on machine and accelerator +# options. (Except for CPU models reported as "static" in +# `query-cpu-definitions`.) +# * "-cpu" arguments and global properties: arguments to the -cpu +# option and global properties may affect expansion of CPU models. +# Using `query-cpu-model-expansion` while using these is not advised. +# +# Some architectures may not support baselining CPU models. s390x +# supports baselining CPU models. +# +# @modela: description of the first CPU model to baseline +# +# @modelb: description of the second CPU model to baseline +# +# Returns: a `CpuModelBaselineInfo` describing the baselined CPU model +# +# Errors: +# - if baselining CPU models is not supported by the target +# - if a model cannot be used +# - if a model contains an unknown cpu definition name, unknown +# properties or properties with wrong types. +# +# Since: 2.8 +## +{ 'command': 'query-cpu-model-baseline', + 'data': { 'modela': 'CpuModelInfo', + 'modelb': 'CpuModelInfo' }, + 'returns': 'CpuModelBaselineInfo' } + +## +# @CpuModelExpansionInfo: +# +# The result of a cpu model expansion. +# +# @model: the expanded `CpuModelInfo`. +# +# @deprecated-props: an optional list of properties that are flagged as +# deprecated by the CPU vendor. The list depends on the +# `CpuModelExpansionType`: "static" properties are a subset of the +# enabled-properties for the expanded model; "full" properties are +# a set of properties that are deprecated across all models for +# the architecture. 
(since: 10.1 -- since 9.1 on s390x --). +# +# Since: 2.8 +## +{ 'struct': 'CpuModelExpansionInfo', + 'data': { 'model': 'CpuModelInfo', + '*deprecated-props' : ['str'] } } + +## +# @query-cpu-model-expansion: +# +# Expands a given CPU model, @model, (or a combination of CPU model + +# additional options) to different granularities, specified by @type, +# allowing tooling to get an understanding what a specific CPU model +# looks like in QEMU under a certain configuration. +# +# This interface can be used to query the "host" CPU model. +# +# The data returned by this command may be affected by: +# +# * QEMU version: CPU models may look different depending on the QEMU +# version. (Except for CPU models reported as "static" in +# `query-cpu-definitions`.) +# * machine-type: CPU model may look different depending on the +# machine-type. (Except for CPU models reported as "static" in +# `query-cpu-definitions`.) +# * machine options (including accelerator): in some architectures, +# CPU models may look different depending on machine and accelerator +# options. (Except for CPU models reported as "static" in +# `query-cpu-definitions`.) +# * "-cpu" arguments and global properties: arguments to the -cpu +# option and global properties may affect expansion of CPU models. +# Using `query-cpu-model-expansion` while using these is not advised. +# +# Some architectures may not support all expansion types. s390x +# supports "full" and "static". Arm only supports "full". +# +# @model: description of the CPU model to expand +# +# @type: expansion type, specifying how to expand the CPU model +# +# Returns: a `CpuModelExpansionInfo` describing the expanded CPU model +# +# Errors: +# - if expanding CPU models is not supported +# - if the model cannot be expanded +# - if the model contains an unknown CPU definition name, unknown +# properties or properties with a wrong type +# - if an expansion type is not supported +# +# Since: 2.8 +## +{ 'command': 'query-cpu-model-expansion', + 'data': { 'type': 'CpuModelExpansionType', + 'model': 'CpuModelInfo' }, + 'returns': 'CpuModelExpansionInfo' } + +## +# @CpuDefinitionInfo: +# +# Virtual CPU definition. +# +# @name: the name of the CPU definition +# +# @migration-safe: whether a CPU definition can be safely used for +# migration in combination with a QEMU compatibility machine when +# migrating between different QEMU versions and between hosts with +# different sets of (hardware or software) capabilities. If not +# provided, information is not available and callers should not +# assume the CPU definition to be migration-safe. (since 2.8) +# +# @static: whether a CPU definition is static and will not change +# depending on QEMU version, machine type, machine options and +# accelerator options. A static model is always migration-safe. +# (since 2.8) +# +# @unavailable-features: List of properties that prevent the CPU model +# from running in the current host. (since 2.8) +# +# @typename: Type name that can be used as argument to +# `device-list-properties`, to introspect properties configurable +# using -cpu or -global. (since 2.9) +# +# @alias-of: Name of CPU model this model is an alias for. The target +# of the CPU model alias may change depending on the machine type. 
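As a sketch of the expansion workflow described above, a "static" expansion of the host model on s390x might look roughly like this (the returned base model name and property delta are placeholders, not actual output):

    -> { "execute": "query-cpu-model-expansion",
         "arguments": { "type": "static", "model": { "name": "host" } } }
    <- { "return": { "model": { "name": "z14-base", "props": { "vxeh2": true } } } }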
+# Management software is supposed to translate CPU model aliases +# in the VM configuration, because aliases may stop being +# migration-safe in the future (since 4.1) +# +# @deprecated: If true, this CPU model is deprecated and may be +# removed in some future version of QEMU according to the QEMU +# deprecation policy. (since 5.2) +# +# @unavailable-features is a list of QOM property names that represent +# CPU model attributes that prevent the CPU from running. If the QOM +# property is read-only, that means there's no known way to make the +# CPU model run in the current host. Implementations that choose not +# to provide specific information return the property name "type". If +# the property is read-write, it means that it MAY be possible to run +# the CPU model in the current host if that property is changed. +# Management software can use it as hints to suggest or choose an +# alternative for the user, or just to generate meaningful error +# messages explaining why the CPU model can't be used. If +# @unavailable-features is an empty list, the CPU model is runnable +# using the current host and machine-type. If @unavailable-features +# is not present, runnability information for the CPU is not +# available. +# +# Since: 1.2 +## +{ 'struct': 'CpuDefinitionInfo', + 'data': { 'name': 'str', + '*migration-safe': 'bool', + 'static': 'bool', + '*unavailable-features': [ 'str' ], + 'typename': 'str', + '*alias-of' : 'str', + 'deprecated' : 'bool' } } + +## +# @query-cpu-definitions: +# +# Return a list of supported virtual CPU definitions +# +# Since: 1.2 +## +{ 'command': 'query-cpu-definitions', 'returns': ['CpuDefinitionInfo'] } diff --git a/qapi/meson.build b/qapi/meson.build index e7bc54e5d04..ca6b61a608d 100644 --- a/qapi/meson.build +++ b/qapi/meson.build @@ -39,10 +39,9 @@ qapi_all_modules = [ 'job', 'machine-common', 'machine', - 'machine-target', + 'machine-s390x', 'migration', 'misc', - 'misc-target', 'net', 'pragma', 'qom', @@ -58,13 +57,17 @@ qapi_all_modules = [ ] if have_system qapi_all_modules += [ + 'accelerator', 'acpi', 'audio', 'cryptodev', 'qdev', 'pci', 'rocker', + 'misc-arm', + 'misc-i386', 'tpm', + 'uefi', ] endif if have_system or have_tools @@ -83,14 +86,12 @@ qapi_nonmodule_outputs = [ 'qapi-emit-events.c', 'qapi-emit-events.h', ] -# First build all sources -qapi_util_outputs = [ +qapi_outputs = qapi_nonmodule_outputs + [ 'qapi-builtin-types.c', 'qapi-builtin-visit.c', 'qapi-builtin-types.h', 'qapi-builtin-visit.h', ] qapi_inputs = [] -qapi_specific_outputs = [] foreach module : qapi_all_modules qapi_inputs += [ files(module + '.json') ] qapi_module_outputs = [ @@ -108,24 +109,17 @@ foreach module : qapi_all_modules 'qapi-commands-@0@.trace-events'.format(module), ] endif - if module.endswith('-target') - qapi_specific_outputs += qapi_module_outputs - else - qapi_util_outputs += qapi_module_outputs - endif + qapi_outputs += qapi_module_outputs endforeach qapi_files = custom_target('shared QAPI source files', - output: qapi_util_outputs + qapi_specific_outputs + qapi_nonmodule_outputs, + output: qapi_outputs, input: [ files('qapi-schema.json') ], command: [ qapi_gen, '-o', 'qapi', '-b', '@INPUT0@' ], depend_files: [ qapi_inputs, qapi_gen_depends ]) -# Now go through all the outputs and add them to the right sourceset. -# These loops must be synchronized with the output of the above custom target. 
- i = 0 -foreach output : qapi_util_outputs +foreach output : qapi_outputs if output.endswith('.h') genh += qapi_files[i] endif @@ -135,14 +129,3 @@ foreach output : qapi_util_outputs util_ss.add(qapi_files[i]) i = i + 1 endforeach - -foreach output : qapi_specific_outputs + qapi_nonmodule_outputs - if output.endswith('.h') - genh += qapi_files[i] - endif - if output.endswith('.trace-events') - qapi_trace_events += qapi_files[i] - endif - specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_true: qapi_files[i]) - i = i + 1 -endforeach diff --git a/qapi/migration.json b/qapi/migration.json index 073b67c0524..2387c21e9c1 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -3,7 +3,9 @@ # ## -# = Migration +# ********* +# Migration +# ********* ## { 'include': 'common.json' } @@ -57,8 +59,8 @@ # # @dirty-sync-missed-zero-copy: Number of times dirty RAM # synchronization could not avoid copying dirty pages. This is -# between 0 and @dirty-sync-count * @multifd-channels. (since -# 7.1) +# between 0 and @dirty-sync-count * @multifd-channels. +# (since 7.1) # # Since: 0.14 ## @@ -137,16 +139,16 @@ # # @active: in the process of doing migration. # -# @postcopy-active: like active, but now in postcopy mode. (since -# 2.5) +# @postcopy-active: like active, but now in postcopy mode. +# (since 2.5) # # @postcopy-paused: during postcopy but paused. (since 3.0) # -# @postcopy-recover-setup: setup phase for a postcopy recovery process, -# preparing for a recovery phase to start. (since 9.1) +# @postcopy-recover-setup: setup phase for a postcopy recovery +# process, preparing for a recovery phase to start. (since 9.1) # -# @postcopy-recover: trying to recover from a paused postcopy. (since -# 3.0) +# @postcopy-recover: trying to recover from a paused postcopy. +# (since 3.0) # # @completed: migration is finished. # @@ -158,8 +160,11 @@ # # @pre-switchover: Paused before device serialisation. (since 2.11) # -# @device: During device serialisation when pause-before-switchover is -# enabled (since 2.11) +# @device: During device serialisation (also known as switchover phase). +# Before 9.2, this is only used when (1) in precopy, and (2) when +# pre-switchover capability is enabled. After 10.0, this state will +# always be present for every migration procedure as the switchover +# phase. (since 2.11) # # @wait-unplug: wait for device unplug request by guest OS to be # completed. (since 4.2) @@ -190,14 +195,14 @@ # # Information about current migration process. # -# @status: @MigrationStatus describing the current migration status. +# @status: `MigrationStatus` describing the current migration status. # If this field is not returned, no migration process has been # initiated # -# @ram: @MigrationStats containing detailed migration status, only +# @ram: `MigrationStats` containing detailed migration status, only # returned if status is 'active' or 'completed'(since 1.2) # -# @xbzrle-cache: @XBZRLECacheStats containing detailed XBZRLE +# @xbzrle-cache: `XBZRLECacheStats` containing detailed XBZRLE # migration statistics, only returned if XBZRLE feature is on and # status is 'active' or 'completed' (since 1.2) # @@ -233,10 +238,35 @@ # This is only present when the postcopy-blocktime migration # capability is enabled. (Since 3.0) # +# @postcopy-latency: average remote page fault latency (in ns). Note that +# this doesn't include all faults, but only the ones that require a +# remote page request. So it should be always bigger than the real +# average page fault latency. 
This is only present when the +# postcopy-blocktime migration capability is enabled. (Since 10.1) +# +# @postcopy-latency-dist: remote page fault latency distributions. Each +# element of the array is the number of faults that fall into the +# bucket period. For the N-th bucket (N>=0), the latency window is +# [2^Nus, 2^(N+1)us). For example, the 8th element stores how many +# remote faults got resolved within [256us, 512us) window. This is only +# present when the postcopy-blocktime migration capability is enabled. +# (Since 10.1) +# +# @postcopy-vcpu-latency: average remote page fault latency per vCPU (in +# ns). It has the same definition as @postcopy-latency, but instead +# this is the per-vCPU statistics. This is only present when the +# postcopy-blocktime migration capability is enabled. (Since 10.1) +# +# @postcopy-non-vcpu-latency: average remote page fault latency for all +# faults happening in non-vCPU threads (in ns). It has the same +# definition as @postcopy-latency but this only provides statistics for +# non-vCPU faults. This is only present when the postcopy-blocktime +# migration capability is enabled. (Since 10.1) +# # @socket-address: Only used for tcp, to know what the real port is # (Since 4.0) # -# @vfio: @VfioStats containing detailed VFIO devices migration +# @vfio: `VfioStats` containing detailed VFIO devices migration # statistics, only returned if VFIO device is present, migration # is supported by all VFIO devices and status is 'active' or # 'completed' (since 5.2) @@ -245,10 +275,10 @@ # blocked. Present and non-empty when migration is blocked. # (since 6.0) # -# @dirty-limit-throttle-time-per-round: Maximum throttle time -# (in microseconds) of virtual CPUs each dirty ring full round, -# which shows how MigrationCapability dirty-limit affects the -# guest during live migration. (Since 8.1) +# @dirty-limit-throttle-time-per-round: Maximum throttle time (in +# microseconds) of virtual CPUs each dirty ring full round, which +# shows how `MigrationCapability` dirty-limit affects the guest +# during live migration. (Since 8.1) # # @dirty-limit-ring-full-time: Estimated average dirty ring full time # (in microseconds) for each dirty ring full round. The value @@ -257,6 +287,11 @@ # average memory load of the virtual CPU indirectly. Note that # zero means guest doesn't dirty memory. (Since 8.1) # +# Features: +# +# @unstable: Members @postcopy-latency, @postcopy-vcpu-latency, +# @postcopy-latency-dist, @postcopy-non-vcpu-latency are experimental. +# # Since: 0.14 ## { 'struct': 'MigrationInfo', @@ -272,6 +307,14 @@ '*blocked-reasons': ['str'], '*postcopy-blocktime': 'uint32', '*postcopy-vcpu-blocktime': ['uint32'], + '*postcopy-latency': { + 'type': 'uint64', 'features': [ 'unstable' ] }, + '*postcopy-latency-dist': { + 'type': ['uint64'], 'features': [ 'unstable' ] }, + '*postcopy-vcpu-latency': { + 'type': ['uint64'], 'features': [ 'unstable' ] }, + '*postcopy-non-vcpu-latency': { + 'type': 'uint64', 'features': [ 'unstable' ] }, '*socket-address': ['SocketAddress'], '*dirty-limit-throttle-time-per-round': 'uint64', '*dirty-limit-ring-full-time': 'uint64'} } @@ -279,12 +322,10 @@ ## # @query-migrate: # -# Returns information about current migration process. If migration +# Return information about current migration process. If migration # is active there will be another json-object with RAM migration # status. # -# Returns: @MigrationInfo -# # Since: 0.14 # # ..
qmp-example:: @@ -381,7 +422,7 @@ # Migration capabilities enumeration # # @xbzrle: Migration supports xbzrle (Xor Based Zero Run Length -# Encoding). This feature allows us to minimize migration traffic +# Encoding). This feature allows us to minimize migration traffic # for certain work loads, by sending compressed difference of the # pages # @@ -393,8 +434,8 @@ # efficiently. This essentially saves 1MB of zeroes per block on # the wire. Enabling requires source and target VM to support # this feature. To enable it is sufficient to enable the -# capability on the source VM. The feature is disabled by default. -# (since 1.6) +# capability on the source VM. The feature is disabled by +# default. (since 1.6) # # @events: generate events for each migration state change (since 2.4) # @@ -404,7 +445,7 @@ # @postcopy-ram: Start executing on the migration target before all of # RAM has been migrated, pulling the remaining pages along as # needed. The capacity must have the same setting on both source -# and target or migration will not even start. NOTE: If the +# and target or migration will not even start. **Note:** if the # migration fails during postcopy the VM will fail. (since 2.6) # # @x-colo: If enabled, migration will never end, and the state of the @@ -412,15 +453,15 @@ # on secondary side, this process is called COarse-Grain LOck # Stepping (COLO) for Non-stop Service. (since 2.8) # -# @release-ram: if enabled, qemu will free the migrated ram pages on +# @release-ram: if enabled, QEMU will free the migrated ram pages on # the source during postcopy-ram migration. (since 2.9) # # @return-path: If enabled, migration will use the return path even # for precopy. (since 2.10) # # @pause-before-switchover: Pause outgoing migration before -# serialising device state and before disabling block IO (since -# 2.11) +# serialising device state and before disabling block IO +# (since 2.11) # # @multifd: Use more than one fd for migration (since 4.0) # @@ -479,11 +520,14 @@ # Features: # # @unstable: Members @x-colo and @x-ignore-shared are experimental. +# @deprecated: Member @zero-blocks is deprecated as being part of +# block migration which was already removed. # # Since: 1.2 ## { 'enum': 'MigrationCapability', - 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks', + 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', + { 'name': 'zero-blocks', 'features': [ 'deprecated' ] }, 'events', 'postcopy-ram', { 'name': 'x-colo', 'features': [ 'unstable' ] }, 'release-ram', @@ -529,9 +573,7 @@ ## # @query-migrate-capabilities: # -# Returns information about the current migration capabilities status -# -# Returns: @MigrationCapabilityStatus +# Return information about the current migration capabilities status # # Since: 1.2 # @@ -561,18 +603,22 @@ # # @zstd: use zstd compression method. # +# @qatzip: use qatzip compression method. (Since 9.2) +# # @qpl: use qpl compression method. Query Processing Library(qpl) is -# based on the deflate compression algorithm and use the Intel -# In-Memory Analytics Accelerator(IAA) accelerated compression -# and decompression. (Since 9.1) +# based on the deflate compression algorithm and use the Intel +# In-Memory Analytics Accelerator(IAA) accelerated compression and +# decompression. (Since 9.1) # # @uadk: use UADK library compression method. 
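As a usage sketch for the capabilities and multifd compression methods described above (this assumes a QEMU binary built with QATzip support, i.e. CONFIG_QATZIP):

    -> { "execute": "migrate-set-capabilities",
         "arguments": { "capabilities": [ { "capability": "multifd", "state": true } ] } }
    <- { "return": {} }
    -> { "execute": "migrate-set-parameters",
         "arguments": { "multifd-compression": "qatzip" } }
    <- { "return": {} }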
(Since 9.1) # # Since: 5.0 ## { 'enum': 'MultiFDCompression', + 'prefix': 'MULTIFD_COMPRESSION', 'data': [ 'none', 'zlib', { 'name': 'zstd', 'if': 'CONFIG_ZSTD' }, + { 'name': 'qatzip', 'if': 'CONFIG_QATZIP'}, { 'name': 'qpl', 'if': 'CONFIG_QPL' }, { 'name': 'uadk', 'if': 'CONFIG_UADK' } ] } @@ -581,7 +627,7 @@ # # @normal: the original form of migration. (since 8.2) # -# @cpr-reboot: The migrate command stops the VM and saves state to the +# @cpr-reboot: The `migrate` command stops the VM and saves state to the # URI. After quitting QEMU, the user resumes by running QEMU # -incoming. # @@ -595,7 +641,7 @@ # # This mode supports VFIO devices provided the user first puts the # guest in the suspended runstate, such as by issuing -# guest-suspend-ram to the QEMU guest agent. +# `guest-suspend-ram` to the QEMU guest agent. # # Best performance is achieved when the memory backend is shared # and the @x-ignore-shared migration capability is set, but this @@ -607,9 +653,50 @@ # or COLO. # # (since 8.2) +# +# @cpr-transfer: This mode allows the user to transfer a guest to a +# new QEMU instance on the same host with minimal guest pause +# time by preserving guest RAM in place. +# +# Devices and their pinned pages are also preserved for VFIO and +# IOMMUFD. (since 10.1) +# +# The user starts new QEMU on the same host as old QEMU, with +# command-line arguments to create the same machine, plus the +# -incoming option for the main migration channel, like normal +# live migration. In addition, the user adds a second -incoming +# option with channel type "cpr". This CPR channel must support +# file descriptor transfer with SCM_RIGHTS, i.e. it must be a +# UNIX domain socket. +# +# To initiate CPR, the user issues a migrate command to old QEMU, +# adding a second migration channel of type "cpr" in the channels +# argument. Old QEMU stops the VM, saves state to the migration +# channels, and enters the postmigrate state. Execution resumes +# in new QEMU. +# +# New QEMU reads the CPR channel before opening a monitor, hence +# the CPR channel cannot be specified in the list of channels for +# a `migrate-incoming` command. It may only be specified on the +# command line. +# +# The main channel address cannot be a file type, and for an +# inet socket, the port cannot be 0 (meaning dynamically choose +# a port). +# +# Memory-backend objects must have the share=on attribute, but +# memory-backend-epc is not supported. The VM must be started +# with the '-machine aux-ram-share=on' option. +# +# When using -incoming defer, you must issue the `migrate` command +# to old QEMU before issuing any monitor commands to new QEMU. +# However, new QEMU does not open and read the migration stream +# until you issue the `migrate-incoming` command. +# +# (since 10.0) ## { 'enum': 'MigMode', - 'data': [ 'normal', 'cpr-reboot' ] } + 'data': [ 'normal', 'cpr-reboot', 'cpr-transfer' ] } ## # @ZeroPageDetection: @@ -648,8 +735,8 @@ # @alias: An alias name for migration (for example the bitmap name on # the opposite site). # -# @transform: Allows the modification of the migrated bitmap. (since -# 6.0) +# @transform: Allows the modification of the migrated bitmap. +# (since 6.0) # # Since: 5.2 ## @@ -711,9 +798,9 @@ # auto-converge detects that migration is not making progress. # The default value is 10. 
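Selecting one of the migration modes described above is done through migrate-set-parameters; for instance, a cpr-transfer setup might begin with something like the following on the old QEMU instance (a sketch only; the cpr channel itself is supplied to the new instance via a second -incoming option, as noted above):

    -> { "execute": "migrate-set-parameters", "arguments": { "mode": "cpr-transfer" } }
    <- { "return": {} }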
(Since 2.7) # -# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At -# the tail stage of throttling, the Guest is very sensitive to CPU -# percentage while the @cpu-throttle -increment is excessive +# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage. +# At the tail stage of throttling, the Guest is very sensitive to +# CPU percentage while the @cpu-throttle -increment is excessive # usually at tail stage. If this parameter is true, we will # compute the ideal CPU percentage used by the Guest, which may # exactly make the dirty rate match the dirty rate threshold. @@ -721,8 +808,8 @@ # specified by @cpu-throttle-increment and the one generated by # ideal CPU percentage. Therefore, it is compatible to # traditional throttling, meanwhile the throttle increment won't -# be excessive at tail stage. The default value is false. (Since -# 5.1) +# be excessive at tail stage. The default value is false. +# (Since 5.1) # # @tls-creds: ID of the 'tls-creds' object that provides credentials # for establishing a TLS connection over the migration data @@ -752,10 +839,10 @@ # (Since 2.8) # # @avail-switchover-bandwidth: to set the available bandwidth that -# migration can use during switchover phase. NOTE! This does not -# limit the bandwidth during switchover, but only for calculations -# when making decisions to switchover. By default, this value is -# zero, which means QEMU will estimate the bandwidth +# migration can use during switchover phase. **Note:** this does +# not limit the bandwidth during switchover, but only for +# calculations when making decisions to switchover. By default, +# this value is zero, which means QEMU will estimate the bandwidth # automatically. This can be set when the estimated value is not # accurate, while the user is able to guarantee such bandwidth is # available when switching over. When specified correctly, this @@ -790,13 +877,18 @@ # migration, the compression level is an integer between 0 and 9, # where 0 means no compression, 1 means the best compression # speed, and 9 means best compression ratio which will consume -# more CPU. Defaults to 1. (Since 5.0) +# more CPU. Defaults to 1. (Since 5.0) +# +# @multifd-qatzip-level: Set the compression level to be used in live +# migration. The level is an integer between 1 and 9, where 1 means +# the best compression speed, and 9 means the best compression +# ratio which will consume more CPU. Defaults to 1. (Since 9.2) # # @multifd-zstd-level: Set the compression level to be used in live # migration, the compression level is an integer between 0 and 20, # where 0 means no compression, 1 means the best compression # speed, and 20 means best compression ratio which will consume -# more CPU. Defaults to 1. (Since 5.0) +# more CPU. Defaults to 1. (Since 5.0) # # @block-bitmap-mapping: Maps block nodes and bitmaps on them to # aliases for the purpose of dirty bitmap migration. Such aliases @@ -821,11 +913,11 @@ # @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. # Defaults to 1. (Since 8.1) # -# @mode: Migration mode. See description in @MigMode. Default is +# @mode: Migration mode. See description in `MigMode`. Default is # 'normal'. (Since 8.2) # # @zero-page-detection: Whether and how to detect zero pages. -# See description in @ZeroPageDetection. Default is 'multifd'. +# See description in `ZeroPageDetection`. Default is 'multifd'. # (since 9.0) # # @direct-io: Open migration files with O_DIRECT when possible. 
This @@ -852,6 +944,7 @@ 'xbzrle-cache-size', 'max-postcopy-bandwidth', 'max-cpu-throttle', 'multifd-compression', 'multifd-zlib-level', 'multifd-zstd-level', + 'multifd-qatzip-level', 'block-bitmap-mapping', { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] }, 'vcpu-dirty-limit', @@ -886,9 +979,9 @@ # auto-converge detects that migration is not making progress. # The default value is 10. (Since 2.7) # -# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At -# the tail stage of throttling, the Guest is very sensitive to CPU -# percentage while the @cpu-throttle -increment is excessive +# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage. +# At the tail stage of throttling, the Guest is very sensitive to +# CPU percentage while the @cpu-throttle -increment is excessive # usually at tail stage. If this parameter is true, we will # compute the ideal CPU percentage used by the Guest, which may # exactly make the dirty rate match the dirty rate threshold. @@ -896,8 +989,8 @@ # specified by @cpu-throttle-increment and the one generated by # ideal CPU percentage. Therefore, it is compatible to # traditional throttling, meanwhile the throttle increment won't -# be excessive at tail stage. The default value is false. (Since -# 5.1) +# be excessive at tail stage. The default value is false. +# (Since 5.1) # # @tls-creds: ID of the 'tls-creds' object that provides credentials # for establishing a TLS connection over the migration data @@ -927,10 +1020,10 @@ # (Since 2.8) # # @avail-switchover-bandwidth: to set the available bandwidth that -# migration can use during switchover phase. NOTE! This does not -# limit the bandwidth during switchover, but only for calculations -# when making decisions to switchover. By default, this value is -# zero, which means QEMU will estimate the bandwidth +# migration can use during switchover phase. **Note:** this does +# not limit the bandwidth during switchover, but only for +# calculations when making decisions to switchover. By default, +# this value is zero, which means QEMU will estimate the bandwidth # automatically. This can be set when the estimated value is not # accurate, while the user is able to guarantee such bandwidth is # available when switching over. When specified correctly, this @@ -965,13 +1058,18 @@ # migration, the compression level is an integer between 0 and 9, # where 0 means no compression, 1 means the best compression # speed, and 9 means best compression ratio which will consume -# more CPU. Defaults to 1. (Since 5.0) +# more CPU. Defaults to 1. (Since 5.0) +# +# @multifd-qatzip-level: Set the compression level to be used in live +# migration. The level is an integer between 1 and 9, where 1 means +# the best compression speed, and 9 means the best compression +# ratio which will consume more CPU. Defaults to 1. (Since 9.2) # # @multifd-zstd-level: Set the compression level to be used in live # migration, the compression level is an integer between 0 and 20, # where 0 means no compression, 1 means the best compression # speed, and 20 means best compression ratio which will consume -# more CPU. Defaults to 1. (Since 5.0) +# more CPU. Defaults to 1. (Since 5.0) # # @block-bitmap-mapping: Maps block nodes and bitmaps on them to # aliases for the purpose of dirty bitmap migration. Such aliases @@ -996,11 +1094,11 @@ # @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. # Defaults to 1. (Since 8.1) # -# @mode: Migration mode. See description in @MigMode. 
Default is +# @mode: Migration mode. See description in `MigMode`. Default is # 'normal'. (Since 8.2) # # @zero-page-detection: Whether and how to detect zero pages. -# See description in @ZeroPageDetection. Default is 'multifd'. +# See description in `ZeroPageDetection`. Default is 'multifd'. # (since 9.0) # # @direct-io: Open migration files with O_DIRECT when possible. This @@ -1012,8 +1110,8 @@ # @unstable: Members @x-checkpoint-delay and # @x-vcpu-dirty-limit-period are experimental. # -# TODO: either fuse back into MigrationParameters, or make -# MigrationParameters members mandatory +# TODO: either fuse back into `MigrationParameters`, or make +# `MigrationParameters` members mandatory # # Since: 2.4 ## @@ -1040,6 +1138,7 @@ '*max-cpu-throttle': 'uint8', '*multifd-compression': 'MultiFDCompression', '*multifd-zlib-level': 'uint8', + '*multifd-qatzip-level': 'uint8', '*multifd-zstd-level': 'uint8', '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ], '*x-vcpu-dirty-limit-period': { 'type': 'uint64', @@ -1087,16 +1186,16 @@ # percentage. The default value is 50. (Since 5.0) # # @cpu-throttle-initial: Initial percentage of time guest cpus are -# throttled when migration auto-converge is activated. (Since -# 2.7) +# throttled when migration auto-converge is activated. +# (Since 2.7) # # @cpu-throttle-increment: throttle percentage increase each time # auto-converge detects that migration is not making progress. # (Since 2.7) # -# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage At -# the tail stage of throttling, the Guest is very sensitive to CPU -# percentage while the @cpu-throttle -increment is excessive +# @cpu-throttle-tailslow: Make CPU throttling slower at tail stage. +# At the tail stage of throttling, the Guest is very sensitive to +# CPU percentage while the @cpu-throttle -increment is excessive # usually at tail stage. If this parameter is true, we will # compute the ideal CPU percentage used by the Guest, which may # exactly make the dirty rate match the dirty rate threshold. @@ -1104,8 +1203,8 @@ # specified by @cpu-throttle-increment and the one generated by # ideal CPU percentage. Therefore, it is compatible to # traditional throttling, meanwhile the throttle increment won't -# be excessive at tail stage. The default value is false. (Since -# 5.1) +# be excessive at tail stage. The default value is false. +# (Since 5.1) # # @tls-creds: ID of the 'tls-creds' object that provides credentials # for establishing a TLS connection over the migration data @@ -1131,10 +1230,10 @@ # (Since 2.8) # # @avail-switchover-bandwidth: to set the available bandwidth that -# migration can use during switchover phase. NOTE! This does not -# limit the bandwidth during switchover, but only for calculations -# when making decisions to switchover. By default, this value is -# zero, which means QEMU will estimate the bandwidth +# migration can use during switchover phase. **Note:** this does +# not limit the bandwidth during switchover, but only for +# calculations when making decisions to switchover. By default, +# this value is zero, which means QEMU will estimate the bandwidth # automatically. This can be set when the estimated value is not # accurate, while the user is able to guarantee such bandwidth is # available when switching over. 
When specified correctly, this @@ -1169,13 +1268,18 @@ # migration, the compression level is an integer between 0 and 9, # where 0 means no compression, 1 means the best compression # speed, and 9 means best compression ratio which will consume -# more CPU. Defaults to 1. (Since 5.0) +# more CPU. Defaults to 1. (Since 5.0) +# +# @multifd-qatzip-level: Set the compression level to be used in live +# migration. The level is an integer between 1 and 9, where 1 means +# the best compression speed, and 9 means the best compression +# ratio which will consume more CPU. Defaults to 1. (Since 9.2) # # @multifd-zstd-level: Set the compression level to be used in live # migration, the compression level is an integer between 0 and 20, # where 0 means no compression, 1 means the best compression # speed, and 20 means best compression ratio which will consume -# more CPU. Defaults to 1. (Since 5.0) +# more CPU. Defaults to 1. (Since 5.0) # # @block-bitmap-mapping: Maps block nodes and bitmaps on them to # aliases for the purpose of dirty bitmap migration. Such aliases @@ -1200,11 +1304,11 @@ # @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. # Defaults to 1. (Since 8.1) # -# @mode: Migration mode. See description in @MigMode. Default is -# 'normal'. (Since 8.2) +# @mode: Migration mode. See description in `MigMode`. Default is +# 'normal'. (Since 8.2) # # @zero-page-detection: Whether and how to detect zero pages. -# See description in @ZeroPageDetection. Default is 'multifd'. +# See description in `ZeroPageDetection`. Default is 'multifd'. # (since 9.0) # # @direct-io: Open migration files with O_DIRECT when possible. This @@ -1241,6 +1345,7 @@ '*max-cpu-throttle': 'uint8', '*multifd-compression': 'MultiFDCompression', '*multifd-zlib-level': 'uint8', + '*multifd-qatzip-level': 'uint8', '*multifd-zstd-level': 'uint8', '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ], '*x-vcpu-dirty-limit-period': { 'type': 'uint64', @@ -1253,9 +1358,7 @@ ## # @query-migrate-parameters: # -# Returns information about the current migration parameters -# -# Returns: @MigrationParameters +# Return information about the current migration parameters # # Since: 2.4 # @@ -1295,7 +1398,7 @@ # # Emitted when a migration event happens # -# @status: @MigrationStatus describing the current migration status. +# @status: `MigrationStatus` describing the current migration status. # # Since: 2.4 # @@ -1416,8 +1519,8 @@ # The reason for a COLO exit. # # @none: failover has never happened. This state does not occur in -# the COLO_EXIT event, and is only visible in the result of -# query-colo-status. +# the `COLO_EXIT` event, and is only visible in the result of +# `query-colo-status`. # # @request: COLO exit is due to an external request. # @@ -1433,7 +1536,7 @@ ## # @x-colo-lost-heartbeat: # -# Tell qemu that heartbeat is lost, request it to do takeover +# Tell QEMU that heartbeat is lost, request it to do takeover # procedures. If this command is sent to the PVM, the Primary side # will exit COLO mode. If sent to the Secondary, the Secondary side # will run failover work, then takes over server operation to become @@ -1457,10 +1560,12 @@ ## # @migrate_cancel: # -# Cancel the current executing migration process. +# Cancel the currently executing migration process. Allows a new +# migration to be started right after. When postcopy-ram is in use, +# cancelling is not allowed after the postcopy phase has started. # -# .. note:: This command succeeds even if there is no migration process -# running. +# .. 
note:: This command succeeds even if there is no migration +# process running. # # Since: 0.14 # @@ -1553,11 +1658,12 @@ # The migration channel-type request options. # # @main: Main outbound migration channel. +# @cpr: Checkpoint and restart state channel. # # Since: 8.1 ## { 'enum': 'MigrationChannelType', - 'data': [ 'main' ] } + 'data': [ 'main', 'cpr' ] } ## # @MigrationChannel: @@ -1590,27 +1696,26 @@ # # @resume: resume one paused migration, default "off". (since 3.0) # +# Features: +# +# @deprecated: Argument @detach is deprecated. +# # Since: 0.14 # # .. admonition:: Notes # -# 1. The 'query-migrate' command should be used to check +# 1. The `query-migrate` command should be used to check # migration's progress and final result (this information is # provided by the 'status' member). # -# 2. All boolean arguments default to false. -# -# 3. The user Monitor's "detach" argument is invalid in QMP and -# should not be used. -# -# 4. The uri argument should have the Uniform Resource Identifier -# of default destination VM. This connection will be bound to +# 2. The uri argument should have the Uniform Resource Identifier +# of default destination VM. This connection will be bound to # default network. # -# 5. For now, number of migration streams is restricted to one, +# 3. For now, number of migration streams is restricted to one, # i.e. number of items in 'channels' list is just 1. # -# 6. The 'uri' and 'channels' arguments are mutually exclusive; +# 4. The 'uri' and 'channels' arguments are mutually exclusive; # exactly one of the two should be present. # # .. qmp-example:: @@ -1650,18 +1755,18 @@ # "filename": "/tmp/migfile", # "offset": "0x1000" } } ] } } # <- { "return": {} } -# ## { 'command': 'migrate', 'data': {'*uri': 'str', '*channels': [ 'MigrationChannel' ], - '*detach': 'bool', '*resume': 'bool' } } + '*detach': { 'type': 'bool', 'features': [ 'deprecated' ] }, + '*resume': 'bool' } } ## # @migrate-incoming: # -# Start an incoming migration, the qemu must have been started with -# -incoming defer +# Start an incoming migration. QEMU must have been started with +# -incoming defer. # # @uri: The Uniform Resource Identifier identifying the source or # address to listen on @@ -1670,8 +1775,9 @@ # list connected to a destination interface endpoint. # # @exit-on-error: Exit on incoming migration failure. Default true. -# When set to false, the failure triggers a MIGRATION event, and -# error details could be retrieved with query-migrate. (since 9.1) +# When set to false, the failure triggers a :qapi:event:`MIGRATION` +# event, and error details could be retrieved with `query-migrate`. +# (since 9.1) # # Since: 2.3 # @@ -1682,7 +1788,7 @@ # already exposed above libvirt. # # 2. QEMU must be started with -incoming defer to allow -# migrate-incoming to be used. +# `migrate-incoming` to be used. # # 3. The uri format is the same as for -incoming # @@ -1735,7 +1841,7 @@ # devices of the VM are not saved by this command. # # @filename: the file to save the state of the devices to as binary -# data. See xen-save-devices-state.txt for a description of the +# data. See `xen-save-devices-state`.txt for a description of the # binary format. # # @live: Optional argument to ask QEMU to treat this command as part @@ -1776,7 +1882,7 @@ # devices of the VM are not loaded by this command. # # @filename: the file to load the state of the devices from as binary -# data. See xen-save-devices-state.txt for a description of the +# data. 
See `xen-save-devices-state`.txt for a description of the # binary format. # # Since: 2.7 @@ -1816,7 +1922,7 @@ ## # @ReplicationStatus: # -# The result format for 'query-xen-replication-status'. +# The result format for `query-xen-replication-status`. # # @error: true if an error happened, false if replication is normal. # @@ -1834,7 +1940,7 @@ # # Query replication status while the vm is running. # -# Returns: A @ReplicationStatus object showing the status. +# TODO: This line is a hack to separate the example from the body # # .. qmp-example:: # @@ -1865,7 +1971,7 @@ ## # @COLOStatus: # -# The result format for 'query-colo-status'. +# The result format for `query-colo-status`. # # @mode: COLO running mode. If COLO is running, this field will # return 'primary' or 'secondary'. @@ -1888,7 +1994,7 @@ # # Query COLO status while the vm is running. # -# Returns: A @COLOStatus object showing the status. +# TODO: This line is a hack to separate the example from the body # # .. qmp-example:: # @@ -1938,9 +2044,9 @@ # @UNPLUG_PRIMARY: # # Emitted from source side of a migration when migration state is -# WAIT_UNPLUG. Device was unplugged by guest operating system. Device -# resources in QEMU are kept on standby to be able to re-plug it in -# case of migration failure. +# WAIT_UNPLUG. Device was unplugged by guest operating system. +# Device resources in QEMU are kept on standby to be able to re-plug +# it in case of migration failure. # # @device-id: QEMU device id of the unplugged device # @@ -1989,7 +2095,7 @@ # @DirtyRateMeasureMode: # # Method used to measure dirty page rate. Differences between -# available methods are explained in @calc-dirty-rate. +# available methods are explained in `calc-dirty-rate`. # # @page-sampling: use page sampling # @@ -2057,7 +2163,7 @@ # @calc-dirty-rate: # # Start measuring dirty page rate of the VM. Results can be retrieved -# with @query-dirty-rate after measurements are completed. +# with `query-dirty-rate` after measurements are completed. # # Dirty page rate is the number of pages changed in a given time # period expressed in MiB/s. The following methods of calculation are @@ -2084,16 +2190,16 @@ # This mode tracks page modification per each vCPU separately. It # requires that KVM accelerator property "dirty-ring-size" is set. # -# @calc-time: time period for which dirty page rate is calculated. -# By default it is specified in seconds, but the unit can be set +# @calc-time: time period for which dirty page rate is calculated. By +# default it is specified in seconds, but the unit can be set # explicitly with @calc-time-unit. Note that larger @calc-time # values will typically result in smaller dirty page rates because -# page dirtying is a one-time event. Once some page is counted -# as dirty during @calc-time period, further writes to this page -# will not increase dirty page rate anymore. +# page dirtying is a one-time event. Once some page is counted as +# dirty during @calc-time period, further writes to this page will +# not increase dirty page rate anymore. # -# @calc-time-unit: time unit in which @calc-time is specified. -# By default it is seconds. (Since 8.2) +# @calc-time-unit: time unit in which @calc-time is specified. By +# default it is seconds. (Since 8.2) # # @sample-pages: number of sampled pages per each GiB of guest memory. # Default value is 512. For 4KiB guest pages this corresponds to @@ -2130,7 +2236,7 @@ ## # @query-dirty-rate: # -# Query results of the most recent invocation of @calc-dirty-rate. 
+# Query results of the most recent invocation of `calc-dirty-rate`. # # @calc-time-unit: time unit in which to report calculation time. # By default it is reported in seconds. (Since 8.2) @@ -2180,7 +2286,7 @@ # # Requires KVM with accelerator property "dirty-ring-size" set. A # virtual CPU's dirty page rate is a measure of its memory load. To -# observe dirty page rates, use @calc-dirty-rate. +# observe dirty page rates, use `calc-dirty-rate`. # # @cpu-index: index of a virtual CPU, default is all. # @@ -2205,8 +2311,8 @@ # Cancel the upper limit of dirty page rate for virtual CPUs. # # Cancel the dirty page limit for the vCPU which has been set with -# set-vcpu-dirty-limit command. Note that this command requires -# support from dirty ring, same as the "set-vcpu-dirty-limit". +# `set-vcpu-dirty-limit` command. Note that this command requires +# support from dirty ring, same as the `set-vcpu-dirty-limit`. # # @cpu-index: index of a virtual CPU, default is all. # @@ -2224,7 +2330,7 @@ ## # @query-vcpu-dirty-limit: # -# Returns information about virtual CPU dirty page rate limits, if +# Return information about virtual CPU dirty page rate limits, if # any. # # Since: 7.1 @@ -2257,14 +2363,17 @@ ## # @query-migrationthreads: # -# Returns information of migration threads +# Return information of migration threads +# +# Features: # -# Returns: @MigrationThreadInfo +# @deprecated: This command is deprecated with no replacement yet. # # Since: 7.2 ## { 'command': 'query-migrationthreads', - 'returns': ['MigrationThreadInfo'] } + 'returns': ['MigrationThreadInfo'], + 'features': ['deprecated'] } ## # @snapshot-save: @@ -2360,7 +2469,7 @@ # time it takes to load the snapshot. # # It is strongly recommended that @devices contain all writable block -# device nodes that can have changed since the original @snapshot-save +# device nodes that can have changed since the original `snapshot-save` # command execution. # # .. qmp-example:: diff --git a/qapi/misc-arm.json b/qapi/misc-arm.json new file mode 100644 index 00000000000..f921d740f15 --- /dev/null +++ b/qapi/misc-arm.json @@ -0,0 +1,47 @@ +# -*- Mode: Python -*- +# vim: filetype=python +# +# SPDX-License-Identifier: GPL-2.0-or-later + +## +# @GICCapability: +# +# The struct describes capability for a specific GIC (Generic +# Interrupt Controller) version. These bits are not only decided by +# QEMU/KVM software version, but also decided by the hardware that the +# program is running upon. +# +# @version: version of GIC to be described. Currently, only 2 and 3 +# are supported. +# +# @emulated: whether current QEMU/hardware supports emulated GIC +# device in user space. +# +# @kernel: whether current QEMU/hardware supports hardware accelerated +# GIC device in kernel. +# +# Since: 2.6 +## +{ 'struct': 'GICCapability', + 'data': { 'version': 'int', + 'emulated': 'bool', + 'kernel': 'bool' } } + +## +# @query-gic-capabilities: +# +# It will return a list of `GICCapability` objects that describe its +# capability bits. +# +# On non-ARM targets this command will report an error as the GIC +# technology is not applicable. +# +# Since: 2.6 +# +# .. 
qmp-example:: +# +# -> { "execute": "query-gic-capabilities" } +# <- { "return": [{ "version": 2, "emulated": true, "kernel": false }, +# { "version": 3, "emulated": false, "kernel": true } ] } +## +{ 'command': 'query-gic-capabilities', 'returns': ['GICCapability'] } diff --git a/qapi/misc-i386.json b/qapi/misc-i386.json new file mode 100644 index 00000000000..d1ce8caf253 --- /dev/null +++ b/qapi/misc-i386.json @@ -0,0 +1,474 @@ +# -*- Mode: Python -*- +# vim: filetype=python +# +# SPDX-License-Identifier: GPL-2.0-or-later + +## +# @rtc-reset-reinjection: +# +# Reset the RTC interrupt reinjection backlog. Can be used if another +# mechanism to synchronize guest time is in effect, for example QEMU +# guest agent's `guest-set-time` command. +# +# Use of this command is only applicable for x86 machines with an RTC, +# and on other machines will silently return without performing any +# action. +# +# Since: 2.1 +# +# .. qmp-example:: +# +# -> { "execute": "rtc-reset-reinjection" } +# <- { "return": {} } +## +{ 'command': 'rtc-reset-reinjection' } + +## +# @SevState: +# +# An enumeration of SEV state information used during `query-sev`. +# +# @uninit: The guest is uninitialized. +# +# @launch-update: The guest is currently being launched; plaintext +# data and register state is being imported. +# +# @launch-secret: The guest is currently being launched; ciphertext +# data is being imported. +# +# @running: The guest is fully launched or migrated in. +# +# @send-update: The guest is currently being migrated out to another +# machine. +# +# @receive-update: The guest is currently being migrated from another +# machine. +# +# Since: 2.12 +## +{ 'enum': 'SevState', + 'data': ['uninit', 'launch-update', 'launch-secret', 'running', + 'send-update', 'receive-update' ] } + +## +# @SevGuestType: +# +# An enumeration indicating the type of SEV guest being run. +# +# @sev: The guest is a legacy SEV or SEV-ES guest. +# +# @sev-snp: The guest is an SEV-SNP guest. +# +# Since: 6.2 +## +{ 'enum': 'SevGuestType', + 'data': [ 'sev', 'sev-snp' ] } + +## +# @SevGuestInfo: +# +# Information specific to legacy SEV/SEV-ES guests. +# +# @policy: SEV policy value +# +# @handle: SEV firmware handle +# +# Since: 2.12 +## +{ 'struct': 'SevGuestInfo', + 'data': { 'policy': 'uint32', + 'handle': 'uint32' } } + +## +# @SevSnpGuestInfo: +# +# Information specific to SEV-SNP guests. +# +# @snp-policy: SEV-SNP policy value +# +# Since: 9.1 +## +{ 'struct': 'SevSnpGuestInfo', + 'data': { 'snp-policy': 'uint64' } } + +## +# @SevInfo: +# +# Information about Secure Encrypted Virtualization (SEV) support +# +# @enabled: true if SEV is active +# +# @api-major: SEV API major version +# +# @api-minor: SEV API minor version +# +# @build-id: SEV FW build id +# +# @state: SEV guest state +# +# @sev-type: Type of SEV guest being run +# +# Since: 2.12 +## +{ 'union': 'SevInfo', + 'base': { 'enabled': 'bool', + 'api-major': 'uint8', + 'api-minor' : 'uint8', + 'build-id' : 'uint8', + 'state' : 'SevState', + 'sev-type' : 'SevGuestType' }, + 'discriminator': 'sev-type', + 'data': { + 'sev': 'SevGuestInfo', + 'sev-snp': 'SevSnpGuestInfo' } } + + +## +# @query-sev: +# +# Return information about SEV/SEV-ES/SEV-SNP. +# +# If unavailable due to an incompatible configuration the returned +# @enabled field is set to 'false' and the state of all other fields +# is unspecified. +# +# Since: 2.12 +# +# .. 
qmp-example:: +# +# -> { "execute": "query-sev" } +# <- { "return": { "enabled": true, "api-major" : 0, "api-minor" : 0, +# "build-id" : 0, "policy" : 0, "state" : "running", +# "handle" : 1 } } +## +{ 'command': 'query-sev', 'returns': 'SevInfo' } + +## +# @SevLaunchMeasureInfo: +# +# SEV Guest Launch measurement information +# +# @data: the measurement value encoded in base64 +# +# Since: 2.12 +## +{ 'struct': 'SevLaunchMeasureInfo', 'data': {'data': 'str'} } + +## +# @query-sev-launch-measure: +# +# Query the SEV/SEV-ES guest launch information. +# +# This is only valid on x86 machines configured with KVM and the +# 'sev-guest' confidential virtualization object. The launch +# measurement for SEV-SNP guests is only available within the guest. +# +# Returns: The guest's SEV guest launch measurement info +# +# Errors: +# - If the launch measurement is unavailable, either due to an +# invalid guest configuration or if the guest has not reached +# the required SEV state, GenericError +# +# Since: 2.12 +# +# .. qmp-example:: +# +# -> { "execute": "query-sev-launch-measure" } +# <- { "return": { "data": "4l8LXeNlSPUDlXPJG5966/8%YZ" } } +## +{ 'command': 'query-sev-launch-measure', 'returns': 'SevLaunchMeasureInfo' } + +## +# @SevCapability: +# +# The struct describes capability for a Secure Encrypted +# Virtualization feature. +# +# @pdh: Platform Diffie-Hellman key (base64 encoded) +# +# @cert-chain: PDH certificate chain (base64 encoded) +# +# @cpu0-id: Unique ID of CPU0 (base64 encoded) (since 7.1) +# +# @cbitpos: C-bit location in page table entry +# +# @reduced-phys-bits: Number of physical address bit reduction when +# SEV is enabled +# +# Since: 2.12 +## +{ 'struct': 'SevCapability', + 'data': { 'pdh': 'str', + 'cert-chain': 'str', + 'cpu0-id': 'str', + 'cbitpos': 'int', + 'reduced-phys-bits': 'int'} } + +## +# @query-sev-capabilities: +# +# Get SEV capabilities. +# +# This is only supported on AMD X86 platforms with KVM enabled. +# +# Errors: +# - If SEV is not available on the platform, GenericError +# +# Since: 2.12 +# +# .. qmp-example:: +# +# -> { "execute": "query-sev-capabilities" } +# <- { "return": { "pdh": "8CCDD8DDD", "cert-chain": "888CCCDDDEE", +# "cpu0-id": "2lvmGwo+...61iEinw==", +# "cbitpos": 47, "reduced-phys-bits": 1}} +## +{ 'command': 'query-sev-capabilities', 'returns': 'SevCapability' } + +## +# @sev-inject-launch-secret: +# +# Inject a secret blob into a SEV/SEV-ES guest's memory. +# +# This is only valid on x86 machines configured with KVM and the +# 'sev-guest' confidential virtualization object. SEV-SNP guests do +# not support launch secret injection. +# +# @packet-header: the launch secret packet header encoded in base64 +# +# @secret: the launch secret data to be injected encoded in base64 +# +# @gpa: the guest physical address where secret will be injected. +# +# Errors: +# - If launch secret injection is not possible, either due to +# an invalid guest configuration, or if the guest has not +# reached the required SEV state, GenericError +# +# Since: 6.0 +## +{ 'command': 'sev-inject-launch-secret', + 'data': { 'packet-header': 'str', 'secret': 'str', '*gpa': 'uint64' } } + +## +# @SevAttestationReport: +# +# The struct describes attestation report for a Secure Encrypted +# Virtualization feature. +# +# @data: guest attestation report (base64 encoded) +# +# Since: 6.1 +## +{ 'struct': 'SevAttestationReport', + 'data': { 'data': 'str'} } + +## +# @query-sev-attestation-report: +# +# Get the SEV attestation report. 
+# +# This is only valid on x86 machines configured with KVM and the +# 'sev-guest' confidential virtualization object. The attestation +# report for SEV-SNP guests is only available within the guest. +# +# @mnonce: a random 16 bytes value encoded in base64 (it will be +# included in report) +# +# Errors: +# - If the attestation report is unavailable, either due to an +# invalid guest configuration or because the guest has not +# reached the required SEV state, GenericError +# +# Since: 6.1 +# +# .. qmp-example:: +# +# -> { "execute" : "query-sev-attestation-report", +# "arguments": { "mnonce": "aaaaaaa" } } +# <- { "return" : { "data": "aaaaaaaabbbddddd"} } +## +{ 'command': 'query-sev-attestation-report', + 'data': { 'mnonce': 'str' }, + 'returns': 'SevAttestationReport' } + +## +# @SgxEpcSection: +# +# Information about intel SGX EPC section +# +# @node: the numa node +# +# @size: the size of EPC section +# +# Since: 7.0 +## +{ 'struct': 'SgxEpcSection', + 'data': { 'node': 'int', + 'size': 'uint64'}} + +## +# @SgxInfo: +# +# Information about intel Safe Guard eXtension (SGX) support +# +# @sgx: true if SGX is supported +# +# @sgx1: true if SGX1 is supported +# +# @sgx2: true if SGX2 is supported +# +# @flc: true if FLC is supported +# +# @sections: The EPC sections information (Since: 7.0) +# +# Since: 6.2 +## +{ 'struct': 'SgxInfo', + 'data': { 'sgx': 'bool', + 'sgx1': 'bool', + 'sgx2': 'bool', + 'flc': 'bool', + 'sections': ['SgxEpcSection']} } + +## +# @query-sgx: +# +# Return information about configured SGX capabilities of guest +# +# Since: 6.2 +# +# .. qmp-example:: +# +# -> { "execute": "query-sgx" } +# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, +# "flc": true, +# "sections": [{"node": 0, "size": 67108864}, +# {"node": 1, "size": 29360128}]} } +## +{ 'command': 'query-sgx', 'returns': 'SgxInfo' } + +## +# @query-sgx-capabilities: +# +# Return information about SGX capabilities of host +# +# Since: 6.2 +# +# .. qmp-example:: +# +# -> { "execute": "query-sgx-capabilities" } +# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, +# "flc": true, +# "section" : [{"node": 0, "size": 67108864}, +# {"node": 1, "size": 29360128}]} } +## +{ 'command': 'query-sgx-capabilities', 'returns': 'SgxInfo' } + +## +# @EvtchnPortType: +# +# An enumeration of Xen event channel port types. +# +# @closed: The port is unused. +# +# @unbound: The port is allocated and ready to be bound. +# +# @interdomain: The port is connected as an interdomain interrupt. +# +# @pirq: The port is bound to a physical IRQ (PIRQ). +# +# @virq: The port is bound to a virtual IRQ (VIRQ). +# +# @ipi: The post is an inter-processor interrupt (IPI). +# +# Since: 8.0 +## +{ 'enum': 'EvtchnPortType', + 'data': ['closed', 'unbound', 'interdomain', 'pirq', 'virq', 'ipi'] } + +## +# @EvtchnInfo: +# +# Information about a Xen event channel port +# +# @port: the port number +# +# @vcpu: target vCPU for this port +# +# @type: the port type +# +# @remote-domain: remote domain for interdomain ports +# +# @target: remote port ID, or virq/pirq number +# +# @pending: port is currently active pending delivery +# +# @masked: port is masked +# +# Since: 8.0 +## +{ 'struct': 'EvtchnInfo', + 'data': {'port': 'uint16', + 'vcpu': 'uint32', + 'type': 'EvtchnPortType', + 'remote-domain': 'str', + 'target': 'uint16', + 'pending': 'bool', + 'masked': 'bool'} } + + +## +# @xen-event-list: +# +# Query the Xen event channels opened by the guest. +# +# Returns: list of open event channel ports. +# +# Since: 8.0 +# +# .. 
qmp-example:: +# +# -> { "execute": "xen-event-list" } +# <- { "return": [ +# { +# "pending": false, +# "port": 1, +# "vcpu": 1, +# "remote-domain": "qemu", +# "masked": false, +# "type": "interdomain", +# "target": 1 +# }, +# { +# "pending": false, +# "port": 2, +# "vcpu": 0, +# "remote-domain": "", +# "masked": false, +# "type": "virq", +# "target": 0 +# } +# ] +# } +## +{ 'command': 'xen-event-list', + 'returns': ['EvtchnInfo'] } + +## +# @xen-event-inject: +# +# Inject a Xen event channel port (interrupt) to the guest. +# +# @port: The port number +# +# Since: 8.0 +# +# .. qmp-example:: +# +# -> { "execute": "xen-event-inject", "arguments": { "port": 1 } } +# <- { "return": { } } +## +{ 'command': 'xen-event-inject', + 'data': { 'port': 'uint32' } } diff --git a/qapi/misc-target.json b/qapi/misc-target.json deleted file mode 100644 index 8d70bd24d8c..00000000000 --- a/qapi/misc-target.json +++ /dev/null @@ -1,528 +0,0 @@ -# -*- Mode: Python -*- -# vim: filetype=python -# - -## -# @rtc-reset-reinjection: -# -# This command will reset the RTC interrupt reinjection backlog. Can -# be used if another mechanism to synchronize guest time is in effect, -# for example QEMU guest agent's guest-set-time command. -# -# Since: 2.1 -# -# .. qmp-example:: -# -# -> { "execute": "rtc-reset-reinjection" } -# <- { "return": {} } -## -{ 'command': 'rtc-reset-reinjection', - 'if': 'TARGET_I386' } - -## -# @SevState: -# -# An enumeration of SEV state information used during @query-sev. -# -# @uninit: The guest is uninitialized. -# -# @launch-update: The guest is currently being launched; plaintext -# data and register state is being imported. -# -# @launch-secret: The guest is currently being launched; ciphertext -# data is being imported. -# -# @running: The guest is fully launched or migrated in. -# -# @send-update: The guest is currently being migrated out to another -# machine. -# -# @receive-update: The guest is currently being migrated from another -# machine. -# -# Since: 2.12 -## -{ 'enum': 'SevState', - 'data': ['uninit', 'launch-update', 'launch-secret', 'running', - 'send-update', 'receive-update' ], - 'if': 'TARGET_I386' } - -## -# @SevGuestType: -# -# An enumeration indicating the type of SEV guest being run. -# -# @sev: The guest is a legacy SEV or SEV-ES guest. -# -# @sev-snp: The guest is an SEV-SNP guest. -# -# Since: 6.2 -## -{ 'enum': 'SevGuestType', - 'data': [ 'sev', 'sev-snp' ], - 'if': 'TARGET_I386' } - -## -# @SevGuestInfo: -# -# Information specific to legacy SEV/SEV-ES guests. -# -# @policy: SEV policy value -# -# @handle: SEV firmware handle -# -# Since: 2.12 -## -{ 'struct': 'SevGuestInfo', - 'data': { 'policy': 'uint32', - 'handle': 'uint32' }, - 'if': 'TARGET_I386' } - -## -# @SevSnpGuestInfo: -# -# Information specific to SEV-SNP guests. 
-# -# @snp-policy: SEV-SNP policy value -# -# Since: 9.1 -## -{ 'struct': 'SevSnpGuestInfo', - 'data': { 'snp-policy': 'uint64' }, - 'if': 'TARGET_I386' } - -## -# @SevInfo: -# -# Information about Secure Encrypted Virtualization (SEV) support -# -# @enabled: true if SEV is active -# -# @api-major: SEV API major version -# -# @api-minor: SEV API minor version -# -# @build-id: SEV FW build id -# -# @state: SEV guest state -# -# @sev-type: Type of SEV guest being run -# -# Since: 2.12 -## -{ 'union': 'SevInfo', - 'base': { 'enabled': 'bool', - 'api-major': 'uint8', - 'api-minor' : 'uint8', - 'build-id' : 'uint8', - 'state' : 'SevState', - 'sev-type' : 'SevGuestType' }, - 'discriminator': 'sev-type', - 'data': { - 'sev': 'SevGuestInfo', - 'sev-snp': 'SevSnpGuestInfo' }, - 'if': 'TARGET_I386' } - - -## -# @query-sev: -# -# Returns information about SEV -# -# Returns: @SevInfo -# -# Since: 2.12 -# -# .. qmp-example:: -# -# -> { "execute": "query-sev" } -# <- { "return": { "enabled": true, "api-major" : 0, "api-minor" : 0, -# "build-id" : 0, "policy" : 0, "state" : "running", -# "handle" : 1 } } -## -{ 'command': 'query-sev', 'returns': 'SevInfo', - 'if': 'TARGET_I386' } - -## -# @SevLaunchMeasureInfo: -# -# SEV Guest Launch measurement information -# -# @data: the measurement value encoded in base64 -# -# Since: 2.12 -## -{ 'struct': 'SevLaunchMeasureInfo', 'data': {'data': 'str'}, - 'if': 'TARGET_I386' } - -## -# @query-sev-launch-measure: -# -# Query the SEV guest launch information. -# -# Returns: The @SevLaunchMeasureInfo for the guest -# -# Since: 2.12 -# -# .. qmp-example:: -# -# -> { "execute": "query-sev-launch-measure" } -# <- { "return": { "data": "4l8LXeNlSPUDlXPJG5966/8%YZ" } } -## -{ 'command': 'query-sev-launch-measure', 'returns': 'SevLaunchMeasureInfo', - 'if': 'TARGET_I386' } - -## -# @SevCapability: -# -# The struct describes capability for a Secure Encrypted -# Virtualization feature. -# -# @pdh: Platform Diffie-Hellman key (base64 encoded) -# -# @cert-chain: PDH certificate chain (base64 encoded) -# -# @cpu0-id: Unique ID of CPU0 (base64 encoded) (since 7.1) -# -# @cbitpos: C-bit location in page table entry -# -# @reduced-phys-bits: Number of physical Address bit reduction when -# SEV is enabled -# -# Since: 2.12 -## -{ 'struct': 'SevCapability', - 'data': { 'pdh': 'str', - 'cert-chain': 'str', - 'cpu0-id': 'str', - 'cbitpos': 'int', - 'reduced-phys-bits': 'int'}, - 'if': 'TARGET_I386' } - -## -# @query-sev-capabilities: -# -# This command is used to get the SEV capabilities, and is supported -# on AMD X86 platforms only. -# -# Returns: SevCapability objects. -# -# Since: 2.12 -# -# .. qmp-example:: -# -# -> { "execute": "query-sev-capabilities" } -# <- { "return": { "pdh": "8CCDD8DDD", "cert-chain": "888CCCDDDEE", -# "cpu0-id": "2lvmGwo+...61iEinw==", -# "cbitpos": 47, "reduced-phys-bits": 1}} -## -{ 'command': 'query-sev-capabilities', 'returns': 'SevCapability', - 'if': 'TARGET_I386' } - -## -# @sev-inject-launch-secret: -# -# This command injects a secret blob into memory of SEV guest. -# -# @packet-header: the launch secret packet header encoded in base64 -# -# @secret: the launch secret data to be injected encoded in base64 -# -# @gpa: the guest physical address where secret will be injected. 
-# -# Since: 6.0 -## -{ 'command': 'sev-inject-launch-secret', - 'data': { 'packet-header': 'str', 'secret': 'str', '*gpa': 'uint64' }, - 'if': 'TARGET_I386' } - -## -# @SevAttestationReport: -# -# The struct describes attestation report for a Secure Encrypted -# Virtualization feature. -# -# @data: guest attestation report (base64 encoded) -# -# Since: 6.1 -## -{ 'struct': 'SevAttestationReport', - 'data': { 'data': 'str'}, - 'if': 'TARGET_I386' } - -## -# @query-sev-attestation-report: -# -# This command is used to get the SEV attestation report, and is -# supported on AMD X86 platforms only. -# -# @mnonce: a random 16 bytes value encoded in base64 (it will be -# included in report) -# -# Returns: SevAttestationReport objects. -# -# Since: 6.1 -# -# .. qmp-example:: -# -# -> { "execute" : "query-sev-attestation-report", -# "arguments": { "mnonce": "aaaaaaa" } } -# <- { "return" : { "data": "aaaaaaaabbbddddd"} } -## -{ 'command': 'query-sev-attestation-report', - 'data': { 'mnonce': 'str' }, - 'returns': 'SevAttestationReport', - 'if': 'TARGET_I386' } - -## -# @dump-skeys: -# -# Dump guest's storage keys -# -# @filename: the path to the file to dump to -# -# Since: 2.5 -# -# .. qmp-example:: -# -# -> { "execute": "dump-skeys", -# "arguments": { "filename": "/tmp/skeys" } } -# <- { "return": {} } -## -{ 'command': 'dump-skeys', - 'data': { 'filename': 'str' }, - 'if': 'TARGET_S390X' } - -## -# @GICCapability: -# -# The struct describes capability for a specific GIC (Generic -# Interrupt Controller) version. These bits are not only decided by -# QEMU/KVM software version, but also decided by the hardware that the -# program is running upon. -# -# @version: version of GIC to be described. Currently, only 2 and 3 -# are supported. -# -# @emulated: whether current QEMU/hardware supports emulated GIC -# device in user space. -# -# @kernel: whether current QEMU/hardware supports hardware accelerated -# GIC device in kernel. -# -# Since: 2.6 -## -{ 'struct': 'GICCapability', - 'data': { 'version': 'int', - 'emulated': 'bool', - 'kernel': 'bool' }, - 'if': 'TARGET_ARM' } - -## -# @query-gic-capabilities: -# -# This command is ARM-only. It will return a list of GICCapability -# objects that describe its capability bits. -# -# Returns: a list of GICCapability objects. -# -# Since: 2.6 -# -# .. qmp-example:: -# -# -> { "execute": "query-gic-capabilities" } -# <- { "return": [{ "version": 2, "emulated": true, "kernel": false }, -# { "version": 3, "emulated": false, "kernel": true } ] } -## -{ 'command': 'query-gic-capabilities', 'returns': ['GICCapability'], - 'if': 'TARGET_ARM' } - -## -# @SGXEPCSection: -# -# Information about intel SGX EPC section info -# -# @node: the numa node -# -# @size: the size of EPC section -# -# Since: 7.0 -## -{ 'struct': 'SGXEPCSection', - 'data': { 'node': 'int', - 'size': 'uint64'}} - -## -# @SGXInfo: -# -# Information about intel Safe Guard eXtension (SGX) support -# -# @sgx: true if SGX is supported -# -# @sgx1: true if SGX1 is supported -# -# @sgx2: true if SGX2 is supported -# -# @flc: true if FLC is supported -# -# @sections: The EPC sections info for guest (Since: 7.0) -# -# Since: 6.2 -## -{ 'struct': 'SGXInfo', - 'data': { 'sgx': 'bool', - 'sgx1': 'bool', - 'sgx2': 'bool', - 'flc': 'bool', - 'sections': ['SGXEPCSection']}, - 'if': 'TARGET_I386' } - -## -# @query-sgx: -# -# Returns information about SGX -# -# Returns: @SGXInfo -# -# Since: 6.2 -# -# .. 
qmp-example:: -# -# -> { "execute": "query-sgx" } -# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, -# "flc": true, -# "sections": [{"node": 0, "size": 67108864}, -# {"node": 1, "size": 29360128}]} } -## -{ 'command': 'query-sgx', 'returns': 'SGXInfo', 'if': 'TARGET_I386' } - -## -# @query-sgx-capabilities: -# -# Returns information from host SGX capabilities -# -# Returns: @SGXInfo -# -# Since: 6.2 -# -# .. qmp-example:: -# -# -> { "execute": "query-sgx-capabilities" } -# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, -# "flc": true, -# "section" : [{"node": 0, "size": 67108864}, -# {"node": 1, "size": 29360128}]} } -## -{ 'command': 'query-sgx-capabilities', 'returns': 'SGXInfo', 'if': 'TARGET_I386' } - - -## -# @EvtchnPortType: -# -# An enumeration of Xen event channel port types. -# -# @closed: The port is unused. -# -# @unbound: The port is allocated and ready to be bound. -# -# @interdomain: The port is connected as an interdomain interrupt. -# -# @pirq: The port is bound to a physical IRQ (PIRQ). -# -# @virq: The port is bound to a virtual IRQ (VIRQ). -# -# @ipi: The post is an inter-processor interrupt (IPI). -# -# Since: 8.0 -## -{ 'enum': 'EvtchnPortType', - 'data': ['closed', 'unbound', 'interdomain', 'pirq', 'virq', 'ipi'], - 'if': 'TARGET_I386' } - -## -# @EvtchnInfo: -# -# Information about a Xen event channel port -# -# @port: the port number -# -# @vcpu: target vCPU for this port -# -# @type: the port type -# -# @remote-domain: remote domain for interdomain ports -# -# @target: remote port ID, or virq/pirq number -# -# @pending: port is currently active pending delivery -# -# @masked: port is masked -# -# Since: 8.0 -## -{ 'struct': 'EvtchnInfo', - 'data': {'port': 'uint16', - 'vcpu': 'uint32', - 'type': 'EvtchnPortType', - 'remote-domain': 'str', - 'target': 'uint16', - 'pending': 'bool', - 'masked': 'bool'}, - 'if': 'TARGET_I386' } - - -## -# @xen-event-list: -# -# Query the Xen event channels opened by the guest. -# -# Returns: list of open event channel ports. -# -# Since: 8.0 -# -# .. qmp-example:: -# -# -> { "execute": "xen-event-list" } -# <- { "return": [ -# { -# "pending": false, -# "port": 1, -# "vcpu": 1, -# "remote-domain": "qemu", -# "masked": false, -# "type": "interdomain", -# "target": 1 -# }, -# { -# "pending": false, -# "port": 2, -# "vcpu": 0, -# "remote-domain": "", -# "masked": false, -# "type": "virq", -# "target": 0 -# } -# ] -# } -## -{ 'command': 'xen-event-list', - 'returns': ['EvtchnInfo'], - 'if': 'TARGET_I386' } - -## -# @xen-event-inject: -# -# Inject a Xen event channel port (interrupt) to the guest. -# -# @port: The port number -# -# Since: 8.0 -# -# .. qmp-example:: -# -# -> { "execute": "xen-event-inject", "arguments": { "port": 1 } } -# <- { "return": { } } -## -{ 'command': 'xen-event-inject', - 'data': { 'port': 'uint32' }, - 'if': 'TARGET_I386' } diff --git a/qapi/misc.json b/qapi/misc.json index 4a6f3baeaed..28c641fe2fe 100644 --- a/qapi/misc.json +++ b/qapi/misc.json @@ -3,7 +3,9 @@ # ## -# = Miscellanea +# *********** +# Miscellanea +# *********** ## { 'include': 'common.json' } @@ -21,12 +23,12 @@ # "@dbus-display" or the name of a character device (e.g. from # -chardev id=XXXX) # -# @fdname: file descriptor name previously passed via 'getfd' command +# @fdname: file descriptor name previously passed via `getfd` command # # @skipauth: whether to skip authentication. Only applies to "vnc" # and "spice" protocols # -# @tls: whether to perform TLS. 
Only applies to the "spice" protocol +# @tls: whether to perform TLS. Only applies to the "spice" protocol # # Since: 0.14 # @@ -56,8 +58,6 @@ # # Return the name information of a guest. # -# Returns: @NameInfo of the guest -# # Since: 0.14 # # .. qmp-example:: @@ -101,13 +101,13 @@ ## # @query-iothreads: # -# Returns a list of information about each iothread. +# Return a list of information about each iothread. # # .. note:: This list excludes the QEMU main loop thread, which is not -# declared using the ``-object iothread`` command-line option. It is -# always the main thread of the process. +# declared using the ``-object iothread`` command-line option. It +# is always the main thread of the process. # -# Returns: a list of @IOThreadInfo for each iothread +# Returns: a list of info for each iothread # # Since: 2.0 # @@ -141,8 +141,8 @@ # guest remains paused once migration finishes, as if the ``-S`` # option was passed on the command line. # -# In the "suspended" state, it will completely stop the VM and cause -# a transition to the "paused" state. (Since 9.0) +# In the "suspended" state, it will completely stop the VM and +# cause a transition to the "paused" state. (Since 9.0) # # .. qmp-example:: # @@ -158,15 +158,15 @@ # # Since: 0.14 # -# .. note:: This command will succeed if the guest is currently running. -# It will also succeed if the guest is in the "inmigrate" state; in -# this case, the effect of the command is to make sure the guest -# starts once migration finishes, removing the effect of the ``-S`` -# command line option if it was passed. +# .. note:: This command will succeed if the guest is currently +# running. It will also succeed if the guest is in the "inmigrate" +# state; in this case, the effect of the command is to make sure +# the guest starts once migration finishes, removing the effect of +# the ``-S`` command line option if it was passed. # # If the VM was previously suspended, and not been reset or woken, -# this command will transition back to the "suspended" state. (Since -# 9.0) +# this command will transition back to the "suspended" state. +# (Since 9.0) # # .. qmp-example:: # @@ -222,13 +222,13 @@ # .. note:: This command only exists as a stop-gap. Its use is highly # discouraged. The semantics of this command are not guaranteed: # this means that command names, arguments and responses can change -# or be removed at ANY time. Applications that rely on long term -# stability guarantees should NOT use this command. +# or be removed at **any** time. Applications that rely on long +# term stability guarantees should **not** use this command. # # Known limitations: # -# * This command is stateless, this means that commands that -# depend on state information (such as getfd) might not work. +# * This command is stateless, this means that commands that depend +# on state information (such as `getfd`) might not work. # # * Commands that prompt the user for data don't currently work. # @@ -255,7 +255,7 @@ # .. note:: If @fdname already exists, the file descriptor assigned to # it will be closed and replaced by the received file descriptor. # -# The 'closefd' command can be used to explicitly close the file +# The `closefd` command can be used to explicitly close the file # descriptor when it is no longer needed. # # .. qmp-example:: @@ -282,7 +282,7 @@ # .. note:: If @fdname already exists, the file descriptor assigned to # it will be closed and replaced by the received file descriptor. 
# -# The 'closefd' command can be used to explicitly close the file +# The `closefd` command can be used to explicitly close the file # descriptor when it is no longer needed. # # .. qmp-example:: @@ -332,16 +332,14 @@ # # @opaque: A free-form string that can be used to describe the fd. # -# Returns: -# @AddfdInfo -# # Errors: # - If file descriptor was not received, GenericError # - If @fdset-id is a negative value, GenericError # # .. note:: The list of fd sets is shared by all monitor connections. # -# .. note:: If @fdset-id is not specified, a new fd set will be created. +# .. note:: If @fdset-id is not specified, a new fd set will be +# created. # # Since: 1.2 # @@ -414,8 +412,6 @@ # # Return information describing all fd sets. # -# Returns: A list of @FdsetInfo -# # Since: 1.2 # # .. note:: The list of fd sets is shared by all monitor connections. @@ -479,7 +475,7 @@ # # @name: parameter name # -# @type: parameter @CommandLineParameterType +# @type: parameter `CommandLineParameterType` # # @help: human readable text string, not suitable for parsing. # @@ -501,7 +497,7 @@ # # @option: option name # -# @parameters: an array of @CommandLineParameterInfo +# @parameters: an array of `CommandLineParameterInfo` # # Since: 1.5 ## @@ -515,8 +511,7 @@ # # @option: option name # -# Returns: list of @CommandLineOptionInfo for all options (or for the -# given @option). +# Returns: list of objects for all options (or for the given @option). # # Errors: # - if the given @option doesn't exist diff --git a/qapi/net.json b/qapi/net.json index 31b3417d654..78bcc9871e0 100644 --- a/qapi/net.json +++ b/qapi/net.json @@ -3,7 +3,9 @@ # ## -# = Net devices +# *********** +# Net devices +# *********** ## { 'include': 'sockets.json' } @@ -22,9 +24,9 @@ # # Since: 0.14 # -# .. note:: Not all network adapters support setting link status. This -# command will succeed even if the network adapter does not support -# link status notification. +# .. note:: Not all network adapters support setting link status. +# This command will succeed even if the network adapter does not +# support link status notification. # # .. 
qmp-example:: # @@ -112,6 +114,119 @@ 'data': { 'str': 'str' } } +## +# @NetDevPasstOptions: +# +# Unprivileged user-mode network connectivity using passt +# +# @path: Filename of the passt program to run (by default 'passt', and use PATH) +# +# @quiet: don't print informational messages (default, passed as '--quiet') +# +# @vhost-user: enable vhost-user +# +# @mtu: assign MTU via DHCP/NDP +# +# @address: IPv4 or IPv6 address +# +# @netmask: IPv4 mask +# +# @mac: source MAC address +# +# @gateway: IPv4 or IPv6 address as gateway +# +# @interface: interface for addresses and routes +# +# @outbound: bind to address as outbound source +# +# @outbound-if4: bind to outbound interface for IPv4 +# +# @outbound-if6: bind to outbound interface for IPv6 +# +# @dns: IPv4 or IPv6 address as DNS +# +# @search: search domains +# +# @fqdn: FQDN to configure client with +# +# @dhcp-dns: enable/disable DNS list in DHCP/DHCPv6/NDP +# +# @dhcp-search: enable/disable list in DHCP/DHCPv6/NDP +# +# @map-host-loopback: addresse to refer to host +# +# @map-guest-addr: addr to translate to guest's address +# +# @dns-forward: forward DNS queries sent to +# +# @dns-host: host nameserver to direct queries to +# +# @tcp: enable/disable TCP +# +# @udp: enable/disable UDP +# +# @icmp: enable/disable ICMP +# +# @dhcp: enable/disable DHCP +# +# @ndp: enable/disable NDP +# +# @dhcpv6: enable/disable DHCPv6 +# +# @ra: enable/disable route advertisements +# +# @freebind: bind to any address for forwarding +# +# @ipv4: enable/disable IPv4 +# +# @ipv6: enable/disable IPv6 +# +# @tcp-ports: TCP ports to forward +# +# @udp-ports: UDP ports to forward +# +# @param: parameter to pass to passt command +# +# Since: 10.1 +## +{ 'struct': 'NetDevPasstOptions', + 'data': { + '*path': 'str', + '*quiet': 'bool', + '*vhost-user': 'bool', + '*mtu': 'int', + '*address': 'str', + '*netmask': 'str', + '*mac': 'str', + '*gateway': 'str', + '*interface': 'str', + '*outbound': 'str', + '*outbound-if4': 'str', + '*outbound-if6': 'str', + '*dns': 'str', + '*search': ['String'], + '*fqdn': 'str', + '*dhcp-dns': 'bool', + '*dhcp-search': 'bool', + '*map-host-loopback': 'str', + '*map-guest-addr': 'str', + '*dns-forward': 'str', + '*dns-host': 'str', + '*tcp': 'bool', + '*udp': 'bool', + '*icmp': 'bool', + '*dhcp': 'bool', + '*ndp': 'bool', + '*dhcpv6': 'bool', + '*ra': 'bool', + '*freebind': 'bool', + '*ipv4': 'bool', + '*ipv6': 'bool', + '*tcp-ports': ['String'], + '*udp-ports': ['String'], + '*param': ['String'] }, + 'if': 'CONFIG_PASST' } + ## # @NetdevUserOptions: # @@ -150,12 +265,12 @@ # @domainname: guest-visible domain name of the virtual nameserver # (since 3.0) # -# @ipv6-prefix: IPv6 network prefix (default is fec0::) (since 2.6). -# The network prefix is given in the usual hexadecimal IPv6 -# address notation. +# @ipv6-prefix: IPv6 network prefix (default is fec0::). The network +# prefix is given in the usual hexadecimal IPv6 address notation. 
+# (since 2.6) # -# @ipv6-prefixlen: IPv6 network prefix length (default is 64) (since -# 2.6) +# @ipv6-prefixlen: IPv6 network prefix length (default is 64) +# (since 2.6) # # @ipv6-host: guest-visible IPv6 address of the host (since 2.6) # @@ -387,8 +502,8 @@ # # @hubid: hub identifier number # -# @netdev: used to connect hub to a netdev instead of a device (since -# 2.12) +# @netdev: used to connect hub to a netdev instead of a device +# (since 2.12) # # Since: 1.2 ## @@ -403,7 +518,7 @@ # Connect a client to a netmap-enabled NIC or to a VALE switch port # # @ifname: Either the name of an existing network interface supported -# by netmap, or the name of a VALE port (created on the fly). A +# by netmap, or the name of a VALE port (created on the fly). A # VALE port name is in the form 'valeXXX:YYY', where XXX and YYY # are non-negative integers. XXX identifies a switch and YYY # identifies a port of the switch. VALE ports having the same XXX @@ -454,25 +569,34 @@ # (default: 0). # # @inhibit: Don't load a default XDP program, use one already loaded -# to the interface (default: false). Requires @sock-fds. +# to the interface (default: false). Requires @sock-fds or @map-path. # # @sock-fds: A colon (:) separated list of file descriptors for # already open but not bound AF_XDP sockets in the queue order. # One fd per queue. These descriptors should already be added -# into XDP socket map for corresponding queues. Requires -# @inhibit. +# into XDP socket map for corresponding queues. @sock-fds and +# @map-path are mutually exclusive. Requires @inhibit. +# +# @map-path: The path to a pinned xsk map to push file descriptors +# for bound AF_XDP sockets into. @map-path and @sock-fds are +# mutually exclusive. Requires @inhibit. (Since 10.1) +# +# @map-start-index: Use @map-path to insert xsk sockets starting from +# this index number (default: 0). Requires @map-path. (Since 10.1) # # Since: 8.2 ## { 'struct': 'NetdevAFXDPOptions', 'data': { - 'ifname': 'str', - '*mode': 'AFXDPMode', - '*force-copy': 'bool', - '*queues': 'int', - '*start-queue': 'int', - '*inhibit': 'bool', - '*sock-fds': 'str' }, + 'ifname': 'str', + '*mode': 'AFXDPMode', + '*force-copy': 'bool', + '*queues': 'int', + '*start-queue': 'int', + '*inhibit': 'bool', + '*sock-fds': 'str', + '*map-path': 'str', + '*map-start-index': 'int32' }, 'if': 'CONFIG_AF_XDP' } ## @@ -510,8 +634,8 @@ # @queues: number of queues to be created for multiqueue vhost-vdpa # (default: 1) # -# @x-svq: Start device with (experimental) shadow virtqueue. (Since -# 7.1) (default: false) +# @x-svq: Start device with (experimental) shadow virtqueue. +# (Since 7.1) (default: false) # # Features: # @@ -535,13 +659,13 @@ # interfaces that are in host mode and also with the host. # # @start-address: The starting IPv4 address to use for the interface. -# Must be in the private IP range (RFC 1918). Must be specified +# Must be in the private IP range (RFC 1918). Must be specified # along with @end-address and @subnet-mask. This address is used # as the gateway address. The subsequent address up to and # including end-address are placed in the DHCP pool. # # @end-address: The DHCP IPv4 range end address to use for the -# interface. Must be in the private IP range (RFC 1918). Must be +# interface. Must be in the private IP range (RFC 1918). Must be # specified along with @start-address and @subnet-mask. # # @subnet-mask: The IPv4 subnet mask to use on the interface. Must be @@ -556,7 +680,7 @@ # network vmnet interface should be added to. 
If set, no DHCP # service is provided for this interface and network communication # is allowed only with other interfaces added to this network -# identified by the UUID. Requires at least macOS Big Sur 11.0. +# identified by the UUID. Requires at least macOS Big Sur 11.0. # # Since: 7.1 ## @@ -575,20 +699,20 @@ # vmnet (shared mode) network backend. # # Allows traffic originating from the vmnet interface to reach the -# Internet through a network address translator (NAT). The vmnet +# Internet through a network address translator (NAT). The vmnet # interface can communicate with the host and with other shared mode # interfaces on the same subnet. If no DHCP settings, subnet mask and # IPv6 prefix specified, the interface can communicate with any of # other interfaces in shared mode. # # @start-address: The starting IPv4 address to use for the interface. -# Must be in the private IP range (RFC 1918). Must be specified +# Must be in the private IP range (RFC 1918). Must be specified # along with @end-address and @subnet-mask. This address is used # as the gateway address. The subsequent address up to and # including end-address are placed in the DHCP pool. # # @end-address: The DHCP IPv4 range end address to use for the -# interface. Must be in the private IP range (RFC 1918). Must be +# interface. Must be in the private IP range (RFC 1918). Must be # specified along with @start-address and @subnet-mask. # # @subnet-mask: The IPv4 subnet mask to use on the interface. Must be @@ -650,7 +774,17 @@ # attempt a reconnect after the given number of seconds. Setting # this to zero disables this function. (default: 0) (since 8.0) # -# Only SocketAddress types 'unix', 'inet' and 'fd' are supported. +# @reconnect-ms: For a client socket, if a socket is disconnected, then +# attempt a reconnect after the given number of milliseconds. Setting +# this to zero disables this function. This member is mutually +# exclusive with @reconnect. (default: 0) (Since: 9.2) +# +# Only `SocketAddress` types 'unix', 'inet' and 'fd' are supported. +# +# Features: +# +# @deprecated: Member @reconnect is deprecated. Use @reconnect-ms +# instead. # # Since: 7.2 ## @@ -658,7 +792,8 @@ 'data': { 'addr': 'SocketAddress', '*server': 'bool', - '*reconnect': 'uint32' } } + '*reconnect': { 'type': 'int', 'features': [ 'deprecated' ] }, + '*reconnect-ms': 'int' } } ## # @NetdevDgramOptions: @@ -669,7 +804,7 @@ # # @local: local address # -# Only SocketAddress types 'unix', 'inet' and 'fd' are supported. +# Only `SocketAddress` types 'unix', 'inet' and 'fd' are supported. # # If remote address is present and it's a multicast address, local # address is optional. Otherwise local address is required and remote @@ -703,20 +838,30 @@ # Available netdev drivers. 
# # @l2tpv3: since 2.1 +# # @vhost-vdpa: since 5.1 +# # @vmnet-host: since 7.1 +# # @vmnet-shared: since 7.1 +# # @vmnet-bridged: since 7.1 +# # @stream: since 7.2 +# # @dgram: since 7.2 +# # @af-xdp: since 8.2 # +# @passt: since 10.1 +# # Since: 2.7 ## { 'enum': 'NetClientDriver', 'data': [ 'none', 'nic', 'user', 'tap', 'l2tpv3', 'socket', 'stream', 'dgram', 'vde', 'bridge', 'hubport', 'netmap', 'vhost-user', 'vhost-vdpa', + { 'name': 'passt', 'if': 'CONFIG_PASST' }, { 'name': 'af-xdp', 'if': 'CONFIG_AF_XDP' }, { 'name': 'vmnet-host', 'if': 'CONFIG_VMNET' }, { 'name': 'vmnet-shared', 'if': 'CONFIG_VMNET' }, @@ -738,6 +883,8 @@ 'discriminator': 'type', 'data': { 'nic': 'NetLegacyNicOptions', + 'passt': { 'type': 'NetDevPasstOptions', + 'if': 'CONFIG_PASST' }, 'user': 'NetdevUserOptions', 'tap': 'NetdevTapOptions', 'l2tpv3': 'NetdevL2TPv3Options', @@ -827,7 +974,7 @@ # # @name: net client name # -# Returns: list of @RxFilterInfo for all NICs (or for the given NIC). +# Returns: list of info for all NICs (or for the given NIC). # # Errors: # - if the given @name doesn't exist @@ -872,7 +1019,7 @@ ## # @NIC_RX_FILTER_CHANGED: # -# Emitted once until the 'query-rx-filter' command is executed, the +# Emitted once until the `query-rx-filter` command is executed, the # first event will always be emitted # # @name: net client name @@ -1013,3 +1160,43 @@ ## { 'event': 'NETDEV_STREAM_DISCONNECTED', 'data': { 'netdev-id': 'str' } } + +## +# @NETDEV_VHOST_USER_CONNECTED: +# +# Emitted when the vhost-user chardev is connected +# +# @netdev-id: QEMU netdev id that is connected +# +# @chardev-id: The character device id used by the QEMU netdev +# +# Since: 10.0 +# +# .. qmp-example:: +# +# <- { "timestamp": {"seconds": 1739538638, "microseconds": 354181 }, +# "event": "NETDEV_VHOST_USER_CONNECTED", +# "data": { "netdev-id": "netdev0", "chardev-id": "chr0" } } +# +## +{ 'event': 'NETDEV_VHOST_USER_CONNECTED', + 'data': { 'netdev-id': 'str', 'chardev-id': 'str' } } + +## +# @NETDEV_VHOST_USER_DISCONNECTED: +# +# Emitted when the vhost-user chardev is disconnected +# +# @netdev-id: QEMU netdev id that is disconnected +# +# Since: 10.0 +# +# .. qmp-example:: +# +# <- { "timestamp": { "seconds": 1739538634, "microseconds": 920450 }, +# "event": "NETDEV_VHOST_USER_DISCONNECTED", +# "data": { "netdev-id": "netdev0" } } +# +## +{ 'event': 'NETDEV_VHOST_USER_DISCONNECTED', + 'data': { 'netdev-id': 'str' } } diff --git a/qapi/pci.json b/qapi/pci.json index ec28f1d9b46..694c741e420 100644 --- a/qapi/pci.json +++ b/qapi/pci.json @@ -6,7 +6,9 @@ # SPDX-License-Identifier: GPL-2.0-or-later ## -# = PCI +# *** +# PCI +# *** ## ## @@ -33,6 +35,8 @@ # - 'io' if the region is a PIO region # - 'memory' if the region is a MMIO region # +# @address: memory address +# # @size: memory size # # @prefetch: if @type is 'memory', true if the memory is prefetchable @@ -81,7 +85,7 @@ # # @bus: information about the bus the device resides on # -# @devices: a list of @PciDeviceInfo for each device on this bridge +# @devices: a list of `PciDeviceInfo` for each device on this bridge # # Since: 0.14 ## @@ -173,7 +177,7 @@ # # Return information about the PCI bus topology of the guest. # -# Returns: a list of @PciInfo for each PCI bus. Each bus is +# Returns: a list of info for each PCI bus. Each bus is # represented by a json-object, which has a key with a json-array # of all PCI devices attached to it. Each device is represented # by a json-object. 
@@ -310,6 +314,5 @@ # } # # This example has been shortened as the real response is too long. -# ## { 'command': 'query-pci', 'returns': ['PciInfo'] } diff --git a/qapi/pragma.json b/qapi/pragma.json index 59fbe74b8ce..023a2ef7bcd 100644 --- a/qapi/pragma.json +++ b/qapi/pragma.json @@ -46,34 +46,27 @@ 'BlockdevSnapshotSyncWrapper', 'BlockdevSnapshotWrapper', 'BlockdevVmdkAdapterType', - 'ChardevBackendKind', - 'CpuS390Entitlement', - 'CpuS390Polarization', - 'CpuS390State', - 'CxlCorErrorType', 'DisplayProtocol', 'DriveBackupWrapper', 'DummyBlockCoreForceArrays', 'DummyForceArrays', 'DummyVirtioForceArrays', - 'GrabToggleKeys', 'HotKeyMod', 'ImageInfoSpecificKind', 'InputAxis', 'InputButton', 'IscsiHeaderDigest', 'IscsiTransport', - 'JSONType', 'KeyValueKind', 'MemoryDeviceInfoKind', 'NetClientDriver', 'ObjectType', - 'PciMemoryRegion', - 'QCryptoAkCipherKeyType', - 'QCryptodevBackendServiceType', 'QKeyCode', 'RbdAuthMode', 'RbdImageEncryptionFormat', + 'S390CpuEntitlement', + 'S390CpuPolarization', + 'S390CpuState', 'String', 'StringWrapper', 'SysEmuTarget', @@ -83,9 +76,7 @@ 'X86CPURegister32', 'XDbgBlockGraph', 'YankInstanceType', - 'blockdev-reopen', - 'query-rocker', - 'query-rocker-ports' ], + 'blockdev-reopen' ], # Externally visible types whose member names may use uppercase 'member-name-exceptions': [ # visible in: 'ACPISlotType', # query-acpi-ospm-status diff --git a/qapi/qapi-clone-visitor.c b/qapi/qapi-clone-visitor.c index bbf953698f3..30997638de5 100644 --- a/qapi/qapi-clone-visitor.c +++ b/qapi/qapi-clone-visitor.c @@ -12,7 +12,7 @@ #include "qapi/clone-visitor.h" #include "qapi/visitor-impl.h" #include "qapi/error.h" -#include "qapi/qmp/qnull.h" +#include "qobject/qnull.h" struct QapiCloneVisitor { Visitor visitor; diff --git a/qapi/qapi-dealloc-visitor.c b/qapi/qapi-dealloc-visitor.c index ef283f29660..57a2c904bb1 100644 --- a/qapi/qapi-dealloc-visitor.c +++ b/qapi/qapi-dealloc-visitor.c @@ -14,7 +14,7 @@ #include "qemu/osdep.h" #include "qapi/dealloc-visitor.h" -#include "qapi/qmp/qnull.h" +#include "qobject/qnull.h" #include "qapi/visitor-impl.h" struct QapiDeallocVisitor diff --git a/qapi/qapi-forward-visitor.c b/qapi/qapi-forward-visitor.c index e36d9bc9ba7..d91d921bdbd 100644 --- a/qapi/qapi-forward-visitor.c +++ b/qapi/qapi-forward-visitor.c @@ -14,14 +14,14 @@ #include "qapi/forward-visitor.h" #include "qapi/visitor-impl.h" #include "qemu/queue.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qjson.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" #include "qapi/qmp/qerror.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnull.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qlist.h" +#include "qobject/qnull.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" #include "qemu/cutils.h" struct ForwardFieldVisitor { @@ -246,7 +246,7 @@ static void forward_field_optional(Visitor *v, const char *name, bool *present) } static bool forward_field_policy_reject(Visitor *v, const char *name, - unsigned special_features, + uint64_t features, Error **errp) { ForwardFieldVisitor *ffv = to_ffv(v); @@ -254,18 +254,18 @@ static bool forward_field_policy_reject(Visitor *v, const char *name, if (!forward_field_translate_name(ffv, &name, errp)) { return true; } - return visit_policy_reject(ffv->target, name, special_features, errp); + return visit_policy_reject(ffv->target, name, features, errp); } static bool forward_field_policy_skip(Visitor *v, const char *name, - 
unsigned special_features) + uint64_t features) { ForwardFieldVisitor *ffv = to_ffv(v); if (!forward_field_translate_name(ffv, &name, NULL)) { return true; } - return visit_policy_skip(ffv->target, name, special_features); + return visit_policy_skip(ffv->target, name, features); } static void forward_field_complete(Visitor *v, void *opaque) diff --git a/qapi/qapi-schema.json b/qapi/qapi-schema.json index b1581988e4e..82f111ba063 100644 --- a/qapi/qapi-schema.json +++ b/qapi/qapi-schema.json @@ -1,37 +1,28 @@ # -*- Mode: Python -*- # vim: filetype=python ## -# = Introduction +# ************ +# Introduction +# ************ # -# This document describes all commands currently supported by QMP. +# This manual describes the commands and events supported by the QEMU +# Monitor Protocol (QMP). # -# Most of the time their usage is exactly the same as in the user -# Monitor, this means that any other document which also describe -# commands (the manpage, QEMU's manual, etc) can and should be -# consulted. +# For locating a particular item, please see the `qapi-qmp-index`. # -# QMP has two types of commands: regular and query commands. Regular -# commands usually change the Virtual Machine's state someway, while -# query commands just return information. The sections below are -# divided accordingly. +# The following notation is used in examples: # -# It's important to observe that all communication examples are -# formatted in a reader-friendly way, so that they're easier to -# understand. However, in real protocol usage, they're emitted as a -# single line. +# .. qmp-example:: # -# Also, the following notation is used to denote data flow: +# -> ... text sent by client (commands) ... +# <- ... text sent by server (command responses and events) ... # -# Example: -# -# :: -# -# -> data issued by the Client -# <- Server data response +# Example text is formatted for readability. However, in real +# protocol usage, its commonly emitted as a single line. # # Please refer to the # :doc:`QEMU Machine Protocol Specification ` -# for detailed information on the Server command and response formats. +# for the general format of commands, responses, and events. 
## { 'include': 'pragma.json' } @@ -48,6 +39,7 @@ { 'include': 'run-state.json' } { 'include': 'crypto.json' } { 'include': 'job.json' } +{ 'include': 'accelerator.json' } { 'include': 'block.json' } { 'include': 'block-export.json' } { 'include': 'char.json' } @@ -68,11 +60,12 @@ { 'include': 'qdev.json' } { 'include': 'machine-common.json' } { 'include': 'machine.json' } -{ 'include': 'machine-target.json' } +{ 'include': 'machine-s390x.json' } { 'include': 'replay.json' } { 'include': 'yank.json' } { 'include': 'misc.json' } -{ 'include': 'misc-target.json' } +{ 'include': 'misc-arm.json' } +{ 'include': 'misc-i386.json' } { 'include': 'audio.json' } { 'include': 'acpi.json' } { 'include': 'pci.json' } @@ -81,3 +74,4 @@ { 'include': 'vfio.json' } { 'include': 'cryptodev.json' } { 'include': 'cxl.json' } +{ 'include': 'uefi.json' } diff --git a/qapi/qapi-util.c b/qapi/qapi-util.c index 65a7d184372..3d849fe0347 100644 --- a/qapi/qapi-util.c +++ b/qapi/qapi-util.c @@ -37,19 +37,19 @@ static bool compat_policy_input_ok1(const char *adjective, } } -bool compat_policy_input_ok(unsigned special_features, +bool compat_policy_input_ok(uint64_t features, const CompatPolicy *policy, ErrorClass error_class, const char *kind, const char *name, Error **errp) { - if ((special_features & 1u << QAPI_DEPRECATED) + if ((features & 1u << QAPI_DEPRECATED) && !compat_policy_input_ok1("Deprecated", policy->deprecated_input, error_class, kind, name, errp)) { return false; } - if ((special_features & (1u << QAPI_UNSTABLE)) + if ((features & (1u << QAPI_UNSTABLE)) && !compat_policy_input_ok1("Unstable", policy->unstable_input, error_class, kind, name, errp)) { diff --git a/qapi/qapi-visit-core.c b/qapi/qapi-visit-core.c index 6c13510a2bc..706c61e0260 100644 --- a/qapi/qapi-visit-core.c +++ b/qapi/qapi-visit-core.c @@ -141,21 +141,21 @@ bool visit_optional(Visitor *v, const char *name, bool *present) } bool visit_policy_reject(Visitor *v, const char *name, - unsigned special_features, Error **errp) + uint64_t features, Error **errp) { trace_visit_policy_reject(v, name); if (v->policy_reject) { - return v->policy_reject(v, name, special_features, errp); + return v->policy_reject(v, name, features, errp); } return false; } bool visit_policy_skip(Visitor *v, const char *name, - unsigned special_features) + uint64_t features) { trace_visit_policy_skip(v, name); if (v->policy_skip) { - return v->policy_skip(v, name, special_features); + return v->policy_skip(v, name, features); } return false; } @@ -409,8 +409,8 @@ static bool input_type_enum(Visitor *v, const char *name, int *obj, return false; } - if (lookup->special_features - && !compat_policy_input_ok(lookup->special_features[value], + if (lookup->features + && !compat_policy_input_ok(lookup->features[value], &v->compat_policy, ERROR_CLASS_GENERIC_ERROR, "value", enum_str, errp)) { diff --git a/qapi/qdev.json b/qapi/qdev.json index e91ca0309df..e14a0c92598 100644 --- a/qapi/qdev.json +++ b/qapi/qdev.json @@ -5,7 +5,9 @@ # See the COPYING file in the top-level directory. ## -# = Device infrastructure (qdev) +# **************************** +# Device infrastructure (qdev) +# **************************** ## { 'include': 'qom.json' } @@ -17,8 +19,7 @@ # # @typename: the type name of a device # -# Returns: a list of ObjectPropertyInfo describing a devices -# properties +# Returns: a list describing a devices properties # # .. note:: Objects can create properties at runtime, for example to # describe links between different devices and/or objects. 
These @@ -59,8 +60,8 @@ # the 'docs/qdev-device-use.txt' file. # # 3. It's possible to list device properties by running QEMU with -# the ``-device DEVICE,help`` command-line argument, where DEVICE -# is the device's name. +# the ``-device DEVICE,help`` command-line argument, where +# DEVICE is the device's name. # # .. qmp-example:: # @@ -94,13 +95,13 @@ # # .. note:: When this command completes, the device may not be removed # from the guest. Hot removal is an operation that requires guest -# cooperation. This command merely requests that the guest begin the -# hot removal process. Completion of the device removal process is -# signaled with a DEVICE_DELETED event. Guest reset will -# automatically complete removal for all devices. If a guest-side -# error in the hot removal process is detected, the device will not -# be removed and a DEVICE_UNPLUG_GUEST_ERROR event is sent. Some -# errors cannot be detected. +# cooperation. This command merely requests that the guest begin +# the hot removal process. Completion of the device removal +# process is signaled with a `DEVICE_DELETED` event. Guest reset +# will automatically complete removal for all devices. If a +# guest-side error in the hot removal process is detected, the +# device will not be removed and a `DEVICE_UNPLUG_GUEST_ERROR` event +# is sent. Some errors cannot be detected. # # Since: 0.14 # @@ -123,7 +124,7 @@ # # Emitted whenever the device removal completion is acknowledged by # the guest. At this point, it's safe to reuse the specified device -# ID. Device removal can be initiated by the guest or by HMP/QMP +# ID. Device removal can be initiated by the guest or by HMP/QMP # commands. # # @device: the device's ID if it has one @@ -163,3 +164,27 @@ ## { 'event': 'DEVICE_UNPLUG_GUEST_ERROR', 'data': { '*device': 'str', 'path': 'str' } } + +## +# @device-sync-config: +# +# Synchronize device configuration from host to guest part. First, +# copy the configuration from the host part (backend) to the guest +# part (frontend). Then notify guest software that device +# configuration changed. +# +# The command may be used to notify the guest about block device +# capacity change. Currently only vhost-user-blk device supports +# this. +# +# @id: the device's ID or QOM path +# +# Features: +# +# @unstable: The command is experimental. 
+# +# Since: 9.2 +## +{ 'command': 'device-sync-config', + 'features': [ 'unstable' ], + 'data': {'id': 'str'} } diff --git a/qapi/qmp-dispatch.c b/qapi/qmp-dispatch.c index 176b549473c..e569224eaea 100644 --- a/qapi/qmp-dispatch.c +++ b/qapi/qmp-dispatch.c @@ -16,12 +16,12 @@ #include "block/aio.h" #include "qapi/compat-policy.h" #include "qapi/error.h" -#include "qapi/qmp/dispatch.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qjson.h" +#include "qapi/qmp-registry.h" +#include "qobject/qdict.h" +#include "qobject/qjson.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qobject-output-visitor.h" -#include "qapi/qmp/qbool.h" +#include "qobject/qbool.h" #include "qemu/coroutine.h" #include "qemu/main-loop.h" @@ -173,7 +173,7 @@ QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *requ "The command %s has not been found", command); goto out; } - if (!compat_policy_input_ok(cmd->special_features, &compat_policy, + if (!compat_policy_input_ok(cmd->features, &compat_policy, ERROR_CLASS_COMMAND_NOT_FOUND, "command", command, &err)) { goto out; diff --git a/qapi/qmp-event.c b/qapi/qmp-event.c index 0fe0d0a5a6e..11cb6ace995 100644 --- a/qapi/qmp-event.c +++ b/qapi/qmp-event.c @@ -14,9 +14,9 @@ #include "qemu/osdep.h" #include "qapi/qmp-event.h" -#include "qapi/qmp/qstring.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qjson.h" +#include "qobject/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qjson.h" static void timestamp_put(QDict *qdict) { diff --git a/qapi/qmp-registry.c b/qapi/qmp-registry.c index 485bc5e6fc7..e2623f2b78c 100644 --- a/qapi/qmp-registry.c +++ b/qapi/qmp-registry.c @@ -13,11 +13,11 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/dispatch.h" +#include "qapi/qmp-registry.h" void qmp_register_command(QmpCommandList *cmds, const char *name, QmpCommandFunc *fn, QmpCommandOptions options, - unsigned special_features) + uint64_t features) { QmpCommand *cmd = g_malloc0(sizeof(*cmd)); @@ -28,7 +28,7 @@ void qmp_register_command(QmpCommandList *cmds, const char *name, cmd->fn = fn; cmd->enabled = true; cmd->options = options; - cmd->special_features = special_features; + cmd->features = features; QTAILQ_INSERT_TAIL(cmds, cmd, node); } diff --git a/qapi/qobject-input-visitor.c b/qapi/qobject-input-visitor.c index f110a804b2a..c52d36997df 100644 --- a/qapi/qobject-input-visitor.c +++ b/qapi/qobject-input-visitor.c @@ -19,14 +19,14 @@ #include "qapi/qobject-input-visitor.h" #include "qapi/visitor-impl.h" #include "qemu/queue.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qjson.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" #include "qapi/qmp/qerror.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnull.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qlist.h" +#include "qobject/qnull.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" #include "qemu/cutils.h" #include "qemu/keyval.h" @@ -664,10 +664,10 @@ static void qobject_input_optional(Visitor *v, const char *name, bool *present) } static bool qobject_input_policy_reject(Visitor *v, const char *name, - unsigned special_features, + uint64_t features, Error **errp) { - return !compat_policy_input_ok(special_features, &v->compat_policy, + return !compat_policy_input_ok(features, &v->compat_policy, ERROR_CLASS_GENERIC_ERROR, "parameter", name, errp); } diff --git a/qapi/qobject-output-visitor.c b/qapi/qobject-output-visitor.c index 74770edd73c..de5b36bda55 100644 --- 
a/qapi/qobject-output-visitor.c +++ b/qapi/qobject-output-visitor.c @@ -17,12 +17,12 @@ #include "qapi/qobject-output-visitor.h" #include "qapi/visitor-impl.h" #include "qemu/queue.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnull.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qnull.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" typedef struct QStackEntry { QObject *value; @@ -210,13 +210,13 @@ static bool qobject_output_type_null(Visitor *v, const char *name, } static bool qobject_output_policy_skip(Visitor *v, const char *name, - unsigned special_features) + uint64_t features) { CompatPolicy *pol = &v->compat_policy; - return ((special_features & 1u << QAPI_DEPRECATED) + return ((features & 1u << QAPI_DEPRECATED) && pol->deprecated_output == COMPAT_POLICY_OUTPUT_HIDE) - || ((special_features & 1u << QAPI_UNSTABLE) + || ((features & 1u << QAPI_UNSTABLE) && pol->unstable_output == COMPAT_POLICY_OUTPUT_HIDE); } diff --git a/qapi/qom.json b/qapi/qom.json index fc035f126ad..6f619f92bf9 100644 --- a/qapi/qom.json +++ b/qapi/qom.json @@ -10,7 +10,9 @@ { 'include': 'crypto.json' } ## -# = QEMU Object Model (QOM) +# *********************** +# QEMU Object Model (QOM) +# *********************** ## ## @@ -45,17 +47,43 @@ '*description': 'str', '*default-value': 'any' } } +## +# @ObjectPropertyValue: +# +# @name: the name of the property. +# +# @type: the type of the property, as described in `ObjectPropertyInfo`. +# +# @value: the value of the property. Absent when the property cannot +# be read. +# +# Since 10.1 +## +{ 'struct': 'ObjectPropertyValue', + 'data': { 'name': 'str', + 'type': 'str', + '*value': 'any' } } + +## +# @ObjectPropertiesValues: +# +# @properties: a list of properties. +# +# Since 10.1 +## +{ 'struct': 'ObjectPropertiesValues', + 'data': { 'properties': [ 'ObjectPropertyValue' ] }} + + ## # @qom-list: # -# This command will list any properties of a object given a path in -# the object model. +# List properties of a object given a path in the object model. # -# @path: the path within the object model. See @qom-get for a +# @path: the path within the object model. See `qom-get` for a # description of this parameter. # -# Returns: a list of @ObjectPropertyInfo that describe the properties -# of the object. +# Returns: a list that describe the properties of the object. # # Since: 1.2 # @@ -76,8 +104,7 @@ ## # @qom-get: # -# This command will get a property from a object model path and return -# the value. +# Get a property value. # # @path: The path within the object model. There are two forms of # supported paths--absolute and partial paths. @@ -125,17 +152,39 @@ 'returns': 'any', 'allow-preconfig': true } +## +# @qom-list-get: +# +# List properties and their values for each object path in the input +# list. +# +# @paths: The absolute or partial path for each object, as described +# in `qom-get`. +# +# Errors: +# - If any path is not valid or is ambiguous +# +# Returns: A list where each element is the result for the +# corresponding element of @paths. +# +# Since 10.1 +## +{ 'command': 'qom-list-get', + 'data': { 'paths': [ 'str' ] }, + 'returns': [ 'ObjectPropertiesValues' ], + 'allow-preconfig': true } + ## # @qom-set: # -# This command will set a property from a object model path. +# Set a property value. 
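For orientation, the wire-level payloads for qom-get, qom-set and the new qom-list-get look roughly as follows. The object paths and property names are examples only, not anything mandated by the schema.

    # Illustrative QMP payloads; paths and property names are examples only.

    # Read a single property.
    qom_get = {"execute": "qom-get",
               "arguments": {"path": "/machine", "property": "type"}}

    # Write a single property (here: tune an iothread created as 'iothread0').
    qom_set = {"execute": "qom-set",
               "arguments": {"path": "/objects/iothread0",
                             "property": "poll-max-ns",
                             "value": 100000}}

    # New in 10.1: fetch properties *and* values for several objects in one
    # round trip; the reply has one ObjectPropertiesValues per requested path.
    qom_list_get = {"execute": "qom-list-get",
                    "arguments": {"paths": ["/machine", "/objects/iothread0"]}}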
# -# @path: see @qom-get for a description of this parameter +# @path: see `qom-get` for a description of this parameter # # @property: the property name to set # # @value: a value who's type is appropriate for the property type. -# See @qom-get for a description of type mapping. +# See `qom-get` for a description of type mapping. # # Since: 1.2 # @@ -154,7 +203,7 @@ ## # @ObjectTypeInfo: # -# This structure describes a search result from @qom-list-types +# This structure describes a search result from `qom-list-types` # # @name: the type name found in the search # @@ -171,15 +220,14 @@ ## # @qom-list-types: # -# This command will return a list of types given search parameters +# Return a list of types given search parameters. # # @implements: if specified, only return types that implement this # type name # # @abstract: if true, include abstract types in the results # -# Returns: a list of @ObjectTypeInfo or an empty list if no results -# are found +# Returns: a list of types, or an empty list if no results are found # # Since: 1.1 ## @@ -195,11 +243,12 @@ # # @typename: the type name of an object # +# # .. note:: Objects can create properties at runtime, for example to # describe links between different devices and/or objects. These # properties are not included in the output of this command. # -# Returns: a list of ObjectPropertyInfo describing object properties +# Returns: a list describing object properties # # Since: 2.12 ## @@ -356,7 +405,7 @@ # filter list. "head" means the filter is inserted at the head of # the filter list, before any existing filters. "tail" means the # filter is inserted at the tail of the filter list, behind any -# existing filters (default). "id=" means the filter is +# existing filters (default). "id=" means the filter is # inserted before or behind the filter specified by , # depending on the @insert property. (default: "tail") # @@ -620,8 +669,8 @@ # .. note:: prealloc=true and reserve=false cannot be set at the same # time. With reserve=true, the behavior depends on the operating # system: for example, Linux will not reserve swap space for shared -# file mappings -- "not applicable". In contrast, reserve=false will -# bail out if it cannot be configured accordingly. +# file mappings -- "not applicable". In contrast, reserve=false +# will bail out if it cannot be configured accordingly. # # Since: 2.1 ## @@ -646,9 +695,9 @@ # @align: the base address alignment when QEMU mmap(2)s @mem-path. # Some backend stores specified by @mem-path require an alignment # different than the default one used by QEMU, e.g. the device DAX -# /dev/dax0.0 requires 2M alignment rather than 4K. In such cases, -# users can specify the required alignment via this option. 0 -# selects a default alignment (currently the page size). +# /dev/dax0.0 requires 2M alignment rather than 4K. In such +# cases, users can specify the required alignment via this option. +# 0 selects a default alignment (currently the page size). # (default: 0) # # @offset: the offset into the target file that the region starts at. @@ -709,7 +758,7 @@ # # @hugetlbsize: the hugetlb page size on systems that support multiple # hugetlb page sizes (it must be a power of 2 value supported by -# the system). 0 selects a default page size. This option is +# the system). 0 selects a default page size. This option is # ignored if @hugetlb is false. (default: 0) # # @seal: if true, create a sealed-file, which will block further @@ -789,7 +838,7 @@ # # Properties for x-remote-object objects. 
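The memory-backend options discussed a little earlier in this hunk (prealloc, reserve, align, offset, hugetlbsize) are normally supplied when the backend object is created, either with -object on the command line or with the object-add QMP command. A hedged sketch of the QMP form, with made-up id, path and sizes:

    # Illustrative object-add payload; id, mem-path and sizes are examples.
    # 'align' matters for backing stores such as device DAX, which may need
    # 2M alignment instead of the default page size (see the text above).
    object_add = {
        "execute": "object-add",
        "arguments": {
            "qom-type": "memory-backend-file",
            "id": "mem0",
            "mem-path": "/dev/dax0.0",
            "size": 4 * 1024 * 1024 * 1024,   # 4 GiB
            "align": 2 * 1024 * 1024,         # 2 MiB
            "share": True,
        },
    }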
# -# @fd: file descriptor name previously passed via 'getfd' command +# @fd: file descriptor name previously passed via `getfd` command # # @devid: the id of the device to be associated with the file # descriptor @@ -818,7 +867,7 @@ # # Properties for iommufd objects. # -# @fd: file descriptor name previously passed via 'getfd' command, +# @fd: file descriptor name previously passed via `getfd` command, # which represents a pre-opened /dev/iommu. This allows the # iommufd object to be shared across several subsystems (VFIO, # VDPA, ...), and the file descriptor to be shared with other @@ -829,6 +878,21 @@ { 'struct': 'IOMMUFDProperties', 'data': { '*fd': 'str' } } +## +# @AcpiEgmMemoryProperties: +# +# Properties for acpi-egm-memory objects. +# +# @pci-dev: PCI device ID to be associated with the node +# +# @node: NUMA node associated with the PCI device +# +# Since: 9.0 +## +{ 'struct': 'AcpiEgmMemoryProperties', + 'data': { 'pci-dev': 'str', + 'node': 'uint32' } } + ## # @AcpiGenericInitiatorProperties: # @@ -844,6 +908,45 @@ 'data': { 'pci-dev': 'str', 'node': 'uint32' } } +## +# @AcpiGenericPortProperties: +# +# Properties for acpi-generic-port objects. +# +# @pci-bus: QOM path of the PCI bus of the hostbridge associated with +# this SRAT Generic Port Affinity Structure. This is the same as +# the bus parameter for the root ports attached to this host +# bridge. The resulting SRAT Generic Port Affinity Structure will +# refer to the ACPI object in DSDT that represents the host bridge +# (e.g. ACPI0016 for CXL host bridges). See ACPI 6.5 Section +# 5.2.16.7 for more information. +# +# @node: Similar to a NUMA node ID, but instead of providing a +# reference point used for defining NUMA distances and access +# characteristics to memory or from an initiator (e.g. CPU), this +# node defines the boundary point between non-discoverable system +# buses which must be described by firmware, and a discoverable +# bus. NUMA distances and access characteristics are defined to +# and from that point. For system software to establish full +# initiator to target characteristics this information must be +# combined with information retrieved from the discoverable part +# of the path. An example would use CDAT (see UEFI.org) +# information read from devices and switches in conjunction with +# link characteristics read from PCIe Configuration space. +# To get the full path latency from CPU to CXL attached DRAM +# CXL device: Add the latency from CPU to Generic Port (from +# HMAT indexed via the node ID in this SRAT structure) to +# that for CXL bus links, the latency across intermediate switches +# and from the EP port to the actual memory. Bandwidth is more +# complex as there may be interleaving across multiple devices +# and shared links in the path. +# +# Since: 9.2 +## +{ 'struct': 'AcpiGenericPortProperties', + 'data': { 'pci-bus': 'str', + 'node': 'uint32' } } + ## # @RngProperties: # @@ -893,6 +996,19 @@ 'data': { '*filename': 'str' }, 'if': 'CONFIG_POSIX' } +## +# @IgvmCfgProperties: +# +# Properties common to objects that handle IGVM files. +# +# @file: IGVM file to use to configure guest +# +# Since: 10.1 +## +{ 'struct': 'IgvmCfgProperties', + 'if': 'CONFIG_IGVM', + 'data': { 'file': 'str' } } + ## # @SevCommonProperties: # @@ -930,17 +1046,17 @@ # # @handle: SEV firmware handle (default: 0) # -# @legacy-vm-type: Use legacy KVM_SEV_INIT KVM interface for creating the VM. 
-# The newer KVM_SEV_INIT2 interface, from Linux >= 6.10, syncs -# additional vCPU state when initializing the VMSA structures, -# which will result in a different guest measurement. Set -# this to 'on' to force compatibility with older QEMU or kernel -# versions that rely on legacy KVM_SEV_INIT behavior. 'auto' -# will behave identically to 'on', but will automatically -# switch to using KVM_SEV_INIT2 if the user specifies any -# additional options that require it. If set to 'off', QEMU -# will require KVM_SEV_INIT2 unconditionally. -# (default: off) (since 9.1) +# @legacy-vm-type: Use legacy KVM_SEV_INIT KVM interface for creating +# the VM. The newer KVM_SEV_INIT2 interface, from Linux >= 6.10, +# syncs additional vCPU state when initializing the VMSA +# structures, which will result in a different guest measurement. +# Set this to 'on' to force compatibility with older QEMU or kernel +# versions that rely on legacy KVM_SEV_INIT behavior. 'auto' will +# behave identically to 'on', but will automatically switch to +# using KVM_SEV_INIT2 if the user specifies any additional options +# that require it. If set to 'off', QEMU will require +# KVM_SEV_INIT2 unconditionally. +# (default: off) (since 9.1) # # Since: 2.12 ## @@ -992,7 +1108,7 @@ # @vcek-disabled: Guests are by default allowed to choose between VLEK # (Versioned Loaded Endorsement Key) or VCEK (Versioned Chip # Endorsement Key) when requesting attestation reports from -# firmware. Set this to true to disable the use of VCEK. +# firmware. Set this to true to disable the use of VCEK. # (default: false) (since: 9.1) # # Since: 9.1 @@ -1008,6 +1124,45 @@ '*host-data': 'str', '*vcek-disabled': 'bool' } } +## +# @TdxGuestProperties: +# +# Properties for tdx-guest objects. +# +# @attributes: The 'attributes' of a TD guest that is passed to +# KVM_TDX_INIT_VM +# +# @sept-ve-disable: toggle bit 28 of TD attributes to control disabling +# of EPT violation conversion to #VE on guest TD access of PENDING +# pages. Some guest OS (e.g., Linux TD guest) may require this to +# be set, otherwise they refuse to boot. +# +# @mrconfigid: ID for non-owner-defined configuration of the guest TD, +# e.g., run-time or OS configuration (base64 encoded SHA384 digest). +# Defaults to all zeros. +# +# @mrowner: ID for the guest TD’s owner (base64 encoded SHA384 digest). +# Defaults to all zeros. +# +# @mrownerconfig: ID for owner-defined configuration of the guest TD, +# e.g., specific to the workload rather than the run-time or OS +# (base64 encoded SHA384 digest). Defaults to all zeros. +# +# @quote-generation-socket: socket address for Quote Generation +# Service (QGS). QGS is a daemon running on the host. Without +# it, the guest will not be able to get a TD quote for +# attestation. 
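TdxGuestProperties is consumed when a tdx-guest confidential-guest-support object is created on the command line. The sketch below only assembles an illustrative -object argument in JSON syntax; the attribute values, the QGS vsock address and the rest of the command line are assumptions and depend on the host kernel and TDX module.

    # Illustrative only: builds a '-object' argument matching
    # TdxGuestProperties.  Recent QEMU accepts JSON for -object; the same
    # keys can otherwise be spelled as key=value pairs.
    import json

    tdx_props = {
        "qom-type": "tdx-guest",
        "id": "tdx0",
        "sept-ve-disable": True,          # many Linux guests expect this
        "quote-generation-socket": {      # SocketAddress of the QGS daemon
            "type": "vsock",
            "cid": "2",
            "port": "4050",
        },
    }
    print("-object", json.dumps(tdx_props))
    # The resulting argument would then be combined with something like
    # '-machine ...,confidential-guest-support=tdx0' when starting QEMU.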
+# +# Since: 10.1 +## +{ 'struct': 'TdxGuestProperties', + 'data': { '*attributes': 'uint64', + '*sept-ve-disable': 'bool', + '*mrconfigid': 'str', + '*mrowner': 'str', + '*mrownerconfig': 'str', + '*quote-generation-socket': 'SocketAddress' } } + ## # @ThreadContextProperties: # @@ -1042,7 +1197,9 @@ ## { 'enum': 'ObjectType', 'data': [ + 'acpi-egm-memory', 'acpi-generic-initiator', + 'acpi-generic-port', 'authz-list', 'authz-listfile', 'authz-pam', @@ -1063,6 +1220,8 @@ 'filter-redirector', 'filter-replay', 'filter-rewriter', + { 'name': 'igvm-cfg', + 'if': 'CONFIG_IGVM' }, 'input-barrier', { 'name': 'input-linux', 'if': 'CONFIG_LINUX' }, @@ -1092,6 +1251,7 @@ 'sev-snp-guest', 'thread-context', 's390-pv-guest', + 'tdx-guest', 'throttle-group', 'tls-creds-anon', 'tls-creds-psk', @@ -1117,7 +1277,9 @@ 'id': 'str' }, 'discriminator': 'qom-type', 'data': { + 'acpi-egm-memory': 'AcpiEgmMemoryProperties', 'acpi-generic-initiator': 'AcpiGenericInitiatorProperties', + 'acpi-generic-port': 'AcpiGenericPortProperties', 'authz-list': 'AuthZListProperties', 'authz-listfile': 'AuthZListFileProperties', 'authz-pam': 'AuthZPAMProperties', @@ -1137,6 +1299,8 @@ 'filter-redirector': 'FilterRedirectorProperties', 'filter-replay': 'NetfilterProperties', 'filter-rewriter': 'FilterRewriterProperties', + 'igvm-cfg': { 'type': 'IgvmCfgProperties', + 'if': 'CONFIG_IGVM' }, 'input-barrier': 'InputBarrierProperties', 'input-linux': { 'type': 'InputLinuxProperties', 'if': 'CONFIG_LINUX' }, @@ -1163,6 +1327,7 @@ 'if': 'CONFIG_SECRET_KEYRING' }, 'sev-guest': 'SevGuestProperties', 'sev-snp-guest': 'SevSnpGuestProperties', + 'tdx-guest': 'TdxGuestProperties', 'thread-context': 'ThreadContextProperties', 'throttle-group': 'ThrottleGroupProperties', 'tls-creds-anon': 'TlsCredsAnonProperties', @@ -1179,7 +1344,7 @@ # Create a QOM object. # # Errors: -# - Error if @qom-type is not a valid class name +# - If @qom-type is not a valid class name # # Since: 2.0 # @@ -1201,7 +1366,7 @@ # @id: the name of the QOM object to remove # # Errors: -# - Error if @id is not a valid id for a QOM object +# - If @id is not a valid id for a QOM object # # Since: 2.0 # diff --git a/qapi/replay.json b/qapi/replay.json index 35e0c4a6926..ccf84da68ef 100644 --- a/qapi/replay.json +++ b/qapi/replay.json @@ -3,7 +3,9 @@ # ## -# = Record/replay +# ************* +# Record/replay +# ************* ## { 'include': 'common.json' } @@ -47,8 +49,8 @@ # @query-replay: # # Retrieve the record/replay information. It includes current -# instruction count which may be used for @replay-break and -# @replay-seek commands. +# instruction count which may be used for `replay-break` and +# `replay-seek` commands. # # Returns: record/replay information. # @@ -70,7 +72,7 @@ # breakpoint. When breakpoint is set, any prior one is removed. The # breakpoint may be set only in replay mode and only "in the future", # i.e. at instruction counts greater than the current one. The -# current instruction count can be observed with @query-replay. +# current instruction count can be observed with `query-replay`. # # @icount: instruction count to stop at # @@ -86,7 +88,7 @@ ## # @replay-delete-break: # -# Remove replay breakpoint which was set with @replay-break. The +# Remove replay breakpoint which was set with `replay-break`. The # command is ignored when there are no replay breakpoints. # # Since: 5.2 @@ -106,7 +108,7 @@ # snapshot and replays the execution to find the desired instruction. 
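Taken together, query-replay, replay-break, replay-delete-break and replay-seek let a client steer a replayed execution. A minimal, illustrative sequence of QMP requests (the instruction counts are made up):

    # Illustrative QMP requests for the record/replay commands above.

    # 1. Ask where the replay currently is; the reply carries the current
    #    instruction count, which replay-break/replay-seek refer to.
    query_replay = {"execute": "query-replay"}

    # 2. Arm a breakpoint "in the future", i.e. at a higher instruction count.
    replay_break = {"execute": "replay-break",
                    "arguments": {"icount": 2000000}}

    # 3. Or jump (forwards or backwards) to a specific instruction count,
    #    starting from the closest preceding snapshot.
    replay_seek = {"execute": "replay-seek",
                   "arguments": {"icount": 1500000}}

    # 4. A previously set breakpoint can be dropped again.
    replay_delete = {"execute": "replay-delete-break"}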
# When there is no preceding snapshot or the execution is not # replayed, then the command fails. Instruction count can be obtained -# with the @query-replay command. +# with the `query-replay` command. # # @icount: target instruction count # diff --git a/qapi/rocker.json b/qapi/rocker.json index 2e63dcb3d6f..5d2dbd26034 100644 --- a/qapi/rocker.json +++ b/qapi/rocker.json @@ -2,7 +2,9 @@ # vim: filetype=python ## -# = Rocker switch device +# ******************** +# Rocker switch device +# ******************** ## ## @@ -26,7 +28,7 @@ # # Return rocker switch information. # -# Returns: @Rocker information +# @name: switch name # # Since: 2.4 # @@ -42,7 +44,7 @@ ## # @RockerPortDuplex: # -# An eumeration of port duplex states. +# An enumeration of port duplex states. # # @half: half duplex # @@ -55,7 +57,7 @@ ## # @RockerPortAutoneg: # -# An eumeration of port autoneg states. +# An enumeration of port autoneg states. # # @off: autoneg is off # @@ -94,7 +96,7 @@ # # Return rocker switch port information. # -# Returns: a list of @RockerPort information +# @name: port name # # Since: 2.4 # @@ -250,7 +252,7 @@ # "action": {"goto-tbl": 10}, # "mask": {"in-pport": 4294901760} # }, -# {...}, +# ... # ]} ## { 'command': 'query-rocker-of-dpa-flows', @@ -288,8 +290,8 @@ # # @ttl-check: perform TTL check # -# .. note:: Optional members may or may not appear in the group depending -# if they're relevant to the group type. +# .. note:: Optional members may or may not appear in the group +# depending if they're relevant to the group type. # # Since: 2.4 ## diff --git a/qapi/run-state.json b/qapi/run-state.json index 287691ca0e7..4757947ca6b 100644 --- a/qapi/run-state.json +++ b/qapi/run-state.json @@ -3,7 +3,9 @@ # ## -# = VM run state +# ************ +# VM run state +# ************ ## ## @@ -18,7 +20,7 @@ # @inmigrate: guest is paused waiting for an incoming migration. Note # that this state does not tell whether the machine will start at # the end of the migration. This depends on the command-line -S -# option and any invocation of 'stop' or 'cont' that has happened +# option and any invocation of `stop` or `cont` that has happened # since QEMU was started. # # @internal-error: An internal error that prevents further guest @@ -62,15 +64,15 @@ ## # @ShutdownCause: # -# An enumeration of reasons for a Shutdown. +# An enumeration of reasons for a shutdown. # # @none: No shutdown request pending # # @host-error: An error prevents further use of guest # -# @host-qmp-quit: Reaction to the QMP command 'quit' +# @host-qmp-quit: Reaction to the QMP command `quit` # -# @host-qmp-system-reset: Reaction to the QMP command 'system_reset' +# @host-qmp-system-reset: Reaction to the QMP command `system_reset` # # @host-signal: Reaction to a signal, such as SIGINT # @@ -106,7 +108,7 @@ # # @running: true if all VCPUs are runnable, false if not runnable # -# @status: the virtual machine @RunState +# @status: the virtual machine `RunState` # # Since: 0.14 ## @@ -119,8 +121,6 @@ # # Query the run status of the VM # -# Returns: @StatusInfo reflecting the VM -# # Since: 0.14 # # .. qmp-example:: @@ -135,20 +135,20 @@ ## # @SHUTDOWN: # -# Emitted when the virtual machine has shut down, indicating that qemu +# Emitted when the virtual machine has shut down, indicating that QEMU # is about to exit. # # @guest: If true, the shutdown was triggered by a guest request (such # as a guest-initiated ACPI shutdown request or other # hardware-specific action) rather than a host request (such as -# sending qemu a SIGINT). 
(since 2.10) +# sending QEMU a SIGINT). (since 2.10) # -# @reason: The @ShutdownCause which resulted in the SHUTDOWN. +# @reason: The `ShutdownCause` which resulted in the `SHUTDOWN`. # (since 4.0) # # .. note:: If the command-line option ``-no-shutdown`` has been -# specified, qemu will not exit, and a STOP event will eventually -# follow the SHUTDOWN event. +# specified, QEMU will not exit, and a `STOP` event will eventually +# follow the `SHUTDOWN` event. # # Since: 0.12 # @@ -183,9 +183,9 @@ # @guest: If true, the reset was triggered by a guest request (such as # a guest-initiated ACPI reboot request or other hardware-specific # action) rather than a host request (such as the QMP command -# system_reset). (since 2.10) +# `system_reset`). (since 2.10) # -# @reason: The @ShutdownCause of the RESET. (since 4.0) +# @reason: The `ShutdownCause` of the `RESET`. (since 4.0) # # Since: 0.12 # @@ -247,7 +247,7 @@ # saved on disk, for example, S4 state, which is sometimes called # hibernate state # -# .. note:: QEMU shuts down (similar to event @SHUTDOWN) when entering +# .. note:: QEMU shuts down (similar to event `SHUTDOWN`) when entering # this state. # # Since: 1.2 @@ -281,8 +281,8 @@ # # @action: action that has been taken # -# .. note:: If action is "reset", "shutdown", or "pause" the WATCHDOG -# event is followed respectively by the RESET, SHUTDOWN, or STOP +# .. note:: If action is "reset", "shutdown", or "pause" the `WATCHDOG` +# event is followed respectively by the `RESET`, `SHUTDOWN`, or `STOP` # events. # # .. note:: This event is rate-limited. @@ -365,8 +365,8 @@ # @shutdown: Shutdown the VM and exit, according to the shutdown # action # -# @exit-failure: Shutdown the VM and exit with nonzero status (since -# 7.1) +# @exit-failure: Shutdown the VM and exit with nonzero status +# (since 7.1) # # Since: 6.0 ## @@ -378,7 +378,7 @@ # # Set watchdog action. # -# @action: @WatchdogAction action taken when watchdog timer expires. +# @action: `WatchdogAction` action taken when watchdog timer expires. # # Since: 2.11 # @@ -396,13 +396,13 @@ # Set the actions that will be taken by the emulator in response to # guest events. # -# @reboot: @RebootAction action taken on guest reboot. +# @reboot: `RebootAction` action taken on guest reboot. # -# @shutdown: @ShutdownAction action taken on guest shutdown. +# @shutdown: `ShutdownAction` action taken on guest shutdown. # -# @panic: @PanicAction action taken on guest panic. +# @panic: `PanicAction` action taken on guest panic. # -# @watchdog: @WatchdogAction action taken when watchdog timer expires. +# @watchdog: `WatchdogAction` action taken when watchdog timer expires. # # Since: 6.0 # @@ -501,10 +501,12 @@ # # @s390: s390 guest panic information type (Since: 2.12) # +# @tdx: tdx guest panic information type (Since: 10.1) +# # Since: 2.9 ## { 'enum': 'GuestPanicInformationType', - 'data': [ 'hyper-v', 's390' ] } + 'data': [ 'hyper-v', 's390', 'tdx' ] } ## # @GuestPanicInformation: @@ -519,28 +521,29 @@ 'base': {'type': 'GuestPanicInformationType'}, 'discriminator': 'type', 'data': {'hyper-v': 'GuestPanicInformationHyperV', - 's390': 'GuestPanicInformationS390'}} + 's390': 'GuestPanicInformationS390', + 'tdx' : 'GuestPanicInformationTdx'}} ## # @GuestPanicInformationHyperV: # # Hyper-V specific guest panic information (HV crash MSRs) # -# @arg1: for Windows, STOP code for the guest crash. For Linux, -# an error code. +# @arg1: for Windows, `STOP` code for the guest crash. For Linux, +# an error code. 
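The run-state events in this file (SHUTDOWN, RESET, WATCHDOG, and GUEST_PANICKED, which carries the GuestPanicInformation union including the new tdx variant) arrive asynchronously on the QMP connection. A minimal, illustrative listener that just prints them; the socket path and the set of events watched are assumptions:

    # Illustrative only: print run-state related QMP events as they arrive.
    import json
    import socket

    INTERESTING = {"SHUTDOWN", "RESET", "STOP", "WATCHDOG", "GUEST_PANICKED"}

    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
        s.connect("/tmp/qmp.sock")                   # made-up path
        f = s.makefile("rw", encoding="utf-8")
        f.readline()                                 # greeting
        f.write(json.dumps({"execute": "qmp_capabilities"}) + "\n")
        f.flush()
        f.readline()                                 # capabilities ack
        for line in f:
            msg = json.loads(line)
            if msg.get("event") in INTERESTING:
                # e.g. SHUTDOWN carries 'guest' and 'reason' (a ShutdownCause);
                # GUEST_PANICKED may carry hyper-v, s390 or tdx details.
                print(msg["event"], msg.get("data"))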
# -# @arg2: for Windows, first argument of the STOP. For Linux, the -# guest OS ID, which has the kernel version in bits 16-47 -# and 0x8100 in bits 48-63. +# @arg2: for Windows, first argument of the `STOP`. For Linux, the +# guest OS ID, which has the kernel version in bits 16-47 and +# 0x8100 in bits 48-63. # -# @arg3: for Windows, second argument of the STOP. For Linux, the -# program counter of the guest. +# @arg3: for Windows, second argument of the `STOP`. For Linux, the +# program counter of the guest. # -# @arg4: for Windows, third argument of the STOP. For Linux, the -# RAX register (x86) or the stack pointer (aarch64) of the guest. +# @arg4: for Windows, third argument of the `STOP`. For Linux, the +# RAX register (x86) or the stack pointer (aarch64) of the guest. # -# @arg5: for Windows, fourth argument of the STOP. For x86 Linux, the -# stack pointer of the guest. +# @arg5: for Windows, fourth argument of the `STOP`. For x86 Linux, the +# stack pointer of the guest. # # Since: 2.9 ## @@ -598,16 +601,40 @@ 'psw-addr': 'uint64', 'reason': 'S390CrashReason'}} +## +# @GuestPanicInformationTdx: +# +# TDX Guest panic information specific to TDX, as specified in the +# "Guest-Hypervisor Communication Interface (GHCI) Specification", +# section TDG.VP.VMCALL. +# +# @error-code: TD-specific error code +# +# @message: Human-readable error message provided by the guest. Not +# to be trusted. +# +# @gpa: guest-physical address of a page that contains more verbose +# error information, as zero-terminated string. Present when the +# "GPA valid" bit (bit 63) is set in @error-code. +# +# +# Since: 10.1 +## +{'struct': 'GuestPanicInformationTdx', + 'data': {'error-code': 'uint32', + 'message': 'str', + '*gpa': 'uint64'}} + ## # @MEMORY_FAILURE: # # Emitted when a memory failure occurs on host side. # -# @recipient: recipient is defined as @MemoryFailureRecipient. +# @recipient: recipient is defined as `MemoryFailureRecipient`. # # @action: action that has been taken. # -# @flags: flags for MemoryFailureAction. +# @flags: flags for `MemoryFailureAction`. # # Since: 5.2 # diff --git a/qapi/sockets.json b/qapi/sockets.json index e76fdb99253..32fac517288 100644 --- a/qapi/sockets.json +++ b/qapi/sockets.json @@ -2,7 +2,9 @@ # vim: filetype=python ## -# = Socket data types +# ***************** +# Socket data types +# ***************** ## ## @@ -29,6 +31,7 @@ # @InetSocketAddressBase: # # @host: host part of the address +# # @port: port part of the address ## { 'struct': 'InetSocketAddressBase', @@ -55,8 +58,24 @@ # @ipv6: whether to accept IPv6 addresses, default try both IPv4 and # IPv6 # -# @keep-alive: enable keep-alive when connecting to this socket. Not -# supported for passive sockets. (Since 4.2) +# @keep-alive: enable keep-alive when connecting to/listening on this socket. +# (Since 4.2, not supported for listening sockets until 10.1) +# +# @keep-alive-count: number of keep-alive packets sent before the connection is +# closed. Only supported for TCP sockets on systems where TCP_KEEPCNT +# socket option is defined (this includes Linux, Windows, macOS, FreeBSD, +# but not OpenBSD). When set to 0, system setting is used. (Since 10.1) +# +# @keep-alive-idle: time in seconds the connection needs to be idle before +# sending a keepalive packet. Only supported for TCP sockets on systems +# where TCP_KEEPIDLE socket option is defined (this includes Linux, +# Windows, macOS, FreeBSD, but not OpenBSD). When set to 0, system setting +# is used. 
(Since 10.1) +# +# @keep-alive-interval: time in seconds between keep-alive packets. Only +# supported for TCP sockets on systems where TCP_KEEPINTVL is defined (this +# includes Linux, Windows, macOS, FreeBSD, but not OpenBSD). When set to +# 0, system setting is used. (Since 10.1) # # @mptcp: enable multi-path TCP. (Since 6.1) # @@ -70,6 +89,9 @@ '*ipv4': 'bool', '*ipv6': 'bool', '*keep-alive': 'bool', + '*keep-alive-count': { 'type': 'uint32', 'if': 'HAVE_TCP_KEEPCNT' }, + '*keep-alive-idle': { 'type': 'uint32', 'if': 'HAVE_TCP_KEEPIDLE' }, + '*keep-alive-interval': { 'type': 'uint32', 'if': 'HAVE_TCP_KEEPINTVL' }, '*mptcp': { 'type': 'bool', 'if': 'HAVE_IPPROTO_MPTCP' } } } ## @@ -104,8 +126,8 @@ # # @port: port # -# .. note:: String types are used to allow for possible future hostname -# or service resolution support. +# .. note:: String types are used to allow for possible future +# hostname or service resolution support. # # Since: 2.8 ## @@ -121,7 +143,7 @@ # # @str: decimal is for file descriptor number, otherwise it's a file # descriptor name. Named file descriptors are permitted in -# monitor commands, in combination with the 'getfd' command. +# monitor commands, in combination with the `getfd` command. # Decimal file descriptors are permitted at startup or other # contexts where no monitor context is active. # @@ -189,14 +211,14 @@ 'unix': 'UnixSocketAddressWrapper', 'vsock': 'VsockSocketAddressWrapper', 'fd': 'FdSocketAddressWrapper' } } -# Note: This type is deprecated in favor of SocketAddress. The -# difference between SocketAddressLegacy and SocketAddress is that the +# Note: This type is deprecated in favor of `SocketAddress`. The +# difference between `SocketAddressLegacy` and `SocketAddress` is that the # latter has fewer ``{}`` on the wire. ## # @SocketAddressType: # -# Available SocketAddress types +# Available `SocketAddress` types # # @inet: Internet address # diff --git a/qapi/stats.json b/qapi/stats.json index efbbe26244a..151ac43c487 100644 --- a/qapi/stats.json +++ b/qapi/stats.json @@ -9,7 +9,9 @@ # SPDX-License-Identifier: GPL-2.0-or-later ## -# = Statistics +# ********** +# Statistics +# ********** ## ## @@ -87,7 +89,7 @@ # @StatsRequest: # # Indicates a set of statistics that should be returned by -# query-stats. +# `query-stats`. # # @provider: provider for which to return statistics. # @@ -112,15 +114,15 @@ ## # @StatsFilter: # -# The arguments to the query-stats command; specifies a target for +# The arguments to the `query-stats` command; specifies a target for # which to request statistics and optionally the required subset of # information for that target. # # @target: the kind of objects to query. Note that each possible -# target may enable additional filtering options +# target may enable additional filtering options # -# @providers: which providers to request statistics from, and optionally -# which named values to return within each provider +# @providers: which providers to request statistics from, and +# optionally which named values to return within each provider # # Since: 7.1 ## @@ -183,10 +185,10 @@ # Return runtime-collected statistics for objects such as the VM or # its vCPUs. # -# The arguments are a StatsFilter and specify the provider and objects +# The arguments are a `StatsFilter` and specify the provider and objects # to return statistics about. # -# Returns: a list of StatsResult, one for each provider and object +# Returns: a list of statistics, one for each provider and object # (e.g., for each vCPU). 
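A StatsFilter combines the target with an optional provider/name filter. An illustrative query-stats request restricted to a couple of per-vCPU counters; the provider and statistic names are examples only and depend on the host:

    # Illustrative query-stats payload; provider and statistic names vary
    # with the host kernel and are examples only.
    query_stats = {
        "execute": "query-stats",
        "arguments": {
            "target": "vcpu",
            "providers": [
                {"provider": "kvm",
                 "names": ["exits", "halt_wait_ns"]},
            ],
        },
    }
    # The reply is a list with one entry per provider and object
    # (here: one per vCPU), each carrying the requested statistics.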
# # Since: 7.1 @@ -203,7 +205,7 @@ # # @name: name of the statistic; each element of the schema is uniquely # identified by a target, a provider (both available in -# @StatsSchema) and the name. +# `StatsSchema`) and the name. # # @type: kind of statistic. # diff --git a/qapi/string-input-visitor.c b/qapi/string-input-visitor.c index 3f1b9e9b413..f4eecc73d06 100644 --- a/qapi/string-input-visitor.c +++ b/qapi/string-input-visitor.c @@ -15,7 +15,7 @@ #include "qapi/string-input-visitor.h" #include "qapi/visitor-impl.h" #include "qapi/qmp/qerror.h" -#include "qapi/qmp/qnull.h" +#include "qobject/qnull.h" #include "qemu/option.h" #include "qemu/cutils.h" diff --git a/qapi/tpm.json b/qapi/tpm.json index a16a72edb98..3f2850a5733 100644 --- a/qapi/tpm.json +++ b/qapi/tpm.json @@ -3,7 +3,9 @@ # ## -# = TPM (trusted platform module) devices +# ************************************* +# TPM (trusted platform module) devices +# ************************************* ## ## @@ -27,8 +29,6 @@ # # Return a list of supported TPM models # -# Returns: a list of TpmModel -# # Since: 1.5 # # .. qmp-example:: @@ -58,8 +58,6 @@ # # Return a list of supported TPM types # -# Returns: a list of TpmType -# # Since: 1.5 # # .. qmp-example:: diff --git a/qapi/trace.json b/qapi/trace.json index eb5f63f5135..de369dae6b5 100644 --- a/qapi/trace.json +++ b/qapi/trace.json @@ -7,7 +7,9 @@ # See the COPYING file in the top-level directory. ## -# = Tracing +# ******* +# Tracing +# ******* ## ## @@ -47,7 +49,7 @@ # # @name: Event name pattern (case-sensitive glob). # -# Returns: a list of @TraceEventInfo for the matching events +# Returns: a list of info for the matching events # # Since: 2.2 # diff --git a/qapi/transaction.json b/qapi/transaction.json index b0ae3437eba..4b4eb09bf38 100644 --- a/qapi/transaction.json +++ b/qapi/transaction.json @@ -3,7 +3,9 @@ # ## -# = Transactions +# ************ +# Transactions +# ************ ## { 'include': 'block-core.json' } @@ -21,7 +23,7 @@ ## # @ActionCompletionMode: # -# An enumeration of Transactional completion modes. +# An enumeration of transactional completion modes. # # @individual: Do not attempt to cancel any other Actions if any # Actions fail after the Transaction request succeeds. All @@ -67,8 +69,8 @@ # # Features: # -# @deprecated: Member @drive-backup is deprecated. Use member -# @blockdev-backup instead. +# @deprecated: Member `drive-backup` is deprecated. Use member +# `blockdev-backup` instead. # # Since: 1.1 ## @@ -156,7 +158,7 @@ # @TransactionAction: # # A discriminated record of operations that can be performed with -# @transaction. +# `transaction`. # # @type: the operation to be performed # @@ -187,7 +189,7 @@ # # @completion-mode: Controls how jobs launched asynchronously by # Actions will complete or fail as a group. See -# @ActionCompletionMode for details. +# `ActionCompletionMode` for details. # # Since: 2.5 ## @@ -223,23 +225,23 @@ # exists, the request will be rejected. Only some image formats # support it, for example, qcow2, and rbd, # -# On failure, qemu will try delete the newly created internal snapshot +# On failure, QEMU will try delete the newly created internal snapshot # in the transaction. When an I/O error occurs during deletion, the # user needs to fix it later with qemu-img or other command. # -# @actions: List of @TransactionAction; information needed for the +# @actions: List of `TransactionAction`; information needed for the # respective operations. 
# # @properties: structure of additional options to control the -# execution of the transaction. See @TransactionProperties for +# execution of the transaction. See `TransactionProperties` for # additional detail. # # Errors: # - Any errors from commands in the transaction # # .. note:: The transaction aborts on the first failure. Therefore, -# there will be information on only one failed operation returned in -# an error condition, and subsequent actions will not have been +# there will be information on only one failed operation returned +# in an error condition, and subsequent actions will not have been # attempted. # # Since: 1.1 diff --git a/qapi/uefi.json b/qapi/uefi.json new file mode 100644 index 00000000000..a206c2e9539 --- /dev/null +++ b/qapi/uefi.json @@ -0,0 +1,66 @@ +# -*- Mode: Python -*- +# vim: filetype=python +# + +## +# ******************* +# UEFI Variable Store +# ******************* +# +# The QEMU efi variable store implementation (hw/uefi/) uses this to +# store non-volatile variables in json format on disk. +# +# This is an existing format already supported by (at least) two other +# projects, specifically https://gitlab.com/kraxel/virt-firmware and +# https://github.com/awslabs/python-uefivars. +## + +## +# @UefiVariable: +# +# UEFI Variable. Check the UEFI specifification for more detailed +# information on the fields. +# +# @guid: variable namespace GUID +# +# @name: variable name, in UTF-8 encoding. +# +# @attr: variable attributes. +# +# @data: variable value, encoded as hex string. +# +# @time: variable modification time. EFI_TIME struct, encoded as hex +# string. Used only for authenticated variables, where the +# EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS attribute bit +# is set. +# +# @digest: variable certificate digest. Used to verify the signature +# of updates for authenticated variables. UEFI has two kinds of +# authenticated variables. The secure boot variables ('PK', +# 'KEK', 'db' and 'dbx') have hard coded signature checking rules. +# For other authenticated variables the firmware stores a digest +# of the signing certificate at variable creation time, and any +# updates must be signed with the same certificate. +# +# Since: 10.0 +## +{ 'struct' : 'UefiVariable', + 'data' : { 'guid' : 'str', + 'name' : 'str', + 'attr' : 'int', + 'data' : 'str', + '*time' : 'str', + '*digest' : 'str'}} + +## +# @UefiVarStore: +# +# @version: currently always 2 +# +# @variables: list of UEFI variables +# +# Since: 10.0 +## +{ 'struct' : 'UefiVarStore', + 'data' : { 'version' : 'int', + 'variables' : [ 'UefiVariable' ] }} diff --git a/qapi/ui.json b/qapi/ui.json index 5daca5168ca..1b2f4a47693 100644 --- a/qapi/ui.json +++ b/qapi/ui.json @@ -3,7 +3,9 @@ # ## -# = Remote desktop +# ************** +# Remote desktop +# ************** ## { 'include': 'common.json' } @@ -39,7 +41,7 @@ ## # @SetPasswordOptions: # -# Options for set_password. +# Options for `set_password`. # # @protocol: # - 'vnc' to modify the VNC server password @@ -48,8 +50,8 @@ # @password: the new password # # @connected: How to handle existing clients when changing the -# password. If nothing is specified, defaults to 'keep'. For VNC, -# only 'keep' is currently implemented. +# password. If nothing is specified, defaults to 'keep'. For +# VNC, only 'keep' is currently implemented. # # Since: 7.0 ## @@ -63,7 +65,7 @@ ## # @SetPasswordOptionsVnc: # -# Options for set_password specific to the VNC protocol. +# Options for `set_password` specific to the VNC protocol. 
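set_password and expire_password take the option structs documented here. Illustrative payloads; the password and expiry values are obviously made up:

    # Illustrative QMP payloads for the password management commands above.

    # Change the VNC password for the default display; existing clients stay
    # connected ('keep' is the only connected= policy VNC implements).
    set_password = {"execute": "set_password",
                    "arguments": {"protocol": "vnc",
                                  "password": "example-only"}}

    # Make the password expire 60 seconds from now.  '+INT' is relative to
    # the *server's* clock, which is usually the safer form to use.
    expire_password = {"execute": "expire_password",
                       "arguments": {"protocol": "vnc",
                                     "time": "+60"}}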
# # @display: The id of the display where the password should be # changed. Defaults to the first. @@ -94,7 +96,7 @@ ## # @ExpirePasswordOptions: # -# General options for expire_password. +# General options for `expire_password`. # # @protocol: # - 'vnc' to modify the VNC server expiration @@ -107,10 +109,11 @@ # - '+INT' where INT is the number of seconds from now (integer) # - 'INT' where INT is the absolute time in seconds # -# .. note:: Time is relative to the server and currently there is no way -# to coordinate server time with client time. It is not recommended -# to use the absolute time version of the @time parameter unless -# you're sure you are on the same machine as the QEMU instance. +# .. note:: Time is relative to the server and currently there is no +# way to coordinate server time with client time. It is not +# recommended to use the absolute time version of the @time +# parameter unless you're sure you are on the same machine as the +# QEMU instance. # # Since: 7.0 ## @@ -123,7 +126,7 @@ ## # @ExpirePasswordOptionsVnc: # -# Options for expire_password specific to the VNC protocol. +# Options for `expire_password` specific to the VNC protocol. # # @display: The id of the display where the expiration should be # changed. Defaults to the first. @@ -174,15 +177,15 @@ # @filename: the path of a new file to store the image # # @device: ID of the display device that should be dumped. If this -# parameter is missing, the primary display will be used. (Since -# 2.12) +# parameter is missing, the primary display will be used. +# (Since 2.12) # # @head: head to use in case the device supports multiple heads. If # this parameter is missing, head #0 will be used. Also note that # the head can only be specified in conjunction with the device # ID. (Since 2.12) # -# @format: image format for screendump. (default: ppm) (Since 7.1) +# @format: image format for `screendump`. (default: ppm) (Since 7.1) # # Since: 0.14 # @@ -199,7 +202,8 @@ 'if': 'CONFIG_PIXMAN' } ## -# == Spice +# Spice +# ===== ## ## @@ -309,7 +313,7 @@ # unknown if spice server doesn't provide this information. # (since: 1.1) # -# @channels: a list of @SpiceChannel for each active spice channel +# @channels: a list of `SpiceChannel` for each active spice channel # # Since: 0.14 ## @@ -322,9 +326,7 @@ ## # @query-spice: # -# Returns information about the current SPICE server -# -# Returns: @SpiceInfo +# Return information about the current SPICE server # # Since: 0.14 # @@ -460,7 +462,8 @@ 'if': 'CONFIG_SPICE' } ## -# == VNC +# VNC +# === ## ## @@ -560,7 +563,7 @@ # - 'vencrypt+x509+sasl' if VEncrypt is used with x509 and SASL # auth # -# @clients: a list of @VncClientInfo of all currently connected +# @clients: a list of `VncClientInfo` of all currently connected # clients # # Since: 0.14 @@ -623,12 +626,12 @@ # # @id: vnc server name. # -# @server: A list of @VncBasincInfo describing all listening sockets. -# The list can be empty (in case the vnc server is disabled). It +# @server: A list of `VncBasicInfo` describing all listening sockets. +# The list can be empty (in case the vnc server is disabled). It # also may have multiple entries: normal + websocket, possibly # also ipv4 + ipv6 in the future. # -# @clients: A list of @VncClientInfo of all currently connected +# @clients: A list of `VncClientInfo` of all currently connected # clients. The list can be empty, for obvious reasons. 
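Clients usually obtain this information through query-vnc or query-vnc-servers. An illustrative walk over a query-vnc-servers reply, with field access mirroring VncInfo2, VncBasicInfo and VncClientInfo as documented here; the helper name is an assumption:

    # Illustrative handling of a query-vnc-servers reply.  'reply' stands for
    # the decoded {"return": [...]} payload received over QMP.
    def summarize_vnc(reply):
        for srv in reply["return"]:
            print("vnc server", srv["id"], "auth:", srv["auth"])
            for sock in srv["server"]:        # listening sockets, may be empty
                print("  listening on", sock["host"], sock["service"])
            for client in srv["clients"]:     # connected clients, may be empty
                print("  client", client["host"], client["service"])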
# # @auth: The current authentication type used by the non-websockets @@ -653,9 +656,7 @@ ## # @query-vnc: # -# Returns information about the current VNC server -# -# Returns: @VncInfo +# Return information about the current VNC server # # Since: 0.14 # @@ -684,9 +685,7 @@ ## # @query-vnc-servers: # -# Returns a list of vnc servers. The list can be empty. -# -# Returns: a list of @VncInfo2 +# Return a list of vnc servers. The list can be empty. # # Since: 2.3 ## @@ -719,8 +718,8 @@ # # @client: client information # -# .. note:: This event is emitted before any authentication takes place, -# thus the authentication ID is not provided. +# .. note:: This event is emitted before any authentication takes +# place, thus the authentication ID is not provided. # # Since: 0.13 # @@ -793,7 +792,9 @@ 'if': 'CONFIG_VNC' } ## -# = Input +# ***** +# Input +# ***** ## ## @@ -819,9 +820,9 @@ ## # @query-mice: # -# Returns information about each active mouse device +# Return information about each active mouse device # -# Returns: a list of @MouseInfo for each device +# Returns: a list of info for each device # # Since: 0.14 # @@ -851,7 +852,7 @@ # # An enumeration of key name. # -# This is used by the @send-key command. +# This is used by the `send-key` command. # # @unmapped: since 2.0 # @@ -948,6 +949,7 @@ # Since: 1.3 ## { 'enum': 'QKeyCode', + 'prefix': 'Q_KEY_CODE', 'data': [ 'unmapped', 'shift', 'shift_r', 'alt', 'alt_r', 'ctrl', 'ctrl_r', 'menu', 'esc', '1', '2', '3', '4', '5', '6', '7', '8', @@ -1021,10 +1023,10 @@ # # Send keys to guest. # -# @keys: An array of @KeyValue elements. All @KeyValues in this array -# are simultaneously sent to the guest. A @KeyValue.number value -# is sent directly to the guest, while @KeyValue.qcode must be a -# valid @QKeyCode value +# @keys: An array of `KeyValue` elements. All @KeyValues in this array +# are simultaneously sent to the guest. A `KeyValue`.number value +# is sent directly to the guest, while `KeyValue`.qcode must be a +# valid `QKeyCode` value # # @hold-time: time to delay key up events, milliseconds. Defaults to # 100 @@ -1131,7 +1133,7 @@ # @axis: Which axis is referenced by @value. # # @value: Pointer position. For absolute coordinates the valid range -# is 0 -> 0x7ffff +# is 0 to 0x7fff. # # Since: 2.0 ## @@ -1261,12 +1263,12 @@ # @head: head to send event(s) to, in case the display device supports # multiple scanouts. # -# @events: List of InputEvent union. +# @events: List of `InputEvent` union. # # Since: 2.6 # # .. note:: The consoles are visible in the qom tree, under -# ``/backend/console[$index]``. They have a device link and head +# ``/backend/console[$index]``. They have a device link and head # property, so it is possible to map which console belongs to which # device and display. # @@ -1333,13 +1335,20 @@ # @show-menubar: Display the main window menubar. Defaults to "on". # (Since 8.0) # +# @keep-aspect-ratio: Keep width/height aspect ratio of guest content when +# resizing host window. Defaults to "on". (Since 10.1) +# +# @scale: Set preferred scale of the display. Defaults to 1.0. (Since 10.1) +# # Since: 2.12 ## { 'struct' : 'DisplayGTK', - 'data' : { '*grab-on-hover' : 'bool', - '*zoom-to-fit' : 'bool', - '*show-tabs' : 'bool', - '*show-menubar' : 'bool' } } + 'data' : { '*grab-on-hover' : 'bool', + '*zoom-to-fit' : 'bool', + '*show-tabs' : 'bool', + '*show-menubar' : 'bool', + '*keep-aspect-ratio' : 'bool', + '*scale' : 'number' } } ## # @DisplayEGLHeadless: @@ -1365,7 +1374,7 @@ # first available node on the host. 
# # @p2p: Whether to use peer-to-peer connections (accepted through -# @add_client). +# `add_client`). # # @audiodev: Use the specified DBus audiodev to export audio. # @@ -1416,11 +1425,11 @@ # # @left-command-key: Enable/disable forwarding of left command key to # guest. Allows command-tab window switching on the host without -# sending this key to the guest when "off". Defaults to "on" +# sending this key to the guest when "off". Defaults to "on" # # @full-grab: Capture all key presses, including system combos. This # requires accessibility permissions, since it performs a global -# grab on key events. (default: off) See +# grab on key events. (default: off) See # https://support.apple.com/en-in/guide/mac-help/mh32356/mac # # @swap-opt-cmd: Swap the Option and Command keys so that their key @@ -1432,7 +1441,7 @@ # "off". (Since 8.2) # # @zoom-interpolation: Apply interpolation to smooth output when -# zoom-to-fit is enabled. Defaults to "off". (Since 9.0) +# zoom-to-fit is enabled. Defaults to "off". (Since 9.0) # # Since: 7.0 ## @@ -1524,12 +1533,12 @@ # # Display (user interface) options. # -# @type: Which DisplayType qemu should use. +# @type: Which `DisplayType` QEMU should use. # # @full-screen: Start user interface in fullscreen mode # (default: off). # -# @window-close: Allow to quit qemu with window close button +# @window-close: Allow to quit QEMU with window close button # (default: on). # # @show-cursor: Force showing the mouse cursor (default: off). @@ -1560,9 +1569,7 @@ ## # @query-display-options: # -# Returns information about display configuration -# -# Returns: @DisplayOptions +# Return information about display configuration # # Since: 3.1 ## diff --git a/qapi/vfio.json b/qapi/vfio.json index 40cbcde02eb..a1a9c5b673d 100644 --- a/qapi/vfio.json +++ b/qapi/vfio.json @@ -3,11 +3,13 @@ # ## -# = VFIO devices +# ************ +# VFIO devices +# ************ ## ## -# @VfioMigrationState: +# @QapiVfioMigrationState: # # An enumeration of the VFIO device migration states. # @@ -15,16 +17,16 @@ # # @running: The device is running. # -# @stop-copy: The device is stopped and its internal state is available -# for reading. +# @stop-copy: The device is stopped and its internal state is +# available for reading. # # @resuming: The device is stopped and its internal state is available # for writing. # # @running-p2p: The device is running in the P2P quiescent state. # -# @pre-copy: The device is running, tracking its internal state and its -# internal state is available for reading. +# @pre-copy: The device is running, tracking its internal state and +# its internal state is available for reading. 
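These migration states are reported through the VFIO_MIGRATION event defined just below in this file. A small, illustrative filter that watches for a device reaching stop-copy, with event-stream handling as in the earlier listener sketch; the helper name is an assumption:

    # Illustrative: react to VFIO_MIGRATION events pulled from a QMP event
    # stream ('events' stands for an iterable of already-decoded messages).
    def watch_vfio_migration(events):
        for msg in events:
            if msg.get("event") != "VFIO_MIGRATION":
                continue
            data = msg["data"]
            # device-id, qom-path and device-state come from the schema below.
            print(data["device-id"], data["qom-path"], "->",
                  data["device-state"])
            if data["device-state"] == "stop-copy":
                print("device state is now readable for migration")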
# # @pre-copy-p2p: The device is running in the P2P quiescent state, # tracking its internal state and its internal state is available @@ -32,10 +34,9 @@ # # Since: 9.1 ## -{ 'enum': 'VfioMigrationState', +{ 'enum': 'QapiVfioMigrationState', 'data': [ 'stop', 'running', 'stop-copy', 'resuming', 'running-p2p', - 'pre-copy', 'pre-copy-p2p' ], - 'prefix': 'QAPI_VFIO_MIGRATION_STATE' } + 'pre-copy', 'pre-copy-p2p' ] } ## # @VFIO_MIGRATION: @@ -63,5 +64,5 @@ 'data': { 'device-id': 'str', 'qom-path': 'str', - 'device-state': 'VfioMigrationState' + 'device-state': 'QapiVfioMigrationState' } } diff --git a/qapi/virtio.json b/qapi/virtio.json index 26df8b3064b..9d652fe4a8c 100644 --- a/qapi/virtio.json +++ b/qapi/virtio.json @@ -3,7 +3,9 @@ # ## -# = Virtio devices +# ************** +# Virtio devices +# ************** ## ## @@ -24,7 +26,7 @@ ## # @x-query-virtio: # -# Returns a list of all realized VirtIODevices +# Return a list of all realized VirtIODevices # # Features: # @@ -135,7 +137,7 @@ # @num-vqs: VirtIODevice virtqueue count. This is the number of # active virtqueues being used by the VirtIODevice. # -# @status: VirtIODevice configuration status (VirtioDeviceStatus) +# @status: VirtIODevice configuration status (`VirtioDeviceStatus`) # # @isr: VirtIODevice ISR # @@ -199,7 +201,7 @@ # # @unstable: This command is meant for debugging. # -# Returns: VirtioStatus of the virtio device +# Returns: Status of the virtio device # # Since: 7.2 # @@ -563,21 +565,21 @@ # # @unstable: This command is meant for debugging. # -# Returns: VirtQueueStatus of the VirtQueue +# Returns: Status of the queue # # .. note:: last_avail_idx will not be displayed in the case where the # selected VirtIODevice has a running vhost device and the # VirtIODevice VirtQueue index (queue) does not exist for the -# corresponding vhost device vhost_virtqueue. Also, shadow_avail_idx -# will not be displayed in the case where the selected VirtIODevice -# has a running vhost device. +# corresponding vhost device vhost_virtqueue. Also, +# shadow_avail_idx will not be displayed in the case where the +# selected VirtIODevice has a running vhost device. # # Since: 7.2 # # .. qmp-example:: # :annotated: # -# Get VirtQueueStatus for virtio-vsock (vhost-vsock running) +# Get `VirtQueueStatus` for virtio-vsock (vhost-vsock running) # :: # # -> { "execute": "x-query-virtio-queue-status", @@ -604,7 +606,7 @@ # .. qmp-example:: # :annotated: # -# Get VirtQueueStatus for virtio-serial (no vhost) +# Get `VirtQueueStatus` for virtio-serial (no vhost) # :: # # -> { "execute": "x-query-virtio-queue-status", @@ -698,7 +700,7 @@ # # @unstable: This command is meant for debugging. # -# Returns: VirtVhostQueueStatus of the vhost_virtqueue +# Returns: Status of the vhost_virtqueue # # Since: 7.2 # @@ -816,7 +818,7 @@ # # @index: Index of the element in the queue # -# @descs: List of descriptors (VirtioRingDesc) +# @descs: List of descriptors (`VirtioRingDesc`) # # @avail: VRingAvail info # @@ -847,8 +849,6 @@ # # @unstable: This command is meant for debugging. # -# Returns: VirtioQueueElement information -# # Since: 7.2 # # .. qmp-example:: @@ -963,17 +963,31 @@ { 'struct': 'IOThreadVirtQueueMapping', 'data': { 'iothread': 'str', '*vqs': ['uint16'] } } +## +# @VirtIOGPUOutput: +# +# Describes configuration of a VirtIO GPU output. 
+# +# @name: the name of the output +# +# Since: 10.1 +## + +{ 'struct': 'VirtIOGPUOutput', + 'data': { 'name': 'str' } } + ## # @DummyVirtioForceArrays: # # Not used by QMP; hack to let us use IOThreadVirtQueueMappingList -# internally +# and VirtIOGPUOutputList internally # # Since: 9.0 ## { 'struct': 'DummyVirtioForceArrays', - 'data': { 'unused-iothread-vq-mapping': ['IOThreadVirtQueueMapping'] } } + 'data': { 'unused-iothread-vq-mapping': ['IOThreadVirtQueueMapping'], + 'unused-virtio-gpu-output': ['VirtIOGPUOutput'] } } ## # @GranuleMode: @@ -992,3 +1006,17 @@ ## { 'enum': 'GranuleMode', 'data': [ '4k', '8k', '16k', '64k', 'host' ] } + +## +# @VMAppleVirtioBlkVariant: +# +# @unspecified: The default, not a valid setting. +# +# @root: Block device holding the root volume +# +# @aux: Block device holding auxiliary data required for boot +# +# Since: 9.2 +## +{ 'enum': 'VMAppleVirtioBlkVariant', + 'data': [ 'unspecified', 'root', 'aux' ] } diff --git a/qapi/yank.json b/qapi/yank.json index 30f46c97c98..f3cd5c15d60 100644 --- a/qapi/yank.json +++ b/qapi/yank.json @@ -3,13 +3,15 @@ # ## -# = Yank feature +# ************ +# Yank feature +# ************ ## ## # @YankInstanceType: # -# An enumeration of yank instance types. See @YankInstance for more +# An enumeration of yank instance types. See `YankInstance` for more # information. # # Since: 6.0 @@ -20,7 +22,7 @@ ## # @YankInstanceBlockNode: # -# Specifies which block graph node to yank. See @YankInstance for +# Specifies which block graph node to yank. See `YankInstance` for # more information. # # @node-name: the name of the block graph node @@ -33,7 +35,7 @@ ## # @YankInstanceChardev: # -# Specifies which character device to yank. See @YankInstance for +# Specifies which character device to yank. See `YankInstance` for # more information. # # @id: the chardev's ID @@ -46,7 +48,7 @@ ## # @YankInstance: # -# A yank instance can be yanked with the @yank qmp command to recover +# A yank instance can be yanked with the `yank` qmp command to recover # from a hanging QEMU. # # @type: yank instance type @@ -57,9 +59,9 @@ # nbd server without attempting to reconnect. # - socket chardev: Yanking it will shut down the connected socket. # - migration: Yanking it will shut down all migration connections. -# Unlike @migrate_cancel, it will not notify the migration process, +# Unlike `migrate_cancel`, it will not notify the migration process, # so migration will go into @failed state, instead of @cancelled -# state. @yank should be used to recover from hangs. +# state. `yank` should be used to recover from hangs. # # Since: 6.0 ## @@ -74,7 +76,7 @@ # @yank: # # Try to recover from hanging QEMU by yanking the specified instances. -# See @YankInstance for more information. +# See `YankInstance` for more information. # # @instances: the instances to be yanked # @@ -100,9 +102,9 @@ ## # @query-yank: # -# Query yank instances. See @YankInstance for more information. +# Query yank instances. See `YankInstance` for more information. # -# Returns: list of @YankInstance +# TODO: This line is a hack to separate the example from the body # # .. 
qmp-example:: # diff --git a/qemu-img-cmds.hx b/qemu-img-cmds.hx index c9dd70a8920..2c5a8a28f9d 100644 --- a/qemu-img-cmds.hx +++ b/qemu-img-cmds.hx @@ -84,9 +84,9 @@ SRST ERST DEF("snapshot", img_snapshot, - "snapshot [--object objectdef] [--image-opts] [-U] [-q] [-l | -a snapshot | -c snapshot | -d snapshot] filename") + "snapshot [--object objectdef] [-f fmt | --image-opts] [-U] [-q] [-l | -a snapshot | -c snapshot | -d snapshot] filename") SRST -.. option:: snapshot [--object OBJECTDEF] [--image-opts] [-U] [-q] [-l | -a SNAPSHOT | -c SNAPSHOT | -d SNAPSHOT] FILENAME +.. option:: snapshot [--object OBJECTDEF] [-f FMT | --image-opts] [-U] [-q] [-l | -a SNAPSHOT | -c SNAPSHOT | -d SNAPSHOT] FILENAME ERST DEF("rebase", img_rebase, diff --git a/qemu-img.c b/qemu-img.c index 7668f86769f..7a162fdc08d 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -32,8 +32,8 @@ #include "qapi/qapi-commands-block-core.h" #include "qapi/qapi-visit-block-core.h" #include "qapi/qobject-output-visitor.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qjson.h" +#include "qobject/qdict.h" #include "qemu/cutils.h" #include "qemu/config-file.h" #include "qemu/option.h" @@ -45,7 +45,7 @@ #include "qemu/units.h" #include "qemu/memalign.h" #include "qom/object_interfaces.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "block/block_int.h" #include "block/blockjob.h" #include "block/dirty-bitmap.h" @@ -60,7 +60,8 @@ typedef struct img_cmd_t { const char *name; - int (*handler)(int argc, char **argv); + int (*handler)(const struct img_cmd_t *ccmd, int argc, char **argv); + const char *description; } img_cmd_t; enum { @@ -72,7 +73,6 @@ enum { OPTION_FLUSH_INTERVAL = 261, OPTION_NO_DRAIN = 262, OPTION_TARGET_IMAGE_OPTS = 263, - OPTION_SIZE = 264, OPTION_PREALLOCATION = 265, OPTION_SHRINK = 266, OPTION_SALVAGE = 267, @@ -96,13 +96,15 @@ typedef enum OutputFormat { /* Default to cache=writeback as data integrity is not important for qemu-img */ #define BDRV_DEFAULT_CACHE "writeback" -static void format_print(void *opaque, const char *name) +static G_NORETURN +void tryhelp(const char *argv0) { - printf(" %s", name); + error_printf("Try '%s --help' for more information\n", argv0); + exit(EXIT_FAILURE); } -static G_NORETURN G_GNUC_PRINTF(1, 2) -void error_exit(const char *fmt, ...) +static G_NORETURN G_GNUC_PRINTF(2, 3) +void error_exit(const char *argv0, const char *fmt, ...) { va_list ap; @@ -110,128 +112,43 @@ void error_exit(const char *fmt, ...) error_vreport(fmt, ap); va_end(ap); - error_printf("Try 'qemu-img --help' for more information\n"); - exit(EXIT_FAILURE); -} - -static G_NORETURN -void missing_argument(const char *option) -{ - error_exit("missing argument for option '%s'", option); + tryhelp(argv0); } +/* + * Print --help output for a command and exit. + * @syntax and @description are multi-line with trailing EOL + * (to allow easy extending of the text) + * @syntax has each subsequent line indented by 8 chars. + * @description is indented by 2 chars for argument on each own line, + * and with 5 chars for argument description (like -h arg below). 
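The qemu-img change replaces the single monolithic help text with a command table that carries a per-command handler plus a description, and a cmd_help() that prints usage for one command and exits. A toy Python sketch of the same dispatch pattern, purely to show the shape of the refactor (names and commands are illustrative, not qemu-img itself):

    # Toy sketch of the per-command help/dispatch structure the C refactor
    # introduces; names and commands are made up, not qemu-img's own.
    import sys

    def cmd_create(args):
        print("would create an image from", args)

    def cmd_info(args):
        print("would show info for", args)

    # Mirrors img_cmd_t: name -> (handler, one-line description).
    COMMANDS = {
        "create": (cmd_create, "Create a new disk image"),
        "info":   (cmd_info,   "Show information about a disk image"),
    }

    def cmd_help(name):
        handler, description = COMMANDS[name]
        print(f"Usage:\n  tool {name} [options]\n{description}.")
        sys.exit(0)

    def main(argv):
        if not argv or argv[0] in ("-h", "--help"):
            for name, (_, desc) in COMMANDS.items():
                print(f"  {name:<8} {desc}")
            return 0
        name, *rest = argv
        if name not in COMMANDS:
            print(f"unrecognized command '{name}', try --help", file=sys.stderr)
            return 1
        if "--help" in rest:
            cmd_help(name)
        COMMANDS[name][0](rest)
        return 0

    if __name__ == "__main__":
        sys.exit(main(sys.argv[1:]))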
+ */ static G_NORETURN -void unrecognized_option(const char *option) +void cmd_help(const img_cmd_t *ccmd, + const char *syntax, const char *arguments) { - error_exit("unrecognized option '%s'", option); + printf( +"Usage:\n" +" %s %s %s\n" +"%s.\n" +"\n" +"Arguments:\n" +" -h, --help\n" +" print this help and exit\n" +"%s\n", + "qemu-img", ccmd->name, syntax, ccmd->description, arguments); + exit(EXIT_SUCCESS); } -/* Please keep in synch with docs/tools/qemu-img.rst */ -static G_NORETURN -void help(void) +static OutputFormat parse_output_format(const char *argv0, const char *arg) { - const char *help_msg = - QEMU_IMG_VERSION - "usage: qemu-img [standard options] command [command options]\n" - "QEMU disk image utility\n" - "\n" - " '-h', '--help' display this help and exit\n" - " '-V', '--version' output version information and exit\n" - " '-T', '--trace' [[enable=]][,events=][,file=]\n" - " specify tracing options\n" - "\n" - "Command syntax:\n" -#define DEF(option, callback, arg_string) \ - " " arg_string "\n" -#include "qemu-img-cmds.h" -#undef DEF - "\n" - "Command parameters:\n" - " 'filename' is a disk image filename\n" - " 'objectdef' is a QEMU user creatable object definition. See the qemu(1)\n" - " manual page for a description of the object properties. The most common\n" - " object type is a 'secret', which is used to supply passwords and/or\n" - " encryption keys.\n" - " 'fmt' is the disk image format. It is guessed automatically in most cases\n" - " 'cache' is the cache mode used to write the output disk image, the valid\n" - " options are: 'none', 'writeback' (default, except for convert), 'writethrough',\n" - " 'directsync' and 'unsafe' (default for convert)\n" - " 'src_cache' is the cache mode used to read input disk images, the valid\n" - " options are the same as for the 'cache' option\n" - " 'size' is the disk image size in bytes. Optional suffixes\n" - " 'k' or 'K' (kilobyte, 1024), 'M' (megabyte, 1024k), 'G' (gigabyte, 1024M),\n" - " 'T' (terabyte, 1024G), 'P' (petabyte, 1024T) and 'E' (exabyte, 1024P) are\n" - " supported. 'b' is ignored.\n" - " 'output_filename' is the destination disk image filename\n" - " 'output_fmt' is the destination format\n" - " 'options' is a comma separated list of format specific options in a\n" - " name=value format. Use -o help for an overview of the options supported by\n" - " the used format\n" - " 'snapshot_param' is param used for internal snapshot, format\n" - " is 'snapshot.id=[ID],snapshot.name=[NAME]', or\n" - " '[ID_OR_NAME]'\n" - " '-c' indicates that target image must be compressed (qcow format only)\n" - " '-u' allows unsafe backing chains. For rebasing, it is assumed that old and\n" - " new backing file match exactly. The image doesn't need a working\n" - " backing file before rebasing in this case (useful for renaming the\n" - " backing file). For image creation, allow creating without attempting\n" - " to open the backing file.\n" - " '-h' with or without a command shows this help and lists the supported formats\n" - " '-p' show progress of command (only certain commands)\n" - " '-q' use Quiet mode - do not print any output (except errors)\n" - " '-S' indicates the consecutive number of bytes (defaults to 4k) that must\n" - " contain only zeros for qemu-img to create a sparse image during\n" - " conversion. 
If the number of bytes is 0, the source will not be scanned for\n" - " unallocated or zero sectors, and the destination image will always be\n" - " fully allocated\n" - " '--output' takes the format in which the output must be done (human or json)\n" - " '-n' skips the target volume creation (useful if the volume is created\n" - " prior to running qemu-img)\n" - "\n" - "Parameters to bitmap subcommand:\n" - " 'bitmap' is the name of the bitmap to manipulate, through one or more\n" - " actions from '--add', '--remove', '--clear', '--enable', '--disable',\n" - " or '--merge source'\n" - " '-g granularity' sets the granularity for '--add' actions\n" - " '-b source' and '-F src_fmt' tell '--merge' actions to find the source\n" - " bitmaps from an alternative file\n" - "\n" - "Parameters to check subcommand:\n" - " '-r' tries to repair any inconsistencies that are found during the check.\n" - " '-r leaks' repairs only cluster leaks, whereas '-r all' fixes all\n" - " kinds of errors, with a higher risk of choosing the wrong fix or\n" - " hiding corruption that has already occurred.\n" - "\n" - "Parameters to convert subcommand:\n" - " '--bitmaps' copies all top-level persistent bitmaps to destination\n" - " '-m' specifies how many coroutines work in parallel during the convert\n" - " process (defaults to 8)\n" - " '-W' allow to write to the target out of order rather than sequential\n" - "\n" - "Parameters to snapshot subcommand:\n" - " 'snapshot' is the name of the snapshot to create, apply or delete\n" - " '-a' applies a snapshot (revert disk to saved state)\n" - " '-c' creates a snapshot\n" - " '-d' deletes a snapshot\n" - " '-l' lists all snapshots in the given image\n" - "\n" - "Parameters to compare subcommand:\n" - " '-f' first image format\n" - " '-F' second image format\n" - " '-s' run in Strict mode - fail on different image size or sector allocation\n" - "\n" - "Parameters to dd subcommand:\n" - " 'bs=BYTES' read and write up to BYTES bytes at a time " - "(default: 512)\n" - " 'count=N' copy only N input blocks\n" - " 'if=FILE' read from FILE\n" - " 'of=FILE' write to FILE\n" - " 'skip=N' skip N bs-sized blocks at the start of input\n"; - - printf("%s\nSupported formats:", help_msg); - bdrv_iterate_format(format_print, NULL, false); - printf("\n\n" QEMU_HELP_BOTTOM "\n"); - exit(EXIT_SUCCESS); + if (!strcmp(arg, "json")) { + return OFORMAT_JSON; + } else if (!strcmp(arg, "human")) { + return OFORMAT_HUMAN; + } else { + error_exit(argv0, "--output expects 'human' or 'json', not '%s'", arg); + } } /* @@ -481,18 +398,16 @@ static int add_old_style_options(const char *fmt, QemuOpts *opts, return 0; } -static int64_t cvtnum_full(const char *name, const char *value, int64_t min, - int64_t max) +static int64_t cvtnum_full(const char *name, const char *value, + bool is_size, int64_t min, int64_t max) { int err; uint64_t res; - err = qemu_strtosz(value, NULL, &res); + err = is_size ? qemu_strtosz(value, NULL, &res) : + qemu_strtou64(value, NULL, 0, &res); if (err < 0 && err != -ERANGE) { - error_report("Invalid %s specified. 
You may use "
-                     "k, M, G, T, P or E suffixes for", name);
-        error_report("kilobytes, megabytes, gigabytes, terabytes, "
-                     "petabytes and exabytes.");
+        error_report("Invalid %s specified: '%s'", name, value);
         return err;
     }
     if (err == -ERANGE || res > max || res < min) {
@@ -503,15 +418,15 @@ static int64_t cvtnum_full(const char *name, const char *value, int64_t min,
     return res;
 }
 
-static int64_t cvtnum(const char *name, const char *value)
+static int64_t cvtnum(const char *name, const char *value, bool is_size)
 {
-    return cvtnum_full(name, value, 0, INT64_MAX);
+    return cvtnum_full(name, value, is_size, 0, INT64_MAX);
 }
 
-static int img_create(int argc, char **argv)
+static int img_create(const img_cmd_t *ccmd, int argc, char **argv)
 {
     int c;
-    uint64_t img_size = -1;
+    int64_t img_size = -1;
     const char *fmt = "raw";
     const char *base_fmt = NULL;
     const char *filename;
@@ -524,29 +439,46 @@ static int img_create(int argc, char **argv)
     for(;;) {
         static const struct option long_options[] = {
             {"help", no_argument, 0, 'h'},
+            {"format", required_argument, 0, 'f'},
+            {"options", required_argument, 0, 'o'},
+            {"backing", required_argument, 0, 'b'},
+            {"backing-format", required_argument, 0, 'B'}, /* was -F in 10.0 */
+            {"backing-unsafe", no_argument, 0, 'u'},
+            {"quiet", no_argument, 0, 'q'},
             {"object", required_argument, 0, OPTION_OBJECT},
             {0, 0, 0, 0}
         };
-        c = getopt_long(argc, argv, ":F:b:f:ho:qu",
+        c = getopt_long(argc, argv, "hf:o:b:F:B:uq",
                         long_options, NULL);
         if (c == -1) {
            break;
        }
        switch(c) {
-        case ':':
-            missing_argument(argv[optind - 1]);
-            break;
-        case '?':
-            unrecognized_option(argv[optind - 1]);
-            break;
        case 'h':
-            help();
-            break;
-        case 'F':
-            base_fmt = optarg;
-            break;
-        case 'b':
-            base_filename = optarg;
+            cmd_help(ccmd, "[-f FMT] [-o FMT_OPTS]\n"
+"        [-b BACKING_FILE [-B BACKING_FMT]] [-u]\n"
+"        [-q] [--object OBJDEF] FILE [SIZE]\n"
+,
+"  -f, --format FMT\n"
+"     specifies the format of the new image (default: raw)\n"
+"  -o, --options FMT_OPTS\n"
+"     format-specific options (specify '-o help' for help)\n"
+"  -b, --backing BACKING_FILE\n"
+"     create target image to be a CoW on top of BACKING_FILE\n"
+"  -B, --backing-format BACKING_FMT (was -F in <= 10.0)\n"
+"     specifies the format of BACKING_FILE (default: probing is used)\n"
+"  -u, --backing-unsafe\n"
+"     do not fail if BACKING_FILE can not be read\n"
+"  -q, --quiet\n"
+"     quiet mode (produce only error messages if any)\n"
+"  --object OBJDEF\n"
+"     defines QEMU user-creatable object\n"
+"  FILE\n"
+"     name of the image file to create (will be overwritten if it already exists)\n"
+"  SIZE[bKMGTPE]\n"
+"     image size with optional multiplier suffix (powers of 1024)\n"
+"     (required unless BACKING_FILE is specified)\n"
+);
             break;
         case 'f':
             fmt = optarg;
@@ -556,15 +488,24 @@ static int img_create(int argc, char **argv)
                 goto fail;
             }
             break;
-        case 'q':
-            quiet = true;
+        case 'b':
+            base_filename = optarg;
+            break;
+        case 'F': /* <=10.0 */
+        case 'B':
+            base_fmt = optarg;
             break;
         case 'u':
             flags |= BDRV_O_NO_BACKING;
             break;
+        case 'q':
+            quiet = true;
+            break;
         case OPTION_OBJECT:
             user_creatable_process_cmdline(optarg);
             break;
+        default:
+            tryhelp(argv[0]);
         }
     }
 
@@ -576,22 +517,19 @@ static int img_create(int argc, char **argv)
     }
 
     if (optind >= argc) {
-        error_exit("Expecting image file name");
+        error_exit(argv[0], "Expecting image file name");
     }
     optind++;
 
     /* Get image size, if specified */
     if (optind < argc) {
-        int64_t sval;
-
-        sval = cvtnum("image size", argv[optind++]);
-        if (sval < 0) {
+        img_size = cvtnum("image size", argv[optind++], true);
+ if (img_size < 0) { goto fail; } - img_size = (uint64_t)sval; } if (optind != argc) { - error_exit("Unexpected argument: %s", argv[optind]); + error_exit(argv[0], "Unexpected argument: %s", argv[optind]); } bdrv_img_create(filename, fmt, base_filename, base_fmt, @@ -716,11 +654,11 @@ static int collect_image_check(BlockDriverState *bs, * 3 - Check completed, image has leaked clusters, but is good otherwise * 63 - Checks are not supported by the image format */ -static int img_check(int argc, char **argv) +static int img_check(const img_cmd_t *ccmd, int argc, char **argv) { int c, ret; OutputFormat output_format = OFORMAT_HUMAN; - const char *filename, *fmt, *output, *cache; + const char *filename, *fmt, *cache; BlockBackend *blk; BlockDriverState *bs; int fix = 0; @@ -732,7 +670,6 @@ static int img_check(int argc, char **argv) bool force_share = false; fmt = NULL; - output = NULL; cache = BDRV_DEFAULT_CACHE; for(;;) { @@ -740,31 +677,57 @@ static int img_check(int argc, char **argv) static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, {"format", required_argument, 0, 'f'}, + {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, + {"cache", required_argument, 0, 'T'}, {"repair", required_argument, 0, 'r'}, + {"force-share", no_argument, 0, 'U'}, {"output", required_argument, 0, OPTION_OUTPUT}, + {"quiet", no_argument, 0, 'q'}, {"object", required_argument, 0, OPTION_OBJECT}, - {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, - {"force-share", no_argument, 0, 'U'}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":hf:r:T:qU", + c = getopt_long(argc, argv, "hf:T:r:Uq", long_options, &option_index); if (c == -1) { break; } switch(c) { - case ':': - missing_argument(argv[optind - 1]); - break; - case '?': - unrecognized_option(argv[optind - 1]); - break; case 'h': - help(); + cmd_help(ccmd, "[-f FMT | --image-opts] [-T CACHE_MODE] [-r leaks|all]\n" +" [-U] [--output human|json] [-q] [--object OBJDEF] FILE\n" +, +" -f, --format FMT\n" +" specifies the format of the image explicitly (default: probing is used)\n" +" --image-opts\n" +" treat FILE as an option string (key=value,..), not a file name\n" +" (incompatible with -f|--format)\n" +" -T, --cache CACHE_MODE\n" /* why not -t ? 
*/ +" cache mode (default: " BDRV_DEFAULT_CACHE ")\n" +" -r, --repair leaks|all\n" +" repair errors of the given category in the image (image will be\n" +" opened in read-write mode, incompatible with -U|--force-share)\n" +" -U, --force-share\n" +" open image in shared mode for concurrent access\n" +" --output human|json\n" +" output format (default: human)\n" +" -q, --quiet\n" +" quiet mode (produce only error messages if any)\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" FILE\n" +" name of the image file, or an option string (key=value,..)\n" +" with --image-opts, to operate on\n" +); break; case 'f': fmt = optarg; break; + case OPTION_IMAGE_OPTS: + image_opts = true; + break; + case 'T': + cache = optarg; + break; case 'r': flags |= BDRV_O_RDWR; @@ -773,44 +736,32 @@ static int img_check(int argc, char **argv) } else if (!strcmp(optarg, "all")) { fix = BDRV_FIX_LEAKS | BDRV_FIX_ERRORS; } else { - error_exit("Unknown option value for -r " - "(expecting 'leaks' or 'all'): %s", optarg); + error_exit(argv[0], + "--repair (-r) expects 'leaks' or 'all', not '%s'", + optarg); } break; - case OPTION_OUTPUT: - output = optarg; + case 'U': + force_share = true; break; - case 'T': - cache = optarg; + case OPTION_OUTPUT: + output_format = parse_output_format(argv[0], optarg); break; case 'q': quiet = true; break; - case 'U': - force_share = true; - break; case OPTION_OBJECT: user_creatable_process_cmdline(optarg); break; - case OPTION_IMAGE_OPTS: - image_opts = true; - break; + default: + tryhelp(argv[0]); } } if (optind != argc - 1) { - error_exit("Expecting one image file name"); + error_exit(argv[0], "Expecting one image file name"); } filename = argv[optind++]; - if (output && !strcmp(output, "json")) { - output_format = OFORMAT_JSON; - } else if (output && !strcmp(output, "human")) { - output_format = OFORMAT_HUMAN; - } else if (output) { - error_report("--output must be used with human or json as argument."); - return 1; - } - ret = bdrv_parse_cache_mode(cache, &flags, &writethrough); if (ret < 0) { error_report("Invalid source cache option: %s", cache); @@ -948,7 +899,7 @@ static void run_block_job(BlockJob *job, Error **errp) } } -static int img_commit(int argc, char **argv) +static int img_commit(const img_cmd_t *ccmd, int argc, char **argv) { int c, ret, flags; const char *filename, *fmt, *cache, *base; @@ -968,38 +919,73 @@ static int img_commit(int argc, char **argv) for(;;) { static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, - {"object", required_argument, 0, OPTION_OBJECT}, + {"format", required_argument, 0, 'f'}, {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, + {"cache", required_argument, 0, 't'}, + {"drop", no_argument, 0, 'd'}, + {"base", required_argument, 0, 'b'}, + {"rate-limit", required_argument, 0, 'r'}, + {"progress", no_argument, 0, 'p'}, + {"quiet", no_argument, 0, 'q'}, + {"object", required_argument, 0, OPTION_OBJECT}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":f:ht:b:dpqr:", + c = getopt_long(argc, argv, "hf:t:db:r:pq", long_options, NULL); if (c == -1) { break; } switch(c) { - case ':': - missing_argument(argv[optind - 1]); - break; - case '?': - unrecognized_option(argv[optind - 1]); - break; case 'h': - help(); + cmd_help(ccmd, "[-f FMT | --image-opts] [-t CACHE_MODE] [-b BASE_IMG]\n" +" [-d] [-r RATE] [-q] [--object OBJDEF] FILE\n" +, +" -f, --format FMT\n" +" specify FILE image format explicitly (default: probing is used)\n" +" --image-opts\n" +" treat FILE as an option string (key=value,..), not a file 
name\n" +" (incompatible with -f|--format)\n" +" -t, --cache CACHE_MODE image cache mode (default: " BDRV_DEFAULT_CACHE ")\n" +" -d, --drop\n" +" skip emptying FILE on completion\n" +" -b, --base BASE_IMG\n" +" image in the backing chain to commit change to\n" +" (default: immediate backing file; implies --drop)\n" +" -r, --rate-limit RATE\n" +" I/O rate limit, in bytes per second\n" +" -p, --progress\n" +" display progress information\n" +" -q, --quiet\n" +" quiet mode (produce only error messages if any)\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" FILE\n" +" name of the image file, or an option string (key=value,..)\n" +" with --image-opts, to operate on\n" +); break; case 'f': fmt = optarg; break; + case OPTION_IMAGE_OPTS: + image_opts = true; + break; case 't': cache = optarg; break; + case 'd': + drop = true; + break; case 'b': base = optarg; /* -b implies -d */ drop = true; break; - case 'd': - drop = true; + case 'r': + rate_limit = cvtnum("rate limit", optarg, true); + if (rate_limit < 0) { + return 1; + } break; case 'p': progress = true; @@ -1007,18 +993,11 @@ static int img_commit(int argc, char **argv) case 'q': quiet = true; break; - case 'r': - rate_limit = cvtnum("rate limit", optarg); - if (rate_limit < 0) { - return 1; - } - break; case OPTION_OBJECT: user_creatable_process_cmdline(optarg); break; - case OPTION_IMAGE_OPTS: - image_opts = true; - break; + default: + tryhelp(argv[0]); } } @@ -1028,7 +1007,7 @@ static int img_commit(int argc, char **argv) } if (optind != argc - 1) { - error_exit("Expecting one image file name"); + error_exit(argv[0], "Expecting one image file name"); } filename = argv[optind++]; @@ -1355,7 +1334,7 @@ static int check_empty_sectors(BlockBackend *blk, int64_t offset, * 1 - Images differ * >1 - Error occurred */ -static int img_compare(int argc, char **argv) +static int img_compare(const img_cmd_t *ccmd, int argc, char **argv) { const char *fmt1 = NULL, *fmt2 = NULL, *cache, *filename1, *filename2; BlockBackend *blk1, *blk2; @@ -1380,25 +1359,51 @@ static int img_compare(int argc, char **argv) for (;;) { static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, - {"object", required_argument, 0, OPTION_OBJECT}, + {"a-format", required_argument, 0, 'f'}, + {"b-format", required_argument, 0, 'F'}, {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, + {"strict", no_argument, 0, 's'}, + {"cache", required_argument, 0, 'T'}, {"force-share", no_argument, 0, 'U'}, + {"progress", no_argument, 0, 'p'}, + {"quiet", no_argument, 0, 'q'}, + {"object", required_argument, 0, OPTION_OBJECT}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":hf:F:T:pqsU", + c = getopt_long(argc, argv, "hf:F:sT:Upq", long_options, NULL); if (c == -1) { break; } switch (c) { - case ':': - missing_argument(argv[optind - 1]); - break; - case '?': - unrecognized_option(argv[optind - 1]); - break; case 'h': - help(); + cmd_help(ccmd, +"[[-f FMT] [-F FMT] | --image-opts] [-s] [-T CACHE]\n" +" [-U] [-p] [-q] [--object OBJDEF] FILE1 FILE2\n" +, +" -f, --a-format FMT\n" +" specify FILE1 image format explicitly (default: probing is used)\n" +" -F, --b-format FMT\n" +" specify FILE2 image format explicitly (default: probing is used)\n" +" --image-opts\n" +" treat FILE1 and FILE2 as option strings (key=value,..), not file names\n" +" (incompatible with -f|--a-format and -F|--b-format)\n" +" -s, --strict\n" +" strict mode, also check if sizes are equal\n" +" -T, --cache CACHE_MODE\n" +" images caching mode (default: " BDRV_DEFAULT_CACHE ")\n" +" -U, 
--force-share\n" +" open images in shared mode for concurrent access\n" +" -p, --progress\n" +" display progress information\n" +" -q, --quiet\n" +" quiet mode (produce only error messages if any)\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" FILE1, FILE2\n" +" names of the image files, or option strings (key=value,..)\n" +" with --image-opts, to compare\n" +); break; case 'f': fmt1 = optarg; @@ -1406,39 +1411,29 @@ static int img_compare(int argc, char **argv) case 'F': fmt2 = optarg; break; + case OPTION_IMAGE_OPTS: + image_opts = true; + break; + case 's': + strict = true; + break; case 'T': cache = optarg; break; + case 'U': + force_share = true; + break; case 'p': progress = true; break; case 'q': quiet = true; break; - case 's': - strict = true; - break; - case 'U': - force_share = true; - break; case OPTION_OBJECT: - { - Error *local_err = NULL; - - if (!user_creatable_add_from_str(optarg, &local_err)) { - if (local_err) { - error_report_err(local_err); - exit(2); - } else { - /* Help was printed */ - exit(EXIT_SUCCESS); - } - } - break; - } - case OPTION_IMAGE_OPTS: - image_opts = true; + user_creatable_process_cmdline(optarg); break; + default: + tryhelp(argv[0]); } } @@ -1449,7 +1444,7 @@ static int img_compare(int argc, char **argv) if (optind != argc - 2) { - error_exit("Expecting two image file names"); + error_exit(argv[0], "Expecting two image file names"); } filename1 = argv[optind++]; filename2 = argv[optind++]; @@ -2231,7 +2226,7 @@ static void set_rate_limit(BlockBackend *blk, int64_t rate_limit) blk_set_io_limits(blk, &cfg); } -static int img_convert(int argc, char **argv) +static int img_convert(const img_cmd_t *ccmd, int argc, char **argv) { int c, bs_i, flags, src_flags = BDRV_O_NO_SHARE; const char *fmt = NULL, *out_fmt = NULL, *cache = "unsafe", @@ -2267,53 +2262,122 @@ static int img_convert(int argc, char **argv) for(;;) { static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, - {"object", required_argument, 0, OPTION_OBJECT}, + {"source-format", required_argument, 0, 'f'}, + /* + * XXX: historic --image-opts acts on source file only, + * it seems better to have it affect both source and target, + * and have separate --source-image-opts for source, + * but this might break existing setups. 
+ */ {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, - {"force-share", no_argument, 0, 'U'}, - {"target-image-opts", no_argument, 0, OPTION_TARGET_IMAGE_OPTS}, - {"salvage", no_argument, 0, OPTION_SALVAGE}, - {"target-is-zero", no_argument, 0, OPTION_TARGET_IS_ZERO}, + {"source-cache", required_argument, 0, 'T'}, + {"snapshot", required_argument, 0, 'l'}, {"bitmaps", no_argument, 0, OPTION_BITMAPS}, {"skip-broken-bitmaps", no_argument, 0, OPTION_SKIP_BROKEN}, + {"salvage", no_argument, 0, OPTION_SALVAGE}, + {"target-format", required_argument, 0, 'O'}, + {"target-image-opts", no_argument, 0, OPTION_TARGET_IMAGE_OPTS}, + {"target-format-options", required_argument, 0, 'o'}, + {"target-cache", required_argument, 0, 't'}, + {"backing", required_argument, 0, 'b'}, + {"backing-format", required_argument, 0, 'F'}, + {"sparse-size", required_argument, 0, 'S'}, + {"no-create", no_argument, 0, 'n'}, + {"target-is-zero", no_argument, 0, OPTION_TARGET_IS_ZERO}, + {"force-share", no_argument, 0, 'U'}, + {"rate-limit", required_argument, 0, 'r'}, + {"parallel", required_argument, 0, 'm'}, + {"oob-writes", no_argument, 0, 'W'}, + {"copy-range-offloading", no_argument, 0, 'C'}, + {"progress", no_argument, 0, 'p'}, + {"quiet", no_argument, 0, 'q'}, + {"object", required_argument, 0, OPTION_OBJECT}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":hf:O:B:CcF:o:l:S:pt:T:qnm:WUr:", + c = getopt_long(argc, argv, "hf:O:b:B:CcF:o:l:S:pt:T:nm:WUr:q", long_options, NULL); if (c == -1) { break; } - switch(c) { - case ':': - missing_argument(argv[optind - 1]); - break; - case '?': - unrecognized_option(argv[optind - 1]); - break; + switch (c) { case 'h': - help(); + cmd_help(ccmd, "[-f SRC_FMT | --image-opts] [-T SRC_CACHE]\n" +" [-l SNAPSHOT] [--bitmaps [--skip-broken-bitmaps]] [--salvage]\n" +" [-O TGT_FMT | --target-image-opts] [-o TGT_FMT_OPTS] [-t TGT_CACHE]\n" +" [-b BACKING_FILE [-F BACKING_FMT]] [-S SPARSE_SIZE]\n" +" [-n] [--target-is-zero] [-c]\n" +" [-U] [-r RATE] [-m NUM_PARALLEL] [-W] [-C] [-p] [-q] [--object OBJDEF]\n" +" SRC_FILE [SRC_FILE2...] 
TGT_FILE\n" +, +" -f, --source-format SRC_FMT\n" +" specify format of all SRC_FILEs explicitly (default: probing is used)\n" +" --image-opts\n" +" treat each SRC_FILE as an option string (key=value,...), not a file name\n" +" (incompatible with -f|--source-format)\n" +" -T, --source-cache SRC_CACHE\n" +" source image(s) cache mode (" BDRV_DEFAULT_CACHE ")\n" +" -l, --snapshot SNAPSHOT\n" +" specify source snapshot\n" +" --bitmaps\n" +" also copy any persistent bitmaps present in source\n" +" --skip-broken-bitmaps\n" +" skip (do not error out) any broken bitmaps\n" +" --salvage\n" +" ignore errors on input (convert unreadable areas to zeros)\n" +" -O, --target-format TGT_FMT\n" +" specify TGT_FILE image format (default: raw)\n" +" --target-image-opts\n" +" treat TGT_FILE as an option string (key=value,...), not a file name\n" +" (incompatible with -O|--target-format)\n" +" -o, --target-format-options TGT_FMT_OPTS\n" +" TGT_FMT-specific options\n" +" -t, --target-cache TGT_CACHE\n" +" cache mode when opening output image (default: unsafe)\n" +" -b, --backing BACKING_FILE (was -B in <= 10.0)\n" +" create target image to be a CoW on top of BACKING_FILE\n" +" -F, --backing-format BACKING_FMT\n" /* -B used for -b in <=10.0 */ +" specify BACKING_FILE image format explicitly (default: probing is used)\n" +" -S, --sparse-size SPARSE_SIZE[bkKMGTPE]\n" +" specify number of consecutive zero bytes to treat as a gap on output\n" +" (rounded down to nearest 512 bytes), with optional multiplier suffix\n" +" -n, --no-create\n" +" omit target volume creation (e.g. on rbd)\n" +" --target-is-zero\n" +" indicates that the target volume is pre-zeroed\n" +" -c, --compress\n" +" create compressed output image (qcow and qcow2 formats only)\n" +" -U, --force-share\n" +" open images in shared mode for concurrent access\n" +" -r, --rate-limit RATE\n" +" I/O rate limit, in bytes per second\n" +" -m, --parallel NUM_PARALLEL\n" +" specify parallelism (default: 8)\n" +" -C, --copy-range-offloading\n" +" try to use copy offloading\n" +" -W, --oob-writes\n" +" enable out-of-order writes to improve performance\n" +" -p, --progress\n" +" display progress information\n" +" -q, --quiet\n" +" quiet mode (produce only error messages if any)\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" SRC_FILE...\n" +" one or more source image file names,\n" +" or option strings (key=value,..) with --source-image-opts\n" +" TGT_FILE\n" +" target (output) image file name,\n" +" or option string (key=value,..) 
with --target-image-opts\n" +); break; case 'f': fmt = optarg; break; - case 'O': - out_fmt = optarg; - break; - case 'B': - out_baseimg = optarg; - break; - case 'C': - s.copy_range = true; - break; - case 'c': - s.compressed = true; - break; - case 'F': - backing_fmt = optarg; + case OPTION_IMAGE_OPTS: + image_opts = true; break; - case 'o': - if (accumulate_options(&options, optarg) < 0) { - goto fail_getopt; - } + case 'T': + src_cache = optarg; break; case 'l': if (strstart(optarg, SNAPSHOT_OPT_BASE, NULL)) { @@ -2328,11 +2392,41 @@ static int img_convert(int argc, char **argv) snapshot_name = optarg; } break; + case OPTION_BITMAPS: + bitmaps = true; + break; + case OPTION_SKIP_BROKEN: + skip_broken = true; + break; + case OPTION_SALVAGE: + s.salvage = true; + break; + case 'O': + out_fmt = optarg; + break; + case OPTION_TARGET_IMAGE_OPTS: + tgt_image_opts = true; + break; + case 'o': + if (accumulate_options(&options, optarg) < 0) { + goto fail_getopt; + } + break; + case 't': + cache = optarg; + break; + case 'B': /* <=10.0 */ + case 'b': + out_baseimg = optarg; + break; + case 'F': /* can't use -B as it used as -b in <=10.0 */ + backing_fmt = optarg; + break; case 'S': { int64_t sval; - sval = cvtnum("buffer size for sparse output", optarg); + sval = cvtnum("buffer size for sparse output", optarg, true); if (sval < 0) { goto fail_getopt; } else if (!QEMU_IS_ALIGNED(sval, BDRV_SECTOR_SIZE) || @@ -2348,67 +2442,53 @@ static int img_convert(int argc, char **argv) explict_min_sparse = true; break; } - case 'p': - progress = true; - break; - case 't': - cache = optarg; - break; - case 'T': - src_cache = optarg; - break; - case 'q': - s.quiet = true; - break; case 'n': skip_create = true; break; - case 'm': - if (qemu_strtol(optarg, NULL, 0, &s.num_coroutines) || - s.num_coroutines < 1 || s.num_coroutines > MAX_COROUTINES) { - error_report("Invalid number of coroutines. Allowed number of" - " coroutines is between 1 and %d", MAX_COROUTINES); - goto fail_getopt; - } + case OPTION_TARGET_IS_ZERO: + /* + * The user asserting that the target is blank has the + * same effect as the target driver supporting zero + * initialisation. + */ + s.has_zero_init = true; break; - case 'W': - s.wr_in_order = false; + case 'c': + s.compressed = true; break; case 'U': force_share = true; break; case 'r': - rate_limit = cvtnum("rate limit", optarg); + rate_limit = cvtnum("rate limit", optarg, true); if (rate_limit < 0) { goto fail_getopt; } break; - case OPTION_OBJECT: - user_creatable_process_cmdline(optarg); - break; - case OPTION_IMAGE_OPTS: - image_opts = true; + case 'm': + s.num_coroutines = cvtnum_full("number of coroutines", optarg, + false, 1, MAX_COROUTINES); + if (s.num_coroutines < 0) { + goto fail_getopt; + } break; - case OPTION_SALVAGE: - s.salvage = true; + case 'W': + s.wr_in_order = false; break; - case OPTION_TARGET_IMAGE_OPTS: - tgt_image_opts = true; + case 'C': + s.copy_range = true; break; - case OPTION_TARGET_IS_ZERO: - /* - * The user asserting that the target is blank has the - * same effect as the target driver supporting zero - * initialisation. 
- */ - s.has_zero_init = true; + case 'p': + progress = true; break; - case OPTION_BITMAPS: - bitmaps = true; + case 'q': + s.quiet = true; break; - case OPTION_SKIP_BROKEN: - skip_broken = true; + case OPTION_OBJECT: + user_creatable_process_cmdline(optarg); break; + default: + tryhelp(argv[0]); } } @@ -2999,79 +3079,82 @@ static BlockGraphInfoList *collect_image_info_list(bool image_opts, return NULL; } -static int img_info(int argc, char **argv) +static int img_info(const img_cmd_t *ccmd, int argc, char **argv) { int c; OutputFormat output_format = OFORMAT_HUMAN; bool chain = false; - const char *filename, *fmt, *output; + const char *filename, *fmt; BlockGraphInfoList *list; bool image_opts = false; bool force_share = false; fmt = NULL; - output = NULL; for(;;) { - int option_index = 0; static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, {"format", required_argument, 0, 'f'}, - {"output", required_argument, 0, OPTION_OUTPUT}, - {"backing-chain", no_argument, 0, OPTION_BACKING_CHAIN}, - {"object", required_argument, 0, OPTION_OBJECT}, {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, + {"backing-chain", no_argument, 0, OPTION_BACKING_CHAIN}, {"force-share", no_argument, 0, 'U'}, + {"output", required_argument, 0, OPTION_OUTPUT}, + {"object", required_argument, 0, OPTION_OBJECT}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":f:hU", - long_options, &option_index); + c = getopt_long(argc, argv, "hf:U", long_options, NULL); if (c == -1) { break; } switch(c) { - case ':': - missing_argument(argv[optind - 1]); - break; - case '?': - unrecognized_option(argv[optind - 1]); - break; case 'h': - help(); + cmd_help(ccmd, "[-f FMT | --image-opts] [--backing-chain] [-U]\n" +" [--output human|json] [--object OBJDEF] FILE\n" +, +" -f, --format FMT\n" +" specify FILE image format explicitly (default: probing is used)\n" +" --image-opts\n" +" treat FILE as an option string (key=value,..), not a file name\n" +" (incompatible with -f|--format)\n" +" --backing-chain\n" +" display information about the backing chain for copy-on-write overlays\n" +" -U, --force-share\n" +" open image in shared mode for concurrent access\n" +" --output human|json\n" +" specify output format (default: human)\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" FILE\n" +" name of the image file, or option string (key=value,..)\n" +" with --image-opts, to operate on\n" +); break; case 'f': fmt = optarg; break; - case 'U': - force_share = true; - break; - case OPTION_OUTPUT: - output = optarg; + case OPTION_IMAGE_OPTS: + image_opts = true; break; case OPTION_BACKING_CHAIN: chain = true; break; + case 'U': + force_share = true; + break; + case OPTION_OUTPUT: + output_format = parse_output_format(argv[0], optarg); + break; case OPTION_OBJECT: user_creatable_process_cmdline(optarg); break; - case OPTION_IMAGE_OPTS: - image_opts = true; - break; + default: + tryhelp(argv[0]); } } if (optind != argc - 1) { - error_exit("Expecting one image file name"); + error_exit(argv[0], "Expecting one image file name"); } filename = argv[optind++]; - if (output && !strcmp(output, "json")) { - output_format = OFORMAT_JSON; - } else if (output && !strcmp(output, "human")) { - output_format = OFORMAT_HUMAN; - } else if (output) { - error_report("--output must be used with human or json as argument."); - return 1; - } - list = collect_image_info_list(image_opts, filename, fmt, chain, force_share); if (!list) { @@ -3224,13 +3307,13 @@ static inline bool entry_mergeable(const MapEntry *curr, const MapEntry 
*next) return true; } -static int img_map(int argc, char **argv) +static int img_map(const img_cmd_t *ccmd, int argc, char **argv) { int c; OutputFormat output_format = OFORMAT_HUMAN; BlockBackend *blk; BlockDriverState *bs; - const char *filename, *fmt, *output; + const char *filename, *fmt; int64_t length; MapEntry curr = { .length = 0 }, next; int ret = 0; @@ -3240,78 +3323,85 @@ static int img_map(int argc, char **argv) int64_t max_length = -1; fmt = NULL; - output = NULL; for (;;) { - int option_index = 0; static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, {"format", required_argument, 0, 'f'}, - {"output", required_argument, 0, OPTION_OUTPUT}, - {"object", required_argument, 0, OPTION_OBJECT}, {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, - {"force-share", no_argument, 0, 'U'}, {"start-offset", required_argument, 0, 's'}, {"max-length", required_argument, 0, 'l'}, + {"force-share", no_argument, 0, 'U'}, + {"output", required_argument, 0, OPTION_OUTPUT}, + {"object", required_argument, 0, OPTION_OBJECT}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":f:s:l:hU", - long_options, &option_index); + c = getopt_long(argc, argv, "hf:s:l:U", + long_options, NULL); if (c == -1) { break; } switch (c) { - case ':': - missing_argument(argv[optind - 1]); - break; - case '?': - unrecognized_option(argv[optind - 1]); - break; case 'h': - help(); + cmd_help(ccmd, "[-f FMT | --image-opts]\n" +" [--start-offset OFFSET] [--max-length LENGTH]\n" +" [--output human|json] [-U] [--object OBJDEF] FILE\n" +, +" -f, --format FMT\n" +" specify FILE image format explicitly (default: probing is used)\n" +" --image-opts\n" +" treat FILE as an option string (key=value,..), not a file name\n" +" (incompatible with -f|--format)\n" +" -s, --start-offset OFFSET\n" +" start at the given OFFSET in the image, not at the beginning\n" +" -l, --max-length LENGTH\n" +" process at most LENGTH bytes instead of up to the end of the image\n" +" --output human|json\n" +" specify output format name (default: human)\n" +" -U, --force-share\n" +" open image in shared mode for concurrent access\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" FILE\n" +" the image file name, or option string (key=value,..)\n" +" with --image-opts, to operate on\n" +); break; case 'f': fmt = optarg; break; - case 'U': - force_share = true; - break; - case OPTION_OUTPUT: - output = optarg; + case OPTION_IMAGE_OPTS: + image_opts = true; break; case 's': - start_offset = cvtnum("start offset", optarg); + start_offset = cvtnum("start offset", optarg, true); if (start_offset < 0) { return 1; } break; case 'l': - max_length = cvtnum("max length", optarg); + max_length = cvtnum("max length", optarg, true); if (max_length < 0) { return 1; } break; + case OPTION_OUTPUT: + output_format = parse_output_format(argv[0], optarg); + break; + case 'U': + force_share = true; + break; case OPTION_OBJECT: user_creatable_process_cmdline(optarg); break; - case OPTION_IMAGE_OPTS: - image_opts = true; - break; + default: + tryhelp(argv[0]); } } if (optind != argc - 1) { - error_exit("Expecting one image file name"); + error_exit(argv[0], "Expecting one image file name"); } filename = argv[optind]; - if (output && !strcmp(output, "json")) { - output_format = OFORMAT_JSON; - } else if (output && !strcmp(output, "human")) { - output_format = OFORMAT_HUMAN; - } else if (output) { - error_report("--output must be used with human or json as argument."); - return 1; - } - blk = img_open(image_opts, filename, fmt, 0, false, false, 
force_share); if (!blk) { return 1; @@ -3368,18 +3458,19 @@ static int img_map(int argc, char **argv) return ret < 0; } -#define SNAPSHOT_LIST 1 -#define SNAPSHOT_CREATE 2 -#define SNAPSHOT_APPLY 3 -#define SNAPSHOT_DELETE 4 +/* the same as options */ +#define SNAPSHOT_LIST 'l' +#define SNAPSHOT_CREATE 'c' +#define SNAPSHOT_APPLY 'a' +#define SNAPSHOT_DELETE 'd' -static int img_snapshot(int argc, char **argv) +static int img_snapshot(const img_cmd_t *ccmd, int argc, char **argv) { BlockBackend *blk; BlockDriverState *bs; QEMUSnapshotInfo sn; - char *filename, *snapshot_name = NULL; - int c, ret = 0, bdrv_oflags; + char *filename, *fmt = NULL, *snapshot_name = NULL; + int c, ret = 0; int action = 0; bool quiet = false; Error *err = NULL; @@ -3387,86 +3478,100 @@ static int img_snapshot(int argc, char **argv) bool force_share = false; int64_t rt; - bdrv_oflags = BDRV_O_RDWR; /* Parse commandline parameters */ for(;;) { static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, - {"object", required_argument, 0, OPTION_OBJECT}, + {"format", required_argument, 0, 'f'}, {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, + {"list", no_argument, 0, SNAPSHOT_LIST}, + {"apply", required_argument, 0, SNAPSHOT_APPLY}, + {"create", required_argument, 0, SNAPSHOT_CREATE}, + {"delete", required_argument, 0, SNAPSHOT_DELETE}, {"force-share", no_argument, 0, 'U'}, + {"quiet", no_argument, 0, 'q'}, + {"object", required_argument, 0, OPTION_OBJECT}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":la:c:d:hqU", + c = getopt_long(argc, argv, "hf:la:c:d:Uq", long_options, NULL); if (c == -1) { break; } switch(c) { - case ':': - missing_argument(argv[optind - 1]); - break; - case '?': - unrecognized_option(argv[optind - 1]); - break; case 'h': - help(); - return 0; - case 'l': - if (action) { - error_exit("Cannot mix '-l', '-a', '-c', '-d'"); - return 0; - } - action = SNAPSHOT_LIST; - bdrv_oflags &= ~BDRV_O_RDWR; /* no need for RW */ + cmd_help(ccmd, "[-f FMT | --image-opts] [-l | -a|-c|-d SNAPSHOT]\n" +" [-U] [-q] [--object OBJDEF] FILE\n" +, +" -f, --format FMT\n" +" specify FILE format explicitly (default: probing is used)\n" +" --image-opts\n" +" treat FILE as an option string (key=value,..), not a file name\n" +" (incompatible with -f|--format)\n" +" -l, --list\n" +" list snapshots in FILE (default action if no -l|-c|-a|-d is given)\n" +" -c, --create SNAPSHOT\n" +" create named snapshot\n" +" -a, --apply SNAPSHOT\n" +" apply named snapshot to the base\n" +" -d, --delete SNAPSHOT\n" +" delete named snapshot\n" +" (only one of -l|-c|-a|-d can be specified)\n" +" -U, --force-share\n" +" open image in shared mode for concurrent access\n" +" -q, --quiet\n" +" quiet mode (produce only error messages if any)\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" FILE\n" +" name of the image file, or option string (key=value,..)\n" +" with --image-opts) to operate on\n" +); break; - case 'a': - if (action) { - error_exit("Cannot mix '-l', '-a', '-c', '-d'"); - return 0; - } - action = SNAPSHOT_APPLY; - snapshot_name = optarg; + case 'f': + fmt = optarg; break; - case 'c': - if (action) { - error_exit("Cannot mix '-l', '-a', '-c', '-d'"); - return 0; - } - action = SNAPSHOT_CREATE; - snapshot_name = optarg; + case OPTION_IMAGE_OPTS: + image_opts = true; break; - case 'd': + case SNAPSHOT_LIST: + case SNAPSHOT_APPLY: + case SNAPSHOT_CREATE: + case SNAPSHOT_DELETE: if (action) { - error_exit("Cannot mix '-l', '-a', '-c', '-d'"); + error_exit(argv[0], "Cannot mix '-l', '-a', '-c', 
'-d'"); return 0; } - action = SNAPSHOT_DELETE; + action = c; snapshot_name = optarg; break; - case 'q': - quiet = true; - break; case 'U': force_share = true; break; + case 'q': + quiet = true; + break; case OPTION_OBJECT: user_creatable_process_cmdline(optarg); break; - case OPTION_IMAGE_OPTS: - image_opts = true; - break; + default: + tryhelp(argv[0]); } } if (optind != argc - 1) { - error_exit("Expecting one image file name"); + error_exit(argv[0], "Expecting one image file name"); } filename = argv[optind++]; + if (!action) { + action = SNAPSHOT_LIST; + } + /* Open the image */ - blk = img_open(image_opts, filename, NULL, bdrv_oflags, false, quiet, - force_share); + blk = img_open(image_opts, filename, fmt, + action == SNAPSHOT_LIST ? 0 : BDRV_O_RDWR, + false, quiet, force_share); if (!blk) { return 1; } @@ -3505,6 +3610,7 @@ static int img_snapshot(int argc, char **argv) break; case SNAPSHOT_DELETE: + bdrv_drain_all_begin(); bdrv_graph_rdlock_main_loop(); ret = bdrv_snapshot_find(bs, &sn, snapshot_name); if (ret < 0) { @@ -3520,6 +3626,7 @@ static int img_snapshot(int argc, char **argv) } } bdrv_graph_rdunlock_main_loop(); + bdrv_drain_all_end(); break; } @@ -3531,7 +3638,7 @@ static int img_snapshot(int argc, char **argv) return 0; } -static int img_rebase(int argc, char **argv) +static int img_rebase(const img_cmd_t *ccmd, int argc, char **argv) { BlockBackend *blk = NULL, *blk_old_backing = NULL, *blk_new_backing = NULL; uint8_t *buf_old = NULL; @@ -3562,45 +3669,89 @@ static int img_rebase(int argc, char **argv) for(;;) { static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, - {"object", required_argument, 0, OPTION_OBJECT}, + {"format", required_argument, 0, 'f'}, {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, - {"force-share", no_argument, 0, 'U'}, + {"cache", required_argument, 0, 't'}, {"compress", no_argument, 0, 'c'}, + {"backing", required_argument, 0, 'b'}, + {"backing-format", required_argument, 0, 'B'}, + {"backing-cache", required_argument, 0, 'T'}, + {"backing-unsafe", no_argument, 0, 'u'}, + {"force-share", no_argument, 0, 'U'}, + {"progress", no_argument, 0, 'p'}, + {"quiet", no_argument, 0, 'q'}, + {"object", required_argument, 0, OPTION_OBJECT}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":hf:F:b:upt:T:qUc", + c = getopt_long(argc, argv, "hf:t:cb:F:B:T:uUpq", long_options, NULL); if (c == -1) { break; } - switch(c) { - case ':': - missing_argument(argv[optind - 1]); - break; - case '?': - unrecognized_option(argv[optind - 1]); - break; + switch (c) { case 'h': - help(); + cmd_help(ccmd, "[-f FMT | --image-opts] [-t CACHE]\n" +" [-b BACKING_FILE [-B BACKING_FMT] [-T BACKING_CACHE]] [-u]\n" +" [-c] [-U] [-p] [-q] [--object OBJDEF] FILE\n" +, +" -f, --format FMT\n" +" specify FILE format explicitly (default: probing is used)\n" +" --image-opts\n" +" treat FILE as an option string (key=value,..), not a file name\n" +" (incompatible with -f|--format)\n" +" -t, --cache CACHE\n" +" cache mode for FILE (default: " BDRV_DEFAULT_CACHE ")\n" +" -b, --backing BACKING_FILE|\"\"\n" +" rebase onto this file (specify empty name for no backing file)\n" +" -B, --backing-format BACKING_FMT (was -F in <=10.0)\n" +" specify format for BACKING_FILE explicitly (default: probing is used)\n" +" -T, --backing-cache CACHE\n" +" BACKING_FILE cache mode (default: " BDRV_DEFAULT_CACHE ")\n" +" -u, --backing-unsafe\n" +" do not fail if BACKING_FILE can not be read\n" +" -c, --compress\n" +" compress image (when image supports this)\n" +" -U, --force-share\n" +" 
open image in shared mode for concurrent access\n" +" -p, --progress\n" +" display progress information\n" +" -q, --quiet\n" +" quiet mode (produce only error messages if any)\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" FILE\n" +" name of the image file, or option string (key=value,..)\n" +" with --image-opts, to operate on\n" +); return 0; case 'f': fmt = optarg; break; - case 'F': - out_basefmt = optarg; + case OPTION_IMAGE_OPTS: + image_opts = true; + break; + case 't': + cache = optarg; break; case 'b': out_baseimg = optarg; break; + case 'F': /* <=10.0 */ + case 'B': + out_basefmt = optarg; + break; case 'u': unsafe = 1; break; + case 'c': + compress = true; + break; + case 'U': + force_share = true; + break; case 'p': progress = 1; break; - case 't': - cache = optarg; - break; case 'T': src_cache = optarg; break; @@ -3610,15 +3761,8 @@ static int img_rebase(int argc, char **argv) case OPTION_OBJECT: user_creatable_process_cmdline(optarg); break; - case OPTION_IMAGE_OPTS: - image_opts = true; - break; - case 'U': - force_share = true; - break; - case 'c': - compress = true; - break; + default: + tryhelp(argv[0]); } } @@ -3627,10 +3771,11 @@ static int img_rebase(int argc, char **argv) } if (optind != argc - 1) { - error_exit("Expecting one image file name"); + error_exit(argv[0], "Expecting one image file name"); } if (!unsafe && !out_baseimg) { - error_exit("Must specify backing file (-b) or use unsafe mode (-u)"); + error_exit(argv[0], + "Must specify backing file (-b) or use unsafe mode (-u)"); } filename = argv[optind++]; @@ -4024,11 +4169,11 @@ static int img_rebase(int argc, char **argv) return 0; } -static int img_resize(int argc, char **argv) +static int img_resize(const img_cmd_t *ccmd, int argc, char **argv) { Error *err = NULL; int c, ret, relative; - const char *filename, *fmt, *size; + const char *filename = NULL, *fmt = NULL, *size = NULL; int64_t n, total_size, current_size; bool quiet = false; BlockBackend *blk = NULL; @@ -4051,50 +4196,52 @@ static int img_resize(int argc, char **argv) bool image_opts = false; bool shrink = false; - /* Remove size from argv manually so that negative numbers are not treated - * as options by getopt. 
*/ - if (argc < 3) { - error_exit("Not enough arguments"); - return 1; - } - - size = argv[--argc]; - /* Parse getopt arguments */ - fmt = NULL; for(;;) { static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, - {"object", required_argument, 0, OPTION_OBJECT}, + {"format", required_argument, 0, 'f'}, {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, {"preallocation", required_argument, 0, OPTION_PREALLOCATION}, {"shrink", no_argument, 0, OPTION_SHRINK}, + {"quiet", no_argument, 0, 'q'}, + {"object", required_argument, 0, OPTION_OBJECT}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":f:hq", + c = getopt_long(argc, argv, "-hf:q", long_options, NULL); if (c == -1) { break; } switch(c) { - case ':': - missing_argument(argv[optind - 1]); - break; - case '?': - unrecognized_option(argv[optind - 1]); - break; case 'h': - help(); - break; + cmd_help(ccmd, "[-f FMT | --image-opts] [--preallocation PREALLOC] [--shrink]\n" +" [-q] [--object OBJDEF] FILE [+-]SIZE[bkKMGTPE]\n" +, +" -f, --format FMT\n" +" specify FILE format explicitly (default: probing is used)\n" +" --image-opts\n" +" treat FILE as an option string (key=value,...), not a file name\n" +" (incompatible with -f|--format)\n" +" --shrink\n" +" allow operation when the new size is smaller than the original\n" +" --preallocation PREALLOC\n" +" specify FMT-specific preallocation type for the new areas\n" +" -q, --quiet\n" +" quiet mode (produce only error messages if any)\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" FILE\n" +" name of the image file, or option string (key=value,..)\n" +" with --image-opts, to operate on\n" +" [+-]SIZE[bkKMGTPE]\n" +" new image size or amount by which to shrink (-)/grow (+),\n" +" with optional multiplier suffix (powers of 1024, default is bytes)\n" +); + return 0; case 'f': fmt = optarg; break; - case 'q': - quiet = true; - break; - case OPTION_OBJECT: - user_creatable_process_cmdline(optarg); - break; case OPTION_IMAGE_OPTS: image_opts = true; break; @@ -4109,12 +4256,43 @@ static int img_resize(int argc, char **argv) case OPTION_SHRINK: shrink = true; break; + case 'q': + quiet = true; + break; + case OPTION_OBJECT: + user_creatable_process_cmdline(optarg); + break; + case 1: /* a non-optional argument */ + if (!filename) { + filename = optarg; + /* see if we have -size (number) next to filename */ + if (optind < argc) { + size = argv[optind]; + if (size[0] == '-' && size[1] >= '0' && size[1] <= '9') { + ++optind; + } else { + size = NULL; + } + } + } else if (!size) { + size = optarg; + } else { + error_exit(argv[0], "Extra argument(s) in command line"); + } + break; + default: + tryhelp(argv[0]); } } - if (optind != argc - 1) { - error_exit("Expecting image file name and size"); + if (!filename && optind < argc) { + filename = argv[optind++]; + } + if (!size && optind < argc) { + size = argv[optind++]; + } + if (!filename || !size || optind < argc) { + error_exit(argv[0], "Expecting image file name and size"); } - filename = argv[optind++]; /* Choose grow, shrink, or absolute resize mode */ switch (size[0]) { @@ -4237,7 +4415,7 @@ static int print_amend_option_help(const char *format) return 0; } -static int img_amend(int argc, char **argv) +static int img_amend(const img_cmd_t *ccmd, int argc, char **argv) { Error *err = NULL; int c, ret = 0; @@ -4257,26 +4435,48 @@ static int img_amend(int argc, char **argv) for (;;) { static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, - {"object", required_argument, 0, OPTION_OBJECT}, + 
{"options", required_argument, 0, 'o'}, + {"format", required_argument, 0, 'f'}, {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, + {"cache", required_argument, 0, 't'}, {"force", no_argument, 0, OPTION_FORCE}, + {"progress", no_argument, 0, 'p'}, + {"quiet", no_argument, 0, 'q'}, + {"object", required_argument, 0, OPTION_OBJECT}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":ho:f:t:pq", + c = getopt_long(argc, argv, "ho:f:t:pq", long_options, NULL); if (c == -1) { break; } switch (c) { - case ':': - missing_argument(argv[optind - 1]); - break; - case '?': - unrecognized_option(argv[optind - 1]); - break; case 'h': - help(); + cmd_help(ccmd, "-o FMT_OPTS [-f FMT | --image-opts]\n" +" [-t CACHE] [--force] [-p] [-q] [--object OBJDEF] FILE\n" +, +" -o, --options FMT_OPTS\n" +" FMT-specfic format options (required)\n" +" -f, --format FMT\n" +" specify FILE format explicitly (default: probing is used)\n" +" --image-opts\n" +" treat FILE as an option string (key=value,..), not a file name\n" +" (incompatible with -f|--format)\n" +" -t, --cache CACHE\n" +" cache mode for FILE (default: " BDRV_DEFAULT_CACHE ")\n" +" --force\n" +" allow certain unsafe operations\n" +" -p, --progres\n" +" show operation progress\n" +" -q, --quiet\n" +" quiet mode (produce only error messages if any)\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" FILE\n" +" name of the image file, or option string (key=value,..)\n" +" with --image-opts, to operate on\n" +); break; case 'o': if (accumulate_options(&options, optarg) < 0) { @@ -4287,9 +4487,15 @@ static int img_amend(int argc, char **argv) case 'f': fmt = optarg; break; + case OPTION_IMAGE_OPTS: + image_opts = true; + break; case 't': cache = optarg; break; + case OPTION_FORCE: + force = true; + break; case 'p': progress = true; break; @@ -4299,17 +4505,13 @@ static int img_amend(int argc, char **argv) case OPTION_OBJECT: user_creatable_process_cmdline(optarg); break; - case OPTION_IMAGE_OPTS: - image_opts = true; - break; - case OPTION_FORCE: - force = true; - break; + default: + tryhelp(argv[0]); } } if (!options) { - error_exit("Must specify options (-o)"); + error_exit(argv[0], "Must specify options (-o)"); } if (quiet) { @@ -4488,7 +4690,11 @@ static void bench_cb(void *opaque, int ret) */ b->in_flight++; b->offset += b->step; - b->offset %= b->image_size; + if (b->image_size <= b->bufsize) { + b->offset = 0; + } else { + b->offset %= b->image_size - b->bufsize; + } if (b->write) { acb = blk_aio_pwritev(b->blk, offset, b->qiov, 0, bench_cb, b); } else { @@ -4501,7 +4707,7 @@ static void bench_cb(void *opaque, int ret) } } -static int img_bench(int argc, char **argv) +static int img_bench(const img_cmd_t *ccmd, int argc, char **argv) { int c, ret = 0; const char *fmt = NULL, *filename; @@ -4511,9 +4717,9 @@ static int img_bench(int argc, char **argv) int count = 75000; int depth = 64; int64_t offset = 0; - size_t bufsize = 4096; + ssize_t bufsize = 4096; int pattern = 0; - size_t step = 0; + ssize_t step = 0; int flush_interval = 0; bool drain_on_flush = true; int64_t image_size; @@ -4529,53 +4735,105 @@ static int img_bench(int argc, char **argv) for (;;) { static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, - {"flush-interval", required_argument, 0, OPTION_FLUSH_INTERVAL}, + {"format", required_argument, 0, 'f'}, {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, + {"cache", required_argument, 0, 't'}, + {"count", required_argument, 0, 'c'}, + {"depth", required_argument, 0, 'd'}, + {"offset", required_argument, 
0, 'o'}, + {"buffer-size", required_argument, 0, 's'}, + {"step-size", required_argument, 0, 'S'}, + {"write", no_argument, 0, 'w'}, {"pattern", required_argument, 0, OPTION_PATTERN}, + {"flush-interval", required_argument, 0, OPTION_FLUSH_INTERVAL}, {"no-drain", no_argument, 0, OPTION_NO_DRAIN}, + {"aio", required_argument, 0, 'i'}, + {"native", no_argument, 0, 'n'}, {"force-share", no_argument, 0, 'U'}, + {"quiet", no_argument, 0, 'q'}, + {"object", required_argument, 0, OPTION_OBJECT}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":hc:d:f:ni:o:qs:S:t:wU", long_options, - NULL); + c = getopt_long(argc, argv, "hf:t:c:d:o:s:S:wi:nUq", + long_options, NULL); if (c == -1) { break; } switch (c) { - case ':': - missing_argument(argv[optind - 1]); + case 'h': + cmd_help(ccmd, "[-f FMT | --image-opts] [-t CACHE]\n" +" [-c COUNT] [-d DEPTH] [-o OFFSET] [-s BUFFER_SIZE] [-S STEP_SIZE]\n" +" [-w [--pattern PATTERN] [--flush-interval INTERVAL [--no-drain]]]\n" +" [-i AIO] [-n] [-U] [-q] FILE\n" +, +" -f, --format FMT\n" +" specify FILE format explicitly\n" +" --image-opts\n" +" indicates that FILE is a complete image specification\n" +" instead of a file name (incompatible with --format)\n" +" -t, --cache CACHE\n" +" cache mode for FILE (default: " BDRV_DEFAULT_CACHE ")\n" +" -c, --count COUNT\n" +" number of I/O requests to perform\n" +" -d, --depth DEPTH\n" +" number of requests to perform in parallel\n" +" -o, --offset OFFSET\n" +" start first request at this OFFSET\n" +" -s, --buffer-size BUFFER_SIZE[bkKMGTPE]\n" +" size of each I/O request, with optional multiplier suffix\n" +" (powers of 1024, default is 4K)\n" +" -S, --step-size STEP_SIZE[bkKMGTPE]\n" +" each next request offset increment, with optional multiplier suffix\n" +" (powers of 1024, default is the same as BUFFER_SIZE)\n" +" -w, --write\n" +" perform write test (default is read)\n" +" --pattern PATTERN\n" +" write this pattern byte instead of zero\n" +" --flush-interval FLUSH_INTERVAL\n" +" issue flush after this number of requests\n" +" --no-drain\n" +" do not wait when flushing pending requests\n" +" -i, --aio AIO\n" +" async-io backend (threads, native, io_uring)\n" +" -n, --native\n" +" use native AIO backend if possible\n" +" -U, --force-share\n" +" open images in shared mode for concurrent access\n" +" -q, --quiet\n" +" quiet mode (produce only error messages if any)\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" FILE\n" +" name of the image file, or option string (key=value,..)\n" +" with --image-opts, to operate on\n" +); + break; + case 'f': + fmt = optarg; break; - case '?': - unrecognized_option(argv[optind - 1]); + case OPTION_IMAGE_OPTS: + image_opts = true; break; - case 'h': - help(); + case 't': + ret = bdrv_parse_cache_mode(optarg, &flags, &writethrough); + if (ret < 0) { + error_report("Invalid cache mode"); + ret = -1; + goto out; + } break; case 'c': - { - unsigned long res; - - if (qemu_strtoul(optarg, NULL, 0, &res) < 0 || res > INT_MAX) { - error_report("Invalid request count specified"); + count = cvtnum_full("request count", optarg, false, 1, INT_MAX); + if (count < 0) { return 1; } - count = res; break; - } case 'd': - { - unsigned long res; - - if (qemu_strtoul(optarg, NULL, 0, &res) < 0 || res > INT_MAX) { - error_report("Invalid queue depth specified"); + depth = cvtnum_full("queue depth", optarg, false, 1, INT_MAX); + if (depth < 0) { return 1; } - depth = res; - break; - } - case 'f': - fmt = optarg; break; case 'n': flags |= BDRV_O_NATIVE_AIO; @@ -4589,89 +4847,59 @@ static int 
img_bench(int argc, char **argv) } break; case 'o': - { - offset = cvtnum("offset", optarg); + offset = cvtnum("offset", optarg, true); if (offset < 0) { return 1; } break; - } - break; - case 'q': - quiet = true; - break; case 's': - { - int64_t sval; - - sval = cvtnum_full("buffer size", optarg, 0, INT_MAX); - if (sval < 0) { + bufsize = cvtnum_full("buffer size", optarg, true, 1, INT_MAX); + if (bufsize < 0) { return 1; } - - bufsize = sval; break; - } case 'S': - { - int64_t sval; - - sval = cvtnum_full("step_size", optarg, 0, INT_MAX); - if (sval < 0) { + step = cvtnum_full("step size", optarg, true, 0, INT_MAX); + if (step < 0) { return 1; } - - step = sval; - break; - } - case 't': - ret = bdrv_parse_cache_mode(optarg, &flags, &writethrough); - if (ret < 0) { - error_report("Invalid cache mode"); - ret = -1; - goto out; - } break; case 'w': flags |= BDRV_O_RDWR; is_write = true; break; - case 'U': - force_share = true; - break; case OPTION_PATTERN: - { - unsigned long res; - - if (qemu_strtoul(optarg, NULL, 0, &res) < 0 || res > 0xff) { - error_report("Invalid pattern byte specified"); + pattern = cvtnum_full("pattern byte", optarg, false, 0, 0xff); + if (pattern < 0) { return 1; } - pattern = res; break; - } case OPTION_FLUSH_INTERVAL: - { - unsigned long res; - - if (qemu_strtoul(optarg, NULL, 0, &res) < 0 || res > INT_MAX) { - error_report("Invalid flush interval specified"); + flush_interval = cvtnum_full("flush interval", optarg, + false, 0, INT_MAX); + if (flush_interval < 0) { return 1; } - flush_interval = res; break; - } case OPTION_NO_DRAIN: drain_on_flush = false; break; - case OPTION_IMAGE_OPTS: - image_opts = true; + case 'U': + force_share = true; + break; + case 'q': + quiet = true; break; + case OPTION_OBJECT: + user_creatable_process_cmdline(optarg); + break; + default: + tryhelp(argv[0]); } } if (optind != argc - 1) { - error_exit("Expecting one image file name"); + error_exit(argv[0], "Expecting one image file name"); } filename = argv[argc - 1]; @@ -4771,7 +4999,7 @@ typedef struct ImgBitmapAction { QSIMPLEQ_ENTRY(ImgBitmapAction) next; } ImgBitmapAction; -static int img_bitmap(int argc, char **argv) +static int img_bitmap(const img_cmd_t *ccmd, int argc, char **argv) { Error *err = NULL; int c, ret = 1; @@ -4793,48 +5021,69 @@ static int img_bitmap(int argc, char **argv) for (;;) { static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, - {"object", required_argument, 0, OPTION_OBJECT}, + {"format", required_argument, 0, 'f'}, {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, {"add", no_argument, 0, OPTION_ADD}, + {"granularity", required_argument, 0, 'g'}, {"remove", no_argument, 0, OPTION_REMOVE}, {"clear", no_argument, 0, OPTION_CLEAR}, {"enable", no_argument, 0, OPTION_ENABLE}, {"disable", no_argument, 0, OPTION_DISABLE}, {"merge", required_argument, 0, OPTION_MERGE}, - {"granularity", required_argument, 0, 'g'}, {"source-file", required_argument, 0, 'b'}, {"source-format", required_argument, 0, 'F'}, + {"object", required_argument, 0, OPTION_OBJECT}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":b:f:F:g:h", long_options, NULL); + c = getopt_long(argc, argv, "hf:g:b:F:", + long_options, NULL); if (c == -1) { break; } switch (c) { - case ':': - missing_argument(argv[optind - 1]); - break; - case '?': - unrecognized_option(argv[optind - 1]); - break; case 'h': - help(); - break; - case 'b': - src_filename = optarg; + cmd_help(ccmd, "[-f FMT | --image-opts]\n" +" ( --add [-g SIZE] | --remove | --clear | --enable | --disable |\n" +" 
--merge SOURCE [-b SRC_FILE [-F SRC_FMT]] )..\n" +" [--object OBJDEF] FILE BITMAP\n" +, +" -f, --format FMT\n" +" specify FILE format explicitly (default: probing is used)\n" +" --image-opts\n" +" treat FILE as an option string (key=value,..), not a file name\n" +" (incompatible with -f|--format)\n" +" --add\n" +" creates BITMAP in FILE, enables to record future edits\n" +" -g, --granularity SIZE[bKMGTPE]\n" +" sets non-default granularity for the bitmap being added,\n" +" with optional multiplier suffix (in powers of 1024)\n" +" --remove\n" +" removes BITMAP from FILE\n" +" --clear\n" +" clears BITMAP in FILE\n" +" --enable, --disable\n" +" starts and stops recording future edits to BITMAP in FILE\n" +" --merge SOURCE\n" +" merges contents of the SOURCE bitmap into BITMAP in FILE\n" +" -b, --source-file SRC_FILE\n" +" select alternative source file for --merge\n" +" -F, --source-format SRC_FMT\n" +" specify format for SRC_FILE explicitly\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" FILE\n" +" name of the image file, or option string (key=value,..)\n" +" with --image-opts, to operate on\n" +" BITMAP\n" +" name of the bitmap to add, remove, clear, enable, disable or merge to\n" +); break; case 'f': fmt = optarg; break; - case 'F': - src_fmt = optarg; - break; - case 'g': - granularity = cvtnum("granularity", optarg); - if (granularity < 0) { - return 1; - } + case OPTION_IMAGE_OPTS: + image_opts = true; break; case OPTION_ADD: act = g_new0(ImgBitmapAction, 1); @@ -4842,6 +5091,12 @@ static int img_bitmap(int argc, char **argv) QSIMPLEQ_INSERT_TAIL(&actions, act, next); add = true; break; + case 'g': + granularity = cvtnum("granularity", optarg, true); + if (granularity < 0) { + return 1; + } + break; case OPTION_REMOVE: act = g_new0(ImgBitmapAction, 1); act->act = BITMAP_REMOVE; @@ -4869,12 +5124,17 @@ static int img_bitmap(int argc, char **argv) QSIMPLEQ_INSERT_TAIL(&actions, act, next); merge = true; break; + case 'b': + src_filename = optarg; + break; + case 'F': + src_fmt = optarg; + break; case OPTION_OBJECT: user_creatable_process_cmdline(optarg); break; - case OPTION_IMAGE_OPTS: - image_opts = true; - break; + default: + tryhelp(argv[0]); } } @@ -5017,7 +5277,7 @@ static int img_dd_bs(const char *arg, { int64_t res; - res = cvtnum_full("bs", arg, 1, INT_MAX); + res = cvtnum_full("bs", arg, true, 1, INT_MAX); if (res < 0) { return 1; @@ -5031,7 +5291,7 @@ static int img_dd_count(const char *arg, struct DdIo *in, struct DdIo *out, struct DdInfo *dd) { - dd->count = cvtnum("count", arg); + dd->count = cvtnum("count", arg, true); if (dd->count < 0) { return 1; @@ -5062,7 +5322,7 @@ static int img_dd_skip(const char *arg, struct DdIo *in, struct DdIo *out, struct DdInfo *dd) { - in->offset = cvtnum("skip", arg); + in->offset = cvtnum("skip", arg, true); if (in->offset < 0) { return 1; @@ -5071,7 +5331,7 @@ static int img_dd_skip(const char *arg, return 0; } -static int img_dd(int argc, char **argv) +static int img_dd(const img_cmd_t *ccmd, int argc, char **argv) { int ret = 0; char *arg = NULL; @@ -5115,31 +5375,54 @@ static int img_dd(int argc, char **argv) }; const struct option long_options[] = { { "help", no_argument, 0, 'h'}, - { "object", required_argument, 0, OPTION_OBJECT}, + { "format", required_argument, 0, 'f'}, { "image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, + { "output-format", required_argument, 0, 'O'}, { "force-share", no_argument, 0, 'U'}, + { "object", required_argument, 0, OPTION_OBJECT}, { 0, 0, 0, 0 } }; - while ((c = getopt_long(argc, 
argv, ":hf:O:U", long_options, NULL))) { + while ((c = getopt_long(argc, argv, "hf:O:U", long_options, NULL))) { if (c == EOF) { break; } switch (c) { - case 'O': - out_fmt = optarg; + case 'h': + cmd_help(ccmd, "[-f FMT|--image-opts] [-O OUTPUT_FMT] [-U]\n" +" [--object OBJDEF] [bs=BLOCK_SIZE] [count=BLOCKS] if=INPUT of=OUTPUT\n" +, +" -f, --format FMT\n" +" specify format for INPUT explicitly (default: probing is used)\n" +" --image-opts\n" +" treat INPUT as an option string (key=value,..), not a file name\n" +" (incompatible with -f|--format)\n" +" -O, --output-format OUTPUT_FMT\n" +" format of the OUTPUT (default: raw)\n" +" -U, --force-share\n" +" open images in shared mode for concurrent access\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" bs=BLOCK_SIZE[bKMGTP]\n" +" size of the I/O block, with optional multiplier suffix (powers of 1024)\n" +" (default: 512)\n" +" count=COUNT\n" +" number of blocks to convert (default whole INPUT)\n" +" if=INPUT\n" +" name of the file, or option string (key=value,..)\n" +" with --image-opts, to use for input\n" +" of=OUTPUT\n" +" output file name to create (will be overridden if alrady exists)\n" +); break; case 'f': fmt = optarg; break; - case ':': - missing_argument(argv[optind - 1]); - break; - case '?': - unrecognized_option(argv[optind - 1]); + case OPTION_IMAGE_OPTS: + image_opts = true; break; - case 'h': - help(); + case 'O': + out_fmt = optarg; break; case 'U': force_share = true; @@ -5147,9 +5430,8 @@ static int img_dd(int argc, char **argv) case OPTION_OBJECT: user_creatable_process_cmdline(optarg); break; - case OPTION_IMAGE_OPTS: - image_opts = true; - break; + default: + tryhelp(argv[0]); } } @@ -5339,17 +5621,8 @@ static void dump_json_block_measure_info(BlockMeasureInfo *info) g_string_free(str, true); } -static int img_measure(int argc, char **argv) +static int img_measure(const img_cmd_t *ccmd, int argc, char **argv) { - static const struct option long_options[] = { - {"help", no_argument, 0, 'h'}, - {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, - {"object", required_argument, 0, OPTION_OBJECT}, - {"output", required_argument, 0, OPTION_OUTPUT}, - {"size", required_argument, 0, OPTION_SIZE}, - {"force-share", no_argument, 0, 'U'}, - {0, 0, 0, 0} - }; OutputFormat output_format = OFORMAT_HUMAN; BlockBackend *in_blk = NULL; BlockDriver *drv; @@ -5364,29 +5637,67 @@ static int img_measure(int argc, char **argv) QemuOpts *sn_opts = NULL; QemuOptsList *create_opts = NULL; bool image_opts = false; - uint64_t img_size = UINT64_MAX; + int64_t img_size = -1; BlockMeasureInfo *info = NULL; Error *local_err = NULL; int ret = 1; int c; - while ((c = getopt_long(argc, argv, "hf:O:o:l:U", + static const struct option long_options[] = { + {"help", no_argument, 0, 'h'}, + {"source-format", required_argument, 0, 'f'}, /* img_convert */ + {"format", required_argument, 0, 'f'}, + {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, + {"source-image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, /* img_convert */ + {"snapshot", required_argument, 0, 'l'}, + {"target-format", required_argument, 0, 'O'}, + {"target-format-options", required_argument, 0, 'o'}, /* img_convert */ + {"options", required_argument, 0, 'o'}, + {"force-share", no_argument, 0, 'U'}, + {"output", required_argument, 0, OPTION_OUTPUT}, + {"object", required_argument, 0, OPTION_OBJECT}, + {"size", required_argument, 0, 's'}, + {0, 0, 0, 0} + }; + + while ((c = getopt_long(argc, argv, "hf:l:O:o:Us:", long_options, NULL)) != -1) { switch (c) { - case '?': case 
'h': - help(); + cmd_help(ccmd, "[-f FMT|--image-opts] [-l SNAPSHOT]\n" +" [-O TARGET_FMT] [-o TARGET_FMT_OPTS] [--output human|json]\n" +" [--object OBJDEF] (--size SIZE | FILE)\n" +, +" -f, --format\n" +" specify format of FILE explicitly (default: probing is used)\n" +" --image-opts\n" +" indicates that FILE is a complete image specification\n" +" instead of a file name (incompatible with --format)\n" +" -l, --snapshot SNAPSHOT\n" +" use this snapshot in FILE as source\n" +" -O, --target-format TARGET_FMT\n" +" desired target/output image format (default: raw)\n" +" -o TARGET_FMT_OPTS\n" +" options specific to TARGET_FMT\n" +" --output human|json\n" +" output format (default: human)\n" +" -U, --force-share\n" +" open images in shared mode for concurrent access\n" +" --object OBJDEF\n" +" defines QEMU user-creatable object\n" +" -s, --size SIZE[bKMGTPE]\n" +" measure file size for given image size,\n" +" with optional multiplier suffix (powers of 1024)\n" +" FILE\n" +" measure file size required to convert from FILE (either a file name\n" +" or an option string (key=value,..) with --image-options)\n" +); break; case 'f': fmt = optarg; break; - case 'O': - out_fmt = optarg; - break; - case 'o': - if (accumulate_options(&options, optarg) < 0) { - goto out; - } + case OPTION_IMAGE_OPTS: + image_opts = true; break; case 'l': if (strstart(optarg, SNAPSHOT_OPT_BASE, NULL)) { @@ -5401,37 +5712,31 @@ static int img_measure(int argc, char **argv) snapshot_name = optarg; } break; + case 'O': + out_fmt = optarg; + break; + case 'o': + if (accumulate_options(&options, optarg) < 0) { + goto out; + } + break; case 'U': force_share = true; break; + case OPTION_OUTPUT: + output_format = parse_output_format(argv[0], optarg); + break; case OPTION_OBJECT: user_creatable_process_cmdline(optarg); break; - case OPTION_IMAGE_OPTS: - image_opts = true; - break; - case OPTION_OUTPUT: - if (!strcmp(optarg, "json")) { - output_format = OFORMAT_JSON; - } else if (!strcmp(optarg, "human")) { - output_format = OFORMAT_HUMAN; - } else { - error_report("--output must be used with human or json " - "as argument."); + case 's': + img_size = cvtnum("image size", optarg, true); + if (img_size < 0) { goto out; } break; - case OPTION_SIZE: - { - int64_t sval; - - sval = cvtnum("image size", optarg); - if (sval < 0) { - goto out; - } - img_size = (uint64_t)sval; - } - break; + default: + tryhelp(argv[0]); } } @@ -5446,11 +5751,11 @@ static int img_measure(int argc, char **argv) error_report("--image-opts, -f, and -l require a filename argument."); goto out; } - if (filename && img_size != UINT64_MAX) { + if (filename && img_size != -1) { error_report("--size N cannot be used together with a filename."); goto out; } - if (!filename && img_size == UINT64_MAX) { + if (!filename && img_size == -1) { error_report("Either --size N or one filename must be specified."); goto out; } @@ -5498,7 +5803,7 @@ static int img_measure(int argc, char **argv) goto out; } } - if (img_size != UINT64_MAX) { + if (img_size != -1) { qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size, &error_abort); } @@ -5532,13 +5837,49 @@ static int img_measure(int argc, char **argv) } static const img_cmd_t img_cmds[] = { -#define DEF(option, callback, arg_string) \ - { option, callback }, -#include "qemu-img-cmds.h" -#undef DEF + { "amend", img_amend, + "Update format-specific options of the image" }, + { "bench", img_bench, + "Run a simple image benchmark" }, + { "bitmap", img_bitmap, + "Perform modifications of the persistent bitmap in the image" }, + { 
"check", img_check, + "Check basic image integrity" }, + { "commit", img_commit, + "Commit image to its backing file" }, + { "compare", img_compare, + "Check if two images have the same contents" }, + { "convert", img_convert, + "Copy one or more images to another with optional format conversion" }, + { "create", img_create, + "Create and format a new image file" }, + { "dd", img_dd, + "Copy input to output with optional format conversion" }, + { "info", img_info, + "Display information about the image" }, + { "map", img_map, + "Dump image metadata" }, + { "measure", img_measure, + "Calculate the file size required for a new image" }, + { "rebase", img_rebase, + "Change the backing file of the image" }, + { "resize", img_resize, + "Resize the image" }, + { "snapshot", img_snapshot, + "List or manipulate snapshots in the image" }, { NULL, NULL, }, }; +static void format_print(void *opaque, const char *name) +{ + int *np = opaque; + if (*np + strlen(name) > 75) { + printf("\n "); + *np = 1; + } + *np += printf(" %s", name); +} + int main(int argc, char **argv) { const img_cmd_t *cmd; @@ -5566,23 +5907,39 @@ int main(int argc, char **argv) module_call_init(MODULE_INIT_QOM); bdrv_init(); - if (argc < 2) { - error_exit("Not enough arguments"); - } qemu_add_opts(&qemu_source_opts); qemu_add_opts(&qemu_trace_opts); - while ((c = getopt_long(argc, argv, "+:hVT:", long_options, NULL)) != -1) { + while ((c = getopt_long(argc, argv, "+hVT:", long_options, NULL)) != -1) { switch (c) { - case ':': - missing_argument(argv[optind - 1]); - return 0; - case '?': - unrecognized_option(argv[optind - 1]); - return 0; case 'h': - help(); + printf( +QEMU_IMG_VERSION +"QEMU disk image utility. Usage:\n" +"\n" +" qemu-img [standard options] COMMAND [--help | command options]\n" +"\n" +"Standard options:\n" +" -h, --help\n" +" display this help and exit\n" +" -V, --version\n" +" display version info and exit\n" +" -T,--trace TRACE\n" +" specify tracing options:\n" +" [[enable=]][,events=][,file=]\n" +"\n" +"Recognized commands (run qemu-img COMMAND --help for command-specific help):\n\n"); + for (cmd = img_cmds; cmd->name != NULL; cmd++) { + printf(" %s - %s\n", cmd->name, cmd->description); + } + printf("\nSupported image formats:\n"); + c = 99; /* force a newline */ + bdrv_iterate_format(format_print, &c, false); + if (c) { + printf("\n"); + } + printf("\n" QEMU_HELP_BOTTOM "\n"); return 0; case 'V': printf(QEMU_IMG_VERSION); @@ -5590,18 +5947,16 @@ int main(int argc, char **argv) case 'T': trace_opt_parse(optarg); break; + default: + tryhelp(argv[0]); } } - cmdname = argv[optind]; - - /* reset getopt_long scanning */ - argc -= optind; - if (argc < 1) { - return 0; + if (optind >= argc) { + error_exit(argv[0], "Not enough arguments"); } - argv += optind; - qemu_reset_optind(); + + cmdname = argv[optind]; if (!trace_init_backends()) { exit(1); @@ -5612,10 +5967,16 @@ int main(int argc, char **argv) /* find the command */ for (cmd = img_cmds; cmd->name != NULL; cmd++) { if (!strcmp(cmdname, cmd->name)) { - return cmd->handler(argc, argv); + g_autofree char *argv0 = g_strdup_printf("%s %s", argv[0], cmdname); + /* reset options and getopt processing (incl return order) */ + argv += optind; + argc -= optind; + qemu_reset_optind(); + argv[0] = argv0; + return cmd->handler(cmd, argc, argv); } } /* not found */ - error_exit("Command not found: %s", cmdname); + error_exit(argv[0], "Command not found: %s", cmdname); } diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c index e2fab571831..13e03301624 100644 --- a/qemu-io-cmds.c 
+++ b/qemu-io-cmds.c @@ -10,9 +10,9 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu-io.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "block/block.h" #include "block/block_int.h" /* for info_f() */ #include "block/qapi.h" diff --git a/qemu-io.c b/qemu-io.c index 6cb1e00385e..8f2de83f3c8 100644 --- a/qemu-io.c +++ b/qemu-io.c @@ -27,10 +27,10 @@ #include "qemu/readline.h" #include "qemu/log.h" #include "qemu/sockets.h" -#include "qapi/qmp/qstring.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qstring.h" +#include "qobject/qdict.h" #include "qom/object_interfaces.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "block/block_int.h" #include "trace/control.h" #include "crypto/init.h" diff --git a/qemu-keymap.c b/qemu-keymap.c index 701e4332af8..1c081db2870 100644 --- a/qemu-keymap.c +++ b/qemu-keymap.c @@ -116,7 +116,6 @@ static void walk_map(struct xkb_keymap *map, xkb_keycode_t code, void *data) if (kshift != kaltgrshift && kaltgr != kaltgrshift) { print_sym(kaltgrshift, qcode, " shift altgr"); } - return; } static void usage(FILE *out) @@ -154,9 +153,9 @@ static xkb_mod_mask_t get_mod(struct xkb_keymap *map, const char *name) int main(int argc, char *argv[]) { - static struct xkb_context *ctx; - static struct xkb_keymap *map; - static struct xkb_state *state; + struct xkb_context *ctx; + struct xkb_keymap *map; + struct xkb_state *state; xkb_mod_index_t mod, mods; int rc; @@ -213,6 +212,7 @@ int main(int argc, char *argv[]) ctx = xkb_context_new(XKB_CONTEXT_NO_FLAGS); map = xkb_keymap_new_from_names(ctx, &names, XKB_KEYMAP_COMPILE_NO_FLAGS); + xkb_context_unref(ctx); if (!map) { /* libxkbcommon prints error */ exit(1); @@ -234,6 +234,8 @@ int main(int argc, char *argv[]) state = xkb_state_new(map); xkb_keymap_key_for_each(map, walk_map, state); + xkb_state_unref(state); + xkb_keymap_unref(map); /* add quirks */ fprintf(outfile, diff --git a/qemu-nbd.c b/qemu-nbd.c index d7b3ccab21c..ed5895861bb 100644 --- a/qemu-nbd.c +++ b/qemu-nbd.c @@ -24,8 +24,8 @@ #include "qemu/help-texts.h" #include "qapi/error.h" #include "qemu/cutils.h" -#include "sysemu/block-backend.h" -#include "sysemu/runstate.h" /* for qemu_system_killed() prototype */ +#include "system/block-backend.h" +#include "system/runstate.h" /* for qemu_system_killed() prototype */ #include "block/block_int.h" #include "block/nbd.h" #include "qemu/main-loop.h" @@ -37,8 +37,8 @@ #include "qemu/log.h" #include "qemu/systemd.h" #include "block/snapshot.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "qom/object_interfaces.h" #include "io/channel-socket.h" #include "io/net-listener.h" @@ -57,19 +57,20 @@ #define HAVE_NBD_DEVICE 0 #endif -#define SOCKET_PATH "/var/lock/qemu-nbd-%s" -#define QEMU_NBD_OPT_CACHE 256 -#define QEMU_NBD_OPT_AIO 257 -#define QEMU_NBD_OPT_DISCARD 258 -#define QEMU_NBD_OPT_DETECT_ZEROES 259 -#define QEMU_NBD_OPT_OBJECT 260 -#define QEMU_NBD_OPT_TLSCREDS 261 -#define QEMU_NBD_OPT_IMAGE_OPTS 262 -#define QEMU_NBD_OPT_FORK 263 -#define QEMU_NBD_OPT_TLSAUTHZ 264 -#define QEMU_NBD_OPT_PID_FILE 265 -#define QEMU_NBD_OPT_SELINUX_LABEL 266 -#define QEMU_NBD_OPT_TLSHOSTNAME 267 +#define SOCKET_PATH "/var/lock/qemu-nbd-%s" +#define QEMU_NBD_OPT_CACHE 256 +#define QEMU_NBD_OPT_AIO 257 +#define QEMU_NBD_OPT_DISCARD 258 +#define QEMU_NBD_OPT_DETECT_ZEROES 259 +#define QEMU_NBD_OPT_OBJECT 260 +#define 
QEMU_NBD_OPT_TLSCREDS 261 +#define QEMU_NBD_OPT_IMAGE_OPTS 262 +#define QEMU_NBD_OPT_FORK 263 +#define QEMU_NBD_OPT_TLSAUTHZ 264 +#define QEMU_NBD_OPT_PID_FILE 265 +#define QEMU_NBD_OPT_SELINUX_LABEL 266 +#define QEMU_NBD_OPT_TLSHOSTNAME 267 +#define QEMU_NBD_OPT_HANDSHAKE_LIMIT 268 #define MBR_SIZE 512 @@ -80,6 +81,7 @@ static int nb_fds; static QIONetListener *server; static QCryptoTLSCreds *tlscreds; static const char *tlsauthz; +static int handshake_limit = NBD_DEFAULT_HANDSHAKE_MAX_SECS; static void usage(const char *name) { @@ -101,6 +103,7 @@ static void usage(const char *name) " -v, --verbose display extra debugging information\n" " -x, --export-name=NAME expose export by name (default is empty string)\n" " -D, --description=TEXT export a human-readable description\n" +" --handshake-limit=N limit client's handshake to N seconds (default 10)\n" "\n" "Exposing part of the image:\n" " -o, --offset=OFFSET offset into the image\n" @@ -390,7 +393,8 @@ static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc, nb_fds++; nbd_update_server_watch(); - nbd_client_new(cioc, tlscreds, tlsauthz, nbd_client_closed); + nbd_client_new(cioc, handshake_limit, + tlscreds, tlsauthz, nbd_client_closed, NULL); } static void nbd_update_server_watch(void) @@ -567,6 +571,8 @@ int main(int argc, char **argv) { "object", required_argument, NULL, QEMU_NBD_OPT_OBJECT }, { "export-name", required_argument, NULL, 'x' }, { "description", required_argument, NULL, 'D' }, + { "handshake-limit", required_argument, NULL, + QEMU_NBD_OPT_HANDSHAKE_LIMIT }, { "tls-creds", required_argument, NULL, QEMU_NBD_OPT_TLSCREDS }, { "tls-hostname", required_argument, NULL, QEMU_NBD_OPT_TLSHOSTNAME }, { "tls-authz", required_argument, NULL, QEMU_NBD_OPT_TLSAUTHZ }, @@ -588,7 +594,8 @@ int main(int argc, char **argv) pthread_t client_thread; const char *fmt = NULL; Error *local_err = NULL; - BlockdevDetectZeroesOptions detect_zeroes = BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF; + BlockdevDetectZeroesOptions detect_zeroes = + BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF; QDict *options = NULL; const char *export_name = NULL; /* defaults to "" later for server mode */ const char *export_description = NULL; @@ -812,6 +819,13 @@ int main(int argc, char **argv) case QEMU_NBD_OPT_SELINUX_LABEL: selinux_label = optarg; break; + case QEMU_NBD_OPT_HANDSHAKE_LIMIT: + if (qemu_strtoi(optarg, NULL, 0, &handshake_limit) < 0 || + handshake_limit < 0) { + error_report("Invalid handshake limit '%s'", optarg); + exit(EXIT_FAILURE); + } + break; } } @@ -838,10 +852,6 @@ int main(int argc, char **argv) export_name = ""; } - if (!trace_init_backends()) { - exit(1); - } - trace_init_file(); qemu_set_log(LOG_TRACE, &error_fatal); socket_activation = check_socket_activation(); @@ -1031,6 +1041,18 @@ int main(int argc, char **argv) #endif /* WIN32 */ } + /* + * trace_init must be done after daemonization. Why? Because at + * least the simple backend spins up a helper thread as well as an + * atexit() handler that waits on that thread, but the helper + * thread won't survive a fork, leading to deadlock in the child + * if we initialized pre-fork. 
+ */ + if (!trace_init_backends()) { + exit(1); + } + trace_init_file(); + if (opts.device != NULL && sockpath == NULL) { sockpath = g_malloc(128); snprintf(sockpath, 128, SOCKET_PATH, basename(opts.device)); diff --git a/qemu-options.hx b/qemu-options.hx index cee0da20149..64c1f1352d2 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -38,8 +38,13 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \ " nvdimm=on|off controls NVDIMM support (default=off)\n" " memory-encryption=@var{} memory encryption object to use (default=none)\n" " hmat=on|off controls ACPI HMAT support (default=off)\n" + " spcr=on|off controls ACPI SPCR support (default=on)\n" +#ifdef CONFIG_POSIX + " aux-ram-share=on|off allocate auxiliary guest RAM as shared (default: off)\n" +#endif " memory-backend='backend-id' specifies explicitly provided backend for main RAM (default=none)\n" - " cxl-fmw.0.targets.0=firsttarget,cxl-fmw.0.targets.1=secondtarget,cxl-fmw.0.size=size[,cxl-fmw.0.interleave-granularity=granularity]\n", + " cxl-fmw.0.targets.0=firsttarget,cxl-fmw.0.targets.1=secondtarget,cxl-fmw.0.size=size[,cxl-fmw.0.interleave-granularity=granularity]\n" + " smp-cache.0.cache=cachename,smp-cache.0.topology=topologylevel\n", QEMU_ARCH_ALL) SRST ``-machine [type=]name[,prop=value[,...]]`` @@ -68,8 +73,8 @@ SRST ``vmport=on|off|auto`` Enables emulation of VMWare IO port, for vmmouse etc. auto says - to select the value based on accel. For accel=xen the default is - off otherwise the default is on. + to select the value based on accel and i8042. For accel=xen or + i8042=off the default is off otherwise the default is on. ``dump-guest-core=on|off`` Include guest memory in a core dump. The default is on. @@ -101,6 +106,20 @@ SRST Enables or disables ACPI Heterogeneous Memory Attribute Table (HMAT) support. The default is off. + ``spcr=on|off`` + Enables or disables ACPI Serial Port Console Redirection Table + (SPCR) support. The default is on. + + ``aux-ram-share=on|off`` + Allocate auxiliary guest RAM as an anonymous file that is + shareable with an external process. This option applies to + memory allocated as a side effect of creating various devices. + It does not apply to memory-backend-objects, whether explicitly + specified on the command line, or implicitly created by the -m + command line option. The default is off. + + To use the cpr-transfer migration mode, you must set aux-ram-share=on. + ``memory-backend='id'`` An alternative to legacy ``-mem-path`` and ``mem-prealloc`` options. Allows to use a memory backend as main RAM. @@ -159,6 +178,33 @@ SRST :: -machine cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=128G,cxl-fmw.0.interleave-granularity=512 + + ``smp-cache.0.cache=cachename,smp-cache.0.topology=topologylevel`` + Define cache properties for SMP system. + + ``cache=cachename`` specifies the cache that the properties will be + applied on. This field is the combination of cache level and cache + type. It supports ``l1d`` (L1 data cache), ``l1i`` (L1 instruction + cache), ``l2`` (L2 unified cache) and ``l3`` (L3 unified cache). + + ``topology=topologylevel`` sets the cache topology level. It accepts + CPU topology levels including ``core``, ``module``, ``cluster``, ``die``, + ``socket``, ``book``, ``drawer`` and a special value ``default``. If + ``default`` is set, then the cache topology will follow the architecture's + default cache topology model. If another topology level is set, the cache + will be shared at corresponding CPU topology level. 
For example, + ``topology=core`` makes the cache shared by all threads within a core. + The omitting cache will default to using the ``default`` level. + + The default cache topology model for an i386 PC machine is as follows: + ``l1d``, ``l1i``, and ``l2`` caches are per ``core``, while the ``l3`` + cache is per ``die``. + + Example: + + :: + + -machine smp-cache.0.cache=l1d,smp-cache.0.topology=core,smp-cache.1.cache=l1i,smp-cache.1.topology=core ERST DEF("M", HAS_ARG, QEMU_OPTION_M, @@ -924,7 +970,7 @@ SRST Sets the period length in microseconds. ``in|out.try-poll=on|off`` - Attempt to use poll mode with the device. Default is on. + Attempt to use poll mode with the device. Default is off. ``threshold=threshold`` Threshold (in microseconds) when playback starts. Default is 0. @@ -961,7 +1007,7 @@ SRST ``in|out.buffer-count=count`` Sets the count of the buffers. - ``in|out.try-poll=on|of`` + ``in|out.try-poll=on|off`` Attempt to use poll mode with the device. Default is on. ``try-mmap=on|off`` @@ -1102,7 +1148,7 @@ SRST external entity that provides the IPMI services. A connection is made to an external BMC simulator. If you do this, - it is strongly recommended that you use the "reconnect=" chardev + it is strongly recommended that you use the "reconnect-ms=" chardev option to reconnect to the simulator if the connection is lost. Note that if this is not used carefully, it can be a security issue, as the interface has the ability to send resets, NMIs, and power off @@ -1185,6 +1231,13 @@ SRST ``aw-bits=val`` (val between 32 and 64, default depends on machine) This decides the address width of the IOVA address space. +``-device arm-smmuv3,primary-bus=id`` + This is only supported by ``-machine virt`` (ARM). + + ``primary-bus=id`` + Accepts either the default root complex (pcie.0) or a + pxb-pcie based root complex. + ERST DEF("name", HAS_ARG, QEMU_OPTION_name, @@ -1766,29 +1819,18 @@ DEF("fsdev", HAS_ARG, QEMU_OPTION_fsdev, " [[,throttling.bps-total-max=bm]|[[,throttling.bps-read-max=rm][,throttling.bps-write-max=wm]]]\n" " [[,throttling.iops-total-max=im]|[[,throttling.iops-read-max=irm][,throttling.iops-write-max=iwm]]]\n" " [[,throttling.iops-size=is]]\n" - "-fsdev proxy,id=id,socket=socket[,writeout=immediate][,readonly=on]\n" - "-fsdev proxy,id=id,sock_fd=sock_fd[,writeout=immediate][,readonly=on]\n" "-fsdev synth,id=id\n", QEMU_ARCH_ALL) SRST ``-fsdev local,id=id,path=path,security_model=security_model [,writeout=writeout][,readonly=on][,fmode=fmode][,dmode=dmode] [,throttling.option=value[,throttling.option=value[,...]]]`` \ -``-fsdev proxy,id=id,socket=socket[,writeout=writeout][,readonly=on]`` - \ -``-fsdev proxy,id=id,sock_fd=sock_fd[,writeout=writeout][,readonly=on]`` - \ ``-fsdev synth,id=id[,readonly=on]`` Define a new file system device. Valid options are: ``local`` Accesses to the filesystem are done by QEMU. - ``proxy`` - Accesses to the filesystem are done by virtfs-proxy-helper(1). This - option is deprecated (since QEMU 8.1) and will be removed in a future - version of QEMU. Use ``local`` instead. - ``synth`` Synthetic filesystem, only used by QTests. @@ -1813,8 +1855,6 @@ SRST security model is same as passthrough except the sever won't report failures if it fails to set file attributes like ownership. Security model is mandatory only for local fsdriver. - Other fsdrivers (like proxy) don't take security model as a - parameter. ``writeout=writeout`` This is an optional argument. 
The only supported value is @@ -1827,16 +1867,6 @@ SRST Enables exporting 9p share as a readonly mount for guests. By default read-write access is given. - ``socket=socket`` - Enables proxy filesystem driver to use passed socket file for - communicating with virtfs-proxy-helper(1). - - ``sock_fd=sock_fd`` - Enables proxy filesystem driver to use passed socket descriptor - for communicating with virtfs-proxy-helper(1). Usually a helper - like libvirt will create socketpair and pass one of the fds as - sock\_fd. - ``fmode=fmode`` Specifies the default mode for newly created files on the host. Works only with security models "mapped-xattr" and @@ -1889,18 +1919,12 @@ ERST DEF("virtfs", HAS_ARG, QEMU_OPTION_virtfs, "-virtfs local,path=path,mount_tag=tag,security_model=mapped-xattr|mapped-file|passthrough|none\n" " [,id=id][,writeout=immediate][,readonly=on][,fmode=fmode][,dmode=dmode][,multidevs=remap|forbid|warn]\n" - "-virtfs proxy,mount_tag=tag,socket=socket[,id=id][,writeout=immediate][,readonly=on]\n" - "-virtfs proxy,mount_tag=tag,sock_fd=sock_fd[,id=id][,writeout=immediate][,readonly=on]\n" "-virtfs synth,mount_tag=tag[,id=id][,readonly=on]\n", QEMU_ARCH_ALL) SRST ``-virtfs local,path=path,mount_tag=mount_tag ,security_model=security_model[,writeout=writeout][,readonly=on] [,fmode=fmode][,dmode=dmode][,multidevs=multidevs]`` \ -``-virtfs proxy,socket=socket,mount_tag=mount_tag [,writeout=writeout][,readonly=on]`` - \ -``-virtfs proxy,sock_fd=sock_fd,mount_tag=mount_tag [,writeout=writeout][,readonly=on]`` - \ ``-virtfs synth,mount_tag=mount_tag`` Define a new virtual filesystem device and expose it to the guest using a virtio-9p-device (a.k.a. 9pfs), which essentially means that a certain @@ -1917,11 +1941,6 @@ SRST ``local`` Accesses to the filesystem are done by QEMU. - ``proxy`` - Accesses to the filesystem are done by virtfs-proxy-helper(1). - This option is deprecated (since QEMU 8.1) and will be removed in a - future version of QEMU. Use ``local`` instead. - ``synth`` Synthetic filesystem, only used by QTests. @@ -1946,8 +1965,6 @@ SRST security model is same as passthrough except the sever won't report failures if it fails to set file attributes like ownership. Security model is mandatory only for local fsdriver. - Other fsdrivers (like proxy) don't take security model as a - parameter. ``writeout=writeout`` This is an optional argument. The only supported value is @@ -1960,16 +1977,6 @@ SRST Enables exporting 9p share as a readonly mount for guests. By default read-write access is given. - ``socket=socket`` - Enables proxy filesystem driver to use passed socket file for - communicating with virtfs-proxy-helper(1). Usually a helper like - libvirt will create socketpair and pass one of the fds as - sock\_fd. - - ``sock_fd`` - Enables proxy filesystem driver to use passed 'sock\_fd' as the - socket descriptor for interfacing with virtfs-proxy-helper(1). - ``fmode=fmode`` Specifies the default mode for newly created files on the host. Works only with security models "mapped-xattr" and @@ -1984,32 +1991,37 @@ SRST Specifies the tag name to be used by the guest to mount this export point. - ``multidevs=multidevs`` - Specifies how to deal with multiple devices being shared with a - 9p export. Supported behaviours are either "remap", "forbid" or - "warn". 
The latter is the default behaviour on which virtfs 9p - expects only one device to be shared with the same export, and - if more than one device is shared and accessed via the same 9p - export then only a warning message is logged (once) by qemu on - host side. In order to avoid file ID collisions on guest you - should either create a separate virtfs export for each device to - be shared with guests (recommended way) or you might use "remap" - instead which allows you to share multiple devices with only one - export instead, which is achieved by remapping the original - inode numbers from host to guest in a way that would prevent - such collisions. Remapping inodes in such use cases is required + ``multidevs=remap|forbid|warn`` + Specifies how to deal with multiple devices being shared with + the same 9p export in order to avoid file ID collisions on guest. + Supported behaviours are either "remap" (default), "forbid" or + "warn". + + ``remap`` : assumes the possibility that more than one device is + shared with the same 9p export. Therefore inode numbers from host + are remapped for guest in a way that would prevent file ID + collisions on guest. Remapping inodes in such cases is required because the original device IDs from host are never passed and exposed on guest. Instead all files of an export shared with - virtfs always share the same device id on guest. So two files + virtfs always share the same device ID on guest. So two files with identical inode numbers but from actually different devices on host would otherwise cause a file ID collision and hence - potential misbehaviours on guest. "forbid" on the other hand - assumes like "warn" that only one device is shared by the same - export, however it will not only log a warning message but also - deny access to additional devices on guest. Note though that - "forbid" does currently not block all possible file access - operations (e.g. readdir() would still return entries from other - devices). + potential severe misbehaviours on guest. + + ``warn`` : virtfs 9p expects only one device to be shared with + the same export. If however more than one device is shared and + accessed via the same 9p export then only a warning message is + logged (once) by qemu on host side. No further action is performed + in this case that would prevent file ID collisions on guest. This + could thus lead to severe misbehaviours in this case like wrong + files being accessed and data corruption on the exported tree. + + ``forbid`` : assumes like "warn" that only one device is shared + by the same 9p export, however it will not only log a warning + message but also deny access to additional devices on guest. Note + though that "forbid" does currently not block all possible file + access operations (e.g. readdir() would still return entries from + other devices). ERST DEF("iscsi", HAS_ARG, QEMU_OPTION_iscsi, @@ -2281,6 +2293,8 @@ DEF("spice", HAS_ARG, QEMU_OPTION_spice, " [,streaming-video=[off|all|filter]][,disable-copy-paste=on|off]\n" " [,disable-agent-file-xfer=on|off][,agent-mouse=[on|off]]\n" " [,playback-compression=[on|off]][,seamless-migration=[on|off]]\n" + " [,video-codec=\n" + " [,max-refresh-rate=rate\n" " [,gl=[on|off]][,rendernode=]\n" " enable spice\n" " at least one of {port, tls-port} is mandatory\n", @@ -2369,6 +2383,17 @@ SRST ``seamless-migration=[on|off]`` Enable/disable spice seamless migration. Default is off. + ``video-codec=`` + Provide the preferred codec the Spice server should use with the + Gstreamer encoder. 
This option is only relevant when gl=on is + specified. If no codec is provided, then the codec gstreamer:h264 + would be used as default. And, for the case where gl=off, the + default codec to be used is determined by the Spice server. + + ``max-refresh-rate=rate`` + Provide the maximum refresh rate (or FPS) at which the encoding + requests should be sent to the Spice server. Default would be 30. + ``gl=[on|off]`` Enable/disable OpenGL context. Default is off. @@ -2377,22 +2402,6 @@ SRST pick the first available. (Since 2.9) ERST -DEF("portrait", 0, QEMU_OPTION_portrait, - "-portrait rotate graphical output 90 deg left (only PXA LCD)\n", - QEMU_ARCH_ALL) -SRST -``-portrait`` - Rotate graphical output 90 deg left (only PXA LCD). -ERST - -DEF("rotate", HAS_ARG, QEMU_OPTION_rotate, - "-rotate rotate graphical output some deg left (only PXA LCD)\n", - QEMU_ARCH_ALL) -SRST -``-rotate deg`` - Rotate graphical output some deg left (only PXA LCD). -ERST - DEF("vga", HAS_ARG, QEMU_OPTION_vga, "-vga [std|cirrus|vmware|qxl|xenfb|tcx|cg3|virtio|none]\n" " select video card type\n", QEMU_ARCH_ALL) @@ -2704,7 +2713,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios, " specify SMBIOS type 3 fields\n" "-smbios type=4[,sock_pfx=str][,manufacturer=str][,version=str][,serial=str]\n" " [,asset=str][,part=str][,max-speed=%d][,current-speed=%d]\n" - " [,processor-family=%d,processor-id=%d]\n" + " [,processor-family=%d][,processor-id=%d]\n" " specify SMBIOS type 4 fields\n" "-smbios type=8[,external_reference=str][,internal_reference=str][,connector_type=%d][,port_type=%d]\n" " specify SMBIOS type 8 fields\n" @@ -2812,6 +2821,26 @@ DEFHEADING() DEFHEADING(Network options:) DEF("netdev", HAS_ARG, QEMU_OPTION_netdev, +#ifdef CONFIG_PASST + "-netdev passt,id=str[,path=file][,quiet=on|off][,vhost-user=on|off]\n" + "[,mtu=mtu][,address=addr][,netmask=mask][,mac=addr][,gateway=addr]\n" + " [,interface=name][,outbound=address][,outbound-if4=name]\n" + " [,outbound-if6=name][,dns=addr][,search=list][,fqdn=name]\n" + " [,dhcp-dns=on|off][,dhcp-search=on|off][,map-host-loopback=addr]\n" + " [,map-guest-addr=addr][,dns-forward=addr][,dns-host=addr]\n" + " [,tcp=on|off][,udp=on|off][,icmp=on|off][,dhcp=on|off]\n" + " [,ndp=on|off][,dhcpv6=on|off][,ra=on|off][,freebind=on|off]\n" + " [,ipv4=on|off][,ipv6=on|off][,tcp-ports=spec][,udp-ports=spec]\n" + " [,param=list]\n" + " configure a passt network backend with ID 'str'\n" + " if 'path' is not provided 'passt' will be started according to PATH\n" + " by default, informational message of passt are not displayed (quiet=on)\n" + " to display this message, use 'quiet=off'\n" + " by default, passt will be started in socket-based mode, to enable vhost-mode,\n" + " use 'vhost-user=on'\n" + " for details on other options, refer to passt(1)\n" + " 'param' allows to pass any option defined by passt(1)\n" +#endif #ifdef CONFIG_SLIRP "-netdev user,id=str[,ipv4=on|off][,net=addr[/mask]][,host=addr]\n" " [,ipv6=on|off][,ipv6-net=addr[/int]][,ipv6-host=addr]\n" @@ -2895,9 +2924,9 @@ DEF("netdev", HAS_ARG, QEMU_OPTION_netdev, "-netdev socket,id=str[,fd=h][,udp=host:port][,localaddr=host:port]\n" " configure a network backend to connect to another network\n" " using an UDP tunnel\n" - "-netdev stream,id=str[,server=on|off],addr.type=inet,addr.host=host,addr.port=port[,to=maxport][,numeric=on|off][,keep-alive=on|off][,mptcp=on|off][,addr.ipv4=on|off][,addr.ipv6=on|off][,reconnect=seconds]\n" - "-netdev 
stream,id=str[,server=on|off],addr.type=unix,addr.path=path[,abstract=on|off][,tight=on|off][,reconnect=seconds]\n" - "-netdev stream,id=str[,server=on|off],addr.type=fd,addr.str=file-descriptor[,reconnect=seconds]\n" + "-netdev stream,id=str[,server=on|off],addr.type=inet,addr.host=host,addr.port=port[,to=maxport][,numeric=on|off][,keep-alive=on|off][,mptcp=on|off][,addr.ipv4=on|off][,addr.ipv6=on|off][,reconnect-ms=milliseconds]\n" + "-netdev stream,id=str[,server=on|off],addr.type=unix,addr.path=path[,abstract=on|off][,tight=on|off][,reconnect-ms=milliseconds]\n" + "-netdev stream,id=str[,server=on|off],addr.type=fd,addr.str=file-descriptor[,reconnect-ms=milliseconds]\n" " configure a network backend to connect to another network\n" " using a socket connection in stream mode.\n" "-netdev dgram,id=str,remote.type=inet,remote.host=maddr,remote.port=port[,local.type=inet,local.host=addr]\n" @@ -2925,6 +2954,7 @@ DEF("netdev", HAS_ARG, QEMU_OPTION_netdev, #ifdef CONFIG_AF_XDP "-netdev af-xdp,id=str,ifname=name[,mode=native|skb][,force-copy=on|off]\n" " [,queues=n][,start-queue=m][,inhibit=on|off][,sock-fds=x:y:...:z]\n" + " [,map-path=/path/to/socket/map][,map-start-index=i]\n" " attach to the existing network interface 'name' with AF_XDP socket\n" " use 'mode=MODE' to specify an XDP program attach mode\n" " use 'force-copy=on|off' to force XDP copy mode even if device supports zero-copy (default: off)\n" @@ -2932,6 +2962,8 @@ DEF("netdev", HAS_ARG, QEMU_OPTION_netdev, " with inhibit=on,\n" " use 'sock-fds' to provide file descriptors for already open AF_XDP sockets\n" " added to a socket map in XDP program. One socket per queue.\n" + " use 'map-path' to provide the socket map location to populate AF_XDP sockets with,\n" + " and use 'map-start-index' to specify the starting index for the map (default: 0) (Since 10.1)\n" " use 'queues=n' to specify how many queues of a multiqueue interface should be used\n" " use 'start-queue=m' to specify the first queue that should be used\n" #endif @@ -2968,6 +3000,9 @@ DEF("netdev", HAS_ARG, QEMU_OPTION_netdev, " configure a hub port on the hub with ID 'n'\n", QEMU_ARCH_ALL) DEF("nic", HAS_ARG, QEMU_OPTION_nic, "-nic [tap|bridge|" +#ifdef CONFIG_PASST + "passt|" +#endif #ifdef CONFIG_SLIRP "user|" #endif @@ -3000,6 +3035,9 @@ DEF("net", HAS_ARG, QEMU_OPTION_net, " configure or create an on-board (or machine default) NIC and\n" " connect it to hub 0 (please use -nic unless you need a hub)\n" "-net [" +#ifdef CONFIG_PASST + "passt|" +#endif #ifdef CONFIG_SLIRP "user|" #endif @@ -3021,7 +3059,7 @@ DEF("net", HAS_ARG, QEMU_OPTION_net, " old way to initialize a host network interface\n" " (use the -netdev option if possible instead)\n", QEMU_ARCH_ALL) SRST -``-nic [tap|bridge|user|l2tpv3|vde|netmap|af-xdp|vhost-user|socket][,...][,mac=macaddr][,model=mn]`` +``-nic [tap|passt|bridge|user|l2tpv3|vde|netmap|af-xdp|vhost-user|socket][,...][,mac=macaddr][,model=mn]`` This option is a shortcut for configuring both the on-board (default) guest NIC hardware and the host network backend in one go. The host backend options are the same as with the corresponding @@ -3043,6 +3081,129 @@ SRST network backend) which is activated if no other networking options are provided. +``-netdev passt,id=str[,option][,...]`` + Configure a passt network backend which requires no administrator + privilege to run. Valid options are: + + ``id=id`` + Assign symbolic name for use in monitor commands. + + ``path=file`` + Filename of the passt program to run. 
If it is not provided, + passt command will be started with the help of the PATH environment + variable. + + ``quiet=on|off`` + By default, ``quiet=on`` to disable informational messages from + passt. ``quiet=on`` is passed as ``--quiet`` to passt. + + ``vhost-user=on|off`` + By default, ``vhost-user=off`` and QEMU uses the stream network + backend to communicate with passt. If ``vhost-user=on``, passt is + started with ``--vhost-user`` and QEMU uses the vhost-user network + backend to communicate with passt. + + ``mtu`` + Assign MTU via DHCP/NDP + + ``address`` + IPv4 or IPv6 address + + ``netmask`` + IPv4 mask + + ``mac`` + source MAC address + + ``gateway`` + IPv4 or IPv6 address as gateway + + ``interface`` + Interface for addresses and routes + + ``outbound`` + Bind to address as outbound source + + ``outbound-if4`` + Bind to outbound interface for IPv4 + + ``outbound-if6`` + Bind to outbound interface for IPv6 + + ``dns`` + IPv4 or IPv6 address as DNS + + ``search`` + Search domains + + ``fqdn`` + FQDN to configure client with + + ``dhcp-dns`` + Enable/disable DNS list in DHCP/DHCPv6/NDP + + ``dhcp-search`` + Enable/disable list in DHCP/DHCPv6/NDP + + ``map-host-loopback`` + Address to refer to host + + ``map-guest-addr`` + Address to translate to guest's address + + ``dns-forward`` + Forward DNS queries sent to + + ``dns-host`` + Host nameserver to direct queries to + + ``tcp`` + Enable/disable TCP + + ``udp`` + Enable/disable UDP + + ``icmp`` + Enable/disable ICMP + + ``dhcp`` + Enable/disable DHCP + + ``ndp`` + Enable/disable NDP + + ``dhcpv6`` + Enable/disable DHCPv6 + + ``ra`` + Enable/disable route advertisements + + ``freebind`` + Bind to any address for forwarding + + ``ipv4`` + Enable/disable IPv4 + + ``ipv6`` + Enable/disable IPv6 + + ``tcp-ports`` + TCP ports to forward + + ``udp-ports`` + UDP ports to forward + + ``param=string`` + ``string`` will be passed to passt as a command line parameter; + the ``param`` parameter can be given multiple times to + pass multiple parameters to passt. + + For instance, to pass ``--trace --log=trace.log``: + + .. parsed-literal:: + + |qemu_system| -nic passt,param=--trace,param=--log=trace.log + ``-netdev user,id=id[,option][,option][,...]`` Configure user mode host network backend which requires no administrator privilege to run. Valid options are: @@ -3353,7 +3514,7 @@ SRST -device e1000,netdev=n1,mac=52:54:00:12:34:56 \\ -netdev socket,id=n1,mcast=239.192.168.1:1102,localaddr=1.2.3.4 -``-netdev stream,id=str[,server=on|off],addr.type=inet,addr.host=host,addr.port=port[,to=maxport][,numeric=on|off][,keep-alive=on|off][,mptcp=on|off][,addr.ipv4=on|off][,addr.ipv6=on|off][,reconnect=seconds]`` +``-netdev stream,id=str[,server=on|off],addr.type=inet,addr.host=host,addr.port=port[,to=maxport][,numeric=on|off][,keep-alive=on|off][,mptcp=on|off][,addr.ipv4=on|off][,addr.ipv6=on|off][,reconnect-ms=milliseconds]`` Configure a network backend to connect to another QEMU virtual machine or a proxy using a TCP/IP socket. ``server=on|off`` @@ -3380,8 +3541,8 @@ SRST ``ipv6=on|off`` whether to accept IPv6 addresses, default to try both IPv4 and IPv6 - ``reconnect=seconds`` - for a client socket, if a socket is disconnected, then attempt a reconnect after the given number of seconds. + ``reconnect-ms=milliseconds`` + for a client socket, if a socket is disconnected, then attempt a reconnect after the given number of milliseconds. Setting this to zero disables this function. 
(default: 0) Example (two guests connected using a TCP/IP socket): @@ -3395,9 +3556,9 @@ SRST # second VM |qemu_system| linux.img \\ -device virtio-net,netdev=net0,mac=52:54:00:12:34:57 \\ - -netdev stream,id=net0,server=off,addr.type=inet,addr.host=localhost,addr.port=1234,reconnect=5 + -netdev stream,id=net0,server=off,addr.type=inet,addr.host=localhost,addr.port=1234,reconnect-ms=5000 -``-netdev stream,id=str[,server=on|off],addr.type=unix,addr.path=path[,abstract=on|off][,tight=on|off][,reconnect=seconds]`` +``-netdev stream,id=str[,server=on|off],addr.type=unix,addr.path=path[,abstract=on|off][,tight=on|off][,reconnect-ms=milliseconds]`` Configure a network backend to connect to another QEMU virtual machine or a proxy using a stream oriented unix domain socket. ``server=on|off`` @@ -3412,8 +3573,8 @@ SRST ``tight=on|off`` if false, pad an abstract socket address with enough null bytes to make it fill struct sockaddr_un member sun_path. - ``reconnect=seconds`` - for a client socket, if a socket is disconnected, then attempt a reconnect after the given number of seconds. + ``reconnect-ms=milliseconds`` + for a client socket, if a socket is disconnected, then attempt a reconnect after the given number of milliseconds. Setting this to zero disables this function. (default: 0) Example (using passt as a replacement of -netdev user): @@ -3439,9 +3600,9 @@ SRST # second VM |qemu_system| linux.img \\ -device virtio-net,netdev=net0,mac=52:54:00:12:34:57 \\ - -netdev stream,id=net0,server=off,addr.type=unix,addr.path=/tmp/qemu0,reconnect=5 + -netdev stream,id=net0,server=off,addr.type=unix,addr.path=/tmp/qemu0,reconnect-ms=5000 -``-netdev stream,id=str[,server=on|off],addr.type=fd,addr.str=file-descriptor[,reconnect=seconds]`` +``-netdev stream,id=str[,server=on|off],addr.type=fd,addr.str=file-descriptor[,reconnect-ms=milliseconds]`` Configure a network backend to connect to another QEMU virtual machine or a proxy using a stream oriented socket file descriptor. ``server=on|off`` @@ -3450,8 +3611,8 @@ SRST ``addr.str=file-descriptor`` file descriptor number to use as a socket - ``reconnect=seconds`` - for a client socket, if a socket is disconnected, then attempt a reconnect after the given number of seconds. + ``reconnect-ms=milliseconds`` + for a client socket, if a socket is disconnected, then attempt a reconnect after the given number of milliseconds. Setting this to zero disables this function. (default: 0) ``-netdev dgram,id=str,remote.type=inet,remote.host=maddr,remote.port=port[,local.type=inet,local.host=addr]`` @@ -3626,7 +3787,7 @@ SRST # launch QEMU instance |qemu_system| linux.img -nic vde,sock=/tmp/myswitch -``-netdev af-xdp,id=str,ifname=name[,mode=native|skb][,force-copy=on|off][,queues=n][,start-queue=m][,inhibit=on|off][,sock-fds=x:y:...:z]`` +``-netdev af-xdp,id=str,ifname=name[,mode=native|skb][,force-copy=on|off][,queues=n][,start-queue=m][,inhibit=on|off][,sock-fds=x:y:...:z][,map-path=/path/to/socket/map][,map-start-index=i]`` Configure AF_XDP backend to connect to a network interface 'name' using AF_XDP socket. A specific program attach mode for a default XDP program can be forced with 'mode', defaults to best-effort, @@ -3666,7 +3827,8 @@ SRST -netdev af-xdp,id=n1,ifname=eth0,queues=1,start-queue=1 XDP program can also be loaded externally. In this case 'inhibit' option - should be set to 'on' and 'sock-fds' provided with file descriptors for + should be set to 'on'. Either 'sock-fds' or 'map-path' can be used with + 'inhibit' enabled. 
'sock-fds' can be provided with file descriptors for already open but not bound XDP sockets already added to a socket map for corresponding queues. One socket per queue. @@ -3675,6 +3837,21 @@ SRST |qemu_system| linux.img -device virtio-net-pci,netdev=n1 \\ -netdev af-xdp,id=n1,ifname=eth0,queues=3,inhibit=on,sock-fds=15:16:17 + For the 'inhibit' option set to 'on' used together with 'map-path' it is + expected that the XDP program with the socket map is already loaded on + the networking device and the map pinned into BPF file system. The path + to the pinned map is then passed to QEMU which then creates the file + descriptors and inserts them into the existing socket map. + + .. parsed-literal:: + + |qemu_system| linux.img -device virtio-net-pci,netdev=n1 \\ + -netdev af-xdp,id=n1,ifname=eth0,queues=2,inhibit=on,map-path=/sys/fs/bpf/xsks_map + + Additionally, 'map-start-index' can be used to specify the start offset + for insertion into the socket map. The combination of 'map-path' and + 'sock-fds' together is not supported. + ``-netdev vhost-user,chardev=id[,vhostforce=on|off][,queues=n]`` Establish a vhost-user netdev, backed by a chardev id. The chardev should be a unix domain socket backed one. The vhost-user uses a @@ -3727,7 +3904,7 @@ SRST Use ``-net nic,model=help`` for a list of available devices for your target. -``-net user|tap|bridge|socket|l2tpv3|vde[,...][,name=name]`` +``-net user|passt|tap|bridge|socket|l2tpv3|vde[,...][,name=name]`` Configure a host network backend (with the options corresponding to the same ``-netdev`` option) and connect it to the emulated hub 0 (the default hub). Use name to specify the name of the hub port. @@ -3741,9 +3918,9 @@ DEF("chardev", HAS_ARG, QEMU_OPTION_chardev, "-chardev help\n" "-chardev null,id=id[,mux=on|off][,logfile=PATH][,logappend=on|off]\n" "-chardev socket,id=id[,host=host],port=port[,to=to][,ipv4=on|off][,ipv6=on|off][,nodelay=on|off]\n" - " [,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect=seconds][,mux=on|off]\n" + " [,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect-ms=milliseconds][,mux=on|off]\n" " [,logfile=PATH][,logappend=on|off][,tls-creds=ID][,tls-authz=ID] (tcp)\n" - "-chardev socket,id=id,path=path[,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect=seconds]\n" + "-chardev socket,id=id,path=path[,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect-ms=milliseconds]\n" " [,mux=on|off][,logfile=PATH][,logappend=on|off][,abstract=on|off][,tight=on|off] (unix)\n" "-chardev udp,id=id[,host=host],port=port[,localaddr=localaddr]\n" " [,localport=localport][,ipv4=on|off][,ipv6=on|off][,mux=on|off]\n" @@ -3758,7 +3935,7 @@ DEF("chardev", HAS_ARG, QEMU_OPTION_chardev, "-chardev console,id=id[,mux=on|off][,logfile=PATH][,logappend=on|off]\n" "-chardev serial,id=id,path=path[,mux=on|off][,logfile=PATH][,logappend=on|off]\n" #else - "-chardev pty,id=id[,mux=on|off][,logfile=PATH][,logappend=on|off]\n" + "-chardev pty,id=id[,path=path][,mux=on|off][,logfile=PATH][,logappend=on|off]\n" "-chardev stdio,id=id[,mux=on|off][,signal=on|off][,logfile=PATH][,logappend=on|off]\n" #endif #ifdef CONFIG_BRLAPI @@ -3782,7 +3959,7 @@ SRST The general form of a character device option is: ``-chardev backend,id=id[,mux=on|off][,options]`` - Backend is one of: ``null``, ``socket``, ``udp``, ``msmouse``, + Backend is one of: ``null``, ``socket``, ``udp``, ``msmouse``, ``hub``, ``vc``, ``ringbuf``, ``file``, ``pipe``, ``console``, ``serial``, ``pty``, 
``stdio``, ``braille``, ``parallel``, ``spicevmc``, ``spiceport``. The specific backend will determine the @@ -3839,9 +4016,10 @@ The general form of a character device option is: the QEMU monitor, and ``-nographic`` also multiplexes the console and the monitor to stdio. - There is currently no support for multiplexing in the other - direction (where a single QEMU front end takes input and output from - multiple chardevs). + If you need to aggregate data in the opposite direction (where one + QEMU frontend interface receives input and output from multiple + backend chardev devices), please refer to the paragraph below + regarding chardev ``hub`` aggregator device configuration. Every backend supports the ``logfile`` option, which supplies the path to a file to record all data transmitted via the backend. The @@ -3854,7 +4032,7 @@ The available backends are: A void device. This device will not emit any data, and will drop any data it receives. The null backend does not take any options. -``-chardev socket,id=id[,TCP options or unix options][,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect=seconds][,tls-creds=id][,tls-authz=id]`` +``-chardev socket,id=id[,TCP options or unix options][,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect-ms=milliseconds][,tls-creds=id][,tls-authz=id]`` Create a two-way stream socket, which can be either a TCP or a unix socket. A unix socket will be created if ``path`` is specified. Behaviour is undefined if TCP options are specified for a unix @@ -3871,9 +4049,9 @@ The available backends are: ``websocket=on|off`` specifies that the socket uses WebSocket protocol for communication. - ``reconnect`` sets the timeout for reconnecting on non-server + ``reconnect-ms`` sets the timeout for reconnecting on non-server sockets when the remote end goes away. qemu will delay this many - seconds and then attempt to reconnect. Zero disables reconnecting, + milliseconds and then attempt to reconnect. Zero disables reconnecting, and is the default. ``tls-creds`` requests enablement of the TLS protocol for @@ -3941,6 +4119,46 @@ The available backends are: Forward QEMU's emulated msmouse events to the guest. ``msmouse`` does not take any options. +``-chardev hub,id=id,chardevs.0=id[,chardevs.N=id]`` + Explicitly create chardev backend hub device with the possibility + to aggregate input from multiple backend devices and forward it to + a single frontend device. Additionally, ``hub`` device takes the + output from the frontend device and sends it back to all the + connected backend devices. This allows for seamless interaction + between different backend devices and a single frontend + interface. Aggregation supported for up to 4 chardev + devices. (Since 10.0) + + For example, the following is a use case of 2 backend devices: + virtual console ``vc0`` and a pseudo TTY ``pty0`` connected to + a single virtio hvc console frontend device with a hub ``hub0`` + help. Virtual console renders text to an image, which can be + shared over the VNC protocol. In turn, pty backend provides + bidirectional communication to the virtio hvc console over the + pseudo TTY file. 
The example configuration can be as follows: + + :: + + -chardev pty,path=/tmp/pty,id=pty0 \ + -chardev vc,id=vc0 \ + -chardev hub,id=hub0,chardevs.0=pty0,chardevs.1=vc0 \ + -device virtconsole,chardev=hub0 \ + -vnc 0.0.0.0:0 + + Once QEMU starts VNC client and any TTY emulator can be used to + control a single hvc console: + + :: + + # Start TTY emulator + tio /tmp/pty + + # Start VNC client and switch to virtual console Ctrl-Alt-2 + vncviewer :0 + + Several frontend devices is not supported. Stacking of multiplexers + and hub devices is not supported as well. + ``-chardev vc,id=id[[,width=width][,height=height]][[,cols=cols][,rows=rows]]`` Connect to a QEMU text console. ``vc`` may optionally be given a specific size. @@ -3997,12 +4215,22 @@ The available backends are: ``path`` specifies the name of the serial device to open. -``-chardev pty,id=id`` - Create a new pseudo-terminal on the host and connect to it. ``pty`` - does not take any options. +``-chardev pty,id=id[,path=path]`` + Create a new pseudo-terminal on the host and connect to it. ``pty`` is not available on Windows hosts. + If ``path`` is specified, QEMU will create a symbolic link at + that location which points to the new PTY device. + + This avoids having to make QMP or HMP monitor queries to find out + what the new PTY device path is. + + Note that while QEMU will remove the symlink when it exits + gracefully, it will not do so in case of crashes or on certain + startup errors. It is recommended that the user checks and removes + the symlink after QEMU terminates to account for this. + ``-chardev stdio,id=id[,signal=on|off]`` Connect to standard input and standard output of the QEMU process. @@ -4197,6 +4425,13 @@ SRST or in multiboot format. ERST +DEF("shim", HAS_ARG, QEMU_OPTION_shim, \ + "-shim shim.efi use 'shim.efi' to boot the kernel\n", QEMU_ARCH_ALL) +SRST +``-shim shim.efi`` + Use 'shim.efi' to boot the kernel +ERST + DEF("append", HAS_ARG, QEMU_OPTION_append, \ "-append cmdline use 'cmdline' as kernel command line\n", QEMU_ARCH_ALL) SRST @@ -4360,8 +4595,19 @@ SRST vc:80Cx24C - ``pty`` - [Linux only] Pseudo TTY (a new PTY is automatically allocated) + ``pty[:path]`` + [Linux only] Pseudo TTY (a new PTY is automatically allocated). + + If ``path`` is specified, QEMU will create a symbolic link at + that location which points to the new PTY device. + + This avoids having to make QMP or HMP monitor queries to find + out what the new PTY device path is. + + Note that while QEMU will remove the symlink when it exits + gracefully, it will not do so in case of crashes or on certain + startup errors. It is recommended that the user checks and + removes the symlink after QEMU terminates to account for this. ``none`` No device is allocated. Note that for machine types which @@ -4431,14 +4677,14 @@ SRST ``telnet options:`` localhost 5555 - ``tcp:[host]:port[,server=on|off][,wait=on|off][,nodelay=on|off][,reconnect=seconds]`` + ``tcp:[host]:port[,server=on|off][,wait=on|off][,nodelay=on|off][,reconnect-ms=milliseconds]`` The TCP Net Console has two modes of operation. It can send the serial I/O to a location or wait for a connection from a location. By default the TCP Net Console is sent to host at the port. If you use the ``server=on`` option QEMU will wait for a client socket application to connect to the port before continuing, unless the ``wait=on|off`` option was specified. The ``nodelay=on|off`` - option disables the Nagle buffering algorithm. The ``reconnect=on`` + option disables the Nagle buffering algorithm. 
The ``reconnect-ms`` option only applies if ``server=no`` is set, if the connection goes down it will attempt to reconnect at the given interval. If host is omitted, 0.0.0.0 is assumed. Only one TCP connection at a @@ -4468,7 +4714,7 @@ SRST The WebSocket protocol is used instead of raw tcp socket. The port acts as a WebSocket server. Client mode is not supported. - ``unix:path[,server=on|off][,wait=on|off][,reconnect=seconds]`` + ``unix:path[,server=on|off][,wait=on|off][,reconnect-ms=milliseconds]`` A unix domain socket is used instead of a tcp socket. The option works the same as if you had specified ``-serial tcp`` except the unix domain socket path is used for connections. @@ -4607,21 +4853,25 @@ SRST ERST DEF("overcommit", HAS_ARG, QEMU_OPTION_overcommit, - "-overcommit [mem-lock=on|off][cpu-pm=on|off]\n" + "-overcommit [mem-lock=on|off|on-fault][cpu-pm=on|off]\n" " run qemu with overcommit hints\n" - " mem-lock=on|off controls memory lock support (default: off)\n" + " mem-lock=on|off|on-fault controls memory lock support (default: off)\n" " cpu-pm=on|off controls cpu power management (default: off)\n", QEMU_ARCH_ALL) SRST -``-overcommit mem-lock=on|off`` +``-overcommit mem-lock=on|off|on-fault`` \ ``-overcommit cpu-pm=on|off`` Run qemu with hints about host resource overcommit. The default is to assume that host overcommits all resources. Locking qemu and guest memory can be enabled via ``mem-lock=on`` - (disabled by default). This works when host memory is not - overcommitted and reduces the worst-case latency for guest. + or ``mem-lock=on-fault`` (disabled by default). This works when + host memory is not overcommitted and reduces the worst-case latency for + guest. The on-fault option is better for reducing the memory footprint + since it makes allocations lazy, but the pages still get locked in place + once faulted by the guest or QEMU. Note that the two options are mutually + exclusive. Guest ability to manage power state of host cpus (increasing latency for other processes on the same host cpu, but decreasing latency for @@ -4805,7 +5055,7 @@ SRST Start right away with a saved state (``loadvm`` in monitor) ERST -#ifndef _WIN32 +#if !defined(_WIN32) && !defined(EMSCRIPTEN) DEF("daemonize", 0, QEMU_OPTION_daemonize, \ "-daemonize daemonize QEMU after initializing\n", QEMU_ARCH_ALL) #endif @@ -4879,13 +5129,13 @@ SRST with actual performance. When the virtual cpu is sleeping, the virtual time will advance at - default speed unless ``sleep=on`` is specified. With - ``sleep=on``, the virtual time will jump to the next timer + default speed unless ``sleep=off`` is specified. With + ``sleep=off``, the virtual time will jump to the next timer deadline instantly whenever the virtual cpu goes to sleep mode and will not advance if no timer is enabled. This behavior gives deterministic execution times from the guest point of view. - The default if icount is enabled is ``sleep=off``. - ``sleep=on`` cannot be used together with either ``shift=auto`` + The default if icount is enabled is ``sleep=on``. + ``sleep=off`` cannot be used together with either ``shift=auto`` or ``align=on``. 
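The ``mem-lock=on-fault`` behaviour described earlier in this hunk corresponds, on Linux, to locking pages only as they are first touched rather than all at once. The following standalone sketch (an illustration under that assumption, not QEMU's actual implementation) contrasts the eager and lazy modes using ``mlockall()``:

::

    /* Illustration of eager vs. on-fault memory locking on Linux. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* mem-lock=on: current and future mappings are faulted in and
         * pinned immediately, maximising resident memory up front. */
        if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
            perror("mlockall");
        }
        munlockall();

    #ifdef MCL_ONFAULT
        /* mem-lock=on-fault: mappings are only pinned once a page is
         * actually touched (MCL_ONFAULT, Linux 4.4+). */
        if (mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT) != 0) {
            perror("mlockall(MCL_ONFAULT)");
        }
    #endif
        return 0;
    }

The trade-off is the one the option text spells out: the lazy variant keeps the footprint down for memory the guest never touches, while still guaranteeing that pages stay pinned once they have been faulted in.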
``align=on`` will activate the delay algorithm which will try to @@ -4963,10 +5213,18 @@ DEF("incoming", HAS_ARG, QEMU_OPTION_incoming, \ "-incoming exec:cmdline\n" \ " accept incoming migration on given file descriptor\n" \ " or from given external command\n" \ + "-incoming \n" \ + " accept incoming migration on the migration channel\n" \ "-incoming defer\n" \ " wait for the URI to be specified via migrate_incoming\n", QEMU_ARCH_ALL) SRST +The -incoming option specifies the migration channel for an incoming +migration. It may be used multiple times to specify multiple +migration channel types. The channel type is specified in , +or is 'main' for all other forms of -incoming. If multiple -incoming +options are specified for a channel type, the last one takes precedence. + ``-incoming tcp:[host]:port[,to=maxport][,ipv4=on|off][,ipv6=on|off]`` \ ``-incoming rdma:host:port[,ipv4=on|off][,ipv6=on|off]`` @@ -4986,6 +5244,19 @@ SRST Accept incoming migration as an output from specified external command. +``-incoming `` + Accept incoming migration on the migration channel. For the syntax + of , see the QAPI documentation of ``MigrationChannel``. + Examples: + :: + + -incoming '{"channel-type": "main", + "addr": { "transport": "socket", + "type": "unix", + "path": "my.sock" }}' + + -incoming main,addr.transport=socket,addr.type=unix,addr.path=my.sock + ``-incoming defer`` Wait for the URI to be specified via migrate\_incoming. The monitor can be used to change settings (such as migration parameters) prior @@ -5010,19 +5281,6 @@ SRST ``-nodefaults`` option will disable all those default devices. ERST -#ifndef _WIN32 -DEF("runas", HAS_ARG, QEMU_OPTION_runas, \ - "-runas user change to user id user just before starting the VM\n" \ - " user can be numeric uid:gid instead\n", - QEMU_ARCH_ALL) -#endif -SRST -``-runas user`` - Immediately before starting guest execution, drop root privileges, - switching to the specified user. This option is deprecated, use - ``-run-with user=...`` instead. -ERST - DEF("prom-env", HAS_ARG, QEMU_OPTION_prom_env, "-prom-env variable=value\n" " set OpenBIOS nvram variables\n", @@ -5184,7 +5442,7 @@ HXCOMM Internal use DEF("qtest", HAS_ARG, QEMU_OPTION_qtest, "", QEMU_ARCH_ALL) DEF("qtest-log", HAS_ARG, QEMU_OPTION_qtest_log, "", QEMU_ARCH_ALL) -#ifdef CONFIG_POSIX +#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN) DEF("run-with", HAS_ARG, QEMU_OPTION_run_with, "-run-with [async-teardown=on|off][,chroot=dir][user=username|uid:gid]\n" " Set miscellaneous QEMU process lifecycle options:\n" @@ -5210,7 +5468,7 @@ SRST ``chroot=dir`` can be used for doing a chroot to the specified directory immediately before starting the guest execution. This is especially useful - in combination with -runas. + in combination with ``user=...``. ``user=username`` or ``user=uid:gid`` can be used to drop root privileges before starting guest execution. QEMU will use the ``setuid`` and ``setgid`` @@ -5927,6 +6185,34 @@ SRST -machine ...,memory-encryption=sev0 \\ ..... + ``-object igvm-cfg,file=file`` + Create an IGVM configuration object that defines the initial state + of the guest using a file in that conforms to the Independent Guest + Virtual Machine (IGVM) file format. + + This is currently only supported by ``-machine q35`` and + ``-machine pc``. + + The ``file`` parameter is used to specify the IGVM file to load. 
+ When provided, the IGVM file is used to populate the initial + memory of the virtual machine and, depending on the platform, can + define the initial processor state, memory map and parameters. + + The IGVM file is expected to contain the firmware for the virtual + machine, therefore an ``igvm-cfg`` object cannot be provided along + with other ways of specifying firmware, such as the ``-bios`` + parameter on x86 machines. + + e.g to launch a machine providing the firmware in an IGVM file + + .. parsed-literal:: + + # |qemu_system_x86| \\ + ...... \\ + -object igvm-cfg,id=igvm0,file=bios.igvm \\ + -machine ...,igvm-cfg=igvm0 \\ + ..... + ``-object authz-simple,id=id,identity=string`` Create an authorization object that will control access to network services. diff --git a/qemu.nsi b/qemu.nsi index 564d617d11f..d419986ca06 100644 --- a/qemu.nsi +++ b/qemu.nsi @@ -7,7 +7,7 @@ ; This program is free software: you can redistribute it and/or modify ; it under the terms of the GNU General Public License as published by ; the Free Software Foundation, either version 2 of the License, or -; (at your option) version 3 or any later version. +; (at your option) any later version. ; ; This program is distributed in the hope that it will be useful, ; but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -16,6 +16,8 @@ ; ; You should have received a copy of the GNU General Public License ; along with this program. If not, see . +; +; SPDX-License-Identifier: GPL-2.0-or-later ; NSIS_WIN32_MAKENSIS @@ -202,7 +204,6 @@ Section "Uninstall" Delete "$INSTDIR\*.bmp" Delete "$INSTDIR\*.bin" Delete "$INSTDIR\*.dll" - Delete "$INSTDIR\*.dtb" Delete "$INSTDIR\*.fd" Delete "$INSTDIR\*.img" Delete "$INSTDIR\*.lid" @@ -213,6 +214,7 @@ Section "Uninstall" Delete "$INSTDIR\qemu-io.exe" Delete "$INSTDIR\qemu.exe" Delete "$INSTDIR\qemu-system-*.exe" + RMDir /r "$INSTDIR\dtb" RMDir /r "$INSTDIR\doc" RMDir /r "$INSTDIR\share" ; Remove generated files diff --git a/qga/commands-bsd.c b/qga/commands-bsd.c index 9ce48af3118..94ff6fee6ad 100644 --- a/qga/commands-bsd.c +++ b/qga/commands-bsd.c @@ -12,7 +12,6 @@ #include "qemu/osdep.h" #include "qga-qapi-commands.h" -#include "qapi/qmp/qerror.h" #include "qapi/error.h" #include "qemu/queue.h" #include "commands-common.h" diff --git a/qga/commands-linux.c b/qga/commands-linux.c index 51d5e3d9271..9dc0c825035 100644 --- a/qga/commands-linux.c +++ b/qga/commands-linux.c @@ -15,7 +15,6 @@ #include "qapi/error.h" #include "qga-qapi-commands.h" #include "qapi/error.h" -#include "qapi/qmp/qerror.h" #include "commands-common.h" #include "cutils.h" #include @@ -59,6 +58,22 @@ static int dev_major_minor(const char *devpath, return -1; } +/* + * Check if we already have the devmajor:devminor in the mounts + * If thats the case return true. 
+ */ +static bool dev_exists(FsMountList *mounts, unsigned int devmajor, unsigned int devminor) +{ + FsMount *mount; + + QTAILQ_FOREACH(mount, mounts, next) { + if (mount->devmajor == devmajor && mount->devminor == devminor) { + return true; + } + } + return false; +} + static bool build_fs_mount_list_from_mtab(FsMountList *mounts, Error **errp) { struct mntent *ment; @@ -89,6 +104,10 @@ static bool build_fs_mount_list_from_mtab(FsMountList *mounts, Error **errp) /* Skip bind mounts */ continue; } + if (dev_exists(mounts, devmajor, devminor)) { + /* Skip already existing devices (bind mounts) */ + continue; + } mount = g_new0(FsMount, 1); mount->dirname = g_strdup(ment->mnt_dir); @@ -172,6 +191,11 @@ bool build_fs_mount_list(FsMountList *mounts, Error **errp) } } + if (dev_exists(mounts, devmajor, devminor)) { + /* Skip already existing devices (bind mounts) */ + continue; + } + mount = g_new0(FsMount, 1); mount->dirname = g_strdup(line + dir_s); mount->devtype = g_strdup(dash + type_s); @@ -1376,20 +1400,22 @@ static bool linux_sys_state_supports_mode(SuspendMode mode, Error **errp) static void linux_sys_state_suspend(SuspendMode mode, Error **errp) { - g_autoptr(GError) local_gerr = NULL; const char *sysfile_strs[3] = {"disk", "mem", NULL}; const char *sysfile_str = sysfile_strs[mode]; + int fd; if (!sysfile_str) { error_setg(errp, "unknown guest suspend mode"); return; } - if (!g_file_set_contents(LINUX_SYS_STATE_FILE, sysfile_str, - -1, &local_gerr)) { - error_setg(errp, "suspend: cannot write to '%s': %s", - LINUX_SYS_STATE_FILE, local_gerr->message); - return; + fd = open(LINUX_SYS_STATE_FILE, O_WRONLY); + if (fd < 0 || write(fd, sysfile_str, strlen(sysfile_str)) < 0) { + error_setg(errp, "suspend: cannot write to '%s': %m", + LINUX_SYS_STATE_FILE); + } + if (fd >= 0) { + close(fd); } } @@ -2094,26 +2120,28 @@ GuestCpuStatsList *qmp_guest_get_cpustats(Error **errp) return head; } -static char *hexToIPAddress(const void *hexValue, int is_ipv6) +static char *hex_to_ip_address(const void *hex_value, int is_ipv6) { if (is_ipv6) { char addr[INET6_ADDRSTRLEN]; struct in6_addr in6; - const char *hexStr = (const char *)hexValue; + const char *hex_str = (const char *)hex_value; int i; for (i = 0; i < 16; i++) { - sscanf(&hexStr[i * 2], "%02hhx", &in6.s6_addr[i]); + if (sscanf(&hex_str[i * 2], "%02hhx", &in6.s6_addr[i]) != 1) { + return NULL; + } } inet_ntop(AF_INET6, &in6, addr, INET6_ADDRSTRLEN); return g_strdup(addr); } else { - unsigned int hexInt = *(unsigned int *)hexValue; - unsigned int byte1 = (hexInt >> 24) & 0xFF; - unsigned int byte2 = (hexInt >> 16) & 0xFF; - unsigned int byte3 = (hexInt >> 8) & 0xFF; - unsigned int byte4 = hexInt & 0xFF; + unsigned int hex_int = *(unsigned int *)hex_value; + unsigned int byte1 = (hex_int >> 24) & 0xFF; + unsigned int byte2 = (hex_int >> 16) & 0xFF; + unsigned int byte3 = (hex_int >> 8) & 0xFF; + unsigned int byte4 = hex_int & 0xFF; return g_strdup_printf("%u.%u.%u.%u", byte4, byte3, byte2, byte1); } @@ -2122,21 +2150,21 @@ static char *hexToIPAddress(const void *hexValue, int is_ipv6) GuestNetworkRouteList *qmp_guest_network_get_route(Error **errp) { GuestNetworkRouteList *head = NULL, **tail = &head; - const char *routeFiles[] = {"/proc/net/route", "/proc/net/ipv6_route"}; + const char *route_files[] = {"/proc/net/route", "/proc/net/ipv6_route"}; FILE *fp; - size_t n; + size_t n = 0; char *line = NULL; int firstLine; int is_ipv6; int i; + char iface[IFNAMSIZ]; for (i = 0; i < 2; i++) { firstLine = 1; is_ipv6 = (i == 1); - fp = 
fopen(routeFiles[i], "r"); + fp = fopen(route_files[i], "r"); if (fp == NULL) { - error_setg_errno(errp, errno, "open(\"%s\")", routeFiles[i]); - free(line); + error_setg_errno(errp, errno, "open(\"%s\")", route_files[i]); continue; } @@ -2145,80 +2173,74 @@ GuestNetworkRouteList *qmp_guest_network_get_route(Error **errp) firstLine = 0; continue; } - GuestNetworkRoute *route = NULL; - GuestNetworkRoute *networkroute; - char Iface[IFNAMSIZ]; - if (is_ipv6) { - char Destination[33], Source[33], NextHop[33]; - int DesPrefixlen, SrcPrefixlen, Metric, RefCnt, Use, Flags; + g_autoptr(GuestNetworkRoute) route = g_new0(GuestNetworkRoute, 1); - /* Parse the line and extract the values */ + if (is_ipv6) { + char destination[33], source[33], next_hop[33]; + int des_prefixlen, src_prefixlen, metric, refcnt, use, flags; if (sscanf(line, "%32s %x %32s %x %32s %x %x %x %x %s", - Destination, &DesPrefixlen, Source, - &SrcPrefixlen, NextHop, &Metric, &RefCnt, - &Use, &Flags, Iface) != 10) { + destination, &des_prefixlen, source, + &src_prefixlen, next_hop, &metric, &refcnt, + &use, &flags, iface) != 10) { continue; } - route = g_new0(GuestNetworkRoute, 1); - networkroute = route; - networkroute->iface = g_strdup(Iface); - networkroute->destination = hexToIPAddress(Destination, 1); - networkroute->metric = Metric; - networkroute->source = hexToIPAddress(Source, 1); - networkroute->desprefixlen = g_strdup_printf( - "%d", DesPrefixlen - ); - networkroute->srcprefixlen = g_strdup_printf( - "%d", SrcPrefixlen - ); - networkroute->nexthop = hexToIPAddress(NextHop, 1); - networkroute->has_flags = true; - networkroute->flags = Flags; - networkroute->has_refcnt = true; - networkroute->refcnt = RefCnt; - networkroute->has_use = true; - networkroute->use = Use; - networkroute->version = 6; + route->destination = hex_to_ip_address(destination, 1); + if (route->destination == NULL) { + continue; + } + route->iface = g_strdup(iface); + route->source = hex_to_ip_address(source, 1); + route->nexthop = hex_to_ip_address(next_hop, 1); + route->desprefixlen = g_strdup_printf("%d", des_prefixlen); + route->srcprefixlen = g_strdup_printf("%d", src_prefixlen); + route->metric = metric; + route->has_flags = true; + route->flags = flags; + route->has_refcnt = true; + route->refcnt = refcnt; + route->has_use = true; + route->use = use; + route->version = 6; } else { - unsigned int Destination, Gateway, Mask, Flags; - int RefCnt, Use, Metric, MTU, Window, IRTT; - - /* Parse the line and extract the values */ + unsigned int destination, gateway, mask, flags; + int refcnt, use, metric, mtu, window, irtt; if (sscanf(line, "%s %X %X %x %d %d %d %X %d %d %d", - Iface, &Destination, &Gateway, &Flags, &RefCnt, - &Use, &Metric, &Mask, &MTU, &Window, &IRTT) != 11) { + iface, &destination, &gateway, &flags, &refcnt, + &use, &metric, &mask, &mtu, &window, &irtt) != 11) { continue; } - route = g_new0(GuestNetworkRoute, 1); - networkroute = route; - networkroute->iface = g_strdup(Iface); - networkroute->destination = hexToIPAddress(&Destination, 0); - networkroute->gateway = hexToIPAddress(&Gateway, 0); - networkroute->mask = hexToIPAddress(&Mask, 0); - networkroute->metric = Metric; - networkroute->has_flags = true; - networkroute->flags = Flags; - networkroute->has_refcnt = true; - networkroute->refcnt = RefCnt; - networkroute->has_use = true; - networkroute->use = Use; - networkroute->has_mtu = true; - networkroute->mtu = MTU; - networkroute->has_window = true; - networkroute->window = Window; - networkroute->has_irtt = true; - 
networkroute->irtt = IRTT; - networkroute->version = 4; + route->destination = hex_to_ip_address(&destination, 0); + if (route->destination == NULL) { + continue; + } + route->iface = g_strdup(iface); + route->gateway = hex_to_ip_address(&gateway, 0); + route->mask = hex_to_ip_address(&mask, 0); + route->metric = metric; + route->has_flags = true; + route->flags = flags; + route->has_refcnt = true; + route->refcnt = refcnt; + route->has_use = true; + route->use = use; + route->has_mtu = true; + route->mtu = mtu; + route->has_window = true; + route->window = window; + route->has_irtt = true; + route->irtt = irtt; + route->version = 4; } QAPI_LIST_APPEND(tail, route); + route = NULL; } - free(line); fclose(fp); } + free(line); return head; } diff --git a/qga/commands-posix.c b/qga/commands-posix.c index c2bd0b43169..12bc086d79e 100644 --- a/qga/commands-posix.c +++ b/qga/commands-posix.c @@ -18,7 +18,6 @@ #include #include "qga-qapi-commands.h" #include "qapi/error.h" -#include "qapi/qmp/qerror.h" #include "qemu/host-utils.h" #include "qemu/sockets.h" #include "qemu/base64.h" @@ -85,7 +84,7 @@ static ssize_t ga_pipe_read_str(int fd[2], char **str) *str = g_realloc(*str, len + n + 1); memcpy(*str + len, buf, n); len += n; - *str[len] = '\0'; + (*str)[len] = '\0'; } close(fd[0]); fd[0] = -1; @@ -806,8 +805,10 @@ int64_t qmp_guest_fsfreeze_thaw(Error **errp) int ret; ret = qmp_guest_fsfreeze_do_thaw(errp); + if (ret >= 0) { ga_unset_frozen(ga_state); + slog("guest-fsthaw called"); execute_fsfreeze_hook(FSFREEZE_HOOK_THAW, errp); } else { ret = 0; @@ -1369,3 +1370,23 @@ char *qga_get_host_name(Error **errp) return g_steal_pointer(&hostname); } + +#ifdef CONFIG_GETLOADAVG +GuestLoadAverage *qmp_guest_get_load(Error **errp) +{ + double loadavg[3]; + GuestLoadAverage *ret = NULL; + + if (getloadavg(loadavg, G_N_ELEMENTS(loadavg)) < 0) { + error_setg_errno(errp, errno, + "cannot query load average"); + return NULL; + } + + ret = g_new0(GuestLoadAverage, 1); + ret->load1m = loadavg[0]; + ret->load5m = loadavg[1]; + ret->load15m = loadavg[2]; + return ret; +} +#endif diff --git a/qga/commands-win32.c b/qga/commands-win32.c index 61b36da4694..82274808101 100644 --- a/qga/commands-win32.c +++ b/qga/commands-win32.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "guest-agent-core.h" #include "vss-win32.h" @@ -119,6 +120,28 @@ static OpenFlags guest_file_open_modes[] = { {"a+b", FILE_GENERIC_APPEND | GENERIC_READ, OPEN_ALWAYS } }; +/* + * We use an exponentially weighted moving average, just like Unix systems do + * https://en.wikipedia.org/wiki/Load_(computing)#Unix-style_load_calculation + * + * These constants serve as the damping factor and are calculated with + * 1 / exp(sampling interval in seconds / window size in seconds) + * + * This formula comes from linux's include/linux/sched/loadavg.h + * https://github.com/torvalds/linux/blob/345671ea0f9258f410eb057b9ced9cefbbe5dc78/include/linux/sched/loadavg.h#L20-L23 + */ +#define LOADAVG_FACTOR_1F 0.9200444146293232478931553241 +#define LOADAVG_FACTOR_5F 0.9834714538216174894737477501 +#define LOADAVG_FACTOR_15F 0.9944598480048967508795473394 +/* + * The time interval in seconds between taking load counts, same as Linux + */ +#define LOADAVG_SAMPLING_INTERVAL 5 + +double load_avg_1m; +double load_avg_5m; +double load_avg_15m; + #define debug_error(msg) do { \ char *suffix = g_win32_error_message(GetLastError()); \ g_debug("%s: %s", (msg), suffix); \ @@ -826,8 +849,6 @@ static void get_disk_properties(HANDLE vol_h, GuestDiskAddress 
*disk, } out_free: g_free(dev_desc); - - return; } static void get_single_disk_info(int disk_number, @@ -891,7 +912,6 @@ static void get_single_disk_info(int disk_number, err_close: CloseHandle(disk_h); - return; } /* VSS provider works with volumes, thus there is no difference if @@ -1273,6 +1293,9 @@ int64_t qmp_guest_fsfreeze_thaw(Error **errp) qga_vss_fsfreeze(&i, false, NULL, errp); ga_unset_frozen(ga_state); + + slog("guest-fsthaw called"); + return i; } @@ -1914,7 +1937,7 @@ void qmp_guest_set_user_password(const char *username, GError *gerr = NULL; if (crypted) { - error_setg(errp, QERR_UNSUPPORTED); + error_setg(errp, "'crypted' must be off on this host"); return; } @@ -2088,7 +2111,7 @@ static const ga_win_10_0_t WIN_10_0_SERVER_VERSION_MATRIX[] = { {14393, "Microsoft Windows Server 2016", "2016"}, {17763, "Microsoft Windows Server 2019", "2019"}, {20344, "Microsoft Windows Server 2022", "2022"}, - {26040, "MIcrosoft Windows Server 2025", "2025"}, + {26040, "Microsoft Windows Server 2025", "2025"}, { } }; @@ -2114,7 +2137,6 @@ static void ga_get_win_version(RTL_OSVERSIONINFOEXW *info, Error **errp) rtl_get_version_t rtl_get_version = (rtl_get_version_t)fun; rtl_get_version(info); - return; } static char *ga_get_win_name(const OSVERSIONINFOEXW *os_version, bool id) @@ -2445,3 +2467,128 @@ char *qga_get_host_name(Error **errp) return g_utf16_to_utf8(tmp, size, NULL, NULL, NULL); } + + +static VOID CALLBACK load_avg_callback(PVOID hCounter, BOOLEAN timedOut) +{ + PDH_FMT_COUNTERVALUE displayValue; + double currentLoad; + PDH_STATUS err; + + err = PdhGetFormattedCounterValue( + (PDH_HCOUNTER)hCounter, PDH_FMT_DOUBLE, 0, &displayValue); + /* Skip updating the load if we can't get the value successfully */ + if (err != ERROR_SUCCESS) { + slog("PdhGetFormattedCounterValue failed to get load value with 0x%lx", + err); + return; + } + currentLoad = displayValue.doubleValue; + + load_avg_1m = load_avg_1m * LOADAVG_FACTOR_1F + currentLoad * \ + (1.0 - LOADAVG_FACTOR_1F); + load_avg_5m = load_avg_5m * LOADAVG_FACTOR_5F + currentLoad * \ + (1.0 - LOADAVG_FACTOR_5F); + load_avg_15m = load_avg_15m * LOADAVG_FACTOR_15F + currentLoad * \ + (1.0 - LOADAVG_FACTOR_15F); +} + +static BOOL init_load_avg_counter(Error **errp) +{ + CONST WCHAR *szCounterPath = L"\\System\\Processor Queue Length"; + PDH_STATUS status; + BOOL ret; + HQUERY hQuery; + HCOUNTER hCounter; + HANDLE event; + HANDLE waitHandle; + + status = PdhOpenQueryW(NULL, 0, &hQuery); + if (status != ERROR_SUCCESS) { + /* + * If the function fails, the return value is a system error code or + * a PDH error code. error_setg_win32 cant translate PDH error code + * properly, so just report it as is. + */ + error_setg_win32(errp, (DWORD)status, + "PdhOpenQueryW failed with 0x%lx", status); + return FALSE; + } + + status = PdhAddEnglishCounterW(hQuery, szCounterPath, 0, &hCounter); + if (status != ERROR_SUCCESS) { + error_setg_win32(errp, (DWORD)status, + "PdhAddEnglishCounterW failed with 0x%lx. 
Performance counters may be disabled.", + status); + PdhCloseQuery(hQuery); + return FALSE; + } + + event = CreateEventW(NULL, FALSE, FALSE, L"LoadUpdateEvent"); + if (event == NULL) { + error_setg_win32(errp, GetLastError(), "Create LoadUpdateEvent failed"); + PdhCloseQuery(hQuery); + return FALSE; + } + + status = PdhCollectQueryDataEx(hQuery, LOADAVG_SAMPLING_INTERVAL, event); + if (status != ERROR_SUCCESS) { + error_setg_win32(errp, (DWORD)status, + "PdhCollectQueryDataEx failed with 0x%lx", status); + CloseHandle(event); + PdhCloseQuery(hQuery); + return FALSE; + } + + ret = RegisterWaitForSingleObject( + &waitHandle, + event, + (WAITORTIMERCALLBACK)load_avg_callback, + (PVOID)hCounter, + INFINITE, + WT_EXECUTEDEFAULT); + + if (ret == 0) { + error_setg_win32(errp, GetLastError(), + "RegisterWaitForSingleObject failed"); + CloseHandle(event); + PdhCloseQuery(hQuery); + return FALSE; + } + + ga_set_load_avg_wait_handle(ga_state, waitHandle); + ga_set_load_avg_event(ga_state, event); + ga_set_load_avg_pdh_query(ga_state, hQuery); + + return TRUE; +} + +GuestLoadAverage *qmp_guest_get_load(Error **errp) +{ + /* + * The load average logic calls PerformaceCounterAPI, which can result + * in a performance penalty. This avoids running the load average logic + * until a management application actually requests it. The load average + * will not initially be very accurate, but assuming that any interested + * management application will request it repeatedly throughout the lifetime + * of the VM, this seems like a good mitigation. + */ + if (ga_get_load_avg_pdh_query(ga_state) == NULL) { + /* set initial values */ + load_avg_1m = 0; + load_avg_5m = 0; + load_avg_15m = 0; + + if (init_load_avg_counter(errp) == false) { + return NULL; + } + } + + GuestLoadAverage *ret = NULL; + + ret = g_new0(GuestLoadAverage, 1); + ret->load1m = load_avg_1m; + ret->load5m = load_avg_5m; + ret->load15m = load_avg_15m; + return ret; +} diff --git a/qga/commands-windows-ssh.c b/qga/commands-windows-ssh.c index 6a642e3ba81..df45c17b757 100644 --- a/qga/commands-windows-ssh.c +++ b/qga/commands-windows-ssh.c @@ -377,7 +377,7 @@ static bool create_ssh_directory(WindowsUserInfo *userInfo, Error **errp) static bool set_file_permissions(PWindowsUserInfo userInfo, Error **errp) { PACL pACL = NULL; - PSID userPSID; + PSID userPSID = NULL; /* Creates the access control structure */ if (!create_acl(userInfo, &pACL, errp)) { diff --git a/qga/guest-agent-core.h b/qga/guest-agent-core.h index b4e7c52c611..d9f3922adfe 100644 --- a/qga/guest-agent-core.h +++ b/qga/guest-agent-core.h @@ -13,7 +13,11 @@ #ifndef GUEST_AGENT_CORE_H #define GUEST_AGENT_CORE_H -#include "qapi/qmp/dispatch.h" +#ifdef _WIN32 +#include +#endif + +#include "qapi/qmp-registry.h" #include "qga-qapi-types.h" #define QGA_READ_COUNT_DEFAULT 4096 @@ -41,6 +45,12 @@ void ga_set_response_delimited(GAState *s); bool ga_is_frozen(GAState *s); void ga_set_frozen(GAState *s); void ga_unset_frozen(GAState *s); +#ifdef _WIN32 +void ga_set_load_avg_event(GAState *s, HANDLE event); +void ga_set_load_avg_wait_handle(GAState *s, HANDLE wait_handle); +void ga_set_load_avg_pdh_query(GAState *s, HQUERY query); +HQUERY ga_get_load_avg_pdh_query(GAState *s); +#endif const char *ga_fsfreeze_hook(GAState *s); int64_t ga_get_fd_handle(GAState *s, Error **errp); int ga_parse_whence(GuestFileWhence *whence, Error **errp); diff --git a/qga/main.c b/qga/main.c index b8f7b1e4a30..6c02f3ec386 100644 --- a/qga/main.c +++ b/qga/main.c @@ -19,9 +19,9 @@ #include #endif #include 
"qemu/help-texts.h" -#include "qapi/qmp/json-parser.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qjson.h" +#include "qobject/json-parser.h" +#include "qobject/qdict.h" +#include "qobject/qjson.h" #include "guest-agent-core.h" #include "qga-qapi-init-commands.h" #include "qapi/error.h" @@ -33,6 +33,7 @@ #include "qemu-version.h" #ifdef _WIN32 #include +#include #include "qga/service-win32.h" #include "qga/vss-win32.h" #endif @@ -105,6 +106,9 @@ struct GAState { GAService service; HANDLE wakeup_event; HANDLE event_log; + HANDLE load_avg_wait_handle; + HANDLE load_avg_event; + HQUERY load_avg_pdh_query; #endif bool delimit_response; bool frozen; @@ -257,7 +261,7 @@ QEMU_COPYRIGHT "\n" "\n" " -c, --config=PATH configuration file path (default is\n" " %s/qemu-ga.conf\n" -" unless overriden by the QGA_CONF environment variable)\n" +" unless overridden by the QGA_CONF environment variable)\n" " -m, --method transport method: one of unix-listen, virtio-serial,\n" " isa-serial, or vsock-listen (virtio-serial is the default)\n" " -p, --path device/socket path (the default for virtio-serial is:\n" @@ -582,6 +586,25 @@ const char *ga_fsfreeze_hook(GAState *s) } #endif +#ifdef _WIN32 +void ga_set_load_avg_wait_handle(GAState *s, HANDLE wait_handle) +{ + s->load_avg_wait_handle = wait_handle; +} +void ga_set_load_avg_event(GAState *s, HANDLE event) +{ + s->load_avg_event = event; +} +void ga_set_load_avg_pdh_query(GAState *s, HQUERY query) +{ + s->load_avg_pdh_query = query; +} +HQUERY ga_get_load_avg_pdh_query(GAState *s) +{ + return s->load_avg_pdh_query; +} +#endif + static void become_daemon(const char *pidfile) { #ifndef _WIN32 @@ -1402,6 +1425,10 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation) g_debug("Guest agent version %s started", QEMU_FULL_VERSION); #ifdef _WIN32 + s->load_avg_wait_handle = INVALID_HANDLE_VALUE; + s->load_avg_event = INVALID_HANDLE_VALUE; + s->load_avg_pdh_query = NULL; + s->event_log = RegisterEventSource(NULL, "qemu-ga"); if (!s->event_log) { g_autofree gchar *errmsg = g_win32_error_message(GetLastError()); @@ -1430,7 +1457,6 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation) if (config->daemonize) { /* delay opening/locking of pidfile till filesystems are unfrozen */ s->deferred_options.pid_filepath = config->pid_filepath; - become_daemon(NULL); } if (config->log_filepath) { /* delay opening the log file till filesystems are unfrozen */ @@ -1438,9 +1464,6 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation) } ga_disable_logging(s); } else { - if (config->daemonize) { - become_daemon(config->pid_filepath); - } if (config->log_filepath) { FILE *log_file = ga_open_logfile(config->log_filepath); if (!log_file) { @@ -1487,6 +1510,20 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation) ga_apply_command_filters(s); + if (!channel_init(s, s->config->method, s->config->channel_path, + s->socket_activation ? 
FIRST_SOCKET_ACTIVATION_FD : -1)) { + g_critical("failed to initialize guest agent channel"); + return NULL; + } + + if (config->daemonize) { + if (ga_is_frozen(s)) { + become_daemon(NULL); + } else { + become_daemon(config->pid_filepath); + } + } + ga_state = s; return s; } @@ -1496,6 +1533,18 @@ static void cleanup_agent(GAState *s) #ifdef _WIN32 CloseHandle(s->wakeup_event); CloseHandle(s->event_log); + + if (s->load_avg_wait_handle != INVALID_HANDLE_VALUE) { + UnregisterWait(s->load_avg_wait_handle); + } + + if (s->load_avg_event != INVALID_HANDLE_VALUE) { + CloseHandle(s->load_avg_event); + } + + if (s->load_avg_pdh_query) { + PdhCloseQuery(s->load_avg_pdh_query); + } #endif if (s->command_state) { ga_command_state_cleanup_all(s->command_state); @@ -1513,16 +1562,18 @@ static void cleanup_agent(GAState *s) static int run_agent_once(GAState *s) { - if (!channel_init(s, s->config->method, s->config->channel_path, - s->socket_activation ? FIRST_SOCKET_ACTIVATION_FD : -1)) { + if (!s->channel && + channel_init(s, s->config->method, s->config->channel_path, + s->socket_activation ? FIRST_SOCKET_ACTIVATION_FD : -1)) { g_critical("failed to initialize guest agent channel"); return EXIT_FAILURE; } - g_main_loop_run(ga_state->main_loop); + g_main_loop_run(s->main_loop); if (s->channel) { ga_channel_free(s->channel); + s->channel = NULL; } return EXIT_SUCCESS; @@ -1579,7 +1630,7 @@ static void stop_agent(GAState *s, bool requested) int main(int argc, char **argv) { - int ret = EXIT_SUCCESS; + int ret = EXIT_FAILURE; GAState *s; GAConfig *config = g_new0(GAConfig, 1); int socket_activation; @@ -1607,7 +1658,6 @@ int main(int argc, char **argv) socket_activation = check_socket_activation(); if (socket_activation > 1) { g_critical("qemu-ga only supports listening on one socket"); - ret = EXIT_FAILURE; goto end; } if (socket_activation) { @@ -1631,7 +1681,6 @@ int main(int argc, char **argv) if (!config->method) { g_critical("unsupported listen fd type"); - ret = EXIT_FAILURE; goto end; } } else if (config->channel_path == NULL) { @@ -1643,13 +1692,13 @@ int main(int argc, char **argv) config->channel_path = g_strdup(QGA_SERIAL_PATH_DEFAULT); } else { g_critical("must specify a path for this channel"); - ret = EXIT_FAILURE; goto end; } } if (config->dumpconf) { config_dump(config); + ret = EXIT_SUCCESS; goto end; } @@ -1664,6 +1713,7 @@ int main(int argc, char **argv) SERVICE_TABLE_ENTRY service_table[] = { { (char *)QGA_SERVICE_NAME, service_main }, { NULL, NULL } }; StartServiceCtrlDispatcher(service_table); + ret = EXIT_SUCCESS; } else { ret = run_agent(s); } diff --git a/qga/meson.build b/qga/meson.build index 587ec4e5e81..89a4a8f713d 100644 --- a/qga/meson.build +++ b/qga/meson.build @@ -95,7 +95,7 @@ gen_tlb = [] qga_libs = [] if host_os == 'windows' qga_libs += ['-lws2_32', '-lwinmm', '-lpowrprof', '-lwtsapi32', '-lwininet', '-liphlpapi', '-lnetapi32', - '-lsetupapi', '-lcfgmgr32', '-luserenv'] + '-lsetupapi', '-lcfgmgr32', '-luserenv', '-lpdh' ] if have_qga_vss qga_libs += ['-lole32', '-loleaut32', '-lshlwapi', '-lstdc++', '-Wl,--enable-stdcall-fixup'] subdir('vss-win32') diff --git a/qga/qapi-schema.json b/qga/qapi-schema.json index 495706cf731..8162d888bb3 100644 --- a/qga/qapi-schema.json +++ b/qga/qapi-schema.json @@ -2,16 +2,24 @@ # vim: filetype=python ## -# = General note concerning the use of guest agent interfaces +# This manual describes the commands supported by the QEMU Guest +# Agent Protocol. 
# -# "unsupported" is a higher-level error than the errors that -# individual commands might document. The caller should always be -# prepared to receive QERR_UNSUPPORTED, even if the given command -# doesn't specify it, or doesn't document any failure mode at all. -## - -## -# = QEMU guest agent protocol commands and structs +# For locating a particular item, please see the `qapi-qga-index`. +# +# The following notation is used in examples: +# +# .. qmp-example:: +# +# -> ... text sent by client (commands) ... +# <- ... text sent by server (command responses and events) ... +# +# Example text is formatted for readability. However, in real +# protocol usage, its commonly emitted as a single line. +# +# Please refer to the +# :doc:`QEMU Machine Protocol Specification ` +# for the general format of commands, responses, and events. ## { 'pragma': { 'doc-required': true } } @@ -88,11 +96,11 @@ # In cases where a partial stale response was previously received by # the client, this cannot always be done reliably. One particular # scenario being if qemu-ga responses are fed character-by-character -# into a JSON parser. In these situations, using guest-sync-delimited +# into a JSON parser. In these situations, using `guest-sync-delimited` # may be optimal. # # For clients that fetch responses line by line and convert them to -# JSON objects, guest-sync should be sufficient, but note that in +# JSON objects, `guest-sync` should be sufficient, but note that in # cases where the channel is dirty some attempts at parsing the # response may result in a parser error. # @@ -194,8 +202,6 @@ # # Get some information about the guest agent. # -# Returns: @GuestAgentInfo -# # Since: 0.15.0 ## { 'command': 'guest-info', @@ -211,7 +217,7 @@ # # This command does NOT return a response on success. Success # condition is indicated by the VM exiting with a zero exit status or, -# when running with --no-shutdown, by issuing the query-status QMP +# when running with --no-shutdown, by issuing the `query-status` QMP # command to confirm the VM status is "shutdown". # # Since: 0.15.0 @@ -241,7 +247,7 @@ # # Close an open file in the guest # -# @handle: filehandle returned by guest-file-open +# @handle: filehandle returned by `guest-file-open` # # Since: 0.15.0 ## @@ -272,13 +278,11 @@ # As this command is just for limited, ad-hoc debugging, such as log # file access, the number of bytes to read is limited to 48 MB. # -# @handle: filehandle returned by guest-file-open +# @handle: filehandle returned by `guest-file-open` # # @count: maximum number of bytes to read (default is 4KB, maximum is # 48MB) # -# Returns: @GuestFileRead -# # Since: 0.15.0 ## { 'command': 'guest-file-read', @@ -305,15 +309,13 @@ # # Write to an open file in the guest. # -# @handle: filehandle returned by guest-file-open +# @handle: filehandle returned by `guest-file-open` # # @buf-b64: base64-encoded string representing data to be written # # @count: bytes to write (actual bytes, after base64-decode), default # is all content in buf-b64 buffer after base64 decoding # -# Returns: @GuestFileWrite -# # Since: 0.15.0 ## { 'command': 'guest-file-write', @@ -338,7 +340,7 @@ ## # @QGASeek: # -# Symbolic names for use in @guest-file-seek +# Symbolic names for use in `guest-file-seek` # # @set: Set to the specified offset (same effect as 'whence':0) # @@ -353,7 +355,7 @@ ## # @GuestFileWhence: # -# Controls the meaning of offset to @guest-file-seek. +# Controls the meaning of offset to `guest-file-seek`. 
# # @value: Integral value (0 for set, 1 for cur, 2 for end), available # for historical reasons, and might differ from the host's or @@ -373,14 +375,12 @@ # current file position afterward. Also encapsulates ftell()'s # functionality, with offset=0 and whence=1. # -# @handle: filehandle returned by guest-file-open +# @handle: filehandle returned by `guest-file-open` # # @offset: bytes to skip over in the file stream # # @whence: Symbolic or numeric code for interpreting offset # -# Returns: @GuestFileSeek -# # Since: 0.15.0 ## { 'command': 'guest-file-seek', @@ -393,7 +393,7 @@ # # Write file changes buffered in userspace to disk/kernel buffers # -# @handle: filehandle returned by guest-file-open +# @handle: filehandle returned by `guest-file-open` # # Since: 0.15.0 ## @@ -420,9 +420,6 @@ # # Get guest fsfreeze state. # -# Returns: GuestFsfreezeStatus ("thawed", "frozen", etc., as defined -# below) -# # .. note:: This may fail to properly report the current state as a # result of some other guest processes having issued an fs # freeze/thaw. @@ -437,12 +434,12 @@ # @guest-fsfreeze-freeze: # # Sync and freeze all freezable, local guest filesystems. If this -# command succeeded, you may call @guest-fsfreeze-thaw later to +# command succeeded, you may call `guest-fsfreeze-thaw` later to # unfreeze. # # On error, all filesystems will be thawed. If no filesystems are -# frozen as a result of this call, then @guest-fsfreeze-status will -# remain "thawed" and calling @guest-fsfreeze-thaw is not necessary. +# frozen as a result of this call, then `guest-fsfreeze-status` will +# remain "thawed" and calling `guest-fsfreeze-thaw` is not necessary. # # Returns: Number of file systems currently frozen. # @@ -460,7 +457,7 @@ # @guest-fsfreeze-freeze-list: # # Sync and freeze specified guest filesystems. See also -# @guest-fsfreeze-freeze. +# `guest-fsfreeze-freeze`. # # On error, all filesystems will be thawed. # @@ -485,7 +482,7 @@ # Returns: Number of file systems thawed by this call # # .. note:: If the return value does not match the previous call to -# guest-fsfreeze-freeze, this likely means some freezable filesystems +# `guest-fsfreeze-freeze`, this likely means some freezable filesystems # were unfrozen before this call, and that the filesystem state may # have changed before issuing this command. # @@ -516,7 +513,7 @@ ## # @GuestFilesystemTrimResponse: # -# @paths: list of @GuestFilesystemTrimResult per path that was trimmed +# @paths: list of `GuestFilesystemTrimResult` per path that was trimmed # # Since: 2.4 ## @@ -537,8 +534,7 @@ # discarded. The default value is zero, meaning "discard every # free block". # -# Returns: A @GuestFilesystemTrimResponse which contains the status of -# all trimmed paths. (since 2.4) +# Returns: status of all trimmed paths. (since 2.4) # # Since: 1.2 ## @@ -561,7 +557,7 @@ # # This command does NOT return a response on success. There is a high # chance the command succeeded if the VM exits with a zero exit status -# or, when running with --no-shutdown, by issuing the query-status QMP +# or, when running with --no-shutdown, by issuing the `query-status` QMP # command to to confirm the VM status is "shutdown". However, the VM # could also exit (or set its status to "shutdown") due to other # reasons. @@ -569,7 +565,7 @@ # Errors: # - If suspend to disk is not supported, Unsupported # -# .. note:: It's strongly recommended to issue the guest-sync command +# .. 
note:: It's strongly recommended to issue the `guest-sync` command # before sending commands when the guest resumes. # # Since: 1.1 @@ -589,8 +585,8 @@ # - pm-utils (via pm-hibernate) # - manual write into sysfs # -# IMPORTANT: guest-suspend-ram requires working wakeup support in -# QEMU. You should check QMP command query-current-machine returns +# IMPORTANT: `guest-suspend-ram` requires working wakeup support in +# QEMU. You should check QMP command `query-current-machine` returns # wakeup-suspend-support: true before issuing this command. Failure # in doing so can result in a suspended guest that QEMU will not be # able to awaken, forcing the user to power cycle the guest to bring @@ -599,14 +595,14 @@ # This command does NOT return a response on success. There are two # options to check for success: # -# 1. Wait for the SUSPEND QMP event from QEMU -# 2. Issue the query-status QMP command to confirm the VM status is +# 1. Wait for the `SUSPEND` QMP event from QEMU +# 2. Issue the `query-status` QMP command to confirm the VM status is # "suspended" # # Errors: # - If suspend to ram is not supported, Unsupported # -# .. note:: It's strongly recommended to issue the guest-sync command +# .. note:: It's strongly recommended to issue the `guest-sync` command # before sending commands when the guest resumes. # # Since: 1.1 @@ -625,8 +621,8 @@ # - systemd hybrid-sleep # - pm-utils (via pm-suspend-hybrid) # -# IMPORTANT: guest-suspend-hybrid requires working wakeup support in -# QEMU. You should check QMP command query-current-machine returns +# IMPORTANT: `guest-suspend-hybrid` requires working wakeup support in +# QEMU. You should check QMP command `query-current-machine` returns # wakeup-suspend-support: true before issuing this command. Failure # in doing so can result in a suspended guest that QEMU will not be # able to awaken, forcing the user to power cycle the guest to bring @@ -635,14 +631,14 @@ # This command does NOT return a response on success. There are two # options to check for success: # -# 1. Wait for the SUSPEND QMP event from QEMU -# 2. Issue the query-status QMP command to confirm the VM status is +# 1. Wait for the `SUSPEND` QMP event from QEMU +# 2. Issue the `query-status` QMP command to confirm the VM status is # "suspended" # # Errors: # - If hybrid suspend is not supported, Unsupported # -# .. note:: It's strongly recommended to issue the guest-sync command +# .. note:: It's strongly recommended to issue the `guest-sync` command # before sending commands when the guest resumes. # # Since: 1.1 @@ -741,8 +737,6 @@ # # Get list of guest IP addresses, MAC addresses and netmasks. # -# Returns: List of GuestNetworkInterface -# # Since: 1.1 ## { 'command': 'guest-network-get-interfaces', @@ -799,7 +793,7 @@ # There's no restriction on list length or on repeating the same # @logical-id (with possibly different @online field). Preferably # the input list should describe a modified subset of -# @guest-get-vcpus' return value. +# `guest-get-vcpus`' return value. # # Returns: The length of the initial sublist that has been # successfully processed. The guest agent maximizes this value. @@ -1075,7 +1069,7 @@ # # Returns: The list of filesystems information mounted in the guest. # The returned mountpoints may be specified to -# @guest-fsfreeze-freeze-list. Network filesystems (such as CIFS +# `guest-fsfreeze-freeze-list`. Network filesystems (such as CIFS # and NFS) are not listed. 
# # Since: 2.2 @@ -1177,7 +1171,7 @@ ## # @GuestMemoryBlockResponse: # -# @phys-index: same with the 'phys-index' member of @GuestMemoryBlock. +# @phys-index: same with the 'phys-index' member of `GuestMemoryBlock`. # # @response: the result of memory block operation. # @@ -1207,11 +1201,11 @@ # guest-supported identifiers. There's no restriction on list # length or on repeating the same @phys-index (with possibly # different @online field). Preferably the input list should -# describe a modified subset of @guest-get-memory-blocks' return +# describe a modified subset of `guest-get-memory-blocks`' return # value. # # Returns: The operation results, it is a list of -# @GuestMemoryBlockResponse, which is corresponding to the input +# `GuestMemoryBlockResponse`, which is corresponding to the input # list. # # Note: it will return an empty list if the @mem-blks list was @@ -1243,8 +1237,6 @@ # # Get information relating to guest memory blocks. # -# Returns: @GuestMemoryBlockInfo -# # Since: 2.3 ## { 'command': 'guest-get-memory-block-info', @@ -1266,7 +1258,7 @@ # # @err-data: base64-encoded stderr of the process. Note: @out-data # and @err-data are present only if 'capture-output' was specified -# for 'guest-exec'. This field will only be populated after the +# for `guest-exec`. This field will only be populated after the # process exits. # # @out-truncated: true if stdout was not fully captured due to size @@ -1285,12 +1277,10 @@ # @guest-exec-status: # # Check status of process associated with PID retrieved via -# guest-exec. Reap the process and associated metadata if it has +# `guest-exec`. Reap the process and associated metadata if it has # exited. # -# @pid: pid returned from guest-exec -# -# Returns: GuestExecStatus +# @pid: pid returned from `guest-exec` # # Since: 2.5 ## @@ -1311,7 +1301,7 @@ ## # @GuestExecCaptureOutputMode: # -# An enumeration of guest-exec capture modes. +# An enumeration of `guest-exec` capture modes. # # @none: do not capture any output # @@ -1320,7 +1310,7 @@ # @stderr: only capture stderr # # @separated: capture both stdout and stderr, but separated into -# GuestExecStatus out-data and err-data, respectively +# `GuestExecStatus` out-data and err-data, respectively # # @merged: capture both stdout and stderr, but merge together into # out-data. Not effective on windows guests. @@ -1334,10 +1324,10 @@ ## # @GuestExecCaptureOutput: # -# Controls what guest-exec output gets captures. +# Controls what `guest-exec` output gets captures. # # @flag: captures both stdout and stderr if true. Equivalent to -# GuestExecCaptureOutputMode::all. (since 2.5) +# `GuestExecCaptureOutputMode`::all. (since 2.5) # # @mode: capture mode; preferred interface # @@ -1450,8 +1440,6 @@ # # Retrieves the timezone information from the guest. # -# Returns: A GuestTimezone dictionary. 
-# # Since: 2.10 ## { 'command': 'guest-get-timezone', @@ -1525,8 +1513,6 @@ # # Retrieve guest operating system information # -# Returns: @GuestOSInfo -# # Since: 2.10 ## { 'command': 'guest-get-osinfo', @@ -1596,8 +1582,6 @@ # # Retrieve information about device drivers in Windows guest # -# Returns: @GuestDeviceInfo -# # Since: 5.2 ## { 'command': 'guest-get-devices', @@ -1625,8 +1609,6 @@ # # @username: the user account to add the authorized keys # -# Returns: @GuestAuthorizedKeys -# # Since: 5.2 ## { 'command': 'guest-ssh-get-authorized-keys', @@ -1852,6 +1834,48 @@ 'if': 'CONFIG_LINUX' } + +## +# @GuestLoadAverage: +# +# Statistics about process load information +# +# @load1m: 1-minute load avage +# +# @load5m: 5-minute load avage +# +# @load15m: 15-minute load avage +# +# Since: 10.0 +## +{ 'struct': 'GuestLoadAverage', + 'data': { + 'load1m': 'number', + 'load5m': 'number', + 'load15m': 'number' + }, + 'if': { 'any': ['CONFIG_WIN32', 'CONFIG_GETLOADAVG'] } +} + +## +# @guest-get-load: +# +# Retrieve CPU process load information +# +# .. note:: Windows does not have load average API, so QGA emulates it by +# calculating the average CPU usage in the last 1, 5, 15 minutes +# similar as Linux does this. +# Calculation starts from the first time this command is called. +# +# Returns: load information +# +# Since: 10.0 +## +{ 'command': 'guest-get-load', + 'returns': 'GuestLoadAverage', + 'if': { 'any': ['CONFIG_WIN32', 'CONFIG_GETLOADAVG'] } +} + ## # @GuestNetworkRoute: # @@ -1916,6 +1940,7 @@ # @guest-network-get-route: # # Retrieve information about route of network. +# # Returns: List of route info of guest. # # Since: 9.1 diff --git a/qga/vss-win32/install.cpp b/qga/vss-win32/install.cpp index 84944133f79..7b25d9098ba 100644 --- a/qga/vss-win32/install.cpp +++ b/qga/vss-win32/install.cpp @@ -39,7 +39,7 @@ const GUID CLSID_WbemLocator = { 0x4590f811, 0x1d3a, 0x11d0, const GUID IID_IWbemLocator = { 0xdc12a687, 0x737f, 0x11cf, {0x88, 0x4d, 0x00, 0xaa, 0x00, 0x4b, 0x2e, 0x24} }; -void errmsg(DWORD err, const char *text) +static void errmsg(DWORD err, const char *text) { /* * `text' contains function call statement when errmsg is called via chk(). @@ -242,6 +242,7 @@ static HRESULT QGAProviderRemove(ICatalogCollection *coll, int i, void *arg) } /* Unregister this module from COM+ Applications Catalog */ +STDAPI COMUnregister(void); STDAPI COMUnregister(void) { qga_debug_begin; @@ -256,6 +257,7 @@ STDAPI COMUnregister(void) } /* Register this module to COM+ Applications Catalog */ +STDAPI COMRegister(void); STDAPI COMRegister(void) { qga_debug_begin; @@ -285,9 +287,13 @@ STDAPI COMRegister(void) chk(QGAProviderFind(QGAProviderCount, (void *)&count)); if (count) { - errmsg(E_ABORT, "QGA VSS Provider is already installed"); - qga_debug_end; - return E_ABORT; + qga_debug("QGA VSS Provider is already installed. Attempting to unregister first."); + hr = COMUnregister(); + if (FAILED(hr)) { + errmsg(hr, "Failed to unregister existing QGA VSS Provider. 
Aborting installation."); + qga_debug_end; + return E_ABORT; + } } chk(CoCreateInstance(CLSID_COMAdminCatalog, NULL, CLSCTX_INPROC_SERVER, @@ -380,11 +386,16 @@ STDAPI COMRegister(void) return hr; } +STDAPI_(void) CALLBACK DLLCOMRegister(HWND, HINSTANCE, LPSTR, int); STDAPI_(void) CALLBACK DLLCOMRegister(HWND, HINSTANCE, LPSTR, int) { - COMRegister(); + HRESULT hr = COMRegister(); + if (FAILED(hr)) { + exit(hr); + } } +STDAPI_(void) CALLBACK DLLCOMUnregister(HWND, HINSTANCE, LPSTR, int); STDAPI_(void) CALLBACK DLLCOMUnregister(HWND, HINSTANCE, LPSTR, int) { COMUnregister(); diff --git a/qga/vss-win32/provider.cpp b/qga/vss-win32/provider.cpp index cc72e5ef1b9..a102a23fbf1 100644 --- a/qga/vss-win32/provider.cpp +++ b/qga/vss-win32/provider.cpp @@ -45,7 +45,7 @@ const IID IID_IVssEnumObject = { 0xAE1C7110, 0x2F60, 0x11d3, {0x8A, 0x39, 0x00, 0xC0, 0x4F, 0x72, 0xD8, 0xE3} }; -void LockModule(BOOL lock) +static void LockModule(BOOL lock) { if (lock) { InterlockedIncrement(&g_nComObjsInUse); @@ -527,6 +527,9 @@ STDAPI DllCanUnloadNow() return g_nComObjsInUse == 0 ? S_OK : S_FALSE; } +EXTERN_C +BOOL WINAPI DllMain(HINSTANCE hinstDll, DWORD dwReason, LPVOID lpReserved); + EXTERN_C BOOL WINAPI DllMain(HINSTANCE hinstDll, DWORD dwReason, LPVOID lpReserved) { diff --git a/qga/vss-win32/requester.cpp b/qga/vss-win32/requester.cpp index 9884c65e707..4401d55e3a4 100644 --- a/qga/vss-win32/requester.cpp +++ b/qga/vss-win32/requester.cpp @@ -254,8 +254,8 @@ static void AddComponents(ErrorSet *errset) qga_debug_end; } -DWORD get_reg_dword_value(HKEY baseKey, LPCSTR subKey, LPCSTR valueName, - DWORD defaultData) +static DWORD get_reg_dword_value(HKEY baseKey, LPCSTR subKey, LPCSTR valueName, + DWORD defaultData) { qga_debug_begin; @@ -272,12 +272,12 @@ DWORD get_reg_dword_value(HKEY baseKey, LPCSTR subKey, LPCSTR valueName, return dwordData; } -bool is_valid_vss_backup_type(VSS_BACKUP_TYPE vssBT) +static bool is_valid_vss_backup_type(VSS_BACKUP_TYPE vssBT) { return (vssBT > VSS_BT_UNDEFINED && vssBT < VSS_BT_OTHER); } -VSS_BACKUP_TYPE get_vss_backup_type( +static VSS_BACKUP_TYPE get_vss_backup_type( VSS_BACKUP_TYPE defaultVssBT = DEFAULT_VSS_BACKUP_TYPE) { qga_debug_begin; diff --git a/qobject/block-qdict.c b/qobject/block-qdict.c index 4a83bda2c3b..d0e1c63cf6c 100644 --- a/qobject/block-qdict.c +++ b/qobject/block-qdict.c @@ -9,10 +9,10 @@ #include "qemu/osdep.h" #include "block/qdict.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qbool.h" +#include "qobject/qlist.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" #include "qapi/qobject-input-visitor.h" #include "qemu/cutils.h" #include "qapi/error.h" diff --git a/qobject/json-parser-int.h b/qobject/json-parser-int.h index 16a25d00bb6..8c01f236276 100644 --- a/qobject/json-parser-int.h +++ b/qobject/json-parser-int.h @@ -14,7 +14,7 @@ #ifndef JSON_PARSER_INT_H #define JSON_PARSER_INT_H -#include "qapi/qmp/json-parser.h" +#include "qobject/json-parser.h" typedef enum json_token_type { JSON_ERROR = 0, /* must be zero, see json_lexer[] */ diff --git a/qobject/json-parser.c b/qobject/json-parser.c index d498db6e702..7483e582fea 100644 --- a/qobject/json-parser.c +++ b/qobject/json-parser.c @@ -16,12 +16,12 @@ #include "qemu/cutils.h" #include "qemu/unicode.h" #include "qapi/error.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnull.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" 
+#include "qobject/qbool.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qnull.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" #include "json-parser-int.h" struct JSONToken { diff --git a/qobject/json-writer.c b/qobject/json-writer.c index 309a31d57a9..aac2c6ab71e 100644 --- a/qobject/json-writer.c +++ b/qobject/json-writer.c @@ -14,7 +14,7 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/json-writer.h" +#include "qobject/json-writer.h" #include "qemu/unicode.h" struct JSONWriter { diff --git a/qobject/qbool.c b/qobject/qbool.c index c7049c0c501..00d7066aae3 100644 --- a/qobject/qbool.c +++ b/qobject/qbool.c @@ -12,7 +12,7 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/qbool.h" +#include "qobject/qbool.h" #include "qobject-internal.h" /** diff --git a/qobject/qdict.c b/qobject/qdict.c index 8faff230d39..a90ac9ae2f8 100644 --- a/qobject/qdict.c +++ b/qobject/qdict.c @@ -11,11 +11,11 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qnull.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qnum.h" +#include "qobject/qdict.h" +#include "qobject/qbool.h" +#include "qobject/qnull.h" +#include "qobject/qstring.h" #include "qobject-internal.h" /** diff --git a/qobject/qjson.c b/qobject/qjson.c index 167fcb429c9..c858dafb5e8 100644 --- a/qobject/qjson.c +++ b/qobject/qjson.c @@ -13,14 +13,14 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qapi/qmp/json-parser.h" -#include "qapi/qmp/json-writer.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/json-parser.h" +#include "qobject/json-writer.h" +#include "qobject/qjson.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" typedef struct JSONParsingState { JSONMessageParser parser; diff --git a/qobject/qlist.c b/qobject/qlist.c index 356ad946b00..41e6876d5b6 100644 --- a/qobject/qlist.c +++ b/qobject/qlist.c @@ -11,11 +11,11 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnull.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qbool.h" +#include "qobject/qlist.h" +#include "qobject/qnull.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" #include "qemu/queue.h" #include "qobject-internal.h" diff --git a/qobject/qlit.c b/qobject/qlit.c index be8332136c2..a44f47eaa57 100644 --- a/qobject/qlit.c +++ b/qobject/qlit.c @@ -15,13 +15,13 @@ #include "qemu/osdep.h" -#include "qapi/qmp/qlit.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" -#include "qapi/qmp/qnull.h" +#include "qobject/qlit.h" +#include "qobject/qbool.h" +#include "qobject/qlist.h" +#include "qobject/qnum.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" +#include "qobject/qnull.h" static bool qlit_equal_qdict(const QLitObject *lhs, const QDict *qdict) { @@ -118,7 +118,7 @@ QObject *qobject_from_qlit(const QLitObject *qlit) case QTYPE_QBOOL: return QOBJECT(qbool_from_bool(qlit->value.qbool)); default: - assert(0); + g_assert_not_reached(); } return NULL; diff --git a/qobject/qnull.c b/qobject/qnull.c index 445a5db7f36..0fb78cbd0a2 100644 --- a/qobject/qnull.c +++ b/qobject/qnull.c @@ -11,7 +11,7 @@ */ #include 
"qemu/osdep.h" -#include "qapi/qmp/qnull.h" +#include "qobject/qnull.h" #include "qobject-internal.h" QNull qnull_ = { diff --git a/qobject/qnum.c b/qobject/qnum.c index 2bbeaedc7b4..a938b64537d 100644 --- a/qobject/qnum.c +++ b/qobject/qnum.c @@ -13,7 +13,7 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/qnum.h" +#include "qobject/qnum.h" #include "qobject-internal.h" /** @@ -85,8 +85,7 @@ bool qnum_get_try_int(const QNum *qn, int64_t *val) return false; } - assert(0); - return false; + g_assert_not_reached(); } /** @@ -123,8 +122,7 @@ bool qnum_get_try_uint(const QNum *qn, uint64_t *val) return false; } - assert(0); - return false; + g_assert_not_reached(); } /** @@ -156,8 +154,7 @@ double qnum_get_double(QNum *qn) return qn->u.dbl; } - assert(0); - return 0.0; + g_assert_not_reached(); } char *qnum_to_string(QNum *qn) @@ -172,8 +169,7 @@ char *qnum_to_string(QNum *qn) return g_strdup_printf("%.17g", qn->u.dbl); } - assert(0); - return NULL; + g_assert_not_reached(); } /** diff --git a/qobject/qobject-internal.h b/qobject/qobject-internal.h index b310c8e1b58..0c7679fe989 100644 --- a/qobject/qobject-internal.h +++ b/qobject/qobject-internal.h @@ -10,7 +10,7 @@ #ifndef QOBJECT_INTERNAL_H #define QOBJECT_INTERNAL_H -#include "qapi/qmp/qobject.h" +#include "qobject/qobject.h" static inline void qobject_init(QObject *obj, QType type) { diff --git a/qobject/qobject.c b/qobject/qobject.c index d7077b8f2a9..78d1e057c12 100644 --- a/qobject/qobject.c +++ b/qobject/qobject.c @@ -8,12 +8,12 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qnull.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qbool.h" +#include "qobject/qnull.h" +#include "qobject/qnum.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qstring.h" #include "qobject-internal.h" QEMU_BUILD_BUG_MSG( diff --git a/qobject/qstring.c b/qobject/qstring.c index 794f8c93578..d3166049149 100644 --- a/qobject/qstring.c +++ b/qobject/qstring.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qstring.h" #include "qobject-internal.h" /** diff --git a/qom/container.c b/qom/container.c index 455e8410c66..38a27ec1edd 100644 --- a/qom/container.c +++ b/qom/container.c @@ -15,7 +15,7 @@ #include "qemu/module.h" static const TypeInfo container_info = { - .name = "container", + .name = TYPE_CONTAINER, .parent = TYPE_OBJECT, }; @@ -24,29 +24,14 @@ static void container_register_types(void) type_register_static(&container_info); } -Object *container_get(Object *root, const char *path) +Object *object_property_add_new_container(Object *obj, const char *name) { - Object *obj, *child; - char **parts; - int i; + Object *child = object_new(TYPE_CONTAINER); - parts = g_strsplit(path, "/", 0); - assert(parts != NULL && parts[0] != NULL && !parts[0][0]); - obj = root; + object_property_add_child(obj, name, child); + object_unref(child); - for (i = 1; parts[i] != NULL; i++, obj = child) { - child = object_resolve_path_component(obj, parts[i]); - if (!child) { - child = object_new("container"); - object_property_add_child(obj, parts[i], child); - object_unref(child); - } - } - - g_strfreev(parts); - - return obj; + return child; } - type_init(container_register_types) diff --git a/qom/object.c b/qom/object.c index 157a45c5f8b..1856bb36c74 100644 --- a/qom/object.c +++ b/qom/object.c @@ -23,16 +23,16 @@ #include "qapi/qobject-input-visitor.h" #include 
"qapi/forward-visitor.h" #include "qapi/qapi-builtin-visit.h" -#include "qapi/qmp/qjson.h" +#include "qobject/qjson.h" #include "trace.h" /* TODO: replace QObject with a simpler visitor to avoid a dependency * of the QOM core on QObject? */ #include "qom/qom-qobject.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qbool.h" +#include "qobject/qlist.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" #include "qemu/error-report.h" #define MAX_INTERFACES 32 @@ -54,10 +54,10 @@ struct TypeImpl size_t instance_size; size_t instance_align; - void (*class_init)(ObjectClass *klass, void *data); - void (*class_base_init)(ObjectClass *klass, void *data); + void (*class_init)(ObjectClass *klass, const void *data); + void (*class_base_init)(ObjectClass *klass, const void *data); - void *class_data; + const void *class_data; void (*instance_init)(Object *obj); void (*instance_post_init)(Object *obj); @@ -175,17 +175,12 @@ static TypeImpl *type_register_internal(const TypeInfo *info) return ti; } -TypeImpl *type_register(const TypeInfo *info) +TypeImpl *type_register_static(const TypeInfo *info) { assert(info->parent); return type_register_internal(info); } -TypeImpl *type_register_static(const TypeInfo *info) -{ - return type_register(info); -} - void type_register_static_array(const TypeInfo *infos, int nr_infos) { int i; @@ -195,7 +190,7 @@ void type_register_static_array(const TypeInfo *infos, int nr_infos) } } -static TypeImpl *type_get_by_name(const char *name) +static TypeImpl *type_get_by_name_noload(const char *name) { if (name == NULL) { return NULL; @@ -204,10 +199,32 @@ static TypeImpl *type_get_by_name(const char *name) return type_table_lookup(name); } +static TypeImpl *type_get_or_load_by_name(const char *name, Error **errp) +{ + TypeImpl *type = type_get_by_name_noload(name); + +#ifdef CONFIG_MODULES + if (!type) { + int rv = module_load_qom(name, errp); + if (rv > 0) { + type = type_get_by_name_noload(name); + } else { + error_prepend(errp, "could not load a module for type '%s'", name); + return NULL; + } + } +#endif + if (!type) { + error_setg(errp, "unknown type '%s'", name); + } + + return type; +} + static TypeImpl *type_get_parent(TypeImpl *type) { if (!type->parent_type && type->parent) { - type->parent_type = type_get_by_name(type->parent); + type->parent_type = type_get_by_name_noload(type->parent); if (!type->parent_type) { fprintf(stderr, "Type '%s' is missing its parent '%s'\n", type->name, type->parent); @@ -262,14 +279,6 @@ static size_t type_object_get_align(TypeImpl *ti) return 0; } -size_t object_type_get_instance_size(const char *typename) -{ - TypeImpl *type = type_get_by_name(typename); - - g_assert(type != NULL); - return type_object_get_size(type); -} - static bool type_is_ancestor(TypeImpl *type, TypeImpl *target_type) { assert(target_type); @@ -305,7 +314,6 @@ static void type_initialize_interface(TypeImpl *ti, TypeImpl *interface_type, g_free((char *)info.name); new_iface = (InterfaceClass *)iface_impl->class; - new_iface->concrete_class = ti->class; new_iface->interface_type = interface_type; ti->class->interfaces = g_slist_append(ti->class->interfaces, new_iface); @@ -371,7 +379,7 @@ static void type_initialize(TypeImpl *ti) } for (i = 0; i < ti->num_interfaces; i++) { - TypeImpl *t = type_get_by_name(ti->interfaces[i].typename); + TypeImpl *t = type_get_by_name_noload(ti->interfaces[i].typename); if (!t) { error_report("missing interface '%s' for object '%s'", 
ti->interfaces[i].typename, parent->name); @@ -423,13 +431,13 @@ static void object_init_with_type(Object *obj, TypeImpl *ti) static void object_post_init_with_type(Object *obj, TypeImpl *ti) { - if (ti->instance_post_init) { - ti->instance_post_init(obj); - } - if (type_has_parent(ti)) { object_post_init_with_type(obj, type_get_parent(ti)); } + + if (ti->instance_post_init) { + ti->instance_post_init(obj); + } } bool object_apply_global_props(Object *obj, const GPtrArray *props, @@ -477,7 +485,7 @@ bool object_apply_global_props(Object *obj, const GPtrArray *props, * Slot 0: accelerator's global property defaults * Slot 1: machine's global property defaults * Slot 2: global properties from legacy command line option - * Each is a GPtrArray of of GlobalProperty. + * Each is a GPtrArray of GlobalProperty. * Applied in order, later entries override earlier ones. */ static GPtrArray *object_compat_props[3]; @@ -565,23 +573,7 @@ static void object_initialize_with_type(Object *obj, size_t size, TypeImpl *type void object_initialize(void *data, size_t size, const char *typename) { - TypeImpl *type = type_get_by_name(typename); - -#ifdef CONFIG_MODULES - if (!type) { - int rv = module_load_qom(typename, &error_fatal); - if (rv > 0) { - type = type_get_by_name(typename); - } else { - error_report("missing object type '%s'", typename); - exit(1); - } - } -#endif - if (!type) { - error_report("missing object type '%s'", typename); - abort(); - } + TypeImpl *type = type_get_or_load_by_name(typename, &error_fatal); object_initialize_with_type(data, size, type); } @@ -792,7 +784,7 @@ Object *object_new_with_class(ObjectClass *klass) Object *object_new(const char *typename) { - TypeImpl *ti = type_get_by_name(typename); + TypeImpl *ti = type_get_or_load_by_name(typename, &error_fatal); return object_new_with_type(ti); } @@ -965,7 +957,7 @@ ObjectClass *object_class_dynamic_cast(ObjectClass *class, return class; } - target_type = type_get_by_name(typename); + target_type = type_get_by_name_noload(typename); if (!target_type) { /* target class type unknown, so fail the cast */ return NULL; @@ -1063,7 +1055,7 @@ const char *object_class_get_name(ObjectClass *klass) ObjectClass *object_class_by_name(const char *typename) { - TypeImpl *type = type_get_by_name(typename); + TypeImpl *type = type_get_by_name_noload(typename); if (!type) { return NULL; @@ -1076,21 +1068,15 @@ ObjectClass *object_class_by_name(const char *typename) ObjectClass *module_object_class_by_name(const char *typename) { - ObjectClass *oc; + TypeImpl *type = type_get_or_load_by_name(typename, NULL); - oc = object_class_by_name(typename); -#ifdef CONFIG_MODULES - if (!oc) { - Error *local_err = NULL; - int rv = module_load_qom(typename, &local_err); - if (rv > 0) { - oc = object_class_by_name(typename); - } else if (rv < 0) { - error_report_err(local_err); - } + if (!type) { + return NULL; } -#endif - return oc; + + type_initialize(type); + + return type->class; } ObjectClass *object_class_get_parent(ObjectClass *class) @@ -1205,7 +1191,7 @@ GSList *object_class_get_list(const char *implements_type, return list; } -static gint object_class_cmp(gconstpointer a, gconstpointer b) +static gint object_class_cmp(gconstpointer a, gconstpointer b, gpointer d) { return strcasecmp(object_class_get_name((ObjectClass *)a), object_class_get_name((ObjectClass *)b)); @@ -1214,8 +1200,9 @@ static gint object_class_cmp(gconstpointer a, gconstpointer b) GSList *object_class_get_list_sorted(const char *implements_type, bool include_abstract) { - return 
g_slist_sort(object_class_get_list(implements_type, include_abstract), - object_class_cmp); + return g_slist_sort_with_data( + object_class_get_list(implements_type, include_abstract), + object_class_cmp, NULL); } Object *object_ref(void *objptr) @@ -1742,12 +1729,44 @@ const char *object_property_get_type(Object *obj, const char *name, Error **errp return prop->type; } +static const char *const root_containers[] = { + "chardevs", + "objects", + "backend" +}; + +static Object *object_root_initialize(void) +{ + Object *root = object_new(TYPE_CONTAINER); + int i; + + /* + * Create all QEMU system containers. "machine" and its sub-containers + * are only created when machine initializes (qemu_create_machine()). + */ + for (i = 0; i < ARRAY_SIZE(root_containers); i++) { + object_property_add_new_container(root, root_containers[i]); + } + + return root; +} + +Object *object_get_container(const char *name) +{ + Object *container; + + container = object_resolve_path_component(object_get_root(), name); + assert(object_dynamic_cast(container, TYPE_CONTAINER)); + + return container; +} + Object *object_get_root(void) { static Object *root; if (!root) { - root = object_new("container"); + root = object_root_initialize(); } return root; @@ -1755,7 +1774,7 @@ Object *object_get_root(void) Object *object_get_objects_root(void) { - return container_get(object_get_root(), "/objects"); + return object_get_container("objects"); } Object *object_get_internal_root(void) @@ -1763,7 +1782,7 @@ Object *object_get_internal_root(void) static Object *internal_root; if (!internal_root) { - internal_root = object_new("container"); + internal_root = object_new(TYPE_CONTAINER); } return internal_root; @@ -2079,7 +2098,6 @@ const char *object_get_canonical_path_component(const Object *obj) /* obj had a parent but was not a child, should never happen */ g_assert_not_reached(); - return NULL; } char *object_get_canonical_path(const Object *obj) @@ -2185,7 +2203,7 @@ static Object *object_resolve_partial_path(Object *parent, } Object *object_resolve_path_type(const char *path, const char *typename, - bool *ambiguousp) + bool *ambiguous) { Object *obj; char **parts; @@ -2194,14 +2212,17 @@ Object *object_resolve_path_type(const char *path, const char *typename, assert(parts); if (parts[0] == NULL || strcmp(parts[0], "") != 0) { - bool ambiguous = false; + bool ambig = false; obj = object_resolve_partial_path(object_get_root(), parts, - typename, &ambiguous); - if (ambiguousp) { - *ambiguousp = ambiguous; + typename, &ambig); + if (ambiguous) { + *ambiguous = ambig; } } else { obj = object_resolve_abs_path(object_get_root(), parts + 1, typename); + if (ambiguous) { + *ambiguous = false; + } } g_strfreev(parts); @@ -2227,7 +2248,7 @@ Object *object_resolve_path_at(Object *parent, const char *path) Object *object_resolve_type_unambiguous(const char *typename, Error **errp) { - bool ambig; + bool ambig = false; Object *o = object_resolve_path_type("", typename, &ambig); if (ambig) { @@ -2871,7 +2892,7 @@ void object_class_property_set_description(ObjectClass *klass, op->description = g_strdup(description); } -static void object_class_init(ObjectClass *klass, void *data) +static void object_class_init(ObjectClass *klass, const void *data) { object_class_property_add_str(klass, "type", object_get_type, NULL); diff --git a/qom/object_interfaces.c b/qom/object_interfaces.c index e0833c8bfe4..1ffea1a7288 100644 --- a/qom/object_interfaces.c +++ b/qom/object_interfaces.c @@ -3,10 +3,12 @@ #include "qemu/cutils.h" #include 
"qapi/error.h" #include "qapi/qapi-visit-qom.h" -#include "qapi/qmp/qobject.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qobject.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" #include "qapi/qmp/qerror.h" -#include "qapi/qmp/qjson.h" +#include "qobject/qjson.h" +#include "qobject/qstring.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qobject-output-visitor.h" #include "qom/object_interfaces.h" @@ -90,7 +92,7 @@ Object *user_creatable_add_type(const char *type, const char *id, return NULL; } - klass = object_class_by_name(type); + klass = module_object_class_by_name(type); if (!klass) { error_setg(errp, "invalid object type: %s", type); return NULL; @@ -108,7 +110,7 @@ Object *user_creatable_add_type(const char *type, const char *id, } assert(qdict); - obj = object_new(type); + obj = object_new_with_class(klass); object_set_properties_from_qdict(obj, qdict, v, &local_err); if (local_err) { goto out; @@ -177,9 +179,25 @@ char *object_property_help(const char *name, const char *type, g_string_append(str, description); } if (defval) { - g_autofree char *def_json = g_string_free(qobject_to_json(defval), - false); - g_string_append_printf(str, " (default: %s)", def_json); + g_autofree char *def_json = NULL; + const char *def; + + switch (qobject_type(defval)) { + case QTYPE_QSTRING: + def = qstring_get_str(qobject_to(QString, defval)); + break; + + case QTYPE_QBOOL: + def = qbool_get_bool(qobject_to(QBool, defval)) ? "on" : "off"; + break; + + default: + def_json = g_string_free(qobject_to_json(defval), false); + def = def_json; + break; + } + + g_string_append_printf(str, " (default: %s)", def); } return g_string_free(str, false); diff --git a/qom/qom-hmp-cmds.c b/qom/qom-hmp-cmds.c index 6e3a2175a4e..a00a564b1e2 100644 --- a/qom/qom-hmp-cmds.c +++ b/qom/qom-hmp-cmds.c @@ -11,8 +11,8 @@ #include "monitor/monitor.h" #include "qapi/error.h" #include "qapi/qapi-commands-qom.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qjson.h" +#include "qobject/qdict.h" +#include "qobject/qjson.h" #include "qemu/readline.h" #include "qom/object.h" #include "qom/object_interfaces.h" diff --git a/qom/qom-qmp-cmds.c b/qom/qom-qmp-cmds.c index e91a2353472..57f1898cf61 100644 --- a/qom/qom-qmp-cmds.c +++ b/qom/qom-qmp-cmds.c @@ -20,7 +20,7 @@ #include "qapi/qapi-commands-qdev.h" #include "qapi/qapi-commands-qom.h" #include "qapi/qapi-visit-qom.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/qmp/qerror.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qobject-output-visitor.h" @@ -28,15 +28,11 @@ #include "qom/object_interfaces.h" #include "qom/qom-qobject.h" -ObjectPropertyInfoList *qmp_qom_list(const char *path, Error **errp) +static Object *qom_resolve_path(const char *path, Error **errp) { - Object *obj; bool ambiguous = false; - ObjectPropertyInfoList *props = NULL; - ObjectProperty *prop; - ObjectPropertyIterator iter; + Object *obj = object_resolve_path(path, &ambiguous); - obj = object_resolve_path(path, &ambiguous); if (obj == NULL) { if (ambiguous) { error_setg(errp, "Path '%s' is ambiguous", path); @@ -44,6 +40,19 @@ ObjectPropertyInfoList *qmp_qom_list(const char *path, Error **errp) error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, "Device '%s' not found", path); } + } + return obj; +} + +ObjectPropertyInfoList *qmp_qom_list(const char *path, Error **errp) +{ + Object *obj; + ObjectPropertyInfoList *props = NULL; + ObjectProperty *prop; + ObjectPropertyIterator iter; + + obj = qom_resolve_path(path, errp); + if (obj == NULL) { return NULL; 
} @@ -60,6 +69,59 @@ ObjectPropertyInfoList *qmp_qom_list(const char *path, Error **errp) return props; } +static void qom_list_add_property_value(Object *obj, ObjectProperty *prop, + ObjectPropertyValueList **props) +{ + ObjectPropertyValue *item = g_new0(ObjectPropertyValue, 1); + + QAPI_LIST_PREPEND(*props, item); + + item->name = g_strdup(prop->name); + item->type = g_strdup(prop->type); + item->value = object_property_get_qobject(obj, prop->name, NULL); +} + +static ObjectPropertyValueList *qom_get_property_value_list(const char *path, + Error **errp) +{ + Object *obj; + ObjectProperty *prop; + ObjectPropertyIterator iter; + ObjectPropertyValueList *props = NULL; + + obj = qom_resolve_path(path, errp); + if (obj == NULL) { + return NULL; + } + + object_property_iter_init(&iter, obj); + while ((prop = object_property_iter_next(&iter))) { + qom_list_add_property_value(obj, prop, &props); + } + + return props; +} + +ObjectPropertiesValuesList *qmp_qom_list_get(strList *paths, Error **errp) +{ + ObjectPropertiesValuesList *head = NULL, **tail = &head; + strList *path; + + for (path = paths; path; path = path->next) { + ObjectPropertiesValues *item = g_new0(ObjectPropertiesValues, 1); + + QAPI_LIST_APPEND(tail, item); + + item->properties = qom_get_property_value_list(path->value, errp); + if (!item->properties) { + qapi_free_ObjectPropertiesValuesList(head); + return NULL; + } + } + + return head; +} + void qmp_qom_set(const char *path, const char *property, QObject *value, Error **errp) { @@ -141,7 +203,7 @@ ObjectPropertyInfoList *qmp_device_list_properties(const char *typename, return NULL; } - obj = object_new(typename); + obj = object_new_with_class(klass); object_property_iter_init(&iter, obj); while ((prop = object_property_iter_next(&iter))) { @@ -186,7 +248,7 @@ ObjectPropertyInfoList *qmp_qom_list_properties(const char *typename, ObjectPropertyIterator iter; ObjectPropertyInfoList *prop_list = NULL; - klass = object_class_by_name(typename); + klass = module_object_class_by_name(typename); if (klass == NULL) { error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, "Class '%s' not found", typename); diff --git a/replay/replay-audio.c b/replay/replay-audio.c index 91854f02ea0..ed2ba2164ba 100644 --- a/replay/replay-audio.c +++ b/replay/replay-audio.c @@ -11,7 +11,7 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "replay-internal.h" #include "audio/audio.h" diff --git a/replay/replay-char.c b/replay/replay-char.c index 72b1f832dde..81dc416e988 100644 --- a/replay/replay-char.c +++ b/replay/replay-char.c @@ -11,7 +11,7 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "replay-internal.h" #include "chardev/char.h" diff --git a/replay/replay-debugging.c b/replay/replay-debugging.c index 82c66fff262..11053640021 100644 --- a/replay/replay-debugging.c +++ b/replay/replay-debugging.c @@ -11,13 +11,13 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "sysemu/replay.h" -#include "sysemu/runstate.h" +#include "system/replay.h" +#include "system/runstate.h" #include "replay-internal.h" #include "monitor/hmp.h" #include "monitor/monitor.h" #include "qapi/qapi-commands-replay.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/timer.h" #include "block/snapshot.h" #include "migration/snapshot.h" diff --git a/replay/replay-events.c b/replay/replay-events.c index af0721cc1a1..8959da9f1fa 100644 --- a/replay/replay-events.c +++ 
b/replay/replay-events.c @@ -11,7 +11,7 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "replay-internal.h" #include "block/aio.h" #include "ui/input.h" @@ -92,15 +92,6 @@ void replay_flush_events(void) } } -void replay_disable_events(void) -{ - if (replay_mode != REPLAY_MODE_NONE) { - events_enabled = false; - /* Flush events queue before waiting of completion */ - replay_flush_events(); - } -} - /*! Adds specified async event to the queue */ void replay_add_event(ReplayAsyncEventKind event_kind, void *opaque, diff --git a/replay/replay-input.c b/replay/replay-input.c index bee3dbe5283..562bbf37175 100644 --- a/replay/replay-input.c +++ b/replay/replay-input.c @@ -10,7 +10,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "replay-internal.h" #include "qemu/notify.h" #include "ui/input.h" diff --git a/replay/replay-internal.c b/replay/replay-internal.c index 13fcbdd8f42..c2a7200339f 100644 --- a/replay/replay-internal.c +++ b/replay/replay-internal.c @@ -10,8 +10,8 @@ */ #include "qemu/osdep.h" -#include "sysemu/replay.h" -#include "sysemu/runstate.h" +#include "system/replay.h" +#include "system/runstate.h" #include "replay-internal.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" diff --git a/replay/replay-net.c b/replay/replay-net.c index 3b70f71cf12..d4b197e91e3 100644 --- a/replay/replay-net.c +++ b/replay/replay-net.c @@ -11,7 +11,7 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "replay-internal.h" #include "net/net.h" #include "net/filter.h" diff --git a/replay/replay-random.c b/replay/replay-random.c index afc7a0fccca..7f4c46f74d9 100644 --- a/replay/replay-random.c +++ b/replay/replay-random.c @@ -11,7 +11,7 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "replay-internal.h" void replay_save_random(int ret, void *buf, size_t len) diff --git a/replay/replay-snapshot.c b/replay/replay-snapshot.c index ccb4d89dda7..3c0a89442a9 100644 --- a/replay/replay-snapshot.c +++ b/replay/replay-snapshot.c @@ -11,10 +11,10 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "replay-internal.h" #include "monitor/monitor.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qstring.h" #include "qemu/error-report.h" #include "migration/vmstate.h" #include "migration/snapshot.h" diff --git a/replay/replay-time.c b/replay/replay-time.c index ee0ebfcf09f..f3d62e11393 100644 --- a/replay/replay-time.c +++ b/replay/replay-time.c @@ -10,7 +10,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "replay-internal.h" #include "qemu/error-report.h" diff --git a/replay/replay.c b/replay/replay.c index a2c576c16e7..a3e24c967ae 100644 --- a/replay/replay.c +++ b/replay/replay.c @@ -11,13 +11,13 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "sysemu/cpu-timers.h" -#include "sysemu/replay.h" -#include "sysemu/runstate.h" +#include "exec/icount.h" +#include "system/replay.h" +#include "system/runstate.h" #include "replay-internal.h" #include "qemu/main-loop.h" #include "qemu/option.h" -#include "sysemu/cpus.h" +#include "system/cpus.h" #include "qemu/error-report.h" /* Current version of the replay mechanism. 
@@ -385,6 +385,8 @@ static void replay_enable(const char *fname, int mode) replay_fetch_data_kind(); } + runstate_replay_enable(); + replay_init_events(); } @@ -449,27 +451,6 @@ void replay_start(void) replay_enable_events(); } -/* - * For none/record the answer is yes. - */ -bool replay_can_wait(void) -{ - if (replay_mode == REPLAY_MODE_PLAY) { - /* - * For playback we shouldn't ever be at a point we wait. If - * the instruction count has reached zero and we have an - * unconsumed event we should go around again and consume it. - */ - if (replay_state.instruction_count == 0 && replay_state.has_unread_data) { - return false; - } else { - replay_sync_error("Playback shouldn't have to iowait"); - } - } - return true; -} - - void replay_finish(void) { if (replay_mode == REPLAY_MODE_NONE) { diff --git a/replay/stubs-system.c b/replay/stubs-system.c index 50cefdb2d69..8f2b2d326e7 100644 --- a/replay/stubs-system.c +++ b/replay/stubs-system.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "ui/input.h" void replay_input_event(QemuConsole *src, InputEvent *evt) diff --git a/roms/Makefile b/roms/Makefile index dfed2b216a1..4c8793c5bd4 100644 --- a/roms/Makefile +++ b/roms/Makefile @@ -34,6 +34,7 @@ find-cross-gcc = $(firstword $(wildcard $(patsubst %ld,%gcc,$(call find-cross-ld # finally strip off path + toolname so we get the prefix find-cross-prefix = $(subst gcc,,$(notdir $(call find-cross-gcc,$(1)))) +aarch64_cross_prefix := $(call find-cross-prefix,aarch64) arm_cross_prefix := $(call find-cross-prefix,arm) powerpc64_cross_prefix := $(call find-cross-prefix,powerpc64) powerpc_cross_prefix := $(call find-cross-prefix,powerpc) @@ -66,6 +67,8 @@ default help: @echo " u-boot.e500 -- update u-boot.e500" @echo " u-boot.sam460 -- update u-boot.sam460" @echo " npcm7xx_bootrom -- update vbootrom for npcm7xx" + @echo " npcm8xx_bootrom -- update vbootrom for npcm8xx" + @echo " ast27x0_bootrom -- update vbootrom for ast27x0" @echo " efi -- update UEFI (edk2) platform firmware" @echo " opensbi32-generic -- update OpenSBI for 32-bit generic machine" @echo " opensbi64-generic -- update OpenSBI for 64-bit generic machine" @@ -157,6 +160,11 @@ edk2-version: edk2 touch $@; \ fi +edk2-basetools: edk2-version + $(PYTHON) edk2-build.py --config edk2-build.config \ + --silent --no-logs \ + --match none # build only basetools + efi: edk2-version $(PYTHON) edk2-build.py --config edk2-build.config \ --version-override "$(EDK2_STABLE)$(FIRMWARE_EXTRAVERSION)" \ @@ -186,8 +194,16 @@ qboot: cp qboot/build/bios.bin ../pc-bios/qboot.rom npcm7xx_bootrom: - $(MAKE) -C vbootrom CROSS_COMPILE=$(arm_cross_prefix) - cp vbootrom/npcm7xx_bootrom.bin ../pc-bios/npcm7xx_bootrom.bin + $(MAKE) -C vbootrom/npcm7xx CROSS_COMPILE=$(arm_cross_prefix) + cp vbootrom/npcm7xx/npcm7xx_bootrom.bin ../pc-bios/npcm7xx_bootrom.bin + +npcm8xx_bootrom: + $(MAKE) -C vbootrom/npcm8xx CROSS_COMPILE=$(aarch64_cross_prefix) + cp vbootrom/npcm8xx/npcm8xx_bootrom.bin ../pc-bios/npcm8xx_bootrom.bin + +ast27x0_bootrom: + $(MAKE) -C vbootrom/ast27x0 CROSS_COMPILE=$(aarch64_cross_prefix) + cp vbootrom/ast27x0/ast27x0_bootrom.bin ../pc-bios/ast27x0_bootrom.bin hppa-firmware: $(MAKE) -C seabios-hppa parisc diff --git a/roms/edk2 b/roms/edk2 index edc6681206c..4dfdca63a93 160000 --- a/roms/edk2 +++ b/roms/edk2 @@ -1 +1 @@ -Subproject commit edc6681206c1a8791981a2f911d2fb8b3d2f5768 +Subproject commit 4dfdca63a93497203f197ec98ba20e2327e4afe4 diff --git a/roms/edk2-build.config b/roms/edk2-build.config index 
cc9b2115420..9e45361fb44 100644 --- a/roms/edk2-build.config +++ b/roms/edk2-build.config @@ -131,3 +131,16 @@ cpy1 = FV/RISCV_VIRT_CODE.fd edk2-riscv-code.fd cpy2 = FV/RISCV_VIRT_VARS.fd edk2-riscv-vars.fd pad1 = edk2-riscv-code.fd 32m pad2 = edk2-riscv-vars.fd 32m + +#################################################################################### +# LoongArch64 + +[build.loongarch64.qemu] +conf = OvmfPkg/LoongArchVirt/LoongArchVirtQemu.dsc +arch = LOONGARCH64 +plat = LoongArchVirtQemu +dest = ../pc-bios +cpy1 = FV/QEMU_EFI.fd edk2-loongarch64-code.fd +pad1 = edk2-loongarch64-code.fd 16m +cpy2 = FV/QEMU_VARS.fd edk2-loongarch64-vars.fd +pad2 = edk2-loongarch64-vars.fd 16m diff --git a/roms/edk2-version b/roms/edk2-version index 1594ed8c4de..069f19f8bf4 100644 --- a/roms/edk2-version +++ b/roms/edk2-version @@ -1,2 +1,2 @@ -EDK2_STABLE = edk2-stable202402 -EDK2_DATE = 02/14/2024 +EDK2_STABLE = edk2-stable202408 +EDK2_DATE = 08/13/2024 diff --git a/roms/openbios b/roms/openbios index af97fd7af5e..c3a19c1e549 160000 --- a/roms/openbios +++ b/roms/openbios @@ -1 +1 @@ -Subproject commit af97fd7af5e7c18f591a7b987291d3db4ffb28b5 +Subproject commit c3a19c1e54977a53027d6232050e1e3e39a98a1b diff --git a/roms/opensbi b/roms/opensbi index 455de672dd7..43cace6c367 160000 --- a/roms/opensbi +++ b/roms/opensbi @@ -1 +1 @@ -Subproject commit 455de672dd7c2aa1992df54dfb08dc11abbc1b1a +Subproject commit 43cace6c3671e5172d0df0a8963e552bb04b7b20 diff --git a/roms/seabios b/roms/seabios index a6ed6b701f0..b52ca86e094 160000 --- a/roms/seabios +++ b/roms/seabios @@ -1 +1 @@ -Subproject commit a6ed6b701f0a57db0569ab98b0661c12a6ec3ff8 +Subproject commit b52ca86e094d19b58e2304417787e96b940e39c6 diff --git a/roms/seabios-hppa b/roms/seabios-hppa index 03774edaad3..3391c580960 160000 --- a/roms/seabios-hppa +++ b/roms/seabios-hppa @@ -1 +1 @@ -Subproject commit 03774edaad3bfae090ac96ca5450353c641637d1 +Subproject commit 3391c580960febcb9fa8f686f9666adaa462c349 diff --git a/roms/skiboot b/roms/skiboot index 24a7eb35966..785a5e3070a 160000 --- a/roms/skiboot +++ b/roms/skiboot @@ -1 +1 @@ -Subproject commit 24a7eb35966d93455520bc2debdd7954314b638b +Subproject commit 785a5e3070a86e18521e62fe202b87209de30fa2 diff --git a/roms/vbootrom b/roms/vbootrom index 0c37a43527f..183c9ff8056 160000 --- a/roms/vbootrom +++ b/roms/vbootrom @@ -1 +1 @@ -Subproject commit 0c37a43527f0ee2b9584e7fb2fdc805e902635ac +Subproject commit 183c9ff8056b7946db1ae49cc23e8980ac413174 diff --git a/rust/.gitignore b/rust/.gitignore new file mode 100644 index 00000000000..1bf71b1f68e --- /dev/null +++ b/rust/.gitignore @@ -0,0 +1,3 @@ +# Ignore any cargo development build artifacts; for qemu-wide builds, all build +# artifacts will go to the meson build directory. +target diff --git a/rust/Cargo.lock b/rust/Cargo.lock new file mode 100644 index 00000000000..b785c718f31 --- /dev/null +++ b/rust/Cargo.lock @@ -0,0 +1,177 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "anyhow" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" + +[[package]] +name = "arbitrary-int" +version = "1.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c84fc003e338a6f69fbd4f7fe9f92b535ff13e9af8997f3b14b6ddff8b1df46d" + +[[package]] +name = "bilge" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc707ed8ebf81de5cd6c7f48f54b4c8621760926cdf35a57000747c512e67b57" +dependencies = [ + "arbitrary-int", + "bilge-impl", +] + +[[package]] +name = "bilge-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feb11e002038ad243af39c2068c8a72bcf147acf05025dcdb916fcc000adb2d8" +dependencies = [ + "itertools", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "bits" +version = "0.1.0" +dependencies = [ + "qemu_api_macros", +] + +[[package]] +name = "either" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" + +[[package]] +name = "foreign" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ca1b5be8c9d320daf386f1809c7acc0cb09accbae795c2001953fa50585846" +dependencies = [ + "libc", +] + +[[package]] +name = "hpet" +version = "0.1.0" +dependencies = [ + "qemu_api", + "qemu_api_macros", +] + +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + +[[package]] +name = "libc" +version = "0.2.162" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" + +[[package]] +name = "pl011" +version = "0.1.0" +dependencies = [ + "bilge", + "bilge-impl", + "bits", + "qemu_api", + "qemu_api_macros", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.84" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec96c6a92621310b51366f1e28d05ef11489516e93be030060e5fc12024a49d6" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "qemu_api" +version = "0.1.0" +dependencies = [ + "anyhow", + "foreign", + "libc", + "qemu_api_macros", +] + +[[package]] +name = "qemu_api_macros" +version = "0.1.0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "quote" +version = "1.0.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "syn" +version = "2.0.66" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" diff --git a/rust/Cargo.toml b/rust/Cargo.toml new file mode 100644 index 00000000000..0868e1b4268 --- /dev/null +++ b/rust/Cargo.toml @@ -0,0 +1,101 @@ +[workspace] +resolver = "2" +members = [ + "bits", + "qemu-api-macros", + "qemu-api", + "hw/char/pl011", + "hw/timer/hpet", +] + +[workspace.package] +edition = "2021" +homepage = "https://www.qemu.org" +license = "GPL-2.0-or-later" +repository = "https://gitlab.com/qemu-project/qemu/" +rust-version = "1.77.0" + +[workspace.lints.rust] +unexpected_cfgs = { level = "deny", check-cfg = [ + 'cfg(MESON)', 'cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)', +] } + +# Occasionally, we may need to silence warnings and clippy lints that +# were only introduced in newer Rust compiler versions. Do not croak +# in that case; a CI job with rust_strict_lints == true disables this +# and ensures that we do not have misspelled allow() attributes. +unknown_lints = "allow" + +# Prohibit code that is forbidden in Rust 2024 +unsafe_op_in_unsafe_fn = "deny" + +[workspace.lints.rustdoc] +private_intra_doc_links = "allow" + +broken_intra_doc_links = "deny" +invalid_html_tags = "deny" +invalid_rust_codeblocks = "deny" +bare_urls = "deny" +unescaped_backticks = "deny" +redundant_explicit_links = "deny" + +[workspace.lints.clippy] +# default-warn lints +result_unit_err = "allow" +should_implement_trait = "deny" +# can be for a reason, e.g. 
in callbacks +unused_self = "allow" +# common in device crates +upper_case_acronyms = "allow" + +# default-allow lints +as_ptr_cast_mut = "deny" +as_underscore = "deny" +assertions_on_result_states = "deny" +bool_to_int_with_if = "deny" +borrow_as_ptr = "deny" +cast_lossless = "deny" +dbg_macro = "deny" +debug_assert_with_mut_call = "deny" +derive_partial_eq_without_eq = "deny" +doc_markdown = "deny" +empty_structs_with_brackets = "deny" +ignored_unit_patterns = "deny" +implicit_clone = "deny" +macro_use_imports = "deny" +missing_safety_doc = "deny" +mut_mut = "deny" +needless_bitwise_bool = "deny" +needless_pass_by_ref_mut = "deny" +needless_update = "deny" +no_effect_underscore_binding = "deny" +option_option = "deny" +or_fun_call = "deny" +ptr_as_ptr = "deny" +ptr_cast_constness = "deny" +pub_underscore_fields = "deny" +redundant_clone = "deny" +redundant_closure_for_method_calls = "deny" +redundant_else = "deny" +redundant_pub_crate = "deny" +ref_binding_to_reference = "deny" +ref_option_ref = "deny" +return_self_not_must_use = "deny" +same_name_method = "deny" +semicolon_inside_block = "deny" +shadow_unrelated = "deny" +significant_drop_in_scrutinee = "deny" +significant_drop_tightening = "deny" +suspicious_operation_groupings = "deny" +transmute_ptr_to_ptr = "deny" +transmute_undefined_repr = "deny" +type_repetition_in_bounds = "deny" +uninlined_format_args = "deny" +used_underscore_binding = "deny" + +# nice to have, but cannot be enabled yet +#wildcard_imports = "deny" # still have many bindings::* imports + +# these may have false positives +#option_if_let_else = "deny" +cognitive_complexity = "deny" diff --git a/rust/Kconfig b/rust/Kconfig new file mode 100644 index 00000000000..f9f5c390988 --- /dev/null +++ b/rust/Kconfig @@ -0,0 +1 @@ +source hw/Kconfig diff --git a/rust/bits/Cargo.toml b/rust/bits/Cargo.toml new file mode 100644 index 00000000000..1ff38a41175 --- /dev/null +++ b/rust/bits/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "bits" +version = "0.1.0" +authors = ["Paolo Bonzini "] +description = "const-friendly bit flags" +resolver = "2" +publish = false + +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +qemu_api_macros = { path = "../qemu-api-macros" } + +[lints] +workspace = true diff --git a/rust/bits/meson.build b/rust/bits/meson.build new file mode 100644 index 00000000000..2a41e138c54 --- /dev/null +++ b/rust/bits/meson.build @@ -0,0 +1,16 @@ +_bits_rs = static_library( + 'bits', + 'src/lib.rs', + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + dependencies: [qemu_api_macros], +) + +bits_rs = declare_dependency(link_with: _bits_rs) + +rust.test('rust-bits-tests', _bits_rs, + suite: ['unit', 'rust']) + +rust.doctest('rust-bits-doctests', _bits_rs, + dependencies: bits_rs, + suite: ['doc', 'rust']) diff --git a/rust/bits/src/lib.rs b/rust/bits/src/lib.rs new file mode 100644 index 00000000000..d485d6bd110 --- /dev/null +++ b/rust/bits/src/lib.rs @@ -0,0 +1,443 @@ +// SPDX-License-Identifier: MIT or Apache-2.0 or GPL-2.0-or-later + +/// # Definition entry point +/// +/// Define a struct with a single field of type $type. Include public constants +/// for each element listed in braces. +/// +/// The unnamed element at the end, if present, can be used to enlarge the set +/// of valid bits. 
Bits that are valid but not listed are treated normally for +/// the purpose of arithmetic operations, and are printed with their hexadecimal +/// value. +/// +/// The struct implements the following traits: [`BitAnd`](std::ops::BitAnd), +/// [`BitOr`](std::ops::BitOr), [`BitXor`](std::ops::BitXor), +/// [`Not`](std::ops::Not), [`Sub`](std::ops::Sub); [`Debug`](std::fmt::Debug), +/// [`Display`](std::fmt::Display), [`Binary`](std::fmt::Binary), +/// [`Octal`](std::fmt::Octal), [`LowerHex`](std::fmt::LowerHex), +/// [`UpperHex`](std::fmt::UpperHex); [`From`]``/[`Into`]`` where +/// type is the type specified in the definition. +/// +/// ## Example +/// +/// ``` +/// # use bits::bits; +/// bits! { +/// pub struct Colors(u8) { +/// BLACK = 0, +/// RED = 1, +/// GREEN = 1 << 1, +/// BLUE = 1 << 2, +/// WHITE = (1 << 0) | (1 << 1) | (1 << 2), +/// } +/// } +/// ``` +/// +/// ``` +/// # use bits::bits; +/// # bits! { pub struct Colors(u8) { BLACK = 0, RED = 1, GREEN = 1 << 1, BLUE = 1 << 2, } } +/// +/// bits! { +/// pub struct Colors8(u8) { +/// BLACK = 0, +/// RED = 1, +/// GREEN = 1 << 1, +/// BLUE = 1 << 2, +/// WHITE = (1 << 0) | (1 << 1) | (1 << 2), +/// +/// _ = 255, +/// } +/// } +/// +/// // The previously defined struct ignores bits not explicitly defined. +/// assert_eq!( +/// Colors::from(255).into_bits(), +/// (Colors::RED | Colors::GREEN | Colors::BLUE).into_bits() +/// ); +/// +/// // Adding "_ = 255" makes it retain other bits as well. +/// assert_eq!(Colors8::from(255).into_bits(), 255); +/// +/// // all() does not include the additional bits, valid_bits() does +/// assert_eq!(Colors8::all().into_bits(), Colors::all().into_bits()); +/// assert_eq!(Colors8::valid_bits().into_bits(), 255); +/// ``` +/// +/// # Evaluation entry point +/// +/// Return a constant corresponding to the boolean expression `$expr`. +/// Identifiers in the expression correspond to values defined for the +/// type `$type`. Supported operators are `!` (unary), `-`, `&`, `^`, `|`. +/// +/// ## Examples +/// +/// ``` +/// # use bits::bits; +/// bits! { +/// pub struct Colors(u8) { +/// BLACK = 0, +/// RED = 1, +/// GREEN = 1 << 1, +/// BLUE = 1 << 2, +/// // same as "WHITE = 7", +/// WHITE = bits!(Self as u8: RED | GREEN | BLUE), +/// } +/// } +/// +/// let rgb = bits! { Colors: RED | GREEN | BLUE }; +/// assert_eq!(rgb, Colors::WHITE); +/// ``` +#[macro_export] +macro_rules! bits { + { + $(#[$struct_meta:meta])* + $struct_vis:vis struct $struct_name:ident($field_vis:vis $type:ty) { + $($(#[$const_meta:meta])* $const:ident = $val:expr),+ + $(,_ = $mask:expr)? + $(,)? 
+ } + } => { + $(#[$struct_meta])* + #[derive(Clone, Copy, PartialEq, Eq)] + #[repr(transparent)] + $struct_vis struct $struct_name($field_vis $type); + + impl $struct_name { + $( #[allow(dead_code)] $(#[$const_meta])* + pub const $const: $struct_name = $struct_name($val); )+ + + #[doc(hidden)] + const VALID__: $type = $( Self::$const.0 )|+ $(|$mask)?; + + #[allow(dead_code)] + #[inline(always)] + pub const fn empty() -> Self { + Self(0) + } + + #[allow(dead_code)] + #[inline(always)] + pub const fn all() -> Self { + Self($( Self::$const.0 )|+) + } + + #[allow(dead_code)] + #[inline(always)] + pub const fn valid_bits() -> Self { + Self(Self::VALID__) + } + + #[allow(dead_code)] + #[inline(always)] + pub const fn valid(val: $type) -> bool { + (val & !Self::VALID__) == 0 + } + + #[allow(dead_code)] + #[inline(always)] + pub const fn any_set(self, mask: Self) -> bool { + (self.0 & mask.0) != 0 + } + + #[allow(dead_code)] + #[inline(always)] + pub const fn all_set(self, mask: Self) -> bool { + (self.0 & mask.0) == mask.0 + } + + #[allow(dead_code)] + #[inline(always)] + pub const fn none_set(self, mask: Self) -> bool { + (self.0 & mask.0) == 0 + } + + #[allow(dead_code)] + #[inline(always)] + pub const fn from_bits(value: $type) -> Self { + $struct_name(value) + } + + #[allow(dead_code)] + #[inline(always)] + pub const fn into_bits(self) -> $type { + self.0 + } + + #[allow(dead_code)] + #[inline(always)] + pub fn set(&mut self, rhs: Self) { + self.0 |= rhs.0; + } + + #[allow(dead_code)] + #[inline(always)] + pub fn clear(&mut self, rhs: Self) { + self.0 &= !rhs.0; + } + + #[allow(dead_code)] + #[inline(always)] + pub fn toggle(&mut self, rhs: Self) { + self.0 ^= rhs.0; + } + + #[allow(dead_code)] + #[inline(always)] + pub const fn intersection(self, rhs: Self) -> Self { + $struct_name(self.0 & rhs.0) + } + + #[allow(dead_code)] + #[inline(always)] + pub const fn difference(self, rhs: Self) -> Self { + $struct_name(self.0 & !rhs.0) + } + + #[allow(dead_code)] + #[inline(always)] + pub const fn symmetric_difference(self, rhs: Self) -> Self { + $struct_name(self.0 ^ rhs.0) + } + + #[allow(dead_code)] + #[inline(always)] + pub const fn union(self, rhs: Self) -> Self { + $struct_name(self.0 | rhs.0) + } + + #[allow(dead_code)] + #[inline(always)] + pub const fn invert(self) -> Self { + $struct_name(self.0 ^ Self::VALID__) + } + } + + impl ::std::fmt::Binary for $struct_name { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + // If no width, use the highest valid bit + let width = f.width().unwrap_or((Self::VALID__.ilog2() + 1) as usize); + write!(f, "{:0>width$.precision$b}", self.0, + width = width, + precision = f.precision().unwrap_or(width)) + } + } + + impl ::std::fmt::LowerHex for $struct_name { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + <$type as ::std::fmt::LowerHex>::fmt(&self.0, f) + } + } + + impl ::std::fmt::Octal for $struct_name { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + <$type as ::std::fmt::Octal>::fmt(&self.0, f) + } + } + + impl ::std::fmt::UpperHex for $struct_name { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + <$type as ::std::fmt::UpperHex>::fmt(&self.0, f) + } + } + + impl ::std::fmt::Debug for $struct_name { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "{}({})", stringify!($struct_name), self) + } + } + + impl ::std::fmt::Display for $struct_name { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> 
::std::fmt::Result { + use ::std::fmt::Display; + let mut first = true; + let mut left = self.0; + $(if Self::$const.0.is_power_of_two() && (self & Self::$const).0 != 0 { + if first { first = false } else { Display::fmt(&'|', f)?; } + Display::fmt(stringify!($const), f)?; + left -= Self::$const.0; + })+ + if first { + Display::fmt(&'0', f) + } else if left != 0 { + write!(f, "|{left:#x}") + } else { + Ok(()) + } + } + } + + impl ::std::cmp::PartialEq<$type> for $struct_name { + fn eq(&self, rhs: &$type) -> bool { + self.0 == *rhs + } + } + + impl ::std::ops::BitAnd<$struct_name> for &$struct_name { + type Output = $struct_name; + fn bitand(self, rhs: $struct_name) -> Self::Output { + $struct_name(self.0 & rhs.0) + } + } + + impl ::std::ops::BitAndAssign<$struct_name> for $struct_name { + fn bitand_assign(&mut self, rhs: $struct_name) { + self.0 = self.0 & rhs.0 + } + } + + impl ::std::ops::BitXor<$struct_name> for &$struct_name { + type Output = $struct_name; + fn bitxor(self, rhs: $struct_name) -> Self::Output { + $struct_name(self.0 ^ rhs.0) + } + } + + impl ::std::ops::BitXorAssign<$struct_name> for $struct_name { + fn bitxor_assign(&mut self, rhs: $struct_name) { + self.0 = self.0 ^ rhs.0 + } + } + + impl ::std::ops::BitOr<$struct_name> for &$struct_name { + type Output = $struct_name; + fn bitor(self, rhs: $struct_name) -> Self::Output { + $struct_name(self.0 | rhs.0) + } + } + + impl ::std::ops::BitOrAssign<$struct_name> for $struct_name { + fn bitor_assign(&mut self, rhs: $struct_name) { + self.0 = self.0 | rhs.0 + } + } + + impl ::std::ops::Sub<$struct_name> for &$struct_name { + type Output = $struct_name; + fn sub(self, rhs: $struct_name) -> Self::Output { + $struct_name(self.0 & !rhs.0) + } + } + + impl ::std::ops::SubAssign<$struct_name> for $struct_name { + fn sub_assign(&mut self, rhs: $struct_name) { + self.0 = self.0 - rhs.0 + } + } + + impl ::std::ops::Not for &$struct_name { + type Output = $struct_name; + fn not(self) -> Self::Output { + $struct_name(self.0 ^ $struct_name::VALID__) + } + } + + impl ::std::ops::BitAnd<$struct_name> for $struct_name { + type Output = Self; + fn bitand(self, rhs: Self) -> Self::Output { + $struct_name(self.0 & rhs.0) + } + } + + impl ::std::ops::BitXor<$struct_name> for $struct_name { + type Output = Self; + fn bitxor(self, rhs: Self) -> Self::Output { + $struct_name(self.0 ^ rhs.0) + } + } + + impl ::std::ops::BitOr<$struct_name> for $struct_name { + type Output = Self; + fn bitor(self, rhs: Self) -> Self::Output { + $struct_name(self.0 | rhs.0) + } + } + + impl ::std::ops::Sub<$struct_name> for $struct_name { + type Output = Self; + fn sub(self, rhs: Self) -> Self::Output { + $struct_name(self.0 & !rhs.0) + } + } + + impl ::std::ops::Not for $struct_name { + type Output = Self; + fn not(self) -> Self::Output { + $struct_name(self.0 ^ Self::VALID__) + } + } + + impl From<$struct_name> for $type { + fn from(x: $struct_name) -> $type { + x.0 + } + } + + impl From<$type> for $struct_name { + fn from(x: $type) -> Self { + $struct_name(x & Self::VALID__) + } + } + }; + + { $type:ty: $expr:expr } => { + ::qemu_api_macros::bits_const_internal! { $type @ ($expr) } + }; + + { $type:ty as $int_type:ty: $expr:expr } => { + (::qemu_api_macros::bits_const_internal! { $type @ ($expr) }.into_bits()) as $int_type + }; +} + +#[cfg(test)] +mod test { + bits! 
{ + pub struct InterruptMask(u32) { + OE = 1 << 10, + BE = 1 << 9, + PE = 1 << 8, + FE = 1 << 7, + RT = 1 << 6, + TX = 1 << 5, + RX = 1 << 4, + DSR = 1 << 3, + DCD = 1 << 2, + CTS = 1 << 1, + RI = 1 << 0, + + E = bits!(Self as u32: OE | BE | PE | FE), + MS = bits!(Self as u32: RI | DSR | DCD | CTS), + } + } + + #[test] + pub fn test_not() { + assert_eq!( + !InterruptMask::from(InterruptMask::RT.0), + InterruptMask::E | InterruptMask::MS | InterruptMask::TX | InterruptMask::RX + ); + } + + #[test] + pub fn test_and() { + assert_eq!( + InterruptMask::from(0), + InterruptMask::MS & InterruptMask::OE + ) + } + + #[test] + pub fn test_or() { + assert_eq!( + InterruptMask::E, + InterruptMask::OE | InterruptMask::BE | InterruptMask::PE | InterruptMask::FE + ); + } + + #[test] + pub fn test_xor() { + assert_eq!( + InterruptMask::E ^ InterruptMask::BE, + InterruptMask::OE | InterruptMask::PE | InterruptMask::FE + ); + } +} diff --git a/rust/hw/Kconfig b/rust/hw/Kconfig new file mode 100644 index 00000000000..36f92ec0287 --- /dev/null +++ b/rust/hw/Kconfig @@ -0,0 +1,3 @@ +# devices Kconfig +source char/Kconfig +source timer/Kconfig diff --git a/rust/hw/char/Kconfig b/rust/hw/char/Kconfig new file mode 100644 index 00000000000..5fe800c4806 --- /dev/null +++ b/rust/hw/char/Kconfig @@ -0,0 +1,2 @@ +config X_PL011_RUST + bool diff --git a/rust/hw/char/meson.build b/rust/hw/char/meson.build new file mode 100644 index 00000000000..5716dc43ef6 --- /dev/null +++ b/rust/hw/char/meson.build @@ -0,0 +1 @@ +subdir('pl011') diff --git a/rust/hw/char/pl011/Cargo.toml b/rust/hw/char/pl011/Cargo.toml new file mode 100644 index 00000000000..88ef110507d --- /dev/null +++ b/rust/hw/char/pl011/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "pl011" +version = "0.1.0" +authors = ["Manos Pitsidianakis "] +description = "pl011 device model for QEMU" +resolver = "2" +publish = false + +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +bilge = { version = "0.2.0" } +bilge-impl = { version = "0.2.0" } +bits = { path = "../../../bits" } +qemu_api = { path = "../../../qemu-api" } +qemu_api_macros = { path = "../../../qemu-api-macros" } + +[lints] +workspace = true diff --git a/rust/hw/char/pl011/meson.build b/rust/hw/char/pl011/meson.build new file mode 100644 index 00000000000..2a1be329abc --- /dev/null +++ b/rust/hw/char/pl011/meson.build @@ -0,0 +1,21 @@ +_libpl011_rs = static_library( + 'pl011', + files('src/lib.rs'), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + dependencies: [ + bilge_rs, + bilge_impl_rs, + bits_rs, + qemu_api, + qemu_api_macros, + ], +) + +rust_devices_ss.add(when: 'CONFIG_X_PL011_RUST', if_true: [declare_dependency( + link_whole: [_libpl011_rs], + # Putting proc macro crates in `dependencies` is necessary for Meson to find + # them when compiling the root per-target static rust lib. 
+ dependencies: [bilge_impl_rs, qemu_api_macros], + variables: {'crate': 'pl011'}, +)]) diff --git a/rust/hw/char/pl011/src/device.rs b/rust/hw/char/pl011/src/device.rs new file mode 100644 index 00000000000..ceb71dd99b6 --- /dev/null +++ b/rust/hw/char/pl011/src/device.rs @@ -0,0 +1,803 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org> +// SPDX-License-Identifier: GPL-2.0-or-later + +use std::{ + ffi::{c_int, c_void, CStr}, + mem::size_of, + ptr::NonNull, +}; + +use qemu_api::{ + bindings::{qdev_prop_bool, qdev_prop_chr}, + chardev::{CharBackend, Chardev, Event}, + impl_vmstate_forward, + irq::{IRQState, InterruptSource}, + log::Log, + log_mask_ln, + memory::{hwaddr, MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder}, + prelude::*, + qdev::{Clock, ClockEvent, DeviceImpl, DeviceState, Property, ResetType, ResettablePhasesImpl}, + qom::{ObjectImpl, Owned, ParentField, ParentInit}, + static_assert, + sysbus::{SysBusDevice, SysBusDeviceImpl}, + uninit_field_mut, + vmstate::VMStateDescription, + vmstate_clock, vmstate_fields, vmstate_of, vmstate_struct, vmstate_subsections, vmstate_unused, + zeroable::Zeroable, +}; + +use crate::registers::{self, Interrupt, RegisterOffset}; + +// TODO: You must disable the UART before any of the control registers are +// reprogrammed. When the UART is disabled in the middle of transmission or +// reception, it completes the current character before stopping + +/// Integer Baud Rate Divider, `UARTIBRD` +const IBRD_MASK: u32 = 0xffff; + +/// Fractional Baud Rate Divider, `UARTFBRD` +const FBRD_MASK: u32 = 0x3f; + +/// QEMU sourced constant. +pub const PL011_FIFO_DEPTH: u32 = 16; + +#[derive(Clone, Copy)] +struct DeviceId(&'static [u8; 8]); + +impl std::ops::Index<hwaddr> for DeviceId { + type Output = u8; + + fn index(&self, idx: hwaddr) -> &Self::Output { + &self.0[idx as usize] + } +} + +// FIFOs use 32-bit indices instead of usize, for compatibility with +// the migration stream produced by the C version of this device.
+#[repr(transparent)] +#[derive(Debug, Default)] +pub struct Fifo([registers::Data; PL011_FIFO_DEPTH as usize]); +impl_vmstate_forward!(Fifo); + +impl Fifo { + const fn len(&self) -> u32 { + self.0.len() as u32 + } +} + +impl std::ops::IndexMut<u32> for Fifo { + fn index_mut(&mut self, idx: u32) -> &mut Self::Output { + &mut self.0[idx as usize] + } +} + +impl std::ops::Index<u32> for Fifo { + type Output = registers::Data; + + fn index(&self, idx: u32) -> &Self::Output { + &self.0[idx as usize] + } +} + +#[repr(C)] +#[derive(Debug, Default)] +pub struct PL011Registers { + #[doc(alias = "fr")] + pub flags: registers::Flags, + #[doc(alias = "lcr")] + pub line_control: registers::LineControl, + #[doc(alias = "rsr")] + pub receive_status_error_clear: registers::ReceiveStatusErrorClear, + #[doc(alias = "cr")] + pub control: registers::Control, + pub dmacr: u32, + pub int_enabled: Interrupt, + pub int_level: Interrupt, + pub read_fifo: Fifo, + pub ilpr: u32, + pub ibrd: u32, + pub fbrd: u32, + pub ifl: u32, + pub read_pos: u32, + pub read_count: u32, + pub read_trigger: u32, +} + +#[repr(C)] +#[derive(qemu_api_macros::Object)] +/// PL011 Device Model in QEMU +pub struct PL011State { + pub parent_obj: ParentField<SysBusDevice>, + pub iomem: MemoryRegion, + #[doc(alias = "chr")] + pub char_backend: CharBackend, + pub regs: BqlRefCell<PL011Registers>, + /// QEMU interrupts + /// + /// ```text + /// * sysbus MMIO region 0: device registers + /// * sysbus IRQ 0: `UARTINTR` (combined interrupt line) + /// * sysbus IRQ 1: `UARTRXINTR` (receive FIFO interrupt line) + /// * sysbus IRQ 2: `UARTTXINTR` (transmit FIFO interrupt line) + /// * sysbus IRQ 3: `UARTRTINTR` (receive timeout interrupt line) + /// * sysbus IRQ 4: `UARTMSINTR` (modem status interrupt line) + /// * sysbus IRQ 5: `UARTEINTR` (error interrupt line) + /// ``` + #[doc(alias = "irq")] + pub interrupts: [InterruptSource; IRQMASK.len()], + #[doc(alias = "clk")] + pub clock: Owned<Clock>, + #[doc(alias = "migrate_clk")] + pub migrate_clock: bool, +} + +// Some C users of this device embed its state struct into their own +// structs, so the size of the Rust version must not be any larger +// than the size of the C one. If this assert triggers you need to +// expand the padding_for_rust[] array in the C PL011State struct. +static_assert!(size_of::<PL011State>() <= size_of::<qemu_api::bindings::PL011State>()); + +qom_isa!(PL011State : SysBusDevice, DeviceState, Object); + +#[repr(C)] +pub struct PL011Class { + parent_class: <SysBusDevice as ObjectType>::Class, + /// The byte string that identifies the device.
+ device_id: DeviceId, +} + +trait PL011Impl: SysBusDeviceImpl + IsA<PL011State> { + const DEVICE_ID: DeviceId; +} + +impl PL011Class { + fn class_init<T: PL011Impl>(&mut self) { + self.device_id = T::DEVICE_ID; + self.parent_class.class_init::<T>(); + } +} + +unsafe impl ObjectType for PL011State { + type Class = PL011Class; + const TYPE_NAME: &'static CStr = crate::TYPE_PL011; +} + +impl PL011Impl for PL011State { + const DEVICE_ID: DeviceId = DeviceId(&[0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1]); +} + +impl ObjectImpl for PL011State { + type ParentType = SysBusDevice; + + const INSTANCE_INIT: Option<unsafe fn(ParentInit<Self>)> = Some(Self::init); + const INSTANCE_POST_INIT: Option<fn(&Self)> = Some(Self::post_init); + const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>; +} + +impl DeviceImpl for PL011State { + fn properties() -> &'static [Property] { + &PL011_PROPERTIES + } + fn vmsd() -> Option<&'static VMStateDescription> { + Some(&VMSTATE_PL011) + } + const REALIZE: Option<fn(&Self) -> qemu_api::Result<()>> = Some(Self::realize); +} + +impl ResettablePhasesImpl for PL011State { + const HOLD: Option<fn(&Self, ResetType)> = Some(Self::reset_hold); +} + +impl SysBusDeviceImpl for PL011State {} + +impl PL011Registers { + pub(self) fn read(&mut self, offset: RegisterOffset) -> (bool, u32) { + use RegisterOffset::*; + + let mut update = false; + let result = match offset { + DR => self.read_data_register(&mut update), + RSR => u32::from(self.receive_status_error_clear), + FR => u32::from(self.flags), + FBRD => self.fbrd, + ILPR => self.ilpr, + IBRD => self.ibrd, + LCR_H => u32::from(self.line_control), + CR => u32::from(self.control), + FLS => self.ifl, + IMSC => u32::from(self.int_enabled), + RIS => u32::from(self.int_level), + MIS => u32::from(self.int_level & self.int_enabled), + ICR => { + // "The UARTICR Register is the interrupt clear register and is write-only" + // Source: ARM DDI 0183G 3.3.13 Interrupt Clear Register, UARTICR + 0 + } + DMACR => self.dmacr, + }; + (update, result) + } + + pub(self) fn write( + &mut self, + offset: RegisterOffset, + value: u32, + char_backend: &CharBackend, + ) -> bool { + // eprintln!("write offset {offset} value {value}"); + use RegisterOffset::*; + match offset { + DR => return self.write_data_register(value), + RSR => { + self.receive_status_error_clear = 0.into(); + } + FR => { + // flag writes are ignored + } + ILPR => { + self.ilpr = value; + } + IBRD => { + self.ibrd = value; + } + FBRD => { + self.fbrd = value; + } + LCR_H => { + let new_val: registers::LineControl = value.into(); + // Reset the FIFO state on FIFO enable or disable + if self.line_control.fifos_enabled() != new_val.fifos_enabled() { + self.reset_rx_fifo(); + self.reset_tx_fifo(); + } + let update = (self.line_control.send_break() != new_val.send_break()) && { + let break_enable = new_val.send_break(); + let _ = char_backend.send_break(break_enable); + self.loopback_break(break_enable) + }; + self.line_control = new_val; + self.set_read_trigger(); + return update; + } + CR => { + // ??? Need to implement the enable bit.
+ self.control = value.into(); + return self.loopback_mdmctrl(); + } + FLS => { + self.ifl = value; + self.set_read_trigger(); + } + IMSC => { + self.int_enabled = Interrupt::from(value); + return true; + } + RIS => {} + MIS => {} + ICR => { + self.int_level &= !Interrupt::from(value); + return true; + } + DMACR => { + self.dmacr = value; + if value & 3 > 0 { + log_mask_ln!(Log::Unimp, "pl011: DMA not implemented"); + } + } + } + false + } + + fn read_data_register(&mut self, update: &mut bool) -> u32 { + self.flags.set_receive_fifo_full(false); + let c = self.read_fifo[self.read_pos]; + + if self.read_count > 0 { + self.read_count -= 1; + self.read_pos = (self.read_pos + 1) & (self.fifo_depth() - 1); + } + if self.read_count == 0 { + self.flags.set_receive_fifo_empty(true); + } + if self.read_count + 1 == self.read_trigger { + self.int_level &= !Interrupt::RX; + } + self.receive_status_error_clear.set_from_data(c); + *update = true; + u32::from(c) + } + + fn write_data_register(&mut self, value: u32) -> bool { + if !self.control.enable_uart() { + log_mask_ln!(Log::GuestError, "PL011 data written to disabled UART"); + } + if !self.control.enable_transmit() { + log_mask_ln!(Log::GuestError, "PL011 data written to disabled TX UART"); + } + // interrupts always checked + let _ = self.loopback_tx(value.into()); + self.int_level |= Interrupt::TX; + true + } + + #[inline] + #[must_use] + fn loopback_tx(&mut self, value: registers::Data) -> bool { + // Caveat: + // + // In real hardware, TX loopback happens at the serial-bit level + // and then reassembled by the RX logics back into bytes and placed + // into the RX fifo. That is, loopback happens after TX fifo. + // + // Because the real hardware TX fifo is time-drained at the frame + // rate governed by the configured serial format, some loopback + // bytes in TX fifo may still be able to get into the RX fifo + // that could be full at times while being drained at software + // pace. + // + // In such scenario, the RX draining pace is the major factor + // deciding which loopback bytes get into the RX fifo, unless + // hardware flow-control is enabled. + // + // For simplicity, the above described is not emulated. + self.loopback_enabled() && self.fifo_rx_put(value) + } + + #[must_use] + fn loopback_mdmctrl(&mut self) -> bool { + if !self.loopback_enabled() { + return false; + } + + /* + * Loopback software-driven modem control outputs to modem status inputs: + * FR.RI <= CR.Out2 + * FR.DCD <= CR.Out1 + * FR.CTS <= CR.RTS + * FR.DSR <= CR.DTR + * + * The loopback happens immediately even if this call is triggered + * by setting only CR.LBE. + * + * CTS/RTS updates due to enabled hardware flow controls are not + * dealt with here. 
+ */ + + self.flags.set_ring_indicator(self.control.out_2()); + self.flags.set_data_carrier_detect(self.control.out_1()); + self.flags.set_clear_to_send(self.control.request_to_send()); + self.flags + .set_data_set_ready(self.control.data_transmit_ready()); + + // Change interrupts based on updated FR + let mut il = self.int_level; + + il &= !Interrupt::MS; + + if self.flags.data_set_ready() { + il |= Interrupt::DSR; + } + if self.flags.data_carrier_detect() { + il |= Interrupt::DCD; + } + if self.flags.clear_to_send() { + il |= Interrupt::CTS; + } + if self.flags.ring_indicator() { + il |= Interrupt::RI; + } + self.int_level = il; + true + } + + fn loopback_break(&mut self, enable: bool) -> bool { + enable && self.loopback_tx(registers::Data::BREAK) + } + + fn set_read_trigger(&mut self) { + self.read_trigger = 1; + } + + pub fn reset(&mut self) { + self.line_control.reset(); + self.receive_status_error_clear.reset(); + self.dmacr = 0; + self.int_enabled = 0.into(); + self.int_level = 0.into(); + self.ilpr = 0; + self.ibrd = 0; + self.fbrd = 0; + self.read_trigger = 1; + self.ifl = 0x12; + self.control.reset(); + self.flags.reset(); + self.reset_rx_fifo(); + self.reset_tx_fifo(); + } + + pub fn reset_rx_fifo(&mut self) { + self.read_count = 0; + self.read_pos = 0; + + // Reset FIFO flags + self.flags.set_receive_fifo_full(false); + self.flags.set_receive_fifo_empty(true); + } + + pub fn reset_tx_fifo(&mut self) { + // Reset FIFO flags + self.flags.set_transmit_fifo_full(false); + self.flags.set_transmit_fifo_empty(true); + } + + #[inline] + pub fn fifo_enabled(&self) -> bool { + self.line_control.fifos_enabled() == registers::Mode::FIFO + } + + #[inline] + pub fn loopback_enabled(&self) -> bool { + self.control.enable_loopback() + } + + #[inline] + pub fn fifo_depth(&self) -> u32 { + // Note: FIFO depth is expected to be power-of-2 + if self.fifo_enabled() { + return PL011_FIFO_DEPTH; + } + 1 + } + + #[must_use] + pub fn fifo_rx_put(&mut self, value: registers::Data) -> bool { + let depth = self.fifo_depth(); + assert!(depth > 0); + let slot = (self.read_pos + self.read_count) & (depth - 1); + self.read_fifo[slot] = value; + self.read_count += 1; + self.flags.set_receive_fifo_empty(false); + if self.read_count == depth { + self.flags.set_receive_fifo_full(true); + } + + if self.read_count == self.read_trigger { + self.int_level |= Interrupt::RX; + return true; + } + false + } + + pub fn post_load(&mut self) -> Result<(), ()> { + /* Sanity-check input state */ + if self.read_pos >= self.read_fifo.len() || self.read_count > self.read_fifo.len() { + return Err(()); + } + + if !self.fifo_enabled() && self.read_count > 0 && self.read_pos > 0 { + // Older versions of PL011 didn't ensure that the single + // character in the FIFO in FIFO-disabled mode is in + // element 0 of the array; convert to follow the current + // code's assumptions. + self.read_fifo[0] = self.read_fifo[self.read_pos]; + self.read_pos = 0; + } + + self.ibrd &= IBRD_MASK; + self.fbrd &= FBRD_MASK; + + Ok(()) + } +} + +impl PL011State { + /// Initializes a pre-allocated, uninitialized instance of `PL011State`. + /// + /// # Safety + /// + /// `self` must point to a correctly sized and aligned location for the + /// `PL011State` type. It must not be called more than once on the same + /// location/instance. All its fields are expected to hold uninitialized + /// values with the sole exception of `parent_obj`. 
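// Illustrative sketch of the receive-FIFO index arithmetic used by
// `fifo_rx_put()` above and `read_data_register()` earlier, assuming the usual
// 16-entry PL011 FIFO (`PL011_FIFO_DEPTH`, defined elsewhere in this series)
// with FIFOs enabled:
//
//     depth = 16                      // fifo_depth(), power of two
//     read_pos = 14, read_count = 3   // occupied slots: 14, 15, 0
//     slot = (read_pos + read_count) & (depth - 1)
//          = (14 + 3) & 15 = 1        // next incoming byte wraps to slot 1
//
// With FIFOs disabled, fifo_depth() returns 1, the mask becomes 0, and both
// producer and consumer are pinned to element 0; that is exactly the invariant
// that `post_load()` above restores for older migration streams.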
+ unsafe fn init(mut this: ParentInit) { + static PL011_OPS: MemoryRegionOps = MemoryRegionOpsBuilder::::new() + .read(&PL011State::read) + .write(&PL011State::write) + .native_endian() + .impl_sizes(4, 4) + .build(); + + // SAFETY: this and this.iomem are guaranteed to be valid at this point + MemoryRegion::init_io( + &mut uninit_field_mut!(*this, iomem), + &PL011_OPS, + "pl011", + 0x1000, + ); + + uninit_field_mut!(*this, regs).write(Default::default()); + + let clock = DeviceState::init_clock_in( + &mut this, + "clk", + &Self::clock_update, + ClockEvent::ClockUpdate, + ); + uninit_field_mut!(*this, clock).write(clock); + } + + const fn clock_update(&self, _event: ClockEvent) { + /* pl011_trace_baudrate_change(s); */ + } + + fn post_init(&self) { + self.init_mmio(&self.iomem); + for irq in self.interrupts.iter() { + self.init_irq(irq); + } + } + + fn read(&self, offset: hwaddr, _size: u32) -> u64 { + match RegisterOffset::try_from(offset) { + Err(v) if (0x3f8..0x400).contains(&(v >> 2)) => { + let device_id = self.get_class().device_id; + u64::from(device_id[(offset - 0xfe0) >> 2]) + } + Err(_) => { + log_mask_ln!(Log::GuestError, "PL011State::read: Bad offset {offset}"); + 0 + } + Ok(field) => { + let (update_irq, result) = self.regs.borrow_mut().read(field); + if update_irq { + self.update(); + self.char_backend.accept_input(); + } + result.into() + } + } + } + + fn write(&self, offset: hwaddr, value: u64, _size: u32) { + let mut update_irq = false; + if let Ok(field) = RegisterOffset::try_from(offset) { + // qemu_chr_fe_write_all() calls into the can_receive + // callback, so handle writes before entering PL011Registers. + if field == RegisterOffset::DR { + // ??? Check if transmitter is enabled. + let ch: [u8; 1] = [value as u8]; + // XXX this blocks entire thread. Rewrite to use + // qemu_chr_fe_write and background I/O callbacks + let _ = self.char_backend.write_all(&ch); + } + + update_irq = self + .regs + .borrow_mut() + .write(field, value as u32, &self.char_backend); + } else { + log_mask_ln!( + Log::GuestError, + "PL011State::write: Bad offset {offset} value {value}" + ); + } + if update_irq { + self.update(); + } + } + + fn can_receive(&self) -> u32 { + let regs = self.regs.borrow(); + // trace_pl011_can_receive(s->lcr, s->read_count, r); + regs.fifo_depth() - regs.read_count + } + + fn receive(&self, buf: &[u8]) { + let mut regs = self.regs.borrow_mut(); + if regs.loopback_enabled() { + // In loopback mode, the RX input signal is internally disconnected + // from the entire receiving logics; thus, all inputs are ignored, + // and BREAK detection on RX input signal is also not performed. 
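// Aside on the identification-register window handled in `read()` above: the
// `Err(v) if (0x3f8..0x400).contains(&(v >> 2))` arm matches the byte offsets
// 0xFE0-0xFFF, i.e. the standard PrimeCell UARTPeriphID0-3 / UARTPCellID0-3
// locations, one byte of the 8-entry `DEVICE_ID` array per word-aligned offset.
// For the base PL011 ID defined earlier:
//
//     read(0xFE0) -> 0x11   // UARTPeriphID0
//     read(0xFE4) -> 0x10   // UARTPeriphID1
//     read(0xFF0) -> 0x0D   // UARTPCellID0
//     read(0xFFC) -> 0xB1   // UARTPCellID3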
+ return; + } + + let mut update_irq = false; + for &c in buf { + let c: u32 = c.into(); + update_irq |= regs.fifo_rx_put(c.into()); + } + + // Release the BqlRefCell before calling self.update() + drop(regs); + if update_irq { + self.update(); + } + } + + fn event(&self, event: Event) { + let mut update_irq = false; + let mut regs = self.regs.borrow_mut(); + if event == Event::CHR_EVENT_BREAK && !regs.loopback_enabled() { + update_irq = regs.fifo_rx_put(registers::Data::BREAK); + } + // Release the BqlRefCell before calling self.update() + drop(regs); + + if update_irq { + self.update() + } + } + + fn realize(&self) -> qemu_api::Result<()> { + self.char_backend + .enable_handlers(self, Self::can_receive, Self::receive, Self::event); + Ok(()) + } + + fn reset_hold(&self, _type: ResetType) { + self.regs.borrow_mut().reset(); + } + + fn update(&self) { + let regs = self.regs.borrow(); + let flags = regs.int_level & regs.int_enabled; + for (irq, i) in self.interrupts.iter().zip(IRQMASK) { + irq.set(flags.any_set(i)); + } + } + + pub fn post_load(&self, _version_id: u32) -> Result<(), ()> { + self.regs.borrow_mut().post_load() + } +} + +/// Which bits in the interrupt status matter for each outbound IRQ line ? +const IRQMASK: [Interrupt; 6] = [ + Interrupt::all(), + Interrupt::RX, + Interrupt::TX, + Interrupt::RT, + Interrupt::MS, + Interrupt::E, +]; + +/// # Safety +/// +/// We expect the FFI user of this function to pass a valid pointer for `chr` +/// and `irq`. +#[no_mangle] +pub unsafe extern "C" fn pl011_create( + addr: u64, + irq: *mut IRQState, + chr: *mut Chardev, +) -> *mut DeviceState { + // SAFETY: The callers promise that they have owned references. + // They do not gift them to pl011_create, so use `Owned::from`. + let irq = unsafe { Owned::::from(&*irq) }; + + let dev = PL011State::new(); + if !chr.is_null() { + let chr = unsafe { Owned::::from(&*chr) }; + dev.prop_set_chr("chardev", &chr); + } + dev.sysbus_realize(); + dev.mmio_map(0, addr); + dev.connect_irq(0, &irq); + + // The pointer is kept alive by the QOM tree; drop the owned ref + dev.as_mut_ptr() +} + +#[repr(C)] +#[derive(qemu_api_macros::Object)] +/// PL011 Luminary device model. +pub struct PL011Luminary { + parent_obj: ParentField, +} + +qom_isa!(PL011Luminary : PL011State, SysBusDevice, DeviceState, Object); + +unsafe impl ObjectType for PL011Luminary { + type Class = ::Class; + const TYPE_NAME: &'static CStr = crate::TYPE_PL011_LUMINARY; +} + +impl ObjectImpl for PL011Luminary { + type ParentType = PL011State; + + const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::; +} + +impl PL011Impl for PL011Luminary { + const DEVICE_ID: DeviceId = DeviceId(&[0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1]); +} + +impl DeviceImpl for PL011Luminary {} +impl ResettablePhasesImpl for PL011Luminary {} +impl SysBusDeviceImpl for PL011Luminary {} + +extern "C" fn pl011_clock_needed(opaque: *mut c_void) -> bool { + let state = NonNull::new(opaque).unwrap().cast::(); + unsafe { state.as_ref().migrate_clock } +} + +/// Migration subsection for [`PL011State`] clock. +static VMSTATE_PL011_CLOCK: VMStateDescription = VMStateDescription { + name: c"pl011/clock".as_ptr(), + version_id: 1, + minimum_version_id: 1, + needed: Some(pl011_clock_needed), + fields: vmstate_fields! 
{ + vmstate_clock!(PL011State, clock), + }, + ..Zeroable::ZERO +}; + +extern "C" fn pl011_post_load(opaque: *mut c_void, version_id: c_int) -> c_int { + let state = NonNull::new(opaque).unwrap().cast::(); + let result = unsafe { state.as_ref().post_load(version_id as u32) }; + if result.is_err() { + -1 + } else { + 0 + } +} + +static VMSTATE_PL011_REGS: VMStateDescription = VMStateDescription { + name: c"pl011/regs".as_ptr(), + version_id: 2, + minimum_version_id: 2, + fields: vmstate_fields! { + vmstate_of!(PL011Registers, flags), + vmstate_of!(PL011Registers, line_control), + vmstate_of!(PL011Registers, receive_status_error_clear), + vmstate_of!(PL011Registers, control), + vmstate_of!(PL011Registers, dmacr), + vmstate_of!(PL011Registers, int_enabled), + vmstate_of!(PL011Registers, int_level), + vmstate_of!(PL011Registers, read_fifo), + vmstate_of!(PL011Registers, ilpr), + vmstate_of!(PL011Registers, ibrd), + vmstate_of!(PL011Registers, fbrd), + vmstate_of!(PL011Registers, ifl), + vmstate_of!(PL011Registers, read_pos), + vmstate_of!(PL011Registers, read_count), + vmstate_of!(PL011Registers, read_trigger), + }, + ..Zeroable::ZERO +}; + +pub static VMSTATE_PL011: VMStateDescription = VMStateDescription { + name: c"pl011".as_ptr(), + version_id: 2, + minimum_version_id: 2, + post_load: Some(pl011_post_load), + fields: vmstate_fields! { + vmstate_unused!(core::mem::size_of::()), + vmstate_struct!(PL011State, regs, &VMSTATE_PL011_REGS, BqlRefCell), + }, + subsections: vmstate_subsections! { + VMSTATE_PL011_CLOCK + }, + ..Zeroable::ZERO +}; + +qemu_api::declare_properties! { + PL011_PROPERTIES, + qemu_api::define_property!( + c"chardev", + PL011State, + char_backend, + unsafe { &qdev_prop_chr }, + CharBackend + ), + qemu_api::define_property!( + c"migrate-clk", + PL011State, + migrate_clock, + unsafe { &qdev_prop_bool }, + bool, + default = true + ), +} diff --git a/rust/hw/char/pl011/src/lib.rs b/rust/hw/char/pl011/src/lib.rs new file mode 100644 index 00000000000..2b70d2ff560 --- /dev/null +++ b/rust/hw/char/pl011/src/lib.rs @@ -0,0 +1,21 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis +// SPDX-License-Identifier: GPL-2.0-or-later + +//! PL011 QEMU Device Model +//! +//! This library implements a device model for the PrimeCell® UART (PL011) +//! device in QEMU. +//! +//! # Library crate +//! +//! See [`PL011State`](crate::device::PL011State) for the device model type and +//! the [`registers`] module for register types. + +mod device; +mod registers; + +pub use device::pl011_create; + +pub const TYPE_PL011: &::std::ffi::CStr = c"pl011"; +pub const TYPE_PL011_LUMINARY: &::std::ffi::CStr = c"pl011_luminary"; diff --git a/rust/hw/char/pl011/src/registers.rs b/rust/hw/char/pl011/src/registers.rs new file mode 100644 index 00000000000..7ececd39f86 --- /dev/null +++ b/rust/hw/char/pl011/src/registers.rs @@ -0,0 +1,350 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Device registers exposed as typed structs which are backed by arbitrary +//! integer bitmaps. [`Data`], [`Control`], [`LineControl`], etc. + +// For more detail see the PL011 Technical Reference Manual DDI0183: +// https://developer.arm.com/documentation/ddi0183/latest/ + +use bilge::prelude::*; +use bits::bits; +use qemu_api::{impl_vmstate_bitsized, impl_vmstate_forward}; + +/// Offset of each register from the base memory address of the device. 
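// The `qemu_api_macros::TryInto` derive on the enum below provides a
// `TryFrom<u64>` implementation keyed on the discriminant values, which is what
// `PL011State::read`/`write` use to dispatch MMIO accesses. Roughly:
//
//     RegisterOffset::try_from(0x018_u64) == Ok(RegisterOffset::FR);
//     RegisterOffset::try_from(0xFE0_u64) == Err(0xFE0);  // ID window, handled separately
//
// (sketch only; the exact error type is whatever the derive macro emits).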
+#[doc(alias = "offset")] +#[allow(non_camel_case_types)] +#[repr(u64)] +#[derive(Debug, Eq, PartialEq, qemu_api_macros::TryInto)] +pub enum RegisterOffset { + /// Data Register + /// + /// A write to this register initiates the actual data transmission + #[doc(alias = "UARTDR")] + DR = 0x000, + /// Receive Status Register or Error Clear Register + #[doc(alias = "UARTRSR")] + #[doc(alias = "UARTECR")] + RSR = 0x004, + /// Flag Register + /// + /// A read of this register shows if transmission is complete + #[doc(alias = "UARTFR")] + FR = 0x018, + /// Fractional Baud Rate Register + /// + /// responsible for baud rate speed + #[doc(alias = "UARTFBRD")] + FBRD = 0x028, + /// `IrDA` Low-Power Counter Register + #[doc(alias = "UARTILPR")] + ILPR = 0x020, + /// Integer Baud Rate Register + /// + /// Responsible for baud rate speed + #[doc(alias = "UARTIBRD")] + IBRD = 0x024, + /// line control register (data frame format) + #[doc(alias = "UARTLCR_H")] + LCR_H = 0x02C, + /// Toggle UART, transmission or reception + #[doc(alias = "UARTCR")] + CR = 0x030, + /// Interrupt FIFO Level Select Register + #[doc(alias = "UARTIFLS")] + FLS = 0x034, + /// Interrupt Mask Set/Clear Register + #[doc(alias = "UARTIMSC")] + IMSC = 0x038, + /// Raw Interrupt Status Register + #[doc(alias = "UARTRIS")] + RIS = 0x03C, + /// Masked Interrupt Status Register + #[doc(alias = "UARTMIS")] + MIS = 0x040, + /// Interrupt Clear Register + #[doc(alias = "UARTICR")] + ICR = 0x044, + /// DMA control Register + #[doc(alias = "UARTDMACR")] + DMACR = 0x048, + ///// Reserved, offsets `0x04C` to `0x07C`. + //Reserved = 0x04C, +} + +/// Receive Status Register / Data Register common error bits +/// +/// The `UARTRSR` register is updated only when a read occurs +/// from the `UARTDR` register with the same status information +/// that can also be obtained by reading the `UARTDR` register +#[bitsize(8)] +#[derive(Clone, Copy, Default, DebugBits, FromBits)] +pub struct Errors { + pub framing_error: bool, + pub parity_error: bool, + pub break_error: bool, + pub overrun_error: bool, + _reserved_unpredictable: u4, +} + +/// Data Register, `UARTDR` +/// +/// The `UARTDR` register is the data register; write for TX and +/// read for RX. It is a 12-bit register, where bits 7..0 are the +/// character and bits 11..8 are error bits. +#[bitsize(32)] +#[derive(Clone, Copy, Default, DebugBits, FromBits)] +#[doc(alias = "UARTDR")] +pub struct Data { + pub data: u8, + pub errors: Errors, + _reserved: u16, +} +impl_vmstate_bitsized!(Data); + +impl Data { + // bilge is not very const-friendly, unfortunately + pub const BREAK: Self = Self { value: 1 << 10 }; +} + +/// Receive Status Register / Error Clear Register, `UARTRSR/UARTECR` +/// +/// This register provides a different way to read the four receive +/// status error bits that can be found in bits 11..8 of the UARTDR +/// on a read. It gets updated when the guest reads UARTDR, and the +/// status bits correspond to that character that was just read. +/// +/// The TRM confusingly describes this offset as UARTRSR for reads +/// and UARTECR for writes, but really it's a single error status +/// register where writing anything to the register clears the error +/// bits. 
+#[bitsize(32)] +#[derive(Clone, Copy, DebugBits, FromBits)] +pub struct ReceiveStatusErrorClear { + pub errors: Errors, + _reserved_unpredictable: u24, +} +impl_vmstate_bitsized!(ReceiveStatusErrorClear); + +impl ReceiveStatusErrorClear { + pub fn set_from_data(&mut self, data: Data) { + self.set_errors(data.errors()); + } + + pub fn reset(&mut self) { + // All the bits are cleared to 0 on reset. + *self = Self::default(); + } +} + +impl Default for ReceiveStatusErrorClear { + fn default() -> Self { + 0.into() + } +} + +#[bitsize(32)] +#[derive(Clone, Copy, DebugBits, FromBits)] +/// Flag Register, `UARTFR` +/// +/// This has the usual inbound RS232 modem-control signals, plus flags +/// for RX and TX FIFO fill levels and a BUSY flag. +#[doc(alias = "UARTFR")] +pub struct Flags { + /// CTS: Clear to send + pub clear_to_send: bool, + /// DSR: Data set ready + pub data_set_ready: bool, + /// DCD: Data carrier detect + pub data_carrier_detect: bool, + /// BUSY: UART busy. In real hardware, set while the UART is + /// busy transmitting data. QEMU's implementation never sets BUSY. + pub busy: bool, + /// RXFE: Receive FIFO empty + pub receive_fifo_empty: bool, + /// TXFF: Transmit FIFO full + pub transmit_fifo_full: bool, + /// RXFF: Receive FIFO full + pub receive_fifo_full: bool, + /// TXFE: Transmit FIFO empty + pub transmit_fifo_empty: bool, + /// RI: Ring indicator + pub ring_indicator: bool, + _reserved_zero_no_modify: u23, +} +impl_vmstate_bitsized!(Flags); + +impl Flags { + pub fn reset(&mut self) { + *self = Self::default(); + } +} + +impl Default for Flags { + fn default() -> Self { + let mut ret: Self = 0.into(); + // After reset TXFF, RXFF, and BUSY are 0, and TXFE and RXFE are 1 + ret.set_receive_fifo_empty(true); + ret.set_transmit_fifo_empty(true); + ret + } +} + +#[bitsize(32)] +#[derive(Clone, Copy, DebugBits, FromBits)] +/// Line Control Register, `UARTLCR_H` +#[doc(alias = "UARTLCR_H")] +pub struct LineControl { + /// BRK: Send break + pub send_break: bool, + /// PEN: Parity enable + pub parity_enabled: bool, + /// EPS: Even parity select + pub parity: Parity, + /// STP2: Two stop bits select + pub two_stops_bits: bool, + /// FEN: Enable FIFOs + pub fifos_enabled: Mode, + /// WLEN: Word length in bits + /// b11 = 8 bits + /// b10 = 7 bits + /// b01 = 6 bits + /// b00 = 5 bits. + pub word_length: WordLength, + /// SPS Stick parity select + pub sticky_parity: bool, + /// 31:8 - Reserved, do not modify, read as zero. + _reserved_zero_no_modify: u24, +} +impl_vmstate_bitsized!(LineControl); + +impl LineControl { + pub fn reset(&mut self) { + // All the bits are cleared to 0 when reset. + *self = 0.into(); + } +} + +impl Default for LineControl { + fn default() -> Self { + 0.into() + } +} + +#[bitsize(1)] +#[derive(Clone, Copy, Debug, Eq, FromBits, PartialEq)] +/// `EPS` "Even parity select", field of [Line Control +/// register](LineControl). +pub enum Parity { + Odd = 0, + Even = 1, +} + +#[bitsize(1)] +#[derive(Clone, Copy, Debug, Eq, FromBits, PartialEq)] +/// `FEN` "Enable FIFOs" or Device mode, field of [Line Control +/// register](LineControl). +pub enum Mode { + /// 0 = FIFOs are disabled (character mode) that is, the FIFOs become + /// 1-byte-deep holding registers + Character = 0, + /// 1 = transmit and receive FIFO buffers are enabled (FIFO mode). + FIFO = 1, +} + +#[bitsize(2)] +#[derive(Clone, Copy, Debug, Eq, FromBits, PartialEq)] +/// `WLEN` Word length, field of [Line Control register](LineControl). 
+/// +/// These bits indicate the number of data bits transmitted or received in a +/// frame as follows: +pub enum WordLength { + /// b11 = 8 bits + _8Bits = 0b11, + /// b10 = 7 bits + _7Bits = 0b10, + /// b01 = 6 bits + _6Bits = 0b01, + /// b00 = 5 bits. + _5Bits = 0b00, +} + +/// Control Register, `UARTCR` +/// +/// The `UARTCR` register is the control register. It contains various +/// enable bits, and the bits to write to set the usual outbound RS232 +/// modem control signals. All bits reset to 0 except TXE and RXE. +#[bitsize(32)] +#[doc(alias = "UARTCR")] +#[derive(Clone, Copy, DebugBits, FromBits)] +pub struct Control { + /// `UARTEN` UART enable: 0 = UART is disabled. + pub enable_uart: bool, + /// `SIREN` `SIR` enable: disable or enable IrDA SIR ENDEC. + /// QEMU does not model this. + pub enable_sir: bool, + /// `SIRLP` SIR low-power IrDA mode. QEMU does not model this. + pub sir_lowpower_irda_mode: u1, + /// Reserved, do not modify, read as zero. + _reserved_zero_no_modify: u4, + /// `LBE` Loopback enable: feed UART output back to the input + pub enable_loopback: bool, + /// `TXE` Transmit enable + pub enable_transmit: bool, + /// `RXE` Receive enable + pub enable_receive: bool, + /// `DTR` Data transmit ready + pub data_transmit_ready: bool, + /// `RTS` Request to send + pub request_to_send: bool, + /// `Out1` UART Out1 signal; can be used as DCD + pub out_1: bool, + /// `Out2` UART Out2 signal; can be used as RI + pub out_2: bool, + /// `RTSEn` RTS hardware flow control enable + pub rts_hardware_flow_control_enable: bool, + /// `CTSEn` CTS hardware flow control enable + pub cts_hardware_flow_control_enable: bool, + /// 31:16 - Reserved, do not modify, read as zero. + _reserved_zero_no_modify2: u16, +} +impl_vmstate_bitsized!(Control); + +impl Control { + pub fn reset(&mut self) { + *self = 0.into(); + self.set_enable_receive(true); + self.set_enable_transmit(true); + } +} + +impl Default for Control { + fn default() -> Self { + let mut ret: Self = 0.into(); + ret.reset(); + ret + } +} + +bits! 
{ + /// Interrupt status bits in UARTRIS, UARTMIS, UARTIMSC + #[derive(Default)] + pub struct Interrupt(u32) { + OE = 1 << 10, + BE = 1 << 9, + PE = 1 << 8, + FE = 1 << 7, + RT = 1 << 6, + TX = 1 << 5, + RX = 1 << 4, + DSR = 1 << 3, + DCD = 1 << 2, + CTS = 1 << 1, + RI = 1 << 0, + + E = bits!(Self as u32: OE | BE | PE | FE), + MS = bits!(Self as u32: RI | DSR | DCD | CTS), + } +} +impl_vmstate_forward!(Interrupt); diff --git a/rust/hw/meson.build b/rust/hw/meson.build new file mode 100644 index 00000000000..9749d4adfc9 --- /dev/null +++ b/rust/hw/meson.build @@ -0,0 +1,2 @@ +subdir('char') +subdir('timer') diff --git a/rust/hw/timer/Kconfig b/rust/hw/timer/Kconfig new file mode 100644 index 00000000000..afd98033503 --- /dev/null +++ b/rust/hw/timer/Kconfig @@ -0,0 +1,2 @@ +config X_HPET_RUST + bool diff --git a/rust/hw/timer/hpet/Cargo.toml b/rust/hw/timer/hpet/Cargo.toml new file mode 100644 index 00000000000..ac5df23c1d0 --- /dev/null +++ b/rust/hw/timer/hpet/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "hpet" +version = "0.1.0" +authors = ["Zhao Liu "] +description = "IA-PC High Precision Event Timer emulation in Rust" + +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +qemu_api = { path = "../../../qemu-api" } +qemu_api_macros = { path = "../../../qemu-api-macros" } + +[lints] +workspace = true diff --git a/rust/hw/timer/hpet/meson.build b/rust/hw/timer/hpet/meson.build new file mode 100644 index 00000000000..c2d7c0532ca --- /dev/null +++ b/rust/hw/timer/hpet/meson.build @@ -0,0 +1,18 @@ +_libhpet_rs = static_library( + 'hpet', + files('src/lib.rs'), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + dependencies: [ + qemu_api, + qemu_api_macros, + ], +) + +rust_devices_ss.add(when: 'CONFIG_X_HPET_RUST', if_true: [declare_dependency( + link_whole: [_libhpet_rs], + # Putting proc macro crates in `dependencies` is necessary for Meson to find + # them when compiling the root per-target static rust lib. + dependencies: [qemu_api_macros], + variables: {'crate': 'hpet'}, +)]) diff --git a/rust/hw/timer/hpet/src/device.rs b/rust/hw/timer/hpet/src/device.rs new file mode 100644 index 00000000000..acf7251029e --- /dev/null +++ b/rust/hw/timer/hpet/src/device.rs @@ -0,0 +1,1050 @@ +// Copyright (C) 2024 Intel Corporation. +// Author(s): Zhao Liu +// SPDX-License-Identifier: GPL-2.0-or-later + +use std::{ + ffi::{c_int, c_void, CStr}, + mem::MaybeUninit, + pin::Pin, + ptr::{addr_of_mut, null_mut, NonNull}, + slice::from_ref, +}; + +use qemu_api::{ + bindings::{ + address_space_memory, address_space_stl_le, qdev_prop_bit, qdev_prop_bool, + qdev_prop_uint32, qdev_prop_usize, + }, + cell::{BqlCell, BqlRefCell}, + irq::InterruptSource, + memory::{ + hwaddr, MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder, MEMTXATTRS_UNSPECIFIED, + }, + prelude::*, + qdev::{DeviceImpl, DeviceState, Property, ResetType, ResettablePhasesImpl}, + qom::{ObjectImpl, ObjectType, ParentField, ParentInit}, + qom_isa, + sysbus::{SysBusDevice, SysBusDeviceImpl}, + timer::{Timer, CLOCK_VIRTUAL, NANOSECONDS_PER_SECOND}, + uninit_field_mut, + vmstate::VMStateDescription, + vmstate_fields, vmstate_of, vmstate_struct, vmstate_subsections, vmstate_validate, + zeroable::Zeroable, +}; + +use crate::fw_cfg::HPETFwConfig; + +/// Register space for each timer block (`HPET_BASE` is defined in hpet.h). 
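// Layout sketch for the 0x400-byte register block declared below, matching the
// decoding done later in `HPETState::decode()`:
//
//     0x000..=0x0FF             global registers (CAP, CFG, INT_STATUS, COUNTER)
//     0x100 + 0x20 * N + 0x00   timer N configuration/capability (CFG)
//     0x100 + 0x20 * N + 0x08   timer N comparator (CMP)
//     0x100 + 0x20 * N + 0x10   timer N FSB interrupt route (ROUTE)
//
// e.g. timer 2's comparator lives at offset 0x148 within the block.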
+const HPET_REG_SPACE_LEN: u64 = 0x400; // 1024 bytes + +/// Minimum recommended hardware implementation. +const HPET_MIN_TIMERS: usize = 3; +/// Maximum timers in each timer block. +const HPET_MAX_TIMERS: usize = 32; + +/// Flags that HPETState.flags supports. +const HPET_FLAG_MSI_SUPPORT_SHIFT: usize = 0; + +const HPET_NUM_IRQ_ROUTES: usize = 32; +const HPET_LEGACY_PIT_INT: u32 = 0; // HPET_LEGACY_RTC_INT isn't defined here. +const RTC_ISA_IRQ: usize = 8; + +const HPET_CLK_PERIOD: u64 = 10; // 10 ns +const FS_PER_NS: u64 = 1000000; // 1000000 femtoseconds == 1 ns + +/// Revision ID (bits 0:7). Revision 1 is implemented (refer to v1.0a spec). +const HPET_CAP_REV_ID_VALUE: u64 = 0x1; +const HPET_CAP_REV_ID_SHIFT: usize = 0; +/// Number of Timers (bits 8:12) +const HPET_CAP_NUM_TIM_SHIFT: usize = 8; +/// Counter Size (bit 13) +const HPET_CAP_COUNT_SIZE_CAP_SHIFT: usize = 13; +/// Legacy Replacement Route Capable (bit 15) +const HPET_CAP_LEG_RT_CAP_SHIFT: usize = 15; +/// Vendor ID (bits 16:31) +const HPET_CAP_VENDER_ID_VALUE: u64 = 0x8086; +const HPET_CAP_VENDER_ID_SHIFT: usize = 16; +/// Main Counter Tick Period (bits 32:63) +const HPET_CAP_CNT_CLK_PERIOD_SHIFT: usize = 32; + +/// Overall Enable (bit 0) +const HPET_CFG_ENABLE_SHIFT: usize = 0; +/// Legacy Replacement Route (bit 1) +const HPET_CFG_LEG_RT_SHIFT: usize = 1; +/// Other bits are reserved. +const HPET_CFG_WRITE_MASK: u64 = 0x003; + +/// bit 0, 7, and bits 16:31 are reserved. +/// bit 4, 5, 15, and bits 32:64 are read-only. +const HPET_TN_CFG_WRITE_MASK: u64 = 0x7f4e; +/// Timer N Interrupt Type (bit 1) +const HPET_TN_CFG_INT_TYPE_SHIFT: usize = 1; +/// Timer N Interrupt Enable (bit 2) +const HPET_TN_CFG_INT_ENABLE_SHIFT: usize = 2; +/// Timer N Type (Periodic enabled or not, bit 3) +const HPET_TN_CFG_PERIODIC_SHIFT: usize = 3; +/// Timer N Periodic Interrupt Capable (support Periodic or not, bit 4) +const HPET_TN_CFG_PERIODIC_CAP_SHIFT: usize = 4; +/// Timer N Size (timer size is 64-bits or 32 bits, bit 5) +const HPET_TN_CFG_SIZE_CAP_SHIFT: usize = 5; +/// Timer N Value Set (bit 6) +const HPET_TN_CFG_SETVAL_SHIFT: usize = 6; +/// Timer N 32-bit Mode (bit 8) +const HPET_TN_CFG_32BIT_SHIFT: usize = 8; +/// Timer N Interrupt Rout (bits 9:13) +const HPET_TN_CFG_INT_ROUTE_MASK: u64 = 0x3e00; +const HPET_TN_CFG_INT_ROUTE_SHIFT: usize = 9; +/// Timer N FSB Interrupt Enable (bit 14) +const HPET_TN_CFG_FSB_ENABLE_SHIFT: usize = 14; +/// Timer N FSB Interrupt Delivery (bit 15) +const HPET_TN_CFG_FSB_CAP_SHIFT: usize = 15; +/// Timer N Interrupt Routing Capability (bits 32:63) +const HPET_TN_CFG_INT_ROUTE_CAP_SHIFT: usize = 32; + +#[derive(qemu_api_macros::TryInto)] +#[repr(u64)] +#[allow(non_camel_case_types)] +/// Timer registers, masked by 0x18 +enum TimerRegister { + /// Timer N Configuration and Capability Register + CFG = 0, + /// Timer N Comparator Value Register + CMP = 8, + /// Timer N FSB Interrupt Route Register + ROUTE = 16, +} + +#[derive(qemu_api_macros::TryInto)] +#[repr(u64)] +#[allow(non_camel_case_types)] +/// Global registers +enum GlobalRegister { + /// General Capabilities and ID Register + CAP = 0, + /// General Configuration Register + CFG = 0x10, + /// General Interrupt Status Register + INT_STATUS = 0x20, + /// Main Counter Value Register + COUNTER = 0xF0, +} + +enum HPETRegister<'a> { + /// Global register in the range from `0` to `0xff` + Global(GlobalRegister), + + /// Register in the timer block `0x100`...`0x3ff` + Timer(&'a BqlRefCell, TimerRegister), + + /// Invalid address + #[allow(dead_code)] + 
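// Putting the HPET_CAP_* shifts above together (the value itself is assembled
// in `realize()` further down), a 3-timer block advertises:
//
//     rev id 1                 -> 0x0000_0001
//     num_tim = 3 - 1 = 2      -> 0x0000_0200   (bits 8:12)
//     64-bit counter           -> 0x0000_2000   (bit 13)
//     legacy route capable     -> 0x0000_8000   (bit 15)
//     vendor 0x8086            -> 0x8086_0000   (bits 16:31)
//     10 ns * 1_000_000 fs/ns  -> 0x0098_9680   (bits 32:63, period in fs)
//
//     capability = 0x0098_9680_8086_A201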
Unknown(hwaddr), +} + +struct HPETAddrDecode<'a> { + shift: u32, + len: u32, + reg: HPETRegister<'a>, +} + +const fn hpet_next_wrap(cur_tick: u64) -> u64 { + (cur_tick | 0xffffffff) + 1 +} + +const fn hpet_time_after(a: u64, b: u64) -> bool { + ((b - a) as i64) < 0 +} + +const fn ticks_to_ns(value: u64) -> u64 { + value * HPET_CLK_PERIOD +} + +const fn ns_to_ticks(value: u64) -> u64 { + value / HPET_CLK_PERIOD +} + +// Avoid touching the bits that cannot be written. +const fn hpet_fixup_reg(new: u64, old: u64, mask: u64) -> u64 { + (new & mask) | (old & !mask) +} + +const fn activating_bit(old: u64, new: u64, shift: usize) -> bool { + let mask: u64 = 1 << shift; + (old & mask == 0) && (new & mask != 0) +} + +const fn deactivating_bit(old: u64, new: u64, shift: usize) -> bool { + let mask: u64 = 1 << shift; + (old & mask != 0) && (new & mask == 0) +} + +fn timer_handler(timer_cell: &BqlRefCell) { + timer_cell.borrow_mut().callback() +} + +/// HPET Timer Abstraction +#[repr(C)] +#[derive(Debug)] +pub struct HPETTimer { + /// timer N index within the timer block (`HPETState`) + #[doc(alias = "tn")] + index: u8, + qemu_timer: Timer, + /// timer block abstraction containing this timer + state: NonNull, + + // Memory-mapped, software visible timer registers + /// Timer N Configuration and Capability Register + config: u64, + /// Timer N Comparator Value Register + cmp: u64, + /// Timer N FSB Interrupt Route Register + fsb: u64, + + // Hidden register state + /// comparator (extended to counter width) + cmp64: u64, + /// Last value written to comparator + period: u64, + /// timer pop will indicate wrap for one-shot 32-bit + /// mode. Next pop will be actual timer expiration. + wrap_flag: u8, + /// last value armed, to avoid timer storms + last: u64, +} + +impl HPETTimer { + fn new(index: u8, state: *const HPETState) -> HPETTimer { + HPETTimer { + index, + // SAFETY: the HPETTimer will only be used after the timer + // is initialized below. + qemu_timer: unsafe { Timer::new() }, + state: NonNull::new(state.cast_mut()).unwrap(), + config: 0, + cmp: 0, + fsb: 0, + cmp64: 0, + period: 0, + wrap_flag: 0, + last: 0, + } + } + + fn init_timer_with_cell(cell: &BqlRefCell) { + let mut timer = cell.borrow_mut(); + // SAFETY: HPETTimer is only used as part of HPETState, which is + // always pinned. + let qemu_timer = unsafe { Pin::new_unchecked(&mut timer.qemu_timer) }; + qemu_timer.init_full(None, CLOCK_VIRTUAL, Timer::NS, 0, timer_handler, cell); + } + + fn get_state(&self) -> &HPETState { + // SAFETY: + // the pointer is convertible to a reference + unsafe { self.state.as_ref() } + } + + fn is_int_active(&self) -> bool { + self.get_state().is_timer_int_active(self.index.into()) + } + + const fn is_fsb_route_enabled(&self) -> bool { + self.config & (1 << HPET_TN_CFG_FSB_ENABLE_SHIFT) != 0 + } + + const fn is_periodic(&self) -> bool { + self.config & (1 << HPET_TN_CFG_PERIODIC_SHIFT) != 0 + } + + const fn is_int_enabled(&self) -> bool { + self.config & (1 << HPET_TN_CFG_INT_ENABLE_SHIFT) != 0 + } + + const fn is_32bit_mod(&self) -> bool { + self.config & (1 << HPET_TN_CFG_32BIT_SHIFT) != 0 + } + + const fn is_valset_enabled(&self) -> bool { + self.config & (1 << HPET_TN_CFG_SETVAL_SHIFT) != 0 + } + + fn clear_valset(&mut self) { + self.config &= !(1 << HPET_TN_CFG_SETVAL_SHIFT); + } + + /// True if timer interrupt is level triggered; otherwise, edge triggered. 
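// Worked examples for the helpers defined above (10 ns per main-counter tick):
//
//     ticks_to_ns(3)                == 30
//     ns_to_ticks(125)              == 12              // integer division
//     hpet_next_wrap(0x1_2345_6789) == 0x2_0000_0000
//         // (cur | 0xffff_ffff) + 1: the next point where the low 32 bits
//         // wrap, used for the extra interrupt in one-shot 32-bit mode
//     hpet_time_after(a, b)         // true when `a` is later than `b`,
//                                   // computed wrap-safely via (b - a) as i64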
+ const fn is_int_level_triggered(&self) -> bool { + self.config & (1 << HPET_TN_CFG_INT_TYPE_SHIFT) != 0 + } + + /// calculate next value of the general counter that matches the + /// target (either entirely, or the low 32-bit only depending on + /// the timer mode). + fn calculate_cmp64(&self, cur_tick: u64, target: u64) -> u64 { + if self.is_32bit_mod() { + let mut result: u64 = cur_tick.deposit(0, 32, target); + if result < cur_tick { + result += 0x100000000; + } + result + } else { + target + } + } + + const fn get_individual_route(&self) -> usize { + ((self.config & HPET_TN_CFG_INT_ROUTE_MASK) >> HPET_TN_CFG_INT_ROUTE_SHIFT) as usize + } + + fn get_int_route(&self) -> usize { + if self.index <= 1 && self.get_state().is_legacy_mode() { + // If LegacyReplacement Route bit is set, HPET specification requires + // timer0 be routed to IRQ0 in NON-APIC or IRQ2 in the I/O APIC, + // timer1 be routed to IRQ8 in NON-APIC or IRQ8 in the I/O APIC. + // + // If the LegacyReplacement Route bit is set, the individual routing + // bits for timers 0 and 1 (APIC or FSB) will have no impact. + // + // FIXME: Consider I/O APIC case. + if self.index == 0 { + 0 + } else { + RTC_ISA_IRQ + } + } else { + // (If the LegacyReplacement Route bit is set) Timer 2-n will be + // routed as per the routing in the timer n config registers. + // ... + // If the LegacyReplacement Route bit is not set, the individual + // routing bits for each of the timers are used. + self.get_individual_route() + } + } + + fn set_irq(&mut self, set: bool) { + let route = self.get_int_route(); + + if set && self.is_int_enabled() && self.get_state().is_hpet_enabled() { + if self.is_fsb_route_enabled() { + // SAFETY: + // the parameters are valid. + unsafe { + address_space_stl_le( + addr_of_mut!(address_space_memory), + self.fsb >> 32, // Timer N FSB int addr + self.fsb as u32, // Timer N FSB int value, truncate! + MEMTXATTRS_UNSPECIFIED, + null_mut(), + ); + } + } else if self.is_int_level_triggered() { + self.get_state().irqs[route].raise(); + } else { + self.get_state().irqs[route].pulse(); + } + } else if !self.is_fsb_route_enabled() { + self.get_state().irqs[route].lower(); + } + } + + fn update_irq(&mut self, set: bool) { + // If Timer N Interrupt Enable bit is 0, "the timer will + // still operate and generate appropriate status bits, but + // will not cause an interrupt" + self.get_state() + .update_int_status(self.index.into(), set && self.is_int_level_triggered()); + self.set_irq(set); + } + + fn arm_timer(&mut self, tick: u64) { + let mut ns = self.get_state().get_ns(tick); + + // Clamp period to reasonable min value (1 us) + if self.is_periodic() && ns - self.last < 1000 { + ns = self.last + 1000; + } + + self.last = ns; + self.qemu_timer.modify(self.last); + } + + fn set_timer(&mut self) { + let cur_tick: u64 = self.get_state().get_ticks(); + + self.wrap_flag = 0; + self.cmp64 = self.calculate_cmp64(cur_tick, self.cmp); + if self.is_32bit_mod() { + // HPET spec says in one-shot 32-bit mode, generate an interrupt when + // counter wraps in addition to an interrupt with comparator match. + if !self.is_periodic() && self.cmp64 > hpet_next_wrap(cur_tick) { + self.wrap_flag = 1; + self.arm_timer(hpet_next_wrap(cur_tick)); + return; + } + } + self.arm_timer(self.cmp64); + } + + fn del_timer(&mut self) { + // Just remove the timer from the timer_list without destroying + // this timer instance. 
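// Worked example for `calculate_cmp64()` above in 32-bit timer mode: the
// guest-written comparator only has 32 significant bits, so it is grafted onto
// the current counter's upper half and pushed one wrap into the future if the
// result would otherwise lie in the past:
//
//     cur_tick = 0x2_0000_1000, target = 0x0000_0800
//     cur_tick.deposit(0, 32, target) = 0x2_0000_0800   // below cur_tick
//     + 0x1_0000_0000                 = 0x3_0000_0800   // next match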
+ self.qemu_timer.delete(); + + if self.is_int_active() { + // For level-triggered interrupt, this leaves interrupt status + // register set but lowers irq. + self.update_irq(true); + } + } + + /// Configuration and Capability Register + fn set_tn_cfg_reg(&mut self, shift: u32, len: u32, val: u64) { + // TODO: Add trace point - trace_hpet_ram_write_tn_cfg(addr & 4) + let old_val: u64 = self.config; + let mut new_val: u64 = old_val.deposit(shift, len, val); + new_val = hpet_fixup_reg(new_val, old_val, HPET_TN_CFG_WRITE_MASK); + + // Switch level-type interrupt to edge-type. + if deactivating_bit(old_val, new_val, HPET_TN_CFG_INT_TYPE_SHIFT) { + // Do this before changing timer.config; otherwise, if + // HPET_TN_FSB is set, update_irq will not lower the qemu_irq. + self.update_irq(false); + } + + self.config = new_val; + + if activating_bit(old_val, new_val, HPET_TN_CFG_INT_ENABLE_SHIFT) && self.is_int_active() { + self.update_irq(true); + } + + if self.is_32bit_mod() { + self.cmp = u64::from(self.cmp as u32); // truncate! + self.period = u64::from(self.period as u32); // truncate! + } + + if self.get_state().is_hpet_enabled() { + self.set_timer(); + } + } + + /// Comparator Value Register + fn set_tn_cmp_reg(&mut self, shift: u32, len: u32, val: u64) { + let mut length = len; + let mut value = val; + + // TODO: Add trace point - trace_hpet_ram_write_tn_cmp(addr & 4) + if self.is_32bit_mod() { + // High 32-bits are zero, leave them untouched. + if shift != 0 { + // TODO: Add trace point - trace_hpet_ram_write_invalid_tn_cmp() + return; + } + length = 64; + value = u64::from(value as u32); // truncate! + } + + if !self.is_periodic() || self.is_valset_enabled() { + self.cmp = self.cmp.deposit(shift, length, value); + } + + if self.is_periodic() { + self.period = self.period.deposit(shift, length, value); + } + + self.clear_valset(); + if self.get_state().is_hpet_enabled() { + self.set_timer(); + } + } + + /// FSB Interrupt Route Register + fn set_tn_fsb_route_reg(&mut self, shift: u32, len: u32, val: u64) { + self.fsb = self.fsb.deposit(shift, len, val); + } + + fn reset(&mut self) { + self.del_timer(); + self.cmp = u64::MAX; // Comparator Match Registers reset to all 1's. + self.config = (1 << HPET_TN_CFG_PERIODIC_CAP_SHIFT) | (1 << HPET_TN_CFG_SIZE_CAP_SHIFT); + if self.get_state().has_msi_flag() { + self.config |= 1 << HPET_TN_CFG_FSB_CAP_SHIFT; + } + // advertise availability of ioapic int + self.config |= + (u64::from(self.get_state().int_route_cap)) << HPET_TN_CFG_INT_ROUTE_CAP_SHIFT; + self.period = 0; + self.wrap_flag = 0; + } + + /// timer expiration callback + fn callback(&mut self) { + let period: u64 = self.period; + let cur_tick: u64 = self.get_state().get_ticks(); + + if self.is_periodic() && period != 0 { + while hpet_time_after(cur_tick, self.cmp64) { + self.cmp64 += period; + } + if self.is_32bit_mod() { + self.cmp = u64::from(self.cmp64 as u32); // truncate! 
+ } else { + self.cmp = self.cmp64; + } + self.arm_timer(self.cmp64); + } else if self.wrap_flag != 0 { + self.wrap_flag = 0; + self.arm_timer(self.cmp64); + } + self.update_irq(true); + } + + const fn read(&self, reg: TimerRegister) -> u64 { + use TimerRegister::*; + match reg { + CFG => self.config, // including interrupt capabilities + CMP => self.cmp, // comparator register + ROUTE => self.fsb, + } + } + + fn write(&mut self, reg: TimerRegister, value: u64, shift: u32, len: u32) { + use TimerRegister::*; + match reg { + CFG => self.set_tn_cfg_reg(shift, len, value), + CMP => self.set_tn_cmp_reg(shift, len, value), + ROUTE => self.set_tn_fsb_route_reg(shift, len, value), + } + } +} + +/// HPET Event Timer Block Abstraction +#[repr(C)] +#[derive(qemu_api_macros::Object)] +pub struct HPETState { + parent_obj: ParentField, + iomem: MemoryRegion, + + // HPET block Registers: Memory-mapped, software visible registers + /// General Capabilities and ID Register + capability: BqlCell, + /// General Configuration Register + config: BqlCell, + /// General Interrupt Status Register + #[doc(alias = "isr")] + int_status: BqlCell, + /// Main Counter Value Register + #[doc(alias = "hpet_counter")] + counter: BqlCell, + + // Internal state + /// Capabilities that QEMU HPET supports. + /// bit 0: MSI (or FSB) support. + flags: u32, + + /// Offset of main counter relative to qemu clock. + hpet_offset: BqlCell, + hpet_offset_saved: bool, + + irqs: [InterruptSource; HPET_NUM_IRQ_ROUTES], + rtc_irq_level: BqlCell, + pit_enabled: InterruptSource, + + /// Interrupt Routing Capability. + /// This field indicates to which interrupts in the I/O (x) APIC + /// the timers' interrupt can be routed, and is encoded in the + /// bits 32:64 of timer N's config register: + #[doc(alias = "intcap")] + int_route_cap: u32, + + /// HPET timer array managed by this timer block. + #[doc(alias = "timer")] + timers: [BqlRefCell; HPET_MAX_TIMERS], + num_timers: usize, + num_timers_save: BqlCell, + + /// Instance id (HPET timer block ID). + hpet_id: BqlCell, +} + +impl HPETState { + const fn has_msi_flag(&self) -> bool { + self.flags & (1 << HPET_FLAG_MSI_SUPPORT_SHIFT) != 0 + } + + fn is_legacy_mode(&self) -> bool { + self.config.get() & (1 << HPET_CFG_LEG_RT_SHIFT) != 0 + } + + fn is_hpet_enabled(&self) -> bool { + self.config.get() & (1 << HPET_CFG_ENABLE_SHIFT) != 0 + } + + fn is_timer_int_active(&self, index: usize) -> bool { + self.int_status.get() & (1 << index) != 0 + } + + fn get_ticks(&self) -> u64 { + ns_to_ticks(CLOCK_VIRTUAL.get_ns() + self.hpet_offset.get()) + } + + fn get_ns(&self, tick: u64) -> u64 { + ticks_to_ns(tick) - self.hpet_offset.get() + } + + fn handle_legacy_irq(&self, irq: u32, level: u32) { + if irq == HPET_LEGACY_PIT_INT { + if !self.is_legacy_mode() { + self.irqs[0].set(level != 0); + } + } else { + self.rtc_irq_level.set(level); + if !self.is_legacy_mode() { + self.irqs[RTC_ISA_IRQ].set(level != 0); + } + } + } + + fn init_timers(this: &mut MaybeUninit) { + let state = this.as_ptr(); + for index in 0..HPET_MAX_TIMERS { + let mut timer = uninit_field_mut!(*this, timers[index]); + + // Initialize in two steps, to avoid calling Timer::init_full on a + // temporary that can be moved. 
+ let timer = timer.write(BqlRefCell::new(HPETTimer::new( + index.try_into().unwrap(), + state, + ))); + HPETTimer::init_timer_with_cell(timer); + } + } + + fn update_int_status(&self, index: u32, level: bool) { + self.int_status + .set(self.int_status.get().deposit(index, 1, u64::from(level))); + } + + /// General Configuration Register + fn set_cfg_reg(&self, shift: u32, len: u32, val: u64) { + let old_val = self.config.get(); + let mut new_val = old_val.deposit(shift, len, val); + + new_val = hpet_fixup_reg(new_val, old_val, HPET_CFG_WRITE_MASK); + self.config.set(new_val); + + if activating_bit(old_val, new_val, HPET_CFG_ENABLE_SHIFT) { + // Enable main counter and interrupt generation. + self.hpet_offset + .set(ticks_to_ns(self.counter.get()) - CLOCK_VIRTUAL.get_ns()); + + for timer in self.timers.iter().take(self.num_timers) { + let mut t = timer.borrow_mut(); + + if t.is_int_enabled() && t.is_int_active() { + t.update_irq(true); + } + t.set_timer(); + } + } else if deactivating_bit(old_val, new_val, HPET_CFG_ENABLE_SHIFT) { + // Halt main counter and disable interrupt generation. + self.counter.set(self.get_ticks()); + + for timer in self.timers.iter().take(self.num_timers) { + timer.borrow_mut().del_timer(); + } + } + + // i8254 and RTC output pins are disabled when HPET is in legacy mode + if activating_bit(old_val, new_val, HPET_CFG_LEG_RT_SHIFT) { + self.pit_enabled.set(false); + self.irqs[0].lower(); + self.irqs[RTC_ISA_IRQ].lower(); + } else if deactivating_bit(old_val, new_val, HPET_CFG_LEG_RT_SHIFT) { + // TODO: Add irq binding: qemu_irq_lower(s->irqs[0]) + self.irqs[0].lower(); + self.pit_enabled.set(true); + self.irqs[RTC_ISA_IRQ].set(self.rtc_irq_level.get() != 0); + } + } + + /// General Interrupt Status Register: Read/Write Clear + fn set_int_status_reg(&self, shift: u32, _len: u32, val: u64) { + let new_val = val << shift; + let cleared = new_val & self.int_status.get(); + + for (index, timer) in self.timers.iter().take(self.num_timers).enumerate() { + if cleared & (1 << index) != 0 { + timer.borrow_mut().update_irq(false); + } + } + } + + /// Main Counter Value Register + fn set_counter_reg(&self, shift: u32, len: u32, val: u64) { + if self.is_hpet_enabled() { + // TODO: Add trace point - + // trace_hpet_ram_write_counter_write_while_enabled() + // + // HPET spec says that writes to this register should only be + // done while the counter is halted. So this is an undefined + // behavior. There's no need to forbid it, but when HPET is + // enabled, the changed counter value will not affect the + // tick count (i.e., the previously calculated offset will + // not be changed as well). 
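// Aside on the counter/offset bookkeeping in `set_cfg_reg()` above: on an
// ENABLE 0->1 transition the offset is chosen so that get_ticks() resumes
// exactly from the frozen counter value. For example (times in ns, 10 ns per
// tick):
//
//     counter = 10_000 ticks, now = 40_000 ns
//     hpet_offset = ticks_to_ns(10_000) - now = 100_000 - 40_000 = 60_000
//     1 us later: get_ticks() = ns_to_ticks(41_000 + 60_000) = 10_100 ticks
//
// On a 1->0 transition the inverse happens: the running value is latched back
// into `counter` and the per-timer qemu timers are deleted.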
+ } + self.counter + .set(self.counter.get().deposit(shift, len, val)); + } + + unsafe fn init(mut this: ParentInit) { + static HPET_RAM_OPS: MemoryRegionOps = + MemoryRegionOpsBuilder::::new() + .read(&HPETState::read) + .write(&HPETState::write) + .native_endian() + .valid_sizes(4, 8) + .impl_sizes(4, 8) + .build(); + + MemoryRegion::init_io( + &mut uninit_field_mut!(*this, iomem), + &HPET_RAM_OPS, + "hpet", + HPET_REG_SPACE_LEN, + ); + + Self::init_timers(&mut this); + } + + fn post_init(&self) { + self.init_mmio(&self.iomem); + for irq in self.irqs.iter() { + self.init_irq(irq); + } + } + + fn realize(&self) -> qemu_api::Result<()> { + if self.num_timers < HPET_MIN_TIMERS || self.num_timers > HPET_MAX_TIMERS { + Err(format!( + "hpet.num_timers must be between {HPET_MIN_TIMERS} and {HPET_MAX_TIMERS}" + ))?; + } + if self.int_route_cap == 0 { + Err("hpet.hpet-intcap property not initialized")?; + } + + self.hpet_id.set(HPETFwConfig::assign_hpet_id()?); + + // 64-bit General Capabilities and ID Register; LegacyReplacementRoute. + self.capability.set( + HPET_CAP_REV_ID_VALUE << HPET_CAP_REV_ID_SHIFT | + 1 << HPET_CAP_COUNT_SIZE_CAP_SHIFT | + 1 << HPET_CAP_LEG_RT_CAP_SHIFT | + HPET_CAP_VENDER_ID_VALUE << HPET_CAP_VENDER_ID_SHIFT | + ((self.num_timers - 1) as u64) << HPET_CAP_NUM_TIM_SHIFT | // indicate the last timer + (HPET_CLK_PERIOD * FS_PER_NS) << HPET_CAP_CNT_CLK_PERIOD_SHIFT, // 10 ns + ); + + self.init_gpio_in(2, HPETState::handle_legacy_irq); + self.init_gpio_out(from_ref(&self.pit_enabled)); + Ok(()) + } + + fn reset_hold(&self, _type: ResetType) { + for timer in self.timers.iter().take(self.num_timers) { + timer.borrow_mut().reset(); + } + + self.counter.set(0); + self.config.set(0); + self.pit_enabled.set(true); + self.hpet_offset.set(0); + + HPETFwConfig::update_hpet_cfg( + self.hpet_id.get(), + self.capability.get() as u32, + self.mmio_addr(0).unwrap(), + ); + + // to document that the RTC lowers its output on reset as well + self.rtc_irq_level.set(0); + } + + fn decode(&self, mut addr: hwaddr, size: u32) -> HPETAddrDecode<'_> { + let shift = ((addr & 4) * 8) as u32; + let len = std::cmp::min(size * 8, 64 - shift); + + addr &= !4; + let reg = if (0..=0xff).contains(&addr) { + GlobalRegister::try_from(addr).map(HPETRegister::Global) + } else { + let timer_id: usize = ((addr - 0x100) / 0x20) as usize; + if timer_id < self.num_timers { + // TODO: Add trace point - trace_hpet_ram_[read|write]_timer_id(timer_id) + TimerRegister::try_from(addr & 0x18) + .map(|reg| HPETRegister::Timer(&self.timers[timer_id], reg)) + } else { + // TODO: Add trace point - trace_hpet_timer_id_out_of_range(timer_id) + Err(addr) + } + }; + + // reg is now a Result + // convert the Err case into HPETRegister as well + let reg = reg.unwrap_or_else(HPETRegister::Unknown); + HPETAddrDecode { shift, len, reg } + } + + fn read(&self, addr: hwaddr, size: u32) -> u64 { + // TODO: Add trace point - trace_hpet_ram_read(addr) + let HPETAddrDecode { shift, reg, .. 
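// Worked example for `decode()` above, which splits a 4-byte access to a
// 64-bit register into a (shift, len) window. A 32-bit read at offset 0x10C:
//
//     shift = (0x10C & 4) * 8 = 32        // upper half of the register
//     len   = min(4 * 8, 64 - 32) = 32
//     addr &= !4                  -> 0x108
//     timer_id = (0x108 - 0x100) / 0x20 = 0
//     0x108 & 0x18 = 0x08         -> TimerRegister::CMP
//
// i.e. the high word of timer 0's comparator, returned as
// `HPETRegister::Timer(&self.timers[0], CMP)`, with the caller applying
// `>> shift` on reads and passing (shift, len) down to `deposit` on writes.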
} = self.decode(addr, size); + + use GlobalRegister::*; + use HPETRegister::*; + (match reg { + Timer(timer, tn_reg) => timer.borrow_mut().read(tn_reg), + Global(CAP) => self.capability.get(), /* including HPET_PERIOD 0x004 */ + Global(CFG) => self.config.get(), + Global(INT_STATUS) => self.int_status.get(), + Global(COUNTER) => { + // TODO: Add trace point + // trace_hpet_ram_read_reading_counter(addr & 4, cur_tick) + if self.is_hpet_enabled() { + self.get_ticks() + } else { + self.counter.get() + } + } + Unknown(_) => { + // TODO: Add trace point- trace_hpet_ram_read_invalid() + 0 + } + }) >> shift + } + + fn write(&self, addr: hwaddr, value: u64, size: u32) { + let HPETAddrDecode { shift, len, reg } = self.decode(addr, size); + + // TODO: Add trace point - trace_hpet_ram_write(addr, value) + use GlobalRegister::*; + use HPETRegister::*; + match reg { + Timer(timer, tn_reg) => timer.borrow_mut().write(tn_reg, value, shift, len), + Global(CAP) => {} // General Capabilities and ID Register: Read Only + Global(CFG) => self.set_cfg_reg(shift, len, value), + Global(INT_STATUS) => self.set_int_status_reg(shift, len, value), + Global(COUNTER) => self.set_counter_reg(shift, len, value), + Unknown(_) => { + // TODO: Add trace point - trace_hpet_ram_write_invalid() + } + } + } + + fn pre_save(&self) -> i32 { + if self.is_hpet_enabled() { + self.counter.set(self.get_ticks()); + } + + /* + * The number of timers must match on source and destination, but it was + * also added to the migration stream. Check that it matches the value + * that was configured. + */ + self.num_timers_save.set(self.num_timers as u8); + 0 + } + + fn post_load(&self, _version_id: u8) -> i32 { + for timer in self.timers.iter().take(self.num_timers) { + let mut t = timer.borrow_mut(); + + t.cmp64 = t.calculate_cmp64(t.get_state().counter.get(), t.cmp); + t.last = CLOCK_VIRTUAL.get_ns() - NANOSECONDS_PER_SECOND; + } + + // Recalculate the offset between the main counter and guest time + if !self.hpet_offset_saved { + self.hpet_offset + .set(ticks_to_ns(self.counter.get()) - CLOCK_VIRTUAL.get_ns()); + } + + 0 + } + + fn is_rtc_irq_level_needed(&self) -> bool { + self.rtc_irq_level.get() != 0 + } + + fn is_offset_needed(&self) -> bool { + self.is_hpet_enabled() && self.hpet_offset_saved + } + + fn validate_num_timers(&self, _version_id: u8) -> bool { + self.num_timers == self.num_timers_save.get().into() + } +} + +qom_isa!(HPETState: SysBusDevice, DeviceState, Object); + +unsafe impl ObjectType for HPETState { + // No need for HPETClass. Just like OBJECT_DECLARE_SIMPLE_TYPE in C. + type Class = ::Class; + const TYPE_NAME: &'static CStr = crate::TYPE_HPET; +} + +impl ObjectImpl for HPETState { + type ParentType = SysBusDevice; + + const INSTANCE_INIT: Option)> = Some(Self::init); + const INSTANCE_POST_INIT: Option = Some(Self::post_init); + const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::; +} + +// TODO: Make these properties user-configurable! +qemu_api::declare_properties! 
{ + HPET_PROPERTIES, + qemu_api::define_property!( + c"timers", + HPETState, + num_timers, + unsafe { &qdev_prop_usize }, + u8, + default = HPET_MIN_TIMERS + ), + qemu_api::define_property!( + c"msi", + HPETState, + flags, + unsafe { &qdev_prop_bit }, + u32, + bit = HPET_FLAG_MSI_SUPPORT_SHIFT as u8, + default = false, + ), + qemu_api::define_property!( + c"hpet-intcap", + HPETState, + int_route_cap, + unsafe { &qdev_prop_uint32 }, + u32, + default = 0 + ), + qemu_api::define_property!( + c"hpet-offset-saved", + HPETState, + hpet_offset_saved, + unsafe { &qdev_prop_bool }, + bool, + default = true + ), +} + +unsafe extern "C" fn hpet_rtc_irq_level_needed(opaque: *mut c_void) -> bool { + // SAFETY: + // the pointer is convertible to a reference + let state: &HPETState = unsafe { NonNull::new(opaque.cast::()).unwrap().as_ref() }; + state.is_rtc_irq_level_needed() +} + +unsafe extern "C" fn hpet_offset_needed(opaque: *mut c_void) -> bool { + // SAFETY: + // the pointer is convertible to a reference + let state: &HPETState = unsafe { NonNull::new(opaque.cast::()).unwrap().as_ref() }; + state.is_offset_needed() +} + +unsafe extern "C" fn hpet_pre_save(opaque: *mut c_void) -> c_int { + // SAFETY: + // the pointer is convertible to a reference + let state: &mut HPETState = + unsafe { NonNull::new(opaque.cast::()).unwrap().as_mut() }; + state.pre_save() as c_int +} + +unsafe extern "C" fn hpet_post_load(opaque: *mut c_void, version_id: c_int) -> c_int { + // SAFETY: + // the pointer is convertible to a reference + let state: &mut HPETState = + unsafe { NonNull::new(opaque.cast::()).unwrap().as_mut() }; + let version: u8 = version_id.try_into().unwrap(); + state.post_load(version) as c_int +} + +static VMSTATE_HPET_RTC_IRQ_LEVEL: VMStateDescription = VMStateDescription { + name: c"hpet/rtc_irq_level".as_ptr(), + version_id: 1, + minimum_version_id: 1, + needed: Some(hpet_rtc_irq_level_needed), + fields: vmstate_fields! { + vmstate_of!(HPETState, rtc_irq_level), + }, + ..Zeroable::ZERO +}; + +static VMSTATE_HPET_OFFSET: VMStateDescription = VMStateDescription { + name: c"hpet/offset".as_ptr(), + version_id: 1, + minimum_version_id: 1, + needed: Some(hpet_offset_needed), + fields: vmstate_fields! { + vmstate_of!(HPETState, hpet_offset), + }, + ..Zeroable::ZERO +}; + +static VMSTATE_HPET_TIMER: VMStateDescription = VMStateDescription { + name: c"hpet_timer".as_ptr(), + version_id: 1, + minimum_version_id: 1, + fields: vmstate_fields! { + vmstate_of!(HPETTimer, index), + vmstate_of!(HPETTimer, config), + vmstate_of!(HPETTimer, cmp), + vmstate_of!(HPETTimer, fsb), + vmstate_of!(HPETTimer, period), + vmstate_of!(HPETTimer, wrap_flag), + vmstate_of!(HPETTimer, qemu_timer), + }, + ..Zeroable::ZERO +}; + +const VALIDATE_TIMERS_NAME: &CStr = c"num_timers must match"; + +static VMSTATE_HPET: VMStateDescription = VMStateDescription { + name: c"hpet".as_ptr(), + version_id: 2, + minimum_version_id: 2, + pre_save: Some(hpet_pre_save), + post_load: Some(hpet_post_load), + fields: vmstate_fields! { + vmstate_of!(HPETState, config), + vmstate_of!(HPETState, int_status), + vmstate_of!(HPETState, counter), + vmstate_of!(HPETState, num_timers_save), + vmstate_validate!(HPETState, VALIDATE_TIMERS_NAME, HPETState::validate_num_timers), + vmstate_struct!(HPETState, timers[0 .. num_timers_save], &VMSTATE_HPET_TIMER, BqlRefCell, HPETState::validate_num_timers).with_version_id(0), + }, + subsections: vmstate_subsections! 
{ + VMSTATE_HPET_RTC_IRQ_LEVEL, + VMSTATE_HPET_OFFSET, + }, + ..Zeroable::ZERO +}; + +impl DeviceImpl for HPETState { + fn properties() -> &'static [Property] { + &HPET_PROPERTIES + } + + fn vmsd() -> Option<&'static VMStateDescription> { + Some(&VMSTATE_HPET) + } + + const REALIZE: Option qemu_api::Result<()>> = Some(Self::realize); +} + +impl ResettablePhasesImpl for HPETState { + const HOLD: Option = Some(Self::reset_hold); +} + +impl SysBusDeviceImpl for HPETState {} diff --git a/rust/hw/timer/hpet/src/fw_cfg.rs b/rust/hw/timer/hpet/src/fw_cfg.rs new file mode 100644 index 00000000000..619d662ee1e --- /dev/null +++ b/rust/hw/timer/hpet/src/fw_cfg.rs @@ -0,0 +1,68 @@ +// Copyright (C) 2024 Intel Corporation. +// Author(s): Zhao Liu +// SPDX-License-Identifier: GPL-2.0-or-later + +use std::ptr::addr_of_mut; + +use qemu_api::{cell::bql_locked, zeroable::Zeroable}; + +/// Each `HPETState` represents a Event Timer Block. The v1 spec supports +/// up to 8 blocks. QEMU only uses 1 block (in PC machine). +const HPET_MAX_NUM_EVENT_TIMER_BLOCK: usize = 8; + +#[repr(C, packed)] +#[derive(Copy, Clone, Default)] +pub struct HPETFwEntry { + pub event_timer_block_id: u32, + pub address: u64, + pub min_tick: u16, + pub page_prot: u8, +} +unsafe impl Zeroable for HPETFwEntry {} + +#[repr(C, packed)] +#[derive(Copy, Clone, Default)] +pub struct HPETFwConfig { + pub count: u8, + pub hpet: [HPETFwEntry; HPET_MAX_NUM_EVENT_TIMER_BLOCK], +} +unsafe impl Zeroable for HPETFwConfig {} + +#[allow(non_upper_case_globals)] +#[no_mangle] +pub static mut hpet_fw_cfg: HPETFwConfig = HPETFwConfig { + count: u8::MAX, + ..Zeroable::ZERO +}; + +impl HPETFwConfig { + pub(crate) fn assign_hpet_id() -> Result { + assert!(bql_locked()); + // SAFETY: all accesses go through these methods, which guarantee + // that the accesses are protected by the BQL. + let mut fw_cfg = unsafe { *addr_of_mut!(hpet_fw_cfg) }; + + if fw_cfg.count == u8::MAX { + // first instance + fw_cfg.count = 0; + } + + if fw_cfg.count == 8 { + Err("Only 8 instances of HPET are allowed")?; + } + + let id: usize = fw_cfg.count.into(); + fw_cfg.count += 1; + Ok(id) + } + + pub(crate) fn update_hpet_cfg(hpet_id: usize, timer_block_id: u32, address: u64) { + assert!(bql_locked()); + // SAFETY: all accesses go through these methods, which guarantee + // that the accesses are protected by the BQL. + let mut fw_cfg = unsafe { *addr_of_mut!(hpet_fw_cfg) }; + + fw_cfg.hpet[hpet_id].event_timer_block_id = timer_block_id; + fw_cfg.hpet[hpet_id].address = address; + } +} diff --git a/rust/hw/timer/hpet/src/lib.rs b/rust/hw/timer/hpet/src/lib.rs new file mode 100644 index 00000000000..a95cf14ac98 --- /dev/null +++ b/rust/hw/timer/hpet/src/lib.rs @@ -0,0 +1,13 @@ +// Copyright (C) 2024 Intel Corporation. +// Author(s): Zhao Liu +// SPDX-License-Identifier: GPL-2.0-or-later + +//! # HPET QEMU Device Model +//! +//! This library implements a device model for the IA-PC HPET (High +//! Precision Event Timers) device in QEMU. 
+ +pub mod device; +pub mod fw_cfg; + +pub const TYPE_HPET: &::std::ffi::CStr = c"hpet"; diff --git a/rust/hw/timer/meson.build b/rust/hw/timer/meson.build new file mode 100644 index 00000000000..22a84f15536 --- /dev/null +++ b/rust/hw/timer/meson.build @@ -0,0 +1 @@ +subdir('hpet') diff --git a/rust/meson.build b/rust/meson.build new file mode 100644 index 00000000000..331f11b7e72 --- /dev/null +++ b/rust/meson.build @@ -0,0 +1,39 @@ +subproject('anyhow-1-rs', required: true) +subproject('bilge-0.2-rs', required: true) +subproject('bilge-impl-0.2-rs', required: true) +subproject('foreign-0.3-rs', required: true) +subproject('libc-0.2-rs', required: true) + +anyhow_rs = dependency('anyhow-1-rs') +bilge_rs = dependency('bilge-0.2-rs') +bilge_impl_rs = dependency('bilge-impl-0.2-rs') +foreign_rs = dependency('foreign-0.3-rs') +libc_rs = dependency('libc-0.2-rs') + +subproject('proc-macro2-1-rs', required: true) +subproject('quote-1-rs', required: true) +subproject('syn-2-rs', required: true) + +quote_rs_native = dependency('quote-1-rs', native: true) +syn_rs_native = dependency('syn-2-rs', native: true) +proc_macro2_rs_native = dependency('proc-macro2-1-rs', native: true) + +qemuutil_rs = qemuutil.partial_dependency(link_args: true, links: true) + +genrs = [] + +subdir('qemu-api-macros') +subdir('bits') +subdir('qemu-api') + +subdir('hw') + +cargo = find_program('cargo', required: false) + +if cargo.found() + run_target('rustfmt', + command: [config_host['MESON'], 'devenv', + '--workdir', '@CURRENT_SOURCE_DIR@', + cargo, 'fmt'], + depends: genrs) +endif diff --git a/rust/qemu-api-macros/Cargo.toml b/rust/qemu-api-macros/Cargo.toml new file mode 100644 index 00000000000..0cd40c8e168 --- /dev/null +++ b/rust/qemu-api-macros/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "qemu_api_macros" +version = "0.1.0" +authors = ["Manos Pitsidianakis "] +description = "Rust bindings for QEMU - Utility macros" +resolver = "2" +publish = false + +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[lib] +proc-macro = true + +[dependencies] +proc-macro2 = "1" +quote = "1" +syn = { version = "2", features = ["extra-traits"] } + +[lints] +workspace = true diff --git a/rust/qemu-api-macros/meson.build b/rust/qemu-api-macros/meson.build new file mode 100644 index 00000000000..2152bcb99b3 --- /dev/null +++ b/rust/qemu-api-macros/meson.build @@ -0,0 +1,22 @@ +_qemu_api_macros_rs = rust.proc_macro( + 'qemu_api_macros', + files('src/lib.rs'), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_args: [ + '--cfg', 'use_fallback', + '--cfg', 'feature="syn-error"', + '--cfg', 'feature="proc-macro"', + ], + dependencies: [ + proc_macro2_rs_native, + quote_rs_native, + syn_rs_native, + ], +) + +qemu_api_macros = declare_dependency( + link_with: _qemu_api_macros_rs, +) + +rust.test('rust-qemu-api-macros-tests', _qemu_api_macros_rs, + suite: ['unit', 'rust']) diff --git a/rust/qemu-api-macros/src/bits.rs b/rust/qemu-api-macros/src/bits.rs new file mode 100644 index 00000000000..a80a3b9fee1 --- /dev/null +++ b/rust/qemu-api-macros/src/bits.rs @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: MIT or Apache-2.0 or GPL-2.0-or-later + +// shadowing is useful together with "if let" +#![allow(clippy::shadow_unrelated)] + +use proc_macro2::{ + Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree, TokenTree as TT, +}; +use syn::Error; + +pub struct BitsConstInternal { + typ: TokenTree, +} + +fn paren(ts: 
TokenStream) -> TokenTree { + TT::Group(Group::new(Delimiter::Parenthesis, ts)) +} + +fn ident(s: &'static str) -> TokenTree { + TT::Ident(Ident::new(s, Span::call_site())) +} + +fn punct(ch: char) -> TokenTree { + TT::Punct(Punct::new(ch, Spacing::Alone)) +} + +/// Implements a recursive-descent parser that translates Boolean expressions on +/// bitmasks to invocations of `const` functions defined by the `bits!` macro. +impl BitsConstInternal { + // primary ::= '(' or ')' + // | ident + // | '!' ident + fn parse_primary( + &self, + tok: TokenTree, + it: &mut dyn Iterator, + out: &mut TokenStream, + ) -> Result, Error> { + let next = match tok { + TT::Group(ref g) => { + if g.delimiter() != Delimiter::Parenthesis && g.delimiter() != Delimiter::None { + return Err(Error::new(g.span(), "expected parenthesis")); + } + let mut stream = g.stream().into_iter(); + let Some(first_tok) = stream.next() else { + return Err(Error::new(g.span(), "expected operand, found ')'")); + }; + let mut output = TokenStream::new(); + // start from the lowest precedence + let next = self.parse_or(first_tok, &mut stream, &mut output)?; + if let Some(tok) = next { + return Err(Error::new(tok.span(), format!("unexpected token {tok}"))); + } + out.extend(Some(paren(output))); + it.next() + } + TT::Ident(_) => { + let mut output = TokenStream::new(); + output.extend([ + self.typ.clone(), + TT::Punct(Punct::new(':', Spacing::Joint)), + TT::Punct(Punct::new(':', Spacing::Joint)), + tok, + ]); + out.extend(Some(paren(output))); + it.next() + } + TT::Punct(ref p) => { + if p.as_char() != '!' { + return Err(Error::new(p.span(), "expected operand")); + } + let Some(rhs_tok) = it.next() else { + return Err(Error::new(p.span(), "expected operand at end of input")); + }; + let next = self.parse_primary(rhs_tok, it, out)?; + out.extend([punct('.'), ident("invert"), paren(TokenStream::new())]); + next + } + _ => { + return Err(Error::new(tok.span(), "unexpected literal")); + } + }; + Ok(next) + } + + fn parse_binop< + F: Fn( + &Self, + TokenTree, + &mut dyn Iterator, + &mut TokenStream, + ) -> Result, Error>, + >( + &self, + tok: TokenTree, + it: &mut dyn Iterator, + out: &mut TokenStream, + ch: char, + f: F, + method: &'static str, + ) -> Result, Error> { + let mut next = f(self, tok, it, out)?; + while next.is_some() { + let op = next.as_ref().unwrap(); + let TT::Punct(ref p) = op else { break }; + if p.as_char() != ch { + break; + } + + let Some(rhs_tok) = it.next() else { + return Err(Error::new(p.span(), "expected operand at end of input")); + }; + let mut rhs = TokenStream::new(); + next = f(self, rhs_tok, it, &mut rhs)?; + out.extend([punct('.'), ident(method), paren(rhs)]); + } + Ok(next) + } + + // sub ::= primary ('-' primary)* + pub fn parse_sub( + &self, + tok: TokenTree, + it: &mut dyn Iterator, + out: &mut TokenStream, + ) -> Result, Error> { + self.parse_binop(tok, it, out, '-', Self::parse_primary, "difference") + } + + // and ::= sub ('&' sub)* + fn parse_and( + &self, + tok: TokenTree, + it: &mut dyn Iterator, + out: &mut TokenStream, + ) -> Result, Error> { + self.parse_binop(tok, it, out, '&', Self::parse_sub, "intersection") + } + + // xor ::= and ('&' and)* + fn parse_xor( + &self, + tok: TokenTree, + it: &mut dyn Iterator, + out: &mut TokenStream, + ) -> Result, Error> { + self.parse_binop(tok, it, out, '^', Self::parse_and, "symmetric_difference") + } + + // or ::= xor ('|' xor)* + pub fn parse_or( + &self, + tok: TokenTree, + it: &mut dyn Iterator, + out: &mut TokenStream, + ) -> Result, Error> { + 
self.parse_binop(tok, it, out, '|', Self::parse_xor, "union") + } + + pub fn parse( + it: &mut dyn Iterator, + ) -> Result { + let mut pos = Span::call_site(); + let mut typ = proc_macro2::TokenStream::new(); + + // Gobble everything up to an `@` sign, which is followed by a + // parenthesized expression; that is, all token trees except the + // last two form the type. + let next = loop { + let tok = it.next(); + if let Some(ref t) = tok { + pos = t.span(); + } + match tok { + None => break None, + Some(TT::Punct(ref p)) if p.as_char() == '@' => { + let tok = it.next(); + if let Some(ref t) = tok { + pos = t.span(); + } + break tok; + } + Some(x) => typ.extend(Some(x)), + } + }; + + let Some(tok) = next else { + return Err(Error::new( + pos, + "expected expression, do not call this macro directly", + )); + }; + let TT::Group(ref _group) = tok else { + return Err(Error::new( + tok.span(), + "expected parenthesis, do not call this macro directly", + )); + }; + let mut out = TokenStream::new(); + let state = Self { + typ: TT::Group(Group::new(Delimiter::None, typ)), + }; + + let next = state.parse_primary(tok, it, &mut out)?; + + // A parenthesized expression is a single production of the grammar, + // so the input must have reached the last token. + if let Some(tok) = next { + return Err(Error::new(tok.span(), format!("unexpected token {tok}"))); + } + Ok(out) + } +} diff --git a/rust/qemu-api-macros/src/lib.rs b/rust/qemu-api-macros/src/lib.rs new file mode 100644 index 00000000000..b525d89c09e --- /dev/null +++ b/rust/qemu-api-macros/src/lib.rs @@ -0,0 +1,265 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis +// SPDX-License-Identifier: GPL-2.0-or-later + +use proc_macro::TokenStream; +use quote::quote; +use syn::{ + parse_macro_input, parse_quote, punctuated::Punctuated, spanned::Spanned, token::Comma, Data, + DeriveInput, Error, Field, Fields, FieldsUnnamed, Ident, Meta, Path, Token, Variant, +}; +mod bits; +use bits::BitsConstInternal; + +#[cfg(test)] +mod tests; + +fn get_fields<'a>( + input: &'a DeriveInput, + msg: &str, +) -> Result<&'a Punctuated, Error> { + let Data::Struct(ref s) = &input.data else { + return Err(Error::new( + input.ident.span(), + format!("Struct required for {msg}"), + )); + }; + let Fields::Named(ref fs) = &s.fields else { + return Err(Error::new( + input.ident.span(), + format!("Named fields required for {msg}"), + )); + }; + Ok(&fs.named) +} + +fn get_unnamed_field<'a>(input: &'a DeriveInput, msg: &str) -> Result<&'a Field, Error> { + let Data::Struct(ref s) = &input.data else { + return Err(Error::new( + input.ident.span(), + format!("Struct required for {msg}"), + )); + }; + let Fields::Unnamed(FieldsUnnamed { ref unnamed, .. }) = &s.fields else { + return Err(Error::new( + s.fields.span(), + format!("Tuple struct required for {msg}"), + )); + }; + if unnamed.len() != 1 { + return Err(Error::new( + s.fields.span(), + format!("A single field is required for {msg}"), + )); + } + Ok(&unnamed[0]) +} + +fn is_c_repr(input: &DeriveInput, msg: &str) -> Result<(), Error> { + let expected = parse_quote! { #[repr(C)] }; + + if input.attrs.iter().any(|attr| attr == &expected) { + Ok(()) + } else { + Err(Error::new( + input.ident.span(), + format!("#[repr(C)] required for {msg}"), + )) + } +} + +fn is_transparent_repr(input: &DeriveInput, msg: &str) -> Result<(), Error> { + let expected = parse_quote! 
{ #[repr(transparent)] }; + + if input.attrs.iter().any(|attr| attr == &expected) { + Ok(()) + } else { + Err(Error::new( + input.ident.span(), + format!("#[repr(transparent)] required for {msg}"), + )) + } +} + +fn derive_object_or_error(input: DeriveInput) -> Result { + is_c_repr(&input, "#[derive(Object)]")?; + + let name = &input.ident; + let parent = &get_fields(&input, "#[derive(Object)]")?[0].ident; + + Ok(quote! { + ::qemu_api::assert_field_type!(#name, #parent, + ::qemu_api::qom::ParentField<<#name as ::qemu_api::qom::ObjectImpl>::ParentType>); + + ::qemu_api::module_init! { + MODULE_INIT_QOM => unsafe { + ::qemu_api::bindings::type_register_static(&<#name as ::qemu_api::qom::ObjectImpl>::TYPE_INFO); + } + } + }) +} + +#[proc_macro_derive(Object)] +pub fn derive_object(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + + derive_object_or_error(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} + +fn derive_opaque_or_error(input: DeriveInput) -> Result { + is_transparent_repr(&input, "#[derive(Wrapper)]")?; + + let name = &input.ident; + let field = &get_unnamed_field(&input, "#[derive(Wrapper)]")?; + let typ = &field.ty; + + // TODO: how to add "::qemu_api"? For now, this is only used in the + // qemu_api crate so it's not a problem. + Ok(quote! { + unsafe impl crate::cell::Wrapper for #name { + type Wrapped = <#typ as crate::cell::Wrapper>::Wrapped; + } + impl #name { + pub unsafe fn from_raw<'a>(ptr: *mut ::Wrapped) -> &'a Self { + let ptr = ::std::ptr::NonNull::new(ptr).unwrap().cast::(); + unsafe { ptr.as_ref() } + } + + pub const fn as_mut_ptr(&self) -> *mut ::Wrapped { + self.0.as_mut_ptr() + } + + pub const fn as_ptr(&self) -> *const ::Wrapped { + self.0.as_ptr() + } + + pub const fn as_void_ptr(&self) -> *mut ::core::ffi::c_void { + self.0.as_void_ptr() + } + + pub const fn raw_get(slot: *mut Self) -> *mut ::Wrapped { + slot.cast() + } + } + }) +} + +#[proc_macro_derive(Wrapper)] +pub fn derive_opaque(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + + derive_opaque_or_error(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} + +#[allow(non_snake_case)] +fn get_repr_uN(input: &DeriveInput, msg: &str) -> Result { + let repr = input.attrs.iter().find(|attr| attr.path().is_ident("repr")); + if let Some(repr) = repr { + let nested = repr.parse_args_with(Punctuated::::parse_terminated)?; + for meta in nested { + match meta { + Meta::Path(path) if path.is_ident("u8") => return Ok(path), + Meta::Path(path) if path.is_ident("u16") => return Ok(path), + Meta::Path(path) if path.is_ident("u32") => return Ok(path), + Meta::Path(path) if path.is_ident("u64") => return Ok(path), + _ => {} + } + } + } + + Err(Error::new( + input.ident.span(), + format!("#[repr(u8/u16/u32/u64) required for {msg}"), + )) +} + +fn get_variants(input: &DeriveInput) -> Result<&Punctuated, Error> { + let Data::Enum(ref e) = &input.data else { + return Err(Error::new( + input.ident.span(), + "Cannot derive TryInto for union or struct.", + )); + }; + if let Some(v) = e.variants.iter().find(|v| v.fields != Fields::Unit) { + return Err(Error::new( + v.fields.span(), + "Cannot derive TryInto for enum with non-unit variants.", + )); + } + Ok(&e.variants) +} + +#[rustfmt::skip::macros(quote)] +fn derive_tryinto_body( + name: &Ident, + variants: &Punctuated, + repr: &Path, +) -> Result { + let discriminants: Vec<&Ident> = variants.iter().map(|f| &f.ident).collect(); + + Ok(quote! 
{ + #(const #discriminants: #repr = #name::#discriminants as #repr;)* + match value { + #(#discriminants => core::result::Result::Ok(#name::#discriminants),)* + _ => core::result::Result::Err(value), + } + }) +} + +#[rustfmt::skip::macros(quote)] +fn derive_tryinto_or_error(input: DeriveInput) -> Result { + let repr = get_repr_uN(&input, "#[derive(TryInto)]")?; + let name = &input.ident; + let body = derive_tryinto_body(name, get_variants(&input)?, &repr)?; + let errmsg = format!("invalid value for {name}"); + + Ok(quote! { + impl #name { + #[allow(dead_code)] + pub const fn into_bits(self) -> #repr { + self as #repr + } + + #[allow(dead_code)] + pub const fn from_bits(value: #repr) -> Self { + match ({ + #body + }) { + Ok(x) => x, + Err(_) => panic!(#errmsg), + } + } + } + impl core::convert::TryFrom<#repr> for #name { + type Error = #repr; + + #[allow(ambiguous_associated_items)] + fn try_from(value: #repr) -> Result { + #body + } + } + }) +} + +#[proc_macro_derive(TryInto)] +pub fn derive_tryinto(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + + derive_tryinto_or_error(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} + +#[proc_macro] +pub fn bits_const_internal(ts: TokenStream) -> TokenStream { + let ts = proc_macro2::TokenStream::from(ts); + let mut it = ts.into_iter(); + + BitsConstInternal::parse(&mut it) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} diff --git a/rust/qemu-api-macros/src/tests.rs b/rust/qemu-api-macros/src/tests.rs new file mode 100644 index 00000000000..d6dcd62fcf6 --- /dev/null +++ b/rust/qemu-api-macros/src/tests.rs @@ -0,0 +1,137 @@ +// Copyright 2025, Linaro Limited +// Author(s): Manos Pitsidianakis +// SPDX-License-Identifier: GPL-2.0-or-later + +use quote::quote; + +use super::*; + +macro_rules! derive_compile_fail { + ($derive_fn:ident, $input:expr, $error_msg:expr) => {{ + let input: proc_macro2::TokenStream = $input; + let error_msg: &str = $error_msg; + let derive_fn: fn(input: syn::DeriveInput) -> Result = + $derive_fn; + + let input: syn::DeriveInput = syn::parse2(input).unwrap(); + let result = derive_fn(input); + let err = result.unwrap_err().into_compile_error(); + assert_eq!( + err.to_string(), + quote! { ::core::compile_error! { #error_msg } }.to_string() + ); + }}; +} + +macro_rules! derive_compile { + ($derive_fn:ident, $input:expr, $($expected:tt)*) => {{ + let input: proc_macro2::TokenStream = $input; + let expected: proc_macro2::TokenStream = $($expected)*; + let derive_fn: fn(input: syn::DeriveInput) -> Result = + $derive_fn; + + let input: syn::DeriveInput = syn::parse2(input).unwrap(); + let result = derive_fn(input).unwrap(); + assert_eq!(result.to_string(), expected.to_string()); + }}; +} + +#[test] +fn test_derive_object() { + derive_compile_fail!( + derive_object_or_error, + quote! { + #[derive(Object)] + struct Foo { + _unused: [u8; 0], + } + }, + "#[repr(C)] required for #[derive(Object)]" + ); + derive_compile!( + derive_object_or_error, + quote! { + #[derive(Object)] + #[repr(C)] + struct Foo { + _unused: [u8; 0], + } + }, + quote! { + ::qemu_api::assert_field_type!( + Foo, + _unused, + ::qemu_api::qom::ParentField<::ParentType> + ); + ::qemu_api::module_init! { + MODULE_INIT_QOM => unsafe { + ::qemu_api::bindings::type_register_static(&::TYPE_INFO); + } + } + } + ); +} + +#[test] +fn test_derive_tryinto() { + derive_compile_fail!( + derive_tryinto_or_error, + quote! 
{ + #[derive(TryInto)] + struct Foo { + _unused: [u8; 0], + } + }, + "#[repr(u8/u16/u32/u64) required for #[derive(TryInto)]" + ); + derive_compile!( + derive_tryinto_or_error, + quote! { + #[derive(TryInto)] + #[repr(u8)] + enum Foo { + First = 0, + Second, + } + }, + quote! { + impl Foo { + #[allow(dead_code)] + pub const fn into_bits(self) -> u8 { + self as u8 + } + + #[allow(dead_code)] + pub const fn from_bits(value: u8) -> Self { + match ({ + const First: u8 = Foo::First as u8; + const Second: u8 = Foo::Second as u8; + match value { + First => core::result::Result::Ok(Foo::First), + Second => core::result::Result::Ok(Foo::Second), + _ => core::result::Result::Err(value), + } + }) { + Ok(x) => x, + Err(_) => panic!("invalid value for Foo"), + } + } + } + + impl core::convert::TryFrom for Foo { + type Error = u8; + + #[allow(ambiguous_associated_items)] + fn try_from(value: u8) -> Result { + const First: u8 = Foo::First as u8; + const Second: u8 = Foo::Second as u8; + match value { + First => core::result::Result::Ok(Foo::First), + Second => core::result::Result::Ok(Foo::Second), + _ => core::result::Result::Err(value), + } + } + } + } + ); +} diff --git a/rust/qemu-api/.gitignore b/rust/qemu-api/.gitignore new file mode 100644 index 00000000000..df6c2163e03 --- /dev/null +++ b/rust/qemu-api/.gitignore @@ -0,0 +1,2 @@ +# Ignore generated bindings file overrides. +/src/bindings.inc.rs diff --git a/rust/qemu-api/Cargo.toml b/rust/qemu-api/Cargo.toml new file mode 100644 index 00000000000..db7000dee44 --- /dev/null +++ b/rust/qemu-api/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "qemu_api" +version = "0.1.0" +authors = ["Manos Pitsidianakis "] +description = "Rust bindings for QEMU" +readme = "README.md" +resolver = "2" +publish = false + +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +qemu_api_macros = { path = "../qemu-api-macros" } +anyhow = "~1.0" +libc = "0.2.162" +foreign = "~0.3.1" + +[features] +default = ["debug_cell"] +allocator = [] +debug_cell = [] + +[lints] +workspace = true diff --git a/rust/qemu-api/README.md b/rust/qemu-api/README.md new file mode 100644 index 00000000000..ed1b7ab263d --- /dev/null +++ b/rust/qemu-api/README.md @@ -0,0 +1,19 @@ +# QEMU bindings and API wrappers + +This library exports helper Rust types, Rust macros and C FFI bindings for internal QEMU APIs. + +The C bindings can be generated with `bindgen`, using this build target: + +```console +$ make bindings.inc.rs +``` + +## Generate Rust documentation + +Common Cargo tasks can be performed from the QEMU build directory + +```console +$ make clippy +$ make rustfmt +$ make rustdoc +``` diff --git a/rust/qemu-api/build.rs b/rust/qemu-api/build.rs new file mode 100644 index 00000000000..29d09456257 --- /dev/null +++ b/rust/qemu-api/build.rs @@ -0,0 +1,43 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis +// SPDX-License-Identifier: GPL-2.0-or-later + +#[cfg(unix)] +use std::os::unix::fs::symlink as symlink_file; +#[cfg(windows)] +use std::os::windows::fs::symlink_file; +use std::{env, fs::remove_file, io::Result, path::Path}; + +fn main() -> Result<()> { + let file = if let Ok(root) = env::var("MESON_BUILD_ROOT") { + format!("{root}/rust/qemu-api/bindings.inc.rs") + } else { + // Placing bindings.inc.rs in the source directory is supported + // but not documented or encouraged. 
+ format!("{}/src/bindings.inc.rs", env!("CARGO_MANIFEST_DIR")) + }; + + let file = Path::new(&file); + if !Path::new(&file).exists() { + panic!(concat!( + "\n", + " No generated C bindings found! Maybe you wanted one of\n", + " `make clippy`, `make rustfmt`, `make rustdoc`?\n", + "\n", + " For other uses of `cargo`, start a subshell with\n", + " `pyvenv/bin/meson devenv`, or point MESON_BUILD_ROOT to\n", + " the top of the build tree." + )); + } + + let out_dir = env::var("OUT_DIR").unwrap(); + let dest_path = format!("{out_dir}/bindings.inc.rs"); + let dest_path = Path::new(&dest_path); + if dest_path.symlink_metadata().is_ok() { + remove_file(dest_path)?; + } + symlink_file(file, dest_path)?; + + println!("cargo:rerun-if-changed=build.rs"); + Ok(()) +} diff --git a/rust/qemu-api/meson.build b/rust/qemu-api/meson.build new file mode 100644 index 00000000000..a090297c458 --- /dev/null +++ b/rust/qemu-api/meson.build @@ -0,0 +1,114 @@ +_qemu_api_cfg = run_command(rustc_args, + '--config-headers', config_host_h, '--features', files('Cargo.toml'), + capture: true, check: true).stdout().strip().splitlines() + +# _qemu_api_cfg += ['--cfg', 'feature="allocator"'] +if get_option('debug_mutex') + _qemu_api_cfg += ['--cfg', 'feature="debug_cell"'] +endif + +c_enums = [ + 'DeviceCategory', + 'GpioPolarity', + 'MachineInitPhase', + 'MemoryDeviceInfoKind', + 'MigrationPolicy', + 'MigrationPriority', + 'QEMUChrEvent', + 'QEMUClockType', + 'ResetType', + 'device_endian', + 'module_init_type', +] +_qemu_api_bindgen_args = [] +foreach enum : c_enums + _qemu_api_bindgen_args += ['--rustified-enum', enum] +endforeach +c_bitfields = [ + 'ClockEvent', + 'VMStateFlags', +] +foreach enum : c_bitfields + _qemu_api_bindgen_args += ['--bitfield-enum', enum] +endforeach + +# TODO: Remove this comment when the clang/libclang mismatch issue is solved. +# +# Rust bindings generation with `bindgen` might fail in some cases where the +# detected `libclang` does not match the expected `clang` version/target. In +# this case you must pass the path to `clang` and `libclang` to your build +# command invocation using the environment variables CLANG_PATH and +# LIBCLANG_PATH +_qemu_api_bindings_inc_rs = rust.bindgen( + input: 'wrapper.h', + dependencies: common_ss.all_dependencies(), + output: 'bindings.inc.rs', + include_directories: bindings_incdir, + bindgen_version: ['>=0.60.0'], + args: bindgen_args_common + _qemu_api_bindgen_args, + ) + +_qemu_api_rs = static_library( + 'qemu_api', + structured_sources( + [ + 'src/lib.rs', + 'src/assertions.rs', + 'src/bindings.rs', + 'src/bitops.rs', + 'src/callbacks.rs', + 'src/cell.rs', + 'src/chardev.rs', + 'src/errno.rs', + 'src/error.rs', + 'src/irq.rs', + 'src/log.rs', + 'src/memory.rs', + 'src/module.rs', + 'src/prelude.rs', + 'src/qdev.rs', + 'src/qom.rs', + 'src/sysbus.rs', + 'src/timer.rs', + 'src/uninit.rs', + 'src/vmstate.rs', + 'src/zeroable.rs', + ], + {'.' : _qemu_api_bindings_inc_rs}, + ), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + rust_args: _qemu_api_cfg, + dependencies: [anyhow_rs, foreign_rs, libc_rs, qemu_api_macros, qemuutil_rs, + qom, hwcore, chardev, migration], +) + +rust.test('rust-qemu-api-tests', _qemu_api_rs, + suite: ['unit', 'rust']) + +qemu_api = declare_dependency(link_with: [_qemu_api_rs], + dependencies: [qemu_api_macros, qom, hwcore, chardev, migration]) + +# Doctests are essentially integration tests, so they need the same dependencies. 
+# Note that running them requires the object files for C code, so place them +# in a separate suite that is run by the "build" CI jobs rather than "check". +rust.doctest('rust-qemu-api-doctests', + _qemu_api_rs, + protocol: 'rust', + dependencies: qemu_api, + suite: ['doc', 'rust']) + +test('rust-qemu-api-integration', + executable( + 'rust-qemu-api-integration', + files('tests/tests.rs', 'tests/vmstate_tests.rs'), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_args: ['--test'], + install: false, + dependencies: [qemu_api]), + args: [ + '--test', '--test-threads', '1', + '--format', 'pretty', + ], + protocol: 'rust', + suite: ['unit', 'rust']) diff --git a/rust/qemu-api/src/assertions.rs b/rust/qemu-api/src/assertions.rs new file mode 100644 index 00000000000..a2d38c877df --- /dev/null +++ b/rust/qemu-api/src/assertions.rs @@ -0,0 +1,152 @@ +// Copyright 2024, Red Hat Inc. +// Author(s): Paolo Bonzini +// SPDX-License-Identifier: GPL-2.0-or-later + +#![doc(hidden)] +//! This module provides macros to check the equality of types and +//! the type of `struct` fields. This can be useful to ensure that +//! types match the expectations of C code. +//! +//! Documentation is hidden because it only exposes macros, which +//! are exported directly from `qemu_api`. + +// Based on https://stackoverflow.com/questions/64251852/x/70978292#70978292 +// (stackoverflow answers are released under MIT license). + +#[doc(hidden)] +pub trait EqType { + type Itself; +} + +impl EqType for T { + type Itself = T; +} + +/// Assert that two types are the same. +/// +/// # Examples +/// +/// ``` +/// # use qemu_api::assert_same_type; +/// # use std::ops::Deref; +/// assert_same_type!(u32, u32); +/// assert_same_type!( as Deref>::Target, u32); +/// ``` +/// +/// Different types will cause a compile failure +/// +/// ```compile_fail +/// # use qemu_api::assert_same_type; +/// assert_same_type!(&Box, &u32); +/// ``` +#[macro_export] +macro_rules! assert_same_type { + ($t1:ty, $t2:ty) => { + const _: () = { + #[allow(unused)] + fn assert_same_type(v: $t1) { + fn types_must_be_equal(_: T) + where + T: $crate::assertions::EqType, + { + } + types_must_be_equal::<_, $t2>(v); + } + }; + }; +} + +/// Assert that a field of a struct has the given type. +/// +/// # Examples +/// +/// ``` +/// # use qemu_api::assert_field_type; +/// pub struct A { +/// field1: u32, +/// } +/// +/// assert_field_type!(A, field1, u32); +/// ``` +/// +/// Different types will cause a compile failure +/// +/// ```compile_fail +/// # use qemu_api::assert_field_type; +/// # pub struct A { field1: u32 } +/// assert_field_type!(A, field1, i32); +/// ``` +#[macro_export] +macro_rules! assert_field_type { + (@internal $param_name:ident, $ti:ty, $t:ty, $($field:tt)*) => { + const _: () = { + #[allow(unused)] + fn assert_field_type($param_name: &$t) { + fn types_must_be_equal(_: &T) + where + T: $crate::assertions::EqType, + { + } + types_must_be_equal::<_, $ti>(&$($field)*); + } + }; + }; + + ($t:ty, $i:tt, $ti:ty) => { + $crate::assert_field_type!(@internal v, $ti, $t, v.$i); + }; + + ($t:ty, $i:tt, $ti:ty, num = $num:ident) => { + $crate::assert_field_type!(@internal v, $ti, $t, v.$i[0]); + }; +} + +/// Assert that an expression matches a pattern. This can also be +/// useful to compare enums that do not implement `Eq`. +/// +/// # Examples +/// +/// ``` +/// # use qemu_api::assert_match; +/// // JoinHandle does not implement `Eq`, therefore the result +/// // does not either. 
+/// let result: Result, u32> = Err(42); +/// assert_match!(result, Err(42)); +/// ``` +#[macro_export] +macro_rules! assert_match { + ($a:expr, $b:pat) => { + assert!( + match $a { + $b => true, + _ => false, + }, + "{} = {:?} does not match {}", + stringify!($a), + $a, + stringify!($b) + ); + }; +} + +/// Assert at compile time that an expression is true. This is similar +/// to `const { assert!(...); }` but it works outside functions, as well as +/// on versions of Rust before 1.79. +/// +/// # Examples +/// +/// ``` +/// # use qemu_api::static_assert; +/// static_assert!("abc".len() == 3); +/// ``` +/// +/// ```compile_fail +/// # use qemu_api::static_assert; +/// static_assert!("abc".len() == 2); // does not compile +/// ``` +#[macro_export] +macro_rules! static_assert { + ($x:expr) => { + const _: () = assert!($x); + }; +} diff --git a/rust/qemu-api/src/bindings.rs b/rust/qemu-api/src/bindings.rs new file mode 100644 index 00000000000..b8104dea8be --- /dev/null +++ b/rust/qemu-api/src/bindings.rs @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#![allow( + dead_code, + improper_ctypes_definitions, + improper_ctypes, + non_camel_case_types, + non_snake_case, + non_upper_case_globals, + unnecessary_transmutes, + unsafe_op_in_unsafe_fn, + clippy::pedantic, + clippy::restriction, + clippy::style, + clippy::missing_const_for_fn, + clippy::ptr_offset_with_cast, + clippy::useless_transmute, + clippy::missing_safety_doc, + clippy::too_many_arguments +)] + +//! `bindgen`-generated declarations. + +#[cfg(MESON)] +include!("bindings.inc.rs"); + +#[cfg(not(MESON))] +include!(concat!(env!("OUT_DIR"), "/bindings.inc.rs")); + +// SAFETY: these are implemented in C; the bindings need to assert that the +// BQL is taken, either directly or via `BqlCell` and `BqlRefCell`. +// When bindings for character devices are introduced, this can be +// moved to the Opaque<> wrapper in src/chardev.rs. +unsafe impl Send for CharBackend {} +unsafe impl Sync for CharBackend {} + +// SAFETY: this is a pure data struct +unsafe impl Send for CoalescedMemoryRange {} +unsafe impl Sync for CoalescedMemoryRange {} + +// SAFETY: these are constants and vtables; the Send and Sync requirements +// are deferred to the unsafe callbacks that they contain +unsafe impl Send for MemoryRegionOps {} +unsafe impl Sync for MemoryRegionOps {} + +unsafe impl Send for Property {} +unsafe impl Sync for Property {} + +unsafe impl Send for TypeInfo {} +unsafe impl Sync for TypeInfo {} + +unsafe impl Send for VMStateDescription {} +unsafe impl Sync for VMStateDescription {} + +unsafe impl Send for VMStateField {} +unsafe impl Sync for VMStateField {} + +unsafe impl Send for VMStateInfo {} +unsafe impl Sync for VMStateInfo {} diff --git a/rust/qemu-api/src/bitops.rs b/rust/qemu-api/src/bitops.rs new file mode 100644 index 00000000000..b1e3a530ab5 --- /dev/null +++ b/rust/qemu-api/src/bitops.rs @@ -0,0 +1,119 @@ +// Copyright (C) 2024 Intel Corporation. +// Author(s): Zhao Liu +// SPDX-License-Identifier: GPL-2.0-or-later + +//! This module provides bit operation extensions to integer types. +//! It is usually included via the `qemu_api` prelude. 
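Since the `IntegerExt` trait defined just below is the main user-facing piece of this module, a short usage sketch may help. The 32-bit register layout used here (an 8-bit "divider" field at bits 8..16) is invented purely for illustration; the trait methods themselves (`deposit`, `extract`, `bit`, `mask`) are the ones implemented below.

```rust
use qemu_api::prelude::*; // brings IntegerExt into scope, as the module doc notes

// Hypothetical 32-bit control register with an 8-bit "divider" field at bits 8..16.
fn set_divider(reg: u32, divider: u8) -> u32 {
    // Replace bits [8, 16) with the new field value, leaving the other bits untouched.
    reg.deposit(8, 8, divider)
}

fn get_divider(reg: u32) -> u32 {
    // Read bits [8, 16) back out of the register.
    reg.extract(8, 8)
}

fn main() {
    let reg = set_divider(0x0000_00ff, 0x12);
    assert_eq!(reg, 0x0000_12ff);
    assert_eq!(get_divider(reg), 0x12);
    // Single-bit and mask helpers from the same trait:
    assert_eq!(u32::bit(16), 0x1_0000);
    assert_eq!(u32::mask(8, 8), 0xff00);
}
```

The `debug_assert!` checks inside `mask` and `deposit` mean that out-of-range `start`/`length` combinations only panic in debug builds.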
+ +use std::ops::{ + Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign, + Mul, MulAssign, Not, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, +}; + +/// Trait for extensions to integer types +pub trait IntegerExt: + Add + AddAssign + + BitAnd + BitAndAssign + + BitOr + BitOrAssign + + BitXor + BitXorAssign + + Copy + + Div + DivAssign + + Eq + + Mul + MulAssign + + Not + Ord + PartialOrd + + Rem + RemAssign + + Shl + ShlAssign + + Shl + ShlAssign + // add more as needed + Shr + ShrAssign + + Shr + ShrAssign // add more as needed +{ + const BITS: u32; + const MAX: Self; + const MIN: Self; + const ONE: Self; + const ZERO: Self; + + #[inline] + #[must_use] + fn bit(start: u32) -> Self + { + debug_assert!(start < Self::BITS); + + Self::ONE << start + } + + #[inline] + #[must_use] + fn mask(start: u32, length: u32) -> Self + { + /* FIXME: Implement a more elegant check with error handling support? */ + debug_assert!(start < Self::BITS && length > 0 && length <= Self::BITS - start); + + (Self::MAX >> (Self::BITS - length)) << start + } + + #[inline] + #[must_use] + fn deposit(self, start: u32, length: u32, + fieldval: U) -> Self + where Self: From + { + debug_assert!(length <= U::BITS); + + let mask = Self::mask(start, length); + (self & !mask) | ((Self::from(fieldval) << start) & mask) + } + + #[inline] + #[must_use] + fn extract(self, start: u32, length: u32) -> Self + { + let mask = Self::mask(start, length); + (self & mask) >> start + } +} + +macro_rules! impl_num_ext { + ($type:ty) => { + impl IntegerExt for $type { + const BITS: u32 = <$type>::BITS; + const MAX: Self = <$type>::MAX; + const MIN: Self = <$type>::MIN; + const ONE: Self = 1; + const ZERO: Self = 0; + } + }; +} + +impl_num_ext!(u8); +impl_num_ext!(u16); +impl_num_ext!(u32); +impl_num_ext!(u64); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_deposit() { + assert_eq!(15u32.deposit(8, 8, 1u32), 256 + 15); + assert_eq!(15u32.deposit(8, 1, 255u8), 256 + 15); + } + + #[test] + fn test_extract() { + assert_eq!(15u32.extract(2, 4), 3); + } + + #[test] + fn test_bit() { + assert_eq!(u8::bit(7), 128); + assert_eq!(u32::bit(16), 0x10000); + } + + #[test] + fn test_mask() { + assert_eq!(u8::mask(7, 1), 128); + assert_eq!(u32::mask(8, 8), 0xff00); + } +} diff --git a/rust/qemu-api/src/callbacks.rs b/rust/qemu-api/src/callbacks.rs new file mode 100644 index 00000000000..9642a16eb89 --- /dev/null +++ b/rust/qemu-api/src/callbacks.rs @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: MIT + +//! Utility functions to deal with callbacks from C to Rust. + +use std::{mem, ptr::NonNull}; + +/// Trait for functions (types implementing [`Fn`]) that can be used as +/// callbacks. These include both zero-capture closures and function pointers. +/// +/// In Rust, calling a function through the `Fn` trait normally requires a +/// `self` parameter, even though for zero-sized functions (including function +/// pointers) the type itself contains all necessary information to call the +/// function. This trait provides a `call` function that doesn't require `self`, +/// allowing zero-sized functions to be called using only their type. +/// +/// This enables zero-sized functions to be passed entirely through generic +/// parameters and resolved at compile-time. A typical use is a function +/// receiving an unused parameter of generic type `F` and calling it via +/// `F::call` or passing it to another function via `func::`. +/// +/// QEMU uses this trick to create wrappers to C callbacks. 
The wrappers +/// are needed to convert an opaque `*mut c_void` into a Rust reference, +/// but they only have a single opaque that they can use. The `FnCall` +/// trait makes it possible to use that opaque for `self` or any other +/// reference: +/// +/// ```ignore +/// // The compiler creates a new `rust_bh_cb` wrapper for each function +/// // passed to `qemu_bh_schedule_oneshot` below. +/// unsafe extern "C" fn rust_bh_cb FnCall<(&'a T,)>>( +/// opaque: *mut c_void, +/// ) { +/// // SAFETY: the opaque was passed as a reference to `T`. +/// F::call((unsafe { &*(opaque.cast::()) }, )) +/// } +/// +/// // The `_f` parameter is unused but it helps the compiler build the appropriate `F`. +/// // Using a reference allows usage in const context. +/// fn qemu_bh_schedule_oneshot FnCall<(&'a T,)>>(_f: &F, opaque: &T) { +/// let cb: unsafe extern "C" fn(*mut c_void) = rust_bh_cb::; +/// unsafe { +/// bindings::qemu_bh_schedule_oneshot(cb, opaque as *const T as *const c_void as *mut c_void) +/// } +/// } +/// ``` +/// +/// Each wrapper is a separate instance of `rust_bh_cb` and is therefore +/// compiled to a separate function ("monomorphization"). If you wanted +/// to pass `self` as the opaque value, the generic parameters would be +/// `rust_bh_cb::`. +/// +/// `Args` is a tuple type whose types are the arguments of the function, +/// while `R` is the returned type. +/// +/// # Examples +/// +/// ``` +/// # use qemu_api::callbacks::FnCall; +/// fn call_it FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String { +/// F::call((s,)) +/// } +/// +/// let s: String = call_it(&str::to_owned, "hello world"); +/// assert_eq!(s, "hello world"); +/// ``` +/// +/// Note that the compiler will produce a different version of `call_it` for +/// each function that is passed to it. Therefore the argument is not really +/// used, except to decide what is `F` and what `F::call` does. +/// +/// Attempting to pass a non-zero-sized closure causes a compile-time failure: +/// +/// ```compile_fail +/// # use qemu_api::callbacks::FnCall; +/// # fn call_it<'a, F: FnCall<(&'a str,), String>>(_f: &F, s: &'a str) -> String { +/// # F::call((s,)) +/// # } +/// let x: &'static str = "goodbye world"; +/// call_it(&move |_| String::from(x), "hello workd"); +/// ``` +/// +/// `()` can be used to indicate "no function": +/// +/// ``` +/// # use qemu_api::callbacks::FnCall; +/// fn optional FnCall<(&'a str,), String>>(_f: &F, s: &str) -> Option { +/// if F::IS_SOME { +/// Some(F::call((s,))) +/// } else { +/// None +/// } +/// } +/// +/// assert!(optional(&(), "hello world").is_none()); +/// ``` +/// +/// Invoking `F::call` will then be a run-time error. +/// +/// ```should_panic +/// # use qemu_api::callbacks::FnCall; +/// # fn call_it FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String { +/// # F::call((s,)) +/// # } +/// let s: String = call_it(&(), "hello world"); // panics +/// ``` +/// +/// # Safety +/// +/// Because `Self` is a zero-sized type, all instances of the type are +/// equivalent. However, in addition to this, `Self` must have no invariants +/// that could be violated by creating a reference to it. +/// +/// This is always true for zero-capture closures and function pointers, as long +/// as the code is able to name the function in the first place. +pub unsafe trait FnCall: 'static + Sync + Sized { + /// Referring to this internal constant asserts that the `Self` type is + /// zero-sized. Can be replaced by an inline const expression in + /// Rust 1.79.0+. 
+ const ASSERT_ZERO_SIZED: () = { assert!(mem::size_of::() == 0) }; + + /// Referring to this constant asserts that the `Self` type is an actual + /// function type, which can be used to catch incorrect use of `()` + /// at compile time. + /// + /// # Examples + /// + /// ```compile_fail + /// # use qemu_api::callbacks::FnCall; + /// fn call_it FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String { + /// let _: () = F::ASSERT_IS_SOME; + /// F::call((s,)) + /// } + /// + /// let s: String = call_it((), "hello world"); // does not compile + /// ``` + /// + /// Note that this can be more simply `const { assert!(F::IS_SOME) }` in + /// Rust 1.79.0 or newer. + const ASSERT_IS_SOME: () = { assert!(Self::IS_SOME) }; + + /// `true` if `Self` is an actual function type and not `()`. + /// + /// # Examples + /// + /// You can use `IS_SOME` to catch this at compile time: + /// + /// ```compile_fail + /// # use qemu_api::callbacks::FnCall; + /// fn call_it FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String { + /// const { assert!(F::IS_SOME) } + /// F::call((s,)) + /// } + /// + /// let s: String = call_it((), "hello world"); // does not compile + /// ``` + const IS_SOME: bool; + + /// `false` if `Self` is an actual function type, `true` if it is `()`. + fn is_none() -> bool { + !Self::IS_SOME + } + + /// `true` if `Self` is an actual function type, `false` if it is `()`. + fn is_some() -> bool { + Self::IS_SOME + } + + /// Call the function with the arguments in args. + fn call(a: Args) -> R; +} + +/// `()` acts as a "null" callback. Using `()` and `function` is nicer +/// than `None` and `Some(function)`, because the compiler is unable to +/// infer the type of just `None`. Therefore, the trait itself acts as the +/// option type, with functions [`FnCall::is_some`] and [`FnCall::is_none`]. +unsafe impl FnCall for () { + const IS_SOME: bool = false; + + /// Call the function with the arguments in args. + fn call(_a: Args) -> R { + panic!("callback not specified") + } +} + +macro_rules! impl_call { + ($($args:ident,)* ) => ( + // SAFETY: because each function is treated as a separate type, + // accessing `FnCall` is only possible in code that would be + // allowed to call the function. + unsafe impl FnCall<($($args,)*), R> for F + where + F: 'static + Sync + Sized + Fn($($args, )*) -> R, + { + const IS_SOME: bool = true; + + #[inline(always)] + fn call(a: ($($args,)*)) -> R { + let _: () = Self::ASSERT_ZERO_SIZED; + + // SAFETY: the safety of this method is the condition for implementing + // `FnCall`. As to the `NonNull` idiom to create a zero-sized type, + // see https://github.com/rust-lang/libs-team/issues/292. + let f: &'static F = unsafe { &*NonNull::::dangling().as_ptr() }; + let ($($args,)*) = a; + f($($args,)*) + } + } + ) +} + +impl_call!(_1, _2, _3, _4, _5,); +impl_call!(_1, _2, _3, _4,); +impl_call!(_1, _2, _3,); +impl_call!(_1, _2,); +impl_call!(_1,); +impl_call!(); + +#[cfg(test)] +mod tests { + use super::*; + + // The `_f` parameter is unused but it helps the compiler infer `F`. + fn do_test_call<'a, F: FnCall<(&'a str,), String>>(_f: &F) -> String { + F::call(("hello world",)) + } + + #[test] + fn test_call() { + assert_eq!(do_test_call(&str::to_owned), "hello world") + } + + // The `_f` parameter is unused but it helps the compiler infer `F`. 
+ fn do_test_is_some<'a, F: FnCall<(&'a str,), String>>(_f: &F) { + assert!(F::is_some()); + } + + #[test] + fn test_is_some() { + do_test_is_some(&str::to_owned); + } +} diff --git a/rust/qemu-api/src/cell.rs b/rust/qemu-api/src/cell.rs new file mode 100644 index 00000000000..27063b049d5 --- /dev/null +++ b/rust/qemu-api/src/cell.rs @@ -0,0 +1,1101 @@ +// SPDX-License-Identifier: MIT +// +// This file is based on library/core/src/cell.rs from +// Rust 1.82.0. +// +// Permission is hereby granted, free of charge, to any +// person obtaining a copy of this software and associated +// documentation files (the "Software"), to deal in the +// Software without restriction, including without +// limitation the rights to use, copy, modify, merge, +// publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice +// shall be included in all copies or substantial portions +// of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! QEMU-specific mutable containers +//! +//! Rust memory safety is based on this rule: Given an object `T`, it is only +//! possible to have one of the following: +//! +//! - Having several immutable references (`&T`) to the object (also known as +//! **aliasing**). +//! - Having one mutable reference (`&mut T`) to the object (also known as +//! **mutability**). +//! +//! This is enforced by the Rust compiler. However, there are situations where +//! this rule is not flexible enough. Sometimes it is required to have multiple +//! references to an object and yet mutate it. In particular, QEMU objects +//! usually have their pointer shared with the "outside world very early in +//! their lifetime", for example when they create their +//! [`MemoryRegion`s](crate::bindings::MemoryRegion). Therefore, individual +//! parts of a device must be made mutable in a controlled manner; this module +//! provides the tools to do so. +//! +//! ## Cell types +//! +//! [`BqlCell`] and [`BqlRefCell`] allow doing this via the Big QEMU Lock. +//! While they are essentially the same single-threaded primitives that are +//! available in `std::cell`, the BQL allows them to be used from a +//! multi-threaded context and to share references across threads, while +//! maintaining Rust's safety guarantees. For this reason, unlike +//! their `std::cell` counterparts, `BqlCell` and `BqlRefCell` implement the +//! `Sync` trait. +//! +//! BQL checks are performed in debug builds but can be optimized away in +//! release builds, providing runtime safety during development with no overhead +//! in production. +//! +//! The two provide different ways of handling interior mutability. +//! `BqlRefCell` is best suited for data that is primarily accessed by the +//! device's own methods, where multiple reads and writes can be grouped within +//! a single borrow and a mutable reference can be passed around. Instead, +//! 
[`BqlCell`] is a better choice when sharing small pieces of data with +//! external code (especially C code), because it provides simple get/set +//! operations that can be used one at a time. +//! +//! Warning: While `BqlCell` and `BqlRefCell` are similar to their `std::cell` +//! counterparts, they are not interchangeable. Using `std::cell` types in +//! QEMU device implementations is usually incorrect and can lead to +//! thread-safety issues. +//! +//! ### Example +//! +//! ``` +//! # use qemu_api::prelude::*; +//! # use qemu_api::{cell::BqlRefCell, irq::InterruptSource, irq::IRQState}; +//! # use qemu_api::{sysbus::SysBusDevice, qom::Owned, qom::ParentField}; +//! # const N_GPIOS: usize = 8; +//! # struct PL061Registers { /* ... */ } +//! # unsafe impl ObjectType for PL061State { +//! # type Class = ::Class; +//! # const TYPE_NAME: &'static std::ffi::CStr = c"pl061"; +//! # } +//! struct PL061State { +//! parent_obj: ParentField, +//! +//! // Configuration is read-only after initialization +//! pullups: u32, +//! pulldowns: u32, +//! +//! // Single values shared with C code use BqlCell, in this case via InterruptSource +//! out: [InterruptSource; N_GPIOS], +//! interrupt: InterruptSource, +//! +//! // Larger state accessed by device methods uses BqlRefCell or Mutex +//! registers: BqlRefCell, +//! } +//! ``` +//! +//! ### `BqlCell` +//! +//! [`BqlCell`] implements interior mutability by moving values in and out of +//! the cell. That is, an `&mut T` to the inner value can never be obtained as +//! long as the cell is shared. The value itself cannot be directly obtained +//! without copying it, cloning it, or replacing it with something else. This +//! type provides the following methods, all of which can be called only while +//! the BQL is held: +//! +//! - For types that implement [`Copy`], the [`get`](BqlCell::get) method +//! retrieves the current interior value by duplicating it. +//! - For types that implement [`Default`], the [`take`](BqlCell::take) method +//! replaces the current interior value with [`Default::default()`] and +//! returns the replaced value. +//! - All types have: +//! - [`replace`](BqlCell::replace): replaces the current interior value and +//! returns the replaced value. +//! - [`set`](BqlCell::set): this method replaces the interior value, +//! dropping the replaced value. +//! +//! ### `BqlRefCell` +//! +//! [`BqlRefCell`] uses Rust's lifetimes to implement "dynamic borrowing", a +//! process whereby one can claim temporary, exclusive, mutable access to the +//! inner value: +//! +//! ```ignore +//! fn clear_interrupts(&self, val: u32) { +//! // A mutable borrow gives read-write access to the registers +//! let mut regs = self.registers.borrow_mut(); +//! let old = regs.interrupt_status(); +//! regs.update_interrupt_status(old & !val); +//! } +//! ``` +//! +//! Borrows for `BqlRefCell`s are tracked at _runtime_, unlike Rust's native +//! reference types which are entirely tracked statically, at compile time. +//! Multiple immutable borrows are allowed via [`borrow`](BqlRefCell::borrow), +//! or a single mutable borrow via [`borrow_mut`](BqlRefCell::borrow_mut). The +//! thread will panic if these rules are violated or if the BQL is not held. +//! +//! ## Opaque wrappers +//! +//! The cell types from the previous section are useful at the boundaries +//! of code that requires interior mutability. When writing glue code that +//! interacts directly with C structs, however, it is useful to operate +//! at a lower level. +//! +//! 
C functions often violate Rust's fundamental assumptions about memory +//! safety by modifying memory even if it is shared. Furthermore, C structs +//! often start their life uninitialized and may be populated lazily. +//! +//! For this reason, this module provides the [`Opaque`] type to opt out +//! of Rust's usual guarantees about the wrapped type. Access to the wrapped +//! value is always through raw pointers, obtained via methods like +//! [`as_mut_ptr()`](Opaque::as_mut_ptr) and [`as_ptr()`](Opaque::as_ptr). These +//! pointers can then be passed to C functions or dereferenced; both actions +//! require `unsafe` blocks, making it clear where safety guarantees must be +//! manually verified. For example +//! +//! ```ignore +//! unsafe { +//! let state = Opaque::::uninit(); +//! qemu_struct_init(state.as_mut_ptr()); +//! } +//! ``` +//! +//! [`Opaque`] will usually be wrapped one level further, so that +//! bridge methods can be added to the wrapper: +//! +//! ```ignore +//! pub struct MyStruct(Opaque); +//! +//! impl MyStruct { +//! fn new() -> Pin> { +//! let result = Box::pin(unsafe { Opaque::uninit() }); +//! unsafe { qemu_struct_init(result.as_mut_ptr()) }; +//! result +//! } +//! } +//! ``` +//! +//! This pattern of wrapping bindgen-generated types in [`Opaque`] provides +//! several advantages: +//! +//! * The choice of traits to be implemented is not limited by the +//! bindgen-generated code. For example, [`Drop`] can be added without +//! disabling [`Copy`] on the underlying bindgen type +//! +//! * [`Send`] and [`Sync`] implementations can be controlled by the wrapper +//! type rather than being automatically derived from the C struct's layout +//! +//! * Methods can be implemented in a separate crate from the bindgen-generated +//! bindings +//! +//! * [`Debug`](std::fmt::Debug) and [`Display`](std::fmt::Display) +//! implementations can be customized to be more readable than the raw C +//! struct representation +//! +//! The [`Opaque`] type does not include BQL validation; it is possible to +//! assert in the code that the right lock is taken, to use it together +//! with a custom lock guard type, or to let C code take the lock, as +//! appropriate. It is also possible to use it with non-thread-safe +//! types, since by default (unlike [`BqlCell`] and [`BqlRefCell`] +//! it is neither `Sync` nor `Send`. +//! +//! While [`Opaque`] is necessary for C interop, it should be used sparingly +//! and only at FFI boundaries. For QEMU-specific types that need interior +//! mutability, prefer [`BqlCell`] or [`BqlRefCell`]. + +use std::{ + cell::{Cell, UnsafeCell}, + cmp::Ordering, + fmt, + marker::{PhantomData, PhantomPinned}, + mem::{self, MaybeUninit}, + ops::{Deref, DerefMut}, + ptr::NonNull, +}; + +use crate::bindings; + +/// An internal function that is used by doctests. +pub fn bql_start_test() { + // SAFETY: integration tests are run with --test-threads=1, while + // unit tests and doctests are not multithreaded and do not have + // any BQL-protected data. Just set bql_locked to true. + unsafe { + bindings::rust_bql_mock_lock(); + } +} + +pub fn bql_locked() -> bool { + // SAFETY: the function does nothing but return a thread-local bool + unsafe { bindings::bql_locked() } +} + +fn bql_block_unlock(increase: bool) { + // SAFETY: this only adjusts a counter + unsafe { + bindings::bql_block_unlock(increase); + } +} + +/// A mutable memory location that is protected by the Big QEMU Lock. 
+/// +/// # Memory layout +/// +/// `BqlCell` has the same in-memory representation as its inner type `T`. +#[repr(transparent)] +pub struct BqlCell { + value: UnsafeCell, +} + +// SAFETY: Same as for std::sync::Mutex. In the end this *is* a Mutex, +// except it is stored out-of-line +unsafe impl Send for BqlCell {} +unsafe impl Sync for BqlCell {} + +impl Clone for BqlCell { + #[inline] + fn clone(&self) -> BqlCell { + BqlCell::new(self.get()) + } +} + +impl Default for BqlCell { + /// Creates a `BqlCell`, with the `Default` value for T. + #[inline] + fn default() -> BqlCell { + BqlCell::new(Default::default()) + } +} + +impl PartialEq for BqlCell { + #[inline] + fn eq(&self, other: &BqlCell) -> bool { + self.get() == other.get() + } +} + +impl Eq for BqlCell {} + +impl PartialOrd for BqlCell { + #[inline] + fn partial_cmp(&self, other: &BqlCell) -> Option { + self.get().partial_cmp(&other.get()) + } +} + +impl Ord for BqlCell { + #[inline] + fn cmp(&self, other: &BqlCell) -> Ordering { + self.get().cmp(&other.get()) + } +} + +impl From for BqlCell { + /// Creates a new `BqlCell` containing the given value. + fn from(t: T) -> BqlCell { + BqlCell::new(t) + } +} + +impl fmt::Debug for BqlCell { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.get().fmt(f) + } +} + +impl fmt::Display for BqlCell { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.get().fmt(f) + } +} + +impl BqlCell { + /// Creates a new `BqlCell` containing the given value. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlCell::new(5); + /// ``` + #[inline] + pub const fn new(value: T) -> BqlCell { + BqlCell { + value: UnsafeCell::new(value), + } + } + + /// Sets the contained value. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlCell::new(5); + /// + /// c.set(10); + /// ``` + #[inline] + pub fn set(&self, val: T) { + self.replace(val); + } + + /// Replaces the contained value with `val`, and returns the old contained + /// value. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let cell = BqlCell::new(5); + /// assert_eq!(cell.get(), 5); + /// assert_eq!(cell.replace(10), 5); + /// assert_eq!(cell.get(), 10); + /// ``` + #[inline] + pub fn replace(&self, val: T) -> T { + assert!(bql_locked()); + // SAFETY: This can cause data races if called from multiple threads, + // but it won't happen as long as C code accesses the value + // under BQL protection only. + mem::replace(unsafe { &mut *self.value.get() }, val) + } + + /// Unwraps the value, consuming the cell. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlCell::new(5); + /// let five = c.into_inner(); + /// + /// assert_eq!(five, 5); + /// ``` + pub fn into_inner(self) -> T { + assert!(bql_locked()); + self.value.into_inner() + } +} + +impl BqlCell { + /// Returns a copy of the contained value. 
+ /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlCell::new(5); + /// + /// let five = c.get(); + /// ``` + #[inline] + pub fn get(&self) -> T { + assert!(bql_locked()); + // SAFETY: This can cause data races if called from multiple threads, + // but it won't happen as long as C code accesses the value + // under BQL protection only. + unsafe { *self.value.get() } + } +} + +impl BqlCell { + /// Returns a raw pointer to the underlying data in this cell. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlCell::new(5); + /// + /// let ptr = c.as_ptr(); + /// ``` + #[inline] + pub const fn as_ptr(&self) -> *mut T { + self.value.get() + } +} + +impl BqlCell { + /// Takes the value of the cell, leaving `Default::default()` in its place. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlCell::new(5); + /// let five = c.take(); + /// + /// assert_eq!(five, 5); + /// assert_eq!(c.into_inner(), 0); + /// ``` + pub fn take(&self) -> T { + self.replace(Default::default()) + } +} + +/// A mutable memory location with dynamically checked borrow rules, +/// protected by the Big QEMU Lock. +/// +/// See the [module-level documentation](self) for more. +/// +/// # Memory layout +/// +/// `BqlRefCell` starts with the same in-memory representation as its +/// inner type `T`. +#[repr(C)] +pub struct BqlRefCell { + // It is important that this is the first field (which is not the case + // for std::cell::BqlRefCell), so that we can use offset_of! on it. + // UnsafeCell and repr(C) both prevent usage of niches. + value: UnsafeCell, + borrow: Cell, + // Stores the location of the earliest currently active borrow. + // This gets updated whenever we go from having zero borrows + // to having a single borrow. When a borrow occurs, this gets included + // in the panic message + #[cfg(feature = "debug_cell")] + borrowed_at: Cell>>, +} + +// Positive values represent the number of `BqlRef` active. Negative values +// represent the number of `BqlRefMut` active. Right now QEMU's implementation +// does not allow to create `BqlRefMut`s that refer to distinct, nonoverlapping +// components of a `BqlRefCell` (e.g., different ranges of a slice). +// +// `BqlRef` and `BqlRefMut` are both two words in size, and so there will likely +// never be enough `BqlRef`s or `BqlRefMut`s in existence to overflow half of +// the `usize` range. Thus, a `BorrowFlag` will probably never overflow or +// underflow. However, this is not a guarantee, as a pathological program could +// repeatedly create and then mem::forget `BqlRef`s or `BqlRefMut`s. Thus, all +// code must explicitly check for overflow and underflow in order to avoid +// unsafety, or at least behave correctly in the event that overflow or +// underflow happens (e.g., see BorrowRef::new). +type BorrowFlag = isize; +const UNUSED: BorrowFlag = 0; + +#[inline(always)] +const fn is_writing(x: BorrowFlag) -> bool { + x < UNUSED +} + +#[inline(always)] +const fn is_reading(x: BorrowFlag) -> bool { + x > UNUSED +} + +impl BqlRefCell { + /// Creates a new `BqlRefCell` containing `value`. 
+ /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlRefCell; + /// + /// let c = BqlRefCell::new(5); + /// ``` + #[inline] + pub const fn new(value: T) -> BqlRefCell { + BqlRefCell { + value: UnsafeCell::new(value), + borrow: Cell::new(UNUSED), + #[cfg(feature = "debug_cell")] + borrowed_at: Cell::new(None), + } + } +} + +// This ensures the panicking code is outlined from `borrow_mut` for +// `BqlRefCell`. +#[inline(never)] +#[cold] +#[cfg(feature = "debug_cell")] +fn panic_already_borrowed(source: &Cell>>) -> ! { + // If a borrow occurred, then we must already have an outstanding borrow, + // so `borrowed_at` will be `Some` + panic!("already borrowed at {:?}", source.take().unwrap()) +} + +#[inline(never)] +#[cold] +#[cfg(not(feature = "debug_cell"))] +fn panic_already_borrowed() -> ! { + panic!("already borrowed") +} + +impl BqlRefCell { + #[inline] + #[allow(clippy::unused_self)] + fn panic_already_borrowed(&self) -> ! { + #[cfg(feature = "debug_cell")] + { + panic_already_borrowed(&self.borrowed_at) + } + #[cfg(not(feature = "debug_cell"))] + { + panic_already_borrowed() + } + } + + /// Immutably borrows the wrapped value. + /// + /// The borrow lasts until the returned `BqlRef` exits scope. Multiple + /// immutable borrows can be taken out at the same time. + /// + /// # Panics + /// + /// Panics if the value is currently mutably borrowed. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlRefCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlRefCell::new(5); + /// + /// let borrowed_five = c.borrow(); + /// let borrowed_five2 = c.borrow(); + /// ``` + /// + /// An example of panic: + /// + /// ```should_panic + /// use qemu_api::cell::BqlRefCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlRefCell::new(5); + /// + /// let m = c.borrow_mut(); + /// let b = c.borrow(); // this causes a panic + /// ``` + #[inline] + #[track_caller] + pub fn borrow(&self) -> BqlRef<'_, T> { + if let Some(b) = BorrowRef::new(&self.borrow) { + // `borrowed_at` is always the *first* active borrow + if b.borrow.get() == 1 { + #[cfg(feature = "debug_cell")] + self.borrowed_at.set(Some(std::panic::Location::caller())); + } + + bql_block_unlock(true); + + // SAFETY: `BorrowRef` ensures that there is only immutable access + // to the value while borrowed. + let value = unsafe { NonNull::new_unchecked(self.value.get()) }; + BqlRef { value, borrow: b } + } else { + self.panic_already_borrowed() + } + } + + /// Mutably borrows the wrapped value. + /// + /// The borrow lasts until the returned `BqlRefMut` or all `BqlRefMut`s + /// derived from it exit scope. The value cannot be borrowed while this + /// borrow is active. + /// + /// # Panics + /// + /// Panics if the value is currently borrowed. 
+ /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlRefCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlRefCell::new("hello".to_owned()); + /// + /// *c.borrow_mut() = "bonjour".to_owned(); + /// + /// assert_eq!(&*c.borrow(), "bonjour"); + /// ``` + /// + /// An example of panic: + /// + /// ```should_panic + /// use qemu_api::cell::BqlRefCell; + /// # qemu_api::cell::bql_start_test(); + /// + /// let c = BqlRefCell::new(5); + /// let m = c.borrow(); + /// + /// let b = c.borrow_mut(); // this causes a panic + /// ``` + #[inline] + #[track_caller] + pub fn borrow_mut(&self) -> BqlRefMut<'_, T> { + if let Some(b) = BorrowRefMut::new(&self.borrow) { + #[cfg(feature = "debug_cell")] + { + self.borrowed_at.set(Some(std::panic::Location::caller())); + } + + // SAFETY: this only adjusts a counter + bql_block_unlock(true); + + // SAFETY: `BorrowRefMut` guarantees unique access. + let value = unsafe { NonNull::new_unchecked(self.value.get()) }; + BqlRefMut { + value, + _borrow: b, + marker: PhantomData, + } + } else { + self.panic_already_borrowed() + } + } + + /// Returns a raw pointer to the underlying data in this cell. + /// + /// # Examples + /// + /// ``` + /// use qemu_api::cell::BqlRefCell; + /// + /// let c = BqlRefCell::new(5); + /// + /// let ptr = c.as_ptr(); + /// ``` + #[inline] + pub const fn as_ptr(&self) -> *mut T { + self.value.get() + } +} + +// SAFETY: Same as for std::sync::Mutex. In the end this is a Mutex that is +// stored out-of-line. Even though BqlRefCell includes Cells, they are +// themselves protected by the Big QEMU Lock. Furtheremore, the Big QEMU +// Lock cannot be released while any borrows is active. +unsafe impl Send for BqlRefCell where T: Send {} +unsafe impl Sync for BqlRefCell {} + +impl Clone for BqlRefCell { + /// # Panics + /// + /// Panics if the value is currently mutably borrowed. + #[inline] + #[track_caller] + fn clone(&self) -> BqlRefCell { + BqlRefCell::new(self.borrow().clone()) + } + + /// # Panics + /// + /// Panics if `source` is currently mutably borrowed. + #[inline] + #[track_caller] + fn clone_from(&mut self, source: &Self) { + self.value.get_mut().clone_from(&source.borrow()) + } +} + +impl Default for BqlRefCell { + /// Creates a `BqlRefCell`, with the `Default` value for T. + #[inline] + fn default() -> BqlRefCell { + BqlRefCell::new(Default::default()) + } +} + +impl PartialEq for BqlRefCell { + /// # Panics + /// + /// Panics if the value in either `BqlRefCell` is currently mutably + /// borrowed. + #[inline] + fn eq(&self, other: &BqlRefCell) -> bool { + *self.borrow() == *other.borrow() + } +} + +impl Eq for BqlRefCell {} + +impl PartialOrd for BqlRefCell { + /// # Panics + /// + /// Panics if the value in either `BqlRefCell` is currently mutably + /// borrowed. + #[inline] + fn partial_cmp(&self, other: &BqlRefCell) -> Option { + self.borrow().partial_cmp(&*other.borrow()) + } +} + +impl Ord for BqlRefCell { + /// # Panics + /// + /// Panics if the value in either `BqlRefCell` is currently mutably + /// borrowed. + #[inline] + fn cmp(&self, other: &BqlRefCell) -> Ordering { + self.borrow().cmp(&*other.borrow()) + } +} + +impl From for BqlRefCell { + /// Creates a new `BqlRefCell` containing the given value. 
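Example (editor's sketch, with assumed types): the intended use of the `borrow()`/`borrow_mut()` pair above is to keep a device's non-`Copy` register state behind a `BqlRefCell`, so that `&self` callbacks can still mutate it while the BQL is held:

```rust
use qemu_api::cell::BqlRefCell;

#[derive(Default)]
struct Regs {
    ctrl: u32,
    status: u32,
}

struct Dev {
    regs: BqlRefCell<Regs>,
}

impl Dev {
    fn write_ctrl(&self, val: u32) {
        // Panics if a conflicting borrow is active; the BQL cannot be
        // released while the returned BqlRefMut is alive.
        let mut regs = self.regs.borrow_mut();
        regs.ctrl = val;
        regs.status |= 1;
    }

    fn status(&self) -> u32 {
        self.regs.borrow().status
    }
}
```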
+ fn from(t: T) -> BqlRefCell { + BqlRefCell::new(t) + } +} + +struct BorrowRef<'b> { + borrow: &'b Cell, +} + +impl<'b> BorrowRef<'b> { + #[inline] + fn new(borrow: &'b Cell) -> Option> { + let b = borrow.get().wrapping_add(1); + if !is_reading(b) { + // Incrementing borrow can result in a non-reading value (<= 0) in these cases: + // 1. It was < 0, i.e. there are writing borrows, so we can't allow a read + // borrow due to Rust's reference aliasing rules + // 2. It was isize::MAX (the max amount of reading borrows) and it overflowed + // into isize::MIN (the max amount of writing borrows) so we can't allow an + // additional read borrow because isize can't represent so many read borrows + // (this can only happen if you mem::forget more than a small constant amount + // of `BqlRef`s, which is not good practice) + None + } else { + // Incrementing borrow can result in a reading value (> 0) in these cases: + // 1. It was = 0, i.e. it wasn't borrowed, and we are taking the first read + // borrow + // 2. It was > 0 and < isize::MAX, i.e. there were read borrows, and isize is + // large enough to represent having one more read borrow + borrow.set(b); + Some(BorrowRef { borrow }) + } + } +} + +impl Drop for BorrowRef<'_> { + #[inline] + fn drop(&mut self) { + let borrow = self.borrow.get(); + debug_assert!(is_reading(borrow)); + self.borrow.set(borrow - 1); + bql_block_unlock(false) + } +} + +impl Clone for BorrowRef<'_> { + #[inline] + fn clone(&self) -> Self { + BorrowRef::new(self.borrow).unwrap() + } +} + +/// Wraps a borrowed reference to a value in a `BqlRefCell` box. +/// A wrapper type for an immutably borrowed value from a `BqlRefCell`. +/// +/// See the [module-level documentation](self) for more. +pub struct BqlRef<'b, T: 'b> { + // NB: we use a pointer instead of `&'b T` to avoid `noalias` violations, because a + // `BqlRef` argument doesn't hold immutability for its whole scope, only until it drops. + // `NonNull` is also covariant over `T`, just like we would have with `&T`. + value: NonNull, + borrow: BorrowRef<'b>, +} + +impl Deref for BqlRef<'_, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + // SAFETY: the value is accessible as long as we hold our borrow. + unsafe { self.value.as_ref() } + } +} + +impl<'b, T> BqlRef<'b, T> { + /// Copies a `BqlRef`. + /// + /// The `BqlRefCell` is already immutably borrowed, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `BqlRef::clone(...)`. A `Clone` implementation or a method would + /// interfere with the widespread use of `r.borrow().clone()` to clone + /// the contents of a `BqlRefCell`. + #[must_use] + #[inline] + #[allow(clippy::should_implement_trait)] + pub fn clone(orig: &BqlRef<'b, T>) -> BqlRef<'b, T> { + BqlRef { + value: orig.value, + borrow: orig.borrow.clone(), + } + } +} + +impl fmt::Debug for BqlRef<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +impl fmt::Display for BqlRef<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +struct BorrowRefMut<'b> { + borrow: &'b Cell, +} + +impl<'b> BorrowRefMut<'b> { + #[inline] + fn new(borrow: &'b Cell) -> Option> { + // There must currently be no existing references when borrow_mut() is + // called, so we explicitly only allow going from UNUSED to UNUSED - 1. 
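Aside (standalone illustration, mirroring the private definitions above): the sign convention behind `BorrowFlag` can be checked in isolation, which makes the overflow reasoning in `BorrowRef::new` easier to follow:

```rust
type BorrowFlag = isize;
const UNUSED: BorrowFlag = 0;

const fn is_reading(x: BorrowFlag) -> bool { x > UNUSED }
const fn is_writing(x: BorrowFlag) -> bool { x < UNUSED }

fn demo() {
    // First shared borrow: 0 -> 1, still in the "reading" range.
    assert!(is_reading(UNUSED.wrapping_add(1)));
    // A wrapping increment from isize::MAX lands in the "writing" range,
    // which is why BorrowRef::new refuses it rather than storing it.
    assert!(is_writing(isize::MAX.wrapping_add(1)));
    // Unique borrow: 0 -> -1, only allowed when the flag is exactly UNUSED.
    assert!(is_writing(UNUSED - 1));
}
```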
+ match borrow.get() { + UNUSED => { + borrow.set(UNUSED - 1); + Some(BorrowRefMut { borrow }) + } + _ => None, + } + } +} + +impl Drop for BorrowRefMut<'_> { + #[inline] + fn drop(&mut self) { + let borrow = self.borrow.get(); + debug_assert!(is_writing(borrow)); + self.borrow.set(borrow + 1); + bql_block_unlock(false) + } +} + +/// A wrapper type for a mutably borrowed value from a `BqlRefCell`. +/// +/// See the [module-level documentation](self) for more. +pub struct BqlRefMut<'b, T: 'b> { + // NB: we use a pointer instead of `&'b mut T` to avoid `noalias` violations, because a + // `BqlRefMut` argument doesn't hold exclusivity for its whole scope, only until it drops. + value: NonNull, + _borrow: BorrowRefMut<'b>, + // `NonNull` is covariant over `T`, so we need to reintroduce invariance. + marker: PhantomData<&'b mut T>, +} + +impl Deref for BqlRefMut<'_, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + // SAFETY: the value is accessible as long as we hold our borrow. + unsafe { self.value.as_ref() } + } +} + +impl DerefMut for BqlRefMut<'_, T> { + #[inline] + fn deref_mut(&mut self) -> &mut T { + // SAFETY: the value is accessible as long as we hold our borrow. + unsafe { self.value.as_mut() } + } +} + +impl fmt::Debug for BqlRefMut<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +impl fmt::Display for BqlRefMut<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (**self).fmt(f) + } +} + +/// Stores an opaque value that is shared with C code. +/// +/// Often, C structs can changed when calling a C function even if they are +/// behind a shared Rust reference, or they can be initialized lazily and have +/// invalid bit patterns (e.g. `3` for a [`bool`]). This goes against Rust's +/// strict aliasing rules, which normally prevent mutation through shared +/// references. +/// +/// Wrapping the struct with `Opaque` ensures that the Rust compiler does not +/// assume the usual constraints that Rust structs require, and allows using +/// shared references on the Rust side. +/// +/// `Opaque` is `#[repr(transparent)]`, so that it matches the memory layout +/// of `T`. +#[repr(transparent)] +pub struct Opaque { + value: UnsafeCell>, + // PhantomPinned also allows multiple references to the `Opaque`, i.e. + // one `&mut Opaque` can coexist with a `&mut T` or any number of `&T`; + // see https://docs.rs/pinned-aliasable/latest/pinned_aliasable/. + _pin: PhantomPinned, +} + +impl Opaque { + /// Creates a new shared reference from a C pointer + /// + /// # Safety + /// + /// The pointer must be valid, though it need not point to a valid value. + pub unsafe fn from_raw<'a>(ptr: *mut T) -> &'a Self { + let ptr = NonNull::new(ptr).unwrap().cast::(); + // SAFETY: Self is a transparent wrapper over T + unsafe { ptr.as_ref() } + } + + /// Creates a new opaque object with uninitialized contents. + /// + /// # Safety + /// + /// Ultimately the pointer to the returned value will be dereferenced + /// in another `unsafe` block, for example when passing it to a C function, + /// but the functions containing the dereference are usually safe. The + /// value returned from `uninit()` must be initialized and pinned before + /// calling them. + #[allow(clippy::missing_const_for_fn)] + pub unsafe fn uninit() -> Self { + Self { + value: UnsafeCell::new(MaybeUninit::uninit()), + _pin: PhantomPinned, + } + } + + /// Creates a new opaque object with zeroed contents. 
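Example (editor's sketch, with a hypothetical C struct): the pattern `Opaque` is designed for is a C callback handing a raw pointer back to Rust, which then rebuilds a shared reference without asserting anything about the bytes behind it:

```rust
use qemu_api::cell::Opaque;

// Stand-in for a bindgen-generated C struct (hypothetical).
#[repr(C)]
struct CDeviceState {
    level: u32,
}

unsafe extern "C" fn on_event(opaque: *mut std::ffi::c_void) {
    // SAFETY: the registration code passed a valid CDeviceState pointer.
    let dev: &Opaque<CDeviceState> = unsafe { Opaque::from_raw(opaque.cast()) };
    // Actual reads and writes still go through raw pointers, typically by
    // handing dev.as_mut_ptr() to a C helper.
    let _raw: *mut CDeviceState = dev.as_mut_ptr();
}
```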
+ /// + /// # Safety + /// + /// Ultimately the pointer to the returned value will be dereferenced + /// in another `unsafe` block, for example when passing it to a C function, + /// but the functions containing the dereference are usually safe. The + /// value returned from `uninit()` must be pinned (and possibly initialized) + /// before calling them. + #[allow(clippy::missing_const_for_fn)] + pub unsafe fn zeroed() -> Self { + Self { + value: UnsafeCell::new(MaybeUninit::zeroed()), + _pin: PhantomPinned, + } + } + + /// Returns a raw mutable pointer to the opaque data. + pub const fn as_mut_ptr(&self) -> *mut T { + UnsafeCell::get(&self.value).cast() + } + + /// Returns a raw pointer to the opaque data. + pub const fn as_ptr(&self) -> *const T { + self.as_mut_ptr().cast_const() + } + + /// Returns a raw pointer to the opaque data that can be passed to a + /// C function as `void *`. + pub const fn as_void_ptr(&self) -> *mut std::ffi::c_void { + UnsafeCell::get(&self.value).cast() + } + + /// Converts a raw pointer to the wrapped type. + pub const fn raw_get(slot: *mut Self) -> *mut T { + // Compare with Linux's raw_get method, which goes through an UnsafeCell + // because it takes a *const Self instead. + slot.cast() + } +} + +impl fmt::Debug for Opaque { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut name: String = "Opaque<".to_string(); + name += std::any::type_name::(); + name += ">"; + f.debug_tuple(&name).field(&self.as_ptr()).finish() + } +} + +impl Opaque { + /// Creates a new opaque object with default contents. + /// + /// # Safety + /// + /// Ultimately the pointer to the returned value will be dereferenced + /// in another `unsafe` block, for example when passing it to a C function, + /// but the functions containing the dereference are usually safe. The + /// value returned from `uninit()` must be pinned before calling them. + pub unsafe fn new() -> Self { + Self { + value: UnsafeCell::new(MaybeUninit::new(T::default())), + _pin: PhantomPinned, + } + } +} + +/// Annotates [`Self`] as a transparent wrapper for another type. +/// +/// Usually defined via the [`qemu_api_macros::Wrapper`] derive macro. +/// +/// # Examples +/// +/// ``` +/// # use std::mem::ManuallyDrop; +/// # use qemu_api::cell::Wrapper; +/// #[repr(transparent)] +/// pub struct Example { +/// inner: ManuallyDrop, +/// } +/// +/// unsafe impl Wrapper for Example { +/// type Wrapped = String; +/// } +/// ``` +/// +/// # Safety +/// +/// `Self` must be a `#[repr(transparent)]` wrapper for the `Wrapped` type, +/// whether directly or indirectly. +/// +/// # Methods +/// +/// By convention, types that implement Wrapper also implement the following +/// methods: +/// +/// ```ignore +/// pub const unsafe fn from_raw<'a>(value: *mut Self::Wrapped) -> &'a Self; +/// pub const unsafe fn as_mut_ptr(&self) -> *mut Self::Wrapped; +/// pub const unsafe fn as_ptr(&self) -> *const Self::Wrapped; +/// pub const unsafe fn raw_get(slot: *mut Self) -> *const Self::Wrapped; +/// ``` +/// +/// They are not defined here to allow them to be `const`. +pub unsafe trait Wrapper { + type Wrapped; +} + +unsafe impl Wrapper for Opaque { + type Wrapped = T; +} diff --git a/rust/qemu-api/src/chardev.rs b/rust/qemu-api/src/chardev.rs new file mode 100644 index 00000000000..6e0590d758e --- /dev/null +++ b/rust/qemu-api/src/chardev.rs @@ -0,0 +1,260 @@ +// Copyright 2024 Red Hat, Inc. +// Author(s): Paolo Bonzini +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Bindings for character devices +//! +//! 
Character devices in QEMU can run under the big QEMU lock or in a separate +//! `GMainContext`. Here we only support the former, because the bindings +//! enforce that the BQL is taken whenever the functions in [`CharBackend`] are +//! called. + +use std::{ + ffi::{c_int, c_void, CStr}, + fmt::{self, Debug}, + io::{self, ErrorKind, Write}, + marker::PhantomPinned, + ptr::addr_of_mut, + slice, +}; + +use crate::{ + bindings, + callbacks::FnCall, + cell::{BqlRefMut, Opaque}, + prelude::*, +}; + +/// A safe wrapper around [`bindings::Chardev`]. +#[repr(transparent)] +#[derive(qemu_api_macros::Wrapper)] +pub struct Chardev(Opaque); + +pub type ChardevClass = bindings::ChardevClass; +pub type Event = bindings::QEMUChrEvent; + +/// A safe wrapper around [`bindings::CharBackend`], denoting the character +/// back-end that is used for example by a device. Compared to the +/// underlying C struct it adds BQL protection, and is marked as pinned +/// because the QOM object ([`bindings::Chardev`]) contains a pointer to +/// the `CharBackend`. +pub struct CharBackend { + inner: BqlRefCell, + _pin: PhantomPinned, +} + +impl Write for BqlRefMut<'_, bindings::CharBackend> { + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } + + fn write(&mut self, buf: &[u8]) -> io::Result { + let chr: &mut bindings::CharBackend = self; + + let len = buf.len().try_into().unwrap(); + let r = unsafe { bindings::qemu_chr_fe_write(addr_of_mut!(*chr), buf.as_ptr(), len) }; + errno::into_io_result(r).map(|cnt| cnt as usize) + } + + fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { + let chr: &mut bindings::CharBackend = self; + + let len = buf.len().try_into().unwrap(); + let r = unsafe { bindings::qemu_chr_fe_write_all(addr_of_mut!(*chr), buf.as_ptr(), len) }; + errno::into_io_result(r).and_then(|cnt| { + if cnt as usize == buf.len() { + Ok(()) + } else { + Err(ErrorKind::WriteZero.into()) + } + }) + } +} + +impl Debug for CharBackend { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // SAFETY: accessed just to print the values + let chr = self.inner.as_ptr(); + Debug::fmt(unsafe { &*chr }, f) + } +} + +// FIXME: use something like PinnedDrop from the pinned_init crate +impl Drop for CharBackend { + fn drop(&mut self) { + self.disable_handlers(); + } +} + +impl CharBackend { + /// Enable the front-end's character device handlers, if there is an + /// associated `Chardev`. + pub fn enable_handlers< + 'chardev, + 'owner: 'chardev, + T, + CanReceiveFn: for<'a> FnCall<(&'a T,), u32>, + ReceiveFn: for<'a, 'b> FnCall<(&'a T, &'b [u8])>, + EventFn: for<'a> FnCall<(&'a T, Event)>, + >( + // When "self" is dropped, the handlers are automatically disabled. + // However, this is not necessarily true if the owner is dropped. + // So require the owner to outlive the character device. 
+ &'chardev self, + owner: &'owner T, + _can_receive: CanReceiveFn, + _receive: ReceiveFn, + _event: EventFn, + ) { + unsafe extern "C" fn rust_can_receive_cb FnCall<(&'a T,), u32>>( + opaque: *mut c_void, + ) -> c_int { + // SAFETY: the values are safe according to the contract of + // enable_handlers() and qemu_chr_fe_set_handlers() + let owner: &T = unsafe { &*(opaque.cast::()) }; + let r = F::call((owner,)); + r.try_into().unwrap() + } + + unsafe extern "C" fn rust_receive_cb FnCall<(&'a T, &'b [u8])>>( + opaque: *mut c_void, + buf: *const u8, + size: c_int, + ) { + // SAFETY: the values are safe according to the contract of + // enable_handlers() and qemu_chr_fe_set_handlers() + let owner: &T = unsafe { &*(opaque.cast::()) }; + let buf = unsafe { slice::from_raw_parts(buf, size.try_into().unwrap()) }; + F::call((owner, buf)) + } + + unsafe extern "C" fn rust_event_cb FnCall<(&'a T, Event)>>( + opaque: *mut c_void, + event: Event, + ) { + // SAFETY: the values are safe according to the contract of + // enable_handlers() and qemu_chr_fe_set_handlers() + let owner: &T = unsafe { &*(opaque.cast::()) }; + F::call((owner, event)) + } + + let _: () = CanReceiveFn::ASSERT_IS_SOME; + let receive_cb: Option = + if ReceiveFn::is_some() { + Some(rust_receive_cb::) + } else { + None + }; + let event_cb: Option = if EventFn::is_some() { + Some(rust_event_cb::) + } else { + None + }; + + let mut chr = self.inner.borrow_mut(); + // SAFETY: the borrow promises that the BQL is taken + unsafe { + bindings::qemu_chr_fe_set_handlers( + addr_of_mut!(*chr), + Some(rust_can_receive_cb::), + receive_cb, + event_cb, + None, + (owner as *const T).cast_mut().cast::(), + core::ptr::null_mut(), + true, + ); + } + } + + /// Disable the front-end's character device handlers. + pub fn disable_handlers(&self) { + let mut chr = self.inner.borrow_mut(); + // SAFETY: the borrow promises that the BQL is taken + unsafe { + bindings::qemu_chr_fe_set_handlers( + addr_of_mut!(*chr), + None, + None, + None, + None, + core::ptr::null_mut(), + core::ptr::null_mut(), + true, + ); + } + } + + /// Notify that the frontend is ready to receive data. + pub fn accept_input(&self) { + let mut chr = self.inner.borrow_mut(); + // SAFETY: the borrow promises that the BQL is taken + unsafe { bindings::qemu_chr_fe_accept_input(addr_of_mut!(*chr)) } + } + + /// Temporarily borrow the character device, allowing it to be used + /// as an implementor of `Write`. Note that it is not valid to drop + /// the big QEMU lock while the character device is borrowed, as + /// that might cause C code to write to the character device. + pub fn borrow_mut(&self) -> impl Write + '_ { + self.inner.borrow_mut() + } + + /// Send a continuous stream of zero bits on the line if `enabled` is + /// true, or a short stream if `enabled` is false. + pub fn send_break(&self, long: bool) -> io::Result<()> { + let mut chr = self.inner.borrow_mut(); + let mut duration: c_int = long.into(); + // SAFETY: the borrow promises that the BQL is taken + let r = unsafe { + bindings::qemu_chr_fe_ioctl( + addr_of_mut!(*chr), + bindings::CHR_IOCTL_SERIAL_SET_BREAK as i32, + addr_of_mut!(duration).cast::(), + ) + }; + + errno::into_io_result(r).map(|_| ()) + } + + /// Write data to a character backend from the front end. This function + /// will send data from the front end to the back end. Unlike + /// `write`, this function will block if the back end cannot + /// consume all of the data attempted to be written. 
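Example (editor's sketch, assumed device type): a front end typically registers its callbacks from its realize step by handing `enable_handlers` plain associated functions with the signatures described above:

```rust
use qemu_api::chardev::{CharBackend, Event};

struct SerialLikeDev {
    chr: CharBackend,
}

impl SerialLikeDev {
    fn can_receive(&self) -> u32 {
        1 // willing to accept one byte at a time
    }

    fn receive(&self, buf: &[u8]) {
        // push buf into the device's FIFO here
        let _ = buf;
    }

    fn event(&self, _event: Event) {}

    fn realize(&self) {
        // The type of `self` is erased before the callbacks reach C; the
        // trampolines inside enable_handlers cast it back when C calls in.
        self.chr
            .enable_handlers(self, Self::can_receive, Self::receive, Self::event);
    }
}
```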
+ /// + /// Returns the number of bytes consumed (0 if no associated Chardev) or an + /// error. + pub fn write(&self, buf: &[u8]) -> io::Result { + let len = buf.len().try_into().unwrap(); + // SAFETY: qemu_chr_fe_write is thread-safe + let r = unsafe { bindings::qemu_chr_fe_write(self.inner.as_ptr(), buf.as_ptr(), len) }; + errno::into_io_result(r).map(|cnt| cnt as usize) + } + + /// Write data to a character backend from the front end. This function + /// will send data from the front end to the back end. Unlike + /// `write`, this function will block if the back end cannot + /// consume all of the data attempted to be written. + /// + /// Returns the number of bytes consumed (0 if no associated Chardev) or an + /// error. + pub fn write_all(&self, buf: &[u8]) -> io::Result<()> { + let len = buf.len().try_into().unwrap(); + // SAFETY: qemu_chr_fe_write_all is thread-safe + let r = unsafe { bindings::qemu_chr_fe_write_all(self.inner.as_ptr(), buf.as_ptr(), len) }; + errno::into_io_result(r).and_then(|cnt| { + if cnt as usize == buf.len() { + Ok(()) + } else { + Err(ErrorKind::WriteZero.into()) + } + }) + } +} + +unsafe impl ObjectType for Chardev { + type Class = ChardevClass; + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_CHARDEV) }; +} +qom_isa!(Chardev: Object); diff --git a/rust/qemu-api/src/errno.rs b/rust/qemu-api/src/errno.rs new file mode 100644 index 00000000000..18d101448b9 --- /dev/null +++ b/rust/qemu-api/src/errno.rs @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Utility functions to convert `errno` to and from +//! [`io::Error`]/[`io::Result`] +//! +//! QEMU C functions often have a "positive success/negative `errno`" calling +//! convention. This module provides functions to portably convert an integer +//! into an [`io::Result`] and back. + +use std::{convert::TryFrom, io, io::ErrorKind}; + +/// An `errno` value that can be converted into an [`io::Error`] +pub struct Errno(pub u16); + +// On Unix, from_raw_os_error takes an errno value and OS errors +// are printed using strerror. On Windows however it takes a +// GetLastError() value; therefore we need to convert errno values +// into io::Error by hand. This is the same mapping that the +// standard library uses to retrieve the kind of OS errors +// (`std::sys::pal::unix::decode_error_kind`). +impl From for ErrorKind { + fn from(value: Errno) -> ErrorKind { + use ErrorKind::*; + let Errno(errno) = value; + match i32::from(errno) { + libc::EPERM | libc::EACCES => PermissionDenied, + libc::ENOENT => NotFound, + libc::EINTR => Interrupted, + x if x == libc::EAGAIN || x == libc::EWOULDBLOCK => WouldBlock, + libc::ENOMEM => OutOfMemory, + libc::EEXIST => AlreadyExists, + libc::EINVAL => InvalidInput, + libc::EPIPE => BrokenPipe, + libc::EADDRINUSE => AddrInUse, + libc::EADDRNOTAVAIL => AddrNotAvailable, + libc::ECONNABORTED => ConnectionAborted, + libc::ECONNREFUSED => ConnectionRefused, + libc::ECONNRESET => ConnectionReset, + libc::ENOTCONN => NotConnected, + libc::ENOTSUP => Unsupported, + libc::ETIMEDOUT => TimedOut, + _ => Other, + } + } +} + +// This is used on Windows for all io::Errors, but also on Unix if the +// io::Error does not have a raw OS error. This is the reversed +// mapping of the above; EIO is returned for unknown ErrorKinds. 
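Example (editor's sketch, assuming a Unix host and the `libc` crate): the mapping above means an `Errno` can round-trip through a typed `io::Error`:

```rust
use std::io;

use qemu_api::errno::Errno;

fn demo() {
    // Errno -> io::Error uses the raw OS error on Unix, so the kind is typed.
    let e: io::Error = Errno(libc::ENOENT as u16).into();
    assert_eq!(e.kind(), io::ErrorKind::NotFound);

    // io::Error -> Errno recovers the original value.
    let back: Errno = e.into();
    assert_eq!(back.0, libc::ENOENT as u16);
}
```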
+impl From for Errno { + fn from(value: io::ErrorKind) -> Errno { + use ErrorKind::*; + let errno = match value { + // can be both EPERM or EACCES :( pick one + PermissionDenied => libc::EPERM, + NotFound => libc::ENOENT, + Interrupted => libc::EINTR, + WouldBlock => libc::EAGAIN, + OutOfMemory => libc::ENOMEM, + AlreadyExists => libc::EEXIST, + InvalidInput => libc::EINVAL, + BrokenPipe => libc::EPIPE, + AddrInUse => libc::EADDRINUSE, + AddrNotAvailable => libc::EADDRNOTAVAIL, + ConnectionAborted => libc::ECONNABORTED, + ConnectionRefused => libc::ECONNREFUSED, + ConnectionReset => libc::ECONNRESET, + NotConnected => libc::ENOTCONN, + Unsupported => libc::ENOTSUP, + TimedOut => libc::ETIMEDOUT, + _ => libc::EIO, + }; + Errno(errno as u16) + } +} + +impl From for io::Error { + #[cfg(unix)] + fn from(value: Errno) -> io::Error { + let Errno(errno) = value; + io::Error::from_raw_os_error(errno.into()) + } + + #[cfg(windows)] + fn from(value: Errno) -> io::Error { + let error_kind: ErrorKind = value.into(); + error_kind.into() + } +} + +impl From for Errno { + fn from(value: io::Error) -> Errno { + if cfg!(unix) { + if let Some(errno) = value.raw_os_error() { + return Errno(u16::try_from(errno).unwrap()); + } + } + value.kind().into() + } +} + +/// Internal traits; used to enable [`into_io_result`] and [`into_neg_errno`] +/// for the "right" set of types. +mod traits { + use super::Errno; + + /// A signed type that can be converted into an + /// [`io::Result`](std::io::Result) + pub trait GetErrno { + /// Unsigned variant of `Self`, used as the type for the `Ok` case. + type Out; + + /// Return `Ok(self)` if positive, `Err(Errno(-self))` if negative + fn into_errno_result(self) -> Result; + } + + /// A type that can be taken out of an [`io::Result`](std::io::Result) and + /// converted into "positive success/negative `errno`" convention. + pub trait MergeErrno { + /// Signed variant of `Self`, used as the return type of + /// [`into_neg_errno`](super::into_neg_errno). + type Out: From + std::ops::Neg; + + /// Return `self`, asserting that it is in range + fn map_ok(self) -> Self::Out; + } + + macro_rules! get_errno { + ($t:ty, $out:ty) => { + impl GetErrno for $t { + type Out = $out; + fn into_errno_result(self) -> Result { + match self { + 0.. => Ok(self as $out), + -65535..=-1 => Err(Errno(-self as u16)), + _ => panic!("{self} is not a negative errno"), + } + } + } + }; + } + + get_errno!(i32, u32); + get_errno!(i64, u64); + get_errno!(isize, usize); + + macro_rules! merge_errno { + ($t:ty, $out:ty) => { + impl MergeErrno for $t { + type Out = $out; + fn map_ok(self) -> Self::Out { + self.try_into().unwrap() + } + } + }; + } + + merge_errno!(u8, i32); + merge_errno!(u16, i32); + merge_errno!(u32, i32); + merge_errno!(u64, i64); + + impl MergeErrno for () { + type Out = i32; + fn map_ok(self) -> i32 { + 0 + } + } +} + +use traits::{GetErrno, MergeErrno}; + +/// Convert an integer value into a [`io::Result`]. +/// +/// Positive values are turned into an `Ok` result; negative values +/// are interpreted as negated `errno` and turned into an `Err`. 
+/// +/// ``` +/// # use qemu_api::errno::into_io_result; +/// # use std::io::ErrorKind; +/// let ok = into_io_result(1i32).unwrap(); +/// assert_eq!(ok, 1u32); +/// +/// let err = into_io_result(-1i32).unwrap_err(); // -EPERM +/// assert_eq!(err.kind(), ErrorKind::PermissionDenied); +/// ``` +/// +/// # Panics +/// +/// Since the result is an unsigned integer, negative values must +/// be close to 0; values that are too far away are considered +/// likely overflows and will panic: +/// +/// ```should_panic +/// # use qemu_api::errno::into_io_result; +/// # #[allow(dead_code)] +/// let err = into_io_result(-0x1234_5678i32); // panic +/// ``` +pub fn into_io_result(value: T) -> io::Result { + value.into_errno_result().map_err(Into::into) +} + +/// Convert a [`Result`] into an integer value, using negative `errno` +/// values to report errors. +/// +/// ``` +/// # use qemu_api::errno::into_neg_errno; +/// # use std::io::{self, ErrorKind}; +/// let ok: io::Result<()> = Ok(()); +/// assert_eq!(into_neg_errno(ok), 0); +/// +/// let err: io::Result<()> = Err(ErrorKind::InvalidInput.into()); +/// assert_eq!(into_neg_errno(err), -22); // -EINVAL +/// ``` +/// +/// Since this module also provides the ability to convert [`io::Error`] +/// to an `errno` value, [`io::Result`] is the most commonly used type +/// for the argument of this function: +/// +/// # Panics +/// +/// Since the result is a signed integer, integer `Ok` values must remain +/// positive: +/// +/// ```should_panic +/// # use qemu_api::errno::into_neg_errno; +/// # use std::io; +/// let err: io::Result = Ok(0x8899_AABB); +/// into_neg_errno(err) // panic +/// # ; +/// ``` +pub fn into_neg_errno>(value: Result) -> T::Out { + match value { + Ok(x) => x.map_ok(), + Err(err) => -T::Out::from(err.into().0), + } +} + +#[cfg(test)] +mod tests { + use std::io::ErrorKind; + + use super::*; + use crate::assert_match; + + #[test] + pub fn test_from_u8() { + let ok: io::Result<_> = Ok(42u8); + assert_eq!(into_neg_errno(ok), 42); + + let err: io::Result = Err(io::ErrorKind::PermissionDenied.into()); + assert_eq!(into_neg_errno(err), -1); + + if cfg!(unix) { + let os_err: io::Result = Err(io::Error::from_raw_os_error(10)); + assert_eq!(into_neg_errno(os_err), -10); + } + } + + #[test] + pub fn test_from_u16() { + let ok: io::Result<_> = Ok(1234u16); + assert_eq!(into_neg_errno(ok), 1234); + + let err: io::Result = Err(io::ErrorKind::PermissionDenied.into()); + assert_eq!(into_neg_errno(err), -1); + + if cfg!(unix) { + let os_err: io::Result = Err(io::Error::from_raw_os_error(10)); + assert_eq!(into_neg_errno(os_err), -10); + } + } + + #[test] + pub fn test_i32() { + assert_match!(into_io_result(1234i32), Ok(1234)); + + let err = into_io_result(-1i32).unwrap_err(); + #[cfg(unix)] + assert_match!(err.raw_os_error(), Some(1)); + assert_match!(err.kind(), ErrorKind::PermissionDenied); + } + + #[test] + pub fn test_from_u32() { + let ok: io::Result<_> = Ok(1234u32); + assert_eq!(into_neg_errno(ok), 1234); + + let err: io::Result = Err(io::ErrorKind::PermissionDenied.into()); + assert_eq!(into_neg_errno(err), -1); + + if cfg!(unix) { + let os_err: io::Result = Err(io::Error::from_raw_os_error(10)); + assert_eq!(into_neg_errno(os_err), -10); + } + } + + #[test] + pub fn test_i64() { + assert_match!(into_io_result(1234i64), Ok(1234)); + + let err = into_io_result(-22i64).unwrap_err(); + #[cfg(unix)] + assert_match!(err.raw_os_error(), Some(22)); + assert_match!(err.kind(), ErrorKind::InvalidInput); + } + + #[test] + pub fn test_from_u64() { + let ok: 
io::Result<_> = Ok(1234u64); + assert_eq!(into_neg_errno(ok), 1234); + + let err: io::Result = Err(io::ErrorKind::InvalidInput.into()); + assert_eq!(into_neg_errno(err), -22); + + if cfg!(unix) { + let os_err: io::Result = Err(io::Error::from_raw_os_error(6)); + assert_eq!(into_neg_errno(os_err), -6); + } + } + + #[test] + pub fn test_isize() { + assert_match!(into_io_result(1234isize), Ok(1234)); + + let err = into_io_result(-4isize).unwrap_err(); + #[cfg(unix)] + assert_match!(err.raw_os_error(), Some(4)); + assert_match!(err.kind(), ErrorKind::Interrupted); + } + + #[test] + pub fn test_from_unit() { + let ok: io::Result<_> = Ok(()); + assert_eq!(into_neg_errno(ok), 0); + + let err: io::Result<()> = Err(io::ErrorKind::OutOfMemory.into()); + assert_eq!(into_neg_errno(err), -12); + + if cfg!(unix) { + let os_err: io::Result<()> = Err(io::Error::from_raw_os_error(2)); + assert_eq!(into_neg_errno(os_err), -2); + } + } +} diff --git a/rust/qemu-api/src/error.rs b/rust/qemu-api/src/error.rs new file mode 100644 index 00000000000..e114fc4178b --- /dev/null +++ b/rust/qemu-api/src/error.rs @@ -0,0 +1,416 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Error propagation for QEMU Rust code +//! +//! This module contains [`Error`], the bridge between Rust errors and +//! [`Result`](std::result::Result)s and QEMU's C [`Error`](bindings::Error) +//! struct. +//! +//! For FFI code, [`Error`] provides functions to simplify conversion between +//! the Rust ([`Result<>`](std::result::Result)) and C (`Error**`) conventions: +//! +//! * [`ok_or_propagate`](crate::Error::ok_or_propagate), +//! [`bool_or_propagate`](crate::Error::bool_or_propagate), +//! [`ptr_or_propagate`](crate::Error::ptr_or_propagate) can be used to build +//! a C return value while also propagating an error condition +//! +//! * [`err_or_else`](crate::Error::err_or_else) and +//! [`err_or_unit`](crate::Error::err_or_unit) can be used to build a `Result` +//! +//! This module is most commonly used at the boundary between C and Rust code; +//! other code will usually access it through the +//! [`qemu_api::Result`](crate::Result) type alias, and will use the +//! [`std::error::Error`] interface to let C errors participate in Rust's error +//! handling functionality. +//! +//! Rust code can also create use this module to create an error object that +//! will be passed up to C code, though in most cases this will be done +//! transparently through the `?` operator. Errors can be constructed from a +//! simple error string, from an [`anyhow::Error`] to pass any other Rust error +//! type up to C code, or from a combination of the two. +//! +//! The third case, corresponding to [`Error::with_error`], is the only one that +//! requires mentioning [`qemu_api::Error`](crate::Error) explicitly. Similar +//! to how QEMU's C code handles errno values, the string and the +//! `anyhow::Error` object will be concatenated with `:` as the separator. 
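Example (editor's sketch, with assumed function names): a Rust helper returns `qemu_api::Result`, and a thin FFI wrapper converts it to the C `Error**` convention with `bool_or_propagate`:

```rust
use qemu_api::{bindings, Error, Result};

fn check_frequency(freq: u64) -> Result<()> {
    if freq == 0 {
        // A plain string becomes an Error with no cause attached.
        return Err(Error::from("clock frequency must not be zero"));
    }
    Ok(())
}

// Typical boundary function exposed to C (hypothetical name).
unsafe extern "C" fn my_device_check(freq: u64, errp: *mut *mut bindings::Error) -> bool {
    // SAFETY: errp is a valid Error** received from C code.
    unsafe { Error::bool_or_propagate(check_frequency(freq), errp) }
}
```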
+ +use std::{ + borrow::Cow, + ffi::{c_char, c_int, c_void, CStr}, + fmt::{self, Display}, + panic, ptr, +}; + +use foreign::{prelude::*, OwnedPointer}; + +use crate::bindings; + +pub type Result = std::result::Result; + +#[derive(Debug)] +pub struct Error { + msg: Option>, + /// Appends the print string of the error to the msg if not None + cause: Option, + file: &'static str, + line: u32, +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + self.cause.as_ref().map(AsRef::as_ref) + } + + #[allow(deprecated)] + fn description(&self) -> &str { + self.msg + .as_deref() + .or_else(|| self.cause.as_deref().map(std::error::Error::description)) + .expect("no message nor cause?") + } +} + +impl Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut prefix = ""; + if let Some(ref msg) = self.msg { + write!(f, "{msg}")?; + prefix = ": "; + } + if let Some(ref cause) = self.cause { + write!(f, "{prefix}{cause}")?; + } else if prefix.is_empty() { + panic!("no message nor cause?"); + } + Ok(()) + } +} + +impl From for Error { + #[track_caller] + fn from(msg: String) -> Self { + let location = panic::Location::caller(); + Error { + msg: Some(Cow::Owned(msg)), + cause: None, + file: location.file(), + line: location.line(), + } + } +} + +impl From<&'static str> for Error { + #[track_caller] + fn from(msg: &'static str) -> Self { + let location = panic::Location::caller(); + Error { + msg: Some(Cow::Borrowed(msg)), + cause: None, + file: location.file(), + line: location.line(), + } + } +} + +impl From for Error { + #[track_caller] + fn from(error: anyhow::Error) -> Self { + let location = panic::Location::caller(); + Error { + msg: None, + cause: Some(error), + file: location.file(), + line: location.line(), + } + } +} + +impl Error { + /// Create a new error, prepending `msg` to the + /// description of `cause` + #[track_caller] + pub fn with_error(msg: impl Into>, cause: impl Into) -> Self { + let location = panic::Location::caller(); + Error { + msg: Some(msg.into()), + cause: Some(cause.into()), + file: location.file(), + line: location.line(), + } + } + + /// Consume a result, returning `false` if it is an error and + /// `true` if it is successful. The error is propagated into + /// `errp` like the C API `error_propagate` would do. + /// + /// # Safety + /// + /// `errp` must be a valid argument to `error_propagate`; + /// typically it is received from C code and need not be + /// checked further at the Rust↔C boundary. + pub unsafe fn bool_or_propagate(result: Result<()>, errp: *mut *mut bindings::Error) -> bool { + // SAFETY: caller guarantees errp is valid + unsafe { Self::ok_or_propagate(result, errp) }.is_some() + } + + /// Consume a result, returning a `NULL` pointer if it is an error and + /// a C representation of the contents if it is successful. This is + /// similar to the C API `error_propagate`, but it panics if `*errp` + /// is not `NULL`. + /// + /// # Safety + /// + /// `errp` must be a valid argument to `error_propagate`; + /// typically it is received from C code and need not be + /// checked further at the Rust↔C boundary. + /// + /// See [`propagate`](Error::propagate) for more information. 
+ #[must_use] + pub unsafe fn ptr_or_propagate( + result: Result, + errp: *mut *mut bindings::Error, + ) -> *mut T::Foreign { + // SAFETY: caller guarantees errp is valid + unsafe { Self::ok_or_propagate(result, errp) }.clone_to_foreign_ptr() + } + + /// Consume a result in the same way as `self.ok()`, but also propagate + /// a possible error into `errp`. This is similar to the C API + /// `error_propagate`, but it panics if `*errp` is not `NULL`. + /// + /// # Safety + /// + /// `errp` must be a valid argument to `error_propagate`; + /// typically it is received from C code and need not be + /// checked further at the Rust↔C boundary. + /// + /// See [`propagate`](Error::propagate) for more information. + pub unsafe fn ok_or_propagate( + result: Result, + errp: *mut *mut bindings::Error, + ) -> Option { + result.map_err(|err| unsafe { err.propagate(errp) }).ok() + } + + /// Equivalent of the C function `error_propagate`. Fill `*errp` + /// with the information container in `self` if `errp` is not NULL; + /// then consume it. + /// + /// This is similar to the C API `error_propagate`, but it panics if + /// `*errp` is not `NULL`. + /// + /// # Safety + /// + /// `errp` must be a valid argument to `error_propagate`; it can be + /// `NULL` or it can point to any of: + /// * `error_abort` + /// * `error_fatal` + /// * a local variable of (C) type `Error *` + /// + /// Typically `errp` is received from C code and need not be + /// checked further at the Rust↔C boundary. + pub unsafe fn propagate(self, errp: *mut *mut bindings::Error) { + if errp.is_null() { + return; + } + + // SAFETY: caller guarantees that errp and *errp are valid + unsafe { + assert_eq!(*errp, ptr::null_mut()); + bindings::error_propagate(errp, self.clone_to_foreign_ptr()); + } + } + + /// Convert a C `Error*` into a Rust `Result`, using + /// `Ok(())` if `c_error` is NULL. Free the `Error*`. + /// + /// # Safety + /// + /// `c_error` must be `NULL` or valid; typically it was initialized + /// with `ptr::null_mut()` and passed by reference to a C function. + pub unsafe fn err_or_unit(c_error: *mut bindings::Error) -> Result<()> { + // SAFETY: caller guarantees c_error is valid + unsafe { Self::err_or_else(c_error, || ()) } + } + + /// Convert a C `Error*` into a Rust `Result`, calling `f()` to + /// obtain an `Ok` value if `c_error` is NULL. Free the `Error*`. + /// + /// # Safety + /// + /// `c_error` must be `NULL` or point to a valid C [`struct + /// Error`](bindings::Error); typically it was initialized with + /// `ptr::null_mut()` and passed by reference to a C function. 
+ pub unsafe fn err_or_else T>( + c_error: *mut bindings::Error, + f: F, + ) -> Result { + // SAFETY: caller guarantees c_error is valid + let err = unsafe { Option::::from_foreign(c_error) }; + match err { + None => Ok(f()), + Some(err) => Err(err), + } + } +} + +impl FreeForeign for Error { + type Foreign = bindings::Error; + + unsafe fn free_foreign(p: *mut bindings::Error) { + // SAFETY: caller guarantees p is valid + unsafe { + bindings::error_free(p); + } + } +} + +impl CloneToForeign for Error { + fn clone_to_foreign(&self) -> OwnedPointer { + // SAFETY: all arguments are controlled by this function + unsafe { + let err: *mut c_void = libc::malloc(std::mem::size_of::()); + let err: &mut bindings::Error = &mut *err.cast(); + *err = bindings::Error { + msg: format!("{self}").clone_to_foreign_ptr(), + err_class: bindings::ERROR_CLASS_GENERIC_ERROR, + src_len: self.file.len() as c_int, + src: self.file.as_ptr().cast::(), + line: self.line as c_int, + func: ptr::null_mut(), + hint: ptr::null_mut(), + }; + OwnedPointer::new(err) + } + } +} + +impl FromForeign for Error { + unsafe fn cloned_from_foreign(c_error: *const bindings::Error) -> Self { + // SAFETY: caller guarantees c_error is valid + unsafe { + let error = &*c_error; + let file = if error.src_len < 0 { + // NUL-terminated + CStr::from_ptr(error.src).to_str() + } else { + // Can become str::from_utf8 with Rust 1.87.0 + std::str::from_utf8(std::slice::from_raw_parts( + &*error.src.cast::(), + error.src_len as usize, + )) + }; + + Error { + msg: FromForeign::cloned_from_foreign(error.msg), + cause: None, + file: file.unwrap(), + line: error.line as u32, + } + } + } +} + +#[cfg(test)] +mod tests { + use std::ffi::CStr; + + use anyhow::anyhow; + use foreign::OwnedPointer; + + use super::*; + use crate::{assert_match, bindings}; + + #[track_caller] + fn error_for_test(msg: &CStr) -> OwnedPointer { + // SAFETY: all arguments are controlled by this function + let location = panic::Location::caller(); + unsafe { + let err: *mut c_void = libc::malloc(std::mem::size_of::()); + let err: &mut bindings::Error = &mut *err.cast(); + *err = bindings::Error { + msg: msg.clone_to_foreign_ptr(), + err_class: bindings::ERROR_CLASS_GENERIC_ERROR, + src_len: location.file().len() as c_int, + src: location.file().as_ptr().cast::(), + line: location.line() as c_int, + func: ptr::null_mut(), + hint: ptr::null_mut(), + }; + OwnedPointer::new(err) + } + } + + unsafe fn error_get_pretty<'a>(local_err: *mut bindings::Error) -> &'a CStr { + unsafe { CStr::from_ptr(bindings::error_get_pretty(local_err)) } + } + + #[test] + #[allow(deprecated)] + fn test_description() { + use std::error::Error; + + assert_eq!(super::Error::from("msg").description(), "msg"); + assert_eq!(super::Error::from("msg".to_owned()).description(), "msg"); + } + + #[test] + fn test_display() { + assert_eq!(&*format!("{}", Error::from("msg")), "msg"); + assert_eq!(&*format!("{}", Error::from("msg".to_owned())), "msg"); + assert_eq!(&*format!("{}", Error::from(anyhow!("msg"))), "msg"); + + assert_eq!( + &*format!("{}", Error::with_error("msg", anyhow!("cause"))), + "msg: cause" + ); + } + + #[test] + fn test_bool_or_propagate() { + unsafe { + let mut local_err: *mut bindings::Error = ptr::null_mut(); + + assert!(Error::bool_or_propagate(Ok(()), &mut local_err)); + assert_eq!(local_err, ptr::null_mut()); + + let my_err = Error::from("msg"); + assert!(!Error::bool_or_propagate(Err(my_err), &mut local_err)); + assert_ne!(local_err, ptr::null_mut()); + assert_eq!(error_get_pretty(local_err), 
c"msg"); + bindings::error_free(local_err); + } + } + + #[test] + fn test_ptr_or_propagate() { + unsafe { + let mut local_err: *mut bindings::Error = ptr::null_mut(); + + let ret = Error::ptr_or_propagate(Ok("abc".to_owned()), &mut local_err); + assert_eq!(String::from_foreign(ret), "abc"); + assert_eq!(local_err, ptr::null_mut()); + + let my_err = Error::from("msg"); + assert_eq!( + Error::ptr_or_propagate(Err::(my_err), &mut local_err), + ptr::null_mut() + ); + assert_ne!(local_err, ptr::null_mut()); + assert_eq!(error_get_pretty(local_err), c"msg"); + bindings::error_free(local_err); + } + } + + #[test] + fn test_err_or_unit() { + unsafe { + let result = Error::err_or_unit(ptr::null_mut()); + assert_match!(result, Ok(())); + + let err = error_for_test(c"msg"); + let err = Error::err_or_unit(err.into_inner()).unwrap_err(); + assert_eq!(&*format!("{err}"), "msg"); + } + } +} diff --git a/rust/qemu-api/src/irq.rs b/rust/qemu-api/src/irq.rs new file mode 100644 index 00000000000..1526e6f63a1 --- /dev/null +++ b/rust/qemu-api/src/irq.rs @@ -0,0 +1,115 @@ +// Copyright 2024 Red Hat, Inc. +// Author(s): Paolo Bonzini +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Bindings for interrupt sources + +use std::{ + ffi::{c_int, CStr}, + marker::PhantomData, + ptr, +}; + +use crate::{ + bindings::{self, qemu_set_irq}, + cell::Opaque, + prelude::*, + qom::ObjectClass, +}; + +/// An opaque wrapper around [`bindings::IRQState`]. +#[repr(transparent)] +#[derive(Debug, qemu_api_macros::Wrapper)] +pub struct IRQState(Opaque); + +/// Interrupt sources are used by devices to pass changes to a value (typically +/// a boolean). The interrupt sink is usually an interrupt controller or +/// GPIO controller. +/// +/// As far as devices are concerned, interrupt sources are always active-high: +/// for example, `InterruptSource`'s [`raise`](InterruptSource::raise) +/// method sends a `true` value to the sink. If the guest has to see a +/// different polarity, that change is performed by the board between the +/// device and the interrupt controller. +/// +/// Interrupts are implemented as a pointer to the interrupt "sink", which has +/// type [`IRQState`]. A device exposes its source as a QOM link property using +/// a function such as [`SysBusDeviceMethods::init_irq`], and +/// initially leaves the pointer to a NULL value, representing an unconnected +/// interrupt. To connect it, whoever creates the device fills the pointer with +/// the sink's `IRQState *`, for example using `sysbus_connect_irq`. Because +/// devices are generally shared objects, interrupt sources are an example of +/// the interior mutability pattern. +/// +/// Interrupt sources can only be triggered under the Big QEMU Lock; `BqlCell` +/// allows access from whatever thread has it. +#[derive(Debug)] +#[repr(transparent)] +pub struct InterruptSource +where + c_int: From, +{ + cell: BqlCell<*mut bindings::IRQState>, + _marker: PhantomData, +} + +// SAFETY: the implementation asserts via `BqlCell` that the BQL is taken +unsafe impl Sync for InterruptSource where c_int: From {} + +impl InterruptSource { + /// Send a low (`false`) value to the interrupt sink. + pub fn lower(&self) { + self.set(false); + } + + /// Send a high-low pulse to the interrupt sink. + pub fn pulse(&self) { + self.set(true); + self.set(false); + } + + /// Send a high (`true`) value to the interrupt sink. + pub fn raise(&self) { + self.set(true); + } +} + +impl InterruptSource +where + c_int: From, +{ + /// Send `level` to the interrupt sink. 
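Example (editor's sketch, assumed device type): the device owns the `InterruptSource` (with its default `bool` payload) and drives it with `raise()`/`lower()`; connecting the other end to a sink is the board's job, for example through `sysbus_connect_irq`:

```rust
use qemu_api::irq::InterruptSource;

#[derive(Default)]
struct IrqDemo {
    irq: InterruptSource,
}

impl IrqDemo {
    fn update(&self, pending: bool) {
        // Sources are always active-high from the device's point of view.
        if pending {
            self.irq.raise();
        } else {
            self.irq.lower();
        }
    }
}
```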
+ pub fn set(&self, level: T) { + let ptr = self.cell.get(); + // SAFETY: the pointer is retrieved under the BQL and remains valid + // until the BQL is released, which is after qemu_set_irq() is entered. + unsafe { + qemu_set_irq(ptr, level.into()); + } + } + + pub(crate) const fn as_ptr(&self) -> *mut *mut bindings::IRQState { + self.cell.as_ptr() + } + + pub(crate) const fn slice_as_ptr(slice: &[Self]) -> *mut *mut bindings::IRQState { + assert!(!slice.is_empty()); + slice[0].as_ptr() + } +} + +impl Default for InterruptSource { + fn default() -> Self { + InterruptSource { + cell: BqlCell::new(ptr::null_mut()), + _marker: PhantomData, + } + } +} + +unsafe impl ObjectType for IRQState { + type Class = ObjectClass; + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_IRQ) }; +} +qom_isa!(IRQState: Object); diff --git a/rust/qemu-api/src/lib.rs b/rust/qemu-api/src/lib.rs new file mode 100644 index 00000000000..86dcd8ef17a --- /dev/null +++ b/rust/qemu-api/src/lib.rs @@ -0,0 +1,170 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis +// SPDX-License-Identifier: GPL-2.0-or-later + +#![cfg_attr(not(MESON), doc = include_str!("../README.md"))] +#![deny(clippy::missing_const_for_fn)] + +#[rustfmt::skip] +pub mod bindings; + +// preserve one-item-per-"use" syntax, it is clearer +// for prelude-like modules +#[rustfmt::skip] +pub mod prelude; + +pub mod assertions; +pub mod bitops; +pub mod callbacks; +pub mod cell; +pub mod chardev; +pub mod errno; +pub mod error; +pub mod irq; +pub mod log; +pub mod memory; +pub mod module; +pub mod qdev; +pub mod qom; +pub mod sysbus; +pub mod timer; +pub mod uninit; +pub mod vmstate; +pub mod zeroable; + +use std::{ + alloc::{GlobalAlloc, Layout}, + ffi::c_void, +}; + +pub use error::{Error, Result}; + +#[cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)] +extern "C" { + fn g_aligned_alloc0( + n_blocks: bindings::gsize, + n_block_bytes: bindings::gsize, + alignment: bindings::gsize, + ) -> bindings::gpointer; + fn g_aligned_free(mem: bindings::gpointer); +} + +#[cfg(not(HAVE_GLIB_WITH_ALIGNED_ALLOC))] +extern "C" { + fn qemu_memalign(alignment: usize, size: usize) -> *mut c_void; + fn qemu_vfree(ptr: *mut c_void); +} + +extern "C" { + fn g_malloc0(n_bytes: bindings::gsize) -> bindings::gpointer; + fn g_free(mem: bindings::gpointer); +} + +/// An allocator that uses the same allocator as QEMU in C. +/// +/// It is enabled by default with the `allocator` feature. +/// +/// To set it up manually as a global allocator in your crate: +/// +/// ```ignore +/// use qemu_api::QemuAllocator; +/// +/// #[global_allocator] +/// static GLOBAL: QemuAllocator = QemuAllocator::new(); +/// ``` +#[derive(Clone, Copy, Debug)] +#[repr(C)] +pub struct QemuAllocator { + _unused: [u8; 0], +} + +#[cfg_attr(all(feature = "allocator", not(test)), global_allocator)] +pub static GLOBAL: QemuAllocator = QemuAllocator::new(); + +impl QemuAllocator { + // From the glibc documentation, on GNU systems, malloc guarantees 16-byte + // alignment on 64-bit systems and 8-byte alignment on 32-bit systems. See + // https://www.gnu.org/software/libc/manual/html_node/Malloc-Examples.html. + // This alignment guarantee also applies to Windows and Android. On Darwin + // and OpenBSD, the alignment is 16 bytes on both 64-bit and 32-bit systems. 
+ #[cfg(all( + target_pointer_width = "32", + not(any(target_os = "macos", target_os = "openbsd")) + ))] + pub const DEFAULT_ALIGNMENT_BYTES: Option = Some(8); + #[cfg(all( + target_pointer_width = "64", + not(any(target_os = "macos", target_os = "openbsd")) + ))] + pub const DEFAULT_ALIGNMENT_BYTES: Option = Some(16); + #[cfg(all( + any(target_pointer_width = "32", target_pointer_width = "64"), + any(target_os = "macos", target_os = "openbsd") + ))] + pub const DEFAULT_ALIGNMENT_BYTES: Option = Some(16); + #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))] + pub const DEFAULT_ALIGNMENT_BYTES: Option = None; + + pub const fn new() -> Self { + Self { _unused: [] } + } +} + +impl Default for QemuAllocator { + fn default() -> Self { + Self::new() + } +} + +// Sanity check. +const _: [(); 8] = [(); ::core::mem::size_of::<*mut c_void>()]; + +unsafe impl GlobalAlloc for QemuAllocator { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + if matches!(Self::DEFAULT_ALIGNMENT_BYTES, Some(default) if default.checked_rem(layout.align()) == Some(0)) + { + // SAFETY: g_malloc0() is safe to call. + unsafe { g_malloc0(layout.size().try_into().unwrap()).cast::() } + } else { + #[cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)] + { + // SAFETY: g_aligned_alloc0() is safe to call. + unsafe { + g_aligned_alloc0( + layout.size().try_into().unwrap(), + 1, + layout.align().try_into().unwrap(), + ) + .cast::() + } + } + #[cfg(not(HAVE_GLIB_WITH_ALIGNED_ALLOC))] + { + // SAFETY: qemu_memalign() is safe to call. + unsafe { qemu_memalign(layout.align(), layout.size()).cast::() } + } + } + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + if matches!(Self::DEFAULT_ALIGNMENT_BYTES, Some(default) if default.checked_rem(layout.align()) == Some(0)) + { + // SAFETY: `ptr` must have been allocated by Self::alloc thus a valid + // glib-allocated pointer, so `g_free`ing is safe. + unsafe { g_free(ptr.cast::<_>()) } + } else { + #[cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)] + { + // SAFETY: `ptr` must have been allocated by Self::alloc thus a valid aligned + // glib-allocated pointer, so `g_aligned_free`ing is safe. + unsafe { g_aligned_free(ptr.cast::<_>()) } + } + #[cfg(not(HAVE_GLIB_WITH_ALIGNED_ALLOC))] + { + // SAFETY: `ptr` must have been allocated by Self::alloc thus a valid aligned + // glib-allocated pointer, so `qemu_vfree`ing is safe. + unsafe { qemu_vfree(ptr.cast::<_>()) } + } + } + } +} diff --git a/rust/qemu-api/src/log.rs b/rust/qemu-api/src/log.rs new file mode 100644 index 00000000000..a441b8c1f2e --- /dev/null +++ b/rust/qemu-api/src/log.rs @@ -0,0 +1,149 @@ +// Copyright 2025 Bernhard Beschow +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Bindings for QEMU's logging infrastructure + +use std::{ + io::{self, Write}, + ptr::NonNull, +}; + +use crate::{bindings, errno}; + +#[repr(u32)] +/// Represents specific error categories within QEMU's logging system. +/// +/// The `Log` enum provides a Rust abstraction for logging errors, corresponding +/// to a subset of the error categories defined in the C implementation. +pub enum Log { + /// Log invalid access caused by the guest. + /// Corresponds to `LOG_GUEST_ERROR` in the C implementation. + GuestError = bindings::LOG_GUEST_ERROR, + + /// Log guest access of unimplemented functionality. + /// Corresponds to `LOG_UNIMP` in the C implementation. + Unimp = bindings::LOG_UNIMP, +} + +/// A RAII guard for QEMU's logging infrastructure. 
Creating the guard +/// locks the log file, and dropping it (letting it go out of scope) unlocks +/// the file. +/// +/// As long as the guard lives, it can be written to using [`std::io::Write`]. +/// +/// The locking is recursive, therefore owning a guard does not prevent +/// using [`log_mask_ln!()`](crate::log_mask_ln). +pub struct LogGuard(NonNull); + +impl LogGuard { + /// Return a RAII guard that writes to QEMU's logging infrastructure. + /// The log file is locked while the guard exists, ensuring that there + /// is no tearing of the messages. + /// + /// Return `None` if the log file is closed and could not be opened. + /// Do *not* use `unwrap()` on the result; failure can be handled simply + /// by not logging anything. + /// + /// # Examples + /// + /// ``` + /// # use qemu_api::log::LogGuard; + /// # use std::io::Write; + /// if let Some(mut log) = LogGuard::new() { + /// writeln!(log, "test"); + /// } + /// ``` + pub fn new() -> Option { + let f = unsafe { bindings::qemu_log_trylock() }.cast(); + NonNull::new(f).map(Self) + } + + /// Writes a formatted string into the log, returning any error encountered. + /// + /// This method is primarily used by the + /// [`log_mask_ln!()`](crate::log_mask_ln) macro, and it is rare for it + /// to be called explicitly. It is public because it is the only way to + /// examine the error, which `log_mask_ln!()` ignores + /// + /// Unlike `log_mask_ln!()`, it does *not* append a newline at the end. + pub fn log_fmt(args: std::fmt::Arguments) -> io::Result<()> { + if let Some(mut log) = Self::new() { + log.write_fmt(args)?; + } + Ok(()) + } +} + +impl Write for LogGuard { + fn write(&mut self, bytes: &[u8]) -> io::Result { + let ret = unsafe { + bindings::rust_fwrite(bytes.as_ptr().cast(), 1, bytes.len(), self.0.as_ptr()) + }; + errno::into_io_result(ret) + } + + fn flush(&mut self) -> io::Result<()> { + // Do nothing, dropping the guard takes care of flushing + Ok(()) + } +} + +impl Drop for LogGuard { + fn drop(&mut self) { + unsafe { + bindings::qemu_log_unlock(self.0.as_ptr()); + } + } +} + +/// A macro to log messages conditionally based on a provided mask. +/// +/// The `log_mask_ln` macro checks whether the given mask matches the current +/// log level and, if so, formats and logs the message. It is the Rust +/// counterpart of the `qemu_log_mask()` macro in the C implementation. +/// +/// Errors from writing to the log are ignored. +/// +/// # Parameters +/// +/// - `$mask`: A log level mask. This should be a variant of the `Log` enum. +/// - `$fmt`: A format string following the syntax and rules of the `format!` +/// macro. It specifies the structure of the log message. +/// - `$args`: Optional arguments to be interpolated into the format string. +/// +/// # Example +/// +/// ``` +/// use qemu_api::{log::Log, log_mask_ln}; +/// +/// let error_address = 0xbad; +/// log_mask_ln!(Log::GuestError, "Address 0x{error_address:x} out of range"); +/// ``` +/// +/// It is also possible to use printf-style formatting, as well as having a +/// trailing `,`: +/// +/// ``` +/// use qemu_api::{log::Log, log_mask_ln}; +/// +/// let error_address = 0xbad; +/// log_mask_ln!( +/// Log::GuestError, +/// "Address 0x{:x} out of range", +/// error_address, +/// ); +/// ``` +#[macro_export] +macro_rules! 
log_mask_ln { + ($mask:expr, $fmt:tt $($args:tt)*) => {{ + // Type assertion to enforce type `Log` for $mask + let _: Log = $mask; + + if unsafe { + (::qemu_api::bindings::qemu_loglevel & ($mask as std::os::raw::c_int)) != 0 + } { + _ = ::qemu_api::log::LogGuard::log_fmt( + format_args!("{}\n", format_args!($fmt $($args)*))); + } + }}; +} diff --git a/rust/qemu-api/src/memory.rs b/rust/qemu-api/src/memory.rs new file mode 100644 index 00000000000..e40fad6cf19 --- /dev/null +++ b/rust/qemu-api/src/memory.rs @@ -0,0 +1,204 @@ +// Copyright 2024 Red Hat, Inc. +// Author(s): Paolo Bonzini +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Bindings for `MemoryRegion`, `MemoryRegionOps` and `MemTxAttrs` + +use std::{ + ffi::{c_uint, c_void, CStr, CString}, + marker::PhantomData, +}; + +pub use bindings::{hwaddr, MemTxAttrs}; + +use crate::{ + bindings::{self, device_endian, memory_region_init_io}, + callbacks::FnCall, + cell::Opaque, + prelude::*, + uninit::MaybeUninitField, + zeroable::Zeroable, +}; + +pub struct MemoryRegionOps( + bindings::MemoryRegionOps, + // Note: quite often you'll see PhantomData mentioned when discussing + // covariance and contravariance; you don't need any of those to understand + // this usage of PhantomData. Quite simply, MemoryRegionOps *logically* + // holds callbacks that take an argument of type &T, except the type is erased + // before the callback is stored in the bindings::MemoryRegionOps field. + // The argument of PhantomData is a function pointer in order to represent + // that relationship; while that will also provide desirable and safe variance + // for T, variance is not the point but just a consequence. + PhantomData, +); + +// SAFETY: When a *const T is passed to the callbacks, the call itself +// is done in a thread-safe manner. The invocation is okay as long as +// T itself is `Sync`. 
+unsafe impl Sync for MemoryRegionOps {} + +#[derive(Clone)] +pub struct MemoryRegionOpsBuilder(bindings::MemoryRegionOps, PhantomData); + +unsafe extern "C" fn memory_region_ops_read_cb FnCall<(&'a T, hwaddr, u32), u64>>( + opaque: *mut c_void, + addr: hwaddr, + size: c_uint, +) -> u64 { + F::call((unsafe { &*(opaque.cast::()) }, addr, size)) +} + +unsafe extern "C" fn memory_region_ops_write_cb FnCall<(&'a T, hwaddr, u64, u32)>>( + opaque: *mut c_void, + addr: hwaddr, + data: u64, + size: c_uint, +) { + F::call((unsafe { &*(opaque.cast::()) }, addr, data, size)) +} + +impl MemoryRegionOpsBuilder { + #[must_use] + pub const fn read FnCall<(&'a T, hwaddr, u32), u64>>(mut self, _f: &F) -> Self { + self.0.read = Some(memory_region_ops_read_cb::); + self + } + + #[must_use] + pub const fn write FnCall<(&'a T, hwaddr, u64, u32)>>(mut self, _f: &F) -> Self { + self.0.write = Some(memory_region_ops_write_cb::); + self + } + + #[must_use] + pub const fn big_endian(mut self) -> Self { + self.0.endianness = device_endian::DEVICE_BIG_ENDIAN; + self + } + + #[must_use] + pub const fn little_endian(mut self) -> Self { + self.0.endianness = device_endian::DEVICE_LITTLE_ENDIAN; + self + } + + #[must_use] + pub const fn native_endian(mut self) -> Self { + self.0.endianness = device_endian::DEVICE_NATIVE_ENDIAN; + self + } + + #[must_use] + pub const fn valid_sizes(mut self, min: u32, max: u32) -> Self { + self.0.valid.min_access_size = min; + self.0.valid.max_access_size = max; + self + } + + #[must_use] + pub const fn valid_unaligned(mut self) -> Self { + self.0.valid.unaligned = true; + self + } + + #[must_use] + pub const fn impl_sizes(mut self, min: u32, max: u32) -> Self { + self.0.impl_.min_access_size = min; + self.0.impl_.max_access_size = max; + self + } + + #[must_use] + pub const fn impl_unaligned(mut self) -> Self { + self.0.impl_.unaligned = true; + self + } + + #[must_use] + pub const fn build(self) -> MemoryRegionOps { + MemoryRegionOps::(self.0, PhantomData) + } + + #[must_use] + pub const fn new() -> Self { + Self(bindings::MemoryRegionOps::ZERO, PhantomData) + } +} + +impl Default for MemoryRegionOpsBuilder { + fn default() -> Self { + Self::new() + } +} + +/// A safe wrapper around [`bindings::MemoryRegion`]. +#[repr(transparent)] +#[derive(qemu_api_macros::Wrapper)] +pub struct MemoryRegion(Opaque); + +unsafe impl Send for MemoryRegion {} +unsafe impl Sync for MemoryRegion {} + +impl MemoryRegion { + // inline to ensure that it is not included in tests, which only + // link to hwcore and qom. FIXME: inlining is actually the opposite + // of what we want, since this is the type-erased version of the + // init_io function below. Look into splitting the qemu_api crate. 
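    // Illustrative sketch (not part of this patch): a device typically builds a
    // static `MemoryRegionOps` with the builder above and registers it from its
    // `instance_init`. `MyDevice`, its `iomem` field and its `read`/`write`
    // handlers are hypothetical names.
    //
    //     static MY_DEVICE_OPS: MemoryRegionOps<MyDevice> =
    //         MemoryRegionOpsBuilder::<MyDevice>::new()
    //             .read(&MyDevice::read)    // fn read(&self, addr: hwaddr, size: u32) -> u64
    //             .write(&MyDevice::write)  // fn write(&self, addr: hwaddr, data: u64, size: u32)
    //             .native_endian()
    //             .impl_sizes(4, 4)
    //             .build();
    //
    //     // ...later, inside MyDevice's instance_init:
    //     MemoryRegion::init_io(&mut uninit_field_mut!(*this, iomem),
    //                           &MY_DEVICE_OPS, "my-device", 0x1000);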
+ #[inline(always)] + unsafe fn do_init_io( + slot: *mut bindings::MemoryRegion, + owner: *mut bindings::Object, + ops: &'static bindings::MemoryRegionOps, + name: &'static str, + size: u64, + ) { + unsafe { + let cstr = CString::new(name).unwrap(); + memory_region_init_io( + slot, + owner, + ops, + owner.cast::(), + cstr.as_ptr(), + size, + ); + } + } + + pub fn init_io>( + this: &mut MaybeUninitField<'_, T, Self>, + ops: &'static MemoryRegionOps, + name: &'static str, + size: u64, + ) { + unsafe { + Self::do_init_io( + this.as_mut_ptr().cast(), + MaybeUninitField::parent_mut(this).cast(), + &ops.0, + name, + size, + ); + } + } +} + +unsafe impl ObjectType for MemoryRegion { + type Class = bindings::MemoryRegionClass; + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_MEMORY_REGION) }; +} +qom_isa!(MemoryRegion: Object); + +/// A special `MemTxAttrs` constant, used to indicate that no memory +/// attributes are specified. +/// +/// Bus masters which don't specify any attributes will get this, +/// which has all attribute bits clear except the topmost one +/// (so that we can distinguish "all attributes deliberately clear" +/// from "didn't specify" if necessary). +pub const MEMTXATTRS_UNSPECIFIED: MemTxAttrs = MemTxAttrs { + unspecified: true, + ..Zeroable::ZERO +}; diff --git a/rust/qemu-api/src/module.rs b/rust/qemu-api/src/module.rs new file mode 100644 index 00000000000..fa5cea3598f --- /dev/null +++ b/rust/qemu-api/src/module.rs @@ -0,0 +1,43 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Macro to register blocks of code that run as QEMU starts up. + +#[macro_export] +macro_rules! module_init { + ($type:ident => $body:block) => { + const _: () = { + #[used] + #[cfg_attr( + not(any(target_vendor = "apple", target_os = "windows")), + link_section = ".init_array" + )] + #[cfg_attr(target_vendor = "apple", link_section = "__DATA,__mod_init_func")] + #[cfg_attr(target_os = "windows", link_section = ".CRT$XCU")] + pub static LOAD_MODULE: extern "C" fn() = { + extern "C" fn init_fn() { + $body + } + + extern "C" fn ctor_fn() { + unsafe { + $crate::bindings::register_module_init( + Some(init_fn), + $crate::bindings::module_init_type::$type, + ); + } + } + + ctor_fn + }; + }; + }; + + // shortcut because it's quite common that $body needs unsafe {} + ($type:ident => unsafe $body:block) => { + $crate::module_init! { + $type => { unsafe { $body } } + } + }; +} diff --git a/rust/qemu-api/src/prelude.rs b/rust/qemu-api/src/prelude.rs new file mode 100644 index 00000000000..8f9e23ee2c5 --- /dev/null +++ b/rust/qemu-api/src/prelude.rs @@ -0,0 +1,31 @@ +// Copyright 2024 Red Hat, Inc. +// Author(s): Paolo Bonzini +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Commonly used traits and types for QEMU. 
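//!
//! A minimal usage sketch (illustrative, not from this patch): most consumers
//! simply glob-import the prelude so that traits such as `ObjectMethods` and
//! `DeviceMethods` are in scope.
//!
//! ```ignore
//! use qemu_api::prelude::*;
//! ```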
+ +pub use crate::bitops::IntegerExt; + +pub use crate::cell::BqlCell; +pub use crate::cell::BqlRefCell; + +pub use crate::errno; + +pub use crate::log_mask_ln; + +pub use crate::qdev::DeviceMethods; + +pub use crate::qom::InterfaceType; +pub use crate::qom::IsA; +pub use crate::qom::Object; +pub use crate::qom::ObjectCast; +pub use crate::qom::ObjectDeref; +pub use crate::qom::ObjectClassMethods; +pub use crate::qom::ObjectMethods; +pub use crate::qom::ObjectType; + +pub use crate::qom_isa; + +pub use crate::sysbus::SysBusDeviceMethods; + +pub use crate::vmstate::VMState; diff --git a/rust/qemu-api/src/qdev.rs b/rust/qemu-api/src/qdev.rs new file mode 100644 index 00000000000..36f02fb57db --- /dev/null +++ b/rust/qemu-api/src/qdev.rs @@ -0,0 +1,410 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Bindings to create devices and access device functionality from Rust. + +use std::{ + ffi::{c_int, c_void, CStr, CString}, + ptr::NonNull, +}; + +pub use bindings::{ClockEvent, DeviceClass, Property, ResetType}; + +use crate::{ + bindings::{self, qdev_init_gpio_in, qdev_init_gpio_out, ResettableClass}, + callbacks::FnCall, + cell::{bql_locked, Opaque}, + chardev::Chardev, + error::{Error, Result}, + irq::InterruptSource, + prelude::*, + qom::{ObjectClass, ObjectImpl, Owned, ParentInit}, + vmstate::VMStateDescription, +}; + +/// A safe wrapper around [`bindings::Clock`]. +#[repr(transparent)] +#[derive(Debug, qemu_api_macros::Wrapper)] +pub struct Clock(Opaque); + +unsafe impl Send for Clock {} +unsafe impl Sync for Clock {} + +/// A safe wrapper around [`bindings::DeviceState`]. +#[repr(transparent)] +#[derive(Debug, qemu_api_macros::Wrapper)] +pub struct DeviceState(Opaque); + +unsafe impl Send for DeviceState {} +unsafe impl Sync for DeviceState {} + +/// Trait providing the contents of the `ResettablePhases` struct, +/// which is part of the QOM `Resettable` interface. +pub trait ResettablePhasesImpl { + /// If not None, this is called when the object enters reset. It + /// can reset local state of the object, but it must not do anything that + /// has a side-effect on other objects, such as raising or lowering an + /// [`InterruptSource`], or reading or writing guest memory. It takes the + /// reset's type as argument. + const ENTER: Option = None; + + /// If not None, this is called when the object for entry into reset, once + /// every object in the system which is being reset has had its + /// `ResettablePhasesImpl::ENTER` method called. At this point devices + /// can do actions that affect other objects. + /// + /// If in doubt, implement this method. + const HOLD: Option = None; + + /// If not None, this phase is called when the object leaves the reset + /// state. Actions affecting other objects are permitted. + const EXIT: Option = None; +} + +/// # Safety +/// +/// We expect the FFI user of this function to pass a valid pointer that +/// can be downcasted to type `T`. We also expect the device is +/// readable/writeable from one thread at any time. +unsafe extern "C" fn rust_resettable_enter_fn( + obj: *mut bindings::Object, + typ: ResetType, +) { + let state = NonNull::new(obj).unwrap().cast::(); + T::ENTER.unwrap()(unsafe { state.as_ref() }, typ); +} + +/// # Safety +/// +/// We expect the FFI user of this function to pass a valid pointer that +/// can be downcasted to type `T`. We also expect the device is +/// readable/writeable from one thread at any time. 
+unsafe extern "C" fn rust_resettable_hold_fn( + obj: *mut bindings::Object, + typ: ResetType, +) { + let state = NonNull::new(obj).unwrap().cast::(); + T::HOLD.unwrap()(unsafe { state.as_ref() }, typ); +} + +/// # Safety +/// +/// We expect the FFI user of this function to pass a valid pointer that +/// can be downcasted to type `T`. We also expect the device is +/// readable/writeable from one thread at any time. +unsafe extern "C" fn rust_resettable_exit_fn( + obj: *mut bindings::Object, + typ: ResetType, +) { + let state = NonNull::new(obj).unwrap().cast::(); + T::EXIT.unwrap()(unsafe { state.as_ref() }, typ); +} + +/// Trait providing the contents of [`DeviceClass`]. +pub trait DeviceImpl: ObjectImpl + ResettablePhasesImpl + IsA { + /// _Realization_ is the second stage of device creation. It contains + /// all operations that depend on device properties and can fail (note: + /// this is not yet supported for Rust devices). + /// + /// If not `None`, the parent class's `realize` method is overridden + /// with the function pointed to by `REALIZE`. + const REALIZE: Option Result<()>> = None; + + /// An array providing the properties that the user can set on the + /// device. Not a `const` because referencing statics in constants + /// is unstable until Rust 1.83.0. + fn properties() -> &'static [Property] { + &[] + } + + /// A `VMStateDescription` providing the migration format for the device + /// Not a `const` because referencing statics in constants is unstable + /// until Rust 1.83.0. + fn vmsd() -> Option<&'static VMStateDescription> { + None + } +} + +/// # Safety +/// +/// This function is only called through the QOM machinery and +/// used by `DeviceClass::class_init`. +/// We expect the FFI user of this function to pass a valid pointer that +/// can be downcasted to type `T`. We also expect the device is +/// readable/writeable from one thread at any time. +unsafe extern "C" fn rust_realize_fn( + dev: *mut bindings::DeviceState, + errp: *mut *mut bindings::Error, +) { + let state = NonNull::new(dev).unwrap().cast::(); + let result = T::REALIZE.unwrap()(unsafe { state.as_ref() }); + unsafe { + Error::ok_or_propagate(result, errp); + } +} + +unsafe impl InterfaceType for ResettableClass { + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_RESETTABLE_INTERFACE) }; +} + +impl ResettableClass { + /// Fill in the virtual methods of `ResettableClass` based on the + /// definitions in the `ResettablePhasesImpl` trait. + pub fn class_init(&mut self) { + if ::ENTER.is_some() { + self.phases.enter = Some(rust_resettable_enter_fn::); + } + if ::HOLD.is_some() { + self.phases.hold = Some(rust_resettable_hold_fn::); + } + if ::EXIT.is_some() { + self.phases.exit = Some(rust_resettable_exit_fn::); + } + } +} + +impl DeviceClass { + /// Fill in the virtual methods of `DeviceClass` based on the definitions in + /// the `DeviceImpl` trait. + pub fn class_init(&mut self) { + if ::REALIZE.is_some() { + self.realize = Some(rust_realize_fn::); + } + if let Some(vmsd) = ::vmsd() { + self.vmsd = vmsd; + } + let prop = ::properties(); + if !prop.is_empty() { + unsafe { + bindings::device_class_set_props_n(self, prop.as_ptr(), prop.len()); + } + } + + ResettableClass::cast::(self).class_init::(); + self.parent_class.class_init::(); + } +} + +#[macro_export] +macro_rules! 
define_property { + ($name:expr, $state:ty, $field:ident, $prop:expr, $type:ty, bit = $bitnr:expr, default = $defval:expr$(,)*) => { + $crate::bindings::Property { + // use associated function syntax for type checking + name: ::std::ffi::CStr::as_ptr($name), + info: $prop, + offset: ::std::mem::offset_of!($state, $field) as isize, + bitnr: $bitnr, + set_default: true, + defval: $crate::bindings::Property__bindgen_ty_1 { u: $defval as u64 }, + ..$crate::zeroable::Zeroable::ZERO + } + }; + ($name:expr, $state:ty, $field:ident, $prop:expr, $type:ty, default = $defval:expr$(,)*) => { + $crate::bindings::Property { + // use associated function syntax for type checking + name: ::std::ffi::CStr::as_ptr($name), + info: $prop, + offset: ::std::mem::offset_of!($state, $field) as isize, + set_default: true, + defval: $crate::bindings::Property__bindgen_ty_1 { u: $defval as u64 }, + ..$crate::zeroable::Zeroable::ZERO + } + }; + ($name:expr, $state:ty, $field:ident, $prop:expr, $type:ty$(,)*) => { + $crate::bindings::Property { + // use associated function syntax for type checking + name: ::std::ffi::CStr::as_ptr($name), + info: $prop, + offset: ::std::mem::offset_of!($state, $field) as isize, + set_default: false, + ..$crate::zeroable::Zeroable::ZERO + } + }; +} + +#[macro_export] +macro_rules! declare_properties { + ($ident:ident, $($prop:expr),*$(,)*) => { + pub static $ident: [$crate::bindings::Property; { + let mut len = 0; + $({ + _ = stringify!($prop); + len += 1; + })* + len + }] = [ + $($prop),*, + ]; + }; +} + +unsafe impl ObjectType for DeviceState { + type Class = DeviceClass; + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_DEVICE) }; +} +qom_isa!(DeviceState: Object); + +/// Initialization methods take a [`ParentInit`] and can be called as +/// associated functions. +impl DeviceState { + /// Add an input clock named `name`. Invoke the callback with + /// `self` as the first parameter for the events that are requested. + /// + /// The resulting clock is added as a child of `self`, but it also + /// stays alive until after `Drop::drop` is called because C code + /// keeps an extra reference to it until `device_finalize()` calls + /// `qdev_finalize_clocklist()`. Therefore (unlike most cases in + /// which Rust code has a reference to a child object) it would be + /// possible for this function to return a `&Clock` too. + #[inline] + pub fn init_clock_in FnCall<(&'a T, ClockEvent)>>( + this: &mut ParentInit, + name: &str, + _cb: &F, + events: ClockEvent, + ) -> Owned + where + T::ParentType: IsA, + { + fn do_init_clock_in( + dev: &DeviceState, + name: &str, + cb: Option, + events: ClockEvent, + ) -> Owned { + assert!(bql_locked()); + + // SAFETY: the clock is heap allocated, but qdev_init_clock_in() + // does not gift the reference to its caller; so use Owned::from to + // add one. The callback is disabled automatically when the clock + // is unparented, which happens before the device is finalized. 
+ unsafe { + let cstr = CString::new(name).unwrap(); + let clk = bindings::qdev_init_clock_in( + dev.0.as_mut_ptr(), + cstr.as_ptr(), + cb, + dev.0.as_void_ptr(), + events.0, + ); + + let clk: &Clock = Clock::from_raw(clk); + Owned::from(clk) + } + } + + let cb: Option = if F::is_some() { + unsafe extern "C" fn rust_clock_cb FnCall<(&'a T, ClockEvent)>>( + opaque: *mut c_void, + event: ClockEvent, + ) { + // SAFETY: the opaque is "this", which is indeed a pointer to T + F::call((unsafe { &*(opaque.cast::()) }, event)) + } + Some(rust_clock_cb::) + } else { + None + }; + + do_init_clock_in(unsafe { this.upcast_mut() }, name, cb, events) + } + + /// Add an output clock named `name`. + /// + /// The resulting clock is added as a child of `self`, but it also + /// stays alive until after `Drop::drop` is called because C code + /// keeps an extra reference to it until `device_finalize()` calls + /// `qdev_finalize_clocklist()`. Therefore (unlike most cases in + /// which Rust code has a reference to a child object) it would be + /// possible for this function to return a `&Clock` too. + #[inline] + pub fn init_clock_out(this: &mut ParentInit, name: &str) -> Owned + where + T::ParentType: IsA, + { + unsafe { + let cstr = CString::new(name).unwrap(); + let dev: &mut DeviceState = this.upcast_mut(); + let clk = bindings::qdev_init_clock_out(dev.0.as_mut_ptr(), cstr.as_ptr()); + + let clk: &Clock = Clock::from_raw(clk); + Owned::from(clk) + } + } +} + +/// Trait for methods exposed by the [`DeviceState`] class. The methods can be +/// called on all objects that have the trait `IsA`. +/// +/// The trait should only be used through the blanket implementation, +/// which guarantees safety via `IsA`. +pub trait DeviceMethods: ObjectDeref +where + Self::Target: IsA, +{ + fn prop_set_chr(&self, propname: &str, chr: &Owned) { + assert!(bql_locked()); + let c_propname = CString::new(propname).unwrap(); + let chr: &Chardev = chr; + unsafe { + bindings::qdev_prop_set_chr( + self.upcast().as_mut_ptr(), + c_propname.as_ptr(), + chr.as_mut_ptr(), + ); + } + } + + fn init_gpio_in FnCall<(&'a Self::Target, u32, u32)>>( + &self, + num_lines: u32, + _cb: F, + ) { + fn do_init_gpio_in( + dev: &DeviceState, + num_lines: u32, + gpio_in_cb: unsafe extern "C" fn(*mut c_void, c_int, c_int), + ) { + unsafe { + qdev_init_gpio_in(dev.as_mut_ptr(), Some(gpio_in_cb), num_lines as c_int); + } + } + + let _: () = F::ASSERT_IS_SOME; + unsafe extern "C" fn rust_irq_handler FnCall<(&'a T, u32, u32)>>( + opaque: *mut c_void, + line: c_int, + level: c_int, + ) { + // SAFETY: the opaque was passed as a reference to `T` + F::call((unsafe { &*(opaque.cast::()) }, line as u32, level as u32)) + } + + let gpio_in_cb: unsafe extern "C" fn(*mut c_void, c_int, c_int) = + rust_irq_handler::; + + do_init_gpio_in(self.upcast(), num_lines, gpio_in_cb); + } + + fn init_gpio_out(&self, pins: &[InterruptSource]) { + unsafe { + qdev_init_gpio_out( + self.upcast().as_mut_ptr(), + InterruptSource::slice_as_ptr(pins), + pins.len() as c_int, + ); + } + } +} + +impl DeviceMethods for R where R::Target: IsA {} + +unsafe impl ObjectType for Clock { + type Class = ObjectClass; + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_CLOCK) }; +} +qom_isa!(Clock: Object); diff --git a/rust/qemu-api/src/qom.rs b/rust/qemu-api/src/qom.rs new file mode 100644 index 00000000000..e20ee014cb1 --- /dev/null +++ b/rust/qemu-api/src/qom.rs @@ -0,0 +1,950 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis 
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+//! Bindings to access QOM functionality from Rust.
+//!
+//! The QEMU Object Model (QOM) provides inheritance and dynamic typing for QEMU
+//! devices. This module makes QOM's features available in Rust through three
+//! main mechanisms:
+//!
+//! * Automatic creation and registration of `TypeInfo` for classes that are
+//!   written in Rust, as well as mapping between Rust traits and QOM vtables.
+//!
+//! * Type-safe casting between parent and child classes, through the [`IsA`]
+//!   trait and methods such as [`upcast`](ObjectCast::upcast) and
+//!   [`downcast`](ObjectCast::downcast).
+//!
+//! * Automatic delegation of parent class methods to child classes. When a
+//!   trait uses [`IsA`] as a bound, its contents become available to all child
+//!   classes through blanket implementations. This works both for class methods
+//!   and for instance methods accessed through references or smart pointers.
+//!
+//! # Structure of a class
+//!
+//! A leaf class only needs a struct holding instance state. The struct must
+//! implement the [`ObjectType`] and [`IsA`] traits, as well as any `*Impl`
+//! traits that exist for its superclasses.
+//!
+//! If a class has subclasses, it will also provide a struct for instance data,
+//! with the same characteristics as for concrete classes, but it also needs
+//! additional components to support virtual methods:
+//!
+//! * a struct for class data, for example `DeviceClass`. This corresponds to
+//!   the C "class struct" and holds the vtable that is used by instances of the
+//!   class and its subclasses. It must start with its parent's class struct.
+//!
+//! * a trait for virtual method implementations, for example `DeviceImpl`.
+//!   Child classes implement this trait to provide their own behavior for
+//!   virtual methods. The trait's methods take `&self` to access instance data.
+//!   The traits have the appropriate specialization of `IsA<>` as a supertrait,
+//!   for example `IsA<DeviceState>` for `DeviceImpl`.
+//!
+//! * a trait for instance methods, for example `DeviceMethods`. This trait is
+//!   automatically implemented for any reference or smart pointer to a device
+//!   instance. It calls into the vtable and provides access across all
+//!   subclasses to methods defined for the class.
+//!
+//! * optionally, a trait for class methods, for example `DeviceClassMethods`.
+//!   This provides access to class-wide functionality that doesn't depend on
+//!   instance data. Like instance methods, these are automatically inherited by
+//!   child classes.
+//!
+//! # Class structures
+//!
+//! Each QOM class that has virtual methods describes them in a
+//! _class struct_. Class structs include a parent field corresponding
+//! to the vtable of the parent class, all the way up to [`ObjectClass`].
+//!
+//! As mentioned above, virtual methods are defined via traits such as
+//! `DeviceImpl`. Class structs do not define any trait but, conventionally,
+//! all of them have a `class_init` method to initialize the virtual methods
+//! based on the trait and then call the same method on the superclass.
+//!
+//! ```ignore
+//! impl YourSubclassClass
+//! {
+//!     pub fn class_init<T: YourSubclassImpl>(&mut self) {
+//!         ...
+//!         klass.parent_class::class_init::<T>();
+//!     }
+//! }
+//! ```
+//!
+//! If a class implements a QOM interface, the `class_init` function must also
+//! contain, for each interface, an extra forwarding call as follows:
+//!
+//! ```ignore
+//! ResettableClass::cast::<DeviceState>(self).class_init::<T>();
+//! ```
+//!
+//! These `class_init` functions are methods on the class rather than a trait,
+//! because the bound on `T` (`DeviceImpl` in this case) will change for every
+//! class struct. The functions are pointed to by the
+//! [`ObjectImpl::CLASS_INIT`] function pointer. While there is no default
+//! implementation, in most cases it will be enough to write it as follows:
+//!
+//! ```ignore
+//! const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
+//! ```
+//!
+//! This design incurs a small amount of code duplication but, by not using
+//! traits, it allows the flexibility of implementing bindings in any crate,
+//! without incurring violations of the orphan rules for traits.
+
+use std::{
+    ffi::{c_void, CStr},
+    fmt,
+    marker::PhantomData,
+    mem::{ManuallyDrop, MaybeUninit},
+    ops::{Deref, DerefMut},
+    ptr::NonNull,
+};
+
+pub use bindings::ObjectClass;
+
+use crate::{
+    bindings::{
+        self, object_class_dynamic_cast, object_dynamic_cast, object_get_class,
+        object_get_typename, object_new, object_ref, object_unref, TypeInfo,
+    },
+    cell::{bql_locked, Opaque},
+};
+
+/// A safe wrapper around [`bindings::Object`].
+#[repr(transparent)]
+#[derive(Debug, qemu_api_macros::Wrapper)]
+pub struct Object(Opaque<bindings::Object>);
+
+unsafe impl Send for Object {}
+unsafe impl Sync for Object {}
+
+/// Marker trait: `Self` can be statically upcasted to `P` (i.e. `P` is a direct
+/// or indirect parent of `Self`).
+///
+/// # Safety
+///
+/// The struct `Self` must be `#[repr(C)]` and must begin, directly or
+/// indirectly, with a field of type `P`. This ensures that invalid casts,
+/// which rely on `IsA<>` for static checking, are rejected at compile time.
+pub unsafe trait IsA<P: ObjectType>: ObjectType {}
+
+// SAFETY: it is always safe to cast to your own type
+unsafe impl<T: ObjectType> IsA<T> for T {}
+
+/// Macro to mark superclasses of QOM classes. This enables type-safe
+/// up- and downcasting.
+///
+/// # Safety
+///
+/// This macro is a thin wrapper around the [`IsA`] trait and performs
+/// no checking whatsoever of what is declared. It is the caller's
+/// responsibility to have $struct begin, directly or indirectly, with
+/// a field of type `$parent`.
+#[macro_export]
+macro_rules! qom_isa {
+    ($struct:ty : $($parent:ty),* ) => {
+        $(
+            // SAFETY: it is the caller responsibility to have $parent as the
+            // first field
+            unsafe impl $crate::qom::IsA<$parent> for $struct {}
+
+            impl AsRef<$parent> for $struct {
+                fn as_ref(&self) -> &$parent {
+                    // SAFETY: follows the same rules as for IsA, which is
+                    // declared above.
+                    let ptr: *const Self = self;
+                    unsafe { &*ptr.cast::<$parent>() }
+                }
+            }
+        )*
+    };
+}
+
+/// This is the same as [`ManuallyDrop`](std::mem::ManuallyDrop), though
+/// it hides the standard methods of `ManuallyDrop`.
+///
+/// The first field of an `ObjectType` must be of type `ParentField<T>`.
+/// (Technically, this is only necessary if there is at least one Rust
+/// superclass in the hierarchy). This is to ensure that the parent field is
+/// dropped after the subclass; this drop order is enforced by the C
+/// `object_deinit` function.
+///
+/// # Examples
+///
+/// ```ignore
+/// #[repr(C)]
+/// #[derive(qemu_api_macros::Object)]
+/// pub struct MyDevice {
+///     parent: ParentField<DeviceState>,
+///     ...
+/// } +/// ``` +#[derive(Debug)] +#[repr(transparent)] +pub struct ParentField(std::mem::ManuallyDrop); + +impl Deref for ParentField { + type Target = T; + + #[inline(always)] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for ParentField { + #[inline(always)] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl fmt::Display for ParentField { + #[inline(always)] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + self.0.fmt(f) + } +} + +/// This struct knows that the superclasses of the object have already been +/// initialized. +/// +/// The declaration of `ParentInit` is.. *"a kind of magic"*. It uses a +/// technique that is found in several crates, the main ones probably being +/// `ghost-cell` (in fact it was introduced by the [`GhostCell` paper](https://plv.mpi-sws.org/rustbelt/ghostcell/)) +/// and `generativity`. +/// +/// The `PhantomData` makes the `ParentInit` type *invariant* with respect to +/// the lifetime argument `'init`. This, together with the `for<'...>` in +/// `[ParentInit::with]`, block any attempt of the compiler to be creative when +/// operating on types of type `ParentInit` and to extend their lifetimes. In +/// particular, it ensures that the `ParentInit` cannot be made to outlive the +/// `rust_instance_init()` function that creates it, and therefore that the +/// `&'init T` reference is valid. +/// +/// This implementation of the same concept, without the QOM baggage, can help +/// understanding the effect: +/// +/// ``` +/// use std::marker::PhantomData; +/// +/// #[derive(PartialEq, Eq)] +/// pub struct Jail<'closure, T: Copy>(&'closure T, PhantomData &'closure ()>); +/// +/// impl<'closure, T: Copy> Jail<'closure, T> { +/// fn get(&self) -> T { +/// *self.0 +/// } +/// +/// #[inline] +/// fn with(v: T, f: impl for<'id> FnOnce(Jail<'id, T>) -> U) -> U { +/// let parent_init = Jail(&v, PhantomData); +/// f(parent_init) +/// } +/// } +/// ``` +/// +/// It's impossible to escape the `Jail`; `token1` cannot be moved out of the +/// closure: +/// +/// ```ignore +/// let x = 42; +/// let escape = Jail::with(&x, |token1| { +/// println!("{}", token1.get()); +/// // fails to compile... +/// token1 +/// }); +/// // ... so you cannot do this: +/// println!("{}", escape.get()); +/// ``` +/// +/// Likewise, in the QOM case the `ParentInit` cannot be moved out of +/// `instance_init()`. Without this trick it would be possible to stash a +/// `ParentInit` and use it later to access uninitialized memory. +/// +/// Here is another example, showing how separately-created "identities" stay +/// isolated: +/// +/// ```ignore +/// impl<'closure, T: Copy> Clone for Jail<'closure, T> { +/// fn clone(&self) -> Jail<'closure, T> { +/// Jail(self.0, PhantomData) +/// } +/// } +/// +/// fn main() { +/// Jail::with(42, |token1| { +/// // this works and returns true: the clone has the same "identity" +/// println!("{}", token1 == token1.clone()); +/// Jail::with(42, |token2| { +/// // here the outer token remains accessible... +/// println!("{}", token1.get()); +/// // ... 
but the two are separate: this fails to compile: +/// println!("{}", token1 == token2); +/// }); +/// }); +/// } +/// ``` +pub struct ParentInit<'init, T>( + &'init mut MaybeUninit, + PhantomData &'init ()>, +); + +impl<'init, T> ParentInit<'init, T> { + #[inline] + pub fn with(obj: &'init mut MaybeUninit, f: impl for<'id> FnOnce(ParentInit<'id, T>)) { + let parent_init = ParentInit(obj, PhantomData); + f(parent_init) + } +} + +impl ParentInit<'_, T> { + /// Return the receiver as a mutable raw pointer to Object. + /// + /// # Safety + /// + /// Fields beyond `Object` could be uninitialized and it's your + /// responsibility to avoid that they're used when the pointer is + /// dereferenced, either directly or through a cast. + pub fn as_object_mut_ptr(&self) -> *mut bindings::Object { + self.as_object_ptr().cast_mut() + } + + /// Return the receiver as a mutable raw pointer to Object. + /// + /// # Safety + /// + /// Fields beyond `Object` could be uninitialized and it's your + /// responsibility to avoid that they're used when the pointer is + /// dereferenced, either directly or through a cast. + pub fn as_object_ptr(&self) -> *const bindings::Object { + self.0.as_ptr().cast() + } +} + +impl<'a, T: ObjectImpl> ParentInit<'a, T> { + /// Convert from a derived type to one of its parent types, which + /// have already been initialized. + /// + /// # Safety + /// + /// Structurally this is always a safe operation; the [`IsA`] trait + /// provides static verification trait that `Self` dereferences to `U` or + /// a child of `U`, and only parent types of `T` are allowed. + /// + /// However, while the fields of the resulting reference are initialized, + /// calls might use uninitialized fields of the subclass. It is your + /// responsibility to avoid this. + pub unsafe fn upcast(&self) -> &'a U + where + T::ParentType: IsA, + { + // SAFETY: soundness is declared via IsA, which is an unsafe trait; + // the parent has been initialized before `instance_init `is called + unsafe { &*(self.0.as_ptr().cast::()) } + } + + /// Convert from a derived type to one of its parent types, which + /// have already been initialized. + /// + /// # Safety + /// + /// Structurally this is always a safe operation; the [`IsA`] trait + /// provides static verification trait that `Self` dereferences to `U` or + /// a child of `U`, and only parent types of `T` are allowed. + /// + /// However, while the fields of the resulting reference are initialized, + /// calls might use uninitialized fields of the subclass. It is your + /// responsibility to avoid this. 
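    ///
    /// # Examples
    ///
    /// A sketch of the typical use (illustrative, not from this patch):
    /// obtaining the already-initialized parent part of a device inside
    /// `instance_init`, as `DeviceState::init_clock_out` does:
    ///
    /// ```ignore
    /// let dev: &mut DeviceState = unsafe { this.upcast_mut() };
    /// ```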
+ pub unsafe fn upcast_mut(&mut self) -> &'a mut U + where + T::ParentType: IsA, + { + // SAFETY: soundness is declared via IsA, which is an unsafe trait; + // the parent has been initialized before `instance_init `is called + unsafe { &mut *(self.0.as_mut_ptr().cast::()) } + } +} + +impl Deref for ParentInit<'_, T> { + type Target = MaybeUninit; + + fn deref(&self) -> &Self::Target { + self.0 + } +} + +impl DerefMut for ParentInit<'_, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.0 + } +} + +unsafe extern "C" fn rust_instance_init(obj: *mut bindings::Object) { + let mut state = NonNull::new(obj).unwrap().cast::>(); + + // SAFETY: obj is an instance of T, since rust_instance_init + // is called from QOM core as the instance_init function + // for class T + unsafe { + ParentInit::with(state.as_mut(), |parent_init| { + T::INSTANCE_INIT.unwrap()(parent_init); + }); + } +} + +unsafe extern "C" fn rust_instance_post_init(obj: *mut bindings::Object) { + let state = NonNull::new(obj).unwrap().cast::(); + // SAFETY: obj is an instance of T, since rust_instance_post_init + // is called from QOM core as the instance_post_init function + // for class T + T::INSTANCE_POST_INIT.unwrap()(unsafe { state.as_ref() }); +} + +unsafe extern "C" fn rust_class_init( + klass: *mut ObjectClass, + _data: *const c_void, +) { + let mut klass = NonNull::new(klass) + .unwrap() + .cast::<::Class>(); + // SAFETY: klass is a T::Class, since rust_class_init + // is called from QOM core as the class_init function + // for class T + ::CLASS_INIT(unsafe { klass.as_mut() }) +} + +unsafe extern "C" fn drop_object(obj: *mut bindings::Object) { + // SAFETY: obj is an instance of T, since drop_object is called + // from the QOM core function object_deinit() as the instance_finalize + // function for class T. Note that while object_deinit() will drop the + // superclass field separately after this function returns, `T` must + // implement the unsafe trait ObjectType; the safety rules for the + // trait mandate that the parent field is manually dropped. + unsafe { std::ptr::drop_in_place(obj.cast::()) } +} + +/// Trait exposed by all structs corresponding to QOM objects. +/// +/// # Safety +/// +/// For classes declared in C: +/// +/// - `Class` and `TYPE` must match the data in the `TypeInfo`; +/// +/// - the first field of the struct must be of the instance type corresponding +/// to the superclass, as declared in the `TypeInfo` +/// +/// - likewise, the first field of the `Class` struct must be of the class type +/// corresponding to the superclass +/// +/// For classes declared in Rust and implementing [`ObjectImpl`]: +/// +/// - the struct must be `#[repr(C)]`; +/// +/// - the first field of the struct must be of type +/// [`ParentField`](ParentField), where `T` is the parent type +/// [`ObjectImpl::ParentType`] +/// +/// - the first field of the `Class` must be of the class struct corresponding +/// to the superclass, which is `ObjectImpl::ParentType::Class`. `ParentField` +/// is not needed here. +/// +/// In both cases, having a separate class type is not necessary if the subclass +/// does not add any field. +pub unsafe trait ObjectType: Sized { + /// The QOM class object corresponding to this struct. This is used + /// to automatically generate a `class_init` method. + type Class; + + /// The name of the type, which can be passed to `object_new()` to + /// generate an instance of this type. + const TYPE_NAME: &'static CStr; + + /// Return the receiver as an Object. 
This is always safe, even + /// if this type represents an interface. + fn as_object(&self) -> &Object { + unsafe { &*self.as_ptr().cast() } + } + + /// Return the receiver as a const raw pointer to Object. + /// This is preferable to `as_object_mut_ptr()` if a C + /// function only needs a `const Object *`. + fn as_object_ptr(&self) -> *const bindings::Object { + self.as_object().as_ptr() + } + + /// Return the receiver as a mutable raw pointer to Object. + /// + /// # Safety + /// + /// This cast is always safe, but because the result is mutable + /// and the incoming reference is not, this should only be used + /// for calls to C functions, and only if needed. + unsafe fn as_object_mut_ptr(&self) -> *mut bindings::Object { + self.as_object().as_mut_ptr() + } +} + +/// Trait exposed by all structs corresponding to QOM interfaces. +/// Unlike `ObjectType`, it is implemented on the class type (which provides +/// the vtable for the interfaces). +/// +/// # Safety +/// +/// `TYPE` must match the contents of the `TypeInfo` as found in the C code; +/// right now, interfaces can only be declared in C. +pub unsafe trait InterfaceType: Sized { + /// The name of the type, which can be passed to + /// `object_class_dynamic_cast()` to obtain the pointer to the vtable + /// for this interface. + const TYPE_NAME: &'static CStr; + + /// Return the vtable for the interface; `U` is the type that + /// lists the interface in its `TypeInfo`. + /// + /// # Examples + /// + /// This function is usually called by a `class_init` method in `U::Class`. + /// For example, `DeviceClass::class_init` initializes its `Resettable` + /// interface as follows: + /// + /// ```ignore + /// ResettableClass::cast::(self).class_init::(); + /// ``` + /// + /// where `T` is the concrete subclass that is being initialized. + /// + /// # Panics + /// + /// Panic if the incoming argument if `T` does not implement the interface. + fn cast(klass: &mut U::Class) -> &mut Self { + unsafe { + // SAFETY: upcasting to ObjectClass is always valid, and the + // return type is either NULL or the argument itself + let result: *mut Self = object_class_dynamic_cast( + (klass as *mut U::Class).cast(), + Self::TYPE_NAME.as_ptr(), + ) + .cast(); + result.as_mut().unwrap() + } + } +} + +/// This trait provides safe casting operations for QOM objects to raw pointers, +/// to be used for example for FFI. The trait can be applied to any kind of +/// reference or smart pointers, and enforces correctness through the [`IsA`] +/// trait. +pub trait ObjectDeref: Deref +where + Self::Target: ObjectType, +{ + /// Convert to a const Rust pointer, to be used for example for FFI. + /// The target pointer type must be the type of `self` or a superclass + fn as_ptr(&self) -> *const U + where + Self::Target: IsA, + { + let ptr: *const Self::Target = self.deref(); + ptr.cast::() + } + + /// Convert to a mutable Rust pointer, to be used for example for FFI. + /// The target pointer type must be the type of `self` or a superclass. + /// Used to implement interior mutability for objects. + /// + /// # Safety + /// + /// This method is safe because only the actual dereference of the pointer + /// has to be unsafe. Bindings to C APIs will use it a lot, but care has + /// to be taken because it overrides the const-ness of `&self`. + fn as_mut_ptr(&self) -> *mut U + where + Self::Target: IsA, + { + #[allow(clippy::as_ptr_cast_mut)] + { + self.as_ptr::().cast_mut() + } + } +} + +/// Trait that adds extra functionality for `&T` where `T` is a QOM +/// object type. 
Allows conversion to/from C objects in generic code. +pub trait ObjectCast: ObjectDeref + Copy +where + Self::Target: ObjectType, +{ + /// Safely convert from a derived type to one of its parent types. + /// + /// This is always safe; the [`IsA`] trait provides static verification + /// trait that `Self` dereferences to `U` or a child of `U`. + fn upcast<'a, U: ObjectType>(self) -> &'a U + where + Self::Target: IsA, + Self: 'a, + { + // SAFETY: soundness is declared via IsA, which is an unsafe trait + unsafe { self.unsafe_cast::() } + } + + /// Attempt to convert to a derived type. + /// + /// Returns `None` if the object is not actually of type `U`. This is + /// verified at runtime by checking the object's type information. + fn downcast<'a, U: IsA>(self) -> Option<&'a U> + where + Self: 'a, + { + self.dynamic_cast::() + } + + /// Attempt to convert between any two types in the QOM hierarchy. + /// + /// Returns `None` if the object is not actually of type `U`. This is + /// verified at runtime by checking the object's type information. + fn dynamic_cast<'a, U: ObjectType>(self) -> Option<&'a U> + where + Self: 'a, + { + unsafe { + // SAFETY: upcasting to Object is always valid, and the + // return type is either NULL or the argument itself + let result: *const U = + object_dynamic_cast(self.as_object_mut_ptr(), U::TYPE_NAME.as_ptr()).cast(); + + result.as_ref() + } + } + + /// Convert to any QOM type without verification. + /// + /// # Safety + /// + /// What safety? You need to know yourself that the cast is correct; only + /// use when performance is paramount. It is still better than a raw + /// pointer `cast()`, which does not even check that you remain in the + /// realm of QOM `ObjectType`s. + /// + /// `unsafe_cast::()` is always safe. + unsafe fn unsafe_cast<'a, U: ObjectType>(self) -> &'a U + where + Self: 'a, + { + unsafe { &*(self.as_ptr::().cast::()) } + } +} + +impl ObjectDeref for &T {} +impl ObjectCast for &T {} + +impl ObjectDeref for &mut T {} + +/// Trait a type must implement to be registered with QEMU. +pub trait ObjectImpl: ObjectType + IsA { + /// The parent of the type. This should match the first field of the + /// struct that implements `ObjectImpl`, minus the `ParentField<_>` wrapper. + type ParentType: ObjectType; + + /// Whether the object can be instantiated + const ABSTRACT: bool = false; + + /// Function that is called to initialize an object. The parent class will + /// have already been initialized so the type is only responsible for + /// initializing its own members. + /// + /// FIXME: The argument is not really a valid reference. `&mut + /// MaybeUninit` would be a better description. + const INSTANCE_INIT: Option)> = None; + + /// Function that is called to finish initialization of an object, once + /// `INSTANCE_INIT` functions have been called. + const INSTANCE_POST_INIT: Option = None; + + /// Called on descendant classes after all parent class initialization + /// has occurred, but before the class itself is initialized. This + /// is only useful if a class is not a leaf, and can be used to undo + /// the effects of copying the contents of the parent's class struct + /// to the descendants. 
+ const CLASS_BASE_INIT: Option< + unsafe extern "C" fn(klass: *mut ObjectClass, data: *const c_void), + > = None; + + const TYPE_INFO: TypeInfo = TypeInfo { + name: Self::TYPE_NAME.as_ptr(), + parent: Self::ParentType::TYPE_NAME.as_ptr(), + instance_size: core::mem::size_of::(), + instance_align: core::mem::align_of::(), + instance_init: match Self::INSTANCE_INIT { + None => None, + Some(_) => Some(rust_instance_init::), + }, + instance_post_init: match Self::INSTANCE_POST_INIT { + None => None, + Some(_) => Some(rust_instance_post_init::), + }, + instance_finalize: Some(drop_object::), + abstract_: Self::ABSTRACT, + class_size: core::mem::size_of::(), + class_init: Some(rust_class_init::), + class_base_init: Self::CLASS_BASE_INIT, + class_data: core::ptr::null(), + interfaces: core::ptr::null(), + }; + + // methods on ObjectClass + const UNPARENT: Option = None; + + /// Store into the argument the virtual method implementations + /// for `Self`. On entry, the virtual method pointers are set to + /// the default values coming from the parent classes; the function + /// can change them to override virtual methods of a parent class. + /// + /// Usually defined simply as `Self::Class::class_init::`; + /// however a default implementation cannot be included here, because the + /// bounds that the `Self::Class::class_init` method places on `Self` are + /// not known in advance. + /// + /// # Safety + /// + /// While `klass`'s parent class is initialized on entry, the other fields + /// are all zero; it is therefore assumed that all fields in `T` can be + /// zeroed, otherwise it would not be possible to provide the class as a + /// `&mut T`. TODO: it may be possible to add an unsafe trait that checks + /// that all fields *after the parent class* (but not the parent class + /// itself) are Zeroable. This unsafe trait can be added via a derive + /// macro. + const CLASS_INIT: fn(&mut Self::Class); +} + +/// # Safety +/// +/// We expect the FFI user of this function to pass a valid pointer that +/// can be downcasted to type `T`. We also expect the device is +/// readable/writeable from one thread at any time. +unsafe extern "C" fn rust_unparent_fn(dev: *mut bindings::Object) { + let state = NonNull::new(dev).unwrap().cast::(); + T::UNPARENT.unwrap()(unsafe { state.as_ref() }); +} + +impl ObjectClass { + /// Fill in the virtual methods of `ObjectClass` based on the definitions in + /// the `ObjectImpl` trait. + pub fn class_init(&mut self) { + if ::UNPARENT.is_some() { + self.unparent = Some(rust_unparent_fn::); + } + } +} + +unsafe impl ObjectType for Object { + type Class = ObjectClass; + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_OBJECT) }; +} + +/// A reference-counted pointer to a QOM object. +/// +/// `Owned` wraps `T` with automatic reference counting. It increases the +/// reference count when created via [`Owned::from`] or cloned, and decreases +/// it when dropped. This ensures that the reference count remains elevated +/// as long as any `Owned` references to it exist. 
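///
/// A sketch of typical use (illustrative, not from this patch): class methods
/// such as [`ObjectClassMethods::new`] hand out owned references directly;
/// `MyDevice` is a hypothetical type and the BQL must be held.
///
/// ```ignore
/// let dev: Owned<MyDevice> = MyDevice::new();
/// ```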
+/// +/// `Owned` can be used for two reasons: +/// * because the lifetime of the QOM object is unknown and someone else could +/// take a reference (similar to `Arc`, for example): in this case, the +/// object can escape and outlive the Rust struct that contains the `Owned` +/// field; +/// +/// * to ensure that the object stays alive until after `Drop::drop` is called +/// on the Rust struct: in this case, the object will always die together with +/// the Rust struct that contains the `Owned` field. +/// +/// Child properties are an example of the second case: in C, an object that +/// is created with `object_initialize_child` will die *before* +/// `instance_finalize` is called, whereas Rust expects the struct to have valid +/// contents when `Drop::drop` is called. Therefore Rust structs that have +/// child properties need to keep a reference to the child object. Right now +/// this can be done with `Owned`; in the future one might have a separate +/// `Child<'parent, T>` smart pointer that keeps a reference to a `T`, like +/// `Owned`, but does not allow cloning. +/// +/// Note that dropping an `Owned` requires the big QEMU lock to be taken. +#[repr(transparent)] +#[derive(PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct Owned(NonNull); + +// The following rationale for safety is taken from Linux's kernel::sync::Arc. + +// SAFETY: It is safe to send `Owned` to another thread when the underlying +// `T` is `Sync` because it effectively means sharing `&T` (which is safe +// because `T` is `Sync`); additionally, it needs `T` to be `Send` because any +// thread that has an `Owned` may ultimately access `T` using a +// mutable reference when the reference count reaches zero and `T` is dropped. +unsafe impl Send for Owned {} + +// SAFETY: It is safe to send `&Owned` to another thread when the underlying +// `T` is `Sync` because it effectively means sharing `&T` (which is safe +// because `T` is `Sync`); additionally, it needs `T` to be `Send` because any +// thread that has a `&Owned` may clone it and get an `Owned` on that +// thread, so the thread may ultimately access `T` using a mutable reference +// when the reference count reaches zero and `T` is dropped. +unsafe impl Sync for Owned {} + +impl Owned { + /// Convert a raw C pointer into an owned reference to the QOM + /// object it points to. The object's reference count will be + /// decreased when the `Owned` is dropped. + /// + /// # Panics + /// + /// Panics if `ptr` is NULL. + /// + /// # Safety + /// + /// The caller must indeed own a reference to the QOM object. + /// The object must not be embedded in another unless the outer + /// object is guaranteed to have a longer lifetime. + /// + /// A raw pointer obtained via [`Owned::into_raw()`] can always be passed + /// back to `from_raw()` (assuming the original `Owned` was valid!), + /// since the owned reference remains there between the calls to + /// `into_raw()` and `from_raw()`. + pub unsafe fn from_raw(ptr: *const T) -> Self { + // SAFETY NOTE: while NonNull requires a mutable pointer, only + // Deref is implemented so the pointer passed to from_raw + // remains const + Owned(NonNull::new(ptr.cast_mut()).unwrap()) + } + + /// Obtain a raw C pointer from a reference. `src` is consumed + /// and the reference is leaked. + #[allow(clippy::missing_const_for_fn)] + pub fn into_raw(src: Owned) -> *mut T { + let src = ManuallyDrop::new(src); + src.0.as_ptr() + } + + /// Increase the reference count of a QOM object and return + /// a new owned reference to it. 
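    ///
    /// # Examples
    ///
    /// (Illustrative sketch, not from this patch; `raw_clock` is a
    /// hypothetical pointer.) Wrapping a child object returned by C code that
    /// keeps its own reference, as `DeviceState::init_clock_in` does for
    /// clocks:
    ///
    /// ```ignore
    /// let clk: &Clock = unsafe { Clock::from_raw(raw_clock) };
    /// let owned: Owned<Clock> = unsafe { Owned::from(clk) };
    /// ```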
+ /// + /// # Safety + /// + /// The object must not be embedded in another, unless the outer + /// object is guaranteed to have a longer lifetime. + pub unsafe fn from(obj: &T) -> Self { + unsafe { + object_ref(obj.as_object_mut_ptr().cast::()); + + // SAFETY NOTE: while NonNull requires a mutable pointer, only + // Deref is implemented so the reference passed to from_raw + // remains shared + Owned(NonNull::new_unchecked(obj.as_mut_ptr())) + } + } +} + +impl Clone for Owned { + fn clone(&self) -> Self { + // SAFETY: creation method is unsafe; whoever calls it has + // responsibility that the pointer is valid, and remains valid + // throughout the lifetime of the `Owned` and its clones. + unsafe { Owned::from(self.deref()) } + } +} + +impl Deref for Owned { + type Target = T; + + fn deref(&self) -> &Self::Target { + // SAFETY: creation method is unsafe; whoever calls it has + // responsibility that the pointer is valid, and remains valid + // throughout the lifetime of the `Owned` and its clones. + // With that guarantee, reference counting ensures that + // the object remains alive. + unsafe { &*self.0.as_ptr() } + } +} +impl ObjectDeref for Owned {} + +impl Drop for Owned { + fn drop(&mut self) { + assert!(bql_locked()); + // SAFETY: creation method is unsafe, and whoever calls it has + // responsibility that the pointer is valid, and remains valid + // throughout the lifetime of the `Owned` and its clones. + unsafe { + object_unref(self.as_object_mut_ptr().cast::()); + } + } +} + +impl> fmt::Debug for Owned { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.deref().debug_fmt(f) + } +} + +/// Trait for class methods exposed by the Object class. The methods can be +/// called on all objects that have the trait `IsA`. +/// +/// The trait should only be used through the blanket implementation, +/// which guarantees safety via `IsA` +pub trait ObjectClassMethods: IsA { + /// Return a new reference counted instance of this class + fn new() -> Owned { + assert!(bql_locked()); + // SAFETY: the object created by object_new is allocated on + // the heap and has a reference count of 1 + unsafe { + let raw_obj = object_new(Self::TYPE_NAME.as_ptr()); + let obj = Object::from_raw(raw_obj).unsafe_cast::(); + Owned::from_raw(obj) + } + } +} + +/// Trait for methods exposed by the Object class. The methods can be +/// called on all objects that have the trait `IsA`. +/// +/// The trait should only be used through the blanket implementation, +/// which guarantees safety via `IsA` +pub trait ObjectMethods: ObjectDeref +where + Self::Target: IsA, +{ + /// Return the name of the type of `self` + fn typename(&self) -> std::borrow::Cow<'_, str> { + let obj = self.upcast::(); + // SAFETY: safety of this is the requirement for implementing IsA + // The result of the C API has static lifetime + unsafe { + let p = object_get_typename(obj.as_mut_ptr()); + CStr::from_ptr(p).to_string_lossy() + } + } + + fn get_class(&self) -> &'static ::Class { + let obj = self.upcast::(); + + // SAFETY: all objects can call object_get_class; the actual class + // type is guaranteed by the implementation of `ObjectType` and + // `ObjectImpl`. 
+ let klass: &'static ::Class = + unsafe { &*object_get_class(obj.as_mut_ptr()).cast() }; + + klass + } + + /// Convenience function for implementing the Debug trait + fn debug_fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple(&self.typename()) + .field(&(self as *const Self)) + .finish() + } +} + +impl ObjectClassMethods for T where T: IsA {} +impl ObjectMethods for R where R::Target: IsA {} diff --git a/rust/qemu-api/src/sysbus.rs b/rust/qemu-api/src/sysbus.rs new file mode 100644 index 00000000000..e92502a8fe6 --- /dev/null +++ b/rust/qemu-api/src/sysbus.rs @@ -0,0 +1,122 @@ +// Copyright 2024 Red Hat, Inc. +// Author(s): Paolo Bonzini +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Bindings to access `sysbus` functionality from Rust. + +use std::{ffi::CStr, ptr::addr_of_mut}; + +pub use bindings::SysBusDeviceClass; + +use crate::{ + bindings, + cell::{bql_locked, Opaque}, + irq::{IRQState, InterruptSource}, + memory::MemoryRegion, + prelude::*, + qdev::{DeviceImpl, DeviceState}, + qom::Owned, +}; + +/// A safe wrapper around [`bindings::SysBusDevice`]. +#[repr(transparent)] +#[derive(Debug, qemu_api_macros::Wrapper)] +pub struct SysBusDevice(Opaque); + +unsafe impl Send for SysBusDevice {} +unsafe impl Sync for SysBusDevice {} + +unsafe impl ObjectType for SysBusDevice { + type Class = SysBusDeviceClass; + const TYPE_NAME: &'static CStr = + unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_SYS_BUS_DEVICE) }; +} +qom_isa!(SysBusDevice: DeviceState, Object); + +// TODO: add virtual methods +pub trait SysBusDeviceImpl: DeviceImpl + IsA {} + +impl SysBusDeviceClass { + /// Fill in the virtual methods of `SysBusDeviceClass` based on the + /// definitions in the `SysBusDeviceImpl` trait. + pub fn class_init(self: &mut SysBusDeviceClass) { + self.parent_class.class_init::(); + } +} + +/// Trait for methods of [`SysBusDevice`] and its subclasses. +pub trait SysBusDeviceMethods: ObjectDeref +where + Self::Target: IsA, +{ + /// Expose a memory region to the board so that it can give it an address + /// in guest memory. Note that the ordering of calls to `init_mmio` is + /// important, since whoever creates the sysbus device will refer to the + /// region with a number that corresponds to the order of calls to + /// `init_mmio`. + fn init_mmio(&self, iomem: &MemoryRegion) { + assert!(bql_locked()); + unsafe { + bindings::sysbus_init_mmio(self.upcast().as_mut_ptr(), iomem.as_mut_ptr()); + } + } + + /// Expose an interrupt source outside the device as a qdev GPIO output. + /// Note that the ordering of calls to `init_irq` is important, since + /// whoever creates the sysbus device will refer to the interrupts with + /// a number that corresponds to the order of calls to `init_irq`. + fn init_irq(&self, irq: &InterruptSource) { + assert!(bql_locked()); + unsafe { + bindings::sysbus_init_irq(self.upcast().as_mut_ptr(), irq.as_ptr()); + } + } + + // TODO: do we want a type like GuestAddress here? + fn mmio_addr(&self, id: u32) -> Option { + assert!(bql_locked()); + // SAFETY: the BQL ensures that no one else writes to sbd.mmio[], and + // the SysBusDevice must be initialized to get an IsA. + let sbd = unsafe { *self.upcast().as_ptr() }; + let id: usize = id.try_into().unwrap(); + if sbd.mmio[id].memory.is_null() { + None + } else { + Some(sbd.mmio[id].addr) + } + } + + // TODO: do we want a type like GuestAddress here? 
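    // Illustrative sketch (not part of this patch): a device that is an
    // IsA<SysBusDevice> typically exposes its resources while it is set up,
    // and board code then maps or connects them by index. `iomem`, `irq` and
    // `base` are hypothetical names.
    //
    //     self.init_mmio(&self.iomem);
    //     self.init_irq(&self.irq);
    //     // ...later, from the machine code:
    //     sysbus_dev.mmio_map(0, base);
    //     sysbus_dev.connect_irq(0, &board_irq);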
+ fn mmio_map(&self, id: u32, addr: u64) { + assert!(bql_locked()); + let id: i32 = id.try_into().unwrap(); + unsafe { + bindings::sysbus_mmio_map(self.upcast().as_mut_ptr(), id, addr); + } + } + + // Owned<> is used here because sysbus_connect_irq (via + // object_property_set_link) adds a reference to the IRQState, + // which can prolong its life + fn connect_irq(&self, id: u32, irq: &Owned) { + assert!(bql_locked()); + let id: i32 = id.try_into().unwrap(); + let irq: &IRQState = irq; + unsafe { + bindings::sysbus_connect_irq(self.upcast().as_mut_ptr(), id, irq.as_mut_ptr()); + } + } + + fn sysbus_realize(&self) { + // TODO: return an Error + assert!(bql_locked()); + unsafe { + bindings::sysbus_realize( + self.upcast().as_mut_ptr(), + addr_of_mut!(bindings::error_fatal), + ); + } + } +} + +impl SysBusDeviceMethods for R where R::Target: IsA {} diff --git a/rust/qemu-api/src/timer.rs b/rust/qemu-api/src/timer.rs new file mode 100644 index 00000000000..0a2d111d490 --- /dev/null +++ b/rust/qemu-api/src/timer.rs @@ -0,0 +1,125 @@ +// Copyright (C) 2024 Intel Corporation. +// Author(s): Zhao Liu +// SPDX-License-Identifier: GPL-2.0-or-later + +use std::{ + ffi::{c_int, c_void}, + pin::Pin, +}; + +use crate::{ + bindings::{self, qemu_clock_get_ns, timer_del, timer_init_full, timer_mod, QEMUClockType}, + callbacks::FnCall, + cell::Opaque, +}; + +/// A safe wrapper around [`bindings::QEMUTimer`]. +#[repr(transparent)] +#[derive(Debug, qemu_api_macros::Wrapper)] +pub struct Timer(Opaque); + +unsafe impl Send for Timer {} +unsafe impl Sync for Timer {} + +#[repr(transparent)] +#[derive(qemu_api_macros::Wrapper)] +pub struct TimerListGroup(Opaque); + +unsafe impl Send for TimerListGroup {} +unsafe impl Sync for TimerListGroup {} + +impl Timer { + pub const MS: u32 = bindings::SCALE_MS; + pub const US: u32 = bindings::SCALE_US; + pub const NS: u32 = bindings::SCALE_NS; + + /// Create a `Timer` struct without initializing it. + /// + /// # Safety + /// + /// The timer must be initialized before it is armed with + /// [`modify`](Self::modify). + pub unsafe fn new() -> Self { + // SAFETY: requirements relayed to callers of Timer::new + Self(unsafe { Opaque::zeroed() }) + } + + /// Create a new timer with the given attributes. + pub fn init_full<'timer, 'opaque: 'timer, T, F>( + self: Pin<&'timer mut Self>, + timer_list_group: Option<&TimerListGroup>, + clk_type: ClockType, + scale: u32, + attributes: u32, + _cb: F, + opaque: &'opaque T, + ) where + F: for<'a> FnCall<(&'a T,)>, + { + let _: () = F::ASSERT_IS_SOME; + + /// timer expiration callback + unsafe extern "C" fn rust_timer_handler FnCall<(&'a T,)>>( + opaque: *mut c_void, + ) { + // SAFETY: the opaque was passed as a reference to `T`. 
+            F::call((unsafe { &*(opaque.cast::<T>()) },))
+        }
+
+        let timer_cb: unsafe extern "C" fn(*mut c_void) = rust_timer_handler::<T, F>;
+
+        // SAFETY: the opaque outlives the timer
+        unsafe {
+            timer_init_full(
+                self.as_mut_ptr(),
+                if let Some(g) = timer_list_group {
+                    g as *const TimerListGroup as *mut _
+                } else {
+                    ::core::ptr::null_mut()
+                },
+                clk_type.id,
+                scale as c_int,
+                attributes as c_int,
+                Some(timer_cb),
+                (opaque as *const T).cast::<c_void>().cast_mut(),
+            )
+        }
+    }
+
+    pub fn modify(&self, expire_time: u64) {
+        // SAFETY: the only way to obtain a Timer safely is via methods that
+        // take a Pin<&mut Self>, therefore the timer is pinned
+        unsafe { timer_mod(self.as_mut_ptr(), expire_time as i64) }
+    }
+
+    pub fn delete(&self) {
+        // SAFETY: the only way to obtain a Timer safely is via methods that
+        // take a Pin<&mut Self>, therefore the timer is pinned
+        unsafe { timer_del(self.as_mut_ptr()) }
+    }
+}
+
+// FIXME: use something like PinnedDrop from the pinned_init crate
+impl Drop for Timer {
+    fn drop(&mut self) {
+        self.delete()
+    }
+}
+
+pub struct ClockType {
+    id: QEMUClockType,
+}
+
+impl ClockType {
+    pub fn get_ns(&self) -> u64 {
+        // SAFETY: cannot be created outside this module, therefore id
+        // is valid
+        (unsafe { qemu_clock_get_ns(self.id) }) as u64
+    }
+}
+
+pub const CLOCK_VIRTUAL: ClockType = ClockType {
+    id: QEMUClockType::QEMU_CLOCK_VIRTUAL,
+};
+
+pub const NANOSECONDS_PER_SECOND: u64 = 1000000000;
diff --git a/rust/qemu-api/src/uninit.rs b/rust/qemu-api/src/uninit.rs
new file mode 100644
index 00000000000..04123b4ae99
--- /dev/null
+++ b/rust/qemu-api/src/uninit.rs
@@ -0,0 +1,85 @@
+//! Access fields of a [`MaybeUninit`]
+
+use std::{
+    mem::MaybeUninit,
+    ops::{Deref, DerefMut},
+};
+
+pub struct MaybeUninitField<'a, T, U> {
+    parent: &'a mut MaybeUninit<T>,
+    child: *mut U,
+}
+
+impl<'a, T, U> MaybeUninitField<'a, T, U> {
+    #[doc(hidden)]
+    pub fn new(parent: &'a mut MaybeUninit<T>, child: *mut U) -> Self {
+        MaybeUninitField { parent, child }
+    }
+
+    /// Return a constant pointer to the containing object of the field.
+    ///
+    /// Because the `MaybeUninitField` remembers the containing object,
+    /// it is possible to use it in foreign APIs that initialize the
+    /// child.
+    pub fn parent(f: &Self) -> *const T {
+        f.parent.as_ptr()
+    }
+
+    /// Return a mutable pointer to the containing object.
+    ///
+    /// Because the `MaybeUninitField` remembers the containing object,
+    /// it is possible to use it in foreign APIs that initialize the
+    /// child.
+ pub fn parent_mut(f: &mut Self) -> *mut T { + f.parent.as_mut_ptr() + } +} + +impl<'a, T, U> Deref for MaybeUninitField<'a, T, U> { + type Target = MaybeUninit; + + fn deref(&self) -> &MaybeUninit { + // SAFETY: self.child was obtained by dereferencing a valid mutable + // reference; the content of the memory may be invalid or uninitialized + // but MaybeUninit<_> makes no assumption on it + unsafe { &*(self.child.cast()) } + } +} + +impl<'a, T, U> DerefMut for MaybeUninitField<'a, T, U> { + fn deref_mut(&mut self) -> &mut MaybeUninit { + // SAFETY: self.child was obtained by dereferencing a valid mutable + // reference; the content of the memory may be invalid or uninitialized + // but MaybeUninit<_> makes no assumption on it + unsafe { &mut *(self.child.cast()) } + } +} + +/// ``` +/// #[derive(Debug)] +/// struct S { +/// x: u32, +/// y: u32, +/// } +/// +/// # use std::mem::MaybeUninit; +/// # use qemu_api::{assert_match, uninit_field_mut}; +/// +/// let mut s: MaybeUninit = MaybeUninit::zeroed(); +/// uninit_field_mut!(s, x).write(5); +/// let s = unsafe { s.assume_init() }; +/// assert_match!(s, S { x: 5, y: 0 }); +/// ``` +#[macro_export] +macro_rules! uninit_field_mut { + ($container:expr, $($field:tt)+) => {{ + let container__: &mut ::std::mem::MaybeUninit<_> = &mut $container; + let container_ptr__ = container__.as_mut_ptr(); + + // SAFETY: the container is not used directly, only through a MaybeUninit<>, + // so the safety is delegated to the caller and to final invocation of + // assume_init() + let target__ = unsafe { std::ptr::addr_of_mut!((*container_ptr__).$($field)+) }; + $crate::uninit::MaybeUninitField::new(container__, target__) + }}; +} diff --git a/rust/qemu-api/src/vmstate.rs b/rust/qemu-api/src/vmstate.rs new file mode 100644 index 00000000000..812f390d780 --- /dev/null +++ b/rust/qemu-api/src/vmstate.rs @@ -0,0 +1,604 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Helper macros to declare migration state for device models. +//! +//! This module includes four families of macros: +//! +//! * [`vmstate_unused!`](crate::vmstate_unused) and +//! [`vmstate_of!`](crate::vmstate_of), which are used to express the +//! migration format for a struct. This is based on the [`VMState`] trait, +//! which is defined by all migratable types. +//! +//! * [`impl_vmstate_forward`](crate::impl_vmstate_forward) and +//! [`impl_vmstate_bitsized`](crate::impl_vmstate_bitsized), which help with +//! the definition of the [`VMState`] trait (respectively for transparent +//! structs and for `bilge`-defined types) +//! +//! * helper macros to declare a device model state struct, in particular +//! [`vmstate_subsections`](crate::vmstate_subsections) and +//! [`vmstate_fields`](crate::vmstate_fields). +//! +//! * direct equivalents to the C macros declared in +//! `include/migration/vmstate.h`. These are not type-safe and only provide +//! functionality that is missing from `vmstate_of!`. + +use core::{marker::PhantomData, mem, ptr::NonNull}; +use std::ffi::{c_int, c_void}; + +pub use crate::bindings::{VMStateDescription, VMStateField}; +use crate::{ + bindings::VMStateFlags, callbacks::FnCall, prelude::*, qom::Owned, zeroable::Zeroable, +}; + +/// This macro is used to call a function with a generic argument bound +/// to the type of a field. The function must take a +/// [`PhantomData`]`` argument; `T` is the type of +/// field `$field` in the `$typ` type. 
+/// +/// # Examples +/// +/// ``` +/// # use qemu_api::call_func_with_field; +/// # use core::marker::PhantomData; +/// const fn size_of_field(_: PhantomData) -> usize { +/// std::mem::size_of::() +/// } +/// +/// struct Foo { +/// x: u16, +/// }; +/// // calls size_of_field::() +/// assert_eq!(call_func_with_field!(size_of_field, Foo, x), 2); +/// ``` +#[macro_export] +macro_rules! call_func_with_field { + // Based on the answer by user steffahn (Frank Steffahn) at + // https://users.rust-lang.org/t/inferring-type-of-field/122857 + // and used under MIT license + ($func:expr, $typ:ty, $($field:tt).+) => { + $func(loop { + #![allow(unreachable_code)] + const fn phantom__(_: &T) -> ::core::marker::PhantomData { ::core::marker::PhantomData } + // Unreachable code is exempt from checks on uninitialized values. + // Use that trick to infer the type of this PhantomData. + break ::core::marker::PhantomData; + break phantom__(&{ let value__: $typ; value__.$($field).+ }); + }) + }; +} + +/// Workaround for lack of `const_refs_static`: references to global variables +/// can be included in a `static`, but not in a `const`; unfortunately, this +/// is exactly what would go in the `VMStateField`'s `info` member. +/// +/// This enum contains the contents of the `VMStateField`'s `info` member, +/// but as an `enum` instead of a pointer. +#[allow(non_camel_case_types)] +pub enum VMStateFieldType { + null, + vmstate_info_bool, + vmstate_info_int8, + vmstate_info_int16, + vmstate_info_int32, + vmstate_info_int64, + vmstate_info_uint8, + vmstate_info_uint16, + vmstate_info_uint32, + vmstate_info_uint64, + vmstate_info_timer, +} + +/// Workaround for lack of `const_refs_static`. Converts a `VMStateFieldType` +/// to a `*const VMStateInfo`, for inclusion in a `VMStateField`. +#[macro_export] +macro_rules! info_enum_to_ref { + ($e:expr) => { + unsafe { + match $e { + $crate::vmstate::VMStateFieldType::null => ::core::ptr::null(), + $crate::vmstate::VMStateFieldType::vmstate_info_bool => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_bool) + } + $crate::vmstate::VMStateFieldType::vmstate_info_int8 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_int8) + } + $crate::vmstate::VMStateFieldType::vmstate_info_int16 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_int16) + } + $crate::vmstate::VMStateFieldType::vmstate_info_int32 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_int32) + } + $crate::vmstate::VMStateFieldType::vmstate_info_int64 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_int64) + } + $crate::vmstate::VMStateFieldType::vmstate_info_uint8 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint8) + } + $crate::vmstate::VMStateFieldType::vmstate_info_uint16 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint16) + } + $crate::vmstate::VMStateFieldType::vmstate_info_uint32 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint32) + } + $crate::vmstate::VMStateFieldType::vmstate_info_uint64 => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint64) + } + $crate::vmstate::VMStateFieldType::vmstate_info_timer => { + ::core::ptr::addr_of!($crate::bindings::vmstate_info_timer) + } + } + } + }; +} + +/// A trait for types that can be included in a device's migration stream. It +/// provides the base contents of a `VMStateField` (minus the name and offset). +/// +/// # Safety +/// +/// The contents of this trait go straight into structs that are parsed by C +/// code and used to introspect into other structs. 
Generally, you don't need +/// to implement it except via macros that do it for you, such as +/// `impl_vmstate_bitsized!`. +pub unsafe trait VMState { + /// The `info` member of a `VMStateField` is a pointer and as such cannot + /// yet be included in the [`BASE`](VMState::BASE) associated constant; + /// this is only allowed by Rust 1.83.0 and newer. For now, include the + /// member as an enum which is stored in a separate constant. + const SCALAR_TYPE: VMStateFieldType = VMStateFieldType::null; + + /// The base contents of a `VMStateField` (minus the name and offset) for + /// the type that is implementing the trait. + const BASE: VMStateField; + + /// A flag that is added to another field's `VMStateField` to specify the + /// length's type in a variable-sized array. If this is not a supported + /// type for the length (i.e. if it is not `u8`, `u16`, `u32`), using it + /// in a call to [`vmstate_of!`](crate::vmstate_of) will cause a + /// compile-time error. + const VARRAY_FLAG: VMStateFlags = { + panic!("invalid type for variable-sized array"); + }; +} + +/// Internal utility function to retrieve a type's `VMStateFieldType`; +/// used by [`vmstate_of!`](crate::vmstate_of). +pub const fn vmstate_scalar_type(_: PhantomData) -> VMStateFieldType { + T::SCALAR_TYPE +} + +/// Internal utility function to retrieve a type's `VMStateField`; +/// used by [`vmstate_of!`](crate::vmstate_of). +pub const fn vmstate_base(_: PhantomData) -> VMStateField { + T::BASE +} + +/// Internal utility function to retrieve a type's `VMStateFlags` when it +/// is used as the element count of a `VMSTATE_VARRAY`; used by +/// [`vmstate_of!`](crate::vmstate_of). +pub const fn vmstate_varray_flag(_: PhantomData) -> VMStateFlags { + T::VARRAY_FLAG +} + +/// Return the `VMStateField` for a field of a struct. The field must be +/// visible in the current scope. +/// +/// Only a limited set of types is supported out of the box: +/// * scalar types (integer and `bool`) +/// * the C struct `QEMUTimer` +/// * a transparent wrapper for any of the above (`Cell`, `UnsafeCell`, +/// [`BqlCell`], [`BqlRefCell`] +/// * a raw pointer to any of the above +/// * a `NonNull` pointer, a `Box` or an [`Owned`] for any of the above +/// * an array of any of the above +/// +/// In order to support other types, the trait `VMState` must be implemented +/// for them. The macros +/// [`impl_vmstate_bitsized!`](crate::impl_vmstate_bitsized) +/// and [`impl_vmstate_forward!`](crate::impl_vmstate_forward) help with this. +#[macro_export] +macro_rules! vmstate_of { + ($struct_name:ty, $field_name:ident $([0 .. $num:ident $(* $factor:expr)?])? $(, $test_fn:expr)? $(,)?) => { + $crate::bindings::VMStateField { + name: ::core::concat!(::core::stringify!($field_name), "\0") + .as_bytes() + .as_ptr() as *const ::std::os::raw::c_char, + offset: ::std::mem::offset_of!($struct_name, $field_name), + $(num_offset: ::std::mem::offset_of!($struct_name, $num),)? + $(field_exists: $crate::vmstate_exist_fn!($struct_name, $test_fn),)? + // The calls to `call_func_with_field!` are the magic that + // computes most of the VMStateField from the type of the field. + info: $crate::info_enum_to_ref!($crate::call_func_with_field!( + $crate::vmstate::vmstate_scalar_type, + $struct_name, + $field_name + )), + ..$crate::call_func_with_field!( + $crate::vmstate::vmstate_base, + $struct_name, + $field_name + )$(.with_varray_flag($crate::call_func_with_field!( + $crate::vmstate::vmstate_varray_flag, + $struct_name, + $num)) + $(.with_varray_multiply($factor))?)? 
+ } + }; +} + +impl VMStateFlags { + const VMS_VARRAY_FLAGS: VMStateFlags = VMStateFlags( + VMStateFlags::VMS_VARRAY_INT32.0 + | VMStateFlags::VMS_VARRAY_UINT8.0 + | VMStateFlags::VMS_VARRAY_UINT16.0 + | VMStateFlags::VMS_VARRAY_UINT32.0, + ); +} + +// Add a couple builder-style methods to VMStateField, allowing +// easy derivation of VMStateField constants from other types. +impl VMStateField { + #[must_use] + pub const fn with_version_id(mut self, version_id: i32) -> Self { + assert!(version_id >= 0); + self.version_id = version_id; + self + } + + #[must_use] + pub const fn with_array_flag(mut self, num: usize) -> Self { + assert!(num <= 0x7FFF_FFFFusize); + assert!((self.flags.0 & VMStateFlags::VMS_ARRAY.0) == 0); + assert!((self.flags.0 & VMStateFlags::VMS_VARRAY_FLAGS.0) == 0); + if (self.flags.0 & VMStateFlags::VMS_POINTER.0) != 0 { + self.flags = VMStateFlags(self.flags.0 & !VMStateFlags::VMS_POINTER.0); + self.flags = VMStateFlags(self.flags.0 | VMStateFlags::VMS_ARRAY_OF_POINTER.0); + // VMS_ARRAY_OF_POINTER flag stores the size of pointer. + // FIXME: *const, *mut, NonNull and Box<> have the same size as usize. + // Resize if more smart pointers are supported. + self.size = std::mem::size_of::(); + } + self.flags = VMStateFlags(self.flags.0 & !VMStateFlags::VMS_SINGLE.0); + self.flags = VMStateFlags(self.flags.0 | VMStateFlags::VMS_ARRAY.0); + self.num = num as i32; + self + } + + #[must_use] + pub const fn with_pointer_flag(mut self) -> Self { + assert!((self.flags.0 & VMStateFlags::VMS_POINTER.0) == 0); + self.flags = VMStateFlags(self.flags.0 | VMStateFlags::VMS_POINTER.0); + self + } + + #[must_use] + pub const fn with_varray_flag_unchecked(mut self, flag: VMStateFlags) -> VMStateField { + self.flags = VMStateFlags(self.flags.0 & !VMStateFlags::VMS_ARRAY.0); + self.flags = VMStateFlags(self.flags.0 | flag.0); + self.num = 0; // varray uses num_offset instead of num. + self + } + + #[must_use] + #[allow(unused_mut)] + pub const fn with_varray_flag(mut self, flag: VMStateFlags) -> VMStateField { + assert!((self.flags.0 & VMStateFlags::VMS_ARRAY.0) != 0); + self.with_varray_flag_unchecked(flag) + } + + #[must_use] + pub const fn with_varray_multiply(mut self, num: u32) -> VMStateField { + assert!(num <= 0x7FFF_FFFFu32); + self.flags = VMStateFlags(self.flags.0 | VMStateFlags::VMS_MULTIPLY_ELEMENTS.0); + self.num = num as i32; + self + } +} + +/// This macro can be used (by just passing it a type) to forward the `VMState` +/// trait to the first field of a tuple. This is a workaround for lack of +/// support of nested [`offset_of`](core::mem::offset_of) until Rust 1.82.0. +/// +/// # Examples +/// +/// ``` +/// # use qemu_api::impl_vmstate_forward; +/// pub struct Fifo([u8; 16]); +/// impl_vmstate_forward!(Fifo); +/// ``` +#[macro_export] +macro_rules! impl_vmstate_forward { + // This is similar to impl_vmstate_transparent below, but it + // uses the same trick as vmstate_of! to obtain the type of + // the first field of the tuple + ($tuple:ty) => { + unsafe impl $crate::vmstate::VMState for $tuple { + const SCALAR_TYPE: $crate::vmstate::VMStateFieldType = + $crate::call_func_with_field!($crate::vmstate::vmstate_scalar_type, $tuple, 0); + const BASE: $crate::bindings::VMStateField = + $crate::call_func_with_field!($crate::vmstate::vmstate_base, $tuple, 0); + } + }; +} + +// Transparent wrappers: just use the internal type + +macro_rules! 
impl_vmstate_transparent { + ($type:ty where $base:tt: VMState $($where:tt)*) => { + unsafe impl<$base> VMState for $type where $base: VMState $($where)* { + const SCALAR_TYPE: VMStateFieldType = <$base as VMState>::SCALAR_TYPE; + const BASE: VMStateField = VMStateField { + size: mem::size_of::<$type>(), + ..<$base as VMState>::BASE + }; + const VARRAY_FLAG: VMStateFlags = <$base as VMState>::VARRAY_FLAG; + } + }; +} + +impl_vmstate_transparent!(std::cell::Cell where T: VMState); +impl_vmstate_transparent!(std::cell::UnsafeCell where T: VMState); +impl_vmstate_transparent!(std::pin::Pin where T: VMState); +impl_vmstate_transparent!(crate::cell::BqlCell where T: VMState); +impl_vmstate_transparent!(crate::cell::BqlRefCell where T: VMState); +impl_vmstate_transparent!(crate::cell::Opaque where T: VMState); + +#[macro_export] +macro_rules! impl_vmstate_bitsized { + ($type:ty) => { + unsafe impl $crate::vmstate::VMState for $type { + const SCALAR_TYPE: $crate::vmstate::VMStateFieldType = + <<<$type as ::bilge::prelude::Bitsized>::ArbitraryInt + as ::bilge::prelude::Number>::UnderlyingType + as $crate::vmstate::VMState>::SCALAR_TYPE; + const BASE: $crate::bindings::VMStateField = + <<<$type as ::bilge::prelude::Bitsized>::ArbitraryInt + as ::bilge::prelude::Number>::UnderlyingType + as $crate::vmstate::VMState>::BASE; + const VARRAY_FLAG: $crate::bindings::VMStateFlags = + <<<$type as ::bilge::prelude::Bitsized>::ArbitraryInt + as ::bilge::prelude::Number>::UnderlyingType + as $crate::vmstate::VMState>::VARRAY_FLAG; + } + }; +} + +// Scalar types using predefined VMStateInfos + +macro_rules! impl_vmstate_scalar { + ($info:ident, $type:ty$(, $varray_flag:ident)?) => { + unsafe impl VMState for $type { + const SCALAR_TYPE: VMStateFieldType = VMStateFieldType::$info; + const BASE: VMStateField = VMStateField { + size: mem::size_of::<$type>(), + flags: VMStateFlags::VMS_SINGLE, + ..Zeroable::ZERO + }; + $(const VARRAY_FLAG: VMStateFlags = VMStateFlags::$varray_flag;)? + } + }; +} + +impl_vmstate_scalar!(vmstate_info_bool, bool); +impl_vmstate_scalar!(vmstate_info_int8, i8); +impl_vmstate_scalar!(vmstate_info_int16, i16); +impl_vmstate_scalar!(vmstate_info_int32, i32); +impl_vmstate_scalar!(vmstate_info_int64, i64); +impl_vmstate_scalar!(vmstate_info_uint8, u8, VMS_VARRAY_UINT8); +impl_vmstate_scalar!(vmstate_info_uint16, u16, VMS_VARRAY_UINT16); +impl_vmstate_scalar!(vmstate_info_uint32, u32, VMS_VARRAY_UINT32); +impl_vmstate_scalar!(vmstate_info_uint64, u64); +impl_vmstate_scalar!(vmstate_info_timer, crate::timer::Timer); + +// Pointer types using the underlying type's VMState plus VMS_POINTER +// Note that references are not supported, though references to cells +// could be allowed. + +macro_rules! impl_vmstate_pointer { + ($type:ty where $base:tt: VMState $($where:tt)*) => { + unsafe impl<$base> VMState for $type where $base: VMState $($where)* { + const SCALAR_TYPE: VMStateFieldType = ::SCALAR_TYPE; + const BASE: VMStateField = <$base as VMState>::BASE.with_pointer_flag(); + } + }; +} + +impl_vmstate_pointer!(*const T where T: VMState); +impl_vmstate_pointer!(*mut T where T: VMState); +impl_vmstate_pointer!(NonNull where T: VMState); + +// Unlike C pointers, Box is always non-null therefore there is no need +// to specify VMS_ALLOC. 
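+// As a usage sketch (hypothetical `DemoState` type as seen from a device
+// crate; none of these names come from this patch), the scalar and array
+// impls above are what let `vmstate_of!` describe both plain fields and
+// variable-sized arrays:
+//
+//     use qemu_api::{vmstate_fields, vmstate_of, vmstate::VMStateDescription,
+//                    zeroable::Zeroable};
+//
+//     #[repr(C)]
+//     struct DemoState {
+//         len: u8,
+//         buf: [u8; 8],
+//         level: u32,
+//     }
+//
+//     static VMSTATE_DEMO: VMStateDescription = VMStateDescription {
+//         name: c"demo".as_ptr(),
+//         version_id: 1,
+//         minimum_version_id: 1,
+//         fields: vmstate_fields! {
+//             vmstate_of!(DemoState, level),         // scalar, vmstate_info_uint32
+//             vmstate_of!(DemoState, buf[0 .. len]), // varray, length taken from `len`
+//         },
+//         ..Zeroable::ZERO
+//     };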
+impl_vmstate_pointer!(Box where T: VMState); +impl_vmstate_pointer!(Owned where T: VMState + ObjectType); + +// Arrays using the underlying type's VMState plus +// VMS_ARRAY/VMS_ARRAY_OF_POINTER + +unsafe impl VMState for [T; N] { + const SCALAR_TYPE: VMStateFieldType = ::SCALAR_TYPE; + const BASE: VMStateField = ::BASE.with_array_flag(N); +} + +#[doc(alias = "VMSTATE_UNUSED")] +#[macro_export] +macro_rules! vmstate_unused { + ($size:expr) => {{ + $crate::bindings::VMStateField { + name: c"unused".as_ptr(), + size: $size, + info: unsafe { ::core::ptr::addr_of!($crate::bindings::vmstate_info_unused_buffer) }, + flags: $crate::bindings::VMStateFlags::VMS_BUFFER, + ..$crate::zeroable::Zeroable::ZERO + } + }}; +} + +pub extern "C" fn rust_vms_test_field_exists FnCall<(&'a T, u8), bool>>( + opaque: *mut c_void, + version_id: c_int, +) -> bool { + // SAFETY: the opaque was passed as a reference to `T`. + let owner: &T = unsafe { &*(opaque.cast::()) }; + let version: u8 = version_id.try_into().unwrap(); + F::call((owner, version)) +} + +pub type VMSFieldExistCb = unsafe extern "C" fn( + opaque: *mut std::os::raw::c_void, + version_id: std::os::raw::c_int, +) -> bool; + +#[macro_export] +macro_rules! vmstate_exist_fn { + ($struct_name:ty, $test_fn:expr) => {{ + const fn test_cb_builder__ $crate::callbacks::FnCall<(&'a T, u8), bool>>( + _phantom: ::core::marker::PhantomData, + ) -> $crate::vmstate::VMSFieldExistCb { + let _: () = F::ASSERT_IS_SOME; + $crate::vmstate::rust_vms_test_field_exists:: + } + + const fn phantom__(_: &T) -> ::core::marker::PhantomData { + ::core::marker::PhantomData + } + Some(test_cb_builder__::<$struct_name, _>(phantom__(&$test_fn))) + }}; +} + +// FIXME: including the `vmsd` field in a `const` is not possible without +// the const_refs_static feature (stabilized in Rust 1.83.0). Without it, +// it is not possible to use VMS_STRUCT in a transparent manner using +// `vmstate_of!`. While VMSTATE_CLOCK can at least try to be type-safe, +// VMSTATE_STRUCT includes $type only for documentation purposes; it +// is checked against $field_name and $struct_name, but not against $vmsd +// which is what really would matter. +#[doc(alias = "VMSTATE_STRUCT")] +#[macro_export] +macro_rules! vmstate_struct { + ($struct_name:ty, $field_name:ident $([0 .. $num:ident $(* $factor:expr)?])?, $vmsd:expr, $type:ty $(, $test_fn:expr)? $(,)?) => { + $crate::bindings::VMStateField { + name: ::core::concat!(::core::stringify!($field_name), "\0") + .as_bytes() + .as_ptr() as *const ::std::os::raw::c_char, + $(num_offset: ::std::mem::offset_of!($struct_name, $num),)? + offset: { + $crate::assert_field_type!($struct_name, $field_name, $type $(, num = $num)?); + ::std::mem::offset_of!($struct_name, $field_name) + }, + size: ::core::mem::size_of::<$type>(), + flags: $crate::bindings::VMStateFlags::VMS_STRUCT, + vmsd: $vmsd, + $(field_exists: $crate::vmstate_exist_fn!($struct_name, $test_fn),)? + ..$crate::zeroable::Zeroable::ZERO + } $(.with_varray_flag_unchecked( + $crate::call_func_with_field!( + $crate::vmstate::vmstate_varray_flag, + $struct_name, + $num + ) + ) + $(.with_varray_multiply($factor))?)? + }; +} + +#[doc(alias = "VMSTATE_CLOCK")] +#[macro_export] +macro_rules! vmstate_clock { + ($struct_name:ty, $field_name:ident $([0 .. $num:ident $(* $factor:expr)?])?) 
=> {{ + $crate::bindings::VMStateField { + name: ::core::concat!(::core::stringify!($field_name), "\0") + .as_bytes() + .as_ptr() as *const ::std::os::raw::c_char, + offset: { + $crate::assert_field_type!( + $struct_name, + $field_name, + $crate::qom::Owned<$crate::qdev::Clock> $(, num = $num)? + ); + ::std::mem::offset_of!($struct_name, $field_name) + }, + size: ::core::mem::size_of::<*const $crate::qdev::Clock>(), + flags: $crate::bindings::VMStateFlags( + $crate::bindings::VMStateFlags::VMS_STRUCT.0 + | $crate::bindings::VMStateFlags::VMS_POINTER.0, + ), + vmsd: unsafe { ::core::ptr::addr_of!($crate::bindings::vmstate_clock) }, + ..$crate::zeroable::Zeroable::ZERO + } $(.with_varray_flag_unchecked( + $crate::call_func_with_field!( + $crate::vmstate::vmstate_varray_flag, + $struct_name, + $num + ) + ) + $(.with_varray_multiply($factor))?)? + }}; +} + +/// Helper macro to declare a list of +/// ([`VMStateField`](`crate::bindings::VMStateField`)) into a static and return +/// a pointer to the array of values it created. +#[macro_export] +macro_rules! vmstate_fields { + ($($field:expr),*$(,)*) => {{ + static _FIELDS: &[$crate::bindings::VMStateField] = &[ + $($field),*, + $crate::bindings::VMStateField { + flags: $crate::bindings::VMStateFlags::VMS_END, + ..$crate::zeroable::Zeroable::ZERO + } + ]; + _FIELDS.as_ptr() + }} +} + +#[doc(alias = "VMSTATE_VALIDATE")] +#[macro_export] +macro_rules! vmstate_validate { + ($struct_name:ty, $test_name:expr, $test_fn:expr $(,)?) => { + $crate::bindings::VMStateField { + name: ::std::ffi::CStr::as_ptr($test_name), + field_exists: $crate::vmstate_exist_fn!($struct_name, $test_fn), + flags: $crate::bindings::VMStateFlags( + $crate::bindings::VMStateFlags::VMS_MUST_EXIST.0 + | $crate::bindings::VMStateFlags::VMS_ARRAY.0, + ), + num: 0, // 0 elements: no data, only run test_fn callback + ..$crate::zeroable::Zeroable::ZERO + } + }; +} + +/// A transparent wrapper type for the `subsections` field of +/// [`VMStateDescription`]. +/// +/// This is necessary to be able to declare subsection descriptions as statics, +/// because the only way to implement `Sync` for a foreign type (and `*const` +/// pointers are foreign types in Rust) is to create a wrapper struct and +/// `unsafe impl Sync` for it. +/// +/// This struct is used in the +/// [`vm_state_subsections`](crate::vmstate_subsections) macro implementation. +#[repr(transparent)] +pub struct VMStateSubsectionsWrapper(pub &'static [*const crate::bindings::VMStateDescription]); + +unsafe impl Sync for VMStateSubsectionsWrapper {} + +/// Helper macro to declare a list of subsections ([`VMStateDescription`]) +/// into a static and return a pointer to the array of pointers it created. +#[macro_export] +macro_rules! vmstate_subsections { + ($($subsection:expr),*$(,)*) => {{ + static _SUBSECTIONS: $crate::vmstate::VMStateSubsectionsWrapper = $crate::vmstate::VMStateSubsectionsWrapper(&[ + $({ + static _SUBSECTION: $crate::bindings::VMStateDescription = $subsection; + ::core::ptr::addr_of!(_SUBSECTION) + }),*, + ::core::ptr::null() + ]); + _SUBSECTIONS.0.as_ptr() + }} +} diff --git a/rust/qemu-api/src/zeroable.rs b/rust/qemu-api/src/zeroable.rs new file mode 100644 index 00000000000..d8239d08563 --- /dev/null +++ b/rust/qemu-api/src/zeroable.rs @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Defines a trait for structs that can be safely initialized with zero bytes. + +/// Encapsulates the requirement that +/// `MaybeUninit::::zeroed().assume_init()` does not cause undefined +/// behavior. 
+/// +/// # Safety +/// +/// Do not add this trait to a type unless all-zeroes is a valid value for the +/// type. In particular, raw pointers can be zero, but references and +/// `NonNull` cannot. +pub unsafe trait Zeroable: Default { + /// Return a value of Self whose memory representation consists of all + /// zeroes, with the possible exclusion of padding bytes. + const ZERO: Self = unsafe { ::core::mem::MaybeUninit::::zeroed().assume_init() }; +} + +// bindgen does not derive Default here +#[allow(clippy::derivable_impls)] +impl Default for crate::bindings::VMStateFlags { + fn default() -> Self { + Self(0) + } +} + +unsafe impl Zeroable for crate::bindings::Property__bindgen_ty_1 {} +unsafe impl Zeroable for crate::bindings::Property {} +unsafe impl Zeroable for crate::bindings::VMStateFlags {} +unsafe impl Zeroable for crate::bindings::VMStateField {} +unsafe impl Zeroable for crate::bindings::VMStateDescription {} +unsafe impl Zeroable for crate::bindings::MemoryRegionOps__bindgen_ty_1 {} +unsafe impl Zeroable for crate::bindings::MemoryRegionOps__bindgen_ty_2 {} +unsafe impl Zeroable for crate::bindings::MemoryRegionOps {} +unsafe impl Zeroable for crate::bindings::MemTxAttrs {} +unsafe impl Zeroable for crate::bindings::CharBackend {} diff --git a/rust/qemu-api/tests/tests.rs b/rust/qemu-api/tests/tests.rs new file mode 100644 index 00000000000..a658a49fcfd --- /dev/null +++ b/rust/qemu-api/tests/tests.rs @@ -0,0 +1,180 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis +// SPDX-License-Identifier: GPL-2.0-or-later + +use std::{ffi::CStr, ptr::addr_of}; + +use qemu_api::{ + bindings::{module_call_init, module_init_type, qdev_prop_bool}, + cell::{self, BqlCell}, + declare_properties, define_property, + prelude::*, + qdev::{DeviceImpl, DeviceState, Property, ResettablePhasesImpl}, + qom::{ObjectImpl, ParentField}, + sysbus::SysBusDevice, + vmstate::VMStateDescription, + zeroable::Zeroable, +}; + +mod vmstate_tests; + +// Test that macros can compile. +pub static VMSTATE: VMStateDescription = VMStateDescription { + name: c"name".as_ptr(), + unmigratable: true, + ..Zeroable::ZERO +}; + +#[repr(C)] +#[derive(qemu_api_macros::Object)] +pub struct DummyState { + parent: ParentField, + migrate_clock: bool, +} + +qom_isa!(DummyState: Object, DeviceState); + +pub struct DummyClass { + parent_class: ::Class, +} + +impl DummyClass { + pub fn class_init(self: &mut DummyClass) { + self.parent_class.class_init::(); + } +} + +declare_properties! 
{ + DUMMY_PROPERTIES, + define_property!( + c"migrate-clk", + DummyState, + migrate_clock, + unsafe { &qdev_prop_bool }, + bool + ), +} + +unsafe impl ObjectType for DummyState { + type Class = DummyClass; + const TYPE_NAME: &'static CStr = c"dummy"; +} + +impl ObjectImpl for DummyState { + type ParentType = DeviceState; + const ABSTRACT: bool = false; + const CLASS_INIT: fn(&mut DummyClass) = DummyClass::class_init::; +} + +impl ResettablePhasesImpl for DummyState {} + +impl DeviceImpl for DummyState { + fn properties() -> &'static [Property] { + &DUMMY_PROPERTIES + } + fn vmsd() -> Option<&'static VMStateDescription> { + Some(&VMSTATE) + } +} + +#[repr(C)] +#[derive(qemu_api_macros::Object)] +pub struct DummyChildState { + parent: ParentField, +} + +qom_isa!(DummyChildState: Object, DeviceState, DummyState); + +pub struct DummyChildClass { + parent_class: ::Class, +} + +unsafe impl ObjectType for DummyChildState { + type Class = DummyChildClass; + const TYPE_NAME: &'static CStr = c"dummy_child"; +} + +impl ObjectImpl for DummyChildState { + type ParentType = DummyState; + const ABSTRACT: bool = false; + const CLASS_INIT: fn(&mut DummyChildClass) = DummyChildClass::class_init::; +} + +impl ResettablePhasesImpl for DummyChildState {} +impl DeviceImpl for DummyChildState {} + +impl DummyChildClass { + pub fn class_init(self: &mut DummyChildClass) { + self.parent_class.class_init::(); + } +} + +fn init_qom() { + static ONCE: BqlCell = BqlCell::new(false); + + cell::bql_start_test(); + if !ONCE.get() { + unsafe { + module_call_init(module_init_type::MODULE_INIT_QOM); + } + ONCE.set(true); + } +} + +#[test] +/// Create and immediately drop an instance. +fn test_object_new() { + init_qom(); + drop(DummyState::new()); + drop(DummyChildState::new()); +} + +#[test] +#[allow(clippy::redundant_clone)] +/// Create, clone and then drop an instance. +fn test_clone() { + init_qom(); + let p = DummyState::new(); + assert_eq!(p.clone().typename(), "dummy"); + drop(p); +} + +#[test] +/// Try invoking a method on an object. +fn test_typename() { + init_qom(); + let p = DummyState::new(); + assert_eq!(p.typename(), "dummy"); +} + +// a note on all "cast" tests: usually, especially for downcasts the desired +// class would be placed on the right, for example: +// +// let sbd_ref = p.dynamic_cast::(); +// +// Here I am doing the opposite to check that the resulting type is correct. + +#[test] +#[allow(clippy::shadow_unrelated)] +/// Test casts on shared references. +fn test_cast() { + init_qom(); + let p = DummyState::new(); + let p_ptr: *mut DummyState = p.as_mut_ptr(); + let p_ref: &mut DummyState = unsafe { &mut *p_ptr }; + + let obj_ref: &Object = p_ref.upcast(); + assert_eq!(addr_of!(*obj_ref), p_ptr.cast()); + + let sbd_ref: Option<&SysBusDevice> = obj_ref.dynamic_cast(); + assert!(sbd_ref.is_none()); + + let dev_ref: Option<&DeviceState> = obj_ref.downcast(); + assert_eq!(addr_of!(*dev_ref.unwrap()), p_ptr.cast()); + + // SAFETY: the cast is wrong, but the value is only used for comparison + unsafe { + let sbd_ref: &SysBusDevice = obj_ref.unsafe_cast(); + assert_eq!(addr_of!(*sbd_ref), p_ptr.cast()); + } +} diff --git a/rust/qemu-api/tests/vmstate_tests.rs b/rust/qemu-api/tests/vmstate_tests.rs new file mode 100644 index 00000000000..bded836eb60 --- /dev/null +++ b/rust/qemu-api/tests/vmstate_tests.rs @@ -0,0 +1,505 @@ +// Copyright (C) 2025 Intel Corporation. 
+// Author(s): Zhao Liu +// SPDX-License-Identifier: GPL-2.0-or-later + +use std::{ + ffi::{c_void, CStr}, + mem::size_of, + ptr::NonNull, + slice, +}; + +use qemu_api::{ + bindings::{ + vmstate_info_bool, vmstate_info_int32, vmstate_info_int64, vmstate_info_int8, + vmstate_info_uint64, vmstate_info_uint8, vmstate_info_unused_buffer, VMStateFlags, + }, + cell::{BqlCell, Opaque}, + impl_vmstate_forward, + vmstate::{VMStateDescription, VMStateField}, + vmstate_fields, vmstate_of, vmstate_struct, vmstate_unused, vmstate_validate, + zeroable::Zeroable, +}; + +const FOO_ARRAY_MAX: usize = 3; + +// =========================== Test VMSTATE_FOOA =========================== +// Test the use cases of the vmstate macro, corresponding to the following C +// macro variants: +// * VMSTATE_FOOA: +// - VMSTATE_U16 +// - VMSTATE_UNUSED +// - VMSTATE_VARRAY_UINT16_UNSAFE +// - VMSTATE_VARRAY_MULTIPLY +#[repr(C)] +#[derive(Default)] +struct FooA { + arr: [u8; FOO_ARRAY_MAX], + num: u16, + arr_mul: [i8; FOO_ARRAY_MAX], + num_mul: u32, + elem: i8, +} + +static VMSTATE_FOOA: VMStateDescription = VMStateDescription { + name: c"foo_a".as_ptr(), + version_id: 1, + minimum_version_id: 1, + fields: vmstate_fields! { + vmstate_of!(FooA, elem), + vmstate_unused!(size_of::()), + vmstate_of!(FooA, arr[0 .. num]).with_version_id(0), + vmstate_of!(FooA, arr_mul[0 .. num_mul * 16]), + }, + ..Zeroable::ZERO +}; + +#[test] +fn test_vmstate_uint16() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) }; + + // 1st VMStateField ("elem") in VMSTATE_FOOA (corresponding to VMSTATE_UINT16) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[0].name) }.to_bytes_with_nul(), + b"elem\0" + ); + assert_eq!(foo_fields[0].offset, 16); + assert_eq!(foo_fields[0].num_offset, 0); + assert_eq!(foo_fields[0].info, unsafe { &vmstate_info_int8 }); + assert_eq!(foo_fields[0].version_id, 0); + assert_eq!(foo_fields[0].size, 1); + assert_eq!(foo_fields[0].num, 0); + assert_eq!(foo_fields[0].flags, VMStateFlags::VMS_SINGLE); + assert!(foo_fields[0].vmsd.is_null()); + assert!(foo_fields[0].field_exists.is_none()); +} + +#[test] +fn test_vmstate_unused() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) }; + + // 2nd VMStateField ("unused") in VMSTATE_FOOA (corresponding to VMSTATE_UNUSED) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[1].name) }.to_bytes_with_nul(), + b"unused\0" + ); + assert_eq!(foo_fields[1].offset, 0); + assert_eq!(foo_fields[1].num_offset, 0); + assert_eq!(foo_fields[1].info, unsafe { &vmstate_info_unused_buffer }); + assert_eq!(foo_fields[1].version_id, 0); + assert_eq!(foo_fields[1].size, 8); + assert_eq!(foo_fields[1].num, 0); + assert_eq!(foo_fields[1].flags, VMStateFlags::VMS_BUFFER); + assert!(foo_fields[1].vmsd.is_null()); + assert!(foo_fields[1].field_exists.is_none()); +} + +#[test] +fn test_vmstate_varray_uint16_unsafe() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) }; + + // 3rd VMStateField ("arr") in VMSTATE_FOOA (corresponding to + // VMSTATE_VARRAY_UINT16_UNSAFE) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[2].name) }.to_bytes_with_nul(), + b"arr\0" + ); + assert_eq!(foo_fields[2].offset, 0); + assert_eq!(foo_fields[2].num_offset, 4); + assert_eq!(foo_fields[2].info, unsafe { &vmstate_info_uint8 }); + assert_eq!(foo_fields[2].version_id, 0); + assert_eq!(foo_fields[2].size, 1); + assert_eq!(foo_fields[2].num, 0); + assert_eq!(foo_fields[2].flags, 
VMStateFlags::VMS_VARRAY_UINT16); + assert!(foo_fields[2].vmsd.is_null()); + assert!(foo_fields[2].field_exists.is_none()); +} + +#[test] +fn test_vmstate_varray_multiply() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) }; + + // 4th VMStateField ("arr_mul") in VMSTATE_FOOA (corresponding to + // VMSTATE_VARRAY_MULTIPLY) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[3].name) }.to_bytes_with_nul(), + b"arr_mul\0" + ); + assert_eq!(foo_fields[3].offset, 6); + assert_eq!(foo_fields[3].num_offset, 12); + assert_eq!(foo_fields[3].info, unsafe { &vmstate_info_int8 }); + assert_eq!(foo_fields[3].version_id, 0); + assert_eq!(foo_fields[3].size, 1); + assert_eq!(foo_fields[3].num, 16); + assert_eq!( + foo_fields[3].flags.0, + VMStateFlags::VMS_VARRAY_UINT32.0 | VMStateFlags::VMS_MULTIPLY_ELEMENTS.0 + ); + assert!(foo_fields[3].vmsd.is_null()); + assert!(foo_fields[3].field_exists.is_none()); + + // The last VMStateField in VMSTATE_FOOA. + assert_eq!(foo_fields[4].flags, VMStateFlags::VMS_END); +} + +// =========================== Test VMSTATE_FOOB =========================== +// Test the use cases of the vmstate macro, corresponding to the following C +// macro variants: +// * VMSTATE_FOOB: +// - VMSTATE_BOOL_V +// - VMSTATE_U64 +// - VMSTATE_STRUCT_VARRAY_UINT8 +// - (no C version) MULTIPLY variant of VMSTATE_STRUCT_VARRAY_UINT32 +// - VMSTATE_ARRAY +// - VMSTATE_STRUCT_VARRAY_UINT8 with BqlCell wrapper & test_fn +#[repr(C)] +#[derive(Default)] +struct FooB { + arr_a: [FooA; FOO_ARRAY_MAX], + num_a: u8, + arr_a_mul: [FooA; FOO_ARRAY_MAX], + num_a_mul: u32, + wrap: BqlCell, + val: bool, + // FIXME: Use Timer array. Now we can't since it's hard to link savevm.c to test. + arr_i64: [i64; FOO_ARRAY_MAX], + arr_a_wrap: [FooA; FOO_ARRAY_MAX], + num_a_wrap: BqlCell, +} + +fn validate_foob(_state: &FooB, _version_id: u8) -> bool { + true +} + +static VMSTATE_FOOB: VMStateDescription = VMStateDescription { + name: c"foo_b".as_ptr(), + version_id: 2, + minimum_version_id: 1, + fields: vmstate_fields! { + vmstate_of!(FooB, val).with_version_id(2), + vmstate_of!(FooB, wrap), + vmstate_struct!(FooB, arr_a[0 .. num_a], &VMSTATE_FOOA, FooA).with_version_id(1), + vmstate_struct!(FooB, arr_a_mul[0 .. num_a_mul * 32], &VMSTATE_FOOA, FooA).with_version_id(2), + vmstate_of!(FooB, arr_i64), + vmstate_struct!(FooB, arr_a_wrap[0 .. 
num_a_wrap], &VMSTATE_FOOA, FooA, validate_foob), + }, + ..Zeroable::ZERO +}; + +#[test] +fn test_vmstate_bool_v() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + + // 1st VMStateField ("val") in VMSTATE_FOOB (corresponding to VMSTATE_BOOL_V) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[0].name) }.to_bytes_with_nul(), + b"val\0" + ); + assert_eq!(foo_fields[0].offset, 136); + assert_eq!(foo_fields[0].num_offset, 0); + assert_eq!(foo_fields[0].info, unsafe { &vmstate_info_bool }); + assert_eq!(foo_fields[0].version_id, 2); + assert_eq!(foo_fields[0].size, 1); + assert_eq!(foo_fields[0].num, 0); + assert_eq!(foo_fields[0].flags, VMStateFlags::VMS_SINGLE); + assert!(foo_fields[0].vmsd.is_null()); + assert!(foo_fields[0].field_exists.is_none()); +} + +#[test] +fn test_vmstate_uint64() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + + // 2nd VMStateField ("wrap") in VMSTATE_FOOB (corresponding to VMSTATE_U64) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[1].name) }.to_bytes_with_nul(), + b"wrap\0" + ); + assert_eq!(foo_fields[1].offset, 128); + assert_eq!(foo_fields[1].num_offset, 0); + assert_eq!(foo_fields[1].info, unsafe { &vmstate_info_uint64 }); + assert_eq!(foo_fields[1].version_id, 0); + assert_eq!(foo_fields[1].size, 8); + assert_eq!(foo_fields[1].num, 0); + assert_eq!(foo_fields[1].flags, VMStateFlags::VMS_SINGLE); + assert!(foo_fields[1].vmsd.is_null()); + assert!(foo_fields[1].field_exists.is_none()); +} + +#[test] +fn test_vmstate_struct_varray_uint8() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + + // 3rd VMStateField ("arr_a") in VMSTATE_FOOB (corresponding to + // VMSTATE_STRUCT_VARRAY_UINT8) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[2].name) }.to_bytes_with_nul(), + b"arr_a\0" + ); + assert_eq!(foo_fields[2].offset, 0); + assert_eq!(foo_fields[2].num_offset, 60); + assert!(foo_fields[2].info.is_null()); // VMSTATE_STRUCT_VARRAY_UINT8 doesn't set info field. + assert_eq!(foo_fields[2].version_id, 1); + assert_eq!(foo_fields[2].size, 20); + assert_eq!(foo_fields[2].num, 0); + assert_eq!( + foo_fields[2].flags.0, + VMStateFlags::VMS_STRUCT.0 | VMStateFlags::VMS_VARRAY_UINT8.0 + ); + assert_eq!(foo_fields[2].vmsd, &VMSTATE_FOOA); + assert!(foo_fields[2].field_exists.is_none()); +} + +#[test] +fn test_vmstate_struct_varray_uint32_multiply() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + + // 4th VMStateField ("arr_a_mul") in VMSTATE_FOOB (corresponding to + // (no C version) MULTIPLY variant of VMSTATE_STRUCT_VARRAY_UINT32) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[3].name) }.to_bytes_with_nul(), + b"arr_a_mul\0" + ); + assert_eq!(foo_fields[3].offset, 64); + assert_eq!(foo_fields[3].num_offset, 124); + assert!(foo_fields[3].info.is_null()); // VMSTATE_STRUCT_VARRAY_UINT8 doesn't set info field. 
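+    // Where the expected constants in these FooB tests come from (derived by
+    // hand, assuming the usual 64-bit ABI where u64/i64 are 8-byte aligned):
+    // FooB is #[repr(C)] and size_of::<FooA>() is 20, so
+    //   arr_a     at offset   0 (60 bytes),  num_a      at  60,
+    //   arr_a_mul at offset  64 (padded to FooA's u32 alignment),
+    //   num_a_mul at offset 124,  wrap (BqlCell<u64>) at 128,  val at 136,
+    //   arr_i64   at offset 144,  arr_a_wrap at 168,  num_a_wrap at 228,
+    // matching the offset/num_offset values asserted in these functions.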
+ assert_eq!(foo_fields[3].version_id, 2); + assert_eq!(foo_fields[3].size, 20); + assert_eq!(foo_fields[3].num, 32); + assert_eq!( + foo_fields[3].flags.0, + VMStateFlags::VMS_STRUCT.0 + | VMStateFlags::VMS_VARRAY_UINT32.0 + | VMStateFlags::VMS_MULTIPLY_ELEMENTS.0 + ); + assert_eq!(foo_fields[3].vmsd, &VMSTATE_FOOA); + assert!(foo_fields[3].field_exists.is_none()); +} + +#[test] +fn test_vmstate_macro_array() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + + // 5th VMStateField ("arr_i64") in VMSTATE_FOOB (corresponding to + // VMSTATE_ARRAY) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[4].name) }.to_bytes_with_nul(), + b"arr_i64\0" + ); + assert_eq!(foo_fields[4].offset, 144); + assert_eq!(foo_fields[4].num_offset, 0); + assert_eq!(foo_fields[4].info, unsafe { &vmstate_info_int64 }); + assert_eq!(foo_fields[4].version_id, 0); + assert_eq!(foo_fields[4].size, 8); + assert_eq!(foo_fields[4].num, FOO_ARRAY_MAX as i32); + assert_eq!(foo_fields[4].flags, VMStateFlags::VMS_ARRAY); + assert!(foo_fields[4].vmsd.is_null()); + assert!(foo_fields[4].field_exists.is_none()); +} + +#[test] +fn test_vmstate_struct_varray_uint8_wrapper() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + let mut foo_b: FooB = Default::default(); + let foo_b_p = std::ptr::addr_of_mut!(foo_b).cast::(); + + // 6th VMStateField ("arr_a_wrap") in VMSTATE_FOOB (corresponding to + // VMSTATE_STRUCT_VARRAY_UINT8). Other fields are checked in + // test_vmstate_struct_varray_uint8. + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[5].name) }.to_bytes_with_nul(), + b"arr_a_wrap\0" + ); + assert_eq!(foo_fields[5].num_offset, 228); + assert!(unsafe { foo_fields[5].field_exists.unwrap()(foo_b_p, 0) }); + + // The last VMStateField in VMSTATE_FOOB. + assert_eq!(foo_fields[6].flags, VMStateFlags::VMS_END); +} + +// =========================== Test VMSTATE_FOOC =========================== +// Test the use cases of the vmstate macro, corresponding to the following C +// macro variants: +// * VMSTATE_FOOC: +// - VMSTATE_POINTER +// - VMSTATE_ARRAY_OF_POINTER +struct FooCWrapper([Opaque<*mut u8>; FOO_ARRAY_MAX]); // Though Opaque<> array is almost impossible. + +impl_vmstate_forward!(FooCWrapper); + +#[repr(C)] +struct FooC { + ptr: *const i32, + ptr_a: NonNull, + arr_ptr: [Box; FOO_ARRAY_MAX], + arr_ptr_wrap: FooCWrapper, +} + +static VMSTATE_FOOC: VMStateDescription = VMStateDescription { + name: c"foo_c".as_ptr(), + version_id: 3, + minimum_version_id: 1, + fields: vmstate_fields! { + vmstate_of!(FooC, ptr).with_version_id(2), + // FIXME: Currently vmstate_struct doesn't support the pointer to structure. 
+ // VMSTATE_STRUCT_POINTER: vmstate_struct!(FooC, ptr_a, VMSTATE_FOOA, NonNull) + vmstate_unused!(size_of::>()), + vmstate_of!(FooC, arr_ptr), + vmstate_of!(FooC, arr_ptr_wrap), + }, + ..Zeroable::ZERO +}; + +const PTR_SIZE: usize = size_of::<*mut ()>(); + +#[test] +fn test_vmstate_pointer() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOC.fields, 6) }; + + // 1st VMStateField ("ptr") in VMSTATE_FOOC (corresponding to VMSTATE_POINTER) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[0].name) }.to_bytes_with_nul(), + b"ptr\0" + ); + assert_eq!(foo_fields[0].offset, 0); + assert_eq!(foo_fields[0].num_offset, 0); + assert_eq!(foo_fields[0].info, unsafe { &vmstate_info_int32 }); + assert_eq!(foo_fields[0].version_id, 2); + assert_eq!(foo_fields[0].size, 4); + assert_eq!(foo_fields[0].num, 0); + assert_eq!( + foo_fields[0].flags.0, + VMStateFlags::VMS_SINGLE.0 | VMStateFlags::VMS_POINTER.0 + ); + assert!(foo_fields[0].vmsd.is_null()); + assert!(foo_fields[0].field_exists.is_none()); +} + +#[test] +fn test_vmstate_macro_array_of_pointer() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOC.fields, 6) }; + + // 3rd VMStateField ("arr_ptr") in VMSTATE_FOOC (corresponding to + // VMSTATE_ARRAY_OF_POINTER) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[2].name) }.to_bytes_with_nul(), + b"arr_ptr\0" + ); + assert_eq!(foo_fields[2].offset, 2 * PTR_SIZE); + assert_eq!(foo_fields[2].num_offset, 0); + assert_eq!(foo_fields[2].info, unsafe { &vmstate_info_uint8 }); + assert_eq!(foo_fields[2].version_id, 0); + assert_eq!(foo_fields[2].size, PTR_SIZE); + assert_eq!(foo_fields[2].num, FOO_ARRAY_MAX as i32); + assert_eq!( + foo_fields[2].flags.0, + VMStateFlags::VMS_ARRAY.0 | VMStateFlags::VMS_ARRAY_OF_POINTER.0 + ); + assert!(foo_fields[2].vmsd.is_null()); + assert!(foo_fields[2].field_exists.is_none()); +} + +#[test] +fn test_vmstate_macro_array_of_pointer_wrapped() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOC.fields, 6) }; + + // 4th VMStateField ("arr_ptr_wrap") in VMSTATE_FOOC (corresponding to + // VMSTATE_ARRAY_OF_POINTER) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[3].name) }.to_bytes_with_nul(), + b"arr_ptr_wrap\0" + ); + assert_eq!(foo_fields[3].offset, (FOO_ARRAY_MAX + 2) * PTR_SIZE); + assert_eq!(foo_fields[3].num_offset, 0); + assert_eq!(foo_fields[3].info, unsafe { &vmstate_info_uint8 }); + assert_eq!(foo_fields[3].version_id, 0); + assert_eq!(foo_fields[3].size, PTR_SIZE); + assert_eq!(foo_fields[3].num, FOO_ARRAY_MAX as i32); + assert_eq!( + foo_fields[3].flags.0, + VMStateFlags::VMS_ARRAY.0 | VMStateFlags::VMS_ARRAY_OF_POINTER.0 + ); + assert!(foo_fields[3].vmsd.is_null()); + assert!(foo_fields[3].field_exists.is_none()); + + // The last VMStateField in VMSTATE_FOOC. + assert_eq!(foo_fields[4].flags, VMStateFlags::VMS_END); +} + +// =========================== Test VMSTATE_FOOD =========================== +// Test the use cases of the vmstate macro, corresponding to the following C +// macro variants: +// * VMSTATE_FOOD: +// - VMSTATE_VALIDATE + +// Add more member fields when vmstate_of/vmstate_struct support "test" +// parameter. 
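+// Note (background, not from this patch): a vmstate_validate! field carries
+// no data at all. It is encoded as VMS_ARRAY with num == 0 plus
+// VMS_MUST_EXIST, so its only effect during migration is that the
+// field_exists callback (the validation function) runs; returning false
+// fails the migration.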
+struct FooD; + +impl FooD { + fn validate_food_0(&self, _version_id: u8) -> bool { + true + } + + fn validate_food_1(_state: &FooD, _version_id: u8) -> bool { + false + } +} + +fn validate_food_2(_state: &FooD, _version_id: u8) -> bool { + true +} + +static VMSTATE_FOOD: VMStateDescription = VMStateDescription { + name: c"foo_d".as_ptr(), + version_id: 3, + minimum_version_id: 1, + fields: vmstate_fields! { + vmstate_validate!(FooD, c"foo_d_0", FooD::validate_food_0), + vmstate_validate!(FooD, c"foo_d_1", FooD::validate_food_1), + vmstate_validate!(FooD, c"foo_d_2", validate_food_2), + }, + ..Zeroable::ZERO +}; + +#[test] +fn test_vmstate_validate() { + let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOD.fields, 4) }; + let mut foo_d = FooD; + let foo_d_p = std::ptr::addr_of_mut!(foo_d).cast::(); + + // 1st VMStateField in VMSTATE_FOOD + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[0].name) }.to_bytes_with_nul(), + b"foo_d_0\0" + ); + assert_eq!(foo_fields[0].offset, 0); + assert_eq!(foo_fields[0].num_offset, 0); + assert!(foo_fields[0].info.is_null()); + assert_eq!(foo_fields[0].version_id, 0); + assert_eq!(foo_fields[0].size, 0); + assert_eq!(foo_fields[0].num, 0); + assert_eq!( + foo_fields[0].flags.0, + VMStateFlags::VMS_ARRAY.0 | VMStateFlags::VMS_MUST_EXIST.0 + ); + assert!(foo_fields[0].vmsd.is_null()); + assert!(unsafe { foo_fields[0].field_exists.unwrap()(foo_d_p, 0) }); + + // 2nd VMStateField in VMSTATE_FOOD + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[1].name) }.to_bytes_with_nul(), + b"foo_d_1\0" + ); + assert!(!unsafe { foo_fields[1].field_exists.unwrap()(foo_d_p, 1) }); + + // 3rd VMStateField in VMSTATE_FOOD + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[2].name) }.to_bytes_with_nul(), + b"foo_d_2\0" + ); + assert!(unsafe { foo_fields[2].field_exists.unwrap()(foo_d_p, 2) }); + + // The last VMStateField in VMSTATE_FOOD. + assert_eq!(foo_fields[3].flags, VMStateFlags::VMS_END); +} diff --git a/rust/qemu-api/wrapper.h b/rust/qemu-api/wrapper.h new file mode 100644 index 00000000000..15a1b19847f --- /dev/null +++ b/rust/qemu-api/wrapper.h @@ -0,0 +1,71 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2024 Linaro Ltd. + * + * Authors: Manos Pitsidianakis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + + +/* + * This header file is meant to be used as input to the `bindgen` application + * in order to generate C FFI compatible Rust bindings. 
+ */ + +#ifndef __CLANG_STDATOMIC_H +#define __CLANG_STDATOMIC_H +/* + * Fix potential missing stdatomic.h error in case bindgen does not insert the + * correct libclang header paths on its own. We do not use stdatomic.h symbols + * in QEMU code, so it's fine to declare dummy types instead. + */ +typedef enum memory_order { + memory_order_relaxed, + memory_order_consume, + memory_order_acquire, + memory_order_release, + memory_order_acq_rel, + memory_order_seq_cst, +} memory_order; +#endif /* __CLANG_STDATOMIC_H */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/log-for-trace.h" +#include "qemu/module.h" +#include "qemu-io.h" +#include "system/system.h" +#include "hw/sysbus.h" +#include "system/memory.h" +#include "chardev/char-fe.h" +#include "hw/clock.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/irq.h" +#include "qapi/error.h" +#include "qapi/error-internal.h" +#include "migration/vmstate.h" +#include "chardev/char-serial.h" +#include "exec/memattrs.h" +#include "qemu/timer.h" +#include "system/address-spaces.h" +#include "hw/char/pl011.h" diff --git a/rust/rustfmt.toml b/rust/rustfmt.toml new file mode 100644 index 00000000000..ebecb99fe09 --- /dev/null +++ b/rust/rustfmt.toml @@ -0,0 +1,7 @@ +edition = "2021" +format_generated_files = false +format_code_in_doc_comments = true +format_strings = true +imports_granularity = "Crate" +group_imports = "StdExternalCrate" +wrap_comments = true diff --git a/scripts/analyze-inclusions b/scripts/analyze-inclusions index b6280f25c84..d2c566667df 100644 --- a/scripts/analyze-inclusions +++ b/scripts/analyze-inclusions @@ -53,7 +53,7 @@ echo $(grep_include -F 'trace/generated-tracers.h') files include generated-trac echo $(grep_include -F 'qapi/error.h') files include qapi/error.h echo $(grep_include -F 'qom/object.h') files include qom/object.h echo $(grep_include -F 'block/aio.h') files include block/aio.h -echo $(grep_include -F 'exec/memory.h') files include exec/memory.h +echo $(grep_include -F 'system/memory.h') files include system/memory.h echo $(grep_include -F 'fpu/softfloat.h') files include fpu/softfloat.h echo $(grep_include -F 'qemu/bswap.h') files include qemu/bswap.h echo diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py index 8a254a5b6a2..67631ac43e9 100755 --- a/scripts/analyze-migration.py +++ b/scripts/analyze-migration.py @@ -65,6 +65,9 @@ def readvar(self, size = None): def tell(self): return self.file.tell() + def seek(self, a, b): + return self.file.seek(a, b) + # The VMSD description is at the end of the file, after EOF. Look for # the last NULL byte, then for the beginning brace of JSON. def read_migration_debug_json(self): @@ -272,11 +275,24 @@ def __init__(self, file, version_id, device, section_key): self.section_key = section_key def read(self): + pos = 0 while True: addr_flags = self.file.read64() flags = addr_flags & 0xfff - if (flags & (self.STATTR_FLAG_DONE | self.STATTR_FLAG_EOS)): + + if flags & self.STATTR_FLAG_DONE: + pos = self.file.tell() + continue + elif flags & self.STATTR_FLAG_EOS: return + else: + # No EOS came after DONE, that's OK, but rewind the + # stream because this is not our data. 
+ if pos: + self.file.seek(pos, os.SEEK_SET) + return + raise Exception("Unknown flags %x", flags) + if (flags & self.STATTR_FLAG_ERROR): raise Exception("Error in migration stream") count = self.file.read64() @@ -401,6 +417,28 @@ def __init__(self, desc, file): super(VMSDFieldIntLE, self).__init__(desc, file) self.dtype = ' "$tar_file" test $? -ne 0 && error "failed to archive qemu" for sp in $subprojects; do meson subprojects download $sp test $? -ne 0 && error "failed to download subproject $sp" - tar --append --file "$tar_file" --exclude=.git subprojects/$sp + tar --append --file "$tar_file" --exclude=.git subprojects/"$(subproject_dir $sp)" test $? -ne 0 && error "failed to append subproject $sp to $tar_file" done exit 0 diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index ff373a70831..833f20f5555 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -365,6 +365,18 @@ sub help { qr{guintptr}, ); +# Match text found in common license boilerplate comments: +# for new files the SPDX-License-Identifier line is sufficient. +our @LICENSE_BOILERPLATE = ( + "licensed under the terms of the GNU GPL", + "under the terms of the GNU General Public License", + "under the terms of the GNU Lesser General Public", + "Permission is hereby granted, free of charge", + "GNU GPL, version 2 or later", + "See the COPYING file" +); +our $LICENSE_BOILERPLATE_RE = join("|", @LICENSE_BOILERPLATE); + # Load common spelling mistakes and build regular expression list. my $misspellings; my %spelling_fix; @@ -1330,26 +1342,179 @@ sub WARN { } } -# According to tests/qtest/bios-tables-test.c: do not -# change expected file in the same commit with adding test -sub checkfilename { - my ($name, $acpi_testexpected, $acpi_nontestexpected) = @_; - - # Note: shell script that rebuilds the expected files is in the same - # directory as files themselves. - # Note: allowed diff list can be changed both when changing expected - # files and when changing tests. - if ($name =~ m#^tests/data/acpi/# and not $name =~ m#^\.sh$#) { - $$acpi_testexpected = $name; - } elsif ($name !~ m#^tests/qtest/bios-tables-test-allowed-diff.h$#) { - $$acpi_nontestexpected = $name; +sub checkspdx { + my ($file, $expr) = @_; + + # Imported Linux headers probably have SPDX tags, but if they + # don't we're not requiring contributors to fix this, as these + # files are not expected to be modified locally in QEMU. + # Also don't accidentally detect own checking code. + if ($file =~ m,include/standard-headers, || + $file =~ m,linux-headers, || + $file =~ m,checkpatch.pl,) { + return; + } + + my $origexpr = $expr; + + # Flatten sub-expressions + $expr =~ s/\(|\)/ /g; + $expr =~ s/OR|AND/ /g; + + # Merge WITH exceptions to the license + $expr =~ s/\s+WITH\s+/-WITH-/g; + + # Cull more leading/trailing whitespace + $expr =~ s/^\s*//g; + $expr =~ s/\s*$//g; + + my @bits = split / +/, $expr; + + my $prefer = "GPL-2.0-or-later"; + my @valid = qw( + GPL-2.0-only + LGPL-2.1-only + LGPL-2.1-or-later + BSD-2-Clause + BSD-3-Clause + MIT + ); + + my $nonpreferred = 0; + my @unknown = (); + foreach my $bit (@bits) { + if ($bit eq $prefer) { + next; + } + if (grep /^$bit$/, @valid) { + $nonpreferred = 1; + } else { + push @unknown, $bit; + } + } + if (@unknown) { + ERROR("Saw unacceptable licenses '" . join(',', @unknown) . + "', valid choices for QEMU are:\n" . join("\n", $prefer, @valid)); + } + + if ($nonpreferred) { + WARN("Saw acceptable license '$origexpr' but note '$prefer' is " . 
+ "preferred for new files unless the code is derived from a " . + "source file with an existing declared license that must be " . + "retained. Please explain the license choice in the commit " . + "message."); + } +} + +# All three of the methods below take a 'file info' record +# which is a hash ref containing +# +# 'isgit': 1 if an enhanced git diff or 0 for a plain diff +# 'githeader': 1 if still parsing git patch header, 0 otherwise +# 'linestart': line number of start of file diff +# 'lineend': line number of end of file diff +# 'filenew': the new filename +# 'fileold': the old filename (same as 'new filename' except +# for renames in git diffs) +# 'action': one of 'modified' (always) or 'new' or 'deleted' or +# 'renamed' (git diffs only) +# 'mode': file mode for new/deleted files (git diffs only) +# 'similarity': file similarity when renamed (git diffs only) +# 'facts': hash ref for storing any metadata related to checks +# + +# Called at the end of each patch, with the list of +# real filenames that were seen in the patch +sub process_file_list { + my @fileinfos = @_; + + # According to tests/qtest/bios-tables-test.c: do not + # change expected file in the same commit with adding test + my @acpi_testexpected; + my @acpi_nontestexpected; + + foreach my $fileinfo (@fileinfos) { + # Note: shell script that rebuilds the expected files is in + # the same directory as files themselves. + # Note: allowed diff list can be changed both when changing + # expected files and when changing tests. + if ($fileinfo->{filenew} =~ m#^tests/data/acpi/# && + $fileinfo->{filenew} !~ m#^\.sh$#) { + push @acpi_testexpected, $fileinfo->{filenew}; + } elsif ($fileinfo->{filenew} !~ + m#^tests/qtest/bios-tables-test-allowed-diff.h$#) { + push @acpi_nontestexpected, $fileinfo->{filenew}; + } } - if (defined $$acpi_testexpected and defined $$acpi_nontestexpected) { + if (int(@acpi_testexpected) > 0 and int(@acpi_nontestexpected) > 0) { ERROR("Do not add expected files together with tests, " . "follow instructions in " . - "tests/qtest/bios-tables-test.c: both " . - $$acpi_testexpected . " and " . - $$acpi_nontestexpected . " found\n"); + "tests/qtest/bios-tables-test.c. Files\n\n " . + join("\n ", @acpi_testexpected) . + "\n\nand\n\n " . + join("\n ", @acpi_nontestexpected) . + "\n\nfound in the same patch\n"); + } + + my $sawmaintainers = 0; + my @maybemaintainers; + foreach my $fileinfo (@fileinfos) { + if ($fileinfo->{action} ne "modified" && + $fileinfo->{filenew} !~ m#^tests/data/acpi/#) { + push @maybemaintainers, $fileinfo->{filenew}; + } + if ($fileinfo->{filenew} eq "MAINTAINERS") { + $sawmaintainers = 1; + } + } + + # If we don't see a MAINTAINERS update, prod the user to check + if (int(@maybemaintainers) > 0 && !$sawmaintainers) { + WARN("added, moved or deleted file(s):\n\n " . + join("\n ", @maybemaintainers) . + "\n\nDoes MAINTAINERS need updating?\n"); + } +} + +# Called at the start of processing a diff hunk for a file +sub process_start_of_file { + my $fileinfo = shift; + + # Check for incorrect file permissions + if ($fileinfo->{action} eq "new" && ($fileinfo->{mode} & 0111)) { + my $permhere = $fileinfo->{linestart} . "FILE: " . + $fileinfo->{filenew} . "\n"; + if ($fileinfo->{filenew} =~ + /(\bMakefile.*|\.(c|cc|cpp|h|mak|s|S))$/) { + ERROR("do not set execute permissions for source " . + "files\n" . 
$permhere); + } + } +} + +# Called at the end of processing a diff hunk for a file +sub process_end_of_file { + my $fileinfo = shift; + + if ($fileinfo->{action} eq "new" && + !exists $fileinfo->{facts}->{sawspdx}) { + if ($fileinfo->{filenew} =~ + /(\.(c|h|py|pl|sh|json|inc|rs)|Makefile.*)$/) { + # source code files MUST have SPDX license declared + ERROR("New file '" . $fileinfo->{filenew} . + "' requires 'SPDX-License-Identifier'"); + } else { + # Other files MAY have SPDX license if appropriate + WARN("Does new file '" . $fileinfo->{filenew} . + "' need 'SPDX-License-Identifier'?"); + } + } + if ($fileinfo->{action} eq "new" && + exists $fileinfo->{facts}->{sawboilerplate}) { + ERROR("New file '" . $fileinfo->{filenew} . "' must " . + "not have license boilerplate header text, only " . + "the SPDX-License-Identifier, unless this file was " . + "copied from existing code already having such text."); } } @@ -1373,7 +1538,9 @@ sub process { my $in_header_lines = $file ? 0 : 1; my $in_commit_log = 0; #Scanning lines before patch - my $reported_maintainer_file = 0; + my $reported_mixing_imported_file = 0; + my $in_imported_file = 0; + my $in_no_imported_file = 0; my $non_utf8_charset = 0; our @report = (); @@ -1386,7 +1553,10 @@ sub process { my $realfile = ''; my $realline = 0; my $realcnt = 0; + my $fileinfo; + my @fileinfolist; my $here = ''; + my $oldhere = ''; my $in_comment = 0; my $comment_edge = 0; my $first_line = 0; @@ -1399,9 +1569,6 @@ sub process { my %suppress_whiletrailers; my %suppress_export; - my $acpi_testexpected; - my $acpi_nontestexpected; - # Pre-scan the patch sanitizing the lines. sanitise_line_reset(); @@ -1524,18 +1691,54 @@ sub process { $prefix = "$filename:$realline: " if ($emacs && $file); $prefix = "$filename:$linenr: " if ($emacs && !$file); + $oldhere = $here; $here = "#$linenr: " if (!$file); $here = "#$realline: " if ($file); # extract the filename as it passes - if ($line =~ /^diff --git.*?(\S+)$/) { - $realfile = $1; - $realfile =~ s@^([^/]*)/@@ if (!$file); - checkfilename($realfile, \$acpi_testexpected, \$acpi_nontestexpected); + if ($line =~ /^diff --git\s+(\S+)\s+(\S+)$/) { + my $fileold = $1; + my $filenew = $2; + + if (defined $fileinfo) { + $fileinfo->{lineend} = $oldhere; + process_end_of_file($fileinfo) + } + $fileold =~ s@^([^/]*)/@@ if (!$file); + $filenew =~ s@^([^/]*)/@@ if (!$file); + $realfile = $filenew; + + $fileinfo = { + "isgit" => 1, + "githeader" => 1, + "linestart" => $here, + "lineend" => 0, + "fileold" => $fileold, + "filenew" => $filenew, + "action" => "modified", + "mode" => 0, + "similarity" => 0, + "facts" => {}, + }; + push @fileinfolist, $fileinfo; + } elsif (defined $fileinfo && $fileinfo->{githeader} && + $line =~ /^(new|deleted) (?:file )?mode\s+([0-7]+)$/) { + $fileinfo->{action} = $1; + $fileinfo->{mode} = oct($2); + } elsif (defined $fileinfo && $fileinfo->{githeader} && + $line =~ /^similarity index (\d+)%/) { + $fileinfo->{similarity} = int($1); + } elsif (defined $fileinfo && $fileinfo->{githeader} && + $line =~ /^rename (from|to) [\w\/\.\-]+\s*$/) { + $fileinfo->{action} = "renamed"; + # For a no-change rename, we'll never have any "+++..." 
+ # lines, so trigger actions now + if ($1 eq "to" && $fileinfo->{similarity} == 100) { + process_start_of_file($fileinfo); + } } elsif ($line =~ /^\+\+\+\s+(\S+)/) { $realfile = $1; $realfile =~ s@^([^/]*)/@@ if (!$file); - checkfilename($realfile, \$acpi_testexpected, \$acpi_nontestexpected); $p1_prefix = $1; if (!$file && $tree && $p1_prefix ne '' && @@ -1543,6 +1746,30 @@ sub process { WARN("patch prefix '$p1_prefix' exists, appears to be a -p0 patch\n"); } + if (defined $fileinfo && !$fileinfo->{isgit}) { + $fileinfo->{lineend} = $oldhere; + process_end_of_file($fileinfo); + } + + if (!defined $fileinfo || !$fileinfo->{isgit}) { + $fileinfo = { + "isgit" => 0, + "githeader" => 0, + "linestart" => $here, + "lineend" => 0, + "fileold" => $realfile, + "filenew" => $realfile, + "action" => "modified", + "mode" => 0, + "similarity" => 0, + "facts" => {}, + }; + push @fileinfolist, $fileinfo; + } else { + $fileinfo->{githeader} = 0; + } + process_start_of_file($fileinfo); + next; } @@ -1554,14 +1781,6 @@ sub process { $cnt_lines++ if ($realcnt != 0); -# Check for incorrect file permissions - if ($line =~ /^new (file )?mode.*[7531]\d{0,2}$/) { - my $permhere = $here . "FILE: $realfile\n"; - if ($realfile =~ /(\bMakefile(?:\.objs)?|\.c|\.cc|\.cpp|\.h|\.mak|\.[sS])$/) { - ERROR("do not set execute permissions for source files\n" . $permhere); - } - } - # Only allow Python 3 interpreter if ($realline == 1 && $line =~ /^\+#!\ *\/usr\/bin\/(?:env )?python$/) { @@ -1593,23 +1812,27 @@ sub process { } } -# Check if MAINTAINERS is being updated. If so, there's probably no need to -# emit the "does MAINTAINERS need updating?" message on file add/move/delete - if ($line =~ /^\s*MAINTAINERS\s*\|/) { - $reported_maintainer_file = 1; +# Check SPDX-License-Identifier references a permitted license + if ($rawline =~ m,SPDX-License-Identifier: (.*?)(\*/)?\s*$,) { + $fileinfo->{facts}->{sawspdx} = 1; + &checkspdx($realfile, $1); } -# Check for added, moved or deleted files - if (!$reported_maintainer_file && !$in_commit_log && - ($line =~ /^(?:new|deleted) file mode\s*\d+\s*$/ || - $line =~ /^rename (?:from|to) [\w\/\.\-]+\s*$/ || - ($line =~ /\{\s*([\w\/\.\-]*)\s*\=\>\s*([\w\/\.\-]*)\s*\}/ && - (defined($1) || defined($2)))) && - !(($realfile ne '') && - defined($acpi_testexpected) && - ($realfile eq $acpi_testexpected))) { - $reported_maintainer_file = 1; - WARN("added, moved or deleted file(s), does MAINTAINERS need updating?\n" . $herecurr); + if ($rawline =~ /$LICENSE_BOILERPLATE_RE/) { + $fileinfo->{facts}->{sawboilerplate} = 1; + } + + if ($rawline =~ m,(SPDX-[a-zA-Z0-9-_]+):,) { + my $tag = $1; + my @permitted = qw( + SPDX-License-Identifier + ); + + unless (grep { /^$tag$/ } @permitted) { + ERROR("Tag $tag not permitted in QEMU code, " . + "valid choices are: " . + join(", ", @permitted)); + } } # Check for wrappage within a valid hunk of the file @@ -1673,6 +1896,27 @@ sub process { # ignore non-hunk lines and lines being removed next if (!$hunk_line || $line =~ /^-/); +# Check that updating imported files from Linux are not mixed with other changes + if ($realfile =~ /^(linux-headers|include\/standard-headers)\//) { + if (!$in_imported_file) { + WARN("added, moved or deleted file(s) " . + "imported from Linux, are you using " . + "scripts/update-linux-headers.sh?\n" . + $herecurr); + } + $in_imported_file = 1; + } else { + $in_no_imported_file = 1; + } + + if (!$reported_mixing_imported_file && + $in_imported_file && $in_no_imported_file) { + ERROR("headers imported from Linux should be self-" . 
+ "contained in a patch with no other changes\n" . + $herecurr); + $reported_mixing_imported_file = 1; + } + # ignore files that are being periodically imported from Linux next if ($realfile =~ /^(linux-headers|include\/standard-headers)\//); @@ -2169,7 +2413,7 @@ sub process { # missing space after union, struct or enum definition if ($line =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident)?(?:\s+$Ident)?[=\{]/) { - ERROR("missing space after $1 definition\n" . $herecurr); + ERROR("missing space after $1 definition\n" . $herecurr); } # check for spacing round square brackets; allowed: @@ -2222,7 +2466,7 @@ sub process { } } # Check operator spacing. - if (!($line=~/\#\s*include/)) { + if (!($line=~/\#\s*(include|import)/)) { my $ops = qr{ <<=|>>=|<=|>=|==|!=| \+=|-=|\*=|\/=|%=|\^=|\|=|&=| @@ -2464,7 +2708,7 @@ sub process { if ($line =~ /^.\s*(Q(?:S?LIST|SIMPLEQ|TAILQ)_HEAD)\s*\(\s*[^,]/ && $line !~ /^.typedef/) { - ERROR("named $1 should be typedefed separately\n" . $herecurr); + ERROR("named $1 should be typedefed separately\n" . $herecurr); } # Need a space before open parenthesis after if, while etc @@ -3013,48 +3257,50 @@ sub process { # Qemu error function tests - # Find newlines in error messages - my $qemu_error_funcs = qr{error_setg| - error_setg_errno| - error_setg_win32| - error_setg_file_open| - error_set| - error_prepend| - warn_reportf_err| - error_reportf_err| - error_vreport| - warn_vreport| - info_vreport| - error_report| - warn_report| - info_report| - g_test_message}x; - - if ($rawline =~ /\b(?:$qemu_error_funcs)\s*\(.*\".*\\n/) { - ERROR("Error messages should not contain newlines\n" . $herecurr); - } + # Find newlines in error messages + my $qemu_error_funcs = qr{error_setg| + error_setg_errno| + error_setg_win32| + error_setg_file_open| + error_set| + error_prepend| + warn_reportf_err| + error_reportf_err| + error_vreport| + warn_vreport| + info_vreport| + error_report| + warn_report| + info_report| + g_test_message}x; + + if ($rawline =~ /\b(?:$qemu_error_funcs)\s*\(.*\".*\\n/) { + ERROR("Error messages should not contain newlines\n" . $herecurr); + } - # Continue checking for error messages that contains newlines. This - # check handles cases where string literals are spread over multiple lines. - # Example: - # error_report("Error msg line #1" - # "Error msg line #2\n"); - my $quoted_newline_regex = qr{\+\s*\".*\\n.*\"}; - my $continued_str_literal = qr{\+\s*\".*\"}; + # Continue checking for error messages that contains newlines. + # This check handles cases where string literals are spread + # over multiple lines. + # Example: + # error_report("Error msg line #1" + # "Error msg line #2\n"); + my $quoted_newline_regex = qr{\+\s*\".*\\n.*\"}; + my $continued_str_literal = qr{\+\s*\".*\"}; - if ($rawline =~ /$quoted_newline_regex/) { - # Backtrack to first line that does not contain only a quoted literal - # and assume that it is the start of the statement. - my $i = $linenr - 2; + if ($rawline =~ /$quoted_newline_regex/) { + # Backtrack to first line that does not contain only + # a quoted literal and assume that it is the start + # of the statement. + my $i = $linenr - 2; - while (($i >= 0) & $rawlines[$i] =~ /$continued_str_literal/) { - $i--; - } + while (($i >= 0) & $rawlines[$i] =~ /$continued_str_literal/) { + $i--; + } - if ($rawlines[$i] =~ /\b(?:$qemu_error_funcs)\s*\(/) { - ERROR("Error messages should not contain newlines\n" . 
$herecurr); + if ($rawlines[$i] =~ /\b(?:$qemu_error_funcs)\s*\(/) { + ERROR("Error messages should not contain newlines\n" . $herecurr); + } } - } # check for non-portable libc calls that have portable alternatives in QEMU if ($line =~ /\bffs\(/) { @@ -3078,6 +3324,10 @@ sub process { if ($line =~ /\b(g_)?assert\(0\)/) { ERROR("use g_assert_not_reached() instead of assert(0)\n" . $herecurr); } + if ($line =~ /\b(g_)?assert\(false\)/) { + ERROR("use g_assert_not_reached() instead of assert(false)\n" . + $herecurr); + } if ($line =~ /\bstrerrorname_np\(/) { ERROR("use strerror() instead of strerrorname_np()\n" . $herecurr); } @@ -3104,6 +3354,11 @@ sub process { } } + if (defined $fileinfo) { + process_end_of_file($fileinfo); + } + process_file_list(@fileinfolist); + if ($is_patch && $chk_signoff && $signoff == 0) { ERROR("Missing Signed-off-by: line(s)\n"); } diff --git a/scripts/ci/gitlab-ci-section b/scripts/ci/gitlab-ci-section new file mode 100644 index 00000000000..9bbe80420d6 --- /dev/null +++ b/scripts/ci/gitlab-ci-section @@ -0,0 +1,29 @@ +# Copyright (c) 2024 Linaro Ltd +# SPDX-License-Identifier: GPL-2.0-or-later + +# gitlab-ci-section: This is a shell script fragment which defines +# functions section_start and section_end which will emit marker lines +# that GitLab will interpret as the beginning or end of a "collapsible +# section" in a CI job log. See +# https://docs.gitlab.com/ee/ci/yaml/script.html#expand-and-collapse-job-log-sections +# +# This is intended to be sourced in the before_script section of +# a CI config; the section_start and section_end functions will +# then be available for use in the before_script and script sections. + +# Section names are [-_.A-Za-z0-9] and the section_start pairs with +# a section_end with the same section name. +# The description can be any printable text without newlines; this is +# what will appear in the log. 
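The marker format itself is dictated by GitLab, not by QEMU: a collapsible section opens with "section_start:<unix-timestamp>:<section-name>" followed by a carriage return and the "\e[0K" erase-to-end-of-line escape, and closes with a matching "section_end:<unix-timestamp>:<section-name>" marker. As a rough, standalone illustration of the bytes the shell helpers defined just below emit (not part of the patch; the section name "configure" is only an example):

import sys
import time

def section_start(name: str, description: str) -> None:
    # GitLab folds everything between start/end markers that share the same name;
    # \x1b[0K is the same erase-line escape the shell version writes as \e[0K.
    sys.stdout.write(f"section_start:{int(time.time())}:{name}\r\x1b[0K{description}\n")

def section_end(name: str) -> None:
    sys.stdout.write(f"section_end:{int(time.time())}:{name}\r\x1b[0K\n")

section_start("configure", "Configuring QEMU")
# ... commands whose output should be collapsible run here ...
section_end("configure")

Because the fragment is meant to be sourced from before_script, the same section_start/section_end calls can then be made directly from a job's script steps.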
+ +# Usage: +# section_start section_name "Description of the section" +section_start () { + printf "section_start:%s:%s\r\e[0K%s\n" "$(date +%s)" "$1" "$2" +} + +# Usage: +# section_end section_name +section_end () { + printf "section_end:%s:%s\r\e[0K\n" "$(date +%s)" "$1" +} diff --git a/scripts/ci/setup/gitlab-runner.yml b/scripts/ci/setup/gitlab-runner.yml index 7bdafab5115..57e7faebf10 100644 --- a/scripts/ci/setup/gitlab-runner.yml +++ b/scripts/ci/setup/gitlab-runner.yml @@ -49,30 +49,51 @@ - debug: msg: gitlab-runner arch is {{ gitlab_runner_arch }} - - name: Download the matching gitlab-runner (DEB) + # Debian/Ubuntu setup + - name: Get gitlab-runner repo setup script (DEB) get_url: dest: "/root/" - url: "https://gitlab-runner-downloads.s3.amazonaws.com/latest/deb/gitlab-runner_{{ gitlab_runner_arch }}.deb" + url: "https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh" + mode: 0755 when: - ansible_facts['distribution'] == 'Ubuntu' - - name: Download the matching gitlab-runner (RPM) + - name: Run gitlab-runner repo setup script (DEB) + shell: "/root/script.deb.sh" + when: + - ansible_facts['distribution'] == 'Ubuntu' + + - name: Install gitlab-runner (DEB) + ansible.builtin.apt: + name: gitlab-runner + update_cache: yes + state: present + when: + - ansible_facts['distribution'] == 'Ubuntu' + + # RPM setup + - name: Get gitlab-runner repo setup script (RPM) get_url: dest: "/root/" - url: "https://gitlab-runner-downloads.s3.amazonaws.com/latest/rpm/gitlab-runner_{{ gitlab_runner_arch }}.rpm" + url: "https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh" + mode: 0755 when: - ansible_facts['distribution'] == 'CentOS' - - name: Install gitlab-runner via package manager (DEB) - apt: deb="/root/gitlab-runner_{{ gitlab_runner_arch }}.deb" + - name: Run gitlab-runner repo setup script (RPM) + shell: "/root/script.rpm.sh" when: - - ansible_facts['distribution'] == 'Ubuntu' + - ansible_facts['distribution'] == 'CentOS' - - name: Install gitlab-runner via package manager (RPM) - yum: name="/root/gitlab-runner_{{ gitlab_runner_arch }}.rpm" + - name: Install gitlab-runner (RPM) + yum: + name: gitlab-runner + update_cache: yes + state: present when: - ansible_facts['distribution'] == 'CentOS' + # Register Runners - name: Register the gitlab-runner command: "/usr/bin/gitlab-runner register --non-interactive --url {{ gitlab_runner_server_url }} --registration-token {{ gitlab_runner_registration_token }} --executor shell --tag-list {{ ansible_facts[\"architecture\"] }},{{ ansible_facts[\"distribution\"]|lower }}_{{ ansible_facts[\"distribution_version\"] }} --description '{{ ansible_facts[\"distribution\"] }} {{ ansible_facts[\"distribution_version\"] }} {{ ansible_facts[\"architecture\"] }} ({{ ansible_facts[\"os_family\"] }})'" diff --git a/scripts/ci/setup/ubuntu/build-environment.yml b/scripts/ci/setup/ubuntu/build-environment.yml index edf1900b3ec..56b51609e38 100644 --- a/scripts/ci/setup/ubuntu/build-environment.yml +++ b/scripts/ci/setup/ubuntu/build-environment.yml @@ -39,7 +39,6 @@ when: - ansible_facts['distribution'] == 'Ubuntu' - ansible_facts['distribution_version'] == '22.04' - - ansible_facts['architecture'] == 'aarch64' or ansible_facts['architecture'] == 'x86_64' - name: Install packages for QEMU on Ubuntu 22.04 package: @@ -47,7 +46,6 @@ when: - ansible_facts['distribution'] == 'Ubuntu' - ansible_facts['distribution_version'] == '22.04' - - ansible_facts['architecture'] == 'aarch64' or ansible_facts['architecture'] == 
'x86_64' - name: Install armhf cross-compile packages to build QEMU on AArch64 Ubuntu 22.04 package: diff --git a/scripts/ci/setup/ubuntu/ubuntu-2204-aarch64.yaml b/scripts/ci/setup/ubuntu/ubuntu-2204-aarch64.yaml index fd5489cd82b..f11e9808267 100644 --- a/scripts/ci/setup/ubuntu/ubuntu-2204-aarch64.yaml +++ b/scripts/ci/setup/ubuntu/ubuntu-2204-aarch64.yaml @@ -35,6 +35,7 @@ packages: - libcacard-dev - libcap-ng-dev - libcapstone-dev + - libcbor-dev - libcmocka-dev - libcurl4-gnutls-dev - libdaxctl-dev @@ -49,6 +50,7 @@ packages: - libglusterfs-dev - libgnutls28-dev - libgtk-3-dev + - libgtk-vnc-2.0-dev - libibverbs-dev - libiscsi-dev - libjemalloc-dev @@ -112,6 +114,7 @@ packages: - python3-venv - python3-yaml - rpm2cpio + - rustc-1.77 - sed - socat - sparse @@ -120,6 +123,7 @@ packages: - tar - tesseract-ocr - tesseract-ocr-eng + - vulkan-tools - xorriso - zlib1g-dev - zstd diff --git a/scripts/ci/setup/ubuntu/ubuntu-2204-s390x.yaml b/scripts/ci/setup/ubuntu/ubuntu-2204-s390x.yaml index afa04502cf0..6559cb29343 100644 --- a/scripts/ci/setup/ubuntu/ubuntu-2204-s390x.yaml +++ b/scripts/ci/setup/ubuntu/ubuntu-2204-s390x.yaml @@ -35,6 +35,7 @@ packages: - libcacard-dev - libcap-ng-dev - libcapstone-dev + - libcbor-dev - libcmocka-dev - libcurl4-gnutls-dev - libdaxctl-dev @@ -49,6 +50,7 @@ packages: - libglusterfs-dev - libgnutls28-dev - libgtk-3-dev + - libgtk-vnc-2.0-dev - libibverbs-dev - libiscsi-dev - libjemalloc-dev @@ -110,6 +112,7 @@ packages: - python3-venv - python3-yaml - rpm2cpio + - rustc-1.77 - sed - socat - sparse @@ -118,6 +121,7 @@ packages: - tar - tesseract-ocr - tesseract-ocr-eng + - vulkan-tools - xorriso - zlib1g-dev - zstd diff --git a/scripts/clean-includes b/scripts/clean-includes index bdbf4040240..25dbf16c021 100755 --- a/scripts/clean-includes +++ b/scripts/clean-includes @@ -130,8 +130,8 @@ for f in "$@"; do *include/qemu/compiler.h | \ *include/qemu/qemu-plugin.h | \ *include/glib-compat.h | \ - *include/sysemu/os-posix.h | \ - *include/sysemu/os-win32.h | \ + *include/system/os-posix.h | \ + *include/system/os-win32.h | \ *include/standard-headers/ ) # Removing include lines from osdep.h itself would be counterproductive. echo "SKIPPING $f (special case header)" @@ -174,7 +174,7 @@ for f in "$@"; do - "sysemu/os-posix.h, sysemu/os-win32.h "glib-compat.h" + "system/os-posix.h, system/os-win32.h "glib-compat.h" "qemu/typedefs.h" ))' "$f" diff --git a/scripts/cocci-macro-file.h b/scripts/cocci-macro-file.h index d247a5086e9..c64831d5408 100644 --- a/scripts/cocci-macro-file.h +++ b/scripts/cocci-macro-file.h @@ -23,11 +23,7 @@ #define G_GNUC_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) #define G_GNUC_NULL_TERMINATED __attribute__((sentinel)) -#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__)) -# define QEMU_PACKED __attribute__((gcc_struct, packed)) -#else -# define QEMU_PACKED __attribute__((packed)) -#endif +#define QEMU_PACKED __attribute__((packed)) #define cat(x,y) x ## y #define cat2(x,y) cat(x,y) diff --git a/scripts/coccinelle/device-reset.cocci b/scripts/coccinelle/device-reset.cocci new file mode 100644 index 00000000000..510042afcca --- /dev/null +++ b/scripts/coccinelle/device-reset.cocci @@ -0,0 +1,30 @@ +// Convert opencoded DeviceClass::reset assignments to calls to +// device_class_set_legacy_reset() +// +// Copyright Linaro Ltd 2024 +// This work is licensed under the terms of the GNU GPLv2 or later. 
+// +// spatch --macro-file scripts/cocci-macro-file.h \ +// --sp-file scripts/coccinelle/device-reset.cocci \ +// --keep-comments --smpl-spacing --in-place --include-headers --dir hw +// +// For simplicity we assume some things about the code we're modifying +// that happen to be true for all our targets: +// * all cpu_class_set_parent_reset() callsites have a 'DeviceClass *dc' local +// * the parent reset field in the target CPU class is 'parent_reset' +// * no reset function already has a 'dev' local + +@@ +identifier dc, resetfn; +@@ + DeviceClass *dc; + ... +- dc->reset = resetfn; ++ device_class_set_legacy_reset(dc, resetfn); +@@ +identifier dc, resetfn; +@@ + DeviceClass *dc; + ... +- dc->reset = &resetfn; ++ device_class_set_legacy_reset(dc, resetfn); diff --git a/scripts/codeconverter/codeconverter/qom_type_info.py b/scripts/codeconverter/codeconverter/qom_type_info.py index 255cb59923d..22a25560760 100644 --- a/scripts/codeconverter/codeconverter/qom_type_info.py +++ b/scripts/codeconverter/codeconverter/qom_type_info.py @@ -798,7 +798,8 @@ def gen_patches(self) -> Iterable[Patch]: # # # if 'class_init' not in fields: -# yield self.prepend(('static void %s_class_init(ObjectClass *oc, void *data)\n' +# yield self.prepend(('static void %s_class_init(ObjectClass *oc,\n' +# 'const void *data)\n' # '{\n' # '}\n\n') % (ids.lowercase)) # yield self.append_field('class_init', ids.lowercase+'_class_init') @@ -901,26 +902,6 @@ class TypeRegisterCall(FileMatch): regexp = S(r'^[ \t]*', NAMED('func_name', 'type_register'), r'\s*\(&\s*', NAMED('name', RE_IDENTIFIER), r'\s*\);[ \t]*\n') -class MakeTypeRegisterStatic(TypeRegisterCall): - """Make type_register() call static if variable is static const""" - def gen_patches(self): - var = self.file.find_match(TypeInfoVar, self.name) - if var is None: - self.warn("can't find TypeInfo var declaration for %s", self.name) - return - if var.is_static() and var.is_const(): - yield self.group_match('func_name').make_patch('type_register_static') - -class MakeTypeRegisterNotStatic(TypeRegisterStaticCall): - """Make type_register() call static if variable is static const""" - def gen_patches(self): - var = self.file.find_match(TypeInfoVar, self.name) - if var is None: - self.warn("can't find TypeInfo var declaration for %s", self.name) - return - if not var.is_static() or not var.is_const(): - yield self.group_match('func_name').make_patch('type_register') - class TypeInfoMacro(FileMatch): """TYPE_INFO macro usage""" regexp = S(r'^[ \t]*TYPE_INFO\s*\(\s*', NAMED('name', RE_IDENTIFIER), r'\s*\)[ \t]*;?[ \t]*\n') diff --git a/scripts/codeconverter/codeconverter/test_regexps.py b/scripts/codeconverter/codeconverter/test_regexps.py index a445634d88a..4526268ae80 100644 --- a/scripts/codeconverter/codeconverter/test_regexps.py +++ b/scripts/codeconverter/codeconverter/test_regexps.py @@ -70,15 +70,15 @@ def fullmatch(regexp, s): .name = armsse_variants[i].name, .parent = TYPE_ARMSSE, .class_init = armsse_class_init, - .class_data = (void *)&armsse_variants[i], + .class_data = &armsse_variants[i], };''', re.MULTILINE) print(RE_ARRAY_ITEM) assert fullmatch(RE_ARRAY_ITEM, '{ TYPE_HOTPLUG_HANDLER },') assert fullmatch(RE_ARRAY_ITEM, '{ TYPE_ACPI_DEVICE_IF },') assert fullmatch(RE_ARRAY_ITEM, '{ }') - assert fullmatch(RE_ARRAY_CAST, '(InterfaceInfo[])') - assert fullmatch(RE_ARRAY, '''(InterfaceInfo[]) { + assert fullmatch(RE_ARRAY_CAST, '(const InterfaceInfo[])') + assert fullmatch(RE_ARRAY, '''(const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { 
TYPE_ACPI_DEVICE_IF }, { } @@ -98,7 +98,7 @@ def fullmatch(regexp, s): .parent = TYPE_DEVICE, .instance_size = sizeof(CRBState), .class_init = tpm_crb_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_TPM_IF }, { } } @@ -134,7 +134,7 @@ def fullmatch(regexp, s): .instance_size = sizeof(AcpiGedState), .instance_init = acpi_ged_initfn, .class_init = acpi_ged_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_HOTPLUG_HANDLER }, { TYPE_ACPI_DEVICE_IF }, { } @@ -164,7 +164,7 @@ def fullmatch(regexp, s): .parent = TYPE_DEVICE, .instance_size = sizeof(CRBState), .class_init = tpm_crb_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_TPM_IF }, { } } @@ -269,7 +269,7 @@ def test_initial_includes(): #include "hw/pci/pci.h" #include "migration/vmstate.h" #include "qemu/module.h" -#include "sysemu/dma.h" +#include "system/dma.h" /* Missing stuff: SCTRL_P[12](END|ST)INC @@ -278,5 +278,5 @@ def test_initial_includes(): m = InitialIncludes.domatch(c) assert m print(repr(m.group(0))) - assert m.group(0).endswith('#include "sysemu/dma.h"\n') + assert m.group(0).endswith('#include "system/dma.h"\n') diff --git a/scripts/coverity-scan/COMPONENTS.md b/scripts/coverity-scan/COMPONENTS.md index 858190be097..72995903ff9 100644 --- a/scripts/coverity-scan/COMPONENTS.md +++ b/scripts/coverity-scan/COMPONENTS.md @@ -9,9 +9,6 @@ arm avr ~ .*/qemu((/include)?/hw/avr/.*|/target/avr/.*) -cris - ~ .*/qemu((/include)?/hw/cris/.*|/target/cris/.*) - hexagon-gen (component should be ignored in analysis) ~ .*/qemu(/target/hexagon/.*generated.*) @@ -79,7 +76,7 @@ chardev ~ .*/qemu((/include)?/chardev/.*) crypto - ~ .*/qemu((/include)?/crypto/.*|/hw/.*/.*crypto.*|(/include/sysemu|/backends)/cryptodev.*|/host/include/.*/host/crypto/.*) + ~ .*/qemu((/include)?/crypto/.*|/hw/.*/.*crypto.*|(/include/system|/backends)/cryptodev.*|/host/include/.*/host/crypto/.*) disas ~ .*/qemu((/include)?/disas.*) @@ -147,7 +144,7 @@ kvm tcg ~ .*/qemu(/accel/tcg|/replay|/tcg)/.* -sysemu +system ~ .*/qemu(/system/.*|/accel/.*) (headers) diff --git a/scripts/decodetree.py b/scripts/decodetree.py index e8b72da3a97..f992472b73e 100644 --- a/scripts/decodetree.py +++ b/scripts/decodetree.py @@ -1016,9 +1016,12 @@ def infer_format(arg, fieldmask, flds, width): else: var_flds[n] = c + if not arg: + arg = infer_argument_set(flds) + # Look for an existing format with the same argument set and fields for fmt in formats.values(): - if arg and fmt.base != arg: + if fmt.base != arg: continue if fieldmask != fmt.fieldmask: continue @@ -1029,8 +1032,6 @@ def infer_format(arg, fieldmask, flds, width): return (fmt, const_flds) name = decode_function + '_Fmt_' + str(len(formats)) - if not arg: - arg = infer_argument_set(flds) fmt = Format(name, 0, arg, 0, 0, 0, fieldmask, var_flds, width) formats[name] = fmt diff --git a/scripts/device-crash-test b/scripts/device-crash-test index da8b56edd99..1ecb9663ae8 100755 --- a/scripts/device-crash-test +++ b/scripts/device-crash-test @@ -16,8 +16,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# with this program; if not, see . 
""" Run QEMU with all combinations of -machine and -device types, diff --git a/scripts/gensyscalls.sh b/scripts/gensyscalls.sh deleted file mode 100755 index 84957280da6..00000000000 --- a/scripts/gensyscalls.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/sh -# -# Update syscall_nr.h files from linux headers asm-generic/unistd.h -# -# This code is licensed under the GPL version 2 or later. See -# the COPYING file in the top-level directory. -# - -linux="$1" -output="$2" - -TMP=$(mktemp -d) - -if [ "$linux" = "" ] ; then - echo "Needs path to linux source tree" 1>&2 - exit 1 -fi - -if [ "$output" = "" ] ; then - output="$PWD" -fi - -upper() -{ - echo "$1" | tr "[:lower:]" "[:upper:]" | tr "[:punct:]" "_" -} - -qemu_arch() -{ - case "$1" in - arm64) - echo "aarch64" - ;; - *) - echo "$1" - ;; - esac -} - -read_includes() -{ - arch=$1 - bits=$2 - - cpp -P -nostdinc -fdirectives-only \ - -D_UAPI_ASM_$(upper ${arch})_BITSPERLONG_H \ - -D__ASM_$(upper ${arch})_BITSPERLONG_H \ - -D__BITS_PER_LONG=${bits} \ - -I${linux}/arch/${arch}/include/uapi/ \ - -I${linux}/include/uapi \ - -I${TMP} \ - "${linux}/arch/${arch}/include/uapi/asm/unistd.h" -} - -filter_defines() -{ - grep -e "#define __NR_" -e "#define __NR3264" -} - -rename_defines() -{ - sed "s/ __NR_/ TARGET_NR_/g;s/(__NR_/(TARGET_NR_/g" -} - -evaluate_values() -{ - sed "s/#define TARGET_NR_/QEMU TARGET_NR_/" | \ - cpp -P -nostdinc | \ - sed "s/^QEMU /#define /" -} - -generate_syscall_nr() -{ - arch=$1 - bits=$2 - file="$3" - guard="$(upper LINUX_USER_$(qemu_arch $arch)_$(basename "$file"))" - - (echo "/*" - echo " * This file contains the system call numbers." - echo " * Do not modify." - echo " * This file is generated by scripts/gensyscalls.sh" - echo " */" - echo "#ifndef ${guard}" - echo "#define ${guard}" - echo - read_includes $arch $bits | filter_defines | rename_defines | \ - evaluate_values | sort -n -k 3 - echo - echo "#endif /* ${guard} */") > "$file" -} - -mkdir "$TMP/asm" -> "$TMP/asm/bitsperlong.h" - -generate_syscall_nr arm64 64 "$output/linux-user/aarch64/syscall_nr.h" -generate_syscall_nr openrisc 32 "$output/linux-user/openrisc/syscall_nr.h" - -generate_syscall_nr riscv 32 "$output/linux-user/riscv/syscall32_nr.h" -generate_syscall_nr riscv 64 "$output/linux-user/riscv/syscall64_nr.h" -generate_syscall_nr hexagon 32 "$output/linux-user/hexagon/syscall_nr.h" -generate_syscall_nr loongarch 64 "$output/linux-user/loongarch64/syscall_nr.h" -rm -fr "$TMP" diff --git a/scripts/get-wraps-from-cargo-registry.py b/scripts/get-wraps-from-cargo-registry.py new file mode 100755 index 00000000000..31eed5c2dd4 --- /dev/null +++ b/scripts/get-wraps-from-cargo-registry.py @@ -0,0 +1,190 @@ +#!/usr/bin/env python3 + +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +get-wraps-from-cargo-registry.py - Update Meson subprojects from a global registry +""" + +# Copyright (C) 2025 Red Hat, Inc. 
+# +# Author: Paolo Bonzini + +import argparse +import configparser +import filecmp +import glob +import os +import subprocess +import sys + + +def get_name_and_semver(namever: str) -> tuple[str, str]: + """Split a subproject name into its name and semantic version parts""" + parts = namever.rsplit("-", 1) + if len(parts) != 2: + return namever, "" + + return parts[0], parts[1] + + +class UpdateSubprojects: + cargo_registry: str + top_srcdir: str + dry_run: bool + changes: int = 0 + + def find_installed_crate(self, namever: str) -> str | None: + """Find installed crate matching name and semver prefix""" + name, semver = get_name_and_semver(namever) + + # exact version match + path = os.path.join(self.cargo_registry, f"{name}-{semver}") + if os.path.exists(path): + return f"{name}-{semver}" + + # semver match + matches = sorted(glob.glob(f"{path}.*")) + return os.path.basename(matches[0]) if matches else None + + def compare_build_rs(self, orig_dir: str, registry_namever: str) -> None: + """Warn if the build.rs in the original directory differs from the registry version.""" + orig_build_rs = os.path.join(orig_dir, "build.rs") + new_build_rs = os.path.join(self.cargo_registry, registry_namever, "build.rs") + + msg = None + if os.path.isfile(orig_build_rs) != os.path.isfile(new_build_rs): + if os.path.isfile(orig_build_rs): + msg = f"build.rs removed in {registry_namever}" + if os.path.isfile(new_build_rs): + msg = f"build.rs added in {registry_namever}" + + elif os.path.isfile(orig_build_rs) and not filecmp.cmp(orig_build_rs, new_build_rs): + msg = f"build.rs changed from {orig_dir} to {registry_namever}" + + if msg: + print(f"⚠️ Warning: {msg}") + print(" This may affect the build process - please review the differences.") + + def update_subproject(self, wrap_file: str, registry_namever: str) -> None: + """Modify [wrap-file] section to point to self.cargo_registry.""" + assert wrap_file.endswith("-rs.wrap") + wrap_name = wrap_file[:-5] + + env = os.environ.copy() + env["MESON_PACKAGE_CACHE_DIR"] = self.cargo_registry + + config = configparser.ConfigParser() + config.read(wrap_file) + if "wrap-file" not in config: + return + + # do not download the wrap, always use the local copy + orig_dir = config["wrap-file"]["directory"] + if os.path.exists(orig_dir) and orig_dir != registry_namever: + self.compare_build_rs(orig_dir, registry_namever) + + if self.dry_run: + if orig_dir == registry_namever: + print(f"Will install {orig_dir} from registry.") + else: + print(f"Will replace {orig_dir} with {registry_namever}.") + self.changes += 1 + return + + config["wrap-file"]["directory"] = registry_namever + for key in list(config["wrap-file"].keys()): + if key.startswith("source"): + del config["wrap-file"][key] + + # replace existing directory with installed version + if os.path.exists(orig_dir): + subprocess.run( + ["meson", "subprojects", "purge", "--confirm", wrap_name], + cwd=self.top_srcdir, + env=env, + check=True, + ) + + with open(wrap_file, "w") as f: + config.write(f) + + if orig_dir == registry_namever: + print(f"Installing {orig_dir} from registry.") + else: + print(f"Replacing {orig_dir} with {registry_namever}.") + patch_dir = config["wrap-file"]["patch_directory"] + patch_dir = os.path.join("packagefiles", patch_dir) + _, ver = registry_namever.rsplit("-", 1) + subprocess.run( + ["meson", "rewrite", "kwargs", "set", "project", "/", "version", ver], + cwd=patch_dir, + env=env, + check=True, + ) + + subprocess.run( + ["meson", "subprojects", "download", wrap_name], + cwd=self.top_srcdir, 
+ env=env, + check=True, + ) + self.changes += 1 + + @staticmethod + def parse_cmdline() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Replace Meson subprojects with packages in a Cargo registry" + ) + parser.add_argument( + "--cargo-registry", + default=os.environ.get("CARGO_REGISTRY"), + help="Path to Cargo registry (default: CARGO_REGISTRY env var)", + ) + parser.add_argument( + "--dry-run", + action="store_true", + default=False, + help="Do not actually replace anything", + ) + + args = parser.parse_args() + if not args.cargo_registry: + print("error: CARGO_REGISTRY environment variable not set and --cargo-registry not provided") + sys.exit(1) + + return args + + def __init__(self, args: argparse.Namespace): + self.cargo_registry = args.cargo_registry + self.dry_run = args.dry_run + self.top_srcdir = os.getcwd() + + def main(self) -> None: + if not os.path.exists("subprojects"): + print("'subprojects' directory not found, nothing to do.") + return + + os.chdir("subprojects") + for wrap_file in sorted(glob.glob("*-rs.wrap")): + namever = wrap_file[:-8] # Remove '-rs.wrap' + + registry_namever = self.find_installed_crate(namever) + if not registry_namever: + print(f"No installed crate found for {wrap_file}") + continue + + self.update_subproject(wrap_file, registry_namever) + + if self.changes: + if self.dry_run: + print("Rerun without --dry-run to apply changes.") + else: + print(f"✨ {self.changes} subproject(s) updated!") + else: + print("No changes.") + + +if __name__ == "__main__": + args = UpdateSubprojects.parse_cmdline() + UpdateSubprojects(args).main() diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 240923d509a..fec83f53eda 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -1,5 +1,5 @@ #!/usr/bin/env perl -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-only use warnings; use strict; diff --git a/scripts/make-release b/scripts/make-release index 6e0433de24d..87f563ef5f7 100755 --- a/scripts/make-release +++ b/scripts/make-release @@ -10,6 +10,28 @@ # This work is licensed under the terms of the GNU GPLv2 or later. # See the COPYING file in the top-level directory. +function subproject_dir() { + if test ! -f "$src/subprojects/$1.wrap"; then + echo "scripts/archive-source.sh should only process wrap subprojects" + exit 1 + fi + + # Print the directory key of the wrap file, defaulting to the + # subproject name. The wrap file is in ini format and should + # have a single section only. There should be only one section + # named "[wrap-*]", which helps keeping the script simple. 
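The sed invocation on the next few lines implements this: it prints the value of the directory key from that single [wrap-*] section, falling back to the subproject name when the key is absent. A rough Python equivalent of the same lookup, using a hypothetical subprojects/example-1-rs.wrap purely for illustration:

import configparser

def subproject_dir(name: str, src: str) -> str:
    # Wrap files are ini-style with a single [wrap-*] section; return its
    # "directory" key, defaulting to the subproject name when it is absent.
    cp = configparser.ConfigParser()
    cp.read(f"{src}/subprojects/{name}.wrap")
    section = next(s for s in cp.sections() if s.startswith("wrap-"))
    return cp[section].get("directory", name)

print(subproject_dir("example-1-rs", "."))

The distinction matters for the tarball logic further down: the downloaded archives under subprojects/packagecache are kept, while the expanded directory of each file-based wrap (which may be named differently from the wrap file itself) is excluded.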
+ local dir + dir=$(sed -n \ + -e '/^\[wrap-[a-z][a-z]*\]$/,/^\[/{' \ + -e '/^directory *= */!b' \ + -e 's///p' \ + -e 'q' \ + -e '}' \ + "$src/subprojects/$1.wrap") + + echo "${dir:-$1}" +} + if [ $# -ne 2 ]; then echo "Usage:" echo " $0 gitrepo version" @@ -17,7 +39,12 @@ if [ $# -ne 2 ]; then fi # Only include wraps that are invoked with subproject() -SUBPROJECTS="libvfio-user keycodemapdb berkeley-softfloat-3 berkeley-testfloat-3" +SUBPROJECTS="libvfio-user keycodemapdb berkeley-softfloat-3 + berkeley-testfloat-3 anyhow-1-rs arbitrary-int-1-rs bilge-0.2-rs + bilge-impl-0.2-rs either-1-rs foreign-0.3-rs itertools-0.11-rs + libc-0.2-rs proc-macro2-1-rs + proc-macro-error-1-rs proc-macro-error-attr-1-rs quote-1-rs + syn-2-rs unicode-ident-1-rs" src="$1" version="$2" @@ -35,17 +62,23 @@ meson subprojects download $SUBPROJECTS (cd roms/skiboot && ./make_version.sh > .version) # Fetch edk2 submodule's submodules, since it won't have access to them via # the tarball later. -# -# A more uniform way to handle this sort of situation would be nice, but we -# don't necessarily have much control over how a submodule handles its -# submodule dependencies, so we continue to handle these on a case-by-case -# basis for now. -(cd roms/edk2 && \ - git submodule update --init --depth 1 -- \ - ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 \ - BaseTools/Source/C/BrotliCompress/brotli \ - CryptoPkg/Library/OpensslLib/openssl \ - MdeModulePkg/Library/BrotliCustomDecompressLib/brotli) + +# As recommended by the EDK2 readme, we don't use --recursive here. +# EDK2 won't use any code or feature from a submodule of a submodule, +# so we don't need to add them to the tarball. +# Although we don't necessarily need all of the submodules that EDK2 +# has, we clone them all, to avoid running into problems where EDK2 +# adds a new submodule or changes its use of an existing one and +# the sources we ship in the tarball then fail to build. 
+(cd roms/edk2 && git submodule update --init --depth 1) popd -tar --exclude=.git -cJf ${destination}.tar.xz ${destination} + +exclude=(--exclude=.git) +# include the tarballs in subprojects/packagecache but not their expansion +for sp in $SUBPROJECTS; do + if grep -xqF "[wrap-file]" $src/subprojects/$sp.wrap; then + exclude+=(--exclude=subprojects/"$(subproject_dir $sp)") + fi +done +tar "${exclude[@]}" -cJf ${destination}.tar.xz ${destination} rm -rf ${destination} diff --git a/scripts/meson-buildoptions.py b/scripts/meson-buildoptions.py index 4814a8ff61f..a3e22471b2f 100644 --- a/scripts/meson-buildoptions.py +++ b/scripts/meson-buildoptions.py @@ -241,8 +241,14 @@ def print_parse(options): print(" esac") print("}") - -options = load_options(json.load(sys.stdin)) +json_data = sys.stdin.read() +try: + options = load_options(json.loads(json_data)) +except: + print("Failure in scripts/meson-buildoptions.py parsing stdin as json", + file=sys.stderr) + print(json_data, file=sys.stderr) + sys.exit(1) print("# This file is generated by meson-buildoptions.py, do not edit!") print_help(options) print_parse(options) diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh index c97079a38c9..0ebe6bc52a6 100644 --- a/scripts/meson-buildoptions.sh +++ b/scripts/meson-buildoptions.sh @@ -21,6 +21,7 @@ meson_options_help() { printf "%s\n" ' --disable-relocatable toggle relocatable install' printf "%s\n" ' --docdir=VALUE Base directory for documentation installation' printf "%s\n" ' (can be empty) [share/doc]' + printf "%s\n" ' --enable-asan enable address sanitizer' printf "%s\n" ' --enable-block-drv-whitelist-in-tools' printf "%s\n" ' use block whitelist also in tools instead of only' printf "%s\n" ' QEMU' @@ -46,13 +47,15 @@ meson_options_help() { printf "%s\n" ' getrandom()' printf "%s\n" ' --enable-safe-stack SafeStack Stack Smash Protection (requires' printf "%s\n" ' clang/llvm and coroutine backend ucontext)' - printf "%s\n" ' --enable-sanitizers enable default sanitizers' + printf "%s\n" ' --enable-strict-rust-lints' + printf "%s\n" ' Enable stricter set of Rust warnings' printf "%s\n" ' --enable-strip Strip targets on install' printf "%s\n" ' --enable-tcg-interpreter TCG with bytecode interpreter (slow)' printf "%s\n" ' --enable-trace-backends=CHOICES' printf "%s\n" ' Set available tracing backends [log] (choices:' printf "%s\n" ' dtrace/ftrace/log/nop/simple/syslog/ust)' printf "%s\n" ' --enable-tsan enable thread sanitizer' + printf "%s\n" ' --enable-ubsan enable undefined behaviour sanitizer' printf "%s\n" ' --firmwarepath=VALUES search PATH for firmware files [share/qemu-' printf "%s\n" ' firmware]' printf "%s\n" ' --iasl=VALUE Path to ACPI disassembler' @@ -71,12 +74,13 @@ meson_options_help() { printf "%s\n" ' "manufacturer" name for qemu-ga registry entries' printf "%s\n" ' [QEMU]' printf "%s\n" ' --qemu-ga-version=VALUE version number for qemu-ga installer' + printf "%s\n" ' --rtsig-map=VALUE default value of QEMU_RTSIG_MAP [NULL]' printf "%s\n" ' --smbd=VALUE Path to smbd for slirp networking' printf "%s\n" ' --sysconfdir=VALUE Sysconf data directory [etc]' printf "%s\n" ' --tls-priority=VALUE Default TLS protocol/cipher priority string' printf "%s\n" ' [NORMAL]' printf "%s\n" ' --with-coroutine=CHOICE coroutine backend to use (choices:' - printf "%s\n" ' auto/sigaltstack/ucontext/windows)' + printf "%s\n" ' auto/sigaltstack/ucontext/wasm/windows)' printf "%s\n" ' --with-pkgversion=VALUE use specified string as sub-version of the' printf "%s\n" ' package' printf 
"%s\n" ' --with-suffix=VALUE Suffix for QEMU data/modules/config directories' @@ -93,8 +97,6 @@ meson_options_help() { printf "%s\n" ' alsa ALSA sound support' printf "%s\n" ' attr attr/xattr support' printf "%s\n" ' auth-pam PAM access control' - printf "%s\n" ' avx2 AVX2 optimizations' - printf "%s\n" ' avx512bw AVX512BW optimizations' printf "%s\n" ' blkio libblkio block device driver' printf "%s\n" ' bochs bochs image format support' printf "%s\n" ' bpf eBPF support' @@ -128,10 +130,12 @@ meson_options_help() { printf "%s\n" ' hv-balloon hv-balloon driver (requires Glib 2.68+ GTree API)' printf "%s\n" ' hvf HVF acceleration support' printf "%s\n" ' iconv Font glyph conversion support' + printf "%s\n" ' igvm Independent Guest Virtual Machine (IGVM) file support' printf "%s\n" ' jack JACK sound support' printf "%s\n" ' keyring Linux keyring support' printf "%s\n" ' kvm KVM acceleration support' printf "%s\n" ' l2tpv3 l2tpv3 network backend support' + printf "%s\n" ' libcbor libcbor support' printf "%s\n" ' libdaxctl libdaxctl support' printf "%s\n" ' libdw debuginfo support' printf "%s\n" ' libiscsi libiscsi userspace initiator' @@ -159,10 +163,13 @@ meson_options_help() { printf "%s\n" ' oss OSS sound support' printf "%s\n" ' pa PulseAudio sound support' printf "%s\n" ' parallels parallels image format support' + printf "%s\n" ' passt passt network backend support' printf "%s\n" ' pipewire PipeWire sound support' printf "%s\n" ' pixman pixman support' printf "%s\n" ' plugins TCG plugins via shared library loading' printf "%s\n" ' png PNG support with libpng' + printf "%s\n" ' pvg macOS paravirtualized graphics support' + printf "%s\n" ' qatzip QATzip compression support' printf "%s\n" ' qcow1 qcow1 image format support' printf "%s\n" ' qed qed image format support' printf "%s\n" ' qga-vss build QGA VSS support (broken with MinGW)' @@ -170,6 +177,7 @@ meson_options_help() { printf "%s\n" ' rbd Ceph block device driver' printf "%s\n" ' rdma Enable RDMA-based migration' printf "%s\n" ' replication replication support' + printf "%s\n" ' rust Rust support' printf "%s\n" ' rutabaga-gfx rutabaga_gfx support' printf "%s\n" ' sdl SDL user interface' printf "%s\n" ' sdl-image SDL Image support for icons' @@ -190,6 +198,7 @@ meson_options_help() { printf "%s\n" ' u2f U2F emulation support' printf "%s\n" ' uadk UADK Library support' printf "%s\n" ' usb-redir libusbredir support' + printf "%s\n" ' valgrind valgrind debug support for coroutine stacks' printf "%s\n" ' vde vde network backend support' printf "%s\n" ' vdi vdi image format support' printf "%s\n" ' vduse-blk-export' @@ -206,8 +215,6 @@ meson_options_help() { printf "%s\n" ' vhost-vdpa vhost-vdpa kernel backend support' printf "%s\n" ' virglrenderer virgl rendering support' printf "%s\n" ' virtfs virtio-9p support' - printf "%s\n" ' virtfs-proxy-helper' - printf "%s\n" ' virtio-9p proxy helper support' printf "%s\n" ' vmdk vmdk image format support' printf "%s\n" ' vmnet vmnet.framework network backend support' printf "%s\n" ' vnc VNC server' @@ -230,15 +237,13 @@ _meson_option_parse() { --disable-af-xdp) printf "%s" -Daf_xdp=disabled ;; --enable-alsa) printf "%s" -Dalsa=enabled ;; --disable-alsa) printf "%s" -Dalsa=disabled ;; + --enable-asan) printf "%s" -Dasan=true ;; + --disable-asan) printf "%s" -Dasan=false ;; --enable-attr) printf "%s" -Dattr=enabled ;; --disable-attr) printf "%s" -Dattr=disabled ;; --audio-drv-list=*) quote_sh "-Daudio_drv_list=$2" ;; --enable-auth-pam) printf "%s" -Dauth_pam=enabled ;; --disable-auth-pam) printf 
"%s" -Dauth_pam=disabled ;; - --enable-avx2) printf "%s" -Davx2=enabled ;; - --disable-avx2) printf "%s" -Davx2=disabled ;; - --enable-avx512bw) printf "%s" -Davx512bw=enabled ;; - --disable-avx512bw) printf "%s" -Davx512bw=disabled ;; --enable-gcov) printf "%s" -Db_coverage=true ;; --disable-gcov) printf "%s" -Db_coverage=false ;; --enable-lto) printf "%s" -Db_lto=true ;; @@ -343,6 +348,8 @@ _meson_option_parse() { --iasl=*) quote_sh "-Diasl=$2" ;; --enable-iconv) printf "%s" -Diconv=enabled ;; --disable-iconv) printf "%s" -Diconv=disabled ;; + --enable-igvm) printf "%s" -Digvm=enabled ;; + --disable-igvm) printf "%s" -Digvm=disabled ;; --includedir=*) quote_sh "-Dincludedir=$2" ;; --enable-install-blobs) printf "%s" -Dinstall_blobs=true ;; --disable-install-blobs) printf "%s" -Dinstall_blobs=false ;; @@ -355,6 +362,8 @@ _meson_option_parse() { --disable-kvm) printf "%s" -Dkvm=disabled ;; --enable-l2tpv3) printf "%s" -Dl2tpv3=enabled ;; --disable-l2tpv3) printf "%s" -Dl2tpv3=disabled ;; + --enable-libcbor) printf "%s" -Dlibcbor=enabled ;; + --disable-libcbor) printf "%s" -Dlibcbor=disabled ;; --enable-libdaxctl) printf "%s" -Dlibdaxctl=enabled ;; --disable-libdaxctl) printf "%s" -Dlibdaxctl=disabled ;; --libdir=*) quote_sh "-Dlibdir=$2" ;; @@ -417,6 +426,8 @@ _meson_option_parse() { --disable-pa) printf "%s" -Dpa=disabled ;; --enable-parallels) printf "%s" -Dparallels=enabled ;; --disable-parallels) printf "%s" -Dparallels=disabled ;; + --enable-passt) printf "%s" -Dpasst=enabled ;; + --disable-passt) printf "%s" -Dpasst=disabled ;; --enable-pipewire) printf "%s" -Dpipewire=enabled ;; --disable-pipewire) printf "%s" -Dpipewire=disabled ;; --enable-pixman) printf "%s" -Dpixman=enabled ;; @@ -427,6 +438,10 @@ _meson_option_parse() { --enable-png) printf "%s" -Dpng=enabled ;; --disable-png) printf "%s" -Dpng=disabled ;; --prefix=*) quote_sh "-Dprefix=$2" ;; + --enable-pvg) printf "%s" -Dpvg=enabled ;; + --disable-pvg) printf "%s" -Dpvg=disabled ;; + --enable-qatzip) printf "%s" -Dqatzip=enabled ;; + --disable-qatzip) printf "%s" -Dqatzip=disabled ;; --enable-qcow1) printf "%s" -Dqcow1=enabled ;; --disable-qcow1) printf "%s" -Dqcow1=disabled ;; --enable-qed) printf "%s" -Dqed=enabled ;; @@ -452,12 +467,13 @@ _meson_option_parse() { --disable-replication) printf "%s" -Dreplication=disabled ;; --enable-rng-none) printf "%s" -Drng_none=true ;; --disable-rng-none) printf "%s" -Drng_none=false ;; + --rtsig-map=*) quote_sh "-Drtsig_map=$2" ;; + --enable-rust) printf "%s" -Drust=enabled ;; + --disable-rust) printf "%s" -Drust=disabled ;; --enable-rutabaga-gfx) printf "%s" -Drutabaga_gfx=enabled ;; --disable-rutabaga-gfx) printf "%s" -Drutabaga_gfx=disabled ;; --enable-safe-stack) printf "%s" -Dsafe_stack=true ;; --disable-safe-stack) printf "%s" -Dsafe_stack=false ;; - --enable-sanitizers) printf "%s" -Dsanitizers=true ;; - --disable-sanitizers) printf "%s" -Dsanitizers=false ;; --enable-sdl) printf "%s" -Dsdl=enabled ;; --disable-sdl) printf "%s" -Dsdl=disabled ;; --enable-sdl-image) printf "%s" -Dsdl_image=enabled ;; @@ -485,6 +501,8 @@ _meson_option_parse() { --disable-spice-protocol) printf "%s" -Dspice_protocol=disabled ;; --enable-stack-protector) printf "%s" -Dstack_protector=enabled ;; --disable-stack-protector) printf "%s" -Dstack_protector=disabled ;; + --enable-strict-rust-lints) printf "%s" -Dstrict_rust_lints=true ;; + --disable-strict-rust-lints) printf "%s" -Dstrict_rust_lints=false ;; --enable-strip) printf "%s" -Dstrip=true ;; --disable-strip) printf "%s" -Dstrip=false ;; 
--sysconfdir=*) quote_sh "-Dsysconfdir=$2" ;; @@ -505,8 +523,12 @@ _meson_option_parse() { --disable-u2f) printf "%s" -Du2f=disabled ;; --enable-uadk) printf "%s" -Duadk=enabled ;; --disable-uadk) printf "%s" -Duadk=disabled ;; + --enable-ubsan) printf "%s" -Dubsan=true ;; + --disable-ubsan) printf "%s" -Dubsan=false ;; --enable-usb-redir) printf "%s" -Dusb_redir=enabled ;; --disable-usb-redir) printf "%s" -Dusb_redir=disabled ;; + --enable-valgrind) printf "%s" -Dvalgrind=enabled ;; + --disable-valgrind) printf "%s" -Dvalgrind=disabled ;; --enable-vde) printf "%s" -Dvde=enabled ;; --disable-vde) printf "%s" -Dvde=disabled ;; --enable-vdi) printf "%s" -Dvdi=enabled ;; @@ -533,8 +555,6 @@ _meson_option_parse() { --disable-virglrenderer) printf "%s" -Dvirglrenderer=disabled ;; --enable-virtfs) printf "%s" -Dvirtfs=enabled ;; --disable-virtfs) printf "%s" -Dvirtfs=disabled ;; - --enable-virtfs-proxy-helper) printf "%s" -Dvirtfs_proxy_helper=enabled ;; - --disable-virtfs-proxy-helper) printf "%s" -Dvirtfs_proxy_helper=disabled ;; --enable-vmdk) printf "%s" -Dvmdk=enabled ;; --disable-vmdk) printf "%s" -Dvmdk=disabled ;; --enable-vmnet) printf "%s" -Dvmnet=enabled ;; diff --git a/scripts/minikconf.py b/scripts/minikconf.py index bcd91015d34..6f7f43b2918 100644 --- a/scripts/minikconf.py +++ b/scripts/minikconf.py @@ -112,7 +112,7 @@ def has_value(self): def set_value(self, val, clause): self.clauses_for_var.append(clause) if self.has_value() and self.value != val: - print("The following clauses were found for " + self.name) + print("The following clauses were found for " + self.name, file=sys.stderr) for i in self.clauses_for_var: print(" " + str(i), file=sys.stderr) raise KconfigDataError('contradiction between clauses when setting %s' % self) diff --git a/scripts/modinfo-collect.py b/scripts/modinfo-collect.py index 4e7584df667..48bd92bd618 100644 --- a/scripts/modinfo-collect.py +++ b/scripts/modinfo-collect.py @@ -7,15 +7,6 @@ import shlex import subprocess -def find_command(src, target, compile_commands): - for command in compile_commands: - if command['file'] != src: - continue - if target != '' and command['command'].find(target) == -1: - continue - return command['command'] - return 'false' - def process_command(src, command): skip = False out = [] @@ -43,14 +34,22 @@ def main(args): print("MODINFO_DEBUG target %s" % target) arch = target[:-8] # cut '-softmmu' print("MODINFO_START arch \"%s\" MODINFO_END" % arch) + with open('compile_commands.json') as f: - compile_commands = json.load(f) - for src in args: + compile_commands_json = json.load(f) + compile_commands = { x['output']: x for x in compile_commands_json } + + for obj in args: + entry = compile_commands.get(obj, None) + if not entry: + sys.stderr.print('modinfo: Could not find object file', obj) + sys.exit(1) + src = entry['file'] if not src.endswith('.c'): print("MODINFO_DEBUG skip %s" % src) continue + command = entry['command'] print("MODINFO_DEBUG src %s" % src) - command = find_command(src, target, compile_commands) cmdline = process_command(src, command) print("MODINFO_DEBUG cmd", cmdline) result = subprocess.run(cmdline, stdout = subprocess.PIPE, diff --git a/scripts/mtest2make.py b/scripts/mtest2make.py index eb01a05ddbd..2ef375fc6fb 100644 --- a/scripts/mtest2make.py +++ b/scripts/mtest2make.py @@ -27,7 +27,7 @@ def names(self, base): .speed.slow = $(foreach s,$(sort $(filter-out %-thorough, $1)), --suite $s) .speed.thorough = $(foreach s,$(sort $1), --suite $s) -TIMEOUT_MULTIPLIER = 1 +TIMEOUT_MULTIPLIER ?= 1 
.mtestargs = --no-rebuild -t $(TIMEOUT_MULTIPLIER) ifneq ($(SPEED), quick) .mtestargs += --setup $(SPEED) diff --git a/scripts/nsis.py b/scripts/nsis.py index 03ed7608a2c..8f469634eb7 100644 --- a/scripts/nsis.py +++ b/scripts/nsis.py @@ -23,7 +23,7 @@ def find_deps(exe_or_dll, search_path, analyzed_deps): output = subprocess.check_output(["objdump", "-p", exe_or_dll], text=True) output = output.split("\n") for line in output: - if not line.startswith("\tDLL Name: "): + if not line.lstrip().startswith("DLL Name: "): continue dep = line.split("DLL Name: ")[1].strip() @@ -37,10 +37,10 @@ def find_deps(exe_or_dll, search_path, analyzed_deps): analyzed_deps.add(dep) # locate the dll dependencies recursively - rdeps = find_deps(dll, search_path, analyzed_deps) + analyzed_deps, rdeps = find_deps(dll, search_path, analyzed_deps) deps.extend(rdeps) - return deps + return analyzed_deps, deps def main(): parser = argparse.ArgumentParser(description="QEMU NSIS build helper.") @@ -92,18 +92,18 @@ def main(): dlldir = os.path.join(destdir + prefix, "dll") os.mkdir(dlldir) + analyzed_deps = set() for exe in glob.glob(os.path.join(destdir + prefix, "*.exe")): signcode(exe) # find all dll dependencies - deps = set(find_deps(exe, search_path, set())) + analyzed_deps, deps = find_deps(exe, search_path, analyzed_deps) + deps = set(deps) deps.remove(exe) # copy all dlls to the DLLDIR for dep in deps: dllfile = os.path.join(dlldir, os.path.basename(dep)) - if (os.path.exists(dllfile)): - continue print("Copying '%s' to '%s'" % (dep, dllfile)) shutil.copy(dep, dllfile) diff --git a/scripts/probe-gdb-support.py b/scripts/probe-gdb-support.py index 46d6c001408..6bcadce1500 100644 --- a/scripts/probe-gdb-support.py +++ b/scripts/probe-gdb-support.py @@ -19,59 +19,61 @@ import argparse import re -from subprocess import check_output, STDOUT +from subprocess import check_output, STDOUT, CalledProcessError +import sys -# mappings from gdb arch to QEMU target -mappings = { - "alpha" : "alpha", +# Mappings from gdb arch to QEMU target +MAP = { + "alpha" : ["alpha"], "aarch64" : ["aarch64", "aarch64_be"], - "armv7": "arm", + "armv7": ["arm"], "armv8-a" : ["aarch64", "aarch64_be"], - "avr" : "avr", - "cris" : "cris", + "avr" : ["avr"], # no hexagon in upstream gdb - "hppa1.0" : "hppa", - "i386" : "i386", - "i386:x86-64" : "x86_64", - "Loongarch64" : "loongarch64", - "m68k" : "m68k", - "MicroBlaze" : "microblaze", + "hppa1.0" : ["hppa"], + "i386" : ["i386"], + "i386:x86-64" : ["x86_64"], + "Loongarch64" : ["loongarch64"], + "m68k" : ["m68k"], + "MicroBlaze" : ["microblaze"], "mips:isa64" : ["mips64", "mips64el"], - "or1k" : "or1k", - "powerpc:common" : "ppc", + "or1k" : ["or1k"], + "powerpc:common" : ["ppc"], "powerpc:common64" : ["ppc64", "ppc64le"], - "riscv:rv32" : "riscv32", - "riscv:rv64" : "riscv64", - "s390:64-bit" : "s390x", + "riscv:rv32" : ["riscv32"], + "riscv:rv64" : ["riscv64"], + "s390:64-bit" : ["s390x"], "sh4" : ["sh4", "sh4eb"], - "sparc": "sparc", - "sparc:v8plus": "sparc32plus", - "sparc:v9a" : "sparc64", + "sparc": ["sparc"], + "sparc:v8plus": ["sparc32plus"], + "sparc:v9a" : ["sparc64"], # no tricore in upstream gdb "xtensa" : ["xtensa", "xtensaeb"] } + def do_probe(gdb): - gdb_out = check_output([gdb, - "-ex", "set architecture", - "-ex", "quit"], stderr=STDOUT) + try: + gdb_out = check_output([gdb, + "-ex", "set architecture", + "-ex", "quit"], stderr=STDOUT, encoding="utf-8") + except (OSError) as e: + sys.exit(e) + except CalledProcessError as e: + sys.exit(f'{e}. 
Output:\n\n{e.output}') + + found_gdb_archs = re.search(r'Valid arguments are (.*)', gdb_out) - m = re.search(r"Valid arguments are (.*)", - gdb_out.decode("utf-8")) + targets = set() + if found_gdb_archs: + gdb_archs = found_gdb_archs.group(1).split(", ") + mapped_gdb_archs = [arch for arch in gdb_archs if arch in MAP] - valid_arches = set() + targets = {target for arch in mapped_gdb_archs for target in MAP[arch]} - if m.group(1): - for arch in m.group(1).split(", "): - if arch in mappings: - mapping = mappings[arch] - if isinstance(mapping, str): - valid_arches.add(mapping) - else: - for entry in mapping: - valid_arches.add(entry) + # QEMU targets + return targets - return valid_arches def main() -> None: parser = argparse.ArgumentParser(description='Probe GDB Architectures') diff --git a/scripts/qapi/.flake8 b/scripts/qapi/.flake8 deleted file mode 100644 index a873ff67309..00000000000 --- a/scripts/qapi/.flake8 +++ /dev/null @@ -1,3 +0,0 @@ -[flake8] -# Prefer pylint's bare-except checks to flake8's -extend-ignore = E722 diff --git a/scripts/qapi/.isort.cfg b/scripts/qapi/.isort.cfg deleted file mode 100644 index 643caa1fbd6..00000000000 --- a/scripts/qapi/.isort.cfg +++ /dev/null @@ -1,7 +0,0 @@ -[settings] -force_grid_wrap=4 -force_sort_within_sections=True -include_trailing_comma=True -line_length=72 -lines_after_imports=2 -multi_line_output=3 diff --git a/scripts/qapi/backend.py b/scripts/qapi/backend.py new file mode 100644 index 00000000000..49ae6ecdd33 --- /dev/null +++ b/scripts/qapi/backend.py @@ -0,0 +1,65 @@ +# This work is licensed under the terms of the GNU GPL, version 2 or later. +# See the COPYING file in the top-level directory. + +from abc import ABC, abstractmethod + +from .commands import gen_commands +from .events import gen_events +from .features import gen_features +from .introspect import gen_introspect +from .schema import QAPISchema +from .types import gen_types +from .visit import gen_visit + + +class QAPIBackend(ABC): + # pylint: disable=too-few-public-methods + + @abstractmethod + def generate(self, + schema: QAPISchema, + output_dir: str, + prefix: str, + unmask: bool, + builtins: bool, + gen_tracing: bool) -> None: + """ + Generate code for the given schema into the target directory. + + :param schema: The primary QAPI schema object. + :param output_dir: The output directory to store generated code. + :param prefix: Optional C-code prefix for symbol names. + :param unmask: Expose non-ABI names through introspection? + :param builtins: Generate code for built-in types? + + :raise QAPIError: On failures. + """ + + +class QAPICBackend(QAPIBackend): + # pylint: disable=too-few-public-methods + + def generate(self, + schema: QAPISchema, + output_dir: str, + prefix: str, + unmask: bool, + builtins: bool, + gen_tracing: bool) -> None: + """ + Generate C code for the given schema into the target directory. + + :param schema_file: The primary QAPI schema file. + :param output_dir: The output directory to store generated code. + :param prefix: Optional C-code prefix for symbol names. + :param unmask: Expose non-ABI names through introspection? + :param builtins: Generate code for built-in types? + + :raise QAPIError: On failures. 
+ """ + gen_types(schema, output_dir, prefix, builtins) + gen_features(schema, output_dir, prefix) + gen_visit(schema, output_dir, prefix, builtins) + gen_commands(schema, output_dir, prefix, gen_tracing) + gen_events(schema, output_dir, prefix) + gen_introspect(schema, output_dir, prefix, unmask) diff --git a/scripts/qapi/commands.py b/scripts/qapi/commands.py index 79951a841f5..79142273828 100644 --- a/scripts/qapi/commands.py +++ b/scripts/qapi/commands.py @@ -25,7 +25,7 @@ QAPIGenC, QAPISchemaModularCVisitor, build_params, - gen_special_features, + gen_features, ifcontext, ) from .schema import ( @@ -298,7 +298,7 @@ def gen_register_command(name: str, ''', name=name, c_name=c_name(name), opts=' | '.join(options) or 0, - feats=gen_special_features(features)) + feats=gen_features(features)) return ret @@ -320,7 +320,7 @@ def _begin_user_module(self, name: str) -> None: #include "qemu/osdep.h" #include "qapi/compat-policy.h" #include "qapi/visitor.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/dealloc-visitor.h" #include "qapi/error.h" #include "%(visit)s.h" @@ -330,7 +330,7 @@ def _begin_user_module(self, name: str) -> None: if self._gen_tracing and commands != 'qapi-commands': self._genc.add(mcgen(''' -#include "qapi/qmp/qjson.h" +#include "qobject/qjson.h" #include "trace/trace-%(nm)s_trace_events.h" ''', nm=c_name(commands, protect=False))) @@ -346,7 +346,7 @@ def _begin_user_module(self, name: str) -> None: def visit_begin(self, schema: QAPISchema) -> None: self._add_module('./init', ' * QAPI Commands initialization') self._genh.add(mcgen(''' -#include "qapi/qmp/dispatch.h" +#include "qapi/qmp-registry.h" void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds); ''', @@ -355,6 +355,7 @@ def visit_begin(self, schema: QAPISchema) -> None: #include "qemu/osdep.h" #include "%(prefix)sqapi-commands.h" #include "%(prefix)sqapi-init-commands.h" +#include "%(prefix)sqapi-features.h" void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds) { diff --git a/scripts/qapi/common.py b/scripts/qapi/common.py index 737b059e629..d7c8aa3365c 100644 --- a/scripts/qapi/common.py +++ b/scripts/qapi/common.py @@ -40,22 +40,28 @@ def camel_to_upper(value: str) -> str: ENUM_Name2 -> ENUM_NAME2 ENUM24_Name -> ENUM24_NAME """ - c_fun_str = c_name(value, False) - if value.isupper(): - return c_fun_str - - new_name = '' - length = len(c_fun_str) - for i in range(length): - char = c_fun_str[i] - # When char is upper case and no '_' appears before, do more checks - if char.isupper() and (i > 0) and c_fun_str[i - 1] != '_': - if i < length - 1 and c_fun_str[i + 1].islower(): - new_name += '_' - elif c_fun_str[i - 1].isdigit(): - new_name += '_' - new_name += char - return new_name.lstrip('_').upper() + ret = value[0] + upc = value[0].isupper() + + # Copy remainder of ``value`` to ``ret`` with '_' inserted + for ch in value[1:]: + if ch.isupper() == upc: + pass + elif upc: + # ``ret`` ends in upper case, next char isn't: insert '_' + # before the last upper case char unless there is one + # already, or it's at the beginning + if len(ret) > 2 and ret[-2].isalnum(): + ret = ret[:-1] + '_' + ret[-1] + else: + # ``ret`` doesn't end in upper case, next char is: insert + # '_' before it + if ret[-1].isalnum(): + ret += '_' + ret += ch + upc = ch.isupper() + + return c_name(ret.upper()).lstrip('_') def c_enum_const(type_name: str, @@ -68,9 +74,9 @@ def c_enum_const(type_name: str, :param const_name: The name of this constant. :param prefix: Optional, prefix that overrides the type_name. 
""" - if prefix is not None: - type_name = prefix - return camel_to_upper(type_name) + '_' + c_name(const_name, False).upper() + if prefix is None: + prefix = camel_to_upper(type_name) + return prefix + '_' + c_name(const_name, False).upper() def c_name(name: str, protect: bool = True) -> str: diff --git a/scripts/qapi/events.py b/scripts/qapi/events.py index d1f639981a9..d179b0ed695 100644 --- a/scripts/qapi/events.py +++ b/scripts/qapi/events.py @@ -194,7 +194,7 @@ def _begin_user_module(self, name: str) -> None: #include "%(visit)s.h" #include "qapi/compat-policy.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/qmp-event.h" ''', events=events, visit=visit, diff --git a/scripts/qapi/features.py b/scripts/qapi/features.py new file mode 100644 index 00000000000..57563207a82 --- /dev/null +++ b/scripts/qapi/features.py @@ -0,0 +1,48 @@ +""" +QAPI features generator + +Copyright 2024 Red Hat + +This work is licensed under the terms of the GNU GPL, version 2. +# See the COPYING file in the top-level directory. +""" + +from typing import ValuesView + +from .common import c_enum_const, c_name +from .gen import QAPISchemaMonolithicCVisitor +from .schema import QAPISchema, QAPISchemaFeature + + +class QAPISchemaGenFeatureVisitor(QAPISchemaMonolithicCVisitor): + + def __init__(self, prefix: str): + super().__init__( + prefix, 'qapi-features', + ' * Schema-defined QAPI features', + __doc__) + + self.features: ValuesView[QAPISchemaFeature] + + def visit_begin(self, schema: QAPISchema) -> None: + self.features = schema.features() + self._genh.add("#include \"qapi/util.h\"\n\n") + + def visit_end(self) -> None: + self._genh.add("typedef enum {\n") + for f in self.features: + self._genh.add(f" {c_enum_const('qapi_feature', f.name)}") + if f.name in QAPISchemaFeature.SPECIAL_NAMES: + self._genh.add(f" = {c_enum_const('qapi', f.name)},\n") + else: + self._genh.add(",\n") + + self._genh.add("} " + c_name('QapiFeature') + ";\n") + + +def gen_features(schema: QAPISchema, + output_dir: str, + prefix: str) -> None: + vis = QAPISchemaGenFeatureVisitor(prefix) + schema.visit(vis) + vis.write(output_dir) diff --git a/scripts/qapi/gen.py b/scripts/qapi/gen.py index 6a8abe00415..d3c56d45c89 100644 --- a/scripts/qapi/gen.py +++ b/scripts/qapi/gen.py @@ -24,6 +24,7 @@ ) from .common import ( + c_enum_const, c_fname, c_name, guardend, @@ -40,10 +41,10 @@ from .source import QAPISourceInfo -def gen_special_features(features: Sequence[QAPISchemaFeature]) -> str: - special_features = [f"1u << QAPI_{feat.name.upper()}" - for feat in features if feat.is_special()] - return ' | '.join(special_features) or '0' +def gen_features(features: Sequence[QAPISchemaFeature]) -> str: + feats = [f"1u << {c_enum_const('qapi_feature', feat.name)}" + for feat in features] + return ' | '.join(feats) or '0' class QAPIGen: diff --git a/scripts/qapi/introspect.py b/scripts/qapi/introspect.py index ac14b20f308..89ee5d5f176 100644 --- a/scripts/qapi/introspect.py +++ b/scripts/qapi/introspect.py @@ -11,6 +11,7 @@ See the COPYING file in the top-level directory. """ +from dataclasses import dataclass from typing import ( Any, Dict, @@ -79,19 +80,16 @@ _ValueT = TypeVar('_ValueT', bound=_Value) +@dataclass class Annotated(Generic[_ValueT]): """ Annotated generally contains a SchemaInfo-like type (as a dict), But it also used to wrap comments/ifconds around scalar leaf values, for the benefit of features and enums. 
""" - # TODO: Remove after Python 3.7 adds @dataclass: - # pylint: disable=too-few-public-methods - def __init__(self, value: _ValueT, ifcond: QAPISchemaIfCond, - comment: Optional[str] = None): - self.value = value - self.comment: Optional[str] = comment - self.ifcond = ifcond + value: _ValueT + ifcond: QAPISchemaIfCond + comment: Optional[str] = None def _tree_to_qlit(obj: JSONValue, @@ -197,7 +195,7 @@ def visit_end(self) -> None: # generate C name = c_name(self._prefix, protect=False) + 'qmp_schema_qlit' self._genh.add(mcgen(''' -#include "qapi/qmp/qlit.h" +#include "qobject/qlit.h" extern const QLitObject %(c_name)s; ''', diff --git a/scripts/qapi/main.py b/scripts/qapi/main.py index 316736b6a29..0e2a6ae3f07 100644 --- a/scripts/qapi/main.py +++ b/scripts/qapi/main.py @@ -8,17 +8,14 @@ """ import argparse +from importlib import import_module import sys from typing import Optional -from .commands import gen_commands +from .backend import QAPIBackend, QAPICBackend from .common import must_match from .error import QAPIError -from .events import gen_events -from .introspect import gen_introspect from .schema import QAPISchema -from .types import gen_types -from .visit import gen_visit def invalid_prefix_char(prefix: str) -> Optional[str]: @@ -28,31 +25,36 @@ def invalid_prefix_char(prefix: str) -> Optional[str]: return None -def generate(schema_file: str, - output_dir: str, - prefix: str, - unmask: bool = False, - builtins: bool = False, - gen_tracing: bool = False) -> None: - """ - Generate C code for the given schema into the target directory. +def create_backend(path: str) -> QAPIBackend: + if path is None: + return QAPICBackend() - :param schema_file: The primary QAPI schema file. - :param output_dir: The output directory to store generated code. - :param prefix: Optional C-code prefix for symbol names. - :param unmask: Expose non-ABI names through introspection? - :param builtins: Generate code for built-in types? + module_path, dot, class_name = path.rpartition('.') + if not dot: + raise QAPIError("argument of -B must be of the form MODULE.CLASS") - :raise QAPIError: On failures. - """ - assert invalid_prefix_char(prefix) is None + try: + mod = import_module(module_path) + except Exception as ex: + raise QAPIError(f"unable to import '{module_path}': {ex}") from ex + + try: + klass = getattr(mod, class_name) + except AttributeError as ex: + raise QAPIError( + f"module '{module_path}' has no class '{class_name}'") from ex + + try: + backend = klass() + except Exception as ex: + raise QAPIError( + f"backend '{path}' cannot be instantiated: {ex}") from ex + + if not isinstance(backend, QAPIBackend): + raise QAPIError( + f"backend '{path}' must be an instance of QAPIBackend") - schema = QAPISchema(schema_file) - gen_types(schema, output_dir, prefix, builtins) - gen_visit(schema, output_dir, prefix, builtins) - gen_commands(schema, output_dir, prefix, gen_tracing) - gen_events(schema, output_dir, prefix) - gen_introspect(schema, output_dir, prefix, unmask) + return backend def main() -> int: @@ -75,6 +77,8 @@ def main() -> int: parser.add_argument('-u', '--unmask-non-abi-names', action='store_true', dest='unmask', help="expose non-ABI names in introspection") + parser.add_argument('-B', '--backend', default=None, + help="Python module name for code generator") # Option --suppress-tracing exists so we can avoid solving build system # problems. TODO Drop it when we no longer need it. 
@@ -91,12 +95,14 @@ def main() -> int: return 1 try: - generate(args.schema, - output_dir=args.output_dir, - prefix=args.prefix, - unmask=args.unmask, - builtins=args.builtins, - gen_tracing=not args.suppress_tracing) + schema = QAPISchema(args.schema) + backend = create_backend(args.backend) + backend.generate(schema, + output_dir=args.output_dir, + prefix=args.prefix, + unmask=args.unmask, + builtins=args.builtins, + gen_tracing=not args.suppress_tracing) except QAPIError as err: print(err, file=sys.stderr) return 1 diff --git a/scripts/qapi/mypy.ini b/scripts/qapi/mypy.ini deleted file mode 100644 index 8109470a031..00000000000 --- a/scripts/qapi/mypy.ini +++ /dev/null @@ -1,4 +0,0 @@ -[mypy] -strict = True -disallow_untyped_calls = False -python_version = 3.8 diff --git a/scripts/qapi/parser.py b/scripts/qapi/parser.py index adc85b5b394..2529edf81aa 100644 --- a/scripts/qapi/parser.py +++ b/scripts/qapi/parser.py @@ -14,7 +14,7 @@ # This work is licensed under the terms of the GNU GPL, version 2. # See the COPYING file in the top-level directory. -from collections import OrderedDict +import enum import os import re from typing import ( @@ -154,7 +154,7 @@ def _parse(self) -> None: "value of 'include' must be a string") incl_fname = os.path.join(os.path.dirname(self._fname), include) - self._add_expr(OrderedDict({'include': incl_fname}), info) + self._add_expr({'include': incl_fname}, info) exprs_include = self._include(include, info, incl_fname, self._included) if exprs_include: @@ -355,7 +355,7 @@ def accept(self, skip_comment: bool = True) -> None: raise QAPIParseError(self, "stray '%s'" % match.group(0)) def get_members(self) -> Dict[str, object]: - expr: Dict[str, object] = OrderedDict() + expr: Dict[str, object] = {} if self.tok == '}': self.accept() return expr @@ -575,18 +575,17 @@ def get_doc(self) -> 'QAPIDoc': ) raise QAPIParseError(self, emsg) - doc.new_tagged_section(self.info, match.group(1)) + doc.new_tagged_section( + self.info, + QAPIDoc.Kind.from_string(match.group(1)) + ) text = line[match.end():] if text: doc.append_line(text) line = self.get_doc_indented(doc) no_more_args = True - elif line.startswith('='): - raise QAPIParseError( - self, - "unexpected '=' markup in definition documentation") else: - # tag-less paragraph + # plain paragraph doc.ensure_untagged_section(self.info) doc.append_line(line) line = self.get_doc_paragraph(doc) @@ -594,22 +593,15 @@ def get_doc(self) -> 'QAPIDoc': # Free-form documentation doc = QAPIDoc(info) doc.ensure_untagged_section(self.info) - first = True while line is not None: if match := self._match_at_name_colon(line): raise QAPIParseError( self, "'@%s:' not allowed in free-form documentation" % match.group(1)) - if line.startswith('='): - if not first: - raise QAPIParseError( - self, - "'=' heading must come first in a comment block") doc.append_line(line) self.accept(False) line = self.get_doc_line() - first = False self.accept() doc.end() @@ -635,23 +627,51 @@ class QAPIDoc: Free-form documentation blocks consist only of a body section. 
""" + class Kind(enum.Enum): + PLAIN = 0 + MEMBER = 1 + FEATURE = 2 + RETURNS = 3 + ERRORS = 4 + SINCE = 5 + TODO = 6 + + @staticmethod + def from_string(kind: str) -> 'QAPIDoc.Kind': + return QAPIDoc.Kind[kind.upper()] + + def __str__(self) -> str: + return self.name.title() + class Section: # pylint: disable=too-few-public-methods - def __init__(self, info: QAPISourceInfo, - tag: Optional[str] = None): + def __init__( + self, + info: QAPISourceInfo, + kind: 'QAPIDoc.Kind', + ): # section source info, i.e. where it begins self.info = info - # section tag, if any ('Returns', '@name', ...) - self.tag = tag + # section kind + self.kind = kind # section text without tag self.text = '' + def __repr__(self) -> str: + return f"" + def append_line(self, line: str) -> None: self.text += line + '\n' class ArgSection(Section): - def __init__(self, info: QAPISourceInfo, tag: str): - super().__init__(info, tag) + def __init__( + self, + info: QAPISourceInfo, + kind: 'QAPIDoc.Kind', + name: str + ): + super().__init__(info, kind) + self.name = name self.member: Optional['QAPISchemaMember'] = None def connect(self, member: 'QAPISchemaMember') -> None: @@ -663,7 +683,9 @@ def __init__(self, info: QAPISourceInfo, symbol: Optional[str] = None): # definition doc's symbol, None for free-form doc self.symbol: Optional[str] = symbol # the sections in textual order - self.all_sections: List[QAPIDoc.Section] = [QAPIDoc.Section(info)] + self.all_sections: List[QAPIDoc.Section] = [ + QAPIDoc.Section(info, QAPIDoc.Kind.PLAIN) + ] # the body section self.body: Optional[QAPIDoc.Section] = self.all_sections[0] # dicts mapping parameter/feature names to their description @@ -680,55 +702,71 @@ def __init__(self, info: QAPISourceInfo, symbol: Optional[str] = None): def end(self) -> None: for section in self.all_sections: section.text = section.text.strip('\n') - if section.tag is not None and section.text == '': + if section.kind != QAPIDoc.Kind.PLAIN and section.text == '': raise QAPISemError( - section.info, "text required after '%s:'" % section.tag) + section.info, "text required after '%s:'" % section.kind) def ensure_untagged_section(self, info: QAPISourceInfo) -> None: - if self.all_sections and not self.all_sections[-1].tag: + kind = QAPIDoc.Kind.PLAIN + + if self.all_sections and self.all_sections[-1].kind == kind: # extend current section - self.all_sections[-1].text += '\n' + section = self.all_sections[-1] + if not section.text: + # Section is empty so far; update info to start *here*. 
+ section.info = info + section.text += '\n' return + # start new section - section = self.Section(info) + section = self.Section(info, kind) self.sections.append(section) self.all_sections.append(section) - def new_tagged_section(self, info: QAPISourceInfo, tag: str) -> None: - section = self.Section(info, tag) - if tag == 'Returns': + def new_tagged_section( + self, + info: QAPISourceInfo, + kind: 'QAPIDoc.Kind', + ) -> None: + section = self.Section(info, kind) + if kind == QAPIDoc.Kind.RETURNS: if self.returns: raise QAPISemError( - info, "duplicated '%s' section" % tag) + info, "duplicated '%s' section" % kind) self.returns = section - elif tag == 'Errors': + elif kind == QAPIDoc.Kind.ERRORS: if self.errors: raise QAPISemError( - info, "duplicated '%s' section" % tag) + info, "duplicated '%s' section" % kind) self.errors = section - elif tag == 'Since': + elif kind == QAPIDoc.Kind.SINCE: if self.since: raise QAPISemError( - info, "duplicated '%s' section" % tag) + info, "duplicated '%s' section" % kind) self.since = section self.sections.append(section) self.all_sections.append(section) - def _new_description(self, info: QAPISourceInfo, name: str, - desc: Dict[str, ArgSection]) -> None: + def _new_description( + self, + info: QAPISourceInfo, + name: str, + kind: 'QAPIDoc.Kind', + desc: Dict[str, ArgSection] + ) -> None: if not name: raise QAPISemError(info, "invalid parameter name") if name in desc: raise QAPISemError(info, "'%s' parameter name duplicated" % name) - section = self.ArgSection(info, '@' + name) + section = self.ArgSection(info, kind, name) self.all_sections.append(section) desc[name] = section def new_argument(self, info: QAPISourceInfo, name: str) -> None: - self._new_description(info, name, self.args) + self._new_description(info, name, QAPIDoc.Kind.MEMBER, self.args) def new_feature(self, info: QAPISourceInfo, name: str) -> None: - self._new_description(info, name, self.features) + self._new_description(info, name, QAPIDoc.Kind.FEATURE, self.features) def append_line(self, line: str) -> None: self.all_sections[-1].append_line(line) @@ -740,8 +778,23 @@ def connect_member(self, member: 'QAPISchemaMember') -> None: raise QAPISemError(member.info, "%s '%s' lacks documentation" % (member.role, member.name)) - self.args[member.name] = QAPIDoc.ArgSection( - self.info, '@' + member.name) + # Insert stub documentation section for missing member docs. + # TODO: drop when undocumented members are outlawed + + section = QAPIDoc.ArgSection( + self.info, QAPIDoc.Kind.MEMBER, member.name) + self.args[member.name] = section + + # Determine where to insert stub doc - it should go at the + # end of the members section(s), if any. Note that index 0 + # is assumed to be an untagged intro section, even if it is + # empty. 
+ index = 1 + if len(self.all_sections) > 1: + while self.all_sections[index].kind == QAPIDoc.Kind.MEMBER: + index += 1 + self.all_sections.insert(index, section) + self.args[member.name].connect(member) def connect_feature(self, feature: 'QAPISchemaFeature') -> None: @@ -751,6 +804,43 @@ def connect_feature(self, feature: 'QAPISchemaFeature') -> None: % feature.name) self.features[feature.name].connect(feature) + def ensure_returns(self, info: QAPISourceInfo) -> None: + + def _insert_near_kind( + kind: QAPIDoc.Kind, + new_sect: QAPIDoc.Section, + after: bool = False, + ) -> bool: + for idx, sect in enumerate(reversed(self.all_sections)): + if sect.kind == kind: + pos = len(self.all_sections) - idx - 1 + if after: + pos += 1 + self.all_sections.insert(pos, new_sect) + return True + return False + + if any(s.kind == QAPIDoc.Kind.RETURNS for s in self.all_sections): + return + + # Stub "Returns" section for undocumented returns value + stub = QAPIDoc.Section(info, QAPIDoc.Kind.RETURNS) + + if any(_insert_near_kind(kind, stub, after) for kind, after in ( + # 1. If arguments, right after those. + (QAPIDoc.Kind.MEMBER, True), + # 2. Elif errors, right *before* those. + (QAPIDoc.Kind.ERRORS, False), + # 3. Elif features, right *before* those. + (QAPIDoc.Kind.FEATURE, False), + )): + return + + # Otherwise, it should go right after the intro. The intro + # is always the first section and is always present (even + # when empty), so we can insert directly at index=1 blindly. + self.all_sections.insert(1, stub) + def check_expr(self, expr: QAPIExpression) -> None: if 'command' in expr: if self.returns and 'returns' not in expr: diff --git a/scripts/qapi/pylintrc b/scripts/qapi/pylintrc index c028a1f9f51..e16283ada3d 100644 --- a/scripts/qapi/pylintrc +++ b/scripts/qapi/pylintrc @@ -17,7 +17,9 @@ disable=consider-using-f-string, too-many-arguments, too-many-branches, too-many-instance-attributes, + too-many-positional-arguments, too-many-statements, + unknown-option-value, useless-option-value, [REPORTS] diff --git a/scripts/qapi/schema.py b/scripts/qapi/schema.py index d65c35f6ee6..3abddea3525 100644 --- a/scripts/qapi/schema.py +++ b/scripts/qapi/schema.py @@ -19,7 +19,6 @@ from __future__ import annotations from abc import ABC, abstractmethod -from collections import OrderedDict import os import re from typing import ( @@ -29,6 +28,7 @@ List, Optional, Union, + ValuesView, cast, ) @@ -556,7 +556,7 @@ def check(self, schema: QAPISchema) -> None: super().check(schema) assert self._checked and not self._check_complete - seen = OrderedDict() + seen = {} if self._base_name: self.base = schema.resolve_type(self._base_name, self.info, "'base'") @@ -933,8 +933,11 @@ def connect_doc(self, doc: Optional[QAPIDoc]) -> None: class QAPISchemaFeature(QAPISchemaMember): role = 'feature' + # Features which are standardized across all schemas + SPECIAL_NAMES = ['deprecated', 'unstable'] + def is_special(self) -> bool: - return self.name in ('deprecated', 'unstable') + return self.name in QAPISchemaFeature.SPECIAL_NAMES class QAPISchemaObjectTypeMember(QAPISchemaMember): @@ -1059,6 +1062,9 @@ def connect_doc(self, doc: Optional[QAPIDoc] = None) -> None: if self.arg_type and self.arg_type.is_implicit(): self.arg_type.connect_doc(doc) + if self.ret_type and self.info: + doc.ensure_returns(self.info) + def visit(self, visitor: QAPISchemaVisitor) -> None: super().visit(visitor) visitor.visit_command( @@ -1137,7 +1143,17 @@ def __init__(self, fname: str): self.docs = parser.docs self._entity_list: List[QAPISchemaEntity] 
= [] self._entity_dict: Dict[str, QAPISchemaDefinition] = {} - self._module_dict: Dict[str, QAPISchemaModule] = OrderedDict() + self._module_dict: Dict[str, QAPISchemaModule] = {} + # NB, values in the dict will identify the first encountered + # usage of a named feature only + self._feature_dict: Dict[str, QAPISchemaFeature] = {} + + # All schemas get the names defined in the QapiSpecialFeature enum. + # Rely on dict iteration order matching insertion order so that + # the special names are emitted first when generating code. + for f in QAPISchemaFeature.SPECIAL_NAMES: + self._feature_dict[f] = QAPISchemaFeature(f, None) + self._schema_dir = os.path.dirname(fname) self._make_module(QAPISchemaModule.BUILTIN_MODULE_NAME) self._make_module(fname) @@ -1147,6 +1163,9 @@ def __init__(self, fname: str): self._def_exprs(exprs) self.check() + def features(self) -> ValuesView[QAPISchemaFeature]: + return self._feature_dict.values() + def _def_entity(self, ent: QAPISchemaEntity) -> None: self._entity_list.append(ent) @@ -1249,7 +1268,7 @@ def _def_predefineds(self) -> None: [{'name': n} for n in qtypes], None) self._def_definition(QAPISchemaEnumType( - 'QType', None, None, None, None, qtype_values, 'QTYPE')) + 'QType', None, None, None, None, qtype_values, None)) def _make_features( self, @@ -1258,6 +1277,12 @@ def _make_features( ) -> List[QAPISchemaFeature]: if features is None: return [] + + for f in features: + feat = QAPISchemaFeature(f['name'], info) + if feat.name not in self._feature_dict: + self._feature_dict[feat.name] = feat + return [QAPISchemaFeature(f['name'], info, QAPISchemaIfCond(f.get('if'))) for f in features] @@ -1431,7 +1456,7 @@ def _def_command(self, expr: QAPIExpression) -> None: ifcond = QAPISchemaIfCond(expr.get('if')) info = expr.info features = self._make_features(expr.get('features'), info) - if isinstance(data, OrderedDict): + if isinstance(data, dict): data = self._make_implicit_object_type( name, info, ifcond, 'arg', self._make_members(data, info)) @@ -1450,7 +1475,7 @@ def _def_event(self, expr: QAPIExpression) -> None: ifcond = QAPISchemaIfCond(expr.get('if')) info = expr.info features = self._make_features(expr.get('features'), info) - if isinstance(data, OrderedDict): + if isinstance(data, dict): data = self._make_implicit_object_type( name, info, ifcond, 'arg', self._make_members(data, info)) @@ -1485,6 +1510,12 @@ def check(self) -> None: for doc in self.docs: doc.check() + features = list(self._feature_dict.values()) + if len(features) > 64: + raise QAPISemError( + features[64].info, + "Maximum of 64 schema features is permitted") + def visit(self, visitor: QAPISchemaVisitor) -> None: visitor.visit_begin(self) for mod in self._module_dict.values(): diff --git a/scripts/qapi/source.py b/scripts/qapi/source.py index 7b379fdc925..ffdc3f482ac 100644 --- a/scripts/qapi/source.py +++ b/scripts/qapi/source.py @@ -47,9 +47,9 @@ def set_defn(self, meta: str, name: str) -> None: self.defn_meta = meta self.defn_name = name - def next_line(self: T) -> T: + def next_line(self: T, n: int = 1) -> T: info = copy.copy(self) - info.line += 1 + info.line += n return info def loc(self) -> str: diff --git a/scripts/qapi/types.py b/scripts/qapi/types.py index 0dd0b00ada3..2bf75338283 100644 --- a/scripts/qapi/types.py +++ b/scripts/qapi/types.py @@ -16,11 +16,7 @@ from typing import List, Optional from .common import c_enum_const, c_name, mcgen -from .gen import ( - QAPISchemaModularCVisitor, - gen_special_features, - ifcontext, -) +from .gen import QAPISchemaModularCVisitor, 
gen_features, ifcontext from .schema import ( QAPISchema, QAPISchemaAlternatives, @@ -61,17 +57,17 @@ def gen_enum_lookup(name: str, index=index, name=memb.name) ret += memb.ifcond.gen_endif() - special_features = gen_special_features(memb.features) - if special_features != '0': + features = gen_features(memb.features) + if features != '0': feats += mcgen(''' - [%(index)s] = %(special_features)s, + [%(index)s] = %(features)s, ''', - index=index, special_features=special_features) + index=index, features=features) if feats: ret += mcgen(''' }, - .special_features = (const unsigned char[%(max_index)s]) { + .features = (const uint64_t[%(max_index)s]) { ''', max_index=max_index) ret += feats @@ -308,11 +304,14 @@ def _begin_user_module(self, name: str) -> None: #include "qapi/dealloc-visitor.h" #include "%(types)s.h" #include "%(visit)s.h" +#include "%(prefix)sqapi-features.h" ''', - types=types, visit=visit)) + types=types, visit=visit, + prefix=self._prefix)) self._genh.preamble_add(mcgen(''' #include "qapi/qapi-builtin-types.h" -''')) +''', + prefix=self._prefix)) def visit_begin(self, schema: QAPISchema) -> None: # gen_object() is recursive, ensure it doesn't visit the empty type diff --git a/scripts/qapi/visit.py b/scripts/qapi/visit.py index 12f92e429f6..36e240967b6 100644 --- a/scripts/qapi/visit.py +++ b/scripts/qapi/visit.py @@ -21,11 +21,7 @@ indent, mcgen, ) -from .gen import ( - QAPISchemaModularCVisitor, - gen_special_features, - ifcontext, -) +from .gen import QAPISchemaModularCVisitor, gen_features, ifcontext from .schema import ( QAPISchema, QAPISchemaAlternatives, @@ -103,15 +99,15 @@ def gen_visit_object_members(name: str, ''', name=memb.name, has=has) indent.increase() - special_features = gen_special_features(memb.features) - if special_features != '0': + features = gen_features(memb.features) + if features != '0': ret += mcgen(''' - if (visit_policy_reject(v, "%(name)s", %(special_features)s, errp)) { + if (visit_policy_reject(v, "%(name)s", %(features)s, errp)) { return false; } - if (!visit_policy_skip(v, "%(name)s", %(special_features)s)) { + if (!visit_policy_skip(v, "%(name)s", %(features)s)) { ''', - name=memb.name, special_features=special_features) + name=memb.name, features=features) indent.increase() ret += mcgen(''' if (!visit_type_%(c_type)s(v, "%(name)s", &obj->%(c_name)s, errp)) { @@ -120,7 +116,7 @@ def gen_visit_object_members(name: str, ''', c_type=memb.type.c_name(), name=memb.name, c_name=c_name(memb.name)) - if special_features != '0': + if features != '0': indent.decrease() ret += mcgen(''' } @@ -360,8 +356,9 @@ def _begin_user_module(self, name: str) -> None: #include "qemu/osdep.h" #include "qapi/error.h" #include "%(visit)s.h" +#include "%(prefix)sqapi-features.h" ''', - visit=visit)) + visit=visit, prefix=self._prefix)) self._genh.preamble_add(mcgen(''' #include "qapi/qapi-builtin-visit.h" #include "%(types)s.h" diff --git a/scripts/qcow2-to-stdout.py b/scripts/qcow2-to-stdout.py new file mode 100755 index 00000000000..06b7c13ccbb --- /dev/null +++ b/scripts/qcow2-to-stdout.py @@ -0,0 +1,449 @@ +#!/usr/bin/env python3 + +# This tool reads a disk image in any format and converts it to qcow2, +# writing the result directly to stdout. +# +# Copyright (C) 2024 Igalia, S.L. 
+# +# Authors: Alberto Garcia +# Madeeha Javed +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# qcow2 files produced by this script are always arranged like this: +# +# - qcow2 header +# - refcount table +# - refcount blocks +# - L1 table +# - L2 tables +# - Data clusters +# +# A note about variable names: in qcow2 there is one refcount table +# and one (active) L1 table, although each can occupy several +# clusters. For the sake of simplicity the code sometimes talks about +# refcount tables and L1 tables when referring to those clusters. + +import argparse +import errno +import math +import os +import signal +import struct +import subprocess +import sys +import tempfile +import time +from contextlib import contextmanager + +QCOW2_DEFAULT_CLUSTER_SIZE = 65536 +QCOW2_DEFAULT_REFCOUNT_BITS = 16 +QCOW2_FEATURE_NAME_TABLE = 0x6803F857 +QCOW2_DATA_FILE_NAME_STRING = 0x44415441 +QCOW2_V3_HEADER_LENGTH = 112 # Header length in QEMU 9.0. Must be a multiple of 8 +QCOW2_INCOMPAT_DATA_FILE_BIT = 2 +QCOW2_AUTOCLEAR_DATA_FILE_RAW_BIT = 1 +QCOW_OFLAG_COPIED = 1 << 63 +QEMU_STORAGE_DAEMON = "qemu-storage-daemon" + + +def bitmap_set(bitmap, idx): + bitmap[idx // 8] |= 1 << (idx % 8) + + +def bitmap_is_set(bitmap, idx): + return (bitmap[idx // 8] & (1 << (idx % 8))) != 0 + + +def bitmap_iterator(bitmap, length): + for idx in range(length): + if bitmap_is_set(bitmap, idx): + yield idx + + +def align_up(num, d): + return d * math.ceil(num / d) + + +# Holes in the input file contain only zeroes so we can skip them and +# save time. This function returns the indexes of the clusters that +# are known to contain data. Those are the ones that we need to read. +def clusters_with_data(fd, cluster_size): + data_to = 0 + while True: + try: + data_from = os.lseek(fd, data_to, os.SEEK_DATA) + data_to = align_up(os.lseek(fd, data_from, os.SEEK_HOLE), cluster_size) + for idx in range(data_from // cluster_size, data_to // cluster_size): + yield idx + except OSError as err: + if err.errno == errno.ENXIO: # End of file reached + break + raise err + + +# write_qcow2_content() expects a raw input file. If we have a different +# format we can use qemu-storage-daemon to make it appear as raw. 
+@contextmanager +def get_input_as_raw_file(input_file, input_format): + if input_format == "raw": + yield input_file + return + try: + temp_dir = tempfile.mkdtemp() + pid_file = os.path.join(temp_dir, "pid") + raw_file = os.path.join(temp_dir, "raw") + open(raw_file, "wb").close() + ret = subprocess.run( + [ + QEMU_STORAGE_DAEMON, + "--daemonize", + "--pidfile", pid_file, + "--blockdev", f"driver=file,node-name=file0,driver=file,filename={input_file},read-only=on", + "--blockdev", f"driver={input_format},node-name=disk0,file=file0,read-only=on", + "--export", f"type=fuse,id=export0,node-name=disk0,mountpoint={raw_file},writable=off", + ], + capture_output=True, + ) + if ret.returncode != 0: + sys.exit("[Error] Could not start the qemu-storage-daemon:\n" + + ret.stderr.decode().rstrip('\n')) + yield raw_file + finally: + # Kill the storage daemon on exit + # and remove all temporary files + if os.path.exists(pid_file): + with open(pid_file, "r") as f: + pid = int(f.readline()) + os.kill(pid, signal.SIGTERM) + while os.path.exists(pid_file): + time.sleep(0.1) + os.unlink(raw_file) + os.rmdir(temp_dir) + + +def write_features(cluster, offset, data_file_name): + if data_file_name is not None: + encoded_name = data_file_name.encode("utf-8") + padded_name_len = align_up(len(encoded_name), 8) + struct.pack_into(f">II{padded_name_len}s", cluster, offset, + QCOW2_DATA_FILE_NAME_STRING, + len(encoded_name), + encoded_name) + offset += 8 + padded_name_len + + qcow2_features = [ + # Incompatible + (0, 0, "dirty bit"), + (0, 1, "corrupt bit"), + (0, 2, "external data file"), + (0, 3, "compression type"), + (0, 4, "extended L2 entries"), + # Compatible + (1, 0, "lazy refcounts"), + # Autoclear + (2, 0, "bitmaps"), + (2, 1, "raw external data"), + ] + struct.pack_into(">I", cluster, offset, QCOW2_FEATURE_NAME_TABLE) + struct.pack_into(">I", cluster, offset + 4, len(qcow2_features) * 48) + offset += 8 + for feature_type, feature_bit, feature_name in qcow2_features: + struct.pack_into(">BB46s", cluster, offset, + feature_type, feature_bit, feature_name.encode("ascii")) + offset += 48 + + +def write_qcow2_content(input_file, cluster_size, refcount_bits, data_file_name, data_file_raw): + # Some basic values + l1_entries_per_table = cluster_size // 8 + l2_entries_per_table = cluster_size // 8 + refcounts_per_table = cluster_size // 8 + refcounts_per_block = cluster_size * 8 // refcount_bits + + # Virtual disk size, number of data clusters and L1 entries + disk_size = align_up(os.path.getsize(input_file), 512) + total_data_clusters = math.ceil(disk_size / cluster_size) + l1_entries = math.ceil(total_data_clusters / l2_entries_per_table) + allocated_l1_tables = math.ceil(l1_entries / l1_entries_per_table) + + # Max L1 table size is 32 MB (QCOW_MAX_L1_SIZE in block/qcow2.h) + if (l1_entries * 8) > (32 * 1024 * 1024): + sys.exit("[Error] The image size is too large. Try using a larger cluster size.") + + # Two bitmaps indicating which L1 and L2 entries are set + l1_bitmap = bytearray(allocated_l1_tables * l1_entries_per_table // 8) + l2_bitmap = bytearray(l1_entries * l2_entries_per_table // 8) + allocated_l2_tables = 0 + allocated_data_clusters = 0 + + if data_file_raw: + # If data_file_raw is set then all clusters are allocated and + # we don't need to read the input file at all. 
+ allocated_l2_tables = l1_entries + for idx in range(l1_entries): + bitmap_set(l1_bitmap, idx) + for idx in range(total_data_clusters): + bitmap_set(l2_bitmap, idx) + else: + # Open the input file for reading + fd = os.open(input_file, os.O_RDONLY) + zero_cluster = bytes(cluster_size) + # Read all the clusters that contain data + for idx in clusters_with_data(fd, cluster_size): + cluster = os.pread(fd, cluster_size, cluster_size * idx) + # If the last cluster is smaller than cluster_size pad it with zeroes + if len(cluster) < cluster_size: + cluster += bytes(cluster_size - len(cluster)) + # If a cluster has non-zero data then it must be allocated + # in the output file and its L2 entry must be set + if cluster != zero_cluster: + bitmap_set(l2_bitmap, idx) + allocated_data_clusters += 1 + # Allocated data clusters also need their corresponding L1 entry and L2 table + l1_idx = math.floor(idx / l2_entries_per_table) + if not bitmap_is_set(l1_bitmap, l1_idx): + bitmap_set(l1_bitmap, l1_idx) + allocated_l2_tables += 1 + + # Total amount of allocated clusters excluding the refcount blocks and table + total_allocated_clusters = 1 + allocated_l1_tables + allocated_l2_tables + if data_file_name is None: + total_allocated_clusters += allocated_data_clusters + + # Clusters allocated for the refcount blocks and table + allocated_refcount_blocks = math.ceil(total_allocated_clusters / refcounts_per_block) + allocated_refcount_tables = math.ceil(allocated_refcount_blocks / refcounts_per_table) + + # Now we have a problem because allocated_refcount_blocks and allocated_refcount_tables... + # (a) increase total_allocated_clusters, and + # (b) need to be recalculated when total_allocated_clusters is increased + # So we need to repeat the calculation as long as the numbers change + while True: + new_total_allocated_clusters = total_allocated_clusters + allocated_refcount_tables + allocated_refcount_blocks + new_allocated_refcount_blocks = math.ceil(new_total_allocated_clusters / refcounts_per_block) + if new_allocated_refcount_blocks > allocated_refcount_blocks: + allocated_refcount_blocks = new_allocated_refcount_blocks + allocated_refcount_tables = math.ceil(allocated_refcount_blocks / refcounts_per_table) + else: + break + + # Now that we have the final numbers we can update total_allocated_clusters + total_allocated_clusters += allocated_refcount_tables + allocated_refcount_blocks + + # At this point we have the exact number of clusters that the output + # image is going to use so we can calculate all the offsets. 
+ current_cluster_idx = 1 + + refcount_table_offset = current_cluster_idx * cluster_size + current_cluster_idx += allocated_refcount_tables + + refcount_block_offset = current_cluster_idx * cluster_size + current_cluster_idx += allocated_refcount_blocks + + l1_table_offset = current_cluster_idx * cluster_size + current_cluster_idx += allocated_l1_tables + + l2_table_offset = current_cluster_idx * cluster_size + current_cluster_idx += allocated_l2_tables + + data_clusters_offset = current_cluster_idx * cluster_size + + # Calculate some values used in the qcow2 header + if allocated_l1_tables == 0: + l1_table_offset = 0 + + hdr_cluster_bits = int(math.log2(cluster_size)) + hdr_refcount_bits = int(math.log2(refcount_bits)) + hdr_length = QCOW2_V3_HEADER_LENGTH + hdr_incompat_features = 0 + if data_file_name is not None: + hdr_incompat_features |= 1 << QCOW2_INCOMPAT_DATA_FILE_BIT + hdr_autoclear_features = 0 + if data_file_raw: + hdr_autoclear_features |= 1 << QCOW2_AUTOCLEAR_DATA_FILE_RAW_BIT + + ### Write qcow2 header + cluster = bytearray(cluster_size) + struct.pack_into(">4sIQIIQIIQQIIQQQQII", cluster, 0, + b"QFI\xfb", # QCOW magic string + 3, # version + 0, # backing file offset + 0, # backing file sizes + hdr_cluster_bits, + disk_size, + 0, # encryption method + l1_entries, + l1_table_offset, + refcount_table_offset, + allocated_refcount_tables, + 0, # number of snapshots + 0, # snapshot table offset + hdr_incompat_features, + 0, # compatible features + hdr_autoclear_features, + hdr_refcount_bits, + hdr_length, + ) + + write_features(cluster, hdr_length, data_file_name) + + sys.stdout.buffer.write(cluster) + + ### Write refcount table + cur_offset = refcount_block_offset + remaining_refcount_table_entries = allocated_refcount_blocks # Each entry is a pointer to a refcount block + while remaining_refcount_table_entries > 0: + cluster = bytearray(cluster_size) + to_write = min(remaining_refcount_table_entries, refcounts_per_table) + remaining_refcount_table_entries -= to_write + for idx in range(to_write): + struct.pack_into(">Q", cluster, idx * 8, cur_offset) + cur_offset += cluster_size + sys.stdout.buffer.write(cluster) + + ### Write refcount blocks + remaining_refcount_block_entries = total_allocated_clusters # One entry for each allocated cluster + for tbl in range(allocated_refcount_blocks): + cluster = bytearray(cluster_size) + to_write = min(remaining_refcount_block_entries, refcounts_per_block) + remaining_refcount_block_entries -= to_write + # All refcount entries contain the number 1. The only difference + # is their bit width, defined when the image is created. 
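As a concrete illustration of the accounting above, a fully allocated 1 MiB raw input with the default 64 KiB clusters and 16-bit refcounts works out as follows (the input size is assumed for the example, not taken from a real image):

# Worked example of the cluster accounting, assuming a fully allocated
# 1 MiB raw input, 64 KiB clusters and 16-bit refcounts.
import math

cluster_size = 65536
refcount_bits = 16
disk_size = 1024 * 1024

l2_entries_per_table = cluster_size // 8                   # 8192
total_data_clusters = math.ceil(disk_size / cluster_size)  # 16
l1_entries = math.ceil(total_data_clusters / l2_entries_per_table)  # 1
# -> one L1 table cluster and one L2 table cluster

# header + L1 + L2 + data clusters
total_allocated_clusters = 1 + 1 + 1 + total_data_clusters          # 19
# A single refcount block holds 32768 entries and a single refcount
# table is enough to point at it, so the fixed-point loop converges
# on the first iteration:
total_allocated_clusters += 1 + 1                                    # 21

# Resulting layout: cluster 0 header, 1 refcount table, 2 refcount
# block, 3 L1 table, 4 L2 table, 5..20 data clusters; 21 clusters in
# total, i.e. a 1376256-byte output stream.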
+ for idx in range(to_write): + if refcount_bits == 64: + struct.pack_into(">Q", cluster, idx * 8, 1) + elif refcount_bits == 32: + struct.pack_into(">L", cluster, idx * 4, 1) + elif refcount_bits == 16: + struct.pack_into(">H", cluster, idx * 2, 1) + elif refcount_bits == 8: + cluster[idx] = 1 + elif refcount_bits == 4: + cluster[idx // 2] |= 1 << ((idx % 2) * 4) + elif refcount_bits == 2: + cluster[idx // 4] |= 1 << ((idx % 4) * 2) + elif refcount_bits == 1: + cluster[idx // 8] |= 1 << (idx % 8) + sys.stdout.buffer.write(cluster) + + ### Write L1 table + cur_offset = l2_table_offset + for tbl in range(allocated_l1_tables): + cluster = bytearray(cluster_size) + for idx in range(l1_entries_per_table): + l1_idx = tbl * l1_entries_per_table + idx + if bitmap_is_set(l1_bitmap, l1_idx): + struct.pack_into(">Q", cluster, idx * 8, cur_offset | QCOW_OFLAG_COPIED) + cur_offset += cluster_size + sys.stdout.buffer.write(cluster) + + ### Write L2 tables + cur_offset = data_clusters_offset + for tbl in range(l1_entries): + # Skip the empty L2 tables. We can identify them because + # there is no L1 entry pointing at them. + if bitmap_is_set(l1_bitmap, tbl): + cluster = bytearray(cluster_size) + for idx in range(l2_entries_per_table): + l2_idx = tbl * l2_entries_per_table + idx + if bitmap_is_set(l2_bitmap, l2_idx): + if data_file_name is None: + struct.pack_into(">Q", cluster, idx * 8, cur_offset | QCOW_OFLAG_COPIED) + cur_offset += cluster_size + else: + struct.pack_into(">Q", cluster, idx * 8, (l2_idx * cluster_size) | QCOW_OFLAG_COPIED) + sys.stdout.buffer.write(cluster) + + ### Write data clusters + if data_file_name is None: + for idx in bitmap_iterator(l2_bitmap, total_data_clusters): + cluster = os.pread(fd, cluster_size, cluster_size * idx) + # If the last cluster is smaller than cluster_size pad it with zeroes + if len(cluster) < cluster_size: + cluster += bytes(cluster_size - len(cluster)) + sys.stdout.buffer.write(cluster) + + if not data_file_raw: + os.close(fd) + + +def main(): + # Command-line arguments + parser = argparse.ArgumentParser( + description="This program converts a QEMU disk image to qcow2 " + "and writes it to the standard output" + ) + parser.add_argument("input_file", help="name of the input file") + parser.add_argument( + "-f", + dest="input_format", + metavar="input_format", + help="format of the input file (default: raw)", + default="raw", + ) + parser.add_argument( + "-c", + dest="cluster_size", + metavar="cluster_size", + help=f"qcow2 cluster size (default: {QCOW2_DEFAULT_CLUSTER_SIZE})", + default=QCOW2_DEFAULT_CLUSTER_SIZE, + type=int, + choices=[1 << x for x in range(9, 22)], + ) + parser.add_argument( + "-r", + dest="refcount_bits", + metavar="refcount_bits", + help=f"width of the reference count entries (default: {QCOW2_DEFAULT_REFCOUNT_BITS})", + default=QCOW2_DEFAULT_REFCOUNT_BITS, + type=int, + choices=[1 << x for x in range(7)], + ) + parser.add_argument( + "-d", + dest="data_file", + help="create an image with input_file as an external data file", + action="store_true", + ) + parser.add_argument( + "-R", + dest="data_file_raw", + help="enable data_file_raw on the generated image (implies -d)", + action="store_true", + ) + args = parser.parse_args() + + if args.data_file_raw: + args.data_file = True + + if not os.path.isfile(args.input_file): + sys.exit(f"[Error] {args.input_file} does not exist or is not a regular file.") + + if args.data_file and args.input_format != "raw": + sys.exit("[Error] External data files can only be used with raw input images") + + 
# A 512 byte header is too small for the data file name extension + if args.data_file and args.cluster_size == 512: + sys.exit("[Error] External data files require a larger cluster size") + + if sys.stdout.isatty(): + sys.exit("[Error] Refusing to write to a tty. Try redirecting stdout.") + + if args.data_file: + data_file_name = args.input_file + else: + data_file_name = None + + with get_input_as_raw_file(args.input_file, args.input_format) as raw_file: + write_qcow2_content( + raw_file, + args.cluster_size, + args.refcount_bits, + data_file_name, + args.data_file_raw, + ) + + +if __name__ == "__main__": + main() diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh index 6ef9f118d9f..5fd462b1d1c 100755 --- a/scripts/qemu-binfmt-conf.sh +++ b/scripts/qemu-binfmt-conf.sh @@ -144,35 +144,35 @@ loongarch64_magic='\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x loongarch64_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff' loongarch64_family=loongarch -qemu_get_family() { - cpu=${HOST_ARCH:-$(uname -m)} +# Converts the name of a host CPU architecture to the corresponding QEMU +# target. +# +# FIXME: This can probably be simplified a lot by dropping most entries. +# Remember that the script is only used on Linux, so we only need to +# handle the strings Linux uses to report the host CPU architecture. +qemu_normalize() { + cpu="$1" case "$cpu" in - amd64|i386|i486|i586|i686|i86pc|BePC|x86_64) + i[3-6]86) echo "i386" ;; - mips*) - echo "mips" + amd64) + echo "x86_64" ;; - "Power Macintosh"|ppc64|powerpc|ppc) + powerpc) echo "ppc" ;; - ppc64el|ppc64le) - echo "ppcle" + ppc64el) + echo "ppc64le" ;; - arm|armel|armhf|arm64|armv[4-9]*l|aarch64) + armel|armhf|armv[4-9]*l) echo "arm" ;; - armeb|armv[4-9]*b|aarch64_be) + armv[4-9]*b) echo "armeb" ;; - sparc*) - echo "sparc" - ;; - riscv*) - echo "riscv" - ;; - loongarch*) - echo "loongarch" + arm64) + echo "aarch64" ;; *) echo "$cpu" @@ -205,6 +205,9 @@ Usage: qemu-binfmt-conf.sh [--qemu-path PATH][--debian][--systemd CPU] --persistent: if yes, the interpreter is loaded when binfmt is configured and remains in memory. All future uses are cloned from the open file. + --ignore-family: if yes, it is assumed that the host CPU (e.g. riscv64) + can't natively run programs targeting a CPU that is + part of the same family (e.g. riscv32). --preserve-argv0 preserve argv[0] To import templates with update-binfmts, use : @@ -309,7 +312,13 @@ EOF qemu_set_binfmts() { # probe cpu type - host_family=$(qemu_get_family) + host_cpu=$(qemu_normalize ${HOST_ARCH:-$(uname -m)}) + host_family=$(eval echo \$${host_cpu}_family) + + if [ "$host_family" = "" ] ; then + echo "INTERNAL ERROR: unknown host cpu $host_cpu" 1>&2 + exit 1 + fi # register the interpreter for each cpu except for the native one @@ -318,20 +327,28 @@ qemu_set_binfmts() { mask=$(eval echo \$${cpu}_mask) family=$(eval echo \$${cpu}_family) + target="$cpu" + if [ "$cpu" = "i486" ] ; then + target="i386" + fi + + qemu="$QEMU_PATH/qemu-$target$QEMU_SUFFIX" + if [ "$magic" = "" ] || [ "$mask" = "" ] || [ "$family" = "" ] ; then echo "INTERNAL ERROR: unknown cpu $cpu" 1>&2 continue fi - qemu="$QEMU_PATH/qemu-$cpu" - if [ "$cpu" = "i486" ] ; then - qemu="$QEMU_PATH/qemu-i386" + if [ "$host_family" = "$family" ] ; then + # When --ignore-family is used, we have to generate rules even + # for targets that are in the same family as the host CPU. 
The + # only exception is of course when the CPU types exactly match + if [ "$target" = "$host_cpu" ] || [ "$IGNORE_FAMILY" = "no" ] ; then + continue + fi fi - qemu="$qemu$QEMU_SUFFIX" - if [ "$host_family" != "$family" ] ; then - $BINFMT_SET - fi + $BINFMT_SET done } @@ -346,10 +363,11 @@ CREDENTIAL=no PERSISTENT=no PRESERVE_ARG0=no QEMU_SUFFIX="" +IGNORE_FAMILY=no _longopts="debian,systemd:,qemu-path:,qemu-suffix:,exportdir:,help,credential:,\ -persistent:,preserve-argv0:" -options=$(getopt -o ds:Q:S:e:hc:p:g:F: -l ${_longopts} -- "$@") +persistent:,preserve-argv0:,ignore-family:" +options=$(getopt -o ds:Q:S:e:hc:p:g:F:i: -l ${_longopts} -- "$@") eval set -- "$options" while true ; do @@ -409,6 +427,10 @@ while true ; do shift PRESERVE_ARG0="$1" ;; + -i|--ignore-family) + shift + IGNORE_FAMILY="$1" + ;; *) break ;; diff --git a/scripts/qemu-gdb.py b/scripts/qemu-gdb.py index 4d2a9f6c430..cfae94a2e90 100644 --- a/scripts/qemu-gdb.py +++ b/scripts/qemu-gdb.py @@ -45,3 +45,5 @@ def __init__(self): # Default to silently passing through SIGUSR1, because QEMU sends it # to itself a lot. gdb.execute('handle SIGUSR1 pass noprint nostop') +# Always print full stack for python errors, easier to debug and report issues +gdb.execute('set python print-stack full') diff --git a/scripts/qemu-guest-agent/fsfreeze-hook b/scripts/qemu-guest-agent/fsfreeze-hook index 13aafd48451..c1feb6f5cee 100755 --- a/scripts/qemu-guest-agent/fsfreeze-hook +++ b/scripts/qemu-guest-agent/fsfreeze-hook @@ -19,15 +19,43 @@ is_ignored_file() { return 1 } +USE_SYSLOG=0 +# if log file is not writable, fallback to syslog +[ ! -w "$LOGFILE" ] && USE_SYSLOG=1 +# try to update log file and fallback to syslog if it fails +touch "$LOGFILE" &>/dev/null || USE_SYSLOG=1 + +# Ensure the log file is writable, fallback to syslog if not +log_message() { + local message="$1" + if [ "$USE_SYSLOG" -eq 0 ]; then + printf "%s: %s\n" "$(date)" "$message" >>"$LOGFILE" + else + logger -t qemu-ga-freeze-hook "$message" + fi +} + # Iterate executables in directory "fsfreeze-hook.d" with the specified args [ ! -d "$FSFREEZE_D" ] && exit 0 + for file in "$FSFREEZE_D"/* ; do is_ignored_file "$file" && continue [ -x "$file" ] || continue - printf "$(date): execute $file $@\n" >>$LOGFILE - "$file" "$@" >>$LOGFILE 2>&1 - STATUS=$? - printf "$(date): $file finished with status=$STATUS\n" >>$LOGFILE + + log_message "Executing $file $@" + if [ "$USE_SYSLOG" -eq 0 ]; then + "$file" "$@" >>"$LOGFILE" 2>&1 + STATUS=$? + else + "$file" "$@" 2>&1 | logger -t qemu-ga-freeze-hook + STATUS=${PIPESTATUS[0]} + fi + + if [ $STATUS -ne 0 ]; then + log_message "Error: $file finished with status=$STATUS" + else + log_message "$file finished successfully" + fi done exit 0 diff --git a/scripts/qemu-plugin-symbols.py b/scripts/qemu-plugin-symbols.py new file mode 100755 index 00000000000..e285ebb8f9e --- /dev/null +++ b/scripts/qemu-plugin-symbols.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# Extract QEMU Plugin API symbols from a header file +# +# Copyright 2024 Linaro Ltd +# +# Author: Pierrick Bouvier +# +# This work is licensed under the terms of the GNU GPL, version 2 or later. +# See the COPYING file in the top-level directory. +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import argparse +import re + +def extract_symbols(plugin_header): + with open(plugin_header) as file: + content = file.read() + # Remove QEMU_PLUGIN_API macro definition. 
+ content = content.replace('#define QEMU_PLUGIN_API', '') + expected = content.count('QEMU_PLUGIN_API') + # Find last word between QEMU_PLUGIN_API and (, matching on several lines. + # We use *? non-greedy quantifier. + syms = re.findall(r'QEMU_PLUGIN_API.*?(\w+)\s*\(', content, re.DOTALL) + syms.sort() + # Ensure we found as many symbols as API markers. + assert len(syms) == expected + return syms + +def main() -> None: + parser = argparse.ArgumentParser(description='Extract QEMU plugin symbols') + parser.add_argument('plugin_header', help='Path to QEMU plugin header.') + args = parser.parse_args() + + syms = extract_symbols(args.plugin_header) + + print('{') + for s in syms: + print(" {};".format(s)) + print('};') + +if __name__ == '__main__': + main() diff --git a/scripts/qemu-trace-stap b/scripts/qemu-trace-stap index eb6e951ff23..e983460ee75 100755 --- a/scripts/qemu-trace-stap +++ b/scripts/qemu-trace-stap @@ -56,6 +56,7 @@ def tapset_dir(binary): def cmd_run(args): + stap = which("stap") prefix = probe_prefix(args.binary) tapsets = tapset_dir(args.binary) @@ -76,7 +77,7 @@ def cmd_run(args): # We request an 8MB buffer, since the stap default 1MB buffer # can be easily overflowed by frequently firing QEMU traces - stapargs = ["stap", "-s", "8", "-I", tapsets ] + stapargs = [stap, "-s", "8", "-I", tapsets ] if args.pid is not None: stapargs.extend(["-x", args.pid]) stapargs.extend(["-e", script]) @@ -84,6 +85,7 @@ def cmd_run(args): def cmd_list(args): + stap = which("stap") tapsets = tapset_dir(args.binary) if args.verbose: @@ -96,7 +98,7 @@ def cmd_list(args): if verbose: print("Listing probes with name '%s'" % script) - proc = subprocess.Popen(["stap", "-I", tapsets, "-l", script], + proc = subprocess.Popen([stap, "-I", tapsets, "-l", script], stdout=subprocess.PIPE, universal_newlines=True) out, err = proc.communicate() diff --git a/scripts/qemugdb/coroutine.py b/scripts/qemugdb/coroutine.py index 7db46d4b684..e98fc48a4b2 100644 --- a/scripts/qemugdb/coroutine.py +++ b/scripts/qemugdb/coroutine.py @@ -13,28 +13,9 @@ VOID_PTR = gdb.lookup_type('void').pointer() -def get_fs_base(): - '''Fetch %fs base value using arch_prctl(ARCH_GET_FS). 
This is - pthread_self().''' - # %rsp - 120 is scratch space according to the SystemV ABI - old = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)') - gdb.execute('call (int)arch_prctl(0x1003, $rsp - 120)', False, True) - fs_base = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)') - gdb.execute('set *(uint64_t*)($rsp - 120) = %s' % old, False, True) - return fs_base - def pthread_self(): - '''Fetch pthread_self() from the glibc start_thread function.''' - f = gdb.newest_frame() - while f.name() != 'start_thread': - f = f.older() - if f is None: - return get_fs_base() - - try: - return f.read_var("arg") - except ValueError: - return get_fs_base() + '''Fetch the base address of TLS.''' + return gdb.parse_and_eval("$fs_base") def get_glibc_pointer_guard(): '''Fetch glibc pointer guard value''' @@ -65,9 +46,60 @@ def get_jmpbuf_regs(jmpbuf): 'r15': jmpbuf[JB_R15], 'rip': glibc_ptr_demangle(jmpbuf[JB_PC], pointer_guard) } -def bt_jmpbuf(jmpbuf): - '''Backtrace a jmpbuf''' - regs = get_jmpbuf_regs(jmpbuf) +def symbol_lookup(addr): + # Example: "__clone3 + 44 in section .text of /lib64/libc.so.6" + result = gdb.execute(f"info symbol {hex(addr)}", to_string=True).strip() + try: + if "+" in result: + (func, result) = result.split(" + ") + (offset, result) = result.split(" in ") + else: + offset = "0" + (func, result) = result.split(" in ") + func_str = f"{func}<+{offset}> ()" + except: + return f"??? ({result})" + + # Example: Line 321 of "../util/coroutine-ucontext.c" starts at address + # 0x55cf3894d993 and ends at 0x55cf3894d9ab + # . + result = gdb.execute(f"info line *{hex(addr)}", to_string=True).strip() + if not result.startswith("Line "): + return func_str + result = result[5:] + + try: + result = result.split(" starts ")[0] + (line, path) = result.split(" of ") + path = path.replace("\"", "") + except: + return func_str + + return f"{func_str} at {path}:{line}" + +def dump_backtrace(regs): + ''' + Backtrace dump with raw registers, mimic GDB command 'bt'. + ''' + # Here only rbp and rip that matter.. + rbp = regs['rbp'] + rip = regs['rip'] + i = 0 + + while rbp: + # For all return addresses on stack, we want to look up symbol/line + # on the CALL command, because the return address is the next + # instruction instead of the CALL. Here -1 would work for any + # sized CALL instruction. + print(f"#{i} {hex(rip)} in {symbol_lookup(rip if i == 0 else rip-1)}") + rip = gdb.parse_and_eval(f"*(uint64_t *)(uint64_t)({hex(rbp)} + 8)") + rbp = gdb.parse_and_eval(f"*(uint64_t *)(uint64_t)({hex(rbp)})") + i += 1 + +def dump_backtrace_live(regs): + ''' + Backtrace dump with gdb's 'bt' command, only usable in a live session. + ''' old = dict() # remember current stack frame and select the topmost @@ -88,6 +120,17 @@ def bt_jmpbuf(jmpbuf): selected_frame.select() +def bt_jmpbuf(jmpbuf): + '''Backtrace a jmpbuf''' + regs = get_jmpbuf_regs(jmpbuf) + try: + # This reuses gdb's "bt" command, which can be slightly prettier + # but only works with live sessions. 
+ dump_backtrace_live(regs) + except: + # If above doesn't work, fallback to poor man's unwind + dump_backtrace(regs) + def co_cast(co): return co.cast(gdb.lookup_type('CoroutineUContext').pointer()) @@ -120,10 +163,15 @@ def invoke(self, arg, from_tty): gdb.execute("bt") - if gdb.parse_and_eval("qemu_in_coroutine()") == False: - return + try: + # This only works with a live session + co_ptr = gdb.parse_and_eval("qemu_coroutine_self()") + except: + # Fallback to use hard-coded ucontext vars if it's coredump + co_ptr = gdb.parse_and_eval("co_tls_current") - co_ptr = gdb.parse_and_eval("qemu_coroutine_self()") + if co_ptr == False: + return while True: co = co_cast(co_ptr) diff --git a/scripts/qom-cast-macro-clean-cocci-gen.py b/scripts/qom-cast-macro-clean-cocci-gen.py index 2fa8438a146..5aa51d0c18e 100644 --- a/scripts/qom-cast-macro-clean-cocci-gen.py +++ b/scripts/qom-cast-macro-clean-cocci-gen.py @@ -13,8 +13,11 @@ # --in-place \ # --dir . # -# SPDX-FileContributor: Philippe Mathieu-Daudé -# SPDX-FileCopyrightText: 2023 Linaro Ltd. +# Copyright (c) 2023 Linaro Ltd. +# +# Authors: +# Philippe Mathieu-Daudé +# # SPDX-License-Identifier: GPL-2.0-or-later import re diff --git a/scripts/rdma-migration-helper.sh b/scripts/rdma-migration-helper.sh new file mode 100755 index 00000000000..d784d1566a9 --- /dev/null +++ b/scripts/rdma-migration-helper.sh @@ -0,0 +1,109 @@ +#!/bin/bash + +# Copied from blktests +get_ipv4_addr() +{ + ip -4 -o addr show dev "$1" | + sed -n 's/.*[[:blank:]]inet[[:blank:]]*\([^[:blank:]/]*\).*/\1/p' | + head -1 | tr -d '\n' +} + +get_ipv6_addr() { + ipv6=$(ip -6 -o addr show dev "$1" | + sed -n 's/.*[[:blank:]]inet6[[:blank:]]*\([^[:blank:]/]*\).*/\1/p' | + head -1 | tr -d '\n') + + [ $? -eq 0 ] || return + + if [[ "$ipv6" =~ ^fe80: ]]; then + echo -n "[$ipv6%$1]" + else + echo -n "[$ipv6]" + fi +} + +# existing rdma interfaces +rdma_interfaces() +{ + rdma link show | sed -nE 's/^link .* netdev ([^ ]+).*$/\1 /p' | + grep -Ev '^(lo|tun|tap)' +} + +# existing valid ipv4 interfaces +ipv4_interfaces() +{ + ip -o addr show | awk '/inet / {print $2}' | grep -Ev '^(lo|tun|tap)' +} + +ipv6_interfaces() +{ + ip -o addr show | awk '/inet6 / {print $2}' | grep -Ev '^(lo|tun|tap)' +} + +rdma_rxe_detect() +{ + family=$1 + for r in $(rdma_interfaces) + do + "$family"_interfaces | grep -qw $r && get_"$family"_addr $r && return + done + + return 1 +} + +rdma_rxe_setup() +{ + family=$1 + for i in $("$family"_interfaces) + do + if rdma_interfaces | grep -qw $i; then + echo "$family: Reuse the existing rdma/rxe ${i}_rxe" \ + "for $i with $(get_"$family"_addr $i)" + return + fi + + rdma link add "${i}_rxe" type rxe netdev "$i" && { + echo "$family: Setup new rdma/rxe ${i}_rxe" \ + "for $i with $(get_"$family"_addr $i)" + return + } + done + + echo "$family: Failed to setup any new rdma/rxe link" >&2 + return 1 +} + +rdma_rxe_clean() +{ + modprobe -r rdma_rxe +} + +IP_FAMILY=${IP_FAMILY:-ipv4} +if [ "$IP_FAMILY" != "ipv6" ] && [ "$IP_FAMILY" != "ipv4" ]; then + echo "Unknown ip family '$IP_FAMILY', only ipv4 or ipv6 is supported." >&2 + exit 1 +fi + +operation=${1:-detect} + +command -v rdma >/dev/null || { + echo "Command 'rdma' is not available, please install it first." 
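The get_ipv4_addr()/get_ipv6_addr() helpers in the new rdma-migration-helper.sh take the first address that `ip -o addr show` reports for a device, with IPv6 addresses wrapped in brackets (and the interface scope appended for link-local ones). A rough Python equivalent of the IPv4 case, for illustration only (the interface name is an example, not something the script assumes):

    import re
    import subprocess

    def get_ipv4_addr(dev="eth0"):
        # Same idea as the sed pipeline: pick the first "inet ADDR/PREFIX" entry
        out = subprocess.run(["ip", "-4", "-o", "addr", "show", "dev", dev],
                             capture_output=True, text=True, check=True).stdout
        m = re.search(r"\binet\s+([^\s/]+)", out)
        return m.group(1) if m else None
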
>&2 + exit 1 +} + +if [ "$operation" == "setup" ] || [ "$operation" == "clean" ]; then + [ "$UID" == 0 ] || { + echo "Root privilege is required to setup/clean a rdma/rxe link" >&2 + exit 1 + } + if [ "$operation" == "setup" ]; then + rdma_rxe_setup ipv4 + rdma_rxe_setup ipv6 + else + rdma_rxe_clean + fi +elif [ "$operation" == "detect" ]; then + rdma_rxe_detect "$IP_FAMILY" +else + echo "Usage: $0 [setup | detect | clean]" +fi diff --git a/scripts/replay-dump.py b/scripts/replay-dump.py index d668193e793..4ce7ff51cc7 100755 --- a/scripts/replay-dump.py +++ b/scripts/replay-dump.py @@ -20,6 +20,8 @@ import argparse import struct +import os +import sys from collections import namedtuple from os import path @@ -99,7 +101,7 @@ def call_decode(table, index, dumpfile): print("Could not decode index: %d" % (index)) print("Entry is: %s" % (decoder)) print("Decode Table is:\n%s" % (table)) - return False + raise(Exception("unknown event")) else: return decoder.fn(decoder.eid, decoder.name, dumpfile) @@ -120,7 +122,7 @@ def print_event(eid, name, string=None, event_count=None): def decode_unimp(eid, name, _unused_dumpfile): "Unimplemented decoder, will trigger exit" print("%s not handled - will now stop" % (name)) - return False + raise(Exception("unhandled event")) def decode_plain(eid, name, _unused_dumpfile): "Plain events without additional data" @@ -134,6 +136,30 @@ def swallow_async_qword(eid, name, dumpfile): print(" %s(%d) @ %d" % (name, eid, step_id)) return True +def swallow_bytes(eid, name, dumpfile, nr): + """Swallow nr bytes of data without looking at it""" + dumpfile.seek(nr, os.SEEK_CUR) + +total_insns = 0 + +def decode_instruction(eid, name, dumpfile): + global total_insns + ins_diff = read_dword(dumpfile) + total_insns += ins_diff + print_event(eid, name, "+ %d -> %d" % (ins_diff, total_insns)) + return True + +def decode_interrupt(eid, name, dumpfile): + print_event(eid, name) + return True + +def decode_exception(eid, name, dumpfile): + print_event(eid, name) + return True + +# v12 does away with the additional event byte and encodes it in the main type +# Between v8 and v9, REPLAY_ASYNC_BH_ONESHOT was added, but we don't decode +# those versions so leave it out. 
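replay-dump.py drives everything from per-version tables of Decoder entries, and call_decode() (reworked above to raise instead of returning False) looks up the event id and invokes the matching handler, so an unknown or unimplemented event now aborts the dump with a clear error rather than silently stopping. A stripped-down sketch of that dispatch pattern (Decoder is assumed to be the namedtuple the script defines earlier):

    from collections import namedtuple

    Decoder = namedtuple("Decoder", ["eid", "name", "fn"])

    def call_decode(table, index, dumpfile):
        # Find the entry whose event id matches, then hand off to its decoder
        decoder = next((d for d in table if d.eid == index), None)
        if decoder is None:
            raise Exception("unknown event %d" % index)
        return decoder.fn(decoder.eid, decoder.name, dumpfile)
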
async_decode_table = [ Decoder(0, "REPLAY_ASYNC_EVENT_BH", swallow_async_qword), Decoder(1, "REPLAY_ASYNC_INPUT", decode_unimp), Decoder(2, "REPLAY_ASYNC_INPUT_SYNC", decode_unimp), @@ -142,8 +168,8 @@ def swallow_async_qword(eid, name, dumpfile): Decoder(5, "REPLAY_ASYNC_EVENT_NET", decode_unimp), ] # See replay_read_events/replay_read_event -def decode_async(eid, name, dumpfile): - """Decode an ASYNC event""" +def decode_async_old(eid, name, dumpfile): + """Decode an ASYNC event (pre-v8)""" print_event(eid, name) @@ -157,13 +183,37 @@ def decode_async(eid, name, dumpfile): return call_decode(async_decode_table, async_event_kind, dumpfile) -total_insns = 0 +def decode_async_bh(eid, name, dumpfile): + op_id = read_qword(dumpfile) + print_event(eid, name) + return True -def decode_instruction(eid, name, dumpfile): - global total_insns - ins_diff = read_dword(dumpfile) - total_insns += ins_diff - print_event(eid, name, "+ %d -> %d" % (ins_diff, total_insns)) +def decode_async_bh_oneshot(eid, name, dumpfile): + op_id = read_qword(dumpfile) + print_event(eid, name) + return True + +def decode_async_char_read(eid, name, dumpfile): + char_id = read_byte(dumpfile) + size = read_dword(dumpfile) + print_event(eid, name, "device:%x chars:%s" % (char_id, dumpfile.read(size))) + return True + +def decode_async_block(eid, name, dumpfile): + op_id = read_qword(dumpfile) + print_event(eid, name) + return True + +def decode_async_net(eid, name, dumpfile): + net_id = read_byte(dumpfile) + flags = read_dword(dumpfile) + size = read_dword(dumpfile) + swallow_bytes(eid, name, dumpfile, size) + print_event(eid, name, "net:%x flags:%x bytes:%d" % (net_id, flags, size)) + return True + +def decode_shutdown(eid, name, dumpfile): + print_event(eid, name) return True def decode_char_write(eid, name, dumpfile): @@ -177,7 +227,22 @@ def decode_audio_out(eid, name, dumpfile): print_event(eid, name, "%d" % (audio_data)) return True -def decode_checkpoint(eid, name, dumpfile): +def decode_random(eid, name, dumpfile): + ret = read_dword(dumpfile) + size = read_dword(dumpfile) + swallow_bytes(eid, name, dumpfile, size) + if (ret): + print_event(eid, name, "%d bytes (getrandom failed)" % (size)) + else: + print_event(eid, name, "%d bytes" % (size)) + return True + +def decode_clock(eid, name, dumpfile): + clock_data = read_qword(dumpfile) + print_event(eid, name, "0x%x" % (clock_data)) + return True + +def __decode_checkpoint(eid, name, dumpfile, old): """Decode a checkpoint. Checkpoints contain a series of async events with their own specific data. 
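The checkpoint decoder that follows has to know whether more payload comes next without consuming the record: it reads the next event byte, checks whether it is one of the async events (the single EVENT_ASYNC id in old logs, ids 3 through 9 in v12 logs), and then hands the byte back via replay_state.reuse_event() so the main loop still processes it. A minimal sketch of that peek/push-back idea (the real ReplayState lives earlier in the script; this is only illustrative):

    class PeekableEvents:
        def __init__(self):
            self.cached = None

        def reuse_event(self, event):
            # Push the event id back so the next read returns it again
            self.cached = event

        def read_event(self, dumpfile):
            if self.cached is not None:
                event, self.cached = self.cached, None
                return event
            return dumpfile.read(1)[0]
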
@@ -189,38 +254,33 @@ def decode_checkpoint(eid, name, dumpfile): # if the next event is EVENT_ASYNC there are a bunch of # async events to read, otherwise we are done - if next_event != 3: - print_event(eid, name, "no additional data", event_number) - else: + if (old and next_event == 3) or (not old and next_event >= 3 and next_event <= 9): print_event(eid, name, "more data follows", event_number) + else: + print_event(eid, name, "no additional data", event_number) replay_state.reuse_event(next_event) return True +def decode_checkpoint_old(eid, name, dumpfile): + return __decode_checkpoint(eid, name, dumpfile, False) + +def decode_checkpoint(eid, name, dumpfile): + return __decode_checkpoint(eid, name, dumpfile, True) + def decode_checkpoint_init(eid, name, dumpfile): print_event(eid, name) return True -def decode_interrupt(eid, name, dumpfile): +def decode_end(eid, name, dumpfile): print_event(eid, name) - return True - -def decode_clock(eid, name, dumpfile): - clock_data = read_qword(dumpfile) - print_event(eid, name, "0x%x" % (clock_data)) - return True - -def decode_random(eid, name, dumpfile): - ret = read_dword(dumpfile) - data = read_array(dumpfile) - print_event(eid, "%d bytes of random data" % len(data)) - return True + return False # pre-MTTCG merge v5_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction), Decoder(1, "EVENT_INTERRUPT", decode_interrupt), Decoder(2, "EVENT_EXCEPTION", decode_plain), - Decoder(3, "EVENT_ASYNC", decode_async), + Decoder(3, "EVENT_ASYNC", decode_async_old), Decoder(4, "EVENT_SHUTDOWN", decode_unimp), Decoder(5, "EVENT_CHAR_WRITE", decode_char_write), Decoder(6, "EVENT_CHAR_READ_ALL", decode_unimp), @@ -242,7 +302,7 @@ def decode_random(eid, name, dumpfile): v6_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction), Decoder(1, "EVENT_INTERRUPT", decode_interrupt), Decoder(2, "EVENT_EXCEPTION", decode_plain), - Decoder(3, "EVENT_ASYNC", decode_async), + Decoder(3, "EVENT_ASYNC", decode_async_old), Decoder(4, "EVENT_SHUTDOWN", decode_unimp), Decoder(5, "EVENT_CHAR_WRITE", decode_char_write), Decoder(6, "EVENT_CHAR_READ_ALL", decode_unimp), @@ -266,7 +326,7 @@ def decode_random(eid, name, dumpfile): v7_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction), Decoder(1, "EVENT_INTERRUPT", decode_interrupt), Decoder(2, "EVENT_EXCEPTION", decode_unimp), - Decoder(3, "EVENT_ASYNC", decode_async), + Decoder(3, "EVENT_ASYNC", decode_async_old), Decoder(4, "EVENT_SHUTDOWN", decode_unimp), Decoder(5, "EVENT_SHUTDOWN_HOST_ERR", decode_unimp), Decoder(6, "EVENT_SHUTDOWN_HOST_QMP", decode_unimp), @@ -296,32 +356,31 @@ def decode_random(eid, name, dumpfile): v12_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction), Decoder(1, "EVENT_INTERRUPT", decode_interrupt), - Decoder(2, "EVENT_EXCEPTION", decode_plain), - Decoder(3, "EVENT_ASYNC", decode_async), - Decoder(4, "EVENT_ASYNC", decode_async), - Decoder(5, "EVENT_ASYNC", decode_async), - Decoder(6, "EVENT_ASYNC", decode_async), - Decoder(6, "EVENT_ASYNC", decode_async), - Decoder(8, "EVENT_ASYNC", decode_async), - Decoder(9, "EVENT_ASYNC", decode_async), - Decoder(10, "EVENT_ASYNC", decode_async), - Decoder(11, "EVENT_SHUTDOWN", decode_unimp), - Decoder(12, "EVENT_SHUTDOWN_HOST_ERR", decode_unimp), - Decoder(13, "EVENT_SHUTDOWN_HOST_QMP_QUIT", decode_unimp), - Decoder(14, "EVENT_SHUTDOWN_HOST_QMP_RESET", decode_unimp), - Decoder(14, "EVENT_SHUTDOWN_HOST_SIGNAL", decode_unimp), - Decoder(15, "EVENT_SHUTDOWN_HOST_UI", decode_unimp), - Decoder(16, 
"EVENT_SHUTDOWN_GUEST_SHUTDOWN", decode_unimp), - Decoder(17, "EVENT_SHUTDOWN_GUEST_RESET", decode_unimp), - Decoder(18, "EVENT_SHUTDOWN_GUEST_PANIC", decode_unimp), - Decoder(19, "EVENT_SHUTDOWN_GUEST_SUBSYSTEM_RESET", decode_unimp), - Decoder(20, "EVENT_SHUTDOWN_GUEST_SNAPSHOT_LOAD", decode_unimp), - Decoder(21, "EVENT_SHUTDOWN___MAX", decode_unimp), + Decoder(2, "EVENT_EXCEPTION", decode_exception), + Decoder(3, "EVENT_ASYNC_BH", decode_async_bh), + Decoder(4, "EVENT_ASYNC_BH_ONESHOT", decode_async_bh_oneshot), + Decoder(5, "EVENT_ASYNC_INPUT", decode_unimp), + Decoder(6, "EVENT_ASYNC_INPUT_SYNC", decode_unimp), + Decoder(7, "EVENT_ASYNC_CHAR_READ", decode_async_char_read), + Decoder(8, "EVENT_ASYNC_BLOCK", decode_async_block), + Decoder(9, "EVENT_ASYNC_NET", decode_async_net), + Decoder(10, "EVENT_SHUTDOWN", decode_shutdown), + Decoder(11, "EVENT_SHUTDOWN_HOST_ERR", decode_shutdown), + Decoder(12, "EVENT_SHUTDOWN_HOST_QMP_QUIT", decode_shutdown), + Decoder(13, "EVENT_SHUTDOWN_HOST_QMP_RESET", decode_shutdown), + Decoder(14, "EVENT_SHUTDOWN_HOST_SIGNAL", decode_shutdown), + Decoder(15, "EVENT_SHUTDOWN_HOST_UI", decode_shutdown), + Decoder(16, "EVENT_SHUTDOWN_GUEST_SHUTDOWN", decode_shutdown), + Decoder(17, "EVENT_SHUTDOWN_GUEST_RESET", decode_shutdown), + Decoder(18, "EVENT_SHUTDOWN_GUEST_PANIC", decode_shutdown), + Decoder(19, "EVENT_SHUTDOWN_SUBSYS_RESET", decode_shutdown), + Decoder(20, "EVENT_SHUTDOWN_SNAPSHOT_LOAD", decode_shutdown), + Decoder(21, "EVENT_SHUTDOWN___MAX", decode_shutdown), Decoder(22, "EVENT_CHAR_WRITE", decode_char_write), Decoder(23, "EVENT_CHAR_READ_ALL", decode_unimp), Decoder(24, "EVENT_CHAR_READ_ALL_ERROR", decode_unimp), - Decoder(25, "EVENT_AUDIO_IN", decode_unimp), - Decoder(26, "EVENT_AUDIO_OUT", decode_audio_out), + Decoder(25, "EVENT_AUDIO_OUT", decode_audio_out), + Decoder(26, "EVENT_AUDIO_IN", decode_unimp), Decoder(27, "EVENT_RANDOM", decode_random), Decoder(28, "EVENT_CLOCK_HOST", decode_clock), Decoder(29, "EVENT_CLOCK_VIRTUAL_RT", decode_clock), @@ -334,6 +393,7 @@ def decode_random(eid, name, dumpfile): Decoder(36, "EVENT_CP_CLOCK_VIRTUAL_RT", decode_checkpoint), Decoder(37, "EVENT_CP_INIT", decode_checkpoint_init), Decoder(38, "EVENT_CP_RESET", decode_checkpoint), + Decoder(39, "EVENT_END", decode_end), ] def parse_arguments(): @@ -375,6 +435,7 @@ def decode_file(filename): dumpfile) except Exception as inst: print(f"error {inst}") + sys.exit(1) finally: print(f"Reached {dumpfile.tell()} of {dumpsize} bytes") diff --git a/scripts/rust/rust_root_crate.sh b/scripts/rust/rust_root_crate.sh new file mode 100755 index 00000000000..975bddf7f1a --- /dev/null +++ b/scripts/rust/rust_root_crate.sh @@ -0,0 +1,13 @@ +#!/bin/sh + +set -eu + +cat < + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . 
+""" + +import argparse +from dataclasses import dataclass +import logging +from pathlib import Path +from typing import Any, Iterable, List, Mapping, Optional, Set + +try: + import tomllib +except ImportError: + import tomli as tomllib + +STRICT_LINTS = {"unknown_lints", "warnings"} + + +class CargoTOML: + tomldata: Mapping[Any, Any] + workspace_data: Mapping[Any, Any] + check_cfg: Set[str] + + def __init__(self, path: Optional[str], workspace: Optional[str]): + if path is not None: + with open(path, 'rb') as f: + self.tomldata = tomllib.load(f) + else: + self.tomldata = {"lints": {"workspace": True}} + + if workspace is not None: + with open(workspace, 'rb') as f: + self.workspace_data = tomllib.load(f) + if "workspace" not in self.workspace_data: + self.workspace_data["workspace"] = {} + + self.check_cfg = set(self.find_check_cfg()) + + def find_check_cfg(self) -> Iterable[str]: + toml_lints = self.lints + rust_lints = toml_lints.get("rust", {}) + cfg_lint = rust_lints.get("unexpected_cfgs", {}) + return cfg_lint.get("check-cfg", []) + + @property + def lints(self) -> Mapping[Any, Any]: + return self.get_table("lints", True) + + def get_table(self, key: str, can_be_workspace: bool = False) -> Mapping[Any, Any]: + table = self.tomldata.get(key, {}) + if can_be_workspace and table.get("workspace", False) is True: + table = self.workspace_data["workspace"].get(key, {}) + + return table + + +@dataclass +class LintFlag: + flags: List[str] + priority: int + + +def generate_lint_flags(cargo_toml: CargoTOML, strict_lints: bool) -> Iterable[str]: + """Converts Cargo.toml lints to rustc -A/-D/-F/-W flags.""" + + toml_lints = cargo_toml.lints + + lint_list = [] + for k, v in toml_lints.items(): + prefix = "" if k == "rust" else k + "::" + for lint, data in v.items(): + level = data if isinstance(data, str) else data["level"] + priority = 0 if isinstance(data, str) else data.get("priority", 0) + if level == "deny": + flag = "-D" + elif level == "allow": + flag = "-A" + elif level == "warn": + flag = "-W" + elif level == "forbid": + flag = "-F" + else: + raise Exception(f"invalid level {level} for {prefix}{lint}") + + if not (strict_lints and lint in STRICT_LINTS): + lint_list.append(LintFlag(flags=[flag, prefix + lint], priority=priority)) + + if strict_lints: + for lint in STRICT_LINTS: + lint_list.append(LintFlag(flags=["-D", lint], priority=1000000)) + + lint_list.sort(key=lambda x: x.priority) + for lint in lint_list: + yield from lint.flags + + +def generate_cfg_flags(header: str, cargo_toml: CargoTOML) -> Iterable[str]: + """Converts defines from config[..].h headers to rustc --cfg flags.""" + + with open(header, encoding="utf-8") as cfg: + config = [l.split()[1:] for l in cfg if l.startswith("#define")] + + cfg_list = [] + for cfg in config: + name = cfg[0] + if f'cfg({name})' not in cargo_toml.check_cfg: + continue + if len(cfg) >= 2 and cfg[1] != "1": + continue + cfg_list.append("--cfg") + cfg_list.append(name) + return cfg_list + + +def main() -> None: + parser = argparse.ArgumentParser() + parser.add_argument("-v", "--verbose", action="store_true") + parser.add_argument( + "--config-headers", + metavar="CONFIG_HEADER", + action="append", + dest="config_headers", + help="paths to any configuration C headers (*.h files), if any", + required=False, + default=[], + ) + parser.add_argument( + metavar="TOML_FILE", + action="store", + dest="cargo_toml", + help="path to Cargo.toml file", + nargs='?', + ) + parser.add_argument( + "--workspace", + metavar="DIR", + action="store", + 
dest="workspace", + help="path to root of the workspace", + required=False, + default=None, + ) + parser.add_argument( + "--features", + action="store_true", + dest="features", + help="generate --check-cfg arguments for features", + required=False, + default=None, + ) + parser.add_argument( + "--lints", + action="store_true", + dest="lints", + help="generate arguments from [lints] table", + required=False, + default=None, + ) + parser.add_argument( + "--rustc-version", + metavar="VERSION", + dest="rustc_version", + action="store", + help="version of rustc", + required=False, + default="1.0.0", + ) + parser.add_argument( + "--strict-lints", + action="store_true", + dest="strict_lints", + help="apply stricter checks (for nightly Rust)", + default=False, + ) + args = parser.parse_args() + if args.verbose: + logging.basicConfig(level=logging.DEBUG) + logging.debug("args: %s", args) + + rustc_version = tuple((int(x) for x in args.rustc_version.split('.')[0:2])) + if args.workspace: + workspace_cargo_toml = Path(args.workspace, "Cargo.toml").resolve() + cargo_toml = CargoTOML(args.cargo_toml, str(workspace_cargo_toml)) + else: + cargo_toml = CargoTOML(args.cargo_toml, None) + + if args.lints: + for tok in generate_lint_flags(cargo_toml, args.strict_lints): + print(tok) + + if rustc_version >= (1, 80): + if args.lints: + print("--check-cfg") + print("cfg(test)") + for cfg in sorted(cargo_toml.check_cfg): + print("--check-cfg") + print(cfg) + if args.features: + for feature in cargo_toml.get_table("features"): + if feature != "default": + print("--check-cfg") + print(f'cfg(feature,values("{feature}"))') + + for header in args.config_headers: + for tok in generate_cfg_flags(header, cargo_toml): + print(tok) + + +if __name__ == "__main__": + main() diff --git a/scripts/symlink-install-tree.py b/scripts/symlink-install-tree.py index 8ed97e3c943..b72563895c5 100644 --- a/scripts/symlink-install-tree.py +++ b/scripts/symlink-install-tree.py @@ -4,6 +4,7 @@ import errno import json import os +import shlex import subprocess import sys @@ -14,7 +15,7 @@ def destdir_join(d1: str, d2: str) -> str: return str(PurePath(d1, *PurePath(d2).parts[1:])) introspect = os.environ.get('MESONINTROSPECT') -out = subprocess.run([*introspect.split(' '), '--installed'], +out = subprocess.run([*shlex.split(introspect), '--installed'], stdout=subprocess.PIPE, check=True).stdout for source, dest in json.loads(out).items(): bundle_dest = destdir_join('qemu-bundle', dest) diff --git a/scripts/tracetool/__init__.py b/scripts/tracetool/__init__.py index bc03238c0fa..2ae2e562d64 100644 --- a/scripts/tracetool/__init__.py +++ b/scripts/tracetool/__init__.py @@ -12,12 +12,14 @@ __email__ = "stefanha@redhat.com" +import os import re import sys import weakref +from pathlib import PurePath -import tracetool.format import tracetool.backend +import tracetool.format def error_write(*lines): @@ -36,7 +38,7 @@ def error(*lines): def out_open(filename): global out_filename, out_fobj - out_filename = filename + out_filename = posix_relpath(filename) out_fobj = open(filename, 'wt') def out(*lines, **kwargs): @@ -217,7 +219,7 @@ class Event(object): r"(?:(?:(?P\".+),)?\s*(?P\".+))?" 
r"\s*") - _VALID_PROPS = set(["disable", "vcpu"]) + _VALID_PROPS = set(["disable"]) def __init__(self, name, props, fmt, args, lineno, filename, orig=None, event_trans=None, event_exec=None): @@ -308,7 +310,7 @@ def build(line_str, lineno, filename): fmt = [fmt_trans, fmt] args = Arguments.build(groups["args"]) - return Event(name, props, fmt, args, lineno, filename) + return Event(name, props, fmt, args, lineno, posix_relpath(filename)) def __repr__(self): """Evaluable string representation for this object.""" @@ -447,3 +449,10 @@ def generate(events, group, format, backends, tracetool.backend.dtrace.PROBEPREFIX = probe_prefix tracetool.format.generate(events, format, backend, group) + +def posix_relpath(path, start=None): + try: + path = os.path.relpath(path, start) + except ValueError: + pass + return PurePath(path).as_posix() diff --git a/scripts/tracetool/backend/ftrace.py b/scripts/tracetool/backend/ftrace.py index baed2ae61cb..5fa30ccc08e 100644 --- a/scripts/tracetool/backend/ftrace.py +++ b/scripts/tracetool/backend/ftrace.py @@ -12,8 +12,6 @@ __email__ = "stefanha@redhat.com" -import os.path - from tracetool import out @@ -47,7 +45,7 @@ def generate_h(event, group): args=event.args, event_id="TRACE_" + event.name.upper(), event_lineno=event.lineno, - event_filename=os.path.relpath(event.filename), + event_filename=event.filename, fmt=event.fmt.rstrip("\n"), argnames=argnames) diff --git a/scripts/tracetool/backend/log.py b/scripts/tracetool/backend/log.py index de27b7e62e4..eb50ceea34c 100644 --- a/scripts/tracetool/backend/log.py +++ b/scripts/tracetool/backend/log.py @@ -12,8 +12,6 @@ __email__ = "stefanha@redhat.com" -import os.path - from tracetool import out @@ -22,7 +20,6 @@ def generate_h_begin(events, group): out('#include "qemu/log-for-trace.h"', - '#include "qemu/error-report.h"', '') @@ -31,31 +28,16 @@ def generate_h(event, group): if len(event.args) > 0: argnames = ", " + argnames - if "vcpu" in event.properties: - # already checked on the generic format code - cond = "true" - else: - cond = "trace_event_get_state(%s)" % ("TRACE_" + event.name.upper()) + cond = "trace_event_get_state(%s)" % ("TRACE_" + event.name.upper()) out(' if (%(cond)s && qemu_loglevel_mask(LOG_TRACE)) {', - ' if (message_with_timestamp) {', - ' struct timeval _now;', - ' gettimeofday(&_now, NULL);', - '#line %(event_lineno)d "%(event_filename)s"', - ' qemu_log("%%d@%%zu.%%06zu:%(name)s " %(fmt)s "\\n",', - ' qemu_get_thread_id(),', - ' (size_t)_now.tv_sec, (size_t)_now.tv_usec', - ' %(argnames)s);', - '#line %(out_next_lineno)d "%(out_filename)s"', - ' } else {', '#line %(event_lineno)d "%(event_filename)s"', - ' qemu_log("%(name)s " %(fmt)s "\\n"%(argnames)s);', + ' qemu_log("%(name)s " %(fmt)s "\\n"%(argnames)s);', '#line %(out_next_lineno)d "%(out_filename)s"', - ' }', ' }', cond=cond, event_lineno=event.lineno, - event_filename=os.path.relpath(event.filename), + event_filename=event.filename, name=event.name, fmt=event.fmt.rstrip("\n"), argnames=argnames) diff --git a/scripts/tracetool/backend/simple.py b/scripts/tracetool/backend/simple.py index a74d61fcd64..7c84c06b200 100644 --- a/scripts/tracetool/backend/simple.py +++ b/scripts/tracetool/backend/simple.py @@ -36,8 +36,13 @@ def generate_h_begin(events, group): def generate_h(event, group): - out(' _simple_%(api)s(%(args)s);', + event_id = 'TRACE_' + event.name.upper() + cond = "trace_event_get_state(%s)" % event_id + out(' if (%(cond)s) {', + ' _simple_%(api)s(%(args)s);', + ' }', api=event.api(), + cond=cond, args=", 
".join(event.args.names())) @@ -72,22 +77,10 @@ def generate_c(event, group): if len(event.args) == 0: sizestr = '0' - event_id = 'TRACE_' + event.name.upper() - if "vcpu" in event.properties: - # already checked on the generic format code - cond = "true" - else: - cond = "trace_event_get_state(%s)" % event_id - out('', - ' if (!%(cond)s) {', - ' return;', - ' }', - '', ' if (trace_record_start(&rec, %(event_obj)s.id, %(size_str)s)) {', ' return; /* Trace Buffer Full, Event Dropped ! */', ' }', - cond=cond, event_obj=event.api(event.QEMU_EVENT), size_str=sizestr) diff --git a/scripts/tracetool/backend/syslog.py b/scripts/tracetool/backend/syslog.py index 012970f6cc0..3f82e54aabf 100644 --- a/scripts/tracetool/backend/syslog.py +++ b/scripts/tracetool/backend/syslog.py @@ -12,8 +12,6 @@ __email__ = "stefanha@redhat.com" -import os.path - from tracetool import out @@ -30,11 +28,7 @@ def generate_h(event, group): if len(event.args) > 0: argnames = ", " + argnames - if "vcpu" in event.properties: - # already checked on the generic format code - cond = "true" - else: - cond = "trace_event_get_state(%s)" % ("TRACE_" + event.name.upper()) + cond = "trace_event_get_state(%s)" % ("TRACE_" + event.name.upper()) out(' if (%(cond)s) {', '#line %(event_lineno)d "%(event_filename)s"', @@ -43,7 +37,7 @@ def generate_h(event, group): ' }', cond=cond, event_lineno=event.lineno, - event_filename=os.path.relpath(event.filename), + event_filename=event.filename, name=event.name, fmt=event.fmt.rstrip("\n"), argnames=argnames) diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh index c34ac6454ef..717c379f9e0 100755 --- a/scripts/update-linux-headers.sh +++ b/scripts/update-linux-headers.sh @@ -156,13 +156,9 @@ EOF cp "$hdrdir/include/asm/unistd_32.h" "$output/linux-headers/asm-s390/" cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-s390/" fi - if [ $arch = arm ]; then - cp "$hdrdir/include/asm/unistd-eabi.h" "$output/linux-headers/asm-arm/" - cp "$hdrdir/include/asm/unistd-oabi.h" "$output/linux-headers/asm-arm/" - cp "$hdrdir/include/asm/unistd-common.h" "$output/linux-headers/asm-arm/" - fi if [ $arch = arm64 ]; then cp "$hdrdir/include/asm/sve_context.h" "$output/linux-headers/asm-arm64/" + cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-arm64/" fi if [ $arch = x86 ]; then cp "$hdrdir/include/asm/unistd_32.h" "$output/linux-headers/asm-x86/" @@ -176,7 +172,7 @@ EOF # Remove everything except the macros from bootparam.h avoiding the # unnecessary import of several video/ist/etc headers - sed -e '/__ASSEMBLY__/,/__ASSEMBLY__/d' \ + sed -e '/__ASSEMBLER__/,/__ASSEMBLER__/d' \ "$hdrdir/include/asm/bootparam.h" > "$hdrdir/bootparam.h" cp_portable "$hdrdir/bootparam.h" \ "$output/include/standard-headers/asm-$arch" @@ -185,6 +181,12 @@ EOF fi if [ $arch = riscv ]; then cp "$hdrdir/include/asm/ptrace.h" "$output/linux-headers/asm-riscv/" + cp "$hdrdir/include/asm/unistd_32.h" "$output/linux-headers/asm-riscv/" + cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-riscv/" + fi + if [ $arch = loongarch ]; then + cp "$hdrdir/include/asm/kvm_para.h" "$output/linux-headers/asm-loongarch/" + cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-loongarch/" fi done arch= @@ -251,6 +253,7 @@ for i in "$hdrdir"/include/linux/*virtio*.h \ "$hdrdir/include/linux/kernel.h" \ "$hdrdir/include/linux/kvm_para.h" \ "$hdrdir/include/linux/vhost_types.h" \ + "$hdrdir/include/linux/vmclock-abi.h" \ "$hdrdir/include/linux/sysinfo.h"; do cp_portable "$i" 
"$output/include/standard-headers/linux" done diff --git a/scripts/update-syscalltbl.sh b/scripts/update-syscalltbl.sh index 2d23e568007..f0927c544db 100755 --- a/scripts/update-syscalltbl.sh +++ b/scripts/update-syscalltbl.sh @@ -1,13 +1,18 @@ TBL_LIST="\ arch/alpha/kernel/syscalls/syscall.tbl,linux-user/alpha/syscall.tbl \ arch/arm/tools/syscall.tbl,linux-user/arm/syscall.tbl \ +scripts/syscall.tbl,linux-user/aarch64/syscall_64.tbl \ +scripts/syscall.tbl,linux-user/hexagon/syscall.tbl \ +scripts/syscall.tbl,linux-user/loongarch64/syscall.tbl \ arch/m68k/kernel/syscalls/syscall.tbl,linux-user/m68k/syscall.tbl \ arch/microblaze/kernel/syscalls/syscall.tbl,linux-user/microblaze/syscall.tbl \ arch/mips/kernel/syscalls/syscall_n32.tbl,linux-user/mips64/syscall_n32.tbl \ arch/mips/kernel/syscalls/syscall_n64.tbl,linux-user/mips64/syscall_n64.tbl \ arch/mips/kernel/syscalls/syscall_o32.tbl,linux-user/mips/syscall_o32.tbl \ +scripts/syscall.tbl,linux-user/openrisc/syscall.tbl \ arch/parisc/kernel/syscalls/syscall.tbl,linux-user/hppa/syscall.tbl \ arch/powerpc/kernel/syscalls/syscall.tbl,linux-user/ppc/syscall.tbl \ +scripts/syscall.tbl,linux-user/riscv/syscall.tbl \ arch/s390/kernel/syscalls/syscall.tbl,linux-user/s390x/syscall.tbl \ arch/sh/kernel/syscalls/syscall.tbl,linux-user/sh4/syscall.tbl \ arch/sparc/kernel/syscalls/syscall.tbl,linux-user/sparc64/syscall.tbl \ diff --git a/scripts/vmstate-static-checker.py b/scripts/vmstate-static-checker.py index 9c0e6b81f21..2335e25f94c 100755 --- a/scripts/vmstate-static-checker.py +++ b/scripts/vmstate-static-checker.py @@ -42,6 +42,7 @@ def check_fields_match(name, s_field, d_field): # Some fields changed names between qemu versions. This list # is used to allow such changes in each section / description. changed_names = { + 'acpi-ghes': ['ghes_addr_le', 'hw_error_le'], 'apic': ['timer', 'timer_expiry'], 'e1000': ['dev', 'parent_obj'], 'ehci': ['dev', 'pcidev'], @@ -90,6 +91,7 @@ def check_fields_match(name, s_field, d_field): 'mem_win_size', 'mig_mem_win_size', 'io_win_addr', 'mig_io_win_addr', 'io_win_size', 'mig_io_win_size'], + 'hpet': ['num_timers', 'num_timers_save'], } if not name in changed_names: diff --git a/scsi/pr-manager-helper.c b/scsi/pr-manager-helper.c index 3be52a98d58..6b86f01b01f 100644 --- a/scsi/pr-manager-helper.c +++ b/scsi/pr-manager-helper.c @@ -300,7 +300,7 @@ static void pr_manager_helper_instance_init(Object *obj) } static void pr_manager_helper_class_init(ObjectClass *klass, - void *class_data G_GNUC_UNUSED) + const void *class_data G_GNUC_UNUSED) { PRManagerClass *prmgr_klass = PR_MANAGER_CLASS(klass); UserCreatableClass *uc_klass = USER_CREATABLE_CLASS(klass); diff --git a/scsi/pr-manager.c b/scsi/pr-manager.c index fb5fc297309..40e1210eb21 100644 --- a/scsi/pr-manager.c +++ b/scsi/pr-manager.c @@ -21,7 +21,7 @@ #include "qemu/module.h" #include "qapi/qapi-commands-block.h" -#define PR_MANAGER_PATH "/objects" +#define PR_MANAGER_PATH "objects" typedef struct PRManagerData { PRManager *pr_mgr; @@ -77,7 +77,7 @@ static const TypeInfo pr_manager_info = { .name = TYPE_PR_MANAGER, .class_size = sizeof(PRManagerClass), .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } @@ -135,7 +135,7 @@ PRManagerInfoList *qmp_query_pr_managers(Error **errp) { PRManagerInfoList *head = NULL; PRManagerInfoList **prev = &head; - Object *container = container_get(object_get_root(), PR_MANAGER_PATH); + Object *container = object_get_container(PR_MANAGER_PATH); 
object_child_foreach(container, query_one_pr_manager, &prev); return head; diff --git a/scsi/qemu-pr-helper.c b/scsi/qemu-pr-helper.c index c6c6347e9b6..b69dd982d6a 100644 --- a/scsi/qemu-pr-helper.c +++ b/scsi/qemu-pr-helper.c @@ -47,7 +47,7 @@ #include "qemu/log.h" #include "qemu/systemd.h" #include "qapi/util.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qstring.h" #include "io/channel-socket.h" #include "trace/control.h" #include "qemu-version.h" diff --git a/scsi/utils.c b/scsi/utils.c index 357b0366716..545956f4f95 100644 --- a/scsi/utils.c +++ b/scsi/utils.c @@ -587,20 +587,27 @@ int scsi_sense_from_errno(int errno_value, SCSISense *sense) return GOOD; case EDOM: return TASK_SET_FULL; +#if ENODEV != ENOMEDIUM + case ENODEV: + /* + * Some of the BSDs have ENODEV and ENOMEDIUM as synonyms. For + * everyone else, give a more severe sense code for ENODEV. + */ +#endif #ifdef CONFIG_LINUX /* These errno mapping are specific to Linux. For more information: * - scsi_check_sense and scsi_decide_disposition in drivers/scsi/scsi_error.c * - scsi_result_to_blk_status in drivers/scsi/scsi_lib.c * - blk_errors[] in block/blk-core.c */ + case EREMOTEIO: + *sense = SENSE_CODE(TARGET_FAILURE); + return CHECK_CONDITION; case EBADE: return RESERVATION_CONFLICT; case ENODATA: *sense = SENSE_CODE(READ_ERROR); return CHECK_CONDITION; - case EREMOTEIO: - *sense = SENSE_CODE(TARGET_FAILURE); - return CHECK_CONDITION; #endif case ENOMEDIUM: *sense = SENSE_CODE(NO_MEDIUM); diff --git a/semihosting/arm-compat-semi.c b/semihosting/arm-compat-semi.c index d78c6428b90..86e5260e504 100644 --- a/semihosting/arm-compat-semi.c +++ b/semihosting/arm-compat-semi.c @@ -166,6 +166,7 @@ static LayoutInfo common_semi_find_bases(CPUState *cs) #endif +#include "cpu.h" #include "common-semi-target.h" /* diff --git a/semihosting/console.c b/semihosting/console.c index 60102bbab66..c3683a15668 100644 --- a/semihosting/console.c +++ b/semihosting/console.c @@ -18,14 +18,15 @@ #include "qemu/osdep.h" #include "semihosting/semihost.h" #include "semihosting/console.h" +#include "exec/cpu-common.h" #include "exec/gdbstub.h" -#include "exec/exec-all.h" #include "qemu/log.h" #include "chardev/char.h" #include "chardev/char-fe.h" #include "qemu/main-loop.h" #include "qapi/error.h" #include "qemu/fifo8.h" +#include "hw/core/cpu.h" /* Access to this structure is protected by the BQL */ typedef struct SemihostingConsole { diff --git a/semihosting/meson.build b/semihosting/meson.build index 34933e5a195..b1ab2506c6e 100644 --- a/semihosting/meson.build +++ b/semihosting/meson.build @@ -3,14 +3,15 @@ specific_ss.add(when: 'CONFIG_SEMIHOSTING', if_true: files( 'syscalls.c', )) -specific_ss.add(when: ['CONFIG_SEMIHOSTING', 'CONFIG_SYSTEM_ONLY'], if_true: files( +common_ss.add(when: 'CONFIG_SEMIHOSTING', if_false: files('stubs-all.c')) +user_ss.add(when: 'CONFIG_SEMIHOSTING', if_true: files('user.c')) +system_ss.add(when: 'CONFIG_SEMIHOSTING', if_true: files( 'config.c', 'console.c', 'uaccess.c', +), if_false: files( + 'stubs-system.c', )) -common_ss.add(when: ['CONFIG_SEMIHOSTING', 'CONFIG_SYSTEM_ONLY'], if_false: files('stubs-all.c')) -system_ss.add(when: ['CONFIG_SEMIHOSTING'], if_false: files('stubs-system.c')) - specific_ss.add(when: ['CONFIG_ARM_COMPATIBLE_SEMIHOSTING'], if_true: files('arm-compat-semi.c')) diff --git a/semihosting/stubs-all.c b/semihosting/stubs-all.c index a2a1fc9c6fe..c001c845749 100644 --- a/semihosting/stubs-all.c +++ b/semihosting/stubs-all.c @@ -11,6 +11,12 @@ #include "qemu/osdep.h" #include 
"semihosting/semihost.h" +/* Queries to config status default to off */ +bool semihosting_enabled(bool is_user) +{ + return false; +} + SemihostingTarget semihosting_get_target(void) { return SEMIHOSTING_TARGET_AUTO; diff --git a/semihosting/stubs-system.c b/semihosting/stubs-system.c index f26cbb7c257..989789f373a 100644 --- a/semihosting/stubs-system.c +++ b/semihosting/stubs-system.c @@ -22,12 +22,6 @@ QemuOptsList qemu_semihosting_config_opts = { }, }; -/* Queries to config status default to off */ -bool semihosting_enabled(bool is_user) -{ - return false; -} - /* * All the rest are empty subs. We could g_assert_not_reached() but * that adds extra weight to the final binary. Waste not want not. diff --git a/semihosting/syscalls.c b/semihosting/syscalls.c index c40348f9968..f6451d9bb0e 100644 --- a/semihosting/syscalls.c +++ b/semihosting/syscalls.c @@ -7,6 +7,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "gdbstub/syscalls.h" #include "semihosting/guestfd.h" @@ -287,6 +288,7 @@ static void host_open(CPUState *cs, gdb_syscall_complete_cb complete, ret = open(p, host_flags, mode); if (ret < 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to open %s\n", __func__, p); complete(cs, -1, errno); } else { int guestfd = alloc_guestfd(); diff --git a/semihosting/uaccess.c b/semihosting/uaccess.c index dc587d73bc4..ff944d8c2f7 100644 --- a/semihosting/uaccess.c +++ b/semihosting/uaccess.c @@ -8,11 +8,14 @@ */ #include "qemu/osdep.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-mmu-index.h" +#include "accel/tcg/probe.h" +#include "exec/target_page.h" +#include "exec/tlb-flags.h" #include "semihosting/uaccess.h" -void *uaccess_lock_user(CPUArchState *env, target_ulong addr, - target_ulong len, bool copy) +void *uaccess_lock_user(CPUArchState *env, vaddr addr, + size_t len, bool copy) { void *p = malloc(len); if (p && copy) { @@ -24,7 +27,7 @@ void *uaccess_lock_user(CPUArchState *env, target_ulong addr, return p; } -ssize_t uaccess_strlen_user(CPUArchState *env, target_ulong addr) +ssize_t uaccess_strlen_user(CPUArchState *env, vaddr addr) { int mmu_idx = cpu_mmu_index(env_cpu(env), false); size_t len = 0; @@ -72,7 +75,7 @@ ssize_t uaccess_strlen_user(CPUArchState *env, target_ulong addr) } } -char *uaccess_lock_user_string(CPUArchState *env, target_ulong addr) +char *uaccess_lock_user_string(CPUArchState *env, vaddr addr) { ssize_t len = uaccess_strlen_user(env, addr); if (len < 0) { @@ -82,7 +85,7 @@ char *uaccess_lock_user_string(CPUArchState *env, target_ulong addr) } void uaccess_unlock_user(CPUArchState *env, void *p, - target_ulong addr, target_ulong len) + vaddr addr, size_t len) { if (len) { cpu_memory_rw_debug(env_cpu(env), addr, p, len, 1); diff --git a/semihosting/user.c b/semihosting/user.c new file mode 100644 index 00000000000..98c144cb456 --- /dev/null +++ b/semihosting/user.c @@ -0,0 +1,21 @@ +/* + * Semihosting for user emulation + * + * Copyright (c) 2019 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "semihosting/semihost.h" + +bool semihosting_enabled(bool is_user) +{ + assert(is_user); + return true; +} + +SemihostingTarget semihosting_get_target(void) +{ + return SEMIHOSTING_TARGET_AUTO; +} diff --git a/stats/stats-hmp-cmds.c b/stats/stats-hmp-cmds.c index 1f91bf8bd54..b93b471b1b2 100644 --- a/stats/stats-hmp-cmds.c +++ b/stats/stats-hmp-cmds.c @@ -11,7 +11,7 @@ #include "monitor/monitor.h" #include "qemu/cutils.h" #include "hw/core/cpu.h" -#include "qapi/qmp/qdict.h" +#include 
"qobject/qdict.h" #include "qapi/error.h" static void print_stats_schema_value(Monitor *mon, StatsSchemaValue *value) diff --git a/stats/stats-qmp-cmds.c b/stats/stats-qmp-cmds.c index e214b964fda..884674ee323 100644 --- a/stats/stats-qmp-cmds.c +++ b/stats/stats-qmp-cmds.c @@ -6,7 +6,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/stats.h" +#include "system/stats.h" #include "qapi/qapi-commands-stats.h" #include "qemu/queue.h" #include "qapi/error.h" diff --git a/storage-daemon/qapi/qapi-schema.json b/storage-daemon/qapi/qapi-schema.json index f10c9494906..478e7a92b21 100644 --- a/storage-daemon/qapi/qapi-schema.json +++ b/storage-daemon/qapi/qapi-schema.json @@ -13,6 +13,32 @@ # the array type in the main schema, even if it is unused outside of the # storage daemon. +## +# ************ +# Introduction +# ************ +# +# This manual describes the commands and events supported by the QEMU +# storage daemon QMP. +# +# For locating a particular item, please see the `qapi-qsd-index`. +# +# The following notation is used in examples: +# +# .. qmp-example:: +# +# -> ... text sent by client (commands) ... +# <- ... text sent by server (command responses and events) ... +# +# Example text is formatted for readability. However, in real +# protocol usage, its commonly emitted as a single line. +# +# Please refer to the +# :doc:`QEMU Machine Protocol Specification ` +# for the general format of commands, responses, and events. +## + + { 'include': '../../qapi/pragma.json' } # Documentation generated with qapi-gen.py is in source order, with @@ -27,7 +53,9 @@ { 'include': '../../qapi/job.json' } ## -# = Block devices +# ************* +# Block devices +# ************* ## { 'include': '../../qapi/block-core.json' } { 'include': '../../qapi/block-export.json' } diff --git a/storage-daemon/qemu-storage-daemon.c b/storage-daemon/qemu-storage-daemon.c index 0e9354faa65..eb725613582 100644 --- a/storage-daemon/qemu-storage-daemon.c +++ b/storage-daemon/qemu-storage-daemon.c @@ -38,8 +38,8 @@ #include "qapi/qapi-visit-block-core.h" #include "qapi/qapi-visit-block-export.h" #include "qapi/qapi-visit-control.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "qapi/qobject-input-visitor.h" #include "qemu/help-texts.h" @@ -58,7 +58,7 @@ #include "storage-daemon/qapi/qapi-commands.h" #include "storage-daemon/qapi/qapi-init-commands.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "trace/control.h" static const char *pid_file; diff --git a/stubs/blk-commit-all.c b/stubs/blk-commit-all.c index e156c57f8d7..76b08275a5c 100644 --- a/stubs/blk-commit-all.c +++ b/stubs/blk-commit-all.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" int blk_commit_all(void) { diff --git a/stubs/change-state-handler.c b/stubs/change-state-handler.c index d1ed46bfb0f..002d248abf2 100644 --- a/stubs/change-state-handler.c +++ b/stubs/change-state-handler.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb, void *opaque) diff --git a/stubs/cpu-get-clock.c b/stubs/cpu-get-clock.c index 9e924048169..53b9c83d768 100644 --- a/stubs/cpu-get-clock.c +++ b/stubs/cpu-get-clock.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "sysemu/cpu-timers.h" +#include "system/cpu-timers.h" #include "qemu/main-loop.h" int64_t cpu_get_clock(void) diff --git 
a/stubs/cpu-synchronize-state.c b/stubs/cpu-synchronize-state.c index d9211da66ce..2ed09ff3ed6 100644 --- a/stubs/cpu-synchronize-state.c +++ b/stubs/cpu-synchronize-state.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "sysemu/hw_accel.h" +#include "system/hw_accel.h" void cpu_synchronize_state(CPUState *cpu) { diff --git a/stubs/cpus-virtual-clock.c b/stubs/cpus-virtual-clock.c index af7c1a1d403..0b83a925334 100644 --- a/stubs/cpus-virtual-clock.c +++ b/stubs/cpus-virtual-clock.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "sysemu/cpu-timers.h" +#include "system/cpu-timers.h" #include "qemu/main-loop.h" int64_t cpus_get_virtual_clock(void) diff --git a/stubs/dump.c b/stubs/dump.c index 1f28ec2be3f..df7897b72b1 100644 --- a/stubs/dump.c +++ b/stubs/dump.c @@ -12,7 +12,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/dump-arch.h" +#include "system/dump-arch.h" int cpu_get_dump_info(ArchDumpInfo *info, const struct GuestPhysBlockList *guest_phys_blocks) diff --git a/stubs/get-vm-name.c b/stubs/get-vm-name.c index 0906303f730..4cfac484e53 100644 --- a/stubs/get-vm-name.c +++ b/stubs/get-vm-name.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "sysemu/sysemu.h" +#include "system/system.h" const char *qemu_get_vm_name(void) { diff --git a/stubs/icount.c b/stubs/icount.c index 9f9a59f55b9..ceb73b4fc2c 100644 --- a/stubs/icount.c +++ b/stubs/icount.c @@ -1,6 +1,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "sysemu/cpu-timers.h" +#include "exec/icount.h" /* icount - Instruction Counter API */ diff --git a/stubs/iothread-lock.c b/stubs/iothread-lock.c index d7890e5581c..6050c081f53 100644 --- a/stubs/iothread-lock.c +++ b/stubs/iothread-lock.c @@ -1,9 +1,17 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" +static bool bql_is_locked = false; +static uint32_t bql_unlock_blocked; + bool bql_locked(void) { - return false; + return bql_is_locked; +} + +void rust_bql_mock_lock(void) +{ + bql_is_locked = true; } void bql_lock_impl(const char *file, int line) @@ -12,4 +20,17 @@ void bql_lock_impl(const char *file, int line) void bql_unlock(void) { + assert(!bql_unlock_blocked); +} + +void bql_block_unlock(bool increase) +{ + uint32_t new_value; + + assert(bql_locked()); + + /* check for overflow! */ + new_value = bql_unlock_blocked + increase - !increase; + assert((new_value > bql_unlock_blocked) == increase); + bql_unlock_blocked = new_value; } diff --git a/stubs/meson.build b/stubs/meson.build index 772a3e817df..cef046e6854 100644 --- a/stubs/meson.build +++ b/stubs/meson.build @@ -55,7 +55,14 @@ endif if have_user # Symbols that are used by hw/core. stub_ss.add(files('cpu-synchronize-state.c')) - stub_ss.add(files('qdev.c')) + + # Stubs for QAPI events. Those can always be included in the build, but + # they are not built at all for --disable-system builds. 
+ if not have_system + stub_ss.add(files('qdev.c')) + endif + + stub_ss.add(files('monitor-internal.c')) endif if have_system @@ -70,6 +77,14 @@ if have_system stub_ss.add(files('target-monitor-defs.c')) stub_ss.add(files('win32-kbd-hook.c')) stub_ss.add(files('xen-hw-stub.c')) + stub_ss.add(files('monitor-arm-gic.c')) + stub_ss.add(files('monitor-i386-rtc.c')) + stub_ss.add(files('monitor-i386-sev.c')) + stub_ss.add(files('monitor-i386-sgx.c')) + stub_ss.add(files('monitor-i386-xen.c')) + stub_ss.add(files('monitor-cpu.c')) + stub_ss.add(files('monitor-cpu-s390x.c')) + stub_ss.add(files('monitor-cpu-s390x-kvm.c')) endif if have_system or have_user diff --git a/stubs/monitor-arm-gic.c b/stubs/monitor-arm-gic.c new file mode 100644 index 00000000000..b3429243ef8 --- /dev/null +++ b/stubs/monitor-arm-gic.c @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-arm.h" + + +GICCapabilityList *qmp_query_gic_capabilities(Error **errp) +{ + error_setg(errp, "GIC hardware is not available on this target"); + return NULL; +} diff --git a/stubs/monitor-cpu-s390x-kvm.c b/stubs/monitor-cpu-s390x-kvm.c new file mode 100644 index 00000000000..8683dd2d4c6 --- /dev/null +++ b/stubs/monitor-cpu-s390x-kvm.c @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine-s390x.h" + +void qmp_set_cpu_topology(uint16_t core, + bool has_socket, uint16_t socket, + bool has_book, uint16_t book, + bool has_drawer, uint16_t drawer, + bool has_entitlement, S390CpuEntitlement entitlement, + bool has_dedicated, bool dedicated, + Error **errp) +{ + error_setg(errp, "CPU topology change is not supported on this target"); +} + +CpuPolarizationInfo *qmp_query_s390x_cpu_polarization(Error **errp) +{ + error_setg(errp, "CPU polarization is not supported on this target"); + return NULL; +} diff --git a/stubs/monitor-cpu-s390x.c b/stubs/monitor-cpu-s390x.c new file mode 100644 index 00000000000..71e794482b5 --- /dev/null +++ b/stubs/monitor-cpu-s390x.c @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" + +CpuModelCompareInfo * +qmp_query_cpu_model_comparison(CpuModelInfo *infoa, + CpuModelInfo *infob, + Error **errp) +{ + error_setg(errp, "CPU model comparison is not supported on this target"); + return NULL; +} + +CpuModelBaselineInfo * +qmp_query_cpu_model_baseline(CpuModelInfo *infoa, + CpuModelInfo *infob, + Error **errp) +{ + error_setg(errp, "CPU model baseline is not supported on this target"); + return NULL; +} diff --git a/stubs/monitor-cpu.c b/stubs/monitor-cpu.c new file mode 100644 index 00000000000..a8c7ee89b9d --- /dev/null +++ b/stubs/monitor-cpu.c @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" + +CpuModelExpansionInfo * +qmp_query_cpu_model_expansion(CpuModelExpansionType type, + CpuModelInfo *model, + Error **errp) +{ + error_setg(errp, "CPU model expansion is not supported on this target"); + return NULL; +} + +CpuDefinitionInfoList * +qmp_query_cpu_definitions(Error **errp) +{ + error_setg(errp, "CPU model definitions are not supported on this target"); + return NULL; +} diff --git a/stubs/monitor-i386-rtc.c b/stubs/monitor-i386-rtc.c new file mode 100644 index 00000000000..8420d7c93c2 --- /dev/null +++ 
b/stubs/monitor-i386-rtc.c @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-i386.h" + +void qmp_rtc_reset_reinjection(Error **errp) +{ + error_setg(errp, + "RTC interrupt reinjection backlog reset is not available for" + "this machine"); +} diff --git a/stubs/monitor-i386-sev.c b/stubs/monitor-i386-sev.c new file mode 100644 index 00000000000..d4f024128ca --- /dev/null +++ b/stubs/monitor-i386-sev.c @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-i386.h" + +SevInfo *qmp_query_sev(Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); + return NULL; +} + +SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); + return NULL; +} + +SevCapability *qmp_query_sev_capabilities(Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); + return NULL; +} + +void qmp_sev_inject_launch_secret(const char *packet_header, const char *secret, + bool has_gpa, uint64_t gpa, Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); +} + +SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce, + Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); + return NULL; +} diff --git a/stubs/monitor-i386-sgx.c b/stubs/monitor-i386-sgx.c new file mode 100644 index 00000000000..00e081d52dd --- /dev/null +++ b/stubs/monitor-i386-sgx.c @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-i386.h" + +SgxInfo *qmp_query_sgx(Error **errp) +{ + error_setg(errp, "SGX support is not compiled in"); + return NULL; +} + +SgxInfo *qmp_query_sgx_capabilities(Error **errp) +{ + error_setg(errp, "SGX support is not compiled in"); + return NULL; +} diff --git a/stubs/monitor-i386-xen.c b/stubs/monitor-i386-xen.c new file mode 100644 index 00000000000..95b826f9795 --- /dev/null +++ b/stubs/monitor-i386-xen.c @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-i386.h" + +EvtchnInfoList *qmp_xen_event_list(Error **errp) +{ + error_setg(errp, "Xen event channel emulation not enabled"); + return NULL; +} + +void qmp_xen_event_inject(uint32_t port, Error **errp) +{ + error_setg(errp, "Xen event channel emulation not enabled"); +} diff --git a/stubs/qemu-timer-notify-cb.c b/stubs/qemu-timer-notify-cb.c index 845e46f8e03..b57b983c6f4 100644 --- a/stubs/qemu-timer-notify-cb.c +++ b/stubs/qemu-timer-notify-cb.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "sysemu/cpu-timers.h" +#include "system/cpu-timers.h" #include "qemu/main-loop.h" void qemu_timer_notify_cb(void *opaque, QEMUClockType type) diff --git a/stubs/qmp-command-available.c b/stubs/qmp-command-available.c index 46540af7bfc..8851faced13 100644 --- a/stubs/qmp-command-available.c +++ b/stubs/qmp-command-available.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "qapi/qmp/dispatch.h" +#include "qapi/qmp-registry.h" bool qmp_command_available(const QmpCommand *cmd, Error **errp) { diff --git a/stubs/qmp-quit.c b/stubs/qmp-quit.c index a3ff47f7bdb..8fb523e9050 100644 --- a/stubs/qmp-quit.c +++ b/stubs/qmp-quit.c @@ -1,6 +1,6 @@ #include "qemu/osdep.h" #include "qapi/qapi-commands-control.h" -#include 
"qapi/qmp/dispatch.h" +#include "qapi/qmp-registry.h" void qmp_quit(Error **errp) { diff --git a/stubs/qtest.c b/stubs/qtest.c index 39e376eb67d..6c397259274 100644 --- a/stubs/qtest.c +++ b/stubs/qtest.c @@ -9,7 +9,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/qtest.h" +#include "system/qtest.h" /* Needed for qtest_allowed() */ bool qtest_allowed; diff --git a/stubs/ram-block.c b/stubs/ram-block.c index 108197683bb..e88fab31a5d 100644 --- a/stubs/ram-block.c +++ b/stubs/ram-block.c @@ -1,7 +1,7 @@ #include "qemu/osdep.h" #include "exec/ramlist.h" #include "exec/cpu-common.h" -#include "exec/memory.h" +#include "system/memory.h" void *qemu_ram_get_host_addr(RAMBlock *rb) { diff --git a/stubs/replay-mode.c b/stubs/replay-mode.c index 264be9d96c9..439d97e4a87 100644 --- a/stubs/replay-mode.c +++ b/stubs/replay-mode.c @@ -1,4 +1,4 @@ #include "qemu/osdep.h" -#include "sysemu/replay.h" +#include "system/replay.h" ReplayMode replay_mode; diff --git a/stubs/replay-tools.c b/stubs/replay-tools.c index 3e8ca3212d9..c537485f401 100644 --- a/stubs/replay-tools.c +++ b/stubs/replay-tools.c @@ -1,5 +1,5 @@ #include "qemu/osdep.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "block/aio.h" bool replay_events_enabled(void) diff --git a/stubs/runstate-check.c b/stubs/runstate-check.c index 2ccda2b70fa..c47abdf84b7 100644 --- a/stubs/runstate-check.c +++ b/stubs/runstate-check.c @@ -1,6 +1,6 @@ #include "qemu/osdep.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" bool runstate_check(RunState state) { return state == RUN_STATE_PRELAUNCH; diff --git a/stubs/vm-stop.c b/stubs/vm-stop.c index 7f8a9da8a59..e139aabf7be 100644 --- a/stubs/vm-stop.c +++ b/stubs/vm-stop.c @@ -1,6 +1,6 @@ #include "qemu/osdep.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" void qemu_system_vmstop_request_prepare(void) { abort(); diff --git a/stubs/vmstate.c b/stubs/vmstate.c index 8513d9204e4..c190762d7c1 100644 --- a/stubs/vmstate.c +++ b/stubs/vmstate.c @@ -1,5 +1,7 @@ #include "qemu/osdep.h" #include "migration/vmstate.h" +#include "qapi/qapi-types-migration.h" +#include "migration/client-options.h" int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id, @@ -21,3 +23,8 @@ bool vmstate_check_only_migratable(const VMStateDescription *vmsd) { return true; } + +MigMode migrate_mode(void) +{ + return MIG_MODE_NORMAL; +} diff --git a/subprojects/.gitignore b/subprojects/.gitignore index adca0266be6..f4281934ce1 100644 --- a/subprojects/.gitignore +++ b/subprojects/.gitignore @@ -6,3 +6,17 @@ /keycodemapdb /libvfio-user /slirp +/anyhow-1.0.98 +/arbitrary-int-1.2.7 +/bilge-0.2.0 +/bilge-impl-0.2.0 +/either-1.12.0 +/foreign-0.3.1 +/itertools-0.11.0 +/libc-0.2.162 +/proc-macro-error-1.0.4 +/proc-macro-error-attr-1.0.4 +/proc-macro2-1.0.84 +/quote-1.0.36 +/syn-2.0.66 +/unicode-ident-1.0.12 diff --git a/subprojects/anyhow-1-rs.wrap b/subprojects/anyhow-1-rs.wrap new file mode 100644 index 00000000000..a69a3645b49 --- /dev/null +++ b/subprojects/anyhow-1-rs.wrap @@ -0,0 +1,7 @@ +[wrap-file] +directory = anyhow-1.0.98 +source_url = https://crates.io/api/v1/crates/anyhow/1.0.98/download +source_filename = anyhow-1.0.98.tar.gz +source_hash = e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487 +#method = cargo +patch_directory = anyhow-1-rs diff --git a/subprojects/arbitrary-int-1-rs.wrap b/subprojects/arbitrary-int-1-rs.wrap new file mode 100644 index 00000000000..a1838b20b0f --- /dev/null +++ b/subprojects/arbitrary-int-1-rs.wrap @@ -0,0 +1,10 @@ +[wrap-file] 
+directory = arbitrary-int-1.2.7 +source_url = https://crates.io/api/v1/crates/arbitrary-int/1.2.7/download +source_filename = arbitrary-int-1.2.7.tar.gz +source_hash = c84fc003e338a6f69fbd4f7fe9f92b535ff13e9af8997f3b14b6ddff8b1df46d +#method = cargo +patch_directory = arbitrary-int-1-rs + +# bump this version number on every change to meson.build or the patches: +# v2 diff --git a/subprojects/bilge-0.2-rs.wrap b/subprojects/bilge-0.2-rs.wrap new file mode 100644 index 00000000000..900bb1497b9 --- /dev/null +++ b/subprojects/bilge-0.2-rs.wrap @@ -0,0 +1,10 @@ +[wrap-file] +directory = bilge-0.2.0 +source_url = https://crates.io/api/v1/crates/bilge/0.2.0/download +source_filename = bilge-0.2.0.tar.gz +source_hash = dc707ed8ebf81de5cd6c7f48f54b4c8621760926cdf35a57000747c512e67b57 +#method = cargo +patch_directory = bilge-0.2-rs + +# bump this version number on every change to meson.build or the patches: +# v2 diff --git a/subprojects/bilge-impl-0.2-rs.wrap b/subprojects/bilge-impl-0.2-rs.wrap new file mode 100644 index 00000000000..4f84eca1ccd --- /dev/null +++ b/subprojects/bilge-impl-0.2-rs.wrap @@ -0,0 +1,10 @@ +[wrap-file] +directory = bilge-impl-0.2.0 +source_url = https://crates.io/api/v1/crates/bilge-impl/0.2.0/download +source_filename = bilge-impl-0.2.0.tar.gz +source_hash = feb11e002038ad243af39c2068c8a72bcf147acf05025dcdb916fcc000adb2d8 +#method = cargo +patch_directory = bilge-impl-0.2-rs + +# bump this version number on every change to meson.build or the patches: +# v2 diff --git a/subprojects/either-1-rs.wrap b/subprojects/either-1-rs.wrap new file mode 100644 index 00000000000..352e11cfee6 --- /dev/null +++ b/subprojects/either-1-rs.wrap @@ -0,0 +1,10 @@ +[wrap-file] +directory = either-1.12.0 +source_url = https://crates.io/api/v1/crates/either/1.12.0/download +source_filename = either-1.12.0.tar.gz +source_hash = 3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b +#method = cargo +patch_directory = either-1-rs + +# bump this version number on every change to meson.build or the patches: +# v2 diff --git a/subprojects/foreign-0.3-rs.wrap b/subprojects/foreign-0.3-rs.wrap new file mode 100644 index 00000000000..0d218ec2c25 --- /dev/null +++ b/subprojects/foreign-0.3-rs.wrap @@ -0,0 +1,7 @@ +[wrap-file] +directory = foreign-0.3.1 +source_url = https://crates.io/api/v1/crates/foreign/0.3.1/download +source_filename = foreign-0.3.1.tar.gz +source_hash = 17ca1b5be8c9d320daf386f1809c7acc0cb09accbae795c2001953fa50585846 +#method = cargo +patch_directory = foreign-0.3-rs diff --git a/subprojects/itertools-0.11-rs.wrap b/subprojects/itertools-0.11-rs.wrap new file mode 100644 index 00000000000..ee12d0053bc --- /dev/null +++ b/subprojects/itertools-0.11-rs.wrap @@ -0,0 +1,10 @@ +[wrap-file] +directory = itertools-0.11.0 +source_url = https://crates.io/api/v1/crates/itertools/0.11.0/download +source_filename = itertools-0.11.0.tar.gz +source_hash = b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57 +#method = cargo +patch_directory = itertools-0.11-rs + +# bump this version number on every change to meson.build or the patches: +# v2 diff --git a/subprojects/libc-0.2-rs.wrap b/subprojects/libc-0.2-rs.wrap new file mode 100644 index 00000000000..bbe08f87883 --- /dev/null +++ b/subprojects/libc-0.2-rs.wrap @@ -0,0 +1,7 @@ +[wrap-file] +directory = libc-0.2.162 +source_url = https://crates.io/api/v1/crates/libc/0.2.162/download +source_filename = libc-0.2.162.tar.gz +source_hash = 18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398 +#method = cargo 
+patch_directory = libc-0.2-rs diff --git a/subprojects/libvhost-user/libvhost-user.h b/subprojects/libvhost-user/libvhost-user.h index deb40e77b3f..2ffc58c11b1 100644 --- a/subprojects/libvhost-user/libvhost-user.h +++ b/subprojects/libvhost-user/libvhost-user.h @@ -186,11 +186,7 @@ typedef struct VhostUserShared { unsigned char uuid[UUID_LEN]; } VhostUserShared; -#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__)) -# define VU_PACKED __attribute__((gcc_struct, packed)) -#else -# define VU_PACKED __attribute__((packed)) -#endif +#define VU_PACKED __attribute__((packed)) typedef struct VhostUserMsg { int request; diff --git a/subprojects/packagefiles/anyhow-1-rs/meson.build b/subprojects/packagefiles/anyhow-1-rs/meson.build new file mode 100644 index 00000000000..348bab98b9f --- /dev/null +++ b/subprojects/packagefiles/anyhow-1-rs/meson.build @@ -0,0 +1,33 @@ +project('anyhow-1-rs', 'rust', + meson_version: '>=1.5.0', + version: '1.0.98', + license: 'MIT OR Apache-2.0', + default_options: []) + +rustc = meson.get_compiler('rust') + +rust_args = ['--cap-lints', 'allow'] +rust_args += ['--cfg', 'feature="std"'] +if rustc.version().version_compare('<1.65.0') + error('rustc version ' + rustc.version() + ' is unsupported. Please upgrade to at least 1.65.0') +endif +rust_args += [ '--cfg', 'std_backtrace' ] # >= 1.65.0 +if rustc.version().version_compare('<1.81.0') + rust_args += [ '--cfg', 'anyhow_no_core_error' ] +endif + +_anyhow_rs = static_library( + 'anyhow', + files('src/lib.rs'), + gnu_symbol_visibility: 'hidden', + override_options: ['rust_std=2018', 'build.rust_std=2018'], + rust_abi: 'rust', + rust_args: rust_args, + dependencies: [], +) + +anyhow_dep = declare_dependency( + link_with: _anyhow_rs, +) + +meson.override_dependency('anyhow-1-rs', anyhow_dep) diff --git a/subprojects/packagefiles/arbitrary-int-1-rs/meson.build b/subprojects/packagefiles/arbitrary-int-1-rs/meson.build new file mode 100644 index 00000000000..00733d1faab --- /dev/null +++ b/subprojects/packagefiles/arbitrary-int-1-rs/meson.build @@ -0,0 +1,21 @@ +project('arbitrary-int-1-rs', 'rust', + meson_version: '>=1.5.0', + version: '1.2.7', + license: 'MIT', + default_options: []) + +_arbitrary_int_rs = static_library( + 'arbitrary_int', + files('src/lib.rs'), + gnu_symbol_visibility: 'hidden', + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_args: ['--cap-lints', 'allow'], + rust_abi: 'rust', + dependencies: [], +) + +arbitrary_int_dep = declare_dependency( + link_with: _arbitrary_int_rs, +) + +meson.override_dependency('arbitrary-int-1-rs', arbitrary_int_dep) diff --git a/subprojects/packagefiles/bilge-0.2-rs/meson.build b/subprojects/packagefiles/bilge-0.2-rs/meson.build new file mode 100644 index 00000000000..ce13d0fe80f --- /dev/null +++ b/subprojects/packagefiles/bilge-0.2-rs/meson.build @@ -0,0 +1,31 @@ +project( + 'bilge-0.2-rs', + 'rust', + meson_version: '>=1.5.0', + version : '0.2.0', + license : 'MIT or Apache-2.0', +) + +subproject('arbitrary-int-1-rs', required: true) +subproject('bilge-impl-0.2-rs', required: true) + +arbitrary_int_dep = dependency('arbitrary-int-1-rs') +bilge_impl_dep = dependency('bilge-impl-0.2-rs') + +lib = static_library( + 'bilge', + 'src/lib.rs', + override_options : ['rust_std=2021', 'build.rust_std=2021'], + rust_abi : 'rust', + rust_args: ['--cap-lints', 'allow'], + dependencies: [ + arbitrary_int_dep, + bilge_impl_dep, + ], +) + +bilge_dep = declare_dependency( + link_with : [lib], +) + +meson.override_dependency('bilge-0.2-rs', bilge_dep) 
diff --git a/subprojects/packagefiles/bilge-impl-0.2-rs/meson.build b/subprojects/packagefiles/bilge-impl-0.2-rs/meson.build new file mode 100644 index 00000000000..42b03dcd53c --- /dev/null +++ b/subprojects/packagefiles/bilge-impl-0.2-rs/meson.build @@ -0,0 +1,47 @@ +project('bilge-impl-0.2-rs', 'rust', + meson_version: '>=1.5.0', + version: '0.2.0', + license: 'MIT OR Apache-2.0', + default_options: []) + +subproject('itertools-0.11-rs', required: true) +subproject('proc-macro-error-attr-1-rs', required: true) +subproject('proc-macro-error-1-rs', required: true) +subproject('quote-1-rs', required: true) +subproject('syn-2-rs', required: true) +subproject('proc-macro2-1-rs', required: true) + +itertools_dep = dependency('itertools-0.11-rs', native: true) +proc_macro_error_attr_dep = dependency('proc-macro-error-attr-1-rs', native: true) +proc_macro_error_dep = dependency('proc-macro-error-1-rs', native: true) +quote_dep = dependency('quote-1-rs', native: true) +syn_dep = dependency('syn-2-rs', native: true) +proc_macro2_dep = dependency('proc-macro2-1-rs', native: true) + +rust = import('rust') + +_bilge_impl_rs = rust.proc_macro( + 'bilge_impl', + files('src/lib.rs'), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_args: [ + '--cap-lints', 'allow', + '--cfg', 'use_fallback', + '--cfg', 'feature="syn-error"', + '--cfg', 'feature="proc-macro"', + ], + dependencies: [ + itertools_dep, + proc_macro_error_attr_dep, + proc_macro_error_dep, + quote_dep, + syn_dep, + proc_macro2_dep, + ], +) + +bilge_impl_dep = declare_dependency( + link_with: _bilge_impl_rs, +) + +meson.override_dependency('bilge-impl-0.2-rs', bilge_impl_dep) diff --git a/subprojects/packagefiles/either-1-rs/meson.build b/subprojects/packagefiles/either-1-rs/meson.build new file mode 100644 index 00000000000..04c96cc5fb3 --- /dev/null +++ b/subprojects/packagefiles/either-1-rs/meson.build @@ -0,0 +1,26 @@ +project('either-1-rs', 'rust', + meson_version: '>=1.5.0', + version: '1.12.0', + license: 'MIT OR Apache-2.0', + default_options: []) + +_either_rs = static_library( + 'either', + files('src/lib.rs'), + gnu_symbol_visibility: 'hidden', + override_options: ['rust_std=2018', 'build.rust_std=2018'], + rust_abi: 'rust', + rust_args: [ + '--cap-lints', 'allow', + '--cfg', 'feature="use_std"', + '--cfg', 'feature="use_alloc"', + ], + dependencies: [], + native: true, +) + +either_dep = declare_dependency( + link_with: _either_rs, +) + +meson.override_dependency('either-1-rs', either_dep, native: true) diff --git a/subprojects/packagefiles/foreign-0.3-rs/meson.build b/subprojects/packagefiles/foreign-0.3-rs/meson.build new file mode 100644 index 00000000000..0901c02c527 --- /dev/null +++ b/subprojects/packagefiles/foreign-0.3-rs/meson.build @@ -0,0 +1,26 @@ +project('foreign-0.3-rs', 'rust', + meson_version: '>=1.5.0', + version: '0.2.0', + license: 'MIT OR Apache-2.0', + default_options: []) + +subproject('libc-0.2-rs', required: true) +libc_rs = dependency('libc-0.2-rs') + +_foreign_rs = static_library( + 'foreign', + files('src/lib.rs'), + gnu_symbol_visibility: 'hidden', + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + rust_args: [ + '--cap-lints', 'allow', + ], + dependencies: [libc_rs], +) + +foreign_dep = declare_dependency( + link_with: _foreign_rs, +) + +meson.override_dependency('foreign-0.3-rs', foreign_dep) diff --git a/subprojects/packagefiles/itertools-0.11-rs/meson.build b/subprojects/packagefiles/itertools-0.11-rs/meson.build new file mode 100644 index 
00000000000..2a3fbe9ee5a --- /dev/null +++ b/subprojects/packagefiles/itertools-0.11-rs/meson.build @@ -0,0 +1,32 @@ +project('itertools-0.11-rs', 'rust', + meson_version: '>=1.5.0', + version: '0.11.0', + license: 'MIT OR Apache-2.0', + default_options: []) + +subproject('either-1-rs', required: true) + +either_dep = dependency('either-1-rs', native: true) + +_itertools_rs = static_library( + 'itertools', + files('src/lib.rs'), + gnu_symbol_visibility: 'hidden', + override_options: ['rust_std=2018', 'build.rust_std=2018'], + rust_abi: 'rust', + rust_args: [ + '--cap-lints', 'allow', + '--cfg', 'feature="use_std"', + '--cfg', 'feature="use_alloc"', + ], + dependencies: [ + either_dep, + ], + native: true, +) + +itertools_dep = declare_dependency( + link_with: _itertools_rs, +) + +meson.override_dependency('itertools-0.11-rs', itertools_dep, native: true) diff --git a/subprojects/packagefiles/libc-0.2-rs/meson.build b/subprojects/packagefiles/libc-0.2-rs/meson.build new file mode 100644 index 00000000000..ac4f80dba98 --- /dev/null +++ b/subprojects/packagefiles/libc-0.2-rs/meson.build @@ -0,0 +1,37 @@ +project('libc-0.2-rs', 'rust', + meson_version: '>=1.5.0', + version: '0.2.162', + license: 'MIT OR Apache-2.0', + default_options: []) + +_libc_rs = static_library( + 'libc', + files('src/lib.rs'), + gnu_symbol_visibility: 'hidden', + override_options: ['rust_std=2015', 'build.rust_std=2015'], + rust_abi: 'rust', + rust_args: [ + '--cap-lints', 'allow', + '--cfg', 'freebsd11', + '--cfg', 'libc_priv_mod_use', + '--cfg', 'libc_union', + '--cfg', 'libc_const_size_of', + '--cfg', 'libc_align', + '--cfg', 'libc_int128', + '--cfg', 'libc_core_cvoid', + '--cfg', 'libc_packedN', + '--cfg', 'libc_cfg_target_vendor', + '--cfg', 'libc_non_exhaustive', + '--cfg', 'libc_long_array', + '--cfg', 'libc_ptr_addr_of', + '--cfg', 'libc_underscore_const_names', + '--cfg', 'libc_const_extern_fn', + ], + dependencies: [], +) + +libc_dep = declare_dependency( + link_with: _libc_rs, +) + +meson.override_dependency('libc-0.2-rs', libc_dep) diff --git a/subprojects/packagefiles/proc-macro-error-1-rs/meson.build b/subprojects/packagefiles/proc-macro-error-1-rs/meson.build new file mode 100644 index 00000000000..10c2741085c --- /dev/null +++ b/subprojects/packagefiles/proc-macro-error-1-rs/meson.build @@ -0,0 +1,42 @@ +project('proc-macro-error-1-rs', 'rust', + meson_version: '>=1.5.0', + version: '1.0.4', + license: 'MIT OR Apache-2.0', + default_options: []) + +subproject('proc-macro-error-attr-1-rs', required: true) +subproject('quote-1-rs', required: true) +subproject('syn-2-rs', required: true) +subproject('proc-macro2-1-rs', required: true) + +proc_macro_error_attr_dep = dependency('proc-macro-error-attr-1-rs', native: true) +proc_macro2_dep = dependency('proc-macro2-1-rs', native: true) +quote_dep = dependency('quote-1-rs', native: true) +syn_dep = dependency('syn-2-rs', native: true) + +_proc_macro_error_rs = static_library( + 'proc_macro_error', + files('src/lib.rs'), + override_options: ['rust_std=2018', 'build.rust_std=2018'], + rust_abi: 'rust', + rust_args: [ + '--cap-lints', 'allow', + '--cfg', 'use_fallback', + '--cfg', 'feature="syn-error"', + '--cfg', 'feature="proc-macro"', + '-A', 'non_fmt_panics' + ], + dependencies: [ + proc_macro_error_attr_dep, + proc_macro2_dep, + quote_dep, + syn_dep, + ], + native: true, +) + +proc_macro_error_dep = declare_dependency( + link_with: _proc_macro_error_rs, +) + +meson.override_dependency('proc-macro-error-1-rs', proc_macro_error_dep, native: true) diff --git 
a/subprojects/packagefiles/proc-macro-error-attr-1-rs/meson.build b/subprojects/packagefiles/proc-macro-error-attr-1-rs/meson.build new file mode 100644 index 00000000000..c4c4c5e397c --- /dev/null +++ b/subprojects/packagefiles/proc-macro-error-attr-1-rs/meson.build @@ -0,0 +1,34 @@ +project('proc-macro-error-attr-1-rs', 'rust', + meson_version: '>=1.5.0', + version: '1.12.0', + license: 'MIT OR Apache-2.0', + default_options: []) + +subproject('proc-macro2-1-rs', required: true) +subproject('quote-1-rs', required: true) + +proc_macro2_dep = dependency('proc-macro2-1-rs', native: true) +quote_dep = dependency('quote-1-rs', native: true) + +rust = import('rust') +_proc_macro_error_attr_rs = rust.proc_macro( + 'proc_macro_error_attr', + files('src/lib.rs'), + override_options: ['rust_std=2018', 'build.rust_std=2018'], + rust_args: [ + '--cap-lints', 'allow', + '--cfg', 'use_fallback', + '--cfg', 'feature="syn-error"', + '--cfg', 'feature="proc-macro"' + ], + dependencies: [ + proc_macro2_dep, + quote_dep, + ], +) + +proc_macro_error_attr_dep = declare_dependency( + link_with: _proc_macro_error_attr_rs, +) + +meson.override_dependency('proc-macro-error-attr-1-rs', proc_macro_error_attr_dep, native: true) diff --git a/subprojects/packagefiles/proc-macro2-1-rs/meson.build b/subprojects/packagefiles/proc-macro2-1-rs/meson.build new file mode 100644 index 00000000000..5759df3ecc9 --- /dev/null +++ b/subprojects/packagefiles/proc-macro2-1-rs/meson.build @@ -0,0 +1,35 @@ +project('proc-macro2-1-rs', 'rust', + meson_version: '>=1.5.0', + version: '1.0.84', + license: 'MIT OR Apache-2.0', + default_options: []) + +subproject('unicode-ident-1-rs', required: true) + +unicode_ident_dep = dependency('unicode-ident-1-rs', native: true) + +_proc_macro2_rs = static_library( + 'proc_macro2', + files('src/lib.rs'), + gnu_symbol_visibility: 'hidden', + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + rust_args: [ + '--cap-lints', 'allow', + '--cfg', 'feature="proc-macro"', + '--cfg', 'no_literal_byte_character', + '--cfg', 'no_literal_c_string', + '--cfg', 'no_source_text', + '--cfg', 'wrap_proc_macro', + ], + dependencies: [ + unicode_ident_dep, + ], + native: true, +) + +proc_macro2_dep = declare_dependency( + link_with: _proc_macro2_rs, +) + +meson.override_dependency('proc-macro2-1-rs', proc_macro2_dep, native: true) diff --git a/subprojects/packagefiles/quote-1-rs/meson.build b/subprojects/packagefiles/quote-1-rs/meson.build new file mode 100644 index 00000000000..bf41fad99bb --- /dev/null +++ b/subprojects/packagefiles/quote-1-rs/meson.build @@ -0,0 +1,31 @@ +project('quote-1-rs', 'rust', + meson_version: '>=1.5.0', + version: '1.12.0', + license: 'MIT OR Apache-2.0', + default_options: []) + +subproject('proc-macro2-1-rs', required: true) + +proc_macro2_dep = dependency('proc-macro2-1-rs', native: true) + +_quote_rs = static_library( + 'quote', + files('src/lib.rs'), + gnu_symbol_visibility: 'hidden', + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + rust_args: [ + '--cap-lints', 'allow', + '--cfg', 'feature="proc-macro"', + ], + dependencies: [ + proc_macro2_dep, + ], + native: true, +) + +quote_dep = declare_dependency( + link_with: _quote_rs, +) + +meson.override_dependency('quote-1-rs', quote_dep, native: true) diff --git a/subprojects/packagefiles/syn-2-rs/meson.build b/subprojects/packagefiles/syn-2-rs/meson.build new file mode 100644 index 00000000000..a0094174084 --- /dev/null +++ b/subprojects/packagefiles/syn-2-rs/meson.build 
@@ -0,0 +1,43 @@ +project('syn-2-rs', 'rust', + meson_version: '>=1.5.0', + version: '2.0.66', + license: 'MIT OR Apache-2.0', + default_options: []) + +subproject('proc-macro2-1-rs', required: true) +subproject('quote-1-rs', required: true) +subproject('unicode-ident-1-rs', required: true) + +proc_macro2_dep = dependency('proc-macro2-1-rs', native: true) +quote_dep = dependency('quote-1-rs', native: true) +unicode_ident_dep = dependency('unicode-ident-1-rs', native: true) + +_syn_rs = static_library( + 'syn', + files('src/lib.rs'), + gnu_symbol_visibility: 'hidden', + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + rust_args: [ + '--cap-lints', 'allow', + '--cfg', 'feature="full"', + '--cfg', 'feature="derive"', + '--cfg', 'feature="parsing"', + '--cfg', 'feature="printing"', + '--cfg', 'feature="clone-impls"', + '--cfg', 'feature="proc-macro"', + '--cfg', 'feature="extra-traits"', + ], + dependencies: [ + quote_dep, + proc_macro2_dep, + unicode_ident_dep, + ], + native: true, +) + +syn_dep = declare_dependency( + link_with: _syn_rs, +) + +meson.override_dependency('syn-2-rs', syn_dep, native: true) diff --git a/subprojects/packagefiles/unicode-ident-1-rs/meson.build b/subprojects/packagefiles/unicode-ident-1-rs/meson.build new file mode 100644 index 00000000000..11a5dab97df --- /dev/null +++ b/subprojects/packagefiles/unicode-ident-1-rs/meson.build @@ -0,0 +1,22 @@ +project('unicode-ident-1-rs', 'rust', + meson_version: '>=1.5.0', + version: '1.0.12', + license: '(MIT OR Apache-2.0) AND Unicode-DFS-2016', + default_options: []) + +_unicode_ident_rs = static_library( + 'unicode_ident', + files('src/lib.rs'), + gnu_symbol_visibility: 'hidden', + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + rust_args: ['--cap-lints', 'allow'], + dependencies: [], + native: true, +) + +unicode_ident_dep = declare_dependency( + link_with: _unicode_ident_rs, +) + +meson.override_dependency('unicode-ident-1-rs', unicode_ident_dep, native: true) diff --git a/subprojects/proc-macro-error-1-rs.wrap b/subprojects/proc-macro-error-1-rs.wrap new file mode 100644 index 00000000000..59f892f7825 --- /dev/null +++ b/subprojects/proc-macro-error-1-rs.wrap @@ -0,0 +1,10 @@ +[wrap-file] +directory = proc-macro-error-1.0.4 +source_url = https://crates.io/api/v1/crates/proc-macro-error/1.0.4/download +source_filename = proc-macro-error-1.0.4.tar.gz +source_hash = da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c +#method = cargo +patch_directory = proc-macro-error-1-rs + +# bump this version number on every change to meson.build or the patches: +# v2 diff --git a/subprojects/proc-macro-error-attr-1-rs.wrap b/subprojects/proc-macro-error-attr-1-rs.wrap new file mode 100644 index 00000000000..5aeb224a103 --- /dev/null +++ b/subprojects/proc-macro-error-attr-1-rs.wrap @@ -0,0 +1,10 @@ +[wrap-file] +directory = proc-macro-error-attr-1.0.4 +source_url = https://crates.io/api/v1/crates/proc-macro-error-attr/1.0.4/download +source_filename = proc-macro-error-attr-1.0.4.tar.gz +source_hash = a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869 +#method = cargo +patch_directory = proc-macro-error-attr-1-rs + +# bump this version number on every change to meson.build or the patches: +# v2 diff --git a/subprojects/proc-macro2-1-rs.wrap b/subprojects/proc-macro2-1-rs.wrap new file mode 100644 index 00000000000..6c9369f0df3 --- /dev/null +++ b/subprojects/proc-macro2-1-rs.wrap @@ -0,0 +1,10 @@ +[wrap-file] +directory = proc-macro2-1.0.84 
+source_url = https://crates.io/api/v1/crates/proc-macro2/1.0.84/download +source_filename = proc-macro2-1.0.84.0.tar.gz +source_hash = ec96c6a92621310b51366f1e28d05ef11489516e93be030060e5fc12024a49d6 +#method = cargo +patch_directory = proc-macro2-1-rs + +# bump this version number on every change to meson.build or the patches: +# v2 diff --git a/subprojects/quote-1-rs.wrap b/subprojects/quote-1-rs.wrap new file mode 100644 index 00000000000..8b721dfa00b --- /dev/null +++ b/subprojects/quote-1-rs.wrap @@ -0,0 +1,10 @@ +[wrap-file] +directory = quote-1.0.36 +source_url = https://crates.io/api/v1/crates/quote/1.0.36/download +source_filename = quote-1.0.36.0.tar.gz +source_hash = 0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7 +#method = cargo +patch_directory = quote-1-rs + +# bump this version number on every change to meson.build or the patches: +# v2 diff --git a/subprojects/syn-2-rs.wrap b/subprojects/syn-2-rs.wrap new file mode 100644 index 00000000000..d79cf750fb4 --- /dev/null +++ b/subprojects/syn-2-rs.wrap @@ -0,0 +1,10 @@ +[wrap-file] +directory = syn-2.0.66 +source_url = https://crates.io/api/v1/crates/syn/2.0.66/download +source_filename = syn-2.0.66.0.tar.gz +source_hash = c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5 +#method = cargo +patch_directory = syn-2-rs + +# bump this version number on every change to meson.build or the patches: +# v2 diff --git a/subprojects/unicode-ident-1-rs.wrap b/subprojects/unicode-ident-1-rs.wrap new file mode 100644 index 00000000000..50988f612e2 --- /dev/null +++ b/subprojects/unicode-ident-1-rs.wrap @@ -0,0 +1,10 @@ +[wrap-file] +directory = unicode-ident-1.0.12 +source_url = https://crates.io/api/v1/crates/unicode-ident/1.0.12/download +source_filename = unicode-ident-1.0.12.tar.gz +source_hash = 3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b +#method = cargo +patch_directory = unicode-ident-1-rs + +# bump this version number on every change to meson.build or the patches: +# v2 diff --git a/system/arch_init.c b/system/arch_init.c index 79716f959ba..e85736884c9 100644 --- a/system/arch_init.c +++ b/system/arch_init.c @@ -22,29 +22,9 @@ * THE SOFTWARE. */ #include "qemu/osdep.h" -#include "qemu/module.h" -#include "sysemu/arch_init.h" +#include "system/arch_init.h" -#ifdef TARGET_SPARC -int graphic_width = 1024; -int graphic_height = 768; -int graphic_depth = 8; -#elif defined(TARGET_M68K) -int graphic_width = 800; -int graphic_height = 600; -int graphic_depth = 8; -#else -int graphic_width = 800; -int graphic_height = 600; -int graphic_depth = 32; -#endif - -const uint32_t arch_type = QEMU_ARCH; - -void qemu_init_arch_modules(void) +bool qemu_arch_available(unsigned qemu_arch_mask) { -#ifdef CONFIG_MODULES - module_init_info(qemu_modinfo); - module_allow_arch(TARGET_NAME); -#endif + return qemu_arch_mask & QEMU_ARCH; } diff --git a/system/async-teardown.c b/system/async-teardown.c index 396963c0913..9148ee8d041 100644 --- a/system/async-teardown.c +++ b/system/async-teardown.c @@ -26,40 +26,6 @@ static pid_t the_ppid; -/* - * Close all open file descriptors. - */ -static void close_all_open_fd(void) -{ - struct dirent *de; - int fd, dfd; - DIR *dir; - -#ifdef CONFIG_CLOSE_RANGE - int r = close_range(0, ~0U, 0); - if (!r) { - /* Success, no need to try other ways. */ - return; - } -#endif - - dir = opendir("/proc/self/fd"); - if (!dir) { - /* If /proc is not mounted, there is nothing that can be done. */ - return; - } - /* Avoid closing the directory. 
*/ - dfd = dirfd(dir); - - for (de = readdir(dir); de; de = readdir(dir)) { - fd = atoi(de->d_name); - if (fd != dfd) { - close(fd); - } - } - closedir(dir); -} - static void hup_handler(int signal) { /* Check every second if this process has been reparented. */ @@ -85,9 +51,8 @@ static int async_teardown_fn(void *arg) /* * Close all file descriptors that might have been inherited from the * main qemu process when doing clone, needed to make libvirt happy. - * Not using close_range for increased compatibility with older kernels. */ - close_all_open_fd(); + qemu_close_all_open_fd(NULL, 0); /* Set up a handler for SIGHUP and unblock SIGHUP. */ sigaction(SIGHUP, &sa, NULL); diff --git a/system/balloon.c b/system/balloon.c index fda7af832e4..311fa5058b9 100644 --- a/system/balloon.c +++ b/system/balloon.c @@ -26,8 +26,8 @@ #include "qemu/osdep.h" #include "qemu/atomic.h" -#include "sysemu/kvm.h" -#include "sysemu/balloon.h" +#include "system/kvm.h" +#include "system/balloon.h" #include "qapi/error.h" #include "qapi/qapi-commands-machine.h" #include "qapi/qmp/qerror.h" diff --git a/system/bootdevice.c b/system/bootdevice.c index 2579b26dc8b..1845be4507e 100644 --- a/system/bootdevice.c +++ b/system/bootdevice.c @@ -24,10 +24,10 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qapi/visitor.h" #include "qemu/error-report.h" -#include "sysemu/reset.h" +#include "system/reset.h" #include "hw/qdev-core.h" #include "hw/boards.h" diff --git a/system/cpu-throttle.c b/system/cpu-throttle.c deleted file mode 100644 index c951a6c65e1..00000000000 --- a/system/cpu-throttle.c +++ /dev/null @@ -1,128 +0,0 @@ -/* - * QEMU System Emulator - * - * Copyright (c) 2003-2008 Fabrice Bellard - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -#include "qemu/osdep.h" -#include "qemu/thread.h" -#include "hw/core/cpu.h" -#include "qemu/main-loop.h" -#include "sysemu/cpus.h" -#include "sysemu/cpu-throttle.h" - -/* vcpu throttling controls */ -static QEMUTimer *throttle_timer; -static unsigned int throttle_percentage; - -#define CPU_THROTTLE_PCT_MIN 1 -#define CPU_THROTTLE_PCT_MAX 99 -#define CPU_THROTTLE_TIMESLICE_NS 10000000 - -static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque) -{ - double pct; - double throttle_ratio; - int64_t sleeptime_ns, endtime_ns; - - if (!cpu_throttle_get_percentage()) { - return; - } - - pct = (double)cpu_throttle_get_percentage() / 100; - throttle_ratio = pct / (1 - pct); - /* Add 1ns to fix double's rounding error (like 0.9999999...) */ - sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1); - endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns; - while (sleeptime_ns > 0 && !cpu->stop) { - if (sleeptime_ns > SCALE_MS) { - qemu_cond_timedwait_bql(cpu->halt_cond, - sleeptime_ns / SCALE_MS); - } else { - bql_unlock(); - g_usleep(sleeptime_ns / SCALE_US); - bql_lock(); - } - sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME); - } - qatomic_set(&cpu->throttle_thread_scheduled, 0); -} - -static void cpu_throttle_timer_tick(void *opaque) -{ - CPUState *cpu; - double pct; - - /* Stop the timer if needed */ - if (!cpu_throttle_get_percentage()) { - return; - } - CPU_FOREACH(cpu) { - if (!qatomic_xchg(&cpu->throttle_thread_scheduled, 1)) { - async_run_on_cpu(cpu, cpu_throttle_thread, - RUN_ON_CPU_NULL); - } - } - - pct = (double)cpu_throttle_get_percentage() / 100; - timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) + - CPU_THROTTLE_TIMESLICE_NS / (1 - pct)); -} - -void cpu_throttle_set(int new_throttle_pct) -{ - /* - * boolean to store whether throttle is already active or not, - * before modifying throttle_percentage - */ - bool throttle_active = cpu_throttle_active(); - - /* Ensure throttle percentage is within valid range */ - new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX); - new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN); - - qatomic_set(&throttle_percentage, new_throttle_pct); - - if (!throttle_active) { - cpu_throttle_timer_tick(NULL); - } -} - -void cpu_throttle_stop(void) -{ - qatomic_set(&throttle_percentage, 0); -} - -bool cpu_throttle_active(void) -{ - return (cpu_throttle_get_percentage() != 0); -} - -int cpu_throttle_get_percentage(void) -{ - return qatomic_read(&throttle_percentage); -} - -void cpu_throttle_init(void) -{ - throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT, - cpu_throttle_timer_tick, NULL); -} diff --git a/system/cpu-timers.c b/system/cpu-timers.c index 0b31c9a1b6a..cb35fa62b8a 100644 --- a/system/cpu-timers.c +++ b/system/cpu-timers.c @@ -27,16 +27,16 @@ #include "migration/vmstate.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "sysemu/cpus.h" +#include "system/cpus.h" #include "qemu/main-loop.h" #include "qemu/option.h" #include "qemu/seqlock.h" -#include "sysemu/replay.h" -#include "sysemu/runstate.h" +#include "system/replay.h" +#include "system/runstate.h" #include "hw/core/cpu.h" -#include "sysemu/cpu-timers.h" -#include "sysemu/cpu-throttle.h" -#include "sysemu/cpu-timers-internal.h" +#include "system/cpu-timers.h" +#include "system/cpu-timers-internal.h" +#include "exec/icount.h" /* clock and ticks */ @@ -272,6 +272,4 @@ void cpu_timers_init(void) seqlock_init(&timers_state.vm_clock_seqlock); 
qemu_spin_init(&timers_state.vm_clock_lock); vmstate_register(NULL, 0, &vmstate_timers, &timers_state); - - cpu_throttle_init(); } diff --git a/system/cpus.c b/system/cpus.c index 5e3a988a0ae..256723558d0 100644 --- a/system/cpus.c +++ b/system/cpus.c @@ -31,18 +31,19 @@ #include "qapi/qapi-events-run-state.h" #include "qapi/qmp/qerror.h" #include "exec/gdbstub.h" -#include "sysemu/hw_accel.h" +#include "accel/accel-cpu-ops.h" +#include "system/hw_accel.h" #include "exec/cpu-common.h" #include "qemu/thread.h" #include "qemu/main-loop.h" #include "qemu/plugin.h" -#include "sysemu/cpus.h" +#include "system/cpus.h" #include "qemu/guest-random.h" #include "hw/nmi.h" -#include "sysemu/replay.h" -#include "sysemu/runstate.h" -#include "sysemu/cpu-timers.h" -#include "sysemu/whpx.h" +#include "system/replay.h" +#include "system/runstate.h" +#include "system/cpu-timers.h" +#include "system/whpx.h" #include "hw/boards.h" #include "hw/hw.h" #include "trace.h" @@ -253,7 +254,7 @@ int64_t cpus_get_elapsed_ticks(void) return cpu_get_ticks(); } -static void generic_handle_interrupt(CPUState *cpu, int mask) +void generic_handle_interrupt(CPUState *cpu, int mask) { cpu->interrupt_request |= mask; @@ -264,11 +265,9 @@ static void generic_handle_interrupt(CPUState *cpu, int mask) void cpu_interrupt(CPUState *cpu, int mask) { - if (cpus_accel->handle_interrupt) { - cpus_accel->handle_interrupt(cpu, mask); - } else { - generic_handle_interrupt(cpu, mask); - } + g_assert(bql_locked()); + + cpus_accel->handle_interrupt(cpu, mask); } /* @@ -298,14 +297,18 @@ static int do_vm_stop(RunState state, bool send_stop) if (oldstate == RUN_STATE_RUNNING) { pause_all_vcpus(); } - vm_state_notify(0, state); + ret = vm_state_notify(0, state); if (send_stop) { qapi_event_send_stop(); } } bdrv_drain_all(); - ret = bdrv_flush_all(); + /* + * Even if vm_state_notify() return failure, + * it would be better to flush as before. + */ + ret |= bdrv_flush_all(); trace_vm_stop_flush_all(ret); return ret; @@ -514,6 +517,20 @@ bool qemu_in_vcpu_thread(void) QEMU_DEFINE_STATIC_CO_TLS(bool, bql_locked) +static uint32_t bql_unlock_blocked; + +void bql_block_unlock(bool increase) +{ + uint32_t new_value; + + assert(bql_locked()); + + /* check for overflow! */ + new_value = bql_unlock_blocked + increase - !increase; + assert((new_value > bql_unlock_blocked) == increase); + bql_unlock_blocked = new_value; +} + bool bql_locked(void) { return get_bql_locked(); @@ -524,6 +541,12 @@ bool qemu_in_main_thread(void) return bql_locked(); } +void rust_bql_mock_lock(void) +{ + error_report("This function should be used only from tests"); + abort(); +} + /* * The BQL is taken from so many places that it is worth profiling the * callers directly, instead of funneling them all through a single function. 
@@ -540,6 +563,7 @@ void bql_lock_impl(const char *file, int line) void bql_unlock(void) { g_assert(bql_locked()); + g_assert(!bql_unlock_blocked); set_bql_locked(false); qemu_mutex_unlock(&bql); } @@ -652,6 +676,8 @@ void cpus_register_accel(const AccelOpsClass *ops) { assert(ops != NULL); assert(ops->create_vcpu_thread != NULL); /* mandatory */ + assert(ops->handle_interrupt); + cpus_accel = ops; } @@ -666,7 +692,6 @@ void qemu_init_vcpu(CPUState *cpu) { MachineState *ms = MACHINE(qdev_get_machine()); - cpu->nr_cores = machine_topo_get_cores_per_socket(ms); cpu->nr_threads = ms->smp.threads; cpu->stopped = true; cpu->random_seed = qemu_guest_random_seed_thread_part1(); @@ -743,9 +768,7 @@ int vm_prepare_start(bool step_pending) * WHPX accelerator needs to know whether we are going to step * any CPUs, before starting the first one. */ - if (cpus_accel->synchronize_pre_resume) { - cpus_accel->synchronize_pre_resume(step_pending); - } + accel_pre_resume(MACHINE(qdev_get_machine()), step_pending); /* We are sending this now, but the CPUs will be resumed shortly later */ qapi_event_send_resume(); @@ -792,14 +815,14 @@ int vm_stop_force_state(RunState state) } } -void qmp_memsave(int64_t addr, int64_t size, const char *filename, +void qmp_memsave(uint64_t addr, uint64_t size, const char *filename, bool has_cpu, int64_t cpu_index, Error **errp) { FILE *f; - uint32_t l; + uint64_t l; CPUState *cpu; uint8_t buf[1024]; - int64_t orig_addr = addr, orig_size = size; + uint64_t orig_addr = addr, orig_size = size; if (!has_cpu) { cpu_index = 0; @@ -823,7 +846,7 @@ void qmp_memsave(int64_t addr, int64_t size, const char *filename, if (l > size) l = size; if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) { - error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64 + error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRIu64 " specified", orig_addr, orig_size); goto exit; } @@ -840,11 +863,11 @@ void qmp_memsave(int64_t addr, int64_t size, const char *filename, fclose(f); } -void qmp_pmemsave(int64_t addr, int64_t size, const char *filename, +void qmp_pmemsave(uint64_t addr, uint64_t size, const char *filename, Error **errp) { FILE *f; - uint32_t l; + uint64_t l; uint8_t buf[1024]; f = fopen(filename, "wb"); diff --git a/system/datadir.c b/system/datadir.c index c9237cb5d4a..f96f8fc2646 100644 --- a/system/datadir.c +++ b/system/datadir.c @@ -30,7 +30,7 @@ static const char *data_dir[16]; static int data_dir_idx; -char *qemu_find_file(int type, const char *name) +char *qemu_find_file(QemuFileType type, const char *name) { int i; const char *subdir; @@ -46,6 +46,9 @@ char *qemu_find_file(int type, const char *name) case QEMU_FILE_TYPE_BIOS: subdir = ""; break; + case QEMU_FILE_TYPE_DTB: + subdir = "dtb/"; + break; case QEMU_FILE_TYPE_KEYMAP: subdir = "keymaps/"; break; diff --git a/system/device_tree-stub.c b/system/device_tree-stub.c index bddda6fa37a..428330b0fec 100644 --- a/system/device_tree-stub.c +++ b/system/device_tree-stub.c @@ -5,6 +5,9 @@ #ifdef CONFIG_FDT void qmp_dumpdtb(const char *filename, Error **errp) { - error_setg(errp, "This machine doesn't have a FDT"); + ERRP_GUARD(); + + error_setg(errp, "This machine doesn't have an FDT"); + error_append_hint(errp, "(this machine type definitely doesn't use FDT)\n"); } #endif diff --git a/system/device_tree.c b/system/device_tree.c index 2e38259d34f..aa3fe9516f3 100644 --- a/system/device_tree.c +++ b/system/device_tree.c @@ -23,12 +23,12 @@ #include "qemu/bswap.h" #include "qemu/cutils.h" #include "qemu/guest-random.h" -#include 
"sysemu/device_tree.h" +#include "system/device_tree.h" #include "hw/loader.h" #include "hw/boards.h" #include "qemu/config-file.h" #include "qapi/qapi-commands-machine.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "monitor/hmp.h" #include @@ -594,21 +594,6 @@ int qemu_fdt_add_path(void *fdt, const char *path) return retval; } -void qemu_fdt_dumpdtb(void *fdt, int size) -{ - const char *dumpdtb = current_machine->dumpdtb; - - if (dumpdtb) { - /* Dump the dtb to a file and quit */ - if (g_file_set_contents(dumpdtb, fdt, size, NULL)) { - info_report("dtb dumped to %s. Exiting.", dumpdtb); - exit(0); - } - error_report("%s: Failed dumping dtb to %s", __func__, dumpdtb); - exit(1); - } -} - int qemu_fdt_setprop_sized_cells_from_array(void *fdt, const char *node_path, const char *property, @@ -650,11 +635,16 @@ int qemu_fdt_setprop_sized_cells_from_array(void *fdt, void qmp_dumpdtb(const char *filename, Error **errp) { + ERRP_GUARD(); + g_autoptr(GError) err = NULL; uint32_t size; if (!current_machine->fdt) { - error_setg(errp, "This machine doesn't have a FDT"); + error_setg(errp, "This machine doesn't have an FDT"); + error_append_hint(errp, + "(Perhaps it doesn't support FDT at all, or perhaps " + "you need to provide an FDT with the -fdt option?)\n"); return; } diff --git a/system/dirtylimit.c b/system/dirtylimit.c index ab20da34bb9..b48c0d4b3da 100644 --- a/system/dirtylimit.c +++ b/system/dirtylimit.c @@ -13,16 +13,16 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" #include "qapi/qapi-commands-migration.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/error.h" -#include "sysemu/dirtyrate.h" -#include "sysemu/dirtylimit.h" +#include "system/dirtyrate.h" +#include "system/dirtylimit.h" #include "monitor/hmp.h" #include "monitor/monitor.h" -#include "exec/memory.h" +#include "system/memory.h" #include "exec/target_page.h" #include "hw/boards.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "trace.h" #include "migration/misc.h" @@ -80,8 +80,7 @@ static void vcpu_dirty_rate_stat_collect(void) int i = 0; int64_t period = DIRTYLIMIT_CALC_TIME_MS; - if (migrate_dirty_limit() && - migration_is_active()) { + if (migrate_dirty_limit() && migration_is_running()) { period = migrate_vcpu_dirty_limit_period(); } @@ -338,8 +337,6 @@ static void dirtylimit_adjust_throttle(CPUState *cpu) if (!dirtylimit_done(quota, current)) { dirtylimit_set_throttle(cpu, quota, current); } - - return; } void dirtylimit_process(void) diff --git a/system/dma-helpers.c b/system/dma-helpers.c index 74013308f52..0d592f64680 100644 --- a/system/dma-helpers.c +++ b/system/dma-helpers.c @@ -8,12 +8,12 @@ */ #include "qemu/osdep.h" -#include "sysemu/block-backend.h" -#include "sysemu/dma.h" -#include "trace/trace-root.h" +#include "system/block-backend.h" +#include "system/dma.h" +#include "trace.h" #include "qemu/thread.h" #include "qemu/main-loop.h" -#include "sysemu/cpu-timers.h" +#include "exec/icount.h" #include "qemu/range.h" /* #define DEBUG_IOMMU */ @@ -211,7 +211,7 @@ static const AIOCBInfo dma_aiocb_info = { .cancel_async = dma_aio_cancel, }; -BlockAIOCB *dma_blk_io(AioContext *ctx, +BlockAIOCB *dma_blk_io( QEMUSGList *sg, uint64_t offset, uint32_t align, DMAIOFunc *io_func, void *io_func_opaque, BlockCompletionFunc *cb, @@ -223,7 +223,7 @@ BlockAIOCB *dma_blk_io(AioContext *ctx, dbs->acb = NULL; dbs->sg = sg; - dbs->ctx = ctx; + dbs->ctx = qemu_get_current_aio_context(); dbs->offset = offset; dbs->align = align; dbs->sg_cur_index = 0; @@ -251,7 +251,7 
@@ BlockAIOCB *dma_blk_read(BlockBackend *blk, QEMUSGList *sg, uint64_t offset, uint32_t align, void (*cb)(void *opaque, int ret), void *opaque) { - return dma_blk_io(blk_get_aio_context(blk), sg, offset, align, + return dma_blk_io(sg, offset, align, dma_blk_read_io_func, blk, cb, opaque, DMA_DIRECTION_FROM_DEVICE); } @@ -269,7 +269,7 @@ BlockAIOCB *dma_blk_write(BlockBackend *blk, QEMUSGList *sg, uint64_t offset, uint32_t align, void (*cb)(void *opaque, int ret), void *opaque) { - return dma_blk_io(blk_get_aio_context(blk), sg, offset, align, + return dma_blk_io(sg, offset, align, dma_blk_write_io_func, blk, cb, opaque, DMA_DIRECTION_TO_DEVICE); } diff --git a/system/globals-target.c b/system/globals-target.c new file mode 100644 index 00000000000..989720591e7 --- /dev/null +++ b/system/globals-target.c @@ -0,0 +1,24 @@ +/* + * Global variables that should not exist (target specific) + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * SPDX-License-Identifier: MIT + */ + +#include "qemu/osdep.h" +#include "system/system.h" + +#ifdef TARGET_SPARC +int graphic_width = 1024; +int graphic_height = 768; +int graphic_depth = 8; +#elif defined(TARGET_M68K) +int graphic_width = 800; +int graphic_height = 600; +int graphic_depth = 8; +#else +int graphic_width = 800; +int graphic_height = 600; +int graphic_depth = 32; +#endif diff --git a/system/globals.c b/system/globals.c index d602a04fa28..9640c9511e9 100644 --- a/system/globals.c +++ b/system/globals.c @@ -28,19 +28,28 @@ #include "hw/loader.h" #include "hw/xen/xen.h" #include "net/net.h" -#include "sysemu/cpus.h" -#include "sysemu/sysemu.h" +#include "system/cpus.h" +#include "system/system.h" + +bool should_mlock(MlockState state) +{ + return state == MLOCK_ON || state == MLOCK_ON_FAULT; +} + +bool is_mlock_on_fault(MlockState state) +{ + return state == MLOCK_ON_FAULT; +} enum vga_retrace_method vga_retrace_method = VGA_RETRACE_DUMB; int display_opengl; const char* keyboard_layout; -bool enable_mlock; +MlockState mlock_state; bool enable_cpu_pm; int autostart = 1; int vga_interface_type = VGA_NONE; bool vga_interface_created; Chardev *parallel_hds[MAX_PARALLEL_PORTS]; -int graphic_rotate; QEMUOptionRom option_rom[MAX_OPTION_ROMS]; int nb_option_roms; int old_param; @@ -49,7 +58,6 @@ unsigned int nb_prom_envs; const char *prom_envs[MAX_PROM_ENVS]; uint8_t *boot_splash_filedata; int only_migratable; /* turn it off unless user states otherwise */ -int icount_align_option; /* The bytes in qemu_uuid are in the order specified by RFC4122, _not_ in the * little-endian "wire format" described in the SMBIOS 2.6 specification. 
diff --git a/system/ioport.c b/system/ioport.c index fd551d0375e..4f96e9119fc 100644 --- a/system/ioport.c +++ b/system/ioport.c @@ -26,10 +26,9 @@ */ #include "qemu/osdep.h" -#include "cpu.h" -#include "exec/ioport.h" -#include "exec/memory.h" -#include "exec/address-spaces.h" +#include "system/ioport.h" +#include "system/memory.h" +#include "system/address-spaces.h" #include "trace.h" struct MemoryRegionPortioList { @@ -258,7 +257,7 @@ static void portio_list_add_1(PortioList *piolist, object_ref(&mrpio->mr); object_unparent(OBJECT(&mrpio->mr)); if (!piolist->owner) { - owner = container_get(qdev_get_machine(), "/unattached"); + owner = machine_get_container("unattached"); } else { owner = piolist->owner; } diff --git a/system/main.c b/system/main.c index 9b91d21ea8c..b8f7157cc34 100644 --- a/system/main.c +++ b/system/main.c @@ -24,26 +24,73 @@ #include "qemu/osdep.h" #include "qemu-main.h" -#include "sysemu/sysemu.h" +#include "qemu/main-loop.h" +#include "system/replay.h" +#include "system/system.h" #ifdef CONFIG_SDL +/* + * SDL insists on wrapping the main() function with its own implementation on + * some platforms; it does so via a macro that renames our main function, so + * must be #included here even with no SDL code called from this file. + */ #include <SDL.h> #endif -int qemu_default_main(void) +#ifdef CONFIG_DARWIN +#include <CoreFoundation/CoreFoundation.h> +#endif + +static void *qemu_default_main(void *opaque) { int status; + replay_mutex_lock(); + bql_lock(); status = qemu_main_loop(); qemu_cleanup(status); + bql_unlock(); + replay_mutex_unlock(); - return status; + exit(status); } -int (*qemu_main)(void) = qemu_default_main; +int (*qemu_main)(void); + +#ifdef CONFIG_DARWIN +static int os_darwin_cfrunloop_main(void) +{ + CFRunLoopRun(); + g_assert_not_reached(); +} +int (*qemu_main)(void) = os_darwin_cfrunloop_main; +#endif int main(int argc, char **argv) { qemu_init(argc, argv); - return qemu_main(); + + /* + * qemu_init acquires the BQL and replay mutex lock. BQL is acquired when + * initializing cpus, to block associated threads until initialization is + * complete. Replay_mutex lock is acquired on initialization, because it + * must be held when configuring icount_mode. + * + * On MacOS, qemu main event loop runs in a background thread, as main + * thread must be reserved for UI. Thus, we need to transfer lock ownership, + * and the simplest way to do that is to release them, and reacquire them + * from qemu_default_main. + */ + bql_unlock(); + replay_mutex_unlock(); + + if (qemu_main) { + QemuThread main_loop_thread; + qemu_thread_create(&main_loop_thread, "qemu_main", + qemu_default_main, NULL, QEMU_THREAD_DETACHED); + return qemu_main(); + } else { + qemu_default_main(NULL); + g_assert_not_reached(); + } } diff --git a/include/exec/memory-internal.h b/system/memory-internal.h similarity index 75% rename from include/exec/memory-internal.h rename to system/memory-internal.h index 100c1237ac2..46f758fa7e4 100644 --- a/include/exec/memory-internal.h +++ b/system/memory-internal.h @@ -11,17 +11,9 @@ * */ -/* - * This header is for use by exec.c, memory.c and accel/tcg/cputlb.c ONLY, - * for declarations which are shared between the memory subsystem's - * internals and the TCG TLB code. Do not include it from elsewhere. 
- */ - #ifndef MEMORY_INTERNAL_H #define MEMORY_INTERNAL_H -#include "cpu.h" - #ifndef CONFIG_USER_ONLY static inline AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv) { @@ -45,5 +37,21 @@ void address_space_dispatch_free(AddressSpaceDispatch *d); void mtree_print_dispatch(struct AddressSpaceDispatch *d, MemoryRegion *root); + +/* returns true if end is big endian. */ +static inline bool devend_big_endian(enum device_endian end) +{ + if (end == DEVICE_NATIVE_ENDIAN) { + return target_big_endian(); + } + return end == DEVICE_BIG_ENDIAN; +} + +/* enum device_endian to MemOp. */ +static inline MemOp devend_memop(enum device_endian end) +{ + return devend_big_endian(end) ? MO_BE : MO_LE; +} + #endif #endif diff --git a/system/memory.c b/system/memory.c index 5e6eb459d5d..56465479406 100644 --- a/system/memory.c +++ b/system/memory.c @@ -16,24 +16,26 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "qapi/error.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qapi/visitor.h" #include "qemu/bitops.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/qemu-print.h" +#include "qemu/target-info.h" #include "qom/object.h" #include "trace.h" - -#include "exec/memory-internal.h" -#include "exec/ram_addr.h" -#include "sysemu/kvm.h" -#include "sysemu/runstate.h" -#include "sysemu/tcg.h" +#include "system/ram_addr.h" +#include "system/kvm.h" +#include "system/runstate.h" +#include "system/tcg.h" #include "qemu/accel.h" +#include "accel/accel-ops.h" #include "hw/boards.h" #include "migration/vmstate.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" + +#include "memory-internal.h" //#define DEBUG_UNASSIGNED @@ -353,15 +355,6 @@ static void flatview_simplify(FlatView *view) } } -static bool memory_region_big_endian(MemoryRegion *mr) -{ -#if TARGET_BIG_ENDIAN - return mr->ops->endianness != DEVICE_LITTLE_ENDIAN; -#else - return mr->ops->endianness == DEVICE_BIG_ENDIAN; -#endif -} - static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op) { if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) { @@ -563,7 +556,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr, /* FIXME: support unaligned access? 
*/ access_size = MAX(MIN(size, access_size_max), access_size_min); access_mask = MAKE_64BIT_MASK(0, access_size * 8); - if (memory_region_big_endian(mr)) { + if (devend_big_endian(mr->ops->endianness)) { for (i = 0; i < size; i += access_size) { r |= access_fn(mr, addr + i, value, access_size, (size - access_size - i) * 8, access_mask, attrs); @@ -941,6 +934,38 @@ static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as) } } +static void +flat_range_coalesced_io_notify_listener_add_del(FlatRange *fr, + MemoryRegionSection *mrs, + MemoryListener *listener, + AddressSpace *as, bool add) +{ + CoalescedMemoryRange *cmr; + MemoryRegion *mr = fr->mr; + AddrRange tmp; + + QTAILQ_FOREACH(cmr, &mr->coalesced, link) { + tmp = addrrange_shift(cmr->addr, + int128_sub(fr->addr.start, + int128_make64(fr->offset_in_region))); + + if (!addrrange_intersects(tmp, fr->addr)) { + return; + } + tmp = addrrange_intersection(tmp, fr->addr); + + if (add && listener->coalesced_io_add) { + listener->coalesced_io_add(listener, mrs, + int128_get64(tmp.start), + int128_get64(tmp.size)); + } else if (!add && listener->coalesced_io_del) { + listener->coalesced_io_del(listener, mrs, + int128_get64(tmp.start), + int128_get64(tmp.size)); + } + } +} + static void address_space_update_topology_pass(AddressSpace *as, const FlatView *old_view, const FlatView *new_view, @@ -1206,7 +1231,7 @@ static void memory_region_do_init(MemoryRegion *mr, char *name_array = g_strdup_printf("%s[*]", escaped_name); if (!owner) { - owner = container_get(qdev_get_machine(), "/unattached"); + owner = machine_get_container("unattached"); } object_property_add_child(owner, name_array, OBJECT(mr)); @@ -1359,7 +1384,7 @@ static void memory_region_ram_device_write(void *opaque, hwaddr addr, static const MemoryRegionOps ram_device_mem_ops = { .read = memory_region_ram_device_read, .write = memory_region_ram_device_write, - .endianness = DEVICE_HOST_ENDIAN, + .endianness = HOST_BIG_ENDIAN ? DEVICE_BIG_ENDIAN : DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 1, .max_access_size = 8, @@ -1380,7 +1405,7 @@ bool memory_region_access_valid(MemoryRegion *mr, { if (mr->ops->valid.accepts && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX + qemu_log_mask(LOG_INVALID_MEM, "Invalid %s at addr 0x%" HWADDR_PRIX ", size %u, region '%s', reason: rejected\n", is_write ? "write" : "read", addr, size, memory_region_name(mr)); @@ -1388,7 +1413,7 @@ bool memory_region_access_valid(MemoryRegion *mr, } if (!mr->ops->valid.unaligned && (addr & (size - 1))) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX + qemu_log_mask(LOG_INVALID_MEM, "Invalid %s at addr 0x%" HWADDR_PRIX ", size %u, region '%s', reason: unaligned\n", is_write ? "write" : "read", addr, size, memory_region_name(mr)); @@ -1402,7 +1427,7 @@ bool memory_region_access_valid(MemoryRegion *mr, if (size > mr->ops->valid.max_access_size || size < mr->ops->valid.min_access_size) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX + qemu_log_mask(LOG_INVALID_MEM, "Invalid %s at addr 0x%" HWADDR_PRIX ", size %u, region '%s', reason: invalid size " "(min:%u max:%u)\n", is_write ? 
"write" : "read", @@ -1604,7 +1629,7 @@ bool memory_region_init_resizeable_ram(MemoryRegion *mr, return true; } -#ifdef CONFIG_POSIX +#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN) bool memory_region_init_ram_from_file(MemoryRegion *mr, Object *owner, const char *name, @@ -1648,8 +1673,8 @@ bool memory_region_init_ram_from_fd(MemoryRegion *mr, mr->readonly = !!(ram_flags & RAM_READONLY); mr->terminates = true; mr->destructor = memory_region_destructor_ram; - mr->ram_block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset, - &err); + mr->ram_block = qemu_ram_alloc_from_fd(size, size, NULL, mr, ram_flags, fd, + offset, false, &err); if (err) { mr->size = int128_zero(); object_unparent(OBJECT(mr)); @@ -2083,12 +2108,16 @@ RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr) return mr->rdm; } -void memory_region_set_ram_discard_manager(MemoryRegion *mr, - RamDiscardManager *rdm) +int memory_region_set_ram_discard_manager(MemoryRegion *mr, + RamDiscardManager *rdm) { g_assert(memory_region_is_ram(mr)); - g_assert(!rdm || !mr->rdm); + if (mr->rdm && rdm) { + return -EBUSY; + } + mr->rdm = rdm; + return 0; } uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm, @@ -2111,7 +2140,7 @@ bool ram_discard_manager_is_populated(const RamDiscardManager *rdm, int ram_discard_manager_replay_populated(const RamDiscardManager *rdm, MemoryRegionSection *section, - ReplayRamPopulate replay_fn, + ReplayRamDiscardState replay_fn, void *opaque) { RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm); @@ -2120,15 +2149,15 @@ int ram_discard_manager_replay_populated(const RamDiscardManager *rdm, return rdmc->replay_populated(rdm, section, replay_fn, opaque); } -void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm, - MemoryRegionSection *section, - ReplayRamDiscard replay_fn, - void *opaque) +int ram_discard_manager_replay_discarded(const RamDiscardManager *rdm, + MemoryRegionSection *section, + ReplayRamDiscardState replay_fn, + void *opaque) { RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm); g_assert(rdmc->replay_discarded); - rdmc->replay_discarded(rdm, section, replay_fn, opaque); + return rdmc->replay_discarded(rdm, section, replay_fn, opaque); } void ram_discard_manager_register_listener(RamDiscardManager *rdm, @@ -2151,18 +2180,14 @@ void ram_discard_manager_unregister_listener(RamDiscardManager *rdm, } /* Called with rcu_read_lock held. */ -bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, - ram_addr_t *ram_addr, bool *read_only, - bool *mr_has_discard_manager, Error **errp) +MemoryRegion *memory_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p, + Error **errp) { MemoryRegion *mr; hwaddr xlat; hwaddr len = iotlb->addr_mask + 1; bool writable = iotlb->perm & IOMMU_WO; - if (mr_has_discard_manager) { - *mr_has_discard_manager = false; - } /* * The IOMMU TLB entry we have just covers translation through * this IOMMU to its immediate target. 
We need to translate @@ -2172,7 +2197,7 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED); if (!memory_region_is_ram(mr)) { error_setg(errp, "iommu map to non memory area %" HWADDR_PRIx "", xlat); - return false; + return NULL; } else if (memory_region_has_ram_discard_manager(mr)) { RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr); MemoryRegionSection tmp = { @@ -2180,9 +2205,6 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, .offset_within_region = xlat, .size = int128_make64(len), }; - if (mr_has_discard_manager) { - *mr_has_discard_manager = true; - } /* * Malicious VMs can map memory into the IOMMU, which is expected * to remain discarded. vfio will pin all pages, populating memory. @@ -2193,7 +2215,7 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, error_setg(errp, "iommu map to discarded memory (e.g., unplugged" " via virtio-mem): %" HWADDR_PRIx "", iotlb->translated_addr); - return false; + return NULL; } } @@ -2203,22 +2225,11 @@ bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, */ if (len & iotlb->addr_mask) { error_setg(errp, "iommu has granularity incompatible with target AS"); - return false; - } - - if (vaddr) { - *vaddr = memory_region_get_ram_ptr(mr) + xlat; - } - - if (ram_addr) { - *ram_addr = memory_region_get_ram_addr(mr) + xlat; - } - - if (read_only) { - *read_only = !writable || mr->readonly; + return NULL; } - return true; + *xlat_p = xlat; + return mr; } void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) @@ -2552,7 +2563,8 @@ void memory_region_add_eventfd(MemoryRegion *mr, unsigned i; if (size) { - adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); + MemOp mop = (target_big_endian() ? MO_BE : MO_LE) | size_memop(size); + adjust_endianness(mr, &mrfd.data, mop); } memory_region_transaction_begin(); for (i = 0; i < mr->ioeventfd_nb; ++i) { @@ -2587,7 +2599,8 @@ void memory_region_del_eventfd(MemoryRegion *mr, unsigned i; if (size) { - adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); + MemOp mop = (target_big_endian() ? MO_BE : MO_LE) | size_memop(size); + adjust_endianness(mr, &mrfd.data, mop); } memory_region_transaction_begin(); for (i = 0; i < mr->ioeventfd_nb; ++i) { @@ -3015,8 +3028,10 @@ void memory_global_dirty_log_stop(unsigned int flags) static void listener_add_address_space(MemoryListener *listener, AddressSpace *as) { + unsigned i; FlatView *view; FlatRange *fr; + MemoryRegionIoeventfd *fd; if (listener->begin) { listener->begin(listener); @@ -3041,10 +3056,34 @@ static void listener_add_address_space(MemoryListener *listener, if (listener->region_add) { listener->region_add(listener, §ion); } + + /* send coalesced io add notifications */ + flat_range_coalesced_io_notify_listener_add_del(fr, §ion, + listener, as, true); + if (fr->dirty_log_mask && listener->log_start) { listener->log_start(listener, §ion, 0, fr->dirty_log_mask); } } + + /* + * register all eventfds for this address space for the newly registered + * listener. 
+ */ + for (i = 0; i < as->ioeventfd_nb; i++) { + fd = &as->ioeventfds[i]; + MemoryRegionSection section = (MemoryRegionSection) { + .fv = view, + .offset_within_address_space = int128_get64(fd->addr.start), + .size = fd->addr.size, + }; + + if (listener->eventfd_add) { + listener->eventfd_add(listener, §ion, + fd->match_data, fd->data, fd->e); + } + } + if (listener->commit) { listener->commit(listener); } @@ -3054,8 +3093,10 @@ static void listener_add_address_space(MemoryListener *listener, static void listener_del_address_space(MemoryListener *listener, AddressSpace *as) { + unsigned i; FlatView *view; FlatRange *fr; + MemoryRegionIoeventfd *fd; if (listener->begin) { listener->begin(listener); @@ -3067,10 +3108,33 @@ static void listener_del_address_space(MemoryListener *listener, if (fr->dirty_log_mask && listener->log_stop) { listener->log_stop(listener, §ion, fr->dirty_log_mask, 0); } + + /* send coalesced io del notifications */ + flat_range_coalesced_io_notify_listener_add_del(fr, §ion, + listener, as, false); if (listener->region_del) { listener->region_del(listener, §ion); } } + + /* + * de-register all eventfds for this address space for the current + * listener. + */ + for (i = 0; i < as->ioeventfd_nb; i++) { + fd = &as->ioeventfds[i]; + MemoryRegionSection section = (MemoryRegionSection) { + .fv = view, + .offset_within_address_space = int128_get64(fd->addr.start), + .size = fd->addr.size, + }; + + if (listener->eventfd_del) { + listener->eventfd_del(listener, §ion, + fd->match_data, fd->data, fd->e); + } + } + if (listener->commit) { listener->commit(listener); } @@ -3148,7 +3212,8 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) as->ioeventfds = NULL; QTAILQ_INIT(&as->listeners); QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); - as->bounce.in_use = false; + as->max_bounce_buffer_size = DEFAULT_MAX_BOUNCE_BUFFER_SIZE; + as->bounce_buffer_size = 0; qemu_mutex_init(&as->map_client_list_lock); QLIST_INIT(&as->map_client_list); as->name = g_strdup(name ? 
name : "anonymous"); @@ -3158,7 +3223,7 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) static void do_address_space_destroy(AddressSpace *as) { - assert(!qatomic_read(&as->bounce.in_use)); + assert(qatomic_read(&as->bounce_buffer_size) == 0); assert(QLIST_EMPTY(&as->map_client_list)); qemu_mutex_destroy(&as->map_client_list_lock); @@ -3438,7 +3503,7 @@ static void mtree_print_flatview(gpointer key, gpointer value, if (fvi->ac) { for (i = 0; i < fv_address_spaces->len; ++i) { as = g_array_index(fv_address_spaces, AddressSpace*, i); - if (fvi->ac->has_memory(current_machine, as, + if (fvi->ac->has_memory(current_machine->accelerator, as, int128_get64(range->addr.start), MR_SIZE(range->addr.size) + 1)) { qemu_printf(" %s", fvi->ac->name); diff --git a/system/memory_ldst.c.inc b/system/memory_ldst.c.inc index 0e6f3940a9a..7f32d3d9ff3 100644 --- a/system/memory_ldst.c.inc +++ b/system/memory_ldst.c.inc @@ -34,7 +34,7 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, RCU_READ_LOCK(); mr = TRANSLATE(addr, &addr1, &l, false, attrs); - if (l < 4 || !memory_access_is_direct(mr, false)) { + if (l < 4 || !memory_access_is_direct(mr, false, attrs)) { release_lock |= prepare_mmio_access(mr); /* I/O case */ @@ -103,7 +103,7 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, RCU_READ_LOCK(); mr = TRANSLATE(addr, &addr1, &l, false, attrs); - if (l < 8 || !memory_access_is_direct(mr, false)) { + if (l < 8 || !memory_access_is_direct(mr, false, attrs)) { release_lock |= prepare_mmio_access(mr); /* I/O case */ @@ -170,7 +170,7 @@ uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL, RCU_READ_LOCK(); mr = TRANSLATE(addr, &addr1, &l, false, attrs); - if (!memory_access_is_direct(mr, false)) { + if (!memory_access_is_direct(mr, false, attrs)) { release_lock |= prepare_mmio_access(mr); /* I/O case */ @@ -207,7 +207,7 @@ static inline uint16_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, RCU_READ_LOCK(); mr = TRANSLATE(addr, &addr1, &l, false, attrs); - if (l < 2 || !memory_access_is_direct(mr, false)) { + if (l < 2 || !memory_access_is_direct(mr, false, attrs)) { release_lock |= prepare_mmio_access(mr); /* I/O case */ @@ -277,7 +277,7 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL, RCU_READ_LOCK(); mr = TRANSLATE(addr, &addr1, &l, true, attrs); - if (l < 4 || !memory_access_is_direct(mr, true)) { + if (l < 4 || !memory_access_is_direct(mr, true, attrs)) { release_lock |= prepare_mmio_access(mr); r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs); @@ -314,7 +314,7 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, RCU_READ_LOCK(); mr = TRANSLATE(addr, &addr1, &l, true, attrs); - if (l < 4 || !memory_access_is_direct(mr, true)) { + if (l < 4 || !memory_access_is_direct(mr, true, attrs)) { release_lock |= prepare_mmio_access(mr); r = memory_region_dispatch_write(mr, addr1, val, MO_32 | devend_memop(endian), attrs); @@ -377,7 +377,7 @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL, RCU_READ_LOCK(); mr = TRANSLATE(addr, &addr1, &l, true, attrs); - if (!memory_access_is_direct(mr, true)) { + if (!memory_access_is_direct(mr, true, attrs)) { release_lock |= prepare_mmio_access(mr); r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs); } else { @@ -410,7 +410,7 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, RCU_READ_LOCK(); mr = TRANSLATE(addr, &addr1, &l, true, attrs); - if (l < 2 || !memory_access_is_direct(mr, true)) { + if (l < 2 || 
!memory_access_is_direct(mr, true, attrs)) { release_lock |= prepare_mmio_access(mr); r = memory_region_dispatch_write(mr, addr1, val, MO_16 | devend_memop(endian), attrs); @@ -474,7 +474,7 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, RCU_READ_LOCK(); mr = TRANSLATE(addr, &addr1, &l, true, attrs); - if (l < 8 || !memory_access_is_direct(mr, true)) { + if (l < 8 || !memory_access_is_direct(mr, true, attrs)) { release_lock |= prepare_mmio_access(mr); r = memory_region_dispatch_write(mr, addr1, val, MO_64 | devend_memop(endian), attrs); diff --git a/system/memory_mapping.c b/system/memory_mapping.c index ca2390eb804..da708a08ab7 100644 --- a/system/memory_mapping.c +++ b/system/memory_mapping.c @@ -15,9 +15,9 @@ #include "qemu/range.h" #include "qapi/error.h" -#include "sysemu/memory_mapping.h" -#include "exec/memory.h" -#include "exec/address-spaces.h" +#include "system/memory_mapping.h" +#include "system/memory.h" +#include "system/address-spaces.h" #include "hw/core/cpu.h" //#define DEBUG_GUEST_PHYS_REGION_ADD diff --git a/system/meson.build b/system/meson.build index a296270cb00..6d21ff9faa7 100644 --- a/system/meson.build +++ b/system/meson.build @@ -1,22 +1,26 @@ specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_true: [files( 'arch_init.c', - 'ioport.c', - 'memory.c', - 'physmem.c', - 'watchpoint.c', + 'globals-target.c', )]) +system_ss.add(files( + 'vl.c', +), sdl, libpmem, libdaxctl) + system_ss.add(files( 'balloon.c', 'bootdevice.c', 'cpus.c', - 'cpu-throttle.c', 'cpu-timers.c', 'datadir.c', 'dirtylimit.c', 'dma-helpers.c', 'globals.c', + 'ioport.c', + 'ram-block-attributes.c', 'memory_mapping.c', + 'memory.c', + 'physmem.c', 'qdev-monitor.c', 'qtest.c', 'rtc.c', @@ -24,8 +28,8 @@ system_ss.add(files( 'runstate-hmp-cmds.c', 'runstate.c', 'tpm-hmp-cmds.c', - 'vl.c', -), sdl, libpmem, libdaxctl) + 'watchpoint.c', +)) if have_tpm system_ss.add(files('tpm.c')) diff --git a/system/physmem.c b/system/physmem.c index 94600a33ec3..cd971ae1ec7 100644 --- a/system/physmem.c +++ b/system/physmem.c @@ -28,31 +28,34 @@ #include "qemu/lockable.h" #ifdef CONFIG_TCG -#include "hw/core/tcg-cpu-ops.h" +#include "accel/tcg/cpu-ops.h" +#include "accel/tcg/iommu.h" #endif /* CONFIG_TCG */ -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" #include "exec/target_page.h" +#include "exec/translation-block.h" #include "hw/qdev-core.h" #include "hw/qdev-properties.h" #include "hw/boards.h" -#include "sysemu/xen.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" -#include "sysemu/qtest.h" +#include "system/xen.h" +#include "system/kvm.h" +#include "system/tcg.h" +#include "system/qtest.h" #include "qemu/timer.h" #include "qemu/config-file.h" #include "qemu/error-report.h" #include "qemu/qemu-print.h" #include "qemu/log.h" #include "qemu/memalign.h" -#include "exec/memory.h" -#include "exec/ioport.h" -#include "sysemu/dma.h" -#include "sysemu/hostmem.h" -#include "sysemu/hw_accel.h" -#include "sysemu/xen-mapcache.h" +#include "qemu/memfd.h" +#include "system/memory.h" +#include "system/ioport.h" +#include "system/dma.h" +#include "system/hostmem.h" +#include "system/hw_accel.h" +#include "system/xen-mapcache.h" #include "trace.h" #ifdef CONFIG_FALLOCATE_PUNCH_HOLE @@ -61,14 +64,16 @@ #include "qemu/rcu_queue.h" #include "qemu/main-loop.h" -#include "exec/translate-all.h" -#include "sysemu/replay.h" +#include "system/replay.h" -#include "exec/memory-internal.h" -#include "exec/ram_addr.h" +#include "system/ram_addr.h" #include "qemu/pmem.h" +#include 
"qapi/qapi-types-migration.h" +#include "migration/blocker.h" +#include "migration/cpr.h" +#include "migration/options.h" #include "migration/vmstate.h" #include "qemu/range.h" @@ -82,6 +87,8 @@ #include #endif +#include "memory-internal.h" + //#define DEBUG_SUBPAGE /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes @@ -152,18 +159,17 @@ static void io_mem_init(void); static void memory_map_init(void); static void tcg_log_global_after_sync(MemoryListener *listener); static void tcg_commit(MemoryListener *listener); +static bool ram_is_cpr_compatible(RAMBlock *rb); /** * CPUAddressSpace: all the information a CPU needs about an AddressSpace * @cpu: the CPU whose AddressSpace this is * @as: the AddressSpace itself - * @memory_dispatch: its dispatch pointer (cached, RCU protected) * @tcg_as_listener: listener for tracking changes to the AddressSpace */ typedef struct CPUAddressSpace { CPUState *cpu; AddressSpace *as; - struct AddressSpaceDispatch *memory_dispatch; MemoryListener tcg_as_listener; } CPUAddressSpace; @@ -571,7 +577,7 @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat, is_write, true, &as, attrs); mr = section.mr; - if (xen_enabled() && memory_access_is_direct(mr, is_write)) { + if (xen_enabled() && memory_access_is_direct(mr, is_write, attrs)) { hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr; *plen = MIN(page, *plen); } @@ -579,6 +585,8 @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat, return mr; } +#ifdef CONFIG_TCG + typedef struct TCGIOMMUNotifier { IOMMUNotifier n; MemoryRegion *mr; @@ -682,7 +690,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr, IOMMUTLBEntry iotlb; int iommu_idx; hwaddr addr = orig_addr; - AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch; + AddressSpaceDispatch *d = address_space_to_dispatch(cpu->cpu_ases[asidx].as); for (;;) { section = address_space_translate_internal(d, addr, &addr, plen, false); @@ -738,6 +746,33 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr, return &d->map.sections[PHYS_SECTION_UNASSIGNED]; } +MemoryRegionSection *iotlb_to_section(CPUState *cpu, + hwaddr index, MemTxAttrs attrs) +{ + int asidx = cpu_asidx_from_attrs(cpu, attrs); + CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; + AddressSpaceDispatch *d = address_space_to_dispatch(cpuas->as); + int section_index = index & ~TARGET_PAGE_MASK; + MemoryRegionSection *ret; + + assert(section_index < d->map.sections_nb); + ret = d->map.sections + section_index; + assert(ret->mr); + assert(ret->mr->ops); + + return ret; +} + +/* Called from RCU critical section */ +hwaddr memory_region_section_get_iotlb(CPUState *cpu, + MemoryRegionSection *section) +{ + AddressSpaceDispatch *d = flatview_to_dispatch(section->fv); + return section - d->map.sections; +} + +#endif /* CONFIG_TCG */ + void cpu_address_space_init(CPUState *cpu, int asidx, const char *prefix, MemoryRegion *mr) { @@ -994,14 +1029,6 @@ bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap, return false; } -/* Called from RCU critical section */ -hwaddr memory_region_section_get_iotlb(CPUState *cpu, - MemoryRegionSection *section) -{ - AddressSpaceDispatch *d = flatview_to_dispatch(section->fv); - return section - d->map.sections; -} - static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end, uint16_t section); static subpage_t *subpage_init(FlatView *fv, hwaddr base); @@ -1235,7 +1262,7 @@ long qemu_maxrampagesize(void) return 
pagesize; } -#ifdef CONFIG_POSIX +#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN) static int64_t get_file_size(int fd) { int64_t size; @@ -1534,18 +1561,6 @@ static ram_addr_t find_ram_offset(ram_addr_t size) return offset; } -static unsigned long last_ram_page(void) -{ - RAMBlock *block; - ram_addr_t last = 0; - - RCU_READ_LOCK_GUARD(); - RAMBLOCK_FOREACH(block) { - last = MAX(last, block->offset + block->max_length); - } - return last >> TARGET_PAGE_BITS; -} - static void qemu_ram_setup_dump(void *addr, ram_addr_t size) { int ret; @@ -1576,6 +1591,11 @@ ram_addr_t qemu_ram_get_offset(RAMBlock *rb) return rb->offset; } +ram_addr_t qemu_ram_get_fd_offset(RAMBlock *rb) +{ + return rb->fd_offset; +} + ram_addr_t qemu_ram_get_used_length(RAMBlock *rb) { return rb->used_length; @@ -1672,6 +1692,18 @@ void qemu_ram_unset_idstr(RAMBlock *block) } } +static char *cpr_name(MemoryRegion *mr) +{ + const char *mr_name = memory_region_name(mr); + g_autofree char *id = mr->dev ? qdev_get_dev_path(mr->dev) : NULL; + + if (id) { + return g_strdup_printf("%s/%s", id, mr_name); + } else { + return g_strdup(mr_name); + } +} + size_t qemu_ram_pagesize(RAMBlock *rb) { return rb->page_size; @@ -1799,13 +1831,11 @@ void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length) } /* Called with ram_list.mutex held */ -static void dirty_memory_extend(ram_addr_t old_ram_size, - ram_addr_t new_ram_size) +static void dirty_memory_extend(ram_addr_t new_ram_size) { - ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size, - DIRTY_MEMORY_BLOCK_SIZE); - ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size, - DIRTY_MEMORY_BLOCK_SIZE); + unsigned int old_num_blocks = ram_list.num_dirty_blocks; + unsigned int new_num_blocks = DIV_ROUND_UP(new_ram_size, + DIRTY_MEMORY_BLOCK_SIZE); int i; /* Only need to extend if block count increased */ @@ -1837,6 +1867,8 @@ static void dirty_memory_extend(ram_addr_t old_ram_size, g_free_rcu(old_blocks, rcu); } } + + ram_list.num_dirty_blocks = new_num_blocks; } static void ram_block_add(RAMBlock *new_block, Error **errp) @@ -1846,11 +1878,9 @@ static void ram_block_add(RAMBlock *new_block, Error **errp) RAMBlock *block; RAMBlock *last_block = NULL; bool free_on_error = false; - ram_addr_t old_ram_size, new_ram_size; + ram_addr_t ram_size; Error *err = NULL; - old_ram_size = last_ram_page(); - qemu_mutex_lock_ramlist(); new_block->offset = find_ram_offset(new_block->max_length); @@ -1882,10 +1912,14 @@ static void ram_block_add(RAMBlock *new_block, Error **errp) if (new_block->flags & RAM_GUEST_MEMFD) { int ret; - assert(kvm_enabled()); + if (!kvm_enabled()) { + error_setg(errp, "cannot set up private guest memory for %s: KVM required", + object_get_typename(OBJECT(current_machine->cgs))); + goto out_free; + } assert(new_block->guest_memfd < 0); - ret = ram_block_discard_require(true); + ret = ram_block_coordinated_discard_require(true); if (ret < 0) { error_setg_errno(errp, -ret, "cannot set up private guest memory: discard currently blocked"); @@ -1899,13 +1933,41 @@ static void ram_block_add(RAMBlock *new_block, Error **errp) qemu_mutex_unlock_ramlist(); goto out_free; } - } - new_ram_size = MAX(old_ram_size, - (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS); - if (new_ram_size > old_ram_size) { - dirty_memory_extend(old_ram_size, new_ram_size); + /* + * The attribute bitmap of the RamBlockAttributes is default to + * discarded, which mimics the behavior of kvm_set_phys_mem() when it + * calls kvm_set_memory_attributes_private(). 
This leads to a brief + * period of inconsistency between the creation of the RAMBlock and its + * mapping into the physical address space. However, this is not + * problematic, as no users rely on the attribute status to perform + * any actions during this interval. + */ + new_block->attributes = ram_block_attributes_create(new_block); + if (!new_block->attributes) { + error_setg(errp, "Failed to create ram block attribute"); + close(new_block->guest_memfd); + ram_block_coordinated_discard_require(false); + qemu_mutex_unlock_ramlist(); + goto out_free; + } + + /* + * Add a specific guest_memfd blocker if a generic one would not be + * added by ram_block_add_cpr_blocker. + */ + if (ram_is_cpr_compatible(new_block)) { + error_setg(&new_block->cpr_blocker, + "Memory region %s uses guest_memfd, " + "which is not supported with CPR.", + memory_region_name(new_block->mr)); + migrate_add_blocker_modes(&new_block->cpr_blocker, errp, + MIG_MODE_CPR_TRANSFER, -1); + } } + + ram_size = (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS; + dirty_memory_extend(ram_size); /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ, * QLIST (which has an RCU-friendly variant) does not have insertion at * tail, so save the last element in last_block. @@ -1958,19 +2020,28 @@ static void ram_block_add(RAMBlock *new_block, Error **errp) } } -#ifdef CONFIG_POSIX -RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, +#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN) +RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, ram_addr_t max_size, + qemu_ram_resize_cb resized, MemoryRegion *mr, uint32_t ram_flags, int fd, off_t offset, + bool grow, Error **errp) { + ERRP_GUARD(); RAMBlock *new_block; Error *local_err = NULL; - int64_t file_size, file_align; + int64_t file_size, file_align, share_flags; + + share_flags = ram_flags & (RAM_PRIVATE | RAM_SHARED); + assert(share_flags != (RAM_SHARED | RAM_PRIVATE)); + ram_flags &= ~RAM_PRIVATE; /* Just support these ram flags by now. 
*/ assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE | RAM_PROTECTED | RAM_NAMED_FILE | RAM_READONLY | - RAM_READONLY_FD | RAM_GUEST_MEMFD)) == 0); + RAM_READONLY_FD | RAM_GUEST_MEMFD | + RAM_RESIZEABLE)) == 0); + assert(max_size >= size); if (xen_enabled()) { error_setg(errp, "-mem-path not supported with Xen"); @@ -1985,12 +2056,16 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, size = TARGET_PAGE_ALIGN(size); size = REAL_HOST_PAGE_ALIGN(size); + max_size = TARGET_PAGE_ALIGN(max_size); + max_size = REAL_HOST_PAGE_ALIGN(max_size); file_size = get_file_size(fd); - if (file_size > offset && file_size < (offset + size)) { - error_setg(errp, "backing store size 0x%" PRIx64 - " does not match 'size' option 0x" RAM_ADDR_FMT, - file_size, size); + if (file_size && file_size < offset + max_size && !grow) { + error_setg(errp, "%s backing store size 0x%" PRIx64 + " is too small for 'size' option 0x" RAM_ADDR_FMT + " plus 'offset' option 0x%" PRIx64, + memory_region_name(mr), file_size, max_size, + (uint64_t)offset); return NULL; } @@ -2005,11 +2080,13 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, new_block = g_malloc0(sizeof(*new_block)); new_block->mr = mr; new_block->used_length = size; - new_block->max_length = size; + new_block->max_length = max_size; + new_block->resized = resized; new_block->flags = ram_flags; new_block->guest_memfd = -1; - new_block->host = file_ram_alloc(new_block, size, fd, !file_size, offset, - errp); + new_block->host = file_ram_alloc(new_block, max_size, fd, + file_size < offset + max_size, + offset, errp); if (!new_block->host) { g_free(new_block); return NULL; @@ -2061,7 +2138,8 @@ RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, return NULL; } - block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset, errp); + block = qemu_ram_alloc_from_fd(size, size, NULL, mr, ram_flags, fd, offset, + false, errp); if (!block) { if (created) { unlink(mem_path); @@ -2074,21 +2152,98 @@ RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, } #endif +#ifdef CONFIG_POSIX +/* + * Create MAP_SHARED RAMBlocks by mmap'ing a file descriptor, so it can be + * shared with another process if CPR is being used. Use memfd if available + * because it has no size limits, else use POSIX shm. 
+ */ +static int qemu_ram_get_shared_fd(const char *name, bool *reused, Error **errp) +{ + int fd = cpr_find_fd(name, 0); + + if (fd >= 0) { + *reused = true; + return fd; + } + + if (qemu_memfd_check(0)) { + fd = qemu_memfd_create(name, 0, 0, 0, 0, errp); + } else { + fd = qemu_shm_alloc(0, errp); + } + + if (fd >= 0) { + cpr_save_fd(name, 0, fd); + } + *reused = false; + return fd; +} +#endif + static RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, - void (*resized)(const char*, - uint64_t length, - void *host), + qemu_ram_resize_cb resized, void *host, uint32_t ram_flags, MemoryRegion *mr, Error **errp) { RAMBlock *new_block; Error *local_err = NULL; - int align; + int align, share_flags; + + share_flags = ram_flags & (RAM_PRIVATE | RAM_SHARED); + assert(share_flags != (RAM_SHARED | RAM_PRIVATE)); + ram_flags &= ~RAM_PRIVATE; assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC | RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0); assert(!host ^ (ram_flags & RAM_PREALLOC)); + assert(max_size >= size); + + /* ignore RAM_SHARED for Windows and emscripten*/ +#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN) + if (!host) { + if (!share_flags && current_machine->aux_ram_share) { + ram_flags |= RAM_SHARED; + } + if (ram_flags & RAM_SHARED) { + bool reused; + g_autofree char *name = cpr_name(mr); + int fd = qemu_ram_get_shared_fd(name, &reused, errp); + + if (fd < 0) { + return NULL; + } + + /* Use same alignment as qemu_anon_ram_alloc */ + mr->align = QEMU_VMALLOC_ALIGN; + + /* + * This can fail if the shm mount size is too small, or alloc from + * fd is not supported, but previous QEMU versions that called + * qemu_anon_ram_alloc for anonymous shared memory could have + * succeeded. Quietly fail and fall back. + * + * After cpr-transfer, new QEMU could create a memory region + * with a larger max size than old, so pass reused to grow the + * region if necessary. The extra space will be usable after a + * guest reset. 
+ */ + new_block = qemu_ram_alloc_from_fd(size, max_size, resized, mr, + ram_flags, fd, 0, reused, NULL); + if (new_block) { + trace_qemu_ram_alloc_shared(name, new_block->used_length, + new_block->max_length, fd, + new_block->host); + return new_block; + } + + cpr_delete_fd(name, 0); + close(fd); + /* fall back to anon allocation */ + } + } +#endif align = qemu_real_host_page_size(); align = MAX(align, TARGET_PAGE_SIZE); @@ -2100,7 +2255,6 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, new_block->resized = resized; new_block->used_length = size; new_block->max_length = max_size; - assert(max_size >= size); new_block->fd = -1; new_block->guest_memfd = -1; new_block->page_size = qemu_real_host_page_size(); @@ -2125,15 +2279,14 @@ RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, MemoryRegion *mr, Error **errp) { - assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0); + assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD | + RAM_PRIVATE)) == 0); return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp); } RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz, - void (*resized)(const char*, - uint64_t length, - void *host), - MemoryRegion *mr, Error **errp) + qemu_ram_resize_cb resized, + MemoryRegion *mr, Error **errp) { return qemu_ram_alloc_internal(size, maxsz, resized, NULL, RAM_RESIZEABLE, mr, errp); @@ -2145,7 +2298,7 @@ static void reclaim_ramblock(RAMBlock *block) ; } else if (xen_enabled()) { xen_invalidate_map_cache_entry(block->host); -#ifndef _WIN32 +#if !defined(_WIN32) && !defined(EMSCRIPTEN) } else if (block->fd >= 0) { qemu_ram_munmap(block->fd, block->host, block->max_length); close(block->fd); @@ -2155,8 +2308,9 @@ static void reclaim_ramblock(RAMBlock *block) } if (block->guest_memfd >= 0) { + ram_block_attributes_destroy(block->attributes); close(block->guest_memfd); - ram_block_discard_require(false); + ram_block_coordinated_discard_require(false); } g_free(block); @@ -2164,6 +2318,8 @@ static void reclaim_ramblock(RAMBlock *block) void qemu_ram_free(RAMBlock *block) { + g_autofree char *name = NULL; + if (!block) { return; } @@ -2174,6 +2330,8 @@ void qemu_ram_free(RAMBlock *block) } qemu_mutex_lock_ramlist(); + name = cpr_name(block->mr); + cpr_delete_fd(name, 0); QLIST_REMOVE_RCU(block, next); ram_list.mru_block = NULL; /* Write list before version */ @@ -2184,45 +2342,80 @@ void qemu_ram_free(RAMBlock *block) } #ifndef _WIN32 -void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) +/* Simply remap the given VM memory location from start to start+length */ +static int qemu_ram_remap_mmap(RAMBlock *block, uint64_t start, size_t length) +{ + int flags, prot; + void *area; + void *host_startaddr = block->host + start; + + assert(block->fd < 0); + flags = MAP_FIXED | MAP_ANONYMOUS; + flags |= block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE; + flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0; + prot = PROT_READ; + prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE; + area = mmap(host_startaddr, length, prot, flags, -1, 0); + return area != host_startaddr ? -errno : 0; +} + +/* + * qemu_ram_remap - remap a single RAM page + * + * @addr: address in ram_addr_t address space. + * + * This function will try remapping a single page of guest RAM identified by + * @addr, essentially discarding memory to recover from previously poisoned + * memory (MCE). 
The page size depends on the RAMBlock (i.e., hugetlb). @addr + * does not have to point at the start of the page. + * + * This function is only to be used during system resets; it will kill the + * VM if remapping failed. + */ +void qemu_ram_remap(ram_addr_t addr) { RAMBlock *block; - ram_addr_t offset; - int flags; - void *area, *vaddr; - int prot; + uint64_t offset; + void *vaddr; + size_t page_size; RAMBLOCK_FOREACH(block) { offset = addr - block->offset; if (offset < block->max_length) { + /* Respect the pagesize of our RAMBlock */ + page_size = qemu_ram_pagesize(block); + offset = QEMU_ALIGN_DOWN(offset, page_size); + vaddr = ramblock_ptr(block, offset); if (block->flags & RAM_PREALLOC) { ; } else if (xen_enabled()) { abort(); } else { - flags = MAP_FIXED; - flags |= block->flags & RAM_SHARED ? - MAP_SHARED : MAP_PRIVATE; - flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0; - prot = PROT_READ; - prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE; - if (block->fd >= 0) { - area = mmap(vaddr, length, prot, flags, block->fd, - offset + block->fd_offset); - } else { - flags |= MAP_ANONYMOUS; - area = mmap(vaddr, length, prot, flags, -1, 0); - } - if (area != vaddr) { - error_report("Could not remap addr: " - RAM_ADDR_FMT "@" RAM_ADDR_FMT "", - length, addr); - exit(1); + if (ram_block_discard_range(block, offset, page_size) != 0) { + /* + * Fall back to using mmap() only for anonymous mapping, + * as if a backing file is associated we may not be able + * to recover the memory in all cases. + * So don't take the risk of using only mmap and fail now. + */ + if (block->fd >= 0) { + error_report("Could not remap RAM %s:%" PRIx64 "+%" + PRIx64 " +%zx", block->idstr, offset, + block->fd_offset, page_size); + exit(1); + } + if (qemu_ram_remap_mmap(block, offset, page_size) != 0) { + error_report("Could not remap RAM %s:%" PRIx64 " +%zx", + block->idstr, offset, page_size); + exit(1); + } } - memory_try_enable_merging(vaddr, length); - qemu_ram_setup_dump(vaddr, length); + memory_try_enable_merging(vaddr, page_size); + qemu_ram_setup_dump(vaddr, page_size); } + + break; } } } @@ -2520,23 +2713,6 @@ static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr) return phys_section_add(map, §ion); } -MemoryRegionSection *iotlb_to_section(CPUState *cpu, - hwaddr index, MemTxAttrs attrs) -{ - int asidx = cpu_asidx_from_attrs(cpu, attrs); - CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; - AddressSpaceDispatch *d = cpuas->memory_dispatch; - int section_index = index & ~TARGET_PAGE_MASK; - MemoryRegionSection *ret; - - assert(section_index < d->map.sections_nb); - ret = d->map.sections + section_index; - assert(ret->mr); - assert(ret->mr->ops); - - return ret; -} - static void io_mem_init(void) { memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, @@ -2602,9 +2778,6 @@ static void tcg_log_global_after_sync(MemoryListener *listener) static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data) { - CPUAddressSpace *cpuas = data.host_ptr; - - cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as); tlb_flush(cpu); } @@ -2620,11 +2793,7 @@ static void tcg_commit(MemoryListener *listener) cpu = cpuas->cpu; /* - * Defer changes to as->memory_dispatch until the cpu is quiescent. - * Otherwise we race between (1) other cpu threads and (2) ongoing - * i/o for the current cpu thread, with data cached by mmu_lookup(). 
- * - * In addition, queueing the work function will kick the cpu back to + * Queueing the work function will kick the cpu back to * the main loop, which will end the RCU critical section and reclaim * the memory data structures. * @@ -2681,7 +2850,7 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, } if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { assert(tcg_enabled()); - tb_invalidate_phys_range(addr, addr + length - 1); + tb_invalidate_phys_range(NULL, addr, addr + length - 1); dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); } cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); @@ -2762,7 +2931,7 @@ static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs, if (memory_region_is_ram(mr)) { return true; } - qemu_log_mask(LOG_GUEST_ERROR, + qemu_log_mask(LOG_INVALID_MEM, "Invalid access to non-RAM device at " "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", " "region '%s'\n", addr, len, memory_region_name(mr)); @@ -2778,7 +2947,7 @@ static MemTxResult flatview_write_continue_step(MemTxAttrs attrs, return MEMTX_ACCESS_ERROR; } - if (!memory_access_is_direct(mr, true)) { + if (!memory_access_is_direct(mr, true, attrs)) { uint64_t val; MemTxResult result; bool release_lock = prepare_mmio_access(mr); @@ -2874,7 +3043,7 @@ static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf, return MEMTX_ACCESS_ERROR; } - if (!memory_access_is_direct(mr, false)) { + if (!memory_access_is_direct(mr, false, attrs)) { /* I/O case */ uint64_t val; MemTxResult result; @@ -3046,8 +3215,7 @@ static inline MemTxResult address_space_write_rom_internal(AddressSpace *as, l = len; mr = address_space_translate(as, addr, &addr1, &l, true, attrs); - if (!(memory_region_is_ram(mr) || - memory_region_is_romd(mr))) { + if (!memory_region_supports_direct_access(mr)) { l = memory_access_size(mr, l, addr1); } else { /* ROM/RAM case */ @@ -3095,6 +3263,20 @@ void cpu_flush_icache_range(hwaddr start, hwaddr len) NULL, len, FLUSH_CACHE); } +/* + * A magic value stored in the first 8 bytes of the bounce buffer struct. Used + * to detect illegal pointers passed to address_space_unmap. + */ +#define BOUNCE_BUFFER_MAGIC 0xb4017ceb4ffe12ed + +typedef struct { + uint64_t magic; + MemoryRegion *mr; + hwaddr addr; + size_t len; + uint8_t buffer[]; +} BounceBuffer; + static void address_space_unregister_map_client_do(AddressSpaceMapClient *client) { @@ -3120,9 +3302,9 @@ void address_space_register_map_client(AddressSpace *as, QEMUBH *bh) QEMU_LOCK_GUARD(&as->map_client_list_lock); client->bh = bh; QLIST_INSERT_HEAD(&as->map_client_list, client, link); - /* Write map_client_list before reading in_use. */ + /* Write map_client_list before reading bounce_buffer_size. 
*/ smp_mb(); - if (!qatomic_read(&as->bounce.in_use)) { + if (qatomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) { address_space_notify_map_clients_locked(as); } } @@ -3170,7 +3352,7 @@ static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, while (len > 0) { l = len; mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); - if (!memory_access_is_direct(mr, is_write)) { + if (!memory_access_is_direct(mr, is_write, attrs)) { l = memory_access_size(mr, l, addr); if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) { return false; @@ -3250,29 +3432,41 @@ void *address_space_map(AddressSpace *as, fv = address_space_to_flatview(as); mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); - if (!memory_access_is_direct(mr, is_write)) { - if (qatomic_xchg(&as->bounce.in_use, true)) { + if (!memory_access_is_direct(mr, is_write, attrs)) { + size_t used = qatomic_read(&as->bounce_buffer_size); + for (;;) { + hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l); + size_t new_size = used + alloc; + size_t actual = + qatomic_cmpxchg(&as->bounce_buffer_size, used, new_size); + if (actual == used) { + l = alloc; + break; + } + used = actual; + } + + if (l == 0) { *plen = 0; return NULL; } - /* Avoid unbounded allocations */ - l = MIN(l, TARGET_PAGE_SIZE); - as->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); - as->bounce.addr = addr; - as->bounce.len = l; + BounceBuffer *bounce = g_malloc0(l + sizeof(BounceBuffer)); + bounce->magic = BOUNCE_BUFFER_MAGIC; memory_region_ref(mr); - as->bounce.mr = mr; + bounce->mr = mr; + bounce->addr = addr; + bounce->len = l; + if (!is_write) { - flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED, - as->bounce.buffer, l); + flatview_read(fv, addr, attrs, + bounce->buffer, l); } *plen = l; - return as->bounce.buffer; + return bounce->buffer; } - memory_region_ref(mr); *plen = flatview_extend_translation(fv, addr, len, mr, xlat, l, is_write, attrs); @@ -3287,12 +3481,11 @@ void *address_space_map(AddressSpace *as, void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, bool is_write, hwaddr access_len) { - if (buffer != as->bounce.buffer) { - MemoryRegion *mr; - ram_addr_t addr1; + MemoryRegion *mr; + ram_addr_t addr1; - mr = memory_region_from_host(buffer, &addr1); - assert(mr != NULL); + mr = memory_region_from_host(buffer, &addr1); + if (mr != NULL) { if (is_write) { invalidate_and_set_dirty(mr, addr1, access_len); } @@ -3302,15 +3495,22 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, memory_region_unref(mr); return; } + + + BounceBuffer *bounce = container_of(buffer, BounceBuffer, buffer); + assert(bounce->magic == BOUNCE_BUFFER_MAGIC); + if (is_write) { - address_space_write(as, as->bounce.addr, MEMTXATTRS_UNSPECIFIED, - as->bounce.buffer, access_len); - } - qemu_vfree(as->bounce.buffer); - as->bounce.buffer = NULL; - memory_region_unref(as->bounce.mr); - /* Clear in_use before reading map_client_list. */ - qatomic_set_mb(&as->bounce.in_use, false); + address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED, + bounce->buffer, access_len); + } + + qatomic_sub(&as->bounce_buffer_size, bounce->len); + bounce->magic = ~BOUNCE_BUFFER_MAGIC; + memory_region_unref(bounce->mr); + g_free(bounce); + /* Write bounce_buffer_size before reading map_client_list. 
*/ + smp_mb(); address_space_notify_map_clients(as); } @@ -3365,7 +3565,7 @@ int64_t address_space_cache_init(MemoryRegionCache *cache, mr = cache->mrs.mr; memory_region_ref(mr); - if (memory_access_is_direct(mr, is_write)) { + if (memory_access_is_direct(mr, is_write, MEMTXATTRS_UNSPECIFIED)) { /* We don't care about the memory attributes here as we're only * doing this if we found actual RAM, which behaves the same * regardless of attributes; so UNSPECIFIED is fine. @@ -3558,13 +3758,8 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, if (l > len) l = len; phys_addr += (addr & ~TARGET_PAGE_MASK); - if (is_write) { - res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr, - attrs, buf, l); - } else { - res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr, - attrs, buf, l); - } + res = address_space_rw(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf, + l, is_write); if (res != MEMTX_OK) { return -1; } @@ -3588,6 +3783,18 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr) return !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); } +bool cpu_physical_memory_is_ram(hwaddr phys_addr) +{ + MemoryRegion *mr; + hwaddr l = 1; + + RCU_READ_LOCK_GUARD(); + mr = address_space_translate(&address_space_memory, phys_addr, &phys_addr, + &l, false, MEMTXATTRS_UNSPECIFIED); + + return memory_region_is_ram(mr); +} + int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) { RAMBlock *block; @@ -3674,18 +3881,19 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) } ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, - start, length); + start + rb->fd_offset, length); if (ret) { ret = -errno; - error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)", - __func__, rb->idstr, start, length, ret); + error_report("%s: Failed to fallocate %s:%" PRIx64 "+%" PRIx64 + " +%zx (%d)", __func__, rb->idstr, start, + rb->fd_offset, length, ret); goto err; } #else ret = -ENOSYS; error_report("%s: fallocate not available/file" - "%s:%" PRIx64 " +%zx (%d)", - __func__, rb->idstr, start, length, ret); + "%s:%" PRIx64 "+%" PRIx64 " +%zx (%d)", __func__, + rb->idstr, start, rb->fd_offset, length, ret); goto err; #endif } @@ -3732,6 +3940,7 @@ int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start, int ret = -1; #ifdef CONFIG_FALLOCATE_PUNCH_HOLE + /* ignore fd_offset with guest_memfd */ ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, start, length); @@ -3936,3 +4145,58 @@ bool ram_block_discard_is_required(void) return qatomic_read(&ram_block_discard_required_cnt) || qatomic_read(&ram_block_coordinated_discard_required_cnt); } + +/* + * Return true if ram is compatible with CPR. Do not exclude rom, + * because the rom file could change in new QEMU. + */ +static bool ram_is_cpr_compatible(RAMBlock *rb) +{ + MemoryRegion *mr = rb->mr; + + if (!mr || !memory_region_is_ram(mr)) { + return true; + } + + /* Ram device is remapped in new QEMU */ + if (memory_region_is_ram_device(mr)) { + return true; + } + + /* + * A file descriptor is passed to new QEMU and remapped, or its backing + * file is reopened and mapped. It must be shared to avoid COW. + */ + if (rb->fd >= 0 && qemu_ram_is_shared(rb)) { + return true; + } + + return false; +} + +/* + * Add a blocker for each volatile ram block. This function should only be + * called after we know that the block is migratable. Non-migratable blocks + * are either re-created in new QEMU, or are handled specially, or are covered + * by a device-level CPR blocker. 
+ */ +void ram_block_add_cpr_blocker(RAMBlock *rb, Error **errp) +{ + assert(qemu_ram_is_migratable(rb)); + + if (ram_is_cpr_compatible(rb)) { + return; + } + + error_setg(&rb->cpr_blocker, + "Memory region %s is not compatible with CPR. share=on is " + "required for memory-backend objects, and aux-ram-share=on is " + "required.", memory_region_name(rb->mr)); + migrate_add_blocker_modes(&rb->cpr_blocker, errp, MIG_MODE_CPR_TRANSFER, + -1); +} + +void ram_block_del_cpr_blocker(RAMBlock *rb) +{ + migrate_del_blocker(&rb->cpr_blocker); +} diff --git a/system/qdev-monitor.c b/system/qdev-monitor.c index 6af6ef7d667..2ac92d0a076 100644 --- a/system/qdev-monitor.c +++ b/system/qdev-monitor.c @@ -22,13 +22,14 @@ #include "monitor/hmp.h" #include "monitor/monitor.h" #include "monitor/qdev.h" -#include "sysemu/arch_init.h" +#include "system/arch_init.h" +#include "system/runstate.h" #include "qapi/error.h" #include "qapi/qapi-commands-qdev.h" -#include "qapi/qmp/dispatch.h" -#include "qapi/qmp/qdict.h" +#include "qapi/qmp-registry.h" +#include "qobject/qdict.h" #include "qapi/qmp/qerror.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qstring.h" #include "qapi/qobject-input-visitor.h" #include "qemu/config-file.h" #include "qemu/error-report.h" @@ -36,7 +37,7 @@ #include "qemu/option.h" #include "qemu/qemu-print.h" #include "qemu/option_int.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "migration/misc.h" #include "qemu/cutils.h" #include "hw/qdev-properties.h" @@ -55,12 +56,18 @@ typedef struct QDevAlias } QDevAlias; /* default virtio transport per architecture */ -#define QEMU_ARCH_VIRTIO_PCI (QEMU_ARCH_ALPHA | QEMU_ARCH_ARM | \ - QEMU_ARCH_HPPA | QEMU_ARCH_I386 | \ - QEMU_ARCH_MIPS | QEMU_ARCH_PPC | \ - QEMU_ARCH_RISCV | QEMU_ARCH_SH4 | \ - QEMU_ARCH_SPARC | QEMU_ARCH_XTENSA | \ - QEMU_ARCH_LOONGARCH) +#define QEMU_ARCH_VIRTIO_PCI (QEMU_ARCH_ALPHA | \ + QEMU_ARCH_ARM | \ + QEMU_ARCH_HPPA | \ + QEMU_ARCH_I386 | \ + QEMU_ARCH_LOONGARCH | \ + QEMU_ARCH_MIPS | \ + QEMU_ARCH_OPENRISC | \ + QEMU_ARCH_PPC | \ + QEMU_ARCH_RISCV | \ + QEMU_ARCH_SH4 | \ + QEMU_ARCH_SPARC | \ + QEMU_ARCH_XTENSA) #define QEMU_ARCH_VIRTIO_CCW (QEMU_ARCH_S390X) #define QEMU_ARCH_VIRTIO_MMIO (QEMU_ARCH_M68K) @@ -125,7 +132,7 @@ static const char *qdev_class_get_alias(DeviceClass *dc) for (i = 0; qdev_alias_table[i].typename; i++) { if (qdev_alias_table[i].arch_mask && - !(qdev_alias_table[i].arch_mask & arch_type)) { + !qemu_arch_available(qdev_alias_table[i].arch_mask)) { continue; } @@ -211,7 +218,7 @@ static const char *find_typename_by_alias(const char *alias) for (i = 0; qdev_alias_table[i].alias; i++) { if (qdev_alias_table[i].arch_mask && - !(qdev_alias_table[i].arch_mask & arch_type)) { + !qemu_arch_available(qdev_alias_table[i].arch_mask)) { continue; } @@ -256,8 +263,7 @@ static DeviceClass *qdev_get_device_class(const char **driver, Error **errp) } dc = DEVICE_CLASS(oc); - if (!dc->user_creatable || - (phase_check(PHASE_MACHINE_READY) && !dc->hotpluggable)) { + if (!dc->user_creatable) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "driver", "a pluggable device type"); return NULL; @@ -341,7 +347,7 @@ static Object *qdev_get_peripheral(void) static Object *dev; if (dev == NULL) { - dev = container_get(qdev_get_machine(), "/peripheral"); + dev = machine_get_container("peripheral"); } return dev; @@ -352,7 +358,7 @@ static Object *qdev_get_peripheral_anon(void) static Object *dev; if (dev == NULL) { - dev = container_get(qdev_get_machine(), "/peripheral-anon"); + dev = 
machine_get_container("peripheral-anon"); } return dev; @@ -622,8 +628,9 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts, DeviceClass *dc; const char *driver, *path; char *id; - DeviceState *dev = NULL; + DeviceState *dev; BusState *bus = NULL; + QDict *properties; driver = qdict_get_try_str(opts, "driver"); if (!driver) { @@ -668,12 +675,7 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts, return NULL; } - if (phase_check(PHASE_MACHINE_READY) && bus && !qbus_is_hotpluggable(bus)) { - error_setg(errp, "Bus '%s' does not support hotplugging", bus->name); - return NULL; - } - - if (!migration_is_idle()) { + if (migration_is_running()) { error_setg(errp, "device_add not allowed while migrating"); return NULL; } @@ -682,17 +684,9 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts, dev = qdev_new(driver); /* Check whether the hotplug is allowed by the machine */ - if (phase_check(PHASE_MACHINE_READY)) { - if (!qdev_hotplug_allowed(dev, errp)) { - goto err_del_dev; - } - - if (!bus && !qdev_get_machine_hotplug_handler(dev)) { - /* No bus, no machine hotplug handler --> device is not hotpluggable */ - error_setg(errp, "Device '%s' can not be hotplugged on this machine", - driver); - goto err_del_dev; - } + if (phase_check(PHASE_MACHINE_READY) && + !qdev_hotplug_allowed(dev, bus, errp)) { + goto err_del_dev; } /* @@ -705,13 +699,14 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts, } /* set properties */ - dev->opts = qdict_clone_shallow(opts); - qdict_del(dev->opts, "driver"); - qdict_del(dev->opts, "bus"); - qdict_del(dev->opts, "id"); + properties = qdict_clone_shallow(opts); + qdict_del(properties, "driver"); + qdict_del(properties, "bus"); + qdict_del(properties, "id"); - object_set_properties_from_keyval(&dev->parent_obj, dev->opts, from_json, + object_set_properties_from_keyval(&dev->parent_obj, properties, from_json, errp); + qobject_unref(properties); if (*errp) { goto err_del_dev; } @@ -722,10 +717,9 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts, return dev; err_del_dev: - if (dev) { - object_unparent(OBJECT(dev)); - object_unref(OBJECT(dev)); - } + object_unparent(OBJECT(dev)); + object_unref(OBJECT(dev)); + return NULL; } @@ -745,19 +739,18 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) #define qdev_printf(fmt, ...) monitor_printf(mon, "%*s" fmt, indent, "", ## __VA_ARGS__) -static void qdev_print_props(Monitor *mon, DeviceState *dev, Property *props, +static void qdev_print_props(Monitor *mon, DeviceState *dev, DeviceClass *dc, int indent) { - if (!props) - return; - for (; props->name; props++) { + for (int i = 0, n = dc->props_count_; i < n; ++i) { + const Property *prop = &dc->props_[i]; char *value; - char *legacy_name = g_strdup_printf("legacy-%s", props->name); + char *legacy_name = g_strdup_printf("legacy-%s", prop->name); if (object_property_get_type(OBJECT(dev), legacy_name, NULL)) { value = object_property_get_str(OBJECT(dev), legacy_name, NULL); } else { - value = object_property_print(OBJECT(dev), props->name, true, + value = object_property_print(OBJECT(dev), prop->name, true, NULL); } g_free(legacy_name); @@ -765,7 +758,7 @@ static void qdev_print_props(Monitor *mon, DeviceState *dev, Property *props, if (!value) { continue; } - qdev_printf("%s = %s\n", props->name, + qdev_printf("%s = %s\n", prop->name, *value ? 
value : ""); g_free(value); } @@ -805,7 +798,7 @@ static void qdev_print(Monitor *mon, DeviceState *dev, int indent) } class = object_get_class(OBJECT(dev)); do { - qdev_print_props(mon, dev, DEVICE_CLASS(class)->props_, indent); + qdev_print_props(mon, dev, DEVICE_CLASS(class), indent); class = object_class_get_parent(class); } while (class != object_class_by_name(TYPE_DEVICE)); bus_print_dev(dev->parent_bus, mon, dev, indent); @@ -849,18 +842,9 @@ void hmp_info_qdm(Monitor *mon, const QDict *qdict) void qmp_device_add(QDict *qdict, QObject **ret_data, Error **errp) { - QemuOpts *opts; DeviceState *dev; - opts = qemu_opts_from_qdict(qemu_find_opts("device"), qdict, errp); - if (!opts) { - return; - } - if (!monitor_cur_is_qmp() && qdev_device_help(opts)) { - qemu_opts_del(opts); - return; - } - dev = qdev_device_add(opts, errp); + dev = qdev_device_add_from_qdict(qdict, true, errp); if (!dev) { /* * Drain all pending RCU callbacks. This is done because @@ -872,20 +856,24 @@ void qmp_device_add(QDict *qdict, QObject **ret_data, Error **errp) * to the user */ drain_call_rcu(); - - qemu_opts_del(opts); - return; } object_unref(OBJECT(dev)); } -static DeviceState *find_device_state(const char *id, Error **errp) +/* + * Note that creating new APIs using error classes other than GenericError is + * not recommended. Set use_generic_error=true for new interfaces. + */ +static DeviceState *find_device_state(const char *id, bool use_generic_error, + Error **errp) { Object *obj = object_resolve_path_at(qdev_get_peripheral(), id); DeviceState *dev; if (!obj) { - error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, + error_set(errp, + (use_generic_error ? + ERROR_CLASS_GENERIC_ERROR : ERROR_CLASS_DEVICE_NOT_FOUND), "Device '%s' not found", id); return NULL; } @@ -901,28 +889,15 @@ static DeviceState *find_device_state(const char *id, Error **errp) void qdev_unplug(DeviceState *dev, Error **errp) { - DeviceClass *dc = DEVICE_GET_CLASS(dev); HotplugHandler *hotplug_ctrl; HotplugHandlerClass *hdc; Error *local_err = NULL; - if (qdev_unplug_blocked(dev, errp)) { + if (!qdev_hotunplug_allowed(dev, errp)) { return; } - if (dev->parent_bus && !qbus_is_hotpluggable(dev->parent_bus)) { - error_setg(errp, "Bus '%s' does not support hotplugging", - dev->parent_bus->name); - return; - } - - if (!dc->hotpluggable) { - error_setg(errp, "Device '%s' does not support hotplugging", - object_get_typename(OBJECT(dev))); - return; - } - - if (!migration_is_idle() && !dev->allow_unplug_during_migration) { + if (migration_is_running() && !dev->allow_unplug_during_migration) { error_setg(errp, "device_del not allowed while migrating"); return; } @@ -950,7 +925,7 @@ void qdev_unplug(DeviceState *dev, Error **errp) void qmp_device_del(const char *id, Error **errp) { - DeviceState *dev = find_device_state(id, errp); + DeviceState *dev = find_device_state(id, false, errp); if (dev != NULL) { if (dev->pending_deleted_event && (dev->pending_deleted_expires_ms == 0 || @@ -964,11 +939,74 @@ void qmp_device_del(const char *id, Error **errp) } } +int qdev_sync_config(DeviceState *dev, Error **errp) +{ + DeviceClass *dc = DEVICE_GET_CLASS(dev); + + if (!dc->sync_config) { + error_setg(errp, "device-sync-config is not supported for '%s'", + object_get_typename(OBJECT(dev))); + return -ENOTSUP; + } + + return dc->sync_config(dev, errp); +} + +void qmp_device_sync_config(const char *id, Error **errp) +{ + DeviceState *dev; + + /* + * During migration there is a race between syncing`configuration + * and migrating it (if migrate first, that 
target would get + * outdated version), so let's just not allow it. + */ + + if (migration_is_running()) { + error_setg(errp, "Config synchronization is not allowed " + "during migration"); + return; + } + + dev = find_device_state(id, true, errp); + if (!dev) { + return; + } + + qdev_sync_config(dev, errp); +} + void hmp_device_add(Monitor *mon, const QDict *qdict) { Error *err = NULL; + QemuOpts *opts; + DeviceState *dev; - qmp_device_add((QDict *)qdict, NULL, &err); + opts = qemu_opts_from_qdict(qemu_find_opts("device"), qdict, &err); + if (!opts) { + goto out; + } + if (qdev_device_help(opts)) { + qemu_opts_del(opts); + return; + } + dev = qdev_device_add(opts, &err); + if (!dev) { + /* + * Drain all pending RCU callbacks. This is done because + * some bus related operations can delay a device removal + * (in this case this can happen if device is added and then + * removed due to a configuration error) + * to a RCU callback, but user might expect that this interface + * will finish its job completely once qmp command returns result + * to the user + */ + drain_call_rcu(); + + qemu_opts_del(opts); + } + object_unref(dev); +out: hmp_handle_error(mon, err); } @@ -1034,7 +1072,7 @@ static GSList *qdev_build_hotpluggable_device_list(Object *peripheral) static void peripheral_device_del_completion(ReadLineState *rs, const char *str) { - Object *peripheral = container_get(qdev_get_machine(), "/peripheral"); + Object *peripheral = machine_get_container("peripheral"); GSList *list, *item; list = qdev_build_hotpluggable_device_list(peripheral); @@ -1070,7 +1108,7 @@ BlockBackend *blk_by_qdev_id(const char *id, Error **errp) GLOBAL_STATE_CODE(); - dev = find_device_state(id, errp); + dev = find_device_state(id, false, errp); if (dev == NULL) { return NULL; } diff --git a/system/qemu-seccomp.c b/system/qemu-seccomp.c index 98ffce075c3..f8e1238b914 100644 --- a/system/qemu-seccomp.c +++ b/system/qemu-seccomp.c @@ -20,7 +20,7 @@ #include "qemu/module.h" #include #include -#include "sysemu/seccomp.h" +#include "system/seccomp.h" #include /* For some architectures (notably ARM) cacheflush is not supported until @@ -47,10 +47,10 @@ const struct scmp_arg_cmp sched_setscheduler_arg[] = { }; /* - * See 'NOTES' in 'man 2 clone' - s390 & cross have 'flags' in + * See 'NOTES' in 'man 2 clone' - s390 has 'flags' in * different position to other architectures */ -#if defined(HOST_S390X) || defined(HOST_S390) || defined(HOST_CRIS) +#if defined(HOST_S390X) || defined(HOST_S390) #define CLONE_FLAGS_ARG 1 #else #define CLONE_FLAGS_ARG 0 diff --git a/system/qtest.c b/system/qtest.c index 12703a20455..fa42c9f9215 100644 --- a/system/qtest.c +++ b/system/qtest.c @@ -13,22 +13,23 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "sysemu/qtest.h" -#include "sysemu/runstate.h" +#include "system/qtest.h" +#include "system/runstate.h" #include "chardev/char-fe.h" -#include "exec/ioport.h" -#include "exec/memory.h" +#include "system/ioport.h" +#include "system/memory.h" #include "exec/tswap.h" #include "hw/qdev-core.h" #include "hw/irq.h" #include "hw/core/cpu.h" #include "qemu/accel.h" -#include "sysemu/cpu-timers.h" +#include "system/cpu-timers.h" #include "qemu/config-file.h" #include "qemu/option.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "qemu/cutils.h" +#include "qemu/target-info.h" #include "qom/object_interfaces.h" #define MAX_IRQ 256 @@ -78,6 +79,11 @@ static void *qtest_server_send_opaque; * let you adjust the value of the clock (monotonically). 
All the commands * return the current value of the clock in nanoseconds. * + * If the commands FAIL then time wasn't advanced which is likely + * because the machine was in a paused state or no timer events exist + * in the future. This will cause qtest to abort and the test will + * need to check its assumptions. + * * .. code-block:: none * * > clock_step @@ -260,7 +266,7 @@ static int hex2nib(char ch) } } -void qtest_send_prefix(CharBackend *chr) +static void qtest_log_timestamp(void) { if (!qtest_log_fp || !qtest_opened) { return; @@ -277,7 +283,7 @@ static void G_GNUC_PRINTF(1, 2) qtest_log_send(const char *fmt, ...) return; } - qtest_send_prefix(NULL); + qtest_log_timestamp(); va_start(ap, fmt); vfprintf(qtest_log_fp, fmt, ap); @@ -296,6 +302,7 @@ static void qtest_server_char_be_send(void *opaque, const char *str) static void qtest_send(CharBackend *chr, const char *str) { + qtest_log_timestamp(); qtest_server_send(qtest_server_send_opaque, str); } @@ -319,7 +326,6 @@ static void qtest_irq_handler(void *opaque, int n, int level) if (irq_levels[n] != level) { CharBackend *chr = &qtest->qtest_chr; irq_levels[n] = level; - qtest_send_prefix(chr); qtest_sendf(chr, "IRQ %s %d\n", level ? "raise" : "lower", n); } @@ -375,19 +381,16 @@ static void qtest_process_command(CharBackend *chr, gchar **words) is_outbound = words[0][14] == 'o'; dev = DEVICE(object_resolve_path(words[1], NULL)); if (!dev) { - qtest_send_prefix(chr); qtest_send(chr, "FAIL Unknown device\n"); return; } if (is_named && !is_outbound) { - qtest_send_prefix(chr); qtest_send(chr, "FAIL Interception of named in-GPIOs not yet supported\n"); return; } if (irq_intercept_dev) { - qtest_send_prefix(chr); if (irq_intercept_dev != dev) { qtest_send(chr, "FAIL IRQ intercept already enabled\n"); } else { @@ -414,7 +417,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words) } } - qtest_send_prefix(chr); if (interception_succeeded) { irq_intercept_dev = dev; qtest_send(chr, "OK\n"); @@ -433,7 +435,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words) dev = DEVICE(object_resolve_path(words[1], NULL)); if (!dev) { - qtest_send_prefix(chr); qtest_send(chr, "FAIL Unknown device\n"); return; } @@ -452,7 +453,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words) irq = qdev_get_gpio_in_named(dev, name, num); qemu_set_irq(irq, level); - qtest_send_prefix(chr); qtest_send(chr, "OK\n"); } else if (strcmp(words[0], "outb") == 0 || strcmp(words[0], "outw") == 0 || @@ -475,7 +475,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words) } else if (words[0][3] == 'l') { cpu_outl(addr, value); } - qtest_send_prefix(chr); qtest_send(chr, "OK\n"); } else if (strcmp(words[0], "inb") == 0 || strcmp(words[0], "inw") == 0 || @@ -496,7 +495,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words) } else if (words[0][2] == 'l') { value = cpu_inl(addr); } - qtest_send_prefix(chr); qtest_sendf(chr, "OK 0x%04x\n", value); } else if (strcmp(words[0], "writeb") == 0 || strcmp(words[0], "writew") == 0 || @@ -532,7 +530,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words) address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, &data, 8); } - qtest_send_prefix(chr); qtest_send(chr, "OK\n"); } else if (strcmp(words[0], "readb") == 0 || strcmp(words[0], "readw") == 0 || @@ -566,7 +563,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words) &value, 8); tswap64s(&value); } - qtest_send_prefix(chr); qtest_sendf(chr, "OK 0x%016" PRIx64 "\n", 
value); } else if (strcmp(words[0], "read") == 0) { g_autoptr(GString) enc = NULL; @@ -588,7 +584,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words) enc = qemu_hexdump_line(NULL, data, len, 0, 0); - qtest_send_prefix(chr); qtest_sendf(chr, "OK 0x%s\n", enc->str); g_free(data); @@ -608,7 +603,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words) address_space_read(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, data, len); b64_data = g_base64_encode(data, len); - qtest_send_prefix(chr); qtest_sendf(chr, "OK %s\n", b64_data); g_free(data); @@ -644,7 +638,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words) len); g_free(data); - qtest_send_prefix(chr); qtest_send(chr, "OK\n"); } else if (strcmp(words[0], "memset") == 0) { uint64_t addr, len; @@ -668,7 +661,6 @@ static void qtest_process_command(CharBackend *chr, gchar **words) g_free(data); } - qtest_send_prefix(chr); qtest_send(chr, "OK\n"); } else if (strcmp(words[0], "b64write") == 0) { uint64_t addr, len; @@ -700,17 +692,16 @@ static void qtest_process_command(CharBackend *chr, gchar **words) address_space_write(first_cpu->as, addr, MEMTXATTRS_UNSPECIFIED, data, len); - qtest_send_prefix(chr); qtest_send(chr, "OK\n"); } else if (strcmp(words[0], "endianness") == 0) { - qtest_send_prefix(chr); - if (target_words_bigendian()) { + if (target_big_endian()) { qtest_sendf(chr, "OK big\n"); } else { qtest_sendf(chr, "OK little\n"); } } else if (qtest_enabled() && strcmp(words[0], "clock_step") == 0) { - int64_t ns; + int64_t old_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + int64_t ns, new_ns; if (words[1]) { int ret = qemu_strtoi64(words[1], NULL, 0, &ns); @@ -718,18 +709,24 @@ static void qtest_process_command(CharBackend *chr, gchar **words) } else { ns = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL, QEMU_TIMER_ATTR_ALL); + if (ns < 0) { + qtest_send(chr, "FAIL " + "cannot advance clock to the next deadline " + "because there is no pending deadline\n"); + return; + } + } + new_ns = qemu_clock_advance_virtual_time(old_ns + ns); + if (new_ns > old_ns) { + qtest_sendf(chr, "OK %"PRIi64"\n", new_ns); + } else { + qtest_sendf(chr, "FAIL could not advance time\n"); } - qemu_clock_advance_virtual_time( - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns); - qtest_send_prefix(chr); - qtest_sendf(chr, "OK %"PRIi64"\n", - (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); } else if (strcmp(words[0], "module_load") == 0) { Error *local_err = NULL; int rv; g_assert(words[1] && words[2]); - qtest_send_prefix(chr); rv = module_load(words[1], words[2], &local_err); if (rv > 0) { qtest_sendf(chr, "OK\n"); @@ -740,43 +737,37 @@ static void qtest_process_command(CharBackend *chr, gchar **words) qtest_sendf(chr, "FAIL\n"); } } else if (qtest_enabled() && strcmp(words[0], "clock_set") == 0) { - int64_t ns; + int64_t ns, new_ns; int ret; g_assert(words[1]); ret = qemu_strtoi64(words[1], NULL, 0, &ns); g_assert(ret == 0); - qemu_clock_advance_virtual_time(ns); - qtest_send_prefix(chr); - qtest_sendf(chr, "OK %"PRIi64"\n", - (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + new_ns = qemu_clock_advance_virtual_time(ns); + qtest_sendf(chr, "%s %"PRIi64"\n", + new_ns == ns ? "OK" : "FAIL", new_ns); } else if (process_command_cb && process_command_cb(chr, words)) { /* Command got consumed by the callback handler */ } else { - qtest_send_prefix(chr); qtest_sendf(chr, "FAIL Unknown command '%s'\n", words[0]); } } +/* + * Process as much of @inbuf as we can in newline terminated chunks. 
+ * Remove the processed commands from @inbuf as we go. + */ static void qtest_process_inbuf(CharBackend *chr, GString *inbuf) { char *end; while ((end = strchr(inbuf->str, '\n')) != NULL) { - size_t offset; - GString *cmd; - gchar **words; - - offset = end - inbuf->str; + size_t len = end - inbuf->str; + g_autofree char *cmd = g_strndup(inbuf->str, len); + g_auto(GStrv) words = g_strsplit(cmd, " ", 0); - cmd = g_string_new_len(inbuf->str, offset); - g_string_erase(inbuf, 0, offset + 1); - - words = g_strsplit(cmd->str, " ", 0); + g_string_erase(inbuf, 0, len + 1); qtest_process_command(chr, words); - g_strfreev(words); - - g_string_free(cmd, TRUE); } } @@ -1004,7 +995,7 @@ static char *qtest_get_chardev(Object *obj, Error **errp) return g_strdup(q->chr_name); } -static void qtest_class_init(ObjectClass *oc, void *data) +static void qtest_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); @@ -1022,7 +1013,7 @@ static const TypeInfo qtest_info = { .parent = TYPE_OBJECT, .class_init = qtest_class_init, .instance_size = sizeof(QTest), - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/system/ram-block-attributes.c b/system/ram-block-attributes.c new file mode 100644 index 00000000000..68e8a027032 --- /dev/null +++ b/system/ram-block-attributes.c @@ -0,0 +1,444 @@ +/* + * QEMU ram block attributes + * + * Copyright Intel + * + * Author: + * Chenyi Qiang + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "system/ramblock.h" +#include "trace.h" + +OBJECT_DEFINE_SIMPLE_TYPE_WITH_INTERFACES(RamBlockAttributes, + ram_block_attributes, + RAM_BLOCK_ATTRIBUTES, + OBJECT, + { TYPE_RAM_DISCARD_MANAGER }, + { }) + +static size_t +ram_block_attributes_get_block_size(const RamBlockAttributes *attr) +{ + /* + * Because page conversion could be manipulated in the size of at least 4K + * or 4K aligned, Use the host page size as the granularity to track the + * memory attribute. 
+ */ + g_assert(attr && attr->ram_block); + g_assert(attr->ram_block->page_size == qemu_real_host_page_size()); + return attr->ram_block->page_size; +} + + +static bool +ram_block_attributes_rdm_is_populated(const RamDiscardManager *rdm, + const MemoryRegionSection *section) +{ + const RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm); + const size_t block_size = ram_block_attributes_get_block_size(attr); + const uint64_t first_bit = section->offset_within_region / block_size; + const uint64_t last_bit = + first_bit + int128_get64(section->size) / block_size - 1; + unsigned long first_discarded_bit; + + first_discarded_bit = find_next_zero_bit(attr->bitmap, last_bit + 1, + first_bit); + return first_discarded_bit > last_bit; +} + +typedef int (*ram_block_attributes_section_cb)(MemoryRegionSection *s, + void *arg); + +static int +ram_block_attributes_notify_populate_cb(MemoryRegionSection *section, + void *arg) +{ + RamDiscardListener *rdl = arg; + + return rdl->notify_populate(rdl, section); +} + +static int +ram_block_attributes_notify_discard_cb(MemoryRegionSection *section, + void *arg) +{ + RamDiscardListener *rdl = arg; + + rdl->notify_discard(rdl, section); + return 0; +} + +static int +ram_block_attributes_for_each_populated_section(const RamBlockAttributes *attr, + MemoryRegionSection *section, + void *arg, + ram_block_attributes_section_cb cb) +{ + unsigned long first_bit, last_bit; + uint64_t offset, size; + const size_t block_size = ram_block_attributes_get_block_size(attr); + int ret = 0; + + first_bit = section->offset_within_region / block_size; + first_bit = find_next_bit(attr->bitmap, attr->bitmap_size, + first_bit); + + while (first_bit < attr->bitmap_size) { + MemoryRegionSection tmp = *section; + + offset = first_bit * block_size; + last_bit = find_next_zero_bit(attr->bitmap, attr->bitmap_size, + first_bit + 1) - 1; + size = (last_bit - first_bit + 1) * block_size; + + if (!memory_region_section_intersect_range(&tmp, offset, size)) { + break; + } + + ret = cb(&tmp, arg); + if (ret) { + error_report("%s: Failed to notify RAM discard listener: %s", + __func__, strerror(-ret)); + break; + } + + first_bit = find_next_bit(attr->bitmap, attr->bitmap_size, + last_bit + 2); + } + + return ret; +} + +static int +ram_block_attributes_for_each_discarded_section(const RamBlockAttributes *attr, + MemoryRegionSection *section, + void *arg, + ram_block_attributes_section_cb cb) +{ + unsigned long first_bit, last_bit; + uint64_t offset, size; + const size_t block_size = ram_block_attributes_get_block_size(attr); + int ret = 0; + + first_bit = section->offset_within_region / block_size; + first_bit = find_next_zero_bit(attr->bitmap, attr->bitmap_size, + first_bit); + + while (first_bit < attr->bitmap_size) { + MemoryRegionSection tmp = *section; + + offset = first_bit * block_size; + last_bit = find_next_bit(attr->bitmap, attr->bitmap_size, + first_bit + 1) - 1; + size = (last_bit - first_bit + 1) * block_size; + + if (!memory_region_section_intersect_range(&tmp, offset, size)) { + break; + } + + ret = cb(&tmp, arg); + if (ret) { + error_report("%s: Failed to notify RAM discard listener: %s", + __func__, strerror(-ret)); + break; + } + + first_bit = find_next_zero_bit(attr->bitmap, + attr->bitmap_size, + last_bit + 2); + } + + return ret; +} + +static uint64_t +ram_block_attributes_rdm_get_min_granularity(const RamDiscardManager *rdm, + const MemoryRegion *mr) +{ + const RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm); + + g_assert(mr == attr->ram_block->mr); + return 
ram_block_attributes_get_block_size(attr); +} + +static void +ram_block_attributes_rdm_register_listener(RamDiscardManager *rdm, + RamDiscardListener *rdl, + MemoryRegionSection *section) +{ + RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm); + int ret; + + g_assert(section->mr == attr->ram_block->mr); + rdl->section = memory_region_section_new_copy(section); + + QLIST_INSERT_HEAD(&attr->rdl_list, rdl, next); + + ret = ram_block_attributes_for_each_populated_section(attr, section, rdl, + ram_block_attributes_notify_populate_cb); + if (ret) { + error_report("%s: Failed to register RAM discard listener: %s", + __func__, strerror(-ret)); + exit(1); + } +} + +static void +ram_block_attributes_rdm_unregister_listener(RamDiscardManager *rdm, + RamDiscardListener *rdl) +{ + RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm); + int ret; + + g_assert(rdl->section); + g_assert(rdl->section->mr == attr->ram_block->mr); + + if (rdl->double_discard_supported) { + rdl->notify_discard(rdl, rdl->section); + } else { + ret = ram_block_attributes_for_each_populated_section(attr, + rdl->section, rdl, ram_block_attributes_notify_discard_cb); + if (ret) { + error_report("%s: Failed to unregister RAM discard listener: %s", + __func__, strerror(-ret)); + exit(1); + } + } + + memory_region_section_free_copy(rdl->section); + rdl->section = NULL; + QLIST_REMOVE(rdl, next); +} + +typedef struct RamBlockAttributesReplayData { + ReplayRamDiscardState fn; + void *opaque; +} RamBlockAttributesReplayData; + +static int ram_block_attributes_rdm_replay_cb(MemoryRegionSection *section, + void *arg) +{ + RamBlockAttributesReplayData *data = arg; + + return data->fn(section, data->opaque); +} + +static int +ram_block_attributes_rdm_replay_populated(const RamDiscardManager *rdm, + MemoryRegionSection *section, + ReplayRamDiscardState replay_fn, + void *opaque) +{ + RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm); + RamBlockAttributesReplayData data = { .fn = replay_fn, .opaque = opaque }; + + g_assert(section->mr == attr->ram_block->mr); + return ram_block_attributes_for_each_populated_section(attr, section, &data, + ram_block_attributes_rdm_replay_cb); +} + +static int +ram_block_attributes_rdm_replay_discarded(const RamDiscardManager *rdm, + MemoryRegionSection *section, + ReplayRamDiscardState replay_fn, + void *opaque) +{ + RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(rdm); + RamBlockAttributesReplayData data = { .fn = replay_fn, .opaque = opaque }; + + g_assert(section->mr == attr->ram_block->mr); + return ram_block_attributes_for_each_discarded_section(attr, section, &data, + ram_block_attributes_rdm_replay_cb); +} + +static bool +ram_block_attributes_is_valid_range(RamBlockAttributes *attr, uint64_t offset, + uint64_t size) +{ + MemoryRegion *mr = attr->ram_block->mr; + + g_assert(mr); + + uint64_t region_size = memory_region_size(mr); + const size_t block_size = ram_block_attributes_get_block_size(attr); + + if (!QEMU_IS_ALIGNED(offset, block_size) || + !QEMU_IS_ALIGNED(size, block_size)) { + return false; + } + if (offset + size <= offset) { + return false; + } + if (offset + size > region_size) { + return false; + } + return true; +} + +static void ram_block_attributes_notify_discard(RamBlockAttributes *attr, + uint64_t offset, + uint64_t size) +{ + RamDiscardListener *rdl; + + QLIST_FOREACH(rdl, &attr->rdl_list, next) { + MemoryRegionSection tmp = *rdl->section; + + if (!memory_region_section_intersect_range(&tmp, offset, size)) { + continue; + } + rdl->notify_discard(rdl, &tmp); + } +} + +static int 
+ram_block_attributes_notify_populate(RamBlockAttributes *attr, + uint64_t offset, uint64_t size) +{ + RamDiscardListener *rdl; + int ret = 0; + + QLIST_FOREACH(rdl, &attr->rdl_list, next) { + MemoryRegionSection tmp = *rdl->section; + + if (!memory_region_section_intersect_range(&tmp, offset, size)) { + continue; + } + ret = rdl->notify_populate(rdl, &tmp); + if (ret) { + break; + } + } + + return ret; +} + +int ram_block_attributes_state_change(RamBlockAttributes *attr, + uint64_t offset, uint64_t size, + bool to_discard) +{ + const size_t block_size = ram_block_attributes_get_block_size(attr); + const unsigned long first_bit = offset / block_size; + const unsigned long nbits = size / block_size; + const unsigned long last_bit = first_bit + nbits - 1; + const bool is_discarded = find_next_bit(attr->bitmap, attr->bitmap_size, + first_bit) > last_bit; + const bool is_populated = find_next_zero_bit(attr->bitmap, + attr->bitmap_size, first_bit) > last_bit; + unsigned long bit; + int ret = 0; + + if (!ram_block_attributes_is_valid_range(attr, offset, size)) { + error_report("%s, invalid range: offset 0x%" PRIx64 ", size " + "0x%" PRIx64, __func__, offset, size); + return -EINVAL; + } + + trace_ram_block_attributes_state_change(offset, size, + is_discarded ? "discarded" : + is_populated ? "populated" : + "mixture", + to_discard ? "discarded" : + "populated"); + if (to_discard) { + if (is_discarded) { + /* Already private */ + } else if (is_populated) { + /* Completely shared */ + bitmap_clear(attr->bitmap, first_bit, nbits); + ram_block_attributes_notify_discard(attr, offset, size); + } else { + /* Unexpected mixture: process individual blocks */ + for (bit = first_bit; bit < first_bit + nbits; bit++) { + if (!test_bit(bit, attr->bitmap)) { + continue; + } + clear_bit(bit, attr->bitmap); + ram_block_attributes_notify_discard(attr, bit * block_size, + block_size); + } + } + } else { + if (is_populated) { + /* Already shared */ + } else if (is_discarded) { + /* Completely private */ + bitmap_set(attr->bitmap, first_bit, nbits); + ret = ram_block_attributes_notify_populate(attr, offset, size); + } else { + /* Unexpected mixture: process individual blocks */ + for (bit = first_bit; bit < first_bit + nbits; bit++) { + if (test_bit(bit, attr->bitmap)) { + continue; + } + set_bit(bit, attr->bitmap); + ret = ram_block_attributes_notify_populate(attr, + bit * block_size, + block_size); + if (ret) { + break; + } + } + } + } + + return ret; +} + +RamBlockAttributes *ram_block_attributes_create(RAMBlock *ram_block) +{ + const int block_size = qemu_real_host_page_size(); + RamBlockAttributes *attr; + MemoryRegion *mr = ram_block->mr; + + attr = RAM_BLOCK_ATTRIBUTES(object_new(TYPE_RAM_BLOCK_ATTRIBUTES)); + + attr->ram_block = ram_block; + if (memory_region_set_ram_discard_manager(mr, RAM_DISCARD_MANAGER(attr))) { + object_unref(OBJECT(attr)); + return NULL; + } + attr->bitmap_size = + ROUND_UP(int128_get64(mr->size), block_size) / block_size; + attr->bitmap = bitmap_new(attr->bitmap_size); + + return attr; +} + +void ram_block_attributes_destroy(RamBlockAttributes *attr) +{ + g_assert(attr); + + g_free(attr->bitmap); + memory_region_set_ram_discard_manager(attr->ram_block->mr, NULL); + object_unref(OBJECT(attr)); +} + +static void ram_block_attributes_init(Object *obj) +{ + RamBlockAttributes *attr = RAM_BLOCK_ATTRIBUTES(obj); + + QLIST_INIT(&attr->rdl_list); +} + +static void ram_block_attributes_finalize(Object *obj) +{ +} + +static void ram_block_attributes_class_init(ObjectClass *klass, + const void 
*data) +{ + RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_CLASS(klass); + + rdmc->get_min_granularity = ram_block_attributes_rdm_get_min_granularity; + rdmc->register_listener = ram_block_attributes_rdm_register_listener; + rdmc->unregister_listener = ram_block_attributes_rdm_unregister_listener; + rdmc->is_populated = ram_block_attributes_rdm_is_populated; + rdmc->replay_populated = ram_block_attributes_rdm_replay_populated; + rdmc->replay_discarded = ram_block_attributes_rdm_replay_discarded; +} diff --git a/system/rtc.c b/system/rtc.c index dc44576686e..56951288c40 100644 --- a/system/rtc.c +++ b/system/rtc.c @@ -29,9 +29,9 @@ #include "qemu/option.h" #include "qemu/timer.h" #include "qom/object.h" -#include "sysemu/replay.h" -#include "sysemu/sysemu.h" -#include "sysemu/rtc.h" +#include "system/replay.h" +#include "system/system.h" +#include "system/rtc.h" #include "hw/rtc/mc146818rtc.h" static enum { @@ -62,7 +62,7 @@ static time_t qemu_ref_timedate(QEMUClockType clock) } break; default: - assert(0); + g_assert_not_reached(); } return value; } diff --git a/system/runstate-action.c b/system/runstate-action.c index ae0761a9c3a..f912bc837f2 100644 --- a/system/runstate-action.c +++ b/system/runstate-action.c @@ -7,8 +7,8 @@ */ #include "qemu/osdep.h" -#include "sysemu/runstate-action.h" -#include "sysemu/watchdog.h" +#include "system/runstate-action.h" +#include "system/watchdog.h" #include "qemu/config-file.h" #include "qapi/error.h" #include "qemu/option_int.h" diff --git a/system/runstate-hmp-cmds.c b/system/runstate-hmp-cmds.c index 2df670f0c06..be1d6769926 100644 --- a/system/runstate-hmp-cmds.c +++ b/system/runstate-hmp-cmds.c @@ -19,7 +19,7 @@ #include "monitor/monitor.h" #include "qapi/error.h" #include "qapi/qapi-commands-run-state.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/accel.h" void hmp_info_status(Monitor *mon, const QDict *qdict) diff --git a/system/runstate.c b/system/runstate.c index c833316f6de..6178b0091a3 100644 --- a/system/runstate.c +++ b/system/runstate.c @@ -32,6 +32,7 @@ #include "exec/cpu-common.h" #include "gdbstub/syscalls.h" #include "hw/boards.h" +#include "hw/resettable.h" #include "migration/misc.h" #include "migration/postcopy-ram.h" #include "monitor/monitor.h" @@ -50,14 +51,14 @@ #include "qemu/thread.h" #include "qom/object.h" #include "qom/object_interfaces.h" -#include "sysemu/cpus.h" -#include "sysemu/qtest.h" -#include "sysemu/replay.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "sysemu/runstate-action.h" -#include "sysemu/sysemu.h" -#include "sysemu/tpm.h" +#include "system/cpus.h" +#include "system/qtest.h" +#include "system/replay.h" +#include "system/reset.h" +#include "system/runstate.h" +#include "system/runstate-action.h" +#include "system/system.h" +#include "system/tpm.h" #include "trace.h" static NotifierList exit_notifiers = @@ -181,6 +182,12 @@ static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE__MAX, RUN_STATE__MAX }, }; +static const RunStateTransition replay_play_runstate_transitions_def[] = { + { RUN_STATE_SHUTDOWN, RUN_STATE_RUNNING}, + + { RUN_STATE__MAX, RUN_STATE__MAX }, +}; + static bool runstate_valid_transitions[RUN_STATE__MAX][RUN_STATE__MAX]; bool runstate_check(RunState state) @@ -188,14 +195,33 @@ bool runstate_check(RunState state) return current_run_state == state; } -static void runstate_init(void) +static void transitions_set_valid(const RunStateTransition *rst) { const RunStateTransition *p; - memset(&runstate_valid_transitions, 0, 
sizeof(runstate_valid_transitions)); - for (p = &runstate_transitions_def[0]; p->from != RUN_STATE__MAX; p++) { + for (p = rst; p->from != RUN_STATE__MAX; p++) { runstate_valid_transitions[p->from][p->to] = true; } +} + +void runstate_replay_enable(void) +{ + assert(replay_mode != REPLAY_MODE_NONE); + + if (replay_mode == REPLAY_MODE_PLAY) { + /* + * When reverse-debugging, it is possible to move state from + * shutdown to running. + */ + transitions_set_valid(&replay_play_runstate_transitions_def[0]); + } +} + +static void runstate_init(void) +{ + memset(&runstate_valid_transitions, 0, sizeof(runstate_valid_transitions)); + + transitions_set_valid(&runstate_transitions_def[0]); qemu_mutex_init(&vmstop_lock); } @@ -271,6 +297,7 @@ void qemu_system_vmstop_request(RunState state) struct VMChangeStateEntry { VMChangeStateHandler *cb; VMChangeStateHandler *prepare_cb; + VMChangeStateHandlerWithRet *cb_ret; void *opaque; QTAILQ_ENTRY(VMChangeStateEntry) entries; int priority; @@ -279,45 +306,17 @@ struct VMChangeStateEntry { static QTAILQ_HEAD(, VMChangeStateEntry) vm_change_state_head = QTAILQ_HEAD_INITIALIZER(vm_change_state_head); -/** - * qemu_add_vm_change_state_handler_prio: - * @cb: the callback to invoke - * @opaque: user data passed to the callback - * @priority: low priorities execute first when the vm runs and the reverse is - * true when the vm stops - * - * Register a callback function that is invoked when the vm starts or stops - * running. - * - * Returns: an entry to be freed using qemu_del_vm_change_state_handler() - */ VMChangeStateEntry *qemu_add_vm_change_state_handler_prio( VMChangeStateHandler *cb, void *opaque, int priority) { - return qemu_add_vm_change_state_handler_prio_full(cb, NULL, opaque, - priority); + return qemu_add_vm_change_state_handler_prio_full(cb, NULL, NULL, + opaque, priority); } -/** - * qemu_add_vm_change_state_handler_prio_full: - * @cb: the main callback to invoke - * @prepare_cb: a callback to invoke before the main callback - * @opaque: user data passed to the callbacks - * @priority: low priorities execute first when the vm runs and the reverse is - * true when the vm stops - * - * Register a main callback function and an optional prepare callback function - * that are invoked when the vm starts or stops running. The main callback and - * the prepare callback are called in two separate phases: First all prepare - * callbacks are called and only then all main callbacks are called. As its - * name suggests, the prepare callback can be used to do some preparatory work - * before invoking the main callback. 
- * - * Returns: an entry to be freed using qemu_del_vm_change_state_handler() - */ VMChangeStateEntry * qemu_add_vm_change_state_handler_prio_full(VMChangeStateHandler *cb, VMChangeStateHandler *prepare_cb, + VMChangeStateHandlerWithRet *cb_ret, void *opaque, int priority) { VMChangeStateEntry *e; @@ -326,6 +325,7 @@ qemu_add_vm_change_state_handler_prio_full(VMChangeStateHandler *cb, e = g_malloc0(sizeof(*e)); e->cb = cb; e->prepare_cb = prepare_cb; + e->cb_ret = cb_ret; e->opaque = opaque; e->priority = priority; @@ -353,9 +353,10 @@ void qemu_del_vm_change_state_handler(VMChangeStateEntry *e) g_free(e); } -void vm_state_notify(bool running, RunState state) +int vm_state_notify(bool running, RunState state) { VMChangeStateEntry *e, *next; + int ret = 0; trace_vm_state_notify(running, state, RunState_str(state)); @@ -367,7 +368,17 @@ void vm_state_notify(bool running, RunState state) } QTAILQ_FOREACH_SAFE(e, &vm_change_state_head, entries, next) { - e->cb(e->opaque, running, state); + if (e->cb) { + e->cb(e->opaque, running, state); + } else if (e->cb_ret) { + /* + * Here ignore the return value of cb_ret because + * we only care about the stopping the device during + * the VM live migration to indicate whether the + * connection between qemu and backend is normal. + */ + e->cb_ret(e->opaque, running, state); + } } } else { QTAILQ_FOREACH_REVERSE_SAFE(e, &vm_change_state_head, entries, next) { @@ -377,15 +388,26 @@ void vm_state_notify(bool running, RunState state) } QTAILQ_FOREACH_REVERSE_SAFE(e, &vm_change_state_head, entries, next) { - e->cb(e->opaque, running, state); + if (e->cb) { + e->cb(e->opaque, running, state); + } else if (e->cb_ret) { + /* + * We should execute all registered callbacks even if + * one of them returns failure, otherwise, some cleanup + * work of the device will be skipped. + */ + ret |= e->cb_ret(e->opaque, running, state); + } } } + return ret; } static ShutdownCause reset_requested; static ShutdownCause shutdown_requested; static int shutdown_exit_code = EXIT_SUCCESS; static int shutdown_signal; +static bool force_shutdown; static pid_t shutdown_pid; static int powerdown_requested; static int debug_requested; @@ -406,6 +428,11 @@ ShutdownCause qemu_shutdown_requested_get(void) return shutdown_requested; } +bool qemu_force_shutdown_requested(void) +{ + return force_shutdown; +} + ShutdownCause qemu_reset_requested_get(void) { return reset_requested; @@ -482,15 +509,23 @@ static int qemu_debug_requested(void) void qemu_system_reset(ShutdownCause reason) { MachineClass *mc; + ResetType type; mc = current_machine ? MACHINE_GET_CLASS(current_machine) : NULL; cpu_synchronize_all_states(); + switch (reason) { + case SHUTDOWN_CAUSE_SNAPSHOT_LOAD: + type = RESET_TYPE_SNAPSHOT_LOAD; + break; + default: + type = RESET_TYPE_COLD; + } if (mc && mc->reset) { - mc->reset(current_machine, reason); + mc->reset(current_machine, type); } else { - qemu_devices_reset(reason); + qemu_devices_reset(type); } switch (reason) { case SHUTDOWN_CAUSE_NONE: @@ -531,6 +566,58 @@ static void qemu_system_wakeup(void) } } +static char *tdx_parse_panic_message(char *message) +{ + bool printable = false; + char *buf = NULL; + int len = 0, i; + + /* + * Although message is defined as a json string, we shouldn't + * unconditionally treat it as is because the guest generated it and + * it's not necessarily trustable. + */ + if (message) { + /* The caller guarantees the NULL-terminated string. 
*/ + len = strlen(message); + + printable = len > 0; + for (i = 0; i < len; i++) { + if (!(0x20 <= message[i] && message[i] <= 0x7e)) { + printable = false; + break; + } + } + } + + if (len == 0) { + buf = g_malloc(1); + buf[0] = '\0'; + } else { + if (!printable) { + /* 3 = length of "%02x " */ + buf = g_malloc(len * 3); + for (i = 0; i < len; i++) { + if (message[i] == '\0') { + break; + } else { + sprintf(buf + 3 * i, "%02x ", message[i]); + } + } + if (i > 0) { + /* replace the last ' '(space) to NULL */ + buf[i * 3 - 1] = '\0'; + } else { + buf[0] = '\0'; + } + } else { + buf = g_strdup(message); + } + } + + return buf; +} + void qemu_system_guest_panicked(GuestPanicInformation *info) { qemu_log_mask(LOG_GUEST_ERROR, "Guest crashed"); @@ -572,7 +659,20 @@ void qemu_system_guest_panicked(GuestPanicInformation *info) S390CrashReason_str(info->u.s390.reason), info->u.s390.psw_mask, info->u.s390.psw_addr); + } else if (info->type == GUEST_PANIC_INFORMATION_TYPE_TDX) { + char *message = tdx_parse_panic_message(info->u.tdx.message); + qemu_log_mask(LOG_GUEST_ERROR, + "\nTDX guest reports fatal error." + " error code: 0x%" PRIx32 " error message:\"%s\"\n", + info->u.tdx.error_code, message); + g_free(message); + if (info->u.tdx.gpa != -1ull) { + qemu_log_mask(LOG_GUEST_ERROR, "Additional error information " + "can be found at gpa page: 0x%" PRIx64 "\n", + info->u.tdx.gpa); + } } + qapi_free_GuestPanicInformation(info); } } @@ -681,6 +781,7 @@ void qemu_system_killed(int signal, pid_t pid) * we are in a signal handler. */ shutdown_requested = SHUTDOWN_CAUSE_HOST_SIGNAL; + force_shutdown = true; qemu_notify_event(); } @@ -696,6 +797,9 @@ void qemu_system_shutdown_request(ShutdownCause reason) trace_qemu_system_shutdown_request(reason); replay_shutdown_request(reason); shutdown_requested = reason; + if (reason == SHUTDOWN_CAUSE_HOST_QMP_QUIT) { + force_shutdown = true; + } qemu_notify_event(); } @@ -816,6 +920,7 @@ void qemu_remove_exit_notifier(Notifier *notify) static void qemu_run_exit_notifiers(void) { + BQL_LOCK_GUARD(); notifier_list_notify(&exit_notifiers, NULL); } diff --git a/system/tpm.c b/system/tpm.c index 7164ea7ff19..903b29c0437 100644 --- a/system/tpm.c +++ b/system/tpm.c @@ -17,10 +17,11 @@ #include "qapi/error.h" #include "qapi/qapi-commands-tpm.h" #include "qapi/qmp/qerror.h" -#include "sysemu/tpm_backend.h" -#include "sysemu/tpm.h" +#include "system/tpm_backend.h" +#include "system/tpm.h" #include "qemu/config-file.h" #include "qemu/error-report.h" +#include "qemu/help_option.h" static QLIST_HEAD(, TPMBackend) tpm_backends = QLIST_HEAD_INITIALIZER(tpm_backends); @@ -179,9 +180,9 @@ int tpm_config_parse(QemuOptsList *opts_list, const char *optstr) { QemuOpts *opts; - if (!strcmp(optstr, "help")) { + if (is_help_option(optstr)) { tpm_display_backend_drivers(); - return -1; + exit(EXIT_SUCCESS); } opts = qemu_opts_parse_noisily(opts_list, optstr, true); if (!opts) { diff --git a/system/trace-events b/system/trace-events index 2ed1d59b1fb..82856e44f2e 100644 --- a/system/trace-events +++ b/system/trace-events @@ -4,6 +4,13 @@ # Since requests are raised via monitor, not many tracepoints are needed. 
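Referring back to the VMChangeStateHandlerWithRet hook added to runstate.c above, a minimal sketch of how a backend might register a return-value-aware callback. The my_backend_* names are hypothetical; only the qemu_add_vm_change_state_handler_prio_full() signature and the way vm_state_notify() consumes the return value are taken from the patch:

static int my_backend_vm_state_change(void *opaque, bool running, RunState state)
{
    MyBackend *b = opaque;                /* hypothetical backend type */

    if (!running) {
        /* A non-zero return lets vm_state_notify() report the failure. */
        return my_backend_quiesce(b);     /* hypothetical, returns 0 or -errno */
    }
    return 0;
}

/* e.g. in the backend's realize/init code (sketch): */
    b->change_state = qemu_add_vm_change_state_handler_prio_full(
        NULL, NULL,                       /* cb and prepare_cb unused */
        my_backend_vm_state_change,       /* new cb_ret slot */
        b, 0);
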
balloon_event(void *opaque, unsigned long addr) "opaque %p addr %lu" +# dma-helpers.c +dma_blk_io(void *dbs, void *bs, int64_t offset, bool to_dev) "dbs=%p bs=%p offset=%" PRId64 " to_dev=%d" +dma_aio_cancel(void *dbs) "dbs=%p" +dma_complete(void *dbs, int ret, void *cb) "dbs=%p ret=%d cb=%p" +dma_blk_cb(void *dbs, int ret) "dbs=%p ret=%d" +dma_map_wait(void *dbs) "dbs=%p" + # ioport.c cpu_in(unsigned int addr, char size, unsigned int val) "addr 0x%x(%c) value %u" cpu_out(unsigned int addr, char size, unsigned int val) "addr 0x%x(%c) value %u" @@ -26,6 +33,7 @@ address_space_map(void *as, uint64_t addr, uint64_t len, bool is_write, uint32_t find_ram_offset(uint64_t size, uint64_t offset) "size: 0x%" PRIx64 " @ 0x%" PRIx64 find_ram_offset_loop(uint64_t size, uint64_t candidate, uint64_t offset, uint64_t next, uint64_t mingap) "trying size: 0x%" PRIx64 " @ 0x%" PRIx64 ", offset: 0x%" PRIx64" next: 0x%" PRIx64 " mingap: 0x%" PRIx64 ram_block_discard_range(const char *rbname, void *hva, size_t length, bool need_madvise, bool need_fallocate, int ret) "%s@%p + 0x%zx: madvise: %d fallocate: %d ret: %d" +qemu_ram_alloc_shared(const char *name, size_t size, size_t max_size, int fd, void *host) "%s size %zu max_size %zu fd %d host %p" # cpus.c vm_stop_flush_all(int ret) "ret %d" @@ -44,3 +52,6 @@ dirtylimit_state_finalize(void) dirtylimit_throttle_pct(int cpu_index, uint64_t pct, int64_t time_us) "CPU[%d] throttle percent: %" PRIu64 ", throttle adjust time %"PRIi64 " us" dirtylimit_set_vcpu(int cpu_index, uint64_t quota) "CPU[%d] set dirty page rate limit %"PRIu64 dirtylimit_vcpu_execute(int cpu_index, int64_t sleep_time_us) "CPU[%d] sleep %"PRIi64 " us" + +# ram-block-attributes.c +ram_block_attributes_state_change(uint64_t offset, uint64_t size, const char *from, const char *to) "offset 0x%"PRIx64" size 0x%"PRIx64" from '%s' to '%s'" diff --git a/system/vl.c b/system/vl.c index 9e8f16f1551..3b7057e6c66 100644 --- a/system/vl.c +++ b/system/vl.c @@ -26,25 +26,28 @@ #include "qemu/help-texts.h" #include "qemu/datadir.h" #include "qemu/units.h" +#include "qemu/module.h" +#include "qemu/target-info.h" #include "exec/cpu-common.h" #include "exec/page-vary.h" #include "hw/qdev-properties.h" #include "qapi/compat-policy.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" -#include "qapi/qmp/qjson.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" +#include "qobject/qjson.h" #include "qemu-version.h" #include "qemu/cutils.h" #include "qemu/help_option.h" #include "qemu/hw-version.h" #include "qemu/uuid.h" -#include "sysemu/reset.h" -#include "sysemu/runstate.h" -#include "sysemu/runstate-action.h" -#include "sysemu/seccomp.h" -#include "sysemu/tcg.h" -#include "sysemu/xen.h" +#include "qemu/target-info.h" +#include "system/reset.h" +#include "system/runstate.h" +#include "system/runstate-action.h" +#include "system/seccomp.h" +#include "system/tcg.h" +#include "system/xen.h" #include "qemu/error-report.h" #include "qemu/sockets.h" @@ -53,6 +56,7 @@ #include "hw/usb.h" #include "hw/isa/isa.h" #include "hw/scsi/scsi.h" +#include "hw/sd/sd.h" #include "hw/display/vga.h" #include "hw/firmware/smbios.h" #include "hw/acpi/acpi.h" @@ -64,30 +68,32 @@ #include "monitor/monitor.h" #include "ui/console.h" #include "ui/input.h" -#include "sysemu/sysemu.h" -#include "sysemu/numa.h" -#include "sysemu/hostmem.h" +#include "system/system.h" +#include "system/numa.h" +#include "system/hostmem.h" #include "exec/gdbstub.h" #include "gdbstub/enums.h" #include "qemu/timer.h" 
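The trace-events additions above (dma-helpers and ram-block-attributes) can be enabled with the usual trace patterns; an illustrative command line, with the remaining arguments omitted:

    qemu-system-x86_64 -trace 'dma_blk_*' -trace 'ram_block_attributes_*' ...
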
#include "chardev/char.h" #include "qemu/bitmap.h" #include "qemu/log.h" -#include "sysemu/blockdev.h" +#include "system/blockdev.h" #include "hw/block/block.h" #include "hw/i386/x86.h" #include "hw/i386/pc.h" +#include "migration/cpr.h" #include "migration/misc.h" #include "migration/snapshot.h" -#include "sysemu/tpm.h" -#include "sysemu/dma.h" +#include "system/tpm.h" +#include "system/dma.h" #include "hw/audio/soundhw.h" #include "audio/audio.h" -#include "sysemu/cpus.h" -#include "sysemu/cpu-timers.h" +#include "system/cpus.h" +#include "system/cpu-timers.h" +#include "exec/icount.h" #include "migration/colo.h" #include "migration/postcopy-ram.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "qapi/qobject-input-visitor.h" #include "qemu/option.h" #include "qemu/config-file.h" @@ -95,7 +101,7 @@ #ifdef CONFIG_VIRTFS #include "fsdev/qemu-fsdev.h" #endif -#include "sysemu/qtest.h" +#include "system/qtest.h" #ifdef CONFIG_TCG #include "tcg/perf.h" #endif @@ -106,8 +112,8 @@ #include "trace/control.h" #include "qemu/plugin.h" #include "qemu/queue.h" -#include "sysemu/arch_init.h" -#include "exec/confidential-guest-support.h" +#include "system/arch_init.h" +#include "system/confidential-guest-support.h" #include "ui/qemu-spice.h" #include "qapi/string-input-visitor.h" @@ -116,13 +122,14 @@ #include "qom/object_interfaces.h" #include "semihosting/semihost.h" #include "crypto/init.h" -#include "sysemu/replay.h" +#include "system/replay.h" #include "qapi/qapi-events-run-state.h" #include "qapi/qapi-types-audio.h" #include "qapi/qapi-visit-audio.h" #include "qapi/qapi-visit-block-core.h" #include "qapi/qapi-visit-compat.h" #include "qapi/qapi-visit-machine.h" +#include "qapi/qapi-visit-migration.h" #include "qapi/qapi-visit-ui.h" #include "qapi/qapi-commands-block-core.h" #include "qapi/qapi-commands-migration.h" @@ -131,7 +138,7 @@ #include "qapi/qapi-commands-ui.h" #include "block/qdict.h" #include "qapi/qmp/qerror.h" -#include "sysemu/iothread.h" +#include "system/iothread.h" #include "qemu/guest-random.h" #include "qemu/keyval.h" @@ -159,6 +166,8 @@ typedef struct DeviceOption { static const char *cpu_option; static const char *mem_path; static const char *incoming; +static const char *incoming_str[MIGRATION_CHANNEL_TYPE__MAX]; +static MigrationChannel *incoming_channels[MIGRATION_CHANNEL_TYPE__MAX]; static const char *loadvm; static const char *accelerators; static bool have_custom_ram_size; @@ -190,7 +199,7 @@ static int default_parallel = 1; static int default_monitor = 1; static int default_floppy = 1; static int default_cdrom = 1; -static int default_sdcard = 1; +static bool auto_create_sdcard = true; static int default_vga = 1; static int default_net = 1; @@ -347,7 +356,7 @@ static QemuOptsList qemu_overcommit_opts = { .desc = { { .name = "mem-lock", - .type = QEMU_OPT_BOOL, + .type = QEMU_OPT_STRING, }, { .name = "cpu-pm", @@ -714,7 +723,7 @@ static void configure_blockdev(BlockdevOptionsQueue *bdo_queue, default_drive(default_cdrom, snapshot, machine_class->block_default_type, 2, CDROM_OPTS); default_drive(default_floppy, snapshot, IF_FLOPPY, 0, FD_OPTS); - default_drive(default_sdcard, snapshot, IF_SD, 0, SD_OPTS); + default_drive(auto_create_sdcard, snapshot, IF_SD, 0, SD_OPTS); } @@ -759,7 +768,7 @@ static QemuOptsList qemu_smp_opts = { }, }; -#if defined(CONFIG_POSIX) +#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN) static QemuOptsList qemu_run_with_opts = { .name = "run-with", .head = QTAILQ_HEAD_INITIALIZER(qemu_run_with_opts.head), @@ -792,8 +801,8 @@ static 
QemuOptsList qemu_run_with_opts = { static void realtime_init(void) { - if (enable_mlock) { - if (os_mlock() < 0) { + if (should_mlock(mlock_state)) { + if (os_mlock(is_mlock_on_fault(mlock_state)) < 0) { error_report("locking memory failed"); exit(1); } @@ -811,29 +820,15 @@ static void configure_msg(QemuOpts *opts) /***********************************************************/ /* USB devices */ -static int usb_device_add(const char *devname) +static bool usb_parse(const char *cmdline, Error **errp) { - USBDevice *dev = NULL; - - if (!machine_usb(current_machine)) { - return -1; - } - - dev = usbdevice_create(devname); - if (!dev) - return -1; + g_assert(machine_usb(current_machine)); - return 0; -} - -static int usb_parse(const char *cmdline) -{ - int r; - r = usb_device_add(cmdline); - if (r < 0) { - error_report("could not add USB device '%s'", cmdline); + if (!usbdevice_create(cmdline)) { + error_setg(errp, "could not add USB device '%s'", cmdline); + return false; } - return r; + return true; } /***********************************************************/ @@ -885,11 +880,11 @@ static void help(int exitcode) g_get_prgname()); #define DEF(option, opt_arg, opt_enum, opt_help, arch_mask) \ - if ((arch_mask) & arch_type) \ + if (qemu_arch_available(arch_mask)) \ fputs(opt_help, stdout); #define ARCHHEADING(text, arch_mask) \ - if ((arch_mask) & arch_type) \ + if (qemu_arch_available(arch_mask)) \ puts(stringify(text)); #define DEFHEADING(text) ARCHHEADING(text, QEMU_ARCH_ALL) @@ -1184,7 +1179,8 @@ static int parse_fw_cfg(void *opaque, QemuOpts *opts, Error **errp) size = strlen(str); /* NUL terminator NOT included in fw_cfg blob */ buf = g_memdup(str, size); } else if (nonempty_str(gen_id)) { - if (!fw_cfg_add_from_generator(fw_cfg, name, gen_id, errp)) { + if (!fw_cfg_add_file_from_generator(fw_cfg, object_get_objects_root(), + gen_id, name, errp)) { return -1; } return 0; @@ -1196,10 +1192,7 @@ static int parse_fw_cfg(void *opaque, QemuOpts *opts, Error **errp) return -1; } } - /* For legacy, keep user files in a specific global order. */ - fw_cfg_set_order_override(fw_cfg, FW_CFG_ORDER_OVERRIDE_USER); fw_cfg_add_file(fw_cfg, name, buf, size); - fw_cfg_reset_order_override(fw_cfg); return 0; } @@ -1306,22 +1299,27 @@ static void add_device_config(int type, const char *cmdline) QTAILQ_INSERT_TAIL(&device_configs, conf, next); } -static int foreach_device_config(int type, int (*func)(const char *cmdline)) +/** + * foreach_device_config_or_exit(): process per-device configs + * @type: device_config type + * @func: device specific config function, returning pass/fail + * + * @func is called with the &error_fatal handler so device specific + * error messages can be reported on failure. 
+ */ +static void foreach_device_config_or_exit(int type, + bool (*func)(const char *cmdline, + Error **errp)) { struct device_config *conf; - int rc; QTAILQ_FOREACH(conf, &device_configs, next) { if (conf->type != type) continue; loc_push_restore(&conf->loc); - rc = func(conf->cmdline); + func(conf->cmdline, &error_fatal); loc_pop(&conf->loc); - if (rc) { - return rc; - } } - return 0; } static void qemu_disable_default_devices(void) @@ -1350,8 +1348,8 @@ static void qemu_disable_default_devices(void) if (!has_defaults || machine_class->no_cdrom) { default_cdrom = 0; } - if (!has_defaults || machine_class->no_sdcard) { - default_sdcard = 0; + if (!has_defaults || !machine_class->auto_create_sdcard) { + auto_create_sdcard = false; } if (!has_defaults) { default_audio = 0; @@ -1451,7 +1449,7 @@ static void qemu_create_default_devices(void) } } -static int serial_parse(const char *devname) +static bool serial_parse(const char *devname, Error **errp) { int index = num_serial_hds; @@ -1466,13 +1464,13 @@ static int serial_parse(const char *devname) serial_hds[index] = qemu_chr_new_mux_mon(label, devname, NULL); if (!serial_hds[index]) { - error_report("could not connect serial device" - " to character backend '%s'", devname); - return -1; + error_setg(errp, "could not connect serial device" + " to character backend '%s'", devname); + return false; } } num_serial_hds++; - return 0; + return true; } Chardev *serial_hd(int i) @@ -1484,47 +1482,47 @@ Chardev *serial_hd(int i) return NULL; } -static int parallel_parse(const char *devname) +static bool parallel_parse(const char *devname, Error **errp) { static int index = 0; char label[32]; if (strcmp(devname, "none") == 0) - return 0; + return true; if (index == MAX_PARALLEL_PORTS) { - error_report("too many parallel ports"); - exit(1); + error_setg(errp, "too many parallel ports"); + return false; } snprintf(label, sizeof(label), "parallel%d", index); parallel_hds[index] = qemu_chr_new_mux_mon(label, devname, NULL); if (!parallel_hds[index]) { - error_report("could not connect parallel device" - " to character backend '%s'", devname); - return -1; + error_setg(errp, "could not connect parallel device" + " to character backend '%s'", devname); + return false; } index++; - return 0; + return true; } -static int debugcon_parse(const char *devname) +static bool debugcon_parse(const char *devname, Error **errp) { QemuOpts *opts; if (!qemu_chr_new_mux_mon("debugcon", devname, NULL)) { - error_report("invalid character backend '%s'", devname); - exit(1); + error_setg(errp, "invalid character backend '%s'", devname); + return false; } opts = qemu_opts_create(qemu_find_opts("device"), "debugcon", 1, NULL); if (!opts) { - error_report("already have a debugcon device"); - exit(1); + error_setg(errp, "already have a debugcon device"); + return false; } qemu_opt_set(opts, "driver", "isa-debugcon", &error_abort); qemu_opt_set(opts, "chardev", "debugcon", &error_abort); - return 0; + return true; } -static gint machine_class_cmp(gconstpointer a, gconstpointer b) +static gint machine_class_cmp(gconstpointer a, gconstpointer b, gpointer d) { const MachineClass *mc1 = a, *mc2 = b; int res; @@ -1564,7 +1562,7 @@ static void machine_help_func(const QDict *qdict) GSList *el; const char *type = qdict_get_try_str(qdict, "type"); - machines = object_class_get_list(TYPE_MACHINE, false); + machines = object_class_get_list(target_machine_typename(), false); if (type) { ObjectClass *machine_class = OBJECT_CLASS(find_machine(type, machines)); if (machine_class) { @@ -1574,7 
+1572,7 @@ static void machine_help_func(const QDict *qdict) } printf("Supported machines are:\n"); - machines = g_slist_sort(machines, machine_class_cmp); + machines = g_slist_sort_with_data(machines, machine_class_cmp, NULL); for (el = machines; el; el = el->next) { MachineClass *mc = el->data; if (mc->alias) { @@ -1679,10 +1677,10 @@ static MachineClass *select_machine(QDict *qdict, Error **errp) if (machine_type) { machine_class = find_machine(machine_type, machines); - qdict_del(qdict, "type"); if (!machine_class) { - error_setg(errp, "unsupported machine type: \"%s\"", optarg); + error_setg(errp, "unsupported machine type: \"%s\"", machine_type); } + qdict_del(qdict, "type"); } else { machine_class = find_default_machine(machines); if (!machine_class) { @@ -1821,6 +1819,30 @@ static void object_option_add_visitor(Visitor *v) QTAILQ_INSERT_TAIL(&object_opts, opt, next); } +static void incoming_option_parse(const char *str) +{ + MigrationChannelType type = MIGRATION_CHANNEL_TYPE_MAIN; + MigrationChannel *channel; + Visitor *v; + + if (!strcmp(str, "defer")) { + channel = NULL; + } else if (migrate_is_uri(str)) { + migrate_uri_parse(str, &channel, &error_fatal); + } else { + v = qobject_input_visitor_new_str(str, "channel-type", &error_fatal); + visit_type_MigrationChannel(v, NULL, &channel, &error_fatal); + visit_free(v); + type = channel->channel_type; + } + + /* New incoming spec replaces the previous */ + qapi_free_MigrationChannel(incoming_channels[type]); + incoming_channels[type] = channel; + incoming_str[type] = str; + incoming = incoming_str[MIGRATION_CHANNEL_TYPE_MAIN]; +} + static void object_option_parse(const char *str) { QemuOpts *opts; @@ -1841,7 +1863,8 @@ static void object_option_parse(const char *str) type = qemu_opt_get(opts, "qom-type"); if (!type) { - error_setg(&error_fatal, QERR_MISSING_PARAMETER, "qom-type"); + error_report(QERR_MISSING_PARAMETER, "qom-type"); + exit(1); } if (user_creatable_print_help(type, opts)) { exit(0); @@ -1854,6 +1877,44 @@ static void object_option_parse(const char *str) visit_free(v); } +static void overcommit_parse(const char *str) +{ + QemuOpts *opts; + const char *mem_lock_opt; + + opts = qemu_opts_parse_noisily(qemu_find_opts("overcommit"), + str, false); + if (!opts) { + exit(1); + } + + enable_cpu_pm = qemu_opt_get_bool(opts, "cpu-pm", enable_cpu_pm); + + mem_lock_opt = qemu_opt_get(opts, "mem-lock"); + if (!mem_lock_opt) { + return; + } + + if (strcmp(mem_lock_opt, "on") == 0) { + mlock_state = MLOCK_ON; + return; + } + + if (strcmp(mem_lock_opt, "off") == 0) { + mlock_state = MLOCK_OFF; + return; + } + + if (strcmp(mem_lock_opt, "on-fault") == 0) { + mlock_state = MLOCK_ON_FAULT; + return; + } + + error_report("parameter 'mem-lock' expects one of " + "'on', 'off', 'on-fault'"); + exit(1); +} + /* * Very early object creation, before the sandbox options have been activated. 
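The overcommit_parse() helper above replaces the old boolean mem-lock setting with a three-state one; illustrative invocations (remaining arguments omitted):

    qemu-system-x86_64 -overcommit mem-lock=on-fault ...
    qemu-system-x86_64 -overcommit mem-lock=on,cpu-pm=on ...
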
*/ @@ -1971,11 +2032,12 @@ static void qemu_create_early_backends(void) qemu_console_early_init(); - if (dpy.has_gl && dpy.gl != DISPLAYGL_MODE_OFF && display_opengl == 0) { + if (dpy.has_gl && dpy.gl != DISPLAY_GL_MODE_OFF && display_opengl == 0) { #if defined(CONFIG_OPENGL) - error_report("OpenGL is not supported by the display"); + error_report("OpenGL is not supported by display backend '%s'", + DisplayType_str(dpy.type)); #else - error_report("OpenGL support is disabled"); + error_report("OpenGL support was not enabled in this build of QEMU"); #endif exit(1); } @@ -2041,12 +2103,9 @@ static void qemu_create_late_backends(void) qemu_opts_foreach(qemu_find_opts("mon"), mon_init_func, NULL, &error_fatal); - if (foreach_device_config(DEV_SERIAL, serial_parse) < 0) - exit(1); - if (foreach_device_config(DEV_PARALLEL, parallel_parse) < 0) - exit(1); - if (foreach_device_config(DEV_DEBUGCON, debugcon_parse) < 0) - exit(1); + foreach_device_config_or_exit(DEV_SERIAL, serial_parse); + foreach_device_config_or_exit(DEV_PARALLEL, parallel_parse); + foreach_device_config_or_exit(DEV_DEBUGCON, debugcon_parse); /* now chardevs have been created we may have semihosting to connect */ qemu_semihosting_chardev_init(); @@ -2110,6 +2169,19 @@ static void parse_memory_options(void) loc_pop(&loc); } +static void qemu_create_machine_containers(Object *machine) +{ + static const char *const containers[] = { + "unattached", + "peripheral", + "peripheral-anon", + }; + + for (unsigned i = 0; i < ARRAY_SIZE(containers); i++) { + object_property_add_new_container(machine, containers[i]); + } +} + static void qemu_create_machine(QDict *qdict) { MachineClass *machine_class = select_machine(qdict, &error_fatal); @@ -2118,8 +2190,8 @@ static void qemu_create_machine(QDict *qdict) current_machine = MACHINE(object_new_with_class(OBJECT_CLASS(machine_class))); object_property_add_child(object_get_root(), "machine", OBJECT(current_machine)); - object_property_add_child(container_get(OBJECT(current_machine), - "/unattached"), + qemu_create_machine_containers(OBJECT(current_machine)); + object_property_add_child(machine_get_container("unattached"), "sysbus", OBJECT(sysbus_get_default())); if (machine_class->minimum_page_bits) { @@ -2360,6 +2432,7 @@ static void configure_accelerators(const char *progname) /* Select the default accelerator */ bool have_tcg = accel_find("tcg"); bool have_kvm = accel_find("kvm"); + bool have_hvf = accel_find("hvf"); if (have_tcg && have_kvm) { if (g_str_has_suffix(progname, "kvm")) { @@ -2372,6 +2445,8 @@ static void configure_accelerators(const char *progname) accelerators = "kvm"; } else if (have_tcg) { accelerators = "tcg"; + } else if (have_hvf) { + accelerators = "hvf"; } else { error_report("No accelerator selected and" " no default accelerator available"); @@ -2421,19 +2496,25 @@ static void configure_accelerators(const char *progname) static void qemu_validate_options(const QDict *machine_opts) { const char *kernel_filename = qdict_get_try_str(machine_opts, "kernel"); + const char *shim_filename = qdict_get_try_str(machine_opts, "shim"); const char *initrd_filename = qdict_get_try_str(machine_opts, "initrd"); const char *kernel_cmdline = qdict_get_try_str(machine_opts, "append"); if (kernel_filename == NULL) { - if (kernel_cmdline != NULL) { - error_report("-append only allowed with -kernel option"); - exit(1); - } + if (kernel_cmdline != NULL) { + error_report("-append only allowed with -kernel option"); + exit(1); + } + + if (shim_filename != NULL) { + error_report("-shim only 
allowed with -kernel option"); + exit(1); + } - if (initrd_filename != NULL) { - error_report("-initrd only allowed with -kernel option"); - exit(1); - } + if (initrd_filename != NULL) { + error_report("-initrd only allowed with -kernel option"); + exit(1); + } } if (loadvm && incoming) { @@ -2620,12 +2701,27 @@ static void qemu_init_displays(void) static void qemu_init_board(void) { + MachineClass *machine_class = MACHINE_GET_CLASS(current_machine); + /* process plugin before CPUs are created, but once -smp has been parsed */ qemu_plugin_load_list(&plugin_list, &error_fatal); /* From here on we enter MACHINE_PHASE_INITIALIZED. */ machine_run_board_init(current_machine, mem_path, &error_fatal); + if (machine_class->auto_create_sdcard) { + bool ambigous; + + /* Ensure there is a SD bus available to create SD card on */ + Object *obj = object_resolve_path_type("", TYPE_SD_BUS, &ambigous); + if (!obj && !ambigous) { + fprintf(stderr, "Can not create sd-card on '%s' machine" + " because it lacks a sd-bus\n", + machine_class->name); + abort(); + } + } + drive_check_orphaned(); realtime_init(); @@ -2642,29 +2738,20 @@ static void qemu_create_cli_devices(void) /* init USB devices */ if (machine_usb(current_machine)) { - if (foreach_device_config(DEV_USB, usb_parse) < 0) - exit(1); + foreach_device_config_or_exit(DEV_USB, usb_parse); } /* init generic devices */ - rom_set_order_override(FW_CFG_ORDER_OVERRIDE_DEVICE); qemu_opts_foreach(qemu_find_opts("device"), device_init_func, NULL, &error_fatal); QTAILQ_FOREACH(opt, &device_opts, next) { - DeviceState *dev; + QObject *ret_data = NULL; + loc_push_restore(&opt->loc); - /* - * TODO Eventually we should call qmp_device_add() here to make sure it - * behaves the same, but QMP still has to accept incorrectly typed - * options until libvirt is fixed and we want to be strict on the CLI - * from the start, so call qdev_device_add_from_qdict() directly for - * now. 
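As a side note to the auto_create_sdcard conversion above, a hypothetical board opting in would now set the MachineClass field directly (the polarity is inverted relative to the old no_sdcard flag); my_machine_class_init is an invented name:

static void my_machine_class_init(ObjectClass *oc, const void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    /* Machine provides a TYPE_SD_BUS, so let -drive if=sd auto-create a card. */
    mc->auto_create_sdcard = true;
}
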
- */ - dev = qdev_device_add_from_qdict(opt->opts, true, &error_fatal); - object_unref(OBJECT(dev)); + qmp_device_add(opt->opts, &ret_data, &error_fatal); + assert(ret_data == NULL); /* error_fatal aborts */ loc_pop(&opt->loc); } - rom_reset_order_override(); } static bool qemu_machine_creation_done(Error **errp) @@ -2696,10 +2783,8 @@ static bool qemu_machine_creation_done(Error **errp) exit(1); } - if (foreach_device_config(DEV_GDB, gdbserver_start) < 0) { - error_setg(errp, "could not start gdbserver"); - return false; - } + foreach_device_config_or_exit(DEV_GDB, gdbserver_start); + if (!vga_interface_created && !default_vga && vga_interface_type != VGA_NONE) { warn_report("A -vga option was passed but this machine " @@ -2734,8 +2819,11 @@ void qmp_x_exit_preconfig(Error **errp) if (incoming) { Error *local_err = NULL; if (strcmp(incoming, "defer") != 0) { - qmp_migrate_incoming(incoming, false, NULL, true, true, - &local_err); + g_autofree MigrationChannelList *channels = + g_new0(MigrationChannelList, 1); + + channels->value = incoming_channels[MIGRATION_CHANNEL_TYPE_MAIN]; + qmp_migrate_incoming(NULL, true, channels, true, true, &local_err); if (local_err) { error_reportf_err(local_err, "-incoming %s: ", incoming); exit(1); @@ -2796,7 +2884,10 @@ void qemu_init(int argc, char **argv) os_setup_limits(); - qemu_init_arch_modules(); +#ifdef CONFIG_MODULES + module_init_info(qemu_modinfo); + module_allow_arch(target_name()); +#endif qemu_init_subsystems(); @@ -2835,7 +2926,7 @@ void qemu_init(int argc, char **argv) const QEMUOption *popt; popt = lookup_opt(argc, argv, &optarg, &optind); - if (!(popt->arch_mask & arch_type)) { + if (!qemu_arch_available(popt->arch_mask)) { error_report("Option not supported for this target"); exit(1); } @@ -2909,20 +3000,12 @@ void qemu_init(int argc, char **argv) nographic = true; dpy.type = DISPLAY_TYPE_NONE; break; - case QEMU_OPTION_portrait: - graphic_rotate = 90; - break; - case QEMU_OPTION_rotate: - graphic_rotate = strtol(optarg, (char **) &optarg, 10); - if (graphic_rotate != 0 && graphic_rotate != 90 && - graphic_rotate != 180 && graphic_rotate != 270) { - error_report("only 90, 180, 270 deg rotation is available"); - exit(1); - } - break; case QEMU_OPTION_kernel: qdict_put_str(machine_opts_dict, "kernel", optarg); break; + case QEMU_OPTION_shim: + qdict_put_str(machine_opts_dict, "shim", optarg); + break; case QEMU_OPTION_initrd: qdict_put_str(machine_opts_dict, "initrd", optarg); break; @@ -3442,6 +3525,7 @@ void qemu_init(int argc, char **argv) nb_prom_envs++; break; case QEMU_OPTION_old_param: + warn_report("-old-param is deprecated"); old_param = 1; break; case QEMU_OPTION_rtc: @@ -3462,7 +3546,7 @@ void qemu_init(int argc, char **argv) if (!incoming) { runstate_set(RUN_STATE_INMIGRATE); } - incoming = optarg; + incoming_option_parse(optarg); break; case QEMU_OPTION_only_migratable: only_migratable = 1; @@ -3547,13 +3631,7 @@ void qemu_init(int argc, char **argv) object_option_parse(optarg); break; case QEMU_OPTION_overcommit: - opts = qemu_opts_parse_noisily(qemu_find_opts("overcommit"), - optarg, false); - if (!opts) { - exit(1); - } - enable_mlock = qemu_opt_get_bool(opts, "mem-lock", enable_mlock); - enable_cpu_pm = qemu_opt_get_bool(opts, "cpu-pm", enable_cpu_pm); + overcommit_parse(optarg); break; case QEMU_OPTION_compat: { @@ -3596,16 +3674,7 @@ void qemu_init(int argc, char **argv) case QEMU_OPTION_nouserconfig: /* Nothing to be parsed here. Especially, do not error out below. 
*/ break; -#if defined(CONFIG_POSIX) - case QEMU_OPTION_runas: - warn_report("-runas is deprecated, use '-run-with user=...' instead"); - if (!os_set_runas(optarg)) { - error_report("User \"%s\" doesn't exist" - " (and is not :)", - optarg); - exit(1); - } - break; +#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN) case QEMU_OPTION_daemonize: os_set_daemonize(true); break; @@ -3689,6 +3758,12 @@ void qemu_init(int argc, char **argv) qemu_create_machine(machine_opts_dict); + /* + * Load incoming CPR state before any devices are created, because it + * contains file descriptors that are needed in device initialization code. + */ + cpr_state_load(incoming_channels[MIGRATION_CHANNEL_TYPE_CPR], &error_fatal); + suspend_mux_open(); qemu_disable_default_devices(); diff --git a/system/watchpoint.c b/system/watchpoint.c index 2aa2a9ea63f..21d0bb36cae 100644 --- a/system/watchpoint.c +++ b/system/watchpoint.c @@ -19,7 +19,9 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" +#include "exec/target_page.h" +#include "exec/watchpoint.h" #include "hw/core/cpu.h" /* Add a watchpoint. */ diff --git a/target-info-stub.c b/target-info-stub.c new file mode 100644 index 00000000000..ca0caa3686c --- /dev/null +++ b/target-info-stub.c @@ -0,0 +1,27 @@ +/* + * QEMU target info stubs (target specific) + * + * Copyright (c) Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/target-info.h" +#include "qemu/target-info-impl.h" +#include "hw/boards.h" +#include "cpu.h" + +static const TargetInfo target_info_stub = { + .target_name = TARGET_NAME, + .target_arch = SYS_EMU_TARGET__MAX, + .long_bits = TARGET_LONG_BITS, + .cpu_type = CPU_RESOLVING_TYPE, + .machine_typename = TYPE_MACHINE, + .endianness = TARGET_BIG_ENDIAN ? 
ENDIAN_MODE_BIG : ENDIAN_MODE_LITTLE, +}; + +const TargetInfo *target_info(void) +{ + return &target_info_stub; +} diff --git a/target-info.c b/target-info.c new file mode 100644 index 00000000000..3110ab32f75 --- /dev/null +++ b/target-info.c @@ -0,0 +1,54 @@ +/* + * QEMU target info helpers + * + * Copyright (c) Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/target-info.h" +#include "qemu/target-info-qapi.h" +#include "qemu/target-info-impl.h" +#include "qapi/error.h" + +const char *target_name(void) +{ + return target_info()->target_name; +} + +unsigned target_long_bits(void) +{ + return target_info()->long_bits; +} + +SysEmuTarget target_arch(void) +{ + SysEmuTarget arch = target_info()->target_arch; + + if (arch == SYS_EMU_TARGET__MAX) { + arch = qapi_enum_parse(&SysEmuTarget_lookup, target_name(), -1, + &error_abort); + } + return arch; +} + +const char *target_cpu_type(void) +{ + return target_info()->cpu_type; +} + +const char *target_machine_typename(void) +{ + return target_info()->machine_typename; +} + +EndianMode target_endian_mode(void) +{ + return target_info()->endianness; +} + +bool target_big_endian(void) +{ + return target_endian_mode() == ENDIAN_MODE_BIG; +} diff --git a/target/Kconfig b/target/Kconfig index 7f64112e9e7..d0c7b59d9c7 100644 --- a/target/Kconfig +++ b/target/Kconfig @@ -1,7 +1,6 @@ source alpha/Kconfig source arm/Kconfig source avr/Kconfig -source cris/Kconfig source hppa/Kconfig source i386/Kconfig source loongarch/Kconfig diff --git a/target/alpha/cpu-param.h b/target/alpha/cpu-param.h index 5ce213a9a11..a799f42db31 100644 --- a/target/alpha/cpu-param.h +++ b/target/alpha/cpu-param.h @@ -2,14 +2,12 @@ * Alpha cpu parameters for qemu. * * Copyright (c) 2007 Jocelyn Mayer - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #ifndef ALPHA_CPU_PARAM_H #define ALPHA_CPU_PARAM_H -#define TARGET_LONG_BITS 64 - /* ??? EV4 has 34 phys addr bits, EV5 has 40, EV6 has 44. */ #define TARGET_PHYS_ADDR_SPACE_BITS 44 @@ -20,14 +18,12 @@ * a 4k minimum to match x86 host, which can minimize emulation issues. 
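An illustrative use of the new target-info helpers from target-independent code; report_target() is a made-up function, while the helpers and their return types come from target-info.c above:

#include "qemu/osdep.h"
#include "qemu/target-info.h"

static void report_target(void)
{
    printf("running %s (%u-bit, %s endian)\n",
           target_name(), target_long_bits(),
           target_big_endian() ? "big" : "little");
}
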
*/ # define TARGET_PAGE_BITS_VARY -# define TARGET_PAGE_BITS_MIN 12 # define TARGET_VIRT_ADDR_SPACE_BITS 63 #else # define TARGET_PAGE_BITS 13 # define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS) #endif -/* Alpha processors have a weak memory model */ -#define TCG_GUEST_DEFAULT_MO (0) +#define TARGET_INSN_START_EXTRA_WORDS 0 #endif diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c index 9db1dffc03e..bf1787a69dd 100644 --- a/target/alpha/cpu.c +++ b/target/alpha/cpu.c @@ -23,7 +23,10 @@ #include "qapi/error.h" #include "qemu/qemu-print.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/translation-block.h" +#include "exec/target_page.h" +#include "accel/tcg/cpu-ops.h" +#include "fpu/softfloat.h" static void alpha_cpu_set_pc(CPUState *cs, vaddr value) @@ -38,6 +41,18 @@ static vaddr alpha_cpu_get_pc(CPUState *cs) return env->pc; } +static TCGTBCPUState alpha_get_tb_cpu_state(CPUState *cs) +{ + CPUAlphaState *env = cpu_env(cs); + uint32_t flags = env->flags & ENV_FLAG_TB_MASK; + +#ifdef CONFIG_USER_ONLY + flags |= TB_FLAG_UNALIGN * !cs->prctl_unalign_sigbus; +#endif + + return (TCGTBCPUState){ .pc = env->pc, .flags = flags }; +} + static void alpha_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -61,6 +76,7 @@ static void alpha_restore_state_to_opc(CPUState *cs, } } +#ifndef CONFIG_USER_ONLY static bool alpha_cpu_has_work(CPUState *cs) { /* Here we are checking to see if the CPU should wake up from HALT. @@ -75,6 +91,7 @@ static bool alpha_cpu_has_work(CPUState *cs) | CPU_INTERRUPT_SMP | CPU_INTERRUPT_MCHK); } +#endif /* !CONFIG_USER_ONLY */ static int alpha_cpu_mmu_index(CPUState *cs, bool ifetch) { @@ -83,6 +100,7 @@ static int alpha_cpu_mmu_index(CPUState *cs, bool ifetch) static void alpha_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) { + info->endian = BFD_ENDIAN_LITTLE; info->mach = bfd_mach_alpha_ev6; info->print_insn = print_insn_alpha; } @@ -187,7 +205,26 @@ static void alpha_cpu_initfn(Object *obj) { CPUAlphaState *env = cpu_env(CPU(obj)); + /* TODO all this should be done in reset, not init */ + env->lock_addr = -1; + + /* + * TODO: this is incorrect. The Alpha Architecture Handbook version 4 + * describes NaN propagation in section 4.7.10.4. We should prefer + * the operand in Fb (whether it is a QNaN or an SNaN), then the + * operand in Fa. That is float_2nan_prop_ba. + */ + set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status); + /* Default NaN: sign bit clear, msb frac bit set */ + set_float_default_nan_pattern(0b01000000, &env->fp_status); + /* + * TODO: this is incorrect. The Alpha Architecture Handbook version 4 + * section 4.7.7.11 says that we flush to zero for underflow cases, so + * this should be float_ftz_after_rounding to match the + * tininess_after_rounding (which is specified in section 4.7.5). 
+ */ + set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status); #if defined(CONFIG_USER_ONLY) env->flags = ENV_FLAG_PS_USER | ENV_FLAG_FEN; cpu_alpha_store_fpcr(env, (uint64_t)(FPCR_INVD | FPCR_DZED | FPCR_OVFD @@ -202,31 +239,39 @@ static void alpha_cpu_initfn(Object *obj) #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps alpha_sysemu_ops = { + .has_work = alpha_cpu_has_work, .get_phys_page_debug = alpha_cpu_get_phys_page_debug, }; #endif -#include "hw/core/tcg-cpu-ops.h" - static const TCGCPUOps alpha_tcg_ops = { + /* Alpha processors have a weak memory model */ + .guest_default_memory_order = 0, + .mttcg_supported = true, + .initialize = alpha_translate_init, + .translate_code = alpha_translate_code, + .get_tb_cpu_state = alpha_get_tb_cpu_state, .synchronize_from_tb = alpha_cpu_synchronize_from_tb, .restore_state_to_opc = alpha_restore_state_to_opc, + .mmu_index = alpha_cpu_mmu_index, #ifdef CONFIG_USER_ONLY .record_sigsegv = alpha_cpu_record_sigsegv, .record_sigbus = alpha_cpu_record_sigbus, #else .tlb_fill = alpha_cpu_tlb_fill, + .pointer_wrap = cpu_pointer_wrap_notreached, .cpu_exec_interrupt = alpha_cpu_exec_interrupt, .cpu_exec_halt = alpha_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = alpha_cpu_do_interrupt, .do_transaction_failed = alpha_cpu_do_transaction_failed, .do_unaligned_access = alpha_cpu_do_unaligned_access, #endif /* !CONFIG_USER_ONLY */ }; -static void alpha_cpu_class_init(ObjectClass *oc, void *data) +static void alpha_cpu_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); @@ -236,13 +281,12 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data) &acc->parent_realize); cc->class_by_name = alpha_cpu_class_by_name; - cc->has_work = alpha_cpu_has_work; - cc->mmu_index = alpha_cpu_mmu_index; cc->dump_state = alpha_cpu_dump_state; cc->set_pc = alpha_cpu_set_pc; cc->get_pc = alpha_cpu_get_pc; cc->gdb_read_register = alpha_cpu_gdb_read_register; cc->gdb_write_register = alpha_cpu_gdb_write_register; + cc->gdb_core_xml_file = "alpha-core.xml"; #ifndef CONFIG_USER_ONLY dc->vmsd = &vmstate_alpha_cpu; cc->sysemu_ops = &alpha_sysemu_ops; diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h index f9e2ecb90ab..45944e46b54 100644 --- a/target/alpha/cpu.h +++ b/target/alpha/cpu.h @@ -21,7 +21,9 @@ #define ALPHA_CPU_H #include "cpu-qom.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #include "qemu/cpu-float.h" #define ICACHE_LINE_SIZE 32 @@ -267,7 +269,6 @@ struct ArchCPU { /** * AlphaCPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. * * An Alpha CPU model. 
*/ @@ -275,7 +276,6 @@ struct AlphaCPUClass { CPUClass parent_class; DeviceRealize parent_realize; - DeviceReset parent_reset; }; #ifndef CONFIG_USER_ONLY @@ -289,8 +289,6 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags); int alpha_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -#include "exec/cpu-all.h" - enum { FEATURE_ASN = 0x00000001, FEATURE_SPS = 0x00000002, @@ -433,6 +431,8 @@ enum { }; void alpha_translate_init(void); +void alpha_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); #define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU @@ -464,17 +464,6 @@ void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, MemTxResult response, uintptr_t retaddr); #endif -static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *pflags) -{ - *pc = env->pc; - *cs_base = 0; - *pflags = env->flags & ENV_FLAG_TB_MASK; -#ifdef CONFIG_USER_ONLY - *pflags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus; -#endif -} - #ifdef CONFIG_USER_ONLY /* Copied from linux ieee_swcr_to_fpcr. */ static inline uint64_t alpha_ieee_swcr_to_fpcr(uint64_t swcr) diff --git a/target/alpha/fpu_helper.c b/target/alpha/fpu_helper.c index 63d9e9ce39c..30f3c7fd185 100644 --- a/target/alpha/fpu_helper.c +++ b/target/alpha/fpu_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" @@ -455,26 +454,27 @@ static uint64_t do_cvttq(CPUAlphaState *env, uint64_t a, int roundmode) { float64 fa; int64_t ret; - uint32_t exc; + uint32_t exc = 0; + int flags; fa = t_to_float64(a); ret = float64_to_int64_modulo(fa, roundmode, &FP_STATUS); - exc = get_float_exception_flags(&FP_STATUS); - if (unlikely(exc)) { + flags = get_float_exception_flags(&FP_STATUS); + if (unlikely(flags)) { set_float_exception_flags(0, &FP_STATUS); /* We need to massage the resulting exceptions. */ - if (exc & float_flag_invalid_cvti) { + if (flags & float_flag_invalid_cvti) { /* Overflow, either normal or infinity. 
*/ if (float64_is_infinity(fa)) { exc = FPCR_INV; } else { exc = FPCR_IOV | FPCR_INE; } - } else if (exc & float_flag_invalid) { + } else if (flags & float_flag_invalid) { exc = FPCR_INV; - } else if (exc & float_flag_inexact) { + } else if (flags & float_flag_inexact) { exc = FPCR_INE; } } diff --git a/target/alpha/gdbstub.c b/target/alpha/gdbstub.c index 13694fd321e..1a7e2dd9202 100644 --- a/target/alpha/gdbstub.c +++ b/target/alpha/gdbstub.c @@ -59,7 +59,7 @@ int alpha_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int alpha_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { CPUAlphaState *env = cpu_env(cs); - target_ulong tmp = ldtul_p(mem_buf); + target_ulong tmp = ldq_le_p(mem_buf); CPU_DoubleU d; switch (n) { diff --git a/target/alpha/helper.c b/target/alpha/helper.c index 2f1000c99fa..096eac34458 100644 --- a/target/alpha/helper.c +++ b/target/alpha/helper.c @@ -20,11 +20,13 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" +#include "exec/target_page.h" #include "fpu/softfloat-types.h" #include "exec/helper-proto.h" #include "qemu/qemu-print.h" +#include "system/memory.h" #define CONVERT_BIT(X, SRC, DST) \ diff --git a/target/alpha/int_helper.c b/target/alpha/int_helper.c index 5672696f6f1..6bfe63500e0 100644 --- a/target/alpha/int_helper.c +++ b/target/alpha/int_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" diff --git a/target/alpha/machine.c b/target/alpha/machine.c index f09834f635d..5f302b166da 100644 --- a/target/alpha/machine.c +++ b/target/alpha/machine.c @@ -74,7 +74,7 @@ static const VMStateDescription vmstate_env = { }; static const VMStateField vmstate_cpu_fields[] = { - VMSTATE_CPU(), + VMSTATE_STRUCT(parent_obj, AlphaCPU, 0, vmstate_cpu_common, CPUState), VMSTATE_STRUCT(env, AlphaCPU, 1, vmstate_env, CPUAlphaState), VMSTATE_END_OF_LIST() }; diff --git a/target/alpha/mem_helper.c b/target/alpha/mem_helper.c index 872955f5e74..2113fe33ae2 100644 --- a/target/alpha/mem_helper.c +++ b/target/alpha/mem_helper.c @@ -20,8 +20,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" static void do_unaligned_access(CPUAlphaState *env, vaddr addr, uintptr_t retaddr) { diff --git a/target/alpha/sys_helper.c b/target/alpha/sys_helper.c index 768116ef32b..51e32544287 100644 --- a/target/alpha/sys_helper.c +++ b/target/alpha/sys_helper.c @@ -19,11 +19,11 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/tb-flush.h" #include "exec/helper-proto.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/runstate.h" +#include "system/system.h" #include "qemu/timer.h" diff --git a/target/alpha/translate.c b/target/alpha/translate.c index fb6cac4b538..cebab0318cf 100644 --- a/target/alpha/translate.c +++ b/target/alpha/translate.c @@ -19,13 +19,14 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "sysemu/cpus.h" +#include "system/cpus.h" #include "qemu/host-utils.h" -#include "exec/exec-all.h" #include "tcg/tcg-op.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "exec/translator.h" +#include "exec/translation-block.h" +#include "exec/target_page.h" #include "exec/log.h" #define HELPER_H "helper.h" @@ -2954,8 +2955,8 @@ static const 
TranslatorOps alpha_tr_ops = { .tb_stop = alpha_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void alpha_translate_code(CPUState *cpu, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc; translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base); diff --git a/target/alpha/vax_helper.c b/target/alpha/vax_helper.c index f94fb519dbd..c1d201e7b4d 100644 --- a/target/alpha/vax_helper.c +++ b/target/alpha/vax_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" diff --git a/target/arm/arch_dump.c b/target/arm/arch_dump.c index 06cdf4ba281..1dd79849c13 100644 --- a/target/arm/arch_dump.c +++ b/target/arm/arch_dump.c @@ -21,8 +21,9 @@ #include "qemu/osdep.h" #include "cpu.h" #include "elf.h" -#include "sysemu/dump.h" +#include "system/dump.h" #include "cpu-features.h" +#include "internals.h" /* struct user_pt_regs from arch/arm64/include/uapi/asm/ptrace.h */ struct aarch64_user_regs { @@ -142,7 +143,6 @@ static int aarch64_write_elf64_prfpreg(WriteCoreDumpFunction f, return 0; } -#ifdef TARGET_AARCH64 static off_t sve_zreg_offset(uint32_t vq, int n) { off_t off = sizeof(struct aarch64_user_sve_header); @@ -230,7 +230,6 @@ static int aarch64_write_elf64_sve(WriteCoreDumpFunction f, return 0; } -#endif int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, int cpuid, DumpState *s) @@ -272,11 +271,9 @@ int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, return ret; } -#ifdef TARGET_AARCH64 if (cpu_isar_feature(aa64_sve, cpu)) { ret = aarch64_write_elf64_sve(f, env, cpuid, s); } -#endif return ret; } @@ -450,11 +447,9 @@ ssize_t cpu_get_note_size(int class, int machine, int nr_cpus) if (class == ELFCLASS64) { note_size = AARCH64_PRSTATUS_NOTE_SIZE; note_size += AARCH64_PRFPREG_NOTE_SIZE; -#ifdef TARGET_AARCH64 if (cpu_isar_feature(aa64_sve, cpu)) { note_size += AARCH64_SVE_NOTE_SIZE(&cpu->env); } -#endif } else { note_size = ARM_PRSTATUS_NOTE_SIZE; if (cpu_isar_feature(aa32_vfp_simd, cpu)) { diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c index 2b2055c6acc..20c70c7d6bb 100644 --- a/target/arm/arm-powerctl.c +++ b/target/arm/arm-powerctl.c @@ -15,7 +15,7 @@ #include "arm-powerctl.h" #include "qemu/log.h" #include "qemu/main-loop.h" -#include "sysemu/tcg.h" +#include "system/tcg.h" #include "target/arm/multiprocessing.h" #ifndef DEBUG_ARM_POWERCTL diff --git a/target/arm/arm-qmp-cmds.c b/target/arm/arm-qmp-cmds.c index 3cc8cc738bb..d292c974c44 100644 --- a/target/arm/arm-qmp-cmds.c +++ b/target/arm/arm-qmp-cmds.c @@ -21,15 +21,17 @@ */ #include "qemu/osdep.h" +#include "qemu/target-info.h" #include "hw/boards.h" #include "kvm_arm.h" #include "qapi/error.h" #include "qapi/visitor.h" #include "qapi/qobject-input-visitor.h" -#include "qapi/qapi-commands-machine-target.h" -#include "qapi/qapi-commands-misc-target.h" -#include "qapi/qmp/qdict.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/qapi-commands-misc-arm.h" +#include "qobject/qdict.h" #include "qom/qom-qobject.h" +#include "cpu.h" static GICCapability *gic_cap_new(int version) { @@ -46,7 +48,7 @@ static inline void gic_cap_kvm_probe(GICCapability *v2, GICCapability *v3) #ifdef CONFIG_KVM int fdarray[3]; - if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, NULL)) { + if (!kvm_arm_create_scratch_host_vcpu(fdarray, NULL)) { return; } @@ -94,7 +96,7 @@ static const char 
*cpu_model_advertised_features[] = { "sve640", "sve768", "sve896", "sve1024", "sve1152", "sve1280", "sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048", "kvm-no-adjvtime", "kvm-steal-time", - "pauth", "pauth-impdef", "pauth-qarma3", + "pauth", "pauth-impdef", "pauth-qarma3", "pauth-qarma5", NULL }; @@ -240,7 +242,7 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) CpuDefinitionInfoList *cpu_list = NULL; GSList *list; - list = object_class_get_list(TYPE_ARM_CPU, false); + list = object_class_get_list(target_cpu_type(), false); g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); g_slist_free(list); diff --git a/target/arm/cpregs-pmu.c b/target/arm/cpregs-pmu.c new file mode 100644 index 00000000000..9c4431c18ba --- /dev/null +++ b/target/arm/cpregs-pmu.c @@ -0,0 +1,1328 @@ +/* + * QEMU ARM CP Register PMU insns + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "exec/icount.h" +#include "hw/irq.h" +#include "cpu.h" +#include "cpu-features.h" +#include "cpregs.h" +#include "internals.h" + + +#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ + +/* + * Check for traps to performance monitor registers, which are controlled + * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. + */ +static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); + + if (el < 2 && (mdcr_el2 & MDCR_TPM)) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +typedef struct pm_event { + uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */ + /* If the event is supported on this CPU (used to generate PMCEID[01]) */ + bool (*supported)(CPUARMState *); + /* + * Retrieve the current count of the underlying event. The programmed + * counters hold a difference from the return value from this function + */ + uint64_t (*get_count)(CPUARMState *); + /* + * Return how many nanoseconds it will take (at a minimum) for count events + * to occur. A negative value indicates the counter will never overflow, or + * that the counter has otherwise arranged for the overflow bit to be set + * and the PMU interrupt to be raised on overflow. + */ + int64_t (*ns_per_count)(uint64_t); +} pm_event; + +static bool event_always_supported(CPUARMState *env) +{ + return true; +} + +static uint64_t swinc_get_count(CPUARMState *env) +{ + /* + * SW_INCR events are written directly to the pmevcntr's by writes to + * PMSWINC, so there is no underlying count maintained by the PMU itself + */ + return 0; +} + +static int64_t swinc_ns_per(uint64_t ignored) +{ + return -1; +} + +/* + * Return the underlying cycle count for the PMU cycle counters. If we're in + * usermode, simply return 0. 
+ */ +static uint64_t cycles_get_count(CPUARMState *env) +{ +#ifndef CONFIG_USER_ONLY + return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + ARM_CPU_FREQ, NANOSECONDS_PER_SECOND); +#else + return cpu_get_host_ticks(); +#endif +} + +#ifndef CONFIG_USER_ONLY +static int64_t cycles_ns_per(uint64_t cycles) +{ + return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles; +} + +static bool instructions_supported(CPUARMState *env) +{ + /* Precise instruction counting */ + return icount_enabled() == ICOUNT_PRECISE; +} + +static uint64_t instructions_get_count(CPUARMState *env) +{ + assert(icount_enabled() == ICOUNT_PRECISE); + return (uint64_t)icount_get_raw(); +} + +static int64_t instructions_ns_per(uint64_t icount) +{ + assert(icount_enabled() == ICOUNT_PRECISE); + return icount_to_ns((int64_t)icount); +} +#endif + +static bool pmuv3p1_events_supported(CPUARMState *env) +{ + /* For events which are supported in any v8.1 PMU */ + return cpu_isar_feature(any_pmuv3p1, env_archcpu(env)); +} + +static bool pmuv3p4_events_supported(CPUARMState *env) +{ + /* For events which are supported in any v8.1 PMU */ + return cpu_isar_feature(any_pmuv3p4, env_archcpu(env)); +} + +static uint64_t zero_event_get_count(CPUARMState *env) +{ + /* For events which on QEMU never fire, so their count is always zero */ + return 0; +} + +static int64_t zero_event_ns_per(uint64_t cycles) +{ + /* An event which never fires can never overflow */ + return -1; +} + +static const pm_event pm_events[] = { + { .number = 0x000, /* SW_INCR */ + .supported = event_always_supported, + .get_count = swinc_get_count, + .ns_per_count = swinc_ns_per, + }, +#ifndef CONFIG_USER_ONLY + { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */ + .supported = instructions_supported, + .get_count = instructions_get_count, + .ns_per_count = instructions_ns_per, + }, + { .number = 0x011, /* CPU_CYCLES, Cycle */ + .supported = event_always_supported, + .get_count = cycles_get_count, + .ns_per_count = cycles_ns_per, + }, +#endif + { .number = 0x023, /* STALL_FRONTEND */ + .supported = pmuv3p1_events_supported, + .get_count = zero_event_get_count, + .ns_per_count = zero_event_ns_per, + }, + { .number = 0x024, /* STALL_BACKEND */ + .supported = pmuv3p1_events_supported, + .get_count = zero_event_get_count, + .ns_per_count = zero_event_ns_per, + }, + { .number = 0x03c, /* STALL */ + .supported = pmuv3p4_events_supported, + .get_count = zero_event_get_count, + .ns_per_count = zero_event_ns_per, + }, +}; + +/* + * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of + * events (i.e. the statistical profiling extension), this implementation + * should first be updated to something sparse instead of the current + * supported_event_map[] array. + */ +#define MAX_EVENT_ID 0x3c +#define UNSUPPORTED_EVENT UINT16_MAX +static uint16_t supported_event_map[MAX_EVENT_ID + 1]; + +/* + * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map + * of ARM event numbers to indices in our pm_events array. + * + * Note: Events in the 0x40XX range are not currently supported. 
+ */ +void pmu_init(ARMCPU *cpu) +{ + unsigned int i; + + /* + * Empty supported_event_map and cpu->pmceid[01] before adding supported + * events to them + */ + for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) { + supported_event_map[i] = UNSUPPORTED_EVENT; + } + cpu->pmceid0 = 0; + cpu->pmceid1 = 0; + + for (i = 0; i < ARRAY_SIZE(pm_events); i++) { + const pm_event *cnt = &pm_events[i]; + assert(cnt->number <= MAX_EVENT_ID); + /* We do not currently support events in the 0x40xx range */ + assert(cnt->number <= 0x3f); + + if (cnt->supported(&cpu->env)) { + supported_event_map[cnt->number] = i; + uint64_t event_mask = 1ULL << (cnt->number & 0x1f); + if (cnt->number & 0x20) { + cpu->pmceid1 |= event_mask; + } else { + cpu->pmceid0 |= event_mask; + } + } + } +} + +/* + * Check at runtime whether a PMU event is supported for the current machine + */ +static bool event_supported(uint16_t number) +{ + if (number > MAX_EVENT_ID) { + return false; + } + return supported_event_map[number] != UNSUPPORTED_EVENT; +} + +static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* + * Performance monitor registers user accessibility is controlled + * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable + * trapping to EL2 or EL3 for other accesses. + */ + int el = arm_current_el(env); + uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); + + if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { + return CP_ACCESS_TRAP_EL1; + } + if (el < 2 && (mdcr_el2 & MDCR_TPM)) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { + return CP_ACCESS_TRAP_EL3; + } + + return CP_ACCESS_OK; +} + +static CPAccessResult pmreg_access_xevcntr(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + /* ER: event counter read trap control */ + if (arm_feature(env, ARM_FEATURE_V8) + && arm_current_el(env) == 0 + && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 + && isread) { + return CP_ACCESS_OK; + } + + return pmreg_access(env, ri, isread); +} + +static CPAccessResult pmreg_access_swinc(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + /* SW: software increment write trap control */ + if (arm_feature(env, ARM_FEATURE_V8) + && arm_current_el(env) == 0 + && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 + && !isread) { + return CP_ACCESS_OK; + } + + return pmreg_access(env, ri, isread); +} + +static CPAccessResult pmreg_access_selr(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + /* ER: event counter read trap control */ + if (arm_feature(env, ARM_FEATURE_V8) + && arm_current_el(env) == 0 + && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { + return CP_ACCESS_OK; + } + + return pmreg_access(env, ri, isread); +} + +static CPAccessResult pmreg_access_ccntr(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + /* CR: cycle counter read trap control */ + if (arm_feature(env, ARM_FEATURE_V8) + && arm_current_el(env) == 0 + && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 + && isread) { + return CP_ACCESS_OK; + } + + return pmreg_access(env, ri, isread); +} + +/* + * Returns true if the counter (pass 31 for PMCCNTR) should count events using + * the current EL, security state, and register configuration. 
+ */ +static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) +{ + uint64_t filter; + bool e, p, u, nsk, nsu, nsh, m; + bool enabled, prohibited = false, filtered; + bool secure = arm_is_secure(env); + int el = arm_current_el(env); + uint64_t mdcr_el2; + uint8_t hpmn; + + /* + * We might be called for M-profile cores where MDCR_EL2 doesn't + * exist and arm_mdcr_el2_eff() will assert, so this early-exit check + * must be before we read that value. + */ + if (!arm_feature(env, ARM_FEATURE_PMU)) { + return false; + } + + mdcr_el2 = arm_mdcr_el2_eff(env); + hpmn = mdcr_el2 & MDCR_HPMN; + + if (!arm_feature(env, ARM_FEATURE_EL2) || + (counter < hpmn || counter == 31)) { + e = env->cp15.c9_pmcr & PMCRE; + } else { + e = mdcr_el2 & MDCR_HPME; + } + enabled = e && (env->cp15.c9_pmcnten & (1 << counter)); + + /* Is event counting prohibited? */ + if (el == 2 && (counter < hpmn || counter == 31)) { + prohibited = mdcr_el2 & MDCR_HPMD; + } + if (secure) { + prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME); + } + + if (counter == 31) { + /* + * The cycle counter defaults to running. PMCR.DP says "disable + * the cycle counter when event counting is prohibited". + * Some MDCR bits disable the cycle counter specifically. + */ + prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP; + if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { + if (secure) { + prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD); + } + if (el == 2) { + prohibited = prohibited || (mdcr_el2 & MDCR_HCCD); + } + } + } + + if (counter == 31) { + filter = env->cp15.pmccfiltr_el0; + } else { + filter = env->cp15.c14_pmevtyper[counter]; + } + + p = filter & PMXEVTYPER_P; + u = filter & PMXEVTYPER_U; + nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK); + nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU); + nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH); + m = arm_el_is_aa64(env, 1) && + arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M); + + if (el == 0) { + filtered = secure ? u : u != nsu; + } else if (el == 1) { + filtered = secure ? p : p != nsk; + } else if (el == 2) { + filtered = !nsh; + } else { /* EL3 */ + filtered = m != p; + } + + if (counter != 31) { + /* + * If not checking PMCCNTR, ensure the counter is setup to an event we + * support + */ + uint16_t event = filter & PMXEVTYPER_EVTCOUNT; + if (!event_supported(event)) { + return false; + } + } + + return enabled && !prohibited && !filtered; +} + +static void pmu_update_irq(CPUARMState *env) +{ + ARMCPU *cpu = env_archcpu(env); + qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) && + (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); +} + +static bool pmccntr_clockdiv_enabled(CPUARMState *env) +{ + /* + * Return true if the clock divider is enabled and the cycle counter + * is supposed to tick only once every 64 clock cycles. This is + * controlled by PMCR.D, but if PMCR.LC is set to enable the long + * (64-bit) cycle counter PMCR.D has no effect. 
+ */ + return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD; +} + +static bool pmevcntr_is_64_bit(CPUARMState *env, int counter) +{ + /* Return true if the specified event counter is configured to be 64 bit */ + + /* This isn't intended to be used with the cycle counter */ + assert(counter < 31); + + if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { + return false; + } + + if (arm_feature(env, ARM_FEATURE_EL2)) { + /* + * MDCR_EL2.HLP still applies even when EL2 is disabled in the + * current security state, so we don't use arm_mdcr_el2_eff() here. + */ + bool hlp = env->cp15.mdcr_el2 & MDCR_HLP; + int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN; + + if (counter >= hpmn) { + return hlp; + } + } + return env->cp15.c9_pmcr & PMCRLP; +} + +/* + * Ensure c15_ccnt is the guest-visible count so that operations such as + * enabling/disabling the counter or filtering, modifying the count itself, + * etc. can be done logically. This is essentially a no-op if the counter is + * not enabled at the time of the call. + */ +static void pmccntr_op_start(CPUARMState *env) +{ + uint64_t cycles = cycles_get_count(env); + + if (pmu_counter_enabled(env, 31)) { + uint64_t eff_cycles = cycles; + if (pmccntr_clockdiv_enabled(env)) { + eff_cycles /= 64; + } + + uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta; + + uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \ + 1ull << 63 : 1ull << 31; + if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) { + env->cp15.c9_pmovsr |= (1ULL << 31); + pmu_update_irq(env); + } + + env->cp15.c15_ccnt = new_pmccntr; + } + env->cp15.c15_ccnt_delta = cycles; +} + +/* + * If PMCCNTR is enabled, recalculate the delta between the clock and the + * guest-visible count. A call to pmccntr_op_finish should follow every call to + * pmccntr_op_start. + */ +static void pmccntr_op_finish(CPUARMState *env) +{ + if (pmu_counter_enabled(env, 31)) { +#ifndef CONFIG_USER_ONLY + /* Calculate when the counter will next overflow */ + uint64_t remaining_cycles = -env->cp15.c15_ccnt; + if (!(env->cp15.c9_pmcr & PMCRLC)) { + remaining_cycles = (uint32_t)remaining_cycles; + } + int64_t overflow_in = cycles_ns_per(remaining_cycles); + + if (overflow_in > 0) { + int64_t overflow_at; + + if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + overflow_in, &overflow_at)) { + ARMCPU *cpu = env_archcpu(env); + timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); + } + } +#endif + + uint64_t prev_cycles = env->cp15.c15_ccnt_delta; + if (pmccntr_clockdiv_enabled(env)) { + prev_cycles /= 64; + } + env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt; + } +} + +static void pmevcntr_op_start(CPUARMState *env, uint8_t counter) +{ + + uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; + uint64_t count = 0; + if (event_supported(event)) { + uint16_t event_idx = supported_event_map[event]; + count = pm_events[event_idx].get_count(env); + } + + if (pmu_counter_enabled(env, counter)) { + uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; + uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ? 
+ 1ULL << 63 : 1ULL << 31; + + if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) { + env->cp15.c9_pmovsr |= (1 << counter); + pmu_update_irq(env); + } + env->cp15.c14_pmevcntr[counter] = new_pmevcntr; + } + env->cp15.c14_pmevcntr_delta[counter] = count; +} + +static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter) +{ + if (pmu_counter_enabled(env, counter)) { +#ifndef CONFIG_USER_ONLY + uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; + uint16_t event_idx = supported_event_map[event]; + uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1); + int64_t overflow_in; + + if (!pmevcntr_is_64_bit(env, counter)) { + delta = (uint32_t)delta; + } + overflow_in = pm_events[event_idx].ns_per_count(delta); + + if (overflow_in > 0) { + int64_t overflow_at; + + if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + overflow_in, &overflow_at)) { + ARMCPU *cpu = env_archcpu(env); + timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); + } + } +#endif + + env->cp15.c14_pmevcntr_delta[counter] -= + env->cp15.c14_pmevcntr[counter]; + } +} + +void pmu_op_start(CPUARMState *env) +{ + unsigned int i; + pmccntr_op_start(env); + for (i = 0; i < pmu_num_counters(env); i++) { + pmevcntr_op_start(env, i); + } +} + +void pmu_op_finish(CPUARMState *env) +{ + unsigned int i; + pmccntr_op_finish(env); + for (i = 0; i < pmu_num_counters(env); i++) { + pmevcntr_op_finish(env, i); + } +} + +void pmu_pre_el_change(ARMCPU *cpu, void *ignored) +{ + pmu_op_start(&cpu->env); +} + +void pmu_post_el_change(ARMCPU *cpu, void *ignored) +{ + pmu_op_finish(&cpu->env); +} + +void arm_pmu_timer_cb(void *opaque) +{ + ARMCPU *cpu = opaque; + + /* + * Update all the counter values based on the current underlying counts, + * triggering interrupts to be raised, if necessary. pmu_op_finish() also + * has the effect of setting the cpu->pmu_timer to the next earliest time a + * counter may expire. + */ + pmu_op_start(&cpu->env); + pmu_op_finish(&cpu->env); +} + +static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmu_op_start(env); + + if (value & PMCRC) { + /* The counter has been reset */ + env->cp15.c15_ccnt = 0; + } + + if (value & PMCRP) { + unsigned int i; + for (i = 0; i < pmu_num_counters(env); i++) { + env->cp15.c14_pmevcntr[i] = 0; + } + } + + env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK; + env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK); + + pmu_op_finish(env); +} + +static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint64_t pmcr = env->cp15.c9_pmcr; + + /* + * If EL2 is implemented and enabled for the current security state, reads + * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN. 
+ */ + if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) { + pmcr &= ~PMCRN_MASK; + pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT; + } + + return pmcr; +} + +static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + unsigned int i; + uint64_t overflow_mask, new_pmswinc; + + for (i = 0; i < pmu_num_counters(env); i++) { + /* Increment a counter's count iff: */ + if ((value & (1 << i)) && /* counter's bit is set */ + /* counter is enabled and not filtered */ + pmu_counter_enabled(env, i) && + /* counter is SW_INCR */ + (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) { + pmevcntr_op_start(env, i); + + /* + * Detect if this write causes an overflow since we can't predict + * PMSWINC overflows like we can for other events + */ + new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; + + overflow_mask = pmevcntr_is_64_bit(env, i) ? + 1ULL << 63 : 1ULL << 31; + + if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) { + env->cp15.c9_pmovsr |= (1 << i); + pmu_update_irq(env); + } + + env->cp15.c14_pmevcntr[i] = new_pmswinc; + + pmevcntr_op_finish(env, i); + } + } +} + +static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint64_t ret; + pmccntr_op_start(env); + ret = env->cp15.c15_ccnt; + pmccntr_op_finish(env); + return ret; +} + +static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and + * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the + * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are + * accessed. + */ + env->cp15.c9_pmselr = value & 0x1f; +} + +static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmccntr_op_start(env); + env->cp15.c15_ccnt = value; + pmccntr_op_finish(env); +} + +static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint64_t cur_val = pmccntr_read(env, NULL); + + pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); +} + +static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmccntr_op_start(env); + env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0; + pmccntr_op_finish(env); +} + +static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmccntr_op_start(env); + /* M is not accessible from AArch32 */ + env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) | + (value & PMCCFILTR); + pmccntr_op_finish(env); +} + +static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* M is not visible in AArch32 */ + return env->cp15.pmccfiltr_el0 & PMCCFILTR; +} + +static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmu_op_start(env); + value &= pmu_counter_mask(env); + env->cp15.c9_pmcnten |= value; + pmu_op_finish(env); +} + +static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmu_op_start(env); + value &= pmu_counter_mask(env); + env->cp15.c9_pmcnten &= ~value; + pmu_op_finish(env); +} + +static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= pmu_counter_mask(env); + env->cp15.c9_pmovsr &= ~value; + pmu_update_irq(env); +} + +static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= pmu_counter_mask(env); + env->cp15.c9_pmovsr |= value; + pmu_update_irq(env); +} + +static void pmevtyper_write(CPUARMState *env, 
const ARMCPRegInfo *ri, + uint64_t value, const uint8_t counter) +{ + if (counter == 31) { + pmccfiltr_write(env, ri, value); + } else if (counter < pmu_num_counters(env)) { + pmevcntr_op_start(env, counter); + + /* + * If this counter's event type is changing, store the current + * underlying count for the new type in c14_pmevcntr_delta[counter] so + * pmevcntr_op_finish has the correct baseline when it converts back to + * a delta. + */ + uint16_t old_event = env->cp15.c14_pmevtyper[counter] & + PMXEVTYPER_EVTCOUNT; + uint16_t new_event = value & PMXEVTYPER_EVTCOUNT; + if (old_event != new_event) { + uint64_t count = 0; + if (event_supported(new_event)) { + uint16_t event_idx = supported_event_map[new_event]; + count = pm_events[event_idx].get_count(env); + } + env->cp15.c14_pmevcntr_delta[counter] = count; + } + + env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK; + pmevcntr_op_finish(env, counter); + } + /* + * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when + * PMSELR value is equal to or greater than the number of implemented + * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI. + */ +} + +static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri, + const uint8_t counter) +{ + if (counter == 31) { + return env->cp15.pmccfiltr_el0; + } else if (counter < pmu_num_counters(env)) { + return env->cp15.c14_pmevtyper[counter]; + } else { + /* + * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER + * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write(). + */ + return 0; + } +} + +static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); + pmevtyper_write(env, ri, value, counter); +} + +static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); + env->cp15.c14_pmevtyper[counter] = value; + + /* + * pmevtyper_rawwrite is called between a pair of pmu_op_start and + * pmu_op_finish calls when loading saved state for a migration. Because + * we're potentially updating the type of event here, the value written to + * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a + * different counter type. Therefore, we need to set this value to the + * current count for the counter type we're writing so that pmu_op_finish + * has the correct count for its calculation. 
+ */ + uint16_t event = value & PMXEVTYPER_EVTCOUNT; + if (event_supported(event)) { + uint16_t event_idx = supported_event_map[event]; + env->cp15.c14_pmevcntr_delta[counter] = + pm_events[event_idx].get_count(env); + } +} + +static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); + return pmevtyper_read(env, ri, counter); +} + +static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31); +} + +static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31); +} + +static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value, uint8_t counter) +{ + if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { + /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */ + value &= MAKE_64BIT_MASK(0, 32); + } + if (counter < pmu_num_counters(env)) { + pmevcntr_op_start(env, counter); + env->cp15.c14_pmevcntr[counter] = value; + pmevcntr_op_finish(env, counter); + } + /* + * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR + * are CONSTRAINED UNPREDICTABLE. + */ +} + +static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri, + uint8_t counter) +{ + if (counter < pmu_num_counters(env)) { + uint64_t ret; + pmevcntr_op_start(env, counter); + ret = env->cp15.c14_pmevcntr[counter]; + pmevcntr_op_finish(env, counter); + if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { + /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */ + ret &= MAKE_64BIT_MASK(0, 32); + } + return ret; + } else { + /* + * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR + * are CONSTRAINED UNPREDICTABLE. 
+ */ + return 0; + } +} + +static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); + pmevcntr_write(env, ri, value, counter); +} + +static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); + return pmevcntr_read(env, ri, counter); +} + +static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); + assert(counter < pmu_num_counters(env)); + env->cp15.c14_pmevcntr[counter] = value; + pmevcntr_write(env, ri, value, counter); +} + +static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); + assert(counter < pmu_num_counters(env)); + return env->cp15.c14_pmevcntr[counter]; +} + +static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31); +} + +static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31); +} + +static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + if (arm_feature(env, ARM_FEATURE_V8)) { + env->cp15.c9_pmuserenr = value & 0xf; + } else { + env->cp15.c9_pmuserenr = value & 1; + } +} + +static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* We have no event counters so only the C bit can be changed */ + value &= pmu_counter_mask(env); + env->cp15.c9_pminten |= value; + pmu_update_irq(env); +} + +static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= pmu_counter_mask(env); + env->cp15.c9_pminten &= ~value; + pmu_update_irq(env); +} + +static const ARMCPRegInfo v7_pm_reginfo[] = { + /* + * Performance monitors are implementation defined in v7, + * but with an ARM recommended set of registers, which we + * follow. + * + * Performance registers fall into three categories: + * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) + * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) + * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) + * For the cases controlled by PMUSERENR we must set .access to PL0_RW + * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
+ */ + { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, + .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), + .writefn = pmcntenset_write, + .accessfn = pmreg_access, + .fgt = FGT_PMCNTEN, + .raw_writefn = raw_write }, + { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, + .access = PL0_RW, .accessfn = pmreg_access, + .fgt = FGT_PMCNTEN, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, + .writefn = pmcntenset_write, .raw_writefn = raw_write }, + { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, + .access = PL0_RW, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), + .accessfn = pmreg_access, + .fgt = FGT_PMCNTEN, + .writefn = pmcntenclr_write, .raw_writefn = raw_write, + .type = ARM_CP_ALIAS | ARM_CP_IO }, + { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, + .access = PL0_RW, .accessfn = pmreg_access, + .fgt = FGT_PMCNTEN, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), + .writefn = pmcntenclr_write, .raw_writefn = raw_write }, + { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, + .access = PL0_RW, .type = ARM_CP_IO, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), + .accessfn = pmreg_access, + .fgt = FGT_PMOVS, + .writefn = pmovsr_write, + .raw_writefn = raw_write }, + { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, + .access = PL0_RW, .accessfn = pmreg_access, + .fgt = FGT_PMOVS, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), + .writefn = pmovsr_write, + .raw_writefn = raw_write }, + { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, + .access = PL0_W, .accessfn = pmreg_access_swinc, + .fgt = FGT_PMSWINC_EL0, + .type = ARM_CP_NO_RAW | ARM_CP_IO, + .writefn = pmswinc_write }, + { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, + .access = PL0_W, .accessfn = pmreg_access_swinc, + .fgt = FGT_PMSWINC_EL0, + .type = ARM_CP_NO_RAW | ARM_CP_IO, + .writefn = pmswinc_write }, + { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, + .access = PL0_RW, .type = ARM_CP_ALIAS, + .fgt = FGT_PMSELR_EL0, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), + .accessfn = pmreg_access_selr, .writefn = pmselr_write, + .raw_writefn = raw_write}, + { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, + .access = PL0_RW, .accessfn = pmreg_access_selr, + .fgt = FGT_PMSELR_EL0, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), + .writefn = pmselr_write, .raw_writefn = raw_write, }, + { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, + .access = PL0_RW, .accessfn = pmreg_access_ccntr, + .fgt = FGT_PMCCNTR_EL0, + .type = ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), + .readfn = pmccntr_read, .writefn = pmccntr_write, + .raw_readfn = raw_read, .raw_writefn = raw_write, }, + { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, + .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, + .access = PL0_RW, .accessfn = pmreg_access, + .fgt = FGT_PMCCFILTR_EL0, + .type = 
ARM_CP_ALIAS | ARM_CP_IO, + .resetvalue = 0, }, + { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, + .writefn = pmccfiltr_write, .raw_writefn = raw_write, + .access = PL0_RW, .accessfn = pmreg_access, + .fgt = FGT_PMCCFILTR_EL0, + .type = ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), + .resetvalue = 0, }, + { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, + .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = pmreg_access, + .fgt = FGT_PMEVTYPERN_EL0, + .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, + { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, + .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = pmreg_access, + .fgt = FGT_PMEVTYPERN_EL0, + .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, + { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, + .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = pmreg_access_xevcntr, + .fgt = FGT_PMEVCNTRN_EL0, + .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, + { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, + .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, + .accessfn = pmreg_access_xevcntr, + .fgt = FGT_PMEVCNTRN_EL0, + .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, + { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, + .access = PL0_R | PL1_RW, .accessfn = access_tpm, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), + .resetvalue = 0, + .writefn = pmuserenr_write, .raw_writefn = raw_write }, + { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, + .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), + .resetvalue = 0, + .writefn = pmuserenr_write, .raw_writefn = raw_write }, + { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tpm, + .fgt = FGT_PMINTEN, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), + .resetvalue = 0, + .writefn = pmintenset_write, .raw_writefn = raw_write }, + { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tpm, + .fgt = FGT_PMINTEN, + .type = ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), + .writefn = pmintenset_write, .raw_writefn = raw_write, + .resetvalue = 0x0 }, + { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, .accessfn = access_tpm, + .fgt = FGT_PMINTEN, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), + .writefn = pmintenclr_write, .raw_writefn = raw_write }, + { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, + .access = PL1_RW, .accessfn = access_tpm, + .fgt = FGT_PMINTEN, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), + .writefn = pmintenclr_write, .raw_writefn = raw_write }, +}; + +static const ARMCPRegInfo pmovsset_cp_reginfo[] = { + /* PMOVSSET is not implemented in v7 before v7ve */ + { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, + 
.access = PL0_RW, .accessfn = pmreg_access, + .fgt = FGT_PMOVS, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), + .writefn = pmovsset_write, + .raw_writefn = raw_write }, + { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, + .access = PL0_RW, .accessfn = pmreg_access, + .fgt = FGT_PMOVS, + .type = ARM_CP_ALIAS | ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), + .writefn = pmovsset_write, + .raw_writefn = raw_write }, +}; + +void define_pm_cpregs(ARMCPU *cpu) +{ + CPUARMState *env = &cpu->env; + + if (arm_feature(env, ARM_FEATURE_V7)) { + /* + * v7 performance monitor control register: same implementor + * field as main ID register, and we implement four counters in + * addition to the cycle count register. + */ + static const ARMCPRegInfo pmcr = { + .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, + .access = PL0_RW, + .fgt = FGT_PMCR_EL0, + .type = ARM_CP_IO | ARM_CP_ALIAS, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), + .accessfn = pmreg_access, + .readfn = pmcr_read, .raw_readfn = raw_read, + .writefn = pmcr_write, .raw_writefn = raw_write, + }; + const ARMCPRegInfo pmcr64 = { + .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, + .access = PL0_RW, .accessfn = pmreg_access, + .fgt = FGT_PMCR_EL0, + .type = ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), + .resetvalue = cpu->isar.reset_pmcr_el0, + .readfn = pmcr_read, .raw_readfn = raw_read, + .writefn = pmcr_write, .raw_writefn = raw_write, + }; + + define_one_arm_cp_reg(cpu, &pmcr); + define_one_arm_cp_reg(cpu, &pmcr64); + define_arm_cp_regs(cpu, v7_pm_reginfo); + /* + * 32-bit AArch32 PMCCNTR. We don't expose this to GDB if the + * new-in-v8 PMUv3 64-bit AArch32 PMCCNTR register is implemented + * (as that will provide the GDB user's view of "PMCCNTR"). 
+ */ + ARMCPRegInfo pmccntr = { + .name = "PMCCNTR", + .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, + .access = PL0_RW, .accessfn = pmreg_access_ccntr, + .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, + .fgt = FGT_PMCCNTR_EL0, + .readfn = pmccntr_read, .writefn = pmccntr_write32, + }; + if (arm_feature(env, ARM_FEATURE_V8)) { + pmccntr.type |= ARM_CP_NO_GDB; + } + define_one_arm_cp_reg(cpu, &pmccntr); + + for (unsigned i = 0, pmcrn = pmu_num_counters(env); i < pmcrn; i++) { + g_autofree char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); + g_autofree char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); + g_autofree char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); + g_autofree char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); + + ARMCPRegInfo pmev_regs[] = { + { .name = pmevcntr_name, .cp = 15, .crn = 14, + .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, + .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, + .fgt = FGT_PMEVCNTRN_EL0, + .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, + .accessfn = pmreg_access_xevcntr }, + { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), + .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr, + .type = ARM_CP_IO, + .fgt = FGT_PMEVCNTRN_EL0, + .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, + .raw_readfn = pmevcntr_rawread, + .raw_writefn = pmevcntr_rawwrite }, + { .name = pmevtyper_name, .cp = 15, .crn = 14, + .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, + .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, + .fgt = FGT_PMEVTYPERN_EL0, + .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, + .accessfn = pmreg_access }, + { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), + .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, + .fgt = FGT_PMEVTYPERN_EL0, + .type = ARM_CP_IO, + .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, + .raw_writefn = pmevtyper_rawwrite }, + }; + define_arm_cp_regs(cpu, pmev_regs); + } + } + if (arm_feature(env, ARM_FEATURE_V7VE)) { + define_arm_cp_regs(cpu, pmovsset_cp_reginfo); + } + + if (arm_feature(env, ARM_FEATURE_V8)) { + const ARMCPRegInfo v8_pm_reginfo[] = { + { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .fgt = FGT_PMCEIDN_EL0, + .resetvalue = extract64(cpu->pmceid0, 0, 32) }, + { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .fgt = FGT_PMCEIDN_EL0, + .resetvalue = cpu->pmceid0 }, + { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .fgt = FGT_PMCEIDN_EL0, + .resetvalue = extract64(cpu->pmceid1, 0, 32) }, + { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .fgt = FGT_PMCEIDN_EL0, + .resetvalue = cpu->pmceid1 }, + /* AArch32 64-bit PMCCNTR view: added in PMUv3 with Armv8 */ + { .name = "PMCCNTR", .state = ARM_CP_STATE_AA32, + .cp = 15, .crm = 9, .opc1 = 0, + .access = PL0_RW, .accessfn = pmreg_access_ccntr, .resetvalue = 0, + .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_64BIT, + 
.fgt = FGT_PMCCNTR_EL0, .readfn = pmccntr_read, + .writefn = pmccntr_write, }, + }; + define_arm_cp_regs(cpu, v8_pm_reginfo); + } + + if (cpu_isar_feature(aa32_pmuv3p1, cpu)) { + ARMCPRegInfo v81_pmu_regs[] = { + { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .fgt = FGT_PMCEIDN_EL0, + .resetvalue = extract64(cpu->pmceid0, 32, 32) }, + { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .fgt = FGT_PMCEIDN_EL0, + .resetvalue = extract64(cpu->pmceid1, 32, 32) }, + }; + define_arm_cp_regs(cpu, v81_pmu_regs); + } + + if (cpu_isar_feature(any_pmuv3p4, cpu)) { + static const ARMCPRegInfo v84_pmmir = { + .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, + .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .fgt = FGT_PMMIR_EL1, + .resetvalue = 0 + }; + define_one_arm_cp_reg(cpu, &v84_pmmir); + } +} diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h index cc7c54378f4..c9506aa6d57 100644 --- a/target/arm/cpregs.h +++ b/target/arm/cpregs.h @@ -23,6 +23,7 @@ #include "hw/registerfields.h" #include "target/arm/kvm-consts.h" +#include "cpu.h" /* * ARMCPRegInfo type field bits: @@ -126,6 +127,14 @@ enum { * equivalent EL1 register when FEAT_NV2 is enabled. */ ARM_CP_NV2_REDIRECT = 1 << 20, + /* + * Flag: this is a TLBI insn which (when FEAT_XS is present) also has + * an NXS variant at the same encoding except that crn is 1 greater, + * so when registering this cpreg automatically also register one + * for the TLBI NXS variant. (For QEMU the NXS variant behaves + * identically to the normal one, other than FGT trapping handling.) + */ + ARM_CP_ADD_TLBI_NXS = 1 << 21, }; /* @@ -320,20 +329,23 @@ typedef enum CPAccessResult { * Access fails due to a configurable trap or enable which would * result in a categorized exception syndrome giving information about * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6, - * 0xc or 0x18). + * 0xc or 0x18). These traps are always to a specified target EL, + * never to the usual target EL. */ - CP_ACCESS_TRAP = (1 << 2), - CP_ACCESS_TRAP_EL2 = CP_ACCESS_TRAP | 2, - CP_ACCESS_TRAP_EL3 = CP_ACCESS_TRAP | 3, + CP_ACCESS_TRAP_BIT = (1 << 2), + CP_ACCESS_TRAP_EL1 = CP_ACCESS_TRAP_BIT | 1, + CP_ACCESS_TRAP_EL2 = CP_ACCESS_TRAP_BIT | 2, + CP_ACCESS_TRAP_EL3 = CP_ACCESS_TRAP_BIT | 3, /* - * Access fails and results in an exception syndrome 0x0 ("uncategorized"). + * Access fails with UNDEFINED, i.e. an exception syndrome 0x0 + * ("uncategorized"), which is what an undefined insn produces. * Note that this is not a catch-all case -- the set of cases which may * result in this failure is specifically defined by the architecture. * This trap is always to the usual target EL, never directly to a * specified target EL. */ - CP_ACCESS_TRAP_UNCATEGORIZED = (2 << 2), + CP_ACCESS_UNDEFINED = (2 << 2), } CPAccessResult; /* Indexes into fgt_read[] */ @@ -621,6 +633,7 @@ FIELD(HDFGWTR_EL2, NBRBCTL, 60, 1) FIELD(HDFGWTR_EL2, NBRBDATA, 61, 1) FIELD(HDFGWTR_EL2, NPMSNEVFR_EL1, 62, 1) +FIELD(FGT, NXS, 13, 1) /* Honour HCR_EL2.FGTnXS to suppress FGT */ /* Which fine-grained trap bit register to check, if any */ FIELD(FGT, TYPE, 10, 3) FIELD(FGT, REV, 9, 1) /* Is bit sense reversed? 
*/ @@ -639,6 +652,17 @@ FIELD(FGT, BITPOS, 0, 6) /* Bit position within the uint64_t */ #define DO_REV_BIT(REG, BITNAME) \ FGT_##BITNAME = FGT_##REG | FGT_REV | R_##REG##_EL2_##BITNAME##_SHIFT +/* + * The FGT bits for TLBI maintenance instructions accessible at EL1 always + * affect the "normal" TLBI insns; they affect the corresponding TLBI insns + * with the nXS qualifier only if HCRX_EL2.FGTnXS is 0. We define e.g. + * FGT_TLBIVAE1 to use for the normal insn, and FGT_TLBIVAE1NXS to use + * for the nXS qualified insn. + */ +#define DO_TLBINXS_BIT(REG, BITNAME) \ + FGT_##BITNAME = FGT_##REG | R_##REG##_EL2_##BITNAME##_SHIFT, \ + FGT_##BITNAME##NXS = FGT_##BITNAME | R_FGT_NXS_MASK + typedef enum FGTBit { /* * These bits tell us which register arrays to use: @@ -772,36 +796,36 @@ typedef enum FGTBit { DO_BIT(HFGITR, ATS1E0W), DO_BIT(HFGITR, ATS1E1RP), DO_BIT(HFGITR, ATS1E1WP), - DO_BIT(HFGITR, TLBIVMALLE1OS), - DO_BIT(HFGITR, TLBIVAE1OS), - DO_BIT(HFGITR, TLBIASIDE1OS), - DO_BIT(HFGITR, TLBIVAAE1OS), - DO_BIT(HFGITR, TLBIVALE1OS), - DO_BIT(HFGITR, TLBIVAALE1OS), - DO_BIT(HFGITR, TLBIRVAE1OS), - DO_BIT(HFGITR, TLBIRVAAE1OS), - DO_BIT(HFGITR, TLBIRVALE1OS), - DO_BIT(HFGITR, TLBIRVAALE1OS), - DO_BIT(HFGITR, TLBIVMALLE1IS), - DO_BIT(HFGITR, TLBIVAE1IS), - DO_BIT(HFGITR, TLBIASIDE1IS), - DO_BIT(HFGITR, TLBIVAAE1IS), - DO_BIT(HFGITR, TLBIVALE1IS), - DO_BIT(HFGITR, TLBIVAALE1IS), - DO_BIT(HFGITR, TLBIRVAE1IS), - DO_BIT(HFGITR, TLBIRVAAE1IS), - DO_BIT(HFGITR, TLBIRVALE1IS), - DO_BIT(HFGITR, TLBIRVAALE1IS), - DO_BIT(HFGITR, TLBIRVAE1), - DO_BIT(HFGITR, TLBIRVAAE1), - DO_BIT(HFGITR, TLBIRVALE1), - DO_BIT(HFGITR, TLBIRVAALE1), - DO_BIT(HFGITR, TLBIVMALLE1), - DO_BIT(HFGITR, TLBIVAE1), - DO_BIT(HFGITR, TLBIASIDE1), - DO_BIT(HFGITR, TLBIVAAE1), - DO_BIT(HFGITR, TLBIVALE1), - DO_BIT(HFGITR, TLBIVAALE1), + DO_TLBINXS_BIT(HFGITR, TLBIVMALLE1OS), + DO_TLBINXS_BIT(HFGITR, TLBIVAE1OS), + DO_TLBINXS_BIT(HFGITR, TLBIASIDE1OS), + DO_TLBINXS_BIT(HFGITR, TLBIVAAE1OS), + DO_TLBINXS_BIT(HFGITR, TLBIVALE1OS), + DO_TLBINXS_BIT(HFGITR, TLBIVAALE1OS), + DO_TLBINXS_BIT(HFGITR, TLBIRVAE1OS), + DO_TLBINXS_BIT(HFGITR, TLBIRVAAE1OS), + DO_TLBINXS_BIT(HFGITR, TLBIRVALE1OS), + DO_TLBINXS_BIT(HFGITR, TLBIRVAALE1OS), + DO_TLBINXS_BIT(HFGITR, TLBIVMALLE1IS), + DO_TLBINXS_BIT(HFGITR, TLBIVAE1IS), + DO_TLBINXS_BIT(HFGITR, TLBIASIDE1IS), + DO_TLBINXS_BIT(HFGITR, TLBIVAAE1IS), + DO_TLBINXS_BIT(HFGITR, TLBIVALE1IS), + DO_TLBINXS_BIT(HFGITR, TLBIVAALE1IS), + DO_TLBINXS_BIT(HFGITR, TLBIRVAE1IS), + DO_TLBINXS_BIT(HFGITR, TLBIRVAAE1IS), + DO_TLBINXS_BIT(HFGITR, TLBIRVALE1IS), + DO_TLBINXS_BIT(HFGITR, TLBIRVAALE1IS), + DO_TLBINXS_BIT(HFGITR, TLBIRVAE1), + DO_TLBINXS_BIT(HFGITR, TLBIRVAAE1), + DO_TLBINXS_BIT(HFGITR, TLBIRVALE1), + DO_TLBINXS_BIT(HFGITR, TLBIRVAALE1), + DO_TLBINXS_BIT(HFGITR, TLBIVMALLE1), + DO_TLBINXS_BIT(HFGITR, TLBIVAE1), + DO_TLBINXS_BIT(HFGITR, TLBIASIDE1), + DO_TLBINXS_BIT(HFGITR, TLBIVAAE1), + DO_TLBINXS_BIT(HFGITR, TLBIVALE1), + DO_TLBINXS_BIT(HFGITR, TLBIVAALE1), DO_BIT(HFGITR, CFPRCTX), DO_BIT(HFGITR, DVPRCTX), DO_BIT(HFGITR, CPPRCTX), @@ -1041,6 +1065,9 @@ void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, /* CPReadFn that can be used for read-as-zero behaviour */ uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri); +/* CPReadFn that just reads the value from ri->fieldoffset */ +uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri); + /* CPWriteFn that just writes the value to ri->fieldoffset */ void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value); @@ 
-1134,4 +1161,32 @@ static inline bool arm_cpreg_traps_in_nv(const ARMCPRegInfo *ri) return ri->opc1 == 4 || ri->opc1 == 5; } +/* Macros for accessing a specified CP register bank */ +#define A32_BANKED_REG_GET(_env, _regname, _secure) \ + ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns) + +#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \ + do { \ + if (_secure) { \ + (_env)->cp15._regname##_s = (_val); \ + } else { \ + (_env)->cp15._regname##_ns = (_val); \ + } \ + } while (0) + +/* + * Macros for automatically accessing a specific CP register bank depending on + * the current secure state of the system. These macros are not intended for + * supporting instruction translation reads/writes as these are dependent + * solely on the SCR.NS bit and not the mode. + */ +#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \ + A32_BANKED_REG_GET((_env), _regname, \ + (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3))) + +#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \ + A32_BANKED_REG_SET((_env), _regname, \ + (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \ + (_val)) + #endif /* TARGET_ARM_CPREGS_H */ diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h index c59ca104fe1..5876162428a 100644 --- a/target/arm/cpu-features.h +++ b/target/arm/cpu-features.h @@ -21,6 +21,9 @@ #define TARGET_ARM_FEATURES_H #include "hw/registerfields.h" +#include "qemu/host-utils.h" +#include "cpu.h" +#include "cpu-sysregs.h" /* * Naming convention for isar_feature functions: @@ -43,103 +46,103 @@ */ static inline bool isar_feature_aa32_thumb_div(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR0, DIVIDE) != 0; } static inline bool isar_feature_aa32_arm_div(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1; + return FIELD_EX32_IDREG(id, ID_ISAR0, DIVIDE) > 1; } static inline bool isar_feature_aa32_lob(const ARMISARegisters *id) { /* (M-profile) low-overhead loops and branch future */ - return FIELD_EX32(id->id_isar0, ID_ISAR0, CMPBRANCH) >= 3; + return FIELD_EX32_IDREG(id, ID_ISAR0, CMPBRANCH) >= 3; } static inline bool isar_feature_aa32_jazelle(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR1, JAZELLE) != 0; } static inline bool isar_feature_aa32_aes(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR5, AES) != 0; } static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1; + return FIELD_EX32_IDREG(id, ID_ISAR5, AES) > 1; } static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR5, SHA1) != 0; } static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR5, SHA2) != 0; } static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR5, CRC32) != 0; } static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR5, RDM) != 0; } static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, 
ID_ISAR5, VCMA) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR5, VCMA) != 0; } static inline bool isar_feature_aa32_jscvt(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, JSCVT) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR6, JSCVT) != 0; } static inline bool isar_feature_aa32_dp(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR6, DP) != 0; } static inline bool isar_feature_aa32_fhm(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, FHM) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR6, FHM) != 0; } static inline bool isar_feature_aa32_sb(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, SB) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR6, SB) != 0; } static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR6, SPECRES) != 0; } static inline bool isar_feature_aa32_bf16(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, BF16) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR6, BF16) != 0; } static inline bool isar_feature_aa32_i8mm(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, I8MM) != 0; + return FIELD_EX32_IDREG(id, ID_ISAR6, I8MM) != 0; } static inline bool isar_feature_aa32_ras(const ARMISARegisters *id) { - return FIELD_EX32(id->id_pfr0, ID_PFR0, RAS) != 0; + return FIELD_EX32_IDREG(id, ID_PFR0, RAS) != 0; } static inline bool isar_feature_aa32_mprofile(const ARMISARegisters *id) { - return FIELD_EX32(id->id_pfr1, ID_PFR1, MPROGMOD) != 0; + return FIELD_EX32_IDREG(id, ID_PFR1, MPROGMOD) != 0; } static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id) @@ -148,7 +151,7 @@ static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id) * Return true if M-profile state handling insns * (VSCCLRM, CLRM, FPCTX access insns) are implemented */ - return FIELD_EX32(id->id_pfr1, ID_PFR1, SECURITY) >= 3; + return FIELD_EX32_IDREG(id, ID_PFR1, SECURITY) >= 3; } static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id) @@ -281,88 +284,88 @@ static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id) static inline bool isar_feature_aa32_pxn(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr0, ID_MMFR0, VMSA) >= 4; + return FIELD_EX32_IDREG(id, ID_MMFR0, VMSA) >= 4; } static inline bool isar_feature_aa32_pan(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) != 0; + return FIELD_EX32_IDREG(id, ID_MMFR3, PAN) != 0; } static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2; + return FIELD_EX32_IDREG(id, ID_MMFR3, PAN) >= 2; } static inline bool isar_feature_aa32_pmuv3p1(const ARMISARegisters *id) { /* 0xf means "non-standard IMPDEF PMU" */ - return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 && - FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; + return FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) >= 4 && + FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) != 0xf; } static inline bool isar_feature_aa32_pmuv3p4(const ARMISARegisters *id) { /* 0xf means "non-standard IMPDEF PMU" */ - return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 && - FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; + return FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) >= 5 && + FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) != 0xf; } static inline bool isar_feature_aa32_pmuv3p5(const ARMISARegisters *id) { /* 
0xf means "non-standard IMPDEF PMU" */ - return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 6 && - FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; + return FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) >= 6 && + FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) != 0xf; } static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0; + return FIELD_EX32_IDREG(id, ID_MMFR4, HPDS) != 0; } static inline bool isar_feature_aa32_ac2(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, AC2) != 0; + return FIELD_EX32_IDREG(id, ID_MMFR4, AC2) != 0; } static inline bool isar_feature_aa32_ccidx(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, CCIDX) != 0; + return FIELD_EX32_IDREG(id, ID_MMFR4, CCIDX) != 0; } static inline bool isar_feature_aa32_tts2uxn(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, XNX) != 0; + return FIELD_EX32_IDREG(id, ID_MMFR4, XNX) != 0; } static inline bool isar_feature_aa32_half_evt(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, EVT) >= 1; + return FIELD_EX32_IDREG(id, ID_MMFR4, EVT) >= 1; } static inline bool isar_feature_aa32_evt(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, EVT) >= 2; + return FIELD_EX32_IDREG(id, ID_MMFR4, EVT) >= 2; } static inline bool isar_feature_aa32_dit(const ARMISARegisters *id) { - return FIELD_EX32(id->id_pfr0, ID_PFR0, DIT) != 0; + return FIELD_EX32_IDREG(id, ID_PFR0, DIT) != 0; } static inline bool isar_feature_aa32_ssbs(const ARMISARegisters *id) { - return FIELD_EX32(id->id_pfr2, ID_PFR2, SSBS) != 0; + return FIELD_EX32_IDREG(id, ID_PFR2, SSBS) != 0; } static inline bool isar_feature_aa32_debugv7p1(const ARMISARegisters *id) { - return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 5; + return FIELD_EX32_IDREG(id, ID_DFR0, COPDBG) >= 5; } static inline bool isar_feature_aa32_debugv8p2(const ARMISARegisters *id) { - return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 8; + return FIELD_EX32_IDREG(id, ID_DFR0, COPDBG) >= 8; } static inline bool isar_feature_aa32_doublelock(const ARMISARegisters *id) @@ -375,102 +378,107 @@ static inline bool isar_feature_aa32_doublelock(const ARMISARegisters *id) */ static inline bool isar_feature_aa64_aes(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, AES) != 0; } static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, AES) > 1; } static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SHA1) != 0; } static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SHA2) != 0; } static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SHA2) > 1; } static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, CRC32) != 0; } static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0; + return 
FIELD_EX64_IDREG(id, ID_AA64ISAR0, ATOMIC) != 0; } static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, RDM) != 0; } static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SHA3) != 0; } static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SM3) != 0; } static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SM4) != 0; } static inline bool isar_feature_aa64_dp(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, DP) != 0; } static inline bool isar_feature_aa64_fhm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, FHM) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, FHM) != 0; } static inline bool isar_feature_aa64_condm_4(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, TS) != 0; } static inline bool isar_feature_aa64_condm_5(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, TS) >= 2; } static inline bool isar_feature_aa64_rndr(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RNDR) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, RNDR) != 0; } static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, TLB) == 2; } static inline bool isar_feature_aa64_tlbios(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, TLB) != 0; } static inline bool isar_feature_aa64_jscvt(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, JSCVT) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, JSCVT) != 0; } static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, FCMA) != 0; +} + +static inline bool isar_feature_aa64_xs(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, XS) != 0; } /* @@ -494,9 +502,9 @@ isar_feature_pauth_feature(const ARMISARegisters *id) * Architecturally, only one of {APA,API,APA3} may be active (non-zero) * and the other two must be zero. Thus we may avoid conditionals. */ - return (FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) | - FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, API) | - FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3)); + return (FIELD_EX64_IDREG(id, ID_AA64ISAR1, APA) | + FIELD_EX64_IDREG(id, ID_AA64ISAR1, API) | + FIELD_EX64_IDREG(id, ID_AA64ISAR2, APA3)); } static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id) @@ -514,7 +522,7 @@ static inline bool isar_feature_aa64_pauth_qarma5(const ARMISARegisters *id) * Return true if pauth is enabled with the architected QARMA5 algorithm. * QEMU will always enable or disable both APA and GPA. 
*/ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, APA) != 0; } static inline bool isar_feature_aa64_pauth_qarma3(const ARMISARegisters *id) @@ -523,134 +531,149 @@ static inline bool isar_feature_aa64_pauth_qarma3(const ARMISARegisters *id) * Return true if pauth is enabled with the architected QARMA3 algorithm. * QEMU will always enable or disable both APA3 and GPA3. */ - return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR2, APA3) != 0; } static inline bool isar_feature_aa64_sb(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, SB) != 0; } static inline bool isar_feature_aa64_predinv(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SPECRES) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, SPECRES) != 0; } static inline bool isar_feature_aa64_frint(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, FRINTTS) != 0; } static inline bool isar_feature_aa64_dcpop(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, DPB) != 0; } static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, DPB) >= 2; } static inline bool isar_feature_aa64_bf16(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, BF16) != 0; +} + +static inline bool isar_feature_aa64_ebf16(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, BF16) > 1; } static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, LRCPC) != 0; } static inline bool isar_feature_aa64_rcpc_8_4(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, LRCPC) >= 2; } static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR1, I8MM) != 0; } static inline bool isar_feature_aa64_wfxt(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, WFXT) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64ISAR2, WFXT) >= 2; } static inline bool isar_feature_aa64_hbc(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, BC) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR2, BC) != 0; } static inline bool isar_feature_aa64_mops(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, MOPS); + return FIELD_EX64_IDREG(id, ID_AA64ISAR2, MOPS); +} + +static inline bool isar_feature_aa64_rpres(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64ISAR2, RPRES); +} + +static inline bool isar_feature_aa64_lut(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64ISAR2, LUT); } static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id) { /* We always set the AdvSIMD and FP fields identically. 
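 * An FP field value of 0xf is the architected "not implemented"
 * encoding, hence the != 0xf check below.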
*/ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) != 0xf; + return FIELD_EX64_IDREG(id, ID_AA64PFR0, FP) != 0xf; } static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id) { /* We always set the AdvSIMD and FP fields identically wrt FP16. */ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; + return FIELD_EX64_IDREG(id, ID_AA64PFR0, FP) == 1; } static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64PFR0, EL0) >= 2; } static inline bool isar_feature_aa64_aa32_el1(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL1) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64PFR0, EL1) >= 2; } static inline bool isar_feature_aa64_aa32_el2(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL2) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64PFR0, EL2) >= 2; } static inline bool isar_feature_aa64_ras(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RAS) != 0; + return FIELD_EX64_IDREG(id, ID_AA64PFR0, RAS) != 0; } static inline bool isar_feature_aa64_doublefault(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RAS) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64PFR0, RAS) >= 2; } static inline bool isar_feature_aa64_sve(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0; + return FIELD_EX64_IDREG(id, ID_AA64PFR0, SVE) != 0; } static inline bool isar_feature_aa64_sel2(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SEL2) != 0; + return FIELD_EX64_IDREG(id, ID_AA64PFR0, SEL2) != 0; } static inline bool isar_feature_aa64_rme(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RME) != 0; + return FIELD_EX64_IDREG(id, ID_AA64PFR0, RME) != 0; } static inline bool isar_feature_aa64_dit(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, DIT) != 0; + return FIELD_EX64_IDREG(id, ID_AA64PFR0, DIT) != 0; } static inline bool isar_feature_aa64_scxtnum(const ARMISARegisters *id) { - int key = FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, CSV2); + int key = FIELD_EX64_IDREG(id, ID_AA64PFR0, CSV2); if (key >= 2) { return true; /* FEAT_CSV2_2 */ } if (key == 1) { - key = FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, CSV2_FRAC); + key = FIELD_EX64_IDREG(id, ID_AA64PFR1, CSV2_FRAC); return key >= 2; /* FEAT_CSV2_1p2 */ } return false; @@ -658,310 +681,378 @@ static inline bool isar_feature_aa64_scxtnum(const ARMISARegisters *id) static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0; + return FIELD_EX64_IDREG(id, ID_AA64PFR1, SSBS) != 0; } static inline bool isar_feature_aa64_bti(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0; + return FIELD_EX64_IDREG(id, ID_AA64PFR1, BT) != 0; } static inline bool isar_feature_aa64_mte_insn_reg(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) != 0; + return FIELD_EX64_IDREG(id, ID_AA64PFR1, MTE) != 0; } static inline bool isar_feature_aa64_mte(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64PFR1, MTE) >= 2; } static inline bool isar_feature_aa64_mte3(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) >= 3; + return FIELD_EX64_IDREG(id, ID_AA64PFR1, MTE) >= 3; } static 
inline bool isar_feature_aa64_sme(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SME) != 0; + return FIELD_EX64_IDREG(id, ID_AA64PFR1, SME) != 0; } static inline bool isar_feature_aa64_nmi(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, NMI) != 0; + return FIELD_EX64_IDREG(id, ID_AA64PFR1, NMI) != 0; } static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id) { - return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 1; + return FIELD_SEX64_IDREG(id, ID_AA64MMFR0, TGRAN4) >= 1; } static inline bool isar_feature_aa64_tgran4_2_lpa2(const ARMISARegisters *id) { - unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2); + unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN4_2); return t >= 3 || (t == 0 && isar_feature_aa64_tgran4_lpa2(id)); } static inline bool isar_feature_aa64_tgran16_lpa2(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN16) >= 2; } static inline bool isar_feature_aa64_tgran16_2_lpa2(const ARMISARegisters *id) { - unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2); + unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN16_2); return t >= 3 || (t == 0 && isar_feature_aa64_tgran16_lpa2(id)); } static inline bool isar_feature_aa64_tgran4(const ARMISARegisters *id) { - return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 0; + return FIELD_SEX64_IDREG(id, ID_AA64MMFR0, TGRAN4) >= 0; } static inline bool isar_feature_aa64_tgran16(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 1; + return FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN16) >= 1; } static inline bool isar_feature_aa64_tgran64(const ARMISARegisters *id) { - return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64) >= 0; + return FIELD_SEX64_IDREG(id, ID_AA64MMFR0, TGRAN64) >= 0; } static inline bool isar_feature_aa64_tgran4_2(const ARMISARegisters *id) { - unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2); + unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN4_2); return t >= 2 || (t == 0 && isar_feature_aa64_tgran4(id)); } static inline bool isar_feature_aa64_tgran16_2(const ARMISARegisters *id) { - unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2); + unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN16_2); return t >= 2 || (t == 0 && isar_feature_aa64_tgran16(id)); } static inline bool isar_feature_aa64_tgran64_2(const ARMISARegisters *id) { - unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64_2); + unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN64_2); return t >= 2 || (t == 0 && isar_feature_aa64_tgran64(id)); } static inline bool isar_feature_aa64_fgt(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, FGT) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR0, FGT) != 0; } static inline bool isar_feature_aa64_ecv_traps(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, ECV) > 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR0, ECV) > 0; } static inline bool isar_feature_aa64_ecv(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, ECV) > 1; + return FIELD_EX64_IDREG(id, ID_AA64MMFR0, ECV) > 1; } static inline bool isar_feature_aa64_vh(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR1, VH) != 0; } static inline bool isar_feature_aa64_lor(const 
ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR1, LO) != 0; } static inline bool isar_feature_aa64_pan(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR1, PAN) != 0; } static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64MMFR1, PAN) >= 2; } static inline bool isar_feature_aa64_pan3(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 3; + return FIELD_EX64_IDREG(id, ID_AA64MMFR1, PAN) >= 3; } static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR1, HCX) != 0; +} + +static inline bool isar_feature_aa64_afp(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64MMFR1, AFP) != 0; } static inline bool isar_feature_aa64_tidcp1(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, TIDCP1) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR1, TIDCP1) != 0; +} + +static inline bool isar_feature_aa64_cmow(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64MMFR1, CMOW) != 0; } static inline bool isar_feature_aa64_hafs(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR1, HAFDBS) != 0; } static inline bool isar_feature_aa64_hdbs(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64MMFR1, HAFDBS) >= 2; } static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR1, XNX) != 0; } static inline bool isar_feature_aa64_uao(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR2, UAO) != 0; } static inline bool isar_feature_aa64_st(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, ST) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR2, ST) != 0; } static inline bool isar_feature_aa64_lse2(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, AT) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR2, AT) != 0; } static inline bool isar_feature_aa64_fwb(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, FWB) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR2, FWB) != 0; } static inline bool isar_feature_aa64_ids(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, IDS) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR2, IDS) != 0; } static inline bool isar_feature_aa64_half_evt(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, EVT) >= 1; + return FIELD_EX64_IDREG(id, ID_AA64MMFR2, EVT) >= 1; } static inline bool isar_feature_aa64_evt(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, EVT) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64MMFR2, EVT) >= 2; } static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR2, CCIDX) != 0; } static inline bool isar_feature_aa64_lva(const ARMISARegisters *id) { 
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, VARANGE) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR2, VARANGE) != 0; } static inline bool isar_feature_aa64_e0pd(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, E0PD) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR2, E0PD) != 0; } static inline bool isar_feature_aa64_nv(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, NV) != 0; + return FIELD_EX64_IDREG(id, ID_AA64MMFR2, NV) != 0; } static inline bool isar_feature_aa64_nv2(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, NV) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64MMFR2, NV) >= 2; } static inline bool isar_feature_aa64_pmuv3p1(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 && - FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; + return FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) >= 4 && + FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) != 0xf; } static inline bool isar_feature_aa64_pmuv3p4(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 && - FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; + return FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) >= 5 && + FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) != 0xf; } static inline bool isar_feature_aa64_pmuv3p5(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 6 && - FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; + return FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) >= 6 && + FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) != 0xf; } static inline bool isar_feature_aa64_debugv8p2(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, DEBUGVER) >= 8; + return FIELD_EX64_IDREG(id, ID_AA64DFR0, DEBUGVER) >= 8; } static inline bool isar_feature_aa64_doublelock(const ARMISARegisters *id) { - return FIELD_SEX64(id->id_aa64dfr0, ID_AA64DFR0, DOUBLELOCK) >= 0; + return FIELD_SEX64_IDREG(id, ID_AA64DFR0, DOUBLELOCK) >= 0; } static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ZFR0, SVEVER) != 0; +} + +static inline bool isar_feature_aa64_sve2p1(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64ZFR0, SVEVER) >=2; } static inline bool isar_feature_aa64_sve2_aes(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ZFR0, AES) != 0; } static inline bool isar_feature_aa64_sve2_pmull128(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) >= 2; + return FIELD_EX64_IDREG(id, ID_AA64ZFR0, AES) >= 2; } static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ZFR0, BITPERM) != 0; } static inline bool isar_feature_aa64_sve_bf16(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BFLOAT16) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ZFR0, BFLOAT16) != 0; } static inline bool isar_feature_aa64_sve2_sha3(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SHA3) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ZFR0, SHA3) != 0; } static inline bool isar_feature_aa64_sve2_sm4(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SM4) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ZFR0, SM4) != 0; 
} static inline bool isar_feature_aa64_sve_i8mm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, I8MM) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ZFR0, I8MM) != 0; } static inline bool isar_feature_aa64_sve_f32mm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ZFR0, F32MM) != 0; } static inline bool isar_feature_aa64_sve_f64mm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F64MM) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ZFR0, F64MM) != 0; +} + +static inline bool isar_feature_aa64_sve_b16b16(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64ZFR0, B16B16); +} + +static inline bool isar_feature_aa64_sme_b16b16(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64SMFR0, B16B16); +} + +static inline bool isar_feature_aa64_sme_f16f16(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64SMFR0, F16F16); } static inline bool isar_feature_aa64_sme_f64f64(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, F64F64); + return FIELD_EX64_IDREG(id, ID_AA64SMFR0, F64F64); } static inline bool isar_feature_aa64_sme_i16i64(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, I16I64) == 0xf; + return FIELD_EX64_IDREG(id, ID_AA64SMFR0, I16I64) == 0xf; } static inline bool isar_feature_aa64_sme_fa64(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, FA64); + return FIELD_EX64_IDREG(id, ID_AA64SMFR0, FA64); +} + +static inline bool isar_feature_aa64_sme2(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64SMFR0, SMEVER) != 0; +} + +static inline bool isar_feature_aa64_sme2p1(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64SMFR0, SMEVER) >= 2; +} + +/* + * Combinations of feature tests, for ease of use with TRANS_FEAT. + */ +static inline bool isar_feature_aa64_sme_or_sve2p1(const ARMISARegisters *id) +{ + return isar_feature_aa64_sme(id) || isar_feature_aa64_sve2p1(id); +} + +static inline bool isar_feature_aa64_sme2_or_sve2p1(const ARMISARegisters *id) +{ + return isar_feature_aa64_sme2(id) || isar_feature_aa64_sve2p1(id); +} + +static inline bool isar_feature_aa64_sme2p1_or_sve2p1(const ARMISARegisters *id) +{ + return isar_feature_aa64_sme2p1(id) || isar_feature_aa64_sve2p1(id); +} + +static inline bool isar_feature_aa64_sme2_i16i64(const ARMISARegisters *id) +{ + return isar_feature_aa64_sme2(id) && isar_feature_aa64_sme_i16i64(id); +} + +static inline bool isar_feature_aa64_sme2_f64f64(const ARMISARegisters *id) +{ + return isar_feature_aa64_sme2(id) && isar_feature_aa64_sme_f64f64(id); } /* @@ -1022,6 +1113,55 @@ static inline bool isar_feature_any_evt(const ARMISARegisters *id) return isar_feature_aa64_evt(id) || isar_feature_aa32_evt(id); } +typedef enum { + CCSIDR_FORMAT_LEGACY, + CCSIDR_FORMAT_CCIDX, +} CCSIDRFormat; + +static inline uint64_t make_ccsidr(CCSIDRFormat format, unsigned assoc, + unsigned linesize, unsigned cachesize, + uint8_t flags) +{ + unsigned lg_linesize = ctz32(linesize); + unsigned sets; + uint64_t ccsidr = 0; + + assert(assoc != 0); + assert(is_power_of_2(linesize)); + assert(lg_linesize >= 4 && lg_linesize <= 7 + 4); + + /* sets * associativity * linesize == cachesize. 
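+     * For example (illustrative numbers only): a 32 KiB, 4-way cache with
+     * 64-byte lines, i.e. make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * 1024, 0),
+     * gives sets = 32768 / (4 * 64) = 128.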
*/ + sets = cachesize / (assoc * linesize); + assert(cachesize % (assoc * linesize) == 0); + + if (format == CCSIDR_FORMAT_LEGACY) { + /* + * The 32-bit CCSIDR format is: + * [27:13] number of sets - 1 + * [12:3] associativity - 1 + * [2:0] log2(linesize) - 4 + * so 0 == 16 bytes, 1 == 32 bytes, 2 == 64 bytes, etc + */ + ccsidr = deposit32(ccsidr, 28, 4, flags); + ccsidr = deposit32(ccsidr, 13, 15, sets - 1); + ccsidr = deposit32(ccsidr, 3, 10, assoc - 1); + ccsidr = deposit32(ccsidr, 0, 3, lg_linesize - 4); + } else { + /* + * The 64-bit CCSIDR_EL1 format is: + * [55:32] number of sets - 1 + * [23:3] associativity - 1 + * [2:0] log2(linesize) - 4 + * so 0 == 16 bytes, 1 == 32 bytes, 2 == 64 bytes, etc + */ + ccsidr = deposit64(ccsidr, 32, 24, sets - 1); + ccsidr = deposit64(ccsidr, 3, 21, assoc - 1); + ccsidr = deposit64(ccsidr, 0, 3, lg_linesize - 4); + } + + return ccsidr; +} + /* * Forward to the above feature tests given an ARMCPU pointer. */ diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h index fa6cae0e3aa..8b46c7c5708 100644 --- a/target/arm/cpu-param.h +++ b/target/arm/cpu-param.h @@ -2,32 +2,24 @@ * ARM cpu parameters for qemu. * * Copyright (c) 2003 Fabrice Bellard - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #ifndef ARM_CPU_PARAM_H #define ARM_CPU_PARAM_H #ifdef TARGET_AARCH64 -# define TARGET_LONG_BITS 64 # define TARGET_PHYS_ADDR_SPACE_BITS 52 # define TARGET_VIRT_ADDR_SPACE_BITS 52 #else -# define TARGET_LONG_BITS 32 # define TARGET_PHYS_ADDR_SPACE_BITS 40 # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif #ifdef CONFIG_USER_ONLY -# ifdef TARGET_AARCH64 -# define TARGET_TAGGED_ADDRESSES -# ifdef __FreeBSD__ -# define TARGET_PAGE_BITS 12 -# else +# if defined(TARGET_AARCH64) && defined(CONFIG_LINUX) /* Allow user-only to vary page size from 4k */ # define TARGET_PAGE_BITS_VARY -# define TARGET_PAGE_BITS_MIN 12 -# endif # else # define TARGET_PAGE_BITS 12 # endif @@ -37,10 +29,14 @@ * have to support 1K tiny pages. */ # define TARGET_PAGE_BITS_VARY -# define TARGET_PAGE_BITS_MIN 10 +# define TARGET_PAGE_BITS_LEGACY 10 #endif /* !CONFIG_USER_ONLY */ -/* ARM processors have a weak memory model */ -#define TCG_GUEST_DEFAULT_MO (0) +/* + * ARM-specific extra insn start words: + * 1: Conditional execution bits + * 2: Partial exception syndrome for data aborts + */ +#define TARGET_INSN_START_EXTRA_WORDS 2 #endif diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h index b497667d61e..2fcb0e12525 100644 --- a/target/arm/cpu-qom.h +++ b/target/arm/cpu-qom.h @@ -28,11 +28,6 @@ OBJECT_DECLARE_CPU_TYPE(ARMCPU, ARMCPUClass, ARM_CPU) #define TYPE_ARM_MAX_CPU "max-" TYPE_ARM_CPU -#define TYPE_AARCH64_CPU "aarch64-cpu" -typedef struct AArch64CPUClass AArch64CPUClass; -DECLARE_CLASS_CHECKERS(AArch64CPUClass, AARCH64_CPU, - TYPE_AARCH64_CPU) - #define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU #define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX) diff --git a/target/arm/cpu-sysregs.h b/target/arm/cpu-sysregs.h new file mode 100644 index 00000000000..7877a3b06a8 --- /dev/null +++ b/target/arm/cpu-sysregs.h @@ -0,0 +1,42 @@ +/* + * Definitions for Arm ID system registers + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef ARM_CPU_SYSREGS_H +#define ARM_CPU_SYSREGS_H + +/* + * Following is similar to the coprocessor regs encodings, but with an argument + * ordering that matches the ARM ARM. We also reuse the various CP_REG_ defines + * that actually are the same as the equivalent KVM_REG_ values. 
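+ * For example, ID_AA64PFR0_EL1 is (op0=3, op1=0, crn=0, crm=4, op2=0)
+ * in cpu-sysregs.h.inc, so SYS_ID_AA64PFR0_EL1 below expands to
+ * ENCODE_ID_REG(3, 0, 0, 4, 0).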
+ */ +#define ENCODE_ID_REG(op0, op1, crn, crm, op2) \ + (((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \ + ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \ + ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \ + ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \ + ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT)) + +#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) NAME##_IDX, + +typedef enum ARMIDRegisterIdx { +#include "cpu-sysregs.h.inc" + NUM_ID_IDX, +} ARMIDRegisterIdx; + +#undef DEF +#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \ + SYS_##NAME = ENCODE_ID_REG(OP0, OP1, CRN, CRM, OP2), + +typedef enum ARMSysRegs { +#include "cpu-sysregs.h.inc" +} ARMSysRegs; + +#undef DEF + +extern const uint32_t id_register_sysreg[NUM_ID_IDX]; + +int get_sysreg_idx(ARMSysRegs sysreg); + +#endif /* ARM_CPU_SYSREGS_H */ diff --git a/target/arm/cpu-sysregs.h.inc b/target/arm/cpu-sysregs.h.inc new file mode 100644 index 00000000000..f48a9daa7c1 --- /dev/null +++ b/target/arm/cpu-sysregs.h.inc @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +DEF(ID_AA64PFR0_EL1, 3, 0, 0, 4, 0) +DEF(ID_AA64PFR1_EL1, 3, 0, 0, 4, 1) +DEF(ID_AA64SMFR0_EL1, 3, 0, 0, 4, 5) +DEF(ID_AA64DFR0_EL1, 3, 0, 0, 5, 0) +DEF(ID_AA64DFR1_EL1, 3, 0, 0, 5, 1) +DEF(ID_AA64AFR0_EL1, 3, 0, 0, 5, 4) +DEF(ID_AA64AFR1_EL1, 3, 0, 0, 5, 5) +DEF(ID_AA64ISAR0_EL1, 3, 0, 0, 6, 0) +DEF(ID_AA64ISAR1_EL1, 3, 0, 0, 6, 1) +DEF(ID_AA64ISAR2_EL1, 3, 0, 0, 6, 2) +DEF(ID_AA64MMFR0_EL1, 3, 0, 0, 7, 0) +DEF(ID_AA64MMFR1_EL1, 3, 0, 0, 7, 1) +DEF(ID_AA64MMFR2_EL1, 3, 0, 0, 7, 2) +DEF(ID_AA64MMFR3_EL1, 3, 0, 0, 7, 3) +DEF(ID_PFR0_EL1, 3, 0, 0, 1, 0) +DEF(ID_PFR1_EL1, 3, 0, 0, 1, 1) +DEF(ID_DFR0_EL1, 3, 0, 0, 1, 2) +DEF(ID_AFR0_EL1, 3, 0, 0, 1, 3) +DEF(ID_MMFR0_EL1, 3, 0, 0, 1, 4) +DEF(ID_MMFR1_EL1, 3, 0, 0, 1, 5) +DEF(ID_MMFR2_EL1, 3, 0, 0, 1, 6) +DEF(ID_MMFR3_EL1, 3, 0, 0, 1, 7) +DEF(ID_ISAR0_EL1, 3, 0, 0, 2, 0) +DEF(ID_ISAR1_EL1, 3, 0, 0, 2, 1) +DEF(ID_ISAR2_EL1, 3, 0, 0, 2, 2) +DEF(ID_ISAR3_EL1, 3, 0, 0, 2, 3) +DEF(ID_ISAR4_EL1, 3, 0, 0, 2, 4) +DEF(ID_ISAR5_EL1, 3, 0, 0, 2, 5) +DEF(ID_MMFR4_EL1, 3, 0, 0, 2, 6) +DEF(ID_ISAR6_EL1, 3, 0, 0, 2, 7) +DEF(MVFR0_EL1, 3, 0, 0, 3, 0) +DEF(MVFR1_EL1, 3, 0, 0, 3, 1) +DEF(MVFR2_EL1, 3, 0, 0, 3, 2) +DEF(ID_PFR2_EL1, 3, 0, 0, 3, 4) +DEF(ID_DFR1_EL1, 3, 0, 0, 3, 5) +DEF(ID_MMFR5_EL1, 3, 0, 0, 3, 6) +DEF(CLIDR_EL1, 3, 1, 0, 0, 1) +DEF(ID_AA64ZFR0_EL1, 3, 0, 0, 4, 4) +DEF(CTR_EL0, 3, 3, 0, 0, 1) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 19191c23918..e2b2337399c 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -28,11 +28,12 @@ #include "qapi/error.h" #include "cpu.h" #ifdef CONFIG_TCG -#include "hw/core/tcg-cpu-ops.h" +#include "exec/translation-block.h" +#include "accel/tcg/cpu-ops.h" #endif /* CONFIG_TCG */ #include "internals.h" #include "cpu-features.h" -#include "exec/exec-all.h" +#include "exec/target_page.h" #include "hw/qdev-properties.h" #if !defined(CONFIG_USER_ONLY) #include "hw/loader.h" @@ -41,9 +42,9 @@ #include "hw/intc/armv7m_nvic.h" #endif /* CONFIG_TCG */ #endif /* !CONFIG_USER_ONLY */ -#include "sysemu/tcg.h" -#include "sysemu/qtest.h" -#include "sysemu/hw_accel.h" +#include "system/tcg.h" +#include "system/qtest.h" +#include "system/hw_accel.h" #include "kvm_arm.h" #include "disas/capstone.h" #include "fpu/softfloat.h" @@ -120,8 +121,15 @@ void arm_restore_state_to_opc(CPUState *cs, env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT; } } + +int arm_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return arm_env_mmu_index(cpu_env(cs)); +} + #endif /* CONFIG_TCG */ +#ifndef CONFIG_USER_ONLY /* * With 
SCTLR_ELx.NMI == 0, IRQ with Superpriority is masked identically with * IRQ without Superpriority. Moreover, if the GIC is configured so that @@ -140,11 +148,7 @@ static bool arm_cpu_has_work(CPUState *cs) | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR | CPU_INTERRUPT_EXITTB); } - -static int arm_cpu_mmu_index(CPUState *cs, bool ifetch) -{ - return arm_env_mmu_index(cpu_env(cs)); -} +#endif /* !CONFIG_USER_ONLY */ void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook, void *opaque) @@ -545,18 +549,25 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) env->sau.ctrl = 0; } - set_flush_to_zero(1, &env->vfp.standard_fp_status); - set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status); - set_default_nan_mode(1, &env->vfp.standard_fp_status); - set_default_nan_mode(1, &env->vfp.standard_fp_status_f16); - set_float_detect_tininess(float_tininess_before_rounding, - &env->vfp.fp_status); - set_float_detect_tininess(float_tininess_before_rounding, - &env->vfp.standard_fp_status); - set_float_detect_tininess(float_tininess_before_rounding, - &env->vfp.fp_status_f16); - set_float_detect_tininess(float_tininess_before_rounding, - &env->vfp.standard_fp_status_f16); + set_flush_to_zero(1, &env->vfp.fp_status[FPST_STD]); + set_flush_inputs_to_zero(1, &env->vfp.fp_status[FPST_STD]); + set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD]); + set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD_F16]); + set_default_nan_mode(1, &env->vfp.fp_status[FPST_ZA]); + set_default_nan_mode(1, &env->vfp.fp_status[FPST_ZA_F16]); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A32]); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64]); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_ZA]); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD]); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A32_F16]); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_ZA_F16]); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD_F16]); + arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_AH]); + set_flush_to_zero(1, &env->vfp.fp_status[FPST_AH]); + set_flush_inputs_to_zero(1, &env->vfp.fp_status[FPST_AH]); + arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_AH_F16]); + #ifndef CONFIG_USER_ONLY if (kvm_enabled()) { kvm_arm_reset_vcpu(cpu); @@ -623,6 +634,9 @@ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el) env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK; env->cp15.scr_el3 |= SCR_ENTP2; env->vfp.smcr_el[3] = 0xf; + if (cpu_isar_feature(aa64_sme2, cpu)) { + env->vfp.smcr_el[3] |= R_SMCR_EZT0_MASK; + } } if (cpu_isar_feature(aa64_hcx, cpu)) { env->cp15.scr_el3 |= SCR_HXEN; @@ -826,7 +840,6 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx, static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { - CPUClass *cc = CPU_GET_CLASS(cs); CPUARMState *env = cpu_env(cs); uint32_t cur_el = arm_current_el(env); bool secure = arm_is_secure(env); @@ -926,7 +939,7 @@ static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) found: cs->exception_index = excp_idx; env->exception.target_el = target_el; - cc->tcg_ops->do_interrupt(cs); + cs->cc->tcg_ops->do_interrupt(cs); return true; } @@ -1092,37 +1105,6 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level) } } -static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level) -{ -#ifdef CONFIG_KVM - ARMCPU *cpu = opaque; - CPUARMState *env = 
&cpu->env; - CPUState *cs = CPU(cpu); - uint32_t linestate_bit; - int irq_id; - - switch (irq) { - case ARM_CPU_IRQ: - irq_id = KVM_ARM_IRQ_CPU_IRQ; - linestate_bit = CPU_INTERRUPT_HARD; - break; - case ARM_CPU_FIQ: - irq_id = KVM_ARM_IRQ_CPU_FIQ; - linestate_bit = CPU_INTERRUPT_FIQ; - break; - default: - g_assert_not_reached(); - } - - if (level) { - env->irq_line_state |= linestate_bit; - } else { - env->irq_line_state &= ~linestate_bit; - } - kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level); -#endif -} - static bool arm_cpu_virtio_is_big_endian(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); @@ -1167,7 +1149,7 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info) { ARMCPU *ac = ARM_CPU(cpu); CPUARMState *env = &ac->env; - bool sctlr_b; + bool sctlr_b = arm_sctlr_b(env); if (is_a64(env)) { info->cap_arch = CS_ARCH_ARM64; @@ -1194,13 +1176,9 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info) info->cap_mode = cap_mode; } - sctlr_b = arm_sctlr_b(env); + info->endian = BFD_ENDIAN_LITTLE; if (bswap_code(sctlr_b)) { -#if TARGET_BIG_ENDIAN - info->endian = BFD_ENDIAN_LITTLE; -#else - info->endian = BFD_ENDIAN_BIG; -#endif + info->endian = target_big_endian() ? BFD_ENDIAN_LITTLE : BFD_ENDIAN_BIG; } info->flags &= ~INSN_ARM_BE32; #ifndef CONFIG_USER_ONLY @@ -1210,8 +1188,6 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info) #endif } -#ifdef TARGET_AARCH64 - static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) { ARMCPU *cpu = ARM_CPU(cs); @@ -1361,23 +1337,14 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) qemu_fprintf(f, "ZA[%0*d]=", svl_lg10, i); for (j = zcr_len; j >= 0; --j) { qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%c", - env->zarray[i].d[2 * j + 1], - env->zarray[i].d[2 * j], + env->za_state.za[i].d[2 * j + 1], + env->za_state.za[i].d[2 * j], j ? ':' : '\n'); } } } } -#else - -static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) -{ - g_assert_not_reached(); -} - -#endif - static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags) { ARMCPU *cpu = ARM_CPU(cs); @@ -1539,39 +1506,40 @@ static void arm_cpu_initfn(Object *obj) * 0 means "unset, use the default value". That default might vary depending * on the CPU type, and is set in the realize fn. 
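 * (See also the "backcompat-cntfrq" property further down, which picks
 * the old default value rather than 1GHz.)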
*/ -static Property arm_cpu_gt_cntfrq_property = +#ifndef CONFIG_USER_ONLY +static const Property arm_cpu_gt_cntfrq_property = DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz, 0); -static Property arm_cpu_reset_cbar_property = +static const Property arm_cpu_reset_cbar_property = DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0); -static Property arm_cpu_reset_hivecs_property = +static const Property arm_cpu_reset_hivecs_property = DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false); -#ifndef CONFIG_USER_ONLY -static Property arm_cpu_has_el2_property = +static const Property arm_cpu_has_el2_property = DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true); -static Property arm_cpu_has_el3_property = +static const Property arm_cpu_has_el3_property = DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true); #endif -static Property arm_cpu_cfgend_property = +static const Property arm_cpu_cfgend_property = DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false); -static Property arm_cpu_has_vfp_property = +static const Property arm_cpu_has_vfp_property = DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true); -static Property arm_cpu_has_vfp_d32_property = +static const Property arm_cpu_has_vfp_d32_property = DEFINE_PROP_BOOL("vfp-d32", ARMCPU, has_vfp_d32, true); -static Property arm_cpu_has_neon_property = +static const Property arm_cpu_has_neon_property = DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true); -static Property arm_cpu_has_dsp_property = +static const Property arm_cpu_has_dsp_property = DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true); -static Property arm_cpu_has_mpu_property = +#ifndef CONFIG_USER_ONLY +static const Property arm_cpu_has_mpu_property = DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true); /* This is like DEFINE_PROP_UINT32 but it doesn't set the default value, @@ -1579,10 +1547,11 @@ static Property arm_cpu_has_mpu_property = * the right value for that particular CPU type, and we don't want * to override that with an incorrect constant value. */ -static Property arm_cpu_pmsav7_dregion_property = +static const Property arm_cpu_pmsav7_dregion_property = DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU, pmsav7_dregion, qdev_prop_uint32, uint32_t); +#endif static bool arm_get_pmu(Object *obj, Error **errp) { @@ -1607,6 +1576,35 @@ static void arm_set_pmu(Object *obj, bool value, Error **errp) cpu->has_pmu = value; } +static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp) +{ + ARMCPU *cpu = ARM_CPU(obj); + + return arm_feature(&cpu->env, ARM_FEATURE_AARCH64); +} + +static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp) +{ + ARMCPU *cpu = ARM_CPU(obj); + + /* + * At this time, this property is only allowed if KVM is enabled. This + * restriction allows us to avoid fixing up functionality that assumes a + * uniform execution state like do_interrupt. 
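+     * (For example, with KVM on a host that supports 32-bit EL1 this
+     * can be used as "-cpu host,aarch64=off" to run an AArch32 guest.)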
+ */ + if (value == false) { + if (!kvm_enabled() || !kvm_arm_aarch32_supported()) { + error_setg(errp, "'aarch64' feature cannot be disabled " + "unless KVM is enabled and 32-bit EL1 " + "is supported"); + return; + } + unset_feature(&cpu->env, ARM_FEATURE_AARCH64); + } else { + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + } +} + unsigned int gt_cntfrq_period_ns(ARMCPU *cpu) { /* @@ -1723,7 +1721,7 @@ static void arm_cpu_propagate_feature_implications(ARMCPU *cpu) } } -void arm_cpu_post_init(Object *obj) +static void arm_cpu_post_init(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); @@ -1734,6 +1732,14 @@ void arm_cpu_post_init(Object *obj) */ arm_cpu_propagate_feature_implications(cpu); + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + object_property_add_bool(obj, "aarch64", aarch64_cpu_get_aarch64, + aarch64_cpu_set_aarch64); + object_property_set_description(obj, "aarch64", + "Set on/off to enable/disable aarch64 " + "execution state "); + } +#ifndef CONFIG_USER_ONLY if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) || arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) { qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property); @@ -1749,7 +1755,6 @@ void arm_cpu_post_init(Object *obj) OBJ_PROP_FLAG_READWRITE); } -#ifndef CONFIG_USER_ONLY if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) { /* Add the has_el3 state CPU property only if EL3 is allowed. This will * prevent "has_el3" from existing on CPUs which cannot support EL3. @@ -1821,6 +1826,7 @@ void arm_cpu_post_init(Object *obj) qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property); } +#ifndef CONFIG_USER_ONLY if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) { qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property); if (arm_feature(&cpu->env, ARM_FEATURE_V7)) { @@ -1857,8 +1863,6 @@ void arm_cpu_post_init(Object *obj) &cpu->psci_conduit, OBJ_PROP_FLAG_READWRITE); - qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property); - if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) { qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property); } @@ -1867,7 +1871,6 @@ void arm_cpu_post_init(Object *obj) kvm_arm_add_vcpu_properties(cpu); } -#ifndef CONFIG_USER_ONLY if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && cpu_isar_feature(aa64_mte, cpu)) { object_property_add_link(obj, "tag-memory", @@ -1885,6 +1888,7 @@ void arm_cpu_post_init(Object *obj) } } #endif + qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property); } static void arm_cpu_finalizefn(Object *obj) @@ -1916,7 +1920,6 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp) { Error *local_err = NULL; -#ifdef TARGET_AARCH64 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { arm_cpu_sve_finalize(cpu, &local_err); if (local_err != NULL) { @@ -1952,7 +1955,6 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp) return; } } -#endif if (kvm_enabled()) { kvm_arm_steal_time_finalize(cpu, &local_err); @@ -1967,6 +1969,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) { CPUState *cs = CPU(dev); ARMCPU *cpu = ARM_CPU(dev); + ARMISARegisters *isar = &cpu->isar; ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev); CPUARMState *env = &cpu->env; Error *local_err = NULL; @@ -2069,6 +2072,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) arm_gt_stimer_cb, cpu); cpu->gt_timer[GTIMER_HYPVIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale, arm_gt_hvtimer_cb, cpu); + cpu->gt_timer[GTIMER_S_EL2_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, scale, + arm_gt_sel2timer_cb, cpu); + cpu->gt_timer[GTIMER_S_EL2_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, 
scale, + arm_gt_sel2vtimer_cb, cpu); } #endif @@ -2120,21 +2127,16 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) } if (!cpu->has_vfp) { - uint64_t t; uint32_t u; - t = cpu->isar.id_aa64isar1; - t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0); - cpu->isar.id_aa64isar1 = t; + FIELD_DP64_IDREG(isar, ID_AA64ISAR1, JSCVT, 0); - t = cpu->isar.id_aa64pfr0; - t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf); - cpu->isar.id_aa64pfr0 = t; + FIELD_DP64_IDREG(isar, ID_AA64PFR0, FP, 0xf); - u = cpu->isar.id_isar6; + u = GET_IDREG(isar, ID_ISAR6); u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0); u = FIELD_DP32(u, ID_ISAR6, BF16, 0); - cpu->isar.id_isar6 = u; + SET_IDREG(isar, ID_ISAR6, u); u = cpu->isar.mvfr0; u = FIELD_DP32(u, MVFR0, FPSP, 0); @@ -2168,7 +2170,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) unset_feature(env, ARM_FEATURE_NEON); - t = cpu->isar.id_aa64isar0; + t = GET_IDREG(isar, ID_AA64ISAR0); t = FIELD_DP64(t, ID_AA64ISAR0, AES, 0); t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 0); t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 0); @@ -2176,32 +2178,30 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 0); t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 0); t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0); - cpu->isar.id_aa64isar0 = t; + SET_IDREG(isar, ID_AA64ISAR0, t); - t = cpu->isar.id_aa64isar1; + t = GET_IDREG(isar, ID_AA64ISAR1); t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0); t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0); t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0); - cpu->isar.id_aa64isar1 = t; + SET_IDREG(isar, ID_AA64ISAR1, t); - t = cpu->isar.id_aa64pfr0; - t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf); - cpu->isar.id_aa64pfr0 = t; + FIELD_DP64_IDREG(isar, ID_AA64PFR0, ADVSIMD, 0xf); - u = cpu->isar.id_isar5; + u = GET_IDREG(isar, ID_ISAR5); u = FIELD_DP32(u, ID_ISAR5, AES, 0); u = FIELD_DP32(u, ID_ISAR5, SHA1, 0); u = FIELD_DP32(u, ID_ISAR5, SHA2, 0); u = FIELD_DP32(u, ID_ISAR5, RDM, 0); u = FIELD_DP32(u, ID_ISAR5, VCMA, 0); - cpu->isar.id_isar5 = u; + SET_IDREG(isar, ID_ISAR5, u); - u = cpu->isar.id_isar6; + u = GET_IDREG(isar, ID_ISAR6); u = FIELD_DP32(u, ID_ISAR6, DP, 0); u = FIELD_DP32(u, ID_ISAR6, FHM, 0); u = FIELD_DP32(u, ID_ISAR6, BF16, 0); u = FIELD_DP32(u, ID_ISAR6, I8MM, 0); - cpu->isar.id_isar6 = u; + SET_IDREG(isar, ID_ISAR6, u); if (!arm_feature(env, ARM_FEATURE_M)) { u = cpu->isar.mvfr1; @@ -2218,16 +2218,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) } if (!cpu->has_neon && !cpu->has_vfp) { - uint64_t t; uint32_t u; - t = cpu->isar.id_aa64isar0; - t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0); - cpu->isar.id_aa64isar0 = t; + FIELD_DP64_IDREG(isar, ID_AA64ISAR0, FHM, 0); - t = cpu->isar.id_aa64isar1; - t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0); - cpu->isar.id_aa64isar1 = t; + FIELD_DP64_IDREG(isar, ID_AA64ISAR1, FRINTTS, 0); u = cpu->isar.mvfr0; u = FIELD_DP32(u, MVFR0, SIMDREG, 0); @@ -2244,19 +2239,17 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) unset_feature(env, ARM_FEATURE_THUMB_DSP); - u = cpu->isar.id_isar1; - u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1); - cpu->isar.id_isar1 = u; + FIELD_DP32_IDREG(isar, ID_ISAR1, EXTEND, 1); - u = cpu->isar.id_isar2; + u = GET_IDREG(isar, ID_ISAR2); u = FIELD_DP32(u, ID_ISAR2, MULTU, 1); u = FIELD_DP32(u, ID_ISAR2, MULTS, 1); - cpu->isar.id_isar2 = u; + SET_IDREG(isar, ID_ISAR2, u); - u = cpu->isar.id_isar3; + u = GET_IDREG(isar, ID_ISAR3); u = FIELD_DP32(u, ID_ISAR3, SIMD, 1); u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0); - cpu->isar.id_isar3 = u; + SET_IDREG(isar, ID_ISAR3, 
u); } @@ -2331,14 +2324,12 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) * Disable the security extension feature bits in the processor * feature registers as well. */ - cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0); - cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0); - cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0, - ID_AA64PFR0, EL3, 0); + FIELD_DP32_IDREG(isar, ID_PFR1, SECURITY, 0); + FIELD_DP32_IDREG(isar, ID_DFR0, COPSDBG, 0); + FIELD_DP64_IDREG(isar, ID_AA64PFR0, EL3, 0); /* Disable the realm management extension, which requires EL3. */ - cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0, - ID_AA64PFR0, RME, 0); + FIELD_DP64_IDREG(isar, ID_AA64PFR0, RME, 0); } if (!cpu->has_el2) { @@ -2361,9 +2352,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) cpu); #endif } else { - cpu->isar.id_aa64dfr0 = - FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0); - cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0); + FIELD_DP64_IDREG(isar, ID_AA64DFR0, PMUVER, 0); + FIELD_DP32_IDREG(isar, ID_DFR0, PERFMON, 0); cpu->pmceid0 = 0; cpu->pmceid1 = 0; } @@ -2373,10 +2363,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) * Disable the hypervisor feature bits in the processor feature * registers if we don't have EL2. */ - cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0, - ID_AA64PFR0, EL2, 0); - cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, - ID_PFR1, VIRTUALIZATION, 0); + FIELD_DP64_IDREG(isar, ID_AA64PFR0, EL2, 0); + FIELD_DP32_IDREG(isar, ID_PFR1, VIRTUALIZATION, 0); } if (cpu_isar_feature(aa64_mte, cpu)) { @@ -2390,13 +2378,20 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) #ifndef CONFIG_USER_ONLY /* - * If we do not have tag-memory provided by the machine, - * reduce MTE support to instructions enabled at EL0. + * If we run with TCG and do not have tag-memory provided by + * the machine, then reduce MTE support to instructions enabled at EL0. * This matches Cortex-A710 BROADCASTMTE input being LOW. */ - if (cpu->tag_memory == NULL) { - cpu->isar.id_aa64pfr1 = - FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1); + if (tcg_enabled() && cpu->tag_memory == NULL) { + FIELD_DP64_IDREG(isar, ID_AA64PFR1, MTE, 1); + } + + /* + * If MTE is supported by the host, however it should not be + * enabled on the guest (i.e mte=off), clear guest's MTE bits." + */ + if (kvm_enabled() && !cpu->kvm_mte) { + FIELD_DP64_IDREG(isar, ID_AA64PFR1, MTE, 0); } #endif } @@ -2416,32 +2411,22 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) * try to access the non-existent system registers for them. 
*/ /* FEAT_SPE (Statistical Profiling Extension) */ - cpu->isar.id_aa64dfr0 = - FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0); + FIELD_DP64_IDREG(isar, ID_AA64DFR0, PMSVER, 0); /* FEAT_TRBE (Trace Buffer Extension) */ - cpu->isar.id_aa64dfr0 = - FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEBUFFER, 0); + FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEBUFFER, 0); /* FEAT_TRF (Self-hosted Trace Extension) */ - cpu->isar.id_aa64dfr0 = - FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEFILT, 0); - cpu->isar.id_dfr0 = - FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, TRACEFILT, 0); + FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEFILT, 0); + FIELD_DP32_IDREG(isar, ID_DFR0, TRACEFILT, 0); /* Trace Macrocell system register access */ - cpu->isar.id_aa64dfr0 = - FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEVER, 0); - cpu->isar.id_dfr0 = - FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPTRC, 0); + FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEVER, 0); + FIELD_DP32_IDREG(isar, ID_DFR0, COPTRC, 0); /* Memory mapped trace */ - cpu->isar.id_dfr0 = - FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, MMAPTRC, 0); + FIELD_DP32_IDREG(isar, ID_DFR0, MMAPTRC, 0); /* FEAT_AMU (Activity Monitors Extension) */ - cpu->isar.id_aa64pfr0 = - FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, AMU, 0); - cpu->isar.id_pfr0 = - FIELD_DP32(cpu->isar.id_pfr0, ID_PFR0, AMU, 0); + FIELD_DP64_IDREG(isar, ID_AA64PFR0, AMU, 0); + FIELD_DP32_IDREG(isar, ID_PFR0, AMU, 0); /* FEAT_MPAM (Memory Partitioning and Monitoring Extension) */ - cpu->isar.id_aa64pfr0 = - FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, MPAM, 0); + FIELD_DP64_IDREG(isar, ID_AA64PFR0, MPAM, 0); } /* MPU can be configured out of a PMSA CPU either by setting has-mpu @@ -2617,7 +2602,7 @@ static ObjectClass *arm_cpu_class_by_name(const char *cpu_model) return oc; } -static Property arm_cpu_properties[] = { +static const Property arm_cpu_properties[] = { DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0), DEFINE_PROP_UINT64("mp-affinity", ARMCPU, mp_affinity, ARM64_AFFINITY_INVALID), @@ -2625,7 +2610,8 @@ static Property arm_cpu_properties[] = { DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1), /* True to default to the backward-compat old CNTFRQ rather than 1Ghz */ DEFINE_PROP_BOOL("backcompat-cntfrq", ARMCPU, backcompat_cntfrq, false), - DEFINE_PROP_END_OF_LIST() + DEFINE_PROP_BOOL("backcompat-pauth-default-use-qarma5", ARMCPU, + backcompat_pauth_default_use_qarma5, false), }; static const gchar *arm_gdb_arch_name(CPUState *cs) @@ -2633,16 +2619,58 @@ static const gchar *arm_gdb_arch_name(CPUState *cs) ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; + if (arm_gdbstub_is_aarch64(cpu)) { + return "aarch64"; + } if (arm_feature(env, ARM_FEATURE_IWMMXT)) { return "iwmmxt"; } return "arm"; } -#ifndef CONFIG_USER_ONLY +static const char *arm_gdb_get_core_xml_file(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + if (arm_gdbstub_is_aarch64(cpu)) { + return "aarch64-core.xml"; + } + if (arm_feature(env, ARM_FEATURE_M)) { + return "arm-m-profile.xml"; + } + return "arm-core.xml"; +} + +#ifdef CONFIG_USER_ONLY +/** + * aarch64_untagged_addr: + * + * Remove any address tag from @x. This is explicitly related to the + * linux syscall TIF_TAGGED_ADDR setting, not TBI in general. + * + * There should be a better place to put this, but we need this in + * include/accel/tcg/cpu-ldst.h, and not some place linux-user specific. + * + * Note that arm-*-user will never set tagged_addr_enable. 
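+ *
+ * Illustrative example (values are arbitrary): with tagged_addr_enable
+ * set, a user pointer such as 0x3a00_7fff_1234_5678 has bit 55 clear, so
+ * the top-byte tag is stripped and 0x0000_7fff_1234_5678 is returned;
+ * an address with bit 55 set (a kernel-style TTBR1 address) is returned
+ * unchanged.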
+ */ +static vaddr aarch64_untagged_addr(CPUState *cs, vaddr x) +{ + CPUARMState *env = cpu_env(cs); + if (env->tagged_addr_enable) { + /* + * TBI is enabled for userspace but not kernelspace addresses. + * Only clear the tag if bit 55 is clear. + */ + x &= sextract64(x, 0, 56); + } + return x; +} +#else #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps arm_sysemu_ops = { + .has_work = arm_cpu_has_work, .get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug, .asidx_from_attrs = arm_asidx_from_attrs, .write_elf32_note = arm_cpu_write_elf32_note, @@ -2653,19 +2681,52 @@ static const struct SysemuCPUOps arm_sysemu_ops = { #endif #ifdef CONFIG_TCG +#ifndef CONFIG_USER_ONLY +static vaddr aprofile_pointer_wrap(CPUState *cs, int mmu_idx, + vaddr result, vaddr base) +{ + /* + * The Stage2 and Phys indexes are only used for ptw on arm32, + * and all pte's are aligned, so we never produce a wrap for these. + * Double check that we're not truncating a 40-bit physical address. + */ + assert((unsigned)mmu_idx < (ARMMMUIdx_Stage2_S & ARM_MMU_IDX_COREIDX_MASK)); + + if (!is_a64(cpu_env(cs))) { + return (uint32_t)result; + } + + /* + * TODO: For FEAT_CPA2, decide how to we want to resolve + * Unpredictable_CPACHECK in AddressIncrement. + */ + return result; +} +#endif /* !CONFIG_USER_ONLY */ + static const TCGCPUOps arm_tcg_ops = { + .mttcg_supported = true, + /* ARM processors have a weak memory model */ + .guest_default_memory_order = 0, + .initialize = arm_translate_init, + .translate_code = arm_translate_code, + .get_tb_cpu_state = arm_get_tb_cpu_state, .synchronize_from_tb = arm_cpu_synchronize_from_tb, .debug_excp_handler = arm_debug_excp_handler, .restore_state_to_opc = arm_restore_state_to_opc, + .mmu_index = arm_cpu_mmu_index, #ifdef CONFIG_USER_ONLY .record_sigsegv = arm_cpu_record_sigsegv, .record_sigbus = arm_cpu_record_sigbus, + .untagged_addr = aarch64_untagged_addr, #else - .tlb_fill = arm_cpu_tlb_fill, + .tlb_fill_align = arm_cpu_tlb_fill_align, + .pointer_wrap = aprofile_pointer_wrap, .cpu_exec_interrupt = arm_cpu_exec_interrupt, .cpu_exec_halt = arm_cpu_exec_halt, + .cpu_exec_reset = cpu_reset, .do_interrupt = arm_cpu_do_interrupt, .do_transaction_failed = arm_cpu_do_transaction_failed, .do_unaligned_access = arm_cpu_do_unaligned_access, @@ -2676,7 +2737,7 @@ static const TCGCPUOps arm_tcg_ops = { }; #endif /* CONFIG_TCG */ -static void arm_cpu_class_init(ObjectClass *oc, void *data) +static void arm_cpu_class_init(ObjectClass *oc, const void *data) { ARMCPUClass *acc = ARM_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(acc); @@ -2692,8 +2753,6 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data) &acc->parent_phases); cc->class_by_name = arm_cpu_class_by_name; - cc->has_work = arm_cpu_has_work; - cc->mmu_index = arm_cpu_mmu_index; cc->dump_state = arm_cpu_dump_state; cc->set_pc = arm_cpu_set_pc; cc->get_pc = arm_cpu_get_pc; @@ -2703,6 +2762,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data) cc->sysemu_ops = &arm_sysemu_ops; #endif cc->gdb_arch_name = arm_gdb_arch_name; + cc->gdb_get_core_xml_file = arm_gdb_get_core_xml_file; cc->gdb_stop_before_watchpoint = true; cc->disas_set_info = arm_disas_set_info; @@ -2719,13 +2779,15 @@ static void arm_cpu_instance_init(Object *obj) arm_cpu_post_init(obj); } -static void cpu_register_class_init(ObjectClass *oc, void *data) +static void cpu_register_class_init(ObjectClass *oc, const void *data) { ARMCPUClass *acc = ARM_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(acc); acc->info = data; - cc->gdb_core_xml_file 
= "arm-core.xml"; + if (acc->info->deprecation_note) { + cc->deprecation_note = acc->info->deprecation_note; + } } void arm_cpu_register(const ARMCPUInfo *info) @@ -2734,11 +2796,11 @@ void arm_cpu_register(const ARMCPUInfo *info) .parent = TYPE_ARM_CPU, .instance_init = arm_cpu_instance_init, .class_init = info->class_init ?: cpu_register_class_init, - .class_data = (void *)info, + .class_data = info, }; type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name); - type_register(&type_info); + type_register_static(&type_info); g_free((void *)type_info.name); } diff --git a/target/arm/cpu.h b/target/arm/cpu.h index a12859fc533..dc9b6dce4c9 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -24,16 +24,15 @@ #include "qemu/cpu-float.h" #include "hw/registerfields.h" #include "cpu-qom.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #include "exec/gdbstub.h" #include "exec/page-protection.h" #include "qapi/qapi-types-common.h" #include "target/arm/multiprocessing.h" #include "target/arm/gtimer.h" - -#ifdef TARGET_AARCH64 -#define KVM_HAVE_MCE_INJECTION 1 -#endif +#include "target/arm/cpu-sysregs.h" #define EXCP_UDEF 1 /* undefined instruction */ #define EXCP_SWI 2 /* software interrupt */ @@ -62,6 +61,7 @@ #define EXCP_NMI 26 #define EXCP_VINMI 27 #define EXCP_VFNMI 28 +#define EXCP_MON_TRAP 29 /* AArch32 trap to Monitor mode */ /* NB: add new EXCP_ defines to the array in arm_log_exception() too */ #define ARMV7M_EXCP_RESET 1 @@ -99,12 +99,6 @@ #define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t)) #endif -/* ARM-specific extra insn start words: - * 1: Conditional execution bits - * 2: Partial exception syndrome for data aborts - */ -#define TARGET_INSN_START_EXTRA_WORDS 2 - /* The 2nd extra word holding syndrome info for data aborts does not use * the upper 6 bits nor the lower 13 bits. We mask and shift it down to * help the sleb128 encoder do a better job. @@ -170,17 +164,12 @@ typedef struct ARMGenericTimer { * Align the data for use with TCG host vector operations. */ -#ifdef TARGET_AARCH64 -# define ARM_MAX_VQ 16 -#else -# define ARM_MAX_VQ 1 -#endif +#define ARM_MAX_VQ 16 typedef struct ARMVectorReg { uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16); } ARMVectorReg; -#ifdef TARGET_AARCH64 /* In AArch32 mode, predicate registers do not exist at all. */ typedef struct ARMPredicateReg { uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16); @@ -190,18 +179,82 @@ typedef struct ARMPredicateReg { typedef struct ARMPACKey { uint64_t lo, hi; } ARMPACKey; -#endif /* See the commentary above the TBFLAG field definitions. */ typedef struct CPUARMTBFlags { uint32_t flags; - target_ulong flags2; + uint64_t flags2; } CPUARMTBFlags; typedef struct ARMMMUFaultInfo ARMMMUFaultInfo; typedef struct NVICState NVICState; +/* + * Enum for indexing vfp.fp_status[]. 
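+ * Each enumerator indexes one float_status in env->vfp.fp_status[]; for
+ * instance (illustrative), a plain AArch64 FP instruction uses
+ * &env->vfp.fp_status[FPST_A64], while operations the architecture
+ * defines as controlled by the "Standard FPSCR" (generally Neon) use
+ * FPST_STD.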
+ * + * FPST_A32: is the "normal" fp status for AArch32 insns + * FPST_A64: is the "normal" fp status for AArch64 insns + * FPST_A32_F16: used for AArch32 half-precision calculations + * FPST_A64_F16: used for AArch64 half-precision calculations + * FPST_STD: the ARM "Standard FPSCR Value" + * FPST_STD_F16: used for half-precision + * calculations with the ARM "Standard FPSCR Value" + * FPST_AH: used for the A64 insns which change behaviour + * when FPCR.AH == 1 (bfloat16 conversions and multiplies, + * and the reciprocal and square root estimate/step insns) + * FPST_AH_F16: used for the A64 insns which change behaviour + * when FPCR.AH == 1 (bfloat16 conversions and multiplies, + * and the reciprocal and square root estimate/step insns); + * for half-precision + * ZA: the "streaming sve" fp status. + * ZA_F16: likewise for half-precision. + * + * Half-precision operations are governed by a separate + * flush-to-zero control bit in FPSCR:FZ16. We pass a separate + * status structure to control this. + * + * The "Standard FPSCR", ie default-NaN, flush-to-zero, + * round-to-nearest and is used by any operations (generally + * Neon) which the architecture defines as controlled by the + * standard FPSCR value rather than the FPSCR. + * + * The "standard FPSCR but for fp16 ops" is needed because + * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than + * using a fixed value for it. + * + * FPST_AH is needed because some insns have different + * behaviour when FPCR.AH == 1: they don't update cumulative + * exception flags, they act like FPCR.{FZ,FIZ} = {1,1} and + * they ignore FPCR.RMode. But they don't ignore FPCR.FZ16, + * which means we need an FPST_AH_F16 as well. + * + * The "ZA" float_status are for Streaming SVE operations which use + * default-NaN and do not generate fp exceptions, which means that they + * do not accumulate exception bits back into FPCR. + * See e.g. FPAdd vs FPAdd_ZA pseudocode functions, and the setting + * of fpcr.DN and fpexec parameters. + * + * To avoid having to transfer exception bits around, we simply + * say that the FPSCR cumulative exception flags are the logical + * OR of the flags in the four fp statuses. This relies on the + * only thing which needs to read the exception flags being + * an explicit FPSCR read. + */ +typedef enum ARMFPStatusFlavour { + FPST_A32, + FPST_A64, + FPST_A32_F16, + FPST_A64_F16, + FPST_AH, + FPST_AH_F16, + FPST_ZA, + FPST_ZA_F16, + FPST_STD, + FPST_STD_F16, +} ARMFPStatusFlavour; +#define FPST_COUNT 10 + typedef struct CPUArchState { /* Regs for current mode. */ uint32_t regs[16]; @@ -606,13 +659,11 @@ typedef struct CPUArchState { struct { ARMVectorReg zregs[32]; -#ifdef TARGET_AARCH64 /* Store FFR as pregs[16] to make it easier to treat as any other. */ #define FFR_PRED_NUM 16 ARMPredicateReg pregs[17]; /* Scratch space for aa64 sve predicate temporary. */ ARMPredicateReg preg_tmp; -#endif /* We store these fpcsr fields separately for convenience. */ uint32_t qc[4] QEMU_ALIGNED(16); @@ -628,40 +679,8 @@ typedef struct CPUArchState { uint32_t xregs[16]; - /* Scratch space for aa32 neon expansion. */ - uint32_t scratch[8]; - - /* There are a number of distinct float control structures: - * - * fp_status: is the "normal" fp status. 
- * fp_status_fp16: used for half-precision calculations - * standard_fp_status : the ARM "Standard FPSCR Value" - * standard_fp_status_fp16 : used for half-precision - * calculations with the ARM "Standard FPSCR Value" - * - * Half-precision operations are governed by a separate - * flush-to-zero control bit in FPSCR:FZ16. We pass a separate - * status structure to control this. - * - * The "Standard FPSCR", ie default-NaN, flush-to-zero, - * round-to-nearest and is used by any operations (generally - * Neon) which the architecture defines as controlled by the - * standard FPSCR value rather than the FPSCR. - * - * The "standard FPSCR but for fp16 ops" is needed because - * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than - * using a fixed value for it. - * - * To avoid having to transfer exception bits around, we simply - * say that the FPSCR cumulative exception flags are the logical - * OR of the flags in the four fp statuses. This relies on the - * only thing which needs to read the exception flags being - * an explicit FPSCR read. - */ - float_status fp_status; - float_status fp_status_f16; - float_status standard_fp_status; - float_status standard_fp_status_f16; + /* There are a number of distinct float control structures. */ + float_status fp_status[FPST_COUNT]; uint64_t zcr_el[4]; /* ZCR_EL[1-3] */ uint64_t smcr_el[4]; /* SMCR_EL[1-3] */ @@ -686,7 +705,6 @@ typedef struct CPUArchState { uint32_t cregs[16]; } iwmmxt; -#ifdef TARGET_AARCH64 struct { ARMPACKey apia; ARMPACKey apib; @@ -697,28 +715,36 @@ typedef struct CPUArchState { uint64_t scxtnum_el[4]; - /* - * SME ZA storage -- 256 x 256 byte array, with bytes in host word order, - * as we do with vfp.zregs[]. This corresponds to the architectural ZA - * array, where ZA[N] is in the least-significant bytes of env->zarray[N]. - * When SVL is less than the architectural maximum, the accessible - * storage is restricted, such that if the SVL is X bytes the guest can - * see only the bottom X elements of zarray[], and only the least - * significant X bytes of each element of the array. (In other words, - * the observable part is always square.) - * - * The ZA storage can also be considered as a set of square tiles of - * elements of different sizes. The mapping from tiles to the ZA array - * is architecturally defined, such that for tiles of elements of esz - * bytes, the Nth row (or "horizontal slice") of tile T is in - * ZA[T + N * esz]. Note that this means that each tile is not contiguous - * in the ZA storage, because its rows are striped through the ZA array. - * - * Because this is so large, keep this toward the end of the reset area, - * to keep the offsets into the rest of the structure smaller. - */ - ARMVectorReg zarray[ARM_MAX_VQ * 16]; -#endif + struct { + /* SME2 ZT0 -- 512 bit array, with data ordered like ARMVectorReg. */ + uint64_t zt0[512 / 64] QEMU_ALIGNED(16); + + /* + * SME ZA storage -- 256 x 256 byte array, with bytes in host + * word order, as we do with vfp.zregs[]. This corresponds to + * the architectural ZA array, where ZA[N] is in the least + * significant bytes of env->za_state.za[N]. + * + * When SVL is less than the architectural maximum, the accessible + * storage is restricted, such that if the SVL is X bytes the guest + * can see only the bottom X elements of zarray[], and only the least + * significant X bytes of each element of the array. (In other words, + * the observable part is always square.) 
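+ * For example (illustrative), with an SVL of 32 bytes (256-bit streaming
+ * vectors) the guest observes only the first 32 rows of the array, and
+ * only the least significant 32 bytes of each of those rows.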
+ * + * The ZA storage can also be considered as a set of square tiles of + * elements of different sizes. The mapping from tiles to the ZA array + * is architecturally defined, such that for tiles of elements of esz + * bytes, the Nth row (or "horizontal slice") of tile T is in + * ZA[T + N * esz]. Note that this means that each tile is not + * contiguous in the ZA storage, because its rows are striped through + * the ZA array. + * + * Because this is so large, keep this toward the end of the + * reset area, to keep the offsets into the rest of the structure + * smaller. + */ + ARMVectorReg za[ARM_MAX_VQ * 16]; + } za_state; struct CPUBreakpoint *cpu_breakpoint[16]; struct CPUWatchpoint *cpu_watchpoint[16]; @@ -774,12 +800,9 @@ typedef struct CPUArchState { #else /* CONFIG_USER_ONLY */ /* For usermode syscall translation. */ bool eabi; -#endif /* CONFIG_USER_ONLY */ - -#ifdef TARGET_TAGGED_ADDRESSES /* Linux syscall tagged address support */ bool tagged_addr_enable; -#endif +#endif /* CONFIG_USER_ONLY */ } CPUARMState; static inline void set_feature(CPUARMState *env, int feature) @@ -828,6 +851,53 @@ typedef struct { uint32_t map, init, supported; } ARMVQMap; +/* REG is ID_XXX */ +#define FIELD_DP64_IDREG(ISAR, REG, FIELD, VALUE) \ + ({ \ + ARMISARegisters *i_ = (ISAR); \ + uint64_t regval = i_->idregs[REG ## _EL1_IDX]; \ + regval = FIELD_DP64(regval, REG, FIELD, VALUE); \ + i_->idregs[REG ## _EL1_IDX] = regval; \ + }) + +#define FIELD_DP32_IDREG(ISAR, REG, FIELD, VALUE) \ + ({ \ + ARMISARegisters *i_ = (ISAR); \ + uint64_t regval = i_->idregs[REG ## _EL1_IDX]; \ + regval = FIELD_DP32(regval, REG, FIELD, VALUE); \ + i_->idregs[REG ## _EL1_IDX] = regval; \ + }) + +#define FIELD_EX64_IDREG(ISAR, REG, FIELD) \ + ({ \ + const ARMISARegisters *i_ = (ISAR); \ + FIELD_EX64(i_->idregs[REG ## _EL1_IDX], REG, FIELD); \ + }) + +#define FIELD_EX32_IDREG(ISAR, REG, FIELD) \ + ({ \ + const ARMISARegisters *i_ = (ISAR); \ + FIELD_EX32(i_->idregs[REG ## _EL1_IDX], REG, FIELD); \ + }) + +#define FIELD_SEX64_IDREG(ISAR, REG, FIELD) \ + ({ \ + const ARMISARegisters *i_ = (ISAR); \ + FIELD_SEX64(i_->idregs[REG ## _EL1_IDX], REG, FIELD); \ + }) + +#define SET_IDREG(ISAR, REG, VALUE) \ + ({ \ + ARMISARegisters *i_ = (ISAR); \ + i_->idregs[REG ## _EL1_IDX] = VALUE; \ + }) + +#define GET_IDREG(ISAR, REG) \ + ({ \ + const ARMISARegisters *i_ = (ISAR); \ + i_->idregs[REG ## _EL1_IDX]; \ + }) + /** * ARMCPU: * @env: #CPUARMState @@ -922,6 +992,8 @@ struct ArchCPU { /* CPU has memory protection unit */ bool has_mpu; + /* CPU has MTE enabled in KVM mode */ + bool kvm_mte; /* PMSAv7 MPU number of supported regions */ uint32_t pmsav7_dregion; /* PMSAv8 MPU number of supported hyp regions */ @@ -944,7 +1016,6 @@ struct ArchCPU { */ uint32_t kvm_target; -#ifdef CONFIG_KVM /* KVM init features for this CPU */ uint32_t kvm_init_features[7]; @@ -957,7 +1028,6 @@ struct ArchCPU { /* KVM steal time */ OnOffAuto kvm_steal_time; -#endif /* CONFIG_KVM */ /* Uniprocessor system with MP extensions */ bool mp_is_up; @@ -970,6 +1040,9 @@ struct ArchCPU { /* QOM property to indicate we should use the back-compat CNTFRQ default */ bool backcompat_cntfrq; + /* QOM property to indicate we should use the back-compat QARMA5 default */ + bool backcompat_pauth_default_use_qarma5; + /* Specify the number of cores in this CPU cluster. Used for the L2CTLR * register. */ @@ -993,44 +1066,14 @@ struct ArchCPU { * field by reading the value from the KVM vCPU. 
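+ *
+ * The ID registers themselves now live in the idregs[] array below and
+ * are accessed through the GET_IDREG()/SET_IDREG()/FIELD_DP64_IDREG()
+ * macros defined earlier in this file, for example:
+ *     FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, SVE, 1);
+ * rather than writing cpu->isar.id_aa64pfr0 directly.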
*/ struct ARMISARegisters { - uint32_t id_isar0; - uint32_t id_isar1; - uint32_t id_isar2; - uint32_t id_isar3; - uint32_t id_isar4; - uint32_t id_isar5; - uint32_t id_isar6; - uint32_t id_mmfr0; - uint32_t id_mmfr1; - uint32_t id_mmfr2; - uint32_t id_mmfr3; - uint32_t id_mmfr4; - uint32_t id_mmfr5; - uint32_t id_pfr0; - uint32_t id_pfr1; - uint32_t id_pfr2; uint32_t mvfr0; uint32_t mvfr1; uint32_t mvfr2; - uint32_t id_dfr0; - uint32_t id_dfr1; uint32_t dbgdidr; uint32_t dbgdevid; uint32_t dbgdevid1; - uint64_t id_aa64isar0; - uint64_t id_aa64isar1; - uint64_t id_aa64isar2; - uint64_t id_aa64pfr0; - uint64_t id_aa64pfr1; - uint64_t id_aa64mmfr0; - uint64_t id_aa64mmfr1; - uint64_t id_aa64mmfr2; - uint64_t id_aa64mmfr3; - uint64_t id_aa64dfr0; - uint64_t id_aa64dfr1; - uint64_t id_aa64zfr0; - uint64_t id_aa64smfr0; uint64_t reset_pmcr_el0; + uint64_t idregs[NUM_ID_IDX]; } isar; uint64_t midr; uint32_t revidr; @@ -1039,10 +1082,6 @@ struct ArchCPU { uint32_t reset_sctlr; uint64_t pmceid0; uint64_t pmceid1; - uint32_t id_afr0; - uint64_t id_aa64afr0; - uint64_t id_aa64afr1; - uint64_t clidr; uint64_t mp_affinity; /* MP ID without feature bits */ /* The elements of this array are the CCSIDR values for each cache, * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc. @@ -1060,6 +1099,7 @@ struct ArchCPU { bool prop_pauth; bool prop_pauth_impdef; bool prop_pauth_qarma3; + bool prop_pauth_qarma5; bool prop_lpa2; /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */ @@ -1092,6 +1132,7 @@ struct ArchCPU { /* Used to set the maximum vector length the cpu will support. */ uint32_t sve_max_vq; + uint32_t sme_max_vq; #ifdef CONFIG_USER_ONLY /* Used to set the default vector length at process start. */ @@ -1108,8 +1149,9 @@ struct ArchCPU { typedef struct ARMCPUInfo { const char *name; + const char *deprecation_note; void (*initfn)(Object *obj); - void (*class_init)(ObjectClass *oc, void *data); + void (*class_init)(ObjectClass *oc, const void *data); } ARMCPUInfo; /** @@ -1127,22 +1169,18 @@ struct ARMCPUClass { ResettablePhases parent_phases; }; -struct AArch64CPUClass { - ARMCPUClass parent_class; -}; - /* Callback functions for the generic timer's timers. 
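+ * arm_gt_sel2timer_cb and arm_gt_sel2vtimer_cb serve the new Secure EL2
+ * physical and virtual timers (GTIMER_S_EL2_PHYS and GTIMER_S_EL2_VIRT)
+ * created in arm_cpu_realizefn().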
*/ void arm_gt_ptimer_cb(void *opaque); void arm_gt_vtimer_cb(void *opaque); void arm_gt_htimer_cb(void *opaque); void arm_gt_stimer_cb(void *opaque); void arm_gt_hvtimer_cb(void *opaque); +void arm_gt_sel2timer_cb(void *opaque); +void arm_gt_sel2vtimer_cb(void *opaque); unsigned int gt_cntfrq_period_ns(ARMCPU *cpu); void gt_rme_post_el_change(ARMCPU *cpu, void *opaque); -void arm_cpu_post_init(Object *obj); - #define ARM_AFF0_SHIFT 0 #define ARM_AFF0_MASK (0xFFULL << ARM_AFF0_SHIFT) #define ARM_AFF1_SHIFT 8 @@ -1200,7 +1238,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, */ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el); -#ifdef TARGET_AARCH64 int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); @@ -1232,13 +1269,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr) #endif } -#else -static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { } -static inline void aarch64_sve_change_el(CPUARMState *env, int o, - int n, bool a) -{ } -#endif - void aarch64_sync_32_to_64(CPUARMState *env); void aarch64_sync_64_to_32(CPUARMState *env); @@ -1365,6 +1395,7 @@ void pmu_init(ARMCPU *cpu); #define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */ #define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */ #define SCTLR_DSSBS_32 (1U << 31) /* v8.5, AArch32 only */ +#define SCTLR_CMOW (1ULL << 32) /* FEAT_CMOW */ #define SCTLR_MSCEN (1ULL << 33) /* FEAT_MOPS */ #define SCTLR_BT0 (1ULL << 35) /* v8.5-BTI */ #define SCTLR_BT1 (1ULL << 36) /* v8.5-BTI */ @@ -1479,6 +1510,7 @@ FIELD(SVCR, ZA, 1, 1) /* Fields for SMCR_ELx. */ FIELD(SMCR, LEN, 0, 4) +FIELD(SMCR, EZT0, 30, 1) FIELD(SMCR, FA64, 31, 1) /* Write a new value to v7m.exception, thus transitioning into or out @@ -1702,11 +1734,15 @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val); */ /* FPCR bits */ +#define FPCR_FIZ (1 << 0) /* Flush Inputs to Zero (FEAT_AFP) */ +#define FPCR_AH (1 << 1) /* Alternate Handling (FEAT_AFP) */ +#define FPCR_NEP (1 << 2) /* SIMD scalar ops preserve elts (FEAT_AFP) */ #define FPCR_IOE (1 << 8) /* Invalid Operation exception trap enable */ #define FPCR_DZE (1 << 9) /* Divide by Zero exception trap enable */ #define FPCR_OFE (1 << 10) /* Overflow exception trap enable */ #define FPCR_UFE (1 << 11) /* Underflow exception trap enable */ #define FPCR_IXE (1 << 12) /* Inexact exception trap enable */ +#define FPCR_EBF (1 << 13) /* Extended BFloat16 behaviors */ #define FPCR_IDE (1 << 15) /* Input Denormal exception trap enable */ #define FPCR_LEN_MASK (7 << 16) /* LEN, A-profile only */ #define FPCR_FZ16 (1 << 19) /* ARMv8.2+, FP16 flush-to-zero */ @@ -2176,6 +2212,7 @@ FIELD(ID_AA64ISAR2, SYSINSTR_128, 36, 4) FIELD(ID_AA64ISAR2, PRFMSLC, 40, 4) FIELD(ID_AA64ISAR2, RPRFM, 48, 4) FIELD(ID_AA64ISAR2, CSSC, 52, 4) +FIELD(ID_AA64ISAR2, LUT, 56, 4) FIELD(ID_AA64ISAR2, ATS1A, 60, 4) FIELD(ID_AA64PFR0, EL0, 0, 4) @@ -2557,6 +2594,11 @@ static inline bool arm_is_secure_below_el3(CPUARMState *env) return false; } +static inline bool arm_is_el3_or_mon(CPUARMState *env) +{ + return false; +} + static inline ARMSecuritySpace arm_security_space(CPUARMState *env) { return ARMSS_NonSecure; @@ -2589,81 +2631,15 @@ uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space); uint64_t arm_hcr_el2_eff(CPUARMState *env); uint64_t arm_hcrx_el2_eff(CPUARMState *env); -/* Return true if the specified 
exception level is running in AArch64 state. */ -static inline bool arm_el_is_aa64(CPUARMState *env, int el) -{ - /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want, - * and if we're not in EL0 then the state of EL0 isn't well defined.) - */ - assert(el >= 1 && el <= 3); - bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64); - - /* The highest exception level is always at the maximum supported - * register width, and then lower levels have a register width controlled - * by bits in the SCR or HCR registers. - */ - if (el == 3) { - return aa64; - } - - if (arm_feature(env, ARM_FEATURE_EL3) && - ((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) { - aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW); - } - - if (el == 2) { - return aa64; - } - - if (arm_is_el2_enabled(env)) { - aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW); - } - - return aa64; -} - -/* Function for determining whether guest cp register reads and writes should +/* + * Function for determining whether guest cp register reads and writes should * access the secure or non-secure bank of a cp register. When EL3 is * operating in AArch32 state, the NS-bit determines whether the secure * instance of a cp register should be used. When EL3 is AArch64 (or if * it doesn't exist at all) then there is no register banking, and all * accesses are to the non-secure version. */ -static inline bool access_secure_reg(CPUARMState *env) -{ - bool ret = (arm_feature(env, ARM_FEATURE_EL3) && - !arm_el_is_aa64(env, 3) && - !(env->cp15.scr_el3 & SCR_NS)); - - return ret; -} - -/* Macros for accessing a specified CP register bank */ -#define A32_BANKED_REG_GET(_env, _regname, _secure) \ - ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns) - -#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \ - do { \ - if (_secure) { \ - (_env)->cp15._regname##_s = (_val); \ - } else { \ - (_env)->cp15._regname##_ns = (_val); \ - } \ - } while (0) - -/* Macros for automatically accessing a specific CP register bank depending on - * the current secure state of the system. These macros are not intended for - * supporting instruction translation reads/writes as these are dependent - * solely on the SCR.NS bit and not the mode. - */ -#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \ - A32_BANKED_REG_GET((_env), _regname, \ - (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3))) - -#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \ - A32_BANKED_REG_SET((_env), _regname, \ - (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \ - (_val)) +bool access_secure_reg(CPUARMState *env); uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, uint32_t cur_el, bool secure); @@ -2686,39 +2662,6 @@ static inline bool arm_v7m_is_handler_mode(CPUARMState *env) return env->v7m.exception != 0; } -/* Return the current Exception Level (as per ARMv8; note that this differs - * from the ARMv7 Privilege Level). 
- */ -static inline int arm_current_el(CPUARMState *env) -{ - if (arm_feature(env, ARM_FEATURE_M)) { - return arm_v7m_is_handler_mode(env) || - !(env->v7m.control[env->v7m.secure] & 1); - } - - if (is_a64(env)) { - return extract32(env->pstate, 2, 2); - } - - switch (env->uncached_cpsr & 0x1f) { - case ARM_CPU_MODE_USR: - return 0; - case ARM_CPU_MODE_HYP: - return 2; - case ARM_CPU_MODE_MON: - return 3; - default: - if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { - /* If EL3 is 32-bit then all secure privileged modes run in - * EL3 - */ - return 3; - } - - return 1; - } -} - /** * write_list_to_cpustate * @cpu: ARMCPU @@ -2772,14 +2715,19 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); * + NonSecure EL1 & 0 stage 2 * + NonSecure EL2 * + NonSecure EL2 & 0 (ARMv8.1-VHE) - * + Secure EL1 & 0 - * + Secure EL3 + * + Secure EL1 & 0 stage 1 + * + Secure EL1 & 0 stage 2 (FEAT_SEL2) + * + Secure EL2 (FEAT_SEL2) + * + Secure EL2 & 0 (FEAT_SEL2) + * + Realm EL1 & 0 stage 1 (FEAT_RME) + * + Realm EL1 & 0 stage 2 (FEAT_RME) + * + Realm EL2 (FEAT_RME) + * + EL3 * If EL3 is 32-bit: * + NonSecure PL1 & 0 stage 1 * + NonSecure PL1 & 0 stage 2 * + NonSecure PL2 - * + Secure PL0 - * + Secure PL1 + * + Secure PL1 & 0 * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.) * * For QEMU, an mmu_idx is not quite the same as a translation regime because: @@ -2805,29 +2753,34 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); * table over and over. * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access * Never (PAN) bit within PSTATE. - * 7. we fold together the secure and non-secure regimes for A-profile, + * 7. we fold together most secure and non-secure regimes for A-profile, * because there are no banked system registers for aarch64, so the * process of switching between secure and non-secure is * already heavyweight. + * 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure, + * because both are in use simultaneously for Secure EL2. * * This gives us the following list of cases: * - * EL0 EL1&0 stage 1+2 (aka NS PL0) - * EL1 EL1&0 stage 1+2 (aka NS PL1) - * EL1 EL1&0 stage 1+2 +PAN + * EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2) + * EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2) + * EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 P1&0 stage 1+2 +PAN) * EL0 EL2&0 * EL2 EL2&0 * EL2 EL2&0 +PAN * EL2 (aka NS PL2) - * EL3 (aka S PL1) - * Physical (NS & S) - * Stage2 (NS & S) + * EL3 (aka AArch32 S PL1 PL1&0) + * AArch32 S PL0 PL1&0 (we call this EL30_0) + * AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN) + * Stage2 Secure + * Stage2 NonSecure + * plus one TLB per Physical address space: S, NS, Realm, Root * - * for a total of 12 different mmu_idx. + * for a total of 16 different mmu_idx. * * R profile CPUs have an MPU, but can use the same set of MMU indexes * as A profile. They only need to distinguish EL0 and EL1 (and - * EL2 if we ever model a Cortex-R52). + * EL2 for cores like the Cortex-R52). * * M profile CPUs are rather different as they do not have a true MMU. 
* They have the following different MMU indexes: @@ -2887,6 +2840,8 @@ typedef enum ARMMMUIdx { ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A, ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A, ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A, + ARMMMUIdx_E30_0 = 8 | ARM_MMU_IDX_A, + ARMMMUIdx_E30_3_PAN = 9 | ARM_MMU_IDX_A, /* * Used for second stage of an S12 page table walk, or for descriptor @@ -2894,14 +2849,14 @@ typedef enum ARMMMUIdx { * are in use simultaneously for SecureEL2: the security state for * the S2 ptw is selected by the NS bit from the S1 ptw. */ - ARMMMUIdx_Stage2_S = 8 | ARM_MMU_IDX_A, - ARMMMUIdx_Stage2 = 9 | ARM_MMU_IDX_A, + ARMMMUIdx_Stage2_S = 10 | ARM_MMU_IDX_A, + ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A, /* TLBs with 1-1 mapping to the physical address spaces. */ - ARMMMUIdx_Phys_S = 10 | ARM_MMU_IDX_A, - ARMMMUIdx_Phys_NS = 11 | ARM_MMU_IDX_A, - ARMMMUIdx_Phys_Root = 12 | ARM_MMU_IDX_A, - ARMMMUIdx_Phys_Realm = 13 | ARM_MMU_IDX_A, + ARMMMUIdx_Phys_S = 12 | ARM_MMU_IDX_A, + ARMMMUIdx_Phys_NS = 13 | ARM_MMU_IDX_A, + ARMMMUIdx_Phys_Root = 14 | ARM_MMU_IDX_A, + ARMMMUIdx_Phys_Realm = 15 | ARM_MMU_IDX_A, /* * These are not allocated TLBs and are used only for AT system @@ -2940,6 +2895,8 @@ typedef enum ARMMMUIdxBit { TO_CORE_BIT(E20_2), TO_CORE_BIT(E20_2_PAN), TO_CORE_BIT(E3), + TO_CORE_BIT(E30_0), + TO_CORE_BIT(E30_3_PAN), TO_CORE_BIT(Stage2), TO_CORE_BIT(Stage2_S), @@ -2987,7 +2944,7 @@ static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu) /* If all the CLIDR.Ctypem bits are 0 there are no caches, and * CSSELR is RAZ/WI. */ - return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0; + return (GET_IDREG(&cpu->isar, CLIDR) & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0; } static inline bool arm_sctlr_b(CPUARMState *env) @@ -3005,60 +2962,15 @@ static inline bool arm_sctlr_b(CPUARMState *env) uint64_t arm_sctlr(CPUARMState *env, int el); -static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env, - bool sctlr_b) -{ -#ifdef CONFIG_USER_ONLY - /* - * In system mode, BE32 is modelled in line with the - * architecture (as word-invariant big-endianness), where loads - * and stores are done little endian but from addresses which - * are adjusted by XORing with the appropriate constant. So the - * endianness to use for the raw data access is not affected by - * SCTLR.B. - * In user mode, however, we model BE32 as byte-invariant - * big-endianness (because user-only code cannot tell the - * difference), and so we need to use a data access endianness - * that depends on SCTLR.B. - */ - if (sctlr_b) { - return true; - } -#endif - /* In 32bit endianness is determined by looking at CPSR's E bit */ - return env->uncached_cpsr & CPSR_E; -} - -static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr) -{ - return sctlr & (el ? SCTLR_EE : SCTLR_E0E); -} - -/* Return true if the processor is in big-endian mode. */ -static inline bool arm_cpu_data_is_big_endian(CPUARMState *env) -{ - if (!is_a64(env)) { - return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env)); - } else { - int cur_el = arm_current_el(env); - uint64_t sctlr = arm_sctlr(env, cur_el); - return arm_cpu_data_is_big_endian_a64(cur_el, sctlr); - } -} - -#include "exec/cpu-all.h" - /* * We have more than 32-bits worth of state per TB, so we split the data * between tb->flags and tb->cs_base, which is otherwise unused for ARM. * We collect these two parts in CPUARMTBFlags where they are named * flags and flags2 respectively. * - * The flags that are shared between all execution modes, TBFLAG_ANY, - * are stored in flags. 
The flags that are specific to a given mode - * are stores in flags2. Since cs_base is sized on the configured - * address size, flags2 always has 64-bits for A64, and a minimum of - * 32-bits for A32 and M32. + * The flags that are shared between all execution modes, TBFLAG_ANY, are stored + * in flags. The flags that are specific to a given mode are stored in flags2. + * flags2 always has 64-bits, even though only 32-bits are used for A32 and M32. * * The bits for 32-bit A-profile and M-profile partially overlap: * @@ -3168,6 +3080,9 @@ FIELD(TBFLAG_A64, NV2, 34, 1) FIELD(TBFLAG_A64, NV2_MEM_E20, 35, 1) /* Set if FEAT_NV2 RAM accesses are big-endian */ FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1) +FIELD(TBFLAG_A64, AH, 37, 1) /* FPCR.AH */ +FIELD(TBFLAG_A64, NEP, 38, 1) /* FPCR.NEP */ +FIELD(TBFLAG_A64, ZT0EXC_EL, 39, 2) /* * Helpers for using the above. Note that only the A64 accessors use @@ -3229,16 +3144,6 @@ static inline bool bswap_code(bool sctlr_b) #endif } -#ifdef CONFIG_USER_ONLY -static inline bool arm_cpu_bswap_data(CPUARMState *env) -{ - return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env); -} -#endif - -void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags); - enum { QEMU_PSCI_CONDUIT_DISABLED = 0, QEMU_PSCI_CONDUIT_SMC = 1, @@ -3336,34 +3241,4 @@ extern const uint64_t pred_esz_masks[5]; #define LOG2_TAG_GRANULE 4 #define TAG_GRANULE (1 << LOG2_TAG_GRANULE) -#ifdef CONFIG_USER_ONLY -#define TARGET_PAGE_DATA_SIZE (TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1)) -#endif - -#ifdef TARGET_TAGGED_ADDRESSES -/** - * cpu_untagged_addr: - * @cs: CPU context - * @x: tagged address - * - * Remove any address tag from @x. This is explicitly related to the - * linux syscall TIF_TAGGED_ADDR setting, not TBI in general. - * - * There should be a better place to put this, but we need this in - * include/exec/cpu_ldst.h, and not some place linux-user specific. - */ -static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x) -{ - CPUARMState *env = cpu_env(cs); - if (env->tagged_addr_enable) { - /* - * TBI is enabled for userspace but not kernelspace addresses. - * Only clear the tag if bit 55 is clear. 
- */ - x &= sextract64(x, 0, 56); - } - return x; -} -#endif - #endif diff --git a/target/arm/cpu32-stubs.c b/target/arm/cpu32-stubs.c new file mode 100644 index 00000000000..81be44d8462 --- /dev/null +++ b/target/arm/cpu32-stubs.c @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "target/arm/cpu.h" +#include "target/arm/internals.h" +#include + +void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp) +{ + g_assert_not_reached(); +} + +void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) +{ + g_assert_not_reached(); +} + +void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) +{ + g_assert_not_reached(); +} + +void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp) +{ + g_assert_not_reached(); +} diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 262a1d6c0bb..26cf7e6dfa2 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -23,10 +23,11 @@ #include "cpu.h" #include "cpregs.h" #include "qemu/module.h" -#include "sysemu/kvm.h" -#include "sysemu/hvf.h" -#include "sysemu/qtest.h" -#include "sysemu/tcg.h" +#include "qemu/units.h" +#include "system/kvm.h" +#include "system/hvf.h" +#include "system/qtest.h" +#include "system/tcg.h" #include "kvm_arm.h" #include "hvf_arm.h" #include "qapi/visitor.h" @@ -35,6 +36,28 @@ #include "cpu-features.h" #include "cpregs.h" +/* convert between _IDX and SYS_ */ +#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \ + [NAME##_IDX] = SYS_##NAME, + +const uint32_t id_register_sysreg[NUM_ID_IDX] = { +#include "cpu-sysregs.h.inc" +}; + +#undef DEF +#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \ + case SYS_##NAME: return NAME##_IDX; + +int get_sysreg_idx(ARMSysRegs sysreg) +{ + switch (sysreg) { +#include "cpu-sysregs.h.inc" + } + g_assert_not_reached(); +} + +#undef DEF + void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) { /* @@ -113,7 +136,7 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) * SVE is disabled and so are all vector lengths. Good. * Disable all SVE extensions as well. */ - cpu->isar.id_aa64zfr0 = 0; + SET_IDREG(&cpu->isar, ID_AA64ZFR0, 0); return; } @@ -236,6 +259,13 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) /* From now on sve_max_vq is the actual maximum supported length. */ cpu->sve_max_vq = max_vq; cpu->sve_vq.map = vq_map; + + /* FEAT_F64MM requires the existence of a 256-bit vector size. 
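+ * vq counts 128-bit granules, so max_vq < 2 means no 256-bit vector
+ * length is available and the F64MM field must be cleared below.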
*/ + if (max_vq < 2) { + uint64_t t = GET_IDREG(&cpu->isar, ID_AA64ZFR0); + t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 0); + SET_IDREG(&cpu->isar, ID_AA64ZFR0, t); + } } /* @@ -287,16 +317,13 @@ static bool cpu_arm_get_sve(Object *obj, Error **errp) static void cpu_arm_set_sve(Object *obj, bool value, Error **errp) { ARMCPU *cpu = ARM_CPU(obj); - uint64_t t; if (value && kvm_enabled() && !kvm_arm_sve_supported()) { error_setg(errp, "'sve' feature not supported by KVM on this host"); return; } - t = cpu->isar.id_aa64pfr0; - t = FIELD_DP64(t, ID_AA64PFR0, SVE, value); - cpu->isar.id_aa64pfr0 = t; + FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, SVE, value); } void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp) @@ -308,7 +335,7 @@ void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp) if (vq_map == 0) { if (!cpu_isar_feature(aa64_sme, cpu)) { - cpu->isar.id_aa64smfr0 = 0; + SET_IDREG(&cpu->isar, ID_AA64SMFR0, 0); return; } @@ -336,6 +363,7 @@ void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp) } cpu->sme_vq.map = vq_map; + cpu->sme_max_vq = 32 - clz32(vq_map); } static bool cpu_arm_get_sme(Object *obj, Error **errp) @@ -347,11 +375,8 @@ static bool cpu_arm_get_sme(Object *obj, Error **errp) static void cpu_arm_set_sme(Object *obj, bool value, Error **errp) { ARMCPU *cpu = ARM_CPU(obj); - uint64_t t; - t = cpu->isar.id_aa64pfr1; - t = FIELD_DP64(t, ID_AA64PFR1, SME, value); - cpu->isar.id_aa64pfr1 = t; + FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR1, SME, value); } static bool cpu_arm_get_sme_fa64(Object *obj, Error **errp) @@ -364,11 +389,8 @@ static bool cpu_arm_get_sme_fa64(Object *obj, Error **errp) static void cpu_arm_set_sme_fa64(Object *obj, bool value, Error **errp) { ARMCPU *cpu = ARM_CPU(obj); - uint64_t t; - t = cpu->isar.id_aa64smfr0; - t = FIELD_DP64(t, ID_AA64SMFR0, FA64, value); - cpu->isar.id_aa64smfr0 = t; + FIELD_DP64_IDREG(&cpu->isar, ID_AA64SMFR0, FA64, value); } #ifdef CONFIG_USER_ONLY @@ -479,6 +501,7 @@ void aarch64_add_sme_properties(Object *obj) void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) { ARMPauthFeature features = cpu_isar_feature(pauth_feature, cpu); + ARMISARegisters *isar = &cpu->isar; uint64_t isar1, isar2; /* @@ -489,13 +512,13 @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) * * Begin by disabling all fields. 
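+ *
+ * At most one of pauth-qarma5, pauth-qarma3 and pauth-impdef may be
+ * enabled at a time; when none is given, the default algorithm follows
+ * backcompat_pauth_default_use_qarma5 (QARMA5 when that back-compat
+ * property is set, IMPDEF otherwise).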
*/ - isar1 = cpu->isar.id_aa64isar1; + isar1 = GET_IDREG(isar, ID_AA64ISAR1); isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, 0); isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 0); isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, 0); isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 0); - isar2 = cpu->isar.id_aa64isar2; + isar2 = GET_IDREG(isar, ID_AA64ISAR2); isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, 0); isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 0); @@ -519,39 +542,56 @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) } if (cpu->prop_pauth) { - if (cpu->prop_pauth_impdef && cpu->prop_pauth_qarma3) { + if ((cpu->prop_pauth_impdef && cpu->prop_pauth_qarma3) || + (cpu->prop_pauth_impdef && cpu->prop_pauth_qarma5) || + (cpu->prop_pauth_qarma3 && cpu->prop_pauth_qarma5)) { error_setg(errp, - "cannot enable both pauth-impdef and pauth-qarma3"); + "cannot enable pauth-impdef, pauth-qarma3 and " + "pauth-qarma5 at the same time"); return; } - if (cpu->prop_pauth_impdef) { - isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, features); - isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 1); + bool use_default = !cpu->prop_pauth_qarma5 && + !cpu->prop_pauth_qarma3 && + !cpu->prop_pauth_impdef; + + if (cpu->prop_pauth_qarma5 || + (use_default && + cpu->backcompat_pauth_default_use_qarma5)) { + isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, features); + isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 1); } else if (cpu->prop_pauth_qarma3) { isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, features); isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 1); + } else if (cpu->prop_pauth_impdef || + (use_default && + !cpu->backcompat_pauth_default_use_qarma5)) { + isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, features); + isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 1); } else { - isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, features); - isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 1); + g_assert_not_reached(); } - } else if (cpu->prop_pauth_impdef || cpu->prop_pauth_qarma3) { - error_setg(errp, "cannot enable pauth-impdef or " - "pauth-qarma3 without pauth"); + } else if (cpu->prop_pauth_impdef || + cpu->prop_pauth_qarma3 || + cpu->prop_pauth_qarma5) { + error_setg(errp, "cannot enable pauth-impdef, pauth-qarma3 or " + "pauth-qarma5 without pauth"); error_append_hint(errp, "Add pauth=on to the CPU property list.\n"); } } - cpu->isar.id_aa64isar1 = isar1; - cpu->isar.id_aa64isar2 = isar2; + SET_IDREG(isar, ID_AA64ISAR1, isar1); + SET_IDREG(isar, ID_AA64ISAR2, isar2); } -static Property arm_cpu_pauth_property = +static const Property arm_cpu_pauth_property = DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true); -static Property arm_cpu_pauth_impdef_property = +static const Property arm_cpu_pauth_impdef_property = DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false); -static Property arm_cpu_pauth_qarma3_property = +static const Property arm_cpu_pauth_qarma3_property = DEFINE_PROP_BOOL("pauth-qarma3", ARMCPU, prop_pauth_qarma3, false); +static Property arm_cpu_pauth_qarma5_property = + DEFINE_PROP_BOOL("pauth-qarma5", ARMCPU, prop_pauth_qarma5, false); void aarch64_add_pauth_properties(Object *obj) { @@ -572,6 +612,7 @@ void aarch64_add_pauth_properties(Object *obj) } else { qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property); qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma3_property); + qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_qarma5_property); } } @@ -587,17 +628,18 @@ void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp) return; } - t = cpu->isar.id_aa64mmfr0; + t = 
GET_IDREG(&cpu->isar, ID_AA64MMFR0); t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 2); /* 16k pages w/ LPA2 */ t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4, 1); /* 4k pages w/ LPA2 */ t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 3); /* 16k stage2 w/ LPA2 */ t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 3); /* 4k stage2 w/ LPA2 */ - cpu->isar.id_aa64mmfr0 = t; + SET_IDREG(&cpu->isar, ID_AA64MMFR0, t); } static void aarch64_a57_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,cortex-a57"; set_feature(&cpu->env, ARM_FEATURE_V8); @@ -618,33 +660,36 @@ static void aarch64_a57_initfn(Object *obj) cpu->isar.mvfr2 = 0x00000043; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50838; - cpu->isar.id_pfr0 = 0x00000131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x03010066; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10101105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_isar6 = 0; - cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->isar.id_aa64dfr0 = 0x10305106; - cpu->isar.id_aa64isar0 = 0x00011120; - cpu->isar.id_aa64mmfr0 = 0x00001124; + SET_IDREG(isar, ID_PFR0, 0x00000131); + SET_IDREG(isar, ID_PFR1, 0x00011011); + SET_IDREG(isar, ID_DFR0, 0x03010066); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_MMFR0, 0x10101105); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01260000); + SET_IDREG(isar, ID_MMFR3, 0x02102211); + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232042); + SET_IDREG(isar, ID_ISAR3, 0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x00011142); + SET_IDREG(isar, ID_ISAR5, 0x00011121); + SET_IDREG(isar, ID_ISAR6, 0); + SET_IDREG(isar, ID_AA64PFR0, 0x00002222); + SET_IDREG(isar, ID_AA64DFR0, 0x10305106); + SET_IDREG(isar, ID_AA64ISAR0, 0x00011120); + SET_IDREG(isar, ID_AA64MMFR0, 0x00001124); cpu->isar.dbgdidr = 0x3516d000; cpu->isar.dbgdevid = 0x01110f13; cpu->isar.dbgdevid1 = 0x2; cpu->isar.reset_pmcr_el0 = 0x41013000; - cpu->clidr = 0x0a200023; - cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ - cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */ + SET_IDREG(isar, CLIDR, 0x0a200023); + /* 32KB L1 dcache */ + cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7); + /* 48KB L1 icache */ + cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 3, 64, 48 * KiB, 2); + /* 2048KB L2 cache */ + cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 2 * MiB, 7); cpu->dcz_blocksize = 4; /* 64 bytes */ cpu->gic_num_lrs = 4; cpu->gic_vpribits = 5; @@ -656,6 +701,7 @@ static void aarch64_a57_initfn(Object *obj) static void aarch64_a53_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,cortex-a53"; set_feature(&cpu->env, ARM_FEATURE_V8); @@ -676,33 +722,36 @@ static void aarch64_a53_initfn(Object *obj) cpu->isar.mvfr2 = 0x00000043; cpu->ctr = 0x84448004; /* L1Ip = VIPT */ cpu->reset_sctlr = 0x00c50838; - cpu->isar.id_pfr0 = 0x00000131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x03010066; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10101105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 
0x01260000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_isar6 = 0; - cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->isar.id_aa64dfr0 = 0x10305106; - cpu->isar.id_aa64isar0 = 0x00011120; - cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */ + SET_IDREG(isar, ID_PFR0, 0x00000131); + SET_IDREG(isar, ID_PFR1, 0x00011011); + SET_IDREG(isar, ID_DFR0, 0x03010066); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_MMFR0, 0x10101105); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01260000); + SET_IDREG(isar, ID_MMFR3, 0x02102211); + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232042); + SET_IDREG(isar, ID_ISAR3, 0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x00011142); + SET_IDREG(isar, ID_ISAR5, 0x00011121); + SET_IDREG(isar, ID_ISAR6, 0); + SET_IDREG(isar, ID_AA64PFR0, 0x00002222); + SET_IDREG(isar, ID_AA64DFR0, 0x10305106); + SET_IDREG(isar, ID_AA64ISAR0, 0x00011120); + SET_IDREG(isar, ID_AA64MMFR0, 0x00001122); /* 40 bit physical addr */ cpu->isar.dbgdidr = 0x3516d000; cpu->isar.dbgdevid = 0x00110f13; cpu->isar.dbgdevid1 = 0x1; cpu->isar.reset_pmcr_el0 = 0x41033000; - cpu->clidr = 0x0a200023; - cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ - cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */ + SET_IDREG(isar, CLIDR, 0x0a200023); + /* 32KB L1 dcache */ + cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7); + /* 32KB L1 icache */ + cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 1, 64, 32 * KiB, 2); + /* 1024KB L2 cache */ + cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 1 * MiB, 7); cpu->dcz_blocksize = 4; /* 64 bytes */ cpu->gic_num_lrs = 4; cpu->gic_vpribits = 5; @@ -756,104 +805,12 @@ static const ARMCPUInfo aarch64_cpus[] = { #endif }; -static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp) -{ - ARMCPU *cpu = ARM_CPU(obj); - - return arm_feature(&cpu->env, ARM_FEATURE_AARCH64); -} - -static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp) -{ - ARMCPU *cpu = ARM_CPU(obj); - - /* At this time, this property is only allowed if KVM is enabled. This - * restriction allows us to avoid fixing up functionality that assumes a - * uniform execution state like do_interrupt. 
- */ - if (value == false) { - if (!kvm_enabled() || !kvm_arm_aarch32_supported()) { - error_setg(errp, "'aarch64' feature cannot be disabled " - "unless KVM is enabled and 32-bit EL1 " - "is supported"); - return; - } - unset_feature(&cpu->env, ARM_FEATURE_AARCH64); - } else { - set_feature(&cpu->env, ARM_FEATURE_AARCH64); - } -} - -static void aarch64_cpu_finalizefn(Object *obj) -{ -} - -static const gchar *aarch64_gdb_arch_name(CPUState *cs) -{ - return "aarch64"; -} - -static void aarch64_cpu_class_init(ObjectClass *oc, void *data) -{ - CPUClass *cc = CPU_CLASS(oc); - - cc->gdb_read_register = aarch64_cpu_gdb_read_register; - cc->gdb_write_register = aarch64_cpu_gdb_write_register; - cc->gdb_core_xml_file = "aarch64-core.xml"; - cc->gdb_arch_name = aarch64_gdb_arch_name; - - object_class_property_add_bool(oc, "aarch64", aarch64_cpu_get_aarch64, - aarch64_cpu_set_aarch64); - object_class_property_set_description(oc, "aarch64", - "Set on/off to enable/disable aarch64 " - "execution state "); -} - -static void aarch64_cpu_instance_init(Object *obj) -{ - ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj); - - acc->info->initfn(obj); - arm_cpu_post_init(obj); -} - -static void cpu_register_class_init(ObjectClass *oc, void *data) -{ - ARMCPUClass *acc = ARM_CPU_CLASS(oc); - - acc->info = data; -} - -void aarch64_cpu_register(const ARMCPUInfo *info) -{ - TypeInfo type_info = { - .parent = TYPE_AARCH64_CPU, - .instance_init = aarch64_cpu_instance_init, - .class_init = info->class_init ?: cpu_register_class_init, - .class_data = (void *)info, - }; - - type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name); - type_register(&type_info); - g_free((void *)type_info.name); -} - -static const TypeInfo aarch64_cpu_type_info = { - .name = TYPE_AARCH64_CPU, - .parent = TYPE_ARM_CPU, - .instance_finalize = aarch64_cpu_finalizefn, - .abstract = true, - .class_init = aarch64_cpu_class_init, -}; - static void aarch64_cpu_register_types(void) { size_t i; - type_register_static(&aarch64_cpu_type_info); - for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) { - aarch64_cpu_register(&aarch64_cpus[i]); + arm_cpu_register(&aarch64_cpus[i]); } } diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c index 7d856acddf2..579516e1541 100644 --- a/target/arm/debug_helper.c +++ b/target/arm/debug_helper.c @@ -11,9 +11,11 @@ #include "internals.h" #include "cpu-features.h" #include "cpregs.h" -#include "exec/exec-all.h" -#include "exec/helper-proto.h" -#include "sysemu/tcg.h" +#include "exec/watchpoint.h" +#include "system/tcg.h" + +#define HELPER_H "tcg/helper.h" +#include "exec/helper-proto.h.inc" #ifdef CONFIG_TCG /* Return the Exception Level targeted by debug exceptions. 
*/ @@ -378,7 +380,7 @@ bool arm_debug_check_breakpoint(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; - target_ulong pc; + vaddr pc; int n; /* @@ -875,12 +877,13 @@ static CPAccessResult access_tdcc(CPUARMState *env, const ARMCPRegInfo *ri, (env->cp15.mdcr_el3 & MDCR_TDCC); if (el < 1 && mdscr_el1_tdcc) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } if (el < 2 && (mdcr_el2_tda || mdcr_el2_tdcc)) { return CP_ACCESS_TRAP_EL2; } - if (el < 3 && ((env->cp15.mdcr_el3 & MDCR_TDA) || mdcr_el3_tdcc)) { + if (!arm_is_el3_or_mon(env) && + ((env->cp15.mdcr_el3 & MDCR_TDA) || mdcr_el3_tdcc)) { return CP_ACCESS_TRAP_EL3; } return CP_ACCESS_OK; @@ -937,6 +940,13 @@ static void dbgclaimclr_write(CPUARMState *env, const ARMCPRegInfo *ri, env->cp15.dbgclaim &= ~(value & 0xFF); } +static CPAccessResult access_bogus(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* Always UNDEF, as if this cpreg didn't exist */ + return CP_ACCESS_UNDEFINED; +} + static const ARMCPRegInfo debug_cp_reginfo[] = { /* * DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped @@ -985,11 +995,42 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, .access = PL1_RW, .accessfn = access_tdcc, .type = ARM_CP_CONST, .resetvalue = 0 }, - /* DBGDTRTX_EL0/DBGDTRRX_EL0 depend on direction */ - { .name = "DBGDTR_EL0", .state = ARM_CP_STATE_BOTH, .cp = 14, + /* Architecturally DBGDTRTX is named DBGDTRRX when used for reads */ + { .name = "DBGDTRTX_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 5, .opc2 = 0, .access = PL0_RW, .accessfn = access_tdcc, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DBGDTRTX", .state = ARM_CP_STATE_AA32, .cp = 14, + .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, + .access = PL0_RW, .accessfn = access_tdcc, + .type = ARM_CP_CONST, .resetvalue = 0 }, + /* This is AArch64-only and is a combination of DBGDTRTX and DBGDTRRX */ + { .name = "DBGDTR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 4, .opc2 = 0, + .access = PL0_RW, .accessfn = access_tdcc, + .type = ARM_CP_CONST, .resetvalue = 0 }, + /* + * This is not a real AArch32 register. We used to incorrectly expose + * this due to a QEMU bug; to avoid breaking migration compatibility we + * need to continue to provide it so that we don't fail the inbound + * migration when it tells us about a sysreg that we don't have. + * We set an always-fails .accessfn, which means that the guest doesn't + * actually see this register (it will always UNDEF, identically to if + * there were no cpreg definition for it other than that we won't print + * a LOG_UNIMP message about it), and we set the ARM_CP_NO_GDB flag so the + * gdbstub won't see it either. + * (We can't just set .access = 0, because add_cpreg_to_hashtable() + * helpfully ignores cpregs which aren't accessible to the highest + * implemented EL.) + * + * TODO: implement a system for being able to describe "this register + * can be ignored if it appears in the inbound stream"; then we can + * remove this temporary hack. + */ + { .name = "BOGUS_DBGDTR_EL0", .state = ARM_CP_STATE_AA32, + .cp = 14, .opc1 = 3, .crn = 0, .crm = 5, .opc2 = 0, + .access = PL0_RW, .accessfn = access_bogus, + .type = ARM_CP_CONST | ARM_CP_NO_GDB, .resetvalue = 0 }, /* * OSECCR_EL1 provides a mechanism for an operating system * to access the contents of EDECCR. 
EDECCR is not implemented though, @@ -1036,7 +1077,7 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { { .name = "DBGVCR", .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, .access = PL1_RW, .accessfn = access_tda, - .type = ARM_CP_NOP }, + .type = ARM_CP_CONST, .resetvalue = 0 }, /* * Dummy MDCCINT_EL1, since we don't implement the Debug Communications * Channel but Linux may try to access this register. The 32-bit @@ -1045,7 +1086,7 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, .access = PL1_RW, .accessfn = access_tdcc, - .type = ARM_CP_NOP }, + .type = ARM_CP_CONST, .resetvalue = 0 }, /* * Dummy DBGCLAIM registers. * "The architecture does not define any functionality for the CLAIM tag bits.", @@ -1074,7 +1115,8 @@ static const ARMCPRegInfo debug_aa32_el1_reginfo[] = { { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, .access = PL2_RW, .accessfn = access_dbgvcr32, - .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP }, + .type = ARM_CP_CONST | ARM_CP_EL3_NO_EL2_KEEP, + .resetvalue = 0 }, }; static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c index 554b8736bbf..ce4497ad7c3 100644 --- a/target/arm/gdbstub.c +++ b/target/arm/gdbstub.c @@ -22,7 +22,7 @@ #include "exec/gdbstub.h" #include "gdbstub/helpers.h" #include "gdbstub/commands.h" -#include "sysemu/tcg.h" +#include "system/tcg.h" #include "internals.h" #include "cpu-features.h" #include "cpregs.h" @@ -44,6 +44,12 @@ int arm_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; +#ifdef TARGET_AARCH64 + if (arm_gdbstub_is_aarch64(cpu)) { + return aarch64_cpu_gdb_read_register(cs, mem_buf, n); + } +#endif + if (n < 16) { /* Core integer register. */ return gdb_get_reg32(mem_buf, env->regs[n]); @@ -66,6 +72,12 @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) CPUARMState *env = &cpu->env; uint32_t tmp; +#ifdef TARGET_AARCH64 + if (arm_gdbstub_is_aarch64(cpu)) { + return aarch64_cpu_gdb_write_register(cs, mem_buf, n); + } +#endif + tmp = ldl_p(mem_buf); /* diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c index 5221381cc85..08e28585396 100644 --- a/target/arm/gdbstub64.c +++ b/target/arm/gdbstub64.c @@ -27,6 +27,10 @@ #include #include "mte_user_helper.h" #endif +#ifdef CONFIG_TCG +#include "accel/tcg/cpu-mmu-index.h" +#include "exec/target_page.h" +#endif int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { @@ -111,8 +115,22 @@ int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg) /* 128 bit FP register */ { uint64_t *q = aa64_vfp_qreg(env, reg); - q[0] = ldq_le_p(buf); - q[1] = ldq_le_p(buf + 8); + + /* + * On the wire these are target-endian 128 bit values. + * In the CPU state these are host-order uint64_t values + * with the least-significant one first. This means they're + * the other way around for target_big_endian() (which is + * only true for us for aarch64_be-linux-user). + */ + if (target_big_endian()) { + q[1] = ldq_p(buf); + q[0] = ldq_p(buf + 8); + } else{ + q[0] = ldq_p(buf); + q[1] = ldq_p(buf + 8); + } + return 16; } case 32: @@ -188,10 +206,17 @@ int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg) case 0 ... 
31: { int vq, len = 0; - uint64_t *p = (uint64_t *) buf; for (vq = 0; vq < cpu->sve_max_vq; vq++) { - env->vfp.zregs[reg].d[vq * 2 + 1] = *p++; - env->vfp.zregs[reg].d[vq * 2] = *p++; + if (target_big_endian()) { + env->vfp.zregs[reg].d[vq * 2 + 1] = ldq_p(buf); + buf += 8; + env->vfp.zregs[reg].d[vq * 2] = ldq_p(buf); + } else{ + env->vfp.zregs[reg].d[vq * 2] = ldq_p(buf); + buf += 8; + env->vfp.zregs[reg].d[vq * 2 + 1] = ldq_p(buf); + } + buf += 8; len += 16; } return len; @@ -206,9 +231,9 @@ int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg) { int preg = reg - 34; int vq, len = 0; - uint64_t *p = (uint64_t *) buf; for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) { - env->vfp.pregs[preg].p[vq / 4] = *p++; + env->vfp.pregs[preg].p[vq / 4] = ldq_p(buf); + buf += 8; len += 8; } return len; @@ -430,11 +455,14 @@ int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg) return 0; #endif } +#endif /* CONFIG_USER_ONLY */ +#ifdef CONFIG_TCG static void handle_q_memtag(GArray *params, void *user_ctx) { ARMCPU *cpu = ARM_CPU(user_ctx); CPUARMState *env = &cpu->env; + uint32_t mmu_index; uint64_t addr = gdb_get_cmd_param(params, 0)->val_ull; uint64_t len = gdb_get_cmd_param(params, 1)->val_ul; @@ -458,8 +486,10 @@ static void handle_q_memtag(GArray *params, void *user_ctx) gdb_put_packet("E03"); } + /* Find out the current translation regime for probe. */ + mmu_index = cpu_mmu_index(env_cpu(env), false); /* Note that tags are packed here (2 tags packed in one byte). */ - tags = allocation_tag_mem_probe(env, 0, addr, MMU_DATA_LOAD, 8 /* 64-bit */, + tags = allocation_tag_mem_probe(env, mmu_index, addr, MMU_DATA_LOAD, 1, MMU_DATA_LOAD, true, 0); if (!tags) { /* Address is not in a tagged region. */ @@ -478,13 +508,16 @@ static void handle_q_isaddresstagged(GArray *params, void *user_ctx) { ARMCPU *cpu = ARM_CPU(user_ctx); CPUARMState *env = &cpu->env; + uint32_t mmu_index; uint64_t addr = gdb_get_cmd_param(params, 0)->val_ull; uint8_t *tags; const char *reply; - tags = allocation_tag_mem_probe(env, 0, addr, MMU_DATA_LOAD, 8 /* 64-bit */, + /* Find out the current translation regime for probe. */ + mmu_index = cpu_mmu_index(env_cpu(env), false); + tags = allocation_tag_mem_probe(env, mmu_index, addr, MMU_DATA_LOAD, 1, MMU_DATA_LOAD, true, 0); reply = tags ? "01" : "00"; @@ -495,6 +528,7 @@ static void handle_Q_memtag(GArray *params, void *user_ctx) { ARMCPU *cpu = ARM_CPU(user_ctx); CPUARMState *env = &cpu->env; + uint32_t mmu_index; uint64_t start_addr = gdb_get_cmd_param(params, 0)->val_ull; uint64_t len = gdb_get_cmd_param(params, 1)->val_ul; @@ -527,8 +561,10 @@ static void handle_Q_memtag(GArray *params, void *user_ctx) * Get all tags in the page starting from the tag of the start address. * Note that there are two tags packed into a single byte here. */ - tags = allocation_tag_mem_probe(env, 0, start_addr, MMU_DATA_STORE, - 8 /* 64-bit */, MMU_DATA_STORE, true, 0); + /* Find out the current translation regime for probe. */ + mmu_index = cpu_mmu_index(env_cpu(env), false); + tags = allocation_tag_mem_probe(env, mmu_index, start_addr, MMU_DATA_STORE, + 1, MMU_DATA_STORE, true, 0); if (!tags) { /* Address is not in a tagged region. 
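Both handlers above rely on the packed layout returned by allocation_tag_mem_probe(): one 4-bit allocation tag per 16-byte granule, two tags per byte. A hypothetical helper (the name and the even-granule-in-low-nibble assumption are illustrative, not a QEMU API) showing how a single tag would be pulled out of such a buffer:

#include <stdint.h>

/*
 * One tag per 16-byte granule; even granules sit in the low nibble,
 * odd granules in the high nibble of each packed byte.
 */
static inline uint8_t unpack_mte_tag(const uint8_t *tags, uint64_t addr)
{
    uint64_t granule = addr / 16;       /* which 16-byte granule */
    uint8_t packed = tags[granule / 2]; /* two granules per byte */

    return (granule & 1) ? (packed >> 4) : (packed & 0xf);
}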
*/ gdb_put_packet("E04"); @@ -591,13 +627,13 @@ static const GdbCmdParseEntry cmd_handler_table[NUM_CMDS] = { .need_cpu_context = true }, }; -#endif /* CONFIG_USER_ONLY */ +#endif /* CONFIG_TCG */ void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *qsupported, GPtrArray *qtable, GPtrArray *stable) { -#ifdef CONFIG_USER_ONLY /* MTE */ +#ifdef CONFIG_TCG if (cpu_isar_feature(aa64_mte, cpu)) { g_string_append(qsupported, ";memory-tagging+"); diff --git a/target/arm/gtimer.h b/target/arm/gtimer.h index b992941bef1..d49c63cbf87 100644 --- a/target/arm/gtimer.h +++ b/target/arm/gtimer.h @@ -10,12 +10,14 @@ #define TARGET_ARM_GTIMER_H enum { - GTIMER_PHYS = 0, - GTIMER_VIRT = 1, - GTIMER_HYP = 2, - GTIMER_SEC = 3, - GTIMER_HYPVIRT = 4, -#define NUM_GTIMERS 5 + GTIMER_PHYS = 0, /* CNTP_* ; EL1 physical timer */ + GTIMER_VIRT = 1, /* CNTV_* ; EL1 virtual timer */ + GTIMER_HYP = 2, /* CNTHP_* ; EL2 physical timer */ + GTIMER_SEC = 3, /* CNTPS_* ; EL3 physical timer */ + GTIMER_HYPVIRT = 4, /* CNTHV_* ; EL2 virtual timer ; only if FEAT_VHE */ + GTIMER_S_EL2_PHYS = 5, /* CNTHPS_* ; only if FEAT_SEL2 */ + GTIMER_S_EL2_VIRT = 6, /* CNTHVS_* ; only if FEAT_SEL2 */ +#define NUM_GTIMERS 7 }; #endif diff --git a/target/arm/helper.c b/target/arm/helper.c index 8fb4b474e83..0c1299ff841 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -12,31 +12,35 @@ #include "cpu.h" #include "internals.h" #include "cpu-features.h" -#include "exec/helper-proto.h" +#include "exec/page-protection.h" +#include "exec/mmap-lock.h" #include "qemu/main-loop.h" #include "qemu/timer.h" #include "qemu/bitops.h" -#include "qemu/crc32c.h" #include "qemu/qemu-print.h" -#include "exec/exec-all.h" -#include /* For crc32 */ +#include "exec/cputlb.h" +#include "exec/translation-block.h" #include "hw/irq.h" -#include "sysemu/cpu-timers.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" +#include "system/cpu-timers.h" +#include "exec/icount.h" +#include "system/kvm.h" +#include "system/tcg.h" #include "qapi/error.h" #include "qemu/guest-random.h" #ifdef CONFIG_TCG +#include "accel/tcg/probe.h" +#include "accel/tcg/getpc.h" #include "semihosting/common-semi.h" #endif #include "cpregs.h" #include "target/arm/gtimer.h" -#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ +#define HELPER_H "tcg/helper.h" +#include "exec/helper-proto.h.inc" static void switch_mode(CPUARMState *env, int mode); -static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) +uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) { assert(ri->fieldoffset); if (cpreg_field_is_64bit(ri)) { @@ -219,7 +223,7 @@ static void count_cpreg(gpointer key, gpointer opaque) } } -static gint cpreg_key_compare(gconstpointer a, gconstpointer b) +static gint cpreg_key_compare(gconstpointer a, gconstpointer b, gpointer d) { uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a); uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b); @@ -243,7 +247,7 @@ void init_cpreg_list(ARMCPU *cpu) int arraylen; keys = g_hash_table_get_keys(cpu->cp_regs); - keys = g_list_sort(keys, cpreg_key_compare); + keys = g_list_sort_with_data(keys, cpreg_key_compare, NULL); cpu->cpreg_array_len = 0; @@ -264,7 +268,7 @@ void init_cpreg_list(ARMCPU *cpu) g_list_free(keys); } -static bool arm_pan_enabled(CPUARMState *env) +bool arm_pan_enabled(CPUARMState *env) { if (is_a64(env)) { if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) { @@ -285,7 +289,7 @@ static CPAccessResult access_el3_aa32ns(CPUARMState *env, { if (!is_a64(env) && arm_current_el(env) == 
3 && arm_is_secure_below_el3(env)) { - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } return CP_ACCESS_OK; } @@ -310,26 +314,7 @@ static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, return CP_ACCESS_TRAP_EL3; } /* This will be EL1 NS and EL2 NS, which just UNDEF */ - return CP_ACCESS_TRAP_UNCATEGORIZED; -} - -/* - * Check for traps to performance monitor registers, which are controlled - * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. - */ -static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - int el = arm_current_el(env); - uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); - - if (el < 2 && (mdcr_el2 & MDCR_TPM)) { - return CP_ACCESS_TRAP_EL2; - } - if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { - return CP_ACCESS_TRAP_EL3; - } - return CP_ACCESS_OK; + return CP_ACCESS_UNDEFINED; } /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */ @@ -365,40 +350,6 @@ static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } -/* Check for traps from EL1 due to HCR_EL2.TTLB. */ -static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) { - return CP_ACCESS_TRAP_EL2; - } - return CP_ACCESS_OK; -} - -/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBIS. */ -static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - if (arm_current_el(env) == 1 && - (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBIS))) { - return CP_ACCESS_TRAP_EL2; - } - return CP_ACCESS_OK; -} - -#ifdef TARGET_AARCH64 -/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. */ -static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - if (arm_current_el(env) == 1 && - (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBOS))) { - return CP_ACCESS_TRAP_EL2; - } - return CP_ACCESS_OK; -} -#endif - static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); @@ -438,12 +389,15 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, raw_write(env, ri, value); } -static int alle1_tlbmask(CPUARMState *env) +int alle1_tlbmask(CPUARMState *env) { /* * Note that the 'ALL' scope must invalidate both stage 1 and * stage 2 translations, whereas most other scopes only invalidate * stage 1 translations. + * + * For AArch32 this is only used for TLBIALLNSNH and VTTBR + * writes, so only needs to apply to NS PL1&0, not S PL1&0. 
*/ return (ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_1_PAN | @@ -452,174 +406,6 @@ static int alle1_tlbmask(CPUARMState *env) ARMMMUIdxBit_Stage2_S); } - -/* IS variants of TLB operations must affect all cores */ -static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_all_cpus_synced(cs); -} - -static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_all_cpus_synced(cs); -} - -static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); -} - -static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); -} - -/* - * Non-IS variants of TLB operations are upgraded to - * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to - * force broadcast of these operations. - */ -static bool tlb_force_broadcast(CPUARMState *env) -{ - return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB); -} - -static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate all (TLBIALL) */ - CPUState *cs = env_cpu(env); - - if (tlb_force_broadcast(env)) { - tlb_flush_all_cpus_synced(cs); - } else { - tlb_flush(cs); - } -} - -static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ - CPUState *cs = env_cpu(env); - - value &= TARGET_PAGE_MASK; - if (tlb_force_broadcast(env)) { - tlb_flush_page_all_cpus_synced(cs, value); - } else { - tlb_flush_page(cs, value); - } -} - -static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate by ASID (TLBIASID) */ - CPUState *cs = env_cpu(env); - - if (tlb_force_broadcast(env)) { - tlb_flush_all_cpus_synced(cs); - } else { - tlb_flush(cs); - } -} - -static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ - CPUState *cs = env_cpu(env); - - value &= TARGET_PAGE_MASK; - if (tlb_force_broadcast(env)) { - tlb_flush_page_all_cpus_synced(cs, value); - } else { - tlb_flush_page(cs, value); - } -} - -static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_by_mmuidx(cs, alle1_tlbmask(env)); -} - -static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env)); -} - - -static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2); -} - -static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2); -} - -static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); - - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2); -} - -static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = 
env_cpu(env); - uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); - - tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, - ARMMMUIdxBit_E2); -} - -static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12; - - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2); -} - -static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12; - - tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2); -} - static const ARMCPRegInfo cp_reginfo[] = { /* * Define the secure and non-secure FCSE identifier CP registers @@ -729,22 +515,6 @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = { */ { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, - /* - * MMU TLB control. Note that the wildcarding means we cover not just - * the unified TLB ops but also the dside/iside/inner-shareable variants. - */ - { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, - .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, - .type = ARM_CP_NO_RAW }, - { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, - .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, - .type = ARM_CP_NO_RAW }, - { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, - .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, - .type = ARM_CP_NO_RAW }, - { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, - .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, - .type = ARM_CP_NO_RAW }, { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, @@ -890,2347 +660,1317 @@ static const ARMCPRegInfo v6_cp_reginfo[] = { .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, }; -typedef struct pm_event { - uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */ - /* If the event is supported on this CPU (used to generate PMCEID[01]) */ - bool (*supported)(CPUARMState *); - /* - * Retrieve the current count of the underlying event. The programmed - * counters hold a difference from the return value from this function - */ - uint64_t (*get_count)(CPUARMState *); - /* - * Return how many nanoseconds it will take (at a minimum) for count events - * to occur. A negative value indicates the counter will never overflow, or - * that the counter has otherwise arranged for the overflow bit to be set - * and the PMU interrupt to be raised on overflow. - */ - int64_t (*ns_per_count)(uint64_t); -} pm_event; - -static bool event_always_supported(CPUARMState *env) -{ - return true; -} - -static uint64_t swinc_get_count(CPUARMState *env) -{ - /* - * SW_INCR events are written directly to the pmevcntr's by writes to - * PMSWINC, so there is no underlying count maintained by the PMU itself - */ - return 0; -} - -static int64_t swinc_ns_per(uint64_t ignored) -{ - return -1; -} - -/* - * Return the underlying cycle count for the PMU cycle counters. If we're in - * usermode, simply return 0. 
- */ -static uint64_t cycles_get_count(CPUARMState *env) -{ -#ifndef CONFIG_USER_ONLY - return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), - ARM_CPU_FREQ, NANOSECONDS_PER_SECOND); -#else - return cpu_get_host_ticks(); -#endif -} - -#ifndef CONFIG_USER_ONLY -static int64_t cycles_ns_per(uint64_t cycles) -{ - return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles; -} - -static bool instructions_supported(CPUARMState *env) -{ - /* Precise instruction counting */ - return icount_enabled() == ICOUNT_PRECISE; -} - -static uint64_t instructions_get_count(CPUARMState *env) -{ - assert(icount_enabled() == ICOUNT_PRECISE); - return (uint64_t)icount_get_raw(); -} - -static int64_t instructions_ns_per(uint64_t icount) -{ - assert(icount_enabled() == ICOUNT_PRECISE); - return icount_to_ns((int64_t)icount); -} -#endif - -static bool pmuv3p1_events_supported(CPUARMState *env) -{ - /* For events which are supported in any v8.1 PMU */ - return cpu_isar_feature(any_pmuv3p1, env_archcpu(env)); -} - -static bool pmuv3p4_events_supported(CPUARMState *env) -{ - /* For events which are supported in any v8.1 PMU */ - return cpu_isar_feature(any_pmuv3p4, env_archcpu(env)); -} - -static uint64_t zero_event_get_count(CPUARMState *env) -{ - /* For events which on QEMU never fire, so their count is always zero */ - return 0; -} - -static int64_t zero_event_ns_per(uint64_t cycles) -{ - /* An event which never fires can never overflow */ - return -1; -} - -static const pm_event pm_events[] = { - { .number = 0x000, /* SW_INCR */ - .supported = event_always_supported, - .get_count = swinc_get_count, - .ns_per_count = swinc_ns_per, - }, -#ifndef CONFIG_USER_ONLY - { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */ - .supported = instructions_supported, - .get_count = instructions_get_count, - .ns_per_count = instructions_ns_per, - }, - { .number = 0x011, /* CPU_CYCLES, Cycle */ - .supported = event_always_supported, - .get_count = cycles_get_count, - .ns_per_count = cycles_ns_per, - }, -#endif - { .number = 0x023, /* STALL_FRONTEND */ - .supported = pmuv3p1_events_supported, - .get_count = zero_event_get_count, - .ns_per_count = zero_event_ns_per, - }, - { .number = 0x024, /* STALL_BACKEND */ - .supported = pmuv3p1_events_supported, - .get_count = zero_event_get_count, - .ns_per_count = zero_event_ns_per, - }, - { .number = 0x03c, /* STALL */ - .supported = pmuv3p4_events_supported, - .get_count = zero_event_get_count, - .ns_per_count = zero_event_ns_per, - }, -}; - /* - * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of - * events (i.e. the statistical profiling extension), this implementation - * should first be updated to something sparse instead of the current - * supported_event_map[] array. + * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at. + * We use these to decide whether we need to wrap a write to MDCR_EL2 + * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls. */ -#define MAX_EVENT_ID 0x3c -#define UNSUPPORTED_EVENT UINT16_MAX -static uint16_t supported_event_map[MAX_EVENT_ID + 1]; +#define MDCR_EL2_PMU_ENABLE_BITS \ + (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP) +#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD) -/* - * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map - * of ARM event numbers to indices in our pm_events array. - * - * Note: Events in the 0x40XX range are not currently supported. 
- */ -void pmu_init(ARMCPU *cpu) +static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - unsigned int i; - /* - * Empty supported_event_map and cpu->pmceid[01] before adding supported - * events to them + * Note that even though the AArch64 view of this register has bits + * [10:0] all RES0 we can only mask the bottom 5, to comply with the + * architectural requirements for bits which are RES0 only in some + * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 + * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) */ - for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) { - supported_event_map[i] = UNSUPPORTED_EVENT; - } - cpu->pmceid0 = 0; - cpu->pmceid1 = 0; - - for (i = 0; i < ARRAY_SIZE(pm_events); i++) { - const pm_event *cnt = &pm_events[i]; - assert(cnt->number <= MAX_EVENT_ID); - /* We do not currently support events in the 0x40xx range */ - assert(cnt->number <= 0x3f); - - if (cnt->supported(&cpu->env)) { - supported_event_map[cnt->number] = i; - uint64_t event_mask = 1ULL << (cnt->number & 0x1f); - if (cnt->number & 0x20) { - cpu->pmceid1 |= event_mask; - } else { - cpu->pmceid0 |= event_mask; - } - } - } + raw_write(env, ri, value & ~0x1FULL); } -/* - * Check at runtime whether a PMU event is supported for the current machine - */ -static bool event_supported(uint16_t number) +static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - if (number > MAX_EVENT_ID) { - return false; - } - return supported_event_map[number] != UNSUPPORTED_EVENT; -} + /* Begin with base v8.0 state. */ + uint64_t valid_mask = 0x3fff; + ARMCPU *cpu = env_archcpu(env); + uint64_t changed; -static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ /* - * Performance monitor registers user accessibility is controlled - * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable - * trapping to EL2 or EL3 for other accesses. + * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always + * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64. + * Instead, choose the format based on the mode of EL3. */ - int el = arm_current_el(env); - uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); + if (arm_el_is_aa64(env, 3)) { + value |= SCR_FW | SCR_AW; /* RES1 */ + valid_mask &= ~SCR_NET; /* RES0 */ - if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { - return CP_ACCESS_TRAP; - } - if (el < 2 && (mdcr_el2 & MDCR_TPM)) { - return CP_ACCESS_TRAP_EL2; - } - if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { - return CP_ACCESS_TRAP_EL3; + if (!cpu_isar_feature(aa64_aa32_el1, cpu) && + !cpu_isar_feature(aa64_aa32_el2, cpu)) { + value |= SCR_RW; /* RAO/WI */ + } + if (cpu_isar_feature(aa64_ras, cpu)) { + valid_mask |= SCR_TERR; + } + if (cpu_isar_feature(aa64_lor, cpu)) { + valid_mask |= SCR_TLOR; + } + if (cpu_isar_feature(aa64_pauth, cpu)) { + valid_mask |= SCR_API | SCR_APK; + } + if (cpu_isar_feature(aa64_sel2, cpu)) { + valid_mask |= SCR_EEL2; + } else if (cpu_isar_feature(aa64_rme, cpu)) { + /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). 
*/ + value |= SCR_NS; + } + if (cpu_isar_feature(aa64_mte, cpu)) { + valid_mask |= SCR_ATA; + } + if (cpu_isar_feature(aa64_scxtnum, cpu)) { + valid_mask |= SCR_ENSCXT; + } + if (cpu_isar_feature(aa64_doublefault, cpu)) { + valid_mask |= SCR_EASE | SCR_NMEA; + } + if (cpu_isar_feature(aa64_sme, cpu)) { + valid_mask |= SCR_ENTP2; + } + if (cpu_isar_feature(aa64_hcx, cpu)) { + valid_mask |= SCR_HXEN; + } + if (cpu_isar_feature(aa64_fgt, cpu)) { + valid_mask |= SCR_FGTEN; + } + if (cpu_isar_feature(aa64_rme, cpu)) { + valid_mask |= SCR_NSE | SCR_GPF; + } + if (cpu_isar_feature(aa64_ecv, cpu)) { + valid_mask |= SCR_ECVEN; + } + } else { + valid_mask &= ~(SCR_RW | SCR_ST); + if (cpu_isar_feature(aa32_ras, cpu)) { + valid_mask |= SCR_TERR; + } } - return CP_ACCESS_OK; -} + if (!arm_feature(env, ARM_FEATURE_EL2)) { + valid_mask &= ~SCR_HCE; -static CPAccessResult pmreg_access_xevcntr(CPUARMState *env, - const ARMCPRegInfo *ri, - bool isread) -{ - /* ER: event counter read trap control */ - if (arm_feature(env, ARM_FEATURE_V8) - && arm_current_el(env) == 0 - && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 - && isread) { - return CP_ACCESS_OK; + /* + * On ARMv7, SMD (or SCD as it is called in v7) is only + * supported if EL2 exists. The bit is UNK/SBZP when + * EL2 is unavailable. In QEMU ARMv7, we force it to always zero + * when EL2 is unavailable. + * On ARMv8, this bit is always available. + */ + if (arm_feature(env, ARM_FEATURE_V7) && + !arm_feature(env, ARM_FEATURE_V8)) { + valid_mask &= ~SCR_SMD; + } } - return pmreg_access(env, ri, isread); -} + /* Clear all-context RES0 bits. */ + value &= valid_mask; + changed = env->cp15.scr_el3 ^ value; + env->cp15.scr_el3 = value; -static CPAccessResult pmreg_access_swinc(CPUARMState *env, - const ARMCPRegInfo *ri, - bool isread) -{ - /* SW: software increment write trap control */ - if (arm_feature(env, ARM_FEATURE_V8) - && arm_current_el(env) == 0 - && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 - && !isread) { - return CP_ACCESS_OK; + /* + * If SCR_EL3.{NS,NSE} changes, i.e. change of security state, + * we must invalidate all TLBs below EL3. + */ + if (changed & (SCR_NS | SCR_NSE)) { + tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 | + ARMMMUIdxBit_E20_0 | + ARMMMUIdxBit_E10_1 | + ARMMMUIdxBit_E20_2 | + ARMMMUIdxBit_E10_1_PAN | + ARMMMUIdxBit_E20_2_PAN | + ARMMMUIdxBit_E2)); } - - return pmreg_access(env, ri, isread); } -static CPAccessResult pmreg_access_selr(CPUARMState *env, - const ARMCPRegInfo *ri, - bool isread) +static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri) { - /* ER: event counter read trap control */ - if (arm_feature(env, ARM_FEATURE_V8) - && arm_current_el(env) == 0 - && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { - return CP_ACCESS_OK; - } - - return pmreg_access(env, ri, isread); + /* + * scr_write will set the RES1 bits on an AArch64-only CPU. + * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise. 
+ */ + scr_write(env, ri, 0); } -static CPAccessResult pmreg_access_ccntr(CPUARMState *env, - const ARMCPRegInfo *ri, - bool isread) +static CPAccessResult access_tid4(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) { - /* CR: cycle counter read trap control */ - if (arm_feature(env, ARM_FEATURE_V8) - && arm_current_el(env) == 0 - && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 - && isread) { - return CP_ACCESS_OK; + if (arm_current_el(env) == 1 && + (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) { + return CP_ACCESS_TRAP_EL2; } - return pmreg_access(env, ri, isread); + return CP_ACCESS_OK; } -/* - * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at. - * We use these to decide whether we need to wrap a write to MDCR_EL2 - * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls. - */ -#define MDCR_EL2_PMU_ENABLE_BITS \ - (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP) -#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD) - -/* - * Returns true if the counter (pass 31 for PMCCNTR) should count events using - * the current EL, security state, and register configuration. - */ -static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) +static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) { - uint64_t filter; - bool e, p, u, nsk, nsu, nsh, m; - bool enabled, prohibited = false, filtered; - bool secure = arm_is_secure(env); - int el = arm_current_el(env); - uint64_t mdcr_el2; - uint8_t hpmn; + ARMCPU *cpu = env_archcpu(env); /* - * We might be called for M-profile cores where MDCR_EL2 doesn't - * exist and arm_mdcr_el2_eff() will assert, so this early-exit check - * must be before we read that value. + * Acquire the CSSELR index from the bank corresponding to the CCSIDR + * bank */ - if (!arm_feature(env, ARM_FEATURE_PMU)) { - return false; - } - - mdcr_el2 = arm_mdcr_el2_eff(env); - hpmn = mdcr_el2 & MDCR_HPMN; - - if (!arm_feature(env, ARM_FEATURE_EL2) || - (counter < hpmn || counter == 31)) { - e = env->cp15.c9_pmcr & PMCRE; - } else { - e = mdcr_el2 & MDCR_HPME; - } - enabled = e && (env->cp15.c9_pmcnten & (1 << counter)); - - /* Is event counting prohibited? */ - if (el == 2 && (counter < hpmn || counter == 31)) { - prohibited = mdcr_el2 & MDCR_HPMD; - } - if (secure) { - prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME); - } - - if (counter == 31) { - /* - * The cycle counter defaults to running. PMCR.DP says "disable - * the cycle counter when event counting is prohibited". - * Some MDCR bits disable the cycle counter specifically. - */ - prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP; - if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { - if (secure) { - prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD); - } - if (el == 2) { - prohibited = prohibited || (mdcr_el2 & MDCR_HCCD); - } - } - } - - if (counter == 31) { - filter = env->cp15.pmccfiltr_el0; - } else { - filter = env->cp15.c14_pmevtyper[counter]; - } - - p = filter & PMXEVTYPER_P; - u = filter & PMXEVTYPER_U; - nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK); - nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU); - nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH); - m = arm_el_is_aa64(env, 1) && - arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M); - - if (el == 0) { - filtered = secure ? u : u != nsu; - } else if (el == 1) { - filtered = secure ? 
p : p != nsk; - } else if (el == 2) { - filtered = !nsh; - } else { /* EL3 */ - filtered = m != p; - } - - if (counter != 31) { - /* - * If not checking PMCCNTR, ensure the counter is setup to an event we - * support - */ - uint16_t event = filter & PMXEVTYPER_EVTCOUNT; - if (!event_supported(event)) { - return false; - } - } - - return enabled && !prohibited && !filtered; -} + uint32_t index = A32_BANKED_REG_GET(env, csselr, + ri->secure & ARM_CP_SECSTATE_S); -static void pmu_update_irq(CPUARMState *env) -{ - ARMCPU *cpu = env_archcpu(env); - qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) && - (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); + return cpu->ccsidr[index]; } -static bool pmccntr_clockdiv_enabled(CPUARMState *env) +static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - /* - * Return true if the clock divider is enabled and the cycle counter - * is supposed to tick only once every 64 clock cycles. This is - * controlled by PMCR.D, but if PMCR.LC is set to enable the long - * (64-bit) cycle counter PMCR.D has no effect. - */ - return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD; + raw_write(env, ri, value & 0xf); } -static bool pmevcntr_is_64_bit(CPUARMState *env, int counter) +static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) { - /* Return true if the specified event counter is configured to be 64 bit */ - - /* This isn't intended to be used with the cycle counter */ - assert(counter < 31); - - if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { - return false; - } - - if (arm_feature(env, ARM_FEATURE_EL2)) { - /* - * MDCR_EL2.HLP still applies even when EL2 is disabled in the - * current security state, so we don't use arm_mdcr_el2_eff() here. - */ - bool hlp = env->cp15.mdcr_el2 & MDCR_HLP; - int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN; + CPUState *cs = env_cpu(env); + bool el1 = arm_current_el(env) == 1; + uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0; + uint64_t ret = 0; - if (counter >= hpmn) { - return hlp; + if (hcr_el2 & HCR_IMO) { + if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { + ret |= CPSR_I; } - } - return env->cp15.c9_pmcr & PMCRLP; -} - -/* - * Ensure c15_ccnt is the guest-visible count so that operations such as - * enabling/disabling the counter or filtering, modifying the count itself, - * etc. can be done logically. This is essentially a no-op if the counter is - * not enabled at the time of the call. - */ -static void pmccntr_op_start(CPUARMState *env) -{ - uint64_t cycles = cycles_get_count(env); - - if (pmu_counter_enabled(env, 31)) { - uint64_t eff_cycles = cycles; - if (pmccntr_clockdiv_enabled(env)) { - eff_cycles /= 64; + if (cs->interrupt_request & CPU_INTERRUPT_VINMI) { + ret |= ISR_IS; + ret |= CPSR_I; } - - uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta; - - uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \ - 1ull << 63 : 1ull << 31; - if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) { - env->cp15.c9_pmovsr |= (1ULL << 31); - pmu_update_irq(env); + } else { + if (cs->interrupt_request & CPU_INTERRUPT_HARD) { + ret |= CPSR_I; } - env->cp15.c15_ccnt = new_pmccntr; + if (cs->interrupt_request & CPU_INTERRUPT_NMI) { + ret |= ISR_IS; + ret |= CPSR_I; + } } - env->cp15.c15_ccnt_delta = cycles; -} -/* - * If PMCCNTR is enabled, recalculate the delta between the clock and the - * guest-visible count. A call to pmccntr_op_finish should follow every call to - * pmccntr_op_start. 
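The start/finish pairing described in the comments above boils down to re-basing a stored delta against a free-running clock: "start" materialises the guest-visible count, "finish" folds it back into a delta so the counter keeps advancing while unobserved. A stand-alone sketch of that bookkeeping, with illustrative names rather than QEMU APIs:

#include <stdint.h>

typedef struct {
    uint64_t visible; /* guest-visible count, valid between start and finish */
    uint64_t delta;   /* raw clock minus visible count, valid otherwise */
} demo_counter;

static void demo_op_start(demo_counter *c, uint64_t clock)
{
    c->visible = clock - c->delta; /* materialise the current count */
    c->delta = clock;              /* stash the raw clock for finish */
}

static void demo_op_finish(demo_counter *c)
{
    /* re-base: future (clock - delta) reads continue from 'visible' */
    c->delta = c->delta - c->visible;
}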
- */ -static void pmccntr_op_finish(CPUARMState *env) -{ - if (pmu_counter_enabled(env, 31)) { -#ifndef CONFIG_USER_ONLY - /* Calculate when the counter will next overflow */ - uint64_t remaining_cycles = -env->cp15.c15_ccnt; - if (!(env->cp15.c9_pmcr & PMCRLC)) { - remaining_cycles = (uint32_t)remaining_cycles; + if (hcr_el2 & HCR_FMO) { + if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { + ret |= CPSR_F; } - int64_t overflow_in = cycles_ns_per(remaining_cycles); - - if (overflow_in > 0) { - int64_t overflow_at; - - if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), - overflow_in, &overflow_at)) { - ARMCPU *cpu = env_archcpu(env); - timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); - } + if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) { + ret |= ISR_FS; + ret |= CPSR_F; } -#endif - - uint64_t prev_cycles = env->cp15.c15_ccnt_delta; - if (pmccntr_clockdiv_enabled(env)) { - prev_cycles /= 64; + } else { + if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { + ret |= CPSR_F; } - env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt; - } -} - -static void pmevcntr_op_start(CPUARMState *env, uint8_t counter) -{ - - uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; - uint64_t count = 0; - if (event_supported(event)) { - uint16_t event_idx = supported_event_map[event]; - count = pm_events[event_idx].get_count(env); } - if (pmu_counter_enabled(env, counter)) { - uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; - uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ? - 1ULL << 63 : 1ULL << 31; - - if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) { - env->cp15.c9_pmovsr |= (1 << counter); - pmu_update_irq(env); + if (hcr_el2 & HCR_AMO) { + if (cs->interrupt_request & CPU_INTERRUPT_VSERR) { + ret |= CPSR_A; } - env->cp15.c14_pmevcntr[counter] = new_pmevcntr; } - env->cp15.c14_pmevcntr_delta[counter] = count; -} - -static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter) -{ - if (pmu_counter_enabled(env, counter)) { -#ifndef CONFIG_USER_ONLY - uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; - uint16_t event_idx = supported_event_map[event]; - uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1); - int64_t overflow_in; - - if (!pmevcntr_is_64_bit(env, counter)) { - delta = (uint32_t)delta; - } - overflow_in = pm_events[event_idx].ns_per_count(delta); - - if (overflow_in > 0) { - int64_t overflow_at; - - if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), - overflow_in, &overflow_at)) { - ARMCPU *cpu = env_archcpu(env); - timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); - } - } -#endif - env->cp15.c14_pmevcntr_delta[counter] -= - env->cp15.c14_pmevcntr[counter]; - } + return ret; } -void pmu_op_start(CPUARMState *env) +static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - unsigned int i; - pmccntr_op_start(env); - for (i = 0; i < pmu_num_counters(env); i++) { - pmevcntr_op_start(env, i); + if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) { + return CP_ACCESS_TRAP_EL2; } + + return CP_ACCESS_OK; } -void pmu_op_finish(CPUARMState *env) +static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - unsigned int i; - pmccntr_op_finish(env); - for (i = 0; i < pmu_num_counters(env); i++) { - pmevcntr_op_finish(env, i); + if (arm_feature(env, ARM_FEATURE_V8)) { + return access_aa64_tid1(env, ri, isread); } + + return CP_ACCESS_OK; } -void pmu_pre_el_change(ARMCPU *cpu, void 
*ignored) -{ - pmu_op_start(&cpu->env); -} +static const ARMCPRegInfo v7_cp_reginfo[] = { + /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ + { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, + .access = PL1_W, .type = ARM_CP_NOP }, + { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, + .access = PL1_R, + .accessfn = access_tid4, + .fgt = FGT_CCSIDR_EL1, + .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, + { .name = "CSSELR", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, + .access = PL1_RW, + .accessfn = access_tid4, + .fgt = FGT_CSSELR_EL1, + .writefn = csselr_write, .resetvalue = 0, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), + offsetof(CPUARMState, cp15.csselr_ns) } }, + /* + * Auxiliary ID register: this actually has an IMPDEF value but for now + * just RAZ for all cores: + */ + { .name = "AIDR", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, + .access = PL1_R, .type = ARM_CP_CONST, + .accessfn = access_aa64_tid1, + .fgt = FGT_AIDR_EL1, + .resetvalue = 0 }, + /* + * Auxiliary fault status registers: these also are IMPDEF, and we + * choose to RAZ/WI for all cores. + */ + { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .fgt = FGT_AFSR0_EL1, + .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .fgt = FGT_AFSR1_EL1, + .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1, + .type = ARM_CP_CONST, .resetvalue = 0 }, + /* + * MAIR can just read-as-written because we don't implement caches + * and so don't need to care about memory attributes. + */ + { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .fgt = FGT_MAIR_EL1, + .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1, + .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), + .resetvalue = 0 }, + { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, + .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), + .resetvalue = 0 }, + /* + * For non-long-descriptor page tables these are PRRR and NMRR; + * regardless they still act as reads-as-written for QEMU. + */ + /* + * MAIR0/1 are defined separately from their 64-bit counterpart which + * allows them to assign the correct fieldoffset based on the endianness + * handled in the field definitions. 
+ */ + { .name = "MAIR0", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), + offsetof(CPUARMState, cp15.mair0_ns) }, + .resetfn = arm_cp_reset_ignore }, + { .name = "MAIR1", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), + offsetof(CPUARMState, cp15.mair1_ns) }, + .resetfn = arm_cp_reset_ignore }, + { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, + .fgt = FGT_ISR_EL1, + .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, +}; -void pmu_post_el_change(ARMCPU *cpu, void *ignored) +static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - pmu_op_finish(&cpu->env); + value &= 1; + env->teecr = value; } -void arm_pmu_timer_cb(void *opaque) +static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - ARMCPU *cpu = opaque; - /* - * Update all the counter values based on the current underlying counts, - * triggering interrupts to be raised, if necessary. pmu_op_finish() also - * has the effect of setting the cpu->pmu_timer to the next earliest time a - * counter may expire. + * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE + * at all, so we don't need to check whether we're v8A. */ - pmu_op_start(&cpu->env); - pmu_op_finish(&cpu->env); + if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) && + (env->cp15.hstr_el2 & HSTR_TTEE)) { + return CP_ACCESS_TRAP_EL2; + } + return CP_ACCESS_OK; } -static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - pmu_op_start(env); - - if (value & PMCRC) { - /* The counter has been reset */ - env->cp15.c15_ccnt = 0; + if (arm_current_el(env) == 0 && (env->teecr & 1)) { + return CP_ACCESS_TRAP_EL1; } + return teecr_access(env, ri, isread); +} - if (value & PMCRP) { - unsigned int i; - for (i = 0; i < pmu_num_counters(env); i++) { - env->cp15.c14_pmevcntr[i] = 0; - } - } +static const ARMCPRegInfo t2ee_cp_reginfo[] = { + { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, + .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), + .resetvalue = 0, + .writefn = teecr_write, .accessfn = teecr_access }, + { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, + .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), + .accessfn = teehbr_access, .resetvalue = 0 }, +}; + +static const ARMCPRegInfo v6k_cp_reginfo[] = { + { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, + .access = PL0_RW, + .fgt = FGT_TPIDR_EL0, + .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, + { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL0_RW, + .fgt = FGT_TPIDR_EL0, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), + offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, + .resetfn = arm_cp_reset_ignore }, + { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, + .access = PL0_R | PL1_W, + .fgt = FGT_TPIDRRO_EL0, + .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), + .resetvalue = 0}, + { 
.name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, + .access = PL0_R | PL1_W, + .fgt = FGT_TPIDRRO_EL0, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), + offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, + .resetfn = arm_cp_reset_ignore }, + { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, + .access = PL1_RW, + .fgt = FGT_TPIDR_EL1, + .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, + { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, + .access = PL1_RW, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), + offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, + .resetvalue = 0 }, +}; - env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK; - env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK); +static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) +{ + ARMCPU *cpu = env_archcpu(env); - pmu_op_finish(env); + cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; } -static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - uint64_t pmcr = env->cp15.c9_pmcr; +#ifndef CONFIG_USER_ONLY +static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ /* - * If EL2 is implemented and enabled for the current security state, reads - * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN. + * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. + * Writable only at the highest implemented exception level. */ - if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) { - pmcr &= ~PMCRN_MASK; - pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT; + int el = arm_current_el(env); + uint64_t hcr; + uint32_t cntkctl; + + switch (el) { + case 0: + hcr = arm_hcr_el2_eff(env); + if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { + cntkctl = env->cp15.cnthctl_el2; + } else { + cntkctl = env->cp15.c14_cntkctl; + } + if (!extract32(cntkctl, 0, 2)) { + return CP_ACCESS_TRAP_EL1; + } + break; + case 1: + if (!isread && ri->state == ARM_CP_STATE_AA32 && + arm_is_secure_below_el3(env)) { + /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ + return CP_ACCESS_UNDEFINED; + } + break; + case 2: + case 3: + break; + } + + if (!isread && el < arm_highest_el(env)) { + return CP_ACCESS_UNDEFINED; } - return pmcr; + return CP_ACCESS_OK; } -static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, + bool isread) { - unsigned int i; - uint64_t overflow_mask, new_pmswinc; - - for (i = 0; i < pmu_num_counters(env); i++) { - /* Increment a counter's count iff: */ - if ((value & (1 << i)) && /* counter's bit is set */ - /* counter is enabled and not filtered */ - pmu_counter_enabled(env, i) && - /* counter is SW_INCR */ - (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) { - pmevcntr_op_start(env, i); - - /* - * Detect if this write causes an overflow since we can't predict - * PMSWINC overflows like we can for other events - */ - new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; + unsigned int cur_el = arm_current_el(env); + bool has_el2 = arm_is_el2_enabled(env); + uint64_t hcr = arm_hcr_el2_eff(env); - overflow_mask = pmevcntr_is_64_bit(env, i) ? - 1ULL << 63 : 1ULL << 31; + switch (cur_el) { + case 0: + /* If HCR_EL2. == '11': check CNTHCTL_EL2.EL0[PV]CTEN. 
*/ + if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { + return (extract32(env->cp15.cnthctl_el2, timeridx, 1) + ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); + } - if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) { - env->cp15.c9_pmovsr |= (1 << i); - pmu_update_irq(env); + /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ + if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { + return CP_ACCESS_TRAP_EL1; + } + /* fall through */ + case 1: + /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */ + if (has_el2 && timeridx == GTIMER_PHYS && + (hcr & HCR_E2H + ? !extract32(env->cp15.cnthctl_el2, 10, 1) + : !extract32(env->cp15.cnthctl_el2, 0, 1))) { + return CP_ACCESS_TRAP_EL2; + } + if (has_el2 && timeridx == GTIMER_VIRT) { + if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) { + return CP_ACCESS_TRAP_EL2; } - - env->cp15.c14_pmevcntr[i] = new_pmswinc; - - pmevcntr_op_finish(env, i); } + break; } + return CP_ACCESS_OK; } -static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - uint64_t ret; - pmccntr_op_start(env); - ret = env->cp15.c15_ccnt; - pmccntr_op_finish(env); - return ret; -} - -static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, + bool isread) { - /* - * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and - * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the - * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are - * accessed. - */ - env->cp15.c9_pmselr = value & 0x1f; -} + unsigned int cur_el = arm_current_el(env); + bool has_el2 = arm_is_el2_enabled(env); + uint64_t hcr = arm_hcr_el2_eff(env); -static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - pmccntr_op_start(env); - env->cp15.c15_ccnt = value; - pmccntr_op_finish(env); -} + switch (cur_el) { + case 0: + if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { + /* If HCR_EL2. == '11': check CNTHCTL_EL2.EL0[PV]TEN. */ + return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) + ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); + } -static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - uint64_t cur_val = pmccntr_read(env, NULL); - - pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); -} - -static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - pmccntr_op_start(env); - env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0; - pmccntr_op_finish(env); -} - -static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - pmccntr_op_start(env); - /* M is not accessible from AArch32 */ - env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) | - (value & PMCCFILTR); - pmccntr_op_finish(env); -} + /* + * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from + * EL0 if EL0[PV]TEN is zero. + */ + if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { + return CP_ACCESS_TRAP_EL1; + } + /* fall through */ -static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri) -{ - /* M is not visible in AArch32 */ - return env->cp15.pmccfiltr_el0 & PMCCFILTR; + case 1: + if (has_el2 && timeridx == GTIMER_PHYS) { + if (hcr & HCR_E2H) { + /* If HCR_EL2. == '10': check CNTHCTL_EL2.EL1PTEN. */ + if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { + return CP_ACCESS_TRAP_EL2; + } + } else { + /* If HCR_EL2. == 0: check CNTHCTL_EL2.EL1PCEN. 
*/ + if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { + return CP_ACCESS_TRAP_EL2; + } + } + } + if (has_el2 && timeridx == GTIMER_VIRT) { + if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) { + return CP_ACCESS_TRAP_EL2; + } + } + break; + } + return CP_ACCESS_OK; } -static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static CPAccessResult gt_pct_access(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) { - pmu_op_start(env); - value &= pmu_counter_mask(env); - env->cp15.c9_pmcnten |= value; - pmu_op_finish(env); + return gt_counter_access(env, GTIMER_PHYS, isread); } -static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static CPAccessResult gt_vct_access(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) { - pmu_op_start(env); - value &= pmu_counter_mask(env); - env->cp15.c9_pmcnten &= ~value; - pmu_op_finish(env); + return gt_counter_access(env, GTIMER_VIRT, isread); } -static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - value &= pmu_counter_mask(env); - env->cp15.c9_pmovsr &= ~value; - pmu_update_irq(env); + return gt_timer_access(env, GTIMER_PHYS, isread); } -static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - value &= pmu_counter_mask(env); - env->cp15.c9_pmovsr |= value; - pmu_update_irq(env); + return gt_timer_access(env, GTIMER_VIRT, isread); } -static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value, const uint8_t counter) +static CPAccessResult gt_stimer_access(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) { - if (counter == 31) { - pmccfiltr_write(env, ri, value); - } else if (counter < pmu_num_counters(env)) { - pmevcntr_op_start(env, counter); - - /* - * If this counter's event type is changing, store the current - * underlying count for the new type in c14_pmevcntr_delta[counter] so - * pmevcntr_op_finish has the correct baseline when it converts back to - * a delta. - */ - uint16_t old_event = env->cp15.c14_pmevtyper[counter] & - PMXEVTYPER_EVTCOUNT; - uint16_t new_event = value & PMXEVTYPER_EVTCOUNT; - if (old_event != new_event) { - uint64_t count = 0; - if (event_supported(new_event)) { - uint16_t event_idx = supported_event_map[new_event]; - count = pm_events[event_idx].get_count(env); - } - env->cp15.c14_pmevcntr_delta[counter] = count; - } - - env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK; - pmevcntr_op_finish(env, counter); - } /* - * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when - * PMSELR value is equal to or greater than the number of implemented - * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI. + * The AArch64 register view of the secure physical timer is + * always accessible from EL3, and configurably accessible from + * Secure EL1. 
*/ + switch (arm_current_el(env)) { + case 1: + if (!arm_is_secure(env)) { + return CP_ACCESS_UNDEFINED; + } + if (arm_is_el2_enabled(env)) { + return CP_ACCESS_UNDEFINED; + } + if (!(env->cp15.scr_el3 & SCR_ST)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; + case 0: + case 2: + return CP_ACCESS_UNDEFINED; + case 3: + return CP_ACCESS_OK; + default: + g_assert_not_reached(); + } } -static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri, - const uint8_t counter) +static CPAccessResult gt_sel2timer_access(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) { - if (counter == 31) { - return env->cp15.pmccfiltr_el0; - } else if (counter < pmu_num_counters(env)) { - return env->cp15.c14_pmevtyper[counter]; - } else { - /* - * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER - * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write(). - */ - return 0; + /* + * The AArch64 register view of the secure EL2 timers are mostly + * accessible from EL3 and EL2 although can also be trapped to EL2 + * from EL1 depending on nested virt config. + */ + switch (arm_current_el(env)) { + case 0: /* UNDEFINED */ + return CP_ACCESS_UNDEFINED; + case 1: + if (!arm_is_secure(env)) { + /* UNDEFINED */ + return CP_ACCESS_UNDEFINED; + } else if (arm_hcr_el2_eff(env) & HCR_NV) { + /* Aarch64.SystemAccessTrap(EL2, 0x18) */ + return CP_ACCESS_TRAP_EL2; + } + /* UNDEFINED */ + return CP_ACCESS_UNDEFINED; + case 2: + if (!arm_is_secure(env)) { + /* UNDEFINED */ + return CP_ACCESS_UNDEFINED; + } + return CP_ACCESS_OK; + case 3: + if (env->cp15.scr_el3 & SCR_EEL2) { + return CP_ACCESS_OK; + } else { + return CP_ACCESS_UNDEFINED; + } + default: + g_assert_not_reached(); } } -static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +uint64_t gt_get_countervalue(CPUARMState *env) { - uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); - pmevtyper_write(env, ri, value, counter); + ARMCPU *cpu = env_archcpu(env); + + return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu); } -static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void gt_update_irq(ARMCPU *cpu, int timeridx) { - uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); - env->cp15.c14_pmevtyper[counter] = value; + CPUARMState *env = &cpu->env; + uint64_t cnthctl = env->cp15.cnthctl_el2; + ARMSecuritySpace ss = arm_security_space(env); + /* ISTATUS && !IMASK */ + int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4; /* - * pmevtyper_rawwrite is called between a pair of pmu_op_start and - * pmu_op_finish calls when loading saved state for a migration. Because - * we're potentially updating the type of event here, the value written to - * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a - * different counter type. Therefore, we need to set this value to the - * current count for the counter type we're writing so that pmu_op_finish - * has the correct count for its calculation. + * If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK. + * It is RES0 in Secure and NonSecure state. 
*/ - uint16_t event = value & PMXEVTYPER_EVTCOUNT; - if (event_supported(event)) { - uint16_t event_idx = supported_event_map[event]; - env->cp15.c14_pmevcntr_delta[counter] = - pm_events[event_idx].get_count(env); + if ((ss == ARMSS_Root || ss == ARMSS_Realm) && + ((timeridx == GTIMER_VIRT && (cnthctl & R_CNTHCTL_CNTVMASK_MASK)) || + (timeridx == GTIMER_PHYS && (cnthctl & R_CNTHCTL_CNTPMASK_MASK)))) { + irqstate = 0; } -} -static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri) -{ - uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); - return pmevtyper_read(env, ri, counter); + qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); + trace_arm_gt_update_irq(timeridx, irqstate); } -static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +void gt_rme_post_el_change(ARMCPU *cpu, void *ignored) { - pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31); + /* + * Changing security state between Root and Secure/NonSecure, which may + * happen when switching EL, can change the effective value of CNTHCTL_EL2 + * mask bits. Update the IRQ state accordingly. + */ + gt_update_irq(cpu, GTIMER_VIRT); + gt_update_irq(cpu, GTIMER_PHYS); } -static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri) +static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env) { - return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31); + if ((env->cp15.scr_el3 & SCR_ECVEN) && + FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) && + arm_is_el2_enabled(env) && + (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { + return env->cp15.cntpoff_el2; + } + return 0; } -static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value, uint8_t counter) +static uint64_t gt_indirect_access_timer_offset(CPUARMState *env, int timeridx) { - if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { - /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */ - value &= MAKE_64BIT_MASK(0, 32); - } - if (counter < pmu_num_counters(env)) { - pmevcntr_op_start(env, counter); - env->cp15.c14_pmevcntr[counter] = value; - pmevcntr_op_finish(env, counter); - } /* - * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR - * are CONSTRAINED UNPREDICTABLE. + * Return the timer offset to use for indirect accesses to the timer. + * This is the Offset value as defined in D12.2.4.1 "Operation of the + * CompareValue views of the timers". + * + * The condition here is not always the same as the condition for + * whether to apply an offset register when doing a direct read of + * the counter sysreg; those conditions are described in the + * access pseudocode for each counter register. 
*/ + switch (timeridx) { + case GTIMER_PHYS: + return gt_phys_raw_cnt_offset(env); + case GTIMER_VIRT: + return env->cp15.cntvoff_el2; + case GTIMER_HYP: + case GTIMER_SEC: + case GTIMER_HYPVIRT: + case GTIMER_S_EL2_PHYS: + case GTIMER_S_EL2_VIRT: + return 0; + default: + g_assert_not_reached(); + } } -static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri, - uint8_t counter) +uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx) { - if (counter < pmu_num_counters(env)) { - uint64_t ret; - pmevcntr_op_start(env, counter); - ret = env->cp15.c14_pmevcntr[counter]; - pmevcntr_op_finish(env, counter); - if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { - /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */ - ret &= MAKE_64BIT_MASK(0, 32); + /* + * Return the timer offset to use for direct accesses to the + * counter registers CNTPCT and CNTVCT, and for direct accesses + * to the CNT*_TVAL registers. + * + * This isn't exactly the same as the indirect-access offset, + * because here we also care about what EL the register access + * is being made from. + * + * This corresponds to the access pseudocode for the registers. + */ + uint64_t hcr; + + switch (timeridx) { + case GTIMER_PHYS: + if (arm_current_el(env) >= 2) { + return 0; } - return ret; - } else { - /* - * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR - * are CONSTRAINED UNPREDICTABLE. - */ + return gt_phys_raw_cnt_offset(env); + case GTIMER_VIRT: + switch (arm_current_el(env)) { + case 2: + hcr = arm_hcr_el2_eff(env); + if (hcr & HCR_E2H) { + return 0; + } + break; + case 0: + hcr = arm_hcr_el2_eff(env); + if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { + return 0; + } + break; + } + return env->cp15.cntvoff_el2; + case GTIMER_HYP: + case GTIMER_SEC: + case GTIMER_HYPVIRT: + case GTIMER_S_EL2_PHYS: + case GTIMER_S_EL2_VIRT: return 0; + default: + g_assert_not_reached(); } } -static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void gt_recalc_timer(ARMCPU *cpu, int timeridx) { - uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); - pmevcntr_write(env, ri, value, counter); -} + ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; -static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) -{ - uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); - return pmevcntr_read(env, ri, counter); -} + if (gt->ctl & 1) { + /* + * Timer enabled: calculate and set current ISTATUS, irq, and + * reset timer to when ISTATUS next has to change + */ + uint64_t offset = gt_indirect_access_timer_offset(&cpu->env, timeridx); + uint64_t count = gt_get_countervalue(&cpu->env); + /* Note that this must be unsigned 64 bit arithmetic: */ + int istatus = count - offset >= gt->cval; + uint64_t nexttick; -static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); - assert(counter < pmu_num_counters(env)); - env->cp15.c14_pmevcntr[counter] = value; - pmevcntr_write(env, ri, value, counter); + gt->ctl = deposit32(gt->ctl, 2, 1, istatus); + + if (istatus) { + /* + * Next transition is when (count - offset) rolls back over to 0. + * If offset > count then this is when count == offset; + * if offset <= count then this is when count == offset + 2^64 + * For the latter case we set nexttick to an "as far in future + * as possible" value and let the code below handle it. 
+ */ + if (offset > count) { + nexttick = offset; + } else { + nexttick = UINT64_MAX; + } + } else { + /* + * Next transition is when (count - offset) == cval, i.e. + * when count == (cval + offset). + * If that would overflow, then again we set up the next interrupt + * for "as far in the future as possible" for the code below. + */ + if (uadd64_overflow(gt->cval, offset, &nexttick)) { + nexttick = UINT64_MAX; + } + } + /* + * Note that the desired next expiry time might be beyond the + * signed-64-bit range of a QEMUTimer -- in this case we just + * set the timer for as far in the future as possible. When the + * timer expires we will reset the timer for any remaining period. + */ + if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) { + timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); + } else { + timer_mod(cpu->gt_timer[timeridx], nexttick); + } + trace_arm_gt_recalc(timeridx, nexttick); + } else { + /* Timer disabled: ISTATUS and timer output always clear */ + gt->ctl &= ~4; + timer_del(cpu->gt_timer[timeridx]); + trace_arm_gt_recalc_disabled(timeridx); + } + gt_update_irq(cpu, timeridx); } -static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri) +static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, + int timeridx) { - uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); - assert(counter < pmu_num_counters(env)); - return env->cp15.c14_pmevcntr[counter]; + ARMCPU *cpu = env_archcpu(env); + + timer_del(cpu->gt_timer[timeridx]); } -static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) { - pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31); + uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_PHYS); + return gt_get_countervalue(env) - offset; } -static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri) +static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) { - return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31); + uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_VIRT); + return gt_get_countervalue(env) - offset; } -static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + int timeridx, + uint64_t value) { - if (arm_feature(env, ARM_FEATURE_V8)) { - env->cp15.c9_pmuserenr = value & 0xf; - } else { - env->cp15.c9_pmuserenr = value & 1; - } + trace_arm_gt_cval_write(timeridx, value); + env->cp15.c14_timer[timeridx].cval = value; + gt_recalc_timer(env_archcpu(env), timeridx); } -static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static uint64_t do_tval_read(CPUARMState *env, int timeridx, uint64_t offset) { - /* We have no event counters so only the C bit can be changed */ - value &= pmu_counter_mask(env); - env->cp15.c9_pminten |= value; - pmu_update_irq(env); + return (uint32_t)(env->cp15.c14_timer[timeridx].cval - + (gt_get_countervalue(env) - offset)); } -static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, + int timeridx) { - value &= pmu_counter_mask(env); - env->cp15.c9_pminten &= ~value; - pmu_update_irq(env); + uint64_t offset = gt_direct_access_timer_offset(env, timeridx); + + return do_tval_read(env, timeridx, offset); } -static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void 
do_tval_write(CPUARMState *env, int timeridx, uint64_t value, + uint64_t offset) { - /* - * Note that even though the AArch64 view of this register has bits - * [10:0] all RES0 we can only mask the bottom 5, to comply with the - * architectural requirements for bits which are RES0 only in some - * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 - * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) - */ - raw_write(env, ri, value & ~0x1FULL); + trace_arm_gt_tval_write(timeridx, value); + env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + + sextract64(value, 0, 32); + gt_recalc_timer(env_archcpu(env), timeridx); } -static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + int timeridx, + uint64_t value) { - /* Begin with base v8.0 state. */ - uint64_t valid_mask = 0x3fff; - ARMCPU *cpu = env_archcpu(env); - uint64_t changed; - - /* - * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always - * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64. - * Instead, choose the format based on the mode of EL3. - */ - if (arm_el_is_aa64(env, 3)) { - value |= SCR_FW | SCR_AW; /* RES1 */ - valid_mask &= ~SCR_NET; /* RES0 */ + uint64_t offset = gt_direct_access_timer_offset(env, timeridx); - if (!cpu_isar_feature(aa64_aa32_el1, cpu) && - !cpu_isar_feature(aa64_aa32_el2, cpu)) { - value |= SCR_RW; /* RAO/WI */ - } - if (cpu_isar_feature(aa64_ras, cpu)) { - valid_mask |= SCR_TERR; - } - if (cpu_isar_feature(aa64_lor, cpu)) { - valid_mask |= SCR_TLOR; - } - if (cpu_isar_feature(aa64_pauth, cpu)) { - valid_mask |= SCR_API | SCR_APK; - } - if (cpu_isar_feature(aa64_sel2, cpu)) { - valid_mask |= SCR_EEL2; - } else if (cpu_isar_feature(aa64_rme, cpu)) { - /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */ - value |= SCR_NS; - } - if (cpu_isar_feature(aa64_mte, cpu)) { - valid_mask |= SCR_ATA; - } - if (cpu_isar_feature(aa64_scxtnum, cpu)) { - valid_mask |= SCR_ENSCXT; - } - if (cpu_isar_feature(aa64_doublefault, cpu)) { - valid_mask |= SCR_EASE | SCR_NMEA; - } - if (cpu_isar_feature(aa64_sme, cpu)) { - valid_mask |= SCR_ENTP2; - } - if (cpu_isar_feature(aa64_hcx, cpu)) { - valid_mask |= SCR_HXEN; - } - if (cpu_isar_feature(aa64_fgt, cpu)) { - valid_mask |= SCR_FGTEN; - } - if (cpu_isar_feature(aa64_rme, cpu)) { - valid_mask |= SCR_NSE | SCR_GPF; - } - if (cpu_isar_feature(aa64_ecv, cpu)) { - valid_mask |= SCR_ECVEN; - } - } else { - valid_mask &= ~(SCR_RW | SCR_ST); - if (cpu_isar_feature(aa32_ras, cpu)) { - valid_mask |= SCR_TERR; - } - } + do_tval_write(env, timeridx, value, offset); +} - if (!arm_feature(env, ARM_FEATURE_EL2)) { - valid_mask &= ~SCR_HCE; +static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + int timeridx, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; + trace_arm_gt_ctl_write(timeridx, value); + env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); + if ((oldval ^ value) & 1) { + /* Enable toggled */ + gt_recalc_timer(cpu, timeridx); + } else if ((oldval ^ value) & 2) { /* - * On ARMv7, SMD (or SCD as it is called in v7) is only - * supported if EL2 exists. The bit is UNK/SBZP when - * EL2 is unavailable. In QEMU ARMv7, we force it to always zero - * when EL2 is unavailable. - * On ARMv8, this bit is always available. 
+ * IMASK toggled: don't need to recalculate, + * just set the interrupt line based on ISTATUS */ - if (arm_feature(env, ARM_FEATURE_V7) && - !arm_feature(env, ARM_FEATURE_V8)) { - valid_mask &= ~SCR_SMD; - } - } - - /* Clear all-context RES0 bits. */ - value &= valid_mask; - changed = env->cp15.scr_el3 ^ value; - env->cp15.scr_el3 = value; - - /* - * If SCR_EL3.{NS,NSE} changes, i.e. change of security state, - * we must invalidate all TLBs below EL3. - */ - if (changed & (SCR_NS | SCR_NSE)) { - tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 | - ARMMMUIdxBit_E20_0 | - ARMMMUIdxBit_E10_1 | - ARMMMUIdxBit_E20_2 | - ARMMMUIdxBit_E10_1_PAN | - ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E2)); + trace_arm_gt_imask_toggle(timeridx); + gt_update_irq(cpu, timeridx); } } -static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri) +static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) { - /* - * scr_write will set the RES1 bits on an AArch64-only CPU. - * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise. - */ - scr_write(env, ri, 0); + gt_timer_reset(env, ri, GTIMER_PHYS); } -static CPAccessResult access_tid4(CPUARMState *env, - const ARMCPRegInfo *ri, - bool isread) +static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - if (arm_current_el(env) == 1 && - (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) { - return CP_ACCESS_TRAP_EL2; - } - - return CP_ACCESS_OK; + gt_cval_write(env, ri, GTIMER_PHYS, value); } -static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) +static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { - ARMCPU *cpu = env_archcpu(env); - - /* - * Acquire the CSSELR index from the bank corresponding to the CCSIDR - * bank - */ - uint32_t index = A32_BANKED_REG_GET(env, csselr, - ri->secure & ARM_CP_SECSTATE_S); - - return cpu->ccsidr[index]; + return gt_tval_read(env, ri, GTIMER_PHYS); } -static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - raw_write(env, ri, value & 0xf); + gt_tval_write(env, ri, GTIMER_PHYS, value); } -static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) +static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - CPUState *cs = env_cpu(env); - bool el1 = arm_current_el(env) == 1; - uint64_t hcr_el2 = el1 ? 
arm_hcr_el2_eff(env) : 0; - uint64_t ret = 0; - - if (hcr_el2 & HCR_IMO) { - if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { - ret |= CPSR_I; - } - if (cs->interrupt_request & CPU_INTERRUPT_VINMI) { - ret |= ISR_IS; - ret |= CPSR_I; - } - } else { - if (cs->interrupt_request & CPU_INTERRUPT_HARD) { - ret |= CPSR_I; - } - - if (cs->interrupt_request & CPU_INTERRUPT_NMI) { - ret |= ISR_IS; - ret |= CPSR_I; - } - } - - if (hcr_el2 & HCR_FMO) { - if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { - ret |= CPSR_F; - } - if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) { - ret |= ISR_FS; - ret |= CPSR_F; - } - } else { - if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { - ret |= CPSR_F; - } - } + gt_ctl_write(env, ri, GTIMER_PHYS, value); +} - if (hcr_el2 & HCR_AMO) { - if (cs->interrupt_request & CPU_INTERRUPT_VSERR) { - ret |= CPSR_A; - } +static int gt_phys_redir_timeridx(CPUARMState *env) +{ + switch (arm_mmu_idx(env)) { + case ARMMMUIdx_E20_0: + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + return GTIMER_HYP; + default: + return GTIMER_PHYS; } - - return ret; } -static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +static int gt_virt_redir_timeridx(CPUARMState *env) { - if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) { - return CP_ACCESS_TRAP_EL2; + switch (arm_mmu_idx(env)) { + case ARMMMUIdx_E20_0: + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + return GTIMER_HYPVIRT; + default: + return GTIMER_VIRT; } - - return CP_ACCESS_OK; } -static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +static uint64_t gt_phys_redir_cval_read(CPUARMState *env, + const ARMCPRegInfo *ri) { - if (arm_feature(env, ARM_FEATURE_V8)) { - return access_aa64_tid1(env, ri, isread); - } - - return CP_ACCESS_OK; + int timeridx = gt_phys_redir_timeridx(env); + return env->cp15.c14_timer[timeridx].cval; } -static const ARMCPRegInfo v7_cp_reginfo[] = { - /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ - { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, - .access = PL1_W, .type = ARM_CP_NOP }, - /* - * Performance monitors are implementation defined in v7, - * but with an ARM recommended set of registers, which we - * follow. - * - * Performance registers fall into three categories: - * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) - * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) - * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) - * For the cases controlled by PMUSERENR we must set .access to PL0_RW - * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
- */ - { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, - .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO, - .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), - .writefn = pmcntenset_write, - .accessfn = pmreg_access, - .fgt = FGT_PMCNTEN, - .raw_writefn = raw_write }, - { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, - .access = PL0_RW, .accessfn = pmreg_access, - .fgt = FGT_PMCNTEN, - .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, - .writefn = pmcntenset_write, .raw_writefn = raw_write }, - { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, - .access = PL0_RW, - .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), - .accessfn = pmreg_access, - .fgt = FGT_PMCNTEN, - .writefn = pmcntenclr_write, - .type = ARM_CP_ALIAS | ARM_CP_IO }, - { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, - .access = PL0_RW, .accessfn = pmreg_access, - .fgt = FGT_PMCNTEN, - .type = ARM_CP_ALIAS | ARM_CP_IO, - .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), - .writefn = pmcntenclr_write }, - { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, - .access = PL0_RW, .type = ARM_CP_IO, - .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), - .accessfn = pmreg_access, - .fgt = FGT_PMOVS, - .writefn = pmovsr_write, - .raw_writefn = raw_write }, - { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, - .access = PL0_RW, .accessfn = pmreg_access, - .fgt = FGT_PMOVS, - .type = ARM_CP_ALIAS | ARM_CP_IO, - .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), - .writefn = pmovsr_write, - .raw_writefn = raw_write }, - { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, - .access = PL0_W, .accessfn = pmreg_access_swinc, - .fgt = FGT_PMSWINC_EL0, - .type = ARM_CP_NO_RAW | ARM_CP_IO, - .writefn = pmswinc_write }, - { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, - .access = PL0_W, .accessfn = pmreg_access_swinc, - .fgt = FGT_PMSWINC_EL0, - .type = ARM_CP_NO_RAW | ARM_CP_IO, - .writefn = pmswinc_write }, - { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, - .access = PL0_RW, .type = ARM_CP_ALIAS, - .fgt = FGT_PMSELR_EL0, - .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), - .accessfn = pmreg_access_selr, .writefn = pmselr_write, - .raw_writefn = raw_write}, - { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, - .access = PL0_RW, .accessfn = pmreg_access_selr, - .fgt = FGT_PMSELR_EL0, - .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), - .writefn = pmselr_write, .raw_writefn = raw_write, }, - { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, - .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, - .fgt = FGT_PMCCNTR_EL0, - .readfn = pmccntr_read, .writefn = pmccntr_write32, - .accessfn = pmreg_access_ccntr }, - { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, - .access = PL0_RW, .accessfn = pmreg_access_ccntr, - .fgt = FGT_PMCCNTR_EL0, - .type = ARM_CP_IO, - .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), - .readfn = pmccntr_read, .writefn = pmccntr_write, - .raw_readfn = raw_read, .raw_writefn = raw_write, }, - { .name = 
"PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, - .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, - .access = PL0_RW, .accessfn = pmreg_access, - .fgt = FGT_PMCCFILTR_EL0, - .type = ARM_CP_ALIAS | ARM_CP_IO, - .resetvalue = 0, }, - { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, - .writefn = pmccfiltr_write, .raw_writefn = raw_write, - .access = PL0_RW, .accessfn = pmreg_access, - .fgt = FGT_PMCCFILTR_EL0, - .type = ARM_CP_IO, - .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), - .resetvalue = 0, }, - { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, - .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, - .accessfn = pmreg_access, - .fgt = FGT_PMEVTYPERN_EL0, - .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, - { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, - .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, - .accessfn = pmreg_access, - .fgt = FGT_PMEVTYPERN_EL0, - .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, - { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, - .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, - .accessfn = pmreg_access_xevcntr, - .fgt = FGT_PMEVCNTRN_EL0, - .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, - { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, - .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, - .accessfn = pmreg_access_xevcntr, - .fgt = FGT_PMEVCNTRN_EL0, - .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, - { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, - .access = PL0_R | PL1_RW, .accessfn = access_tpm, - .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), - .resetvalue = 0, - .writefn = pmuserenr_write, .raw_writefn = raw_write }, - { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, - .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, - .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), - .resetvalue = 0, - .writefn = pmuserenr_write, .raw_writefn = raw_write }, - { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, - .access = PL1_RW, .accessfn = access_tpm, - .fgt = FGT_PMINTEN, - .type = ARM_CP_ALIAS | ARM_CP_IO, - .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), - .resetvalue = 0, - .writefn = pmintenset_write, .raw_writefn = raw_write }, - { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, - .access = PL1_RW, .accessfn = access_tpm, - .fgt = FGT_PMINTEN, - .type = ARM_CP_IO, - .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), - .writefn = pmintenset_write, .raw_writefn = raw_write, - .resetvalue = 0x0 }, - { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, - .access = PL1_RW, .accessfn = access_tpm, - .fgt = FGT_PMINTEN, - .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW, - .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), - .writefn = pmintenclr_write, }, - { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, - .access = PL1_RW, .accessfn = access_tpm, - .fgt = FGT_PMINTEN, - .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW, - .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), - .writefn = pmintenclr_write }, 
- { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, - .access = PL1_R, - .accessfn = access_tid4, - .fgt = FGT_CCSIDR_EL1, - .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, - { .name = "CSSELR", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, - .access = PL1_RW, - .accessfn = access_tid4, - .fgt = FGT_CSSELR_EL1, - .writefn = csselr_write, .resetvalue = 0, - .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), - offsetof(CPUARMState, cp15.csselr_ns) } }, - /* - * Auxiliary ID register: this actually has an IMPDEF value but for now - * just RAZ for all cores: - */ - { .name = "AIDR", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, - .access = PL1_R, .type = ARM_CP_CONST, - .accessfn = access_aa64_tid1, - .fgt = FGT_AIDR_EL1, - .resetvalue = 0 }, - /* - * Auxiliary fault status registers: these also are IMPDEF, and we - * choose to RAZ/WI for all cores. - */ - { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .fgt = FGT_AFSR0_EL1, - .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1, - .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .fgt = FGT_AFSR1_EL1, - .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1, - .type = ARM_CP_CONST, .resetvalue = 0 }, - /* - * MAIR can just read-as-written because we don't implement caches - * and so don't need to care about memory attributes. - */ - { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .fgt = FGT_MAIR_EL1, - .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1, - .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), - .resetvalue = 0 }, - { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, - .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), - .resetvalue = 0 }, - /* - * For non-long-descriptor page tables these are PRRR and NMRR; - * regardless they still act as reads-as-written for QEMU. - */ - /* - * MAIR0/1 are defined separately from their 64-bit counterpart which - * allows them to assign the correct fieldoffset based on the endianness - * handled in the field definitions. 
- */ - { .name = "MAIR0", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), - offsetof(CPUARMState, cp15.mair0_ns) }, - .resetfn = arm_cp_reset_ignore }, - { .name = "MAIR1", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), - offsetof(CPUARMState, cp15.mair1_ns) }, - .resetfn = arm_cp_reset_ignore }, - { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, - .fgt = FGT_ISR_EL1, - .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, - /* 32 bit ITLB invalidates */ - { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbiall_write }, - { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbimva_write }, - { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbiasid_write }, - /* 32 bit DTLB invalidates */ - { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbiall_write }, - { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbimva_write }, - { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbiasid_write }, - /* 32 bit TLB invalidates */ - { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbiall_write }, - { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbimva_write }, - { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbiasid_write }, - { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbimvaa_write }, -}; - -static const ARMCPRegInfo v7mp_cp_reginfo[] = { - /* 32 bit TLB invalidates, Inner Shareable */ - { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, - .writefn = tlbiall_is_write }, - { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, - .writefn = tlbimva_is_write }, - { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, - .writefn = tlbiasid_is_write }, - { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, - .writefn = tlbimvaa_is_write }, -}; - -static const ARMCPRegInfo pmovsset_cp_reginfo[] = { - /* PMOVSSET is not implemented in v7 before v7ve */ - { 
.name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, - .access = PL0_RW, .accessfn = pmreg_access, - .fgt = FGT_PMOVS, - .type = ARM_CP_ALIAS | ARM_CP_IO, - .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), - .writefn = pmovsset_write, - .raw_writefn = raw_write }, - { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, - .access = PL0_RW, .accessfn = pmreg_access, - .fgt = FGT_PMOVS, - .type = ARM_CP_ALIAS | ARM_CP_IO, - .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), - .writefn = pmovsset_write, - .raw_writefn = raw_write }, -}; - -static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - value &= 1; - env->teecr = value; + int timeridx = gt_phys_redir_timeridx(env); + gt_cval_write(env, ri, timeridx, value); } -static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +static uint64_t gt_phys_redir_tval_read(CPUARMState *env, + const ARMCPRegInfo *ri) { - /* - * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE - * at all, so we don't need to check whether we're v8A. - */ - if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) && - (env->cp15.hstr_el2 & HSTR_TTEE)) { - return CP_ACCESS_TRAP_EL2; - } - return CP_ACCESS_OK; + int timeridx = gt_phys_redir_timeridx(env); + return gt_tval_read(env, ri, timeridx); } -static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - if (arm_current_el(env) == 0 && (env->teecr & 1)) { - return CP_ACCESS_TRAP; - } - return teecr_access(env, ri, isread); + int timeridx = gt_phys_redir_timeridx(env); + gt_tval_write(env, ri, timeridx, value); } -static const ARMCPRegInfo t2ee_cp_reginfo[] = { - { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, - .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), - .resetvalue = 0, - .writefn = teecr_write, .accessfn = teecr_access }, - { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, - .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), - .accessfn = teehbr_access, .resetvalue = 0 }, -}; - -static const ARMCPRegInfo v6k_cp_reginfo[] = { - { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, - .access = PL0_RW, - .fgt = FGT_TPIDR_EL0, - .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, - { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, - .access = PL0_RW, - .fgt = FGT_TPIDR_EL0, - .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), - offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, - .resetfn = arm_cp_reset_ignore }, - { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, - .access = PL0_R | PL1_W, - .fgt = FGT_TPIDRRO_EL0, - .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), - .resetvalue = 0}, - { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, - .access = PL0_R | PL1_W, - .fgt = FGT_TPIDRRO_EL0, - .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), - offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, - .resetfn = arm_cp_reset_ignore }, - { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, 
.crm = 0, - .access = PL1_RW, - .fgt = FGT_TPIDR_EL1, - .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, - { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, - .access = PL1_RW, - .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), - offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, - .resetvalue = 0 }, -}; +static uint64_t gt_phys_redir_ctl_read(CPUARMState *env, + const ARMCPRegInfo *ri) +{ + int timeridx = gt_phys_redir_timeridx(env); + return env->cp15.c14_timer[timeridx].ctl; +} -static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) +static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - ARMCPU *cpu = env_archcpu(env); + int timeridx = gt_phys_redir_timeridx(env); + gt_ctl_write(env, ri, timeridx, value); +} - cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; +static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + gt_timer_reset(env, ri, GTIMER_VIRT); } -#ifndef CONFIG_USER_ONLY +static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_cval_write(env, ri, GTIMER_VIRT, value); +} -static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { /* - * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. - * Writable only at the highest implemented exception level. + * This is CNTV_TVAL_EL02; unlike the underlying CNTV_TVAL_EL0 + * we always apply CNTVOFF_EL2. Special case that here rather + * than going into the generic gt_tval_read() and then having + * to re-detect that it's this register. + * Note that the accessfn/perms mean we know we're at EL2 or EL3 here. */ - int el = arm_current_el(env); - uint64_t hcr; - uint32_t cntkctl; - - switch (el) { - case 0: - hcr = arm_hcr_el2_eff(env); - if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { - cntkctl = env->cp15.cnthctl_el2; - } else { - cntkctl = env->cp15.c14_cntkctl; - } - if (!extract32(cntkctl, 0, 2)) { - return CP_ACCESS_TRAP; - } - break; - case 1: - if (!isread && ri->state == ARM_CP_STATE_AA32 && - arm_is_secure_below_el3(env)) { - /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ - return CP_ACCESS_TRAP_UNCATEGORIZED; - } - break; - case 2: - case 3: - break; - } - - if (!isread && el < arm_highest_el(env)) { - return CP_ACCESS_TRAP_UNCATEGORIZED; - } - - return CP_ACCESS_OK; + return do_tval_read(env, GTIMER_VIRT, env->cp15.cntvoff_el2); } -static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, - bool isread) +static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - unsigned int cur_el = arm_current_el(env); - bool has_el2 = arm_is_el2_enabled(env); - uint64_t hcr = arm_hcr_el2_eff(env); - - switch (cur_el) { - case 0: - /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */ - if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { - return (extract32(env->cp15.cnthctl_el2, timeridx, 1) - ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); - } + /* Similarly for writes to CNTV_TVAL_EL02 */ + do_tval_write(env, GTIMER_VIRT, value, env->cp15.cntvoff_el2); +} - /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ - if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { - return CP_ACCESS_TRAP; - } - /* fall through */ - case 1: - /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. 
*/ - if (has_el2 && timeridx == GTIMER_PHYS && - (hcr & HCR_E2H - ? !extract32(env->cp15.cnthctl_el2, 10, 1) - : !extract32(env->cp15.cnthctl_el2, 0, 1))) { - return CP_ACCESS_TRAP_EL2; - } - if (has_el2 && timeridx == GTIMER_VIRT) { - if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) { - return CP_ACCESS_TRAP_EL2; - } - } - break; - } - return CP_ACCESS_OK; +static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_ctl_write(env, ri, GTIMER_VIRT, value); } -static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, - bool isread) +static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - unsigned int cur_el = arm_current_el(env); - bool has_el2 = arm_is_el2_enabled(env); - uint64_t hcr = arm_hcr_el2_eff(env); + ARMCPU *cpu = env_archcpu(env); + uint32_t oldval = env->cp15.cnthctl_el2; + uint32_t valid_mask = + R_CNTHCTL_EL0PCTEN_E2H1_MASK | + R_CNTHCTL_EL0VCTEN_E2H1_MASK | + R_CNTHCTL_EVNTEN_MASK | + R_CNTHCTL_EVNTDIR_MASK | + R_CNTHCTL_EVNTI_MASK | + R_CNTHCTL_EL0VTEN_MASK | + R_CNTHCTL_EL0PTEN_MASK | + R_CNTHCTL_EL1PCTEN_E2H1_MASK | + R_CNTHCTL_EL1PTEN_MASK; - switch (cur_el) { - case 0: - if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { - /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */ - return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) - ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); - } + if (cpu_isar_feature(aa64_rme, cpu)) { + valid_mask |= R_CNTHCTL_CNTVMASK_MASK | R_CNTHCTL_CNTPMASK_MASK; + } + if (cpu_isar_feature(aa64_ecv_traps, cpu)) { + valid_mask |= + R_CNTHCTL_EL1TVT_MASK | + R_CNTHCTL_EL1TVCT_MASK | + R_CNTHCTL_EL1NVPCT_MASK | + R_CNTHCTL_EL1NVVCT_MASK | + R_CNTHCTL_EVNTIS_MASK; + } + if (cpu_isar_feature(aa64_ecv, cpu)) { + valid_mask |= R_CNTHCTL_ECV_MASK; + } - /* - * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from - * EL0 if EL0[PV]TEN is zero. - */ - if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { - return CP_ACCESS_TRAP; - } - /* fall through */ + /* Clear RES0 bits */ + value &= valid_mask; - case 1: - if (has_el2 && timeridx == GTIMER_PHYS) { - if (hcr & HCR_E2H) { - /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */ - if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { - return CP_ACCESS_TRAP_EL2; - } - } else { - /* If HCR_EL2.<E2H,TGE> == 0: check CNTHCTL_EL2.EL1PCEN. 
*/ - if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { - return CP_ACCESS_TRAP_EL2; - } - } - } - if (has_el2 && timeridx == GTIMER_VIRT) { - if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) { - return CP_ACCESS_TRAP_EL2; - } - } - break; + raw_write(env, ri, value); + + if ((oldval ^ value) & R_CNTHCTL_CNTVMASK_MASK) { + gt_update_irq(cpu, GTIMER_VIRT); + } else if ((oldval ^ value) & R_CNTHCTL_CNTPMASK_MASK) { + gt_update_irq(cpu, GTIMER_PHYS); } - return CP_ACCESS_OK; } -static CPAccessResult gt_pct_access(CPUARMState *env, - const ARMCPRegInfo *ri, - bool isread) +static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - return gt_counter_access(env, GTIMER_PHYS, isread); + ARMCPU *cpu = env_archcpu(env); + + trace_arm_gt_cntvoff_write(value); + raw_write(env, ri, value); + gt_recalc_timer(cpu, GTIMER_VIRT); } -static CPAccessResult gt_vct_access(CPUARMState *env, - const ARMCPRegInfo *ri, - bool isread) +static uint64_t gt_virt_redir_cval_read(CPUARMState *env, + const ARMCPRegInfo *ri) { - return gt_counter_access(env, GTIMER_VIRT, isread); + int timeridx = gt_virt_redir_timeridx(env); + return env->cp15.c14_timer[timeridx].cval; } -static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - return gt_timer_access(env, GTIMER_PHYS, isread); + int timeridx = gt_virt_redir_timeridx(env); + gt_cval_write(env, ri, timeridx, value); } -static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +static uint64_t gt_virt_redir_tval_read(CPUARMState *env, + const ARMCPRegInfo *ri) { - return gt_timer_access(env, GTIMER_VIRT, isread); + int timeridx = gt_virt_redir_timeridx(env); + return gt_tval_read(env, ri, timeridx); } -static CPAccessResult gt_stimer_access(CPUARMState *env, - const ARMCPRegInfo *ri, - bool isread) +static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - /* - * The AArch64 register view of the secure physical timer is - * always accessible from EL3, and configurably accessible from - * Secure EL1. - */ - switch (arm_current_el(env)) { - case 1: - if (!arm_is_secure(env)) { - return CP_ACCESS_TRAP; - } - if (!(env->cp15.scr_el3 & SCR_ST)) { - return CP_ACCESS_TRAP_EL3; - } - return CP_ACCESS_OK; - case 0: - case 2: - return CP_ACCESS_TRAP; - case 3: - return CP_ACCESS_OK; - default: - g_assert_not_reached(); - } + int timeridx = gt_virt_redir_timeridx(env); + gt_tval_write(env, ri, timeridx, value); } -uint64_t gt_get_countervalue(CPUARMState *env) +static uint64_t gt_virt_redir_ctl_read(CPUARMState *env, + const ARMCPRegInfo *ri) { - ARMCPU *cpu = env_archcpu(env); - - return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu); + int timeridx = gt_virt_redir_timeridx(env); + return env->cp15.c14_timer[timeridx].ctl; } -static void gt_update_irq(ARMCPU *cpu, int timeridx) +static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - CPUARMState *env = &cpu->env; - uint64_t cnthctl = env->cp15.cnthctl_el2; - ARMSecuritySpace ss = arm_security_space(env); - /* ISTATUS && !IMASK */ - int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4; + int timeridx = gt_virt_redir_timeridx(env); + gt_ctl_write(env, ri, timeridx, value); +} - /* - * If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK. - * It is RES0 in Secure and NonSecure state. 
- */ - if ((ss == ARMSS_Root || ss == ARMSS_Realm) && - ((timeridx == GTIMER_VIRT && (cnthctl & R_CNTHCTL_CNTVMASK_MASK)) || - (timeridx == GTIMER_PHYS && (cnthctl & R_CNTHCTL_CNTPMASK_MASK)))) { - irqstate = 0; - } +static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + gt_timer_reset(env, ri, GTIMER_HYP); +} - qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); - trace_arm_gt_update_irq(timeridx, irqstate); +static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_cval_write(env, ri, GTIMER_HYP, value); } -void gt_rme_post_el_change(ARMCPU *cpu, void *ignored) +static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { - /* - * Changing security state between Root and Secure/NonSecure, which may - * happen when switching EL, can change the effective value of CNTHCTL_EL2 - * mask bits. Update the IRQ state accordingly. - */ - gt_update_irq(cpu, GTIMER_VIRT); - gt_update_irq(cpu, GTIMER_PHYS); + return gt_tval_read(env, ri, GTIMER_HYP); } -static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env) +static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - if ((env->cp15.scr_el3 & SCR_ECVEN) && - FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) && - arm_is_el2_enabled(env) && - (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { - return env->cp15.cntpoff_el2; - } - return 0; + gt_tval_write(env, ri, GTIMER_HYP, value); } -static uint64_t gt_phys_cnt_offset(CPUARMState *env) +static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - if (arm_current_el(env) >= 2) { - return 0; - } - return gt_phys_raw_cnt_offset(env); + gt_ctl_write(env, ri, GTIMER_HYP, value); } -static void gt_recalc_timer(ARMCPU *cpu, int timeridx) +static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) { - ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; + gt_timer_reset(env, ri, GTIMER_SEC); +} - if (gt->ctl & 1) { - /* - * Timer enabled: calculate and set current ISTATUS, irq, and - * reset timer to when ISTATUS next has to change - */ - uint64_t offset = timeridx == GTIMER_VIRT ? - cpu->env.cp15.cntvoff_el2 : gt_phys_raw_cnt_offset(&cpu->env); - uint64_t count = gt_get_countervalue(&cpu->env); - /* Note that this must be unsigned 64 bit arithmetic: */ - int istatus = count - offset >= gt->cval; - uint64_t nexttick; +static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_cval_write(env, ri, GTIMER_SEC, value); +} - gt->ctl = deposit32(gt->ctl, 2, 1, istatus); - - if (istatus) { - /* - * Next transition is when (count - offset) rolls back over to 0. - * If offset > count then this is when count == offset; - * if offset <= count then this is when count == offset + 2^64 - * For the latter case we set nexttick to an "as far in future - * as possible" value and let the code below handle it. - */ - if (offset > count) { - nexttick = offset; - } else { - nexttick = UINT64_MAX; - } - } else { - /* - * Next transition is when (count - offset) == cval, i.e. - * when count == (cval + offset). - * If that would overflow, then again we set up the next interrupt - * for "as far in the future as possible" for the code below. 
- */ - if (uadd64_overflow(gt->cval, offset, &nexttick)) { - nexttick = UINT64_MAX; - } - } - /* - * Note that the desired next expiry time might be beyond the - * signed-64-bit range of a QEMUTimer -- in this case we just - * set the timer for as far in the future as possible. When the - * timer expires we will reset the timer for any remaining period. - */ - if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) { - timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); - } else { - timer_mod(cpu->gt_timer[timeridx], nexttick); - } - trace_arm_gt_recalc(timeridx, nexttick); - } else { - /* Timer disabled: ISTATUS and timer output always clear */ - gt->ctl &= ~4; - timer_del(cpu->gt_timer[timeridx]); - trace_arm_gt_recalc_disabled(timeridx); - } - gt_update_irq(cpu, timeridx); -} - -static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, - int timeridx) +static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { - ARMCPU *cpu = env_archcpu(env); - - timer_del(cpu->gt_timer[timeridx]); + return gt_tval_read(env, ri, GTIMER_SEC); } -static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) +static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - return gt_get_countervalue(env) - gt_phys_cnt_offset(env); + gt_tval_write(env, ri, GTIMER_SEC, value); } -uint64_t gt_virt_cnt_offset(CPUARMState *env) +static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - uint64_t hcr; - - switch (arm_current_el(env)) { - case 2: - hcr = arm_hcr_el2_eff(env); - if (hcr & HCR_E2H) { - return 0; - } - break; - case 0: - hcr = arm_hcr_el2_eff(env); - if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { - return 0; - } - break; - } - - return env->cp15.cntvoff_el2; + gt_ctl_write(env, ri, GTIMER_SEC, value); } -static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) +static void gt_sec_pel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) { - return gt_get_countervalue(env) - gt_virt_cnt_offset(env); + gt_timer_reset(env, ri, GTIMER_S_EL2_PHYS); } -static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, - int timeridx, - uint64_t value) +static void gt_sec_pel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - trace_arm_gt_cval_write(timeridx, value); - env->cp15.c14_timer[timeridx].cval = value; - gt_recalc_timer(env_archcpu(env), timeridx); + gt_cval_write(env, ri, GTIMER_S_EL2_PHYS, value); } -static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, - int timeridx) +static uint64_t gt_sec_pel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { - uint64_t offset = 0; - - switch (timeridx) { - case GTIMER_VIRT: - case GTIMER_HYPVIRT: - offset = gt_virt_cnt_offset(env); - break; - case GTIMER_PHYS: - offset = gt_phys_cnt_offset(env); - break; - } - - return (uint32_t)(env->cp15.c14_timer[timeridx].cval - - (gt_get_countervalue(env) - offset)); + return gt_tval_read(env, ri, GTIMER_S_EL2_PHYS); } -static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, - int timeridx, - uint64_t value) +static void gt_sec_pel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - uint64_t offset = 0; - - switch (timeridx) { - case GTIMER_VIRT: - case GTIMER_HYPVIRT: - offset = gt_virt_cnt_offset(env); - break; - case GTIMER_PHYS: - offset = gt_phys_cnt_offset(env); - break; - } - - trace_arm_gt_tval_write(timeridx, value); - env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + - 
sextract64(value, 0, 32); - gt_recalc_timer(env_archcpu(env), timeridx); + gt_tval_write(env, ri, GTIMER_S_EL2_PHYS, value); } -static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, - int timeridx, - uint64_t value) +static void gt_sec_pel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - ARMCPU *cpu = env_archcpu(env); - uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; - - trace_arm_gt_ctl_write(timeridx, value); - env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); - if ((oldval ^ value) & 1) { - /* Enable toggled */ - gt_recalc_timer(cpu, timeridx); - } else if ((oldval ^ value) & 2) { - /* - * IMASK toggled: don't need to recalculate, - * just set the interrupt line based on ISTATUS - */ - trace_arm_gt_imask_toggle(timeridx); - gt_update_irq(cpu, timeridx); - } + gt_ctl_write(env, ri, GTIMER_S_EL2_PHYS, value); } -static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) +static void gt_sec_vel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) { - gt_timer_reset(env, ri, GTIMER_PHYS); + gt_timer_reset(env, ri, GTIMER_S_EL2_VIRT); } -static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void gt_sec_vel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - gt_cval_write(env, ri, GTIMER_PHYS, value); + gt_cval_write(env, ri, GTIMER_S_EL2_VIRT, value); } -static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) +static uint64_t gt_sec_vel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { - return gt_tval_read(env, ri, GTIMER_PHYS); + return gt_tval_read(env, ri, GTIMER_S_EL2_VIRT); } -static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void gt_sec_vel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - gt_tval_write(env, ri, GTIMER_PHYS, value); + gt_tval_write(env, ri, GTIMER_S_EL2_VIRT, value); } -static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, +static void gt_sec_vel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - gt_ctl_write(env, ri, GTIMER_PHYS, value); + gt_ctl_write(env, ri, GTIMER_S_EL2_VIRT, value); } -static int gt_phys_redir_timeridx(CPUARMState *env) +static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) { - switch (arm_mmu_idx(env)) { - case ARMMMUIdx_E20_0: - case ARMMMUIdx_E20_2: - case ARMMMUIdx_E20_2_PAN: - return GTIMER_HYP; - default: - return GTIMER_PHYS; - } + gt_timer_reset(env, ri, GTIMER_HYPVIRT); } -static int gt_virt_redir_timeridx(CPUARMState *env) +static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - switch (arm_mmu_idx(env)) { - case ARMMMUIdx_E20_0: - case ARMMMUIdx_E20_2: - case ARMMMUIdx_E20_2_PAN: - return GTIMER_HYPVIRT; - default: - return GTIMER_VIRT; - } + gt_cval_write(env, ri, GTIMER_HYPVIRT, value); } -static uint64_t gt_phys_redir_cval_read(CPUARMState *env, - const ARMCPRegInfo *ri) +static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { - int timeridx = gt_phys_redir_timeridx(env); - return env->cp15.c14_timer[timeridx].cval; + return gt_tval_read(env, ri, GTIMER_HYPVIRT); } -static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - int timeridx = gt_phys_redir_timeridx(env); - gt_cval_write(env, ri, timeridx, value); + gt_tval_write(env, ri, 
GTIMER_HYPVIRT, value); } -static uint64_t gt_phys_redir_tval_read(CPUARMState *env, - const ARMCPRegInfo *ri) +static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - int timeridx = gt_phys_redir_timeridx(env); - return gt_tval_read(env, ri, timeridx); + gt_ctl_write(env, ri, GTIMER_HYPVIRT, value); } -static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +void arm_gt_ptimer_cb(void *opaque) { - int timeridx = gt_phys_redir_timeridx(env); - gt_tval_write(env, ri, timeridx, value); -} + ARMCPU *cpu = opaque; -static uint64_t gt_phys_redir_ctl_read(CPUARMState *env, - const ARMCPRegInfo *ri) -{ - int timeridx = gt_phys_redir_timeridx(env); - return env->cp15.c14_timer[timeridx].ctl; + gt_recalc_timer(cpu, GTIMER_PHYS); } -static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +void arm_gt_vtimer_cb(void *opaque) { - int timeridx = gt_phys_redir_timeridx(env); - gt_ctl_write(env, ri, timeridx, value); -} + ARMCPU *cpu = opaque; -static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) -{ - gt_timer_reset(env, ri, GTIMER_VIRT); + gt_recalc_timer(cpu, GTIMER_VIRT); } -static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +void arm_gt_htimer_cb(void *opaque) { - gt_cval_write(env, ri, GTIMER_VIRT, value); + ARMCPU *cpu = opaque; + + gt_recalc_timer(cpu, GTIMER_HYP); } -static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) +void arm_gt_stimer_cb(void *opaque) { - return gt_tval_read(env, ri, GTIMER_VIRT); + ARMCPU *cpu = opaque; + + gt_recalc_timer(cpu, GTIMER_SEC); } -static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +void arm_gt_sel2timer_cb(void *opaque) { - gt_tval_write(env, ri, GTIMER_VIRT, value); + ARMCPU *cpu = opaque; + + gt_recalc_timer(cpu, GTIMER_S_EL2_PHYS); } -static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +void arm_gt_sel2vtimer_cb(void *opaque) { - gt_ctl_write(env, ri, GTIMER_VIRT, value); + ARMCPU *cpu = opaque; + + gt_recalc_timer(cpu, GTIMER_S_EL2_VIRT); } -static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +void arm_gt_hvtimer_cb(void *opaque) { - ARMCPU *cpu = env_archcpu(env); - uint32_t oldval = env->cp15.cnthctl_el2; - uint32_t valid_mask = - R_CNTHCTL_EL0PCTEN_E2H1_MASK | - R_CNTHCTL_EL0VCTEN_E2H1_MASK | - R_CNTHCTL_EVNTEN_MASK | - R_CNTHCTL_EVNTDIR_MASK | - R_CNTHCTL_EVNTI_MASK | - R_CNTHCTL_EL0VTEN_MASK | - R_CNTHCTL_EL0PTEN_MASK | - R_CNTHCTL_EL1PCTEN_E2H1_MASK | - R_CNTHCTL_EL1PTEN_MASK; - - if (cpu_isar_feature(aa64_rme, cpu)) { - valid_mask |= R_CNTHCTL_CNTVMASK_MASK | R_CNTHCTL_CNTPMASK_MASK; - } - if (cpu_isar_feature(aa64_ecv_traps, cpu)) { - valid_mask |= - R_CNTHCTL_EL1TVT_MASK | - R_CNTHCTL_EL1TVCT_MASK | - R_CNTHCTL_EL1NVPCT_MASK | - R_CNTHCTL_EL1NVVCT_MASK | - R_CNTHCTL_EVNTIS_MASK; - } - if (cpu_isar_feature(aa64_ecv, cpu)) { - valid_mask |= R_CNTHCTL_ECV_MASK; - } - - /* Clear RES0 bits */ - value &= valid_mask; - - raw_write(env, ri, value); - - if ((oldval ^ value) & R_CNTHCTL_CNTVMASK_MASK) { - gt_update_irq(cpu, GTIMER_VIRT); - } else if ((oldval ^ value) & R_CNTHCTL_CNTPMASK_MASK) { - gt_update_irq(cpu, GTIMER_PHYS); - } -} - -static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - - trace_arm_gt_cntvoff_write(value); - raw_write(env, ri, value); - 
gt_recalc_timer(cpu, GTIMER_VIRT); -} - -static uint64_t gt_virt_redir_cval_read(CPUARMState *env, - const ARMCPRegInfo *ri) -{ - int timeridx = gt_virt_redir_timeridx(env); - return env->cp15.c14_timer[timeridx].cval; -} - -static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - int timeridx = gt_virt_redir_timeridx(env); - gt_cval_write(env, ri, timeridx, value); -} - -static uint64_t gt_virt_redir_tval_read(CPUARMState *env, - const ARMCPRegInfo *ri) -{ - int timeridx = gt_virt_redir_timeridx(env); - return gt_tval_read(env, ri, timeridx); -} - -static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - int timeridx = gt_virt_redir_timeridx(env); - gt_tval_write(env, ri, timeridx, value); -} - -static uint64_t gt_virt_redir_ctl_read(CPUARMState *env, - const ARMCPRegInfo *ri) -{ - int timeridx = gt_virt_redir_timeridx(env); - return env->cp15.c14_timer[timeridx].ctl; -} - -static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - int timeridx = gt_virt_redir_timeridx(env); - gt_ctl_write(env, ri, timeridx, value); -} - -static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) -{ - gt_timer_reset(env, ri, GTIMER_HYP); -} - -static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - gt_cval_write(env, ri, GTIMER_HYP, value); -} - -static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return gt_tval_read(env, ri, GTIMER_HYP); -} - -static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - gt_tval_write(env, ri, GTIMER_HYP, value); -} - -static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - gt_ctl_write(env, ri, GTIMER_HYP, value); -} - -static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) -{ - gt_timer_reset(env, ri, GTIMER_SEC); -} - -static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - gt_cval_write(env, ri, GTIMER_SEC, value); -} - -static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return gt_tval_read(env, ri, GTIMER_SEC); -} - -static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - gt_tval_write(env, ri, GTIMER_SEC, value); -} - -static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - gt_ctl_write(env, ri, GTIMER_SEC, value); -} - -static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) -{ - gt_timer_reset(env, ri, GTIMER_HYPVIRT); -} - -static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - gt_cval_write(env, ri, GTIMER_HYPVIRT, value); -} - -static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return gt_tval_read(env, ri, GTIMER_HYPVIRT); -} - -static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - gt_tval_write(env, ri, GTIMER_HYPVIRT, value); -} - -static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - gt_ctl_write(env, ri, GTIMER_HYPVIRT, value); -} - -void arm_gt_ptimer_cb(void *opaque) -{ - ARMCPU *cpu = opaque; - - gt_recalc_timer(cpu, GTIMER_PHYS); -} - -void arm_gt_vtimer_cb(void *opaque) -{ - ARMCPU *cpu = opaque; - - gt_recalc_timer(cpu, GTIMER_VIRT); -} - -void arm_gt_htimer_cb(void *opaque) -{ - ARMCPU *cpu = opaque; - - gt_recalc_timer(cpu, GTIMER_HYP); -} - -void 
arm_gt_stimer_cb(void *opaque) -{ - ARMCPU *cpu = opaque; - - gt_recalc_timer(cpu, GTIMER_SEC); -} - -void arm_gt_hvtimer_cb(void *opaque) -{ - ARMCPU *cpu = opaque; + ARMCPU *cpu = opaque; gt_recalc_timer(cpu, GTIMER_HYPVIRT); } @@ -3548,448 +2288,57 @@ static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) } } -#ifndef CONFIG_USER_ONLY -/* get_phys_addr() isn't present for user-mode-only targets */ - -static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +/* Return basic MPU access permission bits. */ +static uint32_t simple_mpu_ap_bits(uint32_t val) { - if (ri->opc2 & 4) { - /* - * The ATS12NSO* operations must trap to EL3 or EL2 if executed in - * Secure EL1 (which can only happen if EL3 is AArch64). - * They are simply UNDEF if executed from NS EL1. - * They function normally from EL2 or EL3. - */ - if (arm_current_el(env) == 1) { - if (arm_is_secure_below_el3(env)) { - if (env->cp15.scr_el3 & SCR_EEL2) { - return CP_ACCESS_TRAP_EL2; - } - return CP_ACCESS_TRAP_EL3; - } - return CP_ACCESS_TRAP_UNCATEGORIZED; - } + uint32_t ret; + uint32_t mask; + int i; + ret = 0; + mask = 3; + for (i = 0; i < 16; i += 2) { + ret |= (val >> i) & mask; + mask <<= 2; } - return CP_ACCESS_OK; + return ret; } -#ifdef CONFIG_TCG -static int par_el1_shareability(GetPhysAddrResult *res) +/* Pad basic MPU access permission bits to extended format. */ +static uint32_t extended_mpu_ap_bits(uint32_t val) { - /* - * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC - * memory -- see pseudocode PAREncodeShareability(). - */ - if (((res->cacheattrs.attrs & 0xf0) == 0) || - res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) { - return 2; + uint32_t ret; + uint32_t mask; + int i; + ret = 0; + mask = 3; + for (i = 0; i < 16; i += 2) { + ret |= (val & mask) << i; + mask <<= 2; } - return res->cacheattrs.shareability; + return ret; } -static uint64_t do_ats_write(CPUARMState *env, uint64_t value, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - ARMSecuritySpace ss) +static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - bool ret; - uint64_t par64; - bool format64 = false; - ARMMMUFaultInfo fi = {}; - GetPhysAddrResult res = {}; + env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); +} - /* - * I_MXTJT: Granule protection checks are not performed on the final address - * of a successful translation. - */ - ret = get_phys_addr_with_space_nogpc(env, value, access_type, mmu_idx, ss, - &res, &fi); +static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); +} - /* - * ATS operations only do S1 or S1+S2 translations, so we never - * have to deal with the ARMCacheAttrs format for S2 only. - */ - assert(!res.cacheattrs.is_s2_format); +static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); +} - if (ret) { - /* - * Some kinds of translation fault must cause exceptions rather - * than being reported in the PAR. - */ - int current_el = arm_current_el(env); - int target_el; - uint32_t syn, fsr, fsc; - bool take_exc = false; - - if (fi.s1ptw && current_el == 1 - && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { - /* - * Synchronous stage 2 fault on an access made as part of the - * translation table walk for AT S1E0* or AT S1E1* insn - * executed from NS EL1. 
If this is a synchronous external abort - * and SCR_EL3.EA == 1, then we take a synchronous external abort - * to EL3. Otherwise the fault is taken as an exception to EL2, - * and HPFAR_EL2 holds the faulting IPA. - */ - if (fi.type == ARMFault_SyncExternalOnWalk && - (env->cp15.scr_el3 & SCR_EA)) { - target_el = 3; - } else { - env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; - if (arm_is_secure_below_el3(env) && fi.s1ns) { - env->cp15.hpfar_el2 |= HPFAR_NS; - } - target_el = 2; - } - take_exc = true; - } else if (fi.type == ARMFault_SyncExternalOnWalk) { - /* - * Synchronous external aborts during a translation table walk - * are taken as Data Abort exceptions. - */ - if (fi.stage2) { - if (current_el == 3) { - target_el = 3; - } else { - target_el = 2; - } - } else { - target_el = exception_target_el(env); - } - take_exc = true; - } - - if (take_exc) { - /* Construct FSR and FSC using same logic as arm_deliver_fault() */ - if (target_el == 2 || arm_el_is_aa64(env, target_el) || - arm_s1_regime_using_lpae_format(env, mmu_idx)) { - fsr = arm_fi_to_lfsc(&fi); - fsc = extract32(fsr, 0, 6); - } else { - fsr = arm_fi_to_sfsc(&fi); - fsc = 0x3f; - } - /* - * Report exception with ESR indicating a fault due to a - * translation table walk for a cache maintenance instruction. - */ - syn = syn_data_abort_no_iss(current_el == target_el, 0, - fi.ea, 1, fi.s1ptw, 1, fsc); - env->exception.vaddress = value; - env->exception.fsr = fsr; - raise_exception(env, EXCP_DATA_ABORT, syn, target_el); - } - } - - if (is_a64(env)) { - format64 = true; - } else if (arm_feature(env, ARM_FEATURE_LPAE)) { - /* - * ATS1Cxx: - * * TTBCR.EAE determines whether the result is returned using the - * 32-bit or the 64-bit PAR format - * * Instructions executed in Hyp mode always use the 64bit format - * - * ATS1S2NSOxx uses the 64bit format if any of the following is true: - * * The Non-secure TTBCR.EAE bit is set to 1 - * * The implementation includes EL2, and the value of HCR.VM is 1 - * - * (Note that HCR.DC makes HCR.VM behave as if it is 1.) - * - * ATS1Hx always uses the 64bit format. - */ - format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); - - if (arm_feature(env, ARM_FEATURE_EL2)) { - if (mmu_idx == ARMMMUIdx_E10_0 || - mmu_idx == ARMMMUIdx_E10_1 || - mmu_idx == ARMMMUIdx_E10_1_PAN) { - format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); - } else { - format64 |= arm_current_el(env) == 2; - } - } - } - - if (format64) { - /* Create a 64-bit PAR */ - par64 = (1 << 11); /* LPAE bit always set */ - if (!ret) { - par64 |= res.f.phys_addr & ~0xfffULL; - if (!res.f.attrs.secure) { - par64 |= (1 << 9); /* NS */ - } - par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */ - par64 |= par_el1_shareability(&res) << 7; /* SH */ - } else { - uint32_t fsr = arm_fi_to_lfsc(&fi); - - par64 |= 1; /* F */ - par64 |= (fsr & 0x3f) << 1; /* FS */ - if (fi.stage2) { - par64 |= (1 << 9); /* S */ - } - if (fi.s1ptw) { - par64 |= (1 << 8); /* PTW */ - } - } - } else { - /* - * fsr is a DFSR/IFSR value for the short descriptor - * translation table format (with WnR always clear). - * Convert it to a 32-bit PAR. 
- */ - if (!ret) { - /* We do not set any attribute bits in the PAR */ - if (res.f.lg_page_size == 24 - && arm_feature(env, ARM_FEATURE_V7)) { - par64 = (res.f.phys_addr & 0xff000000) | (1 << 1); - } else { - par64 = res.f.phys_addr & 0xfffff000; - } - if (!res.f.attrs.secure) { - par64 |= (1 << 9); /* NS */ - } - } else { - uint32_t fsr = arm_fi_to_sfsc(&fi); - - par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | - ((fsr & 0xf) << 1) | 1; - } - } - return par64; -} -#endif /* CONFIG_TCG */ - -static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) -{ -#ifdef CONFIG_TCG - MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; - uint64_t par64; - ARMMMUIdx mmu_idx; - int el = arm_current_el(env); - ARMSecuritySpace ss = arm_security_space(env); - - switch (ri->opc2 & 6) { - case 0: - /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ - switch (el) { - case 3: - mmu_idx = ARMMMUIdx_E3; - break; - case 2: - g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ - /* fall through */ - case 1: - if (ri->crm == 9 && arm_pan_enabled(env)) { - mmu_idx = ARMMMUIdx_Stage1_E1_PAN; - } else { - mmu_idx = ARMMMUIdx_Stage1_E1; - } - break; - default: - g_assert_not_reached(); - } - break; - case 2: - /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ - switch (el) { - case 3: - mmu_idx = ARMMMUIdx_E10_0; - break; - case 2: - g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ - mmu_idx = ARMMMUIdx_Stage1_E0; - break; - case 1: - mmu_idx = ARMMMUIdx_Stage1_E0; - break; - default: - g_assert_not_reached(); - } - break; - case 4: - /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ - mmu_idx = ARMMMUIdx_E10_1; - ss = ARMSS_NonSecure; - break; - case 6: - /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ - mmu_idx = ARMMMUIdx_E10_0; - ss = ARMSS_NonSecure; - break; - default: - g_assert_not_reached(); - } - - par64 = do_ats_write(env, value, access_type, mmu_idx, ss); - - A32_BANKED_CURRENT_REG_SET(env, par, par64); -#else - /* Handled by hardware accelerator. */ - g_assert_not_reached(); -#endif /* CONFIG_TCG */ -} - -static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ -#ifdef CONFIG_TCG - MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; - uint64_t par64; - - /* There is no SecureEL2 for AArch32. */ - par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2, - ARMSS_NonSecure); - - A32_BANKED_CURRENT_REG_SET(env, par, par64); -#else - /* Handled by hardware accelerator. */ - g_assert_not_reached(); -#endif /* CONFIG_TCG */ -} - -static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - /* - * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level - * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can - * only happen when executing at EL3 because that combination also causes an - * illegal exception return. We don't need to check FEAT_RME either, because - * scr_write() ensures that the NSE bit is not set otherwise. 
- */ - if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) { - return CP_ACCESS_TRAP; - } - return CP_ACCESS_OK; -} - -static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - if (arm_current_el(env) == 3 && - !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) { - return CP_ACCESS_TRAP; - } - return at_e012_access(env, ri, isread); -} - -static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) { - return CP_ACCESS_TRAP_EL2; - } - return at_e012_access(env, ri, isread); -} - -static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ -#ifdef CONFIG_TCG - MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; - ARMMMUIdx mmu_idx; - uint64_t hcr_el2 = arm_hcr_el2_eff(env); - bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE); - bool for_el3 = false; - ARMSecuritySpace ss; - - switch (ri->opc2 & 6) { - case 0: - switch (ri->opc1) { - case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ - if (ri->crm == 9 && arm_pan_enabled(env)) { - mmu_idx = regime_e20 ? - ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN; - } else { - mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1; - } - break; - case 4: /* AT S1E2R, AT S1E2W */ - mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2; - break; - case 6: /* AT S1E3R, AT S1E3W */ - mmu_idx = ARMMMUIdx_E3; - for_el3 = true; - break; - default: - g_assert_not_reached(); - } - break; - case 2: /* AT S1E0R, AT S1E0W */ - mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0; - break; - case 4: /* AT S12E1R, AT S12E1W */ - mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1; - break; - case 6: /* AT S12E0R, AT S12E0W */ - mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0; - break; - default: - g_assert_not_reached(); - } - - ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env); - env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss); -#else - /* Handled by hardware accelerator. */ - g_assert_not_reached(); -#endif /* CONFIG_TCG */ -} -#endif - -/* Return basic MPU access permission bits. */ -static uint32_t simple_mpu_ap_bits(uint32_t val) -{ - uint32_t ret; - uint32_t mask; - int i; - ret = 0; - mask = 3; - for (i = 0; i < 16; i += 2) { - ret |= (val >> i) & mask; - mask <<= 2; - } - return ret; -} - -/* Pad basic MPU access permission bits to extended format. 
*/ -static uint32_t extended_mpu_ap_bits(uint32_t val) -{ - uint32_t ret; - uint32_t mask; - int i; - ret = 0; - mask = 3; - for (i = 0; i < 16; i += 2) { - ret |= (val & mask) << i; - mask <<= 2; - } - return ret; -} - -static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); -} - -static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); -} - -static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); -} - -static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); -} +static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); +} static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) { @@ -4326,1057 +2675,574 @@ static const ARMCPRegInfo pmsav5_cp_reginfo[] = { { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, -}; - -static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - - if (!arm_feature(env, ARM_FEATURE_V8)) { - if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { - /* - * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when - * using Long-descriptor translation table format - */ - value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); - } else if (arm_feature(env, ARM_FEATURE_EL3)) { - /* - * In an implementation that includes the Security Extensions - * TTBCR has additional fields PD0 [4] and PD1 [5] for - * Short-descriptor translation table format. - */ - value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; - } else { - value &= TTBCR_N; - } - } - - if (arm_feature(env, ARM_FEATURE_LPAE)) { - /* - * With LPAE the TTBCR could result in a change of ASID - * via the TTBCR.A1 bit, so do a TLB flush. - */ - tlb_flush(CPU(cpu)); - } - raw_write(env, ri, value); -} - -static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - - /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ - tlb_flush(CPU(cpu)); - raw_write(env, ri, value); -} - -static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ - if (cpreg_field_is_64bit(ri) && - extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { - ARMCPU *cpu = env_archcpu(env); - tlb_flush(CPU(cpu)); - } - raw_write(env, ri, value); -} - -static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* - * If we are running with E2&0 regime, then an ASID is active. - * Flush if that might be changing. Note we're not checking - * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that - * holds the active ASID, only checking the field that might. 
- */ - if (extract64(raw_read(env, ri) ^ value, 48, 16) && - (arm_hcr_el2_eff(env) & HCR_E2H)) { - uint16_t mask = ARMMMUIdxBit_E20_2 | - ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E20_0; - tlb_flush_by_mmuidx(env_cpu(env), mask); - } - raw_write(env, ri, value); -} - -static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - CPUState *cs = CPU(cpu); - - /* - * A change in VMID to the stage2 page table (Stage2) invalidates - * the stage2 and combined stage 1&2 tlbs (EL10_1 and EL10_0). - */ - if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { - tlb_flush_by_mmuidx(cs, alle1_tlbmask(env)); - } - raw_write(env, ri, value); -} - -static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { - { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, - .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), - offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, - { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, - .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, - .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), - offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, - { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, - .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, - .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), - offsetof(CPUARMState, cp15.dfar_ns) } }, - { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .fgt = FGT_FAR_EL1, - .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1, - .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), - .resetvalue = 0, }, -}; - -static const ARMCPRegInfo vmsa_cp_reginfo[] = { - { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .fgt = FGT_ESR_EL1, - .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1, - .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, - { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .fgt = FGT_TTBR0_EL1, - .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1, - .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write, - .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), - offsetof(CPUARMState, cp15.ttbr0_ns) } }, - { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .fgt = FGT_TTBR1_EL1, - .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1, - .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write, - .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), - offsetof(CPUARMState, cp15.ttbr1_ns) } }, - { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .fgt = FGT_TCR_EL1, - .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1, - .writefn = vmsa_tcr_el12_write, - .raw_writefn = raw_write, - .resetvalue = 0, - .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, - { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, - .raw_writefn = raw_write, - 
.bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), - offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, -}; - -/* - * Note that unlike TTBCR, writing to TTBCR2 does not require flushing - * qemu tlbs nor adjusting cached masks. - */ -static const ARMCPRegInfo ttbcr2_reginfo = { - .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .type = ARM_CP_ALIAS, - .bank_fieldoffsets = { - offsetofhigh32(CPUARMState, cp15.tcr_el[3]), - offsetofhigh32(CPUARMState, cp15.tcr_el[1]), - }, -}; - -static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->cp15.c15_ticonfig = value & 0xe7; - /* The OS_TYPE bit in this register changes the reported CPUID! */ - env->cp15.c0_cpuid = (value & (1 << 5)) ? - ARM_CPUID_TI915T : ARM_CPUID_TI925T; -} - -static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->cp15.c15_threadid = value & 0xffff; -} - -static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Wait-for-interrupt (deprecated) */ - cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); -} - -static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* - * On OMAP there are registers indicating the max/min index of dcache lines - * containing a dirty line; cache flush operations have to reset these. - */ - env->cp15.c15_i_max = 0x000; - env->cp15.c15_i_min = 0xff0; -} - -static const ARMCPRegInfo omap_cp_reginfo[] = { - { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, - .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, - .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), - .resetvalue = 0, }, - { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_NOP }, - { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, - .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, - .writefn = omap_ticonfig_write }, - { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, - .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, - { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .resetvalue = 0xff0, - .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, - { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, - .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, - .writefn = omap_threadid_write }, - { .name = "TI925T_STATUS", .cp = 15, .crn = 15, - .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, - .type = ARM_CP_NO_RAW, - .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, - /* - * TODO: Peripheral port remap register: - * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller - * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), - * when MMU is off. 
- */ - { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, - .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, - .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, - .writefn = omap_cachemaint_write }, - { .name = "C9", .cp = 15, .crn = 9, - .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, - .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, -}; - -static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->cp15.c15_cpar = value & 0x3fff; -} - -static const ARMCPRegInfo xscale_cp_reginfo[] = { - { .name = "XSCALE_CPAR", - .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, - .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, - .writefn = xscale_cpar_write, }, - { .name = "XSCALE_AUXCR", - .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, - .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), - .resetvalue = 0, }, - /* - * XScale specific cache-lockdown: since we have no cache we NOP these - * and hope the guest does not really rely on cache behaviour. - */ - { .name = "XSCALE_LOCK_ICACHE_LINE", - .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, - .access = PL1_W, .type = ARM_CP_NOP }, - { .name = "XSCALE_UNLOCK_ICACHE", - .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, - .access = PL1_W, .type = ARM_CP_NOP }, - { .name = "XSCALE_DCACHE_LOCK", - .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_NOP }, - { .name = "XSCALE_UNLOCK_DCACHE", - .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, - .access = PL1_W, .type = ARM_CP_NOP }, -}; - -static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { - /* - * RAZ/WI the whole crn=15 space, when we don't have a more specific - * implementation of this implementation-defined space. - * Ideally this should eventually disappear in favour of actually - * implementing the correct behaviour for all cores. 
- */ - { .name = "C15_IMPDEF", .cp = 15, .crn = 15, - .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, - .access = PL1_RW, - .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, - .resetvalue = 0 }, -}; - -static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { - /* Cache status: RAZ because we have no cache so it's always clean */ - { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, - .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, - .resetvalue = 0 }, -}; - -static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { - /* We never have a block transfer operation in progress */ - { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, - .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, - .resetvalue = 0 }, - /* The cache ops themselves: these all NOP for QEMU */ - { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, - .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, - { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, - .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, - { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, - .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, - { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, - .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, - { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, - .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, - { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, - .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, -}; - -static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { - /* - * The cache test-and-clean instructions always return (1 << 30) - * to indicate that there are no dirty cache lines. - */ - { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, - .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, - .resetvalue = (1 << 30) }, - { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, - .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, - .resetvalue = (1 << 30) }, -}; - -static const ARMCPRegInfo strongarm_cp_reginfo[] = { - /* Ignore ReadBuffer accesses */ - { .name = "C9_READBUFFER", .cp = 15, .crn = 9, - .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, - .access = PL1_RW, .resetvalue = 0, - .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, -}; - -static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - unsigned int cur_el = arm_current_el(env); - - if (arm_is_el2_enabled(env) && cur_el == 1) { - return env->cp15.vpidr_el2; - } - return raw_read(env, ri); -} +}; -static uint64_t mpidr_read_val(CPUARMState *env) +static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { ARMCPU *cpu = env_archcpu(env); - uint64_t mpidr = cpu->mp_affinity; - if (arm_feature(env, ARM_FEATURE_V7MP)) { - mpidr |= (1U << 31); - /* - * Cores which are uniprocessor (non-coherent) - * but still implement the MP extensions set - * bit 30. (For instance, Cortex-R5). - */ - if (cpu->mp_is_up) { - mpidr |= (1u << 30); + if (!arm_feature(env, ARM_FEATURE_V8)) { + if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { + /* + * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when + * using Long-descriptor translation table format + */ + value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); + } else if (arm_feature(env, ARM_FEATURE_EL3)) { + /* + * In an implementation that includes the Security Extensions + * TTBCR has additional fields PD0 [4] and PD1 [5] for + * Short-descriptor translation table format. 
+ */ + value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; + } else { + value &= TTBCR_N; } } - return mpidr; -} -static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - unsigned int cur_el = arm_current_el(env); - - if (arm_is_el2_enabled(env) && cur_el == 1) { - return env->cp15.vmpidr_el2; + if (arm_feature(env, ARM_FEATURE_LPAE)) { + /* + * With LPAE the TTBCR could result in a change of ASID + * via the TTBCR.A1 bit, so do a TLB flush. + */ + tlb_flush(CPU(cpu)); } - return mpidr_read_val(env); -} - -static const ARMCPRegInfo lpae_cp_reginfo[] = { - /* NOP AMAIR0/1 */ - { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .fgt = FGT_AMAIR_EL1, - .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1, - .type = ARM_CP_CONST, .resetvalue = 0 }, - /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ - { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, - .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, - .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), - offsetof(CPUARMState, cp15.par_ns)} }, - { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .type = ARM_CP_64BIT | ARM_CP_ALIAS, - .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), - offsetof(CPUARMState, cp15.ttbr0_ns) }, - .writefn = vmsa_ttbr_write, .raw_writefn = raw_write }, - { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, - .access = PL1_RW, .accessfn = access_tvm_trvm, - .type = ARM_CP_64BIT | ARM_CP_ALIAS, - .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), - offsetof(CPUARMState, cp15.ttbr1_ns) }, - .writefn = vmsa_ttbr_write, .raw_writefn = raw_write }, -}; - -static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return vfp_get_fpcr(env); -} - -static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - vfp_set_fpcr(env, value); -} - -static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return vfp_get_fpsr(env); + raw_write(env, ri, value); } -static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - vfp_set_fpsr(env, value); -} + ARMCPU *cpu = env_archcpu(env); -static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { - return CP_ACCESS_TRAP; - } - return CP_ACCESS_OK; + /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. 
*/ + tlb_flush(CPU(cpu)); + raw_write(env, ri, value); } -static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, +static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - env->daif = value & PSTATE_DAIF; -} - -static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return env->pstate & PSTATE_PAN; -} - -static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); -} - -static const ARMCPRegInfo pan_reginfo = { - .name = "PAN", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3, - .type = ARM_CP_NO_RAW, .access = PL1_RW, - .readfn = aa64_pan_read, .writefn = aa64_pan_write -}; - -static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return env->pstate & PSTATE_UAO; -} - -static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); -} - -static const ARMCPRegInfo uao_reginfo = { - .name = "UAO", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4, - .type = ARM_CP_NO_RAW, .access = PL1_RW, - .readfn = aa64_uao_read, .writefn = aa64_uao_write -}; - -static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return env->pstate & PSTATE_DIT; -} - -static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT); -} - -static const ARMCPRegInfo dit_reginfo = { - .name = "DIT", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5, - .type = ARM_CP_NO_RAW, .access = PL0_RW, - .readfn = aa64_dit_read, .writefn = aa64_dit_write -}; - -static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - return env->pstate & PSTATE_SSBS; -} - -static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS); -} - -static const ARMCPRegInfo ssbs_reginfo = { - .name = "SSBS", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6, - .type = ARM_CP_NO_RAW, .access = PL0_RW, - .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write -}; - -static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env, - const ARMCPRegInfo *ri, - bool isread) -{ - /* Cache invalidate/clean to Point of Coherency or Persistence... */ - switch (arm_current_el(env)) { - case 0: - /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ - if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { - return CP_ACCESS_TRAP; - } - /* fall through */ - case 1: - /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */ - if (arm_hcr_el2_eff(env) & HCR_TPCP) { - return CP_ACCESS_TRAP_EL2; - } - break; + /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ + if (cpreg_field_is_64bit(ri) && + extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { + ARMCPU *cpu = env_archcpu(env); + tlb_flush(CPU(cpu)); } - return CP_ACCESS_OK; + raw_write(env, ri, value); } -static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags) +static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - /* Cache invalidate/clean to Point of Unification... */ - switch (arm_current_el(env)) { - case 0: - /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. 
*/ - if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { - return CP_ACCESS_TRAP; - } - /* fall through */ - case 1: - /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set. */ - if (arm_hcr_el2_eff(env) & hcrflags) { - return CP_ACCESS_TRAP_EL2; - } - break; + /* + * If we are running with E2&0 regime, then an ASID is active. + * Flush if that might be changing. Note we're not checking + * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that + * holds the active ASID, only checking the field that might. + */ + if (extract64(raw_read(env, ri) ^ value, 48, 16) && + (arm_hcr_el2_eff(env) & HCR_E2H)) { + uint16_t mask = ARMMMUIdxBit_E20_2 | + ARMMMUIdxBit_E20_2_PAN | + ARMMMUIdxBit_E20_0; + tlb_flush_by_mmuidx(env_cpu(env), mask); } - return CP_ACCESS_OK; + raw_write(env, ri, value); } -static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU); -} + ARMCPU *cpu = env_archcpu(env); + CPUState *cs = CPU(cpu); -static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU); + /* + * A change in VMID to the stage2 page table (Stage2) invalidates + * the stage2 and combined stage 1&2 tlbs (EL10_1 and EL10_0). + */ + if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { + tlb_flush_by_mmuidx(cs, alle1_tlbmask(env)); + } + raw_write(env, ri, value); } +static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { + { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), + offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, + { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), + offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, + { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), + offsetof(CPUARMState, cp15.dfar_ns) } }, + { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .fgt = FGT_FAR_EL1, + .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1, + .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), + .resetvalue = 0, }, +}; + +static const ARMCPRegInfo vmsa_cp_reginfo[] = { + { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .fgt = FGT_ESR_EL1, + .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1, + .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, + { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .fgt = FGT_TTBR0_EL1, + .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1, + .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), + offsetof(CPUARMState, cp15.ttbr0_ns) } }, + { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, + .access = PL1_RW, .accessfn = 
access_tvm_trvm, + .fgt = FGT_TTBR1_EL1, + .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1, + .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), + offsetof(CPUARMState, cp15.ttbr1_ns) } }, + { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .fgt = FGT_TCR_EL1, + .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1, + .writefn = vmsa_tcr_el12_write, + .raw_writefn = raw_write, + .resetvalue = 0, + .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, + { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, + .raw_writefn = raw_write, + .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), + offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, +}; + /* - * See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions - * Page D4-1736 (DDI0487A.b) + * Note that unlike TTBCR, writing to TTBCR2 does not require flushing + * qemu tlbs nor adjusting cached masks. */ +static const ARMCPRegInfo ttbcr2_reginfo = { + .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .type = ARM_CP_ALIAS, + .bank_fieldoffsets = { + offsetofhigh32(CPUARMState, cp15.tcr_el[3]), + offsetofhigh32(CPUARMState, cp15.tcr_el[1]), + }, +}; -static int vae1_tlbmask(CPUARMState *env) +static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - uint64_t hcr = arm_hcr_el2_eff(env); - uint16_t mask; - - if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { - mask = ARMMMUIdxBit_E20_2 | - ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E20_0; - } else { - mask = ARMMMUIdxBit_E10_1 | - ARMMMUIdxBit_E10_1_PAN | - ARMMMUIdxBit_E10_0; - } - return mask; + env->cp15.c15_ticonfig = value & 0xe7; + /* The OS_TYPE bit in this register changes the reported CPUID! */ + env->cp15.c0_cpuid = (value & (1 << 5)) ? + ARM_CPUID_TI915T : ARM_CPUID_TI925T; } -static int vae2_tlbmask(CPUARMState *env) +static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - uint64_t hcr = arm_hcr_el2_eff(env); - uint16_t mask; - - if (hcr & HCR_E2H) { - mask = ARMMMUIdxBit_E20_2 | - ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E20_0; - } else { - mask = ARMMMUIdxBit_E2; - } - return mask; + env->cp15.c15_threadid = value & 0xffff; } -/* Return 56 if TBI is enabled, 64 otherwise. */ -static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx, - uint64_t addr) +static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - uint64_t tcr = regime_tcr(env, mmu_idx); - int tbi = aa64_va_parameter_tbi(tcr, mmu_idx); - int select = extract64(addr, 55, 1); - - return (tbi >> select) & 1 ? 56 : 64; + /* Wait-for-interrupt (deprecated) */ + cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); } -static int vae1_tlbbits(CPUARMState *env, uint64_t addr) +static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - uint64_t hcr = arm_hcr_el2_eff(env); - ARMMMUIdx mmu_idx; - - /* Only the regime of the mmu_idx below is significant. 
*/ - if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { - mmu_idx = ARMMMUIdx_E20_0; - } else { - mmu_idx = ARMMMUIdx_E10_0; - } - - return tlbbits_for_regime(env, mmu_idx, addr); + /* + * On OMAP there are registers indicating the max/min index of dcache lines + * containing a dirty line; cache flush operations have to reset these. + */ + env->cp15.c15_i_max = 0x000; + env->cp15.c15_i_min = 0xff0; } -static int vae2_tlbbits(CPUARMState *env, uint64_t addr) -{ - uint64_t hcr = arm_hcr_el2_eff(env); - ARMMMUIdx mmu_idx; - +static const ARMCPRegInfo omap_cp_reginfo[] = { + { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, + .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), + .resetvalue = 0, }, + { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_NOP }, + { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, + .writefn = omap_ticonfig_write }, + { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, + { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .resetvalue = 0xff0, + .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, + { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, + .writefn = omap_threadid_write }, + { .name = "TI925T_STATUS", .cp = 15, .crn = 15, + .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, + .type = ARM_CP_NO_RAW, + .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, /* - * Only the regime of the mmu_idx below is significant. - * Regime EL2&0 has two ranges with separate TBI configuration, while EL2 - * only has one. + * TODO: Peripheral port remap register: + * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller + * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), + * when MMU is off. 
*/ - if (hcr & HCR_E2H) { - mmu_idx = ARMMMUIdx_E20_2; - } else { - mmu_idx = ARMMMUIdx_E2; - } - - return tlbbits_for_regime(env, mmu_idx, addr); -} + { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, + .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, + .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, + .writefn = omap_cachemaint_write }, + { .name = "C9", .cp = 15, .crn = 9, + .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, + .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, +}; -static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - CPUState *cs = env_cpu(env); - int mask = vae1_tlbmask(env); - - tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); + env->cp15.c15_cpar = value & 0x3fff; } -static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = vae1_tlbmask(env); +static const ARMCPRegInfo xscale_cp_reginfo[] = { + { .name = "XSCALE_CPAR", + .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, + .writefn = xscale_cpar_write, }, + { .name = "XSCALE_AUXCR", + .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, + .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), + .resetvalue = 0, }, + /* + * XScale specific cache-lockdown: since we have no cache we NOP these + * and hope the guest does not really rely on cache behaviour. + */ + { .name = "XSCALE_LOCK_ICACHE_LINE", + .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, + .access = PL1_W, .type = ARM_CP_NOP }, + { .name = "XSCALE_UNLOCK_ICACHE", + .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NOP }, + { .name = "XSCALE_DCACHE_LOCK", + .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_NOP }, + { .name = "XSCALE_UNLOCK_DCACHE", + .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NOP }, +}; - if (tlb_force_broadcast(env)) { - tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); - } else { - tlb_flush_by_mmuidx(cs, mask); - } -} +static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { + /* + * RAZ/WI the whole crn=15 space, when we don't have a more specific + * implementation of this implementation-defined space. + * Ideally this should eventually disappear in favour of actually + * implementing the correct behaviour for all cores. 
+ */ + { .name = "C15_IMPDEF", .cp = 15, .crn = 15, + .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, + .access = PL1_RW, + .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, + .resetvalue = 0 }, +}; -static int e2_tlbmask(CPUARMState *env) -{ - return (ARMMMUIdxBit_E20_0 | - ARMMMUIdxBit_E20_2 | - ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E2); -} +static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { + /* Cache status: RAZ because we have no cache so it's always clean */ + { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, + .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, + .resetvalue = 0 }, +}; -static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = alle1_tlbmask(env); +static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { + /* We never have a block transfer operation in progress */ + { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, + .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, + .resetvalue = 0 }, + /* The cache ops themselves: these all NOP for QEMU */ + { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, + .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, + { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, + .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, + { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, + .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, + { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, + .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, + { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, + .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, + { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, + .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, +}; - tlb_flush_by_mmuidx(cs, mask); -} +static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { + /* + * The cache test-and-clean instructions always return (1 << 30) + * to indicate that there are no dirty cache lines. + */ + { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, + .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, + .resetvalue = (1 << 30) }, + { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, + .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, + .resetvalue = (1 << 30) }, +}; -static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static const ARMCPRegInfo strongarm_cp_reginfo[] = { + /* Ignore ReadBuffer accesses */ + { .name = "C9_READBUFFER", .cp = 15, .crn = 9, + .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, + .access = PL1_RW, .resetvalue = 0, + .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, +}; + +static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) { - CPUState *cs = env_cpu(env); - int mask = e2_tlbmask(env); + unsigned int cur_el = arm_current_el(env); - tlb_flush_by_mmuidx(cs, mask); + if (arm_is_el2_enabled(env) && cur_el == 1) { + return env->cp15.vpidr_el2; + } + return raw_read(env, ri); } -static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static uint64_t mpidr_read_val(CPUARMState *env) { ARMCPU *cpu = env_archcpu(env); - CPUState *cs = CPU(cpu); + uint64_t mpidr = cpu->mp_affinity; - tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3); + if (arm_feature(env, ARM_FEATURE_V7MP)) { + mpidr |= (1U << 31); + /* + * Cores which are uniprocessor (non-coherent) + * but still implement the MP extensions set + * bit 30. (For instance, Cortex-R5). 
+ */ + if (cpu->mp_is_up) { + mpidr |= (1u << 30); + } + } + return mpidr; } -static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) { - CPUState *cs = env_cpu(env); - int mask = alle1_tlbmask(env); + unsigned int cur_el = arm_current_el(env); - tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); + if (arm_is_el2_enabled(env) && cur_el == 1) { + return env->cp15.vmpidr_el2; + } + return mpidr_read_val(env); } -static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = e2_tlbmask(env); - - tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); -} +static const ARMCPRegInfo lpae_cp_reginfo[] = { + /* NOP AMAIR0/1 */ + { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .fgt = FGT_AMAIR_EL1, + .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1, + .type = ARM_CP_CONST, .resetvalue = 0 }, + /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ + { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, + .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), + offsetof(CPUARMState, cp15.par_ns)} }, + { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .type = ARM_CP_64BIT | ARM_CP_ALIAS, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), + offsetof(CPUARMState, cp15.ttbr0_ns) }, + .writefn = vmsa_ttbr_write, .raw_writefn = raw_write }, + { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, + .access = PL1_RW, .accessfn = access_tvm_trvm, + .type = ARM_CP_64BIT | ARM_CP_ALIAS, + .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), + offsetof(CPUARMState, cp15.ttbr1_ns) }, + .writefn = vmsa_ttbr_write, .raw_writefn = raw_write }, +}; -static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) { - CPUState *cs = env_cpu(env); - - tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3); + return vfp_get_fpcr(env); } -static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - /* - * Invalidate by VA, EL2 - * Currently handles both VAE2 and VALE2, since we don't support - * flush-last-level-only. - */ - CPUState *cs = env_cpu(env); - int mask = vae2_tlbmask(env); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - int bits = vae2_tlbbits(env, pageaddr); - - tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits); + vfp_set_fpcr(env, value); } -static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) { - /* - * Invalidate by VA, EL3 - * Currently handles both VAE3 and VALE3, since we don't support - * flush-last-level-only. 
- */ - ARMCPU *cpu = env_archcpu(env); - CPUState *cs = CPU(cpu); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3); + return vfp_get_fpsr(env); } -static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - CPUState *cs = env_cpu(env); - int mask = vae1_tlbmask(env); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - int bits = vae1_tlbbits(env, pageaddr); - - tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); + vfp_set_fpsr(env, value); } -static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - /* - * Invalidate by VA, EL1&0 (AArch64 version). - * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, - * since we don't support flush-for-specific-ASID-only or - * flush-last-level-only. - */ - CPUState *cs = env_cpu(env); - int mask = vae1_tlbmask(env); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - int bits = vae1_tlbbits(env, pageaddr); - - if (tlb_force_broadcast(env)) { - tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); - } else { - tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits); + if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { + return CP_ACCESS_TRAP_EL1; } + return CP_ACCESS_OK; } -static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - CPUState *cs = env_cpu(env); - int mask = vae2_tlbmask(env); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - int bits = vae2_tlbbits(env, pageaddr); - - tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); + env->daif = value & PSTATE_DAIF; } -static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri) { - CPUState *cs = env_cpu(env); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr); - - tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, - ARMMMUIdxBit_E3, bits); + return env->pstate & PSTATE_PAN; } -static int ipas2e1_tlbmask(CPUARMState *env, int64_t value) +static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - /* - * The MSB of value is the NS field, which only applies if SEL2 - * is implemented and SCR_EL3.NS is not set (i.e. in secure mode). - */ - return (value >= 0 - && cpu_isar_feature(aa64_sel2, env_archcpu(env)) - && arm_is_secure_below_el3(env) - ? 
ARMMMUIdxBit_Stage2_S - : ARMMMUIdxBit_Stage2); + env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); } -static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = ipas2e1_tlbmask(env, value); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - - if (tlb_force_broadcast(env)) { - tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); - } else { - tlb_flush_page_by_mmuidx(cs, pageaddr, mask); - } -} +static const ARMCPRegInfo pan_reginfo = { + .name = "PAN", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3, + .type = ARM_CP_NO_RAW, .access = PL1_RW, + .readfn = aa64_pan_read, .writefn = aa64_pan_write +}; -static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri) { - CPUState *cs = env_cpu(env); - int mask = ipas2e1_tlbmask(env, value); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - - tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); + return env->pstate & PSTATE_UAO; } -#ifdef TARGET_AARCH64 -typedef struct { - uint64_t base; - uint64_t length; -} TLBIRange; - -static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg) +static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - /* - * Note that the TLBI range TG field encoding differs from both - * TG0 and TG1 encodings. - */ - switch (tg) { - case 1: - return Gran4K; - case 2: - return Gran16K; - case 3: - return Gran64K; - default: - return GranInvalid; - } + env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); } -static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx, - uint64_t value) -{ - unsigned int page_size_granule, page_shift, num, scale, exponent; - /* Extract one bit to represent the va selector in use. */ - uint64_t select = sextract64(value, 36, 1); - ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true, false); - TLBIRange ret = { }; - ARMGranuleSize gran; - - page_size_granule = extract64(value, 46, 2); - gran = tlbi_range_tg_to_gran_size(page_size_granule); - - /* The granule encoded in value must match the granule in use. */ - if (gran != param.gran) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n", - page_size_granule); - return ret; - } - - page_shift = arm_granule_bits(gran); - num = extract64(value, 39, 5); - scale = extract64(value, 44, 2); - exponent = (5 * scale) + 1; - - ret.length = (num + 1) << (exponent + page_shift); - - if (param.select) { - ret.base = sextract64(value, 0, 37); - } else { - ret.base = extract64(value, 0, 37); - } - if (param.ds) { - /* - * With DS=1, BaseADDR is always shifted 16 so that it is able - * to address all 52 va bits. The input address is perforce - * aligned on a 64k boundary regardless of translation granule. 
- */ - page_shift = 16; - } - ret.base <<= page_shift; - - return ret; -} +static const ARMCPRegInfo uao_reginfo = { + .name = "UAO", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4, + .type = ARM_CP_NO_RAW, .access = PL1_RW, + .readfn = aa64_uao_read, .writefn = aa64_uao_write +}; -static void do_rvae_write(CPUARMState *env, uint64_t value, - int idxmap, bool synced) +static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri) { - ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap); - TLBIRange range; - int bits; - - range = tlbi_aa64_get_range(env, one_idx, value); - bits = tlbbits_for_regime(env, one_idx, range.base); - - if (synced) { - tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env), - range.base, - range.length, - idxmap, - bits); - } else { - tlb_flush_range_by_mmuidx(env_cpu(env), range.base, - range.length, idxmap, bits); - } + return env->pstate & PSTATE_DIT; } -static void tlbi_aa64_rvae1_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) +static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - /* - * Invalidate by VA range, EL1&0. - * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1, - * since we don't support flush-for-specific-ASID-only or - * flush-last-level-only. - */ - - do_rvae_write(env, value, vae1_tlbmask(env), - tlb_force_broadcast(env)); + env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT); } -static void tlbi_aa64_rvae1is_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) -{ - /* - * Invalidate by VA range, Inner/Outer Shareable EL1&0. - * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS, - * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support - * flush-for-specific-ASID-only, flush-last-level-only or inner/outer - * shareable specific flushes. - */ - - do_rvae_write(env, value, vae1_tlbmask(env), true); -} +static const ARMCPRegInfo dit_reginfo = { + .name = "DIT", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5, + .type = ARM_CP_NO_RAW, .access = PL0_RW, + .readfn = aa64_dit_read, .writefn = aa64_dit_write +}; -static void tlbi_aa64_rvae2_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) +static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri) { - /* - * Invalidate by VA range, EL2. - * Currently handles all of RVAE2 and RVALE2, - * since we don't support flush-for-specific-ASID-only or - * flush-last-level-only. - */ - - do_rvae_write(env, value, vae2_tlbmask(env), - tlb_force_broadcast(env)); - - + return env->pstate & PSTATE_SSBS; } -static void tlbi_aa64_rvae2is_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) +static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) { - /* - * Invalidate by VA range, Inner/Outer Shareable, EL2. - * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS, - * since we don't support flush-for-specific-ASID-only, - * flush-last-level-only or inner/outer shareable specific flushes. - */ - - do_rvae_write(env, value, vae2_tlbmask(env), true); - + env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS); } -static void tlbi_aa64_rvae3_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) -{ - /* - * Invalidate by VA range, EL3. - * Currently handles all of RVAE3 and RVALE3, - * since we don't support flush-for-specific-ASID-only or - * flush-last-level-only. 
- */ +static const ARMCPRegInfo ssbs_reginfo = { + .name = "SSBS", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6, + .type = ARM_CP_NO_RAW, .access = PL0_RW, + .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write +}; - do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env)); +static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + /* Cache invalidate/clean to Point of Coherency or Persistence... */ + switch (arm_current_el(env)) { + case 0: + /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. */ + if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { + return CP_ACCESS_TRAP_EL1; + } + /* fall through */ + case 1: + /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */ + if (arm_hcr_el2_eff(env) & HCR_TPCP) { + return CP_ACCESS_TRAP_EL2; + } + break; + } + return CP_ACCESS_OK; } -static void tlbi_aa64_rvae3is_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) +static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags) { - /* - * Invalidate by VA range, EL3, Inner/Outer Shareable. - * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS, - * since we don't support flush-for-specific-ASID-only, - * flush-last-level-only or inner/outer specific flushes. - */ - - do_rvae_write(env, value, ARMMMUIdxBit_E3, true); + /* Cache invalidate/clean to Point of Unification... */ + switch (arm_current_el(env)) { + case 0: + /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. */ + if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { + return CP_ACCESS_TRAP_EL1; + } + /* fall through */ + case 1: + /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set. */ + if (arm_hcr_el2_eff(env) & hcrflags) { + return CP_ACCESS_TRAP_EL2; + } + break; + } + return CP_ACCESS_OK; } -static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - do_rvae_write(env, value, ipas2e1_tlbmask(env, value), - tlb_force_broadcast(env)); + return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU); } -static void tlbi_aa64_ripas2e1is_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) +static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true); + return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU); } -#endif static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) @@ -5393,7 +3259,7 @@ static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, } } else { if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } if (hcr & HCR_TDZ) { return CP_ACCESS_TRAP_EL2; @@ -5426,7 +3292,7 @@ static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, * Access to SP_EL0 is undefined if it's being used as * the stack pointer. 
*/ - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } return CP_ACCESS_OK; } @@ -5568,7 +3434,7 @@ static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri, mmap_lock(); - tb_invalidate_phys_range(start_address, end_address); + tb_invalidate_phys_range(env_cpu(env), start_address, end_address); mmap_unlock(); } @@ -5590,7 +3456,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, { .name = "FPCR", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, - .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, + .access = PL0_RW, .type = ARM_CP_FPU, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, { .name = "FPSR", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, @@ -5672,146 +3538,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, .fgt = FGT_DCCISW, .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, - /* TLBI operations */ - { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVMALLE1IS, - .writefn = tlbi_aa64_vmalle1is_write }, - { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAE1IS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIASIDE1IS, - .writefn = tlbi_aa64_vmalle1is_write }, - { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAAE1IS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVALE1IS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAALE1IS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVMALLE1, - .writefn = tlbi_aa64_vmalle1_write }, - { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAE1, - .writefn = tlbi_aa64_vae1_write }, - { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIASIDE1, - .writefn = tlbi_aa64_vmalle1_write }, - { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAAE1, - .writefn = tlbi_aa64_vae1_write }, - { .name = "TLBI_VALE1", .state = 
ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVALE1, - .writefn = tlbi_aa64_vae1_write }, - { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAALE1, - .writefn = tlbi_aa64_vae1_write }, - { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ipas2e1is_write }, - { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ipas2e1is_write }, - { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle1is_write }, - { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle1is_write }, - { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ipas2e1_write }, - { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ipas2e1_write }, - { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle1_write }, - { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle1is_write }, -#ifndef CONFIG_USER_ONLY - /* 64 bit address translation operations */ - { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, - .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .fgt = FGT_ATS1E1R, - .accessfn = at_s1e01_access, .writefn = ats_write64 }, - { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, - .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .fgt = FGT_ATS1E1W, - .accessfn = at_s1e01_access, .writefn = ats_write64 }, - { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, - .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .fgt = FGT_ATS1E0R, - .accessfn = at_s1e01_access, .writefn = ats_write64 }, - { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, - .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .fgt = FGT_ATS1E0W, - .accessfn = at_s1e01_access, .writefn = ats_write64 }, - { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .accessfn = at_e012_access, .writefn = ats_write64 }, - { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .accessfn = at_e012_access, .writefn = ats_write64 }, - { .name = "AT_S12E0R", .state = 
ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .accessfn = at_e012_access, .writefn = ats_write64 }, - { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .accessfn = at_e012_access, .writefn = ats_write64 }, - /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ - { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, - .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .writefn = ats_write64 }, - { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, - .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .writefn = ats_write64 }, { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, @@ -5819,43 +3545,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .fgt = FGT_PAR_EL1, .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), .writefn = par_write }, -#endif - /* TLB invalidate last level of translation table walk */ - { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, - .writefn = tlbimva_is_write }, - { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, - .writefn = tlbimvaa_is_write }, - { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbimva_write }, - { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbimvaa_write }, - { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbimva_hyp_write }, - { .name = "TLBIMVALHIS", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbimva_hyp_is_write }, - { .name = "TLBIIPAS2", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiipas2_hyp_write }, - { .name = "TLBIIPAS2IS", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiipas2is_hyp_write }, - { .name = "TLBIIPAS2L", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiipas2_hyp_write }, - { .name = "TLBIIPAS2LIS", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiipas2is_hyp_write }, /* 32 bit cache operations */ { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab }, @@ -6038,6 +3727,11 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) /* Clear RES0 bits. 
*/ value &= valid_mask; + /* RW is RAO/WI if EL1 is AArch64 only */ + if (!cpu_isar_feature(aa64_aa32_el1, cpu)) { + value |= HCR_RW; + } + /* * These bits change the MMU setup: * HCR_VM enables stage 2 translation @@ -6095,6 +3789,12 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32)); } +static void hcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* hcr_write will set the RES1 bits on an AArch64-only CPU */ + hcr_write(env, ri, 0); +} + /* * Return the effective value of HCR_EL2, at the given security state. * Bits that are not included here: @@ -6216,6 +3916,14 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri, if (cpu_isar_feature(aa64_nmi, cpu)) { valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI; } + /* FEAT_CMOW adds CMOW */ + if (cpu_isar_feature(aa64_cmow, cpu)) { + valid_mask |= HCRX_CMOW; + } + /* FEAT_XS adds FGTnXS, FnXS */ + if (cpu_isar_feature(aa64_xs, cpu)) { + valid_mask |= HCRX_FGTNXS | HCRX_FNXS; + } /* Clear RES0 bits. */ env->cp15.hcrx_el2 = value & valid_mask; @@ -6322,6 +4030,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), .nv2_redirect_offset = 0x78, + .resetfn = hcr_reset, .writefn = hcr_write, .raw_writefn = raw_write }, { .name = "HCR", .state = ARM_CP_STATE_AA32, .type = ARM_CP_ALIAS | ARM_CP_IO, @@ -6437,78 +4146,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, - { .name = "TLBIALLNSNH", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiall_nsnh_write }, - { .name = "TLBIALLNSNHIS", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiall_nsnh_is_write }, - { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiall_hyp_write }, - { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiall_hyp_is_write }, - { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbimva_hyp_write }, - { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbimva_hyp_is_write }, - { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_alle2_write }, - { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_vae2_write }, - { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_vae2_write }, - { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_alle2is_write }, - { .name = "TLBI_VAE2IS", .state = 
ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_vae2is_write }, - { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_vae2is_write }, #ifndef CONFIG_USER_ONLY - /* - * Unlike the other EL2-related AT operations, these must - * UNDEF from EL3 if EL2 is not implemented, which is why we - * define them here rather than with the rest of the AT ops. - */ - { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, - .access = PL2_W, .accessfn = at_s1e2_access, - .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = ats_write64 }, - { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, - .access = PL2_W, .accessfn = at_s1e2_access, - .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = ats_write64 }, - /* - * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE - * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 - * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose - * to behave as if SCR.NS was 1. - */ - { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, - .access = PL2_W, - .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, - { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, - .access = PL2_W, - .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, /* @@ -6581,7 +4219,7 @@ static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri, if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) { return CP_ACCESS_OK; } - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } static const ARMCPRegInfo el2_sec_cp_reginfo[] = { @@ -6595,6 +4233,56 @@ static const ARMCPRegInfo el2_sec_cp_reginfo[] = { .access = PL2_RW, .accessfn = sel2_access, .nv2_redirect_offset = 0x48, .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) }, +#ifndef CONFIG_USER_ONLY + /* Secure EL2 Physical Timer */ + { .name = "CNTHPS_TVAL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 0, + .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, + .accessfn = gt_sel2timer_access, + .readfn = gt_sec_pel2_tval_read, + .writefn = gt_sec_pel2_tval_write, + .resetfn = gt_sec_pel2_timer_reset, + }, + { .name = "CNTHPS_CTL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 1, + .type = ARM_CP_IO, .access = PL2_RW, + .accessfn = gt_sel2timer_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].ctl), + .resetvalue = 0, + .writefn = gt_sec_pel2_ctl_write, .raw_writefn = raw_write, + }, + { .name = "CNTHPS_CVAL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 2, + .type = ARM_CP_IO, .access = PL2_RW, + .accessfn = gt_sel2timer_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].cval), + .writefn = gt_sec_pel2_cval_write, .raw_writefn = raw_write, + }, + /* Secure EL2 Virtual Timer */ + { .name = "CNTHVS_TVAL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 0, + .type = 
ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, + .accessfn = gt_sel2timer_access, + .readfn = gt_sec_vel2_tval_read, + .writefn = gt_sec_vel2_tval_write, + .resetfn = gt_sec_vel2_timer_reset, + }, + { .name = "CNTHVS_CTL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 1, + .type = ARM_CP_IO, .access = PL2_RW, + .accessfn = gt_sel2timer_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].ctl), + .resetvalue = 0, + .writefn = gt_sec_vel2_ctl_write, .raw_writefn = raw_write, + }, + { .name = "CNTHVS_CVAL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 2, + .type = ARM_CP_IO, .access = PL2_RW, + .accessfn = gt_sel2timer_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].cval), + .writefn = gt_sec_vel2_cval_write, .raw_writefn = raw_write, + }, +#endif }; static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, @@ -6617,7 +4305,7 @@ static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, if (isread) { return CP_ACCESS_OK; } - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } static const ARMCPRegInfo el3_cp_reginfo[] = { @@ -6693,30 +4381,6 @@ static const ARMCPRegInfo el3_cp_reginfo[] = { .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle3is_write }, - { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae3is_write }, - { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae3is_write }, - { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle3_write }, - { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae3_write }, - { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae3_write }, }; #ifndef CONFIG_USER_ONLY @@ -6729,7 +4393,7 @@ static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } return CP_ACCESS_OK; } @@ -6827,7 +4491,7 @@ static CPAccessResult el2_e2h_e12_access(CPUARMState *env, } /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */ if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } if (ri->orig_accessfn) { return ri->orig_accessfn(env, ri->opaque, isread); @@ -7004,7 +4668,7 @@ static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, } } else { if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } if (hcr & HCR_TID2) { return CP_ACCESS_TRAP_EL2; @@ -7034,7 +4698,7 @@ static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri, if (el < 2 && 
(arm_hcr_el2_eff(env) & HCR_TERR)) { return CP_ACCESS_TRAP_EL2; } - if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) { + if (!arm_is_el3_or_mon(env) && (env->cp15.scr_el3 & SCR_TERR)) { return CP_ACCESS_TRAP_EL3; } return CP_ACCESS_OK; @@ -7294,7 +4958,6 @@ static const ARMCPRegInfo zcr_reginfo[] = { .writefn = zcr_write, .raw_writefn = raw_write }, }; -#ifdef TARGET_AARCH64 static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { @@ -7303,7 +4966,7 @@ static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri, if (el == 0) { uint64_t sctlr = arm_sctlr(env, el); if (!(sctlr & SCTLR_EnTP2)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } } /* TODO: FEAT_FGT */ @@ -7344,7 +5007,7 @@ static void arm_reset_sve_state(CPUARMState *env) memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs)); /* Recall that FFR is stored as pregs[16]. */ memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs)); - vfp_set_fpcr(env, 0x0800009f); + vfp_set_fpsr(env, 0x0800009f); } void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask) @@ -7369,7 +5032,7 @@ void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask) * when disabled either. */ if (change & new & R_SVCR_ZA_MASK) { - memset(env->zarray, 0, sizeof(env->zarray)); + memset(&env->za_state, 0, sizeof(env->za_state)); } if (tcg_enabled()) { @@ -7388,10 +5051,14 @@ static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri, { int cur_el = arm_current_el(env); int old_len = sve_vqm1_for_el(env, cur_el); + uint64_t valid_mask = R_SMCR_LEN_MASK | R_SMCR_FA64_MASK; int new_len; QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1); - value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK; + if (cpu_isar_feature(aa64_sme2, env_archcpu(env))) { + valid_mask |= R_SMCR_EZT0_MASK; + } + value &= valid_mask; raw_write(env, ri, value); /* @@ -7459,14 +5126,6 @@ static const ARMCPRegInfo sme_reginfo[] = { .type = ARM_CP_CONST, .resetvalue = 0 }, }; -static void tlbi_aa64_paall_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush(cs); -} - static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { @@ -7484,14 +5143,6 @@ static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri) env_archcpu(env)->reset_l0gptsz); } -static void tlbi_aa64_paallos_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_all_cpus_synced(cs); -} - static const ARMCPRegInfo rme_reginfo[] = { { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6, @@ -7503,28 +5154,6 @@ static const ARMCPRegInfo rme_reginfo[] = { { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5, .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) }, - { .name = "TLBI_PAALL", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_paall_write }, - { .name = "TLBI_PAALLOS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_paallos_write }, - /* - * QEMU does not have a way to invalidate by physical address, thus - * invalidating a range of physical addresses is accomplished by - * flushing all tlb entries in the outer shareable domain, - * just like PAALLOS. 
- */ - { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_paallos_write }, - { .name = "TLBI_RPAOS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_paallos_write }, { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1, .access = PL3_W, .type = ARM_CP_NOP }, @@ -7566,106 +5195,6 @@ static const ARMCPRegInfo nmi_reginfo[] = { .writefn = aa64_allint_write, .readfn = aa64_allint_read, .resetfn = arm_cp_reset_ignore }, }; -#endif /* TARGET_AARCH64 */ - -static void define_pmu_regs(ARMCPU *cpu) -{ - /* - * v7 performance monitor control register: same implementor - * field as main ID register, and we implement four counters in - * addition to the cycle count register. - */ - unsigned int i, pmcrn = pmu_num_counters(&cpu->env); - ARMCPRegInfo pmcr = { - .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, - .access = PL0_RW, - .fgt = FGT_PMCR_EL0, - .type = ARM_CP_IO | ARM_CP_ALIAS, - .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), - .accessfn = pmreg_access, - .readfn = pmcr_read, .raw_readfn = raw_read, - .writefn = pmcr_write, .raw_writefn = raw_write, - }; - ARMCPRegInfo pmcr64 = { - .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, - .access = PL0_RW, .accessfn = pmreg_access, - .fgt = FGT_PMCR_EL0, - .type = ARM_CP_IO, - .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), - .resetvalue = cpu->isar.reset_pmcr_el0, - .readfn = pmcr_read, .raw_readfn = raw_read, - .writefn = pmcr_write, .raw_writefn = raw_write, - }; - - define_one_arm_cp_reg(cpu, &pmcr); - define_one_arm_cp_reg(cpu, &pmcr64); - for (i = 0; i < pmcrn; i++) { - char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); - char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); - char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); - char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); - ARMCPRegInfo pmev_regs[] = { - { .name = pmevcntr_name, .cp = 15, .crn = 14, - .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, - .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, - .fgt = FGT_PMEVCNTRN_EL0, - .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, - .accessfn = pmreg_access_xevcntr }, - { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), - .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr, - .type = ARM_CP_IO, - .fgt = FGT_PMEVCNTRN_EL0, - .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, - .raw_readfn = pmevcntr_rawread, - .raw_writefn = pmevcntr_rawwrite }, - { .name = pmevtyper_name, .cp = 15, .crn = 14, - .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, - .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, - .fgt = FGT_PMEVTYPERN_EL0, - .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, - .accessfn = pmreg_access }, - { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), - .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, - .fgt = FGT_PMEVTYPERN_EL0, - .type = ARM_CP_IO, - .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, - .raw_writefn = pmevtyper_rawwrite }, - }; - define_arm_cp_regs(cpu, pmev_regs); - g_free(pmevcntr_name); - g_free(pmevcntr_el0_name); - 
g_free(pmevtyper_name); - g_free(pmevtyper_el0_name); - } - if (cpu_isar_feature(aa32_pmuv3p1, cpu)) { - ARMCPRegInfo v81_pmu_regs[] = { - { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, - .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, - .fgt = FGT_PMCEIDN_EL0, - .resetvalue = extract64(cpu->pmceid0, 32, 32) }, - { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, - .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, - .fgt = FGT_PMCEIDN_EL0, - .resetvalue = extract64(cpu->pmceid1, 32, 32) }, - }; - define_arm_cp_regs(cpu, v81_pmu_regs); - } - if (cpu_isar_feature(any_pmuv3p4, cpu)) { - static const ARMCPRegInfo v84_pmmir = { - .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, - .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, - .fgt = FGT_PMMIR_EL1, - .resetvalue = 0 - }; - define_one_arm_cp_reg(cpu, &v84_pmmir); - } -} #ifndef CONFIG_USER_ONLY /* @@ -7677,7 +5206,7 @@ static void define_pmu_regs(ARMCPU *cpu) static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); - uint64_t pfr1 = cpu->isar.id_pfr1; + uint64_t pfr1 = GET_IDREG(&cpu->isar, ID_PFR1); if (env->gicv3state) { pfr1 |= 1 << 28; @@ -7688,7 +5217,7 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); - uint64_t pfr0 = cpu->isar.id_aa64pfr0; + uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0); if (env->gicv3state) { pfr0 |= 1 << 24; @@ -7719,8 +5248,8 @@ static CPAccessResult access_lor_other(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_is_secure_below_el3(env)) { - /* Access denied in secure mode. 
*/ - return CP_ACCESS_TRAP; + /* UNDEF if SCR_EL3.NS == 0 */ + return CP_ACCESS_UNDEFINED; } return access_lor_ns(env, ri, isread); } @@ -7758,7 +5287,6 @@ static const ARMCPRegInfo lor_reginfo[] = { .type = ARM_CP_CONST, .resetvalue = 0 }, }; -#ifdef TARGET_AARCH64 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { @@ -7830,210 +5358,6 @@ static const ARMCPRegInfo pauth_reginfo[] = { .fieldoffset = offsetof(CPUARMState, keys.apib.hi) }, }; -static const ARMCPRegInfo tlbirange_reginfo[] = { - { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAE1IS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAAE1IS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVALE1IS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAALE1IS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAE1OS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAAE1OS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVALE1OS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAALE1OS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAE1, - .writefn = tlbi_aa64_rvae1_write }, - { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAAE1, - .writefn = tlbi_aa64_rvae1_write }, - { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVALE1, - .writefn = tlbi_aa64_rvae1_write }, - { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAALE1, - .writefn = tlbi_aa64_rvae1_write }, - { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, 
.opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ripas2e1is_write }, - { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ripas2e1is_write }, - { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_rvae2is_write }, - { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_rvae2is_write }, - { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ripas2e1_write }, - { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ripas2e1_write }, - { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_rvae2is_write }, - { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_rvae2is_write }, - { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_rvae2_write }, - { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_rvae2_write }, - { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_rvae3is_write }, - { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_rvae3is_write }, - { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_rvae3is_write }, - { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_rvae3is_write }, - { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_rvae3_write }, - { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_rvae3_write }, -}; - -static const ARMCPRegInfo tlbios_reginfo[] = { - { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVMALLE1OS, - .writefn = tlbi_aa64_vmalle1is_write }, - { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, 
.opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1, - .fgt = FGT_TLBIVAE1OS, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIASIDE1OS, - .writefn = tlbi_aa64_vmalle1is_write }, - { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAAE1OS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVALE1OS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAALE1OS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_alle2is_write }, - { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_vae2is_write }, - { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle1is_write }, - { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_vae2is_write }, - { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle1is_write }, - { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0, - .access = PL2_W, .type = ARM_CP_NOP }, - { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3, - .access = PL2_W, .type = ARM_CP_NOP }, - { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4, - .access = PL2_W, .type = ARM_CP_NOP }, - { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7, - .access = PL2_W, .type = ARM_CP_NOP }, - { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle3is_write }, - { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae3is_write }, - { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae3is_write }, -}; - static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) { Error *err = NULL; @@ -8345,7 +5669,7 @@ static CPAccessResult 
access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri, if (hcr & HCR_TGE) { return CP_ACCESS_TRAP_EL2; } - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) { return CP_ACCESS_TRAP_EL2; @@ -8455,8 +5779,6 @@ static const ARMCPRegInfo nv2_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) }, }; -#endif /* TARGET_AARCH64 */ - static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { @@ -8465,7 +5787,7 @@ static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, if (el == 0) { uint64_t sctlr = arm_sctlr(env, el); if (!(sctlr & SCTLR_EnRCTX)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } } else if (el == 1) { uint64_t hcr = arm_hcr_el2_eff(env); @@ -8652,32 +5974,6 @@ static const ARMCPRegInfo vhe_reginfo[] = { #endif }; -#ifndef CONFIG_USER_ONLY -static const ARMCPRegInfo ats1e1_reginfo[] = { - { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, - .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .fgt = FGT_ATS1E1RP, - .accessfn = at_s1e01_access, .writefn = ats_write64 }, - { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, - .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .fgt = FGT_ATS1E1WP, - .accessfn = at_s1e01_access, .writefn = ats_write64 }, -}; - -static const ARMCPRegInfo ats1cp_reginfo[] = { - { .name = "ATS1CPRP", - .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, - .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .writefn = ats_write }, - { .name = "ATS1CPWP", - .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, - .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, - .writefn = ats_write }, -}; -#endif - /* * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field @@ -8702,6 +5998,8 @@ void register_cp_regs_for_features(ARMCPU *cpu) { /* Register all the coprocessor registers based on feature bits */ CPUARMState *env = &cpu->env; + ARMISARegisters *isar = &cpu->isar; + if (arm_feature(env, ARM_FEATURE_M)) { /* M profile has no coprocessor registers */ return; @@ -8716,6 +6014,13 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, not_v8_cp_reginfo); } +#ifndef CONFIG_USER_ONLY + if (tcg_enabled()) { + define_tlb_insn_regs(cpu); + define_at_insn_regs(cpu); + } +#endif + if (arm_feature(env, ARM_FEATURE_V6)) { /* The ID registers all have impdef reset values */ ARMCPRegInfo v6_idregs[] = { @@ -8723,7 +6028,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_pfr0 }, + .resetvalue = GET_IDREG(isar, ID_PFR0)}, /* * ID_PFR1 is not a plain ARM_CP_CONST because we don't know * the value of the GIC field until after we define these regs. 
@@ -8734,7 +6039,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .accessfn = access_aa32_tid3, #ifdef CONFIG_USER_ONLY .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_pfr1, + .resetvalue = GET_IDREG(isar, ID_PFR1), #else .type = ARM_CP_NO_RAW, .accessfn = access_aa32_tid3, @@ -8746,72 +6051,72 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_dfr0 }, + .resetvalue = GET_IDREG(isar, ID_DFR0)}, { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->id_afr0 }, + .resetvalue = GET_IDREG(isar, ID_AFR0)}, { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr0 }, + .resetvalue = GET_IDREG(isar, ID_MMFR0)}, { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr1 }, + .resetvalue = GET_IDREG(isar, ID_MMFR1)}, { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr2 }, + .resetvalue = GET_IDREG(isar, ID_MMFR2)}, { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr3 }, + .resetvalue = GET_IDREG(isar, ID_MMFR3)}, { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar0 }, + .resetvalue = GET_IDREG(isar, ID_ISAR0)}, { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar1 }, + .resetvalue = GET_IDREG(isar, ID_ISAR1)}, { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar2 }, + .resetvalue = GET_IDREG(isar, ID_ISAR2)}, { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar3 }, + .resetvalue = GET_IDREG(isar, ID_ISAR3) }, { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar4 }, + .resetvalue = GET_IDREG(isar, ID_ISAR4) }, { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar5 }, + .resetvalue = GET_IDREG(isar, ID_ISAR5) }, { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr4 }, 
+ .resetvalue = GET_IDREG(isar, ID_MMFR4)}, { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar6 }, + .resetvalue = GET_IDREG(isar, ID_ISAR6) }, }; define_arm_cp_regs(cpu, v6_idregs); define_arm_cp_regs(cpu, v6_cp_reginfo); @@ -8821,13 +6126,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (arm_feature(env, ARM_FEATURE_V6K)) { define_arm_cp_regs(cpu, v6k_cp_reginfo); } - if (arm_feature(env, ARM_FEATURE_V7MP) && - !arm_feature(env, ARM_FEATURE_PMSA)) { - define_arm_cp_regs(cpu, v7mp_cp_reginfo); - } - if (arm_feature(env, ARM_FEATURE_V7VE)) { - define_arm_cp_regs(cpu, pmovsset_cp_reginfo); - } if (arm_feature(env, ARM_FEATURE_V7)) { ARMCPRegInfo clidr = { .name = "CLIDR", .state = ARM_CP_STATE_BOTH, @@ -8835,12 +6133,11 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_tid4, .fgt = FGT_CLIDR_EL1, - .resetvalue = cpu->clidr + .resetvalue = GET_IDREG(isar, CLIDR) }; define_one_arm_cp_reg(cpu, &clidr); define_arm_cp_regs(cpu, v7_cp_reginfo); define_debug_regs(cpu); - define_pmu_regs(cpu); } else { define_arm_cp_regs(cpu, not_v7_cp_reginfo); } @@ -8866,7 +6163,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, #ifdef CONFIG_USER_ONLY .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_aa64pfr0 + .resetvalue = GET_IDREG(isar, ID_AA64PFR0) #else .type = ARM_CP_NO_RAW, .accessfn = access_aa64_tid3, @@ -8878,7 +6175,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64pfr1}, + .resetvalue = GET_IDREG(isar, ID_AA64PFR1)}, { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, @@ -8893,12 +6190,12 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64zfr0 }, + .resetvalue = GET_IDREG(isar, ID_AA64ZFR0)}, { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64smfr0 }, + .resetvalue = GET_IDREG(isar, ID_AA64SMFR0)}, { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, @@ -8913,12 +6210,12 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64dfr0 }, + .resetvalue = GET_IDREG(isar, ID_AA64DFR0) }, { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64dfr1 }, + .resetvalue = GET_IDREG(isar, ID_AA64DFR1) }, { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, @@ -8933,12 +6230,12 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, .access = PL1_R, .type 
= ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->id_aa64afr0 }, + .resetvalue = GET_IDREG(isar, ID_AA64AFR0) }, { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->id_aa64afr1 }, + .resetvalue = GET_IDREG(isar, ID_AA64AFR1) }, { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, @@ -8953,17 +6250,17 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64isar0 }, + .resetvalue = GET_IDREG(isar, ID_AA64ISAR0)}, { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64isar1 }, + .resetvalue = GET_IDREG(isar, ID_AA64ISAR1)}, { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64isar2 }, + .resetvalue = GET_IDREG(isar, ID_AA64ISAR2)}, { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, @@ -8993,22 +6290,22 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64mmfr0 }, + .resetvalue = GET_IDREG(isar, ID_AA64MMFR0)}, { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64mmfr1 }, + .resetvalue = GET_IDREG(isar, ID_AA64MMFR1) }, { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64mmfr2 }, + .resetvalue = GET_IDREG(isar, ID_AA64MMFR2) }, { .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64mmfr3 }, + .resetvalue = GET_IDREG(isar, ID_AA64MMFR3) }, { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, @@ -9080,42 +6377,22 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_pfr2 }, + .resetvalue = GET_IDREG(isar, ID_PFR2)}, { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_dfr1 }, + .resetvalue = GET_IDREG(isar, ID_DFR1)}, { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_mmfr5 }, + .resetvalue = 
GET_IDREG(isar, ID_MMFR5)}, { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = 0 }, - { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, - .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, - .fgt = FGT_PMCEIDN_EL0, - .resetvalue = extract64(cpu->pmceid0, 0, 32) }, - { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, - .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, - .fgt = FGT_PMCEIDN_EL0, - .resetvalue = cpu->pmceid0 }, - { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, - .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, - .fgt = FGT_PMCEIDN_EL0, - .resetvalue = extract64(cpu->pmceid1, 0, 32) }, - { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, - .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, - .fgt = FGT_PMCEIDN_EL0, - .resetvalue = cpu->pmceid1 }, }; #ifdef CONFIG_USER_ONLY static const ARMCPRegUserSpaceInfo v8_user_idregs[] = { @@ -9450,12 +6727,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), offsetoflow32(CPUARMState, cp15.par_ns) }, .writefn = par_write}, -#ifndef CONFIG_USER_ONLY - /* This underdecoding is safe because the reginfo is NO_RAW. */ - { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, - .access = PL1_W, .accessfn = ats_access, - .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, -#endif }; /* @@ -9861,14 +7132,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (cpu_isar_feature(aa64_pan, cpu)) { define_one_arm_cp_reg(cpu, &pan_reginfo); } -#ifndef CONFIG_USER_ONLY - if (cpu_isar_feature(aa64_ats1e1, cpu)) { - define_arm_cp_regs(cpu, ats1e1_reginfo); - } - if (cpu_isar_feature(aa32_ats1e1, cpu)) { - define_arm_cp_regs(cpu, ats1cp_reginfo); - } -#endif if (cpu_isar_feature(aa64_uao, cpu)) { define_one_arm_cp_reg(cpu, &uao_reginfo); } @@ -9899,7 +7162,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo); } -#ifdef TARGET_AARCH64 if (cpu_isar_feature(aa64_sme, cpu)) { define_arm_cp_regs(cpu, sme_reginfo); } @@ -9909,12 +7171,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (cpu_isar_feature(aa64_rndr, cpu)) { define_arm_cp_regs(cpu, rndr_reginfo); } - if (cpu_isar_feature(aa64_tlbirange, cpu)) { - define_arm_cp_regs(cpu, tlbirange_reginfo); - } - if (cpu_isar_feature(aa64_tlbios, cpu)) { - define_arm_cp_regs(cpu, tlbios_reginfo); - } /* Data Cache clean instructions up to PoP */ if (cpu_isar_feature(aa64_dcpop, cpu)) { define_one_arm_cp_reg(cpu, dcpop_reg); @@ -9966,7 +7222,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (cpu_isar_feature(aa64_nmi, cpu)) { define_arm_cp_regs(cpu, nmi_reginfo); } -#endif if (cpu_isar_feature(any_predinv, cpu)) { define_arm_cp_regs(cpu, predinv_reginfo); @@ -9976,6 +7231,8 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, ccsidr2_reginfo); } + define_pm_cpregs(cpu); + #ifndef CONFIG_USER_ONLY /* * Register redirections and aliases must be done last, @@ -10327,6 +7584,31 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, if (r->state != state && r->state != ARM_CP_STATE_BOTH) { continue; } + if ((r->type & 
ARM_CP_ADD_TLBI_NXS) && + cpu_isar_feature(aa64_xs, cpu)) { + /* + * This is a TLBI insn which has an NXS variant. The + * NXS variant is at the same encoding except that + * crn is +1, and has the same behaviour except for + * fine-grained trapping. Add the NXS insn here and + * then fall through to add the normal register. + * add_cpreg_to_hashtable() copies the cpreg struct + * and name that it is passed, so it's OK to use + * a local struct here. + */ + ARMCPRegInfo nxs_ri = *r; + g_autofree char *name = g_strdup_printf("%sNXS", r->name); + + assert(state == ARM_CP_STATE_AA64); + assert(nxs_ri.crn < 0xf); + nxs_ri.crn++; + if (nxs_ri.fgt) { + nxs_ri.fgt |= R_FGT_NXS_MASK; + } + add_cpreg_to_hashtable(cpu, &nxs_ri, opaque, state, + ARM_CP_SECSTATE_NS, + crm, opc1, opc2, name); + } if (state == ARM_CP_STATE_AA32) { /* * Under AArch32 CP registers can be common @@ -10765,7 +8047,7 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, uint64_t hcr_el2; if (arm_feature(env, ARM_FEATURE_EL3)) { - rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); + rw = arm_scr_rw_eff(env); } else { /* * Either EL2 is the highest EL (and so the EL2 register width @@ -10840,6 +8122,7 @@ void arm_log_exception(CPUState *cs) [EXCP_NMI] = "NMI", [EXCP_VINMI] = "Virtual IRQ NMI", [EXCP_VFNMI] = "Virtual FIQ NMI", + [EXCP_MON_TRAP] = "Monitor Trap", }; if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { @@ -11406,6 +8689,16 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs) mask = CPSR_A | CPSR_I | CPSR_F; offset = 0; break; + case EXCP_MON_TRAP: + new_mode = ARM_CPU_MODE_MON; + addr = 0x04; + mask = CPSR_A | CPSR_I | CPSR_F; + if (env->thumb) { + offset = 2; + } else { + offset = 4; + } + break; default: cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); return; /* Never happens. Keep compiler happy. */ @@ -11539,7 +8832,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; unsigned int new_el = env->exception.target_el; - target_ulong addr = env->cp15.vbar_el[new_el]; + vaddr addr = env->cp15.vbar_el[new_el]; unsigned int new_mode = aarch64_pstate_mode(new_el, true); unsigned int old_mode; unsigned int cur_el = arm_current_el(env); @@ -11563,7 +8856,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) switch (new_el) { case 3: - is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; + is_aa64 = arm_scr_rw_eff(env); break; case 2: hcr = arm_hcr_el2_eff(env); @@ -11861,10 +9154,20 @@ void arm_cpu_do_interrupt(CPUState *cs) uint64_t arm_sctlr(CPUARMState *env, int el) { - /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */ + /* Only EL0 needs to be adjusted for EL1&0 or EL2&0 or EL3&0 */ if (el == 0) { ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); - el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1; + switch (mmu_idx) { + case ARMMMUIdx_E20_0: + el = 2; + break; + case ARMMMUIdx_E30_0: + el = 3; + break; + default: + el = 1; + break; + } } return env->cp15.sctlr_el[el]; } @@ -12128,289 +9431,6 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, }; } -/* - * Note that signed overflow is undefined in C. The following routines are - * careful to use unsigned types where modulo arithmetic is required. - * Failure to do so _will_ break on newer gcc. - */ - -/* Signed saturating arithmetic. */ - -/* Perform 16-bit signed saturating addition. 
*/ -static inline uint16_t add16_sat(uint16_t a, uint16_t b) -{ - uint16_t res; - - res = a + b; - if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { - if (a & 0x8000) { - res = 0x8000; - } else { - res = 0x7fff; - } - } - return res; -} - -/* Perform 8-bit signed saturating addition. */ -static inline uint8_t add8_sat(uint8_t a, uint8_t b) -{ - uint8_t res; - - res = a + b; - if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { - if (a & 0x80) { - res = 0x80; - } else { - res = 0x7f; - } - } - return res; -} - -/* Perform 16-bit signed saturating subtraction. */ -static inline uint16_t sub16_sat(uint16_t a, uint16_t b) -{ - uint16_t res; - - res = a - b; - if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { - if (a & 0x8000) { - res = 0x8000; - } else { - res = 0x7fff; - } - } - return res; -} - -/* Perform 8-bit signed saturating subtraction. */ -static inline uint8_t sub8_sat(uint8_t a, uint8_t b) -{ - uint8_t res; - - res = a - b; - if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { - if (a & 0x80) { - res = 0x80; - } else { - res = 0x7f; - } - } - return res; -} - -#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); -#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); -#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); -#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); -#define PFX q - -#include "op_addsub.h" - -/* Unsigned saturating arithmetic. */ -static inline uint16_t add16_usat(uint16_t a, uint16_t b) -{ - uint16_t res; - res = a + b; - if (res < a) { - res = 0xffff; - } - return res; -} - -static inline uint16_t sub16_usat(uint16_t a, uint16_t b) -{ - if (a > b) { - return a - b; - } else { - return 0; - } -} - -static inline uint8_t add8_usat(uint8_t a, uint8_t b) -{ - uint8_t res; - res = a + b; - if (res < a) { - res = 0xff; - } - return res; -} - -static inline uint8_t sub8_usat(uint8_t a, uint8_t b) -{ - if (a > b) { - return a - b; - } else { - return 0; - } -} - -#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); -#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); -#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); -#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); -#define PFX uq - -#include "op_addsub.h" - -/* Signed modulo arithmetic. */ -#define SARITH16(a, b, n, op) do { \ - int32_t sum; \ - sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ - RESULT(sum, n, 16); \ - if (sum >= 0) \ - ge |= 3 << (n * 2); \ - } while (0) - -#define SARITH8(a, b, n, op) do { \ - int32_t sum; \ - sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ - RESULT(sum, n, 8); \ - if (sum >= 0) \ - ge |= 1 << n; \ - } while (0) - - -#define ADD16(a, b, n) SARITH16(a, b, n, +) -#define SUB16(a, b, n) SARITH16(a, b, n, -) -#define ADD8(a, b, n) SARITH8(a, b, n, +) -#define SUB8(a, b, n) SARITH8(a, b, n, -) -#define PFX s -#define ARITH_GE - -#include "op_addsub.h" - -/* Unsigned modulo arithmetic. 
*/ -#define ADD16(a, b, n) do { \ - uint32_t sum; \ - sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ - RESULT(sum, n, 16); \ - if ((sum >> 16) == 1) \ - ge |= 3 << (n * 2); \ - } while (0) - -#define ADD8(a, b, n) do { \ - uint32_t sum; \ - sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ - RESULT(sum, n, 8); \ - if ((sum >> 8) == 1) \ - ge |= 1 << n; \ - } while (0) - -#define SUB16(a, b, n) do { \ - uint32_t sum; \ - sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ - RESULT(sum, n, 16); \ - if ((sum >> 16) == 0) \ - ge |= 3 << (n * 2); \ - } while (0) - -#define SUB8(a, b, n) do { \ - uint32_t sum; \ - sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ - RESULT(sum, n, 8); \ - if ((sum >> 8) == 0) \ - ge |= 1 << n; \ - } while (0) - -#define PFX u -#define ARITH_GE - -#include "op_addsub.h" - -/* Halved signed arithmetic. */ -#define ADD16(a, b, n) \ - RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) -#define SUB16(a, b, n) \ - RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) -#define ADD8(a, b, n) \ - RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) -#define SUB8(a, b, n) \ - RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) -#define PFX sh - -#include "op_addsub.h" - -/* Halved unsigned arithmetic. */ -#define ADD16(a, b, n) \ - RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) -#define SUB16(a, b, n) \ - RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) -#define ADD8(a, b, n) \ - RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) -#define SUB8(a, b, n) \ - RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) -#define PFX uh - -#include "op_addsub.h" - -static inline uint8_t do_usad(uint8_t a, uint8_t b) -{ - if (a > b) { - return a - b; - } else { - return b - a; - } -} - -/* Unsigned sum of absolute byte differences. */ -uint32_t HELPER(usad8)(uint32_t a, uint32_t b) -{ - uint32_t sum; - sum = do_usad(a, b); - sum += do_usad(a >> 8, b >> 8); - sum += do_usad(a >> 16, b >> 16); - sum += do_usad(a >> 24, b >> 24); - return sum; -} - -/* For ARMv6 SEL instruction. */ -uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) -{ - uint32_t mask; - - mask = 0; - if (flags & 1) { - mask |= 0xff; - } - if (flags & 2) { - mask |= 0xff00; - } - if (flags & 4) { - mask |= 0xff0000; - } - if (flags & 8) { - mask |= 0xff000000; - } - return (a & mask) | (b & ~mask); -} - -/* - * CRC helpers. - * The upper bytes of val (above the number specified by 'bytes') must have - * been zeroed out by the caller. - */ -uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) -{ - uint8_t buf[4]; - - stl_le_p(buf, val); - - /* zlib crc32 converts the accumulator and output to one's complement. */ - return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; -} - -uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) -{ - uint8_t buf[4]; - - stl_le_p(buf, val); - - /* Linux crc32c converts the output to one's complement. 
*/ - return crc32c(acc, buf, bytes) ^ 0xffffffff; -} /* * Return the exception level to which FP-disabled exceptions should @@ -12532,6 +9552,7 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) switch (mmu_idx) { case ARMMMUIdx_E10_0: case ARMMMUIdx_E20_0: + case ARMMMUIdx_E30_0: return 0; case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: @@ -12541,6 +9562,7 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) case ARMMMUIdx_E20_2_PAN: return 2; case ARMMMUIdx_E3: + case ARMMMUIdx_E30_3_PAN: return 3; default: g_assert_not_reached(); @@ -12569,6 +9591,9 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) hcr = arm_hcr_el2_eff(env); if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { idx = ARMMMUIdx_E20_0; + } else if (arm_is_secure_below_el3(env) && + !arm_el_is_aa64(env, 3)) { + idx = ARMMMUIdx_E30_0; } else { idx = ARMMMUIdx_E10_0; } @@ -12593,6 +9618,9 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) } break; case 3: + if (!arm_el_is_aa64(env, 3) && arm_pan_enabled(env)) { + return ARMMMUIdx_E30_3_PAN; + } return ARMMMUIdx_E3; default: g_assert_not_reached(); @@ -12606,116 +9634,6 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env) return arm_mmu_idx_el(env, arm_current_el(env)); } -static bool mve_no_pred(CPUARMState *env) -{ - /* - * Return true if there is definitely no predication of MVE - * instructions by VPR or LTPSIZE. (Returning false even if there - * isn't any predication is OK; generated code will just be - * a little worse.) - * If the CPU does not implement MVE then this TB flag is always 0. - * - * NOTE: if you change this logic, the "recalculate s->mve_no_pred" - * logic in gen_update_fp_context() needs to be updated to match. - * - * We do not include the effect of the ECI bits here -- they are - * tracked in other TB flags. This simplifies the logic for - * "when did we emit code that changes the MVE_NO_PRED TB flag - * and thus need to end the TB?". - */ - if (cpu_isar_feature(aa32_mve, env_archcpu(env))) { - return false; - } - if (env->v7m.vpr) { - return false; - } - if (env->v7m.ltpsize < 4) { - return false; - } - return true; -} - -void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *pflags) -{ - CPUARMTBFlags flags; - - assert_hflags_rebuild_correctly(env); - flags = env->hflags; - - if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) { - *pc = env->pc; - if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { - DP_TBFLAG_A64(flags, BTYPE, env->btype); - } - } else { - *pc = env->regs[15]; - - if (arm_feature(env, ARM_FEATURE_M)) { - if (arm_feature(env, ARM_FEATURE_M_SECURITY) && - FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) - != env->v7m.secure) { - DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1); - } - - if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) && - (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) || - (env->v7m.secure && - !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) { - /* - * ASPEN is set, but FPCA/SFPA indicate that there is no - * active FP context; we must create a new FP context before - * executing any FP insn. - */ - DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1); - } - - bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; - if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { - DP_TBFLAG_M32(flags, LSPACT, 1); - } - - if (mve_no_pred(env)) { - DP_TBFLAG_M32(flags, MVE_NO_PRED, 1); - } - } else { - /* - * Note that XSCALE_CPAR shares bits with VECSTRIDE. - * Note that VECLEN+VECSTRIDE are RES0 for M-profile. 
- */ - if (arm_feature(env, ARM_FEATURE_XSCALE)) { - DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar); - } else { - DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len); - DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride); - } - if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) { - DP_TBFLAG_A32(flags, VFPEN, 1); - } - } - - DP_TBFLAG_AM32(flags, THUMB, env->thumb); - DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits); - } - - /* - * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine - * states defined in the ARM ARM for software singlestep: - * SS_ACTIVE PSTATE.SS State - * 0 x Inactive (the TB flag for SS is always 0) - * 1 0 Active-pending - * 1 1 Active-not-pending - * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB. - */ - if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) { - DP_TBFLAG_ANY(flags, PSTATE__SS, 1); - } - - *pflags = flags.flags; - *cs_base = flags.flags2; -} - -#ifdef TARGET_AARCH64 /* * The manual says that when SVE is enabled and VQ is widened the * implementation is allowed to zero the previously inaccessible @@ -12830,7 +9748,6 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, aarch64_sve_narrow_vq(env, new_len + 1); } } -#endif #ifndef CONFIG_USER_ONLY ARMSecuritySpace arm_security_space(CPUARMState *env) diff --git a/target/arm/helper.h b/target/arm/helper.h index 970d059dec5..f340a49a28a 100644 --- a/target/arm/helper.h +++ b/target/arm/helper.h @@ -1,1103 +1,6 @@ -DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32) -DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32) +/* SPDX-License-Identifier: GPL-2.0-or-later */ -DEF_HELPER_3(add_setq, i32, env, i32, i32) -DEF_HELPER_3(add_saturate, i32, env, i32, i32) -DEF_HELPER_3(sub_saturate, i32, env, i32, i32) -DEF_HELPER_3(add_usaturate, i32, env, i32, i32) -DEF_HELPER_3(sub_usaturate, i32, env, i32, i32) -DEF_HELPER_FLAGS_3(sdiv, TCG_CALL_NO_RWG, s32, env, s32, s32) -DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_RWG, i32, env, i32, i32) -DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32) - -#define PAS_OP(pfx) \ - DEF_HELPER_3(pfx ## add8, i32, i32, i32, ptr) \ - DEF_HELPER_3(pfx ## sub8, i32, i32, i32, ptr) \ - DEF_HELPER_3(pfx ## sub16, i32, i32, i32, ptr) \ - DEF_HELPER_3(pfx ## add16, i32, i32, i32, ptr) \ - DEF_HELPER_3(pfx ## addsubx, i32, i32, i32, ptr) \ - DEF_HELPER_3(pfx ## subaddx, i32, i32, i32, ptr) - -PAS_OP(s) -PAS_OP(u) -#undef PAS_OP - -#define PAS_OP(pfx) \ - DEF_HELPER_2(pfx ## add8, i32, i32, i32) \ - DEF_HELPER_2(pfx ## sub8, i32, i32, i32) \ - DEF_HELPER_2(pfx ## sub16, i32, i32, i32) \ - DEF_HELPER_2(pfx ## add16, i32, i32, i32) \ - DEF_HELPER_2(pfx ## addsubx, i32, i32, i32) \ - DEF_HELPER_2(pfx ## subaddx, i32, i32, i32) -PAS_OP(q) -PAS_OP(sh) -PAS_OP(uq) -PAS_OP(uh) -#undef PAS_OP - -DEF_HELPER_3(ssat, i32, env, i32, i32) -DEF_HELPER_3(usat, i32, env, i32, i32) -DEF_HELPER_3(ssat16, i32, env, i32, i32) -DEF_HELPER_3(usat16, i32, env, i32, i32) - -DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32) - -DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE, - i32, i32, i32, i32) -DEF_HELPER_2(exception_internal, noreturn, env, i32) -DEF_HELPER_3(exception_with_syndrome, noreturn, env, i32, i32) -DEF_HELPER_4(exception_with_syndrome_el, noreturn, env, i32, i32, i32) -DEF_HELPER_2(exception_bkpt_insn, noreturn, env, i32) -DEF_HELPER_2(exception_swstep, noreturn, env, i32) -DEF_HELPER_2(exception_pc_alignment, noreturn, env, tl) -DEF_HELPER_1(setend, void, env) -DEF_HELPER_2(wfi, void, env, i32) -DEF_HELPER_1(wfe, void, env) 
-DEF_HELPER_2(wfit, void, env, i64) -DEF_HELPER_1(yield, void, env) -DEF_HELPER_1(pre_hvc, void, env) -DEF_HELPER_2(pre_smc, void, env, i32) -DEF_HELPER_1(vesb, void, env) - -DEF_HELPER_3(cpsr_write, void, env, i32, i32) -DEF_HELPER_2(cpsr_write_eret, void, env, i32) -DEF_HELPER_1(cpsr_read, i32, env) - -DEF_HELPER_3(v7m_msr, void, env, i32, i32) -DEF_HELPER_2(v7m_mrs, i32, env, i32) - -DEF_HELPER_2(v7m_bxns, void, env, i32) -DEF_HELPER_2(v7m_blxns, void, env, i32) - -DEF_HELPER_3(v7m_tt, i32, env, i32, i32) - -DEF_HELPER_1(v7m_preserve_fp_state, void, env) - -DEF_HELPER_2(v7m_vlstm, void, env, i32) -DEF_HELPER_2(v7m_vlldm, void, env, i32) - -DEF_HELPER_2(v8m_stackcheck, void, env, i32) - -DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32) - -DEF_HELPER_4(access_check_cp_reg, cptr, env, i32, i32, i32) -DEF_HELPER_FLAGS_2(lookup_cp_reg, TCG_CALL_NO_RWG_SE, cptr, env, i32) -DEF_HELPER_FLAGS_2(tidcp_el0, TCG_CALL_NO_WG, void, env, i32) -DEF_HELPER_FLAGS_2(tidcp_el1, TCG_CALL_NO_WG, void, env, i32) -DEF_HELPER_3(set_cp_reg, void, env, cptr, i32) -DEF_HELPER_2(get_cp_reg, i32, env, cptr) -DEF_HELPER_3(set_cp_reg64, void, env, cptr, i64) -DEF_HELPER_2(get_cp_reg64, i64, env, cptr) - -DEF_HELPER_2(get_r13_banked, i32, env, i32) -DEF_HELPER_3(set_r13_banked, void, env, i32, i32) - -DEF_HELPER_3(mrs_banked, i32, env, i32, i32) -DEF_HELPER_4(msr_banked, void, env, i32, i32, i32) - -DEF_HELPER_2(get_user_reg, i32, env, i32) -DEF_HELPER_3(set_user_reg, void, env, i32, i32) - -DEF_HELPER_FLAGS_1(rebuild_hflags_m32_newel, TCG_CALL_NO_RWG, void, env) -DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int) -DEF_HELPER_FLAGS_1(rebuild_hflags_a32_newel, TCG_CALL_NO_RWG, void, env) -DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int) -DEF_HELPER_FLAGS_2(rebuild_hflags_a64, TCG_CALL_NO_RWG, void, env, int) - -DEF_HELPER_FLAGS_5(probe_access, TCG_CALL_NO_WG, void, env, tl, i32, i32, i32) - -DEF_HELPER_1(vfp_get_fpscr, i32, env) -DEF_HELPER_2(vfp_set_fpscr, void, env, i32) - -DEF_HELPER_3(vfp_addh, f16, f16, f16, ptr) -DEF_HELPER_3(vfp_adds, f32, f32, f32, ptr) -DEF_HELPER_3(vfp_addd, f64, f64, f64, ptr) -DEF_HELPER_3(vfp_subh, f16, f16, f16, ptr) -DEF_HELPER_3(vfp_subs, f32, f32, f32, ptr) -DEF_HELPER_3(vfp_subd, f64, f64, f64, ptr) -DEF_HELPER_3(vfp_mulh, f16, f16, f16, ptr) -DEF_HELPER_3(vfp_muls, f32, f32, f32, ptr) -DEF_HELPER_3(vfp_muld, f64, f64, f64, ptr) -DEF_HELPER_3(vfp_divh, f16, f16, f16, ptr) -DEF_HELPER_3(vfp_divs, f32, f32, f32, ptr) -DEF_HELPER_3(vfp_divd, f64, f64, f64, ptr) -DEF_HELPER_3(vfp_maxh, f16, f16, f16, ptr) -DEF_HELPER_3(vfp_maxs, f32, f32, f32, ptr) -DEF_HELPER_3(vfp_maxd, f64, f64, f64, ptr) -DEF_HELPER_3(vfp_minh, f16, f16, f16, ptr) -DEF_HELPER_3(vfp_mins, f32, f32, f32, ptr) -DEF_HELPER_3(vfp_mind, f64, f64, f64, ptr) -DEF_HELPER_3(vfp_maxnumh, f16, f16, f16, ptr) -DEF_HELPER_3(vfp_maxnums, f32, f32, f32, ptr) -DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, ptr) -DEF_HELPER_3(vfp_minnumh, f16, f16, f16, ptr) -DEF_HELPER_3(vfp_minnums, f32, f32, f32, ptr) -DEF_HELPER_3(vfp_minnumd, f64, f64, f64, ptr) -DEF_HELPER_2(vfp_sqrth, f16, f16, env) -DEF_HELPER_2(vfp_sqrts, f32, f32, env) -DEF_HELPER_2(vfp_sqrtd, f64, f64, env) -DEF_HELPER_3(vfp_cmph, void, f16, f16, env) -DEF_HELPER_3(vfp_cmps, void, f32, f32, env) -DEF_HELPER_3(vfp_cmpd, void, f64, f64, env) -DEF_HELPER_3(vfp_cmpeh, void, f16, f16, env) -DEF_HELPER_3(vfp_cmpes, void, f32, f32, env) -DEF_HELPER_3(vfp_cmped, void, f64, f64, env) - -DEF_HELPER_2(vfp_fcvtds, f64, f32, env) 
-DEF_HELPER_2(vfp_fcvtsd, f32, f64, env) -DEF_HELPER_FLAGS_2(bfcvt, TCG_CALL_NO_RWG, i32, f32, ptr) -DEF_HELPER_FLAGS_2(bfcvt_pair, TCG_CALL_NO_RWG, i32, i64, ptr) - -DEF_HELPER_2(vfp_uitoh, f16, i32, ptr) -DEF_HELPER_2(vfp_uitos, f32, i32, ptr) -DEF_HELPER_2(vfp_uitod, f64, i32, ptr) -DEF_HELPER_2(vfp_sitoh, f16, i32, ptr) -DEF_HELPER_2(vfp_sitos, f32, i32, ptr) -DEF_HELPER_2(vfp_sitod, f64, i32, ptr) - -DEF_HELPER_2(vfp_touih, i32, f16, ptr) -DEF_HELPER_2(vfp_touis, i32, f32, ptr) -DEF_HELPER_2(vfp_touid, i32, f64, ptr) -DEF_HELPER_2(vfp_touizh, i32, f16, ptr) -DEF_HELPER_2(vfp_touizs, i32, f32, ptr) -DEF_HELPER_2(vfp_touizd, i32, f64, ptr) -DEF_HELPER_2(vfp_tosih, s32, f16, ptr) -DEF_HELPER_2(vfp_tosis, s32, f32, ptr) -DEF_HELPER_2(vfp_tosid, s32, f64, ptr) -DEF_HELPER_2(vfp_tosizh, s32, f16, ptr) -DEF_HELPER_2(vfp_tosizs, s32, f32, ptr) -DEF_HELPER_2(vfp_tosizd, s32, f64, ptr) - -DEF_HELPER_3(vfp_toshh_round_to_zero, i32, f16, i32, ptr) -DEF_HELPER_3(vfp_toslh_round_to_zero, i32, f16, i32, ptr) -DEF_HELPER_3(vfp_touhh_round_to_zero, i32, f16, i32, ptr) -DEF_HELPER_3(vfp_toulh_round_to_zero, i32, f16, i32, ptr) -DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, ptr) -DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, ptr) -DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, ptr) -DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, ptr) -DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_touhh, i32, f16, i32, ptr) -DEF_HELPER_3(vfp_toshh, i32, f16, i32, ptr) -DEF_HELPER_3(vfp_toulh, i32, f16, i32, ptr) -DEF_HELPER_3(vfp_toslh, i32, f16, i32, ptr) -DEF_HELPER_3(vfp_touqh, i64, f16, i32, ptr) -DEF_HELPER_3(vfp_tosqh, i64, f16, i32, ptr) -DEF_HELPER_3(vfp_toshs, i32, f32, i32, ptr) -DEF_HELPER_3(vfp_tosls, i32, f32, i32, ptr) -DEF_HELPER_3(vfp_tosqs, i64, f32, i32, ptr) -DEF_HELPER_3(vfp_touhs, i32, f32, i32, ptr) -DEF_HELPER_3(vfp_touls, i32, f32, i32, ptr) -DEF_HELPER_3(vfp_touqs, i64, f32, i32, ptr) -DEF_HELPER_3(vfp_toshd, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_tosld, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_tosqd, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_touhd, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_tould, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_touqd, i64, f64, i32, ptr) -DEF_HELPER_3(vfp_shtos, f32, i32, i32, ptr) -DEF_HELPER_3(vfp_sltos, f32, i32, i32, ptr) -DEF_HELPER_3(vfp_sqtos, f32, i64, i32, ptr) -DEF_HELPER_3(vfp_uhtos, f32, i32, i32, ptr) -DEF_HELPER_3(vfp_ultos, f32, i32, i32, ptr) -DEF_HELPER_3(vfp_uqtos, f32, i64, i32, ptr) -DEF_HELPER_3(vfp_shtod, f64, i64, i32, ptr) -DEF_HELPER_3(vfp_sltod, f64, i64, i32, ptr) -DEF_HELPER_3(vfp_sqtod, f64, i64, i32, ptr) -DEF_HELPER_3(vfp_uhtod, f64, i64, i32, ptr) -DEF_HELPER_3(vfp_ultod, f64, i64, i32, ptr) -DEF_HELPER_3(vfp_uqtod, f64, i64, i32, ptr) -DEF_HELPER_3(vfp_shtoh, f16, i32, i32, ptr) -DEF_HELPER_3(vfp_uhtoh, f16, i32, i32, ptr) -DEF_HELPER_3(vfp_sltoh, f16, i32, i32, ptr) -DEF_HELPER_3(vfp_ultoh, f16, i32, i32, ptr) -DEF_HELPER_3(vfp_sqtoh, f16, i64, i32, ptr) -DEF_HELPER_3(vfp_uqtoh, f16, i64, i32, ptr) - -DEF_HELPER_3(vfp_shtos_round_to_nearest, f32, i32, i32, ptr) -DEF_HELPER_3(vfp_sltos_round_to_nearest, f32, i32, i32, ptr) -DEF_HELPER_3(vfp_uhtos_round_to_nearest, f32, i32, i32, ptr) -DEF_HELPER_3(vfp_ultos_round_to_nearest, f32, i32, i32, ptr) -DEF_HELPER_3(vfp_shtod_round_to_nearest, f64, i64, i32, ptr) 
-DEF_HELPER_3(vfp_sltod_round_to_nearest, f64, i64, i32, ptr) -DEF_HELPER_3(vfp_uhtod_round_to_nearest, f64, i64, i32, ptr) -DEF_HELPER_3(vfp_ultod_round_to_nearest, f64, i64, i32, ptr) -DEF_HELPER_3(vfp_shtoh_round_to_nearest, f16, i32, i32, ptr) -DEF_HELPER_3(vfp_uhtoh_round_to_nearest, f16, i32, i32, ptr) -DEF_HELPER_3(vfp_sltoh_round_to_nearest, f16, i32, i32, ptr) -DEF_HELPER_3(vfp_ultoh_round_to_nearest, f16, i32, i32, ptr) - -DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, ptr) - -DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f32, TCG_CALL_NO_RWG, f32, f16, ptr, i32) -DEF_HELPER_FLAGS_3(vfp_fcvt_f32_to_f16, TCG_CALL_NO_RWG, f16, f32, ptr, i32) -DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, f16, ptr, i32) -DEF_HELPER_FLAGS_3(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, f16, f64, ptr, i32) - -DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr) -DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr) -DEF_HELPER_4(vfp_muladdh, f16, f16, f16, f16, ptr) - -DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, ptr) -DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, ptr) -DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, ptr) -DEF_HELPER_FLAGS_2(rsqrte_f16, TCG_CALL_NO_RWG, f16, f16, ptr) -DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, ptr) -DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, ptr) -DEF_HELPER_FLAGS_1(recpe_u32, TCG_CALL_NO_RWG, i32, i32) -DEF_HELPER_FLAGS_1(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32) -DEF_HELPER_FLAGS_4(neon_tbl, TCG_CALL_NO_RWG, i64, env, i32, i64, i64) - -DEF_HELPER_3(shl_cc, i32, env, i32, i32) -DEF_HELPER_3(shr_cc, i32, env, i32, i32) -DEF_HELPER_3(sar_cc, i32, env, i32, i32) -DEF_HELPER_3(ror_cc, i32, env, i32, i32) - -DEF_HELPER_FLAGS_2(rinth_exact, TCG_CALL_NO_RWG, f16, f16, ptr) -DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, ptr) -DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, ptr) -DEF_HELPER_FLAGS_2(rinth, TCG_CALL_NO_RWG, f16, f16, ptr) -DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, ptr) -DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, ptr) - -DEF_HELPER_FLAGS_2(vjcvt, TCG_CALL_NO_RWG, i32, f64, env) -DEF_HELPER_FLAGS_2(fjcvtzs, TCG_CALL_NO_RWG, i64, f64, ptr) - -DEF_HELPER_FLAGS_3(check_hcr_el2_trap, TCG_CALL_NO_WG, void, env, i32, i32) - -/* neon_helper.c */ -DEF_HELPER_2(neon_pmin_u8, i32, i32, i32) -DEF_HELPER_2(neon_pmin_s8, i32, i32, i32) -DEF_HELPER_2(neon_pmin_u16, i32, i32, i32) -DEF_HELPER_2(neon_pmin_s16, i32, i32, i32) -DEF_HELPER_2(neon_pmax_u8, i32, i32, i32) -DEF_HELPER_2(neon_pmax_s8, i32, i32, i32) -DEF_HELPER_2(neon_pmax_u16, i32, i32, i32) -DEF_HELPER_2(neon_pmax_s16, i32, i32, i32) - -DEF_HELPER_2(neon_shl_u16, i32, i32, i32) -DEF_HELPER_2(neon_shl_s16, i32, i32, i32) -DEF_HELPER_2(neon_rshl_u8, i32, i32, i32) -DEF_HELPER_2(neon_rshl_s8, i32, i32, i32) -DEF_HELPER_2(neon_rshl_u16, i32, i32, i32) -DEF_HELPER_2(neon_rshl_s16, i32, i32, i32) -DEF_HELPER_2(neon_rshl_u32, i32, i32, i32) -DEF_HELPER_2(neon_rshl_s32, i32, i32, i32) -DEF_HELPER_2(neon_rshl_u64, i64, i64, i64) -DEF_HELPER_2(neon_rshl_s64, i64, i64, i64) -DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32) -DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32) -DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32) -DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32) -DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32) -DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32) -DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64) -DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64) -DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32) 
-DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32) -DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32) -DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64) -DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32) -DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32) -DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32) -DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32) -DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32) -DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32) -DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64) -DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64) -DEF_HELPER_FLAGS_5(neon_sqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_sqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_sqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_sqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_uqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_uqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_uqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_uqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_sqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_sqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_sqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_sqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_uqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_uqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_uqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_uqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_srshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_srshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_srshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_srshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_urshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_urshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_urshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_urshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_2(neon_add_u8, i32, i32, i32) -DEF_HELPER_2(neon_add_u16, i32, i32, i32) -DEF_HELPER_2(neon_sub_u8, i32, i32, i32) -DEF_HELPER_2(neon_sub_u16, i32, i32, i32) -DEF_HELPER_2(neon_mul_u8, i32, i32, i32) -DEF_HELPER_2(neon_mul_u16, i32, i32, i32) - -DEF_HELPER_2(neon_tst_u8, i32, i32, i32) -DEF_HELPER_2(neon_tst_u16, i32, i32, i32) -DEF_HELPER_2(neon_tst_u32, i32, i32, i32) - -DEF_HELPER_1(neon_clz_u8, i32, i32) -DEF_HELPER_1(neon_clz_u16, i32, i32) -DEF_HELPER_1(neon_cls_s8, i32, i32) -DEF_HELPER_1(neon_cls_s16, i32, i32) -DEF_HELPER_1(neon_cls_s32, i32, i32) -DEF_HELPER_1(neon_cnt_u8, i32, i32) -DEF_HELPER_FLAGS_1(neon_rbit_u8, TCG_CALL_NO_RWG_SE, i32, i32) - -DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32) -DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32) -DEF_HELPER_4(neon_qrdmlah_s16, i32, env, i32, i32, i32) -DEF_HELPER_4(neon_qrdmlsh_s16, i32, env, i32, i32, i32) -DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32) -DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32) -DEF_HELPER_4(neon_qrdmlah_s32, i32, env, s32, s32, s32) 
-DEF_HELPER_4(neon_qrdmlsh_s32, i32, env, s32, s32, s32) - -DEF_HELPER_1(neon_narrow_u8, i32, i64) -DEF_HELPER_1(neon_narrow_u16, i32, i64) -DEF_HELPER_2(neon_unarrow_sat8, i32, env, i64) -DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64) -DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64) -DEF_HELPER_2(neon_unarrow_sat16, i32, env, i64) -DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64) -DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64) -DEF_HELPER_2(neon_unarrow_sat32, i32, env, i64) -DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64) -DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64) -DEF_HELPER_1(neon_narrow_high_u8, i32, i64) -DEF_HELPER_1(neon_narrow_high_u16, i32, i64) -DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64) -DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64) -DEF_HELPER_1(neon_widen_u8, i64, i32) -DEF_HELPER_1(neon_widen_s8, i64, i32) -DEF_HELPER_1(neon_widen_u16, i64, i32) -DEF_HELPER_1(neon_widen_s16, i64, i32) - -DEF_HELPER_2(neon_addl_u16, i64, i64, i64) -DEF_HELPER_2(neon_addl_u32, i64, i64, i64) -DEF_HELPER_2(neon_paddl_u16, i64, i64, i64) -DEF_HELPER_2(neon_paddl_u32, i64, i64, i64) -DEF_HELPER_2(neon_subl_u16, i64, i64, i64) -DEF_HELPER_2(neon_subl_u32, i64, i64, i64) -DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64) -DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64) -DEF_HELPER_2(neon_abdl_u16, i64, i32, i32) -DEF_HELPER_2(neon_abdl_s16, i64, i32, i32) -DEF_HELPER_2(neon_abdl_u32, i64, i32, i32) -DEF_HELPER_2(neon_abdl_s32, i64, i32, i32) -DEF_HELPER_2(neon_abdl_u64, i64, i32, i32) -DEF_HELPER_2(neon_abdl_s64, i64, i32, i32) -DEF_HELPER_2(neon_mull_u8, i64, i32, i32) -DEF_HELPER_2(neon_mull_s8, i64, i32, i32) -DEF_HELPER_2(neon_mull_u16, i64, i32, i32) -DEF_HELPER_2(neon_mull_s16, i64, i32, i32) - -DEF_HELPER_1(neon_negl_u16, i64, i64) -DEF_HELPER_1(neon_negl_u32, i64, i64) - -DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32) -DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32) -DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32) -DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64) -DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32) -DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32) -DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32) -DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64) - -DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, ptr) -DEF_HELPER_3(neon_cge_f32, i32, i32, i32, ptr) -DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, ptr) -DEF_HELPER_3(neon_acge_f32, i32, i32, i32, ptr) -DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, ptr) -DEF_HELPER_3(neon_acge_f64, i64, i64, i64, ptr) -DEF_HELPER_3(neon_acgt_f64, i64, i64, i64, ptr) - -/* iwmmxt_helper.c */ -DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64) -DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64) -DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64) -DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64) -DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64) - -#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \ -DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \ -DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \ -DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \ - -DEF_IWMMXT_HELPER_SIZE_ENV(unpackl) -DEF_IWMMXT_HELPER_SIZE_ENV(unpackh) - -DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64) 
-DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64) - -DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq) -DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu) -DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts) - -DEF_IWMMXT_HELPER_SIZE_ENV(mins) -DEF_IWMMXT_HELPER_SIZE_ENV(minu) -DEF_IWMMXT_HELPER_SIZE_ENV(maxs) -DEF_IWMMXT_HELPER_SIZE_ENV(maxu) - -DEF_IWMMXT_HELPER_SIZE_ENV(subn) -DEF_IWMMXT_HELPER_SIZE_ENV(addn) -DEF_IWMMXT_HELPER_SIZE_ENV(subu) -DEF_IWMMXT_HELPER_SIZE_ENV(addu) -DEF_IWMMXT_HELPER_SIZE_ENV(subs) -DEF_IWMMXT_HELPER_SIZE_ENV(adds) - -DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64) - -DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32) -DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32) - -DEF_HELPER_1(iwmmxt_bcstb, i64, i32) -DEF_HELPER_1(iwmmxt_bcstw, i64, i32) -DEF_HELPER_1(iwmmxt_bcstl, i64, i32) - -DEF_HELPER_1(iwmmxt_addcb, i64, i64) -DEF_HELPER_1(iwmmxt_addcw, i64, i64) -DEF_HELPER_1(iwmmxt_addcl, i64, i64) - -DEF_HELPER_1(iwmmxt_msbb, i32, i64) -DEF_HELPER_1(iwmmxt_msbw, i32, i64) -DEF_HELPER_1(iwmmxt_msbl, i32, i64) - -DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32) - -DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64) - -DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32) -DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32) -DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32) - -DEF_HELPER_FLAGS_2(neon_unzip8, TCG_CALL_NO_RWG, void, ptr, ptr) -DEF_HELPER_FLAGS_2(neon_unzip16, TCG_CALL_NO_RWG, void, ptr, ptr) -DEF_HELPER_FLAGS_2(neon_qunzip8, TCG_CALL_NO_RWG, void, ptr, ptr) -DEF_HELPER_FLAGS_2(neon_qunzip16, TCG_CALL_NO_RWG, void, ptr, ptr) -DEF_HELPER_FLAGS_2(neon_qunzip32, TCG_CALL_NO_RWG, void, ptr, ptr) -DEF_HELPER_FLAGS_2(neon_zip8, TCG_CALL_NO_RWG, void, ptr, ptr) -DEF_HELPER_FLAGS_2(neon_zip16, TCG_CALL_NO_RWG, void, ptr, ptr) -DEF_HELPER_FLAGS_2(neon_qzip8, TCG_CALL_NO_RWG, void, ptr, ptr) -DEF_HELPER_FLAGS_2(neon_qzip16, TCG_CALL_NO_RWG, void, ptr, ptr) -DEF_HELPER_FLAGS_2(neon_qzip32, TCG_CALL_NO_RWG, void, ptr, ptr) - -DEF_HELPER_FLAGS_4(crypto_aese, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_aesd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(crypto_aesmc, TCG_CALL_NO_RWG, void, 
ptr, ptr, i32) -DEF_HELPER_FLAGS_3(crypto_aesimc, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(crypto_sha1su0, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_sha1c, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_sha1p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_sha1m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(crypto_sha1h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(crypto_sha1su1, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(crypto_sha256h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_sha512su1, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(crypto_sm3tt1a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_sm3tt1b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_sm3tt2a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_sm3tt2b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_sm3partw1, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_sm3partw2, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(crypto_sm4e, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(crypto_sm4ekey, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(crypto_rax1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) -DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) - -DEF_HELPER_FLAGS_5(gvec_qrdmlah_s16, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s16, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(sve2_sqrdmlah_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(sve2_sqrdmlah_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(sve2_sqrdmlah_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_usdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_sdot_idx_b, 
TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_udot_idx_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sdot_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_udot_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sudot_idx_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_usdot_idx_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_6(gvec_fcmlah, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_6(gvec_fcmlah_idx, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_6(gvec_fcmlas, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_6(gvec_fcmlas_idx, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_6(gvec_fcmlad, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_sstoh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_sitos, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_ustoh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_uitos, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_tosszh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_tosizs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_touszh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_touizs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_vcvt_sf, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_vcvt_uf, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_vcvt_fs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_vcvt_fu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_vcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_vcvt_uh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_vcvt_hs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_vcvt_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ss, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_vcvt_rm_us, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_vcvt_rm_uh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_vrint_rm_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_vrint_rm_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_vrintx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_vrintx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_frsqrte_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_frsqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_frsqrte_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - 
-DEF_HELPER_FLAGS_4(gvec_fcgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_fcgt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_fcge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_fcge0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_fceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_fceq0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_fcle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_fcle0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_fclt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_fclt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fceq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fceq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fceq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fcge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fcge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fcge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fcgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fcgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fcgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_facge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_facge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_facge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_facgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_facgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_facgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fmaxnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, 
ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmaxnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmaxnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fminnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_recps_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_recps_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fmla_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fmls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_vfma_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_vfma_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_vfma_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_ftsmul_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fmul_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmul_idx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmul_idx_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_6(gvec_fmla_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_6(gvec_fmla_idx_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_uqadd_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_uqadd_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_uqadd_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_uqadd_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sqadd_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sqadd_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sqadd_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sqadd_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_uqsub_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_uqsub_h, 
TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_uqsub_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_uqsub_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sqsub_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sqsub_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_usqadd_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_usqadd_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_usqadd_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_usqadd_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_suqadd_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_suqadd_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_suqadd_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_suqadd_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fmlal_a32, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmlal_a64, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a32, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a64, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_2(frint32_s, TCG_CALL_NO_RWG, f32, f32, ptr) -DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, ptr) -DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, ptr) -DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, ptr) - -DEF_HELPER_FLAGS_3(gvec_ceq0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_ceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_clt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_clt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_cle0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_cle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_cgt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_cgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_cge0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_cge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_smulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_smulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_smulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_smulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_umulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_umulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_umulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_umulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, 
i32) -DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_3(gvec_ssra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_ssra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_ssra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_ssra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_3(gvec_usra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_usra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_usra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_usra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_3(gvec_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_3(gvec_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_3(gvec_srsra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_srsra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_srsra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_srsra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_3(gvec_ursra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_ursra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_ursra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_ursra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_3(gvec_sri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_sri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_sri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_sri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_3(gvec_sli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_sli_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_sli_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(gvec_sli_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_sabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_sabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_sabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_sabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_uabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_uabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_uabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_uabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_saba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_saba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_saba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_saba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_uaba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_uaba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
-DEF_HELPER_FLAGS_4(gvec_uaba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_uaba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_mul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_mul_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_mul_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_mla_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_mla_idx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_mla_idx_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_mls_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_mls_idx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_mls_idx_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(neon_sqdmulh_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_sqdmulh_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(sve2_sqdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(sve2_sqdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(sve2_sqdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(sve2_sqdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(sve2_sqrdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_6(sve2_fmlal_zzzw_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_6(sve2_fmlal_zzxw_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_smmla_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_ummla_b, 
TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_usmmla_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_bfdot, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_bfdot_idx, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_bfmmla, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_faddp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_faddp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_faddp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fmaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmaxp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fminp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fmaxnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmaxnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmaxnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(gvec_fminnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fminnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fminnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_addp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_addp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_addp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_addp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_smaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_smaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_smaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_sminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_sminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_sminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_umaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_umaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_umaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(gvec_uminp_b, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_uminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(gvec_uminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +#include "tcg/helper.h" #ifdef TARGET_AARCH64 #include "tcg/helper-a64.h" diff --git a/target/arm/hvf-stub.c b/target/arm/hvf-stub.c new file mode 100644 index 00000000000..ff137267a03 --- /dev/null +++ b/target/arm/hvf-stub.c @@ -0,0 +1,20 @@ +/* + * QEMU Hypervisor.framework (HVF) stubs for ARM + * + * Copyright (c) Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hvf_arm.h" + +uint32_t hvf_arm_get_default_ipa_bit_size(void) +{ + g_assert_not_reached(); +} + +uint32_t hvf_arm_get_max_ipa_bit_size(void) +{ + g_assert_not_reached(); +} diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c index ace83671b59..47b0cd3a351 100644 --- a/target/arm/hvf/hvf.c +++ b/target/arm/hvf/hvf.c @@ -11,26 +11,30 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" +#include "qemu/log.h" -#include "sysemu/runstate.h" -#include "sysemu/hvf.h" -#include "sysemu/hvf_int.h" -#include "sysemu/hw_accel.h" +#include "system/runstate.h" +#include "system/hvf.h" +#include "system/hvf_int.h" +#include "system/hw_accel.h" #include "hvf_arm.h" #include "cpregs.h" +#include "cpu-sysregs.h" #include -#include "exec/address-spaces.h" +#include "system/address-spaces.h" +#include "system/memory.h" +#include "hw/boards.h" #include "hw/irq.h" #include "qemu/main-loop.h" -#include "sysemu/cpus.h" +#include "system/cpus.h" #include "arm-powerctl.h" #include "target/arm/cpu.h" #include "target/arm/internals.h" #include "target/arm/multiprocessing.h" #include "target/arm/gtimer.h" -#include "trace/trace-target_arm_hvf.h" +#include "trace.h" #include "migration/vmstate.h" #include "gdbstub/enums.h" @@ -182,7 +186,9 @@ void hvf_arm_init_debug(void) #define SYSREG_OSLAR_EL1 SYSREG(2, 0, 1, 0, 4) #define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4) #define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4) +#define SYSREG_LORC_EL1 SYSREG(3, 0, 10, 4, 3) #define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1) +#define SYSREG_CNTP_CTL_EL0 SYSREG(3, 3, 14, 2, 1) #define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0) #define SYSREG_PMUSERENR_EL0 SYSREG(3, 3, 9, 14, 0) #define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1) @@ -297,6 +303,8 @@ void hvf_arm_init_debug(void) static void hvf_wfi(CPUState *cpu); +static uint32_t chosen_ipa_bit_size; + typedef struct HVFVTimer { /* Vtimer value during migration and paused state */ uint64_t vtimer_val; @@ -806,9 +814,9 @@ int hvf_put_registers(CPUState *cpu) static void flush_cpu_state(CPUState *cpu) { - if (cpu->accel->dirty) { + if (cpu->vcpu_dirty) { hvf_put_registers(cpu); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } } @@ -839,6 +847,19 @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt) return val; } +static void clamp_id_aa64mmfr0_parange_to_ipa_size(ARMISARegisters *isar) +{ + uint32_t ipa_size = chosen_ipa_bit_size ? + chosen_ipa_bit_size : hvf_arm_get_max_ipa_bit_size(); + uint64_t id_aa64mmfr0; + + /* Clamp down the PARange to the IPA size the kernel supports. 
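The clamp being added here relies on rounding an IPA bit size down to one of the architectural PARange encodings (32, 36, 40, 42, 44, 48 or 52 bits). As an illustrative aside, not part of the patch, that mapping can be sketched as follows; the names parange_bits and example_parange_index are hypothetical, while QEMU's own helper is round_down_to_parange_index(), declared in internals.h later in this series:

/*
 * Sketch only: pick the largest PARange encoding whose bit size does not
 * exceed the requested IPA size, falling back to the 32-bit encoding.
 */
static const uint8_t parange_bits[] = { 32, 36, 40, 42, 44, 48, 52 };

static uint8_t example_parange_index(uint8_t ipa_bits)
{
    for (int i = sizeof(parange_bits) - 1; i >= 0; i--) {
        if (parange_bits[i] <= ipa_bits) {
            return i;            /* value for ID_AA64MMFR0_EL1.PARange */
        }
    }
    return 0;                    /* smallest range, 32 bits */
}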
*/ + uint8_t index = round_down_to_parange_index(ipa_size); + id_aa64mmfr0 = GET_IDREG(isar, ID_AA64MMFR0); + id_aa64mmfr0 = (id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index; + SET_IDREG(isar, ID_AA64MMFR0, id_aa64mmfr0); +} + static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) { ARMISARegisters host_isar = {}; @@ -846,16 +867,16 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) int reg; uint64_t *val; } regs[] = { - { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 }, - { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 }, - { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 }, - { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 }, - { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 }, - { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 }, + { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.idregs[ID_AA64PFR0_EL1_IDX] }, + { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.idregs[ID_AA64PFR1_EL1_IDX] }, + { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.idregs[ID_AA64DFR0_EL1_IDX] }, + { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.idregs[ID_AA64DFR1_EL1_IDX] }, + { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.idregs[ID_AA64ISAR0_EL1_IDX] }, + { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.idregs[ID_AA64ISAR1_EL1_IDX] }, /* Add ID_AA64ISAR2_EL1 here when HVF supports it */ - { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 }, - { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 }, - { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 }, + { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.idregs[ID_AA64MMFR0_EL1_IDX] }, + { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.idregs[ID_AA64MMFR1_EL1_IDX] }, + { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.idregs[ID_AA64MMFR2_EL1_IDX] }, /* Add ID_AA64MMFR3_EL1 here when HVF supports it */ }; hv_vcpu_t fd; @@ -863,7 +884,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) hv_vcpu_exit_t *exit; int i; - ahcf->dtb_compatible = "arm,arm-v8"; + ahcf->dtb_compatible = "arm,armv8"; ahcf->features = (1ULL << ARM_FEATURE_V8) | (1ULL << ARM_FEATURE_NEON) | (1ULL << ARM_FEATURE_AARCH64) | @@ -882,6 +903,21 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr); r |= hv_vcpu_destroy(fd); + clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar); + + /* + * Disable SME, which is not properly handled by QEMU hvf yet. 
+ * To allow this through we would need to: + * - make sure that the SME state is correctly handled in the + * get_registers/put_registers functions + * - get the SME-specific CPU properties to work with accelerators + * other than TCG + * - fix any assumptions we made that SME implies SVE (since + * on the M4 there is SME but not SVE) + */ + SET_IDREG(&host_isar, ID_AA64PFR1, + GET_IDREG(&host_isar, ID_AA64PFR1) & ~R_ID_AA64PFR1_SME_MASK); + ahcf->isar = host_isar; /* @@ -897,13 +933,37 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ahcf->reset_sctlr |= 0x00800000; /* Make sure we don't advertise AArch32 support for EL0/EL1 */ - if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) { + if ((GET_IDREG(&host_isar, ID_AA64PFR0) & 0xff) != 0x11) { return false; } return r == HV_SUCCESS; } +uint32_t hvf_arm_get_default_ipa_bit_size(void) +{ + uint32_t default_ipa_size; + hv_return_t ret = hv_vm_config_get_default_ipa_size(&default_ipa_size); + assert_hvf_ok(ret); + + return default_ipa_size; +} + +uint32_t hvf_arm_get_max_ipa_bit_size(void) +{ + uint32_t max_ipa_size; + hv_return_t ret = hv_vm_config_get_max_ipa_size(&max_ipa_size); + assert_hvf_ok(ret); + + /* + * We clamp any IPA size we want to back the VM with to a valid PARange + * value so the guest doesn't try and map memory outside of the valid range. + * This logic just clamps the passed in IPA bit size to the first valid + * PARange value <= to it. + */ + return round_down_to_parange_bit_size(max_ipa_size); +} + void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu) { if (!arm_host_cpu_features.dtb_compatible) { @@ -929,6 +989,25 @@ void hvf_arch_vcpu_destroy(CPUState *cpu) { } +hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range) +{ + hv_return_t ret; + hv_vm_config_t config = hv_vm_config_create(); + + ret = hv_vm_config_set_ipa_size(config, pa_range); + if (ret != HV_SUCCESS) { + goto cleanup; + } + chosen_ipa_bit_size = pa_range; + + ret = hv_vm_create(config); + +cleanup: + os_release(config); + + return ret; +} + int hvf_arch_init_vcpu(CPUState *cpu) { ARMCPU *arm_cpu = ARM_CPU(cpu); @@ -992,7 +1071,12 @@ int hvf_arch_init_vcpu(CPUState *cpu) /* We're limited to underlying hardware caps, override internal versions */ ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1, - &arm_cpu->isar.id_aa64mmfr0); + &arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]); + assert_hvf_ok(ret); + + clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar); + ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1, + arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]); assert_hvf_ok(ret); return 0; @@ -1005,13 +1089,13 @@ void hvf_kick_vcpu_thread(CPUState *cpu) } static void hvf_raise_exception(CPUState *cpu, uint32_t excp, - uint32_t syndrome) + uint32_t syndrome, int target_el) { ARMCPU *arm_cpu = ARM_CPU(cpu); CPUARMState *env = &arm_cpu->env; cpu->exception_index = excp; - env->exception.target_el = 1; + env->exception.target_el = target_el; env->exception.syndrome = syndrome; arm_cpu_do_interrupt(cpu); @@ -1180,6 +1264,9 @@ static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val) ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg)); if (ri) { + if (!cp_access_ok(1, ri, true)) { + return false; + } if (ri->accessfn) { if (ri->accessfn(env, ri, true) != CP_ACCESS_OK) { return false; @@ -1275,6 +1362,7 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val) case SYSREG_ICC_IGRPEN0_EL1: case SYSREG_ICC_IGRPEN1_EL1: case SYSREG_ICC_PMR_EL1: + case 
SYSREG_ICC_RPR_EL1: case SYSREG_ICC_SGI0R_EL1: case SYSREG_ICC_SGI1R_EL1: case SYSREG_ICC_SRE_EL1: @@ -1371,7 +1459,7 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val) SYSREG_CRN(reg), SYSREG_CRM(reg), SYSREG_OP2(reg)); - hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); + hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1); return 1; } @@ -1460,6 +1548,9 @@ static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val) ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg)); if (ri) { + if (!cp_access_ok(1, ri, false)) { + return false; + } if (ri->accessfn) { if (ri->accessfn(env, ri, false) != CP_ACCESS_OK) { return false; @@ -1557,9 +1648,19 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) case SYSREG_OSLAR_EL1: env->cp15.oslsr_el1 = val & 1; return 0; + case SYSREG_CNTP_CTL_EL0: + /* + * Guests should not rely on the physical counter, but macOS emits + * disable writes to it. Let it do so, but ignore the requests. + */ + qemu_log_mask(LOG_UNIMP, "Unsupported write to CNTP_CTL_EL0\n"); + return 0; case SYSREG_OSDLR_EL1: /* Dummy register */ return 0; + case SYSREG_LORC_EL1: + /* Dummy register */ + return 0; case SYSREG_ICC_AP0R0_EL1: case SYSREG_ICC_AP0R1_EL1: case SYSREG_ICC_AP0R2_EL1: @@ -1582,6 +1683,7 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) case SYSREG_ICC_IGRPEN0_EL1: case SYSREG_ICC_IGRPEN1_EL1: case SYSREG_ICC_PMR_EL1: + case SYSREG_ICC_RPR_EL1: case SYSREG_ICC_SGI0R_EL1: case SYSREG_ICC_SGI1R_EL1: case SYSREG_ICC_SRE_EL1: @@ -1674,7 +1776,7 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) SYSREG_CRN(reg), SYSREG_CRM(reg), SYSREG_OP2(reg)); - hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); + hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1); return 1; } @@ -1825,7 +1927,17 @@ int hvf_vcpu_exec(CPUState *cpu) flush_cpu_state(cpu); bql_unlock(); - assert_hvf_ok(hv_vcpu_run(cpu->accel->fd)); + r = hv_vcpu_run(cpu->accel->fd); + bql_lock(); + switch (r) { + case HV_SUCCESS: + break; + case HV_ILLEGAL_GUEST_STATE: + trace_hvf_illegal_guest_state(); + /* fall through */ + default: + g_assert_not_reached(); + } /* handle VMEXIT */ uint64_t exit_reason = hvf_exit->reason; @@ -1833,7 +1945,6 @@ int hvf_vcpu_exec(CPUState *cpu) uint32_t ec = syn_get_ec(syndrome); ret = 0; - bql_lock(); switch (exit_reason) { case HV_EXIT_REASON_EXCEPTION: /* This is the main one, handle below. 
*/ @@ -1868,7 +1979,7 @@ int hvf_vcpu_exec(CPUState *cpu) if (!hvf_find_sw_breakpoint(cpu, env->pc)) { /* Re-inject into the guest */ ret = 0; - hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0)); + hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0), 1); } break; } @@ -1899,13 +2010,14 @@ int hvf_vcpu_exec(CPUState *cpu) bool isv = syndrome & ARM_EL_ISV; bool iswrite = (syndrome >> 6) & 1; bool s1ptw = (syndrome >> 7) & 1; + bool sse = (syndrome >> 21) & 1; uint32_t sas = (syndrome >> 22) & 3; uint32_t len = 1 << sas; uint32_t srt = (syndrome >> 16) & 0x1f; uint32_t cm = (syndrome >> 8) & 0x1; uint64_t val = 0; - trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address, + trace_hvf_data_abort(hvf_exit->exception.virtual_address, hvf_exit->exception.physical_address, isv, iswrite, s1ptw, len, srt); @@ -1926,6 +2038,9 @@ int hvf_vcpu_exec(CPUState *cpu) address_space_read(&address_space_memory, hvf_exit->exception.physical_address, MEMTXATTRS_UNSPECIFIED, &val, len); + if (sse) { + val = sextract64(val, 0, len * 8); + } hvf_set_reg(cpu, srt, val); } @@ -1969,13 +2084,13 @@ int hvf_vcpu_exec(CPUState *cpu) cpu_synchronize_state(cpu); if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) { if (!hvf_handle_psci_call(cpu)) { - trace_hvf_unknown_hvc(env->xregs[0]); + trace_hvf_unknown_hvc(env->pc, env->xregs[0]); /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */ env->xregs[0] = -1; } } else { - trace_hvf_unknown_hvc(env->xregs[0]); - hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); + trace_hvf_unknown_hvc(env->pc, env->xregs[0]); + hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1); } break; case EC_AA64_SMC: @@ -1990,7 +2105,7 @@ int hvf_vcpu_exec(CPUState *cpu) } } else { trace_hvf_unknown_smc(env->xregs[0]); - hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); + hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1); } break; default: @@ -2189,28 +2304,23 @@ static inline bool hvf_arm_hw_debug_active(CPUState *cpu) return ((cur_hw_wps > 0) || (cur_hw_bps > 0)); } -static void hvf_arch_set_traps(void) +static void hvf_arch_set_traps(CPUState *cpu) { - CPUState *cpu; bool should_enable_traps = false; hv_return_t r = HV_SUCCESS; /* Check whether guest debugging is enabled for at least one vCPU; if it * is, enable exiting the guest on all vCPUs */ - CPU_FOREACH(cpu) { - should_enable_traps |= cpu->accel->guest_debug_enabled; - } - CPU_FOREACH(cpu) { - /* Set whether debug exceptions exit the guest */ - r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd, - should_enable_traps); - assert_hvf_ok(r); + should_enable_traps |= cpu->accel->guest_debug_enabled; + /* Set whether debug exceptions exit the guest */ + r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd, + should_enable_traps); + assert_hvf_ok(r); - /* Set whether accesses to debug registers exit the guest */ - r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd, - should_enable_traps); - assert_hvf_ok(r); - } + /* Set whether accesses to debug registers exit the guest */ + r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd, + should_enable_traps); + assert_hvf_ok(r); } void hvf_arch_update_guest_debug(CPUState *cpu) @@ -2251,7 +2361,7 @@ void hvf_arch_update_guest_debug(CPUState *cpu) deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0); } - hvf_arch_set_traps(); + hvf_arch_set_traps(cpu); } bool hvf_arch_supports_guest_debug(void) diff --git a/target/arm/hvf/trace-events b/target/arm/hvf/trace-events index 4fbbe4b45ec..b29a995f3d3 100644 --- 
a/target/arm/hvf/trace-events +++ b/target/arm/hvf/trace-events @@ -2,12 +2,13 @@ hvf_unhandled_sysreg_read(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, hvf_unhandled_sysreg_write(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg write at pc=0x%"PRIx64": 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)" hvf_inject_fiq(void) "injecting FIQ" hvf_inject_irq(void) "injecting IRQ" -hvf_data_abort(uint64_t pc, uint64_t va, uint64_t pa, bool isv, bool iswrite, bool s1ptw, uint32_t len, uint32_t srt) "data abort: [pc=0x%"PRIx64" va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]" +hvf_data_abort(uint64_t va, uint64_t pa, bool isv, bool iswrite, bool s1ptw, uint32_t len, uint32_t srt) "data abort: [va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]" hvf_sysreg_read(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg read 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d) = 0x%016"PRIx64 hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg write 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d, val=0x%016"PRIx64")" -hvf_unknown_hvc(uint64_t x0) "unknown HVC! 0x%016"PRIx64 +hvf_unknown_hvc(uint64_t pc, uint64_t x0) "pc=0x%"PRIx64" unknown HVC! 0x%016"PRIx64 hvf_unknown_smc(uint64_t x0) "unknown SMC! 0x%016"PRIx64 hvf_exit(uint64_t syndrome, uint32_t ec, uint64_t pc) "exit: 0x%"PRIx64" [ec=0x%x pc=0x%"PRIx64"]" -hvf_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpu=0x%x" +hvf_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpuid=0x%x" hvf_vgic_write(const char *name, uint64_t val) "vgic write to %s [val=0x%016"PRIx64"]" hvf_vgic_read(const char *name, uint64_t val) "vgic read from %s [val=0x%016"PRIx64"]" +hvf_illegal_guest_state(void) "HV_ILLEGAL_GUEST_STATE" diff --git a/target/arm/hvf/trace.h b/target/arm/hvf/trace.h new file mode 100644 index 00000000000..04a19c1d752 --- /dev/null +++ b/target/arm/hvf/trace.h @@ -0,0 +1 @@ +#include "trace/trace-target_arm_hvf.h" diff --git a/target/arm/hvf_arm.h b/target/arm/hvf_arm.h index e848c1d27d4..ea82f2691df 100644 --- a/target/arm/hvf_arm.h +++ b/target/arm/hvf_arm.h @@ -11,7 +11,7 @@ #ifndef QEMU_HVF_ARM_H #define QEMU_HVF_ARM_H -#include "cpu.h" +#include "target/arm/cpu-qom.h" /** * hvf_arm_init_debug() - initialize guest debug capabilities @@ -22,4 +22,7 @@ void hvf_arm_init_debug(void); void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu); +uint32_t hvf_arm_get_default_ipa_bit_size(void); +uint32_t hvf_arm_get_max_ipa_bit_size(void); + #endif diff --git a/target/arm/hyp_gdbstub.c b/target/arm/hyp_gdbstub.c index f120d55caab..bb5969720ce 100644 --- a/target/arm/hyp_gdbstub.c +++ b/target/arm/hyp_gdbstub.c @@ -54,7 +54,7 @@ GArray *hw_breakpoints, *hw_watchpoints; * here so future PC comparisons will work properly. 
*/ -int insert_hw_breakpoint(target_ulong addr) +int insert_hw_breakpoint(vaddr addr) { HWBreakpoint brk = { .bcr = 0x1, /* BCR E=1, enable */ @@ -80,7 +80,7 @@ int insert_hw_breakpoint(target_ulong addr) * Delete a breakpoint and shuffle any above down */ -int delete_hw_breakpoint(target_ulong pc) +int delete_hw_breakpoint(vaddr pc) { int i; for (i = 0; i < hw_breakpoints->len; i++) { @@ -125,7 +125,7 @@ int delete_hw_breakpoint(target_ulong pc) * need to ensure you mask the address as required and set BAS=0xff */ -int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type) +int insert_hw_watchpoint(vaddr addr, vaddr len, int type) { HWWatchpoint wp = { .wcr = R_DBGWCR_E_MASK, /* E=1, enable */ @@ -158,7 +158,6 @@ int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type) break; default: g_assert_not_reached(); - break; } if (len <= 8) { /* we align the address and set the bits in BAS */ @@ -183,7 +182,7 @@ int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type) return 0; } -bool check_watchpoint_in_range(int i, target_ulong addr) +bool check_watchpoint_in_range(int i, vaddr addr) { HWWatchpoint *wp = get_hw_wp(i); uint64_t addr_top, addr_bottom = wp->wvr; @@ -215,7 +214,7 @@ bool check_watchpoint_in_range(int i, target_ulong addr) * Delete a breakpoint and shuffle any above down */ -int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type) +int delete_hw_watchpoint(vaddr addr, vaddr len, int type) { int i; for (i = 0; i < cur_hw_wps; i++) { @@ -227,7 +226,7 @@ int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type) return -ENOENT; } -bool find_hw_breakpoint(CPUState *cpu, target_ulong pc) +bool find_hw_breakpoint(CPUState *cpu, vaddr pc) { int i; @@ -240,7 +239,7 @@ bool find_hw_breakpoint(CPUState *cpu, target_ulong pc) return false; } -CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr) +CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, vaddr addr) { int i; diff --git a/target/arm/internals.h b/target/arm/internals.h index 757b1fae925..1b3d0244fd6 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -25,9 +25,13 @@ #ifndef TARGET_ARM_INTERNALS_H #define TARGET_ARM_INTERNALS_H +#include "exec/hwaddr.h" +#include "exec/vaddr.h" #include "exec/breakpoint.h" +#include "accel/tcg/tb-cpu-state.h" #include "hw/registerfields.h" #include "tcg/tcg-gvec-desc.h" +#include "system/memory.h" #include "syndrome.h" #include "cpu-features.h" @@ -350,13 +354,14 @@ static inline int r14_bank_number(int mode) } void arm_cpu_register(const ARMCPUInfo *info); -void aarch64_cpu_register(const ARMCPUInfo *info); void register_cp_regs_for_features(ARMCPU *cpu); void init_cpreg_list(ARMCPU *cpu); void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu); void arm_translate_init(void); +void arm_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); void arm_cpu_register_gdb_commands(ARMCPU *cpu); void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *, @@ -367,10 +372,12 @@ void arm_restore_state_to_opc(CPUState *cs, const uint64_t *data); #ifdef CONFIG_TCG +TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs); void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb); /* Our implementation of TCGCPUOps::cpu_exec_halt */ bool arm_cpu_exec_halt(CPUState *cs); +int arm_cpu_mmu_index(CPUState *cs, bool ifetch); #endif /* CONFIG_TCG */ typedef enum ARMFPRounding { @@ -390,6 +397,141 @@ static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode) return 
arm_rmode_to_sf_map[rmode]; } +/* Return the effective value of SCR_EL3.RW */ +static inline bool arm_scr_rw_eff(CPUARMState *env) +{ + /* + * SCR_EL3.RW has an effective value of 1 if: + * - we are NS and EL2 is implemented but doesn't support AArch32 + * - we are S and EL2 is enabled (in which case it must be AArch64) + */ + ARMCPU *cpu = env_archcpu(env); + + if (env->cp15.scr_el3 & SCR_RW) { + return true; + } + if (env->cp15.scr_el3 & SCR_NS) { + return arm_feature(env, ARM_FEATURE_EL2) && + !cpu_isar_feature(aa64_aa32_el2, cpu); + } else { + return env->cp15.scr_el3 & SCR_EEL2; + } +} + +/* Return true if the specified exception level is running in AArch64 state. */ +static inline bool arm_el_is_aa64(CPUARMState *env, int el) +{ + /* + * This isn't valid for EL0 (if we're in EL0, is_a64() is what you want, + * and if we're not in EL0 then the state of EL0 isn't well defined.) + */ + assert(el >= 1 && el <= 3); + bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64); + + /* + * The highest exception level is always at the maximum supported + * register width, and then lower levels have a register width controlled + * by bits in the SCR or HCR registers. + */ + if (el == 3) { + return aa64; + } + + if (arm_feature(env, ARM_FEATURE_EL3)) { + aa64 = aa64 && arm_scr_rw_eff(env); + } + + if (el == 2) { + return aa64; + } + + if (arm_is_el2_enabled(env)) { + aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW); + } + + return aa64; +} + +/* + * Return the current Exception Level (as per ARMv8; note that this differs + * from the ARMv7 Privilege Level). + */ +static inline int arm_current_el(CPUARMState *env) +{ + if (arm_feature(env, ARM_FEATURE_M)) { + return arm_v7m_is_handler_mode(env) || + !(env->v7m.control[env->v7m.secure] & 1); + } + + if (is_a64(env)) { + return extract32(env->pstate, 2, 2); + } + + switch (env->uncached_cpsr & 0x1f) { + case ARM_CPU_MODE_USR: + return 0; + case ARM_CPU_MODE_HYP: + return 2; + case ARM_CPU_MODE_MON: + return 3; + default: + if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { + /* If EL3 is 32-bit then all secure privileged modes run in EL3 */ + return 3; + } + + return 1; + } +} + +static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env, + bool sctlr_b) +{ +#ifdef CONFIG_USER_ONLY + /* + * In system mode, BE32 is modelled in line with the + * architecture (as word-invariant big-endianness), where loads + * and stores are done little endian but from addresses which + * are adjusted by XORing with the appropriate constant. So the + * endianness to use for the raw data access is not affected by + * SCTLR.B. + * In user mode, however, we model BE32 as byte-invariant + * big-endianness (because user-only code cannot tell the + * difference), and so we need to use a data access endianness + * that depends on SCTLR.B. + */ + if (sctlr_b) { + return true; + } +#endif + /* In 32bit endianness is determined by looking at CPSR's E bit */ + return env->uncached_cpsr & CPSR_E; +} + +static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr) +{ + return sctlr & (el ? SCTLR_EE : SCTLR_E0E); +} + +/* Return true if the processor is in big-endian mode. 
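A hypothetical caller might use the helper defined just below to pick the endianness for a guest data access; a one-line sketch (MemOp, MO_BE and MO_LE are QEMU's existing memop definitions, the surrounding caller and 'env' are assumed):

/* Choose the MemOp endianness for a data access in the current CPU state. */
MemOp endian = arm_cpu_data_is_big_endian(env) ? MO_BE : MO_LE;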
*/ +static inline bool arm_cpu_data_is_big_endian(CPUARMState *env) +{ + if (!is_a64(env)) { + return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env)); + } else { + int cur_el = arm_current_el(env); + uint64_t sctlr = arm_sctlr(env, cur_el); + return arm_cpu_data_is_big_endian_a64(cur_el, sctlr); + } +} + +#ifdef CONFIG_USER_ONLY +static inline bool arm_cpu_bswap_data(CPUARMState *env) +{ + return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env); +} +#endif + static inline void aarch64_save_sp(CPUARMState *env, int el) { if (env->pstate & PSTATE_SP) { @@ -436,6 +578,25 @@ static inline void update_spsel(CPUARMState *env, uint32_t imm) */ unsigned int arm_pamax(ARMCPU *cpu); +/* + * round_down_to_parange_index + * @bit_size: uint8_t + * + * Rounds down the bit_size supplied to the first supported ARM physical + * address range and returns the index for this. The index is intended to + * be used to set ID_AA64MMFR0_EL1's PARANGE bits. + */ +uint8_t round_down_to_parange_index(uint8_t bit_size); + +/* + * round_down_to_parange_bit_size + * @bit_size: uint8_t + * + * Rounds down the bit_size supplied to the first supported ARM physical + * address range bit size and returns this. + */ +uint8_t round_down_to_parange_bit_size(uint8_t bit_size); + /* Return true if extended addresses are enabled. * This is always the case if our translation regime is 64 bit, * but depends on TTBCR.EAE for 32 bit. @@ -489,16 +650,12 @@ static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type) { return false; } -static inline void arm_handle_psci_call(ARMCPU *cpu) -{ - g_assert_not_reached(); -} #else /* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */ bool arm_is_psci_call(ARMCPU *cpu, int excp_type); +#endif /* Actually handle a PSCI call */ void arm_handle_psci_call(ARMCPU *cpu); -#endif /** * arm_clear_exclusive: clear the exclusive monitor @@ -568,8 +725,8 @@ typedef struct ARMMMUFaultInfo ARMMMUFaultInfo; struct ARMMMUFaultInfo { ARMFaultType type; ARMGPCF gpcf; - target_ulong s2addr; - target_ulong paddr; + hwaddr s2addr; + hwaddr paddr; ARMSecuritySpace paddr_space; int level; int domain; @@ -783,9 +940,9 @@ void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr, void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr, MMUAccessType access_type, uintptr_t ra); #else -bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); +bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr, + MMUAccessType access_type, int mmu_idx, + MemOp memop, int size, bool probe, uintptr_t ra); #endif static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx) @@ -852,7 +1009,16 @@ static inline void arm_call_el_change_hook(ARMCPU *cpu) } } -/* Return true if this address translation regime has two ranges. */ +/* + * Return true if this address translation regime has two ranges. + * Note that this will not return the correct answer for AArch32 + * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is + * never called from a context where EL3 can be AArch32. (The + * correct return value for ARMMMUIdx_E3 would be different for + * that case, so we can't just make the function return the + * correct value anyway; we would need an extra "bool e3_is_aarch32" + * argument which all the current callsites would pass as 'false'.) 
+ */ static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx) { switch (mmu_idx) { @@ -877,6 +1043,7 @@ static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx) case ARMMMUIdx_Stage1_E1_PAN: case ARMMMUIdx_E10_1_PAN: case ARMMMUIdx_E20_2_PAN: + case ARMMMUIdx_E30_3_PAN: return true; default: return false; @@ -900,10 +1067,11 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) case ARMMMUIdx_E2: return 2; case ARMMMUIdx_E3: + case ARMMMUIdx_E30_0: + case ARMMMUIdx_E30_3_PAN: return 3; case ARMMMUIdx_E10_0: case ARMMMUIdx_Stage1_E0: - return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3; case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: case ARMMMUIdx_E10_1: @@ -925,7 +1093,9 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) { switch (mmu_idx) { + case ARMMMUIdx_E10_0: case ARMMMUIdx_E20_0: + case ARMMMUIdx_E30_0: case ARMMMUIdx_Stage1_E0: case ARMMMUIdx_MUser: case ARMMMUIdx_MSUser: @@ -934,10 +1104,6 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) return true; default: return false; - case ARMMMUIdx_E10_0: - case ARMMMUIdx_E10_1: - case ARMMMUIdx_E10_1_PAN: - g_assert_not_reached(); } } @@ -1005,7 +1171,7 @@ static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) static inline int arm_num_brps(ARMCPU *cpu) { if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1; + return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, BRPS) + 1; } else { return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1; } @@ -1019,7 +1185,7 @@ static inline int arm_num_brps(ARMCPU *cpu) static inline int arm_num_wrps(ARMCPU *cpu) { if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1; + return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, WRPS) + 1; } else { return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1; } @@ -1033,7 +1199,7 @@ static inline int arm_num_wrps(ARMCPU *cpu) static inline int arm_num_ctx_cmps(ARMCPU *cpu) { if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1; + return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, CTX_CMPS) + 1; } else { return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1; } @@ -1394,6 +1560,7 @@ typedef struct GetPhysAddrResult { * @env: CPUARMState * @address: virtual address to get physical address for * @access_type: 0 for read, 1 for write, 2 for execute + * @memop: memory operation feeding this access, or 0 for none * @mmu_idx: MMU index indicating required translation regime * @result: set on translation success. * @fi: set to fault info if the translation fails @@ -1411,8 +1578,8 @@ typedef struct GetPhysAddrResult { * * for PSMAv5 based systems we don't bother to return a full FSR format * value. 
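Ahead of the updated get_phys_addr() prototype that follows, a hedged usage sketch of the new signature; the caller context, the local names, and the convention that the function returns true on a translation fault (filling *result only on success) are assumptions based on current QEMU rather than text from this series:

/* Translate a virtual address for a 4-byte load in the current regime. */
GetPhysAddrResult res = {};
ARMMMUFaultInfo fi = {};

if (get_phys_addr(env, addr, MMU_DATA_LOAD, MO_32,
                  arm_mmu_idx(env), &res, &fi)) {
    /* translation failed; 'fi' describes the fault */
} else {
    hwaddr pa = res.f.phys_addr;    /* physical address of the access */
}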
*/ -bool get_phys_addr(CPUARMState *env, target_ulong address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, +bool get_phys_addr(CPUARMState *env, vaddr address, + MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx, GetPhysAddrResult *result, ARMMMUFaultInfo *fi) __attribute__((nonnull)); @@ -1422,6 +1589,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address, * @env: CPUARMState * @address: virtual address to get physical address for * @access_type: 0 for read, 1 for write, 2 for execute + * @memop: memory operation feeding this access, or 0 for none * @mmu_idx: MMU index indicating required translation regime * @space: security space for the access * @result: set on translation success. @@ -1430,8 +1598,8 @@ bool get_phys_addr(CPUARMState *env, target_ulong address, * Similar to get_phys_addr, but use the given security space and don't perform * a Granule Protection Check on the resulting address. */ -bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address, - MMUAccessType access_type, +bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address, + MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx, ARMSecuritySpace space, GetPhysAddrResult *result, ARMMMUFaultInfo *fi) @@ -1455,19 +1623,13 @@ FIELD(PREDDESC, OPRSZ, 0, 6) FIELD(PREDDESC, ESZ, 6, 2) FIELD(PREDDESC, DATA, 8, 24) -/* - * The SVE simd_data field, for memory ops, contains either - * rd (5 bits) or a shift count (2 bits). - */ -#define SVE_MTEDESC_SHIFT 5 - /* Bits within a descriptor passed to the helper_mte_check* functions. */ FIELD(MTEDESC, MIDX, 0, 4) FIELD(MTEDESC, TBI, 4, 2) FIELD(MTEDESC, TCMA, 6, 2) FIELD(MTEDESC, WRITE, 8, 1) FIELD(MTEDESC, ALIGN, 9, 3) -FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12) /* size - 1 */ +FIELD(MTEDESC, SIZEM1, 12, 32 - 12) /* size - 1 */ bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr); uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra); @@ -1639,7 +1801,6 @@ static inline uint64_t pmu_counter_mask(CPUARMState *env) return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1); } -#ifdef TARGET_AARCH64 GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg); int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg); int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg); @@ -1657,7 +1818,12 @@ void aarch64_max_tcg_initfn(Object *obj); void aarch64_add_pauth_properties(Object *obj); void aarch64_add_sve_properties(Object *obj); void aarch64_add_sme_properties(Object *obj); -#endif + +/* Return true if the gdbstub is presenting an AArch64 CPU */ +static inline bool arm_gdbstub_is_aarch64(ARMCPU *cpu) +{ + return arm_feature(&cpu->env, ARM_FEATURE_AARCH64); +} /* Read the CONTROL register as the MRS instruction would. 
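A small usage sketch of the MTEDESC fields above, packing a descriptor for an access of 'total' bytes; 'desc', 'midx', 'is_write' and 'total' are hypothetical locals, while FIELD_DP32 and the MTEDESC fields are QEMU's existing registerfields definitions:

/* Pack an MTE check descriptor; SIZEM1 now holds size - 1 in a 20-bit field. */
uint32_t desc = 0;

desc = FIELD_DP32(desc, MTEDESC, MIDX, midx);
desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total - 1);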
*/ uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure); @@ -1697,6 +1863,13 @@ static inline uint64_t pauth_ptr_mask(ARMVAParameters param) /* Add the cpreg definitions for debug related system registers */ void define_debug_regs(ARMCPU *cpu); +/* Add the cpreg definitions for TLBI instructions */ +void define_tlb_insn_regs(ARMCPU *cpu); +/* Add the cpreg definitions for AT instructions */ +void define_at_insn_regs(ARMCPU *cpu); +/* Add the cpreg definitions for PM cpregs */ +void define_pm_cpregs(ARMCPU *cpu); + /* Effective value of MDCR_EL2 */ static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env) { @@ -1728,8 +1901,6 @@ static inline bool arm_fgt_active(CPUARMState *env, int el) (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN)); } -void assert_hflags_rebuild_correctly(CPUARMState *env); - /* * Although the ARM implementation of hardware assisted debugging * allows for different breakpoints per-core, the current GDB @@ -1771,20 +1942,43 @@ extern GArray *hw_breakpoints, *hw_watchpoints; #define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i)) #define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i)) -bool find_hw_breakpoint(CPUState *cpu, target_ulong pc); -int insert_hw_breakpoint(target_ulong pc); -int delete_hw_breakpoint(target_ulong pc); +bool find_hw_breakpoint(CPUState *cpu, vaddr pc); +int insert_hw_breakpoint(vaddr pc); +int delete_hw_breakpoint(vaddr pc); -bool check_watchpoint_in_range(int i, target_ulong addr); -CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr); -int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type); -int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type); +bool check_watchpoint_in_range(int i, vaddr addr); +CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, vaddr addr); +int insert_hw_watchpoint(vaddr addr, vaddr len, int type); +int delete_hw_watchpoint(vaddr addr, vaddr len, int type); /* Return the current value of the system counter in ticks */ uint64_t gt_get_countervalue(CPUARMState *env); /* * Return the currently applicable offset between the system counter - * and CNTVCT_EL0 (this will be either 0 or the value of CNTVOFF_EL2). + * and the counter for the specified timer, as used for direct register + * accesses. */ -uint64_t gt_virt_cnt_offset(CPUARMState *env); +uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx); + +/* + * Return mask of ARMMMUIdxBit values corresponding to an "invalidate + * all EL1" scope; this covers stage 1 and stage 2. + */ +int alle1_tlbmask(CPUARMState *env); + +/* Set the float_status behaviour to match the Arm defaults */ +void arm_set_default_fp_behaviours(float_status *s); +/* Set the float_status behaviour to match Arm FPCR.AH=1 behaviour */ +void arm_set_ah_fp_behaviours(float_status *s); +/* Read the float_status info and return the appropriate FPSR value */ +uint32_t vfp_get_fpsr_from_host(CPUARMState *env); +/* Clear the exception status flags from all float_status fields */ +void vfp_clear_float_status_exc_flags(CPUARMState *env); +/* + * Update float_status fields to handle the bits of the FPCR + * specified by mask changing to the values in val. 
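A hypothetical call illustrating the mask/val convention just described, updating only the rounding-mode bits FPCR.RMode (bits [23:22]); 'new_rmode' is an assumed local holding the new 2-bit rounding mode, and the prototype itself follows below:

/* Propagate a change of FPCR.RMode only, leaving other FPCR behaviour alone. */
vfp_set_fpcr_to_host(env, new_rmode << 22, 3u << 22);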
+ */ +void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask); +bool arm_pan_enabled(CPUARMState *env); + #endif diff --git a/target/arm/kvm-stub.c b/target/arm/kvm-stub.c index 965a486b320..c93462c5b9b 100644 --- a/target/arm/kvm-stub.c +++ b/target/arm/kvm-stub.c @@ -22,3 +22,105 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level) { g_assert_not_reached(); } + +/* + * It's safe to call these functions without KVM support. + * They should either do nothing or return "not supported". + */ +bool kvm_arm_aarch32_supported(void) +{ + return false; +} + +bool kvm_arm_pmu_supported(void) +{ + return false; +} + +bool kvm_arm_sve_supported(void) +{ + return false; +} + +bool kvm_arm_mte_supported(void) +{ + return false; +} + +bool kvm_arm_el2_supported(void) +{ + return false; +} + +/* + * These functions should never actually be called without KVM support. + */ +void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu) +{ + g_assert_not_reached(); +} + +void kvm_arm_add_vcpu_properties(ARMCPU *cpu) +{ + g_assert_not_reached(); +} + +int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa) +{ + g_assert_not_reached(); +} + +int kvm_arm_vgic_probe(void) +{ + g_assert_not_reached(); +} + +void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq) +{ + g_assert_not_reached(); +} + +void kvm_arm_pmu_init(ARMCPU *cpu) +{ + g_assert_not_reached(); +} + +void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa) +{ + g_assert_not_reached(); +} + +void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) +{ + g_assert_not_reached(); +} + +uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu) +{ + g_assert_not_reached(); +} + +void kvm_arm_enable_mte(Object *cpuobj, Error **errp) +{ + g_assert_not_reached(); +} + +void kvm_arm_reset_vcpu(ARMCPU *cpu) +{ + g_assert_not_reached(); +} + +void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level) +{ + g_assert_not_reached(); +} + +void kvm_arm_cpu_pre_save(ARMCPU *cpu) +{ + g_assert_not_reached(); +} + +bool kvm_arm_cpu_post_load(ARMCPU *cpu) +{ + g_assert_not_reached(); +} diff --git a/target/arm/kvm.c b/target/arm/kvm.c index 849e2e21b30..c78d0d59bbb 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -20,17 +20,18 @@ #include "qemu/main-loop.h" #include "qom/object.h" #include "qapi/error.h" -#include "sysemu/sysemu.h" -#include "sysemu/runstate.h" -#include "sysemu/kvm.h" -#include "sysemu/kvm_int.h" +#include "system/system.h" +#include "system/runstate.h" +#include "system/kvm.h" +#include "system/kvm_int.h" #include "kvm_arm.h" #include "cpu.h" +#include "cpu-sysregs.h" #include "trace.h" #include "internals.h" #include "hw/pci/pci.h" #include "exec/memattrs.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "gdbstub/enums.h" #include "hw/boards.h" #include "hw/irq.h" @@ -39,8 +40,10 @@ #include "hw/acpi/acpi.h" #include "hw/acpi/ghes.h" #include "target/arm/gtimer.h" +#include "migration/blocker.h" const KVMCapabilityInfo kvm_arch_required_capabilities[] = { + KVM_CAP_INFO(DEVICE_CTRL), KVM_CAP_LAST_INFO }; @@ -98,8 +101,7 @@ static int kvm_arm_vcpu_finalize(ARMCPU *cpu, int feature) return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_FINALIZE, &feature); } -bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, - int *fdarray, +bool kvm_arm_create_scratch_host_vcpu(int *fdarray, struct kvm_vcpu_init *init) { int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1; @@ -119,6 +121,21 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, if (vmfd < 0) { goto err; } + + /* + * The MTE capability must be 
enabled by the VMM before creating + * any VCPUs in order to allow the MTE bits of the ID_AA64PFR1 + * register to be probed correctly, as they are masked if MTE + * is not enabled. + */ + if (kvm_arm_mte_supported()) { + KVMState kvm_state; + + kvm_state.fd = kvmfd; + kvm_state.vmfd = vmfd; + kvm_vm_enable_cap(&kvm_state, KVM_CAP_ARM_MTE, 0); + } + cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0); if (cpufd < 0) { goto err; @@ -133,40 +150,13 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, struct kvm_vcpu_init preferred; ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred); - if (!ret) { - init->target = preferred.target; - } - } - if (ret >= 0) { - ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init); if (ret < 0) { goto err; } - } else if (cpus_to_try) { - /* Old kernel which doesn't know about the - * PREFERRED_TARGET ioctl: we know it will only support - * creating one kind of guest CPU which is its preferred - * CPU type. - */ - struct kvm_vcpu_init try; - - while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) { - try.target = *cpus_to_try++; - memcpy(try.features, init->features, sizeof(init->features)); - ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try); - if (ret >= 0) { - break; - } - } - if (ret < 0) { - goto err; - } - init->target = try.target; - } else { - /* Treat a NULL cpus_to_try argument the same as an empty - * list, which means we will fail the call since this must - * be an old kernel which doesn't support PREFERRED_TARGET. - */ + init->target = preferred.target; + } + ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init); + if (ret < 0) { goto err; } @@ -229,6 +219,29 @@ static bool kvm_arm_pauth_supported(void) kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC)); } + +static uint64_t idregs_sysreg_to_kvm_reg(ARMSysRegs sysreg) +{ + return ARM64_SYS_REG((sysreg & CP_REG_ARM64_SYSREG_OP0_MASK) >> CP_REG_ARM64_SYSREG_OP0_SHIFT, + (sysreg & CP_REG_ARM64_SYSREG_OP1_MASK) >> CP_REG_ARM64_SYSREG_OP1_SHIFT, + (sysreg & CP_REG_ARM64_SYSREG_CRN_MASK) >> CP_REG_ARM64_SYSREG_CRN_SHIFT, + (sysreg & CP_REG_ARM64_SYSREG_CRM_MASK) >> CP_REG_ARM64_SYSREG_CRM_SHIFT, + (sysreg & CP_REG_ARM64_SYSREG_OP2_MASK) >> CP_REG_ARM64_SYSREG_OP2_SHIFT); +} + +/* read a sysreg value and store it in the idregs */ +static int get_host_cpu_reg(int fd, ARMHostCPUFeatures *ahcf, + ARMIDRegisterIdx index) +{ + uint64_t *reg; + int ret; + + reg = &ahcf->isar.idregs[index]; + ret = read_sys_reg64(fd, reg, + idregs_sysreg_to_kvm_reg(id_register_sysreg[index])); + return ret; +} + static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) { /* Identify the feature bits corresponding to the host CPU, and @@ -238,21 +251,11 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) */ int fdarray[3]; bool sve_supported; + bool el2_supported; bool pmu_supported = false; uint64_t features = 0; int err; - /* Old kernels may not know about the PREFERRED_TARGET ioctl: however - * we know these will only support creating one kind of guest CPU, - * which is its preferred CPU type. Fortunately these old kernels - * support only a very limited number of CPUs. - */ - static const uint32_t cpus_to_try[] = { - KVM_ARM_TARGET_AEM_V8, - KVM_ARM_TARGET_FOUNDATION_V8, - KVM_ARM_TARGET_CORTEX_A57, - QEMU_KVM_ARM_TARGET_NONE - }; /* * target = -1 informs kvm_arm_create_scratch_host_vcpu() * to use the preferred target @@ -268,6 +271,14 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) init.features[0] |= 1 << KVM_ARM_VCPU_SVE; } + /* + * Ask for EL2 if supported. 
+ */ + el2_supported = kvm_arm_el2_supported(); + if (el2_supported) { + init.features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2; + } + /* * Ask for Pointer Authentication if supported, so that we get * the unsanitized field values for AA64ISAR1_EL1. @@ -283,15 +294,15 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) features |= 1ULL << ARM_FEATURE_PMU; } - if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) { + if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) { return false; } ahcf->target = init.target; - ahcf->dtb_compatible = "arm,arm-v8"; + ahcf->dtb_compatible = "arm,armv8"; + int fd = fdarray[2]; - err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0, - ARM64_SYS_REG(3, 0, 0, 4, 0)); + err = get_host_cpu_reg(fd, ahcf, ID_AA64PFR0_EL1_IDX); if (unlikely(err < 0)) { /* * Before v4.15, the kernel only exposed a limited number of system @@ -309,31 +320,20 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) * ??? Either of these sounds like too much effort just * to work around running a modern host kernel. */ - ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */ + SET_IDREG(&ahcf->isar, ID_AA64PFR0, 0x00000011); /* EL1&0, AArch64 only */ err = 0; } else { - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1, - ARM64_SYS_REG(3, 0, 0, 4, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0, - ARM64_SYS_REG(3, 0, 0, 4, 5)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0, - ARM64_SYS_REG(3, 0, 0, 5, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1, - ARM64_SYS_REG(3, 0, 0, 5, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0, - ARM64_SYS_REG(3, 0, 0, 6, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1, - ARM64_SYS_REG(3, 0, 0, 6, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2, - ARM64_SYS_REG(3, 0, 0, 6, 2)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0, - ARM64_SYS_REG(3, 0, 0, 7, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1, - ARM64_SYS_REG(3, 0, 0, 7, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2, - ARM64_SYS_REG(3, 0, 0, 7, 2)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr3, - ARM64_SYS_REG(3, 0, 0, 7, 3)); + err |= get_host_cpu_reg(fd, ahcf, ID_AA64PFR1_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_AA64SMFR0_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_AA64DFR0_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_AA64DFR1_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR0_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR1_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR2_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR0_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR1_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR2_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR3_EL1_IDX); /* * Note that if AArch32 support is not present in the host, @@ -342,49 +342,31 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) * than skipping the reads and leaving 0, as we must avoid * considering the values in every case. 
*/ - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0, - ARM64_SYS_REG(3, 0, 0, 1, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1, - ARM64_SYS_REG(3, 0, 0, 1, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, - ARM64_SYS_REG(3, 0, 0, 1, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, - ARM64_SYS_REG(3, 0, 0, 1, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1, - ARM64_SYS_REG(3, 0, 0, 1, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2, - ARM64_SYS_REG(3, 0, 0, 1, 6)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3, - ARM64_SYS_REG(3, 0, 0, 1, 7)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0, - ARM64_SYS_REG(3, 0, 0, 2, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1, - ARM64_SYS_REG(3, 0, 0, 2, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2, - ARM64_SYS_REG(3, 0, 0, 2, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3, - ARM64_SYS_REG(3, 0, 0, 2, 3)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4, - ARM64_SYS_REG(3, 0, 0, 2, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5, - ARM64_SYS_REG(3, 0, 0, 2, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4, - ARM64_SYS_REG(3, 0, 0, 2, 6)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6, - ARM64_SYS_REG(3, 0, 0, 2, 7)); - - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0, + err |= get_host_cpu_reg(fd, ahcf, ID_PFR0_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_PFR1_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_DFR0_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_MMFR0_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_MMFR1_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_MMFR2_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_MMFR3_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_ISAR0_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_ISAR1_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_ISAR2_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_ISAR3_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_ISAR4_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_ISAR5_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_ISAR6_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_MMFR4_EL1_IDX); + + err |= read_sys_reg32(fd, &ahcf->isar.mvfr0, ARM64_SYS_REG(3, 0, 0, 3, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1, + err |= read_sys_reg32(fd, &ahcf->isar.mvfr1, ARM64_SYS_REG(3, 0, 0, 3, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, + err |= read_sys_reg32(fd, &ahcf->isar.mvfr2, ARM64_SYS_REG(3, 0, 0, 3, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, - ARM64_SYS_REG(3, 0, 0, 3, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1, - ARM64_SYS_REG(3, 0, 0, 3, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5, - ARM64_SYS_REG(3, 0, 0, 3, 6)); + err |= get_host_cpu_reg(fd, ahcf, ID_PFR2_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_DFR1_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_MMFR5_EL1_IDX); /* * DBGDIDR is a bit complicated because the kernel doesn't @@ -396,14 +378,14 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does. * We only do this if the CPU supports AArch32 at EL1. 
*/ - if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) { - int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS); - int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS); + if (FIELD_EX32_IDREG(&ahcf->isar, ID_AA64PFR0, EL1) >= 2) { + int wrps = FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, WRPS); + int brps = FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, BRPS); int ctx_cmps = - FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS); + FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, CTX_CMPS); int version = 6; /* ARMv8 debug architecture */ bool has_el3 = - !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3); + !!FIELD_EX32_IDREG(&ahcf->isar, ID_AA64PFR0, EL3); uint32_t dbgdidr = 0; dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps); @@ -418,7 +400,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) if (pmu_supported) { /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */ - err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0, + err |= read_sys_reg64(fd, &ahcf->isar.reset_pmcr_el0, ARM64_SYS_REG(3, 3, 9, 12, 0)); } @@ -430,8 +412,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) * enabled SVE support, which resulted in an error rather than RAZ. * So only read the register if we set KVM_ARM_VCPU_SVE above. */ - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0, - ARM64_SYS_REG(3, 0, 0, 4, 4)); + err |= get_host_cpu_reg(fd, ahcf, ID_AA64ZFR0_EL1_IDX); } } @@ -451,6 +432,10 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) features |= 1ULL << ARM_FEATURE_AARCH64; features |= 1ULL << ARM_FEATURE_GENERIC_TIMER; + if (el2_supported) { + features |= 1ULL << ARM_FEATURE_EL2; + } + ahcf->features = features; return true; @@ -675,19 +660,11 @@ static void kvm_arm_set_device_addr(KVMDevice *kd) { struct kvm_device_attr *attr = &kd->kdattr; int ret; + uint64_t addr = kd->kda.addr; - /* If the device control API is available and we have a device fd on the - * KVMDevice struct, let's use the newer API - */ - if (kd->dev_fd >= 0) { - uint64_t addr = kd->kda.addr; - - addr |= kd->kda_addr_ormask; - attr->addr = (uintptr_t)&addr; - ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr); - } else { - ret = kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR, &kd->kda); - } + addr |= kd->kda_addr_ormask; + attr->addr = (uintptr_t)&addr; + ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr); if (ret < 0) { fprintf(stderr, "Failed to set device address: %s\n", @@ -968,13 +945,24 @@ void kvm_arm_cpu_pre_save(ARMCPU *cpu) } } -void kvm_arm_cpu_post_load(ARMCPU *cpu) +bool kvm_arm_cpu_post_load(ARMCPU *cpu) { + if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) { + return false; + } + /* Note that it's OK for the TCG side not to know about + * every register in the list; KVM is authoritative if + * we're using it. 
+ */ + write_list_to_cpustate(cpu); + /* KVM virtual time adjustment */ if (cpu->kvm_adjvtime) { cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT); cpu->kvm_vtime_dirty = true; } + + return true; } void kvm_arm_reset_vcpu(ARMCPU *cpu) @@ -1547,7 +1535,7 @@ int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level) int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, uint64_t address, uint32_t data, PCIDevice *dev) { - AddressSpace *as = pci_device_iommu_address_space(dev); + AddressSpace *as = pci_device_iommu_msi_address_space(dev); hwaddr xlat, len, doorbell_gpa; MemoryRegionSection mrs; MemoryRegion *mr; @@ -1788,11 +1776,21 @@ bool kvm_arm_aarch32_supported(void) return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT); } +bool kvm_arm_el2_supported(void) +{ + return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL2); +} + bool kvm_arm_sve_supported(void) { return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE); } +bool kvm_arm_mte_supported(void) +{ + return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE); +} + QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1); uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu) @@ -1821,7 +1819,7 @@ uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu) probed = true; - if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) { + if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) { error_report("failed to create scratch VCPU with SVE enabled"); abort(); } @@ -1860,6 +1858,11 @@ static int kvm_arm_sve_set_vls(ARMCPU *cpu) #define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5 +int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp) +{ + return 0; +} + int kvm_arch_init_vcpu(CPUState *cs) { int ret; @@ -1868,8 +1871,7 @@ int kvm_arch_init_vcpu(CPUState *cs) CPUARMState *env = &cpu->env; uint64_t psciver; - if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE || - !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) { + if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) { error_report("KVM is not supported for this guest CPU type"); return -EINVAL; } @@ -1899,6 +1901,9 @@ int kvm_arch_init_vcpu(CPUState *cs) cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); } + if (cpu->has_el2 && kvm_arm_el2_supported()) { + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2; + } /* Do KVM_ARM_VCPU_INIT ioctl */ ret = kvm_arm_vcpu_init(cpu); @@ -2042,7 +2047,7 @@ static int kvm_arch_put_sve(CPUState *cs) return 0; } -int kvm_arch_put_registers(CPUState *cs, int level) +int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) { uint64_t val; uint32_t fpr; @@ -2226,7 +2231,7 @@ static int kvm_arch_get_sve(CPUState *cs) return 0; } -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { uint64_t val; unsigned int el; @@ -2373,7 +2378,7 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) */ if (code == BUS_MCEERR_AR) { kvm_cpu_synchronize_state(c); - if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) { + if (!acpi_ghes_memory_errors(ACPI_HEST_SRC_ID_SEA, paddr)) { kvm_inject_arm_sea(c); } else { error_report("failed to record the error"); @@ -2417,3 +2422,69 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) } return 0; } + +void kvm_arm_enable_mte(Object *cpuobj, Error **errp) +{ + static bool tried_to_enable; + static bool succeeded_to_enable; + Error *mte_migration_blocker = NULL; + ARMCPU *cpu = ARM_CPU(cpuobj); + int ret; + + if (!tried_to_enable) { + /* + * MTE on KVM is enabled on a per-VM basis (and retrying 
doesn't make + * sense), and we only want a single migration blocker as well. + */ + tried_to_enable = true; + + ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0); + if (ret) { + error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE"); + return; + } + + /* TODO: Add migration support with MTE enabled */ + error_setg(&mte_migration_blocker, + "Live migration disabled due to MTE enabled"); + if (migrate_add_blocker(&mte_migration_blocker, errp)) { + error_free(mte_migration_blocker); + return; + } + + succeeded_to_enable = true; + } + + if (succeeded_to_enable) { + cpu->kvm_mte = true; + } +} + +void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level) +{ + ARMCPU *cpu = arm_cpu; + CPUARMState *env = &cpu->env; + CPUState *cs = CPU(cpu); + uint32_t linestate_bit; + int irq_id; + + switch (irq) { + case ARM_CPU_IRQ: + irq_id = KVM_ARM_IRQ_CPU_IRQ; + linestate_bit = CPU_INTERRUPT_HARD; + break; + case ARM_CPU_FIQ: + irq_id = KVM_ARM_IRQ_CPU_FIQ; + linestate_bit = CPU_INTERRUPT_FIQ; + break; + default: + g_assert_not_reached(); + } + + if (level) { + env->irq_line_state |= linestate_bit; + } else { + env->irq_line_state &= ~linestate_bit; + } + kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level); +} diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index cfaa0d9bc71..6a9b6374a6d 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -11,7 +11,8 @@ #ifndef QEMU_KVM_ARM_H #define QEMU_KVM_ARM_H -#include "sysemu/kvm.h" +#include "system/kvm.h" +#include "target/arm/cpu-qom.h" #define KVM_ARM_VGIC_V2 (1 << 0) #define KVM_ARM_VGIC_V3 (1 << 1) @@ -22,17 +23,15 @@ * @devid: the KVM device ID * @group: device control API group for setting addresses * @attr: device control API address type - * @dev_fd: device control device file descriptor (or -1 if not supported) + * @dev_fd: device control device file descriptor * @addr_ormask: value to be OR'ed with resolved address * - * Remember the memory region @mr, and when it is mapped by the - * machine model, tell the kernel that base address using the - * KVM_ARM_SET_DEVICE_ADDRESS ioctl or the newer device control API. @devid - * should be the ID of the device as defined by KVM_ARM_SET_DEVICE_ADDRESS or - * the arm-vgic device in the device control API. - * The machine model may map - * and unmap the device multiple times; the kernel will only be told the final - * address at the point where machine init is complete. + * Remember the memory region @mr, and when it is mapped by the machine + * model, tell the kernel that base address using the device control API. + * @devid should be the ID of the device as defined by the arm-vgic device + * in the device control API. The machine model may map and unmap the device + * multiple times; the kernel will only be told the final address at the + * point where machine init is complete. */ void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group, uint64_t attr, int dev_fd, uint64_t addr_ormask); @@ -85,8 +84,10 @@ void kvm_arm_cpu_pre_save(ARMCPU *cpu); * @cpu: ARMCPU * * Called from cpu_post_load() to update KVM CPU state from the cpreg list. + * + * Returns: true on success, or false if write_list_to_kvmstate failed. 
*/ -void kvm_arm_cpu_post_load(ARMCPU *cpu); +bool kvm_arm_cpu_post_load(ARMCPU *cpu); /** * kvm_arm_reset_vcpu: @@ -96,13 +97,9 @@ void kvm_arm_cpu_post_load(ARMCPU *cpu); */ void kvm_arm_reset_vcpu(ARMCPU *cpu); -#ifdef CONFIG_KVM +struct kvm_vcpu_init; /** * kvm_arm_create_scratch_host_vcpu: - * @cpus_to_try: array of QEMU_KVM_ARM_TARGET_* values (terminated with - * QEMU_KVM_ARM_TARGET_NONE) to try as fallback if the kernel does not - * know the PREFERRED_TARGET ioctl. Passing NULL is the same as passing - * an empty array. * @fdarray: filled in with kvmfd, vmfd, cpufd file descriptors in that order * @init: filled in with the necessary values for creating a host * vcpu. If NULL is provided, will not init the vCPU (though the cpufd @@ -115,8 +112,7 @@ void kvm_arm_reset_vcpu(ARMCPU *cpu); * Returns: true on success (and fdarray and init are filled in), * false on failure (and fdarray and init are not valid). */ -bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, - int *fdarray, +bool kvm_arm_create_scratch_host_vcpu(int *fdarray, struct kvm_vcpu_init *init); /** @@ -165,6 +161,14 @@ void kvm_arm_add_vcpu_properties(ARMCPU *cpu); */ void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp); +/* + * These "is some KVM subfeature enabled?" functions may be called + * when KVM support is not present, including in the user-mode + * emulators. The kvm-stub.c file is only built into the system + * emulators, so for user-mode emulation we provide "always false" + * stubs here. + */ +#ifndef CONFIG_USER_ONLY /** * kvm_arm_aarch32_supported: * @@ -189,37 +193,20 @@ bool kvm_arm_pmu_supported(void); bool kvm_arm_sve_supported(void); /** - * kvm_arm_get_max_vm_ipa_size: - * @ms: Machine state handle - * @fixed_ipa: True when the IPA limit is fixed at 40. This is the case - * for legacy KVM. + * kvm_arm_mte_supported: * - * Returns the number of bits in the IPA address space supported by KVM + * Returns: true if KVM can enable MTE, and false otherwise. */ -int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa); - -int kvm_arm_vgic_probe(void); - -void kvm_arm_pmu_init(ARMCPU *cpu); -void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq); +bool kvm_arm_mte_supported(void); /** - * kvm_arm_pvtime_init: - * @cpu: ARMCPU - * @ipa: Per-vcpu guest physical base address of the pvtime structures + * kvm_arm_el2_supported: * - * Initializes PVTIME for the VCPU, setting the PVTIME IPA to @ipa. + * Returns true if KVM can enable EL2 and false otherwise. */ -void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa); - -int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level); - +bool kvm_arm_el2_supported(void); #else -/* - * It's safe to call these functions without KVM support. - * They should either do nothing or return "not supported". - */ static inline bool kvm_arm_aarch32_supported(void) { return false; @@ -235,54 +222,45 @@ static inline bool kvm_arm_sve_supported(void) return false; } -/* - * These functions should never actually be called without KVM support. 
- */ -static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu) -{ - g_assert_not_reached(); -} - -static inline void kvm_arm_add_vcpu_properties(ARMCPU *cpu) +static inline bool kvm_arm_mte_supported(void) { - g_assert_not_reached(); + return false; } -static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa) +static inline bool kvm_arm_el2_supported(void) { - g_assert_not_reached(); + return false; } +#endif -static inline int kvm_arm_vgic_probe(void) -{ - g_assert_not_reached(); -} +/** + * kvm_arm_get_max_vm_ipa_size: + * @ms: Machine state handle + * @fixed_ipa: True when the IPA limit is fixed at 40. This is the case + * for legacy KVM. + * + * Returns the number of bits in the IPA address space supported by KVM + */ +int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa); -static inline void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq) -{ - g_assert_not_reached(); -} +int kvm_arm_vgic_probe(void); -static inline void kvm_arm_pmu_init(ARMCPU *cpu) -{ - g_assert_not_reached(); -} +void kvm_arm_pmu_init(ARMCPU *cpu); +void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq); -static inline void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa) -{ - g_assert_not_reached(); -} +/** + * kvm_arm_pvtime_init: + * @cpu: ARMCPU + * @ipa: Per-vcpu guest physical base address of the pvtime structures + * + * Initializes PVTIME for the VCPU, setting the PVTIME IPA to @ipa. + */ +void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa); -static inline void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) -{ - g_assert_not_reached(); -} +int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level); -static inline uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu) -{ - g_assert_not_reached(); -} +void kvm_arm_enable_mte(Object *cpuobj, Error **errp); -#endif +void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level); #endif diff --git a/target/arm/machine.c b/target/arm/machine.c index a3c1e05e65d..6986915bee8 100644 --- a/target/arm/machine.c +++ b/target/arm/machine.c @@ -1,12 +1,13 @@ #include "qemu/osdep.h" #include "cpu.h" #include "qemu/error-report.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" +#include "system/kvm.h" +#include "system/tcg.h" #include "kvm_arm.h" #include "internals.h" #include "cpu-features.h" -#include "migration/cpu.h" +#include "migration/qemu-file-types.h" +#include "migration/vmstate.h" #include "target/arm/gtimer.h" static bool vfp_needed(void *opaque) @@ -240,7 +241,6 @@ static const VMStateDescription vmstate_iwmmxt = { } }; -#ifdef TARGET_AARCH64 /* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build, * and ARMPredicateReg is actively empty. This triggers errors * in the expansion of the VMSTATE macros. 
@@ -315,12 +315,30 @@ static const VMStateDescription vmstate_za = { .minimum_version_id = 1, .needed = za_needed, .fields = (const VMStateField[]) { - VMSTATE_STRUCT_ARRAY(env.zarray, ARMCPU, ARM_MAX_VQ * 16, 0, + VMSTATE_STRUCT_ARRAY(env.za_state.za, ARMCPU, ARM_MAX_VQ * 16, 0, vmstate_vreg, ARMVectorReg), VMSTATE_END_OF_LIST() } }; -#endif /* AARCH64 */ + +static bool zt0_needed(void *opaque) +{ + ARMCPU *cpu = opaque; + + return za_needed(cpu) && cpu_isar_feature(aa64_sme2, cpu); +} + +static const VMStateDescription vmstate_zt0 = { + .name = "cpu/zt0", + .version_id = 1, + .minimum_version_id = 1, + .needed = zt0_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64_ARRAY(env.za_state.zt0, ARMCPU, + ARRAY_SIZE(((CPUARMState *)0)->za_state.zt0)), + VMSTATE_END_OF_LIST() + } +}; static bool serror_needed(void *opaque) { @@ -977,15 +995,9 @@ static int cpu_post_load(void *opaque, int version_id) } if (kvm_enabled()) { - if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) { + if (!kvm_arm_cpu_post_load(cpu)) { return -1; } - /* Note that it's OK for the TCG side not to know about - * every register in the list; KVM is authoritative if - * we're using it. - */ - write_list_to_cpustate(cpu); - kvm_arm_cpu_post_load(cpu); } else { if (!write_list_to_cpustate(cpu)) { return -1; @@ -1101,10 +1113,9 @@ const VMStateDescription vmstate_arm_cpu = { &vmstate_pmsav7, &vmstate_pmsav8, &vmstate_m_security, -#ifdef TARGET_AARCH64 &vmstate_sve, &vmstate_za, -#endif + &vmstate_zt0, &vmstate_serror, &vmstate_irq_line_state, &vmstate_wfxt_timer, diff --git a/target/arm/meson.build b/target/arm/meson.build index 2e10464dbb6..07d9271aa4d 100644 --- a/target/arm/meson.build +++ b/target/arm/meson.build @@ -1,41 +1,60 @@ arm_ss = ss.source_set() +arm_common_ss = ss.source_set() arm_ss.add(files( - 'cpu.c', - 'debug_helper.c', 'gdbstub.c', - 'helper.c', - 'vfp_helper.c', )) -arm_ss.add(zlib) - -arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c'), if_false: files('kvm-stub.c')) -arm_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c')) arm_ss.add(when: 'TARGET_AARCH64', if_true: files( 'cpu64.c', - 'gdbstub64.c', -)) + 'gdbstub64.c')) arm_system_ss = ss.source_set() +arm_common_system_ss = ss.source_set() arm_system_ss.add(files( + 'arm-qmp-cmds.c', +)) +arm_system_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c')) +arm_system_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c')) + +arm_user_ss = ss.source_set() +arm_user_ss.add(files('cpu.c')) +arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files( + 'cpu32-stubs.c', +)) +arm_user_ss.add(files( + 'cpregs-pmu.c', + 'debug_helper.c', + 'helper.c', + 'vfp_fpscr.c', +)) + +arm_common_system_ss.add(files('cpu.c')) +arm_common_system_ss.add(when: 'TARGET_AARCH64', if_false: files( + 'cpu32-stubs.c')) +arm_common_system_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c')) +arm_common_system_ss.add(when: 'CONFIG_HVF', if_false: files('hvf-stub.c')) +arm_common_system_ss.add(files( 'arch_dump.c', 'arm-powerctl.c', - 'arm-qmp-cmds.c', 'cortex-regs.c', + 'cpregs-pmu.c', + 'debug_helper.c', + 'helper.c', 'machine.c', 'ptw.c', + 'vfp_fpscr.c', )) -arm_user_ss = ss.source_set() - subdir('hvf') if 'CONFIG_TCG' in config_all_accel subdir('tcg') else - arm_ss.add(files('tcg-stubs.c')) + arm_common_system_ss.add(files('tcg-stubs.c')) endif target_arch += {'arm': arm_ss} target_system_arch += {'arm': arm_system_ss} target_user_arch += {'arm': arm_user_ss} +target_common_arch += {'arm': arm_common_ss} 
+target_common_system_arch += {'arm': arm_common_system_ss} diff --git a/target/arm/ptw.c b/target/arm/ptw.c index 4476b32ff50..561bf2678e5 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -10,15 +10,14 @@ #include "qemu/log.h" #include "qemu/range.h" #include "qemu/main-loop.h" -#include "exec/exec-all.h" #include "exec/page-protection.h" +#include "exec/target_page.h" +#include "exec/tlb-flags.h" +#include "accel/tcg/probe.h" #include "cpu.h" #include "internals.h" #include "cpu-features.h" #include "idau.h" -#ifdef CONFIG_TCG -# include "tcg/oversized-guest.h" -#endif typedef struct S1Translate { /* @@ -74,17 +73,21 @@ typedef struct S1Translate { } S1Translate; static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, - target_ulong address, - MMUAccessType access_type, + vaddr address, + MMUAccessType access_type, MemOp memop, GetPhysAddrResult *result, ARMMMUFaultInfo *fi); static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw, - target_ulong address, - MMUAccessType access_type, + vaddr address, + MMUAccessType access_type, MemOp memop, GetPhysAddrResult *result, ARMMMUFaultInfo *fi); +static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, + int user_rw, int prot_rw, int xn, int pxn, + ARMSecuritySpace in_pa, ARMSecuritySpace out_pa); + /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */ static const uint8_t pamax_map[] = { [0] = 32, @@ -96,6 +99,21 @@ static const uint8_t pamax_map[] = { [6] = 52, }; +uint8_t round_down_to_parange_index(uint8_t bit_size) +{ + for (int i = ARRAY_SIZE(pamax_map) - 1; i >= 0; i--) { + if (pamax_map[i] <= bit_size) { + return i; + } + } + g_assert_not_reached(); +} + +uint8_t round_down_to_parange_bit_size(uint8_t bit_size) +{ + return pamax_map[round_down_to_parange_index(bit_size)]; +} + /* * The cpu-specific constant value of PAMax; also used by hw/arm/virt. * Note that machvirt_init calls this on a CPU that is inited but not realized! @@ -104,7 +122,7 @@ unsigned int arm_pamax(ARMCPU *cpu) { if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { unsigned int parange = - FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); + FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE); /* * id_aa64mmfr0 is a read-only register so values outside of the @@ -265,6 +283,8 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, case ARMMMUIdx_E20_2_PAN: case ARMMMUIdx_E2: case ARMMMUIdx_E3: + case ARMMMUIdx_E30_0: + case ARMMMUIdx_E30_3_PAN: break; case ARMMMUIdx_Phys_S: @@ -312,7 +332,7 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress, * physical address size is invalid. 
*/ pps = FIELD_EX64(gpccr, GPCCR, PPS); - if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) { + if (pps > FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE)) { goto fault_walk; } pps = pamax_map[pps]; @@ -564,7 +584,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, }; GetPhysAddrResult s2 = { }; - if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) { + if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, 0, &s2, fi)) { goto fail; } @@ -717,7 +737,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, uint64_t new_val, S1Translate *ptw, ARMMMUFaultInfo *fi) { -#if defined(TARGET_AARCH64) && defined(CONFIG_TCG) +#if defined(CONFIG_ATOMIC64) && defined(CONFIG_TCG) uint64_t cur_val; void *host = ptw->out_host; @@ -819,7 +839,6 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, ptw->out_rw = true; } -#ifdef CONFIG_ATOMIC64 if (ptw->out_be) { old_val = cpu_to_be64(old_val); new_val = cpu_to_be64(new_val); @@ -831,36 +850,6 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val); cur_val = le64_to_cpu(cur_val); } -#else - /* - * We can't support the full 64-bit atomic cmpxchg on the host. - * Because this is only used for FEAT_HAFDBS, which is only for AA64, - * we know that TCG_OVERSIZED_GUEST is set, which means that we are - * running in round-robin mode and could only race with dma i/o. - */ -#if !TCG_OVERSIZED_GUEST -# error "Unexpected configuration" -#endif - bool locked = bql_locked(); - if (!locked) { - bql_lock(); - } - if (ptw->out_be) { - cur_val = ldq_be_p(host); - if (cur_val == old_val) { - stq_be_p(host, new_val); - } - } else { - cur_val = ldq_le_p(host); - if (cur_val == old_val) { - stq_le_p(host, new_val); - } - } - if (!locked) { - bql_unlock(); - } -#endif - return cur_val; #else /* AArch32 does not have FEAT_HADFS; non-TCG guests only use debug-mode. */ @@ -1131,7 +1120,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw, hwaddr phys_addr; uint32_t dacr; bool ns; - int user_prot; + ARMSecuritySpace out_space; /* Pagetable walk. */ /* Lookup l1 descriptor. */ @@ -1223,16 +1212,19 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw, g_assert_not_reached(); } } + out_space = ptw->in_space; + if (ns) { + /* + * The NS bit will (as required by the architecture) have no effect if + * the CPU doesn't support TZ or this is a non-secure translation + * regime, because the output space will already be non-secure. 
+ */ + out_space = ARMSS_NonSecure; + } if (domain_prot == 3) { result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; } else { - if (pxn && !regime_is_user(env, mmu_idx)) { - xn = 1; - } - if (xn && access_type == MMU_INST_FETCH) { - fi->type = ARMFault_Permission; - goto do_fault; - } + int user_rw, prot_rw; if (arm_feature(env, ARM_FEATURE_V6K) && (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { @@ -1242,37 +1234,23 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw, fi->type = ARMFault_AccessFlag; goto do_fault; } - result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); - user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1); + prot_rw = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); + user_rw = simple_ap_to_rw_prot_is_user(ap >> 1, 1); } else { - result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); - user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1); - } - if (result->f.prot && !xn) { - result->f.prot |= PAGE_EXEC; + prot_rw = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); + user_rw = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1); } + + result->f.prot = get_S1prot(env, mmu_idx, false, user_rw, prot_rw, + xn, pxn, result->f.attrs.space, out_space); if (!(result->f.prot & (1 << access_type))) { /* Access permission fault. */ fi->type = ARMFault_Permission; goto do_fault; } - if (regime_is_pan(env, mmu_idx) && - !regime_is_user(env, mmu_idx) && - user_prot && - access_type != MMU_INST_FETCH) { - /* Privileged Access Never fault */ - fi->type = ARMFault_Permission; - goto do_fault; - } - } - if (ns) { - /* The NS bit will (as required by the architecture) have no effect if - * the CPU doesn't support TZ or this is a non-secure translation - * regime, because the attribute will already be non-secure. - */ - result->f.attrs.secure = false; - result->f.attrs.space = ARMSS_NonSecure; } + result->f.attrs.space = out_space; + result->f.attrs.secure = arm_space_is_secure(out_space); result->f.phys_addr = phys_addr; return false; do_fault: @@ -1340,25 +1318,24 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) * @env: CPUARMState * @mmu_idx: MMU index indicating required translation regime * @is_aa64: TRUE if AArch64 - * @ap: The 2-bit simple AP (AP[2:1]) + * @user_rw: Translated AP for user access + * @prot_rw: Translated AP for privileged access * @xn: XN (execute-never) bit * @pxn: PXN (privileged execute-never) bit * @in_pa: The original input pa space * @out_pa: The output pa space, modified by NSTable, NS, and NSE */ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, - int ap, int xn, int pxn, + int user_rw, int prot_rw, int xn, int pxn, ARMSecuritySpace in_pa, ARMSecuritySpace out_pa) { ARMCPU *cpu = env_archcpu(env); bool is_user = regime_is_user(env, mmu_idx); - int prot_rw, user_rw; bool have_wxn; int wxn = 0; assert(!regime_is_stage2(mmu_idx)); - user_rw = simple_ap_to_rw_prot_is_user(ap, true); if (is_user) { prot_rw = user_rw; } else { @@ -1376,8 +1353,6 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, regime_is_pan(env, mmu_idx) && (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) { prot_rw = 0; - } else { - prot_rw = simple_ap_to_rw_prot_is_user(ap, false); } } @@ -1669,12 +1644,13 @@ static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw) * @ptw: Current and next stage parameters for the walk. 
* @address: virtual address to get physical address for * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH + * @memop: memory operation feeding this access, or 0 for none * @result: set on translation success, * @fi: set to fault info if the translation fails */ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, uint64_t address, - MMUAccessType access_type, + MMUAccessType access_type, MemOp memop, GetPhysAddrResult *result, ARMMMUFaultInfo *fi) { ARMCPU *cpu = env_archcpu(env); @@ -1684,7 +1660,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, uint64_t ttbr; hwaddr descaddr, indexmask, indexmask_grainsize; uint32_t tableattrs; - target_ulong page_size; + uint64_t page_size; uint64_t attrs; int32_t stride; int addrsize, inputsize, outputsize; @@ -1727,7 +1703,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, * ID_AA64MMFR0 is a read-only register so values outside of the * supported mappings can be considered an implementation error. */ - ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); + ps = FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE); ps = MIN(ps, param.ps); assert(ps < ARRAY_SIZE(pamax_map)); outputsize = pamax_map[ps]; @@ -1757,7 +1733,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, * validation to do here. */ if (inputsize < addrsize) { - target_ulong top_bits = sextract64(address, inputsize, + uint64_t top_bits = sextract64(address, inputsize, addrsize - inputsize); if (-top_bits != param.select) { /* The gap between the two regions is a Translation fault */ @@ -2013,8 +1989,21 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, xn = extract64(attrs, 53, 2); result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0); } + + result->cacheattrs.is_s2_format = true; + result->cacheattrs.attrs = extract32(attrs, 2, 4); + /* + * Security state does not really affect HCR_EL2.FWB; + * we only need to filter FWB for aa32 or other FEAT. + */ + device = S2_attrs_are_device(arm_hcr_el2_eff(env), + result->cacheattrs.attrs); } else { int nse, ns = extract32(attrs, 5, 1); + uint8_t attrindx; + uint64_t mair; + int user_rw, prot_rw; + switch (out_space) { case ARMSS_Root: /* @@ -2080,12 +2069,58 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, xn = 0; ap &= ~1; } + + user_rw = simple_ap_to_rw_prot_is_user(ap, true); + prot_rw = simple_ap_to_rw_prot_is_user(ap, false); /* * Note that we modified ptw->in_space earlier for NSTable, but * result->f.attrs retains a copy of the original security space. */ - result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn, - result->f.attrs.space, out_space); + result->f.prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw, + xn, pxn, result->f.attrs.space, out_space); + + /* Index into MAIR registers for cache attributes */ + attrindx = extract32(attrs, 2, 3); + mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; + assert(attrindx <= 7); + result->cacheattrs.is_s2_format = false; + result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); + + /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */ + if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) { + result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */ + } + device = S1_attrs_are_device(result->cacheattrs.attrs); + } + + /* + * Enable alignment checks on Device memory. 
+ * + * Per R_XCHFJ, the correct ordering for alignment, permission, + * and stage 2 faults is: + * - Alignment fault caused by the memory type + * - Permission fault + * - A stage 2 fault on the memory access + * Perform the alignment check now, so that we recognize it in + * the correct order. Set TLB_CHECK_ALIGNED so that any subsequent + * softmmu tlb hit will also check the alignment; clear along the + * non-device path so that tlb_fill_flags is consistent in the + * event of restart_atomic_update. + * + * In v7, for a CPU without the Virtualization Extensions this + * access is UNPREDICTABLE; we choose to make it take the alignment + * fault as is required for a v7VE CPU. (QEMU doesn't emulate any + * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.) + */ + if (device) { + unsigned a_bits = memop_atomicity_bits(memop); + if (address & ((1 << a_bits) - 1)) { + fi->type = ARMFault_Alignment; + goto do_fault; + } + result->f.tlb_fill_flags = TLB_CHECK_ALIGNED; + } else { + result->f.tlb_fill_flags = 0; } if (!(result->f.prot & (1 << access_type))) { @@ -2115,51 +2150,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, result->f.attrs.space = out_space; result->f.attrs.secure = arm_space_is_secure(out_space); - if (regime_is_stage2(mmu_idx)) { - result->cacheattrs.is_s2_format = true; - result->cacheattrs.attrs = extract32(attrs, 2, 4); - /* - * Security state does not really affect HCR_EL2.FWB; - * we only need to filter FWB for aa32 or other FEAT. - */ - device = S2_attrs_are_device(arm_hcr_el2_eff(env), - result->cacheattrs.attrs); - } else { - /* Index into MAIR registers for cache attributes */ - uint8_t attrindx = extract32(attrs, 2, 3); - uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; - assert(attrindx <= 7); - result->cacheattrs.is_s2_format = false; - result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); - - /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */ - if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) { - result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */ - } - device = S1_attrs_are_device(result->cacheattrs.attrs); - } - - /* - * Enable alignment checks on Device memory. - * - * Per R_XCHFJ, this check is mis-ordered. The correct ordering - * for alignment, permission, and stage 2 faults should be: - * - Alignment fault caused by the memory type - * - Permission fault - * - A stage 2 fault on the memory access - * but due to the way the TCG softmmu TLB operates, we will have - * implicitly done the permission check and the stage2 lookup in - * finding the TLB entry, so the alignment check cannot be done sooner. - * - * In v7, for a CPU without the Virtualization Extensions this - * access is UNPREDICTABLE; we choose to make it take the alignment - * fault as is required for a v7VE CPU. (QEMU doesn't emulate any - * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.) - */ - if (device) { - result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED; - } - /* * For FEAT_LPA2 and effective DS, the SH field in the attributes * was re-purposed for output address bits. 
The SH attribute in @@ -3202,7 +3192,7 @@ static ARMCacheAttrs combine_cacheattrs(uint64_t hcr, */ static bool get_phys_addr_disabled(CPUARMState *env, S1Translate *ptw, - target_ulong address, + vaddr address, MMUAccessType access_type, GetPhysAddrResult *result, ARMMMUFaultInfo *fi) @@ -3285,8 +3275,8 @@ static bool get_phys_addr_disabled(CPUARMState *env, } static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, - target_ulong address, - MMUAccessType access_type, + vaddr address, + MMUAccessType access_type, MemOp memop, GetPhysAddrResult *result, ARMMMUFaultInfo *fi) { @@ -3298,7 +3288,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, ARMSecuritySpace ipa_space; uint64_t hcr; - ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi); + ret = get_phys_addr_nogpc(env, ptw, address, access_type, + memop, result, fi); /* If S1 fails, return early. */ if (ret) { @@ -3324,7 +3315,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, cacheattrs1 = result->cacheattrs; memset(result, 0, sizeof(*result)); - ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi); + ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, + memop, result, fi); fi->s2addr = ipa; /* Combine the S1 and S2 perms. */ @@ -3390,8 +3382,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, } static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, - target_ulong address, - MMUAccessType access_type, + vaddr address, + MMUAccessType access_type, MemOp memop, GetPhysAddrResult *result, ARMMMUFaultInfo *fi) { @@ -3454,7 +3446,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, if (arm_feature(env, ARM_FEATURE_EL2) && !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) { return get_phys_addr_twostage(env, ptw, address, access_type, - result, fi); + memop, result, fi); } /* fall through */ @@ -3517,7 +3509,8 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, } if (regime_using_lpae_format(env, mmu_idx)) { - return get_phys_addr_lpae(env, ptw, address, access_type, result, fi); + return get_phys_addr_lpae(env, ptw, address, access_type, + memop, result, fi); } else if (arm_feature(env, ARM_FEATURE_V7) || regime_sctlr(env, mmu_idx) & SCTLR_XP) { return get_phys_addr_v6(env, ptw, address, access_type, result, fi); @@ -3527,12 +3520,13 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, } static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw, - target_ulong address, - MMUAccessType access_type, + vaddr address, + MMUAccessType access_type, MemOp memop, GetPhysAddrResult *result, ARMMMUFaultInfo *fi) { - if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) { + if (get_phys_addr_nogpc(env, ptw, address, access_type, + memop, result, fi)) { return true; } if (!granule_protection_check(env, result->f.phys_addr, @@ -3543,8 +3537,8 @@ static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw, return false; } -bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address, - MMUAccessType access_type, +bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address, + MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx, ARMSecuritySpace space, GetPhysAddrResult *result, ARMMMUFaultInfo *fi) @@ -3553,16 +3547,13 @@ bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address, .in_mmu_idx = mmu_idx, .in_space = space, }; - return get_phys_addr_nogpc(env, &ptw, address, 
access_type, result, fi); + return get_phys_addr_nogpc(env, &ptw, address, access_type, + memop, result, fi); } -bool get_phys_addr(CPUARMState *env, target_ulong address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - GetPhysAddrResult *result, ARMMMUFaultInfo *fi) +static ARMSecuritySpace +arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx) { - S1Translate ptw = { - .in_mmu_idx = mmu_idx, - }; ARMSecuritySpace ss; switch (mmu_idx) { @@ -3604,6 +3595,8 @@ bool get_phys_addr(CPUARMState *env, target_ulong address, ss = ARMSS_Secure; break; case ARMMMUIdx_E3: + case ARMMMUIdx_E30_0: + case ARMMMUIdx_E30_3_PAN: if (arm_feature(env, ARM_FEATURE_AARCH64) && cpu_isar_feature(aa64_rme, env_archcpu(env))) { ss = ARMSS_Root; @@ -3621,27 +3614,33 @@ bool get_phys_addr(CPUARMState *env, target_ulong address, g_assert_not_reached(); } - ptw.in_space = ss; - return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi); + return ss; } -hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, - MemTxAttrs *attrs) +bool get_phys_addr(CPUARMState *env, vaddr address, + MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi) { - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - ARMMMUIdx mmu_idx = arm_mmu_idx(env); - ARMSecuritySpace ss = arm_security_space(env); S1Translate ptw = { .in_mmu_idx = mmu_idx, - .in_space = ss, + .in_space = arm_mmu_idx_to_security_space(env, mmu_idx), + }; + + return get_phys_addr_gpc(env, &ptw, address, access_type, + memop, result, fi); +} + +static hwaddr arm_cpu_get_phys_page(CPUARMState *env, vaddr addr, + MemTxAttrs *attrs, ARMMMUIdx mmu_idx) +{ + S1Translate ptw = { + .in_mmu_idx = mmu_idx, + .in_space = arm_mmu_idx_to_security_space(env, mmu_idx), .in_debug = true, }; GetPhysAddrResult res = {}; ARMMMUFaultInfo fi = {}; - bool ret; - - ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi); + bool ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi); *attrs = res.f.attrs; if (ret) { @@ -3649,3 +3648,33 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, } return res.f.phys_addr; } + +hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, + MemTxAttrs *attrs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + ARMMMUIdx mmu_idx = arm_mmu_idx(env); + + hwaddr res = arm_cpu_get_phys_page(env, addr, attrs, mmu_idx); + + if (res != -1) { + return res; + } + + /* + * Memory may be accessible for an "unprivileged load/store" variant. + * In this case, get_a64_user_mem_index function generates an op using an + * unprivileged mmu idx, so we need to try with it. 
+ */ + switch (mmu_idx) { + case ARMMMUIdx_E10_1: + case ARMMMUIdx_E10_1_PAN: + return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E10_0); + case ARMMMUIdx_E20_2: + case ARMMMUIdx_E20_2_PAN: + return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E20_0); + default: + return -1; + } +} diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h index 3244e0740dd..c48d3b85871 100644 --- a/target/arm/syndrome.h +++ b/target/arm/syndrome.h @@ -80,6 +80,7 @@ typedef enum { SME_ET_Streaming, SME_ET_NotStreaming, SME_ET_InactiveZA, + SME_ET_InaccessibleZT0, } SMEExceptionType; #define ARM_EL_EC_LENGTH 6 diff --git a/target/arm/tcg-stubs.c b/target/arm/tcg-stubs.c index 152b172e243..aac99b2672a 100644 --- a/target/arm/tcg-stubs.c +++ b/target/arm/tcg-stubs.c @@ -21,7 +21,25 @@ void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome, { g_assert_not_reached(); } -/* Temporarily while cpu_get_tb_cpu_state() is still in common code */ -void assert_hflags_rebuild_correctly(CPUARMState *env) + +/* With KVM, we never use float_status, so these can be no-ops */ +void arm_set_default_fp_behaviours(float_status *s) +{ +} + +void arm_set_ah_fp_behaviours(float_status *s) +{ +} + +uint32_t vfp_get_fpsr_from_host(CPUARMState *env) +{ + return 0; +} + +void vfp_clear_float_status_exc_flags(CPUARMState *env) +{ +} + +void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) { } diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode index 62df4c4ceb4..8c798cde2b4 100644 --- a/target/arm/tcg/a64.decode +++ b/target/arm/tcg/a64.decode @@ -21,28 +21,40 @@ %rd 0:5 %esz_sd 22:1 !function=plus_2 +%esz_hs 22:1 !function=plus_1 %esz_hsd 22:2 !function=xor_2 %hl 11:1 21:1 %hlm 11:1 20:2 &r rn +&rrr rd rn rm &ri rd imm +&rr rd rn +&rr_sf rd rn sf &rri_sf rd rn imm sf +&rrr_sf rd rn rm sf &i imm &rr_e rd rn esz +&rri_e rd rn imm esz &rrr_e rd rn rm esz &rrx_e rd rn rm idx esz &rrrr_e rd rn rm ra esz &qrr_e q rd rn esz +&qrri_e q rd rn imm esz &qrrr_e q rd rn rm esz &qrrx_e q rd rn rm idx esz &qrrrr_e q rd rn rm ra esz @rr_h ........ ... ..... ...... rn:5 rd:5 &rr_e esz=1 +@rr_s ........ ... ..... ...... rn:5 rd:5 &rr_e esz=2 @rr_d ........ ... ..... ...... rn:5 rd:5 &rr_e esz=3 +@rr_e ........ esz:2 . ..... ...... rn:5 rd:5 &rr_e @rr_sd ........ ... ..... ...... rn:5 rd:5 &rr_e esz=%esz_sd +@rr_hsd ........ ... ..... ...... rn:5 rd:5 &rr_e esz=%esz_hsd +@rrr_b ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=0 @rrr_h ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=1 +@rrr_s ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=2 @rrr_d ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=3 @rrr_sd ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=%esz_sd @rrr_hsd ........ ... rm:5 ...... rn:5 rd:5 &rrr_e esz=%esz_hsd @@ -54,11 +66,20 @@ @rrx_d ........ .. . rm:5 .... idx:1 . rn:5 rd:5 &rrx_e esz=3 @rr_q1e0 ........ ........ ...... rn:5 rd:5 &qrr_e q=1 esz=0 +@rr_q1e2 ........ ........ ...... rn:5 rd:5 &qrr_e q=1 esz=2 @r2r_q1e0 ........ ........ ...... rm:5 rd:5 &qrrr_e rn=%rd q=1 esz=0 @rrr_q1e0 ........ ... rm:5 ...... rn:5 rd:5 &qrrr_e q=1 esz=0 @rrr_q1e3 ........ ... rm:5 ...... rn:5 rd:5 &qrrr_e q=1 esz=3 @rrrr_q1e3 ........ ... rm:5 . ra:5 rn:5 rd:5 &qrrrr_e q=1 esz=3 +@qrr_b . q:1 ...... .. ...... ...... rn:5 rd:5 &qrr_e esz=0 +@qrr_h . q:1 ...... .. ...... ...... rn:5 rd:5 &qrr_e esz=1 +@qrr_s . q:1 ...... .. ...... ...... rn:5 rd:5 &qrr_e esz=2 +@qrr_bh . q:1 ...... . esz:1 ...... ...... rn:5 rd:5 &qrr_e +@qrr_hs . q:1 ...... .. ...... ...... rn:5 rd:5 &qrr_e esz=%esz_hs +@qrr_sd . 
q:1 ...... .. ...... ...... rn:5 rd:5 &qrr_e esz=%esz_sd +@qrr_e . q:1 ...... esz:2 ...... ...... rn:5 rd:5 &qrr_e + @qrrr_b . q:1 ...... ... rm:5 ...... rn:5 rd:5 &qrrr_e esz=0 @qrrr_h . q:1 ...... ... rm:5 ...... rn:5 rd:5 &qrrr_e esz=1 @qrrr_s . q:1 ...... ... rm:5 ...... rn:5 rd:5 &qrrr_e esz=2 @@ -155,7 +176,7 @@ UBFM . 10 100110 . ...... ...... ..... ..... @bitfield_32 EXTR 1 00 100111 1 0 rm:5 imm:6 rn:5 rd:5 &extract sf=1 EXTR 0 00 100111 0 0 rm:5 0 imm:5 rn:5 rd:5 &extract sf=0 -# Branches +### Branches %imm26 0:s26 !function=times_4 @branch . ..... .......................... &i imm=%imm26 @@ -239,6 +260,9 @@ WFIT 1101 0101 0000 0011 0001 0000 001 rd:5 CLREX 1101 0101 0000 0011 0011 ---- 010 11111 DSB_DMB 1101 0101 0000 0011 0011 domain:2 types:2 10- 11111 +# For the DSB nXS variant, types always equals MBReqTypes_All and we ignore the +# domain bits. +DSB_nXS 1101 0101 0000 0011 0011 -- 10 001 11111 ISB 1101 0101 0000 0011 0011 ---- 110 11111 SB 1101 0101 0000 0011 0011 0000 111 11111 @@ -285,7 +309,7 @@ HLT 1101 0100 010 ................ 000 00 @i16 # DCPS2 1101 0100 101 ................ 000 10 @i16 # DCPS3 1101 0100 101 ................ 000 11 @i16 -# Loads and stores +### Loads and stores &stxr rn rt rt2 rs sz lasr &stlr rn rt sz lasr @@ -643,6 +667,138 @@ CPYP 00 011 1 01000 ..... .... 01 ..... ..... @cpy CPYM 00 011 1 01010 ..... .... 01 ..... ..... @cpy CPYE 00 011 1 01100 ..... .... 01 ..... ..... @cpy +### Data Processing (register) + +# Data Processing (2-source) + +@rrr . .......... rm:5 ...... rn:5 rd:5 &rrr +@rrr_sf sf:1 .......... rm:5 ...... rn:5 rd:5 &rrr_sf + +UDIV . 00 11010110 ..... 00001 0 ..... ..... @rrr_sf +SDIV . 00 11010110 ..... 00001 1 ..... ..... @rrr_sf +LSLV . 00 11010110 ..... 00100 0 ..... ..... @rrr_sf +LSRV . 00 11010110 ..... 00100 1 ..... ..... @rrr_sf +ASRV . 00 11010110 ..... 00101 0 ..... ..... @rrr_sf +RORV . 00 11010110 ..... 00101 1 ..... ..... @rrr_sf + +CRC32 0 00 11010110 ..... 0100 00 ..... ..... @rrr_b +CRC32 0 00 11010110 ..... 0100 01 ..... ..... @rrr_h +CRC32 0 00 11010110 ..... 0100 10 ..... ..... @rrr_s +CRC32 1 00 11010110 ..... 0100 11 ..... ..... @rrr_d + +CRC32C 0 00 11010110 ..... 0101 00 ..... ..... @rrr_b +CRC32C 0 00 11010110 ..... 0101 01 ..... ..... @rrr_h +CRC32C 0 00 11010110 ..... 0101 10 ..... ..... @rrr_s +CRC32C 1 00 11010110 ..... 0101 11 ..... ..... @rrr_d + +SUBP 1 00 11010110 ..... 000000 ..... ..... @rrr +SUBPS 1 01 11010110 ..... 000000 ..... ..... @rrr +IRG 1 00 11010110 ..... 000100 ..... ..... @rrr +GMI 1 00 11010110 ..... 000101 ..... ..... @rrr + +PACGA 1 00 11010110 ..... 001100 ..... ..... @rrr + +# Data Processing (1-source) + +@rr . .......... ..... ...... rn:5 rd:5 &rr +@rr_sf sf:1 .......... ..... ...... rn:5 rd:5 &rr_sf + +RBIT . 10 11010110 00000 000000 ..... ..... @rr_sf +REV16 . 10 11010110 00000 000001 ..... ..... @rr_sf +REV32 . 10 11010110 00000 000010 ..... ..... @rr_sf +REV64 1 10 11010110 00000 000011 ..... ..... @rr + +CLZ . 10 11010110 00000 000100 ..... ..... @rr_sf +CLS . 10 11010110 00000 000101 ..... ..... @rr_sf + +&pacaut rd rn z +@pacaut . .. ........ ..... .. z:1 ... rn:5 rd:5 &pacaut + +PACIA 1 10 11010110 00001 00.000 ..... ..... @pacaut +PACIB 1 10 11010110 00001 00.001 ..... ..... @pacaut +PACDA 1 10 11010110 00001 00.010 ..... ..... @pacaut +PACDB 1 10 11010110 00001 00.011 ..... ..... @pacaut + +AUTIA 1 10 11010110 00001 00.100 ..... ..... @pacaut +AUTIB 1 10 11010110 00001 00.101 ..... ..... @pacaut +AUTDA 1 10 11010110 00001 00.110 ..... ..... 
@pacaut +AUTDB 1 10 11010110 00001 00.111 ..... ..... @pacaut + +XPACI 1 10 11010110 00001 010000 11111 rd:5 +XPACD 1 10 11010110 00001 010001 11111 rd:5 + +# Logical (shifted reg) + +&logic_shift rd rn rm sf sa st n +@logic_shift sf:1 .. ..... st:2 n:1 rm:5 sa:6 rn:5 rd:5 &logic_shift + +AND_r . 00 01010 .. . ..... ...... ..... ..... @logic_shift +ORR_r . 01 01010 .. . ..... ...... ..... ..... @logic_shift +EOR_r . 10 01010 .. . ..... ...... ..... ..... @logic_shift +ANDS_r . 11 01010 .. . ..... ...... ..... ..... @logic_shift + +# Add/subtract (shifted reg) + +&addsub_shift rd rn rm sf sa st +@addsub_shift sf:1 .. ..... st:2 . rm:5 sa:6 rn:5 rd:5 &addsub_shift + +ADD_r . 00 01011 .. 0 ..... ...... ..... ..... @addsub_shift +SUB_r . 10 01011 .. 0 ..... ...... ..... ..... @addsub_shift +ADDS_r . 01 01011 .. 0 ..... ...... ..... ..... @addsub_shift +SUBS_r . 11 01011 .. 0 ..... ...... ..... ..... @addsub_shift + +# Add/subtract (extended reg) + +&addsub_ext rd rn rm sf sa st +@addsub_ext sf:1 .. ........ rm:5 st:3 sa:3 rn:5 rd:5 &addsub_ext + +ADD_ext . 00 01011001 ..... ... ... ..... ..... @addsub_ext +SUB_ext . 10 01011001 ..... ... ... ..... ..... @addsub_ext +ADDS_ext . 01 01011001 ..... ... ... ..... ..... @addsub_ext +SUBS_ext . 11 01011001 ..... ... ... ..... ..... @addsub_ext + +# Add/subtract (carry) + +ADC . 00 11010000 ..... 000000 ..... ..... @rrr_sf +ADCS . 01 11010000 ..... 000000 ..... ..... @rrr_sf +SBC . 10 11010000 ..... 000000 ..... ..... @rrr_sf +SBCS . 11 11010000 ..... 000000 ..... ..... @rrr_sf + +# Rotate right into flags + +RMIF 1 01 11010000 imm:6 00001 rn:5 0 mask:4 + +# Evaluate into flags + +SETF8 0 01 11010000 00000 000010 rn:5 01101 +SETF16 0 01 11010000 00000 010010 rn:5 01101 + +# Conditional compare + +CCMP sf:1 op:1 1 11010010 y:5 cond:4 imm:1 0 rn:5 0 nzcv:4 + +# Conditional select + +CSEL sf:1 else_inv:1 011010100 rm:5 cond:4 0 else_inc:1 rn:5 rd:5 + +# Data Processing (3-source) + +&rrrr rd rn rm ra +@rrrr . .. ........ rm:5 . ra:5 rn:5 rd:5 &rrrr + +MADD_w 0 00 11011000 ..... 0 ..... ..... ..... @rrrr +MSUB_w 0 00 11011000 ..... 1 ..... ..... ..... @rrrr +MADD_x 1 00 11011000 ..... 0 ..... ..... ..... @rrrr +MSUB_x 1 00 11011000 ..... 1 ..... ..... ..... @rrrr + +SMADDL 1 00 11011001 ..... 0 ..... ..... ..... @rrrr +SMSUBL 1 00 11011001 ..... 1 ..... ..... ..... @rrrr +UMADDL 1 00 11011101 ..... 0 ..... ..... ..... @rrrr +UMSUBL 1 00 11011101 ..... 1 ..... ..... ..... @rrrr + +SMULH 1 00 11011010 ..... 0 11111 ..... ..... @rrr +UMULH 1 00 11011110 ..... 0 11111 ..... ..... @rrr + ### Cryptographic AES AESE 01001110 00 10100 00100 10 ..... ..... @r2r_q1e0 @@ -1136,3 +1292,605 @@ FMADD 0001 1111 .. 0 ..... 0 ..... ..... ..... @rrrr_hsd FMSUB 0001 1111 .. 0 ..... 1 ..... ..... ..... @rrrr_hsd FNMADD 0001 1111 .. 1 ..... 0 ..... ..... ..... @rrrr_hsd FNMSUB 0001 1111 .. 1 ..... 1 ..... ..... ..... @rrrr_hsd + +# Advanced SIMD Extract + +EXT_d 0010 1110 00 0 rm:5 00 imm:3 0 rn:5 rd:5 +EXT_q 0110 1110 00 0 rm:5 0 imm:4 0 rn:5 rd:5 + +# Advanced SIMD Table Lookup + +TBL_TBX 0 q:1 00 1110 000 rm:5 0 len:2 tbx:1 00 rn:5 rd:5 + +# Advanced SIMD Permute + +UZP1 0.00 1110 .. 0 ..... 0 001 10 ..... ..... @qrrr_e +UZP2 0.00 1110 .. 0 ..... 0 101 10 ..... ..... @qrrr_e +TRN1 0.00 1110 .. 0 ..... 0 010 10 ..... ..... @qrrr_e +TRN2 0.00 1110 .. 0 ..... 0 110 10 ..... ..... @qrrr_e +ZIP1 0.00 1110 .. 0 ..... 0 011 10 ..... ..... @qrrr_e +ZIP2 0.00 1110 .. 0 ..... 0 111 10 ..... ..... @qrrr_e + +# Advanced SIMD Across Lanes + +ADDV 0.00 1110 .. 11000 11011 10 ..... ..... 
@qrr_e +SADDLV 0.00 1110 .. 11000 00011 10 ..... ..... @qrr_e +UADDLV 0.10 1110 .. 11000 00011 10 ..... ..... @qrr_e +SMAXV 0.00 1110 .. 11000 01010 10 ..... ..... @qrr_e +UMAXV 0.10 1110 .. 11000 01010 10 ..... ..... @qrr_e +SMINV 0.00 1110 .. 11000 11010 10 ..... ..... @qrr_e +UMINV 0.10 1110 .. 11000 11010 10 ..... ..... @qrr_e + +FMAXNMV_h 0.00 1110 00 11000 01100 10 ..... ..... @qrr_h +FMAXNMV_s 0110 1110 00 11000 01100 10 ..... ..... @rr_q1e2 + +FMINNMV_h 0.00 1110 10 11000 01100 10 ..... ..... @qrr_h +FMINNMV_s 0110 1110 10 11000 01100 10 ..... ..... @rr_q1e2 + +FMAXV_h 0.00 1110 00 11000 01111 10 ..... ..... @qrr_h +FMAXV_s 0110 1110 00 11000 01111 10 ..... ..... @rr_q1e2 + +FMINV_h 0.00 1110 10 11000 01111 10 ..... ..... @qrr_h +FMINV_s 0110 1110 10 11000 01111 10 ..... ..... @rr_q1e2 + +# Conversion between floating-point and fixed-point (general register) + +&fcvt rd rn esz sf shift +%fcvt_shift32 10:5 !function=rsub_32 +%fcvt_shift64 10:6 !function=rsub_64 + +@fcvt32 0 ....... .. ...... 1..... rn:5 rd:5 \ + &fcvt sf=0 esz=%esz_hsd shift=%fcvt_shift32 +@fcvt64 1 ....... .. ...... ...... rn:5 rd:5 \ + &fcvt sf=1 esz=%esz_hsd shift=%fcvt_shift64 + +SCVTF_g . 0011110 .. 000010 ...... ..... ..... @fcvt32 +SCVTF_g . 0011110 .. 000010 ...... ..... ..... @fcvt64 +UCVTF_g . 0011110 .. 000011 ...... ..... ..... @fcvt32 +UCVTF_g . 0011110 .. 000011 ...... ..... ..... @fcvt64 + +FCVTZS_g . 0011110 .. 011000 ...... ..... ..... @fcvt32 +FCVTZS_g . 0011110 .. 011000 ...... ..... ..... @fcvt64 +FCVTZU_g . 0011110 .. 011001 ...... ..... ..... @fcvt32 +FCVTZU_g . 0011110 .. 011001 ...... ..... ..... @fcvt64 + +# Conversion between floating-point and integer (general register) + +@icvt sf:1 ....... .. ...... ...... rn:5 rd:5 \ + &fcvt esz=%esz_hsd shift=0 + +SCVTF_g . 0011110 .. 100010 000000 ..... ..... @icvt +UCVTF_g . 0011110 .. 100011 000000 ..... ..... @icvt + +FCVTNS_g . 0011110 .. 100000 000000 ..... ..... @icvt +FCVTNU_g . 0011110 .. 100001 000000 ..... ..... @icvt +FCVTPS_g . 0011110 .. 101000 000000 ..... ..... @icvt +FCVTPU_g . 0011110 .. 101001 000000 ..... ..... @icvt +FCVTMS_g . 0011110 .. 110000 000000 ..... ..... @icvt +FCVTMU_g . 0011110 .. 110001 000000 ..... ..... @icvt +FCVTZS_g . 0011110 .. 111000 000000 ..... ..... @icvt +FCVTZU_g . 0011110 .. 111001 000000 ..... ..... @icvt +FCVTAS_g . 0011110 .. 100100 000000 ..... ..... @icvt +FCVTAU_g . 0011110 .. 100101 000000 ..... ..... @icvt + +FJCVTZS 0 0011110 01 111110 000000 ..... ..... @rr + +FMOV_ws 0 0011110 00 100110 000000 ..... ..... @rr +FMOV_sw 0 0011110 00 100111 000000 ..... ..... @rr + +FMOV_xd 1 0011110 01 100110 000000 ..... ..... @rr +FMOV_dx 1 0011110 01 100111 000000 ..... ..... @rr + +# Move to/from upper half of 128-bit +FMOV_xu 1 0011110 10 101110 000000 ..... ..... @rr +FMOV_ux 1 0011110 10 101111 000000 ..... ..... @rr + +# Half-precision allows both sf=0 and sf=1 with identical results +FMOV_xh - 0011110 11 100110 000000 ..... ..... @rr +FMOV_hx - 0011110 11 100111 000000 ..... ..... @rr + +# Floating-point data processing (1 source) + +FMOV_s 00011110 .. 1 000000 10000 ..... ..... @rr_hsd +FABS_s 00011110 .. 1 000001 10000 ..... ..... @rr_hsd +FNEG_s 00011110 .. 1 000010 10000 ..... ..... @rr_hsd +FSQRT_s 00011110 .. 1 000011 10000 ..... ..... @rr_hsd + +FRINTN_s 00011110 .. 1 001000 10000 ..... ..... @rr_hsd +FRINTP_s 00011110 .. 1 001001 10000 ..... ..... @rr_hsd +FRINTM_s 00011110 .. 1 001010 10000 ..... ..... @rr_hsd +FRINTZ_s 00011110 .. 1 001011 10000 ..... ..... @rr_hsd +FRINTA_s 00011110 .. 
1 001100 10000 ..... ..... @rr_hsd +FRINTX_s 00011110 .. 1 001110 10000 ..... ..... @rr_hsd +FRINTI_s 00011110 .. 1 001111 10000 ..... ..... @rr_hsd + +BFCVT_s 00011110 01 1 000110 10000 ..... ..... @rr_s + +FRINT32Z_s 00011110 0. 1 010000 10000 ..... ..... @rr_sd +FRINT32X_s 00011110 0. 1 010001 10000 ..... ..... @rr_sd +FRINT64Z_s 00011110 0. 1 010010 10000 ..... ..... @rr_sd +FRINT64X_s 00011110 0. 1 010011 10000 ..... ..... @rr_sd + +FCVT_s_ds 00011110 00 1 000101 10000 ..... ..... @rr +FCVT_s_hs 00011110 00 1 000111 10000 ..... ..... @rr +FCVT_s_sd 00011110 01 1 000100 10000 ..... ..... @rr +FCVT_s_hd 00011110 01 1 000111 10000 ..... ..... @rr +FCVT_s_sh 00011110 11 1 000100 10000 ..... ..... @rr +FCVT_s_dh 00011110 11 1 000101 10000 ..... ..... @rr + +# Floating-point Immediate + +FMOVI_s 0001 1110 .. 1 imm:8 100 00000 rd:5 esz=%esz_hsd + +# Floating-point Compare + +FCMP 00011110 .. 1 rm:5 001000 rn:5 e:1 z:1 000 esz=%esz_hsd + +# Floating-point Conditional Compare + +FCCMP 00011110 .. 1 rm:5 cond:4 01 rn:5 e:1 nzcv:4 esz=%esz_hsd + +# Advanced SIMD Modified Immediate / Shift by Immediate + +%abcdefgh 16:3 5:5 + +# Right shifts are encoded as N - shift, where N is the element size in bits. +%neon_rshift_i6 16:6 !function=rsub_64 +%neon_rshift_i5 16:5 !function=rsub_32 +%neon_rshift_i4 16:4 !function=rsub_16 +%neon_rshift_i3 16:3 !function=rsub_8 + +@q_shri_b . q:1 .. ..... 0001 ... ..... . rn:5 rd:5 \ + &qrri_e esz=0 imm=%neon_rshift_i3 +@q_shri_h . q:1 .. ..... 001 .... ..... . rn:5 rd:5 \ + &qrri_e esz=1 imm=%neon_rshift_i4 +@q_shri_s . q:1 .. ..... 01 ..... ..... . rn:5 rd:5 \ + &qrri_e esz=2 imm=%neon_rshift_i5 +@q_shri_d . 1 .. ..... 1 ...... ..... . rn:5 rd:5 \ + &qrri_e esz=3 imm=%neon_rshift_i6 q=1 + +@q_shli_b . q:1 .. ..... 0001 imm:3 ..... . rn:5 rd:5 &qrri_e esz=0 +@q_shli_h . q:1 .. ..... 001 imm:4 ..... . rn:5 rd:5 &qrri_e esz=1 +@q_shli_s . q:1 .. ..... 01 imm:5 ..... . rn:5 rd:5 &qrri_e esz=2 +@q_shli_d . 1 .. ..... 1 imm:6 ..... . rn:5 rd:5 &qrri_e esz=3 q=1 + +FMOVI_v_h 0 q:1 00 1111 00000 ... 1111 11 ..... rd:5 %abcdefgh + +# MOVI, MVNI, ORR, BIC, FMOV are all intermixed via cmode. +Vimm 0 q:1 op:1 0 1111 00000 ... cmode:4 01 ..... rd:5 %abcdefgh + +SSHR_v 0.00 11110 .... ... 00000 1 ..... ..... @q_shri_b +SSHR_v 0.00 11110 .... ... 00000 1 ..... ..... @q_shri_h +SSHR_v 0.00 11110 .... ... 00000 1 ..... ..... @q_shri_s +SSHR_v 0.00 11110 .... ... 00000 1 ..... ..... @q_shri_d + +USHR_v 0.10 11110 .... ... 00000 1 ..... ..... @q_shri_b +USHR_v 0.10 11110 .... ... 00000 1 ..... ..... @q_shri_h +USHR_v 0.10 11110 .... ... 00000 1 ..... ..... @q_shri_s +USHR_v 0.10 11110 .... ... 00000 1 ..... ..... @q_shri_d + +SSRA_v 0.00 11110 .... ... 00010 1 ..... ..... @q_shri_b +SSRA_v 0.00 11110 .... ... 00010 1 ..... ..... @q_shri_h +SSRA_v 0.00 11110 .... ... 00010 1 ..... ..... @q_shri_s +SSRA_v 0.00 11110 .... ... 00010 1 ..... ..... @q_shri_d + +USRA_v 0.10 11110 .... ... 00010 1 ..... ..... @q_shri_b +USRA_v 0.10 11110 .... ... 00010 1 ..... ..... @q_shri_h +USRA_v 0.10 11110 .... ... 00010 1 ..... ..... @q_shri_s +USRA_v 0.10 11110 .... ... 00010 1 ..... ..... @q_shri_d + +SRSHR_v 0.00 11110 .... ... 00100 1 ..... ..... @q_shri_b +SRSHR_v 0.00 11110 .... ... 00100 1 ..... ..... @q_shri_h +SRSHR_v 0.00 11110 .... ... 00100 1 ..... ..... @q_shri_s +SRSHR_v 0.00 11110 .... ... 00100 1 ..... ..... @q_shri_d + +URSHR_v 0.10 11110 .... ... 00100 1 ..... ..... @q_shri_b +URSHR_v 0.10 11110 .... ... 00100 1 ..... ..... @q_shri_h +URSHR_v 0.10 11110 .... ... 00100 1 ..... ..... 
@q_shri_s +URSHR_v 0.10 11110 .... ... 00100 1 ..... ..... @q_shri_d + +SRSRA_v 0.00 11110 .... ... 00110 1 ..... ..... @q_shri_b +SRSRA_v 0.00 11110 .... ... 00110 1 ..... ..... @q_shri_h +SRSRA_v 0.00 11110 .... ... 00110 1 ..... ..... @q_shri_s +SRSRA_v 0.00 11110 .... ... 00110 1 ..... ..... @q_shri_d + +URSRA_v 0.10 11110 .... ... 00110 1 ..... ..... @q_shri_b +URSRA_v 0.10 11110 .... ... 00110 1 ..... ..... @q_shri_h +URSRA_v 0.10 11110 .... ... 00110 1 ..... ..... @q_shri_s +URSRA_v 0.10 11110 .... ... 00110 1 ..... ..... @q_shri_d + +SRI_v 0.10 11110 .... ... 01000 1 ..... ..... @q_shri_b +SRI_v 0.10 11110 .... ... 01000 1 ..... ..... @q_shri_h +SRI_v 0.10 11110 .... ... 01000 1 ..... ..... @q_shri_s +SRI_v 0.10 11110 .... ... 01000 1 ..... ..... @q_shri_d + +SHL_v 0.00 11110 .... ... 01010 1 ..... ..... @q_shli_b +SHL_v 0.00 11110 .... ... 01010 1 ..... ..... @q_shli_h +SHL_v 0.00 11110 .... ... 01010 1 ..... ..... @q_shli_s +SHL_v 0.00 11110 .... ... 01010 1 ..... ..... @q_shli_d + +SLI_v 0.10 11110 .... ... 01010 1 ..... ..... @q_shli_b +SLI_v 0.10 11110 .... ... 01010 1 ..... ..... @q_shli_h +SLI_v 0.10 11110 .... ... 01010 1 ..... ..... @q_shli_s +SLI_v 0.10 11110 .... ... 01010 1 ..... ..... @q_shli_d + +SSHLL_v 0.00 11110 .... ... 10100 1 ..... ..... @q_shli_b +SSHLL_v 0.00 11110 .... ... 10100 1 ..... ..... @q_shli_h +SSHLL_v 0.00 11110 .... ... 10100 1 ..... ..... @q_shli_s + +USHLL_v 0.10 11110 .... ... 10100 1 ..... ..... @q_shli_b +USHLL_v 0.10 11110 .... ... 10100 1 ..... ..... @q_shli_h +USHLL_v 0.10 11110 .... ... 10100 1 ..... ..... @q_shli_s + +SHRN_v 0.00 11110 .... ... 10000 1 ..... ..... @q_shri_b +SHRN_v 0.00 11110 .... ... 10000 1 ..... ..... @q_shri_h +SHRN_v 0.00 11110 .... ... 10000 1 ..... ..... @q_shri_s + +RSHRN_v 0.00 11110 .... ... 10001 1 ..... ..... @q_shri_b +RSHRN_v 0.00 11110 .... ... 10001 1 ..... ..... @q_shri_h +RSHRN_v 0.00 11110 .... ... 10001 1 ..... ..... @q_shri_s + +SQSHL_vi 0.00 11110 .... ... 01110 1 ..... ..... @q_shli_b +SQSHL_vi 0.00 11110 .... ... 01110 1 ..... ..... @q_shli_h +SQSHL_vi 0.00 11110 .... ... 01110 1 ..... ..... @q_shli_s +SQSHL_vi 0.00 11110 .... ... 01110 1 ..... ..... @q_shli_d + +UQSHL_vi 0.10 11110 .... ... 01110 1 ..... ..... @q_shli_b +UQSHL_vi 0.10 11110 .... ... 01110 1 ..... ..... @q_shli_h +UQSHL_vi 0.10 11110 .... ... 01110 1 ..... ..... @q_shli_s +UQSHL_vi 0.10 11110 .... ... 01110 1 ..... ..... @q_shli_d + +SQSHLU_vi 0.10 11110 .... ... 01100 1 ..... ..... @q_shli_b +SQSHLU_vi 0.10 11110 .... ... 01100 1 ..... ..... @q_shli_h +SQSHLU_vi 0.10 11110 .... ... 01100 1 ..... ..... @q_shli_s +SQSHLU_vi 0.10 11110 .... ... 01100 1 ..... ..... @q_shli_d + +SQSHRN_v 0.00 11110 .... ... 10010 1 ..... ..... @q_shri_b +SQSHRN_v 0.00 11110 .... ... 10010 1 ..... ..... @q_shri_h +SQSHRN_v 0.00 11110 .... ... 10010 1 ..... ..... @q_shri_s + +UQSHRN_v 0.10 11110 .... ... 10010 1 ..... ..... @q_shri_b +UQSHRN_v 0.10 11110 .... ... 10010 1 ..... ..... @q_shri_h +UQSHRN_v 0.10 11110 .... ... 10010 1 ..... ..... @q_shri_s + +SQSHRUN_v 0.10 11110 .... ... 10000 1 ..... ..... @q_shri_b +SQSHRUN_v 0.10 11110 .... ... 10000 1 ..... ..... @q_shri_h +SQSHRUN_v 0.10 11110 .... ... 10000 1 ..... ..... @q_shri_s + +SQRSHRN_v 0.00 11110 .... ... 10011 1 ..... ..... @q_shri_b +SQRSHRN_v 0.00 11110 .... ... 10011 1 ..... ..... @q_shri_h +SQRSHRN_v 0.00 11110 .... ... 10011 1 ..... ..... @q_shri_s + +UQRSHRN_v 0.10 11110 .... ... 10011 1 ..... ..... @q_shri_b +UQRSHRN_v 0.10 11110 .... ... 10011 1 ..... ..... 
@q_shri_h +UQRSHRN_v 0.10 11110 .... ... 10011 1 ..... ..... @q_shri_s + +SQRSHRUN_v 0.10 11110 .... ... 10001 1 ..... ..... @q_shri_b +SQRSHRUN_v 0.10 11110 .... ... 10001 1 ..... ..... @q_shri_h +SQRSHRUN_v 0.10 11110 .... ... 10001 1 ..... ..... @q_shri_s + +# Advanced SIMD scalar shift by immediate + +@shri_b .... ..... 0001 ... ..... . rn:5 rd:5 \ + &rri_e esz=0 imm=%neon_rshift_i3 +@shri_h .... ..... 001 .... ..... . rn:5 rd:5 \ + &rri_e esz=1 imm=%neon_rshift_i4 +@shri_s .... ..... 01 ..... ..... . rn:5 rd:5 \ + &rri_e esz=2 imm=%neon_rshift_i5 +@shri_d .... ..... 1 ...... ..... . rn:5 rd:5 \ + &rri_e esz=3 imm=%neon_rshift_i6 + +@shli_b .... ..... 0001 imm:3 ..... . rn:5 rd:5 &rri_e esz=0 +@shli_h .... ..... 001 imm:4 ..... . rn:5 rd:5 &rri_e esz=1 +@shli_s .... ..... 01 imm:5 ..... . rn:5 rd:5 &rri_e esz=2 +@shli_d .... ..... 1 imm:6 ..... . rn:5 rd:5 &rri_e esz=3 + +SSHR_s 0101 11110 .... ... 00000 1 ..... ..... @shri_d +USHR_s 0111 11110 .... ... 00000 1 ..... ..... @shri_d +SSRA_s 0101 11110 .... ... 00010 1 ..... ..... @shri_d +USRA_s 0111 11110 .... ... 00010 1 ..... ..... @shri_d +SRSHR_s 0101 11110 .... ... 00100 1 ..... ..... @shri_d +URSHR_s 0111 11110 .... ... 00100 1 ..... ..... @shri_d +SRSRA_s 0101 11110 .... ... 00110 1 ..... ..... @shri_d +URSRA_s 0111 11110 .... ... 00110 1 ..... ..... @shri_d +SRI_s 0111 11110 .... ... 01000 1 ..... ..... @shri_d + +SHL_s 0101 11110 .... ... 01010 1 ..... ..... @shli_d +SLI_s 0111 11110 .... ... 01010 1 ..... ..... @shli_d + +SQSHL_si 0101 11110 .... ... 01110 1 ..... ..... @shli_b +SQSHL_si 0101 11110 .... ... 01110 1 ..... ..... @shli_h +SQSHL_si 0101 11110 .... ... 01110 1 ..... ..... @shli_s +SQSHL_si 0101 11110 .... ... 01110 1 ..... ..... @shli_d + +UQSHL_si 0111 11110 .... ... 01110 1 ..... ..... @shli_b +UQSHL_si 0111 11110 .... ... 01110 1 ..... ..... @shli_h +UQSHL_si 0111 11110 .... ... 01110 1 ..... ..... @shli_s +UQSHL_si 0111 11110 .... ... 01110 1 ..... ..... @shli_d + +SQSHLU_si 0111 11110 .... ... 01100 1 ..... ..... @shli_b +SQSHLU_si 0111 11110 .... ... 01100 1 ..... ..... @shli_h +SQSHLU_si 0111 11110 .... ... 01100 1 ..... ..... @shli_s +SQSHLU_si 0111 11110 .... ... 01100 1 ..... ..... @shli_d + +SQSHRN_si 0101 11110 .... ... 10010 1 ..... ..... @shri_b +SQSHRN_si 0101 11110 .... ... 10010 1 ..... ..... @shri_h +SQSHRN_si 0101 11110 .... ... 10010 1 ..... ..... @shri_s + +UQSHRN_si 0111 11110 .... ... 10010 1 ..... ..... @shri_b +UQSHRN_si 0111 11110 .... ... 10010 1 ..... ..... @shri_h +UQSHRN_si 0111 11110 .... ... 10010 1 ..... ..... @shri_s + +SQSHRUN_si 0111 11110 .... ... 10000 1 ..... ..... @shri_b +SQSHRUN_si 0111 11110 .... ... 10000 1 ..... ..... @shri_h +SQSHRUN_si 0111 11110 .... ... 10000 1 ..... ..... @shri_s + +SQRSHRN_si 0101 11110 .... ... 10011 1 ..... ..... @shri_b +SQRSHRN_si 0101 11110 .... ... 10011 1 ..... ..... @shri_h +SQRSHRN_si 0101 11110 .... ... 10011 1 ..... ..... @shri_s + +UQRSHRN_si 0111 11110 .... ... 10011 1 ..... ..... @shri_b +UQRSHRN_si 0111 11110 .... ... 10011 1 ..... ..... @shri_h +UQRSHRN_si 0111 11110 .... ... 10011 1 ..... ..... @shri_s + +SQRSHRUN_si 0111 11110 .... ... 10001 1 ..... ..... @shri_b +SQRSHRUN_si 0111 11110 .... ... 10001 1 ..... ..... @shri_h +SQRSHRUN_si 0111 11110 .... ... 10001 1 ..... ..... @shri_s + +# Advanced SIMD scalar two-register miscellaneous + +SQABS_s 0101 1110 ..1 00000 01111 0 ..... ..... @rr_e +SQNEG_s 0111 1110 ..1 00000 01111 0 ..... ..... @rr_e +ABS_s 0101 1110 111 00000 10111 0 ..... ..... 
@rr +NEG_s 0111 1110 111 00000 10111 0 ..... ..... @rr +CMGT0_s 0101 1110 111 00000 10001 0 ..... ..... @rr +CMGE0_s 0111 1110 111 00000 10001 0 ..... ..... @rr +CMEQ0_s 0101 1110 111 00000 10011 0 ..... ..... @rr +CMLE0_s 0111 1110 111 00000 10011 0 ..... ..... @rr +CMLT0_s 0101 1110 111 00000 10101 0 ..... ..... @rr + +SQXTUN_s 0111 1110 ..1 00001 00101 0 ..... ..... @rr_e +SQXTN_s 0101 1110 ..1 00001 01001 0 ..... ..... @rr_e +UQXTN_s 0111 1110 ..1 00001 01001 0 ..... ..... @rr_e + +FCVTXN_s 0111 1110 011 00001 01101 0 ..... ..... @rr_s + +FCMGT0_s 0101 1110 111 11000 11001 0 ..... ..... @rr_h +FCMGT0_s 0101 1110 1.1 00000 11001 0 ..... ..... @rr_sd + +FCMGE0_s 0111 1110 111 11000 11001 0 ..... ..... @rr_h +FCMGE0_s 0111 1110 1.1 00000 11001 0 ..... ..... @rr_sd + +FCMEQ0_s 0101 1110 111 11000 11011 0 ..... ..... @rr_h +FCMEQ0_s 0101 1110 1.1 00000 11011 0 ..... ..... @rr_sd + +FCMLE0_s 0111 1110 111 11000 11011 0 ..... ..... @rr_h +FCMLE0_s 0111 1110 1.1 00000 11011 0 ..... ..... @rr_sd + +FCMLT0_s 0101 1110 111 11000 11101 0 ..... ..... @rr_h +FCMLT0_s 0101 1110 1.1 00000 11101 0 ..... ..... @rr_sd + +FRECPE_s 0101 1110 111 11001 11011 0 ..... ..... @rr_h +FRECPE_s 0101 1110 1.1 00001 11011 0 ..... ..... @rr_sd + +FRECPX_s 0101 1110 111 11001 11111 0 ..... ..... @rr_h +FRECPX_s 0101 1110 1.1 00001 11111 0 ..... ..... @rr_sd + +FRSQRTE_s 0111 1110 111 11001 11011 0 ..... ..... @rr_h +FRSQRTE_s 0111 1110 1.1 00001 11011 0 ..... ..... @rr_sd + +@icvt_h . ....... .. ...... ...... rn:5 rd:5 \ + &fcvt sf=0 esz=1 shift=0 +@icvt_sd . ....... .. ...... ...... rn:5 rd:5 \ + &fcvt sf=0 esz=%esz_sd shift=0 + +SCVTF_f 0101 1110 011 11001 11011 0 ..... ..... @icvt_h +SCVTF_f 0101 1110 0.1 00001 11011 0 ..... ..... @icvt_sd + +UCVTF_f 0111 1110 011 11001 11011 0 ..... ..... @icvt_h +UCVTF_f 0111 1110 0.1 00001 11011 0 ..... ..... @icvt_sd + +FCVTNS_f 0101 1110 011 11001 10101 0 ..... ..... @icvt_h +FCVTNS_f 0101 1110 0.1 00001 10101 0 ..... ..... @icvt_sd +FCVTNU_f 0111 1110 011 11001 10101 0 ..... ..... @icvt_h +FCVTNU_f 0111 1110 0.1 00001 10101 0 ..... ..... @icvt_sd + +FCVTPS_f 0101 1110 111 11001 10101 0 ..... ..... @icvt_h +FCVTPS_f 0101 1110 1.1 00001 10101 0 ..... ..... @icvt_sd +FCVTPU_f 0111 1110 111 11001 10101 0 ..... ..... @icvt_h +FCVTPU_f 0111 1110 1.1 00001 10101 0 ..... ..... @icvt_sd + +FCVTMS_f 0101 1110 011 11001 10111 0 ..... ..... @icvt_h +FCVTMS_f 0101 1110 0.1 00001 10111 0 ..... ..... @icvt_sd +FCVTMU_f 0111 1110 011 11001 10111 0 ..... ..... @icvt_h +FCVTMU_f 0111 1110 0.1 00001 10111 0 ..... ..... @icvt_sd + +FCVTZS_f 0101 1110 111 11001 10111 0 ..... ..... @icvt_h +FCVTZS_f 0101 1110 1.1 00001 10111 0 ..... ..... @icvt_sd +FCVTZU_f 0111 1110 111 11001 10111 0 ..... ..... @icvt_h +FCVTZU_f 0111 1110 1.1 00001 10111 0 ..... ..... @icvt_sd + +FCVTAS_f 0101 1110 011 11001 11001 0 ..... ..... @icvt_h +FCVTAS_f 0101 1110 0.1 00001 11001 0 ..... ..... @icvt_sd +FCVTAU_f 0111 1110 011 11001 11001 0 ..... ..... @icvt_h +FCVTAU_f 0111 1110 0.1 00001 11001 0 ..... ..... @icvt_sd + +%fcvt_f_sh_h 16:4 !function=rsub_16 +%fcvt_f_sh_s 16:5 !function=rsub_32 +%fcvt_f_sh_d 16:6 !function=rsub_64 + +@fcvt_fixed_h .... .... . 001 .... ...... rn:5 rd:5 \ + &fcvt sf=0 esz=1 shift=%fcvt_f_sh_h +@fcvt_fixed_s .... .... . 01 ..... ...... rn:5 rd:5 \ + &fcvt sf=0 esz=2 shift=%fcvt_f_sh_s +@fcvt_fixed_d .... .... . 1 ...... ...... rn:5 rd:5 \ + &fcvt sf=0 esz=3 shift=%fcvt_f_sh_d + +SCVTF_f 0101 1111 0 ....... 111001 ..... ..... @fcvt_fixed_h +SCVTF_f 0101 1111 0 ....... 111001 ..... ..... 
@fcvt_fixed_s +SCVTF_f 0101 1111 0 ....... 111001 ..... ..... @fcvt_fixed_d + +UCVTF_f 0111 1111 0 ....... 111001 ..... ..... @fcvt_fixed_h +UCVTF_f 0111 1111 0 ....... 111001 ..... ..... @fcvt_fixed_s +UCVTF_f 0111 1111 0 ....... 111001 ..... ..... @fcvt_fixed_d + +FCVTZS_f 0101 1111 0 ....... 111111 ..... ..... @fcvt_fixed_h +FCVTZS_f 0101 1111 0 ....... 111111 ..... ..... @fcvt_fixed_s +FCVTZS_f 0101 1111 0 ....... 111111 ..... ..... @fcvt_fixed_d + +FCVTZU_f 0111 1111 0 ....... 111111 ..... ..... @fcvt_fixed_h +FCVTZU_f 0111 1111 0 ....... 111111 ..... ..... @fcvt_fixed_s +FCVTZU_f 0111 1111 0 ....... 111111 ..... ..... @fcvt_fixed_d + +# Advanced SIMD two-register miscellaneous + +SQABS_v 0.00 1110 ..1 00000 01111 0 ..... ..... @qrr_e +SQNEG_v 0.10 1110 ..1 00000 01111 0 ..... ..... @qrr_e +ABS_v 0.00 1110 ..1 00000 10111 0 ..... ..... @qrr_e +NEG_v 0.10 1110 ..1 00000 10111 0 ..... ..... @qrr_e +CLS_v 0.00 1110 ..1 00000 01001 0 ..... ..... @qrr_e +CLZ_v 0.10 1110 ..1 00000 01001 0 ..... ..... @qrr_e +CNT_v 0.00 1110 001 00000 01011 0 ..... ..... @qrr_b +NOT_v 0.10 1110 001 00000 01011 0 ..... ..... @qrr_b +RBIT_v 0.10 1110 011 00000 01011 0 ..... ..... @qrr_b +CMGT0_v 0.00 1110 ..1 00000 10001 0 ..... ..... @qrr_e +CMGE0_v 0.10 1110 ..1 00000 10001 0 ..... ..... @qrr_e +CMEQ0_v 0.00 1110 ..1 00000 10011 0 ..... ..... @qrr_e +CMLE0_v 0.10 1110 ..1 00000 10011 0 ..... ..... @qrr_e +CMLT0_v 0.00 1110 ..1 00000 10101 0 ..... ..... @qrr_e + +REV16_v 0.00 1110 001 00000 00011 0 ..... ..... @qrr_b +REV32_v 0.10 1110 0.1 00000 00001 0 ..... ..... @qrr_bh +REV64_v 0.00 1110 ..1 00000 00001 0 ..... ..... @qrr_e + +SADDLP_v 0.00 1110 ..1 00000 00101 0 ..... ..... @qrr_e +UADDLP_v 0.10 1110 ..1 00000 00101 0 ..... ..... @qrr_e +SADALP_v 0.00 1110 ..1 00000 01101 0 ..... ..... @qrr_e +UADALP_v 0.10 1110 ..1 00000 01101 0 ..... ..... @qrr_e + +XTN 0.00 1110 ..1 00001 00101 0 ..... ..... @qrr_e +SQXTUN_v 0.10 1110 ..1 00001 00101 0 ..... ..... @qrr_e +SQXTN_v 0.00 1110 ..1 00001 01001 0 ..... ..... @qrr_e +UQXTN_v 0.10 1110 ..1 00001 01001 0 ..... ..... @qrr_e + +FCVTN_v 0.00 1110 0.1 00001 01101 0 ..... ..... @qrr_hs +FCVTXN_v 0.10 1110 011 00001 01101 0 ..... ..... @qrr_s +BFCVTN_v 0.00 1110 101 00001 01101 0 ..... ..... @qrr_h + +SHLL_v 0.10 1110 ..1 00001 00111 0 ..... ..... @qrr_e + +FABS_v 0.00 1110 111 11000 11111 0 ..... ..... @qrr_h +FABS_v 0.00 1110 1.1 00000 11111 0 ..... ..... @qrr_sd + +FNEG_v 0.10 1110 111 11000 11111 0 ..... ..... @qrr_h +FNEG_v 0.10 1110 1.1 00000 11111 0 ..... ..... @qrr_sd + +FSQRT_v 0.10 1110 111 11001 11111 0 ..... ..... @qrr_h +FSQRT_v 0.10 1110 1.1 00001 11111 0 ..... ..... @qrr_sd + +FRINTN_v 0.00 1110 011 11001 10001 0 ..... ..... @qrr_h +FRINTN_v 0.00 1110 0.1 00001 10001 0 ..... ..... @qrr_sd + +FRINTM_v 0.00 1110 011 11001 10011 0 ..... ..... @qrr_h +FRINTM_v 0.00 1110 0.1 00001 10011 0 ..... ..... @qrr_sd + +FRINTP_v 0.00 1110 111 11001 10001 0 ..... ..... @qrr_h +FRINTP_v 0.00 1110 1.1 00001 10001 0 ..... ..... @qrr_sd + +FRINTZ_v 0.00 1110 111 11001 10011 0 ..... ..... @qrr_h +FRINTZ_v 0.00 1110 1.1 00001 10011 0 ..... ..... @qrr_sd + +FRINTA_v 0.10 1110 011 11001 10001 0 ..... ..... @qrr_h +FRINTA_v 0.10 1110 0.1 00001 10001 0 ..... ..... @qrr_sd + +FRINTX_v 0.10 1110 011 11001 10011 0 ..... ..... @qrr_h +FRINTX_v 0.10 1110 0.1 00001 10011 0 ..... ..... @qrr_sd + +FRINTI_v 0.10 1110 111 11001 10011 0 ..... ..... @qrr_h +FRINTI_v 0.10 1110 1.1 00001 10011 0 ..... ..... @qrr_sd + +FRINT32Z_v 0.00 1110 0.1 00001 11101 0 ..... ..... 
@qrr_sd +FRINT32X_v 0.10 1110 0.1 00001 11101 0 ..... ..... @qrr_sd +FRINT64Z_v 0.00 1110 0.1 00001 11111 0 ..... ..... @qrr_sd +FRINT64X_v 0.10 1110 0.1 00001 11111 0 ..... ..... @qrr_sd + +SCVTF_vi 0.00 1110 011 11001 11011 0 ..... ..... @qrr_h +SCVTF_vi 0.00 1110 0.1 00001 11011 0 ..... ..... @qrr_sd + +UCVTF_vi 0.10 1110 011 11001 11011 0 ..... ..... @qrr_h +UCVTF_vi 0.10 1110 0.1 00001 11011 0 ..... ..... @qrr_sd + +FCVTNS_vi 0.00 1110 011 11001 10101 0 ..... ..... @qrr_h +FCVTNS_vi 0.00 1110 0.1 00001 10101 0 ..... ..... @qrr_sd +FCVTNU_vi 0.10 1110 011 11001 10101 0 ..... ..... @qrr_h +FCVTNU_vi 0.10 1110 0.1 00001 10101 0 ..... ..... @qrr_sd + +FCVTPS_vi 0.00 1110 111 11001 10101 0 ..... ..... @qrr_h +FCVTPS_vi 0.00 1110 1.1 00001 10101 0 ..... ..... @qrr_sd +FCVTPU_vi 0.10 1110 111 11001 10101 0 ..... ..... @qrr_h +FCVTPU_vi 0.10 1110 1.1 00001 10101 0 ..... ..... @qrr_sd + +FCVTMS_vi 0.00 1110 011 11001 10111 0 ..... ..... @qrr_h +FCVTMS_vi 0.00 1110 0.1 00001 10111 0 ..... ..... @qrr_sd +FCVTMU_vi 0.10 1110 011 11001 10111 0 ..... ..... @qrr_h +FCVTMU_vi 0.10 1110 0.1 00001 10111 0 ..... ..... @qrr_sd + +FCVTZS_vi 0.00 1110 111 11001 10111 0 ..... ..... @qrr_h +FCVTZS_vi 0.00 1110 1.1 00001 10111 0 ..... ..... @qrr_sd +FCVTZU_vi 0.10 1110 111 11001 10111 0 ..... ..... @qrr_h +FCVTZU_vi 0.10 1110 1.1 00001 10111 0 ..... ..... @qrr_sd + +FCVTAS_vi 0.00 1110 011 11001 11001 0 ..... ..... @qrr_h +FCVTAS_vi 0.00 1110 0.1 00001 11001 0 ..... ..... @qrr_sd +FCVTAU_vi 0.10 1110 011 11001 11001 0 ..... ..... @qrr_h +FCVTAU_vi 0.10 1110 0.1 00001 11001 0 ..... ..... @qrr_sd + +FCMGT0_v 0.00 1110 111 11000 11001 0 ..... ..... @qrr_h +FCMGT0_v 0.00 1110 1.1 00000 11001 0 ..... ..... @qrr_sd + +FCMGE0_v 0.10 1110 111 11000 11001 0 ..... ..... @qrr_h +FCMGE0_v 0.10 1110 1.1 00000 11001 0 ..... ..... @qrr_sd + +FCMEQ0_v 0.00 1110 111 11000 11011 0 ..... ..... @qrr_h +FCMEQ0_v 0.00 1110 1.1 00000 11011 0 ..... ..... @qrr_sd + +FCMLE0_v 0.10 1110 111 11000 11011 0 ..... ..... @qrr_h +FCMLE0_v 0.10 1110 1.1 00000 11011 0 ..... ..... @qrr_sd + +FCMLT0_v 0.00 1110 111 11000 11101 0 ..... ..... @qrr_h +FCMLT0_v 0.00 1110 1.1 00000 11101 0 ..... ..... @qrr_sd + +FRECPE_v 0.00 1110 111 11001 11011 0 ..... ..... @qrr_h +FRECPE_v 0.00 1110 1.1 00001 11011 0 ..... ..... @qrr_sd + +FRSQRTE_v 0.10 1110 111 11001 11011 0 ..... ..... @qrr_h +FRSQRTE_v 0.10 1110 1.1 00001 11011 0 ..... ..... @qrr_sd + +URECPE_v 0.00 1110 101 00001 11001 0 ..... ..... @qrr_s +URSQRTE_v 0.10 1110 101 00001 11001 0 ..... ..... @qrr_s + +FCVTL_v 0.00 1110 0.1 00001 01111 0 ..... ..... @qrr_sd + +&fcvt_q rd rn esz q shift +@fcvtq_h . q:1 . ...... 001 .... ...... rn:5 rd:5 \ + &fcvt_q esz=1 shift=%fcvt_f_sh_h +@fcvtq_s . q:1 . ...... 01 ..... ...... rn:5 rd:5 \ + &fcvt_q esz=2 shift=%fcvt_f_sh_s +@fcvtq_d . q:1 . ...... 1 ...... ...... rn:5 rd:5 \ + &fcvt_q esz=3 shift=%fcvt_f_sh_d + +SCVTF_vf 0.00 11110 ....... 111001 ..... ..... @fcvtq_h +SCVTF_vf 0.00 11110 ....... 111001 ..... ..... @fcvtq_s +SCVTF_vf 0.00 11110 ....... 111001 ..... ..... @fcvtq_d + +UCVTF_vf 0.10 11110 ....... 111001 ..... ..... @fcvtq_h +UCVTF_vf 0.10 11110 ....... 111001 ..... ..... @fcvtq_s +UCVTF_vf 0.10 11110 ....... 111001 ..... ..... @fcvtq_d + +FCVTZS_vf 0.00 11110 ....... 111111 ..... ..... @fcvtq_h +FCVTZS_vf 0.00 11110 ....... 111111 ..... ..... @fcvtq_s +FCVTZS_vf 0.00 11110 ....... 111111 ..... ..... @fcvtq_d + +FCVTZU_vf 0.10 11110 ....... 111111 ..... ..... @fcvtq_h +FCVTZU_vf 0.10 11110 ....... 111111 ..... ..... 
@fcvtq_s +FCVTZU_vf 0.10 11110 ....... 111111 ..... ..... @fcvtq_d diff --git a/target/arm/tcg/arith_helper.c b/target/arm/tcg/arith_helper.c new file mode 100644 index 00000000000..670139819df --- /dev/null +++ b/target/arm/tcg/arith_helper.c @@ -0,0 +1,297 @@ +/* + * ARM generic helpers for various arithmetical operations. + * + * This code is licensed under the GNU GPL v2 or later. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "qemu/crc32c.h" +#include <zlib.h> /* for crc32 */ + +#define HELPER_H "tcg/helper.h" +#include "exec/helper-proto.h.inc" + +/* + * Note that signed overflow is undefined in C. The following routines are + * careful to use unsigned types where modulo arithmetic is required. + * Failure to do so _will_ break on newer gcc. + */ + +/* Signed saturating arithmetic. */ + +/* Perform 16-bit signed saturating addition. */ +static inline uint16_t add16_sat(uint16_t a, uint16_t b) +{ + uint16_t res; + + res = a + b; + if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { + if (a & 0x8000) { + res = 0x8000; + } else { + res = 0x7fff; + } + } + return res; +} + +/* Perform 8-bit signed saturating addition. */ +static inline uint8_t add8_sat(uint8_t a, uint8_t b) +{ + uint8_t res; + + res = a + b; + if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { + if (a & 0x80) { + res = 0x80; + } else { + res = 0x7f; + } + } + return res; +} + +/* Perform 16-bit signed saturating subtraction. */ +static inline uint16_t sub16_sat(uint16_t a, uint16_t b) +{ + uint16_t res; + + res = a - b; + if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { + if (a & 0x8000) { + res = 0x8000; + } else { + res = 0x7fff; + } + } + return res; +} + +/* Perform 8-bit signed saturating subtraction. */ +static inline uint8_t sub8_sat(uint8_t a, uint8_t b) +{ + uint8_t res; + + res = a - b; + if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { + if (a & 0x80) { + res = 0x80; + } else { + res = 0x7f; + } + } + return res; +} + +#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); +#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); +#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); +#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); +#define PFX q + +#include "op_addsub.c.inc" + +/* Unsigned saturating arithmetic. */ +static inline uint16_t add16_usat(uint16_t a, uint16_t b) +{ + uint16_t res; + res = a + b; + if (res < a) { + res = 0xffff; + } + return res; +} + +static inline uint16_t sub16_usat(uint16_t a, uint16_t b) +{ + if (a > b) { + return a - b; + } else { + return 0; + } +} + +static inline uint8_t add8_usat(uint8_t a, uint8_t b) +{ + uint8_t res; + res = a + b; + if (res < a) { + res = 0xff; + } + return res; +} + +static inline uint8_t sub8_usat(uint8_t a, uint8_t b) +{ + if (a > b) { + return a - b; + } else { + return 0; + } +} + +#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); +#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); +#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); +#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); +#define PFX uq + +#include "op_addsub.c.inc" + +/* Signed modulo arithmetic. 
*/ +#define SARITH16(a, b, n, op) do { \ + int32_t sum; \ + sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ + RESULT(sum, n, 16); \ + if (sum >= 0) \ + ge |= 3 << (n * 2); \ + } while (0) + +#define SARITH8(a, b, n, op) do { \ + int32_t sum; \ + sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ + RESULT(sum, n, 8); \ + if (sum >= 0) \ + ge |= 1 << n; \ + } while (0) + + +#define ADD16(a, b, n) SARITH16(a, b, n, +) +#define SUB16(a, b, n) SARITH16(a, b, n, -) +#define ADD8(a, b, n) SARITH8(a, b, n, +) +#define SUB8(a, b, n) SARITH8(a, b, n, -) +#define PFX s +#define ARITH_GE + +#include "op_addsub.c.inc" + +/* Unsigned modulo arithmetic. */ +#define ADD16(a, b, n) do { \ + uint32_t sum; \ + sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ + RESULT(sum, n, 16); \ + if ((sum >> 16) == 1) \ + ge |= 3 << (n * 2); \ + } while (0) + +#define ADD8(a, b, n) do { \ + uint32_t sum; \ + sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ + RESULT(sum, n, 8); \ + if ((sum >> 8) == 1) \ + ge |= 1 << n; \ + } while (0) + +#define SUB16(a, b, n) do { \ + uint32_t sum; \ + sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ + RESULT(sum, n, 16); \ + if ((sum >> 16) == 0) \ + ge |= 3 << (n * 2); \ + } while (0) + +#define SUB8(a, b, n) do { \ + uint32_t sum; \ + sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ + RESULT(sum, n, 8); \ + if ((sum >> 8) == 0) \ + ge |= 1 << n; \ + } while (0) + +#define PFX u +#define ARITH_GE + +#include "op_addsub.c.inc" + +/* Halved signed arithmetic. */ +#define ADD16(a, b, n) \ + RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) +#define SUB16(a, b, n) \ + RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) +#define ADD8(a, b, n) \ + RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) +#define SUB8(a, b, n) \ + RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) +#define PFX sh + +#include "op_addsub.c.inc" + +/* Halved unsigned arithmetic. */ +#define ADD16(a, b, n) \ + RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) +#define SUB16(a, b, n) \ + RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) +#define ADD8(a, b, n) \ + RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) +#define SUB8(a, b, n) \ + RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) +#define PFX uh + +#include "op_addsub.c.inc" + +static inline uint8_t do_usad(uint8_t a, uint8_t b) +{ + if (a > b) { + return a - b; + } else { + return b - a; + } +} + +/* Unsigned sum of absolute byte differences. */ +uint32_t HELPER(usad8)(uint32_t a, uint32_t b) +{ + uint32_t sum; + sum = do_usad(a, b); + sum += do_usad(a >> 8, b >> 8); + sum += do_usad(a >> 16, b >> 16); + sum += do_usad(a >> 24, b >> 24); + return sum; +} + +/* For ARMv6 SEL instruction. */ +uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) +{ + uint32_t mask; + + mask = 0; + if (flags & 1) { + mask |= 0xff; + } + if (flags & 2) { + mask |= 0xff00; + } + if (flags & 4) { + mask |= 0xff0000; + } + if (flags & 8) { + mask |= 0xff000000; + } + return (a & mask) | (b & ~mask); +} + +/* + * CRC helpers. + * The upper bytes of val (above the number specified by 'bytes') must have + * been zeroed out by the caller. + */ +uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) +{ + uint8_t buf[4]; + + stl_le_p(buf, val); + + /* zlib crc32 converts the accumulator and output to one's complement. 
*/ + return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; +} + +uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) +{ + uint8_t buf[4]; + + stl_le_p(buf, val); + + /* Linux crc32c converts the output to one's complement. */ + return crc32c(acc, buf, bytes) ^ 0xffffffff; +} diff --git a/target/arm/tcg/cpregs-at.c b/target/arm/tcg/cpregs-at.c new file mode 100644 index 00000000000..398a61d3989 --- /dev/null +++ b/target/arm/tcg/cpregs-at.c @@ -0,0 +1,519 @@ +/* + * System instructions for address translation + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "cpu-features.h" +#include "internals.h" +#include "cpregs.h" + + +static int par_el1_shareability(GetPhysAddrResult *res) +{ + /* + * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC + * memory -- see pseudocode PAREncodeShareability(). + */ + if (((res->cacheattrs.attrs & 0xf0) == 0) || + res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) { + return 2; + } + return res->cacheattrs.shareability; +} + +static uint64_t do_ats_write(CPUARMState *env, uint64_t value, + MMUAccessType access_type, ARMMMUIdx mmu_idx, + ARMSecuritySpace ss) +{ + bool ret; + uint64_t par64; + bool format64 = false; + ARMMMUFaultInfo fi = {}; + GetPhysAddrResult res = {}; + + /* + * I_MXTJT: Granule protection checks are not performed on the final + * address of a successful translation. This is a translation not a + * memory reference, so "memop = none = 0". + */ + ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0, + mmu_idx, ss, &res, &fi); + + /* + * ATS operations only do S1 or S1+S2 translations, so we never + * have to deal with the ARMCacheAttrs format for S2 only. + */ + assert(!res.cacheattrs.is_s2_format); + + if (ret) { + /* + * Some kinds of translation fault must cause exceptions rather + * than being reported in the PAR. + */ + int current_el = arm_current_el(env); + int target_el; + uint32_t syn, fsr, fsc; + bool take_exc = false; + + if (fi.s1ptw && current_el == 1 + && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { + /* + * Synchronous stage 2 fault on an access made as part of the + * translation table walk for AT S1E0* or AT S1E1* insn + * executed from NS EL1. If this is a synchronous external abort + * and SCR_EL3.EA == 1, then we take a synchronous external abort + * to EL3. Otherwise the fault is taken as an exception to EL2, + * and HPFAR_EL2 holds the faulting IPA. + */ + if (fi.type == ARMFault_SyncExternalOnWalk && + (env->cp15.scr_el3 & SCR_EA)) { + target_el = 3; + } else { + env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; + if (arm_is_secure_below_el3(env) && fi.s1ns) { + env->cp15.hpfar_el2 |= HPFAR_NS; + } + target_el = 2; + } + take_exc = true; + } else if (fi.type == ARMFault_SyncExternalOnWalk) { + /* + * Synchronous external aborts during a translation table walk + * are taken as Data Abort exceptions. + */ + if (fi.stage2) { + if (current_el == 3) { + target_el = 3; + } else { + target_el = 2; + } + } else { + target_el = exception_target_el(env); + } + take_exc = true; + } + + if (take_exc) { + /* Construct FSR and FSC using same logic as arm_deliver_fault() */ + if (target_el == 2 || arm_el_is_aa64(env, target_el) || + arm_s1_regime_using_lpae_format(env, mmu_idx)) { + fsr = arm_fi_to_lfsc(&fi); + fsc = extract32(fsr, 0, 6); + } else { + fsr = arm_fi_to_sfsc(&fi); + fsc = 0x3f; + } + /* + * Report exception with ESR indicating a fault due to a + * translation table walk for a cache maintenance instruction. 
+ */ + syn = syn_data_abort_no_iss(current_el == target_el, 0, + fi.ea, 1, fi.s1ptw, 1, fsc); + env->exception.vaddress = value; + env->exception.fsr = fsr; + raise_exception(env, EXCP_DATA_ABORT, syn, target_el); + } + } + + if (is_a64(env)) { + format64 = true; + } else if (arm_feature(env, ARM_FEATURE_LPAE)) { + /* + * ATS1Cxx: + * * TTBCR.EAE determines whether the result is returned using the + * 32-bit or the 64-bit PAR format + * * Instructions executed in Hyp mode always use the 64bit format + * + * ATS1S2NSOxx uses the 64bit format if any of the following is true: + * * The Non-secure TTBCR.EAE bit is set to 1 + * * The implementation includes EL2, and the value of HCR.VM is 1 + * + * (Note that HCR.DC makes HCR.VM behave as if it is 1.) + * + * ATS1Hx always uses the 64bit format. + */ + format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); + + if (arm_feature(env, ARM_FEATURE_EL2)) { + if (mmu_idx == ARMMMUIdx_E10_0 || + mmu_idx == ARMMMUIdx_E10_1 || + mmu_idx == ARMMMUIdx_E10_1_PAN) { + format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); + } else { + format64 |= arm_current_el(env) == 2; + } + } + } + + if (format64) { + /* Create a 64-bit PAR */ + par64 = (1 << 11); /* LPAE bit always set */ + if (!ret) { + par64 |= res.f.phys_addr & ~0xfffULL; + if (!res.f.attrs.secure) { + par64 |= (1 << 9); /* NS */ + } + par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */ + par64 |= par_el1_shareability(&res) << 7; /* SH */ + } else { + uint32_t fsr = arm_fi_to_lfsc(&fi); + + par64 |= 1; /* F */ + par64 |= (fsr & 0x3f) << 1; /* FS */ + if (fi.stage2) { + par64 |= (1 << 9); /* S */ + } + if (fi.s1ptw) { + par64 |= (1 << 8); /* PTW */ + } + } + } else { + /* + * fsr is a DFSR/IFSR value for the short descriptor + * translation table format (with WnR always clear). + * Convert it to a 32-bit PAR. + */ + if (!ret) { + /* We do not set any attribute bits in the PAR */ + if (res.f.lg_page_size == 24 + && arm_feature(env, ARM_FEATURE_V7)) { + par64 = (res.f.phys_addr & 0xff000000) | (1 << 1); + } else { + par64 = res.f.phys_addr & 0xfffff000; + } + if (!res.f.attrs.secure) { + par64 |= (1 << 9); /* NS */ + } + } else { + uint32_t fsr = arm_fi_to_sfsc(&fi); + + par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | + ((fsr & 0xf) << 1) | 1; + } + } + return par64; +} + +static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + MMUAccessType access_type = ri->opc2 & 1 ? 
MMU_DATA_STORE : MMU_DATA_LOAD; + uint64_t par64; + ARMMMUIdx mmu_idx; + int el = arm_current_el(env); + ARMSecuritySpace ss = arm_security_space(env); + + switch (ri->opc2 & 6) { + case 0: + /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ + switch (el) { + case 3: + if (ri->crm == 9 && arm_pan_enabled(env)) { + mmu_idx = ARMMMUIdx_E30_3_PAN; + } else { + mmu_idx = ARMMMUIdx_E3; + } + break; + case 2: + g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ + /* fall through */ + case 1: + if (ri->crm == 9 && arm_pan_enabled(env)) { + mmu_idx = ARMMMUIdx_Stage1_E1_PAN; + } else { + mmu_idx = ARMMMUIdx_Stage1_E1; + } + break; + default: + g_assert_not_reached(); + } + break; + case 2: + /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ + switch (el) { + case 3: + mmu_idx = ARMMMUIdx_E30_0; + break; + case 2: + g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ + mmu_idx = ARMMMUIdx_Stage1_E0; + break; + case 1: + mmu_idx = ARMMMUIdx_Stage1_E0; + break; + default: + g_assert_not_reached(); + } + break; + case 4: + /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ + mmu_idx = ARMMMUIdx_E10_1; + ss = ARMSS_NonSecure; + break; + case 6: + /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ + mmu_idx = ARMMMUIdx_E10_0; + ss = ARMSS_NonSecure; + break; + default: + g_assert_not_reached(); + } + + par64 = do_ats_write(env, value, access_type, mmu_idx, ss); + + A32_BANKED_CURRENT_REG_SET(env, par, par64); +} + +static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; + uint64_t par64; + + /* There is no SecureEL2 for AArch32. */ + par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2, + ARMSS_NonSecure); + + A32_BANKED_CURRENT_REG_SET(env, par, par64); +} + +static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* + * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level + * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can + * only happen when executing at EL3 because that combination also causes an + * illegal exception return. We don't need to check FEAT_RME either, because + * scr_write() ensures that the NSE bit is not set otherwise. + */ + if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) { + return CP_ACCESS_UNDEFINED; + } + return CP_ACCESS_OK; +} + +static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 3 && + !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) { + return CP_ACCESS_UNDEFINED; + } + return at_e012_access(env, ri, isread); +} + +static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) { + return CP_ACCESS_TRAP_EL2; + } + return at_e012_access(env, ri, isread); +} + +static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; + ARMMMUIdx mmu_idx; + uint64_t hcr_el2 = arm_hcr_el2_eff(env); + bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE); + bool for_el3 = false; + ARMSecuritySpace ss; + + switch (ri->opc2 & 6) { + case 0: + switch (ri->opc1) { + case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ + if (ri->crm == 9 && arm_pan_enabled(env)) { + mmu_idx = regime_e20 ? 
+ ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN; + } else { + mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1; + } + break; + case 4: /* AT S1E2R, AT S1E2W */ + mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2; + break; + case 6: /* AT S1E3R, AT S1E3W */ + mmu_idx = ARMMMUIdx_E3; + for_el3 = true; + break; + default: + g_assert_not_reached(); + } + break; + case 2: /* AT S1E0R, AT S1E0W */ + mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0; + break; + case 4: /* AT S12E1R, AT S12E1W */ + mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1; + break; + case 6: /* AT S12E0R, AT S12E0W */ + mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0; + break; + default: + g_assert_not_reached(); + } + + ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env); + env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss); +} + +static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (ri->opc2 & 4) { + /* + * The ATS12NSO* operations must trap to EL3 or EL2 if executed in + * Secure EL1 (which can only happen if EL3 is AArch64). + * They are simply UNDEF if executed from NS EL1. + * They function normally from EL2 or EL3. + */ + if (arm_current_el(env) == 1) { + if (arm_is_secure_below_el3(env)) { + if (env->cp15.scr_el3 & SCR_EEL2) { + return CP_ACCESS_TRAP_EL2; + } + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_UNDEFINED; + } + } + return CP_ACCESS_OK; +} + +static const ARMCPRegInfo vapa_ats_reginfo[] = { + /* This underdecoding is safe because the reginfo is NO_RAW. */ + { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, + .access = PL1_W, .accessfn = ats_access, + .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, +}; + +static const ARMCPRegInfo v8_ats_reginfo[] = { + /* 64 bit address translation operations */ + { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .fgt = FGT_ATS1E1R, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, + { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .fgt = FGT_ATS1E1W, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, + { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .fgt = FGT_ATS1E0R, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, + { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .fgt = FGT_ATS1E0W, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, + { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .accessfn = at_e012_access, .writefn = ats_write64 }, + { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .accessfn = at_e012_access, .writefn = ats_write64 }, + { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .accessfn = at_e012_access, .writefn = 
ats_write64 }, + { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .accessfn = at_e012_access, .writefn = ats_write64 }, + /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ + { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, + { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write64 }, +}; + +static const ARMCPRegInfo el2_ats_reginfo[] = { + /* + * Unlike the other EL2-related AT operations, these must + * UNDEF from EL3 if EL2 is not implemented, which is why we + * define them here rather than with the rest of the AT ops. + */ + { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, + .access = PL2_W, .accessfn = at_s1e2_access, + .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = ats_write64 }, + { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, + .access = PL2_W, .accessfn = at_s1e2_access, + .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = ats_write64 }, + /* + * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE + * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 + * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose + * to behave as if SCR.NS was 1. + */ + { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, + .access = PL2_W, + .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, + { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, + .access = PL2_W, + .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, +}; + +static const ARMCPRegInfo ats1e1_reginfo[] = { + { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .fgt = FGT_ATS1E1RP, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, + { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .fgt = FGT_ATS1E1WP, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, +}; + +static const ARMCPRegInfo ats1cp_reginfo[] = { + { .name = "ATS1CPRP", + .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write }, + { .name = "ATS1CPWP", + .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_write }, +}; + +void define_at_insn_regs(ARMCPU *cpu) +{ + CPUARMState *env = &cpu->env; + + if (arm_feature(env, ARM_FEATURE_VAPA)) { + define_arm_cp_regs(cpu, vapa_ats_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V8)) { + define_arm_cp_regs(cpu, v8_ats_reginfo); + } + if (arm_feature(env, ARM_FEATURE_EL2) + || (arm_feature(env, ARM_FEATURE_EL3) + && arm_feature(env, ARM_FEATURE_V8))) { + define_arm_cp_regs(cpu, el2_ats_reginfo); + } + if (cpu_isar_feature(aa64_ats1e1, cpu)) { + define_arm_cp_regs(cpu, ats1e1_reginfo); + } + if (cpu_isar_feature(aa32_ats1e1, cpu)) { + 
define_arm_cp_regs(cpu, ats1cp_reginfo); + } +} diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c index 5496f14dc16..dc249ce1f14 100644 --- a/target/arm/tcg/cpu-v7m.c +++ b/target/arm/tcg/cpu-v7m.c @@ -10,7 +10,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "hw/core/tcg-cpu-ops.h" +#include "accel/tcg/cpu-ops.h" #include "internals.h" #if !defined(CONFIG_USER_ONLY) @@ -19,7 +19,6 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { - CPUClass *cc = CPU_GET_CLASS(cs); ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; bool ret = false; @@ -35,7 +34,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) if (interrupt_request & CPU_INTERRUPT_HARD && (armv7m_nvic_can_take_pending_exception(env->nvic))) { cs->exception_index = EXCP_IRQ; - cc->tcg_ops->do_interrupt(cs); + cs->cc->tcg_ops->do_interrupt(cs); ret = true; } return ret; @@ -46,6 +45,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) static void cortex_m0_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; set_feature(&cpu->env, ARM_FEATURE_V6); set_feature(&cpu->env, ARM_FEATURE_M); @@ -59,51 +59,53 @@ static void cortex_m0_initfn(Object *obj) * by looking at ID register fields. We use the same values as * for the M3. */ - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000200; - cpu->isar.id_dfr0 = 0x00100000; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00000030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x00000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01141110; - cpu->isar.id_isar1 = 0x02111000; - cpu->isar.id_isar2 = 0x21112231; - cpu->isar.id_isar3 = 0x01111110; - cpu->isar.id_isar4 = 0x01310102; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; + SET_IDREG(isar, ID_PFR0, 0x00000030); + SET_IDREG(isar, ID_PFR1, 0x00000200); + SET_IDREG(isar, ID_DFR0, 0x00100000); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_MMFR0, 0x00000030); + SET_IDREG(isar, ID_MMFR1, 0x00000000); + SET_IDREG(isar, ID_MMFR2, 0x00000000); + SET_IDREG(isar, ID_MMFR3, 0x00000000); + SET_IDREG(isar, ID_ISAR0, 0x01141110); + SET_IDREG(isar, ID_ISAR1, 0x02111000); + SET_IDREG(isar, ID_ISAR2, 0x21112231); + SET_IDREG(isar, ID_ISAR3, 0x01111110); + SET_IDREG(isar, ID_ISAR4, 0x01310102); + SET_IDREG(isar, ID_ISAR5, 0x00000000); + SET_IDREG(isar, ID_ISAR6, 0x00000000); } static void cortex_m3_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; set_feature(&cpu->env, ARM_FEATURE_V7); set_feature(&cpu->env, ARM_FEATURE_M); set_feature(&cpu->env, ARM_FEATURE_M_MAIN); cpu->midr = 0x410fc231; cpu->pmsav7_dregion = 8; - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000200; - cpu->isar.id_dfr0 = 0x00100000; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00000030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x00000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01141110; - cpu->isar.id_isar1 = 0x02111000; - cpu->isar.id_isar2 = 0x21112231; - cpu->isar.id_isar3 = 0x01111110; - cpu->isar.id_isar4 = 0x01310102; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; + SET_IDREG(isar, ID_PFR0, 0x00000030); + SET_IDREG(isar, ID_PFR1, 0x00000200); + SET_IDREG(isar, ID_DFR0, 0x00100000); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_MMFR0, 0x00000030); + SET_IDREG(isar, ID_MMFR1, 0x00000000); + SET_IDREG(isar, ID_MMFR2, 
0x00000000); + SET_IDREG(isar, ID_MMFR3, 0x00000000); + SET_IDREG(isar, ID_ISAR0, 0x01141110); + SET_IDREG(isar, ID_ISAR1, 0x02111000); + SET_IDREG(isar, ID_ISAR2, 0x21112231); + SET_IDREG(isar, ID_ISAR3, 0x01111110); + SET_IDREG(isar, ID_ISAR4, 0x01310102); + SET_IDREG(isar, ID_ISAR5, 0x00000000); + SET_IDREG(isar, ID_ISAR6, 0x00000000); } static void cortex_m4_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; set_feature(&cpu->env, ARM_FEATURE_V7); set_feature(&cpu->env, ARM_FEATURE_M); @@ -114,26 +116,27 @@ static void cortex_m4_initfn(Object *obj) cpu->isar.mvfr0 = 0x10110021; cpu->isar.mvfr1 = 0x11000011; cpu->isar.mvfr2 = 0x00000000; - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000200; - cpu->isar.id_dfr0 = 0x00100000; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00000030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x00000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01141110; - cpu->isar.id_isar1 = 0x02111000; - cpu->isar.id_isar2 = 0x21112231; - cpu->isar.id_isar3 = 0x01111110; - cpu->isar.id_isar4 = 0x01310102; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; + SET_IDREG(isar, ID_PFR0, 0x00000030); + SET_IDREG(isar, ID_PFR1, 0x00000200); + SET_IDREG(isar, ID_DFR0, 0x00100000); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_MMFR0, 0x00000030); + SET_IDREG(isar, ID_MMFR1, 0x00000000); + SET_IDREG(isar, ID_MMFR2, 0x00000000); + SET_IDREG(isar, ID_MMFR3, 0x00000000); + SET_IDREG(isar, ID_ISAR0, 0x01141110); + SET_IDREG(isar, ID_ISAR1, 0x02111000); + SET_IDREG(isar, ID_ISAR2, 0x21112231); + SET_IDREG(isar, ID_ISAR3, 0x01111110); + SET_IDREG(isar, ID_ISAR4, 0x01310102); + SET_IDREG(isar, ID_ISAR5, 0x00000000); + SET_IDREG(isar, ID_ISAR6, 0x00000000); } static void cortex_m7_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; set_feature(&cpu->env, ARM_FEATURE_V7); set_feature(&cpu->env, ARM_FEATURE_M); @@ -144,26 +147,27 @@ static void cortex_m7_initfn(Object *obj) cpu->isar.mvfr0 = 0x10110221; cpu->isar.mvfr1 = 0x12000011; cpu->isar.mvfr2 = 0x00000040; - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000200; - cpu->isar.id_dfr0 = 0x00100000; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00100030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x01000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01101110; - cpu->isar.id_isar1 = 0x02112000; - cpu->isar.id_isar2 = 0x20232231; - cpu->isar.id_isar3 = 0x01111131; - cpu->isar.id_isar4 = 0x01310132; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; + SET_IDREG(isar, ID_PFR0, 0x00000030); + SET_IDREG(isar, ID_PFR1, 0x00000200); + SET_IDREG(isar, ID_DFR0, 0x00100000); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_MMFR0, 0x00100030); + SET_IDREG(isar, ID_MMFR1, 0x00000000); + SET_IDREG(isar, ID_MMFR2, 0x01000000); + SET_IDREG(isar, ID_MMFR3, 0x00000000); + SET_IDREG(isar, ID_ISAR0, 0x01101110); + SET_IDREG(isar, ID_ISAR1, 0x02112000); + SET_IDREG(isar, ID_ISAR2, 0x20232231); + SET_IDREG(isar, ID_ISAR3, 0x01111131); + SET_IDREG(isar, ID_ISAR4, 0x01310132); + SET_IDREG(isar, ID_ISAR5, 0x00000000); + SET_IDREG(isar, ID_ISAR6, 0x00000000); } static void cortex_m33_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; set_feature(&cpu->env, ARM_FEATURE_V8); set_feature(&cpu->env, ARM_FEATURE_M); @@ -176,28 +180,29 @@ static void cortex_m33_initfn(Object *obj) 
cpu->isar.mvfr0 = 0x10110021; cpu->isar.mvfr1 = 0x11000011; cpu->isar.mvfr2 = 0x00000040; - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000210; - cpu->isar.id_dfr0 = 0x00200000; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00101F40; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x01000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01101110; - cpu->isar.id_isar1 = 0x02212000; - cpu->isar.id_isar2 = 0x20232232; - cpu->isar.id_isar3 = 0x01111131; - cpu->isar.id_isar4 = 0x01310132; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; - cpu->clidr = 0x00000000; + SET_IDREG(isar, ID_PFR0, 0x00000030); + SET_IDREG(isar, ID_PFR1, 0x00000210); + SET_IDREG(isar, ID_DFR0, 0x00200000); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_MMFR0, 0x00101F40); + SET_IDREG(isar, ID_MMFR1, 0x00000000); + SET_IDREG(isar, ID_MMFR2, 0x01000000); + SET_IDREG(isar, ID_MMFR3, 0x00000000); + SET_IDREG(isar, ID_ISAR0, 0x01101110); + SET_IDREG(isar, ID_ISAR1, 0x02212000); + SET_IDREG(isar, ID_ISAR2, 0x20232232); + SET_IDREG(isar, ID_ISAR3, 0x01111131); + SET_IDREG(isar, ID_ISAR4, 0x01310132); + SET_IDREG(isar, ID_ISAR5, 0x00000000); + SET_IDREG(isar, ID_ISAR6, 0x00000000); + SET_IDREG(isar, CLIDR, 0x00000000); cpu->ctr = 0x8000c000; } static void cortex_m55_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; set_feature(&cpu->env, ARM_FEATURE_V8); set_feature(&cpu->env, ARM_FEATURE_V8_1M); @@ -213,38 +218,47 @@ static void cortex_m55_initfn(Object *obj) cpu->isar.mvfr0 = 0x10110221; cpu->isar.mvfr1 = 0x12100211; cpu->isar.mvfr2 = 0x00000040; - cpu->isar.id_pfr0 = 0x20000030; - cpu->isar.id_pfr1 = 0x00000230; - cpu->isar.id_dfr0 = 0x10200000; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00111040; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x01000000; - cpu->isar.id_mmfr3 = 0x00000011; - cpu->isar.id_isar0 = 0x01103110; - cpu->isar.id_isar1 = 0x02212000; - cpu->isar.id_isar2 = 0x20232232; - cpu->isar.id_isar3 = 0x01111131; - cpu->isar.id_isar4 = 0x01310132; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; - cpu->clidr = 0x00000000; /* caches not implemented */ + SET_IDREG(isar, ID_PFR0, 0x20000030); + SET_IDREG(isar, ID_PFR1, 0x00000230); + SET_IDREG(isar, ID_DFR0, 0x10200000); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_MMFR0, 0x00111040); + SET_IDREG(isar, ID_MMFR1, 0x00000000); + SET_IDREG(isar, ID_MMFR2, 0x01000000); + SET_IDREG(isar, ID_MMFR3, 0x00000011); + SET_IDREG(isar, ID_ISAR0, 0x01103110); + SET_IDREG(isar, ID_ISAR1, 0x02212000); + SET_IDREG(isar, ID_ISAR2, 0x20232232); + SET_IDREG(isar, ID_ISAR3, 0x01111131); + SET_IDREG(isar, ID_ISAR4, 0x01310132); + SET_IDREG(isar, ID_ISAR5, 0x00000000); + SET_IDREG(isar, ID_ISAR6, 0x00000000); + SET_IDREG(isar, CLIDR, 0x00000000); /* caches not implemented */ cpu->ctr = 0x8303c003; } static const TCGCPUOps arm_v7m_tcg_ops = { + /* ARM processors have a weak memory model */ + .guest_default_memory_order = 0, + .mttcg_supported = true, + .initialize = arm_translate_init, + .translate_code = arm_translate_code, + .get_tb_cpu_state = arm_get_tb_cpu_state, .synchronize_from_tb = arm_cpu_synchronize_from_tb, .debug_excp_handler = arm_debug_excp_handler, .restore_state_to_opc = arm_restore_state_to_opc, + .mmu_index = arm_cpu_mmu_index, #ifdef CONFIG_USER_ONLY .record_sigsegv = arm_cpu_record_sigsegv, .record_sigbus = arm_cpu_record_sigbus, #else - .tlb_fill = arm_cpu_tlb_fill, + .tlb_fill_align = 
arm_cpu_tlb_fill_align, + .pointer_wrap = cpu_pointer_wrap_uint32, .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt, .cpu_exec_halt = arm_cpu_exec_halt, + .cpu_exec_reset = cpu_reset, .do_interrupt = arm_v7m_cpu_do_interrupt, .do_transaction_failed = arm_cpu_do_transaction_failed, .do_unaligned_access = arm_cpu_do_unaligned_access, @@ -254,14 +268,13 @@ static const TCGCPUOps arm_v7m_tcg_ops = { #endif /* !CONFIG_USER_ONLY */ }; -static void arm_v7m_class_init(ObjectClass *oc, void *data) +static void arm_v7m_class_init(ObjectClass *oc, const void *data) { ARMCPUClass *acc = ARM_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); acc->info = data; cc->tcg_ops = &arm_v7m_tcg_ops; - cc->gdb_core_xml_file = "arm-m-profile.xml"; } static const ARMCPUInfo arm_v7m_cpus[] = { diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c index 20c2737f17b..a2a23eae0d7 100644 --- a/target/arm/tcg/cpu32.c +++ b/target/arm/tcg/cpu32.c @@ -10,7 +10,7 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "hw/core/tcg-cpu-ops.h" +#include "accel/tcg/cpu-ops.h" #include "internals.h" #include "target/arm/idau.h" #if !defined(CONFIG_USER_ONLY) @@ -23,18 +23,19 @@ void aa32_max_features(ARMCPU *cpu) { uint32_t t; + ARMISARegisters *isar = &cpu->isar; /* Add additional features supported by QEMU */ - t = cpu->isar.id_isar5; + t = GET_IDREG(isar, ID_ISAR5); t = FIELD_DP32(t, ID_ISAR5, AES, 2); /* FEAT_PMULL */ t = FIELD_DP32(t, ID_ISAR5, SHA1, 1); /* FEAT_SHA1 */ t = FIELD_DP32(t, ID_ISAR5, SHA2, 1); /* FEAT_SHA256 */ t = FIELD_DP32(t, ID_ISAR5, CRC32, 1); t = FIELD_DP32(t, ID_ISAR5, RDM, 1); /* FEAT_RDM */ t = FIELD_DP32(t, ID_ISAR5, VCMA, 1); /* FEAT_FCMA */ - cpu->isar.id_isar5 = t; + SET_IDREG(isar, ID_ISAR5, t); - t = cpu->isar.id_isar6; + t = GET_IDREG(isar, ID_ISAR6); t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1); /* FEAT_JSCVT */ t = FIELD_DP32(t, ID_ISAR6, DP, 1); /* Feat_DotProd */ t = FIELD_DP32(t, ID_ISAR6, FHM, 1); /* FEAT_FHM */ @@ -42,7 +43,7 @@ void aa32_max_features(ARMCPU *cpu) t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1); /* FEAT_SPECRES */ t = FIELD_DP32(t, ID_ISAR6, BF16, 1); /* FEAT_AA32BF16 */ t = FIELD_DP32(t, ID_ISAR6, I8MM, 1); /* FEAT_AA32I8MM */ - cpu->isar.id_isar6 = t; + SET_IDREG(isar, ID_ISAR6, t); t = cpu->isar.mvfr1; t = FIELD_DP32(t, MVFR1, FPHP, 3); /* FEAT_FP16 */ @@ -54,38 +55,34 @@ void aa32_max_features(ARMCPU *cpu) t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */ cpu->isar.mvfr2 = t; - t = cpu->isar.id_mmfr3; - t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* FEAT_PAN2 */ - cpu->isar.id_mmfr3 = t; + FIELD_DP32_IDREG(isar, ID_MMFR3, PAN, 2); /* FEAT_PAN2 */ - t = cpu->isar.id_mmfr4; + t = GET_IDREG(isar, ID_MMFR4); t = FIELD_DP32(t, ID_MMFR4, HPDS, 2); /* FEAT_HPDS2 */ t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */ t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* FEAT_TTCNP */ t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX */ t = FIELD_DP32(t, ID_MMFR4, EVT, 2); /* FEAT_EVT */ - cpu->isar.id_mmfr4 = t; + SET_IDREG(isar, ID_MMFR4, t); - t = cpu->isar.id_mmfr5; - t = FIELD_DP32(t, ID_MMFR5, ETS, 2); /* FEAT_ETS2 */ - cpu->isar.id_mmfr5 = t; + FIELD_DP32_IDREG(isar, ID_MMFR5, ETS, 2); /* FEAT_ETS2 */ - t = cpu->isar.id_pfr0; - t = FIELD_DP32(t, ID_PFR0, CSV2, 2); /* FEAT_CVS2 */ + t = GET_IDREG(isar, ID_PFR0); + t = FIELD_DP32(t, ID_PFR0, CSV2, 2); /* FEAT_CSV2 */ t = FIELD_DP32(t, ID_PFR0, DIT, 1); /* FEAT_DIT */ t = FIELD_DP32(t, ID_PFR0, RAS, 1); /* FEAT_RAS */ - cpu->isar.id_pfr0 = t; + SET_IDREG(isar, ID_PFR0, t); - t = cpu->isar.id_pfr2; + t = GET_IDREG(isar, ID_PFR2); t = FIELD_DP32(t, 
ID_PFR2, CSV3, 1); /* FEAT_CSV3 */ t = FIELD_DP32(t, ID_PFR2, SSBS, 1); /* FEAT_SSBS */ - cpu->isar.id_pfr2 = t; + SET_IDREG(isar, ID_PFR2, t); - t = cpu->isar.id_dfr0; + t = GET_IDREG(isar, ID_DFR0); t = FIELD_DP32(t, ID_DFR0, COPDBG, 10); /* FEAT_Debugv8p8 */ t = FIELD_DP32(t, ID_DFR0, COPSDBG, 10); /* FEAT_Debugv8p8 */ t = FIELD_DP32(t, ID_DFR0, PERFMON, 6); /* FEAT_PMUv3p5 */ - cpu->isar.id_dfr0 = t; + SET_IDREG(isar, ID_DFR0, t); /* Debug ID registers. */ @@ -115,9 +112,7 @@ void aa32_max_features(ARMCPU *cpu) t = FIELD_DP32(t, DBGDEVID1, PCSROFFSET, 2); cpu->isar.dbgdevid1 = t; - t = cpu->isar.id_dfr1; - t = FIELD_DP32(t, ID_DFR1, HPMN0, 1); /* FEAT_HPMN0 */ - cpu->isar.id_dfr1 = t; + FIELD_DP32_IDREG(isar, ID_DFR1, HPMN0, 1); /* FEAT_HPMN0 */ } /* CPU models. These are not needed for the AArch64 linux-user build. */ @@ -140,7 +135,7 @@ static void arm926_initfn(Object *obj) * ARMv5 does not have the ID_ISAR registers, but we can still * set the field to indicate Jazelle support within QEMU. */ - cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1); + FIELD_DP32_IDREG(&cpu->isar, ID_ISAR1, JAZELLE, 1); /* * Similarly, we need to set MVFR0 fields to enable vfp and short vector * support even though ARMv5 doesn't have this register. @@ -182,7 +177,7 @@ static void arm1026_initfn(Object *obj) * ARMv5 does not have the ID_ISAR registers, but we can still * set the field to indicate Jazelle support within QEMU. */ - cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1); + FIELD_DP32_IDREG(&cpu->isar, ID_ISAR1, JAZELLE, 1); /* * Similarly, we need to set MVFR0 fields to enable vfp and short vector * support even though ARMv5 doesn't have this register. @@ -206,6 +201,7 @@ static void arm1026_initfn(Object *obj) static void arm1136_r2_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; /* * What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an * older core than plain "arm1136". 
In particular this does not @@ -226,24 +222,25 @@ static void arm1136_r2_initfn(Object *obj) cpu->isar.mvfr1 = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; - cpu->isar.id_pfr0 = 0x111; - cpu->isar.id_pfr1 = 0x1; - cpu->isar.id_dfr0 = 0x2; - cpu->id_afr0 = 0x3; - cpu->isar.id_mmfr0 = 0x01130003; - cpu->isar.id_mmfr1 = 0x10030302; - cpu->isar.id_mmfr2 = 0x01222110; - cpu->isar.id_isar0 = 0x00140011; - cpu->isar.id_isar1 = 0x12002111; - cpu->isar.id_isar2 = 0x11231111; - cpu->isar.id_isar3 = 0x01102131; - cpu->isar.id_isar4 = 0x141; + SET_IDREG(isar, ID_PFR0, 0x111); + SET_IDREG(isar, ID_PFR1, 0x1); + SET_IDREG(isar, ID_DFR0, 0x2); + SET_IDREG(isar, ID_AFR0, 0x3); + SET_IDREG(isar, ID_MMFR0, 0x01130003); + SET_IDREG(isar, ID_MMFR1, 0x10030302); + SET_IDREG(isar, ID_MMFR2, 0x01222110); + SET_IDREG(isar, ID_ISAR0, 0x00140011); + SET_IDREG(isar, ID_ISAR1, 0x12002111); + SET_IDREG(isar, ID_ISAR2, 0x11231111); + SET_IDREG(isar, ID_ISAR3, 0x01102131); + SET_IDREG(isar, ID_ISAR4, 0x141); cpu->reset_auxcr = 7; } static void arm1136_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,arm1136"; set_feature(&cpu->env, ARM_FEATURE_V6K); @@ -257,24 +254,25 @@ static void arm1136_initfn(Object *obj) cpu->isar.mvfr1 = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; - cpu->isar.id_pfr0 = 0x111; - cpu->isar.id_pfr1 = 0x1; - cpu->isar.id_dfr0 = 0x2; - cpu->id_afr0 = 0x3; - cpu->isar.id_mmfr0 = 0x01130003; - cpu->isar.id_mmfr1 = 0x10030302; - cpu->isar.id_mmfr2 = 0x01222110; - cpu->isar.id_isar0 = 0x00140011; - cpu->isar.id_isar1 = 0x12002111; - cpu->isar.id_isar2 = 0x11231111; - cpu->isar.id_isar3 = 0x01102131; - cpu->isar.id_isar4 = 0x141; + SET_IDREG(isar, ID_PFR0, 0x111); + SET_IDREG(isar, ID_PFR1, 0x1); + SET_IDREG(isar, ID_DFR0, 0x2); + SET_IDREG(isar, ID_AFR0, 0x3); + SET_IDREG(isar, ID_MMFR0, 0x01130003); + SET_IDREG(isar, ID_MMFR1, 0x10030302); + SET_IDREG(isar, ID_MMFR2, 0x01222110); + SET_IDREG(isar, ID_ISAR0, 0x00140011); + SET_IDREG(isar, ID_ISAR1, 0x12002111); + SET_IDREG(isar, ID_ISAR2, 0x11231111); + SET_IDREG(isar, ID_ISAR3, 0x01102131); + SET_IDREG(isar, ID_ISAR4, 0x141); cpu->reset_auxcr = 7; } static void arm1176_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,arm1176"; set_feature(&cpu->env, ARM_FEATURE_V6K); @@ -289,24 +287,25 @@ static void arm1176_initfn(Object *obj) cpu->isar.mvfr1 = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; - cpu->isar.id_pfr0 = 0x111; - cpu->isar.id_pfr1 = 0x11; - cpu->isar.id_dfr0 = 0x33; - cpu->id_afr0 = 0; - cpu->isar.id_mmfr0 = 0x01130003; - cpu->isar.id_mmfr1 = 0x10030302; - cpu->isar.id_mmfr2 = 0x01222100; - cpu->isar.id_isar0 = 0x0140011; - cpu->isar.id_isar1 = 0x12002111; - cpu->isar.id_isar2 = 0x11231121; - cpu->isar.id_isar3 = 0x01102131; - cpu->isar.id_isar4 = 0x01141; + SET_IDREG(isar, ID_PFR0, 0x111); + SET_IDREG(isar, ID_PFR1, 0x11); + SET_IDREG(isar, ID_DFR0, 0x33); + SET_IDREG(isar, ID_AFR0, 0); + SET_IDREG(isar, ID_MMFR0, 0x01130003); + SET_IDREG(isar, ID_MMFR1, 0x10030302); + SET_IDREG(isar, ID_MMFR2, 0x01222100); + SET_IDREG(isar, ID_ISAR0, 0x0140011); + SET_IDREG(isar, ID_ISAR1, 0x12002111); + SET_IDREG(isar, ID_ISAR2, 0x11231121); + SET_IDREG(isar, ID_ISAR3, 0x01102131); + SET_IDREG(isar, ID_ISAR4, 0x01141); cpu->reset_auxcr = 7; } static void arm11mpcore_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible 
= "arm,arm11mpcore"; set_feature(&cpu->env, ARM_FEATURE_V6K); @@ -318,18 +317,18 @@ static void arm11mpcore_initfn(Object *obj) cpu->isar.mvfr0 = 0x11111111; cpu->isar.mvfr1 = 0x00000000; cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */ - cpu->isar.id_pfr0 = 0x111; - cpu->isar.id_pfr1 = 0x1; - cpu->isar.id_dfr0 = 0; - cpu->id_afr0 = 0x2; - cpu->isar.id_mmfr0 = 0x01100103; - cpu->isar.id_mmfr1 = 0x10020302; - cpu->isar.id_mmfr2 = 0x01222000; - cpu->isar.id_isar0 = 0x00100011; - cpu->isar.id_isar1 = 0x12002111; - cpu->isar.id_isar2 = 0x11221011; - cpu->isar.id_isar3 = 0x01102131; - cpu->isar.id_isar4 = 0x141; + SET_IDREG(isar, ID_PFR0, 0x111); + SET_IDREG(isar, ID_PFR1, 0x1); + SET_IDREG(isar, ID_DFR0, 0); + SET_IDREG(isar, ID_AFR0, 0x2); + SET_IDREG(isar, ID_MMFR0, 0x01100103); + SET_IDREG(isar, ID_MMFR1, 0x10020302); + SET_IDREG(isar, ID_MMFR2, 0x01222000); + SET_IDREG(isar, ID_ISAR0, 0x00100011); + SET_IDREG(isar, ID_ISAR1, 0x12002111); + SET_IDREG(isar, ID_ISAR2, 0x11221011); + SET_IDREG(isar, ID_ISAR3, 0x01102131); + SET_IDREG(isar, ID_ISAR4, 0x141); cpu->reset_auxcr = 1; } @@ -343,6 +342,7 @@ static const ARMCPRegInfo cortexa8_cp_reginfo[] = { static void cortex_a8_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,cortex-a8"; set_feature(&cpu->env, ARM_FEATURE_V7); @@ -357,21 +357,21 @@ static void cortex_a8_initfn(Object *obj) cpu->isar.mvfr1 = 0x00011111; cpu->ctr = 0x82048004; cpu->reset_sctlr = 0x00c50078; - cpu->isar.id_pfr0 = 0x1031; - cpu->isar.id_pfr1 = 0x11; - cpu->isar.id_dfr0 = 0x400; - cpu->id_afr0 = 0; - cpu->isar.id_mmfr0 = 0x31100003; - cpu->isar.id_mmfr1 = 0x20000000; - cpu->isar.id_mmfr2 = 0x01202000; - cpu->isar.id_mmfr3 = 0x11; - cpu->isar.id_isar0 = 0x00101111; - cpu->isar.id_isar1 = 0x12112111; - cpu->isar.id_isar2 = 0x21232031; - cpu->isar.id_isar3 = 0x11112131; - cpu->isar.id_isar4 = 0x00111142; + SET_IDREG(isar, ID_PFR0, 0x1031); + SET_IDREG(isar, ID_PFR1, 0x11); + SET_IDREG(isar, ID_DFR0, 0x400); + SET_IDREG(isar, ID_AFR0, 0); + SET_IDREG(isar, ID_MMFR0, 0x31100003); + SET_IDREG(isar, ID_MMFR1, 0x20000000); + SET_IDREG(isar, ID_MMFR2, 0x01202000); + SET_IDREG(isar, ID_MMFR3, 0x11); + SET_IDREG(isar, ID_ISAR0, 0x00101111); + SET_IDREG(isar, ID_ISAR1, 0x12112111); + SET_IDREG(isar, ID_ISAR2, 0x21232031); + SET_IDREG(isar, ID_ISAR3, 0x11112131); + SET_IDREG(isar, ID_ISAR4, 0x00111142); cpu->isar.dbgdidr = 0x15141000; - cpu->clidr = (1 << 27) | (2 << 24) | 3; + SET_IDREG(isar, CLIDR, (1 << 27) | (2 << 24) | 3); cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */ cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. 
*/ @@ -412,6 +412,7 @@ static const ARMCPRegInfo cortexa9_cp_reginfo[] = { static void cortex_a9_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,cortex-a9"; set_feature(&cpu->env, ARM_FEATURE_V7); @@ -432,21 +433,21 @@ static void cortex_a9_initfn(Object *obj) cpu->isar.mvfr1 = 0x01111111; cpu->ctr = 0x80038003; cpu->reset_sctlr = 0x00c50078; - cpu->isar.id_pfr0 = 0x1031; - cpu->isar.id_pfr1 = 0x11; - cpu->isar.id_dfr0 = 0x000; - cpu->id_afr0 = 0; - cpu->isar.id_mmfr0 = 0x00100103; - cpu->isar.id_mmfr1 = 0x20000000; - cpu->isar.id_mmfr2 = 0x01230000; - cpu->isar.id_mmfr3 = 0x00002111; - cpu->isar.id_isar0 = 0x00101111; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232041; - cpu->isar.id_isar3 = 0x11112131; - cpu->isar.id_isar4 = 0x00111142; + SET_IDREG(isar, ID_PFR0, 0x1031); + SET_IDREG(isar, ID_PFR1, 0x11); + SET_IDREG(isar, ID_DFR0, 0x000); + SET_IDREG(isar, ID_AFR0, 0); + SET_IDREG(isar, ID_MMFR0, 0x00100103); + SET_IDREG(isar, ID_MMFR1, 0x20000000); + SET_IDREG(isar, ID_MMFR2, 0x01230000); + SET_IDREG(isar, ID_MMFR3, 0x00002111); + SET_IDREG(isar, ID_ISAR0, 0x00101111); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232041); + SET_IDREG(isar, ID_ISAR3, 0x11112131); + SET_IDREG(isar, ID_ISAR4, 0x00111142); cpu->isar.dbgdidr = 0x35141000; - cpu->clidr = (1 << 27) | (1 << 24) | 3; + SET_IDREG(isar, CLIDR, (1 << 27) | (1 << 24) | 3); cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */ cpu->isar.reset_pmcr_el0 = 0x41093000; @@ -479,6 +480,7 @@ static const ARMCPRegInfo cortexa15_cp_reginfo[] = { static void cortex_a7_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,cortex-a7"; set_feature(&cpu->env, ARM_FEATURE_V7VE); @@ -497,27 +499,27 @@ static void cortex_a7_initfn(Object *obj) cpu->isar.mvfr1 = 0x11111111; cpu->ctr = 0x84448003; cpu->reset_sctlr = 0x00c50078; - cpu->isar.id_pfr0 = 0x00001131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x02010555; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10101105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01240000; - cpu->isar.id_mmfr3 = 0x02102211; + SET_IDREG(isar, ID_PFR0, 0x00001131); + SET_IDREG(isar, ID_PFR1, 0x00011011); + SET_IDREG(isar, ID_DFR0, 0x02010555); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_MMFR0, 0x10101105); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01240000); + SET_IDREG(isar, ID_MMFR3, 0x02102211); /* * a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but * table 4-41 gives 0x02101110, which includes the arm div insns. 
*/ - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232041; - cpu->isar.id_isar3 = 0x11112131; - cpu->isar.id_isar4 = 0x10011142; + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232041); + SET_IDREG(isar, ID_ISAR3, 0x11112131); + SET_IDREG(isar, ID_ISAR4, 0x10011142); cpu->isar.dbgdidr = 0x3515f005; cpu->isar.dbgdevid = 0x01110f13; cpu->isar.dbgdevid1 = 0x1; - cpu->clidr = 0x0a200023; + SET_IDREG(isar, CLIDR, 0x0a200023); cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */ @@ -528,6 +530,7 @@ static void cortex_a7_initfn(Object *obj) static void cortex_a15_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,cortex-a15"; set_feature(&cpu->env, ARM_FEATURE_V7VE); @@ -548,23 +551,23 @@ static void cortex_a15_initfn(Object *obj) cpu->isar.mvfr1 = 0x11111111; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50078; - cpu->isar.id_pfr0 = 0x00001131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x02010555; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x20000000; - cpu->isar.id_mmfr2 = 0x01240000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232041; - cpu->isar.id_isar3 = 0x11112131; - cpu->isar.id_isar4 = 0x10011142; + SET_IDREG(isar, ID_PFR0, 0x00001131); + SET_IDREG(isar, ID_PFR1, 0x00011011); + SET_IDREG(isar, ID_DFR0, 0x02010555); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_MMFR0, 0x10201105); + SET_IDREG(isar, ID_MMFR1, 0x20000000); + SET_IDREG(isar, ID_MMFR2, 0x01240000); + SET_IDREG(isar, ID_MMFR3, 0x02102211); + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232041); + SET_IDREG(isar, ID_ISAR3, 0x11112131); + SET_IDREG(isar, ID_ISAR4, 0x10011142); cpu->isar.dbgdidr = 0x3515f021; cpu->isar.dbgdevid = 0x01110f13; cpu->isar.dbgdevid1 = 0x0; - cpu->clidr = 0x0a200023; + SET_IDREG(isar, CLIDR, 0x0a200023); cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */ @@ -574,9 +577,9 @@ static void cortex_a15_initfn(Object *obj) static const ARMCPRegInfo cortexr5_cp_reginfo[] = { /* Dummy the TCM region regs for the moment */ - { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, + { .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_CONST }, - { .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, + { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_CONST }, { .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP }, @@ -585,27 +588,28 @@ static const ARMCPRegInfo cortexr5_cp_reginfo[] = { static void cortex_r5_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; set_feature(&cpu->env, ARM_FEATURE_V7); set_feature(&cpu->env, ARM_FEATURE_V7MP); set_feature(&cpu->env, ARM_FEATURE_PMSA); set_feature(&cpu->env, ARM_FEATURE_PMU); cpu->midr = 0x411fc153; /* r1p3 */ - cpu->isar.id_pfr0 = 0x0131; - cpu->isar.id_pfr1 = 0x001; - cpu->isar.id_dfr0 = 0x010400; - cpu->id_afr0 = 0x0; - 
cpu->isar.id_mmfr0 = 0x0210030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x01200000; - cpu->isar.id_mmfr3 = 0x0211; - cpu->isar.id_isar0 = 0x02101111; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232141; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x0010142; - cpu->isar.id_isar5 = 0x0; - cpu->isar.id_isar6 = 0x0; + SET_IDREG(isar, ID_PFR0, 0x0131); + SET_IDREG(isar, ID_PFR1, 0x001); + SET_IDREG(isar, ID_DFR0, 0x010400); + SET_IDREG(isar, ID_AFR0, 0x0); + SET_IDREG(isar, ID_MMFR0, 0x0210030); + SET_IDREG(isar, ID_MMFR1, 0x00000000); + SET_IDREG(isar, ID_MMFR2, 0x01200000); + SET_IDREG(isar, ID_MMFR3, 0x0211); + SET_IDREG(isar, ID_ISAR0, 0x02101111); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232141); + SET_IDREG(isar, ID_ISAR3, 0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x0010142); + SET_IDREG(isar, ID_ISAR5, 0x0); + SET_IDREG(isar, ID_ISAR6, 0x0); cpu->mp_is_up = true; cpu->pmsav7_dregion = 16; cpu->isar.reset_pmcr_el0 = 0x41151800; @@ -720,6 +724,7 @@ static const ARMCPRegInfo cortex_r52_cp_reginfo[] = { static void cortex_r52_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; set_feature(&cpu->env, ARM_FEATURE_V8); set_feature(&cpu->env, ARM_FEATURE_EL2); @@ -737,23 +742,23 @@ static void cortex_r52_initfn(Object *obj) cpu->isar.mvfr2 = 0x00000043; cpu->ctr = 0x8144c004; cpu->reset_sctlr = 0x30c50838; - cpu->isar.id_pfr0 = 0x00000131; - cpu->isar.id_pfr1 = 0x10111001; - cpu->isar.id_dfr0 = 0x03010006; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00211040; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01200000; - cpu->isar.id_mmfr3 = 0xf0102211; - cpu->isar.id_mmfr4 = 0x00000010; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232142; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00010142; - cpu->isar.id_isar5 = 0x00010001; + SET_IDREG(isar, ID_PFR0, 0x00000131); + SET_IDREG(isar, ID_PFR1, 0x10111001); + SET_IDREG(isar, ID_DFR0, 0x03010006); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_MMFR0, 0x00211040); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01200000); + SET_IDREG(isar, ID_MMFR3, 0xf0102211); + SET_IDREG(isar, ID_MMFR4, 0x00000010); + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232142); + SET_IDREG(isar, ID_ISAR3, 0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x00010142); + SET_IDREG(isar, ID_ISAR5, 0x00010001); cpu->isar.dbgdidr = 0x77168000; - cpu->clidr = (1 << 27) | (1 << 24) | 0x3; + SET_IDREG(isar, CLIDR, (1 << 27) | (1 << 24) | 0x3); cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ @@ -949,6 +954,7 @@ static void pxa270c5_initfn(Object *obj) static void arm_max_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; /* aarch64_a57_initfn, advertising none of the aarch64 features */ cpu->dtb_compatible = "arm,cortex-a57"; @@ -968,23 +974,23 @@ static void arm_max_initfn(Object *obj) cpu->isar.mvfr2 = 0x00000043; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50838; - cpu->isar.id_pfr0 = 0x00000131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x03010066; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10101105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 
0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_isar6 = 0; + SET_IDREG(isar, ID_PFR0, 0x00000131); + SET_IDREG(isar, ID_PFR1, 0x00011011); + SET_IDREG(isar, ID_DFR0, 0x03010066); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_MMFR0, 0x10101105); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01260000); + SET_IDREG(isar, ID_MMFR3, 0x02102211); + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232042); + SET_IDREG(isar, ID_ISAR3, 0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x00011142); + SET_IDREG(isar, ID_ISAR5, 0x00011121); + SET_IDREG(isar, ID_ISAR6, 0); cpu->isar.reset_pmcr_el0 = 0x41013000; - cpu->clidr = 0x0a200023; + SET_IDREG(isar, CLIDR, 0x0a200023); cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */ @@ -1026,19 +1032,31 @@ static const ARMCPUInfo arm_tcg_cpus[] = { { .name = "ti925t", .initfn = ti925t_initfn }, { .name = "sa1100", .initfn = sa1100_initfn }, { .name = "sa1110", .initfn = sa1110_initfn }, - { .name = "pxa250", .initfn = pxa250_initfn }, - { .name = "pxa255", .initfn = pxa255_initfn }, - { .name = "pxa260", .initfn = pxa260_initfn }, - { .name = "pxa261", .initfn = pxa261_initfn }, - { .name = "pxa262", .initfn = pxa262_initfn }, + { .name = "pxa250", .initfn = pxa250_initfn, + .deprecation_note = "iwMMXt CPUs are no longer supported", }, + { .name = "pxa255", .initfn = pxa255_initfn, + .deprecation_note = "iwMMXt CPUs are no longer supported", }, + { .name = "pxa260", .initfn = pxa260_initfn, + .deprecation_note = "iwMMXt CPUs are no longer supported", }, + { .name = "pxa261", .initfn = pxa261_initfn, + .deprecation_note = "iwMMXt CPUs are no longer supported", }, + { .name = "pxa262", .initfn = pxa262_initfn, + .deprecation_note = "iwMMXt CPUs are no longer supported", }, /* "pxa270" is an alias for "pxa270-a0" */ - { .name = "pxa270", .initfn = pxa270a0_initfn }, - { .name = "pxa270-a0", .initfn = pxa270a0_initfn }, - { .name = "pxa270-a1", .initfn = pxa270a1_initfn }, - { .name = "pxa270-b0", .initfn = pxa270b0_initfn }, - { .name = "pxa270-b1", .initfn = pxa270b1_initfn }, - { .name = "pxa270-c0", .initfn = pxa270c0_initfn }, - { .name = "pxa270-c5", .initfn = pxa270c5_initfn }, + { .name = "pxa270", .initfn = pxa270a0_initfn, + .deprecation_note = "iwMMXt CPUs are no longer supported", }, + { .name = "pxa270-a0", .initfn = pxa270a0_initfn, + .deprecation_note = "iwMMXt CPUs are no longer supported", }, + { .name = "pxa270-a1", .initfn = pxa270a1_initfn, + .deprecation_note = "iwMMXt CPUs are no longer supported", }, + { .name = "pxa270-b0", .initfn = pxa270b0_initfn, + .deprecation_note = "iwMMXt CPUs are no longer supported", }, + { .name = "pxa270-b1", .initfn = pxa270b1_initfn, + .deprecation_note = "iwMMXt CPUs are no longer supported", }, + { .name = "pxa270-c0", .initfn = pxa270c0_initfn, + .deprecation_note = "iwMMXt CPUs are no longer supported", }, + { .name = "pxa270-c5", .initfn = pxa270c5_initfn, + .deprecation_note = "iwMMXt CPUs are no longer supported", }, #ifndef TARGET_AARCH64 { .name = "max", .initfn = arm_max_initfn }, #endif diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c index fe232eb3069..35cddbafa4c 100644 --- a/target/arm/tcg/cpu64.c +++ b/target/arm/tcg/cpu64.c @@ -29,35 +29,10 @@ #include "cpu-features.h" 
#include "cpregs.h" -static uint64_t make_ccsidr64(unsigned assoc, unsigned linesize, - unsigned cachesize) -{ - unsigned lg_linesize = ctz32(linesize); - unsigned sets; - - /* - * The 64-bit CCSIDR_EL1 format is: - * [55:32] number of sets - 1 - * [23:3] associativity - 1 - * [2:0] log2(linesize) - 4 - * so 0 == 16 bytes, 1 == 32 bytes, 2 == 64 bytes, etc - */ - assert(assoc != 0); - assert(is_power_of_2(linesize)); - assert(lg_linesize >= 4 && lg_linesize <= 7 + 4); - - /* sets * associativity * linesize == cachesize. */ - sets = cachesize / (assoc * linesize); - assert(cachesize % (assoc * linesize) == 0); - - return ((uint64_t)(sets - 1) << 32) - | ((assoc - 1) << 3) - | (lg_linesize - 4); -} - static void aarch64_a35_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,cortex-a35"; set_feature(&cpu->env, ARM_FEATURE_V8); @@ -74,29 +49,29 @@ static void aarch64_a35_initfn(Object *obj) cpu->midr = 0x411fd040; cpu->revidr = 0; cpu->ctr = 0x84448004; - cpu->isar.id_pfr0 = 0x00000131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x03010066; - cpu->id_afr0 = 0; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->isar.id_aa64pfr1 = 0; - cpu->isar.id_aa64dfr0 = 0x10305106; - cpu->isar.id_aa64dfr1 = 0; - cpu->isar.id_aa64isar0 = 0x00011120; - cpu->isar.id_aa64isar1 = 0; - cpu->isar.id_aa64mmfr0 = 0x00101122; - cpu->isar.id_aa64mmfr1 = 0; - cpu->clidr = 0x0a200023; + SET_IDREG(isar, ID_PFR0, 0x00000131); + SET_IDREG(isar, ID_PFR1, 0x00011011); + SET_IDREG(isar, ID_DFR0, 0x03010066); + SET_IDREG(isar, ID_AFR0, 0); + SET_IDREG(isar, ID_MMFR0, 0x10201105); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01260000); + SET_IDREG(isar, ID_MMFR3, 0x02102211); + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232042); + SET_IDREG(isar, ID_ISAR3, 0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x00011142); + SET_IDREG(isar, ID_ISAR5, 0x00011121); + SET_IDREG(isar, ID_AA64PFR0, 0x00002222); + SET_IDREG(isar, ID_AA64PFR1, 0); + SET_IDREG(isar, ID_AA64DFR0, 0x10305106); + SET_IDREG(isar, ID_AA64DFR1, 0); + SET_IDREG(isar, ID_AA64ISAR0, 0x00011120); + SET_IDREG(isar, ID_AA64ISAR1, 0); + SET_IDREG(isar, ID_AA64MMFR0, 0x00101122); + SET_IDREG(isar, ID_AA64MMFR1, 0); + SET_IDREG(isar, CLIDR, 0x0a200023); cpu->dcz_blocksize = 4; /* From B2.4 AArch64 Virtual Memory control registers */ @@ -106,9 +81,12 @@ static void aarch64_a35_initfn(Object *obj) cpu->isar.reset_pmcr_el0 = 0x410a3000; /* From B2.29 Cache ID registers */ - cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ - cpu->ccsidr[2] = 0x703fe03a; /* 512KB L2 cache */ + /* 32KB L1 dcache */ + cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7); + /* 32KB L1 icache */ + cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 2); + /* 512KB L2 cache */ + cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 512 * KiB, 7); /* From B3.5 VGIC Type register */ cpu->gic_num_lrs = 4; @@ -180,11 +158,8 @@ static bool cpu_arm_get_rme(Object *obj, Error **errp) static void cpu_arm_set_rme(Object 
*obj, bool value, Error **errp) { ARMCPU *cpu = ARM_CPU(obj); - uint64_t t; - t = cpu->isar.id_aa64pfr0; - t = FIELD_DP64(t, ID_AA64PFR0, RME, value); - cpu->isar.id_aa64pfr0 = t; + FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, RME, value); } static void cpu_max_set_l0gptsz(Object *obj, Visitor *v, const char *name, @@ -221,12 +196,13 @@ static void cpu_max_get_l0gptsz(Object *obj, Visitor *v, const char *name, visit_type_uint32(v, name, &value, errp); } -static Property arm_cpu_lpa2_property = +static const Property arm_cpu_lpa2_property = DEFINE_PROP_BOOL("lpa2", ARMCPU, prop_lpa2, true); static void aarch64_a55_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,cortex-a55"; set_feature(&cpu->env, ARM_FEATURE_V8); @@ -240,41 +216,44 @@ static void aarch64_a55_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_PMU); /* Ordered by B2.4 AArch64 registers by functional group */ - cpu->clidr = 0x82000023; + SET_IDREG(isar, CLIDR, 0x82000023); cpu->ctr = 0x84448004; /* L1Ip = VIPT */ cpu->dcz_blocksize = 4; /* 64 bytes */ - cpu->isar.id_aa64dfr0 = 0x0000000010305408ull; - cpu->isar.id_aa64isar0 = 0x0000100010211120ull; - cpu->isar.id_aa64isar1 = 0x0000000000100001ull; - cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull; - cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; - cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull; - cpu->isar.id_aa64pfr0 = 0x0000000010112222ull; - cpu->isar.id_aa64pfr1 = 0x0000000000000010ull; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_dfr0 = 0x04010088; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x01011121; - cpu->isar.id_isar6 = 0x00000010; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02122211; - cpu->isar.id_mmfr4 = 0x00021110; - cpu->isar.id_pfr0 = 0x10010131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_pfr2 = 0x00000011; + SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull); + SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull); + SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull); + SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101122ull); + SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull); + SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull); + SET_IDREG(isar, ID_AA64PFR0, 0x0000000010112222ull); + SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_DFR0, 0x04010088); + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232042); + SET_IDREG(isar, ID_ISAR3, 0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x00011142); + SET_IDREG(isar, ID_ISAR5, 0x01011121); + SET_IDREG(isar, ID_ISAR6, 0x00000010); + SET_IDREG(isar, ID_MMFR0, 0x10201105); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01260000); + SET_IDREG(isar, ID_MMFR3, 0x02122211); + SET_IDREG(isar, ID_MMFR4, 0x00021110); + SET_IDREG(isar, ID_PFR0, 0x10010131); + SET_IDREG(isar, ID_PFR1, 0x00011011); + SET_IDREG(isar, ID_PFR2, 0x00000011); cpu->midr = 0x412FD050; /* r2p0 */ cpu->revidr = 0; /* From B2.23 CCSIDR_EL1 */ - cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ - cpu->ccsidr[1] = 0x200fe01a; /* 32KB L1 icache */ - cpu->ccsidr[2] = 0x703fe07a; /* 512KB L2 cache */ + /* 32KB L1 dcache */ + cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7); + /* 
32KB L1 icache */ + cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 2); + /* 512KB L2 cache */ + cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 512 * KiB, 7); /* From B2.96 SCTLR_EL3 */ cpu->reset_sctlr = 0x30c50838; @@ -296,6 +275,7 @@ static void aarch64_a55_initfn(Object *obj) static void aarch64_a72_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,cortex-a72"; set_feature(&cpu->env, ARM_FEATURE_V8); @@ -315,32 +295,35 @@ static void aarch64_a72_initfn(Object *obj) cpu->isar.mvfr2 = 0x00000043; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50838; - cpu->isar.id_pfr0 = 0x00000131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x03010066; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->isar.id_aa64dfr0 = 0x10305106; - cpu->isar.id_aa64isar0 = 0x00011120; - cpu->isar.id_aa64mmfr0 = 0x00001124; + SET_IDREG(isar, ID_PFR0, 0x00000131); + SET_IDREG(isar, ID_PFR1, 0x00011011); + SET_IDREG(isar, ID_DFR0, 0x03010066); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_MMFR0, 0x10201105); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01260000); + SET_IDREG(isar, ID_MMFR3, 0x02102211); + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232042); + SET_IDREG(isar, ID_ISAR3, 0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x00011142); + SET_IDREG(isar, ID_ISAR5, 0x00011121); + SET_IDREG(isar, ID_AA64PFR0, 0x00002222); + SET_IDREG(isar, ID_AA64DFR0, 0x10305106); + SET_IDREG(isar, ID_AA64ISAR0, 0x00011120); + SET_IDREG(isar, ID_AA64MMFR0, 0x00001124); cpu->isar.dbgdidr = 0x3516d000; cpu->isar.dbgdevid = 0x01110f13; cpu->isar.dbgdevid1 = 0x2; cpu->isar.reset_pmcr_el0 = 0x41023000; - cpu->clidr = 0x0a200023; - cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ - cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */ + SET_IDREG(isar, CLIDR, 0x0a200023); + /* 32KB L1 dcache */ + cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7); + /* 48KB L1 dcache */ + cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 3, 64, 48 * KiB, 2); + /* 1MB L2 cache */ + cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 64, 1 * MiB, 7); cpu->dcz_blocksize = 4; /* 64 bytes */ cpu->gic_num_lrs = 4; cpu->gic_vpribits = 5; @@ -352,6 +335,7 @@ static void aarch64_a72_initfn(Object *obj) static void aarch64_a76_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,cortex-a76"; set_feature(&cpu->env, ARM_FEATURE_V8); @@ -365,41 +349,44 @@ static void aarch64_a76_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_PMU); /* Ordered by B2.4 AArch64 registers by functional group */ - cpu->clidr = 0x82000023; + SET_IDREG(isar, CLIDR, 0x82000023); cpu->ctr = 0x8444C004; cpu->dcz_blocksize = 4; - cpu->isar.id_aa64dfr0 = 0x0000000010305408ull; - cpu->isar.id_aa64isar0 = 0x0000100010211120ull; - cpu->isar.id_aa64isar1 = 0x0000000000100001ull; - cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull; - cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; - 
cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull; - cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */ - cpu->isar.id_aa64pfr1 = 0x0000000000000010ull; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_dfr0 = 0x04010088; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00010142; - cpu->isar.id_isar5 = 0x01011121; - cpu->isar.id_isar6 = 0x00000010; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02122211; - cpu->isar.id_mmfr4 = 0x00021110; - cpu->isar.id_pfr0 = 0x10010131; - cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */ - cpu->isar.id_pfr2 = 0x00000011; + SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull); + SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull); + SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull); + SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101122ull); + SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull); + SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull); + SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */ + SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_DFR0, 0x04010088); + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232042); + SET_IDREG(isar, ID_ISAR3, 0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x00010142); + SET_IDREG(isar, ID_ISAR5, 0x01011121); + SET_IDREG(isar, ID_ISAR6, 0x00000010); + SET_IDREG(isar, ID_MMFR0, 0x10201105); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01260000); + SET_IDREG(isar, ID_MMFR3, 0x02122211); + SET_IDREG(isar, ID_MMFR4, 0x00021110); + SET_IDREG(isar, ID_PFR0, 0x10010131); + SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */ + SET_IDREG(isar, ID_PFR2, 0x00000011); cpu->midr = 0x414fd0b1; /* r4p1 */ cpu->revidr = 0; /* From B2.18 CCSIDR_EL1 */ - cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */ - cpu->ccsidr[2] = 0x707fe03a; /* 512KB L2 cache */ + /* 64KB L1 dcache */ + cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 7); + /* 64KB L1 icache */ + cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 2); + /* 512KB L2 cache */ + cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 8, 64, 512 * KiB, 7); /* From B2.93 SCTLR_EL3 */ cpu->reset_sctlr = 0x30c50838; @@ -422,6 +409,7 @@ static void aarch64_a76_initfn(Object *obj) static void aarch64_a64fx_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,a64fx"; set_feature(&cpu->env, ARM_FEATURE_V8); @@ -436,22 +424,25 @@ static void aarch64_a64fx_initfn(Object *obj) cpu->revidr = 0x00000000; cpu->ctr = 0x86668006; cpu->reset_sctlr = 0x30000180; - cpu->isar.id_aa64pfr0 = 0x0000000101111111; /* No RAS Extensions */ - cpu->isar.id_aa64pfr1 = 0x0000000000000000; - cpu->isar.id_aa64dfr0 = 0x0000000010305408; - cpu->isar.id_aa64dfr1 = 0x0000000000000000; - cpu->id_aa64afr0 = 0x0000000000000000; - cpu->id_aa64afr1 = 0x0000000000000000; - cpu->isar.id_aa64mmfr0 = 0x0000000000001122; - cpu->isar.id_aa64mmfr1 = 0x0000000011212100; - cpu->isar.id_aa64mmfr2 = 0x0000000000001011; - cpu->isar.id_aa64isar0 = 0x0000000010211120; - cpu->isar.id_aa64isar1 = 0x0000000000010001; - cpu->isar.id_aa64zfr0 = 0x0000000000000000; - cpu->clidr = 
0x0000000080000023; - cpu->ccsidr[0] = 0x7007e01c; /* 64KB L1 dcache */ - cpu->ccsidr[1] = 0x2007e01c; /* 64KB L1 icache */ - cpu->ccsidr[2] = 0x70ffe07c; /* 8MB L2 cache */ + SET_IDREG(isar, ID_AA64PFR0, 0x0000000101111111); /* No RAS Extensions */ + SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000000); + SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408); + SET_IDREG(isar, ID_AA64DFR1, 0x0000000000000000); + SET_IDREG(isar, ID_AA64AFR0, 0x0000000000000000); + SET_IDREG(isar, ID_AA64AFR1, 0x0000000000000000); + SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000001122); + SET_IDREG(isar, ID_AA64MMFR1, 0x0000000011212100); + SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011); + SET_IDREG(isar, ID_AA64ISAR0, 0x0000000010211120); + SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000010001); + SET_IDREG(isar, ID_AA64ZFR0, 0x0000000000000000); + SET_IDREG(isar, CLIDR, 0x0000000080000023); + /* 64KB L1 dcache */ + cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 256, 64 * KiB, 7); + /* 64KB L1 icache */ + cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 256, 64 * KiB, 2); + /* 8MB L2 cache */ + cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 16, 256, 8 * MiB, 7); cpu->dcz_blocksize = 6; /* 256 bytes */ cpu->gic_num_lrs = 4; cpu->gic_vpribits = 5; @@ -592,6 +583,7 @@ static void define_neoverse_v1_cp_reginfo(ARMCPU *cpu) static void aarch64_neoverse_n1_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,neoverse-n1"; set_feature(&cpu->env, ARM_FEATURE_V8); @@ -605,41 +597,44 @@ static void aarch64_neoverse_n1_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_PMU); /* Ordered by B2.4 AArch64 registers by functional group */ - cpu->clidr = 0x82000023; + SET_IDREG(isar, CLIDR, 0x82000023); cpu->ctr = 0x8444c004; cpu->dcz_blocksize = 4; - cpu->isar.id_aa64dfr0 = 0x0000000110305408ull; - cpu->isar.id_aa64isar0 = 0x0000100010211120ull; - cpu->isar.id_aa64isar1 = 0x0000000000100001ull; - cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull; - cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; - cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull; - cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */ - cpu->isar.id_aa64pfr1 = 0x0000000000000020ull; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_dfr0 = 0x04010088; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00010142; - cpu->isar.id_isar5 = 0x01011121; - cpu->isar.id_isar6 = 0x00000010; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02122211; - cpu->isar.id_mmfr4 = 0x00021110; - cpu->isar.id_pfr0 = 0x10010131; - cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */ - cpu->isar.id_pfr2 = 0x00000011; + SET_IDREG(isar, ID_AA64DFR0, 0x0000000110305408ull); + SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull); + SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull); + SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101125ull); + SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull); + SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull); + SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */ + SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000020ull); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_DFR0, 0x04010088); + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232042); + SET_IDREG(isar, ID_ISAR3, 
0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x00010142); + SET_IDREG(isar, ID_ISAR5, 0x01011121); + SET_IDREG(isar, ID_ISAR6, 0x00000010); + SET_IDREG(isar, ID_MMFR0, 0x10201105); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01260000); + SET_IDREG(isar, ID_MMFR3, 0x02122211); + SET_IDREG(isar, ID_MMFR4, 0x00021110); + SET_IDREG(isar, ID_PFR0, 0x10010131); + SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */ + SET_IDREG(isar, ID_PFR2, 0x00000011); cpu->midr = 0x414fd0c1; /* r4p1 */ cpu->revidr = 0; /* From B2.23 CCSIDR_EL1 */ - cpu->ccsidr[0] = 0x701fe01a; /* 64KB L1 dcache */ - cpu->ccsidr[1] = 0x201fe01a; /* 64KB L1 icache */ - cpu->ccsidr[2] = 0x70ffe03a; /* 1MB L2 cache */ + /* 64KB L1 dcache */ + cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 7); + /* 64KB L1 icache */ + cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 2); + /* 1MB L2 dcache */ + cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 8, 64, 1 * MiB, 7); /* From B2.98 SCTLR_EL3 */ cpu->reset_sctlr = 0x30c50838; @@ -664,6 +659,7 @@ static void aarch64_neoverse_n1_initfn(Object *obj) static void aarch64_neoverse_v1_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,neoverse-v1"; set_feature(&cpu->env, ARM_FEATURE_V8); @@ -677,37 +673,37 @@ static void aarch64_neoverse_v1_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_PMU); /* Ordered by 3.2.4 AArch64 registers by functional group */ - cpu->clidr = 0x82000023; + SET_IDREG(isar, CLIDR, 0x82000023); cpu->ctr = 0xb444c004; /* With DIC and IDC set */ cpu->dcz_blocksize = 4; - cpu->id_aa64afr0 = 0x00000000; - cpu->id_aa64afr1 = 0x00000000; - cpu->isar.id_aa64dfr0 = 0x000001f210305519ull; - cpu->isar.id_aa64dfr1 = 0x00000000; - cpu->isar.id_aa64isar0 = 0x1011111110212120ull; /* with FEAT_RNG */ - cpu->isar.id_aa64isar1 = 0x0111000001211032ull; - cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull; - cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; - cpu->isar.id_aa64mmfr2 = 0x0220011102101011ull; - cpu->isar.id_aa64pfr0 = 0x1101110120111112ull; /* GIC filled in later */ - cpu->isar.id_aa64pfr1 = 0x0000000000000020ull; - cpu->id_afr0 = 0x00000000; - cpu->isar.id_dfr0 = 0x15011099; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00010142; - cpu->isar.id_isar5 = 0x11011121; - cpu->isar.id_isar6 = 0x01100111; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02122211; - cpu->isar.id_mmfr4 = 0x01021110; - cpu->isar.id_pfr0 = 0x21110131; - cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */ - cpu->isar.id_pfr2 = 0x00000011; + SET_IDREG(isar, ID_AA64AFR0, 0x00000000); + SET_IDREG(isar, ID_AA64AFR1, 0x00000000); + SET_IDREG(isar, ID_AA64DFR0, 0x000001f210305519ull); + SET_IDREG(isar, ID_AA64DFR1, 0x00000000); + SET_IDREG(isar, ID_AA64ISAR0, 0x1011111110212120ull); /* with FEAT_RNG */ + SET_IDREG(isar, ID_AA64ISAR1, 0x0011000001211032ull); + SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101125ull); + SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull); + SET_IDREG(isar, ID_AA64MMFR2, 0x0220011102101011ull); + SET_IDREG(isar, ID_AA64PFR0, 0x1101110120111112ull); /* GIC filled in later */ + SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000020ull); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_DFR0, 0x15011099); + SET_IDREG(isar, ID_ISAR0, 
0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232042); + SET_IDREG(isar, ID_ISAR3, 0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x00010142); + SET_IDREG(isar, ID_ISAR5, 0x11011121); + SET_IDREG(isar, ID_ISAR6, 0x01100111); + SET_IDREG(isar, ID_MMFR0, 0x10201105); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01260000); + SET_IDREG(isar, ID_MMFR3, 0x02122211); + SET_IDREG(isar, ID_MMFR4, 0x01021110); + SET_IDREG(isar, ID_PFR0, 0x21110131); + SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */ + SET_IDREG(isar, ID_PFR2, 0x00000011); cpu->midr = 0x411FD402; /* r1p2 */ cpu->revidr = 0; @@ -721,9 +717,12 @@ static void aarch64_neoverse_v1_initfn(Object *obj) * L2: 8-way set associative, 64 byte line size, either 512K or 1MB. * L3: No L3 (this matches the CLIDR_EL1 value). */ - cpu->ccsidr[0] = make_ccsidr64(4, 64, 64 * KiB); /* L1 dcache */ - cpu->ccsidr[1] = cpu->ccsidr[0]; /* L1 icache */ - cpu->ccsidr[2] = make_ccsidr64(8, 64, 1 * MiB); /* L2 cache */ + /* 64KB L1 dcache */ + cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 4, 64, 64 * KiB, 0); + /* 64KB L1 icache */ + cpu->ccsidr[1] = cpu->ccsidr[0]; + /* 1MB L2 cache */ + cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 8, 64, 1 * MiB, 0); /* From 3.2.115 SCTLR_EL3 */ cpu->reset_sctlr = 0x30c50838; @@ -740,7 +739,7 @@ static void aarch64_neoverse_v1_initfn(Object *obj) cpu->isar.mvfr2 = 0x00000043; /* From 3.7.5 ID_AA64ZFR0_EL1 */ - cpu->isar.id_aa64zfr0 = 0x0000100000100000; + SET_IDREG(isar, ID_AA64ZFR0, 0x0000100000100000); cpu->sve_vq.supported = (1 << 0) /* 128bit */ | (1 << 1); /* 256bit */ @@ -887,6 +886,7 @@ static const ARMCPRegInfo cortex_a710_cp_reginfo[] = { static void aarch64_a710_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,cortex-a710"; set_feature(&cpu->env, ARM_FEATURE_V8); @@ -902,39 +902,39 @@ static void aarch64_a710_initfn(Object *obj) /* Ordered by Section B.4: AArch64 registers */ cpu->midr = 0x412FD471; /* r2p1 */ cpu->revidr = 0; - cpu->isar.id_pfr0 = 0x21110131; - cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */ - cpu->isar.id_dfr0 = 0x16011099; - cpu->id_afr0 = 0; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02122211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00010142; - cpu->isar.id_isar5 = 0x11011121; /* with Crypto */ - cpu->isar.id_mmfr4 = 0x21021110; - cpu->isar.id_isar6 = 0x01111111; + SET_IDREG(isar, ID_PFR0, 0x21110131); + SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */ + SET_IDREG(isar, ID_DFR0, 0x16011099); + SET_IDREG(isar, ID_AFR0, 0); + SET_IDREG(isar, ID_MMFR0, 0x10201105); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01260000); + SET_IDREG(isar, ID_MMFR3, 0x02122211); + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232042); + SET_IDREG(isar, ID_ISAR3, 0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x00010142); + SET_IDREG(isar, ID_ISAR5, 0x11011121); /* with Crypto */ + SET_IDREG(isar, ID_MMFR4, 0x21021110); + SET_IDREG(isar, ID_ISAR6, 0x01111111); cpu->isar.mvfr0 = 0x10110222; cpu->isar.mvfr1 = 0x13211111; cpu->isar.mvfr2 = 0x00000043; - cpu->isar.id_pfr2 = 0x00000011; - cpu->isar.id_aa64pfr0 = 0x1201111120111112ull; /* GIC filled in later 
*/ - cpu->isar.id_aa64pfr1 = 0x0000000000000221ull; - cpu->isar.id_aa64zfr0 = 0x0000110100110021ull; /* with Crypto */ - cpu->isar.id_aa64dfr0 = 0x000011f010305619ull; - cpu->isar.id_aa64dfr1 = 0; - cpu->id_aa64afr0 = 0; - cpu->id_aa64afr1 = 0; - cpu->isar.id_aa64isar0 = 0x0221111110212120ull; /* with Crypto */ - cpu->isar.id_aa64isar1 = 0x0010111101211052ull; - cpu->isar.id_aa64mmfr0 = 0x0000022200101122ull; - cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; - cpu->isar.id_aa64mmfr2 = 0x1221011110101011ull; - cpu->clidr = 0x0000001482000023ull; + SET_IDREG(isar, ID_PFR2, 0x00000011); + SET_IDREG(isar, ID_AA64PFR0, 0x1201111120111112ull); /* GIC filled in later */ + SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000221ull); + SET_IDREG(isar, ID_AA64ZFR0, 0x0000110100110021ull); /* with Crypto */ + SET_IDREG(isar, ID_AA64DFR0, 0x000011f010305619ull); + SET_IDREG(isar, ID_AA64DFR1, 0); + SET_IDREG(isar, ID_AA64AFR0, 0); + SET_IDREG(isar, ID_AA64AFR1, 0); + SET_IDREG(isar, ID_AA64ISAR0, 0x0221111110212120ull); /* with Crypto */ + SET_IDREG(isar, ID_AA64ISAR1, 0x0010111101211052ull); + SET_IDREG(isar, ID_AA64MMFR0, 0x0000022200101122ull); + SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull); + SET_IDREG(isar, ID_AA64MMFR2, 0x1221011110101011ull); + SET_IDREG(isar, CLIDR, 0x0000001482000023ull); cpu->gm_blocksize = 4; cpu->ctr = 0x000000049444c004ull; cpu->dcz_blocksize = 4; @@ -959,9 +959,12 @@ static void aarch64_a710_initfn(Object *obj) * L1: 4-way set associative 64-byte line size, total either 32K or 64K. * L2: 8-way set associative 64 byte line size, total either 256K or 512K. */ - cpu->ccsidr[0] = make_ccsidr64(4, 64, 64 * KiB); /* L1 dcache */ - cpu->ccsidr[1] = cpu->ccsidr[0]; /* L1 icache */ - cpu->ccsidr[2] = make_ccsidr64(8, 64, 512 * KiB); /* L2 cache */ + /* L1 dcache */ + cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 4, 64, 64 * KiB, 0); + /* L1 icache */ + cpu->ccsidr[1] = cpu->ccsidr[0]; + /* L2 cache */ + cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 8, 64, 512 * KiB, 0); /* FIXME: Not documented -- copied from neoverse-v1 */ cpu->reset_sctlr = 0x30c50838; @@ -985,6 +988,7 @@ static const ARMCPRegInfo neoverse_n2_cp_reginfo[] = { static void aarch64_neoverse_n2_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; cpu->dtb_compatible = "arm,neoverse-n2"; set_feature(&cpu->env, ARM_FEATURE_V8); @@ -1000,39 +1004,39 @@ static void aarch64_neoverse_n2_initfn(Object *obj) /* Ordered by Section B.5: AArch64 ID registers */ cpu->midr = 0x410FD493; /* r0p3 */ cpu->revidr = 0; - cpu->isar.id_pfr0 = 0x21110131; - cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */ - cpu->isar.id_dfr0 = 0x16011099; - cpu->id_afr0 = 0; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02122211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00010142; - cpu->isar.id_isar5 = 0x11011121; /* with Crypto */ - cpu->isar.id_mmfr4 = 0x01021110; - cpu->isar.id_isar6 = 0x01111111; + SET_IDREG(isar, ID_PFR0, 0x21110131); + SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */ + SET_IDREG(isar, ID_DFR0, 0x16011099); + SET_IDREG(isar, ID_AFR0, 0); + SET_IDREG(isar, ID_MMFR0, 0x10201105); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01260000); + SET_IDREG(isar, ID_MMFR3, 0x02122211); + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, 
ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232042); + SET_IDREG(isar, ID_ISAR3, 0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x00010142); + SET_IDREG(isar, ID_ISAR5, 0x11011121); /* with Crypto */ + SET_IDREG(isar, ID_MMFR4, 0x01021110); + SET_IDREG(isar, ID_ISAR6, 0x01111111); cpu->isar.mvfr0 = 0x10110222; cpu->isar.mvfr1 = 0x13211111; cpu->isar.mvfr2 = 0x00000043; - cpu->isar.id_pfr2 = 0x00000011; - cpu->isar.id_aa64pfr0 = 0x1201111120111112ull; /* GIC filled in later */ - cpu->isar.id_aa64pfr1 = 0x0000000000000221ull; - cpu->isar.id_aa64zfr0 = 0x0000110100110021ull; /* with Crypto */ - cpu->isar.id_aa64dfr0 = 0x000011f210305619ull; - cpu->isar.id_aa64dfr1 = 0; - cpu->id_aa64afr0 = 0; - cpu->id_aa64afr1 = 0; - cpu->isar.id_aa64isar0 = 0x1221111110212120ull; /* with Crypto and FEAT_RNG */ - cpu->isar.id_aa64isar1 = 0x0011111101211052ull; - cpu->isar.id_aa64mmfr0 = 0x0000022200101125ull; - cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull; - cpu->isar.id_aa64mmfr2 = 0x1221011112101011ull; - cpu->clidr = 0x0000001482000023ull; + SET_IDREG(isar, ID_PFR2, 0x00000011); + SET_IDREG(isar, ID_AA64PFR0, 0x1201111120111112ull); /* GIC filled in later */ + SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000221ull); + SET_IDREG(isar, ID_AA64ZFR0, 0x0000110100110021ull); /* with Crypto */ + SET_IDREG(isar, ID_AA64DFR0, 0x000011f210305619ull); + SET_IDREG(isar, ID_AA64DFR1, 0); + SET_IDREG(isar, ID_AA64AFR0, 0); + SET_IDREG(isar, ID_AA64AFR1, 0); + SET_IDREG(isar, ID_AA64ISAR0, 0x1221111110212120ull); /* with Crypto and FEAT_RNG */ + SET_IDREG(isar, ID_AA64ISAR1, 0x0011111101211052ull); + SET_IDREG(isar, ID_AA64MMFR0, 0x0000022200101125ull); + SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull); + SET_IDREG(isar, ID_AA64MMFR2, 0x1221011112101011ull); + SET_IDREG(isar, CLIDR, 0x0000001482000023ull); cpu->gm_blocksize = 4; cpu->ctr = 0x00000004b444c004ull; cpu->dcz_blocksize = 4; @@ -1057,10 +1061,12 @@ static void aarch64_neoverse_n2_initfn(Object *obj) * L1: 4-way set associative 64-byte line size, total 64K. * L2: 8-way set associative 64 byte line size, total either 512K or 1024K. */ - cpu->ccsidr[0] = make_ccsidr64(4, 64, 64 * KiB); /* L1 dcache */ - cpu->ccsidr[1] = cpu->ccsidr[0]; /* L1 icache */ - cpu->ccsidr[2] = make_ccsidr64(8, 64, 512 * KiB); /* L2 cache */ - + /* L1 dcache */ + cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 4, 64, 64 * KiB, 0); + /* L1 icache */ + cpu->ccsidr[1] = cpu->ccsidr[0]; + /* L2 cache */ + cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_CCIDX, 8, 64, 512 * KiB, 0); /* FIXME: Not documented -- copied from neoverse-v1 */ cpu->reset_sctlr = 0x30c50838; @@ -1083,6 +1089,7 @@ static void aarch64_neoverse_n2_initfn(Object *obj) void aarch64_max_tcg_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; uint64_t t; uint32_t u; @@ -1118,10 +1125,10 @@ void aarch64_max_tcg_initfn(Object *obj) * We're going to set FEAT_S2FWB, which mandates that CLIDR_EL1.{LoUU,LoUIS} * are zero. 
*/ - u = cpu->clidr; + u = GET_IDREG(isar, CLIDR); u = FIELD_DP32(u, CLIDR_EL1, LOUIS, 0); u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0); - cpu->clidr = u; + SET_IDREG(isar, CLIDR, u); /* * Set CTR_EL0.DIC and IDC to tell the guest it doesnt' need to @@ -1133,7 +1140,7 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, CTR_EL0, DIC, 1); cpu->ctr = t; - t = cpu->isar.id_aa64isar0; + t = GET_IDREG(isar, ID_AA64ISAR0); t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */ t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */ t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* FEAT_SHA512 */ @@ -1148,9 +1155,9 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* FEAT_FlagM2 */ t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */ t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1); /* FEAT_RNG */ - cpu->isar.id_aa64isar0 = t; + SET_IDREG(isar, ID_AA64ISAR0, t); - t = cpu->isar.id_aa64isar1; + t = GET_IDREG(isar, ID_AA64ISAR1); t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); /* FEAT_DPB2 */ t = FIELD_DP64(t, ID_AA64ISAR1, APA, PauthFeat_FPACCOMBINED); t = FIELD_DP64(t, ID_AA64ISAR1, API, 1); @@ -1160,18 +1167,20 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1); /* FEAT_FRINTTS */ t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1); /* FEAT_SB */ t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1); /* FEAT_SPECRES */ - t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1); /* FEAT_BF16 */ + t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 2); /* FEAT_BF16, FEAT_EBF16 */ t = FIELD_DP64(t, ID_AA64ISAR1, DGH, 1); /* FEAT_DGH */ t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */ - cpu->isar.id_aa64isar1 = t; + t = FIELD_DP64(t, ID_AA64ISAR1, XS, 1); /* FEAT_XS */ + SET_IDREG(isar, ID_AA64ISAR1, t); - t = cpu->isar.id_aa64isar2; + t = GET_IDREG(isar, ID_AA64ISAR2); + t = FIELD_DP64(t, ID_AA64ISAR2, RPRES, 1); /* FEAT_RPRES */ t = FIELD_DP64(t, ID_AA64ISAR2, MOPS, 1); /* FEAT_MOPS */ t = FIELD_DP64(t, ID_AA64ISAR2, BC, 1); /* FEAT_HBC */ t = FIELD_DP64(t, ID_AA64ISAR2, WFXT, 2); /* FEAT_WFxT */ - cpu->isar.id_aa64isar2 = t; + SET_IDREG(isar, ID_AA64ISAR2, t); - t = cpu->isar.id_aa64pfr0; + t = GET_IDREG(isar, ID_AA64PFR0); t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); /* FEAT_FP16 */ t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); /* FEAT_FP16 */ t = FIELD_DP64(t, ID_AA64PFR0, RAS, 2); /* FEAT_RASv1p1 + FEAT_DoubleFault */ @@ -1180,9 +1189,9 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */ t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 3); /* FEAT_CSV2_3 */ t = FIELD_DP64(t, ID_AA64PFR0, CSV3, 1); /* FEAT_CSV3 */ - cpu->isar.id_aa64pfr0 = t; + SET_IDREG(isar, ID_AA64PFR0, t); - t = cpu->isar.id_aa64pfr1; + t = GET_IDREG(isar, ID_AA64PFR1); t = FIELD_DP64(t, ID_AA64PFR1, BT, 1); /* FEAT_BTI */ t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2); /* FEAT_SSBS2 */ /* @@ -1192,12 +1201,12 @@ void aarch64_max_tcg_initfn(Object *obj) */ t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */ t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0); /* FEAT_RASv1p1 + FEAT_DoubleFault */ - t = FIELD_DP64(t, ID_AA64PFR1, SME, 1); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64PFR1, SME, 2); /* FEAT_SME2 */ t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_3 */ t = FIELD_DP64(t, ID_AA64PFR1, NMI, 1); /* FEAT_NMI */ - cpu->isar.id_aa64pfr1 = t; + SET_IDREG(isar, ID_AA64PFR1, t); - t = cpu->isar.id_aa64mmfr0; + t = GET_IDREG(isar, ID_AA64MMFR0); t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 6); /* FEAT_LPA: 52 bits */ t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 1); /* 16k pages 
supported */ t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 2); /* 16k stage2 supported */ @@ -1205,9 +1214,9 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 2); /* 4k stage2 supported */ t = FIELD_DP64(t, ID_AA64MMFR0, FGT, 1); /* FEAT_FGT */ t = FIELD_DP64(t, ID_AA64MMFR0, ECV, 2); /* FEAT_ECV */ - cpu->isar.id_aa64mmfr0 = t; + SET_IDREG(isar, ID_AA64MMFR0, t); - t = cpu->isar.id_aa64mmfr1; + t = GET_IDREG(isar, ID_AA64MMFR1); t = FIELD_DP64(t, ID_AA64MMFR1, HAFDBS, 2); /* FEAT_HAFDBS */ t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */ t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); /* FEAT_VHE */ @@ -1217,10 +1226,12 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */ t = FIELD_DP64(t, ID_AA64MMFR1, ETS, 2); /* FEAT_ETS2 */ t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */ + t = FIELD_DP64(t, ID_AA64MMFR1, AFP, 1); /* FEAT_AFP */ t = FIELD_DP64(t, ID_AA64MMFR1, TIDCP1, 1); /* FEAT_TIDCP1 */ - cpu->isar.id_aa64mmfr1 = t; + t = FIELD_DP64(t, ID_AA64MMFR1, CMOW, 1); /* FEAT_CMOW */ + SET_IDREG(isar, ID_AA64MMFR1, t); - t = cpu->isar.id_aa64mmfr2; + t = GET_IDREG(isar, ID_AA64MMFR2); t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* FEAT_TTCNP */ t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); /* FEAT_UAO */ t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1); /* FEAT_IESB */ @@ -1234,39 +1245,43 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */ t = FIELD_DP64(t, ID_AA64MMFR2, EVT, 2); /* FEAT_EVT */ t = FIELD_DP64(t, ID_AA64MMFR2, E0PD, 1); /* FEAT_E0PD */ - cpu->isar.id_aa64mmfr2 = t; + SET_IDREG(isar, ID_AA64MMFR2, t); - t = cpu->isar.id_aa64mmfr3; - t = FIELD_DP64(t, ID_AA64MMFR3, SPEC_FPACC, 1); /* FEAT_FPACC_SPEC */ - cpu->isar.id_aa64mmfr3 = t; + FIELD_DP64_IDREG(isar, ID_AA64MMFR3, SPEC_FPACC, 1); /* FEAT_FPACC_SPEC */ - t = cpu->isar.id_aa64zfr0; - t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1); + t = GET_IDREG(isar, ID_AA64ZFR0); + t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 2); /* FEAT_SVE2p1 */ t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* FEAT_SVE_PMULL128 */ t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1); /* FEAT_SVE_BitPerm */ - t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1); /* FEAT_BF16 */ + t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 2); /* FEAT_BF16, FEAT_EBF16 */ + t = FIELD_DP64(t, ID_AA64ZFR0, B16B16, 1); /* FEAT_SVE_B16B16 */ t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1); /* FEAT_SVE_SHA3 */ t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1); /* FEAT_SVE_SM4 */ t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1); /* FEAT_I8MM */ t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1); /* FEAT_F32MM */ t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1); /* FEAT_F64MM */ - cpu->isar.id_aa64zfr0 = t; + SET_IDREG(isar, ID_AA64ZFR0, t); - t = cpu->isar.id_aa64dfr0; + t = GET_IDREG(isar, ID_AA64DFR0); t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 10); /* FEAT_Debugv8p8 */ t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 6); /* FEAT_PMUv3p5 */ t = FIELD_DP64(t, ID_AA64DFR0, HPMN0, 1); /* FEAT_HPMN0 */ - cpu->isar.id_aa64dfr0 = t; + SET_IDREG(isar, ID_AA64DFR0, t); - t = cpu->isar.id_aa64smfr0; + t = GET_IDREG(isar, ID_AA64SMFR0); t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64SMFR0, BI32I32, 1); /* FEAT_SME2 */ t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1); /* FEAT_SME */ t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1); /* FEAT_SME */ t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64SMFR0, F16F16, 1); /* FEAT_SME_F16F16 */ + t = FIELD_DP64(t, ID_AA64SMFR0, B16B16, 1); 
/* FEAT_SME_B16B16 */ + t = FIELD_DP64(t, ID_AA64SMFR0, I16I32, 5); /* FEAT_SME2 */ t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1); /* FEAT_SME_F64F64 */ t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */ + t = FIELD_DP64(t, ID_AA64SMFR0, SMEVER, 2); /* FEAT_SME2p1 */ t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1); /* FEAT_SME_FA64 */ - cpu->isar.id_aa64smfr0 = t; + SET_IDREG(isar, ID_AA64SMFR0, t); /* Replicate the same data to the 32-bit id registers. */ aa32_max_features(cpu); @@ -1312,7 +1327,7 @@ static void aarch64_cpu_register_types(void) size_t i; for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) { - aarch64_cpu_register(&aarch64_cpus[i]); + arm_cpu_register(&aarch64_cpus[i]); } } diff --git a/target/arm/tcg/crypto_helper.c b/target/arm/tcg/crypto_helper.c index 7cadd61e124..3428bd1bf0b 100644 --- a/target/arm/tcg/crypto_helper.c +++ b/target/arm/tcg/crypto_helper.c @@ -10,14 +10,16 @@ */ #include "qemu/osdep.h" +#include "qemu/bitops.h" -#include "cpu.h" -#include "exec/helper-proto.h" #include "tcg/tcg-gvec-desc.h" #include "crypto/aes-round.h" #include "crypto/sm4.h" #include "vec_internal.h" +#define HELPER_H "tcg/helper.h" +#include "exec/helper-proto.h.inc" + union CRYPTO_STATE { uint8_t bytes[16]; uint32_t words[4]; diff --git a/target/arm/tcg/gengvec.c b/target/arm/tcg/gengvec.c index 56a1dc1f755..01867f8ace3 100644 --- a/target/arm/tcg/gengvec.c +++ b/target/arm/tcg/gengvec.c @@ -88,6 +88,25 @@ GEN_CMP0(gen_gvec_cgt0, TCG_COND_GT) #undef GEN_CMP0 +void gen_gvec_sshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, + int64_t shift, uint32_t opr_sz, uint32_t max_sz) +{ + /* Signed shift out of range results in all-sign-bits */ + shift = MIN(shift, (8 << vece) - 1); + tcg_gen_gvec_sari(vece, rd_ofs, rm_ofs, shift, opr_sz, max_sz); +} + +void gen_gvec_ushr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, + int64_t shift, uint32_t opr_sz, uint32_t max_sz) +{ + /* Unsigned shift out of range results in all-zero-bits */ + if (shift >= (8 << vece)) { + tcg_gen_gvec_dup_imm(vece, rd_ofs, opr_sz, max_sz, 0); + } else { + tcg_gen_gvec_shri(vece, rd_ofs, rm_ofs, shift, opr_sz, max_sz); + } +} + static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) { tcg_gen_vec_sar8i_i64(a, a, shift); @@ -285,7 +304,7 @@ void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) tcg_gen_add_i32(d, d, t); } - void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) +void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) { TCGv_i64 t = tcg_temp_new_i64(); @@ -297,10 +316,9 @@ void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) static void gen_srshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) { TCGv_vec t = tcg_temp_new_vec_matching(d); - TCGv_vec ones = tcg_temp_new_vec_matching(d); + TCGv_vec ones = tcg_constant_vec_matching(d, vece, 1); tcg_gen_shri_vec(vece, t, a, sh - 1); - tcg_gen_dupi_vec(vece, ones, 1); tcg_gen_and_vec(vece, t, t, ones); tcg_gen_sari_vec(vece, d, a, sh); tcg_gen_add_vec(vece, d, d, t); @@ -492,10 +510,9 @@ void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) static void gen_urshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift) { TCGv_vec t = tcg_temp_new_vec_matching(d); - TCGv_vec ones = tcg_temp_new_vec_matching(d); + TCGv_vec ones = tcg_constant_vec_matching(d, vece, 1); tcg_gen_shri_vec(vece, t, a, shift - 1); - tcg_gen_dupi_vec(vece, ones, 1); tcg_gen_and_vec(vece, t, t, ones); tcg_gen_shri_vec(vece, d, a, shift); tcg_gen_add_vec(vece, d, d, t); @@ -685,9 +702,9 @@ static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t 
shift) static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) { TCGv_vec t = tcg_temp_new_vec_matching(d); - TCGv_vec m = tcg_temp_new_vec_matching(d); + int64_t mi = MAKE_64BIT_MASK((8 << vece) - sh, sh); + TCGv_vec m = tcg_constant_vec_matching(d, vece, mi); - tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh)); tcg_gen_shri_vec(vece, t, a, sh); tcg_gen_and_vec(vece, d, d, m); tcg_gen_or_vec(vece, d, d, t); @@ -773,10 +790,9 @@ static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) { TCGv_vec t = tcg_temp_new_vec_matching(d); - TCGv_vec m = tcg_temp_new_vec_matching(d); + TCGv_vec m = tcg_constant_vec_matching(d, vece, MAKE_64BIT_MASK(0, sh)); tcg_gen_shli_vec(vece, t, a, sh); - tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh)); tcg_gen_and_vec(vece, d, d, m); tcg_gen_or_vec(vece, d, d, t); } @@ -1044,14 +1060,13 @@ static void gen_ushl_vec(unsigned vece, TCGv_vec dst, TCGv_vec rval = tcg_temp_new_vec_matching(dst); TCGv_vec lsh = tcg_temp_new_vec_matching(dst); TCGv_vec rsh = tcg_temp_new_vec_matching(dst); - TCGv_vec msk, max; + TCGv_vec max, zero; tcg_gen_neg_vec(vece, rsh, shift); if (vece == MO_8) { tcg_gen_mov_vec(lsh, shift); } else { - msk = tcg_temp_new_vec_matching(dst); - tcg_gen_dupi_vec(vece, msk, 0xff); + TCGv_vec msk = tcg_constant_vec_matching(dst, vece, 0xff); tcg_gen_and_vec(vece, lsh, shift, msk); tcg_gen_and_vec(vece, rsh, rsh, msk); } @@ -1064,26 +1079,21 @@ static void gen_ushl_vec(unsigned vece, TCGv_vec dst, tcg_gen_shlv_vec(vece, lval, src, lsh); tcg_gen_shrv_vec(vece, rval, src, rsh); - max = tcg_temp_new_vec_matching(dst); - tcg_gen_dupi_vec(vece, max, 8 << vece); - /* - * The choice of LT (signed) and GEU (unsigned) are biased toward + * The choice of GE (signed) and GEU (unsigned) are biased toward * the instructions of the x86_64 host. For MO_8, the whole byte * is significant so we must use an unsigned compare; otherwise we * have already masked to a byte and so a signed compare works. * Other tcg hosts have a full set of comparisons and do not care. 
*/ + zero = tcg_constant_vec_matching(dst, vece, 0); + max = tcg_constant_vec_matching(dst, vece, 8 << vece); if (vece == MO_8) { - tcg_gen_cmp_vec(TCG_COND_GEU, vece, lsh, lsh, max); - tcg_gen_cmp_vec(TCG_COND_GEU, vece, rsh, rsh, max); - tcg_gen_andc_vec(vece, lval, lval, lsh); - tcg_gen_andc_vec(vece, rval, rval, rsh); + tcg_gen_cmpsel_vec(TCG_COND_GEU, vece, lval, lsh, max, zero, lval); + tcg_gen_cmpsel_vec(TCG_COND_GEU, vece, rval, rsh, max, zero, rval); } else { - tcg_gen_cmp_vec(TCG_COND_LT, vece, lsh, lsh, max); - tcg_gen_cmp_vec(TCG_COND_LT, vece, rsh, rsh, max); - tcg_gen_and_vec(vece, lval, lval, lsh); - tcg_gen_and_vec(vece, rval, rval, rsh); + tcg_gen_cmpsel_vec(TCG_COND_GE, vece, lval, lsh, max, zero, lval); + tcg_gen_cmpsel_vec(TCG_COND_GE, vece, rval, rsh, max, zero, rval); } tcg_gen_or_vec(vece, dst, lval, rval); } @@ -1093,7 +1103,7 @@ void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, { static const TCGOpcode vecop_list[] = { INDEX_op_neg_vec, INDEX_op_shlv_vec, - INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0 + INDEX_op_shrv_vec, INDEX_op_cmpsel_vec, 0 }; static const GVecGen3 ops[4] = { { .fniv = gen_ushl_vec, @@ -1169,7 +1179,7 @@ static void gen_sshl_vec(unsigned vece, TCGv_vec dst, TCGv_vec rval = tcg_temp_new_vec_matching(dst); TCGv_vec lsh = tcg_temp_new_vec_matching(dst); TCGv_vec rsh = tcg_temp_new_vec_matching(dst); - TCGv_vec tmp = tcg_temp_new_vec_matching(dst); + TCGv_vec max, zero; /* * Rely on the TCG guarantee that out of range shifts produce @@ -1180,29 +1190,28 @@ static void gen_sshl_vec(unsigned vece, TCGv_vec dst, if (vece == MO_8) { tcg_gen_mov_vec(lsh, shift); } else { - tcg_gen_dupi_vec(vece, tmp, 0xff); - tcg_gen_and_vec(vece, lsh, shift, tmp); - tcg_gen_and_vec(vece, rsh, rsh, tmp); + TCGv_vec msk = tcg_constant_vec_matching(dst, vece, 0xff); + tcg_gen_and_vec(vece, lsh, shift, msk); + tcg_gen_and_vec(vece, rsh, rsh, msk); } /* Bound rsh so out of bound right shift gets -1. */ - tcg_gen_dupi_vec(vece, tmp, (8 << vece) - 1); - tcg_gen_umin_vec(vece, rsh, rsh, tmp); - tcg_gen_cmp_vec(TCG_COND_GT, vece, tmp, lsh, tmp); + max = tcg_constant_vec_matching(dst, vece, (8 << vece) - 1); + tcg_gen_umin_vec(vece, rsh, rsh, max); tcg_gen_shlv_vec(vece, lval, src, lsh); tcg_gen_sarv_vec(vece, rval, src, rsh); /* Select in-bound left shift. */ - tcg_gen_andc_vec(vece, lval, lval, tmp); + zero = tcg_constant_vec_matching(dst, vece, 0); + tcg_gen_cmpsel_vec(TCG_COND_GT, vece, lval, lsh, max, zero, lval); /* Select between left and right shift. 
*/ if (vece == MO_8) { - tcg_gen_dupi_vec(vece, tmp, 0); - tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, rval, lval); + tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, zero, rval, lval); } else { - tcg_gen_dupi_vec(vece, tmp, 0x80); - tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval); + TCGv_vec sgn = tcg_constant_vec_matching(dst, vece, 0x80); + tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, sgn, lval, rval); } } @@ -1211,7 +1220,7 @@ void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, { static const TCGOpcode vecop_list[] = { INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec, - INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0 + INDEX_op_sarv_vec, INDEX_op_cmpsel_vec, 0 }; static const GVecGen3 ops[4] = { { .fniv = gen_sshl_vec, @@ -1304,6 +1313,42 @@ void gen_neon_uqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, opr_sz, max_sz, 0, fns[vece]); } +void gen_neon_sqshli(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + int64_t c, uint32_t opr_sz, uint32_t max_sz) +{ + static gen_helper_gvec_2_ptr * const fns[] = { + gen_helper_neon_sqshli_b, gen_helper_neon_sqshli_h, + gen_helper_neon_sqshli_s, gen_helper_neon_sqshli_d, + }; + tcg_debug_assert(vece <= MO_64); + tcg_debug_assert(c >= 0 && c <= (8 << vece)); + tcg_gen_gvec_2_ptr(rd_ofs, rn_ofs, tcg_env, opr_sz, max_sz, c, fns[vece]); +} + +void gen_neon_uqshli(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + int64_t c, uint32_t opr_sz, uint32_t max_sz) +{ + static gen_helper_gvec_2_ptr * const fns[] = { + gen_helper_neon_uqshli_b, gen_helper_neon_uqshli_h, + gen_helper_neon_uqshli_s, gen_helper_neon_uqshli_d, + }; + tcg_debug_assert(vece <= MO_64); + tcg_debug_assert(c >= 0 && c <= (8 << vece)); + tcg_gen_gvec_2_ptr(rd_ofs, rn_ofs, tcg_env, opr_sz, max_sz, c, fns[vece]); +} + +void gen_neon_sqshlui(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + int64_t c, uint32_t opr_sz, uint32_t max_sz) +{ + static gen_helper_gvec_2_ptr * const fns[] = { + gen_helper_neon_sqshlui_b, gen_helper_neon_sqshlui_h, + gen_helper_neon_sqshlui_s, gen_helper_neon_sqshlui_d, + }; + tcg_debug_assert(vece <= MO_64); + tcg_debug_assert(c >= 0 && c <= (8 << vece)); + tcg_gen_gvec_2_ptr(rd_ofs, rn_ofs, tcg_env, opr_sz, max_sz, c, fns[vece]); +} + void gen_uqadd_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz) { uint64_t max = MAKE_64BIT_MASK(0, 8 << esz); @@ -2313,3 +2358,372 @@ void gen_gvec_urhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, assert(vece <= MO_32); tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]); } + +void gen_gvec_cls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz) +{ + static const GVecGen2 g[] = { + { .fni4 = gen_helper_neon_cls_s8, + .vece = MO_8 }, + { .fni4 = gen_helper_neon_cls_s16, + .vece = MO_16 }, + { .fni4 = tcg_gen_clrsb_i32, + .vece = MO_32 }, + }; + assert(vece <= MO_32); + tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]); +} + +static void gen_clz32_i32(TCGv_i32 d, TCGv_i32 n) +{ + tcg_gen_clzi_i32(d, n, 32); +} + +void gen_gvec_clz(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz) +{ + static const GVecGen2 g[] = { + { .fni4 = gen_helper_neon_clz_u8, + .vece = MO_8 }, + { .fni4 = gen_helper_neon_clz_u16, + .vece = MO_16 }, + { .fni4 = gen_clz32_i32, + .vece = MO_32 }, + }; + assert(vece <= MO_32); + tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]); +} + +void gen_gvec_cnt(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t 
max_sz) +{ + assert(vece == MO_8); + tcg_gen_gvec_2_ool(rd_ofs, rn_ofs, opr_sz, max_sz, 0, + gen_helper_gvec_cnt_b); +} + +void gen_gvec_rbit(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz) +{ + assert(vece == MO_8); + tcg_gen_gvec_2_ool(rd_ofs, rn_ofs, opr_sz, max_sz, 0, + gen_helper_gvec_rbit_b); +} + +void gen_gvec_rev16(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz) +{ + assert(vece == MO_8); + tcg_gen_gvec_rotli(MO_16, rd_ofs, rn_ofs, 8, opr_sz, max_sz); +} + +static void gen_bswap32_i64(TCGv_i64 d, TCGv_i64 n) +{ + tcg_gen_bswap64_i64(d, n); + tcg_gen_rotli_i64(d, d, 32); +} + +void gen_gvec_rev32(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz) +{ + static const GVecGen2 g = { + .fni8 = gen_bswap32_i64, + .fni4 = tcg_gen_bswap32_i32, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_32 + }; + + switch (vece) { + case MO_16: + tcg_gen_gvec_rotli(MO_32, rd_ofs, rn_ofs, 16, opr_sz, max_sz); + break; + case MO_8: + tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g); + break; + default: + g_assert_not_reached(); + } +} + +void gen_gvec_rev64(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz) +{ + static const GVecGen2 g[] = { + { .fni8 = tcg_gen_bswap64_i64, + .vece = MO_64 }, + { .fni8 = tcg_gen_hswap_i64, + .vece = MO_64 }, + }; + + switch (vece) { + case MO_32: + tcg_gen_gvec_rotli(MO_64, rd_ofs, rn_ofs, 32, opr_sz, max_sz); + break; + case MO_8: + case MO_16: + tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]); + break; + default: + g_assert_not_reached(); + } +} + +static void gen_saddlp_vec(unsigned vece, TCGv_vec d, TCGv_vec n) +{ + int half = 4 << vece; + TCGv_vec t = tcg_temp_new_vec_matching(d); + + tcg_gen_shli_vec(vece, t, n, half); + tcg_gen_sari_vec(vece, d, n, half); + tcg_gen_sari_vec(vece, t, t, half); + tcg_gen_add_vec(vece, d, d, t); +} + +static void gen_saddlp_s_i64(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_i64 t = tcg_temp_new_i64(); + + tcg_gen_ext32s_i64(t, n); + tcg_gen_sari_i64(d, n, 32); + tcg_gen_add_i64(d, d, t); +} + +void gen_gvec_saddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_sari_vec, INDEX_op_shli_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen2 g[] = { + { .fniv = gen_saddlp_vec, + .fni8 = gen_helper_neon_addlp_s8, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fniv = gen_saddlp_vec, + .fni8 = gen_helper_neon_addlp_s16, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fniv = gen_saddlp_vec, + .fni8 = gen_saddlp_s_i64, + .opt_opc = vecop_list, + .vece = MO_64 }, + }; + assert(vece <= MO_32); + tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]); +} + +static void gen_sadalp_vec(unsigned vece, TCGv_vec d, TCGv_vec n) +{ + TCGv_vec t = tcg_temp_new_vec_matching(d); + + gen_saddlp_vec(vece, t, n); + tcg_gen_add_vec(vece, d, d, t); +} + +static void gen_sadalp_b_i64(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_i64 t = tcg_temp_new_i64(); + + gen_helper_neon_addlp_s8(t, n); + tcg_gen_vec_add16_i64(d, d, t); +} + +static void gen_sadalp_h_i64(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_i64 t = tcg_temp_new_i64(); + + gen_helper_neon_addlp_s16(t, n); + tcg_gen_vec_add32_i64(d, d, t); +} + +static void gen_sadalp_s_i64(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_i64 t = tcg_temp_new_i64(); + + gen_saddlp_s_i64(t, n); + tcg_gen_add_i64(d, d, t); +} + +void gen_gvec_sadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t 
opr_sz, uint32_t max_sz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_sari_vec, INDEX_op_shli_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen2 g[] = { + { .fniv = gen_sadalp_vec, + .fni8 = gen_sadalp_b_i64, + .opt_opc = vecop_list, + .load_dest = true, + .vece = MO_16 }, + { .fniv = gen_sadalp_vec, + .fni8 = gen_sadalp_h_i64, + .opt_opc = vecop_list, + .load_dest = true, + .vece = MO_32 }, + { .fniv = gen_sadalp_vec, + .fni8 = gen_sadalp_s_i64, + .opt_opc = vecop_list, + .load_dest = true, + .vece = MO_64 }, + }; + assert(vece <= MO_32); + tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]); +} + +static void gen_uaddlp_vec(unsigned vece, TCGv_vec d, TCGv_vec n) +{ + int half = 4 << vece; + TCGv_vec t = tcg_temp_new_vec_matching(d); + TCGv_vec m = tcg_constant_vec_matching(d, vece, MAKE_64BIT_MASK(0, half)); + + tcg_gen_shri_vec(vece, t, n, half); + tcg_gen_and_vec(vece, d, n, m); + tcg_gen_add_vec(vece, d, d, t); +} + +static void gen_uaddlp_b_i64(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0xff)); + + tcg_gen_shri_i64(t, n, 8); + tcg_gen_and_i64(d, n, m); + tcg_gen_and_i64(t, t, m); + /* No carry between widened unsigned elements. */ + tcg_gen_add_i64(d, d, t); +} + +static void gen_uaddlp_h_i64(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_i64 t = tcg_temp_new_i64(); + TCGv_i64 m = tcg_constant_i64(dup_const(MO_32, 0xffff)); + + tcg_gen_shri_i64(t, n, 16); + tcg_gen_and_i64(d, n, m); + tcg_gen_and_i64(t, t, m); + /* No carry between widened unsigned elements. */ + tcg_gen_add_i64(d, d, t); +} + +static void gen_uaddlp_s_i64(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_i64 t = tcg_temp_new_i64(); + + tcg_gen_ext32u_i64(t, n); + tcg_gen_shri_i64(d, n, 32); + tcg_gen_add_i64(d, d, t); +} + +void gen_gvec_uaddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shri_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen2 g[] = { + { .fniv = gen_uaddlp_vec, + .fni8 = gen_uaddlp_b_i64, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fniv = gen_uaddlp_vec, + .fni8 = gen_uaddlp_h_i64, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fniv = gen_uaddlp_vec, + .fni8 = gen_uaddlp_s_i64, + .opt_opc = vecop_list, + .vece = MO_64 }, + }; + assert(vece <= MO_32); + tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]); +} + +static void gen_uadalp_vec(unsigned vece, TCGv_vec d, TCGv_vec n) +{ + TCGv_vec t = tcg_temp_new_vec_matching(d); + + gen_uaddlp_vec(vece, t, n); + tcg_gen_add_vec(vece, d, d, t); +} + +static void gen_uadalp_b_i64(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_i64 t = tcg_temp_new_i64(); + + gen_uaddlp_b_i64(t, n); + tcg_gen_vec_add16_i64(d, d, t); +} + +static void gen_uadalp_h_i64(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_i64 t = tcg_temp_new_i64(); + + gen_uaddlp_h_i64(t, n); + tcg_gen_vec_add32_i64(d, d, t); +} + +static void gen_uadalp_s_i64(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_i64 t = tcg_temp_new_i64(); + + gen_uaddlp_s_i64(t, n); + tcg_gen_add_i64(d, d, t); +} + +void gen_gvec_uadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_shri_vec, INDEX_op_add_vec, 0 + }; + static const GVecGen2 g[] = { + { .fniv = gen_uadalp_vec, + .fni8 = gen_uadalp_b_i64, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_16 }, + { .fniv = gen_uadalp_vec, + .fni8 = gen_uadalp_h_i64, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_32 }, + { .fniv = 
gen_uadalp_vec, + .fni8 = gen_uadalp_s_i64, + .load_dest = true, + .opt_opc = vecop_list, + .vece = MO_64 }, + }; + assert(vece <= MO_32); + tcg_gen_gvec_2(rd_ofs, rn_ofs, opr_sz, max_sz, &g[vece]); +} + +void gen_gvec_fabs(unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz) +{ + uint64_t s_bit = 1ull << ((8 << vece) - 1); + tcg_gen_gvec_andi(vece, dofs, aofs, s_bit - 1, oprsz, maxsz); +} + +void gen_gvec_fneg(unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz) +{ + uint64_t s_bit = 1ull << ((8 << vece) - 1); + tcg_gen_gvec_xori(vece, dofs, aofs, s_bit, oprsz, maxsz); +} + +void gen_gvec_urecpe(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz) +{ + assert(vece == MO_32); + tcg_gen_gvec_2_ool(rd_ofs, rn_ofs, opr_sz, max_sz, 0, + gen_helper_gvec_urecpe_s); +} + +void gen_gvec_ursqrte(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz) +{ + assert(vece == MO_32); + tcg_gen_gvec_2_ool(rd_ofs, rn_ofs, opr_sz, max_sz, 0, + gen_helper_gvec_ursqrte_s); +} diff --git a/target/arm/tcg/gengvec64.c b/target/arm/tcg/gengvec64.c index 2617cde0a5f..2429cab1b88 100644 --- a/target/arm/tcg/gengvec64.c +++ b/target/arm/tcg/gengvec64.c @@ -369,3 +369,14 @@ void gen_gvec_usqadd_qc(unsigned vece, uint32_t rd_ofs, tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc), rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]); } + +void gen_gvec_sve2_sqdmulh(unsigned vece, uint32_t rd_ofs, + uint32_t rn_ofs, uint32_t rm_ofs, + uint32_t opr_sz, uint32_t max_sz) +{ + static gen_helper_gvec_3 * const fns[4] = { + gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h, + gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d, + }; + tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, 0, fns[vece]); +} diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c index c60d2a7ec96..71c6c44ee8a 100644 --- a/target/arm/tcg/helper-a64.c +++ b/target/arm/tcg/helper-a64.c @@ -28,12 +28,20 @@ #include "qemu/bitops.h" #include "internals.h" #include "qemu/crc32c.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "exec/cpu-common.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/helper-retaddr.h" +#include "accel/tcg/probe.h" +#include "exec/target_page.h" +#include "exec/tlb-flags.h" #include "qemu/int128.h" #include "qemu/atomic128.h" #include "fpu/softfloat.h" -#include /* For crc32 */ +#include /* for crc32 */ +#ifdef CONFIG_USER_ONLY +#include "user/page-protection.h" +#endif +#include "vec_internal.h" /* C2.4.7 Multiply and divide */ /* special cases for 0 and LLONG_MIN are mandated by the standard */ @@ -130,40 +138,38 @@ static inline uint32_t float_rel_to_flags(int res) return flags; } -uint64_t HELPER(vfp_cmph_a64)(uint32_t x, uint32_t y, void *fp_status) +uint64_t HELPER(vfp_cmph_a64)(uint32_t x, uint32_t y, float_status *fp_status) { return float_rel_to_flags(float16_compare_quiet(x, y, fp_status)); } -uint64_t HELPER(vfp_cmpeh_a64)(uint32_t x, uint32_t y, void *fp_status) +uint64_t HELPER(vfp_cmpeh_a64)(uint32_t x, uint32_t y, float_status *fp_status) { return float_rel_to_flags(float16_compare(x, y, fp_status)); } -uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status) +uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, float_status *fp_status) { return float_rel_to_flags(float32_compare_quiet(x, y, fp_status)); } -uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status) +uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, float_status 
*fp_status) { return float_rel_to_flags(float32_compare(x, y, fp_status)); } -uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status) +uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, float_status *fp_status) { return float_rel_to_flags(float64_compare_quiet(x, y, fp_status)); } -uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status) +uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, float_status *fp_status) { return float_rel_to_flags(float64_compare(x, y, fp_status)); } -float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp) +float32 HELPER(vfp_mulxs)(float32 a, float32 b, float_status *fpst) { - float_status *fpst = fpstp; - a = float32_squash_input_denormal(a, fpst); b = float32_squash_input_denormal(b, fpst); @@ -176,10 +182,8 @@ float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp) return float32_mul(a, b, fpst); } -float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp) +float64 HELPER(vfp_mulxd)(float64 a, float64 b, float_status *fpst) { - float_status *fpst = fpstp; - a = float64_squash_input_denormal(a, fpst); b = float64_squash_input_denormal(b, fpst); @@ -193,184 +197,71 @@ float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp) } /* 64bit/double versions of the neon float compare functions */ -uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp) +uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, float_status *fpst) { - float_status *fpst = fpstp; return -float64_eq_quiet(a, b, fpst); } -uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp) +uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, float_status *fpst) { - float_status *fpst = fpstp; return -float64_le(b, a, fpst); } -uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp) +uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, float_status *fpst) { - float_status *fpst = fpstp; return -float64_lt(b, a, fpst); } -/* Reciprocal step and sqrt step. Note that unlike the A32/T32 +/* + * Reciprocal step and sqrt step. Note that unlike the A32/T32 * versions, these do a fully fused multiply-add or * multiply-add-and-halve. + * The FPCR.AH == 1 versions need to avoid flipping the sign of NaN. 
*/ - -uint32_t HELPER(recpsf_f16)(uint32_t a, uint32_t b, void *fpstp) -{ - float_status *fpst = fpstp; - - a = float16_squash_input_denormal(a, fpst); - b = float16_squash_input_denormal(b, fpst); - - a = float16_chs(a); - if ((float16_is_infinity(a) && float16_is_zero(b)) || - (float16_is_infinity(b) && float16_is_zero(a))) { - return float16_two; - } - return float16_muladd(a, b, float16_two, 0, fpst); -} - -float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp) -{ - float_status *fpst = fpstp; - - a = float32_squash_input_denormal(a, fpst); - b = float32_squash_input_denormal(b, fpst); - - a = float32_chs(a); - if ((float32_is_infinity(a) && float32_is_zero(b)) || - (float32_is_infinity(b) && float32_is_zero(a))) { - return float32_two; - } - return float32_muladd(a, b, float32_two, 0, fpst); -} - -float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp) -{ - float_status *fpst = fpstp; - - a = float64_squash_input_denormal(a, fpst); - b = float64_squash_input_denormal(b, fpst); - - a = float64_chs(a); - if ((float64_is_infinity(a) && float64_is_zero(b)) || - (float64_is_infinity(b) && float64_is_zero(a))) { - return float64_two; - } - return float64_muladd(a, b, float64_two, 0, fpst); -} - -uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, void *fpstp) -{ - float_status *fpst = fpstp; - - a = float16_squash_input_denormal(a, fpst); - b = float16_squash_input_denormal(b, fpst); - - a = float16_chs(a); - if ((float16_is_infinity(a) && float16_is_zero(b)) || - (float16_is_infinity(b) && float16_is_zero(a))) { - return float16_one_point_five; - } - return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst); -} - -float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp) -{ - float_status *fpst = fpstp; - - a = float32_squash_input_denormal(a, fpst); - b = float32_squash_input_denormal(b, fpst); - - a = float32_chs(a); - if ((float32_is_infinity(a) && float32_is_zero(b)) || - (float32_is_infinity(b) && float32_is_zero(a))) { - return float32_one_point_five; - } - return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst); -} - -float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp) -{ - float_status *fpst = fpstp; - - a = float64_squash_input_denormal(a, fpst); - b = float64_squash_input_denormal(b, fpst); - - a = float64_chs(a); - if ((float64_is_infinity(a) && float64_is_zero(b)) || - (float64_is_infinity(b) && float64_is_zero(a))) { - return float64_one_point_five; - } - return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst); -} - -/* Pairwise long add: add pairs of adjacent elements into - * double-width elements in the result (eg _s8 is an 8x8->16 op) - */ -uint64_t HELPER(neon_addlp_s8)(uint64_t a) -{ - uint64_t nsignmask = 0x0080008000800080ULL; - uint64_t wsignmask = 0x8000800080008000ULL; - uint64_t elementmask = 0x00ff00ff00ff00ffULL; - uint64_t tmp1, tmp2; - uint64_t res, signres; - - /* Extract odd elements, sign extend each to a 16 bit field */ - tmp1 = a & elementmask; - tmp1 ^= nsignmask; - tmp1 |= wsignmask; - tmp1 = (tmp1 - nsignmask) ^ wsignmask; - /* Ditto for the even elements */ - tmp2 = (a >> 8) & elementmask; - tmp2 ^= nsignmask; - tmp2 |= wsignmask; - tmp2 = (tmp2 - nsignmask) ^ wsignmask; - - /* calculate the result by summing bits 0..14, 16..22, etc, - * and then adjusting the sign bits 15, 23, etc manually. - * This ensures the addition can't overflow the 16 bit field. 
- */ - signres = (tmp1 ^ tmp2) & wsignmask; - res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask); - res ^= signres; - - return res; -} - -uint64_t HELPER(neon_addlp_u8)(uint64_t a) -{ - uint64_t tmp; - - tmp = a & 0x00ff00ff00ff00ffULL; - tmp += (a >> 8) & 0x00ff00ff00ff00ffULL; - return tmp; -} - -uint64_t HELPER(neon_addlp_s16)(uint64_t a) -{ - int32_t reslo, reshi; - - reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16); - reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48); - - return (uint32_t)reslo | (((uint64_t)reshi) << 32); -} - -uint64_t HELPER(neon_addlp_u16)(uint64_t a) -{ - uint64_t tmp; - - tmp = a & 0x0000ffff0000ffffULL; - tmp += (a >> 16) & 0x0000ffff0000ffffULL; - return tmp; -} +#define DO_RECPS(NAME, CTYPE, FLOATTYPE, CHSFN) \ + CTYPE HELPER(NAME)(CTYPE a, CTYPE b, float_status *fpst) \ + { \ + a = FLOATTYPE ## _squash_input_denormal(a, fpst); \ + b = FLOATTYPE ## _squash_input_denormal(b, fpst); \ + a = FLOATTYPE ## _ ## CHSFN(a); \ + if ((FLOATTYPE ## _is_infinity(a) && FLOATTYPE ## _is_zero(b)) || \ + (FLOATTYPE ## _is_infinity(b) && FLOATTYPE ## _is_zero(a))) { \ + return FLOATTYPE ## _two; \ + } \ + return FLOATTYPE ## _muladd(a, b, FLOATTYPE ## _two, 0, fpst); \ + } + +DO_RECPS(recpsf_f16, uint32_t, float16, chs) +DO_RECPS(recpsf_f32, float32, float32, chs) +DO_RECPS(recpsf_f64, float64, float64, chs) +DO_RECPS(recpsf_ah_f16, uint32_t, float16, ah_chs) +DO_RECPS(recpsf_ah_f32, float32, float32, ah_chs) +DO_RECPS(recpsf_ah_f64, float64, float64, ah_chs) + +#define DO_RSQRTSF(NAME, CTYPE, FLOATTYPE, CHSFN) \ + CTYPE HELPER(NAME)(CTYPE a, CTYPE b, float_status *fpst) \ + { \ + a = FLOATTYPE ## _squash_input_denormal(a, fpst); \ + b = FLOATTYPE ## _squash_input_denormal(b, fpst); \ + a = FLOATTYPE ## _ ## CHSFN(a); \ + if ((FLOATTYPE ## _is_infinity(a) && FLOATTYPE ## _is_zero(b)) || \ + (FLOATTYPE ## _is_infinity(b) && FLOATTYPE ## _is_zero(a))) { \ + return FLOATTYPE ## _one_point_five; \ + } \ + return FLOATTYPE ## _muladd_scalbn(a, b, FLOATTYPE ## _three, \ + -1, 0, fpst); \ + } \ + +DO_RSQRTSF(rsqrtsf_f16, uint32_t, float16, chs) +DO_RSQRTSF(rsqrtsf_f32, float32, float32, chs) +DO_RSQRTSF(rsqrtsf_f64, float64, float64, chs) +DO_RSQRTSF(rsqrtsf_ah_f16, uint32_t, float16, ah_chs) +DO_RSQRTSF(rsqrtsf_ah_f32, float32, float32, ah_chs) +DO_RSQRTSF(rsqrtsf_ah_f64, float64, float64, ah_chs) /* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */ -uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp) +uint32_t HELPER(frecpx_f16)(uint32_t a, float_status *fpst) { - float_status *fpst = fpstp; uint16_t val16, sbit; int16_t exp; @@ -401,9 +292,8 @@ uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp) } } -float32 HELPER(frecpx_f32)(float32 a, void *fpstp) +float32 HELPER(frecpx_f32)(float32 a, float_status *fpst) { - float_status *fpst = fpstp; uint32_t val32, sbit; int32_t exp; @@ -434,9 +324,8 @@ float32 HELPER(frecpx_f32)(float32 a, void *fpstp) } } -float64 HELPER(frecpx_f64)(float64 a, void *fpstp) +float64 HELPER(frecpx_f64)(float64 a, float_status *fpst) { - float_status *fpst = fpstp; uint64_t val64, sbit; int64_t exp; @@ -467,28 +356,55 @@ float64 HELPER(frecpx_f64)(float64 a, void *fpstp) } } -float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env) +float32 HELPER(fcvtx_f64_to_f32)(float64 a, float_status *fpst) { - /* Von Neumann rounding is implemented by using round-to-zero - * and then setting the LSB of the result if Inexact was raised. 
- */ float32 r; - float_status *fpst = &env->vfp.fp_status; - float_status tstat = *fpst; - int exflags; - - set_float_rounding_mode(float_round_to_zero, &tstat); - set_float_exception_flags(0, &tstat); - r = float64_to_float32(a, &tstat); - exflags = get_float_exception_flags(&tstat); - if (exflags & float_flag_inexact) { - r = make_float32(float32_val(r) | 1); - } - exflags |= get_float_exception_flags(fpst); - set_float_exception_flags(exflags, fpst); + int old = get_float_rounding_mode(fpst); + + set_float_rounding_mode(float_round_to_odd, fpst); + r = float64_to_float32(a, fpst); + set_float_rounding_mode(old, fpst); return r; } +/* + * AH=1 min/max have some odd special cases: + * comparing two zeroes (regardless of sign), (NaN, anything), + * or (anything, NaN) should return the second argument (possibly + * squashed to zero). + * Also, denormal outputs are not squashed to zero regardless of FZ or FZ16. + */ +#define AH_MINMAX_HELPER(NAME, CTYPE, FLOATTYPE, MINMAX) \ + CTYPE HELPER(NAME)(CTYPE a, CTYPE b, float_status *fpst) \ + { \ + bool save; \ + CTYPE r; \ + a = FLOATTYPE ## _squash_input_denormal(a, fpst); \ + b = FLOATTYPE ## _squash_input_denormal(b, fpst); \ + if (FLOATTYPE ## _is_zero(a) && FLOATTYPE ## _is_zero(b)) { \ + return b; \ + } \ + if (FLOATTYPE ## _is_any_nan(a) || \ + FLOATTYPE ## _is_any_nan(b)) { \ + float_raise(float_flag_invalid, fpst); \ + return b; \ + } \ + save = get_flush_to_zero(fpst); \ + set_flush_to_zero(false, fpst); \ + r = FLOATTYPE ## _ ## MINMAX(a, b, fpst); \ + set_flush_to_zero(save, fpst); \ + return r; \ + } + +AH_MINMAX_HELPER(vfp_ah_minh, dh_ctype_f16, float16, min) +AH_MINMAX_HELPER(vfp_ah_mins, float32, float32, min) +AH_MINMAX_HELPER(vfp_ah_mind, float64, float64, min) +AH_MINMAX_HELPER(vfp_ah_maxh, dh_ctype_f16, float16, max) +AH_MINMAX_HELPER(vfp_ah_maxs, float32, float32, max) +AH_MINMAX_HELPER(vfp_ah_maxd, float64, float64, max) +AH_MINMAX_HELPER(sme2_ah_fmax_b16, bfloat16, bfloat16, max) +AH_MINMAX_HELPER(sme2_ah_fmin_b16, bfloat16, bfloat16, min) + /* 64-bit versions of the CRC helpers. 
Note that although the operation * (and the prototypes of crc32c() and crc32() mean that only the bottom * 32 bits of the accumulator and result are used, we pass and return @@ -524,27 +440,17 @@ uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes) #define ADVSIMD_HELPER(name, suffix) HELPER(glue(glue(advsimd_, name), suffix)) #define ADVSIMD_HALFOP(name) \ -uint32_t ADVSIMD_HELPER(name, h)(uint32_t a, uint32_t b, void *fpstp) \ +uint32_t ADVSIMD_HELPER(name, h)(uint32_t a, uint32_t b, float_status *fpst) \ { \ - float_status *fpst = fpstp; \ return float16_ ## name(a, b, fpst); \ } -ADVSIMD_HALFOP(add) -ADVSIMD_HALFOP(sub) -ADVSIMD_HALFOP(mul) -ADVSIMD_HALFOP(div) -ADVSIMD_HALFOP(min) -ADVSIMD_HALFOP(max) -ADVSIMD_HALFOP(minnum) -ADVSIMD_HALFOP(maxnum) - #define ADVSIMD_TWOHALFOP(name) \ -uint32_t ADVSIMD_HELPER(name, 2h)(uint32_t two_a, uint32_t two_b, void *fpstp) \ +uint32_t ADVSIMD_HELPER(name, 2h)(uint32_t two_a, uint32_t two_b, \ + float_status *fpst) \ { \ float16 a1, a2, b1, b2; \ uint32_t r1, r2; \ - float_status *fpst = fpstp; \ a1 = extract32(two_a, 0, 16); \ a2 = extract32(two_a, 16, 16); \ b1 = extract32(two_b, 0, 16); \ @@ -564,10 +470,8 @@ ADVSIMD_TWOHALFOP(minnum) ADVSIMD_TWOHALFOP(maxnum) /* Data processing - scalar floating-point and advanced SIMD */ -static float16 float16_mulx(float16 a, float16 b, void *fpstp) +static float16 float16_mulx(float16 a, float16 b, float_status *fpst) { - float_status *fpst = fpstp; - a = float16_squash_input_denormal(a, fpst); b = float16_squash_input_denormal(b, fpst); @@ -585,16 +489,14 @@ ADVSIMD_TWOHALFOP(mulx) /* fused multiply-accumulate */ uint32_t HELPER(advsimd_muladdh)(uint32_t a, uint32_t b, uint32_t c, - void *fpstp) + float_status *fpst) { - float_status *fpst = fpstp; return float16_muladd(a, b, c, 0, fpst); } uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b, - uint32_t two_c, void *fpstp) + uint32_t two_c, float_status *fpst) { - float_status *fpst = fpstp; float16 a1, a2, b1, b2, c1, c2; uint32_t r1, r2; a1 = extract32(two_a, 0, 16); @@ -616,31 +518,27 @@ uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b, #define ADVSIMD_CMPRES(test) (test) ? 
0xffff : 0 -uint32_t HELPER(advsimd_ceq_f16)(uint32_t a, uint32_t b, void *fpstp) +uint32_t HELPER(advsimd_ceq_f16)(uint32_t a, uint32_t b, float_status *fpst) { - float_status *fpst = fpstp; int compare = float16_compare_quiet(a, b, fpst); return ADVSIMD_CMPRES(compare == float_relation_equal); } -uint32_t HELPER(advsimd_cge_f16)(uint32_t a, uint32_t b, void *fpstp) +uint32_t HELPER(advsimd_cge_f16)(uint32_t a, uint32_t b, float_status *fpst) { - float_status *fpst = fpstp; int compare = float16_compare(a, b, fpst); return ADVSIMD_CMPRES(compare == float_relation_greater || compare == float_relation_equal); } -uint32_t HELPER(advsimd_cgt_f16)(uint32_t a, uint32_t b, void *fpstp) +uint32_t HELPER(advsimd_cgt_f16)(uint32_t a, uint32_t b, float_status *fpst) { - float_status *fpst = fpstp; int compare = float16_compare(a, b, fpst); return ADVSIMD_CMPRES(compare == float_relation_greater); } -uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, void *fpstp) +uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, float_status *fpst) { - float_status *fpst = fpstp; float16 f0 = float16_abs(a); float16 f1 = float16_abs(b); int compare = float16_compare(f0, f1, fpst); @@ -648,9 +546,8 @@ uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, void *fpstp) compare == float_relation_equal); } -uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, void *fpstp) +uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, float_status *fpst) { - float_status *fpst = fpstp; float16 f0 = float16_abs(a); float16 f1 = float16_abs(b); int compare = float16_compare(f0, f1, fpst); @@ -658,12 +555,12 @@ uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, void *fpstp) } /* round to integral */ -uint32_t HELPER(advsimd_rinth_exact)(uint32_t x, void *fp_status) +uint32_t HELPER(advsimd_rinth_exact)(uint32_t x, float_status *fp_status) { return float16_round_to_int(x, fp_status); } -uint32_t HELPER(advsimd_rinth)(uint32_t x, void *fp_status) +uint32_t HELPER(advsimd_rinth)(uint32_t x, float_status *fp_status) { int old_flags = get_float_exception_flags(fp_status), new_flags; float16 ret; @@ -679,38 +576,6 @@ uint32_t HELPER(advsimd_rinth)(uint32_t x, void *fp_status) return ret; } -/* - * Half-precision floating point conversion functions - * - * There are a multitude of conversion functions with various - * different rounding modes. This is dealt with by the calling code - * setting the mode appropriately before calling the helper. 
- */ - -uint32_t HELPER(advsimd_f16tosinth)(uint32_t a, void *fpstp) -{ - float_status *fpst = fpstp; - - /* Invalid if we are passed a NaN */ - if (float16_is_any_nan(a)) { - float_raise(float_flag_invalid, fpst); - return 0; - } - return float16_to_int16(a, fpst); -} - -uint32_t HELPER(advsimd_f16touinth)(uint32_t a, void *fpstp) -{ - float_status *fpst = fpstp; - - /* Invalid if we are passed a NaN */ - if (float16_is_any_nan(a)) { - float_raise(float_flag_invalid, fpst); - return 0; - } - return float16_to_uint16(a, fpst); -} - static int el_from_spsr(uint32_t spsr) { /* Return the exception level that this SPSR is requesting a return to, @@ -771,6 +636,7 @@ static void cpsr_write_from_spsr_elx(CPUARMState *env, void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) { + ARMCPU *cpu = env_archcpu(env); int cur_el = arm_current_el(env); unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el); uint32_t spsr = env->banked_spsr[spsr_idx]; @@ -792,15 +658,6 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) spsr &= ~PSTATE_SS; } - /* - * FEAT_RME forbids return from EL3 with an invalid security state. - * We don't need an explicit check for FEAT_RME here because we enforce - * in scr_write() that you can't set the NSE bit without it. - */ - if (cur_el == 3 && (env->cp15.scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE) { - goto illegal_return; - } - new_el = el_from_spsr(spsr); if (new_el == -1) { goto illegal_return; @@ -812,17 +669,33 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) goto illegal_return; } + /* + * FEAT_RME forbids return from EL3 to a lower exception level + * with an invalid security state. + * We don't need an explicit check for FEAT_RME here because we enforce + * in scr_write() that you can't set the NSE bit without it. 
+ */ + if (cur_el == 3 && new_el < 3 && + (env->cp15.scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE) { + goto illegal_return; + } + if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) { /* Return to an EL which is configured for a different register width */ goto illegal_return; } + if (!return_to_aa64 && !cpu_isar_feature(aa64_aa32, cpu)) { + /* Return to AArch32 when CPU is AArch64-only */ + goto illegal_return; + } + if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) { goto illegal_return; } bql_lock(); - arm_call_pre_el_change_hook(env_archcpu(env)); + arm_call_pre_el_change_hook(cpu); bql_unlock(); if (!return_to_aa64) { @@ -850,7 +723,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) int tbii; env->aarch64 = true; - spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar); + spsr &= aarch64_pstate_valid_mask(&cpu->isar); pstate_write(env, spsr); if (!arm_singlestep_active(env)) { env->pstate &= ~PSTATE_SS; @@ -889,7 +762,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64); bql_lock(); - arm_call_el_change_hook(env_archcpu(env)); + arm_call_el_change_hook(cpu); bql_unlock(); return; @@ -915,17 +788,6 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc); } -/* - * Square Root and Reciprocal square root - */ - -uint32_t HELPER(sqrt_f16)(uint32_t a, void *fpstp) -{ - float_status *s = fpstp; - - return float16_sqrt(a, s); -} - void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in) { uintptr_t ra = GETPC(); @@ -1292,7 +1154,6 @@ static void do_setp(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, env->ZF = 1; /* our env->ZF encoding is inverted */ env->CF = 0; env->VF = 0; - return; } void HELPER(setp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc) @@ -1348,7 +1209,7 @@ static void do_setm(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc, /* Do the actual memset: we leave the last partial page to SETE */ stagesetsize = setsize & TARGET_PAGE_MASK; while (stagesetsize > 0) { - step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra); + step = stepfn(env, toaddr, stagesetsize, data, memidx, &mtedesc, ra); toaddr += step; setsize -= step; stagesetsize -= step; @@ -1692,7 +1553,6 @@ static void do_cpyp(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, env->ZF = 1; /* our env->ZF encoding is inverted */ env->CF = 0; env->VF = 0; - return; } void HELPER(cpyp)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, @@ -1877,3 +1737,42 @@ void HELPER(cpyfe)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc, { do_cpye(env, syndrome, wdesc, rdesc, false, GETPC()); } + +static bool is_guarded_page(CPUARMState *env, target_ulong addr, uintptr_t ra) +{ +#ifdef CONFIG_USER_ONLY + return page_get_flags(addr) & PAGE_BTI; +#else + CPUTLBEntryFull *full; + void *host; + int mmu_idx = cpu_mmu_index(env_cpu(env), true); + int flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx, + false, &host, &full, ra); + + assert(!(flags & TLB_INVALID_MASK)); + return full->extra.arm.guarded; +#endif +} + +void HELPER(guarded_page_check)(CPUARMState *env) +{ + /* + * We have already verified that bti is enabled, and that the + * instruction at PC is not ok for BTYPE. This is always at + * the beginning of a block, so PC is always up-to-date and + * no unwind is required. 
+ */ + if (is_guarded_page(env, env->pc, 0)) { + raise_exception(env, EXCP_UDEF, syn_btitrap(env->btype), + exception_target_el(env)); + } +} + +void HELPER(guarded_page_br)(CPUARMState *env, target_ulong pc) +{ + /* + * We have already checked for branch via x16 and x17. + * What remains for choosing BTYPE is checking for a guarded page. + */ + env->btype = is_guarded_page(env, pc, GETPC()) ? 3 : 1; +} diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h index 371388f61b5..85023465b76 100644 --- a/target/arm/tcg/helper-a64.h +++ b/target/arm/tcg/helper-a64.h @@ -23,64 +23,62 @@ DEF_HELPER_2(msr_i_spsel, void, env, i32) DEF_HELPER_2(msr_i_daifset, void, env, i32) DEF_HELPER_2(msr_i_daifclear, void, env, i32) DEF_HELPER_1(msr_set_allint_el1, void, env) -DEF_HELPER_3(vfp_cmph_a64, i64, f16, f16, ptr) -DEF_HELPER_3(vfp_cmpeh_a64, i64, f16, f16, ptr) -DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr) -DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr) -DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, ptr) -DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, ptr) -DEF_HELPER_FLAGS_4(simd_tblx, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vfp_mulxs, TCG_CALL_NO_RWG, f32, f32, f32, ptr) -DEF_HELPER_FLAGS_3(vfp_mulxd, TCG_CALL_NO_RWG, f64, f64, f64, ptr) -DEF_HELPER_FLAGS_3(neon_ceq_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) -DEF_HELPER_FLAGS_3(neon_cge_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) -DEF_HELPER_FLAGS_3(neon_cgt_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) -DEF_HELPER_FLAGS_3(recpsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, ptr) -DEF_HELPER_FLAGS_3(recpsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr) -DEF_HELPER_FLAGS_3(recpsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr) -DEF_HELPER_FLAGS_3(rsqrtsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, ptr) -DEF_HELPER_FLAGS_3(rsqrtsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr) -DEF_HELPER_FLAGS_3(rsqrtsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr) -DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64) -DEF_HELPER_FLAGS_1(neon_addlp_u8, TCG_CALL_NO_RWG_SE, i64, i64) -DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64) -DEF_HELPER_FLAGS_1(neon_addlp_u16, TCG_CALL_NO_RWG_SE, i64, i64) -DEF_HELPER_FLAGS_2(frecpx_f64, TCG_CALL_NO_RWG, f64, f64, ptr) -DEF_HELPER_FLAGS_2(frecpx_f32, TCG_CALL_NO_RWG, f32, f32, ptr) -DEF_HELPER_FLAGS_2(frecpx_f16, TCG_CALL_NO_RWG, f16, f16, ptr) -DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, env) +DEF_HELPER_3(vfp_cmph_a64, i64, f16, f16, fpst) +DEF_HELPER_3(vfp_cmpeh_a64, i64, f16, f16, fpst) +DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, fpst) +DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, fpst) +DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, fpst) +DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, fpst) +DEF_HELPER_FLAGS_4(simd_tblx, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_3(vfp_mulxs, TCG_CALL_NO_RWG, f32, f32, f32, fpst) +DEF_HELPER_FLAGS_3(vfp_mulxd, TCG_CALL_NO_RWG, f64, f64, f64, fpst) +DEF_HELPER_FLAGS_3(neon_ceq_f64, TCG_CALL_NO_RWG, i64, i64, i64, fpst) +DEF_HELPER_FLAGS_3(neon_cge_f64, TCG_CALL_NO_RWG, i64, i64, i64, fpst) +DEF_HELPER_FLAGS_3(neon_cgt_f64, TCG_CALL_NO_RWG, i64, i64, i64, fpst) +DEF_HELPER_FLAGS_3(recpsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, fpst) +DEF_HELPER_FLAGS_3(recpsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, fpst) +DEF_HELPER_FLAGS_3(recpsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, fpst) +DEF_HELPER_FLAGS_3(recpsf_ah_f16, TCG_CALL_NO_RWG, f16, f16, f16, fpst) +DEF_HELPER_FLAGS_3(recpsf_ah_f32, TCG_CALL_NO_RWG, f32, f32, f32, fpst) +DEF_HELPER_FLAGS_3(recpsf_ah_f64, 
TCG_CALL_NO_RWG, f64, f64, f64, fpst) +DEF_HELPER_FLAGS_3(rsqrtsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, fpst) +DEF_HELPER_FLAGS_3(rsqrtsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, fpst) +DEF_HELPER_FLAGS_3(rsqrtsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, fpst) +DEF_HELPER_FLAGS_3(rsqrtsf_ah_f16, TCG_CALL_NO_RWG, f16, f16, f16, fpst) +DEF_HELPER_FLAGS_3(rsqrtsf_ah_f32, TCG_CALL_NO_RWG, f32, f32, f32, fpst) +DEF_HELPER_FLAGS_3(rsqrtsf_ah_f64, TCG_CALL_NO_RWG, f64, f64, f64, fpst) +DEF_HELPER_FLAGS_2(frecpx_f64, TCG_CALL_NO_RWG, f64, f64, fpst) +DEF_HELPER_FLAGS_2(frecpx_f32, TCG_CALL_NO_RWG, f32, f32, fpst) +DEF_HELPER_FLAGS_2(frecpx_f16, TCG_CALL_NO_RWG, f16, f16, fpst) +DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, fpst) DEF_HELPER_FLAGS_3(crc32_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32) DEF_HELPER_FLAGS_3(crc32c_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32) -DEF_HELPER_FLAGS_3(advsimd_maxh, TCG_CALL_NO_RWG, f16, f16, f16, ptr) -DEF_HELPER_FLAGS_3(advsimd_minh, TCG_CALL_NO_RWG, f16, f16, f16, ptr) -DEF_HELPER_FLAGS_3(advsimd_maxnumh, TCG_CALL_NO_RWG, f16, f16, f16, ptr) -DEF_HELPER_FLAGS_3(advsimd_minnumh, TCG_CALL_NO_RWG, f16, f16, f16, ptr) -DEF_HELPER_3(advsimd_addh, f16, f16, f16, ptr) -DEF_HELPER_3(advsimd_subh, f16, f16, f16, ptr) -DEF_HELPER_3(advsimd_mulh, f16, f16, f16, ptr) -DEF_HELPER_3(advsimd_divh, f16, f16, f16, ptr) -DEF_HELPER_3(advsimd_ceq_f16, i32, f16, f16, ptr) -DEF_HELPER_3(advsimd_cge_f16, i32, f16, f16, ptr) -DEF_HELPER_3(advsimd_cgt_f16, i32, f16, f16, ptr) -DEF_HELPER_3(advsimd_acge_f16, i32, f16, f16, ptr) -DEF_HELPER_3(advsimd_acgt_f16, i32, f16, f16, ptr) -DEF_HELPER_3(advsimd_mulxh, f16, f16, f16, ptr) -DEF_HELPER_4(advsimd_muladdh, f16, f16, f16, f16, ptr) -DEF_HELPER_3(advsimd_add2h, i32, i32, i32, ptr) -DEF_HELPER_3(advsimd_sub2h, i32, i32, i32, ptr) -DEF_HELPER_3(advsimd_mul2h, i32, i32, i32, ptr) -DEF_HELPER_3(advsimd_div2h, i32, i32, i32, ptr) -DEF_HELPER_3(advsimd_max2h, i32, i32, i32, ptr) -DEF_HELPER_3(advsimd_min2h, i32, i32, i32, ptr) -DEF_HELPER_3(advsimd_maxnum2h, i32, i32, i32, ptr) -DEF_HELPER_3(advsimd_minnum2h, i32, i32, i32, ptr) -DEF_HELPER_3(advsimd_mulx2h, i32, i32, i32, ptr) -DEF_HELPER_4(advsimd_muladd2h, i32, i32, i32, i32, ptr) -DEF_HELPER_2(advsimd_rinth_exact, f16, f16, ptr) -DEF_HELPER_2(advsimd_rinth, f16, f16, ptr) -DEF_HELPER_2(advsimd_f16tosinth, i32, f16, ptr) -DEF_HELPER_2(advsimd_f16touinth, i32, f16, ptr) -DEF_HELPER_2(sqrt_f16, f16, f16, ptr) +DEF_HELPER_3(advsimd_ceq_f16, i32, f16, f16, fpst) +DEF_HELPER_3(advsimd_cge_f16, i32, f16, f16, fpst) +DEF_HELPER_3(advsimd_cgt_f16, i32, f16, f16, fpst) +DEF_HELPER_3(advsimd_acge_f16, i32, f16, f16, fpst) +DEF_HELPER_3(advsimd_acgt_f16, i32, f16, f16, fpst) +DEF_HELPER_3(advsimd_mulxh, f16, f16, f16, fpst) +DEF_HELPER_4(advsimd_muladdh, f16, f16, f16, f16, fpst) +DEF_HELPER_3(advsimd_add2h, i32, i32, i32, fpst) +DEF_HELPER_3(advsimd_sub2h, i32, i32, i32, fpst) +DEF_HELPER_3(advsimd_mul2h, i32, i32, i32, fpst) +DEF_HELPER_3(advsimd_div2h, i32, i32, i32, fpst) +DEF_HELPER_3(advsimd_max2h, i32, i32, i32, fpst) +DEF_HELPER_3(advsimd_min2h, i32, i32, i32, fpst) +DEF_HELPER_3(advsimd_maxnum2h, i32, i32, i32, fpst) +DEF_HELPER_3(advsimd_minnum2h, i32, i32, i32, fpst) +DEF_HELPER_3(advsimd_mulx2h, i32, i32, i32, fpst) +DEF_HELPER_4(advsimd_muladd2h, i32, i32, i32, i32, fpst) +DEF_HELPER_2(advsimd_rinth_exact, f16, f16, fpst) +DEF_HELPER_2(advsimd_rinth, f16, f16, fpst) + +DEF_HELPER_3(vfp_ah_minh, f16, f16, f16, fpst) +DEF_HELPER_3(vfp_ah_mins, f32, f32, f32, fpst) 
+DEF_HELPER_3(vfp_ah_mind, f64, f64, f64, fpst) +DEF_HELPER_3(vfp_ah_maxh, f16, f16, f16, fpst) +DEF_HELPER_3(vfp_ah_maxs, f32, f32, f32, fpst) +DEF_HELPER_3(vfp_ah_maxd, f64, f64, f64, fpst) DEF_HELPER_2(exception_return, void, env, i64) DEF_HELPER_FLAGS_2(dc_zva, TCG_CALL_NO_WG, void, env, i64) @@ -133,14 +131,17 @@ DEF_HELPER_4(cpyfp, void, env, i32, i32, i32) DEF_HELPER_4(cpyfm, void, env, i32, i32, i32) DEF_HELPER_4(cpyfe, void, env, i32, i32, i32) -DEF_HELPER_FLAGS_5(gvec_fdiv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fdiv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_1(guarded_page_check, TCG_CALL_NO_WG, void, env) +DEF_HELPER_FLAGS_2(guarded_page_br, TCG_CALL_NO_RWG, void, env, tl) + +DEF_HELPER_FLAGS_5(gvec_fdiv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fdiv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) -DEF_HELPER_FLAGS_5(gvec_fmulx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmulx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmulx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fmulx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmulx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmulx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) -DEF_HELPER_FLAGS_5(gvec_fmulx_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmulx_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(gvec_fmulx_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_fmulx_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmulx_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmulx_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) diff --git a/target/arm/tcg/helper-sme.h b/target/arm/tcg/helper-sme.h index d22bf9d21b0..c551797c6fa 100644 --- a/target/arm/tcg/helper-sme.h +++ b/target/arm/tcg/helper-sme.h @@ -33,101 +33,147 @@ DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_ld1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_ld1s_be_h, 
TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_ld1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_ld1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_st1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_st1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_st1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) 
-DEF_HELPER_FLAGS_5(sme_st1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_st1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_st1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_3(sme2_mova_cz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_mova_zc_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_mova_cz_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_mova_zc_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_mova_cz_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_mova_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sme2p1_movaz_zc_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2p1_movaz_zc_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2p1_movaz_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2p1_movaz_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2p1_movaz_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_ld1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_ld1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) 
+DEF_HELPER_FLAGS_5(sme_ld1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_ld1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_ld1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_st1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_st1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_st1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_st1d_be_h, TCG_CALL_NO_WG, void, env, 
ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_st1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG, +DEF_HELPER_FLAGS_7(sme_fmopa_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sme_bfmopa_w, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_7(sme_bfmopa, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_7(sme_fmops_w_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_7(sme_fmops_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sme_fmops_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sme_fmops_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sme_bfmops_w, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_7(sme_bfmops, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_7(sme_ah_fmops_w_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_7(sme_ah_fmops_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sme_ah_fmops_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sme_ah_fmops_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sme_ah_bfmops_w, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_7(sme_ah_bfmops, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) + DEF_HELPER_FLAGS_6(sme_smopa_s, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sme_umopa_s, TCG_CALL_NO_RWG, @@ -144,3 +190,168 @@ DEF_HELPER_FLAGS_6(sme_sumopa_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sme_usmopa_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sme2_bmopa_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme2_smopa2_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme2_umopa2_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_fmax_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmin_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmax_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmin_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmaxnum_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fminnum_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_6(sme2_fdot_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_6(sme2_fdot_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_6(sme2_fvdot_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(sme2_svdot_idx_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_uvdot_idx_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_suvdot_idx_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_usvdot_idx_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sme2_svdot_idx_4h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_uvdot_idx_4h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sme2_svdot_idx_2h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_uvdot_idx_2h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sme2_smlall_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_smlall_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_smlsll_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_smlsll_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_umlall_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_umlall_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_umlsll_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_umlsll_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_usmlall_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sme2_smlall_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_smlall_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_smlsll_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_smlsll_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_umlall_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_umlall_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_umlsll_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_umlsll_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_usmlall_idx_s, TCG_CALL_NO_RWG, void, ptr, 
ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme2_sumlall_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sme2_bfcvt, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sme2_bfcvtn, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sme2_fcvt_n, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sme2_fcvtn, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sme2_fcvt_w, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sme2_fcvtl, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sme2_scvtf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sme2_ucvtf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_3(sme2_sqcvt_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uqcvt_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqcvtu_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uqcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqcvtu_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqcvt_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uqcvt_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqcvtu_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sme2_sqcvtn_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uqcvtn_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqcvtun_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqcvtn_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uqcvtn_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqcvtun_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqcvtn_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uqcvtn_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqcvtun_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sme2_sunpk2_bh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sunpk2_hs, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sunpk2_sd, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sunpk4_bh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sunpk4_hs, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sunpk4_sd, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uunpk2_bh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uunpk2_hs, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uunpk2_sd, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uunpk4_bh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uunpk4_hs, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uunpk4_sd, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sme2_zip2_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_zip2_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_zip2_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_zip2_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_zip2_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sme2_uzp2_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_uzp2_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_uzp2_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, 
i32) +DEF_HELPER_FLAGS_4(sme2_uzp2_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_uzp2_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sme2_zip4_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_zip4_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_zip4_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_zip4_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_zip4_q, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sme2_uzp4_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uzp4_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uzp4_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uzp4_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uzp4_q, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sme2_sqrshr_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uqrshr_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqrshru_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqrshr_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uqrshr_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqrshru_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqrshr_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uqrshr_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqrshru_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(sme2_sqrshrn_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uqrshrn_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqrshrun_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqrshrn_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uqrshrn_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqrshrun_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqrshrn_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_uqrshrn_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(sme2_sqrshrun_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sme2_sclamp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_sclamp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_sclamp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_sclamp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sme2_uclamp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_uclamp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_uclamp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_uclamp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sme2_fclamp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sme2_fclamp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sme2_fclamp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sme2_bfclamp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(sme2_sel_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32, i32) +DEF_HELPER_FLAGS_5(sme2_sel_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32, i32) +DEF_HELPER_FLAGS_5(sme2_sel_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32, i32) +DEF_HELPER_FLAGS_5(sme2_sel_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32, i32) diff --git a/target/arm/tcg/helper-sve.h 
b/target/arm/tcg/helper-sve.h index cc4e1d89481..c3541a8ca86 100644 --- a/target/arm/tcg/helper-sve.h +++ b/target/arm/tcg/helper-sve.h @@ -541,10 +541,18 @@ DEF_HELPER_FLAGS_4(sve_fabs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fabs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fabs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ah_fabs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ah_fabs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ah_fabs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + DEF_HELPER_FLAGS_4(sve_fneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_fneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ah_fneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ah_fneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve_ah_fneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + DEF_HELPER_FLAGS_4(sve_not_zpz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_not_zpz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_not_zpz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) @@ -668,11 +676,21 @@ DEF_HELPER_FLAGS_5(sve2_tbl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve2_tbl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve2_tbl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_tblq_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_tblq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_tblq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_tblq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + DEF_HELPER_FLAGS_4(sve2_tbx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve2_tbx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve2_tbx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve2_tbx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_tbxq_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_tbxq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_tbxq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_tbxq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + DEF_HELPER_FLAGS_3(sve_sunpk_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_sunpk_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_sunpk_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) @@ -693,12 +711,22 @@ DEF_HELPER_FLAGS_4(sve_zip_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_zip_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve2_zip_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_zipq_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_zipq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_zipq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_zipq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + DEF_HELPER_FLAGS_4(sve_uzp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uzp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uzp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_uzp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
DEF_HELPER_FLAGS_4(sve2_uzp_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_uzpq_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_uzpq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_uzpq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_uzpq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + DEF_HELPER_FLAGS_4(sve_trn_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_trn_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_trn_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) @@ -929,10 +957,17 @@ DEF_HELPER_FLAGS_4(sve_brkn, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_brkns, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sve_cntp, TCG_CALL_NO_RWG, i64, ptr, ptr, i32) +DEF_HELPER_FLAGS_2(sve2p1_cntp_c, TCG_CALL_NO_RWG_SE, i64, i32, i32) DEF_HELPER_FLAGS_3(sve_whilel, TCG_CALL_NO_RWG, i32, ptr, i32, i32) DEF_HELPER_FLAGS_3(sve_whileg, TCG_CALL_NO_RWG, i32, ptr, i32, i32) +DEF_HELPER_FLAGS_3(sve_while2l, TCG_CALL_NO_RWG, i32, ptr, i32, i32) +DEF_HELPER_FLAGS_3(sve_while2g, TCG_CALL_NO_RWG, i32, ptr, i32, i32) + +DEF_HELPER_FLAGS_3(sve_whilecl, TCG_CALL_NO_RWG, i32, ptr, i32, i32) +DEF_HELPER_FLAGS_3(sve_whilecg, TCG_CALL_NO_RWG, i32, ptr, i32, i32) + DEF_HELPER_FLAGS_4(sve_subri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_subri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_subri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) @@ -959,433 +994,626 @@ DEF_HELPER_FLAGS_4(sve_umini_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_4(sve_umini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) DEF_HELPER_FLAGS_5(gvec_recps_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_recps_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_recps_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_rsqrts_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_rsqrts_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_rsqrts_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_ah_recps_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_recps_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_recps_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_ah_rsqrts_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_rsqrts_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_rsqrts_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_ah_fmax_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmax_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmax_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_ah_fmin_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmin_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmin_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_ah_fmaxp_h, TCG_CALL_NO_RWG, + 
void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmaxp_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fmaxp_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_ah_fminp_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fminp_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fminp_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_faddv_h, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_faddv_s, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_faddv_d, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_fmaxnmv_h, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_fmaxnmv_s, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_fmaxnmv_d, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_fminnmv_h, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_fminnmv_s, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_fminnmv_d, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_fmaxv_h, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_fmaxv_s, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_fmaxv_d, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_fminv_h, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_fminv_s, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve_fminv_d, TCG_CALL_NO_RWG, - i64, ptr, ptr, ptr, i32) + i64, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(sve_ah_fmaxv_h, TCG_CALL_NO_RWG, + i64, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sve_ah_fmaxv_s, TCG_CALL_NO_RWG, + i64, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sve_ah_fmaxv_d, TCG_CALL_NO_RWG, + i64, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(sve_ah_fminv_h, TCG_CALL_NO_RWG, + i64, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sve_ah_fminv_s, TCG_CALL_NO_RWG, + i64, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(sve_ah_fminv_d, TCG_CALL_NO_RWG, + i64, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(sve2p1_faddqv_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve2p1_faddqv_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve2p1_faddqv_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(sve2p1_fmaxnmqv_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve2p1_fmaxnmqv_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve2p1_fmaxnmqv_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(sve2p1_fminnmqv_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve2p1_fminnmqv_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve2p1_fminnmqv_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(sve2p1_fmaxqv_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve2p1_fmaxqv_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) 
+DEF_HELPER_FLAGS_5(sve2p1_fmaxqv_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(sve2p1_fminqv_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve2p1_fminqv_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve2p1_fminqv_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(sve2p1_ah_fmaxqv_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve2p1_ah_fmaxqv_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve2p1_ah_fmaxqv_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(sve2p1_ah_fminqv_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve2p1_ah_fminqv_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve2p1_ah_fminqv_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fadda_h, TCG_CALL_NO_RWG, - i64, i64, ptr, ptr, ptr, i32) + i64, i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fadda_s, TCG_CALL_NO_RWG, - i64, i64, ptr, ptr, ptr, i32) + i64, i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fadda_d, TCG_CALL_NO_RWG, - i64, i64, ptr, ptr, ptr, i32) + i64, i64, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmge0_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmge0_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmge0_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmgt0_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmgt0_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmgt0_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmlt0_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmlt0_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmlt0_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmle0_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmle0_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmle0_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmeq0_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmeq0_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmeq0_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmne0_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmne0_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcmne0_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_fadd_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fadd_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, 
ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fadd_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fadd_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_fsub_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fsub_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fsub_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fsub_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_fmul_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmul_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmul_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmul_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fdiv_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fdiv_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fdiv_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_fmin_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmin_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmin_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmin_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_fmax_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmax_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmax_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmax_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) - + void, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_6(sve_ah_fmin_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmin_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmin_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmin_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_6(sve_ah_fmax_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmax_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmax_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmax_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_6(sve_fminnum_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fminnum_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fminnum_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + 
void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fminnum_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_fmaxnum_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmaxnum_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmaxnum_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmaxnum_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fabd_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fabd_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fabd_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_6(sve_ah_fabd_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fabd_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fabd_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fscalbn_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fscalbn_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fscalbn_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmulx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmulx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmulx_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fadds_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fadds_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fadds_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fsubs_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fsubs_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fsubs_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmuls_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmuls_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmuls_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fsubrs_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fsubrs_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fsubrs_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, 
i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmaxnms_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmaxnms_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmaxnms_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fminnms_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fminnms_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fminnms_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmaxs_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmaxs_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmaxs_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmins_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmins_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmins_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, i64, ptr, i32) + void, ptr, ptr, ptr, i64, fpst, i32) + +DEF_HELPER_FLAGS_6(sve_ah_fmaxs_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmaxs_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmaxs_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, fpst, i32) + +DEF_HELPER_FLAGS_6(sve_ah_fmins_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmins_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmins_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i64, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvt_sh, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvt_dh, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvt_hs, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvt_ds, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvt_hd, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvt_sd, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_bfcvt, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_hh, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_hs, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_ss, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_ds, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_hd, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_sd, TCG_CALL_NO_RWG, - void, 
ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzs_dd, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_hh, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_hs, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_ss, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_ds, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_hd, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_sd, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fcvtzu_dd, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_frint_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_frint_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_frint_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_frintx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_frintx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_frintx_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_frecpx_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_frecpx_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_frecpx_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fsqrt_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fsqrt_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_fsqrt_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_scvt_hh, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_scvt_sh, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_scvt_dh, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_scvt_ss, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_scvt_sd, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_scvt_ds, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_scvt_dd, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_ucvt_hh, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_ucvt_sh, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_ucvt_dh, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, 
fpst, i32) DEF_HELPER_FLAGS_5(sve_ucvt_ss, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_ucvt_sd, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_ucvt_ds, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_ucvt_dd, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmge_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmge_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmge_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmgt_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmgt_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmgt_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmeq_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmeq_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmeq_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmne_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmne_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmne_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmuo_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmuo_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcmuo_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_facge_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_facge_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_facge_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_facgt_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_facgt_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_facgt_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcadd_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcadd_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fcadd_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_b16, 
TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_7(sve_ah_fnmls_zpzzz_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fnmls_zpzzz_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fnmls_zpzzz_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fnmls_zpzzz_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fcmla_zpzzz_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fcmla_zpzzz_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + 
void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fcmla_zpzzz_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) -DEF_HELPER_FLAGS_5(sve_ftmad_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(sve_ftmad_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(sve_ftmad_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_ftmad_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve_ftmad_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(sve_ftmad_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve2_saddl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve2_saddl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) @@ -1427,945 +1655,1015 @@ DEF_HELPER_FLAGS_4(sve2_usubw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve2_usubw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve2_usubw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) 
-DEF_HELPER_FLAGS_4(sve_ld1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) 
-DEF_HELPER_FLAGS_4(sve_ld1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hsu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hdu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hds_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hsu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hdu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hds_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1sds_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1sds_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) 
-DEF_HELPER_FLAGS_4(sve_ldff1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld2qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld2qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, 
i64) +DEF_HELPER_FLAGS_4(sve_ld1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1squ_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1dqu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1squ_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1dqu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld2qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) 
+DEF_HELPER_FLAGS_4(sve_ld4qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld2qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hsu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hdu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hds_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hsu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hdu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hds_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1sds_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1squ_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1dqu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1sds_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1squ_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1dqu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) 
+DEF_HELPER_FLAGS_4(sve_ldff1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hh_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hsu_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hdu_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hss_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hds_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hh_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hsu_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hdu_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hss_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hds_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1ss_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1sdu_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1sds_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1ss_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1sdu_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1sds_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1dd_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1dd_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bss_r, TCG_CALL_NO_WG, 
void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, 
tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hh_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hsu_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hdu_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hss_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hds_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hh_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hsu_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hdu_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hss_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hds_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1ss_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1sdu_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1sds_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1ss_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1sdu_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1sds_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1dd_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1dd_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2hh_le_r, TCG_CALL_NO_WG, 
void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1bh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1bs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1bd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1hs_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1hd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1hs_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1hd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1sd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1sd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) 
-DEF_HELPER_FLAGS_4(sve_st3ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1bh_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1bs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1bd_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1hs_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1hd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1hs_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1hd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1sd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1sd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2dd_le_r, 
TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st2qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st2qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1bh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1bs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1bd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1hs_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1hd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1hs_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1hd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1sd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1sd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1sq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1sq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1dq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1dq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4ss_be_r_mte, 
TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st2qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st2qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1bh_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1bs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1bd_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1hs_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1hd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1hs_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1hd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1sd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1sd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1sq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1sq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1dq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1dq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbsu_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbss_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbsu_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_be_zss, TCG_CALL_NO_WG, - void, 
env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbss_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbdu_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbds_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbdu_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbds_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, 
tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbdu_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbds_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_6(sve_ldqq_le_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_6(sve_ldqq_be_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbsu_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbss_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbsu_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) 
DEF_HELPER_FLAGS_6(sve_ldbss_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbdu_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbds_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbdu_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbds_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbdu_zd_mte, 
TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbds_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_6(sve_ldqq_le_zd_mte, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_6(sve_ldqq_be_zd_mte, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbsu_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbss_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbsu_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbss_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_le_zss, TCG_CALL_NO_WG, - void, 
env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbdu_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbds_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbdu_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbds_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbdu_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) 
DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbds_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbsu_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbss_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbsu_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbss_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbdu_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) 
DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbds_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbdu_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbds_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbdu_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, 
ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbds_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbs_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbs_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbd_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbd_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, 
ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbd_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_6(sve_stqq_le_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_6(sve_stqq_be_zd, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbs_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbs_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbd_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) 
DEF_HELPER_FLAGS_6(sve_stdd_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbd_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbd_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_6(sve_stqq_le_zd_mte, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_6(sve_stqq_be_zd_mte, TCG_CALL_NO_WG, + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) @@ -2582,39 +2880,39 @@ DEF_HELPER_FLAGS_4(sve2_xar_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve2_xar_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, 
ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_h, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_s, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_d, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve2_eor3, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve2_bcax, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) @@ -2682,8 +2980,8 @@ DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_6(fmmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_6(fmmla_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(fmmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(fmmla_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve2_sqrdmlah_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) @@ -2755,20 +3053,20 @@ DEF_HELPER_FLAGS_5(sve2_cdot_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve2_fcvtnt_sh, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve2_fcvtnt_ds, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve_bfcvtnt, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve2_fcvtlt_hs, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(sve2_fcvtlt_sd, TCG_CALL_NO_RWG, - void, ptr, ptr, ptr, ptr, i32) + void, ptr, ptr, ptr, fpst, i32) -DEF_HELPER_FLAGS_5(flogb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(flogb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(flogb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(flogb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(flogb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(flogb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) @@ -2802,3 +3100,69 @@ DEF_HELPER_FLAGS_4(sve2_sqshlu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve2_sqshlu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve2_sqshlu_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve2_sqshlu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve2p1_addqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_addqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_addqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2p1_addqv_d, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_smaxqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_smaxqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_smaxqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_smaxqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_sminqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_sminqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_sminqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_sminqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_umaxqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_umaxqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_umaxqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_umaxqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_uminqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_uminqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_uminqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_uminqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(pext, TCG_CALL_NO_RWG, void, ptr, i32, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_orqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_orqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_orqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_orqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_eorqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_eorqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_eorqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_eorqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_andqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_andqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_andqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_andqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(pmov_pv_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(pmov_pv_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(pmov_pv_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(pmov_vp_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(pmov_vp_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(pmov_vp_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2p1_ld1bb_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
+DEF_HELPER_FLAGS_5(sve2p1_ld1hh_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
+DEF_HELPER_FLAGS_5(sve2p1_ld1hh_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
+DEF_HELPER_FLAGS_5(sve2p1_ld1ss_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
+DEF_HELPER_FLAGS_5(sve2p1_ld1ss_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
+DEF_HELPER_FLAGS_5(sve2p1_ld1dd_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
+DEF_HELPER_FLAGS_5(sve2p1_ld1dd_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
+
+DEF_HELPER_FLAGS_5(sve2p1_st1bb_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
+DEF_HELPER_FLAGS_5(sve2p1_st1hh_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
+DEF_HELPER_FLAGS_5(sve2p1_st1hh_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
+DEF_HELPER_FLAGS_5(sve2p1_st1ss_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
+DEF_HELPER_FLAGS_5(sve2p1_st1ss_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
+DEF_HELPER_FLAGS_5(sve2p1_st1dd_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
+DEF_HELPER_FLAGS_5(sve2p1_st1dd_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64)
diff --git a/target/arm/tcg/helper.h b/target/arm/tcg/helper.h
new file mode 100644
index 00000000000..4da32db9021
--- /dev/null
+++ b/target/arm/tcg/helper.h
@@ -0,0 +1,1218 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+
+DEF_HELPER_3(add_setq, i32, env, i32, i32)
+DEF_HELPER_3(add_saturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
+DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(sdiv, TCG_CALL_NO_RWG, s32, env, s32, s32)
+DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
+
+#define PAS_OP(pfx) \
+    DEF_HELPER_3(pfx ## add8, i32, i32, i32, ptr) \
+    DEF_HELPER_3(pfx ## sub8, i32, i32, i32, ptr) \
+    DEF_HELPER_3(pfx ## sub16, i32, i32, i32, ptr) \
+    DEF_HELPER_3(pfx ## add16, i32, i32, i32, ptr) \
+    DEF_HELPER_3(pfx ## addsubx, i32, i32, i32, ptr) \
+    DEF_HELPER_3(pfx ## subaddx, i32, i32, i32, ptr)
+
+PAS_OP(s)
+PAS_OP(u)
+#undef PAS_OP
+
+#define PAS_OP(pfx) \
+    DEF_HELPER_2(pfx ## add8, i32, i32, i32) \
+    DEF_HELPER_2(pfx ## sub8, i32, i32, i32) \
+    DEF_HELPER_2(pfx ## sub16, i32, i32, i32) \
+    DEF_HELPER_2(pfx ## add16, i32, i32, i32) \
+    DEF_HELPER_2(pfx ## addsubx, i32, i32, i32) \
+    DEF_HELPER_2(pfx ## subaddx, i32, i32, i32)
+PAS_OP(q)
+PAS_OP(sh)
+PAS_OP(uq)
+PAS_OP(uh)
+#undef PAS_OP
+
+DEF_HELPER_3(ssat, i32, env, i32, i32)
+DEF_HELPER_3(usat, i32, env, i32, i32)
+DEF_HELPER_3(ssat16, i32, env, i32, i32)
+DEF_HELPER_3(usat16, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+
+DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
+                   i32, i32, i32, i32)
+DEF_HELPER_2(exception_internal, noreturn, env, i32)
+DEF_HELPER_3(exception_with_syndrome, noreturn, env, i32, i32)
+DEF_HELPER_4(exception_with_syndrome_el, noreturn, env, i32, i32, i32)
+DEF_HELPER_2(exception_bkpt_insn, noreturn, env, i32)
+DEF_HELPER_2(exception_swstep, noreturn, env, i32)
+DEF_HELPER_2(exception_pc_alignment, noreturn, env, vaddr)
+DEF_HELPER_1(setend, void, env)
+DEF_HELPER_2(wfi, void, env, i32)
+DEF_HELPER_1(wfe, void, env)
+DEF_HELPER_2(wfit, void, env, i64)
+DEF_HELPER_1(yield, void, env)
+DEF_HELPER_1(pre_hvc, void, env)
+DEF_HELPER_2(pre_smc, void, env, i32)
+DEF_HELPER_1(vesb, void, env)
+
+DEF_HELPER_3(cpsr_write, void, env, i32, i32)
+DEF_HELPER_2(cpsr_write_eret, void, env, i32)
+DEF_HELPER_1(cpsr_read, i32, env)
+
+DEF_HELPER_3(v7m_msr, void, env, i32, i32)
+DEF_HELPER_2(v7m_mrs, i32, env, i32)
+
+DEF_HELPER_2(v7m_bxns, void, env, i32)
+DEF_HELPER_2(v7m_blxns, void, env, i32)
+
+DEF_HELPER_3(v7m_tt, i32, env, i32, i32)
+
+DEF_HELPER_1(v7m_preserve_fp_state, void, env)
+
+DEF_HELPER_2(v7m_vlstm, void, env, i32)
+DEF_HELPER_2(v7m_vlldm, void, env, i32)
+
+DEF_HELPER_2(v8m_stackcheck, void, env, i32)
+
+DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32)
+
+DEF_HELPER_4(access_check_cp_reg, cptr, env, i32, i32, i32)
+DEF_HELPER_FLAGS_2(lookup_cp_reg,
TCG_CALL_NO_RWG_SE, cptr, env, i32) +DEF_HELPER_FLAGS_2(tidcp_el0, TCG_CALL_NO_WG, void, env, i32) +DEF_HELPER_FLAGS_2(tidcp_el1, TCG_CALL_NO_WG, void, env, i32) +DEF_HELPER_3(set_cp_reg, void, env, cptr, i32) +DEF_HELPER_2(get_cp_reg, i32, env, cptr) +DEF_HELPER_3(set_cp_reg64, void, env, cptr, i64) +DEF_HELPER_2(get_cp_reg64, i64, env, cptr) + +DEF_HELPER_2(get_r13_banked, i32, env, i32) +DEF_HELPER_3(set_r13_banked, void, env, i32, i32) + +DEF_HELPER_3(mrs_banked, i32, env, i32, i32) +DEF_HELPER_4(msr_banked, void, env, i32, i32, i32) + +DEF_HELPER_2(get_user_reg, i32, env, i32) +DEF_HELPER_3(set_user_reg, void, env, i32, i32) + +DEF_HELPER_FLAGS_1(rebuild_hflags_m32_newel, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int) +DEF_HELPER_FLAGS_1(rebuild_hflags_a32_newel, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int) +DEF_HELPER_FLAGS_2(rebuild_hflags_a64, TCG_CALL_NO_RWG, void, env, int) + +DEF_HELPER_FLAGS_5(probe_access, TCG_CALL_NO_WG, void, env, vaddr, i32, i32, i32) + +DEF_HELPER_1(vfp_get_fpscr, i32, env) +DEF_HELPER_2(vfp_set_fpscr, void, env, i32) + +DEF_HELPER_3(vfp_addh, f16, f16, f16, fpst) +DEF_HELPER_3(vfp_adds, f32, f32, f32, fpst) +DEF_HELPER_3(vfp_addd, f64, f64, f64, fpst) +DEF_HELPER_3(vfp_subh, f16, f16, f16, fpst) +DEF_HELPER_3(vfp_subs, f32, f32, f32, fpst) +DEF_HELPER_3(vfp_subd, f64, f64, f64, fpst) +DEF_HELPER_3(vfp_mulh, f16, f16, f16, fpst) +DEF_HELPER_3(vfp_muls, f32, f32, f32, fpst) +DEF_HELPER_3(vfp_muld, f64, f64, f64, fpst) +DEF_HELPER_3(vfp_divh, f16, f16, f16, fpst) +DEF_HELPER_3(vfp_divs, f32, f32, f32, fpst) +DEF_HELPER_3(vfp_divd, f64, f64, f64, fpst) +DEF_HELPER_3(vfp_maxh, f16, f16, f16, fpst) +DEF_HELPER_3(vfp_maxs, f32, f32, f32, fpst) +DEF_HELPER_3(vfp_maxd, f64, f64, f64, fpst) +DEF_HELPER_3(vfp_minh, f16, f16, f16, fpst) +DEF_HELPER_3(vfp_mins, f32, f32, f32, fpst) +DEF_HELPER_3(vfp_mind, f64, f64, f64, fpst) +DEF_HELPER_3(vfp_maxnumh, f16, f16, f16, fpst) +DEF_HELPER_3(vfp_maxnums, f32, f32, f32, fpst) +DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, fpst) +DEF_HELPER_3(vfp_minnumh, f16, f16, f16, fpst) +DEF_HELPER_3(vfp_minnums, f32, f32, f32, fpst) +DEF_HELPER_3(vfp_minnumd, f64, f64, f64, fpst) +DEF_HELPER_2(vfp_sqrth, f16, f16, fpst) +DEF_HELPER_2(vfp_sqrts, f32, f32, fpst) +DEF_HELPER_2(vfp_sqrtd, f64, f64, fpst) +DEF_HELPER_3(vfp_cmph, void, f16, f16, env) +DEF_HELPER_3(vfp_cmps, void, f32, f32, env) +DEF_HELPER_3(vfp_cmpd, void, f64, f64, env) +DEF_HELPER_3(vfp_cmpeh, void, f16, f16, env) +DEF_HELPER_3(vfp_cmpes, void, f32, f32, env) +DEF_HELPER_3(vfp_cmped, void, f64, f64, env) + +DEF_HELPER_2(vfp_fcvtds, f64, f32, fpst) +DEF_HELPER_2(vfp_fcvtsd, f32, f64, fpst) +DEF_HELPER_FLAGS_2(bfcvt, TCG_CALL_NO_RWG, i32, f32, fpst) +DEF_HELPER_FLAGS_2(bfcvt_pair, TCG_CALL_NO_RWG, i32, i64, fpst) + +DEF_HELPER_2(vfp_uitoh, f16, i32, fpst) +DEF_HELPER_2(vfp_uitos, f32, i32, fpst) +DEF_HELPER_2(vfp_uitod, f64, i32, fpst) +DEF_HELPER_2(vfp_sitoh, f16, i32, fpst) +DEF_HELPER_2(vfp_sitos, f32, i32, fpst) +DEF_HELPER_2(vfp_sitod, f64, i32, fpst) + +DEF_HELPER_2(vfp_touih, i32, f16, fpst) +DEF_HELPER_2(vfp_touis, i32, f32, fpst) +DEF_HELPER_2(vfp_touid, i32, f64, fpst) +DEF_HELPER_2(vfp_touizh, i32, f16, fpst) +DEF_HELPER_2(vfp_touizs, i32, f32, fpst) +DEF_HELPER_2(vfp_touizd, i32, f64, fpst) +DEF_HELPER_2(vfp_tosih, s32, f16, fpst) +DEF_HELPER_2(vfp_tosis, s32, f32, fpst) +DEF_HELPER_2(vfp_tosid, s32, f64, fpst) +DEF_HELPER_2(vfp_tosizh, s32, f16, fpst) 
+DEF_HELPER_2(vfp_tosizs, s32, f32, fpst) +DEF_HELPER_2(vfp_tosizd, s32, f64, fpst) + +DEF_HELPER_3(vfp_toshh_round_to_zero, i32, f16, i32, fpst) +DEF_HELPER_3(vfp_toslh_round_to_zero, i32, f16, i32, fpst) +DEF_HELPER_3(vfp_touhh_round_to_zero, i32, f16, i32, fpst) +DEF_HELPER_3(vfp_toulh_round_to_zero, i32, f16, i32, fpst) +DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, fpst) +DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, fpst) +DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, fpst) +DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, fpst) +DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, fpst) +DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, fpst) +DEF_HELPER_3(vfp_tosqd_round_to_zero, i64, f64, i32, fpst) +DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, fpst) +DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, fpst) +DEF_HELPER_3(vfp_touqd_round_to_zero, i64, f64, i32, fpst) +DEF_HELPER_3(vfp_touhh, i32, f16, i32, fpst) +DEF_HELPER_3(vfp_toshh, i32, f16, i32, fpst) +DEF_HELPER_3(vfp_toulh, i32, f16, i32, fpst) +DEF_HELPER_3(vfp_toslh, i32, f16, i32, fpst) +DEF_HELPER_3(vfp_touqh, i64, f16, i32, fpst) +DEF_HELPER_3(vfp_tosqh, i64, f16, i32, fpst) +DEF_HELPER_3(vfp_toshs, i32, f32, i32, fpst) +DEF_HELPER_3(vfp_tosls, i32, f32, i32, fpst) +DEF_HELPER_3(vfp_tosqs, i64, f32, i32, fpst) +DEF_HELPER_3(vfp_touhs, i32, f32, i32, fpst) +DEF_HELPER_3(vfp_touls, i32, f32, i32, fpst) +DEF_HELPER_3(vfp_touqs, i64, f32, i32, fpst) +DEF_HELPER_3(vfp_toshd, i64, f64, i32, fpst) +DEF_HELPER_3(vfp_tosld, i64, f64, i32, fpst) +DEF_HELPER_3(vfp_tosqd, i64, f64, i32, fpst) +DEF_HELPER_3(vfp_touhd, i64, f64, i32, fpst) +DEF_HELPER_3(vfp_tould, i64, f64, i32, fpst) +DEF_HELPER_3(vfp_touqd, i64, f64, i32, fpst) +DEF_HELPER_3(vfp_shtos, f32, i32, i32, fpst) +DEF_HELPER_3(vfp_sltos, f32, i32, i32, fpst) +DEF_HELPER_3(vfp_sqtos, f32, i64, i32, fpst) +DEF_HELPER_3(vfp_uhtos, f32, i32, i32, fpst) +DEF_HELPER_3(vfp_ultos, f32, i32, i32, fpst) +DEF_HELPER_3(vfp_uqtos, f32, i64, i32, fpst) +DEF_HELPER_3(vfp_shtod, f64, i64, i32, fpst) +DEF_HELPER_3(vfp_sltod, f64, i64, i32, fpst) +DEF_HELPER_3(vfp_sqtod, f64, i64, i32, fpst) +DEF_HELPER_3(vfp_uhtod, f64, i64, i32, fpst) +DEF_HELPER_3(vfp_ultod, f64, i64, i32, fpst) +DEF_HELPER_3(vfp_uqtod, f64, i64, i32, fpst) +DEF_HELPER_3(vfp_shtoh, f16, i32, i32, fpst) +DEF_HELPER_3(vfp_uhtoh, f16, i32, i32, fpst) +DEF_HELPER_3(vfp_sltoh, f16, i32, i32, fpst) +DEF_HELPER_3(vfp_ultoh, f16, i32, i32, fpst) +DEF_HELPER_3(vfp_sqtoh, f16, i64, i32, fpst) +DEF_HELPER_3(vfp_uqtoh, f16, i64, i32, fpst) + +DEF_HELPER_3(vfp_shtos_round_to_nearest, f32, i32, i32, fpst) +DEF_HELPER_3(vfp_sltos_round_to_nearest, f32, i32, i32, fpst) +DEF_HELPER_3(vfp_uhtos_round_to_nearest, f32, i32, i32, fpst) +DEF_HELPER_3(vfp_ultos_round_to_nearest, f32, i32, i32, fpst) +DEF_HELPER_3(vfp_shtod_round_to_nearest, f64, i64, i32, fpst) +DEF_HELPER_3(vfp_sltod_round_to_nearest, f64, i64, i32, fpst) +DEF_HELPER_3(vfp_uhtod_round_to_nearest, f64, i64, i32, fpst) +DEF_HELPER_3(vfp_ultod_round_to_nearest, f64, i64, i32, fpst) +DEF_HELPER_3(vfp_shtoh_round_to_nearest, f16, i32, i32, fpst) +DEF_HELPER_3(vfp_uhtoh_round_to_nearest, f16, i32, i32, fpst) +DEF_HELPER_3(vfp_sltoh_round_to_nearest, f16, i32, i32, fpst) +DEF_HELPER_3(vfp_ultoh_round_to_nearest, f16, i32, i32, fpst) + +DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, fpst) + +DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f32, TCG_CALL_NO_RWG, f32, f16, fpst, i32) +DEF_HELPER_FLAGS_3(vfp_fcvt_f32_to_f16, TCG_CALL_NO_RWG, f16, f32, 
fpst, i32) +DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, f16, fpst, i32) +DEF_HELPER_FLAGS_3(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, f16, f64, fpst, i32) + +DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, fpst) +DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, fpst) +DEF_HELPER_4(vfp_muladdh, f16, f16, f16, f16, fpst) + +DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, fpst) +DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, fpst) +DEF_HELPER_FLAGS_2(recpe_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst) +DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, fpst) +DEF_HELPER_FLAGS_2(rsqrte_f16, TCG_CALL_NO_RWG, f16, f16, fpst) +DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, fpst) +DEF_HELPER_FLAGS_2(rsqrte_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst) +DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, fpst) +DEF_HELPER_FLAGS_1(recpe_u32, TCG_CALL_NO_RWG, i32, i32) +DEF_HELPER_FLAGS_1(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32) +DEF_HELPER_FLAGS_4(neon_tbl, TCG_CALL_NO_RWG, i64, env, i32, i64, i64) + +DEF_HELPER_3(shl_cc, i32, env, i32, i32) +DEF_HELPER_3(shr_cc, i32, env, i32, i32) +DEF_HELPER_3(sar_cc, i32, env, i32, i32) +DEF_HELPER_3(ror_cc, i32, env, i32, i32) + +DEF_HELPER_FLAGS_2(rinth_exact, TCG_CALL_NO_RWG, f16, f16, fpst) +DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, fpst) +DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, fpst) +DEF_HELPER_FLAGS_2(rinth, TCG_CALL_NO_RWG, f16, f16, fpst) +DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, fpst) +DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, fpst) + +DEF_HELPER_FLAGS_2(vjcvt, TCG_CALL_NO_RWG, i32, f64, env) +DEF_HELPER_FLAGS_2(fjcvtzs, TCG_CALL_NO_RWG, i64, f64, fpst) + +DEF_HELPER_FLAGS_3(check_hcr_el2_trap, TCG_CALL_NO_WG, void, env, i32, i32) + +/* neon_helper.c */ +DEF_HELPER_2(neon_pmin_u8, i32, i32, i32) +DEF_HELPER_2(neon_pmin_s8, i32, i32, i32) +DEF_HELPER_2(neon_pmin_u16, i32, i32, i32) +DEF_HELPER_2(neon_pmin_s16, i32, i32, i32) +DEF_HELPER_2(neon_pmax_u8, i32, i32, i32) +DEF_HELPER_2(neon_pmax_s8, i32, i32, i32) +DEF_HELPER_2(neon_pmax_u16, i32, i32, i32) +DEF_HELPER_2(neon_pmax_s16, i32, i32, i32) + +DEF_HELPER_2(neon_shl_u16, i32, i32, i32) +DEF_HELPER_2(neon_shl_s16, i32, i32, i32) +DEF_HELPER_2(neon_rshl_u8, i32, i32, i32) +DEF_HELPER_2(neon_rshl_s8, i32, i32, i32) +DEF_HELPER_2(neon_rshl_u16, i32, i32, i32) +DEF_HELPER_2(neon_rshl_s16, i32, i32, i32) +DEF_HELPER_2(neon_rshl_u32, i32, i32, i32) +DEF_HELPER_2(neon_rshl_s32, i32, i32, i32) +DEF_HELPER_2(neon_rshl_u64, i64, i64, i64) +DEF_HELPER_2(neon_rshl_s64, i64, i64, i64) +DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64) +DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64) +DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32) +DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64) +DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64) 
+DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64) +DEF_HELPER_FLAGS_5(neon_sqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_sqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_sqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_sqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_uqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_uqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_uqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_uqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_sqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_sqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_sqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_sqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_uqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_uqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_uqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(neon_uqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(neon_sqshli_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(neon_sqshli_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(neon_sqshli_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(neon_sqshli_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(neon_uqshli_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(neon_uqshli_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(neon_uqshli_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(neon_uqshli_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(neon_sqshlui_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(neon_sqshlui_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(neon_sqshlui_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(neon_sqshlui_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(gvec_srshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_srshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_srshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_srshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_urshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_urshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_urshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_urshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sme2_srshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_srshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_srshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sme2_urshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_urshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme2_urshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_2(neon_add_u8, i32, i32, i32) +DEF_HELPER_2(neon_add_u16, i32, i32, i32) +DEF_HELPER_2(neon_sub_u8, i32, i32, i32) +DEF_HELPER_2(neon_sub_u16, 
i32, i32, i32) +DEF_HELPER_2(neon_mul_u8, i32, i32, i32) +DEF_HELPER_2(neon_mul_u16, i32, i32, i32) + +DEF_HELPER_2(neon_tst_u8, i32, i32, i32) +DEF_HELPER_2(neon_tst_u16, i32, i32, i32) +DEF_HELPER_2(neon_tst_u32, i32, i32, i32) + +DEF_HELPER_1(neon_clz_u8, i32, i32) +DEF_HELPER_1(neon_clz_u16, i32, i32) +DEF_HELPER_1(neon_cls_s8, i32, i32) +DEF_HELPER_1(neon_cls_s16, i32, i32) +DEF_HELPER_1(neon_cls_s32, i32, i32) +DEF_HELPER_FLAGS_3(gvec_cnt_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32) +DEF_HELPER_4(neon_qrdmlah_s16, i32, env, i32, i32, i32) +DEF_HELPER_4(neon_qrdmlsh_s16, i32, env, i32, i32, i32) +DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32) +DEF_HELPER_4(neon_qrdmlah_s32, i32, env, s32, s32, s32) +DEF_HELPER_4(neon_qrdmlsh_s32, i32, env, s32, s32, s32) + +DEF_HELPER_1(neon_narrow_u8, i64, i64) +DEF_HELPER_1(neon_narrow_u16, i64, i64) +DEF_HELPER_2(neon_unarrow_sat8, i64, env, i64) +DEF_HELPER_2(neon_narrow_sat_u8, i64, env, i64) +DEF_HELPER_2(neon_narrow_sat_s8, i64, env, i64) +DEF_HELPER_2(neon_unarrow_sat16, i64, env, i64) +DEF_HELPER_2(neon_narrow_sat_u16, i64, env, i64) +DEF_HELPER_2(neon_narrow_sat_s16, i64, env, i64) +DEF_HELPER_2(neon_unarrow_sat32, i64, env, i64) +DEF_HELPER_2(neon_narrow_sat_u32, i64, env, i64) +DEF_HELPER_2(neon_narrow_sat_s32, i64, env, i64) +DEF_HELPER_1(neon_narrow_high_u8, i32, i64) +DEF_HELPER_1(neon_narrow_high_u16, i32, i64) +DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64) +DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64) +DEF_HELPER_1(neon_widen_u8, i64, i32) +DEF_HELPER_1(neon_widen_s8, i64, i32) +DEF_HELPER_1(neon_widen_u16, i64, i32) +DEF_HELPER_1(neon_widen_s16, i64, i32) + +DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64) +DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64) +DEF_HELPER_2(neon_abdl_u16, i64, i32, i32) +DEF_HELPER_2(neon_abdl_s16, i64, i32, i32) +DEF_HELPER_2(neon_abdl_u32, i64, i32, i32) +DEF_HELPER_2(neon_abdl_s32, i64, i32, i32) +DEF_HELPER_2(neon_abdl_u64, i64, i32, i32) +DEF_HELPER_2(neon_abdl_s64, i64, i32, i32) +DEF_HELPER_2(neon_mull_u8, i64, i32, i32) +DEF_HELPER_2(neon_mull_s8, i64, i32, i32) +DEF_HELPER_2(neon_mull_u16, i64, i32, i32) +DEF_HELPER_2(neon_mull_s16, i64, i32, i32) + +DEF_HELPER_1(neon_negl_u16, i64, i64) +DEF_HELPER_1(neon_negl_u32, i64, i64) + +DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32) +DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32) +DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32) +DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32) +DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32) +DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32) +DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64) + +DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, fpst) +DEF_HELPER_3(neon_cge_f32, i32, i32, i32, fpst) +DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, fpst) +DEF_HELPER_3(neon_acge_f32, i32, i32, i32, fpst) +DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, fpst) +DEF_HELPER_3(neon_acge_f64, i64, i64, i64, fpst) +DEF_HELPER_3(neon_acgt_f64, i64, i64, i64, fpst) + +/* iwmmxt_helper.c */ 
+DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64) +DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64) +DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64) +DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64) +DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64) + +#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \ +DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \ +DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \ +DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \ + +DEF_IWMMXT_HELPER_SIZE_ENV(unpackl) +DEF_IWMMXT_HELPER_SIZE_ENV(unpackh) + +DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64) + +DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq) +DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu) +DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts) + +DEF_IWMMXT_HELPER_SIZE_ENV(mins) +DEF_IWMMXT_HELPER_SIZE_ENV(minu) +DEF_IWMMXT_HELPER_SIZE_ENV(maxs) +DEF_IWMMXT_HELPER_SIZE_ENV(maxu) + +DEF_IWMMXT_HELPER_SIZE_ENV(subn) +DEF_IWMMXT_HELPER_SIZE_ENV(addn) +DEF_IWMMXT_HELPER_SIZE_ENV(subu) +DEF_IWMMXT_HELPER_SIZE_ENV(addu) +DEF_IWMMXT_HELPER_SIZE_ENV(subs) +DEF_IWMMXT_HELPER_SIZE_ENV(adds) + +DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64) + +DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32) +DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32) + +DEF_HELPER_1(iwmmxt_bcstb, i64, i32) +DEF_HELPER_1(iwmmxt_bcstw, i64, i32) +DEF_HELPER_1(iwmmxt_bcstl, i64, i32) + +DEF_HELPER_1(iwmmxt_addcb, i64, i64) +DEF_HELPER_1(iwmmxt_addcw, i64, i64) +DEF_HELPER_1(iwmmxt_addcl, i64, i64) + +DEF_HELPER_1(iwmmxt_msbb, i32, i64) +DEF_HELPER_1(iwmmxt_msbw, i32, i64) +DEF_HELPER_1(iwmmxt_msbl, i32, i64) + +DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32) + +DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64) + +DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32) +DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32) +DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32) + +DEF_HELPER_FLAGS_2(neon_unzip8, TCG_CALL_NO_RWG, void, ptr, ptr) 
+DEF_HELPER_FLAGS_2(neon_unzip16, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_qunzip8, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_qunzip16, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_qunzip32, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_zip8, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_zip16, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_qzip8, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_qzip16, TCG_CALL_NO_RWG, void, ptr, ptr) +DEF_HELPER_FLAGS_2(neon_qzip32, TCG_CALL_NO_RWG, void, ptr, ptr) + +DEF_HELPER_FLAGS_4(crypto_aese, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_aesd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(crypto_aesmc, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(crypto_aesimc, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(crypto_sha1su0, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_sha1c, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_sha1p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_sha1m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(crypto_sha1h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(crypto_sha1su1, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(crypto_sha256h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_sha512su1, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(crypto_sm3tt1a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_sm3tt1b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_sm3tt2a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_sm3tt2b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_sm3partw1, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_sm3partw2, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(crypto_sm4e, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(crypto_sm4ekey, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(crypto_rax1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) +DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) + +DEF_HELPER_FLAGS_5(gvec_qrdmlah_s16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sve2_sqrdmlah_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve2_sqrdmlah_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) 
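/*
 * Sketch only: a DEF_HELPER_FLAGS_5(..., void, ptr, ptr, ptr, ptr, i32)
 * declaration such as the sve2_sqrdmlah group above corresponds to a C
 * function whose final argument is the simd descriptor. Assuming the
 * usual tcg-gvec-desc.h accessors and the clear_tail() pattern used in
 * vec_helper.c, an implementation has this hypothetical shape:
 *
 *     void HELPER(gvec_example)(void *vd, void *vn, void *vm, void *va,
 *                               uint32_t desc)
 *     {
 *         intptr_t i, opr_sz = simd_oprsz(desc);
 *
 *         for (i = 0; i < opr_sz / 4; i++) {
 *             // hypothetical per-element operation on 32-bit lanes
 *             ((uint32_t *)vd)[i] = ((uint32_t *)vn)[i] + ((uint32_t *)vm)[i];
 *         }
 *         clear_tail(vd, opr_sz, simd_maxsz(desc));
 *     }
 */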
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_sdot_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_udot_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sdot_4h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_udot_4h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_usdot_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_sdot_2h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_udot_2h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_sdot_idx_4b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_udot_idx_4b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sdot_idx_4h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_udot_idx_4h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sudot_idx_4b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_usdot_idx_4b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_sdot_idx_2h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_udot_idx_2h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_6(gvec_fcmlah, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_fcmlah_idx, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_fcmlas, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_fcmlas_idx, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_fcmlad, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_sstoh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_sitos, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_ustoh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_uitos, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_tosszh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_tosizs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_touszh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_touizs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_vcvt_sf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vcvt_uf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vcvt_rz_fs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vcvt_rz_fu, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_vcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vcvt_uh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vcvt_rz_hs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) 
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_hu, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_vcvt_sd, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vcvt_ud, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vcvt_rz_ds, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vcvt_rz_du, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sd, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ud, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ss, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vcvt_rm_us, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vcvt_rm_uh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_vrint_rm_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vrint_rm_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_vrintx_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_vrintx_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_frecpe_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_frsqrte_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_frsqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_frsqrte_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_frsqrte_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_fcgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_fcgt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_fcgt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_fcge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_fcge0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_fcge0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_fceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_fceq0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_fceq0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_fcle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_fcle0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_fcle0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_fclt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_fclt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_4(gvec_fclt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fadd_b16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_bfadd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fsub_b16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) 
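/*
 * Illustrative note: the 'fpst' argument type in the declarations above
 * and below is expected to resolve to a float_status pointer, so e.g.
 * gvec_fadd_h should have a prototype roughly of the form:
 *
 *     void HELPER(gvec_fadd_h)(void *vd, void *vn, void *vm,
 *                              float_status *stat, uint32_t desc);
 *
 * i.e. the softfloat rounding/exception state is passed in explicitly
 * rather than being looked up from CPUARMState inside the helper.
 */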
+DEF_HELPER_FLAGS_5(gvec_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_bfsub, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fmul_b16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_ah_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fceq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fceq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fceq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fcge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fcge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fcge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fcgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fcgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fcgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_facge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_facge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_facge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_facgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_facgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_facgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fmaxnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmaxnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmaxnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fminnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_recps_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_recps_nf_s, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fmla_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmla_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fmls_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmls_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_vfma_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_vfma_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_vfma_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_bfmla, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_bfmls, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_ah_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ah_bfmls, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_ftsmul_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fmul_idx_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmul_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmul_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmul_idx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_6(gvec_fmla_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_fmla_idx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_bfmla_idx, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_6(gvec_fmls_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_fmls_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_fmls_idx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_bfmls_idx, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_d, TCG_CALL_NO_RWG, + 
void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_ah_bfmls_idx, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_uqadd_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqadd_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqadd_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqadd_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqadd_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqadd_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqadd_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqadd_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqsub_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqsub_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqsub_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uqsub_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqsub_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqsub_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_usqadd_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_usqadd_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_usqadd_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_usqadd_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_suqadd_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_suqadd_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_suqadd_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_suqadd_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_fmlal_a32, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_fmlal_a64, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a32, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a64, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_2(frint32_s, TCG_CALL_NO_RWG, f32, f32, fpst) +DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, fpst) +DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, fpst) +DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, fpst) + +DEF_HELPER_FLAGS_3(gvec_ceq0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_ceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_clt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_clt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_cle0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_cle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_cgt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_cgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_cge0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_cge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_smulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, 
ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_umulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_ssra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_ssra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_ssra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_ssra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_usra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_usra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_usra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_usra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_srsra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_srsra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_srsra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_srsra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_ursra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_ursra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_ursra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_ursra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_sri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_sli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sli_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sli_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_sli_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_sabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
+DEF_HELPER_FLAGS_4(gvec_sabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_uabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_uabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_uabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_uabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_saba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_saba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_saba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_saba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_uaba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_uaba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_uaba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_uaba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_mul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_mul_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_mul_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_mla_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_mla_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_mla_idx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_mls_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_mls_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_mls_idx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(neon_sqdmulh_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(neon_sqdmulh_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve2_sqdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2_sqdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2_sqdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2_sqdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + 
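/*
 * These out-of-line helpers are reached from the translators via the
 * generic gvec expanders. A minimal sketch of one call site, with
 * hypothetical DisasContext/register-number variables (s, rd, rn, rm,
 * vsz), might look like:
 *
 *     tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
 *                        vec_full_reg_offset(s, rn),
 *                        vec_full_reg_offset(s, rm),
 *                        vsz, vsz, 0, gen_helper_sve2_sqdmulh_b);
 *
 * TCG_CALL_NO_RWG in the declarations asserts that the helper neither
 * reads nor writes TCG globals, which lets the optimizer keep guest
 * register values live in host registers across the call.
 */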
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(sve2_fmlal_zzzw_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_6(sve2_fmlal_zzxw_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_smmla_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_ummla_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_usmmla_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_6(gvec_bfdot, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_6(gvec_bfdot_idx, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_6(sme2_bfvdot_idx, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_6(gvec_bfmmla, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_bfmlsl, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_ah_bfmlsl, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_bfmlsl_idx, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(gvec_ah_bfmlsl_idx, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_faddp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_faddp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_faddp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fmaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmaxp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fminp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fmaxnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmaxnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, 
fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmaxnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_5(gvec_fminnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fminnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fminnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +DEF_HELPER_FLAGS_4(gvec_addp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_addp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_addp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_addp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_smaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_smaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_sminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_umaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_umaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_uminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_uminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_uminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(gvec_urecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(gvec_ursqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(sme2_luti2_1b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(sme2_luti2_1h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(sme2_luti2_1s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(sme2_luti2_2b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(sme2_luti2_2h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(sme2_luti2_2s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(sme2_luti2_4b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(sme2_luti2_4h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(sme2_luti2_4s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(sme2_luti4_1b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(sme2_luti4_1h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(sme2_luti4_1s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(sme2_luti4_2b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(sme2_luti4_2h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(sme2_luti4_2s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(sme2_luti4_4h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(sme2_luti4_4s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c index f03977b4b00..59ab5263753 100644 --- a/target/arm/tcg/hflags.c +++ b/target/arm/tcg/hflags.c @@ -9,9 +9,13 @@ #include "cpu.h" #include "internals.h" #include "cpu-features.h" -#include "exec/helper-proto.h" +#include "exec/translation-block.h" +#include "accel/tcg/cpu-ops.h" #include "cpregs.h" +#define HELPER_H "tcg/helper.h" +#include "exec/helper-proto.h.inc" + static inline bool 
fgt_svc(CPUARMState *env, int el) { /* @@ -63,6 +67,15 @@ static bool aprofile_require_alignment(CPUARMState *env, int el, uint64_t sctlr) #endif } +bool access_secure_reg(CPUARMState *env) +{ + bool ret = (arm_feature(env, ARM_FEATURE_EL3) && + !arm_el_is_aa64(env, 3) && + !(env->cp15.scr_el3 & SCR_NS)); + + return ret; +} + static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el, ARMMMUIdx mmu_idx, CPUARMTBFlags flags) @@ -201,6 +214,31 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el, return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); } +/* + * Return the exception level to which exceptions should be taken for ZT0. + * C.f. the ARM pseudocode function CheckSMEZT0Enabled, after the ZA check. + */ +static int zt0_exception_el(CPUARMState *env, int el) +{ +#ifndef CONFIG_USER_ONLY + if (el <= 1 + && !el_is_in_host(env, el) + && !FIELD_EX64(env->vfp.smcr_el[1], SMCR, EZT0)) { + return 1; + } + if (el <= 2 + && arm_is_el2_enabled(env) + && !FIELD_EX64(env->vfp.smcr_el[2], SMCR, EZT0)) { + return 2; + } + if (arm_feature(env, ARM_FEATURE_EL3) + && !FIELD_EX64(env->vfp.smcr_el[3], SMCR, EZT0)) { + return 3; + } +#endif + return 0; +} + static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, ARMMMUIdx mmu_idx) { @@ -256,7 +294,14 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, DP_TBFLAG_A64(flags, PSTATE_SM, 1); DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el)); } - DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA)); + + if (FIELD_EX64(env->svcr, SVCR, ZA)) { + DP_TBFLAG_A64(flags, PSTATE_ZA, 1); + if (cpu_isar_feature(aa64_sme2, env_archcpu(env))) { + int zt0_el = zt0_exception_el(env, el); + DP_TBFLAG_A64(flags, ZT0EXC_EL, zt0_el); + } + } } sctlr = regime_sctlr(env, stage1); @@ -404,6 +449,19 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx)); } + if (env->vfp.fpcr & FPCR_AH) { + DP_TBFLAG_A64(flags, AH, 1); + } + if (env->vfp.fpcr & FPCR_NEP) { + /* + * In streaming-SVE without FA64, NEP behaves as if zero; + * compare pseudocode IsMerging() + */ + if (!(EX_TBFLAG_A64(flags, PSTATE_SM) && !sme_fa64(env, el))) { + DP_TBFLAG_A64(flags, NEP, 1); + } + } + return rebuild_hflags_common(env, fp_el, mmu_idx, flags); } @@ -476,7 +534,7 @@ void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el) env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx); } -void assert_hflags_rebuild_correctly(CPUARMState *env) +static void assert_hflags_rebuild_correctly(CPUARMState *env) { #ifdef CONFIG_DEBUG_TCG CPUARMTBFlags c = env->hflags; @@ -484,10 +542,123 @@ void assert_hflags_rebuild_correctly(CPUARMState *env) if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) { fprintf(stderr, "TCG hflags mismatch " - "(current:(0x%08x,0x" TARGET_FMT_lx ")" - " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n", + "(current:(0x%08x,0x%016" PRIx64 ")" + " rebuilt:(0x%08x,0x%016" PRIx64 ")\n", c.flags, c.flags2, r.flags, r.flags2); abort(); } #endif } + +static bool mve_no_pred(CPUARMState *env) +{ + /* + * Return true if there is definitely no predication of MVE + * instructions by VPR or LTPSIZE. (Returning false even if there + * isn't any predication is OK; generated code will just be + * a little worse.) + * If the CPU does not implement MVE then this TB flag is always 0. 
+ * + * NOTE: if you change this logic, the "recalculate s->mve_no_pred" + * logic in gen_update_fp_context() needs to be updated to match. + * + * We do not include the effect of the ECI bits here -- they are + * tracked in other TB flags. This simplifies the logic for + * "when did we emit code that changes the MVE_NO_PRED TB flag + * and thus need to end the TB?". + */ + if (cpu_isar_feature(aa32_mve, env_archcpu(env))) { + return false; + } + if (env->v7m.vpr) { + return false; + } + if (env->v7m.ltpsize < 4) { + return false; + } + return true; +} + +TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs) +{ + CPUARMState *env = cpu_env(cs); + CPUARMTBFlags flags; + vaddr pc; + + assert_hflags_rebuild_correctly(env); + flags = env->hflags; + + if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) { + pc = env->pc; + if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { + DP_TBFLAG_A64(flags, BTYPE, env->btype); + } + } else { + pc = env->regs[15]; + + if (arm_feature(env, ARM_FEATURE_M)) { + if (arm_feature(env, ARM_FEATURE_M_SECURITY) && + FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) + != env->v7m.secure) { + DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1); + } + + if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) && + (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) || + (env->v7m.secure && + !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) { + /* + * ASPEN is set, but FPCA/SFPA indicate that there is no + * active FP context; we must create a new FP context before + * executing any FP insn. + */ + DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1); + } + + bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; + if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { + DP_TBFLAG_M32(flags, LSPACT, 1); + } + + if (mve_no_pred(env)) { + DP_TBFLAG_M32(flags, MVE_NO_PRED, 1); + } + } else { + /* + * Note that XSCALE_CPAR shares bits with VECSTRIDE. + * Note that VECLEN+VECSTRIDE are RES0 for M-profile. + */ + if (arm_feature(env, ARM_FEATURE_XSCALE)) { + DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar); + } else { + DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len); + DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride); + } + if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) { + DP_TBFLAG_A32(flags, VFPEN, 1); + } + } + + DP_TBFLAG_AM32(flags, THUMB, env->thumb); + DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits); + } + + /* + * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine + * states defined in the ARM ARM for software singlestep: + * SS_ACTIVE PSTATE.SS State + * 0 x Inactive (the TB flag for SS is always 0) + * 1 0 Active-pending + * 1 1 Active-not-pending + * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB. + */ + if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) { + DP_TBFLAG_ANY(flags, PSTATE__SS, 1); + } + + return (TCGTBCPUState){ + .pc = pc, + .flags = flags.flags, + .cs_base = flags.flags2, + }; +} diff --git a/target/arm/tcg/iwmmxt_helper.c b/target/arm/tcg/iwmmxt_helper.c index 610b1b2103e..ba054b6b4db 100644 --- a/target/arm/tcg/iwmmxt_helper.c +++ b/target/arm/tcg/iwmmxt_helper.c @@ -22,7 +22,9 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/helper-proto.h" + +#define HELPER_H "tcg/helper.h" +#include "exec/helper-proto.h.inc" /* iwMMXt macros extracted from GNU gdb. 
*/ diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c index 23d7f730357..28307b56151 100644 --- a/target/arm/tcg/m_helper.c +++ b/target/arm/tcg/m_helper.c @@ -15,10 +15,9 @@ #include "qemu/main-loop.h" #include "qemu/bitops.h" #include "qemu/log.h" -#include "exec/exec-all.h" #include "exec/page-protection.h" #ifdef CONFIG_TCG -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "semihosting/common-semi.h" #endif #if !defined(CONFIG_USER_ONLY) @@ -222,7 +221,7 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value, int exc; bool exc_secure; - if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) { + if (get_phys_addr(env, addr, MMU_DATA_STORE, 0, mmu_idx, &res, &fi)) { /* MPU/SAU lookup failed */ if (fi.type == ARMFault_QEMU_SFault) { if (mode == STACK_LAZYFP) { @@ -311,7 +310,7 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr, bool exc_secure; uint32_t value; - if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) { + if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) { /* MPU/SAU lookup failed */ if (fi.type == ARMFault_QEMU_SFault) { qemu_log_mask(CPU_LOG_INT, @@ -633,8 +632,11 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) } /* Note that these stores can throw exceptions on MPU faults */ - cpu_stl_data_ra(env, sp, nextinst, GETPC()); - cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC()); + ARMMMUIdx mmu_idx = arm_mmu_idx(env); + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, + arm_to_core_mmu_idx(mmu_idx)); + cpu_stl_mmu(env, sp, nextinst, oi, GETPC()); + cpu_stl_mmu(env, sp + 4, saved_psr, oi, GETPC()); env->regs[13] = sp; env->regs[14] = 0xfeffffff; @@ -1049,6 +1051,9 @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr) bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK; uintptr_t ra = GETPC(); + ARMMMUIdx mmu_idx = arm_mmu_idx(env); + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, + arm_to_core_mmu_idx(mmu_idx)); assert(env->v7m.secure); @@ -1074,7 +1079,7 @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr) * Note that we do not use v7m_stack_write() here, because the * accesses should not set the FSR bits for stacking errors if they * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK - * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions + * or AccType_LAZYFP). Faults in cpu_stl_mmu() will throw exceptions * and longjmp out. 
*/ if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) { @@ -1090,12 +1095,12 @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr) if (i >= 16) { faddr += 8; /* skip the slot for the FPSCR */ } - cpu_stl_data_ra(env, faddr, slo, ra); - cpu_stl_data_ra(env, faddr + 4, shi, ra); + cpu_stl_mmu(env, faddr, slo, oi, ra); + cpu_stl_mmu(env, faddr + 4, shi, oi, ra); } - cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra); + cpu_stl_mmu(env, fptr + 0x40, vfp_get_fpscr(env), oi, ra); if (cpu_isar_feature(aa32_mve, cpu)) { - cpu_stl_data_ra(env, fptr + 0x44, env->v7m.vpr, ra); + cpu_stl_mmu(env, fptr + 0x44, env->v7m.vpr, oi, ra); } /* @@ -1122,6 +1127,9 @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr) { ARMCPU *cpu = env_archcpu(env); uintptr_t ra = GETPC(); + ARMMMUIdx mmu_idx = arm_mmu_idx(env); + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, + arm_to_core_mmu_idx(mmu_idx)); /* fptr is the value of Rn, the frame pointer we load the FP regs from */ assert(env->v7m.secure); @@ -1156,16 +1164,16 @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr) faddr += 8; /* skip the slot for the FPSCR and VPR */ } - slo = cpu_ldl_data_ra(env, faddr, ra); - shi = cpu_ldl_data_ra(env, faddr + 4, ra); + slo = cpu_ldl_mmu(env, faddr, oi, ra); + shi = cpu_ldl_mmu(env, faddr + 4, oi, ra); dn = (uint64_t) shi << 32 | slo; *aa32_vfp_dreg(env, i / 2) = dn; } - fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra); + fpscr = cpu_ldl_mmu(env, fptr + 0x40, oi, ra); vfp_set_fpscr(env, fpscr); if (cpu_isar_feature(aa32_mve, cpu)) { - env->v7m.vpr = cpu_ldl_data_ra(env, fptr + 0x44, ra); + env->v7m.vpr = cpu_ldl_mmu(env, fptr + 0x44, oi, ra); } } @@ -1938,7 +1946,7 @@ static bool do_v7m_function_return(ARMCPU *cpu) * do them as secure, so work out what MMU index that is. 
*/ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); - oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx)); + oi = make_memop_idx(MO_LEUL | MO_ALIGN, arm_to_core_mmu_idx(mmu_idx)); newpc = cpu_ldl_mmu(env, frameptr, oi, 0); newpsr = cpu_ldl_mmu(env, frameptr + 4, oi, 0); @@ -2009,7 +2017,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure, "...really SecureFault with SFSR.INVEP\n"); return false; } - if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) { + if (get_phys_addr(env, addr, MMU_INST_FETCH, 0, mmu_idx, &res, &fi)) { /* the MPU lookup failed */ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure); @@ -2045,7 +2053,7 @@ static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx, ARMMMUFaultInfo fi = {}; uint32_t value; - if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) { + if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) { /* MPU/SAU lookup failed */ if (fi.type == ARMFault_QEMU_SFault) { qemu_log_mask(CPU_LOG_INT, diff --git a/target/arm/tcg/meson.build b/target/arm/tcg/meson.build index 508932a249f..895facdc30b 100644 --- a/target/arm/tcg/meson.build +++ b/target/arm/tcg/meson.build @@ -30,14 +30,9 @@ arm_ss.add(files( 'translate-mve.c', 'translate-neon.c', 'translate-vfp.c', - 'crypto_helper.c', - 'hflags.c', - 'iwmmxt_helper.c', 'm_helper.c', 'mve_helper.c', - 'neon_helper.c', 'op_helper.c', - 'tlb_helper.c', 'vec_helper.c', )) @@ -60,3 +55,27 @@ arm_system_ss.add(files( arm_system_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('cpu-v7m.c')) arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files('cpu-v7m.c')) + +arm_common_ss.add(zlib) + +arm_common_ss.add(files( + 'arith_helper.c', + 'crypto_helper.c', +)) + +arm_common_system_ss.add(files( + 'cpregs-at.c', + 'hflags.c', + 'iwmmxt_helper.c', + 'neon_helper.c', + 'tlb_helper.c', + 'tlb-insns.c', + 'vfp_helper.c', +)) +arm_user_ss.add(files( + 'hflags.c', + 'iwmmxt_helper.c', + 'neon_helper.c', + 'tlb_helper.c', + 'vfp_helper.c', +)) diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c index 9d2ba287eeb..0efc18a181e 100644 --- a/target/arm/tcg/mte_helper.c +++ b/target/arm/tcg/mte_helper.c @@ -21,17 +21,22 @@ #include "qemu/log.h" #include "cpu.h" #include "internals.h" -#include "exec/exec-all.h" #include "exec/page-protection.h" -#include "exec/ram_addr.h" -#include "exec/cpu_ldst.h" +#ifdef CONFIG_USER_ONLY +#include "user/cpu_loop.h" +#include "user/page-protection.h" +#else +#include "system/ram_addr.h" +#endif +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/probe.h" #include "exec/helper-proto.h" -#include "hw/core/tcg-cpu-ops.h" +#include "exec/tlb-flags.h" +#include "accel/tcg/cpu-ops.h" #include "qapi/error.h" #include "qemu/guest-random.h" #include "mte_helper.h" - static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude) { if (exclude == 0xffff) { @@ -57,6 +62,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx, bool probe, uintptr_t ra) { #ifdef CONFIG_USER_ONLY + const size_t page_data_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1); uint64_t clean_ptr = useronly_clean_ptr(ptr); int flags = page_get_flags(clean_ptr); uint8_t *tags; @@ -77,7 +83,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx, return NULL; } - tags = page_get_target_data(clean_ptr); + tags = page_get_target_data(clean_ptr, page_data_size); index = extract32(ptr, LOG2_TAG_GRANULE + 1, TARGET_PAGE_BITS 
- LOG2_TAG_GRANULE - 1); diff --git a/target/arm/tcg/mve_helper.c b/target/arm/tcg/mve_helper.c index 03ebef5ef21..63ddcf3fecf 100644 --- a/target/arm/tcg/mve_helper.c +++ b/target/arm/tcg/mve_helper.c @@ -22,8 +22,7 @@ #include "internals.h" #include "vec_internal.h" #include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-ldst.h" #include "tcg/tcg.h" #include "fpu/softfloat.h" #include "crypto/clmul.h" @@ -149,13 +148,15 @@ static void mve_advance_vpt(CPUARMState *env) } /* For loads, predicated lanes are zeroed instead of keeping their old values */ -#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE) \ +#define DO_VLDR(OP, MFLAG, MSIZE, MTYPE, LDTYPE, ESIZE, TYPE) \ void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \ { \ TYPE *d = vd; \ uint16_t mask = mve_element_mask(env); \ uint16_t eci_mask = mve_eci_mask(env); \ unsigned b, e; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \ /* \ * R_SXTM allows the dest reg to become UNKNOWN for abandoned \ * beats so we don't care if we update part of the dest and \ @@ -164,46 +165,48 @@ static void mve_advance_vpt(CPUARMState *env) for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \ if (eci_mask & (1 << b)) { \ d[H##ESIZE(e)] = (mask & (1 << b)) ? \ - cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \ + (MTYPE)cpu_##LDTYPE##_mmu(env, addr, oi, GETPC()) : 0;\ } \ addr += MSIZE; \ } \ mve_advance_vpt(env); \ } -#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE) \ +#define DO_VSTR(OP, MFLAG, MSIZE, STTYPE, ESIZE, TYPE) \ void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \ { \ TYPE *d = vd; \ uint16_t mask = mve_element_mask(env); \ unsigned b, e; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \ for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \ if (mask & (1 << b)) { \ - cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \ + cpu_##STTYPE##_mmu(env, addr, d[H##ESIZE(e)], oi, GETPC()); \ } \ addr += MSIZE; \ } \ mve_advance_vpt(env); \ } -DO_VLDR(vldrb, 1, ldub, 1, uint8_t) -DO_VLDR(vldrh, 2, lduw, 2, uint16_t) -DO_VLDR(vldrw, 4, ldl, 4, uint32_t) +DO_VLDR(vldrb, MO_UB, 1, uint8_t, ldb, 1, uint8_t) +DO_VLDR(vldrh, MO_TEUW, 2, uint16_t, ldw, 2, uint16_t) +DO_VLDR(vldrw, MO_TEUL, 4, uint32_t, ldl, 4, uint32_t) -DO_VSTR(vstrb, 1, stb, 1, uint8_t) -DO_VSTR(vstrh, 2, stw, 2, uint16_t) -DO_VSTR(vstrw, 4, stl, 4, uint32_t) +DO_VSTR(vstrb, MO_UB, 1, stb, 1, uint8_t) +DO_VSTR(vstrh, MO_TEUW, 2, stw, 2, uint16_t) +DO_VSTR(vstrw, MO_TEUL, 4, stl, 4, uint32_t) -DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t) -DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t) -DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t) -DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t) -DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t) -DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t) +DO_VLDR(vldrb_sh, MO_SB, 1, int8_t, ldb, 2, int16_t) +DO_VLDR(vldrb_sw, MO_SB, 1, int8_t, ldb, 4, int32_t) +DO_VLDR(vldrb_uh, MO_UB, 1, uint8_t, ldb, 2, uint16_t) +DO_VLDR(vldrb_uw, MO_UB, 1, uint8_t, ldb, 4, uint32_t) +DO_VLDR(vldrh_sw, MO_TESW, 2, int16_t, ldw, 4, int32_t) +DO_VLDR(vldrh_uw, MO_TEUW, 2, uint16_t, ldw, 4, uint32_t) -DO_VSTR(vstrb_h, 1, stb, 2, int16_t) -DO_VSTR(vstrb_w, 1, stb, 4, int32_t) -DO_VSTR(vstrh_w, 2, stw, 4, int32_t) +DO_VSTR(vstrb_h, MO_UB, 1, stb, 2, int16_t) +DO_VSTR(vstrb_w, MO_UB, 1, stb, 4, int32_t) +DO_VSTR(vstrh_w, MO_TEUW, 2, stw, 4, int32_t) #undef DO_VLDR #undef DO_VSTR @@ -215,7 +218,7 @@ DO_VSTR(vstrh_w, 2, 
stw, 4, int32_t) * For loads, predicated lanes are zeroed instead of retaining * their previous values. */ -#define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB) \ +#define DO_VLDR_SG(OP, MFLAG, MTYPE, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB)\ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ uint32_t base) \ { \ @@ -225,13 +228,15 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t) uint16_t eci_mask = mve_eci_mask(env); \ unsigned e; \ uint32_t addr; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \ if (!(eci_mask & 1)) { \ continue; \ } \ addr = ADDRFN(base, m[H##ESIZE(e)]); \ d[H##ESIZE(e)] = (mask & 1) ? \ - cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \ + (MTYPE)cpu_##LDTYPE##_mmu(env, addr, oi, GETPC()) : 0; \ if (WB) { \ m[H##ESIZE(e)] = addr; \ } \ @@ -240,7 +245,7 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t) } /* We know here TYPE is unsigned so always the same as the offset type */ -#define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN, WB) \ +#define DO_VSTR_SG(OP, MFLAG, STTYPE, ESIZE, TYPE, ADDRFN, WB) \ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ uint32_t base) \ { \ @@ -250,13 +255,15 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t) uint16_t eci_mask = mve_eci_mask(env); \ unsigned e; \ uint32_t addr; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \ if (!(eci_mask & 1)) { \ continue; \ } \ addr = ADDRFN(base, m[H##ESIZE(e)]); \ if (mask & 1) { \ - cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \ + cpu_##STTYPE##_mmu(env, addr, d[H##ESIZE(e)], oi, GETPC()); \ } \ if (WB) { \ m[H##ESIZE(e)] = addr; \ @@ -283,13 +290,15 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t) uint16_t eci_mask = mve_eci_mask(env); \ unsigned e; \ uint32_t addr; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \ if (!(eci_mask & 1)) { \ continue; \ } \ addr = ADDRFN(base, m[H4(e & ~1)]); \ addr += 4 * (e & 1); \ - d[H4(e)] = (mask & 1) ? cpu_ldl_data_ra(env, addr, GETPC()) : 0; \ + d[H4(e)] = (mask & 1) ? 
cpu_ldl_mmu(env, addr, oi, GETPC()) : 0; \ if (WB && (e & 1)) { \ m[H4(e & ~1)] = addr - 4; \ } \ @@ -307,6 +316,8 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t) uint16_t eci_mask = mve_eci_mask(env); \ unsigned e; \ uint32_t addr; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \ if (!(eci_mask & 1)) { \ continue; \ @@ -314,7 +325,7 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t) addr = ADDRFN(base, m[H4(e & ~1)]); \ addr += 4 * (e & 1); \ if (mask & 1) { \ - cpu_stl_data_ra(env, addr, d[H4(e)], GETPC()); \ + cpu_stl_mmu(env, addr, d[H4(e)], oi, GETPC()); \ } \ if (WB && (e & 1)) { \ m[H4(e & ~1)] = addr - 4; \ @@ -328,40 +339,44 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t) #define ADDR_ADD_OSW(BASE, OFFSET) ((BASE) + ((OFFSET) << 2)) #define ADDR_ADD_OSD(BASE, OFFSET) ((BASE) + ((OFFSET) << 3)) -DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD, false) -DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD, false) -DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD, false) +DO_VLDR_SG(vldrb_sg_sh, MO_SB, int8_t, ldb, 2, int16_t, uint16_t, ADDR_ADD, false) +DO_VLDR_SG(vldrb_sg_sw, MO_SB, int8_t, ldb, 4, int32_t, uint32_t, ADDR_ADD, false) +DO_VLDR_SG(vldrh_sg_sw, MO_TESW, int16_t, ldw, 4, int32_t, uint32_t, ADDR_ADD, false) -DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD, false) -DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD, false) -DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD, false) -DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD, false) -DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD, false) -DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false) +DO_VLDR_SG(vldrb_sg_ub, MO_UB, uint8_t, ldb, 1, uint8_t, uint8_t, ADDR_ADD, false) +DO_VLDR_SG(vldrb_sg_uh, MO_UB, uint8_t, ldb, 2, uint16_t, uint16_t, ADDR_ADD, false) +DO_VLDR_SG(vldrb_sg_uw, MO_UB, uint8_t, ldb, 4, uint32_t, uint32_t, ADDR_ADD, false) +DO_VLDR_SG(vldrh_sg_uh, MO_TEUW, uint16_t, ldw, 2, uint16_t, uint16_t, ADDR_ADD, false) +DO_VLDR_SG(vldrh_sg_uw, MO_TEUW, uint16_t, ldw, 4, uint32_t, uint32_t, ADDR_ADD, false) +DO_VLDR_SG(vldrw_sg_uw, MO_TEUL, uint32_t, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false) DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD, false) -DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH, false) -DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH, false) -DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH, false) -DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW, false) +DO_VLDR_SG(vldrh_sg_os_sw, MO_TESW, int16_t, ldw, 4, + int32_t, uint32_t, ADDR_ADD_OSH, false) +DO_VLDR_SG(vldrh_sg_os_uh, MO_TEUW, uint16_t, ldw, 2, + uint16_t, uint16_t, ADDR_ADD_OSH, false) +DO_VLDR_SG(vldrh_sg_os_uw, MO_TEUW, uint16_t, ldw, 4, + uint32_t, uint32_t, ADDR_ADD_OSH, false) +DO_VLDR_SG(vldrw_sg_os_uw, MO_TEUL, uint32_t, ldl, 4, + uint32_t, uint32_t, ADDR_ADD_OSW, false) DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD, false) -DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD, false) -DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD, false) -DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD, false) -DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD, false) -DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD, false) -DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD, false) +DO_VSTR_SG(vstrb_sg_ub, MO_UB, stb, 1, uint8_t, ADDR_ADD, false) 
+DO_VSTR_SG(vstrb_sg_uh, MO_UB, stb, 2, uint16_t, ADDR_ADD, false) +DO_VSTR_SG(vstrb_sg_uw, MO_UB, stb, 4, uint32_t, ADDR_ADD, false) +DO_VSTR_SG(vstrh_sg_uh, MO_TEUW, stw, 2, uint16_t, ADDR_ADD, false) +DO_VSTR_SG(vstrh_sg_uw, MO_TEUW, stw, 4, uint32_t, ADDR_ADD, false) +DO_VSTR_SG(vstrw_sg_uw, MO_TEUL, stl, 4, uint32_t, ADDR_ADD, false) DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD, false) -DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH, false) -DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH, false) -DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW, false) +DO_VSTR_SG(vstrh_sg_os_uh, MO_TEUW, stw, 2, uint16_t, ADDR_ADD_OSH, false) +DO_VSTR_SG(vstrh_sg_os_uw, MO_TEUW, stw, 4, uint32_t, ADDR_ADD_OSH, false) +DO_VSTR_SG(vstrw_sg_os_uw, MO_TEUL, stl, 4, uint32_t, ADDR_ADD_OSW, false) DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD, false) -DO_VLDR_SG(vldrw_sg_wb_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true) +DO_VLDR_SG(vldrw_sg_wb_uw, MO_TEUL, uint32_t, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true) DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true) -DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true) +DO_VSTR_SG(vstrw_sg_wb_uw, MO_TEUL, stl, 4, uint32_t, ADDR_ADD, true) DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true) /* @@ -388,13 +403,15 @@ DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true) uint16_t mask = mve_eci_mask(env); \ static const uint8_t off[4] = { O1, O2, O3, O4 }; \ uint32_t addr, data; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (beat = 0; beat < 4; beat++, mask >>= 4) { \ if ((mask & 1) == 0) { \ /* ECI says skip this beat */ \ continue; \ } \ addr = base + off[beat] * 4; \ - data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ + data = cpu_ldl_mmu(env, addr, oi, GETPC()); \ for (e = 0; e < 4; e++, data >>= 8) { \ uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \ qd[H1(off[beat])] = data; \ @@ -412,13 +429,15 @@ DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true) uint32_t addr, data; \ int y; /* y counts 0 2 0 2 */ \ uint16_t *qd; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \ if ((mask & 1) == 0) { \ /* ECI says skip this beat */ \ continue; \ } \ addr = base + off[beat] * 8 + (beat & 1) * 4; \ - data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ + data = cpu_ldl_mmu(env, addr, oi, GETPC()); \ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \ qd[H2(off[beat])] = data; \ data >>= 16; \ @@ -437,13 +456,15 @@ DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true) uint32_t addr, data; \ uint32_t *qd; \ int y; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (beat = 0; beat < 4; beat++, mask >>= 4) { \ if ((mask & 1) == 0) { \ /* ECI says skip this beat */ \ continue; \ } \ addr = base + off[beat] * 4; \ - data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ + data = cpu_ldl_mmu(env, addr, oi, GETPC()); \ y = (beat + (O1 & 2)) & 3; \ qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \ qd[H4(off[beat] >> 2)] = data; \ @@ -474,13 +495,15 @@ DO_VLD4W(vld43w, 6, 7, 8, 9) static const uint8_t off[4] = { O1, O2, O3, O4 }; \ uint32_t addr, data; \ uint8_t *qd; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (beat = 0; beat < 4; beat++, mask >>= 4) { \ if ((mask & 1) == 0) { \ /* ECI says skip this beat */ \ continue; \ } \ addr = 
base + off[beat] * 2; \ - data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ + data = cpu_ldl_mmu(env, addr, oi, GETPC()); \ for (e = 0; e < 4; e++, data >>= 8) { \ qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \ qd[H1(off[beat] + (e >> 1))] = data; \ @@ -498,13 +521,15 @@ DO_VLD4W(vld43w, 6, 7, 8, 9) uint32_t addr, data; \ int e; \ uint16_t *qd; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (beat = 0; beat < 4; beat++, mask >>= 4) { \ if ((mask & 1) == 0) { \ /* ECI says skip this beat */ \ continue; \ } \ addr = base + off[beat] * 4; \ - data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ + data = cpu_ldl_mmu(env, addr, oi, GETPC()); \ for (e = 0; e < 2; e++, data >>= 16) { \ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \ qd[H2(off[beat])] = data; \ @@ -521,13 +546,15 @@ DO_VLD4W(vld43w, 6, 7, 8, 9) static const uint8_t off[4] = { O1, O2, O3, O4 }; \ uint32_t addr, data; \ uint32_t *qd; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (beat = 0; beat < 4; beat++, mask >>= 4) { \ if ((mask & 1) == 0) { \ /* ECI says skip this beat */ \ continue; \ } \ addr = base + off[beat]; \ - data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ + data = cpu_ldl_mmu(env, addr, oi, GETPC()); \ qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \ qd[H4(off[beat] >> 3)] = data; \ } \ @@ -550,6 +577,8 @@ DO_VLD2W(vld21w, 8, 12, 16, 20) uint16_t mask = mve_eci_mask(env); \ static const uint8_t off[4] = { O1, O2, O3, O4 }; \ uint32_t addr, data; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (beat = 0; beat < 4; beat++, mask >>= 4) { \ if ((mask & 1) == 0) { \ /* ECI says skip this beat */ \ @@ -561,7 +590,7 @@ DO_VLD2W(vld21w, 8, 12, 16, 20) uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \ data = (data << 8) | qd[H1(off[beat])]; \ } \ - cpu_stl_le_data_ra(env, addr, data, GETPC()); \ + cpu_stl_mmu(env, addr, data, oi, GETPC()); \ } \ } @@ -575,6 +604,8 @@ DO_VLD2W(vld21w, 8, 12, 16, 20) uint32_t addr, data; \ int y; /* y counts 0 2 0 2 */ \ uint16_t *qd; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \ if ((mask & 1) == 0) { \ /* ECI says skip this beat */ \ @@ -585,7 +616,7 @@ DO_VLD2W(vld21w, 8, 12, 16, 20) data = qd[H2(off[beat])]; \ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \ data |= qd[H2(off[beat])] << 16; \ - cpu_stl_le_data_ra(env, addr, data, GETPC()); \ + cpu_stl_mmu(env, addr, data, oi, GETPC()); \ } \ } @@ -599,6 +630,8 @@ DO_VLD2W(vld21w, 8, 12, 16, 20) uint32_t addr, data; \ uint32_t *qd; \ int y; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (beat = 0; beat < 4; beat++, mask >>= 4) { \ if ((mask & 1) == 0) { \ /* ECI says skip this beat */ \ @@ -608,7 +641,7 @@ DO_VLD2W(vld21w, 8, 12, 16, 20) y = (beat + (O1 & 2)) & 3; \ qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \ data = qd[H4(off[beat] >> 2)]; \ - cpu_stl_le_data_ra(env, addr, data, GETPC()); \ + cpu_stl_mmu(env, addr, data, oi, GETPC()); \ } \ } @@ -636,6 +669,8 @@ DO_VST4W(vst43w, 6, 7, 8, 9) static const uint8_t off[4] = { O1, O2, O3, O4 }; \ uint32_t addr, data; \ uint8_t *qd; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = 
make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (beat = 0; beat < 4; beat++, mask >>= 4) { \ if ((mask & 1) == 0) { \ /* ECI says skip this beat */ \ @@ -647,7 +682,7 @@ DO_VST4W(vst43w, 6, 7, 8, 9) qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \ data = (data << 8) | qd[H1(off[beat] + (e >> 1))]; \ } \ - cpu_stl_le_data_ra(env, addr, data, GETPC()); \ + cpu_stl_mmu(env, addr, data, oi, GETPC()); \ } \ } @@ -661,6 +696,8 @@ DO_VST4W(vst43w, 6, 7, 8, 9) uint32_t addr, data; \ int e; \ uint16_t *qd; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (beat = 0; beat < 4; beat++, mask >>= 4) { \ if ((mask & 1) == 0) { \ /* ECI says skip this beat */ \ @@ -672,7 +709,7 @@ DO_VST4W(vst43w, 6, 7, 8, 9) qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \ data = (data << 16) | qd[H2(off[beat])]; \ } \ - cpu_stl_le_data_ra(env, addr, data, GETPC()); \ + cpu_stl_mmu(env, addr, data, oi, GETPC()); \ } \ } @@ -685,6 +722,8 @@ DO_VST4W(vst43w, 6, 7, 8, 9) static const uint8_t off[4] = { O1, O2, O3, O4 }; \ uint32_t addr, data; \ uint32_t *qd; \ + int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \ + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \ for (beat = 0; beat < 4; beat++, mask >>= 4) { \ if ((mask & 1) == 0) { \ /* ECI says skip this beat */ \ @@ -693,7 +732,7 @@ DO_VST4W(vst43w, 6, 7, 8, 9) addr = base + off[beat]; \ qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \ data = qd[H4(off[beat] >> 3)]; \ - cpu_stl_le_data_ra(env, addr, data, GETPC()); \ + cpu_stl_mmu(env, addr, data, oi, GETPC()); \ } \ } @@ -2165,27 +2204,6 @@ DO_VSHLL_ALL(vshllt, true) DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \ DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN) -static inline uint64_t do_urshr(uint64_t x, unsigned sh) -{ - if (likely(sh < 64)) { - return (x >> sh) + ((x >> (sh - 1)) & 1); - } else if (sh == 64) { - return x >> 63; - } else { - return 0; - } -} - -static inline int64_t do_srshr(int64_t x, unsigned sh) -{ - if (likely(sh < 64)) { - return (x >> sh) + ((x >> (sh - 1)) & 1); - } else { - /* Rounding the sign bit always produces 0. */ - return 0; - } -} - DO_VSHRN_ALL(vshrn, DO_SHR) DO_VSHRN_ALL(vrshrn, do_urshr) @@ -2814,8 +2832,7 @@ DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ - &env->vfp.standard_fp_status; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -2888,8 +2905,7 @@ DO_2OP_FP_ALL(vminnma, minnuma) r[e] = 0; \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ - &env->vfp.standard_fp_status; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(tm & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -2926,8 +2942,7 @@ DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ - &env->vfp.standard_fp_status; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? 
FPST_STD_F16 : FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -2964,8 +2979,7 @@ DO_VFMA(vfmss, 4, float32, true) if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) { \ continue; \ } \ - fpst0 = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ - &env->vfp.standard_fp_status; \ + fpst0 = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ fpst1 = fpst0; \ if (!(mask & 1)) { \ scratch_fpst = *fpst0; \ @@ -3049,8 +3063,7 @@ DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ - &env->vfp.standard_fp_status; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3084,8 +3097,7 @@ DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ - &env->vfp.standard_fp_status; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3116,9 +3128,8 @@ DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS) unsigned e; \ TYPE *m = vm; \ TYPE ra = (TYPE)ra_in; \ - float_status *fpst = (ESIZE == 2) ? \ - &env->vfp.standard_fp_status_f16 : \ - &env->vfp.standard_fp_status; \ + float_status *fpst = \ + &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ if (mask & 1) { \ TYPE v = m[H##ESIZE(e)]; \ @@ -3168,8 +3179,7 @@ DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum) if ((mask & emask) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ - &env->vfp.standard_fp_status; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & (1 << (e * ESIZE)))) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3202,8 +3212,7 @@ DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum) if ((mask & emask) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ - &env->vfp.standard_fp_status; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & (1 << (e * ESIZE)))) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3267,8 +3276,7 @@ DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ - &env->vfp.standard_fp_status; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ @@ -3300,9 +3308,8 @@ DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero) unsigned e; \ float_status *fpst; \ float_status scratch_fpst; \ - float_status *base_fpst = (ESIZE == 2) ? \ - &env->vfp.standard_fp_status_f16 : \ - &env->vfp.standard_fp_status; \ + float_status *base_fpst = \ + &env->vfp.fp_status[ESIZE == 2 ? 
FPST_STD_F16 : FPST_STD]; \ uint32_t prev_rmode = get_float_rounding_mode(base_fpst); \ set_float_rounding_mode(rmode, base_fpst); \ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ @@ -3347,7 +3354,7 @@ static void do_vcvt_sh(CPUARMState *env, void *vd, void *vm, int top) unsigned e; float_status *fpst; float_status scratch_fpst; - float_status *base_fpst = &env->vfp.standard_fp_status; + float_status *base_fpst = &env->vfp.fp_status[FPST_STD]; bool old_fz = get_flush_to_zero(base_fpst); set_flush_to_zero(false, base_fpst); for (e = 0; e < 16 / 4; e++, mask >>= 4) { @@ -3377,7 +3384,7 @@ static void do_vcvt_hs(CPUARMState *env, void *vd, void *vm, int top) unsigned e; float_status *fpst; float_status scratch_fpst; - float_status *base_fpst = &env->vfp.standard_fp_status; + float_status *base_fpst = &env->vfp.fp_status[FPST_STD]; bool old_fiz = get_flush_inputs_to_zero(base_fpst); set_flush_inputs_to_zero(false, base_fpst); for (e = 0; e < 16 / 4; e++, mask >>= 4) { @@ -3427,8 +3434,7 @@ void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm) if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ continue; \ } \ - fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ - &env->vfp.standard_fp_status; \ + fpst = &env->vfp.fp_status[ESIZE == 2 ? FPST_STD_F16 : FPST_STD]; \ if (!(mask & 1)) { \ /* We need the result but without updating flags */ \ scratch_fpst = *fpst; \ diff --git a/target/arm/tcg/neon-dp.decode b/target/arm/tcg/neon-dp.decode index 788578c8fa1..e883c6ab585 100644 --- a/target/arm/tcg/neon-dp.decode +++ b/target/arm/tcg/neon-dp.decode @@ -291,17 +291,17 @@ VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_s VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_h VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_b -VQSHLU_64_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_d +VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_d VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_s VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_h VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_b -VQSHL_S_64_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_d +VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_d VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_s VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_h VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_b -VQSHL_U_64_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_d +VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_d VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_s VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_h VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... 
@2reg_shl_b diff --git a/target/arm/tcg/neon_helper.c b/target/arm/tcg/neon_helper.c index 082bfd88adf..8d288f3a700 100644 --- a/target/arm/tcg/neon_helper.c +++ b/target/arm/tcg/neon_helper.c @@ -9,11 +9,13 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/helper-proto.h" #include "tcg/tcg-gvec-desc.h" #include "fpu/softfloat.h" #include "vec_internal.h" +#define HELPER_H "tcg/helper.h" +#include "exec/helper-proto.h.inc" + #define SIGNBIT (uint32_t)0x80000000 #define SIGNBIT64 ((uint64_t)1 << 63) @@ -130,17 +132,28 @@ void HELPER(name)(void *vd, void *vn, void *vm, uint32_t desc) \ } #define NEON_GVEC_VOP2_ENV(name, vtype) \ -void HELPER(name)(void *vd, void *vn, void *vm, void *venv, uint32_t desc) \ +void HELPER(name)(void *vd, void *vn, void *vm, CPUARMState *env, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc); \ vtype *d = vd, *n = vn, *m = vm; \ - CPUARMState *env = venv; \ for (i = 0; i < opr_sz / sizeof(vtype); i++) { \ NEON_FN(d[i], n[i], m[i]); \ } \ clear_tail(d, opr_sz, simd_maxsz(desc)); \ } +#define NEON_GVEC_VOP2i_ENV(name, vtype) \ +void HELPER(name)(void *vd, void *vn, CPUARMState *env, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc); \ + int imm = simd_data(desc); \ + vtype *d = vd, *n = vn; \ + for (i = 0; i < opr_sz / sizeof(vtype); i++) { \ + NEON_FN(d[i], n[i], imm); \ + } \ + clear_tail(d, opr_sz, simd_maxsz(desc)); \ +} + /* Pairwise operations. */ /* For 32-bit elements each segment only contains a single element, so the elementwise and pairwise operations are the same. */ @@ -215,16 +228,31 @@ NEON_VOP(rshl_s16, neon_s16, 2) NEON_GVEC_VOP2(gvec_srshl_h, int16_t) #undef NEON_FN +#define NEON_FN(dest, src1, src2) \ + (dest = do_sqrshl_bhs(src1, src2, 16, true, NULL)) +NEON_GVEC_VOP2(sme2_srshl_h, int16_t) +#undef NEON_FN + #define NEON_FN(dest, src1, src2) \ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 32, true, NULL)) NEON_GVEC_VOP2(gvec_srshl_s, int32_t) #undef NEON_FN +#define NEON_FN(dest, src1, src2) \ + (dest = do_sqrshl_bhs(src1, src2, 32, true, NULL)) +NEON_GVEC_VOP2(sme2_srshl_s, int32_t) +#undef NEON_FN + #define NEON_FN(dest, src1, src2) \ (dest = do_sqrshl_d(src1, (int8_t)src2, true, NULL)) NEON_GVEC_VOP2(gvec_srshl_d, int64_t) #undef NEON_FN +#define NEON_FN(dest, src1, src2) \ + (dest = do_sqrshl_d(src1, src2, true, NULL)) +NEON_GVEC_VOP2(sme2_srshl_d, int64_t) +#undef NEON_FN + uint32_t HELPER(neon_rshl_s32)(uint32_t val, uint32_t shift) { return do_sqrshl_bhs(val, (int8_t)shift, 32, true, NULL); @@ -247,16 +275,31 @@ NEON_VOP(rshl_u16, neon_u16, 2) NEON_GVEC_VOP2(gvec_urshl_h, uint16_t) #undef NEON_FN +#define NEON_FN(dest, src1, src2) \ + (dest = do_uqrshl_bhs(src1, (int16_t)src2, 16, true, NULL)) +NEON_GVEC_VOP2(sme2_urshl_h, uint16_t) +#undef NEON_FN + #define NEON_FN(dest, src1, src2) \ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 32, true, NULL)) NEON_GVEC_VOP2(gvec_urshl_s, int32_t) #undef NEON_FN +#define NEON_FN(dest, src1, src2) \ + (dest = do_uqrshl_bhs(src1, src2, 32, true, NULL)) +NEON_GVEC_VOP2(sme2_urshl_s, int32_t) +#undef NEON_FN + #define NEON_FN(dest, src1, src2) \ (dest = do_uqrshl_d(src1, (int8_t)src2, true, NULL)) NEON_GVEC_VOP2(gvec_urshl_d, int64_t) #undef NEON_FN +#define NEON_FN(dest, src1, src2) \ + (dest = do_uqrshl_d(src1, src2, true, NULL)) +NEON_GVEC_VOP2(sme2_urshl_d, int64_t) +#undef NEON_FN + uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shift) { return do_uqrshl_bhs(val, (int8_t)shift, 32, true, NULL); @@ -271,22 +314,26 @@ uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t 
shift) (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc)) NEON_VOP_ENV(qshl_u8, neon_u8, 4) NEON_GVEC_VOP2_ENV(neon_uqshl_b, uint8_t) +NEON_GVEC_VOP2i_ENV(neon_uqshli_b, uint8_t) #undef NEON_FN #define NEON_FN(dest, src1, src2) \ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc)) NEON_VOP_ENV(qshl_u16, neon_u16, 2) NEON_GVEC_VOP2_ENV(neon_uqshl_h, uint16_t) +NEON_GVEC_VOP2i_ENV(neon_uqshli_h, uint16_t) #undef NEON_FN #define NEON_FN(dest, src1, src2) \ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 32, false, env->vfp.qc)) NEON_GVEC_VOP2_ENV(neon_uqshl_s, uint32_t) +NEON_GVEC_VOP2i_ENV(neon_uqshli_s, uint32_t) #undef NEON_FN #define NEON_FN(dest, src1, src2) \ (dest = do_uqrshl_d(src1, (int8_t)src2, false, env->vfp.qc)) NEON_GVEC_VOP2_ENV(neon_uqshl_d, uint64_t) +NEON_GVEC_VOP2i_ENV(neon_uqshli_d, uint64_t) #undef NEON_FN uint32_t HELPER(neon_qshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift) @@ -303,22 +350,26 @@ uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift) (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc)) NEON_VOP_ENV(qshl_s8, neon_s8, 4) NEON_GVEC_VOP2_ENV(neon_sqshl_b, int8_t) +NEON_GVEC_VOP2i_ENV(neon_sqshli_b, int8_t) #undef NEON_FN #define NEON_FN(dest, src1, src2) \ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc)) NEON_VOP_ENV(qshl_s16, neon_s16, 2) NEON_GVEC_VOP2_ENV(neon_sqshl_h, int16_t) +NEON_GVEC_VOP2i_ENV(neon_sqshli_h, int16_t) #undef NEON_FN #define NEON_FN(dest, src1, src2) \ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 32, false, env->vfp.qc)) NEON_GVEC_VOP2_ENV(neon_sqshl_s, int32_t) +NEON_GVEC_VOP2i_ENV(neon_sqshli_s, int32_t) #undef NEON_FN #define NEON_FN(dest, src1, src2) \ (dest = do_sqrshl_d(src1, (int8_t)src2, false, env->vfp.qc)) NEON_GVEC_VOP2_ENV(neon_sqshl_d, int64_t) +NEON_GVEC_VOP2i_ENV(neon_sqshli_d, int64_t) #undef NEON_FN uint32_t HELPER(neon_qshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift) @@ -334,11 +385,13 @@ uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t val, uint64_t shift) #define NEON_FN(dest, src1, src2) \ (dest = do_suqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc)) NEON_VOP_ENV(qshlu_s8, neon_s8, 4) +NEON_GVEC_VOP2i_ENV(neon_sqshlui_b, int8_t) #undef NEON_FN #define NEON_FN(dest, src1, src2) \ (dest = do_suqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc)) NEON_VOP_ENV(qshlu_s16, neon_s16, 2) +NEON_GVEC_VOP2i_ENV(neon_sqshlui_h, int16_t) #undef NEON_FN uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t val, uint32_t shift) @@ -351,6 +404,16 @@ uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t val, uint64_t shift) return do_suqrshl_d(val, (int8_t)shift, false, env->vfp.qc); } +#define NEON_FN(dest, src1, src2) \ + (dest = do_suqrshl_bhs(src1, (int8_t)src2, 32, false, env->vfp.qc)) +NEON_GVEC_VOP2i_ENV(neon_sqshlui_s, int32_t) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) \ + (dest = do_suqrshl_d(src1, (int8_t)src2, false, env->vfp.qc)) +NEON_GVEC_VOP2i_ENV(neon_sqshlui_d, int64_t) +#undef NEON_FN + #define NEON_FN(dest, src1, src2) \ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc)) NEON_VOP_ENV(qrshl_u8, neon_u8, 4) @@ -492,27 +555,6 @@ uint32_t HELPER(neon_cls_s32)(uint32_t x) return count - 1; } -/* Bit count. 
*/ -uint32_t HELPER(neon_cnt_u8)(uint32_t x) -{ - x = (x & 0x55555555) + ((x >> 1) & 0x55555555); - x = (x & 0x33333333) + ((x >> 2) & 0x33333333); - x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f); - return x; -} - -/* Reverse bits in each 8 bit word */ -uint32_t HELPER(neon_rbit_u8)(uint32_t x) -{ - x = ((x & 0xf0f0f0f0) >> 4) - | ((x & 0x0f0f0f0f) << 4); - x = ((x & 0x88888888) >> 3) - | ((x & 0x44444444) >> 1) - | ((x & 0x22222222) << 1) - | ((x & 0x11111111) << 3); - return x; -} - #define NEON_QDMULH16(dest, src1, src2, round) do { \ uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \ if ((tmp ^ (tmp << 1)) & SIGNBIT) { \ @@ -565,13 +607,15 @@ NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1) #undef NEON_FN #undef NEON_QDMULH32 -uint32_t HELPER(neon_narrow_u8)(uint64_t x) +/* Only the low 32-bits of output are significant. */ +uint64_t HELPER(neon_narrow_u8)(uint64_t x) { return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u) | ((x >> 24) & 0xff000000u); } -uint32_t HELPER(neon_narrow_u16)(uint64_t x) +/* Only the low 32-bits of output are significant. */ +uint64_t HELPER(neon_narrow_u16)(uint64_t x) { return (x & 0xffffu) | ((x >> 16) & 0xffff0000u); } @@ -602,7 +646,8 @@ uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x) return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000); } -uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x) +/* Only the low 32-bits of output are significant. */ +uint64_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x) { uint16_t s; uint8_t d; @@ -629,7 +674,8 @@ uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x) return res; } -uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x) +/* Only the low 32-bits of output are significant. */ +uint64_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x) { uint16_t s; uint8_t d; @@ -652,7 +698,8 @@ uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x) return res; } -uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x) +/* Only the low 32-bits of output are significant. */ +uint64_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x) { int16_t s; uint8_t d; @@ -675,7 +722,8 @@ uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x) return res; } -uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x) +/* Only the low 32-bits of output are significant. */ +uint64_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x) { uint32_t high; uint32_t low; @@ -695,10 +743,11 @@ uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x) high = 0xffff; SET_QC(); } - return low | (high << 16); + return deposit32(low, 16, 16, high); } -uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x) +/* Only the low 32-bits of output are significant. */ +uint64_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x) { uint32_t high; uint32_t low; @@ -712,10 +761,11 @@ uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x) high = 0xffff; SET_QC(); } - return low | (high << 16); + return deposit32(low, 16, 16, high); } -uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x) +/* Only the low 32-bits of output are significant. 
*/ +uint64_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x) { int32_t low; int32_t high; @@ -729,10 +779,11 @@ uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x) high = (high >> 31) ^ 0x7fff; SET_QC(); } - return (uint16_t)low | (high << 16); + return deposit32(low, 16, 16, high); } -uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x) +/* Only the low 32-bits of output are significant. */ +uint64_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x) { if (x & 0x8000000000000000ull) { SET_QC(); @@ -745,7 +796,8 @@ uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x) return x; } -uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x) +/* Only the low 32-bits of output are significant. */ +uint64_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x) { if (x > 0xffffffffu) { SET_QC(); @@ -754,13 +806,14 @@ uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x) return x; } -uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x) +/* Only the low 32-bits of output are significant. */ +uint64_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x) { if ((int64_t)x != (int32_t)x) { SET_QC(); - return ((int64_t)x >> 63) ^ 0x7fffffff; + return (uint32_t)((int64_t)x >> 63) ^ 0x7fffffff; } - return x; + return (uint32_t)x; } uint64_t HELPER(neon_widen_u8)(uint32_t x) @@ -803,62 +856,47 @@ uint64_t HELPER(neon_widen_s16)(uint32_t x) return ((uint32_t)(int16_t)x) | (high << 32); } -uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b) -{ - uint64_t mask; - mask = (a ^ b) & 0x8000800080008000ull; - a &= ~0x8000800080008000ull; - b &= ~0x8000800080008000ull; - return (a + b) ^ mask; -} - -uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b) -{ - uint64_t mask; - mask = (a ^ b) & 0x8000000080000000ull; - a &= ~0x8000000080000000ull; - b &= ~0x8000000080000000ull; - return (a + b) ^ mask; -} - -uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b) -{ - uint64_t tmp; - uint64_t tmp2; +/* Pairwise long add: add pairs of adjacent elements into + * double-width elements in the result (eg _s8 is an 8x8->16 op) + */ +uint64_t HELPER(neon_addlp_s8)(uint64_t a) +{ + uint64_t nsignmask = 0x0080008000800080ULL; + uint64_t wsignmask = 0x8000800080008000ULL; + uint64_t elementmask = 0x00ff00ff00ff00ffULL; + uint64_t tmp1, tmp2; + uint64_t res, signres; + + /* Extract odd elements, sign extend each to a 16 bit field */ + tmp1 = a & elementmask; + tmp1 ^= nsignmask; + tmp1 |= wsignmask; + tmp1 = (tmp1 - nsignmask) ^ wsignmask; + /* Ditto for the even elements */ + tmp2 = (a >> 8) & elementmask; + tmp2 ^= nsignmask; + tmp2 |= wsignmask; + tmp2 = (tmp2 - nsignmask) ^ wsignmask; + + /* calculate the result by summing bits 0..14, 16..22, etc, + * and then adjusting the sign bits 15, 23, etc manually. + * This ensures the addition can't overflow the 16 bit field. 
+ */ + signres = (tmp1 ^ tmp2) & wsignmask; + res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask); + res ^= signres; - tmp = a & 0x0000ffff0000ffffull; - tmp += (a >> 16) & 0x0000ffff0000ffffull; - tmp2 = b & 0xffff0000ffff0000ull; - tmp2 += (b << 16) & 0xffff0000ffff0000ull; - return ( tmp & 0xffff) - | ((tmp >> 16) & 0xffff0000ull) - | ((tmp2 << 16) & 0xffff00000000ull) - | ( tmp2 & 0xffff000000000000ull); + return res; } -uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b) +uint64_t HELPER(neon_addlp_s16)(uint64_t a) { - uint32_t low = a + (a >> 32); - uint32_t high = b + (b >> 32); - return low + ((uint64_t)high << 32); -} + int32_t reslo, reshi; -uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b) -{ - uint64_t mask; - mask = (a ^ ~b) & 0x8000800080008000ull; - a |= 0x8000800080008000ull; - b &= ~0x8000800080008000ull; - return (a - b) ^ mask; -} + reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16); + reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48); -uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b) -{ - uint64_t mask; - mask = (a ^ ~b) & 0x8000000080000000ull; - a |= 0x8000000080000000ull; - b &= ~0x8000000080000000ull; - return (a - b) ^ mask; + return (uint32_t)reslo | (((uint64_t)reshi) << 32); } uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b) @@ -1172,51 +1210,44 @@ uint64_t HELPER(neon_qneg_s64)(CPUARMState *env, uint64_t x) * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do. * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires. */ -uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp) +uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, float_status *fpst) { - float_status *fpst = fpstp; return -float32_eq_quiet(make_float32(a), make_float32(b), fpst); } -uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp) +uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, float_status *fpst) { - float_status *fpst = fpstp; return -float32_le(make_float32(b), make_float32(a), fpst); } -uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp) +uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, float_status *fpst) { - float_status *fpst = fpstp; return -float32_lt(make_float32(b), make_float32(a), fpst); } -uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp) +uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, float_status *fpst) { - float_status *fpst = fpstp; float32 f0 = float32_abs(make_float32(a)); float32 f1 = float32_abs(make_float32(b)); return -float32_le(f1, f0, fpst); } -uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp) +uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, float_status *fpst) { - float_status *fpst = fpstp; float32 f0 = float32_abs(make_float32(a)); float32 f1 = float32_abs(make_float32(b)); return -float32_lt(f1, f0, fpst); } -uint64_t HELPER(neon_acge_f64)(uint64_t a, uint64_t b, void *fpstp) +uint64_t HELPER(neon_acge_f64)(uint64_t a, uint64_t b, float_status *fpst) { - float_status *fpst = fpstp; float64 f0 = float64_abs(make_float64(a)); float64 f1 = float64_abs(make_float64(b)); return -float64_le(f1, f0, fpst); } -uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t b, void *fpstp) +uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t b, float_status *fpst) { - float_status *fpst = fpstp; float64 f0 = float64_abs(make_float64(a)); float64 f1 = float64_abs(make_float64(b)); return -float64_lt(f1, f0, fpst); diff --git a/target/arm/op_addsub.h b/target/arm/tcg/op_addsub.c.inc 
similarity index 100% rename from target/arm/op_addsub.h rename to target/arm/tcg/op_addsub.c.inc diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c index c083e5cfb87..575e566280b 100644 --- a/target/arm/tcg/op_helper.c +++ b/target/arm/tcg/op_helper.c @@ -20,10 +20,11 @@ #include "qemu/main-loop.h" #include "cpu.h" #include "exec/helper-proto.h" +#include "exec/target_page.h" #include "internals.h" #include "cpu-features.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/probe.h" #include "cpregs.h" #define SIGNBIT (uint32_t)0x80000000 @@ -313,15 +314,19 @@ void HELPER(check_bxj_trap)(CPUARMState *env, uint32_t rm) } #ifndef CONFIG_USER_ONLY -/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped. +/* + * Function checks whether WFx (WFI/WFE) instructions are set up to be trapped. * The function returns the target EL (1-3) if the instruction is to be trapped; * otherwise it returns 0 indicating it is not trapped. + * For a trap, *excp is updated with the EXCP_* trap type to use. */ -static inline int check_wfx_trap(CPUARMState *env, bool is_wfe) +static inline int check_wfx_trap(CPUARMState *env, bool is_wfe, uint32_t *excp) { int cur_el = arm_current_el(env); uint64_t mask; + *excp = EXCP_UDEF; + if (arm_feature(env, ARM_FEATURE_M)) { /* M profile cores can never trap WFI/WFE. */ return 0; @@ -331,18 +336,9 @@ static inline int check_wfx_trap(CPUARMState *env, bool is_wfe) * WFx instructions being trapped to EL1. These trap bits don't exist in v7. */ if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) { - int target_el; - mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI; - if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) { - /* Secure EL0 and Secure PL1 is at EL3 */ - target_el = 3; - } else { - target_el = 1; - } - - if (!(env->cp15.sctlr_el[target_el] & mask)) { - return target_el; + if (!(arm_sctlr(env, cur_el) & mask)) { + return exception_target_el(env); } } @@ -358,9 +354,12 @@ static inline int check_wfx_trap(CPUARMState *env, bool is_wfe) } /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */ - if (cur_el < 3) { + if (arm_feature(env, ARM_FEATURE_V8) && !arm_is_el3_or_mon(env)) { mask = (is_wfe) ? SCR_TWE : SCR_TWI; if (env->cp15.scr_el3 & mask) { + if (!arm_el_is_aa64(env, 3)) { + *excp = EXCP_MON_TRAP; + } return 3; } } @@ -383,7 +382,8 @@ void HELPER(wfi)(CPUARMState *env, uint32_t insn_len) return; #else CPUState *cs = env_cpu(env); - int target_el = check_wfx_trap(env, false); + uint32_t excp; + int target_el = check_wfx_trap(env, false, &excp); if (cpu_has_work(cs)) { /* Don't bother to go into our "low power state" if @@ -399,7 +399,7 @@ void HELPER(wfi)(CPUARMState *env, uint32_t insn_len) env->regs[15] -= insn_len; } - raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2), + raise_exception(env, excp, syn_wfx(1, 0xe, 0, insn_len == 2), target_el); } @@ -424,10 +424,17 @@ void HELPER(wfit)(CPUARMState *env, uint64_t timeout) #else ARMCPU *cpu = env_archcpu(env); CPUState *cs = env_cpu(env); - int target_el = check_wfx_trap(env, false); + uint32_t excp; + int target_el = check_wfx_trap(env, false, &excp); /* The WFIT should time out when CNTVCT_EL0 >= the specified value. */ uint64_t cntval = gt_get_countervalue(env); - uint64_t offset = gt_virt_cnt_offset(env); + /* + * We want the value that we would get if we read CNTVCT_EL0 from + * the current exception level, so the direct_access offset, not + * the indirect_access one. 
Compare the pseudocode LocalTimeoutEvent(), + * which calls VirtualCounterTimer(). + */ + uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_VIRT); uint64_t cntvct = cntval - offset; uint64_t nexttick; @@ -441,8 +448,7 @@ void HELPER(wfit)(CPUARMState *env, uint64_t timeout) if (target_el) { env->pc -= 4; - raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, false), - target_el); + raise_exception(env, excp, syn_wfx(1, 0xe, 0, false), target_el); } if (uadd64_overflow(timeout, offset, &nexttick)) { @@ -758,12 +764,13 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key, const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key); CPAccessResult res = CP_ACCESS_OK; int target_el; + uint32_t excp; assert(ri != NULL); if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14 && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) { - res = CP_ACCESS_TRAP; + res = CP_ACCESS_UNDEFINED; goto fail; } @@ -780,7 +787,7 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key, * the other trap takes priority. So we take the "check HSTR_EL2" path * for all of those cases.) */ - if (res != CP_ACCESS_OK && ((res & CP_ACCESS_EL_MASK) == 0) && + if (res != CP_ACCESS_OK && ((res & CP_ACCESS_EL_MASK) < 2) && arm_current_el(env) == 0) { goto fail; } @@ -817,6 +824,7 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key, unsigned int idx = FIELD_EX32(ri->fgt, FGT, IDX); unsigned int bitpos = FIELD_EX32(ri->fgt, FGT, BITPOS); bool rev = FIELD_EX32(ri->fgt, FGT, REV); + bool nxs = FIELD_EX32(ri->fgt, FGT, NXS); bool trapbit; if (ri->fgt & FGT_EXEC) { @@ -830,7 +838,15 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key, trapword = env->cp15.fgt_write[idx]; } - trapbit = extract64(trapword, bitpos, 1); + if (nxs && (arm_hcrx_el2_eff(env) & HCRX_FGTNXS)) { + /* + * If HCRX_EL2.FGTnXS is 1 then the fine-grained trap for + * TLBI maintenance insns does *not* apply to the nXS variant. + */ + trapbit = 0; + } else { + trapbit = extract64(trapword, bitpos, 1); + } if (trapbit != rev) { res = CP_ACCESS_TRAP_EL2; goto fail; @@ -842,12 +858,25 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key, } fail: - switch (res & ~CP_ACCESS_EL_MASK) { - case CP_ACCESS_TRAP: + excp = EXCP_UDEF; + switch (res) { + /* CP_ACCESS_TRAP* traps are always direct to a specified EL */ + case CP_ACCESS_TRAP_EL3: + /* + * If EL3 is AArch32 then there's no syndrome register; the cases + * where we would raise a SystemAccessTrap to AArch64 EL3 all become + * raising a Monitor trap exception. (Because there's no visible + * syndrome it doesn't matter what we pass to raise_exception().) 
+ */ + if (!arm_el_is_aa64(env, 3)) { + excp = EXCP_MON_TRAP; + } break; - case CP_ACCESS_TRAP_UNCATEGORIZED: - /* Only CP_ACCESS_TRAP traps are direct to a specified EL */ - assert((res & CP_ACCESS_EL_MASK) == 0); + case CP_ACCESS_TRAP_EL2: + case CP_ACCESS_TRAP_EL1: + break; + case CP_ACCESS_UNDEFINED: + /* CP_ACCESS_UNDEFINED is never direct to a specified EL */ if (cpu_isar_feature(aa64_ids, cpu) && isread && arm_cpreg_in_idspace(ri)) { /* @@ -867,6 +896,9 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key, case 0: target_el = exception_target_el(env); break; + case 1: + assert(arm_current_el(env) < 2); + break; case 2: assert(arm_current_el(env) != 3); assert(arm_is_el2_enabled(env)); @@ -875,11 +907,10 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key, assert(arm_feature(env, ARM_FEATURE_EL3)); break; default: - /* No "direct" traps to EL1 */ g_assert_not_reached(); } - raise_exception(env, EXCP_UDEF, syndrome, target_el); + raise_exception(env, excp, syndrome, target_el); } const void *HELPER(lookup_cp_reg)(CPUARMState *env, uint32_t key) @@ -912,7 +943,19 @@ void HELPER(tidcp_el0)(CPUARMState *env, uint32_t syndrome) { /* See arm_sctlr(), but we also need the sctlr el. */ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); - int target_el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1; + int target_el; + + switch (mmu_idx) { + case ARMMMUIdx_E20_0: + target_el = 2; + break; + case ARMMMUIdx_E30_0: + target_el = 3; + break; + default: + target_el = 1; + break; + } /* * The bit is not valid unless the target el is aa64, but since the @@ -1179,7 +1222,7 @@ uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i) } } -void HELPER(probe_access)(CPUARMState *env, target_ulong ptr, +void HELPER(probe_access)(CPUARMState *env, vaddr ptr, uint32_t access_type, uint32_t mmu_idx, uint32_t size) { diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c index c4b143024f3..c591c3052c3 100644 --- a/target/arm/tcg/pauth_helper.c +++ b/target/arm/tcg/pauth_helper.c @@ -21,8 +21,7 @@ #include "cpu.h" #include "internals.h" #include "cpu-features.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "exec/helper-proto.h" #include "tcg/tcg-gvec-desc.h" #include "qemu/xxhash.h" diff --git a/target/arm/tcg/psci.c b/target/arm/tcg/psci.c index 51d2ca3d30d..cabed43e8a8 100644 --- a/target/arm/tcg/psci.c +++ b/target/arm/tcg/psci.c @@ -21,7 +21,7 @@ #include "exec/helper-proto.h" #include "kvm-consts.h" #include "qemu/main-loop.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "internals.h" #include "arm-powerctl.h" #include "target/arm/multiprocessing.h" diff --git a/target/arm/tcg/sme.decode b/target/arm/tcg/sme.decode index 628804e37a8..6bb9aa2a90e 100644 --- a/target/arm/tcg/sme.decode +++ b/target/arm/tcg/sme.decode @@ -22,30 +22,139 @@ ### SME Misc ZERO 11000000 00 001 00000000000 imm:8 +ZERO_zt0 11000000 01 001 00000000000 00000001 ### SME Move into/from Array %mova_rs 13:2 !function=plus_12 -&mova esz rs pg zr za_imm v:bool to_vec:bool +%mova_rv 13:2 !function=plus_8 +&mova_a rv zr off +&mova_p esz rs pg zr za off v:bool +&mova_t esz rs zr za off v:bool -MOVA 11000000 esz:2 00000 0 v:1 .. pg:3 zr:5 0 za_imm:4 \ - &mova to_vec=0 rs=%mova_rs -MOVA 11000000 11 00000 1 v:1 .. pg:3 zr:5 0 za_imm:4 \ - &mova to_vec=0 rs=%mova_rs esz=4 +MOVA_tz 11000000 00 00000 0 v:1 .. pg:3 zr:5 0 off:4 \ + &mova_p rs=%mova_rs esz=0 za=0 +MOVA_tz 11000000 01 00000 0 v:1 .. 
pg:3 zr:5 0 za:1 off:3 \ + &mova_p rs=%mova_rs esz=1 +MOVA_tz 11000000 10 00000 0 v:1 .. pg:3 zr:5 0 za:2 off:2 \ + &mova_p rs=%mova_rs esz=2 +MOVA_tz 11000000 11 00000 0 v:1 .. pg:3 zr:5 0 za:3 off:1 \ + &mova_p rs=%mova_rs esz=3 +MOVA_tz 11000000 11 00000 1 v:1 .. pg:3 zr:5 0 za:4 \ + &mova_p rs=%mova_rs esz=4 off=0 -MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \ - &mova to_vec=1 rs=%mova_rs -MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \ - &mova to_vec=1 rs=%mova_rs esz=4 +MOVA_zt 11000000 00 00001 0 v:1 .. pg:3 0 off:4 zr:5 \ + &mova_p rs=%mova_rs esz=0 za=0 +MOVA_zt 11000000 01 00001 0 v:1 .. pg:3 0 za:1 off:3 zr:5 \ + &mova_p rs=%mova_rs esz=1 +MOVA_zt 11000000 10 00001 0 v:1 .. pg:3 0 za:2 off:2 zr:5 \ + &mova_p rs=%mova_rs esz=2 +MOVA_zt 11000000 11 00001 0 v:1 .. pg:3 0 za:3 off:1 zr:5 \ + &mova_p rs=%mova_rs esz=3 +MOVA_zt 11000000 11 00001 1 v:1 .. pg:3 0 za:4 zr:5 \ + &mova_p rs=%mova_rs esz=4 off=0 + +MOVA_tz2 11000000 00 00010 0 v:1 .. 000 zr:4 0 00 off:3 \ + &mova_t rs=%mova_rs esz=0 za=0 +MOVA_tz2 11000000 01 00010 0 v:1 .. 000 zr:4 0 00 za:1 off:2 \ + &mova_t rs=%mova_rs esz=1 +MOVA_tz2 11000000 10 00010 0 v:1 .. 000 zr:4 0 00 za:2 off:1 \ + &mova_t rs=%mova_rs esz=2 +MOVA_tz2 11000000 11 00010 0 v:1 .. 000 zr:4 0 00 za:3 \ + &mova_t rs=%mova_rs esz=3 off=0 + +MOVA_zt2 11000000 00 00011 0 v:1 .. 000 00 off:3 zr:4 0 \ + &mova_t rs=%mova_rs esz=0 za=0 +MOVA_zt2 11000000 01 00011 0 v:1 .. 000 00 za:1 off:2 zr:4 0 \ + &mova_t rs=%mova_rs esz=1 +MOVA_zt2 11000000 10 00011 0 v:1 .. 000 00 za:2 off:1 zr:4 0 \ + &mova_t rs=%mova_rs esz=2 +MOVA_zt2 11000000 11 00011 0 v:1 .. 000 00 za:3 zr:4 0 \ + &mova_t rs=%mova_rs esz=3 off=0 + +MOVA_tz4 11000000 00 00010 0 v:1 .. 001 zr:3 00 000 off:2 \ + &mova_t rs=%mova_rs esz=0 za=0 +MOVA_tz4 11000000 01 00010 0 v:1 .. 001 zr:3 00 000 za:1 off:1 \ + &mova_t rs=%mova_rs esz=1 +MOVA_tz4 11000000 10 00010 0 v:1 .. 001 zr:3 00 000 za:2 \ + &mova_t rs=%mova_rs esz=2 off=0 +MOVA_tz4 11000000 11 00010 0 v:1 .. 001 zr:3 00 00 za:3 \ + &mova_t rs=%mova_rs esz=3 off=0 + +MOVA_zt4 11000000 00 00011 0 v:1 .. 001 000 off:2 zr:3 00 \ + &mova_t rs=%mova_rs esz=0 za=0 +MOVA_zt4 11000000 01 00011 0 v:1 .. 001 000 za:1 off:1 zr:3 00 \ + &mova_t rs=%mova_rs esz=1 +MOVA_zt4 11000000 10 00011 0 v:1 .. 001 000 za:2 zr:3 00 \ + &mova_t rs=%mova_rs esz=2 off=0 +MOVA_zt4 11000000 11 00011 0 v:1 .. 001 00 za:3 zr:3 00 \ + &mova_t rs=%mova_rs esz=3 off=0 + +MOVA_az2 11000000 00 00010 00 .. 010 zr:4 000 off:3 \ + &mova_a rv=%mova_rv +MOVA_az4 11000000 00 00010 00 .. 011 zr:3 0000 off:3 \ + &mova_a rv=%mova_rv + +MOVA_za2 11000000 00 00011 00 .. 010 00 off:3 zr:4 0 \ + &mova_a rv=%mova_rv +MOVA_za4 11000000 00 00011 00 .. 011 00 off:3 zr:3 00 \ + &mova_a rv=%mova_rv + +### SME Move and Zero + +MOVAZ_za2 11000000 00000110 0 .. 01010 off:3 zr:4 0 \ + &mova_a rv=%mova_rv +MOVAZ_za4 11000000 00000110 0 .. 01110 off:3 zr:3 00 \ + &mova_a rv=%mova_rv + +MOVAZ_zt 11000000 00 00001 0 v:1 .. 0001 off:4 zr:5 \ + &mova_t rs=%mova_rs esz=0 za=0 +MOVAZ_zt 11000000 01 00001 0 v:1 .. 0001 za:1 off:3 zr:5 \ + &mova_t rs=%mova_rs esz=1 +MOVAZ_zt 11000000 10 00001 0 v:1 .. 0001 za:2 off:2 zr:5 \ + &mova_t rs=%mova_rs esz=2 +MOVAZ_zt 11000000 11 00001 0 v:1 .. 0001 za:3 off:1 zr:5 \ + &mova_t rs=%mova_rs esz=3 +MOVAZ_zt 11000000 11 00001 1 v:1 .. 0001 za:4 zr:5 \ + &mova_t rs=%mova_rs esz=4 off=0 + +MOVAZ_zt2 11000000 00 00011 0 v:1 .. 00010 off:3 zr:4 0 \ + &mova_t rs=%mova_rs esz=0 za=0 +MOVAZ_zt2 11000000 01 00011 0 v:1 .. 
00010 za:1 off:2 zr:4 0 \ + &mova_t rs=%mova_rs esz=1 +MOVAZ_zt2 11000000 10 00011 0 v:1 .. 00010 za:2 off:1 zr:4 0 \ + &mova_t rs=%mova_rs esz=2 +MOVAZ_zt2 11000000 11 00011 0 v:1 .. 00010 za:3 zr:4 0 \ + &mova_t rs=%mova_rs esz=3 off=0 + +MOVAZ_zt4 11000000 00 00011 0 v:1 .. 001100 off:2 zr:3 00 \ + &mova_t rs=%mova_rs esz=0 za=0 +MOVAZ_zt4 11000000 01 00011 0 v:1 .. 001100 za:1 off:1 zr:3 00 \ + &mova_t rs=%mova_rs esz=1 +MOVAZ_zt4 11000000 10 00011 0 v:1 .. 001100 za:2 zr:3 00 \ + &mova_t rs=%mova_rs esz=2 off=0 +MOVAZ_zt4 11000000 11 00011 0 v:1 .. 00110 za:3 zr:3 00 \ + &mova_t rs=%mova_rs esz=3 off=0 + +### SME Move into/from ZT0 + +MOVT_rzt 1100 0000 0100 1100 0 off:3 00 11111 rt:5 +MOVT_ztr 1100 0000 0100 1110 0 off:3 00 11111 rt:5 ### SME Memory -&ldst esz rs pg rn rm za_imm v:bool st:bool +&ldst esz rs pg rn rm za off v:bool st:bool -LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ - &ldst rs=%mova_rs -LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ - &ldst esz=4 rs=%mova_rs +LDST1 1110000 0 00 st:1 rm:5 v:1 .. pg:3 rn:5 0 off:4 \ + &ldst rs=%mova_rs esz=0 za=0 +LDST1 1110000 0 01 st:1 rm:5 v:1 .. pg:3 rn:5 0 za:1 off:3 \ + &ldst rs=%mova_rs esz=1 +LDST1 1110000 0 10 st:1 rm:5 v:1 .. pg:3 rn:5 0 za:2 off:2 \ + &ldst rs=%mova_rs esz=2 +LDST1 1110000 0 11 st:1 rm:5 v:1 .. pg:3 rn:5 0 za:3 off:1 \ + &ldst rs=%mova_rs esz=3 +LDST1 1110000 1 11 st:1 rm:5 v:1 .. pg:3 rn:5 0 za:4 \ + &ldst rs=%mova_rs esz=4 off=0 &ldstr rv rn imm @ldstr ....... ... . ...... .. ... rn:5 . imm:4 \ @@ -54,6 +163,12 @@ LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr +&ldstzt0 rn +@ldstzt0 ....... ... . ...... .. ... rn:5 ..... &ldstzt0 + +LDR_zt0 1110000 100 0 111111 00 000 ..... 00000 @ldstzt0 +STR_zt0 1110000 100 1 111111 00 000 ..... 00000 @ldstzt0 + ### SME Add Vector to Array &adda zad zn pm pn @@ -68,14 +183,18 @@ ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64 ### SME Outer Product &op zad zn zm pm pn sub:bool +@op_16 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 ... zad:1 &op @op_32 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 .. zad:2 &op @op_64 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 . zad:3 &op +FMOPA_h 10000001 100 ..... ... ... ..... . 100 . @op_16 FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32 FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64 -BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32 -FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32 +BFMOPA 10000001 101 ..... ... ... ..... . 100 . @op_16 + +BFMOPA_w 10000001 100 ..... ... ... ..... . 00 .. @op_32 +FMOPA_w_h 10000001 101 ..... ... ... ..... . 00 .. @op_32 SMOPA_s 1010000 0 10 0 ..... ... ... ..... . 00 .. @op_32 SUMOPA_s 1010000 0 10 1 ..... ... ... ..... . 00 .. @op_32 @@ -86,3 +205,789 @@ SMOPA_d 1010000 0 11 0 ..... ... ... ..... . 0 ... @op_64 SUMOPA_d 1010000 0 11 1 ..... ... ... ..... . 0 ... @op_64 USMOPA_d 1010000 1 11 0 ..... ... ... ..... . 0 ... @op_64 UMOPA_d 1010000 1 11 1 ..... ... ... ..... . 0 ... @op_64 + +BMOPA 1000000 0 10 0 ..... ... ... ..... . 10 .. @op_32 +SMOPA2_s 1010000 0 10 0 ..... ... ... ..... . 10 .. @op_32 +UMOPA2_s 1010000 1 10 0 ..... ... ... ..... . 10 .. @op_32 + +### SME2 Multi-vector Multiple and Single SVE Destructive + +%zd_ax2 1:4 !function=times_2 +%zd_ax4 2:3 !function=times_4 + +&z2z_en zdn zm esz n +@z2z_2x1 ....... . esz:2 .. zm:4 ....0. ..... .... . \ + &z2z_en n=2 zdn=%zd_ax2 +@z2z_4x1 ....... . esz:2 .. zm:4 ....1. ..... ...0 . 
\ + &z2z_en n=4 zdn=%zd_ax4 + +SMAX_n1 1100000 1 .. 10 .... 1010.0 00000 .... 0 @z2z_2x1 +SMAX_n1 1100000 1 .. 10 .... 1010.0 00000 .... 0 @z2z_4x1 +UMAX_n1 1100000 1 .. 10 .... 1010.0 00000 .... 1 @z2z_2x1 +UMAX_n1 1100000 1 .. 10 .... 1010.0 00000 .... 1 @z2z_4x1 +SMIN_n1 1100000 1 .. 10 .... 1010.0 00001 .... 0 @z2z_2x1 +SMIN_n1 1100000 1 .. 10 .... 1010.0 00001 .... 0 @z2z_4x1 +UMIN_n1 1100000 1 .. 10 .... 1010.0 00001 .... 1 @z2z_2x1 +UMIN_n1 1100000 1 .. 10 .... 1010.0 00001 .... 1 @z2z_4x1 + +FMAX_n1 1100000 1 .. 10 .... 1010.0 01000 .... 0 @z2z_2x1 +FMAX_n1 1100000 1 .. 10 .... 1010.0 01000 .... 0 @z2z_4x1 +FMIN_n1 1100000 1 .. 10 .... 1010.0 01000 .... 1 @z2z_2x1 +FMIN_n1 1100000 1 .. 10 .... 1010.0 01000 .... 1 @z2z_4x1 +FMAXNM_n1 1100000 1 .. 10 .... 1010.0 01001 .... 0 @z2z_2x1 +FMAXNM_n1 1100000 1 .. 10 .... 1010.0 01001 .... 0 @z2z_4x1 +FMINNM_n1 1100000 1 .. 10 .... 1010.0 01001 .... 1 @z2z_2x1 +FMINNM_n1 1100000 1 .. 10 .... 1010.0 01001 .... 1 @z2z_4x1 + +SRSHL_n1 1100000 1 .. 10 .... 1010.0 10001 .... 0 @z2z_2x1 +SRSHL_n1 1100000 1 .. 10 .... 1010.0 10001 .... 0 @z2z_4x1 +URSHL_n1 1100000 1 .. 10 .... 1010.0 10001 .... 1 @z2z_2x1 +URSHL_n1 1100000 1 .. 10 .... 1010.0 10001 .... 1 @z2z_4x1 + +ADD_n1 1100000 1 .. 10 .... 1010.0 11000 .... 0 @z2z_2x1 +ADD_n1 1100000 1 .. 10 .... 1010.0 11000 .... 0 @z2z_4x1 + +SQDMULH_n1 1100000 1 .. 10 .... 1010.1 00000 .... 0 @z2z_2x1 +SQDMULH_n1 1100000 1 .. 10 .... 1010.1 00000 .... 0 @z2z_4x1 + +### SME2 Multi-vector Multiple Vectors SVE Destructive + +%zm_ax2 17:4 !function=times_2 +%zm_ax4 18:3 !function=times_4 + +@z2z_2x2 ....... . esz:2 . ....0 ....0. ..... .... . \ + &z2z_en n=2 zdn=%zd_ax2 zm=%zm_ax2 +@z2z_4x4 ....... . esz:2 . ...00 ....1. ..... ...0 . \ + &z2z_en n=4 zdn=%zd_ax4 zm=%zm_ax4 + +SMAX_nn 1100000 1 .. 1 ..... 1011.0 00000 .... 0 @z2z_2x2 +SMAX_nn 1100000 1 .. 1 ..... 1011.0 00000 .... 0 @z2z_4x4 +UMAX_nn 1100000 1 .. 1 ..... 1011.0 00000 .... 1 @z2z_2x2 +UMAX_nn 1100000 1 .. 1 ..... 1011.0 00000 .... 1 @z2z_4x4 +SMIN_nn 1100000 1 .. 1 ..... 1011.0 00001 .... 0 @z2z_2x2 +SMIN_nn 1100000 1 .. 1 ..... 1011.0 00001 .... 0 @z2z_4x4 +UMIN_nn 1100000 1 .. 1 ..... 1011.0 00001 .... 1 @z2z_2x2 +UMIN_nn 1100000 1 .. 1 ..... 1011.0 00001 .... 1 @z2z_4x4 + +FMAX_nn 1100000 1 .. 1 ..... 1011.0 01000 .... 0 @z2z_2x2 +FMAX_nn 1100000 1 .. 1 ..... 1011.0 01000 .... 0 @z2z_4x4 +FMIN_nn 1100000 1 .. 1 ..... 1011.0 01000 .... 1 @z2z_2x2 +FMIN_nn 1100000 1 .. 1 ..... 1011.0 01000 .... 1 @z2z_4x4 +FMAXNM_nn 1100000 1 .. 1 ..... 1011.0 01001 .... 0 @z2z_2x2 +FMAXNM_nn 1100000 1 .. 1 ..... 1011.0 01001 .... 0 @z2z_4x4 +FMINNM_nn 1100000 1 .. 1 ..... 1011.0 01001 .... 1 @z2z_2x2 +FMINNM_nn 1100000 1 .. 1 ..... 1011.0 01001 .... 1 @z2z_4x4 + +SRSHL_nn 1100000 1 .. 1 ..... 1011.0 10001 .... 0 @z2z_2x2 +SRSHL_nn 1100000 1 .. 1 ..... 1011.0 10001 .... 0 @z2z_4x4 +URSHL_nn 1100000 1 .. 1 ..... 1011.0 10001 .... 1 @z2z_2x2 +URSHL_nn 1100000 1 .. 1 ..... 1011.0 10001 .... 1 @z2z_4x4 + +SQDMULH_nn 1100000 1 .. 1 ..... 1011.1 00000 .... 0 @z2z_2x2 +SQDMULH_nn 1100000 1 .. 1 ..... 1011.1 00000 .... 0 @z2z_4x4 + +### SME2 Multi-vector Multiple and Single Array Vectors + +&azz_n n off rv zn zm +@azz_nx1_o3 ........ .... zm:4 ...... zn:5 .. off:3 &azz_n rv=%mova_rv + +ADD_azz_n1_s 11000001 0010 .... 0 .. 110 ..... 10 ... @azz_nx1_o3 n=2 +ADD_azz_n1_s 11000001 0011 .... 0 .. 110 ..... 10 ... @azz_nx1_o3 n=4 +ADD_azz_n1_d 11000001 0110 .... 0 .. 110 ..... 10 ... @azz_nx1_o3 n=2 +ADD_azz_n1_d 11000001 0111 .... 0 .. 110 ..... 10 ... 
@azz_nx1_o3 n=4 + +SUB_azz_n1_s 11000001 0010 .... 0 .. 110 ..... 11 ... @azz_nx1_o3 n=2 +SUB_azz_n1_s 11000001 0011 .... 0 .. 110 ..... 11 ... @azz_nx1_o3 n=4 +SUB_azz_n1_d 11000001 0110 .... 0 .. 110 ..... 11 ... @azz_nx1_o3 n=2 +SUB_azz_n1_d 11000001 0111 .... 0 .. 110 ..... 11 ... @azz_nx1_o3 n=4 + +%off3_x2 0:3 !function=times_2 +%off2_x2 0:2 !function=times_2 + +@azz_nx1_o3x2 ........ ... . zm:4 . .. ... zn:5 .. ... \ + &azz_n off=%off3_x2 rv=%mova_rv +@azz_nx1_o2x2 ........ ... . zm:4 . .. ... zn:5 ... .. \ + &azz_n off=%off2_x2 rv=%mova_rv + +FMLAL_n1 11000001 001 0 .... 0 .. 011 ..... 00 ... @azz_nx1_o3x2 n=1 +FMLAL_n1 11000001 001 0 .... 0 .. 010 ..... 000 .. @azz_nx1_o2x2 n=2 +FMLAL_n1 11000001 001 1 .... 0 .. 010 ..... 000 .. @azz_nx1_o2x2 n=4 + +FMLSL_n1 11000001 001 0 .... 0 .. 011 ..... 01 ... @azz_nx1_o3x2 n=1 +FMLSL_n1 11000001 001 0 .... 0 .. 010 ..... 010 .. @azz_nx1_o2x2 n=2 +FMLSL_n1 11000001 001 1 .... 0 .. 010 ..... 010 .. @azz_nx1_o2x2 n=4 + +BFMLAL_n1 11000001 001 0 .... 0 .. 011 ..... 10 ... @azz_nx1_o3x2 n=1 +BFMLAL_n1 11000001 001 0 .... 0 .. 010 ..... 100 .. @azz_nx1_o2x2 n=2 +BFMLAL_n1 11000001 001 1 .... 0 .. 010 ..... 100 .. @azz_nx1_o2x2 n=4 + +BFMLSL_n1 11000001 001 0 .... 0 .. 011 ..... 11 ... @azz_nx1_o3x2 n=1 +BFMLSL_n1 11000001 001 0 .... 0 .. 010 ..... 110 .. @azz_nx1_o2x2 n=2 +BFMLSL_n1 11000001 001 1 .... 0 .. 010 ..... 110 .. @azz_nx1_o2x2 n=4 + +FDOT_n1 11000001 001 0 .... 0 .. 100 ..... 00 ... @azz_nx1_o3 n=2 +FDOT_n1 11000001 001 1 .... 0 .. 100 ..... 00 ... @azz_nx1_o3 n=4 + +BFDOT_n1 11000001 001 0 .... 0 .. 100 ..... 10 ... @azz_nx1_o3 n=2 +BFDOT_n1 11000001 001 1 .... 0 .. 100 ..... 10 ... @azz_nx1_o3 n=4 + +USDOT_n1 11000001 001 0 .... 0 .. 101 ..... 01 ... @azz_nx1_o3 n=2 +USDOT_n1 11000001 001 1 .... 0 .. 101 ..... 01 ... @azz_nx1_o3 n=4 + +SUDOT_n1 11000001 001 0 .... 0 .. 101 ..... 11 ... @azz_nx1_o3 n=2 +SUDOT_n1 11000001 001 1 .... 0 .. 101 ..... 11 ... @azz_nx1_o3 n=4 + +SDOT_n1_4b 11000001 001 0 .... 0 .. 101 ..... 00 ... @azz_nx1_o3 n=2 +SDOT_n1_4b 11000001 001 1 .... 0 .. 101 ..... 00 ... @azz_nx1_o3 n=4 +SDOT_n1_4h 11000001 011 0 .... 0 .. 101 ..... 00 ... @azz_nx1_o3 n=2 +SDOT_n1_4h 11000001 011 1 .... 0 .. 101 ..... 00 ... @azz_nx1_o3 n=4 +SDOT_n1_2h 11000001 011 0 .... 0 .. 101 ..... 01 ... @azz_nx1_o3 n=2 +SDOT_n1_2h 11000001 011 1 .... 0 .. 101 ..... 01 ... @azz_nx1_o3 n=4 + +UDOT_n1_4b 11000001 001 0 .... 0 .. 101 ..... 10 ... @azz_nx1_o3 n=2 +UDOT_n1_4b 11000001 001 1 .... 0 .. 101 ..... 10 ... @azz_nx1_o3 n=4 +UDOT_n1_4h 11000001 011 0 .... 0 .. 101 ..... 10 ... @azz_nx1_o3 n=2 +UDOT_n1_4h 11000001 011 1 .... 0 .. 101 ..... 10 ... @azz_nx1_o3 n=4 +UDOT_n1_2h 11000001 011 0 .... 0 .. 101 ..... 11 ... @azz_nx1_o3 n=2 +UDOT_n1_2h 11000001 011 1 .... 0 .. 101 ..... 11 ... @azz_nx1_o3 n=4 + +SMLAL_n1 11000001 011 0 .... 0 .. 011 ..... 00 ... @azz_nx1_o3x2 n=1 +SMLAL_n1 11000001 011 0 .... 0 .. 010 ..... 000 .. @azz_nx1_o2x2 n=2 +SMLAL_n1 11000001 011 1 .... 0 .. 010 ..... 000 .. @azz_nx1_o2x2 n=4 + +SMLSL_n1 11000001 011 0 .... 0 .. 011 ..... 01 ... @azz_nx1_o3x2 n=1 +SMLSL_n1 11000001 011 0 .... 0 .. 010 ..... 010 .. @azz_nx1_o2x2 n=2 +SMLSL_n1 11000001 011 1 .... 0 .. 010 ..... 010 .. @azz_nx1_o2x2 n=4 + +UMLAL_n1 11000001 011 0 .... 0 .. 011 ..... 10 ... @azz_nx1_o3x2 n=1 +UMLAL_n1 11000001 011 0 .... 0 .. 010 ..... 100 .. @azz_nx1_o2x2 n=2 +UMLAL_n1 11000001 011 1 .... 0 .. 010 ..... 100 .. @azz_nx1_o2x2 n=4 + +UMLSL_n1 11000001 011 0 .... 0 .. 011 ..... 11 ... @azz_nx1_o3x2 n=1 +UMLSL_n1 11000001 011 0 .... 0 .. 010 ..... 
110 .. @azz_nx1_o2x2 n=2 +UMLSL_n1 11000001 011 1 .... 0 .. 010 ..... 110 .. @azz_nx1_o2x2 n=4 + +%off2_x4 0:2 !function=times_4 +%off1_x4 0:1 !function=times_4 + +@azz_nx1_o2x4 ........ ... . zm:4 . .. ... zn:5 ... .. \ + &azz_n off=%off2_x4 rv=%mova_rv +@azz_nx1_o1x4 ........ ... . zm:4 . .. ... zn:5 .... . \ + &azz_n off=%off1_x4 rv=%mova_rv + +SMLALL_n1_s 11000001 001 0 .... 0 .. 001 ..... 000 .. @azz_nx1_o2x4 n=1 +SMLALL_n1_d 11000001 011 0 .... 0 .. 001 ..... 000 .. @azz_nx1_o2x4 n=1 +SMLALL_n1_s 11000001 001 0 .... 0 .. 000 ..... 0000 . @azz_nx1_o1x4 n=2 +SMLALL_n1_d 11000001 011 0 .... 0 .. 000 ..... 0000 . @azz_nx1_o1x4 n=2 +SMLALL_n1_s 11000001 001 1 .... 0 .. 000 ..... 0000 . @azz_nx1_o1x4 n=4 +SMLALL_n1_d 11000001 011 1 .... 0 .. 000 ..... 0000 . @azz_nx1_o1x4 n=4 + +SMLSLL_n1_s 11000001 001 0 .... 0 .. 001 ..... 010 .. @azz_nx1_o2x4 n=1 +SMLSLL_n1_d 11000001 011 0 .... 0 .. 001 ..... 010 .. @azz_nx1_o2x4 n=1 +SMLSLL_n1_s 11000001 001 0 .... 0 .. 000 ..... 0100 . @azz_nx1_o1x4 n=2 +SMLSLL_n1_d 11000001 011 0 .... 0 .. 000 ..... 0100 . @azz_nx1_o1x4 n=2 +SMLSLL_n1_s 11000001 001 1 .... 0 .. 000 ..... 0100 . @azz_nx1_o1x4 n=4 +SMLSLL_n1_d 11000001 011 1 .... 0 .. 000 ..... 0100 . @azz_nx1_o1x4 n=4 + +UMLALL_n1_s 11000001 001 0 .... 0 .. 001 ..... 100 .. @azz_nx1_o2x4 n=1 +UMLALL_n1_d 11000001 011 0 .... 0 .. 001 ..... 100 .. @azz_nx1_o2x4 n=1 +UMLALL_n1_s 11000001 001 0 .... 0 .. 000 ..... 1000 . @azz_nx1_o1x4 n=2 +UMLALL_n1_d 11000001 011 0 .... 0 .. 000 ..... 1000 . @azz_nx1_o1x4 n=2 +UMLALL_n1_s 11000001 001 1 .... 0 .. 000 ..... 1000 . @azz_nx1_o1x4 n=4 +UMLALL_n1_d 11000001 011 1 .... 0 .. 000 ..... 1000 . @azz_nx1_o1x4 n=4 + +UMLSLL_n1_s 11000001 001 0 .... 0 .. 001 ..... 110 .. @azz_nx1_o2x4 n=1 +UMLSLL_n1_d 11000001 011 0 .... 0 .. 001 ..... 110 .. @azz_nx1_o2x4 n=1 +UMLSLL_n1_s 11000001 001 0 .... 0 .. 000 ..... 1100 . @azz_nx1_o1x4 n=2 +UMLSLL_n1_d 11000001 011 0 .... 0 .. 000 ..... 1100 . @azz_nx1_o1x4 n=2 +UMLSLL_n1_s 11000001 001 1 .... 0 .. 000 ..... 1100 . @azz_nx1_o1x4 n=4 +UMLSLL_n1_d 11000001 011 1 .... 0 .. 000 ..... 1100 . @azz_nx1_o1x4 n=4 + +USMLALL_n1_s 11000001 001 0 .... 0 .. 001 ..... 001 .. @azz_nx1_o2x4 n=1 +USMLALL_n1_s 11000001 001 0 .... 0 .. 000 ..... 0010 . @azz_nx1_o1x4 n=2 +USMLALL_n1_s 11000001 001 1 .... 0 .. 000 ..... 0010 . @azz_nx1_o1x4 n=4 + +SUMLALL_n1_s 11000001 001 0 .... 0 .. 000 ..... 1010 . @azz_nx1_o1x4 n=2 +SUMLALL_n1_s 11000001 001 1 .... 0 .. 000 ..... 1010 . @azz_nx1_o1x4 n=4 + +BFMLA_n1 11000001 011 0 .... 0 .. 111 ..... 00 ... @azz_nx1_o3 n=2 +FMLA_n1_h 11000001 001 0 .... 0 .. 111 ..... 00 ... @azz_nx1_o3 n=2 +FMLA_n1_s 11000001 001 0 .... 0 .. 110 ..... 00 ... @azz_nx1_o3 n=2 +FMLA_n1_d 11000001 011 0 .... 0 .. 110 ..... 00 ... @azz_nx1_o3 n=2 + +BFMLA_n1 11000001 011 1 .... 0 .. 111 ..... 00 ... @azz_nx1_o3 n=4 +FMLA_n1_h 11000001 001 1 .... 0 .. 111 ..... 00 ... @azz_nx1_o3 n=4 +FMLA_n1_s 11000001 001 1 .... 0 .. 110 ..... 00 ... @azz_nx1_o3 n=4 +FMLA_n1_d 11000001 011 1 .... 0 .. 110 ..... 00 ... @azz_nx1_o3 n=4 + +BFMLS_n1 11000001 011 0 .... 0 .. 111 ..... 01 ... @azz_nx1_o3 n=2 +FMLS_n1_h 11000001 001 0 .... 0 .. 111 ..... 01 ... @azz_nx1_o3 n=2 +FMLS_n1_s 11000001 001 0 .... 0 .. 110 ..... 01 ... @azz_nx1_o3 n=2 +FMLS_n1_d 11000001 011 0 .... 0 .. 110 ..... 01 ... @azz_nx1_o3 n=2 + +BFMLS_n1 11000001 011 1 .... 0 .. 111 ..... 01 ... @azz_nx1_o3 n=4 +FMLS_n1_h 11000001 001 1 .... 0 .. 111 ..... 01 ... @azz_nx1_o3 n=4 +FMLS_n1_s 11000001 001 1 .... 0 .. 110 ..... 01 ... @azz_nx1_o3 n=4 +FMLS_n1_d 11000001 011 1 .... 
0 .. 110 ..... 01 ... @azz_nx1_o3 n=4 + +### SME2 Multi-vector Multiple Array Vectors + +%zn_ax2 6:4 !function=times_2 +%zn_ax4 7:3 !function=times_4 + +@azz_2x2_o3 ........ ... ..... . .. ... ..... .. off:3 \ + &azz_n n=2 rv=%mova_rv zn=%zn_ax2 zm=%zm_ax2 +@azz_4x4_o3 ........ ... ..... . .. ... ..... .. off:3 \ + &azz_n n=4 rv=%mova_rv zn=%zn_ax4 zm=%zm_ax4 + +ADD_azz_nn_s 11000001 101 ....0 0 .. 110 ....0 10 ... @azz_2x2_o3 +ADD_azz_nn_s 11000001 101 ...01 0 .. 110 ...00 10 ... @azz_4x4_o3 +ADD_azz_nn_d 11000001 111 ....0 0 .. 110 ....0 10 ... @azz_2x2_o3 +ADD_azz_nn_d 11000001 111 ...01 0 .. 110 ...00 10 ... @azz_4x4_o3 + +SUB_azz_nn_s 11000001 101 ....0 0 .. 110 ....0 11 ... @azz_2x2_o3 +SUB_azz_nn_s 11000001 101 ...01 0 .. 110 ...00 11 ... @azz_4x4_o3 +SUB_azz_nn_d 11000001 111 ....0 0 .. 110 ....0 11 ... @azz_2x2_o3 +SUB_azz_nn_d 11000001 111 ...01 0 .. 110 ...00 11 ... @azz_4x4_o3 + +@azz_2x2_o2x2 ........ ... ..... . .. ... ..... ... .. \ + &azz_n n=2 rv=%mova_rv zn=%zn_ax2 zm=%zm_ax2 off=%off2_x2 +@azz_4x4_o2x2 ........ ... ..... . .. ... ..... ... .. \ + &azz_n n=4 rv=%mova_rv zn=%zn_ax4 zm=%zm_ax4 off=%off2_x2 + +FMLAL_nn 11000001 101 ....0 0 .. 010 ....0 000 .. @azz_2x2_o2x2 +FMLAL_nn 11000001 101 ...01 0 .. 010 ...00 000 .. @azz_4x4_o2x2 + +FMLSL_nn 11000001 101 ....0 0 .. 010 ....0 010 .. @azz_2x2_o2x2 +FMLSL_nn 11000001 101 ...01 0 .. 010 ...00 010 .. @azz_4x4_o2x2 + +BFMLAL_nn 11000001 101 ....0 0 .. 010 ....0 100 .. @azz_2x2_o2x2 +BFMLAL_nn 11000001 101 ...01 0 .. 010 ...00 100 .. @azz_4x4_o2x2 + +BFMLSL_nn 11000001 101 ....0 0 .. 010 ....0 110 .. @azz_2x2_o2x2 +BFMLSL_nn 11000001 101 ...01 0 .. 010 ...00 110 .. @azz_4x4_o2x2 + +FDOT_nn 11000001 101 ....0 0 .. 100 ....0 00 ... @azz_2x2_o3 +FDOT_nn 11000001 101 ...01 0 .. 100 ...00 00 ... @azz_4x4_o3 + +BFDOT_nn 11000001 101 ....0 0 .. 100 ....0 10 ... @azz_2x2_o3 +BFDOT_nn 11000001 101 ...01 0 .. 100 ...00 10 ... @azz_4x4_o3 + +USDOT_nn 11000001 101 ....0 0 .. 101 ....0 01 ... @azz_2x2_o3 +USDOT_nn 11000001 101 ...01 0 .. 101 ...00 01 ... @azz_4x4_o3 + +SDOT_nn_4b 11000001 101 ....0 0 .. 101 ....0 00 ... @azz_2x2_o3 +SDOT_nn_4b 11000001 101 ...01 0 .. 101 ...00 00 ... @azz_4x4_o3 +SDOT_nn_4h 11000001 111 ....0 0 .. 101 ....0 00 ... @azz_2x2_o3 +SDOT_nn_4h 11000001 111 ...01 0 .. 101 ...00 00 ... @azz_4x4_o3 +SDOT_nn_2h 11000001 111 ....0 0 .. 101 ....0 01 ... @azz_2x2_o3 +SDOT_nn_2h 11000001 111 ...01 0 .. 101 ...00 01 ... @azz_4x4_o3 + +UDOT_nn_4b 11000001 101 ....0 0 .. 101 ....0 10 ... @azz_2x2_o3 +UDOT_nn_4b 11000001 101 ...01 0 .. 101 ...00 10 ... @azz_4x4_o3 +UDOT_nn_4h 11000001 111 ....0 0 .. 101 ....0 10 ... @azz_2x2_o3 +UDOT_nn_4h 11000001 111 ...01 0 .. 101 ...00 10 ... @azz_4x4_o3 +UDOT_nn_2h 11000001 111 ....0 0 .. 101 ....0 11 ... @azz_2x2_o3 +UDOT_nn_2h 11000001 111 ...01 0 .. 101 ...00 11 ... @azz_4x4_o3 + +SMLAL_nn 11000001 111 ....0 0 .. 010 ....0 000 .. @azz_2x2_o2x2 +SMLAL_nn 11000001 111 ...01 0 .. 010 ...00 000 .. @azz_4x4_o2x2 + +SMLSL_nn 11000001 111 ....0 0 .. 010 ....0 010 .. @azz_2x2_o2x2 +SMLSL_nn 11000001 111 ...01 0 .. 010 ...00 010 .. @azz_4x4_o2x2 + +UMLAL_nn 11000001 111 ....0 0 .. 010 ....0 100 .. @azz_2x2_o2x2 +UMLAL_nn 11000001 111 ...01 0 .. 010 ...00 100 .. @azz_4x4_o2x2 + +UMLSL_nn 11000001 111 ....0 0 .. 010 ....0 110 .. @azz_2x2_o2x2 +UMLSL_nn 11000001 111 ...01 0 .. 010 ...00 110 .. @azz_4x4_o2x2 + +@azz_2x2_o1x4 ........ ... ..... . .. ... ..... ... .. \ + &azz_n n=2 rv=%mova_rv zn=%zn_ax2 zm=%zm_ax2 off=%off1_x4 +@azz_4x4_o1x4 ........ ... ..... . .. ... ..... ... .. 
\ + &azz_n n=4 rv=%mova_rv zn=%zn_ax4 zm=%zm_ax4 off=%off1_x4 + +SMLALL_nn_s 11000001 101 ....0 0 .. 000 ....0 0000 . @azz_2x2_o1x4 +SMLALL_nn_d 11000001 111 ....0 0 .. 000 ....0 0000 . @azz_2x2_o1x4 +SMLALL_nn_s 11000001 101 ...01 0 .. 000 ...00 0000 . @azz_4x4_o1x4 +SMLALL_nn_d 11000001 111 ...01 0 .. 000 ...00 0000 . @azz_4x4_o1x4 + +SMLSLL_nn_s 11000001 101 ....0 0 .. 000 ....0 0100 . @azz_2x2_o1x4 +SMLSLL_nn_d 11000001 111 ....0 0 .. 000 ....0 0100 . @azz_2x2_o1x4 +SMLSLL_nn_s 11000001 101 ...01 0 .. 000 ...00 0100 . @azz_4x4_o1x4 +SMLSLL_nn_d 11000001 111 ...01 0 .. 000 ...00 0100 . @azz_4x4_o1x4 + +UMLALL_nn_s 11000001 101 ....0 0 .. 000 ....0 1000 . @azz_2x2_o1x4 +UMLALL_nn_d 11000001 111 ....0 0 .. 000 ....0 1000 . @azz_2x2_o1x4 +UMLALL_nn_s 11000001 101 ...01 0 .. 000 ...00 1000 . @azz_4x4_o1x4 +UMLALL_nn_d 11000001 111 ...01 0 .. 000 ...00 1000 . @azz_4x4_o1x4 + +UMLSLL_nn_s 11000001 101 ....0 0 .. 000 ....0 1100 . @azz_2x2_o1x4 +UMLSLL_nn_d 11000001 111 ....0 0 .. 000 ....0 1100 . @azz_2x2_o1x4 +UMLSLL_nn_s 11000001 101 ...01 0 .. 000 ...00 1100 . @azz_4x4_o1x4 +UMLSLL_nn_d 11000001 111 ...01 0 .. 000 ...00 1100 . @azz_4x4_o1x4 + +USMLALL_nn_s 11000001 101 ....0 0 .. 000 ....0 0010 . @azz_2x2_o1x4 +USMLALL_nn_s 11000001 101 ...01 0 .. 000 ...00 0010 . @azz_4x4_o1x4 + +BFMLA_nn 11000001 111 ....0 0 .. 100 ....0 01 ... @azz_2x2_o3 +FMLA_nn_h 11000001 101 ....0 0 .. 100 ....0 01 ... @azz_2x2_o3 +FMLA_nn_s 11000001 101 ....0 0 .. 110 ....0 00 ... @azz_2x2_o3 +FMLA_nn_d 11000001 111 ....0 0 .. 110 ....0 00 ... @azz_2x2_o3 + +BFMLA_nn 11000001 111 ...01 0 .. 100 ...00 01 ... @azz_4x4_o3 +FMLA_nn_h 11000001 101 ...01 0 .. 100 ...00 01 ... @azz_4x4_o3 +FMLA_nn_s 11000001 101 ...01 0 .. 110 ...00 00 ... @azz_4x4_o3 +FMLA_nn_d 11000001 111 ...01 0 .. 110 ...00 00 ... @azz_4x4_o3 + +BFMLS_nn 11000001 111 ....0 0 .. 100 ....0 11 ... @azz_2x2_o3 +FMLS_nn_h 11000001 101 ....0 0 .. 100 ....0 11 ... @azz_2x2_o3 +FMLS_nn_s 11000001 101 ....0 0 .. 110 ....0 01 ... @azz_2x2_o3 +FMLS_nn_d 11000001 111 ....0 0 .. 110 ....0 01 ... @azz_2x2_o3 + +BFMLS_nn 11000001 111 ...01 0 .. 100 ...00 11 ... @azz_4x4_o3 +FMLS_nn_h 11000001 101 ...01 0 .. 100 ...00 11 ... @azz_4x4_o3 +FMLS_nn_s 11000001 101 ...01 0 .. 110 ...00 01 ... @azz_4x4_o3 +FMLS_nn_d 11000001 111 ...01 0 .. 110 ...00 01 ... @azz_4x4_o3 + +&az_n n off rv zm +@az_2x2_o3 ........ ... ..... . .. ... ..... .. off:3 \ + &az_n n=2 rv=%mova_rv zm=%zn_ax2 +@az_4x4_o3 ........ ... ..... . .. ... ..... .. off:3 \ + &az_n n=4 rv=%mova_rv zm=%zn_ax4 + +FADD_nn_h 11000001 101 00100 0 .. 111 ....0 00 ... @az_2x2_o3 +FADD_nn_s 11000001 101 00000 0 .. 111 ....0 00 ... @az_2x2_o3 +FADD_nn_d 11000001 111 00000 0 .. 111 ....0 00 ... @az_2x2_o3 +FADD_nn_h 11000001 101 00101 0 .. 111 ...00 00 ... @az_4x4_o3 +FADD_nn_s 11000001 101 00001 0 .. 111 ...00 00 ... @az_4x4_o3 +FADD_nn_d 11000001 111 00001 0 .. 111 ...00 00 ... @az_4x4_o3 + +FSUB_nn_h 11000001 101 00100 0 .. 111 ....0 01 ... @az_2x2_o3 +FSUB_nn_s 11000001 101 00000 0 .. 111 ....0 01 ... @az_2x2_o3 +FSUB_nn_d 11000001 111 00000 0 .. 111 ....0 01 ... @az_2x2_o3 +FSUB_nn_h 11000001 101 00101 0 .. 111 ...00 01 ... @az_4x4_o3 +FSUB_nn_s 11000001 101 00001 0 .. 111 ...00 01 ... @az_4x4_o3 +FSUB_nn_d 11000001 111 00001 0 .. 111 ...00 01 ... @az_4x4_o3 + +BFADD_nn 11000001 111 00100 0 .. 111 ....0 00 ... @az_2x2_o3 +BFADD_nn 11000001 111 00101 0 .. 111 ...00 00 ... @az_4x4_o3 +BFSUB_nn 11000001 111 00100 0 .. 111 ....0 01 ... @az_2x2_o3 +BFSUB_nn 11000001 111 00101 0 .. 111 ...00 01 ... 
@az_4x4_o3 + +### SME2 Multi-vector Indexed + +&azx_n n off rv zn zm idx + +%idx3_15_10 15:1 10:2 +%idx2_10_2 10:2 2:1 + +@azx_1x1_o3x2 ........ .... zm:4 . .. . .. zn:5 .. ... \ + &azx_n n=1 rv=%mova_rv off=%off3_x2 idx=%idx3_15_10 +@azx_2x1_o2x2 ........ .... zm:4 . .. . .. ..... .. ... \ + &azx_n n=2 rv=%mova_rv off=%off2_x2 zn=%zn_ax2 idx=%idx2_10_2 +@azx_4x1_o2x2 ........ .... zm:4 . .. . .. ..... .. ... \ + &azx_n n=4 rv=%mova_rv off=%off2_x2 zn=%zn_ax4 idx=%idx2_10_2 + +FMLAL_nx 11000001 1000 .... . .. 1 .. ..... 00 ... @azx_1x1_o3x2 +FMLAL_nx 11000001 1001 .... 0 .. 1 .. ....0 00 ... @azx_2x1_o2x2 +FMLAL_nx 11000001 1001 .... 1 .. 1 .. ...00 00 ... @azx_4x1_o2x2 + +FMLSL_nx 11000001 1000 .... . .. 1 .. ..... 01 ... @azx_1x1_o3x2 +FMLSL_nx 11000001 1001 .... 0 .. 1 .. ....0 01 ... @azx_2x1_o2x2 +FMLSL_nx 11000001 1001 .... 1 .. 1 .. ...00 01 ... @azx_4x1_o2x2 + +BFMLAL_nx 11000001 1000 .... . .. 1 .. ..... 10 ... @azx_1x1_o3x2 +BFMLAL_nx 11000001 1001 .... 0 .. 1 .. ....0 10 ... @azx_2x1_o2x2 +BFMLAL_nx 11000001 1001 .... 1 .. 1 .. ...00 10 ... @azx_4x1_o2x2 + +BFMLSL_nx 11000001 1000 .... . .. 1 .. ..... 11 ... @azx_1x1_o3x2 +BFMLSL_nx 11000001 1001 .... 0 .. 1 .. ....0 11 ... @azx_2x1_o2x2 +BFMLSL_nx 11000001 1001 .... 1 .. 1 .. ...00 11 ... @azx_4x1_o2x2 + +@azx_2x1_i2_o3 ........ .... zm:4 . .. . idx:2 .... ... off:3 \ + &azx_n n=2 rv=%mova_rv zn=%zn_ax2 +@azx_4x1_i2_o3 ........ .... zm:4 . .. . idx:2 .... ... off:3 \ + &azx_n n=4 rv=%mova_rv zn=%zn_ax4 +@azx_2x1_i1_o3 ........ .... zm:4 . .. .. idx:1 .... ... off:3 \ + &azx_n n=2 rv=%mova_rv zn=%zn_ax2 +@azx_4x1_i1_o3 ........ .... zm:4 . .. .. idx:1 .... ... off:3 \ + &azx_n n=4 rv=%mova_rv zn=%zn_ax4 + +FDOT_nx 11000001 0101 .... 0 .. 1 .. ....0 01 ... @azx_2x1_i2_o3 +FDOT_nx 11000001 0101 .... 1 .. 1 .. ...00 01 ... @azx_4x1_i2_o3 + +BFDOT_nx 11000001 0101 .... 0 .. 1 .. ....0 11 ... @azx_2x1_i2_o3 +BFDOT_nx 11000001 0101 .... 1 .. 1 .. ...00 11 ... @azx_4x1_i2_o3 + +FVDOT 11000001 0101 .... 0 .. 0 .. ....0 01 ... @azx_2x1_i2_o3 +BFVDOT 11000001 0101 .... 0 .. 0 .. ....0 11 ... @azx_2x1_i2_o3 + +SDOT_nx_2h 11000001 0101 .... 0 .. 1 .. ....0 00 ... @azx_2x1_i2_o3 +SDOT_nx_2h 11000001 0101 .... 1 .. 1 .. ...00 00 ... @azx_4x1_i2_o3 +SDOT_nx_4b 11000001 0101 .... 0 .. 1 .. ....1 00 ... @azx_2x1_i2_o3 +SDOT_nx_4b 11000001 0101 .... 1 .. 1 .. ...01 00 ... @azx_4x1_i2_o3 +SDOT_nx_4h 11000001 1101 .... 0 .. 00 . ....0 01 ... @azx_2x1_i1_o3 +SDOT_nx_4h 11000001 1101 .... 1 .. 00 . ...00 01 ... @azx_4x1_i1_o3 + +UDOT_nx_2h 11000001 0101 .... 0 .. 1 .. ....0 10 ... @azx_2x1_i2_o3 +UDOT_nx_2h 11000001 0101 .... 1 .. 1 .. ...00 10 ... @azx_4x1_i2_o3 +UDOT_nx_4b 11000001 0101 .... 0 .. 1 .. ....1 10 ... @azx_2x1_i2_o3 +UDOT_nx_4b 11000001 0101 .... 1 .. 1 .. ...01 10 ... @azx_4x1_i2_o3 +UDOT_nx_4h 11000001 1101 .... 0 .. 00 . ....0 11 ... @azx_2x1_i1_o3 +UDOT_nx_4h 11000001 1101 .... 1 .. 00 . ...00 11 ... @azx_4x1_i1_o3 + +USDOT_nx 11000001 0101 .... 0 .. 1 .. ....1 01 ... @azx_2x1_i2_o3 +USDOT_nx 11000001 0101 .... 1 .. 1 .. ...01 01 ... @azx_4x1_i2_o3 + +SUDOT_nx 11000001 0101 .... 0 .. 1 .. ....1 11 ... @azx_2x1_i2_o3 +SUDOT_nx 11000001 0101 .... 1 .. 1 .. ...01 11 ... @azx_4x1_i2_o3 + +SVDOT_nx_2h 11000001 0101 .... 0 .. 0 .. ....1 00 ... @azx_2x1_i2_o3 +SVDOT_nx_4b 11000001 0101 .... 1 .. 0 .. ...01 00 ... @azx_4x1_i2_o3 +SVDOT_nx_4h 11000001 1101 .... 1 .. 01 . ...00 01 ... @azx_4x1_i1_o3 + +UVDOT_nx_2h 11000001 0101 .... 0 .. 0 .. ....1 10 ... @azx_2x1_i2_o3 +UVDOT_nx_4b 11000001 0101 .... 1 .. 0 .. ...01 10 ... 
@azx_4x1_i2_o3 +UVDOT_nx_4h 11000001 1101 .... 1 .. 01 . ...00 11 ... @azx_4x1_i1_o3 + +SUVDOT_nx_4b 11000001 0101 .... 1 .. 0 .. ...01 11 ... @azx_4x1_i2_o3 +USVDOT_nx_4b 11000001 0101 .... 1 .. 0 .. ...01 01 ... @azx_4x1_i2_o3 + +SMLAL_nx 11000001 1100 .... . .. 1 .. ..... 00 ... @azx_1x1_o3x2 +SMLAL_nx 11000001 1101 .... 0 .. 1 .. ....0 00 ... @azx_2x1_o2x2 +SMLAL_nx 11000001 1101 .... 1 .. 1 .. ...00 00 ... @azx_4x1_o2x2 + +SMLSL_nx 11000001 1100 .... . .. 1 .. ..... 01 ... @azx_1x1_o3x2 +SMLSL_nx 11000001 1101 .... 0 .. 1 .. ....0 01 ... @azx_2x1_o2x2 +SMLSL_nx 11000001 1101 .... 1 .. 1 .. ...00 01 ... @azx_4x1_o2x2 + +UMLAL_nx 11000001 1100 .... . .. 1 .. ..... 10 ... @azx_1x1_o3x2 +UMLAL_nx 11000001 1101 .... 0 .. 1 .. ....0 10 ... @azx_2x1_o2x2 +UMLAL_nx 11000001 1101 .... 1 .. 1 .. ...00 10 ... @azx_4x1_o2x2 + +UMLSL_nx 11000001 1100 .... . .. 1 .. ..... 11 ... @azx_1x1_o3x2 +UMLSL_nx 11000001 1101 .... 0 .. 1 .. ....0 11 ... @azx_2x1_o2x2 +UMLSL_nx 11000001 1101 .... 1 .. 1 .. ...00 11 ... @azx_4x1_o2x2 + +%idx4_15_10 15:1 10:3 +%idx4_10_1 10:2 1:2 +%idx3_10_1 10:1 1:2 + +@azx_1x1_i4_o2 ........ .... zm:4 . .. ... zn:5 ... .. \ + &azx_n n=1 rv=%mova_rv off=%off2_x4 idx=%idx4_15_10 +@azx_1x1_i3_o2 ........ .... zm:4 . .. ... zn:5 ... .. \ + &azx_n n=1 rv=%mova_rv off=%off2_x4 idx=%idx3_15_10 +@azx_2x1_i4_o1 ........ .... zm:4 . .. ... ..... ... .. \ + &azx_n n=2 rv=%mova_rv off=%off1_x4 zn=%zn_ax2 idx=%idx4_10_1 +@azx_2x1_i3_o1 ........ .... zm:4 . .. ... ..... ... .. \ + &azx_n n=2 rv=%mova_rv off=%off1_x4 zn=%zn_ax2 idx=%idx3_10_1 +@azx_4x1_i4_o1 ........ .... zm:4 . .. ... ..... ... .. \ + &azx_n n=4 rv=%mova_rv off=%off1_x4 zn=%zn_ax4 idx=%idx4_10_1 +@azx_4x1_i3_o1 ........ .... zm:4 . .. ... ..... ... .. \ + &azx_n n=4 rv=%mova_rv off=%off1_x4 zn=%zn_ax4 idx=%idx3_10_1 + +SMLALL_nx_s 11000001 0000 .... . .. ... ..... 000 .. @azx_1x1_i4_o2 +SMLALL_nx_d 11000001 1000 .... . .. 0.. ..... 000 .. @azx_1x1_i3_o2 +SMLALL_nx_s 11000001 0001 .... 0 .. 0.. ....0 00 ... @azx_2x1_i4_o1 +SMLALL_nx_d 11000001 1001 .... 0 .. 00. ....0 00 ... @azx_2x1_i3_o1 +SMLALL_nx_s 11000001 0001 .... 1 .. 0.. ...00 00 ... @azx_4x1_i4_o1 +SMLALL_nx_d 11000001 1001 .... 1 .. 00. ...00 00 ... @azx_4x1_i3_o1 + +SMLSLL_nx_s 11000001 0000 .... . .. ... ..... 010 .. @azx_1x1_i4_o2 +SMLSLL_nx_d 11000001 1000 .... . .. 0.. ..... 010 .. @azx_1x1_i3_o2 +SMLSLL_nx_s 11000001 0001 .... 0 .. 0.. ....0 01 ... @azx_2x1_i4_o1 +SMLSLL_nx_d 11000001 1001 .... 0 .. 00. ....0 01 ... @azx_2x1_i3_o1 +SMLSLL_nx_s 11000001 0001 .... 1 .. 0.. ...00 01 ... @azx_4x1_i4_o1 +SMLSLL_nx_d 11000001 1001 .... 1 .. 00. ...00 01 ... @azx_4x1_i3_o1 + +UMLALL_nx_s 11000001 0000 .... . .. ... ..... 100 .. @azx_1x1_i4_o2 +UMLALL_nx_d 11000001 1000 .... . .. 0.. ..... 100 .. @azx_1x1_i3_o2 +UMLALL_nx_s 11000001 0001 .... 0 .. 0.. ....0 10 ... @azx_2x1_i4_o1 +UMLALL_nx_d 11000001 1001 .... 0 .. 00. ....0 10 ... @azx_2x1_i3_o1 +UMLALL_nx_s 11000001 0001 .... 1 .. 0.. ...00 10 ... @azx_4x1_i4_o1 +UMLALL_nx_d 11000001 1001 .... 1 .. 00. ...00 10 ... @azx_4x1_i3_o1 + +UMLSLL_nx_s 11000001 0000 .... . .. ... ..... 110 .. @azx_1x1_i4_o2 +UMLSLL_nx_d 11000001 1000 .... . .. 0.. ..... 110 .. @azx_1x1_i3_o2 +UMLSLL_nx_s 11000001 0001 .... 0 .. 0.. ....0 11 ... @azx_2x1_i4_o1 +UMLSLL_nx_d 11000001 1001 .... 0 .. 00. ....0 11 ... @azx_2x1_i3_o1 +UMLSLL_nx_s 11000001 0001 .... 1 .. 0.. ...00 11 ... @azx_4x1_i4_o1 +UMLSLL_nx_d 11000001 1001 .... 1 .. 00. ...00 11 ... @azx_4x1_i3_o1 + +USMLALL_nx_s 11000001 0000 .... . .. ... ..... 001 .. 
@azx_1x1_i4_o2 +USMLALL_nx_s 11000001 0001 .... 0 .. 0.. ....1 00 ... @azx_2x1_i4_o1 +USMLALL_nx_s 11000001 0001 .... 1 .. 0.. ...01 00 ... @azx_4x1_i4_o1 + +SUMLALL_nx_s 11000001 0000 .... . .. ... ..... 101 .. @azx_1x1_i4_o2 +SUMLALL_nx_s 11000001 0001 .... 0 .. 0.. ....1 10 ... @azx_2x1_i4_o1 +SUMLALL_nx_s 11000001 0001 .... 1 .. 0.. ...01 10 ... @azx_4x1_i4_o1 + +%idx3_10_3 10:2 3:1 +@azx_2x1_i3_o3 ........ .... zm:4 . .. ... ..... .. off:3 \ + &azx_n n=2 rv=%mova_rv zn=%zn_ax2 idx=%idx3_10_3 +@azx_4x1_i3_o3 ........ .... zm:4 . .. ... ..... .. off:3 \ + &azx_n n=4 rv=%mova_rv zn=%zn_ax4 idx=%idx3_10_3 + +BFMLA_nx 11000001 0001 .... 0 .. 1.. ....1 0 .... @azx_2x1_i3_o3 +FMLA_nx_h 11000001 0001 .... 0 .. 1.. ....0 0 .... @azx_2x1_i3_o3 +FMLA_nx_s 11000001 0101 .... 0 .. 0.. ....0 00 ... @azx_2x1_i2_o3 +FMLA_nx_d 11000001 1101 .... 0 .. 00. ....0 00 ... @azx_2x1_i1_o3 + +BFMLA_nx 11000001 0001 .... 1 .. 1.. ...01 0 .... @azx_4x1_i3_o3 +FMLA_nx_h 11000001 0001 .... 1 .. 1.. ...00 0 .... @azx_4x1_i3_o3 +FMLA_nx_s 11000001 0101 .... 1 .. 0.. ...00 00 ... @azx_4x1_i2_o3 +FMLA_nx_d 11000001 1101 .... 1 .. 00. ...00 00 ... @azx_4x1_i1_o3 + +BFMLS_nx 11000001 0001 .... 0 .. 1.. ....1 1 .... @azx_2x1_i3_o3 +FMLS_nx_h 11000001 0001 .... 0 .. 1.. ....0 1 .... @azx_2x1_i3_o3 +FMLS_nx_s 11000001 0101 .... 0 .. 0.. ....0 10 ... @azx_2x1_i2_o3 +FMLS_nx_d 11000001 1101 .... 0 .. 00. ....0 10 ... @azx_2x1_i1_o3 + +BFMLS_nx 11000001 0001 .... 1 .. 1.. ...01 1 .... @azx_4x1_i3_o3 +FMLS_nx_h 11000001 0001 .... 1 .. 1.. ...00 1 .... @azx_4x1_i3_o3 +FMLS_nx_s 11000001 0101 .... 1 .. 0.. ...00 10 ... @azx_4x1_i2_o3 +FMLS_nx_d 11000001 1101 .... 1 .. 00. ...00 10 ... @azx_4x1_i1_o3 + +### SME2 Add / Sub array accumulators + +ADD_aaz_s 11000001 101 000000 .. 111 ....0 10 ... @az_2x2_o3 +ADD_aaz_s 11000001 101 000010 .. 111 ...00 10 ... @az_4x4_o3 +ADD_aaz_d 11000001 111 000000 .. 111 ....0 10 ... @az_2x2_o3 +ADD_aaz_d 11000001 111 000010 .. 111 ...00 10 ... @az_4x4_o3 + +SUB_aaz_s 11000001 101 000000 .. 111 ....0 11 ... @az_2x2_o3 +SUB_aaz_s 11000001 101 000010 .. 111 ...00 11 ... @az_4x4_o3 +SUB_aaz_d 11000001 111 000000 .. 111 ....0 11 ... @az_2x2_o3 +SUB_aaz_d 11000001 111 000010 .. 111 ...00 11 ... @az_4x4_o3 + +### SME2 Multi-vector SVE Constructive Unary + +&zz_e zd zn esz +&zz_n zd zn n +@zz_1x2 ........ ... ..... ...... ..... zd:5 \ + &zz_n n=1 zn=%zn_ax2 +@zz_1x4 ........ ... ..... ...... ..... zd:5 \ + &zz_n n=1 zn=%zn_ax4 +@zz_2x1 ........ ... ..... ...... zn:5 ..... \ + &zz_n n=1 zd=%zd_ax2 +@zz_2x2 ........ ... ..... ...... .... . ..... \ + &zz_n n=2 zd=%zd_ax2 zn=%zn_ax2 +@zz_4x4 ........ ... ..... ...... .... . ..... \ + &zz_n n=4 zd=%zd_ax4 zn=%zn_ax4 +@zz_4x2_n1 ........ ... ..... ...... .... . ..... \ + &zz_n n=1 zd=%zd_ax4 zn=%zn_ax2 + +BFCVT 11000001 011 00000 111000 ....0 ..... @zz_1x2 +BFCVTN 11000001 011 00000 111000 ....1 ..... @zz_1x2 + +FCVT_n 11000001 001 00000 111000 ....0 ..... @zz_1x2 +FCVTN 11000001 001 00000 111000 ....1 ..... @zz_1x2 + +FCVT_w 11000001 101 00000 111000 ..... ....0 @zz_2x1 +FCVTL 11000001 101 00000 111000 ..... 
....1 @zz_2x1 + +FCVTZS 11000001 001 00001 111000 ....0 ....0 @zz_2x2 +FCVTZS 11000001 001 10001 111000 ...00 ...00 @zz_4x4 +FCVTZU 11000001 001 00001 111000 ....1 ....0 @zz_2x2 +FCVTZU 11000001 001 10001 111000 ...01 ...00 @zz_4x4 + +SCVTF 11000001 001 00010 111000 ....0 ....0 @zz_2x2 +SCVTF 11000001 001 10010 111000 ...00 ...00 @zz_4x4 +UCVTF 11000001 001 00010 111000 ....1 ....0 @zz_2x2 +UCVTF 11000001 001 10010 111000 ...01 ...00 @zz_4x4 + +FRINTN 11000001 101 01000 111000 ....0 ....0 @zz_2x2 +FRINTN 11000001 101 11000 111000 ...00 ...00 @zz_4x4 +FRINTP 11000001 101 01001 111000 ....0 ....0 @zz_2x2 +FRINTP 11000001 101 11001 111000 ...00 ...00 @zz_4x4 +FRINTM 11000001 101 01010 111000 ....0 ....0 @zz_2x2 +FRINTM 11000001 101 11010 111000 ...00 ...00 @zz_4x4 +FRINTA 11000001 101 01100 111000 ....0 ....0 @zz_2x2 +FRINTA 11000001 101 11100 111000 ...00 ...00 @zz_4x4 + +SQCVT_sh 11000001 001 00011 111000 ....0 ..... @zz_1x2 +UQCVT_sh 11000001 001 00011 111000 ....1 ..... @zz_1x2 +SQCVTU_sh 11000001 011 00011 111000 ....0 ..... @zz_1x2 + +SQCVT_sb 11000001 001 10011 111000 ...00 ..... @zz_1x4 +UQCVT_sb 11000001 001 10011 111000 ...01 ..... @zz_1x4 +SQCVTU_sb 11000001 011 10011 111000 ...00 ..... @zz_1x4 + +SQCVT_dh 11000001 101 10011 111000 ...00 ..... @zz_1x4 +UQCVT_dh 11000001 101 10011 111000 ...01 ..... @zz_1x4 +SQCVTU_dh 11000001 111 10011 111000 ...00 ..... @zz_1x4 + +SQCVTN_sb 11000001 001 10011 111000 ...10 ..... @zz_1x4 +UQCVTN_sb 11000001 001 10011 111000 ...11 ..... @zz_1x4 +SQCVTUN_sb 11000001 011 10011 111000 ...10 ..... @zz_1x4 + +SQCVTN_dh 11000001 101 10011 111000 ...10 ..... @zz_1x4 +UQCVTN_dh 11000001 101 10011 111000 ...11 ..... @zz_1x4 +SQCVTUN_dh 11000001 111 10011 111000 ...10 ..... @zz_1x4 + +SUNPK_2bh 11000001 011 00101 111000 ..... ....0 @zz_2x1 +SUNPK_2hs 11000001 101 00101 111000 ..... ....0 @zz_2x1 +SUNPK_2sd 11000001 111 00101 111000 ..... ....0 @zz_2x1 + +UUNPK_2bh 11000001 011 00101 111000 ..... ....1 @zz_2x1 +UUNPK_2hs 11000001 101 00101 111000 ..... ....1 @zz_2x1 +UUNPK_2sd 11000001 111 00101 111000 ..... ....1 @zz_2x1 + +SUNPK_4bh 11000001 011 10101 111000 ....0 ...00 @zz_4x2_n1 +SUNPK_4hs 11000001 101 10101 111000 ....0 ...00 @zz_4x2_n1 +SUNPK_4sd 11000001 111 10101 111000 ....0 ...00 @zz_4x2_n1 + +UUNPK_4bh 11000001 011 10101 111000 ....0 ...01 @zz_4x2_n1 +UUNPK_4hs 11000001 101 10101 111000 ....0 ...01 @zz_4x2_n1 +UUNPK_4sd 11000001 111 10101 111000 ....0 ...01 @zz_4x2_n1 + +ZIP_4 11000001 esz:2 1 10110 111000 ...00 ... 00 \ + &zz_e zd=%zd_ax4 zn=%zn_ax4 +ZIP_4 11000001 001 10111 111000 ...00 ... 00 \ + &zz_e esz=4 zd=%zd_ax4 zn=%zn_ax4 + +UZP_4 11000001 esz:2 1 10110 111000 ...00 ... 10 \ + &zz_e zd=%zd_ax4 zn=%zn_ax4 +UZP_4 11000001 001 10111 111000 ...00 ... 10 \ + &zz_e esz=4 zd=%zd_ax4 zn=%zn_ax4 + +### SME2 Multi-vector SVE Constructive Binary + +&rshr zd zn shift + +%rshr_sh_shift 16:4 !function=rsub_16 +%rshr_sb_shift 16:5 !function=rsub_32 +%rshr_dh_shift 22:1 16:5 !function=rsub_64 + +@rshr_sh ........ .... .... ...... ..... zd:5 \ + &rshr zn=%zn_ax2 shift=%rshr_sh_shift +@rshr_sb ........ ... ..... ...... ..... zd:5 \ + &rshr zn=%zn_ax4 shift=%rshr_sb_shift +@rshr_dh ........ ... ..... ...... ..... zd:5 \ + &rshr zn=%zn_ax4 shift=%rshr_dh_shift + +SQRSHR_sh 11000001 1110 .... 110101 ....0 ..... @rshr_sh +UQRSHR_sh 11000001 1110 .... 110101 ....1 ..... @rshr_sh +SQRSHRU_sh 11000001 1111 .... 110101 ....0 ..... @rshr_sh + +SQRSHR_sb 11000001 011 ..... 110110 ...00 ..... @rshr_sb +SQRSHR_dh 11000001 1.1 ..... 110110 ...00 ..... 
@rshr_dh +UQRSHR_sb 11000001 011 ..... 110110 ...01 ..... @rshr_sb +UQRSHR_dh 11000001 1.1 ..... 110110 ...01 ..... @rshr_dh +SQRSHRU_sb 11000001 011 ..... 110110 ...10 ..... @rshr_sb +SQRSHRU_dh 11000001 1.1 ..... 110110 ...10 ..... @rshr_dh + +SQRSHRN_sh 01000101 1011 .... 001010 ....0 ..... @rshr_sh +UQRSHRN_sh 01000101 1011 .... 001110 ....0 ..... @rshr_sh +SQRSHRUN_sh 01000101 1011 .... 000010 ....0 ..... @rshr_sh + +SQRSHRN_sb 11000001 011 ..... 110111 ...00 ..... @rshr_sb +SQRSHRN_dh 11000001 1.1 ..... 110111 ...00 ..... @rshr_dh +UQRSHRN_sb 11000001 011 ..... 110111 ...01 ..... @rshr_sb +UQRSHRN_dh 11000001 1.1 ..... 110111 ...01 ..... @rshr_dh +SQRSHRUN_sb 11000001 011 ..... 110111 ...10 ..... @rshr_sb +SQRSHRUN_dh 11000001 1.1 ..... 110111 ...10 ..... @rshr_dh + +&zzz_e zd zn zm esz + +ZIP_2 11000001 esz:2 1 zm:5 110100 zn:5 .... 0 \ + &zzz_e zd=%zd_ax2 +ZIP_2 11000001 00 1 zm:5 110101 zn:5 .... 0 \ + &zzz_e zd=%zd_ax2 esz=4 + +UZP_2 11000001 esz:2 1 zm:5 110100 zn:5 .... 1 \ + &zzz_e zd=%zd_ax2 +UZP_2 11000001 00 1 zm:5 110101 zn:5 .... 1 \ + &zzz_e zd=%zd_ax2 esz=4 + +&zzz_en zd zn zm esz n + +FCLAMP 11000001 esz:2 1 zm:5 110000 zn:5 .... 0 \ + &zzz_en zd=%zd_ax2 n=2 +FCLAMP 11000001 esz:2 1 zm:5 110010 zn:5 ...0 0 \ + &zzz_en zd=%zd_ax4 n=4 + +SCLAMP 11000001 esz:2 1 zm:5 110001 zn:5 .... 0 \ + &zzz_en zd=%zd_ax2 n=2 +SCLAMP 11000001 esz:2 1 zm:5 110011 zn:5 ...0 0 \ + &zzz_en zd=%zd_ax4 n=4 + +UCLAMP 11000001 esz:2 1 zm:5 110001 zn:5 .... 1 \ + &zzz_en zd=%zd_ax2 n=2 +UCLAMP 11000001 esz:2 1 zm:5 110011 zn:5 ...0 1 \ + &zzz_en zd=%zd_ax4 n=4 + +### SME2 Multi-vector SVE Select + +%sel_pg 10:3 !function=plus_8 + +SEL 11000001 esz:2 1 ....0 100 ... ....0 ....0 \ + n=2 zd=%zd_ax2 zn=%zn_ax2 zm=%zm_ax2 pg=%sel_pg +SEL 11000001 esz:2 1 ...01 100 ... ...00 ...00 \ + n=4 zd=%zd_ax4 zn=%zn_ax4 zm=%zm_ax4 pg=%sel_pg + +### SME Multiple Zero + +&zero_za rv off ngrp nvec + +ZERO_za 11000000 000011 000 .. 0000000000 off:3 \ + &zero_za ngrp=2 nvec=1 rv=%mova_rv +ZERO_za 11000000 000011 100 .. 0000000000 off:3 \ + &zero_za ngrp=4 nvec=1 rv=%mova_rv + +ZERO_za 11000000 000011 001 .. 0000000000 ... \ + &zero_za ngrp=1 nvec=2 rv=%mova_rv off=%off3_x2 +ZERO_za 11000000 000011 010 .. 0000000000 0.. \ + &zero_za ngrp=2 nvec=2 rv=%mova_rv off=%off2_x2 +ZERO_za 11000000 000011 011 .. 0000000000 0.. \ + &zero_za ngrp=4 nvec=2 rv=%mova_rv off=%off2_x2 + +ZERO_za 11000000 000011 101 .. 0000000000 0.. \ + &zero_za ngrp=1 nvec=4 rv=%mova_rv off=%off2_x4 +ZERO_za 11000000 000011 110 .. 0000000000 00. \ + &zero_za ngrp=2 nvec=4 rv=%mova_rv off=%off1_x4 +ZERO_za 11000000 000011 111 .. 0000000000 00. \ + &zero_za ngrp=4 nvec=4 rv=%mova_rv off=%off1_x4 + +### SME Lookup Table Read + +&lut zd zn idx + +# LUTI2, consecutive +LUTI2_c_1b 1100 0000 1100 11 idx:4 00 00 zn:5 zd:5 &lut +LUTI2_c_1h 1100 0000 1100 11 idx:4 01 00 zn:5 zd:5 &lut +LUTI2_c_1s 1100 0000 1100 11 idx:4 10 00 zn:5 zd:5 &lut + +LUTI2_c_2b 1100 0000 1000 11 idx:3 1 00 00 zn:5 .... 0 &lut zd=%zd_ax2 +LUTI2_c_2h 1100 0000 1000 11 idx:3 1 01 00 zn:5 .... 0 &lut zd=%zd_ax2 +LUTI2_c_2s 1100 0000 1000 11 idx:3 1 10 00 zn:5 .... 0 &lut zd=%zd_ax2 + +LUTI2_c_4b 1100 0000 1000 11 idx:2 10 00 00 zn:5 ... 00 &lut zd=%zd_ax4 +LUTI2_c_4h 1100 0000 1000 11 idx:2 10 01 00 zn:5 ... 00 &lut zd=%zd_ax4 +LUTI2_c_4s 1100 0000 1000 11 idx:2 10 10 00 zn:5 ... 
00 &lut zd=%zd_ax4 + +# LUTI2, strided (must check zd alignment) +LUTI2_s_2b 1100 0000 1001 11 idx:3 1 00 00 zn:5 zd:5 &lut +LUTI2_s_2h 1100 0000 1001 11 idx:3 1 01 00 zn:5 zd:5 &lut + +LUTI2_s_4b 1100 0000 1001 11 idx:2 10 00 00 zn:5 zd:5 &lut +LUTI2_s_4h 1100 0000 1001 11 idx:2 10 01 00 zn:5 zd:5 &lut + +# LUTI4, consecutive +LUTI4_c_1b 1100 0000 1100 101 idx:3 00 00 zn:5 zd:5 &lut +LUTI4_c_1h 1100 0000 1100 101 idx:3 01 00 zn:5 zd:5 &lut +LUTI4_c_1s 1100 0000 1100 101 idx:3 10 00 zn:5 zd:5 &lut + +LUTI4_c_2b 1100 0000 1000 101 idx:2 1 00 00 zn:5 .... 0 &lut zd=%zd_ax2 +LUTI4_c_2h 1100 0000 1000 101 idx:2 1 01 00 zn:5 .... 0 &lut zd=%zd_ax2 +LUTI4_c_2s 1100 0000 1000 101 idx:2 1 10 00 zn:5 .... 0 &lut zd=%zd_ax2 + +LUTI4_c_4h 1100 0000 1000 101 idx:1 10 01 00 zn:5 ... 00 &lut zd=%zd_ax4 +LUTI4_c_4s 1100 0000 1000 101 idx:1 10 10 00 zn:5 ... 00 &lut zd=%zd_ax4 + +# LUTI4, strided (must check zd alignment) +LUTI4_s_2b 1100 0000 1001 101 idx:2 1 00 00 zn:5 zd:5 &lut +LUTI4_s_2h 1100 0000 1001 101 idx:2 1 01 00 zn:5 zd:5 &lut + +LUTI4_s_4h 1100 0000 1001 101 idx:1 10 01 00 zn:5 zd:5 &lut diff --git a/target/arm/tcg/sme_helper.c b/target/arm/tcg/sme_helper.c index 02106809ce1..075360d8b8a 100644 --- a/target/arm/tcg/sme_helper.c +++ b/target/arm/tcg/sme_helper.c @@ -22,13 +22,20 @@ #include "internals.h" #include "tcg/tcg-gvec-desc.h" #include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/helper-retaddr.h" #include "qemu/int128.h" #include "fpu/softfloat.h" #include "vec_internal.h" #include "sve_ldst_internal.h" + +static bool vectors_overlap(ARMVectorReg *x, unsigned nx, + ARMVectorReg *y, unsigned ny) +{ + return !(x + nx <= y || y + ny <= x); +} + void helper_set_svcr(CPUARMState *env, uint32_t val, uint32_t mask) { aarch64_set_svcr(env, val, mask); @@ -39,12 +46,12 @@ void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl) uint32_t i; /* - * Special case clearing the entire ZA space. + * Special case clearing the entire ZArray. * This falls into the CONSTRAINED UNPREDICTABLE zeroing of any * parts of the ZA storage outside of SVL. 
*/ if (imm == 0xff) { - memset(env->zarray, 0, sizeof(env->zarray)); + memset(env->za_state.za, 0, sizeof(env->za_state.za)); return; } @@ -54,7 +61,7 @@ void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl) */ for (i = 0; i < svl; i++) { if (imm & (1 << (i % 8))) { - memset(&env->zarray[i], 0, svl); + memset(&env->za_state.za[i], 0, svl); } } } @@ -206,6 +213,110 @@ void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc) #undef DO_MOVA_Z +void HELPER(sme2_mova_zc_b)(void *vdst, void *vsrc, uint32_t desc) +{ + const uint8_t *src = vsrc; + uint8_t *dst = vdst; + size_t i, n = simd_oprsz(desc); + + for (i = 0; i < n; ++i) { + dst[i] = src[tile_vslice_index(i)]; + } +} + +void HELPER(sme2_mova_zc_h)(void *vdst, void *vsrc, uint32_t desc) +{ + const uint16_t *src = vsrc; + uint16_t *dst = vdst; + size_t i, n = simd_oprsz(desc) / 2; + + for (i = 0; i < n; ++i) { + dst[i] = src[tile_vslice_index(i)]; + } +} + +void HELPER(sme2_mova_zc_s)(void *vdst, void *vsrc, uint32_t desc) +{ + const uint32_t *src = vsrc; + uint32_t *dst = vdst; + size_t i, n = simd_oprsz(desc) / 4; + + for (i = 0; i < n; ++i) { + dst[i] = src[tile_vslice_index(i)]; + } +} + +void HELPER(sme2_mova_zc_d)(void *vdst, void *vsrc, uint32_t desc) +{ + const uint64_t *src = vsrc; + uint64_t *dst = vdst; + size_t i, n = simd_oprsz(desc) / 8; + + for (i = 0; i < n; ++i) { + dst[i] = src[tile_vslice_index(i)]; + } +} + +void HELPER(sme2p1_movaz_zc_b)(void *vdst, void *vsrc, uint32_t desc) +{ + uint8_t *src = vsrc; + uint8_t *dst = vdst; + size_t i, n = simd_oprsz(desc); + + for (i = 0; i < n; ++i) { + dst[i] = src[tile_vslice_index(i)]; + src[tile_vslice_index(i)] = 0; + } +} + +void HELPER(sme2p1_movaz_zc_h)(void *vdst, void *vsrc, uint32_t desc) +{ + uint16_t *src = vsrc; + uint16_t *dst = vdst; + size_t i, n = simd_oprsz(desc) / 2; + + for (i = 0; i < n; ++i) { + dst[i] = src[tile_vslice_index(i)]; + src[tile_vslice_index(i)] = 0; + } +} + +void HELPER(sme2p1_movaz_zc_s)(void *vdst, void *vsrc, uint32_t desc) +{ + uint32_t *src = vsrc; + uint32_t *dst = vdst; + size_t i, n = simd_oprsz(desc) / 4; + + for (i = 0; i < n; ++i) { + dst[i] = src[tile_vslice_index(i)]; + src[tile_vslice_index(i)] = 0; + } +} + +void HELPER(sme2p1_movaz_zc_d)(void *vdst, void *vsrc, uint32_t desc) +{ + uint64_t *src = vsrc; + uint64_t *dst = vdst; + size_t i, n = simd_oprsz(desc) / 8; + + for (i = 0; i < n; ++i) { + dst[i] = src[tile_vslice_index(i)]; + src[tile_vslice_index(i)] = 0; + } +} + +void HELPER(sme2p1_movaz_zc_q)(void *vdst, void *vsrc, uint32_t desc) +{ + Int128 *src = vsrc; + Int128 *dst = vdst; + size_t i, n = simd_oprsz(desc) / 16; + + for (i = 0; i < n; ++i) { + dst[i] = src[tile_vslice_index(i)]; + memset(&src[tile_vslice_index(i)], 0, 16); + } +} + /* * Clear elements in a tile slice comprising len bytes. */ @@ -314,6 +425,26 @@ static void copy_vertical_q(void *vdst, const void *vsrc, size_t len) } } +void HELPER(sme2_mova_cz_b)(void *vdst, void *vsrc, uint32_t desc) +{ + copy_vertical_b(vdst, vsrc, simd_oprsz(desc)); +} + +void HELPER(sme2_mova_cz_h)(void *vdst, void *vsrc, uint32_t desc) +{ + copy_vertical_h(vdst, vsrc, simd_oprsz(desc)); +} + +void HELPER(sme2_mova_cz_s)(void *vdst, void *vsrc, uint32_t desc) +{ + copy_vertical_s(vdst, vsrc, simd_oprsz(desc)); +} + +void HELPER(sme2_mova_cz_d)(void *vdst, void *vsrc, uint32_t desc) +{ + copy_vertical_d(vdst, vsrc, simd_oprsz(desc)); +} + /* * Host and TLB primitives for vertical tile slice addressing. 
*/ @@ -344,54 +475,22 @@ static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \ TLB(env, useronly_clean_ptr(addr), val, ra); \ } -/* - * The ARMVectorReg elements are stored in host-endian 64-bit units. - * For 128-bit quantities, the sequence defined by the Elem[] pseudocode - * corresponds to storing the two 64-bit pieces in little-endian order. - */ -#define DO_LDQ(HNAME, VNAME, BE, HOST, TLB) \ -static inline void HNAME##_host(void *za, intptr_t off, void *host) \ -{ \ - uint64_t val0 = HOST(host), val1 = HOST(host + 8); \ - uint64_t *ptr = za + off; \ - ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \ -} \ +#define DO_LDQ(HNAME, VNAME) \ static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \ { \ HNAME##_host(za, tile_vslice_offset(off), host); \ } \ -static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \ - target_ulong addr, uintptr_t ra) \ -{ \ - uint64_t val0 = TLB(env, useronly_clean_ptr(addr), ra); \ - uint64_t val1 = TLB(env, useronly_clean_ptr(addr + 8), ra); \ - uint64_t *ptr = za + off; \ - ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \ -} \ static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \ target_ulong addr, uintptr_t ra) \ { \ HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \ } -#define DO_STQ(HNAME, VNAME, BE, HOST, TLB) \ -static inline void HNAME##_host(void *za, intptr_t off, void *host) \ -{ \ - uint64_t *ptr = za + off; \ - HOST(host, ptr[BE]); \ - HOST(host + 8, ptr[!BE]); \ -} \ +#define DO_STQ(HNAME, VNAME) \ static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \ { \ HNAME##_host(za, tile_vslice_offset(off), host); \ } \ -static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \ - target_ulong addr, uintptr_t ra) \ -{ \ - uint64_t *ptr = za + off; \ - TLB(env, useronly_clean_ptr(addr), ptr[BE], ra); \ - TLB(env, useronly_clean_ptr(addr + 8), ptr[!BE], ra); \ -} \ static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \ target_ulong addr, uintptr_t ra) \ { \ @@ -406,8 +505,8 @@ DO_LD(ld1s_le, uint32_t, ldl_le_p, cpu_ldl_le_data_ra) DO_LD(ld1d_be, uint64_t, ldq_be_p, cpu_ldq_be_data_ra) DO_LD(ld1d_le, uint64_t, ldq_le_p, cpu_ldq_le_data_ra) -DO_LDQ(sve_ld1qq_be, sme_ld1q_be, 1, ldq_be_p, cpu_ldq_be_data_ra) -DO_LDQ(sve_ld1qq_le, sme_ld1q_le, 0, ldq_le_p, cpu_ldq_le_data_ra) +DO_LDQ(sve_ld1qq_be, sme_ld1q_be) +DO_LDQ(sve_ld1qq_le, sme_ld1q_le) DO_ST(st1b, uint8_t, stb_p, cpu_stb_data_ra) DO_ST(st1h_be, uint16_t, stw_be_p, cpu_stw_be_data_ra) @@ -417,8 +516,8 @@ DO_ST(st1s_le, uint32_t, stl_le_p, cpu_stl_le_data_ra) DO_ST(st1d_be, uint64_t, stq_be_p, cpu_stq_be_data_ra) DO_ST(st1d_le, uint64_t, stq_le_p, cpu_stq_le_data_ra) -DO_STQ(sve_st1qq_be, sme_st1q_be, 1, stq_be_p, cpu_stq_be_data_ra) -DO_STQ(sve_st1qq_le, sme_st1q_le, 0, stq_le_p, cpu_stq_le_data_ra) +DO_STQ(sve_st1qq_be, sme_st1q_be) +DO_STQ(sve_st1qq_le, sme_st1q_le) #undef DO_LD #undef DO_ST @@ -567,19 +666,16 @@ void sme_ld1(CPUARMState *env, void *za, uint64_t *vg, static inline QEMU_ALWAYS_INLINE void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg, - target_ulong addr, uint32_t desc, uintptr_t ra, + target_ulong addr, uint64_t desc, uintptr_t ra, const int esz, bool vertical, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn, ClearFn *clr_fn, CopyFn *cpy_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; int bit55 = extract64(addr, 55, 1); - /* Remove mtedesc from the normal sve descriptor. 
*/ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Perform gross MTE suppression early. */ if (!tbi_check(mtedesc, bit55) || tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) { @@ -592,28 +688,28 @@ void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg, #define DO_LD(L, END, ESZ) \ void HELPER(sme_ld1##L##END##_h)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \ sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \ clear_horizontal, copy_horizontal); \ } \ void HELPER(sme_ld1##L##END##_v)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \ sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \ clear_vertical_##L, copy_vertical_##L); \ } \ void HELPER(sme_ld1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \ sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \ clear_horizontal, copy_horizontal); \ } \ void HELPER(sme_ld1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \ sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \ @@ -755,16 +851,13 @@ void sme_st1(CPUARMState *env, void *za, uint64_t *vg, static inline QEMU_ALWAYS_INLINE void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr, - uint32_t desc, uintptr_t ra, int esz, bool vertical, + uint64_t desc, uintptr_t ra, int esz, bool vertical, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; int bit55 = extract64(addr, 55, 1); - /* Remove mtedesc from the normal sve descriptor. */ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Perform gross MTE suppression early. 
*/ if (!tbi_check(mtedesc, bit55) || tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) { @@ -777,25 +870,25 @@ void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr, #define DO_ST(L, END, ESZ) \ void HELPER(sme_st1##L##END##_h)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \ sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \ } \ void HELPER(sme_st1##L##END##_v)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \ sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \ } \ void HELPER(sme_st1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \ sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \ } \ void HELPER(sme_st1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \ sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \ @@ -903,28 +996,69 @@ void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn, } } -void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn, - void *vpm, void *vst, uint32_t desc) +static void do_fmopa_h(void *vza, void *vzn, void *vzm, uint16_t *pn, + uint16_t *pm, float_status *fpst, uint32_t desc, + uint16_t negx, int negf) { intptr_t row, col, oprsz = simd_maxsz(desc); - uint32_t neg = simd_data(desc) << 31; - uint16_t *pn = vpn, *pm = vpm; - float_status fpst; - /* - * Make a copy of float_status because this operation does not - * update the cumulative fp exception status. It also produces - * default nans. 
- */ - fpst = *(float_status *)vst; - set_default_nan_mode(true, &fpst); + for (row = 0; row < oprsz; ) { + uint16_t pa = pn[H2(row >> 4)]; + do { + if (pa & 1) { + void *vza_row = vza + tile_vslice_offset(row); + uint16_t n = *(uint32_t *)(vzn + H1_2(row)) ^ negx; + + for (col = 0; col < oprsz; ) { + uint16_t pb = pm[H2(col >> 4)]; + do { + if (pb & 1) { + uint16_t *a = vza_row + H1_2(col); + uint16_t *m = vzm + H1_2(col); + *a = float16_muladd(n, *m, *a, negf, fpst); + } + col += 2; + pb >>= 2; + } while (col & 15); + } + } + row += 2; + pa >>= 2; + } while (row & 15); + } +} + +void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, float_status *fpst, uint32_t desc) +{ + do_fmopa_h(vza, vzn, vzm, vpn, vpm, fpst, desc, 0, 0); +} + +void HELPER(sme_fmops_h)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, float_status *fpst, uint32_t desc) +{ + do_fmopa_h(vza, vzn, vzm, vpn, vpm, fpst, desc, 1u << 15, 0); +} + +void HELPER(sme_ah_fmops_h)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, float_status *fpst, uint32_t desc) +{ + do_fmopa_h(vza, vzn, vzm, vpn, vpm, fpst, desc, 0, + float_muladd_negate_product); +} + +static void do_fmopa_s(void *vza, void *vzn, void *vzm, uint16_t *pn, + uint16_t *pm, float_status *fpst, uint32_t desc, + uint32_t negx, int negf) +{ + intptr_t row, col, oprsz = simd_maxsz(desc); for (row = 0; row < oprsz; ) { uint16_t pa = pn[H2(row >> 4)]; do { if (pa & 1) { void *vza_row = vza + tile_vslice_offset(row); - uint32_t n = *(uint32_t *)(vzn + H1_4(row)) ^ neg; + uint32_t n = *(uint32_t *)(vzn + H1_4(row)) ^ negx; for (col = 0; col < oprsz; ) { uint16_t pb = pm[H2(col >> 4)]; @@ -932,7 +1066,7 @@ void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn, if (pb & 1) { uint32_t *a = vza_row + H1_4(col); uint32_t *m = vzm + H1_4(col); - *a = float32_muladd(n, *m, *a, 0, &fpst); + *a = float32_muladd(n, *m, *a, negf, fpst); } col += 4; pb >>= 4; @@ -945,32 +1079,116 @@ void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn, } } -void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn, - void *vpm, void *vst, uint32_t desc) +void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, float_status *fpst, uint32_t desc) { - intptr_t row, col, oprsz = simd_oprsz(desc) / 8; - uint64_t neg = (uint64_t)simd_data(desc) << 63; - uint64_t *za = vza, *zn = vzn, *zm = vzm; - uint8_t *pn = vpn, *pm = vpm; - float_status fpst = *(float_status *)vst; + do_fmopa_s(vza, vzn, vzm, vpn, vpm, fpst, desc, 0, 0); +} - set_default_nan_mode(true, &fpst); +void HELPER(sme_fmops_s)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, float_status *fpst, uint32_t desc) +{ + do_fmopa_s(vza, vzn, vzm, vpn, vpm, fpst, desc, 1u << 31, 0); +} + +void HELPER(sme_ah_fmops_s)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, float_status *fpst, uint32_t desc) +{ + do_fmopa_s(vza, vzn, vzm, vpn, vpm, fpst, desc, 0, + float_muladd_negate_product); +} + +static void do_fmopa_d(uint64_t *za, uint64_t *zn, uint64_t *zm, uint8_t *pn, + uint8_t *pm, float_status *fpst, uint32_t desc, + uint64_t negx, int negf) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; for (row = 0; row < oprsz; ++row) { if (pn[H1(row)] & 1) { uint64_t *za_row = &za[tile_vslice_index(row)]; - uint64_t n = zn[row] ^ neg; + uint64_t n = zn[row] ^ negx; for (col = 0; col < oprsz; ++col) { if (pm[H1(col)] & 1) { uint64_t *a = &za_row[col]; - *a = float64_muladd(n, zm[col], *a, 0, &fpst); + *a = float64_muladd(n, zm[col], *a, 
negf, fpst); } } } } } +void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, float_status *fpst, uint32_t desc) +{ + do_fmopa_d(vza, vzn, vzm, vpn, vpm, fpst, desc, 0, 0); +} + +void HELPER(sme_fmops_d)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, float_status *fpst, uint32_t desc) +{ + do_fmopa_d(vza, vzn, vzm, vpn, vpm, fpst, desc, 1ull << 63, 0); +} + +void HELPER(sme_ah_fmops_d)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, float_status *fpst, uint32_t desc) +{ + do_fmopa_d(vza, vzn, vzm, vpn, vpm, fpst, desc, 0, + float_muladd_negate_product); +} + +static void do_bfmopa(void *vza, void *vzn, void *vzm, uint16_t *pn, + uint16_t *pm, float_status *fpst, uint32_t desc, + uint16_t negx, int negf) +{ + intptr_t row, col, oprsz = simd_maxsz(desc); + + for (row = 0; row < oprsz; ) { + uint16_t pa = pn[H2(row >> 4)]; + do { + if (pa & 1) { + void *vza_row = vza + tile_vslice_offset(row); + uint16_t n = *(uint32_t *)(vzn + H1_2(row)) ^ negx; + + for (col = 0; col < oprsz; ) { + uint16_t pb = pm[H2(col >> 4)]; + do { + if (pb & 1) { + uint16_t *a = vza_row + H1_2(col); + uint16_t *m = vzm + H1_2(col); + *a = bfloat16_muladd(n, *m, *a, negf, fpst); + } + col += 2; + pb >>= 2; + } while (col & 15); + } + } + row += 2; + pa >>= 2; + } while (row & 15); + } +} + +void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, float_status *fpst, uint32_t desc) +{ + do_bfmopa(vza, vzn, vzm, vpn, vpm, fpst, desc, 0, 0); +} + +void HELPER(sme_bfmops)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, float_status *fpst, uint32_t desc) +{ + do_bfmopa(vza, vzn, vzm, vpn, vpm, fpst, desc, 1u << 15, 0); +} + +void HELPER(sme_ah_bfmops)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, float_status *fpst, uint32_t desc) +{ + do_bfmopa(vza, vzn, vzm, vpn, vpm, fpst, desc, 0, + float_muladd_negate_product); +} + /* * Alter PAIR as needed for controlling predicates being false, * and for NEG on an enabled row element. @@ -991,6 +1209,20 @@ static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg) return pair; } +static inline uint32_t f16mop_ah_neg_adj_pair(uint32_t pair, uint32_t pg) +{ + uint32_t l = pg & 1 ? float16_ah_chs(pair) : 0; + uint32_t h = pg & 4 ? float16_ah_chs(pair >> 16) : 0; + return l | (h << 16); +} + +static inline uint32_t bf16mop_ah_neg_adj_pair(uint32_t pair, uint32_t pg) +{ + uint32_t l = pg & 1 ? bfloat16_ah_chs(pair) : 0; + uint32_t h = pg & 4 ? bfloat16_ah_chs(pair >> 16) : 0; + return l | (h << 16); +} + static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2, float_status *s_f16, float_status *s_std, float_status *s_odd) @@ -1005,49 +1237,67 @@ static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2, * - we have pre-set-up copy of s_std which is set to round-to-odd, * for the multiply (see below) */ - float64 e1r = float16_to_float64(e1 & 0xffff, true, s_f16); - float64 e1c = float16_to_float64(e1 >> 16, true, s_f16); - float64 e2r = float16_to_float64(e2 & 0xffff, true, s_f16); - float64 e2c = float16_to_float64(e2 >> 16, true, s_f16); - float64 t64; + float16 h1r = e1 & 0xffff; + float16 h1c = e1 >> 16; + float16 h2r = e2 & 0xffff; + float16 h2c = e2 >> 16; float32 t32; - /* - * The ARM pseudocode function FPDot performs both multiplies - * and the add with a single rounding operation. Emulate this - * by performing the first multiply in round-to-odd, then doing - * the second multiply as fused multiply-add, and rounding to - * float32 all in one step. 
- */ - t64 = float64_mul(e1r, e2r, s_odd); - t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std); + /* C.f. FPProcessNaNs4 */ + if (float16_is_any_nan(h1r) || float16_is_any_nan(h1c) || + float16_is_any_nan(h2r) || float16_is_any_nan(h2c)) { + float16 t16; + + if (float16_is_signaling_nan(h1r, s_f16)) { + t16 = h1r; + } else if (float16_is_signaling_nan(h1c, s_f16)) { + t16 = h1c; + } else if (float16_is_signaling_nan(h2r, s_f16)) { + t16 = h2r; + } else if (float16_is_signaling_nan(h2c, s_f16)) { + t16 = h2c; + } else if (float16_is_any_nan(h1r)) { + t16 = h1r; + } else if (float16_is_any_nan(h1c)) { + t16 = h1c; + } else if (float16_is_any_nan(h2r)) { + t16 = h2r; + } else { + t16 = h2c; + } + t32 = float16_to_float32(t16, true, s_f16); + } else { + float64 e1r = float16_to_float64(h1r, true, s_f16); + float64 e1c = float16_to_float64(h1c, true, s_f16); + float64 e2r = float16_to_float64(h2r, true, s_f16); + float64 e2c = float16_to_float64(h2c, true, s_f16); + float64 t64; - /* This conversion is exact, because we've already rounded. */ - t32 = float64_to_float32(t64, s_std); + /* + * The ARM pseudocode function FPDot performs both multiplies + * and the add with a single rounding operation. Emulate this + * by performing the first multiply in round-to-odd, then doing + * the second multiply as fused multiply-add, and rounding to + * float32 all in one step. + */ + t64 = float64_mul(e1r, e2r, s_odd); + t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std); + + /* This conversion is exact, because we've already rounded. */ + t32 = float64_to_float32(t64, s_std); + } /* The final accumulation step is not fused. */ return float32_add(sum, t32, s_std); } -void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn, - void *vpm, CPUARMState *env, uint32_t desc) +static void do_fmopa_w_h(void *vza, void *vzn, void *vzm, uint16_t *pn, + uint16_t *pm, CPUARMState *env, uint32_t desc, + uint32_t negx, bool ah_neg) { intptr_t row, col, oprsz = simd_maxsz(desc); - uint32_t neg = simd_data(desc) * 0x80008000u; - uint16_t *pn = vpn, *pm = vpm; - float_status fpst_odd, fpst_std, fpst_f16; + float_status fpst_odd = env->vfp.fp_status[FPST_ZA]; - /* - * Make copies of fp_status and fp_status_f16, because this operation - * does not update the cumulative fp exception status. It also - * produces default NaNs. We also need a second copy of fp_status with - * round-to-odd -- see above. 
- */ - fpst_f16 = env->vfp.fp_status_f16; - fpst_std = env->vfp.fp_status; - set_default_nan_mode(true, &fpst_std); - set_default_nan_mode(true, &fpst_f16); - fpst_odd = fpst_std; set_float_rounding_mode(float_round_to_odd, &fpst_odd); for (row = 0; row < oprsz; ) { @@ -1056,7 +1306,11 @@ void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn, void *vza_row = vza + tile_vslice_offset(row); uint32_t n = *(uint32_t *)(vzn + H1_4(row)); - n = f16mop_adj_pair(n, prow, neg); + if (ah_neg) { + n = f16mop_ah_neg_adj_pair(n, prow); + } else { + n = f16mop_adj_pair(n, prow, negx); + } for (col = 0; col < oprsz; ) { uint16_t pcol = pm[H2(col >> 4)]; @@ -1067,7 +1321,9 @@ void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn, m = f16mop_adj_pair(m, pcol, 0); *a = f16_dotadd(*a, n, m, - &fpst_f16, &fpst_std, &fpst_odd); + &env->vfp.fp_status[FPST_ZA_F16], + &env->vfp.fp_status[FPST_ZA], + &fpst_odd); } col += 4; pcol >>= 4; @@ -1079,41 +1335,188 @@ void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn, } } -void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn, - void *vpm, uint32_t desc) +void HELPER(sme_fmopa_w_h)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, CPUARMState *env, uint32_t desc) +{ + do_fmopa_w_h(vza, vzn, vzm, vpn, vpm, env, desc, 0, false); +} + +void HELPER(sme_fmops_w_h)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, CPUARMState *env, uint32_t desc) +{ + do_fmopa_w_h(vza, vzn, vzm, vpn, vpm, env, desc, 0x80008000u, false); +} + +void HELPER(sme_ah_fmops_w_h)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, CPUARMState *env, uint32_t desc) +{ + do_fmopa_w_h(vza, vzn, vzm, vpn, vpm, env, desc, 0, true); +} + +void HELPER(sme2_fdot_h)(void *vd, void *vn, void *vm, void *va, + CPUARMState *env, uint32_t desc) +{ + intptr_t i, oprsz = simd_maxsz(desc); + bool za = extract32(desc, SIMD_DATA_SHIFT, 1); + float_status *fpst_std = &env->vfp.fp_status[za ? FPST_ZA : FPST_A64]; + float_status *fpst_f16 = &env->vfp.fp_status[za ? FPST_ZA_F16 : FPST_A64_F16]; + float_status fpst_odd = *fpst_std; + float32 *d = vd, *a = va; + uint32_t *n = vn, *m = vm; + + set_float_rounding_mode(float_round_to_odd, &fpst_odd); + + for (i = 0; i < oprsz / sizeof(float32); ++i) { + d[H4(i)] = f16_dotadd(a[H4(i)], n[H4(i)], m[H4(i)], + fpst_f16, fpst_std, &fpst_odd); + } +} + +void HELPER(sme2_fdot_idx_h)(void *vd, void *vn, void *vm, void *va, + CPUARMState *env, uint32_t desc) +{ + intptr_t i, j, oprsz = simd_maxsz(desc); + intptr_t elements = oprsz / sizeof(float32); + intptr_t eltspersegment = MIN(4, elements); + int idx = extract32(desc, SIMD_DATA_SHIFT, 2); + bool za = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + float_status *fpst_std = &env->vfp.fp_status[za ? FPST_ZA : FPST_A64]; + float_status *fpst_f16 = &env->vfp.fp_status[za ? 
FPST_ZA_F16 : FPST_A64_F16]; + float_status fpst_odd = *fpst_std; + float32 *d = vd, *a = va; + uint32_t *n = vn, *m = (uint32_t *)vm + H4(idx); + + set_float_rounding_mode(float_round_to_odd, &fpst_odd); + + for (i = 0; i < elements; i += eltspersegment) { + uint32_t mm = m[i]; + for (j = 0; j < eltspersegment; ++j) { + d[H4(i + j)] = f16_dotadd(a[H4(i + j)], n[H4(i + j)], mm, + fpst_f16, fpst_std, &fpst_odd); + } + } +} + +void HELPER(sme2_fvdot_idx_h)(void *vd, void *vn, void *vm, void *va, + CPUARMState *env, uint32_t desc) +{ + intptr_t i, j, oprsz = simd_maxsz(desc); + intptr_t elements = oprsz / sizeof(float32); + intptr_t eltspersegment = MIN(4, elements); + int idx = extract32(desc, SIMD_DATA_SHIFT, 2); + int sel = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + float_status fpst_odd, *fpst_std, *fpst_f16; + float32 *d = vd, *a = va; + uint16_t *n0 = vn; + uint16_t *n1 = vn + sizeof(ARMVectorReg); + uint32_t *m = (uint32_t *)vm + H4(idx); + + fpst_std = &env->vfp.fp_status[FPST_ZA]; + fpst_f16 = &env->vfp.fp_status[FPST_ZA_F16]; + fpst_odd = *fpst_std; + set_float_rounding_mode(float_round_to_odd, &fpst_odd); + + for (i = 0; i < elements; i += eltspersegment) { + uint32_t mm = m[i]; + for (j = 0; j < eltspersegment; ++j) { + uint32_t nn = (n0[H2(2 * (i + j) + sel)]) + | (n1[H2(2 * (i + j) + sel)] << 16); + d[i + H4(j)] = f16_dotadd(a[i + H4(j)], nn, mm, + fpst_f16, fpst_std, &fpst_odd); + } + } +} + +static void do_bfmopa_w(void *vza, void *vzn, void *vzm, + uint16_t *pn, uint16_t *pm, CPUARMState *env, + uint32_t desc, uint32_t negx, bool ah_neg) { intptr_t row, col, oprsz = simd_maxsz(desc); - uint32_t neg = simd_data(desc) * 0x80008000u; - uint16_t *pn = vpn, *pm = vpm; + float_status fpst, fpst_odd; - for (row = 0; row < oprsz; ) { - uint16_t prow = pn[H2(row >> 4)]; - do { - void *vza_row = vza + tile_vslice_offset(row); - uint32_t n = *(uint32_t *)(vzn + H1_4(row)); + if (is_ebf(env, &fpst, &fpst_odd)) { + for (row = 0; row < oprsz; ) { + uint16_t prow = pn[H2(row >> 4)]; + do { + void *vza_row = vza + tile_vslice_offset(row); + uint32_t n = *(uint32_t *)(vzn + H1_4(row)); - n = f16mop_adj_pair(n, prow, neg); + if (ah_neg) { + n = bf16mop_ah_neg_adj_pair(n, prow); + } else { + n = f16mop_adj_pair(n, prow, negx); + } - for (col = 0; col < oprsz; ) { - uint16_t pcol = pm[H2(col >> 4)]; - do { - if (prow & pcol & 0b0101) { - uint32_t *a = vza_row + H1_4(col); - uint32_t m = *(uint32_t *)(vzm + H1_4(col)); + for (col = 0; col < oprsz; ) { + uint16_t pcol = pm[H2(col >> 4)]; + do { + if (prow & pcol & 0b0101) { + uint32_t *a = vza_row + H1_4(col); + uint32_t m = *(uint32_t *)(vzm + H1_4(col)); - m = f16mop_adj_pair(m, pcol, 0); - *a = bfdotadd(*a, n, m); - } - col += 4; - pcol >>= 4; - } while (col & 15); - } - row += 4; - prow >>= 4; - } while (row & 15); + m = f16mop_adj_pair(m, pcol, 0); + *a = bfdotadd_ebf(*a, n, m, &fpst, &fpst_odd); + } + col += 4; + pcol >>= 4; + } while (col & 15); + } + row += 4; + prow >>= 4; + } while (row & 15); + } + } else { + for (row = 0; row < oprsz; ) { + uint16_t prow = pn[H2(row >> 4)]; + do { + void *vza_row = vza + tile_vslice_offset(row); + uint32_t n = *(uint32_t *)(vzn + H1_4(row)); + + if (ah_neg) { + n = bf16mop_ah_neg_adj_pair(n, prow); + } else { + n = f16mop_adj_pair(n, prow, negx); + } + + for (col = 0; col < oprsz; ) { + uint16_t pcol = pm[H2(col >> 4)]; + do { + if (prow & pcol & 0b0101) { + uint32_t *a = vza_row + H1_4(col); + uint32_t m = *(uint32_t *)(vzm + H1_4(col)); + + m = f16mop_adj_pair(m, pcol, 0); + *a = bfdotadd(*a, n, m, 
&fpst); + } + col += 4; + pcol >>= 4; + } while (col & 15); + } + row += 4; + prow >>= 4; + } while (row & 15); + } } } +void HELPER(sme_bfmopa_w)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, CPUARMState *env, uint32_t desc) +{ + do_bfmopa_w(vza, vzn, vzm, vpn, vpm, env, desc, 0, false); +} + +void HELPER(sme_bfmops_w)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, CPUARMState *env, uint32_t desc) +{ + do_bfmopa_w(vza, vzn, vzm, vpn, vpm, env, desc, 0x80008000u, false); +} + +void HELPER(sme_ah_bfmops_w)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, CPUARMState *env, uint32_t desc) +{ + do_bfmopa_w(vza, vzn, vzm, vpn, vpm, env, desc, 0, true); +} + typedef uint32_t IMOPFn32(uint32_t, uint32_t, uint32_t, uint8_t, bool); static inline void do_imopa_s(uint32_t *za, uint32_t *zn, uint32_t *zm, uint8_t *pn, uint8_t *pm, @@ -1158,7 +1561,7 @@ static inline void do_imopa_d(uint64_t *za, uint64_t *zn, uint64_t *zm, } } -#define DEF_IMOP_32(NAME, NTYPE, MTYPE) \ +#define DEF_IMOP_8x4_32(NAME, NTYPE, MTYPE) \ static uint32_t NAME(uint32_t n, uint32_t m, uint32_t a, uint8_t p, bool neg) \ { \ uint32_t sum = 0; \ @@ -1171,7 +1574,7 @@ static uint32_t NAME(uint32_t n, uint32_t m, uint32_t a, uint8_t p, bool neg) \ return neg ? a - sum : a + sum; \ } -#define DEF_IMOP_64(NAME, NTYPE, MTYPE) \ +#define DEF_IMOP_16x4_64(NAME, NTYPE, MTYPE) \ static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \ { \ uint64_t sum = 0; \ @@ -1184,27 +1587,1070 @@ static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \ return neg ? a - sum : a + sum; \ } -DEF_IMOP_32(smopa_s, int8_t, int8_t) -DEF_IMOP_32(umopa_s, uint8_t, uint8_t) -DEF_IMOP_32(sumopa_s, int8_t, uint8_t) -DEF_IMOP_32(usmopa_s, uint8_t, int8_t) +DEF_IMOP_8x4_32(smopa_s, int8_t, int8_t) +DEF_IMOP_8x4_32(umopa_s, uint8_t, uint8_t) +DEF_IMOP_8x4_32(sumopa_s, int8_t, uint8_t) +DEF_IMOP_8x4_32(usmopa_s, uint8_t, int8_t) -DEF_IMOP_64(smopa_d, int16_t, int16_t) -DEF_IMOP_64(umopa_d, uint16_t, uint16_t) -DEF_IMOP_64(sumopa_d, int16_t, uint16_t) -DEF_IMOP_64(usmopa_d, uint16_t, int16_t) +DEF_IMOP_16x4_64(smopa_d, int16_t, int16_t) +DEF_IMOP_16x4_64(umopa_d, uint16_t, uint16_t) +DEF_IMOP_16x4_64(sumopa_d, int16_t, uint16_t) +DEF_IMOP_16x4_64(usmopa_d, uint16_t, int16_t) -#define DEF_IMOPH(NAME, S) \ - void HELPER(sme_##NAME##_##S)(void *vza, void *vzn, void *vzm, \ +#define DEF_IMOPH(P, NAME, S) \ + void HELPER(P##_##NAME##_##S)(void *vza, void *vzn, void *vzm, \ void *vpn, void *vpm, uint32_t desc) \ { do_imopa_##S(vza, vzn, vzm, vpn, vpm, desc, NAME##_##S); } -DEF_IMOPH(smopa, s) -DEF_IMOPH(umopa, s) -DEF_IMOPH(sumopa, s) -DEF_IMOPH(usmopa, s) +DEF_IMOPH(sme, smopa, s) +DEF_IMOPH(sme, umopa, s) +DEF_IMOPH(sme, sumopa, s) +DEF_IMOPH(sme, usmopa, s) + +DEF_IMOPH(sme, smopa, d) +DEF_IMOPH(sme, umopa, d) +DEF_IMOPH(sme, sumopa, d) +DEF_IMOPH(sme, usmopa, d) -DEF_IMOPH(smopa, d) -DEF_IMOPH(umopa, d) -DEF_IMOPH(sumopa, d) -DEF_IMOPH(usmopa, d) +static uint32_t bmopa_s(uint32_t n, uint32_t m, uint32_t a, uint8_t p, bool neg) +{ + uint32_t sum = ctpop32(~(n ^ m)); + if (neg) { + sum = -sum; + } + if (!(p & 1)) { + sum = 0; + } + return a + sum; +} + +DEF_IMOPH(sme2, bmopa, s) + +#define DEF_IMOP_16x2_32(NAME, NTYPE, MTYPE) \ +static uint32_t NAME(uint32_t n, uint32_t m, uint32_t a, uint8_t p, bool neg) \ +{ \ + uint32_t sum = 0; \ + /* Apply P to N as a mask, making the inactive elements 0. 
*/ \ + n &= expand_pred_h(p); \ + sum += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \ + sum += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \ + return neg ? a - sum : a + sum; \ +} + +DEF_IMOP_16x2_32(smopa2_s, int16_t, int16_t) +DEF_IMOP_16x2_32(umopa2_s, uint16_t, uint16_t) + +DEF_IMOPH(sme2, smopa2, s) +DEF_IMOPH(sme2, umopa2, s) + +#define DO_VDOT_IDX(NAME, TYPED, TYPEN, TYPEM, HD, HN) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ +{ \ + intptr_t svl = simd_oprsz(desc); \ + intptr_t elements = svl / sizeof(TYPED); \ + intptr_t eltperseg = 16 / sizeof(TYPED); \ + intptr_t nreg = sizeof(TYPED) / sizeof(TYPEN); \ + intptr_t vstride = (svl / nreg) * sizeof(ARMVectorReg); \ + intptr_t zstride = sizeof(ARMVectorReg) / sizeof(TYPEN); \ + intptr_t idx = extract32(desc, SIMD_DATA_SHIFT, 2); \ + TYPEN *n = vn; \ + TYPEM *m = vm; \ + for (intptr_t r = 0; r < nreg; r++) { \ + TYPED *d = vd + r * vstride; \ + for (intptr_t seg = 0; seg < elements; seg += eltperseg) { \ + intptr_t s = seg + idx; \ + for (intptr_t e = seg; e < seg + eltperseg; e++) { \ + TYPED sum = d[HD(e)]; \ + for (intptr_t i = 0; i < nreg; i++) { \ + TYPED nn = n[i * zstride + HN(nreg * e + r)]; \ + TYPED mm = m[HN(nreg * s + i)]; \ + sum += nn * mm; \ + } \ + d[HD(e)] = sum; \ + } \ + } \ + } \ +} + +DO_VDOT_IDX(sme2_svdot_idx_4b, int32_t, int8_t, int8_t, H4, H1) +DO_VDOT_IDX(sme2_uvdot_idx_4b, uint32_t, uint8_t, uint8_t, H4, H1) +DO_VDOT_IDX(sme2_suvdot_idx_4b, int32_t, int8_t, uint8_t, H4, H1) +DO_VDOT_IDX(sme2_usvdot_idx_4b, int32_t, uint8_t, int8_t, H4, H1) + +DO_VDOT_IDX(sme2_svdot_idx_4h, int64_t, int16_t, int16_t, H8, H2) +DO_VDOT_IDX(sme2_uvdot_idx_4h, uint64_t, uint16_t, uint16_t, H8, H2) + +DO_VDOT_IDX(sme2_svdot_idx_2h, int32_t, int16_t, int16_t, H4, H2) +DO_VDOT_IDX(sme2_uvdot_idx_2h, uint32_t, uint16_t, uint16_t, H4, H2) + +#undef DO_VDOT_IDX + +#define DO_MLALL(NAME, TYPEW, TYPEN, TYPEM, HW, HN, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ +{ \ + intptr_t elements = simd_oprsz(desc) / sizeof(TYPEW); \ + intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 2); \ + TYPEW *d = vd, *a = va; TYPEN *n = vn; TYPEM *m = vm; \ + for (intptr_t i = 0; i < elements; ++i) { \ + TYPEW nn = n[HN(i * 4 + sel)]; \ + TYPEM mm = m[HN(i * 4 + sel)]; \ + d[HW(i)] = a[HW(i)] OP (nn * mm); \ + } \ +} + +DO_MLALL(sme2_smlall_s, int32_t, int8_t, int8_t, H4, H1, +) +DO_MLALL(sme2_smlall_d, int64_t, int16_t, int16_t, H8, H2, +) +DO_MLALL(sme2_smlsll_s, int32_t, int8_t, int8_t, H4, H1, -) +DO_MLALL(sme2_smlsll_d, int64_t, int16_t, int16_t, H8, H2, -) + +DO_MLALL(sme2_umlall_s, uint32_t, uint8_t, uint8_t, H4, H1, +) +DO_MLALL(sme2_umlall_d, uint64_t, uint16_t, uint16_t, H8, H2, +) +DO_MLALL(sme2_umlsll_s, uint32_t, uint8_t, uint8_t, H4, H1, -) +DO_MLALL(sme2_umlsll_d, uint64_t, uint16_t, uint16_t, H8, H2, -) + +DO_MLALL(sme2_usmlall_s, uint32_t, uint8_t, int8_t, H4, H1, +) + +#undef DO_MLALL + +#define DO_MLALL_IDX(NAME, TYPEW, TYPEN, TYPEM, HW, HN, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ +{ \ + intptr_t elements = simd_oprsz(desc) / sizeof(TYPEW); \ + intptr_t eltspersegment = 16 / sizeof(TYPEW); \ + intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 2); \ + intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 4); \ + TYPEW *d = vd, *a = va; TYPEN *n = vn; TYPEM *m = vm; \ + for (intptr_t i = 0; i < elements; i += eltspersegment) { \ + TYPEW mm = m[HN(i * 4 + idx)]; \ + for (intptr_t j = 0; j < eltspersegment; ++j) { \ + TYPEN nn = n[HN((i + j) * 4 + sel)]; \ + d[HW(i 
+ j)] = a[HW(i + j)] OP (nn * mm); \ + } \ + } \ +} + +DO_MLALL_IDX(sme2_smlall_idx_s, int32_t, int8_t, int8_t, H4, H1, +) +DO_MLALL_IDX(sme2_smlall_idx_d, int64_t, int16_t, int16_t, H8, H2, +) +DO_MLALL_IDX(sme2_smlsll_idx_s, int32_t, int8_t, int8_t, H4, H1, -) +DO_MLALL_IDX(sme2_smlsll_idx_d, int64_t, int16_t, int16_t, H8, H2, -) + +DO_MLALL_IDX(sme2_umlall_idx_s, uint32_t, uint8_t, uint8_t, H4, H1, +) +DO_MLALL_IDX(sme2_umlall_idx_d, uint64_t, uint16_t, uint16_t, H8, H2, +) +DO_MLALL_IDX(sme2_umlsll_idx_s, uint32_t, uint8_t, uint8_t, H4, H1, -) +DO_MLALL_IDX(sme2_umlsll_idx_d, uint64_t, uint16_t, uint16_t, H8, H2, -) + +DO_MLALL_IDX(sme2_usmlall_idx_s, uint32_t, uint8_t, int8_t, H4, H1, +) +DO_MLALL_IDX(sme2_sumlall_idx_s, uint32_t, int8_t, uint8_t, H4, H1, +) + +#undef DO_MLALL_IDX + +/* Convert and compress */ +void HELPER(sme2_bfcvt)(void *vd, void *vs, float_status *fpst, uint32_t desc) +{ + ARMVectorReg scratch; + size_t oprsz = simd_oprsz(desc); + size_t i, n = oprsz / 4; + float32 *s0 = vs; + float32 *s1 = vs + sizeof(ARMVectorReg); + bfloat16 *d = vd; + + if (vd == s1) { + s1 = memcpy(&scratch, s1, oprsz); + } + + for (i = 0; i < n; ++i) { + d[H2(i)] = float32_to_bfloat16(s0[H4(i)], fpst); + } + for (i = 0; i < n; ++i) { + d[H2(i) + n] = float32_to_bfloat16(s1[H4(i)], fpst); + } +} + +void HELPER(sme2_fcvt_n)(void *vd, void *vs, float_status *fpst, uint32_t desc) +{ + ARMVectorReg scratch; + size_t oprsz = simd_oprsz(desc); + size_t i, n = oprsz / 4; + float32 *s0 = vs; + float32 *s1 = vs + sizeof(ARMVectorReg); + float16 *d = vd; + + if (vd == s1) { + s1 = memcpy(&scratch, s1, oprsz); + } + + for (i = 0; i < n; ++i) { + d[H2(i)] = sve_f32_to_f16(s0[H4(i)], fpst); + } + for (i = 0; i < n; ++i) { + d[H2(i) + n] = sve_f32_to_f16(s1[H4(i)], fpst); + } +} + +#define SQCVT2(NAME, TW, TN, HW, HN, SAT) \ +void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \ +{ \ + ARMVectorReg scratch; \ + size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \ + TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \ + TN *d = vd; \ + if (vectors_overlap(vd, 1, vs, 2)) { \ + d = (TN *)&scratch; \ + } \ + for (size_t i = 0; i < n; ++i) { \ + d[HN(i)] = SAT(s0[HW(i)]); \ + d[HN(i + n)] = SAT(s1[HW(i)]); \ + } \ + if (d != vd) { \ + memcpy(vd, d, oprsz); \ + } \ +} + +SQCVT2(sme2_sqcvt_sh, int32_t, int16_t, H4, H2, do_ssat_h) +SQCVT2(sme2_uqcvt_sh, uint32_t, uint16_t, H4, H2, do_usat_h) +SQCVT2(sme2_sqcvtu_sh, int32_t, uint16_t, H4, H2, do_usat_h) + +#undef SQCVT2 + +#define SQCVT4(NAME, TW, TN, HW, HN, SAT) \ +void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \ +{ \ + ARMVectorReg scratch; \ + size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \ + TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \ + TW *s2 = vs + 2 * sizeof(ARMVectorReg); \ + TW *s3 = vs + 3 * sizeof(ARMVectorReg); \ + TN *d = vd; \ + if (vectors_overlap(vd, 1, vs, 4)) { \ + d = (TN *)&scratch; \ + } \ + for (size_t i = 0; i < n; ++i) { \ + d[HN(i)] = SAT(s0[HW(i)]); \ + d[HN(i + n)] = SAT(s1[HW(i)]); \ + d[HN(i + 2 * n)] = SAT(s2[HW(i)]); \ + d[HN(i + 3 * n)] = SAT(s3[HW(i)]); \ + } \ + if (d != vd) { \ + memcpy(vd, d, oprsz); \ + } \ +} + +SQCVT4(sme2_sqcvt_sb, int32_t, int8_t, H4, H2, do_ssat_b) +SQCVT4(sme2_uqcvt_sb, uint32_t, uint8_t, H4, H2, do_usat_b) +SQCVT4(sme2_sqcvtu_sb, int32_t, uint8_t, H4, H2, do_usat_b) + +SQCVT4(sme2_sqcvt_dh, int64_t, int16_t, H8, H2, do_ssat_h) +SQCVT4(sme2_uqcvt_dh, uint64_t, uint16_t, H8, H2, do_usat_h) +SQCVT4(sme2_sqcvtu_dh, int64_t, uint16_t, H8, H2, do_usat_h) + +#undef SQCVT4 + +#define 
SQRSHR2(NAME, TW, TN, HW, HN, RSHR, SAT) \ +void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \ +{ \ + ARMVectorReg scratch; \ + size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \ + int shift = simd_data(desc); \ + TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \ + TN *d = vd; \ + if (vectors_overlap(vd, 1, vs, 2)) { \ + d = (TN *)&scratch; \ + } \ + for (size_t i = 0; i < n; ++i) { \ + d[HN(i)] = SAT(RSHR(s0[HW(i)], shift)); \ + d[HN(i + n)] = SAT(RSHR(s1[HW(i)], shift)); \ + } \ + if (d != vd) { \ + memcpy(vd, d, oprsz); \ + } \ +} + +SQRSHR2(sme2_sqrshr_sh, int32_t, int16_t, H4, H2, do_srshr, do_ssat_h) +SQRSHR2(sme2_uqrshr_sh, uint32_t, uint16_t, H4, H2, do_urshr, do_usat_h) +SQRSHR2(sme2_sqrshru_sh, int32_t, uint16_t, H4, H2, do_srshr, do_usat_h) + +#undef SQRSHR2 + +#define SQRSHR4(NAME, TW, TN, HW, HN, RSHR, SAT) \ +void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \ +{ \ + ARMVectorReg scratch; \ + size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \ + int shift = simd_data(desc); \ + TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \ + TW *s2 = vs + 2 * sizeof(ARMVectorReg); \ + TW *s3 = vs + 3 * sizeof(ARMVectorReg); \ + TN *d = vd; \ + if (vectors_overlap(vd, 1, vs, 4)) { \ + d = (TN *)&scratch; \ + } \ + for (size_t i = 0; i < n; ++i) { \ + d[HN(i)] = SAT(RSHR(s0[HW(i)], shift)); \ + d[HN(i + n)] = SAT(RSHR(s1[HW(i)], shift)); \ + d[HN(i + 2 * n)] = SAT(RSHR(s2[HW(i)], shift)); \ + d[HN(i + 3 * n)] = SAT(RSHR(s3[HW(i)], shift)); \ + } \ + if (d != vd) { \ + memcpy(vd, d, oprsz); \ + } \ +} + +SQRSHR4(sme2_sqrshr_sb, int32_t, int8_t, H4, H2, do_srshr, do_ssat_b) +SQRSHR4(sme2_uqrshr_sb, uint32_t, uint8_t, H4, H2, do_urshr, do_usat_b) +SQRSHR4(sme2_sqrshru_sb, int32_t, uint8_t, H4, H2, do_srshr, do_usat_b) + +SQRSHR4(sme2_sqrshr_dh, int64_t, int16_t, H8, H2, do_srshr, do_ssat_h) +SQRSHR4(sme2_uqrshr_dh, uint64_t, uint16_t, H8, H2, do_urshr, do_usat_h) +SQRSHR4(sme2_sqrshru_dh, int64_t, uint16_t, H8, H2, do_srshr, do_usat_h) + +#undef SQRSHR4 + +/* Convert and interleave */ +void HELPER(sme2_bfcvtn)(void *vd, void *vs, float_status *fpst, uint32_t desc) +{ + size_t i, n = simd_oprsz(desc) / 4; + float32 *s0 = vs; + float32 *s1 = vs + sizeof(ARMVectorReg); + bfloat16 *d = vd; + + for (i = 0; i < n; ++i) { + bfloat16 d0 = float32_to_bfloat16(s0[H4(i)], fpst); + bfloat16 d1 = float32_to_bfloat16(s1[H4(i)], fpst); + d[H2(i * 2 + 0)] = d0; + d[H2(i * 2 + 1)] = d1; + } +} + +void HELPER(sme2_fcvtn)(void *vd, void *vs, float_status *fpst, uint32_t desc) +{ + size_t i, n = simd_oprsz(desc) / 4; + float32 *s0 = vs; + float32 *s1 = vs + sizeof(ARMVectorReg); + bfloat16 *d = vd; + + for (i = 0; i < n; ++i) { + bfloat16 d0 = sve_f32_to_f16(s0[H4(i)], fpst); + bfloat16 d1 = sve_f32_to_f16(s1[H4(i)], fpst); + d[H2(i * 2 + 0)] = d0; + d[H2(i * 2 + 1)] = d1; + } +} + +#define SQCVTN2(NAME, TW, TN, HW, HN, SAT) \ +void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \ +{ \ + ARMVectorReg scratch; \ + size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \ + TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \ + TN *d = vd; \ + if (vectors_overlap(vd, 1, vs, 2)) { \ + d = (TN *)&scratch; \ + } \ + for (size_t i = 0; i < n; ++i) { \ + d[HN(2 * i + 0)] = SAT(s0[HW(i)]); \ + d[HN(2 * i + 1)] = SAT(s1[HW(i)]); \ + } \ + if (d != vd) { \ + memcpy(vd, d, oprsz); \ + } \ +} + +SQCVTN2(sme2_sqcvtn_sh, int32_t, int16_t, H4, H2, do_ssat_h) +SQCVTN2(sme2_uqcvtn_sh, uint32_t, uint16_t, H4, H2, do_usat_h) +SQCVTN2(sme2_sqcvtun_sh, int32_t, uint16_t, H4, H2, do_usat_h) + +#undef SQCVTN2 + +#define 
SQCVTN4(NAME, TW, TN, HW, HN, SAT) \ +void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \ +{ \ + ARMVectorReg scratch; \ + size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \ + TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \ + TW *s2 = vs + 2 * sizeof(ARMVectorReg); \ + TW *s3 = vs + 3 * sizeof(ARMVectorReg); \ + TN *d = vd; \ + if (vectors_overlap(vd, 1, vs, 4)) { \ + d = (TN *)&scratch; \ + } \ + for (size_t i = 0; i < n; ++i) { \ + d[HN(4 * i + 0)] = SAT(s0[HW(i)]); \ + d[HN(4 * i + 1)] = SAT(s1[HW(i)]); \ + d[HN(4 * i + 2)] = SAT(s2[HW(i)]); \ + d[HN(4 * i + 3)] = SAT(s3[HW(i)]); \ + } \ + if (d != vd) { \ + memcpy(vd, d, oprsz); \ + } \ +} + +SQCVTN4(sme2_sqcvtn_sb, int32_t, int8_t, H4, H1, do_ssat_b) +SQCVTN4(sme2_uqcvtn_sb, uint32_t, uint8_t, H4, H1, do_usat_b) +SQCVTN4(sme2_sqcvtun_sb, int32_t, uint8_t, H4, H1, do_usat_b) + +SQCVTN4(sme2_sqcvtn_dh, int64_t, int16_t, H8, H2, do_ssat_h) +SQCVTN4(sme2_uqcvtn_dh, uint64_t, uint16_t, H8, H2, do_usat_h) +SQCVTN4(sme2_sqcvtun_dh, int64_t, uint16_t, H8, H2, do_usat_h) + +#undef SQCVTN4 + +#define SQRSHRN2(NAME, TW, TN, HW, HN, RSHR, SAT) \ +void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \ +{ \ + ARMVectorReg scratch; \ + size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \ + int shift = simd_data(desc); \ + TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \ + TN *d = vd; \ + if (vectors_overlap(vd, 1, vs, 2)) { \ + d = (TN *)&scratch; \ + } \ + for (size_t i = 0; i < n; ++i) { \ + d[HN(2 * i + 0)] = SAT(RSHR(s0[HW(i)], shift)); \ + d[HN(2 * i + 1)] = SAT(RSHR(s1[HW(i)], shift)); \ + } \ + if (d != vd) { \ + memcpy(vd, d, oprsz); \ + } \ +} + +SQRSHRN2(sme2_sqrshrn_sh, int32_t, int16_t, H4, H2, do_srshr, do_ssat_h) +SQRSHRN2(sme2_uqrshrn_sh, uint32_t, uint16_t, H4, H2, do_urshr, do_usat_h) +SQRSHRN2(sme2_sqrshrun_sh, int32_t, uint16_t, H4, H2, do_srshr, do_usat_h) + +#undef SQRSHRN2 + +#define SQRSHRN4(NAME, TW, TN, HW, HN, RSHR, SAT) \ +void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \ +{ \ + ARMVectorReg scratch; \ + size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \ + int shift = simd_data(desc); \ + TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \ + TW *s2 = vs + 2 * sizeof(ARMVectorReg); \ + TW *s3 = vs + 3 * sizeof(ARMVectorReg); \ + TN *d = vd; \ + if (vectors_overlap(vd, 1, vs, 4)) { \ + d = (TN *)&scratch; \ + } \ + for (size_t i = 0; i < n; ++i) { \ + d[HN(4 * i + 0)] = SAT(RSHR(s0[HW(i)], shift)); \ + d[HN(4 * i + 1)] = SAT(RSHR(s1[HW(i)], shift)); \ + d[HN(4 * i + 2)] = SAT(RSHR(s2[HW(i)], shift)); \ + d[HN(4 * i + 3)] = SAT(RSHR(s3[HW(i)], shift)); \ + } \ + if (d != vd) { \ + memcpy(vd, d, oprsz); \ + } \ +} + +SQRSHRN4(sme2_sqrshrn_sb, int32_t, int8_t, H4, H1, do_srshr, do_ssat_b) +SQRSHRN4(sme2_uqrshrn_sb, uint32_t, uint8_t, H4, H1, do_urshr, do_usat_b) +SQRSHRN4(sme2_sqrshrun_sb, int32_t, uint8_t, H4, H1, do_srshr, do_usat_b) + +SQRSHRN4(sme2_sqrshrn_dh, int64_t, int16_t, H8, H2, do_srshr, do_ssat_h) +SQRSHRN4(sme2_uqrshrn_dh, uint64_t, uint16_t, H8, H2, do_urshr, do_usat_h) +SQRSHRN4(sme2_sqrshrun_dh, int64_t, uint16_t, H8, H2, do_srshr, do_usat_h) + +#undef SQRSHRN4 + +/* Expand and convert */ +void HELPER(sme2_fcvt_w)(void *vd, void *vs, float_status *fpst, uint32_t desc) +{ + ARMVectorReg scratch; + size_t oprsz = simd_oprsz(desc); + size_t i, n = oprsz / 4; + float16 *s = vs; + float32 *d0 = vd; + float32 *d1 = vd + sizeof(ARMVectorReg); + + if (vectors_overlap(vd, 1, vs, 2)) { + s = memcpy(&scratch, s, oprsz); + } + + for (i = 0; i < n; ++i) { + d0[H4(i)] = sve_f16_to_f32(s[H2(i)], 
fpst); + } + for (i = 0; i < n; ++i) { + d1[H4(i)] = sve_f16_to_f32(s[H2(n + i)], fpst); + } +} + +#define UNPK(NAME, SREG, TW, TN, HW, HN) \ +void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \ +{ \ + ARMVectorReg scratch[SREG]; \ + size_t oprsz = simd_oprsz(desc); \ + size_t n = oprsz / sizeof(TW); \ + if (vectors_overlap(vd, 2 * SREG, vs, SREG)) { \ + vs = memcpy(scratch, vs, sizeof(scratch)); \ + } \ + for (size_t r = 0; r < SREG; ++r) { \ + TN *s = vs + r * sizeof(ARMVectorReg); \ + for (size_t i = 0; i < 2; ++i) { \ + TW *d = vd + (2 * r + i) * sizeof(ARMVectorReg); \ + for (size_t e = 0; e < n; ++e) { \ + d[HW(e)] = s[HN(i * n + e)]; \ + } \ + } \ + } \ +} + +UNPK(sme2_sunpk2_bh, 1, int16_t, int8_t, H2, H1) +UNPK(sme2_sunpk2_hs, 1, int32_t, int16_t, H4, H2) +UNPK(sme2_sunpk2_sd, 1, int64_t, int32_t, H8, H4) + +UNPK(sme2_sunpk4_bh, 2, int16_t, int8_t, H2, H1) +UNPK(sme2_sunpk4_hs, 2, int32_t, int16_t, H4, H2) +UNPK(sme2_sunpk4_sd, 2, int64_t, int32_t, H8, H4) + +UNPK(sme2_uunpk2_bh, 1, uint16_t, uint8_t, H2, H1) +UNPK(sme2_uunpk2_hs, 1, uint32_t, uint16_t, H4, H2) +UNPK(sme2_uunpk2_sd, 1, uint64_t, uint32_t, H8, H4) + +UNPK(sme2_uunpk4_bh, 2, uint16_t, uint8_t, H2, H1) +UNPK(sme2_uunpk4_hs, 2, uint32_t, uint16_t, H4, H2) +UNPK(sme2_uunpk4_sd, 2, uint64_t, uint32_t, H8, H4) + +#undef UNPK + +/* Deinterleave and convert. */ +void HELPER(sme2_fcvtl)(void *vd, void *vs, float_status *fpst, uint32_t desc) +{ + size_t i, n = simd_oprsz(desc) / 4; + float16 *s = vs; + float32 *d0 = vd; + float32 *d1 = vd + sizeof(ARMVectorReg); + + for (i = 0; i < n; ++i) { + float32 v0 = sve_f16_to_f32(s[H2(i * 2 + 0)], fpst); + float32 v1 = sve_f16_to_f32(s[H2(i * 2 + 1)], fpst); + d0[H4(i)] = v0; + d1[H4(i)] = v1; + } +} + +void HELPER(sme2_scvtf)(void *vd, void *vs, float_status *fpst, uint32_t desc) +{ + size_t i, n = simd_oprsz(desc) / 4; + int32_t *d = vd; + float32 *s = vs; + + for (i = 0; i < n; ++i) { + d[i] = int32_to_float32(s[i], fpst); + } +} + +void HELPER(sme2_ucvtf)(void *vd, void *vs, float_status *fpst, uint32_t desc) +{ + size_t i, n = simd_oprsz(desc) / 4; + uint32_t *d = vd; + float32 *s = vs; + + for (i = 0; i < n; ++i) { + d[i] = uint32_to_float32(s[i], fpst); + } +} + +#define ZIP2(NAME, TYPE, H) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ +{ \ + ARMVectorReg scratch[2]; \ + size_t oprsz = simd_oprsz(desc); \ + size_t pairs = oprsz / (sizeof(TYPE) * 2); \ + TYPE *n = vn, *m = vm; \ + if (vectors_overlap(vd, 2, vn, 1)) { \ + n = memcpy(&scratch[0], vn, oprsz); \ + } \ + if (vectors_overlap(vd, 2, vm, 1)) { \ + m = memcpy(&scratch[1], vm, oprsz); \ + } \ + for (size_t r = 0; r < 2; ++r) { \ + TYPE *d = vd + r * sizeof(ARMVectorReg); \ + size_t base = r * pairs; \ + for (size_t p = 0; p < pairs; ++p) { \ + d[H(2 * p + 0)] = n[base + H(p)]; \ + d[H(2 * p + 1)] = m[base + H(p)]; \ + } \ + } \ +} + +ZIP2(sme2_zip2_b, uint8_t, H1) +ZIP2(sme2_zip2_h, uint16_t, H2) +ZIP2(sme2_zip2_s, uint32_t, H4) +ZIP2(sme2_zip2_d, uint64_t, ) +ZIP2(sme2_zip2_q, Int128, ) + +#undef ZIP2 + +#define ZIP4(NAME, TYPE, H) \ +void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \ +{ \ + ARMVectorReg scratch[4]; \ + size_t oprsz = simd_oprsz(desc); \ + size_t quads = oprsz / (sizeof(TYPE) * 4); \ + TYPE *s0, *s1, *s2, *s3; \ + if (vs == vd) { \ + vs = memcpy(scratch, vs, sizeof(scratch)); \ + } \ + s0 = vs; \ + s1 = vs + sizeof(ARMVectorReg); \ + s2 = vs + 2 * sizeof(ARMVectorReg); \ + s3 = vs + 3 * sizeof(ARMVectorReg); \ + for (size_t r = 0; r < 4; ++r) { \ + TYPE *d = vd + r * 
sizeof(ARMVectorReg); \ + size_t base = r * quads; \ + for (size_t q = 0; q < quads; ++q) { \ + d[H(4 * q + 0)] = s0[base + H(q)]; \ + d[H(4 * q + 1)] = s1[base + H(q)]; \ + d[H(4 * q + 2)] = s2[base + H(q)]; \ + d[H(4 * q + 3)] = s3[base + H(q)]; \ + } \ + } \ +} + +ZIP4(sme2_zip4_b, uint8_t, H1) +ZIP4(sme2_zip4_h, uint16_t, H2) +ZIP4(sme2_zip4_s, uint32_t, H4) +ZIP4(sme2_zip4_d, uint64_t, ) +ZIP4(sme2_zip4_q, Int128, ) + +#undef ZIP4 + +#define UZP2(NAME, TYPE, H) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ +{ \ + ARMVectorReg scratch[2]; \ + size_t oprsz = simd_oprsz(desc); \ + size_t pairs = oprsz / (sizeof(TYPE) * 2); \ + TYPE *d0 = vd, *d1 = vd + sizeof(ARMVectorReg); \ + if (vectors_overlap(vd, 2, vn, 1)) { \ + vn = memcpy(&scratch[0], vn, oprsz); \ + } \ + if (vectors_overlap(vd, 2, vm, 1)) { \ + vm = memcpy(&scratch[1], vm, oprsz); \ + } \ + for (size_t r = 0; r < 2; ++r) { \ + TYPE *s = r ? vm : vn; \ + size_t base = r * pairs; \ + for (size_t p = 0; p < pairs; ++p) { \ + d0[base + H(p)] = s[H(2 * p + 0)]; \ + d1[base + H(p)] = s[H(2 * p + 1)]; \ + } \ + } \ +} + +UZP2(sme2_uzp2_b, uint8_t, H1) +UZP2(sme2_uzp2_h, uint16_t, H2) +UZP2(sme2_uzp2_s, uint32_t, H4) +UZP2(sme2_uzp2_d, uint64_t, ) +UZP2(sme2_uzp2_q, Int128, ) + +#undef UZP2 + +#define UZP4(NAME, TYPE, H) \ +void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \ +{ \ + ARMVectorReg scratch[4]; \ + size_t oprsz = simd_oprsz(desc); \ + size_t quads = oprsz / (sizeof(TYPE) * 4); \ + TYPE *d0, *d1, *d2, *d3; \ + if (vs == vd) { \ + vs = memcpy(scratch, vs, sizeof(scratch)); \ + } \ + d0 = vd; \ + d1 = vd + sizeof(ARMVectorReg); \ + d2 = vd + 2 * sizeof(ARMVectorReg); \ + d3 = vd + 3 * sizeof(ARMVectorReg); \ + for (size_t r = 0; r < 4; ++r) { \ + TYPE *s = vs + r * sizeof(ARMVectorReg); \ + size_t base = r * quads; \ + for (size_t q = 0; q < quads; ++q) { \ + d0[base + H(q)] = s[H(4 * q + 0)]; \ + d1[base + H(q)] = s[H(4 * q + 1)]; \ + d2[base + H(q)] = s[H(4 * q + 2)]; \ + d3[base + H(q)] = s[H(4 * q + 3)]; \ + } \ + } \ +} + +UZP4(sme2_uzp4_b, uint8_t, H1) +UZP4(sme2_uzp4_h, uint16_t, H2) +UZP4(sme2_uzp4_s, uint32_t, H4) +UZP4(sme2_uzp4_d, uint64_t, ) +UZP4(sme2_uzp4_q, Int128, ) + +#undef UZP4 + +#define ICLAMP(NAME, TYPE, H) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ +{ \ + size_t stride = sizeof(ARMVectorReg) / sizeof(TYPE); \ + size_t elements = simd_oprsz(desc) / sizeof(TYPE); \ + size_t nreg = simd_data(desc); \ + TYPE *d = vd, *n = vn, *m = vm; \ + for (size_t e = 0; e < elements; e++) { \ + TYPE nn = n[H(e)], mm = m[H(e)]; \ + for (size_t r = 0; r < nreg; r++) { \ + TYPE *dd = &d[r * stride + H(e)]; \ + *dd = MIN(MAX(*dd, nn), mm); \ + } \ + } \ +} + +ICLAMP(sme2_sclamp_b, int8_t, H1) +ICLAMP(sme2_sclamp_h, int16_t, H2) +ICLAMP(sme2_sclamp_s, int32_t, H4) +ICLAMP(sme2_sclamp_d, int64_t, H8) + +ICLAMP(sme2_uclamp_b, uint8_t, H1) +ICLAMP(sme2_uclamp_h, uint16_t, H2) +ICLAMP(sme2_uclamp_s, uint32_t, H4) +ICLAMP(sme2_uclamp_d, uint64_t, H8) + +#undef ICLAMP + +/* + * Note the argument ordering to minnum and maxnum must match + * the ARM pseudocode so that NaNs are propagated properly. 
+ */ +#define FCLAMP(NAME, TYPE, H) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, \ + float_status *fpst, uint32_t desc) \ +{ \ + size_t stride = sizeof(ARMVectorReg) / sizeof(TYPE); \ + size_t elements = simd_oprsz(desc) / sizeof(TYPE); \ + size_t nreg = simd_data(desc); \ + TYPE *d = vd, *n = vn, *m = vm; \ + for (size_t e = 0; e < elements; e++) { \ + TYPE nn = n[H(e)], mm = m[H(e)]; \ + for (size_t r = 0; r < nreg; r++) { \ + TYPE *dd = &d[r * stride + H(e)]; \ + *dd = TYPE##_minnum(TYPE##_maxnum(nn, *dd, fpst), mm, fpst); \ + } \ + } \ +} + +FCLAMP(sme2_fclamp_h, float16, H2) +FCLAMP(sme2_fclamp_s, float32, H4) +FCLAMP(sme2_fclamp_d, float64, H8) +FCLAMP(sme2_bfclamp, bfloat16, H2) + +#undef FCLAMP + +void HELPER(sme2_sel_b)(void *vd, void *vn, void *vm, + uint32_t png, uint32_t desc) +{ + int vl = simd_oprsz(desc); + int nreg = simd_data(desc); + int elements = vl / sizeof(uint8_t); + DecodeCounter p = decode_counter(png, vl, MO_8); + + if (p.lg2_stride == 0) { + if (p.invert) { + for (int r = 0; r < nreg; r++) { + uint8_t *d = vd + r * sizeof(ARMVectorReg); + uint8_t *n = vn + r * sizeof(ARMVectorReg); + uint8_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + + if (split <= 0) { + memcpy(d, n, vl); /* all true */ + } else if (elements <= split) { + memcpy(d, m, vl); /* all false */ + } else { + for (int e = 0; e < split; e++) { + d[H1(e)] = m[H1(e)]; + } + for (int e = split; e < elements; e++) { + d[H1(e)] = n[H1(e)]; + } + } + } + } else { + for (int r = 0; r < nreg; r++) { + uint8_t *d = vd + r * sizeof(ARMVectorReg); + uint8_t *n = vn + r * sizeof(ARMVectorReg); + uint8_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + + if (split <= 0) { + memcpy(d, m, vl); /* all false */ + } else if (elements <= split) { + memcpy(d, n, vl); /* all true */ + } else { + for (int e = 0; e < split; e++) { + d[H1(e)] = n[H1(e)]; + } + for (int e = split; e < elements; e++) { + d[H1(e)] = m[H1(e)]; + } + } + } + } + } else { + int estride = 1 << p.lg2_stride; + if (p.invert) { + for (int r = 0; r < nreg; r++) { + uint8_t *d = vd + r * sizeof(ARMVectorReg); + uint8_t *n = vn + r * sizeof(ARMVectorReg); + uint8_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + int e = 0; + + for (; e < MIN(split, elements); e++) { + d[H1(e)] = m[H1(e)]; + } + for (; e < elements; e += estride) { + d[H1(e)] = n[H1(e)]; + for (int i = 1; i < estride; i++) { + d[H1(e + i)] = m[H1(e + i)]; + } + } + } + } else { + for (int r = 0; r < nreg; r++) { + uint8_t *d = vd + r * sizeof(ARMVectorReg); + uint8_t *n = vn + r * sizeof(ARMVectorReg); + uint8_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + int e = 0; + + for (; e < MIN(split, elements); e += estride) { + d[H1(e)] = n[H1(e)]; + for (int i = 1; i < estride; i++) { + d[H1(e + i)] = m[H1(e + i)]; + } + } + for (; e < elements; e++) { + d[H1(e)] = m[H1(e)]; + } + } + } + } +} + +void HELPER(sme2_sel_h)(void *vd, void *vn, void *vm, + uint32_t png, uint32_t desc) +{ + int vl = simd_oprsz(desc); + int nreg = simd_data(desc); + int elements = vl / sizeof(uint16_t); + DecodeCounter p = decode_counter(png, vl, MO_16); + + if (p.lg2_stride == 0) { + if (p.invert) { + for (int r = 0; r < nreg; r++) { + uint16_t *d = vd + r * sizeof(ARMVectorReg); + uint16_t *n = vn + r * sizeof(ARMVectorReg); + uint16_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + + if (split <= 0) { + memcpy(d, n, vl); /* all true */ + } else if (elements <= split) 
{ + memcpy(d, m, vl); /* all false */ + } else { + for (int e = 0; e < split; e++) { + d[H2(e)] = m[H2(e)]; + } + for (int e = split; e < elements; e++) { + d[H2(e)] = n[H2(e)]; + } + } + } + } else { + for (int r = 0; r < nreg; r++) { + uint16_t *d = vd + r * sizeof(ARMVectorReg); + uint16_t *n = vn + r * sizeof(ARMVectorReg); + uint16_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + + if (split <= 0) { + memcpy(d, m, vl); /* all false */ + } else if (elements <= split) { + memcpy(d, n, vl); /* all true */ + } else { + for (int e = 0; e < split; e++) { + d[H2(e)] = n[H2(e)]; + } + for (int e = split; e < elements; e++) { + d[H2(e)] = m[H2(e)]; + } + } + } + } + } else { + int estride = 1 << p.lg2_stride; + if (p.invert) { + for (int r = 0; r < nreg; r++) { + uint16_t *d = vd + r * sizeof(ARMVectorReg); + uint16_t *n = vn + r * sizeof(ARMVectorReg); + uint16_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + int e = 0; + + for (; e < MIN(split, elements); e++) { + d[H2(e)] = m[H2(e)]; + } + for (; e < elements; e += estride) { + d[H2(e)] = n[H2(e)]; + for (int i = 1; i < estride; i++) { + d[H2(e + i)] = m[H2(e + i)]; + } + } + } + } else { + for (int r = 0; r < nreg; r++) { + uint16_t *d = vd + r * sizeof(ARMVectorReg); + uint16_t *n = vn + r * sizeof(ARMVectorReg); + uint16_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + int e = 0; + + for (; e < MIN(split, elements); e += estride) { + d[H2(e)] = n[H2(e)]; + for (int i = 1; i < estride; i++) { + d[H2(e + i)] = m[H2(e + i)]; + } + } + for (; e < elements; e++) { + d[H2(e)] = m[H2(e)]; + } + } + } + } +} + +void HELPER(sme2_sel_s)(void *vd, void *vn, void *vm, + uint32_t png, uint32_t desc) +{ + int vl = simd_oprsz(desc); + int nreg = simd_data(desc); + int elements = vl / sizeof(uint32_t); + DecodeCounter p = decode_counter(png, vl, MO_32); + + if (p.lg2_stride == 0) { + if (p.invert) { + for (int r = 0; r < nreg; r++) { + uint32_t *d = vd + r * sizeof(ARMVectorReg); + uint32_t *n = vn + r * sizeof(ARMVectorReg); + uint32_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + + if (split <= 0) { + memcpy(d, n, vl); /* all true */ + } else if (elements <= split) { + memcpy(d, m, vl); /* all false */ + } else { + for (int e = 0; e < split; e++) { + d[H4(e)] = m[H4(e)]; + } + for (int e = split; e < elements; e++) { + d[H4(e)] = n[H4(e)]; + } + } + } + } else { + for (int r = 0; r < nreg; r++) { + uint32_t *d = vd + r * sizeof(ARMVectorReg); + uint32_t *n = vn + r * sizeof(ARMVectorReg); + uint32_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + + if (split <= 0) { + memcpy(d, m, vl); /* all false */ + } else if (elements <= split) { + memcpy(d, n, vl); /* all true */ + } else { + for (int e = 0; e < split; e++) { + d[H4(e)] = n[H4(e)]; + } + for (int e = split; e < elements; e++) { + d[H4(e)] = m[H4(e)]; + } + } + } + } + } else { + /* p.esz must be MO_64, so stride must be 2. 
*/ + if (p.invert) { + for (int r = 0; r < nreg; r++) { + uint32_t *d = vd + r * sizeof(ARMVectorReg); + uint32_t *n = vn + r * sizeof(ARMVectorReg); + uint32_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + int e = 0; + + for (; e < MIN(split, elements); e++) { + d[H4(e)] = m[H4(e)]; + } + for (; e < elements; e += 2) { + d[H4(e)] = n[H4(e)]; + d[H4(e + 1)] = m[H4(e + 1)]; + } + } + } else { + for (int r = 0; r < nreg; r++) { + uint32_t *d = vd + r * sizeof(ARMVectorReg); + uint32_t *n = vn + r * sizeof(ARMVectorReg); + uint32_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + int e = 0; + + for (; e < MIN(split, elements); e += 2) { + d[H4(e)] = n[H4(e)]; + d[H4(e + 1)] = m[H4(e + 1)]; + } + for (; e < elements; e++) { + d[H4(e)] = m[H4(e)]; + } + } + } + } +} + +void HELPER(sme2_sel_d)(void *vd, void *vn, void *vm, + uint32_t png, uint32_t desc) +{ + int vl = simd_oprsz(desc); + int nreg = simd_data(desc); + int elements = vl / sizeof(uint64_t); + DecodeCounter p = decode_counter(png, vl, MO_64); + + if (p.invert) { + for (int r = 0; r < nreg; r++) { + uint64_t *d = vd + r * sizeof(ARMVectorReg); + uint64_t *n = vn + r * sizeof(ARMVectorReg); + uint64_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + + if (split <= 0) { + memcpy(d, n, vl); /* all true */ + } else if (elements <= split) { + memcpy(d, m, vl); /* all false */ + } else { + memcpy(d, m, split * sizeof(uint64_t)); + memcpy(d + split, n + split, + (elements - split) * sizeof(uint64_t)); + } + } + } else { + for (int r = 0; r < nreg; r++) { + uint64_t *d = vd + r * sizeof(ARMVectorReg); + uint64_t *n = vn + r * sizeof(ARMVectorReg); + uint64_t *m = vm + r * sizeof(ARMVectorReg); + int split = p.count - r * elements; + + if (split <= 0) { + memcpy(d, m, vl); /* all false */ + } else if (elements <= split) { + memcpy(d, n, vl); /* all true */ + } else { + memcpy(d, n, split * sizeof(uint64_t)); + memcpy(d + split, m + split, + (elements - split) * sizeof(uint64_t)); + } + } + } +} diff --git a/target/arm/tcg/sve.decode b/target/arm/tcg/sve.decode index 04b6fcc0cfd..ab63cfaa0f0 100644 --- a/target/arm/tcg/sve.decode +++ b/target/arm/tcg/sve.decode @@ -30,6 +30,7 @@ %size_23 23:2 %dtype_23_13 23:2 13:2 %index3_22_19 22:1 19:2 +%index3_22_17 22:1 17:2 %index3_19_11 19:2 11:1 %index2_20_11 20:1 11:1 @@ -57,6 +58,11 @@ # as propagated via the MOVPRFX instruction. %reg_movprfx 0:5 +%rn_ax2 6:4 !function=times_2 + +%pnd 0:3 !function=plus_8 +%pnn 5:3 !function=plus_8 + ########################################################################### # Named attribute sets. These are used to make nice(er) names # when creating helpers common to those for the individual @@ -102,6 +108,7 @@ # Two operand @pd_pn ........ esz:2 .. .... ....... rn:4 . rd:4 &rr_esz @rd_rn ........ esz:2 ...... ...... rn:5 rd:5 &rr_esz +@rd_rnx2 ........ ... ..... ...... ..... rd:5 &rr_esz rn=%rn_ax2 # Two operand with governing predicate, flags setting @pd_pg_pn_s ........ . s:1 ...... .. pg:4 . rn:4 . rd:4 &rpr_s @@ -131,11 +138,11 @@ @rda_rn_rm ........ esz:2 . rm:5 ... ... rn:5 rd:5 \ &rrrr_esz ra=%reg_movprfx -# Four operand with unused vector element size -@rda_rn_rm_e0 ........ ... rm:5 ... ... rn:5 rd:5 \ - &rrrr_esz esz=0 ra=%reg_movprfx -@rdn_ra_rm_e0 ........ ... rm:5 ... ... ra:5 rd:5 \ - &rrrr_esz esz=0 rn=%reg_movprfx +# Four operand with explicit vector element size +@rda_rn_rm_ex ........ ... rm:5 ... ... rn:5 rd:5 \ + &rrrr_esz ra=%reg_movprfx +@rdn_ra_rm_ex ........ 
... rm:5 ... ... ra:5 rd:5 \ + &rrrr_esz rn=%reg_movprfx # Three operand with "memory" size, aka immediate left shift @rd_rn_msz_rm ........ ... rm:5 .... imm:2 rn:5 rd:5 &rrri @@ -222,6 +229,9 @@ @rprr_load_dt ....... dtype:4 rm:5 ... pg:3 rn:5 rd:5 &rprr_load @rpri_load_dt ....... dtype:4 . imm:s4 ... pg:3 rn:5 rd:5 &rpri_load +@rprr_load ....... .... rm:5 ... pg:3 rn:5 rd:5 &rprr_load +@rpri_load ....... .... . imm:s4 ... pg:3 rn:5 rd:5 &rpri_load + @rprr_load_msz ....... .... rm:5 ... pg:3 rn:5 rd:5 \ &rprr_load dtype=%msz_dtype @rpri_load_msz ....... .... . imm:s4 ... pg:3 rn:5 rd:5 \ @@ -245,7 +255,7 @@ # Stores; user must fill in ESZ, MSZ, NREG as needed. @rprr_store ....... .. .. rm:5 ... pg:3 rn:5 rd:5 &rprr_store -@rpri_store_msz ....... msz:2 .. . imm:s4 ... pg:3 rn:5 rd:5 &rpri_store +@rpri_store ....... .. .. . imm:s4 ... pg:3 rn:5 rd:5 &rpri_store @rprr_store_esz_n0 ....... .. esz:2 rm:5 ... pg:3 rn:5 rd:5 \ &rprr_store nreg=0 @rprr_scatter_store ....... msz:2 .. rm:5 ... pg:3 rn:5 rd:5 \ @@ -320,6 +330,11 @@ ORV 00000100 .. 011 000 001 ... ..... ..... @rd_pg_rn EORV 00000100 .. 011 001 001 ... ..... ..... @rd_pg_rn ANDV 00000100 .. 011 010 001 ... ..... ..... @rd_pg_rn +# SVE2.1 bitwise logical reduction (quadwords) +ORQV 00000100 .. 011 100 001 ... ..... ..... @rd_pg_rn +EORQV 00000100 .. 011 101 001 ... ..... ..... @rd_pg_rn +ANDQV 00000100 .. 011 110 001 ... ..... ..... @rd_pg_rn + # SVE constructive prefix (predicated) MOVPRFX_z 00000100 .. 010 000 001 ... ..... ..... @rd_pg_rn MOVPRFX_m 00000100 .. 010 001 001 ... ..... ..... @rd_pg_rn @@ -335,6 +350,13 @@ UMAXV 00000100 .. 001 001 001 ... ..... ..... @rd_pg_rn SMINV 00000100 .. 001 010 001 ... ..... ..... @rd_pg_rn UMINV 00000100 .. 001 011 001 ... ..... ..... @rd_pg_rn +# SVE2.1 segment reduction +ADDQV 00000100 .. 000 101 001 ... ..... ..... @rd_pg_rn +SMAXQV 00000100 .. 001 100 001 ... ..... ..... @rd_pg_rn +SMINQV 00000100 .. 001 110 001 ... ..... ..... @rd_pg_rn +UMAXQV 00000100 .. 001 101 001 ... ..... ..... @rd_pg_rn +UMINQV 00000100 .. 001 111 001 ... ..... ..... @rd_pg_rn + ### SVE Shift by Immediate - Predicated Group # SVE bitwise shift by immediate (predicated) @@ -428,12 +450,12 @@ XAR 00000100 .. 1 ..... 001 101 rm:5 rd:5 &rrri_esz \ rn=%reg_movprfx esz=%tszimm16_esz imm=%tszimm16_shr # SVE2 bitwise ternary operations -EOR3 00000100 00 1 ..... 001 110 ..... ..... @rdn_ra_rm_e0 -BSL 00000100 00 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0 -BCAX 00000100 01 1 ..... 001 110 ..... ..... @rdn_ra_rm_e0 -BSL1N 00000100 01 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0 -BSL2N 00000100 10 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0 -NBSL 00000100 11 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0 +EOR3 00000100 00 1 ..... 001 110 ..... ..... @rdn_ra_rm_ex esz=0 +BSL 00000100 00 1 ..... 001 111 ..... ..... @rdn_ra_rm_ex esz=0 +BCAX 00000100 01 1 ..... 001 110 ..... ..... @rdn_ra_rm_ex esz=0 +BSL1N 00000100 01 1 ..... 001 111 ..... ..... @rdn_ra_rm_ex esz=0 +BSL2N 00000100 10 1 ..... 001 111 ..... ..... @rdn_ra_rm_ex esz=0 +NBSL 00000100 11 1 ..... 001 111 ..... ..... @rdn_ra_rm_ex esz=0 ### SVE Index Generation Group @@ -559,6 +581,14 @@ DUP_s 00000101 .. 1 00000 001110 ..... ..... @rd_rn DUP_x 00000101 .. 1 ..... 
001000 rn:5 rd:5 \ &rri imm=%imm7_22_16 +# SVE Permute Vector - one source quadwords +DUPQ 00000101 001 imm:4 1 001001 rn:5 rd:5 &rri_esz esz=0 +DUPQ 00000101 001 imm:3 10 001001 rn:5 rd:5 &rri_esz esz=1 +DUPQ 00000101 001 imm:2 100 001001 rn:5 rd:5 &rri_esz esz=2 +DUPQ 00000101 001 imm:1 1000 001001 rn:5 rd:5 &rri_esz esz=3 + +EXTQ 00000101 0110 imm:4 001001 rn:5 rd:5 &rri + # SVE insert SIMD&FP scalar register INSR_f 00000101 .. 1 10100 001110 ..... ..... @rdn_rm @@ -568,6 +598,22 @@ INSR_r 00000101 .. 1 00100 001110 ..... ..... @rdn_rm # SVE reverse vector elements REV_v 00000101 .. 1 11000 001110 ..... ..... @rd_rn +# SVE move predicate to/from vector + +PMOV_pv 00000101 00 101 01 0001110 rn:5 0 rd:4 \ + &rri_esz esz=0 imm=0 +PMOV_pv 00000101 00 101 1 imm:1 0001110 rn:5 0 rd:4 &rri_esz esz=1 +PMOV_pv 00000101 01 101 imm:2 0001110 rn:5 0 rd:4 &rri_esz esz=2 +PMOV_pv 00000101 1. 101 .. 0001110 rn:5 0 rd:4 \ + &rri_esz esz=3 imm=%index3_22_17 + +PMOV_vp 00000101 00 101 01 1001110 0 rn:4 rd:5 \ + &rri_esz esz=0 imm=0 +PMOV_vp 00000101 00 101 1 imm:1 1001110 0 rn:4 rd:5 &rri_esz esz=1 +PMOV_vp 00000101 01 101 imm:2 1001110 0 rn:4 rd:5 &rri_esz esz=2 +PMOV_vp 00000101 1. 101 .. 1001110 0 rn:4 rd:5 \ + &rri_esz esz=3 imm=%index3_22_17 + # SVE vector table lookup TBL 00000101 .. 1 ..... 001100 ..... ..... @rd_rn_rm @@ -614,6 +660,15 @@ UZP2_q 00000101 10 1 ..... 000 011 ..... ..... @rd_rn_rm_e0 TRN1_q 00000101 10 1 ..... 000 110 ..... ..... @rd_rn_rm_e0 TRN2_q 00000101 10 1 ..... 000 111 ..... ..... @rd_rn_rm_e0 +# SVE2.1 permute vector elements (quadwords) +ZIPQ1 01000100 .. 0 ..... 111 000 ..... ..... @rd_rn_rm +ZIPQ2 01000100 .. 0 ..... 111 001 ..... ..... @rd_rn_rm +UZPQ1 01000100 .. 0 ..... 111 010 ..... ..... @rd_rn_rm +UZPQ2 01000100 .. 0 ..... 111 011 ..... ..... @rd_rn_rm + +TBLQ 01000100 .. 0 ..... 111 110 ..... ..... @rd_rn_rm +TBXQ 00000101 .. 1 ..... 001 101 ..... ..... @rd_rn_rm + ### SVE Permute - Predicated Group # SVE compress active elements @@ -725,6 +780,7 @@ PTEST 00100101 01 010000 11 pg:4 0 rn:4 0 0000 # SVE predicate initialize PTRUE 00100101 esz:2 01100 s:1 111000 pat:5 0 rd:4 +PTRUE_cnt 00100101 esz:2 1000000111100000010 ... rd=%pnd # SVE initialize FFR SETFFR 00100101 0010 1100 1001 0000 0000 0000 @@ -765,7 +821,8 @@ BRKN 00100101 0. 01100001 .... 0 .... 0 .... @pd_pg_pn_s ### SVE Predicate Count Group # SVE predicate count -CNTP 00100101 .. 100 000 10 .... 0 .... ..... @rd_pg4_pn +CNTP 00100101 .. 100 000 10 .... 0 .... ..... @rd_pg4_pn +CNTP_c 00100101 esz:2 100 000 10 000 vl:1 1 rn:4 rd:5 # SVE inc/dec register by predicate count INCDECP_r 00100101 .. 10110 d:1 10001 00 .... ..... @incdec_pred u=1 @@ -786,11 +843,35 @@ SINCDECP_z 00100101 .. 1010 d:1 u:1 10000 00 .... ..... @incdec2_pred CTERM 00100101 1 sf:1 1 rm:5 001000 rn:5 ne:1 0000 # SVE integer compare scalar count and limit -WHILE 00100101 esz:2 1 rm:5 000 sf:1 u:1 lt:1 rn:5 eq:1 rd:4 +&while esz rd rn rm sf u eq +WHILE_lt 00100101 esz:2 1 rm:5 000 sf:1 u:1 1 rn:5 eq:1 rd:4 &while +WHILE_gt 00100101 esz:2 1 rm:5 000 sf:1 u:1 0 rn:5 eq:1 rd:4 &while # SVE2 pointer conflict compare WHILE_ptr 00100101 esz:2 1 rm:5 001 100 rn:5 rw:1 rd:4 +# SVE2.1 predicate pair +%pd_pair 1:3 !function=times_2 +@while_pair ........ esz:2 . rm:5 .... u:1 . rn:5 . ... eq:1 \ + &while rd=%pd_pair sf=1 + +WHILE_lt_pair 00100101 .. 1 ..... 0101 . 1 ..... 1 ... . @while_pair +WHILE_gt_pair 00100101 .. 1 ..... 0101 . 0 ..... 1 ... . @while_pair + +# SVE2.1 predicate as count +@while_cnt ........ esz:2 . rm:5 .... u:1 . rn:5 . 
eq:1 ... \ + &while rd=%pnd sf=1 + +WHILE_lt_cnt2 00100101 .. 1 ..... 0100 . 1 ..... 1 . ... @while_cnt +WHILE_lt_cnt4 00100101 .. 1 ..... 0110 . 1 ..... 1 . ... @while_cnt +WHILE_gt_cnt2 00100101 .. 1 ..... 0100 . 0 ..... 1 . ... @while_cnt +WHILE_gt_cnt4 00100101 .. 1 ..... 0110 . 0 ..... 1 . ... @while_cnt + +# SVE2.1 extract mask predicate from predicate-as-counter +&pext rd rn esz imm +PEXT_1 00100101 esz:2 1 00000 0111 00 imm:2 ... 1 rd:4 &pext rn=%pnn +PEXT_2 00100101 esz:2 1 00000 0111 010 imm:1 ... 1 rd:4 &pext rn=%pnn + ### SVE Integer Wide Immediate - Unpredicated Group # SVE broadcast floating-point immediate (unpredicated) @@ -851,10 +932,13 @@ CDOT_zzzz 01000100 esz:2 0 rm:5 0001 rot:2 rn:5 rd:5 ra=%reg_movprfx #### SVE Multiply - Indexed # SVE integer dot product (indexed) -SDOT_zzxw_s 01000100 10 1 ..... 000000 ..... ..... @rrxr_2 esz=2 -SDOT_zzxw_d 01000100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3 -UDOT_zzxw_s 01000100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2 -UDOT_zzxw_d 01000100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3 +SDOT_zzxw_4s 01000100 10 1 ..... 000000 ..... ..... @rrxr_2 esz=2 +SDOT_zzxw_4d 01000100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3 +UDOT_zzxw_4s 01000100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2 +UDOT_zzxw_4d 01000100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3 + +SDOT_zzxw_2s 01000100 10 0 ..... 110010 ..... ..... @rrxr_2 esz=2 +UDOT_zzxw_2s 01000100 10 0 ..... 110011 ..... ..... @rrxr_2 esz=2 # SVE2 integer multiply-add (indexed) MLA_zzxz_h 01000100 0. 1 ..... 000010 ..... ..... @rrxr_3 esz=1 @@ -873,8 +957,8 @@ SQRDMLSH_zzxz_s 01000100 10 1 ..... 000101 ..... ..... @rrxr_2 esz=2 SQRDMLSH_zzxz_d 01000100 11 1 ..... 000101 ..... ..... @rrxr_1 esz=3 # SVE mixed sign dot product (indexed) -USDOT_zzxw_s 01000100 10 1 ..... 000110 ..... ..... @rrxr_2 esz=2 -SUDOT_zzxw_s 01000100 10 1 ..... 000111 ..... ..... @rrxr_2 esz=2 +USDOT_zzxw_4s 01000100 10 1 ..... 000110 ..... ..... @rrxr_2 esz=2 +SUDOT_zzxw_4s 01000100 10 1 ..... 000111 ..... ..... @rrxr_2 esz=2 # SVE2 saturating multiply-add (indexed) SQDMLALB_zzxw_s 01000100 10 1 ..... 0010.0 ..... ..... @rrxr_3a esz=2 @@ -968,9 +1052,11 @@ FCMLA_zzxz 01100100 11 1 index:1 rm:4 0001 rot:2 rn:5 rd:5 \ ### SVE FP Multiply-Add Indexed Group # SVE floating-point multiply-add (indexed) +FMLA_zzxz 01100100 0. 1 ..... 000010 ..... ..... @rrxr_3 esz=0 FMLA_zzxz 01100100 0. 1 ..... 000000 ..... ..... @rrxr_3 esz=1 FMLA_zzxz 01100100 10 1 ..... 000000 ..... ..... @rrxr_2 esz=2 FMLA_zzxz 01100100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3 +FMLS_zzxz 01100100 0. 1 ..... 000011 ..... ..... @rrxr_3 esz=0 FMLS_zzxz 01100100 0. 1 ..... 000001 ..... ..... @rrxr_3 esz=1 FMLS_zzxz 01100100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2 FMLS_zzxz 01100100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3 @@ -978,6 +1064,7 @@ FMLS_zzxz 01100100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3 ### SVE FP Multiply Indexed Group # SVE floating-point multiply (indexed) +FMUL_zzx 01100100 0. 1 ..... 001010 ..... ..... @rrx_3 esz=0 FMUL_zzx 01100100 0. 1 ..... 001000 ..... ..... @rrx_3 esz=1 FMUL_zzx 01100100 10 1 ..... 001000 ..... ..... @rrx_2 esz=2 FMUL_zzx 01100100 11 1 ..... 001000 ..... ..... @rrx_1 esz=3 @@ -990,6 +1077,14 @@ FMINNMV 01100101 .. 000 101 001 ... ..... ..... @rd_pg_rn FMAXV 01100101 .. 000 110 001 ... ..... ..... @rd_pg_rn FMINV 01100101 .. 000 111 001 ... ..... ..... @rd_pg_rn +### SVE FP recursive reduction (quadwords) + +FADDQV 01100100 .. 010 000 101 ... ..... ..... @rd_pg_rn +FMAXNMQV 01100100 .. 010 100 101 ... 
..... ..... @rd_pg_rn +FMINNMQV 01100100 .. 010 101 101 ... ..... ..... @rd_pg_rn +FMAXQV 01100100 .. 010 110 101 ... ..... ..... @rd_pg_rn +FMINQV 01100100 .. 010 111 101 ... ..... ..... @rd_pg_rn + ## SVE Floating Point Unary Operations - Unpredicated Group FRECPE 01100101 .. 001 110 001100 ..... ..... @rd_rn @@ -1151,12 +1246,24 @@ LD1_zpiz 1000010 .. 01 ..... 1.. ... ..... ..... \ # SVE contiguous load (scalar plus scalar) LD_zprr 1010010 .... ..... 010 ... ..... ..... @rprr_load_dt nreg=0 +# LD1W (128-bit element) +LD_zprr 1010010 1000 rm:5 100 pg:3 rn:5 rd:5 \ + &rprr_load dtype=16 nreg=0 +# LD1D (128-bit element) +LD_zprr 1010010 1100 rm:5 100 pg:3 rn:5 rd:5 \ + &rprr_load dtype=17 nreg=0 # SVE contiguous first-fault load (scalar plus scalar) LDFF1_zprr 1010010 .... ..... 011 ... ..... ..... @rprr_load_dt nreg=0 # SVE contiguous load (scalar plus immediate) LD_zpri 1010010 .... 0.... 101 ... ..... ..... @rpri_load_dt nreg=0 +# LD1W (128-bit element) +LD_zpri 1010010 1000 1 imm:s4 001 pg:3 rn:5 rd:5 \ + &rpri_load dtype=16 nreg=0 +# LD1D (128-bit element) +LD_zpri 1010010 1100 1 imm:s4 001 pg:3 rn:5 rd:5 \ + &rpri_load dtype=17 nreg=0 # SVE contiguous non-fault load (scalar plus immediate) LDNF1_zpri 1010010 .... 1.... 101 ... ..... ..... @rpri_load_dt nreg=0 @@ -1166,12 +1273,26 @@ LDNF1_zpri 1010010 .... 1.... 101 ... ..... ..... @rpri_load_dt nreg=0 # SVE load multiple structures (scalar plus scalar) # LD2B, LD2H, LD2W, LD2D; etc. LD_zprr 1010010 .. nreg:2 ..... 110 ... ..... ..... @rprr_load_msz +# LD[234]Q +LD_zprr 1010010 01 01 ..... 100 ... ..... ..... \ + @rprr_load dtype=18 nreg=1 +LD_zprr 1010010 10 01 ..... 100 ... ..... ..... \ + @rprr_load dtype=18 nreg=2 +LD_zprr 1010010 11 01 ..... 100 ... ..... ..... \ + @rprr_load dtype=18 nreg=3 # SVE contiguous non-temporal load (scalar plus immediate) # LDNT1B, LDNT1H, LDNT1W, LDNT1D # SVE load multiple structures (scalar plus immediate) # LD2B, LD2H, LD2W, LD2D; etc. LD_zpri 1010010 .. nreg:2 0.... 111 ... ..... ..... @rpri_load_msz +# LD[234]Q +LD_zpri 1010010 01 001 .... 111 ... ..... ..... \ + @rpri_load dtype=18 nreg=1 +LD_zpri 1010010 10 001 .... 111 ... ..... ..... \ + @rpri_load dtype=18 nreg=2 +LD_zpri 1010010 11 001 .... 111 ... ..... ..... \ + @rpri_load dtype=18 nreg=3 # SVE load and broadcast quadword (scalar plus scalar) LD1RQ_zprr 1010010 .. 00 ..... 000 ... ..... ..... \ @@ -1222,6 +1343,10 @@ LD1_zprz 1100010 10 1. ..... 1.. ... ..... ..... \ LD1_zprz 1100010 11 1. ..... 11. ... ..... ..... \ @rprr_g_load_sc esz=3 msz=3 u=1 +# LD1Q. Note that this is subtly different from LD1_zprz because +# it is vector + scalar, not scalar + vector. +LD1Q 1100 0100 000 rm:5 101 pg:3 rn:5 rd:5 + # SVE 64-bit gather load (vector plus immediate) LD1_zpiz 1100010 .. 01 ..... 1.. ... ..... ..... \ @rpri_g_load esz=3 @@ -1245,8 +1370,20 @@ STR_zri 1110010 11 0. ..... 010 ... ..... ..... @rd_rn_i9 # SVE contiguous store (scalar plus immediate) # ST1B, ST1H, ST1W, ST1D; require msz <= esz -ST_zpri 1110010 .. esz:2 0.... 111 ... ..... ..... \ - @rpri_store_msz nreg=0 +ST_zpri 1110010 00 esz:2 0.... 111 ... ..... ..... \ + @rpri_store msz=0 nreg=0 +ST_zpri 1110010 01 esz:2 0.... 111 ... ..... ..... \ + @rpri_store msz=1 nreg=0 +ST_zpri 1110010 10 10 0.... 111 ... ..... ..... \ + @rpri_store msz=2 esz=2 nreg=0 +ST_zpri 1110010 10 11 0.... 111 ... ..... ..... \ + @rpri_store msz=2 esz=3 nreg=0 +ST_zpri 1110010 11 11 0.... 111 ... ..... ..... \ + @rpri_store msz=3 esz=3 nreg=0 +ST_zpri 1110010 10 00 0.... 111 ... ..... ..... 
\ + @rpri_store msz=2 esz=4 nreg=0 +ST_zpri 1110010 11 10 0.... 111 ... ..... ..... \ + @rpri_store msz=3 esz=4 nreg=0 # SVE contiguous store (scalar plus scalar) # ST1B, ST1H, ST1W, ST1D; require msz <= esz @@ -1255,20 +1392,40 @@ ST_zprr 1110010 00 .. ..... 010 ... ..... ..... \ @rprr_store_esz_n0 msz=0 ST_zprr 1110010 01 .. ..... 010 ... ..... ..... \ @rprr_store_esz_n0 msz=1 -ST_zprr 1110010 10 .. ..... 010 ... ..... ..... \ - @rprr_store_esz_n0 msz=2 +ST_zprr 1110010 10 10 ..... 010 ... ..... ..... \ + @rprr_store msz=2 esz=2 nreg=0 +ST_zprr 1110010 10 11 ..... 010 ... ..... ..... \ + @rprr_store msz=2 esz=3 nreg=0 ST_zprr 1110010 11 11 ..... 010 ... ..... ..... \ @rprr_store msz=3 esz=3 nreg=0 +ST_zprr 1110010 10 00 ..... 010 ... ..... ..... \ + @rprr_store msz=2 esz=4 nreg=0 +ST_zprr 1110010 11 10 ..... 010 ... ..... ..... \ + @rprr_store msz=3 esz=4 nreg=0 # SVE contiguous non-temporal store (scalar plus immediate) (nreg == 0) # SVE store multiple structures (scalar plus immediate) (nreg != 0) ST_zpri 1110010 .. nreg:2 1.... 111 ... ..... ..... \ - @rpri_store_msz esz=%size_23 + @rpri_store msz=%size_23 esz=%size_23 +# ST[234]Q +ST_zpri 11100100 01 00 .... 000 ... ..... ..... \ + @rpri_store msz=4 esz=4 nreg=1 +ST_zpri 11100100 10 00 .... 000 ... ..... ..... \ + @rpri_store msz=4 esz=4 nreg=2 +ST_zpri 11100100 11 00 .... 000 ... ..... ..... \ + @rpri_store msz=4 esz=4 nreg=3 # SVE contiguous non-temporal store (scalar plus scalar) (nreg == 0) # SVE store multiple structures (scalar plus scalar) (nreg != 0) -ST_zprr 1110010 msz:2 nreg:2 ..... 011 ... ..... ..... \ - @rprr_store esz=%size_23 +ST_zprr 1110010 .. nreg:2 ..... 011 ... ..... ..... \ + @rprr_store msz=%size_23 esz=%size_23 +# ST[234]Q +ST_zprr 11100100 01 1 ..... 000 ... ..... ..... \ + @rprr_store msz=4 esz=4 nreg=1 +ST_zprr 11100100 10 1 ..... 000 ... ..... ..... \ + @rprr_store msz=4 esz=4 nreg=2 +ST_zprr 11100100 11 1 ..... 000 ... ..... ..... \ + @rprr_store msz=4 esz=4 nreg=3 # SVE 32-bit scatter store (scalar plus 32-bit scaled offsets) # Require msz > 0 && msz <= esz. @@ -1293,6 +1450,10 @@ ST1_zprz 1110010 .. 01 ..... 101 ... ..... ..... \ ST1_zprz 1110010 .. 00 ..... 101 ... ..... ..... \ @rprr_scatter_store xs=2 esz=3 scale=0 +# ST1Q. Note that this is subtly different from ST1_zprz because +# it is vector + scalar, not scalar + vector. +ST1Q 1110 0100 001 rm:5 001 pg:3 rn:5 rd:5 + # SVE 64-bit scatter store (vector plus immediate) ST1_zpiz 1110010 .. 10 ..... 101 ... ..... ..... \ @rpri_scatter_store esz=3 @@ -1450,9 +1611,9 @@ EORTB 01000101 .. 0 ..... 10010 1 ..... ..... @rd_rn_rm ## SVE integer matrix multiply accumulate -SMMLA 01000101 00 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0 -USMMLA 01000101 10 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0 -UMMLA 01000101 11 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0 +SMMLA 01000101 00 0 ..... 10011 0 ..... ..... @rda_rn_rm_ex esz=2 +USMMLA 01000101 10 0 ..... 10011 0 ..... ..... @rda_rn_rm_ex esz=2 +UMMLA 01000101 11 0 ..... 10011 0 ..... ..... @rda_rn_rm_ex esz=2 ## SVE2 bitwise permute @@ -1504,13 +1665,22 @@ UABA 01000101 .. 0 ..... 11111 1 ..... ..... @rd_rn_rm #### SVE2 Narrowing ## SVE2 saturating extract narrow - # Bits 23, 18-16 are zero, limited in the translator via esz < 3 & imm == 0. -SQXTNB 01000101 .. 1 ..... 010 000 ..... ..... @rd_rn_tszimm_shl + +{ + SQCVTN_sh 01000101 00 1 10001 010 000 ....0 ..... @rd_rnx2 esz=1 + SQXTNB 01000101 .. 1 ..... 010 000 ..... ..... @rd_rn_tszimm_shl +} SQXTNT 01000101 .. 1 ..... 010 001 ..... ..... 
@rd_rn_tszimm_shl -UQXTNB 01000101 .. 1 ..... 010 010 ..... ..... @rd_rn_tszimm_shl +{ + UQCVTN_sh 01000101 00 1 10001 010 010 ....0 ..... @rd_rnx2 esz=1 + UQXTNB 01000101 .. 1 ..... 010 010 ..... ..... @rd_rn_tszimm_shl +} UQXTNT 01000101 .. 1 ..... 010 011 ..... ..... @rd_rn_tszimm_shl -SQXTUNB 01000101 .. 1 ..... 010 100 ..... ..... @rd_rn_tszimm_shl +{ + SQCVTUN_sh 01000101 00 1 10001 010 100 ....0 ..... @rd_rnx2 esz=1 + SQXTUNB 01000101 .. 1 ..... 010 100 ..... ..... @rd_rn_tszimm_shl +} SQXTUNT 01000101 .. 1 ..... 010 101 ..... ..... @rd_rn_tszimm_shl ## SVE2 bitwise shift right narrow @@ -1597,14 +1767,17 @@ UMLSLT_zzzw 01000100 .. 0 ..... 010 111 ..... ..... @rda_rn_rm CMLA_zzzz 01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5 ra=%reg_movprfx SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx -## SVE mixed sign dot product +## SVE dot product + +SDOT_zzzz_2s 01000100 00 0 ..... 110 010 ..... ..... @rda_rn_rm_ex esz=2 +UDOT_zzzz_2s 01000100 00 0 ..... 110 011 ..... ..... @rda_rn_rm_ex esz=2 -USDOT_zzzz 01000100 .. 0 ..... 011 110 ..... ..... @rda_rn_rm +USDOT_zzzz_4s 01000100 10 0 ..... 011 110 ..... ..... @rda_rn_rm_ex esz=2 ### SVE2 floating point matrix multiply accumulate -BFMMLA 01100100 01 1 ..... 111 001 ..... ..... @rda_rn_rm_e0 -FMMLA_s 01100100 10 1 ..... 111 001 ..... ..... @rda_rn_rm_e0 -FMMLA_d 01100100 11 1 ..... 111 001 ..... ..... @rda_rn_rm_e0 +BFMMLA 01100100 01 1 ..... 111 001 ..... ..... @rda_rn_rm_ex esz=1 +FMMLA_s 01100100 10 1 ..... 111 001 ..... ..... @rda_rn_rm_ex esz=2 +FMMLA_d 01100100 11 1 ..... 111 001 ..... ..... @rda_rn_rm_ex esz=3 ### SVE2 Memory Gather Load Group @@ -1654,26 +1827,35 @@ FCVTLT_sd 01100100 11 0010 11 101 ... ..... ..... @rd_pg_rn_e0 FLOGB 01100101 00 011 esz:2 0101 pg:3 rn:5 rd:5 &rpr_esz ### SVE2 floating-point multiply-add long (vectors) -FMLALB_zzzw 01100100 10 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0 -FMLALT_zzzw 01100100 10 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_e0 -FMLSLB_zzzw 01100100 10 1 ..... 10 1 00 0 ..... ..... @rda_rn_rm_e0 -FMLSLT_zzzw 01100100 10 1 ..... 10 1 00 1 ..... ..... @rda_rn_rm_e0 +FMLALB_zzzw 01100100 10 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_ex esz=2 +FMLALT_zzzw 01100100 10 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_ex esz=2 +FMLSLB_zzzw 01100100 10 1 ..... 10 1 00 0 ..... ..... @rda_rn_rm_ex esz=2 +FMLSLT_zzzw 01100100 10 1 ..... 10 1 00 1 ..... ..... @rda_rn_rm_ex esz=2 -BFMLALB_zzzw 01100100 11 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0 -BFMLALT_zzzw 01100100 11 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_e0 +BFMLALB_zzzw 01100100 11 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_ex esz=2 +BFMLALT_zzzw 01100100 11 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_ex esz=2 +BFMLSLB_zzzw 01100100 11 1 ..... 10 1 00 0 ..... ..... @rda_rn_rm_ex esz=2 +BFMLSLT_zzzw 01100100 11 1 ..... 10 1 00 1 ..... ..... @rda_rn_rm_ex esz=2 -### SVE2 floating-point bfloat16 dot-product -BFDOT_zzzz 01100100 01 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0 +### SVE2 floating-point dot-product +FDOT_zzzz 01100100 00 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_ex esz=2 +BFDOT_zzzz 01100100 01 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_ex esz=2 ### SVE2 floating-point multiply-add long (indexed) + FMLALB_zzxw 01100100 10 1 ..... 0100.0 ..... ..... @rrxr_3a esz=2 FMLALT_zzxw 01100100 10 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2 FMLSLB_zzxw 01100100 10 1 ..... 0110.0 ..... ..... @rrxr_3a esz=2 FMLSLT_zzxw 01100100 10 1 ..... 0110.1 ..... ..... @rrxr_3a esz=2 + BFMLALB_zzxw 01100100 11 1 ..... 0100.0 ..... ..... 
@rrxr_3a esz=2 BFMLALT_zzxw 01100100 11 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2 +BFMLSLB_zzxw 01100100 11 1 ..... 0110.0 ..... ..... @rrxr_3a esz=2 +BFMLSLT_zzxw 01100100 11 1 ..... 0110.1 ..... ..... @rrxr_3a esz=2 -### SVE2 floating-point bfloat16 dot-product (indexed) +### SVE2 floating-point dot-product (indexed) + +FDOT_zzxz 01100100 00 1 ..... 010000 ..... ..... @rrxr_2 esz=2 BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2 ### SVE broadcast predicate element @@ -1700,3 +1882,55 @@ PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \ SCLAMP 01000100 .. 0 ..... 110000 ..... ..... @rda_rn_rm UCLAMP 01000100 .. 0 ..... 110001 ..... ..... @rda_rn_rm + +FCLAMP 01100100 .. 1 ..... 001001 ..... ..... @rda_rn_rm + +### SVE2p1 multi-vec contiguous load + +&zcrr_ldst rd png rn rm esz nreg +&zcri_ldst rd png rn imm esz nreg +%png 10:3 !function=plus_8 +%zd_ax2 1:4 !function=times_2 +%zd_ax4 2:3 !function=times_4 + +LD1_zcrr 10100000000 rm:5 0 esz:2 ... rn:5 .... - \ + &zcrr_ldst %png rd=%zd_ax2 nreg=2 +LD1_zcrr 10100000000 rm:5 1 esz:2 ... rn:5 ... 0- \ + &zcrr_ldst %png rd=%zd_ax4 nreg=4 + +ST1_zcrr 10100000001 rm:5 0 esz:2 ... rn:5 .... - \ + &zcrr_ldst %png rd=%zd_ax2 nreg=2 +ST1_zcrr 10100000001 rm:5 1 esz:2 ... rn:5 ... 0- \ + &zcrr_ldst %png rd=%zd_ax4 nreg=4 + +LD1_zcri 101000000100 imm:s4 0 esz:2 ... rn:5 .... - \ + &zcri_ldst %png rd=%zd_ax2 nreg=2 +LD1_zcri 101000000100 imm:s4 1 esz:2 ... rn:5 ... 0- \ + &zcri_ldst %png rd=%zd_ax4 nreg=4 + +ST1_zcri 101000000110 imm:s4 0 esz:2 ... rn:5 .... - \ + &zcri_ldst %png rd=%zd_ax2 nreg=2 +ST1_zcri 101000000110 imm:s4 1 esz:2 ... rn:5 ... 0- \ + &zcri_ldst %png rd=%zd_ax4 nreg=4 + +# Note: N bit and 0 bit (for nreg4) still mashed in rd. +# This is handled within gen_ldst_c(). +LD1_zcrr_stride 10100001000 rm:5 0 esz:2 ... rn:5 rd:5 \ + &zcrr_ldst %png nreg=2 +LD1_zcrr_stride 10100001000 rm:5 1 esz:2 ... rn:5 rd:5 \ + &zcrr_ldst %png nreg=4 + +ST1_zcrr_stride 10100001001 rm:5 0 esz:2 ... rn:5 rd:5 \ + &zcrr_ldst %png nreg=2 +ST1_zcrr_stride 10100001001 rm:5 1 esz:2 ... rn:5 rd:5 \ + &zcrr_ldst %png nreg=4 + +LD1_zcri_stride 101000010100 imm:s4 0 esz:2 ... rn:5 rd:5 \ + &zcri_ldst %png nreg=2 +LD1_zcri_stride 101000010100 imm:s4 1 esz:2 ... rn:5 rd:5 \ + &zcri_ldst %png nreg=4 + +ST1_zcri_stride 101000010110 imm:s4 0 esz:2 ... rn:5 rd:5 \ + &zcri_ldst %png nreg=2 +ST1_zcri_stride 101000010110 imm:s4 1 esz:2 ... rn:5 rd:5 \ + &zcri_ldst %png nreg=4 diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index f1ee0e060ff..c442fcb540d 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -20,15 +20,22 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internals.h" -#include "exec/exec-all.h" #include "exec/page-protection.h" #include "exec/helper-proto.h" +#include "exec/target_page.h" +#include "exec/tlb-flags.h" #include "tcg/tcg-gvec-desc.h" #include "fpu/softfloat.h" #include "tcg/tcg.h" #include "vec_internal.h" #include "sve_ldst_internal.h" -#include "hw/core/tcg-cpu-ops.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/helper-retaddr.h" +#include "accel/tcg/cpu-ops.h" +#include "accel/tcg/probe.h" +#ifdef CONFIG_USER_ONLY +#include "user/page-protection.h" +#endif /* Return a value for NZCV as per the ARM PredTest pseudofunction. 
@@ -116,6 +123,11 @@ static inline uint64_t expand_pred_s(uint8_t byte) return word[byte & 0x11]; } +static inline uint64_t expand_pred_d(uint8_t byte) +{ + return -(uint64_t)(byte & 1); +} + #define LOGICAL_PPPP(NAME, FUNC) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ { \ @@ -199,6 +211,7 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \ #define DO_EOR(N, M) (N ^ M) #define DO_ORR(N, M) (N | M) #define DO_BIC(N, M) (N & ~M) +#define DO_ORC(N, M) (N | ~M) #define DO_ADD(N, M) (N + M) #define DO_SUB(N, M) (N - M) #define DO_MAX(N, M) ((N) >= (M) ? (N) : (M)) @@ -520,14 +533,9 @@ DO_ZPZZ(sve2_uhsub_zpzz_h, uint16_t, H1_2, DO_HSUB_BHS) DO_ZPZZ(sve2_uhsub_zpzz_s, uint32_t, H1_4, DO_HSUB_BHS) DO_ZPZZ_D(sve2_uhsub_zpzz_d, uint64_t, DO_HSUB_D) -static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max) -{ - return val >= max ? max : val <= min ? min : val; -} - -#define DO_SQADD_B(n, m) do_sat_bhs((int64_t)n + m, INT8_MIN, INT8_MAX) -#define DO_SQADD_H(n, m) do_sat_bhs((int64_t)n + m, INT16_MIN, INT16_MAX) -#define DO_SQADD_S(n, m) do_sat_bhs((int64_t)n + m, INT32_MIN, INT32_MAX) +#define DO_SQADD_B(n, m) do_ssat_b((int64_t)n + m) +#define DO_SQADD_H(n, m) do_ssat_h((int64_t)n + m) +#define DO_SQADD_S(n, m) do_ssat_s((int64_t)n + m) static inline int64_t do_sqadd_d(int64_t n, int64_t m) { @@ -544,9 +552,9 @@ DO_ZPZZ(sve2_sqadd_zpzz_h, int16_t, H1_2, DO_SQADD_H) DO_ZPZZ(sve2_sqadd_zpzz_s, int32_t, H1_4, DO_SQADD_S) DO_ZPZZ_D(sve2_sqadd_zpzz_d, int64_t, do_sqadd_d) -#define DO_UQADD_B(n, m) do_sat_bhs((int64_t)n + m, 0, UINT8_MAX) -#define DO_UQADD_H(n, m) do_sat_bhs((int64_t)n + m, 0, UINT16_MAX) -#define DO_UQADD_S(n, m) do_sat_bhs((int64_t)n + m, 0, UINT32_MAX) +#define DO_UQADD_B(n, m) do_usat_b((int64_t)n + m) +#define DO_UQADD_H(n, m) do_usat_h((int64_t)n + m) +#define DO_UQADD_S(n, m) do_usat_s((int64_t)n + m) static inline uint64_t do_uqadd_d(uint64_t n, uint64_t m) { @@ -559,9 +567,9 @@ DO_ZPZZ(sve2_uqadd_zpzz_h, uint16_t, H1_2, DO_UQADD_H) DO_ZPZZ(sve2_uqadd_zpzz_s, uint32_t, H1_4, DO_UQADD_S) DO_ZPZZ_D(sve2_uqadd_zpzz_d, uint64_t, do_uqadd_d) -#define DO_SQSUB_B(n, m) do_sat_bhs((int64_t)n - m, INT8_MIN, INT8_MAX) -#define DO_SQSUB_H(n, m) do_sat_bhs((int64_t)n - m, INT16_MIN, INT16_MAX) -#define DO_SQSUB_S(n, m) do_sat_bhs((int64_t)n - m, INT32_MIN, INT32_MAX) +#define DO_SQSUB_B(n, m) do_ssat_b((int64_t)n - m) +#define DO_SQSUB_H(n, m) do_ssat_h((int64_t)n - m) +#define DO_SQSUB_S(n, m) do_ssat_s((int64_t)n - m) static inline int64_t do_sqsub_d(int64_t n, int64_t m) { @@ -578,9 +586,9 @@ DO_ZPZZ(sve2_sqsub_zpzz_h, int16_t, H1_2, DO_SQSUB_H) DO_ZPZZ(sve2_sqsub_zpzz_s, int32_t, H1_4, DO_SQSUB_S) DO_ZPZZ_D(sve2_sqsub_zpzz_d, int64_t, do_sqsub_d) -#define DO_UQSUB_B(n, m) do_sat_bhs((int64_t)n - m, 0, UINT8_MAX) -#define DO_UQSUB_H(n, m) do_sat_bhs((int64_t)n - m, 0, UINT16_MAX) -#define DO_UQSUB_S(n, m) do_sat_bhs((int64_t)n - m, 0, UINT32_MAX) +#define DO_UQSUB_B(n, m) do_usat_b((int64_t)n - m) +#define DO_UQSUB_H(n, m) do_usat_h((int64_t)n - m) +#define DO_UQSUB_S(n, m) do_usat_s((int64_t)n - m) static inline uint64_t do_uqsub_d(uint64_t n, uint64_t m) { @@ -592,12 +600,9 @@ DO_ZPZZ(sve2_uqsub_zpzz_h, uint16_t, H1_2, DO_UQSUB_H) DO_ZPZZ(sve2_uqsub_zpzz_s, uint32_t, H1_4, DO_UQSUB_S) DO_ZPZZ_D(sve2_uqsub_zpzz_d, uint64_t, do_uqsub_d) -#define DO_SUQADD_B(n, m) \ - do_sat_bhs((int64_t)(int8_t)n + m, INT8_MIN, INT8_MAX) -#define DO_SUQADD_H(n, m) \ - do_sat_bhs((int64_t)(int16_t)n + m, INT16_MIN, INT16_MAX) -#define 
DO_SUQADD_S(n, m) \ - do_sat_bhs((int64_t)(int32_t)n + m, INT32_MIN, INT32_MAX) +#define DO_SUQADD_B(n, m) do_ssat_b((int64_t)(int8_t)n + m) +#define DO_SUQADD_H(n, m) do_ssat_h((int64_t)(int16_t)n + m) +#define DO_SUQADD_S(n, m) do_ssat_s((int64_t)(int32_t)n + m) static inline int64_t do_suqadd_d(int64_t n, uint64_t m) { @@ -627,12 +632,9 @@ DO_ZPZZ(sve2_suqadd_zpzz_h, uint16_t, H1_2, DO_SUQADD_H) DO_ZPZZ(sve2_suqadd_zpzz_s, uint32_t, H1_4, DO_SUQADD_S) DO_ZPZZ_D(sve2_suqadd_zpzz_d, uint64_t, do_suqadd_d) -#define DO_USQADD_B(n, m) \ - do_sat_bhs((int64_t)n + (int8_t)m, 0, UINT8_MAX) -#define DO_USQADD_H(n, m) \ - do_sat_bhs((int64_t)n + (int16_t)m, 0, UINT16_MAX) -#define DO_USQADD_S(n, m) \ - do_sat_bhs((int64_t)n + (int32_t)m, 0, UINT32_MAX) +#define DO_USQADD_B(n, m) do_usat_b((int64_t)n + (int8_t)m) +#define DO_USQADD_H(n, m) do_usat_h((int64_t)n + (int16_t)m) +#define DO_USQADD_S(n, m) do_usat_s((int64_t)n + (int32_t)m) static inline uint64_t do_usqadd_d(uint64_t n, int64_t m) { @@ -730,7 +732,7 @@ DO_ZPZZ_PAIR_D(sve2_sminp_zpzz_d, int64_t, DO_MIN) #define DO_ZPZZ_PAIR_FP(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \ - void *status, uint32_t desc) \ + float_status *status, uint32_t desc) \ { \ intptr_t i, opr_sz = simd_oprsz(desc); \ for (i = 0; i < opr_sz; ) { \ @@ -876,12 +878,28 @@ DO_ZPZ(sve_fabs_h, uint16_t, H1_2, DO_FABS) DO_ZPZ(sve_fabs_s, uint32_t, H1_4, DO_FABS) DO_ZPZ_D(sve_fabs_d, uint64_t, DO_FABS) +#define DO_AH_FABS_H(N) (float16_is_any_nan(N) ? (N) : DO_FABS(N)) +#define DO_AH_FABS_S(N) (float32_is_any_nan(N) ? (N) : DO_FABS(N)) +#define DO_AH_FABS_D(N) (float64_is_any_nan(N) ? (N) : DO_FABS(N)) + +DO_ZPZ(sve_ah_fabs_h, uint16_t, H1_2, DO_AH_FABS_H) +DO_ZPZ(sve_ah_fabs_s, uint32_t, H1_4, DO_AH_FABS_S) +DO_ZPZ_D(sve_ah_fabs_d, uint64_t, DO_AH_FABS_D) + #define DO_FNEG(N) (N ^ ~((__typeof(N))-1 >> 1)) DO_ZPZ(sve_fneg_h, uint16_t, H1_2, DO_FNEG) DO_ZPZ(sve_fneg_s, uint32_t, H1_4, DO_FNEG) DO_ZPZ_D(sve_fneg_d, uint64_t, DO_FNEG) +#define DO_AH_FNEG_H(N) (float16_is_any_nan(N) ? (N) : DO_FNEG(N)) +#define DO_AH_FNEG_S(N) (float32_is_any_nan(N) ? (N) : DO_FNEG(N)) +#define DO_AH_FNEG_D(N) (float64_is_any_nan(N) ? 
(N) : DO_FNEG(N)) + +DO_ZPZ(sve_ah_fneg_h, uint16_t, H1_2, DO_AH_FNEG_H) +DO_ZPZ(sve_ah_fneg_s, uint32_t, H1_4, DO_AH_FNEG_S) +DO_ZPZ_D(sve_ah_fneg_d, uint64_t, DO_AH_FNEG_D) + #define DO_NOT(N) (~N) DO_ZPZ(sve_not_zpz_b, uint8_t, H1, DO_NOT) @@ -1203,37 +1221,29 @@ void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \ } \ } -#define DO_SQXTN_H(n) do_sat_bhs(n, INT8_MIN, INT8_MAX) -#define DO_SQXTN_S(n) do_sat_bhs(n, INT16_MIN, INT16_MAX) -#define DO_SQXTN_D(n) do_sat_bhs(n, INT32_MIN, INT32_MAX) +DO_XTNB(sve2_sqxtnb_h, int16_t, do_ssat_b) +DO_XTNB(sve2_sqxtnb_s, int32_t, do_ssat_h) +DO_XTNB(sve2_sqxtnb_d, int64_t, do_ssat_s) -DO_XTNB(sve2_sqxtnb_h, int16_t, DO_SQXTN_H) -DO_XTNB(sve2_sqxtnb_s, int32_t, DO_SQXTN_S) -DO_XTNB(sve2_sqxtnb_d, int64_t, DO_SQXTN_D) +DO_XTNT(sve2_sqxtnt_h, int16_t, int8_t, H1, do_ssat_b) +DO_XTNT(sve2_sqxtnt_s, int32_t, int16_t, H1_2, do_ssat_h) +DO_XTNT(sve2_sqxtnt_d, int64_t, int32_t, H1_4, do_ssat_s) -DO_XTNT(sve2_sqxtnt_h, int16_t, int8_t, H1, DO_SQXTN_H) -DO_XTNT(sve2_sqxtnt_s, int32_t, int16_t, H1_2, DO_SQXTN_S) -DO_XTNT(sve2_sqxtnt_d, int64_t, int32_t, H1_4, DO_SQXTN_D) +DO_XTNB(sve2_uqxtnb_h, uint16_t, do_usat_b) +DO_XTNB(sve2_uqxtnb_s, uint32_t, do_usat_h) +DO_XTNB(sve2_uqxtnb_d, uint64_t, do_usat_s) -#define DO_UQXTN_H(n) do_sat_bhs(n, 0, UINT8_MAX) -#define DO_UQXTN_S(n) do_sat_bhs(n, 0, UINT16_MAX) -#define DO_UQXTN_D(n) do_sat_bhs(n, 0, UINT32_MAX) +DO_XTNT(sve2_uqxtnt_h, uint16_t, uint8_t, H1, do_usat_b) +DO_XTNT(sve2_uqxtnt_s, uint32_t, uint16_t, H1_2, do_usat_h) +DO_XTNT(sve2_uqxtnt_d, uint64_t, uint32_t, H1_4, do_usat_s) -DO_XTNB(sve2_uqxtnb_h, uint16_t, DO_UQXTN_H) -DO_XTNB(sve2_uqxtnb_s, uint32_t, DO_UQXTN_S) -DO_XTNB(sve2_uqxtnb_d, uint64_t, DO_UQXTN_D) +DO_XTNB(sve2_sqxtunb_h, int16_t, do_usat_b) +DO_XTNB(sve2_sqxtunb_s, int32_t, do_usat_h) +DO_XTNB(sve2_sqxtunb_d, int64_t, do_usat_s) -DO_XTNT(sve2_uqxtnt_h, uint16_t, uint8_t, H1, DO_UQXTN_H) -DO_XTNT(sve2_uqxtnt_s, uint32_t, uint16_t, H1_2, DO_UQXTN_S) -DO_XTNT(sve2_uqxtnt_d, uint64_t, uint32_t, H1_4, DO_UQXTN_D) - -DO_XTNB(sve2_sqxtunb_h, int16_t, DO_UQXTN_H) -DO_XTNB(sve2_sqxtunb_s, int32_t, DO_UQXTN_S) -DO_XTNB(sve2_sqxtunb_d, int64_t, DO_UQXTN_D) - -DO_XTNT(sve2_sqxtunt_h, int16_t, int8_t, H1, DO_UQXTN_H) -DO_XTNT(sve2_sqxtunt_s, int32_t, int16_t, H1_2, DO_UQXTN_S) -DO_XTNT(sve2_sqxtunt_d, int64_t, int32_t, H1_4, DO_UQXTN_D) +DO_XTNT(sve2_sqxtunt_h, int16_t, int8_t, H1, do_usat_b) +DO_XTNT(sve2_sqxtunt_s, int32_t, int16_t, H1_2, do_usat_h) +DO_XTNT(sve2_sqxtunt_d, int64_t, int32_t, H1_4, do_usat_s) #undef DO_XTNB #undef DO_XTNT @@ -1810,6 +1820,52 @@ DO_VPZ_D(sve_uminv_d, uint64_t, uint64_t, -1, DO_MIN) #undef DO_VPZ #undef DO_VPZ_D +#define DO_VPQ(NAME, TYPE, H, INIT, OP) \ +void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \ +{ \ + TYPE tmp[16 / sizeof(TYPE)] = { [0 ... 
16 / sizeof(TYPE) - 1] = INIT }; \ + TYPE *n = vn; uint16_t *g = vg; \ + uintptr_t oprsz = simd_oprsz(desc); \ + uintptr_t nseg = oprsz / 16, nsegelt = 16 / sizeof(TYPE); \ + for (uintptr_t s = 0; s < nseg; s++) { \ + uint16_t pg = g[H2(s)]; \ + for (uintptr_t e = 0; e < nsegelt; e++, pg >>= sizeof(TYPE)) { \ + if (pg & 1) { \ + tmp[e] = OP(tmp[H(e)], n[s * nsegelt + H(e)]); \ + } \ + } \ + } \ + memcpy(vd, tmp, 16); \ + clear_tail(vd, 16, simd_maxsz(desc)); \ +} + +DO_VPQ(sve2p1_addqv_b, uint8_t, H1, 0, DO_ADD) +DO_VPQ(sve2p1_addqv_h, uint16_t, H2, 0, DO_ADD) +DO_VPQ(sve2p1_addqv_s, uint32_t, H4, 0, DO_ADD) +DO_VPQ(sve2p1_addqv_d, uint64_t, H8, 0, DO_ADD) + +DO_VPQ(sve2p1_smaxqv_b, int8_t, H1, INT8_MIN, DO_MAX) +DO_VPQ(sve2p1_smaxqv_h, int16_t, H2, INT16_MIN, DO_MAX) +DO_VPQ(sve2p1_smaxqv_s, int32_t, H4, INT32_MIN, DO_MAX) +DO_VPQ(sve2p1_smaxqv_d, int64_t, H8, INT64_MIN, DO_MAX) + +DO_VPQ(sve2p1_sminqv_b, int8_t, H1, INT8_MAX, DO_MIN) +DO_VPQ(sve2p1_sminqv_h, int16_t, H2, INT16_MAX, DO_MIN) +DO_VPQ(sve2p1_sminqv_s, int32_t, H4, INT32_MAX, DO_MIN) +DO_VPQ(sve2p1_sminqv_d, int64_t, H8, INT64_MAX, DO_MIN) + +DO_VPQ(sve2p1_umaxqv_b, uint8_t, H1, 0, DO_MAX) +DO_VPQ(sve2p1_umaxqv_h, uint16_t, H2, 0, DO_MAX) +DO_VPQ(sve2p1_umaxqv_s, uint32_t, H4, 0, DO_MAX) +DO_VPQ(sve2p1_umaxqv_d, uint64_t, H8, 0, DO_MAX) + +DO_VPQ(sve2p1_uminqv_b, uint8_t, H1, -1, DO_MIN) +DO_VPQ(sve2p1_uminqv_h, uint16_t, H2, -1, DO_MIN) +DO_VPQ(sve2p1_uminqv_s, uint32_t, H4, -1, DO_MIN) +DO_VPQ(sve2p1_uminqv_d, uint64_t, H8, -1, DO_MIN) + +#undef DO_VPQ + /* Two vector operand, one scalar operand, unpredicated. */ #define DO_ZZI(NAME, TYPE, OP) \ void HELPER(NAME)(void *vd, void *vn, uint64_t s64, uint32_t desc) \ @@ -1850,10 +1906,46 @@ DO_ZZI(sve_umini_d, uint64_t, DO_MIN) #undef DO_ZZI +#define DO_LOGIC_QV(NAME, SUFF, INIT, VOP, POP) \ +void HELPER(NAME ## _ ## SUFF)(void *vd, void *vn, void *vg, uint32_t desc) \ +{ \ + unsigned seg = simd_oprsz(desc) / 16; \ + uint64_t r0 = INIT, r1 = INIT; \ + for (unsigned s = 0; s < seg; s++) { \ + uint64_t p0 = expand_pred_##SUFF(*(uint8_t *)(vg + H1(s * 2))); \ + uint64_t p1 = expand_pred_##SUFF(*(uint8_t *)(vg + H1(s * 2 + 1))); \ + uint64_t v0 = *(uint64_t *)(vn + s * 16); \ + uint64_t v1 = *(uint64_t *)(vn + s * 16 + 8); \ + v0 = POP(v0, p0), v1 = POP(v1, p1); \ + r0 = VOP(r0, v0), r1 = VOP(r1, v1); \ + } \ + *(uint64_t *)(vd + 0) = r0; \ + *(uint64_t *)(vd + 8) = r1; \ + clear_tail(vd, 16, simd_maxsz(desc)); \ +} + +DO_LOGIC_QV(sve2p1_orqv, b, 0, DO_ORR, DO_AND) +DO_LOGIC_QV(sve2p1_orqv, h, 0, DO_ORR, DO_AND) +DO_LOGIC_QV(sve2p1_orqv, s, 0, DO_ORR, DO_AND) +DO_LOGIC_QV(sve2p1_orqv, d, 0, DO_ORR, DO_AND) + +DO_LOGIC_QV(sve2p1_eorqv, b, 0, DO_EOR, DO_AND) +DO_LOGIC_QV(sve2p1_eorqv, h, 0, DO_EOR, DO_AND) +DO_LOGIC_QV(sve2p1_eorqv, s, 0, DO_EOR, DO_AND) +DO_LOGIC_QV(sve2p1_eorqv, d, 0, DO_EOR, DO_AND) + +DO_LOGIC_QV(sve2p1_andqv, b, -1, DO_AND, DO_ORC) +DO_LOGIC_QV(sve2p1_andqv, h, -1, DO_AND, DO_ORC) +DO_LOGIC_QV(sve2p1_andqv, s, -1, DO_AND, DO_ORC) +DO_LOGIC_QV(sve2p1_andqv, d, -1, DO_AND, DO_ORC) + +#undef DO_LOGIC_QV + #undef DO_AND #undef DO_ORR #undef DO_EOR #undef DO_BIC +#undef DO_ORC #undef DO_ADD #undef DO_SUB #undef DO_MAX @@ -2046,27 +2138,6 @@ void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \ when N is negative, add 2**M-1. */ #define DO_ASRD(N, M) ((N + (N < 0 ? 
((__typeof(N))1 << M) - 1 : 0)) >> M) -static inline uint64_t do_urshr(uint64_t x, unsigned sh) -{ - if (likely(sh < 64)) { - return (x >> sh) + ((x >> (sh - 1)) & 1); - } else if (sh == 64) { - return x >> 63; - } else { - return 0; - } -} - -static inline int64_t do_srshr(int64_t x, unsigned sh) -{ - if (likely(sh < 64)) { - return (x >> sh) + ((x >> (sh - 1)) & 1); - } else { - /* Rounding the sign bit always produces 0. */ - return 0; - } -} - DO_ZPZI(sve_asr_zpzi_b, int8_t, H1, DO_SHR) DO_ZPZI(sve_asr_zpzi_h, int16_t, H1_2, DO_SHR) DO_ZPZI(sve_asr_zpzi_s, int32_t, H1_4, DO_SHR) @@ -2164,10 +2235,9 @@ DO_SHRNT(sve2_rshrnt_h, uint16_t, uint8_t, H1_2, H1, do_urshr) DO_SHRNT(sve2_rshrnt_s, uint32_t, uint16_t, H1_4, H1_2, do_urshr) DO_SHRNT(sve2_rshrnt_d, uint64_t, uint32_t, H1_8, H1_4, do_urshr) -#define DO_SQSHRUN_H(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT8_MAX) -#define DO_SQSHRUN_S(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT16_MAX) -#define DO_SQSHRUN_D(x, sh) \ - do_sat_bhs((int64_t)(x) >> (sh < 64 ? sh : 63), 0, UINT32_MAX) +#define DO_SQSHRUN_H(x, sh) do_usat_b((int64_t)(x) >> sh) +#define DO_SQSHRUN_S(x, sh) do_usat_h((int64_t)(x) >> sh) +#define DO_SQSHRUN_D(x, sh) do_usat_s((int64_t)(x) >> (sh < 64 ? sh : 63)) DO_SHRNB(sve2_sqshrunb_h, int16_t, uint8_t, DO_SQSHRUN_H) DO_SHRNB(sve2_sqshrunb_s, int32_t, uint16_t, DO_SQSHRUN_S) @@ -2177,9 +2247,9 @@ DO_SHRNT(sve2_sqshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRUN_H) DO_SHRNT(sve2_sqshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRUN_S) DO_SHRNT(sve2_sqshrunt_d, int64_t, uint32_t, H1_8, H1_4, DO_SQSHRUN_D) -#define DO_SQRSHRUN_H(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT8_MAX) -#define DO_SQRSHRUN_S(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT16_MAX) -#define DO_SQRSHRUN_D(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT32_MAX) +#define DO_SQRSHRUN_H(x, sh) do_usat_b(do_srshr(x, sh)) +#define DO_SQRSHRUN_S(x, sh) do_usat_h(do_srshr(x, sh)) +#define DO_SQRSHRUN_D(x, sh) do_usat_s(do_srshr(x, sh)) DO_SHRNB(sve2_sqrshrunb_h, int16_t, uint8_t, DO_SQRSHRUN_H) DO_SHRNB(sve2_sqrshrunb_s, int32_t, uint16_t, DO_SQRSHRUN_S) @@ -2189,9 +2259,9 @@ DO_SHRNT(sve2_sqrshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRUN_H) DO_SHRNT(sve2_sqrshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRUN_S) DO_SHRNT(sve2_sqrshrunt_d, int64_t, uint32_t, H1_8, H1_4, DO_SQRSHRUN_D) -#define DO_SQSHRN_H(x, sh) do_sat_bhs(x >> sh, INT8_MIN, INT8_MAX) -#define DO_SQSHRN_S(x, sh) do_sat_bhs(x >> sh, INT16_MIN, INT16_MAX) -#define DO_SQSHRN_D(x, sh) do_sat_bhs(x >> sh, INT32_MIN, INT32_MAX) +#define DO_SQSHRN_H(x, sh) do_ssat_b(x >> sh) +#define DO_SQSHRN_S(x, sh) do_ssat_h(x >> sh) +#define DO_SQSHRN_D(x, sh) do_ssat_s(x >> sh) DO_SHRNB(sve2_sqshrnb_h, int16_t, uint8_t, DO_SQSHRN_H) DO_SHRNB(sve2_sqshrnb_s, int32_t, uint16_t, DO_SQSHRN_S) @@ -2201,9 +2271,9 @@ DO_SHRNT(sve2_sqshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRN_H) DO_SHRNT(sve2_sqshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRN_S) DO_SHRNT(sve2_sqshrnt_d, int64_t, uint32_t, H1_8, H1_4, DO_SQSHRN_D) -#define DO_SQRSHRN_H(x, sh) do_sat_bhs(do_srshr(x, sh), INT8_MIN, INT8_MAX) -#define DO_SQRSHRN_S(x, sh) do_sat_bhs(do_srshr(x, sh), INT16_MIN, INT16_MAX) -#define DO_SQRSHRN_D(x, sh) do_sat_bhs(do_srshr(x, sh), INT32_MIN, INT32_MAX) +#define DO_SQRSHRN_H(x, sh) do_ssat_b(do_srshr(x, sh)) +#define DO_SQRSHRN_S(x, sh) do_ssat_h(do_srshr(x, sh)) +#define DO_SQRSHRN_D(x, sh) do_ssat_s(do_srshr(x, sh)) DO_SHRNB(sve2_sqrshrnb_h, int16_t, uint8_t, DO_SQRSHRN_H) DO_SHRNB(sve2_sqrshrnb_s, int32_t, uint16_t, DO_SQRSHRN_S) @@ 
-2536,6 +2606,7 @@ void HELPER(sve_fexpa_d)(void *vd, void *vn, uint32_t desc) void HELPER(sve_ftssel_h)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 2; + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT, 1); uint16_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i += 1) { uint16_t nn = n[i]; @@ -2543,13 +2614,17 @@ void HELPER(sve_ftssel_h)(void *vd, void *vn, void *vm, uint32_t desc) if (mm & 1) { nn = float16_one; } - d[i] = nn ^ (mm & 2) << 14; + if (mm & 2) { + nn = float16_maybe_ah_chs(nn, fpcr_ah); + } + d[i] = nn; } } void HELPER(sve_ftssel_s)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 4; + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT, 1); uint32_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i += 1) { uint32_t nn = n[i]; @@ -2557,13 +2632,17 @@ void HELPER(sve_ftssel_s)(void *vd, void *vn, void *vm, uint32_t desc) if (mm & 1) { nn = float32_one; } - d[i] = nn ^ (mm & 2) << 30; + if (mm & 2) { + nn = float32_maybe_ah_chs(nn, fpcr_ah); + } + d[i] = nn; } } void HELPER(sve_ftssel_d)(void *vd, void *vn, void *vm, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT, 1); uint64_t *d = vd, *n = vn, *m = vm; for (i = 0; i < opr_sz; i += 1) { uint64_t nn = n[i]; @@ -2571,7 +2650,10 @@ void HELPER(sve_ftssel_d)(void *vd, void *vn, void *vm, uint32_t desc) if (mm & 1) { nn = float64_one; } - d[i] = nn ^ (mm & 2) << 62; + if (mm & 2) { + nn = float64_maybe_ah_chs(nn, fpcr_ah); + } + d[i] = nn; } } @@ -2953,6 +3035,56 @@ void HELPER(sve_rev_d)(void *vd, void *vn, uint32_t desc) } } +/* + * TODO: This could use half_shuffle64 and similar bit tricks to + * expand blocks of bits at once. + */ +#define DO_PMOV_PV(NAME, ESIZE) \ +void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \ +{ \ + unsigned vl = simd_oprsz(desc); \ + unsigned idx = simd_data(desc); \ + unsigned elements = vl / ESIZE; \ + ARMPredicateReg *d = vd; \ + ARMVectorReg *s = vs; \ + memset(d, 0, sizeof(*d)); \ + for (unsigned e = 0; e < elements; ++e) { \ + depositn(d->p, e * ESIZE, 1, extractn(s->d, elements * idx + e, 1)); \ + } \ +} + +DO_PMOV_PV(pmov_pv_h, 2) +DO_PMOV_PV(pmov_pv_s, 4) +DO_PMOV_PV(pmov_pv_d, 8) + +#undef DO_PMOV_PV + +/* + * TODO: This could use half_unshuffle64 and similar bit tricks to + * compress blocks of bits at once. 
+ */ +#define DO_PMOV_VP(NAME, ESIZE) \ +void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \ +{ \ + unsigned vl = simd_oprsz(desc); \ + unsigned idx = simd_data(desc); \ + unsigned elements = vl / ESIZE; \ + ARMVectorReg *d = vd; \ + ARMPredicateReg *s = vs; \ + if (idx == 0) { \ + memset(d, 0, vl); \ + } \ + for (unsigned e = 0; e < elements; ++e) { \ + depositn(d->d, elements * idx + e, 1, extractn(s->p, e * ESIZE, 1)); \ + } \ +} + +DO_PMOV_VP(pmov_vp_h, 2) +DO_PMOV_VP(pmov_vp_s, 4) +DO_PMOV_VP(pmov_vp_d, 8) + +#undef DO_PMOV_VP + typedef void tb_impl_fn(void *, void *, void *, void *, uintptr_t, bool); static inline void do_tbl1(void *vd, void *vn, void *vm, uint32_t desc, @@ -3418,6 +3550,45 @@ DO_UZP(sve_uzp_s, uint32_t, H1_4) DO_UZP(sve_uzp_d, uint64_t, H1_8) DO_UZP(sve2_uzp_q, Int128, ) +typedef void perseg_zzz_fn(void *vd, void *vn, void *vm, uint32_t desc); + +static void do_perseg_zzz(void *vd, void *vn, void *vm, + uint32_t desc, perseg_zzz_fn *fn) +{ + intptr_t oprsz = simd_oprsz(desc); + + desc = simd_desc(16, 16, simd_data(desc)); + for (intptr_t i = 0; i < oprsz; i += 16) { + fn(vd + i, vn + i, vm + i, desc); + } +} + +#define DO_PERSEG_ZZZ(NAME, FUNC) \ + void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ + { do_perseg_zzz(vd, vn, vm, desc, FUNC); } + +DO_PERSEG_ZZZ(sve2p1_uzpq_b, helper_sve_uzp_b) +DO_PERSEG_ZZZ(sve2p1_uzpq_h, helper_sve_uzp_h) +DO_PERSEG_ZZZ(sve2p1_uzpq_s, helper_sve_uzp_s) +DO_PERSEG_ZZZ(sve2p1_uzpq_d, helper_sve_uzp_d) + +DO_PERSEG_ZZZ(sve2p1_zipq_b, helper_sve_zip_b) +DO_PERSEG_ZZZ(sve2p1_zipq_h, helper_sve_zip_h) +DO_PERSEG_ZZZ(sve2p1_zipq_s, helper_sve_zip_s) +DO_PERSEG_ZZZ(sve2p1_zipq_d, helper_sve_zip_d) + +DO_PERSEG_ZZZ(sve2p1_tblq_b, helper_sve_tbl_b) +DO_PERSEG_ZZZ(sve2p1_tblq_h, helper_sve_tbl_h) +DO_PERSEG_ZZZ(sve2p1_tblq_s, helper_sve_tbl_s) +DO_PERSEG_ZZZ(sve2p1_tblq_d, helper_sve_tbl_d) + +DO_PERSEG_ZZZ(sve2p1_tbxq_b, helper_sve2_tbx_b) +DO_PERSEG_ZZZ(sve2p1_tbxq_h, helper_sve2_tbx_h) +DO_PERSEG_ZZZ(sve2p1_tbxq_s, helper_sve2_tbx_s) +DO_PERSEG_ZZZ(sve2p1_tbxq_d, helper_sve2_tbx_d) + +#undef DO_PERSEG_ZZZ + #define DO_TRN(NAME, TYPE, H) \ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ { \ @@ -3958,15 +4129,6 @@ static uint32_t compute_brks_m(uint64_t *d, uint64_t *n, uint64_t *g, return flags; } -static uint32_t do_zero(ARMPredicateReg *d, intptr_t oprsz) -{ - /* It is quicker to zero the whole predicate than loop on OPRSZ. - * The compiler should turn this into 4 64-bit integer stores. 
- */ - memset(d, 0, sizeof(ARMPredicateReg)); - return PREDTEST_INIT; -} - void HELPER(sve_brkpa)(void *vd, void *vn, void *vm, void *vg, uint32_t pred_desc) { @@ -3974,7 +4136,7 @@ void HELPER(sve_brkpa)(void *vd, void *vn, void *vm, void *vg, if (last_active_pred(vn, vg, oprsz)) { compute_brk_z(vd, vm, vg, oprsz, true); } else { - do_zero(vd, oprsz); + memset(vd, 0, sizeof(ARMPredicateReg)); } } @@ -3985,7 +4147,8 @@ uint32_t HELPER(sve_brkpas)(void *vd, void *vn, void *vm, void *vg, if (last_active_pred(vn, vg, oprsz)) { return compute_brks_z(vd, vm, vg, oprsz, true); } else { - return do_zero(vd, oprsz); + memset(vd, 0, sizeof(ARMPredicateReg)); + return PREDTEST_INIT; } } @@ -3996,7 +4159,7 @@ void HELPER(sve_brkpb)(void *vd, void *vn, void *vm, void *vg, if (last_active_pred(vn, vg, oprsz)) { compute_brk_z(vd, vm, vg, oprsz, false); } else { - do_zero(vd, oprsz); + memset(vd, 0, sizeof(ARMPredicateReg)); } } @@ -4007,7 +4170,8 @@ uint32_t HELPER(sve_brkpbs)(void *vd, void *vn, void *vm, void *vg, if (last_active_pred(vn, vg, oprsz)) { return compute_brks_z(vd, vm, vg, oprsz, false); } else { - return do_zero(vd, oprsz); + memset(vd, 0, sizeof(ARMPredicateReg)); + return PREDTEST_INIT; } } @@ -4063,35 +4227,30 @@ void HELPER(sve_brkn)(void *vd, void *vn, void *vg, uint32_t pred_desc) { intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ); if (!last_active_pred(vn, vg, oprsz)) { - do_zero(vd, oprsz); - } -} - -/* As if PredTest(Ones(PL), D, esz). */ -static uint32_t predtest_ones(ARMPredicateReg *d, intptr_t oprsz, - uint64_t esz_mask) -{ - uint32_t flags = PREDTEST_INIT; - intptr_t i; - - for (i = 0; i < oprsz / 8; i++) { - flags = iter_predtest_fwd(d->p[i], esz_mask, flags); - } - if (oprsz & 7) { - uint64_t mask = ~(-1ULL << (8 * (oprsz & 7))); - flags = iter_predtest_fwd(d->p[i], esz_mask & mask, flags); + memset(vd, 0, sizeof(ARMPredicateReg)); } - return flags; } uint32_t HELPER(sve_brkns)(void *vd, void *vn, void *vg, uint32_t pred_desc) { intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ); if (last_active_pred(vn, vg, oprsz)) { - return predtest_ones(vd, oprsz, -1); - } else { - return do_zero(vd, oprsz); + ARMPredicateReg *d = vd; + uint32_t flags = PREDTEST_INIT; + intptr_t i; + + /* As if PredTest(Ones(PL), D, MO_8). 
*/ + for (i = 0; i < oprsz / 8; i++) { + flags = iter_predtest_fwd(d->p[i], -1, flags); + } + if (oprsz & 7) { + uint64_t mask = ~(-1ULL << (8 * (oprsz & 7))); + flags = iter_predtest_fwd(d->p[i], mask, flags); + } + return flags; } + memset(vd, 0, sizeof(ARMPredicateReg)); + return PREDTEST_INIT; } uint64_t HELPER(sve_cntp)(void *vn, void *vg, uint32_t pred_desc) @@ -4108,66 +4267,200 @@ uint64_t HELPER(sve_cntp)(void *vn, void *vg, uint32_t pred_desc) return sum; } -uint32_t HELPER(sve_whilel)(void *vd, uint32_t count, uint32_t pred_desc) +uint64_t HELPER(sve2p1_cntp_c)(uint32_t png, uint32_t desc) { - intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ); - intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ); - uint64_t esz_mask = pred_esz_masks[esz]; - ARMPredicateReg *d = vd; - uint32_t flags; - intptr_t i; + int pl = FIELD_EX32(desc, PREDDESC, OPRSZ); + int vl = pl * 8; + unsigned v_esz = FIELD_EX32(desc, PREDDESC, ESZ); + int lg2_width = FIELD_EX32(desc, PREDDESC, DATA) + 1; + DecodeCounter p = decode_counter(png, vl, v_esz); + unsigned maxelem = (vl << lg2_width) >> v_esz; + unsigned count = p.count; + + if (p.invert) { + if (count >= maxelem) { + return 0; + } + count = maxelem - count; + } else { + count = MIN(count, maxelem); + } + return count >> p.lg2_stride; +} + +/* C.f. Arm pseudocode EncodePredCount */ +static uint64_t encode_pred_count(uint32_t elements, uint32_t count, + uint32_t esz, bool invert) +{ + uint32_t pred; - /* Begin with a zero predicate register. */ - flags = do_zero(d, oprsz); if (count == 0) { - return flags; + return 0; + } + if (invert) { + count = elements - count; + } else if (count == elements) { + count = 0; + invert = true; } - /* Set all of the requested bits. */ - for (i = 0; i < count / 64; ++i) { - d->p[i] = esz_mask; + pred = (count << 1) | 1; + pred <<= esz; + pred |= invert << 15; + + return pred; +} + +/* C.f. Arm pseudocode PredCountTest */ +static uint32_t pred_count_test(uint32_t elements, uint32_t count, bool invert) +{ + uint32_t flags; + + if (count == 0) { + flags = 1; /* !N, Z, C */ + } else if (!invert) { + flags = (1u << 31) | 2; /* N, !Z */ + flags |= count != elements; /* C */ + } else { + flags = 2; /* !Z, !C */ + flags |= (count == elements) << 31; /* N */ } - if (count & 63) { - d->p[i] = MAKE_64BIT_MASK(0, count & 63) & esz_mask; + return flags; +} + +/* D must be cleared on entry. */ +static void do_whilel(ARMPredicateReg *d, uint64_t esz_mask, + uint32_t count, uint32_t oprbits) +{ + tcg_debug_assert(count <= oprbits); + if (count) { + uint32_t i; + + /* Set all of the requested bits. 
*/ + for (i = 0; i < count / 64; ++i) { + d->p[i] = esz_mask; + } + if (count & 63) { + d->p[i] = MAKE_64BIT_MASK(0, count & 63) & esz_mask; + } } +} + +uint32_t HELPER(sve_whilel)(void *vd, uint32_t count, uint32_t pred_desc) +{ + uint32_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ); + uint32_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ); + uint32_t oprbits = oprsz * 8; + uint64_t esz_mask = pred_esz_masks[esz]; + ARMPredicateReg *d = vd; - return predtest_ones(d, oprsz, esz_mask); + count <<= esz; + memset(d, 0, sizeof(*d)); + do_whilel(d, esz_mask, count, oprbits); + return pred_count_test(oprbits, count, false); } -uint32_t HELPER(sve_whileg)(void *vd, uint32_t count, uint32_t pred_desc) +uint32_t HELPER(sve_while2l)(void *vd, uint32_t count, uint32_t pred_desc) { - intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ); - intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ); + uint32_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ); + uint32_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ); + uint32_t oprbits = oprsz * 8; uint64_t esz_mask = pred_esz_masks[esz]; ARMPredicateReg *d = vd; - intptr_t i, invcount, oprbits; - uint64_t bits; - if (count == 0) { - return do_zero(d, oprsz); + count <<= esz; + memset(d, 0, 2 * sizeof(*d)); + if (count <= oprbits) { + do_whilel(&d[0], esz_mask, count, oprbits); + } else { + do_whilel(&d[0], esz_mask, oprbits, oprbits); + do_whilel(&d[1], esz_mask, count - oprbits, oprbits); } - oprbits = oprsz * 8; + return pred_count_test(2 * oprbits, count, false); +} + +uint32_t HELPER(sve_whilecl)(void *vd, uint32_t count, uint32_t pred_desc) +{ + uint32_t pl = FIELD_EX32(pred_desc, PREDDESC, OPRSZ); + uint32_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ); + uint32_t scale = FIELD_EX32(pred_desc, PREDDESC, DATA); + uint32_t vl = pl * 8; + uint32_t elements = (vl >> esz) << scale; + ARMPredicateReg *d = vd; + + *d = (ARMPredicateReg) { + .p[0] = encode_pred_count(elements, count, esz, false) + }; + return pred_count_test(elements, count, false); +} + +/* D must be cleared on entry. 
*/ +static void do_whileg(ARMPredicateReg *d, uint64_t esz_mask, + uint32_t count, uint32_t oprbits) +{ tcg_debug_assert(count <= oprbits); + if (count) { + uint32_t i, invcount = oprbits - count; + uint64_t bits = esz_mask & MAKE_64BIT_MASK(invcount & 63, 64); - bits = esz_mask; - if (oprbits & 63) { - bits &= MAKE_64BIT_MASK(0, oprbits & 63); + for (i = invcount / 64; i < oprbits / 64; ++i) { + d->p[i] = bits; + bits = esz_mask; + } + if (oprbits & 63) { + d->p[i] = bits & MAKE_64BIT_MASK(0, oprbits & 63); + } } +} - invcount = oprbits - count; - for (i = (oprsz - 1) / 8; i > invcount / 64; --i) { - d->p[i] = bits; - bits = esz_mask; - } +uint32_t HELPER(sve_whileg)(void *vd, uint32_t count, uint32_t pred_desc) +{ + uint32_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ); + uint32_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ); + uint32_t oprbits = oprsz * 8; + uint64_t esz_mask = pred_esz_masks[esz]; + ARMPredicateReg *d = vd; - d->p[i] = bits & MAKE_64BIT_MASK(invcount & 63, 64); + count <<= esz; + memset(d, 0, sizeof(*d)); + do_whileg(d, esz_mask, count, oprbits); + return pred_count_test(oprbits, count, true); +} + +uint32_t HELPER(sve_while2g)(void *vd, uint32_t count, uint32_t pred_desc) +{ + uint32_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ); + uint32_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ); + uint32_t oprbits = oprsz * 8; + uint64_t esz_mask = pred_esz_masks[esz]; + ARMPredicateReg *d = vd; - while (--i >= 0) { - d->p[i] = 0; + count <<= esz; + memset(d, 0, 2 * sizeof(*d)); + if (count <= oprbits) { + do_whileg(&d[1], esz_mask, count, oprbits); + } else { + do_whilel(&d[1], esz_mask, oprbits, oprbits); + do_whileg(&d[0], esz_mask, count - oprbits, oprbits); } - return predtest_ones(d, oprsz, esz_mask); + return pred_count_test(2 * oprbits, count, true); +} + +uint32_t HELPER(sve_whilecg)(void *vd, uint32_t count, uint32_t pred_desc) +{ + uint32_t pl = FIELD_EX32(pred_desc, PREDDESC, OPRSZ); + uint32_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ); + uint32_t scale = FIELD_EX32(pred_desc, PREDDESC, DATA); + uint32_t vl = pl * 8; + uint32_t elements = (vl >> esz) << scale; + ARMPredicateReg *d = vd; + + *d = (ARMPredicateReg) { + .p[0] = encode_pred_count(elements, count, esz, true) + }; + return pred_count_test(elements, count, true); } /* Recursive reduction on a function; @@ -4178,61 +4471,93 @@ uint32_t HELPER(sve_whileg)(void *vd, uint32_t count, uint32_t pred_desc) * The recursion is bounded to depth 7 (128 fp16 elements), so there's * little to gain with a more complex non-recursive form. 
*/ -#define DO_REDUCE(NAME, TYPE, H, FUNC, IDENT) \ -static TYPE NAME##_reduce(TYPE *data, float_status *status, uintptr_t n) \ +#define DO_REDUCE(NAME, SUF, TYPE, H, FUNC, IDENT) \ +static TYPE FUNC##_reduce(TYPE *data, float_status *status, uintptr_t n) \ { \ if (n == 1) { \ return *data; \ } else { \ uintptr_t half = n / 2; \ - TYPE lo = NAME##_reduce(data, status, half); \ - TYPE hi = NAME##_reduce(data + half, status, half); \ - return TYPE##_##FUNC(lo, hi, status); \ + TYPE lo = FUNC##_reduce(data, status, half); \ + TYPE hi = FUNC##_reduce(data + half, status, half); \ + return FUNC(lo, hi, status); \ } \ } \ -uint64_t HELPER(NAME)(void *vn, void *vg, void *vs, uint32_t desc) \ +uint64_t helper_sve_##NAME##v_##SUF(void *vn, void *vg, \ + float_status *status, uint32_t desc) \ { \ uintptr_t i, oprsz = simd_oprsz(desc), maxsz = simd_data(desc); \ TYPE data[sizeof(ARMVectorReg) / sizeof(TYPE)]; \ + TYPE ident = IDENT; \ for (i = 0; i < oprsz; ) { \ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \ do { \ TYPE nn = *(TYPE *)(vn + H(i)); \ - *(TYPE *)((void *)data + i) = (pg & 1 ? nn : IDENT); \ + *(TYPE *)((void *)data + i) = (pg & 1 ? nn : ident); \ i += sizeof(TYPE), pg >>= sizeof(TYPE); \ } while (i & 15); \ } \ for (; i < maxsz; i += sizeof(TYPE)) { \ - *(TYPE *)((void *)data + i) = IDENT; \ + *(TYPE *)((void *)data + i) = ident; \ } \ - return NAME##_reduce(data, vs, maxsz / sizeof(TYPE)); \ + return FUNC##_reduce(data, status, maxsz / sizeof(TYPE)); \ +} \ +void helper_sve2p1_##NAME##qv_##SUF(void *vd, void *vn, void *vg, \ + float_status *status, uint32_t desc) \ +{ \ + unsigned oprsz = simd_oprsz(desc), segments = oprsz / 16; \ + TYPE ident = IDENT; \ + for (unsigned e = 0; e < 16; e += sizeof(TYPE)) { \ + TYPE data[ARM_MAX_VQ]; \ + for (unsigned s = 0; s < segments; s++) { \ + uint16_t pg = *(uint16_t *)(vg + H1_2(s * 2)); \ + TYPE nn = *(TYPE *)(vn + (s * 16 + H(e))); \ + data[s] = (pg >> e) & 1 ? nn : ident; \ + } \ + *(TYPE *)(vd + H(e)) = FUNC##_reduce(data, status, segments); \ + } \ + clear_tail(vd, 16, simd_maxsz(desc)); \ } -DO_REDUCE(sve_faddv_h, float16, H1_2, add, float16_zero) -DO_REDUCE(sve_faddv_s, float32, H1_4, add, float32_zero) -DO_REDUCE(sve_faddv_d, float64, H1_8, add, float64_zero) +DO_REDUCE(fadd,h, float16, H1_2, float16_add, float16_zero) +DO_REDUCE(fadd,s, float32, H1_4, float32_add, float32_zero) +DO_REDUCE(fadd,d, float64, H1_8, float64_add, float64_zero) + +/* + * We can't avoid the function call for the default NaN value, because + * it changes when FPCR.AH is set. + */ +DO_REDUCE(fminnm,h, float16, H1_2, float16_minnum, float16_default_nan(status)) +DO_REDUCE(fminnm,s, float32, H1_4, float32_minnum, float32_default_nan(status)) +DO_REDUCE(fminnm,d, float64, H1_8, float64_minnum, float64_default_nan(status)) -/* Identity is floatN_default_nan, without the function call. 
*/ -DO_REDUCE(sve_fminnmv_h, float16, H1_2, minnum, 0x7E00) -DO_REDUCE(sve_fminnmv_s, float32, H1_4, minnum, 0x7FC00000) -DO_REDUCE(sve_fminnmv_d, float64, H1_8, minnum, 0x7FF8000000000000ULL) +DO_REDUCE(fmaxnm,h, float16, H1_2, float16_maxnum, float16_default_nan(status)) +DO_REDUCE(fmaxnm,s, float32, H1_4, float32_maxnum, float32_default_nan(status)) +DO_REDUCE(fmaxnm,d, float64, H1_8, float64_maxnum, float64_default_nan(status)) -DO_REDUCE(sve_fmaxnmv_h, float16, H1_2, maxnum, 0x7E00) -DO_REDUCE(sve_fmaxnmv_s, float32, H1_4, maxnum, 0x7FC00000) -DO_REDUCE(sve_fmaxnmv_d, float64, H1_8, maxnum, 0x7FF8000000000000ULL) +DO_REDUCE(fmin,h, float16, H1_2, float16_min, float16_infinity) +DO_REDUCE(fmin,s, float32, H1_4, float32_min, float32_infinity) +DO_REDUCE(fmin,d, float64, H1_8, float64_min, float64_infinity) -DO_REDUCE(sve_fminv_h, float16, H1_2, min, float16_infinity) -DO_REDUCE(sve_fminv_s, float32, H1_4, min, float32_infinity) -DO_REDUCE(sve_fminv_d, float64, H1_8, min, float64_infinity) +DO_REDUCE(fmax,h, float16, H1_2, float16_max, float16_chs(float16_infinity)) +DO_REDUCE(fmax,s, float32, H1_4, float32_max, float32_chs(float32_infinity)) +DO_REDUCE(fmax,d, float64, H1_8, float64_max, float64_chs(float64_infinity)) -DO_REDUCE(sve_fmaxv_h, float16, H1_2, max, float16_chs(float16_infinity)) -DO_REDUCE(sve_fmaxv_s, float32, H1_4, max, float32_chs(float32_infinity)) -DO_REDUCE(sve_fmaxv_d, float64, H1_8, max, float64_chs(float64_infinity)) +DO_REDUCE(ah_fmin,h, float16, H1_2, helper_vfp_ah_minh, float16_infinity) +DO_REDUCE(ah_fmin,s, float32, H1_4, helper_vfp_ah_mins, float32_infinity) +DO_REDUCE(ah_fmin,d, float64, H1_8, helper_vfp_ah_mind, float64_infinity) + +DO_REDUCE(ah_fmax,h, float16, H1_2, helper_vfp_ah_maxh, + float16_chs(float16_infinity)) +DO_REDUCE(ah_fmax,s, float32, H1_4, helper_vfp_ah_maxs, + float32_chs(float32_infinity)) +DO_REDUCE(ah_fmax,d, float64, H1_8, helper_vfp_ah_maxd, + float64_chs(float64_infinity)) #undef DO_REDUCE uint64_t HELPER(sve_fadda_h)(uint64_t nn, void *vm, void *vg, - void *status, uint32_t desc) + float_status *status, uint32_t desc) { intptr_t i = 0, opr_sz = simd_oprsz(desc); float16 result = nn; @@ -4252,7 +4577,7 @@ uint64_t HELPER(sve_fadda_h)(uint64_t nn, void *vm, void *vg, } uint64_t HELPER(sve_fadda_s)(uint64_t nn, void *vm, void *vg, - void *status, uint32_t desc) + float_status *status, uint32_t desc) { intptr_t i = 0, opr_sz = simd_oprsz(desc); float32 result = nn; @@ -4272,7 +4597,7 @@ uint64_t HELPER(sve_fadda_s)(uint64_t nn, void *vm, void *vg, } uint64_t HELPER(sve_fadda_d)(uint64_t nn, void *vm, void *vg, - void *status, uint32_t desc) + float_status *status, uint32_t desc) { intptr_t i = 0, opr_sz = simd_oprsz(desc) / 8; uint64_t *m = vm; @@ -4292,7 +4617,7 @@ uint64_t HELPER(sve_fadda_d)(uint64_t nn, void *vm, void *vg, */ #define DO_ZPZZ_FP(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \ - void *status, uint32_t desc) \ + float_status *status, uint32_t desc) \ { \ intptr_t i = simd_oprsz(desc); \ uint64_t *g = vg; \ @@ -4309,14 +4634,17 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \ } while (i != 0); \ } +DO_ZPZZ_FP(sve_fadd_b16, uint16_t, H1_2, bfloat16_add) DO_ZPZZ_FP(sve_fadd_h, uint16_t, H1_2, float16_add) DO_ZPZZ_FP(sve_fadd_s, uint32_t, H1_4, float32_add) DO_ZPZZ_FP(sve_fadd_d, uint64_t, H1_8, float64_add) +DO_ZPZZ_FP(sve_fsub_b16, uint16_t, H1_2, bfloat16_sub) DO_ZPZZ_FP(sve_fsub_h, uint16_t, H1_2, float16_sub) DO_ZPZZ_FP(sve_fsub_s, uint32_t, H1_4, float32_sub) 
DO_ZPZZ_FP(sve_fsub_d, uint64_t, H1_8, float64_sub) +DO_ZPZZ_FP(sve_fmul_b16, uint16_t, H1_2, bfloat16_mul) DO_ZPZZ_FP(sve_fmul_h, uint16_t, H1_2, float16_mul) DO_ZPZZ_FP(sve_fmul_s, uint32_t, H1_4, float32_mul) DO_ZPZZ_FP(sve_fmul_d, uint64_t, H1_8, float64_mul) @@ -4325,18 +4653,32 @@ DO_ZPZZ_FP(sve_fdiv_h, uint16_t, H1_2, float16_div) DO_ZPZZ_FP(sve_fdiv_s, uint32_t, H1_4, float32_div) DO_ZPZZ_FP(sve_fdiv_d, uint64_t, H1_8, float64_div) +DO_ZPZZ_FP(sve_fmin_b16, uint16_t, H1_2, bfloat16_min) DO_ZPZZ_FP(sve_fmin_h, uint16_t, H1_2, float16_min) DO_ZPZZ_FP(sve_fmin_s, uint32_t, H1_4, float32_min) DO_ZPZZ_FP(sve_fmin_d, uint64_t, H1_8, float64_min) +DO_ZPZZ_FP(sve_fmax_b16, uint16_t, H1_2, bfloat16_max) DO_ZPZZ_FP(sve_fmax_h, uint16_t, H1_2, float16_max) DO_ZPZZ_FP(sve_fmax_s, uint32_t, H1_4, float32_max) DO_ZPZZ_FP(sve_fmax_d, uint64_t, H1_8, float64_max) +DO_ZPZZ_FP(sve_ah_fmin_b16, uint16_t, H1_2, helper_sme2_ah_fmin_b16) +DO_ZPZZ_FP(sve_ah_fmin_h, uint16_t, H1_2, helper_vfp_ah_minh) +DO_ZPZZ_FP(sve_ah_fmin_s, uint32_t, H1_4, helper_vfp_ah_mins) +DO_ZPZZ_FP(sve_ah_fmin_d, uint64_t, H1_8, helper_vfp_ah_mind) + +DO_ZPZZ_FP(sve_ah_fmax_b16, uint16_t, H1_2, helper_sme2_ah_fmax_b16) +DO_ZPZZ_FP(sve_ah_fmax_h, uint16_t, H1_2, helper_vfp_ah_maxh) +DO_ZPZZ_FP(sve_ah_fmax_s, uint32_t, H1_4, helper_vfp_ah_maxs) +DO_ZPZZ_FP(sve_ah_fmax_d, uint64_t, H1_8, helper_vfp_ah_maxd) + +DO_ZPZZ_FP(sve_fminnum_b16, uint16_t, H1_2, bfloat16_minnum) DO_ZPZZ_FP(sve_fminnum_h, uint16_t, H1_2, float16_minnum) DO_ZPZZ_FP(sve_fminnum_s, uint32_t, H1_4, float32_minnum) DO_ZPZZ_FP(sve_fminnum_d, uint64_t, H1_8, float64_minnum) +DO_ZPZZ_FP(sve_fmaxnum_b16, uint16_t, H1_2, bfloat16_maxnum) DO_ZPZZ_FP(sve_fmaxnum_h, uint16_t, H1_2, float16_maxnum) DO_ZPZZ_FP(sve_fmaxnum_s, uint32_t, H1_4, float32_maxnum) DO_ZPZZ_FP(sve_fmaxnum_d, uint64_t, H1_8, float64_maxnum) @@ -4356,9 +4698,31 @@ static inline float64 abd_d(float64 a, float64 b, float_status *s) return float64_abs(float64_sub(a, b, s)); } +/* ABD when FPCR.AH = 1: avoid flipping sign bit of a NaN result */ +static float16 ah_abd_h(float16 op1, float16 op2, float_status *stat) +{ + float16 r = float16_sub(op1, op2, stat); + return float16_is_any_nan(r) ? r : float16_abs(r); +} + +static float32 ah_abd_s(float32 op1, float32 op2, float_status *stat) +{ + float32 r = float32_sub(op1, op2, stat); + return float32_is_any_nan(r) ? r : float32_abs(r); +} + +static float64 ah_abd_d(float64 op1, float64 op2, float_status *stat) +{ + float64 r = float64_sub(op1, op2, stat); + return float64_is_any_nan(r) ? 
r : float64_abs(r); +} + DO_ZPZZ_FP(sve_fabd_h, uint16_t, H1_2, abd_h) DO_ZPZZ_FP(sve_fabd_s, uint32_t, H1_4, abd_s) DO_ZPZZ_FP(sve_fabd_d, uint64_t, H1_8, abd_d) +DO_ZPZZ_FP(sve_ah_fabd_h, uint16_t, H1_2, ah_abd_h) +DO_ZPZZ_FP(sve_ah_fabd_s, uint32_t, H1_4, ah_abd_s) +DO_ZPZZ_FP(sve_ah_fabd_d, uint64_t, H1_8, ah_abd_d) static inline float64 scalbn_d(float64 a, int64_t b, float_status *s) { @@ -4381,7 +4745,7 @@ DO_ZPZZ_FP(sve_fmulx_d, uint64_t, H1_8, helper_vfp_mulxd) */ #define DO_ZPZS_FP(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vg, uint64_t scalar, \ - void *status, uint32_t desc) \ + float_status *status, uint32_t desc) \ { \ intptr_t i = simd_oprsz(desc); \ uint64_t *g = vg; \ @@ -4445,11 +4809,20 @@ DO_ZPZS_FP(sve_fmins_h, float16, H1_2, float16_min) DO_ZPZS_FP(sve_fmins_s, float32, H1_4, float32_min) DO_ZPZS_FP(sve_fmins_d, float64, H1_8, float64_min) +DO_ZPZS_FP(sve_ah_fmaxs_h, float16, H1_2, helper_vfp_ah_maxh) +DO_ZPZS_FP(sve_ah_fmaxs_s, float32, H1_4, helper_vfp_ah_maxs) +DO_ZPZS_FP(sve_ah_fmaxs_d, float64, H1_8, helper_vfp_ah_maxd) + +DO_ZPZS_FP(sve_ah_fmins_h, float16, H1_2, helper_vfp_ah_minh) +DO_ZPZS_FP(sve_ah_fmins_s, float32, H1_4, helper_vfp_ah_mins) +DO_ZPZS_FP(sve_ah_fmins_d, float64, H1_8, helper_vfp_ah_mind) + /* Fully general two-operand expander, controlled by a predicate, * With the extra float_status parameter. */ #define DO_ZPZ_FP(NAME, TYPE, H, OP) \ -void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \ +void HELPER(NAME)(void *vd, void *vn, void *vg, \ + float_status *status, uint32_t desc) \ { \ intptr_t i = simd_oprsz(desc); \ uint64_t *g = vg; \ @@ -4469,7 +4842,7 @@ void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \ * FZ16. When converting from fp16, this affects flushing input denormals; * when converting to fp16, this affects flushing output denormals. 
*/ -static inline float32 sve_f16_to_f32(float16 f, float_status *fpst) +float32 sve_f16_to_f32(float16 f, float_status *fpst) { bool save = get_flush_inputs_to_zero(fpst); float32 ret; @@ -4491,7 +4864,7 @@ static inline float64 sve_f16_to_f64(float16 f, float_status *fpst) return ret; } -static inline float16 sve_f32_to_f16(float32 f, float_status *fpst) +float16 sve_f32_to_f16(float32 f, float_status *fpst) { bool save = get_flush_to_zero(fpst); float16 ret; @@ -4654,7 +5027,7 @@ static int16_t do_float16_logb_as_int(float16 a, float_status *s) return -15 - clz32(frac); } /* flush to zero */ - float_raise(float_flag_input_denormal, s); + float_raise(float_flag_input_denormal_flushed, s); } } else if (unlikely(exp == 0x1f)) { if (frac == 0) { @@ -4682,7 +5055,7 @@ static int32_t do_float32_logb_as_int(float32 a, float_status *s) return -127 - clz32(frac); } /* flush to zero */ - float_raise(float_flag_input_denormal, s); + float_raise(float_flag_input_denormal_flushed, s); } } else if (unlikely(exp == 0xff)) { if (frac == 0) { @@ -4710,7 +5083,7 @@ static int64_t do_float64_logb_as_int(float64 a, float_status *s) return -1023 - clz64(frac); } /* flush to zero */ - float_raise(float_flag_input_denormal, s); + float_raise(float_flag_input_denormal_flushed, s); } } else if (unlikely(exp == 0x7ff)) { if (frac == 0) { @@ -4731,9 +5104,78 @@ DO_ZPZ_FP(flogb_d, float64, H1_8, do_float64_logb_as_int) #undef DO_ZPZ_FP +static void do_fmla_zpzzz_b16(void *vd, void *vn, void *vm, void *va, void *vg, + float_status *status, uint32_t desc, + uint16_t neg1, uint16_t neg3, int flags) +{ + intptr_t i = simd_oprsz(desc); + uint64_t *g = vg; + + do { + uint64_t pg = g[(i - 1) >> 6]; + do { + i -= 2; + if (likely((pg >> (i & 63)) & 1)) { + float16 e1, e2, e3, r; + + e1 = *(uint16_t *)(vn + H1_2(i)) ^ neg1; + e2 = *(uint16_t *)(vm + H1_2(i)); + e3 = *(uint16_t *)(va + H1_2(i)) ^ neg3; + r = bfloat16_muladd(e1, e2, e3, flags, status); + *(uint16_t *)(vd + H1_2(i)) = r; + } + } while (i & 63); + } while (i != 0); +} + +void HELPER(sve_fmla_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0, 0, 0); +} + +void HELPER(sve_fmls_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0x8000, 0, 0); +} + +void HELPER(sve_fnmla_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0x8000, 0x8000, 0); +} + +void HELPER(sve_fnmls_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0, 0x8000, 0); +} + +void HELPER(sve_ah_fmls_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product); +} + +void HELPER(sve_ah_fnmla_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product | float_muladd_negate_c); +} + +void HELPER(sve_ah_fnmls_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_c); +} + static void 
do_fmla_zpzzz_h(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc, - uint16_t neg1, uint16_t neg3) + uint16_t neg1, uint16_t neg3, int flags) { intptr_t i = simd_oprsz(desc); uint64_t *g = vg; @@ -4748,7 +5190,7 @@ static void do_fmla_zpzzz_h(void *vd, void *vn, void *vm, void *va, void *vg, e1 = *(uint16_t *)(vn + H1_2(i)) ^ neg1; e2 = *(uint16_t *)(vm + H1_2(i)); e3 = *(uint16_t *)(va + H1_2(i)) ^ neg3; - r = float16_muladd(e1, e2, e3, 0, status); + r = float16_muladd(e1, e2, e3, flags, status); *(uint16_t *)(vd + H1_2(i)) = r; } } while (i & 63); @@ -4756,32 +5198,53 @@ static void do_fmla_zpzzz_h(void *vd, void *vn, void *vm, void *va, void *vg, } void HELPER(sve_fmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0); + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0, 0); } void HELPER(sve_fmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0); + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0, 0); } void HELPER(sve_fnmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0x8000); + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0x8000, 0); } void HELPER(sve_fnmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0x8000, 0); +} + +void HELPER(sve_ah_fmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product); +} + +void HELPER(sve_ah_fnmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product | float_muladd_negate_c); +} + +void HELPER(sve_ah_fnmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0x8000); + do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_c); } static void do_fmla_zpzzz_s(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc, - uint32_t neg1, uint32_t neg3) + uint32_t neg1, uint32_t neg3, int flags) { intptr_t i = simd_oprsz(desc); uint64_t *g = vg; @@ -4796,7 +5259,7 @@ static void do_fmla_zpzzz_s(void *vd, void *vn, void *vm, void *va, void *vg, e1 = *(uint32_t *)(vn + H1_4(i)) ^ neg1; e2 = *(uint32_t *)(vm + H1_4(i)); e3 = *(uint32_t *)(va + H1_4(i)) ^ neg3; - r = float32_muladd(e1, e2, e3, 0, status); + r = float32_muladd(e1, e2, e3, flags, status); *(uint32_t *)(vd + H1_4(i)) = r; } } while (i & 63); @@ -4804,32 +5267,53 @@ static void do_fmla_zpzzz_s(void *vd, void *vn, void *vm, void *va, void *vg, } void HELPER(sve_fmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0); + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0, 0); } 
void HELPER(sve_fmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0); + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0, 0); } void HELPER(sve_fnmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0x80000000); + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0x80000000, 0); } void HELPER(sve_fnmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0x80000000, 0); +} + +void HELPER(sve_ah_fmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product); +} + +void HELPER(sve_ah_fnmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0x80000000); + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product | float_muladd_negate_c); +} + +void HELPER(sve_ah_fnmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_c); } static void do_fmla_zpzzz_d(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc, - uint64_t neg1, uint64_t neg3) + uint64_t neg1, uint64_t neg3, int flags) { intptr_t i = simd_oprsz(desc); uint64_t *g = vg; @@ -4844,7 +5328,7 @@ static void do_fmla_zpzzz_d(void *vd, void *vn, void *vm, void *va, void *vg, e1 = *(uint64_t *)(vn + i) ^ neg1; e2 = *(uint64_t *)(vm + i); e3 = *(uint64_t *)(va + i) ^ neg3; - r = float64_muladd(e1, e2, e3, 0, status); + r = float64_muladd(e1, e2, e3, flags, status); *(uint64_t *)(vd + i) = r; } } while (i & 63); @@ -4852,37 +5336,58 @@ static void do_fmla_zpzzz_d(void *vd, void *vn, void *vm, void *va, void *vg, } void HELPER(sve_fmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0); + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0, 0); } void HELPER(sve_fmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, 0); + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, 0, 0); } void HELPER(sve_fnmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, INT64_MIN); + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, INT64_MIN, 0); } void HELPER(sve_fnmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) { - do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, INT64_MIN); + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, INT64_MIN, 0); } -/* Two operand floating-point comparison controlled 
by a predicate. - * Unlike the integer version, we are not allowed to optimistically - * compare operands, since the comparison may have side effects wrt - * the FPSR. - */ +void HELPER(sve_ah_fmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product); +} + +void HELPER(sve_ah_fnmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product | float_muladd_negate_c); +} + +void HELPER(sve_ah_fnmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_c); +} + +/* Two operand floating-point comparison controlled by a predicate. + * Unlike the integer version, we are not allowed to optimistically + * compare operands, since the comparison may have side effects wrt + * the FPSR. + */ #define DO_FPCMP_PPZZ(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \ - void *status, uint32_t desc) \ + float_status *status, uint32_t desc) \ { \ intptr_t i = simd_oprsz(desc), j = (i - 1) >> 6; \ uint64_t *d = vd, *g = vg; \ @@ -4944,7 +5449,7 @@ DO_FPCMP_PPZZ_ALL(sve_facgt, DO_FACGT) */ #define DO_FPCMP_PPZ0(NAME, TYPE, H, OP) \ void HELPER(NAME)(void *vd, void *vn, void *vg, \ - void *status, uint32_t desc) \ + float_status *status, uint32_t desc) \ { \ intptr_t i = simd_oprsz(desc), j = (i - 1) >> 6; \ uint64_t *d = vd, *g = vg; \ @@ -4982,27 +5487,37 @@ DO_FPCMP_PPZ0_ALL(sve_fcmne0, DO_FCMNE) /* FP Trig Multiply-Add. */ -void HELPER(sve_ftmad_h)(void *vd, void *vn, void *vm, void *vs, uint32_t desc) +void HELPER(sve_ftmad_h)(void *vd, void *vn, void *vm, + float_status *s, uint32_t desc) { static const float16 coeff[16] = { 0x3c00, 0xb155, 0x2030, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3c00, 0xb800, 0x293a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float16); - intptr_t x = simd_data(desc); + intptr_t x = extract32(desc, SIMD_DATA_SHIFT, 3); + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 3, 1); float16 *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i++) { float16 mm = m[i]; intptr_t xx = x; + int flags = 0; + if (float16_is_neg(mm)) { - mm = float16_abs(mm); + if (fpcr_ah) { + flags = float_muladd_negate_product; + } else { + mm = float16_abs(mm); + } xx += 8; } - d[i] = float16_muladd(n[i], mm, coeff[xx], 0, vs); + d[i] = float16_muladd(n[i], mm, coeff[xx], flags, s); } } -void HELPER(sve_ftmad_s)(void *vd, void *vn, void *vm, void *vs, uint32_t desc) +void HELPER(sve_ftmad_s)(void *vd, void *vn, void *vm, + float_status *s, uint32_t desc) { static const float32 coeff[16] = { 0x3f800000, 0xbe2aaaab, 0x3c088886, 0xb95008b9, @@ -5011,20 +5526,29 @@ void HELPER(sve_ftmad_s)(void *vd, void *vn, void *vm, void *vs, uint32_t desc) 0x37cd37cc, 0x00000000, 0x00000000, 0x00000000, }; intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float32); - intptr_t x = simd_data(desc); + intptr_t x = extract32(desc, SIMD_DATA_SHIFT, 3); + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 3, 1); float32 *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i++) { float32 mm = m[i]; intptr_t xx = x; + int flags = 0; + if (float32_is_neg(mm)) { - mm = float32_abs(mm); + if (fpcr_ah) { + flags = float_muladd_negate_product; + } else { + mm = float32_abs(mm); 
+ } xx += 8; } - d[i] = float32_muladd(n[i], mm, coeff[xx], 0, vs); + d[i] = float32_muladd(n[i], mm, coeff[xx], flags, s); } } -void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm, void *vs, uint32_t desc) +void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm, + float_status *s, uint32_t desc) { static const float64 coeff[16] = { 0x3ff0000000000000ull, 0xbfc5555555555543ull, @@ -5037,16 +5561,24 @@ void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm, void *vs, uint32_t desc) 0x3e21ee96d2641b13ull, 0xbda8f76380fbb401ull, }; intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float64); - intptr_t x = simd_data(desc); + intptr_t x = extract32(desc, SIMD_DATA_SHIFT, 3); + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 3, 1); float64 *d = vd, *n = vn, *m = vm; + for (i = 0; i < opr_sz; i++) { float64 mm = m[i]; intptr_t xx = x; + int flags = 0; + if (float64_is_neg(mm)) { - mm = float64_abs(mm); + if (fpcr_ah) { + flags = float_muladd_negate_product; + } else { + mm = float64_abs(mm); + } xx += 8; } - d[i] = float64_muladd(n[i], mm, coeff[xx], 0, vs); + d[i] = float64_muladd(n[i], mm, coeff[xx], flags, s); } } @@ -5055,12 +5587,12 @@ void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm, void *vs, uint32_t desc) */ void HELPER(sve_fcadd_h)(void *vd, void *vn, void *vm, void *vg, - void *vs, uint32_t desc) + float_status *s, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); uint64_t *g = vg; - float16 neg_imag = float16_set_sign(0, simd_data(desc)); - float16 neg_real = float16_chs(neg_imag); + bool rot = extract32(desc, SIMD_DATA_SHIFT, 1); + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 1, 1); do { uint64_t pg = g[(i - 1) >> 6]; @@ -5072,27 +5604,33 @@ void HELPER(sve_fcadd_h)(void *vd, void *vn, void *vm, void *vg, i -= 2 * sizeof(float16); e0 = *(float16 *)(vn + H1_2(i)); - e1 = *(float16 *)(vm + H1_2(j)) ^ neg_real; + e1 = *(float16 *)(vm + H1_2(j)); e2 = *(float16 *)(vn + H1_2(j)); - e3 = *(float16 *)(vm + H1_2(i)) ^ neg_imag; + e3 = *(float16 *)(vm + H1_2(i)); + + if (rot) { + e3 = float16_maybe_ah_chs(e3, fpcr_ah); + } else { + e1 = float16_maybe_ah_chs(e1, fpcr_ah); + } if (likely((pg >> (i & 63)) & 1)) { - *(float16 *)(vd + H1_2(i)) = float16_add(e0, e1, vs); + *(float16 *)(vd + H1_2(i)) = float16_add(e0, e1, s); } if (likely((pg >> (j & 63)) & 1)) { - *(float16 *)(vd + H1_2(j)) = float16_add(e2, e3, vs); + *(float16 *)(vd + H1_2(j)) = float16_add(e2, e3, s); } } while (i & 63); } while (i != 0); } void HELPER(sve_fcadd_s)(void *vd, void *vn, void *vm, void *vg, - void *vs, uint32_t desc) + float_status *s, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); uint64_t *g = vg; - float32 neg_imag = float32_set_sign(0, simd_data(desc)); - float32 neg_real = float32_chs(neg_imag); + bool rot = extract32(desc, SIMD_DATA_SHIFT, 1); + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 1, 1); do { uint64_t pg = g[(i - 1) >> 6]; @@ -5104,27 +5642,33 @@ void HELPER(sve_fcadd_s)(void *vd, void *vn, void *vm, void *vg, i -= 2 * sizeof(float32); e0 = *(float32 *)(vn + H1_2(i)); - e1 = *(float32 *)(vm + H1_2(j)) ^ neg_real; + e1 = *(float32 *)(vm + H1_2(j)); e2 = *(float32 *)(vn + H1_2(j)); - e3 = *(float32 *)(vm + H1_2(i)) ^ neg_imag; + e3 = *(float32 *)(vm + H1_2(i)); + + if (rot) { + e3 = float32_maybe_ah_chs(e3, fpcr_ah); + } else { + e1 = float32_maybe_ah_chs(e1, fpcr_ah); + } if (likely((pg >> (i & 63)) & 1)) { - *(float32 *)(vd + H1_2(i)) = float32_add(e0, e1, vs); + *(float32 *)(vd + H1_2(i)) = float32_add(e0, e1, s); } if (likely((pg >> (j & 63)) & 1)) { - *(float32 *)(vd + H1_2(j)) = 
float32_add(e2, e3, vs); + *(float32 *)(vd + H1_2(j)) = float32_add(e2, e3, s); } } while (i & 63); } while (i != 0); } void HELPER(sve_fcadd_d)(void *vd, void *vn, void *vm, void *vg, - void *vs, uint32_t desc) + float_status *s, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); uint64_t *g = vg; - float64 neg_imag = float64_set_sign(0, simd_data(desc)); - float64 neg_real = float64_chs(neg_imag); + bool rot = extract32(desc, SIMD_DATA_SHIFT, 1); + bool fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 1, 1); do { uint64_t pg = g[(i - 1) >> 6]; @@ -5136,15 +5680,21 @@ void HELPER(sve_fcadd_d)(void *vd, void *vn, void *vm, void *vg, i -= 2 * sizeof(float64); e0 = *(float64 *)(vn + H1_2(i)); - e1 = *(float64 *)(vm + H1_2(j)) ^ neg_real; + e1 = *(float64 *)(vm + H1_2(j)); e2 = *(float64 *)(vn + H1_2(j)); - e3 = *(float64 *)(vm + H1_2(i)) ^ neg_imag; + e3 = *(float64 *)(vm + H1_2(i)); + + if (rot) { + e3 = float64_maybe_ah_chs(e3, fpcr_ah); + } else { + e1 = float64_maybe_ah_chs(e1, fpcr_ah); + } if (likely((pg >> (i & 63)) & 1)) { - *(float64 *)(vd + H1_2(i)) = float64_add(e0, e1, vs); + *(float64 *)(vd + H1_2(i)) = float64_add(e0, e1, s); } if (likely((pg >> (j & 63)) & 1)) { - *(float64 *)(vd + H1_2(j)) = float64_add(e2, e3, vs); + *(float64 *)(vd + H1_2(j)) = float64_add(e2, e3, s); } } while (i & 63); } while (i != 0); @@ -5155,16 +5705,21 @@ void HELPER(sve_fcadd_d)(void *vd, void *vn, void *vm, void *vg, */ void HELPER(sve_fcmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); - unsigned rot = simd_data(desc); - bool flip = rot & 1; - float16 neg_imag, neg_real; + bool flip = extract32(desc, SIMD_DATA_SHIFT, 1); + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_real = flip ^ negf_imag; + float16 negx_imag, negx_real; uint64_t *g = vg; - neg_imag = float16_set_sign(0, (rot & 2) != 0); - neg_real = float16_set_sign(0, rot == 1 || rot == 2); + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (negf_real & ~fpcr_ah) << 15; + negx_imag = (negf_imag & ~fpcr_ah) << 15; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0); do { uint64_t pg = g[(i - 1) >> 6]; @@ -5181,18 +5736,18 @@ void HELPER(sve_fcmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va, mi = *(float16 *)(vm + H1_2(j)); e2 = (flip ? ni : nr); - e1 = (flip ? mi : mr) ^ neg_real; + e1 = (flip ? mi : mr) ^ negx_real; e4 = e2; - e3 = (flip ? mr : mi) ^ neg_imag; + e3 = (flip ? 
mr : mi) ^ negx_imag; if (likely((pg >> (i & 63)) & 1)) { d = *(float16 *)(va + H1_2(i)); - d = float16_muladd(e2, e1, d, 0, status); + d = float16_muladd(e2, e1, d, negf_real, status); *(float16 *)(vd + H1_2(i)) = d; } if (likely((pg >> (j & 63)) & 1)) { d = *(float16 *)(va + H1_2(j)); - d = float16_muladd(e4, e3, d, 0, status); + d = float16_muladd(e4, e3, d, negf_imag, status); *(float16 *)(vd + H1_2(j)) = d; } } while (i & 63); @@ -5200,16 +5755,21 @@ void HELPER(sve_fcmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va, } void HELPER(sve_fcmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); - unsigned rot = simd_data(desc); - bool flip = rot & 1; - float32 neg_imag, neg_real; + bool flip = extract32(desc, SIMD_DATA_SHIFT, 1); + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_real = flip ^ negf_imag; + float32 negx_imag, negx_real; uint64_t *g = vg; - neg_imag = float32_set_sign(0, (rot & 2) != 0); - neg_real = float32_set_sign(0, rot == 1 || rot == 2); + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (negf_real & ~fpcr_ah) << 31; + negx_imag = (negf_imag & ~fpcr_ah) << 31; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0); do { uint64_t pg = g[(i - 1) >> 6]; @@ -5226,18 +5786,18 @@ void HELPER(sve_fcmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va, mi = *(float32 *)(vm + H1_2(j)); e2 = (flip ? ni : nr); - e1 = (flip ? mi : mr) ^ neg_real; + e1 = (flip ? mi : mr) ^ negx_real; e4 = e2; - e3 = (flip ? mr : mi) ^ neg_imag; + e3 = (flip ? mr : mi) ^ negx_imag; if (likely((pg >> (i & 63)) & 1)) { d = *(float32 *)(va + H1_2(i)); - d = float32_muladd(e2, e1, d, 0, status); + d = float32_muladd(e2, e1, d, negf_real, status); *(float32 *)(vd + H1_2(i)) = d; } if (likely((pg >> (j & 63)) & 1)) { d = *(float32 *)(va + H1_2(j)); - d = float32_muladd(e4, e3, d, 0, status); + d = float32_muladd(e4, e3, d, negf_imag, status); *(float32 *)(vd + H1_2(j)) = d; } } while (i & 63); @@ -5245,16 +5805,21 @@ void HELPER(sve_fcmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va, } void HELPER(sve_fcmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va, - void *vg, void *status, uint32_t desc) + void *vg, float_status *status, uint32_t desc) { intptr_t j, i = simd_oprsz(desc); - unsigned rot = simd_data(desc); - bool flip = rot & 1; - float64 neg_imag, neg_real; + bool flip = extract32(desc, SIMD_DATA_SHIFT, 1); + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_real = flip ^ negf_imag; + float64 negx_imag, negx_real; uint64_t *g = vg; - neg_imag = float64_set_sign(0, (rot & 2) != 0); - neg_real = float64_set_sign(0, rot == 1 || rot == 2); + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (uint64_t)(negf_real & ~fpcr_ah) << 63; + negx_imag = (uint64_t)(negf_imag & ~fpcr_ah) << 63; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0); do { uint64_t pg = g[(i - 1) >> 6]; @@ -5271,18 +5836,18 @@ void HELPER(sve_fcmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va, mi = *(float64 *)(vm + H1_2(j)); e2 = (flip ? ni : nr); - e1 = (flip ? mi : mr) ^ neg_real; + e1 = (flip ? 
mi : mr) ^ negx_real; e4 = e2; - e3 = (flip ? mr : mi) ^ neg_imag; + e3 = (flip ? mr : mi) ^ negx_imag; if (likely((pg >> (i & 63)) & 1)) { d = *(float64 *)(va + H1_2(i)); - d = float64_muladd(e2, e1, d, 0, status); + d = float64_muladd(e2, e1, d, negf_real, status); *(float64 *)(vd + H1_2(i)) = d; } if (likely((pg >> (j & 63)) & 1)) { d = *(float64 *)(va + H1_2(j)); - d = float64_muladd(e4, e3, d, 0, status); + d = float64_muladd(e4, e3, d, negf_imag, status); *(float64 *)(vd + H1_2(j)) = d; } } while (i & 63); @@ -5797,17 +6362,14 @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr, static inline QEMU_ALWAYS_INLINE void sve_ldN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr, - uint32_t desc, const uintptr_t ra, + uint64_t desc, const uintptr_t ra, const int esz, const int msz, const int N, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; int bit55 = extract64(addr, 55, 1); - /* Remove mtedesc from the normal sve descriptor. */ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Perform gross MTE suppression early. */ if (!tbi_check(mtedesc, bit55) || tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) { @@ -5819,13 +6381,13 @@ void sve_ldN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr, #define DO_LD1_1(NAME, ESZ) \ void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, 0, \ sve_##NAME##_host, sve_##NAME##_tlb); \ } \ void HELPER(sve_##NAME##_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, \ sve_##NAME##_host, sve_##NAME##_tlb); \ @@ -5833,25 +6395,25 @@ void HELPER(sve_##NAME##_r_mte)(CPUARMState *env, void *vg, \ #define DO_LD1_2(NAME, ESZ, MSZ) \ void HELPER(sve_##NAME##_le_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \ } \ void HELPER(sve_##NAME##_be_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \ } \ void HELPER(sve_##NAME##_le_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \ } \ void HELPER(sve_##NAME##_be_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \ @@ -5877,18 +6439,21 @@ DO_LD1_2(ld1sds, MO_64, MO_32) DO_LD1_2(ld1dd, MO_64, MO_64) +DO_LD1_2(ld1squ, MO_128, MO_32) +DO_LD1_2(ld1dqu, MO_128, MO_64) + #undef DO_LD1_1 #undef DO_LD1_2 #define DO_LDN_1(N) \ void HELPER(sve_ld##N##bb_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, 0, \ sve_ld1bb_host, sve_ld1bb_tlb); \ } \ void HELPER(sve_ld##N##bb_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, 
uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, \ sve_ld1bb_host, sve_ld1bb_tlb); \ @@ -5896,25 +6461,25 @@ void HELPER(sve_ld##N##bb_r_mte)(CPUARMState *env, void *vg, \ #define DO_LDN_2(N, SUFF, ESZ) \ void HELPER(sve_ld##N##SUFF##_le_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \ sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \ } \ void HELPER(sve_ld##N##SUFF##_be_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \ sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \ } \ void HELPER(sve_ld##N##SUFF##_le_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, \ sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \ } \ void HELPER(sve_ld##N##SUFF##_be_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, \ sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \ @@ -5936,6 +6501,10 @@ DO_LDN_2(2, dd, MO_64) DO_LDN_2(3, dd, MO_64) DO_LDN_2(4, dd, MO_64) +DO_LDN_2(2, qq, MO_128) +DO_LDN_2(3, qq, MO_128) +DO_LDN_2(4, qq, MO_128) + #undef DO_LDN_1 #undef DO_LDN_2 @@ -6155,17 +6724,14 @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr, static inline QEMU_ALWAYS_INLINE void sve_ldnfff1_r_mte(CPUARMState *env, void *vg, target_ulong addr, - uint32_t desc, const uintptr_t retaddr, + uint64_t desc, const uintptr_t retaddr, const int esz, const int msz, const SVEContFault fault, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; int bit55 = extract64(addr, 55, 1); - /* Remove mtedesc from the normal sve descriptor. */ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Perform gross MTE suppression early. 
*/ if (!tbi_check(mtedesc, bit55) || tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) { @@ -6178,25 +6744,25 @@ void sve_ldnfff1_r_mte(CPUARMState *env, void *vg, target_ulong addr, #define DO_LDFF1_LDNF1_1(PART, ESZ) \ void HELPER(sve_ldff1##PART##_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MO_8, FAULT_FIRST, \ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \ } \ void HELPER(sve_ldnf1##PART##_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MO_8, FAULT_NO, \ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \ } \ void HELPER(sve_ldff1##PART##_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, FAULT_FIRST, \ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \ } \ void HELPER(sve_ldnf1##PART##_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, FAULT_NO, \ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \ @@ -6204,49 +6770,49 @@ void HELPER(sve_ldnf1##PART##_r_mte)(CPUARMState *env, void *vg, \ #define DO_LDFF1_LDNF1_2(PART, ESZ, MSZ) \ void HELPER(sve_ldff1##PART##_le_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_FIRST, \ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \ } \ void HELPER(sve_ldnf1##PART##_le_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_NO, \ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \ } \ void HELPER(sve_ldff1##PART##_be_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_FIRST, \ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \ } \ void HELPER(sve_ldnf1##PART##_be_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_NO, \ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \ } \ void HELPER(sve_ldff1##PART##_le_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_FIRST, \ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \ } \ void HELPER(sve_ldnf1##PART##_le_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_NO, \ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \ } \ void HELPER(sve_ldff1##PART##_be_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_FIRST, \ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \ } \ void HELPER(sve_ldnf1##PART##_be_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), 
ESZ, MSZ, FAULT_NO, \ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \ @@ -6317,9 +6883,6 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr, flags = info.page[0].flags | info.page[1].flags; if (unlikely(flags != 0)) { -#ifdef CONFIG_USER_ONLY - g_assert_not_reached(); -#else /* * At least one page includes MMIO. * Any bus operation can fail with cpu_transaction_failed, @@ -6350,7 +6913,6 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr, } while (reg_off & 63); } while (reg_off <= reg_last); return; -#endif } mem_off = info.mem_off_first[0]; @@ -6417,17 +6979,14 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr, static inline QEMU_ALWAYS_INLINE void sve_stN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr, - uint32_t desc, const uintptr_t ra, + uint64_t desc, const uintptr_t ra, const int esz, const int msz, const int N, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; int bit55 = extract64(addr, 55, 1); - /* Remove mtedesc from the normal sve descriptor. */ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Perform gross MTE suppression early. */ if (!tbi_check(mtedesc, bit55) || tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) { @@ -6439,13 +6998,13 @@ void sve_stN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr, #define DO_STN_1(N, NAME, ESZ) \ void HELPER(sve_st##N##NAME##_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, N, 0, \ sve_st1##NAME##_host, sve_st1##NAME##_tlb); \ } \ void HELPER(sve_st##N##NAME##_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_stN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, N, \ sve_st1##NAME##_host, sve_st1##NAME##_tlb); \ @@ -6453,25 +7012,25 @@ void HELPER(sve_st##N##NAME##_r_mte)(CPUARMState *env, void *vg, \ #define DO_STN_2(N, NAME, ESZ, MSZ) \ void HELPER(sve_st##N##NAME##_le_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \ sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb); \ } \ void HELPER(sve_st##N##NAME##_be_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \ sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb); \ } \ void HELPER(sve_st##N##NAME##_le_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_stN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, \ sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb); \ } \ void HELPER(sve_st##N##NAME##_be_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_stN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, \ sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb); \ @@ -6503,6 +7062,13 @@ DO_STN_2(2, dd, MO_64, MO_64) DO_STN_2(3, dd, MO_64, MO_64) DO_STN_2(4, dd, MO_64, MO_64) +DO_STN_2(1, sq, MO_128, MO_32) +DO_STN_2(1, dq, MO_128, MO_64) + +DO_STN_2(2, qq, MO_128, MO_128) +DO_STN_2(3, qq, MO_128, MO_128) +DO_STN_2(4, qq, MO_128, MO_128) + #undef DO_STN_1 #undef DO_STN_2 @@ -6608,14 +7174,12 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, 
void *vm, static inline QEMU_ALWAYS_INLINE void sve_ld1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm, - target_ulong base, uint32_t desc, uintptr_t retaddr, + target_ulong base, uint64_t desc, uintptr_t retaddr, int esize, int msize, zreg_off_fn *off_fn, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Remove mtedesc from the normal sve descriptor. */ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; /* * ??? TODO: For the 32-bit offset extractions, base + ofs cannot @@ -6629,13 +7193,13 @@ void sve_ld1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm, #define DO_LD1_ZPZ_S(MEM, OFS, MSZ) \ void HELPER(sve_ld##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 4, 1 << MSZ, \ off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ } \ void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ld1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 4, 1 << MSZ, \ off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ @@ -6643,18 +7207,32 @@ void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ #define DO_LD1_ZPZ_D(MEM, OFS, MSZ) \ void HELPER(sve_ld##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 8, 1 << MSZ, \ off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ } \ void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ld1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 8, 1 << MSZ, \ off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ } +#define DO_LD1_ZPZ_Q(MEM, OFS, MSZ) \ +void HELPER(sve_ld##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \ + void *vm, target_ulong base, uint64_t desc) \ +{ \ + sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 16, 1 << MSZ, \ + off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ +} \ +void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ + void *vm, target_ulong base, uint64_t desc) \ +{ \ + sve_ld1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 16, 1 << MSZ, \ + off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ +} + DO_LD1_ZPZ_S(bsu, zsu, MO_8) DO_LD1_ZPZ_S(bsu, zss, MO_8) DO_LD1_ZPZ_D(bdu, zsu, MO_8) @@ -6719,6 +7297,9 @@ DO_LD1_ZPZ_D(dd_be, zsu, MO_64) DO_LD1_ZPZ_D(dd_be, zss, MO_64) DO_LD1_ZPZ_D(dd_be, zd, MO_64) +DO_LD1_ZPZ_Q(qq_le, zd, MO_128) +DO_LD1_ZPZ_Q(qq_be, zd, MO_128) + #undef DO_LD1_ZPZ_S #undef DO_LD1_ZPZ_D @@ -6817,15 +7398,13 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, static inline QEMU_ALWAYS_INLINE void sve_ldff1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm, - target_ulong base, uint32_t desc, uintptr_t retaddr, + target_ulong base, uint64_t desc, uintptr_t retaddr, const int esz, const int msz, zreg_off_fn *off_fn, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Remove mtedesc from the normal sve descriptor. 
*/ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; /* * ??? TODO: For the 32-bit offset extractions, base + ofs cannot @@ -6840,14 +7419,14 @@ void sve_ldff1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm, #define DO_LDFF1_ZPZ_S(MEM, OFS, MSZ) \ void HELPER(sve_ldff##MEM##_##OFS) \ (CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ldff1_z(env, vd, vg, vm, base, desc, GETPC(), 0, MO_32, MSZ, \ off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ } \ void HELPER(sve_ldff##MEM##_##OFS##_mte) \ (CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ldff1_z_mte(env, vd, vg, vm, base, desc, GETPC(), MO_32, MSZ, \ off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ @@ -6856,14 +7435,14 @@ void HELPER(sve_ldff##MEM##_##OFS##_mte) \ #define DO_LDFF1_ZPZ_D(MEM, OFS, MSZ) \ void HELPER(sve_ldff##MEM##_##OFS) \ (CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ldff1_z(env, vd, vg, vm, base, desc, GETPC(), 0, MO_64, MSZ, \ off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ } \ void HELPER(sve_ldff##MEM##_##OFS##_mte) \ (CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ldff1_z_mte(env, vd, vg, vm, base, desc, GETPC(), MO_64, MSZ, \ off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ @@ -7022,14 +7601,12 @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, static inline QEMU_ALWAYS_INLINE void sve_st1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm, - target_ulong base, uint32_t desc, uintptr_t retaddr, + target_ulong base, uint64_t desc, uintptr_t retaddr, int esize, int msize, zreg_off_fn *off_fn, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Remove mtedesc from the normal sve descriptor. */ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; /* * ??? 
TODO: For the 32-bit offset extractions, base + ofs cannot @@ -7043,13 +7620,13 @@ void sve_st1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm, #define DO_ST1_ZPZ_S(MEM, OFS, MSZ) \ void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 4, 1 << MSZ, \ off_##OFS##_s, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \ } \ void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_st1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 4, 1 << MSZ, \ off_##OFS##_s, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \ @@ -7057,18 +7634,32 @@ void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ #define DO_ST1_ZPZ_D(MEM, OFS, MSZ) \ void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 8, 1 << MSZ, \ off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \ } \ void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_st1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 8, 1 << MSZ, \ off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \ } +#define DO_ST1_ZPZ_Q(MEM, OFS, MSZ) \ +void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \ + void *vm, target_ulong base, uint64_t desc) \ +{ \ + sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 16, 1 << MSZ, \ + off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \ +} \ +void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ + void *vm, target_ulong base, uint64_t desc) \ +{ \ + sve_st1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 16, 1 << MSZ, \ + off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \ +} + DO_ST1_ZPZ_S(bs, zsu, MO_8) DO_ST1_ZPZ_S(hs_le, zsu, MO_16) DO_ST1_ZPZ_S(hs_be, zsu, MO_16) @@ -7105,9 +7696,507 @@ DO_ST1_ZPZ_D(sd_be, zd, MO_32) DO_ST1_ZPZ_D(dd_le, zd, MO_64) DO_ST1_ZPZ_D(dd_be, zd, MO_64) +DO_ST1_ZPZ_Q(qq_le, zd, MO_128) +DO_ST1_ZPZ_Q(qq_be, zd, MO_128) + #undef DO_ST1_ZPZ_S #undef DO_ST1_ZPZ_D +/* + * SVE2.1 consecutive register load/store + */ + +static unsigned sve2p1_cont_ldst_elements(SVEContLdSt *info, vaddr addr, + uint32_t png, intptr_t reg_max, + int N, int v_esz) +{ + const int esize = 1 << v_esz; + intptr_t reg_off_first = -1, reg_off_last = -1, reg_off_split; + DecodeCounter p = decode_counter(png, reg_max, v_esz); + unsigned b_count = p.count << v_esz; + unsigned b_stride = 1 << (v_esz + p.lg2_stride); + intptr_t page_split; + + /* Set all of the element indices to -1, and the TLB data to 0. 
*/ + memset(info, -1, offsetof(SVEContLdSt, page)); + memset(info->page, 0, sizeof(info->page)); + + if (p.invert) { + if (b_count >= reg_max * N) { + return 0; + } + reg_off_first = b_count; + reg_off_last = reg_max * N - b_stride; + } else { + if (b_count == 0) { + return 0; + } + reg_off_first = 0; + reg_off_last = MIN(b_count - esize, reg_max * N - b_stride); + } + + info->reg_off_first[0] = reg_off_first; + info->mem_off_first[0] = reg_off_first; + + page_split = -(addr | TARGET_PAGE_MASK); + if (reg_off_last + esize <= page_split || reg_off_first >= page_split) { + /* The entire operation fits within a single page. */ + info->reg_off_last[0] = reg_off_last; + return b_stride; + } + + info->page_split = page_split; + reg_off_split = ROUND_DOWN(page_split, esize); + + /* + * This is the last full element on the first page, but it is not + * necessarily active. If there is no full element, i.e. the first + * active element is the one that's split, this value remains -1. + * It is useful as iteration bounds. + */ + if (reg_off_split != 0) { + info->reg_off_last[0] = ROUND_DOWN(reg_off_split - esize, b_stride); + } + + /* Determine if an unaligned element spans the pages. */ + if (page_split & (esize - 1)) { + /* It is helpful to know if the split element is active. */ + if ((reg_off_split & (b_stride - 1)) == 0) { + info->reg_off_split = reg_off_split; + info->mem_off_split = reg_off_split; + } + reg_off_split += esize; + } + + /* + * We do want the first active element on the second page, because + * this may affect the address reported in an exception. + */ + reg_off_split = ROUND_UP(reg_off_split, b_stride); + if (reg_off_split <= reg_off_last) { + info->reg_off_first[1] = reg_off_split; + info->mem_off_first[1] = reg_off_split; + info->reg_off_last[1] = reg_off_last; + } + return b_stride; +} + +static void sve2p1_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env, + target_ulong addr, unsigned estride, + int esize, int wp_access, uintptr_t ra) +{ +#ifndef CONFIG_USER_ONLY + intptr_t count_off, count_last; + int flags0 = info->page[0].flags; + int flags1 = info->page[1].flags; + + if (likely(!((flags0 | flags1) & TLB_WATCHPOINT))) { + return; + } + + /* Indicate that watchpoints are handled. */ + info->page[0].flags = flags0 & ~TLB_WATCHPOINT; + info->page[1].flags = flags1 & ~TLB_WATCHPOINT; + + if (flags0 & TLB_WATCHPOINT) { + count_off = info->reg_off_first[0]; + count_last = info->reg_off_split; + if (count_last < 0) { + count_last = info->reg_off_last[0]; + } + do { + cpu_check_watchpoint(env_cpu(env), addr + count_off, + esize, info->page[0].attrs, wp_access, ra); + count_off += estride; + } while (count_off <= count_last); + } + + count_off = info->reg_off_first[1]; + if ((flags1 & TLB_WATCHPOINT) && count_off >= 0) { + count_last = info->reg_off_last[1]; + do { + cpu_check_watchpoint(env_cpu(env), addr + count_off, + esize, info->page[1].attrs, + wp_access, ra); + count_off += estride; + } while (count_off <= count_last); + } +#endif +} + +static void sve2p1_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env, + target_ulong addr, unsigned estride, + int esize, uint32_t mtedesc, + uintptr_t ra) +{ + intptr_t count_off, count_last; + + /* + * TODO: estride is always a small power of two, <= 8. + * Manipulate the stride within the loops such that + * - first iteration hits addr + off, as required, + * - second iteration hits ALIGN_UP(addr, 16), + * - other iterations advance addr by 16. + * This will minimize the probing to once per MTE granule. 
+ */ + + /* Process the page only if MemAttr == Tagged. */ + if (info->page[0].tagged) { + count_off = info->reg_off_first[0]; + count_last = info->reg_off_split; + if (count_last < 0) { + count_last = info->reg_off_last[0]; + } + + do { + mte_check(env, mtedesc, addr + count_off, ra); + count_off += estride; + } while (count_off <= count_last); + } + + count_off = info->reg_off_first[1]; + if (count_off >= 0 && info->page[1].tagged) { + count_last = info->reg_off_last[1]; + do { + mte_check(env, mtedesc, addr + count_off, ra); + count_off += estride; + } while (count_off <= count_last); + } +} + +static inline QEMU_ALWAYS_INLINE +void sve2p1_ld1_c(CPUARMState *env, ARMVectorReg *zd, const vaddr addr, + uint32_t png, uint64_t desc64, + const uintptr_t ra, const MemOp esz, + sve_ldst1_host_fn *host_fn, + sve_ldst1_tlb_fn *tlb_fn) +{ + uint32_t mtedesc = desc64 >> 32; + uint32_t desc = desc64; + const unsigned N = (desc >> SIMD_DATA_SHIFT) & 1 ? 4 : 2; + const unsigned rstride = 1 << ((desc >> (SIMD_DATA_SHIFT + 1)) % 4); + const intptr_t reg_max = simd_oprsz(desc); + const unsigned esize = 1 << esz; + intptr_t count_off, count_last; + intptr_t reg_off, reg_last, reg_n; + SVEContLdSt info; + unsigned estride, flags; + void *host; + + estride = sve2p1_cont_ldst_elements(&info, addr, png, reg_max, N, esz); + if (estride == 0) { + /* The entire predicate was false; no load occurs. */ + for (unsigned n = 0; n < N; n++) { + memset(zd + n * rstride, 0, reg_max); + } + return; + } + + /* Probe the page(s). Exit with exception for any invalid page. */ + sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, ra); + + /* Handle watchpoints for all active elements. */ + sve2p1_cont_ldst_watchpoints(&info, env, addr, estride, + esize, BP_MEM_READ, ra); + + /* + * Handle mte checks for all active elements. + * Since TBI must be set for MTE, !mtedesc => !mte_active. + */ + if (mtedesc) { + sve2p1_cont_ldst_mte_check(&info, env, estride, addr, + esize, mtedesc, ra); + } + + flags = info.page[0].flags | info.page[1].flags; + if (unlikely(flags != 0)) { + /* + * At least one page includes MMIO. + * Any bus operation can fail with cpu_transaction_failed, + * which for ARM will raise SyncExternal. Perform the load + * into scratch memory to preserve register state until the end. + */ + ARMVectorReg scratch[4] = { }; + + count_off = info.reg_off_first[0]; + count_last = info.reg_off_last[1]; + if (count_last < 0) { + count_last = info.reg_off_split; + if (count_last < 0) { + count_last = info.reg_off_last[0]; + } + } + reg_off = count_off % reg_max; + reg_n = count_off / reg_max; + + do { + reg_last = MIN(count_last - count_off, reg_max - esize); + do { + tlb_fn(env, &scratch[reg_n], reg_off, addr + count_off, ra); + reg_off += estride; + count_off += estride; + } while (reg_off <= reg_last); + reg_off = 0; + reg_n++; + } while (count_off <= count_last); + + for (unsigned n = 0; n < N; ++n) { + memcpy(&zd[n * rstride], &scratch[n], reg_max); + } + return; + } + + /* The entire operation is in RAM, on valid pages. 
*/ + + for (unsigned n = 0; n < N; ++n) { + memset(&zd[n * rstride], 0, reg_max); + } + + count_off = info.reg_off_first[0]; + count_last = info.reg_off_last[0]; + reg_off = count_off % reg_max; + reg_n = count_off / reg_max; + host = info.page[0].host; + + set_helper_retaddr(ra); + + do { + reg_last = MIN(count_last - reg_n * reg_max, reg_max - esize); + do { + host_fn(&zd[reg_n * rstride], reg_off, host + count_off); + reg_off += estride; + count_off += estride; + } while (reg_off <= reg_last); + reg_off = 0; + reg_n++; + } while (count_off <= count_last); + + clear_helper_retaddr(); + + /* + * Use the slow path to manage the cross-page misalignment. + * But we know this is RAM and cannot trap. + */ + count_off = info.reg_off_split; + if (unlikely(count_off >= 0)) { + reg_off = count_off % reg_max; + reg_n = count_off / reg_max; + tlb_fn(env, &zd[reg_n * rstride], reg_off, addr + count_off, ra); + } + + count_off = info.reg_off_first[1]; + if (unlikely(count_off >= 0)) { + count_last = info.reg_off_last[1]; + reg_off = count_off % reg_max; + reg_n = count_off / reg_max; + host = info.page[1].host; + + set_helper_retaddr(ra); + + do { + reg_last = MIN(count_last - reg_n * reg_max, reg_max - esize); + do { + host_fn(&zd[reg_n * rstride], reg_off, host + count_off); + reg_off += estride; + count_off += estride; + } while (reg_off <= reg_last); + reg_off = 0; + reg_n++; + } while (count_off <= count_last); + + clear_helper_retaddr(); + } +} + +void HELPER(sve2p1_ld1bb_c)(CPUARMState *env, void *vd, target_ulong addr, + uint32_t png, uint64_t desc) +{ + sve2p1_ld1_c(env, vd, addr, png, desc, GETPC(), MO_8, + sve_ld1bb_host, sve_ld1bb_tlb); +} + +#define DO_LD1_2(NAME, ESZ) \ +void HELPER(sve2p1_##NAME##_le_c)(CPUARMState *env, void *vd, \ + target_ulong addr, uint32_t png, \ + uint64_t desc) \ +{ \ + sve2p1_ld1_c(env, vd, addr, png, desc, GETPC(), ESZ, \ + sve_##NAME##_le_host, sve_##NAME##_le_tlb); \ +} \ +void HELPER(sve2p1_##NAME##_be_c)(CPUARMState *env, void *vd, \ + target_ulong addr, uint32_t png, \ + uint64_t desc) \ +{ \ + sve2p1_ld1_c(env, vd, addr, png, desc, GETPC(), ESZ, \ + sve_##NAME##_be_host, sve_##NAME##_be_tlb); \ +} + +DO_LD1_2(ld1hh, MO_16) +DO_LD1_2(ld1ss, MO_32) +DO_LD1_2(ld1dd, MO_64) + +#undef DO_LD1_2 + +static inline QEMU_ALWAYS_INLINE +void sve2p1_st1_c(CPUARMState *env, ARMVectorReg *zd, const vaddr addr, + uint32_t png, uint64_t desc64, + const uintptr_t ra, const int esz, + sve_ldst1_host_fn *host_fn, + sve_ldst1_tlb_fn *tlb_fn) +{ + uint32_t mtedesc = desc64 >> 32; + uint32_t desc = desc64; + const unsigned N = (desc >> SIMD_DATA_SHIFT) & 1 ? 4 : 2; + const unsigned rstride = 1 << ((desc >> (SIMD_DATA_SHIFT + 1)) % 4); + const intptr_t reg_max = simd_oprsz(desc); + const unsigned esize = 1 << esz; + intptr_t count_off, count_last; + intptr_t reg_off, reg_last, reg_n; + SVEContLdSt info; + unsigned estride, flags; + void *host; + + estride = sve2p1_cont_ldst_elements(&info, addr, png, reg_max, N, esz); + if (estride == 0) { + /* The entire predicate was false; no store occurs. */ + return; + } + + /* Probe the page(s). Exit with exception for any invalid page. */ + sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_STORE, ra); + + /* Handle watchpoints for all active elements. */ + sve2p1_cont_ldst_watchpoints(&info, env, addr, estride, + esize, BP_MEM_WRITE, ra); + + /* + * Handle mte checks for all active elements. + * Since TBI must be set for MTE, !mtedesc => !mte_active. 
+ */ + if (mtedesc) { + sve2p1_cont_ldst_mte_check(&info, env, estride, addr, + esize, mtedesc, ra); + } + + flags = info.page[0].flags | info.page[1].flags; + if (unlikely(flags != 0)) { + /* + * At least one page includes MMIO. + * Any bus operation can fail with cpu_transaction_failed, + * which for ARM will raise SyncExternal. Perform the load + * into scratch memory to preserve register state until the end. + */ + count_off = info.reg_off_first[0]; + count_last = info.reg_off_last[1]; + if (count_last < 0) { + count_last = info.reg_off_split; + if (count_last < 0) { + count_last = info.reg_off_last[0]; + } + } + reg_off = count_off % reg_max; + reg_n = count_off / reg_max; + + do { + reg_last = MIN(count_last - count_off, reg_max - esize); + do { + tlb_fn(env, &zd[reg_n * rstride], reg_off, addr + count_off, ra); + reg_off += estride; + count_off += estride; + } while (reg_off <= reg_last); + reg_off = 0; + reg_n++; + } while (count_off <= count_last); + return; + } + + /* The entire operation is in RAM, on valid pages. */ + + count_off = info.reg_off_first[0]; + count_last = info.reg_off_last[0]; + reg_off = count_off % reg_max; + reg_n = count_off / reg_max; + host = info.page[0].host; + + set_helper_retaddr(ra); + + do { + reg_last = MIN(count_last - reg_n * reg_max, reg_max - esize); + do { + host_fn(&zd[reg_n * rstride], reg_off, host + count_off); + reg_off += estride; + count_off += estride; + } while (reg_off <= reg_last); + reg_off = 0; + reg_n++; + } while (count_off <= count_last); + + clear_helper_retaddr(); + + /* + * Use the slow path to manage the cross-page misalignment. + * But we know this is RAM and cannot trap. + */ + count_off = info.reg_off_split; + if (unlikely(count_off >= 0)) { + reg_off = count_off % reg_max; + reg_n = count_off / reg_max; + tlb_fn(env, &zd[reg_n * rstride], reg_off, addr + count_off, ra); + } + + count_off = info.reg_off_first[1]; + if (unlikely(count_off >= 0)) { + count_last = info.reg_off_last[1]; + reg_off = count_off % reg_max; + reg_n = count_off / reg_max; + host = info.page[1].host; + + set_helper_retaddr(ra); + + do { + reg_last = MIN(count_last - reg_n * reg_max, reg_max - esize); + do { + host_fn(&zd[reg_n * rstride], reg_off, host + count_off); + reg_off += estride; + count_off += estride; + } while (reg_off <= reg_last); + reg_off = 0; + reg_n++; + } while (count_off <= count_last); + + clear_helper_retaddr(); + } +} + +void HELPER(sve2p1_st1bb_c)(CPUARMState *env, void *vd, target_ulong addr, + uint32_t png, uint64_t desc) +{ + sve2p1_st1_c(env, vd, addr, png, desc, GETPC(), MO_8, + sve_st1bb_host, sve_st1bb_tlb); +} + +#define DO_ST1_2(NAME, ESZ) \ +void HELPER(sve2p1_##NAME##_le_c)(CPUARMState *env, void *vd, \ + target_ulong addr, uint32_t png, \ + uint64_t desc) \ +{ \ + sve2p1_st1_c(env, vd, addr, png, desc, GETPC(), ESZ, \ + sve_##NAME##_le_host, sve_##NAME##_le_tlb); \ +} \ +void HELPER(sve2p1_##NAME##_be_c)(CPUARMState *env, void *vd, \ + target_ulong addr, uint32_t png, \ + uint64_t desc) \ +{ \ + sve2p1_st1_c(env, vd, addr, png, desc, GETPC(), ESZ, \ + sve_##NAME##_be_host, sve_##NAME##_be_tlb); \ +} + +DO_ST1_2(st1hh, MO_16) +DO_ST1_2(st1ss, MO_32) +DO_ST1_2(st1dd, MO_64) + +#undef DO_ST1_2 + void HELPER(sve2_eor3)(void *vd, void *vn, void *vm, void *vk, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc) / 8; @@ -7393,7 +8482,7 @@ void HELPER(sve2_xar_s)(void *vd, void *vn, void *vm, uint32_t desc) } void HELPER(fmmla_s)(void *vd, void *vn, void *vm, void *va, - void *status, uint32_t desc) + float_status 
*status, uint32_t desc) { intptr_t s, opr_sz = simd_oprsz(desc) / (sizeof(float32) * 4); @@ -7431,7 +8520,7 @@ void HELPER(fmmla_s)(void *vd, void *vn, void *vm, void *va, } void HELPER(fmmla_d)(void *vd, void *vn, void *vm, void *va, - void *status, uint32_t desc) + float_status *status, uint32_t desc) { intptr_t s, opr_sz = simd_oprsz(desc) / (sizeof(float64) * 4); @@ -7467,7 +8556,8 @@ void HELPER(fmmla_d)(void *vd, void *vn, void *vm, void *va, } #define DO_FCVTNT(NAME, TYPEW, TYPEN, HW, HN, OP) \ -void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \ +void HELPER(NAME)(void *vd, void *vn, void *vg, \ + float_status *status, uint32_t desc) \ { \ intptr_t i = simd_oprsz(desc); \ uint64_t *g = vg; \ @@ -7488,7 +8578,8 @@ DO_FCVTNT(sve2_fcvtnt_sh, uint32_t, uint16_t, H1_4, H1_2, sve_f32_to_f16) DO_FCVTNT(sve2_fcvtnt_ds, uint64_t, uint32_t, H1_8, H1_4, float64_to_float32) #define DO_FCVTLT(NAME, TYPEW, TYPEN, HW, HN, OP) \ -void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \ +void HELPER(NAME)(void *vd, void *vn, void *vg, \ + float_status *status, uint32_t desc) \ { \ intptr_t i = simd_oprsz(desc); \ uint64_t *g = vg; \ @@ -7509,3 +8600,31 @@ DO_FCVTLT(sve2_fcvtlt_sd, uint64_t, uint32_t, H1_8, H1_4, float32_to_float64) #undef DO_FCVTLT #undef DO_FCVTNT + +void HELPER(pext)(void *vd, uint32_t png, uint32_t desc) +{ + int pl = FIELD_EX32(desc, PREDDESC, OPRSZ); + int vl = pl * 8; + unsigned v_esz = FIELD_EX32(desc, PREDDESC, ESZ); + int part = FIELD_EX32(desc, PREDDESC, DATA); + DecodeCounter p = decode_counter(png, vl, v_esz); + uint64_t mask = pred_esz_masks[v_esz + p.lg2_stride]; + ARMPredicateReg *d = vd; + + /* + * Convert from element count to byte count and adjust + * for the portion of the 4*VL counter to be extracted. + */ + int b_count = (p.count << v_esz) - vl * part; + + memset(d, 0, sizeof(*d)); + if (p.invert) { + if (b_count <= 0) { + do_whilel(vd, mask, vl, vl); + } else if (b_count < vl) { + do_whileg(vd, mask, vl - b_count, vl); + } + } else if (b_count > 0) { + do_whilel(vd, mask, MIN(b_count, vl), vl); + } +} diff --git a/target/arm/tcg/sve_ldst_internal.h b/target/arm/tcg/sve_ldst_internal.h index 4f159ec4adf..c67cda9d3b5 100644 --- a/target/arm/tcg/sve_ldst_internal.h +++ b/target/arm/tcg/sve_ldst_internal.h @@ -20,7 +20,7 @@ #ifndef TARGET_ARM_SVE_LDST_INTERNAL_H #define TARGET_ARM_SVE_LDST_INTERNAL_H -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" /* * Load one element into @vd + @reg_off from @host. 
@@ -116,6 +116,94 @@ DO_ST_PRIM_2(sd, H1_8, uint64_t, uint32_t, stl) DO_LD_PRIM_2(dd, H1_8, uint64_t, uint64_t, ldq) DO_ST_PRIM_2(dd, H1_8, uint64_t, uint64_t, stq) +#define DO_LD_PRIM_3(NAME, FUNC) \ + static inline void sve_##NAME##_host(void *vd, \ + intptr_t reg_off, void *host) \ + { sve_##FUNC##_host(vd, reg_off, host); \ + *(uint64_t *)(vd + reg_off + 8) = 0; } \ + static inline void sve_##NAME##_tlb(CPUARMState *env, void *vd, \ + intptr_t reg_off, target_ulong addr, uintptr_t ra) \ + { sve_##FUNC##_tlb(env, vd, reg_off, addr, ra); \ + *(uint64_t *)(vd + reg_off + 8) = 0; } + +DO_LD_PRIM_3(ld1squ_be, ld1sdu_be) +DO_LD_PRIM_3(ld1squ_le, ld1sdu_le) +DO_LD_PRIM_3(ld1dqu_be, ld1dd_be) +DO_LD_PRIM_3(ld1dqu_le, ld1dd_le) + +#define sve_st1sq_be_host sve_st1sd_be_host +#define sve_st1sq_le_host sve_st1sd_le_host +#define sve_st1sq_be_tlb sve_st1sd_be_tlb +#define sve_st1sq_le_tlb sve_st1sd_le_tlb + +#define sve_st1dq_be_host sve_st1dd_be_host +#define sve_st1dq_le_host sve_st1dd_le_host +#define sve_st1dq_be_tlb sve_st1dd_be_tlb +#define sve_st1dq_le_tlb sve_st1dd_le_tlb + +/* + * The ARMVectorReg elements are stored in host-endian 64-bit units. + * For 128-bit quantities, the sequence defined by the Elem[] pseudocode + * corresponds to storing the two 64-bit pieces in little-endian order. + */ +/* FIXME: Nothing in this file makes any effort at atomicity. */ + +static inline void sve_ld1qq_be_host(void *vd, intptr_t reg_off, void *host) +{ + sve_ld1dd_be_host(vd, reg_off + 8, host); + sve_ld1dd_be_host(vd, reg_off, host + 8); +} + +static inline void sve_ld1qq_le_host(void *vd, intptr_t reg_off, void *host) +{ + sve_ld1dd_le_host(vd, reg_off, host); + sve_ld1dd_le_host(vd, reg_off + 8, host + 8); +} + +static inline void +sve_ld1qq_be_tlb(CPUARMState *env, void *vd, intptr_t reg_off, + target_ulong addr, uintptr_t ra) +{ + sve_ld1dd_be_tlb(env, vd, reg_off + 8, addr, ra); + sve_ld1dd_be_tlb(env, vd, reg_off, addr + 8, ra); +} + +static inline void +sve_ld1qq_le_tlb(CPUARMState *env, void *vd, intptr_t reg_off, + target_ulong addr, uintptr_t ra) +{ + sve_ld1dd_le_tlb(env, vd, reg_off, addr, ra); + sve_ld1dd_le_tlb(env, vd, reg_off + 8, addr + 8, ra); +} + +static inline void sve_st1qq_be_host(void *vd, intptr_t reg_off, void *host) +{ + sve_st1dd_be_host(vd, reg_off + 8, host); + sve_st1dd_be_host(vd, reg_off, host + 8); +} + +static inline void sve_st1qq_le_host(void *vd, intptr_t reg_off, void *host) +{ + sve_st1dd_le_host(vd, reg_off, host); + sve_st1dd_le_host(vd, reg_off + 8, host + 8); +} + +static inline void +sve_st1qq_be_tlb(CPUARMState *env, void *vd, intptr_t reg_off, + target_ulong addr, uintptr_t ra) +{ + sve_st1dd_be_tlb(env, vd, reg_off + 8, addr, ra); + sve_st1dd_be_tlb(env, vd, reg_off, addr + 8, ra); +} + +static inline void +sve_st1qq_le_tlb(CPUARMState *env, void *vd, intptr_t reg_off, + target_ulong addr, uintptr_t ra) +{ + sve_st1dd_le_tlb(env, vd, reg_off, addr, ra); + sve_st1dd_le_tlb(env, vd, reg_off + 8, addr + 8, ra); +} + #undef DO_LD_TLB #undef DO_ST_TLB #undef DO_LD_HOST @@ -123,6 +211,7 @@ DO_ST_PRIM_2(dd, H1_8, uint64_t, uint64_t, stq) #undef DO_ST_PRIM_1 #undef DO_LD_PRIM_2 #undef DO_ST_PRIM_2 +#undef DO_LD_PRIM_3 /* * Resolve the guest virtual address to info->host and info->flags. diff --git a/target/arm/tcg/tlb-insns.c b/target/arm/tcg/tlb-insns.c new file mode 100644 index 00000000000..95c26c6d463 --- /dev/null +++ b/target/arm/tcg/tlb-insns.c @@ -0,0 +1,1306 @@ +/* + * Helpers for TLBI insns + * + * This code is licensed under the GNU GPL v2 or later. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "exec/cputlb.h" +#include "exec/target_page.h" +#include "cpu.h" +#include "internals.h" +#include "cpu-features.h" +#include "cpregs.h" + +/* Check for traps from EL1 due to HCR_EL2.TTLB. */ +static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) { + return CP_ACCESS_TRAP_EL2; + } + return CP_ACCESS_OK; +} + +/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBIS. */ +static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && + (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBIS))) { + return CP_ACCESS_TRAP_EL2; + } + return CP_ACCESS_OK; +} + +/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. */ +static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && + (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBOS))) { + return CP_ACCESS_TRAP_EL2; + } + return CP_ACCESS_OK; +} + +/* IS variants of TLB operations must affect all cores */ +static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_all_cpus_synced(cs); +} + +static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_all_cpus_synced(cs); +} + +static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); +} + +static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); +} + +/* + * Non-IS variants of TLB operations are upgraded to + * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to + * force broadcast of these operations. 
+ */ +static bool tlb_force_broadcast(CPUARMState *env) +{ + return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB); +} + +static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate all (TLBIALL) */ + CPUState *cs = env_cpu(env); + + if (tlb_force_broadcast(env)) { + tlb_flush_all_cpus_synced(cs); + } else { + tlb_flush(cs); + } +} + +static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ + CPUState *cs = env_cpu(env); + + value &= TARGET_PAGE_MASK; + if (tlb_force_broadcast(env)) { + tlb_flush_page_all_cpus_synced(cs, value); + } else { + tlb_flush_page(cs, value); + } +} + +static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate by ASID (TLBIASID) */ + CPUState *cs = env_cpu(env); + + if (tlb_force_broadcast(env)) { + tlb_flush_all_cpus_synced(cs); + } else { + tlb_flush(cs); + } +} + +static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ + CPUState *cs = env_cpu(env); + + value &= TARGET_PAGE_MASK; + if (tlb_force_broadcast(env)) { + tlb_flush_page_all_cpus_synced(cs, value); + } else { + tlb_flush_page(cs, value); + } +} + +static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); + + tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2); +} + +static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); + + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + ARMMMUIdxBit_E2); +} + +static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12; + + tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2); +} + +static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12; + + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2); +} + +static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_by_mmuidx(cs, alle1_tlbmask(env)); +} + +static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env)); +} + + +static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2); +} + +static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2); +} + +/* + * See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions + * Page D4-1736 (DDI0487A.b) + */ + +static int vae1_tlbmask(CPUARMState *env) +{ + uint64_t hcr = arm_hcr_el2_eff(env); + uint16_t mask; + + assert(arm_feature(env, ARM_FEATURE_AARCH64)); + + if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { + mask = ARMMMUIdxBit_E20_2 | + ARMMMUIdxBit_E20_2_PAN | + 
ARMMMUIdxBit_E20_0; + } else { + /* This is AArch64 only, so we don't need to touch the EL30_x TLBs */ + mask = ARMMMUIdxBit_E10_1 | + ARMMMUIdxBit_E10_1_PAN | + ARMMMUIdxBit_E10_0; + } + return mask; +} + +static int vae2_tlbmask(CPUARMState *env) +{ + uint64_t hcr = arm_hcr_el2_eff(env); + uint16_t mask; + + if (hcr & HCR_E2H) { + mask = ARMMMUIdxBit_E20_2 | + ARMMMUIdxBit_E20_2_PAN | + ARMMMUIdxBit_E20_0; + } else { + mask = ARMMMUIdxBit_E2; + } + return mask; +} + +/* Return 56 if TBI is enabled, 64 otherwise. */ +static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx, + uint64_t addr) +{ + uint64_t tcr = regime_tcr(env, mmu_idx); + int tbi = aa64_va_parameter_tbi(tcr, mmu_idx); + int select = extract64(addr, 55, 1); + + return (tbi >> select) & 1 ? 56 : 64; +} + +static int vae1_tlbbits(CPUARMState *env, uint64_t addr) +{ + uint64_t hcr = arm_hcr_el2_eff(env); + ARMMMUIdx mmu_idx; + + assert(arm_feature(env, ARM_FEATURE_AARCH64)); + + /* Only the regime of the mmu_idx below is significant. */ + if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { + mmu_idx = ARMMMUIdx_E20_0; + } else { + mmu_idx = ARMMMUIdx_E10_0; + } + + return tlbbits_for_regime(env, mmu_idx, addr); +} + +static int vae2_tlbbits(CPUARMState *env, uint64_t addr) +{ + uint64_t hcr = arm_hcr_el2_eff(env); + ARMMMUIdx mmu_idx; + + /* + * Only the regime of the mmu_idx below is significant. + * Regime EL2&0 has two ranges with separate TBI configuration, while EL2 + * only has one. + */ + if (hcr & HCR_E2H) { + mmu_idx = ARMMMUIdx_E20_2; + } else { + mmu_idx = ARMMMUIdx_E2; + } + + return tlbbits_for_regime(env, mmu_idx, addr); +} + +static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = vae1_tlbmask(env); + + tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); +} + +static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = vae1_tlbmask(env); + + if (tlb_force_broadcast(env)) { + tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); + } else { + tlb_flush_by_mmuidx(cs, mask); + } +} + +static int e2_tlbmask(CPUARMState *env) +{ + return (ARMMMUIdxBit_E20_0 | + ARMMMUIdxBit_E20_2 | + ARMMMUIdxBit_E20_2_PAN | + ARMMMUIdxBit_E2); +} + +static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = alle1_tlbmask(env); + + tlb_flush_by_mmuidx(cs, mask); +} + +static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = e2_tlbmask(env); + + tlb_flush_by_mmuidx(cs, mask); +} + +static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + CPUState *cs = CPU(cpu); + + tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3); +} + +static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = alle1_tlbmask(env); + + tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); +} + +static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = e2_tlbmask(env); + + tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); +} + +static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_by_mmuidx_all_cpus_synced(cs, 
ARMMMUIdxBit_E3); +} + +static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Invalidate by VA, EL2 + * Currently handles both VAE2 and VALE2, since we don't support + * flush-last-level-only. + */ + CPUState *cs = env_cpu(env); + int mask = vae2_tlbmask(env); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + int bits = vae2_tlbbits(env, pageaddr); + + tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits); +} + +static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Invalidate by VA, EL3 + * Currently handles both VAE3 and VALE3, since we don't support + * flush-last-level-only. + */ + ARMCPU *cpu = env_archcpu(env); + CPUState *cs = CPU(cpu); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3); +} + +static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = vae1_tlbmask(env); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + int bits = vae1_tlbbits(env, pageaddr); + + tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); +} + +static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Invalidate by VA, EL1&0 (AArch64 version). + * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, + * since we don't support flush-for-specific-ASID-only or + * flush-last-level-only. + */ + CPUState *cs = env_cpu(env); + int mask = vae1_tlbmask(env); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + int bits = vae1_tlbbits(env, pageaddr); + + if (tlb_force_broadcast(env)) { + tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); + } else { + tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits); + } +} + +static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = vae2_tlbmask(env); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + int bits = vae2_tlbbits(env, pageaddr); + + tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); +} + +static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr); + + tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, + ARMMMUIdxBit_E3, bits); +} + +static int ipas2e1_tlbmask(CPUARMState *env, int64_t value) +{ + /* + * The MSB of value is the NS field, which only applies if SEL2 + * is implemented and SCR_EL3.NS is not set (i.e. in secure mode). + */ + return (value >= 0 + && cpu_isar_feature(aa64_sel2, env_archcpu(env)) + && arm_is_secure_below_el3(env) + ? 
ARMMMUIdxBit_Stage2_S + : ARMMMUIdxBit_Stage2); +} + +static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = ipas2e1_tlbmask(env, value); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + if (tlb_force_broadcast(env)) { + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); + } else { + tlb_flush_page_by_mmuidx(cs, pageaddr, mask); + } +} + +static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + int mask = ipas2e1_tlbmask(env, value); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); +} + +static const ARMCPRegInfo tlbi_not_v7_cp_reginfo[] = { + /* + * MMU TLB control. Note that the wildcarding means we cover not just + * the unified TLB ops but also the dside/iside/inner-shareable variants. + */ + { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, + .type = ARM_CP_NO_RAW }, + { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, + .type = ARM_CP_NO_RAW }, + { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, + .type = ARM_CP_NO_RAW }, + { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, + .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, + .type = ARM_CP_NO_RAW }, +}; + +static const ARMCPRegInfo tlbi_v7_cp_reginfo[] = { + /* 32 bit ITLB invalidates */ + { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiall_write }, + { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimva_write }, + { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiasid_write }, + /* 32 bit DTLB invalidates */ + { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiall_write }, + { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimva_write }, + { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiasid_write }, + /* 32 bit TLB invalidates */ + { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiall_write }, + { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimva_write }, + { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbiasid_write }, + { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimvaa_write }, +}; + +static const ARMCPRegInfo tlbi_v7mp_cp_reginfo[] = { + /* 32 bit TLB 
invalidates, Inner Shareable */ + { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, + .writefn = tlbiall_is_write }, + { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, + .writefn = tlbimva_is_write }, + { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, + .writefn = tlbiasid_is_write }, + { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, + .writefn = tlbimvaa_is_write }, +}; + +static const ARMCPRegInfo tlbi_v8_cp_reginfo[] = { + /* AArch32 TLB invalidate last level of translation table walk */ + { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, + .writefn = tlbimva_is_write }, + { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, + .writefn = tlbimvaa_is_write }, + { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimva_write }, + { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, + .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, + .writefn = tlbimvaa_write }, + { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbimva_hyp_write }, + { .name = "TLBIMVALHIS", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbimva_hyp_is_write }, + { .name = "TLBIIPAS2", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiipas2_hyp_write }, + { .name = "TLBIIPAS2IS", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiipas2is_hyp_write }, + { .name = "TLBIIPAS2L", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiipas2_hyp_write }, + { .name = "TLBIIPAS2LIS", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiipas2is_hyp_write }, + /* AArch64 TLBI operations */ + { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, + .access = PL1_W, .accessfn = access_ttlbis, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVMALLE1IS, + .writefn = tlbi_aa64_vmalle1is_write }, + { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, + .access = PL1_W, .accessfn = access_ttlbis, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVAE1IS, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, + .access = PL1_W, .accessfn = access_ttlbis, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIASIDE1IS, + .writefn = tlbi_aa64_vmalle1is_write }, + { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, + .access = PL1_W, .accessfn = access_ttlbis, + .type = 
ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVAAE1IS, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, + .access = PL1_W, .accessfn = access_ttlbis, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVALE1IS, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, + .access = PL1_W, .accessfn = access_ttlbis, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVAALE1IS, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, + .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVMALLE1, + .writefn = tlbi_aa64_vmalle1_write }, + { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, + .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVAE1, + .writefn = tlbi_aa64_vae1_write }, + { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, + .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIASIDE1, + .writefn = tlbi_aa64_vmalle1_write }, + { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, + .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVAAE1, + .writefn = tlbi_aa64_vae1_write }, + { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, + .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVALE1, + .writefn = tlbi_aa64_vae1_write }, + { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, + .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVAALE1, + .writefn = tlbi_aa64_vae1_write }, + { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_ipas2e1is_write }, + { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_ipas2e1is_write }, + { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_alle1is_write }, + { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_alle1is_write }, + { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_ipas2e1_write }, + { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_ipas2e1_write }, + { .name = "TLBI_ALLE1", 
.state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_alle1_write }, + { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_alle1is_write }, +}; + +static const ARMCPRegInfo tlbi_el2_cp_reginfo[] = { + { .name = "TLBIALLNSNH", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiall_nsnh_write }, + { .name = "TLBIALLNSNHIS", + .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiall_nsnh_is_write }, + { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiall_hyp_write }, + { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbiall_hyp_is_write }, + { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbimva_hyp_write }, + { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, + .type = ARM_CP_NO_RAW, .access = PL2_W, + .writefn = tlbimva_hyp_is_write }, + { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_alle2_write }, + { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_vae2_write }, + { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_vae2_write }, + { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_alle2is_write }, + { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_vae2is_write }, + { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_vae2is_write }, +}; + +static const ARMCPRegInfo tlbi_el3_cp_reginfo[] = { + { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_alle3is_write }, + { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_vae3is_write }, + { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_vae3is_write }, + { .name = 
"TLBI_ALLE3", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_alle3_write }, + { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_vae3_write }, + { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_vae3_write }, +}; + +typedef struct { + uint64_t base; + uint64_t length; +} TLBIRange; + +static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg) +{ + /* + * Note that the TLBI range TG field encoding differs from both + * TG0 and TG1 encodings. + */ + switch (tg) { + case 1: + return Gran4K; + case 2: + return Gran16K; + case 3: + return Gran64K; + default: + return GranInvalid; + } +} + +static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx, + uint64_t value) +{ + unsigned int page_size_granule, page_shift, num, scale, exponent; + /* Extract one bit to represent the va selector in use. */ + uint64_t select = sextract64(value, 36, 1); + ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true, false); + TLBIRange ret = { }; + ARMGranuleSize gran; + + page_size_granule = extract64(value, 46, 2); + gran = tlbi_range_tg_to_gran_size(page_size_granule); + + /* The granule encoded in value must match the granule in use. */ + if (gran != param.gran) { + qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n", + page_size_granule); + return ret; + } + + page_shift = arm_granule_bits(gran); + num = extract64(value, 39, 5); + scale = extract64(value, 44, 2); + exponent = (5 * scale) + 1; + + ret.length = (num + 1) << (exponent + page_shift); + + if (param.select) { + ret.base = sextract64(value, 0, 37); + } else { + ret.base = extract64(value, 0, 37); + } + if (param.ds) { + /* + * With DS=1, BaseADDR is always shifted 16 so that it is able + * to address all 52 va bits. The input address is perforce + * aligned on a 64k boundary regardless of translation granule. + */ + page_shift = 16; + } + ret.base <<= page_shift; + + return ret; +} + +static void do_rvae_write(CPUARMState *env, uint64_t value, + int idxmap, bool synced) +{ + ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap); + TLBIRange range; + int bits; + + range = tlbi_aa64_get_range(env, one_idx, value); + bits = tlbbits_for_regime(env, one_idx, range.base); + + if (synced) { + tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env), + range.base, + range.length, + idxmap, + bits); + } else { + tlb_flush_range_by_mmuidx(env_cpu(env), range.base, + range.length, idxmap, bits); + } +} + +static void tlbi_aa64_rvae1_write(CPUARMState *env, + const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Invalidate by VA range, EL1&0. + * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1, + * since we don't support flush-for-specific-ASID-only or + * flush-last-level-only. + */ + + do_rvae_write(env, value, vae1_tlbmask(env), + tlb_force_broadcast(env)); +} + +static void tlbi_aa64_rvae1is_write(CPUARMState *env, + const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Invalidate by VA range, Inner/Outer Shareable EL1&0. 
+ * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS, + * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support + * flush-for-specific-ASID-only, flush-last-level-only or inner/outer + * shareable specific flushes. + */ + + do_rvae_write(env, value, vae1_tlbmask(env), true); +} + +static void tlbi_aa64_rvae2_write(CPUARMState *env, + const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Invalidate by VA range, EL2. + * Currently handles all of RVAE2 and RVALE2, + * since we don't support flush-for-specific-ASID-only or + * flush-last-level-only. + */ + + do_rvae_write(env, value, vae2_tlbmask(env), + tlb_force_broadcast(env)); + + +} + +static void tlbi_aa64_rvae2is_write(CPUARMState *env, + const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Invalidate by VA range, Inner/Outer Shareable, EL2. + * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS, + * since we don't support flush-for-specific-ASID-only, + * flush-last-level-only or inner/outer shareable specific flushes. + */ + + do_rvae_write(env, value, vae2_tlbmask(env), true); + +} + +static void tlbi_aa64_rvae3_write(CPUARMState *env, + const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Invalidate by VA range, EL3. + * Currently handles all of RVAE3 and RVALE3, + * since we don't support flush-for-specific-ASID-only or + * flush-last-level-only. + */ + + do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env)); +} + +static void tlbi_aa64_rvae3is_write(CPUARMState *env, + const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Invalidate by VA range, EL3, Inner/Outer Shareable. + * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS, + * since we don't support flush-for-specific-ASID-only, + * flush-last-level-only or inner/outer specific flushes. 
+ */ + + do_rvae_write(env, value, ARMMMUIdxBit_E3, true); +} + +static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + do_rvae_write(env, value, ipas2e1_tlbmask(env, value), + tlb_force_broadcast(env)); +} + +static void tlbi_aa64_ripas2e1is_write(CPUARMState *env, + const ARMCPRegInfo *ri, + uint64_t value) +{ + do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true); +} + +static const ARMCPRegInfo tlbirange_reginfo[] = { + { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1, + .access = PL1_W, .accessfn = access_ttlbis, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIRVAE1IS, + .writefn = tlbi_aa64_rvae1is_write }, + { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3, + .access = PL1_W, .accessfn = access_ttlbis, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIRVAAE1IS, + .writefn = tlbi_aa64_rvae1is_write }, + { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5, + .access = PL1_W, .accessfn = access_ttlbis, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIRVALE1IS, + .writefn = tlbi_aa64_rvae1is_write }, + { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7, + .access = PL1_W, .accessfn = access_ttlbis, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIRVAALE1IS, + .writefn = tlbi_aa64_rvae1is_write }, + { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, + .access = PL1_W, .accessfn = access_ttlbos, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIRVAE1OS, + .writefn = tlbi_aa64_rvae1is_write }, + { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3, + .access = PL1_W, .accessfn = access_ttlbos, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIRVAAE1OS, + .writefn = tlbi_aa64_rvae1is_write }, + { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5, + .access = PL1_W, .accessfn = access_ttlbos, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIRVALE1OS, + .writefn = tlbi_aa64_rvae1is_write }, + { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7, + .access = PL1_W, .accessfn = access_ttlbos, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIRVAALE1OS, + .writefn = tlbi_aa64_rvae1is_write }, + { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, + .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIRVAE1, + .writefn = tlbi_aa64_rvae1_write }, + { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3, + .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIRVAAE1, + .writefn = tlbi_aa64_rvae1_write }, + { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5, + .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIRVALE1, + .writefn = tlbi_aa64_rvae1_write }, + { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 
7, + .access = PL1_W, .accessfn = access_ttlb, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIRVAALE1, + .writefn = tlbi_aa64_rvae1_write }, + { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_ripas2e1is_write }, + { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_ripas2e1is_write }, + { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_rvae2is_write }, + { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_rvae2is_write }, + { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_ripas2e1_write }, + { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_ripas2e1_write }, + { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_rvae2is_write }, + { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_rvae2is_write }, + { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_rvae2_write }, + { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_rvae2_write }, + { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_rvae3is_write }, + { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_rvae3is_write }, + { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_rvae3is_write }, + { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_rvae3is_write }, + { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = 
tlbi_aa64_rvae3_write }, + { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_rvae3_write }, +}; + +static const ARMCPRegInfo tlbios_reginfo[] = { + { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0, + .access = PL1_W, .accessfn = access_ttlbos, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVMALLE1OS, + .writefn = tlbi_aa64_vmalle1is_write }, + { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1, + .fgt = FGT_TLBIVAE1OS, + .access = PL1_W, .accessfn = access_ttlbos, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2, + .access = PL1_W, .accessfn = access_ttlbos, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIASIDE1OS, + .writefn = tlbi_aa64_vmalle1is_write }, + { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3, + .access = PL1_W, .accessfn = access_ttlbos, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVAAE1OS, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5, + .access = PL1_W, .accessfn = access_ttlbos, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVALE1OS, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7, + .access = PL1_W, .accessfn = access_ttlbos, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .fgt = FGT_TLBIVAALE1OS, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_alle2is_write }, + { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_vae2is_write }, + { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_alle1is_write }, + { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5, + .access = PL2_W, + .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS | ARM_CP_EL3_NO_EL2_UNDEF, + .writefn = tlbi_aa64_vae2is_write }, + { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_alle1is_write }, + { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0, + .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_ADD_TLBI_NXS }, + { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3, + .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_ADD_TLBI_NXS }, + { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4, + 
.access = PL2_W, .type = ARM_CP_NOP | ARM_CP_ADD_TLBI_NXS }, + { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7, + .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_ADD_TLBI_NXS }, + { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_alle3is_write }, + { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_vae3is_write }, + { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_ADD_TLBI_NXS, + .writefn = tlbi_aa64_vae3is_write }, +}; + +static void tlbi_aa64_paall_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush(cs); +} + +static void tlbi_aa64_paallos_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = env_cpu(env); + + tlb_flush_all_cpus_synced(cs); +} + +static const ARMCPRegInfo tlbi_rme_reginfo[] = { + { .name = "TLBI_PAALL", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_paall_write }, + { .name = "TLBI_PAALLOS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_paallos_write }, + /* + * QEMU does not have a way to invalidate by physical address, thus + * invalidating a range of physical addresses is accomplished by + * flushing all tlb entries in the outer shareable domain, + * just like PAALLOS. + */ + { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_paallos_write }, + { .name = "TLBI_RPAOS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_paallos_write }, +}; + +void define_tlb_insn_regs(ARMCPU *cpu) +{ + CPUARMState *env = &cpu->env; + + if (!arm_feature(env, ARM_FEATURE_V7)) { + define_arm_cp_regs(cpu, tlbi_not_v7_cp_reginfo); + } else { + define_arm_cp_regs(cpu, tlbi_v7_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V7MP) && + !arm_feature(env, ARM_FEATURE_PMSA)) { + define_arm_cp_regs(cpu, tlbi_v7mp_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V8)) { + define_arm_cp_regs(cpu, tlbi_v8_cp_reginfo); + } + /* + * We retain the existing logic for when to register these TLBI + * ops (i.e. matching the condition for el2_cp_reginfo[] in + * helper.c), but we will be able to simplify this later. 
+ */ + if (arm_feature(env, ARM_FEATURE_EL2)) { + define_arm_cp_regs(cpu, tlbi_el2_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_EL3)) { + define_arm_cp_regs(cpu, tlbi_el3_cp_reginfo); + } + if (cpu_isar_feature(aa64_tlbirange, cpu)) { + define_arm_cp_regs(cpu, tlbirange_reginfo); + } + if (cpu_isar_feature(aa64_tlbios, cpu)) { + define_arm_cp_regs(cpu, tlbios_reginfo); + } + if (cpu_isar_feature(aa64_rme, cpu)) { + define_arm_cp_regs(cpu, tlbi_rme_reginfo); + } +} diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c index 885bf4ec142..23c72a99f5c 100644 --- a/target/arm/tcg/tlb_helper.c +++ b/target/arm/tcg/tlb_helper.c @@ -9,9 +9,9 @@ #include "cpu.h" #include "internals.h" #include "cpu-features.h" -#include "exec/exec-all.h" -#include "exec/helper-proto.h" +#define HELPER_H "tcg/helper.h" +#include "exec/helper-proto.h.inc" /* * Returns true if the stage 1 translation regime is using LPAE format page @@ -277,7 +277,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi); } -void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc) +void helper_exception_pc_alignment(CPUARMState *env, vaddr pc) { ARMMMUFaultInfo fi = { .type = ARMFault_Alignment }; int target_el = exception_target_el(env); @@ -318,14 +318,13 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi); } -bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) +bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr address, + MMUAccessType access_type, int mmu_idx, + MemOp memop, int size, bool probe, uintptr_t ra) { ARMCPU *cpu = ARM_CPU(cs); GetPhysAddrResult res = {}; ARMMMUFaultInfo local_fi, *fi; - int ret; /* * Allow S1_ptw_translate to see any fault generated here. @@ -339,37 +338,27 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } /* - * Walk the page table and (if the mapping exists) add the page - * to the TLB. On success, return true. Otherwise, if probing, - * return false. Otherwise populate fsr with ARM DFSR/IFSR fault - * register format, and signal the fault. + * Per R_XCHFJ, alignment fault not due to memory type has + * highest precedence. Otherwise, walk the page table and + * and collect the page description. */ - ret = get_phys_addr(&cpu->env, address, access_type, - core_to_arm_mmu_idx(&cpu->env, mmu_idx), - &res, fi); - if (likely(!ret)) { - /* - * Map a single [sub]page. Regions smaller than our declared - * target page size are handled specially, so for those we - * pass in the exact addresses. - */ - if (res.f.lg_page_size >= TARGET_PAGE_BITS) { - res.f.phys_addr &= TARGET_PAGE_MASK; - address &= TARGET_PAGE_MASK; - } - + if (address & ((1 << memop_alignment_bits(memop)) - 1)) { + fi->type = ARMFault_Alignment; + } else if (!get_phys_addr(&cpu->env, address, access_type, memop, + core_to_arm_mmu_idx(&cpu->env, mmu_idx), + &res, fi)) { res.f.extra.arm.pte_attrs = res.cacheattrs.attrs; res.f.extra.arm.shareability = res.cacheattrs.shareability; - - tlb_set_page_full(cs, mmu_idx, address, &res.f); + *out = res.f; return true; - } else if (probe) { + } + if (probe) { return false; - } else { - /* now we have a real cpu fault */ - cpu_restore_state(cs, retaddr); - arm_deliver_fault(cpu, address, access_type, mmu_idx, fi); } + + /* Now we have a real cpu fault. 
*/ + cpu_restore_state(cs, ra); + arm_deliver_fault(cpu, address, access_type, mmu_idx, fi); } #else void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr, diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index 148be2826ec..dbf47595dbe 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -17,8 +17,7 @@ * License along with this library; if not, see . */ #include "qemu/osdep.h" - -#include "exec/exec-all.h" +#include "exec/target_page.h" #include "translate.h" #include "translate-a64.h" #include "qemu/log.h" @@ -75,17 +74,6 @@ static int scale_by_log2_tag_granule(DisasContext *s, int x) #include "decode-sme-fa64.c.inc" #include "decode-a64.c.inc" -/* Table based decoder typedefs - used when the relevant bits for decode - * are too awkwardly scattered across the instruction (eg SIMD). - */ -typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn); - -typedef struct AArch64DecodeTable { - uint32_t pattern; - uint32_t mask; - AArch64DecodeFn *disas_fn; -} AArch64DecodeTable; - /* initialize TCG globals. */ void a64_translate_init(void) { @@ -294,7 +282,7 @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr, desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); - desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop)); + desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(memop)); desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1); ret = tcg_temp_new_i64(); @@ -326,7 +314,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); - desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop)); + desc = FIELD_DP32(desc, MTEDESC, ALIGN, memop_alignment_bits(single_mop)); desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1); ret = tcg_temp_new_i64(); @@ -445,12 +433,6 @@ static void gen_rebuild_hflags(DisasContext *s) gen_helper_rebuild_hflags_a64(tcg_env, tcg_constant_i32(s->current_el)); } -static void gen_exception_internal(int excp) -{ - assert(excp_is_internal(excp)); - gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp)); -} - static void gen_exception_internal_insn(DisasContext *s, int excp) { gen_a64_update_pc(s, 0); @@ -628,7 +610,16 @@ static TCGv_i32 read_fp_hreg(DisasContext *s, int reg) return v; } -/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64). +static void clear_vec(DisasContext *s, int rd) +{ + unsigned ofs = fp_reg_offset(s, rd, MO_64); + unsigned vsz = vec_full_reg_size(s); + + tcg_gen_gvec_dup_imm(MO_64, ofs, vsz, vsz, 0); +} + +/* + * Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64). * If SVE is not enabled, then there are only 128 bits in the vector. */ static void clear_vec_high(DisasContext *s, bool is_q, int rd) @@ -656,6 +647,68 @@ static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v) write_fp_dreg(s, reg, tmp); } +/* + * Write a double result to 128 bit vector register reg, honouring FPCR.NEP: + * - if FPCR.NEP == 0, clear the high elements of reg + * - if FPCR.NEP == 1, set the high elements of reg from mergereg + * (i.e. merge the result with those high elements) + * In either case, SVE register bits above 128 are zeroed (per R_WKYLB). 
+ */ +static void write_fp_dreg_merging(DisasContext *s, int reg, int mergereg, + TCGv_i64 v) +{ + if (!s->fpcr_nep) { + write_fp_dreg(s, reg, v); + return; + } + + /* + * Move from mergereg to reg; this sets the high elements and + * clears the bits above 128 as a side effect. + */ + tcg_gen_gvec_mov(MO_64, vec_full_reg_offset(s, reg), + vec_full_reg_offset(s, mergereg), + 16, vec_full_reg_size(s)); + tcg_gen_st_i64(v, tcg_env, vec_full_reg_offset(s, reg)); +} + +/* + * Write a single-prec result, but only clear the higher elements + * of the destination register if FPCR.NEP is 0; otherwise preserve them. + */ +static void write_fp_sreg_merging(DisasContext *s, int reg, int mergereg, + TCGv_i32 v) +{ + if (!s->fpcr_nep) { + write_fp_sreg(s, reg, v); + return; + } + + tcg_gen_gvec_mov(MO_64, vec_full_reg_offset(s, reg), + vec_full_reg_offset(s, mergereg), + 16, vec_full_reg_size(s)); + tcg_gen_st_i32(v, tcg_env, fp_reg_offset(s, reg, MO_32)); +} + +/* + * Write a half-prec result, but only clear the higher elements + * of the destination register if FPCR.NEP is 0; otherwise preserve them. + * The caller must ensure that the top 16 bits of v are zero. + */ +static void write_fp_hreg_merging(DisasContext *s, int reg, int mergereg, + TCGv_i32 v) +{ + if (!s->fpcr_nep) { + write_fp_sreg(s, reg, v); + return; + } + + tcg_gen_gvec_mov(MO_64, vec_full_reg_offset(s, reg), + vec_full_reg_offset(s, mergereg), + 16, vec_full_reg_size(s)); + tcg_gen_st16_i32(v, tcg_env, fp_reg_offset(s, reg, MO_16)); +} + /* Expand a 2-operand AdvSIMD vector operation using an expander function. */ static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn, GVecGen2Fn *gvec_fn, int vece) @@ -714,10 +767,10 @@ static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd, * an out-of-line helper. */ static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn, - int rm, bool is_fp16, int data, + int rm, ARMFPStatusFlavour fpsttype, int data, gen_helper_gvec_3_ptr *fn) { - TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR); + TCGv_ptr fpst = fpstatus_ptr(fpsttype); tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), fpst, @@ -735,15 +788,32 @@ static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn, is_q ? 16 : 8, vec_full_reg_size(s), data, fn); } +/* + * Expand a 4-operand operation using an out-of-line helper that takes + * a pointer to the CPU env. + */ +static void gen_gvec_op4_env(DisasContext *s, bool is_q, int rd, int rn, + int rm, int ra, int data, + gen_helper_gvec_4_ptr *fn) +{ + tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), + vec_full_reg_offset(s, rm), + vec_full_reg_offset(s, ra), + tcg_env, + is_q ? 16 : 8, vec_full_reg_size(s), data, fn); +} + /* * Expand a 4-operand + fpstatus pointer + simd data value operation using * an out-of-line helper. */ static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn, - int rm, int ra, bool is_fp16, int data, + int rm, int ra, ARMFPStatusFlavour fpsttype, + int data, gen_helper_gvec_4_ptr *fn) { - TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR); + TCGv_ptr fpst = fpstatus_ptr(fpsttype); tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), @@ -751,6 +821,111 @@ static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn, is_q ? 
16 : 8, vec_full_reg_size(s), data, fn); } +/* + * When FPCR.AH == 1, NEG and ABS do not flip the sign bit of a NaN. + * These functions implement + * d = floatN_is_any_nan(s) ? s : floatN_chs(s) + * which for float32 is + * d = (s & ~(1 << 31)) > 0x7f800000UL) ? s : (s ^ (1 << 31)) + * and similarly for the other float sizes. + */ +static void gen_vfp_ah_negh(TCGv_i32 d, TCGv_i32 s) +{ + TCGv_i32 abs_s = tcg_temp_new_i32(), chs_s = tcg_temp_new_i32(); + + gen_vfp_negh(chs_s, s); + gen_vfp_absh(abs_s, s); + tcg_gen_movcond_i32(TCG_COND_GTU, d, + abs_s, tcg_constant_i32(0x7c00), + s, chs_s); +} + +static void gen_vfp_ah_negs(TCGv_i32 d, TCGv_i32 s) +{ + TCGv_i32 abs_s = tcg_temp_new_i32(), chs_s = tcg_temp_new_i32(); + + gen_vfp_negs(chs_s, s); + gen_vfp_abss(abs_s, s); + tcg_gen_movcond_i32(TCG_COND_GTU, d, + abs_s, tcg_constant_i32(0x7f800000UL), + s, chs_s); +} + +static void gen_vfp_ah_negd(TCGv_i64 d, TCGv_i64 s) +{ + TCGv_i64 abs_s = tcg_temp_new_i64(), chs_s = tcg_temp_new_i64(); + + gen_vfp_negd(chs_s, s); + gen_vfp_absd(abs_s, s); + tcg_gen_movcond_i64(TCG_COND_GTU, d, + abs_s, tcg_constant_i64(0x7ff0000000000000ULL), + s, chs_s); +} + +/* + * These functions implement + * d = floatN_is_any_nan(s) ? s : floatN_abs(s) + * which for float32 is + * d = (s & ~(1 << 31)) > 0x7f800000UL) ? s : (s & ~(1 << 31)) + * and similarly for the other float sizes. + */ +static void gen_vfp_ah_absh(TCGv_i32 d, TCGv_i32 s) +{ + TCGv_i32 abs_s = tcg_temp_new_i32(); + + gen_vfp_absh(abs_s, s); + tcg_gen_movcond_i32(TCG_COND_GTU, d, + abs_s, tcg_constant_i32(0x7c00), + s, abs_s); +} + +static void gen_vfp_ah_abss(TCGv_i32 d, TCGv_i32 s) +{ + TCGv_i32 abs_s = tcg_temp_new_i32(); + + gen_vfp_abss(abs_s, s); + tcg_gen_movcond_i32(TCG_COND_GTU, d, + abs_s, tcg_constant_i32(0x7f800000UL), + s, abs_s); +} + +static void gen_vfp_ah_absd(TCGv_i64 d, TCGv_i64 s) +{ + TCGv_i64 abs_s = tcg_temp_new_i64(); + + gen_vfp_absd(abs_s, s); + tcg_gen_movcond_i64(TCG_COND_GTU, d, + abs_s, tcg_constant_i64(0x7ff0000000000000ULL), + s, abs_s); +} + +static void gen_vfp_maybe_ah_negh(DisasContext *dc, TCGv_i32 d, TCGv_i32 s) +{ + if (dc->fpcr_ah) { + gen_vfp_ah_negh(d, s); + } else { + gen_vfp_negh(d, s); + } +} + +static void gen_vfp_maybe_ah_negs(DisasContext *dc, TCGv_i32 d, TCGv_i32 s) +{ + if (dc->fpcr_ah) { + gen_vfp_ah_negs(d, s); + } else { + gen_vfp_negs(d, s); + } +} + +static void gen_vfp_maybe_ah_negd(DisasContext *dc, TCGv_i64 d, TCGv_i64 s) +{ + if (dc->fpcr_ah) { + gen_vfp_ah_negd(d, s); + } else { + gen_vfp_negd(d, s); + } +} + /* Set ZF and NF based on a 64 bit result. This is alas fiddlier * than the 32 bit equivalent. 
*/ @@ -894,11 +1069,9 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) TCGv_i64 cf_64 = tcg_temp_new_i64(); TCGv_i64 vf_64 = tcg_temp_new_i64(); TCGv_i64 tmp = tcg_temp_new_i64(); - TCGv_i64 zero = tcg_constant_i64(0); tcg_gen_extu_i32_i64(cf_64, cpu_CF); - tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero); - tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero); + tcg_gen_addcio_i64(result, cf_64, t0, t1, cf_64); tcg_gen_extrl_i64_i32(cpu_CF, cf_64); gen_set_NZ64(result); @@ -912,12 +1085,10 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) TCGv_i32 t0_32 = tcg_temp_new_i32(); TCGv_i32 t1_32 = tcg_temp_new_i32(); TCGv_i32 tmp = tcg_temp_new_i32(); - TCGv_i32 zero = tcg_constant_i32(0); tcg_gen_extrl_i64_i32(t0_32, t0); tcg_gen_extrl_i64_i32(t1_32, t1); - tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero); - tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero); + tcg_gen_addcio_i32(cpu_NF, cpu_CF, t0_32, t1_32, cpu_CF); tcg_gen_mov_i32(cpu_ZF, cpu_NF); tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32); @@ -1199,22 +1370,19 @@ static bool fp_access_check_only(DisasContext *s) { if (s->fp_excp_el) { assert(!s->fp_access_checked); - s->fp_access_checked = true; + s->fp_access_checked = -1; gen_exception_insn_el(s, 0, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false, 0), s->fp_excp_el); return false; } - s->fp_access_checked = true; + s->fp_access_checked = 1; return true; } -static bool fp_access_check(DisasContext *s) +static bool nonstreaming_check(DisasContext *s) { - if (!fp_access_check_only(s)) { - return false; - } if (s->sme_trap_nonstreaming && s->is_nonstreaming) { gen_exception_insn(s, 0, EXCP_UDEF, syn_smetrap(SME_ET_Streaming, false)); @@ -1223,6 +1391,54 @@ static bool fp_access_check(DisasContext *s) return true; } +static bool fp_access_check(DisasContext *s) +{ + return fp_access_check_only(s) && nonstreaming_check(s); +} + +/* + * Return <0 for non-supported element sizes, with MO_16 controlled by + * FEAT_FP16; return 0 for fp disabled; otherwise return >0 for success. + */ +static int fp_access_check_scalar_hsd(DisasContext *s, MemOp esz) +{ + switch (esz) { + case MO_64: + case MO_32: + break; + case MO_16: + if (!dc_isar_feature(aa64_fp16, s)) { + return -1; + } + break; + default: + return -1; + } + return fp_access_check(s); +} + +/* Likewise, but vector MO_64 must have two elements. */ +static int fp_access_check_vector_hsd(DisasContext *s, bool is_q, MemOp esz) +{ + switch (esz) { + case MO_64: + if (!is_q) { + return -1; + } + break; + case MO_32: + break; + case MO_16: + if (!dc_isar_feature(aa64_fp16, s)) { + return -1; + } + break; + default: + return -1; + } + return fp_access_check(s); +} + /* * Check that SVE access is enabled. If it is, return true. * If not, emit code to generate an appropriate exception and return false. @@ -1230,24 +1446,34 @@ static bool fp_access_check(DisasContext *s) */ bool sve_access_check(DisasContext *s) { - if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) { - assert(dc_isar_feature(aa64_sme, s)); - if (!sme_sm_enabled_check(s)) { - goto fail_exit; + if (dc_isar_feature(aa64_sme, s)) { + bool ret; + + if (s->pstate_sm) { + ret = sme_enabled_check(s); + } else if (dc_isar_feature(aa64_sve, s)) { + goto continue_sve; + } else { + ret = sme_sm_enabled_check(s); + } + if (ret) { + ret = nonstreaming_check(s); } - } else if (s->sve_excp_el) { + s->sve_access_checked = (ret ? 
1 : -1); + return ret; + } + + continue_sve: + if (s->sve_excp_el) { + /* Assert that we only raise one exception per instruction. */ + assert(!s->sve_access_checked); gen_exception_insn_el(s, 0, EXCP_UDEF, syn_sve_access_trap(), s->sve_excp_el); - goto fail_exit; + s->sve_access_checked = -1; + return false; } - s->sve_access_checked = true; + s->sve_access_checked = 1; return fp_access_check(s); - - fail_exit: - /* Assert that we only raise one exception per instruction. */ - assert(!s->sve_access_checked); - s->sve_access_checked = true; - return false; } /* @@ -1274,9 +1500,11 @@ bool sme_enabled_check(DisasContext *s) * to be zero when fp_excp_el has priority. This is because we need * sme_excp_el by itself for cpregs access checks. */ - if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) { - s->fp_access_checked = true; - return sme_access_check(s); + if (s->sme_excp_el + && (!s->fp_excp_el || s->sme_excp_el <= s->fp_excp_el)) { + bool ret = sme_access_check(s); + s->fp_access_checked = (ret ? 1 : -1); + return ret; } return fp_access_check_only(s); } @@ -1397,31 +1625,6 @@ static inline void gen_check_sp_alignment(DisasContext *s) */ } -/* - * This provides a simple table based table lookup decoder. It is - * intended to be used when the relevant bits for decode are too - * awkwardly placed and switch/if based logic would be confusing and - * deeply nested. Since it's a linear search through the table, tables - * should be kept small. - * - * It returns the first handler where insn & mask == pattern, or - * NULL if there is no match. - * The table is terminated by an empty mask (i.e. 0) - */ -static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table, - uint32_t insn) -{ - const AArch64DecodeTable *tptr = table; - - while (tptr->mask) { - if ((insn & tptr->mask) == tptr->pattern) { - return tptr->disas_fn; - } - tptr++; - } - return NULL; -} - /* * The instruction disassembly implemented here matches * the instruction encoding classifications in chapter C4 @@ -1507,7 +1710,14 @@ static void set_btype_for_br(DisasContext *s, int rn) { if (dc_isar_feature(aa64_bti, s)) { /* BR to {x16,x17} or !guard -> 1, else 3. */ - set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 
1 : 3); + if (rn == 16 || rn == 17) { + set_btype(s, 1); + } else { + TCGv_i64 pc = tcg_temp_new_i64(); + gen_pc_plus_diff(s, pc, 0); + gen_helper_guarded_page_br(tcg_env, pc); + s->btype = -1; + } } } @@ -1521,8 +1731,8 @@ static void set_btype_for_blr(DisasContext *s) static bool trans_BR(DisasContext *s, arg_r *a) { - gen_a64_set_pc(s, cpu_reg(s, a->rn)); set_btype_for_br(s, a->rn); + gen_a64_set_pc(s, cpu_reg(s, a->rn)); s->base.is_jmp = DISAS_JUMP; return true; } @@ -1581,8 +1791,8 @@ static bool trans_BRAZ(DisasContext *s, arg_braz *a) } dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m); - gen_a64_set_pc(s, dst); set_btype_for_br(s, a->rn); + gen_a64_set_pc(s, dst); s->base.is_jmp = DISAS_JUMP; return true; } @@ -1613,6 +1823,10 @@ static bool trans_RETA(DisasContext *s, arg_reta *a) { TCGv_i64 dst; + if (!dc_isar_feature(aa64_pauth, s)) { + return false; + } + dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m); gen_a64_set_pc(s, dst); s->base.is_jmp = DISAS_JUMP; @@ -1936,6 +2150,15 @@ static bool trans_DSB_DMB(DisasContext *s, arg_DSB_DMB *a) return true; } +static bool trans_DSB_nXS(DisasContext *s, arg_DSB_nXS *a) +{ + if (!dc_isar_feature(aa64_xs, s)) { + return false; + } + tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL); + return true; +} + static bool trans_ISB(DisasContext *s, arg_ISB *a) { /* @@ -4657,6 +4880,88 @@ static bool trans_EXTR(DisasContext *s, arg_extract *a) return true; } +static bool trans_TBL_TBX(DisasContext *s, arg_TBL_TBX *a) +{ + if (fp_access_check(s)) { + int len = (a->len + 1) * 16; + + tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rm), tcg_env, + a->q ? 16 : 8, vec_full_reg_size(s), + (len << 6) | (a->tbx << 5) | a->rn, + gen_helper_simd_tblx); + } + return true; +} + +typedef int simd_permute_idx_fn(int i, int part, int elements); + +static bool do_simd_permute(DisasContext *s, arg_qrrr_e *a, + simd_permute_idx_fn *fn, int part) +{ + MemOp esz = a->esz; + int datasize = a->q ? 16 : 8; + int elements = datasize >> esz; + TCGv_i64 tcg_res[2], tcg_ele; + + if (esz == MO_64 && !a->q) { + return false; + } + if (!fp_access_check(s)) { + return true; + } + + tcg_res[0] = tcg_temp_new_i64(); + tcg_res[1] = a->q ? tcg_temp_new_i64() : NULL; + tcg_ele = tcg_temp_new_i64(); + + for (int i = 0; i < elements; i++) { + int o, w, idx; + + idx = fn(i, part, elements); + read_vec_element(s, tcg_ele, (idx & elements ? 
a->rm : a->rn), + idx & (elements - 1), esz); + + w = (i << (esz + 3)) / 64; + o = (i << (esz + 3)) % 64; + if (o == 0) { + tcg_gen_mov_i64(tcg_res[w], tcg_ele); + } else { + tcg_gen_deposit_i64(tcg_res[w], tcg_res[w], tcg_ele, o, 8 << esz); + } + } + + for (int i = a->q; i >= 0; --i) { + write_vec_element(s, tcg_res[i], a->rd, i, MO_64); + } + clear_vec_high(s, a->q, a->rd); + return true; +} + +static int permute_load_uzp(int i, int part, int elements) +{ + return 2 * i + part; +} + +TRANS(UZP1, do_simd_permute, a, permute_load_uzp, 0) +TRANS(UZP2, do_simd_permute, a, permute_load_uzp, 1) + +static int permute_load_trn(int i, int part, int elements) +{ + return (i & 1) * elements + (i & ~1) + part; +} + +TRANS(TRN1, do_simd_permute, a, permute_load_trn, 0) +TRANS(TRN2, do_simd_permute, a, permute_load_trn, 1) + +static int permute_load_zip(int i, int part, int elements) +{ + return (i & 1) * elements + ((part * elements + i) >> 1); +} + +TRANS(ZIP1, do_simd_permute, a, permute_load_zip, 0) +TRANS(ZIP2, do_simd_permute, a, permute_load_zip, 1) + /* * Cryptographic AES, SHA, SHA512 */ @@ -4703,7 +5008,6 @@ static bool trans_SM3SS1(DisasContext *s, arg_SM3SS1 *a) TCGv_i32 tcg_op2 = tcg_temp_new_i32(); TCGv_i32 tcg_op3 = tcg_temp_new_i32(); TCGv_i32 tcg_res = tcg_temp_new_i32(); - unsigned vsz, dofs; read_vec_element_i32(s, tcg_op1, a->rn, 3, MO_32); read_vec_element_i32(s, tcg_op2, a->rm, 3, MO_32); @@ -4715,9 +5019,7 @@ static bool trans_SM3SS1(DisasContext *s, arg_SM3SS1 *a) tcg_gen_rotri_i32(tcg_res, tcg_res, 25); /* Clear the whole register first, then store bits [127:96]. */ - vsz = vec_full_reg_size(s); - dofs = vec_full_reg_offset(s, a->rd); - tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0); + clear_vec(s, a->rd); write_vec_element_i32(s, tcg_res, a->rd, 3, MO_32); } return true; @@ -4898,23 +5200,25 @@ typedef struct FPScalar { void (*gen_d)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr); } FPScalar; -static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f) +static bool do_fp3_scalar_with_fpsttype(DisasContext *s, arg_rrr_e *a, + const FPScalar *f, int mergereg, + ARMFPStatusFlavour fpsttype) { switch (a->esz) { case MO_64: if (fp_access_check(s)) { TCGv_i64 t0 = read_fp_dreg(s, a->rn); TCGv_i64 t1 = read_fp_dreg(s, a->rm); - f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR)); - write_fp_dreg(s, a->rd, t0); + f->gen_d(t0, t0, t1, fpstatus_ptr(fpsttype)); + write_fp_dreg_merging(s, a->rd, mergereg, t0); } break; case MO_32: if (fp_access_check(s)) { TCGv_i32 t0 = read_fp_sreg(s, a->rn); TCGv_i32 t1 = read_fp_sreg(s, a->rm); - f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR)); - write_fp_sreg(s, a->rd, t0); + f->gen_s(t0, t0, t1, fpstatus_ptr(fpsttype)); + write_fp_sreg_merging(s, a->rd, mergereg, t0); } break; case MO_16: @@ -4924,8 +5228,8 @@ static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f) if (fp_access_check(s)) { TCGv_i32 t0 = read_fp_hreg(s, a->rn); TCGv_i32 t1 = read_fp_hreg(s, a->rm); - f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16)); - write_fp_sreg(s, a->rd, t0); + f->gen_h(t0, t0, t1, fpstatus_ptr(fpsttype)); + write_fp_hreg_merging(s, a->rd, mergereg, t0); } break; default: @@ -4934,68 +5238,103 @@ static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f) return true; } +static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f, + int mergereg) +{ + return do_fp3_scalar_with_fpsttype(s, a, f, mergereg, + a->esz == MO_16 ? 
+ FPST_A64_F16 : FPST_A64); +} + +static bool do_fp3_scalar_ah_2fn(DisasContext *s, arg_rrr_e *a, + const FPScalar *fnormal, const FPScalar *fah, + int mergereg) +{ + return do_fp3_scalar_with_fpsttype(s, a, s->fpcr_ah ? fah : fnormal, + mergereg, select_ah_fpst(s, a->esz)); +} + +/* Some insns need to call different helpers when FPCR.AH == 1 */ +static bool do_fp3_scalar_2fn(DisasContext *s, arg_rrr_e *a, + const FPScalar *fnormal, + const FPScalar *fah, + int mergereg) +{ + return do_fp3_scalar(s, a, s->fpcr_ah ? fah : fnormal, mergereg); +} + static const FPScalar f_scalar_fadd = { gen_helper_vfp_addh, gen_helper_vfp_adds, gen_helper_vfp_addd, }; -TRANS(FADD_s, do_fp3_scalar, a, &f_scalar_fadd) +TRANS(FADD_s, do_fp3_scalar, a, &f_scalar_fadd, a->rn) static const FPScalar f_scalar_fsub = { gen_helper_vfp_subh, gen_helper_vfp_subs, gen_helper_vfp_subd, }; -TRANS(FSUB_s, do_fp3_scalar, a, &f_scalar_fsub) +TRANS(FSUB_s, do_fp3_scalar, a, &f_scalar_fsub, a->rn) static const FPScalar f_scalar_fdiv = { gen_helper_vfp_divh, gen_helper_vfp_divs, gen_helper_vfp_divd, }; -TRANS(FDIV_s, do_fp3_scalar, a, &f_scalar_fdiv) +TRANS(FDIV_s, do_fp3_scalar, a, &f_scalar_fdiv, a->rn) static const FPScalar f_scalar_fmul = { gen_helper_vfp_mulh, gen_helper_vfp_muls, gen_helper_vfp_muld, }; -TRANS(FMUL_s, do_fp3_scalar, a, &f_scalar_fmul) +TRANS(FMUL_s, do_fp3_scalar, a, &f_scalar_fmul, a->rn) static const FPScalar f_scalar_fmax = { - gen_helper_advsimd_maxh, + gen_helper_vfp_maxh, gen_helper_vfp_maxs, gen_helper_vfp_maxd, }; -TRANS(FMAX_s, do_fp3_scalar, a, &f_scalar_fmax) +static const FPScalar f_scalar_fmax_ah = { + gen_helper_vfp_ah_maxh, + gen_helper_vfp_ah_maxs, + gen_helper_vfp_ah_maxd, +}; +TRANS(FMAX_s, do_fp3_scalar_2fn, a, &f_scalar_fmax, &f_scalar_fmax_ah, a->rn) static const FPScalar f_scalar_fmin = { - gen_helper_advsimd_minh, + gen_helper_vfp_minh, gen_helper_vfp_mins, gen_helper_vfp_mind, }; -TRANS(FMIN_s, do_fp3_scalar, a, &f_scalar_fmin) +static const FPScalar f_scalar_fmin_ah = { + gen_helper_vfp_ah_minh, + gen_helper_vfp_ah_mins, + gen_helper_vfp_ah_mind, +}; +TRANS(FMIN_s, do_fp3_scalar_2fn, a, &f_scalar_fmin, &f_scalar_fmin_ah, a->rn) static const FPScalar f_scalar_fmaxnm = { - gen_helper_advsimd_maxnumh, + gen_helper_vfp_maxnumh, gen_helper_vfp_maxnums, gen_helper_vfp_maxnumd, }; -TRANS(FMAXNM_s, do_fp3_scalar, a, &f_scalar_fmaxnm) +TRANS(FMAXNM_s, do_fp3_scalar, a, &f_scalar_fmaxnm, a->rn) static const FPScalar f_scalar_fminnm = { - gen_helper_advsimd_minnumh, + gen_helper_vfp_minnumh, gen_helper_vfp_minnums, gen_helper_vfp_minnumd, }; -TRANS(FMINNM_s, do_fp3_scalar, a, &f_scalar_fminnm) +TRANS(FMINNM_s, do_fp3_scalar, a, &f_scalar_fminnm, a->rn) static const FPScalar f_scalar_fmulx = { gen_helper_advsimd_mulxh, gen_helper_vfp_mulxs, gen_helper_vfp_mulxd, }; -TRANS(FMULX_s, do_fp3_scalar, a, &f_scalar_fmulx) +TRANS(FMULX_s, do_fp3_scalar, a, &f_scalar_fmulx, a->rn) static void gen_fnmul_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) { @@ -5015,47 +5354,70 @@ static void gen_fnmul_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s) gen_vfp_negd(d, d); } +static void gen_fnmul_ah_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) +{ + gen_helper_vfp_mulh(d, n, m, s); + gen_vfp_ah_negh(d, d); +} + +static void gen_fnmul_ah_s(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) +{ + gen_helper_vfp_muls(d, n, m, s); + gen_vfp_ah_negs(d, d); +} + +static void gen_fnmul_ah_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s) +{ + gen_helper_vfp_muld(d, n, m, s); + gen_vfp_ah_negd(d, d); +} + static 
const FPScalar f_scalar_fnmul = { gen_fnmul_h, gen_fnmul_s, gen_fnmul_d, }; -TRANS(FNMUL_s, do_fp3_scalar, a, &f_scalar_fnmul) +static const FPScalar f_scalar_ah_fnmul = { + gen_fnmul_ah_h, + gen_fnmul_ah_s, + gen_fnmul_ah_d, +}; +TRANS(FNMUL_s, do_fp3_scalar_2fn, a, &f_scalar_fnmul, &f_scalar_ah_fnmul, a->rn) static const FPScalar f_scalar_fcmeq = { gen_helper_advsimd_ceq_f16, gen_helper_neon_ceq_f32, gen_helper_neon_ceq_f64, }; -TRANS(FCMEQ_s, do_fp3_scalar, a, &f_scalar_fcmeq) +TRANS(FCMEQ_s, do_fp3_scalar, a, &f_scalar_fcmeq, a->rm) static const FPScalar f_scalar_fcmge = { gen_helper_advsimd_cge_f16, gen_helper_neon_cge_f32, gen_helper_neon_cge_f64, }; -TRANS(FCMGE_s, do_fp3_scalar, a, &f_scalar_fcmge) +TRANS(FCMGE_s, do_fp3_scalar, a, &f_scalar_fcmge, a->rm) static const FPScalar f_scalar_fcmgt = { gen_helper_advsimd_cgt_f16, gen_helper_neon_cgt_f32, gen_helper_neon_cgt_f64, }; -TRANS(FCMGT_s, do_fp3_scalar, a, &f_scalar_fcmgt) +TRANS(FCMGT_s, do_fp3_scalar, a, &f_scalar_fcmgt, a->rm) static const FPScalar f_scalar_facge = { gen_helper_advsimd_acge_f16, gen_helper_neon_acge_f32, gen_helper_neon_acge_f64, }; -TRANS(FACGE_s, do_fp3_scalar, a, &f_scalar_facge) +TRANS(FACGE_s, do_fp3_scalar, a, &f_scalar_facge, a->rm) static const FPScalar f_scalar_facgt = { gen_helper_advsimd_acgt_f16, gen_helper_neon_acgt_f32, gen_helper_neon_acgt_f64, }; -TRANS(FACGT_s, do_fp3_scalar, a, &f_scalar_facgt) +TRANS(FACGT_s, do_fp3_scalar, a, &f_scalar_facgt, a->rm) static void gen_fabd_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) { @@ -5075,26 +5437,116 @@ static void gen_fabd_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s) gen_vfp_absd(d, d); } +static void gen_fabd_ah_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) +{ + gen_helper_vfp_subh(d, n, m, s); + gen_vfp_ah_absh(d, d); +} + +static void gen_fabd_ah_s(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) +{ + gen_helper_vfp_subs(d, n, m, s); + gen_vfp_ah_abss(d, d); +} + +static void gen_fabd_ah_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s) +{ + gen_helper_vfp_subd(d, n, m, s); + gen_vfp_ah_absd(d, d); +} + static const FPScalar f_scalar_fabd = { gen_fabd_h, gen_fabd_s, gen_fabd_d, }; -TRANS(FABD_s, do_fp3_scalar, a, &f_scalar_fabd) +static const FPScalar f_scalar_ah_fabd = { + gen_fabd_ah_h, + gen_fabd_ah_s, + gen_fabd_ah_d, +}; +TRANS(FABD_s, do_fp3_scalar_2fn, a, &f_scalar_fabd, &f_scalar_ah_fabd, a->rn) static const FPScalar f_scalar_frecps = { gen_helper_recpsf_f16, gen_helper_recpsf_f32, gen_helper_recpsf_f64, }; -TRANS(FRECPS_s, do_fp3_scalar, a, &f_scalar_frecps) +static const FPScalar f_scalar_ah_frecps = { + gen_helper_recpsf_ah_f16, + gen_helper_recpsf_ah_f32, + gen_helper_recpsf_ah_f64, +}; +TRANS(FRECPS_s, do_fp3_scalar_ah_2fn, a, + &f_scalar_frecps, &f_scalar_ah_frecps, a->rn) static const FPScalar f_scalar_frsqrts = { gen_helper_rsqrtsf_f16, gen_helper_rsqrtsf_f32, gen_helper_rsqrtsf_f64, }; -TRANS(FRSQRTS_s, do_fp3_scalar, a, &f_scalar_frsqrts) +static const FPScalar f_scalar_ah_frsqrts = { + gen_helper_rsqrtsf_ah_f16, + gen_helper_rsqrtsf_ah_f32, + gen_helper_rsqrtsf_ah_f64, +}; +TRANS(FRSQRTS_s, do_fp3_scalar_ah_2fn, a, + &f_scalar_frsqrts, &f_scalar_ah_frsqrts, a->rn) + +static bool do_fcmp0_s(DisasContext *s, arg_rr_e *a, + const FPScalar *f, bool swap) +{ + switch (a->esz) { + case MO_64: + if (fp_access_check(s)) { + TCGv_i64 t0 = read_fp_dreg(s, a->rn); + TCGv_i64 t1 = tcg_constant_i64(0); + if (swap) { + f->gen_d(t0, t1, t0, fpstatus_ptr(FPST_A64)); + } else { + f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_A64)); + } + 
write_fp_dreg(s, a->rd, t0); + } + break; + case MO_32: + if (fp_access_check(s)) { + TCGv_i32 t0 = read_fp_sreg(s, a->rn); + TCGv_i32 t1 = tcg_constant_i32(0); + if (swap) { + f->gen_s(t0, t1, t0, fpstatus_ptr(FPST_A64)); + } else { + f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_A64)); + } + write_fp_sreg(s, a->rd, t0); + } + break; + case MO_16: + if (!dc_isar_feature(aa64_fp16, s)) { + return false; + } + if (fp_access_check(s)) { + TCGv_i32 t0 = read_fp_hreg(s, a->rn); + TCGv_i32 t1 = tcg_constant_i32(0); + if (swap) { + f->gen_h(t0, t1, t0, fpstatus_ptr(FPST_A64_F16)); + } else { + f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_A64_F16)); + } + write_fp_sreg(s, a->rd, t0); + } + break; + default: + return false; + } + return true; +} + +TRANS(FCMEQ0_s, do_fcmp0_s, a, &f_scalar_fcmeq, false) +TRANS(FCMGT0_s, do_fcmp0_s, a, &f_scalar_fcmgt, false) +TRANS(FCMGE0_s, do_fcmp0_s, a, &f_scalar_fcmge, false) +TRANS(FCMLT0_s, do_fcmp0_s, a, &f_scalar_fcmgt, true) +TRANS(FCMLE0_s, do_fcmp0_s, a, &f_scalar_fcmge, true) static bool do_satacc_s(DisasContext *s, arg_rrr_e *a, MemOp sgn_n, MemOp sgn_m, @@ -5290,34 +5742,46 @@ TRANS(CMHS_s, do_cmop_d, a, TCG_COND_GEU) TRANS(CMEQ_s, do_cmop_d, a, TCG_COND_EQ) TRANS(CMTST_s, do_cmop_d, a, TCG_COND_TSTNE) -static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a, int data, - gen_helper_gvec_3_ptr * const fns[3]) +static bool do_fp3_vector_with_fpsttype(DisasContext *s, arg_qrrr_e *a, + int data, + gen_helper_gvec_3_ptr * const fns[3], + ARMFPStatusFlavour fpsttype) { MemOp esz = a->esz; + int check = fp_access_check_vector_hsd(s, a->q, esz); - switch (esz) { - case MO_64: - if (!a->q) { - return false; - } - break; - case MO_32: - break; - case MO_16: - if (!dc_isar_feature(aa64_fp16, s)) { - return false; - } - break; - default: - return false; - } - if (fp_access_check(s)) { - gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm, - esz == MO_16, data, fns[esz - 1]); + if (check <= 0) { + return check == 0; } + + gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm, fpsttype, + data, fns[esz - 1]); return true; } +static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a, int data, + gen_helper_gvec_3_ptr * const fns[3]) +{ + return do_fp3_vector_with_fpsttype(s, a, data, fns, + a->esz == MO_16 ? + FPST_A64_F16 : FPST_A64); +} + +static bool do_fp3_vector_2fn(DisasContext *s, arg_qrrr_e *a, int data, + gen_helper_gvec_3_ptr * const fnormal[3], + gen_helper_gvec_3_ptr * const fah[3]) +{ + return do_fp3_vector(s, a, data, s->fpcr_ah ? fah : fnormal); +} + +static bool do_fp3_vector_ah_2fn(DisasContext *s, arg_qrrr_e *a, int data, + gen_helper_gvec_3_ptr * const fnormal[3], + gen_helper_gvec_3_ptr * const fah[3]) +{ + return do_fp3_vector_with_fpsttype(s, a, data, s->fpcr_ah ? 
fah : fnormal, + select_ah_fpst(s, a->esz)); +} + static gen_helper_gvec_3_ptr * const f_vector_fadd[3] = { gen_helper_gvec_fadd_h, gen_helper_gvec_fadd_s, @@ -5351,14 +5815,24 @@ static gen_helper_gvec_3_ptr * const f_vector_fmax[3] = { gen_helper_gvec_fmax_s, gen_helper_gvec_fmax_d, }; -TRANS(FMAX_v, do_fp3_vector, a, 0, f_vector_fmax) +static gen_helper_gvec_3_ptr * const f_vector_fmax_ah[3] = { + gen_helper_gvec_ah_fmax_h, + gen_helper_gvec_ah_fmax_s, + gen_helper_gvec_ah_fmax_d, +}; +TRANS(FMAX_v, do_fp3_vector_2fn, a, 0, f_vector_fmax, f_vector_fmax_ah) static gen_helper_gvec_3_ptr * const f_vector_fmin[3] = { gen_helper_gvec_fmin_h, gen_helper_gvec_fmin_s, gen_helper_gvec_fmin_d, }; -TRANS(FMIN_v, do_fp3_vector, a, 0, f_vector_fmin) +static gen_helper_gvec_3_ptr * const f_vector_fmin_ah[3] = { + gen_helper_gvec_ah_fmin_h, + gen_helper_gvec_ah_fmin_s, + gen_helper_gvec_ah_fmin_d, +}; +TRANS(FMIN_v, do_fp3_vector_2fn, a, 0, f_vector_fmin, f_vector_fmin_ah) static gen_helper_gvec_3_ptr * const f_vector_fmaxnm[3] = { gen_helper_gvec_fmaxnum_h, @@ -5393,7 +5867,12 @@ static gen_helper_gvec_3_ptr * const f_vector_fmls[3] = { gen_helper_gvec_vfms_s, gen_helper_gvec_vfms_d, }; -TRANS(FMLS_v, do_fp3_vector, a, 0, f_vector_fmls) +static gen_helper_gvec_3_ptr * const f_vector_fmls_ah[3] = { + gen_helper_gvec_ah_vfms_h, + gen_helper_gvec_ah_vfms_s, + gen_helper_gvec_ah_vfms_d, +}; +TRANS(FMLS_v, do_fp3_vector_2fn, a, 0, f_vector_fmls, f_vector_fmls_ah) static gen_helper_gvec_3_ptr * const f_vector_fcmeq[3] = { gen_helper_gvec_fceq_h, @@ -5435,21 +5914,36 @@ static gen_helper_gvec_3_ptr * const f_vector_fabd[3] = { gen_helper_gvec_fabd_s, gen_helper_gvec_fabd_d, }; -TRANS(FABD_v, do_fp3_vector, a, 0, f_vector_fabd) +static gen_helper_gvec_3_ptr * const f_vector_ah_fabd[3] = { + gen_helper_gvec_ah_fabd_h, + gen_helper_gvec_ah_fabd_s, + gen_helper_gvec_ah_fabd_d, +}; +TRANS(FABD_v, do_fp3_vector_2fn, a, 0, f_vector_fabd, f_vector_ah_fabd) static gen_helper_gvec_3_ptr * const f_vector_frecps[3] = { gen_helper_gvec_recps_h, gen_helper_gvec_recps_s, gen_helper_gvec_recps_d, }; -TRANS(FRECPS_v, do_fp3_vector, a, 0, f_vector_frecps) +static gen_helper_gvec_3_ptr * const f_vector_ah_frecps[3] = { + gen_helper_gvec_ah_recps_h, + gen_helper_gvec_ah_recps_s, + gen_helper_gvec_ah_recps_d, +}; +TRANS(FRECPS_v, do_fp3_vector_ah_2fn, a, 0, f_vector_frecps, f_vector_ah_frecps) static gen_helper_gvec_3_ptr * const f_vector_frsqrts[3] = { gen_helper_gvec_rsqrts_h, gen_helper_gvec_rsqrts_s, gen_helper_gvec_rsqrts_d, }; -TRANS(FRSQRTS_v, do_fp3_vector, a, 0, f_vector_frsqrts) +static gen_helper_gvec_3_ptr * const f_vector_ah_frsqrts[3] = { + gen_helper_gvec_ah_rsqrts_h, + gen_helper_gvec_ah_rsqrts_s, + gen_helper_gvec_ah_rsqrts_d, +}; +TRANS(FRSQRTS_v, do_fp3_vector_ah_2fn, a, 0, f_vector_frsqrts, f_vector_ah_frsqrts) static gen_helper_gvec_3_ptr * const f_vector_faddp[3] = { gen_helper_gvec_faddp_h, @@ -5463,14 +5957,24 @@ static gen_helper_gvec_3_ptr * const f_vector_fmaxp[3] = { gen_helper_gvec_fmaxp_s, gen_helper_gvec_fmaxp_d, }; -TRANS(FMAXP_v, do_fp3_vector, a, 0, f_vector_fmaxp) +static gen_helper_gvec_3_ptr * const f_vector_ah_fmaxp[3] = { + gen_helper_gvec_ah_fmaxp_h, + gen_helper_gvec_ah_fmaxp_s, + gen_helper_gvec_ah_fmaxp_d, +}; +TRANS(FMAXP_v, do_fp3_vector_2fn, a, 0, f_vector_fmaxp, f_vector_ah_fmaxp) static gen_helper_gvec_3_ptr * const f_vector_fminp[3] = { gen_helper_gvec_fminp_h, gen_helper_gvec_fminp_s, gen_helper_gvec_fminp_d, }; -TRANS(FMINP_v, do_fp3_vector, a, 0, f_vector_fminp) +static 
gen_helper_gvec_3_ptr * const f_vector_ah_fminp[3] = { + gen_helper_gvec_ah_fminp_h, + gen_helper_gvec_ah_fminp_s, + gen_helper_gvec_ah_fminp_d, +}; +TRANS(FMINP_v, do_fp3_vector_2fn, a, 0, f_vector_fminp, f_vector_ah_fminp) static gen_helper_gvec_3_ptr * const f_vector_fmaxnmp[3] = { gen_helper_gvec_fmaxnump_h, @@ -5601,11 +6105,20 @@ static bool do_dot_vector(DisasContext *s, arg_qrrr_e *a, return true; } -TRANS_FEAT(SDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_sdot_b) -TRANS_FEAT(UDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_udot_b) -TRANS_FEAT(USDOT_v, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_usdot_b) -TRANS_FEAT(BFDOT_v, aa64_bf16, do_dot_vector, a, gen_helper_gvec_bfdot) -TRANS_FEAT(BFMMLA, aa64_bf16, do_dot_vector, a, gen_helper_gvec_bfmmla) +static bool do_dot_vector_env(DisasContext *s, arg_qrrr_e *a, + gen_helper_gvec_4_ptr *fn) +{ + if (fp_access_check(s)) { + gen_gvec_op4_env(s, a->q, a->rd, a->rn, a->rm, a->rd, 0, fn); + } + return true; +} + +TRANS_FEAT(SDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_sdot_4b) +TRANS_FEAT(UDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_udot_4b) +TRANS_FEAT(USDOT_v, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_usdot_4b) +TRANS_FEAT(BFDOT_v, aa64_bf16, do_dot_vector_env, a, gen_helper_gvec_bfdot) +TRANS_FEAT(BFMMLA, aa64_bf16, do_dot_vector_env, a, gen_helper_gvec_bfmmla) TRANS_FEAT(SMMLA, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_smmla_b) TRANS_FEAT(UMMLA, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_ummla_b) TRANS_FEAT(USMMLA, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_usmmla_b) @@ -5617,7 +6130,8 @@ static bool trans_BFMLAL_v(DisasContext *s, arg_qrrr_e *a) } if (fp_access_check(s)) { /* Q bit selects BFMLALB vs BFMLALT. */ - gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd, false, a->q, + gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd, + s->fpcr_ah ? FPST_AH : FPST_A64, a->q, gen_helper_gvec_bfmlal); } return true; @@ -5628,39 +6142,36 @@ static gen_helper_gvec_3_ptr * const f_vector_fcadd[3] = { gen_helper_gvec_fcadds, gen_helper_gvec_fcaddd, }; -TRANS_FEAT(FCADD_90, aa64_fcma, do_fp3_vector, a, 0, f_vector_fcadd) -TRANS_FEAT(FCADD_270, aa64_fcma, do_fp3_vector, a, 1, f_vector_fcadd) +/* + * Encode FPCR.AH into the data so the helper knows whether the + * negations it does should avoid flipping the sign bit on a NaN + */ +TRANS_FEAT(FCADD_90, aa64_fcma, do_fp3_vector, a, 0 | (s->fpcr_ah << 1), + f_vector_fcadd) +TRANS_FEAT(FCADD_270, aa64_fcma, do_fp3_vector, a, 1 | (s->fpcr_ah << 1), + f_vector_fcadd) static bool trans_FCMLA_v(DisasContext *s, arg_FCMLA_v *a) { - gen_helper_gvec_4_ptr *fn; + static gen_helper_gvec_4_ptr * const fn[] = { + [MO_16] = gen_helper_gvec_fcmlah, + [MO_32] = gen_helper_gvec_fcmlas, + [MO_64] = gen_helper_gvec_fcmlad, + }; + int check; if (!dc_isar_feature(aa64_fcma, s)) { return false; } - switch (a->esz) { - case MO_64: - if (!a->q) { - return false; - } - fn = gen_helper_gvec_fcmlad; - break; - case MO_32: - fn = gen_helper_gvec_fcmlas; - break; - case MO_16: - if (!dc_isar_feature(aa64_fp16, s)) { - return false; - } - fn = gen_helper_gvec_fcmlah; - break; - default: - return false; - } - if (fp_access_check(s)) { - gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd, - a->esz == MO_16, a->rot, fn); + + check = fp_access_check_vector_hsd(s, a->q, a->esz); + if (check <= 0) { + return check == 0; } + + gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd, + a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64, + a->rot | (s->fpcr_ah << 2), fn[a->esz]); return true; } @@ -6028,8 +6539,8 @@ static bool do_fp3_scalar_idx(DisasContext *s, arg_rrx_e *a, const FPScalar *f) TCGv_i64 t1 = tcg_temp_new_i64(); read_vec_element(s, t1, a->rm, a->idx, MO_64); - f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR)); - write_fp_dreg(s, a->rd, t0); + f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_A64)); + write_fp_dreg_merging(s, a->rd, a->rn, t0); } break; case MO_32: @@ -6038,8 +6549,8 @@ static bool do_fp3_scalar_idx(DisasContext *s, arg_rrx_e *a, const FPScalar *f) TCGv_i32 t1 = tcg_temp_new_i32(); read_vec_element_i32(s, t1, a->rm, a->idx, MO_32); - f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR)); - write_fp_sreg(s, a->rd, t0); + f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_A64)); + write_fp_sreg_merging(s, a->rd, a->rn, t0); } break; case MO_16: @@ -6051,8 +6562,8 @@ static bool do_fp3_scalar_idx(DisasContext *s, arg_rrx_e *a, const FPScalar *f) TCGv_i32 t1 = tcg_temp_new_i32(); read_vec_element_i32(s, t1, a->rm, a->idx, MO_16); - f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16)); - write_fp_sreg(s, a->rd, t0); + f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_A64_F16)); + write_fp_hreg_merging(s, a->rd, a->rn, t0); } break; default: @@ -6075,10 +6586,10 @@ static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg) read_vec_element(s, t2, a->rm, a->idx, MO_64); if (neg) { - gen_vfp_negd(t1, t1); + gen_vfp_maybe_ah_negd(s, t1, t1); } - gen_helper_vfp_muladdd(t0, t1, t2, t0, fpstatus_ptr(FPST_FPCR)); - write_fp_dreg(s, a->rd, t0); + gen_helper_vfp_muladdd(t0, t1, t2, t0, fpstatus_ptr(FPST_A64)); + write_fp_dreg_merging(s, a->rd, a->rd, t0); } break; case MO_32: @@ -6089,10 +6600,10 @@ static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg) read_vec_element_i32(s, t2, a->rm, a->idx, MO_32); if (neg) { - gen_vfp_negs(t1, t1); + gen_vfp_maybe_ah_negs(s, t1, t1); } - gen_helper_vfp_muladds(t0, t1, t2, t0, fpstatus_ptr(FPST_FPCR)); - write_fp_sreg(s, a->rd, t0); + gen_helper_vfp_muladds(t0, t1, t2, t0, fpstatus_ptr(FPST_A64)); + write_fp_sreg_merging(s, a->rd, a->rd, t0); } break; case MO_16: @@ -6106,11 +6617,11 @@ static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg) read_vec_element_i32(s, t2, a->rm, a->idx, MO_16); if (neg) { - gen_vfp_negh(t1, t1); + gen_vfp_maybe_ah_negh(s, t1, t1); } gen_helper_advsimd_muladdh(t0, t1, t2, t0, - fpstatus_ptr(FPST_FPCR_F16)); - write_fp_sreg(s, a->rd, t0); + fpstatus_ptr(FPST_A64_F16)); + write_fp_hreg_merging(s, a->rd, a->rd, t0); } break; default: @@ -6173,7 +6684,6 @@ static bool do_scalar_muladd_widening_idx(DisasContext *s, arg_rrx_e *a, TCGv_i64 t0 = tcg_temp_new_i64(); TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); - unsigned vsz, dofs; if (acc) { read_vec_element(s, t0, a->rd, 0, a->esz + 1); @@ -6183,9 +6693,7 @@ static bool do_scalar_muladd_widening_idx(DisasContext *s, arg_rrx_e *a, fn(t0, t1, t2); /* Clear the whole register first, then store scalar. 
*/ - vsz = vec_full_reg_size(s); - dofs = vec_full_reg_offset(s, a->rd); - tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0); + clear_vec(s, a->rd); write_vec_element(s, t0, a->rd, 0, a->esz + 1); } return true; @@ -6202,27 +6710,15 @@ static bool do_fp3_vector_idx(DisasContext *s, arg_qrrx_e *a, gen_helper_gvec_3_ptr * const fns[3]) { MemOp esz = a->esz; + int check = fp_access_check_vector_hsd(s, a->q, esz); - switch (esz) { - case MO_64: - if (!a->q) { - return false; - } - break; - case MO_32: - break; - case MO_16: - if (!dc_isar_feature(aa64_fp16, s)) { - return false; - } - break; - default: - g_assert_not_reached(); - } - if (fp_access_check(s)) { - gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm, - esz == MO_16, a->idx, fns[esz - 1]); + if (check <= 0) { + return check == 0; } + + gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm, + esz == MO_16 ? FPST_A64_F16 : FPST_A64, + a->idx, fns[esz - 1]); return true; } @@ -6242,34 +6738,27 @@ TRANS(FMULX_vi, do_fp3_vector_idx, a, f_vector_idx_fmulx) static bool do_fmla_vector_idx(DisasContext *s, arg_qrrx_e *a, bool neg) { - static gen_helper_gvec_4_ptr * const fns[3] = { - gen_helper_gvec_fmla_idx_h, - gen_helper_gvec_fmla_idx_s, - gen_helper_gvec_fmla_idx_d, + static gen_helper_gvec_4_ptr * const fns[3][3] = { + { gen_helper_gvec_fmla_idx_h, + gen_helper_gvec_fmla_idx_s, + gen_helper_gvec_fmla_idx_d }, + { gen_helper_gvec_fmls_idx_h, + gen_helper_gvec_fmls_idx_s, + gen_helper_gvec_fmls_idx_d }, + { gen_helper_gvec_ah_fmls_idx_h, + gen_helper_gvec_ah_fmls_idx_s, + gen_helper_gvec_ah_fmls_idx_d }, }; MemOp esz = a->esz; + int check = fp_access_check_vector_hsd(s, a->q, esz); - switch (esz) { - case MO_64: - if (!a->q) { - return false; - } - break; - case MO_32: - break; - case MO_16: - if (!dc_isar_feature(aa64_fp16, s)) { - return false; - } - break; - default: - g_assert_not_reached(); - } - if (fp_access_check(s)) { - gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd, - esz == MO_16, (a->idx << 1) | neg, - fns[esz - 1]); + if (check <= 0) { + return check == 0; } + + gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd, + esz == MO_16 ? FPST_A64_F16 : FPST_A64, + a->idx, fns[neg ? 1 + s->fpcr_ah : 0][esz - 1]); return true; } @@ -6378,13 +6867,22 @@ static bool do_dot_vector_idx(DisasContext *s, arg_qrrx_e *a, return true; } -TRANS_FEAT(SDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_sdot_idx_b) -TRANS_FEAT(UDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_udot_idx_b) +static bool do_dot_vector_idx_env(DisasContext *s, arg_qrrx_e *a, + gen_helper_gvec_4_ptr *fn) +{ + if (fp_access_check(s)) { + gen_gvec_op4_env(s, a->q, a->rd, a->rn, a->rm, a->rd, a->idx, fn); + } + return true; +} + +TRANS_FEAT(SDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_sdot_idx_4b) +TRANS_FEAT(UDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_udot_idx_4b) TRANS_FEAT(SUDOT_vi, aa64_i8mm, do_dot_vector_idx, a, - gen_helper_gvec_sudot_idx_b) + gen_helper_gvec_sudot_idx_4b) TRANS_FEAT(USDOT_vi, aa64_i8mm, do_dot_vector_idx, a, - gen_helper_gvec_usdot_idx_b) -TRANS_FEAT(BFDOT_vi, aa64_bf16, do_dot_vector_idx, a, + gen_helper_gvec_usdot_idx_4b) +TRANS_FEAT(BFDOT_vi, aa64_bf16, do_dot_vector_idx_env, a, gen_helper_gvec_bfdot_idx) static bool trans_BFMLAL_vi(DisasContext *s, arg_qrrx_e *a) @@ -6394,7 +6892,8 @@ static bool trans_BFMLAL_vi(DisasContext *s, arg_qrrx_e *a) } if (fp_access_check(s)) { /* Q bit selects BFMLALB vs BFMLALT. 
*/ - gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd, 0, + gen_gvec_op4_fpst(s, true, a->rd, a->rn, a->rm, a->rd, + s->fpcr_ah ? FPST_AH : FPST_A64, (a->idx << 1) | a->q, gen_helper_gvec_bfmlal_idx); } @@ -6423,7 +6922,8 @@ static bool trans_FCMLA_vi(DisasContext *s, arg_FCMLA_vi *a) } if (fp_access_check(s)) { gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd, - a->esz == MO_16, (a->idx << 2) | a->rot, fn); + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64, + (s->fpcr_ah << 4) | (a->idx << 2) | a->rot, fn); } return true; } @@ -6442,7 +6942,7 @@ static bool do_fp3_scalar_pair(DisasContext *s, arg_rr_e *a, const FPScalar *f) read_vec_element(s, t0, a->rn, 0, MO_64); read_vec_element(s, t1, a->rn, 1, MO_64); - f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR)); + f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_A64)); write_fp_dreg(s, a->rd, t0); } break; @@ -6453,7 +6953,7 @@ static bool do_fp3_scalar_pair(DisasContext *s, arg_rr_e *a, const FPScalar *f) read_vec_element_i32(s, t0, a->rn, 0, MO_32); read_vec_element_i32(s, t1, a->rn, 1, MO_32); - f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR)); + f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_A64)); write_fp_sreg(s, a->rd, t0); } break; @@ -6467,7 +6967,7 @@ static bool do_fp3_scalar_pair(DisasContext *s, arg_rr_e *a, const FPScalar *f) read_vec_element_i32(s, t0, a->rn, 0, MO_16); read_vec_element_i32(s, t1, a->rn, 1, MO_16); - f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16)); + f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_A64_F16)); write_fp_sreg(s, a->rd, t0); } break; @@ -6477,9 +6977,16 @@ static bool do_fp3_scalar_pair(DisasContext *s, arg_rr_e *a, const FPScalar *f) return true; } +static bool do_fp3_scalar_pair_2fn(DisasContext *s, arg_rr_e *a, + const FPScalar *fnormal, + const FPScalar *fah) +{ + return do_fp3_scalar_pair(s, a, s->fpcr_ah ? fah : fnormal); +} + TRANS(FADDP_s, do_fp3_scalar_pair, a, &f_scalar_fadd) -TRANS(FMAXP_s, do_fp3_scalar_pair, a, &f_scalar_fmax) -TRANS(FMINP_s, do_fp3_scalar_pair, a, &f_scalar_fmin) +TRANS(FMAXP_s, do_fp3_scalar_pair_2fn, a, &f_scalar_fmax, &f_scalar_fmax_ah) +TRANS(FMINP_s, do_fp3_scalar_pair_2fn, a, &f_scalar_fmin, &f_scalar_fmin_ah) TRANS(FMAXNMP_s, do_fp3_scalar_pair, a, &f_scalar_fmaxnm) TRANS(FMINNMP_s, do_fp3_scalar_pair, a, &f_scalar_fminnm) @@ -6505,22 +7012,10 @@ static bool trans_FCSEL(DisasContext *s, arg_FCSEL *a) { TCGv_i64 t_true, t_false; DisasCompare64 c; + int check = fp_access_check_scalar_hsd(s, a->esz); - switch (a->esz) { - case MO_32: - case MO_64: - break; - case MO_16: - if (!dc_isar_feature(aa64_fp16, s)) { - return false; - } - break; - default: - return false; - } - - if (!fp_access_check(s)) { - return true; + if (check <= 0) { + return check == 0; } /* Zero extend sreg & hreg inputs to 64 bits now. */ @@ -6541,6 +7036,54 @@ static bool trans_FCSEL(DisasContext *s, arg_FCSEL *a) return true; } +/* + * Advanced SIMD Extract + */ + +static bool trans_EXT_d(DisasContext *s, arg_EXT_d *a) +{ + if (fp_access_check(s)) { + TCGv_i64 lo = read_fp_dreg(s, a->rn); + if (a->imm != 0) { + TCGv_i64 hi = read_fp_dreg(s, a->rm); + tcg_gen_extract2_i64(lo, lo, hi, a->imm * 8); + } + write_fp_dreg(s, a->rd, lo); + } + return true; +} + +static bool trans_EXT_q(DisasContext *s, arg_EXT_q *a) +{ + TCGv_i64 lo, hi; + int pos = (a->imm & 7) * 8; + int elt = a->imm >> 3; + + if (!fp_access_check(s)) { + return true; + } + + lo = tcg_temp_new_i64(); + hi = tcg_temp_new_i64(); + + read_vec_element(s, lo, a->rn, elt, MO_64); + elt++; + read_vec_element(s, hi, elt & 2 ? 
a->rm : a->rn, elt & 1, MO_64); + elt++; + + if (pos != 0) { + TCGv_i64 hh = tcg_temp_new_i64(); + tcg_gen_extract2_i64(lo, lo, hi, pos); + read_vec_element(s, hh, a->rm, elt & 1, MO_64); + tcg_gen_extract2_i64(hi, hi, hh, pos); + } + + write_vec_element(s, lo, a->rd, 0, MO_64); + write_vec_element(s, hi, a->rd, 1, MO_64); + clear_vec_high(s, true, a->rd); + return true; +} + /* * Floating-point data-processing (3 source) */ @@ -6562,14 +7105,14 @@ static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n) TCGv_i64 ta = read_fp_dreg(s, a->ra); if (neg_a) { - gen_vfp_negd(ta, ta); + gen_vfp_maybe_ah_negd(s, ta, ta); } if (neg_n) { - gen_vfp_negd(tn, tn); + gen_vfp_maybe_ah_negd(s, tn, tn); } - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A64); gen_helper_vfp_muladdd(ta, tn, tm, ta, fpst); - write_fp_dreg(s, a->rd, ta); + write_fp_dreg_merging(s, a->rd, a->ra, ta); } break; @@ -6580,14 +7123,14 @@ static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n) TCGv_i32 ta = read_fp_sreg(s, a->ra); if (neg_a) { - gen_vfp_negs(ta, ta); + gen_vfp_maybe_ah_negs(s, ta, ta); } if (neg_n) { - gen_vfp_negs(tn, tn); + gen_vfp_maybe_ah_negs(s, tn, tn); } - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A64); gen_helper_vfp_muladds(ta, tn, tm, ta, fpst); - write_fp_sreg(s, a->rd, ta); + write_fp_sreg_merging(s, a->rd, a->ra, ta); } break; @@ -6601,14 +7144,14 @@ static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n) TCGv_i32 ta = read_fp_hreg(s, a->ra); if (neg_a) { - gen_vfp_negh(ta, ta); + gen_vfp_maybe_ah_negh(s, ta, ta); } if (neg_n) { - gen_vfp_negh(tn, tn); + gen_vfp_maybe_ah_negh(s, tn, tn); } - fpst = fpstatus_ptr(FPST_FPCR_F16); + fpst = fpstatus_ptr(FPST_A64_F16); gen_helper_advsimd_muladdh(ta, tn, tm, ta, fpst); - write_fp_sreg(s, a->rd, ta); + write_fp_hreg_merging(s, a->rd, a->ra, ta); } break; @@ -6623,5248 +7166,2879 @@ TRANS(FNMADD, do_fmadd, a, true, true) TRANS(FMSUB, do_fmadd, a, false, true) TRANS(FNMSUB, do_fmadd, a, true, false) -/* Shift a TCGv src by TCGv shift_amount, put result in dst. - * Note that it is the caller's responsibility to ensure that the - * shift amount is in range (ie 0..31 or 0..63) and provide the ARM - * mandated semantics for out of range shifts. +/* + * Advanced SIMD Across Lanes */ -static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf, - enum a64_shift_type shift_type, TCGv_i64 shift_amount) + +static bool do_int_reduction(DisasContext *s, arg_qrr_e *a, bool widen, + MemOp src_sign, NeonGenTwo64OpFn *fn) { - switch (shift_type) { - case A64_SHIFT_TYPE_LSL: - tcg_gen_shl_i64(dst, src, shift_amount); - break; - case A64_SHIFT_TYPE_LSR: - tcg_gen_shr_i64(dst, src, shift_amount); - break; - case A64_SHIFT_TYPE_ASR: - if (!sf) { - tcg_gen_ext32s_i64(dst, src); - } - tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount); - break; - case A64_SHIFT_TYPE_ROR: - if (sf) { - tcg_gen_rotr_i64(dst, src, shift_amount); - } else { - TCGv_i32 t0, t1; - t0 = tcg_temp_new_i32(); - t1 = tcg_temp_new_i32(); - tcg_gen_extrl_i64_i32(t0, src); - tcg_gen_extrl_i64_i32(t1, shift_amount); - tcg_gen_rotr_i32(t0, t0, t1); - tcg_gen_extu_i32_i64(dst, t0); - } - break; - default: - assert(FALSE); /* all shift types should be handled */ - break; + TCGv_i64 tcg_res, tcg_elt; + MemOp src_mop = a->esz | src_sign; + int elements = (a->q ? 16 : 8) >> a->esz; + + /* Reject MO_64, and MO_32 without Q: a minimum of 4 elements. 
*/ + if (elements < 4) { + return false; + } + if (!fp_access_check(s)) { + return true; } - if (!sf) { /* zero extend final result */ - tcg_gen_ext32u_i64(dst, dst); + tcg_res = tcg_temp_new_i64(); + tcg_elt = tcg_temp_new_i64(); + + read_vec_element(s, tcg_res, a->rn, 0, src_mop); + for (int i = 1; i < elements; i++) { + read_vec_element(s, tcg_elt, a->rn, i, src_mop); + fn(tcg_res, tcg_res, tcg_elt); } + + tcg_gen_ext_i64(tcg_res, tcg_res, a->esz + widen); + write_fp_dreg(s, a->rd, tcg_res); + return true; } -/* Shift a TCGv src by immediate, put result in dst. - * The shift amount must be in range (this should always be true as the - * relevant instructions will UNDEF on bad shift immediates). - */ -static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf, - enum a64_shift_type shift_type, unsigned int shift_i) -{ - assert(shift_i < (sf ? 64 : 32)); +TRANS(ADDV, do_int_reduction, a, false, 0, tcg_gen_add_i64) +TRANS(SADDLV, do_int_reduction, a, true, MO_SIGN, tcg_gen_add_i64) +TRANS(UADDLV, do_int_reduction, a, true, 0, tcg_gen_add_i64) +TRANS(SMAXV, do_int_reduction, a, false, MO_SIGN, tcg_gen_smax_i64) +TRANS(UMAXV, do_int_reduction, a, false, 0, tcg_gen_umax_i64) +TRANS(SMINV, do_int_reduction, a, false, MO_SIGN, tcg_gen_smin_i64) +TRANS(UMINV, do_int_reduction, a, false, 0, tcg_gen_umin_i64) - if (shift_i == 0) { - tcg_gen_mov_i64(dst, src); +/* + * do_fp_reduction helper + * + * This mirrors the Reduce() pseudocode in the ARM ARM. It is + * important for correct NaN propagation that we do these + * operations in exactly the order specified by the pseudocode. + * + * This is a recursive function. + */ +static TCGv_i32 do_reduction_op(DisasContext *s, int rn, MemOp esz, + int ebase, int ecount, TCGv_ptr fpst, + NeonGenTwoSingleOpFn *fn) +{ + if (ecount == 1) { + TCGv_i32 tcg_elem = tcg_temp_new_i32(); + read_vec_element_i32(s, tcg_elem, rn, ebase, esz); + return tcg_elem; } else { - shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i)); + int half = ecount >> 1; + TCGv_i32 tcg_hi, tcg_lo, tcg_res; + + tcg_hi = do_reduction_op(s, rn, esz, ebase + half, half, fpst, fn); + tcg_lo = do_reduction_op(s, rn, esz, ebase, half, fpst, fn); + tcg_res = tcg_temp_new_i32(); + + fn(tcg_res, tcg_lo, tcg_hi, fpst); + return tcg_res; } } -/* Logical (shifted register) - * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0 - * +----+-----+-----------+-------+---+------+--------+------+------+ - * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd | - * +----+-----+-----------+-------+---+------+--------+------+------+ +static bool do_fp_reduction(DisasContext *s, arg_qrr_e *a, + NeonGenTwoSingleOpFn *fnormal, + NeonGenTwoSingleOpFn *fah) +{ + if (fp_access_check(s)) { + MemOp esz = a->esz; + int elts = (a->q ? 16 : 8) >> esz; + TCGv_ptr fpst = fpstatus_ptr(esz == MO_16 ? FPST_A64_F16 : FPST_A64); + TCGv_i32 res = do_reduction_op(s, a->rn, esz, 0, elts, fpst, + s->fpcr_ah ? 
fah : fnormal); + write_fp_sreg(s, a->rd, res); + } + return true; +} + +TRANS_FEAT(FMAXNMV_h, aa64_fp16, do_fp_reduction, a, + gen_helper_vfp_maxnumh, gen_helper_vfp_maxnumh) +TRANS_FEAT(FMINNMV_h, aa64_fp16, do_fp_reduction, a, + gen_helper_vfp_minnumh, gen_helper_vfp_minnumh) +TRANS_FEAT(FMAXV_h, aa64_fp16, do_fp_reduction, a, + gen_helper_vfp_maxh, gen_helper_vfp_ah_maxh) +TRANS_FEAT(FMINV_h, aa64_fp16, do_fp_reduction, a, + gen_helper_vfp_minh, gen_helper_vfp_ah_minh) + +TRANS(FMAXNMV_s, do_fp_reduction, a, + gen_helper_vfp_maxnums, gen_helper_vfp_maxnums) +TRANS(FMINNMV_s, do_fp_reduction, a, + gen_helper_vfp_minnums, gen_helper_vfp_minnums) +TRANS(FMAXV_s, do_fp_reduction, a, gen_helper_vfp_maxs, gen_helper_vfp_ah_maxs) +TRANS(FMINV_s, do_fp_reduction, a, gen_helper_vfp_mins, gen_helper_vfp_ah_mins) + +/* + * Floating-point Immediate */ -static void disas_logic_reg(DisasContext *s, uint32_t insn) + +static bool trans_FMOVI_s(DisasContext *s, arg_FMOVI_s *a) { - TCGv_i64 tcg_rd, tcg_rn, tcg_rm; - unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd; - - sf = extract32(insn, 31, 1); - opc = extract32(insn, 29, 2); - shift_type = extract32(insn, 22, 2); - invert = extract32(insn, 21, 1); - rm = extract32(insn, 16, 5); - shift_amount = extract32(insn, 10, 6); - rn = extract32(insn, 5, 5); - rd = extract32(insn, 0, 5); - - if (!sf && (shift_amount & (1 << 5))) { - unallocated_encoding(s); - return; + int check = fp_access_check_scalar_hsd(s, a->esz); + uint64_t imm; + + if (check <= 0) { + return check == 0; } - tcg_rd = cpu_reg(s, rd); + imm = vfp_expand_imm(a->esz, a->imm); + write_fp_dreg(s, a->rd, tcg_constant_i64(imm)); + return true; +} + +/* + * Floating point compare, conditional compare + */ - if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) { - /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for - * register-register MOV and MVN, so it is worth special casing. - */ - tcg_rm = cpu_reg(s, rm); - if (invert) { - tcg_gen_not_i64(tcg_rd, tcg_rm); - if (!sf) { - tcg_gen_ext32u_i64(tcg_rd, tcg_rd); - } +static void handle_fp_compare(DisasContext *s, int size, + unsigned int rn, unsigned int rm, + bool cmp_with_zero, bool signal_all_nans) +{ + TCGv_i64 tcg_flags = tcg_temp_new_i64(); + TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? 
FPST_A64_F16 : FPST_A64); + + if (size == MO_64) { + TCGv_i64 tcg_vn, tcg_vm; + + tcg_vn = read_fp_dreg(s, rn); + if (cmp_with_zero) { + tcg_vm = tcg_constant_i64(0); } else { - if (sf) { - tcg_gen_mov_i64(tcg_rd, tcg_rm); + tcg_vm = read_fp_dreg(s, rm); + } + if (signal_all_nans) { + gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst); + } else { + gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst); + } + } else { + TCGv_i32 tcg_vn = tcg_temp_new_i32(); + TCGv_i32 tcg_vm = tcg_temp_new_i32(); + + read_vec_element_i32(s, tcg_vn, rn, 0, size); + if (cmp_with_zero) { + tcg_gen_movi_i32(tcg_vm, 0); + } else { + read_vec_element_i32(s, tcg_vm, rm, 0, size); + } + + switch (size) { + case MO_32: + if (signal_all_nans) { + gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst); } else { - tcg_gen_ext32u_i64(tcg_rd, tcg_rm); + gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst); + } + break; + case MO_16: + if (signal_all_nans) { + gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst); + } else { + gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst); } + break; + default: + g_assert_not_reached(); } - return; } - tcg_rm = read_cpu_reg(s, rm, sf); + gen_set_nzcv(tcg_flags); +} + +/* FCMP, FCMPE */ +static bool trans_FCMP(DisasContext *s, arg_FCMP *a) +{ + int check = fp_access_check_scalar_hsd(s, a->esz); - if (shift_amount) { - shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount); + if (check <= 0) { + return check == 0; } - tcg_rn = cpu_reg(s, rn); + handle_fp_compare(s, a->esz, a->rn, a->rm, a->z, a->e); + return true; +} - switch (opc | (invert << 2)) { - case 0: /* AND */ - case 3: /* ANDS */ - tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm); - break; - case 1: /* ORR */ - tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm); - break; - case 2: /* EOR */ - tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm); - break; - case 4: /* BIC */ - case 7: /* BICS */ - tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm); - break; - case 5: /* ORN */ - tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm); - break; - case 6: /* EON */ - tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm); - break; - default: - assert(FALSE); - break; +/* FCCMP, FCCMPE */ +static bool trans_FCCMP(DisasContext *s, arg_FCCMP *a) +{ + TCGLabel *label_continue = NULL; + int check = fp_access_check_scalar_hsd(s, a->esz); + + if (check <= 0) { + return check == 0; } - if (!sf) { - tcg_gen_ext32u_i64(tcg_rd, tcg_rd); + if (a->cond < 0x0e) { /* not always */ + TCGLabel *label_match = gen_new_label(); + label_continue = gen_new_label(); + arm_gen_test_cc(a->cond, label_match); + /* nomatch: */ + gen_set_nzcv(tcg_constant_i64(a->nzcv << 28)); + tcg_gen_br(label_continue); + gen_set_label(label_match); } - if (opc == 3) { - gen_logic_CC(sf, tcg_rd); + handle_fp_compare(s, a->esz, a->rn, a->rm, false, a->e); + + if (label_continue) { + gen_set_label(label_continue); } + return true; } /* - * Add/subtract (extended register) - * - * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0| - * +--+--+--+-----------+-----+--+-------+------+------+----+----+ - * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd | - * +--+--+--+-----------+-----+--+-------+------+------+----+----+ - * - * sf: 0 -> 32bit, 1 -> 64bit - * op: 0 -> add , 1 -> sub - * S: 1 -> set flags - * opt: 00 - * option: extension type (see DecodeRegExtend) - * imm3: optional shift to Rm - * - * Rd = Rn + LSL(extend(Rm), amount) + * Advanced SIMD Modified Immediate */ -static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn) -{ - int rd = extract32(insn, 0, 5); - int rn = extract32(insn, 5, 
5); - int imm3 = extract32(insn, 10, 3); - int option = extract32(insn, 13, 3); - int rm = extract32(insn, 16, 5); - int opt = extract32(insn, 22, 2); - bool setflags = extract32(insn, 29, 1); - bool sub_op = extract32(insn, 30, 1); - bool sf = extract32(insn, 31, 1); - - TCGv_i64 tcg_rm, tcg_rn; /* temps */ - TCGv_i64 tcg_rd; - TCGv_i64 tcg_result; - - if (imm3 > 4 || opt != 0) { - unallocated_encoding(s); - return; - } - /* non-flag setting ops may use SP */ - if (!setflags) { - tcg_rd = cpu_reg_sp(s, rd); - } else { - tcg_rd = cpu_reg(s, rd); +static bool trans_FMOVI_v_h(DisasContext *s, arg_FMOVI_v_h *a) +{ + if (!dc_isar_feature(aa64_fp16, s)) { + return false; + } + if (fp_access_check(s)) { + tcg_gen_gvec_dup_imm(MO_16, vec_full_reg_offset(s, a->rd), + a->q ? 16 : 8, vec_full_reg_size(s), + vfp_expand_imm(MO_16, a->abcdefgh)); } - tcg_rn = read_cpu_reg_sp(s, rn, sf); + return true; +} - tcg_rm = read_cpu_reg(s, rm, sf); - ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3); +static void gen_movi(unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t c, uint32_t oprsz, uint32_t maxsz) +{ + tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, c); +} - tcg_result = tcg_temp_new_i64(); +static bool trans_Vimm(DisasContext *s, arg_Vimm *a) +{ + GVecGen2iFn *fn; - if (!setflags) { - if (sub_op) { - tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm); - } else { - tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm); - } + /* Handle decode of cmode/op here between ORR/BIC/MOVI */ + if ((a->cmode & 1) && a->cmode < 12) { + /* For op=1, the imm will be inverted, so BIC becomes AND. */ + fn = a->op ? tcg_gen_gvec_andi : tcg_gen_gvec_ori; } else { - if (sub_op) { - gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm); - } else { - gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm); + /* There is one unallocated cmode/op combination in this space */ + if (a->cmode == 15 && a->op == 1 && a->q == 0) { + return false; } + fn = gen_movi; } - if (sf) { - tcg_gen_mov_i64(tcg_rd, tcg_result); - } else { - tcg_gen_ext32u_i64(tcg_rd, tcg_result); + if (fp_access_check(s)) { + uint64_t imm = asimd_imm_const(a->abcdefgh, a->cmode, a->op); + gen_gvec_fn2i(s, a->q, a->rd, a->rd, imm, fn, MO_64); } + return true; } /* - * Add/subtract (shifted register) - * - * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0 - * +--+--+--+-----------+-----+--+-------+---------+------+------+ - * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd | - * +--+--+--+-----------+-----+--+-------+---------+------+------+ - * - * sf: 0 -> 32bit, 1 -> 64bit - * op: 0 -> add , 1 -> sub - * S: 1 -> set flags - * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED - * imm6: Shift amount to apply to Rm before the add/sub + * Advanced SIMD Shift by Immediate */ -static void disas_add_sub_reg(DisasContext *s, uint32_t insn) -{ - int rd = extract32(insn, 0, 5); - int rn = extract32(insn, 5, 5); - int imm6 = extract32(insn, 10, 6); - int rm = extract32(insn, 16, 5); - int shift_type = extract32(insn, 22, 2); - bool setflags = extract32(insn, 29, 1); - bool sub_op = extract32(insn, 30, 1); - bool sf = extract32(insn, 31, 1); - - TCGv_i64 tcg_rd = cpu_reg(s, rd); - TCGv_i64 tcg_rn, tcg_rm; - TCGv_i64 tcg_result; - - if ((shift_type == 3) || (!sf && (imm6 > 31))) { - unallocated_encoding(s); - return; + +static bool do_vec_shift_imm(DisasContext *s, arg_qrri_e *a, GVecGen2iFn *fn) +{ + if (fp_access_check(s)) { + gen_gvec_fn2i(s, a->q, a->rd, a->rn, a->imm, fn, a->esz); } + return true; +} - tcg_rn = read_cpu_reg(s, rn, sf); - tcg_rm = read_cpu_reg(s, rm, sf); +TRANS(SSHR_v, 
do_vec_shift_imm, a, gen_gvec_sshr) +TRANS(USHR_v, do_vec_shift_imm, a, gen_gvec_ushr) +TRANS(SSRA_v, do_vec_shift_imm, a, gen_gvec_ssra) +TRANS(USRA_v, do_vec_shift_imm, a, gen_gvec_usra) +TRANS(SRSHR_v, do_vec_shift_imm, a, gen_gvec_srshr) +TRANS(URSHR_v, do_vec_shift_imm, a, gen_gvec_urshr) +TRANS(SRSRA_v, do_vec_shift_imm, a, gen_gvec_srsra) +TRANS(URSRA_v, do_vec_shift_imm, a, gen_gvec_ursra) +TRANS(SRI_v, do_vec_shift_imm, a, gen_gvec_sri) +TRANS(SHL_v, do_vec_shift_imm, a, tcg_gen_gvec_shli) +TRANS(SLI_v, do_vec_shift_imm, a, gen_gvec_sli); +TRANS(SQSHL_vi, do_vec_shift_imm, a, gen_neon_sqshli) +TRANS(UQSHL_vi, do_vec_shift_imm, a, gen_neon_uqshli) +TRANS(SQSHLU_vi, do_vec_shift_imm, a, gen_neon_sqshlui) - shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6); +static bool do_vec_shift_imm_wide(DisasContext *s, arg_qrri_e *a, bool is_u) +{ + TCGv_i64 tcg_rn, tcg_rd; + int esz = a->esz; + int esize; - tcg_result = tcg_temp_new_i64(); + if (!fp_access_check(s)) { + return true; + } - if (!setflags) { - if (sub_op) { - tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm); - } else { - tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm); - } - } else { - if (sub_op) { - gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm); + /* + * For the LL variants the store is larger than the load, + * so if rd == rn we would overwrite parts of our input. + * So load everything right now and use shifts in the main loop. + */ + tcg_rd = tcg_temp_new_i64(); + tcg_rn = tcg_temp_new_i64(); + read_vec_element(s, tcg_rn, a->rn, a->q, MO_64); + + esize = 8 << esz; + for (int i = 0, elements = 8 >> esz; i < elements; i++) { + if (is_u) { + tcg_gen_extract_i64(tcg_rd, tcg_rn, i * esize, esize); } else { - gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm); + tcg_gen_sextract_i64(tcg_rd, tcg_rn, i * esize, esize); } + tcg_gen_shli_i64(tcg_rd, tcg_rd, a->imm); + write_vec_element(s, tcg_rd, a->rd, i, esz + 1); } - - if (sf) { - tcg_gen_mov_i64(tcg_rd, tcg_result); - } else { - tcg_gen_ext32u_i64(tcg_rd, tcg_result); - } + clear_vec_high(s, true, a->rd); + return true; } -/* Data-processing (3 source) - * - * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0 - * +--+------+-----------+------+------+----+------+------+------+ - * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd | - * +--+------+-----------+------+------+----+------+------+------+ - */ -static void disas_data_proc_3src(DisasContext *s, uint32_t insn) -{ - int rd = extract32(insn, 0, 5); - int rn = extract32(insn, 5, 5); - int ra = extract32(insn, 10, 5); - int rm = extract32(insn, 16, 5); - int op_id = (extract32(insn, 29, 3) << 4) | - (extract32(insn, 21, 3) << 1) | - extract32(insn, 15, 1); - bool sf = extract32(insn, 31, 1); - bool is_sub = extract32(op_id, 0, 1); - bool is_high = extract32(op_id, 2, 1); - bool is_signed = false; - TCGv_i64 tcg_op1; - TCGv_i64 tcg_op2; - TCGv_i64 tcg_tmp; - - /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */ - switch (op_id) { - case 0x42: /* SMADDL */ - case 0x43: /* SMSUBL */ - case 0x44: /* SMULH */ - is_signed = true; - break; - case 0x0: /* MADD (32bit) */ - case 0x1: /* MSUB (32bit) */ - case 0x40: /* MADD (64bit) */ - case 0x41: /* MSUB (64bit) */ - case 0x4a: /* UMADDL */ - case 0x4b: /* UMSUBL */ - case 0x4c: /* UMULH */ - break; - default: - unallocated_encoding(s); - return; - } +TRANS(SSHLL_v, do_vec_shift_imm_wide, a, false) +TRANS(USHLL_v, do_vec_shift_imm_wide, a, true) - if (is_high) { - TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */ - TCGv_i64 tcg_rd = cpu_reg(s, rd); - TCGv_i64 tcg_rn = 
cpu_reg(s, rn); - TCGv_i64 tcg_rm = cpu_reg(s, rm); +static void gen_sshr_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift) +{ + assert(shift >= 0 && shift <= 64); + tcg_gen_sari_i64(dst, src, MIN(shift, 63)); +} - if (is_signed) { - tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm); - } else { - tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm); - } - return; +static void gen_ushr_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift) +{ + assert(shift >= 0 && shift <= 64); + if (shift == 64) { + tcg_gen_movi_i64(dst, 0); + } else { + tcg_gen_shri_i64(dst, src, shift); } +} - tcg_op1 = tcg_temp_new_i64(); - tcg_op2 = tcg_temp_new_i64(); - tcg_tmp = tcg_temp_new_i64(); +static void gen_ssra_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift) +{ + gen_sshr_d(src, src, shift); + tcg_gen_add_i64(dst, dst, src); +} - if (op_id < 0x42) { - tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn)); - tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm)); - } else { - if (is_signed) { - tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn)); - tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm)); - } else { - tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn)); - tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm)); - } - } +static void gen_usra_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift) +{ + gen_ushr_d(src, src, shift); + tcg_gen_add_i64(dst, dst, src); +} - if (ra == 31 && !is_sub) { - /* Special-case MADD with rA == XZR; it is the standard MUL alias */ - tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2); +static void gen_srshr_bhs(TCGv_i64 dst, TCGv_i64 src, int64_t shift) +{ + assert(shift >= 0 && shift <= 32); + if (shift) { + TCGv_i64 rnd = tcg_constant_i64(1ull << (shift - 1)); + tcg_gen_add_i64(dst, src, rnd); + tcg_gen_sari_i64(dst, dst, shift); } else { - tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2); - if (is_sub) { - tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp); - } else { - tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp); - } + tcg_gen_mov_i64(dst, src); } +} - if (!sf) { - tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd)); +static void gen_urshr_bhs(TCGv_i64 dst, TCGv_i64 src, int64_t shift) +{ + assert(shift >= 0 && shift <= 32); + if (shift) { + TCGv_i64 rnd = tcg_constant_i64(1ull << (shift - 1)); + tcg_gen_add_i64(dst, src, rnd); + tcg_gen_shri_i64(dst, dst, shift); + } else { + tcg_gen_mov_i64(dst, src); } } -/* Add/subtract (with carry) - * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0 - * +--+--+--+------------------------+------+-------------+------+-----+ - * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | 0 0 0 0 0 0 | Rn | Rd | - * +--+--+--+------------------------+------+-------------+------+-----+ - */ - -static void disas_adc_sbc(DisasContext *s, uint32_t insn) +static void gen_srshr_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift) { - unsigned int sf, op, setflags, rm, rn, rd; - TCGv_i64 tcg_y, tcg_rn, tcg_rd; - - sf = extract32(insn, 31, 1); - op = extract32(insn, 30, 1); - setflags = extract32(insn, 29, 1); - rm = extract32(insn, 16, 5); - rn = extract32(insn, 5, 5); - rd = extract32(insn, 0, 5); - - tcg_rd = cpu_reg(s, rd); - tcg_rn = cpu_reg(s, rn); - - if (op) { - tcg_y = tcg_temp_new_i64(); - tcg_gen_not_i64(tcg_y, cpu_reg(s, rm)); + assert(shift >= 0 && shift <= 64); + if (shift == 0) { + tcg_gen_mov_i64(dst, src); + } else if (shift == 64) { + /* Extension of sign bit (0,-1) plus sign bit (0,1) is zero. 
*/ + tcg_gen_movi_i64(dst, 0); } else { - tcg_y = cpu_reg(s, rm); + TCGv_i64 rnd = tcg_temp_new_i64(); + tcg_gen_extract_i64(rnd, src, shift - 1, 1); + tcg_gen_sari_i64(dst, src, shift); + tcg_gen_add_i64(dst, dst, rnd); } +} - if (setflags) { - gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y); +static void gen_urshr_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift) +{ + assert(shift >= 0 && shift <= 64); + if (shift == 0) { + tcg_gen_mov_i64(dst, src); + } else if (shift == 64) { + /* Rounding will propagate bit 63 into bit 64. */ + tcg_gen_shri_i64(dst, src, 63); } else { - gen_adc(sf, tcg_rd, tcg_rn, tcg_y); + TCGv_i64 rnd = tcg_temp_new_i64(); + tcg_gen_extract_i64(rnd, src, shift - 1, 1); + tcg_gen_shri_i64(dst, src, shift); + tcg_gen_add_i64(dst, dst, rnd); } } -/* - * Rotate right into flags - * 31 30 29 21 15 10 5 4 0 - * +--+--+--+-----------------+--------+-----------+------+--+------+ - * |sf|op| S| 1 1 0 1 0 0 0 0 | imm6 | 0 0 0 0 1 | Rn |o2| mask | - * +--+--+--+-----------------+--------+-----------+------+--+------+ - */ -static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn) +static void gen_srsra_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift) { - int mask = extract32(insn, 0, 4); - int o2 = extract32(insn, 4, 1); - int rn = extract32(insn, 5, 5); - int imm6 = extract32(insn, 15, 6); - int sf_op_s = extract32(insn, 29, 3); - TCGv_i64 tcg_rn; - TCGv_i32 nzcv; + gen_srshr_d(src, src, shift); + tcg_gen_add_i64(dst, dst, src); +} - if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) { - unallocated_encoding(s); - return; +static void gen_ursra_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift) +{ + gen_urshr_d(src, src, shift); + tcg_gen_add_i64(dst, dst, src); +} + +static void gen_sri_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift) +{ + /* If shift is 64, dst is unchanged. */ + if (shift != 64) { + tcg_gen_shri_i64(src, src, shift); + tcg_gen_deposit_i64(dst, dst, src, 0, 64 - shift); } +} - tcg_rn = read_cpu_reg(s, rn, 1); - tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6); - - nzcv = tcg_temp_new_i32(); - tcg_gen_extrl_i64_i32(nzcv, tcg_rn); - - if (mask & 8) { /* N */ - tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3); - } - if (mask & 4) { /* Z */ - tcg_gen_not_i32(cpu_ZF, nzcv); - tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4); - } - if (mask & 2) { /* C */ - tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1); - } - if (mask & 1) { /* V */ - tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0); - } -} - -/* - * Evaluate into flags - * 31 30 29 21 15 14 10 5 4 0 - * +--+--+--+-----------------+---------+----+---------+------+--+------+ - * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 | Rn |o3| mask | - * +--+--+--+-----------------+---------+----+---------+------+--+------+ - */ -static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn) +static void gen_sli_d(TCGv_i64 dst, TCGv_i64 src, int64_t shift) { - int o3_mask = extract32(insn, 0, 5); - int rn = extract32(insn, 5, 5); - int o2 = extract32(insn, 15, 6); - int sz = extract32(insn, 14, 1); - int sf_op_s = extract32(insn, 29, 3); - TCGv_i32 tmp; - int shift; - - if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd || - !dc_isar_feature(aa64_condm_4, s)) { - unallocated_encoding(s); - return; - } - shift = sz ? 
16 : 24; /* SETF16 or SETF8 */ - - tmp = tcg_temp_new_i32(); - tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn)); - tcg_gen_shli_i32(cpu_NF, tmp, shift); - tcg_gen_shli_i32(cpu_VF, tmp, shift - 1); - tcg_gen_mov_i32(cpu_ZF, cpu_NF); - tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF); + tcg_gen_deposit_i64(dst, dst, src, shift, 64 - shift); } -/* Conditional compare (immediate / register) - * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0 - * +--+--+--+------------------------+--------+------+----+--+------+--+-----+ - * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv | - * +--+--+--+------------------------+--------+------+----+--+------+--+-----+ - * [1] y [0] [0] - */ -static void disas_cc(DisasContext *s, uint32_t insn) +static bool do_vec_shift_imm_narrow(DisasContext *s, arg_qrri_e *a, + WideShiftImmFn * const fns[3], MemOp sign) { - unsigned int sf, op, y, cond, rn, nzcv, is_imm; - TCGv_i32 tcg_t0, tcg_t1, tcg_t2; - TCGv_i64 tcg_tmp, tcg_y, tcg_rn; - DisasCompare c; - - if (!extract32(insn, 29, 1)) { - unallocated_encoding(s); - return; - } - if (insn & (1 << 10 | 1 << 4)) { - unallocated_encoding(s); - return; - } - sf = extract32(insn, 31, 1); - op = extract32(insn, 30, 1); - is_imm = extract32(insn, 11, 1); - y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */ - cond = extract32(insn, 12, 4); - rn = extract32(insn, 5, 5); - nzcv = extract32(insn, 0, 4); - - /* Set T0 = !COND. */ - tcg_t0 = tcg_temp_new_i32(); - arm_test_cc(&c, cond); - tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0); + TCGv_i64 tcg_rn, tcg_rd; + int esz = a->esz; + int esize; + WideShiftImmFn *fn; - /* Load the arguments for the new comparison. */ - if (is_imm) { - tcg_y = tcg_temp_new_i64(); - tcg_gen_movi_i64(tcg_y, y); - } else { - tcg_y = cpu_reg(s, y); - } - tcg_rn = cpu_reg(s, rn); + tcg_debug_assert(esz >= MO_8 && esz <= MO_32); - /* Set the flags for the new comparison. */ - tcg_tmp = tcg_temp_new_i64(); - if (op) { - gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y); - } else { - gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y); + if (!fp_access_check(s)) { + return true; } - /* If COND was false, force the flags to #nzcv. Compute two masks - * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0). - * For tcg hosts that support ANDC, we can make do with just T1. - * In either case, allow the tcg optimizer to delete any unused mask. 
- */ - tcg_t1 = tcg_temp_new_i32(); - tcg_t2 = tcg_temp_new_i32(); - tcg_gen_neg_i32(tcg_t1, tcg_t0); - tcg_gen_subi_i32(tcg_t2, tcg_t0, 1); + tcg_rn = tcg_temp_new_i64(); + tcg_rd = tcg_temp_new_i64(); + tcg_gen_movi_i64(tcg_rd, 0); - if (nzcv & 8) { /* N */ - tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1); - } else { - if (TCG_TARGET_HAS_andc_i32) { - tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1); - } else { - tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2); - } - } - if (nzcv & 4) { /* Z */ - if (TCG_TARGET_HAS_andc_i32) { - tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1); - } else { - tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2); - } - } else { - tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0); - } - if (nzcv & 2) { /* C */ - tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0); - } else { - if (TCG_TARGET_HAS_andc_i32) { - tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1); - } else { - tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2); - } - } - if (nzcv & 1) { /* V */ - tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1); - } else { - if (TCG_TARGET_HAS_andc_i32) { - tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1); - } else { - tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2); - } + fn = fns[esz]; + esize = 8 << esz; + for (int i = 0, elements = 8 >> esz; i < elements; i++) { + read_vec_element(s, tcg_rn, a->rn, i, (esz + 1) | sign); + fn(tcg_rn, tcg_rn, a->imm); + tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, esize * i, esize); } + + write_vec_element(s, tcg_rd, a->rd, a->q, MO_64); + clear_vec_high(s, a->q, a->rd); + return true; } -/* Conditional select - * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0 - * +----+----+---+-----------------+------+------+-----+------+------+ - * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd | - * +----+----+---+-----------------+------+------+-----+------+------+ - */ -static void disas_cond_select(DisasContext *s, uint32_t insn) +static void gen_sqshrn_b(TCGv_i64 d, TCGv_i64 s, int64_t i) { - unsigned int sf, else_inv, rm, cond, else_inc, rn, rd; - TCGv_i64 tcg_rd, zero; - DisasCompare64 c; + tcg_gen_sari_i64(d, s, i); + tcg_gen_ext16u_i64(d, d); + gen_helper_neon_narrow_sat_s8(d, tcg_env, d); +} - if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) { - /* S == 1 or op2<1> == 1 */ - unallocated_encoding(s); - return; - } - sf = extract32(insn, 31, 1); - else_inv = extract32(insn, 30, 1); - rm = extract32(insn, 16, 5); - cond = extract32(insn, 12, 4); - else_inc = extract32(insn, 10, 1); - rn = extract32(insn, 5, 5); - rd = extract32(insn, 0, 5); +static void gen_sqshrn_h(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + tcg_gen_sari_i64(d, s, i); + tcg_gen_ext32u_i64(d, d); + gen_helper_neon_narrow_sat_s16(d, tcg_env, d); +} - tcg_rd = cpu_reg(s, rd); +static void gen_sqshrn_s(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + gen_sshr_d(d, s, i); + gen_helper_neon_narrow_sat_s32(d, tcg_env, d); +} - a64_test_cc(&c, cond); - zero = tcg_constant_i64(0); +static void gen_uqshrn_b(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + tcg_gen_shri_i64(d, s, i); + gen_helper_neon_narrow_sat_u8(d, tcg_env, d); +} - if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) { - /* CSET & CSETM. 
*/ - if (else_inv) { - tcg_gen_negsetcond_i64(tcg_invert_cond(c.cond), - tcg_rd, c.value, zero); - } else { - tcg_gen_setcond_i64(tcg_invert_cond(c.cond), - tcg_rd, c.value, zero); - } - } else { - TCGv_i64 t_true = cpu_reg(s, rn); - TCGv_i64 t_false = read_cpu_reg(s, rm, 1); - if (else_inv && else_inc) { - tcg_gen_neg_i64(t_false, t_false); - } else if (else_inv) { - tcg_gen_not_i64(t_false, t_false); - } else if (else_inc) { - tcg_gen_addi_i64(t_false, t_false, 1); - } - tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false); - } +static void gen_uqshrn_h(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + tcg_gen_shri_i64(d, s, i); + gen_helper_neon_narrow_sat_u16(d, tcg_env, d); +} - if (!sf) { - tcg_gen_ext32u_i64(tcg_rd, tcg_rd); - } +static void gen_uqshrn_s(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + gen_ushr_d(d, s, i); + gen_helper_neon_narrow_sat_u32(d, tcg_env, d); } -static void handle_clz(DisasContext *s, unsigned int sf, - unsigned int rn, unsigned int rd) +static void gen_sqshrun_b(TCGv_i64 d, TCGv_i64 s, int64_t i) { - TCGv_i64 tcg_rd, tcg_rn; - tcg_rd = cpu_reg(s, rd); - tcg_rn = cpu_reg(s, rn); + tcg_gen_sari_i64(d, s, i); + tcg_gen_ext16u_i64(d, d); + gen_helper_neon_unarrow_sat8(d, tcg_env, d); +} - if (sf) { - tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64); - } else { - TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(); - tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn); - tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32); - tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); - } +static void gen_sqshrun_h(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + tcg_gen_sari_i64(d, s, i); + tcg_gen_ext32u_i64(d, d); + gen_helper_neon_unarrow_sat16(d, tcg_env, d); } -static void handle_cls(DisasContext *s, unsigned int sf, - unsigned int rn, unsigned int rd) +static void gen_sqshrun_s(TCGv_i64 d, TCGv_i64 s, int64_t i) { - TCGv_i64 tcg_rd, tcg_rn; - tcg_rd = cpu_reg(s, rd); - tcg_rn = cpu_reg(s, rn); + gen_sshr_d(d, s, i); + gen_helper_neon_unarrow_sat32(d, tcg_env, d); +} - if (sf) { - tcg_gen_clrsb_i64(tcg_rd, tcg_rn); - } else { - TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(); - tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn); - tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32); - tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); - } +static void gen_sqrshrn_b(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + gen_srshr_bhs(d, s, i); + tcg_gen_ext16u_i64(d, d); + gen_helper_neon_narrow_sat_s8(d, tcg_env, d); } -static void handle_rbit(DisasContext *s, unsigned int sf, - unsigned int rn, unsigned int rd) +static void gen_sqrshrn_h(TCGv_i64 d, TCGv_i64 s, int64_t i) { - TCGv_i64 tcg_rd, tcg_rn; - tcg_rd = cpu_reg(s, rd); - tcg_rn = cpu_reg(s, rn); + gen_srshr_bhs(d, s, i); + tcg_gen_ext32u_i64(d, d); + gen_helper_neon_narrow_sat_s16(d, tcg_env, d); +} - if (sf) { - gen_helper_rbit64(tcg_rd, tcg_rn); - } else { - TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(); - tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn); - gen_helper_rbit(tcg_tmp32, tcg_tmp32); - tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); - } +static void gen_sqrshrn_s(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + gen_srshr_d(d, s, i); + gen_helper_neon_narrow_sat_s32(d, tcg_env, d); } -/* REV with sf==1, opcode==3 ("REV64") */ -static void handle_rev64(DisasContext *s, unsigned int sf, - unsigned int rn, unsigned int rd) +static void gen_uqrshrn_b(TCGv_i64 d, TCGv_i64 s, int64_t i) { - if (!sf) { - unallocated_encoding(s); - return; - } - tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn)); + gen_urshr_bhs(d, s, i); + gen_helper_neon_narrow_sat_u8(d, tcg_env, d); } -/* REV with sf==0, opcode==2 - * REV32 (sf==1, opcode==2) - */ -static void 
handle_rev32(DisasContext *s, unsigned int sf, - unsigned int rn, unsigned int rd) +static void gen_uqrshrn_h(TCGv_i64 d, TCGv_i64 s, int64_t i) { - TCGv_i64 tcg_rd = cpu_reg(s, rd); - TCGv_i64 tcg_rn = cpu_reg(s, rn); + gen_urshr_bhs(d, s, i); + gen_helper_neon_narrow_sat_u16(d, tcg_env, d); +} - if (sf) { - tcg_gen_bswap64_i64(tcg_rd, tcg_rn); - tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32); - } else { - tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ); - } +static void gen_uqrshrn_s(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + gen_urshr_d(d, s, i); + gen_helper_neon_narrow_sat_u32(d, tcg_env, d); } -/* REV16 (opcode==1) */ -static void handle_rev16(DisasContext *s, unsigned int sf, - unsigned int rn, unsigned int rd) +static void gen_sqrshrun_b(TCGv_i64 d, TCGv_i64 s, int64_t i) { - TCGv_i64 tcg_rd = cpu_reg(s, rd); - TCGv_i64 tcg_tmp = tcg_temp_new_i64(); - TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf); - TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff); + gen_srshr_bhs(d, s, i); + tcg_gen_ext16u_i64(d, d); + gen_helper_neon_unarrow_sat8(d, tcg_env, d); +} - tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8); - tcg_gen_and_i64(tcg_rd, tcg_rn, mask); - tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask); - tcg_gen_shli_i64(tcg_rd, tcg_rd, 8); - tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp); +static void gen_sqrshrun_h(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + gen_srshr_bhs(d, s, i); + tcg_gen_ext32u_i64(d, d); + gen_helper_neon_unarrow_sat16(d, tcg_env, d); } -/* Data-processing (1 source) - * 31 30 29 28 21 20 16 15 10 9 5 4 0 - * +----+---+---+-----------------+---------+--------+------+------+ - * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd | - * +----+---+---+-----------------+---------+--------+------+------+ - */ -static void disas_data_proc_1src(DisasContext *s, uint32_t insn) +static void gen_sqrshrun_s(TCGv_i64 d, TCGv_i64 s, int64_t i) { - unsigned int sf, opcode, opcode2, rn, rd; - TCGv_i64 tcg_rd; + gen_srshr_d(d, s, i); + gen_helper_neon_unarrow_sat32(d, tcg_env, d); +} - if (extract32(insn, 29, 1)) { - unallocated_encoding(s); - return; - } +static WideShiftImmFn * const shrn_fns[] = { + tcg_gen_shri_i64, + tcg_gen_shri_i64, + gen_ushr_d, +}; +TRANS(SHRN_v, do_vec_shift_imm_narrow, a, shrn_fns, 0) - sf = extract32(insn, 31, 1); - opcode = extract32(insn, 10, 6); - opcode2 = extract32(insn, 16, 5); - rn = extract32(insn, 5, 5); - rd = extract32(insn, 0, 5); +static WideShiftImmFn * const rshrn_fns[] = { + gen_urshr_bhs, + gen_urshr_bhs, + gen_urshr_d, +}; +TRANS(RSHRN_v, do_vec_shift_imm_narrow, a, rshrn_fns, 0) -#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7)) +static WideShiftImmFn * const sqshrn_fns[] = { + gen_sqshrn_b, + gen_sqshrn_h, + gen_sqshrn_s, +}; +TRANS(SQSHRN_v, do_vec_shift_imm_narrow, a, sqshrn_fns, MO_SIGN) - switch (MAP(sf, opcode2, opcode)) { - case MAP(0, 0x00, 0x00): /* RBIT */ - case MAP(1, 0x00, 0x00): - handle_rbit(s, sf, rn, rd); - break; - case MAP(0, 0x00, 0x01): /* REV16 */ - case MAP(1, 0x00, 0x01): - handle_rev16(s, sf, rn, rd); - break; - case MAP(0, 0x00, 0x02): /* REV/REV32 */ - case MAP(1, 0x00, 0x02): - handle_rev32(s, sf, rn, rd); - break; - case MAP(1, 0x00, 0x03): /* REV64 */ - handle_rev64(s, sf, rn, rd); - break; - case MAP(0, 0x00, 0x04): /* CLZ */ - case MAP(1, 0x00, 0x04): - handle_clz(s, sf, rn, rd); - break; - case MAP(0, 0x00, 0x05): /* CLS */ - case MAP(1, 0x00, 0x05): - handle_cls(s, sf, rn, rd); - break; - case MAP(1, 0x01, 0x00): /* PACIA */ - if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_pacia(tcg_rd, tcg_env, 
tcg_rd, cpu_reg_sp(s, rn)); - } else if (!dc_isar_feature(aa64_pauth, s)) { - goto do_unallocated; - } - break; - case MAP(1, 0x01, 0x01): /* PACIB */ - if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn)); - } else if (!dc_isar_feature(aa64_pauth, s)) { - goto do_unallocated; - } - break; - case MAP(1, 0x01, 0x02): /* PACDA */ - if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn)); - } else if (!dc_isar_feature(aa64_pauth, s)) { - goto do_unallocated; - } - break; - case MAP(1, 0x01, 0x03): /* PACDB */ - if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn)); - } else if (!dc_isar_feature(aa64_pauth, s)) { - goto do_unallocated; - } - break; - case MAP(1, 0x01, 0x04): /* AUTIA */ - if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_autia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn)); - } else if (!dc_isar_feature(aa64_pauth, s)) { - goto do_unallocated; - } - break; - case MAP(1, 0x01, 0x05): /* AUTIB */ - if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_autib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn)); - } else if (!dc_isar_feature(aa64_pauth, s)) { - goto do_unallocated; - } - break; - case MAP(1, 0x01, 0x06): /* AUTDA */ - if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_autda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn)); - } else if (!dc_isar_feature(aa64_pauth, s)) { - goto do_unallocated; - } - break; - case MAP(1, 0x01, 0x07): /* AUTDB */ - if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn)); - } else if (!dc_isar_feature(aa64_pauth, s)) { - goto do_unallocated; - } - break; - case MAP(1, 0x01, 0x08): /* PACIZA */ - if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { - goto do_unallocated; - } else if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0)); - } - break; - case MAP(1, 0x01, 0x09): /* PACIZB */ - if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { - goto do_unallocated; - } else if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0)); - } - break; - case MAP(1, 0x01, 0x0a): /* PACDZA */ - if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { - goto do_unallocated; - } else if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0)); - } - break; - case MAP(1, 0x01, 0x0b): /* PACDZB */ - if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { - goto do_unallocated; - } else if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0)); - } - break; - case MAP(1, 0x01, 0x0c): /* AUTIZA */ - if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { - goto do_unallocated; - } else if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_autia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0)); - } - break; - case MAP(1, 0x01, 0x0d): /* AUTIZB */ - if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { - goto do_unallocated; - } else if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_autib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0)); - } - break; - case MAP(1, 0x01, 0x0e): /* AUTDZA */ - if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { - goto do_unallocated; - } else if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_autda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0)); - } 
- break; - case MAP(1, 0x01, 0x0f): /* AUTDZB */ - if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { - goto do_unallocated; - } else if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0)); - } - break; - case MAP(1, 0x01, 0x10): /* XPACI */ - if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { - goto do_unallocated; - } else if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_xpaci(tcg_rd, tcg_env, tcg_rd); - } - break; - case MAP(1, 0x01, 0x11): /* XPACD */ - if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { - goto do_unallocated; - } else if (s->pauth_active) { - tcg_rd = cpu_reg(s, rd); - gen_helper_xpacd(tcg_rd, tcg_env, tcg_rd); - } - break; - default: - do_unallocated: - unallocated_encoding(s); - break; - } +static WideShiftImmFn * const uqshrn_fns[] = { + gen_uqshrn_b, + gen_uqshrn_h, + gen_uqshrn_s, +}; +TRANS(UQSHRN_v, do_vec_shift_imm_narrow, a, uqshrn_fns, 0) -#undef MAP -} +static WideShiftImmFn * const sqshrun_fns[] = { + gen_sqshrun_b, + gen_sqshrun_h, + gen_sqshrun_s, +}; +TRANS(SQSHRUN_v, do_vec_shift_imm_narrow, a, sqshrun_fns, MO_SIGN) + +static WideShiftImmFn * const sqrshrn_fns[] = { + gen_sqrshrn_b, + gen_sqrshrn_h, + gen_sqrshrn_s, +}; +TRANS(SQRSHRN_v, do_vec_shift_imm_narrow, a, sqrshrn_fns, MO_SIGN) + +static WideShiftImmFn * const uqrshrn_fns[] = { + gen_uqrshrn_b, + gen_uqrshrn_h, + gen_uqrshrn_s, +}; +TRANS(UQRSHRN_v, do_vec_shift_imm_narrow, a, uqrshrn_fns, 0) + +static WideShiftImmFn * const sqrshrun_fns[] = { + gen_sqrshrun_b, + gen_sqrshrun_h, + gen_sqrshrun_s, +}; +TRANS(SQRSHRUN_v, do_vec_shift_imm_narrow, a, sqrshrun_fns, MO_SIGN) + +/* + * Advanced SIMD Scalar Shift by Immediate + */ -static void handle_div(DisasContext *s, bool is_signed, unsigned int sf, - unsigned int rm, unsigned int rn, unsigned int rd) +static bool do_scalar_shift_imm(DisasContext *s, arg_rri_e *a, + WideShiftImmFn *fn, bool accumulate, + MemOp sign) { - TCGv_i64 tcg_n, tcg_m, tcg_rd; - tcg_rd = cpu_reg(s, rd); + if (fp_access_check(s)) { + TCGv_i64 rd = tcg_temp_new_i64(); + TCGv_i64 rn = tcg_temp_new_i64(); - if (!sf && is_signed) { - tcg_n = tcg_temp_new_i64(); - tcg_m = tcg_temp_new_i64(); - tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn)); - tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm)); - } else { - tcg_n = read_cpu_reg(s, rn, sf); - tcg_m = read_cpu_reg(s, rm, sf); + read_vec_element(s, rn, a->rn, 0, a->esz | sign); + if (accumulate) { + read_vec_element(s, rd, a->rd, 0, a->esz | sign); + } + fn(rd, rn, a->imm); + write_fp_dreg(s, a->rd, rd); } + return true; +} - if (is_signed) { - gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m); - } else { - gen_helper_udiv64(tcg_rd, tcg_n, tcg_m); - } +TRANS(SSHR_s, do_scalar_shift_imm, a, gen_sshr_d, false, 0) +TRANS(USHR_s, do_scalar_shift_imm, a, gen_ushr_d, false, 0) +TRANS(SSRA_s, do_scalar_shift_imm, a, gen_ssra_d, true, 0) +TRANS(USRA_s, do_scalar_shift_imm, a, gen_usra_d, true, 0) +TRANS(SRSHR_s, do_scalar_shift_imm, a, gen_srshr_d, false, 0) +TRANS(URSHR_s, do_scalar_shift_imm, a, gen_urshr_d, false, 0) +TRANS(SRSRA_s, do_scalar_shift_imm, a, gen_srsra_d, true, 0) +TRANS(URSRA_s, do_scalar_shift_imm, a, gen_ursra_d, true, 0) +TRANS(SRI_s, do_scalar_shift_imm, a, gen_sri_d, true, 0) - if (!sf) { /* zero extend final result */ - tcg_gen_ext32u_i64(tcg_rd, tcg_rd); - } +TRANS(SHL_s, do_scalar_shift_imm, a, tcg_gen_shli_i64, false, 0) +TRANS(SLI_s, do_scalar_shift_imm, a, gen_sli_d, true, 0) + +static void trunc_i64_env_imm(TCGv_i64 d, TCGv_i64 s, int64_t i, + NeonGenTwoOpEnvFn 
*fn) +{ + TCGv_i32 t = tcg_temp_new_i32(); + tcg_gen_extrl_i64_i32(t, s); + fn(t, tcg_env, t, tcg_constant_i32(i)); + tcg_gen_extu_i32_i64(d, t); } -/* LSLV, LSRV, ASRV, RORV */ -static void handle_shift_reg(DisasContext *s, - enum a64_shift_type shift_type, unsigned int sf, - unsigned int rm, unsigned int rn, unsigned int rd) +static void gen_sqshli_b(TCGv_i64 d, TCGv_i64 s, int64_t i) { - TCGv_i64 tcg_shift = tcg_temp_new_i64(); - TCGv_i64 tcg_rd = cpu_reg(s, rd); - TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf); + trunc_i64_env_imm(d, s, i, gen_helper_neon_qshl_s8); +} - tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31); - shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift); +static void gen_sqshli_h(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + trunc_i64_env_imm(d, s, i, gen_helper_neon_qshl_s16); } -/* CRC32[BHWX], CRC32C[BHWX] */ -static void handle_crc32(DisasContext *s, - unsigned int sf, unsigned int sz, bool crc32c, - unsigned int rm, unsigned int rn, unsigned int rd) +static void gen_sqshli_s(TCGv_i64 d, TCGv_i64 s, int64_t i) { - TCGv_i64 tcg_acc, tcg_val; - TCGv_i32 tcg_bytes; + trunc_i64_env_imm(d, s, i, gen_helper_neon_qshl_s32); +} - if (!dc_isar_feature(aa64_crc32, s) - || (sf == 1 && sz != 3) - || (sf == 0 && sz == 3)) { - unallocated_encoding(s); - return; - } - - if (sz == 3) { - tcg_val = cpu_reg(s, rm); - } else { - uint64_t mask; - switch (sz) { - case 0: - mask = 0xFF; - break; - case 1: - mask = 0xFFFF; - break; - case 2: - mask = 0xFFFFFFFF; - break; - default: - g_assert_not_reached(); - } - tcg_val = tcg_temp_new_i64(); - tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask); - } +static void gen_sqshli_d(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + gen_helper_neon_qshl_s64(d, tcg_env, s, tcg_constant_i64(i)); +} - tcg_acc = cpu_reg(s, rn); - tcg_bytes = tcg_constant_i32(1 << sz); +static void gen_uqshli_b(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + trunc_i64_env_imm(d, s, i, gen_helper_neon_qshl_u8); +} - if (crc32c) { - gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes); - } else { - gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes); - } +static void gen_uqshli_h(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + trunc_i64_env_imm(d, s, i, gen_helper_neon_qshl_u16); } -/* Data-processing (2 source) - * 31 30 29 28 21 20 16 15 10 9 5 4 0 - * +----+---+---+-----------------+------+--------+------+------+ - * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd | - * +----+---+---+-----------------+------+--------+------+------+ - */ -static void disas_data_proc_2src(DisasContext *s, uint32_t insn) +static void gen_uqshli_s(TCGv_i64 d, TCGv_i64 s, int64_t i) { - unsigned int sf, rm, opcode, rn, rd, setflag; - sf = extract32(insn, 31, 1); - setflag = extract32(insn, 29, 1); - rm = extract32(insn, 16, 5); - opcode = extract32(insn, 10, 6); - rn = extract32(insn, 5, 5); - rd = extract32(insn, 0, 5); + trunc_i64_env_imm(d, s, i, gen_helper_neon_qshl_u32); +} - if (setflag && opcode != 0) { - unallocated_encoding(s); - return; - } +static void gen_uqshli_d(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + gen_helper_neon_qshl_u64(d, tcg_env, s, tcg_constant_i64(i)); +} - switch (opcode) { - case 0: /* SUBP(S) */ - if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) { - goto do_unallocated; - } else { - TCGv_i64 tcg_n, tcg_m, tcg_d; +static void gen_sqshlui_b(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + trunc_i64_env_imm(d, s, i, gen_helper_neon_qshlu_s8); +} - tcg_n = read_cpu_reg_sp(s, rn, true); - tcg_m = read_cpu_reg_sp(s, rm, true); - tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 
56); - tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56); - tcg_d = cpu_reg(s, rd); +static void gen_sqshlui_h(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + trunc_i64_env_imm(d, s, i, gen_helper_neon_qshlu_s16); +} - if (setflag) { - gen_sub_CC(true, tcg_d, tcg_n, tcg_m); - } else { - tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m); - } - } - break; - case 2: /* UDIV */ - handle_div(s, false, sf, rm, rn, rd); - break; - case 3: /* SDIV */ - handle_div(s, true, sf, rm, rn, rd); - break; - case 4: /* IRG */ - if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) { - goto do_unallocated; - } - if (s->ata[0]) { - gen_helper_irg(cpu_reg_sp(s, rd), tcg_env, - cpu_reg_sp(s, rn), cpu_reg(s, rm)); - } else { - gen_address_with_allocation_tag0(cpu_reg_sp(s, rd), - cpu_reg_sp(s, rn)); - } - break; - case 5: /* GMI */ - if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) { - goto do_unallocated; - } else { - TCGv_i64 t = tcg_temp_new_i64(); +static void gen_sqshlui_s(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + trunc_i64_env_imm(d, s, i, gen_helper_neon_qshlu_s32); +} - tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4); - tcg_gen_shl_i64(t, tcg_constant_i64(1), t); - tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t); - } - break; - case 8: /* LSLV */ - handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd); - break; - case 9: /* LSRV */ - handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd); - break; - case 10: /* ASRV */ - handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd); - break; - case 11: /* RORV */ - handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd); - break; - case 12: /* PACGA */ - if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) { - goto do_unallocated; - } - gen_helper_pacga(cpu_reg(s, rd), tcg_env, - cpu_reg(s, rn), cpu_reg_sp(s, rm)); - break; - case 16: - case 17: - case 18: - case 19: - case 20: - case 21: - case 22: - case 23: /* CRC32 */ - { - int sz = extract32(opcode, 0, 2); - bool crc32c = extract32(opcode, 2, 1); - handle_crc32(s, sf, sz, crc32c, rm, rn, rd); - break; - } - default: - do_unallocated: - unallocated_encoding(s); - break; - } +static void gen_sqshlui_d(TCGv_i64 d, TCGv_i64 s, int64_t i) +{ + gen_helper_neon_qshlu_s64(d, tcg_env, s, tcg_constant_i64(i)); } -/* - * Data processing - register - * 31 30 29 28 25 21 20 16 10 0 - * +--+---+--+---+-------+-----+-------+-------+---------+ - * | |op0| |op1| 1 0 1 | op2 | | op3 | | - * +--+---+--+---+-------+-----+-------+-------+---------+ - */ -static void disas_data_proc_reg(DisasContext *s, uint32_t insn) -{ - int op0 = extract32(insn, 30, 1); - int op1 = extract32(insn, 28, 1); - int op2 = extract32(insn, 21, 4); - int op3 = extract32(insn, 10, 6); - - if (!op1) { - if (op2 & 8) { - if (op2 & 1) { - /* Add/sub (extended register) */ - disas_add_sub_ext_reg(s, insn); - } else { - /* Add/sub (shifted register) */ - disas_add_sub_reg(s, insn); - } - } else { - /* Logical (shifted register) */ - disas_logic_reg(s, insn); - } - return; - } +static WideShiftImmFn * const f_scalar_sqshli[] = { + gen_sqshli_b, gen_sqshli_h, gen_sqshli_s, gen_sqshli_d +}; - switch (op2) { - case 0x0: - switch (op3) { - case 0x00: /* Add/subtract (with carry) */ - disas_adc_sbc(s, insn); - break; +static WideShiftImmFn * const f_scalar_uqshli[] = { + gen_uqshli_b, gen_uqshli_h, gen_uqshli_s, gen_uqshli_d +}; - case 0x01: /* Rotate right into flags */ - case 0x21: - disas_rotate_right_into_flags(s, insn); - break; +static WideShiftImmFn * const f_scalar_sqshlui[] = { + gen_sqshlui_b, gen_sqshlui_h, gen_sqshlui_s, gen_sqshlui_d +}; - case 0x02: /* Evaluate into 
flags */ - case 0x12: - case 0x22: - case 0x32: - disas_evaluate_into_flags(s, insn); - break; +/* Note that the helpers sign-extend their inputs, so don't do it here. */ +TRANS(SQSHL_si, do_scalar_shift_imm, a, f_scalar_sqshli[a->esz], false, 0) +TRANS(UQSHL_si, do_scalar_shift_imm, a, f_scalar_uqshli[a->esz], false, 0) +TRANS(SQSHLU_si, do_scalar_shift_imm, a, f_scalar_sqshlui[a->esz], false, 0) - default: - goto do_unallocated; - } - break; +static bool do_scalar_shift_imm_narrow(DisasContext *s, arg_rri_e *a, + WideShiftImmFn * const fns[3], + MemOp sign, bool zext) +{ + MemOp esz = a->esz; - case 0x2: /* Conditional compare */ - disas_cc(s, insn); /* both imm and reg forms */ - break; + tcg_debug_assert(esz >= MO_8 && esz <= MO_32); - case 0x4: /* Conditional select */ - disas_cond_select(s, insn); - break; + if (fp_access_check(s)) { + TCGv_i64 rd = tcg_temp_new_i64(); + TCGv_i64 rn = tcg_temp_new_i64(); - case 0x6: /* Data-processing */ - if (op0) { /* (1 source) */ - disas_data_proc_1src(s, insn); - } else { /* (2 source) */ - disas_data_proc_2src(s, insn); + read_vec_element(s, rn, a->rn, 0, (esz + 1) | sign); + fns[esz](rd, rn, a->imm); + if (zext) { + tcg_gen_ext_i64(rd, rd, esz); } - break; - case 0x8 ... 0xf: /* (3 source) */ - disas_data_proc_3src(s, insn); - break; - - default: - do_unallocated: - unallocated_encoding(s); - break; + write_fp_dreg(s, a->rd, rd); } + return true; } -static void handle_fp_compare(DisasContext *s, int size, - unsigned int rn, unsigned int rm, - bool cmp_with_zero, bool signal_all_nans) -{ - TCGv_i64 tcg_flags = tcg_temp_new_i64(); - TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); +TRANS(SQSHRN_si, do_scalar_shift_imm_narrow, a, sqshrn_fns, MO_SIGN, true) +TRANS(SQRSHRN_si, do_scalar_shift_imm_narrow, a, sqrshrn_fns, MO_SIGN, true) +TRANS(UQSHRN_si, do_scalar_shift_imm_narrow, a, uqshrn_fns, 0, false) +TRANS(UQRSHRN_si, do_scalar_shift_imm_narrow, a, uqrshrn_fns, 0, false) +TRANS(SQSHRUN_si, do_scalar_shift_imm_narrow, a, sqshrun_fns, MO_SIGN, false) +TRANS(SQRSHRUN_si, do_scalar_shift_imm_narrow, a, sqrshrun_fns, MO_SIGN, false) - if (size == MO_64) { - TCGv_i64 tcg_vn, tcg_vm; +static bool do_div(DisasContext *s, arg_rrr_sf *a, bool is_signed) +{ + TCGv_i64 tcg_n, tcg_m, tcg_rd; + tcg_rd = cpu_reg(s, a->rd); - tcg_vn = read_fp_dreg(s, rn); - if (cmp_with_zero) { - tcg_vm = tcg_constant_i64(0); - } else { - tcg_vm = read_fp_dreg(s, rm); - } - if (signal_all_nans) { - gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst); - } else { - gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst); - } + if (!a->sf && is_signed) { + tcg_n = tcg_temp_new_i64(); + tcg_m = tcg_temp_new_i64(); + tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, a->rn)); + tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, a->rm)); } else { - TCGv_i32 tcg_vn = tcg_temp_new_i32(); - TCGv_i32 tcg_vm = tcg_temp_new_i32(); - - read_vec_element_i32(s, tcg_vn, rn, 0, size); - if (cmp_with_zero) { - tcg_gen_movi_i32(tcg_vm, 0); - } else { - read_vec_element_i32(s, tcg_vm, rm, 0, size); - } + tcg_n = read_cpu_reg(s, a->rn, a->sf); + tcg_m = read_cpu_reg(s, a->rm, a->sf); + } - switch (size) { - case MO_32: - if (signal_all_nans) { - gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst); - } else { - gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst); - } - break; - case MO_16: - if (signal_all_nans) { - gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst); - } else { - gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst); - } - break; - default: - 
g_assert_not_reached(); - } + if (is_signed) { + gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m); + } else { + gen_helper_udiv64(tcg_rd, tcg_n, tcg_m); } - gen_set_nzcv(tcg_flags); + if (!a->sf) { /* zero extend final result */ + tcg_gen_ext32u_i64(tcg_rd, tcg_rd); + } + return true; } -/* Floating point compare - * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0 - * +---+---+---+-----------+------+---+------+-----+---------+------+-------+ - * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 | - * +---+---+---+-----------+------+---+------+-----+---------+------+-------+ +TRANS(SDIV, do_div, a, true) +TRANS(UDIV, do_div, a, false) + +/* Shift a TCGv src by TCGv shift_amount, put result in dst. + * Note that it is the caller's responsibility to ensure that the + * shift amount is in range (ie 0..31 or 0..63) and provide the ARM + * mandated semantics for out of range shifts. */ -static void disas_fp_compare(DisasContext *s, uint32_t insn) +static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf, + enum a64_shift_type shift_type, TCGv_i64 shift_amount) { - unsigned int mos, type, rm, op, rn, opc, op2r; - int size; - - mos = extract32(insn, 29, 3); - type = extract32(insn, 22, 2); - rm = extract32(insn, 16, 5); - op = extract32(insn, 14, 2); - rn = extract32(insn, 5, 5); - opc = extract32(insn, 3, 2); - op2r = extract32(insn, 0, 3); - - if (mos || op || op2r) { - unallocated_encoding(s); - return; - } - - switch (type) { - case 0: - size = MO_32; + switch (shift_type) { + case A64_SHIFT_TYPE_LSL: + tcg_gen_shl_i64(dst, src, shift_amount); break; - case 1: - size = MO_64; + case A64_SHIFT_TYPE_LSR: + tcg_gen_shr_i64(dst, src, shift_amount); break; - case 3: - size = MO_16; - if (dc_isar_feature(aa64_fp16, s)) { - break; + case A64_SHIFT_TYPE_ASR: + if (!sf) { + tcg_gen_ext32s_i64(dst, src); + } + tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount); + break; + case A64_SHIFT_TYPE_ROR: + if (sf) { + tcg_gen_rotr_i64(dst, src, shift_amount); + } else { + TCGv_i32 t0, t1; + t0 = tcg_temp_new_i32(); + t1 = tcg_temp_new_i32(); + tcg_gen_extrl_i64_i32(t0, src); + tcg_gen_extrl_i64_i32(t1, shift_amount); + tcg_gen_rotr_i32(t0, t0, t1); + tcg_gen_extu_i32_i64(dst, t0); } - /* fallthru */ + break; default: - unallocated_encoding(s); - return; + assert(FALSE); /* all shift types should be handled */ + break; } - if (!fp_access_check(s)) { - return; + if (!sf) { /* zero extend final result */ + tcg_gen_ext32u_i64(dst, dst); } - - handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2); } -/* Floating point conditional compare - * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0 - * +---+---+---+-----------+------+---+------+------+-----+------+----+------+ - * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv | - * +---+---+---+-----------+------+---+------+------+-----+------+----+------+ +/* Shift a TCGv src by immediate, put result in dst. + * The shift amount must be in range (this should always be true as the + * relevant instructions will UNDEF on bad shift immediates). 
*/ -static void disas_fp_ccomp(DisasContext *s, uint32_t insn) +static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf, + enum a64_shift_type shift_type, unsigned int shift_i) { - unsigned int mos, type, rm, cond, rn, op, nzcv; - TCGLabel *label_continue = NULL; - int size; - - mos = extract32(insn, 29, 3); - type = extract32(insn, 22, 2); - rm = extract32(insn, 16, 5); - cond = extract32(insn, 12, 4); - rn = extract32(insn, 5, 5); - op = extract32(insn, 4, 1); - nzcv = extract32(insn, 0, 4); + assert(shift_i < (sf ? 64 : 32)); - if (mos) { - unallocated_encoding(s); - return; + if (shift_i == 0) { + tcg_gen_mov_i64(dst, src); + } else { + shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i)); } +} - switch (type) { - case 0: - size = MO_32; +static bool do_shift_reg(DisasContext *s, arg_rrr_sf *a, + enum a64_shift_type shift_type) +{ + TCGv_i64 tcg_shift = tcg_temp_new_i64(); + TCGv_i64 tcg_rd = cpu_reg(s, a->rd); + TCGv_i64 tcg_rn = read_cpu_reg(s, a->rn, a->sf); + + tcg_gen_andi_i64(tcg_shift, cpu_reg(s, a->rm), a->sf ? 63 : 31); + shift_reg(tcg_rd, tcg_rn, a->sf, shift_type, tcg_shift); + return true; +} + +TRANS(LSLV, do_shift_reg, a, A64_SHIFT_TYPE_LSL) +TRANS(LSRV, do_shift_reg, a, A64_SHIFT_TYPE_LSR) +TRANS(ASRV, do_shift_reg, a, A64_SHIFT_TYPE_ASR) +TRANS(RORV, do_shift_reg, a, A64_SHIFT_TYPE_ROR) + +static bool do_crc32(DisasContext *s, arg_rrr_e *a, bool crc32c) +{ + TCGv_i64 tcg_acc, tcg_val, tcg_rd; + TCGv_i32 tcg_bytes; + + switch (a->esz) { + case MO_8: + case MO_16: + case MO_32: + tcg_val = tcg_temp_new_i64(); + tcg_gen_extract_i64(tcg_val, cpu_reg(s, a->rm), 0, 8 << a->esz); break; - case 1: - size = MO_64; + case MO_64: + tcg_val = cpu_reg(s, a->rm); break; - case 3: - size = MO_16; - if (dc_isar_feature(aa64_fp16, s)) { - break; - } - /* fallthru */ default: - unallocated_encoding(s); - return; + g_assert_not_reached(); } + tcg_acc = cpu_reg(s, a->rn); + tcg_bytes = tcg_constant_i32(1 << a->esz); + tcg_rd = cpu_reg(s, a->rd); - if (!fp_access_check(s)) { - return; + if (crc32c) { + gen_helper_crc32c_64(tcg_rd, tcg_acc, tcg_val, tcg_bytes); + } else { + gen_helper_crc32_64(tcg_rd, tcg_acc, tcg_val, tcg_bytes); } + return true; +} - if (cond < 0x0e) { /* not always */ - TCGLabel *label_match = gen_new_label(); - label_continue = gen_new_label(); - arm_gen_test_cc(cond, label_match); - /* nomatch: */ - gen_set_nzcv(tcg_constant_i64(nzcv << 28)); - tcg_gen_br(label_continue); - gen_set_label(label_match); - } +TRANS_FEAT(CRC32, aa64_crc32, do_crc32, a, false) +TRANS_FEAT(CRC32C, aa64_crc32, do_crc32, a, true) + +static bool do_subp(DisasContext *s, arg_rrr *a, bool setflag) +{ + TCGv_i64 tcg_n = read_cpu_reg_sp(s, a->rn, true); + TCGv_i64 tcg_m = read_cpu_reg_sp(s, a->rm, true); + TCGv_i64 tcg_d = cpu_reg(s, a->rd); - handle_fp_compare(s, size, rn, rm, false, op); + tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56); + tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56); - if (cond < 0x0e) { - gen_set_label(label_continue); + if (setflag) { + gen_sub_CC(true, tcg_d, tcg_n, tcg_m); + } else { + tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m); } + return true; } -/* Floating-point data-processing (1 source) - half precision */ -static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn) +TRANS_FEAT(SUBP, aa64_mte_insn_reg, do_subp, a, false) +TRANS_FEAT(SUBPS, aa64_mte_insn_reg, do_subp, a, true) + +static bool trans_IRG(DisasContext *s, arg_rrr *a) { - TCGv_ptr fpst = NULL; - TCGv_i32 tcg_op = read_fp_hreg(s, rn); - TCGv_i32 tcg_res = tcg_temp_new_i32(); + if 
(dc_isar_feature(aa64_mte_insn_reg, s)) { + TCGv_i64 tcg_rd = cpu_reg_sp(s, a->rd); + TCGv_i64 tcg_rn = cpu_reg_sp(s, a->rn); - switch (opcode) { - case 0x0: /* FMOV */ - tcg_gen_mov_i32(tcg_res, tcg_op); - break; - case 0x1: /* FABS */ - gen_vfp_absh(tcg_res, tcg_op); - break; - case 0x2: /* FNEG */ - gen_vfp_negh(tcg_res, tcg_op); - break; - case 0x3: /* FSQRT */ - fpst = fpstatus_ptr(FPST_FPCR_F16); - gen_helper_sqrt_f16(tcg_res, tcg_op, fpst); - break; - case 0x8: /* FRINTN */ - case 0x9: /* FRINTP */ - case 0xa: /* FRINTM */ - case 0xb: /* FRINTZ */ - case 0xc: /* FRINTA */ - { - TCGv_i32 tcg_rmode; + if (s->ata[0]) { + gen_helper_irg(tcg_rd, tcg_env, tcg_rn, cpu_reg(s, a->rm)); + } else { + gen_address_with_allocation_tag0(tcg_rd, tcg_rn); + } + return true; + } + return false; +} - fpst = fpstatus_ptr(FPST_FPCR_F16); - tcg_rmode = gen_set_rmode(opcode & 7, fpst); - gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst); - gen_restore_rmode(tcg_rmode, fpst); - break; +static bool trans_GMI(DisasContext *s, arg_rrr *a) +{ + if (dc_isar_feature(aa64_mte_insn_reg, s)) { + TCGv_i64 t = tcg_temp_new_i64(); + + tcg_gen_extract_i64(t, cpu_reg_sp(s, a->rn), 56, 4); + tcg_gen_shl_i64(t, tcg_constant_i64(1), t); + tcg_gen_or_i64(cpu_reg(s, a->rd), cpu_reg(s, a->rm), t); + return true; } - case 0xe: /* FRINTX */ - fpst = fpstatus_ptr(FPST_FPCR_F16); - gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst); - break; - case 0xf: /* FRINTI */ - fpst = fpstatus_ptr(FPST_FPCR_F16); - gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst); - break; - default: - g_assert_not_reached(); + return false; +} + +static bool trans_PACGA(DisasContext *s, arg_rrr *a) +{ + if (dc_isar_feature(aa64_pauth, s)) { + gen_helper_pacga(cpu_reg(s, a->rd), tcg_env, + cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm)); + return true; } + return false; +} - write_fp_sreg(s, rd, tcg_res); +typedef void ArithOneOp(TCGv_i64, TCGv_i64); + +static bool gen_rr(DisasContext *s, int rd, int rn, ArithOneOp fn) +{ + fn(cpu_reg(s, rd), cpu_reg(s, rn)); + return true; } -/* Floating-point data-processing (1 source) - single precision */ -static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) +static void gen_rbit32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) { - void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr); - TCGv_i32 tcg_op, tcg_res; - TCGv_ptr fpst; - int rmode = -1; - - tcg_op = read_fp_sreg(s, rn); - tcg_res = tcg_temp_new_i32(); - - switch (opcode) { - case 0x0: /* FMOV */ - tcg_gen_mov_i32(tcg_res, tcg_op); - goto done; - case 0x1: /* FABS */ - gen_vfp_abss(tcg_res, tcg_op); - goto done; - case 0x2: /* FNEG */ - gen_vfp_negs(tcg_res, tcg_op); - goto done; - case 0x3: /* FSQRT */ - gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env); - goto done; - case 0x6: /* BFCVT */ - gen_fpst = gen_helper_bfcvt; - break; - case 0x8: /* FRINTN */ - case 0x9: /* FRINTP */ - case 0xa: /* FRINTM */ - case 0xb: /* FRINTZ */ - case 0xc: /* FRINTA */ - rmode = opcode & 7; - gen_fpst = gen_helper_rints; - break; - case 0xe: /* FRINTX */ - gen_fpst = gen_helper_rints_exact; - break; - case 0xf: /* FRINTI */ - gen_fpst = gen_helper_rints; - break; - case 0x10: /* FRINT32Z */ - rmode = FPROUNDING_ZERO; - gen_fpst = gen_helper_frint32_s; - break; - case 0x11: /* FRINT32X */ - gen_fpst = gen_helper_frint32_s; - break; - case 0x12: /* FRINT64Z */ - rmode = FPROUNDING_ZERO; - gen_fpst = gen_helper_frint64_s; - break; - case 0x13: /* FRINT64X */ - gen_fpst = gen_helper_frint64_s; - break; - default: - g_assert_not_reached(); - } + TCGv_i32 t32 = tcg_temp_new_i32(); - 
fpst = fpstatus_ptr(FPST_FPCR); - if (rmode >= 0) { - TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst); - gen_fpst(tcg_res, tcg_op, fpst); - gen_restore_rmode(tcg_rmode, fpst); - } else { - gen_fpst(tcg_res, tcg_op, fpst); - } + tcg_gen_extrl_i64_i32(t32, tcg_rn); + gen_helper_rbit(t32, t32); + tcg_gen_extu_i32_i64(tcg_rd, t32); +} - done: - write_fp_sreg(s, rd, tcg_res); +static void gen_rev16_xx(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 mask) +{ + TCGv_i64 tcg_tmp = tcg_temp_new_i64(); + + tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8); + tcg_gen_and_i64(tcg_rd, tcg_rn, mask); + tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask); + tcg_gen_shli_i64(tcg_rd, tcg_rd, 8); + tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp); } -/* Floating-point data-processing (1 source) - double precision */ -static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn) +static void gen_rev16_32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) { - void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr); - TCGv_i64 tcg_op, tcg_res; - TCGv_ptr fpst; - int rmode = -1; + gen_rev16_xx(tcg_rd, tcg_rn, tcg_constant_i64(0x00ff00ff)); +} - switch (opcode) { - case 0x0: /* FMOV */ - gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0); - return; - } +static void gen_rev16_64(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) +{ + gen_rev16_xx(tcg_rd, tcg_rn, tcg_constant_i64(0x00ff00ff00ff00ffull)); +} - tcg_op = read_fp_dreg(s, rn); - tcg_res = tcg_temp_new_i64(); +static void gen_rev_32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) +{ + tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ); +} - switch (opcode) { - case 0x1: /* FABS */ - gen_vfp_absd(tcg_res, tcg_op); - goto done; - case 0x2: /* FNEG */ - gen_vfp_negd(tcg_res, tcg_op); - goto done; - case 0x3: /* FSQRT */ - gen_helper_vfp_sqrtd(tcg_res, tcg_op, tcg_env); - goto done; - case 0x8: /* FRINTN */ - case 0x9: /* FRINTP */ - case 0xa: /* FRINTM */ - case 0xb: /* FRINTZ */ - case 0xc: /* FRINTA */ - rmode = opcode & 7; - gen_fpst = gen_helper_rintd; - break; - case 0xe: /* FRINTX */ - gen_fpst = gen_helper_rintd_exact; - break; - case 0xf: /* FRINTI */ - gen_fpst = gen_helper_rintd; - break; - case 0x10: /* FRINT32Z */ - rmode = FPROUNDING_ZERO; - gen_fpst = gen_helper_frint32_d; - break; - case 0x11: /* FRINT32X */ - gen_fpst = gen_helper_frint32_d; - break; - case 0x12: /* FRINT64Z */ - rmode = FPROUNDING_ZERO; - gen_fpst = gen_helper_frint64_d; - break; - case 0x13: /* FRINT64X */ - gen_fpst = gen_helper_frint64_d; - break; - default: - g_assert_not_reached(); - } +static void gen_rev32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) +{ + tcg_gen_bswap64_i64(tcg_rd, tcg_rn); + tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32); +} - fpst = fpstatus_ptr(FPST_FPCR); - if (rmode >= 0) { - TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst); - gen_fpst(tcg_res, tcg_op, fpst); - gen_restore_rmode(tcg_rmode, fpst); - } else { - gen_fpst(tcg_res, tcg_op, fpst); - } +TRANS(RBIT, gen_rr, a->rd, a->rn, a->sf ? gen_helper_rbit64 : gen_rbit32) +TRANS(REV16, gen_rr, a->rd, a->rn, a->sf ? gen_rev16_64 : gen_rev16_32) +TRANS(REV32, gen_rr, a->rd, a->rn, a->sf ? 
gen_rev32 : gen_rev_32) +TRANS(REV64, gen_rr, a->rd, a->rn, tcg_gen_bswap64_i64) + +static void gen_clz32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) +{ + TCGv_i32 t32 = tcg_temp_new_i32(); - done: - write_fp_dreg(s, rd, tcg_res); + tcg_gen_extrl_i64_i32(t32, tcg_rn); + tcg_gen_clzi_i32(t32, t32, 32); + tcg_gen_extu_i32_i64(tcg_rd, t32); } -static void handle_fp_fcvt(DisasContext *s, int opcode, - int rd, int rn, int dtype, int ntype) +static void gen_clz64(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) { - switch (ntype) { - case 0x0: - { - TCGv_i32 tcg_rn = read_fp_sreg(s, rn); - if (dtype == 1) { - /* Single to double */ - TCGv_i64 tcg_rd = tcg_temp_new_i64(); - gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, tcg_env); - write_fp_dreg(s, rd, tcg_rd); - } else { - /* Single to half */ - TCGv_i32 tcg_rd = tcg_temp_new_i32(); - TCGv_i32 ahp = get_ahp_flag(); - TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); + tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64); +} - gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp); - /* write_fp_sreg is OK here because top half of tcg_rd is zero */ - write_fp_sreg(s, rd, tcg_rd); - } - break; - } - case 0x1: - { - TCGv_i64 tcg_rn = read_fp_dreg(s, rn); - TCGv_i32 tcg_rd = tcg_temp_new_i32(); - if (dtype == 0) { - /* Double to single */ - gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, tcg_env); - } else { - TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); - TCGv_i32 ahp = get_ahp_flag(); - /* Double to half */ - gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp); - /* write_fp_sreg is OK here because top half of tcg_rd is zero */ +static void gen_cls32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) +{ + TCGv_i32 t32 = tcg_temp_new_i32(); + + tcg_gen_extrl_i64_i32(t32, tcg_rn); + tcg_gen_clrsb_i32(t32, t32); + tcg_gen_extu_i32_i64(tcg_rd, t32); +} + +TRANS(CLZ, gen_rr, a->rd, a->rn, a->sf ? gen_clz64 : gen_clz32) +TRANS(CLS, gen_rr, a->rd, a->rn, a->sf ? 
tcg_gen_clrsb_i64 : gen_cls32) + +static bool gen_pacaut(DisasContext *s, arg_pacaut *a, NeonGenTwo64OpEnvFn fn) +{ + TCGv_i64 tcg_rd, tcg_rn; + + if (a->z) { + if (a->rn != 31) { + return false; } - write_fp_sreg(s, rd, tcg_rd); - break; + tcg_rn = tcg_constant_i64(0); + } else { + tcg_rn = cpu_reg_sp(s, a->rn); } - case 0x3: - { - TCGv_i32 tcg_rn = read_fp_sreg(s, rn); - TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR); - TCGv_i32 tcg_ahp = get_ahp_flag(); - tcg_gen_ext16u_i32(tcg_rn, tcg_rn); - if (dtype == 0) { - /* Half to single */ - TCGv_i32 tcg_rd = tcg_temp_new_i32(); - gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); - write_fp_sreg(s, rd, tcg_rd); - } else { - /* Half to double */ - TCGv_i64 tcg_rd = tcg_temp_new_i64(); - gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); - write_fp_dreg(s, rd, tcg_rd); - } - break; + if (s->pauth_active) { + tcg_rd = cpu_reg(s, a->rd); + fn(tcg_rd, tcg_env, tcg_rd, tcg_rn); } - default: - g_assert_not_reached(); + return true; +} + +TRANS_FEAT(PACIA, aa64_pauth, gen_pacaut, a, gen_helper_pacia) +TRANS_FEAT(PACIB, aa64_pauth, gen_pacaut, a, gen_helper_pacib) +TRANS_FEAT(PACDA, aa64_pauth, gen_pacaut, a, gen_helper_pacda) +TRANS_FEAT(PACDB, aa64_pauth, gen_pacaut, a, gen_helper_pacdb) + +TRANS_FEAT(AUTIA, aa64_pauth, gen_pacaut, a, gen_helper_autia) +TRANS_FEAT(AUTIB, aa64_pauth, gen_pacaut, a, gen_helper_autib) +TRANS_FEAT(AUTDA, aa64_pauth, gen_pacaut, a, gen_helper_autda) +TRANS_FEAT(AUTDB, aa64_pauth, gen_pacaut, a, gen_helper_autdb) + +static bool do_xpac(DisasContext *s, int rd, NeonGenOne64OpEnvFn *fn) +{ + if (s->pauth_active) { + TCGv_i64 tcg_rd = cpu_reg(s, rd); + fn(tcg_rd, tcg_env, tcg_rd); } + return true; } -/* Floating point data-processing (1 source) - * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0 - * +---+---+---+-----------+------+---+--------+-----------+------+------+ - * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd | - * +---+---+---+-----------+------+---+--------+-----------+------+------+ - */ -static void disas_fp_1src(DisasContext *s, uint32_t insn) +TRANS_FEAT(XPACI, aa64_pauth, do_xpac, a->rd, gen_helper_xpaci) +TRANS_FEAT(XPACD, aa64_pauth, do_xpac, a->rd, gen_helper_xpacd) + +static bool do_logic_reg(DisasContext *s, arg_logic_shift *a, + ArithTwoOp *fn, ArithTwoOp *inv_fn, bool setflags) { - int mos = extract32(insn, 29, 3); - int type = extract32(insn, 22, 2); - int opcode = extract32(insn, 15, 6); - int rn = extract32(insn, 5, 5); - int rd = extract32(insn, 0, 5); + TCGv_i64 tcg_rd, tcg_rn, tcg_rm; - if (mos) { - goto do_unallocated; + if (!a->sf && (a->sa & (1 << 5))) { + return false; } - switch (opcode) { - case 0x4: case 0x5: case 0x7: - { - /* FCVT between half, single and double precision */ - int dtype = extract32(opcode, 0, 2); - if (type == 2 || dtype == type) { - goto do_unallocated; - } - if (!fp_access_check(s)) { - return; - } + tcg_rd = cpu_reg(s, a->rd); + tcg_rn = cpu_reg(s, a->rn); - handle_fp_fcvt(s, opcode, rd, rn, dtype, type); - break; + tcg_rm = read_cpu_reg(s, a->rm, a->sf); + if (a->sa) { + shift_reg_imm(tcg_rm, tcg_rm, a->sf, a->st, a->sa); } - case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */ - if (type > 1 || !dc_isar_feature(aa64_frint, s)) { - goto do_unallocated; - } - /* fall through */ - case 0x0 ... 0x3: - case 0x8 ... 0xc: - case 0xe ... 
0xf: - /* 32-to-32 and 64-to-64 ops */ - switch (type) { - case 0: - if (!fp_access_check(s)) { - return; - } - handle_fp_1src_single(s, opcode, rd, rn); - break; - case 1: - if (!fp_access_check(s)) { - return; - } - handle_fp_1src_double(s, opcode, rd, rn); - break; - case 3: - if (!dc_isar_feature(aa64_fp16, s)) { - goto do_unallocated; - } + (a->n ? inv_fn : fn)(tcg_rd, tcg_rn, tcg_rm); + if (!a->sf) { + tcg_gen_ext32u_i64(tcg_rd, tcg_rd); + } + if (setflags) { + gen_logic_CC(a->sf, tcg_rd); + } + return true; +} - if (!fp_access_check(s)) { - return; - } - handle_fp_1src_half(s, opcode, rd, rn); - break; - default: - goto do_unallocated; - } - break; +static bool trans_ORR_r(DisasContext *s, arg_logic_shift *a) +{ + /* + * Unshifted ORR and ORN with WZR/XZR is the standard encoding for + * register-register MOV and MVN, so it is worth special casing. + */ + if (a->sa == 0 && a->st == 0 && a->rn == 31) { + TCGv_i64 tcg_rd = cpu_reg(s, a->rd); + TCGv_i64 tcg_rm = cpu_reg(s, a->rm); - case 0x6: - switch (type) { - case 1: /* BFCVT */ - if (!dc_isar_feature(aa64_bf16, s)) { - goto do_unallocated; + if (a->n) { + tcg_gen_not_i64(tcg_rd, tcg_rm); + if (!a->sf) { + tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } - if (!fp_access_check(s)) { - return; + } else { + if (a->sf) { + tcg_gen_mov_i64(tcg_rd, tcg_rm); + } else { + tcg_gen_ext32u_i64(tcg_rd, tcg_rm); } - handle_fp_1src_single(s, opcode, rd, rn); - break; - default: - goto do_unallocated; } - break; - - default: - do_unallocated: - unallocated_encoding(s); - break; + return true; } + + return do_logic_reg(s, a, tcg_gen_or_i64, tcg_gen_orc_i64, false); } -/* Floating point immediate - * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0 - * +---+---+---+-----------+------+---+------------+-------+------+------+ - * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd | - * +---+---+---+-----------+------+---+------------+-------+------+------+ - */ -static void disas_fp_imm(DisasContext *s, uint32_t insn) +TRANS(AND_r, do_logic_reg, a, tcg_gen_and_i64, tcg_gen_andc_i64, false) +TRANS(ANDS_r, do_logic_reg, a, tcg_gen_and_i64, tcg_gen_andc_i64, true) +TRANS(EOR_r, do_logic_reg, a, tcg_gen_xor_i64, tcg_gen_eqv_i64, false) + +static bool do_addsub_ext(DisasContext *s, arg_addsub_ext *a, + bool sub_op, bool setflags) { - int rd = extract32(insn, 0, 5); - int imm5 = extract32(insn, 5, 5); - int imm8 = extract32(insn, 13, 8); - int type = extract32(insn, 22, 2); - int mos = extract32(insn, 29, 3); - uint64_t imm; - MemOp sz; + TCGv_i64 tcg_rm, tcg_rn, tcg_rd, tcg_result; - if (mos || imm5) { - unallocated_encoding(s); - return; + if (a->sa > 4) { + return false; } - switch (type) { - case 0: - sz = MO_32; - break; - case 1: - sz = MO_64; - break; - case 3: - sz = MO_16; - if (dc_isar_feature(aa64_fp16, s)) { - break; - } - /* fallthru */ - default: - unallocated_encoding(s); - return; + /* non-flag setting ops may use SP */ + if (!setflags) { + tcg_rd = cpu_reg_sp(s, a->rd); + } else { + tcg_rd = cpu_reg(s, a->rd); } + tcg_rn = read_cpu_reg_sp(s, a->rn, a->sf); - if (!fp_access_check(s)) { - return; + tcg_rm = read_cpu_reg(s, a->rm, a->sf); + ext_and_shift_reg(tcg_rm, tcg_rm, a->st, a->sa); + + tcg_result = tcg_temp_new_i64(); + if (!setflags) { + if (sub_op) { + tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm); + } else { + tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm); + } + } else { + if (sub_op) { + gen_sub_CC(a->sf, tcg_result, tcg_rn, tcg_rm); + } else { + gen_add_CC(a->sf, tcg_result, tcg_rn, tcg_rm); + } } - imm = vfp_expand_imm(sz, imm8); - 
write_fp_dreg(s, rd, tcg_constant_i64(imm)); + if (a->sf) { + tcg_gen_mov_i64(tcg_rd, tcg_result); + } else { + tcg_gen_ext32u_i64(tcg_rd, tcg_result); + } + return true; } -/* Handle floating point <=> fixed point conversions. Note that we can - * also deal with fp <=> integer conversions as a special case (scale == 64) - * OPTME: consider handling that special case specially or at least skipping - * the call to scalbn in the helpers for zero shifts. - */ -static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, - bool itof, int rmode, int scale, int sf, int type) -{ - bool is_signed = !(opcode & 1); - TCGv_ptr tcg_fpstatus; - TCGv_i32 tcg_shift, tcg_single; - TCGv_i64 tcg_double; +TRANS(ADD_ext, do_addsub_ext, a, false, false) +TRANS(SUB_ext, do_addsub_ext, a, true, false) +TRANS(ADDS_ext, do_addsub_ext, a, false, true) +TRANS(SUBS_ext, do_addsub_ext, a, true, true) - tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR); +static bool do_addsub_reg(DisasContext *s, arg_addsub_shift *a, + bool sub_op, bool setflags) +{ + TCGv_i64 tcg_rd, tcg_rn, tcg_rm, tcg_result; - tcg_shift = tcg_constant_i32(64 - scale); + if (a->st == 3 || (!a->sf && (a->sa & 32))) { + return false; + } - if (itof) { - TCGv_i64 tcg_int = cpu_reg(s, rn); - if (!sf) { - TCGv_i64 tcg_extend = tcg_temp_new_i64(); + tcg_rd = cpu_reg(s, a->rd); + tcg_rn = read_cpu_reg(s, a->rn, a->sf); + tcg_rm = read_cpu_reg(s, a->rm, a->sf); - if (is_signed) { - tcg_gen_ext32s_i64(tcg_extend, tcg_int); - } else { - tcg_gen_ext32u_i64(tcg_extend, tcg_int); - } + shift_reg_imm(tcg_rm, tcg_rm, a->sf, a->st, a->sa); - tcg_int = tcg_extend; + tcg_result = tcg_temp_new_i64(); + if (!setflags) { + if (sub_op) { + tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm); + } else { + tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm); + } + } else { + if (sub_op) { + gen_sub_CC(a->sf, tcg_result, tcg_rn, tcg_rm); + } else { + gen_add_CC(a->sf, tcg_result, tcg_rn, tcg_rm); } + } - switch (type) { - case 1: /* float64 */ - tcg_double = tcg_temp_new_i64(); - if (is_signed) { - gen_helper_vfp_sqtod(tcg_double, tcg_int, - tcg_shift, tcg_fpstatus); - } else { - gen_helper_vfp_uqtod(tcg_double, tcg_int, - tcg_shift, tcg_fpstatus); - } - write_fp_dreg(s, rd, tcg_double); - break; + if (a->sf) { + tcg_gen_mov_i64(tcg_rd, tcg_result); + } else { + tcg_gen_ext32u_i64(tcg_rd, tcg_result); + } + return true; +} - case 0: /* float32 */ - tcg_single = tcg_temp_new_i32(); - if (is_signed) { - gen_helper_vfp_sqtos(tcg_single, tcg_int, - tcg_shift, tcg_fpstatus); - } else { - gen_helper_vfp_uqtos(tcg_single, tcg_int, - tcg_shift, tcg_fpstatus); - } - write_fp_sreg(s, rd, tcg_single); - break; +TRANS(ADD_r, do_addsub_reg, a, false, false) +TRANS(SUB_r, do_addsub_reg, a, true, false) +TRANS(ADDS_r, do_addsub_reg, a, false, true) +TRANS(SUBS_r, do_addsub_reg, a, true, true) - case 3: /* float16 */ - tcg_single = tcg_temp_new_i32(); - if (is_signed) { - gen_helper_vfp_sqtoh(tcg_single, tcg_int, - tcg_shift, tcg_fpstatus); - } else { - gen_helper_vfp_uqtoh(tcg_single, tcg_int, - tcg_shift, tcg_fpstatus); - } - write_fp_sreg(s, rd, tcg_single); - break; +static bool do_mulh(DisasContext *s, arg_rrr *a, + void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGv_i64 discard = tcg_temp_new_i64(); + TCGv_i64 tcg_rd = cpu_reg(s, a->rd); + TCGv_i64 tcg_rn = cpu_reg(s, a->rn); + TCGv_i64 tcg_rm = cpu_reg(s, a->rm); - default: - g_assert_not_reached(); - } - } else { - TCGv_i64 tcg_int = cpu_reg(s, rd); - TCGv_i32 tcg_rmode; + fn(discard, tcg_rd, tcg_rn, tcg_rm); 
+ return true; +} - if (extract32(opcode, 2, 1)) { - /* There are too many rounding modes to all fit into rmode, - * so FCVTA[US] is a special case. - */ - rmode = FPROUNDING_TIEAWAY; - } - - tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); - - switch (type) { - case 1: /* float64 */ - tcg_double = read_fp_dreg(s, rn); - if (is_signed) { - if (!sf) { - gen_helper_vfp_tosld(tcg_int, tcg_double, - tcg_shift, tcg_fpstatus); - } else { - gen_helper_vfp_tosqd(tcg_int, tcg_double, - tcg_shift, tcg_fpstatus); - } - } else { - if (!sf) { - gen_helper_vfp_tould(tcg_int, tcg_double, - tcg_shift, tcg_fpstatus); - } else { - gen_helper_vfp_touqd(tcg_int, tcg_double, - tcg_shift, tcg_fpstatus); - } - } - if (!sf) { - tcg_gen_ext32u_i64(tcg_int, tcg_int); - } - break; - - case 0: /* float32 */ - tcg_single = read_fp_sreg(s, rn); - if (sf) { - if (is_signed) { - gen_helper_vfp_tosqs(tcg_int, tcg_single, - tcg_shift, tcg_fpstatus); - } else { - gen_helper_vfp_touqs(tcg_int, tcg_single, - tcg_shift, tcg_fpstatus); - } - } else { - TCGv_i32 tcg_dest = tcg_temp_new_i32(); - if (is_signed) { - gen_helper_vfp_tosls(tcg_dest, tcg_single, - tcg_shift, tcg_fpstatus); - } else { - gen_helper_vfp_touls(tcg_dest, tcg_single, - tcg_shift, tcg_fpstatus); - } - tcg_gen_extu_i32_i64(tcg_int, tcg_dest); - } - break; - - case 3: /* float16 */ - tcg_single = read_fp_sreg(s, rn); - if (sf) { - if (is_signed) { - gen_helper_vfp_tosqh(tcg_int, tcg_single, - tcg_shift, tcg_fpstatus); - } else { - gen_helper_vfp_touqh(tcg_int, tcg_single, - tcg_shift, tcg_fpstatus); - } - } else { - TCGv_i32 tcg_dest = tcg_temp_new_i32(); - if (is_signed) { - gen_helper_vfp_toslh(tcg_dest, tcg_single, - tcg_shift, tcg_fpstatus); - } else { - gen_helper_vfp_toulh(tcg_dest, tcg_single, - tcg_shift, tcg_fpstatus); - } - tcg_gen_extu_i32_i64(tcg_int, tcg_dest); - } - break; +TRANS(SMULH, do_mulh, a, tcg_gen_muls2_i64) +TRANS(UMULH, do_mulh, a, tcg_gen_mulu2_i64) - default: - g_assert_not_reached(); - } +static bool do_muladd(DisasContext *s, arg_rrrr *a, + bool sf, bool is_sub, MemOp mop) +{ + TCGv_i64 tcg_rd = cpu_reg(s, a->rd); + TCGv_i64 tcg_op1, tcg_op2; - gen_restore_rmode(tcg_rmode, tcg_fpstatus); + if (mop == MO_64) { + tcg_op1 = cpu_reg(s, a->rn); + tcg_op2 = cpu_reg(s, a->rm); + } else { + tcg_op1 = tcg_temp_new_i64(); + tcg_op2 = tcg_temp_new_i64(); + tcg_gen_ext_i64(tcg_op1, cpu_reg(s, a->rn), mop); + tcg_gen_ext_i64(tcg_op2, cpu_reg(s, a->rm), mop); } -} -/* Floating point <-> fixed point conversions - * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0 - * +----+---+---+-----------+------+---+-------+--------+-------+------+------+ - * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd | - * +----+---+---+-----------+------+---+-------+--------+-------+------+------+ - */ -static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn) -{ - int rd = extract32(insn, 0, 5); - int rn = extract32(insn, 5, 5); - int scale = extract32(insn, 10, 6); - int opcode = extract32(insn, 16, 3); - int rmode = extract32(insn, 19, 2); - int type = extract32(insn, 22, 2); - bool sbit = extract32(insn, 29, 1); - bool sf = extract32(insn, 31, 1); - bool itof; - - if (sbit || (!sf && scale < 32)) { - unallocated_encoding(s); - return; - } + if (a->ra == 31 && !is_sub) { + /* Special-case MADD with rA == XZR; it is the standard MUL alias */ + tcg_gen_mul_i64(tcg_rd, tcg_op1, tcg_op2); + } else { + TCGv_i64 tcg_tmp = tcg_temp_new_i64(); + TCGv_i64 tcg_ra = cpu_reg(s, a->ra); - switch (type) { - case 0: /* float32 */ - case 1: /* float64 */ 
- break; - case 3: /* float16 */ - if (dc_isar_feature(aa64_fp16, s)) { - break; + tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2); + if (is_sub) { + tcg_gen_sub_i64(tcg_rd, tcg_ra, tcg_tmp); + } else { + tcg_gen_add_i64(tcg_rd, tcg_ra, tcg_tmp); } - /* fallthru */ - default: - unallocated_encoding(s); - return; } - switch ((rmode << 3) | opcode) { - case 0x2: /* SCVTF */ - case 0x3: /* UCVTF */ - itof = true; - break; - case 0x18: /* FCVTZS */ - case 0x19: /* FCVTZU */ - itof = false; - break; - default: - unallocated_encoding(s); - return; + if (!sf) { + tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } + return true; +} - if (!fp_access_check(s)) { - return; - } +TRANS(MADD_w, do_muladd, a, false, false, MO_64) +TRANS(MSUB_w, do_muladd, a, false, true, MO_64) +TRANS(MADD_x, do_muladd, a, true, false, MO_64) +TRANS(MSUB_x, do_muladd, a, true, true, MO_64) - handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type); -} +TRANS(SMADDL, do_muladd, a, true, false, MO_SL) +TRANS(SMSUBL, do_muladd, a, true, true, MO_SL) +TRANS(UMADDL, do_muladd, a, true, false, MO_UL) +TRANS(UMSUBL, do_muladd, a, true, true, MO_UL) -static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof) +static bool do_adc_sbc(DisasContext *s, arg_rrr_sf *a, + bool is_sub, bool setflags) { - /* FMOV: gpr to or from float, double, or top half of quad fp reg, - * without conversion. - */ + TCGv_i64 tcg_y, tcg_rn, tcg_rd; - if (itof) { - TCGv_i64 tcg_rn = cpu_reg(s, rn); - TCGv_i64 tmp; + tcg_rd = cpu_reg(s, a->rd); + tcg_rn = cpu_reg(s, a->rn); - switch (type) { - case 0: - /* 32 bit */ - tmp = tcg_temp_new_i64(); - tcg_gen_ext32u_i64(tmp, tcg_rn); - write_fp_dreg(s, rd, tmp); - break; - case 1: - /* 64 bit */ - write_fp_dreg(s, rd, tcg_rn); - break; - case 2: - /* 64 bit to top half. 
*/ - tcg_gen_st_i64(tcg_rn, tcg_env, fp_reg_hi_offset(s, rd)); - clear_vec_high(s, true, rd); - break; - case 3: - /* 16 bit */ - tmp = tcg_temp_new_i64(); - tcg_gen_ext16u_i64(tmp, tcg_rn); - write_fp_dreg(s, rd, tmp); - break; - default: - g_assert_not_reached(); - } + if (is_sub) { + tcg_y = tcg_temp_new_i64(); + tcg_gen_not_i64(tcg_y, cpu_reg(s, a->rm)); } else { - TCGv_i64 tcg_rd = cpu_reg(s, rd); + tcg_y = cpu_reg(s, a->rm); + } - switch (type) { - case 0: - /* 32 bit */ - tcg_gen_ld32u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_32)); - break; - case 1: - /* 64 bit */ - tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_64)); - break; - case 2: - /* 64 bits from top half */ - tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_hi_offset(s, rn)); - break; - case 3: - /* 16 bit */ - tcg_gen_ld16u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_16)); - break; - default: - g_assert_not_reached(); - } + if (setflags) { + gen_adc_CC(a->sf, tcg_rd, tcg_rn, tcg_y); + } else { + gen_adc(a->sf, tcg_rd, tcg_rn, tcg_y); } + return true; } -static void handle_fjcvtzs(DisasContext *s, int rd, int rn) -{ - TCGv_i64 t = read_fp_dreg(s, rn); - TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR); - - gen_helper_fjcvtzs(t, t, fpstatus); +TRANS(ADC, do_adc_sbc, a, false, false) +TRANS(SBC, do_adc_sbc, a, true, false) +TRANS(ADCS, do_adc_sbc, a, false, true) +TRANS(SBCS, do_adc_sbc, a, true, true) - tcg_gen_ext32u_i64(cpu_reg(s, rd), t); - tcg_gen_extrh_i64_i32(cpu_ZF, t); - tcg_gen_movi_i32(cpu_CF, 0); - tcg_gen_movi_i32(cpu_NF, 0); - tcg_gen_movi_i32(cpu_VF, 0); -} +static bool trans_RMIF(DisasContext *s, arg_RMIF *a) +{ + int mask = a->mask; + TCGv_i64 tcg_rn; + TCGv_i32 nzcv; -/* Floating point <-> integer conversions - * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0 - * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+ - * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd | - * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+ - */ -static void disas_fp_int_conv(DisasContext *s, uint32_t insn) -{ - int rd = extract32(insn, 0, 5); - int rn = extract32(insn, 5, 5); - int opcode = extract32(insn, 16, 3); - int rmode = extract32(insn, 19, 2); - int type = extract32(insn, 22, 2); - bool sbit = extract32(insn, 29, 1); - bool sf = extract32(insn, 31, 1); - bool itof = false; - - if (sbit) { - goto do_unallocated; - } - - switch (opcode) { - case 2: /* SCVTF */ - case 3: /* UCVTF */ - itof = true; - /* fallthru */ - case 4: /* FCVTAS */ - case 5: /* FCVTAU */ - if (rmode != 0) { - goto do_unallocated; - } - /* fallthru */ - case 0: /* FCVT[NPMZ]S */ - case 1: /* FCVT[NPMZ]U */ - switch (type) { - case 0: /* float32 */ - case 1: /* float64 */ - break; - case 3: /* float16 */ - if (!dc_isar_feature(aa64_fp16, s)) { - goto do_unallocated; - } - break; - default: - goto do_unallocated; - } - if (!fp_access_check(s)) { - return; - } - handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type); - break; + if (!dc_isar_feature(aa64_condm_4, s)) { + return false; + } - default: - switch (sf << 7 | type << 5 | rmode << 3 | opcode) { - case 0b01100110: /* FMOV half <-> 32-bit int */ - case 0b01100111: - case 0b11100110: /* FMOV half <-> 64-bit int */ - case 0b11100111: - if (!dc_isar_feature(aa64_fp16, s)) { - goto do_unallocated; - } - /* fallthru */ - case 0b00000110: /* FMOV 32-bit */ - case 0b00000111: - case 0b10100110: /* FMOV 64-bit */ - case 0b10100111: - case 0b11001110: /* FMOV top half of 128-bit */ - case 0b11001111: - if 
(!fp_access_check(s)) { - return; - } - itof = opcode & 1; - handle_fmov(s, rd, rn, type, itof); - break; + tcg_rn = read_cpu_reg(s, a->rn, 1); + tcg_gen_rotri_i64(tcg_rn, tcg_rn, a->imm); - case 0b00111110: /* FJCVTZS */ - if (!dc_isar_feature(aa64_jscvt, s)) { - goto do_unallocated; - } else if (fp_access_check(s)) { - handle_fjcvtzs(s, rd, rn); - } - break; + nzcv = tcg_temp_new_i32(); + tcg_gen_extrl_i64_i32(nzcv, tcg_rn); - default: - do_unallocated: - unallocated_encoding(s); - return; - } - break; + if (mask & 8) { /* N */ + tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3); + } + if (mask & 4) { /* Z */ + tcg_gen_not_i32(cpu_ZF, nzcv); + tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4); + } + if (mask & 2) { /* C */ + tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1); } + if (mask & 1) { /* V */ + tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0); + } + return true; } -/* FP-specific subcases of table C3-6 (SIMD and FP data processing) - * 31 30 29 28 25 24 0 - * +---+---+---+---------+-----------------------------+ - * | | 0 | | 1 1 1 1 | | - * +---+---+---+---------+-----------------------------+ - */ -static void disas_data_proc_fp(DisasContext *s, uint32_t insn) +static bool do_setf(DisasContext *s, int rn, int shift) { - if (extract32(insn, 24, 1)) { - unallocated_encoding(s); /* in decodetree */ - } else if (extract32(insn, 21, 1) == 0) { - /* Floating point to fixed point conversions */ - disas_fp_fixed_conv(s, insn); - } else { - switch (extract32(insn, 10, 2)) { - case 1: - /* Floating point conditional compare */ - disas_fp_ccomp(s, insn); - break; - case 2: - /* Floating point data-processing (2 source) */ - unallocated_encoding(s); /* in decodetree */ - break; - case 3: - /* Floating point conditional select */ - unallocated_encoding(s); /* in decodetree */ - break; - case 0: - switch (ctz32(extract32(insn, 12, 4))) { - case 0: /* [15:12] == xxx1 */ - /* Floating point immediate */ - disas_fp_imm(s, insn); - break; - case 1: /* [15:12] == xx10 */ - /* Floating point compare */ - disas_fp_compare(s, insn); - break; - case 2: /* [15:12] == x100 */ - /* Floating point data-processing (1 source) */ - disas_fp_1src(s, insn); - break; - case 3: /* [15:12] == 1000 */ - unallocated_encoding(s); - break; - default: /* [15:12] == 0000 */ - /* Floating point <-> integer conversions */ - disas_fp_int_conv(s, insn); - break; - } - break; - } - } + TCGv_i32 tmp = tcg_temp_new_i32(); + + tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn)); + tcg_gen_shli_i32(cpu_NF, tmp, shift); + tcg_gen_shli_i32(cpu_VF, tmp, shift - 1); + tcg_gen_mov_i32(cpu_ZF, cpu_NF); + tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF); + return true; } -static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right, - int pos) +TRANS_FEAT(SETF8, aa64_condm_4, do_setf, a->rn, 24) +TRANS_FEAT(SETF16, aa64_condm_4, do_setf, a->rn, 16) + +/* CCMP, CCMN */ +static bool trans_CCMP(DisasContext *s, arg_CCMP *a) { - /* Extract 64 bits from the middle of two concatenated 64 bit - * vector register slices left:right. The extracted bits start - * at 'pos' bits into the right (least significant) side. - * We return the result in tcg_right, and guarantee not to - * trash tcg_left. 
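/*
 * Illustrative sketch only -- not part of the patch above.  trans_RMIF()
 * leans on QEMU's AArch64 flag representation: N is bit 31 of cpu_NF,
 * Z is "cpu_ZF == 0", C is cpu_CF itself (0 or 1), and V is bit 31 of
 * cpu_VF.  A plain-C model of RMIF Xn, #rot, #mask under that convention
 * (RefFlags and ref_rmif are hypothetical names):
 */
#include <stdint.h>

typedef struct {
    uint32_t nf, zf, cf, vf;            /* stand-ins for cpu_NF/ZF/CF/VF */
} RefFlags;

void ref_rmif(RefFlags *f, uint64_t xn, unsigned rot, unsigned mask)
{
    uint64_t nzcv = (xn >> rot) | (xn << ((64 - rot) & 63));

    if (mask & 8) {                     /* N taken from bit 3 */
        f->nf = (nzcv & 8) ? 1u << 31 : 0;
    }
    if (mask & 4) {                     /* Z set iff bit 2 is set */
        f->zf = (nzcv & 4) ? 0 : 1;
    }
    if (mask & 2) {                     /* C taken from bit 1 */
        f->cf = (nzcv >> 1) & 1;
    }
    if (mask & 1) {                     /* V taken from bit 0 */
        f->vf = (nzcv & 1) ? 1u << 31 : 0;
    }
}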
- */ + TCGv_i32 tcg_t0 = tcg_temp_new_i32(); + TCGv_i32 tcg_t1 = tcg_temp_new_i32(); + TCGv_i32 tcg_t2 = tcg_temp_new_i32(); TCGv_i64 tcg_tmp = tcg_temp_new_i64(); - assert(pos > 0 && pos < 64); + TCGv_i64 tcg_rn, tcg_y; + DisasCompare c; + unsigned nzcv; + bool has_andc; - tcg_gen_shri_i64(tcg_right, tcg_right, pos); - tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos); - tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp); -} + /* Set T0 = !COND. */ + arm_test_cc(&c, a->cond); + tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0); -/* EXT - * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0 - * +---+---+-------------+-----+---+------+---+------+---+------+------+ - * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd | - * +---+---+-------------+-----+---+------+---+------+---+------+------+ - */ -static void disas_simd_ext(DisasContext *s, uint32_t insn) -{ - int is_q = extract32(insn, 30, 1); - int op2 = extract32(insn, 22, 2); - int imm4 = extract32(insn, 11, 4); - int rm = extract32(insn, 16, 5); - int rn = extract32(insn, 5, 5); - int rd = extract32(insn, 0, 5); - int pos = imm4 << 3; - TCGv_i64 tcg_resl, tcg_resh; - - if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) { - unallocated_encoding(s); - return; + /* Load the arguments for the new comparison. */ + if (a->imm) { + tcg_y = tcg_constant_i64(a->y); + } else { + tcg_y = cpu_reg(s, a->y); } + tcg_rn = cpu_reg(s, a->rn); - if (!fp_access_check(s)) { - return; + /* Set the flags for the new comparison. */ + if (a->op) { + gen_sub_CC(a->sf, tcg_tmp, tcg_rn, tcg_y); + } else { + gen_add_CC(a->sf, tcg_tmp, tcg_rn, tcg_y); } - tcg_resh = tcg_temp_new_i64(); - tcg_resl = tcg_temp_new_i64(); - - /* Vd gets bits starting at pos bits into Vm:Vn. This is - * either extracting 128 bits from a 128:128 concatenation, or - * extracting 64 bits from a 64:64 concatenation. + /* + * If COND was false, force the flags to #nzcv. Compute two masks + * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0). + * For tcg hosts that support ANDC, we can make do with just T1. + * In either case, allow the tcg optimizer to delete any unused mask. 
*/ - if (!is_q) { - read_vec_element(s, tcg_resl, rn, 0, MO_64); - if (pos != 0) { - read_vec_element(s, tcg_resh, rm, 0, MO_64); - do_ext64(s, tcg_resh, tcg_resl, pos); - } - } else { - TCGv_i64 tcg_hh; - typedef struct { - int reg; - int elt; - } EltPosns; - EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} }; - EltPosns *elt = eltposns; + tcg_gen_neg_i32(tcg_t1, tcg_t0); + tcg_gen_subi_i32(tcg_t2, tcg_t0, 1); - if (pos >= 64) { - elt++; - pos -= 64; + nzcv = a->nzcv; + has_andc = tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0); + if (nzcv & 8) { /* N */ + tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1); + } else { + if (has_andc) { + tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1); + } else { + tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2); } - - read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64); - elt++; - read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64); - elt++; - if (pos != 0) { - do_ext64(s, tcg_resh, tcg_resl, pos); - tcg_hh = tcg_temp_new_i64(); - read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64); - do_ext64(s, tcg_hh, tcg_resh, pos); + } + if (nzcv & 4) { /* Z */ + if (has_andc) { + tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1); + } else { + tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2); } + } else { + tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0); } - - write_vec_element(s, tcg_resl, rd, 0, MO_64); - if (is_q) { - write_vec_element(s, tcg_resh, rd, 1, MO_64); + if (nzcv & 2) { /* C */ + tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0); + } else { + if (has_andc) { + tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1); + } else { + tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2); + } } - clear_vec_high(s, is_q, rd); + if (nzcv & 1) { /* V */ + tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1); + } else { + if (has_andc) { + tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1); + } else { + tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2); + } + } + return true; } -/* TBL/TBX - * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0 - * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+ - * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd | - * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+ - */ -static void disas_simd_tb(DisasContext *s, uint32_t insn) +static bool trans_CSEL(DisasContext *s, arg_CSEL *a) { - int op2 = extract32(insn, 22, 2); - int is_q = extract32(insn, 30, 1); - int rm = extract32(insn, 16, 5); - int rn = extract32(insn, 5, 5); - int rd = extract32(insn, 0, 5); - int is_tbx = extract32(insn, 12, 1); - int len = (extract32(insn, 13, 2) + 1) * 16; - - if (op2 != 0) { - unallocated_encoding(s); - return; - } - - if (!fp_access_check(s)) { - return; - } + TCGv_i64 tcg_rd = cpu_reg(s, a->rd); + TCGv_i64 zero = tcg_constant_i64(0); + DisasCompare64 c; - tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd), - vec_full_reg_offset(s, rm), tcg_env, - is_q ? 16 : 8, vec_full_reg_size(s), - (len << 6) | (is_tbx << 5) | rn, - gen_helper_simd_tblx); -} + a64_test_cc(&c, a->cond); -/* ZIP/UZP/TRN - * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0 - * +---+---+-------------+------+---+------+---+------------------+------+ - * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd | - * +---+---+-------------+------+---+------+---+------------------+------+ - */ -static void disas_simd_zip_trn(DisasContext *s, uint32_t insn) -{ - int rd = extract32(insn, 0, 5); - int rn = extract32(insn, 5, 5); - int rm = extract32(insn, 16, 5); - int size = extract32(insn, 22, 2); - /* opc field bits [1:0] indicate ZIP/UZP/TRN; - * bit 2 indicates 1 vs 2 variant of the insn. 
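/*
 * Illustrative sketch only -- not part of the patch above.  The two masks
 * computed by trans_CCMP() (t1 = cond ? 0 : -1, t2 = cond ? -1 : 0) turn
 * the conditional flag update into straight-line code: OR-ing with t1 and
 * AND-ing with t2 are no-ops while the condition holds, and force the flag
 * to the #nzcv value once it fails.  Hypothetical helper names:
 */
#include <stdint.h>

uint32_t ccmp_force_set(uint32_t flag, int cond_holds)
{
    uint32_t t1 = cond_holds ? 0 : UINT32_MAX;
    return flag | t1;       /* all-ones (flag set) when the condition fails */
}

uint32_t ccmp_force_clear(uint32_t flag, int cond_holds)
{
    uint32_t t2 = cond_holds ? UINT32_MAX : 0;
    return flag & t2;       /* zero (flag clear) when the condition fails */
}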
- */ - int opcode = extract32(insn, 12, 2); - bool part = extract32(insn, 14, 1); - bool is_q = extract32(insn, 30, 1); - int esize = 8 << size; - int i; - int datasize = is_q ? 128 : 64; - int elements = datasize / esize; - TCGv_i64 tcg_res[2], tcg_ele; + if (a->rn == 31 && a->rm == 31 && (a->else_inc ^ a->else_inv)) { + /* CSET & CSETM. */ + if (a->else_inv) { + tcg_gen_negsetcond_i64(tcg_invert_cond(c.cond), + tcg_rd, c.value, zero); + } else { + tcg_gen_setcond_i64(tcg_invert_cond(c.cond), + tcg_rd, c.value, zero); + } + } else { + TCGv_i64 t_true = cpu_reg(s, a->rn); + TCGv_i64 t_false = read_cpu_reg(s, a->rm, 1); - if (opcode == 0 || (size == 3 && !is_q)) { - unallocated_encoding(s); - return; + if (a->else_inv && a->else_inc) { + tcg_gen_neg_i64(t_false, t_false); + } else if (a->else_inv) { + tcg_gen_not_i64(t_false, t_false); + } else if (a->else_inc) { + tcg_gen_addi_i64(t_false, t_false, 1); + } + tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false); } - if (!fp_access_check(s)) { - return; + if (!a->sf) { + tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } + return true; +} - tcg_res[0] = tcg_temp_new_i64(); - tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL; - tcg_ele = tcg_temp_new_i64(); - - for (i = 0; i < elements; i++) { - int o, w; +typedef struct FPScalar1Int { + void (*gen_h)(TCGv_i32, TCGv_i32); + void (*gen_s)(TCGv_i32, TCGv_i32); + void (*gen_d)(TCGv_i64, TCGv_i64); +} FPScalar1Int; - switch (opcode) { - case 1: /* UZP1/2 */ - { - int midpoint = elements / 2; - if (i < midpoint) { - read_vec_element(s, tcg_ele, rn, 2 * i + part, size); +static bool do_fp1_scalar_int(DisasContext *s, arg_rr_e *a, + const FPScalar1Int *f, + bool merging) +{ + switch (a->esz) { + case MO_64: + if (fp_access_check(s)) { + TCGv_i64 t = read_fp_dreg(s, a->rn); + f->gen_d(t, t); + if (merging) { + write_fp_dreg_merging(s, a->rd, a->rd, t); } else { - read_vec_element(s, tcg_ele, rm, - 2 * (i - midpoint) + part, size); + write_fp_dreg(s, a->rd, t); } - break; } - case 2: /* TRN1/2 */ - if (i & 1) { - read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size); + break; + case MO_32: + if (fp_access_check(s)) { + TCGv_i32 t = read_fp_sreg(s, a->rn); + f->gen_s(t, t); + if (merging) { + write_fp_sreg_merging(s, a->rd, a->rd, t); } else { - read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size); + write_fp_sreg(s, a->rd, t); } - break; - case 3: /* ZIP1/2 */ - { - int base = part * elements / 2; - if (i & 1) { - read_vec_element(s, tcg_ele, rm, base + (i >> 1), size); - } else { - read_vec_element(s, tcg_ele, rn, base + (i >> 1), size); - } - break; } - default: - g_assert_not_reached(); + break; + case MO_16: + if (!dc_isar_feature(aa64_fp16, s)) { + return false; } - - w = (i * esize) / 64; - o = (i * esize) % 64; - if (o == 0) { - tcg_gen_mov_i64(tcg_res[w], tcg_ele); - } else { - tcg_gen_shli_i64(tcg_ele, tcg_ele, o); - tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele); + if (fp_access_check(s)) { + TCGv_i32 t = read_fp_hreg(s, a->rn); + f->gen_h(t, t); + if (merging) { + write_fp_hreg_merging(s, a->rd, a->rd, t); + } else { + write_fp_sreg(s, a->rd, t); + } } + break; + default: + return false; } - - for (i = 0; i <= is_q; ++i) { - write_vec_element(s, tcg_res[i], rd, i, MO_64); - } - clear_vec_high(s, is_q, rd); + return true; } -/* - * do_reduction_op helper - * - * This mirrors the Reduce() pseudocode in the ARM ARM. It is - * important for correct NaN propagation that we do these - * operations in exactly the order specified by the pseudocode. 
- * - * This is a recursive function, TCG temps should be freed by the - * calling function once it is done with the values. - */ -static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn, - int esize, int size, int vmap, TCGv_ptr fpst) +static bool do_fp1_scalar_int_2fn(DisasContext *s, arg_rr_e *a, + const FPScalar1Int *fnormal, + const FPScalar1Int *fah) { - if (esize == size) { - int element; - MemOp msize = esize == 16 ? MO_16 : MO_32; - TCGv_i32 tcg_elem; - - /* We should have one register left here */ - assert(ctpop8(vmap) == 1); - element = ctz32(vmap); - assert(element < 8); - - tcg_elem = tcg_temp_new_i32(); - read_vec_element_i32(s, tcg_elem, rn, element, msize); - return tcg_elem; - } else { - int bits = size / 2; - int shift = ctpop8(vmap) / 2; - int vmap_lo = (vmap >> shift) & vmap; - int vmap_hi = (vmap & ~vmap_lo); - TCGv_i32 tcg_hi, tcg_lo, tcg_res; - - tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst); - tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst); - tcg_res = tcg_temp_new_i32(); - - switch (fpopcode) { - case 0x0c: /* fmaxnmv half-precision */ - gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst); - break; - case 0x0f: /* fmaxv half-precision */ - gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst); - break; - case 0x1c: /* fminnmv half-precision */ - gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst); - break; - case 0x1f: /* fminv half-precision */ - gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst); - break; - case 0x2c: /* fmaxnmv */ - gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst); - break; - case 0x2f: /* fmaxv */ - gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst); - break; - case 0x3c: /* fminnmv */ - gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst); - break; - case 0x3f: /* fminv */ - gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst); - break; - default: - g_assert_not_reached(); - } - return tcg_res; - } + return do_fp1_scalar_int(s, a, s->fpcr_ah ? fah : fnormal, true); } -/* AdvSIMD across lanes - * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 - * +---+---+---+-----------+------+-----------+--------+-----+------+------+ - * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd | - * +---+---+---+-----------+------+-----------+--------+-----+------+------+ - */ -static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) -{ - int rd = extract32(insn, 0, 5); - int rn = extract32(insn, 5, 5); - int size = extract32(insn, 22, 2); - int opcode = extract32(insn, 12, 5); - bool is_q = extract32(insn, 30, 1); - bool is_u = extract32(insn, 29, 1); - bool is_fp = false; - bool is_min = false; - int esize; - int elements; - int i; - TCGv_i64 tcg_res, tcg_elt; - - switch (opcode) { - case 0x1b: /* ADDV */ - if (is_u) { - unallocated_encoding(s); - return; - } - /* fall through */ - case 0x3: /* SADDLV, UADDLV */ - case 0xa: /* SMAXV, UMAXV */ - case 0x1a: /* SMINV, UMINV */ - if (size == 3 || (size == 2 && !is_q)) { - unallocated_encoding(s); - return; - } - break; - case 0xc: /* FMAXNMV, FMINNMV */ - case 0xf: /* FMAXV, FMINV */ - /* Bit 1 of size field encodes min vs max and the actual size - * depends on the encoding of the U bit. If not set (and FP16 - * enabled) then we do half-precision float instead of single - * precision. 
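/*
 * Illustrative sketch only -- not part of the patch above.  The removed
 * do_reduction_op() splits the lane map in half at every step, i.e. it
 * reduces as a balanced binary tree rather than left-to-right, because the
 * Arm ARM's Reduce() pseudocode is pairwise and min/max NaN propagation
 * depends on the association order.  The same shape in plain C
 * (reduce_pairwise is a hypothetical name):
 */
#include <stddef.h>

float reduce_pairwise(const float *lane, size_t n, float (*op)(float, float))
{
    /* n is a power of two, as it is for the AdvSIMD across-lanes forms. */
    if (n == 1) {
        return lane[0];
    }
    return op(reduce_pairwise(lane, n / 2, op),
              reduce_pairwise(lane + n / 2, n / 2, op));
}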
- */ - is_min = extract32(size, 1, 1); - is_fp = true; - if (!is_u && dc_isar_feature(aa64_fp16, s)) { - size = 1; - } else if (!is_u || !is_q || extract32(size, 0, 1)) { - unallocated_encoding(s); - return; - } else { - size = 2; - } - break; - default: - unallocated_encoding(s); - return; - } +static const FPScalar1Int f_scalar_fmov = { + tcg_gen_mov_i32, + tcg_gen_mov_i32, + tcg_gen_mov_i64, +}; +TRANS(FMOV_s, do_fp1_scalar_int, a, &f_scalar_fmov, false) - if (!fp_access_check(s)) { - return; - } +static const FPScalar1Int f_scalar_fabs = { + gen_vfp_absh, + gen_vfp_abss, + gen_vfp_absd, +}; +static const FPScalar1Int f_scalar_ah_fabs = { + gen_vfp_ah_absh, + gen_vfp_ah_abss, + gen_vfp_ah_absd, +}; +TRANS(FABS_s, do_fp1_scalar_int_2fn, a, &f_scalar_fabs, &f_scalar_ah_fabs) - esize = 8 << size; - elements = (is_q ? 128 : 64) / esize; +static const FPScalar1Int f_scalar_fneg = { + gen_vfp_negh, + gen_vfp_negs, + gen_vfp_negd, +}; +static const FPScalar1Int f_scalar_ah_fneg = { + gen_vfp_ah_negh, + gen_vfp_ah_negs, + gen_vfp_ah_negd, +}; +TRANS(FNEG_s, do_fp1_scalar_int_2fn, a, &f_scalar_fneg, &f_scalar_ah_fneg) - tcg_res = tcg_temp_new_i64(); - tcg_elt = tcg_temp_new_i64(); +typedef struct FPScalar1 { + void (*gen_h)(TCGv_i32, TCGv_i32, TCGv_ptr); + void (*gen_s)(TCGv_i32, TCGv_i32, TCGv_ptr); + void (*gen_d)(TCGv_i64, TCGv_i64, TCGv_ptr); +} FPScalar1; - /* These instructions operate across all lanes of a vector - * to produce a single result. We can guarantee that a 64 - * bit intermediate is sufficient: - * + for [US]ADDLV the maximum element size is 32 bits, and - * the result type is 64 bits - * + for FMAX*V, FMIN*V, ADDV the intermediate type is the - * same as the element size, which is 32 bits at most - * For the integer operations we can choose to work at 64 - * or 32 bits and truncate at the end; for simplicity - * we use 64 bits always. The floating point - * ops do require 32 bit intermediates, though. - */ - if (!is_fp) { - read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN)); - - for (i = 1; i < elements; i++) { - read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN)); - - switch (opcode) { - case 0x03: /* SADDLV / UADDLV */ - case 0x1b: /* ADDV */ - tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt); - break; - case 0x0a: /* SMAXV / UMAXV */ - if (is_u) { - tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt); - } else { - tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt); - } - break; - case 0x1a: /* SMINV / UMINV */ - if (is_u) { - tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt); - } else { - tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt); - } - break; - default: - g_assert_not_reached(); - } +static bool do_fp1_scalar_with_fpsttype(DisasContext *s, arg_rr_e *a, + const FPScalar1 *f, int rmode, + ARMFPStatusFlavour fpsttype) +{ + TCGv_i32 tcg_rmode = NULL; + TCGv_ptr fpst; + TCGv_i64 t64; + TCGv_i32 t32; + int check = fp_access_check_scalar_hsd(s, a->esz); - } - } else { - /* Floating point vector reduction ops which work across 32 - * bit (single) or 16 bit (half-precision) intermediates. - * Note that correct NaN propagation requires that we do these - * operations in exactly the order specified by the pseudocode. - */ - TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); - int fpopcode = opcode | is_min << 4 | is_u << 5; - int vmap = (1 << elements) - 1; - TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize, - (is_q ? 
128 : 64), vmap, fpst); - tcg_gen_extu_i32_i64(tcg_res, tcg_res32); + if (check <= 0) { + return check == 0; } - /* Now truncate the result to the width required for the final output */ - if (opcode == 0x03) { - /* SADDLV, UADDLV: result is 2*esize */ - size++; + fpst = fpstatus_ptr(fpsttype); + if (rmode >= 0) { + tcg_rmode = gen_set_rmode(rmode, fpst); } - switch (size) { - case 0: - tcg_gen_ext8u_i64(tcg_res, tcg_res); - break; - case 1: - tcg_gen_ext16u_i64(tcg_res, tcg_res); + switch (a->esz) { + case MO_64: + t64 = read_fp_dreg(s, a->rn); + f->gen_d(t64, t64, fpst); + write_fp_dreg_merging(s, a->rd, a->rd, t64); break; - case 2: - tcg_gen_ext32u_i64(tcg_res, tcg_res); + case MO_32: + t32 = read_fp_sreg(s, a->rn); + f->gen_s(t32, t32, fpst); + write_fp_sreg_merging(s, a->rd, a->rd, t32); break; - case 3: + case MO_16: + t32 = read_fp_hreg(s, a->rn); + f->gen_h(t32, t32, fpst); + write_fp_hreg_merging(s, a->rd, a->rd, t32); break; default: g_assert_not_reached(); } - write_fp_dreg(s, rd, tcg_res); + if (rmode >= 0) { + gen_restore_rmode(tcg_rmode, fpst); + } + return true; +} + +static bool do_fp1_scalar(DisasContext *s, arg_rr_e *a, + const FPScalar1 *f, int rmode) +{ + return do_fp1_scalar_with_fpsttype(s, a, f, rmode, + a->esz == MO_16 ? + FPST_A64_F16 : FPST_A64); } -/* AdvSIMD modified immediate - * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0 - * +---+---+----+---------------------+-----+-------+----+---+-------+------+ - * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd | - * +---+---+----+---------------------+-----+-------+----+---+-------+------+ - * - * There are a number of operations that can be carried out here: - * MOVI - move (shifted) imm into register - * MVNI - move inverted (shifted) imm into register - * ORR - bitwise OR of (shifted) imm with register - * BIC - bitwise clear of (shifted) imm with register - * With ARMv8.2 we also have: - * FMOV half-precision - */ -static void disas_simd_mod_imm(DisasContext *s, uint32_t insn) -{ - int rd = extract32(insn, 0, 5); - int cmode = extract32(insn, 12, 4); - int o2 = extract32(insn, 11, 1); - uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5); - bool is_neg = extract32(insn, 29, 1); - bool is_q = extract32(insn, 30, 1); - uint64_t imm = 0; - - if (o2) { - if (cmode != 0xf || is_neg) { - unallocated_encoding(s); - return; - } - /* FMOV (vector, immediate) - half-precision */ - if (!dc_isar_feature(aa64_fp16, s)) { - unallocated_encoding(s); - return; - } - imm = vfp_expand_imm(MO_16, abcdefgh); - /* now duplicate across the lanes */ - imm = dup_const(MO_16, imm); - } else { - if (cmode == 0xf && is_neg && !is_q) { - unallocated_encoding(s); - return; - } - imm = asimd_imm_const(abcdefgh, cmode, is_neg); - } +static bool do_fp1_scalar_ah(DisasContext *s, arg_rr_e *a, + const FPScalar1 *f, int rmode) +{ + return do_fp1_scalar_with_fpsttype(s, a, f, rmode, select_ah_fpst(s, a->esz)); +} - if (!fp_access_check(s)) { - return; +static const FPScalar1 f_scalar_fsqrt = { + gen_helper_vfp_sqrth, + gen_helper_vfp_sqrts, + gen_helper_vfp_sqrtd, +}; +TRANS(FSQRT_s, do_fp1_scalar, a, &f_scalar_fsqrt, -1) + +static const FPScalar1 f_scalar_frint = { + gen_helper_advsimd_rinth, + gen_helper_rints, + gen_helper_rintd, +}; +TRANS(FRINTN_s, do_fp1_scalar, a, &f_scalar_frint, FPROUNDING_TIEEVEN) +TRANS(FRINTP_s, do_fp1_scalar, a, &f_scalar_frint, FPROUNDING_POSINF) +TRANS(FRINTM_s, do_fp1_scalar, a, &f_scalar_frint, FPROUNDING_NEGINF) +TRANS(FRINTZ_s, do_fp1_scalar, a, &f_scalar_frint, 
FPROUNDING_ZERO) +TRANS(FRINTA_s, do_fp1_scalar, a, &f_scalar_frint, FPROUNDING_TIEAWAY) +TRANS(FRINTI_s, do_fp1_scalar, a, &f_scalar_frint, -1) + +static const FPScalar1 f_scalar_frintx = { + gen_helper_advsimd_rinth_exact, + gen_helper_rints_exact, + gen_helper_rintd_exact, +}; +TRANS(FRINTX_s, do_fp1_scalar, a, &f_scalar_frintx, -1) + +static bool trans_BFCVT_s(DisasContext *s, arg_rr_e *a) +{ + ARMFPStatusFlavour fpsttype = s->fpcr_ah ? FPST_AH : FPST_A64; + TCGv_i32 t32; + int check; + + if (!dc_isar_feature(aa64_bf16, s)) { + return false; } - if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) { - /* MOVI or MVNI, with MVNI negation handled above. */ - tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8, - vec_full_reg_size(s), imm); - } else { - /* ORR or BIC, with BIC negation to AND handled above. */ - if (is_neg) { - gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64); - } else { - gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64); - } + check = fp_access_check_scalar_hsd(s, a->esz); + + if (check <= 0) { + return check == 0; } + + t32 = read_fp_sreg(s, a->rn); + gen_helper_bfcvt(t32, t32, fpstatus_ptr(fpsttype)); + write_fp_hreg_merging(s, a->rd, a->rd, t32); + return true; } -/* - * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate) - * - * This code is handles the common shifting code and is used by both - * the vector and scalar code. - */ -static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src, - TCGv_i64 tcg_rnd, bool accumulate, - bool is_u, int size, int shift) -{ - bool extended_result = false; - bool round = tcg_rnd != NULL; - int ext_lshift = 0; - TCGv_i64 tcg_src_hi; - - if (round && size == 3) { - extended_result = true; - ext_lshift = 64 - shift; - tcg_src_hi = tcg_temp_new_i64(); - } else if (shift == 64) { - if (!accumulate && is_u) { - /* result is zero */ - tcg_gen_movi_i64(tcg_res, 0); - return; - } - } +static const FPScalar1 f_scalar_frint32 = { + NULL, + gen_helper_frint32_s, + gen_helper_frint32_d, +}; +TRANS_FEAT(FRINT32Z_s, aa64_frint, do_fp1_scalar, a, + &f_scalar_frint32, FPROUNDING_ZERO) +TRANS_FEAT(FRINT32X_s, aa64_frint, do_fp1_scalar, a, &f_scalar_frint32, -1) + +static const FPScalar1 f_scalar_frint64 = { + NULL, + gen_helper_frint64_s, + gen_helper_frint64_d, +}; +TRANS_FEAT(FRINT64Z_s, aa64_frint, do_fp1_scalar, a, + &f_scalar_frint64, FPROUNDING_ZERO) +TRANS_FEAT(FRINT64X_s, aa64_frint, do_fp1_scalar, a, &f_scalar_frint64, -1) + +static const FPScalar1 f_scalar_frecpe = { + gen_helper_recpe_f16, + gen_helper_recpe_f32, + gen_helper_recpe_f64, +}; +static const FPScalar1 f_scalar_frecpe_rpres = { + gen_helper_recpe_f16, + gen_helper_recpe_rpres_f32, + gen_helper_recpe_f64, +}; +TRANS(FRECPE_s, do_fp1_scalar_ah, a, + s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? 
+ &f_scalar_frecpe_rpres : &f_scalar_frecpe, -1) + +static const FPScalar1 f_scalar_frecpx = { + gen_helper_frecpx_f16, + gen_helper_frecpx_f32, + gen_helper_frecpx_f64, +}; +TRANS(FRECPX_s, do_fp1_scalar_ah, a, &f_scalar_frecpx, -1) - /* Deal with the rounding step */ - if (round) { - if (extended_result) { - TCGv_i64 tcg_zero = tcg_constant_i64(0); - if (!is_u) { - /* take care of sign extending tcg_res */ - tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63); - tcg_gen_add2_i64(tcg_src, tcg_src_hi, - tcg_src, tcg_src_hi, - tcg_rnd, tcg_zero); - } else { - tcg_gen_add2_i64(tcg_src, tcg_src_hi, - tcg_src, tcg_zero, - tcg_rnd, tcg_zero); - } - } else { - tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd); - } - } +static const FPScalar1 f_scalar_frsqrte = { + gen_helper_rsqrte_f16, + gen_helper_rsqrte_f32, + gen_helper_rsqrte_f64, +}; +static const FPScalar1 f_scalar_frsqrte_rpres = { + gen_helper_rsqrte_f16, + gen_helper_rsqrte_rpres_f32, + gen_helper_rsqrte_f64, +}; +TRANS(FRSQRTE_s, do_fp1_scalar_ah, a, + s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? + &f_scalar_frsqrte_rpres : &f_scalar_frsqrte, -1) - /* Now do the shift right */ - if (round && extended_result) { - /* extended case, >64 bit precision required */ - if (ext_lshift == 0) { - /* special case, only high bits matter */ - tcg_gen_mov_i64(tcg_src, tcg_src_hi); - } else { - tcg_gen_shri_i64(tcg_src, tcg_src, shift); - tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift); - tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi); - } - } else { - if (is_u) { - if (shift == 64) { - /* essentially shifting in 64 zeros */ - tcg_gen_movi_i64(tcg_src, 0); - } else { - tcg_gen_shri_i64(tcg_src, tcg_src, shift); - } - } else { - if (shift == 64) { - /* effectively extending the sign-bit */ - tcg_gen_sari_i64(tcg_src, tcg_src, 63); - } else { - tcg_gen_sari_i64(tcg_src, tcg_src, shift); - } - } - } +static bool trans_FCVT_s_ds(DisasContext *s, arg_rr *a) +{ + if (fp_access_check(s)) { + TCGv_i32 tcg_rn = read_fp_sreg(s, a->rn); + TCGv_i64 tcg_rd = tcg_temp_new_i64(); + TCGv_ptr fpst = fpstatus_ptr(FPST_A64); - if (accumulate) { - tcg_gen_add_i64(tcg_res, tcg_res, tcg_src); - } else { - tcg_gen_mov_i64(tcg_res, tcg_src); + gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, fpst); + write_fp_dreg_merging(s, a->rd, a->rd, tcg_rd); } + return true; } -/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */ -static void handle_scalar_simd_shri(DisasContext *s, - bool is_u, int immh, int immb, - int opcode, int rn, int rd) +static bool trans_FCVT_s_hs(DisasContext *s, arg_rr *a) { - const int size = 3; - int immhb = immh << 3 | immb; - int shift = 2 * (8 << size) - immhb; - bool accumulate = false; - bool round = false; - bool insert = false; - TCGv_i64 tcg_rn; - TCGv_i64 tcg_rd; - TCGv_i64 tcg_round; + if (fp_access_check(s)) { + TCGv_i32 tmp = read_fp_sreg(s, a->rn); + TCGv_i32 ahp = get_ahp_flag(); + TCGv_ptr fpst = fpstatus_ptr(FPST_A64); - if (!extract32(immh, 3, 1)) { - unallocated_encoding(s); - return; + gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp); + /* write_fp_hreg_merging is OK here because top half of result is zero */ + write_fp_hreg_merging(s, a->rd, a->rd, tmp); } + return true; +} - if (!fp_access_check(s)) { - return; - } +static bool trans_FCVT_s_sd(DisasContext *s, arg_rr *a) +{ + if (fp_access_check(s)) { + TCGv_i64 tcg_rn = read_fp_dreg(s, a->rn); + TCGv_i32 tcg_rd = tcg_temp_new_i32(); + TCGv_ptr fpst = fpstatus_ptr(FPST_A64); - switch (opcode) { - case 0x02: /* SSRA / USRA (accumulate) */ - accumulate = true; - break; - case 0x04: 
/* SRSHR / URSHR (rounding) */ - round = true; - break; - case 0x06: /* SRSRA / URSRA (accum + rounding) */ - accumulate = round = true; - break; - case 0x08: /* SRI */ - insert = true; - break; + gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, fpst); + write_fp_sreg_merging(s, a->rd, a->rd, tcg_rd); } + return true; +} - if (round) { - tcg_round = tcg_constant_i64(1ULL << (shift - 1)); - } else { - tcg_round = NULL; +static bool trans_FCVT_s_hd(DisasContext *s, arg_rr *a) +{ + if (fp_access_check(s)) { + TCGv_i64 tcg_rn = read_fp_dreg(s, a->rn); + TCGv_i32 tcg_rd = tcg_temp_new_i32(); + TCGv_i32 ahp = get_ahp_flag(); + TCGv_ptr fpst = fpstatus_ptr(FPST_A64); + + gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp); + /* write_fp_hreg_merging is OK here because top half of tcg_rd is zero */ + write_fp_hreg_merging(s, a->rd, a->rd, tcg_rd); } + return true; +} - tcg_rn = read_fp_dreg(s, rn); - tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64(); +static bool trans_FCVT_s_sh(DisasContext *s, arg_rr *a) +{ + if (fp_access_check(s)) { + TCGv_i32 tcg_rn = read_fp_hreg(s, a->rn); + TCGv_i32 tcg_rd = tcg_temp_new_i32(); + TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_A64_F16); + TCGv_i32 tcg_ahp = get_ahp_flag(); - if (insert) { - /* shift count same as element size is valid but does nothing; - * special case to avoid potential shift by 64. - */ - int esize = 8 << size; - if (shift != esize) { - tcg_gen_shri_i64(tcg_rn, tcg_rn, shift); - tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift); - } - } else { - handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round, - accumulate, is_u, size, shift); + gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); + write_fp_sreg_merging(s, a->rd, a->rd, tcg_rd); } - - write_fp_dreg(s, rd, tcg_rd); + return true; } -/* SHL/SLI - Scalar shift left */ -static void handle_scalar_simd_shli(DisasContext *s, bool insert, - int immh, int immb, int opcode, - int rn, int rd) +static bool trans_FCVT_s_dh(DisasContext *s, arg_rr *a) { - int size = 32 - clz32(immh) - 1; - int immhb = immh << 3 | immb; - int shift = immhb - (8 << size); - TCGv_i64 tcg_rn; - TCGv_i64 tcg_rd; + if (fp_access_check(s)) { + TCGv_i32 tcg_rn = read_fp_hreg(s, a->rn); + TCGv_i64 tcg_rd = tcg_temp_new_i64(); + TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_A64_F16); + TCGv_i32 tcg_ahp = get_ahp_flag(); - if (!extract32(immh, 3, 1)) { - unallocated_encoding(s); - return; + gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); + write_fp_dreg_merging(s, a->rd, a->rd, tcg_rd); } + return true; +} - if (!fp_access_check(s)) { - return; - } +static bool do_cvtf_scalar(DisasContext *s, MemOp esz, int rd, int shift, + TCGv_i64 tcg_int, bool is_signed) +{ + TCGv_ptr tcg_fpstatus; + TCGv_i32 tcg_shift, tcg_single; + TCGv_i64 tcg_double; - tcg_rn = read_fp_dreg(s, rn); - tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64(); + tcg_fpstatus = fpstatus_ptr(esz == MO_16 ? 
FPST_A64_F16 : FPST_A64); + tcg_shift = tcg_constant_i32(shift); - if (insert) { - tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift); - } else { - tcg_gen_shli_i64(tcg_rd, tcg_rn, shift); - } - - write_fp_dreg(s, rd, tcg_rd); -} - -/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with - * (signed/unsigned) narrowing */ -static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, - bool is_u_shift, bool is_u_narrow, - int immh, int immb, int opcode, - int rn, int rd) -{ - int immhb = immh << 3 | immb; - int size = 32 - clz32(immh) - 1; - int esize = 8 << size; - int shift = (2 * esize) - immhb; - int elements = is_scalar ? 1 : (64 / esize); - bool round = extract32(opcode, 0, 1); - MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN); - TCGv_i64 tcg_rn, tcg_rd, tcg_round; - TCGv_i32 tcg_rd_narrowed; - TCGv_i64 tcg_final; - - static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = { - { gen_helper_neon_narrow_sat_s8, - gen_helper_neon_unarrow_sat8 }, - { gen_helper_neon_narrow_sat_s16, - gen_helper_neon_unarrow_sat16 }, - { gen_helper_neon_narrow_sat_s32, - gen_helper_neon_unarrow_sat32 }, - { NULL, NULL }, - }; - static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = { - gen_helper_neon_narrow_sat_u8, - gen_helper_neon_narrow_sat_u16, - gen_helper_neon_narrow_sat_u32, - NULL - }; - NeonGenNarrowEnvFn *narrowfn; + switch (esz) { + case MO_64: + tcg_double = tcg_temp_new_i64(); + if (is_signed) { + gen_helper_vfp_sqtod(tcg_double, tcg_int, tcg_shift, tcg_fpstatus); + } else { + gen_helper_vfp_uqtod(tcg_double, tcg_int, tcg_shift, tcg_fpstatus); + } + write_fp_dreg_merging(s, rd, rd, tcg_double); + break; - int i; + case MO_32: + tcg_single = tcg_temp_new_i32(); + if (is_signed) { + gen_helper_vfp_sqtos(tcg_single, tcg_int, tcg_shift, tcg_fpstatus); + } else { + gen_helper_vfp_uqtos(tcg_single, tcg_int, tcg_shift, tcg_fpstatus); + } + write_fp_sreg_merging(s, rd, rd, tcg_single); + break; - assert(size < 4); + case MO_16: + tcg_single = tcg_temp_new_i32(); + if (is_signed) { + gen_helper_vfp_sqtoh(tcg_single, tcg_int, tcg_shift, tcg_fpstatus); + } else { + gen_helper_vfp_uqtoh(tcg_single, tcg_int, tcg_shift, tcg_fpstatus); + } + write_fp_hreg_merging(s, rd, rd, tcg_single); + break; - if (extract32(immh, 3, 1)) { - unallocated_encoding(s); - return; + default: + g_assert_not_reached(); } + return true; +} - if (!fp_access_check(s)) { - return; - } +static bool do_cvtf_g(DisasContext *s, arg_fcvt *a, bool is_signed) +{ + TCGv_i64 tcg_int; + int check = fp_access_check_scalar_hsd(s, a->esz); - if (is_u_shift) { - narrowfn = unsigned_narrow_fns[size]; - } else { - narrowfn = signed_narrow_fns[size][is_u_narrow ? 
1 : 0]; + if (check <= 0) { + return check == 0; } - tcg_rn = tcg_temp_new_i64(); - tcg_rd = tcg_temp_new_i64(); - tcg_rd_narrowed = tcg_temp_new_i32(); - tcg_final = tcg_temp_new_i64(); - - if (round) { - tcg_round = tcg_constant_i64(1ULL << (shift - 1)); + if (a->sf) { + tcg_int = cpu_reg(s, a->rn); } else { - tcg_round = NULL; - } - - for (i = 0; i < elements; i++) { - read_vec_element(s, tcg_rn, rn, i, ldop); - handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round, - false, is_u_shift, size+1, shift); - narrowfn(tcg_rd_narrowed, tcg_env, tcg_rd); - tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed); - if (i == 0) { - tcg_gen_extract_i64(tcg_final, tcg_rd, 0, esize); + tcg_int = read_cpu_reg(s, a->rn, true); + if (is_signed) { + tcg_gen_ext32s_i64(tcg_int, tcg_int); } else { - tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize); + tcg_gen_ext32u_i64(tcg_int, tcg_int); } } + return do_cvtf_scalar(s, a->esz, a->rd, a->shift, tcg_int, is_signed); +} - if (!is_q) { - write_vec_element(s, tcg_final, rd, 0, MO_64); - } else { - write_vec_element(s, tcg_final, rd, 1, MO_64); +TRANS(SCVTF_g, do_cvtf_g, a, true) +TRANS(UCVTF_g, do_cvtf_g, a, false) + +/* + * [US]CVTF (vector), scalar version. + * Which sounds weird, but really just means input from fp register + * instead of input from general register. Input and output element + * size are always equal. + */ +static bool do_cvtf_f(DisasContext *s, arg_fcvt *a, bool is_signed) +{ + TCGv_i64 tcg_int; + int check = fp_access_check_scalar_hsd(s, a->esz); + + if (check <= 0) { + return check == 0; } - clear_vec_high(s, is_q, rd); + + tcg_int = tcg_temp_new_i64(); + read_vec_element(s, tcg_int, a->rn, 0, a->esz | (is_signed ? MO_SIGN : 0)); + return do_cvtf_scalar(s, a->esz, a->rd, a->shift, tcg_int, is_signed); } -/* SQSHLU, UQSHL, SQSHL: saturating left shifts */ -static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, - bool src_unsigned, bool dst_unsigned, - int immh, int immb, int rn, int rd) +TRANS(SCVTF_f, do_cvtf_f, a, true) +TRANS(UCVTF_f, do_cvtf_f, a, false) + +static void do_fcvt_scalar(DisasContext *s, MemOp out, MemOp esz, + TCGv_i64 tcg_out, int shift, int rn, + ARMFPRounding rmode) { - int immhb = immh << 3 | immb; - int size = 32 - clz32(immh) - 1; - int shift = immhb - (8 << size); - int pass; + TCGv_ptr tcg_fpstatus; + TCGv_i32 tcg_shift, tcg_rmode, tcg_single; - assert(immh != 0); - assert(!(scalar && is_q)); + tcg_fpstatus = fpstatus_ptr(esz == MO_16 ? FPST_A64_F16 : FPST_A64); + tcg_shift = tcg_constant_i32(shift); + tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); - if (!scalar) { - if (!is_q && extract32(immh, 3, 1)) { - unallocated_encoding(s); - return; + switch (esz) { + case MO_64: + read_vec_element(s, tcg_out, rn, 0, MO_64); + switch (out) { + case MO_64 | MO_SIGN: + gen_helper_vfp_tosqd(tcg_out, tcg_out, tcg_shift, tcg_fpstatus); + break; + case MO_64: + gen_helper_vfp_touqd(tcg_out, tcg_out, tcg_shift, tcg_fpstatus); + break; + case MO_32 | MO_SIGN: + gen_helper_vfp_tosld(tcg_out, tcg_out, tcg_shift, tcg_fpstatus); + break; + case MO_32: + gen_helper_vfp_tould(tcg_out, tcg_out, tcg_shift, tcg_fpstatus); + break; + default: + g_assert_not_reached(); } + break; - /* Since we use the variable-shift helpers we must - * replicate the shift count into each element of - * the tcg_shift value. 
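/*
 * Illustrative sketch only -- not part of the patch above.  The tcg_shift
 * value handed to the vfp conversion helpers is the fixed-point fraction
 * width: SCVTF/UCVTF with #fbits divide by 2^fbits after converting the
 * integer, and FCVTZ[SU] scale by 2^fbits before rounding toward zero
 * (saturation and NaN handling are omitted; ref_* names are hypothetical):
 */
#include <math.h>
#include <stdint.h>

double ref_scvtf_fixed(int64_t x, int fbits)
{
    return ldexp((double)x, -fbits);        /* x / 2^fbits */
}

int64_t ref_fcvtzs_fixed(double x, int fbits)
{
    return (int64_t)ldexp(x, fbits);        /* trunc(x * 2^fbits), unsaturated */
}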
- */ - switch (size) { - case 0: - shift |= shift << 8; - /* fall through */ - case 1: - shift |= shift << 16; + case MO_32: + tcg_single = read_fp_sreg(s, rn); + switch (out) { + case MO_64 | MO_SIGN: + gen_helper_vfp_tosqs(tcg_out, tcg_single, tcg_shift, tcg_fpstatus); break; - case 2: - case 3: + case MO_64: + gen_helper_vfp_touqs(tcg_out, tcg_single, tcg_shift, tcg_fpstatus); + break; + case MO_32 | MO_SIGN: + gen_helper_vfp_tosls(tcg_single, tcg_single, + tcg_shift, tcg_fpstatus); + tcg_gen_extu_i32_i64(tcg_out, tcg_single); + break; + case MO_32: + gen_helper_vfp_touls(tcg_single, tcg_single, + tcg_shift, tcg_fpstatus); + tcg_gen_extu_i32_i64(tcg_out, tcg_single); break; default: g_assert_not_reached(); } - } + break; - if (!fp_access_check(s)) { - return; + case MO_16: + tcg_single = read_fp_hreg(s, rn); + switch (out) { + case MO_64 | MO_SIGN: + gen_helper_vfp_tosqh(tcg_out, tcg_single, tcg_shift, tcg_fpstatus); + break; + case MO_64: + gen_helper_vfp_touqh(tcg_out, tcg_single, tcg_shift, tcg_fpstatus); + break; + case MO_32 | MO_SIGN: + gen_helper_vfp_toslh(tcg_single, tcg_single, + tcg_shift, tcg_fpstatus); + tcg_gen_extu_i32_i64(tcg_out, tcg_single); + break; + case MO_32: + gen_helper_vfp_toulh(tcg_single, tcg_single, + tcg_shift, tcg_fpstatus); + tcg_gen_extu_i32_i64(tcg_out, tcg_single); + break; + case MO_16 | MO_SIGN: + gen_helper_vfp_toshh(tcg_single, tcg_single, + tcg_shift, tcg_fpstatus); + tcg_gen_extu_i32_i64(tcg_out, tcg_single); + break; + case MO_16: + gen_helper_vfp_touhh(tcg_single, tcg_single, + tcg_shift, tcg_fpstatus); + tcg_gen_extu_i32_i64(tcg_out, tcg_single); + break; + default: + g_assert_not_reached(); + } + break; + + default: + g_assert_not_reached(); } - if (size == 3) { - TCGv_i64 tcg_shift = tcg_constant_i64(shift); - static NeonGenTwo64OpEnvFn * const fns[2][2] = { - { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 }, - { NULL, gen_helper_neon_qshl_u64 }, - }; - NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned]; - int maxpass = is_q ? 2 : 1; + gen_restore_rmode(tcg_rmode, tcg_fpstatus); +} - for (pass = 0; pass < maxpass; pass++) { - TCGv_i64 tcg_op = tcg_temp_new_i64(); +static bool do_fcvt_g(DisasContext *s, arg_fcvt *a, + ARMFPRounding rmode, bool is_signed) +{ + TCGv_i64 tcg_int; + int check = fp_access_check_scalar_hsd(s, a->esz); - read_vec_element(s, tcg_op, rn, pass, MO_64); - genfn(tcg_op, tcg_env, tcg_op, tcg_shift); - write_vec_element(s, tcg_op, rd, pass, MO_64); - } - clear_vec_high(s, is_q, rd); - } else { - TCGv_i32 tcg_shift = tcg_constant_i32(shift); - static NeonGenTwoOpEnvFn * const fns[2][2][3] = { - { - { gen_helper_neon_qshl_s8, - gen_helper_neon_qshl_s16, - gen_helper_neon_qshl_s32 }, - { gen_helper_neon_qshlu_s8, - gen_helper_neon_qshlu_s16, - gen_helper_neon_qshlu_s32 } - }, { - { NULL, NULL, NULL }, - { gen_helper_neon_qshl_u8, - gen_helper_neon_qshl_u16, - gen_helper_neon_qshl_u32 } - } - }; - NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size]; - MemOp memop = scalar ? size : MO_32; - int maxpass = scalar ? 1 : is_q ? 
4 : 2; - - for (pass = 0; pass < maxpass; pass++) { - TCGv_i32 tcg_op = tcg_temp_new_i32(); - - read_vec_element_i32(s, tcg_op, rn, pass, memop); - genfn(tcg_op, tcg_env, tcg_op, tcg_shift); - if (scalar) { - switch (size) { - case 0: - tcg_gen_ext8u_i32(tcg_op, tcg_op); - break; - case 1: - tcg_gen_ext16u_i32(tcg_op, tcg_op); - break; - case 2: - break; - default: - g_assert_not_reached(); - } - write_fp_sreg(s, rd, tcg_op); - } else { - write_vec_element_i32(s, tcg_op, rd, pass, MO_32); - } - } + if (check <= 0) { + return check == 0; + } - if (!scalar) { - clear_vec_high(s, is_q, rd); - } + tcg_int = cpu_reg(s, a->rd); + do_fcvt_scalar(s, (a->sf ? MO_64 : MO_32) | (is_signed ? MO_SIGN : 0), + a->esz, tcg_int, a->shift, a->rn, rmode); + + if (!a->sf) { + tcg_gen_ext32u_i64(tcg_int, tcg_int); } + return true; } -/* Common vector code for handling integer to FP conversion */ -static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, - int elements, int is_signed, - int fracbits, int size) -{ - TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); - TCGv_i32 tcg_shift = NULL; +TRANS(FCVTNS_g, do_fcvt_g, a, FPROUNDING_TIEEVEN, true) +TRANS(FCVTNU_g, do_fcvt_g, a, FPROUNDING_TIEEVEN, false) +TRANS(FCVTPS_g, do_fcvt_g, a, FPROUNDING_POSINF, true) +TRANS(FCVTPU_g, do_fcvt_g, a, FPROUNDING_POSINF, false) +TRANS(FCVTMS_g, do_fcvt_g, a, FPROUNDING_NEGINF, true) +TRANS(FCVTMU_g, do_fcvt_g, a, FPROUNDING_NEGINF, false) +TRANS(FCVTZS_g, do_fcvt_g, a, FPROUNDING_ZERO, true) +TRANS(FCVTZU_g, do_fcvt_g, a, FPROUNDING_ZERO, false) +TRANS(FCVTAS_g, do_fcvt_g, a, FPROUNDING_TIEAWAY, true) +TRANS(FCVTAU_g, do_fcvt_g, a, FPROUNDING_TIEAWAY, false) - MemOp mop = size | (is_signed ? MO_SIGN : 0); - int pass; +/* + * FCVT* (vector), scalar version. + * Which sounds weird, but really just means output to fp register + * instead of output to general register. Input and output element + * size are always equal. + */ +static bool do_fcvt_f(DisasContext *s, arg_fcvt *a, + ARMFPRounding rmode, bool is_signed) +{ + TCGv_i64 tcg_int; + int check = fp_access_check_scalar_hsd(s, a->esz); - if (fracbits || size == MO_64) { - tcg_shift = tcg_constant_i32(fracbits); + if (check <= 0) { + return check == 0; } - if (size == MO_64) { - TCGv_i64 tcg_int64 = tcg_temp_new_i64(); - TCGv_i64 tcg_double = tcg_temp_new_i64(); + tcg_int = tcg_temp_new_i64(); + do_fcvt_scalar(s, a->esz | (is_signed ? 
MO_SIGN : 0), + a->esz, tcg_int, a->shift, a->rn, rmode); - for (pass = 0; pass < elements; pass++) { - read_vec_element(s, tcg_int64, rn, pass, mop); + if (!s->fpcr_nep) { + clear_vec(s, a->rd); + } + write_vec_element(s, tcg_int, a->rd, 0, a->esz); + return true; +} - if (is_signed) { - gen_helper_vfp_sqtod(tcg_double, tcg_int64, - tcg_shift, tcg_fpst); - } else { - gen_helper_vfp_uqtod(tcg_double, tcg_int64, - tcg_shift, tcg_fpst); - } - if (elements == 1) { - write_fp_dreg(s, rd, tcg_double); - } else { - write_vec_element(s, tcg_double, rd, pass, MO_64); - } - } - } else { - TCGv_i32 tcg_int32 = tcg_temp_new_i32(); - TCGv_i32 tcg_float = tcg_temp_new_i32(); - - for (pass = 0; pass < elements; pass++) { - read_vec_element_i32(s, tcg_int32, rn, pass, mop); - - switch (size) { - case MO_32: - if (fracbits) { - if (is_signed) { - gen_helper_vfp_sltos(tcg_float, tcg_int32, - tcg_shift, tcg_fpst); - } else { - gen_helper_vfp_ultos(tcg_float, tcg_int32, - tcg_shift, tcg_fpst); - } - } else { - if (is_signed) { - gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst); - } else { - gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst); - } - } - break; - case MO_16: - if (fracbits) { - if (is_signed) { - gen_helper_vfp_sltoh(tcg_float, tcg_int32, - tcg_shift, tcg_fpst); - } else { - gen_helper_vfp_ultoh(tcg_float, tcg_int32, - tcg_shift, tcg_fpst); - } - } else { - if (is_signed) { - gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst); - } else { - gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst); - } - } - break; - default: - g_assert_not_reached(); - } +TRANS(FCVTNS_f, do_fcvt_f, a, FPROUNDING_TIEEVEN, true) +TRANS(FCVTNU_f, do_fcvt_f, a, FPROUNDING_TIEEVEN, false) +TRANS(FCVTPS_f, do_fcvt_f, a, FPROUNDING_POSINF, true) +TRANS(FCVTPU_f, do_fcvt_f, a, FPROUNDING_POSINF, false) +TRANS(FCVTMS_f, do_fcvt_f, a, FPROUNDING_NEGINF, true) +TRANS(FCVTMU_f, do_fcvt_f, a, FPROUNDING_NEGINF, false) +TRANS(FCVTZS_f, do_fcvt_f, a, FPROUNDING_ZERO, true) +TRANS(FCVTZU_f, do_fcvt_f, a, FPROUNDING_ZERO, false) +TRANS(FCVTAS_f, do_fcvt_f, a, FPROUNDING_TIEAWAY, true) +TRANS(FCVTAU_f, do_fcvt_f, a, FPROUNDING_TIEAWAY, false) - if (elements == 1) { - write_fp_sreg(s, rd, tcg_float); - } else { - write_vec_element_i32(s, tcg_float, rd, pass, size); - } - } +static bool trans_FJCVTZS(DisasContext *s, arg_FJCVTZS *a) +{ + if (!dc_isar_feature(aa64_jscvt, s)) { + return false; } + if (fp_access_check(s)) { + TCGv_i64 t = read_fp_dreg(s, a->rn); + TCGv_ptr fpstatus = fpstatus_ptr(FPST_A64); - clear_vec_high(s, elements << size == 16, rd); + gen_helper_fjcvtzs(t, t, fpstatus); + + tcg_gen_ext32u_i64(cpu_reg(s, a->rd), t); + tcg_gen_extrh_i64_i32(cpu_ZF, t); + tcg_gen_movi_i32(cpu_CF, 0); + tcg_gen_movi_i32(cpu_NF, 0); + tcg_gen_movi_i32(cpu_VF, 0); + } + return true; } -/* UCVTF/SCVTF - Integer to FP conversion */ -static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar, - bool is_q, bool is_u, - int immh, int immb, int opcode, - int rn, int rd) +static bool trans_FMOV_hx(DisasContext *s, arg_rr *a) { - int size, elements, fracbits; - int immhb = immh << 3 | immb; + if (!dc_isar_feature(aa64_fp16, s)) { + return false; + } + if (fp_access_check(s)) { + TCGv_i64 tcg_rn = cpu_reg(s, a->rn); + TCGv_i64 tmp = tcg_temp_new_i64(); + tcg_gen_ext16u_i64(tmp, tcg_rn); + write_fp_dreg(s, a->rd, tmp); + } + return true; +} - if (immh & 8) { - size = MO_64; - if (!is_scalar && !is_q) { - unallocated_encoding(s); - return; - } - } else if (immh & 4) { - size = MO_32; - } else if (immh & 2) { - size = MO_16; - 
if (!dc_isar_feature(aa64_fp16, s)) { - unallocated_encoding(s); - return; - } - } else { - /* immh == 0 would be a failure of the decode logic */ - g_assert(immh == 1); - unallocated_encoding(s); - return; +static bool trans_FMOV_sw(DisasContext *s, arg_rr *a) +{ + if (fp_access_check(s)) { + TCGv_i64 tcg_rn = cpu_reg(s, a->rn); + TCGv_i64 tmp = tcg_temp_new_i64(); + tcg_gen_ext32u_i64(tmp, tcg_rn); + write_fp_dreg(s, a->rd, tmp); } + return true; +} - if (is_scalar) { - elements = 1; - } else { - elements = (8 << is_q) >> size; +static bool trans_FMOV_dx(DisasContext *s, arg_rr *a) +{ + if (fp_access_check(s)) { + TCGv_i64 tcg_rn = cpu_reg(s, a->rn); + write_fp_dreg(s, a->rd, tcg_rn); } - fracbits = (16 << size) - immhb; + return true; +} - if (!fp_access_check(s)) { - return; +static bool trans_FMOV_ux(DisasContext *s, arg_rr *a) +{ + if (fp_access_check(s)) { + TCGv_i64 tcg_rn = cpu_reg(s, a->rn); + tcg_gen_st_i64(tcg_rn, tcg_env, fp_reg_hi_offset(s, a->rd)); + clear_vec_high(s, true, a->rd); } + return true; +} - handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size); +static bool trans_FMOV_xh(DisasContext *s, arg_rr *a) +{ + if (!dc_isar_feature(aa64_fp16, s)) { + return false; + } + if (fp_access_check(s)) { + TCGv_i64 tcg_rd = cpu_reg(s, a->rd); + tcg_gen_ld16u_i64(tcg_rd, tcg_env, fp_reg_offset(s, a->rn, MO_16)); + } + return true; } -/* FCVTZS, FVCVTZU - FP to fixedpoint conversion */ -static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, - bool is_q, bool is_u, - int immh, int immb, int rn, int rd) +static bool trans_FMOV_ws(DisasContext *s, arg_rr *a) { - int immhb = immh << 3 | immb; - int pass, size, fracbits; - TCGv_ptr tcg_fpstatus; - TCGv_i32 tcg_rmode, tcg_shift; + if (fp_access_check(s)) { + TCGv_i64 tcg_rd = cpu_reg(s, a->rd); + tcg_gen_ld32u_i64(tcg_rd, tcg_env, fp_reg_offset(s, a->rn, MO_32)); + } + return true; +} - if (immh & 0x8) { - size = MO_64; - if (!is_scalar && !is_q) { - unallocated_encoding(s); - return; - } - } else if (immh & 0x4) { - size = MO_32; - } else if (immh & 0x2) { - size = MO_16; - if (!dc_isar_feature(aa64_fp16, s)) { - unallocated_encoding(s); - return; - } - } else { - /* Should have split out AdvSIMD modified immediate earlier. */ - assert(immh == 1); - unallocated_encoding(s); - return; +static bool trans_FMOV_xd(DisasContext *s, arg_rr *a) +{ + if (fp_access_check(s)) { + TCGv_i64 tcg_rd = cpu_reg(s, a->rd); + tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_offset(s, a->rn, MO_64)); } + return true; +} - if (!fp_access_check(s)) { - return; +static bool trans_FMOV_xu(DisasContext *s, arg_rr *a) +{ + if (fp_access_check(s)) { + TCGv_i64 tcg_rd = cpu_reg(s, a->rd); + tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_hi_offset(s, a->rn)); } + return true; +} - assert(!(is_scalar && is_q)); +typedef struct ENVScalar1 { + NeonGenOneOpEnvFn *gen_bhs[3]; + NeonGenOne64OpEnvFn *gen_d; +} ENVScalar1; - tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); - tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus); - fracbits = (16 << size) - immhb; - tcg_shift = tcg_constant_i32(fracbits); +static bool do_env_scalar1(DisasContext *s, arg_rr_e *a, const ENVScalar1 *f) +{ + if (!fp_access_check(s)) { + return true; + } + if (a->esz == MO_64) { + TCGv_i64 t = read_fp_dreg(s, a->rn); + f->gen_d(t, tcg_env, t); + write_fp_dreg(s, a->rd, t); + } else { + TCGv_i32 t = tcg_temp_new_i32(); - if (size == MO_64) { - int maxpass = is_scalar ? 
1 : 2; + read_vec_element_i32(s, t, a->rn, 0, a->esz); + f->gen_bhs[a->esz](t, tcg_env, t); + write_fp_sreg(s, a->rd, t); + } + return true; +} - for (pass = 0; pass < maxpass; pass++) { - TCGv_i64 tcg_op = tcg_temp_new_i64(); +static bool do_env_vector1(DisasContext *s, arg_qrr_e *a, const ENVScalar1 *f) +{ + if (a->esz == MO_64 && !a->q) { + return false; + } + if (!fp_access_check(s)) { + return true; + } + if (a->esz == MO_64) { + TCGv_i64 t = tcg_temp_new_i64(); - read_vec_element(s, tcg_op, rn, pass, MO_64); - if (is_u) { - gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus); - } else { - gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus); - } - write_vec_element(s, tcg_op, rd, pass, MO_64); + for (int i = 0; i < 2; ++i) { + read_vec_element(s, t, a->rn, i, MO_64); + f->gen_d(t, tcg_env, t); + write_vec_element(s, t, a->rd, i, MO_64); } - clear_vec_high(s, is_q, rd); } else { - void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); - int maxpass = is_scalar ? 1 : ((8 << is_q) >> size); + TCGv_i32 t = tcg_temp_new_i32(); + int n = (a->q ? 16 : 8) >> a->esz; - switch (size) { - case MO_16: - if (is_u) { - fn = gen_helper_vfp_touhh; - } else { - fn = gen_helper_vfp_toshh; - } - break; - case MO_32: - if (is_u) { - fn = gen_helper_vfp_touls; - } else { - fn = gen_helper_vfp_tosls; - } - break; - default: - g_assert_not_reached(); + for (int i = 0; i < n; ++i) { + read_vec_element_i32(s, t, a->rn, i, a->esz); + f->gen_bhs[a->esz](t, tcg_env, t); + write_vec_element_i32(s, t, a->rd, i, a->esz); } + } + clear_vec_high(s, a->q, a->rd); + return true; +} - for (pass = 0; pass < maxpass; pass++) { - TCGv_i32 tcg_op = tcg_temp_new_i32(); +static const ENVScalar1 f_scalar_sqabs = { + { gen_helper_neon_qabs_s8, + gen_helper_neon_qabs_s16, + gen_helper_neon_qabs_s32 }, + gen_helper_neon_qabs_s64, +}; +TRANS(SQABS_s, do_env_scalar1, a, &f_scalar_sqabs) +TRANS(SQABS_v, do_env_vector1, a, &f_scalar_sqabs) + +static const ENVScalar1 f_scalar_sqneg = { + { gen_helper_neon_qneg_s8, + gen_helper_neon_qneg_s16, + gen_helper_neon_qneg_s32 }, + gen_helper_neon_qneg_s64, +}; +TRANS(SQNEG_s, do_env_scalar1, a, &f_scalar_sqneg) +TRANS(SQNEG_v, do_env_vector1, a, &f_scalar_sqneg) - read_vec_element_i32(s, tcg_op, rn, pass, size); - fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus); - if (is_scalar) { - if (size == MO_16 && !is_u) { - tcg_gen_ext16u_i32(tcg_op, tcg_op); - } - write_fp_sreg(s, rd, tcg_op); - } else { - write_vec_element_i32(s, tcg_op, rd, pass, size); - } - } - if (!is_scalar) { - clear_vec_high(s, is_q, rd); - } +static bool do_scalar1_d(DisasContext *s, arg_rr *a, ArithOneOp *f) +{ + if (fp_access_check(s)) { + TCGv_i64 t = read_fp_dreg(s, a->rn); + f(t, t); + write_fp_dreg(s, a->rd, t); } - - gen_restore_rmode(tcg_rmode, tcg_fpstatus); + return true; } -/* AdvSIMD scalar shift by immediate - * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0 - * +-----+---+-------------+------+------+--------+---+------+------+ - * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd | - * +-----+---+-------------+------+------+--------+---+------+------+ - * - * This is the scalar version so it works on a fixed sized registers - */ -static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn) +TRANS(ABS_s, do_scalar1_d, a, tcg_gen_abs_i64) +TRANS(NEG_s, do_scalar1_d, a, tcg_gen_neg_i64) + +static bool do_cmop0_d(DisasContext *s, arg_rr *a, TCGCond cond) { - int rd = extract32(insn, 0, 5); - int rn = extract32(insn, 5, 5); - int opcode = extract32(insn, 11, 5); - int immb = 
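/*
 * Illustrative sketch only -- not part of the patch above.  SQABS/SQNEG go
 * through the neon_qabs/neon_qneg helpers rather than plain tcg_gen_abs/neg
 * because the most negative value saturates (and sets FPSR.QC, which this
 * sketch only signals through *sat; the ref_* names are hypothetical):
 */
#include <stdint.h>

int32_t ref_sqabs32(int32_t x, int *sat)
{
    if (x == INT32_MIN) {
        *sat = 1;
        return INT32_MAX;
    }
    return x < 0 ? -x : x;
}

int32_t ref_sqneg32(int32_t x, int *sat)
{
    if (x == INT32_MIN) {
        *sat = 1;
        return INT32_MAX;
    }
    return -x;
}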
extract32(insn, 16, 3); - int immh = extract32(insn, 19, 4); - bool is_u = extract32(insn, 29, 1); + if (fp_access_check(s)) { + TCGv_i64 t = read_fp_dreg(s, a->rn); + tcg_gen_negsetcond_i64(cond, t, t, tcg_constant_i64(0)); + write_fp_dreg(s, a->rd, t); + } + return true; +} - if (immh == 0) { - unallocated_encoding(s); - return; +TRANS(CMGT0_s, do_cmop0_d, a, TCG_COND_GT) +TRANS(CMGE0_s, do_cmop0_d, a, TCG_COND_GE) +TRANS(CMLE0_s, do_cmop0_d, a, TCG_COND_LE) +TRANS(CMLT0_s, do_cmop0_d, a, TCG_COND_LT) +TRANS(CMEQ0_s, do_cmop0_d, a, TCG_COND_EQ) + +static bool do_2misc_narrow_scalar(DisasContext *s, arg_rr_e *a, + ArithOneOp * const fn[3]) +{ + if (a->esz == MO_64) { + return false; } + if (fp_access_check(s)) { + TCGv_i64 t = tcg_temp_new_i64(); - switch (opcode) { - case 0x08: /* SRI */ - if (!is_u) { - unallocated_encoding(s); - return; - } - /* fall through */ - case 0x00: /* SSHR / USHR */ - case 0x02: /* SSRA / USRA */ - case 0x04: /* SRSHR / URSHR */ - case 0x06: /* SRSRA / URSRA */ - handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd); - break; - case 0x0a: /* SHL / SLI */ - handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd); - break; - case 0x1c: /* SCVTF, UCVTF */ - handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb, - opcode, rn, rd); - break; - case 0x10: /* SQSHRUN, SQSHRUN2 */ - case 0x11: /* SQRSHRUN, SQRSHRUN2 */ - if (!is_u) { - unallocated_encoding(s); - return; - } - handle_vec_simd_sqshrn(s, true, false, false, true, - immh, immb, opcode, rn, rd); - break; - case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */ - case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */ - handle_vec_simd_sqshrn(s, true, false, is_u, is_u, - immh, immb, opcode, rn, rd); - break; - case 0xc: /* SQSHLU */ - if (!is_u) { - unallocated_encoding(s); - return; - } - handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd); - break; - case 0xe: /* SQSHL, UQSHL */ - handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd); - break; - case 0x1f: /* FCVTZS, FCVTZU */ - handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd); - break; - default: - unallocated_encoding(s); - break; + read_vec_element(s, t, a->rn, 0, a->esz + 1); + fn[a->esz](t, t); + clear_vec(s, a->rd); + write_vec_element(s, t, a->rd, 0, a->esz); } + return true; } -static void handle_2misc_64(DisasContext *s, int opcode, bool u, - TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, - TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus) -{ - /* Handle 64->64 opcodes which are shared between the scalar and - * vector 2-reg-misc groups. We cover every integer opcode where size == 3 - * is valid in either group and also the double-precision fp ops. - * The caller only need provide tcg_rmode and tcg_fpstatus if the op - * requires them. - */ - TCGCond cond; +#define WRAP_ENV(NAME) \ + static void gen_##NAME(TCGv_i64 d, TCGv_i64 n) \ + { gen_helper_##NAME(d, tcg_env, n); } - switch (opcode) { - case 0x4: /* CLS, CLZ */ - if (u) { - tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64); - } else { - tcg_gen_clrsb_i64(tcg_rd, tcg_rn); - } - break; - case 0x5: /* NOT */ - /* This opcode is shared with CNT and RBIT but we have earlier - * enforced that size == 3 if and only if this is the NOT insn. - */ - tcg_gen_not_i64(tcg_rd, tcg_rn); - break; - case 0x7: /* SQABS, SQNEG */ - if (u) { - gen_helper_neon_qneg_s64(tcg_rd, tcg_env, tcg_rn); - } else { - gen_helper_neon_qabs_s64(tcg_rd, tcg_env, tcg_rn); - } - break; - case 0xa: /* CMLT */ - cond = TCG_COND_LT; - do_cmop: - /* 64 bit integer comparison against zero, result is test ? -1 : 0. 
*/ - tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_constant_i64(0)); - break; - case 0x8: /* CMGT, CMGE */ - cond = u ? TCG_COND_GE : TCG_COND_GT; - goto do_cmop; - case 0x9: /* CMEQ, CMLE */ - cond = u ? TCG_COND_LE : TCG_COND_EQ; - goto do_cmop; - case 0xb: /* ABS, NEG */ - if (u) { - tcg_gen_neg_i64(tcg_rd, tcg_rn); - } else { - tcg_gen_abs_i64(tcg_rd, tcg_rn); - } - break; - case 0x2f: /* FABS */ - gen_vfp_absd(tcg_rd, tcg_rn); - break; - case 0x6f: /* FNEG */ - gen_vfp_negd(tcg_rd, tcg_rn); - break; - case 0x7f: /* FSQRT */ - gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, tcg_env); - break; - case 0x1a: /* FCVTNS */ - case 0x1b: /* FCVTMS */ - case 0x1c: /* FCVTAS */ - case 0x3a: /* FCVTPS */ - case 0x3b: /* FCVTZS */ - gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus); - break; - case 0x5a: /* FCVTNU */ - case 0x5b: /* FCVTMU */ - case 0x5c: /* FCVTAU */ - case 0x7a: /* FCVTPU */ - case 0x7b: /* FCVTZU */ - gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus); - break; - case 0x18: /* FRINTN */ - case 0x19: /* FRINTM */ - case 0x38: /* FRINTP */ - case 0x39: /* FRINTZ */ - case 0x58: /* FRINTA */ - case 0x79: /* FRINTI */ - gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus); - break; - case 0x59: /* FRINTX */ - gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus); - break; - case 0x1e: /* FRINT32Z */ - case 0x5e: /* FRINT32X */ - gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus); - break; - case 0x1f: /* FRINT64Z */ - case 0x5f: /* FRINT64X */ - gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus); - break; - default: - g_assert_not_reached(); - } -} - -static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, - bool is_scalar, bool is_u, bool is_q, - int size, int rn, int rd) -{ - bool is_double = (size == MO_64); - TCGv_ptr fpst; - - if (!fp_access_check(s)) { - return; - } - - fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); - - if (is_double) { - TCGv_i64 tcg_op = tcg_temp_new_i64(); - TCGv_i64 tcg_zero = tcg_constant_i64(0); - TCGv_i64 tcg_res = tcg_temp_new_i64(); - NeonGenTwoDoubleOpFn *genfn; - bool swap = false; - int pass; - - switch (opcode) { - case 0x2e: /* FCMLT (zero) */ - swap = true; - /* fallthrough */ - case 0x2c: /* FCMGT (zero) */ - genfn = gen_helper_neon_cgt_f64; - break; - case 0x2d: /* FCMEQ (zero) */ - genfn = gen_helper_neon_ceq_f64; - break; - case 0x6d: /* FCMLE (zero) */ - swap = true; - /* fall through */ - case 0x6c: /* FCMGE (zero) */ - genfn = gen_helper_neon_cge_f64; - break; - default: - g_assert_not_reached(); - } - - for (pass = 0; pass < (is_scalar ? 
1 : 2); pass++) { - read_vec_element(s, tcg_op, rn, pass, MO_64); - if (swap) { - genfn(tcg_res, tcg_zero, tcg_op, fpst); - } else { - genfn(tcg_res, tcg_op, tcg_zero, fpst); - } - write_vec_element(s, tcg_res, rd, pass, MO_64); - } - - clear_vec_high(s, !is_scalar, rd); - } else { - TCGv_i32 tcg_op = tcg_temp_new_i32(); - TCGv_i32 tcg_zero = tcg_constant_i32(0); - TCGv_i32 tcg_res = tcg_temp_new_i32(); - NeonGenTwoSingleOpFn *genfn; - bool swap = false; - int pass, maxpasses; - - if (size == MO_16) { - switch (opcode) { - case 0x2e: /* FCMLT (zero) */ - swap = true; - /* fall through */ - case 0x2c: /* FCMGT (zero) */ - genfn = gen_helper_advsimd_cgt_f16; - break; - case 0x2d: /* FCMEQ (zero) */ - genfn = gen_helper_advsimd_ceq_f16; - break; - case 0x6d: /* FCMLE (zero) */ - swap = true; - /* fall through */ - case 0x6c: /* FCMGE (zero) */ - genfn = gen_helper_advsimd_cge_f16; - break; - default: - g_assert_not_reached(); - } - } else { - switch (opcode) { - case 0x2e: /* FCMLT (zero) */ - swap = true; - /* fall through */ - case 0x2c: /* FCMGT (zero) */ - genfn = gen_helper_neon_cgt_f32; - break; - case 0x2d: /* FCMEQ (zero) */ - genfn = gen_helper_neon_ceq_f32; - break; - case 0x6d: /* FCMLE (zero) */ - swap = true; - /* fall through */ - case 0x6c: /* FCMGE (zero) */ - genfn = gen_helper_neon_cge_f32; - break; - default: - g_assert_not_reached(); - } - } - - if (is_scalar) { - maxpasses = 1; - } else { - int vector_size = 8 << is_q; - maxpasses = vector_size >> size; - } - - for (pass = 0; pass < maxpasses; pass++) { - read_vec_element_i32(s, tcg_op, rn, pass, size); - if (swap) { - genfn(tcg_res, tcg_zero, tcg_op, fpst); - } else { - genfn(tcg_res, tcg_op, tcg_zero, fpst); - } - if (is_scalar) { - write_fp_sreg(s, rd, tcg_res); - } else { - write_vec_element_i32(s, tcg_res, rd, pass, size); - } - } - - if (!is_scalar) { - clear_vec_high(s, is_q, rd); - } - } -} - -static void handle_2misc_reciprocal(DisasContext *s, int opcode, - bool is_scalar, bool is_u, bool is_q, - int size, int rn, int rd) -{ - bool is_double = (size == 3); - TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); - - if (is_double) { - TCGv_i64 tcg_op = tcg_temp_new_i64(); - TCGv_i64 tcg_res = tcg_temp_new_i64(); - int pass; - - for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) { - read_vec_element(s, tcg_op, rn, pass, MO_64); - switch (opcode) { - case 0x3d: /* FRECPE */ - gen_helper_recpe_f64(tcg_res, tcg_op, fpst); - break; - case 0x3f: /* FRECPX */ - gen_helper_frecpx_f64(tcg_res, tcg_op, fpst); - break; - case 0x7d: /* FRSQRTE */ - gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst); - break; - default: - g_assert_not_reached(); - } - write_vec_element(s, tcg_res, rd, pass, MO_64); - } - clear_vec_high(s, !is_scalar, rd); - } else { - TCGv_i32 tcg_op = tcg_temp_new_i32(); - TCGv_i32 tcg_res = tcg_temp_new_i32(); - int pass, maxpasses; - - if (is_scalar) { - maxpasses = 1; - } else { - maxpasses = is_q ? 
4 : 2; - } - - for (pass = 0; pass < maxpasses; pass++) { - read_vec_element_i32(s, tcg_op, rn, pass, MO_32); - - switch (opcode) { - case 0x3c: /* URECPE */ - gen_helper_recpe_u32(tcg_res, tcg_op); - break; - case 0x3d: /* FRECPE */ - gen_helper_recpe_f32(tcg_res, tcg_op, fpst); - break; - case 0x3f: /* FRECPX */ - gen_helper_frecpx_f32(tcg_res, tcg_op, fpst); - break; - case 0x7d: /* FRSQRTE */ - gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst); - break; - default: - g_assert_not_reached(); - } - - if (is_scalar) { - write_fp_sreg(s, rd, tcg_res); - } else { - write_vec_element_i32(s, tcg_res, rd, pass, MO_32); - } - } - if (!is_scalar) { - clear_vec_high(s, is_q, rd); - } - } -} - -static void handle_2misc_narrow(DisasContext *s, bool scalar, - int opcode, bool u, bool is_q, - int size, int rn, int rd) -{ - /* Handle 2-reg-misc ops which are narrowing (so each 2*size element - * in the source becomes a size element in the destination). - */ - int pass; - TCGv_i32 tcg_res[2]; - int destelt = is_q ? 2 : 0; - int passes = scalar ? 1 : 2; - - if (scalar) { - tcg_res[1] = tcg_constant_i32(0); - } - - for (pass = 0; pass < passes; pass++) { - TCGv_i64 tcg_op = tcg_temp_new_i64(); - NeonGenNarrowFn *genfn = NULL; - NeonGenNarrowEnvFn *genenvfn = NULL; - - if (scalar) { - read_vec_element(s, tcg_op, rn, pass, size + 1); - } else { - read_vec_element(s, tcg_op, rn, pass, MO_64); - } - tcg_res[pass] = tcg_temp_new_i32(); - - switch (opcode) { - case 0x12: /* XTN, SQXTUN */ - { - static NeonGenNarrowFn * const xtnfns[3] = { - gen_helper_neon_narrow_u8, - gen_helper_neon_narrow_u16, - tcg_gen_extrl_i64_i32, - }; - static NeonGenNarrowEnvFn * const sqxtunfns[3] = { - gen_helper_neon_unarrow_sat8, - gen_helper_neon_unarrow_sat16, - gen_helper_neon_unarrow_sat32, - }; - if (u) { - genenvfn = sqxtunfns[size]; - } else { - genfn = xtnfns[size]; - } - break; - } - case 0x14: /* SQXTN, UQXTN */ - { - static NeonGenNarrowEnvFn * const fns[3][2] = { - { gen_helper_neon_narrow_sat_s8, - gen_helper_neon_narrow_sat_u8 }, - { gen_helper_neon_narrow_sat_s16, - gen_helper_neon_narrow_sat_u16 }, - { gen_helper_neon_narrow_sat_s32, - gen_helper_neon_narrow_sat_u32 }, - }; - genenvfn = fns[size][u]; - break; - } - case 0x16: /* FCVTN, FCVTN2 */ - /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */ - if (size == 2) { - gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, tcg_env); - } else { - TCGv_i32 tcg_lo = tcg_temp_new_i32(); - TCGv_i32 tcg_hi = tcg_temp_new_i32(); - TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); - TCGv_i32 ahp = get_ahp_flag(); - - tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op); - gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp); - gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp); - tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16); - } - break; - case 0x36: /* BFCVTN, BFCVTN2 */ - { - TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); - gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst); - } - break; - case 0x56: /* FCVTXN, FCVTXN2 */ - /* 64 bit to 32 bit float conversion - * with von Neumann rounding (round to odd) - */ - assert(size == 2); - gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, tcg_env); - break; - default: - g_assert_not_reached(); - } - - if (genfn) { - genfn(tcg_res[pass], tcg_op); - } else if (genenvfn) { - genenvfn(tcg_res[pass], tcg_env, tcg_op); - } - } - - for (pass = 0; pass < 2; pass++) { - write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32); - } - clear_vec_high(s, is_q, rd); -} - -/* AdvSIMD scalar two reg misc - * 31 30 29 28 24 23 22 21 17 16 
12 11 10 9 5 4 0 - * +-----+---+-----------+------+-----------+--------+-----+------+------+ - * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd | - * +-----+---+-----------+------+-----------+--------+-----+------+------+ - */ -static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) -{ - int rd = extract32(insn, 0, 5); - int rn = extract32(insn, 5, 5); - int opcode = extract32(insn, 12, 5); - int size = extract32(insn, 22, 2); - bool u = extract32(insn, 29, 1); - bool is_fcvt = false; - int rmode; - TCGv_i32 tcg_rmode; - TCGv_ptr tcg_fpstatus; - - switch (opcode) { - case 0x7: /* SQABS / SQNEG */ - break; - case 0xa: /* CMLT */ - if (u) { - unallocated_encoding(s); - return; - } - /* fall through */ - case 0x8: /* CMGT, CMGE */ - case 0x9: /* CMEQ, CMLE */ - case 0xb: /* ABS, NEG */ - if (size != 3) { - unallocated_encoding(s); - return; - } - break; - case 0x12: /* SQXTUN */ - if (!u) { - unallocated_encoding(s); - return; - } - /* fall through */ - case 0x14: /* SQXTN, UQXTN */ - if (size == 3) { - unallocated_encoding(s); - return; - } - if (!fp_access_check(s)) { - return; - } - handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd); - return; - case 0xc ... 0xf: - case 0x16 ... 0x1d: - case 0x1f: - /* Floating point: U, size[1] and opcode indicate operation; - * size[0] indicates single or double precision. - */ - opcode |= (extract32(size, 1, 1) << 5) | (u << 6); - size = extract32(size, 0, 1) ? 3 : 2; - switch (opcode) { - case 0x2c: /* FCMGT (zero) */ - case 0x2d: /* FCMEQ (zero) */ - case 0x2e: /* FCMLT (zero) */ - case 0x6c: /* FCMGE (zero) */ - case 0x6d: /* FCMLE (zero) */ - handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd); - return; - case 0x1d: /* SCVTF */ - case 0x5d: /* UCVTF */ - { - bool is_signed = (opcode == 0x1d); - if (!fp_access_check(s)) { - return; - } - handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size); - return; - } - case 0x3d: /* FRECPE */ - case 0x3f: /* FRECPX */ - case 0x7d: /* FRSQRTE */ - if (!fp_access_check(s)) { - return; - } - handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd); - return; - case 0x1a: /* FCVTNS */ - case 0x1b: /* FCVTMS */ - case 0x3a: /* FCVTPS */ - case 0x3b: /* FCVTZS */ - case 0x5a: /* FCVTNU */ - case 0x5b: /* FCVTMU */ - case 0x7a: /* FCVTPU */ - case 0x7b: /* FCVTZU */ - is_fcvt = true; - rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); - break; - case 0x1c: /* FCVTAS */ - case 0x5c: /* FCVTAU */ - /* TIEAWAY doesn't fit in the usual rounding mode encoding */ - is_fcvt = true; - rmode = FPROUNDING_TIEAWAY; - break; - case 0x56: /* FCVTXN, FCVTXN2 */ - if (size == 2) { - unallocated_encoding(s); - return; - } - if (!fp_access_check(s)) { - return; - } - handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd); - return; - default: - unallocated_encoding(s); - return; - } - break; - default: - case 0x3: /* USQADD / SUQADD */ - unallocated_encoding(s); - return; - } - - if (!fp_access_check(s)) { - return; - } - - if (is_fcvt) { - tcg_fpstatus = fpstatus_ptr(FPST_FPCR); - tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); - } else { - tcg_fpstatus = NULL; - tcg_rmode = NULL; - } - - if (size == 3) { - TCGv_i64 tcg_rn = read_fp_dreg(s, rn); - TCGv_i64 tcg_rd = tcg_temp_new_i64(); - - handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus); - write_fp_dreg(s, rd, tcg_rd); - } else { - TCGv_i32 tcg_rn = tcg_temp_new_i32(); - TCGv_i32 tcg_rd = tcg_temp_new_i32(); - - read_vec_element_i32(s, tcg_rn, rn, 0, size); - - switch 
(opcode) { - case 0x7: /* SQABS, SQNEG */ - { - NeonGenOneOpEnvFn *genfn; - static NeonGenOneOpEnvFn * const fns[3][2] = { - { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 }, - { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 }, - { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 }, - }; - genfn = fns[size][u]; - genfn(tcg_rd, tcg_env, tcg_rn); - break; - } - case 0x1a: /* FCVTNS */ - case 0x1b: /* FCVTMS */ - case 0x1c: /* FCVTAS */ - case 0x3a: /* FCVTPS */ - case 0x3b: /* FCVTZS */ - gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0), - tcg_fpstatus); - break; - case 0x5a: /* FCVTNU */ - case 0x5b: /* FCVTMU */ - case 0x5c: /* FCVTAU */ - case 0x7a: /* FCVTPU */ - case 0x7b: /* FCVTZU */ - gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0), - tcg_fpstatus); - break; - default: - g_assert_not_reached(); - } - - write_fp_sreg(s, rd, tcg_rd); - } - - if (is_fcvt) { - gen_restore_rmode(tcg_rmode, tcg_fpstatus); - } -} - -/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */ -static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, - int immh, int immb, int opcode, int rn, int rd) -{ - int size = 32 - clz32(immh) - 1; - int immhb = immh << 3 | immb; - int shift = 2 * (8 << size) - immhb; - GVecGen2iFn *gvec_fn; - - if (extract32(immh, 3, 1) && !is_q) { - unallocated_encoding(s); - return; - } - tcg_debug_assert(size <= 3); - - if (!fp_access_check(s)) { - return; - } - - switch (opcode) { - case 0x02: /* SSRA / USRA (accumulate) */ - gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra; - break; - - case 0x08: /* SRI */ - gvec_fn = gen_gvec_sri; - break; - - case 0x00: /* SSHR / USHR */ - if (is_u) { - if (shift == 8 << size) { - /* Shift count the same size as element size produces zero. */ - tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd), - is_q ? 16 : 8, vec_full_reg_size(s), 0); - return; - } - gvec_fn = tcg_gen_gvec_shri; - } else { - /* Shift count the same size as element size produces all sign. */ - if (shift == 8 << size) { - shift -= 1; - } - gvec_fn = tcg_gen_gvec_sari; - } - break; - - case 0x04: /* SRSHR / URSHR (rounding) */ - gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr; - break; - - case 0x06: /* SRSRA / URSRA (accum + rounding) */ - gvec_fn = is_u ? 
gen_gvec_ursra : gen_gvec_srsra; - break; - - default: - g_assert_not_reached(); - } - - gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size); -} - -/* SHL/SLI - Vector shift left */ -static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert, - int immh, int immb, int opcode, int rn, int rd) -{ - int size = 32 - clz32(immh) - 1; - int immhb = immh << 3 | immb; - int shift = immhb - (8 << size); - - /* Range of size is limited by decode: immh is a non-zero 4 bit field */ - assert(size >= 0 && size <= 3); - - if (extract32(immh, 3, 1) && !is_q) { - unallocated_encoding(s); - return; - } - - if (!fp_access_check(s)) { - return; - } - - if (insert) { - gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size); - } else { - gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size); - } -} - -/* USHLL/SHLL - Vector shift left with widening */ -static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u, - int immh, int immb, int opcode, int rn, int rd) -{ - int size = 32 - clz32(immh) - 1; - int immhb = immh << 3 | immb; - int shift = immhb - (8 << size); - int dsize = 64; - int esize = 8 << size; - int elements = dsize/esize; - TCGv_i64 tcg_rn = tcg_temp_new_i64(); - TCGv_i64 tcg_rd = tcg_temp_new_i64(); - int i; - - if (size >= 3) { - unallocated_encoding(s); - return; - } - - if (!fp_access_check(s)) { - return; - } - - /* For the LL variants the store is larger than the load, - * so if rd == rn we would overwrite parts of our input. - * So load everything right now and use shifts in the main loop. - */ - read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64); - - for (i = 0; i < elements; i++) { - tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize); - ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0); - tcg_gen_shli_i64(tcg_rd, tcg_rd, shift); - write_vec_element(s, tcg_rd, rd, i, size + 1); - } -} - -/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */ -static void handle_vec_simd_shrn(DisasContext *s, bool is_q, - int immh, int immb, int opcode, int rn, int rd) -{ - int immhb = immh << 3 | immb; - int size = 32 - clz32(immh) - 1; - int dsize = 64; - int esize = 8 << size; - int elements = dsize/esize; - int shift = (2 * esize) - immhb; - bool round = extract32(opcode, 0, 1); - TCGv_i64 tcg_rn, tcg_rd, tcg_final; - TCGv_i64 tcg_round; - int i; - - if (extract32(immh, 3, 1)) { - unallocated_encoding(s); - return; - } - - if (!fp_access_check(s)) { - return; - } - - tcg_rn = tcg_temp_new_i64(); - tcg_rd = tcg_temp_new_i64(); - tcg_final = tcg_temp_new_i64(); - read_vec_element(s, tcg_final, rd, is_q ? 
1 : 0, MO_64); - - if (round) { - tcg_round = tcg_constant_i64(1ULL << (shift - 1)); - } else { - tcg_round = NULL; - } - - for (i = 0; i < elements; i++) { - read_vec_element(s, tcg_rn, rn, i, size+1); - handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round, - false, true, size+1, shift); - - tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize); - } - - if (!is_q) { - write_vec_element(s, tcg_final, rd, 0, MO_64); - } else { - write_vec_element(s, tcg_final, rd, 1, MO_64); - } - - clear_vec_high(s, is_q, rd); -} - - -/* AdvSIMD shift by immediate - * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0 - * +---+---+---+-------------+------+------+--------+---+------+------+ - * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd | - * +---+---+---+-------------+------+------+--------+---+------+------+ - */ -static void disas_simd_shift_imm(DisasContext *s, uint32_t insn) -{ - int rd = extract32(insn, 0, 5); - int rn = extract32(insn, 5, 5); - int opcode = extract32(insn, 11, 5); - int immb = extract32(insn, 16, 3); - int immh = extract32(insn, 19, 4); - bool is_u = extract32(insn, 29, 1); - bool is_q = extract32(insn, 30, 1); - - /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */ - assert(immh != 0); - - switch (opcode) { - case 0x08: /* SRI */ - if (!is_u) { - unallocated_encoding(s); - return; - } - /* fall through */ - case 0x00: /* SSHR / USHR */ - case 0x02: /* SSRA / USRA (accumulate) */ - case 0x04: /* SRSHR / URSHR (rounding) */ - case 0x06: /* SRSRA / URSRA (accum + rounding) */ - handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd); - break; - case 0x0a: /* SHL / SLI */ - handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd); - break; - case 0x10: /* SHRN */ - case 0x11: /* RSHRN / SQRSHRUN */ - if (is_u) { - handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb, - opcode, rn, rd); - } else { - handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd); - } - break; - case 0x12: /* SQSHRN / UQSHRN */ - case 0x13: /* SQRSHRN / UQRSHRN */ - handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb, - opcode, rn, rd); - break; - case 0x14: /* SSHLL / USHLL */ - handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd); - break; - case 0x1c: /* SCVTF / UCVTF */ - handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb, - opcode, rn, rd); - break; - case 0xc: /* SQSHLU */ - if (!is_u) { - unallocated_encoding(s); - return; - } - handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd); - break; - case 0xe: /* SQSHL, UQSHL */ - handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd); - break; - case 0x1f: /* FCVTZS/ FCVTZU */ - handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd); - return; - default: - unallocated_encoding(s); - return; - } -} - -static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q, - int size, int rn, int rd) -{ - /* Handle 2-reg-misc ops which are widening (so each size element - * in the source becomes a 2*size element in the destination. - * The only instruction like this is FCVTL. - */ - int pass; - - if (size == 3) { - /* 32 -> 64 bit fp conversion */ - TCGv_i64 tcg_res[2]; - int srcelt = is_q ? 
2 : 0; - - for (pass = 0; pass < 2; pass++) { - TCGv_i32 tcg_op = tcg_temp_new_i32(); - tcg_res[pass] = tcg_temp_new_i64(); - - read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32); - gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, tcg_env); - } - for (pass = 0; pass < 2; pass++) { - write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - } - } else { - /* 16 -> 32 bit fp conversion */ - int srcelt = is_q ? 4 : 0; - TCGv_i32 tcg_res[4]; - TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); - TCGv_i32 ahp = get_ahp_flag(); - - for (pass = 0; pass < 4; pass++) { - tcg_res[pass] = tcg_temp_new_i32(); - - read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16); - gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass], - fpst, ahp); - } - for (pass = 0; pass < 4; pass++) { - write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); - } - } -} - -static void handle_rev(DisasContext *s, int opcode, bool u, - bool is_q, int size, int rn, int rd) -{ - int op = (opcode << 1) | u; - int opsz = op + size; - int grp_size = 3 - opsz; - int dsize = is_q ? 128 : 64; - int i; - - if (opsz >= 3) { - unallocated_encoding(s); - return; - } - - if (!fp_access_check(s)) { - return; - } - - if (size == 0) { - /* Special case bytes, use bswap op on each group of elements */ - int groups = dsize / (8 << grp_size); - - for (i = 0; i < groups; i++) { - TCGv_i64 tcg_tmp = tcg_temp_new_i64(); - - read_vec_element(s, tcg_tmp, rn, i, grp_size); - switch (grp_size) { - case MO_16: - tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ); - break; - case MO_32: - tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ); - break; - case MO_64: - tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp); - break; - default: - g_assert_not_reached(); - } - write_vec_element(s, tcg_tmp, rd, i, grp_size); - } - clear_vec_high(s, is_q, rd); - } else { - int revmask = (1 << grp_size) - 1; - int esize = 8 << size; - int elements = dsize / esize; - TCGv_i64 tcg_rn = tcg_temp_new_i64(); - TCGv_i64 tcg_rd[2]; - - for (i = 0; i < 2; i++) { - tcg_rd[i] = tcg_temp_new_i64(); - tcg_gen_movi_i64(tcg_rd[i], 0); - } - - for (i = 0; i < elements; i++) { - int e_rev = (i & 0xf) ^ revmask; - int w = (e_rev * esize) / 64; - int o = (e_rev * esize) % 64; - - read_vec_element(s, tcg_rn, rn, i, size); - tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize); - } - - for (i = 0; i < 2; i++) { - write_vec_element(s, tcg_rd[i], rd, i, MO_64); - } - clear_vec_high(s, true, rd); - } -} - -static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, - bool is_q, int size, int rn, int rd) -{ - /* Implement the pairwise operations from 2-misc: - * SADDLP, UADDLP, SADALP, UADALP. - * These all add pairs of elements in the input to produce a - * double-width result element in the output (possibly accumulating). - */ - bool accum = (opcode == 0x6); - int maxpass = is_q ? 2 : 1; - int pass; - TCGv_i64 tcg_res[2]; - - if (size == 2) { - /* 32 + 32 -> 64 op */ - MemOp memop = size + (u ? 
0 : MO_SIGN); - - for (pass = 0; pass < maxpass; pass++) { - TCGv_i64 tcg_op1 = tcg_temp_new_i64(); - TCGv_i64 tcg_op2 = tcg_temp_new_i64(); - - tcg_res[pass] = tcg_temp_new_i64(); - - read_vec_element(s, tcg_op1, rn, pass * 2, memop); - read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop); - tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2); - if (accum) { - read_vec_element(s, tcg_op1, rd, pass, MO_64); - tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1); - } - } - } else { - for (pass = 0; pass < maxpass; pass++) { - TCGv_i64 tcg_op = tcg_temp_new_i64(); - NeonGenOne64OpFn *genfn; - static NeonGenOne64OpFn * const fns[2][2] = { - { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 }, - { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 }, - }; - - genfn = fns[size][u]; - - tcg_res[pass] = tcg_temp_new_i64(); - - read_vec_element(s, tcg_op, rn, pass, MO_64); - genfn(tcg_res[pass], tcg_op); - - if (accum) { - read_vec_element(s, tcg_op, rd, pass, MO_64); - if (size == 0) { - gen_helper_neon_addl_u16(tcg_res[pass], - tcg_res[pass], tcg_op); - } else { - gen_helper_neon_addl_u32(tcg_res[pass], - tcg_res[pass], tcg_op); - } - } - } - } - if (!is_q) { - tcg_res[1] = tcg_constant_i64(0); - } - for (pass = 0; pass < 2; pass++) { - write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - } -} - -static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd) -{ - /* Implement SHLL and SHLL2 */ - int pass; - int part = is_q ? 2 : 0; - TCGv_i64 tcg_res[2]; - - for (pass = 0; pass < 2; pass++) { - static NeonGenWidenFn * const widenfns[3] = { - gen_helper_neon_widen_u8, - gen_helper_neon_widen_u16, - tcg_gen_extu_i32_i64, - }; - NeonGenWidenFn *widenfn = widenfns[size]; - TCGv_i32 tcg_op = tcg_temp_new_i32(); - - read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32); - tcg_res[pass] = tcg_temp_new_i64(); - widenfn(tcg_res[pass], tcg_op); - tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size); - } - - for (pass = 0; pass < 2; pass++) { - write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - } -} - -/* AdvSIMD two reg misc - * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 - * +---+---+---+-----------+------+-----------+--------+-----+------+------+ - * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd | - * +---+---+---+-----------+------+-----------+--------+-----+------+------+ - */ -static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) -{ - int size = extract32(insn, 22, 2); - int opcode = extract32(insn, 12, 5); - bool u = extract32(insn, 29, 1); - bool is_q = extract32(insn, 30, 1); - int rn = extract32(insn, 5, 5); - int rd = extract32(insn, 0, 5); - bool need_fpstatus = false; - int rmode = -1; - TCGv_i32 tcg_rmode; - TCGv_ptr tcg_fpstatus; - - switch (opcode) { - case 0x0: /* REV64, REV32 */ - case 0x1: /* REV16 */ - handle_rev(s, opcode, u, is_q, size, rn, rd); - return; - case 0x5: /* CNT, NOT, RBIT */ - if (u && size == 0) { - /* NOT */ - break; - } else if (u && size == 1) { - /* RBIT */ - break; - } else if (!u && size == 0) { - /* CNT */ - break; - } - unallocated_encoding(s); - return; - case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */ - case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */ - if (size == 3) { - unallocated_encoding(s); - return; - } - if (!fp_access_check(s)) { - return; - } +WRAP_ENV(neon_unarrow_sat8) +WRAP_ENV(neon_unarrow_sat16) +WRAP_ENV(neon_unarrow_sat32) - handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd); - return; - case 0x4: /* CLS, CLZ */ - if (size == 3) { - unallocated_encoding(s); - return; - 
} - break; - case 0x2: /* SADDLP, UADDLP */ - case 0x6: /* SADALP, UADALP */ - if (size == 3) { - unallocated_encoding(s); - return; - } - if (!fp_access_check(s)) { - return; - } - handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd); - return; - case 0x13: /* SHLL, SHLL2 */ - if (u == 0 || size == 3) { - unallocated_encoding(s); - return; - } - if (!fp_access_check(s)) { - return; - } - handle_shll(s, is_q, size, rn, rd); - return; - case 0xa: /* CMLT */ - if (u == 1) { - unallocated_encoding(s); - return; - } - /* fall through */ - case 0x8: /* CMGT, CMGE */ - case 0x9: /* CMEQ, CMLE */ - case 0xb: /* ABS, NEG */ - if (size == 3 && !is_q) { - unallocated_encoding(s); - return; - } - break; - case 0x7: /* SQABS, SQNEG */ - if (size == 3 && !is_q) { - unallocated_encoding(s); - return; - } - break; - case 0xc ... 0xf: - case 0x16 ... 0x1f: - { - /* Floating point: U, size[1] and opcode indicate operation; - * size[0] indicates single or double precision. - */ - int is_double = extract32(size, 0, 1); - opcode |= (extract32(size, 1, 1) << 5) | (u << 6); - size = is_double ? 3 : 2; - switch (opcode) { - case 0x2f: /* FABS */ - case 0x6f: /* FNEG */ - if (size == 3 && !is_q) { - unallocated_encoding(s); - return; - } - break; - case 0x1d: /* SCVTF */ - case 0x5d: /* UCVTF */ - { - bool is_signed = (opcode == 0x1d) ? true : false; - int elements = is_double ? 2 : is_q ? 4 : 2; - if (is_double && !is_q) { - unallocated_encoding(s); - return; - } - if (!fp_access_check(s)) { - return; - } - handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size); - return; - } - case 0x2c: /* FCMGT (zero) */ - case 0x2d: /* FCMEQ (zero) */ - case 0x2e: /* FCMLT (zero) */ - case 0x6c: /* FCMGE (zero) */ - case 0x6d: /* FCMLE (zero) */ - if (size == 3 && !is_q) { - unallocated_encoding(s); - return; - } - handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd); - return; - case 0x7f: /* FSQRT */ - if (size == 3 && !is_q) { - unallocated_encoding(s); - return; - } - break; - case 0x1a: /* FCVTNS */ - case 0x1b: /* FCVTMS */ - case 0x3a: /* FCVTPS */ - case 0x3b: /* FCVTZS */ - case 0x5a: /* FCVTNU */ - case 0x5b: /* FCVTMU */ - case 0x7a: /* FCVTPU */ - case 0x7b: /* FCVTZU */ - need_fpstatus = true; - rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); - if (size == 3 && !is_q) { - unallocated_encoding(s); - return; - } - break; - case 0x5c: /* FCVTAU */ - case 0x1c: /* FCVTAS */ - need_fpstatus = true; - rmode = FPROUNDING_TIEAWAY; - if (size == 3 && !is_q) { - unallocated_encoding(s); - return; - } - break; - case 0x3c: /* URECPE */ - if (size == 3) { - unallocated_encoding(s); - return; - } - /* fall through */ - case 0x3d: /* FRECPE */ - case 0x7d: /* FRSQRTE */ - if (size == 3 && !is_q) { - unallocated_encoding(s); - return; - } - if (!fp_access_check(s)) { - return; - } - handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd); - return; - case 0x56: /* FCVTXN, FCVTXN2 */ - if (size == 2) { - unallocated_encoding(s); - return; - } - /* fall through */ - case 0x16: /* FCVTN, FCVTN2 */ - /* handle_2misc_narrow does a 2*size -> size operation, but these - * instructions encode the source size rather than dest size. 
- */ - if (!fp_access_check(s)) { - return; - } - handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd); - return; - case 0x36: /* BFCVTN, BFCVTN2 */ - if (!dc_isar_feature(aa64_bf16, s) || size != 2) { - unallocated_encoding(s); - return; - } - if (!fp_access_check(s)) { - return; - } - handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd); - return; - case 0x17: /* FCVTL, FCVTL2 */ - if (!fp_access_check(s)) { - return; - } - handle_2misc_widening(s, opcode, is_q, size, rn, rd); - return; - case 0x18: /* FRINTN */ - case 0x19: /* FRINTM */ - case 0x38: /* FRINTP */ - case 0x39: /* FRINTZ */ - rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); - /* fall through */ - case 0x59: /* FRINTX */ - case 0x79: /* FRINTI */ - need_fpstatus = true; - if (size == 3 && !is_q) { - unallocated_encoding(s); - return; - } - break; - case 0x58: /* FRINTA */ - rmode = FPROUNDING_TIEAWAY; - need_fpstatus = true; - if (size == 3 && !is_q) { - unallocated_encoding(s); - return; - } - break; - case 0x7c: /* URSQRTE */ - if (size == 3) { - unallocated_encoding(s); - return; - } - break; - case 0x1e: /* FRINT32Z */ - case 0x1f: /* FRINT64Z */ - rmode = FPROUNDING_ZERO; - /* fall through */ - case 0x5e: /* FRINT32X */ - case 0x5f: /* FRINT64X */ - need_fpstatus = true; - if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) { - unallocated_encoding(s); - return; - } - break; - default: - unallocated_encoding(s); - return; - } - break; - } - default: - case 0x3: /* SUQADD, USQADD */ - unallocated_encoding(s); - return; - } - - if (!fp_access_check(s)) { - return; - } +static ArithOneOp * const f_scalar_sqxtun[] = { + gen_neon_unarrow_sat8, + gen_neon_unarrow_sat16, + gen_neon_unarrow_sat32, +}; +TRANS(SQXTUN_s, do_2misc_narrow_scalar, a, f_scalar_sqxtun) - if (need_fpstatus || rmode >= 0) { - tcg_fpstatus = fpstatus_ptr(FPST_FPCR); - } else { - tcg_fpstatus = NULL; - } - if (rmode >= 0) { - tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); - } else { - tcg_rmode = NULL; - } +WRAP_ENV(neon_narrow_sat_s8) +WRAP_ENV(neon_narrow_sat_s16) +WRAP_ENV(neon_narrow_sat_s32) - switch (opcode) { - case 0x5: - if (u && size == 0) { /* NOT */ - gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0); - return; - } - break; - case 0x8: /* CMGT, CMGE */ - if (u) { - gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size); - } else { - gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size); - } - return; - case 0x9: /* CMEQ, CMLE */ - if (u) { - gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size); - } else { - gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size); - } - return; - case 0xa: /* CMLT */ - gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size); - return; - case 0xb: - if (u) { /* ABS, NEG */ - gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size); - } else { - gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size); - } - return; - } +static ArithOneOp * const f_scalar_sqxtn[] = { + gen_neon_narrow_sat_s8, + gen_neon_narrow_sat_s16, + gen_neon_narrow_sat_s32, +}; +TRANS(SQXTN_s, do_2misc_narrow_scalar, a, f_scalar_sqxtn) - if (size == 3) { - /* All 64-bit element operations can be shared with scalar 2misc */ - int pass; +WRAP_ENV(neon_narrow_sat_u8) +WRAP_ENV(neon_narrow_sat_u16) +WRAP_ENV(neon_narrow_sat_u32) - /* Coverity claims (size == 3 && !is_q) has been eliminated - * from all paths leading to here. 
+static ArithOneOp * const f_scalar_uqxtn[] = { + gen_neon_narrow_sat_u8, + gen_neon_narrow_sat_u16, + gen_neon_narrow_sat_u32, +}; +TRANS(UQXTN_s, do_2misc_narrow_scalar, a, f_scalar_uqxtn) + +static bool trans_FCVTXN_s(DisasContext *s, arg_rr_e *a) +{ + if (fp_access_check(s)) { + /* + * 64 bit to 32 bit float conversion + * with von Neumann rounding (round to odd) */ - tcg_debug_assert(is_q); - for (pass = 0; pass < 2; pass++) { - TCGv_i64 tcg_op = tcg_temp_new_i64(); - TCGv_i64 tcg_res = tcg_temp_new_i64(); + TCGv_i64 src = read_fp_dreg(s, a->rn); + TCGv_i32 dst = tcg_temp_new_i32(); + gen_helper_fcvtx_f64_to_f32(dst, src, fpstatus_ptr(FPST_A64)); + write_fp_sreg_merging(s, a->rd, a->rd, dst); + } + return true; +} + +#undef WRAP_ENV + +static bool do_gvec_fn2(DisasContext *s, arg_qrr_e *a, GVecGen2Fn *fn) +{ + if (!a->q && a->esz == MO_64) { + return false; + } + if (fp_access_check(s)) { + gen_gvec_fn2(s, a->q, a->rd, a->rn, fn, a->esz); + } + return true; +} - read_vec_element(s, tcg_op, rn, pass, MO_64); +TRANS(ABS_v, do_gvec_fn2, a, tcg_gen_gvec_abs) +TRANS(NEG_v, do_gvec_fn2, a, tcg_gen_gvec_neg) +TRANS(NOT_v, do_gvec_fn2, a, tcg_gen_gvec_not) +TRANS(CNT_v, do_gvec_fn2, a, gen_gvec_cnt) +TRANS(RBIT_v, do_gvec_fn2, a, gen_gvec_rbit) +TRANS(CMGT0_v, do_gvec_fn2, a, gen_gvec_cgt0) +TRANS(CMGE0_v, do_gvec_fn2, a, gen_gvec_cge0) +TRANS(CMLT0_v, do_gvec_fn2, a, gen_gvec_clt0) +TRANS(CMLE0_v, do_gvec_fn2, a, gen_gvec_cle0) +TRANS(CMEQ0_v, do_gvec_fn2, a, gen_gvec_ceq0) +TRANS(REV16_v, do_gvec_fn2, a, gen_gvec_rev16) +TRANS(REV32_v, do_gvec_fn2, a, gen_gvec_rev32) +TRANS(URECPE_v, do_gvec_fn2, a, gen_gvec_urecpe) +TRANS(URSQRTE_v, do_gvec_fn2, a, gen_gvec_ursqrte) - handle_2misc_64(s, opcode, u, tcg_res, tcg_op, - tcg_rmode, tcg_fpstatus); +static bool do_gvec_fn2_bhs(DisasContext *s, arg_qrr_e *a, GVecGen2Fn *fn) +{ + if (a->esz == MO_64) { + return false; + } + if (fp_access_check(s)) { + gen_gvec_fn2(s, a->q, a->rd, a->rn, fn, a->esz); + } + return true; +} - write_vec_element(s, tcg_res, rd, pass, MO_64); - } - } else { - int pass; - - for (pass = 0; pass < (is_q ? 
4 : 2); pass++) { - TCGv_i32 tcg_op = tcg_temp_new_i32(); - TCGv_i32 tcg_res = tcg_temp_new_i32(); - - read_vec_element_i32(s, tcg_op, rn, pass, MO_32); - - if (size == 2) { - /* Special cases for 32 bit elements */ - switch (opcode) { - case 0x4: /* CLS */ - if (u) { - tcg_gen_clzi_i32(tcg_res, tcg_op, 32); - } else { - tcg_gen_clrsb_i32(tcg_res, tcg_op); - } - break; - case 0x7: /* SQABS, SQNEG */ - if (u) { - gen_helper_neon_qneg_s32(tcg_res, tcg_env, tcg_op); - } else { - gen_helper_neon_qabs_s32(tcg_res, tcg_env, tcg_op); - } - break; - case 0x2f: /* FABS */ - gen_vfp_abss(tcg_res, tcg_op); - break; - case 0x6f: /* FNEG */ - gen_vfp_negs(tcg_res, tcg_op); - break; - case 0x7f: /* FSQRT */ - gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env); - break; - case 0x1a: /* FCVTNS */ - case 0x1b: /* FCVTMS */ - case 0x1c: /* FCVTAS */ - case 0x3a: /* FCVTPS */ - case 0x3b: /* FCVTZS */ - gen_helper_vfp_tosls(tcg_res, tcg_op, - tcg_constant_i32(0), tcg_fpstatus); - break; - case 0x5a: /* FCVTNU */ - case 0x5b: /* FCVTMU */ - case 0x5c: /* FCVTAU */ - case 0x7a: /* FCVTPU */ - case 0x7b: /* FCVTZU */ - gen_helper_vfp_touls(tcg_res, tcg_op, - tcg_constant_i32(0), tcg_fpstatus); - break; - case 0x18: /* FRINTN */ - case 0x19: /* FRINTM */ - case 0x38: /* FRINTP */ - case 0x39: /* FRINTZ */ - case 0x58: /* FRINTA */ - case 0x79: /* FRINTI */ - gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus); - break; - case 0x59: /* FRINTX */ - gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus); - break; - case 0x7c: /* URSQRTE */ - gen_helper_rsqrte_u32(tcg_res, tcg_op); - break; - case 0x1e: /* FRINT32Z */ - case 0x5e: /* FRINT32X */ - gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus); - break; - case 0x1f: /* FRINT64Z */ - case 0x5f: /* FRINT64X */ - gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus); - break; - default: - g_assert_not_reached(); - } - } else { - /* Use helpers for 8 and 16 bit elements */ - switch (opcode) { - case 0x5: /* CNT, RBIT */ - /* For these two insns size is part of the opcode specifier - * (handled earlier); they always operate on byte elements. 
- */ - if (u) { - gen_helper_neon_rbit_u8(tcg_res, tcg_op); - } else { - gen_helper_neon_cnt_u8(tcg_res, tcg_op); - } - break; - case 0x7: /* SQABS, SQNEG */ - { - NeonGenOneOpEnvFn *genfn; - static NeonGenOneOpEnvFn * const fns[2][2] = { - { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 }, - { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 }, - }; - genfn = fns[size][u]; - genfn(tcg_res, tcg_env, tcg_op); - break; - } - case 0x4: /* CLS, CLZ */ - if (u) { - if (size == 0) { - gen_helper_neon_clz_u8(tcg_res, tcg_op); - } else { - gen_helper_neon_clz_u16(tcg_res, tcg_op); - } - } else { - if (size == 0) { - gen_helper_neon_cls_s8(tcg_res, tcg_op); - } else { - gen_helper_neon_cls_s16(tcg_res, tcg_op); - } - } - break; - default: - g_assert_not_reached(); - } - } +TRANS(CLS_v, do_gvec_fn2_bhs, a, gen_gvec_cls) +TRANS(CLZ_v, do_gvec_fn2_bhs, a, gen_gvec_clz) +TRANS(REV64_v, do_gvec_fn2_bhs, a, gen_gvec_rev64) +TRANS(SADDLP_v, do_gvec_fn2_bhs, a, gen_gvec_saddlp) +TRANS(UADDLP_v, do_gvec_fn2_bhs, a, gen_gvec_uaddlp) +TRANS(SADALP_v, do_gvec_fn2_bhs, a, gen_gvec_sadalp) +TRANS(UADALP_v, do_gvec_fn2_bhs, a, gen_gvec_uadalp) - write_vec_element_i32(s, tcg_res, rd, pass, MO_32); - } +static bool do_2misc_narrow_vector(DisasContext *s, arg_qrr_e *a, + ArithOneOp * const fn[3]) +{ + if (a->esz == MO_64) { + return false; } - clear_vec_high(s, is_q, rd); + if (fp_access_check(s)) { + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); - if (tcg_rmode) { - gen_restore_rmode(tcg_rmode, tcg_fpstatus); + read_vec_element(s, t0, a->rn, 0, MO_64); + read_vec_element(s, t1, a->rn, 1, MO_64); + fn[a->esz](t0, t0); + fn[a->esz](t1, t1); + write_vec_element(s, t0, a->rd, a->q ? 2 : 0, MO_32); + write_vec_element(s, t1, a->rd, a->q ? 3 : 1, MO_32); + clear_vec_high(s, a->q, a->rd); } + return true; } -/* AdvSIMD [scalar] two register miscellaneous (FP16) - * - * 31 30 29 28 27 24 23 22 21 17 16 12 11 10 9 5 4 0 - * +---+---+---+---+---------+---+-------------+--------+-----+------+------+ - * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 | Rn | Rd | - * +---+---+---+---+---------+---+-------------+--------+-----+------+------+ - * mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00 - * val: 0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800 - * - * This actually covers two groups where scalar access is governed by - * bit 28. A bunch of the instructions (float to integral) only exist - * in the vector form and are un-allocated for the scalar decode. Also - * in the scalar decode Q is always 1. 
- */ -static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) +static ArithOneOp * const f_scalar_xtn[] = { + gen_helper_neon_narrow_u8, + gen_helper_neon_narrow_u16, + tcg_gen_ext32u_i64, +}; +TRANS(XTN, do_2misc_narrow_vector, a, f_scalar_xtn) +TRANS(SQXTUN_v, do_2misc_narrow_vector, a, f_scalar_sqxtun) +TRANS(SQXTN_v, do_2misc_narrow_vector, a, f_scalar_sqxtn) +TRANS(UQXTN_v, do_2misc_narrow_vector, a, f_scalar_uqxtn) + +static void gen_fcvtn_hs(TCGv_i64 d, TCGv_i64 n) { - int fpop, opcode, a, u; - int rn, rd; - bool is_q; - bool is_scalar; - bool only_in_vector = false; + TCGv_i32 tcg_lo = tcg_temp_new_i32(); + TCGv_i32 tcg_hi = tcg_temp_new_i32(); + TCGv_ptr fpst = fpstatus_ptr(FPST_A64); + TCGv_i32 ahp = get_ahp_flag(); - int pass; - TCGv_i32 tcg_rmode = NULL; - TCGv_ptr tcg_fpstatus = NULL; - bool need_fpst = true; - int rmode = -1; + tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, n); + gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp); + gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp); + tcg_gen_deposit_i32(tcg_lo, tcg_lo, tcg_hi, 16, 16); + tcg_gen_extu_i32_i64(d, tcg_lo); +} - if (!dc_isar_feature(aa64_fp16, s)) { - unallocated_encoding(s); - return; - } +static void gen_fcvtn_sd(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_i32 tmp = tcg_temp_new_i32(); + TCGv_ptr fpst = fpstatus_ptr(FPST_A64); + + gen_helper_vfp_fcvtsd(tmp, n, fpst); + tcg_gen_extu_i32_i64(d, tmp); +} + +static void gen_fcvtxn_sd(TCGv_i64 d, TCGv_i64 n) +{ + /* + * 64 bit to 32 bit float conversion + * with von Neumann rounding (round to odd) + */ + TCGv_i32 tmp = tcg_temp_new_i32(); + gen_helper_fcvtx_f64_to_f32(tmp, n, fpstatus_ptr(FPST_A64)); + tcg_gen_extu_i32_i64(d, tmp); +} - rd = extract32(insn, 0, 5); - rn = extract32(insn, 5, 5); +static ArithOneOp * const f_vector_fcvtn[] = { + NULL, + gen_fcvtn_hs, + gen_fcvtn_sd, +}; +static ArithOneOp * const f_scalar_fcvtxn[] = { + NULL, + NULL, + gen_fcvtxn_sd, +}; +TRANS(FCVTN_v, do_2misc_narrow_vector, a, f_vector_fcvtn) +TRANS(FCVTXN_v, do_2misc_narrow_vector, a, f_scalar_fcvtxn) - a = extract32(insn, 23, 1); - u = extract32(insn, 29, 1); - is_scalar = extract32(insn, 28, 1); - is_q = extract32(insn, 30, 1); +static void gen_bfcvtn_hs(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_ptr fpst = fpstatus_ptr(FPST_A64); + TCGv_i32 tmp = tcg_temp_new_i32(); + gen_helper_bfcvt_pair(tmp, n, fpst); + tcg_gen_extu_i32_i64(d, tmp); +} - opcode = extract32(insn, 12, 5); - fpop = deposit32(opcode, 5, 1, a); - fpop = deposit32(fpop, 6, 1, u); +static void gen_bfcvtn_ah_hs(TCGv_i64 d, TCGv_i64 n) +{ + TCGv_ptr fpst = fpstatus_ptr(FPST_AH); + TCGv_i32 tmp = tcg_temp_new_i32(); + gen_helper_bfcvt_pair(tmp, n, fpst); + tcg_gen_extu_i32_i64(d, tmp); +} - switch (fpop) { - case 0x1d: /* SCVTF */ - case 0x5d: /* UCVTF */ +static ArithOneOp * const f_vector_bfcvtn[2][3] = { { - int elements; + NULL, + gen_bfcvtn_hs, + NULL, + }, { + NULL, + gen_bfcvtn_ah_hs, + NULL, + } +}; +TRANS_FEAT(BFCVTN_v, aa64_bf16, do_2misc_narrow_vector, a, + f_vector_bfcvtn[s->fpcr_ah]) - if (is_scalar) { - elements = 1; - } else { - elements = (is_q ? 
8 : 4); - } +static bool trans_SHLL_v(DisasContext *s, arg_qrr_e *a) +{ + static NeonGenWidenFn * const widenfns[3] = { + gen_helper_neon_widen_u8, + gen_helper_neon_widen_u16, + tcg_gen_extu_i32_i64, + }; + NeonGenWidenFn *widenfn; + TCGv_i64 tcg_res[2]; + TCGv_i32 tcg_op; + int part, pass; - if (!fp_access_check(s)) { - return; - } - handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16); - return; + if (a->esz == MO_64) { + return false; } - break; - case 0x2c: /* FCMGT (zero) */ - case 0x2d: /* FCMEQ (zero) */ - case 0x2e: /* FCMLT (zero) */ - case 0x6c: /* FCMGE (zero) */ - case 0x6d: /* FCMLE (zero) */ - handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd); - return; - case 0x3d: /* FRECPE */ - case 0x3f: /* FRECPX */ - break; - case 0x18: /* FRINTN */ - only_in_vector = true; - rmode = FPROUNDING_TIEEVEN; - break; - case 0x19: /* FRINTM */ - only_in_vector = true; - rmode = FPROUNDING_NEGINF; - break; - case 0x38: /* FRINTP */ - only_in_vector = true; - rmode = FPROUNDING_POSINF; - break; - case 0x39: /* FRINTZ */ - only_in_vector = true; - rmode = FPROUNDING_ZERO; - break; - case 0x58: /* FRINTA */ - only_in_vector = true; - rmode = FPROUNDING_TIEAWAY; - break; - case 0x59: /* FRINTX */ - case 0x79: /* FRINTI */ - only_in_vector = true; - /* current rounding mode */ - break; - case 0x1a: /* FCVTNS */ - rmode = FPROUNDING_TIEEVEN; - break; - case 0x1b: /* FCVTMS */ - rmode = FPROUNDING_NEGINF; - break; - case 0x1c: /* FCVTAS */ - rmode = FPROUNDING_TIEAWAY; - break; - case 0x3a: /* FCVTPS */ - rmode = FPROUNDING_POSINF; - break; - case 0x3b: /* FCVTZS */ - rmode = FPROUNDING_ZERO; - break; - case 0x5a: /* FCVTNU */ - rmode = FPROUNDING_TIEEVEN; - break; - case 0x5b: /* FCVTMU */ - rmode = FPROUNDING_NEGINF; - break; - case 0x5c: /* FCVTAU */ - rmode = FPROUNDING_TIEAWAY; - break; - case 0x7a: /* FCVTPU */ - rmode = FPROUNDING_POSINF; - break; - case 0x7b: /* FCVTZU */ - rmode = FPROUNDING_ZERO; - break; - case 0x2f: /* FABS */ - case 0x6f: /* FNEG */ - need_fpst = false; - break; - case 0x7d: /* FRSQRTE */ - case 0x7f: /* FSQRT (vector) */ - break; - default: - unallocated_encoding(s); - return; + if (!fp_access_check(s)) { + return true; } + tcg_op = tcg_temp_new_i32(); + widenfn = widenfns[a->esz]; + part = a->q ? 
2 : 0; - /* Check additional constraints for the scalar encoding */ - if (is_scalar) { - if (!is_q) { - unallocated_encoding(s); - return; - } - /* FRINTxx is only in the vector form */ - if (only_in_vector) { - unallocated_encoding(s); - return; - } + for (pass = 0; pass < 2; pass++) { + read_vec_element_i32(s, tcg_op, a->rn, part + pass, MO_32); + tcg_res[pass] = tcg_temp_new_i64(); + widenfn(tcg_res[pass], tcg_op); + tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << a->esz); } - if (!fp_access_check(s)) { - return; + for (pass = 0; pass < 2; pass++) { + write_vec_element(s, tcg_res[pass], a->rd, pass, MO_64); + } + return true; +} + +static bool do_fabs_fneg_v(DisasContext *s, arg_qrr_e *a, GVecGen2Fn *fn) +{ + int check = fp_access_check_vector_hsd(s, a->q, a->esz); + + if (check <= 0) { + return check == 0; } - if (rmode >= 0 || need_fpst) { - tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16); + gen_gvec_fn2(s, a->q, a->rd, a->rn, fn, a->esz); + return true; +} + +TRANS(FABS_v, do_fabs_fneg_v, a, gen_gvec_fabs) +TRANS(FNEG_v, do_fabs_fneg_v, a, gen_gvec_fneg) + +static bool do_fp1_vector(DisasContext *s, arg_qrr_e *a, + const FPScalar1 *f, int rmode) +{ + TCGv_i32 tcg_rmode = NULL; + TCGv_ptr fpst; + int check = fp_access_check_vector_hsd(s, a->q, a->esz); + + if (check <= 0) { + return check == 0; } + fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); if (rmode >= 0) { - tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); + tcg_rmode = gen_set_rmode(rmode, fpst); } - if (is_scalar) { - TCGv_i32 tcg_op = read_fp_hreg(s, rn); - TCGv_i32 tcg_res = tcg_temp_new_i32(); + if (a->esz == MO_64) { + TCGv_i64 t64 = tcg_temp_new_i64(); - switch (fpop) { - case 0x1a: /* FCVTNS */ - case 0x1b: /* FCVTMS */ - case 0x1c: /* FCVTAS */ - case 0x3a: /* FCVTPS */ - case 0x3b: /* FCVTZS */ - gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus); - break; - case 0x3d: /* FRECPE */ - gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus); - break; - case 0x3f: /* FRECPX */ - gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus); - break; - case 0x5a: /* FCVTNU */ - case 0x5b: /* FCVTMU */ - case 0x5c: /* FCVTAU */ - case 0x7a: /* FCVTPU */ - case 0x7b: /* FCVTZU */ - gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus); - break; - case 0x6f: /* FNEG */ - tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000); - break; - case 0x7d: /* FRSQRTE */ - gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus); - break; - default: - g_assert_not_reached(); + for (int pass = 0; pass < 2; ++pass) { + read_vec_element(s, t64, a->rn, pass, MO_64); + f->gen_d(t64, t64, fpst); + write_vec_element(s, t64, a->rd, pass, MO_64); } - - /* limit any sign extension going on */ - tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff); - write_fp_sreg(s, rd, tcg_res); } else { - for (pass = 0; pass < (is_q ? 
8 : 4); pass++) { - TCGv_i32 tcg_op = tcg_temp_new_i32(); - TCGv_i32 tcg_res = tcg_temp_new_i32(); - - read_vec_element_i32(s, tcg_op, rn, pass, MO_16); - - switch (fpop) { - case 0x1a: /* FCVTNS */ - case 0x1b: /* FCVTMS */ - case 0x1c: /* FCVTAS */ - case 0x3a: /* FCVTPS */ - case 0x3b: /* FCVTZS */ - gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus); - break; - case 0x3d: /* FRECPE */ - gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus); - break; - case 0x5a: /* FCVTNU */ - case 0x5b: /* FCVTMU */ - case 0x5c: /* FCVTAU */ - case 0x7a: /* FCVTPU */ - case 0x7b: /* FCVTZU */ - gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus); - break; - case 0x18: /* FRINTN */ - case 0x19: /* FRINTM */ - case 0x38: /* FRINTP */ - case 0x39: /* FRINTZ */ - case 0x58: /* FRINTA */ - case 0x79: /* FRINTI */ - gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus); - break; - case 0x59: /* FRINTX */ - gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus); - break; - case 0x2f: /* FABS */ - tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff); - break; - case 0x6f: /* FNEG */ - tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000); - break; - case 0x7d: /* FRSQRTE */ - gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus); - break; - case 0x7f: /* FSQRT */ - gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus); - break; - default: - g_assert_not_reached(); - } + TCGv_i32 t32 = tcg_temp_new_i32(); + void (*gen)(TCGv_i32, TCGv_i32, TCGv_ptr) + = (a->esz == MO_16 ? f->gen_h : f->gen_s); - write_vec_element_i32(s, tcg_res, rd, pass, MO_16); + for (int pass = 0, n = (a->q ? 16 : 8) >> a->esz; pass < n; ++pass) { + read_vec_element_i32(s, t32, a->rn, pass, a->esz); + gen(t32, t32, fpst); + write_vec_element_i32(s, t32, a->rd, pass, a->esz); } + } + clear_vec_high(s, a->q, a->rd); - clear_vec_high(s, is_q, rd); + if (rmode >= 0) { + gen_restore_rmode(tcg_rmode, fpst); } + return true; +} + +TRANS(FSQRT_v, do_fp1_vector, a, &f_scalar_fsqrt, -1) + +TRANS(FRINTN_v, do_fp1_vector, a, &f_scalar_frint, FPROUNDING_TIEEVEN) +TRANS(FRINTP_v, do_fp1_vector, a, &f_scalar_frint, FPROUNDING_POSINF) +TRANS(FRINTM_v, do_fp1_vector, a, &f_scalar_frint, FPROUNDING_NEGINF) +TRANS(FRINTZ_v, do_fp1_vector, a, &f_scalar_frint, FPROUNDING_ZERO) +TRANS(FRINTA_v, do_fp1_vector, a, &f_scalar_frint, FPROUNDING_TIEAWAY) +TRANS(FRINTI_v, do_fp1_vector, a, &f_scalar_frint, -1) +TRANS(FRINTX_v, do_fp1_vector, a, &f_scalar_frintx, -1) - if (tcg_rmode) { - gen_restore_rmode(tcg_rmode, tcg_fpstatus); +TRANS_FEAT(FRINT32Z_v, aa64_frint, do_fp1_vector, a, + &f_scalar_frint32, FPROUNDING_ZERO) +TRANS_FEAT(FRINT32X_v, aa64_frint, do_fp1_vector, a, &f_scalar_frint32, -1) +TRANS_FEAT(FRINT64Z_v, aa64_frint, do_fp1_vector, a, + &f_scalar_frint64, FPROUNDING_ZERO) +TRANS_FEAT(FRINT64X_v, aa64_frint, do_fp1_vector, a, &f_scalar_frint64, -1) + +static bool do_gvec_op2_fpst_with_fpsttype(DisasContext *s, MemOp esz, + bool is_q, int rd, int rn, int data, + gen_helper_gvec_2_ptr * const fns[3], + ARMFPStatusFlavour fpsttype) +{ + int check = fp_access_check_vector_hsd(s, is_q, esz); + TCGv_ptr fpst; + + if (check <= 0) { + return check == 0; } + + fpst = fpstatus_ptr(fpsttype); + tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd), + vec_full_reg_offset(s, rn), fpst, + is_q ? 16 : 8, vec_full_reg_size(s), + data, fns[esz - 1]); + return true; } -/* C3.6 Data processing - SIMD, inc Crypto - * - * As the decode gets a little complex we are using a table based - * approach for this part of the decode. 
- */ -static const AArch64DecodeTable data_proc_simd[] = { - /* pattern , mask , fn */ - { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc }, - { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes }, - /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */ - { 0x0f000400, 0x9ff80400, disas_simd_mod_imm }, - { 0x0f000400, 0x9f800400, disas_simd_shift_imm }, - { 0x0e000000, 0xbf208c00, disas_simd_tb }, - { 0x0e000800, 0xbf208c00, disas_simd_zip_trn }, - { 0x2e000000, 0xbf208400, disas_simd_ext }, - { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc }, - { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm }, - { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 }, - { 0x00000000, 0x00000000, NULL } +static bool do_gvec_op2_fpst(DisasContext *s, MemOp esz, bool is_q, + int rd, int rn, int data, + gen_helper_gvec_2_ptr * const fns[3]) +{ + return do_gvec_op2_fpst_with_fpsttype(s, esz, is_q, rd, rn, data, fns, + esz == MO_16 ? FPST_A64_F16 : + FPST_A64); +} + +static bool do_gvec_op2_ah_fpst(DisasContext *s, MemOp esz, bool is_q, + int rd, int rn, int data, + gen_helper_gvec_2_ptr * const fns[3]) +{ + return do_gvec_op2_fpst_with_fpsttype(s, esz, is_q, rd, rn, data, + fns, select_ah_fpst(s, esz)); +} + +static gen_helper_gvec_2_ptr * const f_scvtf_v[] = { + gen_helper_gvec_vcvt_sh, + gen_helper_gvec_vcvt_sf, + gen_helper_gvec_vcvt_sd, +}; +TRANS(SCVTF_vi, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, 0, f_scvtf_v) +TRANS(SCVTF_vf, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, a->shift, f_scvtf_v) + +static gen_helper_gvec_2_ptr * const f_ucvtf_v[] = { + gen_helper_gvec_vcvt_uh, + gen_helper_gvec_vcvt_uf, + gen_helper_gvec_vcvt_ud, +}; +TRANS(UCVTF_vi, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, 0, f_ucvtf_v) +TRANS(UCVTF_vf, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, a->shift, f_ucvtf_v) + +static gen_helper_gvec_2_ptr * const f_fcvtzs_vf[] = { + gen_helper_gvec_vcvt_rz_hs, + gen_helper_gvec_vcvt_rz_fs, + gen_helper_gvec_vcvt_rz_ds, +}; +TRANS(FCVTZS_vf, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, a->shift, f_fcvtzs_vf) + +static gen_helper_gvec_2_ptr * const f_fcvtzu_vf[] = { + gen_helper_gvec_vcvt_rz_hu, + gen_helper_gvec_vcvt_rz_fu, + gen_helper_gvec_vcvt_rz_du, +}; +TRANS(FCVTZU_vf, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, a->shift, f_fcvtzu_vf) + +static gen_helper_gvec_2_ptr * const f_fcvt_s_vi[] = { + gen_helper_gvec_vcvt_rm_sh, + gen_helper_gvec_vcvt_rm_ss, + gen_helper_gvec_vcvt_rm_sd, +}; + +static gen_helper_gvec_2_ptr * const f_fcvt_u_vi[] = { + gen_helper_gvec_vcvt_rm_uh, + gen_helper_gvec_vcvt_rm_us, + gen_helper_gvec_vcvt_rm_ud, +}; + +TRANS(FCVTNS_vi, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, float_round_nearest_even, f_fcvt_s_vi) +TRANS(FCVTNU_vi, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, float_round_nearest_even, f_fcvt_u_vi) +TRANS(FCVTPS_vi, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, float_round_up, f_fcvt_s_vi) +TRANS(FCVTPU_vi, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, float_round_up, f_fcvt_u_vi) +TRANS(FCVTMS_vi, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, float_round_down, f_fcvt_s_vi) +TRANS(FCVTMU_vi, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, float_round_down, f_fcvt_u_vi) +TRANS(FCVTZS_vi, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, float_round_to_zero, f_fcvt_s_vi) +TRANS(FCVTZU_vi, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, float_round_to_zero, f_fcvt_u_vi) +TRANS(FCVTAS_vi, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, float_round_ties_away, f_fcvt_s_vi) 
+TRANS(FCVTAU_vi, do_gvec_op2_fpst, + a->esz, a->q, a->rd, a->rn, float_round_ties_away, f_fcvt_u_vi) + +static gen_helper_gvec_2_ptr * const f_fceq0[] = { + gen_helper_gvec_fceq0_h, + gen_helper_gvec_fceq0_s, + gen_helper_gvec_fceq0_d, +}; +TRANS(FCMEQ0_v, do_gvec_op2_fpst, a->esz, a->q, a->rd, a->rn, 0, f_fceq0) + +static gen_helper_gvec_2_ptr * const f_fcgt0[] = { + gen_helper_gvec_fcgt0_h, + gen_helper_gvec_fcgt0_s, + gen_helper_gvec_fcgt0_d, +}; +TRANS(FCMGT0_v, do_gvec_op2_fpst, a->esz, a->q, a->rd, a->rn, 0, f_fcgt0) + +static gen_helper_gvec_2_ptr * const f_fcge0[] = { + gen_helper_gvec_fcge0_h, + gen_helper_gvec_fcge0_s, + gen_helper_gvec_fcge0_d, +}; +TRANS(FCMGE0_v, do_gvec_op2_fpst, a->esz, a->q, a->rd, a->rn, 0, f_fcge0) + +static gen_helper_gvec_2_ptr * const f_fclt0[] = { + gen_helper_gvec_fclt0_h, + gen_helper_gvec_fclt0_s, + gen_helper_gvec_fclt0_d, +}; +TRANS(FCMLT0_v, do_gvec_op2_fpst, a->esz, a->q, a->rd, a->rn, 0, f_fclt0) + +static gen_helper_gvec_2_ptr * const f_fcle0[] = { + gen_helper_gvec_fcle0_h, + gen_helper_gvec_fcle0_s, + gen_helper_gvec_fcle0_d, +}; +TRANS(FCMLE0_v, do_gvec_op2_fpst, a->esz, a->q, a->rd, a->rn, 0, f_fcle0) + +static gen_helper_gvec_2_ptr * const f_frecpe[] = { + gen_helper_gvec_frecpe_h, + gen_helper_gvec_frecpe_s, + gen_helper_gvec_frecpe_d, +}; +static gen_helper_gvec_2_ptr * const f_frecpe_rpres[] = { + gen_helper_gvec_frecpe_h, + gen_helper_gvec_frecpe_rpres_s, + gen_helper_gvec_frecpe_d, +}; +TRANS(FRECPE_v, do_gvec_op2_ah_fpst, a->esz, a->q, a->rd, a->rn, 0, + s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? f_frecpe_rpres : f_frecpe) + +static gen_helper_gvec_2_ptr * const f_frsqrte[] = { + gen_helper_gvec_frsqrte_h, + gen_helper_gvec_frsqrte_s, + gen_helper_gvec_frsqrte_d, +}; +static gen_helper_gvec_2_ptr * const f_frsqrte_rpres[] = { + gen_helper_gvec_frsqrte_h, + gen_helper_gvec_frsqrte_rpres_s, + gen_helper_gvec_frsqrte_d, }; +TRANS(FRSQRTE_v, do_gvec_op2_ah_fpst, a->esz, a->q, a->rd, a->rn, 0, + s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? f_frsqrte_rpres : f_frsqrte) -static void disas_data_proc_simd(DisasContext *s, uint32_t insn) +static bool trans_FCVTL_v(DisasContext *s, arg_qrr_e *a) { - /* Note that this is called with all non-FP cases from - * table C3-6 so it must UNDEF for entries not specifically - * allocated to instructions in that table. + /* Handle 2-reg-misc ops which are widening (so each size element + * in the source becomes a 2*size element in the destination. + * The only instruction like this is FCVTL. */ - AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn); - if (fn) { - fn(s, insn); - } else { - unallocated_encoding(s); + int pass; + TCGv_ptr fpst; + + if (!fp_access_check(s)) { + return true; } -} -/* C3.6 Data processing - SIMD and floating point */ -static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn) -{ - if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) { - disas_data_proc_fp(s, insn); + if (a->esz == MO_64) { + /* 32 -> 64 bit fp conversion */ + TCGv_i64 tcg_res[2]; + TCGv_i32 tcg_op = tcg_temp_new_i32(); + int srcelt = a->q ? 
2 : 0; + + fpst = fpstatus_ptr(FPST_A64); + + for (pass = 0; pass < 2; pass++) { + tcg_res[pass] = tcg_temp_new_i64(); + read_vec_element_i32(s, tcg_op, a->rn, srcelt + pass, MO_32); + gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, fpst); + } + for (pass = 0; pass < 2; pass++) { + write_vec_element(s, tcg_res[pass], a->rd, pass, MO_64); + } } else { - /* SIMD, including crypto */ - disas_data_proc_simd(s, insn); + /* 16 -> 32 bit fp conversion */ + int srcelt = a->q ? 4 : 0; + TCGv_i32 tcg_res[4]; + TCGv_i32 ahp = get_ahp_flag(); + + fpst = fpstatus_ptr(FPST_A64_F16); + + for (pass = 0; pass < 4; pass++) { + tcg_res[pass] = tcg_temp_new_i32(); + read_vec_element_i32(s, tcg_res[pass], a->rn, srcelt + pass, MO_16); + gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass], + fpst, ahp); + } + for (pass = 0; pass < 4; pass++) { + write_vec_element_i32(s, tcg_res[pass], a->rd, pass, MO_32); + } } + clear_vec_high(s, true, a->rd); + return true; } static bool trans_OK(DisasContext *s, arg_OK *a) @@ -11878,37 +10052,6 @@ static bool trans_FAIL(DisasContext *s, arg_OK *a) return true; } -/** - * is_guarded_page: - * @env: The cpu environment - * @s: The DisasContext - * - * Return true if the page is guarded. - */ -static bool is_guarded_page(CPUARMState *env, DisasContext *s) -{ - uint64_t addr = s->base.pc_first; -#ifdef CONFIG_USER_ONLY - return page_get_flags(addr) & PAGE_BTI; -#else - CPUTLBEntryFull *full; - void *host; - int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx); - int flags; - - /* - * We test this immediately after reading an insn, which means - * that the TLB entry must be present and valid, and thus this - * access will never raise an exception. - */ - flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx, - false, &host, &full, 0); - assert(!(flags & TLB_INVALID_MASK)); - - return full->extra.arm.guarded; -#endif -} - /** * btype_destination_ok: * @insn: The instruction at the branch destination @@ -11961,24 +10104,6 @@ static bool btype_destination_ok(uint32_t insn, bool bt, int btype) return false; } -/* C3.1 A64 instruction index by encoding */ -static void disas_a64_legacy(DisasContext *s, uint32_t insn) -{ - switch (extract32(insn, 25, 4)) { - case 0x5: - case 0xd: /* Data processing - register */ - disas_data_proc_reg(s, insn); - break; - case 0x7: - case 0xf: /* Data processing - SIMD and floating point */ - disas_data_proc_simd_fp(s, insn); - break; - default: - unallocated_encoding(s); - break; - } -} - static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) { @@ -12014,8 +10139,10 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, dc->trap_eret = EX_TBFLAG_A64(tb_flags, TRAP_ERET); dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL); dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL); + dc->zt0_excp_el = EX_TBFLAG_A64(tb_flags, ZT0EXC_EL); dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16; dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16; + dc->max_svl = arm_cpu->sme_max_vq * 16; dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE); dc->bt = EX_TBFLAG_A64(tb_flags, BT); dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE); @@ -12033,6 +10160,8 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, dc->nv2 = EX_TBFLAG_A64(tb_flags, NV2); dc->nv2_mem_e20 = EX_TBFLAG_A64(tb_flags, NV2_MEM_E20); dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE); + dc->fpcr_ah = EX_TBFLAG_A64(tb_flags, AH); + dc->fpcr_nep = EX_TBFLAG_A64(tb_flags, NEP); dc->vec_len = 0; dc->vec_stride = 0; dc->cp_regs = 
arm_cpu->cp_regs; @@ -12126,7 +10255,7 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) * start of the TB. */ assert(s->base.num_insns == 1); - gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc)); + gen_helper_exception_pc_alignment(tcg_env, tcg_constant_vaddr(pc)); s->base.is_jmp = DISAS_NORETURN; s->base.pc_next = QEMU_ALIGN_UP(pc, 4); return; @@ -12137,8 +10266,8 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) s->insn = insn; s->base.pc_next = pc + 4; - s->fp_access_checked = false; - s->sve_access_checked = false; + s->fp_access_checked = 0; + s->sve_access_checked = 0; if (s->pstate_il) { /* @@ -12151,19 +10280,6 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) if (dc_isar_feature(aa64_bti, s)) { if (s->base.num_insns == 1) { - /* - * At the first insn of the TB, compute s->guarded_page. - * We delayed computing this until successfully reading - * the first insn of the TB, above. This (mostly) ensures - * that the softmmu tlb entry has been populated, and the - * page table GP bit is available. - * - * Note that we need to compute this even if btype == 0, - * because this value is used for BR instructions later - * where ENV is not available. - */ - s->guarded_page = is_guarded_page(env, s); - /* First insn can have btype set to non-zero. */ tcg_debug_assert(s->btype >= 0); @@ -12172,12 +10288,13 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) * priority -- below debugging exceptions but above most * everything else. This allows us to handle this now * instead of waiting until the insn is otherwise decoded. + * + * We can check all but the guarded page check here; + * defer the latter to a helper. */ if (s->btype != 0 - && s->guarded_page && !btype_destination_ok(insn, s->bt, s->btype)) { - gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype)); - return; + gen_helper_guarded_page_check(tcg_env); } } else { /* Not the first insn: btype must be 0. */ @@ -12193,7 +10310,7 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) if (!disas_a64(s, insn) && !disas_sme(s, insn) && !disas_sve(s, insn)) { - disas_a64_legacy(s, insn); + unallocated_encoding(s); } /* diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h index 0fcf7cb63ad..9c45f89305b 100644 --- a/target/arm/tcg/translate-a64.h +++ b/target/arm/tcg/translate-a64.h @@ -28,7 +28,7 @@ bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, bool sve_access_check(DisasContext *s); bool sme_enabled_check(DisasContext *s); bool sme_enabled_check_with_svcr(DisasContext *s, unsigned); -uint32_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs, +uint64_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs, uint32_t msz, bool is_write, uint32_t data); /* This function corresponds to CheckStreamingSVEEnabled. */ @@ -65,7 +65,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, static inline void assert_fp_access_checked(DisasContext *s) { #ifdef CONFIG_DEBUG_TCG - if (unlikely(!s->fp_access_checked || s->fp_excp_el)) { + if (unlikely(s->fp_access_checked <= 0)) { fprintf(stderr, "target-arm: FP access check missing for " "instruction 0x%08x\n", s->insn); abort(); @@ -185,6 +185,19 @@ static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno) return ret; } +/* + * Return the ARMFPStatusFlavour to use based on element size and + * whether FPCR.AH is set. 
+ */ +static inline ARMFPStatusFlavour select_ah_fpst(DisasContext *s, MemOp esz) +{ + if (s->fpcr_ah) { + return esz == MO_16 ? FPST_AH_F16 : FPST_AH; + } else { + return esz == MO_16 ? FPST_A64_F16 : FPST_A64; + } +} + bool disas_sve(DisasContext *, uint32_t); bool disas_sme(DisasContext *, uint32_t); @@ -212,7 +225,13 @@ void gen_gvec_usqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); -void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm); -void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm); +void gen_gvec_sve2_sqdmulh(unsigned vece, uint32_t rd_ofs, + uint32_t rn_ofs, uint32_t rm_ofs, + uint32_t opr_sz, uint32_t max_sz); + +void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, + int len, int rn, int imm, MemOp align); +void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, + int len, int rn, int imm, MemOp align); #endif /* TARGET_ARM_TRANSLATE_A64_H */ diff --git a/target/arm/tcg/translate-neon.c b/target/arm/tcg/translate-neon.c index 915c9e56db5..844d2e29e4c 100644 --- a/target/arm/tcg/translate-neon.c +++ b/target/arm/tcg/translate-neon.c @@ -148,6 +148,37 @@ static bool do_neon_ddda(DisasContext *s, int q, int vd, int vn, int vm, return true; } +static bool do_neon_ddda_env(DisasContext *s, int q, int vd, int vn, int vm, + int data, gen_helper_gvec_4_ptr *fn_gvec) +{ + /* UNDEF accesses to D16-D31 if they don't exist. */ + if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) { + return false; + } + + /* + * UNDEF accesses to odd registers for each bit of Q. + * Q will be 0b111 for all Q-reg instructions, otherwise + * when we have mixed Q- and D-reg inputs. + */ + if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) { + return false; + } + + if (!vfp_access_check(s)) { + return true; + } + + int opr_sz = q ? 
16 : 8; + tcg_gen_gvec_4_ptr(vfp_reg_offset(1, vd), + vfp_reg_offset(1, vn), + vfp_reg_offset(1, vm), + vfp_reg_offset(1, vd), + tcg_env, + opr_sz, opr_sz, data, fn_gvec); + return true; +} + static bool do_neon_ddda_fpst(DisasContext *s, int q, int vd, int vn, int vm, int data, ARMFPStatusFlavour fp_flavour, gen_helper_gvec_4_ptr *fn_gvec_ptr) @@ -240,7 +271,7 @@ static bool trans_VSDOT(DisasContext *s, arg_VSDOT *a) return false; } return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0, - gen_helper_gvec_sdot_b); + gen_helper_gvec_sdot_4b); } static bool trans_VUDOT(DisasContext *s, arg_VUDOT *a) @@ -249,7 +280,7 @@ static bool trans_VUDOT(DisasContext *s, arg_VUDOT *a) return false; } return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0, - gen_helper_gvec_udot_b); + gen_helper_gvec_udot_4b); } static bool trans_VUSDOT(DisasContext *s, arg_VUSDOT *a) @@ -258,7 +289,7 @@ static bool trans_VUSDOT(DisasContext *s, arg_VUSDOT *a) return false; } return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0, - gen_helper_gvec_usdot_b); + gen_helper_gvec_usdot_4b); } static bool trans_VDOT_b16(DisasContext *s, arg_VDOT_b16 *a) @@ -266,8 +297,8 @@ static bool trans_VDOT_b16(DisasContext *s, arg_VDOT_b16 *a) if (!dc_isar_feature(aa32_bf16, s)) { return false; } - return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0, - gen_helper_gvec_bfdot); + return do_neon_ddda_env(s, a->q * 7, a->vd, a->vn, a->vm, 0, + gen_helper_gvec_bfdot); } static bool trans_VFML(DisasContext *s, arg_VFML *a) @@ -325,7 +356,7 @@ static bool trans_VSDOT_scalar(DisasContext *s, arg_VSDOT_scalar *a) return false; } return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index, - gen_helper_gvec_sdot_idx_b); + gen_helper_gvec_sdot_idx_4b); } static bool trans_VUDOT_scalar(DisasContext *s, arg_VUDOT_scalar *a) @@ -334,7 +365,7 @@ static bool trans_VUDOT_scalar(DisasContext *s, arg_VUDOT_scalar *a) return false; } return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index, - gen_helper_gvec_udot_idx_b); + gen_helper_gvec_udot_idx_4b); } static bool trans_VUSDOT_scalar(DisasContext *s, arg_VUSDOT_scalar *a) @@ -343,7 +374,7 @@ static bool trans_VUSDOT_scalar(DisasContext *s, arg_VUSDOT_scalar *a) return false; } return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index, - gen_helper_gvec_usdot_idx_b); + gen_helper_gvec_usdot_idx_4b); } static bool trans_VSUDOT_scalar(DisasContext *s, arg_VSUDOT_scalar *a) @@ -352,7 +383,7 @@ static bool trans_VSUDOT_scalar(DisasContext *s, arg_VSUDOT_scalar *a) return false; } return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index, - gen_helper_gvec_sudot_idx_b); + gen_helper_gvec_sudot_idx_4b); } static bool trans_VDOT_b16_scal(DisasContext *s, arg_VDOT_b16_scal *a) @@ -360,8 +391,8 @@ static bool trans_VDOT_b16_scal(DisasContext *s, arg_VDOT_b16_scal *a) if (!dc_isar_feature(aa32_bf16, s)) { return false; } - return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index, - gen_helper_gvec_bfdot_idx); + return do_neon_ddda_env(s, a->q * 6, a->vd, a->vn, a->vm, a->index, + gen_helper_gvec_bfdot_idx); } static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a) @@ -979,8 +1010,8 @@ DO_3S_FP_GVEC(VACGE, gen_helper_gvec_facge_s, gen_helper_gvec_facge_h) DO_3S_FP_GVEC(VACGT, gen_helper_gvec_facgt_s, gen_helper_gvec_facgt_h) DO_3S_FP_GVEC(VMAX, gen_helper_gvec_fmax_s, gen_helper_gvec_fmax_h) DO_3S_FP_GVEC(VMIN, gen_helper_gvec_fmin_s, gen_helper_gvec_fmin_h) -DO_3S_FP_GVEC(VMLA, gen_helper_gvec_fmla_s, gen_helper_gvec_fmla_h) -DO_3S_FP_GVEC(VMLS, gen_helper_gvec_fmls_s, 
gen_helper_gvec_fmls_h) +DO_3S_FP_GVEC(VMLA, gen_helper_gvec_fmla_nf_s, gen_helper_gvec_fmla_nf_h) +DO_3S_FP_GVEC(VMLS, gen_helper_gvec_fmls_nf_s, gen_helper_gvec_fmls_nf_h) DO_3S_FP_GVEC(VFMA, gen_helper_gvec_vfma_s, gen_helper_gvec_vfma_h) DO_3S_FP_GVEC(VFMS, gen_helper_gvec_vfms_s, gen_helper_gvec_vfms_h) DO_3S_FP_GVEC(VRECPS, gen_helper_gvec_recps_nf_s, gen_helper_gvec_recps_nf_h) @@ -1068,144 +1099,18 @@ DO_2SH(VRSHR_S, gen_gvec_srshr) DO_2SH(VRSHR_U, gen_gvec_urshr) DO_2SH(VRSRA_S, gen_gvec_srsra) DO_2SH(VRSRA_U, gen_gvec_ursra) - -static bool trans_VSHR_S_2sh(DisasContext *s, arg_2reg_shift *a) -{ - /* Signed shift out of range results in all-sign-bits */ - a->shift = MIN(a->shift, (8 << a->size) - 1); - return do_vector_2sh(s, a, tcg_gen_gvec_sari); -} - -static void gen_zero_rd_2sh(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, - int64_t shift, uint32_t oprsz, uint32_t maxsz) -{ - tcg_gen_gvec_dup_imm(vece, rd_ofs, oprsz, maxsz, 0); -} - -static bool trans_VSHR_U_2sh(DisasContext *s, arg_2reg_shift *a) -{ - /* Shift out of range is architecturally valid and results in zero. */ - if (a->shift >= (8 << a->size)) { - return do_vector_2sh(s, a, gen_zero_rd_2sh); - } else { - return do_vector_2sh(s, a, tcg_gen_gvec_shri); - } -} - -static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a, - NeonGenTwo64OpEnvFn *fn) -{ - /* - * 2-reg-and-shift operations, size == 3 case, where the - * function needs to be passed tcg_env. - */ - TCGv_i64 constimm; - int pass; - - if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { - return false; - } - - /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_simd_r32, s) && - ((a->vd | a->vm) & 0x10)) { - return false; - } - - if ((a->vm | a->vd) & a->q) { - return false; - } - - if (!vfp_access_check(s)) { - return true; - } - - /* - * To avoid excessive duplication of ops we implement shift - * by immediate using the variable shift operations. - */ - constimm = tcg_constant_i64(dup_const(a->size, a->shift)); - - for (pass = 0; pass < a->q + 1; pass++) { - TCGv_i64 tmp = tcg_temp_new_i64(); - - read_neon_element64(tmp, a->vm, pass, MO_64); - fn(tmp, tcg_env, tmp, constimm); - write_neon_element64(tmp, a->vd, pass, MO_64); - } - return true; -} - -static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a, - NeonGenTwoOpEnvFn *fn) -{ - /* - * 2-reg-and-shift operations, size < 3 case, where the - * helper needs to be passed tcg_env. - */ - TCGv_i32 constimm, tmp; - int pass; - - if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { - return false; - } - - /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_simd_r32, s) && - ((a->vd | a->vm) & 0x10)) { - return false; - } - - if ((a->vm | a->vd) & a->q) { - return false; - } - - if (!vfp_access_check(s)) { - return true; - } - - /* - * To avoid excessive duplication of ops we implement shift - * by immediate using the variable shift operations. - */ - constimm = tcg_constant_i32(dup_const(a->size, a->shift)); - tmp = tcg_temp_new_i32(); - - for (pass = 0; pass < (a->q ? 
4 : 2); pass++) { - read_neon_element32(tmp, a->vm, pass, MO_32); - fn(tmp, tcg_env, tmp, constimm); - write_neon_element32(tmp, a->vd, pass, MO_32); - } - return true; -} - -#define DO_2SHIFT_ENV(INSN, FUNC) \ - static bool trans_##INSN##_64_2sh(DisasContext *s, arg_2reg_shift *a) \ - { \ - return do_2shift_env_64(s, a, gen_helper_neon_##FUNC##64); \ - } \ - static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \ - { \ - static NeonGenTwoOpEnvFn * const fns[] = { \ - gen_helper_neon_##FUNC##8, \ - gen_helper_neon_##FUNC##16, \ - gen_helper_neon_##FUNC##32, \ - }; \ - assert(a->size < ARRAY_SIZE(fns)); \ - return do_2shift_env_32(s, a, fns[a->size]); \ - } - -DO_2SHIFT_ENV(VQSHLU, qshlu_s) -DO_2SHIFT_ENV(VQSHL_U, qshl_u) -DO_2SHIFT_ENV(VQSHL_S, qshl_s) +DO_2SH(VSHR_S, gen_gvec_sshr) +DO_2SH(VSHR_U, gen_gvec_ushr) +DO_2SH(VQSHLU, gen_neon_sqshlui) +DO_2SH(VQSHL_U, gen_neon_uqshli) +DO_2SH(VQSHL_S, gen_neon_sqshli) static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a, NeonGenTwo64OpFn *shiftfn, - NeonGenNarrowEnvFn *narrowfn) + NeonGenOne64OpEnvFn *narrowfn) { /* 2-reg-and-shift narrowing-shift operations, size == 3 case */ - TCGv_i64 constimm, rm1, rm2; - TCGv_i32 rd; + TCGv_i64 constimm, rm1, rm2, rd; if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { return false; @@ -1232,7 +1137,7 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a, constimm = tcg_constant_i64(-a->shift); rm1 = tcg_temp_new_i64(); rm2 = tcg_temp_new_i64(); - rd = tcg_temp_new_i32(); + rd = tcg_temp_new_i64(); /* Load both inputs first to avoid potential overwrite if rm == rd */ read_neon_element64(rm1, a->vm, 0, MO_64); @@ -1240,18 +1145,18 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a, shiftfn(rm1, rm1, constimm); narrowfn(rd, tcg_env, rm1); - write_neon_element32(rd, a->vd, 0, MO_32); + write_neon_element64(rd, a->vd, 0, MO_32); shiftfn(rm2, rm2, constimm); narrowfn(rd, tcg_env, rm2); - write_neon_element32(rd, a->vd, 1, MO_32); + write_neon_element64(rd, a->vd, 1, MO_32); return true; } static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a, NeonGenTwoOpFn *shiftfn, - NeonGenNarrowEnvFn *narrowfn) + NeonGenOne64OpEnvFn *narrowfn) { /* 2-reg-and-shift narrowing-shift operations, size < 3 case */ TCGv_i32 constimm, rm1, rm2, rm3, rm4; @@ -1306,16 +1211,16 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a, tcg_gen_concat_i32_i64(rtmp, rm1, rm2); - narrowfn(rm1, tcg_env, rtmp); - write_neon_element32(rm1, a->vd, 0, MO_32); + narrowfn(rtmp, tcg_env, rtmp); + write_neon_element64(rtmp, a->vd, 0, MO_32); shiftfn(rm3, rm3, constimm); shiftfn(rm4, rm4, constimm); tcg_gen_concat_i32_i64(rtmp, rm3, rm4); - narrowfn(rm3, tcg_env, rtmp); - write_neon_element32(rm3, a->vd, 1, MO_32); + narrowfn(rtmp, tcg_env, rtmp); + write_neon_element64(rtmp, a->vd, 1, MO_32); return true; } @@ -1330,17 +1235,17 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a, return do_2shift_narrow_32(s, a, FUNC, NARROWFUNC); \ } -static void gen_neon_narrow_u32(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src) +static void gen_neon_narrow_u32(TCGv_i64 dest, TCGv_ptr env, TCGv_i64 src) { - tcg_gen_extrl_i64_i32(dest, src); + tcg_gen_ext32u_i64(dest, src); } -static void gen_neon_narrow_u16(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src) +static void gen_neon_narrow_u16(TCGv_i64 dest, TCGv_ptr env, TCGv_i64 src) { gen_helper_neon_narrow_u16(dest, src); } -static void gen_neon_narrow_u8(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src) +static void 
gen_neon_narrow_u8(TCGv_i64 dest, TCGv_ptr env, TCGv_i64 src) { gen_helper_neon_narrow_u8(dest, src); } @@ -1504,13 +1409,13 @@ static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a, DO_FP_2SH(VCVT_SF, gen_helper_gvec_vcvt_sf) DO_FP_2SH(VCVT_UF, gen_helper_gvec_vcvt_uf) -DO_FP_2SH(VCVT_FS, gen_helper_gvec_vcvt_fs) -DO_FP_2SH(VCVT_FU, gen_helper_gvec_vcvt_fu) +DO_FP_2SH(VCVT_FS, gen_helper_gvec_vcvt_rz_fs) +DO_FP_2SH(VCVT_FU, gen_helper_gvec_vcvt_rz_fu) DO_FP_2SH(VCVT_SH, gen_helper_gvec_vcvt_sh) DO_FP_2SH(VCVT_UH, gen_helper_gvec_vcvt_uh) -DO_FP_2SH(VCVT_HS, gen_helper_gvec_vcvt_hs) -DO_FP_2SH(VCVT_HU, gen_helper_gvec_vcvt_hu) +DO_FP_2SH(VCVT_HS, gen_helper_gvec_vcvt_rz_hs) +DO_FP_2SH(VCVT_HU, gen_helper_gvec_vcvt_rz_hu) static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a, GVecGen2iFn *fn) @@ -1655,8 +1560,8 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, NULL, NULL, \ }; \ static NeonGenTwo64OpFn * const addfn[] = { \ - gen_helper_neon_##OP##l_u16, \ - gen_helper_neon_##OP##l_u32, \ + tcg_gen_vec_##OP##16_i64, \ + tcg_gen_vec_##OP##32_i64, \ tcg_gen_##OP##_i64, \ NULL, \ }; \ @@ -1734,8 +1639,8 @@ static bool do_narrow_3d(DisasContext *s, arg_3diff *a, static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \ { \ static NeonGenTwo64OpFn * const addfn[] = { \ - gen_helper_neon_##OP##l_u16, \ - gen_helper_neon_##OP##l_u32, \ + tcg_gen_vec_##OP##16_i64, \ + tcg_gen_vec_##OP##32_i64, \ tcg_gen_##OP##_i64, \ NULL, \ }; \ @@ -1856,8 +1761,8 @@ static bool trans_VABAL_S_3d(DisasContext *s, arg_3diff *a) NULL, }; static NeonGenTwo64OpFn * const addfn[] = { - gen_helper_neon_addl_u16, - gen_helper_neon_addl_u32, + tcg_gen_vec_add16_i64, + tcg_gen_vec_add32_i64, tcg_gen_add_i64, NULL, }; @@ -1874,8 +1779,8 @@ static bool trans_VABAL_U_3d(DisasContext *s, arg_3diff *a) NULL, }; static NeonGenTwo64OpFn * const addfn[] = { - gen_helper_neon_addl_u16, - gen_helper_neon_addl_u32, + tcg_gen_vec_add16_i64, + tcg_gen_vec_add32_i64, tcg_gen_add_i64, NULL, }; @@ -1935,8 +1840,8 @@ static bool trans_VMULL_U_3d(DisasContext *s, arg_3diff *a) NULL, \ }; \ static NeonGenTwo64OpFn * const accfn[] = { \ - gen_helper_neon_##ACC##l_u16, \ - gen_helper_neon_##ACC##l_u32, \ + tcg_gen_vec_##ACC##16_i64, \ + tcg_gen_vec_##ACC##32_i64, \ tcg_gen_##ACC##_i64, \ NULL, \ }; \ @@ -2466,7 +2371,7 @@ static bool trans_VMULL_U_2sc(DisasContext *s, arg_2scalar *a) }; \ static NeonGenTwo64OpFn * const accfn[] = { \ NULL, \ - gen_helper_neon_##ACC##l_u32, \ + tcg_gen_vec_##ACC##32_i64, \ tcg_gen_##ACC##_i64, \ NULL, \ }; \ @@ -2660,204 +2565,6 @@ static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a) return true; } -static bool trans_VREV64(DisasContext *s, arg_VREV64 *a) -{ - int pass, half; - TCGv_i32 tmp[2]; - - if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { - return false; - } - - /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_simd_r32, s) && - ((a->vd | a->vm) & 0x10)) { - return false; - } - - if ((a->vd | a->vm) & a->q) { - return false; - } - - if (a->size == 3) { - return false; - } - - if (!vfp_access_check(s)) { - return true; - } - - tmp[0] = tcg_temp_new_i32(); - tmp[1] = tcg_temp_new_i32(); - - for (pass = 0; pass < (a->q ? 
2 : 1); pass++) { - for (half = 0; half < 2; half++) { - read_neon_element32(tmp[half], a->vm, pass * 2 + half, MO_32); - switch (a->size) { - case 0: - tcg_gen_bswap32_i32(tmp[half], tmp[half]); - break; - case 1: - gen_swap_half(tmp[half], tmp[half]); - break; - case 2: - break; - default: - g_assert_not_reached(); - } - } - write_neon_element32(tmp[1], a->vd, pass * 2, MO_32); - write_neon_element32(tmp[0], a->vd, pass * 2 + 1, MO_32); - } - return true; -} - -static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a, - NeonGenWidenFn *widenfn, - NeonGenTwo64OpFn *opfn, - NeonGenTwo64OpFn *accfn) -{ - /* - * Pairwise long operations: widen both halves of the pair, - * combine the pairs with the opfn, and then possibly accumulate - * into the destination with the accfn. - */ - int pass; - - if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { - return false; - } - - /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_simd_r32, s) && - ((a->vd | a->vm) & 0x10)) { - return false; - } - - if ((a->vd | a->vm) & a->q) { - return false; - } - - if (!widenfn) { - return false; - } - - if (!vfp_access_check(s)) { - return true; - } - - for (pass = 0; pass < a->q + 1; pass++) { - TCGv_i32 tmp; - TCGv_i64 rm0_64, rm1_64, rd_64; - - rm0_64 = tcg_temp_new_i64(); - rm1_64 = tcg_temp_new_i64(); - rd_64 = tcg_temp_new_i64(); - - tmp = tcg_temp_new_i32(); - read_neon_element32(tmp, a->vm, pass * 2, MO_32); - widenfn(rm0_64, tmp); - read_neon_element32(tmp, a->vm, pass * 2 + 1, MO_32); - widenfn(rm1_64, tmp); - - opfn(rd_64, rm0_64, rm1_64); - - if (accfn) { - TCGv_i64 tmp64 = tcg_temp_new_i64(); - read_neon_element64(tmp64, a->vd, pass, MO_64); - accfn(rd_64, tmp64, rd_64); - } - write_neon_element64(rd_64, a->vd, pass, MO_64); - } - return true; -} - -static bool trans_VPADDL_S(DisasContext *s, arg_2misc *a) -{ - static NeonGenWidenFn * const widenfn[] = { - gen_helper_neon_widen_s8, - gen_helper_neon_widen_s16, - tcg_gen_ext_i32_i64, - NULL, - }; - static NeonGenTwo64OpFn * const opfn[] = { - gen_helper_neon_paddl_u16, - gen_helper_neon_paddl_u32, - tcg_gen_add_i64, - NULL, - }; - - return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], NULL); -} - -static bool trans_VPADDL_U(DisasContext *s, arg_2misc *a) -{ - static NeonGenWidenFn * const widenfn[] = { - gen_helper_neon_widen_u8, - gen_helper_neon_widen_u16, - tcg_gen_extu_i32_i64, - NULL, - }; - static NeonGenTwo64OpFn * const opfn[] = { - gen_helper_neon_paddl_u16, - gen_helper_neon_paddl_u32, - tcg_gen_add_i64, - NULL, - }; - - return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], NULL); -} - -static bool trans_VPADAL_S(DisasContext *s, arg_2misc *a) -{ - static NeonGenWidenFn * const widenfn[] = { - gen_helper_neon_widen_s8, - gen_helper_neon_widen_s16, - tcg_gen_ext_i32_i64, - NULL, - }; - static NeonGenTwo64OpFn * const opfn[] = { - gen_helper_neon_paddl_u16, - gen_helper_neon_paddl_u32, - tcg_gen_add_i64, - NULL, - }; - static NeonGenTwo64OpFn * const accfn[] = { - gen_helper_neon_addl_u16, - gen_helper_neon_addl_u32, - tcg_gen_add_i64, - NULL, - }; - - return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], - accfn[a->size]); -} - -static bool trans_VPADAL_U(DisasContext *s, arg_2misc *a) -{ - static NeonGenWidenFn * const widenfn[] = { - gen_helper_neon_widen_u8, - gen_helper_neon_widen_u16, - tcg_gen_extu_i32_i64, - NULL, - }; - static NeonGenTwo64OpFn * const opfn[] = { - gen_helper_neon_paddl_u16, - gen_helper_neon_paddl_u32, - tcg_gen_add_i64, - NULL, - }; - static NeonGenTwo64OpFn * 
const accfn[] = { - gen_helper_neon_addl_u16, - gen_helper_neon_addl_u32, - tcg_gen_add_i64, - NULL, - }; - - return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], - accfn[a->size]); -} - typedef void ZipFn(TCGv_ptr, TCGv_ptr); static bool do_zip_uzp(DisasContext *s, arg_2misc *a, @@ -2931,10 +2638,9 @@ static bool trans_VZIP(DisasContext *s, arg_2misc *a) } static bool do_vmovn(DisasContext *s, arg_2misc *a, - NeonGenNarrowEnvFn *narrowfn) + NeonGenOne64OpEnvFn *narrowfn) { - TCGv_i64 rm; - TCGv_i32 rd0, rd1; + TCGv_i64 rm, rd0, rd1; if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { return false; @@ -2959,22 +2665,22 @@ static bool do_vmovn(DisasContext *s, arg_2misc *a, } rm = tcg_temp_new_i64(); - rd0 = tcg_temp_new_i32(); - rd1 = tcg_temp_new_i32(); + rd0 = tcg_temp_new_i64(); + rd1 = tcg_temp_new_i64(); read_neon_element64(rm, a->vm, 0, MO_64); narrowfn(rd0, tcg_env, rm); read_neon_element64(rm, a->vm, 1, MO_64); narrowfn(rd1, tcg_env, rm); - write_neon_element32(rd0, a->vd, 0, MO_32); - write_neon_element32(rd1, a->vd, 1, MO_32); + write_neon_element64(rd0, a->vd, 0, MO_32); + write_neon_element64(rd1, a->vd, 1, MO_32); return true; } #define DO_VMOVN(INSN, FUNC) \ static bool trans_##INSN(DisasContext *s, arg_2misc *a) \ { \ - static NeonGenNarrowEnvFn * const narrowfn[] = { \ + static NeonGenOne64OpEnvFn * const narrowfn[] = { \ FUNC##8, \ FUNC##16, \ FUNC##32, \ @@ -3216,6 +2922,13 @@ DO_2MISC_VEC(VCGT0, gen_gvec_cgt0) DO_2MISC_VEC(VCLE0, gen_gvec_cle0) DO_2MISC_VEC(VCGE0, gen_gvec_cge0) DO_2MISC_VEC(VCLT0, gen_gvec_clt0) +DO_2MISC_VEC(VCLS, gen_gvec_cls) +DO_2MISC_VEC(VCLZ, gen_gvec_clz) +DO_2MISC_VEC(VREV64, gen_gvec_rev64) +DO_2MISC_VEC(VPADDL_S, gen_gvec_saddlp) +DO_2MISC_VEC(VPADDL_U, gen_gvec_uaddlp) +DO_2MISC_VEC(VPADAL_S, gen_gvec_sadalp) +DO_2MISC_VEC(VPADAL_U, gen_gvec_uadalp) static bool trans_VMVN(DisasContext *s, arg_2misc *a) { @@ -3225,6 +2938,30 @@ static bool trans_VMVN(DisasContext *s, arg_2misc *a) return do_2misc_vec(s, a, tcg_gen_gvec_not); } +static bool trans_VCNT(DisasContext *s, arg_2misc *a) +{ + if (a->size != 0) { + return false; + } + return do_2misc_vec(s, a, gen_gvec_cnt); +} + +static bool trans_VREV16(DisasContext *s, arg_2misc *a) +{ + if (a->size != 0) { + return false; + } + return do_2misc_vec(s, a, gen_gvec_rev16); +} + +static bool trans_VREV32(DisasContext *s, arg_2misc *a) +{ + if (a->size != 0 && a->size != 1) { + return false; + } + return do_2misc_vec(s, a, gen_gvec_rev32); +} + #define WRAP_2M_3_OOL_FN(WRAPNAME, FUNC, DATA) \ static void WRAPNAME(unsigned vece, uint32_t rd_ofs, \ uint32_t rm_ofs, uint32_t oprsz, \ @@ -3304,68 +3041,6 @@ static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn) return true; } -static bool trans_VREV32(DisasContext *s, arg_2misc *a) -{ - static NeonGenOneOpFn * const fn[] = { - tcg_gen_bswap32_i32, - gen_swap_half, - NULL, - NULL, - }; - return do_2misc(s, a, fn[a->size]); -} - -static bool trans_VREV16(DisasContext *s, arg_2misc *a) -{ - if (a->size != 0) { - return false; - } - return do_2misc(s, a, gen_rev16); -} - -static bool trans_VCLS(DisasContext *s, arg_2misc *a) -{ - static NeonGenOneOpFn * const fn[] = { - gen_helper_neon_cls_s8, - gen_helper_neon_cls_s16, - gen_helper_neon_cls_s32, - NULL, - }; - return do_2misc(s, a, fn[a->size]); -} - -static void do_VCLZ_32(TCGv_i32 rd, TCGv_i32 rm) -{ - tcg_gen_clzi_i32(rd, rm, 32); -} - -static bool trans_VCLZ(DisasContext *s, arg_2misc *a) -{ - static NeonGenOneOpFn * const fn[] = { - gen_helper_neon_clz_u8, - gen_helper_neon_clz_u16, - 
do_VCLZ_32, - NULL, - }; - return do_2misc(s, a, fn[a->size]); -} - -static bool trans_VCNT(DisasContext *s, arg_2misc *a) -{ - if (a->size != 0) { - return false; - } - return do_2misc(s, a, gen_helper_neon_cnt_u8); -} - -static void gen_VABS_F(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, - uint32_t oprsz, uint32_t maxsz) -{ - tcg_gen_gvec_andi(vece, rd_ofs, rm_ofs, - vece == MO_16 ? 0x7fff : 0x7fffffff, - oprsz, maxsz); -} - static bool trans_VABS_F(DisasContext *s, arg_2misc *a) { if (a->size == MO_16) { @@ -3375,15 +3050,7 @@ static bool trans_VABS_F(DisasContext *s, arg_2misc *a) } else if (a->size != MO_32) { return false; } - return do_2misc_vec(s, a, gen_VABS_F); -} - -static void gen_VNEG_F(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, - uint32_t oprsz, uint32_t maxsz) -{ - tcg_gen_gvec_xori(vece, rd_ofs, rm_ofs, - vece == MO_16 ? 0x8000 : 0x80000000, - oprsz, maxsz); + return do_2misc_vec(s, a, gen_gvec_fabs); } static bool trans_VNEG_F(DisasContext *s, arg_2misc *a) @@ -3395,7 +3062,7 @@ static bool trans_VNEG_F(DisasContext *s, arg_2misc *a) } else if (a->size != MO_32) { return false; } - return do_2misc_vec(s, a, gen_VNEG_F); + return do_2misc_vec(s, a, gen_gvec_fneg); } static bool trans_VRECPE(DisasContext *s, arg_2misc *a) @@ -3403,7 +3070,7 @@ static bool trans_VRECPE(DisasContext *s, arg_2misc *a) if (a->size != 2) { return false; } - return do_2misc(s, a, gen_helper_recpe_u32); + return do_2misc_vec(s, a, gen_gvec_urecpe); } static bool trans_VRSQRTE(DisasContext *s, arg_2misc *a) @@ -3411,7 +3078,7 @@ static bool trans_VRSQRTE(DisasContext *s, arg_2misc *a) if (a->size != 2) { return false; } - return do_2misc(s, a, gen_helper_rsqrte_u32); + return do_2misc_vec(s, a, gen_gvec_ursqrte); } #define WRAP_1OP_ENV_FN(WRAPNAME, FUNC) \ @@ -3699,8 +3366,8 @@ static bool trans_VMMLA_b16(DisasContext *s, arg_VMMLA_b16 *a) if (!dc_isar_feature(aa32_bf16, s)) { return false; } - return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0, - gen_helper_gvec_bfmmla); + return do_neon_ddda_env(s, 7, a->vd, a->vn, a->vm, 0, + gen_helper_gvec_bfmmla); } static bool trans_VFMA_b16(DisasContext *s, arg_VFMA_b16 *a) diff --git a/target/arm/tcg/translate-sme.c b/target/arm/tcg/translate-sme.c index ae42ddef7b3..091c56da4f4 100644 --- a/target/arm/tcg/translate-sme.c +++ b/target/arm/tcg/translate-sme.c @@ -27,16 +27,25 @@ #include "decode-sme.c.inc" +static bool sme2_zt0_enabled_check(DisasContext *s) +{ + if (!sme_za_enabled_check(s)) { + return false; + } + if (s->zt0_excp_el) { + gen_exception_insn_el(s, 0, EXCP_UDEF, + syn_smetrap(SME_ET_InaccessibleZT0, false), + s->zt0_excp_el); + return false; + } + return true; +} -/* - * Resolve tile.size[index] to a host pointer, where tile and index - * are always decoded together, dependent on the element size. - */ +/* Resolve tile.size[rs+imm] to a host pointer. */ static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs, - int tile_index, bool vertical) + int tile, int imm, int div_len, + int vec_mod, bool vertical) { - int tile = tile_index >> (4 - esz); - int index = esz == MO_128 ? 0 : extract32(tile_index, 0, 4 - esz); int pos, len, offset; TCGv_i32 tmp; TCGv_ptr addr; @@ -44,10 +53,23 @@ static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs, /* Compute the final index, which is Rs+imm. */ tmp = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(tmp, cpu_reg(s, rs)); - tcg_gen_addi_i32(tmp, tmp, index); + /* + * Round the vector index down to a multiple of vec_mod if necessary. 
+ * We do this before adding the offset, to handle cases like + * MOVA (tile to vector, 2 registers) where we want to call this + * several times in a loop with an increasing offset. We rely on + * the instruction encodings always forcing the initial offset in + * [rs + offset] to be a multiple of vec_mod. The pseudocode usually + * does the round-down after adding the offset rather than before, + * but MOVA is an exception. + */ + if (vec_mod > 1) { + tcg_gen_andc_i32(tmp, tmp, tcg_constant_i32(vec_mod - 1)); + } + tcg_gen_addi_i32(tmp, tmp, imm); /* Prepare a power-of-two modulo via extraction of @len bits. */ - len = ctz32(streaming_vec_reg_size(s)) - esz; + len = ctz32(streaming_vec_reg_size(s) / div_len) - esz; if (!len) { /* @@ -92,7 +114,7 @@ static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs, offset = tile * sizeof(ARMVectorReg); /* Include the byte offset of zarray to make this relative to env. */ - offset += offsetof(CPUARMState, zarray); + offset += offsetof(CPUARMState, za_state.za); tcg_gen_addi_i32(tmp, tmp, offset); /* Add the byte offset to env to produce the final pointer. */ @@ -103,6 +125,14 @@ static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs, return addr; } +/* Resolve ZArray[rs+imm] to a host pointer. */ +static TCGv_ptr get_zarray(DisasContext *s, int rs, int imm, + int div_len, int vec_mod) +{ + /* ZA[n] equates to ZA0H.B[n]. */ + return get_tile_rowcol(s, MO_8, rs, 0, imm, div_len, vec_mod, false); +} + /* * Resolve tile.size[0] to a host pointer. * Used by e.g. outer product insns where we require the entire tile. @@ -112,7 +142,7 @@ static TCGv_ptr get_tile(DisasContext *s, int esz, int tile) TCGv_ptr addr = tcg_temp_new_ptr(); int offset; - offset = tile * sizeof(ARMVectorReg) + offsetof(CPUARMState, zarray); + offset = tile * sizeof(ARMVectorReg) + offsetof(CPUARMState, za_state.za); tcg_gen_addi_ptr(addr, tcg_env, offset); return addr; @@ -130,7 +160,40 @@ static bool trans_ZERO(DisasContext *s, arg_ZERO *a) return true; } -static bool trans_MOVA(DisasContext *s, arg_MOVA *a) +static bool trans_ZERO_zt0(DisasContext *s, arg_ZERO_zt0 *a) +{ + if (!dc_isar_feature(aa64_sme2, s)) { + return false; + } + if (sme_enabled_check(s) && sme2_zt0_enabled_check(s)) { + tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUARMState, za_state.zt0), + sizeof_field(CPUARMState, za_state.zt0), + sizeof_field(CPUARMState, za_state.zt0), 0); + } + return true; +} + +static bool trans_ZERO_za(DisasContext *s, arg_ZERO_za *a) +{ + if (!dc_isar_feature(aa64_sme2p1, s)) { + return false; + } + if (sme_smza_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + int vstride = svl / a->ngrp; + TCGv_ptr t_za = get_zarray(s, a->rv, a->off, a->ngrp, a->nvec); + + for (int r = 0; r < a->ngrp; ++r) { + for (int i = 0; i < a->nvec; ++i) { + int o_za = (r * vstride + i) * sizeof(ARMVectorReg); + tcg_gen_gvec_dup_imm_var(MO_64, t_za, o_za, svl, svl, 0); + } + } + } + return true; +} + +static bool do_mova_tile(DisasContext *s, arg_mova_p *a, bool to_vec) { static gen_helper_gvec_4 * const h_fns[5] = { gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h, @@ -152,14 +215,11 @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a) TCGv_i32 t_desc; int svl; - if (!dc_isar_feature(aa64_sme, s)) { - return false; - } if (!sme_smza_enabled_check(s)) { return true; } - t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v); + t_za = get_tile_rowcol(s, a->esz, a->rs, a->za, a->off, 1, 0, a->v); t_zr = vec_full_reg_ptr(s, a->zr); t_pg = pred_full_reg_ptr(s, a->pg); @@ -168,14 
+228,14 @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a) if (a->v) { /* Vertical slice -- use sme mova helpers. */ - if (a->to_vec) { + if (to_vec) { zc_fns[a->esz](t_zr, t_za, t_pg, t_desc); } else { cz_fns[a->esz](t_za, t_zr, t_pg, t_desc); } } else { /* Horizontal slice -- reuse sve sel helpers. */ - if (a->to_vec) { + if (to_vec) { h_fns[a->esz](t_zr, t_za, t_zr, t_pg, t_desc); } else { h_fns[a->esz](t_za, t_zr, t_za, t_pg, t_desc); @@ -184,9 +244,150 @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a) return true; } +TRANS_FEAT(MOVA_tz, aa64_sme, do_mova_tile, a, false) +TRANS_FEAT(MOVA_zt, aa64_sme, do_mova_tile, a, true) + +static bool do_mova_tile_n(DisasContext *s, arg_mova_t *a, int n, + bool to_vec, bool zero) +{ + static gen_helper_gvec_2 * const cz_fns[] = { + gen_helper_sme2_mova_cz_b, gen_helper_sme2_mova_cz_h, + gen_helper_sme2_mova_cz_s, gen_helper_sme2_mova_cz_d, + }; + static gen_helper_gvec_2 * const zc_fns[] = { + gen_helper_sme2_mova_zc_b, gen_helper_sme2_mova_zc_h, + gen_helper_sme2_mova_zc_s, gen_helper_sme2_mova_zc_d, + }; + static gen_helper_gvec_2 * const zc_z_fns[] = { + gen_helper_sme2p1_movaz_zc_b, gen_helper_sme2p1_movaz_zc_h, + gen_helper_sme2p1_movaz_zc_s, gen_helper_sme2p1_movaz_zc_d, + gen_helper_sme2p1_movaz_zc_q, + }; + TCGv_ptr t_za; + int svl, bytes_per_op = n << a->esz; + + /* + * The MaxImplementedSVL check happens in the decode pseudocode, + * before the SM+ZA enabled check in the operation pseudocode. + * This will (currently) only fail for NREG=4, ESZ=MO_64. + */ + if (s->max_svl < bytes_per_op) { + unallocated_encoding(s); + return true; + } + + assert(a->esz <= MO_64 + zero); + + if (!sme_smza_enabled_check(s)) { + return true; + } + + svl = streaming_vec_reg_size(s); + + /* + * The CurrentVL check happens in the operation pseudocode, + * after the SM+ZA enabled check. 
+ */ + if (svl < bytes_per_op) { + unallocated_encoding(s); + return true; + } + + if (a->v) { + TCGv_i32 t_desc = tcg_constant_i32(simd_desc(svl, svl, 0)); + + for (int i = 0; i < n; ++i) { + TCGv_ptr t_zr = vec_full_reg_ptr(s, a->zr * n + i); + t_za = get_tile_rowcol(s, a->esz, a->rs, a->za, + a->off * n + i, 1, n, a->v); + if (zero) { + zc_z_fns[a->esz](t_zr, t_za, t_desc); + } else if (to_vec) { + zc_fns[a->esz](t_zr, t_za, t_desc); + } else { + cz_fns[a->esz](t_za, t_zr, t_desc); + } + } + } else { + for (int i = 0; i < n; ++i) { + int o_zr = vec_full_reg_offset(s, a->zr * n + i); + t_za = get_tile_rowcol(s, a->esz, a->rs, a->za, + a->off * n + i, 1, n, a->v); + if (to_vec) { + tcg_gen_gvec_mov_var(MO_8, tcg_env, o_zr, t_za, 0, svl, svl); + if (zero) { + tcg_gen_gvec_dup_imm_var(MO_8, t_za, 0, svl, svl, 0); + } + } else { + tcg_gen_gvec_mov_var(MO_8, t_za, 0, tcg_env, o_zr, svl, svl); + } + } + } + return true; +} + +TRANS_FEAT(MOVA_tz2, aa64_sme2, do_mova_tile_n, a, 2, false, false) +TRANS_FEAT(MOVA_tz4, aa64_sme2, do_mova_tile_n, a, 4, false, false) +TRANS_FEAT(MOVA_zt2, aa64_sme2, do_mova_tile_n, a, 2, true, false) +TRANS_FEAT(MOVA_zt4, aa64_sme2, do_mova_tile_n, a, 4, true, false) + +TRANS_FEAT(MOVAZ_zt, aa64_sme2p1, do_mova_tile_n, a, 1, true, true) +TRANS_FEAT(MOVAZ_zt2, aa64_sme2p1, do_mova_tile_n, a, 2, true, true) +TRANS_FEAT(MOVAZ_zt4, aa64_sme2p1, do_mova_tile_n, a, 4, true, true) + +static bool do_mova_array_n(DisasContext *s, arg_mova_a *a, int n, + bool to_vec, bool zero) +{ + TCGv_ptr t_za; + int svl; + + if (!sme_smza_enabled_check(s)) { + return true; + } + + svl = streaming_vec_reg_size(s); + t_za = get_zarray(s, a->rv, a->off, n, 0); + + for (int i = 0; i < n; ++i) { + int o_za = (svl / n * sizeof(ARMVectorReg)) * i; + int o_zr = vec_full_reg_offset(s, a->zr * n + i); + + if (to_vec) { + tcg_gen_gvec_mov_var(MO_8, tcg_env, o_zr, t_za, o_za, svl, svl); + if (zero) { + tcg_gen_gvec_dup_imm_var(MO_8, t_za, o_za, svl, svl, 0); + } + } else { + tcg_gen_gvec_mov_var(MO_8, t_za, o_za, tcg_env, o_zr, svl, svl); + } + } + return true; +} + +TRANS_FEAT(MOVA_az2, aa64_sme2, do_mova_array_n, a, 2, false, false) +TRANS_FEAT(MOVA_az4, aa64_sme2, do_mova_array_n, a, 4, false, false) +TRANS_FEAT(MOVA_za2, aa64_sme2, do_mova_array_n, a, 2, true, false) +TRANS_FEAT(MOVA_za4, aa64_sme2, do_mova_array_n, a, 4, true, false) + +TRANS_FEAT(MOVAZ_za2, aa64_sme2p1, do_mova_array_n, a, 2, true, true) +TRANS_FEAT(MOVAZ_za4, aa64_sme2p1, do_mova_array_n, a, 4, true, true) + +static bool do_movt(DisasContext *s, arg_MOVT_rzt *a, + void (*func)(TCGv_i64, TCGv_ptr, tcg_target_long)) +{ + if (sme2_zt0_enabled_check(s)) { + func(cpu_reg(s, a->rt), tcg_env, + offsetof(CPUARMState, za_state.zt0) + a->off * 8); + } + return true; +} + +TRANS_FEAT(MOVT_rzt, aa64_sme2, do_movt, a, tcg_gen_ld_i64) +TRANS_FEAT(MOVT_ztr, aa64_sme2, do_movt, a, tcg_gen_st_i64) + static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) { - typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i32); + typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i64); /* * Indexed by [esz][be][v][mte][st], which is (except for load/store) @@ -214,7 +415,7 @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) TCGv_ptr t_za, t_pg; TCGv_i64 addr; - uint32_t desc; + uint64_t desc; bool be = s->be_data == MO_BE; bool mte = s->mte_active[0]; @@ -225,7 +426,7 @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) return true; } - t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v); + t_za = get_tile_rowcol(s, 
a->esz, a->rs, a->za, a->off, 1, 0, a->v); t_pg = pred_full_reg_ptr(s, a->pg); addr = tcg_temp_new_i64(); @@ -239,32 +440,41 @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) desc = make_svemte_desc(s, streaming_vec_reg_size(s), 1, a->esz, a->st, 0); fns[a->esz][be][a->v][mte][a->st](tcg_env, t_za, t_pg, addr, - tcg_constant_i32(desc)); + tcg_constant_i64(desc)); return true; } -typedef void GenLdStR(DisasContext *, TCGv_ptr, int, int, int, int); +typedef void GenLdStR(DisasContext *, TCGv_ptr, int, int, int, int, MemOp); static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn) { - int svl = streaming_vec_reg_size(s); - int imm = a->imm; - TCGv_ptr base; + if (sme_za_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + int imm = a->imm; + TCGv_ptr base = get_zarray(s, a->rv, imm, 1, 0); - if (!sme_za_enabled_check(s)) { - return true; + fn(s, base, 0, svl, a->rn, imm * svl, + s->align_mem ? MO_ALIGN_16 : MO_UNALN); } - - /* ZA[n] equates to ZA0H.B[n]. */ - base = get_tile_rowcol(s, MO_8, a->rv, imm, false); - - fn(s, base, 0, svl, a->rn, imm * svl); return true; } TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr) TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str) +static bool do_ldst_zt0(DisasContext *s, arg_ldstzt0 *a, GenLdStR *fn) +{ + if (sme2_zt0_enabled_check(s)) { + fn(s, tcg_env, offsetof(CPUARMState, za_state.zt0), + sizeof_field(CPUARMState, za_state.zt0), a->rn, 0, + s->align_mem ? MO_ALIGN_16 : MO_UNALN); + } + return true; +} + +TRANS_FEAT(LDR_zt0, aa64_sme2, do_ldst_zt0, a, gen_sve_ldr) +TRANS_FEAT(STR_zt0, aa64_sme2, do_ldst_zt0, a, gen_sve_str) + static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz, gen_helper_gvec_4 *fn) { @@ -316,7 +526,7 @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz, gen_helper_gvec_5_ptr *fn) { int svl = streaming_vec_reg_size(s); - uint32_t desc = simd_desc(svl, svl, a->sub); + uint32_t desc = simd_desc(svl, svl, 0); TCGv_ptr za, zn, zm, pn, pm, fpst; if (!sme_smza_enabled_check(s)) { @@ -338,7 +548,7 @@ static bool do_outprod_env(DisasContext *s, arg_op *a, MemOp esz, gen_helper_gvec_5_ptr *fn) { int svl = streaming_vec_reg_size(s); - uint32_t desc = simd_desc(svl, svl, a->sub); + uint32_t desc = simd_desc(svl, svl, 0); TCGv_ptr za, zn, zm, pn, pm; if (!sme_smza_enabled_check(s)) { @@ -355,15 +565,32 @@ static bool do_outprod_env(DisasContext *s, arg_op *a, MemOp esz, return true; } -TRANS_FEAT(FMOPA_h, aa64_sme, do_outprod_env, a, - MO_32, gen_helper_sme_fmopa_h) -TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, - MO_32, FPST_FPCR, gen_helper_sme_fmopa_s) -TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, - MO_64, FPST_FPCR, gen_helper_sme_fmopa_d) +TRANS_FEAT(FMOPA_w_h, aa64_sme, do_outprod_env, a, MO_32, + !a->sub ? gen_helper_sme_fmopa_w_h + : !s->fpcr_ah ? gen_helper_sme_fmops_w_h + : gen_helper_sme_ah_fmops_w_h) +TRANS_FEAT(FMOPA_h, aa64_sme_f16f16, do_outprod_fpst, a, MO_16, FPST_ZA_F16, + !a->sub ? gen_helper_sme_fmopa_h + : !s->fpcr_ah ? gen_helper_sme_fmops_h + : gen_helper_sme_ah_fmops_h) +TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, FPST_ZA, + !a->sub ? gen_helper_sme_fmopa_s + : !s->fpcr_ah ? gen_helper_sme_fmops_s + : gen_helper_sme_ah_fmops_s) +TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, FPST_ZA, + !a->sub ? gen_helper_sme_fmopa_d + : !s->fpcr_ah ? gen_helper_sme_fmops_d + : gen_helper_sme_ah_fmops_d) + +TRANS_FEAT(BFMOPA, aa64_sme_b16b16, do_outprod_fpst, a, MO_16, FPST_ZA, + !a->sub ? gen_helper_sme_bfmopa + : !s->fpcr_ah ? 
gen_helper_sme_bfmops + : gen_helper_sme_ah_bfmops) -/* TODO: FEAT_EBF16 */ -TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa) +TRANS_FEAT(BFMOPA_w, aa64_sme, do_outprod_env, a, MO_32, + !a->sub ? gen_helper_sme_bfmopa_w + : !s->fpcr_ah ? gen_helper_sme_bfmops_w + : gen_helper_sme_ah_bfmops_w) TRANS_FEAT(SMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_smopa_s) TRANS_FEAT(UMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_umopa_s) @@ -374,3 +601,1173 @@ TRANS_FEAT(SMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_smopa_ TRANS_FEAT(UMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_umopa_d) TRANS_FEAT(SUMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_sumopa_d) TRANS_FEAT(USMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_usmopa_d) + +TRANS_FEAT(BMOPA, aa64_sme2, do_outprod, a, MO_32, gen_helper_sme2_bmopa_s) +TRANS_FEAT(SMOPA2_s, aa64_sme2, do_outprod, a, MO_32, gen_helper_sme2_smopa2_s) +TRANS_FEAT(UMOPA2_s, aa64_sme2, do_outprod, a, MO_32, gen_helper_sme2_umopa2_s) + +static bool do_z2z_n1(DisasContext *s, arg_z2z_en *a, GVecGen3Fn *fn) +{ + int esz, dn, vsz, mofs, n; + bool overlap = false; + + if (!sme_sm_enabled_check(s)) { + return true; + } + + esz = a->esz; + n = a->n; + dn = a->zdn; + mofs = vec_full_reg_offset(s, a->zm); + vsz = streaming_vec_reg_size(s); + + for (int i = 0; i < n; i++) { + int dofs = vec_full_reg_offset(s, dn + i); + if (dofs == mofs) { + overlap = true; + } else { + fn(esz, dofs, dofs, mofs, vsz, vsz); + } + } + if (overlap) { + fn(esz, mofs, mofs, mofs, vsz, vsz); + } + return true; +} + +static void gen_sme2_srshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz) +{ + static gen_helper_gvec_3 * const fns[] = { + gen_helper_gvec_srshl_b, gen_helper_sme2_srshl_h, + gen_helper_sme2_srshl_s, gen_helper_sme2_srshl_d, + }; + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, 0, fns[vece]); +} + +static void gen_sme2_urshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz) +{ + static gen_helper_gvec_3 * const fns[] = { + gen_helper_gvec_urshl_b, gen_helper_sme2_urshl_h, + gen_helper_sme2_urshl_s, gen_helper_sme2_urshl_d, + }; + tcg_debug_assert(vece <= MO_64); + tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, 0, fns[vece]); +} + +TRANS_FEAT(ADD_n1, aa64_sme2, do_z2z_n1, a, tcg_gen_gvec_add) +TRANS_FEAT(SMAX_n1, aa64_sme2, do_z2z_n1, a, tcg_gen_gvec_smax) +TRANS_FEAT(SMIN_n1, aa64_sme2, do_z2z_n1, a, tcg_gen_gvec_smin) +TRANS_FEAT(UMAX_n1, aa64_sme2, do_z2z_n1, a, tcg_gen_gvec_umax) +TRANS_FEAT(UMIN_n1, aa64_sme2, do_z2z_n1, a, tcg_gen_gvec_umin) +TRANS_FEAT(SRSHL_n1, aa64_sme2, do_z2z_n1, a, gen_sme2_srshl) +TRANS_FEAT(URSHL_n1, aa64_sme2, do_z2z_n1, a, gen_sme2_urshl) +TRANS_FEAT(SQDMULH_n1, aa64_sme2, do_z2z_n1, a, gen_gvec_sve2_sqdmulh) + +static bool do_z2z_nn(DisasContext *s, arg_z2z_en *a, GVecGen3Fn *fn) +{ + int esz, dn, dm, vsz, n; + + if (!sme_sm_enabled_check(s)) { + return true; + } + + esz = a->esz; + n = a->n; + dn = a->zdn; + dm = a->zm; + vsz = streaming_vec_reg_size(s); + + for (int i = 0; i < n; i++) { + int dofs = vec_full_reg_offset(s, dn + i); + int mofs = vec_full_reg_offset(s, dm + i); + + fn(esz, dofs, dofs, mofs, vsz, vsz); + } + return true; +} + +TRANS_FEAT(SMAX_nn, aa64_sme2, do_z2z_nn, a, tcg_gen_gvec_smax) +TRANS_FEAT(SMIN_nn, aa64_sme2, do_z2z_nn, a, tcg_gen_gvec_smin) +TRANS_FEAT(UMAX_nn, 
aa64_sme2, do_z2z_nn, a, tcg_gen_gvec_umax) +TRANS_FEAT(UMIN_nn, aa64_sme2, do_z2z_nn, a, tcg_gen_gvec_umin) +TRANS_FEAT(SRSHL_nn, aa64_sme2, do_z2z_nn, a, gen_sme2_srshl) +TRANS_FEAT(URSHL_nn, aa64_sme2, do_z2z_nn, a, gen_sme2_urshl) +TRANS_FEAT(SQDMULH_nn, aa64_sme2, do_z2z_nn, a, gen_gvec_sve2_sqdmulh) + +static bool do_z2z_n1_fpst(DisasContext *s, arg_z2z_en *a, + gen_helper_gvec_3_ptr * const fns[4]) +{ + int esz = a->esz, n, dn, vsz, mofs; + bool overlap = false; + gen_helper_gvec_3_ptr *fn; + TCGv_ptr fpst; + + /* These insns use MO_8 to encode BFloat16. */ + if (esz == MO_8 && !dc_isar_feature(aa64_sme_b16b16, s)) { + return false; + } + if (!sme_sm_enabled_check(s)) { + return true; + } + + fpst = fpstatus_ptr(esz == MO_16 ? FPST_A64_F16 : FPST_A64); + fn = fns[esz]; + n = a->n; + dn = a->zdn; + mofs = vec_full_reg_offset(s, a->zm); + vsz = streaming_vec_reg_size(s); + + for (int i = 0; i < n; i++) { + int dofs = vec_full_reg_offset(s, dn + i); + if (dofs == mofs) { + overlap = true; + } else { + tcg_gen_gvec_3_ptr(dofs, dofs, mofs, fpst, vsz, vsz, 0, fn); + } + } + if (overlap) { + tcg_gen_gvec_3_ptr(mofs, mofs, mofs, fpst, vsz, vsz, 0, fn); + } + return true; +} + +static bool do_z2z_nn_fpst(DisasContext *s, arg_z2z_en *a, + gen_helper_gvec_3_ptr * const fns[4]) +{ + int esz = a->esz, n, dn, dm, vsz; + gen_helper_gvec_3_ptr *fn; + TCGv_ptr fpst; + + if (esz == MO_8 && !dc_isar_feature(aa64_sme_b16b16, s)) { + return false; + } + if (!sme_sm_enabled_check(s)) { + return true; + } + + fpst = fpstatus_ptr(esz == MO_16 ? FPST_A64_F16 : FPST_A64); + fn = fns[esz]; + n = a->n; + dn = a->zdn; + dm = a->zm; + vsz = streaming_vec_reg_size(s); + + for (int i = 0; i < n; i++) { + int dofs = vec_full_reg_offset(s, dn + i); + int mofs = vec_full_reg_offset(s, dm + i); + + tcg_gen_gvec_3_ptr(dofs, dofs, mofs, fpst, vsz, vsz, 0, fn); + } + return true; +} + +static gen_helper_gvec_3_ptr * const f_vector_fmax[2][4] = { + { gen_helper_gvec_fmax_b16, + gen_helper_gvec_fmax_h, + gen_helper_gvec_fmax_s, + gen_helper_gvec_fmax_d }, + { gen_helper_gvec_ah_fmax_b16, + gen_helper_gvec_ah_fmax_h, + gen_helper_gvec_ah_fmax_s, + gen_helper_gvec_ah_fmax_d }, +}; +TRANS_FEAT(FMAX_n1, aa64_sme2, do_z2z_n1_fpst, a, f_vector_fmax[s->fpcr_ah]) +TRANS_FEAT(FMAX_nn, aa64_sme2, do_z2z_nn_fpst, a, f_vector_fmax[s->fpcr_ah]) + +static gen_helper_gvec_3_ptr * const f_vector_fmin[2][4] = { + { gen_helper_gvec_fmin_b16, + gen_helper_gvec_fmin_h, + gen_helper_gvec_fmin_s, + gen_helper_gvec_fmin_d }, + { gen_helper_gvec_ah_fmin_b16, + gen_helper_gvec_ah_fmin_h, + gen_helper_gvec_ah_fmin_s, + gen_helper_gvec_ah_fmin_d }, +}; +TRANS_FEAT(FMIN_n1, aa64_sme2, do_z2z_n1_fpst, a, f_vector_fmin[s->fpcr_ah]) +TRANS_FEAT(FMIN_nn, aa64_sme2, do_z2z_nn_fpst, a, f_vector_fmin[s->fpcr_ah]) + +static gen_helper_gvec_3_ptr * const f_vector_fmaxnm[4] = { + gen_helper_gvec_fmaxnum_b16, + gen_helper_gvec_fmaxnum_h, + gen_helper_gvec_fmaxnum_s, + gen_helper_gvec_fmaxnum_d, +}; +TRANS_FEAT(FMAXNM_n1, aa64_sme2, do_z2z_n1_fpst, a, f_vector_fmaxnm) +TRANS_FEAT(FMAXNM_nn, aa64_sme2, do_z2z_nn_fpst, a, f_vector_fmaxnm) + +static gen_helper_gvec_3_ptr * const f_vector_fminnm[4] = { + gen_helper_gvec_fminnum_b16, + gen_helper_gvec_fminnum_h, + gen_helper_gvec_fminnum_s, + gen_helper_gvec_fminnum_d, +}; +TRANS_FEAT(FMINNM_n1, aa64_sme2, do_z2z_n1_fpst, a, f_vector_fminnm) +TRANS_FEAT(FMINNM_nn, aa64_sme2, do_z2z_nn_fpst, a, f_vector_fminnm) + +/* Add/Sub vector Z[m] to each Z[n*N] with result in ZA[d*N]. 
*/ +static bool do_azz_n1(DisasContext *s, arg_azz_n *a, int esz, + GVecGen3FnVar *fn) +{ + TCGv_ptr t_za; + int svl, n, o_zm; + + if (!sme_smza_enabled_check(s)) { + return true; + } + + n = a->n; + t_za = get_zarray(s, a->rv, a->off, n, 0); + o_zm = vec_full_reg_offset(s, a->zm); + svl = streaming_vec_reg_size(s); + + for (int i = 0; i < n; ++i) { + int o_za = (svl / n * sizeof(ARMVectorReg)) * i; + int o_zn = vec_full_reg_offset(s, (a->zn + i) % 32); + + fn(esz, t_za, o_za, tcg_env, o_zn, tcg_env, o_zm, svl, svl); + } + return true; +} + +TRANS_FEAT(ADD_azz_n1_s, aa64_sme2, do_azz_n1, a, MO_32, tcg_gen_gvec_add_var) +TRANS_FEAT(SUB_azz_n1_s, aa64_sme2, do_azz_n1, a, MO_32, tcg_gen_gvec_sub_var) +TRANS_FEAT(ADD_azz_n1_d, aa64_sme2_i16i64, do_azz_n1, a, MO_64, tcg_gen_gvec_add_var) +TRANS_FEAT(SUB_azz_n1_d, aa64_sme2_i16i64, do_azz_n1, a, MO_64, tcg_gen_gvec_sub_var) + +/* Add/Sub each vector Z[m*N] to each Z[n*N] with result in ZA[d*N]. */ +static bool do_azz_nn(DisasContext *s, arg_azz_n *a, int esz, + GVecGen3FnVar *fn) +{ + TCGv_ptr t_za; + int svl, n; + + if (!sme_smza_enabled_check(s)) { + return true; + } + + n = a->n; + t_za = get_zarray(s, a->rv, a->off, n, 1); + svl = streaming_vec_reg_size(s); + + for (int i = 0; i < n; ++i) { + int o_za = (svl / n * sizeof(ARMVectorReg)) * i; + int o_zn = vec_full_reg_offset(s, a->zn + i); + int o_zm = vec_full_reg_offset(s, a->zm + i); + + fn(esz, t_za, o_za, tcg_env, o_zn, tcg_env, o_zm, svl, svl); + } + return true; +} + +TRANS_FEAT(ADD_azz_nn_s, aa64_sme2, do_azz_nn, a, MO_32, tcg_gen_gvec_add_var) +TRANS_FEAT(SUB_azz_nn_s, aa64_sme2, do_azz_nn, a, MO_32, tcg_gen_gvec_sub_var) +TRANS_FEAT(ADD_azz_nn_d, aa64_sme2_i16i64, do_azz_nn, a, MO_64, tcg_gen_gvec_add_var) +TRANS_FEAT(SUB_azz_nn_d, aa64_sme2_i16i64, do_azz_nn, a, MO_64, tcg_gen_gvec_sub_var) + +/* Add/Sub each ZA[d*N] += Z[m*N] */ +static bool do_aaz(DisasContext *s, arg_az_n *a, int esz, GVecGen3FnVar *fn) +{ + TCGv_ptr t_za; + int svl, n; + + if (!sme_smza_enabled_check(s)) { + return true; + } + + n = a->n; + t_za = get_zarray(s, a->rv, a->off, n, 0); + svl = streaming_vec_reg_size(s); + + for (int i = 0; i < n; ++i) { + int o_za = (svl / n * sizeof(ARMVectorReg)) * i; + int o_zm = vec_full_reg_offset(s, a->zm + i); + + fn(esz, t_za, o_za, t_za, o_za, tcg_env, o_zm, svl, svl); + } + return true; +} + +TRANS_FEAT(ADD_aaz_s, aa64_sme2, do_aaz, a, MO_32, tcg_gen_gvec_add_var) +TRANS_FEAT(SUB_aaz_s, aa64_sme2, do_aaz, a, MO_32, tcg_gen_gvec_sub_var) +TRANS_FEAT(ADD_aaz_d, aa64_sme2_i16i64, do_aaz, a, MO_64, tcg_gen_gvec_add_var) +TRANS_FEAT(SUB_aaz_d, aa64_sme2_i16i64, do_aaz, a, MO_64, tcg_gen_gvec_sub_var) + +/* + * Expand array multi-vector single (n1), array multi-vector (nn), + * and array multi-vector indexed (nx), for floating-point accumulate. + * multi: true for nn, false for n1. + * fpst: >= 0 to set ptr argument for FPST_*, < 0 for ENV. + * data: stuff for simd_data, including any index. 
+ */ +#define FPST_ENV -1 + +static bool do_azz_fp(DisasContext *s, int nreg, int nsel, + int rv, int off, int zn, int zm, + int data, int shsel, bool multi, int fpst, + gen_helper_gvec_3_ptr *fn) +{ + if (sme_smza_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + int vstride = svl / nreg; + TCGv_ptr t_za = get_zarray(s, rv, off, nreg, nsel); + TCGv_ptr t, ptr; + + if (fpst >= 0) { + ptr = fpstatus_ptr(fpst); + } else { + ptr = tcg_env; + } + t = tcg_temp_new_ptr(); + + for (int r = 0; r < nreg; ++r) { + TCGv_ptr t_zn = vec_full_reg_ptr(s, zn); + TCGv_ptr t_zm = vec_full_reg_ptr(s, zm); + + for (int i = 0; i < nsel; ++i) { + int o_za = (r * vstride + i) * sizeof(ARMVectorReg); + int desc = simd_desc(svl, svl, data | (i << shsel)); + + tcg_gen_addi_ptr(t, t_za, o_za); + fn(t, t_zn, t_zm, ptr, tcg_constant_i32(desc)); + } + + /* + * For multiple-and-single vectors, Zn may wrap. + * For multiple vectors, both Zn and Zm are aligned. + */ + zn = (zn + 1) % 32; + zm += multi; + } + } + return true; +} + +static bool do_azz_acc_fp(DisasContext *s, int nreg, int nsel, + int rv, int off, int zn, int zm, + int data, int shsel, bool multi, int fpst, + gen_helper_gvec_4_ptr *fn) +{ + if (sme_smza_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + int vstride = svl / nreg; + TCGv_ptr t_za = get_zarray(s, rv, off, nreg, nsel); + TCGv_ptr t, ptr; + + if (fpst >= 0) { + ptr = fpstatus_ptr(fpst); + } else { + ptr = tcg_env; + } + t = tcg_temp_new_ptr(); + + for (int r = 0; r < nreg; ++r) { + TCGv_ptr t_zn = vec_full_reg_ptr(s, zn); + TCGv_ptr t_zm = vec_full_reg_ptr(s, zm); + + for (int i = 0; i < nsel; ++i) { + int o_za = (r * vstride + i) * sizeof(ARMVectorReg); + int desc = simd_desc(svl, svl, data | (i << shsel)); + + tcg_gen_addi_ptr(t, t_za, o_za); + fn(t, t_zn, t_zm, t, ptr, tcg_constant_i32(desc)); + } + + /* + * For multiple-and-single vectors, Zn may wrap. + * For multiple vectors, both Zn and Zm are aligned. + */ + zn = (zn + 1) % 32; + zm += multi; + } + } + return true; +} + +static bool do_fmlal(DisasContext *s, arg_azz_n *a, bool sub, bool multi) +{ + return do_azz_acc_fp(s, a->n, 2, a->rv, a->off, a->zn, a->zm, + (1 << 2) | sub, 1, + multi, FPST_ENV, gen_helper_sve2_fmlal_zzzw_s); +} + +TRANS_FEAT(FMLAL_n1, aa64_sme2, do_fmlal, a, false, false) +TRANS_FEAT(FMLSL_n1, aa64_sme2, do_fmlal, a, true, false) +TRANS_FEAT(FMLAL_nn, aa64_sme2, do_fmlal, a, false, true) +TRANS_FEAT(FMLSL_nn, aa64_sme2, do_fmlal, a, true, true) + +static bool do_fmlal_nx(DisasContext *s, arg_azx_n *a, bool sub) +{ + return do_azz_acc_fp(s, a->n, 2, a->rv, a->off, a->zn, a->zm, + (a->idx << 3) | (1 << 2) | sub, 1, + false, FPST_ENV, gen_helper_sve2_fmlal_zzxw_s); +} + +TRANS_FEAT(FMLAL_nx, aa64_sme2, do_fmlal_nx, a, false) +TRANS_FEAT(FMLSL_nx, aa64_sme2, do_fmlal_nx, a, true) + +static bool do_bfmlal(DisasContext *s, arg_azz_n *a, bool sub, bool multi) +{ + return do_azz_acc_fp(s, a->n, 2, a->rv, a->off, a->zn, a->zm, + 0, 0, multi, FPST_ZA, + (!sub ? gen_helper_gvec_bfmlal + : s->fpcr_ah ? gen_helper_gvec_ah_bfmlsl + : gen_helper_gvec_bfmlsl)); +} + +TRANS_FEAT(BFMLAL_n1, aa64_sme2, do_bfmlal, a, false, false) +TRANS_FEAT(BFMLSL_n1, aa64_sme2, do_bfmlal, a, true, false) +TRANS_FEAT(BFMLAL_nn, aa64_sme2, do_bfmlal, a, false, true) +TRANS_FEAT(BFMLSL_nn, aa64_sme2, do_bfmlal, a, true, true) + +static bool do_bfmlal_nx(DisasContext *s, arg_azx_n *a, bool sub) +{ + return do_azz_acc_fp(s, a->n, 2, a->rv, a->off, a->zn, a->zm, + a->idx << 1, 0, false, FPST_ZA, + !sub ? 
gen_helper_gvec_bfmlal_idx + : s->fpcr_ah ? gen_helper_gvec_ah_bfmlsl_idx + : gen_helper_gvec_bfmlsl_idx); +} + +TRANS_FEAT(BFMLAL_nx, aa64_sme2, do_bfmlal_nx, a, false) +TRANS_FEAT(BFMLSL_nx, aa64_sme2, do_bfmlal_nx, a, true) + +static bool do_fdot(DisasContext *s, arg_azz_n *a, bool multi) +{ + return do_azz_acc_fp(s, a->n, 1, a->rv, a->off, a->zn, a->zm, 1, 0, + multi, FPST_ENV, gen_helper_sme2_fdot_h); +} + +TRANS_FEAT(FDOT_n1, aa64_sme2, do_fdot, a, false) +TRANS_FEAT(FDOT_nn, aa64_sme2, do_fdot, a, true) + +static bool do_fdot_nx(DisasContext *s, arg_azx_n *a) +{ + return do_azz_acc_fp(s, a->n, 1, a->rv, a->off, a->zn, a->zm, + a->idx | (1 << 2), 0, false, FPST_ENV, + gen_helper_sme2_fdot_idx_h); +} + +TRANS_FEAT(FDOT_nx, aa64_sme2, do_fdot_nx, a) + +static bool do_bfdot(DisasContext *s, arg_azz_n *a, bool multi) +{ + return do_azz_acc_fp(s, a->n, 1, a->rv, a->off, a->zn, a->zm, 0, 0, + multi, FPST_ENV, gen_helper_gvec_bfdot); +} + +TRANS_FEAT(BFDOT_n1, aa64_sme2, do_bfdot, a, false) +TRANS_FEAT(BFDOT_nn, aa64_sme2, do_bfdot, a, true) + +static bool do_bfdot_nx(DisasContext *s, arg_azx_n *a) +{ + return do_azz_acc_fp(s, a->n, 1, a->rv, a->off, a->zn, a->zm, a->idx, 0, + false, FPST_ENV, gen_helper_gvec_bfdot_idx); +} + +TRANS_FEAT(BFDOT_nx, aa64_sme2, do_bfdot_nx, a) + +static bool do_vdot(DisasContext *s, arg_azx_n *a, gen_helper_gvec_4_ptr *fn) +{ + if (sme_smza_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + int vstride = svl / 2; + TCGv_ptr t_za = get_zarray(s, a->rv, a->off, 2, 1); + TCGv_ptr t_zn = vec_full_reg_ptr(s, a->zn); + TCGv_ptr t_zm = vec_full_reg_ptr(s, a->zm); + TCGv_ptr t = tcg_temp_new_ptr(); + + for (int i = 0; i < 2; ++i) { + int o_za = i * vstride * sizeof(ARMVectorReg); + int desc = simd_desc(svl, svl, a->idx | (i << 2)); + + tcg_gen_addi_ptr(t, t_za, o_za); + fn(t, t_zn, t_zm, t, tcg_env, tcg_constant_i32(desc)); + } + } + return true; +} + +TRANS_FEAT(FVDOT, aa64_sme, do_vdot, a, gen_helper_sme2_fvdot_idx_h) +TRANS_FEAT(BFVDOT, aa64_sme, do_vdot, a, gen_helper_sme2_bfvdot_idx) + +static bool do_fmla(DisasContext *s, arg_azz_n *a, bool multi, + ARMFPStatusFlavour fpst, gen_helper_gvec_3_ptr *fn) +{ + return do_azz_fp(s, a->n, 1, a->rv, a->off, a->zn, a->zm, + 0, 0, multi, fpst, fn); +} + +TRANS_FEAT(FMLA_n1_h, aa64_sme_f16f16, do_fmla, a, false, FPST_ZA_F16, + gen_helper_gvec_vfma_h) +TRANS_FEAT(FMLS_n1_h, aa64_sme_f16f16, do_fmla, a, false, FPST_ZA_F16, + s->fpcr_ah ? gen_helper_gvec_ah_vfms_h : gen_helper_gvec_vfms_h) +TRANS_FEAT(FMLA_nn_h, aa64_sme_f16f16, do_fmla, a, true, FPST_ZA_F16, + gen_helper_gvec_vfma_h) +TRANS_FEAT(FMLS_nn_h, aa64_sme_f16f16, do_fmla, a, true, FPST_ZA_F16, + s->fpcr_ah ? gen_helper_gvec_ah_vfms_h : gen_helper_gvec_vfms_h) + +TRANS_FEAT(FMLA_n1_s, aa64_sme2, do_fmla, a, false, FPST_ZA, + gen_helper_gvec_vfma_s) +TRANS_FEAT(FMLS_n1_s, aa64_sme2, do_fmla, a, false, FPST_ZA, + s->fpcr_ah ? gen_helper_gvec_ah_vfms_s : gen_helper_gvec_vfms_s) +TRANS_FEAT(FMLA_nn_s, aa64_sme2, do_fmla, a, true, FPST_ZA, + gen_helper_gvec_vfma_s) +TRANS_FEAT(FMLS_nn_s, aa64_sme2, do_fmla, a, true, FPST_ZA, + s->fpcr_ah ? gen_helper_gvec_ah_vfms_s : gen_helper_gvec_vfms_s) + +TRANS_FEAT(FMLA_n1_d, aa64_sme2_f64f64, do_fmla, a, false, FPST_ZA, + gen_helper_gvec_vfma_d) +TRANS_FEAT(FMLS_n1_d, aa64_sme2_f64f64, do_fmla, a, false, FPST_ZA, + s->fpcr_ah ? 
gen_helper_gvec_ah_vfms_d : gen_helper_gvec_vfms_d) +TRANS_FEAT(FMLA_nn_d, aa64_sme2_f64f64, do_fmla, a, true, FPST_ZA, + gen_helper_gvec_vfma_d) +TRANS_FEAT(FMLS_nn_d, aa64_sme2_f64f64, do_fmla, a, true, FPST_ZA, + s->fpcr_ah ? gen_helper_gvec_ah_vfms_d : gen_helper_gvec_vfms_d) + +TRANS_FEAT(BFMLA_n1, aa64_sme_b16b16, do_fmla, a, false, FPST_ZA, + gen_helper_gvec_bfmla) +TRANS_FEAT(BFMLS_n1, aa64_sme_b16b16, do_fmla, a, false, FPST_ZA, + s->fpcr_ah ? gen_helper_gvec_ah_bfmls : gen_helper_gvec_bfmls) +TRANS_FEAT(BFMLA_nn, aa64_sme_b16b16, do_fmla, a, true, FPST_ZA, + gen_helper_gvec_bfmla) +TRANS_FEAT(BFMLS_nn, aa64_sme_b16b16, do_fmla, a, true, FPST_ZA, + s->fpcr_ah ? gen_helper_gvec_ah_bfmls : gen_helper_gvec_bfmls) + +static bool do_fmla_nx(DisasContext *s, arg_azx_n *a, + ARMFPStatusFlavour fpst, gen_helper_gvec_4_ptr *fn) +{ + return do_azz_acc_fp(s, a->n, 1, a->rv, a->off, a->zn, a->zm, + a->idx, 0, false, fpst, fn); +} + +TRANS_FEAT(FMLA_nx_h, aa64_sme_f16f16, do_fmla_nx, a, FPST_ZA_F16, + gen_helper_gvec_fmla_idx_h) +TRANS_FEAT(FMLS_nx_h, aa64_sme_f16f16, do_fmla_nx, a, FPST_ZA_F16, + s->fpcr_ah ? gen_helper_gvec_ah_fmls_idx_h : gen_helper_gvec_fmls_idx_h) +TRANS_FEAT(FMLA_nx_s, aa64_sme2, do_fmla_nx, a, FPST_ZA, + gen_helper_gvec_fmla_idx_s) +TRANS_FEAT(FMLS_nx_s, aa64_sme2, do_fmla_nx, a, FPST_ZA, + s->fpcr_ah ? gen_helper_gvec_ah_fmls_idx_s : gen_helper_gvec_fmls_idx_s) +TRANS_FEAT(FMLA_nx_d, aa64_sme2_f64f64, do_fmla_nx, a, FPST_ZA, + gen_helper_gvec_fmla_idx_d) +TRANS_FEAT(FMLS_nx_d, aa64_sme2_f64f64, do_fmla_nx, a, FPST_ZA, + s->fpcr_ah ? gen_helper_gvec_ah_fmls_idx_d : gen_helper_gvec_fmls_idx_d) + +TRANS_FEAT(BFMLA_nx, aa64_sme_b16b16, do_fmla_nx, a, FPST_ZA, + gen_helper_gvec_bfmla_idx) +TRANS_FEAT(BFMLS_nx, aa64_sme_b16b16, do_fmla_nx, a, FPST_ZA, + s->fpcr_ah ? gen_helper_gvec_ah_bfmls_idx : gen_helper_gvec_bfmls_idx) + +static bool do_faddsub(DisasContext *s, arg_az_n *a, ARMFPStatusFlavour fpst, + gen_helper_gvec_3_ptr *fn) +{ + if (sme_smza_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + int n = a->n; + int zm = a->zm; + int vstride = svl / n; + TCGv_ptr t_za = get_zarray(s, a->rv, a->off, n, 0); + TCGv_ptr ptr = fpstatus_ptr(fpst); + TCGv_ptr t = tcg_temp_new_ptr(); + + for (int r = 0; r < n; ++r) { + TCGv_ptr t_zm = vec_full_reg_ptr(s, zm + r); + int o_za = r * vstride * sizeof(ARMVectorReg); + int desc = simd_desc(svl, svl, 0); + + tcg_gen_addi_ptr(t, t_za, o_za); + fn(t, t, t_zm, ptr, tcg_constant_i32(desc)); + } + } + return true; +} + +TRANS_FEAT(FADD_nn_h, aa64_sme_f16f16, do_faddsub, a, + FPST_ZA_F16, gen_helper_gvec_fadd_h) +TRANS_FEAT(FSUB_nn_h, aa64_sme_f16f16, do_faddsub, a, + FPST_ZA_F16, gen_helper_gvec_fsub_h) + +TRANS_FEAT(FADD_nn_s, aa64_sme2, do_faddsub, a, + FPST_ZA, gen_helper_gvec_fadd_s) +TRANS_FEAT(FSUB_nn_s, aa64_sme2, do_faddsub, a, + FPST_ZA, gen_helper_gvec_fsub_s) + +TRANS_FEAT(FADD_nn_d, aa64_sme2_f64f64, do_faddsub, a, + FPST_ZA, gen_helper_gvec_fadd_d) +TRANS_FEAT(FSUB_nn_d, aa64_sme2_f64f64, do_faddsub, a, + FPST_ZA, gen_helper_gvec_fsub_d) + +TRANS_FEAT(BFADD_nn, aa64_sme_b16b16, do_faddsub, a, + FPST_ZA, gen_helper_gvec_bfadd) +TRANS_FEAT(BFSUB_nn, aa64_sme_b16b16, do_faddsub, a, + FPST_ZA, gen_helper_gvec_bfsub) + +/* + * Expand array multi-vector single (n1), array multi-vector (nn), + * and array multi-vector indexed (nx), for integer accumulate. + * multi: true for nn, false for n1. + * data: stuff for simd_data, including any index. 
+ */ +static bool do_azz_acc(DisasContext *s, int nreg, int nsel, + int rv, int off, int zn, int zm, + int data, int shsel, bool multi, + gen_helper_gvec_4 *fn) +{ + if (sme_smza_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + int vstride = svl / nreg; + TCGv_ptr t_za = get_zarray(s, rv, off, nreg, nsel); + TCGv_ptr t = tcg_temp_new_ptr(); + + for (int r = 0; r < nreg; ++r) { + TCGv_ptr t_zn = vec_full_reg_ptr(s, zn); + TCGv_ptr t_zm = vec_full_reg_ptr(s, zm); + + for (int i = 0; i < nsel; ++i) { + int o_za = (r * vstride + i) * sizeof(ARMVectorReg); + int desc = simd_desc(svl, svl, data | (i << shsel)); + + tcg_gen_addi_ptr(t, t_za, o_za); + fn(t, t_zn, t_zm, t, tcg_constant_i32(desc)); + } + + /* + * For multiple-and-single vectors, Zn may wrap. + * For multiple vectors, both Zn and Zm are aligned. + */ + zn = (zn + 1) % 32; + zm += multi; + } + } + return true; +} + +static bool do_dot(DisasContext *s, arg_azz_n *a, bool multi, + gen_helper_gvec_4 *fn) +{ + return do_azz_acc(s, a->n, 1, a->rv, a->off, a->zn, a->zm, + 0, 0, multi, fn); +} + +static void gen_helper_gvec_sudot_4b(TCGv_ptr d, TCGv_ptr n, TCGv_ptr m, + TCGv_ptr a, TCGv_i32 desc) +{ + gen_helper_gvec_usdot_4b(d, m, n, a, desc); +} + +TRANS_FEAT(USDOT_n1, aa64_sme2, do_dot, a, false, gen_helper_gvec_usdot_4b) +TRANS_FEAT(SUDOT_n1, aa64_sme2, do_dot, a, false, gen_helper_gvec_sudot_4b) +TRANS_FEAT(SDOT_n1_2h, aa64_sme2, do_dot, a, false, gen_helper_gvec_sdot_2h) +TRANS_FEAT(UDOT_n1_2h, aa64_sme2, do_dot, a, false, gen_helper_gvec_udot_2h) +TRANS_FEAT(SDOT_n1_4b, aa64_sme2, do_dot, a, false, gen_helper_gvec_sdot_4b) +TRANS_FEAT(UDOT_n1_4b, aa64_sme2, do_dot, a, false, gen_helper_gvec_udot_4b) +TRANS_FEAT(SDOT_n1_4h, aa64_sme2_i16i64, do_dot, a, false, gen_helper_gvec_sdot_4h) +TRANS_FEAT(UDOT_n1_4h, aa64_sme2_i16i64, do_dot, a, false, gen_helper_gvec_udot_4h) + +TRANS_FEAT(USDOT_nn, aa64_sme2, do_dot, a, true, gen_helper_gvec_usdot_4b) +TRANS_FEAT(SDOT_nn_2h, aa64_sme2, do_dot, a, true, gen_helper_gvec_sdot_2h) +TRANS_FEAT(UDOT_nn_2h, aa64_sme2, do_dot, a, true, gen_helper_gvec_udot_2h) +TRANS_FEAT(SDOT_nn_4b, aa64_sme2, do_dot, a, true, gen_helper_gvec_sdot_4b) +TRANS_FEAT(UDOT_nn_4b, aa64_sme2, do_dot, a, true, gen_helper_gvec_udot_4b) +TRANS_FEAT(SDOT_nn_4h, aa64_sme2_i16i64, do_dot, a, true, gen_helper_gvec_sdot_4h) +TRANS_FEAT(UDOT_nn_4h, aa64_sme2_i16i64, do_dot, a, true, gen_helper_gvec_udot_4h) + +static bool do_dot_nx(DisasContext *s, arg_azx_n *a, gen_helper_gvec_4 *fn) +{ + return do_azz_acc(s, a->n, 1, a->rv, a->off, a->zn, a->zm, + a->idx, 0, false, fn); +} + +TRANS_FEAT(USDOT_nx, aa64_sme2, do_dot_nx, a, gen_helper_gvec_usdot_idx_4b) +TRANS_FEAT(SUDOT_nx, aa64_sme2, do_dot_nx, a, gen_helper_gvec_sudot_idx_4b) +TRANS_FEAT(SDOT_nx_2h, aa64_sme2, do_dot_nx, a, gen_helper_gvec_sdot_idx_2h) +TRANS_FEAT(UDOT_nx_2h, aa64_sme2, do_dot_nx, a, gen_helper_gvec_udot_idx_2h) +TRANS_FEAT(SDOT_nx_4b, aa64_sme2, do_dot_nx, a, gen_helper_gvec_sdot_idx_4b) +TRANS_FEAT(UDOT_nx_4b, aa64_sme2, do_dot_nx, a, gen_helper_gvec_udot_idx_4b) +TRANS_FEAT(SDOT_nx_4h, aa64_sme2_i16i64, do_dot_nx, a, gen_helper_gvec_sdot_idx_4h) +TRANS_FEAT(UDOT_nx_4h, aa64_sme2_i16i64, do_dot_nx, a, gen_helper_gvec_udot_idx_4h) + +static bool do_vdot_nx(DisasContext *s, arg_azx_n *a, gen_helper_gvec_3 *fn) +{ + if (sme_smza_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + fn(get_zarray(s, a->rv, a->off, a->n, 0), + vec_full_reg_ptr(s, a->zn), + vec_full_reg_ptr(s, a->zm), + tcg_constant_i32(simd_desc(svl, svl, a->idx))); + } + return 
true; +} + +TRANS_FEAT(SVDOT_nx_2h, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_svdot_idx_2h) +TRANS_FEAT(SVDOT_nx_4b, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_svdot_idx_4b) +TRANS_FEAT(SVDOT_nx_4h, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_svdot_idx_4h) + +TRANS_FEAT(UVDOT_nx_2h, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_uvdot_idx_2h) +TRANS_FEAT(UVDOT_nx_4b, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_uvdot_idx_4b) +TRANS_FEAT(UVDOT_nx_4h, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_uvdot_idx_4h) + +TRANS_FEAT(SUVDOT_nx_4b, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_suvdot_idx_4b) +TRANS_FEAT(USVDOT_nx_4b, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_usvdot_idx_4b) + +static bool do_smlal(DisasContext *s, arg_azz_n *a, bool multi, + gen_helper_gvec_4 *fn) +{ + return do_azz_acc(s, a->n, 2, a->rv, a->off, a->zn, a->zm, + 0, 0, multi, fn); +} + +TRANS_FEAT(SMLAL_n1, aa64_sme2, do_smlal, a, false, gen_helper_sve2_smlal_zzzw_s) +TRANS_FEAT(SMLSL_n1, aa64_sme2, do_smlal, a, false, gen_helper_sve2_smlsl_zzzw_s) +TRANS_FEAT(UMLAL_n1, aa64_sme2, do_smlal, a, false, gen_helper_sve2_umlal_zzzw_s) +TRANS_FEAT(UMLSL_n1, aa64_sme2, do_smlal, a, false, gen_helper_sve2_umlsl_zzzw_s) + +TRANS_FEAT(SMLAL_nn, aa64_sme2, do_smlal, a, true, gen_helper_sve2_smlal_zzzw_s) +TRANS_FEAT(SMLSL_nn, aa64_sme2, do_smlal, a, true, gen_helper_sve2_smlsl_zzzw_s) +TRANS_FEAT(UMLAL_nn, aa64_sme2, do_smlal, a, true, gen_helper_sve2_umlal_zzzw_s) +TRANS_FEAT(UMLSL_nn, aa64_sme2, do_smlal, a, true, gen_helper_sve2_umlsl_zzzw_s) + +static bool do_smlal_nx(DisasContext *s, arg_azx_n *a, + gen_helper_gvec_4 *fn) +{ + return do_azz_acc(s, a->n, 2, a->rv, a->off, a->zn, a->zm, + a->idx << 1, 0, false, fn); +} + +TRANS_FEAT(SMLAL_nx, aa64_sme2, do_smlal_nx, a, gen_helper_sve2_smlal_idx_s) +TRANS_FEAT(SMLSL_nx, aa64_sme2, do_smlal_nx, a, gen_helper_sve2_smlsl_idx_s) +TRANS_FEAT(UMLAL_nx, aa64_sme2, do_smlal_nx, a, gen_helper_sve2_umlal_idx_s) +TRANS_FEAT(UMLSL_nx, aa64_sme2, do_smlal_nx, a, gen_helper_sve2_umlsl_idx_s) + +static bool do_smlall(DisasContext *s, arg_azz_n *a, bool multi, + gen_helper_gvec_4 *fn) +{ + return do_azz_acc(s, a->n, 4, a->rv, a->off, a->zn, a->zm, + 0, 0, multi, fn); +} + +static void gen_helper_sme2_sumlall_s(TCGv_ptr d, TCGv_ptr n, TCGv_ptr m, + TCGv_ptr a, TCGv_i32 desc) +{ + gen_helper_sme2_usmlall_s(d, m, n, a, desc); +} + +TRANS_FEAT(SMLALL_n1_s, aa64_sme2, do_smlall, a, false, gen_helper_sme2_smlall_s) +TRANS_FEAT(SMLSLL_n1_s, aa64_sme2, do_smlall, a, false, gen_helper_sme2_smlsll_s) +TRANS_FEAT(UMLALL_n1_s, aa64_sme2, do_smlall, a, false, gen_helper_sme2_umlall_s) +TRANS_FEAT(UMLSLL_n1_s, aa64_sme2, do_smlall, a, false, gen_helper_sme2_umlsll_s) +TRANS_FEAT(USMLALL_n1_s, aa64_sme2, do_smlall, a, false, gen_helper_sme2_usmlall_s) +TRANS_FEAT(SUMLALL_n1_s, aa64_sme2, do_smlall, a, false, gen_helper_sme2_sumlall_s) + +TRANS_FEAT(SMLALL_n1_d, aa64_sme2_i16i64, do_smlall, a, false, gen_helper_sme2_smlall_d) +TRANS_FEAT(SMLSLL_n1_d, aa64_sme2_i16i64, do_smlall, a, false, gen_helper_sme2_smlsll_d) +TRANS_FEAT(UMLALL_n1_d, aa64_sme2_i16i64, do_smlall, a, false, gen_helper_sme2_umlall_d) +TRANS_FEAT(UMLSLL_n1_d, aa64_sme2_i16i64, do_smlall, a, false, gen_helper_sme2_umlsll_d) + +TRANS_FEAT(SMLALL_nn_s, aa64_sme2, do_smlall, a, true, gen_helper_sme2_smlall_s) +TRANS_FEAT(SMLSLL_nn_s, aa64_sme2, do_smlall, a, true, gen_helper_sme2_smlsll_s) +TRANS_FEAT(UMLALL_nn_s, aa64_sme2, do_smlall, a, true, gen_helper_sme2_umlall_s) +TRANS_FEAT(UMLSLL_nn_s, aa64_sme2, do_smlall, a, true, gen_helper_sme2_umlsll_s) 
+TRANS_FEAT(USMLALL_nn_s, aa64_sme2, do_smlall, a, true, gen_helper_sme2_usmlall_s) + +TRANS_FEAT(SMLALL_nn_d, aa64_sme2_i16i64, do_smlall, a, true, gen_helper_sme2_smlall_d) +TRANS_FEAT(SMLSLL_nn_d, aa64_sme2_i16i64, do_smlall, a, true, gen_helper_sme2_smlsll_d) +TRANS_FEAT(UMLALL_nn_d, aa64_sme2_i16i64, do_smlall, a, true, gen_helper_sme2_umlall_d) +TRANS_FEAT(UMLSLL_nn_d, aa64_sme2_i16i64, do_smlall, a, true, gen_helper_sme2_umlsll_d) + +static bool do_smlall_nx(DisasContext *s, arg_azx_n *a, + gen_helper_gvec_4 *fn) +{ + return do_azz_acc(s, a->n, 4, a->rv, a->off, a->zn, a->zm, + a->idx << 2, 0, false, fn); +} + +TRANS_FEAT(SMLALL_nx_s, aa64_sme2, do_smlall_nx, a, gen_helper_sme2_smlall_idx_s) +TRANS_FEAT(SMLSLL_nx_s, aa64_sme2, do_smlall_nx, a, gen_helper_sme2_smlsll_idx_s) +TRANS_FEAT(UMLALL_nx_s, aa64_sme2, do_smlall_nx, a, gen_helper_sme2_umlall_idx_s) +TRANS_FEAT(UMLSLL_nx_s, aa64_sme2, do_smlall_nx, a, gen_helper_sme2_umlsll_idx_s) +TRANS_FEAT(USMLALL_nx_s, aa64_sme2, do_smlall_nx, a, gen_helper_sme2_usmlall_idx_s) +TRANS_FEAT(SUMLALL_nx_s, aa64_sme2, do_smlall_nx, a, gen_helper_sme2_sumlall_idx_s) + +TRANS_FEAT(SMLALL_nx_d, aa64_sme2_i16i64, do_smlall_nx, a, gen_helper_sme2_smlall_idx_d) +TRANS_FEAT(SMLSLL_nx_d, aa64_sme2_i16i64, do_smlall_nx, a, gen_helper_sme2_smlsll_idx_d) +TRANS_FEAT(UMLALL_nx_d, aa64_sme2_i16i64, do_smlall_nx, a, gen_helper_sme2_umlall_idx_d) +TRANS_FEAT(UMLSLL_nx_d, aa64_sme2_i16i64, do_smlall_nx, a, gen_helper_sme2_umlsll_idx_d) + +static bool do_zz_fpst(DisasContext *s, arg_zz_n *a, int data, + ARMFPStatusFlavour type, gen_helper_gvec_2_ptr *fn) +{ + if (sme_sm_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + TCGv_ptr fpst = fpstatus_ptr(type); + + for (int i = 0, n = a->n; i < n; ++i) { + tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, a->zd + i), + vec_full_reg_offset(s, a->zn + i), + fpst, svl, svl, data, fn); + } + } + return true; +} + +TRANS_FEAT(BFCVT, aa64_sme2, do_zz_fpst, a, 0, + FPST_A64, gen_helper_sme2_bfcvt) +TRANS_FEAT(BFCVTN, aa64_sme2, do_zz_fpst, a, 0, + FPST_A64, gen_helper_sme2_bfcvtn) +TRANS_FEAT(FCVT_n, aa64_sme2, do_zz_fpst, a, 0, + FPST_A64, gen_helper_sme2_fcvt_n) +TRANS_FEAT(FCVTN, aa64_sme2, do_zz_fpst, a, 0, + FPST_A64, gen_helper_sme2_fcvtn) + +TRANS_FEAT(FCVT_w, aa64_sme_f16f16, do_zz_fpst, a, 0, + FPST_A64_F16, gen_helper_sme2_fcvt_w) +TRANS_FEAT(FCVTL, aa64_sme_f16f16, do_zz_fpst, a, 0, + FPST_A64_F16, gen_helper_sme2_fcvtl) + +TRANS_FEAT(FCVTZS, aa64_sme2, do_zz_fpst, a, 0, + FPST_A64, gen_helper_gvec_vcvt_rz_fs) +TRANS_FEAT(FCVTZU, aa64_sme2, do_zz_fpst, a, 0, + FPST_A64, gen_helper_gvec_vcvt_rz_fu) + +TRANS_FEAT(SCVTF, aa64_sme2, do_zz_fpst, a, 0, + FPST_A64, gen_helper_sme2_scvtf) +TRANS_FEAT(UCVTF, aa64_sme2, do_zz_fpst, a, 0, + FPST_A64, gen_helper_sme2_ucvtf) + +TRANS_FEAT(FRINTN, aa64_sme2, do_zz_fpst, a, float_round_nearest_even, + FPST_A64, gen_helper_gvec_vrint_rm_s) +TRANS_FEAT(FRINTP, aa64_sme2, do_zz_fpst, a, float_round_up, + FPST_A64, gen_helper_gvec_vrint_rm_s) +TRANS_FEAT(FRINTM, aa64_sme2, do_zz_fpst, a, float_round_down, + FPST_A64, gen_helper_gvec_vrint_rm_s) +TRANS_FEAT(FRINTA, aa64_sme2, do_zz_fpst, a, float_round_ties_away, + FPST_A64, gen_helper_gvec_vrint_rm_s) + +static bool do_zz(DisasContext *s, arg_zz_n *a, int data, + gen_helper_gvec_2 *fn) +{ + if (sme_sm_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + + for (int i = 0, n = a->n; i < n; ++i) { + tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->zd + i), + vec_full_reg_offset(s, a->zn + i), + svl, svl, data, fn); + } + } + 
return true; +} + +TRANS_FEAT(SQCVT_sh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvt_sh) +TRANS_FEAT(UQCVT_sh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvt_sh) +TRANS_FEAT(SQCVTU_sh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtu_sh) + +TRANS_FEAT(SQCVT_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvt_sb) +TRANS_FEAT(UQCVT_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvt_sb) +TRANS_FEAT(SQCVTU_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtu_sb) + +TRANS_FEAT(SQCVT_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvt_dh) +TRANS_FEAT(UQCVT_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvt_dh) +TRANS_FEAT(SQCVTU_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtu_dh) + +TRANS_FEAT(SQCVTN_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtn_sb) +TRANS_FEAT(UQCVTN_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvtn_sb) +TRANS_FEAT(SQCVTUN_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtun_sb) + +TRANS_FEAT(SQCVTN_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtn_dh) +TRANS_FEAT(UQCVTN_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvtn_dh) +TRANS_FEAT(SQCVTUN_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtun_dh) + +TRANS_FEAT(SUNPK_2bh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sunpk2_bh) +TRANS_FEAT(SUNPK_2hs, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sunpk2_hs) +TRANS_FEAT(SUNPK_2sd, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sunpk2_sd) + +TRANS_FEAT(SUNPK_4bh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sunpk4_bh) +TRANS_FEAT(SUNPK_4hs, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sunpk4_hs) +TRANS_FEAT(SUNPK_4sd, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sunpk4_sd) + +TRANS_FEAT(UUNPK_2bh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk2_bh) +TRANS_FEAT(UUNPK_2hs, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk2_hs) +TRANS_FEAT(UUNPK_2sd, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk2_sd) + +TRANS_FEAT(UUNPK_4bh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk4_bh) +TRANS_FEAT(UUNPK_4hs, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk4_hs) +TRANS_FEAT(UUNPK_4sd, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk4_sd) + +static bool do_zipuzp_4(DisasContext *s, arg_zz_e *a, + gen_helper_gvec_2 * const fn[5]) +{ + int bytes_per_op = 4 << a->esz; + + /* Both MO_64 and MO_128 can fail the size test. 
*/ + if (s->max_svl < bytes_per_op) { + unallocated_encoding(s); + } else if (sme_sm_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + if (svl < bytes_per_op) { + unallocated_encoding(s); + } else { + tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->zd), + vec_full_reg_offset(s, a->zn), + svl, svl, 0, fn[a->esz]); + } + } + return true; +} + +static gen_helper_gvec_2 * const zip4_fns[] = { + gen_helper_sme2_zip4_b, + gen_helper_sme2_zip4_h, + gen_helper_sme2_zip4_s, + gen_helper_sme2_zip4_d, + gen_helper_sme2_zip4_q, +}; +TRANS_FEAT(ZIP_4, aa64_sme2, do_zipuzp_4, a, zip4_fns) + +static gen_helper_gvec_2 * const uzp4_fns[] = { + gen_helper_sme2_uzp4_b, + gen_helper_sme2_uzp4_h, + gen_helper_sme2_uzp4_s, + gen_helper_sme2_uzp4_d, + gen_helper_sme2_uzp4_q, +}; +TRANS_FEAT(UZP_4, aa64_sme2, do_zipuzp_4, a, uzp4_fns) + +static bool do_zz_rshr(DisasContext *s, arg_rshr *a, gen_helper_gvec_2 *fn) +{ + if (sve_access_check(s)) { + int vl = vec_full_reg_size(s); + tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->zd), + vec_full_reg_offset(s, a->zn), + vl, vl, a->shift, fn); + } + return true; +} + +TRANS_FEAT(SQRSHR_sh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshr_sh) +TRANS_FEAT(UQRSHR_sh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshr_sh) +TRANS_FEAT(SQRSHRU_sh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshru_sh) + +TRANS_FEAT(SQRSHR_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshr_sb) +TRANS_FEAT(SQRSHR_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshr_dh) +TRANS_FEAT(UQRSHR_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshr_sb) +TRANS_FEAT(UQRSHR_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshr_dh) +TRANS_FEAT(SQRSHRU_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshru_sb) +TRANS_FEAT(SQRSHRU_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshru_dh) + +TRANS_FEAT(SQRSHRN_sh, aa64_sme2_or_sve2p1, do_zz_rshr, a, gen_helper_sme2_sqrshrn_sh) +TRANS_FEAT(UQRSHRN_sh, aa64_sme2_or_sve2p1, do_zz_rshr, a, gen_helper_sme2_uqrshrn_sh) +TRANS_FEAT(SQRSHRUN_sh, aa64_sme2_or_sve2p1, do_zz_rshr, a, gen_helper_sme2_sqrshrun_sh) + +TRANS_FEAT(SQRSHRN_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshrn_sb) +TRANS_FEAT(SQRSHRN_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshrn_dh) +TRANS_FEAT(UQRSHRN_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshrn_sb) +TRANS_FEAT(UQRSHRN_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshrn_dh) +TRANS_FEAT(SQRSHRUN_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshrun_sb) +TRANS_FEAT(SQRSHRUN_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshrun_dh) + +static bool do_zipuzp_2(DisasContext *s, arg_zzz_e *a, + gen_helper_gvec_3 * const fn[5]) +{ + int bytes_per_op = 2 << a->esz; + + /* MO_128 can fail the size test. 
*/ + if (s->max_svl < bytes_per_op) { + unallocated_encoding(s); + } else if (sme_sm_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + if (svl < bytes_per_op) { + unallocated_encoding(s); + } else { + tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->zd), + vec_full_reg_offset(s, a->zn), + vec_full_reg_offset(s, a->zm), + svl, svl, 0, fn[a->esz]); + } + } + return true; +} + +static gen_helper_gvec_3 * const zip2_fns[] = { + gen_helper_sme2_zip2_b, + gen_helper_sme2_zip2_h, + gen_helper_sme2_zip2_s, + gen_helper_sme2_zip2_d, + gen_helper_sme2_zip2_q, +}; +TRANS_FEAT(ZIP_2, aa64_sme2, do_zipuzp_2, a, zip2_fns) + +static gen_helper_gvec_3 * const uzp2_fns[] = { + gen_helper_sme2_uzp2_b, + gen_helper_sme2_uzp2_h, + gen_helper_sme2_uzp2_s, + gen_helper_sme2_uzp2_d, + gen_helper_sme2_uzp2_q, +}; +TRANS_FEAT(UZP_2, aa64_sme2, do_zipuzp_2, a, uzp2_fns) + +static bool trans_FCLAMP(DisasContext *s, arg_zzz_en *a) +{ + static gen_helper_gvec_3_ptr * const fn[] = { + gen_helper_sme2_bfclamp, + gen_helper_sme2_fclamp_h, + gen_helper_sme2_fclamp_s, + gen_helper_sme2_fclamp_d, + }; + TCGv_ptr fpst; + int vl; + + if (!dc_isar_feature(aa64_sme2, s)) { + return false; + } + /* This insn uses MO_8 to encode BFloat16. */ + if (a->esz == MO_8 && !dc_isar_feature(aa64_sme_b16b16, s)) { + return false; + } + if (!sme_sm_enabled_check(s)) { + return true; + } + + fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); + vl = vec_full_reg_size(s); + + tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->zd), + vec_full_reg_offset(s, a->zn), + vec_full_reg_offset(s, a->zm), + fpst, vl, vl, a->n, fn[a->esz]); + return true; +} + +static bool do_clamp(DisasContext *s, arg_zzz_en *a, + gen_helper_gvec_3 * const fn[4]) +{ + int vl; + + if (!dc_isar_feature(aa64_sme2, s)) { + return false; + } + if (!sme_sm_enabled_check(s)) { + return true; + } + + /* + * Clamp is just a min+max, easily supported by most host + * vector operations -- we already have such an expansion in + * translate-sve.c for a single output. + * TODO: Add support in gvec for multiple simultaneous output, + * and/or copy to temporary upon overlap. 
+ */ + vl = vec_full_reg_size(s); + tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->zd), + vec_full_reg_offset(s, a->zn), + vec_full_reg_offset(s, a->zm), + vl, vl, a->n, fn[a->esz]); + return true; +} + +static gen_helper_gvec_3 * const sclamp_fns[] = { + gen_helper_sme2_sclamp_b, + gen_helper_sme2_sclamp_h, + gen_helper_sme2_sclamp_s, + gen_helper_sme2_sclamp_d, +}; +TRANS(SCLAMP, do_clamp, a, sclamp_fns) + +static gen_helper_gvec_3 * const uclamp_fns[] = { + gen_helper_sme2_uclamp_b, + gen_helper_sme2_uclamp_h, + gen_helper_sme2_uclamp_s, + gen_helper_sme2_uclamp_d, +}; +TRANS(UCLAMP, do_clamp, a, uclamp_fns) + +static bool trans_SEL(DisasContext *s, arg_SEL *a) +{ + typedef void sme_sel_fn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32); + static sme_sel_fn * const fns[4] = { + gen_helper_sme2_sel_b, gen_helper_sme2_sel_h, + gen_helper_sme2_sel_s, gen_helper_sme2_sel_d + }; + + if (!dc_isar_feature(aa64_sme2, s)) { + return false; + } + if (sme_sm_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + uint32_t desc = simd_desc(svl, svl, a->n); + TCGv_ptr t_d = tcg_temp_new_ptr(); + TCGv_ptr t_n = tcg_temp_new_ptr(); + TCGv_ptr t_m = tcg_temp_new_ptr(); + TCGv_i32 png = tcg_temp_new_i32(); + + tcg_gen_addi_ptr(t_d, tcg_env, vec_full_reg_offset(s, a->zd)); + tcg_gen_addi_ptr(t_n, tcg_env, vec_full_reg_offset(s, a->zn)); + tcg_gen_addi_ptr(t_m, tcg_env, vec_full_reg_offset(s, a->zm)); + + tcg_gen_ld16u_i32(png, tcg_env, pred_full_reg_offset(s, a->pg) + ^ (HOST_BIG_ENDIAN ? 6 : 0)); + + fns[a->esz](t_d, t_n, t_m, png, tcg_constant_i32(desc)); + } + return true; +} + +static bool do_lut(DisasContext *s, arg_lut *a, + gen_helper_gvec_2_ptr *fn, bool strided) +{ + if (sme_sm_enabled_check(s) && sme2_zt0_enabled_check(s)) { + int svl = streaming_vec_reg_size(s); + tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, a->zd), + vec_full_reg_offset(s, a->zn), + tcg_env, svl, svl, strided | (a->idx << 1), fn); + } + return true; +} + +TRANS_FEAT(LUTI2_c_1b, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_1b, false) +TRANS_FEAT(LUTI2_c_1h, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_1h, false) +TRANS_FEAT(LUTI2_c_1s, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_1s, false) + +TRANS_FEAT(LUTI2_c_2b, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_2b, false) +TRANS_FEAT(LUTI2_c_2h, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_2h, false) +TRANS_FEAT(LUTI2_c_2s, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_2s, false) + +TRANS_FEAT(LUTI2_c_4b, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_4b, false) +TRANS_FEAT(LUTI2_c_4h, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_4h, false) +TRANS_FEAT(LUTI2_c_4s, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_4s, false) + +TRANS_FEAT(LUTI4_c_1b, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_1b, false) +TRANS_FEAT(LUTI4_c_1h, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_1h, false) +TRANS_FEAT(LUTI4_c_1s, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_1s, false) + +TRANS_FEAT(LUTI4_c_2b, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_2b, false) +TRANS_FEAT(LUTI4_c_2h, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_2h, false) +TRANS_FEAT(LUTI4_c_2s, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_2s, false) + +TRANS_FEAT(LUTI4_c_4h, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_4h, false) +TRANS_FEAT(LUTI4_c_4s, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_4s, false) + +static bool do_lut_s4(DisasContext *s, arg_lut *a, gen_helper_gvec_2_ptr *fn) +{ + return !(a->zd & 0b01100) && do_lut(s, a, fn, true); +} + +static bool do_lut_s8(DisasContext *s, arg_lut *a, gen_helper_gvec_2_ptr *fn) +{ + return !(a->zd & 
0b01000) && do_lut(s, a, fn, true); +} + +TRANS_FEAT(LUTI2_s_2b, aa64_sme2p1, do_lut_s8, a, gen_helper_sme2_luti2_2b) +TRANS_FEAT(LUTI2_s_2h, aa64_sme2p1, do_lut_s8, a, gen_helper_sme2_luti2_2h) + +TRANS_FEAT(LUTI2_s_4b, aa64_sme2p1, do_lut_s4, a, gen_helper_sme2_luti2_4b) +TRANS_FEAT(LUTI2_s_4h, aa64_sme2p1, do_lut_s4, a, gen_helper_sme2_luti2_4h) + +TRANS_FEAT(LUTI4_s_2b, aa64_sme2p1, do_lut_s8, a, gen_helper_sme2_luti4_2b) +TRANS_FEAT(LUTI4_s_2h, aa64_sme2p1, do_lut_s8, a, gen_helper_sme2_luti4_2h) + +TRANS_FEAT(LUTI4_s_4h, aa64_sme2p1, do_lut_s4, a, gen_helper_sme2_luti4_4h) diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index a72c2620960..07b827fa8e8 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -31,9 +31,9 @@ typedef void gen_helper_gvec_flags_3(TCGv_i32, TCGv_ptr, TCGv_ptr, typedef void gen_helper_gvec_flags_4(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); -typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32); +typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i64); typedef void gen_helper_gvec_mem_scatter(TCGv_env, TCGv_ptr, TCGv_ptr, - TCGv_ptr, TCGv_i64, TCGv_i32); + TCGv_ptr, TCGv_i64, TCGv_i64); /* * Helpers for extracting complex instruction fields. @@ -89,7 +89,7 @@ static inline int expand_imm_sh8u(DisasContext *s, int x) */ static inline int msz_dtype(DisasContext *s, int msz) { - static const uint8_t dtype[4] = { 0, 5, 10, 15 }; + static const uint8_t dtype[5] = { 0, 5, 10, 15, 18 }; return dtype[msz]; } @@ -137,11 +137,11 @@ static bool gen_gvec_fpst_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, return true; } -static bool gen_gvec_fpst_arg_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, - arg_rr_esz *a, int data) +static bool gen_gvec_fpst_ah_arg_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, + arg_rr_esz *a, int data) { return gen_gvec_fpst_zz(s, fn, a->rd, a->rn, data, - a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); + select_ah_fpst(s, a->esz)); } /* Invoke an out-of-line helper on 3 Zregs. */ @@ -190,8 +190,19 @@ static bool gen_gvec_fpst_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, static bool gen_gvec_fpst_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, arg_rrr_esz *a, int data) { + /* These insns use MO_8 to encode BFloat16 */ + if (a->esz == MO_8 && !dc_isar_feature(aa64_sve_b16b16, s)) { + return false; + } return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data, - a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); +} + +static bool gen_gvec_fpst_ah_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, + arg_rrr_esz *a, int data) +{ + return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data, + select_ah_fpst(s, a->esz)); } /* Invoke an out-of-line helper on 4 Zregs. */ @@ -252,6 +263,25 @@ static bool gen_gvec_fpst_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, return ret; } +static bool gen_gvec_env_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, + int rd, int rn, int rm, int ra, + int data) +{ + return gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, tcg_env); +} + +static bool gen_gvec_env_arg_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, + arg_rrrr_esz *a, int data) +{ + return gen_gvec_env_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data); +} + +static bool gen_gvec_env_arg_zzxz(DisasContext *s, gen_helper_gvec_4_ptr *fn, + arg_rrxr_esz *a) +{ + return gen_gvec_env_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index); +} + /* Invoke an out-of-line helper on 4 Zregs, 1 Preg, plus fpst. 
*/ static bool gen_gvec_fpst_zzzzp(DisasContext *s, gen_helper_gvec_5_ptr *fn, int rd, int rn, int rm, int ra, int pg, @@ -377,8 +407,12 @@ static bool gen_gvec_fpst_zzzp(DisasContext *s, gen_helper_gvec_4_ptr *fn, static bool gen_gvec_fpst_arg_zpzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, arg_rprr_esz *a) { + /* These insns use MO_8 to encode BFloat16. */ + if (a->esz == MO_8 && !dc_isar_feature(aa64_sve_b16b16, s)) { + return false; + } return gen_gvec_fpst_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, 0, - a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); } /* Invoke a vector expander on two Zregs and an immediate. */ @@ -577,14 +611,8 @@ static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m, TCGv_vec k) { - if (TCG_TARGET_HAS_bitsel_vec) { - tcg_gen_not_vec(vece, n, n); - tcg_gen_bitsel_vec(vece, d, k, n, m); - } else { - tcg_gen_andc_vec(vece, n, k, n); - tcg_gen_andc_vec(vece, m, m, k); - tcg_gen_or_vec(vece, d, n, m); - } + tcg_gen_not_vec(vece, n, n); + tcg_gen_bitsel_vec(vece, d, k, n, m); } static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m, @@ -609,7 +637,7 @@ static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) * = | ~(m | k) */ tcg_gen_and_i64(n, n, k); - if (TCG_TARGET_HAS_orc_i64) { + if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I64, 0)) { tcg_gen_or_i64(m, m, k); tcg_gen_orc_i64(d, n, m); } else { @@ -621,14 +649,8 @@ static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m, TCGv_vec k) { - if (TCG_TARGET_HAS_bitsel_vec) { - tcg_gen_not_vec(vece, m, m); - tcg_gen_bitsel_vec(vece, d, k, n, m); - } else { - tcg_gen_and_vec(vece, n, n, k); - tcg_gen_or_vec(vece, m, m, k); - tcg_gen_orc_vec(vece, d, n, m); - } + tcg_gen_not_vec(vece, m, m); + tcg_gen_bitsel_vec(vece, d, k, n, m); } static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m, @@ -764,18 +786,31 @@ DO_ZPZ(NOT_zpz, aa64_sve, sve_not_zpz) DO_ZPZ(ABS, aa64_sve, sve_abs) DO_ZPZ(NEG, aa64_sve, sve_neg) DO_ZPZ(RBIT, aa64_sve, sve_rbit) +DO_ZPZ(ORQV, aa64_sme2p1_or_sve2p1, sve2p1_orqv) +DO_ZPZ(EORQV, aa64_sme2p1_or_sve2p1, sve2p1_eorqv) +DO_ZPZ(ANDQV, aa64_sme2p1_or_sve2p1, sve2p1_andqv) static gen_helper_gvec_3 * const fabs_fns[4] = { NULL, gen_helper_sve_fabs_h, gen_helper_sve_fabs_s, gen_helper_sve_fabs_d, }; -TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz, fabs_fns[a->esz], a, 0) +static gen_helper_gvec_3 * const fabs_ah_fns[4] = { + NULL, gen_helper_sve_ah_fabs_h, + gen_helper_sve_ah_fabs_s, gen_helper_sve_ah_fabs_d, +}; +TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz, + s->fpcr_ah ? fabs_ah_fns[a->esz] : fabs_fns[a->esz], a, 0) static gen_helper_gvec_3 * const fneg_fns[4] = { NULL, gen_helper_sve_fneg_h, gen_helper_sve_fneg_s, gen_helper_sve_fneg_d, }; -TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz, fneg_fns[a->esz], a, 0) +static gen_helper_gvec_3 * const fneg_ah_fns[4] = { + NULL, gen_helper_sve_ah_fneg_h, + gen_helper_sve_ah_fneg_s, gen_helper_sve_ah_fneg_d, +}; +TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz, + s->fpcr_ah ? fneg_ah_fns[a->esz] : fneg_fns[a->esz], a, 0) static gen_helper_gvec_3 * const sxtb_fns[4] = { NULL, gen_helper_sve_sxtb_h, @@ -804,6 +839,41 @@ TRANS_FEAT(SXTW, aa64_sve, gen_gvec_ool_arg_zpz, TRANS_FEAT(UXTW, aa64_sve, gen_gvec_ool_arg_zpz, a->esz == 3 ? 
gen_helper_sve_uxtw_d : NULL, a, 0) +static gen_helper_gvec_3 * const addqv_fns[4] = { + gen_helper_sve2p1_addqv_b, gen_helper_sve2p1_addqv_h, + gen_helper_sve2p1_addqv_s, gen_helper_sve2p1_addqv_d, +}; +TRANS_FEAT(ADDQV, aa64_sme2p1_or_sve2p1, + gen_gvec_ool_arg_zpz, addqv_fns[a->esz], a, 0) + +static gen_helper_gvec_3 * const smaxqv_fns[4] = { + gen_helper_sve2p1_smaxqv_b, gen_helper_sve2p1_smaxqv_h, + gen_helper_sve2p1_smaxqv_s, gen_helper_sve2p1_smaxqv_d, +}; +TRANS_FEAT(SMAXQV, aa64_sme2p1_or_sve2p1, + gen_gvec_ool_arg_zpz, smaxqv_fns[a->esz], a, 0) + +static gen_helper_gvec_3 * const sminqv_fns[4] = { + gen_helper_sve2p1_sminqv_b, gen_helper_sve2p1_sminqv_h, + gen_helper_sve2p1_sminqv_s, gen_helper_sve2p1_sminqv_d, +}; +TRANS_FEAT(SMINQV, aa64_sme2p1_or_sve2p1, + gen_gvec_ool_arg_zpz, sminqv_fns[a->esz], a, 0) + +static gen_helper_gvec_3 * const umaxqv_fns[4] = { + gen_helper_sve2p1_umaxqv_b, gen_helper_sve2p1_umaxqv_h, + gen_helper_sve2p1_umaxqv_s, gen_helper_sve2p1_umaxqv_d, +}; +TRANS_FEAT(UMAXQV, aa64_sme2p1_or_sve2p1, + gen_gvec_ool_arg_zpz, umaxqv_fns[a->esz], a, 0) + +static gen_helper_gvec_3 * const uminqv_fns[4] = { + gen_helper_sve2p1_uminqv_b, gen_helper_sve2p1_uminqv_h, + gen_helper_sve2p1_uminqv_s, gen_helper_sve2p1_uminqv_d, +}; +TRANS_FEAT(UMINQV, aa64_sme2p1_or_sve2p1, + gen_gvec_ool_arg_zpz, uminqv_fns[a->esz], a, 0) + /* *** SVE Integer Reduction Group */ @@ -1214,14 +1284,14 @@ static gen_helper_gvec_2 * const fexpa_fns[4] = { gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d, }; TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz, - fexpa_fns[a->esz], a->rd, a->rn, 0) + fexpa_fns[a->esz], a->rd, a->rn, s->fpcr_ah) static gen_helper_gvec_3 * const ftssel_fns[4] = { NULL, gen_helper_sve_ftssel_h, gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d, }; TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, - ftssel_fns[a->esz], a, 0) + ftssel_fns[a->esz], a, s->fpcr_ah) /* *** SVE Predicate Logical Operations Group @@ -1655,6 +1725,22 @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag) TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s) +static bool trans_PTRUE_cnt(DisasContext *s, arg_PTRUE_cnt *a) +{ + if (!dc_isar_feature(aa64_sme2_or_sve2p1, s)) { + return false; + } + if (sve_access_check(s)) { + /* Canonical TRUE is 0 count, invert bit, plus element size. */ + int val = (1 << 15) | (1 << a->esz); + + /* Write val to the first uint64_t; clear all of the rest. */ + tcg_gen_gvec_dup_imm(MO_64, pred_full_reg_offset(s, a->rd), + 8, size_for_gvec(pred_full_reg_size(s)), val); + } + return true; +} + /* Note pat == 31 is #all, to set all elements. */ TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve, do_predset, 0, FFR_PRED_NUM, 31, false) @@ -2124,6 +2210,55 @@ static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm) TRANS_FEAT(EXT, aa64_sve, do_EXT, a->rd, a->rn, a->rm, a->imm) TRANS_FEAT(EXT_sve2, aa64_sve2, do_EXT, a->rd, a->rn, (a->rn + 1) % 32, a->imm) +static bool trans_EXTQ(DisasContext *s, arg_EXTQ *a) +{ + unsigned vl, dofs, sofs0, sofs1, sofs2, imm; + + if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + + imm = a->imm; + if (imm == 0) { + /* So far we never optimize Zdn with MOVPRFX, so zd = zn is a nop. 
*/ + return true; + } + + vl = vec_full_reg_size(s); + dofs = vec_full_reg_offset(s, a->rd); + sofs2 = vec_full_reg_offset(s, a->rn); + + if (imm & 8) { + sofs0 = dofs + 8; + sofs1 = sofs2; + sofs2 += 8; + } else { + sofs0 = dofs; + sofs1 = dofs + 8; + } + imm = (imm & 7) << 3; + + for (unsigned i = 0; i < vl; i += 16) { + TCGv_i64 s0 = tcg_temp_new_i64(); + TCGv_i64 s1 = tcg_temp_new_i64(); + TCGv_i64 s2 = tcg_temp_new_i64(); + + tcg_gen_ld_i64(s0, tcg_env, sofs0 + i); + tcg_gen_ld_i64(s1, tcg_env, sofs1 + i); + tcg_gen_ld_i64(s2, tcg_env, sofs2 + i); + + tcg_gen_extract2_i64(s0, s0, s1, imm); + tcg_gen_extract2_i64(s1, s1, s2, imm); + + tcg_gen_st_i64(s0, tcg_env, dofs + i); + tcg_gen_st_i64(s1, tcg_env, dofs + i + 8); + } + return true; +} + /* *** SVE Permute - Unpredicated Group */ @@ -2171,6 +2306,27 @@ static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a) return true; } +static bool trans_DUPQ(DisasContext *s, arg_DUPQ *a) +{ + unsigned vl, dofs, nofs; + + if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + + vl = vec_full_reg_size(s); + dofs = vec_full_reg_offset(s, a->rd); + nofs = vec_reg_offset(s, a->rn, a->imm, a->esz); + + for (unsigned i = 0; i < vl; i += 16) { + tcg_gen_gvec_dup_mem(a->esz, dofs + i, nofs + i, 16, 16); + } + return true; +} + static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val) { typedef void gen_insr(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32); @@ -2232,12 +2388,124 @@ static gen_helper_gvec_4 * const sve2_tbl_fns[4] = { TRANS_FEAT(TBL_sve2, aa64_sve2, gen_gvec_ool_zzzz, sve2_tbl_fns[a->esz], a->rd, a->rn, (a->rn + 1) % 32, a->rm, 0) +static gen_helper_gvec_3 * const tblq_fns[4] = { + gen_helper_sve2p1_tblq_b, gen_helper_sve2p1_tblq_h, + gen_helper_sve2p1_tblq_s, gen_helper_sve2p1_tblq_d +}; +TRANS_FEAT(TBLQ, aa64_sme2p1_or_sve2p1, gen_gvec_ool_arg_zzz, + tblq_fns[a->esz], a, 0) + static gen_helper_gvec_3 * const tbx_fns[4] = { gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h, gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d }; TRANS_FEAT(TBX, aa64_sve2, gen_gvec_ool_arg_zzz, tbx_fns[a->esz], a, 0) +static gen_helper_gvec_3 * const tbxq_fns[4] = { + gen_helper_sve2p1_tbxq_b, gen_helper_sve2p1_tbxq_h, + gen_helper_sve2p1_tbxq_s, gen_helper_sve2p1_tbxq_d +}; +TRANS_FEAT(TBXQ, aa64_sme2p1_or_sve2p1, gen_gvec_ool_arg_zzz, + tbxq_fns[a->esz], a, 0) + +static bool trans_PMOV_pv(DisasContext *s, arg_PMOV_pv *a) +{ + static gen_helper_gvec_2 * const fns[4] = { + NULL, gen_helper_pmov_pv_h, + gen_helper_pmov_pv_s, gen_helper_pmov_pv_d + }; + unsigned vl, pl, vofs, pofs; + TCGv_i64 tmp; + + if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + + vl = vec_full_reg_size(s); + if (a->esz != MO_8) { + tcg_gen_gvec_2_ool(pred_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vl, vl, a->imm, fns[a->esz]); + return true; + } + + /* + * Copy the low PL bytes from vector Zn, zero-extending to a + * multiple of 8 bytes, so that Pd is properly cleared. + */ + + pl = vl / 8; + pofs = pred_full_reg_offset(s, a->rd); + vofs = vec_full_reg_offset(s, a->rn); + + QEMU_BUILD_BUG_ON(sizeof(ARMPredicateReg) != 32); + for (unsigned i = 32; i >= 8; i >>= 1) { + if (pl & i) { + tcg_gen_gvec_mov(MO_64, pofs, vofs, i, i); + pofs += i; + vofs += i; + } + } + switch (pl & 7) { + case 0: + return true; + case 2: + tmp = tcg_temp_new_i64(); + tcg_gen_ld16u_i64(tmp, tcg_env, vofs + (HOST_BIG_ENDIAN ? 
6 : 0)); + break; + case 4: + tmp = tcg_temp_new_i64(); + tcg_gen_ld32u_i64(tmp, tcg_env, vofs + (HOST_BIG_ENDIAN ? 4 : 0)); + break; + case 6: + tmp = tcg_temp_new_i64(); + tcg_gen_ld_i64(tmp, tcg_env, vofs); + tcg_gen_extract_i64(tmp, tmp, 0, 48); + break; + default: + g_assert_not_reached(); + } + tcg_gen_st_i64(tmp, tcg_env, pofs); + return true; +} + +static bool trans_PMOV_vp(DisasContext *s, arg_PMOV_pv *a) +{ + static gen_helper_gvec_2 * const fns[4] = { + NULL, gen_helper_pmov_vp_h, + gen_helper_pmov_vp_s, gen_helper_pmov_vp_d + }; + unsigned vl; + + if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + + vl = vec_full_reg_size(s); + + if (a->esz == MO_8) { + /* + * The low PL bytes are copied from Pn to Zd unchanged. + * We know that the unused portion of Pn is zero, and + * that imm == 0, so the balance of Zd must be zeroed. + */ + tcg_gen_gvec_mov(MO_64, vec_full_reg_offset(s, a->rd), + pred_full_reg_offset(s, a->rn), + size_for_gvec(vl / 8), vl); + } else { + tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd), + pred_full_reg_offset(s, a->rn), + vl, vl, a->imm, fns[a->esz]); + } + return true; +} + static bool trans_UNPK(DisasContext *s, arg_UNPK *a) { static gen_helper_gvec_2 * const fns[4][2] = { @@ -2328,6 +2596,23 @@ TRANS_FEAT(PUNPKHI, aa64_sve, do_perm_pred2, a, 1, gen_helper_sve_punpk_p) *** SVE Permute - Interleaving Group */ +static bool do_interleave_q(DisasContext *s, gen_helper_gvec_3 *fn, + arg_rrr_esz *a, int data) +{ + if (sve_access_check(s)) { + unsigned vsz = vec_full_reg_size(s); + if (vsz < 32) { + unallocated_encoding(s); + } else { + tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd), + vec_full_reg_offset(s, a->rn), + vec_full_reg_offset(s, a->rm), + vsz, vsz, data, fn); + } + } + return true; +} + static gen_helper_gvec_3 * const zip_fns[4] = { gen_helper_sve_zip_b, gen_helper_sve_zip_h, gen_helper_sve_zip_s, gen_helper_sve_zip_d, @@ -2337,26 +2622,43 @@ TRANS_FEAT(ZIP1_z, aa64_sve, gen_gvec_ool_arg_zzz, TRANS_FEAT(ZIP2_z, aa64_sve, gen_gvec_ool_arg_zzz, zip_fns[a->esz], a, vec_full_reg_size(s) / 2) -TRANS_FEAT(ZIP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, - gen_helper_sve2_zip_q, a, 0) -TRANS_FEAT(ZIP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, - gen_helper_sve2_zip_q, a, - QEMU_ALIGN_DOWN(vec_full_reg_size(s), 32) / 2) +TRANS_FEAT_NONSTREAMING(ZIP1_q, aa64_sve_f64mm, do_interleave_q, + gen_helper_sve2_zip_q, a, 0) +TRANS_FEAT_NONSTREAMING(ZIP2_q, aa64_sve_f64mm, do_interleave_q, + gen_helper_sve2_zip_q, a, + QEMU_ALIGN_DOWN(vec_full_reg_size(s), 32) / 2) + +static gen_helper_gvec_3 * const zipq_fns[4] = { + gen_helper_sve2p1_zipq_b, gen_helper_sve2p1_zipq_h, + gen_helper_sve2p1_zipq_s, gen_helper_sve2p1_zipq_d, +}; +TRANS_FEAT(ZIPQ1, aa64_sme2p1_or_sve2p1, gen_gvec_ool_arg_zzz, + zipq_fns[a->esz], a, 0) +TRANS_FEAT(ZIPQ2, aa64_sme2p1_or_sve2p1, gen_gvec_ool_arg_zzz, + zipq_fns[a->esz], a, 16 / 2) static gen_helper_gvec_3 * const uzp_fns[4] = { gen_helper_sve_uzp_b, gen_helper_sve_uzp_h, gen_helper_sve_uzp_s, gen_helper_sve_uzp_d, }; - TRANS_FEAT(UZP1_z, aa64_sve, gen_gvec_ool_arg_zzz, uzp_fns[a->esz], a, 0) TRANS_FEAT(UZP2_z, aa64_sve, gen_gvec_ool_arg_zzz, uzp_fns[a->esz], a, 1 << a->esz) -TRANS_FEAT(UZP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, - gen_helper_sve2_uzp_q, a, 0) -TRANS_FEAT(UZP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, - gen_helper_sve2_uzp_q, a, 16) +TRANS_FEAT_NONSTREAMING(UZP1_q, aa64_sve_f64mm, do_interleave_q, + gen_helper_sve2_uzp_q, a, 0) 
+TRANS_FEAT_NONSTREAMING(UZP2_q, aa64_sve_f64mm, do_interleave_q, + gen_helper_sve2_uzp_q, a, 16) + +static gen_helper_gvec_3 * const uzpq_fns[4] = { + gen_helper_sve2p1_uzpq_b, gen_helper_sve2p1_uzpq_h, + gen_helper_sve2p1_uzpq_s, gen_helper_sve2p1_uzpq_d, +}; +TRANS_FEAT(UZPQ1, aa64_sme2p1_or_sve2p1, gen_gvec_ool_arg_zzz, + uzpq_fns[a->esz], a, 0) +TRANS_FEAT(UZPQ2, aa64_sme2p1_or_sve2p1, gen_gvec_ool_arg_zzz, + uzpq_fns[a->esz], a, 1 << a->esz) static gen_helper_gvec_3 * const trn_fns[4] = { gen_helper_sve_trn_b, gen_helper_sve_trn_h, @@ -2368,10 +2670,10 @@ TRANS_FEAT(TRN1_z, aa64_sve, gen_gvec_ool_arg_zzz, TRANS_FEAT(TRN2_z, aa64_sve, gen_gvec_ool_arg_zzz, trn_fns[a->esz], a, 1 << a->esz) -TRANS_FEAT(TRN1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, - gen_helper_sve2_trn_q, a, 0) -TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, - gen_helper_sve2_trn_q, a, 16) +TRANS_FEAT_NONSTREAMING(TRN1_q, aa64_sve_f64mm, do_interleave_q, + gen_helper_sve2_trn_q, a, 0) +TRANS_FEAT_NONSTREAMING(TRN2_q, aa64_sve_f64mm, do_interleave_q, + gen_helper_sve2_trn_q, a, 16) /* *** SVE Permute Vector - Predicated Group @@ -2957,6 +3259,36 @@ static bool trans_CNTP(DisasContext *s, arg_CNTP *a) return true; } +static bool trans_CNTP_c(DisasContext *s, arg_CNTP_c *a) +{ + TCGv_i32 t_png; + uint32_t desc = 0; + + if (dc_isar_feature(aa64_sve2p1, s)) { + if (!sve_access_check(s)) { + return true; + } + } else if (dc_isar_feature(aa64_sme2, s)) { + if (!sme_sm_enabled_check(s)) { + return true; + } + } else { + return false; + } + + t_png = tcg_temp_new_i32(); + tcg_gen_ld16u_i32(t_png, tcg_env, + pred_full_reg_offset(s, a->rn) ^ + (HOST_BIG_ENDIAN ? 6 : 0)); + + desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s)); + desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); + desc = FIELD_DP32(desc, PREDDESC, DATA, a->vl); + + gen_helper_sve2p1_cntp_c(cpu_reg(s, a->rd), t_png, tcg_constant_i32(desc)); + return true; +} + static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a) { if (!dc_isar_feature(aa64_sve, s)) { @@ -3067,7 +3399,9 @@ static bool trans_CTERM(DisasContext *s, arg_CTERM *a) return true; } -static bool trans_WHILE(DisasContext *s, arg_WHILE *a) +typedef void gen_while_fn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32); +static bool do_WHILE(DisasContext *s, arg_while *a, + bool lt, int scale, int data, gen_while_fn *fn) { TCGv_i64 op0, op1, t0, t1, tmax; TCGv_i32 t2; @@ -3077,14 +3411,8 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) TCGCond cond; uint64_t maxval; /* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */ - bool eq = a->eq == a->lt; + bool eq = a->eq == lt; - /* The greater-than conditions are all SVE2. */ - if (a->lt - ? !dc_isar_feature(aa64_sve, s) - : !dc_isar_feature(aa64_sve2, s)) { - return false; - } if (!sve_access_check(s)) { return true; } @@ -3108,7 +3436,7 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); - if (a->lt) { + if (lt) { tcg_gen_sub_i64(t0, op1, op0); if (a->u) { maxval = a->sf ? UINT64_MAX : UINT32_MAX; @@ -3128,7 +3456,7 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) } } - tmax = tcg_constant_i64(vsz >> a->esz); + tmax = tcg_constant_i64((vsz << scale) >> a->esz); if (eq) { /* Equality means one more iteration. */ tcg_gen_addi_i64(t0, t0, 1); @@ -3157,24 +3485,38 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) t2 = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(t2, t0); - /* Scale elements to bits. 
*/ - tcg_gen_shli_i32(t2, t2, a->esz); - desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8); desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); + desc = FIELD_DP32(desc, PREDDESC, DATA, data); ptr = tcg_temp_new_ptr(); tcg_gen_addi_ptr(ptr, tcg_env, pred_full_reg_offset(s, a->rd)); - if (a->lt) { - gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc)); - } else { - gen_helper_sve_whileg(t2, ptr, t2, tcg_constant_i32(desc)); - } + fn(t2, ptr, t2, tcg_constant_i32(desc)); + do_pred_flags(t2); return true; } +TRANS_FEAT(WHILE_lt, aa64_sve, do_WHILE, + a, true, 0, 0, gen_helper_sve_whilel) +TRANS_FEAT(WHILE_gt, aa64_sve2, do_WHILE, + a, false, 0, 0, gen_helper_sve_whileg) + +TRANS_FEAT(WHILE_lt_pair, aa64_sme2_or_sve2p1, do_WHILE, + a, true, 1, 0, gen_helper_sve_while2l) +TRANS_FEAT(WHILE_gt_pair, aa64_sme2_or_sve2p1, do_WHILE, + a, false, 1, 0, gen_helper_sve_while2g) + +TRANS_FEAT(WHILE_lt_cnt2, aa64_sme2_or_sve2p1, do_WHILE, + a, true, 1, 1, gen_helper_sve_whilecl) +TRANS_FEAT(WHILE_lt_cnt4, aa64_sme2_or_sve2p1, do_WHILE, + a, true, 2, 2, gen_helper_sve_whilecl) +TRANS_FEAT(WHILE_gt_cnt2, aa64_sme2_or_sve2p1, do_WHILE, + a, false, 1, 1, gen_helper_sve_whilecg) +TRANS_FEAT(WHILE_gt_cnt4, aa64_sme2_or_sve2p1, do_WHILE, + a, false, 2, 2, gen_helper_sve_whilecg) + static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) { TCGv_i64 op0, op1, diff, t1, tmax; @@ -3193,7 +3535,7 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) op0 = read_cpu_reg(s, a->rn, 1); op1 = read_cpu_reg(s, a->rm, 1); - tmax = tcg_constant_i64(vsz); + tmax = tcg_constant_i64(vsz >> a->esz); diff = tcg_temp_new_i64(); if (a->rw) { @@ -3203,15 +3545,15 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) tcg_gen_sub_i64(diff, op0, op1); tcg_gen_sub_i64(t1, op1, op0); tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1); - /* Round down to a multiple of ESIZE. */ - tcg_gen_andi_i64(diff, diff, -1 << a->esz); + /* Divide, rounding down, by ESIZE. */ + tcg_gen_shri_i64(diff, diff, a->esz); /* If op1 == op0, diff == 0, and the condition is always true. */ tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff); } else { /* WHILEWR */ tcg_gen_sub_i64(diff, op1, op0); - /* Round down to a multiple of ESIZE. */ - tcg_gen_andi_i64(diff, diff, -1 << a->esz); + /* Divide, rounding down, by ESIZE. */ + tcg_gen_shri_i64(diff, diff, a->esz); /* If op0 >= op1, diff <= 0, the condition is always true. */ tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff); } @@ -3234,6 +3576,42 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) return true; } +static bool do_pext(DisasContext *s, arg_pext *a, int n) +{ + TCGv_i32 t_png; + TCGv_ptr t_pd; + int pl; + + if (!sve_access_check(s)) { + return true; + } + + t_png = tcg_temp_new_i32(); + tcg_gen_ld16u_i32(t_png, tcg_env, + pred_full_reg_offset(s, a->rn) ^ + (HOST_BIG_ENDIAN ? 
6 : 0)); + + t_pd = tcg_temp_new_ptr(); + pl = pred_full_reg_size(s); + + for (int i = 0; i < n; ++i) { + int rd = (a->rd + i) % 16; + int part = a->imm * n + i; + unsigned desc = 0; + + desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pl); + desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); + desc = FIELD_DP32(desc, PREDDESC, DATA, part); + + tcg_gen_addi_ptr(t_pd, tcg_env, pred_full_reg_offset(s, rd)); + gen_helper_pext(t_pd, t_png, tcg_constant_i32(desc)); + } + return true; +} + +TRANS_FEAT(PEXT_1, aa64_sme2_or_sve2p1, do_pext, a, 1) +TRANS_FEAT(PEXT_2, aa64_sme2_or_sve2p1, do_pext, a, 2) + /* *** SVE Integer Wide Immediate - Unpredicated Group */ @@ -3361,8 +3739,8 @@ DO_ZZI(UMIN, umin) #undef DO_ZZI static gen_helper_gvec_4 * const dot_fns[2][2] = { - { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h }, - { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h } + { gen_helper_gvec_sdot_4b, gen_helper_gvec_sdot_4h }, + { gen_helper_gvec_udot_4b, gen_helper_gvec_udot_4h } }; TRANS_FEAT(DOT_zzzz, aa64_sve, gen_gvec_ool_zzzz, dot_fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0) @@ -3371,19 +3749,24 @@ TRANS_FEAT(DOT_zzzz, aa64_sve, gen_gvec_ool_zzzz, * SVE Multiply - Indexed */ -TRANS_FEAT(SDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz, - gen_helper_gvec_sdot_idx_b, a) -TRANS_FEAT(SDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz, - gen_helper_gvec_sdot_idx_h, a) -TRANS_FEAT(UDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz, - gen_helper_gvec_udot_idx_b, a) -TRANS_FEAT(UDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz, - gen_helper_gvec_udot_idx_h, a) - -TRANS_FEAT(SUDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz, - gen_helper_gvec_sudot_idx_b, a) -TRANS_FEAT(USDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz, - gen_helper_gvec_usdot_idx_b, a) +TRANS_FEAT(SDOT_zzxw_4s, aa64_sve, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_sdot_idx_4b, a) +TRANS_FEAT(SDOT_zzxw_4d, aa64_sve, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_sdot_idx_4h, a) +TRANS_FEAT(UDOT_zzxw_4s, aa64_sve, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_udot_idx_4b, a) +TRANS_FEAT(UDOT_zzxw_4d, aa64_sve, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_udot_idx_4h, a) + +TRANS_FEAT(SUDOT_zzxw_4s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_sudot_idx_4b, a) +TRANS_FEAT(USDOT_zzxw_4s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_usdot_idx_4b, a) + +TRANS_FEAT(SDOT_zzxw_2s, aa64_sme2_or_sve2p1, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_sdot_idx_2h, a) +TRANS_FEAT(UDOT_zzxw_2s, aa64_sme2_or_sve2p1, gen_gvec_ool_arg_zzxz, + gen_helper_gvec_udot_idx_2h, a) #define DO_SVE2_RRX(NAME, FUNC) \ TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \ @@ -3500,33 +3883,43 @@ DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d) *** SVE Floating Point Multiply-Add Indexed Group */ -static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub) +static bool do_fmla_zzxz(DisasContext *s, arg_rrxr_esz *a, + gen_helper_gvec_4_ptr *fn) { - static gen_helper_gvec_4_ptr * const fns[4] = { - NULL, - gen_helper_gvec_fmla_idx_h, - gen_helper_gvec_fmla_idx_s, - gen_helper_gvec_fmla_idx_d, - }; - return gen_gvec_fpst_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, - (a->index << 1) | sub, - a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); + /* These insns use MO_8 to encode BFloat16 */ + if (a->esz == MO_8 && !dc_isar_feature(aa64_sve_b16b16, s)) { + return false; + } + return gen_gvec_fpst_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index, + a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64); } -TRANS_FEAT(FMLA_zzxz, aa64_sve, do_FMLA_zzxz, a, false) -TRANS_FEAT(FMLS_zzxz, aa64_sve, do_FMLA_zzxz, a, true) +static gen_helper_gvec_4_ptr * const fmla_idx_fns[4] = { + gen_helper_gvec_bfmla_idx, gen_helper_gvec_fmla_idx_h, + gen_helper_gvec_fmla_idx_s, gen_helper_gvec_fmla_idx_d +}; +TRANS_FEAT(FMLA_zzxz, aa64_sve, do_fmla_zzxz, a, fmla_idx_fns[a->esz]) + +static gen_helper_gvec_4_ptr * const fmls_idx_fns[4][2] = { + { gen_helper_gvec_bfmls_idx, gen_helper_gvec_ah_bfmls_idx }, + { gen_helper_gvec_fmls_idx_h, gen_helper_gvec_ah_fmls_idx_h }, + { gen_helper_gvec_fmls_idx_s, gen_helper_gvec_ah_fmls_idx_s }, + { gen_helper_gvec_fmls_idx_d, gen_helper_gvec_ah_fmls_idx_d }, +}; +TRANS_FEAT(FMLS_zzxz, aa64_sve, do_fmla_zzxz, a, + fmls_idx_fns[a->esz][s->fpcr_ah]) /* *** SVE Floating Point Multiply Indexed Group */ static gen_helper_gvec_3_ptr * const fmul_idx_fns[4] = { - NULL, gen_helper_gvec_fmul_idx_h, + gen_helper_gvec_fmul_idx_b16, gen_helper_gvec_fmul_idx_h, gen_helper_gvec_fmul_idx_s, gen_helper_gvec_fmul_idx_d, }; TRANS_FEAT(FMUL_zzx, aa64_sve, gen_gvec_fpst_zzz, fmul_idx_fns[a->esz], a->rd, a->rn, a->rm, a->index, - a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) /* *** SVE Floating Point Fast Reduction Group @@ -3559,7 +3952,7 @@ static bool do_reduce(DisasContext *s, arg_rpr_esz *a, tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn)); tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg)); - status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); + status = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); fn(temp, t_zn, t_pg, status, t_desc); @@ -3574,14 +3967,74 @@ static bool do_reduce(DisasContext *s, arg_rpr_esz *a, }; \ TRANS_FEAT(NAME, aa64_sve, do_reduce, a, name##_fns[a->esz]) +#define DO_VPZ_AH(NAME, name) \ + static gen_helper_fp_reduce * const name##_fns[4] = { \ + NULL, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ + }; \ + static gen_helper_fp_reduce * const name##_ah_fns[4] = { \ + NULL, gen_helper_sve_ah_##name##_h, \ + gen_helper_sve_ah_##name##_s, gen_helper_sve_ah_##name##_d, \ + }; \ + TRANS_FEAT(NAME, aa64_sve, do_reduce, a, \ + s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz]) + DO_VPZ(FADDV, faddv) DO_VPZ(FMINNMV, fminnmv) DO_VPZ(FMAXNMV, fmaxnmv) -DO_VPZ(FMINV, fminv) -DO_VPZ(FMAXV, fmaxv) +DO_VPZ_AH(FMINV, fminv) +DO_VPZ_AH(FMAXV, fmaxv) #undef DO_VPZ +static gen_helper_gvec_3_ptr * const faddqv_fns[4] = { + NULL, gen_helper_sve2p1_faddqv_h, + gen_helper_sve2p1_faddqv_s, gen_helper_sve2p1_faddqv_d, +}; +TRANS_FEAT(FADDQV, aa64_sme2p1_or_sve2p1, gen_gvec_fpst_arg_zpz, + faddqv_fns[a->esz], a, 0, + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) + +static gen_helper_gvec_3_ptr * const fmaxnmqv_fns[4] = { + NULL, gen_helper_sve2p1_fmaxnmqv_h, + gen_helper_sve2p1_fmaxnmqv_s, gen_helper_sve2p1_fmaxnmqv_d, +}; +TRANS_FEAT(FMAXNMQV, aa64_sme2p1_or_sve2p1, gen_gvec_fpst_arg_zpz, + fmaxnmqv_fns[a->esz], a, 0, + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) + +static gen_helper_gvec_3_ptr * const fminnmqv_fns[4] = { + NULL, gen_helper_sve2p1_fminnmqv_h, + gen_helper_sve2p1_fminnmqv_s, gen_helper_sve2p1_fminnmqv_d, +}; +TRANS_FEAT(FMINNMQV, aa64_sme2p1_or_sve2p1, gen_gvec_fpst_arg_zpz, + fminnmqv_fns[a->esz], a, 0, + a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64) + +static gen_helper_gvec_3_ptr * const fmaxqv_fns[4] = { + NULL, gen_helper_sve2p1_fmaxqv_h, + gen_helper_sve2p1_fmaxqv_s, gen_helper_sve2p1_fmaxqv_d, +}; +static gen_helper_gvec_3_ptr * const fmaxqv_ah_fns[4] = { + NULL, gen_helper_sve2p1_ah_fmaxqv_h, + gen_helper_sve2p1_ah_fmaxqv_s, gen_helper_sve2p1_ah_fmaxqv_d, +}; +TRANS_FEAT(FMAXQV, aa64_sme2p1_or_sve2p1, gen_gvec_fpst_arg_zpz, + (s->fpcr_ah ? fmaxqv_ah_fns : fmaxqv_fns)[a->esz], a, 0, + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) + +static gen_helper_gvec_3_ptr * const fminqv_fns[4] = { + NULL, gen_helper_sve2p1_fminqv_h, + gen_helper_sve2p1_fminqv_s, gen_helper_sve2p1_fminqv_d, +}; +static gen_helper_gvec_3_ptr * const fminqv_ah_fns[4] = { + NULL, gen_helper_sve2p1_ah_fminqv_h, + gen_helper_sve2p1_ah_fminqv_s, gen_helper_sve2p1_ah_fminqv_d, +}; +TRANS_FEAT(FMINQV, aa64_sme2p1_or_sve2p1, gen_gvec_fpst_arg_zpz, + (s->fpcr_ah ? fminqv_ah_fns : fminqv_fns)[a->esz], a, 0, + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) + /* *** SVE Floating Point Unary Operations - Unpredicated Group */ @@ -3590,13 +4043,25 @@ static gen_helper_gvec_2_ptr * const frecpe_fns[] = { NULL, gen_helper_gvec_frecpe_h, gen_helper_gvec_frecpe_s, gen_helper_gvec_frecpe_d, }; -TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_arg_zz, frecpe_fns[a->esz], a, 0) +static gen_helper_gvec_2_ptr * const frecpe_rpres_fns[] = { + NULL, gen_helper_gvec_frecpe_h, + gen_helper_gvec_frecpe_rpres_s, gen_helper_gvec_frecpe_d, +}; +TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_ah_arg_zz, + s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? + frecpe_rpres_fns[a->esz] : frecpe_fns[a->esz], a, 0) static gen_helper_gvec_2_ptr * const frsqrte_fns[] = { NULL, gen_helper_gvec_frsqrte_h, gen_helper_gvec_frsqrte_s, gen_helper_gvec_frsqrte_d, }; -TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_arg_zz, frsqrte_fns[a->esz], a, 0) +static gen_helper_gvec_2_ptr * const frsqrte_rpres_fns[] = { + NULL, gen_helper_gvec_frsqrte_h, + gen_helper_gvec_frsqrte_rpres_s, gen_helper_gvec_frsqrte_d, +}; +TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_ah_arg_zz, + s->fpcr_ah && dc_isar_feature(aa64_rpres, s) ? + frsqrte_rpres_fns[a->esz] : frsqrte_fns[a->esz], a, 0) /* *** SVE Floating Point Compare with Zero Group @@ -3611,7 +4076,7 @@ static bool do_ppz_fp(DisasContext *s, arg_rpr_esz *a, if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); TCGv_ptr status = - fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); + fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), @@ -3646,8 +4111,9 @@ static gen_helper_gvec_3_ptr * const ftmad_fns[4] = { gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d, }; TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz, - ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm, - a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) + ftmad_fns[a->esz], a->rd, a->rn, a->rm, + a->imm | (s->fpcr_ah << 3), + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) /* *** SVE Floating Point Accumulating Reduction Group @@ -3680,7 +4146,7 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) t_pg = tcg_temp_new_ptr(); tcg_gen_addi_ptr(t_rm, tcg_env, vec_full_reg_offset(s, a->rm)); tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg)); - t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); + t_fpst = fpstatus_ptr(a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64); t_desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc); @@ -3695,16 +4161,28 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) #define DO_FP3(NAME, name) \ static gen_helper_gvec_3_ptr * const name##_fns[4] = { \ - NULL, gen_helper_gvec_##name##_h, \ + gen_helper_gvec_##name##_b16, gen_helper_gvec_##name##_h, \ gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \ }; \ TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_arg_zzz, name##_fns[a->esz], a, 0) +#define DO_FP3_AH(NAME, name) \ + static gen_helper_gvec_3_ptr * const name##_fns[4] = { \ + NULL, gen_helper_gvec_##name##_h, \ + gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \ + }; \ + static gen_helper_gvec_3_ptr * const name##_ah_fns[4] = { \ + NULL, gen_helper_gvec_ah_##name##_h, \ + gen_helper_gvec_ah_##name##_s, gen_helper_gvec_ah_##name##_d \ + }; \ + TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_ah_arg_zzz, \ + s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz], a, 0) + DO_FP3(FADD_zzz, fadd) DO_FP3(FSUB_zzz, fsub) DO_FP3(FMUL_zzz, fmul) -DO_FP3(FRECPS, recps) -DO_FP3(FRSQRTS, rsqrts) +DO_FP3_AH(FRECPS, recps) +DO_FP3_AH(FRSQRTS, rsqrts) #undef DO_FP3 @@ -3726,14 +4204,48 @@ TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz, }; \ TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, name##_zpzz_fns[a->esz], a) -DO_ZPZZ_FP(FADD_zpzz, aa64_sve, sve_fadd) -DO_ZPZZ_FP(FSUB_zpzz, aa64_sve, sve_fsub) -DO_ZPZZ_FP(FMUL_zpzz, aa64_sve, sve_fmul) -DO_ZPZZ_FP(FMIN_zpzz, aa64_sve, sve_fmin) -DO_ZPZZ_FP(FMAX_zpzz, aa64_sve, sve_fmax) -DO_ZPZZ_FP(FMINNM_zpzz, aa64_sve, sve_fminnum) -DO_ZPZZ_FP(FMAXNM_zpzz, aa64_sve, sve_fmaxnum) -DO_ZPZZ_FP(FABD, aa64_sve, sve_fabd) +#define DO_ZPZZ_AH_FP(NAME, FEAT, name, ah_name) \ + static gen_helper_gvec_4_ptr * const name##_zpzz_fns[4] = { \ + NULL, gen_helper_##name##_h, \ + gen_helper_##name##_s, gen_helper_##name##_d \ + }; \ + static gen_helper_gvec_4_ptr * const name##_ah_zpzz_fns[4] = { \ + NULL, gen_helper_##ah_name##_h, \ + gen_helper_##ah_name##_s, gen_helper_##ah_name##_d \ + }; \ + TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, \ + s->fpcr_ah ? name##_ah_zpzz_fns[a->esz] : \ + name##_zpzz_fns[a->esz], a) + +/* Similar, but for insns where sz == 0 encodes bfloat16 */ +#define DO_ZPZZ_FP_B16(NAME, FEAT, name) \ + static gen_helper_gvec_4_ptr * const name##_zpzz_fns[4] = { \ + gen_helper_##name##_b16, gen_helper_##name##_h, \ + gen_helper_##name##_s, gen_helper_##name##_d \ + }; \ + TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, name##_zpzz_fns[a->esz], a) + +#define DO_ZPZZ_AH_FP_B16(NAME, FEAT, name, ah_name) \ + static gen_helper_gvec_4_ptr * const name##_zpzz_fns[4] = { \ + gen_helper_##name##_b16, gen_helper_##name##_h, \ + gen_helper_##name##_s, gen_helper_##name##_d \ + }; \ + static gen_helper_gvec_4_ptr * const name##_ah_zpzz_fns[4] = { \ + gen_helper_##ah_name##_b16, gen_helper_##ah_name##_h, \ + gen_helper_##ah_name##_s, gen_helper_##ah_name##_d \ + }; \ + TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, \ + s->fpcr_ah ? 
name##_ah_zpzz_fns[a->esz] : \ + name##_zpzz_fns[a->esz], a) + +DO_ZPZZ_FP_B16(FADD_zpzz, aa64_sve, sve_fadd) +DO_ZPZZ_FP_B16(FSUB_zpzz, aa64_sve, sve_fsub) +DO_ZPZZ_FP_B16(FMUL_zpzz, aa64_sve, sve_fmul) +DO_ZPZZ_AH_FP_B16(FMIN_zpzz, aa64_sve, sve_fmin, sve_ah_fmin) +DO_ZPZZ_AH_FP_B16(FMAX_zpzz, aa64_sve, sve_fmax, sve_ah_fmax) +DO_ZPZZ_FP_B16(FMINNM_zpzz, aa64_sve, sve_fminnum) +DO_ZPZZ_FP_B16(FMAXNM_zpzz, aa64_sve, sve_fmaxnum) +DO_ZPZZ_AH_FP(FABD, aa64_sve, sve_fabd, sve_ah_fabd) DO_ZPZZ_FP(FSCALE, aa64_sve, sve_fscalbn) DO_ZPZZ_FP(FDIV, aa64_sve, sve_fdiv) DO_ZPZZ_FP(FMULX, aa64_sve, sve_fmulx) @@ -3755,7 +4267,7 @@ static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16, tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, zn)); tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); - status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR); + status = fpstatus_ptr(is_fp16 ? FPST_A64_F16 : FPST_A64); desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); fn(t_zd, t_zn, t_pg, scalar, status, desc); } @@ -3788,14 +4300,35 @@ static bool do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm, TRANS_FEAT(NAME##_zpzi, aa64_sve, do_fp_imm, a, \ name##_const[a->esz][a->imm], name##_fns[a->esz]) +#define DO_FP_AH_IMM(NAME, name, const0, const1) \ + static gen_helper_sve_fp2scalar * const name##_fns[4] = { \ + NULL, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_s, \ + gen_helper_sve_##name##_d \ + }; \ + static gen_helper_sve_fp2scalar * const name##_ah_fns[4] = { \ + NULL, gen_helper_sve_ah_##name##_h, \ + gen_helper_sve_ah_##name##_s, \ + gen_helper_sve_ah_##name##_d \ + }; \ + static uint64_t const name##_const[4][2] = { \ + { -1, -1 }, \ + { float16_##const0, float16_##const1 }, \ + { float32_##const0, float32_##const1 }, \ + { float64_##const0, float64_##const1 }, \ + }; \ + TRANS_FEAT(NAME##_zpzi, aa64_sve, do_fp_imm, a, \ + name##_const[a->esz][a->imm], \ + s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz]) + DO_FP_IMM(FADD, fadds, half, one) DO_FP_IMM(FSUB, fsubs, half, one) DO_FP_IMM(FMUL, fmuls, half, two) DO_FP_IMM(FSUBR, fsubrs, half, one) DO_FP_IMM(FMAXNM, fmaxnms, zero, one) DO_FP_IMM(FMINNM, fminnms, zero, one) -DO_FP_IMM(FMAX, fmaxs, zero, one) -DO_FP_IMM(FMIN, fmins, zero, one) +DO_FP_AH_IMM(FMAX, fmaxs, zero, one) +DO_FP_AH_IMM(FMIN, fmins, zero, one) #undef DO_FP_IMM @@ -3807,7 +4340,7 @@ static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a, } if (sve_access_check(s)) { unsigned vsz = vec_full_reg_size(s); - TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); + TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); tcg_gen_gvec_4_ptr(pred_full_reg_offset(s, a->rd), vec_full_reg_offset(s, a->rn), vec_full_reg_offset(s, a->rm), @@ -3839,22 +4372,37 @@ static gen_helper_gvec_4_ptr * const fcadd_fns[] = { gen_helper_sve_fcadd_s, gen_helper_sve_fcadd_d, }; TRANS_FEAT(FCADD, aa64_sve, gen_gvec_fpst_zzzp, fcadd_fns[a->esz], - a->rd, a->rn, a->rm, a->pg, a->rot, - a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) + a->rd, a->rn, a->rm, a->pg, a->rot | (s->fpcr_ah << 1), + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) -#define DO_FMLA(NAME, name) \ +static bool do_fmla_zpzzz(DisasContext *s, arg_rprrr_esz *a, + gen_helper_gvec_5_ptr *fn) +{ + /* These insns use MO_8 to encode BFloat16 */ + if (a->esz == MO_8 && !dc_isar_feature(aa64_sve_b16b16, s)) { + return false; + } + return gen_gvec_fpst_zzzzp(s, fn, a->rd, a->rn, a->rm, a->ra, a->pg, 0, + a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64); +} + +#define DO_FMLA(NAME, name, ah_name) \ static gen_helper_gvec_5_ptr * const name##_fns[4] = { \ - NULL, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_b16, gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ }; \ - TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_zzzzp, name##_fns[a->esz], \ - a->rd, a->rn, a->rm, a->ra, a->pg, 0, \ - a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) + static gen_helper_gvec_5_ptr * const name##_ah_fns[4] = { \ + gen_helper_sve_##ah_name##_b16, gen_helper_sve_##ah_name##_h, \ + gen_helper_sve_##ah_name##_s, gen_helper_sve_##ah_name##_d \ + }; \ + TRANS_FEAT(NAME, aa64_sve, do_fmla_zpzzz, a, \ + s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz]) -DO_FMLA(FMLA_zpzzz, fmla_zpzzz) -DO_FMLA(FMLS_zpzzz, fmls_zpzzz) -DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz) -DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz) +/* We don't need an ah_fmla_zpzzz because fmla doesn't negate anything */ +DO_FMLA(FMLA_zpzzz, fmla_zpzzz, fmla_zpzzz) +DO_FMLA(FMLS_zpzzz, fmls_zpzzz, ah_fmls_zpzzz) +DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz, ah_fnmla_zpzzz) +DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz, ah_fnmls_zpzzz) #undef DO_FMLA @@ -3863,67 +4411,68 @@ static gen_helper_gvec_5_ptr * const fcmla_fns[4] = { gen_helper_sve_fcmla_zpzzz_s, gen_helper_sve_fcmla_zpzzz_d, }; TRANS_FEAT(FCMLA_zpzzz, aa64_sve, gen_gvec_fpst_zzzzp, fcmla_fns[a->esz], - a->rd, a->rn, a->rm, a->ra, a->pg, a->rot, - a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) + a->rd, a->rn, a->rm, a->ra, a->pg, a->rot | (s->fpcr_ah << 2), + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) static gen_helper_gvec_4_ptr * const fcmla_idx_fns[4] = { NULL, gen_helper_gvec_fcmlah_idx, gen_helper_gvec_fcmlas_idx, NULL }; TRANS_FEAT(FCMLA_zzxz, aa64_sve, gen_gvec_fpst_zzzz, fcmla_idx_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->index * 4 + a->rot, - a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) /* *** SVE Floating Point Unary Operations Predicated Group */ TRANS_FEAT(FCVT_sh, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvt_sh, a, 0, FPST_FPCR) + gen_helper_sve_fcvt_sh, a, 0, FPST_A64) TRANS_FEAT(FCVT_hs, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvt_hs, a, 0, FPST_FPCR) + gen_helper_sve_fcvt_hs, a, 0, FPST_A64_F16) TRANS_FEAT(BFCVT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz, - gen_helper_sve_bfcvt, a, 0, FPST_FPCR) + gen_helper_sve_bfcvt, a, 0, + s->fpcr_ah ? 
FPST_AH : FPST_A64) TRANS_FEAT(FCVT_dh, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvt_dh, a, 0, FPST_FPCR) + gen_helper_sve_fcvt_dh, a, 0, FPST_A64) TRANS_FEAT(FCVT_hd, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvt_hd, a, 0, FPST_FPCR) + gen_helper_sve_fcvt_hd, a, 0, FPST_A64_F16) TRANS_FEAT(FCVT_ds, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvt_ds, a, 0, FPST_FPCR) + gen_helper_sve_fcvt_ds, a, 0, FPST_A64) TRANS_FEAT(FCVT_sd, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvt_sd, a, 0, FPST_FPCR) + gen_helper_sve_fcvt_sd, a, 0, FPST_A64) TRANS_FEAT(FCVTZS_hh, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzs_hh, a, 0, FPST_FPCR_F16) + gen_helper_sve_fcvtzs_hh, a, 0, FPST_A64_F16) TRANS_FEAT(FCVTZU_hh, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzu_hh, a, 0, FPST_FPCR_F16) + gen_helper_sve_fcvtzu_hh, a, 0, FPST_A64_F16) TRANS_FEAT(FCVTZS_hs, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzs_hs, a, 0, FPST_FPCR_F16) + gen_helper_sve_fcvtzs_hs, a, 0, FPST_A64_F16) TRANS_FEAT(FCVTZU_hs, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzu_hs, a, 0, FPST_FPCR_F16) + gen_helper_sve_fcvtzu_hs, a, 0, FPST_A64_F16) TRANS_FEAT(FCVTZS_hd, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzs_hd, a, 0, FPST_FPCR_F16) + gen_helper_sve_fcvtzs_hd, a, 0, FPST_A64_F16) TRANS_FEAT(FCVTZU_hd, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzu_hd, a, 0, FPST_FPCR_F16) + gen_helper_sve_fcvtzu_hd, a, 0, FPST_A64_F16) TRANS_FEAT(FCVTZS_ss, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzs_ss, a, 0, FPST_FPCR) + gen_helper_sve_fcvtzs_ss, a, 0, FPST_A64) TRANS_FEAT(FCVTZU_ss, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzu_ss, a, 0, FPST_FPCR) + gen_helper_sve_fcvtzu_ss, a, 0, FPST_A64) TRANS_FEAT(FCVTZS_sd, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzs_sd, a, 0, FPST_FPCR) + gen_helper_sve_fcvtzs_sd, a, 0, FPST_A64) TRANS_FEAT(FCVTZU_sd, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzu_sd, a, 0, FPST_FPCR) + gen_helper_sve_fcvtzu_sd, a, 0, FPST_A64) TRANS_FEAT(FCVTZS_ds, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzs_ds, a, 0, FPST_FPCR) + gen_helper_sve_fcvtzs_ds, a, 0, FPST_A64) TRANS_FEAT(FCVTZU_ds, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzu_ds, a, 0, FPST_FPCR) + gen_helper_sve_fcvtzu_ds, a, 0, FPST_A64) TRANS_FEAT(FCVTZS_dd, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzs_dd, a, 0, FPST_FPCR) + gen_helper_sve_fcvtzs_dd, a, 0, FPST_A64) TRANS_FEAT(FCVTZU_dd, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_fcvtzu_dd, a, 0, FPST_FPCR) + gen_helper_sve_fcvtzu_dd, a, 0, FPST_A64) static gen_helper_gvec_3_ptr * const frint_fns[] = { NULL, @@ -3932,7 +4481,7 @@ static gen_helper_gvec_3_ptr * const frint_fns[] = { gen_helper_sve_frint_d }; TRANS_FEAT(FRINTI, aa64_sve, gen_gvec_fpst_arg_zpz, frint_fns[a->esz], - a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) + a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) static gen_helper_gvec_3_ptr * const frintx_fns[] = { NULL, @@ -3941,7 +4490,7 @@ static gen_helper_gvec_3_ptr * const frintx_fns[] = { gen_helper_sve_frintx_d }; TRANS_FEAT(FRINTX, aa64_sve, gen_gvec_fpst_arg_zpz, frintx_fns[a->esz], - a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); + a, 0, a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64); static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, ARMFPRounding mode, gen_helper_gvec_3_ptr *fn) @@ -3958,7 +4507,7 @@ static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, } vsz = vec_full_reg_size(s); - status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); + status = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); tmode = gen_set_rmode(mode, status); tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd), @@ -3986,48 +4535,48 @@ static gen_helper_gvec_3_ptr * const frecpx_fns[] = { gen_helper_sve_frecpx_s, gen_helper_sve_frecpx_d, }; TRANS_FEAT(FRECPX, aa64_sve, gen_gvec_fpst_arg_zpz, frecpx_fns[a->esz], - a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) + a, 0, select_ah_fpst(s, a->esz)) static gen_helper_gvec_3_ptr * const fsqrt_fns[] = { NULL, gen_helper_sve_fsqrt_h, gen_helper_sve_fsqrt_s, gen_helper_sve_fsqrt_d, }; TRANS_FEAT(FSQRT, aa64_sve, gen_gvec_fpst_arg_zpz, fsqrt_fns[a->esz], - a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) + a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) TRANS_FEAT(SCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_scvt_hh, a, 0, FPST_FPCR_F16) + gen_helper_sve_scvt_hh, a, 0, FPST_A64_F16) TRANS_FEAT(SCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_scvt_sh, a, 0, FPST_FPCR_F16) + gen_helper_sve_scvt_sh, a, 0, FPST_A64_F16) TRANS_FEAT(SCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_scvt_dh, a, 0, FPST_FPCR_F16) + gen_helper_sve_scvt_dh, a, 0, FPST_A64_F16) TRANS_FEAT(SCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_scvt_ss, a, 0, FPST_FPCR) + gen_helper_sve_scvt_ss, a, 0, FPST_A64) TRANS_FEAT(SCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_scvt_ds, a, 0, FPST_FPCR) + gen_helper_sve_scvt_ds, a, 0, FPST_A64) TRANS_FEAT(SCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_scvt_sd, a, 0, FPST_FPCR) + gen_helper_sve_scvt_sd, a, 0, FPST_A64) TRANS_FEAT(SCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_scvt_dd, a, 0, FPST_FPCR) + gen_helper_sve_scvt_dd, a, 0, FPST_A64) TRANS_FEAT(UCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_ucvt_hh, a, 0, FPST_FPCR_F16) + gen_helper_sve_ucvt_hh, a, 0, FPST_A64_F16) TRANS_FEAT(UCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_ucvt_sh, a, 0, FPST_FPCR_F16) + gen_helper_sve_ucvt_sh, a, 0, FPST_A64_F16) TRANS_FEAT(UCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_ucvt_dh, a, 0, FPST_FPCR_F16) + gen_helper_sve_ucvt_dh, a, 0, FPST_A64_F16) TRANS_FEAT(UCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_ucvt_ss, a, 0, FPST_FPCR) + gen_helper_sve_ucvt_ss, a, 0, FPST_A64) TRANS_FEAT(UCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_ucvt_ds, a, 0, FPST_FPCR) + gen_helper_sve_ucvt_ds, a, 0, FPST_A64) TRANS_FEAT(UCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_ucvt_sd, a, 0, FPST_FPCR) + gen_helper_sve_ucvt_sd, a, 0, FPST_A64) TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, - gen_helper_sve_ucvt_dd, a, 0, FPST_FPCR) + gen_helper_sve_ucvt_dd, a, 0, FPST_A64) /* *** SVE Memory - 32-bit Gather and Unsized Contiguous Group @@ -4038,7 +4587,7 @@ TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, */ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, - int len, int rn, int imm) + int len, int rn, int imm, MemOp align) { int len_align = QEMU_ALIGN_DOWN(len, 16); int len_remain = len % 16; @@ -4067,12 +4616,15 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, for (i = 0; i < len_align; i += 16) { tcg_gen_qemu_ld_i128(t16, 
clean_addr, midx, - MO_LE | MO_128 | MO_ATOM_NONE); + MO_LE | MO_128 | MO_ATOM_NONE | align); tcg_gen_extr_i128_i64(t0, t1, t16); tcg_gen_st_i64(t0, base, vofs + i); tcg_gen_st_i64(t1, base, vofs + i + 8); tcg_gen_addi_i64(clean_addr, clean_addr, 16); } + if (len_align) { + align = MO_UNALN; + } } else { TCGLabel *loop = gen_new_label(); TCGv_ptr tp, i = tcg_temp_new_ptr(); @@ -4082,7 +4634,7 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, t16 = tcg_temp_new_i128(); tcg_gen_qemu_ld_i128(t16, clean_addr, midx, - MO_LE | MO_128 | MO_ATOM_NONE); + MO_LE | MO_128 | MO_ATOM_NONE | align); tcg_gen_addi_i64(clean_addr, clean_addr, 16); tp = tcg_temp_new_ptr(); @@ -4097,6 +4649,7 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_st_i64(t1, tp, vofs + 8); tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); + align = MO_UNALN; } /* @@ -4105,7 +4658,9 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, */ if (len_remain >= 8) { t0 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ | MO_ATOM_NONE); + tcg_gen_qemu_ld_i64(t0, clean_addr, midx, + MO_LEUQ | MO_ATOM_NONE | align); + align = MO_UNALN; tcg_gen_st_i64(t0, base, vofs + len_align); len_remain -= 8; len_align += 8; @@ -4120,12 +4675,14 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, case 4: case 8: tcg_gen_qemu_ld_i64(t0, clean_addr, midx, - MO_LE | ctz32(len_remain) | MO_ATOM_NONE); + MO_LE | ctz32(len_remain) + | MO_ATOM_NONE | align); break; case 6: t1 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL | MO_ATOM_NONE); + tcg_gen_qemu_ld_i64(t0, clean_addr, midx, + MO_LEUL | MO_ATOM_NONE | align); tcg_gen_addi_i64(clean_addr, clean_addr, 4); tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW | MO_ATOM_NONE); tcg_gen_deposit_i64(t0, t0, t1, 32, 32); @@ -4140,7 +4697,7 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, /* Similarly for stores. */ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, - int len, int rn, int imm) + int len, int rn, int imm, MemOp align) { int len_align = QEMU_ALIGN_DOWN(len, 16); int len_remain = len % 16; @@ -4172,9 +4729,12 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_ld_i64(t1, base, vofs + i + 8); tcg_gen_concat_i64_i128(t16, t0, t1); tcg_gen_qemu_st_i128(t16, clean_addr, midx, - MO_LE | MO_128 | MO_ATOM_NONE); + MO_LE | MO_128 | MO_ATOM_NONE | align); tcg_gen_addi_i64(clean_addr, clean_addr, 16); } + if (len_align) { + align = MO_UNALN; + } } else { TCGLabel *loop = gen_new_label(); TCGv_ptr tp, i = tcg_temp_new_ptr(); @@ -4198,13 +4758,16 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_addi_i64(clean_addr, clean_addr, 16); tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); + align = MO_UNALN; } /* Predicate register stores can be any multiple of 2. 
*/ if (len_remain >= 8) { t0 = tcg_temp_new_i64(); tcg_gen_ld_i64(t0, base, vofs + len_align); - tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ | MO_ATOM_NONE); + tcg_gen_qemu_st_i64(t0, clean_addr, midx, + MO_LEUQ | MO_ATOM_NONE | align); + align = MO_UNALN; len_remain -= 8; len_align += 8; if (len_remain) { @@ -4220,11 +4783,13 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, case 4: case 8: tcg_gen_qemu_st_i64(t0, clean_addr, midx, - MO_LE | ctz32(len_remain) | MO_ATOM_NONE); + MO_LE | ctz32(len_remain) + | MO_ATOM_NONE | align); break; case 6: - tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL | MO_ATOM_NONE); + tcg_gen_qemu_st_i64(t0, clean_addr, midx, + MO_LEUL | MO_ATOM_NONE | align); tcg_gen_addi_i64(clean_addr, clean_addr, 4); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW | MO_ATOM_NONE); @@ -4244,7 +4809,8 @@ static bool trans_LDR_zri(DisasContext *s, arg_rri *a) if (sve_access_check(s)) { int size = vec_full_reg_size(s); int off = vec_full_reg_offset(s, a->rd); - gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size); + gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size, + s->align_mem ? MO_ALIGN_16 : MO_UNALN); } return true; } @@ -4257,7 +4823,8 @@ static bool trans_LDR_pri(DisasContext *s, arg_rri *a) if (sve_access_check(s)) { int size = pred_full_reg_size(s); int off = pred_full_reg_offset(s, a->rd); - gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size); + gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size, + s->align_mem ? MO_ALIGN_2 : MO_UNALN); } return true; } @@ -4270,7 +4837,8 @@ static bool trans_STR_zri(DisasContext *s, arg_rri *a) if (sve_access_check(s)) { int size = vec_full_reg_size(s); int off = vec_full_reg_offset(s, a->rd); - gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size); + gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size, + s->align_mem ? MO_ALIGN_16 : MO_UNALN); } return true; } @@ -4283,7 +4851,8 @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a) if (sve_access_check(s)) { int size = pred_full_reg_size(s); int off = pred_full_reg_offset(s, a->rd); - gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size); + gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size, + s->align_mem ? MO_ALIGN_2 : MO_UNALN); } return true; } @@ -4293,34 +4862,37 @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a) */ /* The memory mode of the dtype. */ -static const MemOp dtype_mop[16] = { +static const MemOp dtype_mop[19] = { MO_UB, MO_UB, MO_UB, MO_UB, MO_SL, MO_UW, MO_UW, MO_UW, MO_SW, MO_SW, MO_UL, MO_UL, - MO_SB, MO_SB, MO_SB, MO_UQ + MO_SB, MO_SB, MO_SB, MO_UQ, + /* Artificial values used by decode */ + MO_UL, MO_UQ, MO_128, }; #define dtype_msz(x) (dtype_mop[x] & MO_SIZE) /* The vector element size of dtype. */ -static const uint8_t dtype_esz[16] = { +static const uint8_t dtype_esz[19] = { 0, 1, 2, 3, 3, 1, 2, 3, 3, 2, 2, 3, - 3, 2, 1, 3 + 3, 2, 1, 3, + /* Artificial values used by decode */ + 4, 4, 4, }; -uint32_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs, +uint64_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs, uint32_t msz, bool is_write, uint32_t data) { uint32_t sizem1; - uint32_t desc = 0; + uint64_t desc = 0; /* Assert all of the data fits, with or without MTE enabled. 
*/ assert(nregs >= 1 && nregs <= 4); sizem1 = (nregs << msz) - 1; assert(sizem1 <= R_MTEDESC_SIZEM1_MASK >> R_MTEDESC_SIZEM1_SHIFT); - assert(data < 1u << SVE_MTEDESC_SHIFT); if (s->mte_active[0]) { desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s)); @@ -4328,9 +4900,9 @@ uint32_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs, desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); desc = FIELD_DP32(desc, MTEDESC, SIZEM1, sizem1); - desc <<= SVE_MTEDESC_SHIFT; + desc <<= 32; } - return simd_desc(vsz, vsz, desc | data); + return simd_desc(vsz, vsz, data) | desc; } static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, @@ -4338,7 +4910,7 @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, gen_helper_gvec_mem *fn) { TCGv_ptr t_pg; - uint32_t desc; + uint64_t desc; if (!s->mte_active[0]) { addr = clean_data_tbi(s, addr); @@ -4354,11 +4926,11 @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, t_pg = tcg_temp_new_ptr(); tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); - fn(tcg_env, t_pg, addr, tcg_constant_i32(desc)); + fn(tcg_env, t_pg, addr, tcg_constant_i64(desc)); } /* Indexed by [mte][be][dtype][nreg] */ -static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = { +static gen_helper_gvec_mem * const ldr_fns[2][2][19][4] = { { /* mte inactive, little-endian */ { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r, gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r }, @@ -4382,7 +4954,13 @@ static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = { { gen_helper_sve_ld1bss_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL }, { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r, - gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } }, + gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r }, + + { gen_helper_sve_ld1squ_le_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1dqu_le_r, NULL, NULL, NULL }, + { NULL, gen_helper_sve_ld2qq_le_r, + gen_helper_sve_ld3qq_le_r, gen_helper_sve_ld4qq_le_r }, + }, /* mte inactive, big-endian */ { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r, @@ -4407,7 +4985,14 @@ static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = { { gen_helper_sve_ld1bss_r, NULL, NULL, NULL }, { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL }, { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r, - gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } }, + gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r }, + + { gen_helper_sve_ld1squ_be_r, NULL, NULL, NULL }, + { gen_helper_sve_ld1dqu_be_r, NULL, NULL, NULL }, + { NULL, gen_helper_sve_ld2qq_be_r, + gen_helper_sve_ld3qq_be_r, gen_helper_sve_ld4qq_be_r }, + }, + }, { /* mte active, little-endian */ { { gen_helper_sve_ld1bb_r_mte, @@ -4440,7 +5025,15 @@ static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = { { gen_helper_sve_ld1dd_le_r_mte, gen_helper_sve_ld2dd_le_r_mte, gen_helper_sve_ld3dd_le_r_mte, - gen_helper_sve_ld4dd_le_r_mte } }, + gen_helper_sve_ld4dd_le_r_mte }, + + { gen_helper_sve_ld1squ_le_r_mte, NULL, NULL, NULL }, + { gen_helper_sve_ld1dqu_le_r_mte, NULL, NULL, NULL }, + { NULL, + gen_helper_sve_ld2qq_le_r_mte, + gen_helper_sve_ld3qq_le_r_mte, + gen_helper_sve_ld4qq_le_r_mte }, + }, /* mte active, big-endian */ { { gen_helper_sve_ld1bb_r_mte, @@ -4473,7 +5066,16 @@ static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = { { gen_helper_sve_ld1dd_be_r_mte, gen_helper_sve_ld2dd_be_r_mte, gen_helper_sve_ld3dd_be_r_mte, - gen_helper_sve_ld4dd_be_r_mte } } }, + 
gen_helper_sve_ld4dd_be_r_mte }, + + { gen_helper_sve_ld1squ_be_r_mte, NULL, NULL, NULL }, + { gen_helper_sve_ld1dqu_be_r_mte, NULL, NULL, NULL }, + { NULL, + gen_helper_sve_ld2qq_be_r_mte, + gen_helper_sve_ld3qq_be_r_mte, + gen_helper_sve_ld4qq_be_r_mte }, + }, + }, }; static void do_ld_zpa(DisasContext *s, int zt, int pg, @@ -4492,9 +5094,32 @@ static void do_ld_zpa(DisasContext *s, int zt, int pg, static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a) { - if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) { + if (a->rm == 31) { return false; } + + /* dtypes 16-18 are artificial, representing 128-bit element */ + switch (a->dtype) { + case 0 ... 15: + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } + break; + case 16: case 17: + if (!dc_isar_feature(aa64_sve2p1, s)) { + return false; + } + s->is_nonstreaming = true; + break; + case 18: + if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) { + return false; + } + break; + default: + g_assert_not_reached(); + } + if (sve_access_check(s)) { TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); @@ -4506,9 +5131,28 @@ static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a) static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a) { - if (!dc_isar_feature(aa64_sve, s)) { - return false; + /* dtypes 16-18 are artificial, representing 128-bit element */ + switch (a->dtype) { + case 0 ... 15: + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } + break; + case 16: case 17: + if (!dc_isar_feature(aa64_sve2p1, s)) { + return false; + } + s->is_nonstreaming = true; + break; + case 18: + if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) { + return false; + } + break; + default: + g_assert_not_reached(); } + if (sve_access_check(s)) { int vsz = vec_full_reg_size(s); int elements = vsz >> dtype_esz[a->dtype]; @@ -4734,7 +5378,7 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) unsigned vsz = vec_full_reg_size(s); TCGv_ptr t_pg; int poff; - uint32_t desc; + uint64_t desc; /* Load the first quadword using the normal predicated load helpers. */ if (!s->mte_active[0]) { @@ -4765,7 +5409,7 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) gen_helper_gvec_mem *fn = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; desc = make_svemte_desc(s, 16, 1, dtype_msz(dtype), false, zt); - fn(tcg_env, t_pg, addr, tcg_constant_i32(desc)); + fn(tcg_env, t_pg, addr, tcg_constant_i64(desc)); /* Replicate that first quadword. */ if (vsz > 16) { @@ -4808,7 +5452,7 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) unsigned vsz_r32; TCGv_ptr t_pg; int poff, doff; - uint32_t desc; + uint64_t desc; if (vsz < 32) { /* @@ -4849,7 +5493,7 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) gen_helper_gvec_mem *fn = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; desc = make_svemte_desc(s, 32, 1, dtype_msz(dtype), false, zt); - fn(tcg_env, t_pg, addr, tcg_constant_i32(desc)); + fn(tcg_env, t_pg, addr, tcg_constant_i64(desc)); /* * Replicate that first octaword. 
@@ -4955,7 +5599,7 @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz, int esz, int nreg) { - static gen_helper_gvec_mem * const fn_single[2][2][4][4] = { + static gen_helper_gvec_mem * const fn_single[2][2][4][5] = { { { { gen_helper_sve_st1bb_r, gen_helper_sve_st1bh_r, gen_helper_sve_st1bs_r, @@ -4966,9 +5610,11 @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, gen_helper_sve_st1hd_le_r }, { NULL, NULL, gen_helper_sve_st1ss_le_r, - gen_helper_sve_st1sd_le_r }, + gen_helper_sve_st1sd_le_r, + gen_helper_sve_st1sq_le_r, }, { NULL, NULL, NULL, - gen_helper_sve_st1dd_le_r } }, + gen_helper_sve_st1dd_le_r, + gen_helper_sve_st1dq_le_r, } }, { { gen_helper_sve_st1bb_r, gen_helper_sve_st1bh_r, gen_helper_sve_st1bs_r, @@ -4979,9 +5625,11 @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, gen_helper_sve_st1hd_be_r }, { NULL, NULL, gen_helper_sve_st1ss_be_r, - gen_helper_sve_st1sd_be_r }, + gen_helper_sve_st1sd_be_r, + gen_helper_sve_st1sq_be_r }, { NULL, NULL, NULL, - gen_helper_sve_st1dd_be_r } } }, + gen_helper_sve_st1dd_be_r, + gen_helper_sve_st1dq_be_r } } }, { { { gen_helper_sve_st1bb_r_mte, gen_helper_sve_st1bh_r_mte, @@ -4993,9 +5641,11 @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, gen_helper_sve_st1hd_le_r_mte }, { NULL, NULL, gen_helper_sve_st1ss_le_r_mte, - gen_helper_sve_st1sd_le_r_mte }, + gen_helper_sve_st1sd_le_r_mte, + gen_helper_sve_st1sq_le_r_mte }, { NULL, NULL, NULL, - gen_helper_sve_st1dd_le_r_mte } }, + gen_helper_sve_st1dd_le_r_mte, + gen_helper_sve_st1dq_le_r_mte } }, { { gen_helper_sve_st1bb_r_mte, gen_helper_sve_st1bh_r_mte, gen_helper_sve_st1bs_r_mte, @@ -5006,59 +5656,73 @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, gen_helper_sve_st1hd_be_r_mte }, { NULL, NULL, gen_helper_sve_st1ss_be_r_mte, - gen_helper_sve_st1sd_be_r_mte }, + gen_helper_sve_st1sd_be_r_mte, + gen_helper_sve_st1sq_be_r_mte }, { NULL, NULL, NULL, - gen_helper_sve_st1dd_be_r_mte } } }, + gen_helper_sve_st1dd_be_r_mte, + gen_helper_sve_st1dq_be_r_mte } } }, }; - static gen_helper_gvec_mem * const fn_multiple[2][2][3][4] = { + static gen_helper_gvec_mem * const fn_multiple[2][2][3][5] = { { { { gen_helper_sve_st2bb_r, gen_helper_sve_st2hh_le_r, gen_helper_sve_st2ss_le_r, - gen_helper_sve_st2dd_le_r }, + gen_helper_sve_st2dd_le_r, + gen_helper_sve_st2qq_le_r }, { gen_helper_sve_st3bb_r, gen_helper_sve_st3hh_le_r, gen_helper_sve_st3ss_le_r, - gen_helper_sve_st3dd_le_r }, + gen_helper_sve_st3dd_le_r, + gen_helper_sve_st3qq_le_r }, { gen_helper_sve_st4bb_r, gen_helper_sve_st4hh_le_r, gen_helper_sve_st4ss_le_r, - gen_helper_sve_st4dd_le_r } }, + gen_helper_sve_st4dd_le_r, + gen_helper_sve_st4qq_le_r } }, { { gen_helper_sve_st2bb_r, gen_helper_sve_st2hh_be_r, gen_helper_sve_st2ss_be_r, - gen_helper_sve_st2dd_be_r }, + gen_helper_sve_st2dd_be_r, + gen_helper_sve_st2qq_be_r }, { gen_helper_sve_st3bb_r, gen_helper_sve_st3hh_be_r, gen_helper_sve_st3ss_be_r, - gen_helper_sve_st3dd_be_r }, + gen_helper_sve_st3dd_be_r, + gen_helper_sve_st3qq_be_r }, { gen_helper_sve_st4bb_r, gen_helper_sve_st4hh_be_r, gen_helper_sve_st4ss_be_r, - gen_helper_sve_st4dd_be_r } } }, + gen_helper_sve_st4dd_be_r, + gen_helper_sve_st4qq_be_r } } }, { { { gen_helper_sve_st2bb_r_mte, gen_helper_sve_st2hh_le_r_mte, gen_helper_sve_st2ss_le_r_mte, - gen_helper_sve_st2dd_le_r_mte }, + gen_helper_sve_st2dd_le_r_mte, + gen_helper_sve_st2qq_le_r_mte }, { 
gen_helper_sve_st3bb_r_mte, gen_helper_sve_st3hh_le_r_mte, gen_helper_sve_st3ss_le_r_mte, - gen_helper_sve_st3dd_le_r_mte }, + gen_helper_sve_st3dd_le_r_mte, + gen_helper_sve_st3qq_le_r_mte }, { gen_helper_sve_st4bb_r_mte, gen_helper_sve_st4hh_le_r_mte, gen_helper_sve_st4ss_le_r_mte, - gen_helper_sve_st4dd_le_r_mte } }, + gen_helper_sve_st4dd_le_r_mte, + gen_helper_sve_st4qq_le_r_mte } }, { { gen_helper_sve_st2bb_r_mte, gen_helper_sve_st2hh_be_r_mte, gen_helper_sve_st2ss_be_r_mte, - gen_helper_sve_st2dd_be_r_mte }, + gen_helper_sve_st2dd_be_r_mte, + gen_helper_sve_st2qq_be_r_mte }, { gen_helper_sve_st3bb_r_mte, gen_helper_sve_st3hh_be_r_mte, gen_helper_sve_st3ss_be_r_mte, - gen_helper_sve_st3dd_be_r_mte }, + gen_helper_sve_st3dd_be_r_mte, + gen_helper_sve_st3qq_be_r_mte }, { gen_helper_sve_st4bb_r_mte, gen_helper_sve_st4hh_be_r_mte, gen_helper_sve_st4ss_be_r_mte, - gen_helper_sve_st4dd_be_r_mte } } }, + gen_helper_sve_st4dd_be_r_mte, + gen_helper_sve_st4qq_be_r_mte } } }, }; gen_helper_gvec_mem *fn; int be = s->be_data == MO_BE; @@ -5077,12 +5741,32 @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a) { - if (!dc_isar_feature(aa64_sve, s)) { - return false; - } if (a->rm == 31 || a->msz > a->esz) { return false; } + switch (a->esz) { + case MO_8 ... MO_64: + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } + break; + case MO_128: + if (a->nreg == 0) { + assert(a->msz < a->esz); + if (!dc_isar_feature(aa64_sve2p1, s)) { + return false; + } + s->is_nonstreaming = true; + } else { + if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) { + return false; + } + } + break; + default: + g_assert_not_reached(); + } + if (sve_access_check(s)) { TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz); @@ -5094,12 +5778,32 @@ static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a) static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a) { - if (!dc_isar_feature(aa64_sve, s)) { - return false; - } if (a->msz > a->esz) { return false; } + switch (a->esz) { + case MO_8 ... MO_64: + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } + break; + case MO_128: + if (a->nreg == 0) { + assert(a->msz < a->esz); + if (!dc_isar_feature(aa64_sve2p1, s)) { + return false; + } + s->is_nonstreaming = true; + } else { + if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) { + return false; + } + } + break; + default: + g_assert_not_reached(); + } + if (sve_access_check(s)) { int vsz = vec_full_reg_size(s); int elements = vsz >> a->esz; @@ -5123,14 +5827,14 @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, TCGv_ptr t_zm = tcg_temp_new_ptr(); TCGv_ptr t_pg = tcg_temp_new_ptr(); TCGv_ptr t_zt = tcg_temp_new_ptr(); - uint32_t desc; + uint64_t desc; tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); tcg_gen_addi_ptr(t_zm, tcg_env, vec_full_reg_offset(s, zm)); tcg_gen_addi_ptr(t_zt, tcg_env, vec_full_reg_offset(s, zt)); desc = make_svemte_desc(s, vec_full_reg_size(s), 1, msz, is_write, scale); - fn(tcg_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc)); + fn(tcg_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i64(desc)); } /* Indexed by [mte][be][ff][xs][u][msz]. 
*/ @@ -5461,6 +6165,14 @@ gather_load_fn64[2][2][2][3][2][4] = { gen_helper_sve_ldffdd_be_zd_mte, } } } } }, }; +static gen_helper_gvec_mem_scatter * const +gather_load_fn128[2][2] = { + { gen_helper_sve_ldqq_le_zd, + gen_helper_sve_ldqq_be_zd }, + { gen_helper_sve_ldqq_le_zd_mte, + gen_helper_sve_ldqq_be_zd_mte } +}; + static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) { gen_helper_gvec_mem_scatter *fn = NULL; @@ -5482,6 +6194,8 @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) case MO_64: fn = gather_load_fn64[mte][be][a->ff][a->xs][a->u][a->msz]; break; + default: + g_assert_not_reached(); } assert(fn != NULL); @@ -5490,6 +6204,32 @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) return true; } +static bool trans_LD1Q(DisasContext *s, arg_LD1Q *a) +{ + gen_helper_gvec_mem_scatter *fn = NULL; + bool be = s->be_data == MO_BE; + bool mte = s->mte_active[0]; + + if (!dc_isar_feature(aa64_sve2p1, s)) { + return false; + } + s->is_nonstreaming = true; + if (!sve_access_check(s)) { + return true; + } + + fn = gather_load_fn128[mte][be]; + assert(fn != NULL); + + /* + * Unlike LD1_zprz, a->rm is the scalar register and it can be XZR, not XSP. + * a->rn is the vector register. + */ + do_mem_zpz(s, a->rd, a->pg, a->rn, 0, + cpu_reg(s, a->rm), MO_128, false, fn); + return true; +} + static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a) { gen_helper_gvec_mem_scatter *fn = NULL; @@ -5649,6 +6389,14 @@ static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][2][3][4] = { gen_helper_sve_stdd_be_zd_mte, } } }, }; +static gen_helper_gvec_mem_scatter * const +scatter_store_fn128[2][2] = { + { gen_helper_sve_stqq_le_zd, + gen_helper_sve_stqq_be_zd }, + { gen_helper_sve_stqq_le_zd_mte, + gen_helper_sve_stqq_be_zd_mte } +}; + static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a) { gen_helper_gvec_mem_scatter *fn; @@ -5680,6 +6428,29 @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a) return true; } +static bool trans_ST1Q(DisasContext *s, arg_ST1Q *a) +{ + gen_helper_gvec_mem_scatter *fn; + bool be = s->be_data == MO_BE; + bool mte = s->mte_active[0]; + + if (!dc_isar_feature(aa64_sve2p1, s)) { + return false; + } + s->is_nonstreaming = true; + if (!sve_access_check(s)) { + return true; + } + fn = scatter_store_fn128[mte][be]; + /* + * Unlike ST1_zprz, a->rm is the scalar register, and it + * can be XZR, not XSP. a->rn is the vector register. 
+ */ + do_mem_zpz(s, a->rd, a->pg, a->rn, 0, + cpu_reg(s, a->rm), MO_128, true, fn); + return true; +} + static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a) { gen_helper_gvec_mem_scatter *fn = NULL; @@ -5806,6 +6577,7 @@ TRANS_FEAT(MOVPRFX_z, aa64_sve, do_movz_zpz, a->rd, a->rn, a->pg, a->esz, false) */ TRANS_FEAT(MUL_zzz, aa64_sve2, gen_gvec_fn_arg_zzz, tcg_gen_gvec_mul, a) +TRANS_FEAT(SQDMULH_zzz, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_sve2_sqdmulh, a) static gen_helper_gvec_3 * const smulh_zzz_fns[4] = { gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h, @@ -5824,13 +6596,6 @@ TRANS_FEAT(UMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, TRANS_FEAT(PMUL_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, gen_helper_gvec_pmul_b, a, 0) -static gen_helper_gvec_3 * const sqdmulh_zzz_fns[4] = { - gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h, - gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d, -}; -TRANS_FEAT(SQDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, - sqdmulh_zzz_fns[a->esz], a, 0) - static gen_helper_gvec_3 * const sqrdmulh_zzz_fns[4] = { gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h, gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d, @@ -6062,9 +6827,9 @@ static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) if (top) { if (shl == halfbits) { - TCGv_vec t = tcg_temp_new_vec_matching(d); - tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits)); - tcg_gen_and_vec(vece, d, n, t); + tcg_gen_and_vec(vece, d, n, + tcg_constant_vec_matching(d, vece, + MAKE_64BIT_MASK(halfbits, halfbits))); } else { tcg_gen_sari_vec(vece, d, n, halfbits); tcg_gen_shli_vec(vece, d, d, shl); @@ -6119,18 +6884,18 @@ static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) if (top) { if (shl == halfbits) { - TCGv_vec t = tcg_temp_new_vec_matching(d); - tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits)); - tcg_gen_and_vec(vece, d, n, t); + tcg_gen_and_vec(vece, d, n, + tcg_constant_vec_matching(d, vece, + MAKE_64BIT_MASK(halfbits, halfbits))); } else { tcg_gen_shri_vec(vece, d, n, halfbits); tcg_gen_shli_vec(vece, d, d, shl); } } else { if (shl == 0) { - TCGv_vec t = tcg_temp_new_vec_matching(d); - tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); - tcg_gen_and_vec(vece, d, n, t); + tcg_gen_and_vec(vece, d, n, + tcg_constant_vec_matching(d, vece, + MAKE_64BIT_MASK(0, halfbits))); } else { tcg_gen_shli_vec(vece, d, n, halfbits); tcg_gen_shri_vec(vece, d, d, halfbits - shl); @@ -6298,18 +7063,14 @@ static const TCGOpcode sqxtn_list[] = { static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; int64_t mask = (1ull << halfbits) - 1; int64_t min = -1ull << (halfbits - 1); int64_t max = -min - 1; - tcg_gen_dupi_vec(vece, t, min); - tcg_gen_smax_vec(vece, d, n, t); - tcg_gen_dupi_vec(vece, t, max); - tcg_gen_smin_vec(vece, d, d, t); - tcg_gen_dupi_vec(vece, t, mask); - tcg_gen_and_vec(vece, d, d, t); + tcg_gen_smax_vec(vece, d, n, tcg_constant_vec_matching(d, vece, min)); + tcg_gen_smin_vec(vece, d, d, tcg_constant_vec_matching(d, vece, max)); + tcg_gen_and_vec(vece, d, d, tcg_constant_vec_matching(d, vece, mask)); } static const GVecGen2 sqxtnb_ops[3] = { @@ -6330,19 +7091,15 @@ TRANS_FEAT(SQXTNB, aa64_sve2, do_narrow_extract, a, sqxtnb_ops) static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; int64_t mask = (1ull << halfbits) - 1; int64_t min = -1ull << (halfbits - 1); 
int64_t max = -min - 1; - tcg_gen_dupi_vec(vece, t, min); - tcg_gen_smax_vec(vece, n, n, t); - tcg_gen_dupi_vec(vece, t, max); - tcg_gen_smin_vec(vece, n, n, t); + tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, min)); + tcg_gen_smin_vec(vece, n, n, tcg_constant_vec_matching(d, vece, max)); tcg_gen_shli_vec(vece, n, n, halfbits); - tcg_gen_dupi_vec(vece, t, mask); - tcg_gen_bitsel_vec(vece, d, t, d, n); + tcg_gen_bitsel_vec(vece, d, tcg_constant_vec_matching(d, vece, mask), d, n); } static const GVecGen2 sqxtnt_ops[3] = { @@ -6370,12 +7127,10 @@ static const TCGOpcode uqxtn_list[] = { static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; int64_t max = (1ull << halfbits) - 1; - tcg_gen_dupi_vec(vece, t, max); - tcg_gen_umin_vec(vece, d, n, t); + tcg_gen_umin_vec(vece, d, n, tcg_constant_vec_matching(d, vece, max)); } static const GVecGen2 uqxtnb_ops[3] = { @@ -6396,14 +7151,13 @@ TRANS_FEAT(UQXTNB, aa64_sve2, do_narrow_extract, a, uqxtnb_ops) static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; int64_t max = (1ull << halfbits) - 1; + TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max); - tcg_gen_dupi_vec(vece, t, max); - tcg_gen_umin_vec(vece, n, n, t); + tcg_gen_umin_vec(vece, n, n, maxv); tcg_gen_shli_vec(vece, n, n, halfbits); - tcg_gen_bitsel_vec(vece, d, t, d, n); + tcg_gen_bitsel_vec(vece, d, maxv, d, n); } static const GVecGen2 uqxtnt_ops[3] = { @@ -6431,14 +7185,11 @@ static const TCGOpcode sqxtun_list[] = { static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; int64_t max = (1ull << halfbits) - 1; - tcg_gen_dupi_vec(vece, t, 0); - tcg_gen_smax_vec(vece, d, n, t); - tcg_gen_dupi_vec(vece, t, max); - tcg_gen_umin_vec(vece, d, d, t); + tcg_gen_smax_vec(vece, d, n, tcg_constant_vec_matching(d, vece, 0)); + tcg_gen_umin_vec(vece, d, d, tcg_constant_vec_matching(d, vece, max)); } static const GVecGen2 sqxtunb_ops[3] = { @@ -6459,16 +7210,14 @@ TRANS_FEAT(SQXTUNB, aa64_sve2, do_narrow_extract, a, sqxtunb_ops) static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; int64_t max = (1ull << halfbits) - 1; + TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max); - tcg_gen_dupi_vec(vece, t, 0); - tcg_gen_smax_vec(vece, n, n, t); - tcg_gen_dupi_vec(vece, t, max); - tcg_gen_umin_vec(vece, n, n, t); + tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, 0)); + tcg_gen_umin_vec(vece, n, n, maxv); tcg_gen_shli_vec(vece, n, n, halfbits); - tcg_gen_bitsel_vec(vece, d, t, d, n); + tcg_gen_bitsel_vec(vece, d, maxv, d, n); } static const GVecGen2 sqxtunt_ops[3] = { @@ -6532,13 +7281,11 @@ static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; uint64_t mask = MAKE_64BIT_MASK(0, halfbits); tcg_gen_shri_vec(vece, n, n, shr); - tcg_gen_dupi_vec(vece, t, mask); - tcg_gen_and_vec(vece, d, n, t); + tcg_gen_and_vec(vece, d, n, tcg_constant_vec_matching(d, vece, mask)); } static const TCGOpcode shrnb_vec_list[] = { INDEX_op_shri_vec, 0 }; @@ -6590,13 +7337,11 @@ static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec 
n, int64_t shr) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; uint64_t mask = MAKE_64BIT_MASK(0, halfbits); tcg_gen_shli_vec(vece, n, n, halfbits - shr); - tcg_gen_dupi_vec(vece, t, mask); - tcg_gen_bitsel_vec(vece, d, t, d, n); + tcg_gen_bitsel_vec(vece, d, tcg_constant_vec_matching(d, vece, mask), d, n); } static const TCGOpcode shrnt_vec_list[] = { INDEX_op_shli_vec, 0 }; @@ -6639,14 +7384,12 @@ TRANS_FEAT(RSHRNT, aa64_sve2, do_shr_narrow, a, rshrnt_ops) static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; + uint64_t max = MAKE_64BIT_MASK(0, halfbits); tcg_gen_sari_vec(vece, n, n, shr); - tcg_gen_dupi_vec(vece, t, 0); - tcg_gen_smax_vec(vece, n, n, t); - tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); - tcg_gen_umin_vec(vece, d, n, t); + tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, 0)); + tcg_gen_umin_vec(vece, d, n, tcg_constant_vec_matching(d, vece, max)); } static const TCGOpcode sqshrunb_vec_list[] = { @@ -6671,16 +7414,15 @@ TRANS_FEAT(SQSHRUNB, aa64_sve2, do_shr_narrow, a, sqshrunb_ops) static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; + uint64_t max = MAKE_64BIT_MASK(0, halfbits); + TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max); tcg_gen_sari_vec(vece, n, n, shr); - tcg_gen_dupi_vec(vece, t, 0); - tcg_gen_smax_vec(vece, n, n, t); - tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); - tcg_gen_umin_vec(vece, n, n, t); + tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, 0)); + tcg_gen_umin_vec(vece, n, n, maxv); tcg_gen_shli_vec(vece, n, n, halfbits); - tcg_gen_bitsel_vec(vece, d, t, d, n); + tcg_gen_bitsel_vec(vece, d, maxv, d, n); } static const TCGOpcode sqshrunt_vec_list[] = { @@ -6723,18 +7465,15 @@ TRANS_FEAT(SQRSHRUNT, aa64_sve2, do_shr_narrow, a, sqrshrunt_ops) static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; int64_t max = MAKE_64BIT_MASK(0, halfbits - 1); int64_t min = -max - 1; + int64_t mask = MAKE_64BIT_MASK(0, halfbits); tcg_gen_sari_vec(vece, n, n, shr); - tcg_gen_dupi_vec(vece, t, min); - tcg_gen_smax_vec(vece, n, n, t); - tcg_gen_dupi_vec(vece, t, max); - tcg_gen_smin_vec(vece, n, n, t); - tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); - tcg_gen_and_vec(vece, d, n, t); + tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, min)); + tcg_gen_smin_vec(vece, n, n, tcg_constant_vec_matching(d, vece, max)); + tcg_gen_and_vec(vece, d, n, tcg_constant_vec_matching(d, vece, mask)); } static const TCGOpcode sqshrnb_vec_list[] = { @@ -6759,19 +7498,16 @@ TRANS_FEAT(SQSHRNB, aa64_sve2, do_shr_narrow, a, sqshrnb_ops) static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; int64_t max = MAKE_64BIT_MASK(0, halfbits - 1); int64_t min = -max - 1; + int64_t mask = MAKE_64BIT_MASK(0, halfbits); tcg_gen_sari_vec(vece, n, n, shr); - tcg_gen_dupi_vec(vece, t, min); - tcg_gen_smax_vec(vece, n, n, t); - tcg_gen_dupi_vec(vece, t, max); - tcg_gen_smin_vec(vece, n, n, t); + tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, min)); + tcg_gen_smin_vec(vece, n, n, tcg_constant_vec_matching(d, vece, max)); tcg_gen_shli_vec(vece, n, n, halfbits); - tcg_gen_dupi_vec(vece, t, 
MAKE_64BIT_MASK(0, halfbits)); - tcg_gen_bitsel_vec(vece, d, t, d, n); + tcg_gen_bitsel_vec(vece, d, tcg_constant_vec_matching(d, vece, mask), d, n); } static const TCGOpcode sqshrnt_vec_list[] = { @@ -6814,12 +7550,11 @@ TRANS_FEAT(SQRSHRNT, aa64_sve2, do_shr_narrow, a, sqrshrnt_ops) static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; + int64_t max = MAKE_64BIT_MASK(0, halfbits); tcg_gen_shri_vec(vece, n, n, shr); - tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); - tcg_gen_umin_vec(vece, d, n, t); + tcg_gen_umin_vec(vece, d, n, tcg_constant_vec_matching(d, vece, max)); } static const TCGOpcode uqshrnb_vec_list[] = { @@ -6844,14 +7579,14 @@ TRANS_FEAT(UQSHRNB, aa64_sve2, do_shr_narrow, a, uqshrnb_ops) static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) { - TCGv_vec t = tcg_temp_new_vec_matching(d); int halfbits = 4 << vece; + int64_t max = MAKE_64BIT_MASK(0, halfbits); + TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max); tcg_gen_shri_vec(vece, n, n, shr); - tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); - tcg_gen_umin_vec(vece, n, n, t); + tcg_gen_umin_vec(vece, n, n, maxv); tcg_gen_shli_vec(vece, n, n, halfbits); - tcg_gen_bitsel_vec(vece, d, t, d, n); + tcg_gen_bitsel_vec(vece, d, maxv, d, n); } static const TCGOpcode uqshrnt_vec_list[] = { @@ -6933,17 +7668,26 @@ DO_ZPZZ_FP(FMINNMP, aa64_sve2, sve2_fminnmp_zpzz) DO_ZPZZ_FP(FMAXP, aa64_sve2, sve2_fmaxp_zpzz) DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz) +static bool do_fmmla(DisasContext *s, arg_rrrr_esz *a, + gen_helper_gvec_4_ptr *fn) +{ + if (sve_access_check(s)) { + if (vec_full_reg_size(s) < 4 * memop_size(a->esz)) { + unallocated_encoding(s); + } else { + gen_gvec_fpst_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, 0, FPST_A64); + } + } + return true; +} + +TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, do_fmmla, a, gen_helper_fmmla_s) +TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, do_fmmla, a, gen_helper_fmmla_d) + /* * SVE Integer Multiply-Add (unpredicated) */ -TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, - gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra, - 0, FPST_FPCR) -TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, - gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra, - 0, FPST_FPCR) - static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = { NULL, gen_helper_sve2_sqdmlal_zzzw_h, gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d, @@ -7036,8 +7780,13 @@ static gen_helper_gvec_4 * const sqrdcmlah_fns[] = { TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz, sqrdcmlah_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot) -TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, - a->esz == 2 ? 
gen_helper_gvec_usdot_b : NULL, a, 0) +TRANS_FEAT(USDOT_zzzz_4s, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, + gen_helper_gvec_usdot_4b, a, 0) + +TRANS_FEAT(SDOT_zzzz_2s, aa64_sme2_or_sve2p1, gen_gvec_ool_arg_zzzz, + gen_helper_gvec_sdot_2h, a, 0) +TRANS_FEAT(UDOT_zzzz_2s, aa64_sme2_or_sve2p1, gen_gvec_ool_arg_zzzz, + gen_helper_gvec_udot_2h, a, 0) TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz, gen_helper_crypto_aesmc, a->rd, a->rd, 0) @@ -7058,17 +7807,18 @@ TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, gen_gvec_rax1, a) TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz, - gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR) + gen_helper_sve2_fcvtnt_sh, a, 0, FPST_A64) TRANS_FEAT(FCVTNT_ds, aa64_sve2, gen_gvec_fpst_arg_zpz, - gen_helper_sve2_fcvtnt_ds, a, 0, FPST_FPCR) + gen_helper_sve2_fcvtnt_ds, a, 0, FPST_A64) TRANS_FEAT(BFCVTNT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz, - gen_helper_sve_bfcvtnt, a, 0, FPST_FPCR) + gen_helper_sve_bfcvtnt, a, 0, + s->fpcr_ah ? FPST_AH : FPST_A64) TRANS_FEAT(FCVTLT_hs, aa64_sve2, gen_gvec_fpst_arg_zpz, - gen_helper_sve2_fcvtlt_hs, a, 0, FPST_FPCR) + gen_helper_sve2_fcvtlt_hs, a, 0, FPST_A64) TRANS_FEAT(FCVTLT_sd, aa64_sve2, gen_gvec_fpst_arg_zpz, - gen_helper_sve2_fcvtlt_sd, a, 0, FPST_FPCR) + gen_helper_sve2_fcvtlt_sd, a, 0, FPST_A64) TRANS_FEAT(FCVTX_ds, aa64_sve2, do_frint_mode, a, FPROUNDING_ODD, gen_helper_sve_fcvt_ds) @@ -7080,7 +7830,7 @@ static gen_helper_gvec_3_ptr * const flogb_fns[] = { gen_helper_flogb_s, gen_helper_flogb_d }; TRANS_FEAT(FLOGB, aa64_sve2, gen_gvec_fpst_arg_zpz, flogb_fns[a->esz], - a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) + a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel) { @@ -7098,7 +7848,7 @@ static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel) { return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzxw_s, a->rd, a->rn, a->rm, a->ra, - (a->index << 2) | (sel << 1) | sub, tcg_env); + (a->index << 3) | (sel << 1) | sub, tcg_env); } TRANS_FEAT(FMLALB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, false) @@ -7113,18 +7863,24 @@ TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, gen_helper_gvec_ummla_b, a, 0) -TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, +TRANS_FEAT(FDOT_zzzz, aa64_sme2_or_sve2p1, gen_gvec_env_arg_zzzz, + gen_helper_sme2_fdot_h, a, 0) +TRANS_FEAT(FDOT_zzxz, aa64_sme2_or_sve2p1, gen_gvec_env_arg_zzxz, + gen_helper_sme2_fdot_idx_h, a) + +TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_env_arg_zzzz, gen_helper_gvec_bfdot, a, 0) -TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz, +TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_env_arg_zzxz, gen_helper_gvec_bfdot_idx, a) -TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, +TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_env_arg_zzzz, gen_helper_gvec_bfmmla, a, 0) static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) { return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal, - a->rd, a->rn, a->rm, a->ra, sel, FPST_FPCR); + a->rd, a->rn, a->rm, a->ra, sel, + s->fpcr_ah ? 
FPST_AH : FPST_A64); } TRANS_FEAT(BFMLALB_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, false) @@ -7134,12 +7890,43 @@ static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel) { return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal_idx, a->rd, a->rn, a->rm, a->ra, - (a->index << 1) | sel, FPST_FPCR); + (a->index << 1) | sel, + s->fpcr_ah ? FPST_AH : FPST_A64); } TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false) TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true) +static bool do_BFMLSL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) +{ + if (s->fpcr_ah) { + return gen_gvec_fpst_zzzz(s, gen_helper_gvec_ah_bfmlsl, + a->rd, a->rn, a->rm, a->ra, sel, FPST_AH); + } else { + return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlsl, + a->rd, a->rn, a->rm, a->ra, sel, FPST_A64); + } +} + +TRANS_FEAT(BFMLSLB_zzzw, aa64_sme2_or_sve2p1, do_BFMLSL_zzzw, a, false) +TRANS_FEAT(BFMLSLT_zzzw, aa64_sme2_or_sve2p1, do_BFMLSL_zzzw, a, true) + +static bool do_BFMLSL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel) +{ + if (s->fpcr_ah) { + return gen_gvec_fpst_zzzz(s, gen_helper_gvec_ah_bfmlsl_idx, + a->rd, a->rn, a->rm, a->ra, + (a->index << 1) | sel, FPST_AH); + } else { + return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlsl_idx, + a->rd, a->rn, a->rm, a->ra, + (a->index << 1) | sel, FPST_A64); + } +} + +TRANS_FEAT(BFMLSLB_zzxw, aa64_sme2_or_sve2p1, do_BFMLSL_zzxw, a, false) +TRANS_FEAT(BFMLSLT_zzxw, aa64_sme2_or_sve2p1, do_BFMLSL_zzxw, a, true) + static bool trans_PSEL(DisasContext *s, arg_psel *a) { int vl = vec_full_reg_size(s); @@ -7148,7 +7935,7 @@ static bool trans_PSEL(DisasContext *s, arg_psel *a) TCGv_i64 tmp, didx, dbit; TCGv_ptr ptr; - if (!dc_isar_feature(aa64_sme, s)) { + if (!dc_isar_feature(aa64_sme_or_sve2p1, s)) { return false; } if (!sve_access_check(s)) { @@ -7187,6 +7974,7 @@ static bool trans_PSEL(DisasContext *s, arg_psel *a) tcg_gen_neg_i64(tmp, tmp); /* Apply to either copy the source, or write zeros. */ + pl = size_for_gvec(pl); tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd), pred_full_reg_offset(s, a->pn), tmp, pl, pl); return true; @@ -7241,7 +8029,7 @@ static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m, tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]); } -TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a) +TRANS_FEAT(SCLAMP, aa64_sme_or_sve2p1, gen_gvec_fn_arg_zzzz, gen_sclamp, a) static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a) { @@ -7292,4 +8080,137 @@ static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m, tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]); } -TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a) +TRANS_FEAT(UCLAMP, aa64_sme_or_sve2p1, gen_gvec_fn_arg_zzzz, gen_uclamp, a) + +static bool trans_FCLAMP(DisasContext *s, arg_FCLAMP *a) +{ + static gen_helper_gvec_3_ptr * const fn[] = { + gen_helper_sme2_bfclamp, + gen_helper_sme2_fclamp_h, + gen_helper_sme2_fclamp_s, + gen_helper_sme2_fclamp_d, + }; + + /* This insn uses MO_8 to encode BFloat16. */ + if (a->esz == MO_8 + ? !dc_isar_feature(aa64_sve_b16b16, s) + : !dc_isar_feature(aa64_sme2_or_sve2p1, s)) { + return false; + } + + /* So far we never optimize rda with MOVPRFX */ + assert(a->rd == a->ra); + return gen_gvec_fpst_zzz(s, fn[a->esz], a->rd, a->rn, a->rm, 1, + a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64); +} + +TRANS_FEAT(SQCVTN_sh, aa64_sme2_or_sve2p1, gen_gvec_ool_zz, + gen_helper_sme2_sqcvtn_sh, a->rd, a->rn, 0) +TRANS_FEAT(UQCVTN_sh, aa64_sme2_or_sve2p1, gen_gvec_ool_zz, + gen_helper_sme2_uqcvtn_sh, a->rd, a->rn, 0) +TRANS_FEAT(SQCVTUN_sh, aa64_sme2_or_sve2p1, gen_gvec_ool_zz, + gen_helper_sme2_sqcvtun_sh, a->rd, a->rn, 0) + +static bool gen_ldst_c(DisasContext *s, TCGv_i64 addr, int zd, int png, + MemOp esz, bool is_write, int n, bool strided) +{ + typedef void ldst_c_fn(TCGv_env, TCGv_ptr, TCGv_i64, + TCGv_i32, TCGv_i64); + static ldst_c_fn * const f_ldst[2][2][4] = { + { { gen_helper_sve2p1_ld1bb_c, + gen_helper_sve2p1_ld1hh_le_c, + gen_helper_sve2p1_ld1ss_le_c, + gen_helper_sve2p1_ld1dd_le_c, }, + { gen_helper_sve2p1_ld1bb_c, + gen_helper_sve2p1_ld1hh_be_c, + gen_helper_sve2p1_ld1ss_be_c, + gen_helper_sve2p1_ld1dd_be_c, } }, + + { { gen_helper_sve2p1_st1bb_c, + gen_helper_sve2p1_st1hh_le_c, + gen_helper_sve2p1_st1ss_le_c, + gen_helper_sve2p1_st1dd_le_c, }, + { gen_helper_sve2p1_st1bb_c, + gen_helper_sve2p1_st1hh_be_c, + gen_helper_sve2p1_st1ss_be_c, + gen_helper_sve2p1_st1dd_be_c, } } + }; + + TCGv_i32 t_png; + TCGv_i64 t_desc; + TCGv_ptr t_zd; + uint64_t desc, lg2_rstride = 0; + bool be = s->be_data == MO_BE; + + assert(n == 2 || n == 4); + if (strided) { + lg2_rstride = 3; + if (n == 4) { + /* Validate ZD alignment. */ + if (zd & 4) { + return false; + } + lg2_rstride = 2; + } + /* Ignore non-temporal bit */ + zd &= ~8; + } + + if (strided || !dc_isar_feature(aa64_sve2p1, s) + ? !sme_sm_enabled_check(s) + : !sve_access_check(s)) { + return true; + } + + if (!s->mte_active[0]) { + addr = clean_data_tbi(s, addr); + } + + desc = n == 2 ? 0 : 1; + desc = desc | (lg2_rstride << 1); + desc = make_svemte_desc(s, vec_full_reg_size(s), 1, esz, is_write, desc); + t_desc = tcg_constant_i64(desc); + + t_png = tcg_temp_new_i32(); + tcg_gen_ld16u_i32(t_png, tcg_env, + pred_full_reg_offset(s, png) ^ + (HOST_BIG_ENDIAN ? 
6 : 0)); + + t_zd = tcg_temp_new_ptr(); + tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, zd)); + + f_ldst[is_write][be][esz](tcg_env, t_zd, addr, t_png, t_desc); + return true; +} + +static bool gen_ldst_zcrr_c(DisasContext *s, arg_zcrr_ldst *a, + bool is_write, bool strided) +{ + TCGv_i64 addr = tcg_temp_new_i64(); + + tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->esz); + tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); + return gen_ldst_c(s, addr, a->rd, a->png, a->esz, is_write, + a->nreg, strided); +} + +static bool gen_ldst_zcri_c(DisasContext *s, arg_zcri_ldst *a, + bool is_write, bool strided) +{ + TCGv_i64 addr = tcg_temp_new_i64(); + + tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), + a->imm * a->nreg * vec_full_reg_size(s)); + return gen_ldst_c(s, addr, a->rd, a->png, a->esz, is_write, + a->nreg, strided); +} + +TRANS_FEAT(LD1_zcrr, aa64_sme2_or_sve2p1, gen_ldst_zcrr_c, a, false, false) +TRANS_FEAT(LD1_zcri, aa64_sme2_or_sve2p1, gen_ldst_zcri_c, a, false, false) +TRANS_FEAT(ST1_zcrr, aa64_sme2_or_sve2p1, gen_ldst_zcrr_c, a, true, false) +TRANS_FEAT(ST1_zcri, aa64_sme2_or_sve2p1, gen_ldst_zcri_c, a, true, false) + +TRANS_FEAT(LD1_zcrr_stride, aa64_sme2, gen_ldst_zcrr_c, a, false, true) +TRANS_FEAT(LD1_zcri_stride, aa64_sme2, gen_ldst_zcri_c, a, false, true) +TRANS_FEAT(ST1_zcrr_stride, aa64_sme2, gen_ldst_zcrr_c, a, true, true) +TRANS_FEAT(ST1_zcri_stride, aa64_sme2, gen_ldst_zcri_c, a, true, true) diff --git a/target/arm/tcg/translate-vfp.c b/target/arm/tcg/translate-vfp.c index cd5b8483576..8d9d1ab877a 100644 --- a/target/arm/tcg/translate-vfp.c +++ b/target/arm/tcg/translate-vfp.c @@ -460,9 +460,9 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) } if (sz == 1) { - fpst = fpstatus_ptr(FPST_FPCR_F16); + fpst = fpstatus_ptr(FPST_A32_F16); } else { - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); } tcg_rmode = gen_set_rmode(rounding, fpst); @@ -527,9 +527,9 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) } if (sz == 1) { - fpst = fpstatus_ptr(FPST_FPCR_F16); + fpst = fpstatus_ptr(FPST_A32_F16); } else { - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); } tcg_shift = tcg_constant_i32(0); @@ -1398,7 +1398,7 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn, f0 = tcg_temp_new_i32(); f1 = tcg_temp_new_i32(); fd = tcg_temp_new_i32(); - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); vfp_load_reg32(f0, vn); vfp_load_reg32(f1, vm); @@ -1433,7 +1433,7 @@ static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn, /* * Do a half-precision operation. 
Functionally this is * the same as do_vfp_3op_sp(), except: - * - it uses the FPST_FPCR_F16 + * - it uses the FPST_A32_F16 * - it doesn't need the VFP vector handling (fp16 is a * v8 feature, and in v8 VFP vectors don't exist) * - it does the aa32_fp16_arith feature test @@ -1456,7 +1456,7 @@ static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn, f0 = tcg_temp_new_i32(); f1 = tcg_temp_new_i32(); fd = tcg_temp_new_i32(); - fpst = fpstatus_ptr(FPST_FPCR_F16); + fpst = fpstatus_ptr(FPST_A32_F16); vfp_load_reg16(f0, vn); vfp_load_reg16(f1, vm); @@ -1517,7 +1517,7 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn, f0 = tcg_temp_new_i64(); f1 = tcg_temp_new_i64(); fd = tcg_temp_new_i64(); - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); vfp_load_reg64(f0, vn); vfp_load_reg64(f1, vm); @@ -2122,7 +2122,7 @@ static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d) /* VFNMA, VFNMS */ gen_vfp_negh(vd, vd); } - fpst = fpstatus_ptr(FPST_FPCR_F16); + fpst = fpstatus_ptr(FPST_A32_F16); gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst); vfp_store_reg32(vd, a->vd); return true; @@ -2181,7 +2181,7 @@ static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d) /* VFNMA, VFNMS */ gen_vfp_negs(vd, vd); } - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); gen_helper_vfp_muladds(vd, vn, vm, vd, fpst); vfp_store_reg32(vd, a->vd); return true; @@ -2190,8 +2190,8 @@ static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d) static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d) { /* - * VFNMA : fd = muladd(-fd, fn, fm) - * VFNMS : fd = muladd(-fd, -fn, fm) + * VFNMA : fd = muladd(-fd, -fn, fm) + * VFNMS : fd = muladd(-fd, fn, fm) * VFMA : fd = muladd( fd, fn, fm) * VFMS : fd = muladd( fd, -fn, fm) * @@ -2246,7 +2246,7 @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d) /* VFNMA, VFNMS */ gen_vfp_negd(vd, vd); } - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst); vfp_store_reg64(vd, a->vd); return true; @@ -2262,8 +2262,8 @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d) #define MAKE_VFM_TRANS_FNS(PREC) \ MAKE_ONE_VFM_TRANS_FN(VFMA, PREC, false, false) \ MAKE_ONE_VFM_TRANS_FN(VFMS, PREC, true, false) \ - MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, false, true) \ - MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, true, true) + MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, false, true) \ + MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, true, true) MAKE_VFM_TRANS_FNS(hp) MAKE_VFM_TRANS_FNS(sp) @@ -2424,17 +2424,17 @@ DO_VFP_2OP(VNEG, dp, gen_vfp_negd, aa32_fpdp_v2) static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm) { - gen_helper_vfp_sqrth(vd, vm, tcg_env); + gen_helper_vfp_sqrth(vd, vm, fpstatus_ptr(FPST_A32_F16)); } static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm) { - gen_helper_vfp_sqrts(vd, vm, tcg_env); + gen_helper_vfp_sqrts(vd, vm, fpstatus_ptr(FPST_A32)); } static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm) { - gen_helper_vfp_sqrtd(vd, vm, tcg_env); + gen_helper_vfp_sqrtd(vd, vm, fpstatus_ptr(FPST_A32)); } DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp, aa32_fp16_arith) @@ -2565,7 +2565,7 @@ static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a) return true; } - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); ahp_mode = get_ahp_flag(); tmp = tcg_temp_new_i32(); /* The T bit tells us if we want the low or high 16 bits of Vm */ @@ -2599,7 +2599,7 @@ static bool 
trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a) return true; } - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); ahp_mode = get_ahp_flag(); tmp = tcg_temp_new_i32(); /* The T bit tells us if we want the low or high 16 bits of Vm */ @@ -2623,7 +2623,7 @@ static bool trans_VCVT_b16_f32(DisasContext *s, arg_VCVT_b16_f32 *a) return true; } - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); tmp = tcg_temp_new_i32(); vfp_load_reg32(tmp, a->vm); @@ -2646,7 +2646,7 @@ static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a) return true; } - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); ahp_mode = get_ahp_flag(); tmp = tcg_temp_new_i32(); @@ -2680,7 +2680,7 @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a) return true; } - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); ahp_mode = get_ahp_flag(); tmp = tcg_temp_new_i32(); vm = tcg_temp_new_i64(); @@ -2706,7 +2706,7 @@ static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a) tmp = tcg_temp_new_i32(); vfp_load_reg16(tmp, a->vm); - fpst = fpstatus_ptr(FPST_FPCR_F16); + fpst = fpstatus_ptr(FPST_A32_F16); gen_helper_rinth(tmp, tmp, fpst); vfp_store_reg32(tmp, a->vd); return true; @@ -2727,7 +2727,7 @@ static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a) tmp = tcg_temp_new_i32(); vfp_load_reg32(tmp, a->vm); - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); gen_helper_rints(tmp, tmp, fpst); vfp_store_reg32(tmp, a->vd); return true; @@ -2757,7 +2757,7 @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a) tmp = tcg_temp_new_i64(); vfp_load_reg64(tmp, a->vm); - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); gen_helper_rintd(tmp, tmp, fpst); vfp_store_reg64(tmp, a->vd); return true; @@ -2779,7 +2779,7 @@ static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a) tmp = tcg_temp_new_i32(); vfp_load_reg16(tmp, a->vm); - fpst = fpstatus_ptr(FPST_FPCR_F16); + fpst = fpstatus_ptr(FPST_A32_F16); tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst); gen_helper_rinth(tmp, tmp, fpst); gen_restore_rmode(tcg_rmode, fpst); @@ -2803,7 +2803,7 @@ static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a) tmp = tcg_temp_new_i32(); vfp_load_reg32(tmp, a->vm); - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst); gen_helper_rints(tmp, tmp, fpst); gen_restore_rmode(tcg_rmode, fpst); @@ -2836,7 +2836,7 @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a) tmp = tcg_temp_new_i64(); vfp_load_reg64(tmp, a->vm); - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst); gen_helper_rintd(tmp, tmp, fpst); gen_restore_rmode(tcg_rmode, fpst); @@ -2859,7 +2859,7 @@ static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a) tmp = tcg_temp_new_i32(); vfp_load_reg16(tmp, a->vm); - fpst = fpstatus_ptr(FPST_FPCR_F16); + fpst = fpstatus_ptr(FPST_A32_F16); gen_helper_rinth_exact(tmp, tmp, fpst); vfp_store_reg32(tmp, a->vd); return true; @@ -2880,7 +2880,7 @@ static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a) tmp = tcg_temp_new_i32(); vfp_load_reg32(tmp, a->vm); - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); gen_helper_rints_exact(tmp, tmp, fpst); vfp_store_reg32(tmp, a->vd); return true; @@ -2910,7 +2910,7 @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a) tmp = tcg_temp_new_i64(); vfp_load_reg64(tmp, a->vm); - fpst = 
fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); gen_helper_rintd_exact(tmp, tmp, fpst); vfp_store_reg64(tmp, a->vd); return true; @@ -2937,7 +2937,7 @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a) vm = tcg_temp_new_i32(); vd = tcg_temp_new_i64(); vfp_load_reg32(vm, a->vm); - gen_helper_vfp_fcvtds(vd, vm, tcg_env); + gen_helper_vfp_fcvtds(vd, vm, fpstatus_ptr(FPST_A32)); vfp_store_reg64(vd, a->vd); return true; } @@ -2963,7 +2963,7 @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a) vd = tcg_temp_new_i32(); vm = tcg_temp_new_i64(); vfp_load_reg64(vm, a->vm); - gen_helper_vfp_fcvtsd(vd, vm, tcg_env); + gen_helper_vfp_fcvtsd(vd, vm, fpstatus_ptr(FPST_A32)); vfp_store_reg32(vd, a->vd); return true; } @@ -2983,7 +2983,7 @@ static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a) vm = tcg_temp_new_i32(); vfp_load_reg32(vm, a->vm); - fpst = fpstatus_ptr(FPST_FPCR_F16); + fpst = fpstatus_ptr(FPST_A32_F16); if (a->s) { /* i32 -> f16 */ gen_helper_vfp_sitoh(vm, vm, fpst); @@ -3010,7 +3010,7 @@ static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a) vm = tcg_temp_new_i32(); vfp_load_reg32(vm, a->vm); - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); if (a->s) { /* i32 -> f32 */ gen_helper_vfp_sitos(vm, vm, fpst); @@ -3044,7 +3044,7 @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a) vm = tcg_temp_new_i32(); vd = tcg_temp_new_i64(); vfp_load_reg32(vm, a->vm); - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); if (a->s) { /* i32 -> f64 */ gen_helper_vfp_sitod(vd, vm, fpst); @@ -3105,7 +3105,7 @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a) vd = tcg_temp_new_i32(); vfp_load_reg32(vd, a->vd); - fpst = fpstatus_ptr(FPST_FPCR_F16); + fpst = fpstatus_ptr(FPST_A32_F16); shift = tcg_constant_i32(frac_bits); /* Switch on op:U:sx bits */ @@ -3161,7 +3161,7 @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a) vd = tcg_temp_new_i32(); vfp_load_reg32(vd, a->vd); - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); shift = tcg_constant_i32(frac_bits); /* Switch on op:U:sx bits */ @@ -3223,7 +3223,7 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a) vd = tcg_temp_new_i64(); vfp_load_reg64(vd, a->vd); - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); shift = tcg_constant_i32(frac_bits); /* Switch on op:U:sx bits */ @@ -3273,7 +3273,7 @@ static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a) return true; } - fpst = fpstatus_ptr(FPST_FPCR_F16); + fpst = fpstatus_ptr(FPST_A32_F16); vm = tcg_temp_new_i32(); vfp_load_reg16(vm, a->vm); @@ -3307,7 +3307,7 @@ static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a) return true; } - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); vm = tcg_temp_new_i32(); vfp_load_reg32(vm, a->vm); @@ -3347,7 +3347,7 @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a) return true; } - fpst = fpstatus_ptr(FPST_FPCR); + fpst = fpstatus_ptr(FPST_A32); vm = tcg_temp_new_i64(); vd = tcg_temp_new_i32(); vfp_load_reg64(vm, a->vm); diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c index c5bc691d92b..f7d6d8ce196 100644 --- a/target/arm/tcg/translate.c +++ b/target/arm/tcg/translate.c @@ -27,6 +27,7 @@ #include "semihosting/semihost.h" #include "cpregs.h" #include "exec/helper-proto.h" +#include "exec/target_page.h" #define HELPER_H "helper.h" #include "exec/helper-info.c.inc" @@ -228,6 +229,9 @@ static inline int 
get_a32_user_mem_index(DisasContext *s) */ switch (s->mmu_idx) { case ARMMMUIdx_E3: + case ARMMMUIdx_E30_0: + case ARMMMUIdx_E30_3_PAN: + return arm_to_core_mmu_idx(ARMMMUIdx_E30_0); case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */ case ARMMMUIdx_E10_0: case ARMMMUIdx_E10_1: @@ -368,7 +372,7 @@ static void gen_rebuild_hflags(DisasContext *s, bool new_el) } } -static void gen_exception_internal(int excp) +void gen_exception_internal(int excp) { assert(excp_is_internal(excp)); gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp)); @@ -490,20 +494,9 @@ static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) { TCGv_i32 tmp = tcg_temp_new_i32(); - if (TCG_TARGET_HAS_add2_i32) { - tcg_gen_movi_i32(tmp, 0); - tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp); - tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp); - } else { - TCGv_i64 q0 = tcg_temp_new_i64(); - TCGv_i64 q1 = tcg_temp_new_i64(); - tcg_gen_extu_i32_i64(q0, t0); - tcg_gen_extu_i32_i64(q1, t1); - tcg_gen_add_i64(q0, q0, q1); - tcg_gen_extu_i32_i64(q1, cpu_CF); - tcg_gen_add_i64(q0, q0, q1); - tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0); - } + + tcg_gen_addcio_i32(cpu_NF, cpu_CF, t0, t1, cpu_CF); + tcg_gen_mov_i32(cpu_ZF, cpu_NF); tcg_gen_xor_i32(cpu_VF, cpu_NF, t0); tcg_gen_xor_i32(tmp, t0, t1); @@ -3507,7 +3500,7 @@ static int t32_expandimm_rot(DisasContext *s, int x) /* Return the unrotated immediate from T32ExpandImm. */ static int t32_expandimm_imm(DisasContext *s, int x) { - int imm = extract32(x, 0, 8); + uint32_t imm = extract32(x, 0, 8); switch (extract32(x, 8, 4)) { case 0: /* XY */ @@ -4938,7 +4931,7 @@ static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a) } static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a, - TCGv_i32 addr, int address_offset) + TCGv_i32 addr) { if (!a->p) { TCGv_i32 ofs = load_reg(s, a->rm); @@ -4951,7 +4944,6 @@ static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a, } else if (!a->w) { return; } - tcg_gen_addi_i32(addr, addr, address_offset); store_reg(s, a->rn, addr); } @@ -4971,7 +4963,7 @@ static bool op_load_rr(DisasContext *s, arg_ldst_rr *a, * Perform base writeback before the loaded value to * ensure correct behavior with overlapping index registers. */ - op_addr_rr_post(s, a, addr, 0); + op_addr_rr_post(s, a, addr); store_reg_from_load(s, a->rt, tmp); return true; } @@ -4996,14 +4988,53 @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a, gen_aa32_st_i32(s, tmp, addr, mem_idx, mop); disas_set_da_iss(s, mop, issinfo); - op_addr_rr_post(s, a, addr, 0); + op_addr_rr_post(s, a, addr); return true; } -static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a) +static void do_ldrd_load(DisasContext *s, TCGv_i32 addr, int rt, int rt2) { + /* + * LDRD is required to be an atomic 64-bit access if the + * address is 8-aligned, two atomic 32-bit accesses if + * it's only 4-aligned, and to give an alignment fault + * if it's not 4-aligned. This is MO_ALIGN_4 | MO_ATOM_SUBALIGN. + * Rt is always the word from the lower address, and Rt2 the + * data from the higher address, regardless of endianness. + * So (like gen_load_exclusive) we avoid gen_aa32_ld_i64() + * so we don't get its SCTLR_B check, and instead do a 64-bit access + * using MO_BE if appropriate and then split the two halves. + * + * For M-profile, and for A-profile before LPAE, the 64-bit + * atomicity is not required. 
We could model that using + * the looser MO_ATOM_IFALIGN_PAIR, but providing a higher + * level of atomicity than required is harmless (we would not + * currently generate better code for IFALIGN_PAIR here). + * + * This also gives us the correct behaviour of not updating + * rt if the load of rt2 faults; this is required for cases + * like "ldrd r2, r3, [r2]" where rt is also the base register. + */ int mem_idx = get_mem_index(s); - TCGv_i32 addr, tmp; + MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data; + TCGv taddr = gen_aa32_addr(s, addr, opc); + TCGv_i64 t64 = tcg_temp_new_i64(); + TCGv_i32 tmp = tcg_temp_new_i32(); + TCGv_i32 tmp2 = tcg_temp_new_i32(); + + tcg_gen_qemu_ld_i64(t64, taddr, mem_idx, opc); + if (s->be_data == MO_BE) { + tcg_gen_extr_i64_i32(tmp2, tmp, t64); + } else { + tcg_gen_extr_i64_i32(tmp, tmp2, t64); + } + store_reg(s, rt, tmp); + store_reg(s, rt2, tmp2); +} + +static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a) +{ + TCGv_i32 addr; if (!ENABLE_ARCH_5TE) { return false; @@ -5014,25 +5045,49 @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a) } addr = op_addr_rr_pre(s, a); - tmp = tcg_temp_new_i32(); - gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - store_reg(s, a->rt, tmp); - - tcg_gen_addi_i32(addr, addr, 4); - - tmp = tcg_temp_new_i32(); - gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - store_reg(s, a->rt + 1, tmp); + do_ldrd_load(s, addr, a->rt, a->rt + 1); /* LDRD w/ base writeback is undefined if the registers overlap. */ - op_addr_rr_post(s, a, addr, -4); + op_addr_rr_post(s, a, addr); return true; } -static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a) +static void do_strd_store(DisasContext *s, TCGv_i32 addr, int rt, int rt2) { + /* + * STRD is required to be an atomic 64-bit access if the + * address is 8-aligned, two atomic 32-bit accesses if + * it's only 4-aligned, and to give an alignment fault + * if it's not 4-aligned. + * Rt is always the word from the lower address, and Rt2 the + * data from the higher address, regardless of endianness. + * So (like gen_store_exclusive) we avoid gen_aa32_ld_i64() + * so we don't get its SCTLR_B check, and instead do a 64-bit access + * using MO_BE if appropriate, using a value constructed + * by putting the two halves together in the right order. + * + * As with LDRD, the 64-bit atomicity is not required for + * M-profile, or for A-profile before LPAE, and we provide + * the higher guarantee always for simplicity. 
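+ * + * For example, with Rt = 0x11111111 and Rt2 = 0x22222222 the i64 built below is 0x2222222211111111 on a little-endian guest and 0x1111111122222222 on a big-endian one; either way Rt's data ends up at the lower address once the single 64-bit store is issued.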
+ */ int mem_idx = get_mem_index(s); - TCGv_i32 addr, tmp; + MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data; + TCGv taddr = gen_aa32_addr(s, addr, opc); + TCGv_i32 t1 = load_reg(s, rt); + TCGv_i32 t2 = load_reg(s, rt2); + TCGv_i64 t64 = tcg_temp_new_i64(); + + if (s->be_data == MO_BE) { + tcg_gen_concat_i32_i64(t64, t2, t1); + } else { + tcg_gen_concat_i32_i64(t64, t1, t2); + } + tcg_gen_qemu_st_i64(t64, taddr, mem_idx, opc); +} + +static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a) +{ + TCGv_i32 addr; if (!ENABLE_ARCH_5TE) { return false; @@ -5043,15 +5098,9 @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a) } addr = op_addr_rr_pre(s, a); - tmp = load_reg(s, a->rt); - gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); + do_strd_store(s, addr, a->rt, a->rt + 1); - tcg_gen_addi_i32(addr, addr, 4); - - tmp = load_reg(s, a->rt + 1); - gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - - op_addr_rr_post(s, a, addr, -4); + op_addr_rr_post(s, a, addr); return true; } @@ -5087,13 +5136,14 @@ static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a) } static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a, - TCGv_i32 addr, int address_offset) + TCGv_i32 addr) { + int address_offset = 0; if (!a->p) { if (a->u) { - address_offset += a->imm; + address_offset = a->imm; } else { - address_offset -= a->imm; + address_offset = -a->imm; } } else if (!a->w) { return; @@ -5118,7 +5168,7 @@ static bool op_load_ri(DisasContext *s, arg_ldst_ri *a, * Perform base writeback before the loaded value to * ensure correct behavior with overlapping index registers. */ - op_addr_ri_post(s, a, addr, 0); + op_addr_ri_post(s, a, addr); store_reg_from_load(s, a->rt, tmp); return true; } @@ -5143,29 +5193,20 @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a, gen_aa32_st_i32(s, tmp, addr, mem_idx, mop); disas_set_da_iss(s, mop, issinfo); - op_addr_ri_post(s, a, addr, 0); + op_addr_ri_post(s, a, addr); return true; } static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2) { - int mem_idx = get_mem_index(s); - TCGv_i32 addr, tmp; + TCGv_i32 addr; addr = op_addr_ri_pre(s, a); - tmp = tcg_temp_new_i32(); - gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - store_reg(s, a->rt, tmp); - - tcg_gen_addi_i32(addr, addr, 4); - - tmp = tcg_temp_new_i32(); - gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - store_reg(s, rt2, tmp); + do_ldrd_load(s, addr, a->rt, rt2); /* LDRD w/ base writeback is undefined if the registers overlap. 
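+ * Since do_ldrd_load() leaves addr untouched, the post-index helper no longer needs the old -4 fixup.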
*/ - op_addr_ri_post(s, a, addr, -4); + op_addr_ri_post(s, a, addr); return true; } @@ -5188,20 +5229,13 @@ static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a) static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2) { - int mem_idx = get_mem_index(s); - TCGv_i32 addr, tmp; + TCGv_i32 addr; addr = op_addr_ri_pre(s, a); - tmp = load_reg(s, a->rt); - gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - - tcg_gen_addi_i32(addr, addr, 4); - - tmp = load_reg(s, rt2); - gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); + do_strd_store(s, addr, a->rt, rt2); - op_addr_ri_post(s, a, addr, -4); + op_addr_ri_post(s, a, addr); return true; } @@ -7726,7 +7760,8 @@ static bool arm_check_ss_active(DisasContext *dc) static void arm_post_translate_insn(DisasContext *dc) { - if (dc->condjmp && dc->base.is_jmp == DISAS_NEXT) { + if (dc->condjmp && + (dc->base.is_jmp == DISAS_NEXT || dc->base.is_jmp == DISAS_TOO_MANY)) { if (dc->pc_save != dc->condlabel.pc_save) { gen_update_pc(dc, dc->condlabel.pc_save - dc->pc_save); } @@ -7756,7 +7791,7 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) * be possible after an indirect branch, at the start of the TB. */ assert(dc->base.num_insns == 1); - gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc)); + gen_helper_exception_pc_alignment(tcg_env, tcg_constant_vaddr(pc)); dc->base.is_jmp = DISAS_NORETURN; dc->base.pc_next = QEMU_ALIGN_UP(pc, 4); return; @@ -8090,9 +8125,8 @@ static const TranslatorOps thumb_translator_ops = { .tb_stop = arm_tr_tb_stop, }; -/* generate intermediate code for basic block 'tb'. */ -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void arm_translate_code(CPUState *cpu, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc = { }; const TranslatorOps *ops = &arm_translator_ops; diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index a8672c857c1..f974996f3f8 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -4,8 +4,8 @@ #include "cpu.h" #include "tcg/tcg-op.h" #include "tcg/tcg-op-gvec.h" -#include "exec/exec-all.h" #include "exec/translator.h" +#include "exec/translation-block.h" #include "exec/helper-gen.h" #include "internals.h" #include "cpu-features.h" @@ -70,8 +70,10 @@ typedef struct DisasContext { int fp_excp_el; /* FP exception EL or 0 if enabled */ int sve_excp_el; /* SVE exception EL or 0 if enabled */ int sme_excp_el; /* SME exception EL or 0 if enabled */ + int zt0_excp_el; /* ZT0 exception EL or 0 if enabled */ int vl; /* current vector length in bytes */ int svl; /* current streaming vector length in bytes */ + int max_svl; /* maximum implemented streaming vector length */ bool vfp_enabled; /* FP enabled via FPSCR.EN */ int vec_len; int vec_stride; @@ -91,15 +93,19 @@ typedef struct DisasContext { bool aarch64; bool thumb; bool lse2; - /* Because unallocated encodings generate different exception syndrome + /* + * Because unallocated encodings generate different exception syndrome * information from traps due to FP being disabled, we can't do a single * "is fp access disabled" check at a high level in the decode tree. * To help in catching bugs where the access check was forgotten in some * code path, we set this flag when the access check is done, and assert * that it is set at the point where we actually touch the FP regs. 
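+ * The check result is recorded as a tri-state value: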
+ * 0: not checked, + * 1: checked, access ok + * -1: checked, access denied */ - bool fp_access_checked; - bool sve_access_checked; + int8_t fp_access_checked; + int8_t sve_access_checked; /* ARMv8 single-step state (this is distinct from the QEMU gdbstub * single-step support). */ @@ -154,6 +160,10 @@ typedef struct DisasContext { bool nv2_mem_e20; /* True if NV2 enabled and NV2 RAM accesses are big-endian */ bool nv2_mem_be; + /* True if FPCR.AH is 1 (alternate floating point handling) */ + bool fpcr_ah; + /* True if FPCR.NEP is 1 (FEAT_AFP scalar upper-element result handling) */ + bool fpcr_nep; /* * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. * < 0, set by the current instruction. @@ -163,8 +173,6 @@ typedef struct DisasContext { uint8_t dcz_blocksize; /* A copy of cpu->gm_blocksize. */ uint8_t gm_blocksize; - /* True if this page is guarded. */ - bool guarded_page; /* True if the current insn_start has been updated. */ bool insn_start_updated; /* Bottom two bits of XScale c15_cpar coprocessor access control reg */ @@ -202,6 +210,11 @@ static inline int plus_2(DisasContext *s, int x) return x + 2; } +static inline int plus_8(DisasContext *s, int x) +{ + return x + 8; +} + static inline int plus_12(DisasContext *s, int x) { return x + 12; @@ -341,6 +354,7 @@ void arm_jump_cc(DisasCompare *cmp, TCGLabel *label); void arm_gen_test_cc(int cc, TCGLabel *label); MemOp pow2_align(unsigned i); void unallocated_encoding(DisasContext *s); +void gen_exception_internal(int excp); void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp, uint32_t syn, uint32_t target_el); void gen_exception_insn(DisasContext *s, target_long pc_diff, @@ -471,6 +485,13 @@ void gen_neon_sqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, void gen_neon_uqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); +void gen_neon_sqshli(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + int64_t c, uint32_t opr_sz, uint32_t max_sz); +void gen_neon_uqshli(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + int64_t c, uint32_t opr_sz, uint32_t max_sz); +void gen_neon_sqshlui(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + int64_t c, uint32_t opr_sz, uint32_t max_sz); + void gen_gvec_shadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); void gen_gvec_uhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -514,6 +535,11 @@ void gen_sqsub_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b); void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); +void gen_gvec_sshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, + int64_t shift, uint32_t opr_sz, uint32_t max_sz); +void gen_gvec_ushr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, + int64_t shift, uint32_t opr_sz, uint32_t max_sz); + void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, int64_t shift, uint32_t opr_sz, uint32_t max_sz); void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, @@ -568,6 +594,41 @@ void gen_gvec_umaxp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, void gen_gvec_uminp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); +void gen_gvec_cls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz); +void gen_gvec_clz(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz); +void gen_gvec_cnt(unsigned vece, 
uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz); +void gen_gvec_rbit(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz); +void gen_gvec_rev16(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz); +void gen_gvec_rev32(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz); +void gen_gvec_rev64(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz); + +void gen_gvec_saddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz); +void gen_gvec_sadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz); +void gen_gvec_uaddlp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz); +void gen_gvec_uadalp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz); + +/* These exclusively manipulate the sign bit. */ +void gen_gvec_fabs(unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz); +void gen_gvec_fneg(unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz); + +void gen_gvec_urecpe(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz); +void gen_gvec_ursqrte(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, + uint32_t opr_sz, uint32_t max_sz); + /* * Forward to the isar_feature_* tests given a DisasContext pointer. */ @@ -582,6 +643,8 @@ typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t); typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t); +typedef void GVecGen3FnVar(unsigned, TCGv_ptr, uint32_t, TCGv_ptr, uint32_t, + TCGv_ptr, uint32_t, uint32_t, uint32_t); /* Function prototype for gen_ functions for calling Neon helpers */ typedef void NeonGenOneOpFn(TCGv_i32, TCGv_i32); @@ -593,13 +656,13 @@ typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32, typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64); typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64); typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64); -typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64); typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32); typedef void NeonGenTwoOpWidenFn(TCGv_i64, TCGv_i32, TCGv_i32); typedef void NeonGenOneSingleOpFn(TCGv_i32, TCGv_i32, TCGv_ptr); typedef void NeonGenTwoSingleOpFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); typedef void NeonGenTwoDoubleOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr); typedef void NeonGenOne64OpFn(TCGv_i64, TCGv_i64); +typedef void NeonGenOne64OpEnvFn(TCGv_i64, TCGv_env, TCGv_i64); typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr); typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32); typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr); @@ -620,54 +683,18 @@ static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb) return (CPUARMTBFlags){ tb->flags, tb->cs_base }; } -/* - * Enum for argument to fpstatus_ptr(). - */ -typedef enum ARMFPStatusFlavour { - FPST_FPCR, - FPST_FPCR_F16, - FPST_STD, - FPST_STD_F16, -} ARMFPStatusFlavour; - /** * fpstatus_ptr: return TCGv_ptr to the specified fp_status field * * We have multiple softfloat float_status fields in the Arm CPU state struct * (see the comment in cpu.h for details). Return a TCGv_ptr which has * been set up to point to the requested field in the CPU state struct. 
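+ * The flavour value indexes directly into the vfp.fp_status[] array in CPUARMState, so each enumerator simply names the status word it selects.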
- * The options are: - * - * FPST_FPCR - * for non-FP16 operations controlled by the FPCR - * FPST_FPCR_F16 - * for operations controlled by the FPCR where FPCR.FZ16 is to be used - * FPST_STD - * for A32/T32 Neon operations using the "standard FPSCR value" - * FPST_STD_F16 - * as FPST_STD, but where FPCR.FZ16 is to be used */ static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour) { TCGv_ptr statusptr = tcg_temp_new_ptr(); - int offset; - - switch (flavour) { - case FPST_FPCR: - offset = offsetof(CPUARMState, vfp.fp_status); - break; - case FPST_FPCR_F16: - offset = offsetof(CPUARMState, vfp.fp_status_f16); - break; - case FPST_STD: - offset = offsetof(CPUARMState, vfp.standard_fp_status); - break; - case FPST_STD_F16: - offset = offsetof(CPUARMState, vfp.standard_fp_status_f16); - break; - default: - g_assert_not_reached(); - } + int offset = offsetof(CPUARMState, vfp.fp_status[flavour]); + tcg_gen_addi_ptr(statusptr, tcg_env, offset); return statusptr; } diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index 98604d170fd..33a136b90a6 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -825,17 +825,24 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ clear_tail(d, opr_sz, simd_maxsz(desc)); \ } -DO_DOT(gvec_sdot_b, int32_t, int8_t, int8_t) -DO_DOT(gvec_udot_b, uint32_t, uint8_t, uint8_t) -DO_DOT(gvec_usdot_b, uint32_t, uint8_t, int8_t) -DO_DOT(gvec_sdot_h, int64_t, int16_t, int16_t) -DO_DOT(gvec_udot_h, uint64_t, uint16_t, uint16_t) +DO_DOT(gvec_sdot_4b, int32_t, int8_t, int8_t) +DO_DOT(gvec_udot_4b, uint32_t, uint8_t, uint8_t) +DO_DOT(gvec_usdot_4b, uint32_t, uint8_t, int8_t) +DO_DOT(gvec_sdot_4h, int64_t, int16_t, int16_t) +DO_DOT(gvec_udot_4h, uint64_t, uint16_t, uint16_t) #define DO_DOT_IDX(NAME, TYPED, TYPEN, TYPEM, HD) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ { \ intptr_t i = 0, opr_sz = simd_oprsz(desc); \ intptr_t opr_sz_n = opr_sz / sizeof(TYPED); \ + /* \ + * Special case: opr_sz == 8 from AA64/AA32 advsimd means the \ + * first iteration might not be a full 16 byte segment. But \ + * for vector lengths beyond that this must be SVE and we know \ + * opr_sz is a multiple of 16, so we need not clamp segend \ + * to opr_sz_n when we advance it at the end of the loop. 
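+ * (e.g. for gvec_sdot_idx_4b TYPED is int32_t, so opr_sz_n is 2 and segend \ + * starts at 2; the outer loop body then runs exactly once.)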
\ + */ \ intptr_t segend = MIN(16 / sizeof(TYPED), opr_sz_n); \ intptr_t index = simd_data(desc); \ TYPED *d = vd, *a = va; \ @@ -853,39 +860,91 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ n[i * 4 + 2] * m2 + \ n[i * 4 + 3] * m3); \ } while (++i < segend); \ - segend = i + 4; \ + segend = i + (16 / sizeof(TYPED)); \ } while (i < opr_sz_n); \ clear_tail(d, opr_sz, simd_maxsz(desc)); \ } -DO_DOT_IDX(gvec_sdot_idx_b, int32_t, int8_t, int8_t, H4) -DO_DOT_IDX(gvec_udot_idx_b, uint32_t, uint8_t, uint8_t, H4) -DO_DOT_IDX(gvec_sudot_idx_b, int32_t, int8_t, uint8_t, H4) -DO_DOT_IDX(gvec_usdot_idx_b, int32_t, uint8_t, int8_t, H4) -DO_DOT_IDX(gvec_sdot_idx_h, int64_t, int16_t, int16_t, H8) -DO_DOT_IDX(gvec_udot_idx_h, uint64_t, uint16_t, uint16_t, H8) +DO_DOT_IDX(gvec_sdot_idx_4b, int32_t, int8_t, int8_t, H4) +DO_DOT_IDX(gvec_udot_idx_4b, uint32_t, uint8_t, uint8_t, H4) +DO_DOT_IDX(gvec_sudot_idx_4b, int32_t, int8_t, uint8_t, H4) +DO_DOT_IDX(gvec_usdot_idx_4b, int32_t, uint8_t, int8_t, H4) +DO_DOT_IDX(gvec_sdot_idx_4h, int64_t, int16_t, int16_t, H8) +DO_DOT_IDX(gvec_udot_idx_4h, uint64_t, uint16_t, uint16_t, H8) + +#undef DO_DOT +#undef DO_DOT_IDX + +/* Similar for 2-way dot product */ +#define DO_DOT(NAME, TYPED, TYPEN, TYPEM) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc); \ + TYPED *d = vd, *a = va; \ + TYPEN *n = vn; \ + TYPEM *m = vm; \ + for (i = 0; i < opr_sz / sizeof(TYPED); ++i) { \ + d[i] = (a[i] + \ + (TYPED)n[i * 2 + 0] * m[i * 2 + 0] + \ + (TYPED)n[i * 2 + 1] * m[i * 2 + 1]); \ + } \ + clear_tail(d, opr_sz, simd_maxsz(desc)); \ +} + +#define DO_DOT_IDX(NAME, TYPED, TYPEN, TYPEM, HD) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ +{ \ + intptr_t i = 0, opr_sz = simd_oprsz(desc); \ + intptr_t opr_sz_n = opr_sz / sizeof(TYPED); \ + intptr_t segend = MIN(16 / sizeof(TYPED), opr_sz_n); \ + intptr_t index = simd_data(desc); \ + TYPED *d = vd, *a = va; \ + TYPEN *n = vn; \ + TYPEM *m_indexed = (TYPEM *)vm + HD(index) * 2; \ + do { \ + TYPED m0 = m_indexed[i * 2 + 0]; \ + TYPED m1 = m_indexed[i * 2 + 1]; \ + do { \ + d[i] = (a[i] + \ + n[i * 2 + 0] * m0 + \ + n[i * 2 + 1] * m1); \ + } while (++i < segend); \ + segend = i + (16 / sizeof(TYPED)); \ + } while (i < opr_sz_n); \ + clear_tail(d, opr_sz, simd_maxsz(desc)); \ +} + +DO_DOT(gvec_sdot_2h, int32_t, int16_t, int16_t) +DO_DOT(gvec_udot_2h, uint32_t, uint16_t, uint16_t) + +DO_DOT_IDX(gvec_sdot_idx_2h, int32_t, int16_t, int16_t, H4) +DO_DOT_IDX(gvec_udot_idx_2h, uint32_t, uint16_t, uint16_t, H4) + +#undef DO_DOT +#undef DO_DOT_IDX void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm, - void *vfpst, uint32_t desc) + float_status *fpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float16 *d = vd; float16 *n = vn; float16 *m = vm; - float_status *fpst = vfpst; - uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1); - uint32_t neg_imag = neg_real ^ 1; + bool rot = extract32(desc, SIMD_DATA_SHIFT, 1); + bool fpcr_ah = extract64(desc, SIMD_DATA_SHIFT + 1, 1); uintptr_t i; - /* Shift boolean to the sign bit so we can xor to negate. 
*/ - neg_real <<= 15; - neg_imag <<= 15; - for (i = 0; i < opr_sz / 2; i += 2) { float16 e0 = n[H2(i)]; - float16 e1 = m[H2(i + 1)] ^ neg_imag; + float16 e1 = m[H2(i + 1)]; float16 e2 = n[H2(i + 1)]; - float16 e3 = m[H2(i)] ^ neg_real; + float16 e3 = m[H2(i)]; + + if (rot) { + e3 = float16_maybe_ah_chs(e3, fpcr_ah); + } else { + e1 = float16_maybe_ah_chs(e1, fpcr_ah); + } d[H2(i)] = float16_add(e0, e1, fpst); d[H2(i + 1)] = float16_add(e2, e3, fpst); @@ -894,26 +953,27 @@ void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm, } void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm, - void *vfpst, uint32_t desc) + float_status *fpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float32 *d = vd; float32 *n = vn; float32 *m = vm; - float_status *fpst = vfpst; - uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1); - uint32_t neg_imag = neg_real ^ 1; + bool rot = extract32(desc, SIMD_DATA_SHIFT, 1); + bool fpcr_ah = extract64(desc, SIMD_DATA_SHIFT + 1, 1); uintptr_t i; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 31; - neg_imag <<= 31; - for (i = 0; i < opr_sz / 4; i += 2) { float32 e0 = n[H4(i)]; - float32 e1 = m[H4(i + 1)] ^ neg_imag; + float32 e1 = m[H4(i + 1)]; float32 e2 = n[H4(i + 1)]; - float32 e3 = m[H4(i)] ^ neg_real; + float32 e3 = m[H4(i)]; + + if (rot) { + e3 = float32_maybe_ah_chs(e3, fpcr_ah); + } else { + e1 = float32_maybe_ah_chs(e1, fpcr_ah); + } d[H4(i)] = float32_add(e0, e1, fpst); d[H4(i + 1)] = float32_add(e2, e3, fpst); @@ -922,26 +982,27 @@ void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm, } void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm, - void *vfpst, uint32_t desc) + float_status *fpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float64 *d = vd; float64 *n = vn; float64 *m = vm; - float_status *fpst = vfpst; - uint64_t neg_real = extract64(desc, SIMD_DATA_SHIFT, 1); - uint64_t neg_imag = neg_real ^ 1; + bool rot = extract32(desc, SIMD_DATA_SHIFT, 1); + bool fpcr_ah = extract64(desc, SIMD_DATA_SHIFT + 1, 1); uintptr_t i; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 63; - neg_imag <<= 63; - for (i = 0; i < opr_sz / 8; i += 2) { float64 e0 = n[i]; - float64 e1 = m[i + 1] ^ neg_imag; + float64 e1 = m[i + 1]; float64 e2 = n[i + 1]; - float64 e3 = m[i] ^ neg_real; + float64 e3 = m[i]; + + if (rot) { + e3 = float64_maybe_ah_chs(e3, fpcr_ah); + } else { + e1 = float64_maybe_ah_chs(e1, fpcr_ah); + } d[i] = float64_add(e0, e1, fpst); d[i + 1] = float64_add(e2, e3, fpst); @@ -950,152 +1011,167 @@ void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm, } void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, void *va, - void *vfpst, uint32_t desc) + float_status *fpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float16 *d = vd, *n = vn, *m = vm, *a = va; - float_status *fpst = vfpst; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); - uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); - uint32_t neg_real = flip ^ neg_imag; + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_real = flip ^ negf_imag; + float16 negx_imag, negx_real; uintptr_t i; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 15; - neg_imag <<= 15; + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (negf_real & ~fpcr_ah) << 15; + negx_imag = (negf_imag & ~fpcr_ah) << 15; + negf_real = (negf_real & fpcr_ah ? 
float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0); for (i = 0; i < opr_sz / 2; i += 2) { float16 e2 = n[H2(i + flip)]; - float16 e1 = m[H2(i + flip)] ^ neg_real; + float16 e1 = m[H2(i + flip)] ^ negx_real; float16 e4 = e2; - float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag; + float16 e3 = m[H2(i + 1 - flip)] ^ negx_imag; - d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], 0, fpst); - d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], 0, fpst); + d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], negf_real, fpst); + d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], negf_imag, fpst); } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, void *va, - void *vfpst, uint32_t desc) + float_status *fpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float16 *d = vd, *n = vn, *m = vm, *a = va; - float_status *fpst = vfpst; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); - uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2); - uint32_t neg_real = flip ^ neg_imag; + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 4, 1); + uint32_t negf_real = flip ^ negf_imag; intptr_t elements = opr_sz / sizeof(float16); intptr_t eltspersegment = MIN(16 / sizeof(float16), elements); + float16 negx_imag, negx_real; intptr_t i, j; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 15; - neg_imag <<= 15; + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (negf_real & ~fpcr_ah) << 15; + negx_imag = (negf_imag & ~fpcr_ah) << 15; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0); for (i = 0; i < elements; i += eltspersegment) { float16 mr = m[H2(i + 2 * index + 0)]; float16 mi = m[H2(i + 2 * index + 1)]; - float16 e1 = neg_real ^ (flip ? mi : mr); - float16 e3 = neg_imag ^ (flip ? mr : mi); + float16 e1 = negx_real ^ (flip ? mi : mr); + float16 e3 = negx_imag ^ (flip ? mr : mi); for (j = i; j < i + eltspersegment; j += 2) { float16 e2 = n[H2(j + flip)]; float16 e4 = e2; - d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], 0, fpst); - d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], 0, fpst); + d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], negf_real, fpst); + d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], negf_imag, fpst); } } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, void *va, - void *vfpst, uint32_t desc) + float_status *fpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float32 *d = vd, *n = vn, *m = vm, *a = va; - float_status *fpst = vfpst; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); - uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); - uint32_t neg_real = flip ^ neg_imag; + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_real = flip ^ negf_imag; + float32 negx_imag, negx_real; uintptr_t i; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 31; - neg_imag <<= 31; + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (negf_real & ~fpcr_ah) << 31; + negx_imag = (negf_imag & ~fpcr_ah) << 31; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? 
float_muladd_negate_product : 0); for (i = 0; i < opr_sz / 4; i += 2) { float32 e2 = n[H4(i + flip)]; - float32 e1 = m[H4(i + flip)] ^ neg_real; + float32 e1 = m[H4(i + flip)] ^ negx_real; float32 e4 = e2; - float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag; + float32 e3 = m[H4(i + 1 - flip)] ^ negx_imag; - d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], 0, fpst); - d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], 0, fpst); + d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], negf_real, fpst); + d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], negf_imag, fpst); } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, void *va, - void *vfpst, uint32_t desc) + float_status *fpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float32 *d = vd, *n = vn, *m = vm, *a = va; - float_status *fpst = vfpst; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); - uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2); - uint32_t neg_real = flip ^ neg_imag; + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 4, 1); + uint32_t negf_real = flip ^ negf_imag; intptr_t elements = opr_sz / sizeof(float32); intptr_t eltspersegment = MIN(16 / sizeof(float32), elements); + float32 negx_imag, negx_real; intptr_t i, j; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 31; - neg_imag <<= 31; + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (negf_real & ~fpcr_ah) << 31; + negx_imag = (negf_imag & ~fpcr_ah) << 31; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? float_muladd_negate_product : 0); for (i = 0; i < elements; i += eltspersegment) { float32 mr = m[H4(i + 2 * index + 0)]; float32 mi = m[H4(i + 2 * index + 1)]; - float32 e1 = neg_real ^ (flip ? mi : mr); - float32 e3 = neg_imag ^ (flip ? mr : mi); + float32 e1 = negx_real ^ (flip ? mi : mr); + float32 e3 = negx_imag ^ (flip ? mr : mi); for (j = i; j < i + eltspersegment; j += 2) { float32 e2 = n[H4(j + flip)]; float32 e4 = e2; - d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], 0, fpst); - d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], 0, fpst); + d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], negf_real, fpst); + d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], negf_imag, fpst); } } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, void *va, - void *vfpst, uint32_t desc) + float_status *fpst, uint32_t desc) { uintptr_t opr_sz = simd_oprsz(desc); float64 *d = vd, *n = vn, *m = vm, *a = va; - float_status *fpst = vfpst; intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1); - uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); - uint64_t neg_real = flip ^ neg_imag; + uint32_t fpcr_ah = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + uint32_t negf_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1); + uint32_t negf_real = flip ^ negf_imag; + float64 negx_real, negx_imag; uintptr_t i; - /* Shift boolean to the sign bit so we can xor to negate. */ - neg_real <<= 63; - neg_imag <<= 63; + /* With AH=0, use negx; with AH=1 use negf. */ + negx_real = (uint64_t)(negf_real & ~fpcr_ah) << 63; + negx_imag = (uint64_t)(negf_imag & ~fpcr_ah) << 63; + negf_real = (negf_real & fpcr_ah ? float_muladd_negate_product : 0); + negf_imag = (negf_imag & fpcr_ah ? 
float_muladd_negate_product : 0); for (i = 0; i < opr_sz / 8; i += 2) { float64 e2 = n[i + flip]; - float64 e1 = m[i + flip] ^ neg_real; + float64 e1 = m[i + flip] ^ negx_real; float64 e4 = e2; - float64 e3 = m[i + 1 - flip] ^ neg_imag; + float64 e3 = m[i + 1 - flip] ^ negx_imag; - d[i] = float64_muladd(e2, e1, a[i], 0, fpst); - d[i + 1] = float64_muladd(e4, e3, a[i + 1], 0, fpst); + d[i] = float64_muladd(e2, e1, a[i], negf_real, fpst); + d[i + 1] = float64_muladd(e4, e3, a[i + 1], negf_imag, fpst); } clear_tail(d, opr_sz, simd_maxsz(desc)); } @@ -1180,9 +1256,8 @@ static uint64_t float64_acgt(float64 op1, float64 op2, float_status *stat) return -float64_lt(float64_abs(op2), float64_abs(op1), stat); } -static int16_t vfp_tosszh(float16 x, void *fpstp) +static int16_t vfp_tosszh(float16 x, float_status *fpst) { - float_status *fpst = fpstp; if (float16_is_any_nan(x)) { float_raise(float_flag_invalid, fpst); return 0; @@ -1190,9 +1265,8 @@ static int16_t vfp_tosszh(float16 x, void *fpstp) return float16_to_int16_round_to_zero(x, fpst); } -static uint16_t vfp_touszh(float16 x, void *fpstp) +static uint16_t vfp_touszh(float16 x, float_status *fpst) { - float_status *fpst = fpstp; if (float16_is_any_nan(x)) { float_raise(float_flag_invalid, fpst); return 0; @@ -1201,7 +1275,7 @@ static uint16_t vfp_touszh(float16 x, void *fpstp) } #define DO_2OP(NAME, FUNC, TYPE) \ -void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \ +void HELPER(NAME)(void *vd, void *vn, float_status *stat, uint32_t desc) \ { \ intptr_t i, oprsz = simd_oprsz(desc); \ TYPE *d = vd, *n = vn; \ @@ -1213,10 +1287,12 @@ void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \ DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16) DO_2OP(gvec_frecpe_s, helper_recpe_f32, float32) +DO_2OP(gvec_frecpe_rpres_s, helper_recpe_rpres_f32, float32) DO_2OP(gvec_frecpe_d, helper_recpe_f64, float64) DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16) DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32) +DO_2OP(gvec_frsqrte_rpres_s, helper_rsqrte_rpres_f32, float32) DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64) DO_2OP(gvec_vrintx_h, float16_round_to_int, float16) @@ -1246,8 +1322,10 @@ DO_2OP(gvec_touszh, vfp_touszh, float16) #define DO_2OP_CMP0(FN, CMPOP, DIRN) \ WRAP_CMP0_##DIRN(FN, CMPOP, float16) \ WRAP_CMP0_##DIRN(FN, CMPOP, float32) \ + WRAP_CMP0_##DIRN(FN, CMPOP, float64) \ DO_2OP(gvec_f##FN##0_h, float16_##FN##0, float16) \ - DO_2OP(gvec_f##FN##0_s, float32_##FN##0, float32) + DO_2OP(gvec_f##FN##0_s, float32_##FN##0, float32) \ + DO_2OP(gvec_f##FN##0_d, float64_##FN##0, float64) DO_2OP_CMP0(cgt, cgt, FWD) DO_2OP_CMP0(cge, cge, FWD) @@ -1303,6 +1381,25 @@ static float64 float64_abd(float64 op1, float64 op2, float_status *stat) return float64_abs(float64_sub(op1, op2, stat)); } +/* ABD when FPCR.AH = 1: avoid flipping sign bit of a NaN result */ +static float16 float16_ah_abd(float16 op1, float16 op2, float_status *stat) +{ + float16 r = float16_sub(op1, op2, stat); + return float16_is_any_nan(r) ? r : float16_abs(r); +} + +static float32 float32_ah_abd(float32 op1, float32 op2, float_status *stat) +{ + float32 r = float32_sub(op1, op2, stat); + return float32_is_any_nan(r) ? r : float32_abs(r); +} + +static float64 float64_ah_abd(float64 op1, float64 op2, float_status *stat) +{ + float64 r = float64_sub(op1, op2, stat); + return float64_is_any_nan(r) ? r : float64_abs(r); +} + /* * Reciprocal step. These are the AArch32 version which uses a * non-fused multiply-and-subtract. 
@@ -1359,7 +1456,8 @@ static float32 float32_rsqrts_nf(float32 op1, float32 op2, float_status *stat) } #define DO_3OP(NAME, FUNC, TYPE) \ -void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, \ + float_status *stat, uint32_t desc) \ { \ intptr_t i, oprsz = simd_oprsz(desc); \ TYPE *d = vd, *n = vn, *m = vm; \ @@ -1369,14 +1467,19 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \ clear_tail(d, oprsz, simd_maxsz(desc)); \ } +DO_3OP(gvec_fadd_b16, bfloat16_add, float16) DO_3OP(gvec_fadd_h, float16_add, float16) DO_3OP(gvec_fadd_s, float32_add, float32) DO_3OP(gvec_fadd_d, float64_add, float64) +DO_3OP(gvec_bfadd, bfloat16_add, bfloat16) +DO_3OP(gvec_fsub_b16, bfloat16_sub, float16) DO_3OP(gvec_fsub_h, float16_sub, float16) DO_3OP(gvec_fsub_s, float32_sub, float32) DO_3OP(gvec_fsub_d, float64_sub, float64) +DO_3OP(gvec_bfsub, bfloat16_sub, bfloat16) +DO_3OP(gvec_fmul_b16, bfloat16_mul, float16) DO_3OP(gvec_fmul_h, float16_mul, float16) DO_3OP(gvec_fmul_s, float32_mul, float32) DO_3OP(gvec_fmul_d, float64_mul, float64) @@ -1389,6 +1492,10 @@ DO_3OP(gvec_fabd_h, float16_abd, float16) DO_3OP(gvec_fabd_s, float32_abd, float32) DO_3OP(gvec_fabd_d, float64_abd, float64) +DO_3OP(gvec_ah_fabd_h, float16_ah_abd, float16) +DO_3OP(gvec_ah_fabd_s, float32_ah_abd, float32) +DO_3OP(gvec_ah_fabd_d, float64_ah_abd, float64) + DO_3OP(gvec_fceq_h, float16_ceq, float16) DO_3OP(gvec_fceq_s, float32_ceq, float32) DO_3OP(gvec_fceq_d, float64_ceq, float64) @@ -1448,6 +1555,29 @@ DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16) DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32) DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64) +DO_3OP(gvec_ah_recps_h, helper_recpsf_ah_f16, float16) +DO_3OP(gvec_ah_recps_s, helper_recpsf_ah_f32, float32) +DO_3OP(gvec_ah_recps_d, helper_recpsf_ah_f64, float64) + +DO_3OP(gvec_ah_rsqrts_h, helper_rsqrtsf_ah_f16, float16) +DO_3OP(gvec_ah_rsqrts_s, helper_rsqrtsf_ah_f32, float32) +DO_3OP(gvec_ah_rsqrts_d, helper_rsqrtsf_ah_f64, float64) + +DO_3OP(gvec_ah_fmax_h, helper_vfp_ah_maxh, float16) +DO_3OP(gvec_ah_fmax_s, helper_vfp_ah_maxs, float32) +DO_3OP(gvec_ah_fmax_d, helper_vfp_ah_maxd, float64) + +DO_3OP(gvec_ah_fmin_h, helper_vfp_ah_minh, float16) +DO_3OP(gvec_ah_fmin_s, helper_vfp_ah_mins, float32) +DO_3OP(gvec_ah_fmin_d, helper_vfp_ah_mind, float64) + +DO_3OP(gvec_fmax_b16, bfloat16_max, bfloat16) +DO_3OP(gvec_fmin_b16, bfloat16_min, bfloat16) +DO_3OP(gvec_fmaxnum_b16, bfloat16_maxnum, bfloat16) +DO_3OP(gvec_fminnum_b16, bfloat16_minnum, bfloat16) +DO_3OP(gvec_ah_fmax_b16, helper_sme2_ah_fmax_b16, bfloat16) +DO_3OP(gvec_ah_fmin_b16, helper_sme2_ah_fmin_b16, bfloat16) + #endif #undef DO_3OP @@ -1483,6 +1613,12 @@ static float16 float16_muladd_f(float16 dest, float16 op1, float16 op2, return float16_muladd(op1, op2, dest, 0, stat); } +static bfloat16 bfloat16_muladd_f(bfloat16 dest, bfloat16 op1, bfloat16 op2, + float_status *stat) +{ + return bfloat16_muladd(op1, op2, dest, 0, stat); +} + static float32 float32_muladd_f(float32 dest, float32 op1, float32 op2, float_status *stat) { @@ -1501,6 +1637,12 @@ static float16 float16_mulsub_f(float16 dest, float16 op1, float16 op2, return float16_muladd(float16_chs(op1), op2, dest, 0, stat); } +static bfloat16 bfloat16_mulsub_f(bfloat16 dest, bfloat16 op1, bfloat16 op2, + float_status *stat) +{ + return bfloat16_muladd(bfloat16_chs(op1), op2, dest, 0, stat); +} + static float32 float32_mulsub_f(float32 dest, float32 op1, float32 op2, 
float_status *stat) { @@ -1513,8 +1655,33 @@ static float64 float64_mulsub_f(float64 dest, float64 op1, float64 op2, return float64_muladd(float64_chs(op1), op2, dest, 0, stat); } -#define DO_MULADD(NAME, FUNC, TYPE) \ -void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \ +static float16 float16_ah_mulsub_f(float16 dest, float16 op1, float16 op2, + float_status *stat) +{ + return float16_muladd(op1, op2, dest, float_muladd_negate_product, stat); +} + +static bfloat16 bfloat16_ah_mulsub_f(bfloat16 dest, bfloat16 op1, bfloat16 op2, + float_status *stat) +{ + return bfloat16_muladd(op1, op2, dest, float_muladd_negate_product, stat); +} + +static float32 float32_ah_mulsub_f(float32 dest, float32 op1, float32 op2, + float_status *stat) +{ + return float32_muladd(op1, op2, dest, float_muladd_negate_product, stat); +} + +static float64 float64_ah_mulsub_f(float64 dest, float64 op1, float64 op2, + float_status *stat) +{ + return float64_muladd(op1, op2, dest, float_muladd_negate_product, stat); +} + +#define DO_MULADD(NAME, FUNC, TYPE) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, \ + float_status *stat, uint32_t desc) \ { \ intptr_t i, oprsz = simd_oprsz(desc); \ TYPE *d = vd, *n = vn, *m = vm; \ @@ -1524,19 +1691,28 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \ clear_tail(d, oprsz, simd_maxsz(desc)); \ } -DO_MULADD(gvec_fmla_h, float16_muladd_nf, float16) -DO_MULADD(gvec_fmla_s, float32_muladd_nf, float32) +DO_MULADD(gvec_fmla_nf_h, float16_muladd_nf, float16) +DO_MULADD(gvec_fmla_nf_s, float32_muladd_nf, float32) -DO_MULADD(gvec_fmls_h, float16_mulsub_nf, float16) -DO_MULADD(gvec_fmls_s, float32_mulsub_nf, float32) +DO_MULADD(gvec_fmls_nf_h, float16_mulsub_nf, float16) +DO_MULADD(gvec_fmls_nf_s, float32_mulsub_nf, float32) DO_MULADD(gvec_vfma_h, float16_muladd_f, float16) DO_MULADD(gvec_vfma_s, float32_muladd_f, float32) DO_MULADD(gvec_vfma_d, float64_muladd_f, float64) +DO_MULADD(gvec_bfmla, bfloat16_muladd_f, bfloat16) DO_MULADD(gvec_vfms_h, float16_mulsub_f, float16) DO_MULADD(gvec_vfms_s, float32_mulsub_f, float32) DO_MULADD(gvec_vfms_d, float64_mulsub_f, float64) +DO_MULADD(gvec_bfmls, bfloat16_mulsub_f, bfloat16) + +DO_MULADD(gvec_ah_vfms_h, float16_ah_mulsub_f, float16) +DO_MULADD(gvec_ah_vfms_s, float32_ah_mulsub_f, float32) +DO_MULADD(gvec_ah_vfms_d, float64_ah_mulsub_f, float64) +DO_MULADD(gvec_ah_bfmls, bfloat16_ah_mulsub_f, bfloat16) + +#undef DO_MULADD /* For the indexed ops, SVE applies the index per 128-bit vector segment. * For AdvSIMD, there is of course only one such vector segment. 
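[Editor's note, not part of the patch] The trailing comment notes that SVE applies the index once per 128-bit vector segment, with AdvSIMD as the single-segment special case. A rough standalone sketch of that iteration shape, using plain host floats rather than softfloat and a hypothetical helper name, before the real DO_FMUL_IDX/DO_FMLA_IDX macros in the next hunk:

/*
 * Sketch only: the per-segment indexed pattern, without the H4()
 * host-endian fixup. Assumes elements is a multiple of the segment
 * size, as it is for SVE vector lengths.
 */
#include <stddef.h>

static void fmul_idx_sketch(float *d, const float *n, const float *m,
                            size_t elements, size_t idx)
{
    /* A 128-bit segment holds four float32 elements. */
    const size_t eltspersegment = 16 / sizeof(float);

    for (size_t i = 0; i < elements; i += eltspersegment) {
        /* The indexed operand is re-read once per 128-bit segment... */
        float mm = m[i + idx];
        for (size_t j = i; j < i + eltspersegment; j++) {
            /* ...and broadcast across that segment's elements. */
            d[j] = n[j] * mm;
        }
    }
}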
@@ -1591,7 +1767,8 @@ DO_MLA_IDX(gvec_mls_idx_d, uint64_t, -, H8) #undef DO_MLA_IDX #define DO_FMUL_IDX(NAME, ADD, MUL, TYPE, H) \ -void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, \ + float_status *stat, uint32_t desc) \ { \ intptr_t i, j, oprsz = simd_oprsz(desc); \ intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \ @@ -1608,6 +1785,7 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \ #define nop(N, M, S) (M) +DO_FMUL_IDX(gvec_fmul_idx_b16, nop, bfloat16_mul, float16, H2) DO_FMUL_IDX(gvec_fmul_idx_h, nop, float16_mul, float16, H2) DO_FMUL_IDX(gvec_fmul_idx_s, nop, float32_mul, float32, H4) DO_FMUL_IDX(gvec_fmul_idx_d, nop, float64_mul, float64, H8) @@ -1633,29 +1811,38 @@ DO_FMUL_IDX(gvec_fmls_nf_idx_s, float32_sub, float32_mul, float32, H4) #undef DO_FMUL_IDX -#define DO_FMLA_IDX(NAME, TYPE, H) \ +#define DO_FMLA_IDX(NAME, TYPE, H, NEGX, NEGF) \ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, \ - void *stat, uint32_t desc) \ + float_status *stat, uint32_t desc) \ { \ intptr_t i, j, oprsz = simd_oprsz(desc); \ intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \ - TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1); \ - intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1); \ + intptr_t idx = simd_data(desc); \ TYPE *d = vd, *n = vn, *m = vm, *a = va; \ - op1_neg <<= (8 * sizeof(TYPE) - 1); \ for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \ TYPE mm = m[H(i + idx)]; \ for (j = 0; j < segment; j++) { \ - d[i + j] = TYPE##_muladd(n[i + j] ^ op1_neg, \ - mm, a[i + j], 0, stat); \ + d[i + j] = TYPE##_muladd(n[i + j] ^ NEGX, mm, \ + a[i + j], NEGF, stat); \ } \ } \ clear_tail(d, oprsz, simd_maxsz(desc)); \ } -DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2) -DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4) -DO_FMLA_IDX(gvec_fmla_idx_d, float64, H8) +DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2, 0, 0) +DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4, 0, 0) +DO_FMLA_IDX(gvec_fmla_idx_d, float64, H8, 0, 0) +DO_FMLA_IDX(gvec_bfmla_idx, bfloat16, H2, 0, 0) + +DO_FMLA_IDX(gvec_fmls_idx_h, float16, H2, INT16_MIN, 0) +DO_FMLA_IDX(gvec_fmls_idx_s, float32, H4, INT32_MIN, 0) +DO_FMLA_IDX(gvec_fmls_idx_d, float64, H8, INT64_MIN, 0) +DO_FMLA_IDX(gvec_bfmls_idx, bfloat16, H2, INT16_MIN, 0) + +DO_FMLA_IDX(gvec_ah_fmls_idx_h, float16, H2, 0, float_muladd_negate_product) +DO_FMLA_IDX(gvec_ah_fmls_idx_s, float32, H4, 0, float_muladd_negate_product) +DO_FMLA_IDX(gvec_ah_fmls_idx_d, float64, H8, 0, float_muladd_negate_product) +DO_FMLA_IDX(gvec_ah_bfmls_idx, bfloat16, H2, 0, float_muladd_negate_product) #undef DO_FMLA_IDX @@ -2028,135 +2215,173 @@ static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2) * as there is not yet SVE versions that might use blocking. */ -static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst, - uint32_t desc, bool fz16) +static void do_fmlal(float32 *d, void *vn, void *vm, + CPUARMState *env, uint32_t desc, + ARMFPStatusFlavour fpst_idx, + uint64_t negx, int negf) { + float_status *fpst = &env->vfp.fp_status[fpst_idx]; + bool fz16 = env->vfp.fpcr & FPCR_FZ16; intptr_t i, oprsz = simd_oprsz(desc); - int is_s = extract32(desc, SIMD_DATA_SHIFT, 1); int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1); int is_q = oprsz == 16; uint64_t n_4, m_4; - /* Pre-load all of the f16 data, avoiding overlap issues. */ - n_4 = load4_f16(vn, is_q, is_2); + /* + * Pre-load all of the f16 data, avoiding overlap issues. + * Negate all inputs for AH=0 FMLSL at once. 
+ */ + n_4 = load4_f16(vn, is_q, is_2) ^ negx; m_4 = load4_f16(vm, is_q, is_2); - /* Negate all inputs for FMLSL at once. */ - if (is_s) { - n_4 ^= 0x8000800080008000ull; - } - for (i = 0; i < oprsz / 4; i++) { float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16); float32 m_1 = float16_to_float32_by_bits(m_4 >> (i * 16), fz16); - d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst); + d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], negf, fpst); } clear_tail(d, oprsz, simd_maxsz(desc)); } void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm, - void *venv, uint32_t desc) + CPUARMState *env, uint32_t desc) { - CPUARMState *env = venv; - do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, desc, - get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); + bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); + uint64_t negx = is_s ? 0x8000800080008000ull : 0; + + do_fmlal(vd, vn, vm, env, desc, FPST_STD, negx, 0); } void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm, - void *venv, uint32_t desc) + CPUARMState *env, uint32_t desc) { - CPUARMState *env = venv; - do_fmlal(vd, vn, vm, &env->vfp.fp_status, desc, - get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); + bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); + uint64_t negx = 0; + int negf = 0; + + if (is_s) { + if (env->vfp.fpcr & FPCR_AH) { + negf = float_muladd_negate_product; + } else { + negx = 0x8000800080008000ull; + } + } + do_fmlal(vd, vn, vm, env, desc, FPST_A64, negx, negf); } void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va, - void *venv, uint32_t desc) + CPUARMState *env, uint32_t desc) { intptr_t i, oprsz = simd_oprsz(desc); - uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15; + bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16); - CPUARMState *env = venv; - float_status *status = &env->vfp.fp_status; - bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16); + bool za = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + float_status *status = &env->vfp.fp_status[za ? FPST_ZA : FPST_A64]; + bool fz16 = env->vfp.fpcr & FPCR_FZ16; + int negx = 0, negf = 0; + + if (is_s) { + if (env->vfp.fpcr & FPCR_AH) { + negf = float_muladd_negate_product; + } else { + negx = 0x8000; + } + } for (i = 0; i < oprsz; i += sizeof(float32)) { - float16 nn_16 = *(float16 *)(vn + H1_2(i + sel)) ^ negn; + float16 nn_16 = *(float16 *)(vn + H1_2(i + sel)) ^ negx; float16 mm_16 = *(float16 *)(vm + H1_2(i + sel)); float32 nn = float16_to_float32_by_bits(nn_16, fz16); float32 mm = float16_to_float32_by_bits(mm_16, fz16); float32 aa = *(float32 *)(va + H1_4(i)); - *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, 0, status); + *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, negf, status); } } -static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst, - uint32_t desc, bool fz16) +static void do_fmlal_idx(float32 *d, void *vn, void *vm, + CPUARMState *env, uint32_t desc, + ARMFPStatusFlavour fpst_idx, + uint64_t negx, int negf) { + float_status *fpst = &env->vfp.fp_status[fpst_idx]; + bool fz16 = env->vfp.fpcr & FPCR_FZ16; intptr_t i, oprsz = simd_oprsz(desc); - int is_s = extract32(desc, SIMD_DATA_SHIFT, 1); int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1); int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3); int is_q = oprsz == 16; uint64_t n_4; float32 m_1; - /* Pre-load all of the f16 data, avoiding overlap issues. */ - n_4 = load4_f16(vn, is_q, is_2); - - /* Negate all inputs for FMLSL at once. 
*/ - if (is_s) { - n_4 ^= 0x8000800080008000ull; - } - + /* + * Pre-load all of the f16 data, avoiding overlap issues. + * Negate all inputs for AH=0 FMLSL at once. + */ + n_4 = load4_f16(vn, is_q, is_2) ^ negx; m_1 = float16_to_float32_by_bits(((float16 *)vm)[H2(index)], fz16); for (i = 0; i < oprsz / 4; i++) { float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16); - d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst); + d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], negf, fpst); } clear_tail(d, oprsz, simd_maxsz(desc)); } void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm, - void *venv, uint32_t desc) + CPUARMState *env, uint32_t desc) { - CPUARMState *env = venv; - do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, desc, - get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); + bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); + uint64_t negx = is_s ? 0x8000800080008000ull : 0; + + do_fmlal_idx(vd, vn, vm, env, desc, FPST_STD, negx, 0); } void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm, - void *venv, uint32_t desc) + CPUARMState *env, uint32_t desc) { - CPUARMState *env = venv; - do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc, - get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); + bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); + uint64_t negx = 0; + int negf = 0; + + if (is_s) { + if (env->vfp.fpcr & FPCR_AH) { + negf = float_muladd_negate_product; + } else { + negx = 0x8000800080008000ull; + } + } + do_fmlal_idx(vd, vn, vm, env, desc, FPST_A64, negx, negf); } void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va, - void *venv, uint32_t desc) + CPUARMState *env, uint32_t desc) { intptr_t i, j, oprsz = simd_oprsz(desc); - uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15; + bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1); intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16); - intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 3) * sizeof(float16); - CPUARMState *env = venv; - float_status *status = &env->vfp.fp_status; - bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16); + bool za = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 3, 3) * sizeof(float16); + float_status *status = &env->vfp.fp_status[za ? 
FPST_ZA : FPST_A64]; + bool fz16 = env->vfp.fpcr & FPCR_FZ16; + int negx = 0, negf = 0; + if (is_s) { + if (env->vfp.fpcr & FPCR_AH) { + negf = float_muladd_negate_product; + } else { + negx = 0x8000; + } + } for (i = 0; i < oprsz; i += 16) { float16 mm_16 = *(float16 *)(vm + i + idx); float32 mm = float16_to_float32_by_bits(mm_16, fz16); for (j = 0; j < 16; j += sizeof(float32)) { - float16 nn_16 = *(float16 *)(vn + H1_2(i + j + sel)) ^ negn; + float16 nn_16 = *(float16 *)(vn + H1_2(i + j + sel)) ^ negx; float32 nn = float16_to_float32_by_bits(nn_16, fz16); float32 aa = *(float32 *)(va + H1_4(i + j)); *(float32 *)(vd + H1_4(i + j)) = - float32_muladd(nn, mm, aa, 0, status); + float32_muladd(nn, mm, aa, negf, status); } } } @@ -2401,7 +2626,8 @@ DO_ABA(gvec_uaba_d, uint64_t) #undef DO_ABA #define DO_3OP_PAIR(NAME, FUNC, TYPE, H) \ -void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \ +void HELPER(NAME)(void *vd, void *vn, void *vm, \ + float_status *stat, uint32_t desc) \ { \ ARMVectorReg scratch; \ intptr_t oprsz = simd_oprsz(desc); \ @@ -2439,6 +2665,16 @@ DO_3OP_PAIR(gvec_fminnump_h, float16_minnum, float16, H2) DO_3OP_PAIR(gvec_fminnump_s, float32_minnum, float32, H4) DO_3OP_PAIR(gvec_fminnump_d, float64_minnum, float64, ) +#ifdef TARGET_AARCH64 +DO_3OP_PAIR(gvec_ah_fmaxp_h, helper_vfp_ah_maxh, float16, H2) +DO_3OP_PAIR(gvec_ah_fmaxp_s, helper_vfp_ah_maxs, float32, H4) +DO_3OP_PAIR(gvec_ah_fmaxp_d, helper_vfp_ah_maxd, float64, ) + +DO_3OP_PAIR(gvec_ah_fminp_h, helper_vfp_ah_minh, float16, H2) +DO_3OP_PAIR(gvec_ah_fminp_s, helper_vfp_ah_mins, float32, H4) +DO_3OP_PAIR(gvec_ah_fminp_d, helper_vfp_ah_mind, float64, ) +#endif + #undef DO_3OP_PAIR #define DO_3OP_PAIR(NAME, FUNC, TYPE, H) \ @@ -2486,7 +2722,7 @@ DO_3OP_PAIR(gvec_uminp_s, MIN, uint32_t, H4) #undef DO_3OP_PAIR #define DO_VCVT_FIXED(NAME, FUNC, TYPE) \ - void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \ + void HELPER(NAME)(void *vd, void *vn, float_status *stat, uint32_t desc) \ { \ intptr_t i, oprsz = simd_oprsz(desc); \ int shift = simd_data(desc); \ @@ -2498,21 +2734,25 @@ DO_3OP_PAIR(gvec_uminp_s, MIN, uint32_t, H4) clear_tail(d, oprsz, simd_maxsz(desc)); \ } +DO_VCVT_FIXED(gvec_vcvt_sd, helper_vfp_sqtod, uint64_t) +DO_VCVT_FIXED(gvec_vcvt_ud, helper_vfp_uqtod, uint64_t) DO_VCVT_FIXED(gvec_vcvt_sf, helper_vfp_sltos, uint32_t) DO_VCVT_FIXED(gvec_vcvt_uf, helper_vfp_ultos, uint32_t) -DO_VCVT_FIXED(gvec_vcvt_fs, helper_vfp_tosls_round_to_zero, uint32_t) -DO_VCVT_FIXED(gvec_vcvt_fu, helper_vfp_touls_round_to_zero, uint32_t) DO_VCVT_FIXED(gvec_vcvt_sh, helper_vfp_shtoh, uint16_t) DO_VCVT_FIXED(gvec_vcvt_uh, helper_vfp_uhtoh, uint16_t) -DO_VCVT_FIXED(gvec_vcvt_hs, helper_vfp_toshh_round_to_zero, uint16_t) -DO_VCVT_FIXED(gvec_vcvt_hu, helper_vfp_touhh_round_to_zero, uint16_t) + +DO_VCVT_FIXED(gvec_vcvt_rz_ds, helper_vfp_tosqd_round_to_zero, uint64_t) +DO_VCVT_FIXED(gvec_vcvt_rz_du, helper_vfp_touqd_round_to_zero, uint64_t) +DO_VCVT_FIXED(gvec_vcvt_rz_fs, helper_vfp_tosls_round_to_zero, uint32_t) +DO_VCVT_FIXED(gvec_vcvt_rz_fu, helper_vfp_touls_round_to_zero, uint32_t) +DO_VCVT_FIXED(gvec_vcvt_rz_hs, helper_vfp_toshh_round_to_zero, uint16_t) +DO_VCVT_FIXED(gvec_vcvt_rz_hu, helper_vfp_touhh_round_to_zero, uint16_t) #undef DO_VCVT_FIXED #define DO_VCVT_RMODE(NAME, FUNC, TYPE) \ - void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \ + void HELPER(NAME)(void *vd, void *vn, float_status *fpst, uint32_t desc) \ { \ - float_status *fpst = stat; \ intptr_t i, oprsz = simd_oprsz(desc); 
\ uint32_t rmode = simd_data(desc); \ uint32_t prev_rmode = get_float_rounding_mode(fpst); \ @@ -2525,6 +2765,8 @@ DO_VCVT_FIXED(gvec_vcvt_hu, helper_vfp_touhh_round_to_zero, uint16_t) clear_tail(d, oprsz, simd_maxsz(desc)); \ } +DO_VCVT_RMODE(gvec_vcvt_rm_sd, helper_vfp_tosqd, uint64_t) +DO_VCVT_RMODE(gvec_vcvt_rm_ud, helper_vfp_touqd, uint64_t) DO_VCVT_RMODE(gvec_vcvt_rm_ss, helper_vfp_tosls, uint32_t) DO_VCVT_RMODE(gvec_vcvt_rm_us, helper_vfp_touls, uint32_t) DO_VCVT_RMODE(gvec_vcvt_rm_sh, helper_vfp_toshh, uint16_t) @@ -2533,9 +2775,8 @@ DO_VCVT_RMODE(gvec_vcvt_rm_uh, helper_vfp_touhh, uint16_t) #undef DO_VCVT_RMODE #define DO_VRINT_RMODE(NAME, FUNC, TYPE) \ - void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \ + void HELPER(NAME)(void *vd, void *vn, float_status *fpst, uint32_t desc) \ { \ - float_status *fpst = stat; \ intptr_t i, oprsz = simd_oprsz(desc); \ uint32_t rmode = simd_data(desc); \ uint32_t prev_rmode = get_float_rounding_mode(fpst); \ @@ -2554,10 +2795,9 @@ DO_VRINT_RMODE(gvec_vrint_rm_s, helper_rints, uint32_t) #undef DO_VRINT_RMODE #ifdef TARGET_AARCH64 -void HELPER(simd_tblx)(void *vd, void *vm, void *venv, uint32_t desc) +void HELPER(simd_tblx)(void *vd, void *vm, CPUARMState *env, uint32_t desc) { const uint8_t *indices = vm; - CPUARMState *env = venv; size_t oprsz = simd_oprsz(desc); uint32_t rn = extract32(desc, SIMD_DATA_SHIFT, 5); bool is_tbx = extract32(desc, SIMD_DATA_SHIFT + 5, 1); @@ -2790,44 +3030,140 @@ DO_MMLA_B(gvec_usmmla_b, do_usmmla_b) * BFloat16 Dot Product */ -float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2) +bool is_ebf(CPUARMState *env, float_status *statusp, float_status *oddstatusp) +{ + /* + * For BFDOT, BFMMLA, etc, the behaviour depends on FPCR.EBF. + * For EBF = 0, we ignore the FPCR bits which determine rounding + * mode and denormal-flushing, and we do unfused multiplies and + * additions with intermediate rounding of all products and sums. + * For EBF = 1, we honour FPCR rounding mode and denormal-flushing bits, + * and we perform a fused two-way sum-of-products without intermediate + * rounding of the products. + * In either case, we don't set fp exception flags. + * + * EBF is AArch64 only, so even if it's set in the FPCR it has + * no effect on AArch32 instructions. + */ + bool ebf = is_a64(env) && env->vfp.fpcr & FPCR_EBF; + + *statusp = env->vfp.fp_status[is_a64(env) ? FPST_A64 : FPST_A32]; + set_default_nan_mode(true, statusp); + + if (ebf) { + /* EBF=1 needs to do a step with round-to-odd semantics */ + *oddstatusp = *statusp; + set_float_rounding_mode(float_round_to_odd, oddstatusp); + } else { + set_flush_to_zero(true, statusp); + set_flush_inputs_to_zero(true, statusp); + set_float_rounding_mode(float_round_to_odd_inf, statusp); + } + return ebf; +} + +float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2, float_status *fpst) { - /* FPCR is ignored for BFDOT and BFMMLA. */ - float_status bf_status = { - .tininess_before_rounding = float_tininess_before_rounding, - .float_rounding_mode = float_round_to_odd_inf, - .flush_to_zero = true, - .flush_inputs_to_zero = true, - .default_nan_mode = true, - }; float32 t1, t2; /* * Extract each BFloat16 from the element pair, and shift * them such that they become float32. 
*/ - t1 = float32_mul(e1 << 16, e2 << 16, &bf_status); - t2 = float32_mul(e1 & 0xffff0000u, e2 & 0xffff0000u, &bf_status); - t1 = float32_add(t1, t2, &bf_status); - t1 = float32_add(sum, t1, &bf_status); + t1 = float32_mul(e1 << 16, e2 << 16, fpst); + t2 = float32_mul(e1 & 0xffff0000u, e2 & 0xffff0000u, fpst); + t1 = float32_add(t1, t2, fpst); + t1 = float32_add(sum, t1, fpst); return t1; } -void HELPER(gvec_bfdot)(void *vd, void *vn, void *vm, void *va, uint32_t desc) +float32 bfdotadd_ebf(float32 sum, uint32_t e1, uint32_t e2, + float_status *fpst, float_status *fpst_odd) +{ + float32 s1r = e1 << 16; + float32 s1c = e1 & 0xffff0000u; + float32 s2r = e2 << 16; + float32 s2c = e2 & 0xffff0000u; + float32 t32; + + /* C.f. FPProcessNaNs4 */ + if (float32_is_any_nan(s1r) || float32_is_any_nan(s1c) || + float32_is_any_nan(s2r) || float32_is_any_nan(s2c)) { + if (float32_is_signaling_nan(s1r, fpst)) { + t32 = s1r; + } else if (float32_is_signaling_nan(s1c, fpst)) { + t32 = s1c; + } else if (float32_is_signaling_nan(s2r, fpst)) { + t32 = s2r; + } else if (float32_is_signaling_nan(s2c, fpst)) { + t32 = s2c; + } else if (float32_is_any_nan(s1r)) { + t32 = s1r; + } else if (float32_is_any_nan(s1c)) { + t32 = s1c; + } else if (float32_is_any_nan(s2r)) { + t32 = s2r; + } else { + t32 = s2c; + } + /* + * FPConvertNaN(FPProcessNaN(t32)) will be done as part + * of the final addition below. + */ + } else { + /* + * Compare f16_dotadd() in sme_helper.c, but here we have + * bfloat16 inputs. In particular that means that we do not + * want the FPCR.FZ16 flush semantics, so we use the normal + * float_status for the input handling here. + */ + float64 e1r = float32_to_float64(s1r, fpst); + float64 e1c = float32_to_float64(s1c, fpst); + float64 e2r = float32_to_float64(s2r, fpst); + float64 e2c = float32_to_float64(s2c, fpst); + float64 t64; + + /* + * The ARM pseudocode function FPDot performs both multiplies + * and the add with a single rounding operation. Emulate this + * by performing the first multiply in round-to-odd, then doing + * the second multiply as fused multiply-add, and rounding to + * float32 all in one step. + */ + t64 = float64_mul(e1r, e2r, fpst_odd); + t64 = float64r32_muladd(e1c, e2c, t64, 0, fpst); + + /* This conversion is exact, because we've already rounded. */ + t32 = float64_to_float32(t64, fpst); + } + + /* The final accumulation step is not fused. 
*/ + return float32_add(sum, t32, fpst); +} + +void HELPER(gvec_bfdot)(void *vd, void *vn, void *vm, void *va, + CPUARMState *env, uint32_t desc) { intptr_t i, opr_sz = simd_oprsz(desc); float32 *d = vd, *a = va; uint32_t *n = vn, *m = vm; + float_status fpst, fpst_odd; - for (i = 0; i < opr_sz / 4; ++i) { - d[i] = bfdotadd(a[i], n[i], m[i]); + if (is_ebf(env, &fpst, &fpst_odd)) { + for (i = 0; i < opr_sz / 4; ++i) { + d[i] = bfdotadd_ebf(a[i], n[i], m[i], &fpst, &fpst_odd); + } + } else { + for (i = 0; i < opr_sz / 4; ++i) { + d[i] = bfdotadd(a[i], n[i], m[i], &fpst); + } } clear_tail(d, opr_sz, simd_maxsz(desc)); } void HELPER(gvec_bfdot_idx)(void *vd, void *vn, void *vm, - void *va, uint32_t desc) + void *va, CPUARMState *env, uint32_t desc) { intptr_t i, j, opr_sz = simd_oprsz(desc); intptr_t index = simd_data(desc); @@ -2835,95 +3171,213 @@ void HELPER(gvec_bfdot_idx)(void *vd, void *vn, void *vm, intptr_t eltspersegment = MIN(16 / 4, elements); float32 *d = vd, *a = va; uint32_t *n = vn, *m = vm; + float_status fpst, fpst_odd; - for (i = 0; i < elements; i += eltspersegment) { - uint32_t m_idx = m[i + H4(index)]; + if (is_ebf(env, &fpst, &fpst_odd)) { + for (i = 0; i < elements; i += eltspersegment) { + uint32_t m_idx = m[i + H4(index)]; - for (j = i; j < i + eltspersegment; j++) { - d[j] = bfdotadd(a[j], n[j], m_idx); + for (j = i; j < i + eltspersegment; j++) { + d[j] = bfdotadd_ebf(a[j], n[j], m_idx, &fpst, &fpst_odd); + } + } + } else { + for (i = 0; i < elements; i += eltspersegment) { + uint32_t m_idx = m[i + H4(index)]; + + for (j = i; j < i + eltspersegment; j++) { + d[j] = bfdotadd(a[j], n[j], m_idx, &fpst); + } } } clear_tail(d, opr_sz, simd_maxsz(desc)); } -void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va, uint32_t desc) +void HELPER(sme2_bfvdot_idx)(void *vd, void *vn, void *vm, + void *va, CPUARMState *env, uint32_t desc) { - intptr_t s, opr_sz = simd_oprsz(desc); + intptr_t i, j, opr_sz = simd_oprsz(desc); + intptr_t idx = extract32(desc, SIMD_DATA_SHIFT, 2); + intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 2, 1); + intptr_t elements = opr_sz / 4; + intptr_t eltspersegment = MIN(16 / 4, elements); float32 *d = vd, *a = va; - uint32_t *n = vn, *m = vm; - - for (s = 0; s < opr_sz / 4; s += 4) { - float32 sum00, sum01, sum10, sum11; + uint16_t *n0 = vn; + uint16_t *n1 = vn + sizeof(ARMVectorReg); + uint32_t *m = vm; + float_status fpst, fpst_odd; + + if (is_ebf(env, &fpst, &fpst_odd)) { + for (i = 0; i < elements; i += eltspersegment) { + uint32_t m_idx = m[i + H4(idx)]; + + for (j = 0; j < eltspersegment; j++) { + uint32_t nn = (n0[H2(2 * (i + j) + sel)]) + | (n1[H2(2 * (i + j) + sel)] << 16); + d[i + H4(j)] = bfdotadd_ebf(a[i + H4(j)], nn, m_idx, + &fpst, &fpst_odd); + } + } + } else { + for (i = 0; i < elements; i += eltspersegment) { + uint32_t m_idx = m[i + H4(idx)]; - /* - * Process the entire segment at once, writing back the - * results only after we've consumed all of the inputs. 
- * - * Key to indices by column: - * i j i k j k - */ - sum00 = a[s + H4(0 + 0)]; - sum00 = bfdotadd(sum00, n[s + H4(0 + 0)], m[s + H4(0 + 0)]); - sum00 = bfdotadd(sum00, n[s + H4(0 + 1)], m[s + H4(0 + 1)]); + for (j = 0; j < eltspersegment; j++) { + uint32_t nn = (n0[H2(2 * (i + j) + sel)]) + | (n1[H2(2 * (i + j) + sel)] << 16); + d[i + H4(j)] = bfdotadd(a[i + H4(j)], nn, m_idx, &fpst); + } + } + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} - sum01 = a[s + H4(0 + 1)]; - sum01 = bfdotadd(sum01, n[s + H4(0 + 0)], m[s + H4(2 + 0)]); - sum01 = bfdotadd(sum01, n[s + H4(0 + 1)], m[s + H4(2 + 1)]); +void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va, + CPUARMState *env, uint32_t desc) +{ + intptr_t s, opr_sz = simd_oprsz(desc); + float32 *d = vd, *a = va; + uint32_t *n = vn, *m = vm; + float_status fpst, fpst_odd; - sum10 = a[s + H4(2 + 0)]; - sum10 = bfdotadd(sum10, n[s + H4(2 + 0)], m[s + H4(0 + 0)]); - sum10 = bfdotadd(sum10, n[s + H4(2 + 1)], m[s + H4(0 + 1)]); + if (is_ebf(env, &fpst, &fpst_odd)) { + for (s = 0; s < opr_sz / 4; s += 4) { + float32 sum00, sum01, sum10, sum11; - sum11 = a[s + H4(2 + 1)]; - sum11 = bfdotadd(sum11, n[s + H4(2 + 0)], m[s + H4(2 + 0)]); - sum11 = bfdotadd(sum11, n[s + H4(2 + 1)], m[s + H4(2 + 1)]); + /* + * Process the entire segment at once, writing back the + * results only after we've consumed all of the inputs. + * + * Key to indices by column: + * i j i k j k + */ + sum00 = a[s + H4(0 + 0)]; + sum00 = bfdotadd_ebf(sum00, n[s + H4(0 + 0)], m[s + H4(0 + 0)], &fpst, &fpst_odd); + sum00 = bfdotadd_ebf(sum00, n[s + H4(0 + 1)], m[s + H4(0 + 1)], &fpst, &fpst_odd); + + sum01 = a[s + H4(0 + 1)]; + sum01 = bfdotadd_ebf(sum01, n[s + H4(0 + 0)], m[s + H4(2 + 0)], &fpst, &fpst_odd); + sum01 = bfdotadd_ebf(sum01, n[s + H4(0 + 1)], m[s + H4(2 + 1)], &fpst, &fpst_odd); + + sum10 = a[s + H4(2 + 0)]; + sum10 = bfdotadd_ebf(sum10, n[s + H4(2 + 0)], m[s + H4(0 + 0)], &fpst, &fpst_odd); + sum10 = bfdotadd_ebf(sum10, n[s + H4(2 + 1)], m[s + H4(0 + 1)], &fpst, &fpst_odd); + + sum11 = a[s + H4(2 + 1)]; + sum11 = bfdotadd_ebf(sum11, n[s + H4(2 + 0)], m[s + H4(2 + 0)], &fpst, &fpst_odd); + sum11 = bfdotadd_ebf(sum11, n[s + H4(2 + 1)], m[s + H4(2 + 1)], &fpst, &fpst_odd); + + d[s + H4(0 + 0)] = sum00; + d[s + H4(0 + 1)] = sum01; + d[s + H4(2 + 0)] = sum10; + d[s + H4(2 + 1)] = sum11; + } + } else { + for (s = 0; s < opr_sz / 4; s += 4) { + float32 sum00, sum01, sum10, sum11; - d[s + H4(0 + 0)] = sum00; - d[s + H4(0 + 1)] = sum01; - d[s + H4(2 + 0)] = sum10; - d[s + H4(2 + 1)] = sum11; + /* + * Process the entire segment at once, writing back the + * results only after we've consumed all of the inputs. 
+ * + * Key to indices by column: + * i j i k j k + */ + sum00 = a[s + H4(0 + 0)]; + sum00 = bfdotadd(sum00, n[s + H4(0 + 0)], m[s + H4(0 + 0)], &fpst); + sum00 = bfdotadd(sum00, n[s + H4(0 + 1)], m[s + H4(0 + 1)], &fpst); + + sum01 = a[s + H4(0 + 1)]; + sum01 = bfdotadd(sum01, n[s + H4(0 + 0)], m[s + H4(2 + 0)], &fpst); + sum01 = bfdotadd(sum01, n[s + H4(0 + 1)], m[s + H4(2 + 1)], &fpst); + + sum10 = a[s + H4(2 + 0)]; + sum10 = bfdotadd(sum10, n[s + H4(2 + 0)], m[s + H4(0 + 0)], &fpst); + sum10 = bfdotadd(sum10, n[s + H4(2 + 1)], m[s + H4(0 + 1)], &fpst); + + sum11 = a[s + H4(2 + 1)]; + sum11 = bfdotadd(sum11, n[s + H4(2 + 0)], m[s + H4(2 + 0)], &fpst); + sum11 = bfdotadd(sum11, n[s + H4(2 + 1)], m[s + H4(2 + 1)], &fpst); + + d[s + H4(0 + 0)] = sum00; + d[s + H4(0 + 1)] = sum01; + d[s + H4(2 + 0)] = sum10; + d[s + H4(2 + 1)] = sum11; + } } clear_tail(d, opr_sz, simd_maxsz(desc)); } -void HELPER(gvec_bfmlal)(void *vd, void *vn, void *vm, void *va, - void *stat, uint32_t desc) +static void do_bfmlal(float32 *d, bfloat16 *n, bfloat16 *m, float32 *a, + float_status *stat, uint32_t desc, int negx, int negf) { intptr_t i, opr_sz = simd_oprsz(desc); - intptr_t sel = simd_data(desc); - float32 *d = vd, *a = va; - bfloat16 *n = vn, *m = vm; + intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1); for (i = 0; i < opr_sz / 4; ++i) { - float32 nn = n[H2(i * 2 + sel)] << 16; + float32 nn = (negx ^ n[H2(i * 2 + sel)]) << 16; float32 mm = m[H2(i * 2 + sel)] << 16; - d[H4(i)] = float32_muladd(nn, mm, a[H4(i)], 0, stat); + d[H4(i)] = float32_muladd(nn, mm, a[H4(i)], negf, stat); } clear_tail(d, opr_sz, simd_maxsz(desc)); } -void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm, - void *va, void *stat, uint32_t desc) +void HELPER(gvec_bfmlal)(void *vd, void *vn, void *vm, void *va, + float_status *stat, uint32_t desc) +{ + do_bfmlal(vd, vn, vm, va, stat, desc, 0, 0); +} + +void HELPER(gvec_bfmlsl)(void *vd, void *vn, void *vm, void *va, + float_status *stat, uint32_t desc) +{ + do_bfmlal(vd, vn, vm, va, stat, desc, 0x8000, 0); +} + +void HELPER(gvec_ah_bfmlsl)(void *vd, void *vn, void *vm, void *va, + float_status *stat, uint32_t desc) +{ + do_bfmlal(vd, vn, vm, va, stat, desc, 0, float_muladd_negate_product); +} + +static void do_bfmlal_idx(float32 *d, bfloat16 *n, bfloat16 *m, float32 *a, + float_status *stat, uint32_t desc, int negx, int negf) { intptr_t i, j, opr_sz = simd_oprsz(desc); intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1); intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 1, 3); intptr_t elements = opr_sz / 4; intptr_t eltspersegment = MIN(16 / 4, elements); - float32 *d = vd, *a = va; - bfloat16 *n = vn, *m = vm; for (i = 0; i < elements; i += eltspersegment) { float32 m_idx = m[H2(2 * i + index)] << 16; for (j = i; j < i + eltspersegment; j++) { - float32 n_j = n[H2(2 * j + sel)] << 16; - d[H4(j)] = float32_muladd(n_j, m_idx, a[H4(j)], 0, stat); + float32 n_j = (negx ^ n[H2(2 * j + sel)]) << 16; + d[H4(j)] = float32_muladd(n_j, m_idx, a[H4(j)], negf, stat); } } clear_tail(d, opr_sz, simd_maxsz(desc)); } +void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm, void *va, + float_status *stat, uint32_t desc) +{ + do_bfmlal_idx(vd, vn, vm, va, stat, desc, 0, 0); +} + +void HELPER(gvec_bfmlsl_idx)(void *vd, void *vn, void *vm, void *va, + float_status *stat, uint32_t desc) +{ + do_bfmlal_idx(vd, vn, vm, va, stat, desc, 0x8000, 0); +} + +void HELPER(gvec_ah_bfmlsl_idx)(void *vd, void *vn, void *vm, void *va, + float_status *stat, uint32_t desc) +{ + do_bfmlal_idx(vd, vn, vm, va, stat, 
desc, 0, float_muladd_negate_product); +} + #define DO_CLAMP(NAME, TYPE) \ void HELPER(NAME)(void *d, void *n, void *m, void *a, uint32_t desc) \ { \ @@ -2947,3 +3401,136 @@ DO_CLAMP(gvec_uclamp_b, uint8_t) DO_CLAMP(gvec_uclamp_h, uint16_t) DO_CLAMP(gvec_uclamp_s, uint32_t) DO_CLAMP(gvec_uclamp_d, uint64_t) + +/* Bit count in each 8-bit word. */ +void HELPER(gvec_cnt_b)(void *vd, void *vn, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + uint8_t *d = vd, *n = vn; + + for (i = 0; i < opr_sz; ++i) { + d[i] = ctpop8(n[i]); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +/* Reverse bits in each 8 bit word */ +void HELPER(gvec_rbit_b)(void *vd, void *vn, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + uint64_t *d = vd, *n = vn; + + for (i = 0; i < opr_sz / 8; ++i) { + d[i] = revbit64(bswap64(n[i])); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_urecpe_s)(void *vd, void *vn, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + uint32_t *d = vd, *n = vn; + + for (i = 0; i < opr_sz / 4; ++i) { + d[i] = helper_recpe_u32(n[i]); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_ursqrte_s)(void *vd, void *vn, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + uint32_t *d = vd, *n = vn; + + for (i = 0; i < opr_sz / 4; ++i) { + d[i] = helper_rsqrte_u32(n[i]); + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +static inline void do_lut_b(void *zd, uint64_t *indexes, uint64_t *table, + unsigned elements, unsigned segbase, + unsigned dstride, unsigned isize, + unsigned tsize, unsigned nreg) +{ + for (unsigned r = 0; r < nreg; ++r) { + uint8_t *dst = zd + dstride * r; + unsigned base = segbase + r * elements; + + for (unsigned e = 0; e < elements; ++e) { + unsigned index = extractn(indexes, (base + e) * isize, isize); + dst[H1(e)] = extractn(table, index * tsize, 8); + } + } +} + +static inline void do_lut_h(void *zd, uint64_t *indexes, uint64_t *table, + unsigned elements, unsigned segbase, + unsigned dstride, unsigned isize, + unsigned tsize, unsigned nreg) +{ + for (unsigned r = 0; r < nreg; ++r) { + uint16_t *dst = zd + dstride * r; + unsigned base = segbase + r * elements; + + for (unsigned e = 0; e < elements; ++e) { + unsigned index = extractn(indexes, (base + e) * isize, isize); + dst[H2(e)] = extractn(table, index * tsize, 16); + } + } +} + +static inline void do_lut_s(void *zd, uint64_t *indexes, uint32_t *table, + unsigned elements, unsigned segbase, + unsigned dstride, unsigned isize, + unsigned tsize, unsigned nreg) +{ + for (unsigned r = 0; r < nreg; ++r) { + uint32_t *dst = zd + dstride * r; + unsigned base = segbase + r * elements; + + for (unsigned e = 0; e < elements; ++e) { + unsigned index = extractn(indexes, (base + e) * isize, isize); + dst[H4(e)] = table[H4(index)]; + } + } +} + +#define DO_SME2_LUT(ISIZE, NREG, SUFF, ESIZE) \ +void helper_sme2_luti##ISIZE##_##NREG##SUFF \ + (void *zd, void *zn, CPUARMState *env, uint32_t desc) \ +{ \ + unsigned vl = simd_oprsz(desc); \ + unsigned strided = extract32(desc, SIMD_DATA_SHIFT, 1); \ + unsigned idx = extract32(desc, SIMD_DATA_SHIFT + 1, 4); \ + unsigned elements = vl / ESIZE; \ + unsigned dstride = (!strided ? 1 : NREG == 4 ? 
4 : 8); \ + unsigned segments = (ESIZE * 8) / (ISIZE * NREG); \ + unsigned segment = idx & (segments - 1); \ + ARMVectorReg indexes; \ + memcpy(&indexes, zn, vl); \ + do_lut_##SUFF(zd, indexes.d, (void *)env->za_state.zt0, elements, \ + segment * NREG * elements, \ + dstride * sizeof(ARMVectorReg), ISIZE, 32, NREG); \ +} + +DO_SME2_LUT(2,1,b, 1) +DO_SME2_LUT(2,1,h, 2) +DO_SME2_LUT(2,1,s, 4) +DO_SME2_LUT(2,2,b, 1) +DO_SME2_LUT(2,2,h, 2) +DO_SME2_LUT(2,2,s, 4) +DO_SME2_LUT(2,4,b, 1) +DO_SME2_LUT(2,4,h, 2) +DO_SME2_LUT(2,4,s, 4) + +DO_SME2_LUT(4,1,b, 1) +DO_SME2_LUT(4,1,h, 2) +DO_SME2_LUT(4,1,s, 4) +DO_SME2_LUT(4,2,b, 1) +DO_SME2_LUT(4,2,h, 2) +DO_SME2_LUT(4,2,s, 4) +DO_SME2_LUT(4,4,h, 2) +DO_SME2_LUT(4,4,s, 4) + +#undef DO_SME2_LUT diff --git a/target/arm/tcg/vec_internal.h b/target/arm/tcg/vec_internal.h index 3ca1b94ccf9..cf41b03dbcd 100644 --- a/target/arm/tcg/vec_internal.h +++ b/target/arm/tcg/vec_internal.h @@ -20,6 +20,10 @@ #ifndef TARGET_ARM_VEC_INTERNAL_H #define TARGET_ARM_VEC_INTERNAL_H +#include "fpu/softfloat.h" + +typedef struct CPUArchState CPUARMState; + /* * Note that vector data is stored in host-endian 64-bit chunks, * so addressing units smaller than that needs a host-endian fixup. @@ -219,17 +223,231 @@ int16_t do_sqrdmlah_h(int16_t, int16_t, int16_t, bool, bool, uint32_t *); int32_t do_sqrdmlah_s(int32_t, int32_t, int32_t, bool, bool, uint32_t *); int64_t do_sqrdmlah_d(int64_t, int64_t, int64_t, bool, bool); +#define do_ssat_b(val) MIN(MAX(val, INT8_MIN), INT8_MAX) +#define do_ssat_h(val) MIN(MAX(val, INT16_MIN), INT16_MAX) +#define do_ssat_s(val) MIN(MAX(val, INT32_MIN), INT32_MAX) +#define do_usat_b(val) MIN(MAX(val, 0), UINT8_MAX) +#define do_usat_h(val) MIN(MAX(val, 0), UINT16_MAX) +#define do_usat_s(val) MIN(MAX(val, 0), UINT32_MAX) + +static inline uint64_t do_urshr(uint64_t x, unsigned sh) +{ + if (likely(sh < 64)) { + return (x >> sh) + ((x >> (sh - 1)) & 1); + } else if (sh == 64) { + return x >> 63; + } else { + return 0; + } +} + +static inline int64_t do_srshr(int64_t x, unsigned sh) +{ + if (likely(sh < 64)) { + return (x >> sh) + ((x >> (sh - 1)) & 1); + } else { + /* Rounding the sign bit always produces 0. */ + return 0; + } +} + /** * bfdotadd: * @sum: addend * @e1, @e2: multiplicand vectors + * @fpst: floating-point status to use * * BFloat16 2-way dot product of @e1 & @e2, accumulating with @sum. * The @e1 and @e2 operands correspond to the 32-bit source vector * slots and contain two Bfloat16 values each. * - * Corresponds to the ARM pseudocode function BFDotAdd. + * Corresponds to the ARM pseudocode function BFDotAdd, specialized + * for the FPCR.EBF == 0 case. + */ +float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2, float_status *fpst); +/** + * bfdotadd_ebf: + * @sum: addend + * @e1, @e2: multiplicand vectors + * @fpst: floating-point status to use + * @fpst_odd: floating-point status to use for round-to-odd operations + * + * BFloat16 2-way dot product of @e1 & @e2, accumulating with @sum. + * The @e1 and @e2 operands correspond to the 32-bit source vector + * slots and contain two Bfloat16 values each. + * + * Corresponds to the ARM pseudocode function BFDotAdd, specialized + * for the FPCR.EBF == 1 case. 
+ */ +float32 bfdotadd_ebf(float32 sum, uint32_t e1, uint32_t e2, + float_status *fpst, float_status *fpst_odd); + +/** + * is_ebf: + * @env: CPU state + * @statusp: pointer to floating point status to fill in + * @oddstatusp: pointer to floating point status to fill in for round-to-odd + * + * Determine whether a BFDotAdd operation should use FPCR.EBF = 0 + * or FPCR.EBF = 1 semantics. On return, has initialized *statusp + * and *oddstatusp to suitable float_status arguments to use with either + * bfdotadd() or bfdotadd_ebf(). + * Returns true for EBF = 1, false for EBF = 0. (The caller should use this + * to decide whether to call bfdotadd() or bfdotadd_ebf().) + */ +bool is_ebf(CPUARMState *env, float_status *statusp, float_status *oddstatusp); + +/* + * Negate as for FPCR.AH=1 -- do not negate NaNs. + */ +static inline float16 bfloat16_ah_chs(float16 a) +{ + return bfloat16_is_any_nan(a) ? a : bfloat16_chs(a); +} + +static inline float16 float16_ah_chs(float16 a) +{ + return float16_is_any_nan(a) ? a : float16_chs(a); +} + +static inline float32 float32_ah_chs(float32 a) +{ + return float32_is_any_nan(a) ? a : float32_chs(a); +} + +static inline float64 float64_ah_chs(float64 a) +{ + return float64_is_any_nan(a) ? a : float64_chs(a); +} + +static inline float16 float16_maybe_ah_chs(float16 a, bool fpcr_ah) +{ + return fpcr_ah && float16_is_any_nan(a) ? a : float16_chs(a); +} + +static inline float32 float32_maybe_ah_chs(float32 a, bool fpcr_ah) +{ + return fpcr_ah && float32_is_any_nan(a) ? a : float32_chs(a); +} + +static inline float64 float64_maybe_ah_chs(float64 a, bool fpcr_ah) +{ + return fpcr_ah && float64_is_any_nan(a) ? a : float64_chs(a); +} + +/* Not actually called directly as a helper, but uses similar machinery. */ +bfloat16 helper_sme2_ah_fmax_b16(bfloat16 a, bfloat16 b, float_status *fpst); +bfloat16 helper_sme2_ah_fmin_b16(bfloat16 a, bfloat16 b, float_status *fpst); + +float32 sve_f16_to_f32(float16 f, float_status *fpst); +float16 sve_f32_to_f16(float32 f, float_status *fpst); + +/* + * Decode helper functions for predicate as counter. */ -float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2); + +typedef struct { + unsigned count; + unsigned lg2_stride; + bool invert; +} DecodeCounter; + +static inline DecodeCounter +decode_counter(unsigned png, unsigned vl, unsigned v_esz) +{ + DecodeCounter ret = { }; + + /* C.f. Arm pseudocode CounterToPredicate. */ + if (likely(png & 0xf)) { + unsigned p_esz = ctz32(png); + + /* + * maxbit = log2(pl(bits) * 4) + * = log2(vl(bytes) * 4) + * = log2(vl) + 2 + * maxbit_mask = ones + * = (1 << (maxbit + 1)) - 1 + * = (1 << (log2(vl) + 2 + 1)) - 1 + * = (1 << (log2(vl) + 3)) - 1 + * = (pow2ceil(vl) << 3) - 1 + */ + ret.count = png & (((unsigned)pow2ceil(vl) << 3) - 1); + ret.count >>= p_esz + 1; + + ret.invert = (png >> 15) & 1; + + /* + * The Arm pseudocode for CounterToPredicate expands the count to + * a set of bits, and then the operation proceeds as for the original + * interpretation of predicates as a set of bits. + * + * We can avoid the expansion by adjusting the count and supplying + * an element stride. + */ + if (unlikely(p_esz != v_esz)) { + if (p_esz < v_esz) { + /* + * For predicate esz < vector esz, the expanded predicate + * will have more bits set than will be consumed. + * Adjust the count down, rounding up. 
+ * Consider p_esz = MO_8, v_esz = MO_64, count 14: + * The expanded predicate would be + * 0011 1111 1111 1111 + * The significant bits are + * ...1 ...1 ...1 ...1 + */ + unsigned shift = v_esz - p_esz; + unsigned trunc = ret.count >> shift; + ret.count = trunc + (ret.count != (trunc << shift)); + } else { + /* + * For predicate esz > vector esz, the expanded predicate + * will have bits set only at power-of-two multiples of + * the vector esz. Bits at other multiples will all be + * false. Adjust the count up, and supply the caller + * with a stride of elements to skip. + */ + unsigned shift = p_esz - v_esz; + ret.count <<= shift; + ret.lg2_stride = shift; + } + } + } + return ret; +} + +/* Extract @len bits from an array of uint64_t at offset @pos bits. */ +static inline uint64_t extractn(uint64_t *p, unsigned pos, unsigned len) +{ + uint64_t x; + + p += pos / 64; + pos = pos % 64; + + x = p[0]; + if (pos + len > 64) { + x = (x >> pos) | (p[1] << (-pos & 63)); + pos = 0; + } + return extract64(x, pos, len); +} + +/* Deposit @len bits into an array of uint64_t at offset @pos bits. */ +static inline void depositn(uint64_t *p, unsigned pos, + unsigned len, uint64_t val) +{ + p += pos / 64; + pos = pos % 64; + + if (pos + len <= 64) { + p[0] = deposit64(p[0], pos, len, val); + } else { + unsigned len0 = 64 - pos; + unsigned len1 = len - len0; + + p[0] = deposit64(p[0], pos, len0, val); + p[1] = deposit64(p[1], 0, len1, val >> len0); + } +} #endif /* TARGET_ARM_VEC_INTERNAL_H */ diff --git a/target/arm/tcg/vfp.decode b/target/arm/tcg/vfp.decode index 5405e80197b..2dd87a27089 100644 --- a/target/arm/tcg/vfp.decode +++ b/target/arm/tcg/vfp.decode @@ -141,18 +141,18 @@ VDIV_dp ---- 1110 1.00 .... .... 1011 .0.0 .... @vfp_dnm_d VFMA_hp ---- 1110 1.10 .... .... 1001 .0. 0 .... @vfp_dnm_s VFMS_hp ---- 1110 1.10 .... .... 1001 .1. 0 .... @vfp_dnm_s -VFNMA_hp ---- 1110 1.01 .... .... 1001 .0. 0 .... @vfp_dnm_s -VFNMS_hp ---- 1110 1.01 .... .... 1001 .1. 0 .... @vfp_dnm_s +VFNMS_hp ---- 1110 1.01 .... .... 1001 .0. 0 .... @vfp_dnm_s +VFNMA_hp ---- 1110 1.01 .... .... 1001 .1. 0 .... @vfp_dnm_s VFMA_sp ---- 1110 1.10 .... .... 1010 .0. 0 .... @vfp_dnm_s VFMS_sp ---- 1110 1.10 .... .... 1010 .1. 0 .... @vfp_dnm_s -VFNMA_sp ---- 1110 1.01 .... .... 1010 .0. 0 .... @vfp_dnm_s -VFNMS_sp ---- 1110 1.01 .... .... 1010 .1. 0 .... @vfp_dnm_s +VFNMS_sp ---- 1110 1.01 .... .... 1010 .0. 0 .... @vfp_dnm_s +VFNMA_sp ---- 1110 1.01 .... .... 1010 .1. 0 .... @vfp_dnm_s VFMA_dp ---- 1110 1.10 .... .... 1011 .0.0 .... @vfp_dnm_d VFMS_dp ---- 1110 1.10 .... .... 1011 .1.0 .... @vfp_dnm_d -VFNMA_dp ---- 1110 1.01 .... .... 1011 .0.0 .... @vfp_dnm_d -VFNMS_dp ---- 1110 1.01 .... .... 1011 .1.0 .... @vfp_dnm_d +VFNMS_dp ---- 1110 1.01 .... .... 1011 .0.0 .... @vfp_dnm_d +VFNMA_dp ---- 1110 1.01 .... .... 1011 .1.0 .... @vfp_dnm_d VMOV_imm_hp ---- 1110 1.11 .... .... 1001 0000 .... \ vd=%vd_sp imm=%vmov_imm diff --git a/target/arm/tcg/vfp_helper.c b/target/arm/tcg/vfp_helper.c new file mode 100644 index 00000000000..e156e3774ad --- /dev/null +++ b/target/arm/tcg/vfp_helper.c @@ -0,0 +1,1380 @@ +/* + * ARM VFP floating-point operations + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internals.h" +#include "cpu-features.h" +#include "fpu/softfloat.h" +#include "qemu/log.h" + +#define HELPER_H "tcg/helper.h" +#include "exec/helper-proto.h.inc" + +/* + * Set the float_status behaviour to match the Arm defaults: + * * tininess-before-rounding + * * 2-input NaN propagation prefers SNaN over QNaN, and then + * operand A over operand B (see FPProcessNaNs() pseudocode) + * * 3-input NaN propagation prefers SNaN over QNaN, and then + * operand C over A over B (see FPProcessNaNs3() pseudocode, + * but note that for QEMU muladd is a * b + c, whereas for + * the pseudocode function the arguments are in the order c, a, b. + * * 0 * Inf + NaN returns the default NaN if the input NaN is quiet, + * and the input NaN if it is signalling + * * Default NaN has sign bit clear, msb frac bit set + */ +void arm_set_default_fp_behaviours(float_status *s) +{ + set_float_detect_tininess(float_tininess_before_rounding, s); + set_float_ftz_detection(float_ftz_before_rounding, s); + set_float_2nan_prop_rule(float_2nan_prop_s_ab, s); + set_float_3nan_prop_rule(float_3nan_prop_s_cab, s); + set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, s); + set_float_default_nan_pattern(0b01000000, s); +} + +/* + * Set the float_status behaviour to match the FEAT_AFP + * FPCR.AH=1 requirements: + * * tininess-after-rounding + * * 2-input NaN propagation prefers the first NaN + * * 3-input NaN propagation prefers a over b over c + * * 0 * Inf + NaN always returns the input NaN and doesn't + * set Invalid for a QNaN + * * default NaN has sign bit set, msb frac bit set + */ +void arm_set_ah_fp_behaviours(float_status *s) +{ + set_float_detect_tininess(float_tininess_after_rounding, s); + set_float_ftz_detection(float_ftz_after_rounding, s); + set_float_2nan_prop_rule(float_2nan_prop_ab, s); + set_float_3nan_prop_rule(float_3nan_prop_abc, s); + set_float_infzeronan_rule(float_infzeronan_dnan_never | + float_infzeronan_suppress_invalid, s); + set_float_default_nan_pattern(0b11000000, s); +} + +/* Convert host exception flags to vfp form. */ +static inline uint32_t vfp_exceptbits_from_host(int host_bits, bool ah) +{ + uint32_t target_bits = 0; + + if (host_bits & float_flag_invalid) { + target_bits |= FPSR_IOC; + } + if (host_bits & float_flag_divbyzero) { + target_bits |= FPSR_DZC; + } + if (host_bits & float_flag_overflow) { + target_bits |= FPSR_OFC; + } + if (host_bits & (float_flag_underflow | float_flag_output_denormal_flushed)) { + target_bits |= FPSR_UFC; + } + if (host_bits & float_flag_inexact) { + target_bits |= FPSR_IXC; + } + if (host_bits & float_flag_input_denormal_flushed) { + target_bits |= FPSR_IDC; + } + /* + * With FPCR.AH, IDC is set when an input denormal is used, + * and flushing an output denormal to zero sets both IXC and UFC. 
+ */ + if (ah && (host_bits & float_flag_input_denormal_used)) { + target_bits |= FPSR_IDC; + } + if (ah && (host_bits & float_flag_output_denormal_flushed)) { + target_bits |= FPSR_IXC; + } + return target_bits; +} + +uint32_t vfp_get_fpsr_from_host(CPUARMState *env) +{ + uint32_t a32_flags = 0, a64_flags = 0; + + a32_flags |= get_float_exception_flags(&env->vfp.fp_status[FPST_A32]); + a32_flags |= get_float_exception_flags(&env->vfp.fp_status[FPST_STD]); + /* FZ16 does not generate an input denormal exception. */ + a32_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_A32_F16]) + & ~float_flag_input_denormal_flushed); + a32_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_STD_F16]) + & ~float_flag_input_denormal_flushed); + + a64_flags |= get_float_exception_flags(&env->vfp.fp_status[FPST_A64]); + a64_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_A64_F16]) + & ~(float_flag_input_denormal_flushed | float_flag_input_denormal_used)); + /* + * We do not merge in flags from FPST_{AH,ZA} or FPST_{AH,ZA}_F16, because + * they are used for insns that must not set the cumulative exception bits. + */ + + /* + * Flushing an input denormal *only* because FPCR.FIZ == 1 does + * not set FPSR.IDC; if FPCR.FZ is also set then this takes + * precedence and IDC is set (see the FPUnpackBase pseudocode). + * So squash it unless (FPCR.AH == 0 && FPCR.FZ == 1). + * We only do this for the a64 flags because FIZ has no effect + * on AArch32 even if it is set. + */ + if ((env->vfp.fpcr & (FPCR_FZ | FPCR_AH)) != FPCR_FZ) { + a64_flags &= ~float_flag_input_denormal_flushed; + } + return vfp_exceptbits_from_host(a64_flags, env->vfp.fpcr & FPCR_AH) | + vfp_exceptbits_from_host(a32_flags, false); +} + +void vfp_clear_float_status_exc_flags(CPUARMState *env) +{ + /* + * Clear out all the exception-flag information in the float_status + * values. The caller should have arranged for env->vfp.fpsr to + * be the architecturally up-to-date exception flag information first. + */ + set_float_exception_flags(0, &env->vfp.fp_status[FPST_A32]); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_A64]); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_A32_F16]); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_A64_F16]); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD]); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_STD_F16]); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_AH]); + set_float_exception_flags(0, &env->vfp.fp_status[FPST_AH_F16]); +} + +static void vfp_sync_and_clear_float_status_exc_flags(CPUARMState *env) +{ + /* + * Synchronize any pending exception-flag information in the + * float_status values into env->vfp.fpsr, and then clear out + * the float_status data. 
+ */ + env->vfp.fpsr |= vfp_get_fpsr_from_host(env); + vfp_clear_float_status_exc_flags(env); +} + +void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) +{ + uint64_t changed = env->vfp.fpcr; + + changed ^= val; + changed &= mask; + if (changed & (3 << 22)) { + int i = (val >> 22) & 3; + switch (i) { + case FPROUNDING_TIEEVEN: + i = float_round_nearest_even; + break; + case FPROUNDING_POSINF: + i = float_round_up; + break; + case FPROUNDING_NEGINF: + i = float_round_down; + break; + case FPROUNDING_ZERO: + i = float_round_to_zero; + break; + } + set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A32]); + set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A64]); + set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A32_F16]); + set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A64_F16]); + set_float_rounding_mode(i, &env->vfp.fp_status[FPST_ZA]); + set_float_rounding_mode(i, &env->vfp.fp_status[FPST_ZA_F16]); + } + if (changed & FPCR_FZ16) { + bool ftz_enabled = val & FPCR_FZ16; + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32_F16]); + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64_F16]); + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]); + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_AH_F16]); + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_ZA_F16]); + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32_F16]); + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64_F16]); + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]); + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_AH_F16]); + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_ZA_F16]); + } + if (changed & FPCR_FZ) { + bool ftz_enabled = val & FPCR_FZ; + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32]); + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64]); + set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_ZA]); + /* FIZ is A64 only so FZ always makes A32 code flush inputs to zero */ + set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32]); + } + if (changed & (FPCR_FZ | FPCR_AH | FPCR_FIZ)) { + /* + * A64: Flush denormalized inputs to zero if FPCR.FIZ = 1, or + * both FPCR.AH = 0 and FPCR.FZ = 1. 
+ */ + bool fitz_enabled = (val & FPCR_FIZ) || + (val & (FPCR_FZ | FPCR_AH)) == FPCR_FZ; + set_flush_inputs_to_zero(fitz_enabled, &env->vfp.fp_status[FPST_A64]); + set_flush_inputs_to_zero(fitz_enabled, &env->vfp.fp_status[FPST_ZA]); + } + if (changed & FPCR_DN) { + bool dnan_enabled = val & FPCR_DN; + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A32]); + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A64]); + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A32_F16]); + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_A64_F16]); + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_AH]); + set_default_nan_mode(dnan_enabled, &env->vfp.fp_status[FPST_AH_F16]); + } + if (changed & FPCR_AH) { + bool ah_enabled = val & FPCR_AH; + + if (ah_enabled) { + /* Change behaviours for A64 FP operations */ + arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_A64]); + arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]); + arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_ZA]); + arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_ZA_F16]); + } else { + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64]); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_ZA]); + arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_ZA_F16]); + } + } + /* + * If any bits changed that we look at in vfp_get_fpsr_from_host(), + * we must sync the float_status flags into vfp.fpsr now (under the + * old regime) before we update vfp.fpcr. + */ + if (changed & (FPCR_FZ | FPCR_AH | FPCR_FIZ)) { + vfp_sync_and_clear_float_status_exc_flags(env); + } +} + +/* + * VFP support. We follow the convention used for VFP instructions: + * Single precision routines have a "s" suffix, double precision a + * "d" suffix. 
+ */ + +#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p)) + +#define VFP_BINOP(name) \ +dh_ctype_f16 VFP_HELPER(name, h)(dh_ctype_f16 a, dh_ctype_f16 b, float_status *fpst) \ +{ \ + return float16_ ## name(a, b, fpst); \ +} \ +float32 VFP_HELPER(name, s)(float32 a, float32 b, float_status *fpst) \ +{ \ + return float32_ ## name(a, b, fpst); \ +} \ +float64 VFP_HELPER(name, d)(float64 a, float64 b, float_status *fpst) \ +{ \ + return float64_ ## name(a, b, fpst); \ +} +VFP_BINOP(add) +VFP_BINOP(sub) +VFP_BINOP(mul) +VFP_BINOP(div) +VFP_BINOP(min) +VFP_BINOP(max) +VFP_BINOP(minnum) +VFP_BINOP(maxnum) +#undef VFP_BINOP + +dh_ctype_f16 VFP_HELPER(sqrt, h)(dh_ctype_f16 a, float_status *fpst) +{ + return float16_sqrt(a, fpst); +} + +float32 VFP_HELPER(sqrt, s)(float32 a, float_status *fpst) +{ + return float32_sqrt(a, fpst); +} + +float64 VFP_HELPER(sqrt, d)(float64 a, float_status *fpst) +{ + return float64_sqrt(a, fpst); +} + +static void softfloat_to_vfp_compare(CPUARMState *env, FloatRelation cmp) +{ + uint32_t flags; + switch (cmp) { + case float_relation_equal: + flags = 0x6; + break; + case float_relation_less: + flags = 0x8; + break; + case float_relation_greater: + flags = 0x2; + break; + case float_relation_unordered: + flags = 0x3; + break; + default: + g_assert_not_reached(); + } + env->vfp.fpsr = deposit64(env->vfp.fpsr, 28, 4, flags); /* NZCV */ +} + +/* XXX: check quiet/signaling case */ +#define DO_VFP_cmp(P, FLOATTYPE, ARGTYPE, FPST) \ +void VFP_HELPER(cmp, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \ +{ \ + softfloat_to_vfp_compare(env, \ + FLOATTYPE ## _compare_quiet(a, b, &env->vfp.fp_status[FPST])); \ +} \ +void VFP_HELPER(cmpe, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \ +{ \ + softfloat_to_vfp_compare(env, \ + FLOATTYPE ## _compare(a, b, &env->vfp.fp_status[FPST])); \ +} +DO_VFP_cmp(h, float16, dh_ctype_f16, FPST_A32_F16) +DO_VFP_cmp(s, float32, float32, FPST_A32) +DO_VFP_cmp(d, float64, float64, FPST_A32) +#undef DO_VFP_cmp + +/* Integer to float and float to integer conversions */ + +#define CONV_ITOF(name, ftype, fsz, sign) \ +ftype HELPER(name)(uint32_t x, float_status *fpst) \ +{ \ + return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \ +} + +#define CONV_FTOI(name, ftype, fsz, sign, round) \ +sign##int32_t HELPER(name)(ftype x, float_status *fpst) \ +{ \ + if (float##fsz##_is_any_nan(x)) { \ + float_raise(float_flag_invalid, fpst); \ + return 0; \ + } \ + return float##fsz##_to_##sign##int32##round(x, fpst); \ +} + +#define FLOAT_CONVS(name, p, ftype, fsz, sign) \ + CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \ + CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \ + CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero) + +FLOAT_CONVS(si, h, uint32_t, 16, ) +FLOAT_CONVS(si, s, float32, 32, ) +FLOAT_CONVS(si, d, float64, 64, ) +FLOAT_CONVS(ui, h, uint32_t, 16, u) +FLOAT_CONVS(ui, s, float32, 32, u) +FLOAT_CONVS(ui, d, float64, 64, u) + +#undef CONV_ITOF +#undef CONV_FTOI +#undef FLOAT_CONVS + +/* floating point conversion */ +float64 VFP_HELPER(fcvtd, s)(float32 x, float_status *status) +{ + return float32_to_float64(x, status); +} + +float32 VFP_HELPER(fcvts, d)(float64 x, float_status *status) +{ + return float64_to_float32(x, status); +} + +uint32_t HELPER(bfcvt)(float32 x, float_status *status) +{ + return float32_to_bfloat16(x, status); +} + +uint32_t HELPER(bfcvt_pair)(uint64_t pair, float_status *status) +{ + bfloat16 lo = float32_to_bfloat16(extract64(pair, 0, 32), status); + bfloat16 hi = float32_to_bfloat16(extract64(pair, 32, 
32), status); + return deposit32(lo, 16, 16, hi); +} + +/* + * VFP3 fixed point conversion. The AArch32 versions of fix-to-float + * must always round-to-nearest; the AArch64 ones honour the FPSCR + * rounding mode. (For AArch32 Neon the standard-FPSCR is set to + * round-to-nearest so either helper will work.) AArch32 float-to-fix + * must round-to-zero. + */ +#define VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \ +ftype HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \ + float_status *fpst) \ +{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpst); } + +#define VFP_CONV_FIX_FLOAT_ROUND(name, p, fsz, ftype, isz, itype) \ + ftype HELPER(vfp_##name##to##p##_round_to_nearest)(uint##isz##_t x, \ + uint32_t shift, \ + float_status *fpst) \ + { \ + ftype ret; \ + FloatRoundMode oldmode = fpst->float_rounding_mode; \ + fpst->float_rounding_mode = float_round_nearest_even; \ + ret = itype##_to_##float##fsz##_scalbn(x, -shift, fpst); \ + fpst->float_rounding_mode = oldmode; \ + return ret; \ + } + +#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, ROUND, suff) \ +uint##isz##_t HELPER(vfp_to##name##p##suff)(ftype x, uint32_t shift, \ + float_status *fpst) \ +{ \ + if (unlikely(float##fsz##_is_any_nan(x))) { \ + float_raise(float_flag_invalid, fpst); \ + return 0; \ + } \ + return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \ +} + +#define VFP_CONV_FIX(name, p, fsz, ftype, isz, itype) \ +VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \ +VFP_CONV_FIX_FLOAT_ROUND(name, p, fsz, ftype, isz, itype) \ +VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \ + float_round_to_zero, _round_to_zero) \ +VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \ + get_float_rounding_mode(fpst), ) + +#define VFP_CONV_FIX_A64(name, p, fsz, ftype, isz, itype) \ +VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \ +VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \ + get_float_rounding_mode(fpst), ) + +VFP_CONV_FIX(sh, d, 64, float64, 64, int16) +VFP_CONV_FIX(sl, d, 64, float64, 64, int32) +VFP_CONV_FIX_A64(sq, d, 64, float64, 64, int64) +VFP_CONV_FIX(uh, d, 64, float64, 64, uint16) +VFP_CONV_FIX(ul, d, 64, float64, 64, uint32) +VFP_CONV_FIX_A64(uq, d, 64, float64, 64, uint64) +VFP_CONV_FIX(sh, s, 32, float32, 32, int16) +VFP_CONV_FIX(sl, s, 32, float32, 32, int32) +VFP_CONV_FIX_A64(sq, s, 32, float32, 64, int64) +VFP_CONV_FIX(uh, s, 32, float32, 32, uint16) +VFP_CONV_FIX(ul, s, 32, float32, 32, uint32) +VFP_CONV_FIX_A64(uq, s, 32, float32, 64, uint64) +VFP_CONV_FIX(sh, h, 16, dh_ctype_f16, 32, int16) +VFP_CONV_FIX(sl, h, 16, dh_ctype_f16, 32, int32) +VFP_CONV_FIX_A64(sq, h, 16, dh_ctype_f16, 64, int64) +VFP_CONV_FIX(uh, h, 16, dh_ctype_f16, 32, uint16) +VFP_CONV_FIX(ul, h, 16, dh_ctype_f16, 32, uint32) +VFP_CONV_FIX_A64(uq, h, 16, dh_ctype_f16, 64, uint64) +VFP_CONV_FLOAT_FIX_ROUND(sq, d, 64, float64, 64, int64, + float_round_to_zero, _round_to_zero) +VFP_CONV_FLOAT_FIX_ROUND(uq, d, 64, float64, 64, uint64, + float_round_to_zero, _round_to_zero) + +#undef VFP_CONV_FIX +#undef VFP_CONV_FIX_FLOAT +#undef VFP_CONV_FLOAT_FIX_ROUND +#undef VFP_CONV_FIX_A64 + +/* Set the current fp rounding mode and return the old one. + * The argument is a softfloat float_round_ value. + */ +uint32_t HELPER(set_rmode)(uint32_t rmode, float_status *fp_status) +{ + uint32_t prev_rmode = get_float_rounding_mode(fp_status); + set_float_rounding_mode(rmode, fp_status); + + return prev_rmode; +} + +/* Half precision conversions. 
*/ +float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, float_status *fpst, + uint32_t ahp_mode) +{ + /* Squash FZ16 to 0 for the duration of conversion. In this case, + * it would affect flushing input denormals. + */ + bool save = get_flush_inputs_to_zero(fpst); + set_flush_inputs_to_zero(false, fpst); + float32 r = float16_to_float32(a, !ahp_mode, fpst); + set_flush_inputs_to_zero(save, fpst); + return r; +} + +uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, float_status *fpst, + uint32_t ahp_mode) +{ + /* Squash FZ16 to 0 for the duration of conversion. In this case, + * it would affect flushing output denormals. + */ + bool save = get_flush_to_zero(fpst); + set_flush_to_zero(false, fpst); + float16 r = float32_to_float16(a, !ahp_mode, fpst); + set_flush_to_zero(save, fpst); + return r; +} + +float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, float_status *fpst, + uint32_t ahp_mode) +{ + /* Squash FZ16 to 0 for the duration of conversion. In this case, + * it would affect flushing input denormals. + */ + bool save = get_flush_inputs_to_zero(fpst); + set_flush_inputs_to_zero(false, fpst); + float64 r = float16_to_float64(a, !ahp_mode, fpst); + set_flush_inputs_to_zero(save, fpst); + return r; +} + +uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, float_status *fpst, + uint32_t ahp_mode) +{ + /* Squash FZ16 to 0 for the duration of conversion. In this case, + * it would affect flushing output denormals. + */ + bool save = get_flush_to_zero(fpst); + set_flush_to_zero(false, fpst); + float16 r = float64_to_float16(a, !ahp_mode, fpst); + set_flush_to_zero(save, fpst); + return r; +} + +/* NEON helpers. */ + +/* Constants 256 and 512 are used in some helpers; we avoid relying on + * int->float conversions at run-time. */ +#define float64_256 make_float64(0x4070000000000000LL) +#define float64_512 make_float64(0x4080000000000000LL) +#define float16_maxnorm make_float16(0x7bff) +#define float32_maxnorm make_float32(0x7f7fffff) +#define float64_maxnorm make_float64(0x7fefffffffffffffLL) + +/* Reciprocal functions + * + * The algorithm that must be used to calculate the estimate + * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate + */ + +/* See RecipEstimate() + * + * input is a 9 bit fixed point number + * input range 256 .. 511 for a number from 0.5 <= x < 1.0. + * result range 256 .. 511 for a number from 1.0 to 511/256. + */ + +static int recip_estimate(int input) +{ + int a, b, r; + assert(256 <= input && input < 512); + a = (input * 2) + 1; + b = (1 << 19) / a; + r = (b + 1) >> 1; + assert(256 <= r && r < 512); + return r; +} + +/* + * Increased precision version: + * input is a 13 bit fixed point number + * input range 2048 .. 4095 for a number from 0.5 <= x < 1.0. + * result range 4096 .. 8191 for a number from 1.0 to 2.0 + */ +static int recip_estimate_incprec(int input) +{ + int a, b, r; + assert(2048 <= input && input < 4096); + a = (input * 2) + 1; + /* + * The pseudocode expresses this as an operation on infinite + * precision reals where it calculates 2^25 / a and then looks + * at the error between that and the rounded-down-to-integer + * value to see if it should instead round up. We instead + * follow the same approach as the pseudocode for the 8-bit + * precision version, and calculate (2 * (2^25 / a)) as an + * integer so we can do the "add one and halve" to round it. + * So the 1 << 26 here is correct. 
+ */ + b = (1 << 26) / a; + r = (b + 1) >> 1; + assert(4096 <= r && r < 8192); + return r; +} + +/* + * Common wrapper to call recip_estimate + * + * The parameters are exponent and 64 bit fraction (without implicit + * bit) where the binary point is nominally at bit 52. Returns a + * float64 which can then be rounded to the appropriate size by the + * callee. + */ + +static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac, + bool increasedprecision) +{ + uint32_t scaled, estimate; + uint64_t result_frac; + int result_exp; + + /* Handle sub-normals */ + if (*exp == 0) { + if (extract64(frac, 51, 1) == 0) { + *exp = -1; + frac <<= 2; + } else { + frac <<= 1; + } + } + + if (increasedprecision) { + /* scaled = UInt('1':fraction<51:41>) */ + scaled = deposit32(1 << 11, 0, 11, extract64(frac, 41, 11)); + estimate = recip_estimate_incprec(scaled); + } else { + /* scaled = UInt('1':fraction<51:44>) */ + scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); + estimate = recip_estimate(scaled); + } + + result_exp = exp_off - *exp; + if (increasedprecision) { + result_frac = deposit64(0, 40, 12, estimate); + } else { + result_frac = deposit64(0, 44, 8, estimate); + } + if (result_exp == 0) { + result_frac = deposit64(result_frac >> 1, 51, 1, 1); + } else if (result_exp == -1) { + result_frac = deposit64(result_frac >> 2, 50, 2, 1); + result_exp = 0; + } + + *exp = result_exp; + + return result_frac; +} + +static bool round_to_inf(float_status *fpst, bool sign_bit) +{ + switch (fpst->float_rounding_mode) { + case float_round_nearest_even: /* Round to Nearest */ + return true; + case float_round_up: /* Round to +Inf */ + return !sign_bit; + case float_round_down: /* Round to -Inf */ + return sign_bit; + case float_round_to_zero: /* Round to Zero */ + return false; + default: + g_assert_not_reached(); + } +} + +uint32_t HELPER(recpe_f16)(uint32_t input, float_status *fpst) +{ + float16 f16 = float16_squash_input_denormal(input, fpst); + uint32_t f16_val = float16_val(f16); + uint32_t f16_sign = float16_is_neg(f16); + int f16_exp = extract32(f16_val, 10, 5); + uint32_t f16_frac = extract32(f16_val, 0, 10); + uint64_t f64_frac; + + if (float16_is_any_nan(f16)) { + float16 nan = f16; + if (float16_is_signaling_nan(f16, fpst)) { + float_raise(float_flag_invalid, fpst); + if (!fpst->default_nan_mode) { + nan = float16_silence_nan(f16, fpst); + } + } + if (fpst->default_nan_mode) { + nan = float16_default_nan(fpst); + } + return nan; + } else if (float16_is_infinity(f16)) { + return float16_set_sign(float16_zero, float16_is_neg(f16)); + } else if (float16_is_zero(f16)) { + float_raise(float_flag_divbyzero, fpst); + return float16_set_sign(float16_infinity, float16_is_neg(f16)); + } else if (float16_abs(f16) < (1 << 8)) { + /* Abs(value) < 2.0^-16 */ + float_raise(float_flag_overflow | float_flag_inexact, fpst); + if (round_to_inf(fpst, f16_sign)) { + return float16_set_sign(float16_infinity, f16_sign); + } else { + return float16_set_sign(float16_maxnorm, f16_sign); + } + } else if (f16_exp >= 29 && fpst->flush_to_zero) { + float_raise(float_flag_underflow, fpst); + return float16_set_sign(float16_zero, float16_is_neg(f16)); + } + + f64_frac = call_recip_estimate(&f16_exp, 29, + ((uint64_t) f16_frac) << (52 - 10), false); + + /* result = sign : result_exp<4:0> : fraction<51:42> */ + f16_val = deposit32(0, 15, 1, f16_sign); + f16_val = deposit32(f16_val, 10, 5, f16_exp); + f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10)); + return make_float16(f16_val); +} + +/* + * 
FEAT_RPRES means the f32 FRECPE has an "increased precision" variant + * which is used when FPCR.AH == 1. + */ +static float32 do_recpe_f32(float32 input, float_status *fpst, bool rpres) +{ + float32 f32 = float32_squash_input_denormal(input, fpst); + uint32_t f32_val = float32_val(f32); + bool f32_sign = float32_is_neg(f32); + int f32_exp = extract32(f32_val, 23, 8); + uint32_t f32_frac = extract32(f32_val, 0, 23); + uint64_t f64_frac; + + if (float32_is_any_nan(f32)) { + float32 nan = f32; + if (float32_is_signaling_nan(f32, fpst)) { + float_raise(float_flag_invalid, fpst); + if (!fpst->default_nan_mode) { + nan = float32_silence_nan(f32, fpst); + } + } + if (fpst->default_nan_mode) { + nan = float32_default_nan(fpst); + } + return nan; + } else if (float32_is_infinity(f32)) { + return float32_set_sign(float32_zero, float32_is_neg(f32)); + } else if (float32_is_zero(f32)) { + float_raise(float_flag_divbyzero, fpst); + return float32_set_sign(float32_infinity, float32_is_neg(f32)); + } else if (float32_abs(f32) < (1ULL << 21)) { + /* Abs(value) < 2.0^-128 */ + float_raise(float_flag_overflow | float_flag_inexact, fpst); + if (round_to_inf(fpst, f32_sign)) { + return float32_set_sign(float32_infinity, f32_sign); + } else { + return float32_set_sign(float32_maxnorm, f32_sign); + } + } else if (f32_exp >= 253 && fpst->flush_to_zero) { + float_raise(float_flag_underflow, fpst); + return float32_set_sign(float32_zero, float32_is_neg(f32)); + } + + f64_frac = call_recip_estimate(&f32_exp, 253, + ((uint64_t) f32_frac) << (52 - 23), rpres); + + /* result = sign : result_exp<7:0> : fraction<51:29> */ + f32_val = deposit32(0, 31, 1, f32_sign); + f32_val = deposit32(f32_val, 23, 8, f32_exp); + f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23)); + return make_float32(f32_val); +} + +float32 HELPER(recpe_f32)(float32 input, float_status *fpst) +{ + return do_recpe_f32(input, fpst, false); +} + +float32 HELPER(recpe_rpres_f32)(float32 input, float_status *fpst) +{ + return do_recpe_f32(input, fpst, true); +} + +float64 HELPER(recpe_f64)(float64 input, float_status *fpst) +{ + float64 f64 = float64_squash_input_denormal(input, fpst); + uint64_t f64_val = float64_val(f64); + bool f64_sign = float64_is_neg(f64); + int f64_exp = extract64(f64_val, 52, 11); + uint64_t f64_frac = extract64(f64_val, 0, 52); + + /* Deal with any special cases */ + if (float64_is_any_nan(f64)) { + float64 nan = f64; + if (float64_is_signaling_nan(f64, fpst)) { + float_raise(float_flag_invalid, fpst); + if (!fpst->default_nan_mode) { + nan = float64_silence_nan(f64, fpst); + } + } + if (fpst->default_nan_mode) { + nan = float64_default_nan(fpst); + } + return nan; + } else if (float64_is_infinity(f64)) { + return float64_set_sign(float64_zero, float64_is_neg(f64)); + } else if (float64_is_zero(f64)) { + float_raise(float_flag_divbyzero, fpst); + return float64_set_sign(float64_infinity, float64_is_neg(f64)); + } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) { + /* Abs(value) < 2.0^-1024 */ + float_raise(float_flag_overflow | float_flag_inexact, fpst); + if (round_to_inf(fpst, f64_sign)) { + return float64_set_sign(float64_infinity, f64_sign); + } else { + return float64_set_sign(float64_maxnorm, f64_sign); + } + } else if (f64_exp >= 2045 && fpst->flush_to_zero) { + float_raise(float_flag_underflow, fpst); + return float64_set_sign(float64_zero, float64_is_neg(f64)); + } + + f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac, false); + + /* result = sign : result_exp<10:0> : fraction<51:0>; */ + 
f64_val = deposit64(0, 63, 1, f64_sign); + f64_val = deposit64(f64_val, 52, 11, f64_exp); + f64_val = deposit64(f64_val, 0, 52, f64_frac); + return make_float64(f64_val); +} + +/* The algorithm that must be used to calculate the estimate + * is specified by the ARM ARM. + */ + +static int do_recip_sqrt_estimate(int a) +{ + int b, estimate; + + assert(128 <= a && a < 512); + if (a < 256) { + a = a * 2 + 1; + } else { + a = (a >> 1) << 1; + a = (a + 1) * 2; + } + b = 512; + while (a * (b + 1) * (b + 1) < (1 << 28)) { + b += 1; + } + estimate = (b + 1) / 2; + assert(256 <= estimate && estimate < 512); + + return estimate; +} + +static int do_recip_sqrt_estimate_incprec(int a) +{ + /* + * The Arm ARM describes the 12-bit precision version of RecipSqrtEstimate + * in terms of an infinite-precision floating point calculation of a + * square root. We implement this using the same kind of pure integer + * algorithm as the 8-bit mantissa, to get the same bit-for-bit result. + */ + int64_t b, estimate; + + assert(1024 <= a && a < 4096); + if (a < 2048) { + a = a * 2 + 1; + } else { + a = (a >> 1) << 1; + a = (a + 1) * 2; + } + b = 8192; + while (a * (b + 1) * (b + 1) < (1ULL << 39)) { + b += 1; + } + estimate = (b + 1) / 2; + + assert(4096 <= estimate && estimate < 8192); + + return estimate; +} + +static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac, + bool increasedprecision) +{ + int estimate; + uint32_t scaled; + + if (*exp == 0) { + while (extract64(frac, 51, 1) == 0) { + frac = frac << 1; + *exp -= 1; + } + frac = extract64(frac, 0, 51) << 1; + } + + if (increasedprecision) { + if (*exp & 1) { + /* scaled = UInt('01':fraction<51:42>) */ + scaled = deposit32(1 << 10, 0, 10, extract64(frac, 42, 10)); + } else { + /* scaled = UInt('1':fraction<51:41>) */ + scaled = deposit32(1 << 11, 0, 11, extract64(frac, 41, 11)); + } + estimate = do_recip_sqrt_estimate_incprec(scaled); + } else { + if (*exp & 1) { + /* scaled = UInt('01':fraction<51:45>) */ + scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7)); + } else { + /* scaled = UInt('1':fraction<51:44>) */ + scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); + } + estimate = do_recip_sqrt_estimate(scaled); + } + + *exp = (exp_off - *exp) / 2; + if (increasedprecision) { + return extract64(estimate, 0, 12) << 40; + } else { + return extract64(estimate, 0, 8) << 44; + } +} + +uint32_t HELPER(rsqrte_f16)(uint32_t input, float_status *s) +{ + float16 f16 = float16_squash_input_denormal(input, s); + uint16_t val = float16_val(f16); + bool f16_sign = float16_is_neg(f16); + int f16_exp = extract32(val, 10, 5); + uint16_t f16_frac = extract32(val, 0, 10); + uint64_t f64_frac; + + if (float16_is_any_nan(f16)) { + float16 nan = f16; + if (float16_is_signaling_nan(f16, s)) { + float_raise(float_flag_invalid, s); + if (!s->default_nan_mode) { + nan = float16_silence_nan(f16, s); + } + } + if (s->default_nan_mode) { + nan = float16_default_nan(s); + } + return nan; + } else if (float16_is_zero(f16)) { + float_raise(float_flag_divbyzero, s); + return float16_set_sign(float16_infinity, f16_sign); + } else if (f16_sign) { + float_raise(float_flag_invalid, s); + return float16_default_nan(s); + } else if (float16_is_infinity(f16)) { + return float16_zero; + } + + /* Scale and normalize to a double-precision value between 0.25 and 1.0, + * preserving the parity of the exponent. 
*/ + + f64_frac = ((uint64_t) f16_frac) << (52 - 10); + + f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac, false); + + /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */ + val = deposit32(0, 15, 1, f16_sign); + val = deposit32(val, 10, 5, f16_exp); + val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8)); + return make_float16(val); +} + +/* + * FEAT_RPRES means the f32 FRSQRTE has an "increased precision" variant + * which is used when FPCR.AH == 1. + */ +static float32 do_rsqrte_f32(float32 input, float_status *s, bool rpres) +{ + float32 f32 = float32_squash_input_denormal(input, s); + uint32_t val = float32_val(f32); + uint32_t f32_sign = float32_is_neg(f32); + int f32_exp = extract32(val, 23, 8); + uint32_t f32_frac = extract32(val, 0, 23); + uint64_t f64_frac; + + if (float32_is_any_nan(f32)) { + float32 nan = f32; + if (float32_is_signaling_nan(f32, s)) { + float_raise(float_flag_invalid, s); + if (!s->default_nan_mode) { + nan = float32_silence_nan(f32, s); + } + } + if (s->default_nan_mode) { + nan = float32_default_nan(s); + } + return nan; + } else if (float32_is_zero(f32)) { + float_raise(float_flag_divbyzero, s); + return float32_set_sign(float32_infinity, float32_is_neg(f32)); + } else if (float32_is_neg(f32)) { + float_raise(float_flag_invalid, s); + return float32_default_nan(s); + } else if (float32_is_infinity(f32)) { + return float32_zero; + } + + /* Scale and normalize to a double-precision value between 0.25 and 1.0, + * preserving the parity of the exponent. */ + + f64_frac = ((uint64_t) f32_frac) << 29; + + f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac, rpres); + + /* + * result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) + * or for increased precision + * result = sign : result_exp<7:0> : estimate<11:0> : Zeros(11) + */ + val = deposit32(0, 31, 1, f32_sign); + val = deposit32(val, 23, 8, f32_exp); + if (rpres) { + val = deposit32(val, 11, 12, extract64(f64_frac, 52 - 12, 12)); + } else { + val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8)); + } + return make_float32(val); +} + +float32 HELPER(rsqrte_f32)(float32 input, float_status *s) +{ + return do_rsqrte_f32(input, s, false); +} + +float32 HELPER(rsqrte_rpres_f32)(float32 input, float_status *s) +{ + return do_rsqrte_f32(input, s, true); +} + +float64 HELPER(rsqrte_f64)(float64 input, float_status *s) +{ + float64 f64 = float64_squash_input_denormal(input, s); + uint64_t val = float64_val(f64); + bool f64_sign = float64_is_neg(f64); + int f64_exp = extract64(val, 52, 11); + uint64_t f64_frac = extract64(val, 0, 52); + + if (float64_is_any_nan(f64)) { + float64 nan = f64; + if (float64_is_signaling_nan(f64, s)) { + float_raise(float_flag_invalid, s); + if (!s->default_nan_mode) { + nan = float64_silence_nan(f64, s); + } + } + if (s->default_nan_mode) { + nan = float64_default_nan(s); + } + return nan; + } else if (float64_is_zero(f64)) { + float_raise(float_flag_divbyzero, s); + return float64_set_sign(float64_infinity, float64_is_neg(f64)); + } else if (float64_is_neg(f64)) { + float_raise(float_flag_invalid, s); + return float64_default_nan(s); + } else if (float64_is_infinity(f64)) { + return float64_zero; + } + + f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac, false); + + /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(44) */ + val = deposit64(0, 61, 1, f64_sign); + val = deposit64(val, 52, 11, f64_exp); + val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8)); + return make_float64(val); +} + +uint32_t HELPER(recpe_u32)(uint32_t a) +{ 
+ int input, estimate; + + if ((a & 0x80000000) == 0) { + return 0xffffffff; + } + + input = extract32(a, 23, 9); + estimate = recip_estimate(input); + + return deposit32(0, (32 - 9), 9, estimate); +} + +uint32_t HELPER(rsqrte_u32)(uint32_t a) +{ + int estimate; + + if ((a & 0xc0000000) == 0) { + return 0xffffffff; + } + + estimate = do_recip_sqrt_estimate(extract32(a, 23, 9)); + + return deposit32(0, 23, 9, estimate); +} + +/* VFPv4 fused multiply-accumulate */ +dh_ctype_f16 VFP_HELPER(muladd, h)(dh_ctype_f16 a, dh_ctype_f16 b, + dh_ctype_f16 c, float_status *fpst) +{ + return float16_muladd(a, b, c, 0, fpst); +} + +float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, + float_status *fpst) +{ + return float32_muladd(a, b, c, 0, fpst); +} + +float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, + float_status *fpst) +{ + return float64_muladd(a, b, c, 0, fpst); +} + +/* ARMv8 round to integral */ +dh_ctype_f16 HELPER(rinth_exact)(dh_ctype_f16 x, float_status *fp_status) +{ + return float16_round_to_int(x, fp_status); +} + +float32 HELPER(rints_exact)(float32 x, float_status *fp_status) +{ + return float32_round_to_int(x, fp_status); +} + +float64 HELPER(rintd_exact)(float64 x, float_status *fp_status) +{ + return float64_round_to_int(x, fp_status); +} + +dh_ctype_f16 HELPER(rinth)(dh_ctype_f16 x, float_status *fp_status) +{ + int old_flags = get_float_exception_flags(fp_status), new_flags; + float16 ret; + + ret = float16_round_to_int(x, fp_status); + + /* Suppress any inexact exceptions the conversion produced */ + if (!(old_flags & float_flag_inexact)) { + new_flags = get_float_exception_flags(fp_status); + set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); + } + + return ret; +} + +float32 HELPER(rints)(float32 x, float_status *fp_status) +{ + int old_flags = get_float_exception_flags(fp_status), new_flags; + float32 ret; + + ret = float32_round_to_int(x, fp_status); + + /* Suppress any inexact exceptions the conversion produced */ + if (!(old_flags & float_flag_inexact)) { + new_flags = get_float_exception_flags(fp_status); + set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); + } + + return ret; +} + +float64 HELPER(rintd)(float64 x, float_status *fp_status) +{ + int old_flags = get_float_exception_flags(fp_status), new_flags; + float64 ret; + + ret = float64_round_to_int(x, fp_status); + + /* Suppress any inexact exceptions the conversion produced */ + if (!(old_flags & float_flag_inexact)) { + new_flags = get_float_exception_flags(fp_status); + set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); + } + + return ret; +} + +/* Convert ARM rounding mode to softfloat */ +const FloatRoundMode arm_rmode_to_sf_map[] = { + [FPROUNDING_TIEEVEN] = float_round_nearest_even, + [FPROUNDING_POSINF] = float_round_up, + [FPROUNDING_NEGINF] = float_round_down, + [FPROUNDING_ZERO] = float_round_to_zero, + [FPROUNDING_TIEAWAY] = float_round_ties_away, + [FPROUNDING_ODD] = float_round_to_odd, +}; + +/* + * Implement float64 to int32_t conversion without saturation; + * the result is supplied modulo 2^32. 
+ */ +uint64_t HELPER(fjcvtzs)(float64 value, float_status *status) +{ + uint32_t frac, e_old, e_new; + bool inexact; + + e_old = get_float_exception_flags(status); + set_float_exception_flags(0, status); + frac = float64_to_int32_modulo(value, float_round_to_zero, status); + e_new = get_float_exception_flags(status); + set_float_exception_flags(e_old | e_new, status); + + /* Normal inexact, denormal with flush-to-zero, or overflow or NaN */ + inexact = e_new & (float_flag_inexact | + float_flag_input_denormal_flushed | + float_flag_invalid); + + /* While not inexact for IEEE FP, -0.0 is inexact for JavaScript. */ + inexact |= value == float64_chs(float64_zero); + + /* Pack the result and the env->ZF representation of Z together. */ + return deposit64(frac, 32, 32, inexact); +} + +uint32_t HELPER(vjcvt)(float64 value, CPUARMState *env) +{ + uint64_t pair = HELPER(fjcvtzs)(value, &env->vfp.fp_status[FPST_A32]); + uint32_t result = pair; + uint32_t z = (pair >> 32) == 0; + + /* Store Z, clear NCV, in FPSCR.NZCV. */ + env->vfp.fpsr = (env->vfp.fpsr & ~FPSR_NZCV_MASK) | (z * FPSR_Z); + + return result; +} + +/* Round a float32 to an integer that fits in int32_t or int64_t. */ +static float32 frint_s(float32 f, float_status *fpst, int intsize) +{ + int old_flags = get_float_exception_flags(fpst); + uint32_t exp = extract32(f, 23, 8); + + if (unlikely(exp == 0xff)) { + /* NaN or Inf. */ + goto overflow; + } + + /* Round and re-extract the exponent. */ + f = float32_round_to_int(f, fpst); + exp = extract32(f, 23, 8); + + /* Validate the range of the result. */ + if (exp < 126 + intsize) { + /* abs(F) <= INT{N}_MAX */ + return f; + } + if (exp == 126 + intsize) { + uint32_t sign = extract32(f, 31, 1); + uint32_t frac = extract32(f, 0, 23); + if (sign && frac == 0) { + /* F == INT{N}_MIN */ + return f; + } + } + + overflow: + /* + * Raise Invalid and return INT{N}_MIN as a float. Revert any + * inexact exception float32_round_to_int may have raised. + */ + set_float_exception_flags(old_flags | float_flag_invalid, fpst); + return (0x100u + 126u + intsize) << 23; +} + +float32 HELPER(frint32_s)(float32 f, float_status *fpst) +{ + return frint_s(f, fpst, 32); +} + +float32 HELPER(frint64_s)(float32 f, float_status *fpst) +{ + return frint_s(f, fpst, 64); +} + +/* Round a float64 to an integer that fits in int32_t or int64_t. */ +static float64 frint_d(float64 f, float_status *fpst, int intsize) +{ + int old_flags = get_float_exception_flags(fpst); + uint32_t exp = extract64(f, 52, 11); + + if (unlikely(exp == 0x7ff)) { + /* NaN or Inf. */ + goto overflow; + } + + /* Round and re-extract the exponent. */ + f = float64_round_to_int(f, fpst); + exp = extract64(f, 52, 11); + + /* Validate the range of the result. */ + if (exp < 1022 + intsize) { + /* abs(F) <= INT{N}_MAX */ + return f; + } + if (exp == 1022 + intsize) { + uint64_t sign = extract64(f, 63, 1); + uint64_t frac = extract64(f, 0, 52); + if (sign && frac == 0) { + /* F == INT{N}_MIN */ + return f; + } + } + + overflow: + /* + * Raise Invalid and return INT{N}_MIN as a float. Revert any + * inexact exception float64_round_to_int may have raised. 
+ */ + set_float_exception_flags(old_flags | float_flag_invalid, fpst); + return (uint64_t)(0x800 + 1022 + intsize) << 52; +} + +float64 HELPER(frint32_d)(float64 f, float_status *fpst) +{ + return frint_d(f, fpst, 32); +} + +float64 HELPER(frint64_d)(float64 f, float_status *fpst) +{ + return frint_d(f, fpst, 64); +} + +void HELPER(check_hcr_el2_trap)(CPUARMState *env, uint32_t rt, uint32_t reg) +{ + uint32_t syndrome; + + switch (reg) { + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + case ARM_VFP_MVFR2: + if (!(arm_hcr_el2_eff(env) & HCR_TID3)) { + return; + } + break; + case ARM_VFP_FPSID: + if (!(arm_hcr_el2_eff(env) & HCR_TID0)) { + return; + } + break; + default: + g_assert_not_reached(); + } + + syndrome = ((EC_FPIDTRAP << ARM_EL_EC_SHIFT) + | ARM_EL_IL + | (1 << 24) | (0xe << 20) | (7 << 14) + | (reg << 10) | (rt << 5) | 1); + + raise_exception(env, EXCP_HYP_TRAP, syndrome, 2); +} + +uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env) +{ + return vfp_get_fpscr(env); +} + +void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) +{ + vfp_set_fpscr(env, val); +} diff --git a/target/arm/vfp_fpscr.c b/target/arm/vfp_fpscr.c new file mode 100644 index 00000000000..92ea60ebbf2 --- /dev/null +++ b/target/arm/vfp_fpscr.c @@ -0,0 +1,155 @@ +/* + * ARM VFP floating-point: handling of FPSCR/FPCR/FPSR + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internals.h" +#include "cpu-features.h" + +uint32_t vfp_get_fpcr(CPUARMState *env) +{ + uint32_t fpcr = env->vfp.fpcr + | (env->vfp.vec_len << 16) + | (env->vfp.vec_stride << 20); + + /* + * M-profile LTPSIZE is the same bits [18:16] as A-profile Len; whichever + * of the two is not applicable to this CPU will always be zero. + */ + fpcr |= env->v7m.ltpsize << 16; + + return fpcr; +} + +uint32_t vfp_get_fpsr(CPUARMState *env) +{ + uint32_t fpsr = env->vfp.fpsr; + uint32_t i; + + fpsr |= vfp_get_fpsr_from_host(env); + + i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3]; + fpsr |= i ? FPSR_QC : 0; + return fpsr; +} + +uint32_t vfp_get_fpscr(CPUARMState *env) +{ + return (vfp_get_fpcr(env) & FPSCR_FPCR_MASK) | + (vfp_get_fpsr(env) & FPSCR_FPSR_MASK); +} + +void vfp_set_fpsr(CPUARMState *env, uint32_t val) +{ + ARMCPU *cpu = env_archcpu(env); + + if (arm_feature(env, ARM_FEATURE_NEON) || + cpu_isar_feature(aa32_mve, cpu)) { + /* + * The bit we set within vfp.qc[] is arbitrary; the array as a + * whole being zero/non-zero is what counts. + */ + env->vfp.qc[0] = val & FPSR_QC; + env->vfp.qc[1] = 0; + env->vfp.qc[2] = 0; + env->vfp.qc[3] = 0; + } + + /* + * NZCV lives only in env->vfp.fpsr. The cumulative exception flags + * IOC|DZC|OFC|UFC|IXC|IDC also live in env->vfp.fpsr, with possible + * extra pending exception information that hasn't yet been folded in + * living in the float_status values (for TCG). 
+ * Since this FPSR write gives us the up to date values of the exception + * flags, we want to store into vfp.fpsr the NZCV and CEXC bits, zeroing + * anything else. We also need to clear out the float_status exception + * information so that the next vfp_get_fpsr does not fold in stale data. + */ + val &= FPSR_NZCV_MASK | FPSR_CEXC_MASK; + env->vfp.fpsr = val; + vfp_clear_float_status_exc_flags(env); +} + +static void vfp_set_fpcr_masked(CPUARMState *env, uint32_t val, uint32_t mask) +{ + /* + * We only set FPCR bits defined by mask, and leave the others alone. + * We assume the mask is sensible (e.g. doesn't try to set only + * part of a field) + */ + ARMCPU *cpu = env_archcpu(env); + + /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */ + if (!cpu_isar_feature(any_fp16, cpu)) { + val &= ~FPCR_FZ16; + } + if (!cpu_isar_feature(aa64_afp, cpu)) { + val &= ~(FPCR_FIZ | FPCR_AH | FPCR_NEP); + } + + if (!cpu_isar_feature(aa64_ebf16, cpu)) { + val &= ~FPCR_EBF; + } + + vfp_set_fpcr_to_host(env, val, mask); + + if (mask & (FPCR_LEN_MASK | FPCR_STRIDE_MASK)) { + if (!arm_feature(env, ARM_FEATURE_M)) { + /* + * Short-vector length and stride; on M-profile these bits + * are used for different purposes. + * We can't make this conditional be "if MVFR0.FPShVec != 0", + * because in v7A no-short-vector-support cores still had to + * allow Stride/Len to be written with the only effect that + * some insns are required to UNDEF if the guest sets them. + */ + env->vfp.vec_len = extract32(val, 16, 3); + env->vfp.vec_stride = extract32(val, 20, 2); + } else if (cpu_isar_feature(aa32_mve, cpu)) { + env->v7m.ltpsize = extract32(val, FPCR_LTPSIZE_SHIFT, + FPCR_LTPSIZE_LENGTH); + } + } + + /* + * We don't implement trapped exception handling, so the + * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!) + * + * The FPCR bits we keep in vfp.fpcr are AHP, DN, FZ, RMode, EBF, FZ16, + * FIZ, AH, and NEP. + * Len, Stride and LTPSIZE we just handled. Store those bits + * there, and zero any of the other FPCR bits and the RES0 and RAZ/WI + * bits. + */ + val &= FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK | FPCR_FZ16 | + FPCR_EBF | FPCR_FIZ | FPCR_AH | FPCR_NEP; + env->vfp.fpcr &= ~mask; + env->vfp.fpcr |= val; +} + +void vfp_set_fpcr(CPUARMState *env, uint32_t val) +{ + vfp_set_fpcr_masked(env, val, MAKE_64BIT_MASK(0, 32)); +} + +void vfp_set_fpscr(CPUARMState *env, uint32_t val) +{ + vfp_set_fpcr_masked(env, val, FPSCR_FPCR_MASK); + vfp_set_fpsr(env, val & FPSCR_FPSR_MASK); +} diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c deleted file mode 100644 index b3698da8ca7..00000000000 --- a/target/arm/vfp_helper.c +++ /dev/null @@ -1,1304 +0,0 @@ -/* - * ARM VFP floating-point operations - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
- */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "exec/helper-proto.h" -#include "internals.h" -#include "cpu-features.h" -#ifdef CONFIG_TCG -#include "qemu/log.h" -#include "fpu/softfloat.h" -#endif - -/* VFP support. We follow the convention used for VFP instructions: - Single precision routines have a "s" suffix, double precision a - "d" suffix. */ - -#ifdef CONFIG_TCG - -/* Convert host exception flags to vfp form. */ -static inline int vfp_exceptbits_from_host(int host_bits) -{ - int target_bits = 0; - - if (host_bits & float_flag_invalid) { - target_bits |= 1; - } - if (host_bits & float_flag_divbyzero) { - target_bits |= 2; - } - if (host_bits & float_flag_overflow) { - target_bits |= 4; - } - if (host_bits & (float_flag_underflow | float_flag_output_denormal)) { - target_bits |= 8; - } - if (host_bits & float_flag_inexact) { - target_bits |= 0x10; - } - if (host_bits & float_flag_input_denormal) { - target_bits |= 0x80; - } - return target_bits; -} - -/* Convert vfp exception flags to target form. */ -static inline int vfp_exceptbits_to_host(int target_bits) -{ - int host_bits = 0; - - if (target_bits & 1) { - host_bits |= float_flag_invalid; - } - if (target_bits & 2) { - host_bits |= float_flag_divbyzero; - } - if (target_bits & 4) { - host_bits |= float_flag_overflow; - } - if (target_bits & 8) { - host_bits |= float_flag_underflow; - } - if (target_bits & 0x10) { - host_bits |= float_flag_inexact; - } - if (target_bits & 0x80) { - host_bits |= float_flag_input_denormal; - } - return host_bits; -} - -static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) -{ - uint32_t i; - - i = get_float_exception_flags(&env->vfp.fp_status); - i |= get_float_exception_flags(&env->vfp.standard_fp_status); - /* FZ16 does not generate an input denormal exception. */ - i |= (get_float_exception_flags(&env->vfp.fp_status_f16) - & ~float_flag_input_denormal); - i |= (get_float_exception_flags(&env->vfp.standard_fp_status_f16) - & ~float_flag_input_denormal); - return vfp_exceptbits_from_host(i); -} - -static void vfp_set_fpsr_to_host(CPUARMState *env, uint32_t val) -{ - /* - * The exception flags are ORed together when we read fpscr so we - * only need to preserve the current state in one of our - * float_status values. 
- */ - int i = vfp_exceptbits_to_host(val); - set_float_exception_flags(i, &env->vfp.fp_status); - set_float_exception_flags(0, &env->vfp.fp_status_f16); - set_float_exception_flags(0, &env->vfp.standard_fp_status); - set_float_exception_flags(0, &env->vfp.standard_fp_status_f16); -} - -static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) -{ - uint64_t changed = env->vfp.fpcr; - - changed ^= val; - changed &= mask; - if (changed & (3 << 22)) { - int i = (val >> 22) & 3; - switch (i) { - case FPROUNDING_TIEEVEN: - i = float_round_nearest_even; - break; - case FPROUNDING_POSINF: - i = float_round_up; - break; - case FPROUNDING_NEGINF: - i = float_round_down; - break; - case FPROUNDING_ZERO: - i = float_round_to_zero; - break; - } - set_float_rounding_mode(i, &env->vfp.fp_status); - set_float_rounding_mode(i, &env->vfp.fp_status_f16); - } - if (changed & FPCR_FZ16) { - bool ftz_enabled = val & FPCR_FZ16; - set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16); - set_flush_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16); - set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16); - set_flush_inputs_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16); - } - if (changed & FPCR_FZ) { - bool ftz_enabled = val & FPCR_FZ; - set_flush_to_zero(ftz_enabled, &env->vfp.fp_status); - set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status); - } - if (changed & FPCR_DN) { - bool dnan_enabled = val & FPCR_DN; - set_default_nan_mode(dnan_enabled, &env->vfp.fp_status); - set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16); - } -} - -#else - -static uint32_t vfp_get_fpsr_from_host(CPUARMState *env) -{ - return 0; -} - -static void vfp_set_fpsr_to_host(CPUARMState *env, uint32_t val) -{ -} - -static void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask) -{ -} - -#endif - -uint32_t vfp_get_fpcr(CPUARMState *env) -{ - uint32_t fpcr = env->vfp.fpcr - | (env->vfp.vec_len << 16) - | (env->vfp.vec_stride << 20); - - /* - * M-profile LTPSIZE is the same bits [18:16] as A-profile Len; whichever - * of the two is not applicable to this CPU will always be zero. - */ - fpcr |= env->v7m.ltpsize << 16; - - return fpcr; -} - -uint32_t vfp_get_fpsr(CPUARMState *env) -{ - uint32_t fpsr = env->vfp.fpsr; - uint32_t i; - - fpsr |= vfp_get_fpsr_from_host(env); - - i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3]; - fpsr |= i ? FPSR_QC : 0; - return fpsr; -} - -uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env) -{ - return (vfp_get_fpcr(env) & FPSCR_FPCR_MASK) | - (vfp_get_fpsr(env) & FPSCR_FPSR_MASK); -} - -uint32_t vfp_get_fpscr(CPUARMState *env) -{ - return HELPER(vfp_get_fpscr)(env); -} - -void vfp_set_fpsr(CPUARMState *env, uint32_t val) -{ - ARMCPU *cpu = env_archcpu(env); - - vfp_set_fpsr_to_host(env, val); - - if (arm_feature(env, ARM_FEATURE_NEON) || - cpu_isar_feature(aa32_mve, cpu)) { - /* - * The bit we set within vfp.qc[] is arbitrary; the array as a - * whole being zero/non-zero is what counts. - */ - env->vfp.qc[0] = val & FPSR_QC; - env->vfp.qc[1] = 0; - env->vfp.qc[2] = 0; - env->vfp.qc[3] = 0; - } - - /* - * The only FPSR bits we keep in vfp.fpsr are NZCV: - * the exception flags IOC|DZC|OFC|UFC|IXC|IDC are stored in - * fp_status, and QC is in vfp.qc[]. Store the NZCV bits there, - * and zero any of the other FPSR bits. 
- */ - val &= FPSR_NZCV_MASK; - env->vfp.fpsr = val; -} - -static void vfp_set_fpcr_masked(CPUARMState *env, uint32_t val, uint32_t mask) -{ - /* - * We only set FPCR bits defined by mask, and leave the others alone. - * We assume the mask is sensible (e.g. doesn't try to set only - * part of a field) - */ - ARMCPU *cpu = env_archcpu(env); - - /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */ - if (!cpu_isar_feature(any_fp16, cpu)) { - val &= ~FPCR_FZ16; - } - - vfp_set_fpcr_to_host(env, val, mask); - - if (mask & (FPCR_LEN_MASK | FPCR_STRIDE_MASK)) { - if (!arm_feature(env, ARM_FEATURE_M)) { - /* - * Short-vector length and stride; on M-profile these bits - * are used for different purposes. - * We can't make this conditional be "if MVFR0.FPShVec != 0", - * because in v7A no-short-vector-support cores still had to - * allow Stride/Len to be written with the only effect that - * some insns are required to UNDEF if the guest sets them. - */ - env->vfp.vec_len = extract32(val, 16, 3); - env->vfp.vec_stride = extract32(val, 20, 2); - } else if (cpu_isar_feature(aa32_mve, cpu)) { - env->v7m.ltpsize = extract32(val, FPCR_LTPSIZE_SHIFT, - FPCR_LTPSIZE_LENGTH); - } - } - - /* - * We don't implement trapped exception handling, so the - * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!) - * - * The FPCR bits we keep in vfp.fpcr are AHP, DN, FZ, RMode - * and FZ16. Len, Stride and LTPSIZE we just handled. Store those bits - * there, and zero any of the other FPCR bits and the RES0 and RAZ/WI - * bits. - */ - val &= FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK | FPCR_FZ16; - env->vfp.fpcr &= ~mask; - env->vfp.fpcr |= val; -} - -void vfp_set_fpcr(CPUARMState *env, uint32_t val) -{ - vfp_set_fpcr_masked(env, val, MAKE_64BIT_MASK(0, 32)); -} - -void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) -{ - vfp_set_fpcr_masked(env, val, FPSCR_FPCR_MASK); - vfp_set_fpsr(env, val & FPSCR_FPSR_MASK); -} - -void vfp_set_fpscr(CPUARMState *env, uint32_t val) -{ - HELPER(vfp_set_fpscr)(env, val); -} - -#ifdef CONFIG_TCG - -#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p)) - -#define VFP_BINOP(name) \ -dh_ctype_f16 VFP_HELPER(name, h)(dh_ctype_f16 a, dh_ctype_f16 b, void *fpstp) \ -{ \ - float_status *fpst = fpstp; \ - return float16_ ## name(a, b, fpst); \ -} \ -float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \ -{ \ - float_status *fpst = fpstp; \ - return float32_ ## name(a, b, fpst); \ -} \ -float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \ -{ \ - float_status *fpst = fpstp; \ - return float64_ ## name(a, b, fpst); \ -} -VFP_BINOP(add) -VFP_BINOP(sub) -VFP_BINOP(mul) -VFP_BINOP(div) -VFP_BINOP(min) -VFP_BINOP(max) -VFP_BINOP(minnum) -VFP_BINOP(maxnum) -#undef VFP_BINOP - -dh_ctype_f16 VFP_HELPER(sqrt, h)(dh_ctype_f16 a, CPUARMState *env) -{ - return float16_sqrt(a, &env->vfp.fp_status_f16); -} - -float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env) -{ - return float32_sqrt(a, &env->vfp.fp_status); -} - -float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env) -{ - return float64_sqrt(a, &env->vfp.fp_status); -} - -static void softfloat_to_vfp_compare(CPUARMState *env, FloatRelation cmp) -{ - uint32_t flags; - switch (cmp) { - case float_relation_equal: - flags = 0x6; - break; - case float_relation_less: - flags = 0x8; - break; - case float_relation_greater: - flags = 0x2; - break; - case float_relation_unordered: - flags = 0x3; - break; - default: - g_assert_not_reached(); - } - env->vfp.fpsr = deposit64(env->vfp.fpsr, 28, 4, flags); /* 
NZCV */ -} - -/* XXX: check quiet/signaling case */ -#define DO_VFP_cmp(P, FLOATTYPE, ARGTYPE, FPST) \ -void VFP_HELPER(cmp, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \ -{ \ - softfloat_to_vfp_compare(env, \ - FLOATTYPE ## _compare_quiet(a, b, &env->vfp.FPST)); \ -} \ -void VFP_HELPER(cmpe, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \ -{ \ - softfloat_to_vfp_compare(env, \ - FLOATTYPE ## _compare(a, b, &env->vfp.FPST)); \ -} -DO_VFP_cmp(h, float16, dh_ctype_f16, fp_status_f16) -DO_VFP_cmp(s, float32, float32, fp_status) -DO_VFP_cmp(d, float64, float64, fp_status) -#undef DO_VFP_cmp - -/* Integer to float and float to integer conversions */ - -#define CONV_ITOF(name, ftype, fsz, sign) \ -ftype HELPER(name)(uint32_t x, void *fpstp) \ -{ \ - float_status *fpst = fpstp; \ - return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \ -} - -#define CONV_FTOI(name, ftype, fsz, sign, round) \ -sign##int32_t HELPER(name)(ftype x, void *fpstp) \ -{ \ - float_status *fpst = fpstp; \ - if (float##fsz##_is_any_nan(x)) { \ - float_raise(float_flag_invalid, fpst); \ - return 0; \ - } \ - return float##fsz##_to_##sign##int32##round(x, fpst); \ -} - -#define FLOAT_CONVS(name, p, ftype, fsz, sign) \ - CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \ - CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \ - CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero) - -FLOAT_CONVS(si, h, uint32_t, 16, ) -FLOAT_CONVS(si, s, float32, 32, ) -FLOAT_CONVS(si, d, float64, 64, ) -FLOAT_CONVS(ui, h, uint32_t, 16, u) -FLOAT_CONVS(ui, s, float32, 32, u) -FLOAT_CONVS(ui, d, float64, 64, u) - -#undef CONV_ITOF -#undef CONV_FTOI -#undef FLOAT_CONVS - -/* floating point conversion */ -float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env) -{ - return float32_to_float64(x, &env->vfp.fp_status); -} - -float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env) -{ - return float64_to_float32(x, &env->vfp.fp_status); -} - -uint32_t HELPER(bfcvt)(float32 x, void *status) -{ - return float32_to_bfloat16(x, status); -} - -uint32_t HELPER(bfcvt_pair)(uint64_t pair, void *status) -{ - bfloat16 lo = float32_to_bfloat16(extract64(pair, 0, 32), status); - bfloat16 hi = float32_to_bfloat16(extract64(pair, 32, 32), status); - return deposit32(lo, 16, 16, hi); -} - -/* - * VFP3 fixed point conversion. The AArch32 versions of fix-to-float - * must always round-to-nearest; the AArch64 ones honour the FPSCR - * rounding mode. (For AArch32 Neon the standard-FPSCR is set to - * round-to-nearest so either helper will work.) AArch32 float-to-fix - * must round-to-zero. 
- */ -#define VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \ -ftype HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \ - void *fpstp) \ -{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); } - -#define VFP_CONV_FIX_FLOAT_ROUND(name, p, fsz, ftype, isz, itype) \ - ftype HELPER(vfp_##name##to##p##_round_to_nearest)(uint##isz##_t x, \ - uint32_t shift, \ - void *fpstp) \ - { \ - ftype ret; \ - float_status *fpst = fpstp; \ - FloatRoundMode oldmode = fpst->float_rounding_mode; \ - fpst->float_rounding_mode = float_round_nearest_even; \ - ret = itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); \ - fpst->float_rounding_mode = oldmode; \ - return ret; \ - } - -#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, ROUND, suff) \ -uint##isz##_t HELPER(vfp_to##name##p##suff)(ftype x, uint32_t shift, \ - void *fpst) \ -{ \ - if (unlikely(float##fsz##_is_any_nan(x))) { \ - float_raise(float_flag_invalid, fpst); \ - return 0; \ - } \ - return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \ -} - -#define VFP_CONV_FIX(name, p, fsz, ftype, isz, itype) \ -VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \ -VFP_CONV_FIX_FLOAT_ROUND(name, p, fsz, ftype, isz, itype) \ -VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \ - float_round_to_zero, _round_to_zero) \ -VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \ - get_float_rounding_mode(fpst), ) - -#define VFP_CONV_FIX_A64(name, p, fsz, ftype, isz, itype) \ -VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \ -VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \ - get_float_rounding_mode(fpst), ) - -VFP_CONV_FIX(sh, d, 64, float64, 64, int16) -VFP_CONV_FIX(sl, d, 64, float64, 64, int32) -VFP_CONV_FIX_A64(sq, d, 64, float64, 64, int64) -VFP_CONV_FIX(uh, d, 64, float64, 64, uint16) -VFP_CONV_FIX(ul, d, 64, float64, 64, uint32) -VFP_CONV_FIX_A64(uq, d, 64, float64, 64, uint64) -VFP_CONV_FIX(sh, s, 32, float32, 32, int16) -VFP_CONV_FIX(sl, s, 32, float32, 32, int32) -VFP_CONV_FIX_A64(sq, s, 32, float32, 64, int64) -VFP_CONV_FIX(uh, s, 32, float32, 32, uint16) -VFP_CONV_FIX(ul, s, 32, float32, 32, uint32) -VFP_CONV_FIX_A64(uq, s, 32, float32, 64, uint64) -VFP_CONV_FIX(sh, h, 16, dh_ctype_f16, 32, int16) -VFP_CONV_FIX(sl, h, 16, dh_ctype_f16, 32, int32) -VFP_CONV_FIX_A64(sq, h, 16, dh_ctype_f16, 64, int64) -VFP_CONV_FIX(uh, h, 16, dh_ctype_f16, 32, uint16) -VFP_CONV_FIX(ul, h, 16, dh_ctype_f16, 32, uint32) -VFP_CONV_FIX_A64(uq, h, 16, dh_ctype_f16, 64, uint64) - -#undef VFP_CONV_FIX -#undef VFP_CONV_FIX_FLOAT -#undef VFP_CONV_FLOAT_FIX_ROUND -#undef VFP_CONV_FIX_A64 - -/* Set the current fp rounding mode and return the old one. - * The argument is a softfloat float_round_ value. - */ -uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp) -{ - float_status *fp_status = fpstp; - - uint32_t prev_rmode = get_float_rounding_mode(fp_status); - set_float_rounding_mode(rmode, fp_status); - - return prev_rmode; -} - -/* Half precision conversions. */ -float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode) -{ - /* Squash FZ16 to 0 for the duration of conversion. In this case, - * it would affect flushing input denormals. 
- */ - float_status *fpst = fpstp; - bool save = get_flush_inputs_to_zero(fpst); - set_flush_inputs_to_zero(false, fpst); - float32 r = float16_to_float32(a, !ahp_mode, fpst); - set_flush_inputs_to_zero(save, fpst); - return r; -} - -uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode) -{ - /* Squash FZ16 to 0 for the duration of conversion. In this case, - * it would affect flushing output denormals. - */ - float_status *fpst = fpstp; - bool save = get_flush_to_zero(fpst); - set_flush_to_zero(false, fpst); - float16 r = float32_to_float16(a, !ahp_mode, fpst); - set_flush_to_zero(save, fpst); - return r; -} - -float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode) -{ - /* Squash FZ16 to 0 for the duration of conversion. In this case, - * it would affect flushing input denormals. - */ - float_status *fpst = fpstp; - bool save = get_flush_inputs_to_zero(fpst); - set_flush_inputs_to_zero(false, fpst); - float64 r = float16_to_float64(a, !ahp_mode, fpst); - set_flush_inputs_to_zero(save, fpst); - return r; -} - -uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode) -{ - /* Squash FZ16 to 0 for the duration of conversion. In this case, - * it would affect flushing output denormals. - */ - float_status *fpst = fpstp; - bool save = get_flush_to_zero(fpst); - set_flush_to_zero(false, fpst); - float16 r = float64_to_float16(a, !ahp_mode, fpst); - set_flush_to_zero(save, fpst); - return r; -} - -/* NEON helpers. */ - -/* Constants 256 and 512 are used in some helpers; we avoid relying on - * int->float conversions at run-time. */ -#define float64_256 make_float64(0x4070000000000000LL) -#define float64_512 make_float64(0x4080000000000000LL) -#define float16_maxnorm make_float16(0x7bff) -#define float32_maxnorm make_float32(0x7f7fffff) -#define float64_maxnorm make_float64(0x7fefffffffffffffLL) - -/* Reciprocal functions - * - * The algorithm that must be used to calculate the estimate - * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate - */ - -/* See RecipEstimate() - * - * input is a 9 bit fixed point number - * input range 256 .. 511 for a number from 0.5 <= x < 1.0. - * result range 256 .. 511 for a number from 1.0 to 511/256. - */ - -static int recip_estimate(int input) -{ - int a, b, r; - assert(256 <= input && input < 512); - a = (input * 2) + 1; - b = (1 << 19) / a; - r = (b + 1) >> 1; - assert(256 <= r && r < 512); - return r; -} - -/* - * Common wrapper to call recip_estimate - * - * The parameters are exponent and 64 bit fraction (without implicit - * bit) where the binary point is nominally at bit 52. Returns a - * float64 which can then be rounded to the appropriate size by the - * callee. 
- */ - -static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac) -{ - uint32_t scaled, estimate; - uint64_t result_frac; - int result_exp; - - /* Handle sub-normals */ - if (*exp == 0) { - if (extract64(frac, 51, 1) == 0) { - *exp = -1; - frac <<= 2; - } else { - frac <<= 1; - } - } - - /* scaled = UInt('1':fraction<51:44>) */ - scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); - estimate = recip_estimate(scaled); - - result_exp = exp_off - *exp; - result_frac = deposit64(0, 44, 8, estimate); - if (result_exp == 0) { - result_frac = deposit64(result_frac >> 1, 51, 1, 1); - } else if (result_exp == -1) { - result_frac = deposit64(result_frac >> 2, 50, 2, 1); - result_exp = 0; - } - - *exp = result_exp; - - return result_frac; -} - -static bool round_to_inf(float_status *fpst, bool sign_bit) -{ - switch (fpst->float_rounding_mode) { - case float_round_nearest_even: /* Round to Nearest */ - return true; - case float_round_up: /* Round to +Inf */ - return !sign_bit; - case float_round_down: /* Round to -Inf */ - return sign_bit; - case float_round_to_zero: /* Round to Zero */ - return false; - default: - g_assert_not_reached(); - } -} - -uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp) -{ - float_status *fpst = fpstp; - float16 f16 = float16_squash_input_denormal(input, fpst); - uint32_t f16_val = float16_val(f16); - uint32_t f16_sign = float16_is_neg(f16); - int f16_exp = extract32(f16_val, 10, 5); - uint32_t f16_frac = extract32(f16_val, 0, 10); - uint64_t f64_frac; - - if (float16_is_any_nan(f16)) { - float16 nan = f16; - if (float16_is_signaling_nan(f16, fpst)) { - float_raise(float_flag_invalid, fpst); - if (!fpst->default_nan_mode) { - nan = float16_silence_nan(f16, fpst); - } - } - if (fpst->default_nan_mode) { - nan = float16_default_nan(fpst); - } - return nan; - } else if (float16_is_infinity(f16)) { - return float16_set_sign(float16_zero, float16_is_neg(f16)); - } else if (float16_is_zero(f16)) { - float_raise(float_flag_divbyzero, fpst); - return float16_set_sign(float16_infinity, float16_is_neg(f16)); - } else if (float16_abs(f16) < (1 << 8)) { - /* Abs(value) < 2.0^-16 */ - float_raise(float_flag_overflow | float_flag_inexact, fpst); - if (round_to_inf(fpst, f16_sign)) { - return float16_set_sign(float16_infinity, f16_sign); - } else { - return float16_set_sign(float16_maxnorm, f16_sign); - } - } else if (f16_exp >= 29 && fpst->flush_to_zero) { - float_raise(float_flag_underflow, fpst); - return float16_set_sign(float16_zero, float16_is_neg(f16)); - } - - f64_frac = call_recip_estimate(&f16_exp, 29, - ((uint64_t) f16_frac) << (52 - 10)); - - /* result = sign : result_exp<4:0> : fraction<51:42> */ - f16_val = deposit32(0, 15, 1, f16_sign); - f16_val = deposit32(f16_val, 10, 5, f16_exp); - f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10)); - return make_float16(f16_val); -} - -float32 HELPER(recpe_f32)(float32 input, void *fpstp) -{ - float_status *fpst = fpstp; - float32 f32 = float32_squash_input_denormal(input, fpst); - uint32_t f32_val = float32_val(f32); - bool f32_sign = float32_is_neg(f32); - int f32_exp = extract32(f32_val, 23, 8); - uint32_t f32_frac = extract32(f32_val, 0, 23); - uint64_t f64_frac; - - if (float32_is_any_nan(f32)) { - float32 nan = f32; - if (float32_is_signaling_nan(f32, fpst)) { - float_raise(float_flag_invalid, fpst); - if (!fpst->default_nan_mode) { - nan = float32_silence_nan(f32, fpst); - } - } - if (fpst->default_nan_mode) { - nan = float32_default_nan(fpst); - } - return nan; - } else if 
(float32_is_infinity(f32)) { - return float32_set_sign(float32_zero, float32_is_neg(f32)); - } else if (float32_is_zero(f32)) { - float_raise(float_flag_divbyzero, fpst); - return float32_set_sign(float32_infinity, float32_is_neg(f32)); - } else if (float32_abs(f32) < (1ULL << 21)) { - /* Abs(value) < 2.0^-128 */ - float_raise(float_flag_overflow | float_flag_inexact, fpst); - if (round_to_inf(fpst, f32_sign)) { - return float32_set_sign(float32_infinity, f32_sign); - } else { - return float32_set_sign(float32_maxnorm, f32_sign); - } - } else if (f32_exp >= 253 && fpst->flush_to_zero) { - float_raise(float_flag_underflow, fpst); - return float32_set_sign(float32_zero, float32_is_neg(f32)); - } - - f64_frac = call_recip_estimate(&f32_exp, 253, - ((uint64_t) f32_frac) << (52 - 23)); - - /* result = sign : result_exp<7:0> : fraction<51:29> */ - f32_val = deposit32(0, 31, 1, f32_sign); - f32_val = deposit32(f32_val, 23, 8, f32_exp); - f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23)); - return make_float32(f32_val); -} - -float64 HELPER(recpe_f64)(float64 input, void *fpstp) -{ - float_status *fpst = fpstp; - float64 f64 = float64_squash_input_denormal(input, fpst); - uint64_t f64_val = float64_val(f64); - bool f64_sign = float64_is_neg(f64); - int f64_exp = extract64(f64_val, 52, 11); - uint64_t f64_frac = extract64(f64_val, 0, 52); - - /* Deal with any special cases */ - if (float64_is_any_nan(f64)) { - float64 nan = f64; - if (float64_is_signaling_nan(f64, fpst)) { - float_raise(float_flag_invalid, fpst); - if (!fpst->default_nan_mode) { - nan = float64_silence_nan(f64, fpst); - } - } - if (fpst->default_nan_mode) { - nan = float64_default_nan(fpst); - } - return nan; - } else if (float64_is_infinity(f64)) { - return float64_set_sign(float64_zero, float64_is_neg(f64)); - } else if (float64_is_zero(f64)) { - float_raise(float_flag_divbyzero, fpst); - return float64_set_sign(float64_infinity, float64_is_neg(f64)); - } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) { - /* Abs(value) < 2.0^-1024 */ - float_raise(float_flag_overflow | float_flag_inexact, fpst); - if (round_to_inf(fpst, f64_sign)) { - return float64_set_sign(float64_infinity, f64_sign); - } else { - return float64_set_sign(float64_maxnorm, f64_sign); - } - } else if (f64_exp >= 2045 && fpst->flush_to_zero) { - float_raise(float_flag_underflow, fpst); - return float64_set_sign(float64_zero, float64_is_neg(f64)); - } - - f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac); - - /* result = sign : result_exp<10:0> : fraction<51:0>; */ - f64_val = deposit64(0, 63, 1, f64_sign); - f64_val = deposit64(f64_val, 52, 11, f64_exp); - f64_val = deposit64(f64_val, 0, 52, f64_frac); - return make_float64(f64_val); -} - -/* The algorithm that must be used to calculate the estimate - * is specified by the ARM ARM. 
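The overflow paths of the recpe_f16/f32/f64 helpers above all defer to round_to_inf() to decide whether an overflowing estimate becomes an infinity or the largest normal of that sign. Below is a standalone restatement of that decision (illustration only; the enum is hypothetical and stands in for the softfloat rounding modes).

#include <stdbool.h>

enum demo_rmode { RM_NEAREST, RM_UP, RM_DOWN, RM_ZERO };

/* Overflow goes to infinity only when the current rounding mode would
 * round a huge finite value of that sign towards infinity. */
static bool demo_round_to_inf(enum demo_rmode mode, bool sign_bit)
{
    switch (mode) {
    case RM_NEAREST:
        return true;             /* round to nearest: always */
    case RM_UP:
        return !sign_bit;        /* round to +Inf: positive values only */
    case RM_DOWN:
        return sign_bit;         /* round to -Inf: negative values only */
    case RM_ZERO:
        return false;            /* round to zero: never */
    }
    return false;
}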
- */ - -static int do_recip_sqrt_estimate(int a) -{ - int b, estimate; - - assert(128 <= a && a < 512); - if (a < 256) { - a = a * 2 + 1; - } else { - a = (a >> 1) << 1; - a = (a + 1) * 2; - } - b = 512; - while (a * (b + 1) * (b + 1) < (1 << 28)) { - b += 1; - } - estimate = (b + 1) / 2; - assert(256 <= estimate && estimate < 512); - - return estimate; -} - - -static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac) -{ - int estimate; - uint32_t scaled; - - if (*exp == 0) { - while (extract64(frac, 51, 1) == 0) { - frac = frac << 1; - *exp -= 1; - } - frac = extract64(frac, 0, 51) << 1; - } - - if (*exp & 1) { - /* scaled = UInt('01':fraction<51:45>) */ - scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7)); - } else { - /* scaled = UInt('1':fraction<51:44>) */ - scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); - } - estimate = do_recip_sqrt_estimate(scaled); - - *exp = (exp_off - *exp) / 2; - return extract64(estimate, 0, 8) << 44; -} - -uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp) -{ - float_status *s = fpstp; - float16 f16 = float16_squash_input_denormal(input, s); - uint16_t val = float16_val(f16); - bool f16_sign = float16_is_neg(f16); - int f16_exp = extract32(val, 10, 5); - uint16_t f16_frac = extract32(val, 0, 10); - uint64_t f64_frac; - - if (float16_is_any_nan(f16)) { - float16 nan = f16; - if (float16_is_signaling_nan(f16, s)) { - float_raise(float_flag_invalid, s); - if (!s->default_nan_mode) { - nan = float16_silence_nan(f16, fpstp); - } - } - if (s->default_nan_mode) { - nan = float16_default_nan(s); - } - return nan; - } else if (float16_is_zero(f16)) { - float_raise(float_flag_divbyzero, s); - return float16_set_sign(float16_infinity, f16_sign); - } else if (f16_sign) { - float_raise(float_flag_invalid, s); - return float16_default_nan(s); - } else if (float16_is_infinity(f16)) { - return float16_zero; - } - - /* Scale and normalize to a double-precision value between 0.25 and 1.0, - * preserving the parity of the exponent. */ - - f64_frac = ((uint64_t) f16_frac) << (52 - 10); - - f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac); - - /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */ - val = deposit32(0, 15, 1, f16_sign); - val = deposit32(val, 10, 5, f16_exp); - val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8)); - return make_float16(val); -} - -float32 HELPER(rsqrte_f32)(float32 input, void *fpstp) -{ - float_status *s = fpstp; - float32 f32 = float32_squash_input_denormal(input, s); - uint32_t val = float32_val(f32); - uint32_t f32_sign = float32_is_neg(f32); - int f32_exp = extract32(val, 23, 8); - uint32_t f32_frac = extract32(val, 0, 23); - uint64_t f64_frac; - - if (float32_is_any_nan(f32)) { - float32 nan = f32; - if (float32_is_signaling_nan(f32, s)) { - float_raise(float_flag_invalid, s); - if (!s->default_nan_mode) { - nan = float32_silence_nan(f32, fpstp); - } - } - if (s->default_nan_mode) { - nan = float32_default_nan(s); - } - return nan; - } else if (float32_is_zero(f32)) { - float_raise(float_flag_divbyzero, s); - return float32_set_sign(float32_infinity, float32_is_neg(f32)); - } else if (float32_is_neg(f32)) { - float_raise(float_flag_invalid, s); - return float32_default_nan(s); - } else if (float32_is_infinity(f32)) { - return float32_zero; - } - - /* Scale and normalize to a double-precision value between 0.25 and 1.0, - * preserving the parity of the exponent. 
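For reference, the RecipSqrtEstimate search loop above evaluated at the corners of its 128..511 input range. This is a standalone sketch mirroring do_recip_sqrt_estimate(); the printed values are only a sanity check of the arithmetic.

#include <assert.h>
#include <stdio.h>

/* Same search loop as do_recip_sqrt_estimate() above. */
static int rsqrt_estimate_demo(int a)
{
    assert(128 <= a && a < 512);
    if (a < 256) {
        /* odd exponent: input represents [0.25, 0.5) */
        a = a * 2 + 1;
    } else {
        /* even exponent: input represents [0.5, 1.0) */
        a = (a >> 1) << 1;
        a = (a + 1) * 2;
    }
    int b = 512;
    while (a * (b + 1) * (b + 1) < (1 << 28)) {
        b += 1;
    }
    return (b + 1) / 2;
}

int main(void)
{
    /* 128 (0.25) -> 511 (~2.0), 256 (0.5) -> 361 (~sqrt(2)), 511 -> 256. */
    printf("%d %d %d\n", rsqrt_estimate_demo(128),
           rsqrt_estimate_demo(256), rsqrt_estimate_demo(511));
    return 0;
}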
*/ - - f64_frac = ((uint64_t) f32_frac) << 29; - - f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac); - - /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(15) */ - val = deposit32(0, 31, 1, f32_sign); - val = deposit32(val, 23, 8, f32_exp); - val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8)); - return make_float32(val); -} - -float64 HELPER(rsqrte_f64)(float64 input, void *fpstp) -{ - float_status *s = fpstp; - float64 f64 = float64_squash_input_denormal(input, s); - uint64_t val = float64_val(f64); - bool f64_sign = float64_is_neg(f64); - int f64_exp = extract64(val, 52, 11); - uint64_t f64_frac = extract64(val, 0, 52); - - if (float64_is_any_nan(f64)) { - float64 nan = f64; - if (float64_is_signaling_nan(f64, s)) { - float_raise(float_flag_invalid, s); - if (!s->default_nan_mode) { - nan = float64_silence_nan(f64, fpstp); - } - } - if (s->default_nan_mode) { - nan = float64_default_nan(s); - } - return nan; - } else if (float64_is_zero(f64)) { - float_raise(float_flag_divbyzero, s); - return float64_set_sign(float64_infinity, float64_is_neg(f64)); - } else if (float64_is_neg(f64)) { - float_raise(float_flag_invalid, s); - return float64_default_nan(s); - } else if (float64_is_infinity(f64)) { - return float64_zero; - } - - f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac); - - /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(44) */ - val = deposit64(0, 61, 1, f64_sign); - val = deposit64(val, 52, 11, f64_exp); - val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8)); - return make_float64(val); -} - -uint32_t HELPER(recpe_u32)(uint32_t a) -{ - int input, estimate; - - if ((a & 0x80000000) == 0) { - return 0xffffffff; - } - - input = extract32(a, 23, 9); - estimate = recip_estimate(input); - - return deposit32(0, (32 - 9), 9, estimate); -} - -uint32_t HELPER(rsqrte_u32)(uint32_t a) -{ - int estimate; - - if ((a & 0xc0000000) == 0) { - return 0xffffffff; - } - - estimate = do_recip_sqrt_estimate(extract32(a, 23, 9)); - - return deposit32(0, 23, 9, estimate); -} - -/* VFPv4 fused multiply-accumulate */ -dh_ctype_f16 VFP_HELPER(muladd, h)(dh_ctype_f16 a, dh_ctype_f16 b, - dh_ctype_f16 c, void *fpstp) -{ - float_status *fpst = fpstp; - return float16_muladd(a, b, c, 0, fpst); -} - -float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp) -{ - float_status *fpst = fpstp; - return float32_muladd(a, b, c, 0, fpst); -} - -float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp) -{ - float_status *fpst = fpstp; - return float64_muladd(a, b, c, 0, fpst); -} - -/* ARMv8 round to integral */ -dh_ctype_f16 HELPER(rinth_exact)(dh_ctype_f16 x, void *fp_status) -{ - return float16_round_to_int(x, fp_status); -} - -float32 HELPER(rints_exact)(float32 x, void *fp_status) -{ - return float32_round_to_int(x, fp_status); -} - -float64 HELPER(rintd_exact)(float64 x, void *fp_status) -{ - return float64_round_to_int(x, fp_status); -} - -dh_ctype_f16 HELPER(rinth)(dh_ctype_f16 x, void *fp_status) -{ - int old_flags = get_float_exception_flags(fp_status), new_flags; - float16 ret; - - ret = float16_round_to_int(x, fp_status); - - /* Suppress any inexact exceptions the conversion produced */ - if (!(old_flags & float_flag_inexact)) { - new_flags = get_float_exception_flags(fp_status); - set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); - } - - return ret; -} - -float32 HELPER(rints)(float32 x, void *fp_status) -{ - int old_flags = get_float_exception_flags(fp_status), new_flags; - float32 ret; - - ret = 
float32_round_to_int(x, fp_status); - - /* Suppress any inexact exceptions the conversion produced */ - if (!(old_flags & float_flag_inexact)) { - new_flags = get_float_exception_flags(fp_status); - set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); - } - - return ret; -} - -float64 HELPER(rintd)(float64 x, void *fp_status) -{ - int old_flags = get_float_exception_flags(fp_status), new_flags; - float64 ret; - - ret = float64_round_to_int(x, fp_status); - - new_flags = get_float_exception_flags(fp_status); - - /* Suppress any inexact exceptions the conversion produced */ - if (!(old_flags & float_flag_inexact)) { - new_flags = get_float_exception_flags(fp_status); - set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); - } - - return ret; -} - -/* Convert ARM rounding mode to softfloat */ -const FloatRoundMode arm_rmode_to_sf_map[] = { - [FPROUNDING_TIEEVEN] = float_round_nearest_even, - [FPROUNDING_POSINF] = float_round_up, - [FPROUNDING_NEGINF] = float_round_down, - [FPROUNDING_ZERO] = float_round_to_zero, - [FPROUNDING_TIEAWAY] = float_round_ties_away, - [FPROUNDING_ODD] = float_round_to_odd, -}; - -/* - * Implement float64 to int32_t conversion without saturation; - * the result is supplied modulo 2^32. - */ -uint64_t HELPER(fjcvtzs)(float64 value, void *vstatus) -{ - float_status *status = vstatus; - uint32_t frac, e_old, e_new; - bool inexact; - - e_old = get_float_exception_flags(status); - set_float_exception_flags(0, status); - frac = float64_to_int32_modulo(value, float_round_to_zero, status); - e_new = get_float_exception_flags(status); - set_float_exception_flags(e_old | e_new, status); - - /* Normal inexact, denormal with flush-to-zero, or overflow or NaN */ - inexact = e_new & (float_flag_inexact | - float_flag_input_denormal | - float_flag_invalid); - - /* While not inexact for IEEE FP, -0.0 is inexact for JavaScript. */ - inexact |= value == float64_chs(float64_zero); - - /* Pack the result and the env->ZF representation of Z together. */ - return deposit64(frac, 32, 32, inexact); -} - -uint32_t HELPER(vjcvt)(float64 value, CPUARMState *env) -{ - uint64_t pair = HELPER(fjcvtzs)(value, &env->vfp.fp_status); - uint32_t result = pair; - uint32_t z = (pair >> 32) == 0; - - /* Store Z, clear NCV, in FPSCR.NZCV. */ - env->vfp.fpsr = (env->vfp.fpsr & ~FPSR_NZCV_MASK) | (z * FPSR_Z); - - return result; -} - -/* Round a float32 to an integer that fits in int32_t or int64_t. */ -static float32 frint_s(float32 f, float_status *fpst, int intsize) -{ - int old_flags = get_float_exception_flags(fpst); - uint32_t exp = extract32(f, 23, 8); - - if (unlikely(exp == 0xff)) { - /* NaN or Inf. */ - goto overflow; - } - - /* Round and re-extract the exponent. */ - f = float32_round_to_int(f, fpst); - exp = extract32(f, 23, 8); - - /* Validate the range of the result. */ - if (exp < 126 + intsize) { - /* abs(F) <= INT{N}_MAX */ - return f; - } - if (exp == 126 + intsize) { - uint32_t sign = extract32(f, 31, 1); - uint32_t frac = extract32(f, 0, 23); - if (sign && frac == 0) { - /* F == INT{N}_MIN */ - return f; - } - } - - overflow: - /* - * Raise Invalid and return INT{N}_MIN as a float. Revert any - * inexact exception float32_round_to_int may have raised. 
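A host-double model of the FJCVTZS semantics implemented by HELPER(fjcvtzs) above may help when reading the flag handling: the result is the value truncated toward zero and reduced modulo 2^32, and the Z flag (modelled by 'exact' here) is set only when nothing was lost. This is an illustration only, assuming host doubles instead of softfloat; exception flags, flush-to-zero and the precise NaN/Inf result values are not modelled.

#include <math.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t fjcvtzs_model(double value, bool *exact)
{
    if (isnan(value) || isinf(value)) {
        /* JavaScript's ToInt32 maps these to 0; never exact. */
        *exact = false;
        return 0;
    }
    double t = trunc(value);                           /* toward zero */
    uint32_t r = (uint32_t)(int64_t)fmod(t, 4294967296.0);
    *exact = (t == value)
             && t >= -2147483648.0 && t <= 2147483647.0
             && !(value == 0.0 && signbit(value));     /* -0.0 is inexact */
    return r;
}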
- */ - set_float_exception_flags(old_flags | float_flag_invalid, fpst); - return (0x100u + 126u + intsize) << 23; -} - -float32 HELPER(frint32_s)(float32 f, void *fpst) -{ - return frint_s(f, fpst, 32); -} - -float32 HELPER(frint64_s)(float32 f, void *fpst) -{ - return frint_s(f, fpst, 64); -} - -/* Round a float64 to an integer that fits in int32_t or int64_t. */ -static float64 frint_d(float64 f, float_status *fpst, int intsize) -{ - int old_flags = get_float_exception_flags(fpst); - uint32_t exp = extract64(f, 52, 11); - - if (unlikely(exp == 0x7ff)) { - /* NaN or Inf. */ - goto overflow; - } - - /* Round and re-extract the exponent. */ - f = float64_round_to_int(f, fpst); - exp = extract64(f, 52, 11); - - /* Validate the range of the result. */ - if (exp < 1022 + intsize) { - /* abs(F) <= INT{N}_MAX */ - return f; - } - if (exp == 1022 + intsize) { - uint64_t sign = extract64(f, 63, 1); - uint64_t frac = extract64(f, 0, 52); - if (sign && frac == 0) { - /* F == INT{N}_MIN */ - return f; - } - } - - overflow: - /* - * Raise Invalid and return INT{N}_MIN as a float. Revert any - * inexact exception float64_round_to_int may have raised. - */ - set_float_exception_flags(old_flags | float_flag_invalid, fpst); - return (uint64_t)(0x800 + 1022 + intsize) << 52; -} - -float64 HELPER(frint32_d)(float64 f, void *fpst) -{ - return frint_d(f, fpst, 32); -} - -float64 HELPER(frint64_d)(float64 f, void *fpst) -{ - return frint_d(f, fpst, 64); -} - -void HELPER(check_hcr_el2_trap)(CPUARMState *env, uint32_t rt, uint32_t reg) -{ - uint32_t syndrome; - - switch (reg) { - case ARM_VFP_MVFR0: - case ARM_VFP_MVFR1: - case ARM_VFP_MVFR2: - if (!(arm_hcr_el2_eff(env) & HCR_TID3)) { - return; - } - break; - case ARM_VFP_FPSID: - if (!(arm_hcr_el2_eff(env) & HCR_TID0)) { - return; - } - break; - default: - g_assert_not_reached(); - } - - syndrome = ((EC_FPIDTRAP << ARM_EL_EC_SHIFT) - | ARM_EL_IL - | (1 << 24) | (0xe << 20) | (7 << 14) - | (reg << 10) | (rt << 5) | 1); - - raise_exception(env, EXCP_HYP_TRAP, syndrome, 2); -} - -#endif diff --git a/target/avr/cpu-param.h b/target/avr/cpu-param.h index 93c2f470d07..f74bfc25804 100644 --- a/target/avr/cpu-param.h +++ b/target/avr/cpu-param.h @@ -21,17 +21,10 @@ #ifndef AVR_CPU_PARAM_H #define AVR_CPU_PARAM_H -#define TARGET_LONG_BITS 32 -/* - * TARGET_PAGE_BITS cannot be more than 8 bits because - * 1. all IO registers occupy [0x0000 .. 0x00ff] address range, and they - * should be implemented as a device and not memory - * 2. SRAM starts at the address 0x0100 - */ -#define TARGET_PAGE_BITS 8 +#define TARGET_PAGE_BITS 10 #define TARGET_PHYS_ADDR_SPACE_BITS 24 #define TARGET_VIRT_ADDR_SPACE_BITS 24 -#define TCG_GUEST_DEFAULT_MO 0 +#define TARGET_INSN_START_EXTRA_WORDS 0 #endif diff --git a/target/avr/cpu.c b/target/avr/cpu.c index 3132842d565..6995de6a12b 100644 --- a/target/avr/cpu.c +++ b/target/avr/cpu.c @@ -21,11 +21,13 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/qemu-print.h" -#include "exec/exec-all.h" +#include "exec/translation-block.h" +#include "system/address-spaces.h" #include "cpu.h" #include "disas/dis-asm.h" #include "tcg/debug-assert.h" #include "hw/qdev-properties.h" +#include "accel/tcg/cpu-ops.h" static void avr_cpu_set_pc(CPUState *cs, vaddr value) { @@ -52,6 +54,21 @@ static int avr_cpu_mmu_index(CPUState *cs, bool ifetch) return ifetch ? 
MMU_CODE_IDX : MMU_DATA_IDX; } +static TCGTBCPUState avr_get_tb_cpu_state(CPUState *cs) +{ + CPUAVRState *env = cpu_env(cs); + uint32_t flags = 0; + + if (env->fullacc) { + flags |= TB_FLAGS_FULL_ACCESS; + } + if (env->skip) { + flags |= TB_FLAGS_SKIP; + } + + return (TCGTBCPUState){ .pc = env->pc_w * 2, .flags = flags }; +} + static void avr_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -101,6 +118,7 @@ static void avr_cpu_reset_hold(Object *obj, ResetType type) static void avr_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) { + info->endian = BFD_ENDIAN_LITTLE; info->mach = bfd_arch_avr; info->print_insn = avr_print_insn; } @@ -108,6 +126,8 @@ static void avr_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) static void avr_cpu_realizefn(DeviceState *dev, Error **errp) { CPUState *cs = CPU(dev); + CPUAVRState *env = cpu_env(cs); + AVRCPU *cpu = env_archcpu(env); AVRCPUClass *mcc = AVR_CPU_GET_CLASS(dev); Error *local_err = NULL; @@ -120,6 +140,19 @@ static void avr_cpu_realizefn(DeviceState *dev, Error **errp) cpu_reset(cs); mcc->parent_realize(dev, errp); + + /* + * Two blocks in the low data space loop back into cpu registers. + */ + memory_region_init_io(&cpu->cpu_reg1, OBJECT(cpu), &avr_cpu_reg1, env, + "avr-cpu-reg1", 32); + memory_region_add_subregion(get_system_memory(), + OFFSET_DATA, &cpu->cpu_reg1); + + memory_region_init_io(&cpu->cpu_reg2, OBJECT(cpu), &avr_cpu_reg2, env, + "avr-cpu-reg2", 8); + memory_region_add_subregion(get_system_memory(), + OFFSET_DATA + 0x58, &cpu->cpu_reg2); } static void avr_cpu_set_int(void *opaque, int irq, int level) @@ -149,9 +182,8 @@ static void avr_cpu_initfn(Object *obj) sizeof(cpu->env.intsrc) * 8); } -static Property avr_cpu_properties[] = { +static const Property avr_cpu_properties[] = { DEFINE_PROP_UINT32("init-sp", AVRCPU, init_sp, 0), - DEFINE_PROP_END_OF_LIST() }; static ObjectClass *avr_cpu_class_by_name(const char *cpu_model) @@ -200,22 +232,33 @@ static void avr_cpu_dump_state(CPUState *cs, FILE *f, int flags) #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps avr_sysemu_ops = { + .has_work = avr_cpu_has_work, .get_phys_page_debug = avr_cpu_get_phys_page_debug, }; -#include "hw/core/tcg-cpu-ops.h" - static const TCGCPUOps avr_tcg_ops = { + .guest_default_memory_order = 0, + .mttcg_supported = false, .initialize = avr_cpu_tcg_init, + .translate_code = avr_cpu_translate_code, + .get_tb_cpu_state = avr_get_tb_cpu_state, .synchronize_from_tb = avr_cpu_synchronize_from_tb, .restore_state_to_opc = avr_restore_state_to_opc, + .mmu_index = avr_cpu_mmu_index, .cpu_exec_interrupt = avr_cpu_exec_interrupt, .cpu_exec_halt = avr_cpu_has_work, + .cpu_exec_reset = cpu_reset, .tlb_fill = avr_cpu_tlb_fill, .do_interrupt = avr_cpu_do_interrupt, + /* + * TODO: code and data wrapping are different, but for the most part + * AVR only references bytes or aligned code fetches. But we use + * non-aligned MO_16 accesses for stack push/pop. 
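With the two I/O windows registered in avr_cpu_realizefn() above, a byte load from the low AVR data space resolves roughly as sketched below. This is an illustration only: the struct, the flat SRAM array and the helper name are hypothetical, and the real dispatch goes through the memory API and the avr_cpu_reg1/avr_cpu_reg2 MemoryRegionOps defined later in this patch.

#include <stdint.h>

struct demo_env {
    uint8_t r[32];          /* general registers, data addresses 0..31 */
    uint16_t sp;
    uint8_t sreg;
    uint8_t sram[0x2000];   /* toy SRAM standing in for the rest */
};

static uint8_t demo_data_load(struct demo_env *env, uint32_t addr)
{
    if (addr < 32) {
        return env->r[addr];               /* "avr-cpu-reg1" window */
    }
    if (addr >= 0x58 && addr < 0x60) {     /* "avr-cpu-reg2" window */
        switch (addr - 0x58) {
        case 5:
            return env->sp & 0xff;         /* SPL */
        case 6:
            return env->sp >> 8;           /* SPH */
        case 7:
            return env->sreg;              /* SREG */
        default:
            return 0;                      /* RAMPx / EIND not modelled */
        }
    }
    return env->sram[addr & 0x1fff];       /* ordinary SRAM / peripherals */
}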
+ */ + .pointer_wrap = cpu_pointer_wrap_uint32, }; -static void avr_cpu_class_init(ObjectClass *oc, void *data) +static void avr_cpu_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); @@ -231,8 +274,6 @@ static void avr_cpu_class_init(ObjectClass *oc, void *data) cc->class_by_name = avr_cpu_class_by_name; - cc->has_work = avr_cpu_has_work; - cc->mmu_index = avr_cpu_mmu_index; cc->dump_state = avr_cpu_dump_state; cc->set_pc = avr_cpu_set_pc; cc->get_pc = avr_cpu_get_pc; diff --git a/target/avr/cpu.h b/target/avr/cpu.h index 47255351021..518e243d812 100644 --- a/target/avr/cpu.h +++ b/target/avr/cpu.h @@ -22,7 +22,10 @@ #define QEMU_AVR_CPU_H #include "cpu-qom.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" +#include "system/memory.h" #ifdef CONFIG_USER_ONLY #error "AVR 8-bit does not support user mode" @@ -44,8 +47,16 @@ /* Number of CPU registers */ #define NUMBER_OF_CPU_REGISTERS 32 -/* Number of IO registers accessible by ld/st/in/out */ -#define NUMBER_OF_IO_REGISTERS 64 + +/* CPU registers mapped into i/o ports 0x38-0x3f. */ +#define REG_38_RAMPD 0 +#define REG_38_RAMPX 1 +#define REG_38_RAMPY 2 +#define REG_38_RAMPZ 3 +#define REG_38_EIDN 4 +#define REG_38_SPL 5 +#define REG_38_SPH 6 +#define REG_38_SREG 7 /* * Offsets of AVR memory regions in host memory space. @@ -60,8 +71,6 @@ #define OFFSET_CODE 0x00000000 /* CPU registers, IO registers, and SRAM */ #define OFFSET_DATA 0x00800000 -/* CPU registers specifically, these are mapped at the start of data */ -#define OFFSET_CPU_REGISTERS OFFSET_DATA /* * IO registers, including status register, stack pointer, and memory * mapped peripherals, mapped just after CPU registers @@ -144,6 +153,9 @@ struct ArchCPU { CPUAVRState env; + MemoryRegion cpu_reg1; + MemoryRegion cpu_reg2; + /* Initial value of stack pointer */ uint32_t init_sp; }; @@ -183,6 +195,8 @@ static inline void set_avr_feature(CPUAVRState *env, int feature) } void avr_cpu_tcg_init(void); +void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); int cpu_avr_exec(CPUState *cpu); @@ -191,24 +205,6 @@ enum { TB_FLAGS_SKIP = 2, }; -static inline void cpu_get_tb_cpu_state(CPUAVRState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *pflags) -{ - uint32_t flags = 0; - - *pc = env->pc_w * 2; - *cs_base = 0; - - if (env->fullacc) { - flags |= TB_FLAGS_FULL_ACCESS; - } - if (env->skip) { - flags |= TB_FLAGS_SKIP; - } - - *pflags = flags; -} - static inline int cpu_interrupts_enabled(CPUAVRState *env) { return env->sregI != 0; @@ -242,6 +238,7 @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); -#include "exec/cpu-all.h" +extern const MemoryRegionOps avr_cpu_reg1; +extern const MemoryRegionOps avr_cpu_reg2; #endif /* QEMU_AVR_CPU_H */ diff --git a/target/avr/disas.c b/target/avr/disas.c index b7689e8d7cd..d341030174f 100644 --- a/target/avr/disas.c +++ b/target/avr/disas.c @@ -68,28 +68,35 @@ static bool decode_insn(DisasContext *ctx, uint16_t insn); int avr_print_insn(bfd_vma addr, disassemble_info *info) { - DisasContext ctx; + DisasContext ctx = { info }; DisasContext *pctx = &ctx; bfd_byte buffer[4]; uint16_t insn; int status; - ctx.info = info; - - status = info->read_memory_func(addr, buffer, 4, info); + status = info->read_memory_func(addr, buffer, 2, info); if (status != 0) { info->memory_error_func(status, addr, info); return -1; } insn = 
bfd_getl16(buffer); - ctx.next_word = bfd_getl16(buffer + 2); - ctx.next_word_used = false; + + status = info->read_memory_func(addr + 2, buffer + 2, 2, info); + if (status == 0) { + ctx.next_word = bfd_getl16(buffer + 2); + } if (!decode_insn(&ctx, insn)) { output(".db", "0x%02x, 0x%02x", buffer[0], buffer[1]); } - return ctx.next_word_used ? 4 : 2; + if (!ctx.next_word_used) { + return 2; + } else if (status == 0) { + return 4; + } + info->memory_error_func(status, addr + 2, info); + return -1; } diff --git a/target/avr/gdbstub.c b/target/avr/gdbstub.c index d6d3c1479b3..aea71282a58 100644 --- a/target/avr/gdbstub.c +++ b/target/avr/gdbstub.c @@ -69,13 +69,13 @@ int avr_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) /* SP */ if (n == 33) { - env->sp = lduw_p(mem_buf); + env->sp = lduw_le_p(mem_buf); return 2; } /* PC */ if (n == 34) { - env->pc_w = ldl_p(mem_buf) / 2; + env->pc_w = ldl_le_p(mem_buf) / 2; return 4; } diff --git a/target/avr/helper.c b/target/avr/helper.c index 345708a1b39..b9cd6d5ef27 100644 --- a/target/avr/helper.c +++ b/target/avr/helper.c @@ -22,11 +22,11 @@ #include "qemu/log.h" #include "qemu/error-report.h" #include "cpu.h" -#include "hw/core/tcg-cpu-ops.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-ops.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" -#include "exec/cpu_ldst.h" -#include "exec/address-spaces.h" +#include "exec/target_page.h" +#include "accel/tcg/cpu-ldst.h" #include "exec/helper-proto.h" bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request) @@ -67,6 +67,11 @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request) return false; } +static void do_stb(CPUAVRState *env, uint32_t addr, uint8_t data, uintptr_t ra) +{ + cpu_stb_mmuidx_ra(env, addr, data, MMU_DATA_IDX, ra); +} + void avr_cpu_do_interrupt(CPUState *cs) { CPUAVRState *env = cpu_env(cs); @@ -83,14 +88,14 @@ void avr_cpu_do_interrupt(CPUState *cs) } if (avr_feature(env, AVR_FEATURE_3_BYTE_PC)) { - cpu_stb_data(env, env->sp--, (ret & 0x0000ff)); - cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8); - cpu_stb_data(env, env->sp--, (ret & 0xff0000) >> 16); + do_stb(env, env->sp--, ret, 0); + do_stb(env, env->sp--, ret >> 8, 0); + do_stb(env, env->sp--, ret >> 16, 0); } else if (avr_feature(env, AVR_FEATURE_2_BYTE_PC)) { - cpu_stb_data(env, env->sp--, (ret & 0x0000ff)); - cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8); + do_stb(env, env->sp--, ret, 0); + do_stb(env, env->sp--, ret >> 8, 0); } else { - cpu_stb_data(env, env->sp--, (ret & 0x0000ff)); + do_stb(env, env->sp--, ret, 0); } env->pc_w = base + vector * size; @@ -108,7 +113,7 @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { - int prot, page_size = TARGET_PAGE_SIZE; + int prot; uint32_t paddr; address &= TARGET_PAGE_MASK; @@ -133,23 +138,9 @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size, /* Access to memory. */ paddr = OFFSET_DATA + address; prot = PAGE_READ | PAGE_WRITE; - if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) { - /* - * Access to CPU registers, exit and rebuilt this TB to use - * full access in case it touches specially handled registers - * like SREG or SP. For probing, set page_size = 1, in order - * to force tlb_fill to be called for the next access. 
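A minimal sketch of the return-address push pattern used by avr_cpu_do_interrupt() above: one byte per store, low byte first, stack pointer post-decremented. The flat array and helper name are hypothetical stand-ins for do_stb() and guest memory; pc_bytes would be 3, 2 or 1 depending on AVR_FEATURE_{3,2}_BYTE_PC.

#include <stdint.h>

static void demo_push_ret(uint8_t *stack, uint32_t *sp,
                          uint32_t ret, int pc_bytes)
{
    for (int i = 0; i < pc_bytes; i++) {
        stack[(*sp)--] = (uint8_t)(ret >> (8 * i));   /* low byte first */
    }
}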
- */ - if (probe) { - page_size = 1; - } else { - cpu_env(cs)->fullacc = 1; - cpu_loop_exit_restore(cs, retaddr); - } - } } - tlb_set_page(cs, address, paddr, prot, mmu_idx, page_size); + tlb_set_page(cs, address, paddr, prot, mmu_idx, TARGET_PAGE_SIZE); return true; } @@ -203,156 +194,129 @@ void helper_wdr(CPUAVRState *env) } /* - * This function implements IN instruction - * - * It does the following - * a. if an IO register belongs to CPU, its value is read and returned - * b. otherwise io address is translated to mem address and physical memory - * is read. - * c. it caches the value for sake of SBI, SBIC, SBIS & CBI implementation - * + * The first 32 bytes of the data space are mapped to the cpu regs. + * We cannot write these from normal store operations because TCG + * does not expect global temps to be modified -- a global may be + * live in a host cpu register across the store. We can however + * read these, as TCG does make sure the global temps are saved + * in case the load operation traps. */ -target_ulong helper_inb(CPUAVRState *env, uint32_t port) + +static uint64_t avr_cpu_reg1_read(void *opaque, hwaddr addr, unsigned size) { - target_ulong data = 0; + CPUAVRState *env = opaque; - switch (port) { - case 0x38: /* RAMPD */ - data = 0xff & (env->rampD >> 16); - break; - case 0x39: /* RAMPX */ - data = 0xff & (env->rampX >> 16); - break; - case 0x3a: /* RAMPY */ - data = 0xff & (env->rampY >> 16); - break; - case 0x3b: /* RAMPZ */ - data = 0xff & (env->rampZ >> 16); - break; - case 0x3c: /* EIND */ - data = 0xff & (env->eind >> 16); - break; - case 0x3d: /* SPL */ - data = env->sp & 0x00ff; - break; - case 0x3e: /* SPH */ - data = env->sp >> 8; - break; - case 0x3f: /* SREG */ - data = cpu_get_sreg(env); - break; - default: - /* not a special register, pass to normal memory access */ - data = address_space_ldub(&address_space_memory, - OFFSET_IO_REGISTERS + port, - MEMTXATTRS_UNSPECIFIED, NULL); + assert(addr < 32); + return env->r[addr]; +} + +/* + * The range 0x38-0x3f of the i/o space is mapped to cpu regs. + * As above, we cannot write these from normal store operations. + */ + +static uint64_t avr_cpu_reg2_read(void *opaque, hwaddr addr, unsigned size) +{ + CPUAVRState *env = opaque; + + switch (addr) { + case REG_38_RAMPD: + return 0xff & (env->rampD >> 16); + case REG_38_RAMPX: + return 0xff & (env->rampX >> 16); + case REG_38_RAMPY: + return 0xff & (env->rampY >> 16); + case REG_38_RAMPZ: + return 0xff & (env->rampZ >> 16); + case REG_38_EIDN: + return 0xff & (env->eind >> 16); + case REG_38_SPL: + return env->sp & 0x00ff; + case REG_38_SPH: + return 0xff & (env->sp >> 8); + case REG_38_SREG: + return cpu_get_sreg(env); } + g_assert_not_reached(); +} - return data; +static void avr_cpu_trap_write(void *opaque, hwaddr addr, + uint64_t data64, unsigned size) +{ + CPUAVRState *env = opaque; + CPUState *cs = env_cpu(env); + + env->fullacc = true; + cpu_loop_exit_restore(cs, cs->mem_io_pc); } +const MemoryRegionOps avr_cpu_reg1 = { + .read = avr_cpu_reg1_read, + .write = avr_cpu_trap_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 1, + .valid.max_access_size = 1, +}; + +const MemoryRegionOps avr_cpu_reg2 = { + .read = avr_cpu_reg2_read, + .write = avr_cpu_trap_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 1, + .valid.max_access_size = 1, +}; + /* - * This function implements OUT instruction - * - * It does the following - * a. if an IO register belongs to CPU, its value is written into the register - * b. 
otherwise io address is translated to mem address and physical memory - * is written. - * c. it caches the value for sake of SBI, SBIC, SBIS & CBI implementation - * + * this function implements ST instruction when there is a possibility to write + * into a CPU register */ -void helper_outb(CPUAVRState *env, uint32_t port, uint32_t data) +void helper_fullwr(CPUAVRState *env, uint32_t data, uint32_t addr) { - data &= 0x000000ff; + env->fullacc = false; - switch (port) { - case 0x38: /* RAMPD */ + switch (addr) { + case 0 ... 31: + /* CPU registers */ + env->r[addr] = data; + break; + + case REG_38_RAMPD + 0x38 + NUMBER_OF_CPU_REGISTERS: if (avr_feature(env, AVR_FEATURE_RAMPD)) { - env->rampD = (data & 0xff) << 16; + env->rampD = data << 16; } break; - case 0x39: /* RAMPX */ + case REG_38_RAMPX + 0x38 + NUMBER_OF_CPU_REGISTERS: if (avr_feature(env, AVR_FEATURE_RAMPX)) { - env->rampX = (data & 0xff) << 16; + env->rampX = data << 16; } break; - case 0x3a: /* RAMPY */ + case REG_38_RAMPY + 0x38 + NUMBER_OF_CPU_REGISTERS: if (avr_feature(env, AVR_FEATURE_RAMPY)) { - env->rampY = (data & 0xff) << 16; + env->rampY = data << 16; } break; - case 0x3b: /* RAMPZ */ + case REG_38_RAMPZ + 0x38 + NUMBER_OF_CPU_REGISTERS: if (avr_feature(env, AVR_FEATURE_RAMPZ)) { - env->rampZ = (data & 0xff) << 16; + env->rampZ = data << 16; } break; - case 0x3c: /* EIDN */ - env->eind = (data & 0xff) << 16; + case REG_38_EIDN + 0x38 + NUMBER_OF_CPU_REGISTERS: + env->eind = data << 16; break; - case 0x3d: /* SPL */ - env->sp = (env->sp & 0xff00) | (data); + case REG_38_SPL + 0x38 + NUMBER_OF_CPU_REGISTERS: + env->sp = (env->sp & 0xff00) | data; break; - case 0x3e: /* SPH */ + case REG_38_SPH + 0x38 + NUMBER_OF_CPU_REGISTERS: if (avr_feature(env, AVR_FEATURE_2_BYTE_SP)) { env->sp = (env->sp & 0x00ff) | (data << 8); } break; - case 0x3f: /* SREG */ + case REG_38_SREG + 0x38 + NUMBER_OF_CPU_REGISTERS: cpu_set_sreg(env, data); break; - default: - /* not a special register, pass to normal memory access */ - address_space_stb(&address_space_memory, OFFSET_IO_REGISTERS + port, - data, MEMTXATTRS_UNSPECIFIED, NULL); - } -} - -/* - * this function implements LD instruction when there is a possibility to read - * from a CPU register - */ -target_ulong helper_fullrd(CPUAVRState *env, uint32_t addr) -{ - uint8_t data; - - env->fullacc = false; - - if (addr < NUMBER_OF_CPU_REGISTERS) { - /* CPU registers */ - data = env->r[addr]; - } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) { - /* IO registers */ - data = helper_inb(env, addr - NUMBER_OF_CPU_REGISTERS); - } else { - /* memory */ - data = address_space_ldub(&address_space_memory, OFFSET_DATA + addr, - MEMTXATTRS_UNSPECIFIED, NULL); - } - return data; -} -/* - * this function implements ST instruction when there is a possibility to write - * into a CPU register - */ -void helper_fullwr(CPUAVRState *env, uint32_t data, uint32_t addr) -{ - env->fullacc = false; - - /* Following logic assumes this: */ - assert(OFFSET_CPU_REGISTERS == OFFSET_DATA); - assert(OFFSET_IO_REGISTERS == OFFSET_CPU_REGISTERS + - NUMBER_OF_CPU_REGISTERS); - - if (addr < NUMBER_OF_CPU_REGISTERS) { - /* CPU registers */ - env->r[addr] = data; - } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) { - /* IO registers */ - helper_outb(env, addr - NUMBER_OF_CPU_REGISTERS, data); - } else { - /* memory */ - address_space_stb(&address_space_memory, OFFSET_DATA + addr, data, - MEMTXATTRS_UNSPECIFIED, NULL); + default: + do_stb(env, addr, data, GETPC()); + break; } } diff --git 
a/target/avr/helper.h b/target/avr/helper.h index 4d02e648fae..e8d13e925f4 100644 --- a/target/avr/helper.h +++ b/target/avr/helper.h @@ -23,7 +23,4 @@ DEF_HELPER_1(debug, noreturn, env) DEF_HELPER_1(break, noreturn, env) DEF_HELPER_1(sleep, noreturn, env) DEF_HELPER_1(unsupported, noreturn, env) -DEF_HELPER_3(outb, void, env, i32, i32) -DEF_HELPER_2(inb, tl, env, i32) DEF_HELPER_3(fullwr, void, env, i32, i32) -DEF_HELPER_2(fullrd, tl, env, i32) diff --git a/target/avr/insn.decode b/target/avr/insn.decode index 482c23ad0cf..cc302249db7 100644 --- a/target/avr/insn.decode +++ b/target/avr/insn.decode @@ -118,11 +118,8 @@ BRBC 1111 01 ....... ... @op_bit_imm @io_rd_imm .... . .. ..... .... &rd_imm rd=%rd imm=%io_imm @ldst_d .. . . .. . rd:5 . ... &rd_imm imm=%ldst_d_imm -# The 16-bit immediate is completely in the next word. -# Fields cannot be defined with no bits, so we cannot play -# the same trick and append to a zero-bit value. -# Defer reading the immediate until trans_{LDS,STS}. -@ldst_s .... ... rd:5 .... imm=0 +%ldst_imm !function=next_word +@ldst_s .... ... rd:5 .... imm=%ldst_imm MOV 0010 11 . ..... .... @op_rd_rr MOVW 0000 0001 .... .... &rd_rr rd=%rd_d rr=%rr_d diff --git a/target/avr/translate.c b/target/avr/translate.c index 2d518921159..804b0b21dbd 100644 --- a/target/avr/translate.c +++ b/target/avr/translate.c @@ -22,12 +22,13 @@ #include "qemu/qemu-print.h" #include "tcg/tcg.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/translation-block.h" #include "tcg/tcg-op.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "exec/log.h" #include "exec/translator.h" +#include "exec/target_page.h" #define HELPER_H "helper.h" #include "exec/helper-info.c.inc" @@ -193,6 +194,9 @@ static bool avr_have_feature(DisasContext *ctx, int feature) static bool decode_insn(DisasContext *ctx, uint16_t insn); #include "decode-insn.c.inc" +static void gen_inb(DisasContext *ctx, TCGv data, int port); +static void gen_outb(DisasContext *ctx, TCGv data, int port); + /* * Arithmetic Instructions */ @@ -1292,9 +1296,8 @@ static bool trans_SBRS(DisasContext *ctx, arg_SBRS *a) static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a) { TCGv data = tcg_temp_new_i32(); - TCGv port = tcg_constant_i32(a->reg); - gen_helper_inb(data, tcg_env, port); + gen_inb(ctx, data, a->reg); tcg_gen_andi_tl(data, data, 1 << a->bit); ctx->skip_cond = TCG_COND_EQ; ctx->skip_var0 = data; @@ -1310,9 +1313,8 @@ static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a) static bool trans_SBIS(DisasContext *ctx, arg_SBIS *a) { TCGv data = tcg_temp_new_i32(); - TCGv port = tcg_constant_i32(a->reg); - gen_helper_inb(data, tcg_env, port); + gen_inb(ctx, data, a->reg); tcg_gen_andi_tl(data, data, 1 << a->bit); ctx->skip_cond = TCG_COND_NE; ctx->skip_var0 = data; @@ -1501,11 +1503,18 @@ static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr) static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr) { - if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) { - gen_helper_fullrd(data, tcg_env, addr); - } else { - tcg_gen_qemu_ld_tl(data, addr, MMU_DATA_IDX, MO_UB); - } + tcg_gen_qemu_ld_tl(data, addr, MMU_DATA_IDX, MO_UB); +} + +static void gen_inb(DisasContext *ctx, TCGv data, int port) +{ + gen_data_load(ctx, data, tcg_constant_i32(port + NUMBER_OF_CPU_REGISTERS)); +} + +static void gen_outb(DisasContext *ctx, TCGv data, int port) +{ + gen_helper_fullwr(tcg_env, data, + tcg_constant_i32(port + NUMBER_OF_CPU_REGISTERS)); } /* @@ -1577,7 +1586,6 @@ static bool trans_LDS(DisasContext *ctx, 
arg_LDS *a) TCGv Rd = cpu_r[a->rd]; TCGv addr = tcg_temp_new_i32(); TCGv H = cpu_rampD; - a->imm = next_word(ctx); tcg_gen_mov_tl(addr, H); /* addr = H:M:L */ tcg_gen_shli_tl(addr, addr, 16); @@ -1782,7 +1790,6 @@ static bool trans_STS(DisasContext *ctx, arg_STS *a) TCGv Rd = cpu_r[a->rd]; TCGv addr = tcg_temp_new_i32(); TCGv H = cpu_rampD; - a->imm = next_word(ctx); tcg_gen_mov_tl(addr, H); /* addr = H:M:L */ tcg_gen_shli_tl(addr, addr, 16); @@ -2127,9 +2134,8 @@ static bool trans_SPMX(DisasContext *ctx, arg_SPMX *a) static bool trans_IN(DisasContext *ctx, arg_IN *a) { TCGv Rd = cpu_r[a->rd]; - TCGv port = tcg_constant_i32(a->imm); - gen_helper_inb(Rd, tcg_env, port); + gen_inb(ctx, Rd, a->imm); return true; } @@ -2140,9 +2146,8 @@ static bool trans_IN(DisasContext *ctx, arg_IN *a) static bool trans_OUT(DisasContext *ctx, arg_OUT *a) { TCGv Rd = cpu_r[a->rd]; - TCGv port = tcg_constant_i32(a->imm); - gen_helper_outb(tcg_env, port, Rd); + gen_outb(ctx, Rd, a->imm); return true; } @@ -2408,11 +2413,10 @@ static bool trans_SWAP(DisasContext *ctx, arg_SWAP *a) static bool trans_SBI(DisasContext *ctx, arg_SBI *a) { TCGv data = tcg_temp_new_i32(); - TCGv port = tcg_constant_i32(a->reg); - gen_helper_inb(data, tcg_env, port); + gen_inb(ctx, data, a->reg); tcg_gen_ori_tl(data, data, 1 << a->bit); - gen_helper_outb(tcg_env, port, data); + gen_outb(ctx, data, a->reg); return true; } @@ -2423,11 +2427,10 @@ static bool trans_SBI(DisasContext *ctx, arg_SBI *a) static bool trans_CBI(DisasContext *ctx, arg_CBI *a) { TCGv data = tcg_temp_new_i32(); - TCGv port = tcg_constant_i32(a->reg); - gen_helper_inb(data, tcg_env, port); + gen_inb(ctx, data, a->reg); tcg_gen_andi_tl(data, data, ~(1 << a->bit)); - gen_helper_outb(tcg_env, port, data); + gen_outb(ctx, data, a->reg); return true; } @@ -2598,7 +2601,7 @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a) * * - translate() * - canonicalize_skip() - * - gen_intermediate_code() + * - translate_code() * - restore_state_to_opc() * */ @@ -2794,8 +2797,8 @@ static const TranslatorOps avr_tr_ops = { .tb_stop = avr_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc = { }; translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base); diff --git a/target/cris/Kconfig b/target/cris/Kconfig deleted file mode 100644 index 3fdc309fbbd..00000000000 --- a/target/cris/Kconfig +++ /dev/null @@ -1,2 +0,0 @@ -config CRIS - bool diff --git a/target/cris/cpu-param.h b/target/cris/cpu-param.h deleted file mode 100644 index b31b742c0db..00000000000 --- a/target/cris/cpu-param.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * CRIS cpu parameters for qemu. 
- * - * Copyright (c) 2007 AXIS Communications AB - * SPDX-License-Identifier: LGPL-2.0+ - */ - -#ifndef CRIS_CPU_PARAM_H -#define CRIS_CPU_PARAM_H - -#define TARGET_LONG_BITS 32 -#define TARGET_PAGE_BITS 13 -#define TARGET_PHYS_ADDR_SPACE_BITS 32 -#define TARGET_VIRT_ADDR_SPACE_BITS 32 - -#endif diff --git a/target/cris/cpu-qom.h b/target/cris/cpu-qom.h deleted file mode 100644 index 741ca97a1b1..00000000000 --- a/target/cris/cpu-qom.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * QEMU CRIS CPU QOM header (target agnostic) - * - * Copyright (c) 2012 SUSE LINUX Products GmbH - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see - * - */ -#ifndef QEMU_CRIS_CPU_QOM_H -#define QEMU_CRIS_CPU_QOM_H - -#include "hw/core/cpu.h" - -#define TYPE_CRIS_CPU "cris-cpu" - -OBJECT_DECLARE_CPU_TYPE(CRISCPU, CRISCPUClass, CRIS_CPU) - -#define CRIS_CPU_TYPE_SUFFIX "-" TYPE_CRIS_CPU -#define CRIS_CPU_TYPE_NAME(name) (name CRIS_CPU_TYPE_SUFFIX) - -#endif diff --git a/target/cris/cpu.c b/target/cris/cpu.c deleted file mode 100644 index ff31ca7fbc1..00000000000 --- a/target/cris/cpu.c +++ /dev/null @@ -1,323 +0,0 @@ -/* - * QEMU CRIS CPU - * - * Copyright (c) 2008 AXIS Communications AB - * Written by Edgar E. Iglesias. - * - * Copyright (c) 2012 SUSE LINUX Products GmbH - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see - * - */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "qemu/qemu-print.h" -#include "cpu.h" -#include "mmu.h" - - -static void cris_cpu_set_pc(CPUState *cs, vaddr value) -{ - CRISCPU *cpu = CRIS_CPU(cs); - - cpu->env.pc = value; -} - -static vaddr cris_cpu_get_pc(CPUState *cs) -{ - CRISCPU *cpu = CRIS_CPU(cs); - - return cpu->env.pc; -} - -static void cris_restore_state_to_opc(CPUState *cs, - const TranslationBlock *tb, - const uint64_t *data) -{ - CRISCPU *cpu = CRIS_CPU(cs); - - cpu->env.pc = data[0]; -} - -static bool cris_cpu_has_work(CPUState *cs) -{ - return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); -} - -static int cris_cpu_mmu_index(CPUState *cs, bool ifetch) -{ - return !!(cpu_env(cs)->pregs[PR_CCS] & U_FLAG); -} - -static void cris_cpu_reset_hold(Object *obj, ResetType type) -{ - CPUState *cs = CPU(obj); - CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj); - CPUCRISState *env = cpu_env(cs); - uint32_t vr; - - if (ccc->parent_phases.hold) { - ccc->parent_phases.hold(obj, type); - } - - vr = env->pregs[PR_VR]; - memset(env, 0, offsetof(CPUCRISState, end_reset_fields)); - env->pregs[PR_VR] = vr; - -#if defined(CONFIG_USER_ONLY) - /* start in user mode with interrupts enabled. */ - env->pregs[PR_CCS] |= U_FLAG | I_FLAG | P_FLAG; -#else - cris_mmu_init(env); - env->pregs[PR_CCS] = 0; -#endif -} - -static ObjectClass *cris_cpu_class_by_name(const char *cpu_model) -{ - ObjectClass *oc; - char *typename; - -#if defined(CONFIG_USER_ONLY) - if (strcasecmp(cpu_model, "any") == 0) { - return object_class_by_name(CRIS_CPU_TYPE_NAME("crisv32")); - } -#endif - - typename = g_strdup_printf(CRIS_CPU_TYPE_NAME("%s"), cpu_model); - oc = object_class_by_name(typename); - g_free(typename); - - return oc; -} - -static void cris_cpu_realizefn(DeviceState *dev, Error **errp) -{ - CPUState *cs = CPU(dev); - CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(dev); - Error *local_err = NULL; - - cpu_exec_realizefn(cs, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return; - } - - cpu_reset(cs); - qemu_init_vcpu(cs); - - ccc->parent_realize(dev, errp); -} - -#ifndef CONFIG_USER_ONLY -static void cris_cpu_set_irq(void *opaque, int irq, int level) -{ - CRISCPU *cpu = opaque; - CPUState *cs = CPU(cpu); - int type = irq == CRIS_CPU_IRQ ? CPU_INTERRUPT_HARD : CPU_INTERRUPT_NMI; - - if (irq == CRIS_CPU_IRQ) { - /* - * The PIC passes us the vector for the IRQ as the value it sends - * over the qemu_irq line - */ - cpu->env.interrupt_vector = level; - } - - if (level) { - cpu_interrupt(cs, type); - } else { - cpu_reset_interrupt(cs, type); - } -} -#endif - -static void cris_disas_set_info(CPUState *cpu, disassemble_info *info) -{ - if (cpu_env(cpu)->pregs[PR_VR] != 32) { - info->mach = bfd_mach_cris_v0_v10; - info->print_insn = print_insn_crisv10; - } else { - info->mach = bfd_mach_cris_v32; - info->print_insn = print_insn_crisv32; - } -} - -static void cris_cpu_initfn(Object *obj) -{ - CRISCPU *cpu = CRIS_CPU(obj); - CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj); - CPUCRISState *env = &cpu->env; - - env->pregs[PR_VR] = ccc->vr; - -#ifndef CONFIG_USER_ONLY - /* IRQ and NMI lines. 
*/ - qdev_init_gpio_in(DEVICE(cpu), cris_cpu_set_irq, 2); -#endif -} - -#ifndef CONFIG_USER_ONLY -#include "hw/core/sysemu-cpu-ops.h" - -static const struct SysemuCPUOps cris_sysemu_ops = { - .get_phys_page_debug = cris_cpu_get_phys_page_debug, -}; -#endif - -#include "hw/core/tcg-cpu-ops.h" - -static const TCGCPUOps crisv10_tcg_ops = { - .initialize = cris_initialize_crisv10_tcg, - .restore_state_to_opc = cris_restore_state_to_opc, - -#ifndef CONFIG_USER_ONLY - .tlb_fill = cris_cpu_tlb_fill, - .cpu_exec_interrupt = cris_cpu_exec_interrupt, - .cpu_exec_halt = cris_cpu_has_work, - .do_interrupt = crisv10_cpu_do_interrupt, -#endif /* !CONFIG_USER_ONLY */ -}; - -static const TCGCPUOps crisv32_tcg_ops = { - .initialize = cris_initialize_tcg, - .restore_state_to_opc = cris_restore_state_to_opc, - -#ifndef CONFIG_USER_ONLY - .tlb_fill = cris_cpu_tlb_fill, - .cpu_exec_interrupt = cris_cpu_exec_interrupt, - .cpu_exec_halt = cris_cpu_has_work, - .do_interrupt = cris_cpu_do_interrupt, -#endif /* !CONFIG_USER_ONLY */ -}; - -static void crisv8_cpu_class_init(ObjectClass *oc, void *data) -{ - CPUClass *cc = CPU_CLASS(oc); - CRISCPUClass *ccc = CRIS_CPU_CLASS(oc); - - ccc->vr = 8; - cc->gdb_read_register = crisv10_cpu_gdb_read_register; - cc->tcg_ops = &crisv10_tcg_ops; -} - -static void crisv9_cpu_class_init(ObjectClass *oc, void *data) -{ - CPUClass *cc = CPU_CLASS(oc); - CRISCPUClass *ccc = CRIS_CPU_CLASS(oc); - - ccc->vr = 9; - cc->gdb_read_register = crisv10_cpu_gdb_read_register; - cc->tcg_ops = &crisv10_tcg_ops; -} - -static void crisv10_cpu_class_init(ObjectClass *oc, void *data) -{ - CPUClass *cc = CPU_CLASS(oc); - CRISCPUClass *ccc = CRIS_CPU_CLASS(oc); - - ccc->vr = 10; - cc->gdb_read_register = crisv10_cpu_gdb_read_register; - cc->tcg_ops = &crisv10_tcg_ops; -} - -static void crisv11_cpu_class_init(ObjectClass *oc, void *data) -{ - CPUClass *cc = CPU_CLASS(oc); - CRISCPUClass *ccc = CRIS_CPU_CLASS(oc); - - ccc->vr = 11; - cc->gdb_read_register = crisv10_cpu_gdb_read_register; - cc->tcg_ops = &crisv10_tcg_ops; -} - -static void crisv17_cpu_class_init(ObjectClass *oc, void *data) -{ - CPUClass *cc = CPU_CLASS(oc); - CRISCPUClass *ccc = CRIS_CPU_CLASS(oc); - - ccc->vr = 17; - cc->gdb_read_register = crisv10_cpu_gdb_read_register; - cc->tcg_ops = &crisv10_tcg_ops; -} - -static void crisv32_cpu_class_init(ObjectClass *oc, void *data) -{ - CPUClass *cc = CPU_CLASS(oc); - CRISCPUClass *ccc = CRIS_CPU_CLASS(oc); - - ccc->vr = 32; - cc->tcg_ops = &crisv32_tcg_ops; -} - -static void cris_cpu_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - CPUClass *cc = CPU_CLASS(oc); - CRISCPUClass *ccc = CRIS_CPU_CLASS(oc); - ResettableClass *rc = RESETTABLE_CLASS(oc); - - device_class_set_parent_realize(dc, cris_cpu_realizefn, - &ccc->parent_realize); - - resettable_class_set_parent_phases(rc, NULL, cris_cpu_reset_hold, NULL, - &ccc->parent_phases); - - cc->class_by_name = cris_cpu_class_by_name; - cc->has_work = cris_cpu_has_work; - cc->mmu_index = cris_cpu_mmu_index; - cc->dump_state = cris_cpu_dump_state; - cc->set_pc = cris_cpu_set_pc; - cc->get_pc = cris_cpu_get_pc; - cc->gdb_read_register = cris_cpu_gdb_read_register; - cc->gdb_write_register = cris_cpu_gdb_write_register; -#ifndef CONFIG_USER_ONLY - dc->vmsd = &vmstate_cris_cpu; - cc->sysemu_ops = &cris_sysemu_ops; -#endif - - cc->gdb_num_core_regs = 49; - cc->gdb_stop_before_watchpoint = true; - - cc->disas_set_info = cris_disas_set_info; -} - -#define DEFINE_CRIS_CPU_TYPE(cpu_model, initfn) \ - { \ - .parent = 
TYPE_CRIS_CPU, \ - .class_init = initfn, \ - .name = CRIS_CPU_TYPE_NAME(cpu_model), \ - } - -static const TypeInfo cris_cpu_model_type_infos[] = { - { - .name = TYPE_CRIS_CPU, - .parent = TYPE_CPU, - .instance_size = sizeof(CRISCPU), - .instance_align = __alignof(CRISCPU), - .instance_init = cris_cpu_initfn, - .abstract = true, - .class_size = sizeof(CRISCPUClass), - .class_init = cris_cpu_class_init, - }, - DEFINE_CRIS_CPU_TYPE("crisv8", crisv8_cpu_class_init), - DEFINE_CRIS_CPU_TYPE("crisv9", crisv9_cpu_class_init), - DEFINE_CRIS_CPU_TYPE("crisv10", crisv10_cpu_class_init), - DEFINE_CRIS_CPU_TYPE("crisv11", crisv11_cpu_class_init), - DEFINE_CRIS_CPU_TYPE("crisv17", crisv17_cpu_class_init), - DEFINE_CRIS_CPU_TYPE("crisv32", crisv32_cpu_class_init), -}; - -DEFINE_TYPES(cris_cpu_model_type_infos) diff --git a/target/cris/cpu.h b/target/cris/cpu.h deleted file mode 100644 index 3904e5448c6..00000000000 --- a/target/cris/cpu.h +++ /dev/null @@ -1,286 +0,0 @@ -/* - * CRIS virtual CPU header - * - * Copyright (c) 2007 AXIS Communications AB - * Written by Edgar E. Iglesias - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#ifndef CRIS_CPU_H -#define CRIS_CPU_H - -#include "cpu-qom.h" -#include "exec/cpu-defs.h" - -#define EXCP_NMI 1 -#define EXCP_GURU 2 -#define EXCP_BUSFAULT 3 -#define EXCP_IRQ 4 -#define EXCP_BREAK 5 - -/* CRIS-specific interrupt pending bits. */ -#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 - -/* CRUS CPU device objects interrupt lines. */ -/* PIC passes the vector for the IRQ as the value of it sends over qemu_irq */ -#define CRIS_CPU_IRQ 0 -#define CRIS_CPU_NMI 1 - -/* Register aliases. R0 - R15 */ -#define R_FP 8 -#define R_SP 14 -#define R_ACR 15 - -/* Support regs, P0 - P15 */ -#define PR_BZ 0 -#define PR_VR 1 -#define PR_PID 2 -#define PR_SRS 3 -#define PR_WZ 4 -#define PR_EXS 5 -#define PR_EDA 6 -#define PR_PREFIX 6 /* On CRISv10 P6 is reserved, we use it as prefix. */ -#define PR_MOF 7 -#define PR_DZ 8 -#define PR_EBP 9 -#define PR_ERP 10 -#define PR_SRP 11 -#define PR_NRP 12 -#define PR_CCS 13 -#define PR_USP 14 -#define PRV10_BRP 14 -#define PR_SPC 15 - -/* CPU flags. */ -#define Q_FLAG 0x80000000 -#define M_FLAG_V32 0x40000000 -#define PFIX_FLAG 0x800 /* CRISv10 Only. */ -#define F_FLAG_V10 0x400 -#define P_FLAG_V10 0x200 -#define S_FLAG 0x200 -#define R_FLAG 0x100 -#define P_FLAG 0x80 -#define M_FLAG_V10 0x80 -#define U_FLAG 0x40 -#define I_FLAG 0x20 -#define X_FLAG 0x10 -#define N_FLAG 0x08 -#define Z_FLAG 0x04 -#define V_FLAG 0x02 -#define C_FLAG 0x01 -#define ALU_FLAGS 0x1F - -/* Condition codes. 
*/ -#define CC_CC 0 -#define CC_CS 1 -#define CC_NE 2 -#define CC_EQ 3 -#define CC_VC 4 -#define CC_VS 5 -#define CC_PL 6 -#define CC_MI 7 -#define CC_LS 8 -#define CC_HI 9 -#define CC_GE 10 -#define CC_LT 11 -#define CC_GT 12 -#define CC_LE 13 -#define CC_A 14 -#define CC_P 15 - -typedef struct { - uint32_t hi; - uint32_t lo; -} TLBSet; - -typedef struct CPUArchState { - uint32_t regs[16]; - /* P0 - P15 are referred to as special registers in the docs. */ - uint32_t pregs[16]; - - /* Pseudo register for the PC. Not directly accessible on CRIS. */ - uint32_t pc; - - /* Pseudo register for the kernel stack. */ - uint32_t ksp; - - /* Branch. */ - int dslot; - int btaken; - uint32_t btarget; - - /* Condition flag tracking. */ - uint32_t cc_op; - uint32_t cc_mask; - uint32_t cc_dest; - uint32_t cc_src; - uint32_t cc_result; - /* size of the operation, 1 = byte, 2 = word, 4 = dword. */ - int cc_size; - /* X flag at the time of cc snapshot. */ - int cc_x; - - /* CRIS has certain insns that lockout interrupts. */ - int locked_irq; - int interrupt_vector; - int fault_vector; - int trap_vector; - - /* FIXME: add a check in the translator to avoid writing to support - register sets beyond the 4th. The ISA allows up to 256! but in - practice there is no core that implements more than 4. - - Support function registers are used to control units close to the - core. Accesses do not pass down the normal hierarchy. - */ - uint32_t sregs[4][16]; - - /* Linear feedback shift reg in the mmu. Used to provide pseudo - randomness for the 'hint' the mmu gives to sw for choosing valid - sets on TLB refills. */ - uint32_t mmu_rand_lfsr; - - /* - * We just store the stores to the tlbset here for later evaluation - * when the hw needs access to them. - * - * One for I and another for D. - */ - TLBSet tlbsets[2][4][16]; - - /* Fields up to this point are cleared by a CPU reset */ - struct {} end_reset_fields; - - /* Members from load_info on are preserved across resets. */ - void *load_info; -} CPUCRISState; - -/** - * CRISCPU: - * @env: #CPUCRISState - * - * A CRIS CPU. - */ -struct ArchCPU { - CPUState parent_obj; - - CPUCRISState env; -}; - -/** - * CRISCPUClass: - * @parent_realize: The parent class' realize handler. - * @parent_phases: The parent class' reset phase handlers. - * @vr: Version Register value. - * - * A CRIS CPU model. - */ -struct CRISCPUClass { - CPUClass parent_class; - - DeviceRealize parent_realize; - ResettablePhases parent_phases; - - uint32_t vr; -}; - -#ifndef CONFIG_USER_ONLY -extern const VMStateDescription vmstate_cris_cpu; - -void cris_cpu_do_interrupt(CPUState *cpu); -void crisv10_cpu_do_interrupt(CPUState *cpu); -bool cris_cpu_exec_interrupt(CPUState *cpu, int int_req); - -bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); -hwaddr cris_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); -#endif - -void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags); - -int crisv10_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); -int cris_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); -int cris_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); - -void cris_initialize_tcg(void); -void cris_initialize_crisv10_tcg(void); - -/* Instead of computing the condition codes after each CRIS instruction, - * QEMU just stores one operand (called CC_SRC), the result - * (called CC_DEST) and the type of operation (called CC_OP). 
When the - * condition codes are needed, the condition codes can be calculated - * using this information. Condition codes are not generated if they - * are only needed for conditional branches. - */ -enum { - CC_OP_DYNAMIC, /* Use env->cc_op */ - CC_OP_FLAGS, - CC_OP_CMP, - CC_OP_MOVE, - CC_OP_ADD, - CC_OP_ADDC, - CC_OP_MCP, - CC_OP_ADDU, - CC_OP_SUB, - CC_OP_SUBU, - CC_OP_NEG, - CC_OP_BTST, - CC_OP_MULS, - CC_OP_MULU, - CC_OP_DSTEP, - CC_OP_MSTEP, - CC_OP_BOUND, - - CC_OP_OR, - CC_OP_AND, - CC_OP_XOR, - CC_OP_LSL, - CC_OP_LSR, - CC_OP_ASR, - CC_OP_LZ -}; - -/* CRIS uses 8k pages. */ -#define MMAP_SHIFT TARGET_PAGE_BITS - -#define CPU_RESOLVING_TYPE TYPE_CRIS_CPU - -/* MMU modes definitions */ -#define MMU_USER_IDX 1 - -/* Support function regs. */ -#define SFR_RW_GC_CFG 0][0 -#define SFR_RW_MM_CFG env->pregs[PR_SRS]][0 -#define SFR_RW_MM_KBASE_LO env->pregs[PR_SRS]][1 -#define SFR_RW_MM_KBASE_HI env->pregs[PR_SRS]][2 -#define SFR_R_MM_CAUSE env->pregs[PR_SRS]][3 -#define SFR_RW_MM_TLB_SEL env->pregs[PR_SRS]][4 -#define SFR_RW_MM_TLB_LO env->pregs[PR_SRS]][5 -#define SFR_RW_MM_TLB_HI env->pregs[PR_SRS]][6 - -#include "exec/cpu-all.h" - -static inline void cpu_get_tb_cpu_state(CPUCRISState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags) -{ - *pc = env->pc; - *cs_base = 0; - *flags = env->dslot | - (env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG - | X_FLAG | PFIX_FLAG)); -} - -#endif diff --git a/target/cris/crisv10-decode.h b/target/cris/crisv10-decode.h deleted file mode 100644 index 9c531f36b42..00000000000 --- a/target/cris/crisv10-decode.h +++ /dev/null @@ -1,112 +0,0 @@ -/* - * CRISv10 insn decoding macros. - * - * Copyright (c) 2010 AXIS Communications AB - * Written by Edgar E. Iglesias. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#ifndef TARGET_CRIS_CRISV10_DECODE_H -#define TARGET_CRIS_CRISV10_DECODE_H - -#define CRISV10_MODE_QIMMEDIATE 0 -#define CRISV10_MODE_REG 1 -#define CRISV10_MODE_INDIRECT 2 -#define CRISV10_MODE_AUTOINC 3 - -/* Quick Immediate. 
*/ -#define CRISV10_QIMM_BCC_R0 0 -#define CRISV10_QIMM_BCC_R1 1 -#define CRISV10_QIMM_BCC_R2 2 -#define CRISV10_QIMM_BCC_R3 3 - -#define CRISV10_QIMM_BDAP_R0 4 -#define CRISV10_QIMM_BDAP_R1 5 -#define CRISV10_QIMM_BDAP_R2 6 -#define CRISV10_QIMM_BDAP_R3 7 - -#define CRISV10_QIMM_ADDQ 8 -#define CRISV10_QIMM_MOVEQ 9 -#define CRISV10_QIMM_SUBQ 10 -#define CRISV10_QIMM_CMPQ 11 -#define CRISV10_QIMM_ANDQ 12 -#define CRISV10_QIMM_ORQ 13 -#define CRISV10_QIMM_ASHQ 14 -#define CRISV10_QIMM_LSHQ 15 - - -#define CRISV10_REG_ADDX 0 -#define CRISV10_REG_MOVX 1 -#define CRISV10_REG_SUBX 2 -#define CRISV10_REG_LSL 3 -#define CRISV10_REG_ADDI 4 -#define CRISV10_REG_BIAP 5 -#define CRISV10_REG_NEG 6 -#define CRISV10_REG_BOUND 7 -#define CRISV10_REG_ADD 8 -#define CRISV10_REG_MOVE_R 9 -#define CRISV10_REG_MOVE_SPR_R 9 -#define CRISV10_REG_MOVE_R_SPR 8 -#define CRISV10_REG_SUB 10 -#define CRISV10_REG_CMP 11 -#define CRISV10_REG_AND 12 -#define CRISV10_REG_OR 13 -#define CRISV10_REG_ASR 14 -#define CRISV10_REG_LSR 15 - -#define CRISV10_REG_BTST 3 -#define CRISV10_REG_SCC 4 -#define CRISV10_REG_SETF 6 -#define CRISV10_REG_CLEARF 7 -#define CRISV10_REG_BIAP 5 -#define CRISV10_REG_ABS 10 -#define CRISV10_REG_DSTEP 11 -#define CRISV10_REG_LZ 12 -#define CRISV10_REG_NOT 13 -#define CRISV10_REG_SWAP 13 -#define CRISV10_REG_XOR 14 -#define CRISV10_REG_MSTEP 15 - -/* Indirect, var size. */ -#define CRISV10_IND_TEST 14 -#define CRISV10_IND_MUL 4 -#define CRISV10_IND_BDAP_M 5 -#define CRISV10_IND_ADD 8 -#define CRISV10_IND_MOVE_M_R 9 - - -/* indirect fixed size. */ -#define CRISV10_IND_ADDX 0 -#define CRISV10_IND_MOVX 1 -#define CRISV10_IND_SUBX 2 -#define CRISV10_IND_CMPX 3 -#define CRISV10_IND_JUMP_M 4 -#define CRISV10_IND_DIP 5 -#define CRISV10_IND_JUMP_R 6 -#define CRISV17_IND_ADDC 6 -#define CRISV10_IND_BOUND 7 -#define CRISV10_IND_BCC_M 7 -#define CRISV10_IND_MOVE_M_SPR 8 -#define CRISV10_IND_MOVE_SPR_M 9 -#define CRISV10_IND_SUB 10 -#define CRISV10_IND_CMP 11 -#define CRISV10_IND_AND 12 -#define CRISV10_IND_OR 13 -#define CRISV10_IND_MOVE_R_M 15 - -#define CRISV10_IND_MOVEM_M_R 14 -#define CRISV10_IND_MOVEM_R_M 15 - -#endif diff --git a/target/cris/crisv32-decode.h b/target/cris/crisv32-decode.h deleted file mode 100644 index fa0a7f0d63a..00000000000 --- a/target/cris/crisv32-decode.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - * CRIS insn decoding macros. - * - * Copyright (c) 2007 AXIS Communications AB - * Written by Edgar E. Iglesias. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#ifndef CRISV32_DECODE_H -#define CRISV32_DECODE_H - -/* Convenient binary macros. */ -#define HEX__(n) 0x##n##LU -#define B8__(x) ((x&0x0000000FLU)?1:0) \ - + ((x&0x000000F0LU)?2:0) \ - + ((x&0x00000F00LU)?4:0) \ - + ((x&0x0000F000LU)?8:0) \ - + ((x&0x000F0000LU)?16:0) \ - + ((x&0x00F00000LU)?32:0) \ - + ((x&0x0F000000LU)?64:0) \ - + ((x&0xF0000000LU)?128:0) -#define B8(d) ((unsigned char)B8__(HEX__(d))) - -/* Quick imm. 
*/ -#define DEC_BCCQ {B8(00000000), B8(11110000)} -#define DEC_ADDOQ {B8(00010000), B8(11110000)} -#define DEC_ADDQ {B8(00100000), B8(11111100)} -#define DEC_MOVEQ {B8(00100100), B8(11111100)} -#define DEC_SUBQ {B8(00101000), B8(11111100)} -#define DEC_CMPQ {B8(00101100), B8(11111100)} -#define DEC_ANDQ {B8(00110000), B8(11111100)} -#define DEC_ORQ {B8(00110100), B8(11111100)} -#define DEC_BTSTQ {B8(00111000), B8(11111110)} -#define DEC_ASRQ {B8(00111010), B8(11111110)} -#define DEC_LSLQ {B8(00111100), B8(11111110)} -#define DEC_LSRQ {B8(00111110), B8(11111110)} - -/* Register. */ -#define DEC_MOVU_R {B8(01000100), B8(11111110)} -#define DEC_MOVU_R {B8(01000100), B8(11111110)} -#define DEC_MOVS_R {B8(01000110), B8(11111110)} -#define DEC_MOVE_R {B8(01100100), B8(11111100)} -#define DEC_MOVE_RP {B8(01100011), B8(11111111)} -#define DEC_MOVE_PR {B8(01100111), B8(11111111)} -#define DEC_DSTEP_R {B8(01101111), B8(11111111)} -#define DEC_MOVE_RS {B8(10110111), B8(11111111)} -#define DEC_MOVE_SR {B8(11110111), B8(11111111)} -#define DEC_ADDU_R {B8(01000000), B8(11111110)} -#define DEC_ADDS_R {B8(01000010), B8(11111110)} -#define DEC_ADD_R {B8(01100000), B8(11111100)} -#define DEC_ADDI_R {B8(01010000), B8(11111100)} -#define DEC_MULS_R {B8(11010000), B8(11111100)} -#define DEC_MULU_R {B8(10010000), B8(11111100)} -#define DEC_ADDI_ACR {B8(01010100), B8(11111100)} -#define DEC_NEG_R {B8(01011000), B8(11111100)} -#define DEC_BOUND_R {B8(01011100), B8(11111100)} -#define DEC_SUBU_R {B8(01001000), B8(11111110)} -#define DEC_SUBS_R {B8(01001010), B8(11111110)} -#define DEC_SUB_R {B8(01101000), B8(11111100)} -#define DEC_CMP_R {B8(01101100), B8(11111100)} -#define DEC_AND_R {B8(01110000), B8(11111100)} -#define DEC_ABS_R {B8(01101011), B8(11111111)} -#define DEC_LZ_R {B8(01110011), B8(11111111)} -#define DEC_MCP_R {B8(01111111), B8(11111111)} -#define DEC_SWAP_R {B8(01110111), B8(11111111)} -#define DEC_XOR_R {B8(01111011), B8(11111111)} -#define DEC_LSL_R {B8(01001100), B8(11111100)} -#define DEC_LSR_R {B8(01111100), B8(11111100)} -#define DEC_ASR_R {B8(01111000), B8(11111100)} -#define DEC_OR_R {B8(01110100), B8(11111100)} -#define DEC_BTST_R {B8(01001111), B8(11111111)} - -/* Fixed. */ -#define DEC_SETF {B8(01011011), B8(11111111)} -#define DEC_CLEARF {B8(01011111), B8(11111111)} - -/* Memory. 
*/ -#define DEC_ADDU_M {B8(10000000), B8(10111110)} -#define DEC_ADDS_M {B8(10000010), B8(10111110)} -#define DEC_MOVU_M {B8(10000100), B8(10111110)} -#define DEC_MOVS_M {B8(10000110), B8(10111110)} -#define DEC_SUBU_M {B8(10001000), B8(10111110)} -#define DEC_SUBS_M {B8(10001010), B8(10111110)} -#define DEC_CMPU_M {B8(10001100), B8(10111110)} -#define DEC_CMPS_M {B8(10001110), B8(10111110)} -#define DEC_ADDO_M {B8(10010100), B8(10111100)} -#define DEC_BOUND_M {B8(10011100), B8(10111100)} -#define DEC_ADD_M {B8(10100000), B8(10111100)} -#define DEC_MOVE_MR {B8(10100100), B8(10111100)} -#define DEC_SUB_M {B8(10101000), B8(10111100)} -#define DEC_CMP_M {B8(10101100), B8(10111100)} -#define DEC_AND_M {B8(10110000), B8(10111100)} -#define DEC_OR_M {B8(10110100), B8(10111100)} -#define DEC_TEST_M {B8(10111000), B8(10111100)} -#define DEC_MOVE_RM {B8(10111100), B8(10111100)} - -#define DEC_ADDC_R {B8(01010111), B8(11111111)} -#define DEC_ADDC_MR {B8(10011010), B8(10111111)} -#define DEC_LAPCQ {B8(10010111), B8(11111111)} -#define DEC_LAPC_IM {B8(11010111), B8(11111111)} - -#define DEC_MOVE_MP {B8(10100011), B8(10111111)} -#define DEC_MOVE_PM {B8(10100111), B8(10111111)} - -#define DEC_SCC_R {B8(01010011), B8(11111111)} -#define DEC_RFE_ETC {B8(10010011), B8(11111111)} -#define DEC_JUMP_P {B8(10011111), B8(11111111)} -#define DEC_BCC_IM {B8(11011111), B8(11111111)} -#define DEC_JAS_R {B8(10011011), B8(11111111)} -#define DEC_JASC_R {B8(10110011), B8(11111111)} -#define DEC_JAS_IM {B8(11011011), B8(11111111)} -#define DEC_JASC_IM {B8(11110011), B8(11111111)} -#define DEC_BAS_IM {B8(11101011), B8(11111111)} -#define DEC_BASC_IM {B8(11101111), B8(11111111)} -#define DEC_MOVEM_MR {B8(10111011), B8(10111111)} -#define DEC_MOVEM_RM {B8(10111111), B8(10111111)} - -#define DEC_FTAG_FIDX_D_M {B8(10101011), B8(11111111)} -#define DEC_FTAG_FIDX_I_M {B8(11010011), B8(11111111)} - -#endif diff --git a/target/cris/gdbstub.c b/target/cris/gdbstub.c deleted file mode 100644 index 9e87069da89..00000000000 --- a/target/cris/gdbstub.c +++ /dev/null @@ -1,127 +0,0 @@ -/* - * CRIS gdb server stub - * - * Copyright (c) 2003-2005 Fabrice Bellard - * Copyright (c) 2013 SUSE LINUX Products GmbH - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
- */ -#include "qemu/osdep.h" -#include "cpu.h" -#include "gdbstub/helpers.h" - -int crisv10_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) -{ - CPUCRISState *env = cpu_env(cs); - - if (n < 15) { - return gdb_get_reg32(mem_buf, env->regs[n]); - } - - if (n == 15) { - return gdb_get_reg32(mem_buf, env->pc); - } - - if (n < 32) { - switch (n) { - case 16: - return gdb_get_reg8(mem_buf, env->pregs[n - 16]); - case 17: - return gdb_get_reg8(mem_buf, env->pregs[n - 16]); - case 20: - case 21: - return gdb_get_reg16(mem_buf, env->pregs[n - 16]); - default: - if (n >= 23) { - return gdb_get_reg32(mem_buf, env->pregs[n - 16]); - } - break; - } - } - return 0; -} - -int cris_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) -{ - CPUCRISState *env = cpu_env(cs); - uint8_t srs; - - srs = env->pregs[PR_SRS]; - if (n < 16) { - return gdb_get_reg32(mem_buf, env->regs[n]); - } - - if (n >= 21 && n < 32) { - return gdb_get_reg32(mem_buf, env->pregs[n - 16]); - } - if (n >= 33 && n < 49) { - return gdb_get_reg32(mem_buf, env->sregs[srs][n - 33]); - } - switch (n) { - case 16: - return gdb_get_reg8(mem_buf, env->pregs[0]); - case 17: - return gdb_get_reg8(mem_buf, env->pregs[1]); - case 18: - return gdb_get_reg32(mem_buf, env->pregs[2]); - case 19: - return gdb_get_reg8(mem_buf, srs); - case 20: - return gdb_get_reg16(mem_buf, env->pregs[4]); - case 32: - return gdb_get_reg32(mem_buf, env->pc); - } - - return 0; -} - -int cris_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) -{ - CPUCRISState *env = cpu_env(cs); - uint32_t tmp; - - if (n > 49) { - return 0; - } - - tmp = ldl_p(mem_buf); - - if (n < 16) { - env->regs[n] = tmp; - } - - if (n >= 21 && n < 32) { - env->pregs[n - 16] = tmp; - } - - /* FIXME: Should support function regs be writable? */ - switch (n) { - case 16: - return 1; - case 17: - return 1; - case 18: - env->pregs[PR_PID] = tmp; - break; - case 19: - return 1; - case 20: - return 2; - case 32: - env->pc = tmp; - break; - } - - return 4; -} diff --git a/target/cris/helper.c b/target/cris/helper.c deleted file mode 100644 index 1c3f86876f6..00000000000 --- a/target/cris/helper.c +++ /dev/null @@ -1,287 +0,0 @@ -/* - * CRIS helper routines. - * - * Copyright (c) 2007 AXIS Communications AB - * Written by Edgar E. Iglesias. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#include "qemu/osdep.h" -#include "qemu/log.h" -#include "cpu.h" -#include "hw/core/tcg-cpu-ops.h" -#include "mmu.h" -#include "qemu/host-utils.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" -#include "exec/helper-proto.h" - - -//#define CRIS_HELPER_DEBUG - - -#ifdef CRIS_HELPER_DEBUG -#define D(x) x -#define D_LOG(...) qemu_log(__VA_ARGS__) -#else -#define D(x) -#define D_LOG(...) do { } while (0) -#endif - -static void cris_shift_ccs(CPUCRISState *env) -{ - uint32_t ccs; - /* Apply the ccs shift. 
*/ - ccs = env->pregs[PR_CCS]; - ccs = ((ccs & 0xc0000000) | ((ccs << 12) >> 2)) & ~0x3ff; - env->pregs[PR_CCS] = ccs; -} - -bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - CPUCRISState *env = cpu_env(cs); - struct cris_mmu_result res; - int prot, miss; - target_ulong phy; - - miss = cris_mmu_translate(&res, env, address & TARGET_PAGE_MASK, - access_type, mmu_idx, 0); - if (likely(!miss)) { - /* - * Mask off the cache selection bit. The ETRAX busses do not - * see the top bit. - */ - phy = res.phy & ~0x80000000; - prot = res.prot; - tlb_set_page(cs, address & TARGET_PAGE_MASK, phy, - prot, mmu_idx, TARGET_PAGE_SIZE); - return true; - } - - if (probe) { - return false; - } - - if (cs->exception_index == EXCP_BUSFAULT) { - cpu_abort(cs, "CRIS: Illegal recursive bus fault." - "addr=%" VADDR_PRIx " access_type=%d\n", - address, access_type); - } - - env->pregs[PR_EDA] = address; - cs->exception_index = EXCP_BUSFAULT; - env->fault_vector = res.bf_vec; - if (retaddr) { - if (cpu_restore_state(cs, retaddr)) { - /* Evaluate flags after retranslation. */ - helper_top_evaluate_flags(env); - } - } - cpu_loop_exit(cs); -} - -void crisv10_cpu_do_interrupt(CPUState *cs) -{ - CPUCRISState *env = cpu_env(cs); - int ex_vec = -1; - - D_LOG("exception index=%d interrupt_req=%d\n", - cs->exception_index, - cs->interrupt_request); - - if (env->dslot) { - /* CRISv10 never takes interrupts while in a delay-slot. */ - cpu_abort(cs, "CRIS: Interrupt on delay-slot\n"); - } - - assert(!(env->pregs[PR_CCS] & PFIX_FLAG)); - switch (cs->exception_index) { - case EXCP_BREAK: - /* These exceptions are generated by the core itself. - ERP should point to the insn following the brk. */ - ex_vec = env->trap_vector; - env->pregs[PRV10_BRP] = env->pc; - break; - - case EXCP_NMI: - /* NMI is hardwired to vector zero. */ - ex_vec = 0; - env->pregs[PR_CCS] &= ~M_FLAG_V10; - env->pregs[PRV10_BRP] = env->pc; - break; - - case EXCP_BUSFAULT: - cpu_abort(cs, "Unhandled busfault"); - break; - - default: - /* The interrupt controller gives us the vector. */ - ex_vec = env->interrupt_vector; - /* Normal interrupts are taken between - TB's. env->pc is valid here. */ - env->pregs[PR_ERP] = env->pc; - break; - } - - if (env->pregs[PR_CCS] & U_FLAG) { - /* Swap stack pointers. */ - env->pregs[PR_USP] = env->regs[R_SP]; - env->regs[R_SP] = env->ksp; - } - - /* Now that we are in kernel mode, load the handlers address. */ - env->pc = cpu_ldl_code(env, env->pregs[PR_EBP] + ex_vec * 4); - env->locked_irq = 1; - env->pregs[PR_CCS] |= F_FLAG_V10; /* set F. */ - - qemu_log_mask(CPU_LOG_INT, "%s isr=%x vec=%x ccs=%x pid=%d erp=%x\n", - __func__, env->pc, ex_vec, - env->pregs[PR_CCS], - env->pregs[PR_PID], - env->pregs[PR_ERP]); -} - -void cris_cpu_do_interrupt(CPUState *cs) -{ - CPUCRISState *env = cpu_env(cs); - int ex_vec = -1; - - D_LOG("exception index=%d interrupt_req=%d\n", - cs->exception_index, - cs->interrupt_request); - - switch (cs->exception_index) { - case EXCP_BREAK: - /* These exceptions are generated by the core itself. - ERP should point to the insn following the brk. */ - ex_vec = env->trap_vector; - env->pregs[PR_ERP] = env->pc; - break; - - case EXCP_NMI: - /* NMI is hardwired to vector zero. 
*/ - ex_vec = 0; - env->pregs[PR_CCS] &= ~M_FLAG_V32; - env->pregs[PR_NRP] = env->pc; - break; - - case EXCP_BUSFAULT: - ex_vec = env->fault_vector; - env->pregs[PR_ERP] = env->pc; - break; - - default: - /* The interrupt controller gives us the vector. */ - ex_vec = env->interrupt_vector; - /* Normal interrupts are taken between - TB's. env->pc is valid here. */ - env->pregs[PR_ERP] = env->pc; - break; - } - - /* Fill in the IDX field. */ - env->pregs[PR_EXS] = (ex_vec & 0xff) << 8; - - if (env->dslot) { - D_LOG("excp isr=%x PC=%x ds=%d SP=%x" - " ERP=%x pid=%x ccs=%x cc=%d %x\n", - ex_vec, env->pc, env->dslot, - env->regs[R_SP], - env->pregs[PR_ERP], env->pregs[PR_PID], - env->pregs[PR_CCS], - env->cc_op, env->cc_mask); - /* We loose the btarget, btaken state here so rexec the - branch. */ - env->pregs[PR_ERP] -= env->dslot; - /* Exception starts with dslot cleared. */ - env->dslot = 0; - } - - if (env->pregs[PR_CCS] & U_FLAG) { - /* Swap stack pointers. */ - env->pregs[PR_USP] = env->regs[R_SP]; - env->regs[R_SP] = env->ksp; - } - - /* Apply the CRIS CCS shift. Clears U if set. */ - cris_shift_ccs(env); - - /* Now that we are in kernel mode, load the handlers address. - This load may not fault, real hw leaves that behaviour as - undefined. */ - env->pc = cpu_ldl_code(env, env->pregs[PR_EBP] + ex_vec * 4); - - /* Clear the excption_index to avoid spurious hw_aborts for recursive - bus faults. */ - cs->exception_index = -1; - - D_LOG("%s isr=%x vec=%x ccs=%x pid=%d erp=%x\n", - __func__, env->pc, ex_vec, - env->pregs[PR_CCS], - env->pregs[PR_PID], - env->pregs[PR_ERP]); -} - -hwaddr cris_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) -{ - CRISCPU *cpu = CRIS_CPU(cs); - uint32_t phy = addr; - struct cris_mmu_result res; - int miss; - - miss = cris_mmu_translate(&res, &cpu->env, addr, MMU_DATA_LOAD, 0, 1); - /* If D TLB misses, try I TLB. 
*/ - if (miss) { - miss = cris_mmu_translate(&res, &cpu->env, addr, MMU_INST_FETCH, 0, 1); - } - - if (!miss) { - phy = res.phy; - } - D(fprintf(stderr, "%s %x -> %x\n", __func__, addr, phy)); - return phy; -} - -bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request) -{ - CPUClass *cc = CPU_GET_CLASS(cs); - CPUCRISState *env = cpu_env(cs); - bool ret = false; - - if (interrupt_request & CPU_INTERRUPT_HARD - && (env->pregs[PR_CCS] & I_FLAG) - && !env->locked_irq) { - cs->exception_index = EXCP_IRQ; - cc->tcg_ops->do_interrupt(cs); - ret = true; - } - if (interrupt_request & CPU_INTERRUPT_NMI) { - unsigned int m_flag_archval; - if (env->pregs[PR_VR] < 32) { - m_flag_archval = M_FLAG_V10; - } else { - m_flag_archval = M_FLAG_V32; - } - if ((env->pregs[PR_CCS] & m_flag_archval)) { - cs->exception_index = EXCP_NMI; - cc->tcg_ops->do_interrupt(cs); - ret = true; - } - } - - return ret; -} diff --git a/target/cris/helper.h b/target/cris/helper.h deleted file mode 100644 index 3abf6086828..00000000000 --- a/target/cris/helper.h +++ /dev/null @@ -1,23 +0,0 @@ -DEF_HELPER_2(raise_exception, noreturn, env, i32) -DEF_HELPER_2(tlb_flush_pid, void, env, i32) -DEF_HELPER_2(spc_write, void, env, i32) -DEF_HELPER_1(rfe, void, env) -DEF_HELPER_1(rfn, void, env) - -DEF_HELPER_3(movl_sreg_reg, void, env, i32, i32) -DEF_HELPER_3(movl_reg_sreg, void, env, i32, i32) - -DEF_HELPER_FLAGS_4(btst, TCG_CALL_NO_SE, i32, env, i32, i32, i32) - -DEF_HELPER_FLAGS_4(evaluate_flags_muls, TCG_CALL_NO_SE, i32, env, i32, i32, i32) -DEF_HELPER_FLAGS_4(evaluate_flags_mulu, TCG_CALL_NO_SE, i32, env, i32, i32, i32) -DEF_HELPER_FLAGS_5(evaluate_flags_mcp, TCG_CALL_NO_SE, i32, env, - i32, i32, i32, i32) -DEF_HELPER_FLAGS_5(evaluate_flags_alu_4, TCG_CALL_NO_SE, i32, env, - i32, i32, i32, i32) -DEF_HELPER_FLAGS_5(evaluate_flags_sub_4, TCG_CALL_NO_SE, i32, env, - i32, i32, i32, i32) -DEF_HELPER_FLAGS_3(evaluate_flags_move_4, TCG_CALL_NO_SE, i32, env, i32, i32) -DEF_HELPER_FLAGS_3(evaluate_flags_move_2, TCG_CALL_NO_SE, i32, env, i32, i32) -DEF_HELPER_1(evaluate_flags, void, env) -DEF_HELPER_1(top_evaluate_flags, void, env) diff --git a/target/cris/machine.c b/target/cris/machine.c deleted file mode 100644 index 7b9bde872aa..00000000000 --- a/target/cris/machine.c +++ /dev/null @@ -1,93 +0,0 @@ -/* - * CRIS virtual CPU state save/load support - * - * Copyright (c) 2012 Red Hat, Inc. - * Written by Juan Quintela - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
- */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "migration/cpu.h" - -static const VMStateDescription vmstate_tlbset = { - .name = "cpu/tlbset", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_UINT32(lo, TLBSet), - VMSTATE_UINT32(hi, TLBSet), - VMSTATE_END_OF_LIST() - } -}; - -static const VMStateDescription vmstate_cris_env = { - .name = "env", - .version_id = 2, - .minimum_version_id = 2, - .fields = (const VMStateField[]) { - VMSTATE_UINT32_ARRAY(regs, CPUCRISState, 16), - VMSTATE_UINT32_ARRAY(pregs, CPUCRISState, 16), - VMSTATE_UINT32(pc, CPUCRISState), - VMSTATE_UINT32(ksp, CPUCRISState), - VMSTATE_INT32(dslot, CPUCRISState), - VMSTATE_INT32(btaken, CPUCRISState), - VMSTATE_UINT32(btarget, CPUCRISState), - VMSTATE_UINT32(cc_op, CPUCRISState), - VMSTATE_UINT32(cc_mask, CPUCRISState), - VMSTATE_UINT32(cc_dest, CPUCRISState), - VMSTATE_UINT32(cc_src, CPUCRISState), - VMSTATE_UINT32(cc_result, CPUCRISState), - VMSTATE_INT32(cc_size, CPUCRISState), - VMSTATE_INT32(cc_x, CPUCRISState), - VMSTATE_INT32(locked_irq, CPUCRISState), - VMSTATE_INT32(interrupt_vector, CPUCRISState), - VMSTATE_INT32(fault_vector, CPUCRISState), - VMSTATE_INT32(trap_vector, CPUCRISState), - VMSTATE_UINT32_ARRAY(sregs[0], CPUCRISState, 16), - VMSTATE_UINT32_ARRAY(sregs[1], CPUCRISState, 16), - VMSTATE_UINT32_ARRAY(sregs[2], CPUCRISState, 16), - VMSTATE_UINT32_ARRAY(sregs[3], CPUCRISState, 16), - VMSTATE_UINT32(mmu_rand_lfsr, CPUCRISState), - VMSTATE_STRUCT_ARRAY(tlbsets[0][0], CPUCRISState, 16, 0, - vmstate_tlbset, TLBSet), - VMSTATE_STRUCT_ARRAY(tlbsets[0][1], CPUCRISState, 16, 0, - vmstate_tlbset, TLBSet), - VMSTATE_STRUCT_ARRAY(tlbsets[0][2], CPUCRISState, 16, 0, - vmstate_tlbset, TLBSet), - VMSTATE_STRUCT_ARRAY(tlbsets[0][3], CPUCRISState, 16, 0, - vmstate_tlbset, TLBSet), - VMSTATE_STRUCT_ARRAY(tlbsets[1][0], CPUCRISState, 16, 0, - vmstate_tlbset, TLBSet), - VMSTATE_STRUCT_ARRAY(tlbsets[1][1], CPUCRISState, 16, 0, - vmstate_tlbset, TLBSet), - VMSTATE_STRUCT_ARRAY(tlbsets[1][2], CPUCRISState, 16, 0, - vmstate_tlbset, TLBSet), - VMSTATE_STRUCT_ARRAY(tlbsets[1][3], CPUCRISState, 16, 0, - vmstate_tlbset, TLBSet), - VMSTATE_END_OF_LIST() - } -}; - -const VMStateDescription vmstate_cris_cpu = { - .name = "cpu", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_CPU(), - VMSTATE_STRUCT(env, CRISCPU, 1, vmstate_cris_env, CPUCRISState), - VMSTATE_END_OF_LIST() - } -}; diff --git a/target/cris/meson.build b/target/cris/meson.build deleted file mode 100644 index bbfcdf7f7a3..00000000000 --- a/target/cris/meson.build +++ /dev/null @@ -1,17 +0,0 @@ -cris_ss = ss.source_set() -cris_ss.add(files( - 'cpu.c', - 'gdbstub.c', - 'op_helper.c', - 'translate.c', -)) - -cris_system_ss = ss.source_set() -cris_system_ss.add(files( - 'helper.c', - 'machine.c', - 'mmu.c', -)) - -target_arch += {'cris': cris_ss} -target_system_arch += {'cris': cris_system_ss} diff --git a/target/cris/mmu.c b/target/cris/mmu.c deleted file mode 100644 index d51008c5410..00000000000 --- a/target/cris/mmu.c +++ /dev/null @@ -1,356 +0,0 @@ -/* - * CRIS mmu emulation. - * - * Copyright (c) 2007 AXIS Communications AB - * Written by Edgar E. Iglesias. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "exec/exec-all.h" -#include "exec/page-protection.h" -#include "mmu.h" - -#ifdef DEBUG -#define D(x) x -#define D_LOG(...) qemu_log(__VA_ARGS__) -#else -#define D(x) do { } while (0) -#define D_LOG(...) do { } while (0) -#endif - -void cris_mmu_init(CPUCRISState *env) -{ - env->mmu_rand_lfsr = 0xcccc; -} - -#define SR_POLYNOM 0x8805 -static inline unsigned int compute_polynom(unsigned int sr) -{ - unsigned int i; - unsigned int f; - - f = 0; - for (i = 0; i < 16; i++) { - f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1); - } - - return f; -} - -static void cris_mmu_update_rand_lfsr(CPUCRISState *env) -{ - unsigned int f; - - /* Update lfsr at every fault. */ - f = compute_polynom(env->mmu_rand_lfsr); - env->mmu_rand_lfsr >>= 1; - env->mmu_rand_lfsr |= (f << 15); - env->mmu_rand_lfsr &= 0xffff; -} - -static inline int cris_mmu_enabled(uint32_t rw_gc_cfg) -{ - return (rw_gc_cfg & 12) != 0; -} - -static inline int cris_mmu_segmented_addr(int seg, uint32_t rw_mm_cfg) -{ - return (1 << seg) & rw_mm_cfg; -} - -static uint32_t cris_mmu_translate_seg(CPUCRISState *env, int seg) -{ - uint32_t base; - int i; - - if (seg < 8) { - base = env->sregs[SFR_RW_MM_KBASE_LO]; - } else { - base = env->sregs[SFR_RW_MM_KBASE_HI]; - } - - i = seg & 7; - base >>= i * 4; - base &= 15; - - base <<= 28; - return base; -} - -/* Used by the tlb decoder. */ -#define EXTRACT_FIELD(src, start, end) \ - (((src) >> start) & ((1 << (end - start + 1)) - 1)) - -static inline void set_field(uint32_t *dst, unsigned int val, - unsigned int offset, unsigned int width) -{ - uint32_t mask; - - mask = (1 << width) - 1; - mask <<= offset; - val <<= offset; - - val &= mask; - *dst &= ~(mask); - *dst |= val; -} - -#ifdef DEBUG -static void dump_tlb(CPUCRISState *env, int mmu) -{ - int set; - int idx; - uint32_t hi, lo, tlb_vpn, tlb_pfn; - - for (set = 0; set < 4; set++) { - for (idx = 0; idx < 16; idx++) { - lo = env->tlbsets[mmu][set][idx].lo; - hi = env->tlbsets[mmu][set][idx].hi; - tlb_vpn = EXTRACT_FIELD(hi, 13, 31); - tlb_pfn = EXTRACT_FIELD(lo, 13, 31); - - printf("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n", - set, idx, hi, lo, tlb_vpn, tlb_pfn); - } - } -} -#endif - -static int cris_mmu_translate_page(struct cris_mmu_result *res, - CPUCRISState *env, uint32_t vaddr, - MMUAccessType access_type, - int usermode, int debug) -{ - unsigned int vpage; - unsigned int idx; - uint32_t pid, lo, hi; - uint32_t tlb_vpn, tlb_pfn = 0; - int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x; - int cfg_v, cfg_k, cfg_w, cfg_x; - int set, match = 0; - uint32_t r_cause; - uint32_t r_cfg; - int rwcause; - int mmu = 1; /* Data mmu is default. */ - int vect_base; - - r_cause = env->sregs[SFR_R_MM_CAUSE]; - r_cfg = env->sregs[SFR_RW_MM_CFG]; - pid = env->pregs[PR_PID] & 0xff; - - switch (access_type) { - case MMU_INST_FETCH: - rwcause = CRIS_MMU_ERR_EXEC; - mmu = 0; - break; - case MMU_DATA_STORE: - rwcause = CRIS_MMU_ERR_WRITE; - break; - default: - case MMU_DATA_LOAD: - rwcause = CRIS_MMU_ERR_READ; - break; - } - - /* I exception vectors 4 - 7, D 8 - 11. 
*/ - vect_base = (mmu + 1) * 4; - - vpage = vaddr >> 13; - - /* - * We know the index which to check on each set. - * Scan both I and D. - */ - idx = vpage & 15; - for (set = 0; set < 4; set++) { - lo = env->tlbsets[mmu][set][idx].lo; - hi = env->tlbsets[mmu][set][idx].hi; - - tlb_vpn = hi >> 13; - tlb_pid = EXTRACT_FIELD(hi, 0, 7); - tlb_g = EXTRACT_FIELD(lo, 4, 4); - - D_LOG("TLB[%d][%d][%d] v=%x vpage=%x lo=%x hi=%x\n", - mmu, set, idx, tlb_vpn, vpage, lo, hi); - if ((tlb_g || (tlb_pid == pid)) && tlb_vpn == vpage) { - match = 1; - break; - } - } - - res->bf_vec = vect_base; - if (match) { - cfg_w = EXTRACT_FIELD(r_cfg, 19, 19); - cfg_k = EXTRACT_FIELD(r_cfg, 18, 18); - cfg_x = EXTRACT_FIELD(r_cfg, 17, 17); - cfg_v = EXTRACT_FIELD(r_cfg, 16, 16); - - tlb_pfn = EXTRACT_FIELD(lo, 13, 31); - tlb_v = EXTRACT_FIELD(lo, 3, 3); - tlb_k = EXTRACT_FIELD(lo, 2, 2); - tlb_w = EXTRACT_FIELD(lo, 1, 1); - tlb_x = EXTRACT_FIELD(lo, 0, 0); - - /* - * set_exception_vector(0x04, i_mmu_refill); - * set_exception_vector(0x05, i_mmu_invalid); - * set_exception_vector(0x06, i_mmu_access); - * set_exception_vector(0x07, i_mmu_execute); - * set_exception_vector(0x08, d_mmu_refill); - * set_exception_vector(0x09, d_mmu_invalid); - * set_exception_vector(0x0a, d_mmu_access); - * set_exception_vector(0x0b, d_mmu_write); - */ - if (cfg_k && tlb_k && usermode) { - D(printf("tlb: kernel protected %x lo=%x pc=%x\n", - vaddr, lo, env->pc)); - match = 0; - res->bf_vec = vect_base + 2; - } else if (access_type == MMU_DATA_STORE && cfg_w && !tlb_w) { - D(printf("tlb: write protected %x lo=%x pc=%x\n", - vaddr, lo, env->pc)); - match = 0; - /* write accesses never go through the I mmu. */ - res->bf_vec = vect_base + 3; - } else if (access_type == MMU_INST_FETCH && cfg_x && !tlb_x) { - D(printf("tlb: exec protected %x lo=%x pc=%x\n", - vaddr, lo, env->pc)); - match = 0; - res->bf_vec = vect_base + 3; - } else if (cfg_v && !tlb_v) { - D(printf("tlb: invalid %x\n", vaddr)); - match = 0; - res->bf_vec = vect_base + 1; - } - - res->prot = 0; - if (match) { - res->prot |= PAGE_READ; - if (tlb_w) { - res->prot |= PAGE_WRITE; - } - if (mmu == 0 && (cfg_x || tlb_x)) { - res->prot |= PAGE_EXEC; - } - } else { - D(dump_tlb(env, mmu)); - } - } else { - /* If refill, provide a randomized set. */ - set = env->mmu_rand_lfsr & 3; - } - - if (!match && !debug) { - cris_mmu_update_rand_lfsr(env); - - /* Compute index. */ - idx = vpage & 15; - - /* Update RW_MM_TLB_SEL. */ - env->sregs[SFR_RW_MM_TLB_SEL] = 0; - set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4); - set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2); - - /* Update RW_MM_CAUSE. 
*/ - set_field(&r_cause, rwcause, 8, 2); - set_field(&r_cause, vpage, 13, 19); - set_field(&r_cause, pid, 0, 8); - env->sregs[SFR_R_MM_CAUSE] = r_cause; - D(printf("refill vaddr=%x pc=%x\n", vaddr, env->pc)); - } - - D(printf("%s access=%u mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x" - " %x cause=%x sel=%x sp=%x %x %x\n", - __func__, access_type, match, env->pc, - vaddr, vpage, - tlb_vpn, tlb_pfn, tlb_pid, - pid, - r_cause, - env->sregs[SFR_RW_MM_TLB_SEL], - env->regs[R_SP], env->pregs[PR_USP], env->ksp)); - - res->phy = tlb_pfn << TARGET_PAGE_BITS; - return !match; -} - -void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid) -{ - target_ulong vaddr; - unsigned int idx; - uint32_t lo, hi; - uint32_t tlb_vpn; - int tlb_pid, tlb_g, tlb_v; - unsigned int set; - unsigned int mmu; - - pid &= 0xff; - for (mmu = 0; mmu < 2; mmu++) { - for (set = 0; set < 4; set++) { - for (idx = 0; idx < 16; idx++) { - lo = env->tlbsets[mmu][set][idx].lo; - hi = env->tlbsets[mmu][set][idx].hi; - - tlb_vpn = EXTRACT_FIELD(hi, 13, 31); - tlb_pid = EXTRACT_FIELD(hi, 0, 7); - tlb_g = EXTRACT_FIELD(lo, 4, 4); - tlb_v = EXTRACT_FIELD(lo, 3, 3); - - if (tlb_v && !tlb_g && (tlb_pid == pid)) { - vaddr = tlb_vpn << TARGET_PAGE_BITS; - D_LOG("flush pid=%x vaddr=%x\n", pid, vaddr); - tlb_flush_page(env_cpu(env), vaddr); - } - } - } - } -} - -int cris_mmu_translate(struct cris_mmu_result *res, - CPUCRISState *env, uint32_t vaddr, - MMUAccessType access_type, int mmu_idx, int debug) -{ - int seg; - int miss = 0; - int is_user = mmu_idx == MMU_USER_IDX; - uint32_t old_srs; - - old_srs = env->pregs[PR_SRS]; - - env->pregs[PR_SRS] = access_type == MMU_INST_FETCH ? 1 : 2; - - if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) { - res->phy = vaddr; - res->prot = PAGE_RWX; - goto done; - } - - seg = vaddr >> 28; - if (!is_user && cris_mmu_segmented_addr(seg, env->sregs[SFR_RW_MM_CFG])) { - uint32_t base; - - miss = 0; - base = cris_mmu_translate_seg(env, seg); - res->phy = base | (0x0fffffff & vaddr); - res->prot = PAGE_RWX; - } else { - miss = cris_mmu_translate_page(res, env, vaddr, access_type, - is_user, debug); - } - done: - env->pregs[PR_SRS] = old_srs; - return miss; -} diff --git a/target/cris/mmu.h b/target/cris/mmu.h deleted file mode 100644 index d57386ec6cd..00000000000 --- a/target/cris/mmu.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef TARGET_CRIS_MMU_H -#define TARGET_CRIS_MMU_H - -#define CRIS_MMU_ERR_EXEC 0 -#define CRIS_MMU_ERR_READ 1 -#define CRIS_MMU_ERR_WRITE 2 -#define CRIS_MMU_ERR_FLUSH 3 - -struct cris_mmu_result -{ - uint32_t phy; - int prot; - int bf_vec; -}; - -void cris_mmu_init(CPUCRISState *env); -void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid); -int cris_mmu_translate(struct cris_mmu_result *res, - CPUCRISState *env, uint32_t vaddr, - MMUAccessType access_type, int mmu_idx, int debug); - -#endif diff --git a/target/cris/op_helper.c b/target/cris/op_helper.c deleted file mode 100644 index 98a9aaf5046..00000000000 --- a/target/cris/op_helper.c +++ /dev/null @@ -1,580 +0,0 @@ -/* - * CRIS helper routines - * - * Copyright (c) 2007 AXIS Communications - * Written by Edgar E. Iglesias - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "mmu.h" -#include "exec/helper-proto.h" -#include "qemu/host-utils.h" -#include "exec/exec-all.h" - -//#define CRIS_OP_HELPER_DEBUG - - -#ifdef CRIS_OP_HELPER_DEBUG -#define D(x) x -#define D_LOG(...) qemu_log(__VA_ARGS__) -#else -#define D(x) -#define D_LOG(...) do { } while (0) -#endif - -void helper_raise_exception(CPUCRISState *env, uint32_t index) -{ - CPUState *cs = env_cpu(env); - - cs->exception_index = index; - cpu_loop_exit(cs); -} - -void helper_tlb_flush_pid(CPUCRISState *env, uint32_t pid) -{ -#if !defined(CONFIG_USER_ONLY) - pid &= 0xff; - if (pid != (env->pregs[PR_PID] & 0xff)) { - cris_mmu_flush_pid(env, env->pregs[PR_PID]); - } -#endif -} - -void helper_spc_write(CPUCRISState *env, uint32_t new_spc) -{ -#if !defined(CONFIG_USER_ONLY) - CPUState *cs = env_cpu(env); - - tlb_flush_page(cs, env->pregs[PR_SPC]); - tlb_flush_page(cs, new_spc); -#endif -} - -/* Used by the tlb decoder. */ -#define EXTRACT_FIELD(src, start, end) \ - (((src) >> start) & ((1 << (end - start + 1)) - 1)) - -void helper_movl_sreg_reg(CPUCRISState *env, uint32_t sreg, uint32_t reg) -{ - uint32_t srs; - srs = env->pregs[PR_SRS]; - srs &= 3; - env->sregs[srs][sreg] = env->regs[reg]; - -#if !defined(CONFIG_USER_ONLY) - if (srs == 1 || srs == 2) { - if (sreg == 6) { - /* Writes to tlb-hi write to mm_cause as a side effect. */ - env->sregs[SFR_RW_MM_TLB_HI] = env->regs[reg]; - env->sregs[SFR_R_MM_CAUSE] = env->regs[reg]; - } else if (sreg == 5) { - uint32_t set; - uint32_t idx; - uint32_t lo, hi; - uint32_t vaddr; - int tlb_v; - - idx = set = env->sregs[SFR_RW_MM_TLB_SEL]; - set >>= 4; - set &= 3; - - idx &= 15; - /* We've just made a write to tlb_lo. */ - lo = env->sregs[SFR_RW_MM_TLB_LO]; - /* Writes are done via r_mm_cause. */ - hi = env->sregs[SFR_R_MM_CAUSE]; - - vaddr = EXTRACT_FIELD(env->tlbsets[srs - 1][set][idx].hi, 13, 31); - vaddr <<= TARGET_PAGE_BITS; - tlb_v = EXTRACT_FIELD(env->tlbsets[srs - 1][set][idx].lo, 3, 3); - env->tlbsets[srs - 1][set][idx].lo = lo; - env->tlbsets[srs - 1][set][idx].hi = hi; - - D_LOG("tlb flush vaddr=%x v=%d pc=%x\n", - vaddr, tlb_v, env->pc); - if (tlb_v) { - tlb_flush_page(env_cpu(env), vaddr); - } - } - } -#endif -} - -void helper_movl_reg_sreg(CPUCRISState *env, uint32_t reg, uint32_t sreg) -{ - uint32_t srs; - env->pregs[PR_SRS] &= 3; - srs = env->pregs[PR_SRS]; - -#if !defined(CONFIG_USER_ONLY) - if (srs == 1 || srs == 2) { - uint32_t set; - uint32_t idx; - uint32_t lo, hi; - - idx = set = env->sregs[SFR_RW_MM_TLB_SEL]; - set >>= 4; - set &= 3; - idx &= 15; - - /* Update the mirror regs. */ - hi = env->tlbsets[srs - 1][set][idx].hi; - lo = env->tlbsets[srs - 1][set][idx].lo; - env->sregs[SFR_RW_MM_TLB_HI] = hi; - env->sregs[SFR_RW_MM_TLB_LO] = lo; - } -#endif - env->regs[reg] = env->sregs[srs][sreg]; -} - -static void cris_ccs_rshift(CPUCRISState *env) -{ - uint32_t ccs; - - /* Apply the ccs shift. */ - ccs = env->pregs[PR_CCS]; - ccs = (ccs & 0xc0000000) | ((ccs & 0x0fffffff) >> 10); - if (ccs & U_FLAG) { - /* Enter user mode. 
*/ - env->ksp = env->regs[R_SP]; - env->regs[R_SP] = env->pregs[PR_USP]; - } - - env->pregs[PR_CCS] = ccs; -} - -void helper_rfe(CPUCRISState *env) -{ - int rflag = env->pregs[PR_CCS] & R_FLAG; - - D_LOG("rfe: erp=%x pid=%x ccs=%x btarget=%x\n", - env->pregs[PR_ERP], env->pregs[PR_PID], - env->pregs[PR_CCS], - env->btarget); - - cris_ccs_rshift(env); - - /* RFE sets the P_FLAG only if the R_FLAG is not set. */ - if (!rflag) { - env->pregs[PR_CCS] |= P_FLAG; - } -} - -void helper_rfn(CPUCRISState *env) -{ - int rflag = env->pregs[PR_CCS] & R_FLAG; - - D_LOG("rfn: erp=%x pid=%x ccs=%x btarget=%x\n", - env->pregs[PR_ERP], env->pregs[PR_PID], - env->pregs[PR_CCS], - env->btarget); - - cris_ccs_rshift(env); - - /* Set the P_FLAG only if the R_FLAG is not set. */ - if (!rflag) { - env->pregs[PR_CCS] |= P_FLAG; - } - - /* Always set the M flag. */ - env->pregs[PR_CCS] |= M_FLAG_V32; -} - -uint32_t helper_btst(CPUCRISState *env, uint32_t t0, uint32_t t1, uint32_t ccs) -{ - /* FIXME: clean this up. */ - - /* - * des ref: - * The N flag is set according to the selected bit in the dest reg. - * The Z flag is set if the selected bit and all bits to the right are - * zero. - * The X flag is cleared. - * Other flags are left untouched. - * The destination reg is not affected. - */ - unsigned int fz, sbit, bset, mask, masked_t0; - - sbit = t1 & 31; - bset = !!(t0 & (1 << sbit)); - mask = sbit == 31 ? -1 : (1 << (sbit + 1)) - 1; - masked_t0 = t0 & mask; - fz = !(masked_t0 | bset); - - /* Clear the X, N and Z flags. */ - ccs = ccs & ~(X_FLAG | N_FLAG | Z_FLAG); - if (env->pregs[PR_VR] < 32) { - ccs &= ~(V_FLAG | C_FLAG); - } - /* Set the N and Z flags accordingly. */ - ccs |= (bset << 3) | (fz << 2); - return ccs; -} - -static inline uint32_t evaluate_flags_writeback(CPUCRISState *env, - uint32_t flags, uint32_t ccs) -{ - unsigned int x, z, mask; - - /* Extended arithmetic, leave the z flag alone. */ - x = env->cc_x; - mask = env->cc_mask | X_FLAG; - if (x) { - z = flags & Z_FLAG; - mask = mask & ~z; - } - flags &= mask; - - /* all insn clear the x-flag except setf or clrf. 
*/ - ccs &= ~mask; - ccs |= flags; - return ccs; -} - -uint32_t helper_evaluate_flags_muls(CPUCRISState *env, - uint32_t ccs, uint32_t res, uint32_t mof) -{ - uint32_t flags = 0; - int64_t tmp; - int dneg; - - dneg = ((int32_t)res) < 0; - - tmp = mof; - tmp <<= 32; - tmp |= res; - if (tmp == 0) { - flags |= Z_FLAG; - } else if (tmp < 0) { - flags |= N_FLAG; - } - if ((dneg && mof != -1) || (!dneg && mof != 0)) { - flags |= V_FLAG; - } - return evaluate_flags_writeback(env, flags, ccs); -} - -uint32_t helper_evaluate_flags_mulu(CPUCRISState *env, - uint32_t ccs, uint32_t res, uint32_t mof) -{ - uint32_t flags = 0; - uint64_t tmp; - - tmp = mof; - tmp <<= 32; - tmp |= res; - if (tmp == 0) { - flags |= Z_FLAG; - } else if (tmp >> 63) { - flags |= N_FLAG; - } - if (mof) { - flags |= V_FLAG; - } - - return evaluate_flags_writeback(env, flags, ccs); -} - -uint32_t helper_evaluate_flags_mcp(CPUCRISState *env, uint32_t ccs, - uint32_t src, uint32_t dst, uint32_t res) -{ - uint32_t flags = 0; - - src = src & 0x80000000; - dst = dst & 0x80000000; - - if ((res & 0x80000000L) != 0L) { - flags |= N_FLAG; - if (!src && !dst) { - flags |= V_FLAG; - } else if (src & dst) { - flags |= R_FLAG; - } - } else { - if (res == 0L) { - flags |= Z_FLAG; - } - if (src & dst) { - flags |= V_FLAG; - } - if (dst | src) { - flags |= R_FLAG; - } - } - - return evaluate_flags_writeback(env, flags, ccs); -} - -uint32_t helper_evaluate_flags_alu_4(CPUCRISState *env, uint32_t ccs, - uint32_t src, uint32_t dst, uint32_t res) -{ - uint32_t flags = 0; - - src = src & 0x80000000; - dst = dst & 0x80000000; - - if ((res & 0x80000000L) != 0L) { - flags |= N_FLAG; - if (!src && !dst) { - flags |= V_FLAG; - } else if (src & dst) { - flags |= C_FLAG; - } - } else { - if (res == 0L) { - flags |= Z_FLAG; - } - if (src & dst) { - flags |= V_FLAG; - } - if (dst | src) { - flags |= C_FLAG; - } - } - - return evaluate_flags_writeback(env, flags, ccs); -} - -uint32_t helper_evaluate_flags_sub_4(CPUCRISState *env, uint32_t ccs, - uint32_t src, uint32_t dst, uint32_t res) -{ - uint32_t flags = 0; - - src = (~src) & 0x80000000; - dst = dst & 0x80000000; - - if ((res & 0x80000000L) != 0L) { - flags |= N_FLAG; - if (!src && !dst) { - flags |= V_FLAG; - } else if (src & dst) { - flags |= C_FLAG; - } - } else { - if (res == 0L) { - flags |= Z_FLAG; - } - if (src & dst) { - flags |= V_FLAG; - } - if (dst | src) { - flags |= C_FLAG; - } - } - - flags ^= C_FLAG; - return evaluate_flags_writeback(env, flags, ccs); -} - -uint32_t helper_evaluate_flags_move_4(CPUCRISState *env, - uint32_t ccs, uint32_t res) -{ - uint32_t flags = 0; - - if ((int32_t)res < 0) { - flags |= N_FLAG; - } else if (res == 0L) { - flags |= Z_FLAG; - } - - return evaluate_flags_writeback(env, flags, ccs); -} - -uint32_t helper_evaluate_flags_move_2(CPUCRISState *env, - uint32_t ccs, uint32_t res) -{ - uint32_t flags = 0; - - if ((int16_t)res < 0L) { - flags |= N_FLAG; - } else if (res == 0) { - flags |= Z_FLAG; - } - - return evaluate_flags_writeback(env, flags, ccs); -} - -/* - * TODO: This is expensive. We could split things up and only evaluate part of - * CCR on a need to know basis. For now, we simply re-evaluate everything. - */ -void helper_evaluate_flags(CPUCRISState *env) -{ - uint32_t src, dst, res; - uint32_t flags = 0; - - src = env->cc_src; - dst = env->cc_dest; - res = env->cc_result; - - if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP) { - src = ~src; - } - - /* - * Now, evaluate the flags. This stuff is based on - * Per Zander's CRISv10 simulator. 
- */ - switch (env->cc_size) { - case 1: - if ((res & 0x80L) != 0L) { - flags |= N_FLAG; - if (((src & 0x80L) == 0L) && ((dst & 0x80L) == 0L)) { - flags |= V_FLAG; - } else if (((src & 0x80L) != 0L) && ((dst & 0x80L) != 0L)) { - flags |= C_FLAG; - } - } else { - if ((res & 0xFFL) == 0L) { - flags |= Z_FLAG; - } - if (((src & 0x80L) != 0L) && ((dst & 0x80L) != 0L)) { - flags |= V_FLAG; - } - if ((dst & 0x80L) != 0L || (src & 0x80L) != 0L) { - flags |= C_FLAG; - } - } - break; - case 2: - if ((res & 0x8000L) != 0L) { - flags |= N_FLAG; - if (((src & 0x8000L) == 0L) && ((dst & 0x8000L) == 0L)) { - flags |= V_FLAG; - } else if (((src & 0x8000L) != 0L) && ((dst & 0x8000L) != 0L)) { - flags |= C_FLAG; - } - } else { - if ((res & 0xFFFFL) == 0L) { - flags |= Z_FLAG; - } - if (((src & 0x8000L) != 0L) && ((dst & 0x8000L) != 0L)) { - flags |= V_FLAG; - } - if ((dst & 0x8000L) != 0L || (src & 0x8000L) != 0L) { - flags |= C_FLAG; - } - } - break; - case 4: - if ((res & 0x80000000L) != 0L) { - flags |= N_FLAG; - if (((src & 0x80000000L) == 0L) && ((dst & 0x80000000L) == 0L)) { - flags |= V_FLAG; - } else if (((src & 0x80000000L) != 0L) && - ((dst & 0x80000000L) != 0L)) { - flags |= C_FLAG; - } - } else { - if (res == 0L) { - flags |= Z_FLAG; - } - if (((src & 0x80000000L) != 0L) && ((dst & 0x80000000L) != 0L)) { - flags |= V_FLAG; - } - if ((dst & 0x80000000L) != 0L || (src & 0x80000000L) != 0L) { - flags |= C_FLAG; - } - } - break; - default: - break; - } - - if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP) { - flags ^= C_FLAG; - } - - env->pregs[PR_CCS] = evaluate_flags_writeback(env, flags, - env->pregs[PR_CCS]); -} - -void helper_top_evaluate_flags(CPUCRISState *env) -{ - switch (env->cc_op) { - case CC_OP_MCP: - env->pregs[PR_CCS] - = helper_evaluate_flags_mcp(env, env->pregs[PR_CCS], - env->cc_src, env->cc_dest, - env->cc_result); - break; - case CC_OP_MULS: - env->pregs[PR_CCS] - = helper_evaluate_flags_muls(env, env->pregs[PR_CCS], - env->cc_result, env->pregs[PR_MOF]); - break; - case CC_OP_MULU: - env->pregs[PR_CCS] - = helper_evaluate_flags_mulu(env, env->pregs[PR_CCS], - env->cc_result, env->pregs[PR_MOF]); - break; - case CC_OP_MOVE: - case CC_OP_AND: - case CC_OP_OR: - case CC_OP_XOR: - case CC_OP_ASR: - case CC_OP_LSR: - case CC_OP_LSL: - switch (env->cc_size) { - case 4: - env->pregs[PR_CCS] = - helper_evaluate_flags_move_4(env, - env->pregs[PR_CCS], - env->cc_result); - break; - case 2: - env->pregs[PR_CCS] = - helper_evaluate_flags_move_2(env, - env->pregs[PR_CCS], - env->cc_result); - break; - default: - helper_evaluate_flags(env); - break; - } - break; - case CC_OP_FLAGS: - /* live. */ - break; - case CC_OP_SUB: - case CC_OP_CMP: - if (env->cc_size == 4) { - env->pregs[PR_CCS] = - helper_evaluate_flags_sub_4(env, - env->pregs[PR_CCS], - env->cc_src, env->cc_dest, - env->cc_result); - } else { - helper_evaluate_flags(env); - } - break; - default: - switch (env->cc_size) { - case 4: - env->pregs[PR_CCS] = - helper_evaluate_flags_alu_4(env, - env->pregs[PR_CCS], - env->cc_src, env->cc_dest, - env->cc_result); - break; - default: - helper_evaluate_flags(env); - break; - } - break; - } -} diff --git a/target/cris/opcode-cris.h b/target/cris/opcode-cris.h deleted file mode 100644 index 40509c88db1..00000000000 --- a/target/cris/opcode-cris.h +++ /dev/null @@ -1,355 +0,0 @@ -/* cris.h -- Header file for CRIS opcode and register tables. - Copyright (C) 2000, 2001, 2004 Free Software Foundation, Inc. - Contributed by Axis Communications AB, Lund, Sweden. 
- Originally written for GAS 1.38.1 by Mikael Asker. - Updated, BFDized and GNUified by Hans-Peter Nilsson. - -This file is part of GAS, GDB and the GNU binutils. - -GAS, GDB, and GNU binutils is free software; you can redistribute it -and/or modify it under the terms of the GNU General Public License as -published by the Free Software Foundation; either version 2, or (at your -option) any later version. - -GAS, GDB, and GNU binutils are distributed in the hope that they will be -useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, see . */ - -#ifndef TARGET_CRIS_OPCODE_CRIS_H -#define TARGET_CRIS_OPCODE_CRIS_H - -#if !defined(__STDC__) && !defined(const) -#define const -#endif - - -/* Registers. */ -#define MAX_REG (15) -#define CRIS_REG_SP (14) -#define CRIS_REG_PC (15) - -/* CPU version control of disassembly and assembly of instructions. - May affect how the instruction is assembled, at least the size of - immediate operands. */ -enum cris_insn_version_usage -{ - /* Any version. */ - cris_ver_version_all=0, - - /* Indeterminate (intended for disassembly only, or obsolete). */ - cris_ver_warning, - - /* Only for v0..3 (Etrax 1..4). */ - cris_ver_v0_3, - - /* Only for v3 or higher (ETRAX 4 and beyond). */ - cris_ver_v3p, - - /* Only for v8 (Etrax 100). */ - cris_ver_v8, - - /* Only for v8 or higher (ETRAX 100, ETRAX 100 LX). */ - cris_ver_v8p, - - /* Only for v0..10. FIXME: Not sure what to do with this. */ - cris_ver_sim_v0_10, - - /* Only for v0..10. */ - cris_ver_v0_10, - - /* Only for v3..10. (ETRAX 4, ETRAX 100 and ETRAX 100 LX). */ - cris_ver_v3_10, - - /* Only for v8..10 (ETRAX 100 and ETRAX 100 LX). */ - cris_ver_v8_10, - - /* Only for v10 (ETRAX 100 LX) and same series. */ - cris_ver_v10, - - /* Only for v10 (ETRAX 100 LX) and same series. */ - cris_ver_v10p, - - /* Only for v32 or higher (codename GUINNESS). - Of course some or all these of may change to cris_ver_v32p if/when - there's a new revision. */ - cris_ver_v32p -}; - - -/* Special registers. */ -struct cris_spec_reg -{ - const char *const name; - unsigned int number; - - /* The size of the register. */ - unsigned int reg_size; - - /* What CPU version the special register of that name is implemented - in. If cris_ver_warning, emit an unimplemented-warning. */ - enum cris_insn_version_usage applicable_version; - - /* There might be a specific warning for using a special register - here. */ - const char *const warning; -}; -extern const struct cris_spec_reg cris_spec_regs[]; - - -/* Support registers (kind of special too, but not named as such). */ -struct cris_support_reg -{ - const char *const name; - unsigned int number; -}; -extern const struct cris_support_reg cris_support_regs[]; - -/* Opcode-dependent constants. */ -#define AUTOINCR_BIT (0x04) - -/* Prefixes. 
*/ -#define BDAP_QUICK_OPCODE (0x0100) -#define BDAP_QUICK_Z_BITS (0x0e00) - -#define BIAP_OPCODE (0x0540) -#define BIAP_Z_BITS (0x0a80) - -#define DIP_OPCODE (0x0970) -#define DIP_Z_BITS (0xf280) - -#define BDAP_INDIR_LOW (0x40) -#define BDAP_INDIR_LOW_Z (0x80) -#define BDAP_INDIR_HIGH (0x09) -#define BDAP_INDIR_HIGH_Z (0x02) - -#define BDAP_INDIR_OPCODE (BDAP_INDIR_HIGH * 0x0100 + BDAP_INDIR_LOW) -#define BDAP_INDIR_Z_BITS (BDAP_INDIR_HIGH_Z * 0x100 + BDAP_INDIR_LOW_Z) -#define BDAP_PC_LOW (BDAP_INDIR_LOW + CRIS_REG_PC) -#define BDAP_INCR_HIGH (BDAP_INDIR_HIGH + AUTOINCR_BIT) - -/* No prefix must have this code for its "match" bits in the - opcode-table. "BCC .+2" will do nicely. */ -#define NO_CRIS_PREFIX 0 - -/* Definitions for condition codes. */ -#define CC_CC 0x0 -#define CC_HS 0x0 -#define CC_CS 0x1 -#define CC_LO 0x1 -#define CC_NE 0x2 -#define CC_EQ 0x3 -#define CC_VC 0x4 -#define CC_VS 0x5 -#define CC_PL 0x6 -#define CC_MI 0x7 -#define CC_LS 0x8 -#define CC_HI 0x9 -#define CC_GE 0xA -#define CC_LT 0xB -#define CC_GT 0xC -#define CC_LE 0xD -#define CC_A 0xE -#define CC_EXT 0xF - -/* A table of strings "cc", "cs"... indexed with condition code - values as above. */ -extern const char *const cris_cc_strings[]; - -/* Bcc quick. */ -#define BRANCH_QUICK_LOW (0) -#define BRANCH_QUICK_HIGH (0) -#define BRANCH_QUICK_OPCODE (BRANCH_QUICK_HIGH * 0x0100 + BRANCH_QUICK_LOW) -#define BRANCH_QUICK_Z_BITS (0x0F00) - -/* BA quick. */ -#define BA_QUICK_HIGH (BRANCH_QUICK_HIGH + CC_A * 0x10) -#define BA_QUICK_OPCODE (BA_QUICK_HIGH * 0x100 + BRANCH_QUICK_LOW) - -/* Bcc [PC+]. */ -#define BRANCH_PC_LOW (0xFF) -#define BRANCH_INCR_HIGH (0x0D) -#define BA_PC_INCR_OPCODE \ - ((BRANCH_INCR_HIGH + CC_A * 0x10) * 0x0100 + BRANCH_PC_LOW) - -/* Jump. */ -/* Note that old versions generated special register 8 (in high bits) - and not-that-old versions recognized it as a jump-instruction. - That opcode now belongs to JUMPU. */ -#define JUMP_INDIR_OPCODE (0x0930) -#define JUMP_INDIR_Z_BITS (0xf2c0) -#define JUMP_PC_INCR_OPCODE \ - (JUMP_INDIR_OPCODE + AUTOINCR_BIT * 0x0100 + CRIS_REG_PC) - -#define MOVE_M_TO_PREG_OPCODE 0x0a30 -#define MOVE_M_TO_PREG_ZBITS 0x01c0 - -/* BDAP.D N,PC. */ -#define MOVE_PC_INCR_OPCODE_PREFIX \ - (((BDAP_INCR_HIGH | (CRIS_REG_PC << 4)) << 8) | BDAP_PC_LOW | (2 << 4)) -#define MOVE_PC_INCR_OPCODE_SUFFIX \ - (MOVE_M_TO_PREG_OPCODE | CRIS_REG_PC | (AUTOINCR_BIT << 8)) - -#define JUMP_PC_INCR_OPCODE_V32 (0x0DBF) - -/* BA DWORD (V32). */ -#define BA_DWORD_OPCODE (0x0EBF) - -/* Nop. */ -#define NOP_OPCODE (0x050F) -#define NOP_Z_BITS (0xFFFF ^ NOP_OPCODE) - -#define NOP_OPCODE_V32 (0x05B0) -#define NOP_Z_BITS_V32 (0xFFFF ^ NOP_OPCODE_V32) - -/* For the compatibility mode, let's use "MOVE R0,P0". Doesn't affect - registers or flags. Unfortunately shuts off interrupts for one cycle - for < v32, but there doesn't seem to be any alternative without that - effect. */ -#define NOP_OPCODE_COMMON (0x630) -#define NOP_OPCODE_ZBITS_COMMON (0xffff & ~NOP_OPCODE_COMMON) - -/* LAPC.D */ -#define LAPC_DWORD_OPCODE (0x0D7F) -#define LAPC_DWORD_Z_BITS (0x0fff & ~LAPC_DWORD_OPCODE) - -/* Structure of an opcode table entry. */ -enum cris_imm_oprnd_size_type -{ - /* No size is applicable. */ - SIZE_NONE, - - /* Always 32 bits. */ - SIZE_FIX_32, - - /* Indicated by size of special register. */ - SIZE_SPEC_REG, - - /* Indicated by size field, signed. */ - SIZE_FIELD_SIGNED, - - /* Indicated by size field, unsigned. */ - SIZE_FIELD_UNSIGNED, - - /* Indicated by size field, no sign implied. 
*/ - SIZE_FIELD -}; - -/* For GDB. FIXME: Is this the best way to handle opcode - interpretation? */ -enum cris_op_type -{ - cris_not_implemented_op = 0, - cris_abs_op, - cris_addi_op, - cris_asr_op, - cris_asrq_op, - cris_ax_ei_setf_op, - cris_bdap_prefix, - cris_biap_prefix, - cris_break_op, - cris_btst_nop_op, - cris_clearf_di_op, - cris_dip_prefix, - cris_dstep_logshift_mstep_neg_not_op, - cris_eight_bit_offset_branch_op, - cris_move_mem_to_reg_movem_op, - cris_move_reg_to_mem_movem_op, - cris_move_to_preg_op, - cris_muls_op, - cris_mulu_op, - cris_none_reg_mode_add_sub_cmp_and_or_move_op, - cris_none_reg_mode_clear_test_op, - cris_none_reg_mode_jump_op, - cris_none_reg_mode_move_from_preg_op, - cris_quick_mode_add_sub_op, - cris_quick_mode_and_cmp_move_or_op, - cris_quick_mode_bdap_prefix, - cris_reg_mode_add_sub_cmp_and_or_move_op, - cris_reg_mode_clear_op, - cris_reg_mode_jump_op, - cris_reg_mode_move_from_preg_op, - cris_reg_mode_test_op, - cris_scc_op, - cris_sixteen_bit_offset_branch_op, - cris_three_operand_add_sub_cmp_and_or_op, - cris_three_operand_bound_op, - cris_two_operand_bound_op, - cris_xor_op -}; - -struct cris_opcode -{ - /* The name of the insn. */ - const char *name; - - /* Bits that must be 1 for a match. */ - unsigned int match; - - /* Bits that must be 0 for a match. */ - unsigned int lose; - - /* See the table in "opcodes/cris-opc.c". */ - const char *args; - - /* Nonzero if this is a delayed branch instruction. */ - char delayed; - - /* Size of immediate operands. */ - enum cris_imm_oprnd_size_type imm_oprnd_size; - - /* Indicates which version this insn was first implemented in. */ - enum cris_insn_version_usage applicable_version; - - /* What kind of operation this is. */ - enum cris_op_type op; -}; -extern const struct cris_opcode cris_opcodes[]; - - -/* These macros are for the target-specific flags in disassemble_info - used at disassembly. */ - -/* This insn accesses memory. This flag is more trustworthy than - checking insn_type for "dis_dref" which does not work for - e.g. "JSR [foo]". */ -#define CRIS_DIS_FLAG_MEMREF (1 << 0) - -/* The "target" field holds a register number. */ -#define CRIS_DIS_FLAG_MEM_TARGET_IS_REG (1 << 1) - -/* The "target2" field holds a register number; add it to "target". */ -#define CRIS_DIS_FLAG_MEM_TARGET2_IS_REG (1 << 2) - -/* Yet another add-on: the register in "target2" must be multiplied - by 2 before adding to "target". */ -#define CRIS_DIS_FLAG_MEM_TARGET2_MULT2 (1 << 3) - -/* Yet another add-on: the register in "target2" must be multiplied - by 4 (mutually exclusive with .._MULT2). */ -#define CRIS_DIS_FLAG_MEM_TARGET2_MULT4 (1 << 4) - -/* The register in "target2" is an indirect memory reference (of the - register there), add to "target". Assumed size is dword (mutually - exclusive with .._MULT[24]). */ -#define CRIS_DIS_FLAG_MEM_TARGET2_MEM (1 << 5) - -/* Add-on to CRIS_DIS_FLAG_MEM_TARGET2_MEM; the memory access is "byte"; - sign-extended before adding to "target". */ -#define CRIS_DIS_FLAG_MEM_TARGET2_MEM_BYTE (1 << 6) - -/* Add-on to CRIS_DIS_FLAG_MEM_TARGET2_MEM; the memory access is "word"; - sign-extended before adding to "target". 
*/ -#define CRIS_DIS_FLAG_MEM_TARGET2_MEM_WORD (1 << 7) - -#endif /* TARGET_CRIS_OPCODE_CRIS_H */ - -/* - * Local variables: - * eval: (c-set-style "gnu") - * indent-tabs-mode: t - * End: - */ diff --git a/target/cris/translate.c b/target/cris/translate.c deleted file mode 100644 index a30c67eb075..00000000000 --- a/target/cris/translate.c +++ /dev/null @@ -1,3252 +0,0 @@ -/* - * CRIS emulation for qemu: main translation routines. - * - * Copyright (c) 2008 AXIS Communications AB - * Written by Edgar E. Iglesias. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -/* - * FIXME: - * The condition code translation is in need of attention. - */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "exec/exec-all.h" -#include "tcg/tcg-op.h" -#include "exec/helper-proto.h" -#include "mmu.h" -#include "exec/translator.h" -#include "crisv32-decode.h" -#include "qemu/qemu-print.h" -#include "exec/helper-gen.h" -#include "exec/log.h" - -#define HELPER_H "helper.h" -#include "exec/helper-info.c.inc" -#undef HELPER_H - - -#define DISAS_CRIS 0 -#if DISAS_CRIS -# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) -#else -# define LOG_DIS(...) do { } while (0) -#endif - -#define D(x) -#define BUG() (gen_BUG(dc, __FILE__, __LINE__)) -#define BUG_ON(x) ({if (x) BUG();}) - -/* - * Target-specific is_jmp field values - */ -/* Only pc was modified dynamically */ -#define DISAS_JUMP DISAS_TARGET_0 -/* Cpu state was modified dynamically, including pc */ -#define DISAS_UPDATE DISAS_TARGET_1 -/* Cpu state was modified dynamically, excluding pc -- use npc */ -#define DISAS_UPDATE_NEXT DISAS_TARGET_2 -/* PC update for delayed branch, see cpustate_changed otherwise */ -#define DISAS_DBRANCH DISAS_TARGET_3 - -/* Used by the decoder. */ -#define EXTRACT_FIELD(src, start, end) \ - (((src) >> start) & ((1 << (end - start + 1)) - 1)) - -#define CC_MASK_NZ 0xc -#define CC_MASK_NZV 0xe -#define CC_MASK_NZVC 0xf -#define CC_MASK_RNZV 0x10e - -static TCGv cpu_R[16]; -static TCGv cpu_PR[16]; -static TCGv cc_x; -static TCGv cc_src; -static TCGv cc_dest; -static TCGv cc_result; -static TCGv cc_op; -static TCGv cc_size; -static TCGv cc_mask; - -static TCGv env_btaken; -static TCGv env_btarget; -static TCGv env_pc; - -/* This is the state at translation time. */ -typedef struct DisasContext { - DisasContextBase base; - - CRISCPU *cpu; - target_ulong pc, ppc; - int mem_index; - - /* Decoder. */ - unsigned int (*decoder)(CPUCRISState *env, struct DisasContext *dc); - uint32_t ir; - uint32_t opcode; - unsigned int op1; - unsigned int op2; - unsigned int zsize, zzsize; - unsigned int mode; - unsigned int postinc; - - unsigned int size; - unsigned int src; - unsigned int dst; - unsigned int cond; - - int update_cc; - int cc_op; - int cc_size; - uint32_t cc_mask; - - int cc_size_uptodate; /* -1 invalid or last written value. */ - - int cc_x_uptodate; /* 1 - ccs, 2 - known | X_FLAG. 0 not up-to-date. 
*/ - int flags_uptodate; /* Whether or not $ccs is up-to-date. */ - int flags_x; - - int clear_x; /* Clear x after this insn? */ - int clear_prefix; /* Clear prefix after this insn? */ - int clear_locked_irq; /* Clear the irq lockout. */ - int cpustate_changed; - unsigned int tb_flags; /* tb dependent flags. */ - -#define JMP_NOJMP 0 -#define JMP_DIRECT 1 -#define JMP_DIRECT_CC 2 -#define JMP_INDIRECT 3 - int jmp; /* 0=nojmp, 1=direct, 2=indirect. */ - uint32_t jmp_pc; - - int delayed_branch; -} DisasContext; - -static void gen_BUG(DisasContext *dc, const char *file, int line) -{ - cpu_abort(CPU(dc->cpu), "%s:%d pc=%x\n", file, line, dc->pc); -} - -static const char * const regnames_v32[] = -{ - "$r0", "$r1", "$r2", "$r3", - "$r4", "$r5", "$r6", "$r7", - "$r8", "$r9", "$r10", "$r11", - "$r12", "$r13", "$sp", "$acr", -}; - -static const char * const pregnames_v32[] = -{ - "$bz", "$vr", "$pid", "$srs", - "$wz", "$exs", "$eda", "$mof", - "$dz", "$ebp", "$erp", "$srp", - "$nrp", "$ccs", "$usp", "$spc", -}; - -/* We need this table to handle preg-moves with implicit width. */ -static const int preg_sizes[] = { - 1, /* bz. */ - 1, /* vr. */ - 4, /* pid. */ - 1, /* srs. */ - 2, /* wz. */ - 4, 4, 4, - 4, 4, 4, 4, - 4, 4, 4, 4, -}; - -#define t_gen_mov_TN_env(tn, member) \ - tcg_gen_ld_tl(tn, tcg_env, offsetof(CPUCRISState, member)) -#define t_gen_mov_env_TN(member, tn) \ - tcg_gen_st_tl(tn, tcg_env, offsetof(CPUCRISState, member)) -#define t_gen_movi_env_TN(member, c) \ - t_gen_mov_env_TN(member, tcg_constant_tl(c)) - -static inline void t_gen_mov_TN_preg(TCGv tn, int r) -{ - assert(r >= 0 && r <= 15); - if (r == PR_BZ || r == PR_WZ || r == PR_DZ) { - tcg_gen_movi_tl(tn, 0); - } else if (r == PR_VR) { - tcg_gen_movi_tl(tn, 32); - } else { - tcg_gen_mov_tl(tn, cpu_PR[r]); - } -} -static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn) -{ - assert(r >= 0 && r <= 15); - if (r == PR_BZ || r == PR_WZ || r == PR_DZ) { - return; - } else if (r == PR_SRS) { - tcg_gen_andi_tl(cpu_PR[r], tn, 3); - } else { - if (r == PR_PID) { - gen_helper_tlb_flush_pid(tcg_env, tn); - } - if (dc->tb_flags & S_FLAG && r == PR_SPC) { - gen_helper_spc_write(tcg_env, tn); - } else if (r == PR_CCS) { - dc->cpustate_changed = 1; - } - tcg_gen_mov_tl(cpu_PR[r], tn); - } -} - -/* Sign extend at translation time. */ -static int sign_extend(unsigned int val, unsigned int width) -{ - int sval; - - /* LSL. */ - val <<= 31 - width; - sval = val; - /* ASR. 
*/ - sval >>= 31 - width; - return sval; -} - -static int cris_fetch(CPUCRISState *env, DisasContext *dc, uint32_t addr, - unsigned int size, bool sign) -{ - int r; - - switch (size) { - case 4: - r = translator_ldl(env, &dc->base, addr); - break; - case 2: - r = translator_lduw(env, &dc->base, addr); - if (sign) { - r = (int16_t)r; - } - break; - case 1: - r = translator_ldub(env, &dc->base, addr); - if (sign) { - r = (int8_t)r; - } - break; - default: - g_assert_not_reached(); - } - return r; -} - -static void cris_lock_irq(DisasContext *dc) -{ - dc->clear_locked_irq = 0; - t_gen_movi_env_TN(locked_irq, 1); -} - -static inline void t_gen_raise_exception(uint32_t index) -{ - gen_helper_raise_exception(tcg_env, tcg_constant_i32(index)); -} - -static void t_gen_lsl(TCGv d, TCGv a, TCGv b) -{ - TCGv t0, t_31; - - t0 = tcg_temp_new(); - t_31 = tcg_constant_tl(31); - tcg_gen_shl_tl(d, a, b); - - tcg_gen_sub_tl(t0, t_31, b); - tcg_gen_sar_tl(t0, t0, t_31); - tcg_gen_and_tl(t0, t0, d); - tcg_gen_xor_tl(d, d, t0); -} - -static void t_gen_lsr(TCGv d, TCGv a, TCGv b) -{ - TCGv t0, t_31; - - t0 = tcg_temp_new(); - t_31 = tcg_temp_new(); - tcg_gen_shr_tl(d, a, b); - - tcg_gen_movi_tl(t_31, 31); - tcg_gen_sub_tl(t0, t_31, b); - tcg_gen_sar_tl(t0, t0, t_31); - tcg_gen_and_tl(t0, t0, d); - tcg_gen_xor_tl(d, d, t0); -} - -static void t_gen_asr(TCGv d, TCGv a, TCGv b) -{ - TCGv t0, t_31; - - t0 = tcg_temp_new(); - t_31 = tcg_temp_new(); - tcg_gen_sar_tl(d, a, b); - - tcg_gen_movi_tl(t_31, 31); - tcg_gen_sub_tl(t0, t_31, b); - tcg_gen_sar_tl(t0, t0, t_31); - tcg_gen_or_tl(d, d, t0); -} - -static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b) -{ - TCGv t = tcg_temp_new(); - - /* - * d <<= 1 - * if (d >= s) - * d -= s; - */ - tcg_gen_shli_tl(d, a, 1); - tcg_gen_sub_tl(t, d, b); - tcg_gen_movcond_tl(TCG_COND_GEU, d, d, b, t, d); -} - -static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs) -{ - TCGv t; - - /* - * d <<= 1 - * if (n) - * d += s; - */ - t = tcg_temp_new(); - tcg_gen_shli_tl(d, a, 1); - tcg_gen_shli_tl(t, ccs, 31 - 3); - tcg_gen_sari_tl(t, t, 31); - tcg_gen_and_tl(t, t, b); - tcg_gen_add_tl(d, d, t); -} - -/* Extended arithmetic on CRIS. */ -static inline void t_gen_add_flag(TCGv d, int flag) -{ - TCGv c; - - c = tcg_temp_new(); - t_gen_mov_TN_preg(c, PR_CCS); - /* Propagate carry into d. */ - tcg_gen_andi_tl(c, c, 1 << flag); - if (flag) { - tcg_gen_shri_tl(c, c, flag); - } - tcg_gen_add_tl(d, d, c); -} - -static inline void t_gen_addx_carry(DisasContext *dc, TCGv d) -{ - if (dc->flags_x) { - TCGv c = tcg_temp_new(); - - t_gen_mov_TN_preg(c, PR_CCS); - /* C flag is already at bit 0. */ - tcg_gen_andi_tl(c, c, C_FLAG); - tcg_gen_add_tl(d, d, c); - } -} - -static inline void t_gen_subx_carry(DisasContext *dc, TCGv d) -{ - if (dc->flags_x) { - TCGv c = tcg_temp_new(); - - t_gen_mov_TN_preg(c, PR_CCS); - /* C flag is already at bit 0. */ - tcg_gen_andi_tl(c, c, C_FLAG); - tcg_gen_sub_tl(d, d, c); - } -} - -/* Swap the two bytes within each half word of the s operand. - T0 = ((T0 << 8) & 0xff00ff00) | ((T0 >> 8) & 0x00ff00ff) */ -static inline void t_gen_swapb(TCGv d, TCGv s) -{ - TCGv t, org_s; - - t = tcg_temp_new(); - org_s = tcg_temp_new(); - - /* d and s may refer to the same object. */ - tcg_gen_mov_tl(org_s, s); - tcg_gen_shli_tl(t, org_s, 8); - tcg_gen_andi_tl(d, t, 0xff00ff00); - tcg_gen_shri_tl(t, org_s, 8); - tcg_gen_andi_tl(t, t, 0x00ff00ff); - tcg_gen_or_tl(d, d, t); -} - -/* Swap the halfwords of the s operand. 
*/ -static inline void t_gen_swapw(TCGv d, TCGv s) -{ - TCGv t; - /* d and s refer the same object. */ - t = tcg_temp_new(); - tcg_gen_mov_tl(t, s); - tcg_gen_shli_tl(d, t, 16); - tcg_gen_shri_tl(t, t, 16); - tcg_gen_or_tl(d, d, t); -} - -/* - * Reverse the bits within each byte. - * - * T0 = ((T0 << 7) & 0x80808080) - * | ((T0 << 5) & 0x40404040) - * | ((T0 << 3) & 0x20202020) - * | ((T0 << 1) & 0x10101010) - * | ((T0 >> 1) & 0x08080808) - * | ((T0 >> 3) & 0x04040404) - * | ((T0 >> 5) & 0x02020202) - * | ((T0 >> 7) & 0x01010101); - */ -static void t_gen_swapr(TCGv d, TCGv s) -{ - static const struct { - int shift; /* LSL when positive, LSR when negative. */ - uint32_t mask; - } bitrev[] = { - {7, 0x80808080}, - {5, 0x40404040}, - {3, 0x20202020}, - {1, 0x10101010}, - {-1, 0x08080808}, - {-3, 0x04040404}, - {-5, 0x02020202}, - {-7, 0x01010101} - }; - int i; - TCGv t, org_s; - - /* d and s refer the same object. */ - t = tcg_temp_new(); - org_s = tcg_temp_new(); - tcg_gen_mov_tl(org_s, s); - - tcg_gen_shli_tl(t, org_s, bitrev[0].shift); - tcg_gen_andi_tl(d, t, bitrev[0].mask); - for (i = 1; i < ARRAY_SIZE(bitrev); i++) { - if (bitrev[i].shift >= 0) { - tcg_gen_shli_tl(t, org_s, bitrev[i].shift); - } else { - tcg_gen_shri_tl(t, org_s, -bitrev[i].shift); - } - tcg_gen_andi_tl(t, t, bitrev[i].mask); - tcg_gen_or_tl(d, d, t); - } -} - -static bool use_goto_tb(DisasContext *dc, target_ulong dest) -{ - return translator_use_goto_tb(&dc->base, dest); -} - -static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) -{ - if (use_goto_tb(dc, dest)) { - tcg_gen_goto_tb(n); - tcg_gen_movi_tl(env_pc, dest); - tcg_gen_exit_tb(dc->base.tb, n); - } else { - tcg_gen_movi_tl(env_pc, dest); - tcg_gen_lookup_and_goto_ptr(); - } -} - -static inline void cris_clear_x_flag(DisasContext *dc) -{ - if (dc->flags_x) { - dc->flags_uptodate = 0; - } - dc->flags_x = 0; -} - -static void cris_flush_cc_state(DisasContext *dc) -{ - if (dc->cc_size_uptodate != dc->cc_size) { - tcg_gen_movi_tl(cc_size, dc->cc_size); - dc->cc_size_uptodate = dc->cc_size; - } - tcg_gen_movi_tl(cc_op, dc->cc_op); - tcg_gen_movi_tl(cc_mask, dc->cc_mask); -} - -static void cris_evaluate_flags(DisasContext *dc) -{ - if (dc->flags_uptodate) { - return; - } - - cris_flush_cc_state(dc); - - switch (dc->cc_op) { - case CC_OP_MCP: - gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS], tcg_env, - cpu_PR[PR_CCS], cc_src, - cc_dest, cc_result); - break; - case CC_OP_MULS: - gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS], tcg_env, - cpu_PR[PR_CCS], cc_result, - cpu_PR[PR_MOF]); - break; - case CC_OP_MULU: - gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS], tcg_env, - cpu_PR[PR_CCS], cc_result, - cpu_PR[PR_MOF]); - break; - case CC_OP_MOVE: - case CC_OP_AND: - case CC_OP_OR: - case CC_OP_XOR: - case CC_OP_ASR: - case CC_OP_LSR: - case CC_OP_LSL: - switch (dc->cc_size) { - case 4: - gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS], - tcg_env, cpu_PR[PR_CCS], cc_result); - break; - case 2: - gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS], - tcg_env, cpu_PR[PR_CCS], cc_result); - break; - default: - gen_helper_evaluate_flags(tcg_env); - break; - } - break; - case CC_OP_FLAGS: - /* live. 
*/ - break; - case CC_OP_SUB: - case CC_OP_CMP: - if (dc->cc_size == 4) { - gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS], tcg_env, - cpu_PR[PR_CCS], cc_src, cc_dest, cc_result); - } else { - gen_helper_evaluate_flags(tcg_env); - } - - break; - default: - switch (dc->cc_size) { - case 4: - gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS], tcg_env, - cpu_PR[PR_CCS], cc_src, cc_dest, cc_result); - break; - default: - gen_helper_evaluate_flags(tcg_env); - break; - } - break; - } - - if (dc->flags_x) { - tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], X_FLAG); - } else if (dc->cc_op == CC_OP_FLAGS) { - tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~X_FLAG); - } - dc->flags_uptodate = 1; -} - -static void cris_cc_mask(DisasContext *dc, unsigned int mask) -{ - uint32_t ovl; - - if (!mask) { - dc->update_cc = 0; - return; - } - - /* Check if we need to evaluate the condition codes due to - CC overlaying. */ - ovl = (dc->cc_mask ^ mask) & ~mask; - if (ovl) { - /* TODO: optimize this case. It trigs all the time. */ - cris_evaluate_flags(dc); - } - dc->cc_mask = mask; - dc->update_cc = 1; -} - -static void cris_update_cc_op(DisasContext *dc, int op, int size) -{ - dc->cc_op = op; - dc->cc_size = size; - dc->flags_uptodate = 0; -} - -static inline void cris_update_cc_x(DisasContext *dc) -{ - /* Save the x flag state at the time of the cc snapshot. */ - if (dc->cc_x_uptodate == (2 | dc->flags_x)) { - return; - } - tcg_gen_movi_tl(cc_x, dc->flags_x); - dc->cc_x_uptodate = 2 | dc->flags_x; -} - -/* Update cc prior to executing ALU op. Needs source operands untouched. */ -static void cris_pre_alu_update_cc(DisasContext *dc, int op, - TCGv dst, TCGv src, int size) -{ - if (dc->update_cc) { - cris_update_cc_op(dc, op, size); - tcg_gen_mov_tl(cc_src, src); - - if (op != CC_OP_MOVE - && op != CC_OP_AND - && op != CC_OP_OR - && op != CC_OP_XOR - && op != CC_OP_ASR - && op != CC_OP_LSR - && op != CC_OP_LSL) { - tcg_gen_mov_tl(cc_dest, dst); - } - - cris_update_cc_x(dc); - } -} - -/* Update cc after executing ALU op. needs the result. */ -static inline void cris_update_result(DisasContext *dc, TCGv res) -{ - if (dc->update_cc) { - tcg_gen_mov_tl(cc_result, res); - } -} - -/* Returns one if the write back stage should execute. */ -static void cris_alu_op_exec(DisasContext *dc, int op, - TCGv dst, TCGv a, TCGv b, int size) -{ - /* Emit the ALU insns. */ - switch (op) { - case CC_OP_ADD: - tcg_gen_add_tl(dst, a, b); - /* Extended arithmetic. */ - t_gen_addx_carry(dc, dst); - break; - case CC_OP_ADDC: - tcg_gen_add_tl(dst, a, b); - t_gen_add_flag(dst, 0); /* C_FLAG. */ - break; - case CC_OP_MCP: - tcg_gen_add_tl(dst, a, b); - t_gen_add_flag(dst, 8); /* R_FLAG. */ - break; - case CC_OP_SUB: - tcg_gen_sub_tl(dst, a, b); - /* Extended arithmetic. */ - t_gen_subx_carry(dc, dst); - break; - case CC_OP_MOVE: - tcg_gen_mov_tl(dst, b); - break; - case CC_OP_OR: - tcg_gen_or_tl(dst, a, b); - break; - case CC_OP_AND: - tcg_gen_and_tl(dst, a, b); - break; - case CC_OP_XOR: - tcg_gen_xor_tl(dst, a, b); - break; - case CC_OP_LSL: - t_gen_lsl(dst, a, b); - break; - case CC_OP_LSR: - t_gen_lsr(dst, a, b); - break; - case CC_OP_ASR: - t_gen_asr(dst, a, b); - break; - case CC_OP_NEG: - tcg_gen_neg_tl(dst, b); - /* Extended arithmetic. 
*/ - t_gen_subx_carry(dc, dst); - break; - case CC_OP_LZ: - tcg_gen_clzi_tl(dst, b, TARGET_LONG_BITS); - break; - case CC_OP_MULS: - tcg_gen_muls2_tl(dst, cpu_PR[PR_MOF], a, b); - break; - case CC_OP_MULU: - tcg_gen_mulu2_tl(dst, cpu_PR[PR_MOF], a, b); - break; - case CC_OP_DSTEP: - t_gen_cris_dstep(dst, a, b); - break; - case CC_OP_MSTEP: - t_gen_cris_mstep(dst, a, b, cpu_PR[PR_CCS]); - break; - case CC_OP_BOUND: - tcg_gen_movcond_tl(TCG_COND_LEU, dst, a, b, a, b); - break; - case CC_OP_CMP: - tcg_gen_sub_tl(dst, a, b); - /* Extended arithmetic. */ - t_gen_subx_carry(dc, dst); - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, "illegal ALU op.\n"); - BUG(); - break; - } - - if (size == 1) { - tcg_gen_andi_tl(dst, dst, 0xff); - } else if (size == 2) { - tcg_gen_andi_tl(dst, dst, 0xffff); - } -} - -static void cris_alu(DisasContext *dc, int op, - TCGv d, TCGv op_a, TCGv op_b, int size) -{ - TCGv tmp; - int writeback; - - writeback = 1; - - if (op == CC_OP_CMP) { - tmp = tcg_temp_new(); - writeback = 0; - } else if (size == 4) { - tmp = d; - writeback = 0; - } else { - tmp = tcg_temp_new(); - } - - - cris_pre_alu_update_cc(dc, op, op_a, op_b, size); - cris_alu_op_exec(dc, op, tmp, op_a, op_b, size); - cris_update_result(dc, tmp); - - /* Writeback. */ - if (writeback) { - if (size == 1) { - tcg_gen_andi_tl(d, d, ~0xff); - } else { - tcg_gen_andi_tl(d, d, ~0xffff); - } - tcg_gen_or_tl(d, d, tmp); - } -} - -static int arith_cc(DisasContext *dc) -{ - if (dc->update_cc) { - switch (dc->cc_op) { - case CC_OP_ADDC: return 1; - case CC_OP_ADD: return 1; - case CC_OP_SUB: return 1; - case CC_OP_DSTEP: return 1; - case CC_OP_LSL: return 1; - case CC_OP_LSR: return 1; - case CC_OP_ASR: return 1; - case CC_OP_CMP: return 1; - case CC_OP_NEG: return 1; - case CC_OP_OR: return 1; - case CC_OP_AND: return 1; - case CC_OP_XOR: return 1; - case CC_OP_MULU: return 1; - case CC_OP_MULS: return 1; - default: - return 0; - } - } - return 0; -} - -static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond) -{ - int arith_opt, move_opt; - - /* TODO: optimize more condition codes. */ - - /* - * If the flags are live, we've gotta look into the bits of CCS. - * Otherwise, if we just did an arithmetic operation we try to - * evaluate the condition code faster. - * - * When this function is done, T0 should be non-zero if the condition - * code is true. 
- */ - arith_opt = arith_cc(dc) && !dc->flags_uptodate; - move_opt = (dc->cc_op == CC_OP_MOVE); - switch (cond) { - case CC_EQ: - if ((arith_opt || move_opt) - && dc->cc_x_uptodate != (2 | X_FLAG)) { - tcg_gen_setcondi_tl(TCG_COND_EQ, cc, cc_result, 0); - } else { - cris_evaluate_flags(dc); - tcg_gen_andi_tl(cc, - cpu_PR[PR_CCS], Z_FLAG); - } - break; - case CC_NE: - if ((arith_opt || move_opt) - && dc->cc_x_uptodate != (2 | X_FLAG)) { - tcg_gen_mov_tl(cc, cc_result); - } else { - cris_evaluate_flags(dc); - tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], - Z_FLAG); - tcg_gen_andi_tl(cc, cc, Z_FLAG); - } - break; - case CC_CS: - cris_evaluate_flags(dc); - tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG); - break; - case CC_CC: - cris_evaluate_flags(dc); - tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], C_FLAG); - tcg_gen_andi_tl(cc, cc, C_FLAG); - break; - case CC_VS: - cris_evaluate_flags(dc); - tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], V_FLAG); - break; - case CC_VC: - cris_evaluate_flags(dc); - tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], - V_FLAG); - tcg_gen_andi_tl(cc, cc, V_FLAG); - break; - case CC_PL: - if (arith_opt || move_opt) { - int bits = 31; - - if (dc->cc_size == 1) { - bits = 7; - } else if (dc->cc_size == 2) { - bits = 15; - } - - tcg_gen_shri_tl(cc, cc_result, bits); - tcg_gen_xori_tl(cc, cc, 1); - } else { - cris_evaluate_flags(dc); - tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], - N_FLAG); - tcg_gen_andi_tl(cc, cc, N_FLAG); - } - break; - case CC_MI: - if (arith_opt || move_opt) { - int bits = 31; - - if (dc->cc_size == 1) { - bits = 7; - } else if (dc->cc_size == 2) { - bits = 15; - } - - tcg_gen_shri_tl(cc, cc_result, bits); - tcg_gen_andi_tl(cc, cc, 1); - } else { - cris_evaluate_flags(dc); - tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], - N_FLAG); - } - break; - case CC_LS: - cris_evaluate_flags(dc); - tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], - C_FLAG | Z_FLAG); - break; - case CC_HI: - cris_evaluate_flags(dc); - { - TCGv tmp; - - tmp = tcg_temp_new(); - tcg_gen_xori_tl(tmp, cpu_PR[PR_CCS], - C_FLAG | Z_FLAG); - /* Overlay the C flag on top of the Z. */ - tcg_gen_shli_tl(cc, tmp, 2); - tcg_gen_and_tl(cc, tmp, cc); - tcg_gen_andi_tl(cc, cc, Z_FLAG); - } - break; - case CC_GE: - cris_evaluate_flags(dc); - /* Overlay the V flag on top of the N. */ - tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2); - tcg_gen_xor_tl(cc, - cpu_PR[PR_CCS], cc); - tcg_gen_andi_tl(cc, cc, N_FLAG); - tcg_gen_xori_tl(cc, cc, N_FLAG); - break; - case CC_LT: - cris_evaluate_flags(dc); - /* Overlay the V flag on top of the N. */ - tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2); - tcg_gen_xor_tl(cc, - cpu_PR[PR_CCS], cc); - tcg_gen_andi_tl(cc, cc, N_FLAG); - break; - case CC_GT: - cris_evaluate_flags(dc); - { - TCGv n, z; - - n = tcg_temp_new(); - z = tcg_temp_new(); - - /* To avoid a shift we overlay everything on - the V flag. */ - tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2); - tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1); - /* invert Z. */ - tcg_gen_xori_tl(z, z, 2); - - tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]); - tcg_gen_xori_tl(n, n, 2); - tcg_gen_and_tl(cc, z, n); - tcg_gen_andi_tl(cc, cc, 2); - } - break; - case CC_LE: - cris_evaluate_flags(dc); - { - TCGv n, z; - - n = tcg_temp_new(); - z = tcg_temp_new(); - - /* To avoid a shift we overlay everything on - the V flag. 
*/ - tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2); - tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1); - - tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]); - tcg_gen_or_tl(cc, z, n); - tcg_gen_andi_tl(cc, cc, 2); - } - break; - case CC_P: - cris_evaluate_flags(dc); - tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], P_FLAG); - break; - case CC_A: - tcg_gen_movi_tl(cc, 1); - break; - default: - BUG(); - break; - }; -} - -static void cris_store_direct_jmp(DisasContext *dc) -{ - /* Store the direct jmp state into the cpu-state. */ - if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) { - if (dc->jmp == JMP_DIRECT) { - tcg_gen_movi_tl(env_btaken, 1); - } - tcg_gen_movi_tl(env_btarget, dc->jmp_pc); - dc->jmp = JMP_INDIRECT; - } -} - -static void cris_prepare_cc_branch (DisasContext *dc, - int offset, int cond) -{ - /* This helps us re-schedule the micro-code to insns in delay-slots - before the actual jump. */ - dc->delayed_branch = 2; - dc->jmp = JMP_DIRECT_CC; - dc->jmp_pc = dc->pc + offset; - - gen_tst_cc(dc, env_btaken, cond); - tcg_gen_movi_tl(env_btarget, dc->jmp_pc); -} - - -/* jumps, when the dest is in a live reg for example. Direct should be set - when the dest addr is constant to allow tb chaining. */ -static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type) -{ - /* This helps us re-schedule the micro-code to insns in delay-slots - before the actual jump. */ - dc->delayed_branch = 2; - dc->jmp = type; - if (type == JMP_INDIRECT) { - tcg_gen_movi_tl(env_btaken, 1); - } -} - -static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr) -{ - /* If we get a fault on a delayslot we must keep the jmp state in - the cpu-state to be able to re-execute the jmp. */ - if (dc->delayed_branch == 1) { - cris_store_direct_jmp(dc); - } - - tcg_gen_qemu_ld_i64(dst, addr, dc->mem_index, MO_TEUQ); -} - -static void gen_load(DisasContext *dc, TCGv dst, TCGv addr, - unsigned int size, int sign) -{ - /* If we get a fault on a delayslot we must keep the jmp state in - the cpu-state to be able to re-execute the jmp. */ - if (dc->delayed_branch == 1) { - cris_store_direct_jmp(dc); - } - - tcg_gen_qemu_ld_tl(dst, addr, dc->mem_index, - MO_TE + ctz32(size) + (sign ? MO_SIGN : 0)); -} - -static void gen_store (DisasContext *dc, TCGv addr, TCGv val, - unsigned int size) -{ - /* If we get a fault on a delayslot we must keep the jmp state in - the cpu-state to be able to re-execute the jmp. */ - if (dc->delayed_branch == 1) { - cris_store_direct_jmp(dc); - } - - - /* Conditional writes. We only support the kind were X and P are known - at translation time. 
*/ - if (dc->flags_x && (dc->tb_flags & P_FLAG)) { - dc->postinc = 0; - cris_evaluate_flags(dc); - tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG); - return; - } - - tcg_gen_qemu_st_tl(val, addr, dc->mem_index, MO_TE + ctz32(size)); - - if (dc->flags_x) { - cris_evaluate_flags(dc); - tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG); - } -} - -static inline void t_gen_sext(TCGv d, TCGv s, int size) -{ - if (size == 1) { - tcg_gen_ext8s_i32(d, s); - } else if (size == 2) { - tcg_gen_ext16s_i32(d, s); - } else { - tcg_gen_mov_tl(d, s); - } -} - -static inline void t_gen_zext(TCGv d, TCGv s, int size) -{ - if (size == 1) { - tcg_gen_ext8u_i32(d, s); - } else if (size == 2) { - tcg_gen_ext16u_i32(d, s); - } else { - tcg_gen_mov_tl(d, s); - } -} - -#if DISAS_CRIS -static char memsize_char(int size) -{ - switch (size) { - case 1: return 'b'; - case 2: return 'w'; - case 4: return 'd'; - default: - return 'x'; - } -} -#endif - -static inline unsigned int memsize_z(DisasContext *dc) -{ - return dc->zsize + 1; -} - -static inline unsigned int memsize_zz(DisasContext *dc) -{ - switch (dc->zzsize) { - case 0: return 1; - case 1: return 2; - default: - return 4; - } -} - -static inline void do_postinc (DisasContext *dc, int size) -{ - if (dc->postinc) { - tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], size); - } -} - -static inline void dec_prep_move_r(DisasContext *dc, int rs, int rd, - int size, int s_ext, TCGv dst) -{ - if (s_ext) { - t_gen_sext(dst, cpu_R[rs], size); - } else { - t_gen_zext(dst, cpu_R[rs], size); - } -} - -/* Prepare T0 and T1 for a register alu operation. - s_ext decides if the operand1 should be sign-extended or zero-extended when - needed. */ -static void dec_prep_alu_r(DisasContext *dc, int rs, int rd, - int size, int s_ext, TCGv dst, TCGv src) -{ - dec_prep_move_r(dc, rs, rd, size, s_ext, src); - - if (s_ext) { - t_gen_sext(dst, cpu_R[rd], size); - } else { - t_gen_zext(dst, cpu_R[rd], size); - } -} - -static int dec_prep_move_m(CPUCRISState *env, DisasContext *dc, - int s_ext, int memsize, TCGv dst) -{ - unsigned int rs; - uint32_t imm; - int is_imm; - int insn_len = 2; - - rs = dc->op1; - is_imm = rs == 15 && dc->postinc; - - /* Load [$rs] onto T1. */ - if (is_imm) { - insn_len = 2 + memsize; - if (memsize == 1) { - insn_len++; - } - - imm = cris_fetch(env, dc, dc->pc + 2, memsize, s_ext); - tcg_gen_movi_tl(dst, imm); - dc->postinc = 0; - } else { - cris_flush_cc_state(dc); - gen_load(dc, dst, cpu_R[rs], memsize, 0); - if (s_ext) { - t_gen_sext(dst, dst, memsize); - } else { - t_gen_zext(dst, dst, memsize); - } - } - return insn_len; -} - -/* Prepare T0 and T1 for a memory + alu operation. - s_ext decides if the operand1 should be sign-extended or zero-extended when - needed. */ -static int dec_prep_alu_m(CPUCRISState *env, DisasContext *dc, - int s_ext, int memsize, TCGv dst, TCGv src) -{ - int insn_len; - - insn_len = dec_prep_move_m(env, dc, s_ext, memsize, src); - tcg_gen_mov_tl(dst, cpu_R[dc->op2]); - return insn_len; -} - -#if DISAS_CRIS -static const char *cc_name(int cc) -{ - static const char * const cc_names[16] = { - "cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi", - "ls", "hi", "ge", "lt", "gt", "le", "a", "p" - }; - assert(cc < 16); - return cc_names[cc]; -} -#endif - -/* Start of insn decoders. 
*/ - -static int dec_bccq(CPUCRISState *env, DisasContext *dc) -{ - int32_t offset; - int sign; - uint32_t cond = dc->op2; - - offset = EXTRACT_FIELD(dc->ir, 1, 7); - sign = EXTRACT_FIELD(dc->ir, 0, 0); - - offset *= 2; - offset |= sign << 8; - offset = sign_extend(offset, 8); - - LOG_DIS("b%s %x\n", cc_name(cond), dc->pc + offset); - - /* op2 holds the condition-code. */ - cris_cc_mask(dc, 0); - cris_prepare_cc_branch(dc, offset, cond); - return 2; -} -static int dec_addoq(CPUCRISState *env, DisasContext *dc) -{ - int32_t imm; - - dc->op1 = EXTRACT_FIELD(dc->ir, 0, 7); - imm = sign_extend(dc->op1, 7); - - LOG_DIS("addoq %d, $r%u\n", imm, dc->op2); - cris_cc_mask(dc, 0); - /* Fetch register operand, */ - tcg_gen_addi_tl(cpu_R[R_ACR], cpu_R[dc->op2], imm); - - return 2; -} -static int dec_addq(CPUCRISState *env, DisasContext *dc) -{ - TCGv c; - LOG_DIS("addq %u, $r%u\n", dc->op1, dc->op2); - - dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5); - - cris_cc_mask(dc, CC_MASK_NZVC); - - c = tcg_constant_tl(dc->op1); - cris_alu(dc, CC_OP_ADD, - cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - return 2; -} -static int dec_moveq(CPUCRISState *env, DisasContext *dc) -{ - uint32_t imm; - - dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5); - imm = sign_extend(dc->op1, 5); - LOG_DIS("moveq %d, $r%u\n", imm, dc->op2); - - tcg_gen_movi_tl(cpu_R[dc->op2], imm); - return 2; -} -static int dec_subq(CPUCRISState *env, DisasContext *dc) -{ - TCGv c; - dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5); - - LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_constant_tl(dc->op1); - cris_alu(dc, CC_OP_SUB, - cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - return 2; -} -static int dec_cmpq(CPUCRISState *env, DisasContext *dc) -{ - uint32_t imm; - TCGv c; - dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5); - imm = sign_extend(dc->op1, 5); - - LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2); - cris_cc_mask(dc, CC_MASK_NZVC); - - c = tcg_constant_tl(imm); - cris_alu(dc, CC_OP_CMP, - cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - return 2; -} -static int dec_andq(CPUCRISState *env, DisasContext *dc) -{ - uint32_t imm; - TCGv c; - dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5); - imm = sign_extend(dc->op1, 5); - - LOG_DIS("andq %d, $r%d\n", imm, dc->op2); - cris_cc_mask(dc, CC_MASK_NZ); - - c = tcg_constant_tl(imm); - cris_alu(dc, CC_OP_AND, - cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - return 2; -} -static int dec_orq(CPUCRISState *env, DisasContext *dc) -{ - uint32_t imm; - TCGv c; - dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5); - imm = sign_extend(dc->op1, 5); - LOG_DIS("orq %d, $r%d\n", imm, dc->op2); - cris_cc_mask(dc, CC_MASK_NZ); - - c = tcg_constant_tl(imm); - cris_alu(dc, CC_OP_OR, - cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - return 2; -} -static int dec_btstq(CPUCRISState *env, DisasContext *dc) -{ - TCGv c; - dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4); - LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZ); - c = tcg_constant_tl(dc->op1); - cris_evaluate_flags(dc); - gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->op2], - c, cpu_PR[PR_CCS]); - cris_alu(dc, CC_OP_MOVE, - cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4); - cris_update_cc_op(dc, CC_OP_FLAGS, 4); - dc->flags_uptodate = 1; - return 2; -} -static int dec_asrq(CPUCRISState *env, DisasContext *dc) -{ - dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4); - LOG_DIS("asrq %u, $r%d\n", dc->op1, dc->op2); - cris_cc_mask(dc, CC_MASK_NZ); - - tcg_gen_sari_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1); - cris_alu(dc, CC_OP_MOVE, - cpu_R[dc->op2], - cpu_R[dc->op2], cpu_R[dc->op2], 4); - return 2; 
-} -static int dec_lslq(CPUCRISState *env, DisasContext *dc) -{ - dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4); - LOG_DIS("lslq %u, $r%d\n", dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZ); - - tcg_gen_shli_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1); - - cris_alu(dc, CC_OP_MOVE, - cpu_R[dc->op2], - cpu_R[dc->op2], cpu_R[dc->op2], 4); - return 2; -} -static int dec_lsrq(CPUCRISState *env, DisasContext *dc) -{ - dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4); - LOG_DIS("lsrq %u, $r%d\n", dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZ); - - tcg_gen_shri_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1); - cris_alu(dc, CC_OP_MOVE, - cpu_R[dc->op2], - cpu_R[dc->op2], cpu_R[dc->op2], 4); - return 2; -} - -static int dec_move_r(CPUCRISState *env, DisasContext *dc) -{ - int size = memsize_zz(dc); - - LOG_DIS("move.%c $r%u, $r%u\n", - memsize_char(size), dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZ); - if (size == 4) { - dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, cpu_R[dc->op2]); - cris_cc_mask(dc, CC_MASK_NZ); - cris_update_cc_op(dc, CC_OP_MOVE, 4); - cris_update_cc_x(dc); - cris_update_result(dc, cpu_R[dc->op2]); - } else { - TCGv t0; - - t0 = tcg_temp_new(); - dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0); - cris_alu(dc, CC_OP_MOVE, - cpu_R[dc->op2], - cpu_R[dc->op2], t0, size); - } - return 2; -} - -static int dec_scc_r(CPUCRISState *env, DisasContext *dc) -{ - int cond = dc->op2; - - LOG_DIS("s%s $r%u\n", - cc_name(cond), dc->op1); - - gen_tst_cc(dc, cpu_R[dc->op1], cond); - tcg_gen_setcondi_tl(TCG_COND_NE, cpu_R[dc->op1], cpu_R[dc->op1], 0); - - cris_cc_mask(dc, 0); - return 2; -} - -static inline void cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t) -{ - if (size == 4) { - t[0] = cpu_R[dc->op2]; - t[1] = cpu_R[dc->op1]; - } else { - t[0] = tcg_temp_new(); - t[1] = tcg_temp_new(); - } -} - -static int dec_and_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int size = memsize_zz(dc); - - LOG_DIS("and.%c $r%u, $r%u\n", - memsize_char(size), dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZ); - - cris_alu_alloc_temps(dc, size, t); - dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); - cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size); - return 2; -} - -static int dec_lz_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t0; - LOG_DIS("lz $r%u, $r%u\n", - dc->op1, dc->op2); - cris_cc_mask(dc, CC_MASK_NZ); - t0 = tcg_temp_new(); - dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0); - cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - return 2; -} - -static int dec_lsl_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int size = memsize_zz(dc); - - LOG_DIS("lsl.%c $r%u, $r%u\n", - memsize_char(size), dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZ); - cris_alu_alloc_temps(dc, size, t); - dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); - tcg_gen_andi_tl(t[1], t[1], 63); - cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size); - return 2; -} - -static int dec_lsr_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int size = memsize_zz(dc); - - LOG_DIS("lsr.%c $r%u, $r%u\n", - memsize_char(size), dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZ); - cris_alu_alloc_temps(dc, size, t); - dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); - tcg_gen_andi_tl(t[1], t[1], 63); - cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size); - return 2; -} - -static int dec_asr_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int size = memsize_zz(dc); - - LOG_DIS("asr.%c $r%u, $r%u\n", - 
memsize_char(size), dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZ); - cris_alu_alloc_temps(dc, size, t); - dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]); - tcg_gen_andi_tl(t[1], t[1], 63); - cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size); - return 2; -} - -static int dec_muls_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int size = memsize_zz(dc); - - LOG_DIS("muls.%c $r%u, $r%u\n", - memsize_char(size), dc->op1, dc->op2); - cris_cc_mask(dc, CC_MASK_NZV); - cris_alu_alloc_temps(dc, size, t); - dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]); - - cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4); - return 2; -} - -static int dec_mulu_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int size = memsize_zz(dc); - - LOG_DIS("mulu.%c $r%u, $r%u\n", - memsize_char(size), dc->op1, dc->op2); - cris_cc_mask(dc, CC_MASK_NZV); - cris_alu_alloc_temps(dc, size, t); - dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); - - cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4); - return 2; -} - - -static int dec_dstep_r(CPUCRISState *env, DisasContext *dc) -{ - LOG_DIS("dstep $r%u, $r%u\n", dc->op1, dc->op2); - cris_cc_mask(dc, CC_MASK_NZ); - cris_alu(dc, CC_OP_DSTEP, - cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4); - return 2; -} - -static int dec_xor_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int size = memsize_zz(dc); - LOG_DIS("xor.%c $r%u, $r%u\n", - memsize_char(size), dc->op1, dc->op2); - BUG_ON(size != 4); /* xor is dword. */ - cris_cc_mask(dc, CC_MASK_NZ); - cris_alu_alloc_temps(dc, size, t); - dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); - - cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4); - return 2; -} - -static int dec_bound_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv l0; - int size = memsize_zz(dc); - LOG_DIS("bound.%c $r%u, $r%u\n", - memsize_char(size), dc->op1, dc->op2); - cris_cc_mask(dc, CC_MASK_NZ); - l0 = tcg_temp_new(); - dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0); - cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4); - return 2; -} - -static int dec_cmp_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int size = memsize_zz(dc); - LOG_DIS("cmp.%c $r%u, $r%u\n", - memsize_char(size), dc->op1, dc->op2); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu_alloc_temps(dc, size, t); - dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); - - cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size); - return 2; -} - -static int dec_abs_r(CPUCRISState *env, DisasContext *dc) -{ - LOG_DIS("abs $r%u, $r%u\n", - dc->op1, dc->op2); - cris_cc_mask(dc, CC_MASK_NZ); - - tcg_gen_abs_tl(cpu_R[dc->op2], cpu_R[dc->op1]); - cris_alu(dc, CC_OP_MOVE, - cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4); - return 2; -} - -static int dec_add_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int size = memsize_zz(dc); - LOG_DIS("add.%c $r%u, $r%u\n", - memsize_char(size), dc->op1, dc->op2); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu_alloc_temps(dc, size, t); - dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); - - cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size); - return 2; -} - -static int dec_addc_r(CPUCRISState *env, DisasContext *dc) -{ - LOG_DIS("addc $r%u, $r%u\n", - dc->op1, dc->op2); - cris_evaluate_flags(dc); - - /* Set for this insn. 
*/ - dc->flags_x = X_FLAG; - - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu(dc, CC_OP_ADDC, - cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4); - return 2; -} - -static int dec_mcp_r(CPUCRISState *env, DisasContext *dc) -{ - LOG_DIS("mcp $p%u, $r%u\n", - dc->op2, dc->op1); - cris_evaluate_flags(dc); - cris_cc_mask(dc, CC_MASK_RNZV); - cris_alu(dc, CC_OP_MCP, - cpu_R[dc->op1], cpu_R[dc->op1], cpu_PR[dc->op2], 4); - return 2; -} - -#if DISAS_CRIS -static char * swapmode_name(int mode, char *modename) { - int i = 0; - if (mode & 8) { - modename[i++] = 'n'; - } - if (mode & 4) { - modename[i++] = 'w'; - } - if (mode & 2) { - modename[i++] = 'b'; - } - if (mode & 1) { - modename[i++] = 'r'; - } - modename[i++] = 0; - return modename; -} -#endif - -static int dec_swap_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t0; -#if DISAS_CRIS - char modename[4]; -#endif - LOG_DIS("swap%s $r%u\n", - swapmode_name(dc->op2, modename), dc->op1); - - cris_cc_mask(dc, CC_MASK_NZ); - t0 = tcg_temp_new(); - tcg_gen_mov_tl(t0, cpu_R[dc->op1]); - if (dc->op2 & 8) { - tcg_gen_not_tl(t0, t0); - } - if (dc->op2 & 4) { - t_gen_swapw(t0, t0); - } - if (dc->op2 & 2) { - t_gen_swapb(t0, t0); - } - if (dc->op2 & 1) { - t_gen_swapr(t0, t0); - } - cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op1], cpu_R[dc->op1], t0, 4); - return 2; -} - -static int dec_or_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int size = memsize_zz(dc); - LOG_DIS("or.%c $r%u, $r%u\n", - memsize_char(size), dc->op1, dc->op2); - cris_cc_mask(dc, CC_MASK_NZ); - cris_alu_alloc_temps(dc, size, t); - dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); - cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size); - return 2; -} - -static int dec_addi_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t0; - LOG_DIS("addi.%c $r%u, $r%u\n", - memsize_char(memsize_zz(dc)), dc->op2, dc->op1); - cris_cc_mask(dc, 0); - t0 = tcg_temp_new(); - tcg_gen_shli_tl(t0, cpu_R[dc->op2], dc->zzsize); - tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0); - return 2; -} - -static int dec_addi_acr(CPUCRISState *env, DisasContext *dc) -{ - TCGv t0; - LOG_DIS("addi.%c $r%u, $r%u, $acr\n", - memsize_char(memsize_zz(dc)), dc->op2, dc->op1); - cris_cc_mask(dc, 0); - t0 = tcg_temp_new(); - tcg_gen_shli_tl(t0, cpu_R[dc->op2], dc->zzsize); - tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0); - return 2; -} - -static int dec_neg_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int size = memsize_zz(dc); - LOG_DIS("neg.%c $r%u, $r%u\n", - memsize_char(size), dc->op1, dc->op2); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu_alloc_temps(dc, size, t); - dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); - - cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size); - return 2; -} - -static int dec_btst_r(CPUCRISState *env, DisasContext *dc) -{ - LOG_DIS("btst $r%u, $r%u\n", - dc->op1, dc->op2); - cris_cc_mask(dc, CC_MASK_NZ); - cris_evaluate_flags(dc); - gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->op2], - cpu_R[dc->op1], cpu_PR[PR_CCS]); - cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], - cpu_R[dc->op2], cpu_R[dc->op2], 4); - cris_update_cc_op(dc, CC_OP_FLAGS, 4); - dc->flags_uptodate = 1; - return 2; -} - -static int dec_sub_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int size = memsize_zz(dc); - LOG_DIS("sub.%c $r%u, $r%u\n", - memsize_char(size), dc->op1, dc->op2); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu_alloc_temps(dc, size, t); - dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); - cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size); 
- return 2; -} - -/* Zero extension. From size to dword. */ -static int dec_movu_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t0; - int size = memsize_z(dc); - LOG_DIS("movu.%c $r%u, $r%u\n", - memsize_char(size), - dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZ); - t0 = tcg_temp_new(); - dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0); - cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - return 2; -} - -/* Sign extension. From size to dword. */ -static int dec_movs_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t0; - int size = memsize_z(dc); - LOG_DIS("movs.%c $r%u, $r%u\n", - memsize_char(size), - dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZ); - t0 = tcg_temp_new(); - /* Size can only be qi or hi. */ - t_gen_sext(t0, cpu_R[dc->op1], size); - cris_alu(dc, CC_OP_MOVE, - cpu_R[dc->op2], cpu_R[dc->op1], t0, 4); - return 2; -} - -/* zero extension. From size to dword. */ -static int dec_addu_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t0; - int size = memsize_z(dc); - LOG_DIS("addu.%c $r%u, $r%u\n", - memsize_char(size), - dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZVC); - t0 = tcg_temp_new(); - /* Size can only be qi or hi. */ - t_gen_zext(t0, cpu_R[dc->op1], size); - cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - return 2; -} - -/* Sign extension. From size to dword. */ -static int dec_adds_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t0; - int size = memsize_z(dc); - LOG_DIS("adds.%c $r%u, $r%u\n", - memsize_char(size), - dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZVC); - t0 = tcg_temp_new(); - /* Size can only be qi or hi. */ - t_gen_sext(t0, cpu_R[dc->op1], size); - cris_alu(dc, CC_OP_ADD, - cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - return 2; -} - -/* Zero extension. From size to dword. */ -static int dec_subu_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t0; - int size = memsize_z(dc); - LOG_DIS("subu.%c $r%u, $r%u\n", - memsize_char(size), - dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZVC); - t0 = tcg_temp_new(); - /* Size can only be qi or hi. */ - t_gen_zext(t0, cpu_R[dc->op1], size); - cris_alu(dc, CC_OP_SUB, - cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - return 2; -} - -/* Sign extension. From size to dword. */ -static int dec_subs_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv t0; - int size = memsize_z(dc); - LOG_DIS("subs.%c $r%u, $r%u\n", - memsize_char(size), - dc->op1, dc->op2); - - cris_cc_mask(dc, CC_MASK_NZVC); - t0 = tcg_temp_new(); - /* Size can only be qi or hi. */ - t_gen_sext(t0, cpu_R[dc->op1], size); - cris_alu(dc, CC_OP_SUB, - cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - return 2; -} - -static int dec_setclrf(CPUCRISState *env, DisasContext *dc) -{ - uint32_t flags; - int set = (~dc->opcode >> 2) & 1; - - - flags = (EXTRACT_FIELD(dc->ir, 12, 15) << 4) - | EXTRACT_FIELD(dc->ir, 0, 3); - if (set && flags == 0) { - LOG_DIS("nop\n"); - return 2; - } else if (!set && (flags & 0x20)) { - LOG_DIS("di\n"); - } else { - LOG_DIS("%sf %x\n", set ? "set" : "clr", flags); - } - - /* User space is not allowed to touch these. Silently ignore. */ - if (dc->tb_flags & U_FLAG) { - flags &= ~(S_FLAG | I_FLAG | U_FLAG); - } - - if (flags & X_FLAG) { - if (set) { - dc->flags_x = X_FLAG; - } else { - dc->flags_x = 0; - } - } - - /* Break the TB if any of the SPI flag changes. */ - if (flags & (P_FLAG | S_FLAG)) { - tcg_gen_movi_tl(env_pc, dc->pc + 2); - dc->base.is_jmp = DISAS_UPDATE; - dc->cpustate_changed = 1; - } - - /* For the I flag, only act on posedge. 
*/ - if ((flags & I_FLAG)) { - tcg_gen_movi_tl(env_pc, dc->pc + 2); - dc->base.is_jmp = DISAS_UPDATE; - dc->cpustate_changed = 1; - } - - - /* Simply decode the flags. */ - cris_evaluate_flags(dc); - cris_update_cc_op(dc, CC_OP_FLAGS, 4); - cris_update_cc_x(dc); - tcg_gen_movi_tl(cc_op, dc->cc_op); - - if (set) { - if (!(dc->tb_flags & U_FLAG) && (flags & U_FLAG)) { - /* Enter user mode. */ - t_gen_mov_env_TN(ksp, cpu_R[R_SP]); - tcg_gen_mov_tl(cpu_R[R_SP], cpu_PR[PR_USP]); - dc->cpustate_changed = 1; - } - tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags); - } else { - tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~flags); - } - - dc->flags_uptodate = 1; - dc->clear_x = 0; - return 2; -} - -static int dec_move_rs(CPUCRISState *env, DisasContext *dc) -{ - TCGv c2, c1; - LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2); - c1 = tcg_constant_tl(dc->op1); - c2 = tcg_constant_tl(dc->op2); - cris_cc_mask(dc, 0); - gen_helper_movl_sreg_reg(tcg_env, c2, c1); - return 2; -} -static int dec_move_sr(CPUCRISState *env, DisasContext *dc) -{ - TCGv c2, c1; - LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1); - c1 = tcg_constant_tl(dc->op1); - c2 = tcg_constant_tl(dc->op2); - cris_cc_mask(dc, 0); - gen_helper_movl_reg_sreg(tcg_env, c1, c2); - return 2; -} - -static int dec_move_rp(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - LOG_DIS("move $r%u, $p%u\n", dc->op1, dc->op2); - cris_cc_mask(dc, 0); - - t[0] = tcg_temp_new(); - if (dc->op2 == PR_CCS) { - cris_evaluate_flags(dc); - tcg_gen_mov_tl(t[0], cpu_R[dc->op1]); - if (dc->tb_flags & U_FLAG) { - t[1] = tcg_temp_new(); - /* User space is not allowed to touch all flags. */ - tcg_gen_andi_tl(t[0], t[0], 0x39f); - tcg_gen_andi_tl(t[1], cpu_PR[PR_CCS], ~0x39f); - tcg_gen_or_tl(t[0], t[1], t[0]); - } - } else { - tcg_gen_mov_tl(t[0], cpu_R[dc->op1]); - } - - t_gen_mov_preg_TN(dc, dc->op2, t[0]); - if (dc->op2 == PR_CCS) { - cris_update_cc_op(dc, CC_OP_FLAGS, 4); - dc->flags_uptodate = 1; - } - return 2; -} -static int dec_move_pr(CPUCRISState *env, DisasContext *dc) -{ - TCGv t0; - LOG_DIS("move $p%u, $r%u\n", dc->op2, dc->op1); - cris_cc_mask(dc, 0); - - if (dc->op2 == PR_CCS) { - cris_evaluate_flags(dc); - } - - if (dc->op2 == PR_DZ) { - tcg_gen_movi_tl(cpu_R[dc->op1], 0); - } else { - t0 = tcg_temp_new(); - t_gen_mov_TN_preg(t0, dc->op2); - cris_alu(dc, CC_OP_MOVE, - cpu_R[dc->op1], cpu_R[dc->op1], t0, - preg_sizes[dc->op2]); - } - return 2; -} - -static int dec_move_mr(CPUCRISState *env, DisasContext *dc) -{ - int memsize = memsize_zz(dc); - int insn_len; - LOG_DIS("move.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - if (memsize == 4) { - insn_len = dec_prep_move_m(env, dc, 0, 4, cpu_R[dc->op2]); - cris_cc_mask(dc, CC_MASK_NZ); - cris_update_cc_op(dc, CC_OP_MOVE, 4); - cris_update_cc_x(dc); - cris_update_result(dc, cpu_R[dc->op2]); - } else { - TCGv t0; - - t0 = tcg_temp_new(); - insn_len = dec_prep_move_m(env, dc, 0, memsize, t0); - cris_cc_mask(dc, CC_MASK_NZ); - cris_alu(dc, CC_OP_MOVE, - cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize); - } - do_postinc(dc, memsize); - return insn_len; -} - -static inline void cris_alu_m_alloc_temps(TCGv *t) -{ - t[0] = tcg_temp_new(); - t[1] = tcg_temp_new(); -} - -static int dec_movs_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_z(dc); - int insn_len; - LOG_DIS("movs.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - cris_alu_m_alloc_temps(t); - /* sign extend. 
*/ - insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZ); - cris_alu(dc, CC_OP_MOVE, - cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_addu_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_z(dc); - int insn_len; - LOG_DIS("addu.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - cris_alu_m_alloc_temps(t); - /* sign extend. */ - insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu(dc, CC_OP_ADD, - cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_adds_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_z(dc); - int insn_len; - LOG_DIS("adds.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - cris_alu_m_alloc_temps(t); - /* sign extend. */ - insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_subu_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_z(dc); - int insn_len; - LOG_DIS("subu.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - cris_alu_m_alloc_temps(t); - /* sign extend. */ - insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_subs_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_z(dc); - int insn_len; - LOG_DIS("subs.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - cris_alu_m_alloc_temps(t); - /* sign extend. */ - insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_movu_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_z(dc); - int insn_len; - - LOG_DIS("movu.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - cris_alu_m_alloc_temps(t); - insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZ); - cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_cmpu_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_z(dc); - int insn_len; - LOG_DIS("cmpu.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - cris_alu_m_alloc_temps(t); - insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_cmps_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_z(dc); - int insn_len; - LOG_DIS("cmps.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? 
"+]" : "]", - dc->op2); - - cris_alu_m_alloc_temps(t); - insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu(dc, CC_OP_CMP, - cpu_R[dc->op2], cpu_R[dc->op2], t[1], - memsize_zz(dc)); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_cmp_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_zz(dc); - int insn_len; - LOG_DIS("cmp.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - cris_alu_m_alloc_temps(t); - insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu(dc, CC_OP_CMP, - cpu_R[dc->op2], cpu_R[dc->op2], t[1], - memsize_zz(dc)); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_test_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2], c; - int memsize = memsize_zz(dc); - int insn_len; - LOG_DIS("test.%c [$r%u%s] op2=%x\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - cris_evaluate_flags(dc); - - cris_alu_m_alloc_temps(t); - insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZ); - tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3); - - c = tcg_constant_tl(0); - cris_alu(dc, CC_OP_CMP, - cpu_R[dc->op2], t[1], c, memsize_zz(dc)); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_and_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_zz(dc); - int insn_len; - LOG_DIS("and.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - cris_alu_m_alloc_temps(t); - insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZ); - cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc)); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_add_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_zz(dc); - int insn_len; - LOG_DIS("add.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - cris_alu_m_alloc_temps(t); - insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu(dc, CC_OP_ADD, - cpu_R[dc->op2], t[0], t[1], memsize_zz(dc)); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_addo_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_zz(dc); - int insn_len; - LOG_DIS("add.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - cris_alu_m_alloc_temps(t); - insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]); - cris_cc_mask(dc, 0); - cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_bound_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv l[2]; - int memsize = memsize_zz(dc); - int insn_len; - LOG_DIS("bound.%c [$r%u%s, $r%u\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - l[0] = tcg_temp_new(); - l[1] = tcg_temp_new(); - insn_len = dec_prep_alu_m(env, dc, 0, memsize, l[0], l[1]); - cris_cc_mask(dc, CC_MASK_NZ); - cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_addc_mr(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int insn_len = 2; - LOG_DIS("addc [$r%u%s, $r%u\n", - dc->op1, dc->postinc ? "+]" : "]", - dc->op2); - - cris_evaluate_flags(dc); - - /* Set for this insn. 
*/ - dc->flags_x = X_FLAG; - - cris_alu_m_alloc_temps(t); - insn_len = dec_prep_alu_m(env, dc, 0, 4, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4); - do_postinc(dc, 4); - return insn_len; -} - -static int dec_sub_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_zz(dc); - int insn_len; - LOG_DIS("sub.%c [$r%u%s, $r%u ir=%x zz=%x\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2, dc->ir, dc->zzsize); - - cris_alu_m_alloc_temps(t); - insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_or_m(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_zz(dc); - int insn_len; - LOG_DIS("or.%c [$r%u%s, $r%u pc=%x\n", - memsize_char(memsize), - dc->op1, dc->postinc ? "+]" : "]", - dc->op2, dc->pc); - - cris_alu_m_alloc_temps(t); - insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]); - cris_cc_mask(dc, CC_MASK_NZ); - cris_alu(dc, CC_OP_OR, - cpu_R[dc->op2], t[0], t[1], memsize_zz(dc)); - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_move_mp(CPUCRISState *env, DisasContext *dc) -{ - TCGv t[2]; - int memsize = memsize_zz(dc); - int insn_len = 2; - - LOG_DIS("move.%c [$r%u%s, $p%u\n", - memsize_char(memsize), - dc->op1, - dc->postinc ? "+]" : "]", - dc->op2); - - cris_alu_m_alloc_temps(t); - insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]); - cris_cc_mask(dc, 0); - if (dc->op2 == PR_CCS) { - cris_evaluate_flags(dc); - if (dc->tb_flags & U_FLAG) { - /* User space is not allowed to touch all flags. */ - tcg_gen_andi_tl(t[1], t[1], 0x39f); - tcg_gen_andi_tl(t[0], cpu_PR[PR_CCS], ~0x39f); - tcg_gen_or_tl(t[1], t[0], t[1]); - } - } - - t_gen_mov_preg_TN(dc, dc->op2, t[1]); - - do_postinc(dc, memsize); - return insn_len; -} - -static int dec_move_pm(CPUCRISState *env, DisasContext *dc) -{ - TCGv t0; - int memsize; - - memsize = preg_sizes[dc->op2]; - - LOG_DIS("move.%c $p%u, [$r%u%s\n", - memsize_char(memsize), - dc->op2, dc->op1, dc->postinc ? "+]" : "]"); - - /* prepare store. Address in T0, value in T1. */ - if (dc->op2 == PR_CCS) { - cris_evaluate_flags(dc); - } - t0 = tcg_temp_new(); - t_gen_mov_TN_preg(t0, dc->op2); - cris_flush_cc_state(dc); - gen_store(dc, cpu_R[dc->op1], t0, memsize); - - cris_cc_mask(dc, 0); - if (dc->postinc) { - tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize); - } - return 2; -} - -static int dec_movem_mr(CPUCRISState *env, DisasContext *dc) -{ - TCGv_i64 tmp[16]; - TCGv tmp32; - TCGv addr; - int i; - int nr = dc->op2 + 1; - - LOG_DIS("movem [$r%u%s, $r%u\n", dc->op1, - dc->postinc ? "+]" : "]", dc->op2); - - addr = tcg_temp_new(); - /* There are probably better ways of doing this. */ - cris_flush_cc_state(dc); - for (i = 0; i < (nr >> 1); i++) { - tmp[i] = tcg_temp_new_i64(); - tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8); - gen_load64(dc, tmp[i], addr); - } - if (nr & 1) { - tmp32 = tcg_temp_new_i32(); - tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8); - gen_load(dc, tmp32, addr, 4, 0); - } else { - tmp32 = NULL; - } - - for (i = 0; i < (nr >> 1); i++) { - tcg_gen_extrl_i64_i32(cpu_R[i * 2], tmp[i]); - tcg_gen_shri_i64(tmp[i], tmp[i], 32); - tcg_gen_extrl_i64_i32(cpu_R[i * 2 + 1], tmp[i]); - } - if (nr & 1) { - tcg_gen_mov_tl(cpu_R[dc->op2], tmp32); - } - - /* writeback the updated pointer value. 
*/ - if (dc->postinc) { - tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], nr * 4); - } - - /* gen_load might want to evaluate the previous insns flags. */ - cris_cc_mask(dc, 0); - return 2; -} - -static int dec_movem_rm(CPUCRISState *env, DisasContext *dc) -{ - TCGv tmp; - TCGv addr; - int i; - - LOG_DIS("movem $r%u, [$r%u%s\n", dc->op2, dc->op1, - dc->postinc ? "+]" : "]"); - - cris_flush_cc_state(dc); - - tmp = tcg_temp_new(); - addr = tcg_temp_new(); - tcg_gen_movi_tl(tmp, 4); - tcg_gen_mov_tl(addr, cpu_R[dc->op1]); - for (i = 0; i <= dc->op2; i++) { - /* Displace addr. */ - /* Perform the store. */ - gen_store(dc, addr, cpu_R[i], 4); - tcg_gen_add_tl(addr, addr, tmp); - } - if (dc->postinc) { - tcg_gen_mov_tl(cpu_R[dc->op1], addr); - } - cris_cc_mask(dc, 0); - return 2; -} - -static int dec_move_rm(CPUCRISState *env, DisasContext *dc) -{ - int memsize; - - memsize = memsize_zz(dc); - - LOG_DIS("move.%c $r%u, [$r%u]\n", - memsize_char(memsize), dc->op2, dc->op1); - - /* prepare store. */ - cris_flush_cc_state(dc); - gen_store(dc, cpu_R[dc->op1], cpu_R[dc->op2], memsize); - - if (dc->postinc) { - tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize); - } - cris_cc_mask(dc, 0); - return 2; -} - -static int dec_lapcq(CPUCRISState *env, DisasContext *dc) -{ - LOG_DIS("lapcq %x, $r%u\n", - dc->pc + dc->op1*2, dc->op2); - cris_cc_mask(dc, 0); - tcg_gen_movi_tl(cpu_R[dc->op2], dc->pc + dc->op1 * 2); - return 2; -} - -static int dec_lapc_im(CPUCRISState *env, DisasContext *dc) -{ - unsigned int rd; - int32_t imm; - int32_t pc; - - rd = dc->op2; - - cris_cc_mask(dc, 0); - imm = cris_fetch(env, dc, dc->pc + 2, 4, 0); - LOG_DIS("lapc 0x%x, $r%u\n", imm + dc->pc, dc->op2); - - pc = dc->pc; - pc += imm; - tcg_gen_movi_tl(cpu_R[rd], pc); - return 6; -} - -/* Jump to special reg. */ -static int dec_jump_p(CPUCRISState *env, DisasContext *dc) -{ - LOG_DIS("jump $p%u\n", dc->op2); - - if (dc->op2 == PR_CCS) { - cris_evaluate_flags(dc); - } - t_gen_mov_TN_preg(env_btarget, dc->op2); - /* rete will often have low bit set to indicate delayslot. */ - tcg_gen_andi_tl(env_btarget, env_btarget, ~1); - cris_cc_mask(dc, 0); - cris_prepare_jmp(dc, JMP_INDIRECT); - return 2; -} - -/* Jump and save. */ -static int dec_jas_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv c; - LOG_DIS("jas $r%u, $p%u\n", dc->op1, dc->op2); - cris_cc_mask(dc, 0); - /* Store the return address in Pd. */ - tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]); - if (dc->op2 > 15) { - abort(); - } - c = tcg_constant_tl(dc->pc + 4); - t_gen_mov_preg_TN(dc, dc->op2, c); - - cris_prepare_jmp(dc, JMP_INDIRECT); - return 2; -} - -static int dec_jas_im(CPUCRISState *env, DisasContext *dc) -{ - uint32_t imm; - TCGv c; - - imm = cris_fetch(env, dc, dc->pc + 2, 4, 0); - - LOG_DIS("jas 0x%x\n", imm); - cris_cc_mask(dc, 0); - c = tcg_constant_tl(dc->pc + 8); - /* Store the return address in Pd. */ - t_gen_mov_preg_TN(dc, dc->op2, c); - - dc->jmp_pc = imm; - cris_prepare_jmp(dc, JMP_DIRECT); - return 6; -} - -static int dec_jasc_im(CPUCRISState *env, DisasContext *dc) -{ - uint32_t imm; - TCGv c; - - imm = cris_fetch(env, dc, dc->pc + 2, 4, 0); - - LOG_DIS("jasc 0x%x\n", imm); - cris_cc_mask(dc, 0); - c = tcg_constant_tl(dc->pc + 8 + 4); - /* Store the return address in Pd. 
*/ - t_gen_mov_preg_TN(dc, dc->op2, c); - - dc->jmp_pc = imm; - cris_prepare_jmp(dc, JMP_DIRECT); - return 6; -} - -static int dec_jasc_r(CPUCRISState *env, DisasContext *dc) -{ - TCGv c; - LOG_DIS("jasc_r $r%u, $p%u\n", dc->op1, dc->op2); - cris_cc_mask(dc, 0); - /* Store the return address in Pd. */ - tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]); - c = tcg_constant_tl(dc->pc + 4 + 4); - t_gen_mov_preg_TN(dc, dc->op2, c); - cris_prepare_jmp(dc, JMP_INDIRECT); - return 2; -} - -static int dec_bcc_im(CPUCRISState *env, DisasContext *dc) -{ - int32_t offset; - uint32_t cond = dc->op2; - - offset = cris_fetch(env, dc, dc->pc + 2, 2, 1); - - LOG_DIS("b%s %d pc=%x dst=%x\n", - cc_name(cond), offset, - dc->pc, dc->pc + offset); - - cris_cc_mask(dc, 0); - /* op2 holds the condition-code. */ - cris_prepare_cc_branch(dc, offset, cond); - return 4; -} - -static int dec_bas_im(CPUCRISState *env, DisasContext *dc) -{ - int32_t simm; - TCGv c; - - simm = cris_fetch(env, dc, dc->pc + 2, 4, 0); - - LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2); - cris_cc_mask(dc, 0); - c = tcg_constant_tl(dc->pc + 8); - /* Store the return address in Pd. */ - t_gen_mov_preg_TN(dc, dc->op2, c); - - dc->jmp_pc = dc->pc + simm; - cris_prepare_jmp(dc, JMP_DIRECT); - return 6; -} - -static int dec_basc_im(CPUCRISState *env, DisasContext *dc) -{ - int32_t simm; - TCGv c; - simm = cris_fetch(env, dc, dc->pc + 2, 4, 0); - - LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2); - cris_cc_mask(dc, 0); - c = tcg_constant_tl(dc->pc + 12); - /* Store the return address in Pd. */ - t_gen_mov_preg_TN(dc, dc->op2, c); - - dc->jmp_pc = dc->pc + simm; - cris_prepare_jmp(dc, JMP_DIRECT); - return 6; -} - -static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc) -{ - cris_cc_mask(dc, 0); - - if (dc->op2 == 15) { - tcg_gen_st_i32(tcg_constant_i32(1), tcg_env, - -offsetof(CRISCPU, env) + offsetof(CPUState, halted)); - tcg_gen_movi_tl(env_pc, dc->pc + 2); - t_gen_raise_exception(EXCP_HLT); - dc->base.is_jmp = DISAS_NORETURN; - return 2; - } - - switch (dc->op2 & 7) { - case 2: - /* rfe. */ - LOG_DIS("rfe\n"); - cris_evaluate_flags(dc); - gen_helper_rfe(tcg_env); - dc->base.is_jmp = DISAS_UPDATE; - dc->cpustate_changed = true; - break; - case 5: - /* rfn. */ - LOG_DIS("rfn\n"); - cris_evaluate_flags(dc); - gen_helper_rfn(tcg_env); - dc->base.is_jmp = DISAS_UPDATE; - dc->cpustate_changed = true; - break; - case 6: - LOG_DIS("break %d\n", dc->op1); - cris_evaluate_flags(dc); - /* break. */ - tcg_gen_movi_tl(env_pc, dc->pc + 2); - - /* Breaks start at 16 in the exception vector. */ - t_gen_movi_env_TN(trap_vector, dc->op1 + 16); - t_gen_raise_exception(EXCP_BREAK); - dc->base.is_jmp = DISAS_NORETURN; - break; - default: - printf("op2=%x\n", dc->op2); - BUG(); - break; - - } - return 2; -} - -static int dec_ftag_fidx_d_m(CPUCRISState *env, DisasContext *dc) -{ - return 2; -} - -static int dec_ftag_fidx_i_m(CPUCRISState *env, DisasContext *dc) -{ - return 2; -} - -static int dec_null(CPUCRISState *env, DisasContext *dc) -{ - printf("unknown insn pc=%x opc=%x op1=%x op2=%x\n", - dc->pc, dc->opcode, dc->op1, dc->op2); - fflush(NULL); - BUG(); - return 2; -} - -static const struct decoder_info { - struct { - uint32_t bits; - uint32_t mask; - }; - int (*dec)(CPUCRISState *env, DisasContext *dc); -} decinfo[] = { - /* Order matters here. 
*/ - {DEC_MOVEQ, dec_moveq}, - {DEC_BTSTQ, dec_btstq}, - {DEC_CMPQ, dec_cmpq}, - {DEC_ADDOQ, dec_addoq}, - {DEC_ADDQ, dec_addq}, - {DEC_SUBQ, dec_subq}, - {DEC_ANDQ, dec_andq}, - {DEC_ORQ, dec_orq}, - {DEC_ASRQ, dec_asrq}, - {DEC_LSLQ, dec_lslq}, - {DEC_LSRQ, dec_lsrq}, - {DEC_BCCQ, dec_bccq}, - - {DEC_BCC_IM, dec_bcc_im}, - {DEC_JAS_IM, dec_jas_im}, - {DEC_JAS_R, dec_jas_r}, - {DEC_JASC_IM, dec_jasc_im}, - {DEC_JASC_R, dec_jasc_r}, - {DEC_BAS_IM, dec_bas_im}, - {DEC_BASC_IM, dec_basc_im}, - {DEC_JUMP_P, dec_jump_p}, - {DEC_LAPC_IM, dec_lapc_im}, - {DEC_LAPCQ, dec_lapcq}, - - {DEC_RFE_ETC, dec_rfe_etc}, - {DEC_ADDC_MR, dec_addc_mr}, - - {DEC_MOVE_MP, dec_move_mp}, - {DEC_MOVE_PM, dec_move_pm}, - {DEC_MOVEM_MR, dec_movem_mr}, - {DEC_MOVEM_RM, dec_movem_rm}, - {DEC_MOVE_PR, dec_move_pr}, - {DEC_SCC_R, dec_scc_r}, - {DEC_SETF, dec_setclrf}, - {DEC_CLEARF, dec_setclrf}, - - {DEC_MOVE_SR, dec_move_sr}, - {DEC_MOVE_RP, dec_move_rp}, - {DEC_SWAP_R, dec_swap_r}, - {DEC_ABS_R, dec_abs_r}, - {DEC_LZ_R, dec_lz_r}, - {DEC_MOVE_RS, dec_move_rs}, - {DEC_BTST_R, dec_btst_r}, - {DEC_ADDC_R, dec_addc_r}, - - {DEC_DSTEP_R, dec_dstep_r}, - {DEC_XOR_R, dec_xor_r}, - {DEC_MCP_R, dec_mcp_r}, - {DEC_CMP_R, dec_cmp_r}, - - {DEC_ADDI_R, dec_addi_r}, - {DEC_ADDI_ACR, dec_addi_acr}, - - {DEC_ADD_R, dec_add_r}, - {DEC_SUB_R, dec_sub_r}, - - {DEC_ADDU_R, dec_addu_r}, - {DEC_ADDS_R, dec_adds_r}, - {DEC_SUBU_R, dec_subu_r}, - {DEC_SUBS_R, dec_subs_r}, - {DEC_LSL_R, dec_lsl_r}, - - {DEC_AND_R, dec_and_r}, - {DEC_OR_R, dec_or_r}, - {DEC_BOUND_R, dec_bound_r}, - {DEC_ASR_R, dec_asr_r}, - {DEC_LSR_R, dec_lsr_r}, - - {DEC_MOVU_R, dec_movu_r}, - {DEC_MOVS_R, dec_movs_r}, - {DEC_NEG_R, dec_neg_r}, - {DEC_MOVE_R, dec_move_r}, - - {DEC_FTAG_FIDX_I_M, dec_ftag_fidx_i_m}, - {DEC_FTAG_FIDX_D_M, dec_ftag_fidx_d_m}, - - {DEC_MULS_R, dec_muls_r}, - {DEC_MULU_R, dec_mulu_r}, - - {DEC_ADDU_M, dec_addu_m}, - {DEC_ADDS_M, dec_adds_m}, - {DEC_SUBU_M, dec_subu_m}, - {DEC_SUBS_M, dec_subs_m}, - - {DEC_CMPU_M, dec_cmpu_m}, - {DEC_CMPS_M, dec_cmps_m}, - {DEC_MOVU_M, dec_movu_m}, - {DEC_MOVS_M, dec_movs_m}, - - {DEC_CMP_M, dec_cmp_m}, - {DEC_ADDO_M, dec_addo_m}, - {DEC_BOUND_M, dec_bound_m}, - {DEC_ADD_M, dec_add_m}, - {DEC_SUB_M, dec_sub_m}, - {DEC_AND_M, dec_and_m}, - {DEC_OR_M, dec_or_m}, - {DEC_MOVE_RM, dec_move_rm}, - {DEC_TEST_M, dec_test_m}, - {DEC_MOVE_MR, dec_move_mr}, - - {{0, 0}, dec_null} -}; - -static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc) -{ - int insn_len = 2; - int i; - - /* Load a halfword onto the instruction register. */ - dc->ir = cris_fetch(env, dc, dc->pc, 2, 0); - - /* Now decode it. */ - dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11); - dc->op1 = EXTRACT_FIELD(dc->ir, 0, 3); - dc->op2 = EXTRACT_FIELD(dc->ir, 12, 15); - dc->zsize = EXTRACT_FIELD(dc->ir, 4, 4); - dc->zzsize = EXTRACT_FIELD(dc->ir, 4, 5); - dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10); - - /* Large switch for all insns. */ - for (i = 0; i < ARRAY_SIZE(decinfo); i++) { - if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) { - insn_len = decinfo[i].dec(env, dc); - break; - } - } - -#if !defined(CONFIG_USER_ONLY) - /* Single-stepping ? */ - if (dc->tb_flags & S_FLAG) { - TCGLabel *l1 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_NE, cpu_PR[PR_SPC], dc->pc, l1); - /* We treat SPC as a break with an odd trap vector. 
*/ - cris_evaluate_flags(dc); - t_gen_movi_env_TN(trap_vector, 3); - tcg_gen_movi_tl(env_pc, dc->pc + insn_len); - tcg_gen_movi_tl(cpu_PR[PR_SPC], dc->pc + insn_len); - t_gen_raise_exception(EXCP_BREAK); - gen_set_label(l1); - } -#endif - return insn_len; -} - -#include "translate_v10.c.inc" - -/* - * Delay slots on QEMU/CRIS. - * - * If an exception hits on a delayslot, the core will let ERP (the Exception - * Return Pointer) point to the branch (the previous) insn and set the lsb to - * to give SW a hint that the exception actually hit on the dslot. - * - * CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by - * the core and any jmp to an odd addresses will mask off that lsb. It is - * simply there to let sw know there was an exception on a dslot. - * - * When the software returns from an exception, the branch will re-execute. - * On QEMU care needs to be taken when a branch+delayslot sequence is broken - * and the branch and delayslot don't share pages. - * - * The TB containing the branch insn will set up env->btarget and evaluate - * env->btaken. When the translation loop exits we will note that the branch - * sequence is broken and let env->dslot be the size of the branch insn (those - * vary in length). - * - * The TB containing the delayslot will have the PC of its real insn (i.e no lsb - * set). It will also expect to have env->dslot setup with the size of the - * delay slot so that env->pc - env->dslot point to the branch insn. This TB - * will execute the dslot and take the branch, either to btarget or just one - * insn ahead. - * - * When exceptions occur, we check for env->dslot in do_interrupt to detect - * broken branch sequences and setup $erp accordingly (i.e let it point to the - * branch and set lsb). Then env->dslot gets cleared so that the exception - * handler can enter. When returning from exceptions (jump $erp) the lsb gets - * masked off and we will reexecute the branch insn. - * - */ - -static void cris_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) -{ - DisasContext *dc = container_of(dcbase, DisasContext, base); - CPUCRISState *env = cpu_env(cs); - uint32_t tb_flags = dc->base.tb->flags; - uint32_t pc_start; - - if (env->pregs[PR_VR] == 32) { - dc->decoder = crisv32_decoder; - dc->clear_locked_irq = 0; - } else { - dc->decoder = crisv10_decoder; - dc->clear_locked_irq = 1; - } - - /* - * Odd PC indicates that branch is rexecuting due to exception in the - * delayslot, like in real hw. - */ - pc_start = dc->base.pc_first & ~1; - dc->base.pc_first = pc_start; - dc->base.pc_next = pc_start; - - dc->cpu = env_archcpu(env); - dc->ppc = pc_start; - dc->pc = pc_start; - dc->mem_index = cpu_mmu_index(cs, false); - dc->flags_uptodate = 1; - dc->flags_x = tb_flags & X_FLAG; - dc->cc_x_uptodate = 0; - dc->cc_mask = 0; - dc->update_cc = 0; - dc->clear_prefix = 0; - dc->cpustate_changed = 0; - - cris_update_cc_op(dc, CC_OP_FLAGS, 4); - dc->cc_size_uptodate = -1; - - /* Decode TB flags. */ - dc->tb_flags = tb_flags & (S_FLAG | P_FLAG | U_FLAG | X_FLAG | PFIX_FLAG); - dc->delayed_branch = !!(tb_flags & 7); - if (dc->delayed_branch) { - dc->jmp = JMP_INDIRECT; - } else { - dc->jmp = JMP_NOJMP; - } -} - -static void cris_tr_tb_start(DisasContextBase *db, CPUState *cpu) -{ -} - -static void cris_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) -{ - DisasContext *dc = container_of(dcbase, DisasContext, base); - - tcg_gen_insn_start(dc->delayed_branch == 1 ? 
dc->ppc | 1 : dc->pc); -} - -static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) -{ - DisasContext *dc = container_of(dcbase, DisasContext, base); - unsigned int insn_len; - - /* Pretty disas. */ - LOG_DIS("%8.8x:\t", dc->pc); - - dc->clear_x = 1; - - insn_len = dc->decoder(cpu_env(cs), dc); - dc->ppc = dc->pc; - dc->pc += insn_len; - dc->base.pc_next += insn_len; - - if (dc->base.is_jmp == DISAS_NORETURN) { - return; - } - - if (dc->clear_x) { - cris_clear_x_flag(dc); - } - - /* - * All branches are delayed branches, handled immediately below. - * We don't expect to see odd combinations of exit conditions. - */ - assert(dc->base.is_jmp == DISAS_NEXT || dc->cpustate_changed); - - if (dc->delayed_branch && --dc->delayed_branch == 0) { - dc->base.is_jmp = DISAS_DBRANCH; - return; - } - - if (dc->base.is_jmp != DISAS_NEXT) { - return; - } - - /* Force an update if the per-tb cpu state has changed. */ - if (dc->cpustate_changed) { - dc->base.is_jmp = DISAS_UPDATE_NEXT; - return; - } - - /* - * FIXME: Only the first insn in the TB should cross a page boundary. - * If we can detect the length of the next insn easily, we should. - * In the meantime, simply stop when we do cross. - */ - if ((dc->pc ^ dc->base.pc_first) & TARGET_PAGE_MASK) { - dc->base.is_jmp = DISAS_TOO_MANY; - } -} - -static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) -{ - DisasContext *dc = container_of(dcbase, DisasContext, base); - DisasJumpType is_jmp = dc->base.is_jmp; - target_ulong npc = dc->pc; - - if (is_jmp == DISAS_NORETURN) { - /* If we have a broken branch+delayslot sequence, it's too late. */ - assert(dc->delayed_branch != 1); - return; - } - - if (dc->clear_locked_irq) { - t_gen_movi_env_TN(locked_irq, 0); - } - - /* Broken branch+delayslot sequence. */ - if (dc->delayed_branch == 1) { - /* Set env->dslot to the size of the branch insn. */ - t_gen_movi_env_TN(dslot, dc->pc - dc->ppc); - cris_store_direct_jmp(dc); - } - - cris_evaluate_flags(dc); - - /* Evaluate delayed branch destination and fold to another is_jmp case. */ - if (is_jmp == DISAS_DBRANCH) { - if (dc->base.tb->flags & 7) { - t_gen_movi_env_TN(dslot, 0); - } - - switch (dc->jmp) { - case JMP_DIRECT: - npc = dc->jmp_pc; - is_jmp = dc->cpustate_changed ? DISAS_UPDATE_NEXT : DISAS_TOO_MANY; - break; - - case JMP_DIRECT_CC: - /* - * Use a conditional branch if either taken or not-taken path - * can use goto_tb. If neither can, then treat it as indirect. - */ - if (likely(!dc->cpustate_changed) - && (use_goto_tb(dc, dc->jmp_pc) || use_goto_tb(dc, npc))) { - TCGLabel *not_taken = gen_new_label(); - - tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, not_taken); - gen_goto_tb(dc, 1, dc->jmp_pc); - gen_set_label(not_taken); - - /* not-taken case handled below. */ - is_jmp = DISAS_TOO_MANY; - break; - } - tcg_gen_movi_tl(env_btarget, dc->jmp_pc); - /* fall through */ - - case JMP_INDIRECT: - tcg_gen_movcond_tl(TCG_COND_NE, env_pc, - env_btaken, tcg_constant_tl(0), - env_btarget, tcg_constant_tl(npc)); - is_jmp = dc->cpustate_changed ? DISAS_UPDATE : DISAS_JUMP; - - /* - * We have now consumed btaken and btarget. Hint to the - * tcg compiler that the writeback to env may be dropped. 
- */ - tcg_gen_discard_tl(env_btaken); - tcg_gen_discard_tl(env_btarget); - break; - - default: - g_assert_not_reached(); - } - } - - switch (is_jmp) { - case DISAS_TOO_MANY: - gen_goto_tb(dc, 0, npc); - break; - case DISAS_UPDATE_NEXT: - tcg_gen_movi_tl(env_pc, npc); - /* fall through */ - case DISAS_JUMP: - tcg_gen_lookup_and_goto_ptr(); - break; - case DISAS_UPDATE: - /* Indicate that interrupts must be re-evaluated before the next TB. */ - tcg_gen_exit_tb(NULL, 0); - break; - default: - g_assert_not_reached(); - } -} - -static const TranslatorOps cris_tr_ops = { - .init_disas_context = cris_tr_init_disas_context, - .tb_start = cris_tr_tb_start, - .insn_start = cris_tr_insn_start, - .translate_insn = cris_tr_translate_insn, - .tb_stop = cris_tr_tb_stop, -}; - -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) -{ - DisasContext dc; - translator_loop(cs, tb, max_insns, pc, host_pc, &cris_tr_ops, &dc.base); -} - -void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags) -{ - CPUCRISState *env = cpu_env(cs); - const char * const *regnames; - const char * const *pregnames; - int i; - - if (!env) { - return; - } - if (env->pregs[PR_VR] < 32) { - pregnames = pregnames_v10; - regnames = regnames_v10; - } else { - pregnames = pregnames_v32; - regnames = regnames_v32; - } - - qemu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n" - "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n", - env->pc, env->pregs[PR_CCS], env->btaken, env->btarget, - env->cc_op, - env->cc_src, env->cc_dest, env->cc_result, env->cc_mask); - - - for (i = 0; i < 16; i++) { - qemu_fprintf(f, "%s=%8.8x ", regnames[i], env->regs[i]); - if ((i + 1) % 4 == 0) { - qemu_fprintf(f, "\n"); - } - } - qemu_fprintf(f, "\nspecial regs:\n"); - for (i = 0; i < 16; i++) { - qemu_fprintf(f, "%s=%8.8x ", pregnames[i], env->pregs[i]); - if ((i + 1) % 4 == 0) { - qemu_fprintf(f, "\n"); - } - } - if (env->pregs[PR_VR] >= 32) { - uint32_t srs = env->pregs[PR_SRS]; - qemu_fprintf(f, "\nsupport function regs bank %x:\n", srs); - if (srs < ARRAY_SIZE(env->sregs)) { - for (i = 0; i < 16; i++) { - qemu_fprintf(f, "s%2.2d=%8.8x ", - i, env->sregs[srs][i]); - if ((i + 1) % 4 == 0) { - qemu_fprintf(f, "\n"); - } - } - } - } - qemu_fprintf(f, "\n\n"); - -} - -void cris_initialize_tcg(void) -{ - int i; - - cc_x = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_x), "cc_x"); - cc_src = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_src), "cc_src"); - cc_dest = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_dest), - "cc_dest"); - cc_result = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_result), - "cc_result"); - cc_op = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_op), "cc_op"); - cc_size = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_size), - "cc_size"); - cc_mask = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_mask), - "cc_mask"); - - env_pc = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, pc), - "pc"); - env_btarget = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, btarget), - "btarget"); - env_btaken = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, btaken), - "btaken"); - for (i = 0; i < 16; i++) { - cpu_R[i] = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, regs[i]), - regnames_v32[i]); - } - for (i = 0; i < 16; i++) { - cpu_PR[i] = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, pregs[i]), - pregnames_v32[i]); - } -} diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc 
deleted file mode 100644 index c15ff47505c..00000000000 --- a/target/cris/translate_v10.c.inc +++ /dev/null @@ -1,1262 +0,0 @@ -/* - * CRISv10 emulation for qemu: main translation routines. - * - * Copyright (c) 2010 AXIS Communications AB - * Written by Edgar E. Iglesias. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#include "qemu/osdep.h" -#include "crisv10-decode.h" - -static const char * const regnames_v10[] = -{ - "$r0", "$r1", "$r2", "$r3", - "$r4", "$r5", "$r6", "$r7", - "$r8", "$r9", "$r10", "$r11", - "$r12", "$r13", "$sp", "$pc", -}; - -static const char * const pregnames_v10[] = -{ - "$bz", "$vr", "$p2", "$p3", - "$wz", "$ccr", "$p6-prefix", "$mof", - "$dz", "$ibr", "$irp", "$srp", - "$bar", "$dccr", "$brp", "$usp", -}; - -/* We need this table to handle preg-moves with implicit width. */ -static const int preg_sizes_v10[] = { - 1, /* bz. */ - 1, /* vr. */ - 1, /* pid. */ - 1, /* srs. */ - 2, /* wz. */ - 2, 2, 4, - 4, 4, 4, 4, - 4, 4, 4, 4, -}; - -static inline int dec10_size(unsigned int size) -{ - size++; - if (size == 3) - size++; - return size; -} - -static inline void cris_illegal_insn(DisasContext *dc) -{ - qemu_log_mask(LOG_GUEST_ERROR, "illegal insn at pc=%x\n", dc->pc); - t_gen_raise_exception(EXCP_BREAK); - dc->base.is_jmp = DISAS_NORETURN; -} - -static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val, - unsigned int size, int mem_index) -{ - TCGLabel *l1 = gen_new_label(); - TCGv taddr = tcg_temp_new(); - TCGv tval = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - dc->postinc = 0; - cris_evaluate_flags(dc); - - tcg_gen_mov_tl(taddr, addr); - tcg_gen_mov_tl(tval, val); - - /* Store only if F flag isn't set */ - tcg_gen_andi_tl(t1, cpu_PR[PR_CCS], F_FLAG_V10); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - - tcg_gen_qemu_st_tl(tval, taddr, mem_index, ctz32(size) | MO_TE); - - gen_set_label(l1); - tcg_gen_shri_tl(t1, t1, 1); /* shift F to P position */ - tcg_gen_or_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], t1); /*P=F*/ -} - -static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val, - unsigned int size) -{ - /* If we get a fault on a delayslot we must keep the jmp state in - the cpu-state to be able to re-execute the jmp. */ - if (dc->delayed_branch == 1) { - cris_store_direct_jmp(dc); - } - - /* Conditional writes. */ - if (dc->flags_x) { - gen_store_v10_conditional(dc, addr, val, size, dc->mem_index); - return; - } - - tcg_gen_qemu_st_tl(val, addr, dc->mem_index, ctz32(size) | MO_TE); -} - - -/* Prefix flag and register are used to handle the more complex - addressing modes. */ -static void cris_set_prefix(DisasContext *dc) -{ - dc->clear_prefix = 0; - dc->tb_flags |= PFIX_FLAG; - tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], PFIX_FLAG); - - /* prefix insns don't clear the x flag. 
*/ - dc->clear_x = 0; - cris_lock_irq(dc); -} - -static void crisv10_prepare_memaddr(DisasContext *dc, - TCGv addr, unsigned int size) -{ - if (dc->tb_flags & PFIX_FLAG) { - tcg_gen_mov_tl(addr, cpu_PR[PR_PREFIX]); - } else { - tcg_gen_mov_tl(addr, cpu_R[dc->src]); - } -} - -static unsigned int crisv10_post_memaddr(DisasContext *dc, unsigned int size) -{ - unsigned int insn_len = 0; - - if (dc->tb_flags & PFIX_FLAG) { - if (dc->mode == CRISV10_MODE_AUTOINC) { - tcg_gen_mov_tl(cpu_R[dc->src], cpu_PR[PR_PREFIX]); - } - } else { - if (dc->mode == CRISV10_MODE_AUTOINC) { - if (dc->src == 15) { - insn_len += size & ~1; - } else { - tcg_gen_addi_tl(cpu_R[dc->src], cpu_R[dc->src], size); - } - } - } - return insn_len; -} - -static int dec10_prep_move_m(CPUCRISState *env, DisasContext *dc, - int s_ext, int memsize, TCGv dst) -{ - unsigned int rs; - uint32_t imm; - int is_imm; - int insn_len = 0; - - rs = dc->src; - is_imm = rs == 15 && !(dc->tb_flags & PFIX_FLAG); - LOG_DIS("rs=%d rd=%d is_imm=%d mode=%d pfix=%d\n", - rs, dc->dst, is_imm, dc->mode, dc->tb_flags & PFIX_FLAG); - - /* Load [$rs] onto T1. */ - if (is_imm) { - imm = cris_fetch(env, dc, dc->pc + 2, memsize, s_ext); - - tcg_gen_movi_tl(dst, imm); - - if (dc->mode == CRISV10_MODE_AUTOINC) { - insn_len += memsize; - if (memsize == 1) - insn_len++; - tcg_gen_addi_tl(cpu_R[15], cpu_R[15], insn_len); - } - } else { - TCGv addr; - - addr = tcg_temp_new(); - cris_flush_cc_state(dc); - crisv10_prepare_memaddr(dc, addr, memsize); - gen_load(dc, dst, addr, memsize, 0); - if (s_ext) - t_gen_sext(dst, dst, memsize); - else - t_gen_zext(dst, dst, memsize); - insn_len += crisv10_post_memaddr(dc, memsize); - } - - if (dc->mode == CRISV10_MODE_INDIRECT && (dc->tb_flags & PFIX_FLAG)) { - dc->dst = dc->src; - } - return insn_len; -} - -static unsigned int dec10_quick_imm(DisasContext *dc) -{ - int32_t imm, simm; - int op; - TCGv c; - - /* sign extend. 
*/ - imm = dc->ir & ((1 << 6) - 1); - simm = (int8_t) (imm << 2); - simm >>= 2; - switch (dc->opcode) { - case CRISV10_QIMM_BDAP_R0: - case CRISV10_QIMM_BDAP_R1: - case CRISV10_QIMM_BDAP_R2: - case CRISV10_QIMM_BDAP_R3: - simm = (int8_t)dc->ir; - LOG_DIS("bdap %d $r%d\n", simm, dc->dst); - LOG_DIS("pc=%x mode=%x quickimm %d r%d r%d\n", - dc->pc, dc->mode, dc->opcode, dc->src, dc->dst); - cris_set_prefix(dc); - if (dc->dst == 15) { - tcg_gen_movi_tl(cpu_PR[PR_PREFIX], dc->pc + 2 + simm); - } else { - tcg_gen_addi_tl(cpu_PR[PR_PREFIX], cpu_R[dc->dst], simm); - } - break; - - case CRISV10_QIMM_MOVEQ: - LOG_DIS("moveq %d, $r%d\n", simm, dc->dst); - - cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_constant_tl(simm); - cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], - cpu_R[dc->dst], c, 4); - break; - case CRISV10_QIMM_CMPQ: - LOG_DIS("cmpq %d, $r%d\n", simm, dc->dst); - - cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_constant_tl(simm); - cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst], - cpu_R[dc->dst], c, 4); - break; - case CRISV10_QIMM_ADDQ: - LOG_DIS("addq %d, $r%d\n", imm, dc->dst); - - cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_constant_tl(imm); - cris_alu(dc, CC_OP_ADD, cpu_R[dc->dst], - cpu_R[dc->dst], c, 4); - break; - case CRISV10_QIMM_ANDQ: - LOG_DIS("andq %d, $r%d\n", simm, dc->dst); - - cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_constant_tl(simm); - cris_alu(dc, CC_OP_AND, cpu_R[dc->dst], - cpu_R[dc->dst], c, 4); - break; - case CRISV10_QIMM_ASHQ: - LOG_DIS("ashq %d, $r%d\n", simm, dc->dst); - - cris_cc_mask(dc, CC_MASK_NZVC); - op = imm & (1 << 5); - imm &= 0x1f; - c = tcg_constant_tl(imm); - if (op) { - cris_alu(dc, CC_OP_ASR, cpu_R[dc->dst], - cpu_R[dc->dst], c, 4); - } else { - /* BTST */ - cris_update_cc_op(dc, CC_OP_FLAGS, 4); - gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->dst], - c, cpu_PR[PR_CCS]); - } - break; - case CRISV10_QIMM_LSHQ: - LOG_DIS("lshq %d, $r%d\n", simm, dc->dst); - - op = CC_OP_LSL; - if (imm & (1 << 5)) { - op = CC_OP_LSR; - } - imm &= 0x1f; - cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_constant_tl(imm); - cris_alu(dc, op, cpu_R[dc->dst], - cpu_R[dc->dst], c, 4); - break; - case CRISV10_QIMM_SUBQ: - LOG_DIS("subq %d, $r%d\n", imm, dc->dst); - - cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_constant_tl(imm); - cris_alu(dc, CC_OP_SUB, cpu_R[dc->dst], - cpu_R[dc->dst], c, 4); - break; - case CRISV10_QIMM_ORQ: - LOG_DIS("andq %d, $r%d\n", simm, dc->dst); - - cris_cc_mask(dc, CC_MASK_NZVC); - c = tcg_constant_tl(simm); - cris_alu(dc, CC_OP_OR, cpu_R[dc->dst], - cpu_R[dc->dst], c, 4); - break; - - case CRISV10_QIMM_BCC_R0: - case CRISV10_QIMM_BCC_R1: - case CRISV10_QIMM_BCC_R2: - case CRISV10_QIMM_BCC_R3: - imm = dc->ir & 0xff; - /* bit 0 is a sign bit. */ - if (imm & 1) { - imm |= 0xffffff00; /* sign extend. */ - imm &= ~1; /* get rid of the sign bit. 
*/ - } - imm += 2; - LOG_DIS("b%s %d\n", cc_name(dc->cond), imm); - - cris_cc_mask(dc, 0); - cris_prepare_cc_branch(dc, imm, dc->cond); - break; - - default: - LOG_DIS("pc=%x mode=%x quickimm %d r%d r%d\n", - dc->pc, dc->mode, dc->opcode, dc->src, dc->dst); - cpu_abort(CPU(dc->cpu), "Unhandled quickimm\n"); - break; - } - return 2; -} - -static unsigned int dec10_setclrf(DisasContext *dc) -{ - uint32_t flags; - unsigned int set = ~dc->opcode & 1; - - flags = EXTRACT_FIELD(dc->ir, 0, 3) - | (EXTRACT_FIELD(dc->ir, 12, 15) << 4); - LOG_DIS("%s set=%d flags=%x\n", __func__, set, flags); - - - if (flags & X_FLAG) { - if (set) - dc->flags_x = X_FLAG; - else - dc->flags_x = 0; - } - - cris_evaluate_flags (dc); - cris_update_cc_op(dc, CC_OP_FLAGS, 4); - cris_update_cc_x(dc); - tcg_gen_movi_tl(cc_op, dc->cc_op); - - if (set) { - tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags); - } else { - tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], - ~(flags|F_FLAG_V10|P_FLAG_V10)); - } - - dc->flags_uptodate = 1; - dc->clear_x = 0; - cris_lock_irq(dc); - return 2; -} - -static inline void dec10_reg_prep_sext(DisasContext *dc, int size, int sext, - TCGv dd, TCGv ds, TCGv sd, TCGv ss) -{ - if (sext) { - t_gen_sext(dd, sd, size); - t_gen_sext(ds, ss, size); - } else { - t_gen_zext(dd, sd, size); - t_gen_zext(ds, ss, size); - } -} - -static void dec10_reg_alu(DisasContext *dc, int op, int size, int sext) -{ - TCGv t[2]; - - t[0] = tcg_temp_new(); - t[1] = tcg_temp_new(); - dec10_reg_prep_sext(dc, size, sext, - t[0], t[1], cpu_R[dc->dst], cpu_R[dc->src]); - - if (op == CC_OP_LSL || op == CC_OP_LSR || op == CC_OP_ASR) { - tcg_gen_andi_tl(t[1], t[1], 63); - } - - assert(dc->dst != 15); - cris_alu(dc, op, cpu_R[dc->dst], t[0], t[1], size); -} - -static void dec10_reg_bound(DisasContext *dc, int size) -{ - TCGv t; - - t = tcg_temp_new(); - t_gen_zext(t, cpu_R[dc->src], size); - cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[dc->dst], t, 4); -} - -static void dec10_reg_mul(DisasContext *dc, int size, int sext) -{ - int op = sext ? 
CC_OP_MULS : CC_OP_MULU; - TCGv t[2]; - - t[0] = tcg_temp_new(); - t[1] = tcg_temp_new(); - dec10_reg_prep_sext(dc, size, sext, - t[0], t[1], cpu_R[dc->dst], cpu_R[dc->src]); - - cris_alu(dc, op, cpu_R[dc->dst], t[0], t[1], 4); -} - - -static void dec10_reg_movs(DisasContext *dc) -{ - int size = (dc->size & 1) + 1; - TCGv t; - - LOG_DIS("movx.%d $r%d, $r%d\n", size, dc->src, dc->dst); - cris_cc_mask(dc, CC_MASK_NZVC); - - t = tcg_temp_new(); - if (dc->ir & 32) - t_gen_sext(t, cpu_R[dc->src], size); - else - t_gen_zext(t, cpu_R[dc->src], size); - - cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t, 4); -} - -static void dec10_reg_alux(DisasContext *dc, int op) -{ - int size = (dc->size & 1) + 1; - TCGv t; - - LOG_DIS("movx.%d $r%d, $r%d\n", size, dc->src, dc->dst); - cris_cc_mask(dc, CC_MASK_NZVC); - - t = tcg_temp_new(); - if (dc->ir & 32) - t_gen_sext(t, cpu_R[dc->src], size); - else - t_gen_zext(t, cpu_R[dc->src], size); - - cris_alu(dc, op, cpu_R[dc->dst], cpu_R[dc->dst], t, 4); -} - -static void dec10_reg_mov_pr(DisasContext *dc) -{ - LOG_DIS("move p%d r%d sz=%d\n", dc->dst, dc->src, preg_sizes_v10[dc->dst]); - cris_lock_irq(dc); - if (dc->src == 15) { - tcg_gen_mov_tl(env_btarget, cpu_PR[dc->dst]); - cris_prepare_jmp(dc, JMP_INDIRECT); - return; - } - if (dc->dst == PR_CCS) { - cris_evaluate_flags(dc); - } - cris_alu(dc, CC_OP_MOVE, cpu_R[dc->src], - cpu_R[dc->src], cpu_PR[dc->dst], preg_sizes_v10[dc->dst]); -} - -static void dec10_reg_abs(DisasContext *dc) -{ - TCGv t0; - - LOG_DIS("abs $r%u, $r%u\n", dc->src, dc->dst); - - assert(dc->dst != 15); - t0 = tcg_temp_new(); - tcg_gen_sari_tl(t0, cpu_R[dc->src], 31); - tcg_gen_xor_tl(cpu_R[dc->dst], cpu_R[dc->src], t0); - tcg_gen_sub_tl(t0, cpu_R[dc->dst], t0); - - cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t0, 4); -} - -static void dec10_reg_swap(DisasContext *dc) -{ - TCGv t0; - - LOG_DIS("not $r%d, $r%d\n", dc->src, dc->dst); - - cris_cc_mask(dc, CC_MASK_NZVC); - t0 = tcg_temp_new(); - tcg_gen_mov_tl(t0, cpu_R[dc->src]); - if (dc->dst & 8) - tcg_gen_not_tl(t0, t0); - if (dc->dst & 4) - t_gen_swapw(t0, t0); - if (dc->dst & 2) - t_gen_swapb(t0, t0); - if (dc->dst & 1) - t_gen_swapr(t0, t0); - cris_alu(dc, CC_OP_MOVE, cpu_R[dc->src], cpu_R[dc->src], t0, 4); -} - -static void dec10_reg_scc(DisasContext *dc) -{ - int cond = dc->dst; - - LOG_DIS("s%s $r%u\n", cc_name(cond), dc->src); - - gen_tst_cc(dc, cpu_R[dc->src], cond); - tcg_gen_setcondi_tl(TCG_COND_NE, cpu_R[dc->src], cpu_R[dc->src], 0); - - cris_cc_mask(dc, 0); -} - -static unsigned int dec10_reg(DisasContext *dc) -{ - TCGv t; - unsigned int insn_len = 2; - unsigned int size = dec10_size(dc->size); - unsigned int tmp; - - if (dc->size != 3) { - switch (dc->opcode) { - case CRISV10_REG_MOVE_R: - LOG_DIS("move.%d $r%d, $r%d\n", dc->size, dc->src, dc->dst); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alu(dc, CC_OP_MOVE, size, 0); - if (dc->dst == 15) { - tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]); - cris_prepare_jmp(dc, JMP_INDIRECT); - dc->delayed_branch = 1; - } - break; - case CRISV10_REG_MOVX: - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_movs(dc); - break; - case CRISV10_REG_ADDX: - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alux(dc, CC_OP_ADD); - break; - case CRISV10_REG_SUBX: - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alux(dc, CC_OP_SUB); - break; - case CRISV10_REG_ADD: - LOG_DIS("add $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alu(dc, CC_OP_ADD, size, 0); - break; - case CRISV10_REG_SUB: - 
LOG_DIS("sub $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alu(dc, CC_OP_SUB, size, 0); - break; - case CRISV10_REG_CMP: - LOG_DIS("cmp $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alu(dc, CC_OP_CMP, size, 0); - break; - case CRISV10_REG_BOUND: - LOG_DIS("bound $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_bound(dc, size); - break; - case CRISV10_REG_AND: - LOG_DIS("and $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alu(dc, CC_OP_AND, size, 0); - break; - case CRISV10_REG_ADDI: - if (dc->src == 15) { - /* nop. */ - return 2; - } - t = tcg_temp_new(); - LOG_DIS("addi r%d r%d size=%d\n", dc->src, dc->dst, dc->size); - tcg_gen_shli_tl(t, cpu_R[dc->dst], dc->size & 3); - tcg_gen_add_tl(cpu_R[dc->src], cpu_R[dc->src], t); - break; - case CRISV10_REG_LSL: - LOG_DIS("lsl $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alu(dc, CC_OP_LSL, size, 0); - break; - case CRISV10_REG_LSR: - LOG_DIS("lsr $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alu(dc, CC_OP_LSR, size, 0); - break; - case CRISV10_REG_ASR: - LOG_DIS("asr $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alu(dc, CC_OP_ASR, size, 1); - break; - case CRISV10_REG_OR: - LOG_DIS("or $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alu(dc, CC_OP_OR, size, 0); - break; - case CRISV10_REG_NEG: - LOG_DIS("neg $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alu(dc, CC_OP_NEG, size, 0); - break; - case CRISV10_REG_BIAP: - LOG_DIS("BIAP pc=%x reg %d r%d r%d size=%d\n", dc->pc, - dc->opcode, dc->src, dc->dst, size); - switch (size) { - case 4: tmp = 2; break; - case 2: tmp = 1; break; - case 1: tmp = 0; break; - default: - cpu_abort(CPU(dc->cpu), "Unhandled BIAP"); - break; - } - - t = tcg_temp_new(); - tcg_gen_shli_tl(t, cpu_R[dc->dst], tmp); - if (dc->src == 15) { - tcg_gen_addi_tl(cpu_PR[PR_PREFIX], t, ((dc->pc +2)| 1) + 1); - } else { - tcg_gen_add_tl(cpu_PR[PR_PREFIX], cpu_R[dc->src], t); - } - cris_set_prefix(dc); - break; - - default: - LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc, - dc->opcode, dc->src, dc->dst); - cpu_abort(CPU(dc->cpu), "Unhandled opcode"); - break; - } - } else { - switch (dc->opcode) { - case CRISV10_REG_MOVX: - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_movs(dc); - break; - case CRISV10_REG_ADDX: - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alux(dc, CC_OP_ADD); - break; - case CRISV10_REG_SUBX: - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alux(dc, CC_OP_SUB); - break; - case CRISV10_REG_MOVE_SPR_R: - cris_evaluate_flags(dc); - cris_cc_mask(dc, 0); - dec10_reg_mov_pr(dc); - break; - case CRISV10_REG_MOVE_R_SPR: - LOG_DIS("move r%d p%d\n", dc->src, dc->dst); - cris_evaluate_flags(dc); - if (dc->src != 11) /* fast for srp. 
*/ - dc->cpustate_changed = 1; - t_gen_mov_preg_TN(dc, dc->dst, cpu_R[dc->src]); - break; - case CRISV10_REG_SETF: - case CRISV10_REG_CLEARF: - dec10_setclrf(dc); - break; - case CRISV10_REG_SWAP: - dec10_reg_swap(dc); - break; - case CRISV10_REG_ABS: - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_abs(dc); - break; - case CRISV10_REG_LZ: - LOG_DIS("lz $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alu(dc, CC_OP_LZ, 4, 0); - break; - case CRISV10_REG_XOR: - LOG_DIS("xor $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_alu(dc, CC_OP_XOR, 4, 0); - break; - case CRISV10_REG_BTST: - LOG_DIS("btst $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_update_cc_op(dc, CC_OP_FLAGS, 4); - gen_helper_btst(cpu_PR[PR_CCS], tcg_env, cpu_R[dc->dst], - cpu_R[dc->src], cpu_PR[PR_CCS]); - break; - case CRISV10_REG_DSTEP: - LOG_DIS("dstep $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu(dc, CC_OP_DSTEP, cpu_R[dc->dst], - cpu_R[dc->dst], cpu_R[dc->src], 4); - break; - case CRISV10_REG_MSTEP: - LOG_DIS("mstep $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); - cris_evaluate_flags(dc); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu(dc, CC_OP_MSTEP, cpu_R[dc->dst], - cpu_R[dc->dst], cpu_R[dc->src], 4); - break; - case CRISV10_REG_SCC: - dec10_reg_scc(dc); - break; - default: - LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc, - dc->opcode, dc->src, dc->dst); - cpu_abort(CPU(dc->cpu), "Unhandled opcode"); - break; - } - } - return insn_len; -} - -static unsigned int dec10_ind_move_m_r(CPUCRISState *env, DisasContext *dc, - unsigned int size) -{ - unsigned int insn_len = 2; - TCGv t; - - LOG_DIS("%s: move.%d [$r%d], $r%d\n", __func__, - size, dc->src, dc->dst); - - cris_cc_mask(dc, CC_MASK_NZVC); - t = tcg_temp_new(); - insn_len += dec10_prep_move_m(env, dc, 0, size, t); - cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t, size); - if (dc->dst == 15) { - tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]); - cris_prepare_jmp(dc, JMP_INDIRECT); - dc->delayed_branch = 1; - } - - return insn_len; -} - -static unsigned int dec10_ind_move_r_m(DisasContext *dc, unsigned int size) -{ - unsigned int insn_len = 2; - TCGv addr; - - LOG_DIS("move.%d $r%d, [$r%d]\n", dc->size, dc->src, dc->dst); - addr = tcg_temp_new(); - crisv10_prepare_memaddr(dc, addr, size); - gen_store_v10(dc, addr, cpu_R[dc->dst], size); - insn_len += crisv10_post_memaddr(dc, size); - - return insn_len; -} - -static unsigned int dec10_ind_move_m_pr(CPUCRISState *env, DisasContext *dc) -{ - unsigned int insn_len = 2, rd = dc->dst; - TCGv t; - - LOG_DIS("move.%d $p%d, [$r%d]\n", dc->size, dc->dst, dc->src); - cris_lock_irq(dc); - - t = tcg_temp_new(); - insn_len += dec10_prep_move_m(env, dc, 0, 4, t); - if (rd == 15) { - tcg_gen_mov_tl(env_btarget, t); - cris_prepare_jmp(dc, JMP_INDIRECT); - dc->delayed_branch = 1; - } else { - tcg_gen_mov_tl(cpu_PR[rd], t); - dc->cpustate_changed = 1; - } - return insn_len; -} - -static unsigned int dec10_ind_move_pr_m(DisasContext *dc) -{ - unsigned int insn_len = 2, size = preg_sizes_v10[dc->dst]; - TCGv addr, t0; - - LOG_DIS("move.%d $p%d, [$r%d]\n", dc->size, dc->dst, dc->src); - - addr = tcg_temp_new(); - crisv10_prepare_memaddr(dc, addr, size); - if (dc->dst == PR_CCS) { - t0 = tcg_temp_new(); - cris_evaluate_flags(dc); - tcg_gen_andi_tl(t0, cpu_PR[PR_CCS], ~PFIX_FLAG); - gen_store_v10(dc, addr, t0, size); - } else { - gen_store_v10(dc, addr, cpu_PR[dc->dst], size); - 
} - insn_len += crisv10_post_memaddr(dc, size); - cris_lock_irq(dc); - - return insn_len; -} - -static void dec10_movem_r_m(DisasContext *dc) -{ - int i, pfix = dc->tb_flags & PFIX_FLAG; - TCGv addr, t0; - - LOG_DIS("%s r%d, [r%d] pi=%d ir=%x\n", __func__, - dc->dst, dc->src, dc->postinc, dc->ir); - - addr = tcg_temp_new(); - t0 = tcg_temp_new(); - crisv10_prepare_memaddr(dc, addr, 4); - tcg_gen_mov_tl(t0, addr); - for (i = dc->dst; i >= 0; i--) { - if ((pfix && dc->mode == CRISV10_MODE_AUTOINC) && dc->src == i) { - gen_store_v10(dc, addr, t0, 4); - } else { - gen_store_v10(dc, addr, cpu_R[i], 4); - } - tcg_gen_addi_tl(addr, addr, 4); - } - - if (pfix && dc->mode == CRISV10_MODE_AUTOINC) { - tcg_gen_mov_tl(cpu_R[dc->src], t0); - } - - if (!pfix && dc->mode == CRISV10_MODE_AUTOINC) { - tcg_gen_mov_tl(cpu_R[dc->src], addr); - } -} - -static void dec10_movem_m_r(DisasContext *dc) -{ - int i, pfix = dc->tb_flags & PFIX_FLAG; - TCGv addr, t0; - - LOG_DIS("%s [r%d], r%d pi=%d ir=%x\n", __func__, - dc->src, dc->dst, dc->postinc, dc->ir); - - addr = tcg_temp_new(); - t0 = tcg_temp_new(); - crisv10_prepare_memaddr(dc, addr, 4); - tcg_gen_mov_tl(t0, addr); - for (i = dc->dst; i >= 0; i--) { - gen_load(dc, cpu_R[i], addr, 4, 0); - tcg_gen_addi_tl(addr, addr, 4); - } - - if (pfix && dc->mode == CRISV10_MODE_AUTOINC) { - tcg_gen_mov_tl(cpu_R[dc->src], t0); - } - - if (!pfix && dc->mode == CRISV10_MODE_AUTOINC) { - tcg_gen_mov_tl(cpu_R[dc->src], addr); - } -} - -static int dec10_ind_alu(CPUCRISState *env, DisasContext *dc, - int op, unsigned int size) -{ - int insn_len = 0; - int rd = dc->dst; - TCGv t[2]; - - cris_alu_m_alloc_temps(t); - insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]); - cris_alu(dc, op, cpu_R[dc->dst], cpu_R[rd], t[0], size); - if (dc->dst == 15) { - tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]); - cris_prepare_jmp(dc, JMP_INDIRECT); - dc->delayed_branch = 1; - return insn_len; - } - return insn_len; -} - -static int dec10_ind_bound(CPUCRISState *env, DisasContext *dc, - unsigned int size) -{ - int insn_len = 0; - int rd = dc->dst; - TCGv t; - - t = tcg_temp_new(); - insn_len += dec10_prep_move_m(env, dc, 0, size, t); - cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[rd], t, 4); - if (dc->dst == 15) { - tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]); - cris_prepare_jmp(dc, JMP_INDIRECT); - dc->delayed_branch = 1; - } - - return insn_len; -} - -static int dec10_alux_m(CPUCRISState *env, DisasContext *dc, int op) -{ - unsigned int size = (dc->size & 1) ? 
2 : 1; - unsigned int sx = !!(dc->size & 2); - int insn_len = 2; - int rd = dc->dst; - TCGv t; - - LOG_DIS("addx size=%d sx=%d op=%d %d\n", size, sx, dc->src, dc->dst); - - t = tcg_temp_new(); - - cris_cc_mask(dc, CC_MASK_NZVC); - insn_len += dec10_prep_move_m(env, dc, sx, size, t); - cris_alu(dc, op, cpu_R[dc->dst], cpu_R[rd], t, 4); - if (dc->dst == 15) { - tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]); - cris_prepare_jmp(dc, JMP_INDIRECT); - dc->delayed_branch = 1; - } - - return insn_len; -} - -static int dec10_dip(CPUCRISState *env, DisasContext *dc) -{ - int insn_len = 2; - uint32_t imm; - - LOG_DIS("dip pc=%x opcode=%d r%d r%d\n", - dc->pc, dc->opcode, dc->src, dc->dst); - if (dc->src == 15) { - imm = cris_fetch(env, dc, dc->pc + 2, 4, 0); - tcg_gen_movi_tl(cpu_PR[PR_PREFIX], imm); - if (dc->postinc) { - insn_len += 4; - } - tcg_gen_addi_tl(cpu_R[15], cpu_R[15], insn_len - 2); - } else { - gen_load(dc, cpu_PR[PR_PREFIX], cpu_R[dc->src], 4, 0); - if (dc->postinc) - tcg_gen_addi_tl(cpu_R[dc->src], cpu_R[dc->src], 4); - } - - cris_set_prefix(dc); - return insn_len; -} - -static int dec10_bdap_m(CPUCRISState *env, DisasContext *dc, int size) -{ - int insn_len = 2; - int rd = dc->dst; - - LOG_DIS("bdap_m pc=%x opcode=%d r%d r%d sz=%d\n", - dc->pc, dc->opcode, dc->src, dc->dst, size); - - assert(dc->dst != 15); -#if 0 - /* 8bit embedded offset? */ - if (!dc->postinc && (dc->ir & (1 << 11))) { - int simm = dc->ir & 0xff; - - /* cpu_abort(CPU(dc->cpu), "Unhandled opcode"); */ - /* sign extended. */ - simm = (int8_t)simm; - - tcg_gen_addi_tl(cpu_PR[PR_PREFIX], cpu_R[dc->dst], simm); - - cris_set_prefix(dc); - return insn_len; - } -#endif - /* Now the rest of the modes are truly indirect. */ - insn_len += dec10_prep_move_m(env, dc, 1, size, cpu_PR[PR_PREFIX]); - tcg_gen_add_tl(cpu_PR[PR_PREFIX], cpu_PR[PR_PREFIX], cpu_R[rd]); - cris_set_prefix(dc); - return insn_len; -} - -static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) -{ - unsigned int insn_len = 2; - unsigned int size = dec10_size(dc->size); - uint32_t imm; - int32_t simm; - TCGv t[2], c; - - if (dc->size != 3) { - switch (dc->opcode) { - case CRISV10_IND_MOVE_M_R: - return dec10_ind_move_m_r(env, dc, size); - case CRISV10_IND_MOVE_R_M: - return dec10_ind_move_r_m(dc, size); - case CRISV10_IND_CMP: - LOG_DIS("cmp size=%d op=%d %d\n", size, dc->src, dc->dst); - cris_cc_mask(dc, CC_MASK_NZVC); - insn_len += dec10_ind_alu(env, dc, CC_OP_CMP, size); - break; - case CRISV10_IND_TEST: - LOG_DIS("test size=%d op=%d %d\n", size, dc->src, dc->dst); - - cris_evaluate_flags(dc); - cris_cc_mask(dc, CC_MASK_NZVC); - cris_alu_m_alloc_temps(t); - insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]); - tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3); - c = tcg_constant_tl(0); - cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst], - t[0], c, size); - break; - case CRISV10_IND_ADD: - LOG_DIS("add size=%d op=%d %d\n", size, dc->src, dc->dst); - cris_cc_mask(dc, CC_MASK_NZVC); - insn_len += dec10_ind_alu(env, dc, CC_OP_ADD, size); - break; - case CRISV10_IND_SUB: - LOG_DIS("sub size=%d op=%d %d\n", size, dc->src, dc->dst); - cris_cc_mask(dc, CC_MASK_NZVC); - insn_len += dec10_ind_alu(env, dc, CC_OP_SUB, size); - break; - case CRISV10_IND_BOUND: - LOG_DIS("bound size=%d op=%d %d\n", size, dc->src, dc->dst); - cris_cc_mask(dc, CC_MASK_NZVC); - insn_len += dec10_ind_bound(env, dc, size); - break; - case CRISV10_IND_AND: - LOG_DIS("and size=%d op=%d %d\n", size, dc->src, dc->dst); - cris_cc_mask(dc, CC_MASK_NZVC); - insn_len += dec10_ind_alu(env, dc, 
CC_OP_AND, size); - break; - case CRISV10_IND_OR: - LOG_DIS("or size=%d op=%d %d\n", size, dc->src, dc->dst); - cris_cc_mask(dc, CC_MASK_NZVC); - insn_len += dec10_ind_alu(env, dc, CC_OP_OR, size); - break; - case CRISV10_IND_MOVX: - insn_len = dec10_alux_m(env, dc, CC_OP_MOVE); - break; - case CRISV10_IND_ADDX: - insn_len = dec10_alux_m(env, dc, CC_OP_ADD); - break; - case CRISV10_IND_SUBX: - insn_len = dec10_alux_m(env, dc, CC_OP_SUB); - break; - case CRISV10_IND_CMPX: - insn_len = dec10_alux_m(env, dc, CC_OP_CMP); - break; - case CRISV10_IND_MUL: - /* This is a reg insn coded in the mem indir space. */ - LOG_DIS("mul pc=%x opcode=%d\n", dc->pc, dc->opcode); - cris_cc_mask(dc, CC_MASK_NZVC); - dec10_reg_mul(dc, size, dc->ir & (1 << 10)); - break; - case CRISV10_IND_BDAP_M: - insn_len = dec10_bdap_m(env, dc, size); - break; - default: - /* - * ADDC for v17: - * - * Instruction format: ADDC [Rs],Rd - * - * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+ - * |Destination(Rd)| 1 0 0 1 1 0 1 0 | Source(Rs)| - * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+--+--+ - * - * Instruction format: ADDC [Rs+],Rd - * - * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+ - * |Destination(Rd)| 1 1 0 1 1 0 1 0 | Source(Rs)| - * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+ - */ - if (dc->opcode == CRISV17_IND_ADDC && dc->size == 2 && - env->pregs[PR_VR] == 17) { - LOG_DIS("addc op=%d %d\n", dc->src, dc->dst); - cris_cc_mask(dc, CC_MASK_NZVC); - insn_len += dec10_ind_alu(env, dc, CC_OP_ADDC, size); - break; - } - - LOG_DIS("pc=%x var-ind.%d %d r%d r%d\n", - dc->pc, size, dc->opcode, dc->src, dc->dst); - cpu_abort(CPU(dc->cpu), "Unhandled opcode"); - break; - } - return insn_len; - } - - switch (dc->opcode) { - case CRISV10_IND_MOVE_M_SPR: - insn_len = dec10_ind_move_m_pr(env, dc); - break; - case CRISV10_IND_MOVE_SPR_M: - insn_len = dec10_ind_move_pr_m(dc); - break; - case CRISV10_IND_JUMP_M: - if (dc->src == 15) { - LOG_DIS("jump.%d %d r%d r%d direct\n", size, - dc->opcode, dc->src, dc->dst); - imm = cris_fetch(env, dc, dc->pc + 2, size, 0); - if (dc->mode == CRISV10_MODE_AUTOINC) { - insn_len += size; - } - c = tcg_constant_tl(dc->pc + insn_len); - t_gen_mov_preg_TN(dc, dc->dst, c); - dc->jmp_pc = imm; - cris_prepare_jmp(dc, JMP_DIRECT); - dc->delayed_branch--; /* v10 has no dslot here. */ - } else { - if (dc->dst == 14) { - LOG_DIS("break %d\n", dc->src); - cris_evaluate_flags(dc); - tcg_gen_movi_tl(env_pc, dc->pc + 2); - c = tcg_constant_tl(dc->src + 2); - t_gen_mov_env_TN(trap_vector, c); - t_gen_raise_exception(EXCP_BREAK); - dc->base.is_jmp = DISAS_NORETURN; - return insn_len; - } - LOG_DIS("%d: jump.%d %d r%d r%d\n", __LINE__, size, - dc->opcode, dc->src, dc->dst); - t[0] = tcg_temp_new(); - c = tcg_constant_tl(dc->pc + insn_len); - t_gen_mov_preg_TN(dc, dc->dst, c); - crisv10_prepare_memaddr(dc, t[0], size); - gen_load(dc, env_btarget, t[0], 4, 0); - insn_len += crisv10_post_memaddr(dc, size); - cris_prepare_jmp(dc, JMP_INDIRECT); - dc->delayed_branch--; /* v10 has no dslot here. 
*/ - } - break; - - case CRISV10_IND_MOVEM_R_M: - LOG_DIS("movem_r_m pc=%x opcode=%d r%d r%d\n", - dc->pc, dc->opcode, dc->dst, dc->src); - dec10_movem_r_m(dc); - break; - case CRISV10_IND_MOVEM_M_R: - LOG_DIS("movem_m_r pc=%x opcode=%d\n", dc->pc, dc->opcode); - dec10_movem_m_r(dc); - break; - case CRISV10_IND_JUMP_R: - LOG_DIS("jmp pc=%x opcode=%d r%d r%d\n", - dc->pc, dc->opcode, dc->dst, dc->src); - tcg_gen_mov_tl(env_btarget, cpu_R[dc->src]); - c = tcg_constant_tl(dc->pc + insn_len); - t_gen_mov_preg_TN(dc, dc->dst, c); - cris_prepare_jmp(dc, JMP_INDIRECT); - dc->delayed_branch--; /* v10 has no dslot here. */ - break; - case CRISV10_IND_MOVX: - insn_len = dec10_alux_m(env, dc, CC_OP_MOVE); - break; - case CRISV10_IND_ADDX: - insn_len = dec10_alux_m(env, dc, CC_OP_ADD); - break; - case CRISV10_IND_SUBX: - insn_len = dec10_alux_m(env, dc, CC_OP_SUB); - break; - case CRISV10_IND_CMPX: - insn_len = dec10_alux_m(env, dc, CC_OP_CMP); - break; - case CRISV10_IND_DIP: - insn_len = dec10_dip(env, dc); - break; - case CRISV10_IND_BCC_M: - - cris_cc_mask(dc, 0); - simm = cris_fetch(env, dc, dc->pc + 2, 2, 1); - simm += 4; - - LOG_DIS("bcc_m: b%s %x\n", cc_name(dc->cond), dc->pc + simm); - cris_prepare_cc_branch(dc, simm, dc->cond); - insn_len = 4; - break; - default: - LOG_DIS("ERROR pc=%x opcode=%d\n", dc->pc, dc->opcode); - cpu_abort(CPU(dc->cpu), "Unhandled opcode"); - break; - } - - return insn_len; -} - -static unsigned int crisv10_decoder(CPUCRISState *env, DisasContext *dc) -{ - unsigned int insn_len = 2; - - /* Load a halfword onto the instruction register. */ - dc->ir = cris_fetch(env, dc, dc->pc, 2, 0); - - /* Now decode it. */ - dc->opcode = EXTRACT_FIELD(dc->ir, 6, 9); - dc->mode = EXTRACT_FIELD(dc->ir, 10, 11); - dc->src = EXTRACT_FIELD(dc->ir, 0, 3); - dc->size = EXTRACT_FIELD(dc->ir, 4, 5); - dc->cond = dc->dst = EXTRACT_FIELD(dc->ir, 12, 15); - dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10); - - dc->clear_prefix = 1; - - /* FIXME: What if this insn insn't 2 in length?? */ - if (dc->src == 15 || dc->dst == 15) - tcg_gen_movi_tl(cpu_R[15], dc->pc + 2); - - switch (dc->mode) { - case CRISV10_MODE_QIMMEDIATE: - insn_len = dec10_quick_imm(dc); - break; - case CRISV10_MODE_REG: - insn_len = dec10_reg(dc); - break; - case CRISV10_MODE_AUTOINC: - case CRISV10_MODE_INDIRECT: - insn_len = dec10_ind(env, dc); - break; - } - - if (dc->clear_prefix && dc->tb_flags & PFIX_FLAG) { - dc->tb_flags &= ~PFIX_FLAG; - tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~PFIX_FLAG); - if (dc->tb_flags != dc->base.tb->flags) { - dc->cpustate_changed = 1; - } - } - - /* CRISv10 locks out interrupts on dslots. 
*/ - if (dc->delayed_branch == 2) { - cris_lock_irq(dc); - } - return insn_len; -} - -void cris_initialize_crisv10_tcg(void) -{ - int i; - - cc_x = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_x), "cc_x"); - cc_src = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_src), "cc_src"); - cc_dest = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_dest), - "cc_dest"); - cc_result = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_result), - "cc_result"); - cc_op = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_op), "cc_op"); - cc_size = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_size), - "cc_size"); - cc_mask = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, cc_mask), - "cc_mask"); - - env_pc = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, pc), - "pc"); - env_btarget = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, btarget), - "btarget"); - env_btaken = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, btaken), - "btaken"); - for (i = 0; i < 16; i++) { - cpu_R[i] = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, regs[i]), - regnames_v10[i]); - } - for (i = 0; i < 16; i++) { - cpu_PR[i] = tcg_global_mem_new(tcg_env, - offsetof(CPUCRISState, pregs[i]), - pregnames_v10[i]); - } -} diff --git a/target/hexagon/README b/target/hexagon/README index 7ffd517d706..ca617e33643 100644 --- a/target/hexagon/README +++ b/target/hexagon/README @@ -282,10 +282,6 @@ For Hexagon Vector eXtensions (HVX), the following fields are used *** Debugging *** -You can turn on a lot of debugging by changing the HEX_DEBUG macro to 1 in -internal.h. This will stream a lot of information as it generates TCG and -executes the code. - To track down nasty issues with Hexagon->TCG generation, we compare the execution results with actual hardware running on a Hexagon Linux target. Run qemu with the "-d cpu" option. 
Then, we can diff the results and figure @@ -305,8 +301,3 @@ Here are some handy places to set breakpoints The helper function for each instruction is named helper_, so here's an example that will set a breakpoint at the start br helper_A2_add - If you have the HEX_DEBUG macro set, the following will be useful - At the start of execution of a packet for a given PC - br helper_debug_start_packet if env->gpr[41] == 0xdeadbeef - At the end of execution of a packet for a given PC - br helper_debug_commit_end if this_PC == 0xdeadbeef diff --git a/target/hexagon/cpu-param.h b/target/hexagon/cpu-param.h index 71b4a9b83ec..635d509e743 100644 --- a/target/hexagon/cpu-param.h +++ b/target/hexagon/cpu-param.h @@ -19,9 +19,10 @@ #define HEXAGON_CPU_PARAM_H #define TARGET_PAGE_BITS 16 /* 64K pages */ -#define TARGET_LONG_BITS 32 #define TARGET_PHYS_ADDR_SPACE_BITS 36 #define TARGET_VIRT_ADDR_SPACE_BITS 32 +#define TARGET_INSN_START_EXTRA_WORDS 0 + #endif diff --git a/target/hexagon/cpu-qom.h b/target/hexagon/cpu-qom.h index da92fe74682..0b149bd5fea 100644 --- a/target/hexagon/cpu-qom.h +++ b/target/hexagon/cpu-qom.h @@ -16,6 +16,7 @@ #define HEXAGON_CPU_TYPE_SUFFIX "-" TYPE_HEXAGON_CPU #define HEXAGON_CPU_TYPE_NAME(name) (name HEXAGON_CPU_TYPE_SUFFIX) +#define TYPE_HEXAGON_CPU_V66 HEXAGON_CPU_TYPE_NAME("v66") #define TYPE_HEXAGON_CPU_V67 HEXAGON_CPU_TYPE_NAME("v67") #define TYPE_HEXAGON_CPU_V68 HEXAGON_CPU_TYPE_NAME("v68") #define TYPE_HEXAGON_CPU_V69 HEXAGON_CPU_TYPE_NAME("v69") diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c index 64cc05cca74..a5a04173ab8 100644 --- a/target/hexagon/cpu.c +++ b/target/hexagon/cpu.c @@ -19,13 +19,15 @@ #include "qemu/qemu-print.h" #include "cpu.h" #include "internal.h" -#include "exec/exec-all.h" +#include "exec/translation-block.h" #include "qapi/error.h" #include "hw/qdev-properties.h" #include "fpu/softfloat-helpers.h" #include "tcg/tcg.h" #include "exec/gdbstub.h" +#include "accel/tcg/cpu-ops.h" +static void hexagon_v66_cpu_init(Object *obj) { } static void hexagon_v67_cpu_init(Object *obj) { } static void hexagon_v68_cpu_init(Object *obj) { } static void hexagon_v69_cpu_init(Object *obj) { } @@ -47,13 +49,12 @@ static ObjectClass *hexagon_cpu_class_by_name(const char *cpu_model) return oc; } -static Property hexagon_lldb_compat_property = - DEFINE_PROP_BOOL("lldb-compat", HexagonCPU, lldb_compat, false); -static Property hexagon_lldb_stack_adjust_property = - DEFINE_PROP_UNSIGNED("lldb-stack-adjust", HexagonCPU, lldb_stack_adjust, - 0, qdev_prop_uint32, target_ulong); -static Property hexagon_short_circuit_property = - DEFINE_PROP_BOOL("short-circuit", HexagonCPU, short_circuit, true); +static const Property hexagon_cpu_properties[] = { + DEFINE_PROP_BOOL("lldb-compat", HexagonCPU, lldb_compat, false), + DEFINE_PROP_UNSIGNED("lldb-stack-adjust", HexagonCPU, lldb_stack_adjust, 0, + qdev_prop_uint32, target_ulong), + DEFINE_PROP_BOOL("short-circuit", HexagonCPU, short_circuit, true), +}; const char * const hexagon_regnames[TOTAL_PER_THREAD_REGS] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", @@ -254,6 +255,22 @@ static vaddr hexagon_cpu_get_pc(CPUState *cs) return cpu_env(cs)->gpr[HEX_REG_PC]; } +static TCGTBCPUState hexagon_get_tb_cpu_state(CPUState *cs) +{ + CPUHexagonState *env = cpu_env(cs); + vaddr pc = env->gpr[HEX_REG_PC]; + uint32_t hex_flags = 0; + + if (pc == env->gpr[HEX_REG_SA0]) { + hex_flags = FIELD_DP32(hex_flags, TB_FLAGS, IS_TIGHT_LOOP, 1); + } + if (pc & PCALIGN_MASK) { + hexagon_raise_exception_err(env, HEX_CAUSE_PC_NOT_ALIGNED, 0); 
+ } + + return (TCGTBCPUState){ .pc = pc, .flags = hex_flags }; +} + static void hexagon_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -261,11 +278,6 @@ static void hexagon_cpu_synchronize_from_tb(CPUState *cs, cpu_env(cs)->gpr[HEX_REG_PC] = tb->pc; } -static bool hexagon_cpu_has_work(CPUState *cs) -{ - return true; -} - static void hexagon_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data) @@ -285,11 +297,14 @@ static void hexagon_cpu_reset_hold(Object *obj, ResetType type) set_default_nan_mode(1, &env->fp_status); set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status); + /* Default NaN value: sign bit set, all frac bits set */ + set_float_default_nan_pattern(0b11111111, &env->fp_status); } static void hexagon_cpu_disas_set_info(CPUState *s, disassemble_info *info) { info->print_insn = print_insn_hexagon; + info->endian = BFD_ENDIAN_LITTLE; } static void hexagon_cpu_realize(DeviceState *dev, Error **errp) @@ -314,22 +329,28 @@ static void hexagon_cpu_realize(DeviceState *dev, Error **errp) mcc->parent_realize(dev, errp); } -static void hexagon_cpu_init(Object *obj) +static int hexagon_cpu_mmu_index(CPUState *cs, bool ifetch) { - qdev_property_add_static(DEVICE(obj), &hexagon_lldb_compat_property); - qdev_property_add_static(DEVICE(obj), &hexagon_lldb_stack_adjust_property); - qdev_property_add_static(DEVICE(obj), &hexagon_short_circuit_property); + return MMU_USER_IDX; } -#include "hw/core/tcg-cpu-ops.h" +static void hexagon_cpu_init(Object *obj) +{ +} static const TCGCPUOps hexagon_tcg_ops = { + /* MTTCG not yet supported: require strict ordering */ + .guest_default_memory_order = TCG_MO_ALL, + .mttcg_supported = false, .initialize = hexagon_translate_init, + .translate_code = hexagon_translate_code, + .get_tb_cpu_state = hexagon_get_tb_cpu_state, .synchronize_from_tb = hexagon_cpu_synchronize_from_tb, .restore_state_to_opc = hexagon_restore_state_to_opc, + .mmu_index = hexagon_cpu_mmu_index, }; -static void hexagon_cpu_class_init(ObjectClass *c, void *data) +static void hexagon_cpu_class_init(ObjectClass *c, const void *data) { HexagonCPUClass *mcc = HEXAGON_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); @@ -339,11 +360,11 @@ static void hexagon_cpu_class_init(ObjectClass *c, void *data) device_class_set_parent_realize(dc, hexagon_cpu_realize, &mcc->parent_realize); + device_class_set_props(dc, hexagon_cpu_properties); resettable_class_set_parent_phases(rc, NULL, hexagon_cpu_reset_hold, NULL, &mcc->parent_phases); cc->class_by_name = hexagon_cpu_class_by_name; - cc->has_work = hexagon_cpu_has_work; cc->dump_state = hexagon_dump_state; cc->set_pc = hexagon_cpu_set_pc; cc->get_pc = hexagon_cpu_get_pc; @@ -373,6 +394,7 @@ static const TypeInfo hexagon_cpu_type_infos[] = { .class_size = sizeof(HexagonCPUClass), .class_init = hexagon_cpu_class_init, }, + DEFINE_CPU(TYPE_HEXAGON_CPU_V66, hexagon_v66_cpu_init), DEFINE_CPU(TYPE_HEXAGON_CPU_V67, hexagon_v67_cpu_init), DEFINE_CPU(TYPE_HEXAGON_CPU_V68, hexagon_v68_cpu_init), DEFINE_CPU(TYPE_HEXAGON_CPU_V69, hexagon_v69_cpu_init), diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h index 764f3c38cc6..43a854f5172 100644 --- a/target/hexagon/cpu.h +++ b/target/hexagon/cpu.h @@ -21,11 +21,16 @@ #include "fpu/softfloat-types.h" #include "cpu-qom.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" #include "hex_regs.h" #include "mmvec/mmvec.h" #include "hw/registerfields.h" +#ifndef CONFIG_USER_ONLY +#error "Hexagon does not support system emulation" +#endif + 
#define NUM_PREGS 4 #define TOTAL_PER_THREAD_REGS 64 @@ -79,12 +84,6 @@ typedef struct CPUArchState { uint8_t slot_cancelled; target_ulong new_value_usr; - /* - * Only used when HEX_DEBUG is on, but unconditionally included - * to reduce recompile time when turning HEX_DEBUG on/off. - */ - target_ulong reg_written[TOTAL_PER_THREAD_REGS]; - MemLog mem_log_stores[STORES_MAX]; float_status fp_status; @@ -138,25 +137,10 @@ G_NORETURN void hexagon_raise_exception_err(CPUHexagonState *env, uint32_t exception, uintptr_t pc); -static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags) -{ - uint32_t hex_flags = 0; - *pc = env->gpr[HEX_REG_PC]; - *cs_base = 0; - if (*pc == env->gpr[HEX_REG_SA0]) { - hex_flags = FIELD_DP32(hex_flags, TB_FLAGS, IS_TIGHT_LOOP, 1); - } - *flags = hex_flags; - if (*pc & PCALIGN_MASK) { - hexagon_raise_exception_err(env, HEX_EXCP_PC_NOT_ALIGNED, 0); - } -} - typedef HexagonCPU ArchCPU; void hexagon_translate_init(void); - -#include "exec/cpu-all.h" +void hexagon_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); #endif /* HEXAGON_CPU_H */ diff --git a/target/hexagon/cpu_bits.h b/target/hexagon/cpu_bits.h index 4279281a710..ff596e2a94c 100644 --- a/target/hexagon/cpu_bits.h +++ b/target/hexagon/cpu_bits.h @@ -23,14 +23,21 @@ #define PCALIGN 4 #define PCALIGN_MASK (PCALIGN - 1) -#define HEX_EXCP_FETCH_NO_UPAGE 0x012 -#define HEX_EXCP_INVALID_PACKET 0x015 -#define HEX_EXCP_INVALID_OPCODE 0x015 -#define HEX_EXCP_PC_NOT_ALIGNED 0x01e -#define HEX_EXCP_PRIV_NO_UREAD 0x024 -#define HEX_EXCP_PRIV_NO_UWRITE 0x025 - -#define HEX_EXCP_TRAP0 0x172 +enum hex_event { + HEX_EVENT_NONE = -1, + HEX_EVENT_TRAP0 = 0x008, +}; + +enum hex_cause { + HEX_CAUSE_NONE = -1, + HEX_CAUSE_TRAP0 = 0x172, + HEX_CAUSE_FETCH_NO_UPAGE = 0x012, + HEX_CAUSE_INVALID_PACKET = 0x015, + HEX_CAUSE_INVALID_OPCODE = 0x015, + HEX_CAUSE_PC_NOT_ALIGNED = 0x01e, + HEX_CAUSE_PRIV_NO_UREAD = 0x024, + HEX_CAUSE_PRIV_NO_UWRITE = 0x025, +}; #define PACKET_WORDS_MAX 4 diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c index 05a56d8c10b..c557141f113 100644 --- a/target/hexagon/fma_emu.c +++ b/target/hexagon/fma_emu.c @@ -43,112 +43,51 @@ #define WAY_BIG_EXP 4096 -typedef union { - double f; - uint64_t i; - struct { - uint64_t mant:52; - uint64_t exp:11; - uint64_t sign:1; - }; -} Double; - -typedef union { - float f; - uint32_t i; - struct { - uint32_t mant:23; - uint32_t exp:8; - uint32_t sign:1; - }; -} Float; - static uint64_t float64_getmant(float64 f64) { - Double a = { .i = f64 }; + uint64_t mant = extract64(f64, 0, 52); if (float64_is_normal(f64)) { - return a.mant | 1ULL << 52; + return mant | 1ULL << 52; } if (float64_is_zero(f64)) { return 0; } if (float64_is_denormal(f64)) { - return a.mant; + return mant; } return ~0ULL; } int32_t float64_getexp(float64 f64) { - Double a = { .i = f64 }; + int exp = extract64(f64, 52, 11); if (float64_is_normal(f64)) { - return a.exp; + return exp; } if (float64_is_denormal(f64)) { - return a.exp + 1; + return exp + 1; } return -1; } -static uint64_t float32_getmant(float32 f32) -{ - Float a = { .i = f32 }; - if (float32_is_normal(f32)) { - return a.mant | 1ULL << 23; - } - if (float32_is_zero(f32)) { - return 0; - } - if (float32_is_denormal(f32)) { - return a.mant; - } - return ~0ULL; -} - int32_t float32_getexp(float32 f32) { - Float a = { .i = f32 }; + int exp = float32_getexp_raw(f32); if (float32_is_normal(f32)) { - return a.exp; + return exp; } if 
(float32_is_denormal(f32)) { - return a.exp + 1; + return exp + 1; } return -1; } -static uint32_t int128_getw0(Int128 x) -{ - return int128_getlo(x); -} - -static uint32_t int128_getw1(Int128 x) -{ - return int128_getlo(x) >> 32; -} - static Int128 int128_mul_6464(uint64_t ai, uint64_t bi) { - Int128 a, b; - uint64_t pp0, pp1a, pp1b, pp1s, pp2; - - a = int128_make64(ai); - b = int128_make64(bi); - pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b); - pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b); - pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a); - pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b); - - pp1s = pp1a + pp1b; - if ((pp1s < pp1a) || (pp1s < pp1b)) { - pp2 += (1ULL << 32); - } - uint64_t ret_low = pp0 + (pp1s << 32); - if ((ret_low < pp0) || (ret_low < (pp1s << 32))) { - pp2 += 1; - } + uint64_t l, h; - return int128_make128(ret_low, pp2 + (pp1s >> 32)); + mulu64(&l, &h, ai, bi); + return int128_make128(l, h); } static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow) @@ -369,298 +308,129 @@ float32 infinite_float32(uint8_t sign) } /* Return a maximum finite value with the requested sign */ -static float32 maxfinite_float32(uint8_t sign) +static float64 accum_round_float64(Accum a, float_status *fp_status) { - if (sign) { - return make_float32(SF_MINUS_MAXF); - } else { - return make_float32(SF_MAXF); - } -} - -/* Return a zero value with requested sign */ -static float32 zero_float32(uint8_t sign) -{ - if (sign) { - return make_float32(0x80000000); - } else { - return float32_zero; + uint64_t ret; + + if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) + && ((a.guard | a.round | a.sticky) == 0)) { + /* result zero */ + switch (fp_status->float_rounding_mode) { + case float_round_down: + return zero_float64(1); + default: + return zero_float64(0); + } } -} - -#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \ -static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \ -{ \ - if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \ - && ((a.guard | a.round | a.sticky) == 0)) { \ - /* result zero */ \ - switch (fp_status->float_rounding_mode) { \ - case float_round_down: \ - return zero_##SUFFIX(1); \ - default: \ - return zero_##SUFFIX(0); \ - } \ - } \ - /* Normalize right */ \ - /* We want MANTBITS bits of mantissa plus the leading one. */ \ - /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \ - /* So we need to normalize right while the high word is non-zero and \ - * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \ - while ((int128_gethi(a.mant) != 0) || \ - ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \ - a = accum_norm_right(a, 1); \ - } \ - /* \ - * OK, now normalize left \ - * We want to normalize left until we have a leading one in bit 24 \ - * Theoretically, we only need to shift a maximum of one to the left if we \ - * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \ - * should be 0 \ - */ \ - while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \ - a = accum_norm_left(a); \ - } \ - /* \ - * OK, now we might need to denormalize because of potential underflow. \ - * We need to do this before rounding, and rounding might make us normal \ - * again \ - */ \ - while (a.exp <= 0) { \ - a = accum_norm_right(a, 1 - a.exp); \ - /* \ - * Do we have underflow? \ - * That's when we get an inexact answer because we ran out of bits \ - * in a denormal. 
\ - */ \ - if (a.guard || a.round || a.sticky) { \ - float_raise(float_flag_underflow, fp_status); \ - } \ - } \ - /* OK, we're relatively canonical... now we need to round */ \ - if (a.guard || a.round || a.sticky) { \ - float_raise(float_flag_inexact, fp_status); \ - switch (fp_status->float_rounding_mode) { \ - case float_round_to_zero: \ - /* Chop and we're done */ \ - break; \ - case float_round_up: \ - if (a.sign == 0) { \ - a.mant = int128_add(a.mant, int128_one()); \ - } \ - break; \ - case float_round_down: \ - if (a.sign != 0) { \ - a.mant = int128_add(a.mant, int128_one()); \ - } \ - break; \ - default: \ - if (a.round || a.sticky) { \ - /* round up if guard is 1, down if guard is zero */ \ - a.mant = int128_add(a.mant, int128_make64(a.guard)); \ - } else if (a.guard) { \ - /* exactly .5, round up if odd */ \ - a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \ - } \ - break; \ - } \ - } \ - /* \ - * OK, now we might have carried all the way up. \ - * So we might need to shr once \ - * at least we know that the lsb should be zero if we rounded and \ - * got a carry out... \ - */ \ - if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \ - a = accum_norm_right(a, 1); \ - } \ - /* Overflow? */ \ - if (a.exp >= INF_EXP) { \ - /* Yep, inf result */ \ - float_raise(float_flag_overflow, fp_status); \ - float_raise(float_flag_inexact, fp_status); \ - switch (fp_status->float_rounding_mode) { \ - case float_round_to_zero: \ - return maxfinite_##SUFFIX(a.sign); \ - case float_round_up: \ - if (a.sign == 0) { \ - return infinite_##SUFFIX(a.sign); \ - } else { \ - return maxfinite_##SUFFIX(a.sign); \ - } \ - case float_round_down: \ - if (a.sign != 0) { \ - return infinite_##SUFFIX(a.sign); \ - } else { \ - return maxfinite_##SUFFIX(a.sign); \ - } \ - default: \ - return infinite_##SUFFIX(a.sign); \ - } \ - } \ - /* Underflow? */ \ - if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \ - /* Leading one means: No, we're normal. So, we should be done... */ \ - INTERNAL_TYPE ret; \ - ret.i = 0; \ - ret.sign = a.sign; \ - ret.exp = a.exp; \ - ret.mant = int128_getlo(a.mant); \ - return ret.i; \ - } \ - assert(a.exp == 1); \ - INTERNAL_TYPE ret; \ - ret.i = 0; \ - ret.sign = a.sign; \ - ret.exp = 0; \ - ret.mant = int128_getlo(a.mant); \ - return ret.i; \ -} - -GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double) -GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float) - -static bool is_inf_prod(float64 a, float64 b) -{ - return ((float64_is_infinity(a) && float64_is_infinity(b)) || - (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) || - (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a)))); -} - -static float64 special_fma(float64 a, float64 b, float64 c, - float_status *fp_status) -{ - float64 ret = make_float64(0); - /* - * If A multiplied by B is an exact infinity and C is also an infinity - * but with the opposite sign, FMA returns NaN and raises invalid. + * Normalize right + * We want DF_MANTBITS bits of mantissa plus the leading one. 
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF + * So we need to normalize right while the high word is non-zero and + * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ - uint8_t a_sign = float64_is_neg(a); - uint8_t b_sign = float64_is_neg(b); - uint8_t c_sign = float64_is_neg(c); - if (is_inf_prod(a, b) && float64_is_infinity(c)) { - if ((a_sign ^ b_sign) != c_sign) { - ret = make_float64(DF_NAN); - float_raise(float_flag_invalid, fp_status); - return ret; - } - } - if ((float64_is_infinity(a) && float64_is_zero(b)) || - (float64_is_zero(a) && float64_is_infinity(b))) { - ret = make_float64(DF_NAN); - float_raise(float_flag_invalid, fp_status); - return ret; + while ((int128_gethi(a.mant) != 0) || + ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) { + a = accum_norm_right(a, 1); } /* - * If none of the above checks are true and C is a NaN, - * a NaN shall be returned - * If A or B are NaN, a NAN shall be returned. + * OK, now normalize left + * We want to normalize left until we have a leading one in bit 24 + * Theoretically, we only need to shift a maximum of one to the left if we + * shifted out lots of bits from B, or if we had no shift / 1 shift sticky + * should be 0 */ - if (float64_is_any_nan(a) || - float64_is_any_nan(b) || - float64_is_any_nan(c)) { - if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) { - float_raise(float_flag_invalid, fp_status); - } - if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) { - float_raise(float_flag_invalid, fp_status); - } - if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) { - float_raise(float_flag_invalid, fp_status); - } - ret = make_float64(DF_NAN); - return ret; + while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) { + a = accum_norm_left(a); } /* - * We have checked for adding opposite-signed infinities. - * Other infinities return infinity with the correct sign + * OK, now we might need to denormalize because of potential underflow. + * We need to do this before rounding, and rounding might make us normal + * again */ - if (float64_is_infinity(c)) { - ret = infinite_float64(c_sign); - return ret; + while (a.exp <= 0) { + a = accum_norm_right(a, 1 - a.exp); + /* + * Do we have underflow? + * That's when we get an inexact answer because we ran out of bits + * in a denormal. + */ + if (a.guard || a.round || a.sticky) { + float_raise(float_flag_underflow, fp_status); + } } - if (float64_is_infinity(a) || float64_is_infinity(b)) { - ret = infinite_float64(a_sign ^ b_sign); - return ret; + /* OK, we're relatively canonical... 
now we need to round */ + if (a.guard || a.round || a.sticky) { + float_raise(float_flag_inexact, fp_status); + switch (fp_status->float_rounding_mode) { + case float_round_to_zero: + /* Chop and we're done */ + break; + case float_round_up: + if (a.sign == 0) { + a.mant = int128_add(a.mant, int128_one()); + } + break; + case float_round_down: + if (a.sign != 0) { + a.mant = int128_add(a.mant, int128_one()); + } + break; + default: + if (a.round || a.sticky) { + /* round up if guard is 1, down if guard is zero */ + a.mant = int128_add(a.mant, int128_make64(a.guard)); + } else if (a.guard) { + /* exactly .5, round up if odd */ + a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); + } + break; + } } - g_assert_not_reached(); -} - -static float32 special_fmaf(float32 a, float32 b, float32 c, - float_status *fp_status) -{ - float64 aa, bb, cc; - aa = float32_to_float64(a, fp_status); - bb = float32_to_float64(b, fp_status); - cc = float32_to_float64(c, fp_status); - return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status); -} - -float32 internal_fmafx(float32 a, float32 b, float32 c, int scale, - float_status *fp_status) -{ - Accum prod; - Accum acc; - Accum result; - accum_init(&prod); - accum_init(&acc); - accum_init(&result); - - uint8_t a_sign = float32_is_neg(a); - uint8_t b_sign = float32_is_neg(b); - uint8_t c_sign = float32_is_neg(c); - if (float32_is_infinity(a) || - float32_is_infinity(b) || - float32_is_infinity(c)) { - return special_fmaf(a, b, c, fp_status); - } - if (float32_is_any_nan(a) || - float32_is_any_nan(b) || - float32_is_any_nan(c)) { - return special_fmaf(a, b, c, fp_status); - } - if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) { - float32 tmp = float32_mul(a, b, fp_status); - tmp = float32_add(tmp, c, fp_status); - return tmp; - } - - /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */ - prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b)); - /* - * Note: extracting the mantissa into an int is multiplying by - * 2**23, so adjust here + * OK, now we might have carried all the way up. + * So we might need to shr once + * at least we know that the lsb should be zero if we rounded and + * got a carry out... */ - prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23; - prod.sign = a_sign ^ b_sign; - if (float32_is_zero(a) || float32_is_zero(b)) { - prod.exp = -2 * WAY_BIG_EXP; - } - if ((scale > 0) && float32_is_denormal(c)) { - acc.mant = int128_mul_6464(0, 0); - acc.exp = -WAY_BIG_EXP; - acc.sign = c_sign; - acc.sticky = 1; - result = accum_add(prod, acc); - } else if (!float32_is_zero(c)) { - acc.mant = int128_mul_6464(float32_getmant(c), 1); - acc.exp = float32_getexp(c); - acc.sign = c_sign; - result = accum_add(prod, acc); - } else { - result = prod; + if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) { + a = accum_norm_right(a, 1); + } + /* Overflow? 
*/ + if (a.exp >= DF_INF_EXP) { + /* Yep, inf result */ + float_raise(float_flag_overflow, fp_status); + float_raise(float_flag_inexact, fp_status); + switch (fp_status->float_rounding_mode) { + case float_round_to_zero: + return maxfinite_float64(a.sign); + case float_round_up: + if (a.sign == 0) { + return infinite_float64(a.sign); + } else { + return maxfinite_float64(a.sign); + } + case float_round_down: + if (a.sign != 0) { + return infinite_float64(a.sign); + } else { + return maxfinite_float64(a.sign); + } + default: + return infinite_float64(a.sign); + } } - result.exp += scale; - return accum_round_float32(result, fp_status); -} - -float32 internal_mpyf(float32 a, float32 b, float_status *fp_status) -{ - if (float32_is_zero(a) || float32_is_zero(b)) { - return float32_mul(a, b, fp_status); + /* Underflow? */ + ret = int128_getlo(a.mant); + if (ret & (1ULL << DF_MANTBITS)) { + /* Leading one means: No, we're normal. So, we should be done... */ + ret = deposit64(ret, 52, 11, a.exp); + } else { + assert(a.exp == 1); + ret = deposit64(ret, 52, 11, 0); } - return internal_fmafx(a, b, float32_zero, 0, fp_status); + ret = deposit64(ret, 63, 1, a.sign); + return ret; } float64 internal_mpyhh(float64 a, float64 b, @@ -685,7 +455,7 @@ float64 internal_mpyhh(float64 a, float64 b, float64_is_infinity(b)) { return float64_mul(a, b, fp_status); } - x.mant = int128_mul_6464(accumulated, 1); + x.mant = int128_make64(accumulated); x.sticky = sticky; prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b)); x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL)); diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h index 91591d60503..fed054b609f 100644 --- a/target/hexagon/fma_emu.h +++ b/target/hexagon/fma_emu.h @@ -30,9 +30,6 @@ static inline uint32_t float32_getexp_raw(float32 f32) } int32_t float32_getexp(float32 f32); float32 infinite_float32(uint8_t sign); -float32 internal_fmafx(float32 a, float32 b, float32 c, - int scale, float_status *fp_status); -float32 internal_mpyf(float32 a, float32 b, float_status *fp_status); float64 internal_mpyhh(float64 a, float64 b, unsigned long long int accumulated, float_status *fp_status); diff --git a/target/hexagon/gdbstub.c b/target/hexagon/gdbstub.c index 502c6987f09..12d6b3bbcbb 100644 --- a/target/hexagon/gdbstub.c +++ b/target/hexagon/gdbstub.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2024 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -36,6 +36,14 @@ int hexagon_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) return gdb_get_regl(mem_buf, env->gpr[n]); } + n -= TOTAL_PER_THREAD_REGS; + + if (n < NUM_PREGS) { + return gdb_get_reg8(mem_buf, env->pred[n]); + } + + n -= NUM_PREGS; + g_assert_not_reached(); } @@ -44,7 +52,7 @@ int hexagon_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) CPUHexagonState *env = cpu_env(cs); if (n == HEX_REG_P3_0_ALIASED) { - uint32_t p3_0 = ldtul_p(mem_buf); + uint32_t p3_0 = ldl_le_p(mem_buf); for (int i = 0; i < NUM_PREGS; i++) { env->pred[i] = extract32(p3_0, i * 8, 8); } @@ -52,10 +60,19 @@ int hexagon_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) } if (n < TOTAL_PER_THREAD_REGS) { - env->gpr[n] = ldtul_p(mem_buf); + env->gpr[n] = ldl_le_p(mem_buf); return sizeof(target_ulong); } + n -= TOTAL_PER_THREAD_REGS; + + if (n < NUM_PREGS) { + env->pred[n] = ldl_le_p(mem_buf) & 0xff; + return sizeof(uint8_t); + } + + n -= NUM_PREGS; + g_assert_not_reached(); } @@ -100,7 +117,7 @@ static int gdb_put_vreg(CPUHexagonState *env, uint8_t *mem_buf, int n) { int i; for (i = 0; i < ARRAY_SIZE(env->VRegs[n].uw); i++) { - env->VRegs[n].uw[i] = ldtul_p(mem_buf); + env->VRegs[n].uw[i] = ldl_le_p(mem_buf); mem_buf += 4; } return MAX_VEC_SIZE_BYTES; @@ -110,7 +127,7 @@ static int gdb_put_qreg(CPUHexagonState *env, uint8_t *mem_buf, int n) { int i; for (i = 0; i < ARRAY_SIZE(env->QRegs[n].uw); i++) { - env->QRegs[n].uw[i] = ldtul_p(mem_buf); + env->QRegs[n].uw[i] = ldl_le_p(mem_buf); mem_buf += 4; } return MAX_VEC_SIZE_BYTES / 8; diff --git a/target/hexagon/gen_analyze_funcs.py b/target/hexagon/gen_analyze_funcs.py index 54bac197240..3ac7cc2cfe5 100755 --- a/target/hexagon/gen_analyze_funcs.py +++ b/target/hexagon/gen_analyze_funcs.py @@ -78,11 +78,13 @@ def gen_analyze_func(f, tag, regs, imms): def main(): - hex_common.read_common_files() + args = hex_common.parse_common_args( + "Emit functions analyzing register accesses" + ) tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - with open(sys.argv[-1], "w") as f: + with open(args.out, "w") as f: f.write("#ifndef HEXAGON_ANALYZE_FUNCS_C_INC\n") f.write("#define HEXAGON_ANALYZE_FUNCS_C_INC\n\n") diff --git a/target/hexagon/gen_decodetree.py b/target/hexagon/gen_decodetree.py index a4fcd622c54..ce703af41d4 100755 --- a/target/hexagon/gen_decodetree.py +++ b/target/hexagon/gen_decodetree.py @@ -24,6 +24,7 @@ import textwrap import iset import hex_common +import argparse encs = { tag: "".join(reversed(iset.iset[tag]["enc"].replace(" ", ""))) @@ -191,8 +192,18 @@ def gen_decodetree_file(f, class_to_decode): f.write(f"{tag}\t{enc_str} @{tag}\n") +def main(): + parser = argparse.ArgumentParser( + description="Emit opaque macro calls with instruction semantics" + ) + parser.add_argument("semantics", help="semantics file") + parser.add_argument("class_to_decode", help="instruction class to decode") + parser.add_argument("out", help="output file") + args = parser.parse_args() + + hex_common.read_semantics_file(args.semantics) + with open(args.out, "w") as f: + gen_decodetree_file(f, args.class_to_decode) + if __name__ == "__main__": - hex_common.read_semantics_file(sys.argv[1]) - class_to_decode = sys.argv[2] - with open(sys.argv[3], "w") as f: - gen_decodetree_file(f, class_to_decode) + main() diff --git a/target/hexagon/gen_helper_funcs.py b/target/hexagon/gen_helper_funcs.py 
index e9685bff2fa..c1f806ac4b2 100755 --- a/target/hexagon/gen_helper_funcs.py +++ b/target/hexagon/gen_helper_funcs.py @@ -102,12 +102,13 @@ def gen_helper_function(f, tag, tagregs, tagimms): def main(): - hex_common.read_common_files() + args = hex_common.parse_common_args( + "Emit helper function definitions for each instruction" + ) tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - output_file = sys.argv[-1] - with open(output_file, "w") as f: + with open(args.out, "w") as f: for tag in hex_common.tags: ## Skip the priv instructions if "A_PRIV" in hex_common.attribdict[tag]: diff --git a/target/hexagon/gen_helper_protos.py b/target/hexagon/gen_helper_protos.py index fd2bfd0f36b..77f8e0a6a32 100755 --- a/target/hexagon/gen_helper_protos.py +++ b/target/hexagon/gen_helper_protos.py @@ -52,12 +52,13 @@ def gen_helper_prototype(f, tag, tagregs, tagimms): def main(): - hex_common.read_common_files() + args = hex_common.parse_common_args( + "Emit helper function prototypes for each instruction" + ) tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - output_file = sys.argv[-1] - with open(output_file, "w") as f: + with open(args.out, "w") as f: for tag in hex_common.tags: ## Skip the priv instructions if "A_PRIV" in hex_common.attribdict[tag]: diff --git a/target/hexagon/gen_idef_parser_funcs.py b/target/hexagon/gen_idef_parser_funcs.py index eb494abba8d..2f6e826f76d 100644 --- a/target/hexagon/gen_idef_parser_funcs.py +++ b/target/hexagon/gen_idef_parser_funcs.py @@ -20,6 +20,7 @@ import sys import re import string +import argparse from io import StringIO import hex_common @@ -43,14 +44,20 @@ ## them are inputs ("in" prefix), while some others are outputs. ## def main(): - hex_common.read_semantics_file(sys.argv[1]) + parser = argparse.ArgumentParser( + "Emit instruction implementations that can be fed to idef-parser" + ) + parser.add_argument("semantics", help="semantics file") + parser.add_argument("out", help="output file") + args = parser.parse_args() + hex_common.read_semantics_file(args.semantics) hex_common.calculate_attribs() hex_common.init_registers() tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - with open(sys.argv[-1], "w") as f: - f.write('#include "macros.inc"\n\n') + with open(args.out, "w") as f: + f.write('#include "macros.h.inc"\n\n') for tag in hex_common.tags: ## Skip the priv instructions diff --git a/target/hexagon/gen_op_attribs.py b/target/hexagon/gen_op_attribs.py index 99448220da3..bbbb02df3a2 100755 --- a/target/hexagon/gen_op_attribs.py +++ b/target/hexagon/gen_op_attribs.py @@ -21,16 +21,23 @@ import re import string import hex_common +import argparse def main(): - hex_common.read_semantics_file(sys.argv[1]) + parser = argparse.ArgumentParser( + "Emit opaque macro calls containing instruction attributes" + ) + parser.add_argument("semantics", help="semantics file") + parser.add_argument("out", help="output file") + args = parser.parse_args() + hex_common.read_semantics_file(args.semantics) hex_common.calculate_attribs() ## ## Generate all the attributes associated with each instruction ## - with open(sys.argv[-1], "w") as f: + with open(args.out, "w") as f: for tag in hex_common.tags: f.write( f"OP_ATTRIB({tag},ATTRIBS(" diff --git a/target/hexagon/gen_opcodes_def.py b/target/hexagon/gen_opcodes_def.py index 536f0eb68aa..94a19ff412e 100755 --- a/target/hexagon/gen_opcodes_def.py +++ b/target/hexagon/gen_opcodes_def.py @@ -21,15 +21,22 @@ import re import string import hex_common +import argparse 
def main(): - hex_common.read_semantics_file(sys.argv[1]) + parser = argparse.ArgumentParser( + description="Emit opaque macro calls with instruction names" + ) + parser.add_argument("semantics", help="semantics file") + parser.add_argument("out", help="output file") + args = parser.parse_args() + hex_common.read_semantics_file(args.semantics) ## ## Generate a list of all the opcodes ## - with open(sys.argv[-1], "w") as f: + with open(args.out, "w") as f: for tag in hex_common.tags: f.write(f"OPCODE({tag}),\n") diff --git a/target/hexagon/gen_printinsn.py b/target/hexagon/gen_printinsn.py index 8bf4d0985c3..d5f969960ad 100755 --- a/target/hexagon/gen_printinsn.py +++ b/target/hexagon/gen_printinsn.py @@ -21,6 +21,7 @@ import re import string import hex_common +import argparse ## @@ -96,11 +97,17 @@ def spacify(s): def main(): - hex_common.read_semantics_file(sys.argv[1]) + parser = argparse.ArgumentParser( + "Emit opaque macro calls with information for printing string representations of instructions" + ) + parser.add_argument("semantics", help="semantics file") + parser.add_argument("out", help="output file") + args = parser.parse_args() + hex_common.read_semantics_file(args.semantics) immext_casere = re.compile(r"IMMEXT\(([A-Za-z])") - with open(sys.argv[-1], "w") as f: + with open(args.out, "w") as f: for tag in hex_common.tags: if not hex_common.behdict[tag]: continue diff --git a/target/hexagon/gen_tcg.h b/target/hexagon/gen_tcg.h index 3fc1f4e2812..8a3b801287c 100644 --- a/target/hexagon/gen_tcg.h +++ b/target/hexagon/gen_tcg.h @@ -1365,7 +1365,7 @@ do { \ uiV = uiV; \ tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->pkt->pc); \ - TCGv excp = tcg_constant_tl(HEX_EXCP_TRAP0); \ + TCGv excp = tcg_constant_tl(HEX_EVENT_TRAP0); \ gen_helper_raise_exception(tcg_env, excp); \ } while (0) #endif diff --git a/target/hexagon/gen_tcg_func_table.py b/target/hexagon/gen_tcg_func_table.py index 978ac1819b9..299a39b1aa0 100755 --- a/target/hexagon/gen_tcg_func_table.py +++ b/target/hexagon/gen_tcg_func_table.py @@ -21,15 +21,22 @@ import re import string import hex_common +import argparse def main(): - hex_common.read_semantics_file(sys.argv[1]) + parser = argparse.ArgumentParser( + "Emit opaque macro calls with instruction semantics" + ) + parser.add_argument("semantics", help="semantics file") + parser.add_argument("out", help="output file") + args = parser.parse_args() + hex_common.read_semantics_file(args.semantics) hex_common.calculate_attribs() tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - with open(sys.argv[-1], "w") as f: + with open(args.out, "w") as f: f.write("#ifndef HEXAGON_FUNC_TABLE_H\n") f.write("#define HEXAGON_FUNC_TABLE_H\n\n") diff --git a/target/hexagon/gen_tcg_funcs.py b/target/hexagon/gen_tcg_funcs.py index 05aa0a7855b..c2ba91ddc04 100755 --- a/target/hexagon/gen_tcg_funcs.py +++ b/target/hexagon/gen_tcg_funcs.py @@ -108,15 +108,16 @@ def gen_def_tcg_func(f, tag, tagregs, tagimms): def main(): - is_idef_parser_enabled = hex_common.read_common_files() + args = hex_common.parse_common_args( + "Emit functions calling generated code implementing instruction semantics (helpers, idef-parser)" + ) tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - output_file = sys.argv[-1] - with open(output_file, "w") as f: + with open(args.out, "w") as f: f.write("#ifndef HEXAGON_TCG_FUNCS_H\n") f.write("#define HEXAGON_TCG_FUNCS_H\n\n") - if is_idef_parser_enabled: + if args.idef_parser: f.write('#include "idef-generated-emitter.h.inc"\n\n') for tag in 
hex_common.tags: diff --git a/target/hexagon/gen_trans_funcs.py b/target/hexagon/gen_trans_funcs.py index 30f0c73e0c7..45da1b7b5df 100755 --- a/target/hexagon/gen_trans_funcs.py +++ b/target/hexagon/gen_trans_funcs.py @@ -24,6 +24,7 @@ import textwrap import iset import hex_common +import argparse encs = { tag: "".join(reversed(iset.iset[tag]["enc"].replace(" ", ""))) @@ -136,8 +137,19 @@ def gen_trans_funcs(f): """)) -if __name__ == "__main__": - hex_common.read_semantics_file(sys.argv[1]) +def main(): + parser = argparse.ArgumentParser( + description="Emit trans_*() functions to be called by " \ + "instruction decoder" + ) + parser.add_argument("semantics", help="semantics file") + parser.add_argument("out", help="output file") + args = parser.parse_args() + hex_common.read_semantics_file(args.semantics) hex_common.init_registers() - with open(sys.argv[2], "w") as f: + with open(args.out, "w") as f: gen_trans_funcs(f) + + +if __name__ == "__main__": + main() diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c index dbae6c570a8..08fc5413de7 100644 --- a/target/hexagon/genptr.c +++ b/target/hexagon/genptr.c @@ -100,10 +100,6 @@ void gen_log_reg_write(DisasContext *ctx, int rnum, TCGv val) gen_masked_reg_write(val, hex_gpr[rnum], reg_mask); tcg_gen_mov_tl(get_result_gpr(ctx, rnum), val); - if (HEX_DEBUG) { - /* Do this so HELPER(debug_commit_end) will know */ - tcg_gen_movi_tl(hex_reg_written[rnum], 1); - } } static void gen_log_reg_write_pair(DisasContext *ctx, int rnum, TCGv_i64 val) @@ -151,9 +147,6 @@ void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val) } else { tcg_gen_and_tl(pred, pred, base_val); } - if (HEX_DEBUG) { - tcg_gen_ori_tl(ctx->pred_written, ctx->pred_written, 1 << pnum); - } set_bit(pnum, ctx->pregs_written); } @@ -336,14 +329,14 @@ void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src) static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index) { - tcg_gen_qemu_ld_tl(dest, vaddr, mem_index, MO_TEUL); + tcg_gen_qemu_ld_tl(dest, vaddr, mem_index, MO_LE | MO_UL); tcg_gen_mov_tl(hex_llsc_addr, vaddr); tcg_gen_mov_tl(hex_llsc_val, dest); } static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index) { - tcg_gen_qemu_ld_i64(dest, vaddr, mem_index, MO_TEUQ); + tcg_gen_qemu_ld_i64(dest, vaddr, mem_index, MO_LE | MO_UQ); tcg_gen_mov_tl(hex_llsc_addr, vaddr); tcg_gen_mov_i64(hex_llsc_val_i64, dest); } @@ -763,7 +756,7 @@ static void gen_load_frame(DisasContext *ctx, TCGv_i64 frame, TCGv EA) { Insn *insn = ctx->insn; /* Needed for CHECK_NOSHUF */ CHECK_NOSHUF(EA, 8); - tcg_gen_qemu_ld_i64(frame, EA, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_ld_i64(frame, EA, ctx->mem_idx, MO_LE | MO_UQ); } #ifndef CONFIG_HEXAGON_IDEF_PARSER @@ -1237,7 +1230,7 @@ static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src, tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1)); } for (int i = 0; i < sizeof(MMVector) / 8; i++) { - tcg_gen_qemu_ld_i64(tmp, src, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_ld_i64(tmp, src, ctx->mem_idx, MO_LE | MO_UQ); tcg_gen_addi_tl(src, src, 8); tcg_gen_st_i64(tmp, tcg_env, dstoff + i * 8); } diff --git a/target/hexagon/helper.h b/target/hexagon/helper.h index fa0ebaf7c83..f8baa599c88 100644 --- a/target/hexagon/helper.h +++ b/target/hexagon/helper.h @@ -19,9 +19,6 @@ #include "helper_protos_generated.h.inc" DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_RETURN, noreturn, env, i32) -DEF_HELPER_1(debug_start_packet, void, env) -DEF_HELPER_FLAGS_3(debug_check_store_width, TCG_CALL_NO_WG, void, env, int, 
int) -DEF_HELPER_FLAGS_5(debug_commit_end, TCG_CALL_NO_WG, void, env, i32, int, int, int) DEF_HELPER_2(commit_store, void, env, int) DEF_HELPER_3(gather_store, void, env, i32, int) DEF_HELPER_1(commit_hvx_stores, void, env) diff --git a/target/hexagon/hex_common.py b/target/hexagon/hex_common.py index 15ed4980e48..758e5fd12df 100755 --- a/target/hexagon/hex_common.py +++ b/target/hexagon/hex_common.py @@ -21,6 +21,7 @@ import re import string import textwrap +import argparse behdict = {} # tag ->behavior semdict = {} # tag -> semantics @@ -1181,22 +1182,20 @@ def helper_args(tag, regs, imms): return args -def read_common_files(): - read_semantics_file(sys.argv[1]) - read_overrides_file(sys.argv[2]) - read_overrides_file(sys.argv[3]) - ## Whether or not idef-parser is enabled is - ## determined by the number of arguments to - ## this script: - ## - ## 4 args. -> not enabled, - ## 5 args. -> idef-parser enabled. - ## - ## The 5:th arg. then holds a list of the successfully - ## parsed instructions. - is_idef_parser_enabled = len(sys.argv) > 5 - if is_idef_parser_enabled: - read_idef_parser_enabled_file(sys.argv[4]) +def parse_common_args(desc): + parser = argparse.ArgumentParser(desc) + parser.add_argument("semantics", help="semantics file") + parser.add_argument("overrides", help="overrides file") + parser.add_argument("overrides_vec", help="vector overrides file") + parser.add_argument("out", help="output file") + parser.add_argument("--idef-parser", + help="file of instructions translated by idef-parser") + args = parser.parse_args() + read_semantics_file(args.semantics) + read_overrides_file(args.overrides) + read_overrides_file(args.overrides_vec) + if args.idef_parser: + read_idef_parser_enabled_file(args.idef_parser) calculate_attribs() init_registers() - return is_idef_parser_enabled + return args diff --git a/target/hexagon/idef-parser/README.rst b/target/hexagon/idef-parser/README.rst index d0aa34309bc..7199177ee33 100644 --- a/target/hexagon/idef-parser/README.rst +++ b/target/hexagon/idef-parser/README.rst @@ -138,7 +138,7 @@ we obtain the pseudo code with macros such as ``fJUMPR`` intact. The second step is to expand macros into a form suitable for our parser. -These macros are defined in ``idef-parser/macros.inc`` and the step is +These macros are defined in ``idef-parser/macros.h.inc`` and the step is carried out by the ``prepare`` script which runs the C preprocessor on ``idef_parser_input.h.inc`` to produce ``idef_parser_input.preprocessed.h.inc``. @@ -266,7 +266,7 @@ in plain C is defined as #define fABS(A) (((A) < 0) ? (-(A)) : (A)) and returns the absolute value of the argument ``A``. This macro is not included -in ``idef-parser/macros.inc`` and as such is not expanded and kept as a "call" +in ``idef-parser/macros.h.inc`` and as such is not expanded and kept as a "call" ``fABS(...)``. Reason being, that ``fABS`` is easier to match and map to ``tcg_gen_abs_``, compared to the full ternary expression above. 
Loads of macros in ``macros.h`` are kept unexpanded to aid in parsing, as seen in the diff --git a/target/hexagon/idef-parser/idef-parser.y b/target/hexagon/idef-parser/idef-parser.y index 9ffb9f9699e..c6f17c6afa7 100644 --- a/target/hexagon/idef-parser/idef-parser.y +++ b/target/hexagon/idef-parser/idef-parser.y @@ -800,7 +800,6 @@ rvalue : FAIL lvalue : FAIL { - @1.last_column = @1.last_column; yyassert(c, &@1, false, "Encountered a FAIL token as lvalue.\n"); } | REG diff --git a/target/hexagon/idef-parser/macros.inc b/target/hexagon/idef-parser/macros.h.inc similarity index 100% rename from target/hexagon/idef-parser/macros.inc rename to target/hexagon/idef-parser/macros.h.inc diff --git a/target/hexagon/idef-parser/parser-helpers.c b/target/hexagon/idef-parser/parser-helpers.c index a7dcd85fe43..542af8d0a65 100644 --- a/target/hexagon/idef-parser/parser-helpers.c +++ b/target/hexagon/idef-parser/parser-helpers.c @@ -1761,7 +1761,7 @@ void gen_load(Context *c, YYLTYPE *locp, HexValue *width, if (signedness == SIGNED) { OUT(c, locp, " | MO_SIGN"); } - OUT(c, locp, " | MO_TE);\n"); + OUT(c, locp, " | MO_LE);\n"); } void gen_store(Context *c, YYLTYPE *locp, HexValue *width, HexValue *ea, diff --git a/target/hexagon/internal.h b/target/hexagon/internal.h index beb08cb7e38..32e96f00d97 100644 --- a/target/hexagon/internal.h +++ b/target/hexagon/internal.h @@ -20,17 +20,6 @@ #include "qemu/log.h" -/* - * Change HEX_DEBUG to 1 to turn on debugging output - */ -#define HEX_DEBUG 0 -#define HEX_DEBUG_LOG(...) \ - do { \ - if (HEX_DEBUG) { \ - qemu_log(__VA_ARGS__); \ - } \ - } while (0) - int hexagon_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int hexagon_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); int hexagon_hvx_gdb_read_register(CPUState *env, GByteArray *mem_buf, int n); diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h index ee3d4c88e7b..9ba9be408db 100644 --- a/target/hexagon/macros.h +++ b/target/hexagon/macros.h @@ -21,6 +21,7 @@ #include "cpu.h" #include "hex_regs.h" #include "reg_fields.h" +#include "accel/tcg/getpc.h" #define GET_FIELD(FIELD, REGIN) \ fEXTRACTU_BITS(REGIN, reg_field_info[FIELD].width, \ @@ -115,27 +116,27 @@ #define MEM_LOAD2s(DST, VA) \ do { \ CHECK_NOSHUF(VA, 2); \ - tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESW); \ + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_LE | MO_SW); \ } while (0) #define MEM_LOAD2u(DST, VA) \ do { \ CHECK_NOSHUF(VA, 2); \ - tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUW); \ + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_LE | MO_UW); \ } while (0) #define MEM_LOAD4s(DST, VA) \ do { \ CHECK_NOSHUF(VA, 4); \ - tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESL); \ + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_LE | MO_SL); \ } while (0) #define MEM_LOAD4u(DST, VA) \ do { \ CHECK_NOSHUF(VA, 4); \ - tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUL); \ + tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_LE | MO_UL); \ } while (0) #define MEM_LOAD8u(DST, VA) \ do { \ CHECK_NOSHUF(VA, 8); \ - tcg_gen_qemu_ld_i64(DST, VA, ctx->mem_idx, MO_TEUQ); \ + tcg_gen_qemu_ld_i64(DST, VA, ctx->mem_idx, MO_LE | MO_UQ); \ } while (0) #define MEM_STORE1_FUNC(X) \ diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build index b0b253aa6bf..bb4ebaae816 100644 --- a/target/hexagon/meson.build +++ b/target/hexagon/meson.build @@ -284,7 +284,7 @@ if idef_parser_enabled and 'hexagon-linux-user' in target_dirs 'idef_parser_input.preprocessed.h.inc', output: 'idef_parser_input.preprocessed.h.inc', input: 
idef_parser_input_generated, - depend_files: [idef_parser_dir / 'macros.inc'], + depend_files: [idef_parser_dir / 'macros.h.inc'], command: [idef_parser_dir / 'prepare', '@INPUT@', '-I' + idef_parser_dir, '-o', '@OUTPUT@'], ) @@ -300,7 +300,7 @@ if idef_parser_enabled and 'hexagon-linux-user' in target_dirs arguments: ['@INPUT@', '--defines=@OUTPUT1@', '--output=@OUTPUT0@'] ) - glib_dep = dependency('glib-2.0', native: true) + glib_dep = dependency('glib-2.0', native: true, static: false) idef_parser = executable( 'idef-parser', @@ -346,7 +346,7 @@ if idef_parser_enabled and 'hexagon-linux-user' in target_dirs # Setup input and dependencies for the next step, this depends on whether or # not idef-parser is enabled helper_dep = [semantics_generated, idef_generated_tcg_c, idef_generated_tcg] - helper_in = [semantics_generated, gen_tcg_h, gen_tcg_hvx_h, idef_generated_list] + helper_in = [semantics_generated, gen_tcg_h, gen_tcg_hvx_h, '--idef-parser', idef_generated_list] else # Setup input and dependencies for the next step, this depends on whether or # not idef-parser is enabled diff --git a/target/hexagon/mmvec/macros.h b/target/hexagon/mmvec/macros.h index 1ceb9453ee4..c7840fbf2e1 100644 --- a/target/hexagon/mmvec/macros.h +++ b/target/hexagon/mmvec/macros.h @@ -21,28 +21,30 @@ #include "qemu/host-utils.h" #include "arch.h" #include "mmvec/system_ext_mmvec.h" +#include "accel/tcg/getpc.h" +#include "accel/tcg/probe.h" #ifndef QEMU_GENERATE -#define VdV (*(MMVector *)(VdV_void)) -#define VsV (*(MMVector *)(VsV_void)) -#define VuV (*(MMVector *)(VuV_void)) -#define VvV (*(MMVector *)(VvV_void)) -#define VwV (*(MMVector *)(VwV_void)) -#define VxV (*(MMVector *)(VxV_void)) -#define VyV (*(MMVector *)(VyV_void)) +#define VdV (*(MMVector *restrict)(VdV_void)) +#define VsV (*(MMVector *restrict)(VsV_void)) +#define VuV (*(MMVector *restrict)(VuV_void)) +#define VvV (*(MMVector *restrict)(VvV_void)) +#define VwV (*(MMVector *restrict)(VwV_void)) +#define VxV (*(MMVector *restrict)(VxV_void)) +#define VyV (*(MMVector *restrict)(VyV_void)) -#define VddV (*(MMVectorPair *)(VddV_void)) -#define VuuV (*(MMVectorPair *)(VuuV_void)) -#define VvvV (*(MMVectorPair *)(VvvV_void)) -#define VxxV (*(MMVectorPair *)(VxxV_void)) +#define VddV (*(MMVectorPair *restrict)(VddV_void)) +#define VuuV (*(MMVectorPair *restrict)(VuuV_void)) +#define VvvV (*(MMVectorPair *restrict)(VvvV_void)) +#define VxxV (*(MMVectorPair *restrict)(VxxV_void)) -#define QeV (*(MMQReg *)(QeV_void)) -#define QdV (*(MMQReg *)(QdV_void)) -#define QsV (*(MMQReg *)(QsV_void)) -#define QtV (*(MMQReg *)(QtV_void)) -#define QuV (*(MMQReg *)(QuV_void)) -#define QvV (*(MMQReg *)(QvV_void)) -#define QxV (*(MMQReg *)(QxV_void)) +#define QeV (*(MMQReg *restrict)(QeV_void)) +#define QdV (*(MMQReg *restrict)(QdV_void)) +#define QsV (*(MMQReg *restrict)(QsV_void)) +#define QtV (*(MMQReg *restrict)(QtV_void)) +#define QuV (*(MMQReg *restrict)(QuV_void)) +#define QvV (*(MMQReg *restrict)(QvV_void)) +#define QxV (*(MMQReg *restrict)(QxV_void)) #endif #define LOG_VTCM_BYTE(VA, MASK, VAL, IDX) \ diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c index ae5a6055139..444799d3ade 100644 --- a/target/hexagon/op_helper.c +++ b/target/hexagon/op_helper.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2019-2024 Qualcomm Innovation Center, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -17,8 +17,8 @@ #include "qemu/osdep.h" #include "qemu/log.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/probe.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" #include "cpu.h" @@ -54,9 +54,6 @@ G_NORETURN void HELPER(raise_exception)(CPUHexagonState *env, uint32_t excp) void log_store32(CPUHexagonState *env, target_ulong addr, target_ulong val, int width, int slot) { - HEX_DEBUG_LOG("log_store%d(0x" TARGET_FMT_lx - ", %" PRId32 " [0x08%" PRIx32 "])\n", - width, addr, val, val); env->mem_log_stores[slot].va = addr; env->mem_log_stores[slot].width = width; env->mem_log_stores[slot].data32 = val; @@ -65,35 +62,11 @@ void log_store32(CPUHexagonState *env, target_ulong addr, void log_store64(CPUHexagonState *env, target_ulong addr, int64_t val, int width, int slot) { - HEX_DEBUG_LOG("log_store%d(0x" TARGET_FMT_lx - ", %" PRId64 " [0x016%" PRIx64 "])\n", - width, addr, val, val); env->mem_log_stores[slot].va = addr; env->mem_log_stores[slot].width = width; env->mem_log_stores[slot].data64 = val; } -/* Handy place to set a breakpoint */ -void HELPER(debug_start_packet)(CPUHexagonState *env) -{ - HEX_DEBUG_LOG("Start packet: pc = 0x" TARGET_FMT_lx "\n", - env->gpr[HEX_REG_PC]); - - for (int i = 0; i < TOTAL_PER_THREAD_REGS; i++) { - env->reg_written[i] = 0; - } -} - -/* Checks for bookkeeping errors between disassembly context and runtime */ -void HELPER(debug_check_store_width)(CPUHexagonState *env, int slot, int check) -{ - if (env->mem_log_stores[slot].width != check) { - HEX_DEBUG_LOG("ERROR: %d != %d\n", - env->mem_log_stores[slot].width, check); - g_assert_not_reached(); - } -} - static void commit_store(CPUHexagonState *env, int slot_num, uintptr_t ra) { uint8_t width = env->mem_log_stores[slot_num].width; @@ -173,91 +146,6 @@ void HELPER(commit_hvx_stores)(CPUHexagonState *env) } } -static void print_store(CPUHexagonState *env, int slot) -{ - if (!(env->slot_cancelled & (1 << slot))) { - uint8_t width = env->mem_log_stores[slot].width; - if (width == 1) { - uint32_t data = env->mem_log_stores[slot].data32 & 0xff; - HEX_DEBUG_LOG("\tmemb[0x" TARGET_FMT_lx "] = %" PRId32 - " (0x%02" PRIx32 ")\n", - env->mem_log_stores[slot].va, data, data); - } else if (width == 2) { - uint32_t data = env->mem_log_stores[slot].data32 & 0xffff; - HEX_DEBUG_LOG("\tmemh[0x" TARGET_FMT_lx "] = %" PRId32 - " (0x%04" PRIx32 ")\n", - env->mem_log_stores[slot].va, data, data); - } else if (width == 4) { - uint32_t data = env->mem_log_stores[slot].data32; - HEX_DEBUG_LOG("\tmemw[0x" TARGET_FMT_lx "] = %" PRId32 - " (0x%08" PRIx32 ")\n", - env->mem_log_stores[slot].va, data, data); - } else if (width == 8) { - HEX_DEBUG_LOG("\tmemd[0x" TARGET_FMT_lx "] = %" PRId64 - " (0x%016" PRIx64 ")\n", - env->mem_log_stores[slot].va, - env->mem_log_stores[slot].data64, - env->mem_log_stores[slot].data64); - } else { - HEX_DEBUG_LOG("\tBad store width %d\n", width); - g_assert_not_reached(); - } - } -} - -/* This function is a handy place to set a breakpoint */ -void HELPER(debug_commit_end)(CPUHexagonState *env, uint32_t this_PC, - int pred_written, int has_st0, int has_st1) -{ - bool reg_printed = false; - bool pred_printed = false; - int i; - - HEX_DEBUG_LOG("Packet committed: pc = 0x" TARGET_FMT_lx "\n", this_PC); - HEX_DEBUG_LOG("slot_cancelled = %d\n", env->slot_cancelled); - - for (i = 0; i < 
TOTAL_PER_THREAD_REGS; i++) { - if (env->reg_written[i]) { - if (!reg_printed) { - HEX_DEBUG_LOG("Regs written\n"); - reg_printed = true; - } - HEX_DEBUG_LOG("\tr%d = " TARGET_FMT_ld " (0x" TARGET_FMT_lx ")\n", - i, env->gpr[i], env->gpr[i]); - } - } - - for (i = 0; i < NUM_PREGS; i++) { - if (pred_written & (1 << i)) { - if (!pred_printed) { - HEX_DEBUG_LOG("Predicates written\n"); - pred_printed = true; - } - HEX_DEBUG_LOG("\tp%d = 0x" TARGET_FMT_lx "\n", - i, env->pred[i]); - } - } - - if (has_st0 || has_st1) { - HEX_DEBUG_LOG("Stores\n"); - if (has_st0) { - print_store(env, 0); - } - if (has_st1) { - print_store(env, 1); - } - } - - HEX_DEBUG_LOG("Next PC = " TARGET_FMT_lx "\n", env->gpr[HEX_REG_PC]); - HEX_DEBUG_LOG("Exec counters: pkt = " TARGET_FMT_lx - ", insn = " TARGET_FMT_lx - ", hvx = " TARGET_FMT_lx "\n", - env->gpr[HEX_REG_QEMU_PKT_CNT], - env->gpr[HEX_REG_QEMU_INSN_CNT], - env->gpr[HEX_REG_QEMU_HVX_CNT]); - -} - int32_t HELPER(fcircadd)(int32_t RxV, int32_t offset, int32_t M, int32_t CS) { uint32_t K_const = extract32(M, 24, 4); @@ -683,7 +571,7 @@ uint32_t HELPER(conv_sf2uw)(CPUHexagonState *env, float32 RsV) uint32_t RdV; arch_fpop_start(env); /* Hexagon checks the sign before rounding */ - if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) { + if (float32_is_neg(RsV) && !float32_is_any_nan(RsV) && !float32_is_zero(RsV)) { float_raise(float_flag_invalid, &env->fp_status); RdV = 0; } else { @@ -713,7 +601,7 @@ uint64_t HELPER(conv_sf2ud)(CPUHexagonState *env, float32 RsV) uint64_t RddV; arch_fpop_start(env); /* Hexagon checks the sign before rounding */ - if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) { + if (float32_is_neg(RsV) && !float32_is_any_nan(RsV) && !float32_is_zero(RsV)) { float_raise(float_flag_invalid, &env->fp_status); RddV = 0; } else { @@ -743,7 +631,7 @@ uint32_t HELPER(conv_df2uw)(CPUHexagonState *env, float64 RssV) uint32_t RdV; arch_fpop_start(env); /* Hexagon checks the sign before rounding */ - if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) { + if (float64_is_neg(RssV) && !float64_is_any_nan(RssV) && !float64_is_zero(RssV)) { float_raise(float_flag_invalid, &env->fp_status); RdV = 0; } else { @@ -773,7 +661,7 @@ uint64_t HELPER(conv_df2ud)(CPUHexagonState *env, float64 RssV) uint64_t RddV; arch_fpop_start(env); /* Hexagon checks the sign before rounding */ - if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) { + if (float64_is_neg(RssV) && !float64_is_any_nan(RssV) && !float64_is_zero(RssV)) { float_raise(float_flag_invalid, &env->fp_status); RddV = 0; } else { @@ -803,7 +691,7 @@ uint32_t HELPER(conv_sf2uw_chop)(CPUHexagonState *env, float32 RsV) uint32_t RdV; arch_fpop_start(env); /* Hexagon checks the sign before rounding */ - if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) { + if (float32_is_neg(RsV) && !float32_is_any_nan(RsV) && !float32_is_zero(RsV)) { float_raise(float_flag_invalid, &env->fp_status); RdV = 0; } else { @@ -833,7 +721,7 @@ uint64_t HELPER(conv_sf2ud_chop)(CPUHexagonState *env, float32 RsV) uint64_t RddV; arch_fpop_start(env); /* Hexagon checks the sign before rounding */ - if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) { + if (float32_is_neg(RsV) && !float32_is_any_nan(RsV) && !float32_is_zero(RsV)) { float_raise(float_flag_invalid, &env->fp_status); RddV = 0; } else { @@ -863,7 +751,7 @@ uint32_t HELPER(conv_df2uw_chop)(CPUHexagonState *env, float64 RssV) uint32_t RdV; arch_fpop_start(env); /* Hexagon checks the sign before rounding */ - if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) { + if 
(float64_is_neg(RssV) && !float64_is_any_nan(RssV) && !float64_is_zero(RssV)) { float_raise(float_flag_invalid, &env->fp_status); RdV = 0; } else { @@ -893,7 +781,7 @@ uint64_t HELPER(conv_df2ud_chop)(CPUHexagonState *env, float64 RssV) uint64_t RddV; arch_fpop_start(env); /* Hexagon checks the sign before rounding */ - if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) { + if (float64_is_neg(RssV) && !float64_is_any_nan(RssV) && !float64_is_zero(RssV)) { float_raise(float_flag_invalid, &env->fp_status); RddV = 0; } else { @@ -1157,7 +1045,7 @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV) { float32 RdV; arch_fpop_start(env); - RdV = internal_mpyf(RsV, RtV, &env->fp_status); + RdV = float32_mul(RsV, RtV, &env->fp_status); arch_fpop_end(env); return RdV; } @@ -1166,41 +1054,18 @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV, float32 RsV, float32 RtV) { arch_fpop_start(env); - RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status); + RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status); arch_fpop_end(env); return RxV; } -static bool is_zero_prod(float32 a, float32 b) -{ - return ((float32_is_zero(a) && is_finite(b)) || - (float32_is_zero(b) && is_finite(a))); -} - -static float32 check_nan(float32 dst, float32 x, float_status *fp_status) -{ - float32 ret = dst; - if (float32_is_any_nan(x)) { - if (extract32(x, 22, 1) == 0) { - float_raise(float_flag_invalid, fp_status); - } - ret = make_float32(0xffffffff); /* nan */ - } - return ret; -} - float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV, float32 RsV, float32 RtV, float32 PuV) { - size4s_t tmp; arch_fpop_start(env); - RxV = check_nan(RxV, RxV, &env->fp_status); - RxV = check_nan(RxV, RsV, &env->fp_status); - RxV = check_nan(RxV, RtV, &env->fp_status); - tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status); - if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) { - RxV = tmp; - } + RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV), + float_muladd_suppress_add_product_zero, + &env->fp_status); arch_fpop_end(env); return RxV; } @@ -1208,86 +1073,50 @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV, float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV, float32 RsV, float32 RtV) { - float32 neg_RsV; arch_fpop_start(env); - neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 
0 : 1); - RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status); + RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product, + &env->fp_status); arch_fpop_end(env); return RxV; } -static bool is_inf_prod(int32_t a, int32_t b) +static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV, + float32 RsV, float32 RtV, int negate) { - return (float32_is_infinity(a) && float32_is_infinity(b)) || - (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) || - (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a)); -} - -float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV, - float32 RsV, float32 RtV) -{ - bool infinp; - bool infminusinf; - float32 tmp; + int flags; arch_fpop_start(env); - set_float_rounding_mode(float_round_nearest_even, &env->fp_status); - infminusinf = float32_is_infinity(RxV) && - is_inf_prod(RsV, RtV) && - (fGETBIT(31, RsV ^ RxV ^ RtV) != 0); - infinp = float32_is_infinity(RxV) || - float32_is_infinity(RtV) || - float32_is_infinity(RsV); - RxV = check_nan(RxV, RxV, &env->fp_status); - RxV = check_nan(RxV, RsV, &env->fp_status); - RxV = check_nan(RxV, RtV, &env->fp_status); - tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status); - if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) { - RxV = tmp; - } - set_float_exception_flags(0, &env->fp_status); - if (float32_is_infinity(RxV) && !infinp) { - RxV = RxV - 1; - } - if (infminusinf) { - RxV = 0; + + set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status); + RxV = float32_muladd(RsV, RtV, RxV, + negate | float_muladd_suppress_add_product_zero, + &env->fp_status); + + flags = get_float_exception_flags(&env->fp_status); + if (flags) { + /* Flags are suppressed by this instruction. */ + set_float_exception_flags(0, &env->fp_status); + + /* Return 0 for Inf - Inf. 
*/ + if (flags & float_flag_invalid_isi) { + RxV = 0; + } } + arch_fpop_end(env); return RxV; } -float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV, +float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV, float32 RsV, float32 RtV) { - bool infinp; - bool infminusinf; - float32 tmp; + return do_sffma_lib(env, RxV, RsV, RtV, 0); +} - arch_fpop_start(env); - set_float_rounding_mode(float_round_nearest_even, &env->fp_status); - infminusinf = float32_is_infinity(RxV) && - is_inf_prod(RsV, RtV) && - (fGETBIT(31, RsV ^ RxV ^ RtV) == 0); - infinp = float32_is_infinity(RxV) || - float32_is_infinity(RtV) || - float32_is_infinity(RsV); - RxV = check_nan(RxV, RxV, &env->fp_status); - RxV = check_nan(RxV, RsV, &env->fp_status); - RxV = check_nan(RxV, RtV, &env->fp_status); - float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status); - tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status); - if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) { - RxV = tmp; - } - set_float_exception_flags(0, &env->fp_status); - if (float32_is_infinity(RxV) && !infinp) { - RxV = RxV - 1; - } - if (infminusinf) { - RxV = 0; - } - arch_fpop_end(env); - return RxV; +float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV, + float32 RsV, float32 RtV) +{ + return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product); } float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV) diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c index 4b1bee3c6d7..02fd40c160f 100644 --- a/target/hexagon/translate.c +++ b/target/hexagon/translate.c @@ -23,7 +23,7 @@ #include "exec/helper-gen.h" #include "exec/helper-proto.h" #include "exec/translation-block.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "exec/log.h" #include "internal.h" #include "attribs.h" @@ -50,7 +50,6 @@ TCGv hex_gpr[TOTAL_PER_THREAD_REGS]; TCGv hex_pred[NUM_PREGS]; TCGv hex_slot_cancelled; TCGv hex_new_value_usr; -TCGv hex_reg_written[TOTAL_PER_THREAD_REGS]; TCGv hex_store_addr[STORES_MAX]; TCGv hex_store_width[STORES_MAX]; TCGv hex_store_val32[STORES_MAX]; @@ -195,21 +194,6 @@ static void gen_exception_end_tb(DisasContext *ctx, int excp) } -#define PACKET_BUFFER_LEN 1028 -static void print_pkt(Packet *pkt) -{ - GString *buf = g_string_sized_new(PACKET_BUFFER_LEN); - snprint_a_pkt_debug(buf, pkt); - HEX_DEBUG_LOG("%s", buf->str); - g_string_free(buf, true); -} -#define HEX_DEBUG_PRINT_PKT(pkt) \ - do { \ - if (HEX_DEBUG) { \ - print_pkt(pkt); \ - } \ - } while (0) - static int read_packet_words(CPUHexagonState *env, DisasContext *ctx, uint32_t words[]) { @@ -235,14 +219,6 @@ static int read_packet_words(CPUHexagonState *env, DisasContext *ctx, g_assert(ctx->base.num_insns == 1); } - HEX_DEBUG_LOG("decode_packet: pc = 0x%" VADDR_PRIx "\n", - ctx->base.pc_next); - HEX_DEBUG_LOG(" words = { "); - for (int i = 0; i < nwords; i++) { - HEX_DEBUG_LOG("0x%x, ", words[i]); - } - HEX_DEBUG_LOG("}\n"); - return nwords; } @@ -465,11 +441,6 @@ static void gen_start_packet(DisasContext *ctx) */ bitmap_zero(ctx->pregs_written, NUM_PREGS); - if (HEX_DEBUG) { - /* Handy place to set a breakpoint before the packet executes */ - gen_helper_debug_start_packet(tcg_env); - } - /* Initialize the runtime state for packet semantics */ if (need_slot_cancelled(pkt)) { tcg_gen_movi_tl(hex_slot_cancelled, 0); @@ -484,10 +455,6 @@ static void gen_start_packet(DisasContext *ctx) tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], next_PC); } } - if (HEX_DEBUG) { - ctx->pred_written = tcg_temp_new(); - 
tcg_gen_movi_tl(ctx->pred_written, 0); - } /* Preload the predicated registers into get_result_gpr(ctx, i) */ if (ctx->need_commit && @@ -591,7 +558,7 @@ static void gen_insn(DisasContext *ctx) ctx->insn->generate(ctx); mark_store_width(ctx); } else { - gen_exception_end_tb(ctx, HEX_EXCP_INVALID_OPCODE); + gen_exception_end_tb(ctx, HEX_CAUSE_INVALID_OPCODE); } } @@ -635,15 +602,6 @@ static void gen_pred_writes(DisasContext *ctx) } } -static void gen_check_store_width(DisasContext *ctx, int slot_num) -{ - if (HEX_DEBUG) { - TCGv slot = tcg_constant_tl(slot_num); - TCGv check = tcg_constant_tl(ctx->store_width[slot_num]); - gen_helper_debug_check_store_width(tcg_env, slot, check); - } -} - static bool slot_is_predicated(Packet *pkt, int slot_num) { for (int i = 0; i < pkt->num_insns; i++) { @@ -691,28 +649,24 @@ void process_store(DisasContext *ctx, int slot_num) */ switch (ctx->store_width[slot_num]) { case 1: - gen_check_store_width(ctx, slot_num); tcg_gen_qemu_st_tl(hex_store_val32[slot_num], hex_store_addr[slot_num], ctx->mem_idx, MO_UB); break; case 2: - gen_check_store_width(ctx, slot_num); tcg_gen_qemu_st_tl(hex_store_val32[slot_num], hex_store_addr[slot_num], - ctx->mem_idx, MO_TEUW); + ctx->mem_idx, MO_LE | MO_UW); break; case 4: - gen_check_store_width(ctx, slot_num); tcg_gen_qemu_st_tl(hex_store_val32[slot_num], hex_store_addr[slot_num], - ctx->mem_idx, MO_TEUL); + ctx->mem_idx, MO_LE | MO_UL); break; case 8: - gen_check_store_width(ctx, slot_num); tcg_gen_qemu_st_i64(hex_store_val64[slot_num], hex_store_addr[slot_num], - ctx->mem_idx, MO_TEUQ); + ctx->mem_idx, MO_LE | MO_UQ); break; default: { @@ -937,16 +891,6 @@ static void gen_commit_packet(DisasContext *ctx) gen_commit_hvx(ctx); } update_exec_counters(ctx); - if (HEX_DEBUG) { - TCGv has_st0 = - tcg_constant_tl(pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa); - TCGv has_st1 = - tcg_constant_tl(pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa); - - /* Handy place to set a breakpoint at the end of execution */ - gen_helper_debug_commit_end(tcg_env, tcg_constant_tl(ctx->pkt->pc), - ctx->pred_written, has_st0, has_st1); - } if (pkt->vhist_insn != NULL) { ctx->pre_commit = false; @@ -968,14 +912,13 @@ static void decode_and_translate_packet(CPUHexagonState *env, DisasContext *ctx) nwords = read_packet_words(env, ctx, words); if (!nwords) { - gen_exception_end_tb(ctx, HEX_EXCP_INVALID_PACKET); + gen_exception_end_tb(ctx, HEX_CAUSE_INVALID_PACKET); return; } ctx->pkt = &pkt; if (decode_packet(ctx, nwords, words, &pkt, false) > 0) { pkt.pc = ctx->base.pc_next; - HEX_DEBUG_PRINT_PKT(&pkt); gen_start_packet(ctx); for (i = 0; i < pkt.num_insns; i++) { ctx->insn = &pkt.insn[i]; @@ -984,7 +927,7 @@ static void decode_and_translate_packet(CPUHexagonState *env, DisasContext *ctx) gen_commit_packet(ctx); ctx->base.pc_next += pkt.encod_pkt_size_in_bytes; } else { - gen_exception_end_tb(ctx, HEX_EXCP_INVALID_PACKET); + gen_exception_end_tb(ctx, HEX_CAUSE_INVALID_PACKET); } } @@ -1083,8 +1026,8 @@ static const TranslatorOps hexagon_tr_ops = { .tb_stop = hexagon_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void hexagon_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext ctx; @@ -1093,7 +1036,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, } #define NAME_LEN 64 -static char reg_written_names[TOTAL_PER_THREAD_REGS][NAME_LEN]; static char store_addr_names[STORES_MAX][NAME_LEN]; 
static char store_width_names[STORES_MAX][NAME_LEN]; static char store_val32_names[STORES_MAX][NAME_LEN]; @@ -1112,14 +1054,6 @@ void hexagon_translate_init(void) hex_gpr[i] = tcg_global_mem_new(tcg_env, offsetof(CPUHexagonState, gpr[i]), hexagon_regnames[i]); - - if (HEX_DEBUG) { - snprintf(reg_written_names[i], NAME_LEN, "reg_written_%s", - hexagon_regnames[i]); - hex_reg_written[i] = tcg_global_mem_new(tcg_env, - offsetof(CPUHexagonState, reg_written[i]), - reg_written_names[i]); - } } hex_new_value_usr = tcg_global_mem_new(tcg_env, offsetof(CPUHexagonState, new_value_usr), "new_value_usr"); diff --git a/target/hexagon/translate.h b/target/hexagon/translate.h index 00cc2bcd634..d251e2233fd 100644 --- a/target/hexagon/translate.h +++ b/target/hexagon/translate.h @@ -73,7 +73,6 @@ typedef struct DisasContext { bool has_hvx_overlap; TCGv new_value[TOTAL_PER_THREAD_REGS]; TCGv new_pred_value[NUM_PREGS]; - TCGv pred_written; TCGv branch_taken; TCGv dczero_addr; } DisasContext; @@ -271,7 +270,6 @@ extern TCGv hex_gpr[TOTAL_PER_THREAD_REGS]; extern TCGv hex_pred[NUM_PREGS]; extern TCGv hex_slot_cancelled; extern TCGv hex_new_value_usr; -extern TCGv hex_reg_written[TOTAL_PER_THREAD_REGS]; extern TCGv hex_store_addr[STORES_MAX]; extern TCGv hex_store_width[STORES_MAX]; extern TCGv hex_store_val32[STORES_MAX]; diff --git a/target/hppa/cpu-param.h b/target/hppa/cpu-param.h index 473d489f01d..9bf7ac76d0c 100644 --- a/target/hppa/cpu-param.h +++ b/target/hppa/cpu-param.h @@ -2,14 +2,12 @@ * PA-RISC cpu parameters for qemu. * * Copyright (c) 2016 Richard Henderson - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #ifndef HPPA_CPU_PARAM_H #define HPPA_CPU_PARAM_H -#define TARGET_LONG_BITS 64 - #if defined(CONFIG_USER_ONLY) && defined(TARGET_ABI32) # define TARGET_PHYS_ADDR_SPACE_BITS 32 # define TARGET_VIRT_ADDR_SPACE_BITS 32 @@ -21,12 +19,6 @@ #define TARGET_PAGE_BITS 12 -/* PA-RISC 1.x processors have a strong memory model. */ -/* - * ??? While we do not yet implement PA-RISC 2.0, those processors have - * a weak memory model, but with TLB bits that force ordering on a per-page - * basis. It's probably easier to fall back to a strong memory model. - */ -#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL +#define TARGET_INSN_START_EXTRA_WORDS 2 #endif diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c index 7cf2e2f266d..24777727e62 100644 --- a/target/hppa/cpu.c +++ b/target/hppa/cpu.c @@ -24,9 +24,12 @@ #include "qemu/timer.h" #include "cpu.h" #include "qemu/module.h" -#include "exec/exec-all.h" +#include "exec/translation-block.h" +#include "exec/target_page.h" #include "fpu/softfloat.h" #include "tcg/tcg.h" +#include "hw/hppa/hppa_hardware.h" +#include "accel/tcg/cpu-ops.h" static void hppa_cpu_set_pc(CPUState *cs, vaddr value) { @@ -43,15 +46,17 @@ static vaddr hppa_cpu_get_pc(CPUState *cs) { CPUHPPAState *env = cpu_env(cs); - return hppa_form_gva_psw(env->psw, (env->psw & PSW_C ? env->iasq_f : 0), - env->iaoq_f & -4); + return hppa_form_gva_mask(env->gva_offset_mask, + (env->psw & PSW_C ? env->iasq_f : 0), + env->iaoq_f & -4); } -void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc, - uint64_t *pcsbase, uint32_t *pflags) +static TCGTBCPUState hppa_get_tb_cpu_state(CPUState *cs) { + CPUHPPAState *env = cpu_env(cs); uint32_t flags = 0; uint64_t cs_base = 0; + vaddr pc; /* * TB lookup assumes that PC contains the complete virtual address. @@ -59,7 +64,7 @@ void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc, * incomplete virtual address. 
This also means that we must separate * out current cpu privilege from the low bits of IAOQ_F. */ - *pc = hppa_cpu_get_pc(env_cpu(env)); + pc = hppa_cpu_get_pc(env_cpu(env)); flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT; /* @@ -89,10 +94,13 @@ void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc, & (env->sr[4] == env->sr[7])) { flags |= TB_FLAG_SR_SAME; } + if ((env->psw & PSW_W) && + (env->dr[2] & HPPA64_DIAG_SPHASH_ENABLE)) { + flags |= TB_FLAG_SPHASH; + } #endif - *pcsbase = cs_base; - *pflags = flags; + return (TCGTBCPUState){ .pc = pc, .flags = flags, .cs_base = cs_base }; } static void hppa_cpu_synchronize_from_tb(CPUState *cs, @@ -124,10 +132,12 @@ static void hppa_restore_state_to_opc(CPUState *cs, env->psw_n = 0; } +#ifndef CONFIG_USER_ONLY static bool hppa_cpu_has_work(CPUState *cs) { return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); } +#endif /* !CONFIG_USER_ONLY */ static int hppa_cpu_mmu_index(CPUState *cs, bool ifetch) { @@ -143,6 +153,7 @@ static int hppa_cpu_mmu_index(CPUState *cs, bool ifetch) static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info) { info->mach = bfd_mach_hppa20; + info->endian = BFD_ENDIAN_BIG; info->print_insn = print_insn_hppa; } @@ -194,13 +205,33 @@ static void hppa_cpu_realizefn(DeviceState *dev, Error **errp) static void hppa_cpu_initfn(Object *obj) { + CPUHPPAState *env = cpu_env(CPU(obj)); + + env->is_pa20 = !!object_dynamic_cast(obj, TYPE_HPPA64_CPU); +} + +static void hppa_cpu_reset_hold(Object *obj, ResetType type) +{ + HPPACPUClass *scc = HPPA_CPU_GET_CLASS(obj); CPUState *cs = CPU(obj); HPPACPU *cpu = HPPA_CPU(obj); CPUHPPAState *env = &cpu->env; + if (scc->parent_phases.hold) { + scc->parent_phases.hold(obj, type); + } cs->exception_index = -1; + cs->halted = 0; + cpu_set_pc(cs, 0xf0000004); + + memset(env, 0, offsetof(CPUHPPAState, end_reset_fields)); + cpu_hppa_loaded_fr0(env); - cpu_hppa_put_psw(env, PSW_W); + + /* 64-bit machines start with space-register hashing enabled in %dr2 */ + env->dr[2] = hppa_is_pa20(env) ? HPPA64_DIAG_SPHASH_ENABLE : 0; + + cpu_hppa_put_psw(env, PSW_M); } static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model) @@ -214,39 +245,54 @@ static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model) #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps hppa_sysemu_ops = { + .has_work = hppa_cpu_has_work, .get_phys_page_debug = hppa_cpu_get_phys_page_debug, }; #endif -#include "hw/core/tcg-cpu-ops.h" - static const TCGCPUOps hppa_tcg_ops = { + /* PA-RISC 1.x processors have a strong memory model. */ + /* + * ??? While we do not yet implement PA-RISC 2.0, those processors have + * a weak memory model, but with TLB bits that force ordering on a per-page + * basis. It's probably easier to fall back to a strong memory model. 
+ */ + .guest_default_memory_order = TCG_MO_ALL, + .mttcg_supported = true, + .initialize = hppa_translate_init, + .translate_code = hppa_translate_code, + .get_tb_cpu_state = hppa_get_tb_cpu_state, .synchronize_from_tb = hppa_cpu_synchronize_from_tb, .restore_state_to_opc = hppa_restore_state_to_opc, + .mmu_index = hppa_cpu_mmu_index, #ifndef CONFIG_USER_ONLY - .tlb_fill = hppa_cpu_tlb_fill, + .tlb_fill_align = hppa_cpu_tlb_fill_align, + .pointer_wrap = cpu_pointer_wrap_notreached, .cpu_exec_interrupt = hppa_cpu_exec_interrupt, .cpu_exec_halt = hppa_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = hppa_cpu_do_interrupt, .do_unaligned_access = hppa_cpu_do_unaligned_access, .do_transaction_failed = hppa_cpu_do_transaction_failed, #endif /* !CONFIG_USER_ONLY */ }; -static void hppa_cpu_class_init(ObjectClass *oc, void *data) +static void hppa_cpu_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); HPPACPUClass *acc = HPPA_CPU_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); device_class_set_parent_realize(dc, hppa_cpu_realizefn, &acc->parent_realize); + resettable_class_set_parent_phases(rc, NULL, hppa_cpu_reset_hold, NULL, + &acc->parent_phases); + cc->class_by_name = hppa_cpu_class_by_name; - cc->has_work = hppa_cpu_has_work; - cc->mmu_index = hppa_cpu_mmu_index; cc->dump_state = hppa_cpu_dump_state; cc->set_pc = hppa_cpu_set_pc; cc->get_pc = hppa_cpu_get_pc; diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h index 2bcb3b602b8..11d59d11ca8 100644 --- a/target/hppa/cpu.h +++ b/target/hppa/cpu.h @@ -21,7 +21,10 @@ #define HPPA_CPU_H #include "cpu-qom.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" +#include "system/memory.h" #include "qemu/cpu-float.h" #include "qemu/interval-tree.h" #include "hw/registerfields.h" @@ -45,8 +48,6 @@ #define PRIV_KERNEL 0 #define PRIV_USER 3 -#define TARGET_INSN_START_EXTRA_WORDS 2 - /* No need to flush MMU_ABS*_IDX */ #define HPPA_MMU_FLUSH_MASK \ (1 << MMU_KERNEL_IDX | 1 << MMU_KERNEL_P_IDX | \ @@ -211,7 +212,7 @@ typedef struct CPUArchState { uint32_t psw; /* All psw bits except the following: */ uint32_t psw_xb; /* X and B, in their normal positions */ target_ulong psw_n; /* boolean */ - target_long psw_v; /* in most significant bit */ + target_long psw_v; /* in bit 31 */ /* Splitting the carry-borrow field into the MSB and "the rest", allows * for "the rest" to be deleted when it is unused, but the MSB is in use. @@ -223,6 +224,7 @@ typedef struct CPUArchState { target_ulong psw_cb; /* in least significant bit of next nibble */ target_ulong psw_cb_msb; /* boolean */ + uint64_t gva_offset_mask; /* cached address mask based on PSW and %dr2 */ uint64_t iasq_f; uint64_t iasq_b; @@ -232,6 +234,7 @@ typedef struct CPUArchState { target_ulong cr[32]; /* control registers */ target_ulong cr_back[2]; /* back of cr17/cr18 */ target_ulong shadow[7]; /* shadow registers */ + target_ulong dr[32]; /* diagnose registers */ /* * During unwind of a memory insn, the base register of the address. 
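
Around this point the cpu.h changes replace the old per-call gva_offset_mask(psw) computation with a mask cached in CPUHPPAState, refreshed by update_gva_offset_mask() later in this diff, so that hppa_form_gva_mask() only has to apply it. The following standalone sketch is illustrative only and not part of the patch: the example_* function names are invented, while MAKE_64BIT_MASK and HPPA64_PDC_CACHE_RET_SPID_VAL are the existing QEMU macro and hppa_hardware.h constant that the real code uses.

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "hw/hppa/hppa_hardware.h"

/* Illustrative sketch: derive the cached offset mask from PSW.W and the
 * space-register-hashing enable bit held in %dr2. */
static uint64_t example_gva_offset_mask(bool psw_w, bool sphash_enabled)
{
    uint64_t mask;

    if (!psw_w) {
        /* Narrow mode: 32-bit offsets. */
        return MAKE_64BIT_MASK(0, 32);
    }
    /* Wide mode: 62-bit offsets, ... */
    mask = MAKE_64BIT_MASK(0, 62);
    if (sphash_enabled) {
        /* ... minus the bits consumed by space-register hashing. */
        mask &= ~((uint64_t)HPPA64_PDC_CACHE_RET_SPID_VAL << 48);
    }
    return mask;
}

/* Illustrative sketch: form a global virtual address from a space
 * identifier and an offset, as hppa_form_gva_mask() does. */
static uint64_t example_form_gva(uint64_t spc, uint64_t off, uint64_t mask)
{
    return spc | (off & mask);
}

Caching the mask in the CPU state, and copying it into DisasContext, avoids recomputing it from the PSW on every address formation; a write to %dr2 then only needs to refresh the cached value and end the current translation block, which is what the diag_mtdiag handler later in this diff does.
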
@@ -263,6 +266,15 @@ typedef struct CPUArchState { IntervalTreeRoot tlb_root; HPPATLBEntry tlb[HPPA_TLB_ENTRIES]; + + /* Fields up to this point are cleared by a CPU reset */ + struct {} end_reset_fields; + + bool is_pa20; + + target_ulong kernel_entry; /* Linux kernel was loaded here */ + target_ulong cmdline_or_bootorder; + target_ulong initrd_base, initrd_end; } CPUHPPAState; /** @@ -281,7 +293,7 @@ struct ArchCPU { /** * HPPACPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * * An HPPA CPU model. */ @@ -289,14 +301,12 @@ struct HPPACPUClass { CPUClass parent_class; DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; }; -#include "exec/cpu-all.h" - -static inline bool hppa_is_pa20(CPUHPPAState *env) +static inline bool hppa_is_pa20(const CPUHPPAState *env) { - return object_dynamic_cast(OBJECT(env_cpu(env)), TYPE_HPPA64_CPU) != NULL; + return env->is_pa20; } static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env) @@ -305,30 +315,25 @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env) } void hppa_translate_init(void); +void hppa_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); #define CPU_RESOLVING_TYPE TYPE_HPPA_CPU -static inline uint64_t gva_offset_mask(target_ulong psw) -{ - return (psw & PSW_W - ? MAKE_64BIT_MASK(0, 62) - : MAKE_64BIT_MASK(0, 32)); -} - -static inline target_ulong hppa_form_gva_psw(target_ulong psw, uint64_t spc, - target_ulong off) +static inline target_ulong hppa_form_gva_mask(uint64_t gva_offset_mask, + uint64_t spc, target_ulong off) { #ifdef CONFIG_USER_ONLY - return off; + return off & gva_offset_mask; #else - return spc | (off & gva_offset_mask(psw)); + return spc | (off & gva_offset_mask); #endif } static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc, target_ulong off) { - return hppa_form_gva_psw(env->psw, spc, off); + return hppa_form_gva_mask(env->gva_offset_mask, spc, off); } hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr); @@ -342,14 +347,13 @@ hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr); #define TB_FLAG_SR_SAME PSW_I #define TB_FLAG_PRIV_SHIFT 8 #define TB_FLAG_UNALIGN 0x400 +#define TB_FLAG_SPHASH 0x800 #define CS_BASE_DIFFPAGE (1 << 12) #define CS_BASE_DIFFSPACE (1 << 13) -void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *pflags); - target_ulong cpu_hppa_get_psw(CPUHPPAState *env); void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong); +void update_gva_offset_mask(CPUHPPAState *env); void cpu_hppa_loaded_fr0(CPUHPPAState *env); #ifdef CONFIG_USER_ONLY @@ -365,13 +369,13 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int); void hppa_ptlbe(CPUHPPAState *env); hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr); void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled); -bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); +bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr, + MMUAccessType access_type, int mmu_idx, + MemOp memop, int size, bool probe, uintptr_t ra); void hppa_cpu_do_interrupt(CPUState *cpu); bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req); int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, - int type, hwaddr *pphys, int *pprot); + int type, MemOp mop, hwaddr *pphys, int *pprot); void 
hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, @@ -383,6 +387,4 @@ void hppa_cpu_alarm_timer(void *); #endif G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra); -#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU - #endif /* HPPA_CPU_H */ diff --git a/target/hppa/fpu_helper.c b/target/hppa/fpu_helper.c index deaed2b65d1..45353202fae 100644 --- a/target/hppa/fpu_helper.c +++ b/target/hppa/fpu_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" @@ -49,6 +48,36 @@ void HELPER(loaded_fr0)(CPUHPPAState *env) d = FIELD_EX32(shadow, FPSR, D); set_flush_to_zero(d, &env->fp_status); set_flush_inputs_to_zero(d, &env->fp_status); + + /* + * TODO: we only need to do this at CPU reset, but currently + * HPPA does not implement a CPU reset method at all... + */ + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fp_status); + /* + * TODO: The HPPA architecture reference only documents its NaN + * propagation rule for 2-operand operations. Testing on real hardware + * might be necessary to confirm whether this order for muladd is correct. + * Not preferring the SNaN is almost certainly incorrect as it diverges + * from the documented rules for 2-operand operations. + */ + set_float_3nan_prop_rule(float_3nan_prop_abc, &env->fp_status); + /* For inf * 0 + NaN, return the input NaN */ + set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status); + /* Default NaN: sign bit clear, msb-1 frac bit set */ + set_float_default_nan_pattern(0b00100000, &env->fp_status); + set_snan_bit_is_one(true, &env->fp_status); + /* + * "PA-RISC 2.0 Architecture" says it is IMPDEF whether the flushing + * enabled by FPSR.D happens before or after rounding. We pick "before" + * for consistency with tininess detection. + */ + set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status); + /* + * TODO: "PA-RISC 2.0 Architecture" chapter 10 says that we should + * detect tininess before rounding, but we don't set that here so we + * get the default tininess after rounding. 
+ */ } void cpu_hppa_loaded_fr0(CPUHPPAState *env) @@ -65,7 +94,8 @@ static void update_fr0_op(CPUHPPAState *env, uintptr_t ra) { uint32_t soft_exp = get_float_exception_flags(&env->fp_status); uint32_t hard_exp = 0; - uint32_t shadow = env->fr0_shadow; + uint32_t shadow = env->fr0_shadow & 0x3ffffff; + uint32_t fr1 = 0; if (likely(soft_exp == 0)) { env->fr[0] = (uint64_t)shadow << 32; @@ -78,9 +108,22 @@ static void update_fr0_op(CPUHPPAState *env, uintptr_t ra) hard_exp |= CONVERT_BIT(soft_exp, float_flag_overflow, R_FPSR_ENA_O_MASK); hard_exp |= CONVERT_BIT(soft_exp, float_flag_divbyzero, R_FPSR_ENA_Z_MASK); hard_exp |= CONVERT_BIT(soft_exp, float_flag_invalid, R_FPSR_ENA_V_MASK); - shadow |= hard_exp << (R_FPSR_FLAGS_SHIFT - R_FPSR_ENABLES_SHIFT); + if (hard_exp & shadow) { + shadow = FIELD_DP32(shadow, FPSR, T, 1); + /* fill exception register #1, which is lower 32-bits of fr[0] */ +#if !defined(CONFIG_USER_ONLY) + if (hard_exp & (R_FPSR_ENA_O_MASK | R_FPSR_ENA_U_MASK)) { + /* over- and underflow both set overflow flag only */ + fr1 = FIELD_DP32(fr1, FPSR, C, 1); + fr1 = FIELD_DP32(fr1, FPSR, FLG_O, 1); + } else +#endif + { + fr1 |= hard_exp << (R_FPSR_FLAGS_SHIFT - R_FPSR_ENABLES_SHIFT); + } + } env->fr0_shadow = shadow; - env->fr[0] = (uint64_t)shadow << 32; + env->fr[0] = (uint64_t)shadow << 32 | fr1; if (hard_exp & shadow) { hppa_dynamic_excp(env, EXCP_ASSIST, ra); diff --git a/target/hppa/helper.c b/target/hppa/helper.c index b79ddd8184c..d7f8495d982 100644 --- a/target/hppa/helper.c +++ b/target/hppa/helper.c @@ -21,9 +21,9 @@ #include "qemu/log.h" #include "cpu.h" #include "fpu/softfloat.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "qemu/qemu-print.h" +#include "hw/hppa/hppa_hardware.h" target_ulong cpu_hppa_get_psw(CPUHPPAState *env) { @@ -53,12 +53,28 @@ target_ulong cpu_hppa_get_psw(CPUHPPAState *env) } psw |= env->psw_n * PSW_N; - psw |= (env->psw_v < 0) * PSW_V; + psw |= ((env->psw_v >> 31) & 1) * PSW_V; psw |= env->psw | env->psw_xb; return psw; } +void update_gva_offset_mask(CPUHPPAState *env) +{ + uint64_t gom; + + if (env->psw & PSW_W) { + gom = (env->dr[2] & HPPA64_DIAG_SPHASH_ENABLE) + ? MAKE_64BIT_MASK(0, 62) & + ~((uint64_t)HPPA64_PDC_CACHE_RET_SPID_VAL << 48) + : MAKE_64BIT_MASK(0, 62); + } else { + gom = MAKE_64BIT_MASK(0, 32); + } + + env->gva_offset_mask = gom; +} + void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong psw) { uint64_t reserved; @@ -98,6 +114,8 @@ void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong psw) cb |= ((psw >> 9) & 1) << 8; cb |= ((psw >> 8) & 1) << 4; env->psw_cb = cb; + + update_gva_offset_mask(env); } void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags) @@ -133,9 +151,11 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags) qemu_fprintf(f, "IA_F %08" PRIx64 ":%0*" PRIx64 " (" TARGET_FMT_lx ")\n" "IA_B %08" PRIx64 ":%0*" PRIx64 " (" TARGET_FMT_lx ")\n", env->iasq_f >> 32, w, m & env->iaoq_f, - hppa_form_gva_psw(psw, env->iasq_f, env->iaoq_f), + hppa_form_gva_mask(env->gva_offset_mask, env->iasq_f, + env->iaoq_f), env->iasq_b >> 32, w, m & env->iaoq_b, - hppa_form_gva_psw(psw, env->iasq_b, env->iaoq_b)); + hppa_form_gva_mask(env->gva_offset_mask, env->iasq_b, + env->iaoq_b)); psw_c[0] = (psw & PSW_W ? 'W' : '-'); psw_c[1] = (psw & PSW_E ? 
'E' : '-'); diff --git a/target/hppa/helper.h b/target/hppa/helper.h index de411923d9a..8369855d78e 100644 --- a/target/hppa/helper.h +++ b/target/hppa/helper.h @@ -99,6 +99,7 @@ DEF_HELPER_FLAGS_2(ptlb_l, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_1(ptlbe, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_2(lpa, TCG_CALL_NO_WG, tl, env, tl) DEF_HELPER_FLAGS_1(change_prot_id, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_1(update_gva_offset_mask, TCG_CALL_NO_RWG, void, env) DEF_HELPER_1(diag_btlb, void, env) DEF_HELPER_1(diag_console_output, void, env) #endif diff --git a/target/hppa/insns.decode b/target/hppa/insns.decode index 71074a64c18..4eaac750ea8 100644 --- a/target/hppa/insns.decode +++ b/target/hppa/insns.decode @@ -644,10 +644,12 @@ xmpyu 001110 ..... ..... 010 .0111 .00 t:5 r1=%ra64 r2=%rb64 # For 32-bit PA-7300LC (PCX-L2) diag_getshadowregs_pa1 000101 00 0000 0000 0001 1010 0000 0000 diag_putshadowregs_pa1 000101 00 0000 0000 0001 1010 0100 0000 + diag_mfdiag 000101 dr:5 rt:5 0000 0110 0000 0000 + diag_mtdiag 000101 dr:5 r1:5 0001 0110 0000 0000 # For 64-bit PA8700 (PCX-W2) - diag_getshadowregs_pa2 000101 00 0111 1000 0001 1000 0100 0000 - diag_putshadowregs_pa2 000101 00 0111 0000 0001 1000 0100 0000 + diag_mfdiag 000101 dr:5 0 0000 0000 1000 101 rt:5 + diag_mtdiag 000101 dr:5 r1:5 0001 1000 0100 0000 ] diag_unimp 000101 i:26 } diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c index 391f32f27d8..191ae19404b 100644 --- a/target/hppa/int_helper.c +++ b/target/hppa/int_helper.c @@ -94,11 +94,12 @@ void hppa_cpu_do_interrupt(CPUState *cs) HPPACPU *cpu = HPPA_CPU(cs); CPUHPPAState *env = &cpu->env; int i = cs->exception_index; - uint64_t old_psw; + uint64_t old_psw, old_gva_offset_mask; /* As documented in pa2.0 -- interruption handling. */ /* step 1 */ env->cr[CR_IPSW] = old_psw = cpu_hppa_get_psw(env); + old_gva_offset_mask = env->gva_offset_mask; /* step 2 -- Note PSW_W is masked out again for pa1.x */ cpu_hppa_put_psw(env, @@ -112,9 +113,9 @@ void hppa_cpu_do_interrupt(CPUState *cs) */ if (old_psw & PSW_C) { env->cr[CR_IIASQ] = - hppa_form_gva_psw(old_psw, env->iasq_f, env->iaoq_f) >> 32; + hppa_form_gva_mask(old_gva_offset_mask, env->iasq_f, env->iaoq_f) >> 32; env->cr_back[0] = - hppa_form_gva_psw(old_psw, env->iasq_b, env->iaoq_b) >> 32; + hppa_form_gva_mask(old_gva_offset_mask, env->iasq_b, env->iaoq_b) >> 32; } else { env->cr[CR_IIASQ] = 0; env->cr_back[0] = 0; @@ -165,9 +166,10 @@ void hppa_cpu_do_interrupt(CPUState *cs) if (old_psw & PSW_C) { int prot, t; - vaddr = hppa_form_gva_psw(old_psw, env->iasq_f, vaddr); + vaddr = hppa_form_gva_mask(old_gva_offset_mask, + env->iasq_f, vaddr); t = hppa_get_physical_address(env, vaddr, MMU_KERNEL_IDX, - 0, &paddr, &prot); + 0, 0, &paddr, &prot); if (t >= 0) { /* We can't re-load the instruction. 
*/ env->cr[CR_IIR] = 0; @@ -175,6 +177,10 @@ void hppa_cpu_do_interrupt(CPUState *cs) } } env->cr[CR_IIR] = ldl_phys(cs->as, paddr); + if (i == EXCP_ASSIST) { + /* stuff insn code into bits of FP exception register #1 */ + env->fr[0] |= (env->cr[CR_IIR] & 0x03ffffff); + } } break; diff --git a/target/hppa/machine.c b/target/hppa/machine.c index 211bfcf6407..13e555151a6 100644 --- a/target/hppa/machine.c +++ b/target/hppa/machine.c @@ -198,6 +198,7 @@ static const VMStateField vmstate_env_fields[] = { VMSTATE_UINT64(iasq_b, CPUHPPAState), VMSTATE_UINT32(fr0_shadow, CPUHPPAState), + VMSTATE_UINT64_ARRAY(dr, CPUHPPAState, 32), VMSTATE_END_OF_LIST() }; @@ -208,14 +209,14 @@ static const VMStateDescription * const vmstate_env_subsections[] = { static const VMStateDescription vmstate_env = { .name = "env", - .version_id = 3, - .minimum_version_id = 3, + .version_id = 4, + .minimum_version_id = 4, .fields = vmstate_env_fields, .subsections = vmstate_env_subsections, }; static const VMStateField vmstate_cpu_fields[] = { - VMSTATE_CPU(), + VMSTATE_STRUCT(parent_obj, HPPACPU, 0, vmstate_cpu_common, CPUState), VMSTATE_STRUCT(env, HPPACPU, 1, vmstate_env, CPUHPPAState), VMSTATE_END_OF_LIST() }; diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c index b984f730aa2..9bdd0a6f23d 100644 --- a/target/hppa/mem_helper.c +++ b/target/hppa/mem_helper.c @@ -20,8 +20,11 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" +#include "accel/tcg/cpu-mmu-index.h" +#include "accel/tcg/probe.h" #include "exec/page-protection.h" +#include "exec/target_page.h" #include "exec/helper-proto.h" #include "hw/core/cpu.h" #include "trace.h" @@ -197,7 +200,7 @@ static int match_prot_id64(CPUHPPAState *env, uint32_t access_id) } int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, - int type, hwaddr *pphys, int *pprot) + int type, MemOp mop, hwaddr *pphys, int *pprot) { hwaddr phys; int prot, r_prot, w_prot, x_prot, priv; @@ -221,7 +224,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, g_assert_not_reached(); } prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - goto egress; + goto egress_align; } /* Find a valid tlb entry that matches the virtual address. */ @@ -267,6 +270,12 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, goto egress; } + if (unlikely(!(prot & type))) { + /* Not allowed -- Inst/Data Memory Access Rights Fault. */ + ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR; + goto egress; + } + /* access_id == 0 means public page and no check is performed */ if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) { int access_prot = (hppa_is_pa20(env) @@ -281,14 +290,8 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, prot &= access_prot; } - if (unlikely(!(prot & type))) { - /* Not allowed -- Inst/Data Memory Access Rights Fault. */ - ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR; - goto egress; - } - /* - * In priority order, check for conditions which raise faults. + * In reverse priority order, check for conditions which raise faults. * Remove PROT bits that cover the condition we want to check, * so that the resulting PROT will force a re-check of the * architectural TLB entry for the next access. @@ -299,13 +302,15 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, /* The T bit is set -- Page Reference Fault. 
*/ ret = EXCP_PAGE_REF; } - } else if (!ent->d) { + } + if (unlikely(!ent->d)) { prot &= PAGE_READ | PAGE_EXEC; if (type & PAGE_WRITE) { /* The D bit is not set -- TLB Dirty Bit Fault. */ ret = EXCP_TLB_DIRTY; } - } else if (unlikely(ent->b)) { + } + if (unlikely(ent->b)) { prot &= PAGE_READ | PAGE_EXEC; if (type & PAGE_WRITE) { /* @@ -321,6 +326,11 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, } } + egress_align: + if (addr & ((1u << memop_alignment_bits(mop)) - 1)) { + ret = EXCP_UNALIGN; + } + egress: *pphys = phys; *pprot = prot; @@ -340,7 +350,7 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX : cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX); - excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0, + excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0, 0, &phys, &prot); /* Since we're translating for debugging, the only error that is a @@ -417,12 +427,11 @@ void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, } } -bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, - MMUAccessType type, int mmu_idx, - bool probe, uintptr_t retaddr) +bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr, + MMUAccessType type, int mmu_idx, + MemOp memop, int size, bool probe, uintptr_t ra) { - HPPACPU *cpu = HPPA_CPU(cs); - CPUHPPAState *env = &cpu->env; + CPUHPPAState *env = cpu_env(cs); int prot, excp, a_prot; hwaddr phys; @@ -438,7 +447,8 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, break; } - excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, &phys, &prot); + excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, memop, + &phys, &prot); if (unlikely(excp >= 0)) { if (probe) { return false; @@ -446,7 +456,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx); /* Failure. Raise the indicated exception. */ - raise_exception_with_ior(env, excp, retaddr, addr, + raise_exception_with_ior(env, excp, ra, addr, MMU_IDX_MMU_DISABLED(mmu_idx)); } @@ -460,8 +470,12 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, * the large page protection mask. We do not require this, * because we record the large page here in the hppa tlb. 
*/ - tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK, - prot, mmu_idx, TARGET_PAGE_SIZE); + memset(out, 0, sizeof(*out)); + out->phys_addr = phys; + out->prot = prot; + out->attrs = MEMTXATTRS_UNSPECIFIED; + out->lg_page_size = TARGET_PAGE_BITS; + return true; } @@ -678,7 +692,7 @@ target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr) hwaddr phys; int prot, excp; - excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0, + excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0, 0, &phys, &prot); if (excp >= 0) { if (excp == EXCP_DTLB_MISS) { @@ -813,3 +827,8 @@ uint64_t HELPER(b_gate_priv)(CPUHPPAState *env, uint64_t iaoq_f) } return iaoq_f; } + +void HELPER(update_gva_offset_mask)(CPUHPPAState *env) +{ + update_gva_offset_mask(env); +} diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c index 7f79196fffc..0458378abb2 100644 --- a/target/hppa/op_helper.c +++ b/target/hppa/op_helper.c @@ -20,11 +20,14 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/probe.h" #include "qemu/timer.h" #include "trace.h" +#ifdef CONFIG_USER_ONLY +#include "user/page-protection.h" +#endif G_NORETURN void HELPER(excp)(CPUHPPAState *env, int excp) { @@ -334,7 +337,7 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr, } mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P); - excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys, &prot); + excp = hppa_get_physical_address(env, addr, mmu_idx, 0, 0, &phys, &prot); if (excp >= 0) { cpu_restore_state(env_cpu(env), GETPC()); hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx)); diff --git a/target/hppa/sys_helper.c b/target/hppa/sys_helper.c index 9b43b556fd9..6e65fadcc7b 100644 --- a/target/hppa/sys_helper.c +++ b/target/hppa/sys_helper.c @@ -20,11 +20,10 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "qemu/timer.h" -#include "sysemu/runstate.h" -#include "sysemu/sysemu.h" +#include "system/runstate.h" +#include "system/system.h" #include "chardev/char-fe.h" void HELPER(write_interval_timer)(CPUHPPAState *env, target_ulong val) @@ -73,7 +72,7 @@ target_ulong HELPER(swap_system_mask)(CPUHPPAState *env, target_ulong nsm) * machines set the Q bit from 0 to 1 without an exception, * so let this go without comment. */ - env->psw = (psw & ~PSW_SM) | (nsm & PSW_SM); + cpu_hppa_put_psw(env, (psw & ~PSW_SM) | (nsm & PSW_SM)); return psw & PSW_SM; } @@ -88,7 +87,7 @@ void HELPER(rfi)(CPUHPPAState *env) * To recreate the space identifier, remove the offset bits. * For pa1.x, the mask reduces to no change to space. 
*/ - mask = gva_offset_mask(env->psw); + mask = env->gva_offset_mask; env->iaoq_f = env->cr[CR_IIAOQ]; env->iaoq_b = env->cr_back[1]; diff --git a/target/hppa/translate.c b/target/hppa/translate.c index 51c1762435a..7a81cfcb887 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -20,13 +20,14 @@ #include "qemu/osdep.h" #include "cpu.h" #include "qemu/host-utils.h" -#include "exec/exec-all.h" #include "exec/page-protection.h" #include "tcg/tcg-op.h" #include "tcg/tcg-op-gvec.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "exec/translator.h" +#include "exec/translation-block.h" +#include "exec/target_page.h" #include "exec/log.h" #define HELPER_H "helper.h" @@ -72,6 +73,7 @@ typedef struct DisasContext { /* IAOQ_Front at entry to TB. */ uint64_t iaoq_first; + uint64_t gva_offset_mask; DisasCond null_cond; TCGLabel *null_lab; @@ -1206,10 +1208,10 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1, cb_msb = tcg_temp_new_i64(); cb = tcg_temp_new_i64(); - tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero); if (is_c) { - tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, - get_psw_carry(ctx, d), ctx->zero); + tcg_gen_addcio_i64(dest, cb_msb, in1, in2, get_psw_carry(ctx, d)); + } else { + tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero); } tcg_gen_xor_i64(cb, in1, in2); tcg_gen_xor_i64(cb, cb, dest); @@ -1305,9 +1307,7 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1, if (is_b) { /* DEST,C = IN1 + ~IN2 + C. */ tcg_gen_not_i64(cb, in2); - tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, - get_psw_carry(ctx, d), ctx->zero); - tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero); + tcg_gen_addcio_i64(dest, cb_msb, in1, cb, get_psw_carry(ctx, d)); tcg_gen_xor_i64(cb, cb, in1); tcg_gen_xor_i64(cb, cb, dest); } else { @@ -1576,7 +1576,7 @@ static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs, *pofs = ofs; *pgva = addr = tcg_temp_new_i64(); tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, - gva_offset_mask(ctx->tb_flags)); + ctx->gva_offset_mask); #ifndef CONFIG_USER_ONLY if (!is_phys) { tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base)); @@ -3005,9 +3005,7 @@ static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a) tcg_gen_xor_i64(add2, in2, addc); tcg_gen_andi_i64(addc, addc, 1); - tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero); - tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, - addc, ctx->zero); + tcg_gen_addcio_i64(dest, cpu_psw_cb_msb, add1, add2, addc); /* Write back the result register. 
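
The do_add(), do_sub() and trans_ds() changes above swap a two-step tcg_gen_add2_i64() sequence for a single tcg_gen_addcio_i64() call, which produces both the sum and the carry-out from two operands plus a carry-in. A rough equivalence sketch, illustrative only and not part of the patch: the addcio_new/addcio_old names are invented, and "zero" stands for a zero-valued TCGv_i64 constant just as ctx->zero does in the hunk above.

#include "tcg/tcg-op.h"

/* Illustrative sketch: dest = a + b + ci, with the carry out in co. */
static void addcio_new(TCGv_i64 dest, TCGv_i64 co,
                       TCGv_i64 a, TCGv_i64 b, TCGv_i64 ci)
{
    tcg_gen_addcio_i64(dest, co, a, b, ci);
}

/* The equivalent two-step sequence that the patch removes. */
static void addcio_old(TCGv_i64 dest, TCGv_i64 co, TCGv_i64 a,
                       TCGv_i64 b, TCGv_i64 ci, TCGv_i64 zero)
{
    tcg_gen_add2_i64(dest, co, a, zero, b, zero);    /* dest,co = a + b  */
    tcg_gen_add2_i64(dest, co, dest, co, ci, zero);  /* add the carry-in */
}
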
*/ save_gpr(ctx, a->t, dest); @@ -3550,8 +3548,7 @@ static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1, TCGv_i64 cb = tcg_temp_new_i64(); TCGv_i64 cb_msb = tcg_temp_new_i64(); - tcg_gen_movi_i64(cb_msb, 0); - tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb); + tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero); tcg_gen_xor_i64(cb, in1, in2); tcg_gen_xor_i64(cb, cb, dest); cb_cond = get_carry(ctx, d, cb, cb_msb); @@ -4592,19 +4589,37 @@ static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a) return !ctx->is_pa20 && do_getshadowregs(ctx); } -static bool trans_diag_getshadowregs_pa2(DisasContext *ctx, arg_empty *a) +static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a) { - return ctx->is_pa20 && do_getshadowregs(ctx); + return !ctx->is_pa20 && do_putshadowregs(ctx); } -static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a) +static bool trans_diag_mfdiag(DisasContext *ctx, arg_diag_mfdiag *a) { - return !ctx->is_pa20 && do_putshadowregs(ctx); + CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); + nullify_over(ctx); + TCGv_i64 dest = dest_gpr(ctx, a->rt); + tcg_gen_ld_i64(dest, tcg_env, + offsetof(CPUHPPAState, dr[a->dr])); + save_gpr(ctx, a->rt, dest); + return nullify_end(ctx); } -static bool trans_diag_putshadowregs_pa2(DisasContext *ctx, arg_empty *a) +static bool trans_diag_mtdiag(DisasContext *ctx, arg_diag_mtdiag *a) { - return ctx->is_pa20 && do_putshadowregs(ctx); + CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); + nullify_over(ctx); + tcg_gen_st_i64(load_gpr(ctx, a->r1), tcg_env, + offsetof(CPUHPPAState, dr[a->dr])); +#ifndef CONFIG_USER_ONLY + if (ctx->is_pa20 && (a->dr == 2)) { + /* Update gva_offset_mask from the new value of %dr2 */ + gen_helper_update_gva_offset_mask(tcg_env); + /* Exit to capture the new value for the next TB. 
*/ + ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; + } +#endif + return nullify_end(ctx); } static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a) @@ -4624,6 +4639,7 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->tb_flags = ctx->base.tb->flags; ctx->is_pa20 = hppa_is_pa20(cpu_env(cs)); ctx->psw_xb = ctx->tb_flags & (PSW_X | PSW_B); + ctx->gva_offset_mask = cpu_env(cs)->gva_offset_mask; #ifdef CONFIG_USER_ONLY ctx->privilege = PRIV_USER; @@ -4868,8 +4884,8 @@ static const TranslatorOps hppa_tr_ops = { #endif }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void hppa_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext ctx = { }; translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base); diff --git a/target/i386/arch_dump.c b/target/i386/arch_dump.c index c290910a04b..16e47c4747d 100644 --- a/target/i386/arch_dump.c +++ b/target/i386/arch_dump.c @@ -13,9 +13,9 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "sysemu/dump.h" +#include "system/dump.h" #include "elf.h" -#include "sysemu/memory_mapping.h" +#include "system/memory_mapping.h" #define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \ ((DIV_ROUND_UP((hdr_size), 4) \ diff --git a/target/i386/arch_memory_mapping.c b/target/i386/arch_memory_mapping.c index d1ff659128e..a2398c21732 100644 --- a/target/i386/arch_memory_mapping.c +++ b/target/i386/arch_memory_mapping.c @@ -13,7 +13,8 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "sysemu/memory_mapping.h" +#include "system/memory_mapping.h" +#include "system/memory.h" /* PAE Paging or IA-32e Paging */ static void walk_pte(MemoryMappingList *list, AddressSpace *as, diff --git a/target/i386/confidential-guest.c b/target/i386/confidential-guest.c index b3727845adc..cfb71bf0349 100644 --- a/target/i386/confidential-guest.c +++ b/target/i386/confidential-guest.c @@ -20,7 +20,7 @@ OBJECT_DEFINE_ABSTRACT_TYPE(X86ConfidentialGuest, X86_CONFIDENTIAL_GUEST, CONFIDENTIAL_GUEST_SUPPORT) -static void x86_confidential_guest_class_init(ObjectClass *oc, void *data) +static void x86_confidential_guest_class_init(ObjectClass *oc, const void *data) { } diff --git a/target/i386/confidential-guest.h b/target/i386/confidential-guest.h index 7342d2843aa..48b88dbd313 100644 --- a/target/i386/confidential-guest.h +++ b/target/i386/confidential-guest.h @@ -14,7 +14,7 @@ #include "qom/object.h" -#include "exec/confidential-guest-support.h" +#include "system/confidential-guest-support.h" #define TYPE_X86_CONFIDENTIAL_GUEST "x86-confidential-guest" @@ -39,14 +39,16 @@ struct X86ConfidentialGuestClass { /* */ int (*kvm_type)(X86ConfidentialGuest *cg); - uint32_t (*mask_cpuid_features)(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index, - int reg, uint32_t value); + void (*cpu_instance_init)(X86ConfidentialGuest *cg, CPUState *cpu); + uint32_t (*adjust_cpuid_features)(X86ConfidentialGuest *cg, uint32_t feature, + uint32_t index, int reg, uint32_t value); + int (*check_features)(X86ConfidentialGuest *cg, CPUState *cs); }; /** * x86_confidential_guest_kvm_type: * - * Calls #X86ConfidentialGuestClass.unplug callback of @plug_handler. + * Calls #X86ConfidentialGuestClass.kvm_type() callback. 
*/ static inline int x86_confidential_guest_kvm_type(X86ConfidentialGuest *cg) { @@ -59,25 +61,47 @@ static inline int x86_confidential_guest_kvm_type(X86ConfidentialGuest *cg) } } +static inline void x86_confidential_guest_cpu_instance_init(X86ConfidentialGuest *cg, + CPUState *cpu) +{ + X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg); + + if (klass->cpu_instance_init) { + klass->cpu_instance_init(cg, cpu); + } +} + /** - * x86_confidential_guest_mask_cpuid_features: + * x86_confidential_guest_adjust_cpuid_features: * - * Removes unsupported features from a confidential guest's CPUID values, returns - * the value with the bits removed. The bits removed should be those that KVM - * provides independent of host-supported CPUID features, but are not supported by - * the confidential computing firmware. + * Adjust the supported features from a confidential guest's CPUID values, + * returns the adjusted value. There are bits being removed that are not + * supported by the confidential computing firmware or bits being added that + * are forcibly exposed to guest by the confidential computing firmware. */ -static inline int x86_confidential_guest_mask_cpuid_features(X86ConfidentialGuest *cg, +static inline int x86_confidential_guest_adjust_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index, int reg, uint32_t value) { X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg); - if (klass->mask_cpuid_features) { - return klass->mask_cpuid_features(cg, feature, index, reg, value); + if (klass->adjust_cpuid_features) { + return klass->adjust_cpuid_features(cg, feature, index, reg, value); } else { return value; } } +static inline int x86_confidential_guest_check_features(X86ConfidentialGuest *cg, + CPUState *cs) +{ + X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg); + + if (klass->check_features) { + return klass->check_features(cg, cs); + } + + return 0; +} + #endif diff --git a/target/i386/cpu-apic.c b/target/i386/cpu-apic.c index d397ec94dc1..242a05fdbe9 100644 --- a/target/i386/cpu-apic.c +++ b/target/i386/cpu-apic.c @@ -7,14 +7,14 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/error.h" #include "monitor/monitor.h" #include "monitor/hmp-target.h" -#include "sysemu/hw_accel.h" -#include "sysemu/kvm.h" -#include "sysemu/xen.h" -#include "exec/address-spaces.h" +#include "system/hw_accel.h" +#include "system/kvm.h" +#include "system/xen.h" +#include "system/address-spaces.h" #include "hw/qdev-properties.h" #include "hw/i386/apic_internal.h" #include "cpu-internal.h" diff --git a/target/i386/cpu-dump.c b/target/i386/cpu-dump.c index 3bb8e440916..a72ed93bd2f 100644 --- a/target/i386/cpu-dump.c +++ b/target/i386/cpu-dump.c @@ -27,7 +27,7 @@ /***********************************************************/ /* x86 debug */ -static const char *cc_op_str[CC_OP_NB] = { +static const char * const cc_op_str[] = { [CC_OP_DYNAMIC] = "DYNAMIC", [CC_OP_EFLAGS] = "EFLAGS", @@ -91,7 +91,6 @@ static const char *cc_op_str[CC_OP_NB] = { [CC_OP_BMILGQ] = "BMILGQ", [CC_OP_POPCNT] = "POPCNT", - [CC_OP_CLR] = "CLR", }; static void @@ -347,7 +346,6 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags) X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; int eflags, i, nb; - char cc_op_name[32]; static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" }; eflags = cpu_compute_eflags(env); @@ -456,10 +454,16 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags) 
env->dr[6], env->dr[7]); } if (flags & CPU_DUMP_CCOP) { - if ((unsigned)env->cc_op < CC_OP_NB) - snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]); - else - snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op); + const char *cc_op_name = NULL; + char cc_op_buf[32]; + + if ((unsigned)env->cc_op < ARRAY_SIZE(cc_op_str)) { + cc_op_name = cc_op_str[env->cc_op]; + } + if (cc_op_name == NULL) { + snprintf(cc_op_buf, sizeof(cc_op_buf), "[%d]", env->cc_op); + cc_op_name = cc_op_buf; + } #ifdef TARGET_X86_64 if (env->hflags & HF_CS64_MASK) { qemu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%s\n", diff --git a/target/i386/cpu-internal.h b/target/i386/cpu-internal.h index 9baac5c0b45..37c61a1bc3d 100644 --- a/target/i386/cpu-internal.h +++ b/target/i386/cpu-internal.h @@ -1,5 +1,5 @@ /* - * i386 CPU internal definitions to be shared between cpu.c and cpu-sysemu.c + * i386 CPU internal definitions to be shared between cpu.c and cpu-system.c * * Copyright (c) 2003 Fabrice Bellard * diff --git a/target/i386/cpu-param.h b/target/i386/cpu-param.h index 5e153352030..ebb844bcc83 100644 --- a/target/i386/cpu-param.h +++ b/target/i386/cpu-param.h @@ -2,14 +2,13 @@ * i386 cpu parameters for qemu. * * Copyright (c) 2003 Fabrice Bellard - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #ifndef I386_CPU_PARAM_H #define I386_CPU_PARAM_H #ifdef TARGET_X86_64 -# define TARGET_LONG_BITS 64 # define TARGET_PHYS_ADDR_SPACE_BITS 52 /* * ??? This is really 48 bits, sign-extended, but the only thing @@ -18,13 +17,11 @@ */ # define TARGET_VIRT_ADDR_SPACE_BITS 47 #else -# define TARGET_LONG_BITS 32 # define TARGET_PHYS_ADDR_SPACE_BITS 36 # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif #define TARGET_PAGE_BITS 12 -/* The x86 has a strong memory model with some store-after-load re-ordering */ -#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) +#define TARGET_INSN_START_EXTRA_WORDS 1 #endif diff --git a/target/i386/cpu-sysemu.c b/target/i386/cpu-sysemu.c deleted file mode 100644 index 227ac021f62..00000000000 --- a/target/i386/cpu-sysemu.c +++ /dev/null @@ -1,311 +0,0 @@ -/* - * i386 CPUID, CPU class, definitions, models: sysemu-only code - * - * Copyright (c) 2003 Fabrice Bellard - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "qapi/error.h" -#include "qapi/qapi-visit-run-state.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qobject-input-visitor.h" -#include "qom/qom-qobject.h" -#include "qapi/qapi-commands-machine-target.h" - -#include "cpu-internal.h" - -/* Return a QDict containing keys for all properties that can be included - * in static expansion of CPU models. All properties set by x86_cpu_load_model() - * must be included in the dictionary. 
- */ -static QDict *x86_cpu_static_props(void) -{ - FeatureWord w; - int i; - static const char *props[] = { - "min-level", - "min-xlevel", - "family", - "model", - "stepping", - "model-id", - "vendor", - "lmce", - NULL, - }; - static QDict *d; - - if (d) { - return d; - } - - d = qdict_new(); - for (i = 0; props[i]; i++) { - qdict_put_null(d, props[i]); - } - - for (w = 0; w < FEATURE_WORDS; w++) { - FeatureWordInfo *fi = &feature_word_info[w]; - int bit; - for (bit = 0; bit < 64; bit++) { - if (!fi->feat_names[bit]) { - continue; - } - qdict_put_null(d, fi->feat_names[bit]); - } - } - - return d; -} - -/* Add an entry to @props dict, with the value for property. */ -static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) -{ - QObject *value = object_property_get_qobject(OBJECT(cpu), prop, - &error_abort); - - qdict_put_obj(props, prop, value); -} - -/* Convert CPU model data from X86CPU object to a property dictionary - * that can recreate exactly the same CPU model. - */ -static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) -{ - QDict *sprops = x86_cpu_static_props(); - const QDictEntry *e; - - for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { - const char *prop = qdict_entry_key(e); - x86_cpu_expand_prop(cpu, props, prop); - } -} - -/* Convert CPU model data from X86CPU object to a property dictionary - * that can recreate exactly the same CPU model, including every - * writable QOM property. - */ -static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) -{ - ObjectPropertyIterator iter; - ObjectProperty *prop; - - object_property_iter_init(&iter, OBJECT(cpu)); - while ((prop = object_property_iter_next(&iter))) { - /* skip read-only or write-only properties */ - if (!prop->get || !prop->set) { - continue; - } - - /* "hotplugged" is the only property that is configurable - * on the command-line but will be set differently on CPUs - * created using "-cpu ... -smp ..." and by CPUs created - * on the fly by x86_cpu_from_model() for querying. Skip it. 
- */ - if (!strcmp(prop->name, "hotplugged")) { - continue; - } - x86_cpu_expand_prop(cpu, props, prop->name); - } -} - -static void object_apply_props(Object *obj, QObject *props, - const char *props_arg_name, Error **errp) -{ - Visitor *visitor; - QDict *qdict; - const QDictEntry *prop; - - visitor = qobject_input_visitor_new(props); - if (!visit_start_struct(visitor, props_arg_name, NULL, 0, errp)) { - visit_free(visitor); - return; - } - - qdict = qobject_to(QDict, props); - for (prop = qdict_first(qdict); prop; prop = qdict_next(qdict, prop)) { - if (!object_property_set(obj, qdict_entry_key(prop), - visitor, errp)) { - goto out; - } - } - - visit_check_struct(visitor, errp); -out: - visit_end_struct(visitor, NULL); - visit_free(visitor); -} - -/* Create X86CPU object according to model+props specification */ -static X86CPU *x86_cpu_from_model(const char *model, QObject *props, - const char *props_arg_name, Error **errp) -{ - X86CPU *xc = NULL; - X86CPUClass *xcc; - Error *err = NULL; - - xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); - if (xcc == NULL) { - error_setg(&err, "CPU model '%s' not found", model); - goto out; - } - - xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); - if (props) { - object_apply_props(OBJECT(xc), props, props_arg_name, &err); - if (err) { - goto out; - } - } - - x86_cpu_expand_features(xc, &err); - if (err) { - goto out; - } - -out: - if (err) { - error_propagate(errp, err); - object_unref(OBJECT(xc)); - xc = NULL; - } - return xc; -} - -CpuModelExpansionInfo * -qmp_query_cpu_model_expansion(CpuModelExpansionType type, - CpuModelInfo *model, - Error **errp) -{ - X86CPU *xc = NULL; - Error *err = NULL; - CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); - QDict *props = NULL; - const char *base_name; - - xc = x86_cpu_from_model(model->name, model->props, "model.props", &err); - if (err) { - goto out; - } - - props = qdict_new(); - ret->model = g_new0(CpuModelInfo, 1); - ret->model->props = QOBJECT(props); - - switch (type) { - case CPU_MODEL_EXPANSION_TYPE_STATIC: - /* Static expansion will be based on "base" only */ - base_name = "base"; - x86_cpu_to_dict(xc, props); - break; - case CPU_MODEL_EXPANSION_TYPE_FULL: - /* As we don't return every single property, full expansion needs - * to keep the original model name+props, and add extra - * properties on top of that. 
- */ - base_name = model->name; - x86_cpu_to_dict_full(xc, props); - break; - default: - error_setg(&err, "Unsupported expansion type"); - goto out; - } - - x86_cpu_to_dict(xc, props); - - ret->model->name = g_strdup(base_name); - -out: - object_unref(OBJECT(xc)); - if (err) { - error_propagate(errp, err); - qapi_free_CpuModelExpansionInfo(ret); - ret = NULL; - } - return ret; -} - -void cpu_clear_apic_feature(CPUX86State *env) -{ - env->features[FEAT_1_EDX] &= ~CPUID_APIC; -} - -void cpu_set_apic_feature(CPUX86State *env) -{ - env->features[FEAT_1_EDX] |= CPUID_APIC; -} - -bool cpu_has_x2apic_feature(CPUX86State *env) -{ - return env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC; -} - -bool cpu_is_bsp(X86CPU *cpu) -{ - return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; -} - -/* TODO: remove me, when reset over QOM tree is implemented */ -void x86_cpu_machine_reset_cb(void *opaque) -{ - X86CPU *cpu = opaque; - cpu_reset(CPU(cpu)); -} - -GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) -{ - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; - GuestPanicInformation *panic_info = NULL; - - if (hyperv_feat_enabled(cpu, HYPERV_FEAT_CRASH)) { - panic_info = g_new0(GuestPanicInformation, 1); - - panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; - - assert(HV_CRASH_PARAMS >= 5); - panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; - panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; - panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; - panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; - panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; - } - - return panic_info; -} -void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) -{ - CPUState *cs = CPU(obj); - GuestPanicInformation *panic_info; - - if (!cs->crash_occurred) { - error_setg(errp, "No crash occurred"); - return; - } - - panic_info = x86_cpu_get_crash_info(cs); - if (panic_info == NULL) { - error_setg(errp, "No crash information"); - return; - } - - visit_type_GuestPanicInformation(v, "crash-information", &panic_info, - errp); - qapi_free_GuestPanicInformation(panic_info); -} diff --git a/target/i386/cpu-system.c b/target/i386/cpu-system.c new file mode 100644 index 00000000000..b1494aa6740 --- /dev/null +++ b/target/i386/cpu-system.c @@ -0,0 +1,322 @@ +/* + * i386 CPUID, CPU class, definitions, models: system-only code + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "qapi/error.h" +#include "qapi/qapi-visit-run-state.h" +#include "qobject/qdict.h" +#include "qapi/qobject-input-visitor.h" +#include "qom/qom-qobject.h" +#include "qapi/qapi-commands-machine.h" + +#include "cpu-internal.h" + +/* Return a QDict containing keys for all properties that can be included + * in static expansion of CPU models. 
All properties set by x86_cpu_load_model() + * must be included in the dictionary. + */ +static QDict *x86_cpu_static_props(void) +{ + FeatureWord w; + int i; + static const char *props[] = { + "min-level", + "min-xlevel", + "family", + "model", + "stepping", + "model-id", + "vendor", + "lmce", + NULL, + }; + static QDict *d; + + if (d) { + return d; + } + + d = qdict_new(); + for (i = 0; props[i]; i++) { + qdict_put_null(d, props[i]); + } + + for (w = 0; w < FEATURE_WORDS; w++) { + FeatureWordInfo *fi = &feature_word_info[w]; + int bit; + for (bit = 0; bit < 64; bit++) { + if (!fi->feat_names[bit]) { + continue; + } + qdict_put_null(d, fi->feat_names[bit]); + } + } + + return d; +} + +/* Add an entry to @props dict, with the value for property. */ +static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) +{ + QObject *value = object_property_get_qobject(OBJECT(cpu), prop, + &error_abort); + + qdict_put_obj(props, prop, value); +} + +/* Convert CPU model data from X86CPU object to a property dictionary + * that can recreate exactly the same CPU model. + */ +static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) +{ + QDict *sprops = x86_cpu_static_props(); + const QDictEntry *e; + + for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { + const char *prop = qdict_entry_key(e); + x86_cpu_expand_prop(cpu, props, prop); + } +} + +/* Convert CPU model data from X86CPU object to a property dictionary + * that can recreate exactly the same CPU model, including every + * writable QOM property. + */ +static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) +{ + ObjectPropertyIterator iter; + ObjectProperty *prop; + + object_property_iter_init(&iter, OBJECT(cpu)); + while ((prop = object_property_iter_next(&iter))) { + /* skip read-only or write-only properties */ + if (!prop->get || !prop->set) { + continue; + } + + /* "hotplugged" is the only property that is configurable + * on the command-line but will be set differently on CPUs + * created using "-cpu ... -smp ..." and by CPUs created + * on the fly by x86_cpu_from_model() for querying. Skip it. 
+ */ + if (!strcmp(prop->name, "hotplugged")) { + continue; + } + x86_cpu_expand_prop(cpu, props, prop->name); + } +} + +static void object_apply_props(Object *obj, QObject *props, + const char *props_arg_name, Error **errp) +{ + Visitor *visitor; + QDict *qdict; + const QDictEntry *prop; + + visitor = qobject_input_visitor_new(props); + if (!visit_start_struct(visitor, props_arg_name, NULL, 0, errp)) { + visit_free(visitor); + return; + } + + qdict = qobject_to(QDict, props); + for (prop = qdict_first(qdict); prop; prop = qdict_next(qdict, prop)) { + if (!object_property_set(obj, qdict_entry_key(prop), + visitor, errp)) { + goto out; + } + } + + visit_check_struct(visitor, errp); +out: + visit_end_struct(visitor, NULL); + visit_free(visitor); +} + +/* Create X86CPU object according to model+props specification */ +static X86CPU *x86_cpu_from_model(const char *model, QObject *props, + const char *props_arg_name, Error **errp) +{ + X86CPU *xc = NULL; + X86CPUClass *xcc; + Error *err = NULL; + + xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); + if (xcc == NULL) { + error_setg(&err, "CPU model '%s' not found", model); + goto out; + } + + xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); + if (props) { + object_apply_props(OBJECT(xc), props, props_arg_name, &err); + if (err) { + goto out; + } + } + + x86_cpu_expand_features(xc, &err); + if (err) { + goto out; + } + +out: + if (err) { + error_propagate(errp, err); + object_unref(OBJECT(xc)); + xc = NULL; + } + return xc; +} + +CpuModelExpansionInfo * +qmp_query_cpu_model_expansion(CpuModelExpansionType type, + CpuModelInfo *model, + Error **errp) +{ + X86CPU *xc = NULL; + Error *err = NULL; + CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); + QDict *props = NULL; + const char *base_name; + + xc = x86_cpu_from_model(model->name, model->props, "model.props", &err); + if (err) { + goto out; + } + + props = qdict_new(); + ret->model = g_new0(CpuModelInfo, 1); + ret->model->props = QOBJECT(props); + + switch (type) { + case CPU_MODEL_EXPANSION_TYPE_STATIC: + /* Static expansion will be based on "base" only */ + base_name = "base"; + x86_cpu_to_dict(xc, props); + break; + case CPU_MODEL_EXPANSION_TYPE_FULL: + /* As we don't return every single property, full expansion needs + * to keep the original model name+props, and add extra + * properties on top of that. 
+ */ + base_name = model->name; + x86_cpu_to_dict_full(xc, props); + break; + default: + error_setg(&err, "Unsupported expansion type"); + goto out; + } + + x86_cpu_to_dict(xc, props); + + ret->model->name = g_strdup(base_name); + +out: + object_unref(OBJECT(xc)); + if (err) { + error_propagate(errp, err); + qapi_free_CpuModelExpansionInfo(ret); + ret = NULL; + } + return ret; +} + +void cpu_clear_apic_feature(CPUX86State *env) +{ + env->features[FEAT_1_EDX] &= ~CPUID_APIC; +} + +void cpu_set_apic_feature(CPUX86State *env) +{ + env->features[FEAT_1_EDX] |= CPUID_APIC; +} + +bool cpu_has_x2apic_feature(CPUX86State *env) +{ + return env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC; +} + +bool cpu_is_bsp(X86CPU *cpu) +{ + return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; +} + +/* TODO: remove me, when reset over QOM tree is implemented */ +void x86_cpu_machine_reset_cb(void *opaque) +{ + X86CPU *cpu = opaque; + cpu_reset(CPU(cpu)); +} + +GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + GuestPanicInformation *panic_info = NULL; + + if (hyperv_feat_enabled(cpu, HYPERV_FEAT_CRASH)) { + panic_info = g_new0(GuestPanicInformation, 1); + + panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; + + assert(HV_CRASH_PARAMS >= 5); + panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; + panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; + panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; + panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; + panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; + } + + return panic_info; +} +void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + CPUState *cs = CPU(obj); + GuestPanicInformation *panic_info; + + if (!cs->crash_occurred) { + error_setg(errp, "No crash occurred"); + return; + } + + panic_info = x86_cpu_get_crash_info(cs); + if (panic_info == NULL) { + error_setg(errp, "No crash information"); + return; + } + + visit_type_GuestPanicInformation(v, "crash-information", &panic_info, + errp); + qapi_free_GuestPanicInformation(panic_info); +} + +uint64_t cpu_x86_get_msr_core_thread_count(X86CPU *cpu) +{ + CPUX86State *env = &cpu->env; + uint64_t val; + + val = x86_threads_per_pkg(&env->topo_info); /* thread count, bits 15..0 */ + val |= x86_cores_per_pkg(&env->topo_info) << 16; /* core count, bits 31..16 */ + + return val; +} diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 85ef7452c04..6d85149e6e1 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -24,29 +24,37 @@ #include "qemu/hw-version.h" #include "cpu.h" #include "tcg/helper-tcg.h" -#include "sysemu/hvf.h" +#include "exec/translation-block.h" +#include "system/hvf.h" #include "hvf/hvf-i386.h" #include "kvm/kvm_i386.h" +#include "kvm/tdx.h" #include "sev.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "qapi/qapi-visit-machine.h" -#include "qapi/qmp/qerror.h" #include "standard-headers/asm-x86/kvm_para.h" #include "hw/qdev-properties.h" #include "hw/i386/topology.h" +#include "exec/watchpoint.h" #ifndef CONFIG_USER_ONLY -#include "sysemu/reset.h" -#include "qapi/qapi-commands-machine-target.h" -#include "exec/address-spaces.h" +#include "confidential-guest.h" +#include "system/reset.h" +#include "qapi/qapi-commands-machine.h" +#include "system/address-spaces.h" #include "hw/boards.h" #include "hw/i386/sgx-epc.h" #endif +#include "system/qtest.h" +#include "tcg/tcg-cpu.h" #include "disas/capstone.h" 
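/*
 * Illustrative sketch only, not part of the patch: the new
 * cpu_x86_get_msr_core_thread_count() helper introduced above packs the
 * package-wide thread count into bits 15..0 and the core count into bits
 * 31..16 of the returned MSR value. A minimal, self-contained decode of
 * such a value, using assumed example numbers (4 cores, 2 threads/core):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Assumed example package: 4 cores x 2 threads = 8 threads. */
    uint64_t val = ((uint64_t)4 << 16) | 8;     /* core count | thread count */

    unsigned threads = val & 0xffff;            /* bits 15..0  */
    unsigned cores = (val >> 16) & 0xffff;      /* bits 31..16 */

    printf("threads=%u cores=%u\n", threads, cores); /* threads=8 cores=4 */
    return 0;
}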
#include "cpu-internal.h" static void x86_cpu_realizefn(DeviceState *dev, Error **errp); +static void x86_cpu_get_supported_cpuid(uint32_t func, uint32_t index, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx); /* Helpers for building CPUID[2] descriptors: */ @@ -60,6 +68,7 @@ struct CPUID2CacheDescriptorInfo { /* * Known CPUID 2 cache descriptors. + * TLB, prefetch and sectored cache related descriptors are not included. * From Intel SDM Volume 2A, CPUID instruction */ struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { @@ -81,18 +90,29 @@ struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { .associativity = 2, .line_size = 64, }, [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, .associativity = 8, .line_size = 64, }, - /* lines per sector is not supported cpuid2_cache_descriptor(), - * so descriptors 0x22, 0x23 are not included - */ + /* + * lines per sector is not supported cpuid2_cache_descriptor(), + * so descriptors 0x22, 0x23 are not included + */ [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, .associativity = 16, .line_size = 64, }, - /* lines per sector is not supported cpuid2_cache_descriptor(), - * so descriptors 0x25, 0x20 are not included - */ + /* + * lines per sector is not supported cpuid2_cache_descriptor(), + * so descriptors 0x25, 0x29 are not included + */ [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, .associativity = 8, .line_size = 64, }, [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, .associativity = 8, .line_size = 64, }, + /* + * Newer Intel CPUs (having the cores without L3, e.g., Intel MTL, ARL) + * use CPUID 0x4 leaf to describe cache topology, by encoding CPUID 0x2 + * leaf with 0xFF. For older CPUs (without 0x4 leaf), it's also valid + * to just ignore L3's code if there's no L3. + * + * This already covers all the cases in QEMU, so code 0x40 is not + * included. + */ [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, .associativity = 4, .line_size = 32, }, [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, @@ -109,7 +129,18 @@ struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { .associativity = 8, .line_size = 64, }, [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, .associativity = 12, .line_size = 64, }, - /* Descriptor 0x49 depends on CPU family/model, so it is not included */ + /* + * Descriptor 0x49 has 2 cases: + * - 2nd-level cache: 4 MByte, 16-way set associative, 64 byte line size. + * - 3rd-level cache: 4MB, 16-way set associative, 64-byte line size + * (Intel Xeon processor MP, Family 0FH, Model 06H). + * + * When it represents L3, then it depends on CPU family/model. Fortunately, + * the legacy cache/CPU models don't have such special L3. So, just add it + * to represent the general L2 case. + */ + [0x49] = { .level = 2, .type = UNIFIED_CACHE, .size = 4 * MiB, + .associativity = 16, .line_size = 64, }, [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, .associativity = 12, .line_size = 64, }, [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, @@ -130,9 +161,10 @@ struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { .associativity = 4, .line_size = 64, }, [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, .associativity = 4, .line_size = 64, }, - /* lines per sector is not supported cpuid2_cache_descriptor(), - * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. 
- */ + /* + * lines per sector is not supported cpuid2_cache_descriptor(), + * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. + */ [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, .associativity = 8, .line_size = 64, }, [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, @@ -193,7 +225,7 @@ struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { * Return a CPUID 2 cache descriptor for a given cache. * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE */ -static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) +static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache, bool *unmacthed) { int i; @@ -210,9 +242,46 @@ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) } } + *unmacthed |= true; return CACHE_DESCRIPTOR_UNAVAILABLE; } +static const CPUCaches legacy_intel_cpuid2_cache_info; + +/* Encode cache info for CPUID[2] */ +static void encode_cache_cpuid2(X86CPU *cpu, + const CPUCaches *caches, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + CPUX86State *env = &cpu->env; + int l1d, l1i, l2, l3; + bool unmatched = false; + + *eax = 1; /* Number of CPUID[EAX=2] calls required */ + *ebx = *ecx = *edx = 0; + + l1d = cpuid2_cache_descriptor(caches->l1d_cache, &unmatched); + l1i = cpuid2_cache_descriptor(caches->l1i_cache, &unmatched); + l2 = cpuid2_cache_descriptor(caches->l2_cache, &unmatched); + l3 = cpuid2_cache_descriptor(caches->l3_cache, &unmatched); + + if (!cpu->consistent_cache || + (env->cpuid_min_level < 0x4 && !unmatched)) { + /* + * Though SDM defines code 0x40 for cases with no L2 or L3. It's + * also valid to just ignore l3's code if there's no l2. + */ + if (cpu->enable_l3_cache) { + *ecx = l3; + } + *edx = (l1d << 16) | (l1i << 8) | l2; + } else { + *ecx = 0; + *edx = CACHE_DESCRIPTOR_UNAVAILABLE; + } +} + /* CPUID Leaf 4 constants: */ /* EAX: */ @@ -236,23 +305,26 @@ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 0 /* Invalid value */) static uint32_t max_thread_ids_for_cache(X86CPUTopoInfo *topo_info, - enum CPUTopoLevel share_level) + enum CpuTopologyLevel share_level) { uint32_t num_ids = 0; switch (share_level) { - case CPU_TOPO_LEVEL_CORE: + case CPU_TOPOLOGY_LEVEL_CORE: num_ids = 1 << apicid_core_offset(topo_info); break; - case CPU_TOPO_LEVEL_DIE: + case CPU_TOPOLOGY_LEVEL_MODULE: + num_ids = 1 << apicid_module_offset(topo_info); + break; + case CPU_TOPOLOGY_LEVEL_DIE: num_ids = 1 << apicid_die_offset(topo_info); break; - case CPU_TOPO_LEVEL_PACKAGE: + case CPU_TOPOLOGY_LEVEL_SOCKET: num_ids = 1 << apicid_pkg_offset(topo_info); break; default: /* - * Currently there is no use case for SMT and MODULE, so use + * Currently there is no use case for THREAD, so use * assert directly to facilitate debugging. */ g_assert_not_reached(); @@ -277,11 +349,17 @@ static void encode_cache_cpuid4(CPUCacheInfo *cache, assert(cache->size == cache->line_size * cache->associativity * cache->partitions * cache->sets); + /* + * The following fields have bit-width limitations, so consider the + * maximum values to avoid overflow: + * Bits 25-14: maximum 4095. + * Bits 31-26: maximum 63. + */ *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | (cache->self_init ? 
CACHE_SELF_INIT_LEVEL : 0) | - (max_core_ids_in_package(topo_info) << 26) | - (max_thread_ids_for_cache(topo_info, cache->share_level) << 14); + (MIN(max_core_ids_in_package(topo_info), 63) << 26) | + (MIN(max_thread_ids_for_cache(topo_info, cache->share_level), 4095) << 14); assert(cache->line_size > 0); assert(cache->partitions > 0); @@ -301,21 +379,19 @@ static void encode_cache_cpuid4(CPUCacheInfo *cache, } static uint32_t num_threads_by_topo_level(X86CPUTopoInfo *topo_info, - enum CPUTopoLevel topo_level) + enum CpuTopologyLevel topo_level) { switch (topo_level) { - case CPU_TOPO_LEVEL_SMT: + case CPU_TOPOLOGY_LEVEL_THREAD: return 1; - case CPU_TOPO_LEVEL_CORE: + case CPU_TOPOLOGY_LEVEL_CORE: return topo_info->threads_per_core; - case CPU_TOPO_LEVEL_MODULE: - return topo_info->threads_per_core * topo_info->cores_per_module; - case CPU_TOPO_LEVEL_DIE: - return topo_info->threads_per_core * topo_info->cores_per_module * - topo_info->modules_per_die; - case CPU_TOPO_LEVEL_PACKAGE: - return topo_info->threads_per_core * topo_info->cores_per_module * - topo_info->modules_per_die * topo_info->dies_per_pkg; + case CPU_TOPOLOGY_LEVEL_MODULE: + return x86_threads_per_module(topo_info); + case CPU_TOPOLOGY_LEVEL_DIE: + return x86_threads_per_die(topo_info); + case CPU_TOPOLOGY_LEVEL_SOCKET: + return x86_threads_per_pkg(topo_info); default: g_assert_not_reached(); } @@ -323,18 +399,18 @@ static uint32_t num_threads_by_topo_level(X86CPUTopoInfo *topo_info, } static uint32_t apicid_offset_by_topo_level(X86CPUTopoInfo *topo_info, - enum CPUTopoLevel topo_level) + enum CpuTopologyLevel topo_level) { switch (topo_level) { - case CPU_TOPO_LEVEL_SMT: + case CPU_TOPOLOGY_LEVEL_THREAD: return 0; - case CPU_TOPO_LEVEL_CORE: + case CPU_TOPOLOGY_LEVEL_CORE: return apicid_core_offset(topo_info); - case CPU_TOPO_LEVEL_MODULE: + case CPU_TOPOLOGY_LEVEL_MODULE: return apicid_module_offset(topo_info); - case CPU_TOPO_LEVEL_DIE: + case CPU_TOPOLOGY_LEVEL_DIE: return apicid_die_offset(topo_info); - case CPU_TOPO_LEVEL_PACKAGE: + case CPU_TOPOLOGY_LEVEL_SOCKET: return apicid_pkg_offset(topo_info); default: g_assert_not_reached(); @@ -342,18 +418,18 @@ static uint32_t apicid_offset_by_topo_level(X86CPUTopoInfo *topo_info, return 0; } -static uint32_t cpuid1f_topo_type(enum CPUTopoLevel topo_level) +static uint32_t cpuid1f_topo_type(enum CpuTopologyLevel topo_level) { switch (topo_level) { - case CPU_TOPO_LEVEL_INVALID: + case CPU_TOPOLOGY_LEVEL_INVALID: return CPUID_1F_ECX_TOPO_LEVEL_INVALID; - case CPU_TOPO_LEVEL_SMT: + case CPU_TOPOLOGY_LEVEL_THREAD: return CPUID_1F_ECX_TOPO_LEVEL_SMT; - case CPU_TOPO_LEVEL_CORE: + case CPU_TOPOLOGY_LEVEL_CORE: return CPUID_1F_ECX_TOPO_LEVEL_CORE; - case CPU_TOPO_LEVEL_MODULE: + case CPU_TOPOLOGY_LEVEL_MODULE: return CPUID_1F_ECX_TOPO_LEVEL_MODULE; - case CPU_TOPO_LEVEL_DIE: + case CPU_TOPOLOGY_LEVEL_DIE: return CPUID_1F_ECX_TOPO_LEVEL_DIE; default: /* Other types are not supported in QEMU. */ @@ -368,38 +444,41 @@ static void encode_topo_cpuid1f(CPUX86State *env, uint32_t count, uint32_t *ecx, uint32_t *edx) { X86CPU *cpu = env_archcpu(env); - unsigned long level, next_level; + unsigned long level, base_level, next_level; uint32_t num_threads_next_level, offset_next_level; - assert(count + 1 < CPU_TOPO_LEVEL_MAX); + assert(count <= CPU_TOPOLOGY_LEVEL_SOCKET); /* * Find the No.(count + 1) topology level in avail_cpu_topo bitmap. - * The search starts from bit 1 (CPU_TOPO_LEVEL_INVALID + 1). + * The search starts from bit 0 (CPU_TOPOLOGY_LEVEL_THREAD). 
*/ - level = CPU_TOPO_LEVEL_INVALID; + level = CPU_TOPOLOGY_LEVEL_THREAD; + base_level = level; for (int i = 0; i <= count; i++) { level = find_next_bit(env->avail_cpu_topo, - CPU_TOPO_LEVEL_PACKAGE, - level + 1); + CPU_TOPOLOGY_LEVEL_SOCKET, + base_level); /* * CPUID[0x1f] doesn't explicitly encode the package level, * and it just encodes the invalid level (all fields are 0) * into the last subleaf of 0x1f. */ - if (level == CPU_TOPO_LEVEL_PACKAGE) { - level = CPU_TOPO_LEVEL_INVALID; + if (level == CPU_TOPOLOGY_LEVEL_SOCKET) { + level = CPU_TOPOLOGY_LEVEL_INVALID; break; } + /* Search the next level. */ + base_level = level + 1; } - if (level == CPU_TOPO_LEVEL_INVALID) { + if (level == CPU_TOPOLOGY_LEVEL_INVALID) { num_threads_next_level = 0; offset_next_level = 0; } else { next_level = find_next_bit(env->avail_cpu_topo, - CPU_TOPO_LEVEL_PACKAGE, + CPU_TOPOLOGY_LEVEL_SOCKET, level + 1); num_threads_next_level = num_threads_by_topo_level(topo_info, next_level); @@ -420,7 +499,6 @@ static void encode_topo_cpuid1f(CPUX86State *env, uint32_t count, static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) { assert(cache->size % 1024 == 0); - assert(cache->lines_per_tag > 0); assert(cache->associativity > 0); assert(cache->line_size > 0); return ((cache->size / 1024) << 24) | (cache->associativity << 16) | @@ -429,8 +507,8 @@ static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) #define ASSOC_FULL 0xFF -/* AMD associativity encoding used on CPUID Leaf 0x80000006: */ -#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ +/* x86 associativity encoding used on CPUID Leaf 0x80000006: */ +#define X86_ENC_ASSOC(a) (a <= 1 ? a : \ a == 2 ? 0x2 : \ a == 4 ? 0x4 : \ a == 8 ? 0x6 : \ @@ -453,19 +531,18 @@ static void encode_cache_cpuid80000006(CPUCacheInfo *l2, { assert(l2->size % 1024 == 0); assert(l2->associativity > 0); - assert(l2->lines_per_tag > 0); assert(l2->line_size > 0); *ecx = ((l2->size / 1024) << 16) | - (AMD_ENC_ASSOC(l2->associativity) << 12) | + (X86_ENC_ASSOC(l2->associativity) << 12) | (l2->lines_per_tag << 8) | (l2->line_size); + /* For Intel, EDX is reserved. */ if (l3) { assert(l3->size % (512 * 1024) == 0); assert(l3->associativity > 0); - assert(l3->lines_per_tag > 0); assert(l3->line_size > 0); *edx = ((l3->size / (512 * 1024)) << 18) | - (AMD_ENC_ASSOC(l3->associativity) << 12) | + (X86_ENC_ASSOC(l3->associativity) << 12) | (l3->lines_per_tag << 8) | (l3->line_size); } else { *edx = 0; @@ -483,7 +560,8 @@ static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0); - *eax |= max_thread_ids_for_cache(topo_info, cache->share_level) << 14; + /* Bits 25:14 - NumSharingCache: maximum 4095. */ + *eax |= MIN(max_thread_ids_for_cache(topo_info, cache->share_level), 4095) << 14; assert(cache->line_size > 0); assert(cache->partitions > 0); @@ -563,117 +641,172 @@ static void encode_topo_cpuid8000001e(X86CPU *cpu, X86CPUTopoInfo *topo_info, * These are legacy cache values. 
If there is a need to change any * of these values please use builtin_x86_defs */ - -/* L1 data cache: */ -static CPUCacheInfo legacy_l1d_cache = { - .type = DATA_CACHE, - .level = 1, - .size = 32 * KiB, - .self_init = 1, - .line_size = 64, - .associativity = 8, - .sets = 64, - .partitions = 1, - .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, -}; - -/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ -static CPUCacheInfo legacy_l1d_cache_amd = { - .type = DATA_CACHE, - .level = 1, - .size = 64 * KiB, - .self_init = 1, - .line_size = 64, - .associativity = 2, - .sets = 512, - .partitions = 1, - .lines_per_tag = 1, - .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, -}; - -/* L1 instruction cache: */ -static CPUCacheInfo legacy_l1i_cache = { - .type = INSTRUCTION_CACHE, - .level = 1, - .size = 32 * KiB, - .self_init = 1, - .line_size = 64, - .associativity = 8, - .sets = 64, - .partitions = 1, - .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, -}; - -/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ -static CPUCacheInfo legacy_l1i_cache_amd = { - .type = INSTRUCTION_CACHE, - .level = 1, - .size = 64 * KiB, - .self_init = 1, - .line_size = 64, - .associativity = 2, - .sets = 512, - .partitions = 1, - .lines_per_tag = 1, - .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, -}; - -/* Level 2 unified cache: */ -static CPUCacheInfo legacy_l2_cache = { - .type = UNIFIED_CACHE, - .level = 2, - .size = 4 * MiB, - .self_init = 1, - .line_size = 64, - .associativity = 16, - .sets = 4096, - .partitions = 1, - .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, -}; - -/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ -static CPUCacheInfo legacy_l2_cache_cpuid2 = { - .type = UNIFIED_CACHE, - .level = 2, - .size = 2 * MiB, - .line_size = 64, - .associativity = 8, - .share_level = CPU_TOPO_LEVEL_INVALID, +static const CPUCaches legacy_amd_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 64 * KiB, + .self_init = 1, + .line_size = 64, + .associativity = 2, + .sets = 512, + .partitions = 1, + .lines_per_tag = 1, + .no_invd_sharing = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 64 * KiB, + .self_init = 1, + .line_size = 64, + .associativity = 2, + .sets = 512, + .partitions = 1, + .lines_per_tag = 1, + .no_invd_sharing = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .lines_per_tag = 1, + .associativity = 16, + .sets = 512, + .partitions = 1, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 16 * MiB, + .line_size = 64, + .associativity = 16, + .sets = 16384, + .partitions = 1, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = true, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, }; - -/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */ -static CPUCacheInfo legacy_l2_cache_amd = { - .type = UNIFIED_CACHE, - .level = 2, - .size = 512 * KiB, - .line_size = 64, - .lines_per_tag = 1, - .associativity = 16, - .sets = 512, - .partitions = 1, - .share_level = CPU_TOPO_LEVEL_CORE, +/* + * Only used for the CPU models with CPUID level < 4. 
+ * These CPUs (CPUID level < 4) only use CPUID leaf 2 to present + * cache information. + * + * Note: This cache model is just a default one, and is not + * guaranteed to match real hardwares. + */ +static const CPUCaches legacy_intel_cpuid2_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .self_init = 1, + .line_size = 64, + .associativity = 8, + .sets = 64, + .partitions = 1, + .no_invd_sharing = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .self_init = 1, + .line_size = 64, + .associativity = 8, + .sets = 64, + .partitions = 1, + .no_invd_sharing = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 2 * MiB, + .self_init = 1, + .line_size = 64, + .associativity = 8, + .sets = 4096, + .partitions = 1, + .no_invd_sharing = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 16 * MiB, + .line_size = 64, + .associativity = 16, + .sets = 16384, + .partitions = 1, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = true, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, }; -/* Level 3 unified cache: */ -static CPUCacheInfo legacy_l3_cache = { - .type = UNIFIED_CACHE, - .level = 3, - .size = 16 * MiB, - .line_size = 64, - .associativity = 16, - .sets = 16384, - .partitions = 1, - .lines_per_tag = 1, - .self_init = true, - .inclusive = true, - .complex_indexing = true, - .share_level = CPU_TOPO_LEVEL_DIE, +static const CPUCaches legacy_intel_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .self_init = 1, + .line_size = 64, + .associativity = 8, + .sets = 64, + .partitions = 1, + .no_invd_sharing = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .self_init = 1, + .line_size = 64, + .associativity = 8, + .sets = 64, + .partitions = 1, + .no_invd_sharing = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 4 * MiB, + .self_init = 1, + .line_size = 64, + .associativity = 16, + .sets = 4096, + .partitions = 1, + .no_invd_sharing = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 16 * MiB, + .line_size = 64, + .associativity = 16, + .sets = 16384, + .partitions = 1, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = true, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, }; /* TLB definitions: */ @@ -767,11 +900,12 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ - CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) + CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE | \ + CPUID_HT) /* partly implemented: CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ /* missing: - CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ + CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_TM, CPUID_PBE */ /* * Kernel-only features that can be shown to usermode programs even if @@ -839,7 +973,8 @@ 
void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A | \ - CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_KERNEL_FEATURES) + CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_KERNEL_FEATURES | \ + CPUID_EXT3_CMP_LEG) #define TCG_EXT4_FEATURES 0 @@ -888,6 +1023,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, #define TCG_7_1_EAX_FEATURES (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | \ CPUID_7_1_EAX_FSRC | CPUID_7_1_EAX_CMPCCXADD) +#define TCG_7_1_ECX_FEATURES 0 #define TCG_7_1_EDX_FEATURES 0 #define TCG_7_2_EDX_FEATURES 0 #define TCG_APM_FEATURES 0 @@ -899,6 +1035,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, #define TCG_SGX_12_0_EAX_FEATURES 0 #define TCG_SGX_12_0_EBX_FEATURES 0 #define TCG_SGX_12_1_EAX_FEATURES 0 +#define TCG_24_0_EBX_FEATURES 0 #if defined CONFIG_USER_ONLY #define CPUID_8000_0008_EBX_KERNEL_FEATURES (CPUID_8000_0008_EBX_IBPB | \ @@ -912,6 +1049,17 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, #define TCG_8000_0008_EBX (CPUID_8000_0008_EBX_XSAVEERPTR | \ CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_KERNEL_FEATURES) +#if defined CONFIG_USER_ONLY +#define CPUID_8000_0021_EAX_KERNEL_FEATURES CPUID_8000_0021_EAX_AUTO_IBRS +#else +#define CPUID_8000_0021_EAX_KERNEL_FEATURES 0 +#endif + +#define TCG_8000_0021_EAX_FEATURES ( \ + CPUID_8000_0021_EAX_NO_NESTED_DATA_BP | \ + CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE | \ + CPUID_8000_0021_EAX_KERNEL_FEATURES) + FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_1_EDX] = { .type = CPUID_FEATURE_WORD, @@ -1054,9 +1202,9 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .type = CPUID_FEATURE_WORD, .feat_names = { "fsgsbase", "tsc-adjust", "sgx", "bmi1", - "hle", "avx2", NULL, "smep", + "hle", "avx2", "fdp-excptn-only", "smep", "bmi2", "erms", "invpcid", "rtm", - NULL, NULL, "mpx", NULL, + NULL, "zero-fcs-fds", "mpx", NULL, "avx512f", "avx512dq", "rdseed", "adx", "smap", "avx512ifma", "pcommit", "clflushopt", "clwb", "intel-pt", "avx512pf", "avx512er", @@ -1110,7 +1258,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_7_1_EAX] = { .type = CPUID_FEATURE_WORD, .feat_names = { - NULL, NULL, NULL, NULL, + "sha512", "sm3", "sm4", NULL, "avx-vnni", "avx512-bf16", NULL, "cmpccxadd", NULL, NULL, "fzrm", "fsrs", "fsrc", NULL, NULL, NULL, @@ -1126,6 +1274,25 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { }, .tcg_features = TCG_7_1_EAX_FEATURES, }, + [FEAT_7_1_ECX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, NULL, NULL, NULL, + NULL, "msr-imm", NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 7, + .needs_ecx = true, .ecx = 1, + .reg = R_ECX, + }, + .tcg_features = TCG_7_1_ECX_FEATURES, + }, [FEAT_7_1_EDX] = { .type = CPUID_FEATURE_WORD, .feat_names = { @@ -1133,7 +1300,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "avx-vnni-int8", "avx-ne-convert", NULL, NULL, "amx-complex", NULL, "avx-vnni-int16", NULL, NULL, NULL, "prefetchiti", NULL, - NULL, NULL, NULL, NULL, + NULL, NULL, NULL, "avx10", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -1148,8 +1315,8 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_7_2_EDX] = { .type = CPUID_FEATURE_WORD, .feat_names = { - NULL, NULL, NULL, NULL, - NULL, "mcdt-no", NULL, NULL, + "intel-psfd", "ipred-ctrl", 
"rrsba-ctrl", "ddpd-u", + "bhi-ctrl", "mcdt-no", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -1164,6 +1331,20 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { }, .tcg_features = TCG_7_2_EDX_FEATURES, }, + [FEAT_24_0_EBX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + [16] = "avx10-128", + [17] = "avx10-256", + [18] = "avx10-512", + }, + .cpuid = { + .eax = 0x24, + .needs_ecx = true, .ecx = 0, + .reg = R_EBX, + }, + .tcg_features = TCG_24_0_EBX_FEATURES, + }, [FEAT_8000_0007_EDX] = { .type = CPUID_FEATURE_WORD, .feat_names = { @@ -1215,16 +1396,38 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_8000_0021_EAX] = { .type = CPUID_FEATURE_WORD, .feat_names = { - "no-nested-data-bp", NULL, "lfence-always-serializing", NULL, + "no-nested-data-bp", "fs-gs-base-ns", "lfence-always-serializing", NULL, NULL, NULL, "null-sel-clr-base", NULL, "auto-ibrs", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + "prefetchi", NULL, NULL, NULL, + "eraps", NULL, NULL, "sbpb", + "ibpb-brtype", "srso-no", "srso-user-kernel-no", NULL, + }, + .cpuid = { .eax = 0x80000021, .reg = R_EAX, }, + .tcg_features = TCG_8000_0021_EAX_FEATURES, + .unmigratable_flags = 0, + }, + [FEAT_8000_0021_EBX] = { + .type = CPUID_FEATURE_WORD, + .cpuid = { .eax = 0x80000021, .reg = R_EBX, }, + .tcg_features = 0, + .unmigratable_flags = 0, + }, + [FEAT_8000_0022_EAX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "perfmon-v2", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, - .cpuid = { .eax = 0x80000021, .reg = R_EAX, }, + .cpuid = { .eax = 0x80000022, .reg = R_EAX, }, .tcg_features = 0, .unmigratable_flags = 0, }, @@ -1297,7 +1500,9 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .needs_ecx = true, .ecx = 0, .reg = R_EAX, }, - .tcg_features = ~0U, + .tcg_features = XSTATE_FP_MASK | XSTATE_SSE_MASK | + XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | + XSTATE_PKRU_MASK, .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | @@ -1310,7 +1515,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .needs_ecx = true, .ecx = 0, .reg = R_EDX, }, - .tcg_features = ~0U, + .tcg_features = 0U, }, /*Below are MSR exposed features*/ [FEAT_ARCH_CAPABILITIES] = { @@ -1321,9 +1526,17 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "taa-no", NULL, NULL, NULL, NULL, "sbdr-ssdp-no", "fbsdp-no", "psdp-no", NULL, "fb-clear", NULL, NULL, - NULL, NULL, NULL, NULL, + "bhi-no", NULL, NULL, NULL, "pbrsb-no", NULL, "gds-no", "rfds-no", "rfds-clear", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, "its-no", NULL, }, .msr = { .index = MSR_IA32_ARCH_CAPABILITIES, @@ -1435,7 +1648,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "vmx-exit-save-efer", "vmx-exit-load-efer", "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs", NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL, - NULL, "vmx-exit-load-pkrs", NULL, NULL, + NULL, "vmx-exit-load-pkrs", NULL, "vmx-exit-secondary-ctls", }, .msr = { .index = MSR_IA32_VMX_TRUE_EXIT_CTLS, @@ -1450,7 +1663,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { 
NULL, "vmx-entry-ia32e-mode", NULL, NULL, NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer", "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL, - NULL, NULL, "vmx-entry-load-pkrs", NULL, + NULL, NULL, "vmx-entry-load-pkrs", "vmx-entry-load-fred", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, @@ -1608,14 +1821,21 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { }, }; -typedef struct FeatureMask { - FeatureWord index; - uint64_t mask; -} FeatureMask; +bool is_feature_word_cpuid(uint32_t feature, uint32_t index, int reg) +{ + FeatureWordInfo *wi; + FeatureWord w; -typedef struct FeatureDep { - FeatureMask from, to; -} FeatureDep; + for (w = 0; w < FEATURE_WORDS; w++) { + wi = &feature_word_info[w]; + if (wi->type == CPUID_FEATURE_WORD && wi->cpuid.eax == feature && + (!wi->cpuid.needs_ecx || wi->cpuid.ecx == index) && + wi->cpuid.reg == reg) { + return true; + } + } + return false; +} static FeatureDep feature_dependencies[] = { { @@ -1726,10 +1946,6 @@ static FeatureDep feature_dependencies[] = { .from = { FEAT_7_1_EAX, CPUID_7_1_EAX_LKGS }, .to = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED }, }, - { - .from = { FEAT_7_1_EAX, CPUID_7_1_EAX_WRMSRNS }, - .to = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED }, - }, { .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_SGX }, .to = { FEAT_7_0_ECX, CPUID_7_0_ECX_SGX_LC }, @@ -1746,12 +1962,28 @@ static FeatureDep feature_dependencies[] = { .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_SGX }, .to = { FEAT_SGX_12_1_EAX, ~0ull }, }, -}; - -typedef struct X86RegisterInfo32 { - /* Name of register */ - const char *name; - /* QAPI enum value register */ + { + .from = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_128 }, + .to = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_256 }, + }, + { + .from = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_256 }, + .to = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_512 }, + }, + { + .from = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_VL_MASK }, + .to = { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 }, + }, + { + .from = { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 }, + .to = { FEAT_24_0_EBX, ~0ull }, + }, +}; + +typedef struct X86RegisterInfo32 { + /* Name of register */ + const char *name; + /* QAPI enum value register */ X86CPURegister32 qapi_enum; } X86RegisterInfo32; @@ -1769,9 +2001,6 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { }; #undef REGISTER -/* CPUID feature bits available in XSS */ -#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK) - ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = { [XSTATE_FP_BIT] = { /* x87 FP state component is always enabled if XSAVE is supported */ @@ -1837,7 +2066,7 @@ uint32_t xsave_area_size(uint64_t mask, bool compacted) static inline bool accel_uses_host_cpuid(void) { - return kvm_enabled() || hvf_enabled(); + return !tcg_enabled() && !qtest_enabled(); } static inline uint64_t x86_cpu_xsave_xcr0_components(X86CPU *cpu) @@ -1865,9 +2094,10 @@ static inline uint64_t x86_cpu_xsave_xss_components(X86CPU *cpu) * Returns the set of feature flags that are supported and migratable by * QEMU, for a given FeatureWord. 
*/ -static uint64_t x86_cpu_get_migratable_flags(FeatureWord w) +static uint64_t x86_cpu_get_migratable_flags(X86CPU *cpu, FeatureWord w) { FeatureWordInfo *wi = &feature_word_info[w]; + CPUX86State *env = &cpu->env; uint64_t r = 0; int i; @@ -1881,6 +2111,12 @@ static uint64_t x86_cpu_get_migratable_flags(FeatureWord w) r |= f; } } + + /* when tsc-khz is set explicitly, invtsc is migratable */ + if ((w == FEAT_8000_0007_EDX) && env->user_tsc_khz) { + r |= CPUID_APM_INVTSC; + } + return r; } @@ -1959,6 +2195,7 @@ typedef struct X86CPUDefinition { int family; int model; int stepping; + uint8_t avx10_version; FeatureWordArray features; const char *model_id; const CPUCaches *const cache_info; @@ -2017,7 +2254,7 @@ static const CPUCaches epyc_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2030,7 +2267,7 @@ static const CPUCaches epyc_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2041,7 +2278,7 @@ static const CPUCaches epyc_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2055,7 +2292,7 @@ static const CPUCaches epyc_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = true, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2071,7 +2308,7 @@ static CPUCaches epyc_v4_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2084,7 +2321,7 @@ static CPUCaches epyc_v4_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2095,7 +2332,7 @@ static CPUCaches epyc_v4_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2109,7 +2346,61 @@ static CPUCaches epyc_v4_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = false, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, +}; + +static CPUCaches epyc_v5_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 64 * KiB, + .line_size = 64, + .associativity = 4, + .partitions = 1, + .sets = 256, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + 
.type = UNIFIED_CACHE, + .level = 3, + .size = 8 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 8192, + .lines_per_tag = 1, + .self_init = true, + .no_invd_sharing = true, + .complex_indexing = false, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2125,7 +2416,7 @@ static const CPUCaches epyc_rome_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2138,7 +2429,7 @@ static const CPUCaches epyc_rome_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2149,7 +2440,7 @@ static const CPUCaches epyc_rome_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2163,7 +2454,7 @@ static const CPUCaches epyc_rome_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = true, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2179,7 +2470,7 @@ static const CPUCaches epyc_rome_v3_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2192,7 +2483,7 @@ static const CPUCaches epyc_rome_v3_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2203,7 +2494,7 @@ static const CPUCaches epyc_rome_v3_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2217,7 +2508,61 @@ static const CPUCaches epyc_rome_v3_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = false, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, +}; + +static const CPUCaches epyc_rome_v5_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 16 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 16384, + .lines_per_tag = 1, + .self_init = true, + .no_invd_sharing = true, + .complex_indexing = false, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2233,7 +2578,7 @@ static const CPUCaches epyc_milan_cache_info = 
{ .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2246,7 +2591,7 @@ static const CPUCaches epyc_milan_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2257,7 +2602,7 @@ static const CPUCaches epyc_milan_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2271,7 +2616,7 @@ static const CPUCaches epyc_milan_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = true, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2287,99 +2632,633 @@ static const CPUCaches epyc_milan_v2_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 32 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 32768, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = false, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, +}; + +static const CPUCaches epyc_milan_v3_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 32 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 32768, + .lines_per_tag = 1, + .self_init = true, + .no_invd_sharing = true, + .complex_indexing = false, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, +}; + +static const CPUCaches epyc_genoa_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = 
INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 1 * MiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 2048, + .lines_per_tag = 1, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 32 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 32768, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = false, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, +}; + +static const CPUCaches epyc_genoa_v2_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 1 * MiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 2048, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 32 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 32768, + .lines_per_tag = 1, + .self_init = true, + .no_invd_sharing = true, + .complex_indexing = false, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, +}; + +static const CPUCaches epyc_turin_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 48 * KiB, + .line_size = 64, + .associativity = 12, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 1 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 32 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 32768, + .lines_per_tag = 1, + .self_init = true, + .no_invd_sharing = true, + .complex_indexing = false, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + } +}; + +static const CPUCaches xeon_spr_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x0.EAX */ + .type = DATA_CACHE, + .level = 1, + .self_init = true, + + /* CPUID 0x4.0x0.EBX */ + .line_size = 64, + .partitions = 1, + .associativity = 12, + + /* CPUID 0x4.0x0.ECX */ + .sets = 64, + + /* CPUID 0x4.0x0.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = false, + + .size = 48 * KiB, + .share_level = 
CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x1.EAX */ + .type = INSTRUCTION_CACHE, + .level = 1, + .self_init = true, + + /* CPUID 0x4.0x1.EBX */ + .line_size = 64, + .partitions = 1, + .associativity = 8, + + /* CPUID 0x4.0x1.ECX */ + .sets = 64, + + /* CPUID 0x4.0x1.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = false, + + .size = 32 * KiB, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x2.EAX */ + .type = UNIFIED_CACHE, + .level = 2, + .self_init = true, + + /* CPUID 0x4.0x2.EBX */ + .line_size = 64, + .partitions = 1, + .associativity = 16, + + /* CPUID 0x4.0x2.ECX */ + .sets = 2048, + + /* CPUID 0x4.0x2.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = false, + + .size = 2 * MiB, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x3.EAX */ + .type = UNIFIED_CACHE, + .level = 3, + .self_init = true, + + /* CPUID 0x4.0x3.EBX */ + .line_size = 64, + .partitions = 1, + .associativity = 15, + + /* CPUID 0x4.0x3.ECX */ + .sets = 65536, + + /* CPUID 0x4.0x3.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = true, + + .size = 60 * MiB, + .share_level = CPU_TOPOLOGY_LEVEL_SOCKET, + }, +}; + +static const CPUCaches xeon_gnr_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x0.EAX */ + .type = DATA_CACHE, + .level = 1, + .self_init = true, + + /* CPUID 0x4.0x0.EBX */ + .line_size = 64, + .partitions = 1, + .associativity = 12, + + /* CPUID 0x4.0x0.ECX */ + .sets = 64, + + /* CPUID 0x4.0x0.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = false, + + .size = 48 * KiB, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x1.EAX */ + .type = INSTRUCTION_CACHE, + .level = 1, + .self_init = true, + + /* CPUID 0x4.0x1.EBX */ + .line_size = 64, + .partitions = 1, + .associativity = 16, + + /* CPUID 0x4.0x1.ECX */ + .sets = 64, + + /* CPUID 0x4.0x1.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = false, + + .size = 64 * KiB, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x2.EAX */ + .type = UNIFIED_CACHE, + .level = 2, + .self_init = true, + + /* CPUID 0x4.0x2.EBX */ + .line_size = 64, + .partitions = 1, + .associativity = 16, + + /* CPUID 0x4.0x2.ECX */ + .sets = 2048, + + /* CPUID 0x4.0x2.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = false, + + .size = 2 * MiB, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x3.EAX */ + .type = UNIFIED_CACHE, + .level = 3, + .self_init = true, + + /* CPUID 0x4.0x3.EBX */ + .line_size = 64, + .partitions = 1, + .associativity = 16, + + /* CPUID 0x4.0x3.ECX */ + .sets = 294912, + + /* CPUID 0x4.0x3.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = true, + + .size = 288 * MiB, + .share_level = CPU_TOPOLOGY_LEVEL_SOCKET, + }, +}; + +static const CPUCaches xeon_srf_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x0.EAX */ + .type = DATA_CACHE, + .level = 1, + .self_init = true, + + /* CPUID 0x4.0x0.EBX */ + .line_size = 64, + .partitions = 1, + .associativity = 8, + + /* CPUID 0x4.0x0.ECX */ + .sets = 64, + + /* CPUID 0x4.0x0.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = false, + + .size = 32 * KiB, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, 
.l1i_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x1.EAX */ .type = INSTRUCTION_CACHE, .level = 1, - .size = 32 * KiB, + .self_init = true, + + /* CPUID 0x4.0x1.EBX */ .line_size = 64, - .associativity = 8, .partitions = 1, - .sets = 64, - .lines_per_tag = 1, - .self_init = 1, - .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .associativity = 8, + + /* CPUID 0x4.0x1.ECX */ + .sets = 128, + + /* CPUID 0x4.0x1.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = false, + + .size = 64 * KiB, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x2.EAX */ .type = UNIFIED_CACHE, .level = 2, - .size = 512 * KiB, + .self_init = true, + + /* CPUID 0x4.0x2.EBX */ .line_size = 64, - .associativity = 8, .partitions = 1, - .sets = 1024, - .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .associativity = 16, + + /* CPUID 0x4.0x2.ECX */ + .sets = 4096, + + /* CPUID 0x4.0x2.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = false, + + .size = 4 * MiB, + .share_level = CPU_TOPOLOGY_LEVEL_MODULE, }, .l3_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x3.EAX */ .type = UNIFIED_CACHE, .level = 3, - .size = 32 * MiB, + .self_init = true, + + /* CPUID 0x4.0x3.EBX */ .line_size = 64, - .associativity = 16, .partitions = 1, - .sets = 32768, - .lines_per_tag = 1, - .self_init = true, - .inclusive = true, - .complex_indexing = false, - .share_level = CPU_TOPO_LEVEL_DIE, + .associativity = 12, + + /* CPUID 0x4.0x3.ECX */ + .sets = 147456, + + /* CPUID 0x4.0x3.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = true, + + .size = 108 * MiB, + .share_level = CPU_TOPOLOGY_LEVEL_SOCKET, }, }; -static const CPUCaches epyc_genoa_cache_info = { +static const CPUCaches yongfeng_cache_info = { .l1d_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x0.EAX */ .type = DATA_CACHE, .level = 1, - .size = 32 * KiB, + .self_init = true, + + /* CPUID 0x4.0x0.EBX */ .line_size = 64, - .associativity = 8, .partitions = 1, + .associativity = 8, + + /* CPUID 0x4.0x0.ECX */ .sets = 64, + + /* CPUID 0x4.0x0.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = false, + + /* CPUID 0x80000005.ECX */ .lines_per_tag = 1, - .self_init = 1, - .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .size = 32 * KiB, + + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x1.EAX */ .type = INSTRUCTION_CACHE, .level = 1, - .size = 32 * KiB, + .self_init = true, + + /* CPUID 0x4.0x1.EBX */ .line_size = 64, - .associativity = 8, .partitions = 1, + .associativity = 16, + + /* CPUID 0x4.0x1.ECX */ .sets = 64, + + /* CPUID 0x4.0x1.EDX */ + .no_invd_sharing = false, + .inclusive = false, + .complex_indexing = false, + + /* CPUID 0x80000005.EDX */ .lines_per_tag = 1, - .self_init = 1, - .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .size = 64 * KiB, + + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x2.EAX */ .type = UNIFIED_CACHE, .level = 2, - .size = 1 * MiB, + .self_init = true, + + /* CPUID 0x4.0x2.EBX */ .line_size = 64, - .associativity = 8, .partitions = 1, - .sets = 2048, - .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .associativity = 8, + + /* CPUID 0x4.0x2.ECX */ + .sets = 512, + + /* CPUID 0x4.0x2.EDX */ + .no_invd_sharing = false, + .inclusive = true, + .complex_indexing = false, + + /* CPUID 0x80000006.ECX */ + .size = 256 * KiB, + + .share_level = 
CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { + /* CPUID 0x4.0x3.EAX */ .type = UNIFIED_CACHE, .level = 3, - .size = 32 * MiB, + .self_init = true, + + /* CPUID 0x4.0x3.EBX */ .line_size = 64, - .associativity = 16, .partitions = 1, - .sets = 32768, - .lines_per_tag = 1, - .self_init = true, + .associativity = 16, + + /* CPUID 0x4.0x3.ECX */ + .sets = 8192, + + /* CPUID 0x4.0x3.EDX */ + .no_invd_sharing = true, .inclusive = true, .complex_indexing = false, - .share_level = CPU_TOPO_LEVEL_DIE, + + .size = 8 * MiB, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2635,6 +3514,7 @@ static const X86CPUDefinition builtin_x86_defs[] = { I486_FEATURES, .xlevel = 0, .model_id = "", + .cache_info = &legacy_intel_cpuid2_cache_info, }, { .name = "pentium", @@ -2647,6 +3527,7 @@ static const X86CPUDefinition builtin_x86_defs[] = { PENTIUM_FEATURES, .xlevel = 0, .model_id = "", + .cache_info = &legacy_intel_cpuid2_cache_info, }, { .name = "pentium2", @@ -2659,6 +3540,7 @@ static const X86CPUDefinition builtin_x86_defs[] = { PENTIUM2_FEATURES, .xlevel = 0, .model_id = "", + .cache_info = &legacy_intel_cpuid2_cache_info, }, { .name = "pentium3", @@ -2671,6 +3553,7 @@ static const X86CPUDefinition builtin_x86_defs[] = { PENTIUM3_FEATURES, .xlevel = 0, .model_id = "", + .cache_info = &legacy_intel_cpuid2_cache_info, }, { .name = "athlon", @@ -3623,6 +4506,7 @@ static const X86CPUDefinition builtin_x86_defs[] = { }, { .version = 4, + .note = "IBRS, EPT switching, no TSX", .props = (PropValue[]) { { "vmx-eptp-switching", "on" }, { /* end of list */ } @@ -3757,7 +4641,7 @@ static const X86CPUDefinition builtin_x86_defs[] = { }, }, { .version = 4, - .note = "ARCH_CAPABILITIES, no TSX", + .note = "ARCH_CAPABILITIES, EPT switching, no TSX", .props = (PropValue[]) { { "vmx-eptp-switching", "on" }, { /* end of list */ } @@ -4202,6 +5086,15 @@ static const X86CPUDefinition builtin_x86_defs[] = { { /* end of list */ } } }, + { + .version = 4, + .note = "with spr-sp cache model and 0x1f leaf", + .cache_info = &xeon_spr_cache_info, + .props = (PropValue[]) { + { "x-force-cpuid-0x1f", "on" }, + { /* end of list */ }, + } + }, { /* end of list */ } } }, @@ -4338,6 +5231,32 @@ static const X86CPUDefinition builtin_x86_defs[] = { .model_id = "Intel Xeon Processor (GraniteRapids)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, + { + .version = 2, + .props = (PropValue[]) { + { "ss", "on" }, + { "tsc-adjust", "on" }, + { "cldemote", "on" }, + { "movdiri", "on" }, + { "movdir64b", "on" }, + { "avx10", "on" }, + { "avx10-128", "on" }, + { "avx10-256", "on" }, + { "avx10-512", "on" }, + { "avx10-version", "1" }, + { "stepping", "1" }, + { /* end of list */ } + } + }, + { + .version = 3, + .note = "with gnr-sp cache model and 0x1f leaf", + .cache_info = &xeon_gnr_cache_info, + .props = (PropValue[]) { + { "x-force-cpuid-0x1f", "on" }, + { /* end of list */ }, + } + }, { /* end of list */ }, }, }, @@ -4380,7 +5299,165 @@ static const X86CPUDefinition builtin_x86_defs[] = { .features[FEAT_7_0_ECX] = CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_GFNI | CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | - CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_BUS_LOCK_DETECT, + CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_BUS_LOCK_DETECT, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_SERIALIZE | + CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | + CPUID_7_0_EDX_SPEC_CTRL_SSBD, + .features[FEAT_ARCH_CAPABILITIES] = + MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | + MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY 
| MSR_ARCH_CAP_MDS_NO | + MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_SBDR_SSDP_NO | + MSR_ARCH_CAP_FBSDP_NO | MSR_ARCH_CAP_PSDP_NO | + MSR_ARCH_CAP_PBRSB_NO, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_7_1_EAX] = + CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_CMPCCXADD | + CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_AVX_IFMA, + .features[FEAT_7_1_EDX] = + CPUID_7_1_EDX_AVX_VNNI_INT8 | CPUID_7_1_EDX_AVX_NE_CONVERT, + .features[FEAT_7_2_EDX] = + CPUID_7_2_EDX_MCDT_NO, + .features[FEAT_VMX_BASIC] = + MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = + MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | + MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | + MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_AD_BITS | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | + MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_IA32_PAT | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = + VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | + VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | + VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = + VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | + VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | + VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | + VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | + VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_WBINVD_EXITING | + VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | + VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML | + VMX_SECONDARY_EXEC_XSAVES, + .features[FEAT_VMX_VMFUNC] = + MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Intel Xeon Processor (SierraForest)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, 
+ { + .version = 2, + .props = (PropValue[]) { + { "ss", "on" }, + { "tsc-adjust", "on" }, + { "cldemote", "on" }, + { "movdiri", "on" }, + { "movdir64b", "on" }, + { "gds-no", "on" }, + { "rfds-no", "on" }, + { "lam", "on" }, + { "intel-psfd", "on"}, + { "ipred-ctrl", "on"}, + { "rrsba-ctrl", "on"}, + { "bhi-ctrl", "on"}, + { "stepping", "3" }, + { /* end of list */ } + } + }, + { + .version = 3, + .note = "with srf-sp cache model and 0x1f leaf", + .cache_info = &xeon_srf_cache_info, + .props = (PropValue[]) { + { "x-force-cpuid-0x1f", "on" }, + { /* end of list */ }, + } + }, + { /* end of list */ }, + }, + }, + { + .name = "ClearwaterForest", + .level = 0x23, + .xlevel = 0x80000008, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 221, + .stepping = 0, + /* + * please keep the ascending order so that we can have a clear view of + * bit position of each feature. + */ + .features[FEAT_1_EDX] = + CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | + CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | + CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | + CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | + CPUID_SSE | CPUID_SSE2 | CPUID_SS, + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 | + CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID | CPUID_EXT_SSE41 | + CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | + CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | + CPUID_EXT_XSAVE | CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_WBNOINVD, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_TSC_ADJUST | + CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | + CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB | + CPUID_7_0_EBX_SHA_NI, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_GFNI | + CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | + CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_BUS_LOCK_DETECT | + CPUID_7_0_ECX_CLDEMOTE | CPUID_7_0_ECX_MOVDIRI | + CPUID_7_0_ECX_MOVDIR64B, .features[FEAT_7_0_EDX] = CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_SERIALIZE | CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | @@ -4390,19 +5467,25 @@ static const X86CPUDefinition builtin_x86_defs[] = { MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_SBDR_SSDP_NO | MSR_ARCH_CAP_FBSDP_NO | MSR_ARCH_CAP_PSDP_NO | - MSR_ARCH_CAP_PBRSB_NO, + MSR_ARCH_CAP_BHI_NO | MSR_ARCH_CAP_PBRSB_NO | + MSR_ARCH_CAP_GDS_NO | MSR_ARCH_CAP_RFDS_NO, .features[FEAT_XSAVE] = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT, .features[FEAT_7_1_EAX] = + CPUID_7_1_EAX_SHA512 | CPUID_7_1_EAX_SM3 | CPUID_7_1_EAX_SM4 | CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_CMPCCXADD | - CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_AVX_IFMA, + CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_AVX_IFMA | + CPUID_7_1_EAX_LAM, .features[FEAT_7_1_EDX] = - CPUID_7_1_EDX_AVX_VNNI_INT8 | CPUID_7_1_EDX_AVX_NE_CONVERT, + CPUID_7_1_EDX_AVX_VNNI_INT8 | CPUID_7_1_EDX_AVX_NE_CONVERT | + 
CPUID_7_1_EDX_AVX_VNNI_INT16 | CPUID_7_1_EDX_PREFETCHITI, .features[FEAT_7_2_EDX] = - CPUID_7_2_EDX_MCDT_NO, + CPUID_7_2_EDX_PSFD | CPUID_7_2_EDX_IPRED_CTRL | + CPUID_7_2_EDX_RRSBA_CTRL | CPUID_7_2_EDX_DDPD_U | + CPUID_7_2_EDX_BHI_CTRL | CPUID_7_2_EDX_MCDT_NO, .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, .features[FEAT_VMX_ENTRY_CTLS] = @@ -4460,8 +5543,7 @@ static const X86CPUDefinition builtin_x86_defs[] = { VMX_SECONDARY_EXEC_XSAVES, .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, - .xlevel = 0x80000008, - .model_id = "Intel Xeon Processor (SierraForest)", + .model_id = "Intel Xeon Processor (ClearwaterForest)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, { /* end of list */ }, @@ -4968,6 +6050,25 @@ static const X86CPUDefinition builtin_x86_defs[] = { }, .cache_info = &epyc_v4_cache_info }, + { + .version = 5, + .props = (PropValue[]) { + { "overflow-recov", "on" }, + { "succor", "on" }, + { "lbrv", "on" }, + { "tsc-scale", "on" }, + { "vmcb-clean", "on" }, + { "flushbyasid", "on" }, + { "pause-filter", "on" }, + { "pfthreshold", "on" }, + { "v-vmsave-vmload", "on" }, + { "vgif", "on" }, + { "model-id", + "AMD EPYC-v5 Processor" }, + { /* end of list */ } + }, + .cache_info = &epyc_v5_cache_info + }, { /* end of list */ } } }, @@ -5106,6 +6207,25 @@ static const X86CPUDefinition builtin_x86_defs[] = { { /* end of list */ } }, }, + { + .version = 5, + .props = (PropValue[]) { + { "overflow-recov", "on" }, + { "succor", "on" }, + { "lbrv", "on" }, + { "tsc-scale", "on" }, + { "vmcb-clean", "on" }, + { "flushbyasid", "on" }, + { "pause-filter", "on" }, + { "pfthreshold", "on" }, + { "v-vmsave-vmload", "on" }, + { "vgif", "on" }, + { "model-id", + "AMD EPYC-Rome-v5 Processor" }, + { /* end of list */ } + }, + .cache_info = &epyc_rome_v5_cache_info + }, { /* end of list */ } } }, @@ -5181,6 +6301,25 @@ static const X86CPUDefinition builtin_x86_defs[] = { }, .cache_info = &epyc_milan_v2_cache_info }, + { + .version = 3, + .props = (PropValue[]) { + { "overflow-recov", "on" }, + { "succor", "on" }, + { "lbrv", "on" }, + { "tsc-scale", "on" }, + { "vmcb-clean", "on" }, + { "flushbyasid", "on" }, + { "pause-filter", "on" }, + { "pfthreshold", "on" }, + { "v-vmsave-vmload", "on" }, + { "vgif", "on" }, + { "model-id", + "AMD EPYC-Milan-v3 Processor" }, + { /* end of list */ } + }, + .cache_info = &epyc_milan_v3_cache_info + }, { /* end of list */ } } }, @@ -5220,7 +6359,7 @@ static const X86CPUDefinition builtin_x86_defs[] = { CPUID_8000_0008_EBX_STIBP_ALWAYS_ON | CPUID_8000_0008_EBX_AMD_SSBD | CPUID_8000_0008_EBX_AMD_PSFD, .features[FEAT_8000_0021_EAX] = - CPUID_8000_0021_EAX_No_NESTED_DATA_BP | + CPUID_8000_0021_EAX_NO_NESTED_DATA_BP | CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING | CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE | CPUID_8000_0021_EAX_AUTO_IBRS, @@ -5255,6 +6394,259 @@ static const X86CPUDefinition builtin_x86_defs[] = { .xlevel = 0x80000022, .model_id = "AMD EPYC-Genoa Processor", .cache_info = &epyc_genoa_cache_info, + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .props = (PropValue[]) { + { "overflow-recov", "on" }, + { "succor", "on" }, + { "lbrv", "on" }, + { "tsc-scale", "on" }, + { "vmcb-clean", "on" }, + { "flushbyasid", "on" }, + { "pause-filter", "on" }, + { "pfthreshold", "on" }, + { "v-vmsave-vmload", "on" }, + { "vgif", "on" }, + { "fs-gs-base-ns", "on" }, + { "perfmon-v2", "on" }, + { "model-id", + "AMD EPYC-Genoa-v2 Processor" }, + { /* end of list */ } + }, + .cache_info = 
&epyc_genoa_v2_cache_info + }, + { /* end of list */ } + } + }, + { + .name = "YongFeng", + .level = 0x1F, + .vendor = CPUID_VENDOR_ZHAOXIN1, + .family = 7, + .model = 11, + .stepping = 3, + /* missing: CPUID_HT, CPUID_TM, CPUID_PBE */ + .features[FEAT_1_EDX] = + CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_ACPI | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | + CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | + CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | + CPUID_PSE | CPUID_DE | CPUID_VME | CPUID_FP87, + /* + * missing: CPUID_EXT_OSXSAVE, CPUID_EXT_XTPR, CPUID_EXT_TM2, + * CPUID_EXT_EST, CPUID_EXT_SMX, CPUID_EXT_VMX + */ + .features[FEAT_1_ECX] = + CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | + CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_TSC_DEADLINE_TIMER | + CPUID_EXT_POPCNT | CPUID_EXT_MOVBE | CPUID_EXT_X2APIC | + CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_PCID | + CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | + CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_BMI2 | + CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_FSGSBASE, + /* missing: CPUID_7_0_ECX_OSPKE */ + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_UMIP, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, + .features[FEAT_8000_0007_EDX] = CPUID_APM_INVTSC, + /* + * TODO: When the Linux kernel introduces other existing definitions + * for this leaf, remember to update the definitions here. 
+ */ + .features[FEAT_C000_0001_EDX] = + CPUID_C000_0001_EDX_PMM_EN | CPUID_C000_0001_EDX_PMM | + CPUID_C000_0001_EDX_PHE_EN | CPUID_C000_0001_EDX_PHE | + CPUID_C000_0001_EDX_ACE2 | + CPUID_C000_0001_EDX_XCRYPT_EN | CPUID_C000_0001_EDX_XCRYPT | + CPUID_C000_0001_EDX_XSTORE_EN | CPUID_C000_0001_EDX_XSTORE, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_ARCH_CAPABILITIES] = + MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | + MSR_ARCH_CAP_MDS_NO | MSR_ARCH_CAP_PSCHANGE_MC_NO | + MSR_ARCH_CAP_SSB_NO, + .features[FEAT_VMX_PROCBASED_CTLS] = + VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_INVLPG_EXITING | + VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | + VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR3_LOAD_EXITING | + VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | + VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + /* + * missing: VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING, + * VMX_SECONDARY_EXEC_TSC_SCALING + */ + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | + VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_ENABLE_VPID | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_WBINVD_EXITING | + VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | + VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | + VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_ENABLE_PML, + .features[FEAT_VMX_PINBASED_CTLS] = + VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | + VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | + VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_IA32_PAT | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + /* missing: VMX_VM_ENTRY_SMM, VMX_VM_ENTRY_DEACT_DUAL_MONITOR */ + .features[FEAT_VMX_ENTRY_CTLS] = + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_IA32_EFER, + /* + * missing: MSR_VMX_MISC_ACTIVITY_SHUTDOWN, + * MSR_VMX_MISC_ACTIVITY_WAIT_SIPI + */ + .features[FEAT_VMX_MISC] = + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_VMWRITE_VMEXIT, + /* missing: MSR_VMX_EPT_UC */ + .features[FEAT_VMX_EPT_VPID_CAPS] = + MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | + MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | + MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_AD_BITS | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID | + MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, + .features[FEAT_VMX_BASIC] = + MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, + 
.features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Zhaoxin YongFeng Processor", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .note = "with the correct model number", + .props = (PropValue[]) { + { "model", "0x5b" }, + { /* end of list */ } + } + }, + { + .version = 3, + .note = "with the cache model and 0x1f leaf", + .cache_info = &yongfeng_cache_info, + .props = (PropValue[]) { + { "x-force-cpuid-0x1f", "on" }, + { /* end of list */ }, + } + }, + { /* end of list */ } + } + }, + { + .name = "EPYC-Turin", + .level = 0xd, + .vendor = CPUID_VENDOR_AMD, + .family = 26, + .model = 0, + .stepping = 0, + .features[FEAT_1_ECX] = + CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | + CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | + CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_PCID | CPUID_EXT_CX16 | CPUID_EXT_FMA | + CPUID_EXT_SSSE3 | CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | + CPUID_EXT_SSE3, + .features[FEAT_1_EDX] = + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | + CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | + CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | + CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | + CPUID_VME | CPUID_FP87, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | + CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | + CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_AVX512F | + CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_AVX512IFMA | + CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB | + CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_SHA_NI | + CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | + CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | + CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | + CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | + CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57 | + CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_MOVDIRI | + CPUID_7_0_ECX_MOVDIR64B, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_AVX512_VP2INTERSECT, + .features[FEAT_7_1_EAX] = + CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_AVX512_BF16, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | + CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | + CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | + CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0007_EBX] = + CPUID_8000_0007_EBX_OVERFLOW_RECOV | CPUID_8000_0007_EBX_SUCCOR, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | + CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | + CPUID_8000_0008_EBX_IBRS | CPUID_8000_0008_EBX_STIBP | + CPUID_8000_0008_EBX_STIBP_ALWAYS_ON | + CPUID_8000_0008_EBX_AMD_SSBD | CPUID_8000_0008_EBX_AMD_PSFD, + .features[FEAT_8000_0021_EAX] = + CPUID_8000_0021_EAX_NO_NESTED_DATA_BP | + CPUID_8000_0021_EAX_FS_GS_BASE_NS | + CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING | + CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE | + CPUID_8000_0021_EAX_AUTO_IBRS | CPUID_8000_0021_EAX_PREFETCHI | + CPUID_8000_0021_EAX_SBPB | 
CPUID_8000_0021_EAX_IBPB_BRTYPE | + CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO, + .features[FEAT_8000_0022_EAX] = + CPUID_8000_0022_EAX_PERFMON_V2, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, + .features[FEAT_SVM] = + CPUID_SVM_NPT | CPUID_SVM_LBRV | CPUID_SVM_NRIPSAVE | + CPUID_SVM_TSCSCALE | CPUID_SVM_VMCBCLEAN | CPUID_SVM_FLUSHASID | + CPUID_SVM_PAUSEFILTER | CPUID_SVM_PFTHRESHOLD | + CPUID_SVM_V_VMSAVE_VMLOAD | CPUID_SVM_VGIF | + CPUID_SVM_VNMI | CPUID_SVM_SVME_ADDR_CHK, + .xlevel = 0x80000022, + .model_id = "AMD EPYC-Turin Processor", + .cache_info = &epyc_turin_cache_info, }, }; @@ -5299,10 +6691,9 @@ static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) return v; } -static Property max_x86_cpu_properties[] = { +static const Property max_x86_cpu_properties[] = { DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), - DEFINE_PROP_END_OF_LIST() }; static void max_x86_cpu_realize(DeviceState *dev, Error **errp) @@ -5324,13 +6715,14 @@ static void max_x86_cpu_realize(DeviceState *dev, Error **errp) x86_cpu_realizefn(dev, errp); } -static void max_x86_cpu_class_init(ObjectClass *oc, void *data) +static void max_x86_cpu_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); X86CPUClass *xcc = X86_CPU_CLASS(oc); xcc->ordering = 9; + xcc->max_features = true; xcc->model_description = "Enables all features supported by the accelerator in the current host"; @@ -5341,22 +6733,21 @@ static void max_x86_cpu_class_init(ObjectClass *oc, void *data) static void max_x86_cpu_initfn(Object *obj) { X86CPU *cpu = X86_CPU(obj); - - /* We can't fill the features array here because we don't know yet if - * "migratable" is true or false. 
- */ - cpu->max_features = true; - object_property_set_bool(OBJECT(cpu), "pmu", true, &error_abort); + CPUX86State *env = &cpu->env; /* - * these defaults are used for TCG and all other accelerators - * besides KVM and HVF, which overwrite these values + * these defaults are used for TCG, other accelerators have overwritten + * these values */ - object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD, - &error_abort); - object_property_set_str(OBJECT(cpu), "model-id", - "QEMU TCG CPU version " QEMU_HW_VERSION, - &error_abort); + if (!env->cpuid_vendor1) { + object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD, + &error_abort); + } + if (!env->cpuid_model[0]) { + object_property_set_str(OBJECT(cpu), "model-id", + "QEMU TCG CPU version " QEMU_HW_VERSION, + &error_abort); + } } static const TypeInfo max_x86_cpu_type_info = { @@ -5366,7 +6757,7 @@ static const TypeInfo max_x86_cpu_type_info = { .class_init = max_x86_cpu_class_init, }; -static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) +static char *feature_word_description(FeatureWordInfo *f) { assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); @@ -5375,11 +6766,15 @@ static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) { const char *reg = get_register_name_32(f->cpuid.reg); assert(reg); - return g_strdup_printf("CPUID.%02XH:%s", - f->cpuid.eax, reg); + if (!f->cpuid.needs_ecx) { + return g_strdup_printf("CPUID[eax=%02Xh].%s", f->cpuid.eax, reg); + } else { + return g_strdup_printf("CPUID[eax=%02Xh,ecx=%02Xh].%s", + f->cpuid.eax, f->cpuid.ecx, reg); + } } case MSR_FEATURE_WORD: - return g_strdup_printf("MSR(%02XH)", + return g_strdup_printf("MSR(%02Xh)", f->msr.index); } @@ -5399,12 +6794,13 @@ static bool x86_cpu_have_filtered_features(X86CPU *cpu) return false; } -static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, - const char *verbose_prefix) +void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, + const char *verbose_prefix) { CPUX86State *env = &cpu->env; FeatureWordInfo *f = &feature_word_info[w]; int i; + g_autofree char *feat_word_str = feature_word_description(f); if (!cpu->force_features) { env->features[w] &= ~mask; @@ -5417,7 +6813,35 @@ static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, for (i = 0; i < 64; ++i) { if ((1ULL << i) & mask) { - g_autofree char *feat_word_str = feature_word_description(f, i); + warn_report("%s: %s%s%s [bit %d]", + verbose_prefix, + feat_word_str, + f->feat_names[i] ? "." : "", + f->feat_names[i] ? 
f->feat_names[i] : "", i); + } + } +} + +void mark_forced_on_features(X86CPU *cpu, FeatureWord w, uint64_t mask, + const char *verbose_prefix) +{ + CPUX86State *env = &cpu->env; + FeatureWordInfo *f = &feature_word_info[w]; + int i; + + if (!cpu->force_features) { + env->features[w] |= mask; + } + + cpu->forced_on_features[w] |= mask; + + if (!verbose_prefix) { + return; + } + + for (i = 0; i < 64; ++i) { + if ((1ULL << i) & mask) { + g_autofree char *feat_word_str = feature_word_description(f); warn_report("%s: %s%s%s [bit %d]", verbose_prefix, feat_word_str, @@ -5433,13 +6857,10 @@ static void x86_cpuid_version_get_family(Object *obj, Visitor *v, { X86CPU *cpu = X86_CPU(obj); CPUX86State *env = &cpu->env; - int64_t value; + uint64_t value; - value = (env->cpuid_version >> 8) & 0xf; - if (value == 0xf) { - value += (env->cpuid_version >> 20) & 0xff; - } - visit_type_int(v, name, &value, errp); + value = x86_cpu_family(env->cpuid_version); + visit_type_uint64(v, name, &value, errp); } static void x86_cpuid_version_set_family(Object *obj, Visitor *v, @@ -5448,16 +6869,15 @@ static void x86_cpuid_version_set_family(Object *obj, Visitor *v, { X86CPU *cpu = X86_CPU(obj); CPUX86State *env = &cpu->env; - const int64_t min = 0; - const int64_t max = 0xff + 0xf; - int64_t value; + const uint64_t max = 0xff + 0xf; + uint64_t value; - if (!visit_type_int(v, name, &value, errp)) { + if (!visit_type_uint64(v, name, &value, errp)) { return; } - if (value < min || value > max) { - error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? name : "null", value, min, max); + if (value > max) { + error_setg(errp, "parameter '%s' can be at most %" PRIu64, + name ? name : "null", max); return; } @@ -5475,11 +6895,10 @@ static void x86_cpuid_version_get_model(Object *obj, Visitor *v, { X86CPU *cpu = X86_CPU(obj); CPUX86State *env = &cpu->env; - int64_t value; + uint64_t value; - value = (env->cpuid_version >> 4) & 0xf; - value |= ((env->cpuid_version >> 16) & 0xf) << 4; - visit_type_int(v, name, &value, errp); + value = x86_cpu_model(env->cpuid_version); + visit_type_uint64(v, name, &value, errp); } static void x86_cpuid_version_set_model(Object *obj, Visitor *v, @@ -5488,16 +6907,15 @@ static void x86_cpuid_version_set_model(Object *obj, Visitor *v, { X86CPU *cpu = X86_CPU(obj); CPUX86State *env = &cpu->env; - const int64_t min = 0; - const int64_t max = 0xff; - int64_t value; + const uint64_t max = 0xff; + uint64_t value; - if (!visit_type_int(v, name, &value, errp)) { + if (!visit_type_uint64(v, name, &value, errp)) { return; } - if (value < min || value > max) { - error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? name : "null", value, min, max); + if (value > max) { + error_setg(errp, "parameter '%s' can be at most %" PRIu64, + name ? 
name : "null", max); return; } @@ -5511,10 +6929,10 @@ static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, { X86CPU *cpu = X86_CPU(obj); CPUX86State *env = &cpu->env; - int64_t value; + uint64_t value; - value = env->cpuid_version & 0xf; - visit_type_int(v, name, &value, errp); + value = x86_cpu_stepping(env->cpuid_version); + visit_type_uint64(v, name, &value, errp); } static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, @@ -5523,16 +6941,15 @@ static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, { X86CPU *cpu = X86_CPU(obj); CPUX86State *env = &cpu->env; - const int64_t min = 0; - const int64_t max = 0xf; - int64_t value; + const uint64_t max = 0xf; + uint64_t value; - if (!visit_type_int(v, name, &value, errp)) { + if (!visit_type_uint64(v, name, &value, errp)) { return; } - if (value < min || value > max) { - error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? name : "null", value, min, max); + if (value > max) { + error_setg(errp, "parameter '%s' can be at most %" PRIu64, + name ? name : "null", max); return; } @@ -5582,11 +6999,11 @@ static char *x86_cpuid_get_model_id(Object *obj, Error **errp) char *value; int i; - value = g_malloc(48 + 1); - for (i = 0; i < 48; i++) { + value = g_malloc(CPUID_MODEL_ID_SZ + 1); + for (i = 0; i < CPUID_MODEL_ID_SZ; i++) { value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); } - value[48] = '\0'; + value[CPUID_MODEL_ID_SZ] = '\0'; return value; } @@ -5601,7 +7018,7 @@ static void x86_cpuid_set_model_id(Object *obj, const char *model_id, model_id = ""; } len = strlen(model_id); - memset(env->cpuid_model, 0, 48); + memset(env->cpuid_model, 0, CPUID_MODEL_ID_SZ); for (i = 0; i < 48; i++) { if (i >= len) { c = '\0'; @@ -5626,16 +7043,15 @@ static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { X86CPU *cpu = X86_CPU(obj); - const int64_t min = 0; const int64_t max = INT64_MAX; int64_t value; if (!visit_type_int(v, name, &value, errp)) { return; } - if (value < min || value > max) { - error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? name : "null", value, min, max); + if (value < 0 || value > max) { + error_setg(errp, "parameter '%s' can be at most %" PRId64, + name ? name : "null", max); return; } @@ -5814,7 +7230,7 @@ static void x86_cpu_parse_featurestr(const char *typename, char *features, } } -static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); +static bool x86_cpu_filter_features(X86CPU *cpu, bool verbose); /* Build a list with the name of all features on a feature word array */ static void x86_cpu_list_feature_names(FeatureWordArray features, @@ -5865,7 +7281,7 @@ static void listflags(GList *features) } /* Sort alphabetically by type name, respecting X86CPUClass::ordering. 
*/ -static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) +static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b, gpointer d) { ObjectClass *class_a = (ObjectClass *)a; ObjectClass *class_b = (ObjectClass *)b; @@ -5886,7 +7302,7 @@ static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) static GSList *get_sorted_cpu_model_list(void) { GSList *list = object_class_get_list(TYPE_X86_CPU, false); - list = g_slist_sort(list, x86_cpu_list_compare); + list = g_slist_sort_with_data(list, x86_cpu_list_compare, NULL); return list; } @@ -5932,7 +7348,7 @@ static void x86_cpu_list_entry(gpointer data, gpointer user_data) desc = g_strdup_printf("%s [%s]", model_id, cc->model->note); } if (!desc) { - desc = g_strdup_printf("%s", model_id); + desc = g_strdup(model_id); } if (cc->model && cc->model->cpudef->deprecation_note) { @@ -5943,8 +7359,13 @@ static void x86_cpu_list_entry(gpointer data, gpointer user_data) qemu_printf(" %-20s %s\n", name, desc); } +static gint strcmp_wrap(gconstpointer a, gconstpointer b, gpointer d) +{ + return strcmp(a, b); +} + /* list available CPU models and flags */ -void x86_cpu_list(void) +static void x86_cpu_list(void) { int i, j; GSList *list; @@ -5965,7 +7386,7 @@ void x86_cpu_list(void) } } - names = g_list_sort(names, (GCompareFunc)strcmp); + names = g_list_sort_with_data(names, strcmp_wrap, NULL); qemu_printf("\nRecognized CPUID flags:\n"); listflags(names); @@ -6124,7 +7545,7 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w) r &= ~unavail; if (cpu && cpu->migratable) { - r &= x86_cpu_get_migratable_flags(w); + r &= x86_cpu_get_migratable_flags(cpu, w); } return r; } @@ -6202,7 +7623,7 @@ void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) * Only for builtin_x86_defs models initialized with x86_register_cpudef_types. */ -static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) +static void x86_cpu_apply_version_props(X86CPU *cpu, const X86CPUModel *model) { const X86CPUVersionDefinition *vdef; X86CPUVersion version = x86_cpu_model_resolve_version(model); @@ -6231,7 +7652,7 @@ static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) } static const CPUCaches *x86_cpu_get_versioned_cache_info(X86CPU *cpu, - X86CPUModel *model) + const X86CPUModel *model) { const X86CPUVersionDefinition *vdef; X86CPUVersion version = x86_cpu_model_resolve_version(model); @@ -6259,7 +7680,7 @@ static const CPUCaches *x86_cpu_get_versioned_cache_info(X86CPU *cpu, * Load data from X86CPUDefinition into a X86CPU object. * Only for builtin_x86_defs models initialized with x86_register_cpudef_types. 
*/ -static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) +static void x86_cpu_load_model(X86CPU *cpu, const X86CPUModel *model) { const X86CPUDefinition *def = model->cpudef; CPUX86State *env = &cpu->env; @@ -6305,6 +7726,9 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) */ object_property_set_str(OBJECT(cpu), "vendor", def->vendor, &error_abort); + object_property_set_uint(OBJECT(cpu), "avx10-version", def->avx10_version, + &error_abort); + x86_cpu_apply_version_props(cpu, model); /* @@ -6324,9 +7748,9 @@ static const gchar *x86_gdb_arch_name(CPUState *cs) #endif } -static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) +static void x86_cpu_cpudef_class_init(ObjectClass *oc, const void *data) { - X86CPUModel *model = data; + const X86CPUModel *model = data; X86CPUClass *xcc = X86_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); @@ -6345,7 +7769,7 @@ static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) .class_data = model, }; - type_register(&ti); + type_register_static(&ti); } @@ -6413,18 +7837,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, CPUState *cs = env_cpu(env); uint32_t limit; uint32_t signature[3]; - X86CPUTopoInfo topo_info; - uint32_t cores_per_pkg; + X86CPUTopoInfo *topo_info = &env->topo_info; uint32_t threads_per_pkg; - topo_info.dies_per_pkg = env->nr_dies; - topo_info.modules_per_die = env->nr_modules; - topo_info.cores_per_module = cs->nr_cores / env->nr_dies / env->nr_modules; - topo_info.threads_per_core = cs->nr_threads; - - cores_per_pkg = topo_info.cores_per_module * topo_info.modules_per_die * - topo_info.dies_per_pkg; - threads_per_pkg = cores_per_pkg * topo_info.threads_per_core; + threads_per_pkg = x86_threads_per_pkg(topo_info); /* Calculate & apply limits for different index ranges */ if (index >= 0xC0000000) { @@ -6462,15 +7878,34 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } *edx = env->features[FEAT_1_EDX]; if (threads_per_pkg > 1) { - *ebx |= threads_per_pkg << 16; - *edx |= CPUID_HT; - } - if (!cpu->enable_pmu) { - *ecx &= ~CPUID_EXT_PDCM; + uint32_t num; + + /* + * For CPUID.01H.EBX[Bits 23-16], AMD requires logical processor + * count, but Intel needs maximum number of addressable IDs for + * logical processors per package. + */ + if ((IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))) { + num = 1 << apicid_pkg_offset(topo_info); + } else { + num = threads_per_pkg; + } + + /* Fixup overflow: max value for bits 23-16 is 255. 
*/ + *ebx |= MIN(num, 255) << 16; } break; - case 2: - /* cache info: needed for Pentium Pro compatibility */ + case 2: { /* cache info: needed for Pentium Pro compatibility */ + const CPUCaches *caches; + + if (env->enable_legacy_cpuid2_cache) { + caches = &legacy_intel_cpuid2_cache_info; + } else if (env->enable_legacy_vendor_cache) { + caches = &legacy_intel_cache_info; + } else { + caches = &env->cache_info; + } + if (cpu->cache_info_passthrough) { x86_cpu_get_cache_cpuid(index, 0, eax, ebx, ecx, edx); break; @@ -6478,18 +7913,18 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *eax = *ebx = *ecx = *edx = 0; break; } - *eax = 1; /* Number of CPUID[EAX=2] calls required */ - *ebx = 0; - if (!cpu->enable_l3_cache) { - *ecx = 0; + encode_cache_cpuid2(cpu, caches, eax, ebx, ecx, edx); + break; + } + case 4: { + const CPUCaches *caches; + + if (env->enable_legacy_vendor_cache) { + caches = &legacy_intel_cache_info; } else { - *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); + caches = &env->cache_info; } - *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | - (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | - (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); - break; - case 4: + /* cache info: needed for Core compatibility */ if (cpu->cache_info_passthrough) { x86_cpu_get_cache_cpuid(index, count, eax, ebx, ecx, edx); @@ -6501,13 +7936,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14); *eax &= ~0xFC000000; - *eax |= max_core_ids_in_package(&topo_info) << 26; + *eax |= MIN(max_core_ids_in_package(topo_info), 63) << 26; if (host_vcpus_per_cache > threads_per_pkg) { *eax &= ~0x3FFC000; /* Share the cache at package level. 
*/ - *eax |= max_thread_ids_for_cache(&topo_info, - CPU_TOPO_LEVEL_PACKAGE) << 14; + *eax |= MIN(max_thread_ids_for_cache(topo_info, + CPU_TOPOLOGY_LEVEL_SOCKET), 4095) << 14; } } } else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) { @@ -6517,30 +7952,26 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, switch (count) { case 0: /* L1 dcache info */ - encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, - &topo_info, + encode_cache_cpuid4(caches->l1d_cache, topo_info, eax, ebx, ecx, edx); if (!cpu->l1_cache_per_core) { *eax &= ~MAKE_64BIT_MASK(14, 12); } break; case 1: /* L1 icache info */ - encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, - &topo_info, + encode_cache_cpuid4(caches->l1i_cache, topo_info, eax, ebx, ecx, edx); if (!cpu->l1_cache_per_core) { *eax &= ~MAKE_64BIT_MASK(14, 12); } break; case 2: /* L2 cache info */ - encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, - &topo_info, + encode_cache_cpuid4(caches->l2_cache, topo_info, eax, ebx, ecx, edx); break; case 3: /* L3 cache info */ if (cpu->enable_l3_cache) { - encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, - &topo_info, + encode_cache_cpuid4(caches->l3_cache, topo_info, eax, ebx, ecx, edx); break; } @@ -6551,6 +7982,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } } break; + } case 5: /* MONITOR/MWAIT Leaf */ *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ @@ -6578,9 +8010,9 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ } else if (count == 1) { *eax = env->features[FEAT_7_1_EAX]; + *ecx = env->features[FEAT_7_1_ECX]; *edx = env->features[FEAT_7_1_EDX]; *ebx = 0; - *ecx = 0; } else if (count == 2) { *edx = env->features[FEAT_7_2_EDX]; *eax = 0; @@ -6623,12 +8055,12 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, switch (count) { case 0: - *eax = apicid_core_offset(&topo_info); - *ebx = topo_info.threads_per_core; + *eax = apicid_core_offset(topo_info); + *ebx = topo_info->threads_per_core; *ecx |= CPUID_B_ECX_TOPO_LEVEL_SMT << 8; break; case 1: - *eax = apicid_pkg_offset(&topo_info); + *eax = apicid_pkg_offset(topo_info); *ebx = threads_per_pkg; *ecx |= CPUID_B_ECX_TOPO_LEVEL_CORE << 8; break; @@ -6641,21 +8073,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, assert(!(*eax & ~0x1f)); *ebx &= 0xffff; /* The count doesn't need to be reliable. 
*/ break; - case 0x1C: - if (cpu->enable_pmu && (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) { - x86_cpu_get_supported_cpuid(0x1C, 0, eax, ebx, ecx, edx); - *edx = 0; - } - break; - case 0x1F: - /* V2 Extended Topology Enumeration Leaf */ - if (!x86_has_extended_topo(env->avail_cpu_topo)) { - *eax = *ebx = *ecx = *edx = 0; - break; - } - - encode_topo_cpuid1f(env, count, &topo_info, eax, ebx, ecx, edx); - break; case 0xD: { /* Processor Extended State */ *eax = 0; @@ -6796,6 +8213,12 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } break; } + case 0x1C: + if (cpu->enable_pmu && (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) { + x86_cpu_get_supported_cpuid(0x1C, 0, eax, ebx, ecx, edx); + *edx = 0; + } + break; case 0x1D: { /* AMX TILE, for now hardcoded for Sapphire Rapids*/ *eax = 0; @@ -6833,6 +8256,25 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } break; } + case 0x1F: + /* V2 Extended Topology Enumeration Leaf */ + if (!x86_has_cpuid_0x1f(cpu)) { + *eax = *ebx = *ecx = *edx = 0; + break; + } + + encode_topo_cpuid1f(env, count, topo_info, eax, ebx, ecx, edx); + break; + case 0x24: { + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + if ((env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) && count == 0) { + *ebx = env->features[FEAT_24_0_EBX] | env->avx10_version; + } + break; + } case 0x40000000: /* * CPUID code in kvm_arch_init_vcpu() ignores stuff @@ -6859,9 +8301,15 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, break; case 0x80000000: *eax = env->cpuid_xlevel; - *ebx = env->cpuid_vendor1; - *edx = env->cpuid_vendor2; - *ecx = env->cpuid_vendor3; + + if (cpu->vendor_cpuid_only_v2 && + (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))) { + *ebx = *ecx = *edx = 0; + } else { + *ebx = env->cpuid_vendor1; + *edx = env->cpuid_vendor2; + *ecx = env->cpuid_vendor3; + } break; case 0x80000001: *eax = env->cpuid_version; @@ -6869,18 +8317,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *ecx = env->features[FEAT_8000_0001_ECX]; *edx = env->features[FEAT_8000_0001_EDX]; - /* The Linux kernel checks for the CMPLegacy bit and - * discards multiple thread information if it is set. - * So don't set it here for Intel to make Linux guests happy. 
- */ - if (threads_per_pkg > 1) { - if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || - env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || - env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { - *ecx |= 1 << 1; /* CmpLegacy bit */ - } - } - if (tcg_enabled() && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && + if (tcg_enabled() && IS_INTEL_CPU(env) && !(env->hflags & HF_LMA_MASK)) { *edx &= ~CPUID_EXT2_SYSCALL; } @@ -6893,41 +8330,78 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; break; - case 0x80000005: - /* cache info (L1 cache) */ + case 0x80000005: { + /* cache info (L1 cache/TLB Associativity Field) */ + const CPUCaches *caches; + + if (env->enable_legacy_vendor_cache) { + caches = &legacy_amd_cache_info; + } else { + caches = &env->cache_info; + } + if (cpu->cache_info_passthrough) { x86_cpu_get_cache_cpuid(index, 0, eax, ebx, ecx, edx); break; } + + if (cpu->vendor_cpuid_only_v2 && IS_INTEL_CPU(env)) { + *eax = *ebx = *ecx = *edx = 0; + break; + } + *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); - *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); - *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); + *ecx = encode_cache_cpuid80000005(caches->l1d_cache); + *edx = encode_cache_cpuid80000005(caches->l1i_cache); break; - case 0x80000006: - /* cache info (L2 cache) */ + } + case 0x80000006: { /* cache info (L2 cache/TLB/L3 cache) */ + const CPUCaches *caches; + + if (env->enable_legacy_vendor_cache) { + caches = &legacy_amd_cache_info; + } else { + caches = &env->cache_info; + } + if (cpu->cache_info_passthrough) { x86_cpu_get_cache_cpuid(index, 0, eax, ebx, ecx, edx); break; } - *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | + + if (cpu->vendor_cpuid_only_v2 && + (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))) { + *eax = *ebx = 0; + encode_cache_cpuid80000006(caches->l2_cache, + NULL, ecx, edx); + break; + } + + *eax = (X86_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | (L2_DTLB_2M_ENTRIES << 16) | - (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | + (X86_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | (L2_ITLB_2M_ENTRIES); - *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | + *ebx = (X86_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | (L2_DTLB_4K_ENTRIES << 16) | - (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | + (X86_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | (L2_ITLB_4K_ENTRIES); - encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, + + encode_cache_cpuid80000006(caches->l2_cache, cpu->enable_l3_cache ? - env->cache_info_amd.l3_cache : NULL, + caches->l3_cache : NULL, ecx, edx); break; + } case 0x80000007: *eax = 0; - *ebx = env->features[FEAT_8000_0007_EBX]; + if (cpu->vendor_cpuid_only_v2 && IS_INTEL_CPU(env)) { + *ebx = 0; + } else { + *ebx = env->features[FEAT_8000_0007_EBX]; + } *ecx = 0; *edx = env->features[FEAT_8000_0007_EDX]; break; @@ -6940,6 +8414,17 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *eax |= (cpu->guest_phys_bits << 16); } *ebx = env->features[FEAT_8000_0008_EBX]; + + /* + * Don't emulate Bits [7:0] & Bits [15:12] for Intel/Zhaoxin, since + * they're using 0x1f leaf. 
+ */ + if (cpu->vendor_cpuid_only_v2 && + (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))) { + *ecx = *edx = 0; + break; + } + if (threads_per_pkg > 1) { /* * Bits 15:12 is "The number of bits in the initial @@ -6947,7 +8432,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, * thread ID within a package". * Bits 7:0 is "The number of threads in the package is NC+1" */ - *ecx = (apicid_pkg_offset(&topo_info) << 12) | + *ecx = (apicid_pkg_offset(topo_info) << 12) | (threads_per_pkg - 1); } else { *ecx = 0; @@ -6975,20 +8460,20 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } switch (count) { case 0: /* L1 dcache info */ - encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, - &topo_info, eax, ebx, ecx, edx); + encode_cache_cpuid8000001d(env->cache_info.l1d_cache, + topo_info, eax, ebx, ecx, edx); break; case 1: /* L1 icache info */ - encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, - &topo_info, eax, ebx, ecx, edx); + encode_cache_cpuid8000001d(env->cache_info.l1i_cache, + topo_info, eax, ebx, ecx, edx); break; case 2: /* L2 cache info */ - encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, - &topo_info, eax, ebx, ecx, edx); + encode_cache_cpuid8000001d(env->cache_info.l2_cache, + topo_info, eax, ebx, ecx, edx); break; case 3: /* L3 cache info */ - encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, - &topo_info, eax, ebx, ecx, edx); + encode_cache_cpuid8000001d(env->cache_info.l3_cache, + topo_info, eax, ebx, ecx, edx); break; default: /* end of info */ *eax = *ebx = *ecx = *edx = 0; @@ -7000,7 +8485,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, break; case 0x8000001E: if (cpu->core_id <= 255) { - encode_topo_cpuid8000001e(cpu, &topo_info, eax, ebx, ecx, edx); + encode_topo_cpuid8000001e(cpu, topo_info, eax, ebx, ecx, edx); } else { *eax = 0; *ebx = 0; @@ -7008,6 +8493,31 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *edx = 0; } break; + case 0x8000001F: + *eax = *ebx = *ecx = *edx = 0; + if (sev_enabled()) { + *eax = 0x2; + *eax |= sev_es_enabled() ? 0x8 : 0; + *eax |= sev_snp_enabled() ? 0x10 : 0; + *ebx = sev_get_cbit_position() & 0x3f; /* EBX[5:0] */ + *ebx |= (sev_get_reduced_phys_bits() & 0x3f) << 6; /* EBX[11:6] */ + } + break; + case 0x80000021: + *eax = *ebx = *ecx = *edx = 0; + *eax = env->features[FEAT_8000_0021_EAX]; + *ebx = env->features[FEAT_8000_0021_EBX]; + break; + case 0x80000022: + *eax = *ebx = *ecx = *edx = 0; + /* AMD Extended Performance Monitoring and Debug */ + if (kvm_enabled() && cpu->enable_pmu && + (env->features[FEAT_8000_0022_EAX] & CPUID_8000_0022_EAX_PERFMON_V2)) { + *eax |= CPUID_8000_0022_EAX_PERFMON_V2; + *ebx |= kvm_arch_get_supported_cpuid(cs->kvm_state, index, count, + R_EBX) & 0xf; + } + break; case 0xC0000000: *eax = env->cpuid_xlevel2; *ebx = 0; @@ -7030,20 +8540,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *ecx = 0; *edx = 0; break; - case 0x8000001F: - *eax = *ebx = *ecx = *edx = 0; - if (sev_enabled()) { - *eax = 0x2; - *eax |= sev_es_enabled() ? 0x8 : 0; - *eax |= sev_snp_enabled() ? 
0x10 : 0; - *ebx = sev_get_cbit_position() & 0x3f; /* EBX[5:0] */ - *ebx |= (sev_get_reduced_phys_bits() & 0x3f) << 6; /* EBX[11:6] */ - } - break; - case 0x80000021: - *eax = env->features[FEAT_8000_0021_EAX]; - *ebx = *ecx = *edx = 0; - break; default: /* reserved values: zero */ *eax = 0; @@ -7065,6 +8561,23 @@ static void x86_cpu_set_sgxlepubkeyhash(CPUX86State *env) #endif } +static bool cpuid_has_xsave_feature(CPUX86State *env, const ExtSaveArea *esa) +{ + if (!esa->size) { + return false; + } + + if (env->features[esa->feature] & esa->bits) { + return true; + } + if (esa->feature == FEAT_7_0_EBX && esa->bits == CPUID_7_0_EBX_AVX512F + && (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10)) { + return true; + } + + return false; +} + static void x86_cpu_reset_hold(Object *obj, ResetType type) { CPUState *cs = CPU(obj); @@ -7081,6 +8594,10 @@ static void x86_cpu_reset_hold(Object *obj, ResetType type) memset(env, 0, offsetof(CPUX86State, end_reset_fields)); + if (tcg_enabled()) { + cpu_init_fp_statuses(env); + } + env->old_exception = -1; /* init to reset state */ @@ -7173,7 +8690,7 @@ static void x86_cpu_reset_hold(Object *obj, ResetType type) if (!((1 << i) & CPUID_XSTATE_XCR0_MASK)) { continue; } - if (env->features[esa->feature] & esa->bits) { + if (cpuid_has_xsave_feature(env, esa)) { xcr0 |= 1ull << i; } } @@ -7242,7 +8759,7 @@ static void mce_init(X86CPU *cpu) CPUX86State *cenv = &cpu->env; unsigned int bank; - if (((cenv->cpuid_version >> 8) & 0xf) >= 6 + if (x86_cpu_family(cenv->cpuid_version) >= 6 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == (CPUID_MCE | CPUID_MCA)) { cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | @@ -7311,7 +8828,7 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu) mask = 0; for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { const ExtSaveArea *esa = &x86_ext_save_areas[i]; - if (env->features[esa->feature] & esa->bits) { + if (cpuid_has_xsave_feature(env, esa)) { mask |= (1ULL << i); } } @@ -7370,6 +8887,7 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu) */ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) { + X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); CPUX86State *env = &cpu->env; FeatureWord w; int i; @@ -7389,12 +8907,12 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) } } - /*TODO: Now cpu->max_features doesn't overwrite features + /* TODO: Now xcc->max_features doesn't overwrite features * set using QOM properties, and we can convert * plus_features & minus_features to global properties * inside x86_cpu_parse_featurestr() too. */ - if (cpu->max_features) { + if (xcc->max_features) { for (w = 0; w < FEATURE_WORDS; w++) { /* Override only features that weren't set explicitly * by the user. @@ -7404,6 +8922,31 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) ~env->user_features[w] & ~feature_word_info[w].no_autoenable_flags; } + + if ((env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) && !env->avx10_version) { + uint32_t eax, ebx, ecx, edx; + x86_cpu_get_supported_cpuid(0x24, 0, &eax, &ebx, &ecx, &edx); + env->avx10_version = ebx & 0xff; + } + } + + if (x86_threads_per_pkg(&env->topo_info) > 1) { + env->features[FEAT_1_EDX] |= CPUID_HT; + + /* + * The Linux kernel checks for the CMPLegacy bit and + * discards multiple thread information if it is set. + * So don't set it here for Intel (and other processors + * following Intel's behavior) to make Linux guests happy. 
+ */ + if (!IS_INTEL_CPU(env) && !IS_ZHAOXIN_CPU(env)) { + env->features[FEAT_8000_0001_ECX] |= CPUID_EXT3_CMP_LEG; + } + } + + /* PDCM is fixed1 bit for TDX */ + if (!cpu->enable_pmu && !is_tdx_vm()) { + env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM; } for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) { @@ -7434,6 +8977,7 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); + x86_cpu_adjust_feat_level(cpu, FEAT_7_1_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_7_2_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); @@ -7462,11 +9006,16 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) * cpu->vendor_cpuid_only has been unset for compatibility with older * machine types. */ - if (x86_has_extended_topo(env->avail_cpu_topo) && + if (x86_has_cpuid_0x1f(cpu) && (IS_INTEL_CPU(env) || !cpu->vendor_cpuid_only)) { x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F); } + /* Advanced Vector Extensions 10 (AVX10) requires CPUID[0x24] */ + if (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) { + x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x24); + } + /* SVM requires CPUID[0x8000000A] */ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); @@ -7510,13 +9059,17 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) * Finishes initialization of CPUID data, filters CPU feature * words based on host availability of each feature. * - * Returns: 0 if all flags are supported by the host, non-zero otherwise. + * Returns: true if any flag is not supported by the host, false otherwise. */ -static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) +static bool x86_cpu_filter_features(X86CPU *cpu, bool verbose) { CPUX86State *env = &cpu->env; FeatureWord w; const char *prefix = NULL; + bool have_filtered_features; + + uint32_t eax_0, ebx_0, ecx_0, edx_0; + uint32_t eax_1, ebx_1, ecx_1, edx_1; if (verbose) { prefix = accel_uses_host_cpuid() @@ -7538,13 +9091,10 @@ static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) */ if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && kvm_enabled()) { - uint32_t eax_0, ebx_0, ecx_0, edx_0_unused; - uint32_t eax_1, ebx_1, ecx_1_unused, edx_1_unused; - x86_cpu_get_supported_cpuid(0x14, 0, - &eax_0, &ebx_0, &ecx_0, &edx_0_unused); + &eax_0, &ebx_0, &ecx_0, &edx_0); x86_cpu_get_supported_cpuid(0x14, 1, - &eax_1, &ebx_1, &ecx_1_unused, &edx_1_unused); + &eax_1, &ebx_1, &ecx_1, &edx_1); if (!eax_0 || ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || @@ -7564,6 +9114,30 @@ static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix); } } + + have_filtered_features = x86_cpu_have_filtered_features(cpu); + + if (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) { + x86_cpu_get_supported_cpuid(0x24, 0, + &eax_0, &ebx_0, &ecx_0, &edx_0); + uint8_t version = ebx_0 & 0xff; + + if (version < env->avx10_version) { + if (prefix) { + warn_report("%s: avx10.%d. 
Adjust to avx10.%d", + prefix, env->avx10_version, version); + } + env->avx10_version = version; + have_filtered_features = true; + } + } else if (env->avx10_version) { + if (prefix) { + warn_report("%s: avx10.%d.", prefix, env->avx10_version); + } + have_filtered_features = true; + } + + return have_filtered_features; } static void x86_cpu_hyperv_realize(X86CPU *cpu) @@ -7595,6 +9169,52 @@ static void x86_cpu_hyperv_realize(X86CPU *cpu) cpu->hyperv_limits[2] = 0; } +#ifndef CONFIG_USER_ONLY +static bool x86_cpu_update_smp_cache_topo(MachineState *ms, X86CPU *cpu, + Error **errp) +{ + CPUX86State *env = &cpu->env; + CpuTopologyLevel level; + + level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D); + if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) { + env->cache_info.l1d_cache->share_level = level; + } else { + machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D, + env->cache_info.l1d_cache->share_level); + } + + level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I); + if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) { + env->cache_info.l1i_cache->share_level = level; + } else { + machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I, + env->cache_info.l1i_cache->share_level); + } + + level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2); + if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) { + env->cache_info.l2_cache->share_level = level; + } else { + machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2, + env->cache_info.l2_cache->share_level); + } + + level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3); + if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) { + env->cache_info.l3_cache->share_level = level; + } else { + machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3, + env->cache_info.l3_cache->share_level); + } + + if (!machine_check_smp_cache(ms, errp)) { + return false; + } + return true; +} +#endif + static void x86_cpu_realizefn(DeviceState *dev, Error **errp) { CPUState *cs = CPU(dev); @@ -7609,6 +9229,16 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) tcg_cflags_set(cs, CF_PCREL); #endif + /* + * x-vendor-cpuid-only and v2 should be initernal only. But + * QEMU doesn't support "internal" property. + */ + if (!cpu->vendor_cpuid_only && cpu->vendor_cpuid_only_v2) { + error_setg(errp, "x-vendor-cpuid-only-v2 property " + "depends on x-vendor-cpuid-only"); + return; + } + if (cpu->apic_id == UNASSIGNED_APIC_ID) { error_setg(errp, "apic-id property was not initialized properly"); return; @@ -7661,14 +9291,14 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) } } - x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid); - - if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) { - error_setg(&local_err, - accel_uses_host_cpuid() ? + if (x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid)) { + if (cpu->enforce_cpuid) { + error_setg(&local_err, + accel_uses_host_cpuid() ? "Host doesn't support requested features" : "TCG doesn't support requested features"); - goto out; + goto out; + } } /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on @@ -7730,6 +9360,21 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) */ cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; + /* + * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU + * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX + * based on inputs (sockets,cores,threads), it is still better to give + * users a warning. 
+ */ + if (IS_AMD_CPU(env) && + !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) && + env->topo_info.threads_per_core > 1) { + warn_report_once("This family of AMD CPU doesn't support " + "hyperthreading(%d). Please configure -smp " + "options properly or try enabling topoext " + "feature.", env->topo_info.threads_per_core); + } + /* For 64bit systems think about the number of physical bits to present. * ideally this should be the same as the host; anything other than matching * the host can cause incorrect guest behaviour. @@ -7797,28 +9442,34 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) "CPU model '%s' doesn't support legacy-cache=off", name); return; } - env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd = - *cache_info; + env->cache_info = *cache_info; } else { /* Build legacy cache information */ - env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache; - env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache; - env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2; - env->cache_info_cpuid2.l3_cache = &legacy_l3_cache; + if (!cpu->consistent_cache) { + env->enable_legacy_cpuid2_cache = true; + } - env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache; - env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache; - env->cache_info_cpuid4.l2_cache = &legacy_l2_cache; - env->cache_info_cpuid4.l3_cache = &legacy_l3_cache; + if (!cpu->vendor_cpuid_only_v2) { + env->enable_legacy_vendor_cache = true; + } - env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd; - env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd; - env->cache_info_amd.l2_cache = &legacy_l2_cache_amd; - env->cache_info_amd.l3_cache = &legacy_l3_cache; + if (IS_AMD_CPU(env)) { + env->cache_info = legacy_amd_cache_info; + } else { + env->cache_info = legacy_intel_cache_info; + } } #ifndef CONFIG_USER_ONLY MachineState *ms = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(ms); + + if (mc->smp_props.has_caches) { + if (!x86_cpu_update_smp_cache_topo(ms, cpu, errp)) { + return; + } + } + qemu_register_reset(x86_cpu_machine_reset_cb, cpu); if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) { @@ -7831,26 +9482,9 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) mce_init(cpu); + x86_cpu_gdb_init(cs); qemu_init_vcpu(cs); - /* - * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU - * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX - * based on inputs (sockets,cores,threads), it is still better to give - * users a warning. - * - * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise - * cs->nr_threads hasn't be populated yet and the checking is incorrect. - */ - if (IS_AMD_CPU(env) && - !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) && - cs->nr_threads > 1) { - warn_report_once("This family of AMD CPU doesn't support " - "hyperthreading(%d). 
Please configure -smp " - "options properly or try enabling topoext " - "feature.", cs->nr_threads); - } - #ifndef CONFIG_USER_ONLY x86_cpu_apic_realize(cpu, &local_err); if (local_err != NULL) { @@ -7982,20 +9616,47 @@ static void x86_cpu_register_feature_bit_props(X86CPUClass *xcc, static void x86_cpu_post_initfn(Object *obj) { - accel_cpu_instance_init(CPU(obj)); +#ifndef CONFIG_USER_ONLY + if (current_machine && current_machine->cgs) { + x86_confidential_guest_cpu_instance_init( + X86_CONFIDENTIAL_GUEST(current_machine->cgs), (CPU(obj))); + } +#endif +} + +static void x86_cpu_init_xsave(void) +{ + static bool first = true; + uint64_t supported_xcr0; + int i; + + if (first) { + first = false; + + supported_xcr0 = + ((uint64_t) x86_cpu_get_supported_feature_word(NULL, FEAT_XSAVE_XCR0_HI) << 32) | + x86_cpu_get_supported_feature_word(NULL, FEAT_XSAVE_XCR0_LO); + + for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) { + ExtSaveArea *esa = &x86_ext_save_areas[i]; + + if (!(supported_xcr0 & (1 << i))) { + esa->size = 0; + } + } + } } static void x86_cpu_init_default_topo(X86CPU *cpu) { CPUX86State *env = &cpu->env; - env->nr_modules = 1; - env->nr_dies = 1; + env->topo_info = (X86CPUTopoInfo) {1, 1, 1, 1}; - /* SMT, core and package levels are set by default. */ - set_bit(CPU_TOPO_LEVEL_SMT, env->avail_cpu_topo); - set_bit(CPU_TOPO_LEVEL_CORE, env->avail_cpu_topo); - set_bit(CPU_TOPO_LEVEL_PACKAGE, env->avail_cpu_topo); + /* thread, core and socket levels are set by default. */ + set_bit(CPU_TOPOLOGY_LEVEL_THREAD, env->avail_cpu_topo); + set_bit(CPU_TOPOLOGY_LEVEL_CORE, env->avail_cpu_topo); + set_bit(CPU_TOPOLOGY_LEVEL_SOCKET, env->avail_cpu_topo); } static void x86_cpu_initfn(Object *obj) @@ -8052,6 +9713,13 @@ static void x86_cpu_initfn(Object *obj) if (xcc->model) { x86_cpu_load_model(cpu, xcc->model); } + + /* + * accel's cpu_instance_init may have the xsave check, + * so x86_ext_save_areas[] must be initialized before this. + */ + x86_cpu_init_xsave(); + accel_cpu_instance_init(CPU(obj)); } static int64_t x86_cpu_get_arch_id(CPUState *cs) @@ -8085,16 +9753,15 @@ static vaddr x86_cpu_get_pc(CPUState *cs) return cpu->env.eip + cpu->env.segs[R_CS].base; } +#if !defined(CONFIG_USER_ONLY) int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; -#if !defined(CONFIG_USER_ONLY) if (interrupt_request & CPU_INTERRUPT_POLL) { return CPU_INTERRUPT_POLL; } -#endif if (interrupt_request & CPU_INTERRUPT_SIPI) { return CPU_INTERRUPT_SIPI; } @@ -8115,14 +9782,12 @@ int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) (env->eflags & IF_MASK && !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { return CPU_INTERRUPT_HARD; -#if !defined(CONFIG_USER_ONLY) } else if (env->hflags2 & HF2_VGIF_MASK) { if((interrupt_request & CPU_INTERRUPT_VIRQ) && (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) { return CPU_INTERRUPT_VIRQ; } -#endif } } @@ -8133,45 +9798,14 @@ static bool x86_cpu_has_work(CPUState *cs) { return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; } - -int x86_mmu_index_pl(CPUX86State *env, unsigned pl) -{ - int mmu_index_32 = (env->hflags & HF_CS64_MASK) ? 0 : 1; - int mmu_index_base = - pl == 3 ? MMU_USER64_IDX : - !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX : - (env->eflags & AC_MASK) ? 
MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX; - - return mmu_index_base + mmu_index_32; -} - -static int x86_cpu_mmu_index(CPUState *cs, bool ifetch) -{ - CPUX86State *env = cpu_env(cs); - return x86_mmu_index_pl(env, env->hflags & HF_CPL_MASK); -} - -static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl) -{ - int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1; - int mmu_index_base = - !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX : - (pl < 3 && (env->eflags & AC_MASK) - ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX); - - return mmu_index_base + mmu_index_32; -} - -int cpu_mmu_index_kernel(CPUX86State *env) -{ - return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK); -} +#endif /* !CONFIG_USER_ONLY */ static void x86_disas_set_info(CPUState *cs, disassemble_info *info) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; + info->endian = BFD_ENDIAN_LITTLE; info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 : bfd_mach_i386_i8086); @@ -8226,7 +9860,7 @@ void x86_update_hflags(CPUX86State *env) env->hflags = hflags; } -static Property x86_cpu_properties[] = { +static const Property x86_cpu_properties[] = { #ifdef CONFIG_USER_ONLY /* apic_id = 0 by default for *-user, see commit 9886e834 */ DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0), @@ -8291,8 +9925,10 @@ static Property x86_cpu_properties[] = { HYPERV_FEAT_TLBFLUSH_DIRECT, 0), DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), +#ifdef CONFIG_SYNDBG DEFINE_PROP_BIT64("hv-syndbg", X86CPU, hyperv_features, HYPERV_FEAT_SYNDBG, 0), +#endif DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), DEFINE_PROP_BOOL("hv-enforce-cpuid", X86CPU, hyperv_enforce_cpuid, false), @@ -8324,11 +9960,13 @@ static Property x86_cpu_properties[] = { DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), + DEFINE_PROP_UINT8("avx10-version", X86CPU, env.avx10_version, 0), DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0), DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor), DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), DEFINE_PROP_BOOL("x-vendor-cpuid-only", X86CPU, vendor_cpuid_only, true), + DEFINE_PROP_BOOL("x-vendor-cpuid-only-v2", X86CPU, vendor_cpuid_only_v2, true), DEFINE_PROP_BOOL("x-amd-topoext-features-only", X86CPU, amd_topoext_features_only, true), DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), @@ -8343,6 +9981,7 @@ static Property x86_cpu_properties[] = { * own cache information (see x86_cpu_load_def()). 
*/ DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), + DEFINE_PROP_BOOL("x-consistent-cache", X86CPU, consistent_cache, true), DEFINE_PROP_BOOL("legacy-multi-node", X86CPU, legacy_multi_node, false), DEFINE_PROP_BOOL("xen-vapic", X86CPU, xen_vapic, false), @@ -8364,13 +10003,14 @@ static Property x86_cpu_properties[] = { DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level, true), DEFINE_PROP_BOOL("x-l1-cache-per-thread", X86CPU, l1_cache_per_core, true), - DEFINE_PROP_END_OF_LIST() + DEFINE_PROP_BOOL("x-force-cpuid-0x1f", X86CPU, force_cpuid_0x1f, false), }; #ifndef CONFIG_USER_ONLY #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps i386_sysemu_ops = { + .has_work = x86_cpu_has_work, .get_memory_mapping = x86_cpu_get_memory_mapping, .get_paging_enabled = x86_cpu_get_paging_enabled, .get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug, @@ -8384,7 +10024,7 @@ static const struct SysemuCPUOps i386_sysemu_ops = { }; #endif -static void x86_cpu_common_class_init(ObjectClass *oc, void *data) +static void x86_cpu_common_class_init(ObjectClass *oc, const void *data) { X86CPUClass *xcc = X86_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); @@ -8403,9 +10043,8 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data) cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; cc->class_by_name = x86_cpu_class_by_name; + cc->list_cpus = x86_cpu_list; cc->parse_features = x86_cpu_parse_featurestr; - cc->has_work = x86_cpu_has_work; - cc->mmu_index = x86_cpu_mmu_index; cc->dump_state = x86_cpu_dump_state; cc->set_pc = x86_cpu_set_pc; cc->get_pc = x86_cpu_get_pc; @@ -8416,6 +10055,9 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data) #ifndef CONFIG_USER_ONLY cc->sysemu_ops = &i386_sysemu_ops; #endif /* !CONFIG_USER_ONLY */ +#ifdef CONFIG_TCG + cc->tcg_ops = &x86_tcg_ops; +#endif /* CONFIG_TCG */ cc->gdb_arch_name = x86_gdb_arch_name; #ifdef TARGET_X86_64 @@ -8482,7 +10124,7 @@ static const TypeInfo x86_cpu_type_info = { }; /* "base" CPU model, used by query-cpu-model-expansion */ -static void x86_cpu_base_class_init(ObjectClass *oc, void *data) +static void x86_cpu_base_class_init(ObjectClass *oc, const void *data) { X86CPUClass *xcc = X86_CPU_CLASS(oc); diff --git a/target/i386/cpu.h b/target/i386/cpu.h index c6cc035df3d..f977fc49a77 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -20,23 +20,21 @@ #ifndef I386_CPU_H #define I386_CPU_H -#include "sysemu/tcg.h" +#include "system/tcg.h" #include "cpu-qom.h" #include "kvm/hyperv-proto.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" +#include "exec/memop.h" #include "hw/i386/topology.h" #include "qapi/qapi-types-common.h" #include "qemu/cpu-float.h" #include "qemu/timer.h" +#include "standard-headers/asm-x86/kvm_para.h" #define XEN_NR_VIRQS 24 -#define KVM_HAVE_MCE_INJECTION 1 - -/* support for self modifying code even if the modified instruction is - close to the modifying instruction */ -#define TARGET_HAS_PRECISE_SMC - #ifdef TARGET_X86_64 #define I386_ELF_MACHINE EM_X86_64 #define ELF_MACHINE_UNAME "x86_64" @@ -267,12 +265,6 @@ typedef enum X86Seg { #define CR4_FRED_MASK 0 #endif -#ifdef TARGET_X86_64 -#define CR4_FRED_MASK (1ULL << 32) -#else -#define CR4_FRED_MASK 0 -#endif - #define CR4_RESERVED_MASK \ (~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \ | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \ @@ -351,6 +343,7 @@ typedef enum X86Seg { #define PG_MODE_PKE (1 << 17) #define PG_MODE_PKS (1 << 18) #define 
PG_MODE_SMEP (1 << 19) +#define PG_MODE_PG (1 << 20) #define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ @@ -539,6 +532,8 @@ typedef enum X86Seg { #define MSR_AMD64_TSC_RATIO_DEFAULT 0x100000000ULL +#define MSR_K7_HWCR 0xc0010015 + #define MSR_VM_HSAVE_PA 0xc0010117 #define MSR_IA32_XFD 0x000001c4 @@ -589,6 +584,7 @@ typedef enum X86Seg { #define XSTATE_OPMASK_BIT 5 #define XSTATE_ZMM_Hi256_BIT 6 #define XSTATE_Hi16_ZMM_BIT 7 +#define XSTATE_PT_BIT 8 #define XSTATE_PKRU_BIT 9 #define XSTATE_ARCH_LBR_BIT 15 #define XSTATE_XTILE_CFG_BIT 17 @@ -602,6 +598,7 @@ typedef enum X86Seg { #define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT) #define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT) #define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT) +#define XSTATE_PT_MASK (1ULL << XSTATE_PT_BIT) #define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT) #define XSTATE_ARCH_LBR_MASK (1ULL << XSTATE_ARCH_LBR_BIT) #define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT) @@ -624,6 +621,11 @@ typedef enum X86Seg { XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \ XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK) +/* CPUID feature bits available in XSS */ +#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK) + +#define CPUID_XSTATE_MASK (CPUID_XSTATE_XCR0_MASK | CPUID_XSTATE_XSS_MASK) + /* CPUID feature words */ typedef enum FeatureWord { FEAT_1_EDX, /* CPUID[1].EDX */ @@ -638,6 +640,8 @@ typedef enum FeatureWord { FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */ FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */ + FEAT_8000_0021_EBX, /* CPUID[8000_0021].EBX */ + FEAT_8000_0022_EAX, /* CPUID[8000_0022].EAX */ FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */ @@ -664,11 +668,22 @@ typedef enum FeatureWord { FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */ FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */ FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */ + FEAT_7_1_ECX, /* CPUID[EAX=7,ECX=1].ECX */ FEAT_7_1_EDX, /* CPUID[EAX=7,ECX=1].EDX */ FEAT_7_2_EDX, /* CPUID[EAX=7,ECX=2].EDX */ + FEAT_24_0_EBX, /* CPUID[EAX=0x24,ECX=0].EBX */ FEATURE_WORDS, } FeatureWord; +typedef struct FeatureMask { + FeatureWord index; + uint64_t mask; +} FeatureMask; + +typedef struct FeatureDep { + FeatureMask from, to; +} FeatureDep; + typedef uint64_t FeatureWordArray[FEATURE_WORDS]; uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); @@ -826,6 +841,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); #define CPUID_7_0_EBX_HLE (1U << 4) /* Intel Advanced Vector Extensions 2 */ #define CPUID_7_0_EBX_AVX2 (1U << 5) +/* FPU data pointer updated only on x87 exceptions */ +#define CPUID_7_0_EBX_FDP_EXCPTN_ONLY (1u << 6) /* Supervisor-mode Execution Prevention */ #define CPUID_7_0_EBX_SMEP (1U << 7) /* 2nd Group of Advanced Bit Manipulation Extensions */ @@ -836,6 +853,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); #define CPUID_7_0_EBX_INVPCID (1U << 10) /* Restricted Transactional Memory */ #define CPUID_7_0_EBX_RTM (1U << 11) +/* Zero out FPU CS and FPU DS */ +#define CPUID_7_0_EBX_ZERO_FCS_FDS (1U << 13) /* Memory Protection Extension */ #define CPUID_7_0_EBX_MPX (1U << 14) /* AVX-512 Foundation */ @@ -897,6 +916,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); #define CPUID_7_0_ECX_LA57 (1U << 
16) /* Read Processor ID */ #define CPUID_7_0_ECX_RDPID (1U << 22) +/* KeyLocker */ +#define CPUID_7_0_ECX_KeyLocker (1U << 23) /* Bus Lock Debug Exception */ #define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24) /* Cache Line Demote Instruction */ @@ -918,6 +939,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); #define CPUID_7_0_EDX_FSRM (1U << 4) /* AVX512 Vector Pair Intersection to a Pair of Mask Registers */ #define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8) + /* "md_clear" VERW clears CPU buffers */ +#define CPUID_7_0_EDX_MD_CLEAR (1U << 10) /* SERIALIZE instruction */ #define CPUID_7_0_EDX_SERIALIZE (1U << 14) /* TSX Suspend Load Address Tracking instruction */ @@ -945,10 +968,18 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); /* Speculative Store Bypass Disable */ #define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) +/* SHA512 Instruction */ +#define CPUID_7_1_EAX_SHA512 (1U << 0) +/* SM3 Instruction */ +#define CPUID_7_1_EAX_SM3 (1U << 1) +/* SM4 Instruction */ +#define CPUID_7_1_EAX_SM4 (1U << 2) /* AVX VNNI Instruction */ #define CPUID_7_1_EAX_AVX_VNNI (1U << 4) /* AVX512 BFloat16 Instruction */ #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) +/* Linear address space separation */ +#define CPUID_7_1_EAX_LASS (1U << 6) /* CMPCCXADD Instructions */ #define CPUID_7_1_EAX_CMPCCXADD (1U << 7) /* Fast Zero REP MOVS */ @@ -957,6 +988,12 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); #define CPUID_7_1_EAX_FSRS (1U << 11) /* Fast Short REP CMPS/SCAS */ #define CPUID_7_1_EAX_FSRC (1U << 12) +/* Flexible return and event delivery (FRED) */ +#define CPUID_7_1_EAX_FRED (1U << 17) +/* Load into IA32_KERNEL_GS_BASE (LKGS) */ +#define CPUID_7_1_EAX_LKGS (1U << 18) +/* Non-Serializing Write to Model Specific Register (WRMSRNS) */ +#define CPUID_7_1_EAX_WRMSRNS (1U << 19) /* Support Tile Computational Operations on FP16 Numbers */ #define CPUID_7_1_EAX_AMX_FP16 (1U << 21) /* Support for VPMADD52[H,L]UQ */ @@ -964,20 +1001,32 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); /* Linear Address Masking */ #define CPUID_7_1_EAX_LAM (1U << 26) +/* The immediate form of MSR access instructions */ +#define CPUID_7_1_ECX_MSR_IMM (1U << 5) + /* Support for VPDPB[SU,UU,SS]D[,S] */ #define CPUID_7_1_EDX_AVX_VNNI_INT8 (1U << 4) /* AVX NE CONVERT Instructions */ #define CPUID_7_1_EDX_AVX_NE_CONVERT (1U << 5) /* AMX COMPLEX Instructions */ #define CPUID_7_1_EDX_AMX_COMPLEX (1U << 8) +/* AVX-VNNI-INT16 Instructions */ +#define CPUID_7_1_EDX_AVX_VNNI_INT16 (1U << 10) /* PREFETCHIT0/1 Instructions */ #define CPUID_7_1_EDX_PREFETCHITI (1U << 14) -/* Flexible return and event delivery (FRED) */ -#define CPUID_7_1_EAX_FRED (1U << 17) -/* Load into IA32_KERNEL_GS_BASE (LKGS) */ -#define CPUID_7_1_EAX_LKGS (1U << 18) -/* Non-Serializing Write to Model Specific Register (WRMSRNS) */ -#define CPUID_7_1_EAX_WRMSRNS (1U << 19) +/* Support for Advanced Vector Extensions 10 */ +#define CPUID_7_1_EDX_AVX10 (1U << 19) + +/* Indicate bit 7 of the IA32_SPEC_CTRL MSR is supported */ +#define CPUID_7_2_EDX_PSFD (1U << 0) +/* Indicate bits 3 and 4 of the IA32_SPEC_CTRL MSR are supported */ +#define CPUID_7_2_EDX_IPRED_CTRL (1U << 1) +/* Indicate bits 5 and 6 of the IA32_SPEC_CTRL MSR are supported */ +#define CPUID_7_2_EDX_RRSBA_CTRL (1U << 2) +/* Indicate bit 8 of the IA32_SPEC_CTRL MSR is supported */ +#define CPUID_7_2_EDX_DDPD_U (1U << 3) +/* Indicate bit 10 of the IA32_SPEC_CTRL MSR is supported */ +#define CPUID_7_2_EDX_BHI_CTRL 
(1U << 4) /* Do not exhibit MXCSR Configuration Dependent Timing (MCDT) behavior */ #define CPUID_7_2_EDX_MCDT_NO (1U << 5) @@ -988,10 +1037,43 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); /* Packets which contain IP payload have LIP values */ #define CPUID_14_0_ECX_LIP (1U << 31) +/* AVX10 128-bit vector support is present */ +#define CPUID_24_0_EBX_AVX10_128 (1U << 16) +/* AVX10 256-bit vector support is present */ +#define CPUID_24_0_EBX_AVX10_256 (1U << 17) +/* AVX10 512-bit vector support is present */ +#define CPUID_24_0_EBX_AVX10_512 (1U << 18) +/* AVX10 vector length support mask */ +#define CPUID_24_0_EBX_AVX10_VL_MASK (CPUID_24_0_EBX_AVX10_128 | \ + CPUID_24_0_EBX_AVX10_256 | \ + CPUID_24_0_EBX_AVX10_512) + /* RAS Features */ #define CPUID_8000_0007_EBX_OVERFLOW_RECOV (1U << 0) #define CPUID_8000_0007_EBX_SUCCOR (1U << 1) +/* (Old) KVM paravirtualized clocksource */ +#define CPUID_KVM_CLOCK (1U << KVM_FEATURE_CLOCKSOURCE) +/* (New) KVM specific paravirtualized clocksource */ +#define CPUID_KVM_CLOCK2 (1U << KVM_FEATURE_CLOCKSOURCE2) +/* KVM asynchronous page fault */ +#define CPUID_KVM_ASYNCPF (1U << KVM_FEATURE_ASYNC_PF) +/* KVM stolen (when guest vCPU is not running) time accounting */ +#define CPUID_KVM_STEAL_TIME (1U << KVM_FEATURE_STEAL_TIME) +/* KVM paravirtualized end-of-interrupt signaling */ +#define CPUID_KVM_PV_EOI (1U << KVM_FEATURE_PV_EOI) +/* KVM paravirtualized spinlocks support */ +#define CPUID_KVM_PV_UNHALT (1U << KVM_FEATURE_PV_UNHALT) +/* KVM host-side polling on HLT control from the guest */ +#define CPUID_KVM_POLL_CONTROL (1U << KVM_FEATURE_POLL_CONTROL) +/* KVM interrupt based asynchronous page fault*/ +#define CPUID_KVM_ASYNCPF_INT (1U << KVM_FEATURE_ASYNC_PF_INT) +/* KVM 'Extended Destination ID' support for external interrupts */ +#define CPUID_KVM_MSI_EXT_DEST_ID (1U << KVM_FEATURE_MSI_EXT_DEST_ID) + +/* Hint to KVM that vCPUs expect never preempted for an unlimited time */ +#define CPUID_KVM_HINTS_REALTIME (1U << KVM_HINTS_REALTIME) + /* CLZERO instruction */ #define CPUID_8000_0008_EBX_CLZERO (1U << 0) /* Always save/restore FP error pointers */ @@ -1014,25 +1096,71 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); #define CPUID_8000_0008_EBX_AMD_PSFD (1U << 28) /* Processor ignores nested data breakpoints */ -#define CPUID_8000_0021_EAX_No_NESTED_DATA_BP (1U << 0) +#define CPUID_8000_0021_EAX_NO_NESTED_DATA_BP (1U << 0) +/* WRMSR to FS_BASE, GS_BASE, or KERNEL_GS_BASE is non-serializing */ +#define CPUID_8000_0021_EAX_FS_GS_BASE_NS (1U << 1) /* LFENCE is always serializing */ #define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2) /* Null Selector Clears Base */ -#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6) +#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6) /* Automatic IBRS */ -#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8) +#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8) +/* Indicates support for IC prefetch */ +#define CPUID_8000_0021_EAX_PREFETCHI (1U << 20) +/* Enhanced Return Address Predictor Scurity */ +#define CPUID_8000_0021_EAX_ERAPS (1U << 24) +/* Selective Branch Predictor Barrier */ +#define CPUID_8000_0021_EAX_SBPB (1U << 27) +/* IBPB includes branch type prediction flushing */ +#define CPUID_8000_0021_EAX_IBPB_BRTYPE (1U << 28) +/* Not vulnerable to Speculative Return Stack Overflow */ +#define CPUID_8000_0021_EAX_SRSO_NO (1U << 29) +/* Not vulnerable to SRSO at the user-kernel boundary */ +#define CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO (1U << 30) + 
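For orientation, the leaf-0x80000021 bit masks added above are consumed through the FeatureWord machinery declared in this header: the FEAT_* index selects the CPUID leaf/register word in env->features[], and the CPUID_* mask selects the bit within it. The sketch below is illustrative only and is not part of the patch; the function name x86_example_filter_auto_ibrs is invented for the example, while env->features[], x86_cpu_get_supported_feature_word() and mark_unavailable_features() are the declarations found elsewhere in this header.

/* Illustrative sketch, not patch content. */
static void x86_example_filter_auto_ibrs(X86CPU *cpu, const char *prefix)
{
    CPUX86State *env = &cpu->env;
    /* Bits requested for the guest model in this feature word... */
    uint64_t requested = env->features[FEAT_8000_0021_EAX];
    /* ...versus what the accelerator can actually provide. */
    uint64_t host = x86_cpu_get_supported_feature_word(cpu,
                                                       FEAT_8000_0021_EAX);
    uint64_t missing = requested & ~host;

    if (missing & CPUID_8000_0021_EAX_AUTO_IBRS) {
        /*
         * Drops the bit from env->features[], records it as filtered,
         * and (with a non-NULL prefix) warns the user about it.
         */
        mark_unavailable_features(cpu, FEAT_8000_0021_EAX,
                                  CPUID_8000_0021_EAX_AUTO_IBRS, prefix);
    }
}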
+/* + * Return Address Predictor size. RapSize x 8 is the minimum number of + * CALL instructions software needs to execute to flush the RAP. + */ +#define CPUID_8000_0021_EBX_RAPSIZE (8U << 16) + +/* Performance Monitoring Version 2 */ +#define CPUID_8000_0022_EAX_PERFMON_V2 (1U << 0) #define CPUID_XSAVE_XSAVEOPT (1U << 0) #define CPUID_XSAVE_XSAVEC (1U << 1) #define CPUID_XSAVE_XGETBV1 (1U << 2) #define CPUID_XSAVE_XSAVES (1U << 3) +#define CPUID_XSAVE_XFD (1U << 4) #define CPUID_6_EAX_ARAT (1U << 2) /* CPUID[0x80000007].EDX flags: */ #define CPUID_APM_INVTSC (1U << 8) -#define CPUID_VENDOR_SZ 12 +/* "rng" RNG present (xstore) */ +#define CPUID_C000_0001_EDX_XSTORE (1U << 2) +/* "rng_en" RNG enabled */ +#define CPUID_C000_0001_EDX_XSTORE_EN (1U << 3) +/* "ace" on-CPU crypto (xcrypt) */ +#define CPUID_C000_0001_EDX_XCRYPT (1U << 6) +/* "ace_en" on-CPU crypto enabled */ +#define CPUID_C000_0001_EDX_XCRYPT_EN (1U << 7) +/* Advanced Cryptography Engine v2 */ +#define CPUID_C000_0001_EDX_ACE2 (1U << 8) +/* ACE v2 enabled */ +#define CPUID_C000_0001_EDX_ACE2_EN (1U << 9) +/* PadLock Hash Engine */ +#define CPUID_C000_0001_EDX_PHE (1U << 10) +/* PHE enabled */ +#define CPUID_C000_0001_EDX_PHE_EN (1U << 11) +/* PadLock Montgomery Multiplier */ +#define CPUID_C000_0001_EDX_PMM (1U << 12) +/* PMM enabled */ +#define CPUID_C000_0001_EDX_PMM_EN (1U << 13) + +#define CPUID_VENDOR_SZ 12 +#define CPUID_MODEL_ID_SZ 48 #define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */ #define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */ @@ -1044,7 +1172,16 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); #define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */ #define CPUID_VENDOR_AMD "AuthenticAMD" -#define CPUID_VENDOR_VIA "CentaurHauls" +#define CPUID_VENDOR_ZHAOXIN1_1 0x746E6543 /* "Cent" */ +#define CPUID_VENDOR_ZHAOXIN1_2 0x48727561 /* "aurH" */ +#define CPUID_VENDOR_ZHAOXIN1_3 0x736C7561 /* "auls" */ + +#define CPUID_VENDOR_ZHAOXIN2_1 0x68532020 /* " Sh" */ +#define CPUID_VENDOR_ZHAOXIN2_2 0x68676E61 /* "angh" */ +#define CPUID_VENDOR_ZHAOXIN2_3 0x20206961 /* "ai " */ + +#define CPUID_VENDOR_ZHAOXIN1 "CentaurHauls" +#define CPUID_VENDOR_ZHAOXIN2 " Shanghai " #define CPUID_VENDOR_HYGON "HygonGenuine" @@ -1054,6 +1191,15 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \ (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \ (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3) +#define IS_ZHAOXIN1_CPU(env) \ + ((env)->cpuid_vendor1 == CPUID_VENDOR_ZHAOXIN1_1 && \ + (env)->cpuid_vendor2 == CPUID_VENDOR_ZHAOXIN1_2 && \ + (env)->cpuid_vendor3 == CPUID_VENDOR_ZHAOXIN1_3) +#define IS_ZHAOXIN2_CPU(env) \ + ((env)->cpuid_vendor1 == CPUID_VENDOR_ZHAOXIN2_1 && \ + (env)->cpuid_vendor2 == CPUID_VENDOR_ZHAOXIN2_2 && \ + (env)->cpuid_vendor3 == CPUID_VENDOR_ZHAOXIN2_3) +#define IS_ZHAOXIN_CPU(env) (IS_ZHAOXIN1_CPU(env) || IS_ZHAOXIN2_CPU(env)) #define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */ #define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */ @@ -1084,7 +1230,10 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); #define MSR_ARCH_CAP_FBSDP_NO (1U << 14) #define MSR_ARCH_CAP_PSDP_NO (1U << 15) #define MSR_ARCH_CAP_FB_CLEAR (1U << 17) +#define MSR_ARCH_CAP_BHI_NO (1U << 20) #define MSR_ARCH_CAP_PBRSB_NO (1U << 24) +#define MSR_ARCH_CAP_GDS_NO (1U << 26) +#define MSR_ARCH_CAP_RFDS_NO (1U << 27) #define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5) @@ -1192,6 +1341,7 @@ 
uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); #define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000 #define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000 #define VMX_VM_EXIT_LOAD_IA32_PKRS 0x20000000 +#define VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS 0x80000000 #define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 #define VMX_VM_ENTRY_IA32E_MODE 0x00000200 @@ -1277,14 +1427,14 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); * are only needed for conditional branches. */ typedef enum { - CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ - CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */ - CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */ - CC_OP_ADOX, /* CC_SRC2 = O, CC_SRC = rest. */ - CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */ - CC_OP_CLR, /* Z and P set, all other flags clear. */ - - CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ + CC_OP_EFLAGS = 0, /* all cc are explicitly computed, CC_SRC = flags */ + CC_OP_ADCX = 1, /* CC_DST = C, CC_SRC = rest. */ + CC_OP_ADOX = 2, /* CC_SRC2 = O, CC_SRC = rest. */ + CC_OP_ADCOX = 3, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */ + + /* Low 2 bits = MemOp constant for the size */ +#define CC_OP_FIRST_BWLQ CC_OP_MULB + CC_OP_MULB = 4, /* modify all flags, C, O = (CC_SRC != 0) */ CC_OP_MULW, CC_OP_MULL, CC_OP_MULQ, @@ -1339,6 +1489,11 @@ typedef enum { CC_OP_BMILGL, CC_OP_BMILGQ, + CC_OP_BLSIB, /* Z,S via CC_DST, C = SRC!=0; O=0; P,A undefined */ + CC_OP_BLSIW, + CC_OP_BLSIL, + CC_OP_BLSIQ, + /* * Note that only CC_OP_POPCNT (i.e. the one with MO_TL size) * is used or implemented, because the translation needs @@ -1349,10 +1504,24 @@ typedef enum { CC_OP_POPCNTL__, CC_OP_POPCNTQ__, CC_OP_POPCNT = sizeof(target_ulong) == 8 ? CC_OP_POPCNTQ__ : CC_OP_POPCNTL__, +#define CC_OP_LAST_BWLQ CC_OP_POPCNTQ__ - CC_OP_NB, + CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ } CCOp; -QEMU_BUILD_BUG_ON(CC_OP_NB >= 128); + +/* See X86DecodedInsn.cc_op, using int8_t. */ +QEMU_BUILD_BUG_ON(CC_OP_DYNAMIC > INT8_MAX); + +static inline MemOp cc_op_size(CCOp op) +{ + MemOp size = op & 3; + + QEMU_BUILD_BUG_ON(CC_OP_FIRST_BWLQ & 3); + assert(op >= CC_OP_FIRST_BWLQ && op <= CC_OP_LAST_BWLQ); + assert(size <= MO_TL); + + return size; +} typedef struct SegmentCache { uint32_t selector; @@ -1470,8 +1639,6 @@ typedef struct { #define MAX_FIXED_COUNTERS 3 #define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0) -#define TARGET_INSN_START_EXTRA_WORDS 1 - #define NB_OPMASK_REGS 8 /* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish @@ -1607,12 +1774,6 @@ typedef enum TPRAccess { /* Cache information data structures: */ -enum CacheType { - DATA_CACHE, - INSTRUCTION_CACHE, - UNIFIED_CACHE -}; - typedef struct CPUCacheInfo { enum CacheType type; uint8_t level; @@ -1660,7 +1821,7 @@ typedef struct CPUCacheInfo { * Used to encode CPUID[4].EAX[bits 25:14] or * CPUID[0x8000001D].EAX[bits 25:14]. 
*/ - enum CPUTopoLevel share_level; + CpuTopologyLevel share_level; } CPUCacheInfo; @@ -1671,11 +1832,6 @@ typedef struct CPUCaches { CPUCacheInfo *l3_cache; } CPUCaches; -typedef struct HVFX86LazyFlags { - target_ulong result; - target_ulong auxbits; -} HVFX86LazyFlags; - typedef struct CPUArchState { /* standard registers */ target_ulong regs[CPU_NB_REGS]; @@ -1854,6 +2010,9 @@ typedef struct CPUArchState { uint64_t msr_lbr_depth; LBREntry lbr_records[ARCH_LBR_NR_ENTRIES]; + /* AMD MSRC001_0015 Hardware Configuration */ + uint64_t msr_hwcr; + /* exception/interrupt handling */ int error_code; int exception_is_int; @@ -1909,14 +2068,19 @@ typedef struct CPUArchState { uint32_t cpuid_vendor3; uint32_t cpuid_version; FeatureWordArray features; + /* AVX10 version */ + uint8_t avx10_version; /* Features that were explicitly enabled/disabled */ FeatureWordArray user_features; uint32_t cpuid_model[12]; - /* Cache information for CPUID. When legacy-cache=on, the cache data + /* + * Cache information for CPUID. When legacy-cache=on, the cache data * on each CPUID leaf will be different, because we keep compatibility * with old QEMU versions. */ - CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd; + CPUCaches cache_info; + bool enable_legacy_cpuid2_cache; + bool enable_legacy_vendor_cache; /* MTRRs */ uint64_t mtrr_fixed[11]; @@ -1963,8 +2127,7 @@ typedef struct CPUArchState { QemuMutex xen_timers_lock; #endif #if defined(CONFIG_HVF) - HVFX86LazyFlags hvf_lflags; - void *hvf_mmio_buf; + void *emu_mmio_buf; #endif uint64_t mcg_cap; @@ -1983,14 +2146,10 @@ typedef struct CPUArchState { TPRAccess tpr_access_type; - /* Number of dies within this CPU package. */ - unsigned nr_dies; - - /* Number of modules within one die. */ - unsigned nr_modules; + X86CPUTopoInfo topo_info; /* Bitmap of available CPU topology levels for this CPU. */ - DECLARE_BITMAP(avail_cpu_topo, CPU_TOPO_LEVEL_MAX); + DECLARE_BITMAP(avail_cpu_topo, CPU_TOPOLOGY_LEVEL__MAX); } CPUX86State; struct kvm_msrs; @@ -2041,7 +2200,6 @@ struct ArchCPU { bool expose_tcg; bool migratable; bool migrate_smi_count; - bool max_features; /* Enable all supported features automatically */ uint32_t apic_id; /* Enables publishing of TSC increment and Local APIC bus frequencies to @@ -2063,6 +2221,9 @@ struct ArchCPU { /* Features that were filtered out because of missing host capabilities */ FeatureWordArray filtered_features; + /* Features that are forced enabled by underlying hypervisor, e.g., TDX */ + FeatureWordArray forced_on_features; + /* Enable PMU CPUID bits. This can't be enabled by default yet because * it doesn't have ABI stability guarantees, as it passes all PMU CPUID * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel @@ -2101,6 +2262,13 @@ struct ArchCPU { */ bool legacy_cache; + /* + * Compatibility bits for old machine types. + * If true, use the same cache model in CPUID leaf 0x2 + * and 0x4. + */ + bool consistent_cache; + /* Compatibility bits for old machine types. * If true decode the CPUID Function 0x8000001E_ECX to support multiple * nodes per processor @@ -2110,12 +2278,24 @@ struct ArchCPU { /* Compatibility bits for old machine types: */ bool enable_cpuid_0xb; + /* Force to enable cpuid 0x1f */ + bool force_cpuid_0x1f; + /* Enable auto level-increase for all CPUID leaves */ bool full_cpuid_auto_level; - /* Only advertise CPUID leaves defined by the vendor */ + /* + * Compatibility bits for old machine types (PC machine v6.0 and older). + * Only advertise CPUID leaves defined by the vendor. 
+ */ bool vendor_cpuid_only; + /* + * Compatibility bits for old machine types (PC machine v10.0 and older). + * Only advertise CPUID leaves defined by the vendor. + */ + bool vendor_cpuid_only_v2; + /* Only advertise TOPOEXT features that AMD defines */ bool amd_topoext_features_only; @@ -2186,8 +2366,9 @@ struct X86CPUClass { * CPU definition, automatically loaded by instance_init if not NULL. * Should be eventually replaced by subclass-specific property defaults. */ - X86CPUModel *model; + const X86CPUModel *model; + bool max_features; /* Enable all supported features automatically */ bool host_cpuid_required; int ordering; bool migration_safe; @@ -2208,8 +2389,6 @@ struct X86CPUClass { extern const VMStateDescription vmstate_x86_cpu; #endif -int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); - int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, int cpuid, DumpState *s); int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, @@ -2226,11 +2405,13 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags); int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); +void x86_cpu_gdb_init(CPUState *cs); -void x86_cpu_list(void); int cpu_x86_support_mca_broadcast(CPUX86State *env); #ifndef CONFIG_USER_ONLY +int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); + hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, MemTxAttrs *attrs); int cpu_get_pic_interrupt(CPUX86State *s); @@ -2256,7 +2437,14 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env, SegmentCache *sc; unsigned int new_hflags; - sc = &env->segs[seg_reg]; + if (seg_reg == R_LDTR) { + sc = &env->ldt; + } else if (seg_reg == R_TR) { + sc = &env->tr; + } else { + sc = &env->segs[seg_reg]; + } + sc->selector = selector; sc->base = base; sc->limit = limit; @@ -2327,6 +2515,8 @@ static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu, cs->halted = 0; } +uint64_t cpu_x86_get_msr_core_thread_count(X86CPU *cpu); + int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, target_ulong *base, unsigned int *limit, unsigned int *flags); @@ -2368,6 +2558,17 @@ void cpu_set_apic_feature(CPUX86State *env); void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); bool cpu_has_x2apic_feature(CPUX86State *env); +bool is_feature_word_cpuid(uint32_t feature, uint32_t index, int reg); +void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, + const char *verbose_prefix); +void mark_forced_on_features(X86CPU *cpu, FeatureWord w, uint64_t mask, + const char *verbose_prefix); + +static inline bool x86_has_cpuid_0x1f(X86CPU *cpu) +{ + return cpu->force_cpuid_0x1f || + x86_has_extended_topo(cpu->env.avail_cpu_topo); +} /* helper.c */ void x86_cpu_set_a20(X86CPU *cpu, int a20_state); @@ -2417,8 +2618,6 @@ uint64_t cpu_get_tsc(CPUX86State *env); #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32") #endif -#define cpu_list x86_cpu_list - /* MMU modes definitions */ #define MMU_KSMAP64_IDX 0 #define MMU_KSMAP32_IDX 1 @@ -2453,35 +2652,17 @@ static inline bool is_mmu_index_32(int mmu_index) return mmu_index & 1; } -int x86_mmu_index_pl(CPUX86State *env, unsigned pl); -int cpu_mmu_index_kernel(CPUX86State *env); - #define CC_DST (env->cc_dst) #define CC_SRC (env->cc_src) #define CC_SRC2 (env->cc_src2) #define CC_OP (env->cc_op) -#include "exec/cpu-all.h" #include "svm.h" #if !defined(CONFIG_USER_ONLY) 
#include "hw/i386/apic.h" #endif -static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags) -{ - *flags = env->hflags | - (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK)); - if (env->hflags & HF_CS64_MASK) { - *cs_base = 0; - *pc = env->eip; - } else { - *cs_base = env->segs[R_CS].base; - *pc = (uint32_t)(*cs_base + env->eip); - } -} - void do_cpu_init(X86CPU *cpu); #define MCE_INJECT_BROADCAST 1 @@ -2516,6 +2697,36 @@ static inline int32_t x86_get_a20_mask(CPUX86State *env) } } +static inline uint32_t x86_cpu_family(uint32_t eax) +{ + uint32_t family = (eax >> 8) & 0xf; + + if (family == 0xf) { + family += (eax >> 20) & 0xff; + } + + return family; +} + +static inline uint32_t x86_cpu_model(uint32_t eax) +{ + uint32_t family, model; + + family = x86_cpu_family(eax); + model = (eax >> 4) & 0xf; + + if (family >= 0x6) { + model += ((eax >> 16) & 0xf) << 4; + } + + return model; +} + +static inline uint32_t x86_cpu_stepping(uint32_t eax) +{ + return eax & 0xf; +} + static inline bool cpu_has_vmx(CPUX86State *env) { return env->features[FEAT_1_ECX] & CPUID_EXT_VMX; @@ -2552,6 +2763,9 @@ static inline bool cpu_vmx_maybe_enabled(CPUX86State *env) int get_pg_mode(CPUX86State *env); /* fpu_helper.c */ + +/* Set all non-runtime-variable float_status fields to x86 handling */ +void cpu_init_fp_statuses(CPUX86State *env); void update_fp_status(CPUX86State *env); void update_mxcsr_status(CPUX86State *env); void update_mxcsr_from_sse_status(CPUX86State *env); @@ -2696,4 +2910,29 @@ static inline bool ctl_has_irq(CPUX86State *env) # define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20) #endif +/* majority(NOT a, b, c) = (a ^ b) ? b : c */ +#define MAJ_INV1(a, b, c) ((((a) ^ (b)) & ((b) ^ (c))) ^ (c)) + +/* + * ADD_COUT_VEC(x, y) = majority((x + y) ^ x ^ y, x, y) + * + * If two corresponding bits in x and y are the same, that's the carry + * independent of the value (x+y)^x^y. Hence x^y can be replaced with + * 1 in (x+y)^x^y, resulting in majority(NOT (x+y), x, y) + */ +#define ADD_COUT_VEC(op1, op2, result) \ + MAJ_INV1(result, op1, op2) + +/* + * SUB_COUT_VEC(x, y) = NOT majority(x, NOT y, (x - y) ^ x ^ NOT y) + * = majority(NOT x, y, (x - y) ^ x ^ y) + * + * Note that the carry out is actually a borrow, i.e. it is inverted. + * If two corresponding bits in x and y are different, the value of the + * bit in (x-y)^x^y likewise does not matter. Hence, x^y can be replaced + * with 0 in (x-y)^x^y, resulting in majority(NOT x, y, x-y) + */ +#define SUB_COUT_VEC(op1, op2, result) \ + MAJ_INV1(op1, op2, result) + #endif /* I386_CPU_H */ diff --git a/target/i386/emulate/meson.build b/target/i386/emulate/meson.build new file mode 100644 index 00000000000..4edd4f462fc --- /dev/null +++ b/target/i386/emulate/meson.build @@ -0,0 +1,5 @@ +i386_system_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files( + 'x86_decode.c', + 'x86_emu.c', + 'x86_flags.c', +)) diff --git a/target/i386/emulate/panic.h b/target/i386/emulate/panic.h new file mode 100644 index 00000000000..71c24874ba0 --- /dev/null +++ b/target/i386/emulate/panic.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2016 Veertu Inc, + * Copyright (C) 2017 Google Inc, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see . + */ +#ifndef X86_EMU_PANIC_H +#define X86_EMU_PANIC_H + +#define VM_PANIC(x) {\ + printf("%s\n", x); \ + abort(); \ +} + +#define VM_PANIC_ON(x) {\ + if (x) { \ + printf("%s\n", #x); \ + abort(); \ + } \ +} + +#define VM_PANIC_EX(...) {\ + printf(__VA_ARGS__); \ + abort(); \ +} + +#define VM_PANIC_ON_EX(x, ...) {\ + if (x) { \ + printf(__VA_ARGS__); \ + abort(); \ + } \ +} + +#endif diff --git a/target/i386/hvf/x86.h b/target/i386/emulate/x86.h similarity index 96% rename from target/i386/hvf/x86.h rename to target/i386/emulate/x86.h index 3570f29aa9d..73edccfba00 100644 --- a/target/i386/hvf/x86.h +++ b/target/i386/emulate/x86.h @@ -16,8 +16,8 @@ * License along with this program; if not, see . */ -#ifndef HVF_X86_H -#define HVF_X86_H +#ifndef X86_EMU_DEFS_H +#define X86_EMU_DEFS_H typedef struct x86_register { union { @@ -183,7 +183,7 @@ static inline uint32_t x86_call_gate_offset(x86_call_gate *gate) #define GDT_SEL 0 #define LDT_SEL 1 -typedef struct x68_segment_selector { +typedef struct x86_segment_selector { union { uint16_t sel; struct { @@ -192,7 +192,7 @@ typedef struct x68_segment_selector { uint16_t index:13; }; }; -} __attribute__ ((__packed__)) x68_segment_selector; +} __attribute__ ((__packed__)) x86_segment_selector; /* useful register access macros */ #define x86_reg(cpu, reg) ((x86_register *) &cpu->regs[reg]) @@ -250,10 +250,10 @@ typedef struct x68_segment_selector { /* deal with GDT/LDT descriptors in memory */ bool x86_read_segment_descriptor(CPUState *cpu, struct x86_segment_descriptor *desc, - x68_segment_selector sel); + x86_segment_selector sel); bool x86_write_segment_descriptor(CPUState *cpu, struct x86_segment_descriptor *desc, - x68_segment_selector sel); + x86_segment_selector sel); bool x86_read_call_gate(CPUState *cpu, struct x86_call_gate *idt_desc, int gate); diff --git a/target/i386/emulate/x86_decode.c b/target/i386/emulate/x86_decode.c new file mode 100644 index 00000000000..2eca39802e3 --- /dev/null +++ b/target/i386/emulate/x86_decode.c @@ -0,0 +1,2173 @@ +/* + * Copyright (C) 2016 Veertu Inc, + * Copyright (C) 2017 Google Inc, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" + +#include "panic.h" +#include "x86_decode.h" +#include "x86_emu.h" + +#define OPCODE_ESCAPE 0xf + +static void decode_invalid(CPUX86State *env, struct x86_decode *decode) +{ + printf(TARGET_FMT_lx ": failed to decode instruction ", env->eip); + for (int i = 0; i < decode->opcode_len; i++) { + printf("%x ", decode->opcode[i]); + } + printf("\n"); + VM_PANIC("decoder failed\n"); +} + +uint64_t sign(uint64_t val, int size) +{ + switch (size) { + case 1: + val = (int8_t)val; + break; + case 2: + val = (int16_t)val; + break; + case 4: + val = (int32_t)val; + break; + case 8: + val = (int64_t)val; + break; + default: + VM_PANIC_EX("%s invalid size %d\n", __func__, size); + break; + } + return val; +} + +static inline uint64_t decode_bytes(CPUX86State *env, struct x86_decode *decode, + int size) +{ + uint64_t val = 0; + + switch (size) { + case 1: + case 2: + case 4: + case 8: + break; + default: + VM_PANIC_EX("%s invalid size %d\n", __func__, size); + break; + } + target_ulong va = linear_rip(env_cpu(env), env->eip) + decode->len; + emul_ops->read_mem(env_cpu(env), &val, va, size); + decode->len += size; + + return val; +} + +static inline uint8_t decode_byte(CPUX86State *env, struct x86_decode *decode) +{ + return (uint8_t)decode_bytes(env, decode, 1); +} + +static inline uint16_t decode_word(CPUX86State *env, struct x86_decode *decode) +{ + return (uint16_t)decode_bytes(env, decode, 2); +} + +static inline uint32_t decode_dword(CPUX86State *env, struct x86_decode *decode) +{ + return (uint32_t)decode_bytes(env, decode, 4); +} + +static inline uint64_t decode_qword(CPUX86State *env, struct x86_decode *decode) +{ + return decode_bytes(env, decode, 8); +} + +static void decode_modrm_rm(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + op->type = X86_VAR_RM; +} + +static void decode_modrm_reg(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + op->type = X86_VAR_REG; + op->reg = decode->modrm.reg; + op->regptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.r, + decode->operand_size); +} + +static void decode_rax(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + op->type = X86_VAR_REG; + op->reg = R_EAX; + /* Since reg is always AX, REX prefix has no impact. 
*/ + op->regptr = get_reg_ref(env, op->reg, false, 0, + decode->operand_size); +} + +static inline void decode_immediate(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *var, int size) +{ + var->type = X86_VAR_IMMEDIATE; + var->size = size; + switch (size) { + case 1: + var->val = decode_byte(env, decode); + break; + case 2: + var->val = decode_word(env, decode); + break; + case 4: + var->val = decode_dword(env, decode); + break; + case 8: + var->val = decode_qword(env, decode); + break; + default: + VM_PANIC_EX("bad size %d\n", size); + } +} + +static void decode_imm8(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + decode_immediate(env, decode, op, 1); + op->type = X86_VAR_IMMEDIATE; +} + +static void decode_imm8_signed(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + decode_immediate(env, decode, op, 1); + op->val = sign(op->val, 1); + op->type = X86_VAR_IMMEDIATE; +} + +static void decode_imm16(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + decode_immediate(env, decode, op, 2); + op->type = X86_VAR_IMMEDIATE; +} + + +static void decode_imm(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + if (8 == decode->operand_size) { + decode_immediate(env, decode, op, 4); + op->val = sign(op->val, decode->operand_size); + } else { + decode_immediate(env, decode, op, decode->operand_size); + } + op->type = X86_VAR_IMMEDIATE; +} + +static void decode_imm_signed(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + decode_immediate(env, decode, op, decode->operand_size); + op->val = sign(op->val, decode->operand_size); + op->type = X86_VAR_IMMEDIATE; +} + +static void decode_imm_1(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + op->type = X86_VAR_IMMEDIATE; + op->val = 1; +} + +static void decode_imm_0(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + op->type = X86_VAR_IMMEDIATE; + op->val = 0; +} + + +static void decode_pushseg(CPUX86State *env, struct x86_decode *decode) +{ + uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0]; + + decode->op[0].type = X86_VAR_REG; + switch (op) { + case 0xe: + decode->op[0].reg = R_CS; + break; + case 0x16: + decode->op[0].reg = R_SS; + break; + case 0x1e: + decode->op[0].reg = R_DS; + break; + case 0x06: + decode->op[0].reg = R_ES; + break; + case 0xa0: + decode->op[0].reg = R_FS; + break; + case 0xa8: + decode->op[0].reg = R_GS; + break; + } +} + +static void decode_popseg(CPUX86State *env, struct x86_decode *decode) +{ + uint8_t op = (decode->opcode_len > 1) ? 
decode->opcode[1] : decode->opcode[0]; + + decode->op[0].type = X86_VAR_REG; + switch (op) { + case 0xf: + decode->op[0].reg = R_CS; + break; + case 0x17: + decode->op[0].reg = R_SS; + break; + case 0x1f: + decode->op[0].reg = R_DS; + break; + case 0x07: + decode->op[0].reg = R_ES; + break; + case 0xa1: + decode->op[0].reg = R_FS; + break; + case 0xa9: + decode->op[0].reg = R_GS; + break; + } +} + +static void decode_incgroup(CPUX86State *env, struct x86_decode *decode) +{ + decode->op[0].type = X86_VAR_REG; + decode->op[0].reg = decode->opcode[0] - 0x40; + decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); +} + +static void decode_decgroup(CPUX86State *env, struct x86_decode *decode) +{ + decode->op[0].type = X86_VAR_REG; + decode->op[0].reg = decode->opcode[0] - 0x48; + decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); +} + +static void decode_incgroup2(CPUX86State *env, struct x86_decode *decode) +{ + if (!decode->modrm.reg) { + decode->cmd = X86_DECODE_CMD_INC; + } else if (1 == decode->modrm.reg) { + decode->cmd = X86_DECODE_CMD_DEC; + } +} + +static void decode_pushgroup(CPUX86State *env, struct x86_decode *decode) +{ + decode->op[0].type = X86_VAR_REG; + decode->op[0].reg = decode->opcode[0] - 0x50; + decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); +} + +static void decode_popgroup(CPUX86State *env, struct x86_decode *decode) +{ + decode->op[0].type = X86_VAR_REG; + decode->op[0].reg = decode->opcode[0] - 0x58; + decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); +} + +static void decode_jxx(CPUX86State *env, struct x86_decode *decode) +{ + decode->displacement = decode_bytes(env, decode, decode->operand_size); + decode->displacement_size = decode->operand_size; +} + +static void decode_farjmp(CPUX86State *env, struct x86_decode *decode) +{ + decode->op[0].type = X86_VAR_IMMEDIATE; + decode->op[0].val = decode_bytes(env, decode, decode->operand_size); + decode->displacement = decode_word(env, decode); +} + +static void decode_addgroup(CPUX86State *env, struct x86_decode *decode) +{ + enum x86_decode_cmd group[] = { + X86_DECODE_CMD_ADD, + X86_DECODE_CMD_OR, + X86_DECODE_CMD_ADC, + X86_DECODE_CMD_SBB, + X86_DECODE_CMD_AND, + X86_DECODE_CMD_SUB, + X86_DECODE_CMD_XOR, + X86_DECODE_CMD_CMP + }; + decode->cmd = group[decode->modrm.reg]; +} + +static void decode_rotgroup(CPUX86State *env, struct x86_decode *decode) +{ + enum x86_decode_cmd group[] = { + X86_DECODE_CMD_ROL, + X86_DECODE_CMD_ROR, + X86_DECODE_CMD_RCL, + X86_DECODE_CMD_RCR, + X86_DECODE_CMD_SHL, + X86_DECODE_CMD_SHR, + X86_DECODE_CMD_SHL, + X86_DECODE_CMD_SAR + }; + decode->cmd = group[decode->modrm.reg]; +} + +static void decode_f7group(CPUX86State *env, struct x86_decode *decode) +{ + enum x86_decode_cmd group[] = { + X86_DECODE_CMD_TST, + X86_DECODE_CMD_TST, + X86_DECODE_CMD_NOT, + X86_DECODE_CMD_NEG, + X86_DECODE_CMD_MUL, + X86_DECODE_CMD_IMUL_1, + X86_DECODE_CMD_DIV, + X86_DECODE_CMD_IDIV + }; + decode->cmd = group[decode->modrm.reg]; + decode_modrm_rm(env, decode, &decode->op[0]); + + switch (decode->modrm.reg) { + case 0: + case 1: + decode_imm(env, decode, &decode->op[1]); + break; + case 2: + break; + case 3: + decode->op[1].type = X86_VAR_IMMEDIATE; + decode->op[1].val = 0; + break; + default: + break; + } +} + +static void decode_xchgroup(CPUX86State 
*env, struct x86_decode *decode) +{ + decode->op[0].type = X86_VAR_REG; + decode->op[0].reg = decode->opcode[0] - 0x90; + decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); +} + +static void decode_movgroup(CPUX86State *env, struct x86_decode *decode) +{ + decode->op[0].type = X86_VAR_REG; + decode->op[0].reg = decode->opcode[0] - 0xb8; + decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); + decode_immediate(env, decode, &decode->op[1], decode->operand_size); +} + +static void fetch_moffs(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + op->type = X86_VAR_OFFSET; + op->addr = decode_bytes(env, decode, decode->addressing_size); +} + +static void decode_movgroup8(CPUX86State *env, struct x86_decode *decode) +{ + decode->op[0].type = X86_VAR_REG; + decode->op[0].reg = decode->opcode[0] - 0xb0; + decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); + decode_immediate(env, decode, &decode->op[1], decode->operand_size); +} + +static void decode_rcx(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + op->type = X86_VAR_REG; + op->reg = R_ECX; + op->regptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.b, + decode->operand_size); +} + +struct decode_tbl { + uint8_t opcode; + enum x86_decode_cmd cmd; + uint8_t operand_size; + bool is_modrm; + void (*decode_op1)(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op1); + void (*decode_op2)(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op2); + void (*decode_op3)(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op3); + void (*decode_op4)(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op4); + void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode); +}; + +struct decode_x87_tbl { + uint8_t opcode; + uint8_t modrm_reg; + uint8_t modrm_mod; + enum x86_decode_cmd cmd; + uint8_t operand_size; + bool rev; + bool pop; + void (*decode_op1)(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op1); + void (*decode_op2)(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op2); + void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode); +}; + +struct decode_tbl invl_inst = {0x0, 0, 0, false, NULL, NULL, NULL, NULL, + decode_invalid}; + +struct decode_tbl _decode_tbl1[256]; +struct decode_tbl _decode_tbl2[256]; +struct decode_x87_tbl _decode_tbl3[256]; + +static void decode_x87_ins(CPUX86State *env, struct x86_decode *decode) +{ + struct decode_x87_tbl *decoder; + + decode->is_fpu = true; + int mode = decode->modrm.mod == 3 ? 
1 : 0; + int index = ((decode->opcode[0] & 0xf) << 4) | (mode << 3) | + decode->modrm.reg; + + decoder = &_decode_tbl3[index]; + + decode->cmd = decoder->cmd; + if (decoder->operand_size) { + decode->operand_size = decoder->operand_size; + } + decode->fpop_stack = decoder->pop; + decode->frev = decoder->rev; + + if (decoder->decode_op1) { + decoder->decode_op1(env, decode, &decode->op[0]); + } + if (decoder->decode_op2) { + decoder->decode_op2(env, decode, &decode->op[1]); + } + if (decoder->decode_postfix) { + decoder->decode_postfix(env, decode); + } + + VM_PANIC_ON_EX(!decode->cmd, "x87 opcode %x %x (%x %x) not decoded\n", + decode->opcode[0], decode->modrm.modrm, decoder->modrm_reg, + decoder->modrm_mod); +} + +static void decode_ffgroup(CPUX86State *env, struct x86_decode *decode) +{ + enum x86_decode_cmd group[] = { + X86_DECODE_CMD_INC, + X86_DECODE_CMD_DEC, + X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT, + X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT, + X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT, + X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT, + X86_DECODE_CMD_PUSH, + X86_DECODE_CMD_INVL, + X86_DECODE_CMD_INVL + }; + decode->cmd = group[decode->modrm.reg]; +} + +static void decode_sldtgroup(CPUX86State *env, struct x86_decode *decode) +{ + + enum x86_decode_cmd group[] = { + X86_DECODE_CMD_SLDT, + X86_DECODE_CMD_STR, + X86_DECODE_CMD_LLDT, + X86_DECODE_CMD_LTR, + X86_DECODE_CMD_VERR, + X86_DECODE_CMD_VERW, + X86_DECODE_CMD_INVL, + X86_DECODE_CMD_INVL + }; + decode->cmd = group[decode->modrm.reg]; +} + +static void decode_lidtgroup(CPUX86State *env, struct x86_decode *decode) +{ + enum x86_decode_cmd group[] = { + X86_DECODE_CMD_SGDT, + X86_DECODE_CMD_SIDT, + X86_DECODE_CMD_LGDT, + X86_DECODE_CMD_LIDT, + X86_DECODE_CMD_SMSW, + X86_DECODE_CMD_LMSW, + X86_DECODE_CMD_LMSW, + X86_DECODE_CMD_INVLPG + }; + decode->cmd = group[decode->modrm.reg]; + if (0xf9 == decode->modrm.modrm) { + decode->opcode[decode->len++] = decode->modrm.modrm; + decode->cmd = X86_DECODE_CMD_RDTSCP; + } +} + +static void decode_btgroup(CPUX86State *env, struct x86_decode *decode) +{ + enum x86_decode_cmd group[] = { + X86_DECODE_CMD_INVL, + X86_DECODE_CMD_INVL, + X86_DECODE_CMD_INVL, + X86_DECODE_CMD_INVL, + X86_DECODE_CMD_BT, + X86_DECODE_CMD_BTS, + X86_DECODE_CMD_BTR, + X86_DECODE_CMD_BTC + }; + decode->cmd = group[decode->modrm.reg]; +} + +static void decode_x87_general(CPUX86State *env, struct x86_decode *decode) +{ + decode->is_fpu = true; +} + +static void decode_x87_modrm_floatp(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + op->type = X87_VAR_FLOATP; +} + +static void decode_x87_modrm_intp(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + op->type = X87_VAR_INTP; +} + +static void decode_x87_modrm_bytep(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + op->type = X87_VAR_BYTEP; +} + +static void decode_x87_modrm_st0(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + op->type = X87_VAR_REG; + op->reg = 0; +} + +static void decode_decode_x87_modrm_st0(CPUX86State *env, + struct x86_decode *decode, + struct x86_decode_op *op) +{ + op->type = X87_VAR_REG; + op->reg = decode->modrm.modrm & 7; +} + + +static void decode_aegroup(CPUX86State *env, struct x86_decode *decode) +{ + decode->is_fpu = true; + switch (decode->modrm.reg) { + case 0: + decode->cmd = X86_DECODE_CMD_FXSAVE; + decode_x87_modrm_bytep(env, decode, &decode->op[0]); + break; + case 1: + decode_x87_modrm_bytep(env, decode, &decode->op[0]); + decode->cmd = 
X86_DECODE_CMD_FXRSTOR; + break; + case 5: + if (decode->modrm.modrm == 0xe8) { + decode->cmd = X86_DECODE_CMD_LFENCE; + } else { + VM_PANIC("xrstor"); + } + break; + case 6: + VM_PANIC_ON(decode->modrm.modrm != 0xf0); + decode->cmd = X86_DECODE_CMD_MFENCE; + break; + case 7: + if (decode->modrm.modrm == 0xf8) { + decode->cmd = X86_DECODE_CMD_SFENCE; + } else { + decode->cmd = X86_DECODE_CMD_CLFLUSH; + } + break; + default: + VM_PANIC_EX("0xae: reg %d\n", decode->modrm.reg); + break; + } +} + +static void decode_bswap(CPUX86State *env, struct x86_decode *decode) +{ + decode->op[0].type = X86_VAR_REG; + decode->op[0].reg = decode->opcode[1] - 0xc8; + decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); +} + +static void decode_d9_4(CPUX86State *env, struct x86_decode *decode) +{ + switch (decode->modrm.modrm) { + case 0xe0: + /* FCHS */ + decode->cmd = X86_DECODE_CMD_FCHS; + break; + case 0xe1: + decode->cmd = X86_DECODE_CMD_FABS; + break; + case 0xe4: + VM_PANIC("FTST"); + break; + case 0xe5: + /* FXAM */ + decode->cmd = X86_DECODE_CMD_FXAM; + break; + default: + VM_PANIC("FLDENV"); + break; + } +} + +static void decode_db_4(CPUX86State *env, struct x86_decode *decode) +{ + switch (decode->modrm.modrm) { + case 0xe0: + VM_PANIC_EX("unhandled FNENI: %x %x\n", decode->opcode[0], + decode->modrm.modrm); + break; + case 0xe1: + VM_PANIC_EX("unhandled FNDISI: %x %x\n", decode->opcode[0], + decode->modrm.modrm); + break; + case 0xe2: + VM_PANIC_EX("unhandled FCLEX: %x %x\n", decode->opcode[0], + decode->modrm.modrm); + break; + case 0xe3: + decode->cmd = X86_DECODE_CMD_FNINIT; + break; + case 0xe4: + decode->cmd = X86_DECODE_CMD_FNSETPM; + break; + default: + VM_PANIC_EX("unhandled fpu opcode: %x %x\n", decode->opcode[0], + decode->modrm.modrm); + break; + } +} + + +struct decode_tbl _1op_inst[] = { + {0x0, X86_DECODE_CMD_ADD, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, + NULL, NULL}, + {0x1, X86_DECODE_CMD_ADD, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, + NULL, NULL}, + {0x2, X86_DECODE_CMD_ADD, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, + NULL, NULL}, + {0x3, X86_DECODE_CMD_ADD, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, + NULL, NULL}, + {0x4, X86_DECODE_CMD_ADD, 1, false, decode_rax, decode_imm8, NULL, NULL, + NULL}, + {0x5, X86_DECODE_CMD_ADD, 0, false, decode_rax, decode_imm, NULL, NULL, + NULL}, + {0x6, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, + decode_pushseg}, + {0x7, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, + decode_popseg}, + {0x8, X86_DECODE_CMD_OR, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, + NULL, NULL}, + {0x9, X86_DECODE_CMD_OR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, + NULL, NULL}, + {0xa, X86_DECODE_CMD_OR, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, + NULL, NULL}, + {0xb, X86_DECODE_CMD_OR, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0xc, X86_DECODE_CMD_OR, 1, false, decode_rax, decode_imm8, + NULL, NULL, NULL}, + {0xd, X86_DECODE_CMD_OR, 0, false, decode_rax, decode_imm, + NULL, NULL, NULL}, + + {0xe, X86_DECODE_CMD_PUSH_SEG, 0, false, false, + NULL, NULL, NULL, decode_pushseg}, + {0xf, X86_DECODE_CMD_POP_SEG, 0, false, false, + NULL, NULL, NULL, decode_popseg}, + + {0x10, X86_DECODE_CMD_ADC, 1, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x11, X86_DECODE_CMD_ADC, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x12, X86_DECODE_CMD_ADC, 1, true, decode_modrm_reg, 
decode_modrm_rm, + NULL, NULL, NULL}, + {0x13, X86_DECODE_CMD_ADC, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x14, X86_DECODE_CMD_ADC, 1, false, decode_rax, decode_imm, + NULL, NULL, NULL}, + {0x15, X86_DECODE_CMD_ADC, 0, false, decode_rax, decode_imm, + NULL, NULL, NULL}, + + {0x16, X86_DECODE_CMD_PUSH_SEG, 0, false, false, + NULL, NULL, NULL, decode_pushseg}, + {0x17, X86_DECODE_CMD_POP_SEG, 0, false, false, + NULL, NULL, NULL, decode_popseg}, + + {0x18, X86_DECODE_CMD_SBB, 1, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x19, X86_DECODE_CMD_SBB, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x1a, X86_DECODE_CMD_SBB, 1, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x1b, X86_DECODE_CMD_SBB, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x1c, X86_DECODE_CMD_SBB, 1, false, decode_rax, decode_imm8, + NULL, NULL, NULL}, + {0x1d, X86_DECODE_CMD_SBB, 0, false, decode_rax, decode_imm, + NULL, NULL, NULL}, + + {0x1e, X86_DECODE_CMD_PUSH_SEG, 0, false, false, + NULL, NULL, NULL, decode_pushseg}, + {0x1f, X86_DECODE_CMD_POP_SEG, 0, false, false, + NULL, NULL, NULL, decode_popseg}, + + {0x20, X86_DECODE_CMD_AND, 1, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x21, X86_DECODE_CMD_AND, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x22, X86_DECODE_CMD_AND, 1, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x23, X86_DECODE_CMD_AND, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x24, X86_DECODE_CMD_AND, 1, false, decode_rax, decode_imm, + NULL, NULL, NULL}, + {0x25, X86_DECODE_CMD_AND, 0, false, decode_rax, decode_imm, + NULL, NULL, NULL}, + {0x28, X86_DECODE_CMD_SUB, 1, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x29, X86_DECODE_CMD_SUB, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x2a, X86_DECODE_CMD_SUB, 1, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x2b, X86_DECODE_CMD_SUB, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x2c, X86_DECODE_CMD_SUB, 1, false, decode_rax, decode_imm, + NULL, NULL, NULL}, + {0x2d, X86_DECODE_CMD_SUB, 0, false, decode_rax, decode_imm, + NULL, NULL, NULL}, + {0x2f, X86_DECODE_CMD_DAS, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0x30, X86_DECODE_CMD_XOR, 1, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x31, X86_DECODE_CMD_XOR, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x32, X86_DECODE_CMD_XOR, 1, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x33, X86_DECODE_CMD_XOR, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x34, X86_DECODE_CMD_XOR, 1, false, decode_rax, decode_imm, + NULL, NULL, NULL}, + {0x35, X86_DECODE_CMD_XOR, 0, false, decode_rax, decode_imm, + NULL, NULL, NULL}, + + {0x38, X86_DECODE_CMD_CMP, 1, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x39, X86_DECODE_CMD_CMP, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x3a, X86_DECODE_CMD_CMP, 1, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x3b, X86_DECODE_CMD_CMP, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x3c, X86_DECODE_CMD_CMP, 1, false, decode_rax, decode_imm8, + NULL, NULL, NULL}, + {0x3d, X86_DECODE_CMD_CMP, 0, false, decode_rax, decode_imm, + NULL, NULL, NULL}, + + {0x3f, X86_DECODE_CMD_AAS, 0, false, + NULL, NULL, NULL, NULL, NULL}, + + {0x40, 
X86_DECODE_CMD_INC, 0, false, + NULL, NULL, NULL, NULL, decode_incgroup}, + {0x41, X86_DECODE_CMD_INC, 0, false, + NULL, NULL, NULL, NULL, decode_incgroup}, + {0x42, X86_DECODE_CMD_INC, 0, false, + NULL, NULL, NULL, NULL, decode_incgroup}, + {0x43, X86_DECODE_CMD_INC, 0, false, + NULL, NULL, NULL, NULL, decode_incgroup}, + {0x44, X86_DECODE_CMD_INC, 0, false, + NULL, NULL, NULL, NULL, decode_incgroup}, + {0x45, X86_DECODE_CMD_INC, 0, false, + NULL, NULL, NULL, NULL, decode_incgroup}, + {0x46, X86_DECODE_CMD_INC, 0, false, + NULL, NULL, NULL, NULL, decode_incgroup}, + {0x47, X86_DECODE_CMD_INC, 0, false, + NULL, NULL, NULL, NULL, decode_incgroup}, + + {0x48, X86_DECODE_CMD_DEC, 0, false, + NULL, NULL, NULL, NULL, decode_decgroup}, + {0x49, X86_DECODE_CMD_DEC, 0, false, + NULL, NULL, NULL, NULL, decode_decgroup}, + {0x4a, X86_DECODE_CMD_DEC, 0, false, + NULL, NULL, NULL, NULL, decode_decgroup}, + {0x4b, X86_DECODE_CMD_DEC, 0, false, + NULL, NULL, NULL, NULL, decode_decgroup}, + {0x4c, X86_DECODE_CMD_DEC, 0, false, + NULL, NULL, NULL, NULL, decode_decgroup}, + {0x4d, X86_DECODE_CMD_DEC, 0, false, + NULL, NULL, NULL, NULL, decode_decgroup}, + {0x4e, X86_DECODE_CMD_DEC, 0, false, + NULL, NULL, NULL, NULL, decode_decgroup}, + {0x4f, X86_DECODE_CMD_DEC, 0, false, + NULL, NULL, NULL, NULL, decode_decgroup}, + + {0x50, X86_DECODE_CMD_PUSH, 0, false, + NULL, NULL, NULL, NULL, decode_pushgroup}, + {0x51, X86_DECODE_CMD_PUSH, 0, false, + NULL, NULL, NULL, NULL, decode_pushgroup}, + {0x52, X86_DECODE_CMD_PUSH, 0, false, + NULL, NULL, NULL, NULL, decode_pushgroup}, + {0x53, X86_DECODE_CMD_PUSH, 0, false, + NULL, NULL, NULL, NULL, decode_pushgroup}, + {0x54, X86_DECODE_CMD_PUSH, 0, false, + NULL, NULL, NULL, NULL, decode_pushgroup}, + {0x55, X86_DECODE_CMD_PUSH, 0, false, + NULL, NULL, NULL, NULL, decode_pushgroup}, + {0x56, X86_DECODE_CMD_PUSH, 0, false, + NULL, NULL, NULL, NULL, decode_pushgroup}, + {0x57, X86_DECODE_CMD_PUSH, 0, false, + NULL, NULL, NULL, NULL, decode_pushgroup}, + + {0x58, X86_DECODE_CMD_POP, 0, false, + NULL, NULL, NULL, NULL, decode_popgroup}, + {0x59, X86_DECODE_CMD_POP, 0, false, + NULL, NULL, NULL, NULL, decode_popgroup}, + {0x5a, X86_DECODE_CMD_POP, 0, false, + NULL, NULL, NULL, NULL, decode_popgroup}, + {0x5b, X86_DECODE_CMD_POP, 0, false, + NULL, NULL, NULL, NULL, decode_popgroup}, + {0x5c, X86_DECODE_CMD_POP, 0, false, + NULL, NULL, NULL, NULL, decode_popgroup}, + {0x5d, X86_DECODE_CMD_POP, 0, false, + NULL, NULL, NULL, NULL, decode_popgroup}, + {0x5e, X86_DECODE_CMD_POP, 0, false, + NULL, NULL, NULL, NULL, decode_popgroup}, + {0x5f, X86_DECODE_CMD_POP, 0, false, + NULL, NULL, NULL, NULL, decode_popgroup}, + + {0x60, X86_DECODE_CMD_PUSHA, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0x61, X86_DECODE_CMD_POPA, 0, false, + NULL, NULL, NULL, NULL, NULL}, + + {0x68, X86_DECODE_CMD_PUSH, 0, false, decode_imm, + NULL, NULL, NULL, NULL}, + {0x6a, X86_DECODE_CMD_PUSH, 0, false, decode_imm8_signed, + NULL, NULL, NULL, NULL}, + {0x69, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg, + decode_modrm_rm, decode_imm, NULL, NULL}, + {0x6b, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg, decode_modrm_rm, + decode_imm8_signed, NULL, NULL}, + + {0x6c, X86_DECODE_CMD_INS, 1, false, + NULL, NULL, NULL, NULL, NULL}, + {0x6d, X86_DECODE_CMD_INS, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0x6e, X86_DECODE_CMD_OUTS, 1, false, + NULL, NULL, NULL, NULL, NULL}, + {0x6f, X86_DECODE_CMD_OUTS, 0, false, + NULL, NULL, NULL, NULL, NULL}, + + {0x70, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, 
NULL, decode_jxx}, + {0x71, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x72, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x73, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x74, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x75, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x76, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x77, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x78, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x79, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x7a, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x7b, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x7c, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x7d, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x7e, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x7f, X86_DECODE_CMD_JXX, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + + {0x80, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8, + NULL, NULL, decode_addgroup}, + {0x81, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm, + NULL, NULL, decode_addgroup}, + {0x82, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8, + NULL, NULL, decode_addgroup}, + {0x83, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8_signed, + NULL, NULL, decode_addgroup}, + {0x84, X86_DECODE_CMD_TST, 1, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x85, X86_DECODE_CMD_TST, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x86, X86_DECODE_CMD_XCHG, 1, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x87, X86_DECODE_CMD_XCHG, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x88, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x89, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0x8a, X86_DECODE_CMD_MOV, 1, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x8b, X86_DECODE_CMD_MOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x8c, X86_DECODE_CMD_MOV_FROM_SEG, 0, true, decode_modrm_rm, + decode_modrm_reg, NULL, NULL, NULL}, + {0x8d, X86_DECODE_CMD_LEA, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x8e, X86_DECODE_CMD_MOV_TO_SEG, 0, true, decode_modrm_reg, + decode_modrm_rm, NULL, NULL, NULL}, + {0x8f, X86_DECODE_CMD_POP, 0, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + + {0x90, X86_DECODE_CMD_NOP, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0x91, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, + NULL, NULL, decode_xchgroup}, + {0x92, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, + NULL, NULL, decode_xchgroup}, + {0x93, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, + NULL, NULL, decode_xchgroup}, + {0x94, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, + NULL, NULL, decode_xchgroup}, + {0x95, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, + NULL, NULL, decode_xchgroup}, + {0x96, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, + NULL, NULL, decode_xchgroup}, + {0x97, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, + NULL, NULL, decode_xchgroup}, + + {0x98, X86_DECODE_CMD_CBW, 0, false, NULL, NULL, + NULL, NULL, NULL}, + {0x99, X86_DECODE_CMD_CWD, 0, false, 
NULL, NULL, + NULL, NULL, NULL}, + + {0x9a, X86_DECODE_CMD_CALL_FAR, 0, false, NULL, + NULL, NULL, NULL, decode_farjmp}, + + {0x9c, X86_DECODE_CMD_PUSHF, 0, false, NULL, NULL, + NULL, NULL, NULL}, + /*{0x9d, X86_DECODE_CMD_POPF, 0, false, NULL, NULL, + NULL, NULL, NULL},*/ + {0x9e, X86_DECODE_CMD_SAHF, 0, false, NULL, NULL, + NULL, NULL, NULL}, + {0x9f, X86_DECODE_CMD_LAHF, 0, false, NULL, NULL, + NULL, NULL, NULL}, + + {0xa0, X86_DECODE_CMD_MOV, 1, false, decode_rax, fetch_moffs, + NULL, NULL, NULL}, + {0xa1, X86_DECODE_CMD_MOV, 0, false, decode_rax, fetch_moffs, + NULL, NULL, NULL}, + {0xa2, X86_DECODE_CMD_MOV, 1, false, fetch_moffs, decode_rax, + NULL, NULL, NULL}, + {0xa3, X86_DECODE_CMD_MOV, 0, false, fetch_moffs, decode_rax, + NULL, NULL, NULL}, + + {0xa4, X86_DECODE_CMD_MOVS, 1, false, NULL, NULL, + NULL, NULL, NULL}, + {0xa5, X86_DECODE_CMD_MOVS, 0, false, NULL, NULL, + NULL, NULL, NULL}, + {0xa6, X86_DECODE_CMD_CMPS, 1, false, NULL, NULL, + NULL, NULL, NULL}, + {0xa7, X86_DECODE_CMD_CMPS, 0, false, NULL, NULL, + NULL, NULL, NULL}, + {0xaa, X86_DECODE_CMD_STOS, 1, false, NULL, NULL, + NULL, NULL, NULL}, + {0xab, X86_DECODE_CMD_STOS, 0, false, NULL, NULL, + NULL, NULL, NULL}, + {0xac, X86_DECODE_CMD_LODS, 1, false, NULL, NULL, + NULL, NULL, NULL}, + {0xad, X86_DECODE_CMD_LODS, 0, false, NULL, NULL, + NULL, NULL, NULL}, + {0xae, X86_DECODE_CMD_SCAS, 1, false, NULL, NULL, + NULL, NULL, NULL}, + {0xaf, X86_DECODE_CMD_SCAS, 0, false, NULL, NULL, + NULL, NULL, NULL}, + + {0xa8, X86_DECODE_CMD_TST, 1, false, decode_rax, decode_imm, + NULL, NULL, NULL}, + {0xa9, X86_DECODE_CMD_TST, 0, false, decode_rax, decode_imm, + NULL, NULL, NULL}, + + {0xb0, X86_DECODE_CMD_MOV, 1, false, NULL, + NULL, NULL, NULL, decode_movgroup8}, + {0xb1, X86_DECODE_CMD_MOV, 1, false, NULL, + NULL, NULL, NULL, decode_movgroup8}, + {0xb2, X86_DECODE_CMD_MOV, 1, false, NULL, + NULL, NULL, NULL, decode_movgroup8}, + {0xb3, X86_DECODE_CMD_MOV, 1, false, NULL, + NULL, NULL, NULL, decode_movgroup8}, + {0xb4, X86_DECODE_CMD_MOV, 1, false, NULL, + NULL, NULL, NULL, decode_movgroup8}, + {0xb5, X86_DECODE_CMD_MOV, 1, false, NULL, + NULL, NULL, NULL, decode_movgroup8}, + {0xb6, X86_DECODE_CMD_MOV, 1, false, NULL, + NULL, NULL, NULL, decode_movgroup8}, + {0xb7, X86_DECODE_CMD_MOV, 1, false, NULL, + NULL, NULL, NULL, decode_movgroup8}, + + {0xb8, X86_DECODE_CMD_MOV, 0, false, NULL, + NULL, NULL, NULL, decode_movgroup}, + {0xb9, X86_DECODE_CMD_MOV, 0, false, NULL, + NULL, NULL, NULL, decode_movgroup}, + {0xba, X86_DECODE_CMD_MOV, 0, false, NULL, + NULL, NULL, NULL, decode_movgroup}, + {0xbb, X86_DECODE_CMD_MOV, 0, false, NULL, + NULL, NULL, NULL, decode_movgroup}, + {0xbc, X86_DECODE_CMD_MOV, 0, false, NULL, + NULL, NULL, NULL, decode_movgroup}, + {0xbd, X86_DECODE_CMD_MOV, 0, false, NULL, + NULL, NULL, NULL, decode_movgroup}, + {0xbe, X86_DECODE_CMD_MOV, 0, false, NULL, + NULL, NULL, NULL, decode_movgroup}, + {0xbf, X86_DECODE_CMD_MOV, 0, false, NULL, + NULL, NULL, NULL, decode_movgroup}, + + {0xc0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8, + NULL, NULL, decode_rotgroup}, + {0xc1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8, + NULL, NULL, decode_rotgroup}, + + {0xc2, X86_DECODE_RET_NEAR, 0, false, decode_imm16, + NULL, NULL, NULL, NULL}, + {0xc3, X86_DECODE_RET_NEAR, 0, false, NULL, + NULL, NULL, NULL, NULL}, + + {0xc4, X86_DECODE_CMD_LES, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0xc5, X86_DECODE_CMD_LDS, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, 
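+    /* entry layout: {opcode, cmd, operand_size (1 = byte, 0 = size from mode/prefixes), is_modrm, op1..op4 decoder callbacks, postfix decoder} */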
+ + {0xc6, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_imm8, + NULL, NULL, NULL}, + {0xc7, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_imm, + NULL, NULL, NULL}, + + {0xc8, X86_DECODE_CMD_ENTER, 0, false, decode_imm16, decode_imm8, + NULL, NULL, NULL}, + {0xc9, X86_DECODE_CMD_LEAVE, 0, false, NULL, NULL, + NULL, NULL, NULL}, + {0xca, X86_DECODE_RET_FAR, 0, false, decode_imm16, NULL, + NULL, NULL, NULL}, + {0xcb, X86_DECODE_RET_FAR, 0, false, decode_imm_0, NULL, + NULL, NULL, NULL}, + {0xcd, X86_DECODE_CMD_INT, 0, false, decode_imm8, NULL, + NULL, NULL, NULL}, + /*{0xcf, X86_DECODE_CMD_IRET, 0, false, NULL, NULL, + NULL, NULL, NULL},*/ + + {0xd0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm_1, + NULL, NULL, decode_rotgroup}, + {0xd1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm_1, + NULL, NULL, decode_rotgroup}, + {0xd2, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_rcx, + NULL, NULL, decode_rotgroup}, + {0xd3, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_rcx, + NULL, NULL, decode_rotgroup}, + + {0xd4, X86_DECODE_CMD_AAM, 0, false, decode_imm8, + NULL, NULL, NULL, NULL}, + {0xd5, X86_DECODE_CMD_AAD, 0, false, decode_imm8, + NULL, NULL, NULL, NULL}, + + {0xd7, X86_DECODE_CMD_XLAT, 0, false, + NULL, NULL, NULL, NULL, NULL}, + + {0xd8, X86_DECODE_CMD_INVL, 0, true, NULL, + NULL, NULL, NULL, decode_x87_ins}, + {0xd9, X86_DECODE_CMD_INVL, 0, true, NULL, + NULL, NULL, NULL, decode_x87_ins}, + {0xda, X86_DECODE_CMD_INVL, 0, true, NULL, + NULL, NULL, NULL, decode_x87_ins}, + {0xdb, X86_DECODE_CMD_INVL, 0, true, NULL, + NULL, NULL, NULL, decode_x87_ins}, + {0xdc, X86_DECODE_CMD_INVL, 0, true, NULL, + NULL, NULL, NULL, decode_x87_ins}, + {0xdd, X86_DECODE_CMD_INVL, 0, true, NULL, + NULL, NULL, NULL, decode_x87_ins}, + {0xde, X86_DECODE_CMD_INVL, 0, true, NULL, + NULL, NULL, NULL, decode_x87_ins}, + {0xdf, X86_DECODE_CMD_INVL, 0, true, NULL, + NULL, NULL, NULL, decode_x87_ins}, + + {0xe0, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed, + NULL, NULL, NULL, NULL}, + {0xe1, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed, + NULL, NULL, NULL, NULL}, + {0xe2, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed, + NULL, NULL, NULL, NULL}, + + {0xe3, X86_DECODE_CMD_JCXZ, 1, false, + NULL, NULL, NULL, NULL, decode_jxx}, + + {0xe4, X86_DECODE_CMD_IN, 1, false, decode_imm8, + NULL, NULL, NULL, NULL}, + {0xe5, X86_DECODE_CMD_IN, 0, false, decode_imm8, + NULL, NULL, NULL, NULL}, + {0xe6, X86_DECODE_CMD_OUT, 1, false, decode_imm8, + NULL, NULL, NULL, NULL}, + {0xe7, X86_DECODE_CMD_OUT, 0, false, decode_imm8, + NULL, NULL, NULL, NULL}, + {0xe8, X86_DECODE_CMD_CALL_NEAR, 0, false, decode_imm_signed, + NULL, NULL, NULL, NULL}, + {0xe9, X86_DECODE_CMD_JMP_NEAR, 0, false, decode_imm_signed, + NULL, NULL, NULL, NULL}, + {0xea, X86_DECODE_CMD_JMP_FAR, 0, false, + NULL, NULL, NULL, NULL, decode_farjmp}, + {0xeb, X86_DECODE_CMD_JMP_NEAR, 1, false, decode_imm8_signed, + NULL, NULL, NULL, NULL}, + {0xec, X86_DECODE_CMD_IN, 1, false, + NULL, NULL, NULL, NULL, NULL}, + {0xed, X86_DECODE_CMD_IN, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0xee, X86_DECODE_CMD_OUT, 1, false, + NULL, NULL, NULL, NULL, NULL}, + {0xef, X86_DECODE_CMD_OUT, 0, false, + NULL, NULL, NULL, NULL, NULL}, + + {0xf4, X86_DECODE_CMD_HLT, 0, false, + NULL, NULL, NULL, NULL, NULL}, + + {0xf5, X86_DECODE_CMD_CMC, 0, false, + NULL, NULL, NULL, NULL, NULL}, + + {0xf6, X86_DECODE_CMD_INVL, 1, true, + NULL, NULL, NULL, NULL, decode_f7group}, + {0xf7, X86_DECODE_CMD_INVL, 0, true, + NULL, NULL, NULL, 
NULL, decode_f7group}, + + {0xf8, X86_DECODE_CMD_CLC, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0xf9, X86_DECODE_CMD_STC, 0, false, + NULL, NULL, NULL, NULL, NULL}, + + {0xfa, X86_DECODE_CMD_CLI, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0xfb, X86_DECODE_CMD_STI, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0xfc, X86_DECODE_CMD_CLD, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0xfd, X86_DECODE_CMD_STD, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0xfe, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, + NULL, NULL, NULL, decode_incgroup2}, + {0xff, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, + NULL, NULL, NULL, decode_ffgroup}, +}; + +struct decode_tbl _2op_inst[] = { + {0x0, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, + NULL, NULL, NULL, decode_sldtgroup}, + {0x1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, + NULL, NULL, NULL, decode_lidtgroup}, + {0x6, X86_DECODE_CMD_CLTS, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0x9, X86_DECODE_CMD_WBINVD, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0x18, X86_DECODE_CMD_PREFETCH, 0, true, + NULL, NULL, NULL, NULL, decode_x87_general}, + {0x1f, X86_DECODE_CMD_NOP, 0, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x20, X86_DECODE_CMD_MOV_FROM_CR, 0, true, decode_modrm_rm, + decode_modrm_reg, NULL, NULL, NULL}, + {0x21, X86_DECODE_CMD_MOV_FROM_DR, 0, true, decode_modrm_rm, + decode_modrm_reg, NULL, NULL, NULL}, + {0x22, X86_DECODE_CMD_MOV_TO_CR, 0, true, decode_modrm_reg, + decode_modrm_rm, NULL, NULL, NULL}, + {0x23, X86_DECODE_CMD_MOV_TO_DR, 0, true, decode_modrm_reg, + decode_modrm_rm, NULL, NULL, NULL}, + {0x30, X86_DECODE_CMD_WRMSR, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0x31, X86_DECODE_CMD_RDTSC, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0x32, X86_DECODE_CMD_RDMSR, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0x40, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x41, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x42, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x43, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x44, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x45, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x46, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x47, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x48, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x49, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x4a, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x4b, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x4c, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x4d, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x4e, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x4f, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0x77, X86_DECODE_CMD_EMMS, 0, false, + NULL, NULL, NULL, NULL, decode_x87_general}, + {0x82, X86_DECODE_CMD_JXX, 0, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x83, X86_DECODE_CMD_JXX, 0, false, + NULL, 
NULL, NULL, NULL, decode_jxx}, + {0x84, X86_DECODE_CMD_JXX, 0, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x85, X86_DECODE_CMD_JXX, 0, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x86, X86_DECODE_CMD_JXX, 0, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x87, X86_DECODE_CMD_JXX, 0, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x88, X86_DECODE_CMD_JXX, 0, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x89, X86_DECODE_CMD_JXX, 0, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x8a, X86_DECODE_CMD_JXX, 0, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x8b, X86_DECODE_CMD_JXX, 0, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x8c, X86_DECODE_CMD_JXX, 0, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x8d, X86_DECODE_CMD_JXX, 0, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x8e, X86_DECODE_CMD_JXX, 0, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x8f, X86_DECODE_CMD_JXX, 0, false, + NULL, NULL, NULL, NULL, decode_jxx}, + {0x90, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x91, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x92, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x93, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x94, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x95, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x96, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x97, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x98, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x99, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x9a, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x9b, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x9c, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x9d, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x9e, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + {0x9f, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + + {0xb0, X86_DECODE_CMD_CMPXCHG, 1, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0xb1, X86_DECODE_CMD_CMPXCHG, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + + {0xb6, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0xb7, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0xb8, X86_DECODE_CMD_POPCNT, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0xbe, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0xbf, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0xa0, X86_DECODE_CMD_PUSH_SEG, 0, false, false, + NULL, NULL, NULL, decode_pushseg}, + {0xa1, X86_DECODE_CMD_POP_SEG, 0, false, false, + NULL, NULL, NULL, decode_popseg}, + {0xa2, X86_DECODE_CMD_CPUID, 0, false, + NULL, NULL, NULL, NULL, NULL}, + {0xa3, X86_DECODE_CMD_BT, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0xa4, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg, + decode_imm8, NULL, NULL}, + {0xa5, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg, + decode_rcx, NULL, NULL}, + {0xa8, 
X86_DECODE_CMD_PUSH_SEG, 0, false, false, + NULL, NULL, NULL, decode_pushseg}, + {0xa9, X86_DECODE_CMD_POP_SEG, 0, false, false, + NULL, NULL, NULL, decode_popseg}, + {0xab, X86_DECODE_CMD_BTS, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0xac, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg, + decode_imm8, NULL, NULL}, + {0xad, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg, + decode_rcx, NULL, NULL}, + + {0xae, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, + NULL, NULL, NULL, decode_aegroup}, + + {0xaf, X86_DECODE_CMD_IMUL_2, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0xb2, X86_DECODE_CMD_LSS, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0xb3, X86_DECODE_CMD_BTR, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0xba, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8, + NULL, NULL, decode_btgroup}, + {0xbb, X86_DECODE_CMD_BTC, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + {0xbc, X86_DECODE_CMD_BSF, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + {0xbd, X86_DECODE_CMD_BSR, 0, true, decode_modrm_reg, decode_modrm_rm, + NULL, NULL, NULL}, + + {0xc1, X86_DECODE_CMD_XADD, 0, true, decode_modrm_rm, decode_modrm_reg, + NULL, NULL, NULL}, + + {0xc7, X86_DECODE_CMD_CMPXCHG8B, 0, true, decode_modrm_rm, + NULL, NULL, NULL, NULL}, + + {0xc8, X86_DECODE_CMD_BSWAP, 0, false, + NULL, NULL, NULL, NULL, decode_bswap}, + {0xc9, X86_DECODE_CMD_BSWAP, 0, false, + NULL, NULL, NULL, NULL, decode_bswap}, + {0xca, X86_DECODE_CMD_BSWAP, 0, false, + NULL, NULL, NULL, NULL, decode_bswap}, + {0xcb, X86_DECODE_CMD_BSWAP, 0, false, + NULL, NULL, NULL, NULL, decode_bswap}, + {0xcc, X86_DECODE_CMD_BSWAP, 0, false, + NULL, NULL, NULL, NULL, decode_bswap}, + {0xcd, X86_DECODE_CMD_BSWAP, 0, false, + NULL, NULL, NULL, NULL, decode_bswap}, + {0xce, X86_DECODE_CMD_BSWAP, 0, false, + NULL, NULL, NULL, NULL, decode_bswap}, + {0xcf, X86_DECODE_CMD_BSWAP, 0, false, + NULL, NULL, NULL, NULL, decode_bswap}, +}; + +struct decode_x87_tbl invl_inst_x87 = {0x0, 0, 0, 0, 0, false, false, NULL, + NULL, decode_invalid}; + +struct decode_x87_tbl _x87_inst[] = { + {0xd8, 0, 3, X86_DECODE_CMD_FADD, 10, false, false, + decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL}, + {0xd8, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0, + decode_x87_modrm_floatp, NULL}, + {0xd8, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false, decode_x87_modrm_st0, + decode_decode_x87_modrm_st0, NULL}, + {0xd8, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0, + decode_x87_modrm_floatp, NULL}, + {0xd8, 4, 3, X86_DECODE_CMD_FSUB, 10, false, false, decode_x87_modrm_st0, + decode_x87_modrm_st0, NULL}, + {0xd8, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0, + decode_x87_modrm_floatp, NULL}, + {0xd8, 5, 3, X86_DECODE_CMD_FSUB, 10, true, false, decode_x87_modrm_st0, + decode_x87_modrm_st0, NULL}, + {0xd8, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0, + decode_x87_modrm_floatp, NULL}, + {0xd8, 6, 3, X86_DECODE_CMD_FDIV, 10, false, false, decode_x87_modrm_st0, + decode_x87_modrm_st0, NULL}, + {0xd8, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0, + decode_x87_modrm_floatp, NULL}, + {0xd8, 7, 3, X86_DECODE_CMD_FDIV, 10, true, false, decode_x87_modrm_st0, + decode_x87_modrm_st0, NULL}, + {0xd8, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0, + decode_x87_modrm_floatp, NULL}, + + {0xd9, 0, 3, 
X86_DECODE_CMD_FLD, 10, false, false, + decode_x87_modrm_st0, NULL, NULL}, + {0xd9, 0, 0, X86_DECODE_CMD_FLD, 4, false, false, + decode_x87_modrm_floatp, NULL, NULL}, + {0xd9, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, decode_x87_modrm_st0, + decode_x87_modrm_st0, NULL}, + {0xd9, 1, 0, X86_DECODE_CMD_INVL, 10, false, false, + decode_x87_modrm_st0, NULL, NULL}, + {0xd9, 2, 3, X86_DECODE_CMD_INVL, 10, false, false, + decode_x87_modrm_st0, NULL, NULL}, + {0xd9, 2, 0, X86_DECODE_CMD_FST, 4, false, false, + decode_x87_modrm_floatp, NULL, NULL}, + {0xd9, 3, 3, X86_DECODE_CMD_INVL, 10, false, false, + decode_x87_modrm_st0, NULL, NULL}, + {0xd9, 3, 0, X86_DECODE_CMD_FST, 4, false, true, + decode_x87_modrm_floatp, NULL, NULL}, + {0xd9, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, + decode_x87_modrm_st0, NULL, decode_d9_4}, + {0xd9, 4, 0, X86_DECODE_CMD_INVL, 4, false, false, + decode_x87_modrm_bytep, NULL, NULL}, + {0xd9, 5, 3, X86_DECODE_CMD_FLDxx, 10, false, false, NULL, NULL, NULL}, + {0xd9, 5, 0, X86_DECODE_CMD_FLDCW, 2, false, false, + decode_x87_modrm_bytep, NULL, NULL}, + + {0xd9, 7, 3, X86_DECODE_CMD_FNSTCW, 2, false, false, + decode_x87_modrm_bytep, NULL, NULL}, + {0xd9, 7, 0, X86_DECODE_CMD_FNSTCW, 2, false, false, + decode_x87_modrm_bytep, NULL, NULL}, + + {0xda, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xda, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0, + decode_x87_modrm_intp, NULL}, + {0xda, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, + decode_decode_x87_modrm_st0, NULL}, + {0xda, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0, + decode_x87_modrm_intp, NULL}, + {0xda, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, + decode_x87_modrm_st0, NULL}, + {0xda, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, + decode_x87_modrm_st0, NULL}, + {0xda, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL}, + {0xda, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0, + decode_x87_modrm_intp, NULL}, + {0xda, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, decode_x87_modrm_st0, + decode_decode_x87_modrm_st0, NULL}, + {0xda, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0, + decode_x87_modrm_intp, NULL}, + {0xda, 6, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL}, + {0xda, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0, + decode_x87_modrm_intp, NULL}, + {0xda, 7, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL}, + {0xda, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0, + decode_x87_modrm_intp, NULL}, + + {0xdb, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, + decode_x87_modrm_st0, NULL}, + {0xdb, 0, 0, X86_DECODE_CMD_FLD, 4, false, false, + decode_x87_modrm_intp, NULL, NULL}, + {0xdb, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdb, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdb, 2, 0, X86_DECODE_CMD_FST, 4, false, false, + decode_x87_modrm_intp, NULL, NULL}, + {0xdb, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdb, 3, 0, X86_DECODE_CMD_FST, 4, false, true, + decode_x87_modrm_intp, NULL, NULL}, + {0xdb, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, + decode_db_4}, + {0xdb, 4, 0, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL}, + 
{0xdb, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdb, 5, 0, X86_DECODE_CMD_FLD, 10, false, false, + decode_x87_modrm_floatp, NULL, NULL}, + {0xdb, 7, 0, X86_DECODE_CMD_FST, 10, false, true, + decode_x87_modrm_floatp, NULL, NULL}, + + {0xdc, 0, 3, X86_DECODE_CMD_FADD, 10, false, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdc, 0, 0, X86_DECODE_CMD_FADD, 8, false, false, + decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL}, + {0xdc, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdc, 1, 0, X86_DECODE_CMD_FMUL, 8, false, false, + decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL}, + {0xdc, 4, 3, X86_DECODE_CMD_FSUB, 10, true, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdc, 4, 0, X86_DECODE_CMD_FSUB, 8, false, false, + decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL}, + {0xdc, 5, 3, X86_DECODE_CMD_FSUB, 10, false, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdc, 5, 0, X86_DECODE_CMD_FSUB, 8, true, false, + decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL}, + {0xdc, 6, 3, X86_DECODE_CMD_FDIV, 10, true, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdc, 6, 0, X86_DECODE_CMD_FDIV, 8, false, false, + decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL}, + {0xdc, 7, 3, X86_DECODE_CMD_FDIV, 10, false, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdc, 7, 0, X86_DECODE_CMD_FDIV, 8, true, false, + decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL}, + + {0xdd, 0, 0, X86_DECODE_CMD_FLD, 8, false, false, + decode_x87_modrm_floatp, NULL, NULL}, + {0xdd, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdd, 2, 3, X86_DECODE_CMD_FST, 10, false, false, + decode_x87_modrm_st0, NULL, NULL}, + {0xdd, 2, 0, X86_DECODE_CMD_FST, 8, false, false, + decode_x87_modrm_floatp, NULL, NULL}, + {0xdd, 3, 3, X86_DECODE_CMD_FST, 10, false, true, + decode_x87_modrm_st0, NULL, NULL}, + {0xdd, 3, 0, X86_DECODE_CMD_FST, 8, false, true, + decode_x87_modrm_floatp, NULL, NULL}, + {0xdd, 4, 3, X86_DECODE_CMD_FUCOM, 10, false, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdd, 4, 0, X86_DECODE_CMD_FRSTOR, 8, false, false, + decode_x87_modrm_bytep, NULL, NULL}, + {0xdd, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdd, 7, 0, X86_DECODE_CMD_FNSTSW, 0, false, false, + decode_x87_modrm_bytep, NULL, NULL}, + {0xdd, 7, 3, X86_DECODE_CMD_FNSTSW, 0, false, false, + decode_x87_modrm_bytep, NULL, NULL}, + + {0xde, 0, 3, X86_DECODE_CMD_FADD, 10, false, true, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xde, 0, 0, X86_DECODE_CMD_FADD, 2, false, false, + decode_x87_modrm_st0, decode_x87_modrm_intp, NULL}, + {0xde, 1, 3, X86_DECODE_CMD_FMUL, 10, false, true, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xde, 1, 0, X86_DECODE_CMD_FMUL, 2, false, false, + decode_x87_modrm_st0, decode_x87_modrm_intp, NULL}, + {0xde, 4, 3, X86_DECODE_CMD_FSUB, 10, true, true, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xde, 4, 0, X86_DECODE_CMD_FSUB, 2, false, false, + decode_x87_modrm_st0, decode_x87_modrm_intp, NULL}, + {0xde, 5, 3, X86_DECODE_CMD_FSUB, 10, false, true, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xde, 5, 0, X86_DECODE_CMD_FSUB, 2, true, false, + decode_x87_modrm_st0, decode_x87_modrm_intp, NULL}, + {0xde, 6, 3, X86_DECODE_CMD_FDIV, 10, true, true, + 
decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xde, 6, 0, X86_DECODE_CMD_FDIV, 2, false, false, + decode_x87_modrm_st0, decode_x87_modrm_intp, NULL}, + {0xde, 7, 3, X86_DECODE_CMD_FDIV, 10, false, true, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xde, 7, 0, X86_DECODE_CMD_FDIV, 2, true, false, + decode_x87_modrm_st0, decode_x87_modrm_intp, NULL}, + + {0xdf, 0, 0, X86_DECODE_CMD_FLD, 2, false, false, + decode_x87_modrm_intp, NULL, NULL}, + {0xdf, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdf, 2, 3, X86_DECODE_CMD_FST, 10, false, true, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdf, 2, 0, X86_DECODE_CMD_FST, 2, false, false, + decode_x87_modrm_intp, NULL, NULL}, + {0xdf, 3, 3, X86_DECODE_CMD_FST, 10, false, true, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdf, 3, 0, X86_DECODE_CMD_FST, 2, false, true, + decode_x87_modrm_intp, NULL, NULL}, + {0xdf, 4, 3, X86_DECODE_CMD_FNSTSW, 2, false, true, + decode_x87_modrm_bytep, NULL, NULL}, + {0xdf, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, true, + decode_x87_modrm_st0, decode_x87_modrm_st0, NULL}, + {0xdf, 5, 0, X86_DECODE_CMD_FLD, 8, false, false, + decode_x87_modrm_intp, NULL, NULL}, + {0xdf, 7, 0, X86_DECODE_CMD_FST, 8, false, true, + decode_x87_modrm_intp, NULL, NULL}, +}; + +void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + target_ulong ptr = 0; + X86Seg seg = R_DS; + + if (!decode->modrm.mod && 6 == decode->modrm.rm) { + ptr = decode->displacement; + goto calc_addr; + } + + if (decode->displacement_size) { + ptr = sign(decode->displacement, decode->displacement_size); + } + + switch (decode->modrm.rm) { + case 0: + ptr += BX(env) + SI(env); + break; + case 1: + ptr += BX(env) + DI(env); + break; + case 2: + ptr += BP(env) + SI(env); + seg = R_SS; + break; + case 3: + ptr += BP(env) + DI(env); + seg = R_SS; + break; + case 4: + ptr += SI(env); + break; + case 5: + ptr += DI(env); + break; + case 6: + ptr += BP(env); + seg = R_SS; + break; + case 7: + ptr += BX(env); + break; + } +calc_addr: + if (X86_DECODE_CMD_LEA == decode->cmd) { + op->addr = (uint16_t)ptr; + } else { + op->addr = decode_linear_addr(env, decode, (uint16_t)ptr, seg); + } +} + +void *get_reg_ref(CPUX86State *env, int reg, int rex_present, + int is_extended, int size) +{ + void *ptr = NULL; + + if (is_extended) { + reg |= R_R8; + } + + switch (size) { + case 1: + if (is_extended || reg < 4 || rex_present) { + ptr = &RL(env, reg); + } else { + ptr = &RH(env, reg - 4); + } + break; + default: + ptr = &RRX(env, reg); + break; + } + return ptr; +} + +target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present, + int is_extended, int size) +{ + target_ulong val = 0; + memcpy(&val, + get_reg_ref(env, reg, rex_present, is_extended, size), + size); + return val; +} + +static target_ulong get_sib_val(CPUX86State *env, struct x86_decode *decode, + X86Seg *sel) +{ + target_ulong base = 0; + target_ulong scaled_index = 0; + int addr_size = decode->addressing_size; + int base_reg = decode->sib.base; + int index_reg = decode->sib.index; + + *sel = R_DS; + + if (decode->modrm.mod || base_reg != R_EBP) { + if (decode->rex.b) { + base_reg |= R_R8; + } + if (base_reg == R_ESP || base_reg == R_EBP) { + *sel = R_SS; + } + base = get_reg_val(env, decode->sib.base, decode->rex.rex, + decode->rex.b, addr_size); + } + + if (decode->rex.x) { + index_reg |= R_R8; + } + + if (index_reg != R_ESP) { + scaled_index = get_reg_val(env, 
index_reg, decode->rex.rex, + decode->rex.x, addr_size) << + decode->sib.scale; + } + return base + scaled_index; +} + +void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + X86Seg seg = R_DS; + target_ulong ptr = 0; + int addr_size = decode->addressing_size; + + if (decode->displacement_size) { + ptr = sign(decode->displacement, decode->displacement_size); + } + + if (4 == decode->modrm.rm) { + ptr += get_sib_val(env, decode, &seg); + } else if (!decode->modrm.mod && 5 == decode->modrm.rm) { + if (x86_is_long_mode(env_cpu(env))) { + ptr += env->eip + decode->len; + } else { + ptr = decode->displacement; + } + } else { + if (decode->modrm.rm == R_EBP || decode->modrm.rm == R_ESP) { + seg = R_SS; + } + ptr += get_reg_val(env, decode->modrm.rm, decode->rex.rex, + decode->rex.b, addr_size); + } + + if (X86_DECODE_CMD_LEA == decode->cmd) { + op->addr = (uint32_t)ptr; + } else { + op->addr = decode_linear_addr(env, decode, (uint32_t)ptr, seg); + } +} + +void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + X86Seg seg = R_DS; + int32_t offset = 0; + int mod = decode->modrm.mod; + int rm = decode->modrm.rm; + target_ulong ptr; + int src = decode->modrm.rm; + + if (decode->displacement_size) { + offset = sign(decode->displacement, decode->displacement_size); + } + + if (4 == rm) { + ptr = get_sib_val(env, decode, &seg) + offset; + } else if (0 == mod && 5 == rm) { + ptr = env->eip + decode->len + (int32_t) offset; + } else { + ptr = get_reg_val(env, src, decode->rex.rex, decode->rex.b, 8) + + (int64_t) offset; + } + + if (X86_DECODE_CMD_LEA == decode->cmd) { + op->addr = ptr; + } else { + op->addr = decode_linear_addr(env, decode, ptr, seg); + } +} + + +void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode, + struct x86_decode_op *op) +{ + if (3 == decode->modrm.mod) { + op->reg = decode->modrm.reg; + op->type = X86_VAR_REG; + op->regptr = get_reg_ref(env, decode->modrm.rm, decode->rex.rex, + decode->rex.b, decode->operand_size); + return; + } + + switch (decode->addressing_size) { + case 2: + calc_modrm_operand16(env, decode, op); + break; + case 4: + calc_modrm_operand32(env, decode, op); + break; + case 8: + calc_modrm_operand64(env, decode, op); + break; + default: + VM_PANIC_EX("unsupported address size %d\n", decode->addressing_size); + break; + } +} + +static void decode_prefix(CPUX86State *env, struct x86_decode *decode) +{ + while (1) { + /* + * REX prefix must come after legacy prefixes. + * REX before legacy is ignored. + * Clear rex to simulate this. + */ + uint8_t byte = decode_byte(env, decode); + switch (byte) { + case PREFIX_LOCK: + decode->lock = byte; + decode->rex.rex = 0; + break; + case PREFIX_REPN: + case PREFIX_REP: + decode->rep = byte; + decode->rex.rex = 0; + break; + case PREFIX_CS_SEG_OVERRIDE: + case PREFIX_SS_SEG_OVERRIDE: + case PREFIX_DS_SEG_OVERRIDE: + case PREFIX_ES_SEG_OVERRIDE: + case PREFIX_FS_SEG_OVERRIDE: + case PREFIX_GS_SEG_OVERRIDE: + decode->segment_override = byte; + decode->rex.rex = 0; + break; + case PREFIX_OP_SIZE_OVERRIDE: + decode->op_size_override = byte; + decode->rex.rex = 0; + break; + case PREFIX_ADDR_SIZE_OVERRIDE: + decode->addr_size_override = byte; + decode->rex.rex = 0; + break; + case PREFIX_REX ... 
(PREFIX_REX + 0xf): + if (x86_is_long_mode(env_cpu(env))) { + decode->rex.rex = byte; + break; + } + /* fall through when not in long mode */ + default: + decode->len--; + return; + } + } +} + +void set_addressing_size(CPUX86State *env, struct x86_decode *decode) +{ + decode->addressing_size = -1; + if (x86_is_real(env_cpu(env)) || x86_is_v8086(env_cpu(env))) { + if (decode->addr_size_override) { + decode->addressing_size = 4; + } else { + decode->addressing_size = 2; + } + } else if (!x86_is_long_mode(env_cpu(env))) { + /* protected */ + x86_segment_descriptor cs; + emul_ops->read_segment_descriptor(env_cpu(env), &cs, R_CS); + /* check db */ + if (cs.db) { + if (decode->addr_size_override) { + decode->addressing_size = 2; + } else { + decode->addressing_size = 4; + } + } else { + if (decode->addr_size_override) { + decode->addressing_size = 4; + } else { + decode->addressing_size = 2; + } + } + } else { + /* long */ + if (decode->addr_size_override) { + decode->addressing_size = 4; + } else { + decode->addressing_size = 8; + } + } +} + +void set_operand_size(CPUX86State *env, struct x86_decode *decode) +{ + decode->operand_size = -1; + if (x86_is_real(env_cpu(env)) || x86_is_v8086(env_cpu(env))) { + if (decode->op_size_override) { + decode->operand_size = 4; + } else { + decode->operand_size = 2; + } + } else if (!x86_is_long_mode(env_cpu(env))) { + /* protected */ + x86_segment_descriptor cs; + emul_ops->read_segment_descriptor(env_cpu(env), &cs, R_CS); + /* check db */ + if (cs.db) { + if (decode->op_size_override) { + decode->operand_size = 2; + } else{ + decode->operand_size = 4; + } + } else { + if (decode->op_size_override) { + decode->operand_size = 4; + } else { + decode->operand_size = 2; + } + } + } else { + /* long */ + if (decode->op_size_override) { + decode->operand_size = 2; + } else { + decode->operand_size = 4; + } + + if (decode->rex.w) { + decode->operand_size = 8; + } + } +} + +static void decode_sib(CPUX86State *env, struct x86_decode *decode) +{ + if ((decode->modrm.mod != 3) && (4 == decode->modrm.rm) && + (decode->addressing_size != 2)) { + decode->sib.sib = decode_byte(env, decode); + decode->sib_present = true; + } +} + +/* 16 bit modrm */ +int disp16_tbl[4][8] = { + {0, 0, 0, 0, 0, 0, 2, 0}, + {1, 1, 1, 1, 1, 1, 1, 1}, + {2, 2, 2, 2, 2, 2, 2, 2}, + {0, 0, 0, 0, 0, 0, 0, 0} +}; + +/* 32/64-bit modrm */ +int disp32_tbl[4][8] = { + {0, 0, 0, 0, -1, 4, 0, 0}, + {1, 1, 1, 1, 1, 1, 1, 1}, + {4, 4, 4, 4, 4, 4, 4, 4}, + {0, 0, 0, 0, 0, 0, 0, 0} +}; + +static inline void decode_displacement(CPUX86State *env, struct x86_decode *decode) +{ + int addressing_size = decode->addressing_size; + int mod = decode->modrm.mod; + int rm = decode->modrm.rm; + + decode->displacement_size = 0; + switch (addressing_size) { + case 2: + decode->displacement_size = disp16_tbl[mod][rm]; + if (decode->displacement_size) { + decode->displacement = (uint16_t)decode_bytes(env, decode, + decode->displacement_size); + } + break; + case 4: + case 8: + if (-1 == disp32_tbl[mod][rm]) { + if (5 == decode->sib.base) { + decode->displacement_size = 4; + } + } else { + decode->displacement_size = disp32_tbl[mod][rm]; + } + + if (decode->displacement_size) { + decode->displacement = (uint32_t)decode_bytes(env, decode, + decode->displacement_size); + } + break; + } +} + +static inline void decode_modrm(CPUX86State *env, struct x86_decode *decode) +{ + decode->modrm.modrm = decode_byte(env, decode); + decode->is_modrm = true; + + decode_sib(env, decode); + decode_displacement(env, decode); +} + +static 
inline void decode_opcode_general(CPUX86State *env, + struct x86_decode *decode, + uint8_t opcode, + struct decode_tbl *inst_decoder) +{ + decode->cmd = inst_decoder->cmd; + if (inst_decoder->operand_size) { + decode->operand_size = inst_decoder->operand_size; + } + + if (inst_decoder->is_modrm) { + decode_modrm(env, decode); + } + if (inst_decoder->decode_op1) { + inst_decoder->decode_op1(env, decode, &decode->op[0]); + } + if (inst_decoder->decode_op2) { + inst_decoder->decode_op2(env, decode, &decode->op[1]); + } + if (inst_decoder->decode_op3) { + inst_decoder->decode_op3(env, decode, &decode->op[2]); + } + if (inst_decoder->decode_op4) { + inst_decoder->decode_op4(env, decode, &decode->op[3]); + } + if (inst_decoder->decode_postfix) { + inst_decoder->decode_postfix(env, decode); + } +} + +static inline void decode_opcode_1(CPUX86State *env, struct x86_decode *decode, + uint8_t opcode) +{ + struct decode_tbl *inst_decoder = &_decode_tbl1[opcode]; + decode_opcode_general(env, decode, opcode, inst_decoder); +} + + +static inline void decode_opcode_2(CPUX86State *env, struct x86_decode *decode, + uint8_t opcode) +{ + struct decode_tbl *inst_decoder = &_decode_tbl2[opcode]; + decode_opcode_general(env, decode, opcode, inst_decoder); +} + +static void decode_opcodes(CPUX86State *env, struct x86_decode *decode) +{ + uint8_t opcode; + + opcode = decode_byte(env, decode); + decode->opcode[decode->opcode_len++] = opcode; + if (opcode != OPCODE_ESCAPE) { + decode_opcode_1(env, decode, opcode); + } else { + opcode = decode_byte(env, decode); + decode->opcode[decode->opcode_len++] = opcode; + decode_opcode_2(env, decode, opcode); + } +} + +uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode) +{ + memset(decode, 0, sizeof(*decode)); + decode_prefix(env, decode); + set_addressing_size(env, decode); + set_operand_size(env, decode); + + decode_opcodes(env, decode); + + return decode->len; +} + +void init_decoder(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(_decode_tbl1); i++) { + memcpy(&_decode_tbl1[i], &invl_inst, sizeof(invl_inst)); + } + for (i = 0; i < ARRAY_SIZE(_decode_tbl2); i++) { + memcpy(&_decode_tbl2[i], &invl_inst, sizeof(invl_inst)); + } + for (i = 0; i < ARRAY_SIZE(_decode_tbl3); i++) { + memcpy(&_decode_tbl3[i], &invl_inst_x87, sizeof(invl_inst_x87)); + + } + for (i = 0; i < ARRAY_SIZE(_1op_inst); i++) { + _decode_tbl1[_1op_inst[i].opcode] = _1op_inst[i]; + } + for (i = 0; i < ARRAY_SIZE(_2op_inst); i++) { + _decode_tbl2[_2op_inst[i].opcode] = _2op_inst[i]; + } + for (i = 0; i < ARRAY_SIZE(_x87_inst); i++) { + int index = ((_x87_inst[i].opcode & 0xf) << 4) | + ((_x87_inst[i].modrm_mod & 1) << 3) | + _x87_inst[i].modrm_reg; + _decode_tbl3[index] = _x87_inst[i]; + } +} + + +const char *decode_cmd_to_string(enum x86_decode_cmd cmd) +{ + static const char *cmds[] = {"INVL", "PUSH", "PUSH_SEG", "POP", "POP_SEG", + "MOV", "MOVSX", "MOVZX", "CALL_NEAR", "CALL_NEAR_ABS_INDIRECT", + "CALL_FAR_ABS_INDIRECT", "CMD_CALL_FAR", "RET_NEAR", "RET_FAR", "ADD", + "OR", "ADC", "SBB", "AND", "SUB", "XOR", "CMP", "INC", "DEC", "TST", + "NOT", "NEG", "JMP_NEAR", "JMP_NEAR_ABS_INDIRECT", "JMP_FAR", + "JMP_FAR_ABS_INDIRECT", "LEA", "JXX", "JCXZ", "SETXX", "MOV_TO_SEG", + "MOV_FROM_SEG", "CLI", "STI", "CLD", "STD", "STC", "CLC", "OUT", "IN", + "INS", "OUTS", "LIDT", "SIDT", "LGDT", "SGDT", "SMSW", "LMSW", + "RDTSCP", "INVLPG", "MOV_TO_CR", "MOV_FROM_CR", "MOV_TO_DR", + "MOV_FROM_DR", "PUSHF", "POPF", "CPUID", "ROL", "ROR", "RCL", "RCR", + "SHL", "SAL", "SHR", "SHRD", "SHLD", "SAR", 
"DIV", "IDIV", "MUL", + "IMUL_3", "IMUL_2", "IMUL_1", "MOVS", "CMPS", "SCAS", "LODS", "STOS", + "BSWAP", "XCHG", "RDTSC", "RDMSR", "WRMSR", "ENTER", "LEAVE", "BT", + "BTS", "BTC", "BTR", "BSF", "BSR", "IRET", "INT", "POPA", "PUSHA", + "CWD", "CBW", "DAS", "AAD", "AAM", "AAS", "LOOP", "SLDT", "STR", "LLDT", + "LTR", "VERR", "VERW", "SAHF", "LAHF", "WBINVD", "LDS", "LSS", "LES", + "LGS", "LFS", "CMC", "XLAT", "NOP", "CMOV", "CLTS", "XADD", "HLT", + "CMPXCHG8B", "CMPXCHG", "POPCNT", "FNINIT", "FLD", "FLDxx", "FNSTCW", + "FNSTSW", "FNSETPM", "FSAVE", "FRSTOR", "FXSAVE", "FXRSTOR", "FDIV", + "FMUL", "FSUB", "FADD", "EMMS", "MFENCE", "SFENCE", "LFENCE", + "PREFETCH", "FST", "FABS", "FUCOM", "FUCOMI", "FLDCW", + "FXCH", "FCHS", "FCMOV", "FRNDINT", "FXAM", "LAST"}; + return cmds[cmd]; +} + +target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode, + target_ulong addr, X86Seg seg) +{ + switch (decode->segment_override) { + case PREFIX_CS_SEG_OVERRIDE: + seg = R_CS; + break; + case PREFIX_SS_SEG_OVERRIDE: + seg = R_SS; + break; + case PREFIX_DS_SEG_OVERRIDE: + seg = R_DS; + break; + case PREFIX_ES_SEG_OVERRIDE: + seg = R_ES; + break; + case PREFIX_FS_SEG_OVERRIDE: + seg = R_FS; + break; + case PREFIX_GS_SEG_OVERRIDE: + seg = R_GS; + break; + default: + break; + } + return linear_addr_size(env_cpu(env), addr, decode->addressing_size, seg); +} diff --git a/target/i386/hvf/x86_decode.h b/target/i386/emulate/x86_decode.h similarity index 97% rename from target/i386/hvf/x86_decode.h rename to target/i386/emulate/x86_decode.h index a2d7a2a27b6..927645af1a3 100644 --- a/target/i386/hvf/x86_decode.h +++ b/target/i386/emulate/x86_decode.h @@ -15,8 +15,8 @@ * License along with this program; if not, see . */ -#ifndef HVF_X86_DECODE_H -#define HVF_X86_DECODE_H +#ifndef X86_EMU_DECODE_H +#define X86_EMU_DECODE_H #include "cpu.h" #include "x86.h" @@ -266,7 +266,10 @@ typedef struct x86_decode_op { int reg; target_ulong val; - target_ulong ptr; + union { + target_ulong addr; + void *regptr; + }; } x86_decode_op; typedef struct x86_decode { @@ -295,16 +298,14 @@ typedef struct x86_decode { struct x86_modrm modrm; struct x86_decode_op op[4]; bool is_fpu; - uint32_t flags_mask; - } x86_decode; uint64_t sign(uint64_t val, int size); uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode); -target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present, - int is_extended, int size); +void *get_reg_ref(CPUX86State *env, int reg, int rex_present, + int is_extended, int size); target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present, int is_extended, int size); void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode, diff --git a/target/i386/emulate/x86_emu.c b/target/i386/emulate/x86_emu.c new file mode 100644 index 00000000000..db7a7f7437d --- /dev/null +++ b/target/i386/emulate/x86_emu.c @@ -0,0 +1,1264 @@ +/* + * Copyright (C) 2016 Veertu Inc, + * Copyright (C) 2017 Google Inc, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see . + */ + +///////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2001-2012 The Bochs Project +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, see +// . +///////////////////////////////////////////////////////////////////////// + +#include "qemu/osdep.h" +#include "panic.h" +#include "x86_decode.h" +#include "x86.h" +#include "x86_emu.h" +#include "x86_flags.h" + +#define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \ +{ \ + fetch_operands(env, decode, 2, true, true, false); \ + switch (decode->operand_size) { \ + case 1: \ + { \ + uint8_t v1 = (uint8_t)decode->op[0].val; \ + uint8_t v2 = (uint8_t)decode->op[1].val; \ + uint8_t diff = v1 cmd v2; \ + if (save_res) { \ + write_val_ext(env, &decode->op[0], diff, 1); \ + } \ + FLAGS_FUNC##8(env, v1, v2, diff); \ + break; \ + } \ + case 2: \ + { \ + uint16_t v1 = (uint16_t)decode->op[0].val; \ + uint16_t v2 = (uint16_t)decode->op[1].val; \ + uint16_t diff = v1 cmd v2; \ + if (save_res) { \ + write_val_ext(env, &decode->op[0], diff, 2); \ + } \ + FLAGS_FUNC##16(env, v1, v2, diff); \ + break; \ + } \ + case 4: \ + { \ + uint32_t v1 = (uint32_t)decode->op[0].val; \ + uint32_t v2 = (uint32_t)decode->op[1].val; \ + uint32_t diff = v1 cmd v2; \ + if (save_res) { \ + write_val_ext(env, &decode->op[0], diff, 4); \ + } \ + FLAGS_FUNC##32(env, v1, v2, diff); \ + break; \ + } \ + default: \ + VM_PANIC("bad size\n"); \ + } \ +} \ + +target_ulong read_reg(CPUX86State *env, int reg, int size) +{ + switch (size) { + case 1: + return x86_reg(env, reg)->lx; + case 2: + return x86_reg(env, reg)->rx; + case 4: + return x86_reg(env, reg)->erx; + case 8: + return x86_reg(env, reg)->rrx; + default: + abort(); + } + return 0; +} + +void write_reg(CPUX86State *env, int reg, target_ulong val, int size) +{ + switch (size) { + case 1: + x86_reg(env, reg)->lx = val; + break; + case 2: + x86_reg(env, reg)->rx = val; + break; + case 4: + x86_reg(env, reg)->rrx = (uint32_t)val; + break; + case 8: + x86_reg(env, reg)->rrx = val; + break; + default: + abort(); + } +} + +target_ulong read_val_from_reg(void *reg_ptr, int size) +{ + target_ulong val; + + switch (size) { + case 1: + val = *(uint8_t *)reg_ptr; + break; + case 2: + val = *(uint16_t *)reg_ptr; + break; + case 4: + val = *(uint32_t *)reg_ptr; + break; + case 8: + val = *(uint64_t *)reg_ptr; + break; + default: + abort(); + } + return val; +} + +void write_val_to_reg(void *reg_ptr, target_ulong val, int size) +{ + switch (size) { + case 1: + *(uint8_t *)reg_ptr = val; + break; + case 2: + *(uint16_t *)reg_ptr = val; + break; + case 4: + *(uint64_t *)reg_ptr = (uint32_t)val; + break; + case 8: + *(uint64_t *)reg_ptr = val; + break; + default: + abort(); + } +} + +static void write_val_to_mem(CPUX86State *env, target_ulong ptr, target_ulong val, int size) +{ + 
emul_ops->write_mem(env_cpu(env), &val, ptr, size); +} + +void write_val_ext(CPUX86State *env, struct x86_decode_op *decode, target_ulong val, int size) +{ + if (decode->type == X86_VAR_REG) { + write_val_to_reg(decode->regptr, val, size); + } else { + write_val_to_mem(env, decode->addr, val, size); + } +} + +uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes) +{ + emul_ops->read_mem(env_cpu(env), env->emu_mmio_buf, ptr, bytes); + return env->emu_mmio_buf; +} + + +static target_ulong read_val_from_mem(CPUX86State *env, target_long ptr, int size) +{ + target_ulong val; + uint8_t *mmio_ptr; + + mmio_ptr = read_mmio(env, ptr, size); + switch (size) { + case 1: + val = *(uint8_t *)mmio_ptr; + break; + case 2: + val = *(uint16_t *)mmio_ptr; + break; + case 4: + val = *(uint32_t *)mmio_ptr; + break; + case 8: + val = *(uint64_t *)mmio_ptr; + break; + default: + VM_PANIC("bad size\n"); + break; + } + return val; +} + +target_ulong read_val_ext(CPUX86State *env, struct x86_decode_op *decode, int size) +{ + if (decode->type == X86_VAR_REG) { + return read_val_from_reg(decode->regptr, size); + } else { + return read_val_from_mem(env, decode->addr, size); + } +} + +static void fetch_operands(CPUX86State *env, struct x86_decode *decode, + int n, bool val_op0, bool val_op1, bool val_op2) +{ + int i; + bool calc_val[3] = {val_op0, val_op1, val_op2}; + + for (i = 0; i < n; i++) { + switch (decode->op[i].type) { + case X86_VAR_IMMEDIATE: + break; + case X86_VAR_REG: + VM_PANIC_ON(!decode->op[i].regptr); + if (calc_val[i]) { + decode->op[i].val = read_val_from_reg(decode->op[i].regptr, + decode->operand_size); + } + break; + case X86_VAR_RM: + calc_modrm_operand(env, decode, &decode->op[i]); + if (calc_val[i]) { + decode->op[i].val = read_val_ext(env, &decode->op[i], + decode->operand_size); + } + break; + case X86_VAR_OFFSET: + decode->op[i].addr = decode_linear_addr(env, decode, + decode->op[i].addr, + R_DS); + if (calc_val[i]) { + decode->op[i].val = read_val_ext(env, &decode->op[i], + decode->operand_size); + } + break; + default: + break; + } + } +} + +static void exec_mov(CPUX86State *env, struct x86_decode *decode) +{ + fetch_operands(env, decode, 2, false, true, false); + write_val_ext(env, &decode->op[0], decode->op[1].val, + decode->operand_size); + + env->eip += decode->len; +} + +static void exec_add(CPUX86State *env, struct x86_decode *decode) +{ + EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true); + env->eip += decode->len; +} + +static void exec_or(CPUX86State *env, struct x86_decode *decode) +{ + EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true); + env->eip += decode->len; +} + +static void exec_adc(CPUX86State *env, struct x86_decode *decode) +{ + EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true); + env->eip += decode->len; +} + +static void exec_sbb(CPUX86State *env, struct x86_decode *decode) +{ + EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true); + env->eip += decode->len; +} + +static void exec_and(CPUX86State *env, struct x86_decode *decode) +{ + EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true); + env->eip += decode->len; +} + +static void exec_sub(CPUX86State *env, struct x86_decode *decode) +{ + EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true); + env->eip += decode->len; +} + +static void exec_xor(CPUX86State *env, struct x86_decode *decode) +{ + EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true); + env->eip += decode->len; +} + +static void 
exec_neg(CPUX86State *env, struct x86_decode *decode) +{ + /*EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/ + int32_t val; + fetch_operands(env, decode, 2, true, true, false); + + val = 0 - sign(decode->op[1].val, decode->operand_size); + write_val_ext(env, &decode->op[1], val, decode->operand_size); + + if (4 == decode->operand_size) { + SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val); + } else if (2 == decode->operand_size) { + SET_FLAGS_OSZAPC_SUB16(env, 0, 0 - val, val); + } else if (1 == decode->operand_size) { + SET_FLAGS_OSZAPC_SUB8(env, 0, 0 - val, val); + } else { + VM_PANIC("bad op size\n"); + } + + /*lflags_to_rflags(env);*/ + env->eip += decode->len; +} + +static void exec_cmp(CPUX86State *env, struct x86_decode *decode) +{ + EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false); + env->eip += decode->len; +} + +static void exec_inc(CPUX86State *env, struct x86_decode *decode) +{ + decode->op[1].type = X86_VAR_IMMEDIATE; + decode->op[1].val = 0; + + EXEC_2OP_FLAGS_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true); + + env->eip += decode->len; +} + +static void exec_dec(CPUX86State *env, struct x86_decode *decode) +{ + decode->op[1].type = X86_VAR_IMMEDIATE; + decode->op[1].val = 0; + + EXEC_2OP_FLAGS_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true); + env->eip += decode->len; +} + +static void exec_tst(CPUX86State *env, struct x86_decode *decode) +{ + EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false); + env->eip += decode->len; +} + +static void exec_not(CPUX86State *env, struct x86_decode *decode) +{ + fetch_operands(env, decode, 1, true, false, false); + + write_val_ext(env, &decode->op[0], ~decode->op[0].val, + decode->operand_size); + env->eip += decode->len; +} + +void exec_movzx(CPUX86State *env, struct x86_decode *decode) +{ + int src_op_size; + int op_size = decode->operand_size; + + fetch_operands(env, decode, 1, false, false, false); + + if (0xb6 == decode->opcode[1]) { + src_op_size = 1; + } else { + src_op_size = 2; + } + decode->operand_size = src_op_size; + calc_modrm_operand(env, decode, &decode->op[1]); + decode->op[1].val = read_val_ext(env, &decode->op[1], src_op_size); + write_val_ext(env, &decode->op[0], decode->op[1].val, op_size); + + env->eip += decode->len; +} + +static void exec_out(CPUX86State *env, struct x86_decode *decode) +{ + switch (decode->opcode[0]) { + case 0xe6: + emul_ops->handle_io(env_cpu(env), decode->op[0].val, &AL(env), 1, 1, 1); + break; + case 0xe7: + emul_ops->handle_io(env_cpu(env), decode->op[0].val, &RAX(env), 1, + decode->operand_size, 1); + break; + case 0xee: + emul_ops->handle_io(env_cpu(env), DX(env), &AL(env), 1, 1, 1); + break; + case 0xef: + emul_ops->handle_io(env_cpu(env), DX(env), &RAX(env), 1, + decode->operand_size, 1); + break; + default: + VM_PANIC("Bad out opcode\n"); + break; + } + env->eip += decode->len; +} + +static void exec_in(CPUX86State *env, struct x86_decode *decode) +{ + target_ulong val = 0; + switch (decode->opcode[0]) { + case 0xe4: + emul_ops->handle_io(env_cpu(env), decode->op[0].val, &AL(env), 0, 1, 1); + break; + case 0xe5: + emul_ops->handle_io(env_cpu(env), decode->op[0].val, &val, 0, + decode->operand_size, 1); + if (decode->operand_size == 2) { + AX(env) = val; + } else { + RAX(env) = (uint32_t)val; + } + break; + case 0xec: + emul_ops->handle_io(env_cpu(env), DX(env), &AL(env), 0, 1, 1); + break; + case 0xed: + emul_ops->handle_io(env_cpu(env), DX(env), &val, 0, + decode->operand_size, 1); + if (decode->operand_size == 2) { + AX(env) = val; + } else { 
+ RAX(env) = (uint32_t)val; + } + + break; + default: + VM_PANIC("Bad in opcode\n"); + break; + } + + env->eip += decode->len; +} + +static inline void string_increment_reg(CPUX86State *env, int reg, + struct x86_decode *decode) +{ + target_ulong val = read_reg(env, reg, decode->addressing_size); + if (env->eflags & DF_MASK) { + val -= decode->operand_size; + } else { + val += decode->operand_size; + } + write_reg(env, reg, val, decode->addressing_size); +} + +static inline void string_rep(CPUX86State *env, struct x86_decode *decode, + void (*func)(CPUX86State *env, + struct x86_decode *ins), int rep) +{ + target_ulong rcx = read_reg(env, R_ECX, decode->addressing_size); + while (rcx--) { + func(env, decode); + write_reg(env, R_ECX, rcx, decode->addressing_size); + if ((PREFIX_REP == rep) && !env->cc_dst) { + break; + } + if ((PREFIX_REPN == rep) && env->cc_dst) { + break; + } + } +} + +static void exec_ins_single(CPUX86State *env, struct x86_decode *decode) +{ + target_ulong addr = linear_addr_size(env_cpu(env), RDI(env), + decode->addressing_size, R_ES); + + emul_ops->handle_io(env_cpu(env), DX(env), env->emu_mmio_buf, 0, + decode->operand_size, 1); + emul_ops->write_mem(env_cpu(env), env->emu_mmio_buf, addr, + decode->operand_size); + + string_increment_reg(env, R_EDI, decode); +} + +static void exec_ins(CPUX86State *env, struct x86_decode *decode) +{ + if (decode->rep) { + string_rep(env, decode, exec_ins_single, 0); + } else { + exec_ins_single(env, decode); + } + + env->eip += decode->len; +} + +static void exec_outs_single(CPUX86State *env, struct x86_decode *decode) +{ + target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS); + + emul_ops->read_mem(env_cpu(env), env->emu_mmio_buf, addr, + decode->operand_size); + emul_ops->handle_io(env_cpu(env), DX(env), env->emu_mmio_buf, 1, + decode->operand_size, 1); + + string_increment_reg(env, R_ESI, decode); +} + +static void exec_outs(CPUX86State *env, struct x86_decode *decode) +{ + if (decode->rep) { + string_rep(env, decode, exec_outs_single, 0); + } else { + exec_outs_single(env, decode); + } + + env->eip += decode->len; +} + +static void exec_movs_single(CPUX86State *env, struct x86_decode *decode) +{ + target_ulong src_addr; + target_ulong dst_addr; + target_ulong val; + + src_addr = decode_linear_addr(env, decode, RSI(env), R_DS); + dst_addr = linear_addr_size(env_cpu(env), RDI(env), + decode->addressing_size, R_ES); + + val = read_val_from_mem(env, src_addr, decode->operand_size); + write_val_to_mem(env, dst_addr, val, decode->operand_size); + + string_increment_reg(env, R_ESI, decode); + string_increment_reg(env, R_EDI, decode); +} + +static void exec_movs(CPUX86State *env, struct x86_decode *decode) +{ + if (decode->rep) { + string_rep(env, decode, exec_movs_single, 0); + } else { + exec_movs_single(env, decode); + } + + env->eip += decode->len; +} + +static void exec_cmps_single(CPUX86State *env, struct x86_decode *decode) +{ + target_ulong src_addr; + target_ulong dst_addr; + + src_addr = decode_linear_addr(env, decode, RSI(env), R_DS); + dst_addr = linear_addr_size(env_cpu(env), RDI(env), + decode->addressing_size, R_ES); + + decode->op[0].type = X86_VAR_IMMEDIATE; + decode->op[0].val = read_val_from_mem(env, src_addr, decode->operand_size); + decode->op[1].type = X86_VAR_IMMEDIATE; + decode->op[1].val = read_val_from_mem(env, dst_addr, decode->operand_size); + + EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false); + + string_increment_reg(env, R_ESI, decode); + string_increment_reg(env, R_EDI, 
decode); +} + +static void exec_cmps(CPUX86State *env, struct x86_decode *decode) +{ + if (decode->rep) { + string_rep(env, decode, exec_cmps_single, decode->rep); + } else { + exec_cmps_single(env, decode); + } + env->eip += decode->len; +} + + +static void exec_stos_single(CPUX86State *env, struct x86_decode *decode) +{ + target_ulong addr; + target_ulong val; + + addr = linear_addr_size(env_cpu(env), RDI(env), + decode->addressing_size, R_ES); + val = read_reg(env, R_EAX, decode->operand_size); + emul_ops->write_mem(env_cpu(env), &val, addr, decode->operand_size); + + string_increment_reg(env, R_EDI, decode); +} + + +static void exec_stos(CPUX86State *env, struct x86_decode *decode) +{ + if (decode->rep) { + string_rep(env, decode, exec_stos_single, 0); + } else { + exec_stos_single(env, decode); + } + + env->eip += decode->len; +} + +static void exec_scas_single(CPUX86State *env, struct x86_decode *decode) +{ + target_ulong addr; + + addr = linear_addr_size(env_cpu(env), RDI(env), + decode->addressing_size, R_ES); + decode->op[1].type = X86_VAR_IMMEDIATE; + emul_ops->read_mem(env_cpu(env), &decode->op[1].val, addr, decode->operand_size); + + EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false); + string_increment_reg(env, R_EDI, decode); +} + +static void exec_scas(CPUX86State *env, struct x86_decode *decode) +{ + decode->op[0].type = X86_VAR_REG; + decode->op[0].reg = R_EAX; + if (decode->rep) { + string_rep(env, decode, exec_scas_single, decode->rep); + } else { + exec_scas_single(env, decode); + } + + env->eip += decode->len; +} + +static void exec_lods_single(CPUX86State *env, struct x86_decode *decode) +{ + target_ulong addr; + target_ulong val = 0; + + addr = decode_linear_addr(env, decode, RSI(env), R_DS); + emul_ops->read_mem(env_cpu(env), &val, addr, decode->operand_size); + write_reg(env, R_EAX, val, decode->operand_size); + + string_increment_reg(env, R_ESI, decode); +} + +static void exec_lods(CPUX86State *env, struct x86_decode *decode) +{ + if (decode->rep) { + string_rep(env, decode, exec_lods_single, 0); + } else { + exec_lods_single(env, decode); + } + + env->eip += decode->len; +} + +void x86_emul_raise_exception(CPUX86State *env, int exception_index, int error_code) +{ + env->exception_nr = exception_index; + env->error_code = error_code; + env->has_error_code = true; + env->exception_injected = 1; +} + +static void exec_rdmsr(CPUX86State *env, struct x86_decode *decode) +{ + emul_ops->simulate_rdmsr(env_cpu(env)); + env->eip += decode->len; +} + +static void exec_wrmsr(CPUX86State *env, struct x86_decode *decode) +{ + emul_ops->simulate_wrmsr(env_cpu(env)); + env->eip += decode->len; +} + +/* + * flag: + * 0 - bt, 1 - btc, 2 - bts, 3 - btr + */ +static void do_bt(CPUX86State *env, struct x86_decode *decode, int flag) +{ + int32_t displacement; + uint8_t index; + bool cf; + int mask = (4 == decode->operand_size) ? 
0x1f : 0xf; + + VM_PANIC_ON(decode->rex.rex); + + fetch_operands(env, decode, 2, false, true, false); + index = decode->op[1].val & mask; + + if (decode->op[0].type != X86_VAR_REG) { + if (4 == decode->operand_size) { + displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32; + decode->op[0].addr += 4 * displacement; + } else if (2 == decode->operand_size) { + displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16; + decode->op[0].addr += 2 * displacement; + } else { + VM_PANIC("bt 64bit\n"); + } + } + decode->op[0].val = read_val_ext(env, &decode->op[0], + decode->operand_size); + cf = (decode->op[0].val >> index) & 0x01; + + switch (flag) { + case 0: + set_CF(env, cf); + return; + case 1: + decode->op[0].val ^= (1u << index); + break; + case 2: + decode->op[0].val |= (1u << index); + break; + case 3: + decode->op[0].val &= ~(1u << index); + break; + } + write_val_ext(env, &decode->op[0], decode->op[0].val, + decode->operand_size); + set_CF(env, cf); +} + +static void exec_bt(CPUX86State *env, struct x86_decode *decode) +{ + do_bt(env, decode, 0); + env->eip += decode->len; +} + +static void exec_btc(CPUX86State *env, struct x86_decode *decode) +{ + do_bt(env, decode, 1); + env->eip += decode->len; +} + +static void exec_btr(CPUX86State *env, struct x86_decode *decode) +{ + do_bt(env, decode, 3); + env->eip += decode->len; +} + +static void exec_bts(CPUX86State *env, struct x86_decode *decode) +{ + do_bt(env, decode, 2); + env->eip += decode->len; +} + +void exec_shl(CPUX86State *env, struct x86_decode *decode) +{ + uint8_t count; + int of = 0, cf = 0; + + fetch_operands(env, decode, 2, true, true, false); + + count = decode->op[1].val; + count &= 0x1f; /* count is masked to 5 bits*/ + if (!count) { + goto exit; + } + + switch (decode->operand_size) { + case 1: + { + uint8_t res = 0; + if (count <= 8) { + res = (decode->op[0].val << count); + cf = (decode->op[0].val >> (8 - count)) & 0x1; + of = cf ^ (res >> 7); + } + + write_val_ext(env, &decode->op[0], res, 1); + SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res); + SET_FLAGS_OxxxxC(env, of, cf); + break; + } + case 2: + { + uint16_t res = 0; + + /* from bochs */ + if (count <= 16) { + res = (decode->op[0].val << count); + cf = (decode->op[0].val >> (16 - count)) & 0x1; + of = cf ^ (res >> 15); /* of = cf ^ result15 */ + } + + write_val_ext(env, &decode->op[0], res, 2); + SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res); + SET_FLAGS_OxxxxC(env, of, cf); + break; + } + case 4: + { + uint32_t res = decode->op[0].val << count; + + write_val_ext(env, &decode->op[0], res, 4); + SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res); + cf = (decode->op[0].val >> (32 - count)) & 0x1; + of = cf ^ (res >> 31); /* of = cf ^ result31 */ + SET_FLAGS_OxxxxC(env, of, cf); + break; + } + default: + abort(); + } + +exit: + /* lflags_to_rflags(env); */ + env->eip += decode->len; +} + +void exec_movsx(CPUX86State *env, struct x86_decode *decode) +{ + int src_op_size; + int op_size = decode->operand_size; + + fetch_operands(env, decode, 2, false, false, false); + + if (0xbe == decode->opcode[1]) { + src_op_size = 1; + } else { + src_op_size = 2; + } + + decode->operand_size = src_op_size; + calc_modrm_operand(env, decode, &decode->op[1]); + decode->op[1].val = sign(read_val_ext(env, &decode->op[1], src_op_size), + src_op_size); + + write_val_ext(env, &decode->op[0], decode->op[1].val, op_size); + + env->eip += decode->len; +} + +void exec_ror(CPUX86State *env, struct x86_decode *decode) +{ + uint8_t count; + + fetch_operands(env, decode, 2, true, true, false); + count = 
decode->op[1].val; + + switch (decode->operand_size) { + case 1: + { + uint32_t bit6, bit7; + uint8_t res; + + if ((count & 0x07) == 0) { + if (count & 0x18) { + bit6 = ((uint8_t)decode->op[0].val >> 6) & 1; + bit7 = ((uint8_t)decode->op[0].val >> 7) & 1; + SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7); + } + } else { + count &= 0x7; /* use only bottom 3 bits */ + res = ((uint8_t)decode->op[0].val >> count) | + ((uint8_t)decode->op[0].val << (8 - count)); + write_val_ext(env, &decode->op[0], res, 1); + bit6 = (res >> 6) & 1; + bit7 = (res >> 7) & 1; + /* set eflags: ROR count affects the following flags: C, O */ + SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7); + } + break; + } + case 2: + { + uint32_t bit14, bit15; + uint16_t res; + + if ((count & 0x0f) == 0) { + if (count & 0x10) { + bit14 = ((uint16_t)decode->op[0].val >> 14) & 1; + bit15 = ((uint16_t)decode->op[0].val >> 15) & 1; + /* of = result14 ^ result15 */ + SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15); + } + } else { + count &= 0x0f; /* use only 4 LSB's */ + res = ((uint16_t)decode->op[0].val >> count) | + ((uint16_t)decode->op[0].val << (16 - count)); + write_val_ext(env, &decode->op[0], res, 2); + + bit14 = (res >> 14) & 1; + bit15 = (res >> 15) & 1; + /* of = result14 ^ result15 */ + SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15); + } + break; + } + case 4: + { + uint32_t bit31, bit30; + uint32_t res; + + count &= 0x1f; + if (count) { + res = ((uint32_t)decode->op[0].val >> count) | + ((uint32_t)decode->op[0].val << (32 - count)); + write_val_ext(env, &decode->op[0], res, 4); + + bit31 = (res >> 31) & 1; + bit30 = (res >> 30) & 1; + /* of = result30 ^ result31 */ + SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31); + } + break; + } + } + env->eip += decode->len; +} + +void exec_rol(CPUX86State *env, struct x86_decode *decode) +{ + uint8_t count; + + fetch_operands(env, decode, 2, true, true, false); + count = decode->op[1].val; + + switch (decode->operand_size) { + case 1: + { + uint32_t bit0, bit7; + uint8_t res; + + if ((count & 0x07) == 0) { + if (count & 0x18) { + bit0 = ((uint8_t)decode->op[0].val & 1); + bit7 = ((uint8_t)decode->op[0].val >> 7); + SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0); + } + } else { + count &= 0x7; /* use only lowest 3 bits */ + res = ((uint8_t)decode->op[0].val << count) | + ((uint8_t)decode->op[0].val >> (8 - count)); + + write_val_ext(env, &decode->op[0], res, 1); + /* set eflags: + * ROL count affects the following flags: C, O + */ + bit0 = (res & 1); + bit7 = (res >> 7); + SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0); + } + break; + } + case 2: + { + uint32_t bit0, bit15; + uint16_t res; + + if ((count & 0x0f) == 0) { + if (count & 0x10) { + bit0 = ((uint16_t)decode->op[0].val & 0x1); + bit15 = ((uint16_t)decode->op[0].val >> 15); + /* of = cf ^ result15 */ + SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0); + } + } else { + count &= 0x0f; /* only use bottom 4 bits */ + res = ((uint16_t)decode->op[0].val << count) | + ((uint16_t)decode->op[0].val >> (16 - count)); + + write_val_ext(env, &decode->op[0], res, 2); + bit0 = (res & 0x1); + bit15 = (res >> 15); + /* of = cf ^ result15 */ + SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0); + } + break; + } + case 4: + { + uint32_t bit0, bit31; + uint32_t res; + + count &= 0x1f; + if (count) { + res = ((uint32_t)decode->op[0].val << count) | + ((uint32_t)decode->op[0].val >> (32 - count)); + + write_val_ext(env, &decode->op[0], res, 4); + bit0 = (res & 0x1); + bit31 = (res >> 31); + /* of = cf ^ result31 */ + SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0); + } + break; + } + } + env->eip += 
decode->len; +} + + +void exec_rcl(CPUX86State *env, struct x86_decode *decode) +{ + uint8_t count; + int of = 0, cf = 0; + + fetch_operands(env, decode, 2, true, true, false); + count = decode->op[1].val & 0x1f; + + switch (decode->operand_size) { + case 1: + { + uint8_t op1_8 = decode->op[0].val; + uint8_t res; + count %= 9; + if (!count) { + break; + } + + if (1 == count) { + res = (op1_8 << 1) | get_CF(env); + } else { + res = (op1_8 << count) | (get_CF(env) << (count - 1)) | + (op1_8 >> (9 - count)); + } + + write_val_ext(env, &decode->op[0], res, 1); + + cf = (op1_8 >> (8 - count)) & 0x01; + of = cf ^ (res >> 7); /* of = cf ^ result7 */ + SET_FLAGS_OxxxxC(env, of, cf); + break; + } + case 2: + { + uint16_t res; + uint16_t op1_16 = decode->op[0].val; + + count %= 17; + if (!count) { + break; + } + + if (1 == count) { + res = (op1_16 << 1) | get_CF(env); + } else if (count == 16) { + res = (get_CF(env) << 15) | (op1_16 >> 1); + } else { /* 2..15 */ + res = (op1_16 << count) | (get_CF(env) << (count - 1)) | + (op1_16 >> (17 - count)); + } + + write_val_ext(env, &decode->op[0], res, 2); + + cf = (op1_16 >> (16 - count)) & 0x1; + of = cf ^ (res >> 15); /* of = cf ^ result15 */ + SET_FLAGS_OxxxxC(env, of, cf); + break; + } + case 4: + { + uint32_t res; + uint32_t op1_32 = decode->op[0].val; + + if (!count) { + break; + } + + if (1 == count) { + res = (op1_32 << 1) | get_CF(env); + } else { + res = (op1_32 << count) | (get_CF(env) << (count - 1)) | + (op1_32 >> (33 - count)); + } + + write_val_ext(env, &decode->op[0], res, 4); + + cf = (op1_32 >> (32 - count)) & 0x1; + of = cf ^ (res >> 31); /* of = cf ^ result31 */ + SET_FLAGS_OxxxxC(env, of, cf); + break; + } + } + env->eip += decode->len; +} + +void exec_rcr(CPUX86State *env, struct x86_decode *decode) +{ + uint8_t count; + int of = 0, cf = 0; + + fetch_operands(env, decode, 2, true, true, false); + count = decode->op[1].val & 0x1f; + + switch (decode->operand_size) { + case 1: + { + uint8_t op1_8 = decode->op[0].val; + uint8_t res; + + count %= 9; + if (!count) { + break; + } + res = (op1_8 >> count) | (get_CF(env) << (8 - count)) | + (op1_8 << (9 - count)); + + write_val_ext(env, &decode->op[0], res, 1); + + cf = (op1_8 >> (count - 1)) & 0x1; + of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */ + SET_FLAGS_OxxxxC(env, of, cf); + break; + } + case 2: + { + uint16_t op1_16 = decode->op[0].val; + uint16_t res; + + count %= 17; + if (!count) { + break; + } + res = (op1_16 >> count) | (get_CF(env) << (16 - count)) | + (op1_16 << (17 - count)); + + write_val_ext(env, &decode->op[0], res, 2); + + cf = (op1_16 >> (count - 1)) & 0x1; + of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^ + result14 */ + SET_FLAGS_OxxxxC(env, of, cf); + break; + } + case 4: + { + uint32_t res; + uint32_t op1_32 = decode->op[0].val; + + if (!count) { + break; + } + + if (1 == count) { + res = (op1_32 >> 1) | (get_CF(env) << 31); + } else { + res = (op1_32 >> count) | (get_CF(env) << (32 - count)) | + (op1_32 << (33 - count)); + } + + write_val_ext(env, &decode->op[0], res, 4); + + cf = (op1_32 >> (count - 1)) & 0x1; + of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */ + SET_FLAGS_OxxxxC(env, of, cf); + break; + } + } + env->eip += decode->len; +} + +static void exec_xchg(CPUX86State *env, struct x86_decode *decode) +{ + fetch_operands(env, decode, 2, true, true, false); + + write_val_ext(env, &decode->op[0], decode->op[1].val, + decode->operand_size); + write_val_ext(env, &decode->op[1], decode->op[0].val, + 
decode->operand_size); + + env->eip += decode->len; +} + +static void exec_xadd(CPUX86State *env, struct x86_decode *decode) +{ + EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true); + write_val_ext(env, &decode->op[1], decode->op[0].val, + decode->operand_size); + + env->eip += decode->len; +} + +static struct cmd_handler { + enum x86_decode_cmd cmd; + void (*handler)(CPUX86State *env, struct x86_decode *ins); +} handlers[] = { + {X86_DECODE_CMD_INVL, NULL,}, + {X86_DECODE_CMD_MOV, exec_mov}, + {X86_DECODE_CMD_ADD, exec_add}, + {X86_DECODE_CMD_OR, exec_or}, + {X86_DECODE_CMD_ADC, exec_adc}, + {X86_DECODE_CMD_SBB, exec_sbb}, + {X86_DECODE_CMD_AND, exec_and}, + {X86_DECODE_CMD_SUB, exec_sub}, + {X86_DECODE_CMD_NEG, exec_neg}, + {X86_DECODE_CMD_XOR, exec_xor}, + {X86_DECODE_CMD_CMP, exec_cmp}, + {X86_DECODE_CMD_INC, exec_inc}, + {X86_DECODE_CMD_DEC, exec_dec}, + {X86_DECODE_CMD_TST, exec_tst}, + {X86_DECODE_CMD_NOT, exec_not}, + {X86_DECODE_CMD_MOVZX, exec_movzx}, + {X86_DECODE_CMD_OUT, exec_out}, + {X86_DECODE_CMD_IN, exec_in}, + {X86_DECODE_CMD_INS, exec_ins}, + {X86_DECODE_CMD_OUTS, exec_outs}, + {X86_DECODE_CMD_RDMSR, exec_rdmsr}, + {X86_DECODE_CMD_WRMSR, exec_wrmsr}, + {X86_DECODE_CMD_BT, exec_bt}, + {X86_DECODE_CMD_BTR, exec_btr}, + {X86_DECODE_CMD_BTC, exec_btc}, + {X86_DECODE_CMD_BTS, exec_bts}, + {X86_DECODE_CMD_SHL, exec_shl}, + {X86_DECODE_CMD_ROL, exec_rol}, + {X86_DECODE_CMD_ROR, exec_ror}, + {X86_DECODE_CMD_RCR, exec_rcr}, + {X86_DECODE_CMD_RCL, exec_rcl}, + /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/ + {X86_DECODE_CMD_MOVS, exec_movs}, + {X86_DECODE_CMD_CMPS, exec_cmps}, + {X86_DECODE_CMD_STOS, exec_stos}, + {X86_DECODE_CMD_SCAS, exec_scas}, + {X86_DECODE_CMD_LODS, exec_lods}, + {X86_DECODE_CMD_MOVSX, exec_movsx}, + {X86_DECODE_CMD_XCHG, exec_xchg}, + {X86_DECODE_CMD_XADD, exec_xadd}, +}; + +static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST]; + +const struct x86_emul_ops *emul_ops; + +static void init_cmd_handler(void) +{ + int i; + for (i = 0; i < ARRAY_SIZE(handlers); i++) { + _cmd_handler[handlers[i].cmd] = handlers[i]; + } +} + +bool exec_instruction(CPUX86State *env, struct x86_decode *ins) +{ + if (!_cmd_handler[ins->cmd].handler) { + printf("Unimplemented handler (" TARGET_FMT_lx ") for %d (%x %x) \n", env->eip, + ins->cmd, ins->opcode[0], + ins->opcode_len > 1 ? ins->opcode[1] : 0); + env->eip += ins->len; + return true; + } + + _cmd_handler[ins->cmd].handler(env, ins); + return true; +} + +void init_emu(const struct x86_emul_ops *o) +{ + emul_ops = o; + init_cmd_handler(); +} diff --git a/target/i386/emulate/x86_emu.h b/target/i386/emulate/x86_emu.h new file mode 100644 index 00000000000..a1a961284b2 --- /dev/null +++ b/target/i386/emulate/x86_emu.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2016 Veertu Inc, + * Copyright (C) 2017 Google Inc, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see . 
+ */ + +#ifndef X86_EMU_H +#define X86_EMU_H + +#include "x86.h" +#include "x86_decode.h" +#include "cpu.h" + +struct x86_emul_ops { + void (*read_mem)(CPUState *cpu, void *data, target_ulong addr, int bytes); + void (*write_mem)(CPUState *cpu, void *data, target_ulong addr, int bytes); + void (*read_segment_descriptor)(CPUState *cpu, struct x86_segment_descriptor *desc, + enum X86Seg seg); + void (*handle_io)(CPUState *cpu, uint16_t port, void *data, int direction, + int size, int count); + void (*simulate_rdmsr)(CPUState *cs); + void (*simulate_wrmsr)(CPUState *cs); +}; + +extern const struct x86_emul_ops *emul_ops; + +void init_emu(const struct x86_emul_ops *ops); +bool exec_instruction(CPUX86State *env, struct x86_decode *ins); +void x86_emul_raise_exception(CPUX86State *env, int exception_index, int error_code); + +target_ulong read_reg(CPUX86State *env, int reg, int size); +void write_reg(CPUX86State *env, int reg, target_ulong val, int size); +target_ulong read_val_from_reg(void *reg_ptr, int size); +void write_val_to_reg(void *reg_ptr, target_ulong val, int size); +void write_val_ext(CPUX86State *env, struct x86_decode_op *decode, target_ulong val, int size); +uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes); +target_ulong read_val_ext(CPUX86State *env, struct x86_decode_op *decode, int size); + +void exec_movzx(CPUX86State *env, struct x86_decode *decode); +void exec_shl(CPUX86State *env, struct x86_decode *decode); +void exec_movsx(CPUX86State *env, struct x86_decode *decode); +void exec_ror(CPUX86State *env, struct x86_decode *decode); +void exec_rol(CPUX86State *env, struct x86_decode *decode); +void exec_rcl(CPUX86State *env, struct x86_decode *decode); +void exec_rcr(CPUX86State *env, struct x86_decode *decode); +#endif diff --git a/target/i386/emulate/x86_flags.c b/target/i386/emulate/x86_flags.c new file mode 100644 index 00000000000..6592193b5e0 --- /dev/null +++ b/target/i386/emulate/x86_flags.c @@ -0,0 +1,273 @@ +///////////////////////////////////////////////////////////////////////// +// +// Copyright (C) 2001-2012 The Bochs Project +// Copyright (C) 2017 Google Inc. +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, see +// . +///////////////////////////////////////////////////////////////////////// +/* + * flags functions + */ + +#include "qemu/osdep.h" + +#include "panic.h" +#include "cpu.h" +#include "x86_flags.h" +#include "x86.h" + + +/* + * The algorithms here are similar to those in Bochs. After an ALU + * operation, CC_DST can be used to compute ZF, SF and PF, whereas + * CC_SRC is used to compute AF, CF and OF. In reality, SF and PF are the + * XOR of the value computed from CC_DST and the value found in bits 7 and 2 + * of CC_SRC; this way the same logic can be used to compute the flags + * both before and after an ALU operation. + * + * Compared to the TCG CC_OP codes, this avoids conditionals when converting + * to and from the RFLAGS representation. 
+ */ + +#define LF_SIGN_BIT (TARGET_LONG_BITS - 1) + +#define LF_BIT_PD (2) /* lazy Parity Delta, same bit as PF */ +#define LF_BIT_AF (3) /* lazy Adjust flag */ +#define LF_BIT_SD (7) /* lazy Sign Flag Delta, same bit as SF */ +#define LF_BIT_CF (TARGET_LONG_BITS - 1) /* lazy Carry Flag */ +#define LF_BIT_PO (TARGET_LONG_BITS - 2) /* lazy Partial Overflow = CF ^ OF */ + +#define LF_MASK_PD ((target_ulong)0x01 << LF_BIT_PD) +#define LF_MASK_AF ((target_ulong)0x01 << LF_BIT_AF) +#define LF_MASK_SD ((target_ulong)0x01 << LF_BIT_SD) +#define LF_MASK_CF ((target_ulong)0x01 << LF_BIT_CF) +#define LF_MASK_PO ((target_ulong)0x01 << LF_BIT_PO) + +/* ******************* */ +/* OSZAPC */ +/* ******************* */ + +/* use carries to fill in AF, PO and CF, while ensuring PD and SD are clear. + * for full-word operations just clear PD and SD; for smaller operand + * sizes only keep AF in the low byte and shift the carries left to + * place PO and CF in the top two bits. + */ +#define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \ + env->cc_dst = (target_ulong)(int##size##_t)(lf_result); \ + target_ulong temp = (lf_carries); \ + if ((size) == TARGET_LONG_BITS) { \ + temp = temp & ~(LF_MASK_PD | LF_MASK_SD); \ + } else { \ + temp = (temp & LF_MASK_AF) | (temp << (TARGET_LONG_BITS - (size))); \ + } \ + env->cc_src = temp; \ +} + +/* carries, result */ +#define SET_FLAGS_OSZAPC_8(carries, result) \ + SET_FLAGS_OSZAPC_SIZE(8, carries, result) +#define SET_FLAGS_OSZAPC_16(carries, result) \ + SET_FLAGS_OSZAPC_SIZE(16, carries, result) +#define SET_FLAGS_OSZAPC_32(carries, result) \ + SET_FLAGS_OSZAPC_SIZE(32, carries, result) + +/* ******************* */ +/* OSZAP */ +/* ******************* */ +/* same as setting OSZAPC, but preserve CF and flip PO if the old value of CF + * did not match the high bit of lf_carries. 
*/ +#define SET_FLAGS_OSZAP_SIZE(size, lf_carries, lf_result) { \ + env->cc_dst = (target_ulong)(int##size##_t)(lf_result); \ + target_ulong temp = (lf_carries); \ + if ((size) == TARGET_LONG_BITS) { \ + temp = (temp & ~(LF_MASK_PD | LF_MASK_SD)); \ + } else { \ + temp = (temp & LF_MASK_AF) | (temp << (TARGET_LONG_BITS - (size))); \ + } \ + target_ulong cf_changed = ((target_long)(env->cc_src ^ temp)) < 0; \ + env->cc_src = temp ^ (cf_changed * (LF_MASK_PO | LF_MASK_CF)); \ +} + +/* carries, result */ +#define SET_FLAGS_OSZAP_8(carries, result) \ + SET_FLAGS_OSZAP_SIZE(8, carries, result) +#define SET_FLAGS_OSZAP_16(carries, result) \ + SET_FLAGS_OSZAP_SIZE(16, carries, result) +#define SET_FLAGS_OSZAP_32(carries, result) \ + SET_FLAGS_OSZAP_SIZE(32, carries, result) + +void SET_FLAGS_OxxxxC(CPUX86State *env, bool new_of, bool new_cf) +{ + env->cc_src &= ~(LF_MASK_PO | LF_MASK_CF); + env->cc_src |= (-(target_ulong)new_cf << LF_BIT_PO); + env->cc_src ^= ((target_ulong)new_of << LF_BIT_PO); +} + +void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2, + uint32_t diff) +{ + SET_FLAGS_OSZAPC_32(SUB_COUT_VEC(v1, v2, diff), diff); +} + +void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2, + uint16_t diff) +{ + SET_FLAGS_OSZAPC_16(SUB_COUT_VEC(v1, v2, diff), diff); +} + +void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2, + uint8_t diff) +{ + SET_FLAGS_OSZAPC_8(SUB_COUT_VEC(v1, v2, diff), diff); +} + +void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2, + uint32_t diff) +{ + SET_FLAGS_OSZAPC_32(ADD_COUT_VEC(v1, v2, diff), diff); +} + +void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2, + uint16_t diff) +{ + SET_FLAGS_OSZAPC_16(ADD_COUT_VEC(v1, v2, diff), diff); +} + +void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2, + uint8_t diff) +{ + SET_FLAGS_OSZAPC_8(ADD_COUT_VEC(v1, v2, diff), diff); +} + +void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2, + uint32_t diff) +{ + SET_FLAGS_OSZAP_32(SUB_COUT_VEC(v1, v2, diff), diff); +} + +void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2, + uint16_t diff) +{ + SET_FLAGS_OSZAP_16(SUB_COUT_VEC(v1, v2, diff), diff); +} + +void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2, + uint8_t diff) +{ + SET_FLAGS_OSZAP_8(SUB_COUT_VEC(v1, v2, diff), diff); +} + +void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2, + uint32_t diff) +{ + SET_FLAGS_OSZAP_32(ADD_COUT_VEC(v1, v2, diff), diff); +} + +void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2, + uint16_t diff) +{ + SET_FLAGS_OSZAP_16(ADD_COUT_VEC(v1, v2, diff), diff); +} + +void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2, + uint8_t diff) +{ + SET_FLAGS_OSZAP_8(ADD_COUT_VEC(v1, v2, diff), diff); +} + + +void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t v1, uint32_t v2, + uint32_t diff) +{ + SET_FLAGS_OSZAPC_32(0, diff); +} + +void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t v1, uint16_t v2, + uint16_t diff) +{ + SET_FLAGS_OSZAPC_16(0, diff); +} + +void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2, + uint8_t diff) +{ + SET_FLAGS_OSZAPC_8(0, diff); +} + +static inline uint32_t get_PF(CPUX86State *env) +{ + return ((parity8(env->cc_dst) - 1) ^ env->cc_src) & CC_P; +} + +static inline uint32_t get_OF(CPUX86State *env) +{ + return ((env->cc_src >> (LF_BIT_CF - 11)) + CC_O / 2) & CC_O; +} + +bool get_CF(CPUX86State *env) +{ + return ((target_long)env->cc_src) < 0; +} + +void 
set_CF(CPUX86State *env, bool val) +{ + /* If CF changes, flip PO and CF */ + target_ulong temp = -(target_ulong)val; + target_ulong cf_changed = ((target_long)(env->cc_src ^ temp)) < 0; + env->cc_src ^= cf_changed * (LF_MASK_PO | LF_MASK_CF); +} + +static inline uint32_t get_ZF(CPUX86State *env) +{ + return env->cc_dst ? 0 : CC_Z; +} + +static inline uint32_t get_SF(CPUX86State *env) +{ + return ((env->cc_dst >> (LF_SIGN_BIT - LF_BIT_SD)) ^ + env->cc_src) & CC_S; +} + +void lflags_to_rflags(CPUX86State *env) +{ + env->eflags &= ~(CC_C|CC_P|CC_A|CC_Z|CC_S|CC_O); + /* rotate left by one to move carry-out bits into CF and AF */ + env->eflags |= ( + (env->cc_src << 1) | + (env->cc_src >> (TARGET_LONG_BITS - 1))) & (CC_C | CC_A); + env->eflags |= get_SF(env); + env->eflags |= get_PF(env); + env->eflags |= get_ZF(env); + env->eflags |= get_OF(env); +} + +void rflags_to_lflags(CPUX86State *env) +{ + target_ulong cf_af, cf_xor_of; + + /* Leave the low byte zero so that parity is always even... */ + env->cc_dst = !(env->eflags & CC_Z) << 8; + + /* ... and therefore cc_src always uses opposite polarity. */ + env->cc_src = CC_P; + env->cc_src ^= env->eflags & (CC_S | CC_P); + + /* rotate right by one to move CF and AF into the carry-out positions */ + cf_af = env->eflags & (CC_C | CC_A); + env->cc_src |= ((cf_af >> 1) | (cf_af << (TARGET_LONG_BITS - 1))); + + cf_xor_of = ((env->eflags & (CC_C | CC_O)) + (CC_O - CC_C)) & CC_O; + env->cc_src |= -cf_xor_of & LF_MASK_PO; +} diff --git a/target/i386/hvf/x86_flags.h b/target/i386/emulate/x86_flags.h similarity index 80% rename from target/i386/hvf/x86_flags.h rename to target/i386/emulate/x86_flags.h index 75c2a7feab5..a395c837a0e 100644 --- a/target/i386/hvf/x86_flags.h +++ b/target/i386/emulate/x86_flags.h @@ -14,34 +14,24 @@ // Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public -// License along with this library; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA B 02110-1301 USA +// License along with this library; if not, see +// . ///////////////////////////////////////////////////////////////////////// /* * x86 eflags functions */ -#ifndef X86_FLAGS_H -#define X86_FLAGS_H +#ifndef X86_EMU_FLAGS_H +#define X86_EMU_FLAGS_H #include "cpu.h" void lflags_to_rflags(CPUX86State *env); void rflags_to_lflags(CPUX86State *env); -bool get_PF(CPUX86State *env); -void set_PF(CPUX86State *env, bool val); bool get_CF(CPUX86State *env); void set_CF(CPUX86State *env, bool val); -bool get_AF(CPUX86State *env); -void set_AF(CPUX86State *env, bool val); -bool get_ZF(CPUX86State *env); -void set_ZF(CPUX86State *env, bool val); -bool get_SF(CPUX86State *env); -void set_SF(CPUX86State *env, bool val); -bool get_OF(CPUX86State *env); -void set_OF(CPUX86State *env, bool val); -void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf); +void SET_FLAGS_OxxxxC(CPUX86State *env, bool new_of, bool new_cf); void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2, uint32_t diff); @@ -78,4 +68,4 @@ void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t v1, uint16_t v2, void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2, uint8_t diff); -#endif /* X86_FLAGS_H */ +#endif /* X86_EMU_FLAGS_H */ diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c index 4acf485879e..04c49e802d7 100644 --- a/target/i386/gdbstub.c +++ b/target/i386/gdbstub.c @@ -18,8 +18,13 @@ * License along with this library; if not, see . 
*/ #include "qemu/osdep.h" +#include "accel/tcg/vcpu-state.h" #include "cpu.h" +#include "exec/gdbstub.h" #include "gdbstub/helpers.h" +#ifdef CONFIG_LINUX_USER +#include "linux-user/qemu.h" +#endif #ifdef TARGET_X86_64 static const int gpr_map[16] = { @@ -96,6 +101,19 @@ static int gdb_write_reg_cs64(uint32_t hflags, uint8_t *buf, target_ulong *val) return 4; } +static int gdb_get_reg(CPUX86State *env, GByteArray *mem_buf, target_ulong val) +{ + if (TARGET_LONG_BITS == 64) { + if (env->hflags & HF_CS64_MASK) { + return gdb_get_reg64(mem_buf, val); + } else { + return gdb_get_reg64(mem_buf, val & 0xffffffffUL); + } + } else { + return gdb_get_reg32(mem_buf, val); + } +} + int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { X86CPU *cpu = X86_CPU(cs); @@ -137,15 +155,7 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) } else { switch (n) { case IDX_IP_REG: - if (TARGET_LONG_BITS == 64) { - if (env->hflags & HF_CS64_MASK) { - return gdb_get_reg64(mem_buf, env->eip); - } else { - return gdb_get_reg64(mem_buf, env->eip & 0xffffffffUL); - } - } else { - return gdb_get_reg32(mem_buf, env->eip); - } + return gdb_get_reg(env, mem_buf, env->eip); case IDX_FLAGS_REG: return gdb_get_reg32(mem_buf, env->eflags); @@ -248,6 +258,21 @@ static int x86_cpu_gdb_load_seg(X86CPU *cpu, X86Seg sreg, uint8_t *mem_buf) return 4; } +static int gdb_write_reg(CPUX86State *env, uint8_t *mem_buf, target_ulong *val) +{ + if (TARGET_LONG_BITS == 64) { + if (env->hflags & HF_CS64_MASK) { + *val = ldq_p(mem_buf); + } else { + *val = ldq_p(mem_buf) & 0xffffffffUL; + } + return 8; + } else { + *val = (uint32_t)ldl_p(mem_buf); + return 4; + } +} + int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { X86CPU *cpu = X86_CPU(cs); @@ -288,18 +313,7 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) } else { switch (n) { case IDX_IP_REG: - if (TARGET_LONG_BITS == 64) { - if (env->hflags & HF_CS64_MASK) { - env->eip = ldq_p(mem_buf); - } else { - env->eip = ldq_p(mem_buf) & 0xffffffffUL; - } - return 8; - } else { - env->eip &= ~0xffffffffUL; - env->eip |= (uint32_t)ldl_p(mem_buf); - return 4; - } + return gdb_write_reg(env, mem_buf, &env->eip); case IDX_FLAGS_REG: env->eflags = ldl_p(mem_buf); return 4; @@ -397,3 +411,49 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) /* Unrecognised register. 
*/ return 0; } + +#ifdef CONFIG_LINUX_USER + +#define IDX_ORIG_AX 0 + +static int x86_cpu_gdb_read_linux_register(CPUState *cs, GByteArray *mem_buf, + int n) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + switch (n) { + case IDX_ORIG_AX: + return gdb_get_reg(env, mem_buf, get_task_state(cs)->orig_ax); + } + return 0; +} + +static int x86_cpu_gdb_write_linux_register(CPUState *cs, uint8_t *mem_buf, + int n) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + switch (n) { + case IDX_ORIG_AX: + return gdb_write_reg(env, mem_buf, &get_task_state(cs)->orig_ax); + } + return 0; +} + +#endif + +void x86_cpu_gdb_init(CPUState *cs) +{ +#ifdef CONFIG_LINUX_USER + gdb_register_coprocessor(cs, x86_cpu_gdb_read_linux_register, + x86_cpu_gdb_write_linux_register, +#ifdef TARGET_X86_64 + gdb_find_static_feature("i386-64bit-linux.xml"), +#else + gdb_find_static_feature("i386-32bit-linux.xml"), +#endif + 0); +#endif +} diff --git a/target/i386/helper.c b/target/i386/helper.c index 01a268a30bb..e0aaed3c4c4 100644 --- a/target/i386/helper.c +++ b/target/i386/helper.c @@ -20,10 +20,13 @@ #include "qemu/osdep.h" #include "qapi/qapi-events-run-state.h" #include "cpu.h" -#include "exec/exec-all.h" -#include "sysemu/runstate.h" +#include "exec/cputlb.h" +#include "exec/translation-block.h" +#include "exec/target_page.h" +#include "system/runstate.h" #ifndef CONFIG_USER_ONLY -#include "sysemu/hw_accel.h" +#include "system/hw_accel.h" +#include "system/memory.h" #include "monitor/monitor.h" #include "kvm/kvm_i386.h" #endif @@ -523,7 +526,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, static inline target_ulong get_memio_eip(CPUX86State *env) { #ifdef CONFIG_TCG - uint64_t data[TARGET_INSN_START_WORDS]; + uint64_t data[INSN_START_WORDS]; CPUState *cs = env_cpu(env); if (!cpu_unwind_state_data(cs, cs->mem_io_pc, data)) { diff --git a/target/i386/helper.h b/target/i386/helper.h index eeb8df56eaa..3f67098f11f 100644 --- a/target/i386/helper.h +++ b/target/i386/helper.h @@ -1,5 +1,6 @@ DEF_HELPER_FLAGS_4(cc_compute_all, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int) DEF_HELPER_FLAGS_4(cc_compute_c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int) +DEF_HELPER_FLAGS_3(cc_compute_nz, TCG_CALL_NO_RWG_SE, tl, tl, tl, int) DEF_HELPER_3(write_eflags, void, env, tl, i32) DEF_HELPER_1(read_eflags, tl, env) diff --git a/target/i386/host-cpu.c b/target/i386/host-cpu.c index 8b8bf5afecc..d5e2bb5e187 100644 --- a/target/i386/host-cpu.c +++ b/target/i386/host-cpu.c @@ -12,10 +12,10 @@ #include "host-cpu.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "sysemu/sysemu.h" +#include "system/system.h" /* Note: Only safe for use on x86(-64) hosts */ -static uint32_t host_cpu_phys_bits(void) +uint32_t host_cpu_phys_bits(void) { uint32_t eax; uint32_t host_phys_bits; @@ -42,7 +42,7 @@ static uint32_t host_cpu_phys_bits(void) return host_phys_bits; } -static uint32_t host_cpu_adjust_phys_bits(X86CPU *cpu) +static void host_cpu_adjust_phys_bits(X86CPU *cpu) { uint32_t host_phys_bits = host_cpu_phys_bits(); uint32_t phys_bits = cpu->phys_bits; @@ -66,7 +66,7 @@ static uint32_t host_cpu_adjust_phys_bits(X86CPU *cpu) } } - return phys_bits; + cpu->phys_bits = phys_bits; } bool host_cpu_realizefn(CPUState *cs, Error **errp) @@ -75,22 +75,11 @@ bool host_cpu_realizefn(CPUState *cs, Error **errp) CPUX86State *env = &cpu->env; if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { - uint32_t phys_bits = host_cpu_adjust_phys_bits(cpu); - - if (phys_bits && - (phys_bits > 
TARGET_PHYS_ADDR_SPACE_BITS || - phys_bits < 32)) { - error_setg(errp, "phys-bits should be between 32 and %u " - " (but is %u)", - TARGET_PHYS_ADDR_SPACE_BITS, phys_bits); - return false; - } - cpu->phys_bits = phys_bits; + host_cpu_adjust_phys_bits(cpu); } return true; } -#define CPUID_MODEL_ID_SZ 48 /** * cpu_x86_fill_model_id: * Get CPUID model ID string from host CPU. @@ -119,18 +108,22 @@ void host_cpu_vendor_fms(char *vendor, int *family, int *model, int *stepping) { uint32_t eax, ebx, ecx, edx; - host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); + host_cpuid(0x0, 0, NULL, &ebx, &ecx, &edx); x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); + if (!family && !model && !stepping) { + return; + } + host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); if (family) { - *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); + *family = x86_cpu_family(eax); } if (model) { - *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); + *model = x86_cpu_model(eax); } if (stepping) { - *stepping = eax & 0x0F; + *stepping = x86_cpu_stepping(eax); } } @@ -138,29 +131,27 @@ void host_cpu_instance_init(X86CPU *cpu) { X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); - if (xcc->model) { - uint32_t ebx = 0, ecx = 0, edx = 0; - char vendor[CPUID_VENDOR_SZ + 1]; - - host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); - x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); - object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); - } -} - -void host_cpu_max_instance_init(X86CPU *cpu) -{ char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; int family, model, stepping; - /* Use max host physical address bits if -cpu max option is applied */ - object_property_set_bool(OBJECT(cpu), "host-phys-bits", true, &error_abort); - + /* + * setting vendor applies to both max/host and builtin_x86_defs CPU. + * FIXME: this probably should warn or should be skipped if vendors do + * not match, because family numbers are incompatible between Intel and AMD. 
+ */ host_cpu_vendor_fms(vendor, &family, &model, &stepping); + object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); + + if (!xcc->max_features) { + return; + } + host_cpu_fill_model_id(model_id); - object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); + /* Use max host physical address bits if -cpu max option is applied */ + object_property_set_bool(OBJECT(cpu), "host-phys-bits", true, &error_abort); + object_property_set_int(OBJECT(cpu), "family", family, &error_abort); object_property_set_int(OBJECT(cpu), "model", model, &error_abort); object_property_set_int(OBJECT(cpu), "stepping", stepping, @@ -169,7 +160,16 @@ void host_cpu_max_instance_init(X86CPU *cpu) &error_abort); } -static void host_cpu_class_init(ObjectClass *oc, void *data) +bool is_host_cpu_intel(void) +{ + char vendor[CPUID_VENDOR_SZ + 1]; + + host_cpu_vendor_fms(vendor, NULL, NULL, NULL); + + return g_str_equal(vendor, CPUID_VENDOR_INTEL); +} + +static void host_cpu_class_init(ObjectClass *oc, const void *data) { X86CPUClass *xcc = X86_CPU_CLASS(oc); diff --git a/target/i386/host-cpu.h b/target/i386/host-cpu.h index 6a9bc918baa..ee653242254 100644 --- a/target/i386/host-cpu.h +++ b/target/i386/host-cpu.h @@ -10,10 +10,11 @@ #ifndef HOST_CPU_H #define HOST_CPU_H +uint32_t host_cpu_phys_bits(void); void host_cpu_instance_init(X86CPU *cpu); -void host_cpu_max_instance_init(X86CPU *cpu); bool host_cpu_realizefn(CPUState *cs, Error **errp); void host_cpu_vendor_fms(char *vendor, int *family, int *model, int *stepping); +bool is_host_cpu_intel(void); #endif /* HOST_CPU_H */ diff --git a/target/i386/hvf/hvf-cpu.c b/target/i386/hvf/hvf-cpu.c index ac617f17e73..94ee096ecf7 100644 --- a/target/i386/hvf/hvf-cpu.c +++ b/target/i386/hvf/hvf-cpu.c @@ -11,18 +11,16 @@ #include "cpu.h" #include "host-cpu.h" #include "qapi/error.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/boards.h" -#include "sysemu/hvf.h" -#include "hw/core/accel-cpu.h" +#include "system/hvf.h" +#include "accel/accel-cpu-target.h" #include "hvf-i386.h" static void hvf_cpu_max_instance_init(X86CPU *cpu) { CPUX86State *env = &cpu->env; - host_cpu_max_instance_init(cpu); - env->cpuid_min_level = hvf_get_supported_cpuid(0x0, 0, R_EAX); env->cpuid_min_xlevel = @@ -61,20 +59,21 @@ static void hvf_cpu_xsave_init(void) static void hvf_cpu_instance_init(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); + X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); host_cpu_instance_init(cpu); /* Special cases not set in the X86CPUDefinition structs: */ /* TODO: in-kernel irqchip for hvf */ - if (cpu->max_features) { + if (xcc->max_features) { hvf_cpu_max_instance_init(cpu); } hvf_cpu_xsave_init(); } -static void hvf_cpu_accel_class_init(ObjectClass *oc, void *data) +static void hvf_cpu_accel_class_init(ObjectClass *oc, const void *data) { AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); diff --git a/target/i386/hvf/hvf-i386.h b/target/i386/hvf/hvf-i386.h index e99c02cd4bf..8c42ae6b013 100644 --- a/target/i386/hvf/hvf-i386.h +++ b/target/i386/hvf/hvf-i386.h @@ -18,7 +18,9 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx, int reg); -void hvf_handle_io(CPUArchState *, uint16_t, void *, int, int, int); +void hvf_handle_io(CPUState *, uint16_t, void *, int, int, int); +void hvf_simulate_rdmsr(CPUState *cpu); +void hvf_simulate_wrmsr(CPUState *cpu); /* Host specific functions */ int hvf_inject_interrupt(CPUArchState *env, int vector); diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c index c9c64e29781..818b50419f4 100644 --- 
a/target/i386/hvf/hvf.c +++ b/target/i386/hvf/hvf.c @@ -52,18 +52,19 @@ #include "qapi/error.h" #include "migration/blocker.h" -#include "sysemu/hvf.h" -#include "sysemu/hvf_int.h" -#include "sysemu/runstate.h" -#include "sysemu/cpus.h" +#include "system/hvf.h" +#include "system/hvf_int.h" +#include "system/runstate.h" +#include "system/cpus.h" #include "hvf-i386.h" #include "vmcs.h" #include "vmx.h" -#include "x86.h" +#include "emulate/x86.h" #include "x86_descr.h" +#include "emulate/x86_flags.h" #include "x86_mmu.h" -#include "x86_decode.h" -#include "x86_emu.h" +#include "emulate/x86_decode.h" +#include "emulate/x86_emu.h" #include "x86_task.h" #include "x86hvf.h" @@ -75,6 +76,7 @@ #include "qemu/main-loop.h" #include "qemu/accel.h" #include "target/i386/cpu.h" +#include "exec/target_page.h" static Error *invtsc_mig_blocker; @@ -103,7 +105,7 @@ static void update_apic_tpr(CPUState *cpu) #define VECTORING_INFO_VECTOR_MASK 0xff -void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer, +void hvf_handle_io(CPUState *env, uint16_t port, void *buffer, int direction, int size, int count) { int i; @@ -167,7 +169,7 @@ void hvf_arch_vcpu_destroy(CPUState *cpu) X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; - g_free(env->hvf_mmio_buf); + g_free(env->emu_mmio_buf); } static void init_tsc_freq(CPUX86State *env) @@ -223,6 +225,38 @@ int hvf_arch_init(void) return 0; } +hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range) +{ + return hv_vm_create(HV_VM_DEFAULT); +} + +static void hvf_read_segment_descriptor(CPUState *s, struct x86_segment_descriptor *desc, + X86Seg seg) +{ + struct vmx_segment vmx_segment; + vmx_read_segment_descriptor(s, &vmx_segment, seg); + vmx_segment_to_x86_descriptor(s, &vmx_segment, desc); +} + +static void hvf_read_mem(CPUState *cpu, void *data, target_ulong gva, int bytes) +{ + vmx_read_mem(cpu, data, gva, bytes); +} + +static void hvf_write_mem(CPUState *cpu, void *data, target_ulong gva, int bytes) +{ + vmx_write_mem(cpu, gva, data, bytes); +} + +static const struct x86_emul_ops hvf_x86_emul_ops = { + .read_mem = hvf_read_mem, + .write_mem = hvf_write_mem, + .read_segment_descriptor = hvf_read_segment_descriptor, + .handle_io = hvf_handle_io, + .simulate_rdmsr = hvf_simulate_rdmsr, + .simulate_wrmsr = hvf_simulate_wrmsr, +}; + int hvf_arch_init_vcpu(CPUState *cpu) { X86CPU *x86cpu = X86_CPU(cpu); @@ -231,11 +265,13 @@ int hvf_arch_init_vcpu(CPUState *cpu) int r; uint64_t reqCap; - init_emu(); + init_emu(&hvf_x86_emul_ops); init_decoder(); - hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1); - env->hvf_mmio_buf = g_new(char, 4096); + if (hvf_state->hvf_caps == NULL) { + hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1); + } + env->emu_mmio_buf = g_new(char, 4096); if (x86cpu->vmware_cpuid_freq) { init_tsc_freq(env); @@ -427,6 +463,264 @@ static void hvf_cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } } +void hvf_load_regs(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + int i = 0; + RRX(env, R_EAX) = rreg(cs->accel->fd, HV_X86_RAX); + RRX(env, R_EBX) = rreg(cs->accel->fd, HV_X86_RBX); + RRX(env, R_ECX) = rreg(cs->accel->fd, HV_X86_RCX); + RRX(env, R_EDX) = rreg(cs->accel->fd, HV_X86_RDX); + RRX(env, R_ESI) = rreg(cs->accel->fd, HV_X86_RSI); + RRX(env, R_EDI) = rreg(cs->accel->fd, HV_X86_RDI); + RRX(env, R_ESP) = rreg(cs->accel->fd, HV_X86_RSP); + RRX(env, R_EBP) = rreg(cs->accel->fd, HV_X86_RBP); + for (i = 8; i < 16; i++) { + RRX(env, i) = rreg(cs->accel->fd, HV_X86_RAX + 
i); + } + + env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS); + rflags_to_lflags(env); + env->eip = rreg(cs->accel->fd, HV_X86_RIP); +} + +void hvf_store_regs(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + int i = 0; + wreg(cs->accel->fd, HV_X86_RAX, RAX(env)); + wreg(cs->accel->fd, HV_X86_RBX, RBX(env)); + wreg(cs->accel->fd, HV_X86_RCX, RCX(env)); + wreg(cs->accel->fd, HV_X86_RDX, RDX(env)); + wreg(cs->accel->fd, HV_X86_RSI, RSI(env)); + wreg(cs->accel->fd, HV_X86_RDI, RDI(env)); + wreg(cs->accel->fd, HV_X86_RBP, RBP(env)); + wreg(cs->accel->fd, HV_X86_RSP, RSP(env)); + for (i = 8; i < 16; i++) { + wreg(cs->accel->fd, HV_X86_RAX + i, RRX(env, i)); + } + + lflags_to_rflags(env); + wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags); + macvm_set_rip(cs, env->eip); +} + +void hvf_simulate_rdmsr(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + uint32_t msr = ECX(env); + uint64_t val = 0; + + switch (msr) { + case MSR_IA32_TSC: + val = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET); + break; + case MSR_IA32_APICBASE: + val = cpu_get_apic_base(cpu->apic_state); + break; + case MSR_APIC_START ... MSR_APIC_END: { + int ret; + int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START; + + ret = apic_msr_read(index, &val); + if (ret < 0) { + x86_emul_raise_exception(env, EXCP0D_GPF, 0); + } + + break; + } + case MSR_IA32_UCODE_REV: + val = cpu->ucode_rev; + break; + case MSR_EFER: + val = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER); + break; + case MSR_FSBASE: + val = rvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE); + break; + case MSR_GSBASE: + val = rvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE); + break; + case MSR_KERNELGSBASE: + val = rvmcs(cs->accel->fd, VMCS_HOST_FS_BASE); + break; + case MSR_STAR: + abort(); + break; + case MSR_LSTAR: + abort(); + break; + case MSR_CSTAR: + abort(); + break; + case MSR_IA32_MISC_ENABLE: + val = env->msr_ia32_misc_enable; + break; + case MSR_MTRRphysBase(0): + case MSR_MTRRphysBase(1): + case MSR_MTRRphysBase(2): + case MSR_MTRRphysBase(3): + case MSR_MTRRphysBase(4): + case MSR_MTRRphysBase(5): + case MSR_MTRRphysBase(6): + case MSR_MTRRphysBase(7): + val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base; + break; + case MSR_MTRRphysMask(0): + case MSR_MTRRphysMask(1): + case MSR_MTRRphysMask(2): + case MSR_MTRRphysMask(3): + case MSR_MTRRphysMask(4): + case MSR_MTRRphysMask(5): + case MSR_MTRRphysMask(6): + case MSR_MTRRphysMask(7): + val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask; + break; + case MSR_MTRRfix64K_00000: + val = env->mtrr_fixed[0]; + break; + case MSR_MTRRfix16K_80000: + case MSR_MTRRfix16K_A0000: + val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1]; + break; + case MSR_MTRRfix4K_C0000: + case MSR_MTRRfix4K_C8000: + case MSR_MTRRfix4K_D0000: + case MSR_MTRRfix4K_D8000: + case MSR_MTRRfix4K_E0000: + case MSR_MTRRfix4K_E8000: + case MSR_MTRRfix4K_F0000: + case MSR_MTRRfix4K_F8000: + val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3]; + break; + case MSR_MTRRdefType: + val = env->mtrr_deftype; + break; + case MSR_CORE_THREAD_COUNT: + val = cpu_x86_get_msr_core_thread_count(cpu); + break; + default: + /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */ + val = 0; + break; + } + + RAX(env) = (uint32_t)val; + RDX(env) = (uint32_t)(val >> 32); +} + +void hvf_simulate_wrmsr(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + uint32_t msr = ECX(env); + uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env); + + switch (msr) 
{ + case MSR_IA32_TSC: + break; + case MSR_IA32_APICBASE: { + int r; + + r = cpu_set_apic_base(cpu->apic_state, data); + if (r < 0) { + x86_emul_raise_exception(env, EXCP0D_GPF, 0); + } + + break; + } + case MSR_APIC_START ... MSR_APIC_END: { + int ret; + int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START; + + ret = apic_msr_write(index, data); + if (ret < 0) { + x86_emul_raise_exception(env, EXCP0D_GPF, 0); + } + + break; + } + case MSR_FSBASE: + wvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE, data); + break; + case MSR_GSBASE: + wvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE, data); + break; + case MSR_KERNELGSBASE: + wvmcs(cs->accel->fd, VMCS_HOST_FS_BASE, data); + break; + case MSR_STAR: + abort(); + break; + case MSR_LSTAR: + abort(); + break; + case MSR_CSTAR: + abort(); + break; + case MSR_EFER: + /*printf("new efer %llx\n", EFER(cs));*/ + wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, data); + if (data & MSR_EFER_NXE) { + hv_vcpu_invalidate_tlb(cs->accel->fd); + } + break; + case MSR_MTRRphysBase(0): + case MSR_MTRRphysBase(1): + case MSR_MTRRphysBase(2): + case MSR_MTRRphysBase(3): + case MSR_MTRRphysBase(4): + case MSR_MTRRphysBase(5): + case MSR_MTRRphysBase(6): + case MSR_MTRRphysBase(7): + env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data; + break; + case MSR_MTRRphysMask(0): + case MSR_MTRRphysMask(1): + case MSR_MTRRphysMask(2): + case MSR_MTRRphysMask(3): + case MSR_MTRRphysMask(4): + case MSR_MTRRphysMask(5): + case MSR_MTRRphysMask(6): + case MSR_MTRRphysMask(7): + env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data; + break; + case MSR_MTRRfix64K_00000: + env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data; + break; + case MSR_MTRRfix16K_80000: + case MSR_MTRRfix16K_A0000: + env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data; + break; + case MSR_MTRRfix4K_C0000: + case MSR_MTRRfix4K_C8000: + case MSR_MTRRfix4K_D0000: + case MSR_MTRRfix4K_D8000: + case MSR_MTRRfix4K_E0000: + case MSR_MTRRfix4K_E8000: + case MSR_MTRRfix4K_F0000: + case MSR_MTRRfix4K_F8000: + env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data; + break; + case MSR_MTRRdefType: + env->mtrr_deftype = data; + break; + default: + break; + } + + /* Related to support known hypervisor interface */ + /* if (g_hypervisor_iface) + g_hypervisor_iface->wrmsr_handler(cs, msr, data); + + printf("write msr %llx\n", RCX(cs));*/ +} + int hvf_vcpu_exec(CPUState *cpu) { X86CPU *x86_cpu = X86_CPU(cpu); @@ -439,9 +733,9 @@ int hvf_vcpu_exec(CPUState *cpu) } do { - if (cpu->accel->dirty) { + if (cpu->vcpu_dirty) { hvf_put_registers(cpu); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } if (hvf_inject_interrupts(cpu)) { @@ -510,10 +804,10 @@ int hvf_vcpu_exec(CPUState *cpu) if (ept_emulation_fault(slot, gpa, exit_qual)) { struct x86_decode decode; - load_regs(cpu); + hvf_load_regs(cpu); decode_instruction(env, &decode); exec_instruction(env, &decode); - store_regs(cpu); + hvf_store_regs(cpu); break; } break; @@ -528,8 +822,8 @@ int hvf_vcpu_exec(CPUState *cpu) if (!string && in) { uint64_t val = 0; - load_regs(cpu); - hvf_handle_io(env, port, &val, 0, size, 1); + hvf_load_regs(cpu); + hvf_handle_io(env_cpu(env), port, &val, 0, size, 1); if (size == 1) { AL(env) = val; } else if (size == 2) { @@ -540,21 +834,21 @@ int hvf_vcpu_exec(CPUState *cpu) RAX(env) = (uint64_t)val; } env->eip += ins_len; - store_regs(cpu); + hvf_store_regs(cpu); break; } else if (!string && !in) { RAX(env) = rreg(cpu->accel->fd, HV_X86_RAX); - hvf_handle_io(env, port, &RAX(env), 1, size, 1); + 
hvf_handle_io(env_cpu(env), port, &RAX(env), 1, size, 1); macvm_set_rip(cpu, rip + ins_len); break; } struct x86_decode decode; - load_regs(cpu); + hvf_load_regs(cpu); decode_instruction(env, &decode); assert(ins_len == decode.len); exec_instruction(env, &decode); - store_regs(cpu); + hvf_store_regs(cpu); break; } @@ -579,8 +873,6 @@ int hvf_vcpu_exec(CPUState *cpu) break; } case EXIT_REASON_XSETBV: { - X86CPU *x86_cpu = X86_CPU(cpu); - CPUX86State *env = &x86_cpu->env; uint32_t eax = (uint32_t)rreg(cpu->accel->fd, HV_X86_RAX); uint32_t ecx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RCX); uint32_t edx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RDX); @@ -609,21 +901,21 @@ int hvf_vcpu_exec(CPUState *cpu) case EXIT_REASON_RDMSR: case EXIT_REASON_WRMSR: { - load_regs(cpu); + hvf_load_regs(cpu); if (exit_reason == EXIT_REASON_RDMSR) { - simulate_rdmsr(env); + hvf_simulate_rdmsr(cpu); } else { - simulate_wrmsr(env); + hvf_simulate_wrmsr(cpu); } env->eip += ins_len; - store_regs(cpu); + hvf_store_regs(cpu); break; } case EXIT_REASON_CR_ACCESS: { int cr; int reg; - load_regs(cpu); + hvf_load_regs(cpu); cr = exit_qual & 15; reg = (exit_qual >> 8) & 15; @@ -637,7 +929,6 @@ int hvf_vcpu_exec(CPUState *cpu) break; } case 8: { - X86CPU *x86_cpu = X86_CPU(cpu); if (exit_qual & 0x10) { RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state); } else { @@ -652,16 +943,16 @@ int hvf_vcpu_exec(CPUState *cpu) abort(); } env->eip += ins_len; - store_regs(cpu); + hvf_store_regs(cpu); break; } case EXIT_REASON_APIC_ACCESS: { /* TODO */ struct x86_decode decode; - load_regs(cpu); + hvf_load_regs(cpu); decode_instruction(env, &decode); exec_instruction(env, &decode); - store_regs(cpu); + hvf_store_regs(cpu); break; } case EXIT_REASON_TPR: { @@ -670,7 +961,7 @@ int hvf_vcpu_exec(CPUState *cpu) } case EXIT_REASON_TASK_SWITCH: { uint64_t vinfo = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO); - x68_segment_selector sel = {.sel = exit_qual & 0xffff}; + x86_segment_selector sel = {.sel = exit_qual & 0xffff}; vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3, vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo & VMCS_INTR_T_MASK); diff --git a/target/i386/hvf/meson.build b/target/i386/hvf/meson.build index 05c3c8cf18b..519d190f0e6 100644 --- a/target/i386/hvf/meson.build +++ b/target/i386/hvf/meson.build @@ -2,10 +2,7 @@ i386_system_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files( 'hvf.c', 'x86.c', 'x86_cpuid.c', - 'x86_decode.c', 'x86_descr.c', - 'x86_emu.c', - 'x86_flags.c', 'x86_mmu.c', 'x86_task.c', 'x86hvf.c', diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h index 3954ef883df..26d6029fa55 100644 --- a/target/i386/hvf/vmx.h +++ b/target/i386/hvf/vmx.h @@ -29,11 +29,12 @@ #include #include "vmcs.h" #include "cpu.h" -#include "x86.h" -#include "sysemu/hvf.h" -#include "sysemu/hvf_int.h" +#include "emulate/x86.h" +#include "system/hvf.h" +#include "system/hvf_int.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" +#include "system/memory.h" static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg) { diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c index 80e36136d04..5c75ec9a007 100644 --- a/target/i386/hvf/x86.c +++ b/target/i386/hvf/x86.c @@ -19,8 +19,8 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "x86_decode.h" -#include "x86_emu.h" +#include "emulate/x86_decode.h" +#include "emulate/x86_emu.h" #include "vmcs.h" #include "vmx.h" #include "x86_mmu.h" @@ -48,7 +48,7 @@ bool x86_read_segment_descriptor(CPUState *cpu, struct x86_segment_descriptor 
*desc, - x68_segment_selector sel) + x86_segment_selector sel) { target_ulong base; uint32_t limit; @@ -78,7 +78,7 @@ bool x86_read_segment_descriptor(CPUState *cpu, bool x86_write_segment_descriptor(CPUState *cpu, struct x86_segment_descriptor *desc, - x68_segment_selector sel) + x86_segment_selector sel) { target_ulong base; uint32_t limit; diff --git a/target/i386/hvf/x86_cpuid.c b/target/i386/hvf/x86_cpuid.c index e56cd8411ba..0798a0cbafb 100644 --- a/target/i386/hvf/x86_cpuid.c +++ b/target/i386/hvf/x86_cpuid.c @@ -21,28 +21,38 @@ */ #include "qemu/osdep.h" +#include "qemu/cpuid.h" +#include "host/cpuinfo.h" #include "cpu.h" -#include "x86.h" +#include "emulate/x86.h" #include "vmx.h" -#include "sysemu/hvf.h" +#include "system/hvf.h" #include "hvf-i386.h" -static bool xgetbv(uint32_t cpuid_ecx, uint32_t idx, uint64_t *xcr) +static bool cached_xcr0; +static uint64_t supported_xcr0; + +static void cache_host_xcr0(void) { - uint32_t xcrl, xcrh; + if (cached_xcr0) { + return; + } - if (cpuid_ecx & CPUID_EXT_OSXSAVE) { - /* - * The xgetbv instruction is not available to older versions of - * the assembler, so we encode the instruction manually. - */ - asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (idx)); + if (cpuinfo & CPUINFO_OSXSAVE) { + uint64_t host_xcr0 = xgetbv_low(0); - *xcr = (((uint64_t)xcrh) << 32) | xcrl; - return true; + /* Only show xcr0 bits corresponding to usable features. */ + supported_xcr0 = host_xcr0 & (XSTATE_FP_MASK | + XSTATE_SSE_MASK | XSTATE_YMM_MASK | + XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | + XSTATE_Hi16_ZMM_MASK); + if ((supported_xcr0 & (XSTATE_FP_MASK | XSTATE_SSE_MASK)) != + (XSTATE_FP_MASK | XSTATE_SSE_MASK)) { + supported_xcr0 = 0; + } } - return false; + cached_xcr0 = true; } uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx, @@ -51,6 +61,7 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx, uint64_t cap; uint32_t eax, ebx, ecx, edx; + cache_host_xcr0(); host_cpuid(func, idx, &eax, &ebx, &ecx, &edx); switch (func) { @@ -62,11 +73,12 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx, CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | - CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS; + CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_HT; ecx &= CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 | CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID | CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_MOVBE | - CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_XSAVE | + CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_X2APIC | + (supported_xcr0 ? 
CPUID_EXT_XSAVE : 0) | CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND; ecx |= CPUID_EXT_HYPERVISOR; break; @@ -107,16 +119,14 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx, eax = 0; break; case 0xD: + if (!supported_xcr0 || idx >= 63 || + (idx > 1 && !(supported_xcr0 & (UINT64_C(1) << idx)))) { + eax = ebx = ecx = edx = 0; + break; + } + if (idx == 0) { - uint64_t host_xcr0; - if (xgetbv(ecx, 0, &host_xcr0)) { - uint64_t supp_xcr0 = host_xcr0 & (XSTATE_FP_MASK | - XSTATE_SSE_MASK | XSTATE_YMM_MASK | - XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | - XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | - XSTATE_Hi16_ZMM_MASK); - eax &= supp_xcr0; - } + eax = supported_xcr0; } else if (idx == 1) { hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap); eax &= CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1; diff --git a/target/i386/hvf/x86_decode.c b/target/i386/hvf/x86_decode.c deleted file mode 100644 index a4a28f113fd..00000000000 --- a/target/i386/hvf/x86_decode.c +++ /dev/null @@ -1,2196 +0,0 @@ -/* - * Copyright (C) 2016 Veertu Inc, - * Copyright (C) 2017 Google Inc, - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, see . - */ - -#include "qemu/osdep.h" - -#include "panic.h" -#include "x86_decode.h" -#include "vmx.h" -#include "x86_mmu.h" -#include "x86_descr.h" - -#define OPCODE_ESCAPE 0xf - -static void decode_invalid(CPUX86State *env, struct x86_decode *decode) -{ - printf("%llx: failed to decode instruction ", env->eip); - for (int i = 0; i < decode->opcode_len; i++) { - printf("%x ", decode->opcode[i]); - } - printf("\n"); - VM_PANIC("decoder failed\n"); -} - -uint64_t sign(uint64_t val, int size) -{ - switch (size) { - case 1: - val = (int8_t)val; - break; - case 2: - val = (int16_t)val; - break; - case 4: - val = (int32_t)val; - break; - case 8: - val = (int64_t)val; - break; - default: - VM_PANIC_EX("%s invalid size %d\n", __func__, size); - break; - } - return val; -} - -static inline uint64_t decode_bytes(CPUX86State *env, struct x86_decode *decode, - int size) -{ - target_ulong val = 0; - - switch (size) { - case 1: - case 2: - case 4: - case 8: - break; - default: - VM_PANIC_EX("%s invalid size %d\n", __func__, size); - break; - } - target_ulong va = linear_rip(env_cpu(env), env->eip) + decode->len; - vmx_read_mem(env_cpu(env), &val, va, size); - decode->len += size; - - return val; -} - -static inline uint8_t decode_byte(CPUX86State *env, struct x86_decode *decode) -{ - return (uint8_t)decode_bytes(env, decode, 1); -} - -static inline uint16_t decode_word(CPUX86State *env, struct x86_decode *decode) -{ - return (uint16_t)decode_bytes(env, decode, 2); -} - -static inline uint32_t decode_dword(CPUX86State *env, struct x86_decode *decode) -{ - return (uint32_t)decode_bytes(env, decode, 4); -} - -static inline uint64_t decode_qword(CPUX86State *env, struct x86_decode *decode) -{ - return decode_bytes(env, decode, 8); -} - -static void decode_modrm_rm(CPUX86State *env, struct x86_decode 
*decode, - struct x86_decode_op *op) -{ - op->type = X86_VAR_RM; -} - -static void decode_modrm_reg(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - op->type = X86_VAR_REG; - op->reg = decode->modrm.reg; - op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.r, - decode->operand_size); -} - -static void decode_rax(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - op->type = X86_VAR_REG; - op->reg = R_EAX; - /* Since reg is always AX, REX prefix has no impact. */ - op->ptr = get_reg_ref(env, op->reg, false, 0, - decode->operand_size); -} - -static inline void decode_immediate(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *var, int size) -{ - var->type = X86_VAR_IMMEDIATE; - var->size = size; - switch (size) { - case 1: - var->val = decode_byte(env, decode); - break; - case 2: - var->val = decode_word(env, decode); - break; - case 4: - var->val = decode_dword(env, decode); - break; - case 8: - var->val = decode_qword(env, decode); - break; - default: - VM_PANIC_EX("bad size %d\n", size); - } -} - -static void decode_imm8(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - decode_immediate(env, decode, op, 1); - op->type = X86_VAR_IMMEDIATE; -} - -static void decode_imm8_signed(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - decode_immediate(env, decode, op, 1); - op->val = sign(op->val, 1); - op->type = X86_VAR_IMMEDIATE; -} - -static void decode_imm16(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - decode_immediate(env, decode, op, 2); - op->type = X86_VAR_IMMEDIATE; -} - - -static void decode_imm(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - if (8 == decode->operand_size) { - decode_immediate(env, decode, op, 4); - op->val = sign(op->val, decode->operand_size); - } else { - decode_immediate(env, decode, op, decode->operand_size); - } - op->type = X86_VAR_IMMEDIATE; -} - -static void decode_imm_signed(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - decode_immediate(env, decode, op, decode->operand_size); - op->val = sign(op->val, decode->operand_size); - op->type = X86_VAR_IMMEDIATE; -} - -static void decode_imm_1(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - op->type = X86_VAR_IMMEDIATE; - op->val = 1; -} - -static void decode_imm_0(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - op->type = X86_VAR_IMMEDIATE; - op->val = 0; -} - - -static void decode_pushseg(CPUX86State *env, struct x86_decode *decode) -{ - uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0]; - - decode->op[0].type = X86_VAR_REG; - switch (op) { - case 0xe: - decode->op[0].reg = R_CS; - break; - case 0x16: - decode->op[0].reg = R_SS; - break; - case 0x1e: - decode->op[0].reg = R_DS; - break; - case 0x06: - decode->op[0].reg = R_ES; - break; - case 0xa0: - decode->op[0].reg = R_FS; - break; - case 0xa8: - decode->op[0].reg = R_GS; - break; - } -} - -static void decode_popseg(CPUX86State *env, struct x86_decode *decode) -{ - uint8_t op = (decode->opcode_len > 1) ? 
decode->opcode[1] : decode->opcode[0]; - - decode->op[0].type = X86_VAR_REG; - switch (op) { - case 0xf: - decode->op[0].reg = R_CS; - break; - case 0x17: - decode->op[0].reg = R_SS; - break; - case 0x1f: - decode->op[0].reg = R_DS; - break; - case 0x07: - decode->op[0].reg = R_ES; - break; - case 0xa1: - decode->op[0].reg = R_FS; - break; - case 0xa9: - decode->op[0].reg = R_GS; - break; - } -} - -static void decode_incgroup(CPUX86State *env, struct x86_decode *decode) -{ - decode->op[0].type = X86_VAR_REG; - decode->op[0].reg = decode->opcode[0] - 0x40; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, - decode->rex.b, decode->operand_size); -} - -static void decode_decgroup(CPUX86State *env, struct x86_decode *decode) -{ - decode->op[0].type = X86_VAR_REG; - decode->op[0].reg = decode->opcode[0] - 0x48; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, - decode->rex.b, decode->operand_size); -} - -static void decode_incgroup2(CPUX86State *env, struct x86_decode *decode) -{ - if (!decode->modrm.reg) { - decode->cmd = X86_DECODE_CMD_INC; - } else if (1 == decode->modrm.reg) { - decode->cmd = X86_DECODE_CMD_DEC; - } -} - -static void decode_pushgroup(CPUX86State *env, struct x86_decode *decode) -{ - decode->op[0].type = X86_VAR_REG; - decode->op[0].reg = decode->opcode[0] - 0x50; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, - decode->rex.b, decode->operand_size); -} - -static void decode_popgroup(CPUX86State *env, struct x86_decode *decode) -{ - decode->op[0].type = X86_VAR_REG; - decode->op[0].reg = decode->opcode[0] - 0x58; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, - decode->rex.b, decode->operand_size); -} - -static void decode_jxx(CPUX86State *env, struct x86_decode *decode) -{ - decode->displacement = decode_bytes(env, decode, decode->operand_size); - decode->displacement_size = decode->operand_size; -} - -static void decode_farjmp(CPUX86State *env, struct x86_decode *decode) -{ - decode->op[0].type = X86_VAR_IMMEDIATE; - decode->op[0].val = decode_bytes(env, decode, decode->operand_size); - decode->displacement = decode_word(env, decode); -} - -static void decode_addgroup(CPUX86State *env, struct x86_decode *decode) -{ - enum x86_decode_cmd group[] = { - X86_DECODE_CMD_ADD, - X86_DECODE_CMD_OR, - X86_DECODE_CMD_ADC, - X86_DECODE_CMD_SBB, - X86_DECODE_CMD_AND, - X86_DECODE_CMD_SUB, - X86_DECODE_CMD_XOR, - X86_DECODE_CMD_CMP - }; - decode->cmd = group[decode->modrm.reg]; -} - -static void decode_rotgroup(CPUX86State *env, struct x86_decode *decode) -{ - enum x86_decode_cmd group[] = { - X86_DECODE_CMD_ROL, - X86_DECODE_CMD_ROR, - X86_DECODE_CMD_RCL, - X86_DECODE_CMD_RCR, - X86_DECODE_CMD_SHL, - X86_DECODE_CMD_SHR, - X86_DECODE_CMD_SHL, - X86_DECODE_CMD_SAR - }; - decode->cmd = group[decode->modrm.reg]; -} - -static void decode_f7group(CPUX86State *env, struct x86_decode *decode) -{ - enum x86_decode_cmd group[] = { - X86_DECODE_CMD_TST, - X86_DECODE_CMD_TST, - X86_DECODE_CMD_NOT, - X86_DECODE_CMD_NEG, - X86_DECODE_CMD_MUL, - X86_DECODE_CMD_IMUL_1, - X86_DECODE_CMD_DIV, - X86_DECODE_CMD_IDIV - }; - decode->cmd = group[decode->modrm.reg]; - decode_modrm_rm(env, decode, &decode->op[0]); - - switch (decode->modrm.reg) { - case 0: - case 1: - decode_imm(env, decode, &decode->op[1]); - break; - case 2: - break; - case 3: - decode->op[1].type = X86_VAR_IMMEDIATE; - decode->op[1].val = 0; - break; - default: - break; - } -} - -static void decode_xchgroup(CPUX86State *env, struct 
x86_decode *decode) -{ - decode->op[0].type = X86_VAR_REG; - decode->op[0].reg = decode->opcode[0] - 0x90; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, - decode->rex.b, decode->operand_size); -} - -static void decode_movgroup(CPUX86State *env, struct x86_decode *decode) -{ - decode->op[0].type = X86_VAR_REG; - decode->op[0].reg = decode->opcode[0] - 0xb8; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, - decode->rex.b, decode->operand_size); - decode_immediate(env, decode, &decode->op[1], decode->operand_size); -} - -static void fetch_moffs(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - op->type = X86_VAR_OFFSET; - op->ptr = decode_bytes(env, decode, decode->addressing_size); -} - -static void decode_movgroup8(CPUX86State *env, struct x86_decode *decode) -{ - decode->op[0].type = X86_VAR_REG; - decode->op[0].reg = decode->opcode[0] - 0xb0; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, - decode->rex.b, decode->operand_size); - decode_immediate(env, decode, &decode->op[1], decode->operand_size); -} - -static void decode_rcx(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - op->type = X86_VAR_REG; - op->reg = R_ECX; - op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.b, - decode->operand_size); -} - -struct decode_tbl { - uint8_t opcode; - enum x86_decode_cmd cmd; - uint8_t operand_size; - bool is_modrm; - void (*decode_op1)(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op1); - void (*decode_op2)(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op2); - void (*decode_op3)(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op3); - void (*decode_op4)(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op4); - void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode); - uint32_t flags_mask; -}; - -struct decode_x87_tbl { - uint8_t opcode; - uint8_t modrm_reg; - uint8_t modrm_mod; - enum x86_decode_cmd cmd; - uint8_t operand_size; - bool rev; - bool pop; - void (*decode_op1)(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op1); - void (*decode_op2)(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op2); - void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode); - uint32_t flags_mask; -}; - -struct decode_tbl invl_inst = {0x0, 0, 0, false, NULL, NULL, NULL, NULL, - decode_invalid}; - -struct decode_tbl _decode_tbl1[256]; -struct decode_tbl _decode_tbl2[256]; -struct decode_x87_tbl _decode_tbl3[256]; - -static void decode_x87_ins(CPUX86State *env, struct x86_decode *decode) -{ - struct decode_x87_tbl *decoder; - - decode->is_fpu = true; - int mode = decode->modrm.mod == 3 ? 
1 : 0; - int index = ((decode->opcode[0] & 0xf) << 4) | (mode << 3) | - decode->modrm.reg; - - decoder = &_decode_tbl3[index]; - - decode->cmd = decoder->cmd; - if (decoder->operand_size) { - decode->operand_size = decoder->operand_size; - } - decode->flags_mask = decoder->flags_mask; - decode->fpop_stack = decoder->pop; - decode->frev = decoder->rev; - - if (decoder->decode_op1) { - decoder->decode_op1(env, decode, &decode->op[0]); - } - if (decoder->decode_op2) { - decoder->decode_op2(env, decode, &decode->op[1]); - } - if (decoder->decode_postfix) { - decoder->decode_postfix(env, decode); - } - - VM_PANIC_ON_EX(!decode->cmd, "x87 opcode %x %x (%x %x) not decoded\n", - decode->opcode[0], decode->modrm.modrm, decoder->modrm_reg, - decoder->modrm_mod); -} - -static void decode_ffgroup(CPUX86State *env, struct x86_decode *decode) -{ - enum x86_decode_cmd group[] = { - X86_DECODE_CMD_INC, - X86_DECODE_CMD_DEC, - X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT, - X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT, - X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT, - X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT, - X86_DECODE_CMD_PUSH, - X86_DECODE_CMD_INVL, - X86_DECODE_CMD_INVL - }; - decode->cmd = group[decode->modrm.reg]; - if (decode->modrm.reg > 2) { - decode->flags_mask = 0; - } -} - -static void decode_sldtgroup(CPUX86State *env, struct x86_decode *decode) -{ - - enum x86_decode_cmd group[] = { - X86_DECODE_CMD_SLDT, - X86_DECODE_CMD_STR, - X86_DECODE_CMD_LLDT, - X86_DECODE_CMD_LTR, - X86_DECODE_CMD_VERR, - X86_DECODE_CMD_VERW, - X86_DECODE_CMD_INVL, - X86_DECODE_CMD_INVL - }; - decode->cmd = group[decode->modrm.reg]; -} - -static void decode_lidtgroup(CPUX86State *env, struct x86_decode *decode) -{ - enum x86_decode_cmd group[] = { - X86_DECODE_CMD_SGDT, - X86_DECODE_CMD_SIDT, - X86_DECODE_CMD_LGDT, - X86_DECODE_CMD_LIDT, - X86_DECODE_CMD_SMSW, - X86_DECODE_CMD_LMSW, - X86_DECODE_CMD_LMSW, - X86_DECODE_CMD_INVLPG - }; - decode->cmd = group[decode->modrm.reg]; - if (0xf9 == decode->modrm.modrm) { - decode->opcode[decode->len++] = decode->modrm.modrm; - decode->cmd = X86_DECODE_CMD_RDTSCP; - } -} - -static void decode_btgroup(CPUX86State *env, struct x86_decode *decode) -{ - enum x86_decode_cmd group[] = { - X86_DECODE_CMD_INVL, - X86_DECODE_CMD_INVL, - X86_DECODE_CMD_INVL, - X86_DECODE_CMD_INVL, - X86_DECODE_CMD_BT, - X86_DECODE_CMD_BTS, - X86_DECODE_CMD_BTR, - X86_DECODE_CMD_BTC - }; - decode->cmd = group[decode->modrm.reg]; -} - -static void decode_x87_general(CPUX86State *env, struct x86_decode *decode) -{ - decode->is_fpu = true; -} - -static void decode_x87_modrm_floatp(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - op->type = X87_VAR_FLOATP; -} - -static void decode_x87_modrm_intp(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - op->type = X87_VAR_INTP; -} - -static void decode_x87_modrm_bytep(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - op->type = X87_VAR_BYTEP; -} - -static void decode_x87_modrm_st0(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - op->type = X87_VAR_REG; - op->reg = 0; -} - -static void decode_decode_x87_modrm_st0(CPUX86State *env, - struct x86_decode *decode, - struct x86_decode_op *op) -{ - op->type = X87_VAR_REG; - op->reg = decode->modrm.modrm & 7; -} - - -static void decode_aegroup(CPUX86State *env, struct x86_decode *decode) -{ - decode->is_fpu = true; - switch (decode->modrm.reg) { - case 0: - decode->cmd = X86_DECODE_CMD_FXSAVE; - decode_x87_modrm_bytep(env, decode, 
&decode->op[0]); - break; - case 1: - decode_x87_modrm_bytep(env, decode, &decode->op[0]); - decode->cmd = X86_DECODE_CMD_FXRSTOR; - break; - case 5: - if (decode->modrm.modrm == 0xe8) { - decode->cmd = X86_DECODE_CMD_LFENCE; - } else { - VM_PANIC("xrstor"); - } - break; - case 6: - VM_PANIC_ON(decode->modrm.modrm != 0xf0); - decode->cmd = X86_DECODE_CMD_MFENCE; - break; - case 7: - if (decode->modrm.modrm == 0xf8) { - decode->cmd = X86_DECODE_CMD_SFENCE; - } else { - decode->cmd = X86_DECODE_CMD_CLFLUSH; - } - break; - default: - VM_PANIC_EX("0xae: reg %d\n", decode->modrm.reg); - break; - } -} - -static void decode_bswap(CPUX86State *env, struct x86_decode *decode) -{ - decode->op[0].type = X86_VAR_REG; - decode->op[0].reg = decode->opcode[1] - 0xc8; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, - decode->rex.b, decode->operand_size); -} - -static void decode_d9_4(CPUX86State *env, struct x86_decode *decode) -{ - switch (decode->modrm.modrm) { - case 0xe0: - /* FCHS */ - decode->cmd = X86_DECODE_CMD_FCHS; - break; - case 0xe1: - decode->cmd = X86_DECODE_CMD_FABS; - break; - case 0xe4: - VM_PANIC("FTST"); - break; - case 0xe5: - /* FXAM */ - decode->cmd = X86_DECODE_CMD_FXAM; - break; - default: - VM_PANIC("FLDENV"); - break; - } -} - -static void decode_db_4(CPUX86State *env, struct x86_decode *decode) -{ - switch (decode->modrm.modrm) { - case 0xe0: - VM_PANIC_EX("unhandled FNENI: %x %x\n", decode->opcode[0], - decode->modrm.modrm); - break; - case 0xe1: - VM_PANIC_EX("unhandled FNDISI: %x %x\n", decode->opcode[0], - decode->modrm.modrm); - break; - case 0xe2: - VM_PANIC_EX("unhandled FCLEX: %x %x\n", decode->opcode[0], - decode->modrm.modrm); - break; - case 0xe3: - decode->cmd = X86_DECODE_CMD_FNINIT; - break; - case 0xe4: - decode->cmd = X86_DECODE_CMD_FNSETPM; - break; - default: - VM_PANIC_EX("unhandled fpu opcode: %x %x\n", decode->opcode[0], - decode->modrm.modrm); - break; - } -} - - -#define RFLAGS_MASK_NONE 0 -#define RFLAGS_MASK_OSZAPC (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C) -#define RFLAGS_MASK_LAHF (CC_S | CC_Z | CC_A | CC_P | CC_C) -#define RFLAGS_MASK_CF (CC_C) -#define RFLAGS_MASK_IF (IF_MASK) -#define RFLAGS_MASK_TF (TF_MASK) -#define RFLAGS_MASK_DF (DF_MASK) -#define RFLAGS_MASK_ZF (CC_Z) - -struct decode_tbl _1op_inst[] = { - {0x0, X86_DECODE_CMD_ADD, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, - NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x1, X86_DECODE_CMD_ADD, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, - NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x2, X86_DECODE_CMD_ADD, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, - NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x3, X86_DECODE_CMD_ADD, 0, true, decode_modrm_reg, decode_modrm_rm, NULL, - NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x4, X86_DECODE_CMD_ADD, 1, false, decode_rax, decode_imm8, NULL, NULL, - NULL, RFLAGS_MASK_OSZAPC}, - {0x5, X86_DECODE_CMD_ADD, 0, false, decode_rax, decode_imm, NULL, NULL, - NULL, RFLAGS_MASK_OSZAPC}, - {0x6, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL, - decode_pushseg, RFLAGS_MASK_NONE}, - {0x7, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL, - decode_popseg, RFLAGS_MASK_NONE}, - {0x8, X86_DECODE_CMD_OR, 1, true, decode_modrm_rm, decode_modrm_reg, NULL, - NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x9, X86_DECODE_CMD_OR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL, - NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xa, X86_DECODE_CMD_OR, 1, true, decode_modrm_reg, decode_modrm_rm, NULL, - NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xb, X86_DECODE_CMD_OR, 0, 
true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xc, X86_DECODE_CMD_OR, 1, false, decode_rax, decode_imm8, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xd, X86_DECODE_CMD_OR, 0, false, decode_rax, decode_imm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - - {0xe, X86_DECODE_CMD_PUSH_SEG, 0, false, false, - NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE}, - {0xf, X86_DECODE_CMD_POP_SEG, 0, false, false, - NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE}, - - {0x10, X86_DECODE_CMD_ADC, 1, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x11, X86_DECODE_CMD_ADC, 0, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x12, X86_DECODE_CMD_ADC, 1, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x13, X86_DECODE_CMD_ADC, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x14, X86_DECODE_CMD_ADC, 1, false, decode_rax, decode_imm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x15, X86_DECODE_CMD_ADC, 0, false, decode_rax, decode_imm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - - {0x16, X86_DECODE_CMD_PUSH_SEG, 0, false, false, - NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE}, - {0x17, X86_DECODE_CMD_POP_SEG, 0, false, false, - NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE}, - - {0x18, X86_DECODE_CMD_SBB, 1, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x19, X86_DECODE_CMD_SBB, 0, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x1a, X86_DECODE_CMD_SBB, 1, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x1b, X86_DECODE_CMD_SBB, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x1c, X86_DECODE_CMD_SBB, 1, false, decode_rax, decode_imm8, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x1d, X86_DECODE_CMD_SBB, 0, false, decode_rax, decode_imm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - - {0x1e, X86_DECODE_CMD_PUSH_SEG, 0, false, false, - NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE}, - {0x1f, X86_DECODE_CMD_POP_SEG, 0, false, false, - NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE}, - - {0x20, X86_DECODE_CMD_AND, 1, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x21, X86_DECODE_CMD_AND, 0, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x22, X86_DECODE_CMD_AND, 1, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x23, X86_DECODE_CMD_AND, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x24, X86_DECODE_CMD_AND, 1, false, decode_rax, decode_imm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x25, X86_DECODE_CMD_AND, 0, false, decode_rax, decode_imm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x28, X86_DECODE_CMD_SUB, 1, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x29, X86_DECODE_CMD_SUB, 0, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x2a, X86_DECODE_CMD_SUB, 1, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x2b, X86_DECODE_CMD_SUB, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x2c, X86_DECODE_CMD_SUB, 1, false, decode_rax, decode_imm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x2d, X86_DECODE_CMD_SUB, 0, false, decode_rax, decode_imm, - NULL, 
NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x2f, X86_DECODE_CMD_DAS, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x30, X86_DECODE_CMD_XOR, 1, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x31, X86_DECODE_CMD_XOR, 0, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x32, X86_DECODE_CMD_XOR, 1, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x33, X86_DECODE_CMD_XOR, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x34, X86_DECODE_CMD_XOR, 1, false, decode_rax, decode_imm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x35, X86_DECODE_CMD_XOR, 0, false, decode_rax, decode_imm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - - {0x38, X86_DECODE_CMD_CMP, 1, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x39, X86_DECODE_CMD_CMP, 0, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x3a, X86_DECODE_CMD_CMP, 1, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x3b, X86_DECODE_CMD_CMP, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x3c, X86_DECODE_CMD_CMP, 1, false, decode_rax, decode_imm8, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x3d, X86_DECODE_CMD_CMP, 0, false, decode_rax, decode_imm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - - {0x3f, X86_DECODE_CMD_AAS, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - - {0x40, X86_DECODE_CMD_INC, 0, false, - NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC}, - {0x41, X86_DECODE_CMD_INC, 0, false, - NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC}, - {0x42, X86_DECODE_CMD_INC, 0, false, - NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC}, - {0x43, X86_DECODE_CMD_INC, 0, false, - NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC}, - {0x44, X86_DECODE_CMD_INC, 0, false, - NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC}, - {0x45, X86_DECODE_CMD_INC, 0, false, - NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC}, - {0x46, X86_DECODE_CMD_INC, 0, false, - NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC}, - {0x47, X86_DECODE_CMD_INC, 0, false, - NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC}, - - {0x48, X86_DECODE_CMD_DEC, 0, false, - NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC}, - {0x49, X86_DECODE_CMD_DEC, 0, false, - NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC}, - {0x4a, X86_DECODE_CMD_DEC, 0, false, - NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC}, - {0x4b, X86_DECODE_CMD_DEC, 0, false, - NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC}, - {0x4c, X86_DECODE_CMD_DEC, 0, false, - NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC}, - {0x4d, X86_DECODE_CMD_DEC, 0, false, - NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC}, - {0x4e, X86_DECODE_CMD_DEC, 0, false, - NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC}, - {0x4f, X86_DECODE_CMD_DEC, 0, false, - NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC}, - - {0x50, X86_DECODE_CMD_PUSH, 0, false, - NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE}, - {0x51, X86_DECODE_CMD_PUSH, 0, false, - NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE}, - {0x52, X86_DECODE_CMD_PUSH, 0, false, - NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE}, - {0x53, X86_DECODE_CMD_PUSH, 0, false, - 
NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE}, - {0x54, X86_DECODE_CMD_PUSH, 0, false, - NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE}, - {0x55, X86_DECODE_CMD_PUSH, 0, false, - NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE}, - {0x56, X86_DECODE_CMD_PUSH, 0, false, - NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE}, - {0x57, X86_DECODE_CMD_PUSH, 0, false, - NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE}, - - {0x58, X86_DECODE_CMD_POP, 0, false, - NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE}, - {0x59, X86_DECODE_CMD_POP, 0, false, - NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE}, - {0x5a, X86_DECODE_CMD_POP, 0, false, - NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE}, - {0x5b, X86_DECODE_CMD_POP, 0, false, - NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE}, - {0x5c, X86_DECODE_CMD_POP, 0, false, - NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE}, - {0x5d, X86_DECODE_CMD_POP, 0, false, - NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE}, - {0x5e, X86_DECODE_CMD_POP, 0, false, - NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE}, - {0x5f, X86_DECODE_CMD_POP, 0, false, - NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE}, - - {0x60, X86_DECODE_CMD_PUSHA, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x61, X86_DECODE_CMD_POPA, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0x68, X86_DECODE_CMD_PUSH, 0, false, decode_imm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x6a, X86_DECODE_CMD_PUSH, 0, false, decode_imm8_signed, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x69, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg, - decode_modrm_rm, decode_imm, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x6b, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg, decode_modrm_rm, - decode_imm8_signed, NULL, NULL, RFLAGS_MASK_OSZAPC}, - - {0x6c, X86_DECODE_CMD_INS, 1, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x6d, X86_DECODE_CMD_INS, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x6e, X86_DECODE_CMD_OUTS, 1, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x6f, X86_DECODE_CMD_OUTS, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0x70, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x71, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x72, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x73, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x74, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x75, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x76, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x77, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x78, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x79, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x7a, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x7b, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x7c, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x7d, 
X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x7e, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x7f, X86_DECODE_CMD_JXX, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - - {0x80, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8, - NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC}, - {0x81, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm, - NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC}, - {0x82, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8, - NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC}, - {0x83, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8_signed, - NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC}, - {0x84, X86_DECODE_CMD_TST, 1, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x85, X86_DECODE_CMD_TST, 0, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0x86, X86_DECODE_CMD_XCHG, 1, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x87, X86_DECODE_CMD_XCHG, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x88, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x89, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x8a, X86_DECODE_CMD_MOV, 1, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x8b, X86_DECODE_CMD_MOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x8c, X86_DECODE_CMD_MOV_FROM_SEG, 0, true, decode_modrm_rm, - decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x8d, X86_DECODE_CMD_LEA, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x8e, X86_DECODE_CMD_MOV_TO_SEG, 0, true, decode_modrm_reg, - decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x8f, X86_DECODE_CMD_POP, 0, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0x90, X86_DECODE_CMD_NOP, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x91, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, - NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE}, - {0x92, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, - NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE}, - {0x93, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, - NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE}, - {0x94, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, - NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE}, - {0x95, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, - NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE}, - {0x96, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, - NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE}, - {0x97, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax, - NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE}, - - {0x98, X86_DECODE_CMD_CBW, 0, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x99, X86_DECODE_CMD_CWD, 0, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0x9a, X86_DECODE_CMD_CALL_FAR, 0, false, NULL, - NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE}, - - {0x9c, X86_DECODE_CMD_PUSHF, 0, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - /*{0x9d, X86_DECODE_CMD_POPF, 0, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_POPF},*/ - {0x9e, X86_DECODE_CMD_SAHF, 0, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x9f, 
X86_DECODE_CMD_LAHF, 0, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_LAHF}, - - {0xa0, X86_DECODE_CMD_MOV, 1, false, decode_rax, fetch_moffs, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xa1, X86_DECODE_CMD_MOV, 0, false, decode_rax, fetch_moffs, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xa2, X86_DECODE_CMD_MOV, 1, false, fetch_moffs, decode_rax, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xa3, X86_DECODE_CMD_MOV, 0, false, fetch_moffs, decode_rax, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xa4, X86_DECODE_CMD_MOVS, 1, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xa5, X86_DECODE_CMD_MOVS, 0, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xa6, X86_DECODE_CMD_CMPS, 1, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xa7, X86_DECODE_CMD_CMPS, 0, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xaa, X86_DECODE_CMD_STOS, 1, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xab, X86_DECODE_CMD_STOS, 0, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xac, X86_DECODE_CMD_LODS, 1, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xad, X86_DECODE_CMD_LODS, 0, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xae, X86_DECODE_CMD_SCAS, 1, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xaf, X86_DECODE_CMD_SCAS, 0, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - - {0xa8, X86_DECODE_CMD_TST, 1, false, decode_rax, decode_imm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xa9, X86_DECODE_CMD_TST, 0, false, decode_rax, decode_imm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - - {0xb0, X86_DECODE_CMD_MOV, 1, false, NULL, - NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE}, - {0xb1, X86_DECODE_CMD_MOV, 1, false, NULL, - NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE}, - {0xb2, X86_DECODE_CMD_MOV, 1, false, NULL, - NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE}, - {0xb3, X86_DECODE_CMD_MOV, 1, false, NULL, - NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE}, - {0xb4, X86_DECODE_CMD_MOV, 1, false, NULL, - NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE}, - {0xb5, X86_DECODE_CMD_MOV, 1, false, NULL, - NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE}, - {0xb6, X86_DECODE_CMD_MOV, 1, false, NULL, - NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE}, - {0xb7, X86_DECODE_CMD_MOV, 1, false, NULL, - NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE}, - - {0xb8, X86_DECODE_CMD_MOV, 0, false, NULL, - NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE}, - {0xb9, X86_DECODE_CMD_MOV, 0, false, NULL, - NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE}, - {0xba, X86_DECODE_CMD_MOV, 0, false, NULL, - NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE}, - {0xbb, X86_DECODE_CMD_MOV, 0, false, NULL, - NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE}, - {0xbc, X86_DECODE_CMD_MOV, 0, false, NULL, - NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE}, - {0xbd, X86_DECODE_CMD_MOV, 0, false, NULL, - NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE}, - {0xbe, X86_DECODE_CMD_MOV, 0, false, NULL, - NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE}, - {0xbf, X86_DECODE_CMD_MOV, 0, false, NULL, - NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE}, - - {0xc0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8, - NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC}, - {0xc1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8, - NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC}, - - {0xc2, X86_DECODE_RET_NEAR, 0, false, decode_imm16, 
- NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xc3, X86_DECODE_RET_NEAR, 0, false, NULL, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xc4, X86_DECODE_CMD_LES, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xc5, X86_DECODE_CMD_LDS, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xc6, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_imm8, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xc7, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_imm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xc8, X86_DECODE_CMD_ENTER, 0, false, decode_imm16, decode_imm8, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xc9, X86_DECODE_CMD_LEAVE, 0, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xca, X86_DECODE_RET_FAR, 0, false, decode_imm16, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xcb, X86_DECODE_RET_FAR, 0, false, decode_imm_0, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xcd, X86_DECODE_CMD_INT, 0, false, decode_imm8, NULL, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - /*{0xcf, X86_DECODE_CMD_IRET, 0, false, NULL, NULL, - NULL, NULL, NULL, RFLAGS_MASK_IRET},*/ - - {0xd0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm_1, - NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC}, - {0xd1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm_1, - NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC}, - {0xd2, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_rcx, - NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC}, - {0xd3, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_rcx, - NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC}, - - {0xd4, X86_DECODE_CMD_AAM, 0, false, decode_imm8, - NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xd5, X86_DECODE_CMD_AAD, 0, false, decode_imm8, - NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - - {0xd7, X86_DECODE_CMD_XLAT, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xd8, X86_DECODE_CMD_INVL, 0, true, NULL, - NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE}, - {0xd9, X86_DECODE_CMD_INVL, 0, true, NULL, - NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE}, - {0xda, X86_DECODE_CMD_INVL, 0, true, NULL, - NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE}, - {0xdb, X86_DECODE_CMD_INVL, 0, true, NULL, - NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE}, - {0xdc, X86_DECODE_CMD_INVL, 0, true, NULL, - NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE}, - {0xdd, X86_DECODE_CMD_INVL, 0, true, NULL, - NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE}, - {0xde, X86_DECODE_CMD_INVL, 0, true, NULL, - NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE}, - {0xdf, X86_DECODE_CMD_INVL, 0, true, NULL, - NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE}, - - {0xe0, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xe1, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xe2, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xe3, X86_DECODE_CMD_JCXZ, 1, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - - {0xe4, X86_DECODE_CMD_IN, 1, false, decode_imm8, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xe5, X86_DECODE_CMD_IN, 0, false, decode_imm8, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xe6, X86_DECODE_CMD_OUT, 1, false, decode_imm8, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xe7, X86_DECODE_CMD_OUT, 0, false, decode_imm8, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - 
{0xe8, X86_DECODE_CMD_CALL_NEAR, 0, false, decode_imm_signed, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xe9, X86_DECODE_CMD_JMP_NEAR, 0, false, decode_imm_signed, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xea, X86_DECODE_CMD_JMP_FAR, 0, false, - NULL, NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE}, - {0xeb, X86_DECODE_CMD_JMP_NEAR, 1, false, decode_imm8_signed, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xec, X86_DECODE_CMD_IN, 1, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xed, X86_DECODE_CMD_IN, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xee, X86_DECODE_CMD_OUT, 1, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xef, X86_DECODE_CMD_OUT, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xf4, X86_DECODE_CMD_HLT, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xf5, X86_DECODE_CMD_CMC, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF}, - - {0xf6, X86_DECODE_CMD_INVL, 1, true, - NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC}, - {0xf7, X86_DECODE_CMD_INVL, 0, true, - NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC}, - - {0xf8, X86_DECODE_CMD_CLC, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF}, - {0xf9, X86_DECODE_CMD_STC, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF}, - - {0xfa, X86_DECODE_CMD_CLI, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IF}, - {0xfb, X86_DECODE_CMD_STI, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IF}, - {0xfc, X86_DECODE_CMD_CLD, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF}, - {0xfd, X86_DECODE_CMD_STD, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF}, - {0xfe, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, - NULL, NULL, NULL, decode_incgroup2, RFLAGS_MASK_OSZAPC}, - {0xff, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, - NULL, NULL, NULL, decode_ffgroup, RFLAGS_MASK_OSZAPC}, -}; - -struct decode_tbl _2op_inst[] = { - {0x0, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, - NULL, NULL, NULL, decode_sldtgroup, RFLAGS_MASK_NONE}, - {0x1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, - NULL, NULL, NULL, decode_lidtgroup, RFLAGS_MASK_NONE}, - {0x6, X86_DECODE_CMD_CLTS, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_TF}, - {0x9, X86_DECODE_CMD_WBINVD, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x18, X86_DECODE_CMD_PREFETCH, 0, true, - NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE}, - {0x1f, X86_DECODE_CMD_NOP, 0, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x20, X86_DECODE_CMD_MOV_FROM_CR, 0, true, decode_modrm_rm, - decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x21, X86_DECODE_CMD_MOV_FROM_DR, 0, true, decode_modrm_rm, - decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x22, X86_DECODE_CMD_MOV_TO_CR, 0, true, decode_modrm_reg, - decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x23, X86_DECODE_CMD_MOV_TO_DR, 0, true, decode_modrm_reg, - decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x30, X86_DECODE_CMD_WRMSR, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x31, X86_DECODE_CMD_RDTSC, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x32, X86_DECODE_CMD_RDMSR, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x40, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x41, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, 
RFLAGS_MASK_NONE}, - {0x42, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x43, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x44, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x45, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x46, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x47, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x48, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x49, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x4a, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x4b, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x4c, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x4d, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x4e, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x4f, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x77, X86_DECODE_CMD_EMMS, 0, false, - NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE}, - {0x82, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x83, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x84, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x85, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x86, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x87, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x88, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x89, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x8a, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x8b, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x8c, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x8d, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x8e, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x8f, X86_DECODE_CMD_JXX, 0, false, - NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE}, - {0x90, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x91, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x92, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x93, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x94, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, 
RFLAGS_MASK_NONE}, - {0x95, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x96, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x97, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x98, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x99, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x9a, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x9b, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x9c, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x9d, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x9e, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0x9f, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xb0, X86_DECODE_CMD_CMPXCHG, 1, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xb1, X86_DECODE_CMD_CMPXCHG, 0, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xb6, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xb7, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xb8, X86_DECODE_CMD_POPCNT, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xbe, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xbf, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xa0, X86_DECODE_CMD_PUSH_SEG, 0, false, false, - NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE}, - {0xa1, X86_DECODE_CMD_POP_SEG, 0, false, false, - NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE}, - {0xa2, X86_DECODE_CMD_CPUID, 0, false, - NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xa3, X86_DECODE_CMD_BT, 0, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_CF}, - {0xa4, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg, - decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xa5, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg, - decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xa8, X86_DECODE_CMD_PUSH_SEG, 0, false, false, - NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE}, - {0xa9, X86_DECODE_CMD_POP_SEG, 0, false, false, - NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE}, - {0xab, X86_DECODE_CMD_BTS, 0, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_CF}, - {0xac, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg, - decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xad, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg, - decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC}, - - {0xae, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, - NULL, NULL, NULL, decode_aegroup, RFLAGS_MASK_NONE}, - - {0xaf, X86_DECODE_CMD_IMUL_2, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xb2, X86_DECODE_CMD_LSS, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_NONE}, - {0xb3, X86_DECODE_CMD_BTR, 0, true, 
decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xba, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8, - NULL, NULL, decode_btgroup, RFLAGS_MASK_OSZAPC}, - {0xbb, X86_DECODE_CMD_BTC, 0, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xbc, X86_DECODE_CMD_BSF, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - {0xbd, X86_DECODE_CMD_BSR, 0, true, decode_modrm_reg, decode_modrm_rm, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - - {0xc1, X86_DECODE_CMD_XADD, 0, true, decode_modrm_rm, decode_modrm_reg, - NULL, NULL, NULL, RFLAGS_MASK_OSZAPC}, - - {0xc7, X86_DECODE_CMD_CMPXCHG8B, 0, true, decode_modrm_rm, - NULL, NULL, NULL, NULL, RFLAGS_MASK_ZF}, - - {0xc8, X86_DECODE_CMD_BSWAP, 0, false, - NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE}, - {0xc9, X86_DECODE_CMD_BSWAP, 0, false, - NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE}, - {0xca, X86_DECODE_CMD_BSWAP, 0, false, - NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE}, - {0xcb, X86_DECODE_CMD_BSWAP, 0, false, - NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE}, - {0xcc, X86_DECODE_CMD_BSWAP, 0, false, - NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE}, - {0xcd, X86_DECODE_CMD_BSWAP, 0, false, - NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE}, - {0xce, X86_DECODE_CMD_BSWAP, 0, false, - NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE}, - {0xcf, X86_DECODE_CMD_BSWAP, 0, false, - NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE}, -}; - -struct decode_x87_tbl invl_inst_x87 = {0x0, 0, 0, 0, 0, false, false, NULL, - NULL, decode_invalid, 0}; - -struct decode_x87_tbl _x87_inst[] = { - {0xd8, 0, 3, X86_DECODE_CMD_FADD, 10, false, false, - decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xd8, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0, - decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE}, - {0xd8, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false, decode_x87_modrm_st0, - decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xd8, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0, - decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE}, - {0xd8, 4, 3, X86_DECODE_CMD_FSUB, 10, false, false, decode_x87_modrm_st0, - decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xd8, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0, - decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE}, - {0xd8, 5, 3, X86_DECODE_CMD_FSUB, 10, true, false, decode_x87_modrm_st0, - decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xd8, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0, - decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE}, - {0xd8, 6, 3, X86_DECODE_CMD_FDIV, 10, false, false, decode_x87_modrm_st0, - decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xd8, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0, - decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE}, - {0xd8, 7, 3, X86_DECODE_CMD_FDIV, 10, true, false, decode_x87_modrm_st0, - decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xd8, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0, - decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE}, - - {0xd9, 0, 3, X86_DECODE_CMD_FLD, 10, false, false, - decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE}, - {0xd9, 0, 0, X86_DECODE_CMD_FLD, 4, false, false, - decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xd9, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, decode_x87_modrm_st0, - 
decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xd9, 1, 0, X86_DECODE_CMD_INVL, 10, false, false, - decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE}, - {0xd9, 2, 3, X86_DECODE_CMD_INVL, 10, false, false, - decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE}, - {0xd9, 2, 0, X86_DECODE_CMD_FST, 4, false, false, - decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xd9, 3, 3, X86_DECODE_CMD_INVL, 10, false, false, - decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE}, - {0xd9, 3, 0, X86_DECODE_CMD_FST, 4, false, true, - decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xd9, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, - decode_x87_modrm_st0, NULL, decode_d9_4, RFLAGS_MASK_NONE}, - {0xd9, 4, 0, X86_DECODE_CMD_INVL, 4, false, false, - decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE}, - {0xd9, 5, 3, X86_DECODE_CMD_FLDxx, 10, false, false, NULL, NULL, NULL, - RFLAGS_MASK_NONE}, - {0xd9, 5, 0, X86_DECODE_CMD_FLDCW, 2, false, false, - decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xd9, 7, 3, X86_DECODE_CMD_FNSTCW, 2, false, false, - decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE}, - {0xd9, 7, 0, X86_DECODE_CMD_FNSTCW, 2, false, false, - decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xda, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xda, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0, - decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE}, - {0xda, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, - decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xda, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0, - decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE}, - {0xda, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, - decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xda, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, - decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xda, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL, - RFLAGS_MASK_NONE}, - {0xda, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0, - decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE}, - {0xda, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, decode_x87_modrm_st0, - decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xda, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0, - decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE}, - {0xda, 6, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL, - RFLAGS_MASK_NONE}, - {0xda, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0, - decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE}, - {0xda, 7, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL, - RFLAGS_MASK_NONE}, - {0xda, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0, - decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE}, - - {0xdb, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0, - decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdb, 0, 0, X86_DECODE_CMD_FLD, 4, false, false, - decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdb, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdb, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdb, 2, 0, X86_DECODE_CMD_FST, 4, false, false, - decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdb, 3, 3, X86_DECODE_CMD_FCMOV, 
10, false, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdb, 3, 0, X86_DECODE_CMD_FST, 4, false, true, - decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdb, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, - decode_db_4, RFLAGS_MASK_NONE}, - {0xdb, 4, 0, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL, - RFLAGS_MASK_NONE}, - {0xdb, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdb, 5, 0, X86_DECODE_CMD_FLD, 10, false, false, - decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdb, 7, 0, X86_DECODE_CMD_FST, 10, false, true, - decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xdc, 0, 3, X86_DECODE_CMD_FADD, 10, false, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdc, 0, 0, X86_DECODE_CMD_FADD, 8, false, false, - decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE}, - {0xdc, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdc, 1, 0, X86_DECODE_CMD_FMUL, 8, false, false, - decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE}, - {0xdc, 4, 3, X86_DECODE_CMD_FSUB, 10, true, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdc, 4, 0, X86_DECODE_CMD_FSUB, 8, false, false, - decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE}, - {0xdc, 5, 3, X86_DECODE_CMD_FSUB, 10, false, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdc, 5, 0, X86_DECODE_CMD_FSUB, 8, true, false, - decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE}, - {0xdc, 6, 3, X86_DECODE_CMD_FDIV, 10, true, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdc, 6, 0, X86_DECODE_CMD_FDIV, 8, false, false, - decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE}, - {0xdc, 7, 3, X86_DECODE_CMD_FDIV, 10, false, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdc, 7, 0, X86_DECODE_CMD_FDIV, 8, true, false, - decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE}, - - {0xdd, 0, 0, X86_DECODE_CMD_FLD, 8, false, false, - decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdd, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdd, 2, 3, X86_DECODE_CMD_FST, 10, false, false, - decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdd, 2, 0, X86_DECODE_CMD_FST, 8, false, false, - decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdd, 3, 3, X86_DECODE_CMD_FST, 10, false, true, - decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdd, 3, 0, X86_DECODE_CMD_FST, 8, false, true, - decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdd, 4, 3, X86_DECODE_CMD_FUCOM, 10, false, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdd, 4, 0, X86_DECODE_CMD_FRSTOR, 8, false, false, - decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdd, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdd, 7, 0, X86_DECODE_CMD_FNSTSW, 0, false, false, - decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdd, 7, 3, X86_DECODE_CMD_FNSTSW, 0, false, false, - decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE}, - - {0xde, 0, 3, X86_DECODE_CMD_FADD, 10, false, 
true, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xde, 0, 0, X86_DECODE_CMD_FADD, 2, false, false, - decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE}, - {0xde, 1, 3, X86_DECODE_CMD_FMUL, 10, false, true, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xde, 1, 0, X86_DECODE_CMD_FMUL, 2, false, false, - decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE}, - {0xde, 4, 3, X86_DECODE_CMD_FSUB, 10, true, true, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xde, 4, 0, X86_DECODE_CMD_FSUB, 2, false, false, - decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE}, - {0xde, 5, 3, X86_DECODE_CMD_FSUB, 10, false, true, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xde, 5, 0, X86_DECODE_CMD_FSUB, 2, true, false, - decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE}, - {0xde, 6, 3, X86_DECODE_CMD_FDIV, 10, true, true, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xde, 6, 0, X86_DECODE_CMD_FDIV, 2, false, false, - decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE}, - {0xde, 7, 3, X86_DECODE_CMD_FDIV, 10, false, true, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xde, 7, 0, X86_DECODE_CMD_FDIV, 2, true, false, - decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE}, - - {0xdf, 0, 0, X86_DECODE_CMD_FLD, 2, false, false, - decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdf, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdf, 2, 3, X86_DECODE_CMD_FST, 10, false, true, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdf, 2, 0, X86_DECODE_CMD_FST, 2, false, false, - decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdf, 3, 3, X86_DECODE_CMD_FST, 10, false, true, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdf, 3, 0, X86_DECODE_CMD_FST, 2, false, true, - decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdf, 4, 3, X86_DECODE_CMD_FNSTSW, 2, false, true, - decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdf, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, true, - decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE}, - {0xdf, 5, 0, X86_DECODE_CMD_FLD, 8, false, false, - decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE}, - {0xdf, 7, 0, X86_DECODE_CMD_FST, 8, false, true, - decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE}, -}; - -void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - target_ulong ptr = 0; - X86Seg seg = R_DS; - - if (!decode->modrm.mod && 6 == decode->modrm.rm) { - ptr = decode->displacement; - goto calc_addr; - } - - if (decode->displacement_size) { - ptr = sign(decode->displacement, decode->displacement_size); - } - - switch (decode->modrm.rm) { - case 0: - ptr += BX(env) + SI(env); - break; - case 1: - ptr += BX(env) + DI(env); - break; - case 2: - ptr += BP(env) + SI(env); - seg = R_SS; - break; - case 3: - ptr += BP(env) + DI(env); - seg = R_SS; - break; - case 4: - ptr += SI(env); - break; - case 5: - ptr += DI(env); - break; - case 6: - ptr += BP(env); - seg = R_SS; - break; - case 7: - ptr += BX(env); - break; - } -calc_addr: - if (X86_DECODE_CMD_LEA == decode->cmd) { - op->ptr = (uint16_t)ptr; - } else { - op->ptr = decode_linear_addr(env, decode, (uint16_t)ptr, seg); - } -} - -target_ulong 
get_reg_ref(CPUX86State *env, int reg, int rex_present, - int is_extended, int size) -{ - target_ulong ptr = 0; - - if (is_extended) { - reg |= R_R8; - } - - switch (size) { - case 1: - if (is_extended || reg < 4 || rex_present) { - ptr = (target_ulong)&RL(env, reg); - } else { - ptr = (target_ulong)&RH(env, reg - 4); - } - break; - default: - ptr = (target_ulong)&RRX(env, reg); - break; - } - return ptr; -} - -target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present, - int is_extended, int size) -{ - target_ulong val = 0; - memcpy(&val, - (void *)get_reg_ref(env, reg, rex_present, is_extended, size), - size); - return val; -} - -static target_ulong get_sib_val(CPUX86State *env, struct x86_decode *decode, - X86Seg *sel) -{ - target_ulong base = 0; - target_ulong scaled_index = 0; - int addr_size = decode->addressing_size; - int base_reg = decode->sib.base; - int index_reg = decode->sib.index; - - *sel = R_DS; - - if (decode->modrm.mod || base_reg != R_EBP) { - if (decode->rex.b) { - base_reg |= R_R8; - } - if (base_reg == R_ESP || base_reg == R_EBP) { - *sel = R_SS; - } - base = get_reg_val(env, decode->sib.base, decode->rex.rex, - decode->rex.b, addr_size); - } - - if (decode->rex.x) { - index_reg |= R_R8; - } - - if (index_reg != R_ESP) { - scaled_index = get_reg_val(env, index_reg, decode->rex.rex, - decode->rex.x, addr_size) << - decode->sib.scale; - } - return base + scaled_index; -} - -void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - X86Seg seg = R_DS; - target_ulong ptr = 0; - int addr_size = decode->addressing_size; - - if (decode->displacement_size) { - ptr = sign(decode->displacement, decode->displacement_size); - } - - if (4 == decode->modrm.rm) { - ptr += get_sib_val(env, decode, &seg); - } else if (!decode->modrm.mod && 5 == decode->modrm.rm) { - if (x86_is_long_mode(env_cpu(env))) { - ptr += env->eip + decode->len; - } else { - ptr = decode->displacement; - } - } else { - if (decode->modrm.rm == R_EBP || decode->modrm.rm == R_ESP) { - seg = R_SS; - } - ptr += get_reg_val(env, decode->modrm.rm, decode->rex.rex, - decode->rex.b, addr_size); - } - - if (X86_DECODE_CMD_LEA == decode->cmd) { - op->ptr = (uint32_t)ptr; - } else { - op->ptr = decode_linear_addr(env, decode, (uint32_t)ptr, seg); - } -} - -void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - X86Seg seg = R_DS; - int32_t offset = 0; - int mod = decode->modrm.mod; - int rm = decode->modrm.rm; - target_ulong ptr; - int src = decode->modrm.rm; - - if (decode->displacement_size) { - offset = sign(decode->displacement, decode->displacement_size); - } - - if (4 == rm) { - ptr = get_sib_val(env, decode, &seg) + offset; - } else if (0 == mod && 5 == rm) { - ptr = env->eip + decode->len + (int32_t) offset; - } else { - ptr = get_reg_val(env, src, decode->rex.rex, decode->rex.b, 8) + - (int64_t) offset; - } - - if (X86_DECODE_CMD_LEA == decode->cmd) { - op->ptr = ptr; - } else { - op->ptr = decode_linear_addr(env, decode, ptr, seg); - } -} - - -void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode, - struct x86_decode_op *op) -{ - if (3 == decode->modrm.mod) { - op->reg = decode->modrm.reg; - op->type = X86_VAR_REG; - op->ptr = get_reg_ref(env, decode->modrm.rm, decode->rex.rex, - decode->rex.b, decode->operand_size); - return; - } - - switch (decode->addressing_size) { - case 2: - calc_modrm_operand16(env, decode, op); - break; - case 4: - calc_modrm_operand32(env, decode, op); - break; - case 8: - 
calc_modrm_operand64(env, decode, op); - break; - default: - VM_PANIC_EX("unsupported address size %d\n", decode->addressing_size); - break; - } -} - -static void decode_prefix(CPUX86State *env, struct x86_decode *decode) -{ - while (1) { - /* - * REX prefix must come after legacy prefixes. - * REX before legacy is ignored. - * Clear rex to simulate this. - */ - uint8_t byte = decode_byte(env, decode); - switch (byte) { - case PREFIX_LOCK: - decode->lock = byte; - decode->rex.rex = 0; - break; - case PREFIX_REPN: - case PREFIX_REP: - decode->rep = byte; - decode->rex.rex = 0; - break; - case PREFIX_CS_SEG_OVERRIDE: - case PREFIX_SS_SEG_OVERRIDE: - case PREFIX_DS_SEG_OVERRIDE: - case PREFIX_ES_SEG_OVERRIDE: - case PREFIX_FS_SEG_OVERRIDE: - case PREFIX_GS_SEG_OVERRIDE: - decode->segment_override = byte; - decode->rex.rex = 0; - break; - case PREFIX_OP_SIZE_OVERRIDE: - decode->op_size_override = byte; - decode->rex.rex = 0; - break; - case PREFIX_ADDR_SIZE_OVERRIDE: - decode->addr_size_override = byte; - decode->rex.rex = 0; - break; - case PREFIX_REX ... (PREFIX_REX + 0xf): - if (x86_is_long_mode(env_cpu(env))) { - decode->rex.rex = byte; - break; - } - /* fall through when not in long mode */ - default: - decode->len--; - return; - } - } -} - -void set_addressing_size(CPUX86State *env, struct x86_decode *decode) -{ - decode->addressing_size = -1; - if (x86_is_real(env_cpu(env)) || x86_is_v8086(env_cpu(env))) { - if (decode->addr_size_override) { - decode->addressing_size = 4; - } else { - decode->addressing_size = 2; - } - } else if (!x86_is_long_mode(env_cpu(env))) { - /* protected */ - struct vmx_segment cs; - vmx_read_segment_descriptor(env_cpu(env), &cs, R_CS); - /* check db */ - if ((cs.ar >> 14) & 1) { - if (decode->addr_size_override) { - decode->addressing_size = 2; - } else { - decode->addressing_size = 4; - } - } else { - if (decode->addr_size_override) { - decode->addressing_size = 4; - } else { - decode->addressing_size = 2; - } - } - } else { - /* long */ - if (decode->addr_size_override) { - decode->addressing_size = 4; - } else { - decode->addressing_size = 8; - } - } -} - -void set_operand_size(CPUX86State *env, struct x86_decode *decode) -{ - decode->operand_size = -1; - if (x86_is_real(env_cpu(env)) || x86_is_v8086(env_cpu(env))) { - if (decode->op_size_override) { - decode->operand_size = 4; - } else { - decode->operand_size = 2; - } - } else if (!x86_is_long_mode(env_cpu(env))) { - /* protected */ - struct vmx_segment cs; - vmx_read_segment_descriptor(env_cpu(env), &cs, R_CS); - /* check db */ - if ((cs.ar >> 14) & 1) { - if (decode->op_size_override) { - decode->operand_size = 2; - } else{ - decode->operand_size = 4; - } - } else { - if (decode->op_size_override) { - decode->operand_size = 4; - } else { - decode->operand_size = 2; - } - } - } else { - /* long */ - if (decode->op_size_override) { - decode->operand_size = 2; - } else { - decode->operand_size = 4; - } - - if (decode->rex.w) { - decode->operand_size = 8; - } - } -} - -static void decode_sib(CPUX86State *env, struct x86_decode *decode) -{ - if ((decode->modrm.mod != 3) && (4 == decode->modrm.rm) && - (decode->addressing_size != 2)) { - decode->sib.sib = decode_byte(env, decode); - decode->sib_present = true; - } -} - -/* 16 bit modrm */ -int disp16_tbl[4][8] = { - {0, 0, 0, 0, 0, 0, 2, 0}, - {1, 1, 1, 1, 1, 1, 1, 1}, - {2, 2, 2, 2, 2, 2, 2, 2}, - {0, 0, 0, 0, 0, 0, 0, 0} -}; - -/* 32/64-bit modrm */ -int disp32_tbl[4][8] = { - {0, 0, 0, 0, -1, 4, 0, 0}, - {1, 1, 1, 1, 1, 1, 1, 1}, - {4, 4, 4, 4, 4, 4, 4, 4}, 
- {0, 0, 0, 0, 0, 0, 0, 0} -}; - -static inline void decode_displacement(CPUX86State *env, struct x86_decode *decode) -{ - int addressing_size = decode->addressing_size; - int mod = decode->modrm.mod; - int rm = decode->modrm.rm; - - decode->displacement_size = 0; - switch (addressing_size) { - case 2: - decode->displacement_size = disp16_tbl[mod][rm]; - if (decode->displacement_size) { - decode->displacement = (uint16_t)decode_bytes(env, decode, - decode->displacement_size); - } - break; - case 4: - case 8: - if (-1 == disp32_tbl[mod][rm]) { - if (5 == decode->sib.base) { - decode->displacement_size = 4; - } - } else { - decode->displacement_size = disp32_tbl[mod][rm]; - } - - if (decode->displacement_size) { - decode->displacement = (uint32_t)decode_bytes(env, decode, - decode->displacement_size); - } - break; - } -} - -static inline void decode_modrm(CPUX86State *env, struct x86_decode *decode) -{ - decode->modrm.modrm = decode_byte(env, decode); - decode->is_modrm = true; - - decode_sib(env, decode); - decode_displacement(env, decode); -} - -static inline void decode_opcode_general(CPUX86State *env, - struct x86_decode *decode, - uint8_t opcode, - struct decode_tbl *inst_decoder) -{ - decode->cmd = inst_decoder->cmd; - if (inst_decoder->operand_size) { - decode->operand_size = inst_decoder->operand_size; - } - decode->flags_mask = inst_decoder->flags_mask; - - if (inst_decoder->is_modrm) { - decode_modrm(env, decode); - } - if (inst_decoder->decode_op1) { - inst_decoder->decode_op1(env, decode, &decode->op[0]); - } - if (inst_decoder->decode_op2) { - inst_decoder->decode_op2(env, decode, &decode->op[1]); - } - if (inst_decoder->decode_op3) { - inst_decoder->decode_op3(env, decode, &decode->op[2]); - } - if (inst_decoder->decode_op4) { - inst_decoder->decode_op4(env, decode, &decode->op[3]); - } - if (inst_decoder->decode_postfix) { - inst_decoder->decode_postfix(env, decode); - } -} - -static inline void decode_opcode_1(CPUX86State *env, struct x86_decode *decode, - uint8_t opcode) -{ - struct decode_tbl *inst_decoder = &_decode_tbl1[opcode]; - decode_opcode_general(env, decode, opcode, inst_decoder); -} - - -static inline void decode_opcode_2(CPUX86State *env, struct x86_decode *decode, - uint8_t opcode) -{ - struct decode_tbl *inst_decoder = &_decode_tbl2[opcode]; - decode_opcode_general(env, decode, opcode, inst_decoder); -} - -static void decode_opcodes(CPUX86State *env, struct x86_decode *decode) -{ - uint8_t opcode; - - opcode = decode_byte(env, decode); - decode->opcode[decode->opcode_len++] = opcode; - if (opcode != OPCODE_ESCAPE) { - decode_opcode_1(env, decode, opcode); - } else { - opcode = decode_byte(env, decode); - decode->opcode[decode->opcode_len++] = opcode; - decode_opcode_2(env, decode, opcode); - } -} - -uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode) -{ - memset(decode, 0, sizeof(*decode)); - decode_prefix(env, decode); - set_addressing_size(env, decode); - set_operand_size(env, decode); - - decode_opcodes(env, decode); - - return decode->len; -} - -void init_decoder(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(_decode_tbl1); i++) { - memcpy(&_decode_tbl1[i], &invl_inst, sizeof(invl_inst)); - } - for (i = 0; i < ARRAY_SIZE(_decode_tbl2); i++) { - memcpy(&_decode_tbl2[i], &invl_inst, sizeof(invl_inst)); - } - for (i = 0; i < ARRAY_SIZE(_decode_tbl3); i++) { - memcpy(&_decode_tbl3[i], &invl_inst_x87, sizeof(invl_inst_x87)); - - } - for (i = 0; i < ARRAY_SIZE(_1op_inst); i++) { - _decode_tbl1[_1op_inst[i].opcode] = _1op_inst[i]; - } - for 
(i = 0; i < ARRAY_SIZE(_2op_inst); i++) { - _decode_tbl2[_2op_inst[i].opcode] = _2op_inst[i]; - } - for (i = 0; i < ARRAY_SIZE(_x87_inst); i++) { - int index = ((_x87_inst[i].opcode & 0xf) << 4) | - ((_x87_inst[i].modrm_mod & 1) << 3) | - _x87_inst[i].modrm_reg; - _decode_tbl3[index] = _x87_inst[i]; - } -} - - -const char *decode_cmd_to_string(enum x86_decode_cmd cmd) -{ - static const char *cmds[] = {"INVL", "PUSH", "PUSH_SEG", "POP", "POP_SEG", - "MOV", "MOVSX", "MOVZX", "CALL_NEAR", "CALL_NEAR_ABS_INDIRECT", - "CALL_FAR_ABS_INDIRECT", "CMD_CALL_FAR", "RET_NEAR", "RET_FAR", "ADD", - "OR", "ADC", "SBB", "AND", "SUB", "XOR", "CMP", "INC", "DEC", "TST", - "NOT", "NEG", "JMP_NEAR", "JMP_NEAR_ABS_INDIRECT", "JMP_FAR", - "JMP_FAR_ABS_INDIRECT", "LEA", "JXX", "JCXZ", "SETXX", "MOV_TO_SEG", - "MOV_FROM_SEG", "CLI", "STI", "CLD", "STD", "STC", "CLC", "OUT", "IN", - "INS", "OUTS", "LIDT", "SIDT", "LGDT", "SGDT", "SMSW", "LMSW", - "RDTSCP", "INVLPG", "MOV_TO_CR", "MOV_FROM_CR", "MOV_TO_DR", - "MOV_FROM_DR", "PUSHF", "POPF", "CPUID", "ROL", "ROR", "RCL", "RCR", - "SHL", "SAL", "SHR", "SHRD", "SHLD", "SAR", "DIV", "IDIV", "MUL", - "IMUL_3", "IMUL_2", "IMUL_1", "MOVS", "CMPS", "SCAS", "LODS", "STOS", - "BSWAP", "XCHG", "RDTSC", "RDMSR", "WRMSR", "ENTER", "LEAVE", "BT", - "BTS", "BTC", "BTR", "BSF", "BSR", "IRET", "INT", "POPA", "PUSHA", - "CWD", "CBW", "DAS", "AAD", "AAM", "AAS", "LOOP", "SLDT", "STR", "LLDT", - "LTR", "VERR", "VERW", "SAHF", "LAHF", "WBINVD", "LDS", "LSS", "LES", - "LGS", "LFS", "CMC", "XLAT", "NOP", "CMOV", "CLTS", "XADD", "HLT", - "CMPXCHG8B", "CMPXCHG", "POPCNT", "FNINIT", "FLD", "FLDxx", "FNSTCW", - "FNSTSW", "FNSETPM", "FSAVE", "FRSTOR", "FXSAVE", "FXRSTOR", "FDIV", - "FMUL", "FSUB", "FADD", "EMMS", "MFENCE", "SFENCE", "LFENCE", - "PREFETCH", "FST", "FABS", "FUCOM", "FUCOMI", "FLDCW", - "FXCH", "FCHS", "FCMOV", "FRNDINT", "FXAM", "LAST"}; - return cmds[cmd]; -} - -target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode, - target_ulong addr, X86Seg seg) -{ - switch (decode->segment_override) { - case PREFIX_CS_SEG_OVERRIDE: - seg = R_CS; - break; - case PREFIX_SS_SEG_OVERRIDE: - seg = R_SS; - break; - case PREFIX_DS_SEG_OVERRIDE: - seg = R_DS; - break; - case PREFIX_ES_SEG_OVERRIDE: - seg = R_ES; - break; - case PREFIX_FS_SEG_OVERRIDE: - seg = R_FS; - break; - case PREFIX_GS_SEG_OVERRIDE: - seg = R_GS; - break; - default: - break; - } - return linear_addr_size(env_cpu(env), addr, decode->addressing_size, seg); -} diff --git a/target/i386/hvf/x86_descr.c b/target/i386/hvf/x86_descr.c index f33836d6cba..7b599c90377 100644 --- a/target/i386/hvf/x86_descr.c +++ b/target/i386/hvf/x86_descr.c @@ -60,14 +60,14 @@ uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg) return rvmcs(cpu->accel->fd, vmx_segment_fields[seg].base); } -x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg) +x86_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg) { - x68_segment_selector sel; + x86_segment_selector sel; sel.sel = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector); return sel; } -void vmx_write_segment_selector(CPUState *cpu, x68_segment_selector selector, X86Seg seg) +void vmx_write_segment_selector(CPUState *cpu, x86_segment_selector selector, X86Seg seg) { wvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector, selector.sel); } @@ -90,7 +90,7 @@ void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Se wvmcs(cpu->accel->fd, sf->ar_bytes, desc->ar); } -void x86_segment_descriptor_to_vmx(CPUState *cpu, 
x68_segment_selector selector, +void x86_segment_descriptor_to_vmx(CPUState *cpu, x86_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc) { diff --git a/target/i386/hvf/x86_descr.h b/target/i386/hvf/x86_descr.h index 9f06014b56a..24af4946cd4 100644 --- a/target/i386/hvf/x86_descr.h +++ b/target/i386/hvf/x86_descr.h @@ -19,7 +19,7 @@ #ifndef HVF_X86_DESCR_H #define HVF_X86_DESCR_H -#include "x86.h" +#include "emulate/x86.h" typedef struct vmx_segment { uint16_t sel; @@ -34,10 +34,10 @@ void vmx_read_segment_descriptor(CPUState *cpu, void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, enum X86Seg seg); -x68_segment_selector vmx_read_segment_selector(CPUState *cpu, +x86_segment_selector vmx_read_segment_selector(CPUState *cpu, enum X86Seg seg); void vmx_write_segment_selector(CPUState *cpu, - x68_segment_selector selector, + x86_segment_selector selector, enum X86Seg seg); uint64_t vmx_read_segment_base(CPUState *cpu, enum X86Seg seg); @@ -45,7 +45,7 @@ void vmx_write_segment_base(CPUState *cpu, enum X86Seg seg, uint64_t base); void x86_segment_descriptor_to_vmx(CPUState *cpu, - x68_segment_selector selector, + x86_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc); diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c deleted file mode 100644 index 38c782b8e3b..00000000000 --- a/target/i386/hvf/x86_emu.c +++ /dev/null @@ -1,1487 +0,0 @@ -/* - * Copyright (C) 2016 Veertu Inc, - * Copyright (C) 2017 Google Inc, - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, see . - */ - -///////////////////////////////////////////////////////////////////////// -// -// Copyright (C) 2001-2012 The Bochs Project -// -// This library is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// -// This library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -// Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public -// License along with this library; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA B 02110-1301 USA -///////////////////////////////////////////////////////////////////////// - -#include "qemu/osdep.h" -#include "panic.h" -#include "x86_decode.h" -#include "x86.h" -#include "x86_emu.h" -#include "x86_mmu.h" -#include "x86_flags.h" -#include "vmcs.h" -#include "vmx.h" - -void hvf_handle_io(CPUState *cs, uint16_t port, void *data, - int direction, int size, uint32_t count); - -#define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \ -{ \ - fetch_operands(env, decode, 2, true, true, false); \ - switch (decode->operand_size) { \ - case 1: \ - { \ - uint8_t v1 = (uint8_t)decode->op[0].val; \ - uint8_t v2 = (uint8_t)decode->op[1].val; \ - uint8_t diff = v1 cmd v2; \ - if (save_res) { \ - write_val_ext(env, decode->op[0].ptr, diff, 1); \ - } \ - FLAGS_FUNC##8(env, v1, v2, diff); \ - break; \ - } \ - case 2: \ - { \ - uint16_t v1 = (uint16_t)decode->op[0].val; \ - uint16_t v2 = (uint16_t)decode->op[1].val; \ - uint16_t diff = v1 cmd v2; \ - if (save_res) { \ - write_val_ext(env, decode->op[0].ptr, diff, 2); \ - } \ - FLAGS_FUNC##16(env, v1, v2, diff); \ - break; \ - } \ - case 4: \ - { \ - uint32_t v1 = (uint32_t)decode->op[0].val; \ - uint32_t v2 = (uint32_t)decode->op[1].val; \ - uint32_t diff = v1 cmd v2; \ - if (save_res) { \ - write_val_ext(env, decode->op[0].ptr, diff, 4); \ - } \ - FLAGS_FUNC##32(env, v1, v2, diff); \ - break; \ - } \ - default: \ - VM_PANIC("bad size\n"); \ - } \ -} \ - -target_ulong read_reg(CPUX86State *env, int reg, int size) -{ - switch (size) { - case 1: - return x86_reg(env, reg)->lx; - case 2: - return x86_reg(env, reg)->rx; - case 4: - return x86_reg(env, reg)->erx; - case 8: - return x86_reg(env, reg)->rrx; - default: - abort(); - } - return 0; -} - -void write_reg(CPUX86State *env, int reg, target_ulong val, int size) -{ - switch (size) { - case 1: - x86_reg(env, reg)->lx = val; - break; - case 2: - x86_reg(env, reg)->rx = val; - break; - case 4: - x86_reg(env, reg)->rrx = (uint32_t)val; - break; - case 8: - x86_reg(env, reg)->rrx = val; - break; - default: - abort(); - } -} - -target_ulong read_val_from_reg(target_ulong reg_ptr, int size) -{ - target_ulong val; - - switch (size) { - case 1: - val = *(uint8_t *)reg_ptr; - break; - case 2: - val = *(uint16_t *)reg_ptr; - break; - case 4: - val = *(uint32_t *)reg_ptr; - break; - case 8: - val = *(uint64_t *)reg_ptr; - break; - default: - abort(); - } - return val; -} - -void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size) -{ - switch (size) { - case 1: - *(uint8_t *)reg_ptr = val; - break; - case 2: - *(uint16_t *)reg_ptr = val; - break; - case 4: - *(uint64_t *)reg_ptr = (uint32_t)val; - break; - case 8: - *(uint64_t *)reg_ptr = val; - break; - default: - abort(); - } -} - -static bool is_host_reg(CPUX86State *env, target_ulong ptr) -{ - return (ptr - (target_ulong)&env->regs[0]) < sizeof(env->regs); -} - -void write_val_ext(CPUX86State *env, target_ulong ptr, target_ulong val, int size) -{ - if (is_host_reg(env, ptr)) { - write_val_to_reg(ptr, val, size); - return; - } - vmx_write_mem(env_cpu(env), ptr, &val, size); -} - -uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes) -{ - vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, ptr, bytes); - return env->hvf_mmio_buf; -} - - -target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size) -{ - 
target_ulong val; - uint8_t *mmio_ptr; - - if (is_host_reg(env, ptr)) { - return read_val_from_reg(ptr, size); - } - - mmio_ptr = read_mmio(env, ptr, size); - switch (size) { - case 1: - val = *(uint8_t *)mmio_ptr; - break; - case 2: - val = *(uint16_t *)mmio_ptr; - break; - case 4: - val = *(uint32_t *)mmio_ptr; - break; - case 8: - val = *(uint64_t *)mmio_ptr; - break; - default: - VM_PANIC("bad size\n"); - break; - } - return val; -} - -static void fetch_operands(CPUX86State *env, struct x86_decode *decode, - int n, bool val_op0, bool val_op1, bool val_op2) -{ - int i; - bool calc_val[3] = {val_op0, val_op1, val_op2}; - - for (i = 0; i < n; i++) { - switch (decode->op[i].type) { - case X86_VAR_IMMEDIATE: - break; - case X86_VAR_REG: - VM_PANIC_ON(!decode->op[i].ptr); - if (calc_val[i]) { - decode->op[i].val = read_val_from_reg(decode->op[i].ptr, - decode->operand_size); - } - break; - case X86_VAR_RM: - calc_modrm_operand(env, decode, &decode->op[i]); - if (calc_val[i]) { - decode->op[i].val = read_val_ext(env, decode->op[i].ptr, - decode->operand_size); - } - break; - case X86_VAR_OFFSET: - decode->op[i].ptr = decode_linear_addr(env, decode, - decode->op[i].ptr, - R_DS); - if (calc_val[i]) { - decode->op[i].val = read_val_ext(env, decode->op[i].ptr, - decode->operand_size); - } - break; - default: - break; - } - } -} - -static void exec_mov(CPUX86State *env, struct x86_decode *decode) -{ - fetch_operands(env, decode, 2, false, true, false); - write_val_ext(env, decode->op[0].ptr, decode->op[1].val, - decode->operand_size); - - env->eip += decode->len; -} - -static void exec_add(CPUX86State *env, struct x86_decode *decode) -{ - EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true); - env->eip += decode->len; -} - -static void exec_or(CPUX86State *env, struct x86_decode *decode) -{ - EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true); - env->eip += decode->len; -} - -static void exec_adc(CPUX86State *env, struct x86_decode *decode) -{ - EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true); - env->eip += decode->len; -} - -static void exec_sbb(CPUX86State *env, struct x86_decode *decode) -{ - EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true); - env->eip += decode->len; -} - -static void exec_and(CPUX86State *env, struct x86_decode *decode) -{ - EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true); - env->eip += decode->len; -} - -static void exec_sub(CPUX86State *env, struct x86_decode *decode) -{ - EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true); - env->eip += decode->len; -} - -static void exec_xor(CPUX86State *env, struct x86_decode *decode) -{ - EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true); - env->eip += decode->len; -} - -static void exec_neg(CPUX86State *env, struct x86_decode *decode) -{ - /*EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/ - int32_t val; - fetch_operands(env, decode, 2, true, true, false); - - val = 0 - sign(decode->op[1].val, decode->operand_size); - write_val_ext(env, decode->op[1].ptr, val, decode->operand_size); - - if (4 == decode->operand_size) { - SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val); - } else if (2 == decode->operand_size) { - SET_FLAGS_OSZAPC_SUB16(env, 0, 0 - val, val); - } else if (1 == decode->operand_size) { - SET_FLAGS_OSZAPC_SUB8(env, 0, 0 - val, val); - } else { - VM_PANIC("bad op size\n"); - } - - /*lflags_to_rflags(env);*/ - env->eip += decode->len; -} - -static void exec_cmp(CPUX86State *env, struct 
x86_decode *decode) -{ - EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false); - env->eip += decode->len; -} - -static void exec_inc(CPUX86State *env, struct x86_decode *decode) -{ - decode->op[1].type = X86_VAR_IMMEDIATE; - decode->op[1].val = 0; - - EXEC_2OP_FLAGS_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true); - - env->eip += decode->len; -} - -static void exec_dec(CPUX86State *env, struct x86_decode *decode) -{ - decode->op[1].type = X86_VAR_IMMEDIATE; - decode->op[1].val = 0; - - EXEC_2OP_FLAGS_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true); - env->eip += decode->len; -} - -static void exec_tst(CPUX86State *env, struct x86_decode *decode) -{ - EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false); - env->eip += decode->len; -} - -static void exec_not(CPUX86State *env, struct x86_decode *decode) -{ - fetch_operands(env, decode, 1, true, false, false); - - write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val, - decode->operand_size); - env->eip += decode->len; -} - -void exec_movzx(CPUX86State *env, struct x86_decode *decode) -{ - int src_op_size; - int op_size = decode->operand_size; - - fetch_operands(env, decode, 1, false, false, false); - - if (0xb6 == decode->opcode[1]) { - src_op_size = 1; - } else { - src_op_size = 2; - } - decode->operand_size = src_op_size; - calc_modrm_operand(env, decode, &decode->op[1]); - decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size); - write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size); - - env->eip += decode->len; -} - -static void exec_out(CPUX86State *env, struct x86_decode *decode) -{ - switch (decode->opcode[0]) { - case 0xe6: - hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 1, 1, 1); - break; - case 0xe7: - hvf_handle_io(env_cpu(env), decode->op[0].val, &RAX(env), 1, - decode->operand_size, 1); - break; - case 0xee: - hvf_handle_io(env_cpu(env), DX(env), &AL(env), 1, 1, 1); - break; - case 0xef: - hvf_handle_io(env_cpu(env), DX(env), &RAX(env), 1, - decode->operand_size, 1); - break; - default: - VM_PANIC("Bad out opcode\n"); - break; - } - env->eip += decode->len; -} - -static void exec_in(CPUX86State *env, struct x86_decode *decode) -{ - target_ulong val = 0; - switch (decode->opcode[0]) { - case 0xe4: - hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 0, 1, 1); - break; - case 0xe5: - hvf_handle_io(env_cpu(env), decode->op[0].val, &val, 0, - decode->operand_size, 1); - if (decode->operand_size == 2) { - AX(env) = val; - } else { - RAX(env) = (uint32_t)val; - } - break; - case 0xec: - hvf_handle_io(env_cpu(env), DX(env), &AL(env), 0, 1, 1); - break; - case 0xed: - hvf_handle_io(env_cpu(env), DX(env), &val, 0, decode->operand_size, 1); - if (decode->operand_size == 2) { - AX(env) = val; - } else { - RAX(env) = (uint32_t)val; - } - - break; - default: - VM_PANIC("Bad in opcode\n"); - break; - } - - env->eip += decode->len; -} - -static inline void string_increment_reg(CPUX86State *env, int reg, - struct x86_decode *decode) -{ - target_ulong val = read_reg(env, reg, decode->addressing_size); - if (env->eflags & DF_MASK) { - val -= decode->operand_size; - } else { - val += decode->operand_size; - } - write_reg(env, reg, val, decode->addressing_size); -} - -static inline void string_rep(CPUX86State *env, struct x86_decode *decode, - void (*func)(CPUX86State *env, - struct x86_decode *ins), int rep) -{ - target_ulong rcx = read_reg(env, R_ECX, decode->addressing_size); - while (rcx--) { - func(env, decode); - write_reg(env, R_ECX, rcx, decode->addressing_size); - if 
((PREFIX_REP == rep) && !get_ZF(env)) { - break; - } - if ((PREFIX_REPN == rep) && get_ZF(env)) { - break; - } - } -} - -static void exec_ins_single(CPUX86State *env, struct x86_decode *decode) -{ - target_ulong addr = linear_addr_size(env_cpu(env), RDI(env), - decode->addressing_size, R_ES); - - hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 0, - decode->operand_size, 1); - vmx_write_mem(env_cpu(env), addr, env->hvf_mmio_buf, - decode->operand_size); - - string_increment_reg(env, R_EDI, decode); -} - -static void exec_ins(CPUX86State *env, struct x86_decode *decode) -{ - if (decode->rep) { - string_rep(env, decode, exec_ins_single, 0); - } else { - exec_ins_single(env, decode); - } - - env->eip += decode->len; -} - -static void exec_outs_single(CPUX86State *env, struct x86_decode *decode) -{ - target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS); - - vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, addr, - decode->operand_size); - hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 1, - decode->operand_size, 1); - - string_increment_reg(env, R_ESI, decode); -} - -static void exec_outs(CPUX86State *env, struct x86_decode *decode) -{ - if (decode->rep) { - string_rep(env, decode, exec_outs_single, 0); - } else { - exec_outs_single(env, decode); - } - - env->eip += decode->len; -} - -static void exec_movs_single(CPUX86State *env, struct x86_decode *decode) -{ - target_ulong src_addr; - target_ulong dst_addr; - target_ulong val; - - src_addr = decode_linear_addr(env, decode, RSI(env), R_DS); - dst_addr = linear_addr_size(env_cpu(env), RDI(env), - decode->addressing_size, R_ES); - - val = read_val_ext(env, src_addr, decode->operand_size); - write_val_ext(env, dst_addr, val, decode->operand_size); - - string_increment_reg(env, R_ESI, decode); - string_increment_reg(env, R_EDI, decode); -} - -static void exec_movs(CPUX86State *env, struct x86_decode *decode) -{ - if (decode->rep) { - string_rep(env, decode, exec_movs_single, 0); - } else { - exec_movs_single(env, decode); - } - - env->eip += decode->len; -} - -static void exec_cmps_single(CPUX86State *env, struct x86_decode *decode) -{ - target_ulong src_addr; - target_ulong dst_addr; - - src_addr = decode_linear_addr(env, decode, RSI(env), R_DS); - dst_addr = linear_addr_size(env_cpu(env), RDI(env), - decode->addressing_size, R_ES); - - decode->op[0].type = X86_VAR_IMMEDIATE; - decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size); - decode->op[1].type = X86_VAR_IMMEDIATE; - decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size); - - EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false); - - string_increment_reg(env, R_ESI, decode); - string_increment_reg(env, R_EDI, decode); -} - -static void exec_cmps(CPUX86State *env, struct x86_decode *decode) -{ - if (decode->rep) { - string_rep(env, decode, exec_cmps_single, decode->rep); - } else { - exec_cmps_single(env, decode); - } - env->eip += decode->len; -} - - -static void exec_stos_single(CPUX86State *env, struct x86_decode *decode) -{ - target_ulong addr; - target_ulong val; - - addr = linear_addr_size(env_cpu(env), RDI(env), - decode->addressing_size, R_ES); - val = read_reg(env, R_EAX, decode->operand_size); - vmx_write_mem(env_cpu(env), addr, &val, decode->operand_size); - - string_increment_reg(env, R_EDI, decode); -} - - -static void exec_stos(CPUX86State *env, struct x86_decode *decode) -{ - if (decode->rep) { - string_rep(env, decode, exec_stos_single, 0); - } else { - exec_stos_single(env, decode); - } - - env->eip += 
decode->len; -} - -static void exec_scas_single(CPUX86State *env, struct x86_decode *decode) -{ - target_ulong addr; - - addr = linear_addr_size(env_cpu(env), RDI(env), - decode->addressing_size, R_ES); - decode->op[1].type = X86_VAR_IMMEDIATE; - vmx_read_mem(env_cpu(env), &decode->op[1].val, addr, decode->operand_size); - - EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false); - string_increment_reg(env, R_EDI, decode); -} - -static void exec_scas(CPUX86State *env, struct x86_decode *decode) -{ - decode->op[0].type = X86_VAR_REG; - decode->op[0].reg = R_EAX; - if (decode->rep) { - string_rep(env, decode, exec_scas_single, decode->rep); - } else { - exec_scas_single(env, decode); - } - - env->eip += decode->len; -} - -static void exec_lods_single(CPUX86State *env, struct x86_decode *decode) -{ - target_ulong addr; - target_ulong val = 0; - - addr = decode_linear_addr(env, decode, RSI(env), R_DS); - vmx_read_mem(env_cpu(env), &val, addr, decode->operand_size); - write_reg(env, R_EAX, val, decode->operand_size); - - string_increment_reg(env, R_ESI, decode); -} - -static void exec_lods(CPUX86State *env, struct x86_decode *decode) -{ - if (decode->rep) { - string_rep(env, decode, exec_lods_single, 0); - } else { - exec_lods_single(env, decode); - } - - env->eip += decode->len; -} - -void simulate_rdmsr(CPUX86State *env) -{ - X86CPU *cpu = env_archcpu(env); - CPUState *cs = env_cpu(env); - uint32_t msr = ECX(env); - uint64_t val = 0; - - switch (msr) { - case MSR_IA32_TSC: - val = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET); - break; - case MSR_IA32_APICBASE: - val = cpu_get_apic_base(cpu->apic_state); - break; - case MSR_IA32_UCODE_REV: - val = cpu->ucode_rev; - break; - case MSR_EFER: - val = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER); - break; - case MSR_FSBASE: - val = rvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE); - break; - case MSR_GSBASE: - val = rvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE); - break; - case MSR_KERNELGSBASE: - val = rvmcs(cs->accel->fd, VMCS_HOST_FS_BASE); - break; - case MSR_STAR: - abort(); - break; - case MSR_LSTAR: - abort(); - break; - case MSR_CSTAR: - abort(); - break; - case MSR_IA32_MISC_ENABLE: - val = env->msr_ia32_misc_enable; - break; - case MSR_MTRRphysBase(0): - case MSR_MTRRphysBase(1): - case MSR_MTRRphysBase(2): - case MSR_MTRRphysBase(3): - case MSR_MTRRphysBase(4): - case MSR_MTRRphysBase(5): - case MSR_MTRRphysBase(6): - case MSR_MTRRphysBase(7): - val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base; - break; - case MSR_MTRRphysMask(0): - case MSR_MTRRphysMask(1): - case MSR_MTRRphysMask(2): - case MSR_MTRRphysMask(3): - case MSR_MTRRphysMask(4): - case MSR_MTRRphysMask(5): - case MSR_MTRRphysMask(6): - case MSR_MTRRphysMask(7): - val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask; - break; - case MSR_MTRRfix64K_00000: - val = env->mtrr_fixed[0]; - break; - case MSR_MTRRfix16K_80000: - case MSR_MTRRfix16K_A0000: - val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1]; - break; - case MSR_MTRRfix4K_C0000: - case MSR_MTRRfix4K_C8000: - case MSR_MTRRfix4K_D0000: - case MSR_MTRRfix4K_D8000: - case MSR_MTRRfix4K_E0000: - case MSR_MTRRfix4K_E8000: - case MSR_MTRRfix4K_F0000: - case MSR_MTRRfix4K_F8000: - val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3]; - break; - case MSR_MTRRdefType: - val = env->mtrr_deftype; - break; - case MSR_CORE_THREAD_COUNT: - val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */ - val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */ - break; - default: - 
/* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */ - val = 0; - break; - } - - RAX(env) = (uint32_t)val; - RDX(env) = (uint32_t)(val >> 32); -} - -static void exec_rdmsr(CPUX86State *env, struct x86_decode *decode) -{ - simulate_rdmsr(env); - env->eip += decode->len; -} - -void simulate_wrmsr(CPUX86State *env) -{ - X86CPU *cpu = env_archcpu(env); - CPUState *cs = env_cpu(env); - uint32_t msr = ECX(env); - uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env); - - switch (msr) { - case MSR_IA32_TSC: - break; - case MSR_IA32_APICBASE: - cpu_set_apic_base(cpu->apic_state, data); - break; - case MSR_FSBASE: - wvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE, data); - break; - case MSR_GSBASE: - wvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE, data); - break; - case MSR_KERNELGSBASE: - wvmcs(cs->accel->fd, VMCS_HOST_FS_BASE, data); - break; - case MSR_STAR: - abort(); - break; - case MSR_LSTAR: - abort(); - break; - case MSR_CSTAR: - abort(); - break; - case MSR_EFER: - /*printf("new efer %llx\n", EFER(cs));*/ - wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, data); - if (data & MSR_EFER_NXE) { - hv_vcpu_invalidate_tlb(cs->accel->fd); - } - break; - case MSR_MTRRphysBase(0): - case MSR_MTRRphysBase(1): - case MSR_MTRRphysBase(2): - case MSR_MTRRphysBase(3): - case MSR_MTRRphysBase(4): - case MSR_MTRRphysBase(5): - case MSR_MTRRphysBase(6): - case MSR_MTRRphysBase(7): - env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data; - break; - case MSR_MTRRphysMask(0): - case MSR_MTRRphysMask(1): - case MSR_MTRRphysMask(2): - case MSR_MTRRphysMask(3): - case MSR_MTRRphysMask(4): - case MSR_MTRRphysMask(5): - case MSR_MTRRphysMask(6): - case MSR_MTRRphysMask(7): - env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data; - break; - case MSR_MTRRfix64K_00000: - env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data; - break; - case MSR_MTRRfix16K_80000: - case MSR_MTRRfix16K_A0000: - env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data; - break; - case MSR_MTRRfix4K_C0000: - case MSR_MTRRfix4K_C8000: - case MSR_MTRRfix4K_D0000: - case MSR_MTRRfix4K_D8000: - case MSR_MTRRfix4K_E0000: - case MSR_MTRRfix4K_E8000: - case MSR_MTRRfix4K_F0000: - case MSR_MTRRfix4K_F8000: - env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data; - break; - case MSR_MTRRdefType: - env->mtrr_deftype = data; - break; - default: - break; - } - - /* Related to support known hypervisor interface */ - /* if (g_hypervisor_iface) - g_hypervisor_iface->wrmsr_handler(cs, msr, data); - - printf("write msr %llx\n", RCX(cs));*/ -} - -static void exec_wrmsr(CPUX86State *env, struct x86_decode *decode) -{ - simulate_wrmsr(env); - env->eip += decode->len; -} - -/* - * flag: - * 0 - bt, 1 - btc, 2 - bts, 3 - btr - */ -static void do_bt(CPUX86State *env, struct x86_decode *decode, int flag) -{ - int32_t displacement; - uint8_t index; - bool cf; - int mask = (4 == decode->operand_size) ? 
0x1f : 0xf; - - VM_PANIC_ON(decode->rex.rex); - - fetch_operands(env, decode, 2, false, true, false); - index = decode->op[1].val & mask; - - if (decode->op[0].type != X86_VAR_REG) { - if (4 == decode->operand_size) { - displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32; - decode->op[0].ptr += 4 * displacement; - } else if (2 == decode->operand_size) { - displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16; - decode->op[0].ptr += 2 * displacement; - } else { - VM_PANIC("bt 64bit\n"); - } - } - decode->op[0].val = read_val_ext(env, decode->op[0].ptr, - decode->operand_size); - cf = (decode->op[0].val >> index) & 0x01; - - switch (flag) { - case 0: - set_CF(env, cf); - return; - case 1: - decode->op[0].val ^= (1u << index); - break; - case 2: - decode->op[0].val |= (1u << index); - break; - case 3: - decode->op[0].val &= ~(1u << index); - break; - } - write_val_ext(env, decode->op[0].ptr, decode->op[0].val, - decode->operand_size); - set_CF(env, cf); -} - -static void exec_bt(CPUX86State *env, struct x86_decode *decode) -{ - do_bt(env, decode, 0); - env->eip += decode->len; -} - -static void exec_btc(CPUX86State *env, struct x86_decode *decode) -{ - do_bt(env, decode, 1); - env->eip += decode->len; -} - -static void exec_btr(CPUX86State *env, struct x86_decode *decode) -{ - do_bt(env, decode, 3); - env->eip += decode->len; -} - -static void exec_bts(CPUX86State *env, struct x86_decode *decode) -{ - do_bt(env, decode, 2); - env->eip += decode->len; -} - -void exec_shl(CPUX86State *env, struct x86_decode *decode) -{ - uint8_t count; - int of = 0, cf = 0; - - fetch_operands(env, decode, 2, true, true, false); - - count = decode->op[1].val; - count &= 0x1f; /* count is masked to 5 bits*/ - if (!count) { - goto exit; - } - - switch (decode->operand_size) { - case 1: - { - uint8_t res = 0; - if (count <= 8) { - res = (decode->op[0].val << count); - cf = (decode->op[0].val >> (8 - count)) & 0x1; - of = cf ^ (res >> 7); - } - - write_val_ext(env, decode->op[0].ptr, res, 1); - SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res); - SET_FLAGS_OxxxxC(env, of, cf); - break; - } - case 2: - { - uint16_t res = 0; - - /* from bochs */ - if (count <= 16) { - res = (decode->op[0].val << count); - cf = (decode->op[0].val >> (16 - count)) & 0x1; - of = cf ^ (res >> 15); /* of = cf ^ result15 */ - } - - write_val_ext(env, decode->op[0].ptr, res, 2); - SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res); - SET_FLAGS_OxxxxC(env, of, cf); - break; - } - case 4: - { - uint32_t res = decode->op[0].val << count; - - write_val_ext(env, decode->op[0].ptr, res, 4); - SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res); - cf = (decode->op[0].val >> (32 - count)) & 0x1; - of = cf ^ (res >> 31); /* of = cf ^ result31 */ - SET_FLAGS_OxxxxC(env, of, cf); - break; - } - default: - abort(); - } - -exit: - /* lflags_to_rflags(env); */ - env->eip += decode->len; -} - -void exec_movsx(CPUX86State *env, struct x86_decode *decode) -{ - int src_op_size; - int op_size = decode->operand_size; - - fetch_operands(env, decode, 2, false, false, false); - - if (0xbe == decode->opcode[1]) { - src_op_size = 1; - } else { - src_op_size = 2; - } - - decode->operand_size = src_op_size; - calc_modrm_operand(env, decode, &decode->op[1]); - decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size), - src_op_size); - - write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size); - - env->eip += decode->len; -} - -void exec_ror(CPUX86State *env, struct x86_decode *decode) -{ - uint8_t count; - - fetch_operands(env, decode, 2, true, true, 
false); - count = decode->op[1].val; - - switch (decode->operand_size) { - case 1: - { - uint32_t bit6, bit7; - uint8_t res; - - if ((count & 0x07) == 0) { - if (count & 0x18) { - bit6 = ((uint8_t)decode->op[0].val >> 6) & 1; - bit7 = ((uint8_t)decode->op[0].val >> 7) & 1; - SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7); - } - } else { - count &= 0x7; /* use only bottom 3 bits */ - res = ((uint8_t)decode->op[0].val >> count) | - ((uint8_t)decode->op[0].val << (8 - count)); - write_val_ext(env, decode->op[0].ptr, res, 1); - bit6 = (res >> 6) & 1; - bit7 = (res >> 7) & 1; - /* set eflags: ROR count affects the following flags: C, O */ - SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7); - } - break; - } - case 2: - { - uint32_t bit14, bit15; - uint16_t res; - - if ((count & 0x0f) == 0) { - if (count & 0x10) { - bit14 = ((uint16_t)decode->op[0].val >> 14) & 1; - bit15 = ((uint16_t)decode->op[0].val >> 15) & 1; - /* of = result14 ^ result15 */ - SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15); - } - } else { - count &= 0x0f; /* use only 4 LSB's */ - res = ((uint16_t)decode->op[0].val >> count) | - ((uint16_t)decode->op[0].val << (16 - count)); - write_val_ext(env, decode->op[0].ptr, res, 2); - - bit14 = (res >> 14) & 1; - bit15 = (res >> 15) & 1; - /* of = result14 ^ result15 */ - SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15); - } - break; - } - case 4: - { - uint32_t bit31, bit30; - uint32_t res; - - count &= 0x1f; - if (count) { - res = ((uint32_t)decode->op[0].val >> count) | - ((uint32_t)decode->op[0].val << (32 - count)); - write_val_ext(env, decode->op[0].ptr, res, 4); - - bit31 = (res >> 31) & 1; - bit30 = (res >> 30) & 1; - /* of = result30 ^ result31 */ - SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31); - } - break; - } - } - env->eip += decode->len; -} - -void exec_rol(CPUX86State *env, struct x86_decode *decode) -{ - uint8_t count; - - fetch_operands(env, decode, 2, true, true, false); - count = decode->op[1].val; - - switch (decode->operand_size) { - case 1: - { - uint32_t bit0, bit7; - uint8_t res; - - if ((count & 0x07) == 0) { - if (count & 0x18) { - bit0 = ((uint8_t)decode->op[0].val & 1); - bit7 = ((uint8_t)decode->op[0].val >> 7); - SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0); - } - } else { - count &= 0x7; /* use only lowest 3 bits */ - res = ((uint8_t)decode->op[0].val << count) | - ((uint8_t)decode->op[0].val >> (8 - count)); - - write_val_ext(env, decode->op[0].ptr, res, 1); - /* set eflags: - * ROL count affects the following flags: C, O - */ - bit0 = (res & 1); - bit7 = (res >> 7); - SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0); - } - break; - } - case 2: - { - uint32_t bit0, bit15; - uint16_t res; - - if ((count & 0x0f) == 0) { - if (count & 0x10) { - bit0 = ((uint16_t)decode->op[0].val & 0x1); - bit15 = ((uint16_t)decode->op[0].val >> 15); - /* of = cf ^ result15 */ - SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0); - } - } else { - count &= 0x0f; /* only use bottom 4 bits */ - res = ((uint16_t)decode->op[0].val << count) | - ((uint16_t)decode->op[0].val >> (16 - count)); - - write_val_ext(env, decode->op[0].ptr, res, 2); - bit0 = (res & 0x1); - bit15 = (res >> 15); - /* of = cf ^ result15 */ - SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0); - } - break; - } - case 4: - { - uint32_t bit0, bit31; - uint32_t res; - - count &= 0x1f; - if (count) { - res = ((uint32_t)decode->op[0].val << count) | - ((uint32_t)decode->op[0].val >> (32 - count)); - - write_val_ext(env, decode->op[0].ptr, res, 4); - bit0 = (res & 0x1); - bit31 = (res >> 31); - /* of = cf ^ result31 */ - SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0); - } 
- break; - } - } - env->eip += decode->len; -} - - -void exec_rcl(CPUX86State *env, struct x86_decode *decode) -{ - uint8_t count; - int of = 0, cf = 0; - - fetch_operands(env, decode, 2, true, true, false); - count = decode->op[1].val & 0x1f; - - switch (decode->operand_size) { - case 1: - { - uint8_t op1_8 = decode->op[0].val; - uint8_t res; - count %= 9; - if (!count) { - break; - } - - if (1 == count) { - res = (op1_8 << 1) | get_CF(env); - } else { - res = (op1_8 << count) | (get_CF(env) << (count - 1)) | - (op1_8 >> (9 - count)); - } - - write_val_ext(env, decode->op[0].ptr, res, 1); - - cf = (op1_8 >> (8 - count)) & 0x01; - of = cf ^ (res >> 7); /* of = cf ^ result7 */ - SET_FLAGS_OxxxxC(env, of, cf); - break; - } - case 2: - { - uint16_t res; - uint16_t op1_16 = decode->op[0].val; - - count %= 17; - if (!count) { - break; - } - - if (1 == count) { - res = (op1_16 << 1) | get_CF(env); - } else if (count == 16) { - res = (get_CF(env) << 15) | (op1_16 >> 1); - } else { /* 2..15 */ - res = (op1_16 << count) | (get_CF(env) << (count - 1)) | - (op1_16 >> (17 - count)); - } - - write_val_ext(env, decode->op[0].ptr, res, 2); - - cf = (op1_16 >> (16 - count)) & 0x1; - of = cf ^ (res >> 15); /* of = cf ^ result15 */ - SET_FLAGS_OxxxxC(env, of, cf); - break; - } - case 4: - { - uint32_t res; - uint32_t op1_32 = decode->op[0].val; - - if (!count) { - break; - } - - if (1 == count) { - res = (op1_32 << 1) | get_CF(env); - } else { - res = (op1_32 << count) | (get_CF(env) << (count - 1)) | - (op1_32 >> (33 - count)); - } - - write_val_ext(env, decode->op[0].ptr, res, 4); - - cf = (op1_32 >> (32 - count)) & 0x1; - of = cf ^ (res >> 31); /* of = cf ^ result31 */ - SET_FLAGS_OxxxxC(env, of, cf); - break; - } - } - env->eip += decode->len; -} - -void exec_rcr(CPUX86State *env, struct x86_decode *decode) -{ - uint8_t count; - int of = 0, cf = 0; - - fetch_operands(env, decode, 2, true, true, false); - count = decode->op[1].val & 0x1f; - - switch (decode->operand_size) { - case 1: - { - uint8_t op1_8 = decode->op[0].val; - uint8_t res; - - count %= 9; - if (!count) { - break; - } - res = (op1_8 >> count) | (get_CF(env) << (8 - count)) | - (op1_8 << (9 - count)); - - write_val_ext(env, decode->op[0].ptr, res, 1); - - cf = (op1_8 >> (count - 1)) & 0x1; - of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */ - SET_FLAGS_OxxxxC(env, of, cf); - break; - } - case 2: - { - uint16_t op1_16 = decode->op[0].val; - uint16_t res; - - count %= 17; - if (!count) { - break; - } - res = (op1_16 >> count) | (get_CF(env) << (16 - count)) | - (op1_16 << (17 - count)); - - write_val_ext(env, decode->op[0].ptr, res, 2); - - cf = (op1_16 >> (count - 1)) & 0x1; - of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^ - result14 */ - SET_FLAGS_OxxxxC(env, of, cf); - break; - } - case 4: - { - uint32_t res; - uint32_t op1_32 = decode->op[0].val; - - if (!count) { - break; - } - - if (1 == count) { - res = (op1_32 >> 1) | (get_CF(env) << 31); - } else { - res = (op1_32 >> count) | (get_CF(env) << (32 - count)) | - (op1_32 << (33 - count)); - } - - write_val_ext(env, decode->op[0].ptr, res, 4); - - cf = (op1_32 >> (count - 1)) & 0x1; - of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */ - SET_FLAGS_OxxxxC(env, of, cf); - break; - } - } - env->eip += decode->len; -} - -static void exec_xchg(CPUX86State *env, struct x86_decode *decode) -{ - fetch_operands(env, decode, 2, true, true, false); - - write_val_ext(env, decode->op[0].ptr, decode->op[1].val, - decode->operand_size); - 
write_val_ext(env, decode->op[1].ptr, decode->op[0].val, - decode->operand_size); - - env->eip += decode->len; -} - -static void exec_xadd(CPUX86State *env, struct x86_decode *decode) -{ - EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true); - write_val_ext(env, decode->op[1].ptr, decode->op[0].val, - decode->operand_size); - - env->eip += decode->len; -} - -static struct cmd_handler { - enum x86_decode_cmd cmd; - void (*handler)(CPUX86State *env, struct x86_decode *ins); -} handlers[] = { - {X86_DECODE_CMD_INVL, NULL,}, - {X86_DECODE_CMD_MOV, exec_mov}, - {X86_DECODE_CMD_ADD, exec_add}, - {X86_DECODE_CMD_OR, exec_or}, - {X86_DECODE_CMD_ADC, exec_adc}, - {X86_DECODE_CMD_SBB, exec_sbb}, - {X86_DECODE_CMD_AND, exec_and}, - {X86_DECODE_CMD_SUB, exec_sub}, - {X86_DECODE_CMD_NEG, exec_neg}, - {X86_DECODE_CMD_XOR, exec_xor}, - {X86_DECODE_CMD_CMP, exec_cmp}, - {X86_DECODE_CMD_INC, exec_inc}, - {X86_DECODE_CMD_DEC, exec_dec}, - {X86_DECODE_CMD_TST, exec_tst}, - {X86_DECODE_CMD_NOT, exec_not}, - {X86_DECODE_CMD_MOVZX, exec_movzx}, - {X86_DECODE_CMD_OUT, exec_out}, - {X86_DECODE_CMD_IN, exec_in}, - {X86_DECODE_CMD_INS, exec_ins}, - {X86_DECODE_CMD_OUTS, exec_outs}, - {X86_DECODE_CMD_RDMSR, exec_rdmsr}, - {X86_DECODE_CMD_WRMSR, exec_wrmsr}, - {X86_DECODE_CMD_BT, exec_bt}, - {X86_DECODE_CMD_BTR, exec_btr}, - {X86_DECODE_CMD_BTC, exec_btc}, - {X86_DECODE_CMD_BTS, exec_bts}, - {X86_DECODE_CMD_SHL, exec_shl}, - {X86_DECODE_CMD_ROL, exec_rol}, - {X86_DECODE_CMD_ROR, exec_ror}, - {X86_DECODE_CMD_RCR, exec_rcr}, - {X86_DECODE_CMD_RCL, exec_rcl}, - /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/ - {X86_DECODE_CMD_MOVS, exec_movs}, - {X86_DECODE_CMD_CMPS, exec_cmps}, - {X86_DECODE_CMD_STOS, exec_stos}, - {X86_DECODE_CMD_SCAS, exec_scas}, - {X86_DECODE_CMD_LODS, exec_lods}, - {X86_DECODE_CMD_MOVSX, exec_movsx}, - {X86_DECODE_CMD_XCHG, exec_xchg}, - {X86_DECODE_CMD_XADD, exec_xadd}, -}; - -static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST]; - -static void init_cmd_handler(void) -{ - int i; - for (i = 0; i < ARRAY_SIZE(handlers); i++) { - _cmd_handler[handlers[i].cmd] = handlers[i]; - } -} - -void load_regs(CPUState *cs) -{ - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; - - int i = 0; - RRX(env, R_EAX) = rreg(cs->accel->fd, HV_X86_RAX); - RRX(env, R_EBX) = rreg(cs->accel->fd, HV_X86_RBX); - RRX(env, R_ECX) = rreg(cs->accel->fd, HV_X86_RCX); - RRX(env, R_EDX) = rreg(cs->accel->fd, HV_X86_RDX); - RRX(env, R_ESI) = rreg(cs->accel->fd, HV_X86_RSI); - RRX(env, R_EDI) = rreg(cs->accel->fd, HV_X86_RDI); - RRX(env, R_ESP) = rreg(cs->accel->fd, HV_X86_RSP); - RRX(env, R_EBP) = rreg(cs->accel->fd, HV_X86_RBP); - for (i = 8; i < 16; i++) { - RRX(env, i) = rreg(cs->accel->fd, HV_X86_RAX + i); - } - - env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS); - rflags_to_lflags(env); - env->eip = rreg(cs->accel->fd, HV_X86_RIP); -} - -void store_regs(CPUState *cs) -{ - X86CPU *cpu = X86_CPU(cs); - CPUX86State *env = &cpu->env; - - int i = 0; - wreg(cs->accel->fd, HV_X86_RAX, RAX(env)); - wreg(cs->accel->fd, HV_X86_RBX, RBX(env)); - wreg(cs->accel->fd, HV_X86_RCX, RCX(env)); - wreg(cs->accel->fd, HV_X86_RDX, RDX(env)); - wreg(cs->accel->fd, HV_X86_RSI, RSI(env)); - wreg(cs->accel->fd, HV_X86_RDI, RDI(env)); - wreg(cs->accel->fd, HV_X86_RBP, RBP(env)); - wreg(cs->accel->fd, HV_X86_RSP, RSP(env)); - for (i = 8; i < 16; i++) { - wreg(cs->accel->fd, HV_X86_RAX + i, RRX(env, i)); - } - - lflags_to_rflags(env); - wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags); - macvm_set_rip(cs, env->eip); -} - -bool 
exec_instruction(CPUX86State *env, struct x86_decode *ins) -{ - /*if (hvf_vcpu_id(cs)) - printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cs), env->eip, - decode_cmd_to_string(ins->cmd));*/ - - if (!_cmd_handler[ins->cmd].handler) { - printf("Unimplemented handler (%llx) for %d (%x %x) \n", env->eip, - ins->cmd, ins->opcode[0], - ins->opcode_len > 1 ? ins->opcode[1] : 0); - env->eip += ins->len; - return true; - } - - _cmd_handler[ins->cmd].handler(env, ins); - return true; -} - -void init_emu(void) -{ - init_cmd_handler(); -} diff --git a/target/i386/hvf/x86_emu.h b/target/i386/hvf/x86_emu.h deleted file mode 100644 index 8bd97608c42..00000000000 --- a/target/i386/hvf/x86_emu.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (C) 2016 Veertu Inc, - * Copyright (C) 2017 Google Inc, - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, see . - */ - -#ifndef X86_EMU_H -#define X86_EMU_H - -#include "x86.h" -#include "x86_decode.h" -#include "cpu.h" - -void init_emu(void); -bool exec_instruction(CPUX86State *env, struct x86_decode *ins); - -void load_regs(CPUState *cpu); -void store_regs(CPUState *cpu); - -void simulate_rdmsr(CPUX86State *env); -void simulate_wrmsr(CPUX86State *env); - -target_ulong read_reg(CPUX86State *env, int reg, int size); -void write_reg(CPUX86State *env, int reg, target_ulong val, int size); -target_ulong read_val_from_reg(target_ulong reg_ptr, int size); -void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size); -void write_val_ext(CPUX86State *env, target_ulong ptr, target_ulong val, int size); -uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes); -target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size); - -void exec_movzx(CPUX86State *env, struct x86_decode *decode); -void exec_shl(CPUX86State *env, struct x86_decode *decode); -void exec_movsx(CPUX86State *env, struct x86_decode *decode); -void exec_ror(CPUX86State *env, struct x86_decode *decode); -void exec_rol(CPUX86State *env, struct x86_decode *decode); -void exec_rcl(CPUX86State *env, struct x86_decode *decode); -void exec_rcr(CPUX86State *env, struct x86_decode *decode); -#endif diff --git a/target/i386/hvf/x86_flags.c b/target/i386/hvf/x86_flags.c deleted file mode 100644 index 03d6de5efc3..00000000000 --- a/target/i386/hvf/x86_flags.c +++ /dev/null @@ -1,313 +0,0 @@ -///////////////////////////////////////////////////////////////////////// -// -// Copyright (C) 2001-2012 The Bochs Project -// Copyright (C) 2017 Google Inc. -// -// This library is free software; you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public -// License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. 
-// -// This library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -// Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public -// License along with this library; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA B 02110-1301 USA -///////////////////////////////////////////////////////////////////////// -/* - * flags functions - */ - -#include "qemu/osdep.h" - -#include "panic.h" -#include "cpu.h" -#include "x86_flags.h" -#include "x86.h" - - -/* this is basically bocsh code */ - -#define LF_SIGN_BIT 31 - -#define LF_BIT_SD (0) /* lazy Sign Flag Delta */ -#define LF_BIT_AF (3) /* lazy Adjust flag */ -#define LF_BIT_PDB (8) /* lazy Parity Delta Byte (8 bits) */ -#define LF_BIT_CF (31) /* lazy Carry Flag */ -#define LF_BIT_PO (30) /* lazy Partial Overflow = CF ^ OF */ - -#define LF_MASK_SD (0x01 << LF_BIT_SD) -#define LF_MASK_AF (0x01 << LF_BIT_AF) -#define LF_MASK_PDB (0xFF << LF_BIT_PDB) -#define LF_MASK_CF (0x01 << LF_BIT_CF) -#define LF_MASK_PO (0x01 << LF_BIT_PO) - -#define ADD_COUT_VEC(op1, op2, result) \ - (((op1) & (op2)) | (((op1) | (op2)) & (~(result)))) - -#define SUB_COUT_VEC(op1, op2, result) \ - (((~(op1)) & (op2)) | (((~(op1)) ^ (op2)) & (result))) - -#define GET_ADD_OVERFLOW(op1, op2, result, mask) \ - ((((op1) ^ (result)) & ((op2) ^ (result))) & (mask)) - -/* ******************* */ -/* OSZAPC */ -/* ******************* */ - -/* size, carries, result */ -#define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \ - target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \ - (((lf_carries) >> (size - 2)) << LF_BIT_PO); \ - env->hvf_lflags.result = (target_ulong)(int##size##_t)(lf_result); \ - if ((size) == 32) { \ - temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \ - } else if ((size) == 16) { \ - temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \ - } else if ((size) == 8) { \ - temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \ - } else { \ - VM_PANIC("unimplemented"); \ - } \ - env->hvf_lflags.auxbits = (target_ulong)(uint32_t)temp; \ -} - -/* carries, result */ -#define SET_FLAGS_OSZAPC_8(carries, result) \ - SET_FLAGS_OSZAPC_SIZE(8, carries, result) -#define SET_FLAGS_OSZAPC_16(carries, result) \ - SET_FLAGS_OSZAPC_SIZE(16, carries, result) -#define SET_FLAGS_OSZAPC_32(carries, result) \ - SET_FLAGS_OSZAPC_SIZE(32, carries, result) - -/* ******************* */ -/* OSZAP */ -/* ******************* */ -/* size, carries, result */ -#define SET_FLAGS_OSZAP_SIZE(size, lf_carries, lf_result) { \ - target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \ - (((lf_carries) >> (size - 2)) << LF_BIT_PO); \ - if ((size) == 32) { \ - temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \ - } else if ((size) == 16) { \ - temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \ - } else if ((size) == 8) { \ - temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \ - } else { \ - VM_PANIC("unimplemented"); \ - } \ - env->hvf_lflags.result = (target_ulong)(int##size##_t)(lf_result); \ - target_ulong delta_c = (env->hvf_lflags.auxbits ^ temp) & LF_MASK_CF; \ - delta_c ^= (delta_c >> 1); \ - env->hvf_lflags.auxbits = (target_ulong)(uint32_t)(temp ^ delta_c); \ -} - -/* carries, result */ -#define SET_FLAGS_OSZAP_8(carries, result) \ - SET_FLAGS_OSZAP_SIZE(8, carries, result) -#define 
SET_FLAGS_OSZAP_16(carries, result) \ - SET_FLAGS_OSZAP_SIZE(16, carries, result) -#define SET_FLAGS_OSZAP_32(carries, result) \ - SET_FLAGS_OSZAP_SIZE(32, carries, result) - -void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf) -{ - uint32_t temp_po = new_of ^ new_cf; - env->hvf_lflags.auxbits &= ~(LF_MASK_PO | LF_MASK_CF); - env->hvf_lflags.auxbits |= (temp_po << LF_BIT_PO) | (new_cf << LF_BIT_CF); -} - -void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2, - uint32_t diff) -{ - SET_FLAGS_OSZAPC_32(SUB_COUT_VEC(v1, v2, diff), diff); -} - -void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2, - uint16_t diff) -{ - SET_FLAGS_OSZAPC_16(SUB_COUT_VEC(v1, v2, diff), diff); -} - -void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2, - uint8_t diff) -{ - SET_FLAGS_OSZAPC_8(SUB_COUT_VEC(v1, v2, diff), diff); -} - -void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2, - uint32_t diff) -{ - SET_FLAGS_OSZAPC_32(ADD_COUT_VEC(v1, v2, diff), diff); -} - -void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2, - uint16_t diff) -{ - SET_FLAGS_OSZAPC_16(ADD_COUT_VEC(v1, v2, diff), diff); -} - -void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2, - uint8_t diff) -{ - SET_FLAGS_OSZAPC_8(ADD_COUT_VEC(v1, v2, diff), diff); -} - -void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2, - uint32_t diff) -{ - SET_FLAGS_OSZAP_32(SUB_COUT_VEC(v1, v2, diff), diff); -} - -void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2, - uint16_t diff) -{ - SET_FLAGS_OSZAP_16(SUB_COUT_VEC(v1, v2, diff), diff); -} - -void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2, - uint8_t diff) -{ - SET_FLAGS_OSZAP_8(SUB_COUT_VEC(v1, v2, diff), diff); -} - -void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2, - uint32_t diff) -{ - SET_FLAGS_OSZAP_32(ADD_COUT_VEC(v1, v2, diff), diff); -} - -void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2, - uint16_t diff) -{ - SET_FLAGS_OSZAP_16(ADD_COUT_VEC(v1, v2, diff), diff); -} - -void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2, - uint8_t diff) -{ - SET_FLAGS_OSZAP_8(ADD_COUT_VEC(v1, v2, diff), diff); -} - - -void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t v1, uint32_t v2, - uint32_t diff) -{ - SET_FLAGS_OSZAPC_32(0, diff); -} - -void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t v1, uint16_t v2, - uint16_t diff) -{ - SET_FLAGS_OSZAPC_16(0, diff); -} - -void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2, - uint8_t diff) -{ - SET_FLAGS_OSZAPC_8(0, diff); -} - -bool get_PF(CPUX86State *env) -{ - uint32_t temp = (255 & env->hvf_lflags.result); - temp = temp ^ (255 & (env->hvf_lflags.auxbits >> LF_BIT_PDB)); - temp = (temp ^ (temp >> 4)) & 0x0F; - return (0x9669U >> temp) & 1; -} - -void set_PF(CPUX86State *env, bool val) -{ - uint32_t temp = (255 & env->hvf_lflags.result) ^ (!val); - env->hvf_lflags.auxbits &= ~(LF_MASK_PDB); - env->hvf_lflags.auxbits |= (temp << LF_BIT_PDB); -} - -bool get_OF(CPUX86State *env) -{ - return ((env->hvf_lflags.auxbits + (1U << LF_BIT_PO)) >> LF_BIT_CF) & 1; -} - -bool get_CF(CPUX86State *env) -{ - return (env->hvf_lflags.auxbits >> LF_BIT_CF) & 1; -} - -void set_OF(CPUX86State *env, bool val) -{ - bool old_cf = get_CF(env); - SET_FLAGS_OxxxxC(env, val, old_cf); -} - -void set_CF(CPUX86State *env, bool val) -{ - bool old_of = get_OF(env); - SET_FLAGS_OxxxxC(env, old_of, val); -} - -bool get_AF(CPUX86State *env) -{ - 
return (env->hvf_lflags.auxbits >> LF_BIT_AF) & 1; -} - -void set_AF(CPUX86State *env, bool val) -{ - env->hvf_lflags.auxbits &= ~(LF_MASK_AF); - env->hvf_lflags.auxbits |= val << LF_BIT_AF; -} - -bool get_ZF(CPUX86State *env) -{ - return !env->hvf_lflags.result; -} - -void set_ZF(CPUX86State *env, bool val) -{ - if (val) { - env->hvf_lflags.auxbits ^= - (((env->hvf_lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD); - /* merge the parity bits into the Parity Delta Byte */ - uint32_t temp_pdb = (255 & env->hvf_lflags.result); - env->hvf_lflags.auxbits ^= (temp_pdb << LF_BIT_PDB); - /* now zero the .result value */ - env->hvf_lflags.result = 0; - } else { - env->hvf_lflags.result |= (1 << 8); - } -} - -bool get_SF(CPUX86State *env) -{ - return ((env->hvf_lflags.result >> LF_SIGN_BIT) ^ - (env->hvf_lflags.auxbits >> LF_BIT_SD)) & 1; -} - -void set_SF(CPUX86State *env, bool val) -{ - bool temp_sf = get_SF(env); - env->hvf_lflags.auxbits ^= (temp_sf ^ val) << LF_BIT_SD; -} - -void lflags_to_rflags(CPUX86State *env) -{ - env->eflags |= get_CF(env) ? CC_C : 0; - env->eflags |= get_PF(env) ? CC_P : 0; - env->eflags |= get_AF(env) ? CC_A : 0; - env->eflags |= get_ZF(env) ? CC_Z : 0; - env->eflags |= get_SF(env) ? CC_S : 0; - env->eflags |= get_OF(env) ? CC_O : 0; -} - -void rflags_to_lflags(CPUX86State *env) -{ - env->hvf_lflags.auxbits = env->hvf_lflags.result = 0; - set_OF(env, env->eflags & CC_O); - set_SF(env, env->eflags & CC_S); - set_ZF(env, env->eflags & CC_Z); - set_AF(env, env->eflags & CC_A); - set_PF(env, env->eflags & CC_P); - set_CF(env, env->eflags & CC_C); -} diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c index 649074a7d24..afc5c17d5d5 100644 --- a/target/i386/hvf/x86_mmu.c +++ b/target/i386/hvf/x86_mmu.c @@ -19,7 +19,7 @@ #include "qemu/osdep.h" #include "panic.h" #include "cpu.h" -#include "x86.h" +#include "emulate/x86.h" #include "x86_mmu.h" #include "vmcs.h" #include "vmx.h" @@ -38,6 +38,7 @@ #define LEGACY_PTE_PAGE_MASK (0xffffffffllu << 12) #define PAE_PTE_PAGE_MASK ((-1llu << 12) & ((1llu << 52) - 1)) #define PAE_PTE_LARGE_PAGE_MASK ((-1llu << (21)) & ((1llu << 52) - 1)) +#define PAE_PTE_SUPER_PAGE_MASK ((-1llu << (30)) & ((1llu << 52) - 1)) struct gpt_translation { target_ulong gva; @@ -96,7 +97,7 @@ static bool get_pt_entry(CPUState *cpu, struct gpt_translation *pt, /* test page table entry */ static bool test_pt_entry(CPUState *cpu, struct gpt_translation *pt, - int level, bool *is_large, bool pae) + int level, int *largeness, bool pae) { uint64_t pte = pt->pte[level]; @@ -118,9 +119,9 @@ static bool test_pt_entry(CPUState *cpu, struct gpt_translation *pt, goto exit; } - if (1 == level && pte_large_page(pte)) { + if (level && pte_large_page(pte)) { pt->err_code |= MMU_PAGE_PT; - *is_large = true; + *largeness = level; } if (!level) { pt->err_code |= MMU_PAGE_PT; @@ -152,9 +153,18 @@ static inline uint64_t pse_pte_to_page(uint64_t pte) return ((pte & 0x1fe000) << 19) | (pte & 0xffc00000); } -static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae) +static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae, + int largeness) { - VM_PANIC_ON(!pte_large_page(pt->pte[1])) + VM_PANIC_ON(!pte_large_page(pt->pte[largeness])) + + /* 1Gib large page */ + if (pae && largeness == 2) { + return (pt->pte[2] & PAE_PTE_SUPER_PAGE_MASK) | (pt->gva & 0x3fffffff); + } + + VM_PANIC_ON(largeness != 1) + /* 2Mb large page */ if (pae) { return (pt->pte[1] & PAE_PTE_LARGE_PAGE_MASK) | (pt->gva & 0x1fffff); @@ -170,7 +180,7 @@ static bool 
walk_gpt(CPUState *cpu, target_ulong addr, int err_code, struct gpt_translation *pt, bool pae) { int top_level, level; - bool is_large = false; + int largeness = 0; target_ulong cr3 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR3); uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK; @@ -186,19 +196,19 @@ static bool walk_gpt(CPUState *cpu, target_ulong addr, int err_code, for (level = top_level; level > 0; level--) { get_pt_entry(cpu, pt, level, pae); - if (!test_pt_entry(cpu, pt, level - 1, &is_large, pae)) { + if (!test_pt_entry(cpu, pt, level - 1, &largeness, pae)) { return false; } - if (is_large) { + if (largeness) { break; } } - if (!is_large) { + if (!largeness) { pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff); } else { - pt->gpa = large_page_gpa(pt, pae); + pt->gpa = large_page_gpa(pt, pae, largeness); } return true; diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c index f09bfbdda5b..bdf8b51ae67 100644 --- a/target/i386/hvf/x86_task.c +++ b/target/i386/hvf/x86_task.c @@ -10,15 +10,15 @@ #include "panic.h" #include "qemu/error-report.h" -#include "sysemu/hvf.h" +#include "system/hvf.h" #include "hvf-i386.h" #include "vmcs.h" #include "vmx.h" -#include "x86.h" +#include "emulate/x86.h" #include "x86_descr.h" #include "x86_mmu.h" -#include "x86_decode.h" -#include "x86_emu.h" +#include "emulate/x86_decode.h" +#include "emulate/x86_emu.h" #include "x86_task.h" #include "x86hvf.h" @@ -76,16 +76,16 @@ static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss) RSI(env) = tss->esi; RDI(env) = tss->edi; - vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, R_LDTR); - vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, R_ES); - vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, R_CS); - vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, R_SS); - vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, R_DS); - vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, R_FS); - vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, R_GS); + vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ldt}}, R_LDTR); + vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->es}}, R_ES); + vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->cs}}, R_CS); + vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ss}}, R_SS); + vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->ds}}, R_DS); + vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->fs}}, R_FS); + vmx_write_segment_selector(cpu, (x86_segment_selector){{tss->gs}}, R_GS); } -static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel, +static int task_switch_32(CPUState *cpu, x86_segment_selector tss_sel, x86_segment_selector old_tss_sel, uint64_t old_tss_base, struct x86_segment_descriptor *new_desc) { struct x86_tss_segment32 tss_seg; @@ -108,7 +108,7 @@ static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segme return 0; } -void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type) +void vmx_handle_task_switch(CPUState *cpu, x86_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type) { uint64_t rip = rreg(cpu->accel->fd, HV_X86_RIP); if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION && @@ -119,11 +119,10 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector 
tss_sel, int rea return; } - load_regs(cpu); + hvf_load_regs(cpu); struct x86_segment_descriptor curr_tss_desc, next_tss_desc; - int ret; - x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR); + x86_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR); uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR); uint32_t desc_limit; struct x86_call_gate task_gate_desc; @@ -138,10 +137,10 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea if (reason == TSR_IDT_GATE && gate_valid) { int dpl; - ret = x86_read_call_gate(cpu, &task_gate_desc, gate); + x86_read_call_gate(cpu, &task_gate_desc, gate); dpl = task_gate_desc.dpl; - x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS); + x86_segment_selector cs = vmx_read_segment_selector(cpu, R_CS); if (tss_sel.rpl > dpl || cs.rpl > dpl) ;//DPRINTF("emulate_gp"); } @@ -167,18 +166,19 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel); } - if (next_tss_desc.type & 8) - ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc); - else + if (next_tss_desc.type & 8) { + task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc); + } else { //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc); VM_PANIC("task_switch_16"); + } macvm_set_cr0(cpu->accel->fd, rvmcs(cpu->accel->fd, VMCS_GUEST_CR0) | CR0_TS_MASK); x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg); vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR); - store_regs(cpu); + hvf_store_regs(cpu); hv_vcpu_invalidate_tlb(cpu->accel->fd); } diff --git a/target/i386/hvf/x86_task.h b/target/i386/hvf/x86_task.h index 4eaa61a7dee..b9afac6a47b 100644 --- a/target/i386/hvf/x86_task.h +++ b/target/i386/hvf/x86_task.h @@ -15,6 +15,6 @@ #ifndef HVF_X86_TASK_H #define HVF_X86_TASK_H -void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, +void vmx_handle_task_switch(CPUState *cpu, x86_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type); #endif diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c index 1569f860eb1..17fce1d3cdd 100644 --- a/target/i386/hvf/x86hvf.c +++ b/target/i386/hvf/x86hvf.c @@ -24,8 +24,8 @@ #include "vmcs.h" #include "cpu.h" #include "x86_descr.h" -#include "x86_decode.h" -#include "sysemu/hw_accel.h" +#include "emulate/x86_decode.h" +#include "system/hw_accel.h" #include "hw/i386/apic_internal.h" @@ -427,7 +427,7 @@ int hvf_process_events(CPUState *cs) X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; - if (!cs->accel->dirty) { + if (!cs->vcpu_dirty) { /* light weight sync for CPU_INTERRUPT_HARD and IF_MASK */ env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS); } diff --git a/target/i386/hvf/x86hvf.h b/target/i386/hvf/x86hvf.h index 423a89b6ad3..8c46ce8ad02 100644 --- a/target/i386/hvf/x86hvf.h +++ b/target/i386/hvf/x86hvf.h @@ -31,4 +31,7 @@ void hvf_get_xsave(CPUState *cs); void hvf_get_msrs(CPUState *cs); void vmx_clear_int_window_exiting(CPUState *cs); void vmx_update_tpr(CPUState *cs); + +void hvf_load_regs(CPUState *cpu); +void hvf_store_regs(CPUState *cpu); #endif diff --git a/target/i386/kvm/hyperv-proto.h b/target/i386/kvm/hyperv-proto.h index 464fbf09e35..a9f056f2f3e 100644 --- a/target/i386/kvm/hyperv-proto.h +++ b/target/i386/kvm/hyperv-proto.h @@ -151,18 +151,6 @@ #define HV_X64_MSR_STIMER3_CONFIG 0x400000B6 #define HV_X64_MSR_STIMER3_COUNT 0x400000B7 -/* - * Hyper-V 
Synthetic debug options MSR - */ -#define HV_X64_MSR_SYNDBG_CONTROL 0x400000F1 -#define HV_X64_MSR_SYNDBG_STATUS 0x400000F2 -#define HV_X64_MSR_SYNDBG_SEND_BUFFER 0x400000F3 -#define HV_X64_MSR_SYNDBG_RECV_BUFFER 0x400000F4 -#define HV_X64_MSR_SYNDBG_PENDING_BUFFER 0x400000F5 -#define HV_X64_MSR_SYNDBG_OPTIONS 0x400000FF - -#define HV_X64_SYNDBG_OPTION_USE_HCALLS BIT(2) - /* * Guest crash notification MSRs */ diff --git a/target/i386/kvm/hyperv-stub.c b/target/i386/kvm/hyperv-stub.c index 3263dcf05d3..5836f53c23b 100644 --- a/target/i386/kvm/hyperv-stub.c +++ b/target/i386/kvm/hyperv-stub.c @@ -56,3 +56,8 @@ void hyperv_x86_synic_update(X86CPU *cpu) void hyperv_x86_set_vmbus_recommended_features_enabled(void) { } + +uint64_t hyperv_syndbg_query_options(void) +{ + return 0; +} diff --git a/target/i386/kvm/hyperv.c b/target/i386/kvm/hyperv.c index b94f12acc2c..9865120cc43 100644 --- a/target/i386/kvm/hyperv.c +++ b/target/i386/kvm/hyperv.c @@ -13,6 +13,7 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" +#include "exec/target_page.h" #include "hyperv.h" #include "hw/hyperv/hyperv.h" #include "hyperv-proto.h" @@ -80,6 +81,7 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) * necessary because memory hierarchy is being changed */ async_safe_run_on_cpu(CPU(cpu), async_synic_update, RUN_ON_CPU_NULL); + cpu_exit(CPU(cpu)); return EXCP_INTERRUPT; case KVM_EXIT_HYPERV_HCALL: { diff --git a/target/i386/kvm/hyperv.h b/target/i386/kvm/hyperv.h index e3982c8f4dd..e45a4512fe9 100644 --- a/target/i386/kvm/hyperv.h +++ b/target/i386/kvm/hyperv.h @@ -15,7 +15,7 @@ #define TARGET_I386_HYPERV_H #include "cpu.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "hw/hyperv/hyperv.h" #ifdef CONFIG_KVM diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c index 6bf8dcfc607..89a79536594 100644 --- a/target/i386/kvm/kvm-cpu.c +++ b/target/i386/kvm/kvm-cpu.c @@ -11,11 +11,11 @@ #include "cpu.h" #include "host-cpu.h" #include "qapi/error.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "hw/boards.h" #include "kvm_i386.h" -#include "hw/core/accel-cpu.h" +#include "accel/accel-cpu-target.h" static void kvm_set_guest_phys_bits(CPUState *cs) { @@ -41,6 +41,7 @@ static void kvm_set_guest_phys_bits(CPUState *cs) static bool kvm_cpu_realizefn(CPUState *cs, Error **errp) { X86CPU *cpu = X86_CPU(cs); + X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); CPUX86State *env = &cpu->env; bool ret; @@ -63,7 +64,7 @@ static bool kvm_cpu_realizefn(CPUState *cs, Error **errp) * check/update ucode_rev, phys_bits, guest_phys_bits, mwait * cpu_common_realizefn() (via xcc->parent_realize) */ - if (cpu->max_features) { + if (xcc->max_features) { if (enable_cpu_pm) { if (kvm_has_waitpkg()) { env->features[FEAT_7_0_ECX] |= CPUID_7_0_ECX_WAITPKG; @@ -72,7 +73,7 @@ static bool kvm_cpu_realizefn(CPUState *cs, Error **errp) if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) { host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, &cpu->mwait.ecx, &cpu->mwait.edx); - } + } } if (cpu->ucode_rev == 0) { cpu->ucode_rev = @@ -108,7 +109,7 @@ static void kvm_cpu_max_instance_init(X86CPU *cpu) CPUX86State *env = &cpu->env; KVMState *s = kvm_state; - host_cpu_max_instance_init(cpu); + object_property_set_bool(OBJECT(cpu), "pmu", true, &error_abort); if (lmce_supported()) { object_property_set_bool(OBJECT(cpu), "lmce", true, &error_abort); @@ -143,10 +144,6 @@ static void kvm_cpu_xsave_init(void) if (!esa->size) { continue; } - if ((x86_cpu_get_supported_feature_word(NULL, esa->feature) & esa->bits) - 
!= esa->bits) { - continue; - } host_cpuid(0xd, i, &eax, &ebx, &ecx, &edx); if (eax != 0) { assert(esa->size == eax); @@ -220,14 +217,14 @@ static void kvm_cpu_instance_init(CPUState *cs) x86_cpu_apply_props(cpu, kvm_default_props); } - if (cpu->max_features) { + if (xcc->max_features) { kvm_cpu_max_instance_init(cpu); } kvm_cpu_xsave_init(); } -static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data) +static void kvm_cpu_accel_class_init(ObjectClass *oc, const void *data) { AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 31f149c9902..369626f8c8d 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -31,13 +31,14 @@ #include "cpu.h" #include "host-cpu.h" #include "vmsr_energy.h" -#include "sysemu/sysemu.h" -#include "sysemu/hw_accel.h" -#include "sysemu/kvm_int.h" -#include "sysemu/runstate.h" +#include "system/system.h" +#include "system/hw_accel.h" +#include "system/kvm_int.h" +#include "system/runstate.h" #include "kvm_i386.h" #include "../confidential-guest.h" #include "sev.h" +#include "tdx.h" #include "xen-emu.h" #include "hyperv.h" #include "hyperv-proto.h" @@ -67,6 +68,7 @@ #include "hw/pci/msix.h" #include "migration/blocker.h" #include "exec/memattrs.h" +#include "exec/target_page.h" #include "trace.h" #include CONFIG_DEVICES @@ -81,18 +83,35 @@ do { } while (0) #endif +/* + * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly. + * In order to use vm86 mode, an EPT identity map and a TSS are needed. + * Since these must be part of guest physical memory, we need to allocate + * them, both by setting their start addresses in the kernel and by + * creating a corresponding e820 entry. We need 4 pages before the BIOS, + * so this value allows up to 16M BIOSes. 
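As a quick illustration of the address layout the comment above describes (a standalone sketch, not part of the patch): the EPT identity map page sits at the base address, the TSS occupies the following page, and the four reserved pages end exactly 16 MiB below the 4 GiB boundary. The helper names and the printf are invented for this example; only the constants mirror the ones introduced here.

    #include <stdint.h>
    #include <stdio.h>

    #define KVM_IDENTITY_BASE 0xfeffc000u                  /* EPT identity map page */
    #define KVM_TSS_BASE      (KVM_IDENTITY_BASE + 0x1000) /* TSS, one page later   */
    #define KVM_RESERVED_SIZE 0x4000u                      /* 4 pages, e820-reserved */

    int main(void)
    {
        uint64_t end = (uint64_t)KVM_IDENTITY_BASE + KVM_RESERVED_SIZE;

        /* The reservation ends at 0xff000000, i.e. 16 MiB below 4 GiB, which is
         * where the "up to 16M BIOSes" figure in the comment comes from. */
        printf("identity map 0x%x, TSS 0x%x, reserved up to 0x%llx, headroom %llu MiB\n",
               KVM_IDENTITY_BASE, (unsigned)KVM_TSS_BASE, (unsigned long long)end,
               (unsigned long long)(((1ULL << 32) - end) >> 20));
        return 0;
    }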
+ */ +#define KVM_IDENTITY_BASE 0xfeffc000 + /* From arch/x86/kvm/lapic.h */ #define KVM_APIC_BUS_CYCLE_NS 1 #define KVM_APIC_BUS_FREQUENCY (1000000000ULL / KVM_APIC_BUS_CYCLE_NS) -#define MSR_KVM_WALL_CLOCK 0x11 -#define MSR_KVM_SYSTEM_TIME 0x12 - /* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus * 255 kvm_msr_entry structs */ #define MSR_BUF_SIZE 4096 +typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val); +typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val); +typedef struct { + uint32_t msr; + QEMURDMSRHandler *rdmsr; + QEMUWRMSRHandler *wrmsr; +} KVMMSRHandlers; + static void kvm_init_msrs(X86CPU *cpu); +static int kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr, + QEMUWRMSRHandler *wrmsr); const KVMCapabilityInfo kvm_arch_required_capabilities[] = { KVM_CAP_INFO(SET_TSS_ADDR), @@ -145,6 +164,7 @@ static bool has_msr_ucode_rev; static bool has_msr_vmx_procbased_ctls2; static bool has_msr_perf_capabs; static bool has_msr_pkrs; +static bool has_msr_hwcr; static uint32_t has_architectural_pmu_version; static uint32_t num_architectural_pmu_gp_counters; @@ -173,6 +193,7 @@ static const char *vm_type_name[] = { [KVM_X86_SEV_VM] = "SEV", [KVM_X86_SEV_ES_VM] = "SEV-ES", [KVM_X86_SNP_VM] = "SEV-SNP", + [KVM_X86_TDX_VM] = "TDX", }; bool kvm_is_vm_type_supported(int type) @@ -307,7 +328,7 @@ void kvm_synchronize_all_tsc(void) { CPUState *cpu; - if (kvm_enabled()) { + if (kvm_enabled() && !is_tdx_vm()) { CPU_FOREACH(cpu) { run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL); } @@ -373,7 +394,7 @@ static bool host_tsx_broken(void) /* Returns the value for a specific register on the cpuid entry */ -static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg) +uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg) { uint32_t ret = 0; switch (reg) { @@ -395,9 +416,9 @@ static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg) /* Find matching entry for function/index on kvm_cpuid2 struct */ -static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid, - uint32_t function, - uint32_t index) +struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid, + uint32_t function, + uint32_t index) { int i; for (i = 0; i < cpuid->nent; ++i) { @@ -482,8 +503,12 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts. * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is * returned by KVM_GET_MSR_INDEX_LIST. + * + * But also, because Windows does not like ARCH_CAPABILITIES on AMD + * machines at all, do not show the fake ARCH_CAPABILITIES MSR that + * KVM sets up.
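The new condition in the hunk just below implements what this comment says: even when KVM enumerates the ARCH_CAPABILITIES MSR, the CPUID bit is only passed through if the host CPUID itself advertises it. A standalone sketch of that filtering follows; the bit position (leaf 7, EDX bit 29) and the helper name are stated here as assumptions for the example, not taken from the patch.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* CPUID.(EAX=7,ECX=0):EDX bit 29 = IA32_ARCH_CAPABILITIES supported
     * (bit number assumed for this illustration). */
    #define CPUID_7_0_EDX_ARCH_CAPABILITIES (1u << 29)

    /* Drop the bit unless the MSR is really enumerated *and* the host CPUID
     * advertises it, so guests on AMD hosts never see KVM's synthesized MSR. */
    static uint32_t filter_leaf7_edx(uint32_t kvm_edx, uint32_t host_edx,
                                     bool has_msr_arch_capabs)
    {
        if (!has_msr_arch_capabs ||
            !(host_edx & CPUID_7_0_EDX_ARCH_CAPABILITIES)) {
            kvm_edx &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
        return kvm_edx;
    }

    int main(void)
    {
        /* KVM reports the bit, the host CPUID does not -> the bit is hidden. */
        uint32_t edx = filter_leaf7_edx(CPUID_7_0_EDX_ARCH_CAPABILITIES, 0, true);
        printf("filtered leaf 7 EDX = 0x%08x\n", edx);
        return 0;
    }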
*/ - if (!has_msr_arch_capabs) { + if (!has_msr_arch_capabs || !(edx & CPUID_7_0_EDX_ARCH_CAPABILITIES)) { ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES; } } else if (function == 7 && index == 1 && reg == R_EAX) { @@ -543,17 +568,17 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, * be enabled without the in-kernel irqchip */ if (!kvm_irqchip_in_kernel()) { - ret &= ~(1U << KVM_FEATURE_PV_UNHALT); + ret &= ~CPUID_KVM_PV_UNHALT; } if (kvm_irqchip_is_split()) { - ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID; + ret |= CPUID_KVM_MSI_EXT_DEST_ID; } } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) { - ret |= 1U << KVM_HINTS_REALTIME; + ret |= CPUID_KVM_HINTS_REALTIME; } if (current_machine->cgs) { - ret = x86_confidential_guest_mask_cpuid_features( + ret = x86_confidential_guest_adjust_cpuid_features( X86_CONFIDENTIAL_GUEST(current_machine->cgs), function, index, reg, ret); } @@ -849,6 +874,15 @@ static int kvm_arch_set_tsc_khz(CPUState *cs) int r, cur_freq; bool set_ioctl = false; + /* + * TSC of TD vcpu is immutable, it cannot be set/changed via vcpu scope + * VM_SET_TSC_KHZ, but only be initialized via VM scope VM_SET_TSC_KHZ + * before ioctl KVM_TDX_INIT_VM in tdx_pre_create_vcpu() + */ + if (is_tdx_vm()) { + return 0; + } + if (!env->tsc_khz) { return 0; } @@ -913,6 +947,7 @@ static struct { uint32_t bits; } flags[2]; uint64_t dependencies; + bool skip_passthrough; } kvm_hyperv_properties[] = { [HYPERV_FEAT_RELAXED] = { .desc = "relaxed timing (hv-relaxed)", @@ -1035,16 +1070,15 @@ static struct { .bits = HV_DEPRECATING_AEOI_RECOMMENDED} } }, -#ifdef CONFIG_SYNDBG [HYPERV_FEAT_SYNDBG] = { .desc = "Enable synthetic kernel debugger channel (hv-syndbg)", .flags = { {.func = HV_CPUID_FEATURES, .reg = R_EDX, .bits = HV_FEATURE_DEBUG_MSRS_AVAILABLE} }, - .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED) + .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED), + .skip_passthrough = true, }, -#endif [HYPERV_FEAT_MSR_BITMAP] = { .desc = "enlightened MSR-Bitmap (hv-emsr-bitmap)", .flags = { @@ -1296,6 +1330,13 @@ static bool hyperv_feature_supported(CPUState *cs, int feature) uint32_t func, bits; int i, reg; + /* + * kvm_hyperv_properties needs to define at least one CPUID flag which + * must be used to detect the feature, it's hard to say whether it is + * supported or not otherwise. + */ + assert(kvm_hyperv_properties[feature].flags[0].func); + for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) { func = kvm_hyperv_properties[feature].flags[i].func; @@ -1445,7 +1486,8 @@ bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp) * hv_build_cpuid_leaf() uses this info to build guest CPUIDs. 
*/ for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) { - if (hyperv_feature_supported(cs, feat)) { + if (hyperv_feature_supported(cs, feat) && + !kvm_hyperv_properties[feat].skip_passthrough) { cpu->hyperv_features |= BIT(feat); } } @@ -1752,8 +1794,6 @@ static int hyperv_init_vcpu(X86CPU *cpu) static Error *invtsc_mig_blocker; -#define KVM_MAX_CPUID_ENTRIES 100 - static void kvm_init_xsave(CPUX86State *env) { if (has_xsave2) { @@ -1796,9 +1836,8 @@ static void kvm_init_nested_state(CPUX86State *env) } } -static uint32_t kvm_x86_build_cpuid(CPUX86State *env, - struct kvm_cpuid_entry2 *entries, - uint32_t cpuid_i) +uint32_t kvm_x86_build_cpuid(CPUX86State *env, struct kvm_cpuid_entry2 *entries, + uint32_t cpuid_i) { uint32_t limit, i, j; uint32_t unused; @@ -1818,10 +1857,12 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env, int times; c->function = i; - c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC | - KVM_CPUID_FLAG_STATE_READ_NEXT; cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); times = c->eax & 0xff; + if (times > 1) { + c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC | + KVM_CPUID_FLAG_STATE_READ_NEXT; + } for (j = 1; j < times; ++j) { if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { @@ -1835,7 +1876,7 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env, break; } case 0x1f: - if (!x86_has_extended_topo(env->avail_cpu_topo)) { + if (!x86_has_cpuid_0x1f(env_archcpu(env))) { cpuid_i--; break; } @@ -1844,10 +1885,6 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env, case 0xb: case 0xd: for (j = 0; ; j++) { - if (i == 0xd && j == 64) { - break; - } - c->function = i; c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; c->index = j; @@ -1863,7 +1900,12 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env, break; } if (i == 0xd && c->eax == 0) { - continue; + if (j < 63) { + continue; + } else { + cpuid_i--; + break; + } } if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { goto full; @@ -1891,7 +1933,8 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env, case 0x7: case 0x14: case 0x1d: - case 0x1e: { + case 0x1e: + case 0x24: { uint32_t times; c->function = i; @@ -2021,6 +2064,15 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env, abort(); } +int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp) +{ + if (is_tdx_vm()) { + return tdx_pre_create_vcpu(cpu, errp); + } + + return 0; +} + int kvm_arch_init_vcpu(CPUState *cs) { struct { @@ -2045,6 +2097,14 @@ int kvm_arch_init_vcpu(CPUState *cs) int r; Error *local_err = NULL; + if (current_machine->cgs) { + r = x86_confidential_guest_check_features( + X86_CONFIDENTIAL_GUEST(current_machine->cgs), cs); + if (r < 0) { + return r; + } + } + memset(&cpuid_data, 0, sizeof(cpuid_data)); cpuid_i = 0; @@ -2203,7 +2263,7 @@ int kvm_arch_init_vcpu(CPUState *cs) cpuid_i = kvm_x86_build_cpuid(env, cpuid_data.entries, cpuid_i); cpuid_data.cpuid.nent = cpuid_i; - if (((env->cpuid_version >> 8)&0xF) >= 6 + if (x86_cpu_family(env->cpuid_version) >= 6 && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == (CPUID_MCE | CPUID_MCA)) { uint64_t mcg_cap, unsupported_caps; @@ -2382,6 +2442,21 @@ void kvm_arch_after_reset_vcpu(X86CPU *cpu) } } +void kvm_arch_reset_parked_vcpu(unsigned long vcpu_id, int kvm_fd) +{ + g_autofree struct kvm_msrs *msrs = NULL; + + msrs = g_malloc0(sizeof(*msrs) + sizeof(msrs->entries[0])); + msrs->entries[0].index = MSR_IA32_TSC; + msrs->entries[0].data = 1; /* match the value in x86_cpu_reset() */ + msrs->nmsrs++; + + if (ioctl(kvm_fd, KVM_SET_MSRS, msrs) != 1) { + warn_report("parked vCPU %lu TSC reset failed: %d", + 
vcpu_id, errno); + } +} + void kvm_arch_do_init_vcpu(X86CPU *cpu) { CPUX86State *env = &cpu->env; @@ -2554,6 +2629,8 @@ static int kvm_get_supported_msrs(KVMState *s) case MSR_IA32_PKRS: has_msr_pkrs = true; break; + case MSR_K7_HWCR: + has_msr_hwcr = true; } } } @@ -2567,10 +2644,7 @@ static bool kvm_rdmsr_core_thread_count(X86CPU *cpu, uint32_t msr, uint64_t *val) { - CPUState *cs = CPU(cpu); - - *val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */ - *val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */ + *val = cpu_x86_get_msr_core_thread_count(cpu); return true; } @@ -2712,8 +2786,8 @@ static void *kvm_msr_energy_thread(void *data) thd_stat[i].thread_id = thread_ids[i]; vmsr_read_thread_stat(vmsr->pid, thd_stat[i].thread_id, - thd_stat[i].utime, - thd_stat[i].stime, + &thd_stat[i].utime[0], + &thd_stat[i].stime[0], &thd_stat[i].cpu_id); thd_stat[i].pkg_id = vmsr_get_physical_package_id(thd_stat[i].cpu_id); @@ -2777,8 +2851,8 @@ static void *kvm_msr_energy_thread(void *data) for (int i = 0; i < num_threads; i++) { vmsr_read_thread_stat(vmsr->pid, thd_stat[i].thread_id, - thd_stat[i].utime, - thd_stat[i].stime, + &thd_stat[i].utime[1], + &thd_stat[i].stime[1], &thd_stat[i].cpu_id); if (vmsr->pid < 0) { @@ -2889,23 +2963,20 @@ static int kvm_msr_energy_thread_init(KVMState *s, MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); struct KVMMsrEnergy *r = &s->msr_energy; - int ret = 0; /* * Sanity check * 1. Host cpu must be Intel cpu * 2. RAPL must be enabled on the Host */ - if (is_host_cpu_intel()) { - error_report("The RAPL feature can only be enabled on hosts\ - with Intel CPU models"); - ret = 1; - goto out; + if (!is_host_cpu_intel()) { + error_report("The RAPL feature can only be enabled on hosts " + "with Intel CPU models"); + return -1; } if (!is_rapl_enabled()) { - ret = 1; - goto out; + return -1; } /* Retrieve the virtual topology */ @@ -2927,16 +2998,14 @@ static int kvm_msr_energy_thread_init(KVMState *s, MachineState *ms) r->host_topo.maxcpus = vmsr_get_maxcpus(); if (r->host_topo.maxcpus == 0) { error_report("host max cpus = 0"); - ret = 1; - goto out; + return -1; } /* Max number of packages on the host */ r->host_topo.maxpkgs = vmsr_get_max_physical_package(r->host_topo.maxcpus); if (r->host_topo.maxpkgs == 0) { error_report("host max pkgs = 0"); - ret = 1; - goto out; + return -1; } /* Allocate memory for each package on the host */ @@ -2948,8 +3017,7 @@ static int kvm_msr_energy_thread_init(KVMState *s, MachineState *ms) for (int i = 0; i < r->host_topo.maxpkgs; i++) { if (r->host_topo.pkg_cpu_count[i] == 0) { error_report("cpu per packages = 0 on package_%d", i); - ret = 1; - goto out; + return -1; } } @@ -2966,8 +3034,7 @@ static int kvm_msr_energy_thread_init(KVMState *s, MachineState *ms) if (s->msr_energy.sioc == NULL) { error_report("vmsr socket opening failed"); - ret = 1; - goto out; + return -1; } /* Those MSR values should not change */ @@ -2979,15 +3046,13 @@ static int kvm_msr_energy_thread_init(KVMState *s, MachineState *ms) s->msr_energy.sioc); if (r->msr_unit == 0 || r->msr_limit == 0 || r->msr_info == 0) { error_report("can't read any virtual msr"); - ret = 1; - goto out; + return -1; } qemu_thread_create(&r->msr_thr, "kvm-msr", kvm_msr_energy_thread, s, QEMU_THREAD_JOINABLE); -out: - return ret; + return 0; } int kvm_arch_get_default_type(MachineState *ms) @@ -2995,25 +3060,182 @@ int kvm_arch_get_default_type(MachineState *ms) return 0; } -int kvm_arch_init(MachineState *ms, KVMState *s) +static int 
kvm_vm_enable_exception_payload(KVMState *s) +{ + int ret = 0; + has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD); + if (has_exception_payload) { + ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true); + if (ret < 0) { + error_report("kvm: Failed to enable exception payload cap: %s", + strerror(-ret)); + } + } + + return ret; +} + +static int kvm_vm_enable_triple_fault_event(KVMState *s) +{ + int ret = 0; + has_triple_fault_event = \ + kvm_check_extension(s, + KVM_CAP_X86_TRIPLE_FAULT_EVENT); + if (has_triple_fault_event) { + ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true); + if (ret < 0) { + error_report("kvm: Failed to enable triple fault event cap: %s", + strerror(-ret)); + } + } + return ret; +} + +static int kvm_vm_set_identity_map_addr(KVMState *s, uint64_t identity_base) +{ + return kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base); +} + +static int kvm_vm_set_nr_mmu_pages(KVMState *s) { - uint64_t identity_base = 0xfffbc000; uint64_t shadow_mem; + int ret = 0; + shadow_mem = object_property_get_int(OBJECT(s), + "kvm-shadow-mem", + &error_abort); + if (shadow_mem != -1) { + shadow_mem /= 4096; + ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem); + } + return ret; +} + +static int kvm_vm_set_tss_addr(KVMState *s, uint64_t tss_base) +{ + return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, tss_base); +} + +static int kvm_vm_enable_disable_exits(KVMState *s) +{ + int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS); + + if (disable_exits) { + disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT | + KVM_X86_DISABLE_EXITS_HLT | + KVM_X86_DISABLE_EXITS_PAUSE | + KVM_X86_DISABLE_EXITS_CSTATE); + } + + return kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0, + disable_exits); +} + +static int kvm_vm_enable_bus_lock_exit(KVMState *s) +{ + int ret = 0; + ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT); + if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) { + error_report("kvm: bus lock detection unsupported"); + return -ENOTSUP; + } + ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0, + KVM_BUS_LOCK_DETECTION_EXIT); + if (ret < 0) { + error_report("kvm: Failed to enable bus lock detection cap: %s", + strerror(-ret)); + } + + return ret; +} + +static int kvm_vm_enable_notify_vmexit(KVMState *s) +{ + int ret = 0; + if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE) { + uint64_t notify_window_flags = + ((uint64_t)s->notify_window << 32) | + KVM_X86_NOTIFY_VMEXIT_ENABLED | + KVM_X86_NOTIFY_VMEXIT_USER; + ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0, + notify_window_flags); + if (ret < 0) { + error_report("kvm: Failed to enable notify vmexit cap: %s", + strerror(-ret)); + } + } + return ret; +} + +static int kvm_vm_enable_userspace_msr(KVMState *s) +{ + int ret; + + ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0, + KVM_MSR_EXIT_REASON_FILTER); + if (ret < 0) { + error_report("Could not enable user space MSRs: %s", + strerror(-ret)); + exit(1); + } + + ret = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT, + kvm_rdmsr_core_thread_count, NULL); + if (ret < 0) { + error_report("Could not install MSR_CORE_THREAD_COUNT handler: %s", + strerror(-ret)); + exit(1); + } + + return 0; +} + +static int kvm_vm_enable_energy_msrs(KVMState *s) +{ + int ret; + + if (s->msr_energy.enable == true) { + ret = kvm_filter_msr(s, MSR_RAPL_POWER_UNIT, + kvm_rdmsr_rapl_power_unit, NULL); + if (ret < 0) { + error_report("Could not install MSR_RAPL_POWER_UNIT handler: %s", + strerror(-ret)); + return ret; + } + + ret = 
kvm_filter_msr(s, MSR_PKG_POWER_LIMIT, + kvm_rdmsr_pkg_power_limit, NULL); + if (ret < 0) { + error_report("Could not install MSR_PKG_POWER_LIMIT handler: %s", + strerror(-ret)); + return ret; + } + + ret = kvm_filter_msr(s, MSR_PKG_POWER_INFO, + kvm_rdmsr_pkg_power_info, NULL); + if (ret < 0) { + error_report("Could not install MSR_PKG_POWER_INFO handler: %s", + strerror(-ret)); + return ret; + } + ret = kvm_filter_msr(s, MSR_PKG_ENERGY_STATUS, + kvm_rdmsr_pkg_energy_status, NULL); + if (ret < 0) { + error_report("Could not install MSR_PKG_ENERGY_STATUS handler: %s", + strerror(-ret)); + return ret; + } + } + return 0; +} + +int kvm_arch_init(MachineState *ms, KVMState *s) +{ int ret; struct utsname utsname; Error *local_err = NULL; /* - * Initialize SEV context, if required - * - * If no memory encryption is requested (ms->cgs == NULL) this is - * a no-op. - * - * It's also a no-op if a non-SEV confidential guest support - * mechanism is selected. SEV is the only mechanism available to - * select on x86 at present, so this doesn't arise, but if new - * mechanisms are supported in future (e.g. TDX), they'll need - * their own initialization either here or elsewhere. + * Initialize confidential guest (SEV/TDX) context, if required */ if (ms->cgs) { ret = confidential_guest_kvm_init(ms->cgs, &local_err); @@ -3028,24 +3250,14 @@ int kvm_arch_init(MachineState *ms, KVMState *s) hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX); - has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD); - if (has_exception_payload) { - ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true); - if (ret < 0) { - error_report("kvm: Failed to enable exception payload cap: %s", - strerror(-ret)); - return ret; - } + ret = kvm_vm_enable_exception_payload(s); + if (ret < 0) { + return ret; } - has_triple_fault_event = kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT); - if (has_triple_fault_event) { - ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true); - if (ret < 0) { - error_report("kvm: Failed to enable triple fault event cap: %s", - strerror(-ret)); - return ret; - } + ret = kvm_vm_enable_triple_fault_event(s); + if (ret < 0) { + return ret; } if (s->xen_version) { @@ -3071,41 +3283,31 @@ int kvm_arch_init(MachineState *ms, KVMState *s) return ret; } - kvm_get_supported_feature_msrs(s); + ret = kvm_get_supported_feature_msrs(s); + if (ret < 0) { + return ret; + } uname(&utsname); lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0; - /* - * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly. - * In order to use vm86 mode, an EPT identity map and a TSS are needed. - * Since these must be part of guest physical memory, we need to allocate - * them, both by setting their start addresses in the kernel and by - * creating a corresponding e820 entry. We need 4 pages before the BIOS, - * so this value allows up to 16M BIOSes. - */ - identity_base = 0xfeffc000; - ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base); + ret = kvm_vm_set_identity_map_addr(s, KVM_IDENTITY_BASE); if (ret < 0) { return ret; } /* Set TSS base one page after EPT identity map. */ - ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000); + ret = kvm_vm_set_tss_addr(s, KVM_IDENTITY_BASE + 0x1000); if (ret < 0) { return ret; } /* Tell fw_cfg to notify the BIOS to reserve the range. 
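The kvm_filter_msr() registrations collected above into kvm_vm_enable_userspace_msr() and kvm_vm_enable_energy_msrs() all hang user-space MSR handlers off the KVMMSRHandlers table declared earlier in this file: a user-space RDMSR exit is routed to the handler registered for that MSR index. Below is a minimal standalone sketch of that dispatch pattern; the dispatch helper names are invented for the example, and the 0x35 index plus the threads-in-bits-15..0 / cores-in-bits-31..16 layout follow the MSR_CORE_THREAD_COUNT handling elsewhere in this patch.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MSR_CORE_THREAD_COUNT 0x35   /* index used for this MSR (illustrative) */

    typedef bool rdmsr_handler_fn(uint32_t msr, uint64_t *val);

    typedef struct {
        uint32_t msr;
        rdmsr_handler_fn *rdmsr;
    } msr_handler_entry;

    /* Thread count in bits 15..0, core count in bits 31..16, mirroring
     * cpu_x86_get_msr_core_thread_count() / the old simulate_rdmsr() code. */
    static bool rdmsr_core_thread_count(uint32_t msr, uint64_t *val)
    {
        const uint32_t nr_cores = 4, nr_threads_per_core = 2;

        (void)msr;
        *val = nr_cores * nr_threads_per_core;
        *val |= (uint64_t)nr_cores << 16;
        return true;
    }

    static const msr_handler_entry handlers[] = {
        { MSR_CORE_THREAD_COUNT, rdmsr_core_thread_count },
    };

    /* What the user-space RDMSR exit path boils down to: find the registered
     * handler for the MSR index and call it, otherwise report it unhandled. */
    static bool dispatch_rdmsr(uint32_t msr, uint64_t *val)
    {
        for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
            if (handlers[i].msr == msr) {
                return handlers[i].rdmsr(msr, val);
            }
        }
        return false;
    }

    int main(void)
    {
        uint64_t val;

        if (dispatch_rdmsr(MSR_CORE_THREAD_COUNT, &val)) {
            printf("MSR_CORE_THREAD_COUNT = 0x%llx\n", (unsigned long long)val);
        }
        return 0;
    }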
*/ - e820_add_entry(identity_base, 0x4000, E820_RESERVED); + e820_add_entry(KVM_IDENTITY_BASE, 0x4000, E820_RESERVED); - shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort); - if (shadow_mem != -1) { - shadow_mem /= 4096; - ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem); - if (ret < 0) { - return ret; - } + ret = kvm_vm_set_nr_mmu_pages(s); + if (ret < 0) { + return ret; } if (kvm_check_extension(s, KVM_CAP_X86_SMM) && @@ -3116,23 +3318,11 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } if (enable_cpu_pm) { - int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS); -/* Work around for kernel header with a typo. TODO: fix header and drop. */ -#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT) -#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL -#endif - if (disable_exits) { - disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT | - KVM_X86_DISABLE_EXITS_HLT | - KVM_X86_DISABLE_EXITS_PAUSE | - KVM_X86_DISABLE_EXITS_CSTATE); - } - - ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0, - disable_exits); + ret = kvm_vm_enable_disable_exits(s); if (ret < 0) { error_report("kvm: guest stopping CPU not supported: %s", strerror(-ret)); + return ret; } } @@ -3140,16 +3330,8 @@ int kvm_arch_init(MachineState *ms, KVMState *s) X86MachineState *x86ms = X86_MACHINE(ms); if (x86ms->bus_lock_ratelimit > 0) { - ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT); - if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) { - error_report("kvm: bus lock detection unsupported"); - return -ENOTSUP; - } - ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0, - KVM_BUS_LOCK_DETECTION_EXIT); + ret = kvm_vm_enable_bus_lock_exit(s); if (ret < 0) { - error_report("kvm: Failed to enable bus lock detection cap: %s", - strerror(-ret)); return ret; } ratelimit_init(&bus_lock_ratelimit_ctrl); @@ -3158,80 +3340,30 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } - if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE && - kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) { - uint64_t notify_window_flags = - ((uint64_t)s->notify_window << 32) | - KVM_X86_NOTIFY_VMEXIT_ENABLED | - KVM_X86_NOTIFY_VMEXIT_USER; - ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0, - notify_window_flags); - if (ret < 0) { - error_report("kvm: Failed to enable notify vmexit cap: %s", - strerror(-ret)); - return ret; - } - } - if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) { - bool r; - - ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0, - KVM_MSR_EXIT_REASON_FILTER); - if (ret) { - error_report("Could not enable user space MSRs: %s", - strerror(-ret)); - exit(1); + if (kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) { + ret = kvm_vm_enable_notify_vmexit(s); + if (ret < 0) { + return ret; } + } - r = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT, - kvm_rdmsr_core_thread_count, NULL); - if (!r) { - error_report("Could not install MSR_CORE_THREAD_COUNT handler: %s", - strerror(-ret)); - exit(1); + if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) { + ret = kvm_vm_enable_userspace_msr(s); + if (ret < 0) { + return ret; } if (s->msr_energy.enable == true) { - r = kvm_filter_msr(s, MSR_RAPL_POWER_UNIT, - kvm_rdmsr_rapl_power_unit, NULL); - if (!r) { - error_report("Could not install MSR_RAPL_POWER_UNIT \ - handler: %s", - strerror(-ret)); - exit(1); - } - - r = kvm_filter_msr(s, MSR_PKG_POWER_LIMIT, - kvm_rdmsr_pkg_power_limit, NULL); - if (!r) { - error_report("Could not install MSR_PKG_POWER_LIMIT \ - handler: %s", - 
strerror(-ret)); - exit(1); + ret = kvm_vm_enable_energy_msrs(s); + if (ret < 0) { + return ret; } - r = kvm_filter_msr(s, MSR_PKG_POWER_INFO, - kvm_rdmsr_pkg_power_info, NULL); - if (!r) { - error_report("Could not install MSR_PKG_POWER_INFO \ - handler: %s", - strerror(-ret)); - exit(1); - } - r = kvm_filter_msr(s, MSR_PKG_ENERGY_STATUS, - kvm_rdmsr_pkg_energy_status, NULL); - if (!r) { - error_report("Could not install MSR_PKG_ENERGY_STATUS \ - handler: %s", - strerror(-ret)); - exit(1); - } - r = kvm_msr_energy_thread_init(s, ms); - if (r) { - error_report("kvm : error RAPL feature requirement not meet"); - exit(1); + ret = kvm_msr_energy_thread_init(s, ms); + if (ret < 0) { + error_report("kvm : error RAPL feature requirement not met"); + return ret; } - } } @@ -3694,7 +3826,14 @@ static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f) kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0, CR4_VMXE_MASK); - if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) { + if (f[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) { + /* FRED injected-event data (0x2052). */ + kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x52); + } else if (f[FEAT_VMX_EXIT_CTLS] & + VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS) { + /* Secondary VM-exit controls (0x2044). */ + kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x44); + } else if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) { /* TSC multiplier (0x2032). */ kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32); } else { @@ -3737,32 +3876,34 @@ static void kvm_init_msrs(X86CPU *cpu) CPUX86State *env = &cpu->env; kvm_msr_buf_reset(cpu); - if (has_msr_arch_capabs) { - kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES, - env->features[FEAT_ARCH_CAPABILITIES]); - } - if (has_msr_core_capabs) { - kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY, - env->features[FEAT_CORE_CAPABILITY]); - } + if (!is_tdx_vm()) { + if (has_msr_arch_capabs) { + kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES, + env->features[FEAT_ARCH_CAPABILITIES]); + } + + if (has_msr_core_capabs) { + kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY, + env->features[FEAT_CORE_CAPABILITY]); + } - if (has_msr_perf_capabs && cpu->enable_pmu) { - kvm_msr_entry_add_perf(cpu, env->features); + if (has_msr_perf_capabs && cpu->enable_pmu) { + kvm_msr_entry_add_perf(cpu, env->features); + } + + /* + * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but + * all kernels with MSR features should have them. + */ + if (kvm_feature_msrs && cpu_has_vmx(env)) { + kvm_msr_entry_add_vmx(cpu, env->features); + } } if (has_msr_ucode_rev) { kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev); } - - /* - * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but - * all kernels with MSR features should have them. 
- */ - if (kvm_feature_msrs && cpu_has_vmx(env)) { - kvm_msr_entry_add_vmx(cpu, env->features); - } - assert(kvm_buf_set_msrs(cpu) == 0); } @@ -3824,6 +3965,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level) if (has_msr_virt_ssbd) { kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd); } + if (has_msr_hwcr) { + kvm_msr_entry_add(cpu, MSR_K7_HWCR, env->msr_hwcr); + } #ifdef TARGET_X86_64 if (lm_capable_kernel) { @@ -3851,22 +3995,24 @@ static int kvm_put_msrs(X86CPU *cpu, int level) */ if (level >= KVM_PUT_RESET_STATE) { kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc); - kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr); - kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr); - if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) { + if (env->features[FEAT_KVM] & (CPUID_KVM_CLOCK | CPUID_KVM_CLOCK2)) { + kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr); + kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr); + } + if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF_INT) { kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr); } - if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { + if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF) { kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr); } - if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { + if (env->features[FEAT_KVM] & CPUID_KVM_PV_EOI) { kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr); } - if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { + if (env->features[FEAT_KVM] & CPUID_KVM_STEAL_TIME) { kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr); } - if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { + if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) { kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr); } @@ -3924,13 +4070,11 @@ static int kvm_put_msrs(X86CPU *cpu, int level) kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, env->msr_hv_tsc_emulation_status); } -#ifdef CONFIG_SYNDBG if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG) && has_msr_hv_syndbg_options) { kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, hyperv_syndbg_query_options()); } -#endif } if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) { kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, @@ -4102,7 +4246,8 @@ static int kvm_get_xsave(X86CPU *cpu) { CPUX86State *env = &cpu->env; void *xsave = env->xsave_buf; - int type, ret; + unsigned long type; + int ret; type = has_xsave2 ? 
KVM_GET_XSAVE2 : KVM_GET_XSAVE; ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave); @@ -4307,6 +4452,9 @@ static int kvm_get_msrs(X86CPU *cpu) kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0); env->tsc_valid = !runstate_is_running(); } + if (has_msr_hwcr) { + kvm_msr_entry_add(cpu, MSR_K7_HWCR, 0); + } #ifdef TARGET_X86_64 if (lm_capable_kernel) { @@ -4327,21 +4475,23 @@ static int kvm_get_msrs(X86CPU *cpu) } } #endif - kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0); - kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0); - if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) { + if (env->features[FEAT_KVM] & (CPUID_KVM_CLOCK | CPUID_KVM_CLOCK2)) { + kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0); + kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0); + } + if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF_INT) { kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0); } - if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { + if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF) { kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0); } - if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { + if (env->features[FEAT_KVM] & CPUID_KVM_PV_EOI) { kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0); } - if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { + if (env->features[FEAT_KVM] & CPUID_KVM_STEAL_TIME) { kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0); } - if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { + if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) { kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1); } if (has_architectural_pmu_version > 0) { @@ -4826,6 +4976,9 @@ static int kvm_get_msrs(X86CPU *cpu) case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31: env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data; break; + case MSR_K7_HWCR: + env->msr_hwcr = msrs[i].data; + break; } } @@ -5117,7 +5270,7 @@ static int kvm_get_nested_state(X86CPU *cpu) return ret; } -int kvm_arch_put_registers(CPUState *cpu, int level) +int kvm_arch_put_registers(CPUState *cpu, int level, Error **errp) { X86CPU *x86_cpu = X86_CPU(cpu); int ret; @@ -5132,6 +5285,7 @@ int kvm_arch_put_registers(CPUState *cpu, int level) if (level >= KVM_PUT_RESET_STATE) { ret = kvm_put_msr_feature_control(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set feature control MSR"); return ret; } } @@ -5139,12 +5293,14 @@ int kvm_arch_put_registers(CPUState *cpu, int level) /* must be before kvm_put_nested_state so that EFER.SVME is set */ ret = has_sregs2 ? 
kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set special registers"); return ret; } if (level >= KVM_PUT_RESET_STATE) { ret = kvm_put_nested_state(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set nested state"); return ret; } } @@ -5162,6 +5318,7 @@ int kvm_arch_put_registers(CPUState *cpu, int level) if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) { ret = kvm_put_xen_state(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set Xen state"); return ret; } } @@ -5169,43 +5326,51 @@ int kvm_arch_put_registers(CPUState *cpu, int level) ret = kvm_getput_regs(x86_cpu, 1); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set general purpose registers"); return ret; } ret = kvm_put_xsave(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set XSAVE"); return ret; } ret = kvm_put_xcrs(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set XCRs"); return ret; } ret = kvm_put_msrs(x86_cpu, level); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set MSRs"); return ret; } ret = kvm_put_vcpu_events(x86_cpu, level); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set vCPU events"); return ret; } if (level >= KVM_PUT_RESET_STATE) { ret = kvm_put_mp_state(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set MP state"); return ret; } } ret = kvm_put_tscdeadline_msr(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set TSC deadline MSR"); return ret; } ret = kvm_put_debugregs(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set debug registers"); return ret; } return 0; } -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { X86CPU *cpu = X86_CPU(cs); int ret; @@ -5214,6 +5379,7 @@ int kvm_arch_get_registers(CPUState *cs) ret = kvm_get_vcpu_events(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get vCPU events"); goto out; } /* @@ -5222,44 +5388,54 @@ int kvm_arch_get_registers(CPUState *cs) */ ret = kvm_get_mp_state(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get MP state"); goto out; } ret = kvm_getput_regs(cpu, 0); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get general purpose registers"); goto out; } ret = kvm_get_xsave(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get XSAVE"); goto out; } ret = kvm_get_xcrs(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get XCRs"); goto out; } ret = has_sregs2 ? 
kvm_get_sregs2(cpu) : kvm_get_sregs(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get special registers"); goto out; } ret = kvm_get_msrs(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get MSRs"); goto out; } ret = kvm_get_apic(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get APIC"); goto out; } ret = kvm_get_debugregs(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get debug registers"); goto out; } ret = kvm_get_nested_state(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get nested state"); goto out; } #ifdef CONFIG_XEN_EMU if (xen_mode == XEN_EMULATE) { ret = kvm_get_xen_state(cs); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get Xen state"); goto out; } } @@ -5690,15 +5866,16 @@ void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) } } -static bool kvm_install_msr_filters(KVMState *s) +static int kvm_install_msr_filters(KVMState *s) { uint64_t zero = 0; struct kvm_msr_filter filter = { .flags = KVM_MSR_FILTER_DEFAULT_ALLOW, }; - int r, i, j = 0; + int i, j = 0; - for (i = 0; i < KVM_MSR_FILTER_MAX_RANGES; i++) { + QEMU_BUILD_BUG_ON(ARRAY_SIZE(msr_handlers) != ARRAY_SIZE(filter.ranges)); + for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) { KVMMSRHandlers *handler = &msr_handlers[i]; if (handler->msr) { struct kvm_msr_filter_range *range = &filter.ranges[j++]; @@ -5720,18 +5897,13 @@ static bool kvm_install_msr_filters(KVMState *s) } } - r = kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter); - if (r) { - return false; - } - - return true; + return kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter); } -bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr, - QEMUWRMSRHandler *wrmsr) +static int kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr, + QEMUWRMSRHandler *wrmsr) { - int i; + int i, ret; for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) { if (!msr_handlers[i].msr) { @@ -5741,16 +5913,17 @@ bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr, .wrmsr = wrmsr, }; - if (!kvm_install_msr_filters(s)) { + ret = kvm_install_msr_filters(s); + if (ret) { msr_handlers[i] = (KVMMSRHandlers) { }; - return false; + return ret; } - return true; + return 0; } } - return false; + return -EINVAL; } static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run) @@ -5770,7 +5943,7 @@ static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run) } } - assert(false); + g_assert_not_reached(); } static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run) @@ -5789,7 +5962,7 @@ static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run) } } - assert(false); + g_assert_not_reached(); } static bool has_sgx_provisioning; @@ -5849,9 +6022,11 @@ static bool host_supports_vmx(void) * because private/shared page tracking is already provided through other * means, these 2 use-cases should be treated as being mutually-exclusive. 
*/ -static int kvm_handle_hc_map_gpa_range(struct kvm_run *run) +static int kvm_handle_hc_map_gpa_range(X86CPU *cpu, struct kvm_run *run) { + struct kvm_pre_fault_memory mem; uint64_t gpa, size, attributes; + int ret; if (!machine_require_guest_memfd(current_machine)) return -EINVAL; @@ -5862,13 +6037,32 @@ static int kvm_handle_hc_map_gpa_range(struct kvm_run *run) trace_kvm_hc_map_gpa_range(gpa, size, attributes, run->hypercall.flags); - return kvm_convert_memory(gpa, size, attributes & KVM_MAP_GPA_RANGE_ENCRYPTED); + ret = kvm_convert_memory(gpa, size, attributes & KVM_MAP_GPA_RANGE_ENCRYPTED); + if (ret || !kvm_pre_fault_memory_supported) { + return ret; + } + + /* + * Opportunistically pre-fault memory in. Failures are ignored so that any + * errors in faulting in the memory will get captured in KVM page fault + * path when the guest first accesses the page. + */ + memset(&mem, 0, sizeof(mem)); + mem.gpa = gpa; + mem.size = size; + while (mem.size) { + if (kvm_vcpu_ioctl(CPU(cpu), KVM_PRE_FAULT_MEMORY, &mem)) { + break; + } + } + + return 0; } -static int kvm_handle_hypercall(struct kvm_run *run) +static int kvm_handle_hypercall(X86CPU *cpu, struct kvm_run *run) { if (run->hypercall.nr == KVM_HC_MAP_GPA_RANGE) - return kvm_handle_hc_map_gpa_range(run); + return kvm_handle_hc_map_gpa_range(cpu, run); return -EINVAL; } @@ -5968,7 +6162,35 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) break; #endif case KVM_EXIT_HYPERCALL: - ret = kvm_handle_hypercall(run); + ret = kvm_handle_hypercall(cpu, run); + break; + case KVM_EXIT_SYSTEM_EVENT: + switch (run->system_event.type) { + case KVM_SYSTEM_EVENT_TDX_FATAL: + ret = tdx_handle_report_fatal_error(cpu, run); + break; + default: + ret = -1; + break; + } + break; + case KVM_EXIT_TDX: + /* + * run->tdx is already set up for the case where userspace + * does not handle the TDVMCALL. 
+ */ + switch (run->tdx.nr) { + case TDVMCALL_GET_QUOTE: + tdx_handle_get_quote(cpu, run); + break; + case TDVMCALL_GET_TD_VM_CALL_INFO: + tdx_handle_get_tdvmcall_info(cpu, run); + break; + case TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT: + tdx_handle_setup_event_notify_interrupt(cpu, run); + break; + } + ret = 0; break; default: fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason); @@ -6042,7 +6264,7 @@ uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address) return address; } env = &X86_CPU(first_cpu)->env; - if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) { + if (!(env->features[FEAT_KVM] & CPUID_KVM_MSI_EXT_DEST_ID)) { return address; } diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h index 34fc60774b8..5f83e8850a2 100644 --- a/target/i386/kvm/kvm_i386.h +++ b/target/i386/kvm/kvm_i386.h @@ -11,10 +11,11 @@ #ifndef QEMU_KVM_I386_H #define QEMU_KVM_I386_H -#include "sysemu/kvm.h" +#include "system/kvm.h" -#ifdef CONFIG_KVM +#define KVM_MAX_CPUID_ENTRIES 100 +/* always false if !CONFIG_KVM */ #define kvm_pit_in_kernel() \ (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split()) #define kvm_pic_in_kernel() \ @@ -22,14 +23,6 @@ #define kvm_ioapic_in_kernel() \ (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split()) -#else - -#define kvm_pit_in_kernel() 0 -#define kvm_pic_in_kernel() 0 -#define kvm_ioapic_in_kernel() 0 - -#endif /* CONFIG_KVM */ - bool kvm_has_smm(void); bool kvm_enable_x2apic(void); bool kvm_hv_vpindex_settable(void); @@ -51,6 +44,13 @@ void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask); #ifdef CONFIG_KVM +#include + +typedef struct KvmCpuidInfo { + struct kvm_cpuid2 cpuid; + struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES]; +} KvmCpuidInfo; + bool kvm_is_vm_type_supported(int type); bool kvm_has_adjust_clock_stable(void); bool kvm_has_exception_payload(void); @@ -66,17 +66,12 @@ uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address); void kvm_update_msi_routes_all(void *private, bool global, uint32_t index, uint32_t mask); -typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val); -typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val); -typedef struct kvm_msr_handlers { - uint32_t msr; - QEMURDMSRHandler *rdmsr; - QEMUWRMSRHandler *wrmsr; -} KVMMSRHandlers; - -bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr, - QEMUWRMSRHandler *wrmsr); - +struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid, + uint32_t function, + uint32_t index); +uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg); +uint32_t kvm_x86_build_cpuid(CPUX86State *env, struct kvm_cpuid_entry2 *entries, + uint32_t cpuid_i); #endif /* CONFIG_KVM */ void kvm_pc_setup_irq_routing(bool pci_enabled); diff --git a/target/i386/kvm/meson.build b/target/i386/kvm/meson.build index 3996cafaf29..2675bf89027 100644 --- a/target/i386/kvm/meson.build +++ b/target/i386/kvm/meson.build @@ -8,6 +8,8 @@ i386_kvm_ss.add(files( i386_kvm_ss.add(when: 'CONFIG_XEN_EMU', if_true: files('xen-emu.c')) +i386_kvm_ss.add(when: 'CONFIG_TDX', if_true: files('tdx.c', 'tdx-quote-generator.c'), if_false: files('tdx-stub.c')) + i386_system_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'), if_false: files('hyperv-stub.c')) i386_system_ss.add_all(when: 'CONFIG_KVM', if_true: i386_kvm_ss) diff --git a/target/i386/kvm/tdx-quote-generator.c b/target/i386/kvm/tdx-quote-generator.c new file mode 100644 index 00000000000..dee8334b27a --- /dev/null +++ b/target/i386/kvm/tdx-quote-generator.c @@ -0,0 +1,302 
@@ +/* + * QEMU TDX Quote Generation Support + * + * Copyright (c) 2025 Intel Corporation + * + * Author: + * Xiaoyao Li + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qapi/qapi-visit-sockets.h" + +#include "tdx-quote-generator.h" + +#define QGS_MSG_LIB_MAJOR_VER 1 +#define QGS_MSG_LIB_MINOR_VER 1 + +typedef enum _qgs_msg_type_t { + GET_QUOTE_REQ = 0, + GET_QUOTE_RESP = 1, + GET_COLLATERAL_REQ = 2, + GET_COLLATERAL_RESP = 3, + GET_PLATFORM_INFO_REQ = 4, + GET_PLATFORM_INFO_RESP = 5, + QGS_MSG_TYPE_MAX +} qgs_msg_type_t; + +typedef struct _qgs_msg_header_t { + uint16_t major_version; + uint16_t minor_version; + uint32_t type; + uint32_t size; // size of the whole message, include this header, in byte + uint32_t error_code; // used in response only +} qgs_msg_header_t; + +typedef struct _qgs_msg_get_quote_req_t { + qgs_msg_header_t header; // header.type = GET_QUOTE_REQ + uint32_t report_size; // cannot be 0 + uint32_t id_list_size; // length of id_list, in byte, can be 0 +} qgs_msg_get_quote_req_t; + +typedef struct _qgs_msg_get_quote_resp_s { + qgs_msg_header_t header; // header.type = GET_QUOTE_RESP + uint32_t selected_id_size; // can be 0 in case only one id is sent in request + uint32_t quote_size; // length of quote_data, in byte + uint8_t id_quote[]; // selected id followed by quote +} qgs_msg_get_quote_resp_t; + +#define HEADER_SIZE 4 + +static uint32_t decode_header(const char *buf, size_t len) { + if (len < HEADER_SIZE) { + return 0; + } + uint32_t msg_size = 0; + for (uint32_t i = 0; i < HEADER_SIZE; ++i) { + msg_size = msg_size * 256 + (buf[i] & 0xFF); + } + return msg_size; +} + +static void encode_header(char *buf, size_t len, uint32_t size) { + assert(len >= HEADER_SIZE); + buf[0] = ((size >> 24) & 0xFF); + buf[1] = ((size >> 16) & 0xFF); + buf[2] = ((size >> 8) & 0xFF); + buf[3] = (size & 0xFF); +} + +static void tdx_generate_quote_cleanup(TdxGenerateQuoteTask *task) +{ + timer_del(&task->timer); + + if (task->watch) { + g_source_remove(task->watch); + } + qio_channel_close(QIO_CHANNEL(task->sioc), NULL); + object_unref(OBJECT(task->sioc)); + + task->completion(task); +} + +static gboolean tdx_get_quote_read(QIOChannel *ioc, GIOCondition condition, + gpointer opaque) +{ + TdxGenerateQuoteTask *task = opaque; + Error *err = NULL; + int ret; + + ret = qio_channel_read(ioc, task->receive_buf + task->receive_buf_received, + task->payload_len - task->receive_buf_received, &err); + if (ret < 0) { + if (ret == QIO_CHANNEL_ERR_BLOCK) { + return G_SOURCE_CONTINUE; + } else { + error_report_err(err); + task->status_code = TDX_VP_GET_QUOTE_ERROR; + goto end; + } + } + + if (ret == 0) { + error_report("End of file before reply received"); + task->status_code = TDX_VP_GET_QUOTE_ERROR; + goto end; + } + + task->receive_buf_received += ret; + if (task->receive_buf_received >= HEADER_SIZE) { + uint32_t len = decode_header(task->receive_buf, + task->receive_buf_received); + if (len == 0 || + len > (task->payload_len - HEADER_SIZE)) { + error_report("Message len %u must be non-zero & less than %zu", + len, (task->payload_len - HEADER_SIZE)); + task->status_code = TDX_VP_GET_QUOTE_ERROR; + goto end; + } + + /* Now we know the size, shrink to fit */ + task->payload_len = HEADER_SIZE + len; + task->receive_buf = g_renew(char, + task->receive_buf, + task->payload_len); + } + + if (task->receive_buf_received >= (sizeof(qgs_msg_header_t) + HEADER_SIZE)) { + qgs_msg_header_t *hdr = (qgs_msg_header_t 
*)(task->receive_buf + HEADER_SIZE); + if (hdr->major_version != QGS_MSG_LIB_MAJOR_VER || + hdr->minor_version != QGS_MSG_LIB_MINOR_VER) { + error_report("Invalid QGS message header version %d.%d", + hdr->major_version, + hdr->minor_version); + task->status_code = TDX_VP_GET_QUOTE_ERROR; + goto end; + } + if (hdr->type != GET_QUOTE_RESP) { + error_report("Invalid QGS message type %d", + hdr->type); + task->status_code = TDX_VP_GET_QUOTE_ERROR; + goto end; + } + if (hdr->size > (task->payload_len - HEADER_SIZE)) { + error_report("QGS message size %d exceeds payload capacity %zu", + hdr->size, task->payload_len); + task->status_code = TDX_VP_GET_QUOTE_ERROR; + goto end; + } + if (hdr->error_code != 0) { + error_report("QGS message error code %d", + hdr->error_code); + task->status_code = TDX_VP_GET_QUOTE_ERROR; + goto end; + } + } + if (task->receive_buf_received >= (sizeof(qgs_msg_get_quote_resp_t) + HEADER_SIZE)) { + qgs_msg_get_quote_resp_t *msg = (qgs_msg_get_quote_resp_t *)(task->receive_buf + HEADER_SIZE); + if (msg->selected_id_size != 0) { + error_report("QGS message selected ID was %d not 0", + msg->selected_id_size); + task->status_code = TDX_VP_GET_QUOTE_ERROR; + goto end; + } + + if ((task->payload_len - HEADER_SIZE - sizeof(qgs_msg_get_quote_resp_t)) != + msg->quote_size) { + error_report("QGS quote size %d should be %zu", + msg->quote_size, + (task->payload_len - sizeof(qgs_msg_get_quote_resp_t))); + task->status_code = TDX_VP_GET_QUOTE_ERROR; + goto end; + } + } + + if (task->receive_buf_received == task->payload_len) { + size_t strip = HEADER_SIZE + sizeof(qgs_msg_get_quote_resp_t); + memmove(task->receive_buf, + task->receive_buf + strip, + task->receive_buf_received - strip); + task->receive_buf_received -= strip; + task->status_code = TDX_VP_GET_QUOTE_SUCCESS; + goto end; + } + + return G_SOURCE_CONTINUE; + +end: + tdx_generate_quote_cleanup(task); + return G_SOURCE_REMOVE; +} + +static gboolean tdx_send_report(QIOChannel *ioc, GIOCondition condition, + gpointer opaque) +{ + TdxGenerateQuoteTask *task = opaque; + Error *err = NULL; + int ret; + + ret = qio_channel_write(ioc, task->send_data + task->send_data_sent, + task->send_data_size - task->send_data_sent, &err); + if (ret < 0) { + if (ret == QIO_CHANNEL_ERR_BLOCK) { + ret = 0; + } else { + error_report_err(err); + task->status_code = TDX_VP_GET_QUOTE_ERROR; + tdx_generate_quote_cleanup(task); + goto end; + } + } + task->send_data_sent += ret; + + if (task->send_data_sent == task->send_data_size) { + task->watch = qio_channel_add_watch(QIO_CHANNEL(task->sioc), G_IO_IN, + tdx_get_quote_read, task, NULL); + goto end; + } + + return G_SOURCE_CONTINUE; + +end: + return G_SOURCE_REMOVE; +} + +static void tdx_quote_generator_connected(QIOTask *qio_task, gpointer opaque) +{ + TdxGenerateQuoteTask *task = opaque; + Error *err = NULL; + int ret; + + ret = qio_task_propagate_error(qio_task, &err); + if (ret) { + error_report_err(err); + task->status_code = TDX_VP_GET_QUOTE_QGS_UNAVAILABLE; + tdx_generate_quote_cleanup(task); + return; + } + + task->watch = qio_channel_add_watch(QIO_CHANNEL(task->sioc), G_IO_OUT, + tdx_send_report, task, NULL); +} + +#define TRANSACTION_TIMEOUT 30000 + +static void getquote_expired(void *opaque) +{ + TdxGenerateQuoteTask *task = opaque; + + task->status_code = TDX_VP_GET_QUOTE_ERROR; + tdx_generate_quote_cleanup(task); +} + +static void setup_get_quote_timer(TdxGenerateQuoteTask *task) +{ + int64_t time; + + timer_init_ms(&task->timer, QEMU_CLOCK_VIRTUAL, getquote_expired, task); + time = 
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + timer_mod(&task->timer, time + TRANSACTION_TIMEOUT); +} + +void tdx_generate_quote(TdxGenerateQuoteTask *task, + SocketAddress *qg_sock_addr) +{ + QIOChannelSocket *sioc; + qgs_msg_get_quote_req_t msg; + + /* Prepare a QGS message prelude */ + msg.header.major_version = QGS_MSG_LIB_MAJOR_VER; + msg.header.minor_version = QGS_MSG_LIB_MINOR_VER; + msg.header.type = GET_QUOTE_REQ; + msg.header.size = sizeof(msg) + task->send_data_size; + msg.header.error_code = 0; + msg.report_size = task->send_data_size; + msg.id_list_size = 0; + + /* Make room to add the QGS message prelude */ + task->send_data = g_renew(char, + task->send_data, + task->send_data_size + sizeof(msg) + HEADER_SIZE); + memmove(task->send_data + sizeof(msg) + HEADER_SIZE, + task->send_data, + task->send_data_size); + memcpy(task->send_data + HEADER_SIZE, + &msg, + sizeof(msg)); + encode_header(task->send_data, HEADER_SIZE, task->send_data_size + sizeof(msg)); + task->send_data_size += sizeof(msg) + HEADER_SIZE; + + sioc = qio_channel_socket_new(); + task->sioc = sioc; + + setup_get_quote_timer(task); + + qio_channel_socket_connect_async(sioc, qg_sock_addr, + tdx_quote_generator_connected, task, + NULL, NULL); +} diff --git a/target/i386/kvm/tdx-quote-generator.h b/target/i386/kvm/tdx-quote-generator.h new file mode 100644 index 00000000000..3bd9b8ef331 --- /dev/null +++ b/target/i386/kvm/tdx-quote-generator.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef QEMU_I386_TDX_QUOTE_GENERATOR_H +#define QEMU_I386_TDX_QUOTE_GENERATOR_H + +#include "qom/object_interfaces.h" +#include "io/channel-socket.h" +#include "exec/hwaddr.h" + +#define TDX_GET_QUOTE_STRUCTURE_VERSION 1ULL + +#define TDX_VP_GET_QUOTE_SUCCESS 0ULL +#define TDX_VP_GET_QUOTE_IN_FLIGHT (-1ULL) +#define TDX_VP_GET_QUOTE_ERROR 0x8000000000000000ULL +#define TDX_VP_GET_QUOTE_QGS_UNAVAILABLE 0x8000000000000001ULL + +/* Limit to avoid resource starvation. */ +#define TDX_GET_QUOTE_MAX_BUF_LEN (128 * 1024) +#define TDX_MAX_GET_QUOTE_REQUEST 16 + +#define TDX_GET_QUOTE_HDR_SIZE 24 + +/* Format of pages shared with guest. */ +struct tdx_get_quote_header { + /* Format version: must be 1 in little endian. */ + uint64_t structure_version; + + /* + * GetQuote status code in little endian: + * Guest must set error_code to 0 to avoid information leak. + * Qemu sets this before interrupting guest. + */ + uint64_t error_code; + + /* + * in-message size in little endian: The message will follow this header. + * The in-message will be sent to QGS. + */ + uint32_t in_len; + + /* + * out-message size in little endian: + * On request, out_len must be zero to avoid information leak. + * On return, message size from QGS. Qemu overwrites this field. + * The message will follow this header. The in-message is overwritten. + */ + uint32_t out_len; + + /* + * Message buffer follows. + * Guest sets the message that will be sent to QGS. If out_len > in_len, guest + * should zero remaining buffer to avoid information leak. + * Qemu overwrites this buffer with a message returned from QGS. 
+ */ +}; + +typedef struct TdxGenerateQuoteTask { + hwaddr buf_gpa; + hwaddr payload_gpa; + uint64_t payload_len; + + char *send_data; + uint64_t send_data_size; + uint64_t send_data_sent; + + char *receive_buf; + uint64_t receive_buf_received; + + uint64_t status_code; + struct tdx_get_quote_header hdr; + + QIOChannelSocket *sioc; + guint watch; + QEMUTimer timer; + + void (*completion)(struct TdxGenerateQuoteTask *task); + void *opaque; +} TdxGenerateQuoteTask; + +void tdx_generate_quote(TdxGenerateQuoteTask *task, SocketAddress *qg_sock_addr); + +#endif /* QEMU_I386_TDX_QUOTE_GENERATOR_H */ diff --git a/target/i386/kvm/tdx-stub.c b/target/i386/kvm/tdx-stub.c new file mode 100644 index 00000000000..1f0e108a69e --- /dev/null +++ b/target/i386/kvm/tdx-stub.c @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" + +#include "tdx.h" + +int tdx_pre_create_vcpu(CPUState *cpu, Error **errp) +{ + return -EINVAL; +} + +int tdx_parse_tdvf(void *flash_ptr, int size) +{ + return -EINVAL; +} + +int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run) +{ + return -EINVAL; +} + +void tdx_handle_get_quote(X86CPU *cpu, struct kvm_run *run) +{ +} + +void tdx_handle_get_tdvmcall_info(X86CPU *cpu, struct kvm_run *run) +{ +} + +void tdx_handle_setup_event_notify_interrupt(X86CPU *cpu, struct kvm_run *run) +{ +} diff --git a/target/i386/kvm/tdx.c b/target/i386/kvm/tdx.c new file mode 100644 index 00000000000..dbf0fa2c918 --- /dev/null +++ b/target/i386/kvm/tdx.c @@ -0,0 +1,1548 @@ +/* + * QEMU TDX support + * + * Copyright (c) 2025 Intel Corporation + * + * Author: + * Xiaoyao Li + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/base64.h" +#include "qemu/mmap-alloc.h" +#include "qapi/error.h" +#include "qapi/qapi-visit-sockets.h" +#include "qom/object_interfaces.h" +#include "crypto/hash.h" +#include "system/kvm_int.h" +#include "system/runstate.h" +#include "system/system.h" +#include "system/ramblock.h" +#include "system/address-spaces.h" + +#include + +#include "cpu.h" +#include "cpu-internal.h" +#include "host-cpu.h" +#include "hw/i386/apic_internal.h" +#include "hw/i386/apic-msidef.h" +#include "hw/i386/e820_memory_layout.h" +#include "hw/i386/tdvf.h" +#include "hw/i386/x86.h" +#include "hw/i386/tdvf-hob.h" +#include "hw/pci/msi.h" +#include "kvm_i386.h" +#include "tdx.h" +#include "tdx-quote-generator.h" + +#include "standard-headers/asm-x86/kvm_para.h" + +#define TDX_MIN_TSC_FREQUENCY_KHZ (100 * 1000) +#define TDX_MAX_TSC_FREQUENCY_KHZ (10 * 1000 * 1000) + +#define TDX_TD_ATTRIBUTES_DEBUG BIT_ULL(0) +#define TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE BIT_ULL(28) +#define TDX_TD_ATTRIBUTES_PKS BIT_ULL(30) +#define TDX_TD_ATTRIBUTES_PERFMON BIT_ULL(63) + +#define TDX_SUPPORTED_TD_ATTRS (TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE |\ + TDX_TD_ATTRIBUTES_PKS | \ + TDX_TD_ATTRIBUTES_PERFMON) + +#define TDX_SUPPORTED_KVM_FEATURES ((1U << KVM_FEATURE_NOP_IO_DELAY) | \ + (1U << KVM_FEATURE_PV_UNHALT) | \ + (1U << KVM_FEATURE_PV_TLB_FLUSH) | \ + (1U << KVM_FEATURE_PV_SEND_IPI) | \ + (1U << KVM_FEATURE_POLL_CONTROL) | \ + (1U << KVM_FEATURE_PV_SCHED_YIELD) | \ + (1U << KVM_FEATURE_MSI_EXT_DEST_ID)) + +static TdxGuest *tdx_guest; + +static struct kvm_tdx_capabilities *tdx_caps; +static struct kvm_cpuid2 *tdx_supported_cpuid; + +/* Valid after kvm_arch_init()->confidential_guest_kvm_init()->tdx_kvm_init() */ +bool is_tdx_vm(void) +{ + return !!tdx_guest; +} + +enum tdx_ioctl_level { + TDX_VM_IOCTL, + 
TDX_VCPU_IOCTL, +}; + +static int tdx_ioctl_internal(enum tdx_ioctl_level level, void *state, + int cmd_id, __u32 flags, void *data, + Error **errp) +{ + struct kvm_tdx_cmd tdx_cmd = {}; + int r; + + const char *tdx_ioctl_name[] = { + [KVM_TDX_CAPABILITIES] = "KVM_TDX_CAPABILITIES", + [KVM_TDX_INIT_VM] = "KVM_TDX_INIT_VM", + [KVM_TDX_INIT_VCPU] = "KVM_TDX_INIT_VCPU", + [KVM_TDX_INIT_MEM_REGION] = "KVM_TDX_INIT_MEM_REGION", + [KVM_TDX_FINALIZE_VM] = "KVM_TDX_FINALIZE_VM", + [KVM_TDX_GET_CPUID] = "KVM_TDX_GET_CPUID", + }; + + tdx_cmd.id = cmd_id; + tdx_cmd.flags = flags; + tdx_cmd.data = (__u64)(unsigned long)data; + + switch (level) { + case TDX_VM_IOCTL: + r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd); + break; + case TDX_VCPU_IOCTL: + r = kvm_vcpu_ioctl(state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd); + break; + default: + error_setg(errp, "Invalid tdx_ioctl_level %d", level); + return -EINVAL; + } + + if (r < 0) { + error_setg_errno(errp, -r, "TDX ioctl %s failed, hw_errors: 0x%llx", + tdx_ioctl_name[cmd_id], tdx_cmd.hw_error); + } + return r; +} + +static inline int tdx_vm_ioctl(int cmd_id, __u32 flags, void *data, + Error **errp) +{ + return tdx_ioctl_internal(TDX_VM_IOCTL, NULL, cmd_id, flags, data, errp); +} + +static inline int tdx_vcpu_ioctl(CPUState *cpu, int cmd_id, __u32 flags, + void *data, Error **errp) +{ + return tdx_ioctl_internal(TDX_VCPU_IOCTL, cpu, cmd_id, flags, data, errp); +} + +static int get_tdx_capabilities(Error **errp) +{ + struct kvm_tdx_capabilities *caps; + /* 1st generation of TDX reports 6 cpuid configs */ + int nr_cpuid_configs = 6; + size_t size; + int r; + + do { + Error *local_err = NULL; + size = sizeof(struct kvm_tdx_capabilities) + + nr_cpuid_configs * sizeof(struct kvm_cpuid_entry2); + caps = g_malloc0(size); + caps->cpuid.nent = nr_cpuid_configs; + + r = tdx_vm_ioctl(KVM_TDX_CAPABILITIES, 0, caps, &local_err); + if (r == -E2BIG) { + g_free(caps); + nr_cpuid_configs *= 2; + if (nr_cpuid_configs > KVM_MAX_CPUID_ENTRIES) { + error_report("KVM TDX seems broken that number of CPUID entries" + " in kvm_tdx_capabilities exceeds limit: %d", + KVM_MAX_CPUID_ENTRIES); + error_propagate(errp, local_err); + return r; + } + error_free(local_err); + } else if (r < 0) { + g_free(caps); + error_propagate(errp, local_err); + return r; + } + } while (r == -E2BIG); + + tdx_caps = caps; + + return 0; +} + +void tdx_set_tdvf_region(MemoryRegion *tdvf_mr) +{ + assert(!tdx_guest->tdvf_mr); + tdx_guest->tdvf_mr = tdvf_mr; +} + +static TdxFirmwareEntry *tdx_get_hob_entry(TdxGuest *tdx) +{ + TdxFirmwareEntry *entry; + + for_each_tdx_fw_entry(&tdx->tdvf, entry) { + if (entry->type == TDVF_SECTION_TYPE_TD_HOB) { + return entry; + } + } + error_report("TDVF metadata doesn't specify TD_HOB location."); + exit(1); +} + +static void tdx_add_ram_entry(uint64_t address, uint64_t length, + enum TdxRamType type) +{ + uint32_t nr_entries = tdx_guest->nr_ram_entries; + tdx_guest->ram_entries = g_renew(TdxRamEntry, tdx_guest->ram_entries, + nr_entries + 1); + + tdx_guest->ram_entries[nr_entries].address = address; + tdx_guest->ram_entries[nr_entries].length = length; + tdx_guest->ram_entries[nr_entries].type = type; + tdx_guest->nr_ram_entries++; +} + +static int tdx_accept_ram_range(uint64_t address, uint64_t length) +{ + uint64_t head_start, tail_start, head_length, tail_length; + uint64_t tmp_address, tmp_length; + TdxRamEntry *e; + int i = 0; + + do { + if (i == tdx_guest->nr_ram_entries) { + return -1; + } + + e = &tdx_guest->ram_entries[i++]; + } while (address + length <= 
e->address || address >= e->address + e->length); + + /* + * The to-be-accepted ram range must be fully contained by one + * RAM entry. + */ + if (e->address > address || + e->address + e->length < address + length) { + return -1; + } + + if (e->type == TDX_RAM_ADDED) { + return 0; + } + + tmp_address = e->address; + tmp_length = e->length; + + e->address = address; + e->length = length; + e->type = TDX_RAM_ADDED; + + head_length = address - tmp_address; + if (head_length > 0) { + head_start = tmp_address; + tdx_add_ram_entry(head_start, head_length, TDX_RAM_UNACCEPTED); + } + + tail_start = address + length; + if (tail_start < tmp_address + tmp_length) { + tail_length = tmp_address + tmp_length - tail_start; + tdx_add_ram_entry(tail_start, tail_length, TDX_RAM_UNACCEPTED); + } + + return 0; +} + +static int tdx_ram_entry_compare(const void *lhs_, const void* rhs_) +{ + const TdxRamEntry *lhs = lhs_; + const TdxRamEntry *rhs = rhs_; + + if (lhs->address == rhs->address) { + return 0; + } + if (le64_to_cpu(lhs->address) > le64_to_cpu(rhs->address)) { + return 1; + } + return -1; +} + +static void tdx_init_ram_entries(void) +{ + unsigned i, j, nr_e820_entries; + + nr_e820_entries = e820_get_table(NULL); + tdx_guest->ram_entries = g_new(TdxRamEntry, nr_e820_entries); + + for (i = 0, j = 0; i < nr_e820_entries; i++) { + uint64_t addr, len; + + if (e820_get_entry(i, E820_RAM, &addr, &len)) { + tdx_guest->ram_entries[j].address = addr; + tdx_guest->ram_entries[j].length = len; + tdx_guest->ram_entries[j].type = TDX_RAM_UNACCEPTED; + j++; + } + } + tdx_guest->nr_ram_entries = j; +} + +static void tdx_post_init_vcpus(void) +{ + TdxFirmwareEntry *hob; + CPUState *cpu; + + hob = tdx_get_hob_entry(tdx_guest); + CPU_FOREACH(cpu) { + tdx_vcpu_ioctl(cpu, KVM_TDX_INIT_VCPU, 0, (void *)(uintptr_t)hob->address, + &error_fatal); + } +} + +static void tdx_finalize_vm(Notifier *notifier, void *unused) +{ + TdxFirmware *tdvf = &tdx_guest->tdvf; + TdxFirmwareEntry *entry; + RAMBlock *ram_block; + Error *local_err = NULL; + int r; + + tdx_init_ram_entries(); + + for_each_tdx_fw_entry(tdvf, entry) { + switch (entry->type) { + case TDVF_SECTION_TYPE_BFV: + case TDVF_SECTION_TYPE_CFV: + entry->mem_ptr = tdvf->mem_ptr + entry->data_offset; + break; + case TDVF_SECTION_TYPE_TD_HOB: + case TDVF_SECTION_TYPE_TEMP_MEM: + entry->mem_ptr = qemu_ram_mmap(-1, entry->size, + qemu_real_host_page_size(), 0, 0); + if (entry->mem_ptr == MAP_FAILED) { + error_report("Failed to mmap memory for TDVF section %d", + entry->type); + exit(1); + } + if (tdx_accept_ram_range(entry->address, entry->size)) { + error_report("Failed to accept memory for TDVF section %d", + entry->type); + qemu_ram_munmap(-1, entry->mem_ptr, entry->size); + exit(1); + } + break; + default: + error_report("Unsupported TDVF section %d", entry->type); + exit(1); + } + } + + qsort(tdx_guest->ram_entries, tdx_guest->nr_ram_entries, + sizeof(TdxRamEntry), &tdx_ram_entry_compare); + + tdvf_hob_create(tdx_guest, tdx_get_hob_entry(tdx_guest)); + + tdx_post_init_vcpus(); + + for_each_tdx_fw_entry(tdvf, entry) { + struct kvm_tdx_init_mem_region region; + uint32_t flags; + + region = (struct kvm_tdx_init_mem_region) { + .source_addr = (uintptr_t)entry->mem_ptr, + .gpa = entry->address, + .nr_pages = entry->size >> 12, + }; + + flags = entry->attributes & TDVF_SECTION_ATTRIBUTES_MR_EXTEND ? 
+ KVM_TDX_MEASURE_MEMORY_REGION : 0; + + do { + error_free(local_err); + local_err = NULL; + r = tdx_vcpu_ioctl(first_cpu, KVM_TDX_INIT_MEM_REGION, flags, + ®ion, &local_err); + } while (r == -EAGAIN || r == -EINTR); + if (r < 0) { + error_report_err(local_err); + exit(1); + } + + if (entry->type == TDVF_SECTION_TYPE_TD_HOB || + entry->type == TDVF_SECTION_TYPE_TEMP_MEM) { + qemu_ram_munmap(-1, entry->mem_ptr, entry->size); + entry->mem_ptr = NULL; + } + } + + /* + * TDVF image has been copied into private region above via + * KVM_MEMORY_MAPPING. It becomes useless. + */ + ram_block = tdx_guest->tdvf_mr->ram_block; + ram_block_discard_range(ram_block, 0, ram_block->max_length); + + tdx_vm_ioctl(KVM_TDX_FINALIZE_VM, 0, NULL, &error_fatal); + CONFIDENTIAL_GUEST_SUPPORT(tdx_guest)->ready = true; +} + +static Notifier tdx_machine_done_notify = { + .notify = tdx_finalize_vm, +}; + +/* + * Some CPUID bits change from fixed1 to configurable bits when TDX module + * supports TDX_FEATURES0.VE_REDUCTION. e.g., MCA/MCE/MTRR/CORE_CAPABILITY. + * + * To make QEMU work with all the versions of TDX module, keep the fixed1 bits + * here if they are ever fixed1 bits in any of the version though not fixed1 in + * the latest version. Otherwise, with the older version of TDX module, QEMU may + * treat the fixed1 bit as unsupported. + * + * For newer TDX module, it does no harm to keep them in tdx_fixed1_bits even + * though they changed to configurable bits. Because tdx_fixed1_bits is used to + * setup the supported bits. + */ +KvmCpuidInfo tdx_fixed1_bits = { + .cpuid.nent = 8, + .entries[0] = { + .function = 0x1, + .index = 0, + .ecx = CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_DTES64 | + CPUID_EXT_DSCPL | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | + CPUID_EXT_PDCM | CPUID_EXT_PCID | CPUID_EXT_SSE41 | + CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | + CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_XSAVE | + CPUID_EXT_RDRAND | CPUID_EXT_HYPERVISOR, + .edx = CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | + CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | + CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | + CPUID_PAT | CPUID_CLFLUSH | CPUID_DTS | CPUID_MMX | CPUID_FXSR | + CPUID_SSE | CPUID_SSE2, + }, + .entries[1] = { + .function = 0x6, + .index = 0, + .eax = CPUID_6_EAX_ARAT, + }, + .entries[2] = { + .function = 0x7, + .index = 0, + .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX, + .ebx = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_FDP_EXCPTN_ONLY | + CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_ZERO_FCS_FDS | CPUID_7_0_EBX_RDSEED | + CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | + CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_SHA_NI, + .ecx = CPUID_7_0_ECX_BUS_LOCK_DETECT | CPUID_7_0_ECX_MOVDIRI | + CPUID_7_0_ECX_MOVDIR64B, + .edx = CPUID_7_0_EDX_MD_CLEAR | CPUID_7_0_EDX_SPEC_CTRL | + CPUID_7_0_EDX_STIBP | CPUID_7_0_EDX_FLUSH_L1D | + CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_CORE_CAPABILITY | + CPUID_7_0_EDX_SPEC_CTRL_SSBD, + }, + .entries[3] = { + .function = 0x7, + .index = 2, + .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX, + .edx = CPUID_7_2_EDX_PSFD | CPUID_7_2_EDX_IPRED_CTRL | + CPUID_7_2_EDX_RRSBA_CTRL | CPUID_7_2_EDX_BHI_CTRL, + }, + .entries[4] = { + .function = 0xD, + .index = 0, + .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX, + .eax = XSTATE_FP_MASK | XSTATE_SSE_MASK, + }, + .entries[5] = { + .function = 0xD, + .index = 1, + .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX, + .eax = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC| + CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, + 
}, + .entries[6] = { + .function = 0x80000001, + .index = 0, + .ecx = CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH, + /* + * Strictly speaking, SYSCALL is not fixed1 bit since it depends on + * the CPU to be in 64-bit mode. But here fixed1 is used to serve the + * purpose of supported bits for TDX. In this sense, SYSCALL is always + * supported. + */ + .edx = CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, + }, + .entries[7] = { + .function = 0x80000007, + .index = 0, + .edx = CPUID_APM_INVTSC, + }, +}; + +typedef struct TdxAttrsMap { + uint32_t attr_index; + uint32_t cpuid_leaf; + uint32_t cpuid_subleaf; + int cpuid_reg; + uint32_t feat_mask; +} TdxAttrsMap; + +static TdxAttrsMap tdx_attrs_maps[] = { + {.attr_index = 27, + .cpuid_leaf = 7, + .cpuid_subleaf = 1, + .cpuid_reg = R_EAX, + .feat_mask = CPUID_7_1_EAX_LASS,}, + + {.attr_index = 30, + .cpuid_leaf = 7, + .cpuid_subleaf = 0, + .cpuid_reg = R_ECX, + .feat_mask = CPUID_7_0_ECX_PKS,}, + + {.attr_index = 31, + .cpuid_leaf = 7, + .cpuid_subleaf = 0, + .cpuid_reg = R_ECX, + .feat_mask = CPUID_7_0_ECX_KeyLocker,}, +}; + +typedef struct TdxXFAMDep { + int xfam_bit; + FeatureMask feat_mask; +} TdxXFAMDep; + +/* + * Note, only the CPUID bits whose virtualization type are "XFAM & Native" are + * defined here. + * + * For those whose virtualization type are "XFAM & Configured & Native", they + * are reported as configurable bits. And they are not supported if not in the + * configurable bits list from KVM even if the corresponding XFAM bit is + * supported. + */ +TdxXFAMDep tdx_xfam_deps[] = { + { XSTATE_YMM_BIT, { FEAT_1_ECX, CPUID_EXT_FMA }}, + { XSTATE_YMM_BIT, { FEAT_7_0_EBX, CPUID_7_0_EBX_AVX2 }}, + { XSTATE_OPMASK_BIT, { FEAT_7_0_ECX, CPUID_7_0_ECX_AVX512_VBMI}}, + { XSTATE_OPMASK_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AVX512_FP16}}, + { XSTATE_PT_BIT, { FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT}}, + { XSTATE_PKRU_BIT, { FEAT_7_0_ECX, CPUID_7_0_ECX_PKU}}, + { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_BF16 }}, + { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_TILE }}, + { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_INT8 }}, +}; + +static struct kvm_cpuid_entry2 *find_in_supported_entry(uint32_t function, + uint32_t index) +{ + struct kvm_cpuid_entry2 *e; + + e = cpuid_find_entry(tdx_supported_cpuid, function, index); + if (!e) { + if (tdx_supported_cpuid->nent >= KVM_MAX_CPUID_ENTRIES) { + error_report("tdx_supported_cpuid requires more space than %d entries", + KVM_MAX_CPUID_ENTRIES); + exit(1); + } + e = &tdx_supported_cpuid->entries[tdx_supported_cpuid->nent++]; + e->function = function; + e->index = index; + } + + return e; +} + +static void tdx_add_supported_cpuid_by_fixed1_bits(void) +{ + struct kvm_cpuid_entry2 *e, *e1; + int i; + + for (i = 0; i < tdx_fixed1_bits.cpuid.nent; i++) { + e = &tdx_fixed1_bits.entries[i]; + + e1 = find_in_supported_entry(e->function, e->index); + e1->eax |= e->eax; + e1->ebx |= e->ebx; + e1->ecx |= e->ecx; + e1->edx |= e->edx; + } +} + +static void tdx_add_supported_cpuid_by_attrs(void) +{ + struct kvm_cpuid_entry2 *e; + TdxAttrsMap *map; + int i; + + for (i = 0; i < ARRAY_SIZE(tdx_attrs_maps); i++) { + map = &tdx_attrs_maps[i]; + if (!((1ULL << map->attr_index) & tdx_caps->supported_attrs)) { + continue; + } + + e = find_in_supported_entry(map->cpuid_leaf, map->cpuid_subleaf); + + switch(map->cpuid_reg) { + case R_EAX: + e->eax |= map->feat_mask; + break; + case R_EBX: + e->ebx |= map->feat_mask; + break; + 
case R_ECX: + e->ecx |= map->feat_mask; + break; + case R_EDX: + e->edx |= map->feat_mask; + break; + } + } +} + +static void tdx_add_supported_cpuid_by_xfam(void) +{ + struct kvm_cpuid_entry2 *e; + int i; + + const TdxXFAMDep *xfam_dep; + const FeatureWordInfo *f; + for (i = 0; i < ARRAY_SIZE(tdx_xfam_deps); i++) { + xfam_dep = &tdx_xfam_deps[i]; + if (!((1ULL << xfam_dep->xfam_bit) & tdx_caps->supported_xfam)) { + continue; + } + + f = &feature_word_info[xfam_dep->feat_mask.index]; + if (f->type != CPUID_FEATURE_WORD) { + continue; + } + + e = find_in_supported_entry(f->cpuid.eax, f->cpuid.ecx); + switch(f->cpuid.reg) { + case R_EAX: + e->eax |= xfam_dep->feat_mask.mask; + break; + case R_EBX: + e->ebx |= xfam_dep->feat_mask.mask; + break; + case R_ECX: + e->ecx |= xfam_dep->feat_mask.mask; + break; + case R_EDX: + e->edx |= xfam_dep->feat_mask.mask; + break; + } + } + + e = find_in_supported_entry(0xd, 0); + e->eax |= (tdx_caps->supported_xfam & CPUID_XSTATE_XCR0_MASK); + e->edx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XCR0_MASK) >> 32; + + e = find_in_supported_entry(0xd, 1); + /* + * Mark XFD always support for TDX, it will be cleared finally in + * tdx_adjust_cpuid_features() if XFD is unavailable on the hardware + * because in this case the original data has it as 0. + */ + e->eax |= CPUID_XSAVE_XFD; + e->ecx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XSS_MASK); + e->edx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XSS_MASK) >> 32; +} + +static void tdx_add_supported_kvm_features(void) +{ + struct kvm_cpuid_entry2 *e; + + e = find_in_supported_entry(0x40000001, 0); + e->eax = TDX_SUPPORTED_KVM_FEATURES; +} + +static void tdx_setup_supported_cpuid(void) +{ + if (tdx_supported_cpuid) { + return; + } + + tdx_supported_cpuid = g_malloc0(sizeof(*tdx_supported_cpuid) + + KVM_MAX_CPUID_ENTRIES * sizeof(struct kvm_cpuid_entry2)); + + memcpy(tdx_supported_cpuid->entries, tdx_caps->cpuid.entries, + tdx_caps->cpuid.nent * sizeof(struct kvm_cpuid_entry2)); + tdx_supported_cpuid->nent = tdx_caps->cpuid.nent; + + tdx_add_supported_cpuid_by_fixed1_bits(); + tdx_add_supported_cpuid_by_attrs(); + tdx_add_supported_cpuid_by_xfam(); + + tdx_add_supported_kvm_features(); +} + +static int tdx_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + X86MachineState *x86ms = X86_MACHINE(ms); + TdxGuest *tdx = TDX_GUEST(cgs); + int r = 0; + + kvm_mark_guest_state_protected(); + + if (x86ms->smm == ON_OFF_AUTO_AUTO) { + x86ms->smm = ON_OFF_AUTO_OFF; + } else if (x86ms->smm == ON_OFF_AUTO_ON) { + error_setg(errp, "TDX VM doesn't support SMM"); + return -EINVAL; + } + + if (x86ms->pic == ON_OFF_AUTO_AUTO) { + x86ms->pic = ON_OFF_AUTO_OFF; + } else if (x86ms->pic == ON_OFF_AUTO_ON) { + error_setg(errp, "TDX VM doesn't support PIC"); + return -EINVAL; + } + + if (kvm_state->kernel_irqchip_split == ON_OFF_AUTO_AUTO) { + kvm_state->kernel_irqchip_split = ON_OFF_AUTO_ON; + } else if (kvm_state->kernel_irqchip_split != ON_OFF_AUTO_ON) { + error_setg(errp, "TDX VM requires kernel_irqchip to be split"); + return -EINVAL; + } + + if (!tdx_caps) { + r = get_tdx_capabilities(errp); + if (r) { + return r; + } + } + + tdx_setup_supported_cpuid(); + + /* TDX relies on KVM_HC_MAP_GPA_RANGE to handle TDG.VP.VMCALL */ + if (!kvm_enable_hypercall(BIT_ULL(KVM_HC_MAP_GPA_RANGE))) { + return -EOPNOTSUPP; + } + + /* + * Set kvm_readonly_mem_allowed to false, because TDX only supports readonly + * memory for shared memory but not for private memory. 
Besides, whether a + * memslot is private or shared is not determined by QEMU. + * + * Thus, just mark readonly memory not supported for simplicity. + */ + kvm_readonly_mem_allowed = false; + + qemu_add_machine_init_done_notifier(&tdx_machine_done_notify); + + tdx_guest = tdx; + return 0; +} + +static int tdx_kvm_type(X86ConfidentialGuest *cg) +{ + /* Do the object check */ + TDX_GUEST(cg); + + return KVM_X86_TDX_VM; +} + +static void tdx_cpu_instance_init(X86ConfidentialGuest *cg, CPUState *cpu) +{ + X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); + X86CPU *x86cpu = X86_CPU(cpu); + + if (xcc->model) { + error_report("Named cpu model is not supported for TDX yet!"); + exit(1); + } + + object_property_set_bool(OBJECT(cpu), "pmu", false, &error_abort); + + /* invtsc is fixed1 for TD guest */ + object_property_set_bool(OBJECT(cpu), "invtsc", true, &error_abort); + + x86cpu->force_cpuid_0x1f = true; +} + +static uint32_t tdx_adjust_cpuid_features(X86ConfidentialGuest *cg, + uint32_t feature, uint32_t index, + int reg, uint32_t value) +{ + struct kvm_cpuid_entry2 *e; + + e = cpuid_find_entry(&tdx_fixed1_bits.cpuid, feature, index); + if (e) { + value |= cpuid_entry_get_reg(e, reg); + } + + if (is_feature_word_cpuid(feature, index, reg)) { + e = cpuid_find_entry(tdx_supported_cpuid, feature, index); + if (e) { + value &= cpuid_entry_get_reg(e, reg); + } + } + + return value; +} + +static struct kvm_cpuid2 *tdx_fetch_cpuid(CPUState *cpu, int *ret) +{ + struct kvm_cpuid2 *fetch_cpuid; + int size = KVM_MAX_CPUID_ENTRIES; + Error *local_err = NULL; + int r; + + do { + error_free(local_err); + local_err = NULL; + + fetch_cpuid = g_malloc0(sizeof(*fetch_cpuid) + + sizeof(struct kvm_cpuid_entry2) * size); + fetch_cpuid->nent = size; + r = tdx_vcpu_ioctl(cpu, KVM_TDX_GET_CPUID, 0, fetch_cpuid, &local_err); + if (r == -E2BIG) { + g_free(fetch_cpuid); + size = fetch_cpuid->nent; + } + } while (r == -E2BIG); + + if (r < 0) { + error_report_err(local_err); + *ret = r; + return NULL; + } + + return fetch_cpuid; +} + +static int tdx_check_features(X86ConfidentialGuest *cg, CPUState *cs) +{ + uint64_t actual, requested, unavailable, forced_on; + g_autofree struct kvm_cpuid2 *fetch_cpuid; + const char *forced_on_prefix = NULL; + const char *unav_prefix = NULL; + struct kvm_cpuid_entry2 *entry; + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + FeatureWordInfo *wi; + FeatureWord w; + bool mismatch = false; + int r; + + fetch_cpuid = tdx_fetch_cpuid(cs, &r); + if (!fetch_cpuid) { + return r; + } + + if (cpu->check_cpuid || cpu->enforce_cpuid) { + unav_prefix = "TDX doesn't support requested feature"; + forced_on_prefix = "TDX forcibly sets the feature"; + } + + for (w = 0; w < FEATURE_WORDS; w++) { + wi = &feature_word_info[w]; + actual = 0; + + switch (wi->type) { + case CPUID_FEATURE_WORD: + entry = cpuid_find_entry(fetch_cpuid, wi->cpuid.eax, wi->cpuid.ecx); + if (!entry) { + /* + * If KVM doesn't report it means it's totally configurable + * by QEMU + */ + continue; + } + + actual = cpuid_entry_get_reg(entry, wi->cpuid.reg); + break; + case MSR_FEATURE_WORD: + /* + * TODO: + * validate MSR features when KVM has interface report them. + */ + continue; + } + + /* Fixup for special cases */ + switch (w) { + case FEAT_8000_0001_EDX: + /* + * Intel enumerates SYSCALL bit as 1 only when processor in 64-bit + * mode and before vcpu running it's not in 64-bit mode. 
+ */ + actual |= CPUID_EXT2_SYSCALL; + break; + default: + break; + } + + requested = env->features[w]; + unavailable = requested & ~actual; + mark_unavailable_features(cpu, w, unavailable, unav_prefix); + if (unavailable) { + mismatch = true; + } + + forced_on = actual & ~requested; + mark_forced_on_features(cpu, w, forced_on, forced_on_prefix); + if (forced_on) { + mismatch = true; + } + } + + if (cpu->enforce_cpuid && mismatch) { + return -EINVAL; + } + + if (cpu->phys_bits != host_cpu_phys_bits()) { + error_report("TDX requires guest CPU physical bits (%u) " + "to match host CPU physical bits (%u)", + cpu->phys_bits, host_cpu_phys_bits()); + return -EINVAL; + } + + return 0; +} + +static int tdx_validate_attributes(TdxGuest *tdx, Error **errp) +{ + if ((tdx->attributes & ~tdx_caps->supported_attrs)) { + error_setg(errp, "Invalid attributes 0x%"PRIx64" for TDX VM " + "(KVM supported: 0x%"PRIx64")", tdx->attributes, + (uint64_t)tdx_caps->supported_attrs); + return -1; + } + + if (tdx->attributes & ~TDX_SUPPORTED_TD_ATTRS) { + error_setg(errp, "Some QEMU unsupported TD attribute bits being " + "requested: 0x%"PRIx64" (QEMU supported: 0x%"PRIx64")", + tdx->attributes, (uint64_t)TDX_SUPPORTED_TD_ATTRS); + return -1; + } + + return 0; +} + +static int setup_td_guest_attributes(X86CPU *x86cpu, Error **errp) +{ + CPUX86State *env = &x86cpu->env; + + tdx_guest->attributes |= (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS) ? + TDX_TD_ATTRIBUTES_PKS : 0; + tdx_guest->attributes |= x86cpu->enable_pmu ? TDX_TD_ATTRIBUTES_PERFMON : 0; + + return tdx_validate_attributes(tdx_guest, errp); +} + +static int setup_td_xfam(X86CPU *x86cpu, Error **errp) +{ + CPUX86State *env = &x86cpu->env; + uint64_t xfam; + + xfam = env->features[FEAT_XSAVE_XCR0_LO] | + env->features[FEAT_XSAVE_XCR0_HI] | + env->features[FEAT_XSAVE_XSS_LO] | + env->features[FEAT_XSAVE_XSS_HI]; + + if (xfam & ~tdx_caps->supported_xfam) { + error_setg(errp, "Invalid XFAM 0x%"PRIx64" for TDX VM (supported: 0x%"PRIx64"))", + xfam, (uint64_t)tdx_caps->supported_xfam); + return -1; + } + + tdx_guest->xfam = xfam; + return 0; +} + +static void tdx_filter_cpuid(struct kvm_cpuid2 *cpuids) +{ + int i, dest_cnt = 0; + struct kvm_cpuid_entry2 *src, *dest, *conf; + + for (i = 0; i < cpuids->nent; i++) { + src = cpuids->entries + i; + conf = cpuid_find_entry(&tdx_caps->cpuid, src->function, src->index); + if (!conf) { + continue; + } + dest = cpuids->entries + dest_cnt; + + dest->function = src->function; + dest->index = src->index; + dest->flags = src->flags; + dest->eax = src->eax & conf->eax; + dest->ebx = src->ebx & conf->ebx; + dest->ecx = src->ecx & conf->ecx; + dest->edx = src->edx & conf->edx; + + dest_cnt++; + } + cpuids->nent = dest_cnt++; +} + +int tdx_pre_create_vcpu(CPUState *cpu, Error **errp) +{ + X86CPU *x86cpu = X86_CPU(cpu); + CPUX86State *env = &x86cpu->env; + g_autofree struct kvm_tdx_init_vm *init_vm = NULL; + Error *local_err = NULL; + size_t data_len; + int retry = 10000; + int r = 0; + + QEMU_LOCK_GUARD(&tdx_guest->lock); + if (tdx_guest->initialized) { + return r; + } + + init_vm = g_malloc0(sizeof(struct kvm_tdx_init_vm) + + sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES); + + if (!kvm_check_extension(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS)) { + error_setg(errp, "KVM doesn't support KVM_CAP_X86_APIC_BUS_CYCLES_NS"); + return -EOPNOTSUPP; + } + + r = kvm_vm_enable_cap(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS, + 0, TDX_APIC_BUS_CYCLES_NS); + if (r < 0) { + error_setg_errno(errp, -r, + "Unable to set core crystal 
clock frequency to 25MHz"); + return r; + } + + if (env->tsc_khz && (env->tsc_khz < TDX_MIN_TSC_FREQUENCY_KHZ || + env->tsc_khz > TDX_MAX_TSC_FREQUENCY_KHZ)) { + error_setg(errp, "Invalid TSC %"PRId64" KHz, must specify cpu_frequency " + "between [%d, %d] kHz", env->tsc_khz, + TDX_MIN_TSC_FREQUENCY_KHZ, TDX_MAX_TSC_FREQUENCY_KHZ); + return -EINVAL; + } + + if (env->tsc_khz % (25 * 1000)) { + error_setg(errp, "Invalid TSC %"PRId64" KHz, it must be multiple of 25MHz", + env->tsc_khz); + return -EINVAL; + } + + /* it's safe even env->tsc_khz is 0. KVM uses host's tsc_khz in this case */ + r = kvm_vm_ioctl(kvm_state, KVM_SET_TSC_KHZ, env->tsc_khz); + if (r < 0) { + error_setg_errno(errp, -r, "Unable to set TSC frequency to %"PRId64" kHz", + env->tsc_khz); + return r; + } + + if (tdx_guest->mrconfigid) { + g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrconfigid, + strlen(tdx_guest->mrconfigid), &data_len, errp); + if (!data) { + return -1; + } + if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) { + error_setg(errp, "TDX 'mrconfigid' sha384 digest was %ld bytes, " + "expected %d bytes", data_len, + QCRYPTO_HASH_DIGEST_LEN_SHA384); + return -1; + } + memcpy(init_vm->mrconfigid, data, data_len); + } + + if (tdx_guest->mrowner) { + g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrowner, + strlen(tdx_guest->mrowner), &data_len, errp); + if (!data) { + return -1; + } + if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) { + error_setg(errp, "TDX 'mrowner' sha384 digest was %ld bytes, " + "expected %d bytes", data_len, + QCRYPTO_HASH_DIGEST_LEN_SHA384); + return -1; + } + memcpy(init_vm->mrowner, data, data_len); + } + + if (tdx_guest->mrownerconfig) { + g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrownerconfig, + strlen(tdx_guest->mrownerconfig), &data_len, errp); + if (!data) { + return -1; + } + if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) { + error_setg(errp, "TDX 'mrownerconfig' sha384 digest was %ld bytes, " + "expected %d bytes", data_len, + QCRYPTO_HASH_DIGEST_LEN_SHA384); + return -1; + } + memcpy(init_vm->mrownerconfig, data, data_len); + } + + r = setup_td_guest_attributes(x86cpu, errp); + if (r) { + return r; + } + + r = setup_td_xfam(x86cpu, errp); + if (r) { + return r; + } + + init_vm->cpuid.nent = kvm_x86_build_cpuid(env, init_vm->cpuid.entries, 0); + tdx_filter_cpuid(&init_vm->cpuid); + + init_vm->attributes = tdx_guest->attributes; + init_vm->xfam = tdx_guest->xfam; + + /* + * KVM_TDX_INIT_VM gets -EAGAIN when KVM side SEAMCALL(TDH_MNG_CREATE) + * gets TDX_RND_NO_ENTROPY due to Random number generation (e.g., RDRAND or + * RDSEED) is busy. + * + * Retry for the case. 
+ */ + do { + error_free(local_err); + local_err = NULL; + r = tdx_vm_ioctl(KVM_TDX_INIT_VM, 0, init_vm, &local_err); + } while (r == -EAGAIN && --retry); + + if (r < 0) { + if (!retry) { + error_append_hint(&local_err, "Hardware RNG (Random Number " + "Generator) is busy occupied by someone (via RDRAND/RDSEED) " + "maliciously, which leads to KVM_TDX_INIT_VM keeping failure " + "due to lack of entropy.\n"); + } + error_propagate(errp, local_err); + return r; + } + + tdx_guest->initialized = true; + + return 0; +} + +int tdx_parse_tdvf(void *flash_ptr, int size) +{ + return tdvf_parse_metadata(&tdx_guest->tdvf, flash_ptr, size); +} + +static void tdx_inject_interrupt(TdxGuest *tdx) +{ + int ret; + uint32_t apicid, vector; + + qemu_mutex_lock(&tdx->lock); + vector = tdx->event_notify_vector; + apicid = tdx->event_notify_apicid; + qemu_mutex_unlock(&tdx->lock); + if (vector < 32 || vector > 255) { + return; + } + + MSIMessage msg = { + .address = ((apicid & 0xff) << MSI_ADDR_DEST_ID_SHIFT) | + (((uint64_t)apicid & 0xffffff00) << 32), + .data = vector | (APIC_DM_FIXED << MSI_DATA_DELIVERY_MODE_SHIFT), + }; + + ret = kvm_irqchip_send_msi(kvm_state, msg); + if (ret < 0) { + /* In this case, no better way to tell it to guest. Log it. */ + error_report("TDX: injection interrupt %d failed, interrupt lost (%s).", + vector, strerror(-ret)); + } +} + +static void tdx_get_quote_completion(TdxGenerateQuoteTask *task) +{ + TdxGuest *tdx = task->opaque; + int ret; + + /* Maintain the number of in-flight requests. */ + qemu_mutex_lock(&tdx->lock); + tdx->num--; + qemu_mutex_unlock(&tdx->lock); + + if (task->status_code == TDX_VP_GET_QUOTE_SUCCESS) { + ret = address_space_write(&address_space_memory, task->payload_gpa, + MEMTXATTRS_UNSPECIFIED, task->receive_buf, + task->receive_buf_received); + if (ret != MEMTX_OK) { + error_report("TDX: get-quote: failed to write quote data."); + } else { + task->hdr.out_len = cpu_to_le64(task->receive_buf_received); + } + } + task->hdr.error_code = cpu_to_le64(task->status_code); + + /* Publish the response contents before marking this request completed. */ + smp_wmb(); + ret = address_space_write(&address_space_memory, task->buf_gpa, + MEMTXATTRS_UNSPECIFIED, &task->hdr, + TDX_GET_QUOTE_HDR_SIZE); + if (ret != MEMTX_OK) { + error_report("TDX: get-quote: failed to update GetQuote header."); + } + + tdx_inject_interrupt(tdx); + + g_free(task->send_data); + g_free(task->receive_buf); + g_free(task); + object_unref(tdx); +} + +void tdx_handle_get_quote(X86CPU *cpu, struct kvm_run *run) +{ + TdxGenerateQuoteTask *task; + struct tdx_get_quote_header hdr; + hwaddr buf_gpa = run->tdx.get_quote.gpa; + uint64_t buf_len = run->tdx.get_quote.size; + + QEMU_BUILD_BUG_ON(sizeof(struct tdx_get_quote_header) != TDX_GET_QUOTE_HDR_SIZE); + + run->tdx.get_quote.ret = TDG_VP_VMCALL_INVALID_OPERAND; + + if (buf_len == 0) { + return; + } + + if (!QEMU_IS_ALIGNED(buf_gpa, 4096) || !QEMU_IS_ALIGNED(buf_len, 4096)) { + run->tdx.get_quote.ret = TDG_VP_VMCALL_ALIGN_ERROR; + return; + } + + if (address_space_read(&address_space_memory, buf_gpa, MEMTXATTRS_UNSPECIFIED, + &hdr, TDX_GET_QUOTE_HDR_SIZE) != MEMTX_OK) { + error_report("TDX: get-quote: failed to read GetQuote header."); + return; + } + + if (le64_to_cpu(hdr.structure_version) != TDX_GET_QUOTE_STRUCTURE_VERSION) { + return; + } + + /* Only safe-guard check to avoid too large buffer size. 
*/ + if (buf_len > TDX_GET_QUOTE_MAX_BUF_LEN || + le32_to_cpu(hdr.in_len) > buf_len - TDX_GET_QUOTE_HDR_SIZE) { + return; + } + + if (!tdx_guest->qg_sock_addr) { + hdr.error_code = cpu_to_le64(TDX_VP_GET_QUOTE_QGS_UNAVAILABLE); + if (address_space_write(&address_space_memory, buf_gpa, + MEMTXATTRS_UNSPECIFIED, + &hdr, TDX_GET_QUOTE_HDR_SIZE) != MEMTX_OK) { + error_report("TDX: failed to update GetQuote header."); + return; + } + run->tdx.get_quote.ret = TDG_VP_VMCALL_SUCCESS; + return; + } + + qemu_mutex_lock(&tdx_guest->lock); + if (tdx_guest->num >= TDX_MAX_GET_QUOTE_REQUEST) { + qemu_mutex_unlock(&tdx_guest->lock); + run->tdx.get_quote.ret = TDG_VP_VMCALL_RETRY; + return; + } + tdx_guest->num++; + qemu_mutex_unlock(&tdx_guest->lock); + + task = g_new(TdxGenerateQuoteTask, 1); + task->buf_gpa = buf_gpa; + task->payload_gpa = buf_gpa + TDX_GET_QUOTE_HDR_SIZE; + task->payload_len = buf_len - TDX_GET_QUOTE_HDR_SIZE; + task->hdr = hdr; + task->completion = tdx_get_quote_completion; + + task->send_data_size = le32_to_cpu(hdr.in_len); + task->send_data = g_malloc(task->send_data_size); + task->send_data_sent = 0; + + if (address_space_read(&address_space_memory, task->payload_gpa, + MEMTXATTRS_UNSPECIFIED, task->send_data, + task->send_data_size) != MEMTX_OK) { + goto out_free; + } + + /* Mark the buffer in-flight. */ + hdr.error_code = cpu_to_le64(TDX_VP_GET_QUOTE_IN_FLIGHT); + if (address_space_write(&address_space_memory, buf_gpa, + MEMTXATTRS_UNSPECIFIED, + &hdr, TDX_GET_QUOTE_HDR_SIZE) != MEMTX_OK) { + goto out_free; + } + + task->receive_buf = g_malloc0(task->payload_len); + task->receive_buf_received = 0; + task->opaque = tdx_guest; + + object_ref(tdx_guest); + tdx_generate_quote(task, tdx_guest->qg_sock_addr); + run->tdx.get_quote.ret = TDG_VP_VMCALL_SUCCESS; + return; + +out_free: + g_free(task->send_data); + g_free(task); +} + +#define SUPPORTED_TDVMCALLINFO_1_R11 (TDG_VP_VMCALL_SUBFUNC_SET_EVENT_NOTIFY_INTERRUPT) +#define SUPPORTED_TDVMCALLINFO_1_R12 (0) + +void tdx_handle_get_tdvmcall_info(X86CPU *cpu, struct kvm_run *run) +{ + if (run->tdx.get_tdvmcall_info.leaf != 1) { + return; + } + + run->tdx.get_tdvmcall_info.r11 = (tdx_caps->user_tdvmcallinfo_1_r11 & + SUPPORTED_TDVMCALLINFO_1_R11) | + tdx_caps->kernel_tdvmcallinfo_1_r11; + run->tdx.get_tdvmcall_info.r12 = (tdx_caps->user_tdvmcallinfo_1_r12 & + SUPPORTED_TDVMCALLINFO_1_R12) | + tdx_caps->kernel_tdvmcallinfo_1_r12; + run->tdx.get_tdvmcall_info.r13 = 0; + run->tdx.get_tdvmcall_info.r14 = 0; + + run->tdx.get_tdvmcall_info.ret = TDG_VP_VMCALL_SUCCESS; +} + +void tdx_handle_setup_event_notify_interrupt(X86CPU *cpu, struct kvm_run *run) +{ + uint64_t vector = run->tdx.setup_event_notify.vector; + + if (vector >= 32 && vector < 256) { + qemu_mutex_lock(&tdx_guest->lock); + tdx_guest->event_notify_vector = vector; + tdx_guest->event_notify_apicid = cpu->apic_id; + qemu_mutex_unlock(&tdx_guest->lock); + run->tdx.setup_event_notify.ret = TDG_VP_VMCALL_SUCCESS; + } else { + run->tdx.setup_event_notify.ret = TDG_VP_VMCALL_INVALID_OPERAND; + } +} + +static void tdx_panicked_on_fatal_error(X86CPU *cpu, uint64_t error_code, + char *message, bool has_gpa, + uint64_t gpa) +{ + GuestPanicInformation *panic_info; + + panic_info = g_new0(GuestPanicInformation, 1); + panic_info->type = GUEST_PANIC_INFORMATION_TYPE_TDX; + panic_info->u.tdx.error_code = (uint32_t) error_code; + panic_info->u.tdx.message = message; + panic_info->u.tdx.gpa = gpa; + panic_info->u.tdx.has_gpa = has_gpa; + + qemu_system_guest_panicked(panic_info); +} + +/* + * Only 8 
registers can contain valid ASCII byte stream to form the fatal + * message, and their sequence is: R14, R15, RBX, RDI, RSI, R8, R9, RDX + */ +#define TDX_FATAL_MESSAGE_MAX 64 + +#define TDX_REPORT_FATAL_ERROR_GPA_VALID BIT_ULL(63) + +int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run) +{ + uint64_t error_code = run->system_event.data[R_R12]; + uint64_t reg_mask = run->system_event.data[R_ECX]; + char *message = NULL; + uint64_t *tmp; + uint64_t gpa = -1ull; + bool has_gpa = false; + + if (error_code & 0xffff) { + error_report("TDX: REPORT_FATAL_ERROR: invalid error code: 0x%"PRIx64, + error_code); + return -1; + } + + if (reg_mask) { + message = g_malloc0(TDX_FATAL_MESSAGE_MAX + 1); + tmp = (uint64_t *)message; + +#define COPY_REG(REG) \ + do { \ + if (reg_mask & BIT_ULL(REG)) { \ + *(tmp++) = run->system_event.data[REG]; \ + } \ + } while (0) + + COPY_REG(R_R14); + COPY_REG(R_R15); + COPY_REG(R_EBX); + COPY_REG(R_EDI); + COPY_REG(R_ESI); + COPY_REG(R_R8); + COPY_REG(R_R9); + COPY_REG(R_EDX); + *((char *)tmp) = '\0'; + } +#undef COPY_REG + + if (error_code & TDX_REPORT_FATAL_ERROR_GPA_VALID) { + gpa = run->system_event.data[R_R13]; + has_gpa = true; + } + + tdx_panicked_on_fatal_error(cpu, error_code, message, has_gpa, gpa); + + return -1; +} + +static bool tdx_guest_get_sept_ve_disable(Object *obj, Error **errp) +{ + TdxGuest *tdx = TDX_GUEST(obj); + + return !!(tdx->attributes & TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE); +} + +static void tdx_guest_set_sept_ve_disable(Object *obj, bool value, Error **errp) +{ + TdxGuest *tdx = TDX_GUEST(obj); + + if (value) { + tdx->attributes |= TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE; + } else { + tdx->attributes &= ~TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE; + } +} + +static char *tdx_guest_get_mrconfigid(Object *obj, Error **errp) +{ + TdxGuest *tdx = TDX_GUEST(obj); + + return g_strdup(tdx->mrconfigid); +} + +static void tdx_guest_set_mrconfigid(Object *obj, const char *value, Error **errp) +{ + TdxGuest *tdx = TDX_GUEST(obj); + + g_free(tdx->mrconfigid); + tdx->mrconfigid = g_strdup(value); +} + +static char *tdx_guest_get_mrowner(Object *obj, Error **errp) +{ + TdxGuest *tdx = TDX_GUEST(obj); + + return g_strdup(tdx->mrowner); +} + +static void tdx_guest_set_mrowner(Object *obj, const char *value, Error **errp) +{ + TdxGuest *tdx = TDX_GUEST(obj); + + g_free(tdx->mrowner); + tdx->mrowner = g_strdup(value); +} + +static char *tdx_guest_get_mrownerconfig(Object *obj, Error **errp) +{ + TdxGuest *tdx = TDX_GUEST(obj); + + return g_strdup(tdx->mrownerconfig); +} + +static void tdx_guest_set_mrownerconfig(Object *obj, const char *value, Error **errp) +{ + TdxGuest *tdx = TDX_GUEST(obj); + + g_free(tdx->mrownerconfig); + tdx->mrownerconfig = g_strdup(value); +} + +static void tdx_guest_get_qgs(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + TdxGuest *tdx = TDX_GUEST(obj); + + if (!tdx->qg_sock_addr) { + error_setg(errp, "quote-generation-socket is not set"); + return; + } + visit_type_SocketAddress(v, name, &tdx->qg_sock_addr, errp); +} + +static void tdx_guest_set_qgs(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + TdxGuest *tdx = TDX_GUEST(obj); + SocketAddress *sock = NULL; + + if (!visit_type_SocketAddress(v, name, &sock, errp)) { + return; + } + + if (tdx->qg_sock_addr) { + qapi_free_SocketAddress(tdx->qg_sock_addr); + } + + tdx->qg_sock_addr = sock; +} + +/* tdx guest */ +OBJECT_DEFINE_TYPE_WITH_INTERFACES(TdxGuest, + tdx_guest, + TDX_GUEST, + X86_CONFIDENTIAL_GUEST, + { 
TYPE_USER_CREATABLE }, + { NULL }) + +static void tdx_guest_init(Object *obj) +{ + ConfidentialGuestSupport *cgs = CONFIDENTIAL_GUEST_SUPPORT(obj); + TdxGuest *tdx = TDX_GUEST(obj); + + qemu_mutex_init(&tdx->lock); + + cgs->require_guest_memfd = true; + tdx->attributes = TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE; + + object_property_add_uint64_ptr(obj, "attributes", &tdx->attributes, + OBJ_PROP_FLAG_READWRITE); + object_property_add_bool(obj, "sept-ve-disable", + tdx_guest_get_sept_ve_disable, + tdx_guest_set_sept_ve_disable); + object_property_add_str(obj, "mrconfigid", + tdx_guest_get_mrconfigid, + tdx_guest_set_mrconfigid); + object_property_add_str(obj, "mrowner", + tdx_guest_get_mrowner, tdx_guest_set_mrowner); + object_property_add_str(obj, "mrownerconfig", + tdx_guest_get_mrownerconfig, + tdx_guest_set_mrownerconfig); + + object_property_add(obj, "quote-generation-socket", "SocketAddress", + tdx_guest_get_qgs, + tdx_guest_set_qgs, + NULL, NULL); + + tdx->event_notify_vector = -1; + tdx->event_notify_apicid = -1; +} + +static void tdx_guest_finalize(Object *obj) +{ +} + +static void tdx_guest_class_init(ObjectClass *oc, const void *data) +{ + ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc); + X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc); + + klass->kvm_init = tdx_kvm_init; + x86_klass->kvm_type = tdx_kvm_type; + x86_klass->cpu_instance_init = tdx_cpu_instance_init; + x86_klass->adjust_cpuid_features = tdx_adjust_cpuid_features; + x86_klass->check_features = tdx_check_features; +} diff --git a/target/i386/kvm/tdx.h b/target/i386/kvm/tdx.h new file mode 100644 index 00000000000..1c38faf9834 --- /dev/null +++ b/target/i386/kvm/tdx.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef QEMU_I386_TDX_H +#define QEMU_I386_TDX_H + +#ifndef CONFIG_USER_ONLY +#include CONFIG_DEVICES /* CONFIG_TDX */ +#endif + +#include "confidential-guest.h" +#include "cpu.h" +#include "hw/i386/tdvf.h" + +#include "tdx-quote-generator.h" + +#define TYPE_TDX_GUEST "tdx-guest" +#define TDX_GUEST(obj) OBJECT_CHECK(TdxGuest, (obj), TYPE_TDX_GUEST) + +typedef struct TdxGuestClass { + X86ConfidentialGuestClass parent_class; +} TdxGuestClass; + +/* TDX requires bus frequency 25MHz */ +#define TDX_APIC_BUS_CYCLES_NS 40 + +#define TDVMCALL_GET_TD_VM_CALL_INFO 0x10000 +#define TDVMCALL_GET_QUOTE 0x10002 +#define TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT 0x10004 + +#define TDG_VP_VMCALL_SUCCESS 0x0000000000000000ULL +#define TDG_VP_VMCALL_RETRY 0x0000000000000001ULL +#define TDG_VP_VMCALL_INVALID_OPERAND 0x8000000000000000ULL +#define TDG_VP_VMCALL_GPA_INUSE 0x8000000000000001ULL +#define TDG_VP_VMCALL_ALIGN_ERROR 0x8000000000000002ULL + +#define TDG_VP_VMCALL_SUBFUNC_SET_EVENT_NOTIFY_INTERRUPT BIT_ULL(1) + +enum TdxRamType { + TDX_RAM_UNACCEPTED, + TDX_RAM_ADDED, +}; + +typedef struct TdxRamEntry { + uint64_t address; + uint64_t length; + enum TdxRamType type; +} TdxRamEntry; + +typedef struct TdxGuest { + X86ConfidentialGuest parent_obj; + + QemuMutex lock; + + bool initialized; + uint64_t attributes; /* TD attributes */ + uint64_t xfam; + char *mrconfigid; /* base64 encoded sha384 digest */ + char *mrowner; /* base64 encoded sha384 digest */ + char *mrownerconfig; /* base64 encoded sha384 digest */ + + MemoryRegion *tdvf_mr; + TdxFirmware tdvf; + + uint32_t nr_ram_entries; + TdxRamEntry *ram_entries; + + /* GetQuote */ + SocketAddress *qg_sock_addr; + int num; + + uint32_t event_notify_vector; + uint32_t event_notify_apicid; +} TdxGuest; + +#ifdef 
CONFIG_TDX +bool is_tdx_vm(void); +#else +#define is_tdx_vm() 0 +#endif /* CONFIG_TDX */ + +int tdx_pre_create_vcpu(CPUState *cpu, Error **errp); +void tdx_set_tdvf_region(MemoryRegion *tdvf_mr); +int tdx_parse_tdvf(void *flash_ptr, int size); +int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run); +void tdx_handle_get_quote(X86CPU *cpu, struct kvm_run *run); +void tdx_handle_get_tdvmcall_info(X86CPU *cpu, struct kvm_run *run); +void tdx_handle_setup_event_notify_interrupt(X86CPU *cpu, struct kvm_run *run); + +#endif /* QEMU_I386_TDX_H */ diff --git a/target/i386/kvm/vmsr_energy.c b/target/i386/kvm/vmsr_energy.c index 7e064c5aef3..58ce3df53a3 100644 --- a/target/i386/kvm/vmsr_energy.c +++ b/target/i386/kvm/vmsr_energy.c @@ -27,16 +27,6 @@ char *vmsr_compute_default_paths(void) return g_build_filename(state, "run", "qemu-vmsr-helper.sock", NULL); } -bool is_host_cpu_intel(void) -{ - int family, model, stepping; - char vendor[CPUID_VENDOR_SZ + 1]; - - host_cpu_vendor_fms(vendor, &family, &model, &stepping); - - return strcmp(vendor, CPUID_VENDOR_INTEL); -} - int is_rapl_enabled(void) { const char *path = "/sys/class/powercap/intel-rapl/enabled"; @@ -285,7 +275,6 @@ void vmsr_read_thread_stat(pid_t pid, } fclose(file); - return; } /* Read QEMU stat task folder to retrieve all QEMU threads ID */ diff --git a/target/i386/kvm/vmsr_energy.h b/target/i386/kvm/vmsr_energy.h index 16cc1f4814f..151bcbd6423 100644 --- a/target/i386/kvm/vmsr_energy.h +++ b/target/i386/kvm/vmsr_energy.h @@ -94,6 +94,5 @@ double vmsr_get_ratio(uint64_t e_delta, unsigned long long delta_ticks, unsigned int maxticks); void vmsr_init_topo_info(X86CPUTopoInfo *topo_info, const MachineState *ms); -bool is_host_cpu_intel(void); int is_rapl_enabled(void); #endif /* VMSR_ENERGY_H */ diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c index 2f89dc628ef..284c5ef6f68 100644 --- a/target/i386/kvm/xen-emu.c +++ b/target/i386/kvm/xen-emu.c @@ -13,14 +13,15 @@ #include "qemu/log.h" #include "qemu/main-loop.h" #include "qemu/error-report.h" +#include "exec/target_page.h" #include "hw/xen/xen.h" -#include "sysemu/kvm_int.h" -#include "sysemu/kvm_xen.h" +#include "system/kvm_int.h" +#include "system/kvm_xen.h" #include "kvm/kvm_i386.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "xen-emu.h" #include "trace.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "hw/pci/msi.h" #include "hw/i386/apic-msidef.h" diff --git a/target/i386/machine.c b/target/i386/machine.c index 39f8294f279..dd2dac1d443 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -1,16 +1,16 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "hw/isa/isa.h" #include "migration/cpu.h" #include "kvm/hyperv.h" #include "hw/i386/x86.h" #include "kvm/kvm_i386.h" #include "hw/xen/xen.h" - -#include "sysemu/kvm.h" -#include "sysemu/kvm_xen.h" -#include "sysemu/tcg.h" +#include "exec/watchpoint.h" +#include "system/kvm.h" +#include "system/kvm_xen.h" +#include "system/tcg.h" #include "qemu/error-report.h" @@ -1060,9 +1060,8 @@ static bool tsc_khz_needed(void *opaque) { X86CPU *cpu = opaque; CPUX86State *env = &cpu->env; - MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); - X86MachineClass *x86mc = X86_MACHINE_CLASS(mc); - return env->tsc_khz && x86mc->save_tsc_khz; + + return env->tsc_khz; } static const VMStateDescription vmstate_tsc_khz = { @@ -1543,6 +1542,25 @@ static const VMStateDescription vmstate_msr_xfd = { } }; 
+static bool msr_hwcr_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return env->msr_hwcr != 0; +} + +static const VMStateDescription vmstate_msr_hwcr = { + .name = "cpu/msr_hwcr", + .version_id = 1, + .minimum_version_id = 1, + .needed = msr_hwcr_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(env.msr_hwcr, X86CPU), + VMSTATE_END_OF_LIST() + } +}; + #ifdef TARGET_X86_64 static bool intel_fred_msrs_needed(void *opaque) { @@ -1773,6 +1791,7 @@ const VMStateDescription vmstate_x86_cpu = { &vmstate_msr_intel_sgx, &vmstate_pdptrs, &vmstate_msr_xfd, + &vmstate_msr_hwcr, #ifdef TARGET_X86_64 &vmstate_msr_fred, &vmstate_amx_xtile, diff --git a/target/i386/meson.build b/target/i386/meson.build index 075117989b9..092af34e2d8 100644 --- a/target/i386/meson.build +++ b/target/i386/meson.build @@ -11,6 +11,8 @@ i386_ss.add(when: 'CONFIG_SEV', if_true: files('host-cpu.c', 'confidential-guest # x86 cpu type i386_ss.add(when: 'CONFIG_KVM', if_true: files('host-cpu.c')) i386_ss.add(when: 'CONFIG_HVF', if_true: files('host-cpu.c')) +i386_ss.add(when: 'CONFIG_WHPX', if_true: files('host-cpu.c')) +i386_ss.add(when: 'CONFIG_NVMM', if_true: files('host-cpu.c')) i386_system_ss = ss.source_set() i386_system_ss.add(files( @@ -19,9 +21,10 @@ i386_system_ss.add(files( 'machine.c', 'monitor.c', 'cpu-apic.c', - 'cpu-sysemu.c', + 'cpu-system.c', )) -i386_system_ss.add(when: 'CONFIG_SEV', if_true: files('sev.c'), if_false: files('sev-sysemu-stub.c')) +i386_system_ss.add(when: 'CONFIG_SEV', if_true: files('sev.c'), + if_false: files('sev-system-stub.c')) i386_user_ss = ss.source_set() @@ -30,6 +33,7 @@ subdir('whpx') subdir('nvmm') subdir('hvf') subdir('tcg') +subdir('emulate') target_arch += {'i386': i386_ss} target_system_arch += {'i386': i386_system_ss} diff --git a/target/i386/monitor.c b/target/i386/monitor.c index 2d766b2637f..3c9b6ca62f2 100644 --- a/target/i386/monitor.c +++ b/target/i386/monitor.c @@ -27,9 +27,8 @@ #include "monitor/monitor.h" #include "monitor/hmp-target.h" #include "monitor/hmp.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/error.h" -#include "qapi/qapi-commands-misc-target.h" #include "qapi/qapi-commands-misc.h" /* Perform linear address sign extension */ diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c index 0ba31201e29..3799260bbde 100644 --- a/target/i386/nvmm/nvmm-accel-ops.c +++ b/target/i386/nvmm/nvmm-accel-ops.c @@ -8,12 +8,13 @@ */ #include "qemu/osdep.h" -#include "sysemu/kvm_int.h" +#include "system/kvm_int.h" #include "qemu/main-loop.h" -#include "sysemu/cpus.h" +#include "accel/accel-cpu-ops.h" +#include "system/cpus.h" #include "qemu/guest-random.h" -#include "sysemu/nvmm.h" +#include "system/nvmm.h" #include "nvmm-accel-ops.h" static void *qemu_nvmm_cpu_thread_fn(void *arg) @@ -80,12 +81,13 @@ static void nvmm_kick_vcpu_thread(CPUState *cpu) cpus_kick_thread(cpu); } -static void nvmm_accel_ops_class_init(ObjectClass *oc, void *data) +static void nvmm_accel_ops_class_init(ObjectClass *oc, const void *data) { AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); ops->create_vcpu_thread = nvmm_start_vcpu_thread; ops->kick_vcpu_thread = nvmm_kick_vcpu_thread; + ops->handle_interrupt = generic_handle_interrupt; ops->synchronize_post_reset = nvmm_cpu_synchronize_post_reset; ops->synchronize_post_init = nvmm_cpu_synchronize_post_init; diff --git a/target/i386/nvmm/nvmm-accel-ops.h b/target/i386/nvmm/nvmm-accel-ops.h index 7c5461bd759..931bb5ca243 100644 --- 
a/target/i386/nvmm/nvmm-accel-ops.h +++ b/target/i386/nvmm/nvmm-accel-ops.h @@ -10,7 +10,7 @@ #ifndef TARGET_I386_NVMM_ACCEL_OPS_H #define TARGET_I386_NVMM_ACCEL_OPS_H -#include "sysemu/cpus.h" +#include "system/cpus.h" int nvmm_init_vcpu(CPUState *cpu); int nvmm_vcpu_exec(CPUState *cpu); diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index 65768aca037..92e3b8b2f45 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -9,16 +9,19 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/address-spaces.h" -#include "exec/ioport.h" +#include "system/address-spaces.h" +#include "system/ioport.h" #include "qemu/accel.h" -#include "sysemu/nvmm.h" -#include "sysemu/cpus.h" -#include "sysemu/runstate.h" +#include "accel/accel-ops.h" +#include "system/nvmm.h" +#include "system/cpus.h" +#include "system/runstate.h" #include "qemu/main-loop.h" #include "qemu/error-report.h" #include "qapi/error.h" #include "qemu/queue.h" +#include "accel/accel-cpu-target.h" +#include "host-cpu.h" #include "migration/blocker.h" #include "strings.h" @@ -30,7 +33,6 @@ struct AccelCPUState { struct nvmm_vcpu vcpu; uint8_t tpr; bool stop; - bool dirty; /* Window-exiting for INTs/NMIs. */ bool int_window_exit; @@ -47,7 +49,7 @@ struct qemu_machine { /* -------------------------------------------------------------------------- */ -static bool nvmm_allowed; +bool nvmm_allowed; static struct qemu_machine qemu_mach; static struct nvmm_machine * @@ -508,7 +510,7 @@ nvmm_io_callback(struct nvmm_io *io) } /* Needed, otherwise infinite loop. */ - current_cpu->accel->dirty = false; + current_cpu->vcpu_dirty = false; } static void @@ -517,7 +519,7 @@ nvmm_mem_callback(struct nvmm_mem *mem) cpu_physical_memory_rw(mem->gpa, mem->data, mem->size, mem->write); /* Needed, otherwise infinite loop. */ - current_cpu->accel->dirty = false; + current_cpu->vcpu_dirty = false; } static struct nvmm_assist_callbacks nvmm_callbacks = { @@ -727,9 +729,9 @@ nvmm_vcpu_loop(CPUState *cpu) * Inner VCPU loop. 
*/ do { - if (cpu->accel->dirty) { + if (cpu->vcpu_dirty) { nvmm_set_registers(cpu); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } if (qcpu->stop) { @@ -827,32 +829,32 @@ static void do_nvmm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) { nvmm_get_registers(cpu); - cpu->accel->dirty = true; + cpu->vcpu_dirty = true; } static void do_nvmm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg) { nvmm_set_registers(cpu); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } static void do_nvmm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg) { nvmm_set_registers(cpu); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } static void do_nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg) { - cpu->accel->dirty = true; + cpu->vcpu_dirty = true; } void nvmm_cpu_synchronize_state(CPUState *cpu) { - if (!cpu->accel->dirty) { + if (!cpu->vcpu_dirty) { run_on_cpu(cpu, do_nvmm_cpu_synchronize_state, RUN_ON_CPU_NULL); } } @@ -982,7 +984,7 @@ nvmm_init_vcpu(CPUState *cpu) } } - qcpu->dirty = true; + qcpu->vcpu_dirty = true; cpu->accel = qcpu; return 0; @@ -1153,7 +1155,7 @@ static struct RAMBlockNotifier nvmm_ram_notifier = { /* -------------------------------------------------------------------------- */ static int -nvmm_accel_init(MachineState *ms) +nvmm_accel_init(AccelState *as, MachineState *ms) { int ret, err; @@ -1193,14 +1195,8 @@ nvmm_accel_init(MachineState *ms) return 0; } -int -nvmm_enabled(void) -{ - return nvmm_allowed; -} - static void -nvmm_accel_class_init(ObjectClass *oc, void *data) +nvmm_accel_class_init(ObjectClass *oc, const void *data) { AccelClass *ac = ACCEL_CLASS(oc); ac->name = "NVMM"; @@ -1214,10 +1210,33 @@ static const TypeInfo nvmm_accel_type = { .class_init = nvmm_accel_class_init, }; +static void nvmm_cpu_instance_init(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + + host_cpu_instance_init(cpu); +} + +static void nvmm_cpu_accel_class_init(ObjectClass *oc, const void *data) +{ + AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); + + acc->cpu_instance_init = nvmm_cpu_instance_init; +} + +static const TypeInfo nvmm_cpu_accel_type = { + .name = ACCEL_CPU_NAME("nvmm"), + + .parent = TYPE_ACCEL_CPU, + .class_init = nvmm_cpu_accel_class_init, + .abstract = true, +}; + static void nvmm_type_init(void) { type_register_static(&nvmm_accel_type); + type_register_static(&nvmm_cpu_accel_type); } type_init(nvmm_type_init); diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h index f0aa1894aa2..a2e4d480399 100644 --- a/target/i386/ops_sse.h +++ b/target/i386/ops_sse.h @@ -842,7 +842,7 @@ int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s) void glue(helper_rsqrtps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - uint8_t old_flags = get_float_exception_flags(&env->sse_status); + int old_flags = get_float_exception_flags(&env->sse_status); int i; for (i = 0; i < 2 << SHIFT; i++) { d->ZMM_S(i) = float32_div(float32_one, @@ -855,7 +855,7 @@ void glue(helper_rsqrtps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) #if SHIFT == 1 void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s) { - uint8_t old_flags = get_float_exception_flags(&env->sse_status); + int old_flags = get_float_exception_flags(&env->sse_status); int i; d->ZMM_S(0) = float32_div(float32_one, float32_sqrt(s->ZMM_S(0), &env->sse_status), @@ -869,7 +869,7 @@ void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s) void glue(helper_rcpps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - uint8_t old_flags = 
get_float_exception_flags(&env->sse_status); + int old_flags = get_float_exception_flags(&env->sse_status); int i; for (i = 0; i < 2 << SHIFT; i++) { d->ZMM_S(i) = float32_div(float32_one, s->ZMM_S(i), &env->sse_status); @@ -880,7 +880,7 @@ void glue(helper_rcpps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) #if SHIFT == 1 void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s) { - uint8_t old_flags = get_float_exception_flags(&env->sse_status); + int old_flags = get_float_exception_flags(&env->sse_status); int i; d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status); for (i = 1; i < 2 << SHIFT; i++) { @@ -1714,7 +1714,7 @@ void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mode) { - uint8_t old_flags = get_float_exception_flags(&env->sse_status); + int old_flags = get_float_exception_flags(&env->sse_status); signed char prev_rounding_mode; int i; @@ -1738,7 +1738,7 @@ void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mode) { - uint8_t old_flags = get_float_exception_flags(&env->sse_status); + int old_flags = get_float_exception_flags(&env->sse_status); signed char prev_rounding_mode; int i; @@ -1763,7 +1763,7 @@ void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, uint32_t mode) { - uint8_t old_flags = get_float_exception_flags(&env->sse_status); + int old_flags = get_float_exception_flags(&env->sse_status); signed char prev_rounding_mode; int i; @@ -1788,7 +1788,7 @@ void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, uint32_t mode) { - uint8_t old_flags = get_float_exception_flags(&env->sse_status); + int old_flags = get_float_exception_flags(&env->sse_status); signed char prev_rounding_mode; int i; diff --git a/target/i386/sev-sysemu-stub.c b/target/i386/sev-sysemu-stub.c deleted file mode 100644 index d5bf886e799..00000000000 --- a/target/i386/sev-sysemu-stub.c +++ /dev/null @@ -1,73 +0,0 @@ -/* - * QEMU SEV system stub - * - * Copyright Advanced Micro Devices 2018 - * - * Authors: - * Brijesh Singh - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- * - */ - -#include "qemu/osdep.h" -#include "monitor/monitor.h" -#include "monitor/hmp-target.h" -#include "qapi/qapi-commands-misc-target.h" -#include "qapi/error.h" -#include "sev.h" - -SevInfo *qmp_query_sev(Error **errp) -{ - error_setg(errp, "SEV is not available in this QEMU"); - return NULL; -} - -SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp) -{ - error_setg(errp, "SEV is not available in this QEMU"); - return NULL; -} - -SevCapability *qmp_query_sev_capabilities(Error **errp) -{ - error_setg(errp, "SEV is not available in this QEMU"); - return NULL; -} - -void qmp_sev_inject_launch_secret(const char *packet_header, const char *secret, - bool has_gpa, uint64_t gpa, Error **errp) -{ - error_setg(errp, "SEV is not available in this QEMU"); -} - -int sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp) -{ - g_assert_not_reached(); -} - -void sev_es_set_reset_vector(CPUState *cpu) -{ -} - -int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size) -{ - g_assert_not_reached(); -} - -SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce, - Error **errp) -{ - error_setg(errp, "SEV is not available in this QEMU"); - return NULL; -} - -void hmp_info_sev(Monitor *mon, const QDict *qdict) -{ - monitor_printf(mon, "SEV is not available in this QEMU\n"); -} - -void pc_system_parse_sev_metadata(uint8_t *flash_ptr, size_t flash_size) -{ -} diff --git a/target/i386/sev-system-stub.c b/target/i386/sev-system-stub.c new file mode 100644 index 00000000000..7c5c02a5657 --- /dev/null +++ b/target/i386/sev-system-stub.c @@ -0,0 +1,41 @@ +/* + * QEMU SEV system stub + * + * Copyright Advanced Micro Devices 2018 + * + * Authors: + * Brijesh Singh + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "monitor/monitor.h" +#include "monitor/hmp-target.h" +#include "qapi/error.h" +#include "sev.h" + +int sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp) +{ + g_assert_not_reached(); +} + +void sev_es_set_reset_vector(CPUState *cpu) +{ +} + +int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size) +{ + g_assert_not_reached(); +} + +void hmp_info_sev(Monitor *mon, const QDict *qdict) +{ + monitor_printf(mon, "SEV is not available in this QEMU\n"); +} + +void pc_system_parse_sev_metadata(uint8_t *flash_ptr, size_t flash_size) +{ +} diff --git a/target/i386/sev.c b/target/i386/sev.c index a1157c0ede6..1057b8ab2c6 100644 --- a/target/i386/sev.c +++ b/target/i386/sev.c @@ -26,21 +26,24 @@ #include "qemu/uuid.h" #include "qemu/error-report.h" #include "crypto/hash.h" -#include "sysemu/kvm.h" +#include "exec/target_page.h" +#include "system/kvm.h" #include "kvm/kvm_i386.h" #include "sev.h" -#include "sysemu/sysemu.h" -#include "sysemu/runstate.h" +#include "system/system.h" +#include "system/runstate.h" #include "trace.h" #include "migration/blocker.h" #include "qom/object.h" #include "monitor/monitor.h" #include "monitor/hmp-target.h" -#include "qapi/qapi-commands-misc-target.h" +#include "qapi/qapi-commands-misc-i386.h" #include "confidential-guest.h" #include "hw/i386/pc.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" +#include "hw/i386/e820_memory_layout.h" #include "qemu/queue.h" +#include "qemu/cutils.h" OBJECT_DECLARE_TYPE(SevCommonState, SevCommonStateClass, SEV_COMMON) OBJECT_DECLARE_TYPE(SevGuestState, SevCommonStateClass, SEV_GUEST) @@ -49,6 +52,15 @@ OBJECT_DECLARE_TYPE(SevSnpGuestState, SevCommonStateClass, SEV_SNP_GUEST) /* hard code sha256 digest size */ #define HASH_SIZE 32 +/* Hard coded GPA that KVM uses for the VMSA */ +#define KVM_VMSA_GPA 0xFFFFFFFFF000 + +/* Convert between SEV-ES VMSA and SegmentCache flags/attributes */ +#define FLAGS_VMSA_TO_SEGCACHE(flags) \ + ((((flags) & 0xff00) << 12) | (((flags) & 0xff) << 8)) +#define FLAGS_SEGCACHE_TO_VMSA(flags) \ + ((((flags) & 0xff00) >> 8) | (((flags) & 0xf00000) >> 12)) + typedef struct QEMU_PACKED SevHashTableEntry { QemuUUID guid; uint16_t len; @@ -88,6 +100,14 @@ typedef struct QEMU_PACKED SevHashTableDescriptor { uint32_t size; } SevHashTableDescriptor; +typedef struct SevLaunchVmsa { + QTAILQ_ENTRY(SevLaunchVmsa) next; + + uint16_t cpu_index; + uint64_t gpa; + struct sev_es_save_area vmsa; +} SevLaunchVmsa; + struct SevCommonState { X86ConfidentialGuest parent_obj; @@ -98,6 +118,8 @@ struct SevCommonState { uint32_t cbitpos; uint32_t reduced_phys_bits; bool kernel_hashes; + uint64_t sev_features; + uint64_t supported_sev_features; /* runtime state */ uint8_t api_major; @@ -106,9 +128,7 @@ struct SevCommonState { int sev_fd; SevState state; - uint32_t reset_cs; - uint32_t reset_ip; - bool reset_data_valid; + QTAILQ_HEAD(, SevLaunchVmsa) launch_vmsa; }; struct SevCommonStateClass { @@ -121,7 +141,8 @@ struct SevCommonStateClass { Error **errp); int (*launch_start)(SevCommonState *sev_common); void (*launch_finish)(SevCommonState *sev_common); - int (*launch_update_data)(SevCommonState *sev_common, hwaddr gpa, uint8_t *ptr, size_t len); + int (*launch_update_data)(SevCommonState *sev_common, hwaddr gpa, + uint8_t *ptr, size_t len, Error **errp); int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp); }; @@ -211,14 +232,6 @@ static const char *const sev_fw_errlist[] = { #define SEV_FW_MAX_ERROR 
ARRAY_SIZE(sev_fw_errlist) -/* doesn't expose this, so re-use the max from kvm.c */ -#define KVM_MAX_CPUID_ENTRIES 100 - -typedef struct KvmCpuidInfo { - struct kvm_cpuid2 cpuid; - struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES]; -} KvmCpuidInfo; - #define SNP_CPUID_FUNCTION_MAXCOUNT 64 #define SNP_CPUID_FUNCTION_UNKNOWN 0xFFFFFFFF @@ -370,6 +383,288 @@ static struct RAMBlockNotifier sev_ram_notifier = { .ram_block_removed = sev_ram_block_removed, }; +static void sev_apply_cpu_context(CPUState *cpu) +{ + SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs); + X86CPU *x86; + CPUX86State *env; + struct SevLaunchVmsa *launch_vmsa; + + /* See if an initial VMSA has been provided for this CPU */ + QTAILQ_FOREACH(launch_vmsa, &sev_common->launch_vmsa, next) + { + if (cpu->cpu_index == launch_vmsa->cpu_index) { + x86 = X86_CPU(cpu); + env = &x86->env; + + /* + * Ideally we would provide the VMSA directly to kvm which would + * ensure that the resulting initial VMSA measurement which is + * calculated during KVM_SEV_LAUNCH_UPDATE_VMSA is calculated from + * exactly what we provide here. Currently this is not possible so + * we need to copy the parts of the VMSA structure that we currently + * support into the CPU state. + */ + cpu_load_efer(env, launch_vmsa->vmsa.efer); + cpu_x86_update_cr4(env, launch_vmsa->vmsa.cr4); + cpu_x86_update_cr0(env, launch_vmsa->vmsa.cr0); + cpu_x86_update_cr3(env, launch_vmsa->vmsa.cr3); + env->xcr0 = launch_vmsa->vmsa.xcr0; + env->pat = launch_vmsa->vmsa.g_pat; + + cpu_x86_load_seg_cache( + env, R_CS, launch_vmsa->vmsa.cs.selector, + launch_vmsa->vmsa.cs.base, launch_vmsa->vmsa.cs.limit, + FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.cs.attrib)); + cpu_x86_load_seg_cache( + env, R_DS, launch_vmsa->vmsa.ds.selector, + launch_vmsa->vmsa.ds.base, launch_vmsa->vmsa.ds.limit, + FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.ds.attrib)); + cpu_x86_load_seg_cache( + env, R_ES, launch_vmsa->vmsa.es.selector, + launch_vmsa->vmsa.es.base, launch_vmsa->vmsa.es.limit, + FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.es.attrib)); + cpu_x86_load_seg_cache( + env, R_FS, launch_vmsa->vmsa.fs.selector, + launch_vmsa->vmsa.fs.base, launch_vmsa->vmsa.fs.limit, + FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.fs.attrib)); + cpu_x86_load_seg_cache( + env, R_GS, launch_vmsa->vmsa.gs.selector, + launch_vmsa->vmsa.gs.base, launch_vmsa->vmsa.gs.limit, + FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.gs.attrib)); + cpu_x86_load_seg_cache( + env, R_SS, launch_vmsa->vmsa.ss.selector, + launch_vmsa->vmsa.ss.base, launch_vmsa->vmsa.ss.limit, + FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.ss.attrib)); + + env->gdt.base = launch_vmsa->vmsa.gdtr.base; + env->gdt.limit = launch_vmsa->vmsa.gdtr.limit; + env->gdt.flags = + FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.gdtr.attrib); + env->idt.base = launch_vmsa->vmsa.idtr.base; + env->idt.limit = launch_vmsa->vmsa.idtr.limit; + env->idt.flags = + FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.idtr.attrib); + + cpu_x86_load_seg_cache( + env, R_LDTR, launch_vmsa->vmsa.ldtr.selector, + launch_vmsa->vmsa.ldtr.base, launch_vmsa->vmsa.ldtr.limit, + FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.ldtr.attrib)); + cpu_x86_load_seg_cache( + env, R_TR, launch_vmsa->vmsa.tr.selector, + launch_vmsa->vmsa.ldtr.base, launch_vmsa->vmsa.tr.limit, + FLAGS_VMSA_TO_SEGCACHE(launch_vmsa->vmsa.tr.attrib)); + + env->dr[6] = launch_vmsa->vmsa.dr6; + env->dr[7] = launch_vmsa->vmsa.dr7; + + env->regs[R_EAX] = launch_vmsa->vmsa.rax; + env->regs[R_ECX] = launch_vmsa->vmsa.rcx; + env->regs[R_EDX] = 
launch_vmsa->vmsa.rdx; + env->regs[R_EBX] = launch_vmsa->vmsa.rbx; + env->regs[R_ESP] = launch_vmsa->vmsa.rsp; + env->regs[R_EBP] = launch_vmsa->vmsa.rbp; + env->regs[R_ESI] = launch_vmsa->vmsa.rsi; + env->regs[R_EDI] = launch_vmsa->vmsa.rdi; +#ifdef TARGET_X86_64 + env->regs[R_R8] = launch_vmsa->vmsa.r8; + env->regs[R_R9] = launch_vmsa->vmsa.r9; + env->regs[R_R10] = launch_vmsa->vmsa.r10; + env->regs[R_R11] = launch_vmsa->vmsa.r11; + env->regs[R_R12] = launch_vmsa->vmsa.r12; + env->regs[R_R13] = launch_vmsa->vmsa.r13; + env->regs[R_R14] = launch_vmsa->vmsa.r14; + env->regs[R_R15] = launch_vmsa->vmsa.r15; +#endif + env->eip = launch_vmsa->vmsa.rip; + env->eflags = launch_vmsa->vmsa.rflags; + + cpu_set_fpuc(env, launch_vmsa->vmsa.x87_fcw); + env->mxcsr = launch_vmsa->vmsa.mxcsr; + + break; + } + } +} + +static int check_sev_features(SevCommonState *sev_common, uint64_t sev_features, + Error **errp) +{ + /* + * Ensure SEV_FEATURES is configured for correct SEV hardware and that + * the requested features are supported. If SEV-SNP is enabled then + * that feature must be enabled, otherwise it must be cleared. + */ + if (sev_snp_enabled() && !(sev_features & SVM_SEV_FEAT_SNP_ACTIVE)) { + error_setg( + errp, + "%s: SEV_SNP is enabled but is not enabled in VMSA sev_features", + __func__); + return -1; + } else if (!sev_snp_enabled() && + (sev_features & SVM_SEV_FEAT_SNP_ACTIVE)) { + error_setg( + errp, + "%s: SEV_SNP is not enabled but is enabled in VMSA sev_features", + __func__); + return -1; + } + if (sev_features & ~sev_common->supported_sev_features) { + error_setg(errp, + "%s: VMSA contains unsupported sev_features: %lX, " + "supported features: %lX", + __func__, sev_features, sev_common->supported_sev_features); + return -1; + } + return 0; +} + +static int check_vmsa_supported(SevCommonState *sev_common, hwaddr gpa, + const struct sev_es_save_area *vmsa, + Error **errp) +{ + struct sev_es_save_area vmsa_check; + + /* + * KVM always populates the VMSA at a fixed GPA which cannot be modified + * from userspace. Specifying a different GPA will not prevent the guest + * from starting but will cause the launch measurement to be different + * from expected. Therefore check that the provided GPA matches the KVM + * hardcoded value. + */ + if (gpa != KVM_VMSA_GPA) { + error_setg(errp, + "%s: The VMSA GPA must be %lX but is specified as %lX", + __func__, KVM_VMSA_GPA, gpa); + return -1; + } + + /* + * Clear all supported fields so we can then check the entire structure + * is zero. 
+ */ + memcpy(&vmsa_check, vmsa, sizeof(struct sev_es_save_area)); + memset(&vmsa_check.es, 0, sizeof(vmsa_check.es)); + memset(&vmsa_check.cs, 0, sizeof(vmsa_check.cs)); + memset(&vmsa_check.ss, 0, sizeof(vmsa_check.ss)); + memset(&vmsa_check.ds, 0, sizeof(vmsa_check.ds)); + memset(&vmsa_check.fs, 0, sizeof(vmsa_check.fs)); + memset(&vmsa_check.gs, 0, sizeof(vmsa_check.gs)); + memset(&vmsa_check.gdtr, 0, sizeof(vmsa_check.gdtr)); + memset(&vmsa_check.idtr, 0, sizeof(vmsa_check.idtr)); + memset(&vmsa_check.ldtr, 0, sizeof(vmsa_check.ldtr)); + memset(&vmsa_check.tr, 0, sizeof(vmsa_check.tr)); + vmsa_check.efer = 0; + vmsa_check.cr0 = 0; + vmsa_check.cr3 = 0; + vmsa_check.cr4 = 0; + vmsa_check.xcr0 = 0; + vmsa_check.dr6 = 0; + vmsa_check.dr7 = 0; + vmsa_check.rax = 0; + vmsa_check.rcx = 0; + vmsa_check.rdx = 0; + vmsa_check.rbx = 0; + vmsa_check.rsp = 0; + vmsa_check.rbp = 0; + vmsa_check.rsi = 0; + vmsa_check.rdi = 0; + vmsa_check.r8 = 0; + vmsa_check.r9 = 0; + vmsa_check.r10 = 0; + vmsa_check.r11 = 0; + vmsa_check.r12 = 0; + vmsa_check.r13 = 0; + vmsa_check.r14 = 0; + vmsa_check.r15 = 0; + vmsa_check.rip = 0; + vmsa_check.rflags = 0; + + vmsa_check.g_pat = 0; + vmsa_check.xcr0 = 0; + + vmsa_check.x87_fcw = 0; + vmsa_check.mxcsr = 0; + + if (check_sev_features(sev_common, vmsa_check.sev_features, errp) < 0) { + return -1; + } + vmsa_check.sev_features = 0; + + if (!buffer_is_zero(&vmsa_check, sizeof(vmsa_check))) { + error_setg(errp, + "%s: The VMSA contains fields that are not " + "synchronized with KVM. Continuing would result in " + "either unpredictable guest behavior, or a " + "mismatched launch measurement.", + __func__); + return -1; + } + return 0; +} + +static int sev_set_cpu_context(uint16_t cpu_index, const void *ctx, + uint32_t ctx_len, hwaddr gpa, Error **errp) +{ + SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs); + SevLaunchVmsa *launch_vmsa; + CPUState *cpu; + bool exists = false; + + /* + * Setting the CPU context is only supported for SEV-ES and SEV-SNP. The + * context buffer will contain a sev_es_save_area from the Linux kernel + * which is defined by "Table B-4. VMSA Layout, State Save Area for SEV-ES" + * in the AMD64 APM, Volume 2. + */ + + if (!sev_es_enabled()) { + error_setg(errp, "SEV: unable to set CPU context: Not supported"); + return -1; + } + + if (ctx_len < sizeof(struct sev_es_save_area)) { + error_setg(errp, "SEV: unable to set CPU context: " + "Invalid context provided"); + return -1; + } + + cpu = qemu_get_cpu(cpu_index); + if (!cpu) { + error_setg(errp, "SEV: unable to set CPU context for out of bounds " + "CPU index %d", cpu_index); + return -1; + } + + /* + * If the context of this VP has already been set then replace it with the + * new context. 
+ */ + QTAILQ_FOREACH(launch_vmsa, &sev_common->launch_vmsa, next) + { + if (cpu_index == launch_vmsa->cpu_index) { + launch_vmsa->gpa = gpa; + memcpy(&launch_vmsa->vmsa, ctx, sizeof(launch_vmsa->vmsa)); + exists = true; + break; + } + } + + if (!exists) { + /* New VP context */ + launch_vmsa = g_new0(SevLaunchVmsa, 1); + memcpy(&launch_vmsa->vmsa, ctx, sizeof(launch_vmsa->vmsa)); + launch_vmsa->cpu_index = cpu_index; + launch_vmsa->gpa = gpa; + QTAILQ_INSERT_TAIL(&sev_common->launch_vmsa, launch_vmsa, next); + } + + /* Synchronise the VMSA with the current CPU state */ + sev_apply_cpu_context(cpu); + + return 0; +} + bool sev_enabled(void) { @@ -946,7 +1241,7 @@ sev_snp_launch_update(SevSnpGuestState *sev_snp_guest, } static uint32_t -sev_snp_mask_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index, +sev_snp_adjust_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index, int reg, uint32_t value) { switch (feature) { @@ -977,9 +1272,8 @@ sev_snp_mask_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t return value; } -static int -sev_launch_update_data(SevCommonState *sev_common, hwaddr gpa, - uint8_t *addr, size_t len) +static int sev_launch_update_data(SevCommonState *sev_common, hwaddr gpa, + uint8_t *addr, size_t len, Error **errp) { int ret, fw_error; struct kvm_sev_launch_update_data update; @@ -994,8 +1288,8 @@ sev_launch_update_data(SevCommonState *sev_common, hwaddr gpa, ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_LAUNCH_UPDATE_DATA, &update, &fw_error); if (ret) { - error_report("%s: LAUNCH_UPDATE ret=%d fw_error=%d '%s'", - __func__, ret, fw_error, fw_error_to_str(fw_error)); + error_setg(errp, "%s: LAUNCH_UPDATE ret=%d fw_error=%d '%s'", __func__, + ret, fw_error, fw_error_to_str(fw_error)); } return ret; @@ -1005,6 +1299,16 @@ static int sev_launch_update_vmsa(SevGuestState *sev_guest) { int ret, fw_error; + CPUState *cpu; + + /* + * The initial CPU state is measured as part of KVM_SEV_LAUNCH_UPDATE_VMSA. + * Synchronise the CPU state to any provided launch VMSA structures. 
+ */ + CPU_FOREACH(cpu) { + sev_apply_cpu_context(cpu); + } + ret = sev_ioctl(SEV_COMMON(sev_guest)->sev_fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL, &fw_error); @@ -1123,8 +1427,8 @@ sev_launch_finish(SevCommonState *sev_common) migrate_add_blocker(&sev_mig_blocker, &error_fatal); } -static int -snp_launch_update_data(uint64_t gpa, void *hva, size_t len, int type) +static int snp_launch_update_data(uint64_t gpa, void *hva, size_t len, + int type, Error **errp) { SevLaunchUpdateData *data; @@ -1139,23 +1443,21 @@ snp_launch_update_data(uint64_t gpa, void *hva, size_t len, int type) return 0; } -static int -sev_snp_launch_update_data(SevCommonState *sev_common, hwaddr gpa, - uint8_t *ptr, size_t len) +static int sev_snp_launch_update_data(SevCommonState *sev_common, hwaddr gpa, + uint8_t *ptr, size_t len, Error **errp) { - int ret = snp_launch_update_data(gpa, ptr, len, - KVM_SEV_SNP_PAGE_TYPE_NORMAL); - return ret; + return snp_launch_update_data(gpa, ptr, len, + KVM_SEV_SNP_PAGE_TYPE_NORMAL, errp); } static int sev_snp_cpuid_info_fill(SnpCpuidInfo *snp_cpuid_info, - const KvmCpuidInfo *kvm_cpuid_info) + const KvmCpuidInfo *kvm_cpuid_info, Error **errp) { size_t i; if (kvm_cpuid_info->cpuid.nent > SNP_CPUID_FUNCTION_MAXCOUNT) { - error_report("SEV-SNP: CPUID entry count (%d) exceeds max (%d)", + error_setg(errp, "SEV-SNP: CPUID entry count (%d) exceeds max (%d)", kvm_cpuid_info->cpuid.nent, SNP_CPUID_FUNCTION_MAXCOUNT); return -1; } @@ -1197,8 +1499,8 @@ sev_snp_cpuid_info_fill(SnpCpuidInfo *snp_cpuid_info, return 0; } -static int -snp_launch_update_cpuid(uint32_t cpuid_addr, void *hva, size_t cpuid_len) +static int snp_launch_update_cpuid(uint32_t cpuid_addr, void *hva, + size_t cpuid_len, Error **errp) { KvmCpuidInfo kvm_cpuid_info = {0}; SnpCpuidInfo snp_cpuid_info; @@ -1215,26 +1517,25 @@ snp_launch_update_cpuid(uint32_t cpuid_addr, void *hva, size_t cpuid_len) } while (ret == -E2BIG); if (ret) { - error_report("SEV-SNP: unable to query CPUID values for CPU: '%s'", - strerror(-ret)); - return 1; + error_setg(errp, "SEV-SNP: unable to query CPUID values for CPU: '%s'", + strerror(-ret)); + return -1; } - ret = sev_snp_cpuid_info_fill(&snp_cpuid_info, &kvm_cpuid_info); - if (ret) { - error_report("SEV-SNP: failed to generate CPUID table information"); - return 1; + ret = sev_snp_cpuid_info_fill(&snp_cpuid_info, &kvm_cpuid_info, errp); + if (ret < 0) { + return -1; } memcpy(hva, &snp_cpuid_info, sizeof(snp_cpuid_info)); return snp_launch_update_data(cpuid_addr, hva, cpuid_len, - KVM_SEV_SNP_PAGE_TYPE_CPUID); + KVM_SEV_SNP_PAGE_TYPE_CPUID, errp); } -static int -snp_launch_update_kernel_hashes(SevSnpGuestState *sev_snp, uint32_t addr, - void *hva, uint32_t len) +static int snp_launch_update_kernel_hashes(SevSnpGuestState *sev_snp, + uint32_t addr, void *hva, + uint32_t len, Error **errp) { int type = KVM_SEV_SNP_PAGE_TYPE_ZERO; if (sev_snp->parent_obj.kernel_hashes) { @@ -1246,7 +1547,7 @@ snp_launch_update_kernel_hashes(SevSnpGuestState *sev_snp, uint32_t addr, sizeof(*sev_snp->kernel_hashes_data)); type = KVM_SEV_SNP_PAGE_TYPE_NORMAL; } - return snp_launch_update_data(addr, hva, len, type); + return snp_launch_update_data(addr, hva, len, type, errp); } static int @@ -1284,12 +1585,14 @@ snp_populate_metadata_pages(SevSnpGuestState *sev_snp, } if (type == KVM_SEV_SNP_PAGE_TYPE_CPUID) { - ret = snp_launch_update_cpuid(desc->base, hva, desc->len); + ret = snp_launch_update_cpuid(desc->base, hva, desc->len, + &error_fatal); } else if (desc->type == SEV_DESC_TYPE_SNP_KERNEL_HASHES) { ret = 
snp_launch_update_kernel_hashes(sev_snp, desc->base, hva, - desc->len); + desc->len, &error_fatal); } else { - ret = snp_launch_update_data(desc->base, hva, desc->len, type); + ret = snp_launch_update_data(desc->base, hva, desc->len, type, + &error_fatal); } if (ret) { @@ -1311,18 +1614,26 @@ sev_snp_launch_finish(SevCommonState *sev_common) struct kvm_sev_snp_launch_finish *finish = &sev_snp->kvm_finish_conf; /* - * To boot the SNP guest, the hypervisor is required to populate the CPUID - * and Secrets page before finalizing the launch flow. The location of - * the secrets and CPUID page is available through the OVMF metadata GUID. + * Populate all the metadata pages if not using an IGVM file. In the case + * where an IGVM file is provided it will be used to configure the metadata + * pages directly. */ - metadata = pc_system_get_ovmf_sev_metadata_ptr(); - if (metadata == NULL) { - error_report("%s: Failed to locate SEV metadata header", __func__); - exit(1); - } + if (!X86_MACHINE(qdev_get_machine())->igvm) { + /* + * To boot the SNP guest, the hypervisor is required to populate the + * CPUID and Secrets page before finalizing the launch flow. The + * location of the secrets and CPUID page is available through the + * OVMF metadata GUID. + */ + metadata = pc_system_get_ovmf_sev_metadata_ptr(); + if (metadata == NULL) { + error_report("%s: Failed to locate SEV metadata header", __func__); + exit(1); + } - /* Populate all the metadata pages */ - snp_populate_metadata_pages(sev_snp, metadata); + /* Populate all the metadata pages */ + snp_populate_metadata_pages(sev_snp, metadata); + } QTAILQ_FOREACH(data, &launch_update, next) { ret = sev_snp_launch_update(sev_snp, data); @@ -1432,6 +1743,39 @@ static int sev_snp_kvm_type(X86ConfidentialGuest *cg) return KVM_X86_SNP_VM; } +static int sev_init_supported_features(ConfidentialGuestSupport *cgs, + SevCommonState *sev_common, Error **errp) +{ + X86ConfidentialGuestClass *x86_klass = + X86_CONFIDENTIAL_GUEST_GET_CLASS(cgs); + /* + * Older kernels do not support query or setting of sev_features. In this + * case the set of supported features must be zero to match the settings + * in the kernel. 
+ */ + if (x86_klass->kvm_type(X86_CONFIDENTIAL_GUEST(sev_common)) == + KVM_X86_DEFAULT_VM) { + sev_common->supported_sev_features = 0; + return 0; + } + + /* Query KVM for the supported set of sev_features */ + struct kvm_device_attr attr = { + .group = KVM_X86_GRP_SEV, + .attr = KVM_X86_SEV_VMSA_FEATURES, + .addr = (unsigned long)&sev_common->supported_sev_features, + }; + if (kvm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr) < 0) { + error_setg(errp, "%s: failed to query supported sev_features", + __func__); + return -1; + } + if (sev_snp_enabled()) { + sev_common->supported_sev_features |= SVM_SEV_FEAT_SNP_ACTIVE; + } + return 0; +} + static int sev_common_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) { char *devname; @@ -1512,6 +1856,10 @@ static int sev_common_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) } } + if (sev_init_supported_features(cgs, sev_common, errp) < 0) { + return -1; + } + trace_kvm_sev_init(); switch (x86_klass->kvm_type(X86_CONFIDENTIAL_GUEST(sev_common))) { case KVM_X86_DEFAULT_VM: @@ -1523,6 +1871,40 @@ static int sev_common_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) case KVM_X86_SEV_ES_VM: case KVM_X86_SNP_VM: { struct kvm_sev_init args = { 0 }; + MachineState *machine = MACHINE(qdev_get_machine()); + X86MachineState *x86machine = X86_MACHINE(qdev_get_machine()); + + /* + * If configuration is provided via an IGVM file then the IGVM file + * might contain configuration of the initial vcpu context. For SEV + * the vcpu context includes the sev_features which should be applied + * to the vcpu. + * + * KVM does not synchronize sev_features from CPU state. Instead it + * requires sev_features to be provided as part of this initialization + * call which is subsequently automatically applied to the VMSA of + * each vcpu. + * + * The IGVM file is normally processed after initialization. Therefore + * we need to pre-process it here to extract sev_features in order to + * provide it to KVM_SEV_INIT2. Each cgs_* function that is called by + * the IGVM processor detects this pre-process by observing the state + * as SEV_STATE_UNINIT. + */ + if (x86machine->igvm) { + if (IGVM_CFG_GET_CLASS(x86machine->igvm) + ->process(x86machine->igvm, machine->cgs, true, errp) == + -1) { + return -1; + } + /* + * KVM maintains a bitmask of allowed sev_features. This does not + * include SVM_SEV_FEAT_SNP_ACTIVE which is set accordingly by KVM + * itself. Therefore we need to clear this flag. 
+ */ + args.vmsa_features = sev_common->sev_features & + ~SVM_SEV_FEAT_SNP_ACTIVE; + } ret = sev_ioctl(sev_common->sev_fd, KVM_SEV_INIT2, &args, &fw_error); break; @@ -1622,9 +2004,8 @@ sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp) if (sev_check_state(sev_common, SEV_STATE_LAUNCH_UPDATE)) { int ret; - ret = klass->launch_update_data(sev_common, gpa, ptr, len); + ret = klass->launch_update_data(sev_common, gpa, ptr, len, errp); if (ret < 0) { - error_setg(errp, "SEV: Failed to encrypt pflash rom"); return ret; } } @@ -1789,40 +2170,109 @@ sev_es_find_reset_vector(void *flash_ptr, uint64_t flash_size, return sev_es_parse_reset_block(info, addr); } -void sev_es_set_reset_vector(CPUState *cpu) + +static void seg_to_vmsa(const SegmentCache *cpu_seg, struct vmcb_seg *vmsa_seg) { - X86CPU *x86; - CPUX86State *env; - ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs; - SevCommonState *sev_common = SEV_COMMON( - object_dynamic_cast(OBJECT(cgs), TYPE_SEV_COMMON)); + vmsa_seg->selector = cpu_seg->selector; + vmsa_seg->base = cpu_seg->base; + vmsa_seg->limit = cpu_seg->limit; + vmsa_seg->attrib = FLAGS_SEGCACHE_TO_VMSA(cpu_seg->flags); +} - /* Only update if we have valid reset information */ - if (!sev_common || !sev_common->reset_data_valid) { - return; - } +static void initialize_vmsa(const CPUState *cpu, struct sev_es_save_area *vmsa) +{ + const X86CPU *x86 = X86_CPU(cpu); + const CPUX86State *env = &x86->env; - /* Do not update the BSP reset state */ - if (cpu->cpu_index == 0) { - return; + /* + * Initialize the SEV-ES save area from the current state of + * the CPU. The entire state does not need to be copied, only the state + * that is copied back to the CPUState in sev_apply_cpu_context. + */ + memset(vmsa, 0, sizeof(struct sev_es_save_area)); + vmsa->efer = env->efer; + vmsa->cr0 = env->cr[0]; + vmsa->cr3 = env->cr[3]; + vmsa->cr4 = env->cr[4]; + vmsa->xcr0 = env->xcr0; + vmsa->g_pat = env->pat; + + seg_to_vmsa(&env->segs[R_CS], &vmsa->cs); + seg_to_vmsa(&env->segs[R_DS], &vmsa->ds); + seg_to_vmsa(&env->segs[R_ES], &vmsa->es); + seg_to_vmsa(&env->segs[R_FS], &vmsa->fs); + seg_to_vmsa(&env->segs[R_GS], &vmsa->gs); + seg_to_vmsa(&env->segs[R_SS], &vmsa->ss); + + seg_to_vmsa(&env->gdt, &vmsa->gdtr); + seg_to_vmsa(&env->idt, &vmsa->idtr); + seg_to_vmsa(&env->ldt, &vmsa->ldtr); + seg_to_vmsa(&env->tr, &vmsa->tr); + + vmsa->dr6 = env->dr[6]; + vmsa->dr7 = env->dr[7]; + + vmsa->rax = env->regs[R_EAX]; + vmsa->rcx = env->regs[R_ECX]; + vmsa->rdx = env->regs[R_EDX]; + vmsa->rbx = env->regs[R_EBX]; + vmsa->rsp = env->regs[R_ESP]; + vmsa->rbp = env->regs[R_EBP]; + vmsa->rsi = env->regs[R_ESI]; + vmsa->rdi = env->regs[R_EDI]; + +#ifdef TARGET_X86_64 + vmsa->r8 = env->regs[R_R8]; + vmsa->r9 = env->regs[R_R9]; + vmsa->r10 = env->regs[R_R10]; + vmsa->r11 = env->regs[R_R11]; + vmsa->r12 = env->regs[R_R12]; + vmsa->r13 = env->regs[R_R13]; + vmsa->r14 = env->regs[R_R14]; + vmsa->r15 = env->regs[R_R15]; +#endif + + vmsa->rip = env->eip; + vmsa->rflags = env->eflags; +} + +static void sev_es_set_ap_context(uint32_t reset_addr) +{ + CPUState *cpu; + struct sev_es_save_area vmsa; + SegmentCache cs; + + cs.selector = 0xf000; + cs.base = reset_addr & 0xffff0000; + cs.limit = 0xffff; + cs.flags = DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK | + DESC_A_MASK; + + CPU_FOREACH(cpu) { + if (cpu->cpu_index == 0) { + /* Do not update the BSP reset state */ + continue; + } + initialize_vmsa(cpu, &vmsa); + seg_to_vmsa(&cs, &vmsa.cs); + vmsa.rip = reset_addr & 0x0000ffff; + 
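The CS/RIP split at the end of sev_es_set_ap_context() mirrors how a real-mode CPU resolves its start address as CS.base + IP. A worked example with a made-up reset address:

/* Hypothetical reset vector located through the firmware GUID table. */
uint32_t reset_addr = 0x0080b1c0;

uint32_t cs_base = reset_addr & 0xffff0000;   /* 0x00800000 */
uint16_t ip      = reset_addr & 0x0000ffff;   /* 0xb1c0     */

/* The AP starts fetching at CS.base + IP = 0x00800000 + 0xb1c0 = 0x0080b1c0,
 * i.e. exactly the address recovered from the flash image. */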
sev_set_cpu_context(cpu->cpu_index, &vmsa, + sizeof(struct sev_es_save_area), + 0, &error_fatal); } +} - x86 = X86_CPU(cpu); - env = &x86->env; - - cpu_x86_load_seg_cache(env, R_CS, 0xf000, sev_common->reset_cs, 0xffff, - DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | - DESC_R_MASK | DESC_A_MASK); - - env->eip = sev_common->reset_ip; +void sev_es_set_reset_vector(CPUState *cpu) +{ + if (sev_enabled()) { + sev_apply_cpu_context(cpu); + } } int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size) { - CPUState *cpu; uint32_t addr; int ret; - SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs); if (!sev_es_enabled()) { return 0; @@ -1835,14 +2285,12 @@ int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size) return ret; } + /* + * The reset vector is saved into a CPU context for each AP but not for + * the BSP. This is applied during guest startup or when the CPU is reset. + */ if (addr) { - sev_common->reset_cs = addr & 0xffff0000; - sev_common->reset_ip = addr & 0x0000ffff; - sev_common->reset_data_valid = true; - - CPU_FOREACH(cpu) { - sev_es_set_reset_vector(cpu); - } + sev_es_set_ap_context(addr); } return 0; @@ -1883,7 +2331,7 @@ static bool build_kernel_loader_hashes(PaddedSevHashTable *padded_ht, * be used. */ hashp = cmdline_hash; - if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->cmdline_data, + if (qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_SHA256, ctx->cmdline_data, ctx->cmdline_size, &hashp, &hash_len, errp) < 0) { return false; } @@ -1894,7 +2342,7 @@ static bool build_kernel_loader_hashes(PaddedSevHashTable *padded_ht, * -initrd, an empty buffer will be used (ctx->initrd_size == 0). */ hashp = initrd_hash; - if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->initrd_data, + if (qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_SHA256, ctx->initrd_data, ctx->initrd_size, &hashp, &hash_len, errp) < 0) { return false; } @@ -1906,7 +2354,7 @@ static bool build_kernel_loader_hashes(PaddedSevHashTable *padded_ht, { .iov_base = ctx->setup_data, .iov_len = ctx->setup_size }, { .iov_base = ctx->kernel_data, .iov_len = ctx->kernel_size } }; - if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALG_SHA256, iov, ARRAY_SIZE(iov), + if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALGO_SHA256, iov, ARRAY_SIZE(iov), &hashp, &hash_len, errp) < 0) { return false; } @@ -2044,8 +2492,239 @@ static void sev_common_set_kernel_hashes(Object *obj, bool value, Error **errp) SEV_COMMON(obj)->kernel_hashes = value; } +static bool cgs_check_support(ConfidentialGuestPlatformType platform, + uint16_t platform_version, uint8_t highest_vtl, + uint64_t shared_gpa_boundary) +{ + return (((platform == CGS_PLATFORM_SEV_SNP) && sev_snp_enabled()) || + ((platform == CGS_PLATFORM_SEV_ES) && sev_es_enabled()) || + ((platform == CGS_PLATFORM_SEV) && sev_enabled())); +} + +static int cgs_set_guest_state(hwaddr gpa, uint8_t *ptr, uint64_t len, + ConfidentialGuestPageType memory_type, + uint16_t cpu_index, Error **errp) +{ + SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs); + SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(sev_common); + + if (sev_common->state == SEV_STATE_UNINIT) { + /* Pre-processing of IGVM file called from sev_common_kvm_init() */ + if ((cpu_index == 0) && (memory_type == CGS_PAGE_TYPE_VMSA)) { + const struct sev_es_save_area *sa = + (const struct sev_es_save_area *)ptr; + if (len < sizeof(*sa)) { + error_setg(errp, "%s: invalid VMSA length encountered", + __func__); + return -1; + } + if (check_sev_features(sev_common, sa->sev_features, errp) < 0) { + return -1; + } + 
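The hash-table code above always writes a 32-byte SHA-256 digest in place through the hashp/hash_len pair. A small sketch of that qcrypto usage, hashing a hypothetical command-line buffer into a preallocated digest (errp assumed to be in scope, as in the callers above):

uint8_t digest[32];                       /* SHA-256 digest size            */
uint8_t *hashp = digest;
size_t hash_len = sizeof(digest);
const char cmdline[] = "console=ttyS0";   /* hypothetical -append contents  */

if (qcrypto_hash_bytes(QCRYPTO_HASH_ALGO_SHA256, cmdline, sizeof(cmdline),
                       &hashp, &hash_len, errp) < 0) {
    return false;
}
/* On success hash_len is 32 and digest[] holds the SHA-256 of the buffer,
 * just as cmdline_hash is filled in above. */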
sev_common->sev_features = sa->sev_features; + } + return 0; + } + + if (!sev_enabled()) { + error_setg(errp, "%s: attempt to configure guest memory, but SEV " + "is not enabled", __func__); + return -1; + } + + switch (memory_type) { + case CGS_PAGE_TYPE_NORMAL: + case CGS_PAGE_TYPE_ZERO: + return klass->launch_update_data(sev_common, gpa, ptr, len, errp); + + case CGS_PAGE_TYPE_VMSA: + if (!sev_es_enabled()) { + error_setg(errp, + "%s: attempt to configure initial VMSA, but SEV-ES " + "is not supported", + __func__); + return -1; + } + if (check_vmsa_supported(sev_common, gpa, + (const struct sev_es_save_area *)ptr, + errp) < 0) { + return -1; + } + return sev_set_cpu_context(cpu_index, ptr, len, gpa, errp); + + case CGS_PAGE_TYPE_UNMEASURED: + if (sev_snp_enabled()) { + return snp_launch_update_data( + gpa, ptr, len, KVM_SEV_SNP_PAGE_TYPE_UNMEASURED, errp); + } + /* No action required if not SEV-SNP */ + return 0; + + case CGS_PAGE_TYPE_SECRETS: + if (!sev_snp_enabled()) { + error_setg(errp, + "%s: attempt to configure secrets page, but SEV-SNP " + "is not supported", + __func__); + return -1; + } + return snp_launch_update_data(gpa, ptr, len, + KVM_SEV_SNP_PAGE_TYPE_SECRETS, errp); + + case CGS_PAGE_TYPE_REQUIRED_MEMORY: + if (kvm_convert_memory(gpa, len, true) < 0) { + error_setg( + errp, + "%s: failed to configure required memory. gpa: %lX, type: %d", + __func__, gpa, memory_type); + return -1; + } + return 0; + + case CGS_PAGE_TYPE_CPUID: + if (!sev_snp_enabled()) { + error_setg(errp, + "%s: attempt to configure CPUID page, but SEV-SNP " + "is not supported", + __func__); + return -1; + } + return snp_launch_update_cpuid(gpa, ptr, len, errp); + } + error_setg(errp, "%s: failed to update guest. gpa: %lX, type: %d", __func__, + gpa, memory_type); + return -1; +} + +static int cgs_get_mem_map_entry(int index, + ConfidentialGuestMemoryMapEntry *entry, + Error **errp) +{ + struct e820_entry *table; + int num_entries; + + SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs); + if (sev_common->state == SEV_STATE_UNINIT) { + /* Pre-processing of IGVM file called from sev_common_kvm_init() */ + return 1; + } + + num_entries = e820_get_table(&table); + if ((index < 0) || (index >= num_entries)) { + return 1; + } + entry->gpa = table[index].address; + entry->size = table[index].length; + switch (table[index].type) { + case E820_RAM: + entry->type = CGS_MEM_RAM; + break; + case E820_RESERVED: + entry->type = CGS_MEM_RESERVED; + break; + case E820_ACPI: + entry->type = CGS_MEM_ACPI; + break; + case E820_NVS: + entry->type = CGS_MEM_NVS; + break; + case E820_UNUSABLE: + entry->type = CGS_MEM_UNUSABLE; + break; + } + return 0; +} + +static int cgs_set_guest_policy(ConfidentialGuestPolicyType policy_type, + uint64_t policy, void *policy_data1, + uint32_t policy_data1_size, void *policy_data2, + uint32_t policy_data2_size, Error **errp) +{ + SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs); + if (sev_common->state == SEV_STATE_UNINIT) { + /* Pre-processing of IGVM file called from sev_common_kvm_init() */ + return 0; + } + + if (policy_type != GUEST_POLICY_SEV) { + error_setg(errp, "%s: Invalid guest policy type provided for SEV: %d", + __func__, policy_type); + return -1; + } + /* + * SEV-SNP handles policy differently. The policy flags are defined in + * kvm_start_conf.policy and an ID block and ID auth can be provided. 
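cgs_get_mem_map_entry() uses a simple cursor protocol: it returns 0 and fills *entry while the index is inside the e820 table, and returns 1 once the index runs past the end (or while the early IGVM pre-processing pass is active). A hedged sketch of how a consumer could walk the map; the hook is normally reached through cgs->get_mem_map_entry rather than called directly:

ConfidentialGuestMemoryMapEntry entry;
int i;

for (i = 0; cgs_get_mem_map_entry(i, &entry, &error_fatal) == 0; i++) {
    /* entry.gpa, entry.size and entry.type (CGS_MEM_RAM, CGS_MEM_RESERVED,
     * ...) describe one e820 region translated to the CGS enumeration. */
}
/* A return value of 1 ends the walk. */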
+ */ + if (sev_snp_enabled()) { + SevSnpGuestState *sev_snp_guest = + SEV_SNP_GUEST(MACHINE(qdev_get_machine())->cgs); + struct kvm_sev_snp_launch_finish *finish = + &sev_snp_guest->kvm_finish_conf; + + /* + * The policy consists of flags in 'policy' and optionally an ID block + * and ID auth in policy_data1 and policy_data2 respectively. The ID + * block and auth are optional so clear any previous ID block and auth + * and set them if provided, but always set the policy flags. + */ + g_free(sev_snp_guest->id_block); + g_free((guchar *)finish->id_block_uaddr); + g_free(sev_snp_guest->id_auth); + g_free((guchar *)finish->id_auth_uaddr); + sev_snp_guest->id_block = NULL; + finish->id_block_uaddr = 0; + sev_snp_guest->id_auth = NULL; + finish->id_auth_uaddr = 0; + + if (policy_data1_size > 0) { + struct sev_snp_id_authentication *id_auth = + (struct sev_snp_id_authentication *)policy_data2; + + if (policy_data1_size != KVM_SEV_SNP_ID_BLOCK_SIZE) { + error_setg(errp, "%s: Invalid SEV-SNP ID block: incorrect size", + __func__); + return -1; + } + if (policy_data2_size != KVM_SEV_SNP_ID_AUTH_SIZE) { + error_setg(errp, + "%s: Invalid SEV-SNP ID auth block: incorrect size", + __func__); + return -1; + } + assert(policy_data1 != NULL); + assert(policy_data2 != NULL); + + finish->id_block_uaddr = + (__u64)g_memdup2(policy_data1, KVM_SEV_SNP_ID_BLOCK_SIZE); + finish->id_auth_uaddr = + (__u64)g_memdup2(policy_data2, KVM_SEV_SNP_ID_AUTH_SIZE); + + /* + * Check if an author key has been provided and use that to flag + * whether the author key is enabled. The first of the author key + * must be non-zero to indicate the key type, which will currently + * always be 2. + */ + sev_snp_guest->kvm_finish_conf.auth_key_en = + id_auth->author_key[0] ? 1 : 0; + finish->id_block_en = 1; + } + sev_snp_guest->kvm_start_conf.policy = policy; + } else { + SevGuestState *sev_guest = SEV_GUEST(MACHINE(qdev_get_machine())->cgs); + /* Only the policy flags are supported for SEV and SEV-ES */ + if ((policy_data1_size > 0) || (policy_data2_size > 0) || !sev_guest) { + error_setg(errp, "%s: An ID block/ID auth block has been provided " + "but SEV-SNP is not enabled", __func__); + return -1; + } + sev_guest->policy = policy; + } + return 0; +} + static void -sev_common_class_init(ObjectClass *oc, void *data) +sev_common_class_init(ObjectClass *oc, const void *data) { ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc); @@ -2067,6 +2746,8 @@ static void sev_common_instance_init(Object *obj) { SevCommonState *sev_common = SEV_COMMON(obj); + ConfidentialGuestSupportClass *cgs = + CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(obj); sev_common->kvm_type = -1; @@ -2077,6 +2758,12 @@ sev_common_instance_init(Object *obj) object_property_add_uint32_ptr(obj, "reduced-phys-bits", &sev_common->reduced_phys_bits, OBJ_PROP_FLAG_READWRITE); + cgs->check_support = cgs_check_support; + cgs->set_guest_state = cgs_set_guest_state; + cgs->get_mem_map_entry = cgs_get_mem_map_entry; + cgs->set_guest_policy = cgs_set_guest_policy; + + QTAILQ_INIT(&sev_common->launch_vmsa); } /* sev guest info common to sev/sev-es/sev-snp */ @@ -2088,7 +2775,7 @@ static const TypeInfo sev_common_info = { .class_size = sizeof(SevCommonStateClass), .class_init = sev_common_class_init, .abstract = true, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } @@ -2140,7 +2827,7 @@ static void sev_guest_set_legacy_vm_type(Object *obj, Visitor *v, } static void -sev_guest_class_init(ObjectClass *oc, void 
*data) +sev_guest_class_init(ObjectClass *oc, const void *data) { SevCommonStateClass *klass = SEV_COMMON_CLASS(oc); X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc); @@ -2394,7 +3081,7 @@ sev_snp_guest_set_host_data(Object *obj, const char *value, Error **errp) } static void -sev_snp_guest_class_init(ObjectClass *oc, void *data) +sev_snp_guest_class_init(ObjectClass *oc, const void *data) { SevCommonStateClass *klass = SEV_COMMON_CLASS(oc); X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc); @@ -2404,7 +3091,7 @@ sev_snp_guest_class_init(ObjectClass *oc, void *data) klass->launch_finish = sev_snp_launch_finish; klass->launch_update_data = sev_snp_launch_update_data; klass->kvm_init = sev_snp_kvm_init; - x86_klass->mask_cpuid_features = sev_snp_mask_cpuid_features; + x86_klass->adjust_cpuid_features = sev_snp_adjust_cpuid_features; x86_klass->kvm_type = sev_snp_kvm_type; object_class_property_add(oc, "policy", "uint64", @@ -2422,7 +3109,7 @@ sev_snp_guest_class_init(ObjectClass *oc, void *data) object_class_property_add_bool(oc, "author-key-enabled", sev_snp_guest_get_author_key_enabled, sev_snp_guest_set_author_key_enabled); - object_class_property_add_bool(oc, "vcek-required", + object_class_property_add_bool(oc, "vcek-disabled", sev_snp_guest_get_vcek_disabled, sev_snp_guest_set_vcek_disabled); object_class_property_add_str(oc, "host-data", diff --git a/target/i386/sev.h b/target/i386/sev.h index 858005a119c..9db1a802f6b 100644 --- a/target/i386/sev.h +++ b/target/i386/sev.h @@ -18,7 +18,17 @@ #include CONFIG_DEVICES /* CONFIG_SEV */ #endif -#include "exec/confidential-guest-support.h" +#if !defined(CONFIG_SEV) || defined(CONFIG_USER_ONLY) +#define sev_enabled() 0 +#define sev_es_enabled() 0 +#define sev_snp_enabled() 0 +#else +bool sev_enabled(void); +bool sev_es_enabled(void); +bool sev_snp_enabled(void); +#endif + +#if !defined(CONFIG_USER_ONLY) #define TYPE_SEV_COMMON "sev-common" #define TYPE_SEV_GUEST "sev-guest" @@ -34,6 +44,8 @@ #define SEV_SNP_POLICY_SMT 0x10000 #define SEV_SNP_POLICY_DBG 0x80000 +#define SVM_SEV_FEAT_SNP_ACTIVE 1 + typedef struct SevKernelLoaderContext { char *setup_data; size_t setup_size; @@ -45,18 +57,128 @@ typedef struct SevKernelLoaderContext { size_t cmdline_size; } SevKernelLoaderContext; -#ifdef CONFIG_SEV -bool sev_enabled(void); -bool sev_es_enabled(void); -bool sev_snp_enabled(void); -#else -#define sev_enabled() 0 -#define sev_es_enabled() 0 -#define sev_snp_enabled() 0 -#endif +/* Save area definition for SEV-ES and SEV-SNP guests */ +struct QEMU_PACKED sev_es_save_area { + struct vmcb_seg es; + struct vmcb_seg cs; + struct vmcb_seg ss; + struct vmcb_seg ds; + struct vmcb_seg fs; + struct vmcb_seg gs; + struct vmcb_seg gdtr; + struct vmcb_seg ldtr; + struct vmcb_seg idtr; + struct vmcb_seg tr; + uint64_t vmpl0_ssp; + uint64_t vmpl1_ssp; + uint64_t vmpl2_ssp; + uint64_t vmpl3_ssp; + uint64_t u_cet; + uint8_t reserved_0xc8[2]; + uint8_t vmpl; + uint8_t cpl; + uint8_t reserved_0xcc[4]; + uint64_t efer; + uint8_t reserved_0xd8[104]; + uint64_t xss; + uint64_t cr4; + uint64_t cr3; + uint64_t cr0; + uint64_t dr7; + uint64_t dr6; + uint64_t rflags; + uint64_t rip; + uint64_t dr0; + uint64_t dr1; + uint64_t dr2; + uint64_t dr3; + uint64_t dr0_addr_mask; + uint64_t dr1_addr_mask; + uint64_t dr2_addr_mask; + uint64_t dr3_addr_mask; + uint8_t reserved_0x1c0[24]; + uint64_t rsp; + uint64_t s_cet; + uint64_t ssp; + uint64_t isst_addr; + uint64_t rax; + uint64_t star; + uint64_t lstar; + uint64_t cstar; + uint64_t 
sfmask; + uint64_t kernel_gs_base; + uint64_t sysenter_cs; + uint64_t sysenter_esp; + uint64_t sysenter_eip; + uint64_t cr2; + uint8_t reserved_0x248[32]; + uint64_t g_pat; + uint64_t dbgctl; + uint64_t br_from; + uint64_t br_to; + uint64_t last_excp_from; + uint64_t last_excp_to; + uint8_t reserved_0x298[80]; + uint32_t pkru; + uint32_t tsc_aux; + uint8_t reserved_0x2f0[24]; + uint64_t rcx; + uint64_t rdx; + uint64_t rbx; + uint64_t reserved_0x320; /* rsp already available at 0x01d8 */ + uint64_t rbp; + uint64_t rsi; + uint64_t rdi; + uint64_t r8; + uint64_t r9; + uint64_t r10; + uint64_t r11; + uint64_t r12; + uint64_t r13; + uint64_t r14; + uint64_t r15; + uint8_t reserved_0x380[16]; + uint64_t guest_exit_info_1; + uint64_t guest_exit_info_2; + uint64_t guest_exit_int_info; + uint64_t guest_nrip; + uint64_t sev_features; + uint64_t vintr_ctrl; + uint64_t guest_exit_code; + uint64_t virtual_tom; + uint64_t tlb_id; + uint64_t pcpu_id; + uint64_t event_inj; + uint64_t xcr0; + uint8_t reserved_0x3f0[16]; + + /* Floating point area */ + uint64_t x87_dp; + uint32_t mxcsr; + uint16_t x87_ftw; + uint16_t x87_fsw; + uint16_t x87_fcw; + uint16_t x87_fop; + uint16_t x87_ds; + uint16_t x87_cs; + uint64_t x87_rip; + uint8_t fpreg_x87[80]; + uint8_t fpreg_xmm[256]; + uint8_t fpreg_ymm[256]; +}; + +struct QEMU_PACKED sev_snp_id_authentication { + uint32_t id_key_alg; + uint32_t auth_key_algo; + uint8_t reserved[56]; + uint8_t id_block_sig[512]; + uint8_t id_key[1028]; + uint8_t reserved2[60]; + uint8_t id_key_sig[512]; + uint8_t author_key[1028]; + uint8_t reserved3[892]; +}; -uint32_t sev_get_cbit_position(void); -uint32_t sev_get_reduced_phys_bits(void); bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp); int sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp); @@ -68,4 +190,9 @@ void sev_es_set_reset_vector(CPUState *cpu); void pc_system_parse_sev_metadata(uint8_t *flash_ptr, size_t flash_size); +#endif /* !CONFIG_USER_ONLY */ + +uint32_t sev_get_cbit_position(void); +uint32_t sev_get_reduced_phys_bits(void); + #endif diff --git a/target/i386/tcg/access.c b/target/i386/tcg/access.c index 56a1181ea50..97e3f0e7568 100644 --- a/target/i386/tcg/access.c +++ b/target/i386/tcg/access.c @@ -3,8 +3,9 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/cpu_ldst.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/probe.h" +#include "exec/target_page.h" #include "access.h" @@ -58,6 +59,11 @@ static void *access_ptr(X86Access *ac, vaddr addr, unsigned len) assert(addr >= ac->vaddr); + /* No haddr means probe_access wants to force slow path */ + if (!ac->haddr1) { + return NULL; + } + #ifdef CONFIG_USER_ONLY assert(offset <= ac->size1 - len); return ac->haddr1 + offset; @@ -78,17 +84,11 @@ static void *access_ptr(X86Access *ac, vaddr addr, unsigned len) #endif } -#ifdef CONFIG_USER_ONLY -# define test_ptr(p) true -#else -# define test_ptr(p) likely(p) -#endif - uint8_t access_ldb(X86Access *ac, vaddr addr) { void *p = access_ptr(ac, addr, sizeof(uint8_t)); - if (test_ptr(p)) { + if (likely(p)) { return ldub_p(p); } return cpu_ldub_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra); @@ -98,7 +98,7 @@ uint16_t access_ldw(X86Access *ac, vaddr addr) { void *p = access_ptr(ac, addr, sizeof(uint16_t)); - if (test_ptr(p)) { + if (likely(p)) { return lduw_le_p(p); } return cpu_lduw_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra); @@ -108,7 +108,7 @@ uint32_t access_ldl(X86Access *ac, vaddr addr) { void *p = access_ptr(ac, addr, 
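The reserved_0xNN names in struct sev_es_save_area double as offset markers, so the layout can be checked at compile time. A few illustrative asserts, assuming struct vmcb_seg is the usual 16-byte SVM segment descriptor (u16 selector, u16 attrib, u32 limit, u64 base) and noting that QEMU_PACKED removes all padding:

#include <stddef.h>

_Static_assert(offsetof(struct sev_es_save_area, efer)         == 0x0d0,
               "efer sits just before reserved_0xd8");
_Static_assert(offsetof(struct sev_es_save_area, rsp)          == 0x1d8,
               "matches the 'rsp already available at 0x01d8' comment");
_Static_assert(offsetof(struct sev_es_save_area, sev_features) == 0x3b0,
               "sev_features as consumed during VMSA pre-processing");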
sizeof(uint32_t)); - if (test_ptr(p)) { + if (likely(p)) { return ldl_le_p(p); } return cpu_ldl_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra); @@ -118,7 +118,7 @@ uint64_t access_ldq(X86Access *ac, vaddr addr) { void *p = access_ptr(ac, addr, sizeof(uint64_t)); - if (test_ptr(p)) { + if (likely(p)) { return ldq_le_p(p); } return cpu_ldq_le_mmuidx_ra(ac->env, addr, ac->mmu_idx, ac->ra); @@ -128,7 +128,7 @@ void access_stb(X86Access *ac, vaddr addr, uint8_t val) { void *p = access_ptr(ac, addr, sizeof(uint8_t)); - if (test_ptr(p)) { + if (likely(p)) { stb_p(p, val); } else { cpu_stb_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra); @@ -139,7 +139,7 @@ void access_stw(X86Access *ac, vaddr addr, uint16_t val) { void *p = access_ptr(ac, addr, sizeof(uint16_t)); - if (test_ptr(p)) { + if (likely(p)) { stw_le_p(p, val); } else { cpu_stw_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra); @@ -150,7 +150,7 @@ void access_stl(X86Access *ac, vaddr addr, uint32_t val) { void *p = access_ptr(ac, addr, sizeof(uint32_t)); - if (test_ptr(p)) { + if (likely(p)) { stl_le_p(p, val); } else { cpu_stl_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra); @@ -161,7 +161,7 @@ void access_stq(X86Access *ac, vaddr addr, uint64_t val) { void *p = access_ptr(ac, addr, sizeof(uint64_t)); - if (test_ptr(p)) { + if (likely(p)) { stq_le_p(p, val); } else { cpu_stq_le_mmuidx_ra(ac->env, addr, val, ac->mmu_idx, ac->ra); diff --git a/target/i386/tcg/cc_helper.c b/target/i386/tcg/cc_helper.c index 301ed954064..f1940b40927 100644 --- a/target/i386/tcg/cc_helper.c +++ b/target/i386/tcg/cc_helper.c @@ -22,41 +22,6 @@ #include "exec/helper-proto.h" #include "helper-tcg.h" -const uint8_t parity_table[256] = { - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, - 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, -}; - #define SHIFT 0 #include "cc_helper_template.h.inc" #undef SHIFT @@ -95,6 +60,19 @@ static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1, return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O); } +target_ulong helper_cc_compute_nz(target_ulong dst, target_ulong src1, + int op) +{ + if (CC_OP_HAS_EFLAGS(op)) { + return ~src1 & CC_Z; + } else { + MemOp size = cc_op_size(op); + target_ulong mask = MAKE_64BIT_MASK(0, 8 << size); + + return dst & mask; + } +} + target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1, target_ulong src2, int op) { @@ -104,8 +82,6 @@ target_ulong 
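helper_cc_compute_nz() only answers "was the result non-zero?", so it never builds full EFLAGS: with flags already in EFLAGS form it just tests ZF (~src1 & CC_Z is non-zero exactly when ZF is clear), otherwise it masks CC_DST down to the operation width. A small illustration with hypothetical values:

/* Lazy-flags case: CC_OP_LOGICB, CC_DST = 0xffffff00 (upper bits assumed to
 * be stale leftovers from a wider temporary, only bits 7:0 are the result). */
MemOp size = MO_8;                                  /* cc_op_size(CC_OP_LOGICB) */
target_ulong mask = MAKE_64BIT_MASK(0, 8 << size);  /* 0xff */
/* 0xffffff00 & 0xff == 0  ->  returns 0: the 8-bit result was zero, ZF set. */

/* EFLAGS case: CC_OP_EFLAGS with ZF set in src1.                            */
/* ~src1 & CC_Z == 0       ->  also returns 0, so the two cases agree.       */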
helper_cc_compute_all(target_ulong dst, target_ulong src1, case CC_OP_EFLAGS: return src1; - case CC_OP_CLR: - return CC_Z | CC_P; case CC_OP_POPCNT: return dst ? 0 : CC_Z; @@ -186,6 +162,13 @@ target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1, case CC_OP_BMILGL: return compute_all_bmilgl(dst, src1); + case CC_OP_BLSIB: + return compute_all_blsib(dst, src1); + case CC_OP_BLSIW: + return compute_all_blsiw(dst, src1); + case CC_OP_BLSIL: + return compute_all_blsil(dst, src1); + case CC_OP_ADCX: return compute_all_adcx(dst, src1, src2); case CC_OP_ADOX: @@ -216,6 +199,8 @@ target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1, return compute_all_sarq(dst, src1); case CC_OP_BMILGQ: return compute_all_bmilgq(dst, src1); + case CC_OP_BLSIQ: + return compute_all_blsiq(dst, src1); #endif } } @@ -234,7 +219,6 @@ target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1, case CC_OP_LOGICW: case CC_OP_LOGICL: case CC_OP_LOGICQ: - case CC_OP_CLR: case CC_OP_POPCNT: return 0; @@ -308,6 +292,13 @@ target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1, case CC_OP_BMILGL: return compute_c_bmilgl(dst, src1); + case CC_OP_BLSIB: + return compute_c_blsib(dst, src1); + case CC_OP_BLSIW: + return compute_c_blsiw(dst, src1); + case CC_OP_BLSIL: + return compute_c_blsil(dst, src1); + #ifdef TARGET_X86_64 case CC_OP_ADDQ: return compute_c_addq(dst, src1); @@ -321,6 +312,8 @@ target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1, return compute_c_shlq(dst, src1); case CC_OP_BMILGQ: return compute_c_bmilgq(dst, src1); + case CC_OP_BLSIQ: + return compute_c_blsiq(dst, src1); #endif } } diff --git a/target/i386/tcg/cc_helper_template.h.inc b/target/i386/tcg/cc_helper_template.h.inc index bb611feb048..d8fd976ca15 100644 --- a/target/i386/tcg/cc_helper_template.h.inc +++ b/target/i386/tcg/cc_helper_template.h.inc @@ -22,12 +22,17 @@ #if DATA_BITS == 8 #define SUFFIX b #define DATA_TYPE uint8_t +#define WIDER_TYPE uint32_t #elif DATA_BITS == 16 #define SUFFIX w #define DATA_TYPE uint16_t +#define WIDER_TYPE uint32_t #elif DATA_BITS == 32 #define SUFFIX l #define DATA_TYPE uint32_t +#if HOST_LONG_BITS >= 64 +#define WIDER_TYPE uint64_t +#endif #elif DATA_BITS == 64 #define SUFFIX q #define DATA_TYPE uint64_t @@ -39,18 +44,32 @@ /* dynamic flags computation */ -static int glue(compute_all_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +static uint32_t glue(compute_all_cout, SUFFIX)(DATA_TYPE dst, DATA_TYPE carries) { - int cf, pf, af, zf, sf, of; - DATA_TYPE src2 = dst - src1; + uint32_t af_cf, pf, zf, sf, of; - cf = dst < src1; - pf = parity_table[(uint8_t)dst]; - af = (dst ^ src1 ^ src2) & CC_A; + /* PF, ZF, SF computed from result. */ + pf = compute_pf(dst); zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; - of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O; - return cf | pf | af | zf | sf | of; + + /* + * AF, CF, OF computed from carry out vector. To compute AF and CF, rotate it + * left by one so cout(DATA_BITS - 1) is in bit 0 and cout(3) in bit 4. + * + * To compute OF, place the highest two carry bits into OF and the bit + * immediately to the right of it; then, adding CC_O / 2 XORs them. 
+ */ + af_cf = ((carries << 1) | (carries >> (DATA_BITS - 1))) & (CC_A | CC_C); + of = (lshift(carries, 12 - DATA_BITS) + CC_O / 2) & CC_O; + return pf + zf + sf + af_cf + of; +} + +static uint32_t glue(compute_all_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +{ + DATA_TYPE src2 = dst - src1; + DATA_TYPE carries = ADD_COUT_VEC(src1, src2, dst); + return glue(compute_all_cout, SUFFIX)(dst, carries); } static int glue(compute_c_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) @@ -58,39 +77,31 @@ static int glue(compute_c_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) return dst < src1; } -static int glue(compute_all_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1, +static uint32_t glue(compute_all_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1, DATA_TYPE src3) { - int cf, pf, af, zf, sf, of; DATA_TYPE src2 = dst - src1 - src3; - - cf = (src3 ? dst <= src1 : dst < src1); - pf = parity_table[(uint8_t)dst]; - af = (dst ^ src1 ^ src2) & 0x10; - zf = (dst == 0) << 6; - sf = lshift(dst, 8 - DATA_BITS) & 0x80; - of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O; - return cf | pf | af | zf | sf | of; + DATA_TYPE carries = ADD_COUT_VEC(src1, src2, dst); + return glue(compute_all_cout, SUFFIX)(dst, carries); } static int glue(compute_c_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1, DATA_TYPE src3) { +#ifdef WIDER_TYPE + WIDER_TYPE src13 = (WIDER_TYPE) src1 + (WIDER_TYPE) src3; + + return dst < src13; +#else return src3 ? dst <= src1 : dst < src1; +#endif } -static int glue(compute_all_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2) +static uint32_t glue(compute_all_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2) { - int cf, pf, af, zf, sf, of; DATA_TYPE src1 = dst + src2; - - cf = src1 < src2; - pf = parity_table[(uint8_t)dst]; - af = (dst ^ src1 ^ src2) & CC_A; - zf = (dst == 0) * CC_Z; - sf = lshift(dst, 8 - DATA_BITS) & CC_S; - of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O; - return cf | pf | af | zf | sf | of; + DATA_TYPE carries = SUB_COUT_VEC(src1, src2, dst); + return glue(compute_all_cout, SUFFIX)(dst, carries); } static int glue(compute_c_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2) @@ -100,86 +111,80 @@ static int glue(compute_c_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2) return src1 < src2; } -static int glue(compute_all_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2, +static uint32_t glue(compute_all_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2, DATA_TYPE src3) { - int cf, pf, af, zf, sf, of; DATA_TYPE src1 = dst + src2 + src3; - - cf = (src3 ? src1 <= src2 : src1 < src2); - pf = parity_table[(uint8_t)dst]; - af = (dst ^ src1 ^ src2) & 0x10; - zf = (dst == 0) << 6; - sf = lshift(dst, 8 - DATA_BITS) & 0x80; - of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O; - return cf | pf | af | zf | sf | of; + DATA_TYPE carries = SUB_COUT_VEC(src1, src2, dst); + return glue(compute_all_cout, SUFFIX)(dst, carries); } static int glue(compute_c_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2, DATA_TYPE src3) { +#ifdef WIDER_TYPE + WIDER_TYPE src23 = (WIDER_TYPE) src2 + (WIDER_TYPE) src3; + DATA_TYPE src1 = dst + src23; + + return src1 < src23; +#else DATA_TYPE src1 = dst + src2 + src3; return (src3 ? 
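The carry-out vector turns AF, CF and OF into pure shift-and-mask work. Assuming ADD_COUT_VEC(a, b, r) is the usual per-bit carry-out expression, (a & b) | ((a | b) & ~r), here is the 8-bit addition 0x7f + 0x01 traced through compute_all_coutb:

/* dst = 0x7f + 0x01 = 0x80, so src1 = 0x7f and src2 = dst - src1 = 0x01.   */
uint32_t carries = (0x7f & 0x01) | ((0x7f | 0x01) & 0x7f);   /* ~0x80 = 0x7f, carries = 0x7f */

/* Rotate left by one: carry out of bit 3 lands on CC_A (0x10), carry out
 * of bit 7 lands on CC_C (0x01).                                            */
uint32_t af_cf = ((carries << 1) | (carries >> 7)) & (CC_A | CC_C);   /* 0x10: AF = 1, CF = 0 */

/* Shift carries so cout(7) and cout(6) sit in bits 11 and 10; adding
 * CC_O / 2 = 0x400 carries bit 10 into bit 11, i.e. XORs the two carries.
 * cout(6) = 1 while cout(7) = 0, so signed overflow: OF = 1.                */
uint32_t of = ((carries << 4) + CC_O / 2) & CC_O;                     /* 0x800 */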
src1 <= src2 : src1 < src2); +#endif } -static int glue(compute_all_logic, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +static uint32_t glue(compute_all_logic, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { - int cf, pf, af, zf, sf, of; + uint32_t cf, pf, af, zf, sf, of; cf = 0; - pf = parity_table[(uint8_t)dst]; + pf = compute_pf(dst); af = 0; zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; of = 0; - return cf | pf | af | zf | sf | of; + return cf + pf + af + zf + sf + of; } -static int glue(compute_all_inc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +static uint32_t glue(compute_all_inc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { - int cf, pf, af, zf, sf, of; - DATA_TYPE src2; + uint32_t cf, pf, af, zf, sf, of; cf = src1; - src1 = dst - 1; - src2 = 1; - pf = parity_table[(uint8_t)dst]; - af = (dst ^ src1 ^ src2) & CC_A; + pf = compute_pf(dst); + af = (dst ^ (dst - 1)) & CC_A; /* bits 0..3 are all clear */ zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; of = (dst == SIGN_MASK) * CC_O; - return cf | pf | af | zf | sf | of; + return cf + pf + af + zf + sf + of; } -static int glue(compute_all_dec, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +static uint32_t glue(compute_all_dec, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { - int cf, pf, af, zf, sf, of; - DATA_TYPE src2; + uint32_t cf, pf, af, zf, sf, of; cf = src1; - src1 = dst + 1; - src2 = 1; - pf = parity_table[(uint8_t)dst]; - af = (dst ^ src1 ^ src2) & CC_A; + pf = compute_pf(dst); + af = (dst ^ (dst + 1)) & CC_A; /* bits 0..3 are all set */ zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; of = (dst == SIGN_MASK - 1) * CC_O; - return cf | pf | af | zf | sf | of; + return cf + pf + af + zf + sf + of; } -static int glue(compute_all_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +static uint32_t glue(compute_all_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { - int cf, pf, af, zf, sf, of; + uint32_t cf, pf, af, zf, sf, of; cf = (src1 >> (DATA_BITS - 1)) & CC_C; - pf = parity_table[(uint8_t)dst]; + pf = compute_pf(dst); af = 0; /* undefined */ zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; /* of is defined iff shift count == 1 */ of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O; - return cf | pf | af | zf | sf | of; + return cf + pf + af + zf + sf + of; } static int glue(compute_c_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) @@ -187,39 +192,39 @@ static int glue(compute_c_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) return (src1 >> (DATA_BITS - 1)) & CC_C; } -static int glue(compute_all_sar, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +static uint32_t glue(compute_all_sar, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { - int cf, pf, af, zf, sf, of; + uint32_t cf, pf, af, zf, sf, of; cf = src1 & 1; - pf = parity_table[(uint8_t)dst]; + pf = compute_pf(dst); af = 0; /* undefined */ zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; /* of is defined iff shift count == 1 */ of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O; - return cf | pf | af | zf | sf | of; + return cf + pf + af + zf + sf + of; } /* NOTE: we compute the flags like the P4. On olders CPUs, only OF and CF are modified and it is slower to do that. Note as well that we don't truncate SRC1 for computing carry to DATA_TYPE. 
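WIDER_TYPE exists so that the carry test for ADC/SBB needs no case distinction: src1 + src3 (or src2 + src3) is formed in a type wide enough that it cannot wrap before the comparison. An 8-bit example with made-up values:

/* adc: dst = src1 + src2 + carry-in, with src1 = 0xff, src2 = 0x00, cin = 1 */
uint8_t  src1 = 0xff, src3 = 1;           /* src3 is the carry-in            */
uint8_t  dst  = 0x00;                     /* 0xff + 0x00 + 1 wraps to 0x00   */
uint32_t src13 = (uint32_t)src1 + src3;   /* 0x100, no wrap in 32 bits       */

/* dst < src13  ->  0x00 < 0x100  ->  CF = 1, the correct carry out.         */
/* In 8-bit arithmetic src1 + src3 itself would wrap to 0x00, which is why   */
/* the narrow fallback needs the "src3 ? <= : <" special case; the SBB       */
/* variant widens src2 + src3 for the same reason.                           */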
*/ -static int glue(compute_all_mul, SUFFIX)(DATA_TYPE dst, target_long src1) +static uint32_t glue(compute_all_mul, SUFFIX)(DATA_TYPE dst, target_long src1) { - int cf, pf, af, zf, sf, of; + uint32_t cf, pf, af, zf, sf, of; cf = (src1 != 0); - pf = parity_table[(uint8_t)dst]; + pf = compute_pf(dst); af = 0; /* undefined */ zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; of = cf * CC_O; - return cf | pf | af | zf | sf | of; + return cf + pf + af + zf + sf + of; } -static int glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +static uint32_t glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) { - int cf, pf, af, zf, sf, of; + uint32_t cf, pf, af, zf, sf, of; cf = (src1 == 0); pf = 0; /* undefined */ @@ -227,7 +232,7 @@ static int glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) zf = (dst == 0) * CC_Z; sf = lshift(dst, 8 - DATA_BITS) & CC_S; of = 0; - return cf | pf | af | zf | sf | of; + return cf + pf + af + zf + sf + of; } static int glue(compute_c_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) @@ -235,8 +240,26 @@ static int glue(compute_c_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) return src1 == 0; } +static int glue(compute_all_blsi, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +{ + uint32_t cf, pf, af, zf, sf, of; + + cf = (src1 != 0); + pf = 0; /* undefined */ + af = 0; /* undefined */ + zf = (dst == 0) * CC_Z; + sf = lshift(dst, 8 - DATA_BITS) & CC_S; + of = 0; + return cf + pf + af + zf + sf + of; +} + +static int glue(compute_c_blsi, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +{ + return src1 != 0; +} + #undef DATA_BITS #undef SIGN_MASK #undef DATA_TYPE -#undef DATA_MASK #undef SUFFIX +#undef WIDER_TYPE diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc index d2da1d396d5..51038657f0f 100644 --- a/target/i386/tcg/decode-new.c.inc +++ b/target/i386/tcg/decode-new.c.inc @@ -129,6 +129,37 @@ * * (^) these are the two cases in which Intel and AMD disagree on the * primary exception class + * + * Instructions still in translate.c + * --------------------------------- + * Generation of TCG opcodes for almost all instructions is in emit.c.inc; + * this file interprets the prefixes and opcode bytes down to individual + * instruction mnemonics. There is only a handful of opcodes still using + * a switch statement to decode modrm bits 3-5 and prefixes after decoding + * is complete; these are relics of the older x86 decoder and their code + * generation is performed in translate.c. + * + * These unconverted opcodes also perform their own effective address + * generation using the gen_lea_modrm() function. + * + * There is nothing particularly complicated about them; simply, they don't + * need any nasty hacks in the decoder, and they shouldn't get in the way + * of the implementation of new x86 instructions, so they are left alone + * for the time being. 
+ * + * x87: + * 0xD8 - 0xDF + * + * privileged/system: + * 0x0F 0x00 group 6 (SLDT, STR, LLDT, LTR, VERR, VERW) + * 0x0F 0x01 group 7 (SGDT, SIDT, LGDT, LIDT, SMSW, LMSW, INVLPG, + * MONITOR, MWAIT, CLAC, STAC, XGETBV, XSETBV, + * SWAPGS, RDTSCP) + * 0x0F 0xC7 (reg operand) group 9 (RDRAND, RDSEED, RDPID) + * + * MPX: + * 0x0F 0x1A BNDLDX, BNDMOV, BNDCL, BNDCU + * 0x0F 0x1B BNDSTX, BNDMOV, BNDMK, BNDCN */ #define X86_OP_NONE { 0 }, @@ -205,6 +236,7 @@ #define sextT0 .special = X86_SPECIAL_SExtT0, #define zextT0 .special = X86_SPECIAL_ZExtT0, #define op0_Mw .special = X86_SPECIAL_Op0_Mw, +#define btEvGv .special = X86_SPECIAL_BitTest, #define vex1 .vex_class = 1, #define vex1_rep3 .vex_class = 1, .vex_special = X86_VEX_REPScalar, @@ -269,6 +301,43 @@ static inline const X86OpEntry *decode_by_prefix(DisasContext *s, const X86OpEnt } } +static void decode_group8(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86GenFunc group8_gen[8] = { + NULL, NULL, NULL, NULL, + gen_BT, gen_BTS, gen_BTR, gen_BTC, + }; + int op = (get_modrm(s, env) >> 3) & 7; + entry->gen = group8_gen[op]; + if (op == 4) { + /* prevent writeback and LOCK for BT */ + entry->op1 = entry->op0; + entry->op0 = X86_TYPE_None; + entry->s0 = X86_SIZE_None; + } else { + entry->special = X86_SPECIAL_HasLock; + } +} + +static void decode_group9(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) +{ + static const X86OpEntry group9_reg = + X86_OP_ENTRY0(multi0F); /* unconverted */ + static const X86OpEntry cmpxchg8b = + X86_OP_ENTRY1(CMPXCHG8B, M,q, lock p_00 cpuid(CX8)); + static const X86OpEntry cmpxchg16b = + X86_OP_ENTRY1(CMPXCHG16B, M,dq, lock p_00 cpuid(CX16)); + + int modrm = get_modrm(s, env); + int op = (modrm >> 3) & 7; + + if ((modrm >> 6) == 3) { + *entry = group9_reg; + } else if (op == 1) { + *entry = REX_W(s) ? 
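decode_group8() dispatches opcode 0F BA purely on modrm bits 5:3, with /4../7 selecting BT/BTS/BTR/BTC; for plain BT the destination is moved into operand 1 so nothing is written back and LOCK stays rejected. Two standard encodings as a reference (instruction-encoding facts, not specific to this decoder):

/* 0f ba e0 05      bt eax, 5
 *   modrm 0xe0: mod = 3 (register), reg = 4 -> group8_gen[4] = gen_BT,
 *   rm = 0 (EAX); op0 is cleared, so the operand is only read.
 *
 * f0 0f ba 28 05   lock bts dword ptr [eax], 5
 *   modrm 0x28: mod = 0 (memory), reg = 5 -> gen_BTS, rm = 0 ([EAX]);
 *   entry->special = X86_SPECIAL_HasLock, so the LOCK prefix is legal.      */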
cmpxchg16b : cmpxchg8b; + } +} + static void decode_group15(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b) { static const X86OpEntry group15_reg[8] = { @@ -276,9 +345,9 @@ static void decode_group15(DisasContext *s, CPUX86State *env, X86OpEntry *entry, [1] = X86_OP_ENTRYw(RDxxBASE, R,y, cpuid(FSGSBASE) chk(o64) p_f3), [2] = X86_OP_ENTRYr(WRxxBASE, R,y, cpuid(FSGSBASE) chk(o64) p_f3 zextT0), [3] = X86_OP_ENTRYr(WRxxBASE, R,y, cpuid(FSGSBASE) chk(o64) p_f3 zextT0), - [5] = X86_OP_ENTRY0(LFENCE, cpuid(SSE2) p_00), + [5] = X86_OP_ENTRY0(LFENCE, cpuid(SSE) p_00), [6] = X86_OP_ENTRY0(MFENCE, cpuid(SSE2) p_00), - [7] = X86_OP_ENTRY0(SFENCE, cpuid(SSE2) p_00), + [7] = X86_OP_ENTRY0(SFENCE, cpuid(SSE) p_00), }; static const X86OpEntry group15_mem[8] = { @@ -809,10 +878,10 @@ static const X86OpEntry opcodes_0F3A[256] = { [0x0e] = X86_OP_ENTRY4(VPBLENDW, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), [0x0f] = X86_OP_ENTRY4(PALIGNR, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), - [0x18] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX) p_66), + [0x18] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,dq, vex6 chk(W0) cpuid(AVX) p_66), [0x19] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 chk(W0) cpuid(AVX) p_66), - [0x38] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66), + [0x38] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,dq, vex6 chk(W0) cpuid(AVX2) p_66), [0x39] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 chk(W0) cpuid(AVX2) p_66), /* Listed incorrectly as type 4 */ @@ -1073,6 +1142,8 @@ static void decode_MOV_CR_DR(DisasContext *s, CPUX86State *env, X86OpEntry *entr } static const X86OpEntry opcodes_0F[256] = { + [0x00] = X86_OP_ENTRY1(multi0F, nop,v, nolea), /* unconverted */ + [0x01] = X86_OP_ENTRY1(multi0F, nop,v, nolea), /* unconverted */ [0x02] = X86_OP_ENTRYwr(LAR, G,v, E,w, chk(prot)), [0x03] = X86_OP_ENTRYwr(LSL, G,v, E,w, chk(prot)), [0x05] = X86_OP_ENTRY0(SYSCALL, chk(o64_intel)), @@ -1162,12 +1233,14 @@ static const X86OpEntry opcodes_0F[256] = { [0xa0] = X86_OP_ENTRYr(PUSH, FS, w), [0xa1] = X86_OP_ENTRYw(POP, FS, w), [0xa2] = X86_OP_ENTRY0(CPUID), + [0xa3] = X86_OP_ENTRYrr(BT, E,v, G,v, btEvGv), [0xa4] = X86_OP_ENTRY4(SHLD, E,v, 2op,v, G,v), [0xa5] = X86_OP_ENTRY3(SHLD, E,v, 2op,v, G,v), [0xb0] = X86_OP_ENTRY2(CMPXCHG,E,b, G,b, lock), [0xb1] = X86_OP_ENTRY2(CMPXCHG,E,v, G,v, lock), [0xb2] = X86_OP_ENTRY3(LSS, G,v, EM,p, None, None), + [0xb3] = X86_OP_ENTRY2(BTR, E,v, G,v, btEvGv), [0xb4] = X86_OP_ENTRY3(LFS, G,v, EM,p, None, None), [0xb5] = X86_OP_ENTRY3(LGS, G,v, EM,p, None, None), [0xb6] = X86_OP_ENTRY3(MOV, G,v, E,b, None, None, zextT0), /* MOVZX */ @@ -1180,6 +1253,7 @@ static const X86OpEntry opcodes_0F[256] = { [0xc4] = X86_OP_ENTRY4(PINSRW, V,dq,H,dq,E,w, vex5 mmx p_00_66), [0xc5] = X86_OP_ENTRY3(PEXTRW, G,d, U,dq,I,b, vex5 mmx p_00_66), [0xc6] = X86_OP_ENTRY4(VSHUF, V,x, H,x, W,x, vex4 p_00_66), + [0xc7] = X86_OP_GROUP0(group9), [0xd0] = X86_OP_ENTRY3(VADDSUB, V,x, H,x, W,x, vex2 cpuid(SSE3) p_66_f2), [0xd1] = X86_OP_ENTRY3(PSRLW_r, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66), @@ -1222,6 +1296,8 @@ static const X86OpEntry opcodes_0F[256] = { [0x18] = X86_OP_ENTRY1(NOP, nop,v), /* prefetch/reserved NOP */ [0x19] = X86_OP_ENTRY1(NOP, nop,v), /* reserved NOP */ + [0x1a] = X86_OP_ENTRY1(multi0F, nop,v, nolea), /* unconverted MPX */ + [0x1b] = X86_OP_ENTRY1(multi0F, nop,v, nolea), /* unconverted MPX */ [0x1c] = X86_OP_ENTRY1(NOP, nop,v), /* reserved NOP */ [0x1d] = X86_OP_ENTRY1(NOP, nop,v), /* 
reserved NOP */ [0x1e] = X86_OP_ENTRY1(NOP, nop,v), /* reserved NOP */ @@ -1294,6 +1370,7 @@ static const X86OpEntry opcodes_0F[256] = { [0xa8] = X86_OP_ENTRYr(PUSH, GS, w), [0xa9] = X86_OP_ENTRYw(POP, GS, w), [0xaa] = X86_OP_ENTRY0(RSM, chk(smm) svm(RSM)), + [0xab] = X86_OP_ENTRY2(BTS, E,v, G,v, btEvGv), [0xac] = X86_OP_ENTRY4(SHRD, E,v, 2op,v, G,v), [0xad] = X86_OP_ENTRY3(SHRD, E,v, 2op,v, G,v), [0xae] = X86_OP_GROUP0(group15), @@ -1306,6 +1383,8 @@ static const X86OpEntry opcodes_0F[256] = { [0xb8] = X86_OP_GROUP0(0FB8), /* decoded as modrm, which is visible as a difference between page fault and #UD */ [0xb9] = X86_OP_ENTRYr(UD, nop,v), /* UD1 */ + [0xba] = X86_OP_GROUP2(group8, E,v, I,b), + [0xbb] = X86_OP_ENTRY2(BTC, E,v, G,v, btEvGv), [0xbc] = X86_OP_GROUP0(0FBC), [0xbd] = X86_OP_GROUP0(0FBD), [0xbe] = X86_OP_ENTRY3(MOV, G,v, E,b, None, None, sextT0), /* MOVSX */ @@ -1627,9 +1706,9 @@ static const X86OpEntry opcodes_root[256] = { [0xE2] = X86_OP_ENTRYr(LOOP, J,b), /* implicit: CX with aflag size */ [0xE3] = X86_OP_ENTRYr(JCXZ, J,b), /* implicit: CX with aflag size */ [0xE4] = X86_OP_ENTRYwr(IN, 0,b, I_unsigned,b), /* AL */ - [0xE5] = X86_OP_ENTRYwr(IN, 0,v, I_unsigned,b), /* AX/EAX */ + [0xE5] = X86_OP_ENTRYwr(IN, 0,z, I_unsigned,b), /* AX/EAX */ [0xE6] = X86_OP_ENTRYrr(OUT, 0,b, I_unsigned,b), /* AL */ - [0xE7] = X86_OP_ENTRYrr(OUT, 0,v, I_unsigned,b), /* AX/EAX */ + [0xE7] = X86_OP_ENTRYrr(OUT, 0,z, I_unsigned,b), /* AX/EAX */ [0xF1] = X86_OP_ENTRY0(INT1, svm(ICEBP)), [0xF4] = X86_OP_ENTRY0(HLT, chk(cpl0) svm(HLT)), @@ -1756,14 +1835,27 @@ static const X86OpEntry opcodes_root[256] = { [0xCE] = X86_OP_ENTRY0(INTO), [0xCF] = X86_OP_ENTRY0(IRET, chk(vm86_iopl) svm(IRET)), + /* + * x87 is nolea because it needs the address without segment base, + * in order to store it in fdp. 
+ */ + [0xD8] = X86_OP_ENTRY1(x87, nop,v, nolea), + [0xD9] = X86_OP_ENTRY1(x87, nop,v, nolea), + [0xDA] = X86_OP_ENTRY1(x87, nop,v, nolea), + [0xDB] = X86_OP_ENTRY1(x87, nop,v, nolea), + [0xDC] = X86_OP_ENTRY1(x87, nop,v, nolea), + [0xDD] = X86_OP_ENTRY1(x87, nop,v, nolea), + [0xDE] = X86_OP_ENTRY1(x87, nop,v, nolea), + [0xDF] = X86_OP_ENTRY1(x87, nop,v, nolea), + [0xE8] = X86_OP_ENTRYr(CALL, J,z_f64), [0xE9] = X86_OP_ENTRYr(JMP, J,z_f64), [0xEA] = X86_OP_ENTRYrr(JMPF, I_unsigned,p, I_unsigned,w, chk(i64)), [0xEB] = X86_OP_ENTRYr(JMP, J,b), [0xEC] = X86_OP_ENTRYwr(IN, 0,b, 2,w), /* AL, DX */ - [0xED] = X86_OP_ENTRYwr(IN, 0,v, 2,w), /* AX/EAX, DX */ + [0xED] = X86_OP_ENTRYwr(IN, 0,z, 2,w), /* AX/EAX, DX */ [0xEE] = X86_OP_ENTRYrr(OUT, 0,b, 2,w), /* DX, AL */ - [0xEF] = X86_OP_ENTRYrr(OUT, 0,v, 2,w), /* DX, AX/EAX */ + [0xEF] = X86_OP_ENTRYrr(OUT, 0,z, 2,w), /* DX, AX/EAX */ [0xF8] = X86_OP_ENTRY0(CLC), [0xF9] = X86_OP_ENTRY0(STC), @@ -1799,19 +1891,20 @@ static void decode_root(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui } -static int decode_modrm(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, - X86DecodedOp *op, X86OpType type) +static int decode_modrm(DisasContext *s, CPUX86State *env, + X86DecodedInsn *decode, X86DecodedOp *op) { int modrm = get_modrm(s, env); if ((modrm >> 6) == 3) { op->n = (modrm & 7); - if (type != X86_TYPE_Q && type != X86_TYPE_N) { + if (op->unit != X86_OP_MMX) { op->n |= REX_B(s); } } else { op->has_ea = true; op->n = -1; - decode->mem = gen_lea_modrm_0(env, s, get_modrm(s, env)); + decode->mem = gen_lea_modrm_0(env, s, modrm, + decode->e.vex_class == 12); } return modrm; } @@ -1978,7 +2071,10 @@ static bool decode_op(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, op->unit = X86_OP_SSE; } get_reg: - op->n = ((get_modrm(s, env) >> 3) & 7) | REX_R(s); + op->n = ((get_modrm(s, env) >> 3) & 7); + if (op->unit != X86_OP_MMX) { + op->n |= REX_R(s); + } break; case X86_TYPE_E: /* ALU modrm operand */ @@ -2036,7 +2132,7 @@ static bool decode_op(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, /* fall through */ case X86_TYPE_nop: /* modrm operand decoded but not fetched */ get_modrm: - decode_modrm(s, env, decode, op, type); + decode_modrm(s, env, decode, op); break; case X86_TYPE_O: /* Absolute address encoded in the instruction */ @@ -2199,8 +2295,12 @@ static bool has_cpuid_feature(DisasContext *s, X86CPUIDFeature cpuid) return (s->cpuid_features & CPUID_CMOV); case X86_FEAT_CLFLUSH: return (s->cpuid_features & CPUID_CLFLUSH); + case X86_FEAT_CX8: + return (s->cpuid_features & CPUID_CX8); case X86_FEAT_FXSR: return (s->cpuid_features & CPUID_FXSR); + case X86_FEAT_CX16: + return (s->cpuid_ext_features & CPUID_EXT_CX16); case X86_FEAT_F16C: return (s->cpuid_ext_features & CPUID_EXT_F16C); case X86_FEAT_FMA: @@ -2424,6 +2524,7 @@ static void disas_insn(DisasContext *s, CPUState *cpu) CPUX86State *env = cpu_env(cpu); X86DecodedInsn decode; X86DecodeFunc decode_func = decode_root; + bool accept_lock = false; uint8_t cc_live, b; s->pc = s->base.pc_next; @@ -2441,7 +2542,13 @@ static void disas_insn(DisasContext *s, CPUState *cpu) s->has_modrm = false; s->prefix = 0; - next_byte: + next_byte:; +#ifdef TARGET_X86_64 + /* clear any REX prefix followed by other prefixes. */ + int rex; + rex = -1; + next_byte_rex: +#endif b = x86_ldub_code(env, s); /* Collect prefixes. */ @@ -2484,13 +2591,12 @@ static void disas_insn(DisasContext *s, CPUState *cpu) #ifdef TARGET_X86_64 case 0x40 ... 
0x4f: if (CODE64(s)) { - /* REX prefix */ - s->prefix |= PREFIX_REX; - s->vex_w = (b >> 3) & 1; - s->rex_r = (b & 0x4) << 1; - s->rex_x = (b & 0x2) << 2; - s->rex_b = (b & 0x1) << 3; - goto next_byte; + /* + * REX prefix; ignored unless it is the last prefix, so + * for now just stash it + */ + rex = b; + goto next_byte_rex; } break; #endif @@ -2517,10 +2623,13 @@ static void disas_insn(DisasContext *s, CPUState *cpu) /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */ if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ - | PREFIX_LOCK | PREFIX_DATA | PREFIX_REX)) { + | PREFIX_LOCK | PREFIX_DATA)) { goto illegal_op; } #ifdef TARGET_X86_64 + if (rex != -1) { + goto illegal_op; + } s->rex_r = (~vex2 >> 4) & 8; #endif if (b == 0xc5) { @@ -2560,6 +2669,16 @@ static void disas_insn(DisasContext *s, CPUState *cpu) /* Post-process prefixes. */ if (CODE64(s)) { +#ifdef TARGET_X86_64 + if (rex != -1) { + s->prefix |= PREFIX_REX; + s->vex_w = (rex >> 3) & 1; + s->rex_r = (rex & 0x4) << 1; + s->rex_x = (rex & 0x2) << 2; + s->rex_b = (rex & 0x1) << 3; + } +#endif + /* * In 64-bit mode, the default data size is 32-bit. Select 64-bit * data with rex_w, and 16-bit data with 0x66; rex_w takes precedence @@ -2583,34 +2702,6 @@ static void disas_insn(DisasContext *s, CPUState *cpu) } } - /* Go back to old decoder for unconverted opcodes. */ - if (!(s->prefix & PREFIX_VEX)) { - if ((b & ~7) == 0xd8) { - if (!disas_insn_x87(s, cpu, b)) { - goto unknown_op; - } - return; - } - - if (b == 0x0f) { - b = x86_ldub_code(env, s); - switch (b) { - case 0x00 ... 0x01: /* mostly privileged instructions */ - case 0x1a ... 0x1b: /* MPX */ - case 0xa3: /* bt */ - case 0xab: /* bts */ - case 0xb3: /* btr */ - case 0xba ... 0xbb: /* grp8, btc */ - case 0xc7: /* grp9 */ - disas_insn_old(s, cpu, b + 0x100); - return; - default: - decode_func = do_decode_0F; - break; - } - } - } - memset(&decode, 0, sizeof(decode)); decode.cc_op = -1; decode.b = b; @@ -2631,14 +2722,14 @@ static void disas_insn(DisasContext *s, CPUState *cpu) if (decode.e.check & X86_CHECK_i64) { goto illegal_op; } - if ((decode.e.check & X86_CHECK_i64_amd) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) { + if ((decode.e.check & X86_CHECK_i64_amd) && !IS_INTEL_CPU(env)) { goto illegal_op; } } else { if (decode.e.check & X86_CHECK_o64) { goto illegal_op; } - if ((decode.e.check & X86_CHECK_o64_intel) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) { + if ((decode.e.check & X86_CHECK_o64_intel) && IS_INTEL_CPU(env)) { goto illegal_op; } } @@ -2662,9 +2753,10 @@ static void disas_insn(DisasContext *s, CPUState *cpu) if (decode.op[0].has_ea) { s->prefix |= PREFIX_LOCK; } - decode.e.special = X86_SPECIAL_HasLock; /* fallthrough */ case X86_SPECIAL_HasLock: + case X86_SPECIAL_BitTest: + accept_lock = decode.op[0].has_ea; break; case X86_SPECIAL_Op0_Rd: @@ -2706,10 +2798,8 @@ static void disas_insn(DisasContext *s, CPUState *cpu) break; } - if (s->prefix & PREFIX_LOCK) { - if (decode.e.special != X86_SPECIAL_HasLock || !decode.op[0].has_ea) { - goto illegal_op; - } + if ((s->prefix & PREFIX_LOCK) && !accept_lock) { + goto illegal_op; } if (!validate_vex(s, &decode)) { @@ -2755,9 +2845,10 @@ static void disas_insn(DisasContext *s, CPUState *cpu) if (decode.e.special != X86_SPECIAL_NoLoadEA && (decode.op[0].has_ea || decode.op[1].has_ea || decode.op[2].has_ea)) { - gen_load_ea(s, &decode.mem, decode.e.vex_class == 12); + gen_lea_modrm(s, &decode); } if (s->prefix & PREFIX_LOCK) { + assert(decode.op[0].has_ea && !decode.op[2].has_ea); gen_load(s, &decode, 2, 
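The stash-and-reset sequence above implements the architectural rule that a REX prefix only takes effect when it is the byte immediately before the opcode; a REX followed by any other prefix is silently ignored. A byte-level illustration (64-bit mode):

/* 66 48 01 c8 : REX.W (0x48) is the last prefix, so rex survives the loop
 *               and rex_w overrides the 0x66 operand-size prefix
 *               -> add rax, rcx
 *
 * 48 66 01 c8 : the 0x66 after the REX jumps back through next_byte,
 *               which resets rex = -1, so the REX is dropped
 *               -> add ax, cx                                               */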
s->T1); decode.e.gen(s, &decode); } else { @@ -2792,7 +2883,7 @@ static void disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_mov_i32(cpu_cc_op, decode.cc_op_dynamic); } set_cc_op(s, decode.cc_op); - cc_live = cc_op_live[decode.cc_op]; + cc_live = cc_op_live(decode.cc_op); } else { cc_live = 0; } diff --git a/target/i386/tcg/decode-new.h b/target/i386/tcg/decode-new.h index f9bf9a60411..7f23d373ea7 100644 --- a/target/i386/tcg/decode-new.h +++ b/target/i386/tcg/decode-new.h @@ -114,6 +114,8 @@ typedef enum X86CPUIDFeature { X86_FEAT_CLWB, X86_FEAT_CMOV, X86_FEAT_CMPCCXADD, + X86_FEAT_CX8, + X86_FEAT_CX16, X86_FEAT_F16C, X86_FEAT_FMA, X86_FEAT_FSGSBASE, @@ -190,6 +192,9 @@ typedef enum X86InsnSpecial { /* Always locked if it has a memory operand (XCHG) */ X86_SPECIAL_Locked, + /* Like HasLock, but also operand 2 provides bit displacement into memory. */ + X86_SPECIAL_BitTest, + /* Do not load effective address in s->A0 */ X86_SPECIAL_NoLoadEA, @@ -261,12 +266,13 @@ typedef enum X86VEXSpecial { typedef struct X86OpEntry X86OpEntry; typedef struct X86DecodedInsn X86DecodedInsn; +struct DisasContext; /* Decode function for multibyte opcodes. */ -typedef void (*X86DecodeFunc)(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b); +typedef void (*X86DecodeFunc)(struct DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b); /* Code generation function. */ -typedef void (*X86GenFunc)(DisasContext *s, X86DecodedInsn *decode); +typedef void (*X86GenFunc)(struct DisasContext *s, X86DecodedInsn *decode); struct X86OpEntry { /* Based on the is_decode flags. */ @@ -313,6 +319,14 @@ typedef struct X86DecodedOp { }; } X86DecodedOp; +typedef struct AddressParts { + int def_seg; + int base; + int index; + int scale; + target_long disp; +} AddressParts; + struct X86DecodedInsn { X86OpEntry e; X86DecodedOp op[3]; @@ -330,3 +344,4 @@ struct X86DecodedInsn { uint8_t b; }; +static void gen_lea_modrm(struct DisasContext *s, X86DecodedInsn *decode); diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc index 016dce81464..1a7fab9333a 100644 --- a/target/i386/tcg/emit.c.inc +++ b/target/i386/tcg/emit.c.inc @@ -19,22 +19,13 @@ * License along with this library; if not, see . */ -/* - * Sometimes, knowing what the backend has can produce better code. - * The exact opcode to check depends on 32- vs. 64-bit. 
- */ -#ifdef TARGET_X86_64 -#define TCG_TARGET_HAS_extract2_tl TCG_TARGET_HAS_extract2_i64 -#define TCG_TARGET_deposit_tl_valid TCG_TARGET_deposit_i64_valid -#define TCG_TARGET_extract_tl_valid TCG_TARGET_extract_i64_valid -#else -#define TCG_TARGET_HAS_extract2_tl TCG_TARGET_HAS_extract2_i32 -#define TCG_TARGET_deposit_tl_valid TCG_TARGET_deposit_i32_valid -#define TCG_TARGET_extract_tl_valid TCG_TARGET_extract_i32_valid -#endif - +#define MMX_OFFSET(reg) \ + ({ assert((reg) >= 0 && (reg) <= 7); \ + offsetof(CPUX86State, fpregs[reg].mmx); }) -#define ZMM_OFFSET(reg) offsetof(CPUX86State, xmm_regs[reg]) +#define ZMM_OFFSET(reg) \ + ({ assert((reg) >= 0 && (reg) <= 15); \ + offsetof(CPUX86State, xmm_regs[reg]); }) typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg); typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg); @@ -73,9 +64,26 @@ static void gen_NM_exception(DisasContext *s) gen_exception(s, EXCP07_PREX); } -static void gen_load_ea(DisasContext *s, AddressParts *mem, bool is_vsib) +static void gen_lea_modrm(DisasContext *s, X86DecodedInsn *decode) { - TCGv ea = gen_lea_modrm_1(s, *mem, is_vsib); + AddressParts *mem = &decode->mem; + TCGv ea; + + ea = gen_lea_modrm_1(s, *mem, decode->e.vex_class == 12); + if (decode->e.special == X86_SPECIAL_BitTest) { + MemOp ot = decode->op[1].ot; + int poslen = 8 << ot; + int opn = decode->op[2].n; + TCGv ofs = tcg_temp_new(); + + /* Extract memory displacement from the second operand. */ + assert(decode->op[2].unit == X86_OP_INT && decode->op[2].ot != MO_8); + tcg_gen_sextract_tl(ofs, cpu_regs[opn], 3, poslen - 3); + tcg_gen_andi_tl(ofs, ofs, -1 << ot); + tcg_gen_add_tl(s->A0, ea, ofs); + ea = s->A0; + } + gen_lea_v_seg(s, ea, mem->def_seg, s->override); } @@ -168,7 +176,7 @@ static int vector_elem_offset(X86DecodedOp *op, MemOp ot, int n) static void compute_mmx_offset(X86DecodedOp *op) { if (!op->has_ea) { - op->offset = offsetof(CPUX86State, fpregs[op->n].mmx) + mmx_offset(op->ot); + op->offset = MMX_OFFSET(op->n) + mmx_offset(op->ot); } else { op->offset = offsetof(CPUX86State, mmx_t0) + mmx_offset(op->ot); } @@ -264,24 +272,25 @@ static void gen_load(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v) gen_op_ld_v(s, op->ot, v, s->A0); } - } else if (op->ot == MO_8 && byte_reg_is_xH(s, op->n)) { - if (v == s->T0 && decode->e.special == X86_SPECIAL_SExtT0) { - tcg_gen_sextract_tl(v, cpu_regs[op->n - 4], 8, 8); - } else { - tcg_gen_extract_tl(v, cpu_regs[op->n - 4], 8, 8); - } - } else if (op->ot < MO_TL && v == s->T0 && (decode->e.special == X86_SPECIAL_SExtT0 || decode->e.special == X86_SPECIAL_ZExtT0)) { - if (decode->e.special == X86_SPECIAL_SExtT0) { - tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot | MO_SIGN); + if (op->ot == MO_8 && byte_reg_is_xH(s, op->n)) { + if (decode->e.special == X86_SPECIAL_SExtT0) { + tcg_gen_sextract_tl(v, cpu_regs[op->n - 4], 8, 8); + } else { + tcg_gen_extract_tl(v, cpu_regs[op->n - 4], 8, 8); + } } else { - tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot); + if (decode->e.special == X86_SPECIAL_SExtT0) { + tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot | MO_SIGN); + } else { + tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot); + } } } else { - tcg_gen_mov_tl(v, cpu_regs[op->n]); + gen_op_mov_v_reg(s, op->ot, v, op->n); } break; case X86_OP_IMM: @@ -333,7 +342,7 @@ static void gen_writeback(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv break; case X86_OP_SEG: /* Note that gen_movl_seg takes care of interrupt shadow and TF. 
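For the register-indexed bit-test forms the bit number also carries a signed byte displacement: gen_lea_modrm() arithmetically shifts the operand-sized index right by 3 and aligns it down to the operand size before adding it to the modrm address. A worked 16-bit example with an assumed bit index of 19:

/* bt word ptr [base], bx   with bx = 19, ot = MO_16, poslen = 16            */
int32_t ofs = (int16_t)19 >> 3;        /* sextract(bx, 3, 13)        -> 2    */
ofs &= -(1 << MO_16);                  /* the -1 << ot alignment     -> 2    */
/* s->A0 = base + 2, and gen_bt_mask() later keeps bx & 15 = 3, so the       */
/* access tests bit 3 of the word at base + 2, i.e. bit 2 * 8 + 3 = 19 of    */
/* the bit string starting at base.                                          */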
*/ - gen_movl_seg(s, op->n, s->T0); + gen_movl_seg(s, op->n, v, op->n == R_SS); break; case X86_OP_INT: if (op->has_ea) { @@ -407,6 +416,32 @@ static void prepare_update3_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op, decode->cc_op = op; } +/* Set up decode->cc_* to modify CF while keeping other flags unchanged. */ +static void prepare_update_cf(X86DecodedInsn *decode, DisasContext *s, TCGv cf) +{ + switch (s->cc_op) { + case CC_OP_ADOX: + case CC_OP_ADCOX: + decode->cc_src2 = cpu_cc_src2; + decode->cc_src = cpu_cc_src; + decode->cc_op = CC_OP_ADCOX; + break; + + case CC_OP_EFLAGS: + case CC_OP_ADCX: + decode->cc_src = cpu_cc_src; + decode->cc_op = CC_OP_ADCX; + break; + + default: + decode->cc_src = tcg_temp_new(); + gen_mov_eflags(s, decode->cc_src); + decode->cc_op = CC_OP_ADCX; + break; + } + decode->cc_dst = cf; +} + static void gen_store_sse(DisasContext *s, X86DecodedInsn *decode, int src_ofs) { MemOp ot = decode->op[0].ot; @@ -1125,11 +1160,28 @@ static void gen_AAS(DisasContext *s, X86DecodedInsn *decode) assume_cc_op(s, CC_OP_EFLAGS); } +static void gen_ADD(DisasContext *s, X86DecodedInsn *decode); static void gen_ADC(DisasContext *s, X86DecodedInsn *decode) { MemOp ot = decode->op[1].ot; - TCGv c_in = tcg_temp_new(); + TCGv c_in; + + /* + * Try to avoid CC_OP_ADC by transforming as follows: + * CC_ADC: src1 = dst + c_in, src2 = 0, src3 = c_in + * CC_ADD: src1 = dst + c_in, src2 = c_in (no src3) + * + * In general src2 vs. src3 matters when computing AF and OF, but not here: + * - AF is bit 4 of dst^src1^src2, which is bit 4 of dst^src1 in both cases + * - OF is a function of the two MSBs, and in both cases they are zero for src2 + */ + if (decode->e.op2 == X86_TYPE_I && decode->immediate == 0) { + gen_compute_eflags_c(s, s->T1); + gen_ADD(s, decode); + return; + } + c_in = tcg_temp_new(); gen_compute_eflags_c(s, c_in); if (s->prefix & PREFIX_LOCK) { tcg_gen_add_tl(s->T0, c_in, s->T1); @@ -1299,7 +1351,7 @@ static void gen_BLSI(DisasContext *s, X86DecodedInsn *decode) /* input in T1, which is ready for prepare_update2_cc */ tcg_gen_neg_tl(s->T0, s->T1); tcg_gen_and_tl(s->T0, s->T0, s->T1); - prepare_update2_cc(decode, s, CC_OP_BMILGB + ot); + prepare_update2_cc(decode, s, CC_OP_BLSIB + ot); } static void gen_BLSMSK(DisasContext *s, X86DecodedInsn *decode) @@ -1385,6 +1437,121 @@ static void gen_BSWAP(DisasContext *s, X86DecodedInsn *decode) tcg_gen_bswap32_tl(s->T0, s->T0, TCG_BSWAP_OZ); } +static TCGv gen_bt_mask(DisasContext *s, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[1].ot; + TCGv mask = tcg_temp_new(); + + tcg_gen_andi_tl(s->T1, s->T1, (8 << ot) - 1); + tcg_gen_shl_tl(mask, tcg_constant_tl(1), s->T1); + return mask; +} + +/* Expects truncated bit index in COUNT, 1 << COUNT in MASK. */ +static void gen_bt_flags(DisasContext *s, X86DecodedInsn *decode, TCGv src, + TCGv count, TCGv mask) +{ + TCGv cf; + + /* + * C is the result of the test, Z is unchanged, and the others + * are all undefined. + */ + if (s->cc_op == CC_OP_DYNAMIC || CC_OP_HAS_EFLAGS(s->cc_op)) { + /* Generate EFLAGS and replace the C bit. */ + cf = tcg_temp_new(); + tcg_gen_setcond_tl(TCG_COND_TSTNE, cf, src, mask); + prepare_update_cf(decode, s, cf); + } else { + /* + * Z was going to be computed from the non-zero status of CC_DST. + * We can get that same Z value (and the new C value) by leaving + * CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the + * same width. 
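/*
 * A small demonstration (not QEMU code) of the identity used by the
 * gen_ADC() fast path above, and by the analogous gen_SBB() path further
 * down: "ADC x, 0" with carry-in c produces the same result as "ADD x, c",
 * and the usual flag formulas agree as well because c only occupies bit 0.
 * AF is the carry out of bit 3 of the addition, i.e. bit 4 of (a ^ b ^ r).
 */
static unsigned addition_af(unsigned a, unsigned b, unsigned r)
{
    return (a ^ b ^ r) & 0x10;                   /* carry out of bit 3 */
}

static int adc_imm0_matches_add_cf(unsigned a, unsigned c /* 0 or 1 */)
{
    unsigned r = a + c;                          /* same result either way */
    unsigned af_adc = addition_af(a, 0, r);      /* viewed as ADC a, 0 */
    unsigned af_add = addition_af(a, c, r);      /* viewed as ADD a, c */
    return af_adc == af_add;                     /* always true: bit 4 of c is 0 */
}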
+ */ + decode->cc_src = tcg_temp_new(); + decode->cc_dst = cpu_cc_dst; + decode->cc_op = CC_OP_SARB + cc_op_size(s->cc_op); + tcg_gen_shr_tl(decode->cc_src, src, count); + } +} + +static void gen_BT(DisasContext *s, X86DecodedInsn *decode) +{ + TCGv count = s->T1; + TCGv mask; + + /* + * Try to ensure that the rhs of the TSTNE condition is a constant (and a + * power of two), as that is more readily available on most TCG backends. + * + * For immediate bit number gen_bt_mask()'s output is already a constant; + * for register bit number, shift the source right and check bit 0. + */ + if (decode->e.op2 == X86_TYPE_I) { + mask = gen_bt_mask(s, decode); + } else { + MemOp ot = decode->op[1].ot; + + tcg_gen_andi_tl(s->T1, s->T1, (8 << ot) - 1); + tcg_gen_shr_tl(s->T0, s->T0, s->T1); + + count = tcg_constant_tl(0); + mask = tcg_constant_tl(1); + } + gen_bt_flags(s, decode, s->T0, count, mask); +} + +static void gen_BTC(DisasContext *s, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + TCGv old = tcg_temp_new(); + TCGv mask = gen_bt_mask(s, decode); + + if (s->prefix & PREFIX_LOCK) { + tcg_gen_atomic_fetch_xor_tl(old, s->A0, mask, s->mem_index, ot | MO_LE); + } else { + tcg_gen_mov_tl(old, s->T0); + tcg_gen_xor_tl(s->T0, s->T0, mask); + } + + gen_bt_flags(s, decode, old, s->T1, mask); +} + +static void gen_BTR(DisasContext *s, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + TCGv old = tcg_temp_new(); + TCGv mask = gen_bt_mask(s, decode); + + if (s->prefix & PREFIX_LOCK) { + TCGv maskc = tcg_temp_new(); + tcg_gen_not_tl(maskc, mask); + tcg_gen_atomic_fetch_and_tl(old, s->A0, maskc, s->mem_index, ot | MO_LE); + } else { + tcg_gen_mov_tl(old, s->T0); + tcg_gen_andc_tl(s->T0, s->T0, mask); + } + + gen_bt_flags(s, decode, old, s->T1, mask); +} + +static void gen_BTS(DisasContext *s, X86DecodedInsn *decode) +{ + MemOp ot = decode->op[0].ot; + TCGv old = tcg_temp_new(); + TCGv mask = gen_bt_mask(s, decode); + + if (s->prefix & PREFIX_LOCK) { + tcg_gen_atomic_fetch_or_tl(old, s->A0, mask, s->mem_index, ot | MO_LE); + } else { + tcg_gen_mov_tl(old, s->T0); + tcg_gen_or_tl(s->T0, s->T0, mask); + } + + gen_bt_flags(s, decode, old, s->T1, mask); +} + static void gen_BZHI(DisasContext *s, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; @@ -1470,7 +1637,7 @@ static void gen_CMC(DisasContext *s, X86DecodedInsn *decode) static void gen_CMOVcc(DisasContext *s, X86DecodedInsn *decode) { - gen_cmovcc1(s, decode->b & 0xf, s->T0, s->T1); + gen_cmovcc(s, decode->b & 0xf, s->T0, s->T1); } static void gen_CMPccXADD(DisasContext *s, X86DecodedInsn *decode) @@ -1533,22 +1700,22 @@ static void gen_CMPccXADD(DisasContext *s, X86DecodedInsn *decode) switch (jcc_op) { case JCC_O: /* (src1 ^ src2) & (src1 ^ dst). 
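/*
 * Sketch of what the CC_OP_SAR reuse in gen_bt_flags() above relies on
 * (assuming, as the SAR flag computation does, that CF comes from bit 0 of
 * CC_SRC while ZF is still derived from CC_DST): shifting the source right
 * by the truncated bit index leaves the tested bit in the CF position and
 * does not touch the value that ZF is computed from.
 */
static int bt_cf_value(unsigned long long src, unsigned count)
{
    return (src >> count) & 1;                   /* the bit BT reports in CF */
}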
newv is only used here for a moment */ + cmp_lhs = tcg_temp_new(), cmp_rhs = tcg_constant_tl(0); tcg_gen_xor_tl(newv, s->cc_srcT, s->T0); - tcg_gen_xor_tl(s->tmp0, s->cc_srcT, cmpv); - tcg_gen_and_tl(s->tmp0, s->tmp0, newv); - tcg_gen_sextract_tl(s->tmp0, s->tmp0, 0, 8 << ot); - cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0); + tcg_gen_xor_tl(cmp_lhs, s->cc_srcT, cmpv); + tcg_gen_and_tl(cmp_lhs, cmp_lhs, newv); + tcg_gen_sextract_tl(cmp_lhs, cmp_lhs, 0, 8 << ot); break; case JCC_P: - tcg_gen_ext8u_tl(s->tmp0, s->T0); - tcg_gen_ctpop_tl(s->tmp0, s->tmp0); - cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(1); + cmp_lhs = tcg_temp_new(), cmp_rhs = tcg_constant_tl(1); + tcg_gen_ext8u_tl(cmp_lhs, s->T0); + tcg_gen_ctpop_tl(cmp_lhs, cmp_lhs); break; case JCC_S: - tcg_gen_sextract_tl(s->tmp0, s->T0, 0, 8 << ot); - cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0); + cmp_lhs = tcg_temp_new(), cmp_rhs = tcg_constant_tl(0); + tcg_gen_sextract_tl(cmp_lhs, s->T0, 0, 8 << ot); break; default: @@ -1579,11 +1746,7 @@ static void gen_CMPccXADD(DisasContext *s, X86DecodedInsn *decode) static void gen_CMPS(DisasContext *s, X86DecodedInsn *decode) { MemOp ot = decode->op[2].ot; - if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz_nz(s, ot, gen_cmps); - } else { - gen_cmps(s, ot); - } + gen_repz_nz(s, ot, gen_cmps); } static void gen_CMPXCHG(DisasContext *s, X86DecodedInsn *decode) @@ -1637,6 +1800,102 @@ static void gen_CMPXCHG(DisasContext *s, X86DecodedInsn *decode) decode->cc_op = CC_OP_SUBB + ot; } +static void gen_CMPXCHG16B(DisasContext *s, X86DecodedInsn *decode) +{ +#ifdef TARGET_X86_64 + MemOp mop = MO_LE | MO_128 | MO_ALIGN; + TCGv_i64 t0, t1; + TCGv_i128 cmp, val; + + cmp = tcg_temp_new_i128(); + val = tcg_temp_new_i128(); + tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]); + tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]); + + /* Only require atomic with LOCK; non-parallel handled in generator. */ + if (s->prefix & PREFIX_LOCK) { + tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop); + } else { + tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop); + } + + tcg_gen_extr_i128_i64(s->T0, s->T1, val); + + /* Determine success after the fact. */ + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]); + tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]); + tcg_gen_or_i64(t0, t0, t1); + + /* Update Z. */ + gen_compute_eflags(s); + tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0); + tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1); + + /* + * Extract the result values for the register pair. We may do this + * unconditionally, because on success (Z=1), the old value matches + * the previous value in RDX:RAX. + */ + tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0); + tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1); +#else + abort(); +#endif +} + +static void gen_CMPXCHG8B(DisasContext *s, X86DecodedInsn *decode) +{ + TCGv_i64 cmp, val, old; + TCGv Z; + + cmp = tcg_temp_new_i64(); + val = tcg_temp_new_i64(); + old = tcg_temp_new_i64(); + + /* Construct the comparison values from the register pair. */ + tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]); + tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]); + + /* Only require atomic with LOCK; non-parallel handled in generator. 
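/*
 * Plain C sketch of the "determine success after the fact" test used by
 * gen_CMPXCHG16B() above: the old 128-bit value returned by the (non)atomic
 * cmpxchg equals the expected RDX:RAX pair exactly when both 64-bit halves
 * XOR to zero, so a single OR of the two XORs is enough to derive ZF.  The
 * real code expresses this with TCG ops.
 */
static int cmpxchg16b_succeeded(unsigned long long old_lo,
                                unsigned long long old_hi,
                                unsigned long long rax,
                                unsigned long long rdx)
{
    return ((old_lo ^ rax) | (old_hi ^ rdx)) == 0;
}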
*/ + if (s->prefix & PREFIX_LOCK) { + tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_LEUQ); + } else { + tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val, + s->mem_index, MO_LEUQ); + } + + /* Compute the required value of Z. */ + tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp); + Z = tcg_temp_new(); + tcg_gen_trunc_i64_tl(Z, cmp); + + /* + * Extract the result values for the register pair. + * For 32-bit, we may do this unconditionally, because on success (Z=1), + * the old value matches the previous value in EDX:EAX. For x86_64, + * the store must be conditional, because we must leave the source + * registers unchanged on success, and zero-extend the writeback + * on failure (Z=0). + */ + if (TARGET_LONG_BITS == 32) { + tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old); + } else { + TCGv zero = tcg_constant_tl(0); + + tcg_gen_extr_i64_tl(s->T0, s->T1, old); + tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero, + s->T0, cpu_regs[R_EAX]); + tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero, + s->T1, cpu_regs[R_EDX]); + } + + /* Update Z. */ + gen_compute_eflags(s); + tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1); +} + static void gen_CPUID(DisasContext *s, X86DecodedInsn *decode) { gen_update_cc_op(s); @@ -1647,9 +1906,10 @@ static void gen_CPUID(DisasContext *s, X86DecodedInsn *decode) static void gen_CRC32(DisasContext *s, X86DecodedInsn *decode) { MemOp ot = decode->op[2].ot; + TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - gen_helper_crc32(s->T0, s->tmp2_i32, s->T1, tcg_constant_i32(8 << ot)); + tcg_gen_trunc_tl_i32(tmp, s->T0); + gen_helper_crc32(s->T0, tmp, s->T1, tcg_constant_i32(8 << ot)); } static void gen_CVTPI2Px(DisasContext *s, X86DecodedInsn *decode) @@ -1978,11 +2238,7 @@ static void gen_INS(DisasContext *s, X86DecodedInsn *decode) } translator_io_start(&s->base); - if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz(s, ot, gen_ins); - } else { - gen_ins(s, ot); - } + gen_repz(s, ot, gen_ins); } static void gen_INSERTQ_i(DisasContext *s, X86DecodedInsn *decode) @@ -2037,8 +2293,11 @@ static void gen_IRET(DisasContext *s, X86DecodedInsn *decode) static void gen_Jcc(DisasContext *s, X86DecodedInsn *decode) { + TCGLabel *taken = gen_new_label(); + gen_bnd_jmp(s); - gen_jcc(s, decode->b & 0xf, decode->immediate); + gen_jcc(s, decode->b & 0xf, taken); + gen_conditional_jump_labels(s, decode->immediate, NULL, taken); } static void gen_JCXZ(DisasContext *s, X86DecodedInsn *decode) @@ -2108,8 +2367,10 @@ static void gen_LAR(DisasContext *s, X86DecodedInsn *decode) static void gen_LDMXCSR(DisasContext *s, X86DecodedInsn *decode) { - tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - gen_helper_ldmxcsr(tcg_env, s->tmp2_i32); + TCGv_i32 tmp = tcg_temp_new_i32(); + + tcg_gen_trunc_tl_i32(tmp, s->T0); + gen_helper_ldmxcsr(tcg_env, tmp); } static void gen_lxx_seg(DisasContext *s, X86DecodedInsn *decode, int seg) @@ -2121,7 +2382,7 @@ static void gen_lxx_seg(DisasContext *s, X86DecodedInsn *decode, int seg) gen_op_ld_v(s, MO_16, s->T1, s->A0); /* load the segment here to handle exceptions properly */ - gen_movl_seg(s, seg, s->T1); + gen_movl_seg(s, seg, s->T1, false); } static void gen_LDS(DisasContext *s, X86DecodedInsn *decode) @@ -2163,11 +2424,7 @@ static void gen_LGS(DisasContext *s, X86DecodedInsn *decode) static void gen_LODS(DisasContext *s, X86DecodedInsn *decode) { MemOp ot = decode->op[1].ot; - if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz(s, ot, gen_lods); - } else { - 
gen_lods(s, ot); - } + gen_repz(s, ot, gen_lods); } static void gen_LOOP(DisasContext *s, X86DecodedInsn *decode) @@ -2188,7 +2445,7 @@ static void gen_LOOPE(DisasContext *s, X86DecodedInsn *decode) gen_update_cc_op(s); gen_op_add_reg_im(s, s->aflag, R_ECX, -1); gen_op_jz_ecx(s, not_taken); - gen_jcc1(s, (JCC_Z << 1), taken); /* jz taken */ + gen_jcc(s, (JCC_Z << 1), taken); /* jz taken */ gen_conditional_jump_labels(s, decode->immediate, not_taken, taken); } @@ -2200,7 +2457,7 @@ static void gen_LOOPNE(DisasContext *s, X86DecodedInsn *decode) gen_update_cc_op(s); gen_op_add_reg_im(s, s->aflag, R_ECX, -1); gen_op_jz_ecx(s, not_taken); - gen_jcc1(s, (JCC_Z << 1) | 1, taken); /* jnz taken */ + gen_jcc(s, (JCC_Z << 1) | 1, taken); /* jnz taken */ gen_conditional_jump_labels(s, decode->immediate, not_taken, taken); } @@ -2326,11 +2583,13 @@ static void gen_MOVDQ(DisasContext *s, X86DecodedInsn *decode) static void gen_MOVMSK(DisasContext *s, X86DecodedInsn *decode) { typeof(gen_helper_movmskps_ymm) *ps, *pd, *fn; + TCGv_i32 tmp = tcg_temp_new_i32(); + ps = s->vex_l ? gen_helper_movmskps_ymm : gen_helper_movmskps_xmm; pd = s->vex_l ? gen_helper_movmskpd_ymm : gen_helper_movmskpd_xmm; fn = s->prefix & PREFIX_DATA ? pd : ps; - fn(s->tmp2_i32, tcg_env, OP_PTR2); - tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); + fn(tmp, tcg_env, OP_PTR2); + tcg_gen_extu_i32_tl(s->T0, tmp); } static void gen_MOVQ(DisasContext *s, X86DecodedInsn *decode) @@ -2365,11 +2624,7 @@ static void gen_MOVq_dq(DisasContext *s, X86DecodedInsn *decode) static void gen_MOVS(DisasContext *s, X86DecodedInsn *decode) { MemOp ot = decode->op[2].ot; - if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz(s, ot, gen_movs); - } else { - gen_movs(s, ot); - } + gen_repz(s, ot, gen_movs); } static void gen_MUL(DisasContext *s, X86DecodedInsn *decode) @@ -2431,13 +2686,17 @@ static void gen_MULX(DisasContext *s, X86DecodedInsn *decode) switch (ot) { case MO_32: #ifdef TARGET_X86_64 - tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); - tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32, - s->tmp2_i32, s->tmp3_i32); - tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32); - tcg_gen_extu_i32_tl(s->T0, s->tmp3_i32); - break; + { + TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t1 = tcg_temp_new_i32(); + + tcg_gen_trunc_tl_i32(t0, s->T0); + tcg_gen_trunc_tl_i32(t1, s->T1); + tcg_gen_mulu2_i32(t0, t1, t0, t1); + tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], t0); + tcg_gen_extu_i32_tl(s->T0, t1); + break; + } case MO_64: #endif @@ -2531,11 +2790,7 @@ static void gen_OUTS(DisasContext *s, X86DecodedInsn *decode) } translator_io_start(&s->base); - if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz(s, ot, gen_outs); - } else { - gen_outs(s, ot); - } + gen_repz(s, ot, gen_outs); } static void gen_PALIGNR(DisasContext *s, X86DecodedInsn *decode) @@ -2758,7 +3013,7 @@ static void gen_PMOVMSKB(DisasContext *s, X86DecodedInsn *decode) tcg_gen_ld8u_tl(s->T0, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1))); while (vec_len > 8) { vec_len -= 8; - if (TCG_TARGET_HAS_extract2_tl) { + if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_TL, 0)) { /* * Load the next byte of the result into the high byte of T. * TCG does a similar expansion of deposit to shl+extract2; by @@ -3107,7 +3362,8 @@ static bool gen_eflags_adcox(DisasContext *s, X86DecodedInsn *decode, bool want_ * bit, we might as well fish CF out of EFLAGS and save a shift. 
*/ if (want_carry && (!need_flags || s->cc_op == CC_OP_SHLB + MO_TL)) { - tcg_gen_shri_tl(decode->cc_dst, cpu_cc_src, (8 << (s->cc_op - CC_OP_SHLB)) - 1); + MemOp size = cc_op_size(s->cc_op); + tcg_gen_shri_tl(decode->cc_dst, cpu_cc_src, (8 << size) - 1); got_cf = true; } gen_mov_eflags(s, decode->cc_src); @@ -3211,7 +3467,7 @@ static void gen_RCL(DisasContext *s, X86DecodedInsn *decode) } /* Compute high part, including incoming carry. */ - if (!have_1bit_cin || TCG_TARGET_deposit_tl_valid(1, TARGET_LONG_BITS - 1)) { + if (!have_1bit_cin || tcg_op_deposit_valid(TCG_TYPE_TL, 1, TARGET_LONG_BITS - 1)) { /* high = (T0 << 1) | cin */ TCGv cin = have_1bit_cin ? decode->cc_dst : decode->cc_src; tcg_gen_deposit_tl(high, cin, s->T0, 1, TARGET_LONG_BITS - 1); @@ -3263,7 +3519,7 @@ static void gen_RCR(DisasContext *s, X86DecodedInsn *decode) } /* Save incoming carry into high, it will be shifted later. */ - if (!have_1bit_cin || TCG_TARGET_deposit_tl_valid(1, TARGET_LONG_BITS - 1)) { + if (!have_1bit_cin || tcg_op_deposit_valid(TCG_TYPE_TL, 1, TARGET_LONG_BITS - 1)) { TCGv cin = have_1bit_cin ? decode->cc_dst : decode->cc_src; tcg_gen_deposit_tl(high, cin, s->T0, 1, TARGET_LONG_BITS - 1); } else { @@ -3484,10 +3740,14 @@ static void gen_RORX(DisasContext *s, X86DecodedInsn *decode) switch (ot) { case MO_32: #ifdef TARGET_X86_64 - tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b); - tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); - break; + { + TCGv_i32 tmp = tcg_temp_new_i32(); + + tcg_gen_trunc_tl_i32(tmp, s->T0); + tcg_gen_rotri_i32(tmp, tmp, b); + tcg_gen_extu_i32_tl(s->T0, tmp); + break; + } case MO_64: #endif @@ -3537,13 +3797,13 @@ static void gen_shift_dynamic_flags(DisasContext *s, X86DecodedInsn *decode, TCG decode->cc_op_dynamic = tcg_temp_new_i32(); assert(decode->cc_dst == s->T0); - if (cc_op_live[s->cc_op] & USES_CC_DST) { + if (cc_op_live(s->cc_op) & USES_CC_DST) { decode->cc_dst = tcg_temp_new(); tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_dst, count, tcg_constant_tl(0), cpu_cc_dst, s->T0); } - if (cc_op_live[s->cc_op] & USES_CC_SRC) { + if (cc_op_live(s->cc_op) & USES_CC_SRC) { tcg_gen_movcond_tl(TCG_COND_EQ, decode->cc_src, count, tcg_constant_tl(0), cpu_cc_src, decode->cc_src); } @@ -3590,22 +3850,64 @@ static void gen_SARX(DisasContext *s, X86DecodedInsn *decode) tcg_gen_sar_tl(s->T0, s->T0, s->T1); } +static void gen_SUB(DisasContext *s, X86DecodedInsn *decode); static void gen_SBB(DisasContext *s, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; - TCGv c_in = tcg_temp_new(); + TCGv c_in; + + /* + * Try to avoid CC_OP_SBB by transforming as follows: + * CC_SBB: src1 = dst + c_in, src2 = 0, src3 = c_in + * CC_SUB: src1 = dst + c_in, src2 = c_in (no src3) + * + * In general src2 vs. 
src3 matters when computing AF and OF, but not here: + * - AF is bit 4 of dst^src1^src2, which is bit 4 of dst^src1 in both cases + * - OF is a function of the two MSBs, and in both cases they are zero for src2 + */ + if (decode->e.op2 == X86_TYPE_I && decode->immediate == 0) { + gen_compute_eflags_c(s, s->T1); + gen_SUB(s, decode); + return; + } + c_in = tcg_temp_new(); gen_compute_eflags_c(s, c_in); + + /* + * Here the change is as follows: + * CC_SBB: src1 = T0, src2 = T0, src3 = c_in + * CC_SUB: src1 = 0, src2 = c_in (no src3) + * + * The difference also does not matter: + * - AF is bit 4 of dst^src1^src2, but bit 4 of src1^src2 is zero in both cases + * therefore AF comes straight from dst (in fact it is c_in) + * - for OF, src1 and src2 have the same sign in both cases, meaning there + * can be no overflow + */ + if (decode->e.op2 != X86_TYPE_I && !decode->op[0].has_ea && decode->op[0].n == decode->op[2].n) { + if (s->cc_op == CC_OP_DYNAMIC) { + tcg_gen_neg_tl(s->T0, c_in); + } else { + /* + * Do not negate c_in because it will often be dead and only the + * instruction generated by negsetcond will survive. + */ + gen_neg_setcc(s, JCC_B << 1, s->T0); + } + tcg_gen_movi_tl(s->cc_srcT, 0); + decode->cc_src = c_in; + decode->cc_dst = s->T0; + decode->cc_op = CC_OP_SUBB + ot; + return; + } + if (s->prefix & PREFIX_LOCK) { tcg_gen_add_tl(s->T0, s->T1, c_in); tcg_gen_neg_tl(s->T0, s->T0); tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T0, s->mem_index, ot | MO_LE); } else { - /* - * TODO: SBB reg, reg could use gen_prepare_eflags_c followed by - * negsetcond, and CC_OP_SUBB as the cc_op. - */ tcg_gen_sub_tl(s->T0, s->T0, s->T1); tcg_gen_sub_tl(s->T0, s->T0, c_in); } @@ -3615,16 +3917,12 @@ static void gen_SBB(DisasContext *s, X86DecodedInsn *decode) static void gen_SCAS(DisasContext *s, X86DecodedInsn *decode) { MemOp ot = decode->op[2].ot; - if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz_nz(s, ot, gen_scas); - } else { - gen_scas(s, ot); - } + gen_repz_nz(s, ot, gen_scas); } static void gen_SETcc(DisasContext *s, X86DecodedInsn *decode) { - gen_setcc1(s, decode->b & 0xf, s->T0); + gen_setcc(s, decode->b & 0xf, s->T0); } static void gen_SFENCE(DisasContext *s, X86DecodedInsn *decode) @@ -3720,8 +4018,7 @@ static void gen_SHLD(DisasContext *s, X86DecodedInsn *decode) } decode->cc_dst = s->T0; - decode->cc_src = s->tmp0; - gen_shiftd_rm_T1(s, ot, false, count); + decode->cc_src = gen_shiftd_rm_T1(s, ot, false, count); if (can_be_zero) { gen_shift_dynamic_flags(s, decode, count, CC_OP_SHLB + ot); } else { @@ -3773,8 +4070,7 @@ static void gen_SHRD(DisasContext *s, X86DecodedInsn *decode) } decode->cc_dst = s->T0; - decode->cc_src = s->tmp0; - gen_shiftd_rm_T1(s, ot, true, count); + decode->cc_src = gen_shiftd_rm_T1(s, ot, true, count); if (can_be_zero) { gen_shift_dynamic_flags(s, decode, count, CC_OP_SARB + ot); } else { @@ -3825,11 +4121,7 @@ static void gen_STMXCSR(DisasContext *s, X86DecodedInsn *decode) static void gen_STOS(DisasContext *s, X86DecodedInsn *decode) { MemOp ot = decode->op[1].ot; - if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { - gen_repz(s, ot, gen_stos); - } else { - gen_stos(s, ot); - } + gen_repz(s, ot, gen_stos); } static void gen_SUB(DisasContext *s, X86DecodedInsn *decode) @@ -4045,7 +4337,7 @@ static void gen_VCVTSI2Sx(DisasContext *s, X86DecodedInsn *decode) } return; } - in = s->tmp2_i32; + in = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(in, s->T1); #else in = s->T1; @@ -4075,7 +4367,7 @@ static inline void gen_VCVTtSx2SI(DisasContext *s, X86DecodedInsn 
*decode, return; } - out = s->tmp2_i32; + out = tcg_temp_new_i32(); #else out = s->T0; #endif @@ -4127,7 +4419,7 @@ static void gen_VEXTRACTPS(DisasContext *s, X86DecodedInsn *decode) gen_pextr(s, decode, MO_32); } -static void gen_vinsertps(DisasContext *s, X86DecodedInsn *decode) +static void gen_vinsertps(DisasContext *s, X86DecodedInsn *decode, TCGv_i32 tmp) { int val = decode->immediate; int dest_word = (val >> 4) & 3; @@ -4144,7 +4436,7 @@ static void gen_vinsertps(DisasContext *s, X86DecodedInsn *decode) } if (new_mask != (val & 15)) { - tcg_gen_st_i32(s->tmp2_i32, tcg_env, + tcg_gen_st_i32(tmp, tcg_env, vector_elem_offset(&decode->op[0], MO_32, dest_word)); } @@ -4163,15 +4455,19 @@ static void gen_vinsertps(DisasContext *s, X86DecodedInsn *decode) static void gen_VINSERTPS_r(DisasContext *s, X86DecodedInsn *decode) { int val = decode->immediate; - tcg_gen_ld_i32(s->tmp2_i32, tcg_env, + TCGv_i32 tmp = tcg_temp_new_i32(); + + tcg_gen_ld_i32(tmp, tcg_env, vector_elem_offset(&decode->op[2], MO_32, (val >> 6) & 3)); - gen_vinsertps(s, decode); + gen_vinsertps(s, decode, tmp); } static void gen_VINSERTPS_m(DisasContext *s, X86DecodedInsn *decode) { - tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); - gen_vinsertps(s, decode); + TCGv_i32 tmp = tcg_temp_new_i32(); + + tcg_gen_qemu_ld_i32(tmp, s->A0, s->mem_index, MO_LEUL); + gen_vinsertps(s, decode, tmp); } static void gen_VINSERTx128(DisasContext *s, X86DecodedInsn *decode) @@ -4292,25 +4588,29 @@ static void gen_VMOVSD_ld(DisasContext *s, X86DecodedInsn *decode) static void gen_VMOVSS(DisasContext *s, X86DecodedInsn *decode) { int vec_len = vector_len(s, decode); + TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_ld_i32(s->tmp2_i32, OP_PTR2, offsetof(ZMMReg, ZMM_L(0))); + tcg_gen_ld_i32(tmp, OP_PTR2, offsetof(ZMMReg, ZMM_L(0))); tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len); - tcg_gen_st_i32(s->tmp2_i32, OP_PTR0, offsetof(ZMMReg, ZMM_L(0))); + tcg_gen_st_i32(tmp, OP_PTR0, offsetof(ZMMReg, ZMM_L(0))); } static void gen_VMOVSS_ld(DisasContext *s, X86DecodedInsn *decode) { int vec_len = vector_len(s, decode); + TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); + tcg_gen_qemu_ld_i32(tmp, s->A0, s->mem_index, MO_LEUL); tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0); - tcg_gen_st_i32(s->tmp2_i32, OP_PTR0, offsetof(ZMMReg, ZMM_L(0))); + tcg_gen_st_i32(tmp, OP_PTR0, offsetof(ZMMReg, ZMM_L(0))); } static void gen_VMOVSS_st(DisasContext *s, X86DecodedInsn *decode) { - tcg_gen_ld_i32(s->tmp2_i32, OP_PTR2, offsetof(ZMMReg, ZMM_L(0))); - tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL); + TCGv_i32 tmp = tcg_temp_new_i32(); + + tcg_gen_ld_i32(tmp, OP_PTR2, offsetof(ZMMReg, ZMM_L(0))); + tcg_gen_qemu_st_i32(tmp, s->A0, s->mem_index, MO_LEUL); } static void gen_VPMASKMOV_st(DisasContext *s, X86DecodedInsn *decode) @@ -4477,7 +4777,8 @@ static void gen_XOR(DisasContext *s, X86DecodedInsn *decode) decode->op[2].unit == X86_OP_INT && decode->op[1].n == decode->op[2].n) { tcg_gen_movi_tl(s->T0, 0); - decode->cc_op = CC_OP_CLR; + decode->cc_op = CC_OP_EFLAGS; + decode->cc_src = tcg_constant_tl(CC_Z | CC_P); } else { MemOp ot = decode->op[1].ot; diff --git a/target/i386/tcg/excp_helper.c b/target/i386/tcg/excp_helper.c index 72387aac24f..6fb8036d988 100644 --- a/target/i386/tcg/excp_helper.c +++ b/target/i386/tcg/excp_helper.c @@ -19,9 +19,8 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include 
"qemu/log.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "exec/helper-proto.h" #include "helper-tcg.h" diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c index e1b850f3fc2..b3b23823fda 100644 --- a/target/i386/tcg/fpu_helper.c +++ b/target/i386/tcg/fpu_helper.c @@ -21,8 +21,8 @@ #include #include "cpu.h" #include "tcg-cpu.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "exec/cputlb.h" +#include "accel/tcg/cpu-ldst.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" #include "fpu/softfloat-macros.h" @@ -135,16 +135,79 @@ static void fpu_set_exception(CPUX86State *env, int mask) } } -static inline uint8_t save_exception_flags(CPUX86State *env) +void cpu_init_fp_statuses(CPUX86State *env) { - uint8_t old_flags = get_float_exception_flags(&env->fp_status); + /* + * Initialise the non-runtime-varying fields of the various + * float_status words to x86 behaviour. This must be called at + * CPU reset because the float_status words are in the + * "zeroed on reset" portion of the CPU state struct. + * Fields in float_status that vary under guest control are set + * via the codepath for setting that register, eg cpu_set_fpuc(). + */ + /* + * Use x87 NaN propagation rules: + * SNaN + QNaN => return the QNaN + * two SNaNs => return the one with the larger significand, silenced + * two QNaNs => return the one with the larger significand + * SNaN and a non-NaN => return the SNaN, silenced + * QNaN and a non-NaN => return the QNaN + * + * If we get down to comparing significands and they are the same, + * return the NaN with the positive sign bit (if any). + */ + set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status); + /* + * TODO: These are incorrect: the x86 Software Developer's Manual vol 1 + * section 4.8.3.5 "Operating on SNaNs and QNaNs" says that the + * "larger significand" behaviour is only used for x87 FPU operations. + * For SSE the required behaviour is to always return the first NaN, + * which is float_2nan_prop_ab. + * + * mmx_status is used only for the AMD 3DNow! instructions, which + * are documented in the "3DNow! Technology Manual" as not supporting + * NaNs or infinities as inputs. The result of passing two NaNs is + * documented as "undefined", so we can do what we choose. + * (Strictly there is some behaviour we don't implement correctly + * for these "unsupported" NaN and Inf values, like "NaN * 0 == 0".) + */ + set_float_2nan_prop_rule(float_2nan_prop_x87, &env->mmx_status); + set_float_2nan_prop_rule(float_2nan_prop_x87, &env->sse_status); + /* + * Only SSE has multiply-add instructions. In the SDM Section 14.5.2 + * "Fused-Multiply-ADD (FMA) Numeric Behavior" the NaN handling is + * specified -- for 0 * inf + NaN the input NaN is selected, and if + * there are multiple input NaNs they are selected in the order a, b, c. + * We also do not raise Invalid for the 0 * inf + (Q)NaN case. 
+ */ + set_float_infzeronan_rule(float_infzeronan_dnan_never | + float_infzeronan_suppress_invalid, + &env->sse_status); + set_float_3nan_prop_rule(float_3nan_prop_abc, &env->sse_status); + /* Default NaN: sign bit set, most significant frac bit set */ + set_float_default_nan_pattern(0b11000000, &env->fp_status); + set_float_default_nan_pattern(0b11000000, &env->mmx_status); + set_float_default_nan_pattern(0b11000000, &env->sse_status); + /* + * x86 does flush-to-zero detection after rounding (the SDM + * section 10.2.3.3 on the FTZ bit of MXCSR says that we flush + * when we detect underflow, which x86 does after rounding). + */ + set_float_ftz_detection(float_ftz_after_rounding, &env->fp_status); + set_float_ftz_detection(float_ftz_after_rounding, &env->mmx_status); + set_float_ftz_detection(float_ftz_after_rounding, &env->sse_status); +} + +static inline int save_exception_flags(CPUX86State *env) +{ + int old_flags = get_float_exception_flags(&env->fp_status); set_float_exception_flags(0, &env->fp_status); return old_flags; } -static void merge_exception_flags(CPUX86State *env, uint8_t old_flags) +static void merge_exception_flags(CPUX86State *env, int old_flags) { - uint8_t new_flags = get_float_exception_flags(&env->fp_status); + int new_flags = get_float_exception_flags(&env->fp_status); float_raise(old_flags, &env->fp_status); fpu_set_exception(env, ((new_flags & float_flag_invalid ? FPUS_IE : 0) | @@ -152,12 +215,12 @@ static void merge_exception_flags(CPUX86State *env, uint8_t old_flags) (new_flags & float_flag_overflow ? FPUS_OE : 0) | (new_flags & float_flag_underflow ? FPUS_UE : 0) | (new_flags & float_flag_inexact ? FPUS_PE : 0) | - (new_flags & float_flag_input_denormal ? FPUS_DE : 0))); + (new_flags & float_flag_input_denormal_used ? 
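/*
 * Rough restatement in plain C of the x87 two-NaN rule described in
 * cpu_init_fp_statuses() above (illustration only, not the softfloat
 * implementation; the quiet bit, significand and sign are assumed to have
 * been extracted already).  Returns 0 to pick operand a, 1 to pick b; a
 * chosen SNaN is additionally silenced by the caller.
 */
static int x87_pick_nan(int a_quiet, unsigned long long a_frac, int a_sign,
                        int b_quiet, unsigned long long b_frac, int b_sign)
{
    if (a_quiet != b_quiet) {
        return a_quiet ? 0 : 1;                  /* SNaN + QNaN: keep the QNaN */
    }
    if (a_frac != b_frac) {
        return a_frac > b_frac ? 0 : 1;          /* larger significand wins */
    }
    return a_sign ? 1 : 0;                       /* tie: prefer the positive one */
}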
FPUS_DE : 0))); } static inline floatx80 helper_fdiv(CPUX86State *env, floatx80 a, floatx80 b) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); floatx80 ret = floatx80_div(a, b, &env->fp_status); merge_exception_flags(env, old_flags); return ret; @@ -177,7 +240,7 @@ static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr) void helper_flds_FT0(CPUX86State *env, uint32_t val) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); union { float32 f; uint32_t i; @@ -190,7 +253,7 @@ void helper_flds_FT0(CPUX86State *env, uint32_t val) void helper_fldl_FT0(CPUX86State *env, uint64_t val) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); union { float64 f; uint64_t i; @@ -208,7 +271,7 @@ void helper_fildl_FT0(CPUX86State *env, int32_t val) void helper_flds_ST0(CPUX86State *env, uint32_t val) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); int new_fpstt; union { float32 f; @@ -225,7 +288,7 @@ void helper_flds_ST0(CPUX86State *env, uint32_t val) void helper_fldl_ST0(CPUX86State *env, uint64_t val) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); int new_fpstt; union { float64 f; @@ -275,7 +338,7 @@ void helper_fildll_ST0(CPUX86State *env, int64_t val) uint32_t helper_fsts_ST0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); union { float32 f; uint32_t i; @@ -288,7 +351,7 @@ uint32_t helper_fsts_ST0(CPUX86State *env) uint64_t helper_fstl_ST0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); union { float64 f; uint64_t i; @@ -301,7 +364,7 @@ uint64_t helper_fstl_ST0(CPUX86State *env) int32_t helper_fist_ST0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); int32_t val; val = floatx80_to_int32(ST0, &env->fp_status); @@ -315,7 +378,7 @@ int32_t helper_fist_ST0(CPUX86State *env) int32_t helper_fistl_ST0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); int32_t val; val = floatx80_to_int32(ST0, &env->fp_status); @@ -328,7 +391,7 @@ int32_t helper_fistl_ST0(CPUX86State *env) int64_t helper_fistll_ST0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); int64_t val; val = floatx80_to_int64(ST0, &env->fp_status); @@ -341,7 +404,7 @@ int64_t helper_fistll_ST0(CPUX86State *env) int32_t helper_fistt_ST0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); int32_t val; val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status); @@ -355,7 +418,7 @@ int32_t helper_fistt_ST0(CPUX86State *env) int32_t helper_fisttl_ST0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); int32_t val; val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status); @@ -368,7 +431,7 @@ int32_t helper_fisttl_ST0(CPUX86State *env) int64_t helper_fisttll_ST0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); int64_t val; val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status); @@ -464,7 +527,7 @@ static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500}; void helper_fcom_ST0_FT0(CPUX86State *env) { 
- uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); FloatRelation ret; ret = floatx80_compare(ST0, FT0, &env->fp_status); @@ -474,7 +537,7 @@ void helper_fcom_ST0_FT0(CPUX86State *env) void helper_fucom_ST0_FT0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); FloatRelation ret; ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status); @@ -486,7 +549,7 @@ static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; void helper_fcomi_ST0_FT0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); int eflags; FloatRelation ret; @@ -499,7 +562,7 @@ void helper_fcomi_ST0_FT0(CPUX86State *env) void helper_fucomi_ST0_FT0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); int eflags; FloatRelation ret; @@ -512,28 +575,28 @@ void helper_fucomi_ST0_FT0(CPUX86State *env) void helper_fadd_ST0_FT0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); ST0 = floatx80_add(ST0, FT0, &env->fp_status); merge_exception_flags(env, old_flags); } void helper_fmul_ST0_FT0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); ST0 = floatx80_mul(ST0, FT0, &env->fp_status); merge_exception_flags(env, old_flags); } void helper_fsub_ST0_FT0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); ST0 = floatx80_sub(ST0, FT0, &env->fp_status); merge_exception_flags(env, old_flags); } void helper_fsubr_ST0_FT0(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); ST0 = floatx80_sub(FT0, ST0, &env->fp_status); merge_exception_flags(env, old_flags); } @@ -552,28 +615,28 @@ void helper_fdivr_ST0_FT0(CPUX86State *env) void helper_fadd_STN_ST0(CPUX86State *env, int st_index) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status); merge_exception_flags(env, old_flags); } void helper_fmul_STN_ST0(CPUX86State *env, int st_index) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status); merge_exception_flags(env, old_flags); } void helper_fsub_STN_ST0(CPUX86State *env, int st_index) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status); merge_exception_flags(env, old_flags); } void helper_fsubr_STN_ST0(CPUX86State *env, int st_index) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status); merge_exception_flags(env, old_flags); } @@ -798,7 +861,7 @@ void helper_fbld_ST0(CPUX86State *env, target_ulong ptr) void helper_fbst_ST0(CPUX86State *env, target_ulong ptr) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); int v; target_ulong mem_ref, mem_end; int64_t val; @@ -1073,12 +1136,12 @@ static const struct f2xm1_data f2xm1_table[65] = { void helper_f2xm1(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); uint64_t sig = extractFloatx80Frac(ST0); 
int32_t exp = extractFloatx80Exp(ST0); bool sign = extractFloatx80Sign(ST0); - if (floatx80_invalid_encoding(ST0)) { + if (floatx80_invalid_encoding(ST0, &env->fp_status)) { float_raise(float_flag_invalid, &env->fp_status); ST0 = floatx80_default_nan(&env->fp_status); } else if (floatx80_is_any_nan(ST0)) { @@ -1306,7 +1369,7 @@ static const struct fpatan_data fpatan_table[9] = { void helper_fpatan(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); uint64_t arg0_sig = extractFloatx80Frac(ST0); int32_t arg0_exp = extractFloatx80Exp(ST0); bool arg0_sign = extractFloatx80Sign(ST0); @@ -1320,8 +1383,8 @@ void helper_fpatan(CPUX86State *env) } else if (floatx80_is_signaling_nan(ST1, &env->fp_status)) { float_raise(float_flag_invalid, &env->fp_status); ST1 = floatx80_silence_nan(ST1, &env->fp_status); - } else if (floatx80_invalid_encoding(ST0) || - floatx80_invalid_encoding(ST1)) { + } else if (floatx80_invalid_encoding(ST0, &env->fp_status) || + floatx80_invalid_encoding(ST1, &env->fp_status)) { float_raise(float_flag_invalid, &env->fp_status); ST1 = floatx80_default_nan(&env->fp_status); } else if (floatx80_is_any_nan(ST0)) { @@ -1330,7 +1393,8 @@ void helper_fpatan(CPUX86State *env) /* Pass this NaN through. */ } else if (floatx80_is_zero(ST1) && !arg0_sign) { /* Pass this zero through. */ - } else if (((floatx80_is_infinity(ST0) && !floatx80_is_infinity(ST1)) || + } else if (((floatx80_is_infinity(ST0, &env->fp_status) && + !floatx80_is_infinity(ST1, &env->fp_status)) || arg0_exp - arg1_exp >= 80) && !arg0_sign) { /* @@ -1379,8 +1443,8 @@ void helper_fpatan(CPUX86State *env) rexp = pi_exp; rsig0 = pi_sig_high; rsig1 = pi_sig_low; - } else if (floatx80_is_infinity(ST1)) { - if (floatx80_is_infinity(ST0)) { + } else if (floatx80_is_infinity(ST1, &env->fp_status)) { + if (floatx80_is_infinity(ST0, &env->fp_status)) { if (arg0_sign) { rexp = pi_34_exp; rsig0 = pi_34_sig_high; @@ -1399,7 +1463,8 @@ void helper_fpatan(CPUX86State *env) rexp = pi_2_exp; rsig0 = pi_2_sig_high; rsig1 = pi_2_sig_low; - } else if (floatx80_is_infinity(ST0) || arg0_exp - arg1_exp >= 80) { + } else if (floatx80_is_infinity(ST0, &env->fp_status) || + arg0_exp - arg1_exp >= 80) { /* ST0 is negative. 
*/ rexp = pi_exp; rsig0 = pi_sig_high; @@ -1743,7 +1808,7 @@ void helper_fpatan(CPUX86State *env) void helper_fxtract(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); CPU_LDoubleU temp; temp.d = ST0; @@ -1754,7 +1819,7 @@ void helper_fxtract(CPUX86State *env) &env->fp_status); fpush(env); ST0 = temp.d; - } else if (floatx80_invalid_encoding(ST0)) { + } else if (floatx80_invalid_encoding(ST0, &env->fp_status)) { float_raise(float_flag_invalid, &env->fp_status); ST0 = floatx80_default_nan(&env->fp_status); fpush(env); @@ -1766,10 +1831,10 @@ void helper_fxtract(CPUX86State *env) } fpush(env); ST0 = ST1; - } else if (floatx80_is_infinity(ST0)) { + } else if (floatx80_is_infinity(ST0, &env->fp_status)) { fpush(env); ST0 = ST1; - ST1 = floatx80_infinity; + ST1 = floatx80_default_inf(0, &env->fp_status); } else { int expdif; @@ -1777,7 +1842,7 @@ void helper_fxtract(CPUX86State *env) int shift = clz64(temp.l.lower); temp.l.lower <<= shift; expdif = 1 - EXPBIAS - shift; - float_raise(float_flag_input_denormal, &env->fp_status); + float_raise(float_flag_input_denormal_flushed, &env->fp_status); } else { expdif = EXPD(temp) - EXPBIAS; } @@ -1792,7 +1857,7 @@ void helper_fxtract(CPUX86State *env) static void helper_fprem_common(CPUX86State *env, bool mod) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); uint64_t quotient; CPU_LDoubleU temp0, temp1; int exp0, exp1, expdiff; @@ -1805,7 +1870,8 @@ static void helper_fprem_common(CPUX86State *env, bool mod) env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ if (floatx80_is_zero(ST0) || floatx80_is_zero(ST1) || exp0 == 0x7fff || exp1 == 0x7fff || - floatx80_invalid_encoding(ST0) || floatx80_invalid_encoding(ST1)) { + floatx80_invalid_encoding(ST0, &env->fp_status) || + floatx80_invalid_encoding(ST1, &env->fp_status)) { ST0 = floatx80_modrem(ST0, ST1, mod, "ient, &env->fp_status); } else { if (exp0 == 0) { @@ -1987,7 +2053,7 @@ static void helper_fyl2x_common(CPUX86State *env, floatx80 arg, int32_t *exp, void helper_fyl2xp1(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); uint64_t arg0_sig = extractFloatx80Frac(ST0); int32_t arg0_exp = extractFloatx80Exp(ST0); bool arg0_sign = extractFloatx80Sign(ST0); @@ -2001,8 +2067,8 @@ void helper_fyl2xp1(CPUX86State *env) } else if (floatx80_is_signaling_nan(ST1, &env->fp_status)) { float_raise(float_flag_invalid, &env->fp_status); ST1 = floatx80_silence_nan(ST1, &env->fp_status); - } else if (floatx80_invalid_encoding(ST0) || - floatx80_invalid_encoding(ST1)) { + } else if (floatx80_invalid_encoding(ST0, &env->fp_status) || + floatx80_invalid_encoding(ST1, &env->fp_status)) { float_raise(float_flag_invalid, &env->fp_status); ST1 = floatx80_default_nan(&env->fp_status); } else if (floatx80_is_any_nan(ST0)) { @@ -2085,7 +2151,7 @@ void helper_fyl2xp1(CPUX86State *env) void helper_fyl2x(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); uint64_t arg0_sig = extractFloatx80Frac(ST0); int32_t arg0_exp = extractFloatx80Exp(ST0); bool arg0_sign = extractFloatx80Sign(ST0); @@ -2099,8 +2165,8 @@ void helper_fyl2x(CPUX86State *env) } else if (floatx80_is_signaling_nan(ST1, &env->fp_status)) { float_raise(float_flag_invalid, &env->fp_status); ST1 = floatx80_silence_nan(ST1, &env->fp_status); - } else if (floatx80_invalid_encoding(ST0) || - floatx80_invalid_encoding(ST1)) { + } else if 
(floatx80_invalid_encoding(ST0, &env->fp_status) || + floatx80_invalid_encoding(ST1, &env->fp_status)) { float_raise(float_flag_invalid, &env->fp_status); ST1 = floatx80_default_nan(&env->fp_status); } else if (floatx80_is_any_nan(ST0)) { @@ -2110,7 +2176,7 @@ void helper_fyl2x(CPUX86State *env) } else if (arg0_sign && !floatx80_is_zero(ST0)) { float_raise(float_flag_invalid, &env->fp_status); ST1 = floatx80_default_nan(&env->fp_status); - } else if (floatx80_is_infinity(ST1)) { + } else if (floatx80_is_infinity(ST1, &env->fp_status)) { FloatRelation cmp = floatx80_compare(ST0, floatx80_one, &env->fp_status); switch (cmp) { @@ -2125,7 +2191,7 @@ void helper_fyl2x(CPUX86State *env) ST1 = floatx80_default_nan(&env->fp_status); break; } - } else if (floatx80_is_infinity(ST0)) { + } else if (floatx80_is_infinity(ST0, &env->fp_status)) { if (floatx80_is_zero(ST1)) { float_raise(float_flag_invalid, &env->fp_status); ST1 = floatx80_default_nan(&env->fp_status); @@ -2232,7 +2298,7 @@ void helper_fyl2x(CPUX86State *env) void helper_fsqrt(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); if (floatx80_is_neg(ST0)) { env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ env->fpus |= 0x400; @@ -2258,15 +2324,16 @@ void helper_fsincos(CPUX86State *env) void helper_frndint(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); + int old_flags = save_exception_flags(env); ST0 = floatx80_round_to_int(ST0, &env->fp_status); merge_exception_flags(env, old_flags); } void helper_fscale(CPUX86State *env) { - uint8_t old_flags = save_exception_flags(env); - if (floatx80_invalid_encoding(ST1) || floatx80_invalid_encoding(ST0)) { + int old_flags = save_exception_flags(env); + if (floatx80_invalid_encoding(ST1, &env->fp_status) || + floatx80_invalid_encoding(ST0, &env->fp_status)) { float_raise(float_flag_invalid, &env->fp_status); ST0 = floatx80_default_nan(&env->fp_status); } else if (floatx80_is_any_nan(ST1)) { @@ -2278,11 +2345,11 @@ void helper_fscale(CPUX86State *env) float_raise(float_flag_invalid, &env->fp_status); ST0 = floatx80_silence_nan(ST0, &env->fp_status); } - } else if (floatx80_is_infinity(ST1) && - !floatx80_invalid_encoding(ST0) && + } else if (floatx80_is_infinity(ST1, &env->fp_status) && + !floatx80_invalid_encoding(ST0, &env->fp_status) && !floatx80_is_any_nan(ST0)) { if (floatx80_is_neg(ST1)) { - if (floatx80_is_infinity(ST0)) { + if (floatx80_is_infinity(ST0, &env->fp_status)) { float_raise(float_flag_invalid, &env->fp_status); ST0 = floatx80_default_nan(&env->fp_status); } else { @@ -2295,15 +2362,14 @@ void helper_fscale(CPUX86State *env) float_raise(float_flag_invalid, &env->fp_status); ST0 = floatx80_default_nan(&env->fp_status); } else { - ST0 = (floatx80_is_neg(ST0) ? - floatx80_chs(floatx80_infinity) : - floatx80_infinity); + ST0 = floatx80_default_inf(floatx80_is_neg(ST0), + &env->fp_status); } } } else { int n; FloatX80RoundPrec save = env->fp_status.floatx80_rounding_precision; - uint8_t save_flags = get_float_exception_flags(&env->fp_status); + int save_flags = get_float_exception_flags(&env->fp_status); set_float_exception_flags(0, &env->fp_status); n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status); set_float_exception_flags(save_flags, &env->fp_status); @@ -3188,6 +3254,7 @@ void update_mxcsr_status(CPUX86State *env) /* Set exception flags. */ set_float_exception_flags((mxcsr & FPUS_IE ? float_flag_invalid : 0) | + (mxcsr & FPUS_DE ? float_flag_input_denormal_used : 0) | (mxcsr & FPUS_ZE ? 
float_flag_divbyzero : 0) | (mxcsr & FPUS_OE ? float_flag_overflow : 0) | (mxcsr & FPUS_UE ? float_flag_underflow : 0) | @@ -3203,20 +3270,14 @@ void update_mxcsr_status(CPUX86State *env) void update_mxcsr_from_sse_status(CPUX86State *env) { - uint8_t flags = get_float_exception_flags(&env->sse_status); - /* - * The MXCSR denormal flag has opposite semantics to - * float_flag_input_denormal (the softfloat code sets that flag - * only when flushing input denormals to zero, but SSE sets it - * only when not flushing them to zero), so is not converted - * here. - */ + int flags = get_float_exception_flags(&env->sse_status); env->mxcsr |= ((flags & float_flag_invalid ? FPUS_IE : 0) | + (flags & float_flag_input_denormal_used ? FPUS_DE : 0) | (flags & float_flag_divbyzero ? FPUS_ZE : 0) | (flags & float_flag_overflow ? FPUS_OE : 0) | (flags & float_flag_underflow ? FPUS_UE : 0) | (flags & float_flag_inexact ? FPUS_PE : 0) | - (flags & float_flag_output_denormal ? FPUS_UE | FPUS_PE : + (flags & float_flag_output_denormal_flushed ? FPUS_UE | FPUS_PE : 0)); } diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h index 15d6c6f8b4f..be011b06b7c 100644 --- a/target/i386/tcg/helper-tcg.h +++ b/target/i386/tcg/helper-tcg.h @@ -20,7 +20,7 @@ #ifndef I386_HELPER_TCG_H #define I386_HELPER_TCG_H -#include "exec/exec-all.h" +#include "qemu/host-utils.h" /* Maximum instruction code size */ #define TARGET_MAX_INSN_SIZE 16 @@ -58,6 +58,8 @@ static inline target_long lshift(target_long x, int n) /* translate.c */ void tcg_x86_init(void); +void x86_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); /* excp_helper.c */ G_NORETURN void raise_exception(CPUX86State *env, int exception_index); @@ -87,12 +89,15 @@ G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, #endif /* cc_helper.c */ -extern const uint8_t parity_table[256]; +static inline unsigned int compute_pf(uint8_t x) +{ + return !parity8(x) * CC_P; +} /* misc_helper.c */ void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask); -/* sysemu/svm_helper.c */ +/* system/svm_helper.c */ #ifndef CONFIG_USER_ONLY G_NORETURN void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1, uintptr_t retaddr); @@ -110,7 +115,7 @@ int exception_has_error_code(int intno); /* smm_helper.c */ void do_smm_enter(X86CPU *cpu); -/* sysemu/bpt_helper.c */ +/* system/bpt_helper.c */ bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update); /* diff --git a/target/i386/tcg/int_helper.c b/target/i386/tcg/int_helper.c index e1f92405282..46741d9f68a 100644 --- a/target/i386/tcg/int_helper.c +++ b/target/i386/tcg/int_helper.c @@ -20,7 +20,6 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" -#include "exec/exec-all.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" #include "qapi/error.h" @@ -237,7 +236,7 @@ void helper_daa(CPUX86State *env) env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al; /* well, speed is not an issue here, so we compute the flags by hand */ eflags |= (al == 0) << 6; /* zf */ - eflags |= parity_table[al]; /* pf */ + eflags |= compute_pf(al); eflags |= (al & 0x80); /* sf */ CC_SRC = eflags; CC_OP = CC_OP_EFLAGS; @@ -269,7 +268,7 @@ void helper_das(CPUX86State *env) env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al; /* well, speed is not an issue here, so we compute the flags by hand */ eflags |= (al == 0) << 6; /* zf */ - eflags |= parity_table[al]; /* pf */ + eflags |= compute_pf(al); eflags |= (al & 0x80); /* sf */ 
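/*
 * Quick illustration of compute_pf() above: x86 PF is set when the low byte
 * of the result contains an even number of 1 bits, e.g. 0x03 (two bits set)
 * yields CC_P while 0x07 (three bits set) yields 0.  This assumes parity8()
 * returns popcount(x) & 1, i.e. 1 for odd parity.
 */
static unsigned pf_from_popcount(unsigned char x)
{
    return (__builtin_popcount(x) & 1) ? 0 : CC_P;   /* even parity sets PF */
}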
CC_SRC = eflags; CC_OP = CC_OP_EFLAGS; diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c index 3ef84e90d94..9e7c2d80293 100644 --- a/target/i386/tcg/mem_helper.c +++ b/target/i386/tcg/mem_helper.c @@ -20,8 +20,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "qemu/int128.h" #include "qemu/atomic128.h" #include "tcg/tcg.h" diff --git a/target/i386/tcg/meson.build b/target/i386/tcg/meson.build index 1105b35d921..c57e661752e 100644 --- a/target/i386/tcg/meson.build +++ b/target/i386/tcg/meson.build @@ -12,5 +12,5 @@ i386_ss.add(when: 'CONFIG_TCG', if_true: files( 'tcg-cpu.c', 'translate.c'), if_false: files('tcg-stub.c')) -subdir('sysemu') +subdir('system') subdir('user') diff --git a/target/i386/tcg/misc_helper.c b/target/i386/tcg/misc_helper.c index ed4cda8001e..2b5f092a23f 100644 --- a/target/i386/tcg/misc_helper.c +++ b/target/i386/tcg/misc_helper.c @@ -21,7 +21,7 @@ #include "qemu/log.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "helper-tcg.h" /* diff --git a/target/i386/tcg/mpx_helper.c b/target/i386/tcg/mpx_helper.c index 22423eedcd0..fa8abcc4820 100644 --- a/target/i386/tcg/mpx_helper.c +++ b/target/i386/tcg/mpx_helper.c @@ -20,8 +20,8 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-ldst.h" +#include "exec/target_page.h" #include "helper-tcg.h" diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c index bab552cd535..071f3fbd83d 100644 --- a/target/i386/tcg/seg_helper.c +++ b/target/i386/tcg/seg_helper.c @@ -22,12 +22,13 @@ #include "cpu.h" #include "qemu/log.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/probe.h" #include "exec/log.h" #include "helper-tcg.h" #include "seg_helper.h" #include "access.h" +#include "tcg-cpu.h" #ifdef TARGET_X86_64 #define SET_ESP(val, sp_mask) \ @@ -94,7 +95,7 @@ static uint32_t popl(StackAccess *sa) int get_pg_mode(CPUX86State *env) { - int pg_mode = 0; + int pg_mode = PG_MODE_PG; if (!(env->cr[0] & CR0_PG_MASK)) { return 0; } @@ -128,6 +129,22 @@ int get_pg_mode(CPUX86State *env) return pg_mode; } +static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl) +{ + int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1; + int mmu_index_base = + !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX : + (pl < 3 && (env->eflags & AC_MASK) + ? 
MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX); + + return mmu_index_base + mmu_index_32; +} + +int cpu_mmu_index_kernel(CPUX86State *env) +{ + return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK); +} + /* return non zero if error */ static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr, uint32_t *e2_ptr, int selector, @@ -309,10 +326,10 @@ static void tss_set_busy(CPUX86State *env, int tss_selector, bool value, #define SWITCH_TSS_IRET 1 #define SWITCH_TSS_CALL 2 -/* return 0 if switching to a 16-bit selector */ -static int switch_tss_ra(CPUX86State *env, int tss_selector, - uint32_t e1, uint32_t e2, int source, - uint32_t next_eip, uintptr_t retaddr) +static void switch_tss_ra(CPUX86State *env, int tss_selector, + uint32_t e1, uint32_t e2, int source, + uint32_t next_eip, bool has_error_code, + uint32_t error_code, uintptr_t retaddr) { int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i; target_ulong tss_base; @@ -378,7 +395,7 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector, /* X86Access avoids memory exceptions during the task switch */ mmu_index = cpu_mmu_index_kernel(env); - access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max, + access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1, MMU_DATA_STORE, mmu_index, retaddr); if (source == SWITCH_TSS_CALL) { @@ -386,7 +403,8 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector, probe_access(env, tss_base, 2, MMU_DATA_STORE, mmu_index, retaddr); } - access_prepare_mmu(&new, env, tss_base, tss_limit, + /* While true tss_limit may be larger, we don't access the iopb here. */ + access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1, MMU_DATA_LOAD, mmu_index, retaddr); /* save the current state in the old TSS */ @@ -455,10 +473,6 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector, new_segs[R_GS] = 0; new_trap = 0; } - /* XXX: avoid a compiler warning, see - http://support.amd.com/us/Processor_TechDocs/24593.pdf - chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */ - (void)new_trap; /* clear busy bit (it is restartable) */ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { @@ -581,14 +595,43 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector, cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK); } #endif - return type >> 3; + + if (has_error_code) { + int cpl = env->hflags & HF_CPL_MASK; + StackAccess sa; + + /* push the error code */ + sa.env = env; + sa.ra = retaddr; + sa.mmu_index = x86_mmu_index_pl(env, cpl); + sa.sp = env->regs[R_ESP]; + if (env->segs[R_SS].flags & DESC_B_MASK) { + sa.sp_mask = 0xffffffff; + } else { + sa.sp_mask = 0xffff; + } + sa.ss_base = env->segs[R_SS].base; + if (type & 8) { + pushl(&sa, error_code); + } else { + pushw(&sa, error_code); + } + SET_ESP(sa.sp, sa.sp_mask); + } + + if (new_trap) { + env->dr[6] |= DR6_BT; + raise_exception_ra(env, EXCP01_DB, retaddr); + } } -static int switch_tss(CPUX86State *env, int tss_selector, - uint32_t e1, uint32_t e2, int source, - uint32_t next_eip) +static void switch_tss(CPUX86State *env, int tss_selector, + uint32_t e1, uint32_t e2, int source, + uint32_t next_eip, bool has_error_code, + int error_code) { - return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0); + switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, + has_error_code, error_code, 0); } static inline unsigned int get_sp_mask(unsigned int e2) @@ -694,7 +737,6 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, sa.env = env; sa.ra = 0; - 
sa.mmu_index = cpu_mmu_index_kernel(env); if (type == 5) { /* task gate */ @@ -702,23 +744,8 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); } - shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); - if (has_error_code) { - /* push the error code */ - if (env->segs[R_SS].flags & DESC_B_MASK) { - sa.sp_mask = 0xffffffff; - } else { - sa.sp_mask = 0xffff; - } - sa.sp = env->regs[R_ESP]; - sa.ss_base = env->segs[R_SS].base; - if (shift) { - pushl(&sa, error_code); - } else { - pushw(&sa, error_code); - } - SET_ESP(sa.sp, sa.sp_mask); - } + switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip, + has_error_code, error_code); return; } @@ -749,6 +776,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, if (e2 & DESC_C_MASK) { dpl = cpl; } + sa.mmu_index = x86_mmu_index_pl(env, dpl); if (dpl < cpl) { /* to inner privilege */ uint32_t esp; @@ -1000,7 +1028,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int, sa.env = env; sa.ra = 0; - sa.mmu_index = cpu_mmu_index_kernel(env); + sa.mmu_index = x86_mmu_index_pl(env, dpl); sa.sp_mask = -1; sa.ss_base = 0; if (dpl < cpl || ist != 0) { @@ -1134,7 +1162,7 @@ static void do_interrupt_real(CPUX86State *env, int intno, int is_int, sa.sp = env->regs[R_ESP]; sa.sp_mask = 0xffff; sa.ss_base = env->segs[R_SS].base; - sa.mmu_index = cpu_mmu_index_kernel(env); + sa.mmu_index = x86_mmu_index_pl(env, 0); if (is_int) { old_eip = next_eip; @@ -1513,7 +1541,8 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, if (dpl < cpl || dpl < rpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } - switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC()); + switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, + false, 0, GETPC()); break; case 4: /* 286 call gate */ case 12: /* 386 call gate */ @@ -1598,7 +1627,7 @@ void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip, sa.sp = env->regs[R_ESP]; sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); sa.ss_base = env->segs[R_SS].base; - sa.mmu_index = cpu_mmu_index_kernel(env); + sa.mmu_index = x86_mmu_index_pl(env, 0); if (shift) { pushl(&sa, env->segs[R_CS].selector); @@ -1638,9 +1667,9 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, sa.env = env; sa.ra = GETPC(); - sa.mmu_index = cpu_mmu_index_kernel(env); if (e2 & DESC_S_MASK) { + /* "normal" far call, no stack switch possible */ if (!(e2 & DESC_CS_MASK)) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } @@ -1664,6 +1693,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC()); } + sa.mmu_index = x86_mmu_index_pl(env, cpl); #ifdef TARGET_X86_64 /* XXX: check 16/32 bit cases in long mode */ if (shift == 2) { @@ -1724,7 +1754,8 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, if (dpl < cpl || dpl < rpl) { raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC()); } - switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC()); + switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, + false, 0, GETPC()); return; case 4: /* 286 call gate */ case 12: /* 386 call gate */ @@ -1791,6 +1822,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, if (!(e2 & DESC_C_MASK) && dpl < 
cpl) { /* to inner privilege */ + sa.mmu_index = x86_mmu_index_pl(env, dpl); #ifdef TARGET_X86_64 if (shift == 2) { ss = dpl; /* SS = NULL selector with RPL = new CPL */ @@ -1869,6 +1901,7 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, new_stack = 1; } else { /* to same privilege */ + sa.mmu_index = x86_mmu_index_pl(env, cpl); sa.sp = env->regs[R_ESP]; sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); sa.ss_base = env->segs[R_SS].base; @@ -2233,7 +2266,8 @@ void helper_iret_protected(CPUX86State *env, int shift, int next_eip) if (type != 3) { raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC()); } - switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC()); + switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, + false, 0, GETPC()); } else { helper_ret_protected(env, shift, 1, 0, GETPC()); } diff --git a/target/i386/tcg/seg_helper.h b/target/i386/tcg/seg_helper.h index ebf10352778..ea98e1a98ed 100644 --- a/target/i386/tcg/seg_helper.h +++ b/target/i386/tcg/seg_helper.h @@ -20,6 +20,8 @@ #ifndef SEG_HELPER_H #define SEG_HELPER_H +#include "cpu.h" + //#define DEBUG_PCALL #ifdef DEBUG_PCALL @@ -31,12 +33,12 @@ # define LOG_PCALL_STATE(cpu) do { } while (0) #endif +int cpu_mmu_index_kernel(CPUX86State *env); + /* * TODO: Convert callers to compute cpu_mmu_index_kernel once * and use *_mmuidx_ra directly. */ -#define cpu_ldub_kernel_ra(e, p, r) \ - cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) #define cpu_lduw_kernel_ra(e, p, r) \ cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) #define cpu_ldl_kernel_ra(e, p, r) \ @@ -44,8 +46,6 @@ #define cpu_ldq_kernel_ra(e, p, r) \ cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) -#define cpu_stb_kernel_ra(e, p, v, r) \ - cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) #define cpu_stw_kernel_ra(e, p, v, r) \ cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) #define cpu_stl_kernel_ra(e, p, v, r) \ @@ -53,12 +53,10 @@ #define cpu_stq_kernel_ra(e, p, v, r) \ cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r) -#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0) #define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0) #define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0) #define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0) -#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0) #define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0) #define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0) #define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0) diff --git a/target/i386/tcg/sysemu/bpt_helper.c b/target/i386/tcg/system/bpt_helper.c similarity index 99% rename from target/i386/tcg/sysemu/bpt_helper.c rename to target/i386/tcg/system/bpt_helper.c index b29acf41c38..aebb5caac37 100644 --- a/target/i386/tcg/sysemu/bpt_helper.c +++ b/target/i386/tcg/system/bpt_helper.c @@ -1,5 +1,5 @@ /* - * i386 breakpoint helpers - sysemu code + * i386 breakpoint helpers - system code * * Copyright (c) 2003 Fabrice Bellard * @@ -19,8 +19,8 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" +#include "exec/watchpoint.h" #include "tcg/helper-tcg.h" diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/system/excp_helper.c similarity index 91% rename from target/i386/tcg/sysemu/excp_helper.c rename to target/i386/tcg/system/excp_helper.c index 8fb05b1f531..50040f6fcaf 100644 --- a/target/i386/tcg/sysemu/excp_helper.c +++ b/target/i386/tcg/system/excp_helper.c @@ -1,5 +1,5 
@@ /* - * x86 exception helpers - sysemu code + * x86 exception helpers - system code * * Copyright (c) 2003 Fabrice Bellard * @@ -19,9 +19,12 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/cpu_ldst.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/probe.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" +#include "exec/target_page.h" +#include "exec/tlb-flags.h" #include "tcg/helper-tcg.h" typedef struct TranslateParams { @@ -60,14 +63,13 @@ typedef struct PTETranslate { hwaddr gaddr; } PTETranslate; -static bool ptw_translate(PTETranslate *inout, hwaddr addr, uint64_t ra) +static bool ptw_translate(PTETranslate *inout, hwaddr addr) { - CPUTLBEntryFull *full; int flags; inout->gaddr = addr; - flags = probe_access_full(inout->env, addr, 0, MMU_DATA_STORE, - inout->ptw_idx, true, &inout->haddr, &full, ra); + flags = probe_access_full_mmu(inout->env, addr, 0, MMU_DATA_STORE, + inout->ptw_idx, &inout->haddr, NULL); if (unlikely(flags & TLB_INVALID_MASK)) { TranslateFault *err = inout->err; @@ -108,6 +110,10 @@ static bool ptw_setl_slow(const PTETranslate *in, uint32_t old, uint32_t new) { uint32_t cmp; + CPUState *cpu = env_cpu(in->env); + /* We are in cpu_exec, and start_exclusive can't be called directly.*/ + g_assert(cpu->running); + cpu_exec_end(cpu); /* Does x86 really perform a rmw cycle on mmio for ptw? */ start_exclusive(); cmp = cpu_ldl_mmuidx_ra(in->env, in->gaddr, in->ptw_idx, 0); @@ -115,6 +121,7 @@ static bool ptw_setl_slow(const PTETranslate *in, uint32_t old, uint32_t new) cpu_stl_mmuidx_ra(in->env, in->gaddr, new, in->ptw_idx, 0); } end_exclusive(); + cpu_exec_start(cpu); return cmp == old; } @@ -150,6 +157,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, uint32_t pkr; int page_size; int error_code; + int prot; restart_all: rsvd_mask = ~MAKE_64BIT_MASK(0, env_archcpu(env)->phys_bits); @@ -166,7 +174,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 5 */ pte_addr = (in->cr3 & ~0xfff) + (((addr >> 48) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } restart_5: @@ -190,7 +198,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 4 */ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 39) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } restart_4: @@ -210,7 +218,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 3 */ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } restart_3_lma: @@ -237,7 +245,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 3 */ pte_addr = (in->cr3 & 0xffffffe0ULL) + ((addr >> 27) & 0x18); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } rsvd_mask |= PG_HI_USER_MASK; @@ -259,7 +267,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 2 */ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } restart_2_pae: @@ -285,7 +293,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * 
Page table level 1 */ pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } pte = ptw_ldq(&pte_trans, ra); @@ -298,12 +306,12 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, /* combine pde and pte nx, user and rw protections */ ptep &= pte ^ PG_NX_MASK; page_size = 4096; - } else { + } else if (pg_mode & PG_MODE_PG) { /* * Page table level 2 */ pte_addr = (in->cr3 & 0xfffff000ULL) + ((addr >> 20) & 0xffc); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } restart_2_nopae: @@ -332,7 +340,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, * Page table level 1 */ pte_addr = (pte & ~0xfffu) + ((addr >> 10) & 0xffc); - if (!ptw_translate(&pte_trans, pte_addr, ra)) { + if (!ptw_translate(&pte_trans, pte_addr)) { return false; } pte = ptw_ldl(&pte_trans, ra); @@ -343,6 +351,15 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, ptep &= pte | PG_NX_MASK; page_size = 4096; rsvd_mask = 0; + } else { + /* + * No paging (real mode), let's tentatively resolve the address as 1:1 + * here, but conditionally still perform an NPT walk on it later. + */ + page_size = 0x40000000; + paddr = in->addr; + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + goto stage2; } do_check_protect: @@ -358,7 +375,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, goto do_fault_protect; } - int prot = 0; + prot = 0; if (!is_mmu_index_smap(in->mmu_idx) || !(ptep & PG_USER_MASK)) { prot |= PAGE_READ; if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) { @@ -420,6 +437,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, /* merge offset within page */ paddr = (pte & PG_ADDRESS_MASK & ~(page_size - 1)) | (addr & (page_size - 1)); + stage2: /* * Note that NPT is walked (for both paging structures and final guest @@ -429,9 +447,8 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in, CPUTLBEntryFull *full; int flags, nested_page_size; - flags = probe_access_full(env, paddr, 0, access_type, - MMU_NESTED_IDX, true, - &pte_trans.haddr, &full, 0); + flags = probe_access_full_mmu(env, paddr, 0, access_type, + MMU_NESTED_IDX, &pte_trans.haddr, &full); if (unlikely(flags & TLB_INVALID_MASK)) { *err = (TranslateFault){ .error_code = env->error_code, @@ -562,7 +579,7 @@ static bool get_physical_address(CPUX86State *env, vaddr addr, addr = (uint32_t)addr; } - if (likely(env->cr[0] & CR0_PG_MASK)) { + if (likely(env->cr[0] & CR0_PG_MASK || use_stage2)) { in.cr3 = env->cr[3]; in.mmu_idx = mmu_idx; in.ptw_idx = use_stage2 ? 
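
The non-PAE walk above computes each level's entry address with plain shift/mask arithmetic on CR3 and the linear address. A small illustrative helper pair (no permission, accessed/dirty or reserved-bit handling) showing just that arithmetic:

    /* Illustrative only: entry addresses for the legacy 2-level (non-PAE) walk,
     * matching the expressions used in mmu_translate() above. */
    static uint32_t nonpae_l2_entry(uint32_t cr3, uint32_t va)
    {
        return (cr3 & 0xfffff000u) + ((va >> 20) & 0xffc);   /* page directory entry */
    }

    static uint32_t nonpae_l1_entry(uint32_t pde, uint32_t va)
    {
        return (pde & ~0xfffu) + ((va >> 10) & 0xffc);       /* page table entry */
    }
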
MMU_NESTED_IDX : MMU_PHYS_IDX; diff --git a/target/i386/tcg/sysemu/fpu_helper.c b/target/i386/tcg/system/fpu_helper.c similarity index 99% rename from target/i386/tcg/sysemu/fpu_helper.c rename to target/i386/tcg/system/fpu_helper.c index e0305ba2345..0b4fa187dfb 100644 --- a/target/i386/tcg/sysemu/fpu_helper.c +++ b/target/i386/tcg/system/fpu_helper.c @@ -1,5 +1,5 @@ /* - * x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers (sysemu code) + * x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers (system code) * * Copyright (c) 2003 Fabrice Bellard * diff --git a/target/i386/tcg/sysemu/meson.build b/target/i386/tcg/system/meson.build similarity index 100% rename from target/i386/tcg/sysemu/meson.build rename to target/i386/tcg/system/meson.build diff --git a/target/i386/tcg/sysemu/misc_helper.c b/target/i386/tcg/system/misc_helper.c similarity index 98% rename from target/i386/tcg/sysemu/misc_helper.c rename to target/i386/tcg/system/misc_helper.c index 094aa56a20d..9c3f5cc99b3 100644 --- a/target/i386/tcg/sysemu/misc_helper.c +++ b/target/i386/tcg/system/misc_helper.c @@ -1,5 +1,5 @@ /* - * x86 misc helpers - sysemu code + * x86 misc helpers - system code * * Copyright (c) 2003 Fabrice Bellard * @@ -21,9 +21,10 @@ #include "qemu/main-loop.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" -#include "exec/address-spaces.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-ldst.h" +#include "system/address-spaces.h" +#include "system/memory.h" +#include "exec/cputlb.h" #include "tcg/helper-tcg.h" #include "hw/i386/apic.h" @@ -468,8 +469,7 @@ void helper_rdmsr(CPUX86State *env) val = x86_cpu->ucode_rev; break; case MSR_CORE_THREAD_COUNT: { - CPUState *cs = CPU(x86_cpu); - val = (cs->nr_threads * cs->nr_cores) | (cs->nr_cores << 16); + val = cpu_x86_get_msr_core_thread_count(x86_cpu); break; } case MSR_APIC_START ... 
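
The MSR_CORE_THREAD_COUNT case above now goes through a helper, but the removed open-coded expression still documents the register layout: core count in bits 31:16, logical-processor count in bits 15:0. Evaluating the removed expression for a small topology (illustrative values, assuming nr_threads counts threads per core):

    /* Worked example of the removed computation, for cs->nr_cores = 2 and
     * cs->nr_threads = 2: */
    uint64_t val = (2 * 2) | (2 << 16);   /* 0x00020004: 2 cores, 4 logical CPUs */
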
MSR_APIC_END: { diff --git a/target/i386/tcg/sysemu/seg_helper.c b/target/i386/tcg/system/seg_helper.c similarity index 98% rename from target/i386/tcg/sysemu/seg_helper.c rename to target/i386/tcg/system/seg_helper.c index 05174a79e73..d4ea890c124 100644 --- a/target/i386/tcg/sysemu/seg_helper.c +++ b/target/i386/tcg/system/seg_helper.c @@ -1,5 +1,5 @@ /* - * x86 segmentation related helpers: (sysemu-only code) + * x86 segmentation related helpers: (system-only code) * TSS, interrupts, system calls, jumps and call/task gates, descriptors * * Copyright (c) 2003 Fabrice Bellard @@ -23,7 +23,7 @@ #include "qemu/main-loop.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "tcg/helper-tcg.h" #include "../seg_helper.h" diff --git a/target/i386/tcg/sysemu/smm_helper.c b/target/i386/tcg/system/smm_helper.c similarity index 99% rename from target/i386/tcg/sysemu/smm_helper.c rename to target/i386/tcg/system/smm_helper.c index a45b5651c36..251eb7856ce 100644 --- a/target/i386/tcg/sysemu/smm_helper.c +++ b/target/i386/tcg/system/smm_helper.c @@ -1,5 +1,5 @@ /* - * x86 SMM helpers (sysemu-only) + * x86 SMM helpers (system-only) * * Copyright (c) 2003 Fabrice Bellard * diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/system/svm_helper.c similarity index 99% rename from target/i386/tcg/sysemu/svm_helper.c rename to target/i386/tcg/system/svm_helper.c index 9db8ad62a01..b27049b9ed1 100644 --- a/target/i386/tcg/sysemu/svm_helper.c +++ b/target/i386/tcg/system/svm_helper.c @@ -1,5 +1,5 @@ /* - * x86 SVM helpers (sysemu only) + * x86 SVM helpers (system only) * * Copyright (c) 2003 Fabrice Bellard * @@ -21,8 +21,8 @@ #include "qemu/log.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "exec/cputlb.h" +#include "accel/tcg/cpu-ldst.h" #include "tcg/helper-tcg.h" /* Secure Virtual Machine helpers */ diff --git a/target/i386/tcg/sysemu/tcg-cpu.c b/target/i386/tcg/system/tcg-cpu.c similarity index 94% rename from target/i386/tcg/sysemu/tcg-cpu.c rename to target/i386/tcg/system/tcg-cpu.c index c223c0fe9bc..0538a4fd51a 100644 --- a/target/i386/tcg/sysemu/tcg-cpu.c +++ b/target/i386/tcg/system/tcg-cpu.c @@ -1,5 +1,5 @@ /* - * i386 TCG cpu class initialization functions specific to sysemu + * i386 TCG cpu class initialization functions specific to system emulation * * Copyright (c) 2003 Fabrice Bellard * @@ -21,9 +21,10 @@ #include "cpu.h" #include "tcg/helper-tcg.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qemu/units.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" +#include "system/memory.h" #include "tcg/tcg-cpu.h" diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c index cca19cd40e8..6f5dc06b3b9 100644 --- a/target/i386/tcg/tcg-cpu.c +++ b/target/i386/tcg/tcg-cpu.c @@ -21,8 +21,10 @@ #include "cpu.h" #include "helper-tcg.h" #include "qemu/accel.h" -#include "hw/core/accel-cpu.h" - +#include "accel/accel-cpu-target.h" +#include "exec/translation-block.h" +#include "exec/target_page.h" +#include "accel/tcg/cpu-ops.h" #include "tcg-cpu.h" /* Frob eflags into and out of the CPU temporary format. 
*/ @@ -46,6 +48,25 @@ static void x86_cpu_exec_exit(CPUState *cs) env->eflags = cpu_compute_eflags(env); } +static TCGTBCPUState x86_get_tb_cpu_state(CPUState *cs) +{ + CPUX86State *env = cpu_env(cs); + uint32_t flags, cs_base; + vaddr pc; + + flags = env->hflags | + (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK)); + if (env->hflags & HF_CS64_MASK) { + cs_base = 0; + pc = env->eip; + } else { + cs_base = env->segs[R_CS].base; + pc = (uint32_t)(cs_base + env->eip); + } + + return (TCGTBCPUState){ .pc = pc, .flags = flags, .cs_base = cs_base }; +} + static void x86_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -93,6 +114,23 @@ static void x86_restore_state_to_opc(CPUState *cs, } } +int x86_mmu_index_pl(CPUX86State *env, unsigned pl) +{ + int mmu_index_32 = (env->hflags & HF_CS64_MASK) ? 0 : 1; + int mmu_index_base = + pl == 3 ? MMU_USER64_IDX : + !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX : + (env->eflags & AC_MASK) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX; + + return mmu_index_base + mmu_index_32; +} + +static int x86_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + CPUX86State *env = cpu_env(cs); + return x86_mmu_index_pl(env, env->hflags & HF_CPL_MASK); +} + #ifndef CONFIG_USER_ONLY static bool x86_debug_check_breakpoint(CPUState *cs) { @@ -102,14 +140,36 @@ static bool x86_debug_check_breakpoint(CPUState *cs) /* RF disables all architectural breakpoints. */ return !(env->eflags & RF_MASK); } -#endif -#include "hw/core/tcg-cpu-ops.h" +static void x86_cpu_exec_reset(CPUState *cs) +{ + CPUArchState *env = cpu_env(cs); + + cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0); + do_cpu_init(env_archcpu(env)); + cs->exception_index = EXCP_HALTED; +} + +static vaddr x86_pointer_wrap(CPUState *cs, int mmu_idx, + vaddr result, vaddr base) +{ + return cpu_env(cs)->hflags & HF_CS64_MASK ? 
result : (uint32_t)result; +} +#endif -static const TCGCPUOps x86_tcg_ops = { +const TCGCPUOps x86_tcg_ops = { + .mttcg_supported = true, + .precise_smc = true, + /* + * The x86 has a strong memory model with some store-after-load re-ordering + */ + .guest_default_memory_order = TCG_MO_ALL & ~TCG_MO_ST_LD, .initialize = tcg_x86_init, + .translate_code = x86_translate_code, + .get_tb_cpu_state = x86_get_tb_cpu_state, .synchronize_from_tb = x86_cpu_synchronize_from_tb, .restore_state_to_opc = x86_restore_state_to_opc, + .mmu_index = x86_cpu_mmu_index, .cpu_exec_enter = x86_cpu_exec_enter, .cpu_exec_exit = x86_cpu_exec_exit, #ifdef CONFIG_USER_ONLY @@ -118,9 +178,11 @@ static const TCGCPUOps x86_tcg_ops = { .record_sigbus = x86_cpu_record_sigbus, #else .tlb_fill = x86_cpu_tlb_fill, + .pointer_wrap = x86_pointer_wrap, .do_interrupt = x86_cpu_do_interrupt, .cpu_exec_halt = x86_cpu_exec_halt, .cpu_exec_interrupt = x86_cpu_exec_interrupt, + .cpu_exec_reset = x86_cpu_exec_reset, .do_unaligned_access = x86_cpu_do_unaligned_access, .debug_excp_handler = breakpoint_handler, .debug_check_breakpoint = x86_debug_check_breakpoint, @@ -128,17 +190,6 @@ static const TCGCPUOps x86_tcg_ops = { #endif /* !CONFIG_USER_ONLY */ }; -static void x86_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) -{ - /* for x86, all cpus use the same set of operations */ - cc->tcg_ops = &x86_tcg_ops; -} - -static void x86_tcg_cpu_class_init(CPUClass *cc) -{ - cc->init_accel_cpu = x86_tcg_cpu_init_ops; -} - static void x86_tcg_cpu_xsave_init(void) { #define XO(bit, field) \ @@ -179,7 +230,7 @@ static void x86_tcg_cpu_instance_init(CPUState *cs) x86_tcg_cpu_xsave_init(); } -static void x86_tcg_cpu_accel_class_init(ObjectClass *oc, void *data) +static void x86_tcg_cpu_accel_class_init(ObjectClass *oc, const void *data) { AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); @@ -187,7 +238,6 @@ static void x86_tcg_cpu_accel_class_init(ObjectClass *oc, void *data) acc->cpu_target_realize = tcg_cpu_realizefn; #endif /* CONFIG_USER_ONLY */ - acc->cpu_class_init = x86_tcg_cpu_class_init; acc->cpu_instance_init = x86_tcg_cpu_instance_init; } static const TypeInfo x86_tcg_cpu_accel_type_info = { diff --git a/target/i386/tcg/tcg-cpu.h b/target/i386/tcg/tcg-cpu.h index 53a84944551..85bcd61678f 100644 --- a/target/i386/tcg/tcg-cpu.h +++ b/target/i386/tcg/tcg-cpu.h @@ -19,6 +19,8 @@ #ifndef TCG_CPU_H #define TCG_CPU_H +#include "cpu.h" + #define XSAVE_FCW_FSW_OFFSET 0x000 #define XSAVE_FTW_FOP_OFFSET 0x004 #define XSAVE_CWD_RIP_OFFSET 0x008 @@ -76,6 +78,10 @@ QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != XSAVE_ZMM_HI256_OFF QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != XSAVE_HI16_ZMM_OFFSET); QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != XSAVE_PKRU_OFFSET); +extern const TCGCPUOps x86_tcg_ops; + bool tcg_cpu_realizefn(CPUState *cs, Error **errp); +int x86_mmu_index_pl(CPUX86State *env, unsigned pl); + #endif /* TCG_CPU_H */ diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index 95bad55bf46..0cb87d02012 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -20,15 +20,18 @@ #include "qemu/host-utils.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-mmu-index.h" +#include "exec/translation-block.h" #include "tcg/tcg-op.h" #include "tcg/tcg-op-gvec.h" #include "exec/translator.h" +#include "exec/target_page.h" #include "fpu/softfloat.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "helper-tcg.h" +#include "decode-new.h" 
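
Both x86_get_tb_cpu_state() and the new x86_pointer_wrap() hook above apply the same rule: outside a 64-bit code segment, computed addresses are truncated to 32 bits. A tiny worked example of the wrap behaviour, with illustrative values only:

    /* Address arithmetic wraps at 4 GiB when HF_CS64_MASK is clear. */
    uint64_t sum     = 0xffffff00ull + 0x200;   /* 0x100000100 */
    uint32_t wrapped = (uint32_t)sum;           /* 0x00000100 is what the guest sees */
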
#include "exec/log.h" @@ -111,7 +114,6 @@ typedef struct DisasContext { #endif bool vex_w; /* used by AVX even on 32-bit processors */ bool jmp_opt; /* use direct block chaining for direct jumps */ - bool repz_opt; /* optimize jumps within repz instructions */ bool cc_op_dirty; CCOp cc_op; /* current CC operation */ @@ -133,10 +135,7 @@ typedef struct DisasContext { TCGv T1; /* TCG local register indexes (only used inside old micro ops) */ - TCGv tmp0; - TCGv tmp4; TCGv_i32 tmp2_i32; - TCGv_i32 tmp3_i32; TCGv_i64 tmp1_i64; sigjmp_buf jmpbuf; @@ -227,7 +226,7 @@ typedef struct DisasContext { #endif /* - * Many sysemu-only helpers are not reachable for user-only. + * Many system-only helpers are not reachable for user-only. * Define stub generators here, so that we need not either sprinkle * ifdefs through the translator, nor provide the helper function. */ @@ -290,7 +289,7 @@ enum { }; /* Bit set if the global variable is live after setting CC_OP to X. */ -static const uint8_t cc_op_live[CC_OP_NB] = { +static const uint8_t cc_op_live_[] = { [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, [CC_OP_EFLAGS] = USES_CC_SRC, [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC, @@ -304,13 +303,28 @@ static const uint8_t cc_op_live[CC_OP_NB] = { [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC, [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC, [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC, + [CC_OP_BLSIB ... CC_OP_BLSIQ] = USES_CC_DST | USES_CC_SRC, [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC, [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2, [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, - [CC_OP_CLR] = 0, [CC_OP_POPCNT] = USES_CC_DST, }; +static uint8_t cc_op_live(CCOp op) +{ + uint8_t result; + assert(op >= 0 && op < ARRAY_SIZE(cc_op_live_)); + + /* + * Check that the array is fully populated. A zero entry would correspond + * to a fixed value of EFLAGS, which can be obtained with CC_OP_EFLAGS + * as well. + */ + result = cc_op_live_[op]; + assert(result); + return result; +} + static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty) { int dead; @@ -320,7 +334,7 @@ static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty) } /* Discard CC computation that will no longer be used. */ - dead = cc_op_live[s->cc_op] & ~cc_op_live[op]; + dead = cc_op_live(s->cc_op) & ~cc_op_live(op); if (dead & USES_CC_DST) { tcg_gen_discard_tl(cpu_cc_dst); } @@ -469,7 +483,7 @@ static inline void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg) { if (ot == MO_8 && byte_reg_is_xH(s, reg)) { - tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8); + tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8); } else { tcg_gen_mov_tl(t0, cpu_regs[reg]); } @@ -489,17 +503,24 @@ static inline void gen_op_jmp_v(DisasContext *s, TCGv dest) s->pc_save = -1; } -static inline -void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val) +static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val) { - tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val); - gen_op_mov_reg_v(s, size, reg, s->tmp0); + /* Using cpu_regs[reg] does not work for xH registers. 
*/ + assert(size >= MO_16); + if (size == MO_16) { + TCGv temp = tcg_temp_new(); + tcg_gen_add_tl(temp, cpu_regs[reg], val); + gen_op_mov_reg_v(s, size, reg, temp); + } else { + tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], val); + tcg_gen_ext_tl(cpu_regs[reg], cpu_regs[reg], size); + } } -static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val) +static inline +void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val) { - tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val); - gen_op_mov_reg_v(s, size, reg, s->tmp0); + gen_op_add_reg(s, size, reg, tcg_constant_tl(val)); } static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0) @@ -672,14 +693,6 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s) gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1); } -static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot) -{ - TCGv dshift = tcg_temp_new(); - tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df)); - tcg_gen_shli_tl(dshift, dshift, ot); - return dshift; -}; - static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign) { if (size == MO_TL) { @@ -692,11 +705,6 @@ static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign) return dst; } -static void gen_exts(MemOp ot, TCGv reg) -{ - gen_ext_tl(reg, reg, ot, true); -} - static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1) { TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false); @@ -714,6 +722,46 @@ static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1) gen_op_j_ecx(s, TCG_COND_NE, label1); } +static void gen_set_hflag(DisasContext *s, uint32_t mask) +{ + if ((s->flags & mask) == 0) { + TCGv_i32 t = tcg_temp_new_i32(); + tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags)); + tcg_gen_ori_i32(t, t, mask); + tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags)); + s->flags |= mask; + } +} + +static void gen_reset_hflag(DisasContext *s, uint32_t mask) +{ + if (s->flags & mask) { + TCGv_i32 t = tcg_temp_new_i32(); + tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags)); + tcg_gen_andi_i32(t, t, ~mask); + tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags)); + s->flags &= ~mask; + } +} + +static void gen_set_eflags(DisasContext *s, target_ulong mask) +{ + TCGv t = tcg_temp_new(); + + tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags)); + tcg_gen_ori_tl(t, t, mask); + tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags)); +} + +static void gen_reset_eflags(DisasContext *s, target_ulong mask) +{ + TCGv t = tcg_temp_new(); + + tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags)); + tcg_gen_andi_tl(t, t, ~mask); + tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags)); +} + static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n) { switch (ot) { @@ -781,16 +829,13 @@ static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port, #endif } -static void gen_movs(DisasContext *s, MemOp ot) +static void gen_movs(DisasContext *s, MemOp ot, TCGv dshift) { - TCGv dshift; - gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); gen_string_movl_A0_EDI(s); gen_op_st_v(s, ot, s->T0, s->A0); - dshift = gen_compute_Dshift(s, ot); gen_op_add_reg(s, s->aflag, R_ESI, dshift); gen_op_add_reg(s, s->aflag, R_EDI, dshift); } @@ -806,17 +851,13 @@ static void gen_mov_eflags(DisasContext *s, TCGv reg) tcg_gen_mov_tl(reg, cpu_cc_src); return; } - if (s->cc_op == CC_OP_CLR) { - tcg_gen_movi_tl(reg, CC_Z | CC_P); - return; - } dst = cpu_cc_dst; src1 = cpu_cc_src; src2 = cpu_cc_src2; /* Take care to not read values 
that are not live. */ - live = cc_op_live[s->cc_op] & ~USES_CC_SRCT; + live = cc_op_live(s->cc_op) & ~USES_CC_SRCT; dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2); if (dead) { TCGv zero = tcg_constant_tl(0); @@ -865,6 +906,18 @@ static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size) } } +static CCPrepare gen_prepare_val_nz(TCGv src, MemOp size, bool eqz) +{ + if (size == MO_TL) { + return (CCPrepare) { .cond = eqz ? TCG_COND_EQ : TCG_COND_NE, + .reg = src }; + } else { + return (CCPrepare) { .cond = eqz ? TCG_COND_TSTEQ : TCG_COND_TSTNE, + .imm = MAKE_64BIT_MASK(0, 8 << size), + .reg = src }; + } +} + /* compute eflags.C, trying to store it in reg if not NULL */ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) { @@ -874,21 +927,20 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) case CC_OP_SUBB ... CC_OP_SUBQ: /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */ size = s->cc_op - CC_OP_SUBB; - gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false); - gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false); + tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size); + tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size); return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT, .reg2 = cpu_cc_src, .use_reg2 = true }; case CC_OP_ADDB ... CC_OP_ADDQ: /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */ - size = s->cc_op - CC_OP_ADDB; - gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size, false); - gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false); + size = cc_op_size(s->cc_op); + tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size); + tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size); return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst, .reg2 = cpu_cc_src, .use_reg2 = true }; case CC_OP_LOGICB ... CC_OP_LOGICQ: - case CC_OP_CLR: case CC_OP_POPCNT: return (CCPrepare) { .cond = TCG_COND_NEVER }; @@ -899,7 +951,7 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) case CC_OP_SHLB ... CC_OP_SHLQ: /* (CC_SRC >> (DATA_BITS - 1)) & 1 */ - size = s->cc_op - CC_OP_SHLB; + size = cc_op_size(s->cc_op); return gen_prepare_sign_nz(cpu_cc_src, size); case CC_OP_MULB ... CC_OP_MULQ: @@ -907,9 +959,12 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) .reg = cpu_cc_src }; case CC_OP_BMILGB ... CC_OP_BMILGQ: - size = s->cc_op - CC_OP_BMILGB; - gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false); - return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src }; + size = cc_op_size(s->cc_op); + return gen_prepare_val_nz(cpu_cc_src, size, true); + + case CC_OP_BLSIB ... CC_OP_BLSIQ: + size = cc_op_size(s->cc_op); + return gen_prepare_val_nz(cpu_cc_src, size, false); case CC_OP_ADCX: case CC_OP_ADCOX: @@ -957,14 +1012,10 @@ static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg) case CC_OP_ADCOX: return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src, .imm = CC_S }; - case CC_OP_CLR: case CC_OP_POPCNT: return (CCPrepare) { .cond = TCG_COND_NEVER }; default: - { - MemOp size = (s->cc_op - CC_OP_ADDB) & 3; - return gen_prepare_sign_nz(cpu_cc_dst, size); - } + return gen_prepare_sign_nz(cpu_cc_dst, cc_op_size(s->cc_op)); } } @@ -976,7 +1027,7 @@ static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg) case CC_OP_ADCOX: return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2, .no_setcond = true }; - case CC_OP_CLR: + case CC_OP_LOGICB ... CC_OP_LOGICQ: case CC_OP_POPCNT: return (CCPrepare) { .cond = TCG_COND_NEVER }; case CC_OP_MULB ... 
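
The carry preparation above keeps the x86 flags lazy: for CC_OP_SUB* the generated code recomputes CF on demand as an unsigned compare of the saved operands. A concrete 8-bit instance of that rule:

    /* Worked example for CC_OP_SUBB, following the comment
     * "(DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC" above. */
    uint8_t srcT = 0x10;                          /* destination operand before the SUB */
    uint8_t src  = 0x20;                          /* source operand */
    bool   cf    = (uint8_t)srcT < (uint8_t)src;  /* true: the SUB borrowed, CF = 1 */
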
CC_OP_MULQ: @@ -992,26 +1043,25 @@ static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg) static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) { switch (s->cc_op) { - case CC_OP_DYNAMIC: - gen_compute_eflags(s); - /* FALLTHRU */ case CC_OP_EFLAGS: case CC_OP_ADCX: case CC_OP_ADOX: case CC_OP_ADCOX: return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src, .imm = CC_Z }; - case CC_OP_CLR: - return (CCPrepare) { .cond = TCG_COND_ALWAYS }; + case CC_OP_DYNAMIC: + gen_update_cc_op(s); + if (!reg) { + reg = tcg_temp_new(); + } + gen_helper_cc_compute_nz(reg, cpu_cc_dst, cpu_cc_src, cpu_cc_op); + return (CCPrepare) { .cond = TCG_COND_EQ, .reg = reg, .imm = 0 }; + case CC_OP_POPCNT: + return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst }; default: { - MemOp size = (s->cc_op - CC_OP_ADDB) & 3; - if (size == MO_TL) { - return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst }; - } else { - return (CCPrepare) { .cond = TCG_COND_TSTEQ, .reg = cpu_cc_dst, - .imm = (1ull << (8 << size)) - 1 }; - } + MemOp size = cc_op_size(s->cc_op); + return gen_prepare_val_nz(cpu_cc_dst, size, true); } } } @@ -1031,11 +1081,11 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) switch (s->cc_op) { case CC_OP_SUBB ... CC_OP_SUBQ: /* We optimize relational operators for the cmp/jcc case. */ - size = s->cc_op - CC_OP_SUBB; + size = cc_op_size(s->cc_op); switch (jcc_op) { case JCC_BE: - gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false); - gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false); + tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size); + tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size); cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT, .reg2 = cpu_cc_src, .use_reg2 = true }; break; @@ -1045,8 +1095,8 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) case JCC_LE: cond = TCG_COND_LE; fast_jcc_l: - gen_ext_tl(s->cc_srcT, s->cc_srcT, size, true); - gen_ext_tl(cpu_cc_src, cpu_cc_src, size, true); + tcg_gen_ext_tl(s->cc_srcT, s->cc_srcT, size | MO_SIGN); + tcg_gen_ext_tl(cpu_cc_src, cpu_cc_src, size | MO_SIGN); cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT, .reg2 = cpu_cc_src, .use_reg2 = true }; break; @@ -1056,6 +1106,28 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) } break; + case CC_OP_LOGICB ... CC_OP_LOGICQ: + /* Mostly used for test+jump */ + size = s->cc_op - CC_OP_LOGICB; + switch (jcc_op) { + case JCC_BE: + /* CF = 0, becomes jz/je */ + jcc_op = JCC_Z; + goto slow_jcc; + case JCC_L: + /* OF = 0, becomes js/jns */ + jcc_op = JCC_S; + goto slow_jcc; + case JCC_LE: + /* SF or ZF, becomes signed <= 0 */ + tcg_gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size | MO_SIGN); + cc = (CCPrepare) { .cond = TCG_COND_LE, .reg = cpu_cc_dst }; + break; + default: + goto slow_jcc; + } + break; + default: slow_jcc: /* This actually generates good code for JC, JZ and JS. 
*/ @@ -1109,7 +1181,27 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) return cc; } -static void gen_setcc1(DisasContext *s, int b, TCGv reg) +static void gen_neg_setcc(DisasContext *s, int b, TCGv reg) +{ + CCPrepare cc = gen_prepare_cc(s, b, reg); + + if (cc.no_setcond) { + if (cc.cond == TCG_COND_EQ) { + tcg_gen_addi_tl(reg, cc.reg, -1); + } else { + tcg_gen_neg_tl(reg, cc.reg); + } + return; + } + + if (cc.use_reg2) { + tcg_gen_negsetcond_tl(cc.cond, reg, cc.reg, cc.reg2); + } else { + tcg_gen_negsetcondi_tl(cc.cond, reg, cc.reg, cc.imm); + } +} + +static void gen_setcc(DisasContext *s, int b, TCGv reg) { CCPrepare cc = gen_prepare_cc(s, b, reg); @@ -1131,12 +1223,12 @@ static void gen_setcc1(DisasContext *s, int b, TCGv reg) static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg) { - gen_setcc1(s, JCC_B << 1, reg); + gen_setcc(s, JCC_B << 1, reg); } /* generate a conditional jump to label 'l1' according to jump opcode value 'b'. In the fast case, T0 is guaranteed not to be used. */ -static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1) +static inline void gen_jcc_noeob(DisasContext *s, int b, TCGLabel *l1) { CCPrepare cc = gen_prepare_cc(s, b, NULL); @@ -1151,10 +1243,15 @@ static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1) value 'b'. In the fast case, T0 is guaranteed not to be used. One or both of the branches will call gen_jmp_rel, so ensure cc_op is clean. */ -static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1) +static inline void gen_jcc(DisasContext *s, int b, TCGLabel *l1) { CCPrepare cc = gen_prepare_cc(s, b, NULL); + /* + * Note that this must be _after_ gen_prepare_cc, because it can change + * the cc_op to CC_OP_EFLAGS (because it's CC_OP_DYNAMIC or because + * it's cheaper to just compute the flags)! + */ gen_update_cc_op(s); if (cc.use_reg2) { tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1); @@ -1163,39 +1260,22 @@ static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1) } } -/* XXX: does not work with gdbstub "ice" single step - not a - serious problem. The caller can jump to the returned label - to stop the REP but, if the flags have changed, it has to call - gen_update_cc_op before doing so. 
*/ -static TCGLabel *gen_jz_ecx_string(DisasContext *s) -{ - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - - gen_update_cc_op(s); - gen_op_jnz_ecx(s, l1); - gen_set_label(l2); - gen_jmp_rel_csize(s, 0, 1); - gen_set_label(l1); - return l2; -} - -static void gen_stos(DisasContext *s, MemOp ot) +static void gen_stos(DisasContext *s, MemOp ot, TCGv dshift) { gen_string_movl_A0_EDI(s); gen_op_st_v(s, ot, s->T0, s->A0); - gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); + gen_op_add_reg(s, s->aflag, R_EDI, dshift); } -static void gen_lods(DisasContext *s, MemOp ot) +static void gen_lods(DisasContext *s, MemOp ot, TCGv dshift) { gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); gen_op_mov_reg_v(s, ot, R_EAX, s->T0); - gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot)); + gen_op_add_reg(s, s->aflag, R_ESI, dshift); } -static void gen_scas(DisasContext *s, MemOp ot) +static void gen_scas(DisasContext *s, MemOp ot, TCGv dshift) { gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); @@ -1204,13 +1284,11 @@ static void gen_scas(DisasContext *s, MemOp ot) tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1); set_cc_op(s, CC_OP_SUBB + ot); - gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); + gen_op_add_reg(s, s->aflag, R_EDI, dshift); } -static void gen_cmps(DisasContext *s, MemOp ot) +static void gen_cmps(DisasContext *s, MemOp ot, TCGv dshift) { - TCGv dshift; - gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); gen_string_movl_A0_ESI(s); @@ -1220,7 +1298,6 @@ static void gen_cmps(DisasContext *s, MemOp ot) tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1); set_cc_op(s, CC_OP_SUBB + ot); - dshift = gen_compute_Dshift(s, ot); gen_op_add_reg(s, s->aflag, R_ESI, dshift); gen_op_add_reg(s, s->aflag, R_EDI, dshift); } @@ -1239,71 +1316,183 @@ static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) } } -static void gen_ins(DisasContext *s, MemOp ot) +static void gen_ins(DisasContext *s, MemOp ot, TCGv dshift) { + TCGv_i32 port = tcg_temp_new_i32(); + gen_string_movl_A0_EDI(s); /* Note: we must do this dummy write first to be restartable in case of page fault. 
*/ tcg_gen_movi_tl(s->T0, 0); gen_op_st_v(s, ot, s->T0, s->A0); - tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); - tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); - gen_helper_in_func(ot, s->T0, s->tmp2_i32); + tcg_gen_trunc_tl_i32(port, cpu_regs[R_EDX]); + tcg_gen_andi_i32(port, port, 0xffff); + gen_helper_in_func(ot, s->T0, port); gen_op_st_v(s, ot, s->T0, s->A0); - gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); - gen_bpt_io(s, s->tmp2_i32, ot); + gen_op_add_reg(s, s->aflag, R_EDI, dshift); + gen_bpt_io(s, port, ot); } -static void gen_outs(DisasContext *s, MemOp ot) +static void gen_outs(DisasContext *s, MemOp ot, TCGv dshift) { + TCGv_i32 port = tcg_temp_new_i32(); + TCGv_i32 value = tcg_temp_new_i32(); + gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); - tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]); - tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); - tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0); - gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); - gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot)); - gen_bpt_io(s, s->tmp2_i32, ot); + tcg_gen_trunc_tl_i32(port, cpu_regs[R_EDX]); + tcg_gen_andi_i32(port, port, 0xffff); + tcg_gen_trunc_tl_i32(value, s->T0); + gen_helper_out_func(ot, port, value); + gen_op_add_reg(s, s->aflag, R_ESI, dshift); + gen_bpt_io(s, port, ot); } -/* Generate jumps to current or next instruction */ -static void gen_repz(DisasContext *s, MemOp ot, - void (*fn)(DisasContext *s, MemOp ot)) +#define REP_MAX 65535 + +static void do_gen_rep(DisasContext *s, MemOp ot, TCGv dshift, + void (*fn)(DisasContext *s, MemOp ot, TCGv dshift), + bool is_repz_nz) { - TCGLabel *l2; - l2 = gen_jz_ecx_string(s); - fn(s, ot); - gen_op_add_reg_im(s, s->aflag, R_ECX, -1); + TCGLabel *last = gen_new_label(); + TCGLabel *loop = gen_new_label(); + TCGLabel *done = gen_new_label(); + + target_ulong cx_mask = MAKE_64BIT_MASK(0, 8 << s->aflag); + TCGv cx_next = tcg_temp_new(); + + /* + * Check if we must translate a single iteration only. Normally, HF_RF_MASK + * would also limit translation blocks to one instruction, so that gen_eob + * can reset the flag; here however RF is set throughout the repetition, so + * we can plow through until CX/ECX/RCX is zero. + */ + bool can_loop = + (!(tb_cflags(s->base.tb) & (CF_USE_ICOUNT | CF_SINGLE_STEP)) + && !(s->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))); + bool had_rf = s->flags & HF_RF_MASK; + /* - * A loop would cause two single step exceptions if ECX = 1 - * before rep string_insn + * Even if EFLAGS.RF was set on entry (such as if we're on the second or + * later iteration and an exception or interrupt happened), force gen_eob() + * not to clear the flag. We do that ourselves after the last iteration. */ - if (s->repz_opt) { - gen_op_jz_ecx(s, l2); + s->flags &= ~HF_RF_MASK; + + /* + * For CMPS/SCAS, the CC_OP after a memory fault could come from either + * the previous instruction or the string instruction; but because we + * arrange to keep CC_OP up to date all the time, just mark the whole + * insn as CC_OP_DYNAMIC. + * + * It's not a problem to do this even for instructions that do not + * modify the flags, so do it unconditionally. + */ + gen_update_cc_op(s); + tcg_set_insn_start_param(s->base.insn_start, 1, CC_OP_DYNAMIC); + + /* Any iteration at all? */ + tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cpu_regs[R_ECX], cx_mask, done); + + /* + * From now on we operate on the value of CX/ECX/RCX that will be written + * back, which is stored in cx_next. 
There can be no carry, so we can zero + * extend here if needed and not do any expensive deposit operations later. + */ + tcg_gen_subi_tl(cx_next, cpu_regs[R_ECX], 1); +#ifdef TARGET_X86_64 + if (s->aflag == MO_32) { + tcg_gen_ext32u_tl(cx_next, cx_next); + cx_mask = ~0; } - gen_jmp_rel_csize(s, -cur_insn_len(s), 0); -} +#endif -static void gen_repz_nz(DisasContext *s, MemOp ot, - void (*fn)(DisasContext *s, MemOp ot)) -{ - TCGLabel *l2; - int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0; + /* + * The last iteration is handled outside the loop, so that cx_next + * can never underflow. + */ + if (can_loop) { + tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cx_next, cx_mask, last); + } - l2 = gen_jz_ecx_string(s); - fn(s, ot); - gen_op_add_reg_im(s, s->aflag, R_ECX, -1); - gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); - if (s->repz_opt) { - gen_op_jz_ecx(s, l2); + gen_set_label(loop); + fn(s, ot, dshift); + tcg_gen_mov_tl(cpu_regs[R_ECX], cx_next); + gen_update_cc_op(s); + + /* Leave if REP condition fails. */ + if (is_repz_nz) { + int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0; + gen_jcc_noeob(s, (JCC_Z << 1) | (nz ^ 1), done); + /* gen_prepare_eflags_z never changes cc_op. */ + assert(!s->cc_op_dirty); } + + if (can_loop) { + tcg_gen_subi_tl(cx_next, cx_next, 1); + tcg_gen_brcondi_tl(TCG_COND_TSTNE, cx_next, REP_MAX, loop); + tcg_gen_brcondi_tl(TCG_COND_TSTEQ, cx_next, cx_mask, last); + } + /* - * Only one iteration is done at a time, so the translation - * block ends unconditionally after this instruction and there - * is no control flow junction - no need to set CC_OP_DYNAMIC. + * Traps or interrupts set RF_MASK if they happen after any iteration + * but the last. Set it here before giving the main loop a chance to + * execute. (For faults, seg_helper.c sets the flag as usual). */ + if (!had_rf) { + gen_set_eflags(s, RF_MASK); + } + + /* Go to the main loop but reenter the same instruction. */ gen_jmp_rel_csize(s, -cur_insn_len(s), 0); + + if (can_loop) { + /* + * The last iteration needs no conditional jump, even if is_repz_nz, + * because the repeats are ending anyway. + */ + gen_set_label(last); + set_cc_op(s, CC_OP_DYNAMIC); + fn(s, ot, dshift); + tcg_gen_mov_tl(cpu_regs[R_ECX], cx_next); + gen_update_cc_op(s); + } + + /* CX/ECX/RCX is zero, or REPZ/REPNZ broke the repetition. */ + gen_set_label(done); + set_cc_op(s, CC_OP_DYNAMIC); + if (had_rf) { + gen_reset_eflags(s, RF_MASK); + } + gen_jmp_rel_csize(s, 0, 1); +} + +static void do_gen_string(DisasContext *s, MemOp ot, + void (*fn)(DisasContext *s, MemOp ot, TCGv dshift), + bool is_repz_nz) +{ + TCGv dshift = tcg_temp_new(); + tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df)); + tcg_gen_shli_tl(dshift, dshift, ot); + + if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) { + do_gen_rep(s, ot, dshift, fn, is_repz_nz); + } else { + fn(s, ot, dshift); + } +} + +static void gen_repz(DisasContext *s, MemOp ot, + void (*fn)(DisasContext *s, MemOp ot, TCGv dshift)) +{ + do_gen_string(s, ot, fn, false); +} + +static void gen_repz_nz(DisasContext *s, MemOp ot, + void (*fn)(DisasContext *s, MemOp ot, TCGv dshift)) +{ + do_gen_string(s, ot, fn, true); } static void gen_helper_fp_arith_ST0_FT0(int op) @@ -1394,10 +1583,13 @@ static bool check_cpl0(DisasContext *s) } /* XXX: add faster immediate case */ -static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, +static TCGv gen_shiftd_rm_T1(DisasContext *s, MemOp ot, bool is_right, TCGv count) { target_ulong mask = (ot == MO_64 ? 
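
Because do_gen_rep() above emits branchy TCG rather than straight-line code, the following host-side sketch restates the per-translation-block loop structure it generates. The REPZ/REPNZ ZF test and the RF flag bookkeeping are left out, and one_iteration() stands in for the string operation; this is illustrative only, not the TCG implementation.

    #include <stdint.h>
    #include <stdbool.h>

    /* Returns true if the TB must restart the same instruction. */
    static bool rep_tb_sketch(uint64_t *ecx, uint64_t cx_mask, bool can_loop,
                              void (*one_iteration)(void))
    {
        if ((*ecx & cx_mask) == 0) {
            return false;                       /* no iteration at all */
        }
        uint64_t cx_next = (*ecx - 1) & cx_mask;

        if (!can_loop) {
            one_iteration();
            *ecx = cx_next;
            return true;                        /* icount/single-step: one per TB */
        }
        while ((cx_next & cx_mask) != 0) {      /* everything but the last round */
            one_iteration();
            *ecx = cx_next;
            cx_next = (cx_next - 1) & cx_mask;
            if ((cx_next & 0xffff) == 0 && (cx_next & cx_mask) != 0) {
                return true;                    /* REP_MAX reached: split into a new TB */
            }
        }
        one_iteration();                        /* the final iteration */
        *ecx = cx_next;
        return false;
    }
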
63 : 31); + TCGv cc_src = tcg_temp_new(); + TCGv tmp = tcg_temp_new(); + TCGv hishift; switch (ot) { case MO_16: @@ -1405,9 +1597,9 @@ static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A portion by constructing it as a 32-bit value. */ if (is_right) { - tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16); + tcg_gen_deposit_tl(tmp, s->T0, s->T1, 16, 16); tcg_gen_mov_tl(s->T1, s->T0); - tcg_gen_mov_tl(s->T0, s->tmp0); + tcg_gen_mov_tl(s->T0, tmp); } else { tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16); } @@ -1418,47 +1610,52 @@ static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, case MO_32: #ifdef TARGET_X86_64 /* Concatenate the two 32-bit values and use a 64-bit shift. */ - tcg_gen_subi_tl(s->tmp0, count, 1); + tcg_gen_subi_tl(tmp, count, 1); if (is_right) { tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1); - tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0); + tcg_gen_shr_i64(cc_src, s->T0, tmp); tcg_gen_shr_i64(s->T0, s->T0, count); } else { tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0); - tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0); + tcg_gen_shl_i64(cc_src, s->T0, tmp); tcg_gen_shl_i64(s->T0, s->T0, count); - tcg_gen_shri_i64(s->tmp0, s->tmp0, 32); + tcg_gen_shri_i64(cc_src, cc_src, 32); tcg_gen_shri_i64(s->T0, s->T0, 32); } break; #endif default: - tcg_gen_subi_tl(s->tmp0, count, 1); + hishift = tcg_temp_new(); + tcg_gen_subi_tl(tmp, count, 1); if (is_right) { - tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0); + tcg_gen_shr_tl(cc_src, s->T0, tmp); - tcg_gen_subfi_tl(s->tmp4, mask + 1, count); + /* mask + 1 - count = mask - tmp = mask ^ tmp */ + tcg_gen_xori_tl(hishift, tmp, mask); tcg_gen_shr_tl(s->T0, s->T0, count); - tcg_gen_shl_tl(s->T1, s->T1, s->tmp4); + tcg_gen_shl_tl(s->T1, s->T1, hishift); } else { - tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0); + tcg_gen_shl_tl(cc_src, s->T0, tmp); + + /* mask + 1 - count = mask - tmp = mask ^ tmp */ + tcg_gen_xori_tl(hishift, tmp, mask); + tcg_gen_shl_tl(s->T0, s->T0, count); + tcg_gen_shr_tl(s->T1, s->T1, hishift); + if (ot == MO_16) { /* Only needed if count > 16, for Intel behaviour. */ - tcg_gen_subfi_tl(s->tmp4, 33, count); - tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4); - tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4); + tcg_gen_shri_tl(tmp, s->T1, 1); + tcg_gen_or_tl(cc_src, cc_src, tmp); } - - tcg_gen_subfi_tl(s->tmp4, mask + 1, count); - tcg_gen_shl_tl(s->T0, s->T0, count); - tcg_gen_shr_tl(s->T1, s->T1, s->tmp4); } - tcg_gen_movi_tl(s->tmp4, 0); - tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4, - s->tmp4, s->T1); + tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, + count, tcg_constant_tl(0), + tcg_constant_tl(0), s->T1); tcg_gen_or_tl(s->T0, s->T0, s->T1); break; } + + return cc_src; } #define X86_MAX_INSN_LENGTH 15 @@ -1469,7 +1666,7 @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes) /* This is a subsequent insn that crosses a page boundary. */ if (s->base.num_insns > 1 && - !is_same_page(&s->base, s->pc + num_bytes - 1)) { + !translator_is_same_page(&s->base, s->pc + num_bytes - 1)) { siglongjmp(s->jmpbuf, 2); } @@ -1514,16 +1711,8 @@ static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s) /* Decompose an address. 
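
The rewritten double-shift helper above replaces the subtraction mask + 1 - count with an XOR, which holds because tmp = count - 1 never exceeds the all-ones mask, so the subtraction cannot borrow. A worked instance:

    /* Worked example of "mask + 1 - count = mask - tmp = mask ^ tmp" for a
     * 32-bit SHLD/SHRD: mask = 31, count = 5. */
    unsigned mask    = 31;
    unsigned count   = 5;
    unsigned tmp     = count - 1;      /* 4 */
    unsigned hishift = mask ^ tmp;     /* 27, same as 31 - 4 and as 32 - 5 */
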
*/ -typedef struct AddressParts { - int def_seg; - int base; - int index; - int scale; - target_long disp; -} AddressParts; - static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s, - int modrm) + int modrm, bool is_vsib) { int def_seg, base, index, scale, mod, rm; target_long disp; @@ -1552,7 +1741,7 @@ static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s, int code = x86_ldub_code(env, s); scale = (code >> 6) & 3; index = ((code >> 3) & 7) | REX_X(s); - if (index == 4) { + if (index == 4 && !is_vsib) { index = -1; /* no index */ } base = (code & 7) | REX_B(s); @@ -1680,37 +1869,27 @@ static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib) return ea; } -static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm) -{ - AddressParts a = gen_lea_modrm_0(env, s, modrm); - TCGv ea = gen_lea_modrm_1(s, a, false); - gen_lea_v_seg(s, ea, a.def_seg, s->override); -} - -static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm) -{ - (void)gen_lea_modrm_0(env, s, modrm); -} - /* Used for BNDCL, BNDCU, BNDCN. */ -static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm, +static void gen_bndck(DisasContext *s, X86DecodedInsn *decode, TCGCond cond, TCGv_i64 bndv) { - AddressParts a = gen_lea_modrm_0(env, s, modrm); - TCGv ea = gen_lea_modrm_1(s, a, false); + TCGv ea = gen_lea_modrm_1(s, decode->mem, false); + TCGv_i32 t32 = tcg_temp_new_i32(); + TCGv_i64 t64 = tcg_temp_new_i64(); - tcg_gen_extu_tl_i64(s->tmp1_i64, ea); + tcg_gen_extu_tl_i64(t64, ea); if (!CODE64(s)) { - tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64); + tcg_gen_ext32u_i64(t64, t64); } - tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv); - tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64); - gen_helper_bndck(tcg_env, s->tmp2_i32); + tcg_gen_setcond_i64(cond, t64, t64, bndv); + tcg_gen_extrl_i64_i32(t32, t64); + gen_helper_bndck(tcg_env, t32); } /* generate modrm load of memory or register. */ -static void gen_ld_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot) +static void gen_ld_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot) { + int modrm = s->modrm; int mod, rm; mod = (modrm >> 6) & 3; @@ -1718,14 +1897,15 @@ static void gen_ld_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot) if (mod == 3) { gen_op_mov_v_reg(s, ot, s->T0, rm); } else { - gen_lea_modrm(env, s, modrm); + gen_lea_modrm(s, decode); gen_op_ld_v(s, ot, s->T0, s->A0); } } /* generate modrm store of memory or register. 
*/ -static void gen_st_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot) +static void gen_st_modrm(DisasContext *s, X86DecodedInsn *decode, MemOp ot) { + int modrm = s->modrm; int mod, rm; mod = (modrm >> 6) & 3; @@ -1733,7 +1913,7 @@ static void gen_st_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot) if (mod == 3) { gen_op_mov_reg_v(s, ot, rm, s->T0); } else { - gen_lea_modrm(env, s, modrm); + gen_lea_modrm(s, decode); gen_op_st_v(s, ot, s->T0, s->A0); } } @@ -1823,15 +2003,7 @@ static void gen_conditional_jump_labels(DisasContext *s, target_long diff, gen_jmp_rel(s, s->dflag, diff, 0); } -static void gen_jcc(DisasContext *s, int b, int diff) -{ - TCGLabel *l1 = gen_new_label(); - - gen_jcc1(s, b, l1); - gen_conditional_jump_labels(s, diff, NULL, l1); -} - -static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src) +static void gen_cmovcc(DisasContext *s, int b, TCGv dest, TCGv src) { CCPrepare cc = gen_prepare_cc(s, b, NULL); @@ -1853,25 +2025,39 @@ static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg) /* move SRC to seg_reg and compute if the CPU state may change. Never call this function with seg_reg == R_CS */ -static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src) +static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src, bool inhibit_irq) { if (PE(s) && !VM86(s)) { - tcg_gen_trunc_tl_i32(s->tmp2_i32, src); - gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32); - /* abort translation because the addseg value may change or - because ss32 may change. For R_SS, translation must always - stop as a special handling must be done to disable hardware - interrupts for the next instruction */ - if (seg_reg == R_SS) { - s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ; - } else if (CODE32(s) && seg_reg < R_FS) { + TCGv_i32 sel = tcg_temp_new_i32(); + + tcg_gen_trunc_tl_i32(sel, src); + gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), sel); + + /* + * For moves to SS, the SS32 flag may change. For CODE32 only, changes + * to SS, DS and ES may change the ADDSEG flags. + */ + if (seg_reg == R_SS || (CODE32(s) && seg_reg < R_FS)) { s->base.is_jmp = DISAS_EOB_NEXT; } } else { gen_op_movl_seg_real(s, seg_reg, src); - if (seg_reg == R_SS) { - s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ; - } + } + + /* + * For MOV or POP to SS (but not LSS) translation must always + * stop as a special handling must be done to disable hardware + * interrupts for the next instruction. + * + * This is the last instruction, so it's okay to overwrite + * HF_TF_MASK; the next TB will start with the flag set. + * + * DISAS_EOB_INHIBIT_IRQ is a superset of DISAS_EOB_NEXT which + * might have been set above. + */ + if (inhibit_irq) { + s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ; + s->flags &= ~HF_TF_MASK; } } @@ -2009,14 +2195,17 @@ static void gen_enter(DisasContext *s, int esp_addend, int level) level &= 31; if (level != 0) { int i; + if (level > 1) { + TCGv fp = tcg_temp_new(); - /* Copy level-1 pointers from the previous frame. */ - for (i = 1; i < level; ++i) { - gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i); - gen_op_ld_v(s, d_ot, s->tmp0, s->A0); + /* Copy level-1 pointers from the previous frame. 
*/ + for (i = 1; i < level; ++i) { + gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i); + gen_op_ld_v(s, d_ot, fp, s->A0); - gen_lea_ss_ofs(s, s->A0, s->T1, -size * i); - gen_op_st_v(s, d_ot, s->tmp0, s->A0); + gen_lea_ss_ofs(s, s->A0, s->T1, -size * i); + gen_op_st_v(s, d_ot, fp, s->A0); + } } /* Push the current FrameTemp as the last level. */ @@ -2079,46 +2268,6 @@ static void gen_interrupt(DisasContext *s, uint8_t intno) s->base.is_jmp = DISAS_NORETURN; } -static void gen_set_hflag(DisasContext *s, uint32_t mask) -{ - if ((s->flags & mask) == 0) { - TCGv_i32 t = tcg_temp_new_i32(); - tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags)); - tcg_gen_ori_i32(t, t, mask); - tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags)); - s->flags |= mask; - } -} - -static void gen_reset_hflag(DisasContext *s, uint32_t mask) -{ - if (s->flags & mask) { - TCGv_i32 t = tcg_temp_new_i32(); - tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags)); - tcg_gen_andi_i32(t, t, ~mask); - tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags)); - s->flags &= ~mask; - } -} - -static void gen_set_eflags(DisasContext *s, target_ulong mask) -{ - TCGv t = tcg_temp_new(); - - tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags)); - tcg_gen_ori_tl(t, t, mask); - tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags)); -} - -static void gen_reset_eflags(DisasContext *s, target_ulong mask) -{ - TCGv t = tcg_temp_new(); - - tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags)); - tcg_gen_andi_tl(t, t, ~mask); - tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags)); -} - /* Clear BND registers during legacy branches. */ static void gen_bnd_jmp(DisasContext *s) { @@ -2153,13 +2302,13 @@ gen_eob(DisasContext *s, int mode) gen_set_hflag(s, HF_INHIBIT_IRQ_MASK); } - if (s->base.tb->flags & HF_RF_MASK) { + if (s->flags & HF_RF_MASK) { gen_reset_eflags(s, RF_MASK); } if (mode == DISAS_EOB_RECHECK_TF) { gen_helper_rechecking_single_step(tcg_env); tcg_gen_exit_tb(NULL, 0); - } else if ((s->flags & HF_TF_MASK) && mode != DISAS_EOB_INHIBIT_IRQ) { + } else if (s->flags & HF_TF_MASK) { gen_helper_single_step(tcg_env); } else if (mode == DISAS_JUMP && /* give irqs a chance to happen */ @@ -2202,7 +2351,7 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num) * no extra masking to apply (data16 branch in code32, see above), * then we have also proven that the addition does not wrap. */ - if (!use_goto_tb || !is_same_page(&s->base, new_pc)) { + if (!use_goto_tb || !translator_is_same_page(&s->base, new_pc)) { tcg_gen_andi_tl(cpu_eip, cpu_eip, mask); use_goto_tb = false; } @@ -2279,10 +2428,11 @@ static void gen_ldy_env_A0(DisasContext *s, int offset, bool align) int mem_index = s->mem_index; TCGv_i128 t0 = tcg_temp_new_i128(); TCGv_i128 t1 = tcg_temp_new_i128(); + TCGv a0_hi = tcg_temp_new(); tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? 
MO_ALIGN_32 : 0)); - tcg_gen_addi_tl(s->tmp0, s->A0, 16); - tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop); + tcg_gen_addi_tl(a0_hi, s->A0, 16); + tcg_gen_qemu_ld_i128(t1, a0_hi, mem_index, mop); tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0))); tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1))); @@ -2293,137 +2443,41 @@ static void gen_sty_env_A0(DisasContext *s, int offset, bool align) MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR; int mem_index = s->mem_index; TCGv_i128 t = tcg_temp_new_i128(); + TCGv a0_hi = tcg_temp_new(); tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0))); tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0)); - tcg_gen_addi_tl(s->tmp0, s->A0, 16); + tcg_gen_addi_tl(a0_hi, s->A0, 16); tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1))); - tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop); + tcg_gen_qemu_st_i128(t, a0_hi, mem_index, mop); } -static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm) -{ - TCGv_i64 cmp, val, old; - TCGv Z; - - gen_lea_modrm(env, s, modrm); - - cmp = tcg_temp_new_i64(); - val = tcg_temp_new_i64(); - old = tcg_temp_new_i64(); - - /* Construct the comparison values from the register pair. */ - tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]); - tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]); - - /* Only require atomic with LOCK; non-parallel handled in generator. */ - if (s->prefix & PREFIX_LOCK) { - tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ); - } else { - tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val, - s->mem_index, MO_TEUQ); - } - - /* Set tmp0 to match the required value of Z. */ - tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp); - Z = tcg_temp_new(); - tcg_gen_trunc_i64_tl(Z, cmp); - - /* - * Extract the result values for the register pair. - * For 32-bit, we may do this unconditionally, because on success (Z=1), - * the old value matches the previous value in EDX:EAX. For x86_64, - * the store must be conditional, because we must leave the source - * registers unchanged on success, and zero-extend the writeback - * on failure (Z=0). - */ - if (TARGET_LONG_BITS == 32) { - tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old); - } else { - TCGv zero = tcg_constant_tl(0); - - tcg_gen_extr_i64_tl(s->T0, s->T1, old); - tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero, - s->T0, cpu_regs[R_EAX]); - tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero, - s->T1, cpu_regs[R_EDX]); - } - - /* Update Z. */ - gen_compute_eflags(s); - tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1); -} - -#ifdef TARGET_X86_64 -static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm) -{ - MemOp mop = MO_TE | MO_128 | MO_ALIGN; - TCGv_i64 t0, t1; - TCGv_i128 cmp, val; - - gen_lea_modrm(env, s, modrm); - - cmp = tcg_temp_new_i128(); - val = tcg_temp_new_i128(); - tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]); - tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]); - - /* Only require atomic with LOCK; non-parallel handled in generator. */ - if (s->prefix & PREFIX_LOCK) { - tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop); - } else { - tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop); - } - - tcg_gen_extr_i128_i64(s->T0, s->T1, val); - - /* Determine success after the fact. 
*/ - t0 = tcg_temp_new_i64(); - t1 = tcg_temp_new_i64(); - tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]); - tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]); - tcg_gen_or_i64(t0, t0, t1); - - /* Update Z. */ - gen_compute_eflags(s); - tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0); - tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1); - - /* - * Extract the result values for the register pair. We may do this - * unconditionally, because on success (Z=1), the old value matches - * the previous value in RDX:RAX. - */ - tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0); - tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1); -} -#endif +#include "emit.c.inc" -static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b) +static void gen_x87(DisasContext *s, X86DecodedInsn *decode) { - CPUX86State *env = cpu_env(cpu); bool update_fip = true; - int modrm, mod, rm, op; + int b = decode->b; + int modrm = s->modrm; + int mod, rm, op; if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { /* if CR0.EM or CR0.TS are set, generate an FPU exception */ /* XXX: what to do if illegal op ? */ gen_exception(s, EXCP07_PREX); - return true; + return; } - modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = modrm & 7; op = ((b & 7) << 3) | ((modrm >> 3) & 7); if (mod != 3) { /* memory op */ - AddressParts a = gen_lea_modrm_0(env, s, modrm); - TCGv ea = gen_lea_modrm_1(s, a, false); + TCGv ea = gen_lea_modrm_1(s, decode->mem, false); TCGv last_addr = tcg_temp_new(); bool update_fdp = true; tcg_gen_mov_tl(last_addr, ea); - gen_lea_v_seg(s, ea, a.def_seg, s->override); + gen_lea_v_seg(s, ea, decode->mem.def_seg, s->override); switch (op) { case 0x00 ... 0x07: /* fxxxs */ @@ -2613,11 +2667,11 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b) gen_helper_fpop(tcg_env); break; default: - return false; + goto illegal_op; } if (update_fdp) { - int last_seg = s->override >= 0 ? s->override : a.def_seg; + int last_seg = s->override >= 0 ? 
s->override : decode->mem.def_seg; tcg_gen_ld_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, @@ -2654,7 +2708,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b) update_fip = false; break; default: - return false; + goto illegal_op; } break; case 0x0c: /* grp d9/4 */ @@ -2673,7 +2727,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b) gen_helper_fxam_ST0(tcg_env); break; default: - return false; + goto illegal_op; } break; case 0x0d: /* grp d9/5 */ @@ -2708,7 +2762,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b) gen_helper_fldz_ST0(tcg_env); break; default: - return false; + goto illegal_op; } } break; @@ -2810,7 +2864,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b) gen_helper_fpop(tcg_env); break; default: - return false; + goto illegal_op; } break; case 0x1c: @@ -2830,7 +2884,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b) case 4: /* fsetpm (287 only, just do nop here) */ break; default: - return false; + goto illegal_op; } break; case 0x1d: /* fucomi */ @@ -2882,7 +2936,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b) gen_helper_fpop(tcg_env); break; default: - return false; + goto illegal_op; } break; case 0x38: /* ffreep sti, undocumented op */ @@ -2897,7 +2951,7 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b) gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0); break; default: - return false; + goto illegal_op; } break; case 0x3d: /* fucomip */ @@ -2937,14 +2991,14 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b) } op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); l1 = gen_new_label(); - gen_jcc1_noeob(s, op1, l1); + gen_jcc_noeob(s, op1, l1); gen_helper_fmov_ST0_STN(tcg_env, tcg_constant_i32(opreg)); gen_set_label(l1); } break; default: - return false; + goto illegal_op; } } @@ -2956,49 +3010,29 @@ static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b) tcg_gen_st_tl(eip_cur_tl(s), tcg_env, offsetof(CPUX86State, fpip)); } - return true; + return; illegal_op: gen_illegal_opcode(s); - return true; } -static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) +static void gen_multi0F(DisasContext *s, X86DecodedInsn *decode) { - CPUX86State *env = cpu_env(cpu); int prefixes = s->prefix; MemOp dflag = s->dflag; + int b = decode->b + 0x100; + int modrm = s->modrm; MemOp ot; - int modrm, reg, rm, mod, op, val; + int reg, rm, mod, op; /* now check op code */ switch (b) { - case 0x1c7: /* cmpxchg8b */ - modrm = x86_ldub_code(env, s); + case 0x1c7: /* RDSEED, RDPID with f3 prefix */ mod = (modrm >> 6) & 3; switch ((modrm >> 3) & 7) { - case 1: /* CMPXCHG8, CMPXCHG16 */ - if (mod == 3) { - goto illegal_op; - } -#ifdef TARGET_X86_64 - if (dflag == MO_64) { - if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) { - goto illegal_op; - } - gen_cmpxchg16b(s, env, modrm); - break; - } -#endif - if (!(s->cpuid_features & CPUID_CX8)) { - goto illegal_op; - } - gen_cmpxchg8b(s, env, modrm); - break; - - case 7: /* RDSEED, RDPID with f3 prefix */ + case 7: if (mod != 3 || - (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) { + (s->prefix & PREFIX_REPNZ)) { goto illegal_op; } if (s->prefix & PREFIX_REPZ) { @@ -3018,7 +3052,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) case 6: /* RDRAND */ if (mod != 3 || - (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) || + (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) || !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) { goto illegal_op; } @@ -3035,148 +3069,7 @@ static void 
disas_insn_old(DisasContext *s, CPUState *cpu, int b) } break; - /************************/ - /* bit operations */ - case 0x1ba: /* bt/bts/btr/btc Gv, im */ - ot = dflag; - modrm = x86_ldub_code(env, s); - op = (modrm >> 3) & 7; - mod = (modrm >> 6) & 3; - rm = (modrm & 7) | REX_B(s); - if (mod != 3) { - s->rip_offset = 1; - gen_lea_modrm(env, s, modrm); - if (!(s->prefix & PREFIX_LOCK)) { - gen_op_ld_v(s, ot, s->T0, s->A0); - } - } else { - gen_op_mov_v_reg(s, ot, s->T0, rm); - } - /* load shift */ - val = x86_ldub_code(env, s); - tcg_gen_movi_tl(s->T1, val); - if (op < 4) - goto unknown_op; - op -= 4; - goto bt_op; - case 0x1a3: /* bt Gv, Ev */ - op = 0; - goto do_btx; - case 0x1ab: /* bts */ - op = 1; - goto do_btx; - case 0x1b3: /* btr */ - op = 2; - goto do_btx; - case 0x1bb: /* btc */ - op = 3; - do_btx: - ot = dflag; - modrm = x86_ldub_code(env, s); - reg = ((modrm >> 3) & 7) | REX_R(s); - mod = (modrm >> 6) & 3; - rm = (modrm & 7) | REX_B(s); - gen_op_mov_v_reg(s, MO_32, s->T1, reg); - if (mod != 3) { - AddressParts a = gen_lea_modrm_0(env, s, modrm); - /* specific case: we need to add a displacement */ - gen_exts(ot, s->T1); - tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot); - tcg_gen_shli_tl(s->tmp0, s->tmp0, ot); - tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0); - gen_lea_v_seg(s, s->A0, a.def_seg, s->override); - if (!(s->prefix & PREFIX_LOCK)) { - gen_op_ld_v(s, ot, s->T0, s->A0); - } - } else { - gen_op_mov_v_reg(s, ot, s->T0, rm); - } - bt_op: - tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1); - tcg_gen_movi_tl(s->tmp0, 1); - tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1); - if (s->prefix & PREFIX_LOCK) { - switch (op) { - case 0: /* bt */ - /* Needs no atomic ops; we suppressed the normal - memory load for LOCK above so do it now. */ - gen_op_ld_v(s, ot, s->T0, s->A0); - break; - case 1: /* bts */ - tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0, - s->mem_index, ot | MO_LE); - break; - case 2: /* btr */ - tcg_gen_not_tl(s->tmp0, s->tmp0); - tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0, - s->mem_index, ot | MO_LE); - break; - default: - case 3: /* btc */ - tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0, - s->mem_index, ot | MO_LE); - break; - } - tcg_gen_shr_tl(s->tmp4, s->T0, s->T1); - } else { - tcg_gen_shr_tl(s->tmp4, s->T0, s->T1); - switch (op) { - case 0: /* bt */ - /* Data already loaded; nothing to do. */ - break; - case 1: /* bts */ - tcg_gen_or_tl(s->T0, s->T0, s->tmp0); - break; - case 2: /* btr */ - tcg_gen_andc_tl(s->T0, s->T0, s->tmp0); - break; - default: - case 3: /* btc */ - tcg_gen_xor_tl(s->T0, s->T0, s->tmp0); - break; - } - if (op != 0) { - if (mod != 3) { - gen_op_st_v(s, ot, s->T0, s->A0); - } else { - gen_op_mov_reg_v(s, ot, rm, s->T0); - } - } - } - - /* Delay all CC updates until after the store above. Note that - C is the result of the test, Z is unchanged, and the others - are all undefined. */ - switch (s->cc_op) { - case CC_OP_MULB ... CC_OP_MULQ: - case CC_OP_ADDB ... CC_OP_ADDQ: - case CC_OP_ADCB ... CC_OP_ADCQ: - case CC_OP_SUBB ... CC_OP_SUBQ: - case CC_OP_SBBB ... CC_OP_SBBQ: - case CC_OP_LOGICB ... CC_OP_LOGICQ: - case CC_OP_INCB ... CC_OP_INCQ: - case CC_OP_DECB ... CC_OP_DECQ: - case CC_OP_SHLB ... CC_OP_SHLQ: - case CC_OP_SARB ... CC_OP_SARQ: - case CC_OP_BMILGB ... CC_OP_BMILGQ: - case CC_OP_POPCNT: - /* Z was going to be computed from the non-zero status of CC_DST. - We can get that same Z value (and the new C value) by leaving - CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the - same width. 
*/ - tcg_gen_mov_tl(cpu_cc_src, s->tmp4); - set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB); - break; - default: - /* Otherwise, generate EFLAGS and replace the C bit. */ - gen_compute_eflags(s); - tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4, - ctz32(CC_C), 1); - break; - } - break; case 0x100: - modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { @@ -3190,14 +3083,14 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, ldt.selector)); ot = mod == 3 ? dflag : MO_16; - gen_st_modrm(env, s, modrm, ot); + gen_st_modrm(s, decode, ot); break; case 2: /* lldt */ if (!PE(s) || VM86(s)) goto illegal_op; if (check_cpl0(s)) { gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE); - gen_ld_modrm(env, s, modrm, MO_16); + gen_ld_modrm(s, decode, MO_16); tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); gen_helper_lldt(tcg_env, s->tmp2_i32); } @@ -3212,14 +3105,14 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, tr.selector)); ot = mod == 3 ? dflag : MO_16; - gen_st_modrm(env, s, modrm, ot); + gen_st_modrm(s, decode, ot); break; case 3: /* ltr */ if (!PE(s) || VM86(s)) goto illegal_op; if (check_cpl0(s)) { gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE); - gen_ld_modrm(env, s, modrm, MO_16); + gen_ld_modrm(s, decode, MO_16); tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); gen_helper_ltr(tcg_env, s->tmp2_i32); } @@ -3228,7 +3121,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) case 5: /* verw */ if (!PE(s) || VM86(s)) goto illegal_op; - gen_ld_modrm(env, s, modrm, MO_16); + gen_ld_modrm(s, decode, MO_16); gen_update_cc_op(s); if (op == 4) { gen_helper_verr(tcg_env, s->T0); @@ -3238,19 +3131,18 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) assume_cc_op(s, CC_OP_EFLAGS); break; default: - goto unknown_op; + goto illegal_op; } break; case 0x101: - modrm = x86_ldub_code(env, s); switch (modrm) { CASE_MODRM_MEM_OP(0): /* sgdt */ if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) { break; } gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ); - gen_lea_modrm(env, s, modrm); + gen_lea_modrm(s, decode); tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.limit)); gen_op_st_v(s, MO_16, s->T0, s->A0); @@ -3306,7 +3198,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) break; } gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ); - gen_lea_modrm(env, s, modrm); + gen_lea_modrm(s, decode); tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit)); gen_op_st_v(s, MO_16, s->T0, s->A0); gen_add_A0_im(s, 2); @@ -3320,8 +3212,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) case 0xd0: /* xgetbv */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 - || (s->prefix & (PREFIX_LOCK | PREFIX_DATA - | PREFIX_REPZ | PREFIX_REPNZ))) { + || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); @@ -3331,8 +3222,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) case 0xd1: /* xsetbv */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 - || (s->prefix & (PREFIX_LOCK | PREFIX_DATA - | PREFIX_REPZ | PREFIX_REPNZ))) { + || (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } gen_svm_check_intercept(s, SVM_EXIT_XSETBV); @@ -3456,7 +3346,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) break; } gen_svm_check_intercept(s, 
SVM_EXIT_GDTR_WRITE); - gen_lea_modrm(env, s, modrm); + gen_lea_modrm(s, decode); gen_op_ld_v(s, MO_16, s->T1, s->A0); gen_add_A0_im(s, 2); gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); @@ -3472,7 +3362,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) break; } gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE); - gen_lea_modrm(env, s, modrm); + gen_lea_modrm(s, decode); gen_op_ld_v(s, MO_16, s->T1, s->A0); gen_add_A0_im(s, 2); gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0); @@ -3496,11 +3386,10 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) */ mod = (modrm >> 6) & 3; ot = (mod != 3 ? MO_16 : s->dflag); - gen_st_modrm(env, s, modrm, ot); + gen_st_modrm(s, decode, ot); break; case 0xee: /* rdpkru */ - if (s->prefix & (PREFIX_LOCK | PREFIX_DATA - | PREFIX_REPZ | PREFIX_REPNZ)) { + if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) { goto illegal_op; } tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]); @@ -3508,8 +3397,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64); break; case 0xef: /* wrpkru */ - if (s->prefix & (PREFIX_LOCK | PREFIX_DATA - | PREFIX_REPZ | PREFIX_REPNZ)) { + if (s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ)) { goto illegal_op; } tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX], @@ -3523,7 +3411,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) break; } gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0); - gen_ld_modrm(env, s, modrm, MO_16); + gen_ld_modrm(s, decode, MO_16); /* * Only the 4 lower bits of CR0 are modified. * PE cannot be set to zero if already set to one. @@ -3541,7 +3429,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) break; } gen_svm_check_intercept(s, SVM_EXIT_INVLPG); - gen_lea_modrm(env, s, modrm); + gen_lea_modrm(s, decode); gen_helper_flush_page(tcg_env, s->A0); s->base.is_jmp = DISAS_EOB_NEXT; break; @@ -3574,33 +3462,30 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) break; default: - goto unknown_op; + goto illegal_op; } break; case 0x11a: - modrm = x86_ldub_code(env, s); if (s->flags & HF_MPX_EN_MASK) { mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | REX_R(s); if (prefixes & PREFIX_REPZ) { /* bndcl */ if (reg >= 4 - || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } - gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]); + gen_bndck(s, decode, TCG_COND_LTU, cpu_bndl[reg]); } else if (prefixes & PREFIX_REPNZ) { /* bndcu */ if (reg >= 4 - || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } TCGv_i64 notu = tcg_temp_new_i64(); tcg_gen_not_i64(notu, cpu_bndu[reg]); - gen_bndck(env, s, modrm, TCG_COND_GTU, notu); + gen_bndck(s, decode, TCG_COND_GTU, notu); } else if (prefixes & PREFIX_DATA) { /* bndmov -- from reg/mem */ if (reg >= 4 || s->aflag == MO_16) { @@ -3608,7 +3493,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) } if (mod == 3) { int reg2 = (modrm & 7) | REX_B(s); - if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) { + if (reg2 >= 4) { goto illegal_op; } if (s->flags & HF_MPX_IU_MASK) { @@ -3616,7 +3501,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]); } } else { - gen_lea_modrm(env, s, modrm); + gen_lea_modrm(s, decode); if (CODE64(s)) { tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0, s->mem_index, MO_LEUQ); @@ -3635,9 +3520,8 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) } } else if (mod 
!= 3) { /* bndldx */ - AddressParts a = gen_lea_modrm_0(env, s, modrm); + AddressParts a = decode->mem; if (reg >= 4 - || (prefixes & PREFIX_LOCK) || s->aflag == MO_16 || a.base < -1) { goto illegal_op; @@ -3665,21 +3549,18 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) gen_set_hflag(s, HF_MPX_IU_MASK); } } - gen_nop_modrm(env, s, modrm); break; case 0x11b: - modrm = x86_ldub_code(env, s); if (s->flags & HF_MPX_EN_MASK) { mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | REX_R(s); if (mod != 3 && (prefixes & PREFIX_REPZ)) { /* bndmk */ if (reg >= 4 - || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } - AddressParts a = gen_lea_modrm_0(env, s, modrm); + AddressParts a = decode->mem; if (a.base >= 0) { tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]); if (!CODE64(s)) { @@ -3692,7 +3573,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) /* rip-relative generates #ud */ goto illegal_op; } - tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false)); + tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, decode->mem, false)); if (!CODE64(s)) { tcg_gen_ext32u_tl(s->A0, s->A0); } @@ -3703,11 +3584,10 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) } else if (prefixes & PREFIX_REPNZ) { /* bndcn */ if (reg >= 4 - || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } - gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]); + gen_bndck(s, decode, TCG_COND_GTU, cpu_bndu[reg]); } else if (prefixes & PREFIX_DATA) { /* bndmov -- to reg/mem */ if (reg >= 4 || s->aflag == MO_16) { @@ -3715,7 +3595,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) } if (mod == 3) { int reg2 = (modrm & 7) | REX_B(s); - if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) { + if (reg2 >= 4) { goto illegal_op; } if (s->flags & HF_MPX_IU_MASK) { @@ -3723,7 +3603,7 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]); } } else { - gen_lea_modrm(env, s, modrm); + gen_lea_modrm(s, decode); if (CODE64(s)) { tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0, s->mem_index, MO_LEUQ); @@ -3740,9 +3620,8 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) } } else if (mod != 3) { /* bndstx */ - AddressParts a = gen_lea_modrm_0(env, s, modrm); + AddressParts a = decode->mem; if (reg >= 4 - || (prefixes & PREFIX_LOCK) || s->aflag == MO_16 || a.base < -1) { goto illegal_op; @@ -3767,7 +3646,6 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) } } } - gen_nop_modrm(env, s, modrm); break; default: g_assert_not_reached(); @@ -3775,13 +3653,8 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b) return; illegal_op: gen_illegal_opcode(s); - return; - unknown_op: - gen_unknown_opcode(env, s); } -#include "decode-new.h" -#include "emit.c.inc" #include "decode-new.c.inc" void tcg_x86_init(void) @@ -3917,30 +3790,13 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->cpuid_xsave_features = env->features[FEAT_XSAVE]; dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) || (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK))); - /* - * If jmp_opt, we want to handle each string instruction individually. - * For icount also disable repz optimization so that each iteration - * is accounted separately. 
- * - * FIXME: this is messy; it makes REP string instructions a lot less - * efficient than they should be and it gets in the way of correct - * handling of RF (interrupts or traps arriving after any iteration - * of a repeated string instruction but the last should set RF to 1). - * Perhaps it would be more efficient if REP string instructions were - * always at the beginning of the TB, or even their own TB? That - * would even allow accounting up to 64k iterations at once for icount. - */ - dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT); dc->T0 = tcg_temp_new(); dc->T1 = tcg_temp_new(); dc->A0 = tcg_temp_new(); - dc->tmp0 = tcg_temp_new(); dc->tmp1_i64 = tcg_temp_new_i64(); dc->tmp2_i32 = tcg_temp_new_i32(); - dc->tmp3_i32 = tcg_temp_new_i32(); - dc->tmp4 = tcg_temp_new(); dc->cc_srcT = tcg_temp_new(); } @@ -3989,15 +3845,9 @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) case 2: /* Restore state that may affect the next instruction. */ dc->pc = dc->base.pc_next; - /* - * TODO: These save/restore can be removed after the table-based - * decoder is complete; we will be decoding the insn completely - * before any code generation that might affect these variables. - */ - dc->cc_op_dirty = orig_cc_op_dirty; - dc->cc_op = orig_cc_op; - dc->pc_save = orig_pc_save; - /* END TODO */ + assert(dc->cc_op_dirty == orig_cc_op_dirty); + assert(dc->cc_op == orig_cc_op); + assert(dc->pc_save == orig_pc_save); dc->base.num_insns--; tcg_remove_ops_after(dc->prev_insn_end); dc->base.insn_start = dc->prev_insn_start; @@ -4022,7 +3872,7 @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) * chance to happen. */ dc->base.is_jmp = DISAS_EOB_NEXT; - } else if (!is_same_page(&dc->base, dc->base.pc_next)) { + } else if (!translator_is_same_page(&dc->base, dc->base.pc_next)) { dc->base.is_jmp = DISAS_TOO_MANY; } } @@ -4073,9 +3923,8 @@ static const TranslatorOps i386_tr_ops = { .tb_stop = i386_tr_tb_stop, }; -/* generate intermediate code for basic block 'tb'. 
*/ -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void x86_translate_code(CPUState *cpu, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc; diff --git a/target/i386/tcg/user/excp_helper.c b/target/i386/tcg/user/excp_helper.c index b3bdb7831a7..98fab4cbc3f 100644 --- a/target/i386/tcg/user/excp_helper.c +++ b/target/i386/tcg/user/excp_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "tcg/helper-tcg.h" void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr, diff --git a/target/i386/tcg/user/seg_helper.c b/target/i386/tcg/user/seg_helper.c index c45f2ac2ba6..263f59937fe 100644 --- a/target/i386/tcg/user/seg_helper.c +++ b/target/i386/tcg/user/seg_helper.c @@ -21,8 +21,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "tcg/helper-tcg.h" #include "tcg/seg_helper.h" diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c index 1a2b4e1c43d..da58805b1a6 100644 --- a/target/i386/whpx/whpx-accel-ops.c +++ b/target/i386/whpx/whpx-accel-ops.c @@ -9,12 +9,13 @@ */ #include "qemu/osdep.h" -#include "sysemu/kvm_int.h" +#include "system/kvm_int.h" #include "qemu/main-loop.h" -#include "sysemu/cpus.h" +#include "accel/accel-cpu-ops.h" +#include "system/cpus.h" #include "qemu/guest-random.h" -#include "sysemu/whpx.h" +#include "system/whpx.h" #include "whpx-internal.h" #include "whpx-accel-ops.h" @@ -82,19 +83,19 @@ static bool whpx_vcpu_thread_is_idle(CPUState *cpu) return !whpx_apic_in_platform(); } -static void whpx_accel_ops_class_init(ObjectClass *oc, void *data) +static void whpx_accel_ops_class_init(ObjectClass *oc, const void *data) { AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); ops->create_vcpu_thread = whpx_start_vcpu_thread; ops->kick_vcpu_thread = whpx_kick_vcpu_thread; ops->cpu_thread_is_idle = whpx_vcpu_thread_is_idle; + ops->handle_interrupt = generic_handle_interrupt; ops->synchronize_post_reset = whpx_cpu_synchronize_post_reset; ops->synchronize_post_init = whpx_cpu_synchronize_post_init; ops->synchronize_state = whpx_cpu_synchronize_state; ops->synchronize_pre_loadvm = whpx_cpu_synchronize_pre_loadvm; - ops->synchronize_pre_resume = whpx_cpu_synchronize_pre_resume; } static const TypeInfo whpx_accel_ops_type = { diff --git a/target/i386/whpx/whpx-accel-ops.h b/target/i386/whpx/whpx-accel-ops.h index 7a1bb1ab575..54cfc25a147 100644 --- a/target/i386/whpx/whpx-accel-ops.h +++ b/target/i386/whpx/whpx-accel-ops.h @@ -10,7 +10,7 @@ #ifndef TARGET_I386_WHPX_ACCEL_OPS_H #define TARGET_I386_WHPX_ACCEL_OPS_H -#include "sysemu/cpus.h" +#include "system/cpus.h" int whpx_init_vcpu(CPUState *cpu); int whpx_vcpu_exec(CPUState *cpu); @@ -21,7 +21,6 @@ void whpx_cpu_synchronize_state(CPUState *cpu); void whpx_cpu_synchronize_post_reset(CPUState *cpu); void whpx_cpu_synchronize_post_init(CPUState *cpu); void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu); -void whpx_cpu_synchronize_pre_resume(bool step_pending); /* state subset only touched by the VCPU itself during runtime */ #define WHPX_SET_RUNTIME_STATE 1 diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index a6674a826d6..b72dcff3c8d 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -10,13 +10,14 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/address-spaces.h" -#include "exec/ioport.h" +#include 
"system/address-spaces.h" +#include "system/ioport.h" #include "gdbstub/helpers.h" #include "qemu/accel.h" -#include "sysemu/whpx.h" -#include "sysemu/cpus.h" -#include "sysemu/runstate.h" +#include "accel/accel-ops.h" +#include "system/whpx.h" +#include "system/cpus.h" +#include "system/runstate.h" #include "qemu/main-loop.h" #include "hw/boards.h" #include "hw/intc/ioapic.h" @@ -26,6 +27,8 @@ #include "qapi/qapi-types-common.h" #include "qapi/qapi-visit-common.h" #include "migration/blocker.h" +#include "host-cpu.h" +#include "accel/accel-cpu-target.h" #include #include "whpx-internal.h" @@ -237,13 +240,12 @@ struct AccelCPUState { uint64_t tpr; uint64_t apic_base; bool interruption_pending; - bool dirty; /* Must be the last field as it may have a tail */ WHV_RUN_VP_EXIT_CONTEXT exit_ctx; }; -static bool whpx_allowed; +bool whpx_allowed; static bool whp_dispatch_initialized; static HMODULE hWinHvPlatform, hWinHvEmulation; static uint32_t max_vcpu_index; @@ -549,8 +551,6 @@ static void whpx_set_registers(CPUState *cpu, int level) error_report("WHPX: Failed to set virtual processor context, hr=%08lx", hr); } - - return; } static int whpx_get_tsc(CPUState *cpu) @@ -771,8 +771,6 @@ static void whpx_get_registers(CPUState *cpu) } x86_update_hflags(env); - - return; } static HRESULT CALLBACK whpx_emu_ioport_callback( @@ -840,7 +838,7 @@ static HRESULT CALLBACK whpx_emu_setreg_callback( * The emulator just successfully wrote the register state. We clear the * dirty state so we avoid the double write on resume of the VP. */ - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; return hr; } @@ -1395,7 +1393,7 @@ static int whpx_last_vcpu_stopping(CPUState *cpu) /* Returns the address of the next instruction that is about to be executed. */ static vaddr whpx_vcpu_get_pc(CPUState *cpu, bool exit_context_valid) { - if (cpu->accel->dirty) { + if (cpu->vcpu_dirty) { /* The CPU registers have been modified by other parts of QEMU. 
*/ return cpu_env(cpu)->eip; } else if (exit_context_valid) { @@ -1570,8 +1568,6 @@ static void whpx_vcpu_pre_run(CPUState *cpu) " hr=%08lx", hr); } } - - return; } static void whpx_vcpu_post_run(CPUState *cpu) @@ -1595,8 +1591,6 @@ static void whpx_vcpu_post_run(CPUState *cpu) vcpu->interruptable = !vcpu->exit_ctx.VpContext.ExecutionState.InterruptShadow; - - return; } static void whpx_vcpu_process_async_events(CPUState *cpu) @@ -1634,8 +1628,6 @@ static void whpx_vcpu_process_async_events(CPUState *cpu) apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip, env->tpr_access_type); } - - return; } static int whpx_vcpu_run(CPUState *cpu) @@ -1714,9 +1706,9 @@ static int whpx_vcpu_run(CPUState *cpu) } do { - if (cpu->accel->dirty) { + if (cpu->vcpu_dirty) { whpx_set_registers(cpu, WHPX_SET_RUNTIME_STATE); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } if (exclusive_step_mode == WHPX_STEP_NONE) { @@ -2064,9 +2056,9 @@ static int whpx_vcpu_run(CPUState *cpu) static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) { - if (!cpu->accel->dirty) { + if (!cpu->vcpu_dirty) { whpx_get_registers(cpu); - cpu->accel->dirty = true; + cpu->vcpu_dirty = true; } } @@ -2074,20 +2066,20 @@ static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg) { whpx_set_registers(cpu, WHPX_SET_RESET_STATE); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } static void do_whpx_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg) { whpx_set_registers(cpu, WHPX_SET_FULL_STATE); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg) { - cpu->accel->dirty = true; + cpu->vcpu_dirty = true; } /* @@ -2096,7 +2088,7 @@ static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu, void whpx_cpu_synchronize_state(CPUState *cpu) { - if (!cpu->accel->dirty) { + if (!cpu->vcpu_dirty) { run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL); } } @@ -2116,7 +2108,7 @@ void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu) run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL); } -void whpx_cpu_synchronize_pre_resume(bool step_pending) +static void whpx_pre_resume_vm(AccelState *as, bool step_pending) { whpx_global.step_pending = step_pending; } @@ -2236,7 +2228,7 @@ int whpx_init_vcpu(CPUState *cpu) } vcpu->interruptable = true; - vcpu->dirty = true; + cpu->vcpu_dirty = true; cpu->accel = vcpu; max_vcpu_index = max(max_vcpu_index, cpu->cpu_index); qemu_add_vm_change_state_handler(whpx_cpu_update_state, env); @@ -2280,7 +2272,6 @@ void whpx_destroy_vcpu(CPUState *cpu) whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index); whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator); g_free(cpu->accel); - return; } void whpx_vcpu_kick(CPUState *cpu) @@ -2512,11 +2503,33 @@ static void whpx_set_kernel_irqchip(Object *obj, Visitor *v, } } +static void whpx_cpu_instance_init(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs); + + host_cpu_instance_init(cpu); +} + +static void whpx_cpu_accel_class_init(ObjectClass *oc, const void *data) +{ + AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); + + acc->cpu_instance_init = whpx_cpu_instance_init; +} + +static const TypeInfo whpx_cpu_accel_type = { + .name = ACCEL_CPU_NAME("whpx"), + + .parent = TYPE_ACCEL_CPU, + .class_init = whpx_cpu_accel_class_init, + .abstract = true, +}; + /* * Partition support */ -static int whpx_accel_init(MachineState *ms) +static int whpx_accel_init(AccelState *as, MachineState *ms) { 
struct whpx_state *whpx; int ret; @@ -2700,20 +2713,16 @@ static int whpx_accel_init(MachineState *ms) return ret; } -int whpx_enabled(void) -{ - return whpx_allowed; -} - bool whpx_apic_in_platform(void) { return whpx_global.apic_in_platform; } -static void whpx_accel_class_init(ObjectClass *oc, void *data) +static void whpx_accel_class_init(ObjectClass *oc, const void *data) { AccelClass *ac = ACCEL_CLASS(oc); ac->name = "WHPX"; ac->init_machine = whpx_accel_init; + ac->pre_resume_vm = whpx_pre_resume_vm; ac->allowed = &whpx_allowed; object_class_property_add(oc, "kernel-irqchip", "on|off|split", @@ -2742,6 +2751,7 @@ static const TypeInfo whpx_accel_type = { static void whpx_type_init(void) { type_register_static(&whpx_accel_type); + type_register_static(&whpx_cpu_accel_type); } bool init_whp_dispatch(void) diff --git a/target/i386/whpx/whpx-apic.c b/target/i386/whpx/whpx-apic.c index 7e14ded9788..e1ef6d4e6d4 100644 --- a/target/i386/whpx/whpx-apic.c +++ b/target/i386/whpx/whpx-apic.c @@ -16,8 +16,8 @@ #include "hw/i386/apic_internal.h" #include "hw/i386/apic-msidef.h" #include "hw/pci/msi.h" -#include "sysemu/hw_accel.h" -#include "sysemu/whpx.h" +#include "system/hw_accel.h" +#include "system/whpx.h" #include "whpx-internal.h" struct whpx_lapic_state { @@ -231,7 +231,7 @@ static void whpx_apic_mem_write(void *opaque, hwaddr addr, static const MemoryRegionOps whpx_apic_io_ops = { .read = whpx_apic_mem_read, .write = whpx_apic_mem_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, }; static void whpx_apic_reset(APICCommonState *s) @@ -252,7 +252,7 @@ static void whpx_apic_realize(DeviceState *dev, Error **errp) msi_nonbroken = true; } -static void whpx_apic_class_init(ObjectClass *klass, void *data) +static void whpx_apic_class_init(ObjectClass *klass, const void *data) { APICCommonClass *k = APIC_COMMON_CLASS(klass); diff --git a/target/loongarch/README b/target/loongarch/README index 0b9dc0d40a0..1ffd3422d22 100644 --- a/target/loongarch/README +++ b/target/loongarch/README @@ -11,7 +11,7 @@ - System emulation - You can reference docs/system/loongarch/loongson3.rst to get the information about system emulation of LoongArch. + You can reference docs/system/loongarch/virt.rst to get the information about system emulation of LoongArch. - Linux-user emulation diff --git a/target/loongarch/arch_dump.c b/target/loongarch/arch_dump.c new file mode 100644 index 00000000000..2b0955a2091 --- /dev/null +++ b/target/loongarch/arch_dump.c @@ -0,0 +1,163 @@ +/* + * Support for writing ELF notes for LoongArch architectures + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "elf.h" +#include "system/dump.h" +#include "internals.h" + +/* struct user_pt_regs from arch/loongarch/include/uapi/asm/ptrace.h */ +struct loongarch_user_regs { + uint64_t gpr[32]; + uint64_t pad1[1]; + /* Special CSR registers. 
*/ + uint64_t csr_era; + uint64_t csr_badv; + uint64_t pad2[10]; +} QEMU_PACKED; + +QEMU_BUILD_BUG_ON(sizeof(struct loongarch_user_regs) != 360); + +/* struct elf_prstatus from include/uapi/linux/elfcore.h */ +struct loongarch_elf_prstatus { + char pad1[32]; /* 32 == offsetof(struct elf_prstatus, pr_pid) */ + uint32_t pr_pid; + /* + * 76 == offsetof(struct elf_prstatus, pr_reg) - + * offsetof(struct elf_prstatus, pr_ppid) + */ + char pad2[76]; + struct loongarch_user_regs pr_reg; + uint32_t pr_fpvalid; + char pad3[4]; +} QEMU_PACKED; + +QEMU_BUILD_BUG_ON(sizeof(struct loongarch_elf_prstatus) != 480); + +/* struct user_fp_state from arch/loongarch/include/uapi/asm/ptrace.h */ +struct loongarch_fpu_struct { + uint64_t fpr[32]; + uint64_t fcc; + unsigned int fcsr; +} QEMU_PACKED; + +QEMU_BUILD_BUG_ON(sizeof(struct loongarch_fpu_struct) != 268); + +struct loongarch_note { + Elf64_Nhdr hdr; + char name[8]; /* align_up(sizeof("CORE"), 4) */ + union { + struct loongarch_elf_prstatus prstatus; + struct loongarch_fpu_struct fpu; + }; +} QEMU_PACKED; + +#define LOONGARCH_NOTE_HEADER_SIZE offsetof(struct loongarch_note, prstatus) +#define LOONGARCH_PRSTATUS_NOTE_SIZE \ + (LOONGARCH_NOTE_HEADER_SIZE + sizeof(struct loongarch_elf_prstatus)) +#define LOONGARCH_PRFPREG_NOTE_SIZE \ + (LOONGARCH_NOTE_HEADER_SIZE + sizeof(struct loongarch_fpu_struct)) + +static void loongarch_note_init(struct loongarch_note *note, DumpState *s, + const char *name, Elf64_Word namesz, + Elf64_Word type, Elf64_Word descsz) +{ + memset(note, 0, sizeof(*note)); + + note->hdr.n_namesz = cpu_to_dump32(s, namesz); + note->hdr.n_descsz = cpu_to_dump32(s, descsz); + note->hdr.n_type = cpu_to_dump32(s, type); + + memcpy(note->name, name, namesz); +} + +static int loongarch_write_elf64_fprpreg(WriteCoreDumpFunction f, + CPULoongArchState *env, int cpuid, + DumpState *s) +{ + struct loongarch_note note; + int ret, i; + + loongarch_note_init(¬e, s, "CORE", 5, NT_PRFPREG, sizeof(note.fpu)); + note.fpu.fcsr = cpu_to_dump64(s, env->fcsr0); + note.fpu.fcc = cpu_to_dump64(s, read_fcc(env)); + + for (i = 0; i < 32; ++i) { + note.fpu.fpr[i] = cpu_to_dump64(s, env->fpr[i].vreg.UD[0]); + } + + ret = f(¬e, LOONGARCH_PRFPREG_NOTE_SIZE, s); + if (ret < 0) { + return -1; + } + + return 0; +} + +int loongarch_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, + int cpuid, DumpState *s) +{ + struct loongarch_note note; + CPULoongArchState *env = &LOONGARCH_CPU(cs)->env; + int ret, i; + + loongarch_note_init(¬e, s, "CORE", 5, NT_PRSTATUS, + sizeof(note.prstatus)); + note.prstatus.pr_pid = cpu_to_dump32(s, cpuid); + note.prstatus.pr_fpvalid = cpu_to_dump32(s, 1); + + for (i = 0; i < 32; ++i) { + note.prstatus.pr_reg.gpr[i] = cpu_to_dump64(s, env->gpr[i]); + } + note.prstatus.pr_reg.csr_era = cpu_to_dump64(s, env->CSR_ERA); + note.prstatus.pr_reg.csr_badv = cpu_to_dump64(s, env->CSR_BADV); + ret = f(¬e, LOONGARCH_PRSTATUS_NOTE_SIZE, s); + if (ret < 0) { + return -1; + } + + ret = loongarch_write_elf64_fprpreg(f, env, cpuid, s); + if (ret < 0) { + return -1; + } + + return ret; +} + +int cpu_get_dump_info(ArchDumpInfo *info, + const GuestPhysBlockList *guest_phys_blocks) +{ + info->d_machine = EM_LOONGARCH; + info->d_endian = ELFDATA2LSB; + info->d_class = ELFCLASS64; + + return 0; +} + +ssize_t cpu_get_note_size(int class, int machine, int nr_cpus) +{ + size_t note_size = 0; + + if (class == ELFCLASS64) { + note_size = LOONGARCH_PRSTATUS_NOTE_SIZE + LOONGARCH_PRFPREG_NOTE_SIZE; + } + + return note_size * nr_cpus; +} diff --git 
a/target/loongarch/cpu-param.h b/target/loongarch/cpu-param.h index db5ad1c69fa..58cc45a377e 100644 --- a/target/loongarch/cpu-param.h +++ b/target/loongarch/cpu-param.h @@ -8,12 +8,11 @@ #ifndef LOONGARCH_CPU_PARAM_H #define LOONGARCH_CPU_PARAM_H -#define TARGET_LONG_BITS 64 #define TARGET_PHYS_ADDR_SPACE_BITS 48 #define TARGET_VIRT_ADDR_SPACE_BITS 48 #define TARGET_PAGE_BITS 12 -#define TCG_GUEST_DEFAULT_MO (0) +#define TARGET_INSN_START_EXTRA_WORDS 0 #endif diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c index 5e85b9dbef3..abad84c0547 100644 --- a/target/loongarch/cpu.c +++ b/target/loongarch/cpu.c @@ -10,26 +10,29 @@ #include "qemu/qemu-print.h" #include "qapi/error.h" #include "qemu/module.h" -#include "sysemu/qtest.h" -#include "sysemu/tcg.h" -#include "sysemu/kvm.h" +#include "system/qtest.h" +#include "system/tcg.h" +#include "system/kvm.h" #include "kvm/kvm_loongarch.h" -#include "exec/exec-all.h" +#include "hw/qdev-properties.h" +#include "exec/translation-block.h" #include "cpu.h" #include "internals.h" #include "fpu/softfloat-helpers.h" -#include "cpu-csr.h" +#include "csr.h" #ifndef CONFIG_USER_ONLY -#include "sysemu/reset.h" +#include "system/reset.h" #endif #include "vec.h" #ifdef CONFIG_KVM #include #endif #ifdef CONFIG_TCG -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/cpu-ops.h" #include "tcg/tcg.h" #endif +#include "tcg/tcg_loongarch.h" const char * const regnames[32] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", @@ -331,8 +334,28 @@ static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request) } return false; } + +static vaddr loongarch_pointer_wrap(CPUState *cs, int mmu_idx, + vaddr result, vaddr base) +{ + return is_va32(cpu_env(cs)) ? (uint32_t)result : result; +} #endif +static TCGTBCPUState loongarch_get_tb_cpu_state(CPUState *cs) +{ + CPULoongArchState *env = cpu_env(cs); + uint32_t flags; + + flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK); + flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE; + flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE) * HW_FLAGS_EUEN_SXE; + flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE) * HW_FLAGS_EUEN_ASXE; + flags |= is_va32(env) * HW_FLAGS_VA32; + + return (TCGTBCPUState){ .pc = env->pc, .flags = flags }; +} + static void loongarch_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -348,11 +371,9 @@ static void loongarch_restore_state_to_opc(CPUState *cs, } #endif /* CONFIG_TCG */ +#ifndef CONFIG_USER_ONLY static bool loongarch_cpu_has_work(CPUState *cs) { -#ifdef CONFIG_USER_ONLY - return true; -#else bool has_work = false; if ((cs->interrupt_request & CPU_INTERRUPT_HARD) && @@ -361,8 +382,8 @@ static bool loongarch_cpu_has_work(CPUState *cs) } return has_work; -#endif } +#endif /* !CONFIG_USER_ONLY */ static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch) { @@ -374,10 +395,38 @@ static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch) return MMU_DA_IDX; } +static void loongarch_la464_init_csr(Object *obj) +{ +#ifndef CONFIG_USER_ONLY + static bool initialized; + LoongArchCPU *cpu = LOONGARCH_CPU(obj); + CPULoongArchState *env = &cpu->env; + int i, num; + + if (!initialized) { + initialized = true; + num = FIELD_EX64(env->CSR_PRCFG1, CSR_PRCFG1, SAVE_NUM); + for (i = num; i < 16; i++) { + set_csr_flag(LOONGARCH_CSR_SAVE(i), CSRFL_UNUSED); + } + set_csr_flag(LOONGARCH_CSR_IMPCTL1, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_IMPCTL2, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_MERRCTL, 
CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_MERRINFO1, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_MERRINFO2, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_MERRENTRY, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_MERRERA, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_MERRSAVE, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_CTAG, CSRFL_UNUSED); + } +#endif +} + static void loongarch_la464_initfn(Object *obj) { LoongArchCPU *cpu = LOONGARCH_CPU(obj); CPULoongArchState *env = &cpu->env; + uint32_t data = 0, field; int i; for (i = 0; i < 21; i++) { @@ -387,18 +436,23 @@ static void loongarch_la464_initfn(Object *obj) cpu->dtb_compatible = "loongarch,Loongson-3A5000"; env->cpucfg[0] = 0x14c010; /* PRID */ - uint32_t data = 0; data = FIELD_DP32(data, CPUCFG1, ARCH, 2); data = FIELD_DP32(data, CPUCFG1, PGMMU, 1); data = FIELD_DP32(data, CPUCFG1, IOCSR, 1); - data = FIELD_DP32(data, CPUCFG1, PALEN, 0x2f); + if (kvm_enabled()) { + /* GPA address width of VM is 47, field value is 47 - 1 */ + field = 0x2e; + } else { + field = 0x2f; /* 48 bit - 1 */ + } + data = FIELD_DP32(data, CPUCFG1, PALEN, field); data = FIELD_DP32(data, CPUCFG1, VALEN, 0x2f); data = FIELD_DP32(data, CPUCFG1, UAL, 1); data = FIELD_DP32(data, CPUCFG1, RI, 1); data = FIELD_DP32(data, CPUCFG1, EP, 1); data = FIELD_DP32(data, CPUCFG1, RPLV, 1); data = FIELD_DP32(data, CPUCFG1, HP, 1); - data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1); + data = FIELD_DP32(data, CPUCFG1, CRC, 1); env->cpucfg[1] = data; data = 0; @@ -469,6 +523,7 @@ static void loongarch_la464_initfn(Object *obj) env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7); env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8); + loongarch_la464_init_csr(obj); loongarch_cpu_post_init(obj); } @@ -476,7 +531,7 @@ static void loongarch_la132_initfn(Object *obj) { LoongArchCPU *cpu = LOONGARCH_CPU(obj); CPULoongArchState *env = &cpu->env; - + uint32_t data = 0; int i; for (i = 0; i < 21; i++) { @@ -486,7 +541,6 @@ static void loongarch_la132_initfn(Object *obj) cpu->dtb_compatible = "loongarch,Loongson-1C103"; env->cpucfg[0] = 0x148042; /* PRID */ - uint32_t data = 0; data = FIELD_DP32(data, CPUCFG1, ARCH, 1); /* LA32 */ data = FIELD_DP32(data, CPUCFG1, PGMMU, 1); data = FIELD_DP32(data, CPUCFG1, IOCSR, 1); @@ -497,7 +551,7 @@ static void loongarch_la132_initfn(Object *obj) data = FIELD_DP32(data, CPUCFG1, EP, 0); data = FIELD_DP32(data, CPUCFG1, RPLV, 0); data = FIELD_DP32(data, CPUCFG1, HP, 1); - data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1); + data = FIELD_DP32(data, CPUCFG1, CRC, 1); env->cpucfg[1] = data; } @@ -509,6 +563,7 @@ static void loongarch_max_initfn(Object *obj) static void loongarch_cpu_reset_hold(Object *obj, ResetType type) { + uint8_t tlb_ps; CPUState *cs = CPU(obj); LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(obj); CPULoongArchState *env = cpu_env(cs); @@ -549,7 +604,25 @@ static void loongarch_cpu_reset_hold(Object *obj, ResetType type) env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0); env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0); env->CSR_TID = cs->cpu_index; - + /* + * Workaround for edk2-stable202408, CSR PGD register is set only if + * its value is equal to zero for boot cpu, it causes reboot issue. + * + * Here clear CSR registers relative with TLB. 
+ */ + env->CSR_PGDH = 0; + env->CSR_PGDL = 0; + env->CSR_PWCH = 0; + env->CSR_EENTRY = 0; + env->CSR_TLBRENTRY = 0; + env->CSR_MERRENTRY = 0; + /* set CSR_PWCL.PTBASE and CSR_STLBPS.PS bits from CSR_PRCFG2 */ + if (env->CSR_PRCFG2 == 0) { + env->CSR_PRCFG2 = 0x3fffff000; + } + tlb_ps = ctz32(env->CSR_PRCFG2); + env->CSR_STLBPS = FIELD_DP64(env->CSR_STLBPS, CSR_STLBPS, PS, tlb_ps); + env->CSR_PWCL = FIELD_DP64(env->CSR_PWCL, CSR_PWCL, PTBASE, tlb_ps); for (n = 0; n < 4; n++) { env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV0, 0); env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV1, 0); @@ -563,7 +636,7 @@ static void loongarch_cpu_reset_hold(Object *obj, ResetType type) memset(env->tlb, 0, sizeof(env->tlb)); #endif if (kvm_enabled()) { - kvm_arch_reset_vcpu(env); + kvm_arch_reset_vcpu(cs); } #endif @@ -575,6 +648,7 @@ static void loongarch_cpu_reset_hold(Object *obj, ResetType type) static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info) { + info->endian = BFD_ENDIAN_LITTLE; info->print_insn = print_insn_loongarch; } @@ -592,70 +666,111 @@ static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp) loongarch_cpu_register_gdb_regs_for_features(cs); - cpu_reset(cs); qemu_init_vcpu(cs); + cpu_reset(cs); lacc->parent_realize(dev, errp); } -static bool loongarch_get_lsx(Object *obj, Error **errp) +static void loongarch_cpu_unrealizefn(DeviceState *dev) { - LoongArchCPU *cpu = LOONGARCH_CPU(obj); - bool ret; + LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev); - if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) { - ret = true; - } else { - ret = false; - } - return ret; +#ifndef CONFIG_USER_ONLY + cpu_remove_sync(CPU(dev)); +#endif + + lacc->parent_unrealize(dev); +} + +static bool loongarch_get_lsx(Object *obj, Error **errp) +{ + return LOONGARCH_CPU(obj)->lsx != ON_OFF_AUTO_OFF; } static void loongarch_set_lsx(Object *obj, bool value, Error **errp) { LoongArchCPU *cpu = LOONGARCH_CPU(obj); + uint32_t val; + + cpu->lsx = value ? 
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; + if (cpu->lsx == ON_OFF_AUTO_OFF) { + cpu->lasx = ON_OFF_AUTO_OFF; + if (cpu->lasx == ON_OFF_AUTO_ON) { + error_setg(errp, "Failed to disable LSX since LASX is enabled"); + return; + } + } + + if (kvm_enabled()) { + /* kvm feature detection in function kvm_arch_init_vcpu */ + return; + } - if (value) { - cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1); + /* LSX feature detection in TCG mode */ + val = cpu->env.cpucfg[2]; + if (cpu->lsx == ON_OFF_AUTO_ON) { + if (FIELD_EX32(val, CPUCFG2, LSX) == 0) { + error_setg(errp, "Failed to enable LSX in TCG mode"); + return; + } } else { - cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 0); - cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0); + cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LASX, 0); + val = cpu->env.cpucfg[2]; } + + cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LSX, value); } static bool loongarch_get_lasx(Object *obj, Error **errp) { - LoongArchCPU *cpu = LOONGARCH_CPU(obj); - bool ret; - - if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) { - ret = true; - } else { - ret = false; - } - return ret; + return LOONGARCH_CPU(obj)->lasx != ON_OFF_AUTO_OFF; } static void loongarch_set_lasx(Object *obj, bool value, Error **errp) { LoongArchCPU *cpu = LOONGARCH_CPU(obj); + uint32_t val; - if (value) { - if (!FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) { - cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1); - } - cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 1); - } else { - cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0); + cpu->lasx = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; + if ((cpu->lsx == ON_OFF_AUTO_OFF) && (cpu->lasx == ON_OFF_AUTO_ON)) { + error_setg(errp, "Failed to enable LASX since lSX is disabled"); + return; } + + if (kvm_enabled()) { + /* kvm feature detection in function kvm_arch_init_vcpu */ + return; + } + + /* LASX feature detection in TCG mode */ + val = cpu->env.cpucfg[2]; + if (cpu->lasx == ON_OFF_AUTO_ON) { + if (FIELD_EX32(val, CPUCFG2, LASX) == 0) { + error_setg(errp, "Failed to enable LASX in TCG mode"); + return; + } + } + + cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LASX, value); } void loongarch_cpu_post_init(Object *obj) { + LoongArchCPU *cpu = LOONGARCH_CPU(obj); + + cpu->lbt = ON_OFF_AUTO_OFF; + cpu->pmu = ON_OFF_AUTO_OFF; + cpu->lsx = ON_OFF_AUTO_AUTO; + cpu->lasx = ON_OFF_AUTO_AUTO; object_property_add_bool(obj, "lsx", loongarch_get_lsx, loongarch_set_lsx); object_property_add_bool(obj, "lasx", loongarch_get_lasx, loongarch_set_lasx); + /* lbt is enabled only in kvm mode, not supported in tcg mode */ + if (kvm_enabled()) { + kvm_loongarch_cpu_post_init(cpu); + } } static void loongarch_cpu_init(Object *obj) @@ -685,7 +800,55 @@ static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model) return oc; } -void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) +static void loongarch_cpu_dump_csr(CPUState *cs, FILE *f) +{ +#ifndef CONFIG_USER_ONLY + CPULoongArchState *env = cpu_env(cs); + CSRInfo *csr_info; + int64_t *addr; + int i, j, len, col = 0; + + qemu_fprintf(f, "\n"); + + /* Dump all generic CSR register */ + for (i = 0; i < LOONGARCH_CSR_DBG; i++) { + csr_info = get_csr(i); + if (!csr_info || (csr_info->flags & CSRFL_UNUSED)) { + if (i == (col + 3)) { + qemu_fprintf(f, "\n"); + } + + continue; + } + + if ((i > (col + 3)) || (i == col)) { + col = i & ~3; + qemu_fprintf(f, " CSR%03d:", col); + } + + addr = (void *)env + 
csr_info->offset; + qemu_fprintf(f, " %s ", csr_info->name); + len = strlen(csr_info->name); + for (; len < 6; len++) { + qemu_fprintf(f, " "); + } + + qemu_fprintf(f, "%" PRIx64, *addr); + j = find_last_bit((void *)addr, BITS_PER_LONG) & (BITS_PER_LONG - 1); + len += j / 4 + 1; + for (; len < 22; len++) { + qemu_fprintf(f, " "); + } + + if (i == (col + 3)) { + qemu_fprintf(f, "\n"); + } + } + qemu_fprintf(f, "\n"); +#endif +} + +static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) { CPULoongArchState *env = cpu_env(cs); int i; @@ -704,22 +867,8 @@ void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) } } - qemu_fprintf(f, "CRMD=%016" PRIx64 "\n", env->CSR_CRMD); - qemu_fprintf(f, "PRMD=%016" PRIx64 "\n", env->CSR_PRMD); - qemu_fprintf(f, "EUEN=%016" PRIx64 "\n", env->CSR_EUEN); - qemu_fprintf(f, "ESTAT=%016" PRIx64 "\n", env->CSR_ESTAT); - qemu_fprintf(f, "ERA=%016" PRIx64 "\n", env->CSR_ERA); - qemu_fprintf(f, "BADV=%016" PRIx64 "\n", env->CSR_BADV); - qemu_fprintf(f, "BADI=%016" PRIx64 "\n", env->CSR_BADI); - qemu_fprintf(f, "EENTRY=%016" PRIx64 "\n", env->CSR_EENTRY); - qemu_fprintf(f, "PRCFG1=%016" PRIx64 ", PRCFG2=%016" PRIx64 "," - " PRCFG3=%016" PRIx64 "\n", - env->CSR_PRCFG1, env->CSR_PRCFG2, env->CSR_PRCFG3); - qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY); - qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV); - qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA); - qemu_fprintf(f, "TCFG=%016" PRIx64 "\n", env->CSR_TCFG); - qemu_fprintf(f, "TVAL=%016" PRIx64 "\n", env->CSR_TVAL); + /* csr */ + loongarch_cpu_dump_csr(cs, f); /* fpr */ if (flags & CPU_DUMP_FPU) { @@ -733,17 +882,23 @@ void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) } #ifdef CONFIG_TCG -#include "hw/core/tcg-cpu-ops.h" - static const TCGCPUOps loongarch_tcg_ops = { + .guest_default_memory_order = 0, + .mttcg_supported = true, + .initialize = loongarch_translate_init, + .translate_code = loongarch_translate_code, + .get_tb_cpu_state = loongarch_get_tb_cpu_state, .synchronize_from_tb = loongarch_cpu_synchronize_from_tb, .restore_state_to_opc = loongarch_restore_state_to_opc, + .mmu_index = loongarch_cpu_mmu_index, #ifndef CONFIG_USER_ONLY .tlb_fill = loongarch_cpu_tlb_fill, + .pointer_wrap = loongarch_pointer_wrap, .cpu_exec_interrupt = loongarch_cpu_exec_interrupt, .cpu_exec_halt = loongarch_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = loongarch_cpu_do_interrupt, .do_transaction_failed = loongarch_cpu_do_transaction_failed, #endif @@ -754,6 +909,8 @@ static const TCGCPUOps loongarch_tcg_ops = { #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps loongarch_sysemu_ops = { + .has_work = loongarch_cpu_has_work, + .write_elf64_note = loongarch_cpu_write_elf64_note, .get_phys_page_debug = loongarch_cpu_get_phys_page_debug, }; @@ -765,21 +922,29 @@ static int64_t loongarch_cpu_get_arch_id(CPUState *cs) } #endif -static void loongarch_cpu_class_init(ObjectClass *c, void *data) +static const Property loongarch_cpu_properties[] = { + DEFINE_PROP_INT32("socket-id", LoongArchCPU, socket_id, 0), + DEFINE_PROP_INT32("core-id", LoongArchCPU, core_id, 0), + DEFINE_PROP_INT32("thread-id", LoongArchCPU, thread_id, 0), + DEFINE_PROP_INT32("node-id", LoongArchCPU, node_id, CPU_UNSET_NUMA_NODE_ID), +}; + +static void loongarch_cpu_class_init(ObjectClass *c, const void *data) { LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); DeviceClass *dc = DEVICE_CLASS(c); ResettableClass *rc = 
RESETTABLE_CLASS(c); + device_class_set_props(dc, loongarch_cpu_properties); device_class_set_parent_realize(dc, loongarch_cpu_realizefn, &lacc->parent_realize); + device_class_set_parent_unrealize(dc, loongarch_cpu_unrealizefn, + &lacc->parent_unrealize); resettable_class_set_parent_phases(rc, NULL, loongarch_cpu_reset_hold, NULL, &lacc->parent_phases); cc->class_by_name = loongarch_cpu_class_by_name; - cc->has_work = loongarch_cpu_has_work; - cc->mmu_index = loongarch_cpu_mmu_index; cc->dump_state = loongarch_cpu_dump_state; cc->set_pc = loongarch_cpu_set_pc; cc->get_pc = loongarch_cpu_get_pc; @@ -796,6 +961,7 @@ static void loongarch_cpu_class_init(ObjectClass *c, void *data) #ifdef CONFIG_TCG cc->tcg_ops = &loongarch_tcg_ops; #endif + dc->user_creatable = true; } static const gchar *loongarch32_gdb_arch_name(CPUState *cs) @@ -803,7 +969,7 @@ static const gchar *loongarch32_gdb_arch_name(CPUState *cs) return "loongarch32"; } -static void loongarch32_cpu_class_init(ObjectClass *c, void *data) +static void loongarch32_cpu_class_init(ObjectClass *c, const void *data) { CPUClass *cc = CPU_CLASS(c); @@ -816,7 +982,7 @@ static const gchar *loongarch64_gdb_arch_name(CPUState *cs) return "loongarch64"; } -static void loongarch64_cpu_class_init(ObjectClass *c, void *data) +static void loongarch64_cpu_class_init(ObjectClass *c, const void *data) { CPUClass *cc = CPU_CLASS(c); diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h index 6c41fafb708..9538e8d61d7 100644 --- a/target/loongarch/cpu.h +++ b/target/loongarch/cpu.h @@ -9,12 +9,14 @@ #define LOONGARCH_CPU_H #include "qemu/int128.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #include "fpu/softfloat-types.h" #include "hw/registerfields.h" #include "qemu/timer.h" #ifndef CONFIG_USER_ONLY -#include "exec/memory.h" +#include "system/memory.h" #endif #include "cpu-csr.h" #include "cpu-qom.h" @@ -129,7 +131,7 @@ FIELD(CPUCFG1, RI, 21, 1) FIELD(CPUCFG1, EP, 22, 1) FIELD(CPUCFG1, RPLV, 23, 1) FIELD(CPUCFG1, HP, 24, 1) -FIELD(CPUCFG1, IOCSR_BRD, 25, 1) +FIELD(CPUCFG1, CRC, 25, 1) FIELD(CPUCFG1, MSG_INT, 26, 1) /* cpucfg[1].arch */ @@ -153,6 +155,7 @@ FIELD(CPUCFG2, LLFTP_VER, 15, 3) FIELD(CPUCFG2, LBT_X86, 18, 1) FIELD(CPUCFG2, LBT_ARM, 19, 1) FIELD(CPUCFG2, LBT_MIPS, 20, 1) +FIELD(CPUCFG2, LBT_ALL, 18, 3) FIELD(CPUCFG2, LSPW, 21, 1) FIELD(CPUCFG2, LAM, 22, 1) @@ -281,6 +284,26 @@ struct LoongArchTLB { typedef struct LoongArchTLB LoongArchTLB; #endif +enum loongarch_features { + LOONGARCH_FEATURE_LSX, + LOONGARCH_FEATURE_LASX, + LOONGARCH_FEATURE_LBT, /* loongson binary translation extension */ + LOONGARCH_FEATURE_PMU, + LOONGARCH_FEATURE_PV_IPI, + LOONGARCH_FEATURE_STEALTIME, +}; + +typedef struct LoongArchBT { + /* scratch registers */ + uint64_t scr0; + uint64_t scr1; + uint64_t scr2; + uint64_t scr3; + /* loongarch eflags */ + uint32_t eflags; + uint32_t ftop; +} lbt_t; + typedef struct CPUArchState { uint64_t gpr[32]; uint64_t pc; @@ -288,8 +311,10 @@ typedef struct CPUArchState { fpr_t fpr[32]; bool cf[8]; uint32_t fcsr0; + lbt_t lbt; uint32_t cpucfg[21]; + uint32_t pv_features; /* LoongArch CSRs */ uint64_t CSR_CRMD; @@ -346,6 +371,9 @@ typedef struct CPUArchState { uint64_t CSR_DBG; uint64_t CSR_DERA; uint64_t CSR_DSAVE; + struct { + uint64_t guest_addr; + } stealtime; #ifdef CONFIG_TCG float_status fp_status; @@ -362,13 +390,17 @@ typedef struct CPUArchState { bool load_elf; uint64_t elf_address; uint32_t mp_state; - /* Store ipistate to access from this struct */ - DeviceState 
*ipistate; struct loongarch_boot_info *boot_info; #endif } CPULoongArchState; +typedef struct LoongArchCPUTopo { + int32_t socket_id; /* socket-id of this VCPU */ + int32_t core_id; /* core-id of this VCPU */ + int32_t thread_id; /* thread-id of this VCPU */ +} LoongArchCPUTopo; + /** * LoongArchCPU: * @env: #CPULoongArchState @@ -381,11 +413,22 @@ struct ArchCPU { CPULoongArchState env; QEMUTimer timer; uint32_t phy_id; + OnOffAuto lbt; + OnOffAuto pmu; + OnOffAuto lsx; + OnOffAuto lasx; + OnOffAuto kvm_pv_ipi; + OnOffAuto kvm_steal_time; + int32_t socket_id; /* socket-id of this CPU */ + int32_t core_id; /* core-id of this CPU */ + int32_t thread_id; /* thread-id of this CPU */ + int32_t node_id; /* NUMA node of this CPU */ /* 'compatible' string for this CPU for Linux device trees */ const char *dtb_compatible; /* used by KVM_REG_LOONGARCH_COUNTER ioctl to access guest time counters */ uint64_t kvm_state_counter; + VMChangeStateEntry *vmsentry; }; /** @@ -399,6 +442,7 @@ struct LoongArchCPUClass { CPUClass parent_class; DeviceRealize parent_realize; + DeviceUnrealize parent_unrealize; ResettablePhases parent_phases; }; @@ -448,22 +492,17 @@ static inline void set_pc(CPULoongArchState *env, uint64_t value) #define HW_FLAGS_VA32 0x20 #define HW_FLAGS_EUEN_ASXE 0x40 -static inline void cpu_get_tb_cpu_state(CPULoongArchState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags) -{ - *pc = env->pc; - *cs_base = 0; - *flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK); - *flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE; - *flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE) * HW_FLAGS_EUEN_SXE; - *flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE) * HW_FLAGS_EUEN_ASXE; - *flags |= is_va32(env) * HW_FLAGS_VA32; -} - -#include "exec/cpu-all.h" - #define CPU_RESOLVING_TYPE TYPE_LOONGARCH_CPU void loongarch_cpu_post_init(Object *obj); +#ifdef CONFIG_KVM +void kvm_loongarch_cpu_post_init(LoongArchCPU *cpu); +#else +static inline void kvm_loongarch_cpu_post_init(LoongArchCPU *cpu) +{ +} +#endif +void kvm_loongarch_init_irq_routing(void); + #endif /* LOONGARCH_CPU_H */ diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c index 580362ac3e9..b5f732f15b7 100644 --- a/target/loongarch/cpu_helper.c +++ b/target/loongarch/cpu_helper.c @@ -7,162 +7,143 @@ */ #include "qemu/osdep.h" +#include "system/tcg.h" #include "cpu.h" +#include "accel/tcg/cpu-mmu-index.h" +#include "exec/target_page.h" #include "internals.h" #include "cpu-csr.h" +#include "tcg/tcg_loongarch.h" -#ifdef CONFIG_TCG -static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - int access_type, int index, int mmu_idx) +void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base, + uint64_t *dir_width, target_ulong level) { - LoongArchTLB *tlb = &env->tlb[index]; - uint64_t plv = mmu_idx; - uint64_t tlb_entry, tlb_ppn; - uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv; - - if (index >= LOONGARCH_STLB) { - tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); - } else { - tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + switch (level) { + case 1: + *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE); + *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH); + break; + case 2: + *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE); + *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH); + break; + case 3: + *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE); + 
*dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH); + break; + case 4: + *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE); + *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH); + break; + default: + /* level may be zero for ldpte */ + *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE); + *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH); + break; } - n = (address >> tlb_ps) & 0x1;/* Odd or even */ +} - tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0; - tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V); - tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D); - tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV); - if (is_la64(env)) { - tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN); - tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX); - tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR); - tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV); +static int loongarch_page_table_walker(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address) +{ + CPUState *cs = env_cpu(env); + target_ulong index, phys; + uint64_t dir_base, dir_width; + uint64_t base; + int level; + + if ((address >> 63) & 0x1) { + base = env->CSR_PGDH; } else { - tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN); - tlb_nx = 0; - tlb_nr = 0; - tlb_rplv = 0; + base = env->CSR_PGDL; } + base &= TARGET_PHYS_MASK; - /* Remove sw bit between bit12 -- bit PS*/ - tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) -1)); + for (level = 4; level > 0; level--) { + get_dir_base_width(env, &dir_base, &dir_width, level); - /* Check access rights */ - if (!tlb_v) { - return TLBRET_INVALID; - } + if (dir_width == 0) { + continue; + } - if (access_type == MMU_INST_FETCH && tlb_nx) { - return TLBRET_XI; + /* get next level page directory */ + index = (address >> dir_base) & ((1 << dir_width) - 1); + phys = base | index << 3; + base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK; + if (FIELD_EX64(base, TLBENTRY, HUGE)) { + /* base is a huge pte */ + break; + } } - if (access_type == MMU_DATA_LOAD && tlb_nr) { - return TLBRET_RI; + /* pte */ + if (FIELD_EX64(base, TLBENTRY, HUGE)) { + /* Huge Page. base is pte */ + base = FIELD_DP64(base, TLBENTRY, LEVEL, 0); + base = FIELD_DP64(base, TLBENTRY, HUGE, 0); + if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) { + base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0); + base = FIELD_DP64(base, TLBENTRY, G, 1); + } + } else { + /* Normal Page. base points to pte */ + get_dir_base_width(env, &dir_base, &dir_width, 0); + index = (address >> dir_base) & ((1 << dir_width) - 1); + phys = base | index << 3; + base = ldq_phys(cs->as, phys); } - if (((tlb_rplv == 0) && (plv > tlb_plv)) || - ((tlb_rplv == 1) && (plv != tlb_plv))) { - return TLBRET_PE; - } + /* TODO: check plv and other bits? */ - if ((access_type == MMU_DATA_STORE) && !tlb_d) { - return TLBRET_DIRTY; + /* base is pte, in normal pte format */ + if (!FIELD_EX64(base, TLBENTRY, V)) { + return TLBRET_NOMATCH; } - *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) | - (address & MAKE_64BIT_MASK(0, tlb_ps)); - *prot = PAGE_READ; - if (tlb_d) { - *prot |= PAGE_WRITE; - } - if (!tlb_nx) { - *prot |= PAGE_EXEC; + if (!FIELD_EX64(base, TLBENTRY, D)) { + *prot = PAGE_READ; + } else { + *prot = PAGE_READ | PAGE_WRITE; } - return TLBRET_MATCH; -} -/* - * One tlb entry holds an adjacent odd/even pair, the vpn is the - * content of the virtual page number divided by 2. So the - * compare vpn is bit[47:15] for 16KiB page. while the vppn - * field in tlb entry contains bit[47:13], so need adjust. 
- * virt_vpn = vaddr[47:13] - */ -bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, - int *index) -{ - LoongArchTLB *tlb; - uint16_t csr_asid, tlb_asid, stlb_idx; - uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps; - int i, compare_shift; - uint64_t vpn, tlb_vppn; - - csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); - stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); - vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1); - stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */ - compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; - - /* Search STLB */ - for (i = 0; i < 8; ++i) { - tlb = &env->tlb[i * 256 + stlb_idx]; - tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); - if (tlb_e) { - tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); - tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); - tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); - - if ((tlb_g == 1 || tlb_asid == csr_asid) && - (vpn == (tlb_vppn >> compare_shift))) { - *index = i * 256 + stlb_idx; - return true; - } - } - } + /* get TARGET_PAGE_SIZE aligned physical address */ + base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1); + /* mask RPLV, NX, NR bits */ + base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0); + base = FIELD_DP64(base, TLBENTRY_64, NX, 0); + base = FIELD_DP64(base, TLBENTRY_64, NR, 0); + /* mask other attribute bits */ + *physical = base & TARGET_PAGE_MASK; - /* Search MTLB */ - for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) { - tlb = &env->tlb[i]; - tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); - if (tlb_e) { - tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); - tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); - tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); - tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); - compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; - vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1); - if ((tlb_g == 1 || tlb_asid == csr_asid) && - (vpn == (tlb_vppn >> compare_shift))) { - *index = i; - return true; - } - } - } - return false; + return 0; } static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical, int *prot, target_ulong address, - MMUAccessType access_type, int mmu_idx) + MMUAccessType access_type, int mmu_idx, + int is_debug) { - int index, match; + int ret; + + if (tcg_enabled()) { + ret = loongarch_get_addr_from_tlb(env, physical, prot, address, + access_type, mmu_idx); + if (ret != TLBRET_NOMATCH) { + return ret; + } + } - match = loongarch_tlb_search(env, address, &index); - if (match) { - return loongarch_map_tlb_entry(env, physical, prot, - address, access_type, index, mmu_idx); + if (is_debug) { + /* + * For debugger memory access, we want to do the map when there is a + * legal mapping, even if the mapping is not yet in TLB. return 0 if + * there is a valid map, else none zero. 
+ */ + return loongarch_page_table_walker(env, physical, prot, address); } return TLBRET_NOMATCH; } -#else -static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - MMUAccessType access_type, int mmu_idx) -{ - return TLBRET_NOMATCH; -} -#endif static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va, target_ulong dmw) @@ -178,7 +159,7 @@ static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va, int get_physical_address(CPULoongArchState *env, hwaddr *physical, int *prot, target_ulong address, - MMUAccessType access_type, int mmu_idx) + MMUAccessType access_type, int mmu_idx, int is_debug) { int user_mode = mmu_idx == MMU_USER_IDX; int kernel_mode = mmu_idx == MMU_KERNEL_IDX; @@ -215,14 +196,14 @@ int get_physical_address(CPULoongArchState *env, hwaddr *physical, } /* Check valid extension */ - addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16); - if (!(addr_high == 0 || addr_high == -1)) { + addr_high = (int64_t)address >> (TARGET_VIRT_ADDR_SPACE_BITS - 1); + if (!(addr_high == 0 || addr_high == -1ULL)) { return TLBRET_BADADDR; } /* Mapped address */ return loongarch_map_address(env, physical, prot, address, - access_type, mmu_idx); + access_type, mmu_idx, is_debug); } hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) @@ -232,7 +213,7 @@ hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) int prot; if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD, - cpu_mmu_index(cs, false)) != 0) { + cpu_mmu_index(cs, false), 1) != 0) { return -1; } return phys_addr; diff --git a/target/loongarch/csr.c b/target/loongarch/csr.c new file mode 100644 index 00000000000..7ea0a304506 --- /dev/null +++ b/target/loongarch/csr.c @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2025 Loongson Technology Corporation Limited + */ +#include +#include "qemu/osdep.h" +#include "cpu.h" +#include "csr.h" + +#define CSR_OFF_FUNCS(NAME, FL, RD, WR) \ + [LOONGARCH_CSR_##NAME] = { \ + .name = (stringify(NAME)), \ + .offset = offsetof(CPULoongArchState, CSR_##NAME), \ + .flags = FL, .readfn = RD, .writefn = WR \ + } + +#define CSR_OFF_ARRAY(NAME, N) \ + [LOONGARCH_CSR_##NAME(N)] = { \ + .name = (stringify(NAME##N)), \ + .offset = offsetof(CPULoongArchState, CSR_##NAME[N]), \ + .flags = 0, .readfn = NULL, .writefn = NULL \ + } + +#define CSR_OFF_FLAGS(NAME, FL) CSR_OFF_FUNCS(NAME, FL, NULL, NULL) +#define CSR_OFF(NAME) CSR_OFF_FLAGS(NAME, 0) + +static CSRInfo csr_info[] = { + CSR_OFF_FLAGS(CRMD, CSRFL_EXITTB), + CSR_OFF(PRMD), + CSR_OFF_FLAGS(EUEN, CSRFL_EXITTB), + CSR_OFF_FLAGS(MISC, CSRFL_READONLY), + CSR_OFF(ECFG), + CSR_OFF_FLAGS(ESTAT, CSRFL_EXITTB), + CSR_OFF(ERA), + CSR_OFF(BADV), + CSR_OFF_FLAGS(BADI, CSRFL_READONLY), + CSR_OFF(EENTRY), + CSR_OFF(TLBIDX), + CSR_OFF(TLBEHI), + CSR_OFF(TLBELO0), + CSR_OFF(TLBELO1), + CSR_OFF_FLAGS(ASID, CSRFL_EXITTB), + CSR_OFF(PGDL), + CSR_OFF(PGDH), + CSR_OFF_FLAGS(PGD, CSRFL_READONLY), + CSR_OFF(PWCL), + CSR_OFF(PWCH), + CSR_OFF(STLBPS), + CSR_OFF(RVACFG), + CSR_OFF_FLAGS(CPUID, CSRFL_READONLY), + CSR_OFF_FLAGS(PRCFG1, CSRFL_READONLY), + CSR_OFF_FLAGS(PRCFG2, CSRFL_READONLY), + CSR_OFF_FLAGS(PRCFG3, CSRFL_READONLY), + CSR_OFF_ARRAY(SAVE, 0), + CSR_OFF_ARRAY(SAVE, 1), + CSR_OFF_ARRAY(SAVE, 2), + CSR_OFF_ARRAY(SAVE, 3), + CSR_OFF_ARRAY(SAVE, 4), + CSR_OFF_ARRAY(SAVE, 5), + CSR_OFF_ARRAY(SAVE, 6), + CSR_OFF_ARRAY(SAVE, 7), + CSR_OFF_ARRAY(SAVE, 8), + CSR_OFF_ARRAY(SAVE, 9), + CSR_OFF_ARRAY(SAVE, 10), + 
CSR_OFF_ARRAY(SAVE, 11), + CSR_OFF_ARRAY(SAVE, 12), + CSR_OFF_ARRAY(SAVE, 13), + CSR_OFF_ARRAY(SAVE, 14), + CSR_OFF_ARRAY(SAVE, 15), + CSR_OFF(TID), + CSR_OFF_FLAGS(TCFG, CSRFL_IO), + CSR_OFF_FLAGS(TVAL, CSRFL_READONLY | CSRFL_IO), + CSR_OFF(CNTC), + CSR_OFF_FLAGS(TICLR, CSRFL_IO), + CSR_OFF(LLBCTL), + CSR_OFF(IMPCTL1), + CSR_OFF(IMPCTL2), + CSR_OFF(TLBRENTRY), + CSR_OFF(TLBRBADV), + CSR_OFF(TLBRERA), + CSR_OFF(TLBRSAVE), + CSR_OFF(TLBRELO0), + CSR_OFF(TLBRELO1), + CSR_OFF(TLBREHI), + CSR_OFF(TLBRPRMD), + CSR_OFF(MERRCTL), + CSR_OFF(MERRINFO1), + CSR_OFF(MERRINFO2), + CSR_OFF(MERRENTRY), + CSR_OFF(MERRERA), + CSR_OFF(MERRSAVE), + CSR_OFF(CTAG), + CSR_OFF_ARRAY(DMW, 0), + CSR_OFF_ARRAY(DMW, 1), + CSR_OFF_ARRAY(DMW, 2), + CSR_OFF_ARRAY(DMW, 3), + CSR_OFF(DBG), + CSR_OFF(DERA), + CSR_OFF(DSAVE), +}; + +CSRInfo *get_csr(unsigned int csr_num) +{ + CSRInfo *csr; + + if (csr_num >= ARRAY_SIZE(csr_info)) { + return NULL; + } + + csr = &csr_info[csr_num]; + if (csr->offset == 0) { + return NULL; + } + + return csr; +} + +bool set_csr_flag(unsigned int csr_num, int flag) +{ + CSRInfo *csr; + + csr = get_csr(csr_num); + if (!csr) { + return false; + } + + csr->flags |= flag; + return true; +} diff --git a/target/loongarch/csr.h b/target/loongarch/csr.h new file mode 100644 index 00000000000..81a656baaed --- /dev/null +++ b/target/loongarch/csr.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2025 Loongson Technology Corporation Limited + */ + +#ifndef TARGET_LOONGARCH_CSR_H +#define TARGET_LOONGARCH_CSR_H + +#include "cpu-csr.h" + +typedef void (*GenCSRFunc)(void); +enum { + CSRFL_READONLY = (1 << 0), + CSRFL_EXITTB = (1 << 1), + CSRFL_IO = (1 << 2), + CSRFL_UNUSED = (1 << 3), +}; + +typedef struct { + const char *name; + int offset; + int flags; + GenCSRFunc readfn; + GenCSRFunc writefn; +} CSRInfo; + +CSRInfo *get_csr(unsigned int csr_num); +bool set_csr_flag(unsigned int csr_num, int flag); +#endif /* TARGET_LOONGARCH_CSR_H */ diff --git a/target/loongarch/gdbstub.c b/target/loongarch/gdbstub.c index 7ca245ee81b..471eda28c73 100644 --- a/target/loongarch/gdbstub.c +++ b/target/loongarch/gdbstub.c @@ -34,26 +34,28 @@ void write_fcc(CPULoongArchState *env, uint64_t val) int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { CPULoongArchState *env = cpu_env(cs); - uint64_t val; - - if (0 <= n && n < 32) { - val = env->gpr[n]; - } else if (n == 32) { - /* orig_a0 */ - val = 0; - } else if (n == 33) { - val = env->pc; - } else if (n == 34) { - val = env->CSR_BADV; - } if (0 <= n && n <= 34) { + uint64_t val; + + if (n < 32) { + val = env->gpr[n]; + } else if (n == 32) { + /* orig_a0 */ + val = 0; + } else if (n == 33) { + val = env->pc; + } else /* if (n == 34) */ { + val = env->CSR_BADV; + } + if (is_la64(env)) { return gdb_get_reg64(mem_buf, val); } else { return gdb_get_reg32(mem_buf, val); } } + return 0; } @@ -61,23 +63,24 @@ int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { CPULoongArchState *env = cpu_env(cs); target_ulong tmp; - int read_length; int length = 0; + if (n < 0 || n > 34) { + return 0; + } + if (is_la64(env)) { - tmp = ldq_p(mem_buf); - read_length = 8; + tmp = ldq_le_p(mem_buf); + length = 8; } else { - tmp = ldl_p(mem_buf); - read_length = 4; + tmp = ldl_le_p(mem_buf); + length = 4; } if (0 <= n && n < 32) { env->gpr[n] = tmp; - length = read_length; } else if (n == 33) { set_pc(env, tmp); - length = read_length; } return length; } @@ -104,13 +107,13 @@ static int 
loongarch_gdb_set_fpu(CPUState *cs, uint8_t *mem_buf, int n) int length = 0; if (0 <= n && n < 32) { - env->fpr[n].vreg.D(0) = ldq_p(mem_buf); + env->fpr[n].vreg.D(0) = ldq_le_p(mem_buf); length = 8; } else if (32 <= n && n < 40) { env->cf[n - 32] = ldub_p(mem_buf); length = 1; } else if (n == 40) { - env->fcsr0 = ldl_p(mem_buf); + env->fcsr0 = ldl_le_p(mem_buf); length = 4; } return length; diff --git a/target/loongarch/helper.h b/target/loongarch/helper.h index b3b64a02153..99981abd01c 100644 --- a/target/loongarch/helper.h +++ b/target/loongarch/helper.h @@ -1,720 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Copyright (c) 2021 Loongson Technology Corporation Limited + * Copyright (c) 2025 Loongson Technology Corporation Limited */ -DEF_HELPER_2(raise_exception, noreturn, env, i32) - -DEF_HELPER_FLAGS_1(bitrev_w, TCG_CALL_NO_RWG_SE, tl, tl) -DEF_HELPER_FLAGS_1(bitrev_d, TCG_CALL_NO_RWG_SE, tl, tl) -DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl) - -DEF_HELPER_FLAGS_3(asrtle_d, TCG_CALL_NO_WG, void, env, tl, tl) -DEF_HELPER_FLAGS_3(asrtgt_d, TCG_CALL_NO_WG, void, env, tl, tl) - -DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) -DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) -DEF_HELPER_FLAGS_2(cpucfg, TCG_CALL_NO_RWG_SE, tl, env, tl) - -/* Floating-point helper */ -DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fsub_s, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fsub_d, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fmul_s, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fmul_d, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fmax_s, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fmax_d, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fmin_s, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fmin_d, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fmaxa_s, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fmaxa_d, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fmina_s, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fmina_d, TCG_CALL_NO_WG, i64, env, i64, i64) - -DEF_HELPER_FLAGS_5(fmuladd_s, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i32) -DEF_HELPER_FLAGS_5(fmuladd_d, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i32) - -DEF_HELPER_FLAGS_3(fscaleb_s, TCG_CALL_NO_WG, i64, env, i64, i64) -DEF_HELPER_FLAGS_3(fscaleb_d, TCG_CALL_NO_WG, i64, env, i64, i64) - -DEF_HELPER_FLAGS_2(flogb_s, TCG_CALL_NO_WG, i64, env, i64) -DEF_HELPER_FLAGS_2(flogb_d, TCG_CALL_NO_WG, i64, env, i64) - -DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_WG, i64, env, i64) -DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_WG, i64, env, i64) -DEF_HELPER_FLAGS_2(frsqrt_s, TCG_CALL_NO_WG, i64, env, i64) -DEF_HELPER_FLAGS_2(frsqrt_d, TCG_CALL_NO_WG, i64, env, i64) -DEF_HELPER_FLAGS_2(frecip_s, TCG_CALL_NO_WG, i64, env, i64) -DEF_HELPER_FLAGS_2(frecip_d, TCG_CALL_NO_WG, i64, env, i64) - -DEF_HELPER_FLAGS_2(fclass_s, TCG_CALL_NO_RWG_SE, i64, env, i64) -DEF_HELPER_FLAGS_2(fclass_d, TCG_CALL_NO_RWG_SE, i64, env, i64) - -/* fcmp.cXXX.s */ -DEF_HELPER_4(fcmp_c_s, i64, env, i64, i64, i32) -/* fcmp.sXXX.s */ -DEF_HELPER_4(fcmp_s_s, i64, env, i64, i64, i32) -/* fcmp.cXXX.d */ -DEF_HELPER_4(fcmp_c_d, i64, env, i64, i64, i32) -/* fcmp.sXXX.d */ -DEF_HELPER_4(fcmp_s_d, i64, env, i64, i64, i32) 
- -DEF_HELPER_2(fcvt_d_s, i64, env, i64) -DEF_HELPER_2(fcvt_s_d, i64, env, i64) -DEF_HELPER_2(ffint_d_w, i64, env, i64) -DEF_HELPER_2(ffint_d_l, i64, env, i64) -DEF_HELPER_2(ffint_s_w, i64, env, i64) -DEF_HELPER_2(ffint_s_l, i64, env, i64) -DEF_HELPER_2(ftintrm_l_s, i64, env, i64) -DEF_HELPER_2(ftintrm_l_d, i64, env, i64) -DEF_HELPER_2(ftintrm_w_s, i64, env, i64) -DEF_HELPER_2(ftintrm_w_d, i64, env, i64) -DEF_HELPER_2(ftintrp_l_s, i64, env, i64) -DEF_HELPER_2(ftintrp_l_d, i64, env, i64) -DEF_HELPER_2(ftintrp_w_s, i64, env, i64) -DEF_HELPER_2(ftintrp_w_d, i64, env, i64) -DEF_HELPER_2(ftintrz_l_s, i64, env, i64) -DEF_HELPER_2(ftintrz_l_d, i64, env, i64) -DEF_HELPER_2(ftintrz_w_s, i64, env, i64) -DEF_HELPER_2(ftintrz_w_d, i64, env, i64) -DEF_HELPER_2(ftintrne_l_s, i64, env, i64) -DEF_HELPER_2(ftintrne_l_d, i64, env, i64) -DEF_HELPER_2(ftintrne_w_s, i64, env, i64) -DEF_HELPER_2(ftintrne_w_d, i64, env, i64) -DEF_HELPER_2(ftint_l_s, i64, env, i64) -DEF_HELPER_2(ftint_l_d, i64, env, i64) -DEF_HELPER_2(ftint_w_s, i64, env, i64) -DEF_HELPER_2(ftint_w_d, i64, env, i64) -DEF_HELPER_2(frint_s, i64, env, i64) -DEF_HELPER_2(frint_d, i64, env, i64) - -DEF_HELPER_FLAGS_1(set_rounding_mode, TCG_CALL_NO_RWG, void, env) - -DEF_HELPER_1(rdtime_d, i64, env) - -#ifndef CONFIG_USER_ONLY -/* CSRs helper */ -DEF_HELPER_1(csrrd_pgd, i64, env) -DEF_HELPER_1(csrrd_cpuid, i64, env) -DEF_HELPER_1(csrrd_tval, i64, env) -DEF_HELPER_2(csrwr_estat, i64, env, tl) -DEF_HELPER_2(csrwr_asid, i64, env, tl) -DEF_HELPER_2(csrwr_tcfg, i64, env, tl) -DEF_HELPER_2(csrwr_ticlr, i64, env, tl) -DEF_HELPER_2(iocsrrd_b, i64, env, tl) -DEF_HELPER_2(iocsrrd_h, i64, env, tl) -DEF_HELPER_2(iocsrrd_w, i64, env, tl) -DEF_HELPER_2(iocsrrd_d, i64, env, tl) -DEF_HELPER_3(iocsrwr_b, void, env, tl, tl) -DEF_HELPER_3(iocsrwr_h, void, env, tl, tl) -DEF_HELPER_3(iocsrwr_w, void, env, tl, tl) -DEF_HELPER_3(iocsrwr_d, void, env, tl, tl) - -/* TLB helper */ -DEF_HELPER_1(tlbwr, void, env) -DEF_HELPER_1(tlbfill, void, env) -DEF_HELPER_1(tlbsrch, void, env) -DEF_HELPER_1(tlbrd, void, env) -DEF_HELPER_1(tlbclr, void, env) -DEF_HELPER_1(tlbflush, void, env) -DEF_HELPER_1(invtlb_all, void, env) -DEF_HELPER_2(invtlb_all_g, void, env, i32) -DEF_HELPER_2(invtlb_all_asid, void, env, tl) -DEF_HELPER_3(invtlb_page_asid, void, env, tl, tl) -DEF_HELPER_3(invtlb_page_asid_or_g, void, env, tl, tl) - -DEF_HELPER_4(lddir, tl, env, tl, tl, i32) -DEF_HELPER_4(ldpte, void, env, tl, tl, i32) -DEF_HELPER_1(ertn, void, env) -DEF_HELPER_1(idle, void, env) -#endif - -/* LoongArch LSX */ -DEF_HELPER_FLAGS_4(vhaddw_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhaddw_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhaddw_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhaddw_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhaddw_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhaddw_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhaddw_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhaddw_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhsubw_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhsubw_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhsubw_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhsubw_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhsubw_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
-DEF_HELPER_FLAGS_4(vhsubw_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhsubw_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vhsubw_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vaddwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwev_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwod_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vsubwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwev_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwod_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vaddwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwev_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwod_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vsubwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwev_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsubwod_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vaddwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwev_q_du_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vaddwod_q_du_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vavg_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vavg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vavg_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
-DEF_HELPER_FLAGS_4(vavg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vavg_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vavg_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vavg_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vavg_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vavgr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vavgr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vavgr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vavgr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vavgr_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vavgr_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vavgr_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vavgr_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vabsd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vabsd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vabsd_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vabsd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vabsd_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vabsd_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vabsd_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vabsd_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vadda_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vadda_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vadda_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vadda_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vmini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmini_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmini_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmini_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmini_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmini_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vmaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmaxi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmaxi_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmaxi_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmaxi_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vmaxi_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vmuh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmuh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmuh_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmuh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmuh_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmuh_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmuh_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmuh_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - 
-DEF_HELPER_FLAGS_4(vmulwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vmulwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vmulwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmulwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vmadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmadd_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmsub_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmsub_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vmaddwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vmaddwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vmaddwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmaddwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vdiv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vdiv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
-DEF_HELPER_FLAGS_4(vdiv_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vdiv_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vdiv_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vdiv_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vdiv_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmod_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmod_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmod_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmod_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmod_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmod_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmod_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vmod_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vsat_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsat_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsat_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsat_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsat_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsat_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsat_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsat_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_3(vexth_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vexth_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vexth_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vexth_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vexth_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vexth_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vexth_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vexth_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_3(vext2xv_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vext2xv_w_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vext2xv_d_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vext2xv_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vext2xv_d_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vext2xv_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vext2xv_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vext2xv_wu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vext2xv_du_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vext2xv_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vext2xv_du_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vext2xv_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vsigncov_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsigncov_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsigncov_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsigncov_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_3(vmskltz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vmskltz_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vmskltz_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vmskltz_d, 
TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vmskgez_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vmsknz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vnori_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vsllwil_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsllwil_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsllwil_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_3(vextl_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsllwil_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsllwil_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsllwil_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_3(vextl_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vsrlr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrlr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrlr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrlr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrlri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrlri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrlri_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrlri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vsrar_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrar_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrar_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrari_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrari_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrari_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrari_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vsrln_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrln_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrln_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsran_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsran_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsran_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vsrlni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrlni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrlni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrlni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrani_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrani_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrani_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrani_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vsrlrn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrlrn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrlrn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrarn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrarn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vsrarn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vsrlrni_b_h, 
TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrlrni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrlrni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrlrni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrarni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrarni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrarni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vsrarni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vssrln_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrln_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrln_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssran_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssran_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssran_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrln_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrln_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrln_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssran_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssran_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssran_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vssrlni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrani_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrani_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrani_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrani_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlni_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlni_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlni_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlni_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrani_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrani_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrani_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrani_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vssrlrn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrlrn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrlrn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrarn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrarn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrarn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrlrn_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrlrn_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrlrn_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrarn_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vssrarn_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
-DEF_HELPER_FLAGS_4(vssrarn_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vssrlrni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlrni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlrni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlrni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrarni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrarni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrarni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrarni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlrni_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlrni_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlrni_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrlrni_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrarni_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrarni_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrarni_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vssrarni_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_3(vclo_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vclo_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vclo_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vclo_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vclz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vclz_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vclz_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vclz_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_3(vpcnt_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vpcnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vpcnt_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_3(vpcnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vbitclr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vbitclr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vbitclr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vbitclr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vbitclri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vbitclri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vbitclri_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vbitclri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vbitset_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vbitset_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vbitset_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vbitset_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vbitseti_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vbitseti_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vbitseti_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vbitseti_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vbitrev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vbitrev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vbitrev_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
-DEF_HELPER_FLAGS_4(vbitrev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vbitrevi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vbitrevi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vbitrevi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vbitrevi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vfrstp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vfrstp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vfrstpi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vfrstpi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_5(vfadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfdiv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) - -DEF_HELPER_FLAGS_6(vfmadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_6(vfmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_6(vfmsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_6(vfmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_6(vfnmadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_6(vfnmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_6(vfnmsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_6(vfnmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) - -DEF_HELPER_FLAGS_5(vfmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) - -DEF_HELPER_FLAGS_5(vfmaxa_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfmaxa_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfmina_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfmina_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) - -DEF_HELPER_FLAGS_4(vflogb_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vflogb_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) - -DEF_HELPER_FLAGS_4(vfclass_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfclass_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) - -DEF_HELPER_FLAGS_4(vfsqrt_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfsqrt_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfrecip_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfrecip_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfrsqrt_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfrsqrt_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) - -DEF_HELPER_FLAGS_4(vfcvtl_s_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfcvth_s_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfcvtl_d_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) 
-DEF_HELPER_FLAGS_4(vfcvth_d_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfcvt_h_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vfcvt_s_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) - -DEF_HELPER_FLAGS_4(vfrintrne_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfrintrne_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfrintrz_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfrintrz_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfrintrp_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfrintrp_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfrintrm_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfrintrm_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfrint_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vfrint_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) - -DEF_HELPER_FLAGS_4(vftintrne_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrne_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrz_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrz_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrp_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrp_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrm_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrm_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftint_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftint_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrz_wu_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrz_lu_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftint_wu_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftint_lu_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vftintrne_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vftintrz_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vftintrp_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vftintrm_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vftint_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrnel_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrneh_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrzl_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrzh_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrpl_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrph_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrml_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintrmh_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftintl_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vftinth_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) - -DEF_HELPER_FLAGS_4(vffint_s_w, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vffint_d_l, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vffint_s_wu, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vffint_d_lu, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) 
-DEF_HELPER_FLAGS_4(vffintl_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_4(vffinth_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) -DEF_HELPER_FLAGS_5(vffint_s_l, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) - -DEF_HELPER_FLAGS_4(vseqi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vseqi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vseqi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vseqi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vslei_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslei_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslei_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslei_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslei_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslei_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslei_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslei_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vslti_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslti_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslti_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslti_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslti_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslti_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslti_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vslti_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_6(vfcmp_c_s, void, env, i32, i32, i32, i32, i32) -DEF_HELPER_6(vfcmp_s_s, void, env, i32, i32, i32, i32, i32) -DEF_HELPER_6(vfcmp_c_d, void, env, i32, i32, i32, i32, i32) -DEF_HELPER_6(vfcmp_s_d, void, env, i32, i32, i32, i32, i32) - -DEF_HELPER_FLAGS_4(vbitseli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_4(vsetanyeqz_b, void, env, i32, i32, i32) -DEF_HELPER_4(vsetanyeqz_h, void, env, i32, i32, i32) -DEF_HELPER_4(vsetanyeqz_w, void, env, i32, i32, i32) -DEF_HELPER_4(vsetanyeqz_d, void, env, i32, i32, i32) -DEF_HELPER_4(vsetallnez_b, void, env, i32, i32, i32) -DEF_HELPER_4(vsetallnez_h, void, env, i32, i32, i32) -DEF_HELPER_4(vsetallnez_w, void, env, i32, i32, i32) -DEF_HELPER_4(vsetallnez_d, void, env, i32, i32, i32) - -DEF_HELPER_FLAGS_4(xvinsve0_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(xvinsve0_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(xvpickve_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(xvpickve_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vpackev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpackev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpackev_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpackev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpackod_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpackod_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpackod_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpackod_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vpickev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpickev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpickev_w, TCG_CALL_NO_RWG, void, ptr, ptr, 
ptr, i32) -DEF_HELPER_FLAGS_4(vpickev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpickod_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpickod_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpickod_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpickod_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_4(vilvl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vilvl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vilvl_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vilvl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vilvh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vilvh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vilvh_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vilvh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) - -DEF_HELPER_FLAGS_5(vshuf_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vshuf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vshuf_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vshuf_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vshuf4i_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vshuf4i_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vshuf4i_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vshuf4i_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vperm_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(vpermi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vpermi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vpermi_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) - -DEF_HELPER_FLAGS_4(vextrins_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vextrins_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vextrins_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) -DEF_HELPER_FLAGS_4(vextrins_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +#include "tcg/helper.h" diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h index 944153b180e..a7384b0d315 100644 --- a/target/loongarch/internals.h +++ b/target/loongarch/internals.h @@ -17,8 +17,8 @@ #define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS) void loongarch_translate_init(void); - -void loongarch_cpu_dump_state(CPUState *cpu, FILE *f, int flags); +void loongarch_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); void G_NORETURN do_raise_exception(CPULoongArchState *env, uint32_t exception, @@ -43,6 +43,8 @@ enum { TLBRET_PE = 7, }; +bool check_ps(CPULoongArchState *ent, uint8_t ps); + extern const VMStateDescription vmstate_loongarch_cpu; void loongarch_cpu_set_irq(void *opaque, int irq, int level); @@ -52,18 +54,13 @@ uint64_t cpu_loongarch_get_constant_timer_counter(LoongArchCPU *cpu); uint64_t cpu_loongarch_get_constant_timer_ticks(LoongArchCPU *cpu); void cpu_loongarch_store_constant_timer_config(LoongArchCPU *cpu, uint64_t value); -bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, - int *index); int get_physical_address(CPULoongArchState *env, hwaddr *physical, int *prot, target_ulong address, - MMUAccessType access_type, int mmu_idx); + MMUAccessType access_type, int mmu_idx, int is_debug); +void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base, + 
uint64_t *dir_width, target_ulong level); hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); -#ifdef CONFIG_TCG -bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); -#endif #endif /* !CONFIG_USER_ONLY */ uint64_t read_fcc(CPULoongArchState *env); @@ -72,5 +69,7 @@ void write_fcc(CPULoongArchState *env, uint64_t val); int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n); int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n); void loongarch_cpu_register_gdb_regs_for_features(CPUState *cs); +int loongarch_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, + int cpuid, DumpState *s); #endif diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c index e1be6a69594..e5ea2dba9da 100644 --- a/target/loongarch/kvm/kvm.c +++ b/target/loongarch/kvm/kvm.c @@ -8,21 +8,23 @@ #include "qemu/osdep.h" #include #include - +#include "asm-loongarch/kvm_para.h" +#include "qapi/error.h" #include "qemu/timer.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" -#include "sysemu/sysemu.h" -#include "sysemu/kvm.h" -#include "sysemu/kvm_int.h" +#include "system/system.h" +#include "system/kvm.h" +#include "system/kvm_int.h" #include "hw/pci/pci.h" #include "exec/memattrs.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/boards.h" #include "hw/irq.h" +#include "hw/loongarch/virt.h" #include "qemu/log.h" #include "hw/loader.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "cpu-csr.h" #include "kvm_loongarch.h" #include "trace.h" @@ -33,6 +35,82 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = { KVM_CAP_LAST_INFO }; +static int kvm_get_stealtime(CPUState *cs) +{ + CPULoongArchState *env = cpu_env(cs); + int err; + struct kvm_device_attr attr = { + .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL, + .attr = KVM_LOONGARCH_VCPU_PVTIME_GPA, + .addr = (uint64_t)&env->stealtime.guest_addr, + }; + + err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr); + if (err) { + return 0; + } + + err = kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, attr); + if (err) { + error_report("PVTIME: KVM_GET_DEVICE_ATTR: %s", strerror(errno)); + return err; + } + + return 0; +} + +static int kvm_set_stealtime(CPUState *cs) +{ + CPULoongArchState *env = cpu_env(cs); + int err; + struct kvm_device_attr attr = { + .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL, + .attr = KVM_LOONGARCH_VCPU_PVTIME_GPA, + .addr = (uint64_t)&env->stealtime.guest_addr, + }; + + err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr); + if (err) { + return 0; + } + + err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr); + if (err) { + error_report("PVTIME: KVM_SET_DEVICE_ATTR %s with gpa "TARGET_FMT_lx, + strerror(errno), env->stealtime.guest_addr); + return err; + } + + return 0; +} + +static int kvm_set_pv_features(CPUState *cs) +{ + CPULoongArchState *env = cpu_env(cs); + int err; + uint64_t val; + struct kvm_device_attr attr = { + .group = KVM_LOONGARCH_VCPU_CPUCFG, + .attr = CPUCFG_KVM_FEATURE, + .addr = (uint64_t)&val, + }; + + err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr); + if (err) { + return 0; + } + + val = env->pv_features; + err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr); + if (err) { + error_report("Fail to set pv feature "TARGET_FMT_lx " with error %s", + val, strerror(errno)); + return err; + } + + return 0; +} + static int kvm_loongarch_get_regs_core(CPUState *cs) { int ret = 0; @@ -476,9 +554,71 @@ static int 
kvm_loongarch_put_regs_fp(CPUState *cs) return ret; } -void kvm_arch_reset_vcpu(CPULoongArchState *env) +static int kvm_loongarch_put_lbt(CPUState *cs) +{ + CPULoongArchState *env = cpu_env(cs); + uint64_t val; + int ret; + + /* check whether the VM supports LBT first */ + if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LBT_ALL) != 7) { + return 0; + } + + /* set six LBT registers including scr0-scr3, eflags, ftop */ + ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR0, &env->lbt.scr0); + ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR1, &env->lbt.scr1); + ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR2, &env->lbt.scr2); + ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR3, &env->lbt.scr3); + /* + * Be cautious: KVM_REG_LOONGARCH_LBT_FTOP is defined as 64-bit, however + * lbt.ftop is 32-bit; the same applies to the KVM_REG_LOONGARCH_LBT_EFLAGS register + */ + val = env->lbt.eflags; + ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_EFLAGS, &val); + val = env->lbt.ftop; + ret |= kvm_set_one_reg(cs, KVM_REG_LOONGARCH_LBT_FTOP, &val); + + return ret; +} + +static int kvm_loongarch_get_lbt(CPUState *cs) +{ + CPULoongArchState *env = cpu_env(cs); + uint64_t val; + int ret; + + /* check whether the VM supports LBT first */ + if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LBT_ALL) != 7) { + return 0; + } + + /* get six LBT registers including scr0-scr3, eflags, ftop */ + ret = kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR0, &env->lbt.scr0); + ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR1, &env->lbt.scr1); + ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR2, &env->lbt.scr2); + ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_SCR3, &env->lbt.scr3); + ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_EFLAGS, &val); + env->lbt.eflags = (uint32_t)val; + ret |= kvm_get_one_reg(cs, KVM_REG_LOONGARCH_LBT_FTOP, &val); + env->lbt.ftop = (uint32_t)val; + + return ret; +} + +void kvm_arch_reset_vcpu(CPUState *cs) { + CPULoongArchState *env = cpu_env(cs); + int ret = 0; + uint64_t unused = 0; + env->mp_state = KVM_MP_STATE_RUNNABLE; + ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_VCPU_RESET, &unused); + if (ret) { + error_report("Failed to set KVM_REG_LOONGARCH_VCPU_RESET: %s", + strerror(errno)); + exit(EXIT_FAILURE); + } } static int kvm_loongarch_get_mpstate(CPUState *cs) @@ -585,7 +725,7 @@ static int kvm_loongarch_put_cpucfg(CPUState *cs) return ret; } -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { int ret; @@ -609,13 +749,24 @@ int kvm_arch_get_registers(CPUState *cs) return ret; } + ret = kvm_loongarch_get_lbt(cs); + if (ret) { + return ret; + } + + ret = kvm_get_stealtime(cs); + if (ret) { + return ret; + } + ret = kvm_loongarch_get_mpstate(cs); return ret; } -int kvm_arch_put_registers(CPUState *cs, int level) +int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) { int ret; + static int once; ret = kvm_loongarch_put_regs_core(cs); if (ret) { @@ -637,6 +788,30 @@ int kvm_arch_put_registers(CPUState *cs, int level) return ret; } + ret = kvm_loongarch_put_lbt(cs); + if (ret) { + return ret; + } + + if (!once) { + ret = kvm_set_pv_features(cs); + if (ret) { + return ret; + } + once = 1; + } + + if (level >= KVM_PUT_FULL_STATE) { + /* + * Only KVM_PUT_FULL_STATE is required; the KVM kernel will clear + * guest_addr for KVM_PUT_RESET_STATE + */ + ret = kvm_set_stealtime(cs); + if (ret) { + return ret; + } + } + ret = kvm_loongarch_put_mpstate(cs); return ret; } @@ -663,21 +838,374 @@ static void kvm_loongarch_vm_stage_change(void *opaque, bool running,
} } +static bool kvm_feature_supported(CPUState *cs, enum loongarch_features feature) +{ + int ret; + struct kvm_device_attr attr; + uint64_t val; + + switch (feature) { + case LOONGARCH_FEATURE_LSX: + attr.group = KVM_LOONGARCH_VM_FEAT_CTRL; + attr.attr = KVM_LOONGARCH_VM_FEAT_LSX; + ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr); + if (ret == 0) { + return true; + } + + /* Fallback to old kernel detect interface */ + val = 0; + attr.group = KVM_LOONGARCH_VCPU_CPUCFG; + /* Cpucfg2 */ + attr.attr = 2; + attr.addr = (uint64_t)&val; + ret = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr); + if (!ret) { + ret = kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr); + if (ret) { + return false; + } + + ret = FIELD_EX32((uint32_t)val, CPUCFG2, LSX); + return (ret != 0); + } + return false; + + case LOONGARCH_FEATURE_LASX: + attr.group = KVM_LOONGARCH_VM_FEAT_CTRL; + attr.attr = KVM_LOONGARCH_VM_FEAT_LASX; + ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr); + if (ret == 0) { + return true; + } + + /* Fallback to old kernel detect interface */ + val = 0; + attr.group = KVM_LOONGARCH_VCPU_CPUCFG; + /* Cpucfg2 */ + attr.attr = 2; + attr.addr = (uint64_t)&val; + ret = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr); + if (!ret) { + ret = kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr); + if (ret) { + return false; + } + + ret = FIELD_EX32((uint32_t)val, CPUCFG2, LASX); + return (ret != 0); + } + return false; + + case LOONGARCH_FEATURE_LBT: + /* + * Return all if all the LBT features are supported such as: + * KVM_LOONGARCH_VM_FEAT_X86BT + * KVM_LOONGARCH_VM_FEAT_ARMBT + * KVM_LOONGARCH_VM_FEAT_MIPSBT + */ + attr.group = KVM_LOONGARCH_VM_FEAT_CTRL; + attr.attr = KVM_LOONGARCH_VM_FEAT_X86BT; + ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr); + attr.attr = KVM_LOONGARCH_VM_FEAT_ARMBT; + ret |= kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr); + attr.attr = KVM_LOONGARCH_VM_FEAT_MIPSBT; + ret |= kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr); + return (ret == 0); + + case LOONGARCH_FEATURE_PMU: + attr.group = KVM_LOONGARCH_VM_FEAT_CTRL; + attr.attr = KVM_LOONGARCH_VM_FEAT_PMU; + ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr); + return (ret == 0); + + case LOONGARCH_FEATURE_PV_IPI: + attr.group = KVM_LOONGARCH_VM_FEAT_CTRL; + attr.attr = KVM_LOONGARCH_VM_FEAT_PV_IPI; + ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr); + return (ret == 0); + + case LOONGARCH_FEATURE_STEALTIME: + attr.group = KVM_LOONGARCH_VM_FEAT_CTRL; + attr.attr = KVM_LOONGARCH_VM_FEAT_PV_STEALTIME; + ret = kvm_vm_ioctl(kvm_state, KVM_HAS_DEVICE_ATTR, &attr); + return (ret == 0); + + default: + return false; + } + + return false; +} + +static int kvm_cpu_check_lsx(CPUState *cs, Error **errp) +{ + CPULoongArchState *env = cpu_env(cs); + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + bool kvm_supported; + + kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_LSX); + env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LSX, 0); + if (cpu->lsx == ON_OFF_AUTO_ON) { + if (kvm_supported) { + env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LSX, 1); + } else { + error_setg(errp, "'lsx' feature not supported by KVM on this host"); + return -ENOTSUP; + } + } else if ((cpu->lsx == ON_OFF_AUTO_AUTO) && kvm_supported) { + env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LSX, 1); + } + + return 0; +} + +static int kvm_cpu_check_lasx(CPUState *cs, Error **errp) +{ + CPULoongArchState *env = cpu_env(cs); + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + bool kvm_supported; + + kvm_supported = 
kvm_feature_supported(cs, LOONGARCH_FEATURE_LASX); + env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LASX, 0); + if (cpu->lasx == ON_OFF_AUTO_ON) { + if (kvm_supported) { + env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LASX, 1); + } else { + error_setg(errp, "'lasx' feature not supported by KVM on host"); + return -ENOTSUP; + } + } else if ((cpu->lasx == ON_OFF_AUTO_AUTO) && kvm_supported) { + env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LASX, 1); + } + + return 0; +} + +static int kvm_cpu_check_lbt(CPUState *cs, Error **errp) +{ + CPULoongArchState *env = cpu_env(cs); + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + bool kvm_supported; + + kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_LBT); + if (cpu->lbt == ON_OFF_AUTO_ON) { + if (kvm_supported) { + env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LBT_ALL, 7); + } else { + error_setg(errp, "'lbt' feature not supported by KVM on this host"); + return -ENOTSUP; + } + } else if ((cpu->lbt == ON_OFF_AUTO_AUTO) && kvm_supported) { + env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LBT_ALL, 7); + } + + return 0; +} + +static int kvm_cpu_check_pmu(CPUState *cs, Error **errp) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = cpu_env(cs); + bool kvm_supported; + + kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_PMU); + if (cpu->pmu == ON_OFF_AUTO_ON) { + if (!kvm_supported) { + error_setg(errp, "'pmu' feature not supported by KVM on the host"); + return -ENOTSUP; + } + } else if (cpu->pmu != ON_OFF_AUTO_AUTO) { + /* disable pmu if ON_OFF_AUTO_OFF is set */ + kvm_supported = false; + } + + if (kvm_supported) { + env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, PMP, 1); + env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, PMNUM, 3); + env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, PMBITS, 63); + env->cpucfg[6] = FIELD_DP32(env->cpucfg[6], CPUCFG6, UPM, 1); + } + return 0; +} + +static int kvm_cpu_check_pv_features(CPUState *cs, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = cpu_env(cs); + bool kvm_supported; + + kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_PV_IPI); + if (cpu->kvm_pv_ipi == ON_OFF_AUTO_ON) { + if (!kvm_supported) { + error_setg(errp, "'pv_ipi' feature not supported by KVM host"); + return -ENOTSUP; + } + } else if (cpu->kvm_pv_ipi != ON_OFF_AUTO_AUTO) { + kvm_supported = false; + } + + if (kvm_supported) { + env->pv_features |= BIT(KVM_FEATURE_IPI); + } + + kvm_supported = kvm_feature_supported(cs, LOONGARCH_FEATURE_STEALTIME); + if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) { + if (!kvm_supported) { + error_setg(errp, "'kvm stealtime' feature not supported by KVM host"); + return -ENOTSUP; + } + } else if (cpu->kvm_steal_time != ON_OFF_AUTO_AUTO) { + kvm_supported = false; + } + + if (kvm_supported) { + env->pv_features |= BIT(KVM_FEATURE_STEAL_TIME); + } + + if (object_dynamic_cast(OBJECT(ms), TYPE_LOONGARCH_VIRT_MACHINE)) { + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(ms); + + if (virt_is_veiointc_enabled(lvms)) { + env->pv_features |= BIT(KVM_FEATURE_VIRT_EXTIOI); + } + } + return 0; +} + +int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp) +{ + return 0; +} + int kvm_arch_init_vcpu(CPUState *cs) { uint64_t val; + int ret; + Error *local_err = NULL; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); - qemu_add_vm_change_state_handler(kvm_loongarch_vm_stage_change, cs); + cpu->vmsentry = qemu_add_vm_change_state_handler( + 
kvm_loongarch_vm_stage_change, cs); if (!kvm_get_one_reg(cs, KVM_REG_LOONGARCH_DEBUG_INST, &val)) { brk_insn = val; } + ret = kvm_cpu_check_lsx(cs, &local_err); + if (ret < 0) { + error_report_err(local_err); + return ret; + } + + ret = kvm_cpu_check_lasx(cs, &local_err); + if (ret < 0) { + error_report_err(local_err); + return ret; + } + + ret = kvm_cpu_check_lbt(cs, &local_err); + if (ret < 0) { + error_report_err(local_err); + return ret; + } + + ret = kvm_cpu_check_pmu(cs, &local_err); + if (ret < 0) { + error_report_err(local_err); + return ret; + } + + ret = kvm_cpu_check_pv_features(cs, &local_err); + if (ret < 0) { + error_report_err(local_err); + return ret; + } + return 0; } +static bool loongarch_get_lbt(Object *obj, Error **errp) +{ + return LOONGARCH_CPU(obj)->lbt != ON_OFF_AUTO_OFF; +} + +static void loongarch_set_lbt(Object *obj, bool value, Error **errp) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(obj); + + cpu->lbt = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; +} + +static bool loongarch_get_pmu(Object *obj, Error **errp) +{ + return LOONGARCH_CPU(obj)->pmu != ON_OFF_AUTO_OFF; +} + +static void loongarch_set_pmu(Object *obj, bool value, Error **errp) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(obj); + + cpu->pmu = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; +} + +static bool kvm_pv_ipi_get(Object *obj, Error **errp) +{ + return LOONGARCH_CPU(obj)->kvm_pv_ipi != ON_OFF_AUTO_OFF; +} + +static void kvm_pv_ipi_set(Object *obj, bool value, Error **errp) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(obj); + + cpu->kvm_pv_ipi = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; +} + +static bool kvm_steal_time_get(Object *obj, Error **errp) +{ + return LOONGARCH_CPU(obj)->kvm_steal_time != ON_OFF_AUTO_OFF; +} + +static void kvm_steal_time_set(Object *obj, bool value, Error **errp) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(obj); + + cpu->kvm_steal_time = value ? 
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; +} + +void kvm_loongarch_cpu_post_init(LoongArchCPU *cpu) +{ + cpu->lbt = ON_OFF_AUTO_AUTO; + object_property_add_bool(OBJECT(cpu), "lbt", loongarch_get_lbt, + loongarch_set_lbt); + object_property_set_description(OBJECT(cpu), "lbt", + "Set off to disable Binary Translation."); + + cpu->pmu = ON_OFF_AUTO_AUTO; + object_property_add_bool(OBJECT(cpu), "pmu", loongarch_get_pmu, + loongarch_set_pmu); + object_property_set_description(OBJECT(cpu), "pmu", + "Set off to disable performance monitor unit."); + + cpu->kvm_pv_ipi = ON_OFF_AUTO_AUTO; + object_property_add_bool(OBJECT(cpu), "kvm-pv-ipi", kvm_pv_ipi_get, + kvm_pv_ipi_set); + object_property_set_description(OBJECT(cpu), "kvm-pv-ipi", + "Set off to disable KVM paravirt IPI."); + + cpu->kvm_steal_time = ON_OFF_AUTO_AUTO; + object_property_add_bool(OBJECT(cpu), "kvm-steal-time", kvm_steal_time_get, + kvm_steal_time_set); + object_property_set_description(OBJECT(cpu), "kvm-steal-time", + "Set off to disable KVM steal time."); +} + int kvm_arch_destroy_vcpu(CPUState *cs) { + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + + qemu_del_vm_change_state_handler(cpu->vmsentry); return 0; } @@ -712,6 +1240,22 @@ void kvm_arch_init_irq_routing(KVMState *s) { } +void kvm_loongarch_init_irq_routing(void) +{ + int i; + + kvm_async_interrupts_allowed = true; + kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled(); + if (kvm_has_gsi_routing()) { + for (i = 0; i < KVM_IRQCHIP_NUM_PINS; ++i) { + kvm_irqchip_add_irq_route(kvm_state, i, 0, i); + } + + kvm_gsi_routing_allowed = true; + kvm_irqchip_commit_routes(kvm_state); + } +} + int kvm_arch_get_default_type(MachineState *ms) { return 0; @@ -725,7 +1269,12 @@ int kvm_arch_init(MachineState *ms, KVMState *s) int kvm_arch_irqchip_create(KVMState *s) { - return 0; + if (kvm_kernel_irqchip_split()) { + error_report("kernel_irqchip=split is not supported on LoongArch"); + exit(1); + } + + return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL); } void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) diff --git a/target/loongarch/kvm/kvm_loongarch.h b/target/loongarch/kvm/kvm_loongarch.h index d945b6bb822..1051a341ec2 100644 --- a/target/loongarch/kvm/kvm_loongarch.h +++ b/target/loongarch/kvm/kvm_loongarch.h @@ -11,6 +11,6 @@ #define QEMU_KVM_LOONGARCH_H int kvm_loongarch_set_interrupt(LoongArchCPU *cpu, int irq, int level); -void kvm_arch_reset_vcpu(CPULoongArchState *env); +void kvm_arch_reset_vcpu(CPUState *cs); #endif diff --git a/target/loongarch/loongarch-qmp-cmds.c b/target/loongarch/loongarch-qmp-cmds.c index 8721a5eb136..1d8cd32f5fc 100644 --- a/target/loongarch/loongarch-qmp-cmds.c +++ b/target/loongarch/loongarch-qmp-cmds.c @@ -7,10 +7,11 @@ */ #include "qemu/osdep.h" +#include "qemu/target-info.h" #include "qapi/error.h" -#include "qapi/qapi-commands-machine-target.h" +#include "qapi/qapi-commands-machine.h" #include "cpu.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/qobject-input-visitor.h" #include "qom/qom-qobject.h" @@ -32,7 +33,7 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) CpuDefinitionInfoList *cpu_list = NULL; GSList *list; - list = object_class_get_list(TYPE_LOONGARCH_CPU, false); + list = object_class_get_list(target_cpu_type(), false); g_slist_foreach(list, loongarch_cpu_add_definition, &cpu_list); g_slist_free(list); @@ -40,7 +41,7 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) } static const char *cpu_model_advertised_features[] = { - "lsx", "lasx", NULL + "lsx", "lasx", "lbt", "pmu", "kvm-pv-ipi",
"kvm-steal-time", NULL }; CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, diff --git a/target/loongarch/machine.c b/target/loongarch/machine.c index 08a7fa5370b..4e70f5c8798 100644 --- a/target/loongarch/machine.c +++ b/target/loongarch/machine.c @@ -8,7 +8,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "migration/cpu.h" -#include "sysemu/tcg.h" +#include "system/tcg.h" #include "vec.h" static const VMStateDescription vmstate_fpu_reg = { @@ -110,6 +110,29 @@ static const VMStateDescription vmstate_lasx = { }, }; +static bool lbt_needed(void *opaque) +{ + LoongArchCPU *cpu = opaque; + + return !!FIELD_EX64(cpu->env.cpucfg[2], CPUCFG2, LBT_ALL); +} + +static const VMStateDescription vmstate_lbt = { + .name = "cpu/lbt", + .version_id = 0, + .minimum_version_id = 0, + .needed = lbt_needed, + .fields = (const VMStateField[]) { + VMSTATE_UINT64(env.lbt.scr0, LoongArchCPU), + VMSTATE_UINT64(env.lbt.scr1, LoongArchCPU), + VMSTATE_UINT64(env.lbt.scr2, LoongArchCPU), + VMSTATE_UINT64(env.lbt.scr3, LoongArchCPU), + VMSTATE_UINT32(env.lbt.eflags, LoongArchCPU), + VMSTATE_UINT32(env.lbt.ftop, LoongArchCPU), + VMSTATE_END_OF_LIST() + }, +}; + #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) static bool tlb_needed(void *opaque) { @@ -145,8 +168,8 @@ static const VMStateDescription vmstate_tlb = { /* LoongArch CPU state */ const VMStateDescription vmstate_loongarch_cpu = { .name = "cpu", - .version_id = 2, - .minimum_version_id = 2, + .version_id = 3, + .minimum_version_id = 3, .fields = (const VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gpr, LoongArchCPU, 32), VMSTATE_UINTTL(env.pc, LoongArchCPU), @@ -209,6 +232,8 @@ const VMStateDescription vmstate_loongarch_cpu = { VMSTATE_UINT64(env.CSR_DSAVE, LoongArchCPU), VMSTATE_UINT64(kvm_state_counter, LoongArchCPU), + /* PV steal time */ + VMSTATE_UINT64(env.stealtime.guest_addr, LoongArchCPU), VMSTATE_END_OF_LIST() }, @@ -219,6 +244,7 @@ const VMStateDescription vmstate_loongarch_cpu = { #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) &vmstate_tlb, #endif + &vmstate_lbt, NULL } }; diff --git a/target/loongarch/meson.build b/target/loongarch/meson.build index e002e9aaf65..20bd3e2f0a3 100644 --- a/target/loongarch/meson.build +++ b/target/loongarch/meson.build @@ -8,7 +8,9 @@ loongarch_ss.add(files( loongarch_system_ss = ss.source_set() loongarch_system_ss.add(files( + 'arch_dump.c', 'cpu_helper.c', + 'csr.c', 'loongarch-qmp-cmds.c', 'machine.c', )) diff --git a/target/loongarch/tcg/csr_helper.c b/target/loongarch/tcg/csr_helper.c index 15f94caefab..28b1bb86bd9 100644 --- a/target/loongarch/tcg/csr_helper.c +++ b/target/loongarch/tcg/csr_helper.c @@ -6,16 +6,37 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "qemu/main-loop.h" #include "cpu.h" #include "internals.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "exec/cputlb.h" +#include "accel/tcg/cpu-ldst.h" #include "hw/irq.h" #include "cpu-csr.h" +target_ulong helper_csrwr_stlbps(CPULoongArchState *env, target_ulong val) +{ + int64_t old_v = env->CSR_STLBPS; + + /* + * The real hardware only supports the min tlb_ps is 12 + * tlb_ps=0 may cause undefined-behavior. 
+ */ + uint8_t tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + if (!check_ps(env, tlb_ps)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Attempted to set ps %d\n", tlb_ps); + } else { + /* Only update the PS field; reserved bits stay zero */ + env->CSR_STLBPS = FIELD_DP64(old_v, CSR_STLBPS, PS, tlb_ps); + } + + return old_v; +} + target_ulong helper_csrrd_pgd(CPULoongArchState *env) { int64_t v; @@ -95,3 +116,27 @@ target_ulong helper_csrwr_ticlr(CPULoongArchState *env, target_ulong val) } return old_v; } + +target_ulong helper_csrwr_pwcl(CPULoongArchState *env, target_ulong val) +{ + uint8_t shift, ptbase; + int64_t old_v = env->CSR_PWCL; + + /* + * The real hardware only supports a 64-bit PTE width for now; 128-bit or + * other widths are treated as illegal. + */ + shift = FIELD_EX64(val, CSR_PWCL, PTEWIDTH); + ptbase = FIELD_EX64(val, CSR_PWCL, PTBASE); + if (shift) { + qemu_log_mask(LOG_GUEST_ERROR, + "Attempted to set pte width with %d bit\n", 64 << shift); + val = FIELD_DP64(val, CSR_PWCL, PTEWIDTH, 0); + } + if (!check_ps(env, ptbase)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Attempted to set ptbase 2^%d\n", ptbase); + } + env->CSR_PWCL = val; + return old_v; +} diff --git a/target/loongarch/tcg/fpu_helper.c b/target/loongarch/tcg/fpu_helper.c index f6753c5875b..fc9c64c20a8 100644 --- a/target/loongarch/tcg/fpu_helper.c +++ b/target/loongarch/tcg/fpu_helper.c @@ -8,8 +8,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "fpu/softfloat.h" #include "internals.h" @@ -31,6 +30,15 @@ void restore_fp_status(CPULoongArchState *env) set_float_rounding_mode(ieee_rm[(env->fcsr0 >> FCSR0_RM) & 0x3], &env->fp_status); set_flush_to_zero(0, &env->fp_status); + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fp_status); + /* + * For LoongArch systems that conform to IEEE754-2008, the (inf,zero,nan) + * case sets InvalidOp and returns the input value 'c' + */ + set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status); + set_float_3nan_prop_rule(float_3nan_prop_s_cab, &env->fp_status); + /* Default NaN: sign bit clear, msb frac bit set */ + set_float_default_nan_pattern(0b01000000, &env->fp_status); } int ieee_ex_to_loongarch(int xcpt) @@ -352,8 +360,7 @@ uint64_t helper_fclass_s(CPULoongArchState *env, uint64_t fj) } else if (float32_is_zero_or_denormal(f)) { return sign ? 1 << 4 : 1 << 8; } else if (float32_is_any_nan(f)) { - float_status s = { }; /* for snan_bit_is_one */ - return float32_is_quiet_nan(f, &s) ? 1 << 1 : 1 << 0; + return float32_is_quiet_nan(f, &env->fp_status) ? 1 << 1 : 1 << 0; } else { return sign ? 1 << 3 : 1 << 7; } @@ -371,8 +378,7 @@ uint64_t helper_fclass_d(CPULoongArchState *env, uint64_t fj) } else if (float64_is_zero_or_denormal(f)) { return sign ? 1 << 4 : 1 << 8; } else if (float64_is_any_nan(f)) { - float_status s = { }; /* for snan_bit_is_one */ - return float64_is_quiet_nan(f, &s) ? 1 << 1 : 1 << 0; + return float64_is_quiet_nan(f, &env->fp_status) ? 1 << 1 : 1 << 0; } else { return sign ?
1 << 3 : 1 << 7; } diff --git a/target/loongarch/tcg/helper.h b/target/loongarch/tcg/helper.h new file mode 100644 index 00000000000..1d5cb0198c9 --- /dev/null +++ b/target/loongarch/tcg/helper.h @@ -0,0 +1,722 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + */ + +DEF_HELPER_2(raise_exception, noreturn, env, i32) + +DEF_HELPER_FLAGS_1(bitrev_w, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(bitrev_d, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl) + +DEF_HELPER_FLAGS_3(asrtle_d, TCG_CALL_NO_WG, void, env, tl, tl) +DEF_HELPER_FLAGS_3(asrtgt_d, TCG_CALL_NO_WG, void, env, tl, tl) + +DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) +DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl) +DEF_HELPER_FLAGS_2(cpucfg, TCG_CALL_NO_RWG_SE, tl, env, tl) + +/* Floating-point helper */ +DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fsub_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fsub_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmul_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmul_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmax_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmax_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmin_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmin_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmaxa_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmaxa_d, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmina_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmina_d, TCG_CALL_NO_WG, i64, env, i64, i64) + +DEF_HELPER_FLAGS_5(fmuladd_s, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i32) +DEF_HELPER_FLAGS_5(fmuladd_d, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i32) + +DEF_HELPER_FLAGS_3(fscaleb_s, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fscaleb_d, TCG_CALL_NO_WG, i64, env, i64, i64) + +DEF_HELPER_FLAGS_2(flogb_s, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(flogb_d, TCG_CALL_NO_WG, i64, env, i64) + +DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(frsqrt_s, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(frsqrt_d, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(frecip_s, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(frecip_d, TCG_CALL_NO_WG, i64, env, i64) + +DEF_HELPER_FLAGS_2(fclass_s, TCG_CALL_NO_RWG_SE, i64, env, i64) +DEF_HELPER_FLAGS_2(fclass_d, TCG_CALL_NO_RWG_SE, i64, env, i64) + +/* fcmp.cXXX.s */ +DEF_HELPER_4(fcmp_c_s, i64, env, i64, i64, i32) +/* fcmp.sXXX.s */ +DEF_HELPER_4(fcmp_s_s, i64, env, i64, i64, i32) +/* fcmp.cXXX.d */ +DEF_HELPER_4(fcmp_c_d, i64, env, i64, i64, i32) +/* fcmp.sXXX.d */ +DEF_HELPER_4(fcmp_s_d, i64, env, i64, i64, i32) + +DEF_HELPER_2(fcvt_d_s, i64, env, i64) +DEF_HELPER_2(fcvt_s_d, i64, env, i64) +DEF_HELPER_2(ffint_d_w, i64, env, i64) +DEF_HELPER_2(ffint_d_l, i64, env, i64) +DEF_HELPER_2(ffint_s_w, i64, env, i64) +DEF_HELPER_2(ffint_s_l, i64, env, i64) +DEF_HELPER_2(ftintrm_l_s, i64, env, i64) +DEF_HELPER_2(ftintrm_l_d, i64, env, i64) +DEF_HELPER_2(ftintrm_w_s, i64, env, i64) +DEF_HELPER_2(ftintrm_w_d, i64, env, i64) 
+DEF_HELPER_2(ftintrp_l_s, i64, env, i64) +DEF_HELPER_2(ftintrp_l_d, i64, env, i64) +DEF_HELPER_2(ftintrp_w_s, i64, env, i64) +DEF_HELPER_2(ftintrp_w_d, i64, env, i64) +DEF_HELPER_2(ftintrz_l_s, i64, env, i64) +DEF_HELPER_2(ftintrz_l_d, i64, env, i64) +DEF_HELPER_2(ftintrz_w_s, i64, env, i64) +DEF_HELPER_2(ftintrz_w_d, i64, env, i64) +DEF_HELPER_2(ftintrne_l_s, i64, env, i64) +DEF_HELPER_2(ftintrne_l_d, i64, env, i64) +DEF_HELPER_2(ftintrne_w_s, i64, env, i64) +DEF_HELPER_2(ftintrne_w_d, i64, env, i64) +DEF_HELPER_2(ftint_l_s, i64, env, i64) +DEF_HELPER_2(ftint_l_d, i64, env, i64) +DEF_HELPER_2(ftint_w_s, i64, env, i64) +DEF_HELPER_2(ftint_w_d, i64, env, i64) +DEF_HELPER_2(frint_s, i64, env, i64) +DEF_HELPER_2(frint_d, i64, env, i64) + +DEF_HELPER_FLAGS_1(set_rounding_mode, TCG_CALL_NO_RWG, void, env) + +DEF_HELPER_1(rdtime_d, i64, env) + +#ifndef CONFIG_USER_ONLY +/* CSRs helper */ +DEF_HELPER_1(csrrd_pgd, i64, env) +DEF_HELPER_1(csrrd_cpuid, i64, env) +DEF_HELPER_1(csrrd_tval, i64, env) +DEF_HELPER_2(csrwr_stlbps, i64, env, tl) +DEF_HELPER_2(csrwr_estat, i64, env, tl) +DEF_HELPER_2(csrwr_asid, i64, env, tl) +DEF_HELPER_2(csrwr_tcfg, i64, env, tl) +DEF_HELPER_2(csrwr_ticlr, i64, env, tl) +DEF_HELPER_2(csrwr_pwcl, i64, env, tl) +DEF_HELPER_2(iocsrrd_b, i64, env, tl) +DEF_HELPER_2(iocsrrd_h, i64, env, tl) +DEF_HELPER_2(iocsrrd_w, i64, env, tl) +DEF_HELPER_2(iocsrrd_d, i64, env, tl) +DEF_HELPER_3(iocsrwr_b, void, env, tl, tl) +DEF_HELPER_3(iocsrwr_h, void, env, tl, tl) +DEF_HELPER_3(iocsrwr_w, void, env, tl, tl) +DEF_HELPER_3(iocsrwr_d, void, env, tl, tl) + +/* TLB helper */ +DEF_HELPER_1(tlbwr, void, env) +DEF_HELPER_1(tlbfill, void, env) +DEF_HELPER_1(tlbsrch, void, env) +DEF_HELPER_1(tlbrd, void, env) +DEF_HELPER_1(tlbclr, void, env) +DEF_HELPER_1(tlbflush, void, env) +DEF_HELPER_1(invtlb_all, void, env) +DEF_HELPER_2(invtlb_all_g, void, env, i32) +DEF_HELPER_2(invtlb_all_asid, void, env, tl) +DEF_HELPER_3(invtlb_page_asid, void, env, tl, tl) +DEF_HELPER_3(invtlb_page_asid_or_g, void, env, tl, tl) + +DEF_HELPER_4(lddir, tl, env, tl, tl, i32) +DEF_HELPER_4(ldpte, void, env, tl, tl, i32) +DEF_HELPER_1(ertn, void, env) +DEF_HELPER_1(idle, void, env) +#endif + +/* LoongArch LSX */ +DEF_HELPER_FLAGS_4(vhaddw_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhaddw_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhaddw_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhaddw_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhaddw_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhaddw_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhaddw_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhaddw_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhsubw_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhsubw_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhsubw_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhsubw_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhsubw_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhsubw_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhsubw_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vhsubw_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vaddwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_w_h, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vsubwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwev_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vaddwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vsubwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwev_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsubwod_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vaddwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwev_q_du_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vaddwod_q_du_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vavg_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavg_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + 
+DEF_HELPER_FLAGS_4(vavgr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavgr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavgr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavgr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavgr_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavgr_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavgr_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vavgr_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vabsd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vabsd_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vadda_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vadda_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vadda_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vadda_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmini_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vmaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vmaxi_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vmuh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmuh_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmulwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_w_h, TCG_CALL_NO_RWG, void, 
ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmulwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmulwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmulwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmadd_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmsub_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmsub_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmaddwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmaddwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vmaddwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmaddwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vdiv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vdiv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vdiv_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vdiv_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vdiv_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vdiv_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) 
+DEF_HELPER_FLAGS_4(vdiv_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmod_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmod_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmod_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmod_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmod_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmod_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmod_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vmod_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vsat_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsat_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_3(vexth_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vexth_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vexth_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vexth_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vexth_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vexth_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vexth_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vexth_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(vext2xv_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vext2xv_w_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vext2xv_d_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vext2xv_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vext2xv_d_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vext2xv_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vext2xv_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vext2xv_wu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vext2xv_du_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vext2xv_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vext2xv_du_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vext2xv_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vsigncov_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsigncov_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsigncov_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsigncov_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(vmskltz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vmskltz_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vmskltz_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vmskltz_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vmskgez_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vmsknz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vnori_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vsllwil_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsllwil_w_h, 
TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsllwil_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_3(vextl_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsllwil_hu_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsllwil_wu_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsllwil_du_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_3(vextl_qu_du, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vsrlr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrlr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrlr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrlr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrlri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrlri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrlri_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrlri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vsrar_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrar_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrar_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrari_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrari_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrari_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrari_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vsrln_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrln_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrln_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsran_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsran_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsran_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vsrlni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrlni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrlni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrlni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrani_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrani_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrani_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrani_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vsrlrn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrlrn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrlrn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrarn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrarn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vsrarn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vsrlrni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrlrni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrlrni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrlrni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrarni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) 
+DEF_HELPER_FLAGS_4(vsrarni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrarni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vsrarni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vssrln_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrln_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrln_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssran_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssran_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssran_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrln_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrln_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrln_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssran_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssran_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssran_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vssrlni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrani_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrani_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrani_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrani_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlni_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlni_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlni_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlni_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrani_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrani_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrani_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrani_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vssrlrn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrlrn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrlrn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrarn_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrarn_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrarn_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrlrn_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrlrn_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrlrn_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrarn_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrarn_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vssrarn_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vssrlrni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlrni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlrni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlrni_d_q, 
TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrarni_b_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrarni_h_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrarni_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrarni_d_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlrni_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlrni_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlrni_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrlrni_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrarni_bu_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrarni_hu_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrarni_wu_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vssrarni_du_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_3(vclo_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vclo_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vclo_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vclo_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vclz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vclz_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vclz_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vclz_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_3(vpcnt_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vpcnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vpcnt_w, TCG_CALL_NO_RWG, void, ptr, ptr, i32) +DEF_HELPER_FLAGS_3(vpcnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vbitclr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitclr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitclr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitclr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitclri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitclri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitclri_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitclri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vbitset_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitset_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitset_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitset_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitseti_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitseti_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitseti_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitseti_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vbitrev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitrev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitrev_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitrev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vbitrevi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitrevi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitrevi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vbitrevi_d, TCG_CALL_NO_RWG, void, ptr, 
ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vfrstp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vfrstp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vfrstpi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vfrstpi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_5(vfadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfdiv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_6(vfmadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_6(vfmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_6(vfmsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_6(vfmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_6(vfnmadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_6(vfnmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_6(vfnmsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_6(vfnmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_5(vfmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_5(vfmaxa_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfmaxa_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfmina_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfmina_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(vflogb_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vflogb_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(vfclass_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfclass_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(vfsqrt_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfsqrt_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfrecip_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfrecip_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfrsqrt_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfrsqrt_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(vfcvtl_s_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfcvth_s_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfcvtl_d_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfcvth_d_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfcvt_h_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vfcvt_s_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(vfrintrne_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfrintrne_d, TCG_CALL_NO_RWG, 
void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfrintrz_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfrintrz_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfrintrp_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfrintrp_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfrintrm_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfrintrm_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfrint_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vfrint_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(vftintrne_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrne_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrz_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrz_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrp_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrp_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrm_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrm_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftint_w_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftint_l_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrz_wu_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrz_lu_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftint_wu_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftint_lu_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vftintrne_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vftintrz_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vftintrp_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vftintrm_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vftint_w_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrnel_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrneh_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrzl_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrzh_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrpl_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrph_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrml_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintrmh_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftintl_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vftinth_l_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(vffint_s_w, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vffint_d_l, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vffint_s_wu, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vffint_d_lu, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vffintl_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_4(vffinth_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32) +DEF_HELPER_FLAGS_5(vffint_s_l, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_FLAGS_4(vseqi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vseqi_h, TCG_CALL_NO_RWG, void, ptr, 
ptr, i64, i32) +DEF_HELPER_FLAGS_4(vseqi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vseqi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vslei_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslei_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vslti_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vslti_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_6(vfcmp_c_s, void, env, i32, i32, i32, i32, i32) +DEF_HELPER_6(vfcmp_s_s, void, env, i32, i32, i32, i32, i32) +DEF_HELPER_6(vfcmp_c_d, void, env, i32, i32, i32, i32, i32) +DEF_HELPER_6(vfcmp_s_d, void, env, i32, i32, i32, i32, i32) + +DEF_HELPER_FLAGS_4(vbitseli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_4(vsetanyeqz_b, void, env, i32, i32, i32) +DEF_HELPER_4(vsetanyeqz_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsetanyeqz_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsetanyeqz_d, void, env, i32, i32, i32) +DEF_HELPER_4(vsetallnez_b, void, env, i32, i32, i32) +DEF_HELPER_4(vsetallnez_h, void, env, i32, i32, i32) +DEF_HELPER_4(vsetallnez_w, void, env, i32, i32, i32) +DEF_HELPER_4(vsetallnez_d, void, env, i32, i32, i32) + +DEF_HELPER_FLAGS_4(xvinsve0_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(xvinsve0_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(xvpickve_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(xvpickve_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vpackev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpackev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpackev_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpackev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpackod_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpackod_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpackod_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpackod_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vpickev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpickev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpickev_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpickev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpickod_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpickod_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpickod_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpickod_d, TCG_CALL_NO_RWG, void, ptr, 
ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(vilvl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vilvl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vilvl_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vilvl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vilvh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vilvh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vilvh_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vilvh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(vshuf_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vshuf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vshuf_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vshuf_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vshuf4i_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vshuf4i_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vshuf4i_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vshuf4i_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vperm_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(vpermi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vpermi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vpermi_q, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_FLAGS_4(vextrins_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vextrins_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vextrins_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vextrins_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) diff --git a/target/loongarch/tcg/insn_trans/trans_atomic.c.inc b/target/loongarch/tcg/insn_trans/trans_atomic.c.inc index 974bc2a70fe..3d70d759417 100644 --- a/target/loongarch/tcg/insn_trans/trans_atomic.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_atomic.c.inc @@ -56,7 +56,7 @@ static bool gen_am(DisasContext *ctx, arg_rrr *a, if (a->rd != 0 && (a->rj == a->rd || a->rk == a->rd)) { qemu_log_mask(LOG_GUEST_ERROR, "Warning: source register overlaps destination register" - "in atomic insn at pc=0x" TARGET_FMT_lx "\n", + "in atomic insn at pc=0x%" VADDR_PRIx "\n", ctx->base.pc_next - 4); return false; } diff --git a/target/loongarch/tcg/insn_trans/trans_branch.c.inc b/target/loongarch/tcg/insn_trans/trans_branch.c.inc index 221e5159dba..f94c1f37ab1 100644 --- a/target/loongarch/tcg/insn_trans/trans_branch.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_branch.c.inc @@ -80,5 +80,5 @@ TRANS(bltu, ALL, gen_rr_bc, TCG_COND_LTU) TRANS(bgeu, ALL, gen_rr_bc, TCG_COND_GEU) TRANS(beqz, ALL, gen_rz_bc, TCG_COND_EQ) TRANS(bnez, ALL, gen_rz_bc, TCG_COND_NE) -TRANS(bceqz, 64, gen_cz_bc, TCG_COND_EQ) -TRANS(bcnez, 64, gen_cz_bc, TCG_COND_NE) +TRANS(bceqz, FP, gen_cz_bc, TCG_COND_EQ) +TRANS(bcnez, FP, gen_cz_bc, TCG_COND_NE) diff --git a/target/loongarch/tcg/insn_trans/trans_extra.c.inc b/target/loongarch/tcg/insn_trans/trans_extra.c.inc index cfa361fecfa..eda3d6e5611 100644 --- a/target/loongarch/tcg/insn_trans/trans_extra.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_extra.c.inc @@ -97,11 +97,11 @@ static bool gen_crc(DisasContext *ctx, arg_rrr *a, return true; } -TRANS(crc_w_b_w, 64, gen_crc, gen_helper_crc32, tcg_constant_tl(1)) -TRANS(crc_w_h_w, 64, gen_crc, gen_helper_crc32, tcg_constant_tl(2)) -TRANS(crc_w_w_w, 64, gen_crc, gen_helper_crc32, 
tcg_constant_tl(4)) -TRANS(crc_w_d_w, 64, gen_crc, gen_helper_crc32, tcg_constant_tl(8)) -TRANS(crcc_w_b_w, 64, gen_crc, gen_helper_crc32c, tcg_constant_tl(1)) -TRANS(crcc_w_h_w, 64, gen_crc, gen_helper_crc32c, tcg_constant_tl(2)) -TRANS(crcc_w_w_w, 64, gen_crc, gen_helper_crc32c, tcg_constant_tl(4)) -TRANS(crcc_w_d_w, 64, gen_crc, gen_helper_crc32c, tcg_constant_tl(8)) +TRANS(crc_w_b_w, CRC, gen_crc, gen_helper_crc32, tcg_constant_tl(1)) +TRANS(crc_w_h_w, CRC, gen_crc, gen_helper_crc32, tcg_constant_tl(2)) +TRANS(crc_w_w_w, CRC, gen_crc, gen_helper_crc32, tcg_constant_tl(4)) +TRANS(crc_w_d_w, CRC, gen_crc, gen_helper_crc32, tcg_constant_tl(8)) +TRANS(crcc_w_b_w, CRC, gen_crc, gen_helper_crc32c, tcg_constant_tl(1)) +TRANS(crcc_w_h_w, CRC, gen_crc, gen_helper_crc32c, tcg_constant_tl(2)) +TRANS(crcc_w_w_w, CRC, gen_crc, gen_helper_crc32c, tcg_constant_tl(4)) +TRANS(crcc_w_d_w, CRC, gen_crc, gen_helper_crc32c, tcg_constant_tl(8)) diff --git a/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc b/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc index 3babf69e4ab..6a2c030a6be 100644 --- a/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc @@ -4,10 +4,15 @@ */ /* bit0(signaling/quiet) bit1(lt) bit2(eq) bit3(un) bit4(neq) */ -static uint32_t get_fcmp_flags(int cond) +static uint32_t get_fcmp_flags(DisasContext *ctx, int cond) { uint32_t flags = 0; + /*check cond , cond =[0-8,10,12] */ + if ((cond > 8) &&(cond != 10) && (cond != 12)) { + return -1; + } + if (cond & 0x1) { flags |= FCMP_LT; } @@ -26,9 +31,14 @@ static uint32_t get_fcmp_flags(int cond) static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a) { TCGv var, src1, src2; - uint32_t flags; + uint32_t flags = get_fcmp_flags(ctx, a->fcond >>1); void (*fn)(TCGv, TCGv_env, TCGv, TCGv, TCGv_i32); + if (flags == -1) { + generate_exception(ctx, EXCCODE_INE); + return true; + } + if (!avail_FP_SP(ctx)) { return false; } @@ -39,8 +49,6 @@ static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a) src1 = get_fpr(ctx, a->fj); src2 = get_fpr(ctx, a->fk); fn = (a->fcond & 1 ? gen_helper_fcmp_s_s : gen_helper_fcmp_c_s); - flags = get_fcmp_flags(a->fcond >> 1); - fn(var, tcg_env, src1, src2, tcg_constant_i32(flags)); tcg_gen_st8_tl(var, tcg_env, offsetof(CPULoongArchState, cf[a->cd])); @@ -50,9 +58,14 @@ static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a) static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a) { TCGv var, src1, src2; - uint32_t flags; + uint32_t flags = get_fcmp_flags(ctx, a->fcond >> 1); void (*fn)(TCGv, TCGv_env, TCGv, TCGv, TCGv_i32); + if (flags == -1) { + generate_exception(ctx, EXCCODE_INE); + return true; + } + if (!avail_FP_DP(ctx)) { return false; } @@ -63,8 +76,6 @@ static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a) src1 = get_fpr(ctx, a->fj); src2 = get_fpr(ctx, a->fk); fn = (a->fcond & 1 ? gen_helper_fcmp_s_d : gen_helper_fcmp_c_d); - flags = get_fcmp_flags(a->fcond >> 1); - fn(var, tcg_env, src1, src2, tcg_constant_i32(flags)); tcg_gen_st8_tl(var, tcg_env, offsetof(CPULoongArchState, cf[a->cd])); diff --git a/target/loongarch/tcg/insn_trans/trans_privileged.c.inc b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc index 7e4ec93edb3..ecbfe23b636 100644 --- a/target/loongarch/tcg/insn_trans/trans_privileged.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc @@ -5,7 +5,7 @@ * LoongArch translation routines for the privileged instructions. 
*/ -#include "cpu-csr.h" +#include "csr.h" #ifdef CONFIG_USER_ONLY @@ -45,112 +45,6 @@ GEN_FALSE_TRANS(idle) typedef void (*GenCSRRead)(TCGv dest, TCGv_ptr env); typedef void (*GenCSRWrite)(TCGv dest, TCGv_ptr env, TCGv src); -typedef struct { - int offset; - int flags; - GenCSRRead readfn; - GenCSRWrite writefn; -} CSRInfo; - -enum { - CSRFL_READONLY = (1 << 0), - CSRFL_EXITTB = (1 << 1), - CSRFL_IO = (1 << 2), -}; - -#define CSR_OFF_FUNCS(NAME, FL, RD, WR) \ - [LOONGARCH_CSR_##NAME] = { \ - .offset = offsetof(CPULoongArchState, CSR_##NAME), \ - .flags = FL, .readfn = RD, .writefn = WR \ - } - -#define CSR_OFF_ARRAY(NAME, N) \ - [LOONGARCH_CSR_##NAME(N)] = { \ - .offset = offsetof(CPULoongArchState, CSR_##NAME[N]), \ - .flags = 0, .readfn = NULL, .writefn = NULL \ - } - -#define CSR_OFF_FLAGS(NAME, FL) \ - CSR_OFF_FUNCS(NAME, FL, NULL, NULL) - -#define CSR_OFF(NAME) \ - CSR_OFF_FLAGS(NAME, 0) - -static const CSRInfo csr_info[] = { - CSR_OFF_FLAGS(CRMD, CSRFL_EXITTB), - CSR_OFF(PRMD), - CSR_OFF_FLAGS(EUEN, CSRFL_EXITTB), - CSR_OFF_FLAGS(MISC, CSRFL_READONLY), - CSR_OFF(ECFG), - CSR_OFF_FUNCS(ESTAT, CSRFL_EXITTB, NULL, gen_helper_csrwr_estat), - CSR_OFF(ERA), - CSR_OFF(BADV), - CSR_OFF_FLAGS(BADI, CSRFL_READONLY), - CSR_OFF(EENTRY), - CSR_OFF(TLBIDX), - CSR_OFF(TLBEHI), - CSR_OFF(TLBELO0), - CSR_OFF(TLBELO1), - CSR_OFF_FUNCS(ASID, CSRFL_EXITTB, NULL, gen_helper_csrwr_asid), - CSR_OFF(PGDL), - CSR_OFF(PGDH), - CSR_OFF_FUNCS(PGD, CSRFL_READONLY, gen_helper_csrrd_pgd, NULL), - CSR_OFF(PWCL), - CSR_OFF(PWCH), - CSR_OFF(STLBPS), - CSR_OFF(RVACFG), - CSR_OFF_FUNCS(CPUID, CSRFL_READONLY, gen_helper_csrrd_cpuid, NULL), - CSR_OFF_FLAGS(PRCFG1, CSRFL_READONLY), - CSR_OFF_FLAGS(PRCFG2, CSRFL_READONLY), - CSR_OFF_FLAGS(PRCFG3, CSRFL_READONLY), - CSR_OFF_ARRAY(SAVE, 0), - CSR_OFF_ARRAY(SAVE, 1), - CSR_OFF_ARRAY(SAVE, 2), - CSR_OFF_ARRAY(SAVE, 3), - CSR_OFF_ARRAY(SAVE, 4), - CSR_OFF_ARRAY(SAVE, 5), - CSR_OFF_ARRAY(SAVE, 6), - CSR_OFF_ARRAY(SAVE, 7), - CSR_OFF_ARRAY(SAVE, 8), - CSR_OFF_ARRAY(SAVE, 9), - CSR_OFF_ARRAY(SAVE, 10), - CSR_OFF_ARRAY(SAVE, 11), - CSR_OFF_ARRAY(SAVE, 12), - CSR_OFF_ARRAY(SAVE, 13), - CSR_OFF_ARRAY(SAVE, 14), - CSR_OFF_ARRAY(SAVE, 15), - CSR_OFF(TID), - CSR_OFF_FUNCS(TCFG, CSRFL_IO, NULL, gen_helper_csrwr_tcfg), - CSR_OFF_FUNCS(TVAL, CSRFL_READONLY | CSRFL_IO, gen_helper_csrrd_tval, NULL), - CSR_OFF(CNTC), - CSR_OFF_FUNCS(TICLR, CSRFL_IO, NULL, gen_helper_csrwr_ticlr), - CSR_OFF(LLBCTL), - CSR_OFF(IMPCTL1), - CSR_OFF(IMPCTL2), - CSR_OFF(TLBRENTRY), - CSR_OFF(TLBRBADV), - CSR_OFF(TLBRERA), - CSR_OFF(TLBRSAVE), - CSR_OFF(TLBRELO0), - CSR_OFF(TLBRELO1), - CSR_OFF(TLBREHI), - CSR_OFF(TLBRPRMD), - CSR_OFF(MERRCTL), - CSR_OFF(MERRINFO1), - CSR_OFF(MERRINFO2), - CSR_OFF(MERRENTRY), - CSR_OFF(MERRERA), - CSR_OFF(MERRSAVE), - CSR_OFF(CTAG), - CSR_OFF_ARRAY(DMW, 0), - CSR_OFF_ARRAY(DMW, 1), - CSR_OFF_ARRAY(DMW, 2), - CSR_OFF_ARRAY(DMW, 3), - CSR_OFF(DBG), - CSR_OFF(DERA), - CSR_OFF(DSAVE), -}; - static bool check_plv(DisasContext *ctx) { if (ctx->plv == MMU_PLV_USER) { @@ -160,20 +54,38 @@ static bool check_plv(DisasContext *ctx) return false; } -static const CSRInfo *get_csr(unsigned csr_num) +static bool set_csr_trans_func(unsigned int csr_num, GenCSRRead readfn, + GenCSRWrite writefn) { - const CSRInfo *csr; + CSRInfo *csr; - if (csr_num >= ARRAY_SIZE(csr_info)) { - return NULL; - } - csr = &csr_info[csr_num]; - if (csr->offset == 0) { - return NULL; + csr = get_csr(csr_num); + if (!csr) { + return false; } - return csr; + + csr->readfn = (GenCSRFunc)readfn; + csr->writefn = 
(GenCSRFunc)writefn; + return true; } +#define SET_CSR_FUNC(NAME, read, write) \ + set_csr_trans_func(LOONGARCH_CSR_##NAME, read, write) + +void loongarch_csr_translate_init(void) +{ + SET_CSR_FUNC(STLBPS, NULL, gen_helper_csrwr_stlbps); + SET_CSR_FUNC(ESTAT, NULL, gen_helper_csrwr_estat); + SET_CSR_FUNC(ASID, NULL, gen_helper_csrwr_asid); + SET_CSR_FUNC(PGD, gen_helper_csrrd_pgd, NULL); + SET_CSR_FUNC(PWCL, NULL, gen_helper_csrwr_pwcl); + SET_CSR_FUNC(CPUID, gen_helper_csrrd_cpuid, NULL); + SET_CSR_FUNC(TCFG, NULL, gen_helper_csrwr_tcfg); + SET_CSR_FUNC(TVAL, gen_helper_csrrd_tval, NULL); + SET_CSR_FUNC(TICLR, NULL, gen_helper_csrwr_ticlr); +} +#undef SET_CSR_FUNC + static bool check_csr_flags(DisasContext *ctx, const CSRInfo *csr, bool write) { if ((csr->flags & CSRFL_READONLY) && write) { @@ -191,6 +103,7 @@ static bool trans_csrrd(DisasContext *ctx, arg_csrrd *a) { TCGv dest; const CSRInfo *csr; + GenCSRRead readfn; if (check_plv(ctx)) { return false; @@ -202,8 +115,9 @@ static bool trans_csrrd(DisasContext *ctx, arg_csrrd *a) } else { check_csr_flags(ctx, csr, false); dest = gpr_dst(ctx, a->rd, EXT_NONE); - if (csr->readfn) { - csr->readfn(dest, tcg_env); + readfn = (GenCSRRead)csr->readfn; + if (readfn) { + readfn(dest, tcg_env); } else { tcg_gen_ld_tl(dest, tcg_env, csr->offset); } @@ -216,6 +130,7 @@ static bool trans_csrwr(DisasContext *ctx, arg_csrwr *a) { TCGv dest, src1; const CSRInfo *csr; + GenCSRWrite writefn; if (check_plv(ctx)) { return false; @@ -231,9 +146,10 @@ static bool trans_csrwr(DisasContext *ctx, arg_csrwr *a) return false; } src1 = gpr_src(ctx, a->rd, EXT_NONE); - if (csr->writefn) { + writefn = (GenCSRWrite)csr->writefn; + if (writefn) { dest = gpr_dst(ctx, a->rd, EXT_NONE); - csr->writefn(dest, tcg_env, src1); + writefn(dest, tcg_env, src1); } else { dest = tcg_temp_new(); tcg_gen_ld_tl(dest, tcg_env, csr->offset); @@ -247,6 +163,7 @@ static bool trans_csrxchg(DisasContext *ctx, arg_csrxchg *a) { TCGv src1, mask, oldv, newv, temp; const CSRInfo *csr; + GenCSRWrite writefn; if (check_plv(ctx)) { return false; @@ -277,8 +194,9 @@ static bool trans_csrxchg(DisasContext *ctx, arg_csrxchg *a) tcg_gen_andc_tl(temp, oldv, mask); tcg_gen_or_tl(newv, newv, temp); - if (csr->writefn) { - csr->writefn(oldv, tcg_env, newv); + writefn = (GenCSRWrite)csr->writefn; + if (writefn) { + writefn(oldv, tcg_env, newv); } else { tcg_gen_st_tl(newv, tcg_env, csr->offset); } diff --git a/target/loongarch/tcg/insn_trans/trans_vec.c.inc b/target/loongarch/tcg/insn_trans/trans_vec.c.inc index 92b1d22e289..38bccf28386 100644 --- a/target/loongarch/tcg/insn_trans/trans_vec.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_vec.c.inc @@ -3465,7 +3465,7 @@ TRANS(xvmsknz_b, LASX, gen_xx, gen_helper_vmsknz_b) static uint64_t vldi_get_value(DisasContext *ctx, uint32_t imm) { int mode; - uint64_t data, t; + uint64_t data = 0, t; /* * imm bit [11:8] is mode, mode value is 0-12. 
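The hunks that follow fix the mode-1 replication pattern and reject invalid modes before the vector-enable check. As a hedged sketch of how the VLDI immediate is sliced here (function name invented for illustration, not part of the patch):

#include <stdint.h>

static uint64_t vldi_mode1_example(uint32_t imm)
{
    uint32_t sel  = (imm >> 12) & 0x1; /* 1: take the value from vldi_get_value() */
    uint32_t mode = (imm >> 8) & 0xf;  /* only modes 0..12 are valid */
    uint64_t t    = imm & 0xff;        /* payload byte imm[7:0] */

    (void)sel; (void)mode;
    /* mode 1: {2{16'0, imm[7:0], 8'0}}; e.g. t = 0xAB gives 0x0000AB000000AB00 */
    return (t << 40) | (t << 8);
}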
@@ -3480,7 +3480,7 @@ static uint64_t vldi_get_value(DisasContext *ctx, uint32_t imm) break; case 1: /* data: {2{16'0, imm[7:0], 8'0}} */ - data = (t << 24) | (t << 8); + data = (t << 40) | (t << 8); break; case 2: /* data: {2{8'0, imm[7:0], 16'0}} */ @@ -3570,22 +3570,31 @@ static uint64_t vldi_get_value(DisasContext *ctx, uint32_t imm) } break; default: - generate_exception(ctx, EXCCODE_INE); g_assert_not_reached(); } return data; } +static bool check_valid_vldi_mode(arg_vldi *a) +{ + return extract32(a->imm, 8, 4) <= 12; +} + static bool gen_vldi(DisasContext *ctx, arg_vldi *a, uint32_t oprsz) { int sel, vece; uint64_t value; - if (!check_vec(ctx, oprsz)) { + sel = (a->imm >> 12) & 0x1; + + if (sel && !check_valid_vldi_mode(a)) { + generate_exception(ctx, EXCCODE_INE); return true; } - sel = (a->imm >> 12) & 0x1; + if (!check_vec(ctx, oprsz)) { + return true; + } if (sel) { value = vldi_get_value(ctx, a->imm); @@ -4655,19 +4664,23 @@ TRANS(xvslti_du, LASX, do_xcmpi, MO_64, TCG_COND_LTU) static bool do_vfcmp_cond_s(DisasContext *ctx, arg_vvv_fcond *a, uint32_t sz) { - uint32_t flags; + uint32_t flags = get_fcmp_flags(ctx, a->fcond >> 1); void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32); TCGv_i32 vd = tcg_constant_i32(a->vd); TCGv_i32 vj = tcg_constant_i32(a->vj); TCGv_i32 vk = tcg_constant_i32(a->vk); TCGv_i32 oprsz = tcg_constant_i32(sz); + if(flags == -1){ + generate_exception(ctx, EXCCODE_INE); + return true; + } + if (!check_vec(ctx, sz)) { return true; } fn = (a->fcond & 1 ? gen_helper_vfcmp_s_s : gen_helper_vfcmp_c_s); - flags = get_fcmp_flags(a->fcond >> 1); fn(tcg_env, oprsz, vd, vj, vk, tcg_constant_i32(flags)); return true; @@ -4675,19 +4688,23 @@ static bool do_vfcmp_cond_s(DisasContext *ctx, arg_vvv_fcond *a, uint32_t sz) static bool do_vfcmp_cond_d(DisasContext *ctx, arg_vvv_fcond *a, uint32_t sz) { - uint32_t flags; + uint32_t flags = get_fcmp_flags(ctx, a->fcond >> 1); void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32); TCGv_i32 vd = tcg_constant_i32(a->vd); TCGv_i32 vj = tcg_constant_i32(a->vj); TCGv_i32 vk = tcg_constant_i32(a->vk); TCGv_i32 oprsz = tcg_constant_i32(sz); + if (flags == -1) { + generate_exception(ctx, EXCCODE_INE); + return true; + } + if (!check_vec(ctx, sz)) { return true; } fn = (a->fcond & 1 ? 
gen_helper_vfcmp_s_d : gen_helper_vfcmp_c_d); - flags = get_fcmp_flags(a->fcond >> 1); fn(tcg_env, oprsz, vd, vj, vk, tcg_constant_i32(flags)); return true; @@ -5126,7 +5143,7 @@ static bool do_vbsrl_v(DisasContext *ctx, arg_vv_i *a, uint32_t oprsz) { int i, ofs; - if (!check_vec(ctx, 32)) { + if (!check_vec(ctx, oprsz)) { return true; } diff --git a/target/loongarch/tcg/iocsr_helper.c b/target/loongarch/tcg/iocsr_helper.c index b6916f53d20..c155f48564d 100644 --- a/target/loongarch/tcg/iocsr_helper.c +++ b/target/loongarch/tcg/iocsr_helper.c @@ -9,8 +9,7 @@ #include "cpu.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #define GET_MEMTXATTRS(cas) \ ((MemTxAttrs){.requester_id = env_cpu(cas)->cpu_index}) diff --git a/target/loongarch/tcg/op_helper.c b/target/loongarch/tcg/op_helper.c index fe79c62fa47..16ac0d43bc9 100644 --- a/target/loongarch/tcg/op_helper.c +++ b/target/loongarch/tcg/op_helper.c @@ -10,11 +10,10 @@ #include "cpu.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "internals.h" #include "qemu/crc32c.h" -#include +#include /* for crc32 */ #include "cpu-csr.h" /* Exceptions helpers */ diff --git a/target/loongarch/tcg/tcg_loongarch.h b/target/loongarch/tcg/tcg_loongarch.h new file mode 100644 index 00000000000..fd4e1160229 --- /dev/null +++ b/target/loongarch/tcg/tcg_loongarch.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch TCG interface + * + * Copyright (c) 2025 Loongson Technology Corporation Limited + */ +#ifndef TARGET_LOONGARCH_TCG_LOONGARCH_H +#define TARGET_LOONGARCH_TCG_LOONGARCH_H +#include "cpu.h" + +void loongarch_csr_translate_init(void); + +bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); + +int loongarch_get_addr_from_tlb(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + MMUAccessType access_type, int mmu_idx); + +#endif /* TARGET_LOONGARCH_TCG_LOONGARCH_H */ diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c index 97f38fc3913..8872593ff01 100644 --- a/target/loongarch/tcg/tlb_helper.c +++ b/target/loongarch/tcg/tlb_helper.c @@ -12,38 +12,20 @@ #include "cpu.h" #include "internals.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" -#include "exec/cpu_ldst.h" +#include "exec/target_page.h" +#include "accel/tcg/cpu-ldst.h" #include "exec/log.h" #include "cpu-csr.h" +#include "tcg/tcg_loongarch.h" -static void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base, - uint64_t *dir_width, target_ulong level) +bool check_ps(CPULoongArchState *env, uint8_t tlb_ps) { - switch (level) { - case 1: - *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE); - *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH); - break; - case 2: - *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE); - *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH); - break; - case 3: - *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE); - *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH); - break; - case 4: - *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE); - *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH); - break; - default: - /* level may be zero 
for ldpte */ - *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE); - *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH); - break; + if (tlb_ps >= 64) { + return false; } + return BIT_ULL(tlb_ps) & (env->CSR_PRCFG2); } static void raise_mmu_exception(CPULoongArchState *env, target_ulong address, @@ -123,7 +105,11 @@ static void invalidate_tlb_entry(CPULoongArchState *env, int index) uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V); uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V); uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); + uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + if (!tlb_e) { + return; + } if (index >= LOONGARCH_STLB) { tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); } else { @@ -187,10 +173,6 @@ static void fill_tlb_entry(CPULoongArchState *env, int index) lo1 = env->CSR_TLBELO1; } - if (csr_ps == 0) { - qemu_log_mask(CPU_LOG_MMU, "page size is 0\n"); - } - /* Only MTLB has the ps fields */ if (index >= LOONGARCH_STLB) { tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps); @@ -214,6 +196,66 @@ static uint32_t get_random_tlb(uint32_t low, uint32_t high) return val % (high - low + 1) + low; } +/* + * One tlb entry holds an adjacent odd/even pair, the vpn is the + * content of the virtual page number divided by 2. So the + * compare vpn is bit[47:15] for 16KiB page. while the vppn + * field in tlb entry contains bit[47:13], so need adjust. + * virt_vpn = vaddr[47:13] + */ +static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, + int *index) +{ + LoongArchTLB *tlb; + uint16_t csr_asid, tlb_asid, stlb_idx; + uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps; + int i, compare_shift; + uint64_t vpn, tlb_vppn; + + csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); + stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1); + stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */ + compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; + + /* Search STLB */ + for (i = 0; i < 8; ++i) { + tlb = &env->tlb[i * 256 + stlb_idx]; + tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + if (tlb_e) { + tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); + tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + + if ((tlb_g == 1 || tlb_asid == csr_asid) && + (vpn == (tlb_vppn >> compare_shift))) { + *index = i * 256 + stlb_idx; + return true; + } + } + } + + /* Search MTLB */ + for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) { + tlb = &env->tlb[i]; + tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + if (tlb_e) { + tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); + tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; + vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1); + if ((tlb_g == 1 || tlb_asid == csr_asid) && + (vpn == (tlb_vppn >> compare_shift))) { + *index = i; + return true; + } + } + } + return false; +} + void helper_tlbsrch(CPULoongArchState *env) { int index, match; @@ -292,14 +334,16 @@ void helper_tlbfill(CPULoongArchState *env) if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) { entryhi = env->CSR_TLBREHI; + /* Validity of pagesize is checked in helper_ldpte() */ pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS); } else { entryhi = env->CSR_TLBEHI; + /* Validity of pagesize is checked in helper_tlbrd() */ 
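As a worked example of the comparison in loongarch_tlb_search() above, assuming 16KiB pages (tlb_ps == 14) and the VPPN field starting at bit 13 as the comment states (illustrative only; the real code also masks the address with TARGET_VIRT_MASK):

#include <stdint.h>
#include <stdbool.h>

static bool vppn_match_16k(uint64_t vaddr, uint64_t tlb_vppn)
{
    int tlb_ps = 14;                      /* 16KiB page */
    int compare_shift = tlb_ps + 1 - 13;  /* 15 - 13 == 2 */
    uint64_t vpn = vaddr >> (tlb_ps + 1); /* the odd/even pair index, vaddr[63:15] */

    /* VPPN holds vaddr[47:13]; dropping its low 2 bits leaves vaddr[47:15]. */
    return vpn == (tlb_vppn >> compare_shift);
}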
pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS); } + /* Validity of stlb_ps is checked in helper_csrwr_stlbps() */ stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); - if (pagesize == stlb_ps) { /* Only write into STLB bits [47:13] */ address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT); @@ -427,7 +471,11 @@ void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info, uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); uint64_t vpn, tlb_vppn; uint8_t tlb_ps, compare_shift; + uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + if (!tlb_e) { + continue; + } if (i >= LOONGARCH_STLB) { tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); } else { @@ -456,7 +504,11 @@ void helper_invtlb_page_asid_or_g(CPULoongArchState *env, uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); uint64_t vpn, tlb_vppn; uint8_t tlb_ps, compare_shift; + uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + if (!tlb_e) { + continue; + } if (i >= LOONGARCH_STLB) { tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); } else { @@ -485,7 +537,7 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, /* Data access */ ret = get_physical_address(env, &physical, &prot, address, - access_type, mmu_idx); + access_type, mmu_idx, 0); if (ret == TLBRET_MATCH) { tlb_set_page(cs, address & TARGET_PAGE_MASK, @@ -511,8 +563,7 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base, target_ulong level, uint32_t mem_idx) { CPUState *cs = env_cpu(env); - target_ulong badvaddr, index, phys, ret; - int shift; + target_ulong badvaddr, index, phys; uint64_t dir_base, dir_width; if (unlikely((level == 0) || (level > 4))) { @@ -537,27 +588,21 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base, badvaddr = env->CSR_TLBRBADV; base = base & TARGET_PHYS_MASK; - - /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */ - shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH); - shift = (shift + 1) * 3; - get_dir_base_width(env, &dir_base, &dir_width, level); index = (badvaddr >> dir_base) & ((1 << dir_width) - 1); - phys = base | index << shift; - ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK; - return ret; + phys = base | index << 3; + return ldq_phys(cs->as, phys) & TARGET_PHYS_MASK; } void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd, uint32_t mem_idx) { CPUState *cs = env_cpu(env); - target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv; - int shift; + target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, badv; uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE); uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH); uint64_t dir_base, dir_width; + uint8_t ps; /* * The parameter "base" has only two types, @@ -594,17 +639,18 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd, if (odd) { tmp0 += MAKE_64BIT_MASK(ps, 1); } + + if (!check_ps(env, ps)) { + qemu_log_mask(LOG_GUEST_ERROR, "Illegal huge pagesize %d\n", ps); + return; + } } else { - /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */ - shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH); - shift = (shift + 1) * 3; badv = env->CSR_TLBRBADV; ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1); ptindex = ptindex & ~0x1; /* clear bit 0 */ - ptoffset0 = ptindex << shift; - ptoffset1 = (ptindex + 1) << shift; - + ptoffset0 = ptindex << 3; + ptoffset1 = (ptindex + 1) << 3; phys = base | (odd ? 
ptoffset1 : ptoffset0); tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK; ps = ptbase; @@ -617,3 +663,87 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd, } env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps); } + +static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + int access_type, int index, int mmu_idx) +{ + LoongArchTLB *tlb = &env->tlb[index]; + uint64_t plv = mmu_idx; + uint64_t tlb_entry, tlb_ppn; + uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv; + + if (index >= LOONGARCH_STLB) { + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); + } else { + tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + } + n = (address >> tlb_ps) & 0x1;/* Odd or even */ + + tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0; + tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V); + tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D); + tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV); + if (is_la64(env)) { + tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN); + tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX); + tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR); + tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV); + } else { + tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN); + tlb_nx = 0; + tlb_nr = 0; + tlb_rplv = 0; + } + + /* Remove sw bit between bit12 -- bit PS*/ + tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) - 1)); + + /* Check access rights */ + if (!tlb_v) { + return TLBRET_INVALID; + } + + if (access_type == MMU_INST_FETCH && tlb_nx) { + return TLBRET_XI; + } + + if (access_type == MMU_DATA_LOAD && tlb_nr) { + return TLBRET_RI; + } + + if (((tlb_rplv == 0) && (plv > tlb_plv)) || + ((tlb_rplv == 1) && (plv != tlb_plv))) { + return TLBRET_PE; + } + + if ((access_type == MMU_DATA_STORE) && !tlb_d) { + return TLBRET_DIRTY; + } + + *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) | + (address & MAKE_64BIT_MASK(0, tlb_ps)); + *prot = PAGE_READ; + if (tlb_d) { + *prot |= PAGE_WRITE; + } + if (!tlb_nx) { + *prot |= PAGE_EXEC; + } + return TLBRET_MATCH; +} + +int loongarch_get_addr_from_tlb(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + MMUAccessType access_type, int mmu_idx) +{ + int index, match; + + match = loongarch_tlb_search(env, address, &index); + if (match) { + return loongarch_map_tlb_entry(env, physical, prot, + address, access_type, index, mmu_idx); + } + + return TLBRET_NOMATCH; +} diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c index 1fca4afc731..53a0b4c3ce9 100644 --- a/target/loongarch/tcg/translate.c +++ b/target/loongarch/tcg/translate.c @@ -11,11 +11,13 @@ #include "tcg/tcg-op-gvec.h" #include "exec/translation-block.h" #include "exec/translator.h" +#include "exec/target_page.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "exec/log.h" #include "qemu/qemu-print.h" #include "fpu/softfloat.h" +#include "tcg_loongarch.h" #include "translate.h" #include "internals.h" #include "vec.h" @@ -288,7 +290,7 @@ static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) if (!decode(ctx, ctx->opcode)) { qemu_log_mask(LOG_UNIMP, "Error: unknown opcode. 
" - TARGET_FMT_lx ": 0x%x\n", + "0x%" VADDR_PRIx ": 0x%x\n", ctx->base.pc_next, ctx->opcode); generate_exception(ctx, EXCCODE_INE); } @@ -333,8 +335,8 @@ static const TranslatorOps loongarch_tr_ops = { .tb_stop = loongarch_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void loongarch_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext ctx; @@ -358,4 +360,8 @@ void loongarch_translate_init(void) offsetof(CPULoongArchState, lladdr), "lladdr"); cpu_llval = tcg_global_mem_new(tcg_env, offsetof(CPULoongArchState, llval), "llval"); + +#ifndef CONFIG_USER_ONLY + loongarch_csr_translate_init(); +#endif } diff --git a/target/loongarch/tcg/vec_helper.c b/target/loongarch/tcg/vec_helper.c index 3faf52cbc46..a270998e638 100644 --- a/target/loongarch/tcg/vec_helper.c +++ b/target/loongarch/tcg/vec_helper.c @@ -7,7 +7,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" #include "internals.h" diff --git a/target/loongarch/translate.h b/target/loongarch/translate.h index 195f53573a0..018dc5eb171 100644 --- a/target/loongarch/translate.h +++ b/target/loongarch/translate.h @@ -25,6 +25,7 @@ #define avail_LSX(C) (FIELD_EX32((C)->cpucfg2, CPUCFG2, LSX)) #define avail_LASX(C) (FIELD_EX32((C)->cpucfg2, CPUCFG2, LASX)) #define avail_IOCSR(C) (FIELD_EX32((C)->cpucfg1, CPUCFG1, IOCSR)) +#define avail_CRC(C) (FIELD_EX32((C)->cpucfg1, CPUCFG1, CRC)) /* * If an operation is being performed on less than TARGET_LONG_BITS, diff --git a/target/m68k/cpu-param.h b/target/m68k/cpu-param.h index 39dcbcece8f..256a2b5f8b2 100644 --- a/target/m68k/cpu-param.h +++ b/target/m68k/cpu-param.h @@ -2,13 +2,12 @@ * m68k cpu parameters for qemu. 
* * Copyright (c) 2005-2007 CodeSourcery - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #ifndef M68K_CPU_PARAM_H #define M68K_CPU_PARAM_H -#define TARGET_LONG_BITS 32 /* * Coldfire Linux uses 8k pages * and m68k linux uses 4k pages @@ -18,4 +17,6 @@ #define TARGET_PHYS_ADDR_SPACE_BITS 32 #define TARGET_VIRT_ADDR_SPACE_BITS 32 +#define TARGET_INSN_START_EXTRA_WORDS 1 + #endif diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c index 1d49f4cb238..6a09db3a6f6 100644 --- a/target/m68k/cpu.c +++ b/target/m68k/cpu.c @@ -23,6 +23,8 @@ #include "cpu.h" #include "migration/vmstate.h" #include "fpu/softfloat.h" +#include "exec/translation-block.h" +#include "accel/tcg/cpu-ops.h" static void m68k_cpu_set_pc(CPUState *cs, vaddr value) { @@ -38,6 +40,24 @@ static vaddr m68k_cpu_get_pc(CPUState *cs) return cpu->env.pc; } +static TCGTBCPUState m68k_get_tb_cpu_state(CPUState *cs) +{ + CPUM68KState *env = cpu_env(cs); + uint32_t flags; + + flags = (env->macsr >> 4) & TB_FLAGS_MACSR; + if (env->sr & SR_S) { + flags |= TB_FLAGS_MSR_S; + flags |= (env->sfc << (TB_FLAGS_SFC_S_BIT - 2)) & TB_FLAGS_SFC_S; + flags |= (env->dfc << (TB_FLAGS_DFC_S_BIT - 2)) & TB_FLAGS_DFC_S; + } + if (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS) { + flags |= TB_FLAGS_TRACE; + } + + return (TCGTBCPUState){ .pc = env->pc, .flags = flags }; +} + static void m68k_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data) @@ -51,10 +71,12 @@ static void m68k_restore_state_to_opc(CPUState *cs, } } +#ifndef CONFIG_USER_ONLY static bool m68k_cpu_has_work(CPUState *cs) { return cs->interrupt_request & CPU_INTERRUPT_HARD; } +#endif /* !CONFIG_USER_ONLY */ static int m68k_cpu_mmu_index(CPUState *cs, bool ifetch) { @@ -76,7 +98,7 @@ static void m68k_cpu_reset_hold(Object *obj, ResetType type) CPUState *cs = CPU(obj); M68kCPUClass *mcc = M68K_CPU_GET_CLASS(obj); CPUM68KState *env = cpu_env(cs); - floatx80 nan = floatx80_default_nan(NULL); + floatx80 nan; int i; if (mcc->parent_phases.hold) { @@ -89,6 +111,61 @@ static void m68k_cpu_reset_hold(Object *obj, ResetType type) #else cpu_m68k_set_sr(env, SR_S | SR_I); #endif + /* + * M68000 FAMILY PROGRAMMER'S REFERENCE MANUAL + * 3.4 FLOATING-POINT INSTRUCTION DETAILS + * If either operand, but not both operands, of an operation is a + * nonsignaling NaN, then that NaN is returned as the result. If both + * operands are nonsignaling NaNs, then the destination operand + * nonsignaling NaN is returned as the result. + * If either operand to an operation is a signaling NaN (SNaN), then the + * SNaN bit is set in the FPSR EXC byte. If the SNaN exception enable bit + * is set in the FPCR ENABLE byte, then the exception is taken and the + * destination is not modified. If the SNaN exception enable bit is not + * set, setting the SNaN bit in the operand to a one converts the SNaN to + * a nonsignaling NaN. The operation then continues as described in the + * preceding paragraph for nonsignaling NaNs. 
+ */ + set_float_2nan_prop_rule(float_2nan_prop_ab, &env->fp_status); + /* Default NaN: sign bit clear, all frac bits set */ + set_float_default_nan_pattern(0b01111111, &env->fp_status); + /* + * m68k-specific floatx80 behaviour: + * * default Infinity values have a zero Integer bit + * * input Infinities may have the Integer bit either 0 or 1 + * * pseudo-denormals supported for input and output + * * don't raise Invalid for pseudo-NaN/pseudo-Inf/Unnormal + * + * With m68k, the explicit integer bit can be zero in the case of: + * - zeros (exp == 0, mantissa == 0) + * - denormalized numbers (exp == 0, mantissa != 0) + * - unnormalized numbers (exp != 0, exp < 0x7FFF) + * - infinities (exp == 0x7FFF, mantissa == 0) + * - not-a-numbers (exp == 0x7FFF, mantissa != 0) + * + * For infinities and NaNs, the explicit integer bit can be either one or + * zero. + * + * The IEEE 754 standard does not define a zero integer bit. Such a number + * is an unnormalized number. Hardware does not directly support + * denormalized and unnormalized numbers, but implicitly supports them by + * trapping them as unimplemented data types, allowing efficient conversion + * in software. + * + * See "M68000 FAMILY PROGRAMMER’S REFERENCE MANUAL", + * "1.6 FLOATING-POINT DATA TYPES" + * + * Note though that QEMU's fp emulation does directly handle both + * denormal and unnormal values, and does not trap to guest software. + */ + set_floatx80_behaviour(floatx80_default_inf_int_bit_is_zero | + floatx80_pseudo_inf_valid | + floatx80_pseudo_nan_valid | + floatx80_unnormal_valid | + floatx80_pseudo_denormal_valid, + &env->fp_status); + + nan = floatx80_default_nan(&env->fp_status); for (i = 0; i < 8; i++) { env->fregs[i].d = nan; } @@ -102,6 +179,7 @@ static void m68k_cpu_reset_hold(Object *obj, ResetType type) static void m68k_cpu_disas_set_info(CPUState *s, disassemble_info *info) { info->print_insn = print_insn_m68k; + info->endian = BFD_ENDIAN_BIG; info->mach = 0; } @@ -523,26 +601,34 @@ static const VMStateDescription vmstate_m68k_cpu = { #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps m68k_sysemu_ops = { + .has_work = m68k_cpu_has_work, .get_phys_page_debug = m68k_cpu_get_phys_page_debug, }; #endif /* !CONFIG_USER_ONLY */ -#include "hw/core/tcg-cpu-ops.h" - static const TCGCPUOps m68k_tcg_ops = { + /* MTTCG not yet supported: require strict ordering */ + .guest_default_memory_order = TCG_MO_ALL, + .mttcg_supported = false, + .initialize = m68k_tcg_init, + .translate_code = m68k_translate_code, + .get_tb_cpu_state = m68k_get_tb_cpu_state, .restore_state_to_opc = m68k_restore_state_to_opc, + .mmu_index = m68k_cpu_mmu_index, #ifndef CONFIG_USER_ONLY .tlb_fill = m68k_cpu_tlb_fill, + .pointer_wrap = cpu_pointer_wrap_uint32, .cpu_exec_interrupt = m68k_cpu_exec_interrupt, .cpu_exec_halt = m68k_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = m68k_cpu_do_interrupt, .do_transaction_failed = m68k_cpu_transaction_failed, #endif /* !CONFIG_USER_ONLY */ }; -static void m68k_cpu_class_init(ObjectClass *c, void *data) +static void m68k_cpu_class_init(ObjectClass *c, const void *data) { M68kCPUClass *mcc = M68K_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); @@ -555,8 +641,6 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data) &mcc->parent_phases); cc->class_by_name = m68k_cpu_class_by_name; - cc->has_work = m68k_cpu_has_work; - cc->mmu_index = m68k_cpu_mmu_index; cc->dump_state = m68k_cpu_dump_state; cc->set_pc = m68k_cpu_set_pc; cc->get_pc = m68k_cpu_get_pc; @@ -571,7 +655,7 @@ static void 
m68k_cpu_class_init(ObjectClass *c, void *data) cc->tcg_ops = &m68k_tcg_ops; } -static void m68k_cpu_class_init_cf_core(ObjectClass *c, void *data) +static void m68k_cpu_class_init_cf_core(ObjectClass *c, const void *data) { CPUClass *cc = CPU_CLASS(c); @@ -586,7 +670,7 @@ static void m68k_cpu_class_init_cf_core(ObjectClass *c, void *data) .class_init = m68k_cpu_class_init_cf_core \ } -static void m68k_cpu_class_init_m68k_core(ObjectClass *c, void *data) +static void m68k_cpu_class_init_m68k_core(ObjectClass *c, const void *data) { CPUClass *cc = CPU_CLASS(c); diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h index b5bbeedb7a5..d9db6a486a8 100644 --- a/target/m68k/cpu.h +++ b/target/m68k/cpu.h @@ -21,7 +21,9 @@ #ifndef M68K_CPU_H #define M68K_CPU_H +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #include "qemu/cpu-float.h" #include "cpu-qom.h" @@ -76,8 +78,6 @@ #define M68K_MAX_TTR 2 #define TTR(type, index) ttr[((type & ACCESS_CODE) == ACCESS_CODE) * 2 + index] -#define TARGET_INSN_START_EXTRA_WORDS 1 - typedef CPU_LDoubleU FPReg; typedef struct CPUArchState { @@ -193,6 +193,8 @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void m68k_tcg_init(void); +void m68k_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); void m68k_cpu_init_gdb(M68kCPU *cpu); uint32_t cpu_m68k_get_ccr(CPUM68KState *env); void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t); @@ -592,8 +594,6 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, MemTxResult response, uintptr_t retaddr); #endif -#include "exec/cpu-all.h" - /* TB flags */ #define TB_FLAGS_MACSR 0x0f #define TB_FLAGS_MSR_S_BIT 13 @@ -605,22 +605,6 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, #define TB_FLAGS_TRACE 16 #define TB_FLAGS_TRACE_BIT (1 << TB_FLAGS_TRACE) -static inline void cpu_get_tb_cpu_state(CPUM68KState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags) -{ - *pc = env->pc; - *cs_base = 0; - *flags = (env->macsr >> 4) & TB_FLAGS_MACSR; - if (env->sr & SR_S) { - *flags |= TB_FLAGS_MSR_S; - *flags |= (env->sfc << (TB_FLAGS_SFC_S_BIT - 2)) & TB_FLAGS_SFC_S; - *flags |= (env->dfc << (TB_FLAGS_DFC_S_BIT - 2)) & TB_FLAGS_DFC_S; - } - if (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS) { - *flags |= TB_FLAGS_TRACE; - } -} - void dump_mmu(CPUM68KState *env); #endif diff --git a/target/m68k/fpu_helper.c b/target/m68k/fpu_helper.c index 8314791f504..56012863c85 100644 --- a/target/m68k/fpu_helper.c +++ b/target/m68k/fpu_helper.c @@ -21,8 +21,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "softfloat.h" /* @@ -175,7 +174,7 @@ static int cpu_m68k_exceptbits_from_host(int host_bits) if (host_bits & float_flag_overflow) { target_bits |= 0x40; } - if (host_bits & (float_flag_underflow | float_flag_output_denormal)) { + if (host_bits & (float_flag_underflow | float_flag_output_denormal_flushed)) { target_bits |= 0x20; } if (host_bits & float_flag_divbyzero) { @@ -455,7 +454,7 @@ void HELPER(ftst)(CPUM68KState *env, FPReg *val) if (floatx80_is_any_nan(val->d)) { cc |= FPSR_CC_A; - } else if (floatx80_is_infinity(val->d)) { + } else if (floatx80_is_infinity(val->d, &env->fp_status)) { cc |= FPSR_CC_I; } else if (floatx80_is_zero(val->d)) { cc |= FPSR_CC_Z; @@ -615,14 +614,13 @@ void 
HELPER(frem)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) fp_rem = floatx80_rem(val1->d, val0->d, &env->fp_status); if (!floatx80_is_any_nan(fp_rem)) { - float_status fp_status = { }; + /* Use local temporary fp_status to set different rounding mode */ + float_status fp_status = env->fp_status; uint32_t quotient; int sign; /* Calculate quotient directly using round to nearest mode */ set_float_rounding_mode(float_round_nearest_even, &fp_status); - set_floatx80_rounding_precision( - get_floatx80_rounding_precision(&env->fp_status), &fp_status); fp_quot.d = floatx80_div(val1->d, val0->d, &fp_status); sign = extractFloatx80Sign(fp_quot.d); diff --git a/target/m68k/gdbstub.c b/target/m68k/gdbstub.c index 15547e2313c..136159f98f2 100644 --- a/target/m68k/gdbstub.c +++ b/target/m68k/gdbstub.c @@ -52,7 +52,7 @@ int m68k_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) CPUM68KState *env = cpu_env(cs); uint32_t tmp; - tmp = ldl_p(mem_buf); + tmp = ldl_be_p(mem_buf); if (n < 8) { /* D0-D7 */ diff --git a/target/m68k/helper.c b/target/m68k/helper.c index 4c85badd5d3..15f110fa7a2 100644 --- a/target/m68k/helper.c +++ b/target/m68k/helper.c @@ -20,10 +20,12 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" +#include "exec/target_page.h" #include "exec/gdbstub.h" #include "exec/helper-proto.h" +#include "system/memory.h" #include "gdbstub/helpers.h" #include "fpu/softfloat.h" #include "qemu/qemu-print.h" @@ -36,7 +38,8 @@ static int cf_fpu_gdb_get_reg(CPUState *cs, GByteArray *mem_buf, int n) CPUM68KState *env = &cpu->env; if (n < 8) { - float_status s; + /* Use scratch float_status so any exceptions don't change CPU state */ + float_status s = env->fp_status; return gdb_get_reg64(mem_buf, floatx80_to_float64(env->fregs[n].d, &s)); } switch (n) { @@ -56,16 +59,17 @@ static int cf_fpu_gdb_set_reg(CPUState *cs, uint8_t *mem_buf, int n) CPUM68KState *env = &cpu->env; if (n < 8) { - float_status s; - env->fregs[n].d = float64_to_floatx80(ldq_p(mem_buf), &s); + /* Use scratch float_status so any exceptions don't change CPU state */ + float_status s = env->fp_status; + env->fregs[n].d = float64_to_floatx80(ldq_be_p(mem_buf), &s); return 8; } switch (n) { case 8: /* fpcontrol */ - cpu_m68k_set_fpcr(env, ldl_p(mem_buf)); + cpu_m68k_set_fpcr(env, ldl_be_p(mem_buf)); return 4; case 9: /* fpstatus */ - env->fpsr = ldl_p(mem_buf); + env->fpsr = ldl_be_p(mem_buf); return 4; case 10: /* fpiar, not implemented */ return 4; @@ -107,10 +111,10 @@ static int m68k_fpu_gdb_set_reg(CPUState *cs, uint8_t *mem_buf, int n) } switch (n) { case 8: /* fpcontrol */ - cpu_m68k_set_fpcr(env, ldl_p(mem_buf)); + cpu_m68k_set_fpcr(env, ldl_be_p(mem_buf)); return 4; case 9: /* fpstatus */ - cpu_m68k_set_fpsr(env, ldl_p(mem_buf)); + cpu_m68k_set_fpsr(env, ldl_be_p(mem_buf)); return 4; case 10: /* fpiar, not implemented */ return 4; @@ -287,7 +291,6 @@ void HELPER(m68k_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val) /* Invalid control registers will generate an exception. 
*/ raise_exception_ra(env, EXCP_ILLEGAL, 0); - return; } uint32_t HELPER(m68k_movec_from)(CPUM68KState *env, uint32_t reg) diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c index 15bad5dd465..f29ae12af84 100644 --- a/target/m68k/op_helper.c +++ b/target/m68k/op_helper.c @@ -20,8 +20,7 @@ #include "qemu/log.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "semihosting/semihost.h" #if !defined(CONFIG_USER_ONLY) diff --git a/target/m68k/semihosting-stub.c b/target/m68k/semihosting-stub.c index d6a5965e29c..dbe669cc5f6 100644 --- a/target/m68k/semihosting-stub.c +++ b/target/m68k/semihosting-stub.c @@ -1,8 +1,11 @@ /* * m68k/ColdFire semihosting stub * - * SPDX-FileContributor: Philippe Mathieu-Daudé - * SPDX-FileCopyrightText: 2024 Linaro Ltd. + * Copyright (c) 2024 Linaro Ltd. + * + * Authors: + * Philippe Mathieu-Daudé + * * SPDX-License-Identifier: GPL-2.0-or-later */ diff --git a/target/m68k/softfloat.c b/target/m68k/softfloat.c index 02dcc03d15d..d1f150e641f 100644 --- a/target/m68k/softfloat.c +++ b/target/m68k/softfloat.c @@ -142,8 +142,7 @@ floatx80 floatx80_scale(floatx80 a, floatx80 b, float_status *status) if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaN(a, b, status); } - return packFloatx80(aSign, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(aSign, status); } if (aExp == 0) { if (aSig == 0) { @@ -245,7 +244,7 @@ floatx80 floatx80_lognp1(floatx80 a, float_status *status) float_raise(float_flag_invalid, status); return floatx80_default_nan(status); } - return packFloatx80(0, floatx80_infinity.high, floatx80_infinity.low); + return floatx80_default_inf(0, status); } if (aExp == 0 && aSig == 0) { @@ -255,8 +254,7 @@ floatx80 floatx80_lognp1(floatx80 a, float_status *status) if (aSign && aExp >= one_exp) { if (aExp == one_exp && aSig == one_sig) { float_raise(float_flag_divbyzero, status); - return packFloatx80(aSign, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(aSign, status); } float_raise(float_flag_invalid, status); return floatx80_default_nan(status); @@ -442,8 +440,7 @@ floatx80 floatx80_logn(floatx80 a, float_status *status) propagateFloatx80NaNOneArg(a, status); } if (aSign == 0) { - return packFloatx80(0, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(0, status); } } @@ -452,8 +449,7 @@ floatx80 floatx80_logn(floatx80 a, float_status *status) if (aExp == 0) { if (aSig == 0) { /* zero */ float_raise(float_flag_divbyzero, status); - return packFloatx80(1, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(1, status); } if ((aSig & one_sig) == 0) { /* denormal */ normalizeFloatx80Subnormal(aSig, &aExp, &aSig); @@ -610,15 +606,13 @@ floatx80 floatx80_log10(floatx80 a, float_status *status) propagateFloatx80NaNOneArg(a, status); } if (aSign == 0) { - return packFloatx80(0, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(0, status); } } if (aExp == 0 && aSig == 0) { float_raise(float_flag_divbyzero, status); - return packFloatx80(1, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(1, status); } if (aSign) { @@ -668,16 +662,14 @@ floatx80 floatx80_log2(floatx80 a, float_status *status) propagateFloatx80NaNOneArg(a, status); } if (aSign == 0) { - return packFloatx80(0, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(0, status); } } if (aExp 
== 0) { if (aSig == 0) { float_raise(float_flag_divbyzero, status); - return packFloatx80(1, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(1, status); } normalizeFloatx80Subnormal(aSig, &aExp, &aSig); } @@ -740,8 +732,7 @@ floatx80 floatx80_etox(floatx80 a, float_status *status) if (aSign) { return packFloatx80(0, 0, 0); } - return packFloatx80(0, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(0, status); } if (aExp == 0 && aSig == 0) { @@ -924,8 +915,7 @@ floatx80 floatx80_twotox(floatx80 a, float_status *status) if (aSign) { return packFloatx80(0, 0, 0); } - return packFloatx80(0, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(0, status); } if (aExp == 0 && aSig == 0) { @@ -1075,8 +1065,7 @@ floatx80 floatx80_tentox(floatx80 a, float_status *status) if (aSign) { return packFloatx80(0, 0, 0); } - return packFloatx80(0, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(0, status); } if (aExp == 0 && aSig == 0) { @@ -2260,8 +2249,7 @@ floatx80 floatx80_atanh(floatx80 a, float_status *status) if (compact >= 0x3FFF8000) { /* |X| >= 1 */ if (aExp == one_exp && aSig == one_sig) { /* |X| == 1 */ float_raise(float_flag_divbyzero, status); - return packFloatx80(aSign, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(aSign, status); } else { /* |X| > 1 */ float_raise(float_flag_invalid, status); return floatx80_default_nan(status); @@ -2320,8 +2308,7 @@ floatx80 floatx80_etoxm1(floatx80 a, float_status *status) if (aSign) { return packFloatx80(aSign, one_exp, one_sig); } - return packFloatx80(0, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(0, status); } if (aExp == 0 && aSig == 0) { @@ -2687,8 +2674,7 @@ floatx80 floatx80_sinh(floatx80 a, float_status *status) if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } - return packFloatx80(aSign, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(aSign, status); } if (aExp == 0 && aSig == 0) { @@ -2774,8 +2760,7 @@ floatx80 floatx80_cosh(floatx80 a, float_status *status) if ((uint64_t) (aSig << 1)) { return propagateFloatx80NaNOneArg(a, status); } - return packFloatx80(0, floatx80_infinity.high, - floatx80_infinity.low); + return floatx80_default_inf(0, status); } if (aExp == 0 && aSig == 0) { diff --git a/target/m68k/translate.c b/target/m68k/translate.c index 445966fb6a1..97afceb1297 100644 --- a/target/m68k/translate.c +++ b/target/m68k/translate.c @@ -20,7 +20,8 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/translation-block.h" +#include "exec/target_page.h" #include "tcg/tcg-op.h" #include "qemu/log.h" #include "qemu/qemu-print.h" @@ -720,7 +721,9 @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s, } /* fallthru */ case 2: /* Indirect register */ - return get_areg(s, reg0); + tmp = tcg_temp_new(); + tcg_gen_mov_i32(tmp, get_areg(s, reg0)); + return tmp; case 4: /* Indirect predecrememnt. */ if (opsize == OS_UNSIZED) { return NULL_QREG; @@ -747,20 +750,23 @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s, switch (reg0) { case 0: /* Absolute short. */ offset = (int16_t)read_im16(env, s); - return tcg_constant_i32(offset); + break; case 1: /* Absolute long. 
*/ offset = read_im32(env, s); - return tcg_constant_i32(offset); + break; case 2: /* pc displacement */ offset = s->pc; offset += (int16_t)read_im16(env, s); - return tcg_constant_i32(offset); + break; case 3: /* pc index+displacement. */ return gen_lea_indexed(env, s, NULL_QREG); case 4: /* Immediate. */ default: return NULL_QREG; } + tmp = tcg_temp_new(); + tcg_gen_movi_i32(tmp, offset); + return tmp; } /* Should never happen. */ return NULL_QREG; @@ -6112,8 +6118,8 @@ static const TranslatorOps m68k_tr_ops = { .tb_stop = m68k_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void m68k_translate_code(CPUState *cpu, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc; translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base); diff --git a/target/meson.build b/target/meson.build index 1c2e6f2b192..b29598e7c5e 100644 --- a/target/meson.build +++ b/target/meson.build @@ -1,7 +1,6 @@ subdir('alpha') subdir('arm') subdir('avr') -subdir('cris') subdir('hexagon') subdir('hppa') subdir('i386') diff --git a/target/microblaze/cpu-param.h b/target/microblaze/cpu-param.h index e530fead1c0..e0a37945136 100644 --- a/target/microblaze/cpu-param.h +++ b/target/microblaze/cpu-param.h @@ -2,7 +2,7 @@ * MicroBlaze cpu parameters for qemu. * * Copyright (c) 2009 Edgar E. Iglesias - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #ifndef MICROBLAZE_CPU_PARAM_H @@ -17,11 +17,9 @@ * of address space. */ #ifdef CONFIG_USER_ONLY -#define TARGET_LONG_BITS 32 #define TARGET_PHYS_ADDR_SPACE_BITS 32 #define TARGET_VIRT_ADDR_SPACE_BITS 32 #else -#define TARGET_LONG_BITS 64 #define TARGET_PHYS_ADDR_SPACE_BITS 64 #define TARGET_VIRT_ADDR_SPACE_BITS 64 #endif @@ -29,7 +27,6 @@ /* FIXME: MB uses variable pages down to 1K but linux only uses 4k. */ #define TARGET_PAGE_BITS 12 -/* MicroBlaze is always in-order. */ -#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL +#define TARGET_INSN_START_EXTRA_WORDS 1 #endif diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c index 135947ee800..ee0a869a94a 100644 --- a/target/microblaze/cpu.c +++ b/target/microblaze/cpu.c @@ -27,10 +27,11 @@ #include "cpu.h" #include "qemu/module.h" #include "hw/qdev-properties.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "exec/gdbstub.h" +#include "exec/translation-block.h" #include "fpu/softfloat-helpers.h" +#include "accel/tcg/cpu-ops.h" #include "tcg/tcg.h" static const struct { @@ -94,6 +95,17 @@ static vaddr mb_cpu_get_pc(CPUState *cs) return cpu->env.pc; } +static TCGTBCPUState mb_get_tb_cpu_state(CPUState *cs) +{ + CPUMBState *env = cpu_env(cs); + + return (TCGTBCPUState){ + .pc = env->pc, + .flags = (env->iflags & IFLAGS_TB_MASK) | (env->msr & MSR_TB_MASK), + .cs_base = (env->iflags & IMM_FLAG ? 
env->imm : 0), + }; +} + static void mb_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -114,10 +126,12 @@ static void mb_restore_state_to_opc(CPUState *cs, cpu->env.iflags = data[1]; } +#ifndef CONFIG_USER_ONLY static bool mb_cpu_has_work(CPUState *cs) { return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); } +#endif /* !CONFIG_USER_ONLY */ static int mb_cpu_mmu_index(CPUState *cs, bool ifetch) { @@ -201,6 +215,15 @@ static void mb_cpu_reset_hold(Object *obj, ResetType type) env->pc = cpu->cfg.base_vectors; + set_float_rounding_mode(float_round_nearest_even, &env->fp_status); + /* + * TODO: this is probably not the correct NaN propagation rule for + * this architecture. + */ + set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status); + /* Default NaN: sign bit set, most significant frac bit set */ + set_float_default_nan_pattern(0b11000000, &env->fp_status); + #if defined(CONFIG_USER_ONLY) /* start in user mode with interrupts enabled. */ mb_cpu_write_msr(env, MSR_EE | MSR_IE | MSR_VM | MSR_UM); @@ -214,6 +237,8 @@ static void mb_disas_set_info(CPUState *cpu, disassemble_info *info) { info->mach = bfd_arch_microblaze; info->print_insn = print_insn_microblaze; + info->endian = TARGET_BIG_ENDIAN ? BFD_ENDIAN_BIG + : BFD_ENDIAN_LITTLE; } static void mb_cpu_realizefn(DeviceState *dev, Error **errp) @@ -238,6 +263,11 @@ static void mb_cpu_realizefn(DeviceState *dev, Error **errp) return; } + gdb_register_coprocessor(cs, mb_cpu_gdb_read_stack_protect, + mb_cpu_gdb_write_stack_protect, + gdb_find_static_feature("microblaze-stack-protect.xml"), + 0); + qemu_init_vcpu(cs); version = cpu->cfg.version ? cpu->cfg.version : DEFAULT_CPU_VERSION; @@ -310,27 +340,24 @@ static void mb_cpu_realizefn(DeviceState *dev, Error **errp) static void mb_cpu_initfn(Object *obj) { - MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj); - CPUMBState *env = &cpu->env; - - gdb_register_coprocessor(CPU(cpu), mb_cpu_gdb_read_stack_protect, - mb_cpu_gdb_write_stack_protect, - gdb_find_static_feature("microblaze-stack-protect.xml"), - 0); - - set_float_rounding_mode(float_round_nearest_even, &env->fp_status); - #ifndef CONFIG_USER_ONLY /* Inbound IRQ and FIR lines */ - qdev_init_gpio_in(DEVICE(cpu), microblaze_cpu_set_irq, 2); - qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_dp, "ns_axi_dp", 1); - qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_ip, "ns_axi_ip", 1); - qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_dc, "ns_axi_dc", 1); - qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_ic, "ns_axi_ic", 1); + qdev_init_gpio_in(DEVICE(obj), microblaze_cpu_set_irq, 2); + qdev_init_gpio_in_named(DEVICE(obj), mb_cpu_ns_axi_dp, "ns_axi_dp", 1); + qdev_init_gpio_in_named(DEVICE(obj), mb_cpu_ns_axi_ip, "ns_axi_ip", 1); + qdev_init_gpio_in_named(DEVICE(obj), mb_cpu_ns_axi_dc, "ns_axi_dc", 1); + qdev_init_gpio_in_named(DEVICE(obj), mb_cpu_ns_axi_ic, "ns_axi_ic", 1); #endif + + /* Restricted 'endianness' property is equivalent of 'little-endian' */ + object_property_add_alias(obj, "little-endian", obj, "endianness"); } -static Property mb_properties[] = { +static const Property mb_properties[] = { + /* + * Following properties are used by Xilinx DTS conversion tool + * do not rename them. 
+ */ DEFINE_PROP_UINT32("base-vectors", MicroBlazeCPU, cfg.base_vectors, 0), DEFINE_PROP_BOOL("use-stack-protection", MicroBlazeCPU, cfg.stackprot, false), @@ -387,7 +414,9 @@ static Property mb_properties[] = { DEFINE_PROP_UINT8("pvr", MicroBlazeCPU, cfg.pvr, C_PVR_FULL), DEFINE_PROP_UINT8("pvr-user1", MicroBlazeCPU, cfg.pvr_user1, 0), DEFINE_PROP_UINT32("pvr-user2", MicroBlazeCPU, cfg.pvr_user2, 0), - DEFINE_PROP_END_OF_LIST(), + /* + * End of properties reserved by Xilinx DTS conversion tool. + */ }; static ObjectClass *mb_cpu_class_by_name(const char *cpu_model) @@ -399,28 +428,36 @@ static ObjectClass *mb_cpu_class_by_name(const char *cpu_model) #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps mb_sysemu_ops = { + .has_work = mb_cpu_has_work, .get_phys_page_attrs_debug = mb_cpu_get_phys_page_attrs_debug, }; #endif -#include "hw/core/tcg-cpu-ops.h" - static const TCGCPUOps mb_tcg_ops = { + /* MicroBlaze is always in-order. */ + .guest_default_memory_order = TCG_MO_ALL, + .mttcg_supported = true, + .initialize = mb_tcg_init, + .translate_code = mb_translate_code, + .get_tb_cpu_state = mb_get_tb_cpu_state, .synchronize_from_tb = mb_cpu_synchronize_from_tb, .restore_state_to_opc = mb_restore_state_to_opc, + .mmu_index = mb_cpu_mmu_index, #ifndef CONFIG_USER_ONLY .tlb_fill = mb_cpu_tlb_fill, + .pointer_wrap = cpu_pointer_wrap_uint32, .cpu_exec_interrupt = mb_cpu_exec_interrupt, .cpu_exec_halt = mb_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = mb_cpu_do_interrupt, .do_transaction_failed = mb_cpu_transaction_failed, .do_unaligned_access = mb_cpu_do_unaligned_access, #endif /* !CONFIG_USER_ONLY */ }; -static void mb_cpu_class_init(ObjectClass *oc, void *data) +static void mb_cpu_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); @@ -433,8 +470,6 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data) &mcc->parent_phases); cc->class_by_name = mb_cpu_class_by_name; - cc->has_work = mb_cpu_has_work; - cc->mmu_index = mb_cpu_mmu_index; cc->dump_state = mb_cpu_dump_state; cc->set_pc = mb_cpu_set_pc; cc->get_pc = mb_cpu_get_pc; diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h index 3e5a3e5c605..3ce28b302fe 100644 --- a/target/microblaze/cpu.h +++ b/target/microblaze/cpu.h @@ -21,8 +21,10 @@ #define MICROBLAZE_CPU_H #include "cpu-qom.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" #include "qemu/cpu-float.h" +#include "exec/cpu-interrupt.h" typedef struct CPUArchState CPUMBState; #if !defined(CONFIG_USER_ONLY) @@ -231,8 +233,6 @@ typedef struct CPUArchState CPUMBState; #define STREAM_CONTROL (1 << 3) #define STREAM_NONBLOCK (1 << 4) -#define TARGET_INSN_START_EXTRA_WORDS 1 - /* use-non-secure property masks */ #define USE_NON_SECURE_M_AXI_DP_MASK 0x1 #define USE_NON_SECURE_M_AXI_IP_MASK 0x2 @@ -248,7 +248,7 @@ struct CPUArchState { uint32_t pc; uint32_t msr; /* All bits of MSR except MSR[C] and MSR[CC] */ uint32_t msr_c; /* MSR[C], in low bit; other bits must be 0 */ - target_ulong ear; + uint64_t ear; uint32_t esr; uint32_t fsr; uint32_t btr; @@ -398,6 +398,8 @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val) } void mb_tcg_init(void); +void mb_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); #define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU @@ -407,17 +409,14 @@ void mb_tcg_init(void); #define MMU_USER_IDX 2 /* See NB_MMU_MODES in cpu-defs.h. 
*/ -#include "exec/cpu-all.h" - /* Ensure there is no overlap between the two masks. */ QEMU_BUILD_BUG_ON(MSR_TB_MASK & IFLAGS_TB_MASK); -static inline void cpu_get_tb_cpu_state(CPUMBState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags) +static inline bool mb_cpu_is_big_endian(CPUState *cs) { - *pc = env->pc; - *flags = (env->iflags & IFLAGS_TB_MASK) | (env->msr & MSR_TB_MASK); - *cs_base = (*flags & IMM_FLAG ? env->imm : 0); + MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); + + return !cpu->cfg.endi; } #if !defined(CONFIG_USER_ONLY) diff --git a/target/microblaze/gdbstub.c b/target/microblaze/gdbstub.c index 09d74e164d0..d493681d38d 100644 --- a/target/microblaze/gdbstub.c +++ b/target/microblaze/gdbstub.c @@ -110,14 +110,9 @@ int mb_cpu_gdb_read_stack_protect(CPUState *cs, GByteArray *mem_buf, int n) int mb_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - CPUClass *cc = CPU_GET_CLASS(cs); CPUMBState *env = cpu_env(cs); uint32_t tmp; - if (n > cc->gdb_num_core_regs) { - return 0; - } - tmp = ldl_p(mem_buf); switch (n) { diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c index 5d3259ce316..ef0e2f973fa 100644 --- a/target/microblaze/helper.c +++ b/target/microblaze/helper.c @@ -20,12 +20,57 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" +#include "accel/tcg/cpu-mmu-index.h" #include "exec/page-protection.h" +#include "exec/target_page.h" #include "qemu/host-utils.h" #include "exec/log.h" +#include "exec/helper-proto.h" + + +G_NORETURN +static void mb_unaligned_access_internal(CPUState *cs, uint64_t addr, + uintptr_t retaddr) +{ + CPUMBState *env = cpu_env(cs); + uint32_t esr, iflags; + + /* Recover the pc and iflags from the corresponding insn_start. */ + cpu_restore_state(cs, retaddr); + iflags = env->iflags; + + qemu_log_mask(CPU_LOG_INT, + "Unaligned access addr=0x%" PRIx64 " pc=%x iflags=%x\n", + addr, env->pc, iflags); + + esr = ESR_EC_UNALIGNED_DATA; + if (likely(iflags & ESR_ESS_FLAG)) { + esr |= iflags & ESR_ESS_MASK; + } else { + qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n"); + } + + env->ear = addr; + env->esr = esr; + cs->exception_index = EXCP_HW_EXCP; + cpu_loop_exit(cs); +} + +void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + mb_unaligned_access_internal(cs, addr, retaddr); +} #ifndef CONFIG_USER_ONLY + +void HELPER(unaligned_access)(CPUMBState *env, uint64_t addr) +{ + mb_unaligned_access_internal(env_cpu(env), addr, GETPC()); +} + static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu, MMUAccessType access_type) { @@ -267,31 +312,3 @@ bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request) } #endif /* !CONFIG_USER_ONLY */ - -void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr) -{ - MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); - uint32_t esr, iflags; - - /* Recover the pc and iflags from the corresponding insn_start. 
*/ - cpu_restore_state(cs, retaddr); - iflags = cpu->env.iflags; - - qemu_log_mask(CPU_LOG_INT, - "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n", - (target_ulong)addr, cpu->env.pc, iflags); - - esr = ESR_EC_UNALIGNED_DATA; - if (likely(iflags & ESR_ESS_FLAG)) { - esr |= iflags & ESR_ESS_MASK; - } else { - qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n"); - } - - cpu->env.ear = addr; - cpu->env.esr = esr; - cs->exception_index = EXCP_HW_EXCP; - cpu_loop_exit(cs); -} diff --git a/target/microblaze/helper.h b/target/microblaze/helper.h index f740835fcb0..ef4fad9b91e 100644 --- a/target/microblaze/helper.h +++ b/target/microblaze/helper.h @@ -20,12 +20,22 @@ DEF_HELPER_FLAGS_3(fcmp_ne, TCG_CALL_NO_WG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(fcmp_ge, TCG_CALL_NO_WG, i32, env, i32, i32) DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_NO_RWG_SE, i32, i32, i32) -#if !defined(CONFIG_USER_ONLY) -DEF_HELPER_FLAGS_3(mmu_read, TCG_CALL_NO_RWG, i32, env, i32, i32) -DEF_HELPER_FLAGS_4(mmu_write, TCG_CALL_NO_RWG, void, env, i32, i32, i32) -#endif - DEF_HELPER_FLAGS_2(stackprot, TCG_CALL_NO_WG, void, env, tl) - DEF_HELPER_FLAGS_2(get, TCG_CALL_NO_RWG, i32, i32, i32) DEF_HELPER_FLAGS_3(put, TCG_CALL_NO_RWG, void, i32, i32, i32) + +#ifndef CONFIG_USER_ONLY +DEF_HELPER_FLAGS_3(mmu_read, TCG_CALL_NO_RWG, i32, env, i32, i32) +DEF_HELPER_FLAGS_4(mmu_write, TCG_CALL_NO_RWG, void, env, i32, i32, i32) +DEF_HELPER_FLAGS_2(unaligned_access, TCG_CALL_NO_WG, noreturn, env, i64) +DEF_HELPER_FLAGS_2(lbuea, TCG_CALL_NO_WG, i32, env, i64) +DEF_HELPER_FLAGS_2(lhuea_be, TCG_CALL_NO_WG, i32, env, i64) +DEF_HELPER_FLAGS_2(lhuea_le, TCG_CALL_NO_WG, i32, env, i64) +DEF_HELPER_FLAGS_2(lwea_be, TCG_CALL_NO_WG, i32, env, i64) +DEF_HELPER_FLAGS_2(lwea_le, TCG_CALL_NO_WG, i32, env, i64) +DEF_HELPER_FLAGS_3(sbea, TCG_CALL_NO_WG, void, env, i32, i64) +DEF_HELPER_FLAGS_3(shea_be, TCG_CALL_NO_WG, void, env, i32, i64) +DEF_HELPER_FLAGS_3(shea_le, TCG_CALL_NO_WG, void, env, i32, i64) +DEF_HELPER_FLAGS_3(swea_be, TCG_CALL_NO_WG, void, env, i32, i64) +DEF_HELPER_FLAGS_3(swea_le, TCG_CALL_NO_WG, void, env, i32, i64) +#endif diff --git a/target/microblaze/machine.c b/target/microblaze/machine.c index 51705e4f5c9..a4cf38dc891 100644 --- a/target/microblaze/machine.c +++ b/target/microblaze/machine.c @@ -93,7 +93,7 @@ static const VMStateDescription vmstate_env = { }; static const VMStateField vmstate_cpu_fields[] = { - VMSTATE_CPU(), + VMSTATE_STRUCT(parent_obj, MicroBlazeCPU, 0, vmstate_cpu_common, CPUState), VMSTATE_STRUCT(env, MicroBlazeCPU, 1, vmstate_env, CPUMBState), VMSTATE_END_OF_LIST() }; diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c index 2423ac6172d..8703ff5c657 100644 --- a/target/microblaze/mmu.c +++ b/target/microblaze/mmu.c @@ -21,8 +21,10 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" +#include "accel/tcg/cpu-mmu-index.h" #include "exec/page-protection.h" +#include "exec/target_page.h" static unsigned int tlb_decode_size(unsigned int f) { @@ -170,7 +172,8 @@ unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu, } done: qemu_log_mask(CPU_LOG_MMU, - "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n", + "MMU vaddr=0x" TARGET_FMT_lx + " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n", vaddr, rw, tlb_wr, tlb_ex, hit); return hit; } diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c index f6378030b7a..b8365b3b1d2 100644 --- a/target/microblaze/op_helper.c +++ b/target/microblaze/op_helper.c 
@@ -23,8 +23,7 @@ #include "cpu.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "fpu/softfloat.h" void helper_put(uint32_t id, uint32_t ctrl, uint32_t data) @@ -383,6 +382,8 @@ void helper_stackprot(CPUMBState *env, target_ulong addr) } #if !defined(CONFIG_USER_ONLY) +#include "system/memory.h" + /* Writes/reads to the MMU's special regs end up here. */ uint32_t helper_mmu_read(CPUMBState *env, uint32_t ext, uint32_t rn) { @@ -394,38 +395,90 @@ void helper_mmu_write(CPUMBState *env, uint32_t ext, uint32_t rn, uint32_t v) mmu_write(env, ext, rn, v); } +static void mb_transaction_failed_internal(CPUState *cs, hwaddr physaddr, + uint64_t addr, unsigned size, + MMUAccessType access_type, + uintptr_t retaddr) +{ + CPUMBState *env = cpu_env(cs); + MicroBlazeCPU *cpu = env_archcpu(env); + const char *access_name = "INVALID"; + bool take = env->msr & MSR_EE; + uint32_t esr = ESR_EC_DATA_BUS; + + switch (access_type) { + case MMU_INST_FETCH: + access_name = "INST_FETCH"; + esr = ESR_EC_INSN_BUS; + take &= cpu->cfg.iopb_bus_exception; + break; + case MMU_DATA_LOAD: + access_name = "DATA_LOAD"; + take &= cpu->cfg.dopb_bus_exception; + break; + case MMU_DATA_STORE: + access_name = "DATA_STORE"; + take &= cpu->cfg.dopb_bus_exception; + break; + } + + qemu_log_mask(CPU_LOG_INT, "Transaction failed: addr 0x%" PRIx64 + "physaddr 0x" HWADDR_FMT_plx " size %d access-type %s (%s)\n", + addr, physaddr, size, access_name, + take ? "TAKEN" : "DROPPED"); + + if (take) { + env->esr = esr; + env->ear = addr; + cs->exception_index = EXCP_HW_EXCP; + cpu_loop_exit_restore(cs, retaddr); + } +} + void mb_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, MemTxResult response, uintptr_t retaddr) { - MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); - CPUMBState *env = &cpu->env; + mb_transaction_failed_internal(cs, physaddr, addr, size, + access_type, retaddr); +} - qemu_log_mask(CPU_LOG_INT, "Transaction failed: vaddr 0x%" VADDR_PRIx - " physaddr 0x" HWADDR_FMT_plx " size %d access type %s\n", - addr, physaddr, size, - access_type == MMU_INST_FETCH ? "INST_FETCH" : - (access_type == MMU_DATA_LOAD ? 
"DATA_LOAD" : "DATA_STORE")); +#define LD_EA(NAME, TYPE, FUNC) \ +uint32_t HELPER(NAME)(CPUMBState *env, uint64_t ea) \ +{ \ + CPUState *cs = env_cpu(env); \ + MemTxResult txres; \ + TYPE ret = FUNC(cs->as, ea, MEMTXATTRS_UNSPECIFIED, &txres); \ + if (unlikely(txres != MEMTX_OK)) { \ + mb_transaction_failed_internal(cs, ea, ea, sizeof(TYPE), \ + MMU_DATA_LOAD, GETPC()); \ + } \ + return ret; \ +} - if (!(env->msr & MSR_EE)) { - return; - } +LD_EA(lbuea, uint8_t, address_space_ldub) +LD_EA(lhuea_be, uint16_t, address_space_lduw_be) +LD_EA(lhuea_le, uint16_t, address_space_lduw_le) +LD_EA(lwea_be, uint32_t, address_space_ldl_be) +LD_EA(lwea_le, uint32_t, address_space_ldl_le) + +#define ST_EA(NAME, TYPE, FUNC) \ +void HELPER(NAME)(CPUMBState *env, uint32_t data, uint64_t ea) \ +{ \ + CPUState *cs = env_cpu(env); \ + MemTxResult txres; \ + FUNC(cs->as, ea, data, MEMTXATTRS_UNSPECIFIED, &txres); \ + if (unlikely(txres != MEMTX_OK)) { \ + mb_transaction_failed_internal(cs, ea, ea, sizeof(TYPE), \ + MMU_DATA_STORE, GETPC()); \ + } \ +} - if (access_type == MMU_INST_FETCH) { - if (!cpu->cfg.iopb_bus_exception) { - return; - } - env->esr = ESR_EC_INSN_BUS; - } else { - if (!cpu->cfg.dopb_bus_exception) { - return; - } - env->esr = ESR_EC_DATA_BUS; - } +ST_EA(sbea, uint8_t, address_space_stb) +ST_EA(shea_be, uint16_t, address_space_stw_be) +ST_EA(shea_le, uint16_t, address_space_stw_le) +ST_EA(swea_be, uint32_t, address_space_stl_be) +ST_EA(swea_le, uint32_t, address_space_stl_le) - env->ear = addr; - cs->exception_index = EXCP_HW_EXCP; - cpu_loop_exit_restore(cs, retaddr); -} #endif diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c index 4beaf69e76a..5098a1db4dc 100644 --- a/target/microblaze/translate.c +++ b/target/microblaze/translate.c @@ -20,12 +20,13 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "tcg/tcg-op.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "exec/translator.h" +#include "exec/translation-block.h" +#include "exec/target_page.h" #include "qemu/qemu-print.h" #include "exec/log.h" @@ -62,9 +63,6 @@ typedef struct DisasContext { DisasContextBase base; const MicroBlazeCPUConfig *cfg; - TCGv_i32 r0; - bool r0_set; - /* Decoder. */ uint32_t ext_imm; unsigned int tb_flags; @@ -178,14 +176,7 @@ static TCGv_i32 reg_for_read(DisasContext *dc, int reg) if (likely(reg != 0)) { return cpu_R[reg]; } - if (!dc->r0_set) { - if (dc->r0 == NULL) { - dc->r0 = tcg_temp_new_i32(); - } - tcg_gen_movi_i32(dc->r0, 0); - dc->r0_set = true; - } - return dc->r0; + return tcg_constant_i32(0); } static TCGv_i32 reg_for_write(DisasContext *dc, int reg) @@ -193,10 +184,7 @@ static TCGv_i32 reg_for_write(DisasContext *dc, int reg) if (likely(reg != 0)) { return cpu_R[reg]; } - if (dc->r0 == NULL) { - dc->r0 = tcg_temp_new_i32(); - } - return dc->r0; + return tcg_temp_new_i32(); } static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects, @@ -309,11 +297,7 @@ static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) /* Input and output carry. */ static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { - TCGv_i32 zero = tcg_constant_i32(0); - TCGv_i32 tmp = tcg_temp_new_i32(); - - tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero); - tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero); + tcg_gen_addcio_i32(out, cpu_msr_c, ina, inb, cpu_msr_c); } /* Input carry, but no output carry. 
*/ @@ -542,12 +526,10 @@ static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) /* Input and output carry. */ static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { - TCGv_i32 zero = tcg_constant_i32(0); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_not_i32(tmp, ina); - tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero); - tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero); + tcg_gen_addcio_i32(out, cpu_msr_c, tmp, inb, cpu_msr_c); } /* No input or output carry. */ @@ -624,19 +606,18 @@ DO_TYPEBI(xori, false, tcg_gen_xori_i32) static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb) { - TCGv ret = tcg_temp_new(); + TCGv ret; /* If any of the regs is r0, set t to the value of the other reg. */ if (ra && rb) { - TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]); - tcg_gen_extu_i32_tl(ret, tmp); + ret = tcg_temp_new_i32(); + tcg_gen_add_i32(ret, cpu_R[ra], cpu_R[rb]); } else if (ra) { - tcg_gen_extu_i32_tl(ret, cpu_R[ra]); + ret = cpu_R[ra]; } else if (rb) { - tcg_gen_extu_i32_tl(ret, cpu_R[rb]); + ret = cpu_R[rb]; } else { - tcg_gen_movi_tl(ret, 0); + ret = tcg_constant_i32(0); } if ((ra == 1 || rb == 1) && dc->cfg->stackprot) { @@ -647,15 +628,16 @@ static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb) static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm) { - TCGv ret = tcg_temp_new(); + TCGv ret; /* If any of the regs is r0, set t to the value of the other reg. */ - if (ra) { - TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_addi_i32(tmp, cpu_R[ra], imm); - tcg_gen_extu_i32_tl(ret, tmp); + if (ra && imm) { + ret = tcg_temp_new_i32(); + tcg_gen_addi_i32(ret, cpu_R[ra], imm); + } else if (ra) { + ret = cpu_R[ra]; } else { - tcg_gen_movi_tl(ret, (uint32_t)imm); + ret = tcg_constant_i32(imm); } if (ra == 1 && dc->cfg->stackprot) { @@ -665,23 +647,23 @@ static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm) } #ifndef CONFIG_USER_ONLY -static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb) +static TCGv_i64 compute_ldst_addr_ea(DisasContext *dc, int ra, int rb) { int addr_size = dc->cfg->addr_size; - TCGv ret = tcg_temp_new(); + TCGv_i64 ret = tcg_temp_new_i64(); if (addr_size == 32 || ra == 0) { if (rb) { - tcg_gen_extu_i32_tl(ret, cpu_R[rb]); + tcg_gen_extu_i32_i64(ret, cpu_R[rb]); } else { - tcg_gen_movi_tl(ret, 0); + return tcg_constant_i64(0); } } else { if (rb) { tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]); } else { - tcg_gen_extu_i32_tl(ret, cpu_R[ra]); - tcg_gen_shli_tl(ret, ret, 32); + tcg_gen_extu_i32_i64(ret, cpu_R[ra]); + tcg_gen_shli_i64(ret, ret, 32); } if (addr_size < 64) { /* Mask off out of range bits. */ @@ -705,13 +687,34 @@ static void record_unaligned_ess(DisasContext *dc, int rd, tcg_set_insn_start_param(dc->base.insn_start, 1, iflags); } + +static void gen_alignment_check_ea(DisasContext *dc, TCGv_i64 ea, int rb, + int rd, MemOp size, bool store) +{ + if (rb && (dc->tb_flags & MSR_EE) && dc->cfg->unaligned_exceptions) { + TCGLabel *over = gen_new_label(); + + record_unaligned_ess(dc, rd, size, store); + + tcg_gen_brcondi_i64(TCG_COND_TSTEQ, ea, (1 << size) - 1, over); + gen_helper_unaligned_access(tcg_env, ea); + gen_set_label(over); + } +} #endif +static inline MemOp mo_endian(DisasContext *dc) +{ + return dc->cfg->endi ? 
MO_LE : MO_BE; +} + static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop, int mem_index, bool rev) { MemOp size = mop & MO_SIZE; + mop |= mo_endian(dc); + /* * When doing reverse accesses we need to do two things. * @@ -763,10 +766,11 @@ static bool trans_lbuea(DisasContext *dc, arg_typea *arg) return true; } #ifdef CONFIG_USER_ONLY - return true; + g_assert_not_reached(); #else - TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); - return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false); + TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); + gen_helper_lbuea(reg_for_write(dc, arg->rd), tcg_env, addr); + return true; #endif } @@ -779,13 +783,13 @@ static bool trans_lbui(DisasContext *dc, arg_typeb *arg) static bool trans_lhu(DisasContext *dc, arg_typea *arg) { TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); - return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false); + return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false); } static bool trans_lhur(DisasContext *dc, arg_typea *arg) { TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); - return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true); + return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, true); } static bool trans_lhuea(DisasContext *dc, arg_typea *arg) @@ -794,29 +798,32 @@ static bool trans_lhuea(DisasContext *dc, arg_typea *arg) return true; } #ifdef CONFIG_USER_ONLY - return true; + g_assert_not_reached(); #else - TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); - return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false); + TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); + gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, false); + (mo_endian(dc) == MO_BE ? gen_helper_lhuea_be : gen_helper_lhuea_le) + (reg_for_write(dc, arg->rd), tcg_env, addr); + return true; #endif } static bool trans_lhui(DisasContext *dc, arg_typeb *arg) { TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); - return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false); + return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false); } static bool trans_lw(DisasContext *dc, arg_typea *arg) { TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); - return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false); + return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false); } static bool trans_lwr(DisasContext *dc, arg_typea *arg) { TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); - return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true); + return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, true); } static bool trans_lwea(DisasContext *dc, arg_typea *arg) @@ -825,17 +832,20 @@ static bool trans_lwea(DisasContext *dc, arg_typea *arg) return true; } #ifdef CONFIG_USER_ONLY - return true; + g_assert_not_reached(); #else - TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); - return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false); + TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); + gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, false); + (mo_endian(dc) == MO_BE ? 
gen_helper_lwea_be : gen_helper_lwea_le) + (reg_for_write(dc, arg->rd), tcg_env, addr); + return true; #endif } static bool trans_lwi(DisasContext *dc, arg_typeb *arg) { TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); - return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false); + return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false); } static bool trans_lwx(DisasContext *dc, arg_typea *arg) @@ -845,7 +855,8 @@ static bool trans_lwx(DisasContext *dc, arg_typea *arg) /* lwx does not throw unaligned access errors, so force alignment */ tcg_gen_andi_tl(addr, addr, ~3); - tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL); + tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, + mo_endian(dc) | MO_UL); tcg_gen_mov_tl(cpu_res_addr, addr); if (arg->rd) { @@ -862,6 +873,8 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop, { MemOp size = mop & MO_SIZE; + mop |= mo_endian(dc); + /* * When doing reverse accesses we need to do two things. * @@ -913,10 +926,11 @@ static bool trans_sbea(DisasContext *dc, arg_typea *arg) return true; } #ifdef CONFIG_USER_ONLY - return true; + g_assert_not_reached(); #else - TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); - return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false); + TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); + gen_helper_sbea(tcg_env, reg_for_read(dc, arg->rd), addr); + return true; #endif } @@ -929,13 +943,13 @@ static bool trans_sbi(DisasContext *dc, arg_typeb *arg) static bool trans_sh(DisasContext *dc, arg_typea *arg) { TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); - return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false); + return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false); } static bool trans_shr(DisasContext *dc, arg_typea *arg) { TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); - return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true); + return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, true); } static bool trans_shea(DisasContext *dc, arg_typea *arg) @@ -944,29 +958,32 @@ static bool trans_shea(DisasContext *dc, arg_typea *arg) return true; } #ifdef CONFIG_USER_ONLY - return true; + g_assert_not_reached(); #else - TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); - return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false); + TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); + gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, true); + (mo_endian(dc) == MO_BE ? 
gen_helper_shea_be : gen_helper_shea_le) + (tcg_env, reg_for_read(dc, arg->rd), addr); + return true; #endif } static bool trans_shi(DisasContext *dc, arg_typeb *arg) { TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); - return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false); + return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false); } static bool trans_sw(DisasContext *dc, arg_typea *arg) { TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); - return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false); + return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false); } static bool trans_swr(DisasContext *dc, arg_typea *arg) { TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); - return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true); + return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, true); } static bool trans_swea(DisasContext *dc, arg_typea *arg) @@ -975,17 +992,20 @@ static bool trans_swea(DisasContext *dc, arg_typea *arg) return true; } #ifdef CONFIG_USER_ONLY - return true; + g_assert_not_reached(); #else - TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); - return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false); + TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); + gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, true); + (mo_endian(dc) == MO_BE ? gen_helper_swea_be : gen_helper_swea_le) + (tcg_env, reg_for_read(dc, arg->rd), addr); + return true; #endif } static bool trans_swi(DisasContext *dc, arg_typeb *arg) { TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); - return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false); + return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false); } static bool trans_swx(DisasContext *dc, arg_typea *arg) @@ -1014,7 +1034,7 @@ static bool trans_swx(DisasContext *dc, arg_typea *arg) tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val, reg_for_write(dc, arg->rd), - dc->mem_index, MO_TEUL); + dc->mem_index, mo_endian(dc) | MO_UL); tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail); @@ -1602,8 +1622,6 @@ static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs) dc->cfg = &cpu->cfg; dc->tb_flags = dc->base.tb->flags; dc->ext_imm = dc->base.tb->cs_base; - dc->r0 = NULL; - dc->r0_set = false; dc->mem_index = cpu_mmu_index(cs, false); dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER; dc->jmp_dest = -1; @@ -1636,16 +1654,12 @@ static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs) dc->tb_flags_to_set = 0; - ir = translator_ldl(cpu_env(cs), &dc->base, dc->base.pc_next); + ir = translator_ldl_swap(cpu_env(cs), &dc->base, dc->base.pc_next, + mb_cpu_is_big_endian(cs) != TARGET_BIG_ENDIAN); if (!decode(dc, ir)) { trap_illegal(dc, true); } - if (dc->r0) { - dc->r0 = NULL; - dc->r0_set = false; - } - /* Discard the imm global when its contents cannot be used. 
*/ if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) { tcg_gen_discard_i32(cpu_imm); @@ -1778,8 +1792,8 @@ static const TranslatorOps mb_tr_ops = { .tb_stop = mb_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void mb_translate_code(CPUState *cpu, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc; translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base); @@ -1823,7 +1837,7 @@ void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags) } qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n" - "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n", + "ear=0x%" PRIx64 " slr=0x%x shr=0x%x\n", env->esr, env->fsr, env->btr, env->edr, env->ear, env->slr, env->shr); diff --git a/target/mips/cpu-defs.c.inc b/target/mips/cpu-defs.c.inc index fbf787d8ce1..d93b9d341ac 100644 --- a/target/mips/cpu-defs.c.inc +++ b/target/mips/cpu-defs.c.inc @@ -314,7 +314,7 @@ const mips_def_t mips_defs[] = (0x3fe << CP0SRSC4_SRS14) | (0x3fe << CP0SRSC4_SRS13), .SEGBITS = 32, .PABITS = 32, - .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_MT, + .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP, .mmu_type = MMU_TYPE_R4000, }, { @@ -478,14 +478,15 @@ const mips_def_t mips_defs[] = (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), .CP0_Config2 = MIPS_CONFIG2, - .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_BP) | (1 << CP0C3_BI) | + .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_MSAP) | + (1 << CP0C3_BP) | (1 << CP0C3_BI) | (2 << CP0C3_ISA) | (1 << CP0C3_ULRI) | (1 << CP0C3_RXI) | (1U << CP0C3_M), .CP0_Config4 = MIPS_CONFIG4 | (0xfc << CP0C4_KScrExist) | (3 << CP0C4_IE) | (1U << CP0C4_M), .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_LLB), - .CP0_Config5_rw_bitmask = (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) | - (1 << CP0C5_UFE), + .CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_UFE) | + (1 << CP0C5_FRE) | (1 << CP0C5_SBRI), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 0, .SYNCI_Step = 32, @@ -499,6 +500,7 @@ const mips_def_t mips_defs[] = (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), .CP1_fcr31_rw_bitmask = 0x0103FFFF, + .MSAIR = 0x03 << MSAIR_ProcID, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R6 | ASE_MICROMIPS, @@ -541,7 +543,7 @@ const mips_def_t mips_defs[] = .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R6 | ISA_NANOMIPS32 | - ASE_DSP | ASE_DSP_R2 | ASE_DSP_R3 | ASE_MT, + ASE_DSP | ASE_DSP_R2 | ASE_DSP_R3, .mmu_type = MMU_TYPE_R4000, }, #if defined(TARGET_MIPS64) @@ -661,7 +663,7 @@ const mips_def_t mips_defs[] = .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 40, .PABITS = 36, - .insn_flags = CPU_MIPS64R1 | ASE_MIPS3D, + .insn_flags = CPU_MIPS64R1, .mmu_type = MMU_TYPE_R4000, }, { @@ -690,7 +692,7 @@ const mips_def_t mips_defs[] = .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 42, .PABITS = 36, - .insn_flags = CPU_MIPS64R2 | ASE_MIPS3D, + .insn_flags = CPU_MIPS64R2, .mmu_type = MMU_TYPE_R4000, }, { @@ -754,8 +756,9 @@ const mips_def_t mips_defs[] = (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt), .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (3 << CP0C4_IE) | (1 << CP0C4_AE) | (0xfc << CP0C4_KScrExist), - .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_VP) | - (1 << CP0C5_LLB) | (1 << CP0C5_MRP) | (3 << CP0C5_GI), + .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_CRCP) | (1 << CP0C5_XNP) | + (1 << CP0C5_VP) | (1 << 
CP0C5_LLB) | (1 << CP0C5_MRP) | + (3 << CP0C5_GI), .CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) | (1 << CP0C5_UFE), .CP0_LLAddr_rw_bitmask = 0, @@ -794,8 +797,9 @@ const mips_def_t mips_defs[] = (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt), .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (3 << CP0C4_IE) | (1 << CP0C4_AE) | (0xfc << CP0C4_KScrExist), - .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_VP) | - (1 << CP0C5_LLB) | (1 << CP0C5_MRP) | (3 << CP0C5_GI), + .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_CRCP) | (1 << CP0C5_XNP) | + (1 << CP0C5_VP) | (1 << CP0C5_LLB) | (1 << CP0C5_MRP) | + (3 << CP0C5_GI), .CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) | (1 << CP0C5_UFE), .CP0_LLAddr_rw_bitmask = 0, diff --git a/target/mips/cpu-param.h b/target/mips/cpu-param.h index 6f6ac1688f8..58f450827f7 100644 --- a/target/mips/cpu-param.h +++ b/target/mips/cpu-param.h @@ -1,17 +1,12 @@ /* * MIPS cpu parameters for qemu. * - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #ifndef MIPS_CPU_PARAM_H #define MIPS_CPU_PARAM_H -#ifdef TARGET_MIPS64 -# define TARGET_LONG_BITS 64 -#else -# define TARGET_LONG_BITS 32 -#endif #ifdef TARGET_ABI_MIPSN64 #define TARGET_PHYS_ADDR_SPACE_BITS 48 #define TARGET_VIRT_ADDR_SPACE_BITS 48 @@ -23,13 +18,8 @@ # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif #endif -#ifdef CONFIG_USER_ONLY #define TARGET_PAGE_BITS 12 -#else -#define TARGET_PAGE_BITS_VARY -#define TARGET_PAGE_BITS_MIN 12 -#endif -#define TCG_GUEST_DEFAULT_MO (0) +#define TARGET_INSN_START_EXTRA_WORDS 2 #endif diff --git a/target/mips/cpu.c b/target/mips/cpu.c index 89655b1900f..1f6c41fd340 100644 --- a/target/mips/cpu.c +++ b/target/mips/cpu.c @@ -27,13 +27,14 @@ #include "internal.h" #include "kvm_mips.h" #include "qemu/module.h" -#include "sysemu/kvm.h" -#include "sysemu/qtest.h" -#include "exec/exec-all.h" +#include "system/kvm.h" +#include "system/qtest.h" #include "hw/qdev-properties.h" #include "hw/qdev-clock.h" -#include "semihosting/semihost.h" #include "fpu_helper.h" +#ifndef CONFIG_USER_ONLY +#include "semihosting/semihost.h" +#endif const char regnames[32][3] = { "r0", "at", "v0", "v1", "a0", "a1", "a2", "a3", @@ -132,6 +133,7 @@ static vaddr mips_cpu_get_pc(CPUState *cs) return cpu->env.active_tc.PC; } +#if !defined(CONFIG_USER_ONLY) static bool mips_cpu_has_work(CPUState *cs) { CPUMIPSState *env = cpu_env(cs); @@ -177,11 +179,7 @@ static bool mips_cpu_has_work(CPUState *cs) } return has_work; } - -static int mips_cpu_mmu_index(CPUState *cs, bool ifunc) -{ - return mips_env_mmu_index(cpu_env(cs)); -} +#endif /* !CONFIG_USER_ONLY */ #include "cpu-defs.c.inc" @@ -200,10 +198,8 @@ static void mips_cpu_reset_hold(Object *obj, ResetType type) /* Reset registers to their default values */ env->CP0_PRid = env->cpu_model->CP0_PRid; - env->CP0_Config0 = env->cpu_model->CP0_Config0; -#if TARGET_BIG_ENDIAN - env->CP0_Config0 |= (1 << CP0C0_BE); -#endif + env->CP0_Config0 = deposit32(env->cpu_model->CP0_Config0, + CP0C0_BE, 1, cpu->is_big_endian); env->CP0_Config1 = env->cpu_model->CP0_Config1; env->CP0_Config2 = env->cpu_model->CP0_Config2; env->CP0_Config3 = env->cpu_model->CP0_Config3; @@ -409,18 +405,17 @@ static void mips_cpu_reset_hold(Object *obj, ResetType type) } msa_reset(env); + fp_reset(env); compute_hflags(env); - restore_fp_status(env); restore_pamask(env); cs->exception_index = EXCP_NONE; +#ifndef CONFIG_USER_ONLY if (semihosting_get_argc()) { /* UHI interface 
can be used to obtain argc and argv */ env->active_tc.gpr[4] = -1; } - -#ifndef CONFIG_USER_ONLY if (kvm_enabled()) { kvm_mips_reset_vcpu(cpu); } @@ -430,13 +425,13 @@ static void mips_cpu_reset_hold(Object *obj, ResetType type) static void mips_cpu_disas_set_info(CPUState *s, disassemble_info *info) { if (!(cpu_env(s)->insn_flags & ISA_NANOMIPS32)) { -#if TARGET_BIG_ENDIAN - info->print_insn = print_insn_big_mips; -#else - info->print_insn = print_insn_little_mips; -#endif + info->endian = TARGET_BIG_ENDIAN ? BFD_ENDIAN_BIG + : BFD_ENDIAN_LITTLE; + info->print_insn = TARGET_BIG_ENDIAN ? print_insn_big_mips + : print_insn_little_mips; } else { info->print_insn = print_insn_nanomips; + info->endian = BFD_ENDIAN_LITTLE; } } @@ -536,26 +531,60 @@ static ObjectClass *mips_cpu_class_by_name(const char *cpu_model) #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps mips_sysemu_ops = { + .has_work = mips_cpu_has_work, .get_phys_page_debug = mips_cpu_get_phys_page_debug, .legacy_vmsd = &vmstate_mips_cpu, }; #endif +static const Property mips_cpu_properties[] = { + DEFINE_PROP_BOOL("big-endian", MIPSCPU, is_big_endian, TARGET_BIG_ENDIAN), +}; + #ifdef CONFIG_TCG -#include "hw/core/tcg-cpu-ops.h" -/* - * NB: cannot be const, as some elements are changed for specific - * mips hardware (see hw/mips/jazz.c). - */ +#include "accel/tcg/cpu-ops.h" + +static int mips_cpu_mmu_index(CPUState *cs, bool ifunc) +{ + return mips_env_mmu_index(cpu_env(cs)); +} + +static TCGTBCPUState mips_get_tb_cpu_state(CPUState *cs) +{ + CPUMIPSState *env = cpu_env(cs); + + return (TCGTBCPUState){ + .pc = env->active_tc.PC, + .flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK | + MIPS_HFLAG_HWRENA_ULR), + }; +} + +#ifndef CONFIG_USER_ONLY +static vaddr mips_pointer_wrap(CPUState *cs, int mmu_idx, + vaddr result, vaddr base) +{ + return cpu_env(cs)->hflags & MIPS_HFLAG_AWRAP ? 
(int32_t)result : result; +} +#endif + static const TCGCPUOps mips_tcg_ops = { + .mttcg_supported = TARGET_LONG_BITS == 32, + .guest_default_memory_order = 0, + .initialize = mips_tcg_init, + .translate_code = mips_translate_code, + .get_tb_cpu_state = mips_get_tb_cpu_state, .synchronize_from_tb = mips_cpu_synchronize_from_tb, .restore_state_to_opc = mips_restore_state_to_opc, + .mmu_index = mips_cpu_mmu_index, #if !defined(CONFIG_USER_ONLY) .tlb_fill = mips_cpu_tlb_fill, + .pointer_wrap = mips_pointer_wrap, .cpu_exec_interrupt = mips_cpu_exec_interrupt, .cpu_exec_halt = mips_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = mips_cpu_do_interrupt, .do_transaction_failed = mips_cpu_do_transaction_failed, .do_unaligned_access = mips_cpu_do_unaligned_access, @@ -564,21 +593,20 @@ static const TCGCPUOps mips_tcg_ops = { }; #endif /* CONFIG_TCG */ -static void mips_cpu_class_init(ObjectClass *c, void *data) +static void mips_cpu_class_init(ObjectClass *c, const void *data) { MIPSCPUClass *mcc = MIPS_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); DeviceClass *dc = DEVICE_CLASS(c); ResettableClass *rc = RESETTABLE_CLASS(c); + device_class_set_props(dc, mips_cpu_properties); device_class_set_parent_realize(dc, mips_cpu_realizefn, &mcc->parent_realize); resettable_class_set_parent_phases(rc, NULL, mips_cpu_reset_hold, NULL, &mcc->parent_phases); cc->class_by_name = mips_cpu_class_by_name; - cc->has_work = mips_cpu_has_work; - cc->mmu_index = mips_cpu_mmu_index; cc->dump_state = mips_cpu_dump_state; cc->set_pc = mips_cpu_set_pc; cc->get_pc = mips_cpu_get_pc; @@ -606,7 +634,7 @@ static const TypeInfo mips_cpu_type_info = { .class_init = mips_cpu_class_init, }; -static void mips_cpu_cpudef_class_init(ObjectClass *oc, void *data) +static void mips_cpu_cpudef_class_init(ObjectClass *oc, const void *data) { MIPSCPUClass *mcc = MIPS_CPU_CLASS(oc); mcc->cpu_def = data; @@ -619,10 +647,10 @@ static void mips_register_cpudef_type(const struct mips_def_t *def) .name = typename, .parent = TYPE_MIPS_CPU, .class_init = mips_cpu_cpudef_class_init, - .class_data = (void *)def, + .class_data = def, }; - type_register(&ti); + type_register_static(&ti); g_free(typename); } @@ -639,12 +667,15 @@ static void mips_cpu_register_types(void) type_init(mips_cpu_register_types) /* Could be used by generic CPU object */ -MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk) +MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk, + bool is_big_endian) { DeviceState *cpu; - cpu = DEVICE(object_new(cpu_type)); + cpu = qdev_new(cpu_type); qdev_connect_clock_in(cpu, "clk-in", cpu_refclk); + object_property_set_bool(OBJECT(cpu), "big-endian", is_big_endian, + &error_abort); qdev_realize(cpu, NULL, &error_abort); return MIPS_CPU(cpu); diff --git a/target/mips/cpu.h b/target/mips/cpu.h index 3e906a175a3..5cd4c6c818d 100644 --- a/target/mips/cpu.h +++ b/target/mips/cpu.h @@ -2,9 +2,11 @@ #define MIPS_CPU_H #include "cpu-qom.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #ifndef CONFIG_USER_ONLY -#include "exec/memory.h" +#include "system/memory.h" #endif #include "fpu/softfloat-types.h" #include "hw/clock.h" @@ -98,8 +100,6 @@ struct CPUMIPSFPUContext { #define FP_UNIMPLEMENTED 32 }; -#define TARGET_INSN_START_EXTRA_WORDS 2 - typedef struct CPUMIPSMVPContext CPUMIPSMVPContext; struct CPUMIPSMVPContext { int32_t CP0_MVPControl; @@ -530,7 +530,6 @@ typedef struct CPUArchState { CPUMIPSFPUContext active_fpu; uint32_t current_tc; - uint32_t 
current_fpu; uint32_t SEGBITS; uint32_t PABITS; @@ -1209,6 +1208,9 @@ struct ArchCPU { Clock *clock; Clock *count_div; /* Divider for CP0_Count clock */ + + /* Properties */ + bool is_big_endian; }; /** @@ -1254,8 +1256,6 @@ static inline int mips_env_mmu_index(CPUMIPSState *env) return hflags_mmu_index(env->hflags); } -#include "exec/cpu-all.h" - /* Exceptions */ enum { EXCP_NONE = -1, @@ -1316,6 +1316,12 @@ bool cpu_type_supports_cps_smp(const char *cpu_type); bool cpu_supports_isa(const CPUMIPSState *env, uint64_t isa_mask); bool cpu_type_supports_isa(const char *cpu_type, uint64_t isa); +/* Check presence of MIPS-3D ASE */ +static inline bool ase_3d_available(const CPUMIPSState *env) +{ + return env->active_fpu.fcr0 & (1 << FCR0_3D); +} + /* Check presence of MSA implementation */ static inline bool ase_msa_available(CPUMIPSState *env) { @@ -1360,25 +1366,18 @@ void cpu_mips_clock_init(MIPSCPU *cpu); /* helper.c */ target_ulong exception_resume_pc(CPUMIPSState *env); -static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags) -{ - *pc = env->active_tc.PC; - *cs_base = 0; - *flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK | - MIPS_HFLAG_HWRENA_ULR); -} - /** * mips_cpu_create_with_clock: * @typename: a MIPS CPU type. * @cpu_refclk: this cpu input clock (an output clock of another device) + * @is_big_endian: whether this CPU is configured in big endianness * * Instantiates a MIPS CPU, set the input clock of the CPU to @cpu_refclk, * then realizes the CPU. * * Returns: A #CPUState or %NULL if an error occurred. */ -MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk); +MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk, + bool is_big_endian); #endif /* MIPS_CPU_H */ diff --git a/target/mips/fpu_helper.h b/target/mips/fpu_helper.h index ad1116e8c10..08fb4093904 100644 --- a/target/mips/fpu_helper.h +++ b/target/mips/fpu_helper.h @@ -28,6 +28,8 @@ static inline void restore_flush_mode(CPUMIPSState *env) static inline void restore_snan_bit_mode(CPUMIPSState *env) { bool nan2008 = env->active_fpu.fcr31 & (1 << FCR31_NAN2008); + FloatInfZeroNaNRule izn_rule; + Float3NaNPropRule nan3_rule; /* * With nan2008, SNaNs are silenced in the usual way. @@ -35,6 +37,24 @@ static inline void restore_snan_bit_mode(CPUMIPSState *env) */ set_snan_bit_is_one(!nan2008, &env->active_fpu.fp_status); set_default_nan_mode(!nan2008, &env->active_fpu.fp_status); + /* + * For MIPS systems that conform to IEEE754-1985, the (inf,zero,nan) + * case sets InvalidOp and returns the default NaN. + * For MIPS systems that conform to IEEE754-2008, the (inf,zero,nan) + * case sets InvalidOp and returns the input value 'c'. + */ + izn_rule = nan2008 ? float_infzeronan_dnan_never : float_infzeronan_dnan_always; + set_float_infzeronan_rule(izn_rule, &env->active_fpu.fp_status); + nan3_rule = nan2008 ? float_3nan_prop_s_cab : float_3nan_prop_s_abc; + set_float_3nan_prop_rule(nan3_rule, &env->active_fpu.fp_status); + /* + * With nan2008, the default NaN value has the sign bit clear and the + * frac msb set; with the older mode, the sign bit is clear, and all + * frac bits except the msb are set. + */ + set_float_default_nan_pattern(nan2008 ? 
0b01000000 : 0b00111111, + &env->active_fpu.fp_status); + } static inline void restore_fp_status(CPUMIPSState *env) @@ -44,6 +64,34 @@ static inline void restore_fp_status(CPUMIPSState *env) restore_snan_bit_mode(env); } +static inline void fp_reset(CPUMIPSState *env) +{ + restore_fp_status(env); + + /* + * According to MIPS specifications, if one of the two operands is + * a sNaN, a new qNaN has to be generated. This is done in + * floatXX_silence_nan(). For qNaN inputs the specifications + * says: "When possible, this QNaN result is one of the operand QNaN + * values." In practice it seems that most implementations choose + * the first operand if both operands are qNaN. In short this gives + * the following rules: + * 1. A if it is signaling + * 2. B if it is signaling + * 3. A (quiet) + * 4. B (quiet) + * A signaling NaN is always silenced before returning it. + */ + set_float_2nan_prop_rule(float_2nan_prop_s_ab, + &env->active_fpu.fp_status); + /* + * TODO: the spec does't say clearly whether FTZ happens before + * or after rounding for normal FPU operations. + */ + set_float_ftz_detection(float_ftz_before_rounding, + &env->active_fpu.fp_status); +} + /* MSA */ enum CPUMIPSMSADataFormat { diff --git a/target/mips/helper.h b/target/mips/helper.h index 0f8462febb5..b6cd53c8538 100644 --- a/target/mips/helper.h +++ b/target/mips/helper.h @@ -21,6 +21,8 @@ DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(dbitswap, TCG_CALL_NO_RWG_SE, tl, tl) #endif +DEF_HELPER_3(crc32, tl, tl, tl, i32) +DEF_HELPER_3(crc32c, tl, tl, tl, i32) DEF_HELPER_FLAGS_4(rotx, TCG_CALL_NO_RWG_SE, tl, tl, i32, i32, i32) /* microMIPS functions */ @@ -594,7 +596,7 @@ DEF_HELPER_FLAGS_3(wrdsp, 0, void, tl, tl, env) DEF_HELPER_FLAGS_2(rddsp, 0, tl, tl, env) #ifndef CONFIG_USER_ONLY -#include "tcg/sysemu_helper.h.inc" +#include "tcg/system_helper.h.inc" #endif /* !CONFIG_USER_ONLY */ #include "tcg/msa_helper.h.inc" diff --git a/target/mips/internal.h b/target/mips/internal.h index a9a22ea00ec..28eb28936ba 100644 --- a/target/mips/internal.h +++ b/target/mips/internal.h @@ -162,8 +162,6 @@ void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val); extern const VMStateDescription vmstate_mips_cpu; -#endif /* !CONFIG_USER_ONLY */ - static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env) { return (env->CP0_Status & (1 << CP0St_IE)) && @@ -206,6 +204,8 @@ static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env) return r; } +#endif /* !CONFIG_USER_ONLY */ + void msa_reset(CPUMIPSState *env); /* cp0_timer.c */ @@ -225,6 +225,16 @@ static inline void mips_env_set_pc(CPUMIPSState *env, target_ulong value) } } +static inline bool mips_env_is_bigendian(CPUMIPSState *env) +{ + return extract32(env->CP0_Config0, CP0C0_BE, 1); +} + +static inline MemOp mo_endian_env(CPUMIPSState *env) +{ + return mips_env_is_bigendian(env) ? 
MO_BE : MO_LE; +} + static inline void restore_pamask(CPUMIPSState *env) { if (env->hflags & MIPS_HFLAG_ELPA) { diff --git a/target/mips/kvm.c b/target/mips/kvm.c index a631ab544f5..ec53acb51a1 100644 --- a/target/mips/kvm.c +++ b/target/mips/kvm.c @@ -18,9 +18,9 @@ #include "internal.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" -#include "sysemu/kvm.h" -#include "sysemu/kvm_int.h" -#include "sysemu/runstate.h" +#include "system/kvm.h" +#include "system/kvm_int.h" +#include "system/runstate.h" #include "kvm_mips.h" #include "hw/boards.h" #include "fpu_helper.h" @@ -61,6 +61,11 @@ int kvm_arch_irqchip_create(KVMState *s) return 0; } +int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp) +{ + return 0; +} + int kvm_arch_init_vcpu(CPUState *cs) { CPUMIPSState *env = cpu_env(cs); @@ -1172,7 +1177,7 @@ static int kvm_mips_get_cp0_registers(CPUState *cs) return ret; } -int kvm_arch_put_registers(CPUState *cs, int level) +int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) { CPUMIPSState *env = cpu_env(cs); struct kvm_regs regs; @@ -1207,7 +1212,7 @@ int kvm_arch_put_registers(CPUState *cs, int level) return ret; } -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { CPUMIPSState *env = cpu_env(cs); int ret = 0; diff --git a/target/mips/meson.build b/target/mips/meson.build index a26d1e1f792..abf0ce3e8b9 100644 --- a/target/mips/meson.build +++ b/target/mips/meson.build @@ -7,9 +7,10 @@ mips_ss.add(files( 'gdbstub.c', 'msa.c', )) +mips_ss.add(zlib) if have_system - subdir('sysemu') + subdir('system') endif if 'CONFIG_TCG' in config_all_accel diff --git a/target/mips/mips-defs.h b/target/mips/mips-defs.h index a6cebe0265c..9d4d292586c 100644 --- a/target/mips/mips-defs.h +++ b/target/mips/mips-defs.h @@ -26,12 +26,10 @@ * bits 24-39: MIPS ASEs */ #define ASE_MIPS16 0x0000000001000000ULL -#define ASE_MIPS3D 0x0000000002000000ULL #define ASE_MDMX 0x0000000004000000ULL #define ASE_DSP 0x0000000008000000ULL #define ASE_DSP_R2 0x0000000010000000ULL #define ASE_DSP_R3 0x0000000020000000ULL -#define ASE_MT 0x0000000040000000ULL #define ASE_SMARTMIPS 0x0000000080000000ULL #define ASE_MICROMIPS 0x0000000100000000ULL /* diff --git a/target/mips/msa.c b/target/mips/msa.c index 61f1a9a5936..32c6acbcc56 100644 --- a/target/mips/msa.c +++ b/target/mips/msa.c @@ -48,6 +48,35 @@ void msa_reset(CPUMIPSState *env) /* tininess detected after rounding.*/ set_float_detect_tininess(float_tininess_after_rounding, &env->active_tc.msa_fp_status); + /* + * MSACSR.FS detects tiny results to flush to zero before rounding + * (per "MIPS Architecture for Programmers Volume IV-j: The MIPS64 SIMD + * Architecture Module, Revision 1.1" section 3.5.4), even though it + * detects tininess after rounding for underflow purposes (section 3.4.2 + * table 3.3). + */ + set_float_ftz_detection(float_ftz_before_rounding, + &env->active_tc.msa_fp_status); + + /* + * According to MIPS specifications, if one of the two operands is + * a sNaN, a new qNaN has to be generated. This is done in + * floatXX_silence_nan(). For qNaN inputs the specifications + * says: "When possible, this QNaN result is one of the operand QNaN + * values." In practice it seems that most implementations choose + * the first operand if both operands are qNaN. In short this gives + * the following rules: + * 1. A if it is signaling + * 2. B if it is signaling + * 3. A (quiet) + * 4. B (quiet) + * A signaling NaN is always silenced before returning it. 
+ */ + set_float_2nan_prop_rule(float_2nan_prop_s_ab, + &env->active_tc.msa_fp_status); + + set_float_3nan_prop_rule(float_3nan_prop_s_cab, + &env->active_tc.msa_fp_status); /* clear float_status exception flags */ set_float_exception_flags(0, &env->active_tc.msa_fp_status); @@ -57,4 +86,11 @@ void msa_reset(CPUMIPSState *env) /* set proper signanling bit meaning ("1" means "quiet") */ set_snan_bit_is_one(0, &env->active_tc.msa_fp_status); + + /* Inf * 0 + NaN returns the input NaN */ + set_float_infzeronan_rule(float_infzeronan_dnan_never, + &env->active_tc.msa_fp_status); + /* Default NaN: sign bit clear, frac msb set */ + set_float_default_nan_pattern(0b01000000, + &env->active_tc.msa_fp_status); } diff --git a/target/mips/sysemu/machine.c b/target/mips/sysemu/machine.c deleted file mode 100644 index 213fd637fcb..00000000000 --- a/target/mips/sysemu/machine.c +++ /dev/null @@ -1,333 +0,0 @@ -#include "qemu/osdep.h" -#include "cpu.h" -#include "internal.h" -#include "migration/cpu.h" -#include "fpu_helper.h" - -static int cpu_post_load(void *opaque, int version_id) -{ - MIPSCPU *cpu = opaque; - CPUMIPSState *env = &cpu->env; - - restore_fp_status(env); - restore_msa_fp_status(env); - compute_hflags(env); - restore_pamask(env); - - return 0; -} - -/* FPU state */ - -static int get_fpr(QEMUFile *f, void *pv, size_t size, - const VMStateField *field) -{ - int i; - fpr_t *v = pv; - /* Restore entire MSA vector register */ - for (i = 0; i < MSA_WRLEN / 64; i++) { - qemu_get_sbe64s(f, &v->wr.d[i]); - } - return 0; -} - -static int put_fpr(QEMUFile *f, void *pv, size_t size, - const VMStateField *field, JSONWriter *vmdesc) -{ - int i; - fpr_t *v = pv; - /* Save entire MSA vector register */ - for (i = 0; i < MSA_WRLEN / 64; i++) { - qemu_put_sbe64s(f, &v->wr.d[i]); - } - - return 0; -} - -static const VMStateInfo vmstate_info_fpr = { - .name = "fpr", - .get = get_fpr, - .put = put_fpr, -}; - -#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \ - VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_fpr, fpr_t) - -#define VMSTATE_FPR_ARRAY(_f, _s, _n) \ - VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0) - -static const VMStateField vmstate_fpu_fields[] = { - VMSTATE_FPR_ARRAY(fpr, CPUMIPSFPUContext, 32), - VMSTATE_UINT32(fcr0, CPUMIPSFPUContext), - VMSTATE_UINT32(fcr31, CPUMIPSFPUContext), - VMSTATE_END_OF_LIST() -}; - -static const VMStateDescription vmstate_fpu = { - .name = "cpu/fpu", - .version_id = 1, - .minimum_version_id = 1, - .fields = vmstate_fpu_fields -}; - -static const VMStateDescription vmstate_inactive_fpu = { - .name = "cpu/inactive_fpu", - .version_id = 1, - .minimum_version_id = 1, - .fields = vmstate_fpu_fields -}; - -/* TC state */ - -static const VMStateField vmstate_tc_fields[] = { - VMSTATE_UINTTL_ARRAY(gpr, TCState, 32), -#if defined(TARGET_MIPS64) - VMSTATE_UINT64_ARRAY(gpr_hi, TCState, 32), -#endif /* TARGET_MIPS64 */ - VMSTATE_UINTTL(PC, TCState), - VMSTATE_UINTTL_ARRAY(HI, TCState, MIPS_DSP_ACC), - VMSTATE_UINTTL_ARRAY(LO, TCState, MIPS_DSP_ACC), - VMSTATE_UINTTL_ARRAY(ACX, TCState, MIPS_DSP_ACC), - VMSTATE_UINTTL(DSPControl, TCState), - VMSTATE_INT32(CP0_TCStatus, TCState), - VMSTATE_INT32(CP0_TCBind, TCState), - VMSTATE_UINTTL(CP0_TCHalt, TCState), - VMSTATE_UINTTL(CP0_TCContext, TCState), - VMSTATE_UINTTL(CP0_TCSchedule, TCState), - VMSTATE_UINTTL(CP0_TCScheFBack, TCState), - VMSTATE_INT32(CP0_Debug_tcstatus, TCState), - VMSTATE_UINTTL(CP0_UserLocal, TCState), - VMSTATE_INT32(msacsr, TCState), - VMSTATE_UINTTL_ARRAY(mxu_gpr, TCState, NUMBER_OF_MXU_REGISTERS - 1), - VMSTATE_UINTTL(mxu_cr, 
TCState), - VMSTATE_END_OF_LIST() -}; - -static const VMStateDescription vmstate_tc = { - .name = "cpu/tc", - .version_id = 2, - .minimum_version_id = 2, - .fields = vmstate_tc_fields -}; - -static const VMStateDescription vmstate_inactive_tc = { - .name = "cpu/inactive_tc", - .version_id = 2, - .minimum_version_id = 2, - .fields = vmstate_tc_fields -}; - -/* MVP state */ - -static const VMStateDescription vmstate_mvp = { - .name = "cpu/mvp", - .version_id = 1, - .minimum_version_id = 1, - .fields = (const VMStateField[]) { - VMSTATE_INT32(CP0_MVPControl, CPUMIPSMVPContext), - VMSTATE_INT32(CP0_MVPConf0, CPUMIPSMVPContext), - VMSTATE_INT32(CP0_MVPConf1, CPUMIPSMVPContext), - VMSTATE_END_OF_LIST() - } -}; - -/* TLB state */ - -static int get_tlb(QEMUFile *f, void *pv, size_t size, - const VMStateField *field) -{ - r4k_tlb_t *v = pv; - uint16_t flags; - - qemu_get_betls(f, &v->VPN); - qemu_get_be32s(f, &v->PageMask); - qemu_get_be16s(f, &v->ASID); - qemu_get_be16s(f, &flags); - v->G = (flags >> 10) & 1; - v->C0 = (flags >> 7) & 3; - v->C1 = (flags >> 4) & 3; - v->V0 = (flags >> 3) & 1; - v->V1 = (flags >> 2) & 1; - v->D0 = (flags >> 1) & 1; - v->D1 = (flags >> 0) & 1; - v->EHINV = (flags >> 15) & 1; - v->RI1 = (flags >> 14) & 1; - v->RI0 = (flags >> 13) & 1; - v->XI1 = (flags >> 12) & 1; - v->XI0 = (flags >> 11) & 1; - qemu_get_be64s(f, &v->PFN[0]); - qemu_get_be64s(f, &v->PFN[1]); - - return 0; -} - -static int put_tlb(QEMUFile *f, void *pv, size_t size, - const VMStateField *field, JSONWriter *vmdesc) -{ - r4k_tlb_t *v = pv; - - uint16_t asid = v->ASID; - uint16_t flags = ((v->EHINV << 15) | - (v->RI1 << 14) | - (v->RI0 << 13) | - (v->XI1 << 12) | - (v->XI0 << 11) | - (v->G << 10) | - (v->C0 << 7) | - (v->C1 << 4) | - (v->V0 << 3) | - (v->V1 << 2) | - (v->D0 << 1) | - (v->D1 << 0)); - - qemu_put_betls(f, &v->VPN); - qemu_put_be32s(f, &v->PageMask); - qemu_put_be16s(f, &asid); - qemu_put_be16s(f, &flags); - qemu_put_be64s(f, &v->PFN[0]); - qemu_put_be64s(f, &v->PFN[1]); - - return 0; -} - -static const VMStateInfo vmstate_info_tlb = { - .name = "tlb_entry", - .get = get_tlb, - .put = put_tlb, -}; - -#define VMSTATE_TLB_ARRAY_V(_f, _s, _n, _v) \ - VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_tlb, r4k_tlb_t) - -#define VMSTATE_TLB_ARRAY(_f, _s, _n) \ - VMSTATE_TLB_ARRAY_V(_f, _s, _n, 0) - -static const VMStateDescription vmstate_tlb = { - .name = "cpu/tlb", - .version_id = 2, - .minimum_version_id = 2, - .fields = (const VMStateField[]) { - VMSTATE_UINT32(nb_tlb, CPUMIPSTLBContext), - VMSTATE_UINT32(tlb_in_use, CPUMIPSTLBContext), - VMSTATE_TLB_ARRAY(mmu.r4k.tlb, CPUMIPSTLBContext, MIPS_TLB_MAX), - VMSTATE_END_OF_LIST() - } -}; - -/* MIPS CPU state */ - -const VMStateDescription vmstate_mips_cpu = { - .name = "cpu", - .version_id = 21, - .minimum_version_id = 21, - .post_load = cpu_post_load, - .fields = (const VMStateField[]) { - /* Active TC */ - VMSTATE_STRUCT(env.active_tc, MIPSCPU, 1, vmstate_tc, TCState), - - /* Active FPU */ - VMSTATE_STRUCT(env.active_fpu, MIPSCPU, 1, vmstate_fpu, - CPUMIPSFPUContext), - - /* MVP */ - VMSTATE_STRUCT_POINTER(env.mvp, MIPSCPU, vmstate_mvp, - CPUMIPSMVPContext), - - /* TLB */ - VMSTATE_STRUCT_POINTER(env.tlb, MIPSCPU, vmstate_tlb, - CPUMIPSTLBContext), - - /* CPU metastate */ - VMSTATE_UINT32(env.current_tc, MIPSCPU), - VMSTATE_UINT32(env.current_fpu, MIPSCPU), - VMSTATE_INT32(env.error_code, MIPSCPU), - VMSTATE_UINTTL(env.btarget, MIPSCPU), - VMSTATE_UINTTL(env.bcond, MIPSCPU), - - /* Remaining CP0 registers */ - VMSTATE_INT32(env.CP0_Index, MIPSCPU), - 
VMSTATE_INT32(env.CP0_VPControl, MIPSCPU), - VMSTATE_INT32(env.CP0_Random, MIPSCPU), - VMSTATE_INT32(env.CP0_VPEControl, MIPSCPU), - VMSTATE_INT32(env.CP0_VPEConf0, MIPSCPU), - VMSTATE_INT32(env.CP0_VPEConf1, MIPSCPU), - VMSTATE_UINTTL(env.CP0_YQMask, MIPSCPU), - VMSTATE_UINTTL(env.CP0_VPESchedule, MIPSCPU), - VMSTATE_UINTTL(env.CP0_VPEScheFBack, MIPSCPU), - VMSTATE_INT32(env.CP0_VPEOpt, MIPSCPU), - VMSTATE_UINT64(env.CP0_EntryLo0, MIPSCPU), - VMSTATE_UINT64(env.CP0_EntryLo1, MIPSCPU), - VMSTATE_INT32(env.CP0_GlobalNumber, MIPSCPU), - VMSTATE_UINTTL(env.CP0_Context, MIPSCPU), - VMSTATE_INT32(env.CP0_MemoryMapID, MIPSCPU), - VMSTATE_INT32(env.CP0_PageMask, MIPSCPU), - VMSTATE_INT32(env.CP0_PageGrain, MIPSCPU), - VMSTATE_UINTTL(env.CP0_SegCtl0, MIPSCPU), - VMSTATE_UINTTL(env.CP0_SegCtl1, MIPSCPU), - VMSTATE_UINTTL(env.CP0_SegCtl2, MIPSCPU), - VMSTATE_UINTTL(env.CP0_PWBase, MIPSCPU), - VMSTATE_UINTTL(env.CP0_PWField, MIPSCPU), - VMSTATE_UINTTL(env.CP0_PWSize, MIPSCPU), - VMSTATE_INT32(env.CP0_Wired, MIPSCPU), - VMSTATE_INT32(env.CP0_PWCtl, MIPSCPU), - VMSTATE_INT32(env.CP0_SRSConf0, MIPSCPU), - VMSTATE_INT32(env.CP0_SRSConf1, MIPSCPU), - VMSTATE_INT32(env.CP0_SRSConf2, MIPSCPU), - VMSTATE_INT32(env.CP0_SRSConf3, MIPSCPU), - VMSTATE_INT32(env.CP0_SRSConf4, MIPSCPU), - VMSTATE_INT32(env.CP0_HWREna, MIPSCPU), - VMSTATE_UINTTL(env.CP0_BadVAddr, MIPSCPU), - VMSTATE_UINT32(env.CP0_BadInstr, MIPSCPU), - VMSTATE_UINT32(env.CP0_BadInstrP, MIPSCPU), - VMSTATE_UINT32(env.CP0_BadInstrX, MIPSCPU), - VMSTATE_INT32(env.CP0_Count, MIPSCPU), - VMSTATE_UNUSED(sizeof(uint32_t)), /* was CP0_SAARI */ - VMSTATE_UNUSED(2 * sizeof(uint64_t)), /* was CP0_SAAR[2] */ - VMSTATE_UINTTL(env.CP0_EntryHi, MIPSCPU), - VMSTATE_INT32(env.CP0_Compare, MIPSCPU), - VMSTATE_INT32(env.CP0_Status, MIPSCPU), - VMSTATE_INT32(env.CP0_IntCtl, MIPSCPU), - VMSTATE_INT32(env.CP0_SRSCtl, MIPSCPU), - VMSTATE_INT32(env.CP0_SRSMap, MIPSCPU), - VMSTATE_INT32(env.CP0_Cause, MIPSCPU), - VMSTATE_UINTTL(env.CP0_EPC, MIPSCPU), - VMSTATE_INT32(env.CP0_PRid, MIPSCPU), - VMSTATE_UINTTL(env.CP0_EBase, MIPSCPU), - VMSTATE_UINTTL(env.CP0_CMGCRBase, MIPSCPU), - VMSTATE_INT32(env.CP0_Config0, MIPSCPU), - VMSTATE_INT32(env.CP0_Config1, MIPSCPU), - VMSTATE_INT32(env.CP0_Config2, MIPSCPU), - VMSTATE_INT32(env.CP0_Config3, MIPSCPU), - VMSTATE_INT32(env.CP0_Config4, MIPSCPU), - VMSTATE_INT32(env.CP0_Config5, MIPSCPU), - VMSTATE_INT32(env.CP0_Config6, MIPSCPU), - VMSTATE_INT32(env.CP0_Config7, MIPSCPU), - VMSTATE_UINT64(env.CP0_LLAddr, MIPSCPU), - VMSTATE_UINT64_ARRAY(env.CP0_MAAR, MIPSCPU, MIPS_MAAR_MAX), - VMSTATE_INT32(env.CP0_MAARI, MIPSCPU), - VMSTATE_UINTTL(env.lladdr, MIPSCPU), - VMSTATE_UINTTL_ARRAY(env.CP0_WatchLo, MIPSCPU, 8), - VMSTATE_UINT64_ARRAY(env.CP0_WatchHi, MIPSCPU, 8), - VMSTATE_UINTTL(env.CP0_XContext, MIPSCPU), - VMSTATE_INT32(env.CP0_Framemask, MIPSCPU), - VMSTATE_INT32(env.CP0_Debug, MIPSCPU), - VMSTATE_UINTTL(env.CP0_DEPC, MIPSCPU), - VMSTATE_INT32(env.CP0_Performance0, MIPSCPU), - VMSTATE_INT32(env.CP0_ErrCtl, MIPSCPU), - VMSTATE_UINT64(env.CP0_TagLo, MIPSCPU), - VMSTATE_INT32(env.CP0_DataLo, MIPSCPU), - VMSTATE_INT32(env.CP0_TagHi, MIPSCPU), - VMSTATE_INT32(env.CP0_DataHi, MIPSCPU), - VMSTATE_UINTTL(env.CP0_ErrorEPC, MIPSCPU), - VMSTATE_INT32(env.CP0_DESAVE, MIPSCPU), - VMSTATE_UINTTL_ARRAY(env.CP0_KScratch, MIPSCPU, MIPS_KSCRATCH_NUM), - - /* Inactive TC */ - VMSTATE_STRUCT_ARRAY(env.tcs, MIPSCPU, MIPS_SHADOW_SET_MAX, 1, - vmstate_inactive_tc, TCState), - VMSTATE_STRUCT_ARRAY(env.fpus, MIPSCPU, MIPS_FPU_MAX, 1, - vmstate_inactive_fpu, 
CPUMIPSFPUContext), - - VMSTATE_END_OF_LIST() - }, -}; diff --git a/target/mips/sysemu/mips-qmp-cmds.c b/target/mips/sysemu/mips-qmp-cmds.c deleted file mode 100644 index 7340ac70ba0..00000000000 --- a/target/mips/sysemu/mips-qmp-cmds.c +++ /dev/null @@ -1,38 +0,0 @@ -/* - * QEMU MIPS CPU (monitor definitions) - * - * SPDX-FileCopyrightText: 2012 SUSE LINUX Products GmbH - * - * SPDX-License-Identifier: LGPL-2.1-or-later - */ - -#include "qemu/osdep.h" -#include "qapi/qapi-commands-machine-target.h" -#include "cpu.h" - -static void mips_cpu_add_definition(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - CpuDefinitionInfoList **cpu_list = user_data; - CpuDefinitionInfo *info; - const char *typename; - - typename = object_class_get_name(oc); - info = g_malloc0(sizeof(*info)); - info->name = cpu_model_from_type(typename); - info->q_typename = g_strdup(typename); - - QAPI_LIST_PREPEND(*cpu_list, info); -} - -CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) -{ - CpuDefinitionInfoList *cpu_list = NULL; - GSList *list; - - list = object_class_get_list(TYPE_MIPS_CPU, false); - g_slist_foreach(list, mips_cpu_add_definition, &cpu_list); - g_slist_free(list); - - return cpu_list; -} diff --git a/target/mips/sysemu/addr.c b/target/mips/system/addr.c similarity index 100% rename from target/mips/sysemu/addr.c rename to target/mips/system/addr.c diff --git a/target/mips/sysemu/cp0.c b/target/mips/system/cp0.c similarity index 99% rename from target/mips/sysemu/cp0.c rename to target/mips/system/cp0.c index bae37f515bf..ff7d3db00c7 100644 --- a/target/mips/sysemu/cp0.c +++ b/target/mips/system/cp0.c @@ -21,7 +21,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" /* Called for updates to CP0_Status. 
*/ void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc) diff --git a/target/mips/sysemu/cp0_timer.c b/target/mips/system/cp0_timer.c similarity index 99% rename from target/mips/sysemu/cp0_timer.c rename to target/mips/system/cp0_timer.c index 62de502caa5..ca16945cee1 100644 --- a/target/mips/sysemu/cp0_timer.c +++ b/target/mips/system/cp0_timer.c @@ -23,7 +23,7 @@ #include "qemu/osdep.h" #include "hw/irq.h" #include "qemu/timer.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "internal.h" /* MIPS R4K timer */ diff --git a/target/mips/system/machine.c b/target/mips/system/machine.c new file mode 100644 index 00000000000..8af11fd896b --- /dev/null +++ b/target/mips/system/machine.c @@ -0,0 +1,336 @@ +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "migration/cpu.h" +#include "fpu_helper.h" + +static int cpu_post_load(void *opaque, int version_id) +{ + MIPSCPU *cpu = opaque; + CPUMIPSState *env = &cpu->env; + + restore_fp_status(env); + restore_msa_fp_status(env); + compute_hflags(env); + restore_pamask(env); + + return 0; +} + +/* FPU state */ + +static int get_fpr(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + int i; + fpr_t *v = pv; + /* Restore entire MSA vector register */ + for (i = 0; i < MSA_WRLEN / 64; i++) { + qemu_get_sbe64s(f, &v->wr.d[i]); + } + return 0; +} + +static int put_fpr(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + int i; + fpr_t *v = pv; + /* Save entire MSA vector register */ + for (i = 0; i < MSA_WRLEN / 64; i++) { + qemu_put_sbe64s(f, &v->wr.d[i]); + } + + return 0; +} + +static const VMStateInfo vmstate_info_fpr = { + .name = "fpr", + .get = get_fpr, + .put = put_fpr, +}; + +#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_fpr, fpr_t) + +#define VMSTATE_FPR_ARRAY(_f, _s, _n) \ + VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0) + +static const VMStateField vmstate_fpu_fields[] = { + VMSTATE_FPR_ARRAY(fpr, CPUMIPSFPUContext, 32), + VMSTATE_UINT32(fcr0, CPUMIPSFPUContext), + VMSTATE_UINT32(fcr31, CPUMIPSFPUContext), + VMSTATE_END_OF_LIST() +}; + +static const VMStateDescription vmstate_fpu = { + .name = "cpu/fpu", + .version_id = 1, + .minimum_version_id = 1, + .fields = vmstate_fpu_fields +}; + +static const VMStateDescription vmstate_inactive_fpu = { + .name = "cpu/inactive_fpu", + .version_id = 1, + .minimum_version_id = 1, + .fields = vmstate_fpu_fields +}; + +/* TC state */ + +static const VMStateField vmstate_tc_fields[] = { + VMSTATE_UINTTL_ARRAY(gpr, TCState, 32), +#if defined(TARGET_MIPS64) + VMSTATE_UINT64_ARRAY(gpr_hi, TCState, 32), +#endif /* TARGET_MIPS64 */ + VMSTATE_UINTTL(PC, TCState), + VMSTATE_UINTTL_ARRAY(HI, TCState, MIPS_DSP_ACC), + VMSTATE_UINTTL_ARRAY(LO, TCState, MIPS_DSP_ACC), + VMSTATE_UINTTL_ARRAY(ACX, TCState, MIPS_DSP_ACC), + VMSTATE_UINTTL(DSPControl, TCState), + VMSTATE_INT32(CP0_TCStatus, TCState), + VMSTATE_INT32(CP0_TCBind, TCState), + VMSTATE_UINTTL(CP0_TCHalt, TCState), + VMSTATE_UINTTL(CP0_TCContext, TCState), + VMSTATE_UINTTL(CP0_TCSchedule, TCState), + VMSTATE_UINTTL(CP0_TCScheFBack, TCState), + VMSTATE_INT32(CP0_Debug_tcstatus, TCState), + VMSTATE_UINTTL(CP0_UserLocal, TCState), + VMSTATE_INT32(msacsr, TCState), + VMSTATE_UINTTL_ARRAY(mxu_gpr, TCState, NUMBER_OF_MXU_REGISTERS - 1), + VMSTATE_UINTTL(mxu_cr, TCState), + VMSTATE_END_OF_LIST() +}; + +static const VMStateDescription vmstate_tc = { + .name = "cpu/tc", + .version_id = 2, + .minimum_version_id = 2, + .fields = 
vmstate_tc_fields +}; + +static const VMStateDescription vmstate_inactive_tc = { + .name = "cpu/inactive_tc", + .version_id = 2, + .minimum_version_id = 2, + .fields = vmstate_tc_fields +}; + +/* MVP state */ + +static const VMStateDescription vmstate_mvp = { + .name = "cpu/mvp", + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_INT32(CP0_MVPControl, CPUMIPSMVPContext), + VMSTATE_INT32(CP0_MVPConf0, CPUMIPSMVPContext), + VMSTATE_INT32(CP0_MVPConf1, CPUMIPSMVPContext), + VMSTATE_END_OF_LIST() + } +}; + +/* TLB state */ + +static int get_tlb(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + r4k_tlb_t *v = pv; + uint16_t flags; + + qemu_get_betls(f, &v->VPN); + qemu_get_be32s(f, &v->PageMask); + qemu_get_be16s(f, &v->ASID); + qemu_get_be32s(f, &v->MMID); + qemu_get_be16s(f, &flags); + v->G = (flags >> 10) & 1; + v->C0 = (flags >> 7) & 3; + v->C1 = (flags >> 4) & 3; + v->V0 = (flags >> 3) & 1; + v->V1 = (flags >> 2) & 1; + v->D0 = (flags >> 1) & 1; + v->D1 = (flags >> 0) & 1; + v->EHINV = (flags >> 15) & 1; + v->RI1 = (flags >> 14) & 1; + v->RI0 = (flags >> 13) & 1; + v->XI1 = (flags >> 12) & 1; + v->XI0 = (flags >> 11) & 1; + qemu_get_be64s(f, &v->PFN[0]); + qemu_get_be64s(f, &v->PFN[1]); + + return 0; +} + +static int put_tlb(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + r4k_tlb_t *v = pv; + + uint16_t asid = v->ASID; + uint32_t mmid = v->MMID; + uint16_t flags = ((v->EHINV << 15) | + (v->RI1 << 14) | + (v->RI0 << 13) | + (v->XI1 << 12) | + (v->XI0 << 11) | + (v->G << 10) | + (v->C0 << 7) | + (v->C1 << 4) | + (v->V0 << 3) | + (v->V1 << 2) | + (v->D0 << 1) | + (v->D1 << 0)); + + qemu_put_betls(f, &v->VPN); + qemu_put_be32s(f, &v->PageMask); + qemu_put_be16s(f, &asid); + qemu_put_be32s(f, &mmid); + qemu_put_be16s(f, &flags); + qemu_put_be64s(f, &v->PFN[0]); + qemu_put_be64s(f, &v->PFN[1]); + + return 0; +} + +static const VMStateInfo vmstate_info_tlb = { + .name = "tlb_entry", + .get = get_tlb, + .put = put_tlb, +}; + +#define VMSTATE_TLB_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_tlb, r4k_tlb_t) + +#define VMSTATE_TLB_ARRAY(_f, _s, _n) \ + VMSTATE_TLB_ARRAY_V(_f, _s, _n, 0) + +static const VMStateDescription vmstate_tlb = { + .name = "cpu/tlb", + .version_id = 3, + .minimum_version_id = 3, + .fields = (const VMStateField[]) { + VMSTATE_UINT32(nb_tlb, CPUMIPSTLBContext), + VMSTATE_UINT32(tlb_in_use, CPUMIPSTLBContext), + VMSTATE_TLB_ARRAY(mmu.r4k.tlb, CPUMIPSTLBContext, MIPS_TLB_MAX), + VMSTATE_END_OF_LIST() + } +}; + +/* MIPS CPU state */ + +const VMStateDescription vmstate_mips_cpu = { + .name = "cpu", + .version_id = 21, + .minimum_version_id = 21, + .post_load = cpu_post_load, + .fields = (const VMStateField[]) { + /* Active TC */ + VMSTATE_STRUCT(env.active_tc, MIPSCPU, 1, vmstate_tc, TCState), + + /* Active FPU */ + VMSTATE_STRUCT(env.active_fpu, MIPSCPU, 1, vmstate_fpu, + CPUMIPSFPUContext), + + /* MVP */ + VMSTATE_STRUCT_POINTER(env.mvp, MIPSCPU, vmstate_mvp, + CPUMIPSMVPContext), + + /* TLB */ + VMSTATE_STRUCT_POINTER(env.tlb, MIPSCPU, vmstate_tlb, + CPUMIPSTLBContext), + + /* CPU metastate */ + VMSTATE_UINT32(env.current_tc, MIPSCPU), + VMSTATE_UNUSED(sizeof(uint32_t)), /* was current_fpu */ + VMSTATE_INT32(env.error_code, MIPSCPU), + VMSTATE_UINTTL(env.btarget, MIPSCPU), + VMSTATE_UINTTL(env.bcond, MIPSCPU), + + /* Remaining CP0 registers */ + VMSTATE_INT32(env.CP0_Index, MIPSCPU), + VMSTATE_INT32(env.CP0_VPControl, MIPSCPU), + 
VMSTATE_INT32(env.CP0_Random, MIPSCPU), + VMSTATE_INT32(env.CP0_VPEControl, MIPSCPU), + VMSTATE_INT32(env.CP0_VPEConf0, MIPSCPU), + VMSTATE_INT32(env.CP0_VPEConf1, MIPSCPU), + VMSTATE_UINTTL(env.CP0_YQMask, MIPSCPU), + VMSTATE_UINTTL(env.CP0_VPESchedule, MIPSCPU), + VMSTATE_UINTTL(env.CP0_VPEScheFBack, MIPSCPU), + VMSTATE_INT32(env.CP0_VPEOpt, MIPSCPU), + VMSTATE_UINT64(env.CP0_EntryLo0, MIPSCPU), + VMSTATE_UINT64(env.CP0_EntryLo1, MIPSCPU), + VMSTATE_INT32(env.CP0_GlobalNumber, MIPSCPU), + VMSTATE_UINTTL(env.CP0_Context, MIPSCPU), + VMSTATE_INT32(env.CP0_MemoryMapID, MIPSCPU), + VMSTATE_INT32(env.CP0_PageMask, MIPSCPU), + VMSTATE_INT32(env.CP0_PageGrain, MIPSCPU), + VMSTATE_UINTTL(env.CP0_SegCtl0, MIPSCPU), + VMSTATE_UINTTL(env.CP0_SegCtl1, MIPSCPU), + VMSTATE_UINTTL(env.CP0_SegCtl2, MIPSCPU), + VMSTATE_UINTTL(env.CP0_PWBase, MIPSCPU), + VMSTATE_UINTTL(env.CP0_PWField, MIPSCPU), + VMSTATE_UINTTL(env.CP0_PWSize, MIPSCPU), + VMSTATE_INT32(env.CP0_Wired, MIPSCPU), + VMSTATE_INT32(env.CP0_PWCtl, MIPSCPU), + VMSTATE_INT32(env.CP0_SRSConf0, MIPSCPU), + VMSTATE_INT32(env.CP0_SRSConf1, MIPSCPU), + VMSTATE_INT32(env.CP0_SRSConf2, MIPSCPU), + VMSTATE_INT32(env.CP0_SRSConf3, MIPSCPU), + VMSTATE_INT32(env.CP0_SRSConf4, MIPSCPU), + VMSTATE_INT32(env.CP0_HWREna, MIPSCPU), + VMSTATE_UINTTL(env.CP0_BadVAddr, MIPSCPU), + VMSTATE_UINT32(env.CP0_BadInstr, MIPSCPU), + VMSTATE_UINT32(env.CP0_BadInstrP, MIPSCPU), + VMSTATE_UINT32(env.CP0_BadInstrX, MIPSCPU), + VMSTATE_INT32(env.CP0_Count, MIPSCPU), + VMSTATE_UNUSED(sizeof(uint32_t)), /* was CP0_SAARI */ + VMSTATE_UNUSED(2 * sizeof(uint64_t)), /* was CP0_SAAR[2] */ + VMSTATE_UINTTL(env.CP0_EntryHi, MIPSCPU), + VMSTATE_INT32(env.CP0_Compare, MIPSCPU), + VMSTATE_INT32(env.CP0_Status, MIPSCPU), + VMSTATE_INT32(env.CP0_IntCtl, MIPSCPU), + VMSTATE_INT32(env.CP0_SRSCtl, MIPSCPU), + VMSTATE_INT32(env.CP0_SRSMap, MIPSCPU), + VMSTATE_INT32(env.CP0_Cause, MIPSCPU), + VMSTATE_UINTTL(env.CP0_EPC, MIPSCPU), + VMSTATE_INT32(env.CP0_PRid, MIPSCPU), + VMSTATE_UINTTL(env.CP0_EBase, MIPSCPU), + VMSTATE_UINTTL(env.CP0_CMGCRBase, MIPSCPU), + VMSTATE_INT32(env.CP0_Config0, MIPSCPU), + VMSTATE_INT32(env.CP0_Config1, MIPSCPU), + VMSTATE_INT32(env.CP0_Config2, MIPSCPU), + VMSTATE_INT32(env.CP0_Config3, MIPSCPU), + VMSTATE_INT32(env.CP0_Config4, MIPSCPU), + VMSTATE_INT32(env.CP0_Config5, MIPSCPU), + VMSTATE_INT32(env.CP0_Config6, MIPSCPU), + VMSTATE_INT32(env.CP0_Config7, MIPSCPU), + VMSTATE_UINT64(env.CP0_LLAddr, MIPSCPU), + VMSTATE_UINT64_ARRAY(env.CP0_MAAR, MIPSCPU, MIPS_MAAR_MAX), + VMSTATE_INT32(env.CP0_MAARI, MIPSCPU), + VMSTATE_UINTTL(env.lladdr, MIPSCPU), + VMSTATE_UINTTL_ARRAY(env.CP0_WatchLo, MIPSCPU, 8), + VMSTATE_UINT64_ARRAY(env.CP0_WatchHi, MIPSCPU, 8), + VMSTATE_UINTTL(env.CP0_XContext, MIPSCPU), + VMSTATE_INT32(env.CP0_Framemask, MIPSCPU), + VMSTATE_INT32(env.CP0_Debug, MIPSCPU), + VMSTATE_UINTTL(env.CP0_DEPC, MIPSCPU), + VMSTATE_INT32(env.CP0_Performance0, MIPSCPU), + VMSTATE_INT32(env.CP0_ErrCtl, MIPSCPU), + VMSTATE_UINT64(env.CP0_TagLo, MIPSCPU), + VMSTATE_INT32(env.CP0_DataLo, MIPSCPU), + VMSTATE_INT32(env.CP0_TagHi, MIPSCPU), + VMSTATE_INT32(env.CP0_DataHi, MIPSCPU), + VMSTATE_UINTTL(env.CP0_ErrorEPC, MIPSCPU), + VMSTATE_INT32(env.CP0_DESAVE, MIPSCPU), + VMSTATE_UINTTL_ARRAY(env.CP0_KScratch, MIPSCPU, MIPS_KSCRATCH_NUM), + + /* Inactive TC */ + VMSTATE_STRUCT_ARRAY(env.tcs, MIPSCPU, MIPS_SHADOW_SET_MAX, 1, + vmstate_inactive_tc, TCState), + VMSTATE_STRUCT_ARRAY(env.fpus, MIPSCPU, MIPS_FPU_MAX, 1, + vmstate_inactive_fpu, CPUMIPSFPUContext), + + VMSTATE_END_OF_LIST() 
+ }, +}; diff --git a/target/mips/sysemu/meson.build b/target/mips/system/meson.build similarity index 100% rename from target/mips/sysemu/meson.build rename to target/mips/system/meson.build diff --git a/target/mips/system/mips-qmp-cmds.c b/target/mips/system/mips-qmp-cmds.c new file mode 100644 index 00000000000..b6a2874f2dd --- /dev/null +++ b/target/mips/system/mips-qmp-cmds.c @@ -0,0 +1,49 @@ +/* + * QEMU MIPS CPU (monitor definitions) + * + * SPDX-FileCopyrightText: 2012 SUSE LINUX Products GmbH + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/target-info.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "cpu.h" + +CpuModelExpansionInfo * +qmp_query_cpu_model_expansion(CpuModelExpansionType type, + CpuModelInfo *model, + Error **errp) +{ + error_setg(errp, "CPU model expansion is not supported on this target"); + return NULL; +} + +static void mips_cpu_add_definition(gpointer data, gpointer user_data) +{ + ObjectClass *oc = data; + CpuDefinitionInfoList **cpu_list = user_data; + CpuDefinitionInfo *info; + const char *typename; + + typename = object_class_get_name(oc); + info = g_malloc0(sizeof(*info)); + info->name = cpu_model_from_type(typename); + info->q_typename = g_strdup(typename); + + QAPI_LIST_PREPEND(*cpu_list, info); +} + +CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) +{ + CpuDefinitionInfoList *cpu_list = NULL; + GSList *list; + + list = object_class_get_list(target_cpu_type(), false); + g_slist_foreach(list, mips_cpu_add_definition, &cpu_list); + g_slist_free(list); + + return cpu_list; +} diff --git a/target/mips/sysemu/physaddr.c b/target/mips/system/physaddr.c similarity index 99% rename from target/mips/sysemu/physaddr.c rename to target/mips/system/physaddr.c index 505781d84c1..b8e1a5ac98e 100644 --- a/target/mips/sysemu/physaddr.c +++ b/target/mips/system/physaddr.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/page-protection.h" #include "../internal.h" diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c index 4886d087b2e..d32bcebf469 100644 --- a/target/mips/tcg/exception.c +++ b/target/mips/tcg/exception.c @@ -23,7 +23,7 @@ #include "cpu.h" #include "internal.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" +#include "exec/translation-block.h" target_ulong exception_resume_pc(CPUMIPSState *env) { diff --git a/target/mips/tcg/fpu_helper.c b/target/mips/tcg/fpu_helper.c index 45d593de489..36af9808025 100644 --- a/target/mips/tcg/fpu_helper.c +++ b/target/mips/tcg/fpu_helper.c @@ -24,7 +24,6 @@ #include "cpu.h" #include "internal.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" #include "fpu/softfloat.h" #include "fpu_helper.h" diff --git a/target/mips/tcg/godson2.decode b/target/mips/tcg/godson2.decode new file mode 100644 index 00000000000..25b396b6822 --- /dev/null +++ b/target/mips/tcg/godson2.decode @@ -0,0 +1,27 @@ +# Godson2 64-bit Integer instructions +# +# Copyright (C) 2021 Philippe Mathieu-Daudé +# +# SPDX-License-Identifier: LGPL-2.1-or-later +# +# Reference: +# Godson-2E Software Manual +# (Document Number: godson2e-user-manual-V0.6) +# + +&muldiv rs rt rd + +@rs_rt_rd ...... rs:5 rt:5 rd:5 ..... ...... &muldiv + +MULTu_G 011111 ..... ..... ..... 00000 01100- @rs_rt_rd +DMULTu_G 011111 ..... ..... ..... 00000 01110- @rs_rt_rd + +DIV_G 011111 ..... ..... ..... 00000 011010 @rs_rt_rd +DIVU_G 011111 ..... ..... ..... 00000 011011 @rs_rt_rd +DDIV_G 011111 ..... 
..... ..... 00000 011110 @rs_rt_rd +DDIVU_G 011111 ..... ..... ..... 00000 011111 @rs_rt_rd + +MOD_G 011111 ..... ..... ..... 00000 100010 @rs_rt_rd +MODU_G 011111 ..... ..... ..... 00000 100011 @rs_rt_rd +DMOD_G 011111 ..... ..... ..... 00000 100110 @rs_rt_rd +DMODU_G 011111 ..... ..... ..... 00000 100111 @rs_rt_rd diff --git a/target/mips/tcg/ldst_helper.c b/target/mips/tcg/ldst_helper.c index 97056d00a27..10319bf03a6 100644 --- a/target/mips/tcg/ldst_helper.c +++ b/target/mips/tcg/ldst_helper.c @@ -23,8 +23,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "exec/memop.h" #include "internal.h" @@ -53,11 +52,6 @@ HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong)) #endif /* !CONFIG_USER_ONLY */ -static inline bool cpu_is_bigendian(CPUMIPSState *env) -{ - return extract32(env->CP0_Config0, CP0C0_BE, 1); -} - static inline target_ulong get_lmask(CPUMIPSState *env, target_ulong value, unsigned bits) { @@ -65,7 +59,7 @@ static inline target_ulong get_lmask(CPUMIPSState *env, value &= mask; - if (!cpu_is_bigendian(env)) { + if (!mips_env_is_bigendian(env)) { value ^= mask; } @@ -76,7 +70,7 @@ void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { target_ulong lmask = get_lmask(env, arg2, 32); - int dir = cpu_is_bigendian(env) ? 1 : -1; + int dir = mips_env_is_bigendian(env) ? 1 : -1; cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC()); @@ -100,7 +94,7 @@ void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { target_ulong lmask = get_lmask(env, arg2, 32); - int dir = cpu_is_bigendian(env) ? 1 : -1; + int dir = mips_env_is_bigendian(env) ? 1 : -1; cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); @@ -130,7 +124,7 @@ void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { target_ulong lmask = get_lmask(env, arg2, 64); - int dir = cpu_is_bigendian(env) ? 1 : -1; + int dir = mips_env_is_bigendian(env) ? 1 : -1; cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC()); @@ -174,7 +168,7 @@ void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { target_ulong lmask = get_lmask(env, arg2, 64); - int dir = cpu_is_bigendian(env) ? 1 : -1; + int dir = mips_env_is_bigendian(env) ? 1 : -1; cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); diff --git a/target/mips/tcg/loong-ext.decode b/target/mips/tcg/loong-ext.decode new file mode 100644 index 00000000000..b43979d0ef5 --- /dev/null +++ b/target/mips/tcg/loong-ext.decode @@ -0,0 +1,28 @@ +# Loongson 64-bit Extension instructions +# +# Copyright (C) 2021 Philippe Mathieu-Daudé +# +# SPDX-License-Identifier: LGPL-2.1-or-later +# +# Reference: +# STLS2F01 User Manual +# Appendix A: new integer instructions +# (Document Number: UM0447) +# + +&muldiv rs rt rd !extern + +@rs_rt_rd ...... rs:5 rt:5 rd:5 ..... ...... &muldiv + +MULTu_G 011100 ..... ..... ..... 00000 0100-0 @rs_rt_rd +DMULTu_G 011100 ..... ..... ..... 00000 0100-1 @rs_rt_rd + +DIV_G 011100 ..... ..... ..... 00000 010100 @rs_rt_rd +DDIV_G 011100 ..... ..... ..... 00000 010101 @rs_rt_rd +DIVU_G 011100 ..... ..... ..... 00000 010110 @rs_rt_rd +DDIVU_G 011100 ..... ..... ..... 00000 010111 @rs_rt_rd + +MOD_G 011100 ..... ..... ..... 00000 011100 @rs_rt_rd +DMOD_G 011100 ..... ..... ..... 00000 011101 @rs_rt_rd +MODU_G 011100 ..... ..... ..... 00000 011110 @rs_rt_rd +DMODU_G 011100 ..... ..... ..... 
00000 011111 @rs_rt_rd diff --git a/target/mips/tcg/loong_translate.c b/target/mips/tcg/loong_translate.c new file mode 100644 index 00000000000..7d74cc34f8a --- /dev/null +++ b/target/mips/tcg/loong_translate.c @@ -0,0 +1,271 @@ +/* + * MIPS Loongson 64-bit translation routines + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * Copyright (c) 2006 Marius Groeger (FPU operations) + * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support) + * Copyright (c) 2011 Richard Henderson + * Copyright (c) 2021 Philippe Mathieu-Daudé + * + * This code is licensed under the GNU GPLv2 and later. + */ + +#include "qemu/osdep.h" +#include "translate.h" + +/* Include the auto-generated decoder. */ +#include "decode-godson2.c.inc" +#include "decode-loong-ext.c.inc" + +/* + * Word or double-word Fixed-point instructions. + * --------------------------------------------- + * + * Fixed-point multiplies and divisions write only + * one result into general-purpose registers. + */ + +static bool gen_lext_DIV_G(DisasContext *s, int rd, int rs, int rt, + bool is_double) +{ + TCGv t0, t1; + TCGLabel *l1, *l2, *l3; + + if (rd == 0) { + /* Treat as NOP. */ + return true; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + l1 = gen_new_label(); + l2 = gen_new_label(); + l3 = gen_new_label(); + + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + + if (!is_double) { + tcg_gen_ext32s_tl(t0, t0); + tcg_gen_ext32s_tl(t1, t1); + } + tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(cpu_gpr[rd], 0); + tcg_gen_br(l3); + gen_set_label(l1); + + tcg_gen_brcondi_tl(TCG_COND_NE, t0, is_double ? LLONG_MIN : INT_MIN, l2); + tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2); + tcg_gen_mov_tl(cpu_gpr[rd], t0); + + tcg_gen_br(l3); + gen_set_label(l2); + tcg_gen_div_tl(cpu_gpr[rd], t0, t1); + if (!is_double) { + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } + gen_set_label(l3); + + return true; +} + +static bool trans_DIV_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_DIV_G(s, a->rd, a->rs, a->rt, false); +} + +static bool trans_DDIV_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_DIV_G(s, a->rd, a->rs, a->rt, true); +} + +static bool gen_lext_DIVU_G(DisasContext *s, int rd, int rs, int rt, + bool is_double) +{ + TCGv t0, t1; + TCGLabel *l1, *l2; + + if (rd == 0) { + /* Treat as NOP. */ + return true; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + l1 = gen_new_label(); + l2 = gen_new_label(); + + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + + if (!is_double) { + tcg_gen_ext32u_tl(t0, t0); + tcg_gen_ext32u_tl(t1, t1); + } + tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(cpu_gpr[rd], 0); + + tcg_gen_br(l2); + gen_set_label(l1); + tcg_gen_divu_tl(cpu_gpr[rd], t0, t1); + if (!is_double) { + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } + gen_set_label(l2); + + return true; +} + +static bool trans_DIVU_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_DIVU_G(s, a->rd, a->rs, a->rt, false); +} + +static bool trans_DDIVU_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_DIVU_G(s, a->rd, a->rs, a->rt, true); +} + +static bool gen_lext_MOD_G(DisasContext *s, int rd, int rs, int rt, + bool is_double) +{ + TCGv t0, t1; + TCGLabel *l1, *l2, *l3; + + if (rd == 0) { + /* Treat as NOP. 
*/ + return true; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + l1 = gen_new_label(); + l2 = gen_new_label(); + l3 = gen_new_label(); + + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + + if (!is_double) { + tcg_gen_ext32u_tl(t0, t0); + tcg_gen_ext32u_tl(t1, t1); + } + tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); + tcg_gen_brcondi_tl(TCG_COND_NE, t0, is_double ? LLONG_MIN : INT_MIN, l2); + tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2); + gen_set_label(l1); + tcg_gen_movi_tl(cpu_gpr[rd], 0); + tcg_gen_br(l3); + gen_set_label(l2); + tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); + if (!is_double) { + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } + gen_set_label(l3); + + return true; +} + +static bool trans_MOD_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_MOD_G(s, a->rd, a->rs, a->rt, false); +} + +static bool trans_DMOD_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_MOD_G(s, a->rd, a->rs, a->rt, true); +} + +static bool gen_lext_MODU_G(DisasContext *s, int rd, int rs, int rt, + bool is_double) +{ + TCGv t0, t1; + TCGLabel *l1, *l2; + + if (rd == 0) { + /* Treat as NOP. */ + return true; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + l1 = gen_new_label(); + l2 = gen_new_label(); + + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + + if (!is_double) { + tcg_gen_ext32u_tl(t0, t0); + tcg_gen_ext32u_tl(t1, t1); + } + tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(cpu_gpr[rd], 0); + tcg_gen_br(l2); + gen_set_label(l1); + tcg_gen_remu_tl(cpu_gpr[rd], t0, t1); + if (!is_double) { + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } + gen_set_label(l2); + + return true; +} + +static bool trans_MODU_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_MODU_G(s, a->rd, a->rs, a->rt, false); +} + +static bool trans_DMODU_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_MODU_G(s, a->rd, a->rs, a->rt, true); +} + +static bool gen_lext_MULT_G(DisasContext *s, int rd, int rs, int rt, + bool is_double) +{ + TCGv t0, t1; + + if (rd == 0) { + /* Treat as NOP. 
*/ + return true; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + + tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); + if (!is_double) { + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } + + return true; +} + +static bool trans_MULTu_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_MULT_G(s, a->rd, a->rs, a->rt, false); +} + +static bool trans_DMULTu_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_MULT_G(s, a->rd, a->rs, a->rt, true); +} + +bool decode_ext_loongson(DisasContext *ctx, uint32_t insn) +{ + if (!decode_64bit_enabled(ctx)) { + return false; + } + if ((ctx->insn_flags & INSN_LOONGSON2E) && decode_godson2(ctx, ctx->opcode)) { + return true; + } + if ((ctx->insn_flags & ASE_LEXT) && decode_loong_ext(ctx, ctx->opcode)) { + return true; + } + return false; +} diff --git a/target/mips/tcg/meson.build b/target/mips/tcg/meson.build index ea7fb582f2a..fff9cd6c7f5 100644 --- a/target/mips/tcg/meson.build +++ b/target/mips/tcg/meson.build @@ -5,6 +5,8 @@ gen = [ decodetree.process('vr54xx.decode', extra_args: '--decode=decode_ext_vr54xx'), decodetree.process('octeon.decode', extra_args: '--decode=decode_ext_octeon'), decodetree.process('lcsr.decode', extra_args: '--decode=decode_ase_lcsr'), + decodetree.process('godson2.decode', extra_args: ['--static-decode=decode_godson2']), + decodetree.process('loong-ext.decode', extra_args: ['--static-decode=decode_loong_ext']), ] mips_ss.add(gen) @@ -28,10 +30,11 @@ mips_ss.add(when: 'TARGET_MIPS64', if_true: files( 'tx79_translate.c', 'octeon_translate.c', 'lcsr_translate.c', + 'loong_translate.c', ), if_false: files( 'mxu_translate.c', )) if have_system - subdir('sysemu') + subdir('system') endif diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc index 75108317019..8fda7c8a214 100644 --- a/target/mips/tcg/micromips_translate.c.inc +++ b/target/mips/tcg/micromips_translate.c.inc @@ -977,23 +977,21 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd, gen_reserved_instruction(ctx); return; } - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL | + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); gen_store_gpr(t1, rd); - tcg_gen_movi_tl(t1, 4); - gen_op_addr_add(ctx, t0, t0, t1); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL | + gen_op_addr_addi(ctx, t0, t0, 4); + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); gen_store_gpr(t1, rd + 1); break; case SWP: gen_load_gpr(t1, rd); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); - tcg_gen_movi_tl(t1, 4); - gen_op_addr_add(ctx, t0, t0, t1); + gen_op_addr_addi(ctx, t0, t0, 4); gen_load_gpr(t1, rd + 1); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); break; #ifdef TARGET_MIPS64 @@ -1002,23 +1000,21 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd, gen_reserved_instruction(ctx); return; } - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_store_gpr(t1, rd); - tcg_gen_movi_tl(t1, 8); - gen_op_addr_add(ctx, t0, t0, t1); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + gen_op_addr_addi(ctx, t0, t0, 8); + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ 
| ctx->default_tcg_memop_mask); gen_store_gpr(t1, rd + 1); break; case SDP: gen_load_gpr(t1, rd); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); - tcg_gen_movi_tl(t1, 8); - gen_op_addr_add(ctx, t0, t0, t1); + gen_op_addr_addi(ctx, t0, t0, 8); gen_load_gpr(t1, rd + 1); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); break; #endif @@ -1799,7 +1795,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) return; case LSA: check_insn(ctx, ISA_MIPS_R6); - gen_lsa(ctx, rd, rt, rs, extract32(ctx->opcode, 9, 2)); + gen_lsa(ctx, rd, rt, rs, extract32(ctx->opcode, 9, 2) + 1); break; case ALIGN: check_insn(ctx, ISA_MIPS_R6); @@ -2488,7 +2484,10 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) mips32_op = OPC_BC1TANY4; do_cp1mips3d: check_cop1x(ctx); - check_insn(ctx, ASE_MIPS3D); + if (!ase_3d_available(env)) { + gen_reserved_instruction(ctx); + break; + } /* Fall through */ do_cp1branch: if (env->CP0_Config1 & (1 << CP0C1_FP)) { @@ -2572,13 +2571,13 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) gen_st(ctx, mips32_op, rt, rs, offset); break; case SC: - gen_st_cond(ctx, rt, rs, offset, MO_TESL, false); + gen_st_cond(ctx, rt, rs, offset, mo_endian(ctx) | MO_SL, false); break; #if defined(TARGET_MIPS64) case SCD: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); - gen_st_cond(ctx, rt, rs, offset, MO_TEUQ, false); + gen_st_cond(ctx, rt, rs, offset, mo_endian(ctx) | MO_UQ, false); break; #endif case LD_EVA: @@ -2659,7 +2658,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) mips32_op = OPC_SHE; goto do_st_lr; case SCE: - gen_st_cond(ctx, rt, rs, offset, MO_TESL, true); + gen_st_cond(ctx, rt, rs, offset, mo_endian(ctx) | MO_SL, true); break; case SWE: mips32_op = OPC_SWE; diff --git a/target/mips/tcg/mips16e_translate.c.inc b/target/mips/tcg/mips16e_translate.c.inc index 5cffe0e412d..97da3456ea5 100644 --- a/target/mips/tcg/mips16e_translate.c.inc +++ b/target/mips/tcg/mips16e_translate.c.inc @@ -122,11 +122,21 @@ enum { static int xlat(int r) { - static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; + static const int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; return map[r]; } +static void decr_and_store(DisasContext *ctx, unsigned regidx, TCGv t0) +{ + TCGv t1 = tcg_temp_new(); + + gen_op_addr_addi(ctx, t0, t0, -4); + gen_load_gpr(t1, regidx); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | + ctx->default_tcg_memop_mask); +} + static void gen_mips16_save(DisasContext *ctx, int xsregs, int aregs, int do_ra, int do_s0, int do_s1, @@ -134,7 +144,6 @@ static void gen_mips16_save(DisasContext *ctx, { TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); int args, astatic; switch (aregs) { @@ -172,70 +181,62 @@ static void gen_mips16_save(DisasContext *ctx, case 4: gen_base_offset_addr(ctx, t0, 29, 12); gen_load_gpr(t1, 7); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); /* Fall through */ case 3: gen_base_offset_addr(ctx, t0, 29, 8); gen_load_gpr(t1, 6); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); /* Fall through */ case 2: gen_base_offset_addr(ctx, t0, 29, 4); 
gen_load_gpr(t1, 5); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); /* Fall through */ case 1: gen_base_offset_addr(ctx, t0, 29, 0); gen_load_gpr(t1, 4); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); } gen_load_gpr(t0, 29); -#define DECR_AND_STORE(reg) do { \ - tcg_gen_movi_tl(t2, -4); \ - gen_op_addr_add(ctx, t0, t0, t2); \ - gen_load_gpr(t1, reg); \ - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | \ - ctx->default_tcg_memop_mask); \ - } while (0) - if (do_ra) { - DECR_AND_STORE(31); + decr_and_store(ctx, 31, t0); } switch (xsregs) { case 7: - DECR_AND_STORE(30); + decr_and_store(ctx, 30, t0); /* Fall through */ case 6: - DECR_AND_STORE(23); + decr_and_store(ctx, 23, t0); /* Fall through */ case 5: - DECR_AND_STORE(22); + decr_and_store(ctx, 22, t0); /* Fall through */ case 4: - DECR_AND_STORE(21); + decr_and_store(ctx, 21, t0); /* Fall through */ case 3: - DECR_AND_STORE(20); + decr_and_store(ctx, 20, t0); /* Fall through */ case 2: - DECR_AND_STORE(19); + decr_and_store(ctx, 19, t0); /* Fall through */ case 1: - DECR_AND_STORE(18); + decr_and_store(ctx, 18, t0); } if (do_s1) { - DECR_AND_STORE(17); + decr_and_store(ctx, 17, t0); } if (do_s0) { - DECR_AND_STORE(16); + decr_and_store(ctx, 16, t0); } switch (aregs) { @@ -270,21 +271,31 @@ static void gen_mips16_save(DisasContext *ctx, } if (astatic > 0) { - DECR_AND_STORE(7); + decr_and_store(ctx, 7, t0); if (astatic > 1) { - DECR_AND_STORE(6); + decr_and_store(ctx, 6, t0); if (astatic > 2) { - DECR_AND_STORE(5); + decr_and_store(ctx, 5, t0); if (astatic > 3) { - DECR_AND_STORE(4); + decr_and_store(ctx, 4, t0); } } } } -#undef DECR_AND_STORE - tcg_gen_movi_tl(t2, -framesize); - gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2); + gen_op_addr_addi(ctx, cpu_gpr[29], cpu_gpr[29], -framesize); +} + +static void decr_and_load(DisasContext *ctx, unsigned regidx, TCGv t0) +{ + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_temp_new(); + + tcg_gen_movi_tl(t2, -4); + gen_op_addr_add(ctx, t0, t0, t2); + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TE | MO_SL | + ctx->default_tcg_memop_mask); + gen_store_gpr(t1, regidx); } static void gen_mips16_restore(DisasContext *ctx, @@ -294,52 +305,41 @@ static void gen_mips16_restore(DisasContext *ctx, { int astatic; TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); - - tcg_gen_movi_tl(t2, framesize); - gen_op_addr_add(ctx, t0, cpu_gpr[29], t2); -#define DECR_AND_LOAD(reg) do { \ - tcg_gen_movi_tl(t2, -4); \ - gen_op_addr_add(ctx, t0, t0, t2); \ - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL | \ - ctx->default_tcg_memop_mask); \ - gen_store_gpr(t1, reg); \ - } while (0) + gen_op_addr_addi(ctx, t0, cpu_gpr[29], framesize); if (do_ra) { - DECR_AND_LOAD(31); + decr_and_load(ctx, 31, t0); } switch (xsregs) { case 7: - DECR_AND_LOAD(30); + decr_and_load(ctx, 30, t0); /* Fall through */ case 6: - DECR_AND_LOAD(23); + decr_and_load(ctx, 23, t0); /* Fall through */ case 5: - DECR_AND_LOAD(22); + decr_and_load(ctx, 22, t0); /* Fall through */ case 4: - DECR_AND_LOAD(21); + decr_and_load(ctx, 21, t0); /* Fall through */ case 3: - DECR_AND_LOAD(20); + decr_and_load(ctx, 20, t0); /* Fall through */ case 2: - DECR_AND_LOAD(19); + decr_and_load(ctx, 19, t0); /* Fall through */ case 1: - DECR_AND_LOAD(18); + decr_and_load(ctx, 18, t0); } if (do_s1) { - DECR_AND_LOAD(17); + 
decr_and_load(ctx, 17, t0); } if (do_s0) { - DECR_AND_LOAD(16); + decr_and_load(ctx, 16, t0); } switch (aregs) { @@ -374,21 +374,19 @@ static void gen_mips16_restore(DisasContext *ctx, } if (astatic > 0) { - DECR_AND_LOAD(7); + decr_and_load(ctx, 7, t0); if (astatic > 1) { - DECR_AND_LOAD(6); + decr_and_load(ctx, 6, t0); if (astatic > 2) { - DECR_AND_LOAD(5); + decr_and_load(ctx, 5, t0); if (astatic > 3) { - DECR_AND_LOAD(4); + decr_and_load(ctx, 4, t0); } } } } -#undef DECR_AND_LOAD - tcg_gen_movi_tl(t2, framesize); - gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2); + gen_op_addr_addi(ctx, cpu_gpr[29], cpu_gpr[29], framesize); } #if defined(TARGET_MIPS64) diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c index d2181763e72..f554b3d10ee 100644 --- a/target/mips/tcg/msa_helper.c +++ b/target/mips/tcg/msa_helper.c @@ -21,10 +21,11 @@ #include "cpu.h" #include "internal.h" #include "tcg/tcg.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/probe.h" #include "exec/helper-proto.h" #include "exec/memop.h" +#include "exec/target_page.h" #include "fpu/softfloat.h" #include "fpu_helper.h" @@ -5577,7 +5578,7 @@ static inline int64_t msa_mulr_q_df(uint32_t df, int64_t arg1, int64_t arg2) { int64_t q_min = DF_MIN_INT(df); int64_t q_max = DF_MAX_INT(df); - int64_t r_bit = 1 << (DF_BITS(df) - 2); + int64_t r_bit = 1LL << (DF_BITS(df) - 2); if (arg1 == q_min && arg2 == q_min) { return q_max; @@ -5685,7 +5686,7 @@ static inline int64_t msa_maddr_q_df(uint32_t df, int64_t dest, int64_t arg1, int64_t q_max = DF_MAX_INT(df); int64_t q_min = DF_MIN_INT(df); - int64_t r_bit = 1 << (DF_BITS(df) - 2); + int64_t r_bit = 1LL << (DF_BITS(df) - 2); q_prod = arg1 * arg2; q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod + r_bit) >> (DF_BITS(df) - 1); @@ -5700,7 +5701,7 @@ static inline int64_t msa_msubr_q_df(uint32_t df, int64_t dest, int64_t arg1, int64_t q_max = DF_MAX_INT(df); int64_t q_min = DF_MIN_INT(df); - int64_t r_bit = 1 << (DF_BITS(df) - 2); + int64_t r_bit = 1LL << (DF_BITS(df) - 2); q_prod = arg1 * arg2; q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod + r_bit) >> (DF_BITS(df) - 1); @@ -6231,7 +6232,7 @@ static inline int update_msacsr(CPUMIPSState *env, int action, int denormal) enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED; /* Set Inexact (I) when flushing inputs to zero */ - if ((ieee_exception_flags & float_flag_input_denormal) && + if ((ieee_exception_flags & float_flag_input_denormal_flushed) && (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) { if (action & CLEAR_IS_INEXACT) { mips_exception_flags &= ~FP_INEXACT; @@ -6241,7 +6242,7 @@ static inline int update_msacsr(CPUMIPSState *env, int action, int denormal) } /* Set Inexact (I) and Underflow (U) when flushing outputs to zero */ - if ((ieee_exception_flags & float_flag_output_denormal) && + if ((ieee_exception_flags & float_flag_output_denormal_flushed) && (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) { mips_exception_flags |= FP_INEXACT; if (action & CLEAR_FS_UNDERFLOW) { @@ -8211,15 +8212,6 @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, /* Element-by-element access macros */ #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df)) -#if !defined(CONFIG_USER_ONLY) -#define MEMOP_IDX(DF) \ - MemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \ - mips_env_mmu_index(env)); -#else -#define MEMOP_IDX(DF) -#endif - -#if TARGET_BIG_ENDIAN static inline uint64_t bswap16x4(uint64_t x) { uint64_t m = 0x00ff00ff00ff00ffull; @@ -8230,7 
+8222,6 @@ static inline uint64_t bswap32x2(uint64_t x) { return ror64(bswap64(x), 32); } -#endif void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd, target_ulong addr) @@ -8259,10 +8250,10 @@ void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd, */ d0 = cpu_ldq_le_data_ra(env, addr + 0, ra); d1 = cpu_ldq_le_data_ra(env, addr + 8, ra); -#if TARGET_BIG_ENDIAN - d0 = bswap16x4(d0); - d1 = bswap16x4(d1); -#endif + if (mips_env_is_bigendian(env)) { + d0 = bswap16x4(d0); + d1 = bswap16x4(d1); + } pwd->d[0] = d0; pwd->d[1] = d1; } @@ -8280,10 +8271,10 @@ void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd, */ d0 = cpu_ldq_le_data_ra(env, addr + 0, ra); d1 = cpu_ldq_le_data_ra(env, addr + 8, ra); -#if TARGET_BIG_ENDIAN - d0 = bswap32x2(d0); - d1 = bswap32x2(d1); -#endif + if (mips_env_is_bigendian(env)) { + d0 = bswap32x2(d0); + d1 = bswap32x2(d1); + } pwd->d[0] = d0; pwd->d[1] = d1; } @@ -8346,10 +8337,10 @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd, /* Store 8 bytes at a time. See helper_msa_ld_h. */ d0 = pwd->d[0]; d1 = pwd->d[1]; -#if TARGET_BIG_ENDIAN - d0 = bswap16x4(d0); - d1 = bswap16x4(d1); -#endif + if (mips_env_is_bigendian(env)) { + d0 = bswap16x4(d0); + d1 = bswap16x4(d1); + } cpu_stq_le_data_ra(env, addr + 0, d0, ra); cpu_stq_le_data_ra(env, addr + 8, d1, ra); } @@ -8367,10 +8358,10 @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd, /* Store 8 bytes at a time. See helper_msa_ld_w. */ d0 = pwd->d[0]; d1 = pwd->d[1]; -#if TARGET_BIG_ENDIAN - d0 = bswap32x2(d0); - d1 = bswap32x2(d1); -#endif + if (mips_env_is_bigendian(env)) { + d0 = bswap32x2(d0); + d1 = bswap32x2(d1); + } cpu_stq_le_data_ra(env, addr + 0, d0, ra); cpu_stq_le_data_ra(env, addr + 8, d1, ra); } diff --git a/target/mips/tcg/msa_translate.c b/target/mips/tcg/msa_translate.c index 75cf80a20ed..82b149922fa 100644 --- a/target/mips/tcg/msa_translate.c +++ b/target/mips/tcg/msa_translate.c @@ -780,7 +780,7 @@ TRANS_DF_iv(ST, trans_msa_ldst, gen_helper_msa_st); static bool trans_LSA(DisasContext *ctx, arg_r *a) { - return gen_lsa(ctx, a->rd, a->rt, a->rs, a->sa); + return gen_lsa(ctx, a->rd, a->rt, a->rs, a->sa + 1); } static bool trans_DLSA(DisasContext *ctx, arg_r *a) @@ -788,5 +788,5 @@ static bool trans_DLSA(DisasContext *ctx, arg_r *a) if (TARGET_LONG_BITS != 64) { return false; } - return gen_dlsa(ctx, a->rd, a->rt, a->rs, a->sa); + return gen_dlsa(ctx, a->rd, a->rt, a->rs, a->sa + 1); } diff --git a/target/mips/tcg/mxu_translate.c b/target/mips/tcg/mxu_translate.c index c517258ac5a..35ebb0397da 100644 --- a/target/mips/tcg/mxu_translate.c +++ b/target/mips/tcg/mxu_translate.c @@ -1533,7 +1533,7 @@ static void gen_mxu_s32ldxx(DisasContext *ctx, bool reversed, bool postinc) tcg_gen_add_tl(t0, t0, t1); tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, - (MO_TESL ^ (reversed ? MO_BSWAP : 0)) | + MO_SL | mo_endian_rev(ctx, reversed) | ctx->default_tcg_memop_mask); gen_store_mxu_gpr(t1, XRa); @@ -1569,7 +1569,7 @@ static void gen_mxu_s32stxx(DisasContext *ctx, bool reversed, bool postinc) gen_load_mxu_gpr(t1, XRa); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, - (MO_TESL ^ (reversed ? MO_BSWAP : 0)) | + MO_SL | mo_endian_rev(ctx, reversed) | ctx->default_tcg_memop_mask); if (postinc) { @@ -1605,7 +1605,7 @@ static void gen_mxu_s32ldxvx(DisasContext *ctx, bool reversed, tcg_gen_add_tl(t0, t0, t1); tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, - (MO_TESL ^ (reversed ? 
MO_BSWAP : 0)) | + MO_SL | mo_endian_rev(ctx, reversed) | ctx->default_tcg_memop_mask); gen_store_mxu_gpr(t1, XRa); @@ -1675,7 +1675,7 @@ static void gen_mxu_s32stxvx(DisasContext *ctx, bool reversed, gen_load_mxu_gpr(t1, XRa); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, - (MO_TESL ^ (reversed ? MO_BSWAP : 0)) | + MO_SL | mo_endian_rev(ctx, reversed) | ctx->default_tcg_memop_mask); if (postinc) { @@ -4803,19 +4803,19 @@ static void decode_opc_mxu__pool17(DisasContext *ctx) switch (opcode) { case OPC_MXU_LXW: - gen_mxu_lxx(ctx, strd2, MO_TE | MO_UL); + gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_UL); break; case OPC_MXU_LXB: - gen_mxu_lxx(ctx, strd2, MO_TE | MO_SB); + gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_SB); break; case OPC_MXU_LXH: - gen_mxu_lxx(ctx, strd2, MO_TE | MO_SW); + gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_SW); break; case OPC_MXU_LXBU: - gen_mxu_lxx(ctx, strd2, MO_TE | MO_UB); + gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_UB); break; case OPC_MXU_LXHU: - gen_mxu_lxx(ctx, strd2, MO_TE | MO_UW); + gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_UW); break; default: MIPS_INVAL("decode_opc_mxu"); diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc index b4b746d4187..9d4e0bee81f 100644 --- a/target/mips/tcg/nanomips_translate.c.inc +++ b/target/mips/tcg/nanomips_translate.c.inc @@ -998,8 +998,9 @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset, TCGv tmp2 = tcg_temp_new(); gen_base_offset_addr(ctx, taddr, base, offset); - tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx, MO_TEUQ | MO_ALIGN); - if (cpu_is_bigendian(ctx)) { + tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx, + mo_endian(ctx) | MO_UQ | MO_ALIGN); + if (disas_is_bigendian(ctx)) { tcg_gen_extr_i64_tl(tmp2, tmp1, tval); } else { tcg_gen_extr_i64_tl(tmp1, tmp2, tval); @@ -1031,7 +1032,7 @@ static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset, gen_load_gpr(tmp1, reg1); gen_load_gpr(tmp2, reg2); - if (cpu_is_bigendian(ctx)) { + if (disas_is_bigendian(ctx)) { tcg_gen_concat_tl_i64(tval, tmp2, tmp1); } else { tcg_gen_concat_tl_i64(tval, tmp1, tmp2); @@ -1052,8 +1053,7 @@ static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset, tcg_gen_movi_tl(cpu_gpr[reg1], 0); } gen_set_label(lab_done); - tcg_gen_movi_tl(lladdr, -1); - tcg_gen_st_tl(lladdr, tcg_env, offsetof(CPUMIPSState, lladdr)); + tcg_gen_st_tl(tcg_constant_tl(-1), tcg_env, offsetof(CPUMIPSState, lladdr)); } static void gen_adjust_sp(DisasContext *ctx, int u) @@ -1075,7 +1075,7 @@ static void gen_save(DisasContext *ctx, uint8_t rt, uint8_t count, gen_base_offset_addr(ctx, va, 29, this_offset); gen_load_gpr(t0, this_rt); tcg_gen_qemu_st_tl(t0, va, ctx->mem_idx, - (MO_TEUL | ctx->default_tcg_memop_mask)); + mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); counter++; } @@ -1095,8 +1095,8 @@ static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count, int this_rt = use_gp ? 
28 : (rt & 0x10) | ((rt + counter) & 0x1f); int this_offset = u - ((counter + 1) << 2); gen_base_offset_addr(ctx, va, 29, this_offset); - tcg_gen_qemu_ld_tl(t0, va, ctx->mem_idx, MO_TESL | - ctx->default_tcg_memop_mask); + tcg_gen_qemu_ld_tl(t0, va, ctx->mem_idx, + mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); tcg_gen_ext32s_tl(t0, t0); gen_store_gpr(t0, this_rt); counter++; @@ -1543,7 +1543,6 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc, { int16_t imm; TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); TCGv v0_t = tcg_temp_new(); gen_load_gpr(v0_t, v1); @@ -1570,12 +1569,10 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc, check_dsp(ctx); switch (extract32(ctx->opcode, 12, 2)) { case NM_MTHLIP: - tcg_gen_movi_tl(t0, v2 >> 3); - gen_helper_mthlip(t0, v0_t, tcg_env); + gen_helper_mthlip(tcg_constant_tl(v2 >> 3), v0_t, tcg_env); break; case NM_SHILOV: - tcg_gen_movi_tl(t0, v2 >> 3); - gen_helper_shilo(t0, v0_t, tcg_env); + gen_helper_shilo(tcg_constant_tl(v2 >> 3), v0_t, tcg_env); break; default: gen_reserved_instruction(ctx); @@ -1587,39 +1584,34 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc, imm = extract32(ctx->opcode, 14, 7); switch (extract32(ctx->opcode, 12, 2)) { case NM_RDDSP: - tcg_gen_movi_tl(t0, imm); - gen_helper_rddsp(t0, t0, tcg_env); + gen_helper_rddsp(t0, tcg_constant_tl(imm), tcg_env); gen_store_gpr(t0, ret); break; case NM_WRDSP: gen_load_gpr(t0, ret); - tcg_gen_movi_tl(t1, imm); - gen_helper_wrdsp(t0, t1, tcg_env); + gen_helper_wrdsp(t0, tcg_constant_tl(imm), tcg_env); break; case NM_EXTP: - tcg_gen_movi_tl(t0, v2 >> 3); - tcg_gen_movi_tl(t1, v1); - gen_helper_extp(t0, t0, t1, tcg_env); + gen_helper_extp(t0, tcg_constant_tl(v2 >> 3), + tcg_constant_tl(v1), tcg_env); gen_store_gpr(t0, ret); break; case NM_EXTPDP: - tcg_gen_movi_tl(t0, v2 >> 3); - tcg_gen_movi_tl(t1, v1); - gen_helper_extpdp(t0, t0, t1, tcg_env); + gen_helper_extpdp(t0, tcg_constant_tl(v2 >> 3), + tcg_constant_tl(v1), tcg_env); gen_store_gpr(t0, ret); break; } break; case NM_POOL32AXF_1_4: check_dsp(ctx); - tcg_gen_movi_tl(t0, v2 >> 2); switch (extract32(ctx->opcode, 12, 1)) { case NM_SHLL_QB: - gen_helper_shll_qb(t0, t0, v0_t, tcg_env); + gen_helper_shll_qb(t0, tcg_constant_tl(v2 >> 2), v0_t, tcg_env); gen_store_gpr(t0, ret); break; case NM_SHRL_QB: - gen_helper_shrl_qb(t0, t0, v0_t); + gen_helper_shrl_qb(t0, tcg_constant_tl(v2 >> 2), v0_t); gen_store_gpr(t0, ret); break; } @@ -1630,23 +1622,25 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc, break; case NM_POOL32AXF_1_7: check_dsp(ctx); - tcg_gen_movi_tl(t0, v2 >> 3); - tcg_gen_movi_tl(t1, v1); switch (extract32(ctx->opcode, 12, 2)) { case NM_EXTR_W: - gen_helper_extr_w(t0, t0, t1, tcg_env); + gen_helper_extr_w(t0, tcg_constant_tl(v2 >> 3), + tcg_constant_tl(v1), tcg_env); gen_store_gpr(t0, ret); break; case NM_EXTR_R_W: - gen_helper_extr_r_w(t0, t0, t1, tcg_env); + gen_helper_extr_r_w(t0, tcg_constant_tl(v2 >> 3), + tcg_constant_tl(v1), tcg_env); gen_store_gpr(t0, ret); break; case NM_EXTR_RS_W: - gen_helper_extr_rs_w(t0, t0, t1, tcg_env); + gen_helper_extr_rs_w(t0, tcg_constant_tl(v2 >> 3), + tcg_constant_tl(v1), tcg_env); gen_store_gpr(t0, ret); break; case NM_EXTR_S_H: - gen_helper_extr_s_h(t0, t0, t1, tcg_env); + gen_helper_extr_s_h(t0, tcg_constant_tl(v2 >> 3), + tcg_constant_tl(v1), tcg_env); gen_store_gpr(t0, ret); break; } @@ -1848,8 +1842,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, 
case NM_EXTRV_W: check_dsp(ctx); gen_load_gpr(v1_t, rs); - tcg_gen_movi_tl(t0, rd >> 3); - gen_helper_extr_w(t0, t0, v1_t, tcg_env); + gen_helper_extr_w(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env); gen_store_gpr(t0, ret); break; } @@ -1903,8 +1896,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, break; case NM_EXTRV_R_W: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd >> 3); - gen_helper_extr_r_w(t0, t0, v1_t, tcg_env); + gen_helper_extr_r_w(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env); gen_store_gpr(t0, ret); break; default: @@ -1923,8 +1915,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, break; case NM_EXTPV: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd >> 3); - gen_helper_extp(t0, t0, v1_t, tcg_env); + gen_helper_extp(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env); gen_store_gpr(t0, ret); break; case NM_MSUB: @@ -1947,8 +1938,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, break; case NM_EXTRV_RS_W: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd >> 3); - gen_helper_extr_rs_w(t0, t0, v1_t, tcg_env); + gen_helper_extr_rs_w(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env); gen_store_gpr(t0, ret); break; } @@ -1964,8 +1954,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, break; case NM_EXTPDPV: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd >> 3); - gen_helper_extpdp(t0, t0, v1_t, tcg_env); + gen_helper_extpdp(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env); gen_store_gpr(t0, ret); break; case NM_MSUBU: @@ -1990,8 +1979,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, break; case NM_EXTRV_S_H: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd >> 3); - gen_helper_extr_s_h(t0, t0, v1_t, tcg_env); + gen_helper_extr_s_h(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env); gen_store_gpr(t0, ret); break; } @@ -2149,24 +2137,22 @@ static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc, switch (opc) { case NM_SHRA_R_QB: check_dsp_r2(ctx); - tcg_gen_movi_tl(t0, rd >> 2); switch (extract32(ctx->opcode, 12, 1)) { case 0: /* NM_SHRA_QB */ - gen_helper_shra_qb(t0, t0, rs_t); + gen_helper_shra_qb(t0, tcg_constant_tl(rd >> 2), rs_t); gen_store_gpr(t0, rt); break; case 1: /* NM_SHRA_R_QB */ - gen_helper_shra_r_qb(t0, t0, rs_t); + gen_helper_shra_r_qb(t0, tcg_constant_tl(rd >> 2), rs_t); gen_store_gpr(t0, rt); break; } break; case NM_SHRL_PH: check_dsp_r2(ctx); - tcg_gen_movi_tl(t0, rd >> 1); - gen_helper_shrl_ph(t0, t0, rs_t); + gen_helper_shrl_ph(t0, tcg_constant_tl(rd >> 1), rs_t); gen_store_gpr(t0, rt); break; case NM_REPL_QB: @@ -2180,8 +2166,7 @@ static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc, (uint32_t)imm << 8 | (uint32_t)imm; result = (int32_t)result; - tcg_gen_movi_tl(t0, result); - gen_store_gpr(t0, rt); + gen_store_gpr(tcg_constant_tl(result), rt); } break; default: @@ -2302,10 +2287,9 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc, { TCGCond cond = TCG_COND_ALWAYS; TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); + TCGv timm = tcg_constant_tl(imm); gen_load_gpr(t0, rt); - tcg_gen_movi_tl(t1, imm); ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); /* Load needed operands and calculate btarget */ @@ -2334,7 +2318,7 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc, } else { tcg_gen_shri_tl(t0, t0, imm); tcg_gen_andi_tl(t0, t0, 1); - tcg_gen_movi_tl(t1, 0); + timm = tcg_constant_tl(0); if (opc == NM_BBEQZC) { cond = TCG_COND_EQ; } else { @@ -2389,7 +2373,7 @@ static void 
gen_compute_imm_branch(DisasContext *ctx, uint32_t opc, /* Conditional compact branch */ TCGLabel *fs = gen_new_label(); - tcg_gen_brcond_tl(tcg_invert_cond(cond), t0, t1, fs); + tcg_gen_brcond_tl(tcg_invert_cond(cond), t0, timm, fs); gen_goto_tb(ctx, 1, ctx->btarget); gen_set_label(fs); @@ -2403,7 +2387,6 @@ static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs, int rt) { TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); /* load rs */ gen_load_gpr(t0, rs); @@ -2415,8 +2398,7 @@ static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs, /* calculate btarget */ tcg_gen_shli_tl(t0, t0, 1); - tcg_gen_movi_tl(t1, ctx->base.pc_next + 4); - gen_op_addr_add(ctx, btarget, t1, t0); + gen_op_addr_add(ctx, btarget, tcg_constant_tl(ctx->base.pc_next + 4), t0); /* branch completion */ clear_branch_hflags(ctx); @@ -2469,11 +2451,9 @@ static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc, } else { /* OPC_JIC, OPC_JIALC */ TCGv tbase = tcg_temp_new(); - TCGv toffset = tcg_temp_new(); gen_load_gpr(tbase, rt); - tcg_gen_movi_tl(toffset, offset); - gen_op_addr_add(ctx, btarget, tbase, toffset); + gen_op_addr_addi(ctx, btarget, tbase, offset); } break; default: @@ -2647,13 +2627,13 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) case NM_LHX: /*case NM_LHXS:*/ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, - MO_TESW | ctx->default_tcg_memop_mask); + mo_endian(ctx) | MO_SW | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rd); break; case NM_LWX: /*case NM_LWXS:*/ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, - MO_TESL | ctx->default_tcg_memop_mask); + mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rd); break; case NM_LBUX: @@ -2663,7 +2643,7 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) case NM_LHUX: /*case NM_LHUXS:*/ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, - MO_TEUW | ctx->default_tcg_memop_mask); + mo_endian(ctx) | MO_UW | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rd); break; case NM_SBX: @@ -2676,14 +2656,14 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) check_nms(ctx); gen_load_gpr(t1, rd); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, - MO_TEUW | ctx->default_tcg_memop_mask); + mo_endian(ctx) | MO_UW | ctx->default_tcg_memop_mask); break; case NM_SWX: /*case NM_SWXS:*/ check_nms(ctx); gen_load_gpr(t1, rd); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, - MO_TEUL | ctx->default_tcg_memop_mask); + mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); break; case NM_LWC1X: /*case NM_LWC1XS:*/ @@ -3445,13 +3425,10 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, case NM_SHILO: check_dsp(ctx); { - TCGv tv0 = tcg_temp_new(); - TCGv tv1 = tcg_temp_new(); int16_t imm = extract32(ctx->opcode, 16, 7); - tcg_gen_movi_tl(tv0, rd >> 3); - tcg_gen_movi_tl(tv1, imm); - gen_helper_shilo(tv0, tv1, tcg_env); + gen_helper_shilo(tcg_constant_tl(rd >> 3), + tcg_constant_tl(imm), tcg_env); } break; case NM_MULEQ_S_W_PHL: @@ -3506,8 +3483,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, break; case NM_SHRA_R_W: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd); - gen_helper_shra_r_w(v1_t, t0, v1_t); + gen_helper_shra_r_w(v1_t, tcg_constant_tl(rd), v1_t); gen_store_gpr(v1_t, rt); break; case NM_SHRA_R_PH: @@ -3547,8 +3523,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, break; case NM_SHLL_S_W: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd); - gen_helper_shll_s_w(v1_t, t0, v1_t, tcg_env); + gen_helper_shll_s_w(v1_t, tcg_constant_tl(rd), v1_t, 
tcg_env); gen_store_gpr(v1_t, rt); break; case NM_REPL_PH: @@ -3651,12 +3626,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) gen_p_lsx(ctx, rd, rs, rt); break; case NM_LSA: - /* - * In nanoMIPS, the shift field directly encodes the shift - * amount, meaning that the supported shift values are in - * the range 0 to 3 (instead of 1 to 4 in MIPSR6). - */ - gen_lsa(ctx, rd, rt, rs, extract32(ctx->opcode, 9, 2) - 1); + gen_lsa(ctx, rd, rt, rs, extract32(ctx->opcode, 9, 2)); break; case NM_EXTW: gen_ext(ctx, 32, rd, rs, rt, extract32(ctx->opcode, 6, 5)); @@ -3729,32 +3699,29 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) case NM_LWPC48: check_nms(ctx); if (rt != 0) { - TCGv t0; - t0 = tcg_temp_new(); - target_long addr = addr_add(ctx, ctx->base.pc_next + 6, addr_off); - tcg_gen_movi_tl(t0, addr); - tcg_gen_qemu_ld_tl(cpu_gpr[rt], t0, ctx->mem_idx, - MO_TESL | ctx->default_tcg_memop_mask); + tcg_gen_qemu_ld_tl(cpu_gpr[rt], tcg_constant_tl(addr), + ctx->mem_idx, + mo_endian(ctx) | MO_SL + | ctx->default_tcg_memop_mask); } break; case NM_SWPC48: check_nms(ctx); { - TCGv t0, t1; - t0 = tcg_temp_new(); + TCGv t1; t1 = tcg_temp_new(); target_long addr = addr_add(ctx, ctx->base.pc_next + 6, addr_off); - tcg_gen_movi_tl(t0, addr); gen_load_gpr(t1, rt); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, - MO_TEUL | ctx->default_tcg_memop_mask); + tcg_gen_qemu_st_tl(t1, tcg_constant_tl(addr), ctx->mem_idx, + mo_endian(ctx) | MO_UL + | ctx->default_tcg_memop_mask); } break; default: @@ -4132,14 +4099,14 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) switch (extract32(ctx->opcode, 11, 4)) { case NM_UALH: - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW | - MO_UNALN); + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, + mo_endian(ctx) | MO_SW | MO_UNALN); gen_store_gpr(t0, rt); break; case NM_UASH: gen_load_gpr(t1, rt); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW | - MO_UNALN); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, + mo_endian(ctx) | MO_UW | MO_UNALN); break; } } @@ -4161,7 +4128,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) case NM_P_SC: switch (ctx->opcode & 0x03) { case NM_SC: - gen_st_cond(ctx, rt, rs, s, MO_TESL, false); + gen_st_cond(ctx, rt, rs, s, mo_endian(ctx) | MO_SL, + false); break; case NM_SCWP: check_xnp(ctx); @@ -4274,7 +4242,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) check_xnp(ctx); check_eva(ctx); check_cp0_enabled(ctx); - gen_st_cond(ctx, rt, rs, s, MO_TESL, true); + gen_st_cond(ctx, rt, rs, s, mo_endian(ctx) | MO_SL, + true); break; case NM_SCWPE: check_xnp(ctx); @@ -4317,7 +4286,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) switch (extract32(ctx->opcode, 11, 1)) { case NM_LWM: tcg_gen_qemu_ld_tl(t1, va, ctx->mem_idx, - memop | MO_TESL); + memop | mo_endian(ctx) | MO_SL); gen_store_gpr(t1, this_rt); if ((this_rt == rs) && (counter != (count - 1))) { @@ -4328,7 +4297,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) this_rt = (rt == 0) ? 
0 : this_rt; gen_load_gpr(t1, this_rt); tcg_gen_qemu_st_tl(t1, va, ctx->mem_idx, - memop | MO_TEUL); + memop | mo_endian(ctx) | MO_UL); break; } counter++; diff --git a/target/mips/tcg/octeon.decode b/target/mips/tcg/octeon.decode index 0c787cb498c..102a05860df 100644 --- a/target/mips/tcg/octeon.decode +++ b/target/mips/tcg/octeon.decode @@ -1,6 +1,7 @@ # Octeon Architecture Module instruction set # # Copyright (C) 2022 Pavel Dovgalyuk +# Copyright (C) 2024 Philippe Mathieu-Daudé # # SPDX-License-Identifier: LGPL-2.1-or-later # @@ -39,3 +40,10 @@ CINS 011100 ..... ..... ..... ..... 11001 . @bitfield POP 011100 rs:5 00000 rd:5 00000 10110 dw:1 SEQNE 011100 rs:5 rt:5 rd:5 00000 10101 ne:1 SEQNEI 011100 rs:5 rt:5 imm:s10 10111 ne:1 + +&lx base index rd +@lx ...... base:5 index:5 rd:5 ...... ..... &lx +LWX 011111 ..... ..... ..... 00000 001010 @lx +LHX 011111 ..... ..... ..... 00100 001010 @lx +LBUX 011111 ..... ..... ..... 00110 001010 @lx +LDX 011111 ..... ..... ..... 01000 001010 @lx diff --git a/target/mips/tcg/octeon_translate.c b/target/mips/tcg/octeon_translate.c index e25c4cbaa06..b2eca29e06c 100644 --- a/target/mips/tcg/octeon_translate.c +++ b/target/mips/tcg/octeon_translate.c @@ -18,8 +18,8 @@ static bool trans_BBIT(DisasContext *ctx, arg_BBIT *a) TCGv p; if (ctx->hflags & MIPS_HFLAG_BMASK) { - LOG_DISAS("Branch in delay / forbidden slot at PC 0x" - TARGET_FMT_lx "\n", ctx->base.pc_next); + LOG_DISAS("Branch in delay / forbidden slot at PC 0x%" VADDR_PRIx "\n", + ctx->base.pc_next); generate_exception_end(ctx, EXCP_RI); return true; } @@ -174,3 +174,15 @@ static bool trans_SEQNEI(DisasContext *ctx, arg_SEQNEI *a) } return true; } + +static bool trans_lx(DisasContext *ctx, arg_lx *a, MemOp mop) +{ + gen_lx(ctx, a->rd, a->base, a->index, mop); + + return true; +} + +TRANS(LBUX, trans_lx, MO_UB); +TRANS(LHX, trans_lx, MO_SW); +TRANS(LWX, trans_lx, MO_SL); +TRANS(LDX, trans_lx, MO_UQ); diff --git a/target/mips/tcg/op_helper.c b/target/mips/tcg/op_helper.c index 65403f1a87b..4502ae2b5be 100644 --- a/target/mips/tcg/op_helper.c +++ b/target/mips/tcg/op_helper.c @@ -22,9 +22,10 @@ #include "cpu.h" #include "internal.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" #include "exec/memop.h" #include "fpu_helper.h" +#include "qemu/crc32c.h" +#include static inline target_ulong bitswap(target_ulong v) { @@ -143,6 +144,30 @@ target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx, return (int64_t)(int32_t)(uint32_t)tmp5; } +/* these crc32 functions are based on target/loongarch/tcg/op_helper.c */ +target_ulong helper_crc32(target_ulong val, target_ulong m, uint32_t sz) +{ + uint8_t buf[8]; + target_ulong mask = ((sz * 8) == 64) ? + (target_ulong) -1ULL : + ((1ULL << (sz * 8)) - 1); + + m &= mask; + stq_le_p(buf, m); + return (int32_t) (crc32(val ^ 0xffffffff, buf, sz) ^ 0xffffffff); +} + +target_ulong helper_crc32c(target_ulong val, target_ulong m, uint32_t sz) +{ + uint8_t buf[8]; + target_ulong mask = ((sz * 8) == 64) ? + (target_ulong) -1ULL : + ((1ULL << (sz * 8)) - 1); + m &= mask; + stq_le_p(buf, m); + return (int32_t) (crc32c(val, buf, sz) ^ 0xffffffff); +} + void helper_fork(target_ulong arg1, target_ulong arg2) { /* diff --git a/target/mips/tcg/rel6.decode b/target/mips/tcg/rel6.decode index d6989cf56e8..7fbcb109b4e 100644 --- a/target/mips/tcg/rel6.decode +++ b/target/mips/tcg/rel6.decode @@ -16,11 +16,16 @@ &r rs rt rd sa +&special3_crc rs rt c sz + @lsa ...... rs:5 rt:5 rd:5 ... sa:2 ...... &r +@crc32 ...... rs:5 rt:5 ..... c:3 sz:2 ...... 
&special3_crc LSA 000000 ..... ..... ..... 000 .. 000101 @lsa DLSA 000000 ..... ..... ..... 000 .. 010101 @lsa +CRC32 011111 ..... ..... 00000 ... .. 001111 @crc32 + REMOVED 010011 ----- ----- ----- ----- ------ # COP1X (COP3) REMOVED 011100 ----- ----- ----- ----- ------ # SPECIAL2 diff --git a/target/mips/tcg/rel6_translate.c b/target/mips/tcg/rel6_translate.c index 59f237ba3ba..4c056621c97 100644 --- a/target/mips/tcg/rel6_translate.c +++ b/target/mips/tcg/rel6_translate.c @@ -23,7 +23,7 @@ bool trans_REMOVED(DisasContext *ctx, arg_REMOVED *a) static bool trans_LSA(DisasContext *ctx, arg_r *a) { - return gen_lsa(ctx, a->rd, a->rt, a->rs, a->sa); + return gen_lsa(ctx, a->rd, a->rt, a->rs, a->sa + 1); } static bool trans_DLSA(DisasContext *ctx, arg_r *a) @@ -31,5 +31,17 @@ static bool trans_DLSA(DisasContext *ctx, arg_r *a) if (TARGET_LONG_BITS != 64) { return false; } - return gen_dlsa(ctx, a->rd, a->rt, a->rs, a->sa); + return gen_dlsa(ctx, a->rd, a->rt, a->rs, a->sa + 1); +} + +static bool trans_CRC32(DisasContext *ctx, arg_special3_crc *a) +{ + if (unlikely(!ctx->crcp) + || unlikely((a->sz == 3) && (!(ctx->hflags & MIPS_HFLAG_64))) + || unlikely((a->c >= 2))) { + gen_reserved_instruction(ctx); + return true; + } + gen_crc32(ctx, a->rt, a->rs, a->rt, a->sz, a->c); + return true; } diff --git a/target/mips/tcg/sysemu/semihosting-stub.c b/target/mips/tcg/sysemu/semihosting-stub.c deleted file mode 100644 index 7ae27d746f8..00000000000 --- a/target/mips/tcg/sysemu/semihosting-stub.c +++ /dev/null @@ -1,15 +0,0 @@ -/* - * MIPS semihosting stub - * - * SPDX-FileContributor: Philippe Mathieu-Daudé - * SPDX-FileCopyrightText: 2024 Linaro Ltd. - * SPDX-License-Identifier: GPL-2.0-or-later - */ - -#include "qemu/osdep.h" -#include "internal.h" - -void mips_semihosting(CPUMIPSState *env) -{ - g_assert_not_reached(); -} diff --git a/target/mips/tcg/sysemu_helper.h.inc b/target/mips/tcg/sysemu_helper.h.inc deleted file mode 100644 index 1861d538de1..00000000000 --- a/target/mips/tcg/sysemu_helper.h.inc +++ /dev/null @@ -1,185 +0,0 @@ -/* - * QEMU MIPS sysemu helpers - * - * Copyright (c) 2004-2005 Jocelyn Mayer - * Copyright (c) 2006 Marius Groeger (FPU operations) - * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support) - * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support) - * - * SPDX-License-Identifier: LGPL-2.1-or-later - */ - -/* CP0 helpers */ -DEF_HELPER_1(mfc0_mvpcontrol, tl, env) -DEF_HELPER_1(mfc0_mvpconf0, tl, env) -DEF_HELPER_1(mfc0_mvpconf1, tl, env) -DEF_HELPER_1(mftc0_vpecontrol, tl, env) -DEF_HELPER_1(mftc0_vpeconf0, tl, env) -DEF_HELPER_1(mfc0_random, tl, env) -DEF_HELPER_1(mfc0_tcstatus, tl, env) -DEF_HELPER_1(mftc0_tcstatus, tl, env) -DEF_HELPER_1(mfc0_tcbind, tl, env) -DEF_HELPER_1(mftc0_tcbind, tl, env) -DEF_HELPER_1(mfc0_tcrestart, tl, env) -DEF_HELPER_1(mftc0_tcrestart, tl, env) -DEF_HELPER_1(mfc0_tchalt, tl, env) -DEF_HELPER_1(mftc0_tchalt, tl, env) -DEF_HELPER_1(mfc0_tccontext, tl, env) -DEF_HELPER_1(mftc0_tccontext, tl, env) -DEF_HELPER_1(mfc0_tcschedule, tl, env) -DEF_HELPER_1(mftc0_tcschedule, tl, env) -DEF_HELPER_1(mfc0_tcschefback, tl, env) -DEF_HELPER_1(mftc0_tcschefback, tl, env) -DEF_HELPER_1(mfc0_count, tl, env) -DEF_HELPER_1(mftc0_entryhi, tl, env) -DEF_HELPER_1(mftc0_status, tl, env) -DEF_HELPER_1(mftc0_cause, tl, env) -DEF_HELPER_1(mftc0_epc, tl, env) -DEF_HELPER_1(mftc0_ebase, tl, env) -DEF_HELPER_2(mftc0_configx, tl, env, tl) -DEF_HELPER_1(mfc0_lladdr, tl, env) -DEF_HELPER_1(mfc0_maar, tl, env) -DEF_HELPER_1(mfhc0_maar, tl, env) 
-DEF_HELPER_2(mfc0_watchlo, tl, env, i32) -DEF_HELPER_2(mfc0_watchhi, tl, env, i32) -DEF_HELPER_2(mfhc0_watchhi, tl, env, i32) -DEF_HELPER_1(mfc0_debug, tl, env) -DEF_HELPER_1(mftc0_debug, tl, env) -#ifdef TARGET_MIPS64 -DEF_HELPER_1(dmfc0_tcrestart, tl, env) -DEF_HELPER_1(dmfc0_tchalt, tl, env) -DEF_HELPER_1(dmfc0_tccontext, tl, env) -DEF_HELPER_1(dmfc0_tcschedule, tl, env) -DEF_HELPER_1(dmfc0_tcschefback, tl, env) -DEF_HELPER_1(dmfc0_lladdr, tl, env) -DEF_HELPER_1(dmfc0_maar, tl, env) -DEF_HELPER_2(dmfc0_watchlo, tl, env, i32) -DEF_HELPER_2(dmfc0_watchhi, tl, env, i32) -#endif /* TARGET_MIPS64 */ - -DEF_HELPER_2(mtc0_index, void, env, tl) -DEF_HELPER_2(mtc0_mvpcontrol, void, env, tl) -DEF_HELPER_2(mtc0_vpecontrol, void, env, tl) -DEF_HELPER_2(mttc0_vpecontrol, void, env, tl) -DEF_HELPER_2(mtc0_vpeconf0, void, env, tl) -DEF_HELPER_2(mttc0_vpeconf0, void, env, tl) -DEF_HELPER_2(mtc0_vpeconf1, void, env, tl) -DEF_HELPER_2(mtc0_yqmask, void, env, tl) -DEF_HELPER_2(mtc0_vpeopt, void, env, tl) -DEF_HELPER_2(mtc0_entrylo0, void, env, tl) -DEF_HELPER_2(mtc0_tcstatus, void, env, tl) -DEF_HELPER_2(mttc0_tcstatus, void, env, tl) -DEF_HELPER_2(mtc0_tcbind, void, env, tl) -DEF_HELPER_2(mttc0_tcbind, void, env, tl) -DEF_HELPER_2(mtc0_tcrestart, void, env, tl) -DEF_HELPER_2(mttc0_tcrestart, void, env, tl) -DEF_HELPER_2(mtc0_tchalt, void, env, tl) -DEF_HELPER_2(mttc0_tchalt, void, env, tl) -DEF_HELPER_2(mtc0_tccontext, void, env, tl) -DEF_HELPER_2(mttc0_tccontext, void, env, tl) -DEF_HELPER_2(mtc0_tcschedule, void, env, tl) -DEF_HELPER_2(mttc0_tcschedule, void, env, tl) -DEF_HELPER_2(mtc0_tcschefback, void, env, tl) -DEF_HELPER_2(mttc0_tcschefback, void, env, tl) -DEF_HELPER_2(mtc0_entrylo1, void, env, tl) -DEF_HELPER_2(mtc0_context, void, env, tl) -DEF_HELPER_2(mtc0_memorymapid, void, env, tl) -DEF_HELPER_2(mtc0_pagemask, void, env, tl) -DEF_HELPER_2(mtc0_pagegrain, void, env, tl) -DEF_HELPER_2(mtc0_segctl0, void, env, tl) -DEF_HELPER_2(mtc0_segctl1, void, env, tl) -DEF_HELPER_2(mtc0_segctl2, void, env, tl) -DEF_HELPER_2(mtc0_pwfield, void, env, tl) -DEF_HELPER_2(mtc0_pwsize, void, env, tl) -DEF_HELPER_2(mtc0_wired, void, env, tl) -DEF_HELPER_2(mtc0_srsconf0, void, env, tl) -DEF_HELPER_2(mtc0_srsconf1, void, env, tl) -DEF_HELPER_2(mtc0_srsconf2, void, env, tl) -DEF_HELPER_2(mtc0_srsconf3, void, env, tl) -DEF_HELPER_2(mtc0_srsconf4, void, env, tl) -DEF_HELPER_2(mtc0_hwrena, void, env, tl) -DEF_HELPER_2(mtc0_pwctl, void, env, tl) -DEF_HELPER_2(mtc0_count, void, env, tl) -DEF_HELPER_2(mtc0_entryhi, void, env, tl) -DEF_HELPER_2(mttc0_entryhi, void, env, tl) -DEF_HELPER_2(mtc0_compare, void, env, tl) -DEF_HELPER_2(mtc0_status, void, env, tl) -DEF_HELPER_2(mttc0_status, void, env, tl) -DEF_HELPER_2(mtc0_intctl, void, env, tl) -DEF_HELPER_2(mtc0_srsctl, void, env, tl) -DEF_HELPER_2(mtc0_cause, void, env, tl) -DEF_HELPER_2(mttc0_cause, void, env, tl) -DEF_HELPER_2(mtc0_ebase, void, env, tl) -DEF_HELPER_2(mttc0_ebase, void, env, tl) -DEF_HELPER_2(mtc0_config0, void, env, tl) -DEF_HELPER_2(mtc0_config2, void, env, tl) -DEF_HELPER_2(mtc0_config3, void, env, tl) -DEF_HELPER_2(mtc0_config4, void, env, tl) -DEF_HELPER_2(mtc0_config5, void, env, tl) -DEF_HELPER_2(mtc0_lladdr, void, env, tl) -DEF_HELPER_2(mtc0_maar, void, env, tl) -DEF_HELPER_2(mthc0_maar, void, env, tl) -DEF_HELPER_2(mtc0_maari, void, env, tl) -DEF_HELPER_3(mtc0_watchlo, void, env, tl, i32) -DEF_HELPER_3(mtc0_watchhi, void, env, tl, i32) -DEF_HELPER_3(mthc0_watchhi, void, env, tl, i32) -DEF_HELPER_2(mtc0_xcontext, void, env, tl) 
-DEF_HELPER_2(mtc0_framemask, void, env, tl) -DEF_HELPER_2(mtc0_debug, void, env, tl) -DEF_HELPER_2(mttc0_debug, void, env, tl) -DEF_HELPER_2(mtc0_performance0, void, env, tl) -DEF_HELPER_2(mtc0_errctl, void, env, tl) -DEF_HELPER_2(mtc0_taglo, void, env, tl) -DEF_HELPER_2(mtc0_datalo, void, env, tl) -DEF_HELPER_2(mtc0_taghi, void, env, tl) -DEF_HELPER_2(mtc0_datahi, void, env, tl) - -#if defined(TARGET_MIPS64) -DEF_HELPER_2(dmtc0_entrylo0, void, env, i64) -DEF_HELPER_2(dmtc0_entrylo1, void, env, i64) -#endif - -/* MIPS MT functions */ -DEF_HELPER_2(mftgpr, tl, env, i32) -DEF_HELPER_2(mftlo, tl, env, i32) -DEF_HELPER_2(mfthi, tl, env, i32) -DEF_HELPER_2(mftacx, tl, env, i32) -DEF_HELPER_1(mftdsp, tl, env) -DEF_HELPER_3(mttgpr, void, env, tl, i32) -DEF_HELPER_3(mttlo, void, env, tl, i32) -DEF_HELPER_3(mtthi, void, env, tl, i32) -DEF_HELPER_3(mttacx, void, env, tl, i32) -DEF_HELPER_2(mttdsp, void, env, tl) -DEF_HELPER_0(dmt, tl) -DEF_HELPER_0(emt, tl) -DEF_HELPER_1(dvpe, tl, env) -DEF_HELPER_1(evpe, tl, env) - -/* R6 Multi-threading */ -DEF_HELPER_1(dvp, tl, env) -DEF_HELPER_1(evp, tl, env) - -/* TLB */ -DEF_HELPER_1(tlbwi, void, env) -DEF_HELPER_1(tlbwr, void, env) -DEF_HELPER_1(tlbp, void, env) -DEF_HELPER_1(tlbr, void, env) -DEF_HELPER_1(tlbinv, void, env) -DEF_HELPER_1(tlbinvf, void, env) -DEF_HELPER_3(ginvt, void, env, tl, i32) - -/* Special */ -DEF_HELPER_1(di, tl, env) -DEF_HELPER_1(ei, tl, env) -DEF_HELPER_1(eret, void, env) -DEF_HELPER_1(eretnc, void, env) -DEF_HELPER_1(deret, void, env) -DEF_HELPER_3(cache, void, env, tl, i32) - -#ifdef TARGET_MIPS64 -/* Longson CSR */ -DEF_HELPER_2(lcsr_rdcsr, i64, env, tl) -DEF_HELPER_2(lcsr_drdcsr, i64, env, tl) -DEF_HELPER_3(lcsr_wrcsr, void, env, tl, tl) -DEF_HELPER_3(lcsr_dwrcsr, void, env, tl, tl) -#endif diff --git a/target/mips/tcg/sysemu/cp0_helper.c b/target/mips/tcg/system/cp0_helper.c similarity index 96% rename from target/mips/tcg/sysemu/cp0_helper.c rename to target/mips/tcg/system/cp0_helper.c index 79a5c833cee..b69e70d7fcf 100644 --- a/target/mips/tcg/sysemu/cp0_helper.c +++ b/target/mips/tcg/system/cp0_helper.c @@ -27,7 +27,8 @@ #include "internal.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" +#include "exec/target_page.h" /* SMP helpers. */ @@ -864,36 +865,24 @@ void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1) } } -void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask) +uint32_t compute_pagemask(uint32_t val) { - uint32_t mask; - int maskbits; - /* Don't care MASKX as we don't support 1KB page */ - mask = extract32((uint32_t)arg1, CP0PM_MASK, 16); - maskbits = cto32(mask); + uint32_t mask = extract32(val, CP0PM_MASK, 16); + int maskbits = cto32(mask); - /* Ensure no more set bit after first zero */ - if ((mask >> maskbits) != 0) { - goto invalid; - } - /* We don't support VTLB entry smaller than target page */ - if ((maskbits + TARGET_PAGE_BITS_MIN) < TARGET_PAGE_BITS) { - goto invalid; + /* Ensure no more set bit after first zero, and maskbits even. */ + if ((mask >> maskbits) == 0 && maskbits % 2 == 0) { + return mask << CP0PM_MASK; + } else { + /* When invalid, set to default target page size. */ + return 0; } - env->CP0_PageMask = mask << CP0PM_MASK; - - return; - -invalid: - /* When invalid, set to default target page size. 
*/ - mask = (~TARGET_PAGE_MASK >> TARGET_PAGE_BITS_MIN); - env->CP0_PageMask = mask << CP0PM_MASK; } void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1) { - update_pagemask(env, arg1, &env->CP0_PageMask); + env->CP0_PageMask = compute_pagemask(arg1); } void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1) @@ -1573,12 +1562,14 @@ target_ulong helper_dvpe(CPUMIPSState *env) CPUState *other_cs = first_cpu; target_ulong prev = env->mvp->CP0_MVPControl; - CPU_FOREACH(other_cs) { - MIPSCPU *other_cpu = MIPS_CPU(other_cs); - /* Turn off all VPEs except the one executing the dvpe. */ - if (&other_cpu->env != env) { - other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP); - mips_vpe_sleep(other_cpu); + if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); + /* Turn off all VPEs except the one executing the dvpe. */ + if (&other_cpu->env != env) { + other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP); + mips_vpe_sleep(other_cpu); + } } } return prev; @@ -1589,15 +1580,17 @@ target_ulong helper_evpe(CPUMIPSState *env) CPUState *other_cs = first_cpu; target_ulong prev = env->mvp->CP0_MVPControl; - CPU_FOREACH(other_cs) { - MIPSCPU *other_cpu = MIPS_CPU(other_cs); + if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); - if (&other_cpu->env != env - /* If the VPE is WFI, don't disturb its sleep. */ - && !mips_vpe_is_wfi(other_cpu)) { - /* Enable the VPE. */ - other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); - mips_vpe_wake(other_cpu); /* And wake it up. */ + if (&other_cpu->env != env + /* If the VPE is WFI, don't disturb its sleep. */ + && !mips_vpe_is_wfi(other_cpu)) { + /* Enable the VPE. */ + other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); + mips_vpe_wake(other_cpu); /* And wake it up. 
*/ + } } } return prev; diff --git a/target/mips/tcg/sysemu/lcsr_helper.c b/target/mips/tcg/system/lcsr_helper.c similarity index 100% rename from target/mips/tcg/sysemu/lcsr_helper.c rename to target/mips/tcg/system/lcsr_helper.c diff --git a/target/mips/tcg/sysemu/meson.build b/target/mips/tcg/system/meson.build similarity index 100% rename from target/mips/tcg/sysemu/meson.build rename to target/mips/tcg/system/meson.build diff --git a/target/mips/tcg/sysemu/mips-semi.c b/target/mips/tcg/system/mips-semi.c similarity index 84% rename from target/mips/tcg/sysemu/mips-semi.c rename to target/mips/tcg/system/mips-semi.c index 5ba06e95734..e822a426145 100644 --- a/target/mips/tcg/sysemu/mips-semi.c +++ b/target/mips/tcg/system/mips-semi.c @@ -168,6 +168,7 @@ static void uhi_fstat_cb(CPUState *cs, uint64_t ret, int err) if (!err) { CPUMIPSState *env = cpu_env(cs); + bool swap_needed = HOST_BIG_ENDIAN != mips_env_is_bigendian(env); target_ulong addr = env->active_tc.gpr[5]; UHIStat *dst = lock_user(VERIFY_WRITE, addr, sizeof(UHIStat), 1); struct gdb_stat s; @@ -179,19 +180,35 @@ static void uhi_fstat_cb(CPUState *cs, uint64_t ret, int err) memcpy(&s, dst, sizeof(struct gdb_stat)); memset(dst, 0, sizeof(UHIStat)); - dst->uhi_st_dev = tswap16(be32_to_cpu(s.gdb_st_dev)); - dst->uhi_st_ino = tswap16(be32_to_cpu(s.gdb_st_ino)); - dst->uhi_st_mode = tswap32(be32_to_cpu(s.gdb_st_mode)); - dst->uhi_st_nlink = tswap16(be32_to_cpu(s.gdb_st_nlink)); - dst->uhi_st_uid = tswap16(be32_to_cpu(s.gdb_st_uid)); - dst->uhi_st_gid = tswap16(be32_to_cpu(s.gdb_st_gid)); - dst->uhi_st_rdev = tswap16(be32_to_cpu(s.gdb_st_rdev)); - dst->uhi_st_size = tswap64(be64_to_cpu(s.gdb_st_size)); - dst->uhi_st_atime = tswap64(be32_to_cpu(s.gdb_st_atime)); - dst->uhi_st_mtime = tswap64(be32_to_cpu(s.gdb_st_mtime)); - dst->uhi_st_ctime = tswap64(be32_to_cpu(s.gdb_st_ctime)); - dst->uhi_st_blksize = tswap64(be64_to_cpu(s.gdb_st_blksize)); - dst->uhi_st_blocks = tswap64(be64_to_cpu(s.gdb_st_blocks)); + dst->uhi_st_dev = be32_to_cpu(s.gdb_st_dev); + dst->uhi_st_ino = be32_to_cpu(s.gdb_st_ino); + dst->uhi_st_mode = be32_to_cpu(s.gdb_st_mode); + dst->uhi_st_nlink = be32_to_cpu(s.gdb_st_nlink); + dst->uhi_st_uid = be32_to_cpu(s.gdb_st_uid); + dst->uhi_st_gid = be32_to_cpu(s.gdb_st_gid); + dst->uhi_st_rdev = be32_to_cpu(s.gdb_st_rdev); + dst->uhi_st_size = be64_to_cpu(s.gdb_st_size); + dst->uhi_st_atime = be32_to_cpu(s.gdb_st_atime); + dst->uhi_st_mtime = be32_to_cpu(s.gdb_st_mtime); + dst->uhi_st_ctime = be32_to_cpu(s.gdb_st_ctime); + dst->uhi_st_blksize = be64_to_cpu(s.gdb_st_blksize); + dst->uhi_st_blocks = be64_to_cpu(s.gdb_st_blocks); + + if (swap_needed) { + dst->uhi_st_dev = bswap16(dst->uhi_st_dev); + dst->uhi_st_ino = bswap16(dst->uhi_st_ino); + dst->uhi_st_mode = bswap32(dst->uhi_st_mode); + dst->uhi_st_nlink = bswap16(dst->uhi_st_nlink); + dst->uhi_st_uid = bswap16(dst->uhi_st_uid); + dst->uhi_st_gid = bswap16(dst->uhi_st_gid); + dst->uhi_st_rdev = bswap16(dst->uhi_st_rdev); + dst->uhi_st_size = bswap64(dst->uhi_st_size); + dst->uhi_st_atime = bswap64(dst->uhi_st_atime); + dst->uhi_st_mtime = bswap64(dst->uhi_st_mtime); + dst->uhi_st_ctime = bswap64(dst->uhi_st_ctime); + dst->uhi_st_blksize = bswap64(dst->uhi_st_blksize); + dst->uhi_st_blocks = bswap64(dst->uhi_st_blocks); + } unlock_user(dst, addr, sizeof(UHIStat)); } @@ -357,5 +374,4 @@ void mips_semihosting(CPUMIPSState *env) error_report("Unknown UHI operation %d", op); abort(); } - return; } diff --git a/target/mips/tcg/system/semihosting-stub.c 
b/target/mips/tcg/system/semihosting-stub.c new file mode 100644 index 00000000000..bb1f7aae624 --- /dev/null +++ b/target/mips/tcg/system/semihosting-stub.c @@ -0,0 +1,16 @@ +/* + * MIPS semihosting stub + * + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (c) 2024 Linaro Ltd. + * Authors: + * Philippe Mathieu-Daudé + */ + +#include "qemu/osdep.h" +#include "internal.h" + +void mips_semihosting(CPUMIPSState *env) +{ + g_assert_not_reached(); +} diff --git a/target/mips/tcg/sysemu/special_helper.c b/target/mips/tcg/system/special_helper.c similarity index 99% rename from target/mips/tcg/sysemu/special_helper.c rename to target/mips/tcg/system/special_helper.c index 9ce5e2ceac5..b54cbe88a38 100644 --- a/target/mips/tcg/sysemu/special_helper.c +++ b/target/mips/tcg/system/special_helper.c @@ -22,7 +22,7 @@ #include "qemu/log.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" +#include "exec/translation-block.h" #include "internal.h" /* Specials */ diff --git a/target/mips/tcg/sysemu/tlb_helper.c b/target/mips/tcg/system/tlb_helper.c similarity index 95% rename from target/mips/tcg/sysemu/tlb_helper.c rename to target/mips/tcg/system/tlb_helper.c index 3ba6d369a66..eccaf3624cb 100644 --- a/target/mips/tcg/sysemu/tlb_helper.c +++ b/target/mips/tcg/system/tlb_helper.c @@ -21,9 +21,10 @@ #include "cpu.h" #include "internal.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" -#include "exec/cpu_ldst.h" +#include "exec/target_page.h" +#include "accel/tcg/cpu-ldst.h" #include "exec/log.h" #include "exec/helper-proto.h" @@ -592,23 +593,29 @@ static void raise_mmu_exception(CPUMIPSState *env, target_ulong address, * resulting in a TLB or XTLB Refill exception. */ -static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size, - uint64_t *pte) +static bool get_pte(CPUMIPSState *env, uint64_t vaddr, MemOp op, + uint64_t *pte, unsigned ptw_mmu_idx) { - if ((vaddr & ((entry_size >> 3) - 1)) != 0) { + MemOpIdx oi; + + if ((vaddr & (memop_size(op) - 1)) != 0) { return false; } - if (entry_size == 64) { - *pte = cpu_ldq_code(env, vaddr); + + oi = make_memop_idx(op | mo_endian_env(env), ptw_mmu_idx); + if (op == MO_64) { + *pte = cpu_ldq_mmu(env, vaddr, oi, 0); } else { - *pte = cpu_ldl_code(env, vaddr); + *pte = cpu_ldl_mmu(env, vaddr, oi, 0); } + return true; } static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry, - int entry_size, int ptei) + MemOp op, int ptei) { + unsigned entry_size = memop_size(op) << 3; uint64_t result = entry; uint64_t rixi; if (ptei > entry_size) { @@ -624,14 +631,12 @@ static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry, static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, int directory_index, bool *huge_page, bool *hgpg_directory_hit, uint64_t *pw_entrylo0, uint64_t *pw_entrylo1, - unsigned directory_shift, unsigned leaf_shift, int ptw_mmu_idx) + MemOp directory_mop, MemOp leaf_mop, int ptw_mmu_idx) { int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1; int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F; int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1; int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F; - uint32_t direntry_size = 1 << (directory_shift + 3); - uint32_t leafentry_size = 1 << (leaf_shift + 3); uint64_t entry; uint64_t paddr; int prot; @@ -643,14 +648,14 @@ static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, /* wrong base address */ return 0; } - if (!get_pte(env, *vaddr, direntry_size, &entry)) { + if (!get_pte(env, *vaddr, 
directory_mop, &entry, ptw_mmu_idx)) { return 0; } if ((entry & (1 << psn)) && hugepg) { *huge_page = true; *hgpg_directory_hit = true; - entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew); + entry = get_tlb_entry_layout(env, entry, leaf_mop, pf_ptew); w = directory_index - 1; if (directory_index & 0x1) { /* Generate adjacent page from same PTE for odd TLB page */ @@ -658,7 +663,7 @@ static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, *pw_entrylo0 = entry & ~lsb; /* even page */ *pw_entrylo1 = entry | lsb; /* odd page */ } else if (dph) { - int oddpagebit = 1 << leaf_shift; + int oddpagebit = 1 << leaf_mop; uint64_t vaddr2 = *vaddr ^ oddpagebit; if (*vaddr & oddpagebit) { *pw_entrylo1 = entry; @@ -669,10 +674,10 @@ static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, ptw_mmu_idx) != TLBRET_MATCH) { return 0; } - if (!get_pte(env, vaddr2, leafentry_size, &entry)) { + if (!get_pte(env, vaddr2, leaf_mop, &entry, ptw_mmu_idx)) { return 0; } - entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew); + entry = get_tlb_entry_layout(env, entry, leaf_mop, pf_ptew); if (*vaddr & oddpagebit) { *pw_entrylo0 = entry; } else { @@ -711,7 +716,7 @@ static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, /* Native pointer size */ /*For the 32-bit architectures, this bit is fixed to 0.*/ - int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3; + MemOp native_op = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? MO_32 : MO_64; /* Indices from PWField */ int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F; @@ -728,11 +733,10 @@ static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, /* Other HTW configs */ int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1; - unsigned directory_shift, leaf_shift; + MemOp directory_mop, leaf_mop; /* Offsets into tables */ unsigned goffset, uoffset, moffset, ptoffset0, ptoffset1; - uint32_t leafentry_size; /* Starting address - Page Table Base */ uint64_t vaddr = env->CP0_PWBase; @@ -759,23 +763,21 @@ static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, } /* HTW Shift values (depend on entry size) */ - directory_shift = (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift; - leaf_shift = (ptew == 1) ? native_shift + 1 : native_shift; - - goffset = gindex << directory_shift; - uoffset = uindex << directory_shift; - moffset = mindex << directory_shift; - ptoffset0 = (ptindex >> 1) << (leaf_shift + 1); - ptoffset1 = ptoffset0 | (1 << (leaf_shift)); + directory_mop = (hugepg && (ptew == 1)) ? native_op + 1 : native_op; + leaf_mop = (ptew == 1) ? 
native_op + 1 : native_op; - leafentry_size = 1 << (leaf_shift + 3); + goffset = gindex << directory_mop; + uoffset = uindex << directory_mop; + moffset = mindex << directory_mop; + ptoffset0 = (ptindex >> 1) << (leaf_mop + 1); + ptoffset1 = ptoffset0 | (1 << (leaf_mop)); /* Global Directory */ if (gdw > 0) { vaddr |= goffset; switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit, &pw_entrylo0, &pw_entrylo1, - directory_shift, leaf_shift, ptw_mmu_idx)) + directory_mop, leaf_mop, ptw_mmu_idx)) { case 0: return false; @@ -792,7 +794,7 @@ static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, vaddr |= uoffset; switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit, &pw_entrylo0, &pw_entrylo1, - directory_shift, leaf_shift, ptw_mmu_idx)) + directory_mop, leaf_mop, ptw_mmu_idx)) { case 0: return false; @@ -809,7 +811,7 @@ static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, vaddr |= moffset; switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit, &pw_entrylo0, &pw_entrylo1, - directory_shift, leaf_shift, ptw_mmu_idx)) + directory_mop, leaf_mop, ptw_mmu_idx)) { case 0: return false; @@ -827,10 +829,10 @@ static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, ptw_mmu_idx) != TLBRET_MATCH) { return false; } - if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) { + if (!get_pte(env, vaddr, leaf_mop, &dir_entry, ptw_mmu_idx)) { return false; } - dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew); + dir_entry = get_tlb_entry_layout(env, dir_entry, leaf_mop, pf_ptew); pw_entrylo0 = dir_entry; /* Leaf Level Page Table - Second half of PTE pair */ @@ -839,10 +841,10 @@ static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, ptw_mmu_idx) != TLBRET_MATCH) { return false; } - if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) { + if (!get_pte(env, vaddr, leaf_mop, &dir_entry, ptw_mmu_idx)) { return false; } - dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew); + dir_entry = get_tlb_entry_layout(env, dir_entry, leaf_mop, pf_ptew); pw_entrylo1 = dir_entry; refill: @@ -873,8 +875,8 @@ static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, break; } } - pw_pagemask = m >> TARGET_PAGE_BITS_MIN; - update_pagemask(env, pw_pagemask << CP0PM_MASK, &pw_pagemask); + pw_pagemask = m >> TARGET_PAGE_BITS; + pw_pagemask = compute_pagemask(pw_pagemask << CP0PM_MASK); pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF); { target_ulong tmp_entryhi = env->CP0_EntryHi; diff --git a/target/mips/tcg/system_helper.h.inc b/target/mips/tcg/system_helper.h.inc new file mode 100644 index 00000000000..eaac5e2c6f0 --- /dev/null +++ b/target/mips/tcg/system_helper.h.inc @@ -0,0 +1,185 @@ +/* + * QEMU MIPS TCG system helpers + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * Copyright (c) 2006 Marius Groeger (FPU operations) + * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support) + * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support) + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +/* CP0 helpers */ +DEF_HELPER_1(mfc0_mvpcontrol, tl, env) +DEF_HELPER_1(mfc0_mvpconf0, tl, env) +DEF_HELPER_1(mfc0_mvpconf1, tl, env) +DEF_HELPER_1(mftc0_vpecontrol, tl, env) +DEF_HELPER_1(mftc0_vpeconf0, tl, env) +DEF_HELPER_1(mfc0_random, tl, env) +DEF_HELPER_1(mfc0_tcstatus, tl, env) +DEF_HELPER_1(mftc0_tcstatus, tl, env) +DEF_HELPER_1(mfc0_tcbind, tl, env) +DEF_HELPER_1(mftc0_tcbind, tl, env) +DEF_HELPER_1(mfc0_tcrestart, tl, env) +DEF_HELPER_1(mftc0_tcrestart, tl, 
env) +DEF_HELPER_1(mfc0_tchalt, tl, env) +DEF_HELPER_1(mftc0_tchalt, tl, env) +DEF_HELPER_1(mfc0_tccontext, tl, env) +DEF_HELPER_1(mftc0_tccontext, tl, env) +DEF_HELPER_1(mfc0_tcschedule, tl, env) +DEF_HELPER_1(mftc0_tcschedule, tl, env) +DEF_HELPER_1(mfc0_tcschefback, tl, env) +DEF_HELPER_1(mftc0_tcschefback, tl, env) +DEF_HELPER_1(mfc0_count, tl, env) +DEF_HELPER_1(mftc0_entryhi, tl, env) +DEF_HELPER_1(mftc0_status, tl, env) +DEF_HELPER_1(mftc0_cause, tl, env) +DEF_HELPER_1(mftc0_epc, tl, env) +DEF_HELPER_1(mftc0_ebase, tl, env) +DEF_HELPER_2(mftc0_configx, tl, env, tl) +DEF_HELPER_1(mfc0_lladdr, tl, env) +DEF_HELPER_1(mfc0_maar, tl, env) +DEF_HELPER_1(mfhc0_maar, tl, env) +DEF_HELPER_2(mfc0_watchlo, tl, env, i32) +DEF_HELPER_2(mfc0_watchhi, tl, env, i32) +DEF_HELPER_2(mfhc0_watchhi, tl, env, i32) +DEF_HELPER_1(mfc0_debug, tl, env) +DEF_HELPER_1(mftc0_debug, tl, env) +#ifdef TARGET_MIPS64 +DEF_HELPER_1(dmfc0_tcrestart, tl, env) +DEF_HELPER_1(dmfc0_tchalt, tl, env) +DEF_HELPER_1(dmfc0_tccontext, tl, env) +DEF_HELPER_1(dmfc0_tcschedule, tl, env) +DEF_HELPER_1(dmfc0_tcschefback, tl, env) +DEF_HELPER_1(dmfc0_lladdr, tl, env) +DEF_HELPER_1(dmfc0_maar, tl, env) +DEF_HELPER_2(dmfc0_watchlo, tl, env, i32) +DEF_HELPER_2(dmfc0_watchhi, tl, env, i32) +#endif /* TARGET_MIPS64 */ + +DEF_HELPER_2(mtc0_index, void, env, tl) +DEF_HELPER_2(mtc0_mvpcontrol, void, env, tl) +DEF_HELPER_2(mtc0_vpecontrol, void, env, tl) +DEF_HELPER_2(mttc0_vpecontrol, void, env, tl) +DEF_HELPER_2(mtc0_vpeconf0, void, env, tl) +DEF_HELPER_2(mttc0_vpeconf0, void, env, tl) +DEF_HELPER_2(mtc0_vpeconf1, void, env, tl) +DEF_HELPER_2(mtc0_yqmask, void, env, tl) +DEF_HELPER_2(mtc0_vpeopt, void, env, tl) +DEF_HELPER_2(mtc0_entrylo0, void, env, tl) +DEF_HELPER_2(mtc0_tcstatus, void, env, tl) +DEF_HELPER_2(mttc0_tcstatus, void, env, tl) +DEF_HELPER_2(mtc0_tcbind, void, env, tl) +DEF_HELPER_2(mttc0_tcbind, void, env, tl) +DEF_HELPER_2(mtc0_tcrestart, void, env, tl) +DEF_HELPER_2(mttc0_tcrestart, void, env, tl) +DEF_HELPER_2(mtc0_tchalt, void, env, tl) +DEF_HELPER_2(mttc0_tchalt, void, env, tl) +DEF_HELPER_2(mtc0_tccontext, void, env, tl) +DEF_HELPER_2(mttc0_tccontext, void, env, tl) +DEF_HELPER_2(mtc0_tcschedule, void, env, tl) +DEF_HELPER_2(mttc0_tcschedule, void, env, tl) +DEF_HELPER_2(mtc0_tcschefback, void, env, tl) +DEF_HELPER_2(mttc0_tcschefback, void, env, tl) +DEF_HELPER_2(mtc0_entrylo1, void, env, tl) +DEF_HELPER_2(mtc0_context, void, env, tl) +DEF_HELPER_2(mtc0_memorymapid, void, env, tl) +DEF_HELPER_2(mtc0_pagemask, void, env, tl) +DEF_HELPER_2(mtc0_pagegrain, void, env, tl) +DEF_HELPER_2(mtc0_segctl0, void, env, tl) +DEF_HELPER_2(mtc0_segctl1, void, env, tl) +DEF_HELPER_2(mtc0_segctl2, void, env, tl) +DEF_HELPER_2(mtc0_pwfield, void, env, tl) +DEF_HELPER_2(mtc0_pwsize, void, env, tl) +DEF_HELPER_2(mtc0_wired, void, env, tl) +DEF_HELPER_2(mtc0_srsconf0, void, env, tl) +DEF_HELPER_2(mtc0_srsconf1, void, env, tl) +DEF_HELPER_2(mtc0_srsconf2, void, env, tl) +DEF_HELPER_2(mtc0_srsconf3, void, env, tl) +DEF_HELPER_2(mtc0_srsconf4, void, env, tl) +DEF_HELPER_2(mtc0_hwrena, void, env, tl) +DEF_HELPER_2(mtc0_pwctl, void, env, tl) +DEF_HELPER_2(mtc0_count, void, env, tl) +DEF_HELPER_2(mtc0_entryhi, void, env, tl) +DEF_HELPER_2(mttc0_entryhi, void, env, tl) +DEF_HELPER_2(mtc0_compare, void, env, tl) +DEF_HELPER_2(mtc0_status, void, env, tl) +DEF_HELPER_2(mttc0_status, void, env, tl) +DEF_HELPER_2(mtc0_intctl, void, env, tl) +DEF_HELPER_2(mtc0_srsctl, void, env, tl) +DEF_HELPER_2(mtc0_cause, void, env, tl) +DEF_HELPER_2(mttc0_cause, 
void, env, tl) +DEF_HELPER_2(mtc0_ebase, void, env, tl) +DEF_HELPER_2(mttc0_ebase, void, env, tl) +DEF_HELPER_2(mtc0_config0, void, env, tl) +DEF_HELPER_2(mtc0_config2, void, env, tl) +DEF_HELPER_2(mtc0_config3, void, env, tl) +DEF_HELPER_2(mtc0_config4, void, env, tl) +DEF_HELPER_2(mtc0_config5, void, env, tl) +DEF_HELPER_2(mtc0_lladdr, void, env, tl) +DEF_HELPER_2(mtc0_maar, void, env, tl) +DEF_HELPER_2(mthc0_maar, void, env, tl) +DEF_HELPER_2(mtc0_maari, void, env, tl) +DEF_HELPER_3(mtc0_watchlo, void, env, tl, i32) +DEF_HELPER_3(mtc0_watchhi, void, env, tl, i32) +DEF_HELPER_3(mthc0_watchhi, void, env, tl, i32) +DEF_HELPER_2(mtc0_xcontext, void, env, tl) +DEF_HELPER_2(mtc0_framemask, void, env, tl) +DEF_HELPER_2(mtc0_debug, void, env, tl) +DEF_HELPER_2(mttc0_debug, void, env, tl) +DEF_HELPER_2(mtc0_performance0, void, env, tl) +DEF_HELPER_2(mtc0_errctl, void, env, tl) +DEF_HELPER_2(mtc0_taglo, void, env, tl) +DEF_HELPER_2(mtc0_datalo, void, env, tl) +DEF_HELPER_2(mtc0_taghi, void, env, tl) +DEF_HELPER_2(mtc0_datahi, void, env, tl) + +#if defined(TARGET_MIPS64) +DEF_HELPER_2(dmtc0_entrylo0, void, env, i64) +DEF_HELPER_2(dmtc0_entrylo1, void, env, i64) +#endif + +/* MIPS MT functions */ +DEF_HELPER_2(mftgpr, tl, env, i32) +DEF_HELPER_2(mftlo, tl, env, i32) +DEF_HELPER_2(mfthi, tl, env, i32) +DEF_HELPER_2(mftacx, tl, env, i32) +DEF_HELPER_1(mftdsp, tl, env) +DEF_HELPER_3(mttgpr, void, env, tl, i32) +DEF_HELPER_3(mttlo, void, env, tl, i32) +DEF_HELPER_3(mtthi, void, env, tl, i32) +DEF_HELPER_3(mttacx, void, env, tl, i32) +DEF_HELPER_2(mttdsp, void, env, tl) +DEF_HELPER_0(dmt, tl) +DEF_HELPER_0(emt, tl) +DEF_HELPER_1(dvpe, tl, env) +DEF_HELPER_1(evpe, tl, env) + +/* R6 Multi-threading */ +DEF_HELPER_1(dvp, tl, env) +DEF_HELPER_1(evp, tl, env) + +/* TLB */ +DEF_HELPER_1(tlbwi, void, env) +DEF_HELPER_1(tlbwr, void, env) +DEF_HELPER_1(tlbp, void, env) +DEF_HELPER_1(tlbr, void, env) +DEF_HELPER_1(tlbinv, void, env) +DEF_HELPER_1(tlbinvf, void, env) +DEF_HELPER_3(ginvt, void, env, tl, i32) + +/* Special */ +DEF_HELPER_1(di, tl, env) +DEF_HELPER_1(ei, tl, env) +DEF_HELPER_1(eret, void, env) +DEF_HELPER_1(eretnc, void, env) +DEF_HELPER_1(deret, void, env) +DEF_HELPER_3(cache, void, env, tl, i32) + +#ifdef TARGET_MIPS64 +/* Longson CSR */ +DEF_HELPER_2(lcsr_rdcsr, i64, env, tl) +DEF_HELPER_2(lcsr_drdcsr, i64, env, tl) +DEF_HELPER_3(lcsr_wrcsr, void, env, tl, tl) +DEF_HELPER_3(lcsr_dwrcsr, void, env, tl, tl) +#endif diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h index aef032c48dc..950e6afc3f2 100644 --- a/target/mips/tcg/tcg-internal.h +++ b/target/mips/tcg/tcg-internal.h @@ -16,6 +16,8 @@ #include "cpu.h" void mips_tcg_init(void); +void mips_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb); G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, @@ -45,7 +47,7 @@ bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req); void mmu_init(CPUMIPSState *env, const mips_def_t *def); -void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask); +uint32_t compute_pagemask(uint32_t val); void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra); uint32_t cpu_mips_get_random(CPUMIPSState *env); diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c index 333469b268e..d91d6efe02c 100644 --- a/target/mips/tcg/translate.c +++ b/target/mips/tcg/translate.c @@ -27,6 +27,7 @@ #include "internal.h" #include 
"exec/helper-proto.h" #include "exec/translation-block.h" +#include "exec/target_page.h" #include "semihosting/semihost.h" #include "trace.h" #include "fpu_helper.h" @@ -37,7 +38,7 @@ /* - * Many sysemu-only helpers are not reachable for user-only. + * Many system-only helpers are not reachable for user-only. * Define stub generators here, so that we need not either sprinkle * ifdefs through the translator, nor provide the helper function. */ @@ -327,19 +328,6 @@ enum { OPC_MUL = 0x02 | OPC_SPECIAL2, OPC_MSUB = 0x04 | OPC_SPECIAL2, OPC_MSUBU = 0x05 | OPC_SPECIAL2, - /* Loongson 2F */ - OPC_MULT_G_2F = 0x10 | OPC_SPECIAL2, - OPC_DMULT_G_2F = 0x11 | OPC_SPECIAL2, - OPC_MULTU_G_2F = 0x12 | OPC_SPECIAL2, - OPC_DMULTU_G_2F = 0x13 | OPC_SPECIAL2, - OPC_DIV_G_2F = 0x14 | OPC_SPECIAL2, - OPC_DDIV_G_2F = 0x15 | OPC_SPECIAL2, - OPC_DIVU_G_2F = 0x16 | OPC_SPECIAL2, - OPC_DDIVU_G_2F = 0x17 | OPC_SPECIAL2, - OPC_MOD_G_2F = 0x1c | OPC_SPECIAL2, - OPC_DMOD_G_2F = 0x1d | OPC_SPECIAL2, - OPC_MODU_G_2F = 0x1e | OPC_SPECIAL2, - OPC_DMODU_G_2F = 0x1f | OPC_SPECIAL2, /* Misc */ OPC_CLZ = 0x20 | OPC_SPECIAL2, OPC_CLO = 0x21 | OPC_SPECIAL2, @@ -368,20 +356,6 @@ enum { OPC_RDHWR = 0x3B | OPC_SPECIAL3, OPC_GINV = 0x3D | OPC_SPECIAL3, - /* Loongson 2E */ - OPC_MULT_G_2E = 0x18 | OPC_SPECIAL3, - OPC_MULTU_G_2E = 0x19 | OPC_SPECIAL3, - OPC_DIV_G_2E = 0x1A | OPC_SPECIAL3, - OPC_DIVU_G_2E = 0x1B | OPC_SPECIAL3, - OPC_DMULT_G_2E = 0x1C | OPC_SPECIAL3, - OPC_DMULTU_G_2E = 0x1D | OPC_SPECIAL3, - OPC_DDIV_G_2E = 0x1E | OPC_SPECIAL3, - OPC_DDIVU_G_2E = 0x1F | OPC_SPECIAL3, - OPC_MOD_G_2E = 0x22 | OPC_SPECIAL3, - OPC_MODU_G_2E = 0x23 | OPC_SPECIAL3, - OPC_DMOD_G_2E = 0x26 | OPC_SPECIAL3, - OPC_DMODU_G_2E = 0x27 | OPC_SPECIAL3, - /* MIPS DSP Load */ OPC_LX_DSP = 0x0A | OPC_SPECIAL3, /* MIPS DSP Arithmetic */ @@ -389,16 +363,14 @@ enum { OPC_ADDU_OB_DSP = 0x14 | OPC_SPECIAL3, OPC_ABSQ_S_PH_DSP = 0x12 | OPC_SPECIAL3, OPC_ABSQ_S_QH_DSP = 0x16 | OPC_SPECIAL3, - /* OPC_ADDUH_QB_DSP is same as OPC_MULT_G_2E. */ - /* OPC_ADDUH_QB_DSP = 0x18 | OPC_SPECIAL3, */ + OPC_ADDUH_QB_DSP = 0x18 | OPC_SPECIAL3, OPC_CMPU_EQ_QB_DSP = 0x11 | OPC_SPECIAL3, OPC_CMPU_EQ_OB_DSP = 0x15 | OPC_SPECIAL3, /* MIPS DSP GPR-Based Shift Sub-class */ OPC_SHLL_QB_DSP = 0x13 | OPC_SPECIAL3, OPC_SHLL_OB_DSP = 0x17 | OPC_SPECIAL3, /* MIPS DSP Multiply Sub-class insns */ - /* OPC_MUL_PH_DSP is same as OPC_ADDUH_QB_DSP. */ - /* OPC_MUL_PH_DSP = 0x18 | OPC_SPECIAL3, */ + OPC_MUL_PH_DSP = 0x18 | OPC_SPECIAL3, OPC_DPA_W_PH_DSP = 0x30 | OPC_SPECIAL3, OPC_DPAQ_W_QH_DSP = 0x34 | OPC_SPECIAL3, /* DSP Bit/Manipulation Sub-class */ @@ -556,7 +528,6 @@ enum { OPC_MULQ_S_PH = (0x1E << 6) | OPC_ADDU_QB_DSP, }; -#define OPC_ADDUH_QB_DSP OPC_MULT_G_2E #define MASK_ADDUH_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP Arithmetic Sub-class */ @@ -1456,8 +1427,7 @@ void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1) #endif } -static inline void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base, - target_long ofs) +void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base, target_long ofs) { tcg_gen_addi_tl(ret, base, ofs); @@ -1646,13 +1616,18 @@ static inline void check_ps(DisasContext *ctx) check_cp1_64bitmode(ctx); } +bool decode_64bit_enabled(DisasContext *ctx) +{ + return ctx->hflags & MIPS_HFLAG_64; +} + /* * This code generates a "reserved instruction" exception if cpu is not * 64-bit or 64-bit instructions are not enabled. 
*/ void check_mips_64(DisasContext *ctx) { - if (unlikely((TARGET_LONG_BITS != 64) || !(ctx->hflags & MIPS_HFLAG_64))) { + if (unlikely((TARGET_LONG_BITS != 64) || !decode_64bit_enabled(ctx))) { gen_reserved_instruction(ctx); } } @@ -1957,16 +1932,16 @@ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \ tcg_gen_st_tl(ret, tcg_env, offsetof(CPUMIPSState, llval)); \ } #else -#define OP_LD_ATOMIC(insn, fname) \ +#define OP_LD_ATOMIC(insn, ignored_memop) \ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \ DisasContext *ctx) \ { \ gen_helper_##insn(ret, tcg_env, arg1, tcg_constant_i32(mem_idx)); \ } #endif -OP_LD_ATOMIC(ll, MO_TESL); +OP_LD_ATOMIC(ll, mo_endian(ctx) | MO_SL); #if defined(TARGET_MIPS64) -OP_LD_ATOMIC(lld, MO_TEUQ); +OP_LD_ATOMIC(lld, mo_endian(ctx) | MO_UQ); #endif #undef OP_LD_ATOMIC @@ -1982,6 +1957,17 @@ void gen_base_offset_addr(DisasContext *ctx, TCGv addr, int base, int offset) } } +void gen_base_index_addr(DisasContext *ctx, TCGv addr, int base, int index) +{ + if (base == 0) { + gen_load_gpr(addr, index); + } else if (index == 0) { + gen_load_gpr(addr, base); + } else { + gen_op_addr_add(ctx, addr, cpu_gpr[base], cpu_gpr[index]); + } +} + static target_ulong pc_relative_pc(DisasContext *ctx) { target_ulong pc = ctx->base.pc_next; @@ -2010,7 +1996,7 @@ static void gen_lxl(DisasContext *ctx, TCGv reg, TCGv addr, */ tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB); tcg_gen_andi_tl(t1, addr, sizem1); - if (!cpu_is_bigendian(ctx)) { + if (!disas_is_bigendian(ctx)) { tcg_gen_xori_tl(t1, t1, sizem1); } tcg_gen_shli_tl(t1, t1, 3); @@ -2037,7 +2023,7 @@ static void gen_lxr(DisasContext *ctx, TCGv reg, TCGv addr, */ tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB); tcg_gen_andi_tl(t1, addr, sizem1); - if (cpu_is_bigendian(ctx)) { + if (disas_is_bigendian(ctx)) { tcg_gen_xori_tl(t1, t1, sizem1); } tcg_gen_shli_tl(t1, t1, 3); @@ -2050,6 +2036,15 @@ static void gen_lxr(DisasContext *ctx, TCGv reg, TCGv addr, tcg_gen_or_tl(reg, t0, t1); } +void gen_lx(DisasContext *ctx, int rd, int base, int index, MemOp mop) +{ + TCGv t0 = tcg_temp_new(); + + gen_base_index_addr(ctx, t0, base, index); + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | mop); + gen_store_gpr(t0, rd); +} + /* Load */ static void gen_ld(DisasContext *ctx, uint32_t opc, int rt, int base, int offset) @@ -2073,12 +2068,12 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, switch (opc) { #if defined(TARGET_MIPS64) case OPC_LWU: - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL | + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; case OPC_LD: - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; @@ -2090,33 +2085,33 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, case OPC_LDL: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); - gen_lxl(ctx, t1, t0, mem_idx, MO_TEUQ); + gen_lxl(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UQ); gen_store_gpr(t1, rt); break; case OPC_LDR: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); - gen_lxr(ctx, t1, t0, mem_idx, MO_TEUQ); + gen_lxr(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UQ); gen_store_gpr(t1, rt); break; case OPC_LDPC: t1 = tcg_constant_tl(pc_relative_pc(ctx)); gen_op_addr_add(ctx, t0, t0, t1); - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ); + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UQ); gen_store_gpr(t0, rt); break; #endif case OPC_LWPC: t1 = 
tcg_constant_tl(pc_relative_pc(ctx)); gen_op_addr_add(ctx, t0, t0, t1); - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL); + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_SL); gen_store_gpr(t0, rt); break; case OPC_LWE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LW: - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL | + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; @@ -2124,7 +2119,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LH: - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESW | + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_SW | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; @@ -2132,7 +2127,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LHU: - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUW | + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UW | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; @@ -2156,7 +2151,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, case OPC_LWL: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); - gen_lxl(ctx, t1, t0, mem_idx, MO_TEUL); + gen_lxl(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UL); tcg_gen_ext32s_tl(t1, t1); gen_store_gpr(t1, rt); break; @@ -2166,7 +2161,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, case OPC_LWR: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); - gen_lxr(ctx, t1, t0, mem_idx, MO_TEUL); + gen_lxr(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UL); tcg_gen_ext32s_tl(t1, t1); gen_store_gpr(t1, rt); break; @@ -2194,7 +2189,7 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt, switch (opc) { #if defined(TARGET_MIPS64) case OPC_SD: - tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUQ | + tcg_gen_qemu_st_tl(t1, t0, mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); break; case OPC_SDL: @@ -2208,14 +2203,14 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt, mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_SW: - tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); break; case OPC_SHE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_SH: - tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUW | + tcg_gen_qemu_st_tl(t1, t0, mem_idx, mo_endian(ctx) | MO_UW | ctx->default_tcg_memop_mask); break; case OPC_SBE: @@ -2253,8 +2248,7 @@ static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset, /* compare the address against that of the preceding LL */ gen_base_offset_addr(ctx, addr, base, offset); tcg_gen_brcond_tl(TCG_COND_EQ, addr, cpu_lladdr, l1); - tcg_gen_movi_tl(t0, 0); - gen_store_gpr(t0, rt); + gen_store_gpr(tcg_constant_tl(0), rt); tcg_gen_br(done); gen_set_label(l1); @@ -2281,7 +2275,7 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, case OPC_LWC1: { TCGv_i32 fp0 = tcg_temp_new_i32(); - tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL | + tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); gen_store_fpr32(ctx, fp0, ft); } @@ -2290,14 +2284,14 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, { TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, ft); - tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); } break; case OPC_LDC1: { TCGv_i64 fp0 = tcg_temp_new_i64(); - 
tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_store_fpr64(ctx, fp0, ft); } @@ -2306,7 +2300,7 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, { TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, ft); - tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); } break; @@ -2987,14 +2981,14 @@ static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc, case R6_OPC_LWPC: offset = sextract32(ctx->opcode << 2, 0, 21); addr = addr_add(ctx, pc, offset); - gen_r6_ld(addr, rs, ctx->mem_idx, MO_TESL); + gen_r6_ld(addr, rs, ctx->mem_idx, mo_endian(ctx) | MO_SL); break; #if defined(TARGET_MIPS64) case OPC_LWUPC: check_mips_64(ctx); offset = sextract32(ctx->opcode << 2, 0, 21); addr = addr_add(ctx, pc, offset); - gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUL); + gen_r6_ld(addr, rs, ctx->mem_idx, mo_endian(ctx) | MO_UL); break; #endif default: @@ -3021,7 +3015,7 @@ static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc, check_mips_64(ctx); offset = sextract32(ctx->opcode << 3, 0, 21); addr = addr_add(ctx, (pc & ~0x7), offset); - gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUQ); + gen_r6_ld(addr, rs, ctx->mem_idx, mo_endian(ctx) | MO_UQ); break; #endif default: @@ -3060,8 +3054,7 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_div_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); } @@ -3077,30 +3070,27 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); } break; case R6_OPC_DIVU: { - TCGv t2 = tcg_constant_tl(0); - TCGv t3 = tcg_constant_tl(1); tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); - tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, + tcg_constant_tl(0), tcg_constant_tl(1), t1); tcg_gen_divu_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); } break; case R6_OPC_MODU: { - TCGv t2 = tcg_constant_tl(0); - TCGv t3 = tcg_constant_tl(1); tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); - tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, + tcg_constant_tl(0), tcg_constant_tl(1), t1); tcg_gen_remu_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); } @@ -3155,8 +3145,7 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_div_tl(cpu_gpr[rd], t0, t1); } break; @@ -3169,24 +3158,21 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) 
tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); } break; case R6_OPC_DDIVU: { - TCGv t2 = tcg_constant_tl(0); - TCGv t3 = tcg_constant_tl(1); - tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, + tcg_constant_tl(0), tcg_constant_tl(1), t1); tcg_gen_divu_i64(cpu_gpr[rd], t0, t1); } break; case R6_OPC_DMODU: { - TCGv t2 = tcg_constant_tl(0); - TCGv t3 = tcg_constant_tl(1); - tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, + tcg_constant_tl(0), tcg_constant_tl(1), t1); tcg_gen_remu_i64(cpu_gpr[rd], t0, t1); } break; @@ -3239,8 +3225,7 @@ static void gen_div1_tx79(DisasContext *ctx, uint32_t opc, int rs, int rt) tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_div_tl(cpu_LO[1], t0, t1); tcg_gen_rem_tl(cpu_HI[1], t0, t1); tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]); @@ -3295,8 +3280,7 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_div_tl(cpu_LO[acc], t0, t1); tcg_gen_rem_tl(cpu_HI[acc], t0, t1); tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]); @@ -3348,17 +3332,15 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_div_tl(cpu_LO[acc], t0, t1); tcg_gen_rem_tl(cpu_HI[acc], t0, t1); } break; case OPC_DDIVU: { - TCGv t2 = tcg_constant_tl(0); - TCGv t3 = tcg_constant_tl(1); - tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, + tcg_constant_tl(0), tcg_constant_tl(1), t1); tcg_gen_divu_i64(cpu_LO[acc], t0, t1); tcg_gen_remu_i64(cpu_HI[acc], t0, t1); } @@ -3600,184 +3582,6 @@ static void gen_cl(DisasContext *ctx, uint32_t opc, } } -/* Godson integer instructions */ -static void gen_loongson_integer(DisasContext *ctx, uint32_t opc, - int rd, int rs, int rt) -{ - TCGv t0, t1; - - if (rd == 0) { - /* Treat as NOP. 
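The setcond/movcond sequences above keep the host integer division from trapping; a scalar C equivalent of what the emitted code computes (a sketch for the 64-bit signed case, not code from the patch):

    #include <stdint.h>

    static int64_t guarded_sdiv(int64_t num, int64_t den)
    {
        /* t2 in the generated code is 1 exactly when den == 0 or when
           num / den would overflow (INT64_MIN / -1); the movcond then
           substitutes that 1 for the divisor, so the division cannot trap. */
        if (den == 0 || (num == INT64_MIN && den == -1)) {
            den = 1;
        }
        return num / den;
    }

The unsigned forms only have the divisor-equals-zero hazard, which is why R6_OPC_DIVU, R6_OPC_MODU and OPC_DDIVU substitute the constant 1 with a single movcond.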
*/ - return; - } - - t0 = tcg_temp_new(); - t1 = tcg_temp_new(); - gen_load_gpr(t0, rs); - gen_load_gpr(t1, rt); - - switch (opc) { - case OPC_MULT_G_2E: - case OPC_MULT_G_2F: - tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - break; - case OPC_MULTU_G_2E: - case OPC_MULTU_G_2F: - tcg_gen_ext32u_tl(t0, t0); - tcg_gen_ext32u_tl(t1, t1); - tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - break; - case OPC_DIV_G_2E: - case OPC_DIV_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGLabel *l3 = gen_new_label(); - tcg_gen_ext32s_tl(t0, t0); - tcg_gen_ext32s_tl(t1, t1); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l3); - gen_set_label(l1); - tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2); - tcg_gen_mov_tl(cpu_gpr[rd], t0); - tcg_gen_br(l3); - gen_set_label(l2); - tcg_gen_div_tl(cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - gen_set_label(l3); - } - break; - case OPC_DIVU_G_2E: - case OPC_DIVU_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - tcg_gen_ext32u_tl(t0, t0); - tcg_gen_ext32u_tl(t1, t1); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_divu_tl(cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - gen_set_label(l2); - } - break; - case OPC_MOD_G_2E: - case OPC_MOD_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGLabel *l3 = gen_new_label(); - tcg_gen_ext32u_tl(t0, t0); - tcg_gen_ext32u_tl(t1, t1); - tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); - tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2); - gen_set_label(l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l3); - gen_set_label(l2); - tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - gen_set_label(l3); - } - break; - case OPC_MODU_G_2E: - case OPC_MODU_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - tcg_gen_ext32u_tl(t0, t0); - tcg_gen_ext32u_tl(t1, t1); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_remu_tl(cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - gen_set_label(l2); - } - break; -#if defined(TARGET_MIPS64) - case OPC_DMULT_G_2E: - case OPC_DMULT_G_2F: - tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); - break; - case OPC_DMULTU_G_2E: - case OPC_DMULTU_G_2F: - tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); - break; - case OPC_DDIV_G_2E: - case OPC_DDIV_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGLabel *l3 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l3); - gen_set_label(l1); - tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2); - tcg_gen_mov_tl(cpu_gpr[rd], t0); - tcg_gen_br(l3); - gen_set_label(l2); - tcg_gen_div_tl(cpu_gpr[rd], t0, t1); - gen_set_label(l3); - } - break; - case OPC_DDIVU_G_2E: - case OPC_DDIVU_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_divu_tl(cpu_gpr[rd], t0, t1); - gen_set_label(l2); - } - break; - case 
OPC_DMOD_G_2E: - case OPC_DMOD_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGLabel *l3 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); - tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2); - gen_set_label(l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l3); - gen_set_label(l2); - tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); - gen_set_label(l3); - } - break; - case OPC_DMODU_G_2E: - case OPC_DMODU_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_remu_tl(cpu_gpr[rd], t0, t1); - gen_set_label(l2); - } - break; -#endif - } -} - /* Loongson multimedia instructions */ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) { @@ -4160,10 +3964,10 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, case OPC_GSLQ: t1 = tcg_temp_new(); gen_base_offset_addr(ctx, t0, rs, lsq_offset); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8); - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_store_gpr(t1, rt); gen_store_gpr(t0, lsq_rt1); @@ -4172,10 +3976,10 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, check_cp1_enabled(ctx); t1 = tcg_temp_new(); gen_base_offset_addr(ctx, t0, rs, lsq_offset); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8); - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_store_fpr64(ctx, t1, rt); gen_store_fpr64(ctx, t0, lsq_rt1); @@ -4184,11 +3988,11 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, t1 = tcg_temp_new(); gen_base_offset_addr(ctx, t0, rs, lsq_offset); gen_load_gpr(t1, rt); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8); gen_load_gpr(t1, lsq_rt1); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); break; case OPC_GSSQC1: @@ -4196,11 +4000,11 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, t1 = tcg_temp_new(); gen_base_offset_addr(ctx, t0, rs, lsq_offset); gen_load_fpr64(ctx, t1, rt); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8); gen_load_fpr64(ctx, t1, lsq_rt1); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); break; #endif @@ -4213,7 +4017,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, gen_load_fpr32(ctx, fp0, rt); t1 = tcg_temp_new(); tcg_gen_ext_i32_tl(t1, fp0); - gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUL); + gen_lxl(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL); tcg_gen_trunc_tl_i32(fp0, t1); gen_store_fpr32(ctx, fp0, 
rt); break; @@ -4224,7 +4028,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, gen_load_fpr32(ctx, fp0, rt); t1 = tcg_temp_new(); tcg_gen_ext_i32_tl(t1, fp0); - gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUL); + gen_lxr(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL); tcg_gen_trunc_tl_i32(fp0, t1); gen_store_fpr32(ctx, fp0, rt); break; @@ -4234,7 +4038,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, gen_base_offset_addr(ctx, t0, rs, shf_offset); t1 = tcg_temp_new(); gen_load_fpr64(ctx, t1, rt); - gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUQ); + gen_lxl(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ); gen_store_fpr64(ctx, t1, rt); break; case OPC_GSLDRC1: @@ -4242,7 +4046,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, gen_base_offset_addr(ctx, t0, rs, shf_offset); t1 = tcg_temp_new(); gen_load_fpr64(ctx, t1, rt); - gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUQ); + gen_lxr(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ); gen_store_fpr64(ctx, t1, rt); break; #endif @@ -4360,7 +4164,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, gen_store_gpr(t0, rt); break; case OPC_GSLHX: - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW | + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SW | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; @@ -4369,7 +4173,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, if (rd) { gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0); } - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL | + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; @@ -4379,7 +4183,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, if (rd) { gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0); } - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; @@ -4390,7 +4194,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0); } fp0 = tcg_temp_new_i32(); - tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL | + tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); gen_store_fpr32(ctx, fp0, rt); break; @@ -4400,7 +4204,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, if (rd) { gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0); } - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_store_fpr64(ctx, t0, rt); break; @@ -4413,34 +4217,34 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt, case OPC_GSSHX: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UW | ctx->default_tcg_memop_mask); break; case OPC_GSSWX: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); break; #if defined(TARGET_MIPS64) case OPC_GSSDX: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); break; #endif case OPC_GSSWXC1: fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, rt); - tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_i32(fp0, t0, 
ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); break; #if defined(TARGET_MIPS64) case OPC_GSSDXC1: t1 = tcg_temp_new(); gen_load_fpr64(ctx, t1, rt); - tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); break; #endif @@ -5329,17 +5133,17 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Index"; break; case CP0_REG00__MVPCONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_mvpcontrol(arg, tcg_env); register_name = "MVPControl"; break; case CP0_REG00__MVPCONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_mvpconf0(arg, tcg_env); register_name = "MVPConf0"; break; case CP0_REG00__MVPCONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_mvpconf1(arg, tcg_env); register_name = "MVPConf1"; break; @@ -5360,37 +5164,37 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Random"; break; case CP0_REG01__VPECONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl)); register_name = "VPEControl"; break; case CP0_REG01__VPECONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0)); register_name = "VPEConf0"; break; case CP0_REG01__VPECONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1)); register_name = "VPEConf1"; break; case CP0_REG01__YQMASK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_YQMask)); register_name = "YQMask"; break; case CP0_REG01__VPESCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPESchedule)); register_name = "VPESchedule"; break; case CP0_REG01__VPESCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPEScheFBack)); register_name = "VPEScheFBack"; break; case CP0_REG01__VPEOPT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt)); register_name = "VPEOpt"; break; @@ -5417,37 +5221,37 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "EntryLo0"; break; case CP0_REG02__TCSTATUS: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcstatus(arg, tcg_env); register_name = "TCStatus"; break; case CP0_REG02__TCBIND: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcbind(arg, tcg_env); register_name = "TCBind"; break; case CP0_REG02__TCRESTART: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcrestart(arg, tcg_env); register_name = "TCRestart"; break; case CP0_REG02__TCHALT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tchalt(arg, tcg_env); register_name = "TCHalt"; break; case CP0_REG02__TCCONTEXT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tccontext(arg, tcg_env); register_name = "TCContext"; break; case 
CP0_REG02__TCSCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcschedule(arg, tcg_env); register_name = "TCSchedule"; break; case CP0_REG02__TCSCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcschefback(arg, tcg_env); register_name = "TCScheFBack"; break; @@ -6086,17 +5890,17 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Index"; break; case CP0_REG00__MVPCONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_mvpcontrol(tcg_env, arg); register_name = "MVPControl"; break; case CP0_REG00__MVPCONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); /* ignored */ register_name = "MVPConf0"; break; case CP0_REG00__MVPCONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); /* ignored */ register_name = "MVPConf1"; break; @@ -6116,39 +5920,39 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Random"; break; case CP0_REG01__VPECONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpecontrol(tcg_env, arg); register_name = "VPEControl"; break; case CP0_REG01__VPECONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpeconf0(tcg_env, arg); register_name = "VPEConf0"; break; case CP0_REG01__VPECONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpeconf1(tcg_env, arg); register_name = "VPEConf1"; break; case CP0_REG01__YQMASK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_yqmask(tcg_env, arg); register_name = "YQMask"; break; case CP0_REG01__VPESCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_VPESchedule)); register_name = "VPESchedule"; break; case CP0_REG01__VPESCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); register_name = "VPEScheFBack"; break; case CP0_REG01__VPEOPT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpeopt(tcg_env, arg); register_name = "VPEOpt"; break; @@ -6163,37 +5967,37 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "EntryLo0"; break; case CP0_REG02__TCSTATUS: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcstatus(tcg_env, arg); register_name = "TCStatus"; break; case CP0_REG02__TCBIND: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcbind(tcg_env, arg); register_name = "TCBind"; break; case CP0_REG02__TCRESTART: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcrestart(tcg_env, arg); register_name = "TCRestart"; break; case CP0_REG02__TCHALT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tchalt(tcg_env, arg); register_name = "TCHalt"; break; case CP0_REG02__TCCONTEXT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tccontext(tcg_env, arg); register_name = "TCContext"; break; case CP0_REG02__TCSCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); 
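All of these hunks make the same substitution: the availability check moves from the static ISA flag test (ctx->insn_flags & ASE_MT) to a helper that reads the MT bit the guest sees in Config3. For reference while reading these call sites, the helper added to translate.h later in this patch is simply:

    static inline bool disas_mt_available(DisasContext *ctx)
    {
        return ctx->CP0_Config3 & (1 << CP0C3_MT);
    }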
gen_helper_mtc0_tcschedule(tcg_env, arg); register_name = "TCSchedule"; break; case CP0_REG02__TCSCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcschefback(tcg_env, arg); register_name = "TCScheFBack"; break; @@ -6836,17 +6640,17 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Index"; break; case CP0_REG00__MVPCONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_mvpcontrol(arg, tcg_env); register_name = "MVPControl"; break; case CP0_REG00__MVPCONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_mvpconf0(arg, tcg_env); register_name = "MVPConf0"; break; case CP0_REG00__MVPCONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_mvpconf1(arg, tcg_env); register_name = "MVPConf1"; break; @@ -6867,40 +6671,40 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Random"; break; case CP0_REG01__VPECONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl)); register_name = "VPEControl"; break; case CP0_REG01__VPECONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0)); register_name = "VPEConf0"; break; case CP0_REG01__VPECONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1)); register_name = "VPEConf1"; break; case CP0_REG01__YQMASK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_YQMask)); register_name = "YQMask"; break; case CP0_REG01__VPESCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_VPESchedule)); register_name = "VPESchedule"; break; case CP0_REG01__VPESCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); register_name = "VPEScheFBack"; break; case CP0_REG01__VPEOPT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt)); register_name = "VPEOpt"; break; @@ -6916,37 +6720,37 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "EntryLo0"; break; case CP0_REG02__TCSTATUS: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcstatus(arg, tcg_env); register_name = "TCStatus"; break; case CP0_REG02__TCBIND: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcbind(arg, tcg_env); register_name = "TCBind"; break; case CP0_REG02__TCRESTART: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_dmfc0_tcrestart(arg, tcg_env); register_name = "TCRestart"; break; case CP0_REG02__TCHALT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_dmfc0_tchalt(arg, tcg_env); register_name = "TCHalt"; break; case CP0_REG02__TCCONTEXT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_dmfc0_tccontext(arg, tcg_env); register_name = "TCContext"; break; case CP0_REG02__TCSCHEDULE: - CP0_CHECK(ctx->insn_flags & 
ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_dmfc0_tcschedule(arg, tcg_env); register_name = "TCSchedule"; break; case CP0_REG02__TCSCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_dmfc0_tcschefback(arg, tcg_env); register_name = "TCScheFBack"; break; @@ -7553,17 +7357,17 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Index"; break; case CP0_REG00__MVPCONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_mvpcontrol(tcg_env, arg); register_name = "MVPControl"; break; case CP0_REG00__MVPCONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); /* ignored */ register_name = "MVPConf0"; break; case CP0_REG00__MVPCONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); /* ignored */ register_name = "MVPConf1"; break; @@ -7583,39 +7387,39 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Random"; break; case CP0_REG01__VPECONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpecontrol(tcg_env, arg); register_name = "VPEControl"; break; case CP0_REG01__VPECONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpeconf0(tcg_env, arg); register_name = "VPEConf0"; break; case CP0_REG01__VPECONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpeconf1(tcg_env, arg); register_name = "VPEConf1"; break; case CP0_REG01__YQMASK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_yqmask(tcg_env, arg); register_name = "YQMask"; break; case CP0_REG01__VPESCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_VPESchedule)); register_name = "VPESchedule"; break; case CP0_REG01__VPESCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); register_name = "VPEScheFBack"; break; case CP0_REG01__VPEOPT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpeopt(tcg_env, arg); register_name = "VPEOpt"; break; @@ -7630,37 +7434,37 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "EntryLo0"; break; case CP0_REG02__TCSTATUS: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcstatus(tcg_env, arg); register_name = "TCStatus"; break; case CP0_REG02__TCBIND: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcbind(tcg_env, arg); register_name = "TCBind"; break; case CP0_REG02__TCRESTART: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcrestart(tcg_env, arg); register_name = "TCRestart"; break; case CP0_REG02__TCHALT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tchalt(tcg_env, arg); register_name = "TCHalt"; break; case CP0_REG02__TCCONTEXT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tccontext(tcg_env, arg); register_name = "TCContext"; break; case CP0_REG02__TCSCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcschedule(tcg_env, arg); register_name = 
"TCSchedule"; break; case CP0_REG02__TCSCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcschefback(tcg_env, arg); register_name = "TCScheFBack"; break; @@ -10762,13 +10566,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, { TCGv t0 = tcg_temp_new(); - if (base == 0) { - gen_load_gpr(t0, index); - } else if (index == 0) { - gen_load_gpr(t0, base); - } else { - gen_op_addr_add(ctx, t0, cpu_gpr[base], cpu_gpr[index]); - } + gen_base_index_addr(ctx, t0, base, index); /* * Don't do NOP if destination is zero: we must perform the actual * memory access. @@ -10779,7 +10577,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, { TCGv_i32 fp0 = tcg_temp_new_i32(); - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL); + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL); tcg_gen_trunc_tl_i32(fp0, t0); gen_store_fpr32(ctx, fp0, fd); } @@ -10789,7 +10587,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, check_cp1_registers(ctx, fd); { TCGv_i64 fp0 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ); gen_store_fpr64(ctx, fp0, fd); } break; @@ -10799,7 +10597,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, { TCGv_i64 fp0 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ); gen_store_fpr64(ctx, fp0, fd); } break; @@ -10808,7 +10606,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, { TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, fs); - tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL); + tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL); } break; case OPC_SDXC1: @@ -10817,7 +10615,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, { TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); - tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ); } break; case OPC_SUXC1: @@ -10826,7 +10624,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, { TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); - tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ); } break; } @@ -10856,7 +10654,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc, tcg_gen_br(l2); gen_set_label(l1); tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2); - if (cpu_is_bigendian(ctx)) { + if (disas_is_bigendian(ctx)) { gen_load_fpr32(ctx, fp, fs); gen_load_fpr32h(ctx, fph, ft); gen_store_fpr32h(ctx, fp, fd); @@ -11265,10 +11063,9 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, } else { /* OPC_JIC, OPC_JIALC */ TCGv tbase = tcg_temp_new(); - TCGv toffset = tcg_constant_tl(offset); gen_load_gpr(tbase, rt); - gen_op_addr_add(ctx, btarget, tbase, toffset); + gen_op_addr_addi(ctx, btarget, tbase, offset); } break; default: @@ -11428,20 +11225,18 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, void gen_addiupc(DisasContext *ctx, int rx, int imm, int is_64_bit, int extended) { - TCGv t0; + target_ulong npc; if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) { gen_reserved_instruction(ctx); return; } - t0 = tcg_temp_new(); - - tcg_gen_movi_tl(t0, pc_relative_pc(ctx)); - tcg_gen_addi_tl(cpu_gpr[rx], t0, imm); + npc = pc_relative_pc(ctx) + imm; if (!is_64_bit) { - 
tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]); + npc = (int32_t)npc; } + tcg_gen_movi_tl(cpu_gpr[rx], npc); } static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base, @@ -11476,7 +11271,7 @@ void gen_ldxs(DisasContext *ctx, int base, int index, int rd) gen_op_addr_add(ctx, t0, t1, t0); } - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL); + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL); gen_store_gpr(t1, rd); } @@ -11542,47 +11337,6 @@ enum { /* MIPSDSP functions. */ -/* Indexed load is not for DSP only */ -static void gen_mips_lx(DisasContext *ctx, uint32_t opc, - int rd, int base, int offset) -{ - TCGv t0; - - if (!(ctx->insn_flags & INSN_OCTEON)) { - check_dsp(ctx); - } - t0 = tcg_temp_new(); - - if (base == 0) { - gen_load_gpr(t0, offset); - } else if (offset == 0) { - gen_load_gpr(t0, base); - } else { - gen_op_addr_add(ctx, t0, cpu_gpr[base], cpu_gpr[offset]); - } - - switch (opc) { - case OPC_LBUX: - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_UB); - gen_store_gpr(t0, rd); - break; - case OPC_LHX: - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW); - gen_store_gpr(t0, rd); - break; - case OPC_LWX: - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL); - gen_store_gpr(t0, rd); - break; -#if defined(TARGET_MIPS64) - case OPC_LDX: - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ); - gen_store_gpr(t0, rd); - break; -#endif - } -} - static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, int ret, int v1, int v2) { @@ -11601,8 +11355,7 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_load_gpr(v2_t, v2); switch (op1) { - /* OPC_MULT_G_2E is equal OPC_ADDUH_QB_DSP */ - case OPC_MULT_G_2E: + case OPC_ADDUH_QB_DSP: check_dsp_r2(ctx); switch (op2) { case OPC_ADDUH_QB: @@ -12285,11 +12038,7 @@ static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_load_gpr(v2_t, v2); switch (op1) { - /* - * OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have - * the same mask and op1. - */ - case OPC_MULT_G_2E: + case OPC_MUL_PH_DSP: check_dsp_r2(ctx); switch (op2) { case OPC_MUL_PH: @@ -13641,15 +13390,6 @@ static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx) case OPC_MUL: gen_arith(ctx, op1, rd, rs, rt); break; - case OPC_DIV_G_2F: - case OPC_DIVU_G_2F: - case OPC_MULT_G_2F: - case OPC_MULTU_G_2F: - case OPC_MOD_G_2F: - case OPC_MODU_G_2F: - check_insn(ctx, INSN_LOONGSON2F | ASE_LEXT); - gen_loongson_integer(ctx, op1, rd, rs, rt); - break; case OPC_CLO: case OPC_CLZ: check_insn(ctx, ISA_MIPS_R1); @@ -13674,15 +13414,6 @@ static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx) check_mips_64(ctx); gen_cl(ctx, op1, rd, rs); break; - case OPC_DMULT_G_2F: - case OPC_DMULTU_G_2F: - case OPC_DDIV_G_2F: - case OPC_DDIVU_G_2F: - case OPC_DMOD_G_2F: - case OPC_DMODU_G_2F: - check_insn(ctx, INSN_LOONGSON2F | ASE_LEXT); - gen_loongson_integer(ctx, op1, rd, rs, rt); - break; #endif default: /* Invalid */ MIPS_INVAL("special2_legacy"); @@ -13691,6 +13422,29 @@ static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx) } } +void gen_crc32(DisasContext *ctx, int rd, int rs, int rt, int sz, + int crc32c) +{ + TCGv t0; + TCGv t1; + TCGv_i32 tsz = tcg_constant_i32(1 << sz); + if (rd == 0) { + /* Treat as NOP. 
*/ + return; + } + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + + gen_load_gpr(t0, rt); + gen_load_gpr(t1, rs); + + if (crc32c) { + gen_helper_crc32c(cpu_gpr[rd], t0, t1, tsz); + } else { + gen_helper_crc32(cpu_gpr[rd], t0, t1, tsz); + } +} + static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx) { int rs, rt, rd, sa; @@ -13719,7 +13473,7 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx) } break; case R6_OPC_SC: - gen_st_cond(ctx, rt, rs, imm, MO_TESL, false); + gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_SL, false); break; case R6_OPC_LL: gen_ld(ctx, op1, rt, rs, imm); @@ -13765,7 +13519,7 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx) #endif #if defined(TARGET_MIPS64) case R6_OPC_SCD: - gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false); + gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_UQ, false); break; case R6_OPC_LLD: gen_ld(ctx, op1, rt, rs, imm); @@ -13815,17 +13569,12 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) op1 = MASK_SPECIAL3(ctx->opcode); switch (op1) { - case OPC_DIV_G_2E: - case OPC_DIVU_G_2E: - case OPC_MOD_G_2E: - case OPC_MODU_G_2E: - case OPC_MULT_G_2E: - case OPC_MULTU_G_2E: + case OPC_MUL_PH_DSP: /* - * OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have + * OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have * the same mask and op1. */ - if ((ctx->insn_flags & ASE_DSP_R2) && (op1 == OPC_MULT_G_2E)) { + if ((ctx->insn_flags & ASE_DSP_R2) && (op1 == OPC_MUL_PH_DSP)) { op2 = MASK_ADDUH_QB(ctx->opcode); switch (op2) { case OPC_ADDUH_QB: @@ -13853,22 +13602,27 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) gen_reserved_instruction(ctx); break; } - } else if (ctx->insn_flags & INSN_LOONGSON2E) { - gen_loongson_integer(ctx, op1, rd, rs, rt); } else { gen_reserved_instruction(ctx); } break; case OPC_LX_DSP: + check_dsp(ctx); op2 = MASK_LX(ctx->opcode); switch (op2) { #if defined(TARGET_MIPS64) case OPC_LDX: + gen_lx(ctx, rd, rs, rt, MO_UQ); + break; #endif case OPC_LBUX: + gen_lx(ctx, rd, rs, rt, MO_UB); + break; case OPC_LHX: + gen_lx(ctx, rd, rs, rt, MO_SW); + break; case OPC_LWX: - gen_mips_lx(ctx, op2, rd, rs, rt); + gen_lx(ctx, rd, rs, rt, MO_SL); break; default: /* Invalid */ MIPS_INVAL("MASK LX"); @@ -14083,15 +13837,6 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) } break; #if defined(TARGET_MIPS64) - case OPC_DDIV_G_2E: - case OPC_DDIVU_G_2E: - case OPC_DMULT_G_2E: - case OPC_DMULTU_G_2E: - case OPC_DMOD_G_2E: - case OPC_DMODU_G_2E: - check_insn(ctx, INSN_LOONGSON2E); - gen_loongson_integer(ctx, op1, rd, rs, rt); - break; case OPC_ABSQ_S_QH_DSP: op2 = MASK_ABSQ_S_QH(ctx->opcode); switch (op2) { @@ -14448,7 +14193,7 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx) return; case OPC_SCE: check_cp0_enabled(ctx); - gen_st_cond(ctx, rt, rs, imm, MO_TESL, true); + gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_SL, true); return; case OPC_CACHEE: check_eva(ctx); @@ -14912,7 +14657,7 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) if (ctx->insn_flags & INSN_R5900) { check_insn_opc_user_only(ctx, INSN_R5900); } - gen_st_cond(ctx, rt, rs, imm, MO_TESL, false); + gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_SL, false); break; case OPC_CACHE: check_cp0_enabled(ctx); @@ -14969,7 +14714,9 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) } else { /* OPC_BC1ANY2 */ check_cop1x(ctx); - check_insn(ctx, ASE_MIPS3D); + if (!ase_3d_available(env)) 
{ + return false; + } gen_compute_branch1(ctx, MASK_BC1(ctx->opcode), (rt >> 2) & 0x7, imm << 2); } @@ -14984,7 +14731,9 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) check_cp1_enabled(ctx); check_insn_opc_removed(ctx, ISA_MIPS_R6); check_cop1x(ctx); - check_insn(ctx, ASE_MIPS3D); + if (!ase_3d_available(env)) { + return false; + } /* fall through */ case OPC_BC1: check_cp1_enabled(ctx); @@ -15191,7 +14940,7 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) check_insn_opc_user_only(ctx, INSN_R5900); } check_mips_64(ctx); - gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false); + gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_UQ, false); break; case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */ if (ctx->insn_flags & ISA_MIPS_R6) { @@ -15284,6 +15033,9 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx) if (cpu_supports_isa(env, INSN_VR54XX) && decode_ext_vr54xx(ctx, ctx->opcode)) { return; } + if (TARGET_LONG_BITS == 64 && decode_ext_loongson(ctx, ctx->opcode)) { + return; + } #if defined(TARGET_MIPS64) if (ase_lcsr_available(env) && decode_ase_lcsr(ctx, ctx->opcode)) { return; @@ -15346,6 +15098,7 @@ static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->abs2008 = (env->active_fpu.fcr31 >> FCR31_ABS2008) & 1; ctx->mi = (env->CP0_Config5 >> CP0C5_MI) & 1; ctx->gi = (env->CP0_Config5 >> CP0C5_GI) & 3; + ctx->crcp = (env->CP0_Config5 >> CP0C5_CRCP) & 1; restore_cpu_state(env, ctx); #ifdef CONFIG_USER_ONLY ctx->mem_idx = MIPS_HFLAG_UM; @@ -15362,7 +15115,8 @@ static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) * hardware does (e.g. if a delay slot instruction faults, the * reported PC is the PC of the branch). */ - if (ctx->base.singlestep_enabled && (ctx->hflags & MIPS_HFLAG_BMASK)) { + if ((tb_cflags(ctx->base.tb) & CF_SINGLE_STEP) && + (ctx->hflags & MIPS_HFLAG_BMASK)) { ctx->base.max_insns = 2; } @@ -15445,7 +15199,7 @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) * together with its delay slot. 
*/ if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE - && !ctx->base.singlestep_enabled) { + && !(tb_cflags(ctx->base.tb) & CF_SINGLE_STEP)) { ctx->base.is_jmp = DISAS_TOO_MANY; } } @@ -15482,8 +15236,8 @@ static const TranslatorOps mips_tr_ops = { .tb_stop = mips_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void mips_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext ctx; diff --git a/target/mips/tcg/translate.h b/target/mips/tcg/translate.h index 2b6646b339b..89dde1e7124 100644 --- a/target/mips/tcg/translate.h +++ b/target/mips/tcg/translate.h @@ -51,6 +51,7 @@ typedef struct DisasContext { bool abs2008; bool mi; int gi; + bool crcp; } DisasContext; #define DISAS_STOP DISAS_TARGET_0 @@ -153,6 +154,7 @@ void check_cp1_registers(DisasContext *ctx, int regs); void check_cop1x(DisasContext *ctx); void gen_base_offset_addr(DisasContext *ctx, TCGv addr, int base, int offset); +void gen_base_index_addr(DisasContext *ctx, TCGv addr, int base, int index); void gen_move_low32(TCGv ret, TCGv_i64 arg); void gen_move_high32(TCGv ret, TCGv_i64 arg); void gen_load_gpr(TCGv t, int reg); @@ -167,6 +169,7 @@ void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg); void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg); int get_fp_bit(int cc); +void gen_lx(DisasContext *ctx, int rd, int base, int index, MemOp mop); void gen_ldxs(DisasContext *ctx, int base, int index, int rd); void gen_align(DisasContext *ctx, int wordsz, int rd, int rs, int rt, int bp); void gen_addiupc(DisasContext *ctx, int rx, int imm, @@ -176,10 +179,12 @@ void gen_addiupc(DisasContext *ctx, int rx, int imm, * Address Computation and Large Constant Instructions */ void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1); +void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base, target_long ofs); bool gen_lsa(DisasContext *ctx, int rd, int rt, int rs, int sa); bool gen_dlsa(DisasContext *ctx, int rd, int rt, int rs, int sa); void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel); +void gen_crc32(DisasContext *ctx, int rd, int rs, int rt, int sz, int crc32c); extern TCGv cpu_gpr[32], cpu_PC; #if defined(TARGET_MIPS64) @@ -216,10 +221,13 @@ void msa_translate_init(void); void mxu_translate_init(void); bool decode_ase_mxu(DisasContext *ctx, uint32_t insn); +bool decode_64bit_enabled(DisasContext *ctx); + /* decodetree generated */ bool decode_isa_rel6(DisasContext *ctx, uint32_t insn); bool decode_ase_msa(DisasContext *ctx, uint32_t insn); bool decode_ext_txx9(DisasContext *ctx, uint32_t insn); +bool decode_ext_loongson(DisasContext *ctx, uint32_t insn); #if defined(TARGET_MIPS64) bool decode_ase_lcsr(DisasContext *ctx, uint32_t insn); bool decode_ext_tx79(DisasContext *ctx, uint32_t insn); @@ -227,6 +235,11 @@ bool decode_ext_octeon(DisasContext *ctx, uint32_t insn); #endif bool decode_ext_vr54xx(DisasContext *ctx, uint32_t insn); +static inline bool disas_mt_available(DisasContext *ctx) +{ + return ctx->CP0_Config3 & (1 << CP0C3_MT); +} + /* * Helpers for implementing sets of trans_* functions. * Defer the implementation of NAME to FUNC, with optional extra arguments. 
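The gen_crc32() prototype added above pairs with the ctx->crcp flag set in mips_tr_init_disas_context(); a hypothetical call site could look like the following (the availability handling and the function name are assumptions for illustration, not the patch's actual decoder):

    /* sz is log2 of the operand width in bytes (gen_crc32 passes 1 << sz to
       the helper); crc32c selects the Castagnoli-polynomial variant. */
    static void gen_example_crc32w(DisasContext *ctx, int rd, int rs, int rt)
    {
        if (!ctx->crcp) {
            /* Config5.CRCP is clear: CRC instructions are not implemented. */
            gen_reserved_instruction(ctx);
            return;
        }
        gen_crc32(ctx, rd, rs, rt, 2 /* word */, 0 /* CRC32, not CRC32C */);
    }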
@@ -235,9 +248,19 @@ bool decode_ext_vr54xx(DisasContext *ctx, uint32_t insn); static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ { return FUNC(ctx, a, __VA_ARGS__); } -static inline bool cpu_is_bigendian(DisasContext *ctx) +static inline bool disas_is_bigendian(DisasContext *ctx) { return extract32(ctx->CP0_Config0, CP0C0_BE, 1); } +static inline MemOp mo_endian(DisasContext *dc) +{ + return disas_is_bigendian(dc) ? MO_BE : MO_LE; +} + +static inline MemOp mo_endian_rev(DisasContext *dc, bool reversed) +{ + return disas_is_bigendian(dc) ^ reversed ? MO_BE : MO_LE; +} + #endif diff --git a/target/mips/tcg/translate_addr_const.c b/target/mips/tcg/translate_addr_const.c index 6f4b39f715b..1d140e918da 100644 --- a/target/mips/tcg/translate_addr_const.c +++ b/target/mips/tcg/translate_addr_const.c @@ -26,7 +26,7 @@ bool gen_lsa(DisasContext *ctx, int rd, int rt, int rs, int sa) t1 = tcg_temp_new(); gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); - tcg_gen_shli_tl(t0, t0, sa + 1); + tcg_gen_shli_tl(t0, t0, sa); tcg_gen_add_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); return true; @@ -47,7 +47,7 @@ bool gen_dlsa(DisasContext *ctx, int rd, int rt, int rs, int sa) t1 = tcg_temp_new(); gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); - tcg_gen_shli_tl(t0, t0, sa + 1); + tcg_gen_shli_tl(t0, t0, sa); tcg_gen_add_tl(cpu_gpr[rd], t0, t1); return true; } diff --git a/target/mips/tcg/tx79_translate.c b/target/mips/tcg/tx79_translate.c index dd6fb8a7bd7..ae3f5e19c43 100644 --- a/target/mips/tcg/tx79_translate.c +++ b/target/mips/tcg/tx79_translate.c @@ -340,12 +340,12 @@ static bool trans_LQ(DisasContext *ctx, arg_i *a) tcg_gen_andi_tl(addr, addr, ~0xf); /* Lower half */ - tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ); gen_store_gpr(t0, a->rt); /* Upper half */ tcg_gen_addi_i64(addr, addr, 8); - tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ); gen_store_gpr_hi(t0, a->rt); return true; } @@ -364,12 +364,12 @@ static bool trans_SQ(DisasContext *ctx, arg_i *a) /* Lower half */ gen_load_gpr(t0, a->rt); - tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ); /* Upper half */ tcg_gen_addi_i64(addr, addr, 8); gen_load_gpr_hi(t0, a->rt); - tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ); return true; } diff --git a/target/openrisc/cpu-param.h b/target/openrisc/cpu-param.h index fbfc0f568b1..b4f57bbe692 100644 --- a/target/openrisc/cpu-param.h +++ b/target/openrisc/cpu-param.h @@ -2,17 +2,16 @@ * OpenRISC cpu parameters for qemu. 
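With these helpers every memory access composes its MemOp from the run-time endianness instead of the MO_TE* target-endian constants; a representative emission, mirroring the converted load sites above (sketch only):

    /* Signed 32-bit load that follows the current CP0.Config0.BE setting. */
    TCGv t0 = tcg_temp_new();
    gen_base_offset_addr(ctx, t0, base, offset);
    tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
                       mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask);
    gen_store_gpr(t0, rt);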
* * Copyright (c) 2011-2012 Jia Liu - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #ifndef OPENRISC_CPU_PARAM_H #define OPENRISC_CPU_PARAM_H -#define TARGET_LONG_BITS 32 #define TARGET_PAGE_BITS 13 #define TARGET_PHYS_ADDR_SPACE_BITS 32 #define TARGET_VIRT_ADDR_SPACE_BITS 32 -#define TCG_GUEST_DEFAULT_MO (0) +#define TARGET_INSN_START_EXTRA_WORDS 1 #endif diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c index 6ec54ad7a6c..dfbb2df643a 100644 --- a/target/openrisc/cpu.c +++ b/target/openrisc/cpu.c @@ -21,8 +21,9 @@ #include "qapi/error.h" #include "qemu/qemu-print.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/translation-block.h" #include "fpu/softfloat-helpers.h" +#include "accel/tcg/cpu-ops.h" #include "tcg/tcg.h" static void openrisc_cpu_set_pc(CPUState *cs, vaddr value) @@ -40,6 +41,18 @@ static vaddr openrisc_cpu_get_pc(CPUState *cs) return cpu->env.pc; } +static TCGTBCPUState openrisc_get_tb_cpu_state(CPUState *cs) +{ + CPUOpenRISCState *env = cpu_env(cs); + + return (TCGTBCPUState){ + .pc = env->pc, + .flags = ((env->dflag ? TB_FLAGS_DFLAG : 0) + | (cpu_get_gpr(env, 0) ? 0 : TB_FLAGS_R0_0) + | (env->sr & (SR_SM | SR_DME | SR_IME | SR_OVE))), + }; +} + static void openrisc_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -62,11 +75,13 @@ static void openrisc_restore_state_to_opc(CPUState *cs, } } +#ifndef CONFIG_USER_ONLY static bool openrisc_cpu_has_work(CPUState *cs) { return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER); } +#endif /* !CONFIG_USER_ONLY */ static int openrisc_cpu_mmu_index(CPUState *cs, bool ifetch) { @@ -82,6 +97,7 @@ static int openrisc_cpu_mmu_index(CPUState *cs, bool ifetch) static void openrisc_disas_set_info(CPUState *cpu, disassemble_info *info) { + info->endian = BFD_ENDIAN_BIG; info->print_insn = print_insn_or1k; } @@ -105,6 +121,14 @@ static void openrisc_cpu_reset_hold(Object *obj, ResetType type) set_float_detect_tininess(float_tininess_before_rounding, &cpu->env.fp_status); + /* + * TODO: this is probably not the correct NaN propagation rule for + * this architecture. 
+ */ + set_float_2nan_prop_rule(float_2nan_prop_x87, &cpu->env.fp_status); + + /* Default NaN: sign bit clear, frac msb set */ + set_float_default_nan_pattern(0b01000000, &cpu->env.fp_status); #ifndef CONFIG_USER_ONLY cpu->env.picmr = 0x00000000; @@ -156,6 +180,10 @@ static void openrisc_cpu_realizefn(DeviceState *dev, Error **errp) qemu_init_vcpu(cs); cpu_reset(cs); +#ifndef CONFIG_USER_ONLY + cpu_openrisc_clock_init(OPENRISC_CPU(dev)); +#endif + occ->parent_realize(dev, errp); } @@ -219,26 +247,33 @@ static void openrisc_any_initfn(Object *obj) #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps openrisc_sysemu_ops = { + .has_work = openrisc_cpu_has_work, .get_phys_page_debug = openrisc_cpu_get_phys_page_debug, }; #endif -#include "hw/core/tcg-cpu-ops.h" - static const TCGCPUOps openrisc_tcg_ops = { + .guest_default_memory_order = 0, + .mttcg_supported = true, + .initialize = openrisc_translate_init, + .translate_code = openrisc_translate_code, + .get_tb_cpu_state = openrisc_get_tb_cpu_state, .synchronize_from_tb = openrisc_cpu_synchronize_from_tb, .restore_state_to_opc = openrisc_restore_state_to_opc, + .mmu_index = openrisc_cpu_mmu_index, #ifndef CONFIG_USER_ONLY .tlb_fill = openrisc_cpu_tlb_fill, + .pointer_wrap = cpu_pointer_wrap_uint32, .cpu_exec_interrupt = openrisc_cpu_exec_interrupt, .cpu_exec_halt = openrisc_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = openrisc_cpu_do_interrupt, #endif /* !CONFIG_USER_ONLY */ }; -static void openrisc_cpu_class_init(ObjectClass *oc, void *data) +static void openrisc_cpu_class_init(ObjectClass *oc, const void *data) { OpenRISCCPUClass *occ = OPENRISC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(occ); @@ -251,8 +286,6 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data) &occ->parent_phases); cc->class_by_name = openrisc_cpu_class_by_name; - cc->has_work = openrisc_cpu_has_work; - cc->mmu_index = openrisc_cpu_mmu_index; cc->dump_state = openrisc_cpu_dump_state; cc->set_pc = openrisc_cpu_set_pc; cc->get_pc = openrisc_cpu_get_pc; diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h index c9fe9ae12da..f4bcf00b073 100644 --- a/target/openrisc/cpu.h +++ b/target/openrisc/cpu.h @@ -21,7 +21,9 @@ #define OPENRISC_CPU_H #include "cpu-qom.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #include "fpu/softfloat-types.h" /** @@ -38,8 +40,6 @@ struct OpenRISCCPUClass { ResettablePhases parent_phases; }; -#define TARGET_INSN_START_EXTRA_WORDS 1 - enum { MMU_NOMMU_IDX = 0, MMU_SUPERVISOR_IDX = 1, @@ -301,6 +301,8 @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags); int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void openrisc_translate_init(void); +void openrisc_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); int print_insn_or1k(bfd_vma addr, disassemble_info *info); #ifndef CONFIG_USER_ONLY @@ -330,8 +332,6 @@ void cpu_openrisc_count_stop(OpenRISCCPU *cpu); #define CPU_RESOLVING_TYPE TYPE_OPENRISC_CPU -#include "exec/cpu-all.h" - #define TB_FLAGS_SM SR_SM #define TB_FLAGS_DME SR_DME #define TB_FLAGS_IME SR_IME @@ -349,16 +349,6 @@ static inline void cpu_set_gpr(CPUOpenRISCState *env, int i, uint32_t val) env->shadow_gpr[0][i] = val; } -static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags) -{ - *pc = env->pc; - *cs_base = 0; - *flags = (env->dflag ? 
TB_FLAGS_DFLAG : 0) - | (cpu_get_gpr(env, 0) ? 0 : TB_FLAGS_R0_0) - | (env->sr & (SR_SM | SR_DME | SR_IME | SR_OVE)); -} - static inline uint32_t cpu_get_sr(const CPUOpenRISCState *env) { return (env->sr diff --git a/target/openrisc/exception.c b/target/openrisc/exception.c index 8699c3dcea4..e213be36b6b 100644 --- a/target/openrisc/exception.c +++ b/target/openrisc/exception.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exception.h" G_NORETURN void raise_exception(OpenRISCCPU *cpu, uint32_t excp) diff --git a/target/openrisc/exception_helper.c b/target/openrisc/exception_helper.c index 1f5be4bed90..c2c9d136528 100644 --- a/target/openrisc/exception_helper.c +++ b/target/openrisc/exception_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "exception.h" diff --git a/target/openrisc/fpu_helper.c b/target/openrisc/fpu_helper.c index 8b81d2f62f7..dba997255c6 100644 --- a/target/openrisc/fpu_helper.c +++ b/target/openrisc/fpu_helper.c @@ -20,7 +20,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" diff --git a/target/openrisc/gdbstub.c b/target/openrisc/gdbstub.c index c2a77d5d4d5..45bba80d878 100644 --- a/target/openrisc/gdbstub.c +++ b/target/openrisc/gdbstub.c @@ -47,14 +47,9 @@ int openrisc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int openrisc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { - CPUClass *cc = CPU_GET_CLASS(cs); CPUOpenRISCState *env = cpu_env(cs); uint32_t tmp; - if (n > cc->gdb_num_core_regs) { - return 0; - } - tmp = ldl_p(mem_buf); if (n < 32) { diff --git a/target/openrisc/interrupt.c b/target/openrisc/interrupt.c index b3b5b405779..486823094c8 100644 --- a/target/openrisc/interrupt.c +++ b/target/openrisc/interrupt.c @@ -20,7 +20,6 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" -#include "exec/exec-all.h" #include "gdbstub/helpers.h" #include "qemu/host-utils.h" #ifndef CONFIG_USER_ONLY diff --git a/target/openrisc/interrupt_helper.c b/target/openrisc/interrupt_helper.c index ab4ea88b692..1553ebc71b0 100644 --- a/target/openrisc/interrupt_helper.c +++ b/target/openrisc/interrupt_helper.c @@ -20,7 +20,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" void HELPER(rfe)(CPUOpenRISCState *env) diff --git a/target/openrisc/machine.c b/target/openrisc/machine.c index 3574e571cb2..081c706d02c 100644 --- a/target/openrisc/machine.c +++ b/target/openrisc/machine.c @@ -136,7 +136,7 @@ const VMStateDescription vmstate_openrisc_cpu = { .minimum_version_id = 1, .post_load = cpu_post_load, .fields = (const VMStateField[]) { - VMSTATE_CPU(), + VMSTATE_STRUCT(parent_obj, OpenRISCCPU, 0, vmstate_cpu_common, CPUState), VMSTATE_STRUCT(env, OpenRISCCPU, 1, vmstate_env, CPUOpenRISCState), VMSTATE_END_OF_LIST() } diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c index c632d5230b2..acea50c41eb 100644 --- a/target/openrisc/mmu.c +++ b/target/openrisc/mmu.c @@ -21,8 +21,9 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" +#include "exec/target_page.h" #include "gdbstub/helpers.h" #include "qemu/host-utils.h" #include "hw/loader.h" diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c index 77567afba47..d96b41a01c2 100644 --- 
a/target/openrisc/sys_helper.c +++ b/target/openrisc/sys_helper.c @@ -20,7 +20,8 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" +#include "exec/target_page.h" #include "exec/helper-proto.h" #include "exception.h" #ifndef CONFIG_USER_ONLY @@ -217,7 +218,7 @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd, { OpenRISCCPU *cpu = env_archcpu(env); #ifndef CONFIG_USER_ONLY - uint64_t data[TARGET_INSN_START_WORDS]; + uint64_t data[INSN_START_WORDS]; MachineState *ms = MACHINE(qdev_get_machine()); CPUState *cs = env_cpu(env); int idx; diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c index ca566847cb4..5ab3bc7021d 100644 --- a/target/openrisc/translate.c +++ b/target/openrisc/translate.c @@ -20,13 +20,14 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-mmu-index.h" #include "tcg/tcg-op.h" #include "qemu/log.h" #include "qemu/bitops.h" #include "qemu/qemu-print.h" #include "exec/translator.h" - +#include "exec/translation-block.h" +#include "exec/target_page.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" @@ -219,8 +220,7 @@ static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) TCGv t0 = tcg_temp_new(); TCGv res = tcg_temp_new(); - tcg_gen_add2_tl(res, cpu_sr_cy, srca, dc->zero, cpu_sr_cy, dc->zero); - tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, dc->zero); + tcg_gen_addcio_tl(res, cpu_sr_cy, srca, srcb, cpu_sr_cy); tcg_gen_xor_tl(cpu_sr_ov, srca, srcb); tcg_gen_xor_tl(t0, res, srcb); tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov); @@ -1645,8 +1645,8 @@ static const TranslatorOps openrisc_tr_ops = { .tb_stop = openrisc_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void openrisc_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext ctx; diff --git a/target/ppc/arch_dump.c b/target/ppc/arch_dump.c index f45474133a7..80ac6c3e320 100644 --- a/target/ppc/arch_dump.c +++ b/target/ppc/arch_dump.c @@ -15,8 +15,8 @@ #include "qemu/osdep.h" #include "cpu.h" #include "elf.h" -#include "sysemu/dump.h" -#include "sysemu/kvm.h" +#include "system/dump.h" +#include "system/kvm.h" #ifdef TARGET_PPC64 #define ELFCLASS ELFCLASS64 diff --git a/target/ppc/compat.c b/target/ppc/compat.c index ebef2cccecf..55de3bd5d5d 100644 --- a/target/ppc/compat.c +++ b/target/ppc/compat.c @@ -18,10 +18,10 @@ */ #include "qemu/osdep.h" -#include "sysemu/hw_accel.h" -#include "sysemu/kvm.h" +#include "system/hw_accel.h" +#include "system/kvm.h" #include "kvm_ppc.h" -#include "sysemu/cpus.h" +#include "system/cpus.h" #include "qemu/error-report.h" #include "qapi/error.h" #include "qapi/visitor.h" @@ -100,6 +100,13 @@ static const CompatInfo compat_table[] = { .pcr_level = PCR_COMPAT_3_10, .max_vthreads = 8, }, + { /* POWER11, ISA3.10 */ + .name = "power11", + .pvr = CPU_POWERPC_LOGICAL_3_10_P11, + .pcr = PCR_COMPAT_3_10, + .pcr_level = PCR_COMPAT_3_10, + .max_vthreads = 8, + }, }; static const CompatInfo *compat_by_pvr(uint32_t pvr) @@ -132,6 +139,10 @@ static bool pcc_compat(PowerPCCPUClass *pcc, uint32_t compat_pvr, /* Outside specified range */ return false; } + if (compat->pvr > pcc->spapr_logical_pvr) { + /* Older CPU cannot support a newer processor's compat mode */ + return false; + } if (!(pcc->pcr_supported & compat->pcr_level)) { /* Not supported by this CPU */ return false; diff --git a/target/ppc/cpu-models.c 
b/target/ppc/cpu-models.c index f2301b43f78..ea86ea202ab 100644 --- a/target/ppc/cpu-models.c +++ b/target/ppc/cpu-models.c @@ -35,7 +35,7 @@ #define POWERPC_DEF_SVR(_name, _desc, _pvr, _svr, _type) \ static void \ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_class_init) \ - (ObjectClass *oc, void *data) \ + (ObjectClass *oc, const void *data) \ { \ DeviceClass *dc = DEVICE_CLASS(oc); \ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); \ @@ -734,6 +734,8 @@ "POWER9 v2.2") POWERPC_DEF("power10_v2.0", CPU_POWERPC_POWER10_DD20, POWER10, "POWER10 v2.0") + POWERPC_DEF("power11_v2.0", CPU_POWERPC_POWER11_DD20, POWER11, + "POWER11_v2.0") #endif /* defined (TARGET_PPC64) */ /***************************************************************************/ @@ -909,6 +911,7 @@ PowerPCCPUAlias ppc_cpu_aliases[] = { { "power8nvl", "power8nvl_v1.0" }, { "power9", "power9_v2.2" }, { "power10", "power10_v2.0" }, + { "power11", "power11_v2.0" }, #endif /* Generic PowerPCs */ diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h index 0229ef3a9a5..72ad31ba50d 100644 --- a/target/ppc/cpu-models.h +++ b/target/ppc/cpu-models.h @@ -354,6 +354,8 @@ enum { CPU_POWERPC_POWER10_BASE = 0x00800000, CPU_POWERPC_POWER10_DD1 = 0x00801100, CPU_POWERPC_POWER10_DD20 = 0x00801200, + CPU_POWERPC_POWER11_BASE = 0x00820000, + CPU_POWERPC_POWER11_DD20 = 0x00821200, CPU_POWERPC_970_v22 = 0x00390202, CPU_POWERPC_970FX_v10 = 0x00391100, CPU_POWERPC_970FX_v20 = 0x003C0200, @@ -391,6 +393,7 @@ enum { CPU_POWERPC_LOGICAL_2_07 = 0x0F000004, CPU_POWERPC_LOGICAL_3_00 = 0x0F000005, CPU_POWERPC_LOGICAL_3_10 = 0x0F000006, + CPU_POWERPC_LOGICAL_3_10_P11 = 0x0F000007, }; /* System version register (used on MPC 8xxx) */ diff --git a/target/ppc/cpu-param.h b/target/ppc/cpu-param.h index 77c5ed9a678..e4ed9080ee9 100644 --- a/target/ppc/cpu-param.h +++ b/target/ppc/cpu-param.h @@ -2,14 +2,13 @@ * PowerPC cpu parameters for qemu. * * Copyright (c) 2007 Jocelyn Mayer - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #ifndef PPC_CPU_PARAM_H #define PPC_CPU_PARAM_H #ifdef TARGET_PPC64 -# define TARGET_LONG_BITS 64 /* * Note that the official physical address space bits is 62-M where M * is implementation dependent. I've not looked up M for the set of @@ -27,7 +26,6 @@ # define TARGET_VIRT_ADDR_SPACE_BITS 64 # endif #else -# define TARGET_LONG_BITS 32 # define TARGET_PHYS_ADDR_SPACE_BITS 36 # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif @@ -35,11 +33,10 @@ #ifdef CONFIG_USER_ONLY /* Allow user-only to vary page size from 4k */ # define TARGET_PAGE_BITS_VARY -# define TARGET_PAGE_BITS_MIN 12 #else # define TARGET_PAGE_BITS 12 #endif -#define TCG_GUEST_DEFAULT_MO 0 +#define TARGET_INSN_START_EXTRA_WORDS 0 #endif diff --git a/target/ppc/cpu.c b/target/ppc/cpu.c index e3ad8e0c27f..4d8faaddee2 100644 --- a/target/ppc/cpu.c +++ b/target/ppc/cpu.c @@ -22,10 +22,11 @@ #include "cpu-models.h" #include "cpu-qom.h" #include "exec/log.h" +#include "exec/watchpoint.h" #include "fpu/softfloat-helpers.h" #include "mmu-hash64.h" #include "helper_regs.h" -#include "sysemu/tcg.h" +#include "system/tcg.h" target_ulong cpu_read_xer(const CPUPPCState *env) { @@ -130,11 +131,13 @@ void ppc_store_ciabr(CPUPPCState *env, target_ulong val) ppc_update_ciabr(env); } -void ppc_update_daw0(CPUPPCState *env) +void ppc_update_daw(CPUPPCState *env, int rid) { CPUState *cs = env_cpu(env); - target_ulong deaw = env->spr[SPR_DAWR0] & PPC_BITMASK(0, 60); - uint32_t dawrx = env->spr[SPR_DAWRX0]; + int spr_dawr = rid ? 
SPR_DAWR1 : SPR_DAWR0; + int spr_dawrx = rid ? SPR_DAWRX1 : SPR_DAWRX0; + target_ulong deaw = env->spr[spr_dawr] & PPC_BITMASK(0, 60); + uint32_t dawrx = env->spr[spr_dawrx]; int mrd = extract32(dawrx, PPC_BIT_NR(48), 54 - 48); bool dw = extract32(dawrx, PPC_BIT_NR(57), 1); bool dr = extract32(dawrx, PPC_BIT_NR(58), 1); @@ -144,9 +147,9 @@ void ppc_update_daw0(CPUPPCState *env) vaddr len; int flags; - if (env->dawr0_watchpoint) { - cpu_watchpoint_remove_by_ref(cs, env->dawr0_watchpoint); - env->dawr0_watchpoint = NULL; + if (env->dawr_watchpoint[rid]) { + cpu_watchpoint_remove_by_ref(cs, env->dawr_watchpoint[rid]); + env->dawr_watchpoint[rid] = NULL; } if (!dr && !dw) { @@ -166,28 +169,45 @@ void ppc_update_daw0(CPUPPCState *env) flags |= BP_MEM_WRITE; } - cpu_watchpoint_insert(cs, deaw, len, flags, &env->dawr0_watchpoint); + cpu_watchpoint_insert(cs, deaw, len, flags, &env->dawr_watchpoint[rid]); } void ppc_store_dawr0(CPUPPCState *env, target_ulong val) { env->spr[SPR_DAWR0] = val; - ppc_update_daw0(env); + ppc_update_daw(env, 0); } -void ppc_store_dawrx0(CPUPPCState *env, uint32_t val) +static void ppc_store_dawrx(CPUPPCState *env, uint32_t val, int rid) { int hrammc = extract32(val, PPC_BIT_NR(56), 1); if (hrammc) { /* This might be done with a second watchpoint at the xor of DEAW[0] */ - qemu_log_mask(LOG_UNIMP, "%s: DAWRX0[HRAMMC] is unimplemented\n", - __func__); + qemu_log_mask(LOG_UNIMP, "%s: DAWRX%d[HRAMMC] is unimplemented\n", + __func__, rid); } - env->spr[SPR_DAWRX0] = val; - ppc_update_daw0(env); + env->spr[rid ? SPR_DAWRX1 : SPR_DAWRX0] = val; + ppc_update_daw(env, rid); +} + +void ppc_store_dawrx0(CPUPPCState *env, uint32_t val) +{ + ppc_store_dawrx(env, val, 0); +} + +void ppc_store_dawr1(CPUPPCState *env, target_ulong val) +{ + env->spr[SPR_DAWR1] = val; + ppc_update_daw(env, 1); +} + +void ppc_store_dawrx1(CPUPPCState *env, uint32_t val) +{ + ppc_store_dawrx(env, val, 1); } + #endif #endif diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 321ed2da75b..6b90543811f 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -22,7 +22,9 @@ #include "qemu/int128.h" #include "qemu/cpu-float.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #include "cpu-qom.h" #include "qom/object.h" #include "hw/registerfields.h" @@ -40,6 +42,7 @@ #define PPC_BIT_NR(bit) (63 - (bit)) #define PPC_BIT(bit) (0x8000000000000000ULL >> (bit)) +#define PPC_BIT32_NR(bit) (31 - (bit)) #define PPC_BIT32(bit) (0x80000000 >> (bit)) #define PPC_BIT8(bit) (0x80 >> (bit)) #define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) @@ -215,6 +218,8 @@ typedef enum powerpc_excp_t { POWERPC_EXCP_POWER9, /* POWER10 exception model */ POWERPC_EXCP_POWER10, + /* POWER11 exception model */ + POWERPC_EXCP_POWER11, } powerpc_excp_t; /*****************************************************************************/ @@ -634,8 +639,8 @@ FIELD(MSR, LE, MSR_LE, 1) #define PSSCR_EC PPC_BIT(43) /* Exit Criterion */ /* HFSCR bits */ -#define HFSCR_MSGP PPC_BIT(53) /* Privileged Message Send Facilities */ -#define HFSCR_BHRB PPC_BIT(59) /* BHRB Instructions */ +#define HFSCR_MSGP PPC_BIT_NR(53) /* Privileged Message Send Facilities */ +#define HFSCR_BHRB PPC_BIT_NR(59) /* BHRB Instructions */ #define HFSCR_IC_MSGP 0xA #define DBCR0_ICMP (1 << 27) @@ -1250,13 +1255,14 @@ struct CPUArchState { /* For SMT processors */ bool has_smt_siblings; int core_index; + int chip_index; #if !defined(CONFIG_USER_ONLY) /* MMU context, only relevant for full system emulation */ #if 
defined(TARGET_PPC64) ppc_slb_t slb[MAX_SLB_ENTRIES]; /* PowerPC 64 SLB area */ struct CPUBreakpoint *ciabr_breakpoint; - struct CPUWatchpoint *dawr0_watchpoint; + struct CPUWatchpoint *dawr_watchpoint[2]; #endif target_ulong sr[32]; /* segment registers */ uint32_t nb_BATs; /* number of BATs */ @@ -1352,6 +1358,18 @@ struct CPUArchState { * special way (such as routing some resume causes to 0x100, i.e. sreset). */ bool resume_as_sreset; + + /* + * On powernv, quiesced means the CPU has been stopped using PC direct + * control xscom registers. + * + * On spapr, quiesced means it is in the "RTAS stopped" state. + * + * The core halted/stopped variables aren't sufficient for this, because + * they can be changed with various side-band operations like qmp cont, + * powersave interrupts, etc. + */ + bool quiesced; #endif /* These resources are used only in TCG */ @@ -1408,8 +1426,10 @@ struct CPUArchState { #define THREAD_SIBLING_FOREACH(cs, cs_sibling) \ CPU_FOREACH(cs_sibling) \ - if (POWERPC_CPU(cs)->env.core_index == \ - POWERPC_CPU(cs_sibling)->env.core_index) + if ((POWERPC_CPU(cs)->env.chip_index == \ + POWERPC_CPU(cs_sibling)->env.chip_index) && \ + (POWERPC_CPU(cs)->env.core_index == \ + POWERPC_CPU(cs_sibling)->env.core_index)) #define SET_FIT_PERIOD(a_, b_, c_, d_) \ do { \ @@ -1454,16 +1474,6 @@ struct ArchCPU { /* Those resources are used only during code translation */ /* opcode handlers */ opc_handler_t *opcodes[PPC_CPU_OPCODES_LEN]; - - /* Fields related to migration compatibility hacks */ - bool pre_2_8_migration; - target_ulong mig_msr_mask; - uint64_t mig_insns_flags; - uint64_t mig_insns_flags2; - uint32_t mig_nb_BATs; - bool pre_2_10_migration; - bool pre_3_0_migration; - int32_t mig_slb_nr; }; /** @@ -1482,6 +1492,7 @@ struct PowerPCCPUClass { void (*parent_parse_features)(const char *type, char *str, Error **errp); uint32_t pvr; + uint32_t spapr_logical_pvr; /* * If @best is false, match if pcc is in the family of pvr * Else match only if pcc is the best match for pvr in this family. 
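[Editor's note, not part of the patch] The spapr_logical_pvr field added to PowerPCCPUClass above works together with the pcc_compat() check introduced earlier in this diff ("Older CPU cannot support a newer processor's compat mode"): the logical PVR constants are ordered by ISA level (0x0F000005 for v3.00, 0x0F000006 for v3.10, 0x0F000007 for the POWER11 flavour of v3.10), so that check reduces to one numeric comparison. The standalone sketch below restates only that comparison; the function name and main() harness are invented for illustration and it deliberately ignores the separate pcr_supported check.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Logical PVR values as defined in cpu-models.h by this patch. */
#define CPU_POWERPC_LOGICAL_3_00     0x0F000005u
#define CPU_POWERPC_LOGICAL_3_10     0x0F000006u
#define CPU_POWERPC_LOGICAL_3_10_P11 0x0F000007u

/*
 * Illustrative stand-in for the pcc_compat() fragment added above:
 * a CPU whose spapr_logical_pvr is older (numerically smaller) than
 * the requested compat entry must reject that compat mode.
 */
static bool compat_pvr_allowed(uint32_t spapr_logical_pvr, uint32_t compat_pvr)
{
    return compat_pvr <= spapr_logical_pvr;
}

int main(void)
{
    /* POWER10 (logical v3.10) asked for power11 compat: rejected (0). */
    printf("%d\n", compat_pvr_allowed(CPU_POWERPC_LOGICAL_3_10,
                                      CPU_POWERPC_LOGICAL_3_10_P11));
    /* POWER11 asked for power10 (v3.10) compat: accepted (1). */
    printf("%d\n", compat_pvr_allowed(CPU_POWERPC_LOGICAL_3_10_P11,
                                      CPU_POWERPC_LOGICAL_3_10));
    return 0;
}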
@@ -1583,20 +1594,22 @@ extern const VMStateDescription vmstate_ppc_cpu; /*****************************************************************************/ void ppc_translate_init(void); +void ppc_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); #if !defined(CONFIG_USER_ONLY) void ppc_store_sdr1(CPUPPCState *env, target_ulong value); void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val); void ppc_update_ciabr(CPUPPCState *env); void ppc_store_ciabr(CPUPPCState *env, target_ulong value); -void ppc_update_daw0(CPUPPCState *env); +void ppc_update_daw(CPUPPCState *env, int rid); void ppc_store_dawr0(CPUPPCState *env, target_ulong value); void ppc_store_dawrx0(CPUPPCState *env, uint32_t value); +void ppc_store_dawr1(CPUPPCState *env, target_ulong value); +void ppc_store_dawrx1(CPUPPCState *env, uint32_t value); #endif /* !defined(CONFIG_USER_ONLY) */ void ppc_store_msr(CPUPPCState *env, target_ulong value); -void ppc_cpu_list(void); - /* Time-base and decrementer management */ uint64_t cpu_ppc_load_tbl(CPUPPCState *env); uint32_t cpu_ppc_load_tbu(CPUPPCState *env); @@ -1658,8 +1671,6 @@ static inline uint64_t ppc_dump_gpr(CPUPPCState *env, int gprn) int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp); int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val); -#define cpu_list ppc_cpu_list - /* MMU modes definitions */ #define MMU_USER_IDX 0 static inline int ppc_env_mmu_index(CPUPPCState *env, bool ifetch) @@ -1689,8 +1700,6 @@ void ppc_compat_add_property(Object *obj, const char *name, uint32_t *compat_pvr, const char *basedesc); #endif /* defined(TARGET_PPC64) */ -#include "exec/cpu-all.h" - /*****************************************************************************/ /* CRF definitions */ #define CRF_LT_BIT 3 @@ -2091,6 +2100,7 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_VTB (0x351) #define SPR_LDBAR (0x352) #define SPR_MMCRC (0x353) +#define SPR_PMSR (0x355) #define SPR_PSSCR (0x357) #define SPR_440_INV0 (0x370) #define SPR_440_INV1 (0x371) @@ -2098,8 +2108,10 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_440_INV2 (0x372) #define SPR_TRIG2 (0x372) #define SPR_440_INV3 (0x373) +#define SPR_PMCR (0x374) #define SPR_440_ITV0 (0x374) #define SPR_440_ITV1 (0x375) +#define SPR_RWMR (0x375) #define SPR_440_ITV2 (0x376) #define SPR_440_ITV3 (0x377) #define SPR_440_CCR1 (0x378) @@ -2739,24 +2751,6 @@ void cpu_write_xer(CPUPPCState *env, target_ulong xer); */ #define is_book3s_arch2x(ctx) (!!((ctx)->insns_flags & PPC_SEGMENT_64B)) -#ifdef CONFIG_DEBUG_TCG -void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags); -#else -static inline void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags) -{ - *pc = env->nip; - *cs_base = 0; - *flags = env->hflags; -} -#endif - -G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception); -G_NORETURN void raise_exception_ra(CPUPPCState *env, uint32_t exception, - uintptr_t raddr); -G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception, - uint32_t error_code); G_NORETURN void raise_exception_err_ra(CPUPPCState *env, uint32_t exception, uint32_t error_code, uintptr_t raddr); @@ -3040,7 +3034,8 @@ static inline int check_attn_none(CPUPPCState *env) #define POWERPC_FAMILY(_name) \ static void \ - glue(glue(ppc_, _name), _cpu_family_class_init)(ObjectClass *, void *); \ + glue(glue(ppc_, _name), _cpu_family_class_init)(ObjectClass *, \ + const void 
*); \ \ static const TypeInfo \ glue(glue(ppc_, _name), _cpu_family_type_info) = { \ diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index 23881d09e9f..a0e77f2673e 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -22,9 +22,9 @@ #include "qemu/osdep.h" #include "disas/dis-asm.h" #include "gdbstub/helpers.h" -#include "sysemu/cpus.h" -#include "sysemu/hw_accel.h" -#include "sysemu/tcg.h" +#include "system/cpus.h" +#include "system/hw_accel.h" +#include "system/tcg.h" #include "cpu-models.h" #include "mmu-hash32.h" #include "mmu-hash64.h" @@ -32,7 +32,7 @@ #include "qemu/module.h" #include "qemu/qemu-print.h" #include "qapi/error.h" -#include "qapi/qmp/qnull.h" +#include "qobject/qnull.h" #include "qapi/visitor.h" #include "hw/qdev-properties.h" #include "hw/ppc/ppc.h" @@ -40,18 +40,18 @@ #include "qemu/cutils.h" #include "disas/capstone.h" #include "fpu/softfloat.h" - +#include "exec/watchpoint.h" #include "helper_regs.h" #include "internal.h" #include "spr_common.h" #include "power8-pmu.h" - #ifndef CONFIG_USER_ONLY #include "hw/boards.h" #include "hw/intc/intc.h" #include "kvm_ppc.h" #endif +#include "cpu_init.h" /* #define PPC_DEBUG_SPR */ /* #define USE_APPLE_GDB */ @@ -921,6 +921,18 @@ static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask, #endif } +static void register_atb_sprs(CPUPPCState *env) +{ + spr_register(env, SPR_ATBL, "ATBL", + &spr_read_atbl, SPR_NOACCESS, + &spr_read_atbl, SPR_NOACCESS, + 0x00000000); + spr_register(env, SPR_ATBU, "ATBU", + &spr_read_atbu, SPR_NOACCESS, + &spr_read_atbu, SPR_NOACCESS, + 0x00000000); +} + /* SPR specific to PowerPC 440 implementation */ static void register_440_sprs(CPUPPCState *env) { @@ -2154,7 +2166,7 @@ static void init_proc_405(CPUPPCState *env) SET_WDT_PERIOD(16, 20, 24, 28); } -POWERPC_FAMILY(405)(ObjectClass *oc, void *data) +POWERPC_FAMILY(405)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -2222,7 +2234,7 @@ static void init_proc_440EP(CPUPPCState *env) SET_WDT_PERIOD(20, 24, 28, 32); } -POWERPC_FAMILY(440EP)(ObjectClass *oc, void *data) +POWERPC_FAMILY(440EP)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -2261,7 +2273,7 @@ POWERPC_FAMILY(440EP)(ObjectClass *oc, void *data) POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } -POWERPC_FAMILY(460EX)(ObjectClass *oc, void *data) +POWERPC_FAMILY(460EX)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -2316,7 +2328,7 @@ static void init_proc_440GP(CPUPPCState *env) SET_WDT_PERIOD(20, 24, 28, 32); } -POWERPC_FAMILY(440GP)(ObjectClass *oc, void *data) +POWERPC_FAMILY(440GP)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -2386,7 +2398,7 @@ static void init_proc_440x5(CPUPPCState *env) SET_WDT_PERIOD(20, 24, 28, 32); } -POWERPC_FAMILY(440x5)(ObjectClass *oc, void *data) +POWERPC_FAMILY(440x5)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -2422,7 +2434,7 @@ POWERPC_FAMILY(440x5)(ObjectClass *oc, void *data) POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } -POWERPC_FAMILY(440x5wDFPU)(ObjectClass *oc, void *data) +POWERPC_FAMILY(440x5wDFPU)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -2471,7 +2483,7 @@ static void 
init_proc_MPC5xx(CPUPPCState *env) /* XXX: TODO: allocate internal IRQ controller */ } -POWERPC_FAMILY(MPC5xx)(ObjectClass *oc, void *data) +POWERPC_FAMILY(MPC5xx)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -2514,7 +2526,7 @@ static void init_proc_MPC8xx(CPUPPCState *env) /* XXX: TODO: allocate internal IRQ controller */ } -POWERPC_FAMILY(MPC8xx)(ObjectClass *oc, void *data) +POWERPC_FAMILY(MPC8xx)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -2565,7 +2577,7 @@ static void init_proc_G2(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(G2)(ObjectClass *oc, void *data) +POWERPC_FAMILY(G2)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -2604,7 +2616,7 @@ POWERPC_FAMILY(G2)(ObjectClass *oc, void *data) POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; } -POWERPC_FAMILY(G2LE)(ObjectClass *oc, void *data) +POWERPC_FAMILY(G2LE)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -2731,14 +2743,6 @@ static void init_proc_e200(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* TOFIX */ - spr_register(env, SPR_BOOKE_DSRR0, "DSRR0", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); - spr_register(env, SPR_BOOKE_DSRR1, "DSRR1", - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, - 0x00000000); init_tlbs_emb(env); init_excp_e200(env, 0xFFFF0000UL); @@ -2747,7 +2751,7 @@ static void init_proc_e200(CPUPPCState *env) /* XXX: TODO: allocate internal IRQ controller */ } -POWERPC_FAMILY(e200)(ObjectClass *oc, void *data) +POWERPC_FAMILY(e200)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -2910,6 +2914,11 @@ static void init_proc_e500(CPUPPCState *env, int version) register_BookE206_sprs(env, 0x000000DF, tlbncfg, mmucfg); register_usprgh_sprs(env); + if (version != fsl_e500v1) { + /* e500v1 has no support for alternate timebase */ + register_atb_sprs(env); + } + spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -3035,7 +3044,7 @@ static void init_proc_e500v1(CPUPPCState *env) init_proc_e500(env, fsl_e500v1); } -POWERPC_FAMILY(e500v1)(ObjectClass *oc, void *data) +POWERPC_FAMILY(e500v1)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3079,7 +3088,7 @@ static void init_proc_e500v2(CPUPPCState *env) init_proc_e500(env, fsl_e500v2); } -POWERPC_FAMILY(e500v2)(ObjectClass *oc, void *data) +POWERPC_FAMILY(e500v2)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3123,7 +3132,7 @@ static void init_proc_e500mc(CPUPPCState *env) init_proc_e500(env, fsl_e500mc); } -POWERPC_FAMILY(e500mc)(ObjectClass *oc, void *data) +POWERPC_FAMILY(e500mc)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3170,7 +3179,7 @@ static void init_proc_e5500(CPUPPCState *env) init_proc_e500(env, fsl_e5500); } -POWERPC_FAMILY(e5500)(ObjectClass *oc, void *data) +POWERPC_FAMILY(e5500)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = 
POWERPC_CPU_CLASS(oc); @@ -3219,7 +3228,7 @@ static void init_proc_e6500(CPUPPCState *env) init_proc_e500(env, fsl_e6500); } -POWERPC_FAMILY(e6500)(ObjectClass *oc, void *data) +POWERPC_FAMILY(e6500)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3282,7 +3291,7 @@ static void init_proc_603(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(603)(ObjectClass *oc, void *data) +POWERPC_FAMILY(603)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3322,7 +3331,7 @@ POWERPC_FAMILY(603)(ObjectClass *oc, void *data) POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK; } -POWERPC_FAMILY(603E)(ObjectClass *oc, void *data) +POWERPC_FAMILY(603E)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3368,7 +3377,7 @@ static void init_proc_e300(CPUPPCState *env) register_e300_sprs(env); } -POWERPC_FAMILY(e300)(ObjectClass *oc, void *data) +POWERPC_FAMILY(e300)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3424,7 +3433,7 @@ static void init_proc_604(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(604)(ObjectClass *oc, void *data) +POWERPC_FAMILY(604)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3470,7 +3479,7 @@ static void init_proc_604E(CPUPPCState *env) register_604e_sprs(env); } -POWERPC_FAMILY(604E)(ObjectClass *oc, void *data) +POWERPC_FAMILY(604E)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3527,7 +3536,7 @@ static void init_proc_740(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(740)(ObjectClass *oc, void *data) +POWERPC_FAMILY(740)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3593,7 +3602,7 @@ static void init_proc_750(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(750)(ObjectClass *oc, void *data) +POWERPC_FAMILY(750)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3740,7 +3749,7 @@ static void init_proc_750cl(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data) +POWERPC_FAMILY(750cl)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3848,7 +3857,7 @@ static void init_proc_750cx(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(750cx)(ObjectClass *oc, void *data) +POWERPC_FAMILY(750cx)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3921,7 +3930,7 @@ static void init_proc_750fx(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(750fx)(ObjectClass *oc, void *data) +POWERPC_FAMILY(750fx)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -3994,7 +4003,7 @@ static void init_proc_750gx(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(750gx)(ObjectClass *oc, void *data) +POWERPC_FAMILY(750gx)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = 
POWERPC_CPU_CLASS(oc); @@ -4054,7 +4063,7 @@ static void init_proc_745(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(745)(ObjectClass *oc, void *data) +POWERPC_FAMILY(745)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -4100,7 +4109,7 @@ static void init_proc_755(CPUPPCState *env) register_755_sprs(env); } -POWERPC_FAMILY(755)(ObjectClass *oc, void *data) +POWERPC_FAMILY(755)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -4167,7 +4176,7 @@ static void init_proc_7400(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(7400)(ObjectClass *oc, void *data) +POWERPC_FAMILY(7400)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -4247,7 +4256,7 @@ static void init_proc_7410(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(7410)(ObjectClass *oc, void *data) +POWERPC_FAMILY(7410)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -4348,7 +4357,7 @@ static void init_proc_7440(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(7440)(ObjectClass *oc, void *data) +POWERPC_FAMILY(7440)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -4471,7 +4480,7 @@ static void init_proc_7450(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(7450)(ObjectClass *oc, void *data) +POWERPC_FAMILY(7450)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -4601,7 +4610,7 @@ static void init_proc_7445(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(7445)(ObjectClass *oc, void *data) +POWERPC_FAMILY(7445)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -4733,7 +4742,7 @@ static void init_proc_7455(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(7455)(ObjectClass *oc, void *data) +POWERPC_FAMILY(7455)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -4885,7 +4894,7 @@ static void init_proc_7457(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(7457)(ObjectClass *oc, void *data) +POWERPC_FAMILY(7457)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -5020,7 +5029,7 @@ static void init_proc_e600(CPUPPCState *env) ppc6xx_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(e600)(ObjectClass *oc, void *data) +POWERPC_FAMILY(e600)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -5171,6 +5180,20 @@ static void register_book3s_207_dbg_sprs(CPUPPCState *env) KVM_REG_PPC_CIABR, 0x00000000); } +static void register_book3s_310_dbg_sprs(CPUPPCState *env) +{ + spr_register_kvm_hv(env, SPR_DAWR1, "DAWR1", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_dawr1, + KVM_REG_PPC_DAWR1, 0x00000000); + spr_register_kvm_hv(env, SPR_DAWRX1, "DAWRX1", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_dawrx1, + KVM_REG_PPC_DAWRX1, 0x00000000); +} + static void 
register_970_dbg_sprs(CPUPPCState *env) { /* Breakpoints */ @@ -5772,6 +5795,23 @@ static void register_power9_book4_sprs(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_WORT, 0); + spr_register_hv(env, SPR_RWMR, "RWMR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_generic, + 0x00000000); + + /* SPRC/SPRD exist in earlier CPUs but only tested on POWER9/10 */ + spr_register_hv(env, SPR_POWER_SPRC, "SPRC", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_sprc, + 0x00000000); + spr_register_hv(env, SPR_POWER_SPRD, "SPRD", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_sprd, &spr_write_sprd, + 0x00000000); #endif } @@ -5793,17 +5833,6 @@ static void register_power8_book4_sprs(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, KVM_REG_PPC_WORT, 0); - /* SPRC/SPRD exist in earlier CPUs but only tested on POWER9/10 */ - spr_register_hv(env, SPR_POWER_SPRC, "SPRC", - SPR_NOACCESS, SPR_NOACCESS, - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_sprc, - 0x00000000); - spr_register_hv(env, SPR_POWER_SPRD, "SPRD", - SPR_NOACCESS, SPR_NOACCESS, - SPR_NOACCESS, SPR_NOACCESS, - &spr_read_sprd, &spr_write_sprd, - 0x00000000); #endif } @@ -5965,7 +5994,7 @@ static void init_proc_970(CPUPPCState *env) ppc970_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(970)(ObjectClass *oc, void *data) +POWERPC_FAMILY(970)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -6040,7 +6069,7 @@ static void init_proc_power5plus(CPUPPCState *env) ppc970_irq_init(env_archcpu(env)); } -POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data) +POWERPC_FAMILY(POWER5P)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); @@ -6146,13 +6175,14 @@ static bool ppc_pvr_match_power7(PowerPCCPUClass *pcc, uint32_t pvr, bool best) return true; } -POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data) +POWERPC_FAMILY(POWER7)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); dc->fw_name = "PowerPC,POWER7"; dc->desc = "POWER7"; + pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_2_06_PLUS; pcc->pvr_match = ppc_pvr_match_power7; pcc->pcr_mask = PCR_VEC_DIS | PCR_VSX_DIS | PCR_COMPAT_2_05; pcc->pcr_supported = PCR_COMPAT_2_06 | PCR_COMPAT_2_05; @@ -6309,13 +6339,14 @@ static bool ppc_pvr_match_power8(PowerPCCPUClass *pcc, uint32_t pvr, bool best) return true; } -POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data) +POWERPC_FAMILY(POWER8)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); dc->fw_name = "PowerPC,POWER8"; dc->desc = "POWER8"; + pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_2_07; pcc->pvr_match = ppc_pvr_match_power8; pcc->pcr_mask = PCR_TM_DIS | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->pcr_supported = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; @@ -6407,7 +6438,7 @@ static struct ppc_radix_page_info POWER9_radix_page_info = { #endif /* CONFIG_USER_ONLY */ #define POWER9_BHRB_ENTRIES_LOG2 5 -static void init_proc_POWER9(CPUPPCState *env) +static void register_power9_common_sprs(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); @@ -6426,7 +6457,6 @@ static void init_proc_POWER9(CPUPPCState *env) register_power5p_ear_sprs(env); 
register_power5p_tb_sprs(env); register_power6_common_sprs(env); - register_HEIR32_spr(env); register_power6_dbg_sprs(env); register_power7_common_sprs(env); register_power8_tce_address_control_sprs(env); @@ -6444,16 +6474,32 @@ static void init_proc_POWER9(CPUPPCState *env) register_power8_rpr_sprs(env); register_power9_mmu_sprs(env); - /* POWER9 Specific registers */ - spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL, - spr_read_generic, spr_write_generic, - KVM_REG_PPC_TIDR, 0); - /* FIXME: Filter fields properly based on privilege level */ spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL, spr_read_generic, spr_write_generic, KVM_REG_PPC_PSSCR, 0); + spr_register_hv(env, SPR_PMSR, "PMSR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_pmsr, SPR_NOACCESS, + 0); + spr_register_hv(env, SPR_PMCR, "PMCR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_pmcr, + PPC_BIT(63)); /* Version 1 (POWER9/10) */ + +} + +static void init_proc_POWER9(CPUPPCState *env) +{ + register_power9_common_sprs(env); + register_HEIR32_spr(env); + /* POWER9 Specific registers */ + spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL, + spr_read_generic, spr_write_generic, + KVM_REG_PPC_TIDR, 0); /* env variables */ env->dcache_line_size = 128; env->icache_line_size = 128; @@ -6502,66 +6548,24 @@ static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr, bool best) return false; } -POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) +POWERPC_FAMILY(POWER9)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); dc->fw_name = "PowerPC,POWER9"; dc->desc = "POWER9"; + pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_3_00; pcc->pvr_match = ppc_pvr_match_power9; - pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07; - pcc->pcr_supported = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | - PCR_COMPAT_2_05; + pcc->pcr_mask = PPC_PCR_MASK_POWER9; + pcc->pcr_supported = PPC_PCR_SUPPORTED_POWER9; pcc->init_proc = init_proc_POWER9; pcc->check_pow = check_pow_nocheck; pcc->check_attn = check_attn_hid0_power9; - pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | - PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | - PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | - PPC_FLOAT_FRSQRTES | - PPC_FLOAT_STFIWX | - PPC_FLOAT_EXT | - PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | - PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | - PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | - PPC_SEGMENT_64B | PPC_SLBI | - PPC_POPCNTB | PPC_POPCNTWD | - PPC_CILDST; - pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | - PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | - PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | - PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | - PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | - PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | - PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_MEM_LWSYNC | - PPC2_BCDA_ISA206; - pcc->msr_mask = (1ull << MSR_SF) | - (1ull << MSR_HV) | - (1ull << MSR_TM) | - (1ull << MSR_VR) | - (1ull << MSR_VSX) | - (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << MSR_FP) | - (1ull << MSR_ME) | - (1ull << MSR_FE0) | - (1ull << MSR_SE) | - (1ull << MSR_DE) | - (1ull << MSR_FE1) | - (1ull << MSR_IR) | - (1ull << MSR_DR) | - (1ull << MSR_PMM) | - (1ull << MSR_RI) | - (1ull << MSR_LE); - pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | - (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | - LPCR_UPRT | LPCR_EVIRT | 
LPCR_ONL | LPCR_HR | LPCR_LD | - (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE | - LPCR_DEE | LPCR_OEE)) - | LPCR_MER | LPCR_GTSE | LPCR_TC | - LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE; + pcc->insns_flags = PPC_INSNS_FLAGS_POWER9; + pcc->insns_flags2 = PPC_INSNS_FLAGS2_POWER9; + pcc->msr_mask = PPC_MSR_MASK_POWER9; + pcc->lpcr_mask = PPC_LPCR_MASK_POWER9; pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE; pcc->mmu_model = POWERPC_MMU_3_00; #if !defined(CONFIG_USER_ONLY) @@ -6574,10 +6578,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) pcc->excp_model = POWERPC_EXCP_POWER9; pcc->bus_model = PPC_FLAGS_INPUT_POWER9; pcc->bfd_mach = bfd_mach_ppc64; - pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | - POWERPC_FLAG_BE | POWERPC_FLAG_PMM | - POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | - POWERPC_FLAG_VSX | POWERPC_FLAG_TM | POWERPC_FLAG_SCV; + pcc->flags = POWERPC_FLAGS_POWER9; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; } @@ -6604,50 +6605,13 @@ static struct ppc_radix_page_info POWER10_radix_page_info = { #define POWER10_BHRB_ENTRIES_LOG2 5 static void init_proc_POWER10(CPUPPCState *env) { - /* Common Registers */ - init_proc_book3s_common(env); - register_book3s_207_dbg_sprs(env); - - /* Common TCG PMU */ - init_tcg_pmu_power8(env); - - /* POWER8 Specific Registers */ - register_book3s_ids_sprs(env); - register_amr_sprs(env); - register_iamr_sprs(env); - register_book3s_purr_sprs(env); - register_power5p_common_sprs(env); - register_power5p_lpar_sprs(env); - register_power5p_ear_sprs(env); - register_power5p_tb_sprs(env); - register_power6_common_sprs(env); + register_power9_common_sprs(env); register_HEIR64_spr(env); - register_power6_dbg_sprs(env); - register_power7_common_sprs(env); - register_power8_tce_address_control_sprs(env); - register_power8_ids_sprs(env); - register_power8_ebb_sprs(env); - register_power8_fscr_sprs(env); - register_power8_pmu_sup_sprs(env); - register_power8_pmu_user_sprs(env); - register_power8_tm_sprs(env); - register_power8_pspb_sprs(env); - register_power8_dpdes_sprs(env); - register_vtb_sprs(env); - register_power8_ic_sprs(env); - register_power9_book4_sprs(env); - register_power8_rpr_sprs(env); - register_power9_mmu_sprs(env); + register_book3s_310_dbg_sprs(env); register_power10_hash_sprs(env); register_power10_dexcr_sprs(env); register_power10_pmu_sup_sprs(env); register_power10_pmu_user_sprs(env); - - /* FIXME: Filter fields properly based on privilege level */ - spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL, - spr_read_generic, spr_write_generic, - KVM_REG_PPC_PSSCR, 0); - /* env variables */ env->dcache_line_size = 128; env->icache_line_size = 128; @@ -6682,68 +6646,24 @@ static bool ppc_pvr_match_power10(PowerPCCPUClass *pcc, uint32_t pvr, bool best) return false; } -POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) +POWERPC_FAMILY(POWER10)(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); dc->fw_name = "PowerPC,POWER10"; dc->desc = "POWER10"; + pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_3_10; pcc->pvr_match = ppc_pvr_match_power10; - pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07 | - PCR_COMPAT_3_00; - pcc->pcr_supported = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | - PCR_COMPAT_2_06 | PCR_COMPAT_2_05; + pcc->pcr_mask = PPC_PCR_MASK_POWER10; + pcc->pcr_supported = PPC_PCR_SUPPORTED_POWER10; pcc->init_proc = init_proc_POWER10; pcc->check_pow = check_pow_nocheck; 
pcc->check_attn = check_attn_hid0_power9; - pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | - PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | - PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | - PPC_FLOAT_FRSQRTES | - PPC_FLOAT_STFIWX | - PPC_FLOAT_EXT | - PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | - PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | - PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | - PPC_SEGMENT_64B | PPC_SLBI | - PPC_POPCNTB | PPC_POPCNTWD | - PPC_CILDST; - pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | - PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | - PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | - PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | - PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | - PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | - PPC2_ISA300 | PPC2_PRCNTL | PPC2_ISA310 | - PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206; - pcc->msr_mask = (1ull << MSR_SF) | - (1ull << MSR_HV) | - (1ull << MSR_VR) | - (1ull << MSR_VSX) | - (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << MSR_FP) | - (1ull << MSR_ME) | - (1ull << MSR_FE0) | - (1ull << MSR_SE) | - (1ull << MSR_DE) | - (1ull << MSR_FE1) | - (1ull << MSR_IR) | - (1ull << MSR_DR) | - (1ull << MSR_PMM) | - (1ull << MSR_RI) | - (1ull << MSR_LE); - pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | - (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | - LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD | - (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE | - LPCR_DEE | LPCR_OEE)) - | LPCR_MER | LPCR_GTSE | LPCR_TC | - LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE; - /* DD2 adds an extra HAIL bit */ - pcc->lpcr_mask |= LPCR_HAIL; + pcc->insns_flags = PPC_INSNS_FLAGS_POWER10; + pcc->insns_flags2 = PPC_INSNS_FLAGS2_POWER10; + pcc->msr_mask = PPC_MSR_MASK_POWER10; + pcc->lpcr_mask = PPC_LPCR_MASK_POWER10; pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE; pcc->mmu_model = POWERPC_MMU_3_00; @@ -6756,11 +6676,67 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) pcc->excp_model = POWERPC_EXCP_POWER10; pcc->bus_model = PPC_FLAGS_INPUT_POWER9; pcc->bfd_mach = bfd_mach_ppc64; - pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | - POWERPC_FLAG_BE | POWERPC_FLAG_PMM | - POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | - POWERPC_FLAG_VSX | POWERPC_FLAG_SCV | - POWERPC_FLAG_BHRB; + pcc->flags = POWERPC_FLAGS_POWER10; + pcc->l1_dcache_size = 0x8000; + pcc->l1_icache_size = 0x8000; +} + +static void init_proc_POWER11(CPUPPCState *env) +{ + init_proc_POWER10(env); +} + +static bool ppc_pvr_match_power11(PowerPCCPUClass *pcc, uint32_t pvr, bool best) +{ + uint32_t base = pvr & CPU_POWERPC_POWER_SERVER_MASK; + uint32_t pcc_base = pcc->pvr & CPU_POWERPC_POWER_SERVER_MASK; + + if (!best && (base == CPU_POWERPC_POWER11_BASE)) { + return true; + } + + if (base != pcc_base) { + return false; + } + + if ((pvr & 0x0f00) == (pcc->pvr & 0x0f00)) { + return true; + } + + return false; +} + +POWERPC_FAMILY(POWER11)(ObjectClass *oc, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + + dc->fw_name = "PowerPC,POWER11"; + dc->desc = "POWER11"; + pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_3_10_P11; + pcc->pvr_match = ppc_pvr_match_power11; + pcc->pcr_mask = PPC_PCR_MASK_POWER11; + pcc->pcr_supported = PPC_PCR_SUPPORTED_POWER11; + pcc->init_proc = init_proc_POWER11; + pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_hid0_power9; + pcc->insns_flags = PPC_INSNS_FLAGS_POWER11; + pcc->insns_flags2 = PPC_INSNS_FLAGS2_POWER11; 
+ pcc->msr_mask = PPC_MSR_MASK_POWER11; + pcc->lpcr_mask = PPC_LPCR_MASK_POWER11; + + pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE; + pcc->mmu_model = POWERPC_MMU_3_00; +#if !defined(CONFIG_USER_ONLY) + /* segment page size remain the same */ + pcc->hash64_opts = &ppc_hash64_opts_POWER7; + pcc->radix_page_info = &POWER10_radix_page_info; + pcc->lrg_decr_bits = 56; +#endif + pcc->excp_model = POWERPC_EXCP_POWER11; + pcc->bus_model = PPC_FLAGS_INPUT_POWER9; + pcc->bfd_mach = bfd_mach_ppc64; + pcc->flags = POWERPC_FLAGS_POWER11; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; } @@ -7105,7 +7081,7 @@ ObjectClass *ppc_cpu_class_by_name(const char *name) if (strcmp(name, "max") == 0) { MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); if (mc) { - return object_class_by_name(mc->default_cpu_type); + return object_class_by_name(machine_class_default_cpu_type(mc)); } } #endif @@ -7138,7 +7114,7 @@ PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc) } /* Sort by PVR, ordering special case "host" last. */ -static gint ppc_cpu_list_compare(gconstpointer a, gconstpointer b) +static gint ppc_cpu_list_compare(gconstpointer a, gconstpointer b, gpointer d) { ObjectClass *oc_a = (ObjectClass *)a; ObjectClass *oc_b = (ObjectClass *)b; @@ -7200,13 +7176,13 @@ static void ppc_cpu_list_entry(gpointer data, gpointer user_data) g_free(name); } -void ppc_cpu_list(void) +static void ppc_cpu_list(void) { GSList *list; qemu_printf("Available CPUs:\n"); list = object_class_get_list(TYPE_POWERPC_CPU, false); - list = g_slist_sort(list, ppc_cpu_list_compare); + list = g_slist_sort_with_data(list, ppc_cpu_list_compare, NULL); g_slist_foreach(list, ppc_cpu_list_entry, NULL); g_slist_free(list); @@ -7239,17 +7215,19 @@ static void ppc_restore_state_to_opc(CPUState *cs, cpu->env.nip = data[0]; } -#endif /* CONFIG_TCG */ -static bool ppc_cpu_has_work(CPUState *cs) +static int ppc_cpu_mmu_index(CPUState *cs, bool ifetch) { - return cs->interrupt_request & CPU_INTERRUPT_HARD; + return ppc_env_mmu_index(cpu_env(cs), ifetch); } +#endif /* CONFIG_TCG */ -static int ppc_cpu_mmu_index(CPUState *cs, bool ifetch) +#ifndef CONFIG_USER_ONLY +static bool ppc_cpu_has_work(CPUState *cs) { - return ppc_env_mmu_index(cpu_env(cs), ifetch); + return cs->interrupt_request & CPU_INTERRUPT_HARD; } +#endif /* !CONFIG_USER_ONLY */ static void ppc_cpu_reset_hold(Object *obj, ResetType type) { @@ -7326,6 +7304,36 @@ static void ppc_cpu_reset_hold(Object *obj, ResetType type) /* tininess for underflow is detected before rounding */ set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status); + /* Similarly for flush-to-zero */ + set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status); + + /* + * PowerPC propagation rules: + * 1. A if it sNaN or qNaN + * 2. B if it sNaN or qNaN + * A signaling NaN is always silenced before returning it. + */ + set_float_2nan_prop_rule(float_2nan_prop_ab, &env->fp_status); + set_float_2nan_prop_rule(float_2nan_prop_ab, &env->vec_status); + /* + * NaN propagation for fused multiply-add: + * if fRA is a NaN return it; otherwise if fRB is a NaN return it; + * otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + frB + * whereas QEMU labels the operands as (a * b) + c. 
+ */ + set_float_3nan_prop_rule(float_3nan_prop_acb, &env->fp_status); + set_float_3nan_prop_rule(float_3nan_prop_acb, &env->vec_status); + /* + * For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer + * to return an input NaN if we have one (ie c) rather than generating + * a default NaN + */ + set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status); + set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->vec_status); + + /* Default NaN: sign bit clear, set frac msb */ + set_float_default_nan_pattern(0b01000000, &env->fp_status); + set_float_default_nan_pattern(0b01000000, &env->vec_status); for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) { ppc_spr_t *spr = &env->spr_cb[i]; @@ -7378,6 +7386,12 @@ static void ppc_cpu_exec_exit(CPUState *cs) cpu->vhyp_class->cpu_exec_exit(cpu->vhyp, cpu); } } + +static vaddr ppc_pointer_wrap(CPUState *cs, int mmu_idx, + vaddr result, vaddr base) +{ + return (cpu_env(cs)->hflags >> HFLAGS_64) & 1 ? result : (uint32_t)result; +} #endif /* CONFIG_TCG */ #endif /* !CONFIG_USER_ONLY */ @@ -7435,6 +7449,8 @@ static void ppc_disas_set_info(CPUState *cs, disassemble_info *info) if ((env->hflags >> MSR_LE) & 1) { info->endian = BFD_ENDIAN_LITTLE; + } else { + info->endian = BFD_ENDIAN_BIG; } info->mach = env->bfd_mach; if (!env->bfd_mach) { @@ -7451,19 +7467,11 @@ static void ppc_disas_set_info(CPUState *cs, disassemble_info *info) #endif } -static Property ppc_cpu_properties[] = { - DEFINE_PROP_BOOL("pre-2.8-migration", PowerPCCPU, pre_2_8_migration, false), - DEFINE_PROP_BOOL("pre-2.10-migration", PowerPCCPU, pre_2_10_migration, - false), - DEFINE_PROP_BOOL("pre-3.0-migration", PowerPCCPU, pre_3_0_migration, - false), - DEFINE_PROP_END_OF_LIST(), -}; - #ifndef CONFIG_USER_ONLY #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps ppc_sysemu_ops = { + .has_work = ppc_cpu_has_work, .get_phys_page_debug = ppc_cpu_get_phys_page_debug, .write_elf32_note = ppc32_cpu_write_elf32_note, .write_elf64_note = ppc64_cpu_write_elf64_note, @@ -7473,18 +7481,25 @@ static const struct SysemuCPUOps ppc_sysemu_ops = { #endif #ifdef CONFIG_TCG -#include "hw/core/tcg-cpu-ops.h" +#include "accel/tcg/cpu-ops.h" static const TCGCPUOps ppc_tcg_ops = { + .mttcg_supported = TARGET_LONG_BITS == 64, + .guest_default_memory_order = 0, .initialize = ppc_translate_init, + .translate_code = ppc_translate_code, + .get_tb_cpu_state = ppc_get_tb_cpu_state, .restore_state_to_opc = ppc_restore_state_to_opc, + .mmu_index = ppc_cpu_mmu_index, #ifdef CONFIG_USER_ONLY .record_sigsegv = ppc_cpu_record_sigsegv, #else .tlb_fill = ppc_cpu_tlb_fill, + .pointer_wrap = ppc_pointer_wrap, .cpu_exec_interrupt = ppc_cpu_exec_interrupt, .cpu_exec_halt = ppc_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = ppc_cpu_do_interrupt, .cpu_exec_enter = ppc_cpu_exec_enter, .cpu_exec_exit = ppc_cpu_exec_exit, @@ -7497,7 +7512,7 @@ static const TCGCPUOps ppc_tcg_ops = { }; #endif /* CONFIG_TCG */ -static void ppc_cpu_class_init(ObjectClass *oc, void *data) +static void ppc_cpu_class_init(ObjectClass *oc, const void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); @@ -7509,14 +7524,12 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data) device_class_set_parent_unrealize(dc, ppc_cpu_unrealize, &pcc->parent_unrealize); pcc->pvr_match = ppc_pvr_match_default; - device_class_set_props(dc, ppc_cpu_properties); resettable_class_set_parent_phases(rc, NULL, ppc_cpu_reset_hold, NULL, &pcc->parent_phases); cc->class_by_name = 
ppc_cpu_class_by_name; - cc->has_work = ppc_cpu_has_work; - cc->mmu_index = ppc_cpu_mmu_index; + cc->list_cpus = ppc_cpu_list; cc->dump_state = ppc_cpu_dump_state; cc->set_pc = ppc_cpu_set_pc; cc->get_pc = ppc_cpu_get_pc; @@ -7565,7 +7578,7 @@ static const TypeInfo ppc_cpu_type_info = { .class_size = sizeof(PowerPCCPUClass), .class_init = ppc_cpu_class_init, #ifndef CONFIG_USER_ONLY - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_INTERRUPT_STATS_PROVIDER }, { } }, diff --git a/target/ppc/cpu_init.h b/target/ppc/cpu_init.h new file mode 100644 index 00000000000..f8fd6ff5cd5 --- /dev/null +++ b/target/ppc/cpu_init.h @@ -0,0 +1,91 @@ +#ifndef TARGET_PPC_CPU_INIT_H +#define TARGET_PPC_CPU_INIT_H + +#define PPC_INSNS_FLAGS_POWER9 \ + (PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | \ + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \ + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES | \ + PPC_FLOAT_STFIWX | PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | \ + PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | \ + PPC_MEM_TLBSYNC | PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | \ + PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | \ + PPC_CILDST) + +#define PPC_INSNS_FLAGS_POWER10 PPC_INSNS_FLAGS_POWER9 +#define PPC_INSNS_FLAGS_POWER11 PPC_INSNS_FLAGS_POWER10 + +#define PPC_INSNS_FLAGS2_POWER_COMMON \ + (PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | \ + PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | \ + PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | \ + PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | \ + PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_ISA300 | PPC2_PRCNTL | \ + PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206) + +#define PPC_INSNS_FLAGS2_POWER9 \ + (PPC_INSNS_FLAGS2_POWER_COMMON | PPC2_TM) +#define PPC_INSNS_FLAGS2_POWER10 \ + (PPC_INSNS_FLAGS2_POWER_COMMON | PPC2_ISA310) +#define PPC_INSNS_FLAGS2_POWER11 PPC_INSNS_FLAGS2_POWER10 + +#define PPC_MSR_MASK_POWER_COMMON \ + ((1ull << MSR_SF) | \ + (1ull << MSR_HV) | \ + (1ull << MSR_VR) | \ + (1ull << MSR_VSX) | \ + (1ull << MSR_EE) | \ + (1ull << MSR_PR) | \ + (1ull << MSR_FP) | \ + (1ull << MSR_ME) | \ + (1ull << MSR_FE0) | \ + (1ull << MSR_SE) | \ + (1ull << MSR_DE) | \ + (1ull << MSR_FE1) | \ + (1ull << MSR_IR) | \ + (1ull << MSR_DR) | \ + (1ull << MSR_PMM) | \ + (1ull << MSR_RI) | \ + (1ull << MSR_LE)) + +#define PPC_MSR_MASK_POWER9 \ + (PPC_MSR_MASK_POWER_COMMON | (1ull << MSR_TM)) +#define PPC_MSR_MASK_POWER10 \ + PPC_MSR_MASK_POWER_COMMON +#define PPC_MSR_MASK_POWER11 PPC_MSR_MASK_POWER10 + +#define PPC_PCR_MASK_POWER9 \ + (PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07) +#define PPC_PCR_MASK_POWER10 \ + (PPC_PCR_MASK_POWER9 | PCR_COMPAT_3_00) +#define PPC_PCR_MASK_POWER11 PPC_PCR_MASK_POWER10 + +#define PPC_PCR_SUPPORTED_POWER9 \ + (PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05) +#define PPC_PCR_SUPPORTED_POWER10 \ + (PPC_PCR_SUPPORTED_POWER9 | PCR_COMPAT_3_10) +#define PPC_PCR_SUPPORTED_POWER11 PPC_PCR_SUPPORTED_POWER10 + +#define PPC_LPCR_MASK_POWER9 \ + (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | \ + (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | \ + LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD | \ + (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | \ + LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC | \ + LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE) +/* DD2 adds an extra HAIL bit */ +#define PPC_LPCR_MASK_POWER10 \ + (PPC_LPCR_MASK_POWER9 | LPCR_HAIL) +#define PPC_LPCR_MASK_POWER11 
PPC_LPCR_MASK_POWER10 + +#define POWERPC_FLAGS_POWER_COMMON \ + (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | \ + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | \ + POWERPC_FLAG_VSX | POWERPC_FLAG_SCV) + +#define POWERPC_FLAGS_POWER9 \ + (POWERPC_FLAGS_POWER_COMMON | POWERPC_FLAG_TM) +#define POWERPC_FLAGS_POWER10 \ + (POWERPC_FLAGS_POWER_COMMON | POWERPC_FLAG_BHRB) +#define POWERPC_FLAGS_POWER11 POWERPC_FLAGS_POWER10 + +#endif /* TARGET_PPC_CPU_INIT_H */ diff --git a/target/ppc/dfp_helper.c b/target/ppc/dfp_helper.c index 5967ea07a92..ecc3f793267 100644 --- a/target/ppc/dfp_helper.c +++ b/target/ppc/dfp_helper.c @@ -249,7 +249,7 @@ static void dfp_set_FPRF_from_FRT_with_context(struct PPC_DFP *dfp, fprf = 0x05; break; default: - assert(0); /* should never get here */ + g_assert_not_reached(); } dfp->env->fpscr &= ~FP_FPRF; dfp->env->fpscr |= (fprf << FPSCR_FPRF); @@ -1243,7 +1243,7 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ } else if (decNumberIsQNaN(&dfp.b)) { \ vt.VsrD(1) = -2; \ } else { \ - assert(0); \ + g_assert_not_reached(); \ } \ set_dfp64(t, &vt); \ } else { \ @@ -1252,7 +1252,7 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ } else if ((size) == 128) { \ vt.VsrD(1) = dfp.b.exponent + 6176; \ } else { \ - assert(0); \ + g_assert_not_reached(); \ } \ set_dfp64(t, &vt); \ } \ @@ -1300,7 +1300,7 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ raw_inf = 0x1e000; \ bias = 6176; \ } else { \ - assert(0); \ + g_assert_not_reached(); \ } \ \ if (unlikely((exp < 0) || (exp > max_exp))) { \ diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index f33fc36db2a..1efdc4066eb 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -19,22 +19,17 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" #include "qemu/log.h" -#include "sysemu/sysemu.h" -#include "sysemu/runstate.h" +#include "system/memory.h" +#include "system/tcg.h" +#include "system/system.h" +#include "system/runstate.h" #include "cpu.h" -#include "exec/exec-all.h" #include "internal.h" #include "helper_regs.h" #include "hw/ppc/ppc.h" #include "trace.h" -#ifdef CONFIG_TCG -#include "sysemu/tcg.h" -#include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" -#endif - /*****************************************************************************/ /* Exception processing */ #ifndef CONFIG_USER_ONLY @@ -136,27 +131,6 @@ static void dump_hcall(CPUPPCState *env) env->nip); } -#ifdef CONFIG_TCG -/* Return true iff byteswap is needed to load instruction */ -static inline bool insn_need_byteswap(CPUArchState *env) -{ - /* SYSTEM builds TARGET_BIG_ENDIAN. 
Need to swap when MSR[LE] is set */ - return !!(env->msr & ((target_ulong)1 << MSR_LE)); -} - -static uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr) -{ - uint32_t insn = cpu_ldl_code(env, addr); - - if (insn_need_byteswap(env)) { - insn = bswap32(insn); - } - - return insn; -} - -#endif - static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp) { const char *es; @@ -324,10 +298,7 @@ static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr, } ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT; - if (ail == 0) { - return; - } - if (ail == 1) { + if (ail == 0 || ail == 1) { /* AIL=1 is reserved, treat it like AIL=0 */ return; } @@ -351,10 +322,7 @@ static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr, } else { ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT; } - if (ail == 0) { - return; - } - if (ail == 1 || ail == 2) { + if (ail == 0 || ail == 1 || ail == 2) { /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */ return; } @@ -426,57 +394,14 @@ static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector, env->reserve_addr = -1; } -#ifdef CONFIG_TCG -/* - * This stops the machine and logs CPU state without killing QEMU (like - * cpu_abort()) because it is often a guest error as opposed to a QEMU error, - * so the machine can still be debugged. - */ -static G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason) -{ - CPUState *cs = env_cpu(env); - FILE *f; - - f = qemu_log_trylock(); - if (f) { - fprintf(f, "Entering checkstop state: %s\n", reason); - cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP); - qemu_log_unlock(f); - } - - /* - * This stops the machine and logs CPU state without killing QEMU - * (like cpu_abort()) so the machine can still be debugged (because - * it is often a guest error). 
- */ - qemu_system_guest_panicked(NULL); - cpu_loop_exit_noexc(cs); -} - -#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) -void helper_attn(CPUPPCState *env) -{ - /* POWER attn is unprivileged when enabled by HID, otherwise illegal */ - if ((*env->check_attn)(env)) { - powerpc_checkstop(env, "host executed attn"); - } else { - raise_exception_err(env, POWERPC_EXCP_HV_EMU, - POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL); - } -} -#endif -#endif /* CONFIG_TCG */ - static void powerpc_mcheck_checkstop(CPUPPCState *env) { /* KVM guests always have MSR[ME] enabled */ -#ifdef CONFIG_TCG if (FIELD_EX64(env->msr, MSR, ME)) { return; } - + assert(tcg_enabled()); powerpc_checkstop(env, "machine check with MSR[ME]=0"); -#endif } static void powerpc_excp_40x(PowerPCCPU *cpu, int excp) @@ -1626,7 +1551,7 @@ static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp) } #endif /* TARGET_PPC64 */ -static void powerpc_excp(PowerPCCPU *cpu, int excp) +void powerpc_excp(PowerPCCPU *cpu, int excp) { CPUPPCState *env = &cpu->env; @@ -1661,6 +1586,7 @@ static void powerpc_excp(PowerPCCPU *cpu, int excp) case POWERPC_EXCP_POWER8: case POWERPC_EXCP_POWER9: case POWERPC_EXCP_POWER10: + case POWERPC_EXCP_POWER11: powerpc_excp_books(cpu, excp); break; default: @@ -1682,51 +1608,54 @@ void ppc_cpu_do_interrupt(CPUState *cs) PPC_INTERRUPT_PIT | PPC_INTERRUPT_DOORBELL | PPC_INTERRUPT_HDOORBELL | \ PPC_INTERRUPT_THERM | PPC_INTERRUPT_EBB) -static int p7_interrupt_powersave(CPUPPCState *env) +static int p7_interrupt_powersave(uint32_t pending_interrupts, + target_ulong lpcr) { - if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && - (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) { + if ((pending_interrupts & PPC_INTERRUPT_EXT) && + (lpcr & LPCR_P7_PECE0)) { return PPC_INTERRUPT_EXT; } - if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && - (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) { + if ((pending_interrupts & PPC_INTERRUPT_DECR) && + (lpcr & LPCR_P7_PECE1)) { return PPC_INTERRUPT_DECR; } - if ((env->pending_interrupts & PPC_INTERRUPT_MCK) && - (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { + if ((pending_interrupts & PPC_INTERRUPT_MCK) && + (lpcr & LPCR_P7_PECE2)) { return PPC_INTERRUPT_MCK; } - if ((env->pending_interrupts & PPC_INTERRUPT_HMI) && - (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { + if ((pending_interrupts & PPC_INTERRUPT_HMI) && + (lpcr & LPCR_P7_PECE2)) { return PPC_INTERRUPT_HMI; } - if (env->pending_interrupts & PPC_INTERRUPT_RESET) { + if (pending_interrupts & PPC_INTERRUPT_RESET) { return PPC_INTERRUPT_RESET; } return 0; } -static int p7_next_unmasked_interrupt(CPUPPCState *env) +static int p7_next_unmasked_interrupt(CPUPPCState *env, + uint32_t pending_interrupts, + target_ulong lpcr) { CPUState *cs = env_cpu(env); /* Ignore MSR[EE] when coming out of some power management states */ bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; - assert((env->pending_interrupts & P7_UNUSED_INTERRUPTS) == 0); + assert((pending_interrupts & P7_UNUSED_INTERRUPTS) == 0); if (cs->halted) { /* LPCR[PECE] controls which interrupts can exit power-saving mode */ - return p7_interrupt_powersave(env); + return p7_interrupt_powersave(pending_interrupts, lpcr); } /* Machine check exception */ - if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + if (pending_interrupts & PPC_INTERRUPT_MCK) { return PPC_INTERRUPT_MCK; } /* Hypervisor decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { + if (pending_interrupts & PPC_INTERRUPT_HDECR) { /* LPCR will be clear when not supported so this will 
work */ bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { @@ -1736,9 +1665,9 @@ static int p7_next_unmasked_interrupt(CPUPPCState *env) } /* External interrupt can ignore MSR:EE under some circumstances */ - if (env->pending_interrupts & PPC_INTERRUPT_EXT) { - bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); - bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + if (pending_interrupts & PPC_INTERRUPT_EXT) { + bool lpes0 = !!(lpcr & LPCR_LPES0); + bool heic = !!(lpcr & LPCR_HEIC); /* HEIC blocks delivery to the hypervisor */ if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && !FIELD_EX64(env->msr, MSR, PR))) || @@ -1748,10 +1677,10 @@ static int p7_next_unmasked_interrupt(CPUPPCState *env) } if (msr_ee != 0) { /* Decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_DECR) { + if (pending_interrupts & PPC_INTERRUPT_DECR) { return PPC_INTERRUPT_DECR; } - if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { + if (pending_interrupts & PPC_INTERRUPT_PERFM) { return PPC_INTERRUPT_PERFM; } } @@ -1764,39 +1693,42 @@ static int p7_next_unmasked_interrupt(CPUPPCState *env) PPC_INTERRUPT_CEXT | PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | \ PPC_INTERRUPT_FIT | PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM) -static int p8_interrupt_powersave(CPUPPCState *env) +static int p8_interrupt_powersave(uint32_t pending_interrupts, + target_ulong lpcr) { - if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) { + if ((pending_interrupts & PPC_INTERRUPT_EXT) && + (lpcr & LPCR_P8_PECE2)) { return PPC_INTERRUPT_EXT; } - if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) { + if ((pending_interrupts & PPC_INTERRUPT_DECR) && + (lpcr & LPCR_P8_PECE3)) { return PPC_INTERRUPT_DECR; } - if ((env->pending_interrupts & PPC_INTERRUPT_MCK) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { + if ((pending_interrupts & PPC_INTERRUPT_MCK) && + (lpcr & LPCR_P8_PECE4)) { return PPC_INTERRUPT_MCK; } - if ((env->pending_interrupts & PPC_INTERRUPT_HMI) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { + if ((pending_interrupts & PPC_INTERRUPT_HMI) && + (lpcr & LPCR_P8_PECE4)) { return PPC_INTERRUPT_HMI; } - if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) { + if ((pending_interrupts & PPC_INTERRUPT_DOORBELL) && + (lpcr & LPCR_P8_PECE0)) { return PPC_INTERRUPT_DOORBELL; } - if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) { + if ((pending_interrupts & PPC_INTERRUPT_HDOORBELL) && + (lpcr & LPCR_P8_PECE1)) { return PPC_INTERRUPT_HDOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_RESET) { + if (pending_interrupts & PPC_INTERRUPT_RESET) { return PPC_INTERRUPT_RESET; } return 0; } -static int p8_next_unmasked_interrupt(CPUPPCState *env) +static int p8_next_unmasked_interrupt(CPUPPCState *env, + uint32_t pending_interrupts, + target_ulong lpcr) { CPUState *cs = env_cpu(env); @@ -1807,18 +1739,18 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env) if (cs->halted) { /* LPCR[PECE] controls which interrupts can exit power-saving mode */ - return p8_interrupt_powersave(env); + return p8_interrupt_powersave(pending_interrupts, lpcr); } /* Machine check exception */ - if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + if (pending_interrupts & PPC_INTERRUPT_MCK) { return PPC_INTERRUPT_MCK; } /* Hypervisor decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { + if 
(pending_interrupts & PPC_INTERRUPT_HDECR) { /* LPCR will be clear when not supported so this will work */ - bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); + bool hdice = !!(lpcr & LPCR_HDICE); if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { /* HDEC clears on delivery */ return PPC_INTERRUPT_HDECR; @@ -1826,9 +1758,9 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env) } /* External interrupt can ignore MSR:EE under some circumstances */ - if (env->pending_interrupts & PPC_INTERRUPT_EXT) { - bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); - bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + if (pending_interrupts & PPC_INTERRUPT_EXT) { + bool lpes0 = !!(lpcr & LPCR_LPES0); + bool heic = !!(lpcr & LPCR_HEIC); /* HEIC blocks delivery to the hypervisor */ if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && !FIELD_EX64(env->msr, MSR, PR))) || @@ -1838,20 +1770,20 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env) } if (msr_ee != 0) { /* Decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_DECR) { + if (pending_interrupts & PPC_INTERRUPT_DECR) { return PPC_INTERRUPT_DECR; } - if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_DOORBELL) { return PPC_INTERRUPT_DOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) { return PPC_INTERRUPT_HDOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { + if (pending_interrupts & PPC_INTERRUPT_PERFM) { return PPC_INTERRUPT_PERFM; } /* EBB exception */ - if (env->pending_interrupts & PPC_INTERRUPT_EBB) { + if (pending_interrupts & PPC_INTERRUPT_EBB) { /* * EBB exception must be taken in problem state and * with BESCR_GE set. @@ -1871,60 +1803,65 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env) PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \ PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM) -static int p9_interrupt_powersave(CPUPPCState *env) +static int p9_interrupt_powersave(CPUPPCState *env, + uint32_t pending_interrupts, + target_ulong lpcr) { + /* External Exception */ - if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && - (env->spr[SPR_LPCR] & LPCR_EEE)) { - bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + if ((pending_interrupts & PPC_INTERRUPT_EXT) && + (lpcr & LPCR_EEE)) { + bool heic = !!(lpcr & LPCR_HEIC); if (!heic || !FIELD_EX64_HV(env->msr) || FIELD_EX64(env->msr, MSR, PR)) { return PPC_INTERRUPT_EXT; } } /* Decrementer Exception */ - if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && - (env->spr[SPR_LPCR] & LPCR_DEE)) { + if ((pending_interrupts & PPC_INTERRUPT_DECR) && + (lpcr & LPCR_DEE)) { return PPC_INTERRUPT_DECR; } /* Machine Check or Hypervisor Maintenance Exception */ - if (env->spr[SPR_LPCR] & LPCR_OEE) { - if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + if (lpcr & LPCR_OEE) { + if (pending_interrupts & PPC_INTERRUPT_MCK) { return PPC_INTERRUPT_MCK; } - if (env->pending_interrupts & PPC_INTERRUPT_HMI) { + if (pending_interrupts & PPC_INTERRUPT_HMI) { return PPC_INTERRUPT_HMI; } } /* Privileged Doorbell Exception */ - if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) && - (env->spr[SPR_LPCR] & LPCR_PDEE)) { + if ((pending_interrupts & PPC_INTERRUPT_DOORBELL) && + (lpcr & LPCR_PDEE)) { return PPC_INTERRUPT_DOORBELL; } /* Hypervisor Doorbell Exception */ - if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) && - (env->spr[SPR_LPCR] & LPCR_HDEE)) { + if ((pending_interrupts & PPC_INTERRUPT_HDOORBELL) && + (lpcr & LPCR_HDEE)) { return 
PPC_INTERRUPT_HDOORBELL; } /* Hypervisor virtualization exception */ - if ((env->pending_interrupts & PPC_INTERRUPT_HVIRT) && - (env->spr[SPR_LPCR] & LPCR_HVEE)) { + if ((pending_interrupts & PPC_INTERRUPT_HVIRT) && + (lpcr & LPCR_HVEE)) { return PPC_INTERRUPT_HVIRT; } - if (env->pending_interrupts & PPC_INTERRUPT_RESET) { + if (pending_interrupts & PPC_INTERRUPT_RESET) { return PPC_INTERRUPT_RESET; } return 0; } -static int p9_next_unmasked_interrupt(CPUPPCState *env) +static int p9_next_unmasked_interrupt(CPUPPCState *env, + uint32_t pending_interrupts, + target_ulong lpcr) { CPUState *cs = env_cpu(env); /* Ignore MSR[EE] when coming out of some power management states */ bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; - assert((env->pending_interrupts & P9_UNUSED_INTERRUPTS) == 0); + assert((pending_interrupts & P9_UNUSED_INTERRUPTS) == 0); if (cs->halted) { if (env->spr[SPR_PSSCR] & PSSCR_EC) { @@ -1932,7 +1869,7 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env) * When PSSCR[EC] is set, LPCR[PECE] controls which interrupts can * wakeup the processor */ - return p9_interrupt_powersave(env); + return p9_interrupt_powersave(env, pending_interrupts, lpcr); } else { /* * When it's clear, any system-caused exception exits power-saving @@ -1943,14 +1880,14 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env) } /* Machine check exception */ - if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + if (pending_interrupts & PPC_INTERRUPT_MCK) { return PPC_INTERRUPT_MCK; } /* Hypervisor decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { + if (pending_interrupts & PPC_INTERRUPT_HDECR) { /* LPCR will be clear when not supported so this will work */ - bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); + bool hdice = !!(lpcr & LPCR_HDICE); if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { /* HDEC clears on delivery */ return PPC_INTERRUPT_HDECR; @@ -1958,18 +1895,18 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env) } /* Hypervisor virtualization interrupt */ - if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) { + if (pending_interrupts & PPC_INTERRUPT_HVIRT) { /* LPCR will be clear when not supported so this will work */ - bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); + bool hvice = !!(lpcr & LPCR_HVICE); if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hvice) { return PPC_INTERRUPT_HVIRT; } } /* External interrupt can ignore MSR:EE under some circumstances */ - if (env->pending_interrupts & PPC_INTERRUPT_EXT) { - bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); - bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + if (pending_interrupts & PPC_INTERRUPT_EXT) { + bool lpes0 = !!(lpcr & LPCR_LPES0); + bool heic = !!(lpcr & LPCR_HEIC); /* HEIC blocks delivery to the hypervisor */ if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && !FIELD_EX64(env->msr, MSR, PR))) || @@ -1979,20 +1916,20 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env) } if (msr_ee != 0) { /* Decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_DECR) { + if (pending_interrupts & PPC_INTERRUPT_DECR) { return PPC_INTERRUPT_DECR; } - if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_DOORBELL) { return PPC_INTERRUPT_DOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) { return PPC_INTERRUPT_HDOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { + if (pending_interrupts & PPC_INTERRUPT_PERFM) { return 
PPC_INTERRUPT_PERFM; } /* EBB exception */ - if (env->pending_interrupts & PPC_INTERRUPT_EBB) { + if (pending_interrupts & PPC_INTERRUPT_EBB) { /* * EBB exception must be taken in problem state and * with BESCR_GE set. @@ -2010,27 +1947,35 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env) static int ppc_next_unmasked_interrupt(CPUPPCState *env) { + uint32_t pending_interrupts = env->pending_interrupts; + target_ulong lpcr = env->spr[SPR_LPCR]; + bool async_deliver; + + if (unlikely(env->quiesced)) { + return 0; + } + #ifdef TARGET_PPC64 switch (env->excp_model) { case POWERPC_EXCP_POWER7: - return p7_next_unmasked_interrupt(env); + return p7_next_unmasked_interrupt(env, pending_interrupts, lpcr); case POWERPC_EXCP_POWER8: - return p8_next_unmasked_interrupt(env); + return p8_next_unmasked_interrupt(env, pending_interrupts, lpcr); case POWERPC_EXCP_POWER9: case POWERPC_EXCP_POWER10: - return p9_next_unmasked_interrupt(env); + case POWERPC_EXCP_POWER11: + return p9_next_unmasked_interrupt(env, pending_interrupts, lpcr); default: break; } #endif - bool async_deliver; /* External reset */ - if (env->pending_interrupts & PPC_INTERRUPT_RESET) { + if (pending_interrupts & PPC_INTERRUPT_RESET) { return PPC_INTERRUPT_RESET; } /* Machine check exception */ - if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + if (pending_interrupts & PPC_INTERRUPT_MCK) { return PPC_INTERRUPT_MCK; } #if 0 /* TODO */ @@ -2049,9 +1994,9 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env) async_deliver = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; /* Hypervisor decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { + if (pending_interrupts & PPC_INTERRUPT_HDECR) { /* LPCR will be clear when not supported so this will work */ - bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); + bool hdice = !!(lpcr & LPCR_HDICE); if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hdice) { /* HDEC clears on delivery */ return PPC_INTERRUPT_HDECR; @@ -2059,18 +2004,18 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env) } /* Hypervisor virtualization interrupt */ - if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) { + if (pending_interrupts & PPC_INTERRUPT_HVIRT) { /* LPCR will be clear when not supported so this will work */ - bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); + bool hvice = !!(lpcr & LPCR_HVICE); if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hvice) { return PPC_INTERRUPT_HVIRT; } } /* External interrupt can ignore MSR:EE under some circumstances */ - if (env->pending_interrupts & PPC_INTERRUPT_EXT) { - bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); - bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + if (pending_interrupts & PPC_INTERRUPT_EXT) { + bool lpes0 = !!(lpcr & LPCR_LPES0); + bool heic = !!(lpcr & LPCR_HEIC); /* HEIC blocks delivery to the hypervisor */ if ((async_deliver && !(heic && FIELD_EX64_HV(env->msr) && !FIELD_EX64(env->msr, MSR, PR))) || @@ -2080,45 +2025,45 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env) } if (FIELD_EX64(env->msr, MSR, CE)) { /* External critical interrupt */ - if (env->pending_interrupts & PPC_INTERRUPT_CEXT) { + if (pending_interrupts & PPC_INTERRUPT_CEXT) { return PPC_INTERRUPT_CEXT; } } if (async_deliver != 0) { /* Watchdog timer on embedded PowerPC */ - if (env->pending_interrupts & PPC_INTERRUPT_WDT) { + if (pending_interrupts & PPC_INTERRUPT_WDT) { return PPC_INTERRUPT_WDT; } - if (env->pending_interrupts & PPC_INTERRUPT_CDOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_CDOORBELL) { return 
PPC_INTERRUPT_CDOORBELL; } /* Fixed interval timer on embedded PowerPC */ - if (env->pending_interrupts & PPC_INTERRUPT_FIT) { + if (pending_interrupts & PPC_INTERRUPT_FIT) { return PPC_INTERRUPT_FIT; } /* Programmable interval timer on embedded PowerPC */ - if (env->pending_interrupts & PPC_INTERRUPT_PIT) { + if (pending_interrupts & PPC_INTERRUPT_PIT) { return PPC_INTERRUPT_PIT; } /* Decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_DECR) { + if (pending_interrupts & PPC_INTERRUPT_DECR) { return PPC_INTERRUPT_DECR; } - if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_DOORBELL) { return PPC_INTERRUPT_DOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) { return PPC_INTERRUPT_HDOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { + if (pending_interrupts & PPC_INTERRUPT_PERFM) { return PPC_INTERRUPT_PERFM; } /* Thermal interrupt */ - if (env->pending_interrupts & PPC_INTERRUPT_THERM) { + if (pending_interrupts & PPC_INTERRUPT_THERM) { return PPC_INTERRUPT_THERM; } /* EBB exception */ - if (env->pending_interrupts & PPC_INTERRUPT_EBB) { + if (pending_interrupts & PPC_INTERRUPT_EBB) { /* * EBB exception must be taken in problem state and * with BESCR_GE set. @@ -2187,7 +2132,6 @@ static void p7_deliver_interrupt(CPUPPCState *env, int interrupt) powerpc_excp(cpu, POWERPC_EXCP_DECR); break; case PPC_INTERRUPT_PERFM: - env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; powerpc_excp(cpu, POWERPC_EXCP_PERFM); break; case 0: @@ -2238,7 +2182,9 @@ static void p8_deliver_interrupt(CPUPPCState *env, int interrupt) powerpc_excp(cpu, POWERPC_EXCP_DECR); break; case PPC_INTERRUPT_DOORBELL: - env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; + if (!env->resume_as_sreset) { + env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; + } if (is_book3s_arch2x(env)) { powerpc_excp(cpu, POWERPC_EXCP_SDOOR); } else { @@ -2246,11 +2192,12 @@ static void p8_deliver_interrupt(CPUPPCState *env, int interrupt) } break; case PPC_INTERRUPT_HDOORBELL: - env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; + if (!env->resume_as_sreset) { + env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; + } powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); break; case PPC_INTERRUPT_PERFM: - env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; powerpc_excp(cpu, POWERPC_EXCP_PERFM); break; case PPC_INTERRUPT_EBB: /* EBB exception */ @@ -2303,6 +2250,7 @@ static void p9_deliver_interrupt(CPUPPCState *env, int interrupt) case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ /* HDEC clears on delivery */ + /* XXX: should not see an HDEC if resume_as_sreset. assert? 
*/ env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; powerpc_excp(cpu, POWERPC_EXCP_HDECR); break; @@ -2322,15 +2270,18 @@ static void p9_deliver_interrupt(CPUPPCState *env, int interrupt) powerpc_excp(cpu, POWERPC_EXCP_DECR); break; case PPC_INTERRUPT_DOORBELL: - env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; + if (!env->resume_as_sreset) { + env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; + } powerpc_excp(cpu, POWERPC_EXCP_SDOOR); break; case PPC_INTERRUPT_HDOORBELL: - env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; + if (!env->resume_as_sreset) { + env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; + } powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); break; case PPC_INTERRUPT_PERFM: - env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; powerpc_excp(cpu, POWERPC_EXCP_PERFM); break; case PPC_INTERRUPT_EBB: /* EBB exception */ @@ -2372,6 +2323,7 @@ static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt) return p8_deliver_interrupt(env, interrupt); case POWERPC_EXCP_POWER9: case POWERPC_EXCP_POWER10: + case POWERPC_EXCP_POWER11: return p9_deliver_interrupt(env, interrupt); default: break; @@ -2444,7 +2396,6 @@ static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt) powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); break; case PPC_INTERRUPT_PERFM: - env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; powerpc_excp(cpu, POWERPC_EXCP_PERFM); break; case PPC_INTERRUPT_THERM: /* Thermal interrupt */ @@ -2479,10 +2430,16 @@ static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt) } } +/* + * system reset is not delivered via normal irq method, so have to set + * halted = 0 to resume CPU running if it was halted. Possibly we should + * move it over to using PPC_INTERRUPT_RESET rather than async_run_on_cpu. + */ void ppc_cpu_do_system_reset(CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); + cs->halted = 0; powerpc_excp(cpu, POWERPC_EXCP_RESET); } @@ -2504,6 +2461,7 @@ void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector) /* Anything for nested required here? MSR[HV] bit? 
*/ + cs->halted = 0; powerpc_set_excp_state(cpu, vector, msr); } @@ -2529,769 +2487,3 @@ bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) } #endif /* !CONFIG_USER_ONLY */ - -/*****************************************************************************/ -/* Exceptions processing helpers */ - -void raise_exception_err_ra(CPUPPCState *env, uint32_t exception, - uint32_t error_code, uintptr_t raddr) -{ - CPUState *cs = env_cpu(env); - - cs->exception_index = exception; - env->error_code = error_code; - cpu_loop_exit_restore(cs, raddr); -} - -void raise_exception_err(CPUPPCState *env, uint32_t exception, - uint32_t error_code) -{ - raise_exception_err_ra(env, exception, error_code, 0); -} - -void raise_exception(CPUPPCState *env, uint32_t exception) -{ - raise_exception_err_ra(env, exception, 0, 0); -} - -void raise_exception_ra(CPUPPCState *env, uint32_t exception, - uintptr_t raddr) -{ - raise_exception_err_ra(env, exception, 0, raddr); -} - -#ifdef CONFIG_TCG -void helper_raise_exception_err(CPUPPCState *env, uint32_t exception, - uint32_t error_code) -{ - raise_exception_err_ra(env, exception, error_code, 0); -} - -void helper_raise_exception(CPUPPCState *env, uint32_t exception) -{ - raise_exception_err_ra(env, exception, 0, 0); -} - -#ifndef CONFIG_USER_ONLY -void helper_store_msr(CPUPPCState *env, target_ulong val) -{ - uint32_t excp = hreg_store_msr(env, val, 0); - - if (excp != 0) { - cpu_interrupt_exittb(env_cpu(env)); - raise_exception(env, excp); - } -} - -void helper_ppc_maybe_interrupt(CPUPPCState *env) -{ - ppc_maybe_interrupt(env); -} - -#ifdef TARGET_PPC64 -void helper_scv(CPUPPCState *env, uint32_t lev) -{ - if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) { - raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev); - } else { - raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV); - } -} - -void helper_pminsn(CPUPPCState *env, uint32_t insn) -{ - CPUState *cs = env_cpu(env); - - cs->halted = 1; - - /* Condition for waking up at 0x100 */ - env->resume_as_sreset = (insn != PPC_PM_STOP) || - (env->spr[SPR_PSSCR] & PSSCR_EC); - - /* HDECR is not to wake from PM state, it may have already fired */ - if (env->resume_as_sreset) { - PowerPCCPU *cpu = env_archcpu(env); - ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0); - } - - ppc_maybe_interrupt(env); -} -#endif /* TARGET_PPC64 */ - -static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) -{ - /* MSR:POW cannot be set by any form of rfi */ - msr &= ~(1ULL << MSR_POW); - - /* MSR:TGPR cannot be set by any form of rfi */ - if (env->flags & POWERPC_FLAG_TGPR) - msr &= ~(1ULL << MSR_TGPR); - -#ifdef TARGET_PPC64 - /* Switching to 32-bit ? 
Crop the nip */ - if (!msr_is_64bit(env, msr)) { - nip = (uint32_t)nip; - } -#else - nip = (uint32_t)nip; -#endif - /* XXX: beware: this is false if VLE is supported */ - env->nip = nip & ~((target_ulong)0x00000003); - hreg_store_msr(env, msr, 1); - trace_ppc_excp_rfi(env->nip, env->msr); - /* - * No need to raise an exception here, as rfi is always the last - * insn of a TB - */ - cpu_interrupt_exittb(env_cpu(env)); - /* Reset the reservation */ - env->reserve_addr = -1; - - /* Context synchronizing: check if TCG TLB needs flush */ - check_tlb_flush(env, false); -} - -void helper_rfi(CPUPPCState *env) -{ - do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful); -} - -#ifdef TARGET_PPC64 -void helper_rfid(CPUPPCState *env) -{ - /* - * The architecture defines a number of rules for which bits can - * change but in practice, we handle this in hreg_store_msr() - * which will be called by do_rfi(), so there is no need to filter - * here - */ - do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]); -} - -void helper_rfscv(CPUPPCState *env) -{ - do_rfi(env, env->lr, env->ctr); -} - -void helper_hrfid(CPUPPCState *env) -{ - do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]); -} - -void helper_rfebb(CPUPPCState *env, target_ulong s) -{ - target_ulong msr = env->msr; - - /* - * Handling of BESCR bits 32:33 according to PowerISA v3.1: - * - * "If BESCR 32:33 != 0b00 the instruction is treated as if - * the instruction form were invalid." - */ - if (env->spr[SPR_BESCR] & BESCR_INVALID) { - raise_exception_err(env, POWERPC_EXCP_PROGRAM, - POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL); - } - - env->nip = env->spr[SPR_EBBRR]; - - /* Switching to 32-bit ? Crop the nip */ - if (!msr_is_64bit(env, msr)) { - env->nip = (uint32_t)env->spr[SPR_EBBRR]; - } - - if (s) { - env->spr[SPR_BESCR] |= BESCR_GE; - } else { - env->spr[SPR_BESCR] &= ~BESCR_GE; - } -} - -/* - * Triggers or queues an 'ebb_excp' EBB exception. All checks - * but FSCR, HFSCR and msr_pr must be done beforehand. - * - * PowerISA v3.1 isn't clear about whether an EBB should be - * postponed or cancelled if the EBB facility is unavailable. - * Our assumption here is that the EBB is cancelled if both - * FSCR and HFSCR EBB facilities aren't available. - */ -static void do_ebb(CPUPPCState *env, int ebb_excp) -{ - PowerPCCPU *cpu = env_archcpu(env); - - /* - * FSCR_EBB and FSCR_IC_EBB are the same bits used with - * HFSCR. 
- */ - helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB); - helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB); - - if (ebb_excp == POWERPC_EXCP_PERFM_EBB) { - env->spr[SPR_BESCR] |= BESCR_PMEO; - } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) { - env->spr[SPR_BESCR] |= BESCR_EEO; - } - - if (FIELD_EX64(env->msr, MSR, PR)) { - powerpc_excp(cpu, ebb_excp); - } else { - ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1); - } -} - -void raise_ebb_perfm_exception(CPUPPCState *env) -{ - bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE && - env->spr[SPR_BESCR] & BESCR_PME && - env->spr[SPR_BESCR] & BESCR_GE; - - if (!perfm_ebb_enabled) { - return; - } - - do_ebb(env, POWERPC_EXCP_PERFM_EBB); -} -#endif /* TARGET_PPC64 */ - -/*****************************************************************************/ -/* Embedded PowerPC specific helpers */ -void helper_40x_rfci(CPUPPCState *env) -{ - do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]); -} - -void helper_rfci(CPUPPCState *env) -{ - do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]); -} - -void helper_rfdi(CPUPPCState *env) -{ - /* FIXME: choose CSRR1 or DSRR1 based on cpu type */ - do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]); -} - -void helper_rfmci(CPUPPCState *env) -{ - /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */ - do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]); -} -#endif /* !CONFIG_USER_ONLY */ - -void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2, - uint32_t flags) -{ - if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) || - ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) || - ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) || - ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) || - ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) { - raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, - POWERPC_EXCP_TRAP, GETPC()); - } -} - -#ifdef TARGET_PPC64 -void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2, - uint32_t flags) -{ - if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) || - ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) || - ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) || - ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) || - ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) { - raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, - POWERPC_EXCP_TRAP, GETPC()); - } -} -#endif /* TARGET_PPC64 */ - -static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane) -{ - const uint16_t c = 0xfffc; - const uint64_t z0 = 0xfa2561cdf44ac398ULL; - uint16_t z = 0, temp; - uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32]; - - for (int i = 3; i >= 0; i--) { - k[i] = key & 0xffff; - key >>= 16; - } - xleft[0] = x & 0xffff; - xright[0] = (x >> 16) & 0xffff; - - for (int i = 0; i < 28; i++) { - z = (z0 >> (63 - i)) & 1; - temp = ror16(k[i + 3], 3) ^ k[i + 1]; - k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1); - } - - for (int i = 0; i < 8; i++) { - eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)]; - eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)]; - eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)]; - eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)]; - } - - for (int i = 0; i < 32; i++) { - fxleft[i] = (rol16(xleft[i], 1) & - rol16(xleft[i], 8)) ^ rol16(xleft[i], 2); - xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i]; - xright[i + 1] = xleft[i]; - } - - return (((uint32_t)xright[32]) << 16) | xleft[32]; -} - -static uint64_t hash_digest(uint64_t 
ra, uint64_t rb, uint64_t key) -{ - uint64_t stage0_h = 0ULL, stage0_l = 0ULL; - uint64_t stage1_h, stage1_l; - - for (int i = 0; i < 4; i++) { - stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1)); - stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i); - stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1)); - stage0_l |= (ra & 0xff) << (8 * 2 * i); - rb >>= 8; - ra >>= 8; - } - - stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32; - stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1); - stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32; - stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3); - - return stage1_h ^ stage1_l; -} - -static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra, - target_ulong rb, uint64_t key, bool store) -{ - uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash; - - if (store) { - cpu_stq_data_ra(env, ea, calculated_hash, GETPC()); - } else { - loaded_hash = cpu_ldq_data_ra(env, ea, GETPC()); - if (loaded_hash != calculated_hash) { - raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, - POWERPC_EXCP_TRAP, GETPC()); - } - } -} - -#include "qemu/guest-random.h" - -#ifdef TARGET_PPC64 -#define HELPER_HASH(op, key, store, dexcr_aspect) \ -void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \ - target_ulong rb) \ -{ \ - if (env->msr & R_MSR_PR_MASK) { \ - if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK || \ - env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \ - return; \ - } else if (!(env->msr & R_MSR_HV_MASK)) { \ - if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK || \ - env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \ - return; \ - } else if (!(env->msr & R_MSR_S_MASK)) { \ - if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK)) \ - return; \ - } \ - \ - do_hash(env, ea, ra, rb, key, store); \ -} -#else -#define HELPER_HASH(op, key, store, dexcr_aspect) \ -void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \ - target_ulong rb) \ -{ \ - do_hash(env, ea, ra, rb, key, store); \ -} -#endif /* TARGET_PPC64 */ - -HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE) -HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE) -HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE) -HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE) - -#ifndef CONFIG_USER_ONLY -/* Embedded.Processor Control */ -static int dbell2irq(target_ulong rb) -{ - int msg = rb & DBELL_TYPE_MASK; - int irq = -1; - - switch (msg) { - case DBELL_TYPE_DBELL: - irq = PPC_INTERRUPT_DOORBELL; - break; - case DBELL_TYPE_DBELL_CRIT: - irq = PPC_INTERRUPT_CDOORBELL; - break; - case DBELL_TYPE_G_DBELL: - case DBELL_TYPE_G_DBELL_CRIT: - case DBELL_TYPE_G_DBELL_MC: - /* XXX implement */ - default: - break; - } - - return irq; -} - -void helper_msgclr(CPUPPCState *env, target_ulong rb) -{ - int irq = dbell2irq(rb); - - if (irq < 0) { - return; - } - - ppc_set_irq(env_archcpu(env), irq, 0); -} - -void helper_msgsnd(target_ulong rb) -{ - int irq = dbell2irq(rb); - int pir = rb & DBELL_PIRTAG_MASK; - CPUState *cs; - - if (irq < 0) { - return; - } - - bql_lock(); - CPU_FOREACH(cs) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *cenv = &cpu->env; - - if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { - ppc_set_irq(cpu, irq, 1); - } - } - bql_unlock(); -} - -/* Server Processor Control */ - -static bool dbell_type_server(target_ulong rb) -{ - /* - * A Directed Hypervisor Doorbell message is sent only if the - * message 
type is 5. All other types are reserved and the - * instruction is a no-op - */ - return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER; -} - -static inline bool dbell_bcast_core(target_ulong rb) -{ - return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE; -} - -static inline bool dbell_bcast_subproc(target_ulong rb) -{ - return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC; -} - -/* - * Send an interrupt to a thread in the same core as env). - */ -static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq) -{ - PowerPCCPU *cpu = env_archcpu(env); - CPUState *cs = env_cpu(env); - - if (ppc_cpu_lpar_single_threaded(cs)) { - if (target_tir == 0) { - ppc_set_irq(cpu, irq, 1); - } - } else { - CPUState *ccs; - - /* Does iothread need to be locked for walking CPU list? */ - bql_lock(); - THREAD_SIBLING_FOREACH(cs, ccs) { - PowerPCCPU *ccpu = POWERPC_CPU(ccs); - if (target_tir == ppc_cpu_tir(ccpu)) { - ppc_set_irq(ccpu, irq, 1); - break; - } - } - bql_unlock(); - } -} - -void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb) -{ - if (!dbell_type_server(rb)) { - return; - } - - ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0); -} - -void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb) -{ - int pir = rb & DBELL_PROCIDTAG_MASK; - bool brdcast = false; - CPUState *cs, *ccs; - PowerPCCPU *cpu; - - if (!dbell_type_server(rb)) { - return; - } - - /* POWER8 msgsnd is like msgsndp (targets a thread within core) */ - if (!(env->insns_flags2 & PPC2_ISA300)) { - msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL); - return; - } - - /* POWER9 and later msgsnd is a global (targets any thread) */ - cpu = ppc_get_vcpu_by_pir(pir); - if (!cpu) { - return; - } - cs = CPU(cpu); - - if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) && - (env->flags & POWERPC_FLAG_SMT_1LPAR))) { - brdcast = true; - } - - if (ppc_cpu_core_single_threaded(cs) || !brdcast) { - ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1); - return; - } - - /* - * Why is bql needed for walking CPU list? Answer seems to be because ppc - * irq handling needs it, but ppc_set_irq takes the lock itself if needed, - * so could this be removed? - */ - bql_lock(); - THREAD_SIBLING_FOREACH(cs, ccs) { - ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1); - } - bql_unlock(); -} - -#ifdef TARGET_PPC64 -void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb) -{ - helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP); - - if (!dbell_type_server(rb)) { - return; - } - - ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0); -} - -/* - * sends a message to another thread on the same - * multi-threaded processor - */ -void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) -{ - helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP); - - if (!dbell_type_server(rb)) { - return; - } - - msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL); -} -#endif /* TARGET_PPC64 */ - -/* Single-step tracing */ -void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip) -{ - uint32_t error_code = 0; - if (env->insns_flags2 & PPC2_ISA207S) { - /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. 
*/ - env->spr[SPR_POWER_SIAR] = prev_ip; - error_code = PPC_BIT(33); - } - raise_exception_err(env, POWERPC_EXCP_TRACE, error_code); -} - -void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr) -{ - CPUPPCState *env = cpu_env(cs); - uint32_t insn; - - /* Restore state and reload the insn we executed, for filling in DSISR. */ - cpu_restore_state(cs, retaddr); - insn = ppc_ldl_code(env, env->nip); - - switch (env->mmu_model) { - case POWERPC_MMU_SOFT_4xx: - env->spr[SPR_40x_DEAR] = vaddr; - break; - case POWERPC_MMU_BOOKE: - case POWERPC_MMU_BOOKE206: - env->spr[SPR_BOOKE_DEAR] = vaddr; - break; - default: - env->spr[SPR_DAR] = vaddr; - break; - } - - cs->exception_index = POWERPC_EXCP_ALIGN; - env->error_code = insn & 0x03FF0000; - cpu_loop_exit(cs); -} - -void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, - vaddr vaddr, unsigned size, - MMUAccessType access_type, - int mmu_idx, MemTxAttrs attrs, - MemTxResult response, uintptr_t retaddr) -{ - CPUPPCState *env = cpu_env(cs); - - switch (env->excp_model) { -#if defined(TARGET_PPC64) - case POWERPC_EXCP_POWER8: - case POWERPC_EXCP_POWER9: - case POWERPC_EXCP_POWER10: - /* - * Machine check codes can be found in processor User Manual or - * Linux or skiboot source. - */ - if (access_type == MMU_DATA_LOAD) { - env->spr[SPR_DAR] = vaddr; - env->spr[SPR_DSISR] = PPC_BIT(57); - env->error_code = PPC_BIT(42); - - } else if (access_type == MMU_DATA_STORE) { - /* - * MCE for stores in POWER is asynchronous so hardware does - * not set DAR, but QEMU can do better. - */ - env->spr[SPR_DAR] = vaddr; - env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45); - env->error_code |= PPC_BIT(42); - - } else { /* Fetch */ - /* - * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching - * the instruction, so that must always be clear for fetches. - */ - env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45); - } - break; -#endif - default: - /* - * TODO: Check behaviour for other CPUs, for now do nothing. - * Could add a basic MCE even if real hardware ignores. 
- */ - return; - } - - cs->exception_index = POWERPC_EXCP_MCHECK; - cpu_loop_exit_restore(cs, retaddr); -} - -void ppc_cpu_debug_excp_handler(CPUState *cs) -{ -#if defined(TARGET_PPC64) - CPUPPCState *env = cpu_env(cs); - - if (env->insns_flags2 & PPC2_ISA207S) { - if (cs->watchpoint_hit) { - if (cs->watchpoint_hit->flags & BP_CPU) { - env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr; - env->spr[SPR_DSISR] = PPC_BIT(41); - cs->watchpoint_hit = NULL; - raise_exception(env, POWERPC_EXCP_DSI); - } - cs->watchpoint_hit = NULL; - } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) { - raise_exception_err(env, POWERPC_EXCP_TRACE, - PPC_BIT(33) | PPC_BIT(43)); - } - } -#endif -} - -bool ppc_cpu_debug_check_breakpoint(CPUState *cs) -{ -#if defined(TARGET_PPC64) - CPUPPCState *env = cpu_env(cs); - - if (env->insns_flags2 & PPC2_ISA207S) { - target_ulong priv; - - priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63); - switch (priv) { - case 0x1: /* problem */ - return env->msr & ((target_ulong)1 << MSR_PR); - case 0x2: /* supervisor */ - return (!(env->msr & ((target_ulong)1 << MSR_PR)) && - !(env->msr & ((target_ulong)1 << MSR_HV))); - case 0x3: /* hypervisor */ - return (!(env->msr & ((target_ulong)1 << MSR_PR)) && - (env->msr & ((target_ulong)1 << MSR_HV))); - default: - g_assert_not_reached(); - } - } -#endif - - return false; -} - -bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp) -{ -#if defined(TARGET_PPC64) - CPUPPCState *env = cpu_env(cs); - - if (env->insns_flags2 & PPC2_ISA207S) { - if (wp == env->dawr0_watchpoint) { - uint32_t dawrx = env->spr[SPR_DAWRX0]; - bool wt = extract32(dawrx, PPC_BIT_NR(59), 1); - bool wti = extract32(dawrx, PPC_BIT_NR(60), 1); - bool hv = extract32(dawrx, PPC_BIT_NR(61), 1); - bool sv = extract32(dawrx, PPC_BIT_NR(62), 1); - bool pr = extract32(dawrx, PPC_BIT_NR(62), 1); - - if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) { - return false; - } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) { - return false; - } else if (!sv) { - return false; - } - - if (!wti) { - if (env->msr & ((target_ulong)1 << MSR_DR)) { - if (!wt) { - return false; - } - } else { - if (wt) { - return false; - } - } - } - - return true; - } - } -#endif - - return false; -} - -#endif /* !CONFIG_USER_ONLY */ -#endif /* CONFIG_TCG */ diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c index 230466a87f3..07b782f971b 100644 --- a/target/ppc/fpu_helper.c +++ b/target/ppc/fpu_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" #include "internal.h" #include "fpu/softfloat.h" @@ -155,8 +154,7 @@ void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \ } else if (tp##_is_infinity(arg)) { \ fprf = neg ? 
0x09 << FPSCR_FPRF : 0x05 << FPSCR_FPRF; \ } else { \ - float_status dummy = { }; /* snan_bit_is_one = 0 */ \ - if (tp##_is_signaling_nan(arg, &dummy)) { \ + if (tp##_is_signaling_nan(arg, &env->fp_status)) { \ fprf = 0x00 << FPSCR_FPRF; \ } else { \ fprf = 0x11 << FPSCR_FPRF; \ diff --git a/target/ppc/helper.h b/target/ppc/helper.h index 5a77e761bd3..ca414f2f43d 100644 --- a/target/ppc/helper.h +++ b/target/ppc/helper.h @@ -28,6 +28,8 @@ DEF_HELPER_2(store_pcr, void, env, tl) DEF_HELPER_2(store_ciabr, void, env, tl) DEF_HELPER_2(store_dawr0, void, env, tl) DEF_HELPER_2(store_dawrx0, void, env, tl) +DEF_HELPER_2(store_dawr1, void, env, tl) +DEF_HELPER_2(store_dawrx1, void, env, tl) DEF_HELPER_2(store_mmcr0, void, env, tl) DEF_HELPER_2(store_mmcr1, void, env, tl) DEF_HELPER_2(store_mmcrA, void, env, tl) @@ -733,6 +735,8 @@ DEF_HELPER_2(store_tfmr, void, env, tl) DEF_HELPER_FLAGS_2(store_sprc, TCG_CALL_NO_RWG, void, env, tl) DEF_HELPER_FLAGS_1(load_sprd, TCG_CALL_NO_RWG_SE, tl, env) DEF_HELPER_FLAGS_2(store_sprd, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_1(load_pmsr, TCG_CALL_NO_RWG_SE, tl, env) +DEF_HELPER_FLAGS_2(store_pmcr, TCG_CALL_NO_RWG, void, env, tl) #endif DEF_HELPER_2(store_sdr1, void, env, tl) DEF_HELPER_2(store_pidr, void, env, tl) diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c index 02076e96fbd..7e5726871e5 100644 --- a/target/ppc/helper_regs.c +++ b/target/ppc/helper_regs.c @@ -20,13 +20,15 @@ #include "qemu/osdep.h" #include "cpu.h" #include "qemu/main-loop.h" -#include "exec/exec-all.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" +#include "exec/cputlb.h" +#include "system/kvm.h" +#include "system/tcg.h" #include "helper_regs.h" #include "power8-pmu.h" #include "cpu-models.h" #include "spr_common.h" +#include "accel/tcg/cpu-ops.h" +#include "internal.h" /* Swap temporary saved registers with GPRs */ void hreg_swap_gpr_tgpr(CPUPPCState *env) @@ -83,15 +85,16 @@ static bool hreg_check_bhrb_enable(CPUPPCState *env) static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env) { uint32_t hflags = 0; - #if defined(TARGET_PPC64) - if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) { + target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0]; + + if (mmcr0 & MMCR0_PMCC0) { hflags |= 1 << HFLAGS_PMCC0; } - if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) { + if (mmcr0 & MMCR0_PMCC1) { hflags |= 1 << HFLAGS_PMCC1; } - if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE) { + if (mmcr0 & MMCR0_PMCjCE) { hflags |= 1 << HFLAGS_PMCJCE; } if (hreg_check_bhrb_enable(env)) { @@ -101,9 +104,9 @@ static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env) #ifndef CONFIG_USER_ONLY if (env->pmc_ins_cnt) { hflags |= 1 << HFLAGS_INSN_CNT; - } - if (env->pmc_ins_cnt & 0x1e) { - hflags |= 1 << HFLAGS_PMC_OTHER; + if (env->pmc_ins_cnt & 0x1e) { + hflags |= 1 << HFLAGS_PMC_OTHER; + } } #endif #endif @@ -143,10 +146,10 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env) if (ppc_flags & POWERPC_FLAG_DE) { target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0]; - if ((dbcr0 & DBCR0_ICMP) && FIELD_EX64(env->msr, MSR, DE)) { + if ((dbcr0 & DBCR0_ICMP) && FIELD_EX64(msr, MSR, DE)) { hflags |= 1 << HFLAGS_SE; } - if ((dbcr0 & DBCR0_BRT) && FIELD_EX64(env->msr, MSR, DE)) { + if ((dbcr0 & DBCR0_BRT) && FIELD_EX64(msr, MSR, DE)) { hflags |= 1 << HFLAGS_BE; } } else { @@ -254,26 +257,23 @@ void hreg_update_pmu_hflags(CPUPPCState *env) env->hflags |= hreg_compute_pmu_hflags_value(env); } -#ifdef CONFIG_DEBUG_TCG -void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc, - uint64_t *cs_base, uint32_t 
*flags) +TCGTBCPUState ppc_get_tb_cpu_state(CPUState *cs) { + CPUPPCState *env = cpu_env(cs); uint32_t hflags_current = env->hflags; - uint32_t hflags_rebuilt; - *pc = env->nip; - *cs_base = 0; - *flags = hflags_current; - - hflags_rebuilt = hreg_compute_hflags_value(env); +#ifdef CONFIG_DEBUG_TCG + uint32_t hflags_rebuilt = hreg_compute_hflags_value(env); if (unlikely(hflags_current != hflags_rebuilt)) { cpu_abort(env_cpu(env), "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n", hflags_current, hflags_rebuilt); } -} #endif + return (TCGTBCPUState){ .pc = env->nip, .flags = hflags_current }; +} + void cpu_interrupt_exittb(CPUState *cs) { /* diff --git a/target/ppc/helper_regs.h b/target/ppc/helper_regs.h index 8196c1346dc..b928c2c452d 100644 --- a/target/ppc/helper_regs.h +++ b/target/ppc/helper_regs.h @@ -20,6 +20,8 @@ #ifndef HELPER_REGS_H #define HELPER_REGS_H +#include "target/ppc/cpu.h" + void hreg_swap_gpr_tgpr(CPUPPCState *env); void hreg_compute_hflags(CPUPPCState *env); void hreg_update_pmu_hflags(CPUPPCState *env); diff --git a/target/ppc/internal.h b/target/ppc/internal.h index 20fb2ec593c..7723350227f 100644 --- a/target/ppc/internal.h +++ b/target/ppc/internal.h @@ -21,6 +21,7 @@ #include "exec/breakpoint.h" #include "hw/registerfields.h" #include "exec/page-protection.h" +#include "accel/tcg/tb-cpu-state.h" /* PM instructions */ typedef enum { @@ -268,6 +269,8 @@ static inline void pte_invalidate(target_ulong *pte0) #define PTE_PTEM_MASK 0x7FFFFFBF #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B) +uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr); + #ifdef CONFIG_USER_ONLY void ppc_cpu_record_sigsegv(CPUState *cs, vaddr addr, MMUAccessType access_type, @@ -287,7 +290,11 @@ void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, void ppc_cpu_debug_excp_handler(CPUState *cs); bool ppc_cpu_debug_check_breakpoint(CPUState *cs); bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp); -#endif + +G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason); +void powerpc_excp(PowerPCCPU *cpu, int excp); + +#endif /* !CONFIG_USER_ONLY */ FIELD(GER_MSK, XMSK, 0, 4) FIELD(GER_MSK, YMSK, 4, 4) @@ -302,4 +309,6 @@ static inline int ger_pack_masks(int pmsk, int ymsk, int xmsk) return msk; } +TCGTBCPUState ppc_get_tb_cpu_state(CPUState *cs); + #endif /* PPC_INTERNAL_H */ diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index 907dba60d1b..015658049e6 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -26,10 +26,10 @@ #include "cpu.h" #include "cpu-models.h" #include "qemu/timer.h" -#include "sysemu/hw_accel.h" +#include "system/hw_accel.h" #include "kvm_ppc.h" -#include "sysemu/cpus.h" -#include "sysemu/device_tree.h" +#include "system/cpus.h" +#include "system/device_tree.h" #include "mmu-hash64.h" #include "hw/ppc/spapr.h" @@ -37,19 +37,19 @@ #include "hw/hw.h" #include "hw/ppc/ppc.h" #include "migration/qemu-file-types.h" -#include "sysemu/watchdog.h" +#include "system/watchdog.h" #include "trace.h" #include "gdbstub/enums.h" #include "exec/memattrs.h" -#include "exec/ram_addr.h" -#include "sysemu/hostmem.h" +#include "system/ram_addr.h" +#include "system/hostmem.h" #include "qemu/cutils.h" #include "qemu/main-loop.h" #include "qemu/mmap-alloc.h" #include "elf.h" -#include "sysemu/kvm_int.h" -#include "sysemu/kvm.h" -#include "hw/core/accel-cpu.h" +#include "system/kvm_int.h" +#include "system/kvm.h" +#include "accel/accel-cpu-target.h" #include CONFIG_DEVICES @@ -92,6 +92,7 @@ static int cap_large_decr; static int cap_fwnmi; static 
int cap_rpt_invalidate; static int cap_ail_mode_3; +static int cap_dawr1; #ifdef CONFIG_PSERIES static int cap_papr; @@ -152,6 +153,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s) cap_ppc_nested_kvm_hv = kvm_vm_check_extension(s, KVM_CAP_PPC_NESTED_HV); cap_large_decr = kvmppc_get_dec_bits(); cap_fwnmi = kvm_vm_check_extension(s, KVM_CAP_PPC_FWNMI); + cap_dawr1 = kvm_vm_check_extension(s, KVM_CAP_PPC_DAWR1); /* * Note: setting it to false because there is not such capability * in KVM at this moment. @@ -477,6 +479,11 @@ static void kvmppc_hw_debug_points_init(CPUPPCState *cenv) } } +int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp) +{ + return 0; +} + int kvm_arch_init_vcpu(CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); @@ -900,7 +907,7 @@ int kvmppc_put_books_sregs(PowerPCCPU *cpu) return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs); } -int kvm_arch_put_registers(CPUState *cs, int level) +int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; @@ -1205,7 +1212,7 @@ static int kvmppc_get_books_sregs(PowerPCCPU *cpu) return 0; } -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; @@ -1330,7 +1337,6 @@ int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level) void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) { - return; } MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) @@ -2114,6 +2120,16 @@ int kvmppc_set_fwnmi(PowerPCCPU *cpu) return kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_FWNMI, 0); } +bool kvmppc_has_cap_dawr1(void) +{ + return !!cap_dawr1; +} + +int kvmppc_set_cap_dawr1(int enable) +{ + return kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_DAWR1, 0, enable); +} + int kvmppc_smt_threads(void) { return cap_ppc_smt ? 
cap_ppc_smt : 1; @@ -2372,7 +2388,7 @@ static bool kvmppc_cpu_realize(CPUState *cs, Error **errp) return true; } -static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data) +static void kvmppc_host_cpu_class_init(ObjectClass *oc, const void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size"); @@ -2633,7 +2649,7 @@ static int kvm_ppc_register_host_cpu_type(void) return -1; } type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc)); - type_register(&type_info); + type_register_static(&type_info); /* override TCG default cpu type with 'host' cpu model */ object_class_foreach(pseries_machine_class_fixup, TYPE_SPAPR_MACHINE, false, NULL); @@ -2993,7 +3009,7 @@ void kvm_arch_accel_class_init(ObjectClass *oc) { } -static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data) +static void kvm_cpu_accel_class_init(ObjectClass *oc, const void *data) { AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h index 1975fb5ee6c..a1d9ce9f9aa 100644 --- a/target/ppc/kvm_ppc.h +++ b/target/ppc/kvm_ppc.h @@ -9,7 +9,7 @@ #ifndef KVM_PPC_H #define KVM_PPC_H -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "exec/hwaddr.h" #include "cpu.h" @@ -68,6 +68,8 @@ bool kvmppc_has_cap_htm(void); bool kvmppc_has_cap_mmu_radix(void); bool kvmppc_has_cap_mmu_hash_v3(void); bool kvmppc_has_cap_xive(void); +bool kvmppc_has_cap_dawr1(void); +int kvmppc_set_cap_dawr1(int enable); int kvmppc_get_cap_safe_cache(void); int kvmppc_get_cap_safe_bounds_check(void); int kvmppc_get_cap_safe_indirect_branch(void); @@ -219,7 +221,6 @@ static inline int kvmppc_smt_threads(void) static inline void kvmppc_error_append_smt_possible_hint(Error *const *errp) { - return; } static inline int kvmppc_set_smt_threads(int smt) @@ -257,7 +258,6 @@ static inline target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu, static inline void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online) { - return; } static inline void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset) @@ -377,6 +377,16 @@ static inline bool kvmppc_has_cap_xive(void) return false; } +static inline bool kvmppc_has_cap_dawr1(void) +{ + return false; +} + +static inline int kvmppc_set_cap_dawr1(int enable) +{ + abort(); +} + static inline int kvmppc_get_cap_safe_cache(void) { return 0; @@ -444,7 +454,6 @@ static inline PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void) static inline void kvmppc_check_papr_resize_hpt(Error **errp) { - return; } static inline int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, diff --git a/target/ppc/machine.c b/target/ppc/machine.c index 731dd8df358..d72e5ecb94b 100644 --- a/target/ppc/machine.c +++ b/target/ppc/machine.c @@ -1,15 +1,14 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" +#include "system/kvm.h" +#include "system/tcg.h" #include "helper_regs.h" #include "mmu-hash64.h" #include "migration/cpu.h" #include "qapi/error.h" #include "kvm_ppc.h" #include "power8-pmu.h" -#include "sysemu/replay.h" +#include "system/replay.h" static void post_load_update_msr(CPUPPCState *env) { @@ -118,43 +117,11 @@ static const VMStateInfo vmstate_info_vsr = { #define VMSTATE_VSR_ARRAY(_f, _s, _n) \ VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0) -static bool cpu_pre_2_8_migration(void *opaque, int version_id) -{ - PowerPCCPU *cpu = opaque; - - return cpu->pre_2_8_migration; -} - -#if defined(TARGET_PPC64) -static bool cpu_pre_3_0_migration(void *opaque, 
int version_id) -{ - PowerPCCPU *cpu = opaque; - - return cpu->pre_3_0_migration; -} -#endif - static int cpu_pre_save(void *opaque) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; int i; - uint64_t insns_compat_mask = - PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB - | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES - | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES - | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT - | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ - | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC - | PPC_64B | PPC_64BX | PPC_ALTIVEC - | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD; - uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX - | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 - | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 - | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 - | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 - | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM - | PPC2_MEM_LWSYNC; env->spr[SPR_LR] = env->lr; env->spr[SPR_CTR] = env->ctr; @@ -177,35 +144,6 @@ static int cpu_pre_save(void *opaque) env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4]; } - /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */ - if (cpu->pre_2_8_migration) { - /* - * Mask out bits that got added to msr_mask since the versions - * which stupidly included it in the migration stream. - */ - target_ulong metamask = 0 -#if defined(TARGET_PPC64) - | (1ULL << MSR_TS0) - | (1ULL << MSR_TS1) -#endif - ; - cpu->mig_msr_mask = env->msr_mask & ~metamask; - cpu->mig_insns_flags = env->insns_flags & insns_compat_mask; - /* - * CPU models supported by old machines all have - * PPC_MEM_TLBIE, so we set it unconditionally to allow - * backward migration from a POWER9 host to a POWER8 host. - */ - cpu->mig_insns_flags |= PPC_MEM_TLBIE; - cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2; - cpu->mig_nb_BATs = env->nb_BATs; - } - if (cpu->pre_3_0_migration) { - if (cpu->hash64_opts) { - cpu->mig_slb_nr = cpu->hash64_opts->slb_size; - } - } - /* Used to retain migration compatibility for pre 6.0 for 601 machines. 
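
Note on the DAWR1 plumbing introduced above: kvmppc_has_cap_dawr1() and kvmppc_set_cap_dawr1() follow QEMU's usual two-step capability dance, probe KVM_CAP_PPC_DAWR1 once at accelerator init, then enable it per-VM from machine code. The spapr capability glue is not part of these hunks, so the caller below is only a hedged sketch; the function name and error messages are assumptions, while the two kvmppc_* helpers and kvm_enabled()/error_setg() are real.

    /* Hypothetical caller; only kvmppc_has_cap_dawr1() and
     * kvmppc_set_cap_dawr1() come from the hunks above. */
    static void cap_dawr1_apply_sketch(bool requested, Error **errp)
    {
        if (!requested || !kvm_enabled()) {
            return;                     /* TCG needs no KVM capability */
        }
        if (!kvmppc_has_cap_dawr1()) {
            error_setg(errp, "DAWR1 not supported by this KVM host");
            return;
        }
        if (kvmppc_set_cap_dawr1(1) < 0) {
            error_setg(errp, "failed to enable KVM_CAP_PPC_DAWR1");
        }
    }
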
*/ env->hflags_compat_nmsr = 0; @@ -325,7 +263,8 @@ static int cpu_post_load(void *opaque, int version_id) /* Re-set breaks based on regs */ #if defined(TARGET_PPC64) ppc_update_ciabr(env); - ppc_update_daw0(env); + ppc_update_daw(env, 0); + ppc_update_daw(env, 1); #endif /* * TCG needs to re-start the decrementer timer and/or raise the @@ -549,12 +488,11 @@ static int slb_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_slb = { .name = "cpu/slb", - .version_id = 1, + .version_id = 2, .minimum_version_id = 1, .needed = slb_needed, .post_load = slb_post_load, .fields = (const VMStateField[]) { - VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration), VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES), VMSTATE_END_OF_LIST() } @@ -621,7 +559,7 @@ static bool tlbemb_needed(void *opaque) } static const VMStateDescription vmstate_tlbemb = { - .name = "cpu/tlb6xx", + .name = "cpu/tlbemb", .version_id = 1, .minimum_version_id = 1, .needed = tlbemb_needed, @@ -676,7 +614,7 @@ static bool compat_needed(void *opaque) PowerPCCPU *cpu = opaque; assert(!(cpu->compat_pvr && !cpu->vhyp)); - return !cpu->pre_2_10_migration && cpu->compat_pvr != 0; + return cpu->compat_pvr != 0; } static const VMStateDescription vmstate_compat = { @@ -760,12 +698,6 @@ const VMStateDescription vmstate_ppc_cpu = { /* Backward compatible internal state */ VMSTATE_UINTTL(env.hflags_compat_nmsr, PowerPCCPU), - /* Sanity checking */ - VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration), - VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration), - VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU, - cpu_pre_2_8_migration), - VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration), VMSTATE_END_OF_LIST() }, .subsections = (const VMStateDescription * const []) { diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c index 51b137febd6..6ab71a6fcb4 100644 --- a/target/ppc/mem_helper.c +++ b/target/ppc/mem_helper.c @@ -19,11 +19,13 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/target_page.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" #include "helper_regs.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/helper-retaddr.h" +#include "accel/tcg/probe.h" #include "internal.h" #include "qemu/atomic128.h" diff --git a/target/ppc/meson.build b/target/ppc/meson.build index db3b7a0c33b..8eed1fa40ca 100644 --- a/target/ppc/meson.build +++ b/target/ppc/meson.build @@ -14,6 +14,7 @@ ppc_ss.add(when: 'CONFIG_TCG', if_true: files( 'int_helper.c', 'mem_helper.c', 'misc_helper.c', + 'tcg-excp_helper.c', 'timebase_helper.c', 'translate.c', 'power8-pmu.c', diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c index 1b839713753..e7d94625185 100644 --- a/target/ppc/misc_helper.c +++ b/target/ppc/misc_helper.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/helper-proto.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" @@ -233,6 +233,16 @@ void helper_store_dawrx0(CPUPPCState *env, target_ulong value) ppc_store_dawrx0(env, value); } +void helper_store_dawr1(CPUPPCState *env, target_ulong value) +{ + ppc_store_dawr1(env, value); +} + +void helper_store_dawrx1(CPUPPCState *env, target_ulong value) +{ + ppc_store_dawrx1(env, value); +} + /* * DPDES register is shared. 
Each bit reflects the state of the * doorbell interrupt of a thread of the same core. @@ -288,7 +298,7 @@ void helper_store_dpdes(CPUPPCState *env, target_ulong val) PowerPCCPU *ccpu = POWERPC_CPU(ccs); uint32_t thread_id = ppc_cpu_tir(ccpu); - ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id)); + ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id)); } bql_unlock(); } @@ -321,6 +331,10 @@ target_ulong helper_load_sprd(CPUPPCState *env) PnvCore *pc = pnv_cpu_state(cpu)->pnv_core; target_ulong sprc = env->spr[SPR_POWER_SPRC]; + if (pc->big_core) { + pc = pnv_chip_find_core(pc->chip, CPU_CORE(pc)->core_id & ~0x1); + } + switch (sprc & 0x3e0) { case 0: /* SCRATCH0-3 */ case 1: /* SCRATCH4-7 */ @@ -357,6 +371,10 @@ void helper_store_sprd(CPUPPCState *env, target_ulong val) PnvCore *pc = pnv_cpu_state(cpu)->pnv_core; int nr; + if (pc->big_core) { + pc = pnv_chip_find_core(pc->chip, CPU_CORE(pc)->core_id & ~0x1); + } + switch (sprc & 0x3e0) { case 0: /* SCRATCH0-3 */ case 1: /* SCRATCH4-7 */ @@ -367,7 +385,6 @@ void helper_store_sprd(CPUPPCState *env, target_ulong val) * information. Could also dump these upon checkstop. */ nr = (sprc >> 3) & 0x7; - qemu_log("SPRD write 0x" TARGET_FMT_lx " to SCRATCH%d\n", val, nr); pc->scratch[nr] = val; break; default: @@ -376,6 +393,59 @@ void helper_store_sprd(CPUPPCState *env, target_ulong val) break; } } + +target_ulong helper_load_pmsr(CPUPPCState *env) +{ + target_ulong lowerps = extract64(env->spr[SPR_PMCR], PPC_BIT_NR(15), 8); + target_ulong val = 0; + + val |= PPC_BIT(63); /* verion 0x1 (POWER9/10) */ + /* Pmin = 0 */ + /* XXX: POWER9 should be 3 */ + val |= 4ULL << PPC_BIT_NR(31); /* Pmax */ + val |= lowerps << PPC_BIT_NR(15); /* Local actual Pstate */ + val |= lowerps << PPC_BIT_NR(7); /* Global actual Pstate */ + + return val; +} + +static void ppc_set_pmcr(PowerPCCPU *cpu, target_ulong val) +{ + cpu->env.spr[SPR_PMCR] = val; +} + +void helper_store_pmcr(CPUPPCState *env, target_ulong val) +{ + PowerPCCPU *cpu = env_archcpu(env); + CPUState *cs = env_cpu(env); + CPUState *ccs; + + /* Leave version field unchanged (0x1) */ + val &= ~PPC_BITMASK(60, 63); + val |= PPC_BIT(63); + + val &= ~PPC_BITMASK(0, 7); /* UpperPS ignored */ + if (val & PPC_BITMASK(16, 59)) { + qemu_log_mask(LOG_GUEST_ERROR, "Non-zero PMCR reserved bits " + TARGET_FMT_lx"\n", val); + val &= ~PPC_BITMASK(16, 59); + } + + /* DPDES behaves as 1-thread in LPAR-per-thread mode */ + if (ppc_cpu_lpar_single_threaded(cs)) { + ppc_set_pmcr(cpu, val); + return; + } + + /* Does iothread need to be locked for walking CPU list? 
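
The helper_load_pmsr()/helper_store_pmcr() code above leans on QEMU's IBM-style bit helpers, where bit 0 is the most-significant bit of a 64-bit value, so PPC_BIT(63) is the low-order "version" bit and PPC_BIT_NR(31) is a shift of 32. A small standalone sketch of what those macros evaluate to (simplified re-implementations for illustration, not the definitions from target/ppc/cpu.h):

    #include <stdint.h>
    #include <stdio.h>

    /* MSB-0 (IBM) numbering: bit 0 is the most-significant bit. */
    #define BIT_NR(bit)     (63 - (bit))                 /* MSB-0 -> shift */
    #define BIT(bit)        (1ULL << BIT_NR(bit))
    #define BITMASK(bs, be) ((BIT(bs) - BIT(be)) + BIT(bs))

    int main(void)
    {
        /* PPC_BIT(63) is the least-significant bit: the PMSR version field */
        printf("BIT(63)        = %#llx\n", (unsigned long long)BIT(63));
        /* PPC_BIT_NR(31) == 32: "Pmax" lands 32 bits up from the LSB */
        printf("BIT_NR(31)     = %d\n", BIT_NR(31));
        /* PPC_BITMASK(60, 63) covers the four low bits kept for the version */
        printf("BITMASK(60,63) = %#llx\n", (unsigned long long)BITMASK(60, 63));
        return 0;
    }
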
*/ + bql_lock(); + THREAD_SIBLING_FOREACH(cs, ccs) { + PowerPCCPU *ccpu = POWERPC_CPU(ccs); + ppc_set_pmcr(ccpu, val); + } + bql_unlock(); +} + #endif /* defined(TARGET_PPC64) */ void helper_store_pidr(CPUPPCState *env, target_ulong val) diff --git a/target/ppc/mmu-book3s-v3.c b/target/ppc/mmu-book3s-v3.c index a812cb51139..38655563105 100644 --- a/target/ppc/mmu-book3s-v3.c +++ b/target/ppc/mmu-book3s-v3.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "system/memory.h" #include "cpu.h" #include "mmu-hash64.h" #include "mmu-book3s-v3.h" diff --git a/target/ppc/mmu-hash32.c b/target/ppc/mmu-hash32.c index 44b16142ab8..8b980a5aa90 100644 --- a/target/ppc/mmu-hash32.c +++ b/target/ppc/mmu-hash32.c @@ -20,9 +20,9 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/page-protection.h" -#include "sysemu/kvm.h" +#include "exec/target_page.h" +#include "system/kvm.h" #include "kvm_ppc.h" #include "internal.h" #include "mmu-hash32.h" diff --git a/target/ppc/mmu-hash32.h b/target/ppc/mmu-hash32.h index 2838de031c7..04c23ea75ed 100644 --- a/target/ppc/mmu-hash32.h +++ b/target/ppc/mmu-hash32.h @@ -3,6 +3,8 @@ #ifndef CONFIG_USER_ONLY +#include "system/memory.h" + bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, hwaddr *raddrp, int *psizep, int *protp, int mmu_idx, bool guest_visible); diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c index 5e1983e3341..dd337558aa6 100644 --- a/target/ppc/mmu-hash64.c +++ b/target/ppc/mmu-hash64.c @@ -20,11 +20,11 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/page-protection.h" #include "qemu/error-report.h" #include "qemu/qemu-print.h" -#include "sysemu/hw_accel.h" +#include "system/hw_accel.h" +#include "system/memory.h" #include "kvm_ppc.h" #include "mmu-hash64.h" #include "exec/log.h" @@ -993,6 +993,7 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, int exec_prot, pp_prot, amr_prot, prot; int need_prot; hwaddr raddr; + bool vrma = false; /* * Note on LPCR usage: 970 uses HID4, but our special variant of @@ -1022,6 +1023,7 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, } } else if (ppc_hash64_use_vrma(env)) { /* Emulated VRMA mode */ + vrma = true; slb = &vrma_slbe; if (build_vrma_slbe(cpu, slb) != 0) { /* Invalid VRMA setup, machine check */ @@ -1136,7 +1138,12 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte); pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte); - amr_prot = ppc_hash64_amr_prot(cpu, pte); + if (vrma) { + /* VRMA does not check keys */ + amr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + } else { + amr_prot = ppc_hash64_amr_prot(cpu, pte); + } prot = exec_prot & pp_prot & amr_prot; need_prot = check_prot_access_type(PAGE_RWX, access_type); diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c index be7a45f2549..33ac3412901 100644 --- a/target/ppc/mmu-radix64.c +++ b/target/ppc/mmu-radix64.c @@ -19,10 +19,10 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/page-protection.h" #include "qemu/error-report.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" +#include "system/memory.h" #include "kvm_ppc.h" #include "exec/log.h" #include "internal.h" @@ -571,6 +571,20 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, prtbe0 = ldq_phys(cs->as, h_raddr); } + /* + * Some Linux uses a zero process table 
entry in PID!=0 for kernel context + * without userspace in order to fault on NULL dereference, because using + * PIDR=0 for the kernel causes the Q0 page table to be used to translate + * Q3 as well. Check for that case here to avoid the invalid configuration + * message. + */ + if (unlikely(!prtbe0)) { + if (guest_visible) { + ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG); + } + return 1; + } + /* Walk Radix Tree from Process Table Entry to Convert EA to RA */ *g_page_size = PRTBE_R_GET_RTS(prtbe0); base_addr = prtbe0 & PRTBE_R_RPDB; diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c index 60f87362104..52d48615ac2 100644 --- a/target/ppc/mmu_common.c +++ b/target/ppc/mmu_common.c @@ -20,12 +20,12 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "cpu.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "kvm_ppc.h" #include "mmu-hash64.h" #include "mmu-hash32.h" -#include "exec/exec-all.h" #include "exec/page-protection.h" +#include "exec/target_page.h" #include "exec/log.h" #include "helper_regs.h" #include "qemu/error-report.h" diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c index b0a0676beba..ac607054027 100644 --- a/target/ppc/mmu_helper.c +++ b/target/ppc/mmu_helper.c @@ -20,12 +20,13 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "cpu.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "kvm_ppc.h" #include "mmu-hash64.h" #include "mmu-hash32.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" +#include "exec/target_page.h" #include "exec/log.h" #include "helper_regs.h" #include "qemu/error-report.h" @@ -35,7 +36,7 @@ #include "mmu-radix64.h" #include "mmu-booke.h" #include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" /* #define FLUSH_ALL_TLBS */ @@ -316,7 +317,7 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) break; default: /* Should never reach here with other MMU models */ - assert(0); + g_assert_not_reached(); } #else ppc_tlb_invalidate_all(env); diff --git a/target/ppc/power8-pmu.c b/target/ppc/power8-pmu.c index db9ee8e96b0..2a7a5b493af 100644 --- a/target/ppc/power8-pmu.c +++ b/target/ppc/power8-pmu.c @@ -13,7 +13,6 @@ #include "qemu/osdep.h" #include "cpu.h" #include "helper_regs.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "qemu/error-report.h" #include "qemu/timer.h" diff --git a/target/ppc/ppc-qmp-cmds.c b/target/ppc/ppc-qmp-cmds.c index a25d86a8d19..7022564604f 100644 --- a/target/ppc/ppc-qmp-cmds.c +++ b/target/ppc/ppc-qmp-cmds.c @@ -28,7 +28,8 @@ #include "qemu/ctype.h" #include "monitor/hmp-target.h" #include "monitor/hmp.h" -#include "qapi/qapi-commands-machine-target.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" #include "cpu-models.h" #include "cpu-qom.h" @@ -175,6 +176,15 @@ int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval) return -EINVAL; } +CpuModelExpansionInfo * +qmp_query_cpu_model_expansion(CpuModelExpansionType type, + CpuModelInfo *model, + Error **errp) +{ + error_setg(errp, "CPU model expansion is not supported on this target"); + return NULL; +} + static void ppc_cpu_defs_entry(gpointer data, gpointer user_data) { ObjectClass *oc = data; diff --git a/target/ppc/spr_common.h b/target/ppc/spr_common.h index 01aff449bcc..84c910c440f 100644 --- a/target/ppc/spr_common.h +++ b/target/ppc/spr_common.h @@ -165,6 +165,8 @@ void spr_write_cfar(DisasContext *ctx, int sprn, int gprn); void 
spr_write_ciabr(DisasContext *ctx, int sprn, int gprn); void spr_write_dawr0(DisasContext *ctx, int sprn, int gprn); void spr_write_dawrx0(DisasContext *ctx, int sprn, int gprn); +void spr_write_dawr1(DisasContext *ctx, int sprn, int gprn); +void spr_write_dawrx1(DisasContext *ctx, int sprn, int gprn); void spr_write_ureg(DisasContext *ctx, int sprn, int gprn); void spr_read_purr(DisasContext *ctx, int gprn, int sprn); void spr_write_purr(DisasContext *ctx, int sprn, int gprn); @@ -204,6 +206,8 @@ void spr_write_hmer(DisasContext *ctx, int sprn, int gprn); void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn); void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn); void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn); +void spr_read_pmsr(DisasContext *ctx, int gprn, int sprn); +void spr_write_pmcr(DisasContext *ctx, int sprn, int gprn); void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn); void spr_read_ppr32(DisasContext *ctx, int sprn, int gprn); void spr_write_ppr32(DisasContext *ctx, int sprn, int gprn); diff --git a/target/ppc/tcg-excp_helper.c b/target/ppc/tcg-excp_helper.c new file mode 100644 index 00000000000..f835be51563 --- /dev/null +++ b/target/ppc/tcg-excp_helper.c @@ -0,0 +1,851 @@ +/* + * PowerPC exception emulation helpers for QEMU (TCG specific) + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qemu/log.h" +#include "target/ppc/cpu.h" +#include "accel/tcg/cpu-ldst.h" +#include "exec/helper-proto.h" +#include "system/runstate.h" + +#include "helper_regs.h" +#include "hw/ppc/ppc.h" +#include "internal.h" +#include "cpu.h" +#include "trace.h" + +/*****************************************************************************/ +/* Exceptions processing helpers */ + +void raise_exception_err_ra(CPUPPCState *env, uint32_t exception, + uint32_t error_code, uintptr_t raddr) +{ + CPUState *cs = env_cpu(env); + + cs->exception_index = exception; + env->error_code = error_code; + cpu_loop_exit_restore(cs, raddr); +} + +void helper_raise_exception_err(CPUPPCState *env, uint32_t exception, + uint32_t error_code) +{ + raise_exception_err_ra(env, exception, error_code, 0); +} + +void helper_raise_exception(CPUPPCState *env, uint32_t exception) +{ + raise_exception_err_ra(env, exception, 0, 0); +} + +#ifndef CONFIG_USER_ONLY + +static G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception, + uint32_t error_code) +{ + raise_exception_err_ra(env, exception, error_code, 0); +} + +static G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception) +{ + raise_exception_err_ra(env, exception, 0, 0); +} + +#endif /* !CONFIG_USER_ONLY */ + +void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2, + uint32_t flags) +{ + if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) || + ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) || + ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) || + ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) || + ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) { + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_TRAP, GETPC()); + } +} + +#ifdef TARGET_PPC64 +void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2, + uint32_t flags) +{ + if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) || + ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) || + ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) || + ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) || + ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) { + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_TRAP, GETPC()); + } +} +#endif /* TARGET_PPC64 */ + +static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane) +{ + const uint16_t c = 0xfffc; + const uint64_t z0 = 0xfa2561cdf44ac398ULL; + uint16_t z = 0, temp; + uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32]; + + for (int i = 3; i >= 0; i--) { + k[i] = key & 0xffff; + key >>= 16; + } + xleft[0] = x & 0xffff; + xright[0] = (x >> 16) & 0xffff; + + for (int i = 0; i < 28; i++) { + z = (z0 >> (63 - i)) & 1; + temp = ror16(k[i + 3], 3) ^ k[i + 1]; + k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1); + } + + for (int i = 0; i < 8; i++) { + eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)]; + eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)]; + eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)]; + eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)]; + } + + for (int i = 0; i < 32; i++) { + fxleft[i] = (rol16(xleft[i], 1) & + rol16(xleft[i], 8)) ^ rol16(xleft[i], 2); + xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i]; + xright[i + 1] = xleft[i]; + } + + return (((uint32_t)xright[32]) << 16) | xleft[32]; +} + +static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key) +{ + uint64_t stage0_h = 0ULL, stage0_l = 0ULL; + uint64_t stage1_h, stage1_l; + + for (int i = 0; 
i < 4; i++) { + stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1)); + stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i); + stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1)); + stage0_l |= (ra & 0xff) << (8 * 2 * i); + rb >>= 8; + ra >>= 8; + } + + stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32; + stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1); + stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32; + stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3); + + return stage1_h ^ stage1_l; +} + +static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra, + target_ulong rb, uint64_t key, bool store) +{ + uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash; + + if (store) { + cpu_stq_data_ra(env, ea, calculated_hash, GETPC()); + } else { + loaded_hash = cpu_ldq_data_ra(env, ea, GETPC()); + if (loaded_hash != calculated_hash) { + raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_TRAP, GETPC()); + } + } +} + +#include "qemu/guest-random.h" + +#ifdef TARGET_PPC64 +#define HELPER_HASH(op, key, store, dexcr_aspect) \ +void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \ + target_ulong rb) \ +{ \ + if (env->msr & R_MSR_PR_MASK) { \ + if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK || \ + env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \ + return; \ + } else if (!(env->msr & R_MSR_HV_MASK)) { \ + if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK || \ + env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \ + return; \ + } else if (!(env->msr & R_MSR_S_MASK)) { \ + if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK)) \ + return; \ + } \ + \ + do_hash(env, ea, ra, rb, key, store); \ +} +#else +#define HELPER_HASH(op, key, store, dexcr_aspect) \ +void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \ + target_ulong rb) \ +{ \ + do_hash(env, ea, ra, rb, key, store); \ +} +#endif /* TARGET_PPC64 */ + +HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE) +HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE) +HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE) +HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE) + +#ifndef CONFIG_USER_ONLY + +void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + CPUPPCState *env = cpu_env(cs); + uint32_t insn; + + /* Restore state and reload the insn we executed, for filling in DSISR. */ + cpu_restore_state(cs, retaddr); + insn = ppc_ldl_code(env, env->nip); + + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_4xx: + env->spr[SPR_40x_DEAR] = vaddr; + break; + case POWERPC_MMU_BOOKE: + case POWERPC_MMU_BOOKE206: + env->spr[SPR_BOOKE_DEAR] = vaddr; + break; + default: + env->spr[SPR_DAR] = vaddr; + break; + } + + cs->exception_index = POWERPC_EXCP_ALIGN; + env->error_code = insn & 0x03FF0000; + cpu_loop_exit(cs); +} + +void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr vaddr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, uintptr_t retaddr) +{ + CPUPPCState *env = cpu_env(cs); + + switch (env->excp_model) { +#if defined(TARGET_PPC64) + case POWERPC_EXCP_POWER8: + case POWERPC_EXCP_POWER9: + case POWERPC_EXCP_POWER10: + case POWERPC_EXCP_POWER11: + /* + * Machine check codes can be found in processor User Manual or + * Linux or skiboot source. 
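
The condition cluster in helper_TW()/helper_TD() above encodes the architected 5-bit TO field of the trap instructions: 0x10 traps on signed less-than, 0x08 on signed greater-than, 0x04 on equal, 0x02 on unsigned less-than and 0x01 on unsigned greater-than. A standalone restatement of the same predicate, unrolled for readability:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same test as helper_TW above: trap when any selected comparison holds. */
    static bool tw_traps(int32_t a, int32_t b, unsigned to)
    {
        return (a < b                     && (to & 0x10)) ||
               (a > b                     && (to & 0x08)) ||
               (a == b                    && (to & 0x04)) ||
               ((uint32_t)a < (uint32_t)b && (to & 0x02)) ||
               ((uint32_t)a > (uint32_t)b && (to & 0x01));
    }

    int main(void)
    {
        /* "twgti rA, 0" style check: trap when rA is signed-greater than 0 */
        printf("%d\n", tw_traps(5, 0, 0x08));   /* 1: traps */
        printf("%d\n", tw_traps(-1, 0, 0x08));  /* 0: no trap */
        return 0;
    }
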
+ */ + if (access_type == MMU_DATA_LOAD) { + env->spr[SPR_DAR] = vaddr; + env->spr[SPR_DSISR] = PPC_BIT(57); + env->error_code = PPC_BIT(42); + + } else if (access_type == MMU_DATA_STORE) { + /* + * MCE for stores in POWER is asynchronous so hardware does + * not set DAR, but QEMU can do better. + */ + env->spr[SPR_DAR] = vaddr; + env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45); + env->error_code |= PPC_BIT(42); + + } else { /* Fetch */ + /* + * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching + * the instruction, so that must always be clear for fetches. + */ + env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45); + } + break; +#endif + default: + /* + * TODO: Check behaviour for other CPUs, for now do nothing. + * Could add a basic MCE even if real hardware ignores. + */ + return; + } + + cs->exception_index = POWERPC_EXCP_MCHECK; + cpu_loop_exit_restore(cs, retaddr); +} + +void ppc_cpu_debug_excp_handler(CPUState *cs) +{ +#if defined(TARGET_PPC64) + CPUPPCState *env = cpu_env(cs); + + if (env->insns_flags2 & PPC2_ISA207S) { + if (cs->watchpoint_hit) { + if (cs->watchpoint_hit->flags & BP_CPU) { + env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr; + env->spr[SPR_DSISR] = PPC_BIT(41); + cs->watchpoint_hit = NULL; + raise_exception(env, POWERPC_EXCP_DSI); + } + cs->watchpoint_hit = NULL; + } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) { + raise_exception_err(env, POWERPC_EXCP_TRACE, + PPC_BIT(33) | PPC_BIT(43)); + } + } +#endif +} + +bool ppc_cpu_debug_check_breakpoint(CPUState *cs) +{ +#if defined(TARGET_PPC64) + CPUPPCState *env = cpu_env(cs); + + if (env->insns_flags2 & PPC2_ISA207S) { + target_ulong priv; + + priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63); + switch (priv) { + case 0x1: /* problem */ + return env->msr & ((target_ulong)1 << MSR_PR); + case 0x2: /* supervisor */ + return (!(env->msr & ((target_ulong)1 << MSR_PR)) && + !(env->msr & ((target_ulong)1 << MSR_HV))); + case 0x3: /* hypervisor */ + return (!(env->msr & ((target_ulong)1 << MSR_PR)) && + (env->msr & ((target_ulong)1 << MSR_HV))); + default: + g_assert_not_reached(); + } + } +#endif + + return false; +} + +bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp) +{ +#if defined(TARGET_PPC64) + CPUPPCState *env = cpu_env(cs); + bool wt, wti, hv, sv, pr; + uint32_t dawrx; + + if ((env->insns_flags2 & PPC2_ISA207S) && + (wp == env->dawr_watchpoint[0])) { + dawrx = env->spr[SPR_DAWRX0]; + } else if ((env->insns_flags2 & PPC2_ISA310) && + (wp == env->dawr_watchpoint[1])) { + dawrx = env->spr[SPR_DAWRX1]; + } else { + return false; + } + + wt = extract32(dawrx, PPC_BIT_NR(59), 1); + wti = extract32(dawrx, PPC_BIT_NR(60), 1); + hv = extract32(dawrx, PPC_BIT_NR(61), 1); + sv = extract32(dawrx, PPC_BIT_NR(62), 1); + pr = extract32(dawrx, PPC_BIT_NR(62), 1); + + if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) { + return false; + } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) { + return false; + } else if (!sv) { + return false; + } + + if (!wti) { + if (env->msr & ((target_ulong)1 << MSR_DR)) { + return wt; + } else { + return !wt; + } + } + + return true; +#endif + + return false; +} + +/* + * This stops the machine and logs CPU state without killing QEMU (like + * cpu_abort()) because it is often a guest error as opposed to a QEMU error, + * so the machine can still be debugged. 
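
The CIABR match in ppc_cpu_debug_check_breakpoint() above packs the privilege filter into the two low-order bits of the register (0b01 problem state, 0b10 supervisor, 0b11 hypervisor). Condensed into a standalone predicate; the real code asserts instead of returning false for 0b00, because a disabled CIABR never installs a breakpoint in the first place:

    #include <stdbool.h>

    /* priv = CIABR[62:63]; msr_pr/msr_hv are the current MSR[PR]/MSR[HV] bits */
    static bool ciabr_priv_matches(unsigned priv, bool msr_pr, bool msr_hv)
    {
        switch (priv) {
        case 1:  return msr_pr;                /* problem state only */
        case 2:  return !msr_pr && !msr_hv;    /* supervisor */
        case 3:  return !msr_pr && msr_hv;     /* hypervisor */
        default: return false;                 /* 0b00: breakpoint disabled */
        }
    }
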
+ */ +G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason) +{ + CPUState *cs = env_cpu(env); + FILE *f; + + f = qemu_log_trylock(); + if (f) { + fprintf(f, "Entering checkstop state: %s\n", reason); + cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP); + qemu_log_unlock(f); + } + + /* + * This stops the machine and logs CPU state without killing QEMU + * (like cpu_abort()) so the machine can still be debugged (because + * it is often a guest error). + */ + qemu_system_guest_panicked(NULL); + cpu_loop_exit_noexc(cs); +} + +/* Return true iff byteswap is needed to load instruction */ +static inline bool insn_need_byteswap(CPUArchState *env) +{ + /* SYSTEM builds TARGET_BIG_ENDIAN. Need to swap when MSR[LE] is set */ + return !!(env->msr & ((target_ulong)1 << MSR_LE)); +} + +uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr) +{ + uint32_t insn = cpu_ldl_code(env, addr); + + if (insn_need_byteswap(env)) { + insn = bswap32(insn); + } + + return insn; +} + +#if defined(TARGET_PPC64) +void helper_attn(CPUPPCState *env) +{ + /* POWER attn is unprivileged when enabled by HID, otherwise illegal */ + if ((*env->check_attn)(env)) { + powerpc_checkstop(env, "host executed attn"); + } else { + raise_exception_err(env, POWERPC_EXCP_HV_EMU, + POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL); + } +} + +void helper_scv(CPUPPCState *env, uint32_t lev) +{ + if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) { + raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev); + } else { + raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV); + } +} + +void helper_pminsn(CPUPPCState *env, uint32_t insn) +{ + CPUState *cs = env_cpu(env); + + cs->halted = 1; + + /* Condition for waking up at 0x100 */ + env->resume_as_sreset = (insn != PPC_PM_STOP) || + (env->spr[SPR_PSSCR] & PSSCR_EC); + + /* HDECR is not to wake from PM state, it may have already fired */ + if (env->resume_as_sreset) { + PowerPCCPU *cpu = env_archcpu(env); + ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0); + } + + ppc_maybe_interrupt(env); +} + +#endif /* TARGET_PPC64 */ +void helper_store_msr(CPUPPCState *env, target_ulong val) +{ + uint32_t excp = hreg_store_msr(env, val, 0); + + if (excp != 0) { + cpu_interrupt_exittb(env_cpu(env)); + raise_exception(env, excp); + } +} + +void helper_ppc_maybe_interrupt(CPUPPCState *env) +{ + ppc_maybe_interrupt(env); +} + +static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) +{ + /* MSR:POW cannot be set by any form of rfi */ + msr &= ~(1ULL << MSR_POW); + + /* MSR:TGPR cannot be set by any form of rfi */ + if (env->flags & POWERPC_FLAG_TGPR) { + msr &= ~(1ULL << MSR_TGPR); + } + +#ifdef TARGET_PPC64 + /* Switching to 32-bit ? 
Crop the nip */ + if (!msr_is_64bit(env, msr)) { + nip = (uint32_t)nip; + } +#else + nip = (uint32_t)nip; +#endif + /* XXX: beware: this is false if VLE is supported */ + env->nip = nip & ~((target_ulong)0x00000003); + hreg_store_msr(env, msr, 1); + trace_ppc_excp_rfi(env->nip, env->msr); + /* + * No need to raise an exception here, as rfi is always the last + * insn of a TB + */ + cpu_interrupt_exittb(env_cpu(env)); + /* Reset the reservation */ + env->reserve_addr = -1; + + /* Context synchronizing: check if TCG TLB needs flush */ + check_tlb_flush(env, false); +} + +void helper_rfi(CPUPPCState *env) +{ + do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful); +} + +#ifdef TARGET_PPC64 +void helper_rfid(CPUPPCState *env) +{ + /* + * The architecture defines a number of rules for which bits can + * change but in practice, we handle this in hreg_store_msr() + * which will be called by do_rfi(), so there is no need to filter + * here + */ + do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]); +} + +void helper_rfscv(CPUPPCState *env) +{ + do_rfi(env, env->lr, env->ctr); +} + +void helper_hrfid(CPUPPCState *env) +{ + do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]); +} + +void helper_rfebb(CPUPPCState *env, target_ulong s) +{ + target_ulong msr = env->msr; + + /* + * Handling of BESCR bits 32:33 according to PowerISA v3.1: + * + * "If BESCR 32:33 != 0b00 the instruction is treated as if + * the instruction form were invalid." + */ + if (env->spr[SPR_BESCR] & BESCR_INVALID) { + raise_exception_err(env, POWERPC_EXCP_PROGRAM, + POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL); + } + + env->nip = env->spr[SPR_EBBRR]; + + /* Switching to 32-bit ? Crop the nip */ + if (!msr_is_64bit(env, msr)) { + env->nip = (uint32_t)env->spr[SPR_EBBRR]; + } + + if (s) { + env->spr[SPR_BESCR] |= BESCR_GE; + } else { + env->spr[SPR_BESCR] &= ~BESCR_GE; + } +} + +/* + * Triggers or queues an 'ebb_excp' EBB exception. All checks + * but FSCR, HFSCR and msr_pr must be done beforehand. + * + * PowerISA v3.1 isn't clear about whether an EBB should be + * postponed or cancelled if the EBB facility is unavailable. + * Our assumption here is that the EBB is cancelled if both + * FSCR and HFSCR EBB facilities aren't available. + */ +static void do_ebb(CPUPPCState *env, int ebb_excp) +{ + PowerPCCPU *cpu = env_archcpu(env); + + /* + * FSCR_EBB and FSCR_IC_EBB are the same bits used with + * HFSCR. 
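
do_rfi() above applies two address fix-ups that are easy to miss: when the restored MSR selects 32-bit mode (the SF bit that msr_is_64bit() tests) the return address is cropped to 32 bits, and the two low-order bits are always cleared. A hedged standalone restatement of just that address calculation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* srr0 is the saved return address, msr_sf the 64-bit-mode bit of the
     * MSR being restored. */
    static uint64_t rfi_target(uint64_t srr0, bool msr_sf)
    {
        uint64_t nip = msr_sf ? srr0 : (uint32_t)srr0;  /* crop for 32-bit */
        return nip & ~(uint64_t)3;                      /* word alignment */
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)rfi_target(0x120001002ULL, false));
        /* prints 0x20001000: upper bits and the low 2 bits are dropped */
        return 0;
    }
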
+ */ + helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB); + helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB); + + if (ebb_excp == POWERPC_EXCP_PERFM_EBB) { + env->spr[SPR_BESCR] |= BESCR_PMEO; + } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) { + env->spr[SPR_BESCR] |= BESCR_EEO; + } + + if (FIELD_EX64(env->msr, MSR, PR)) { + powerpc_excp(cpu, ebb_excp); + } else { + ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1); + } +} + +void raise_ebb_perfm_exception(CPUPPCState *env) +{ + bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE && + env->spr[SPR_BESCR] & BESCR_PME && + env->spr[SPR_BESCR] & BESCR_GE; + + if (!perfm_ebb_enabled) { + return; + } + + do_ebb(env, POWERPC_EXCP_PERFM_EBB); +} +#endif /* TARGET_PPC64 */ + +/*****************************************************************************/ +/* Embedded PowerPC specific helpers */ +void helper_40x_rfci(CPUPPCState *env) +{ + do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]); +} + +void helper_rfci(CPUPPCState *env) +{ + do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]); +} + +void helper_rfdi(CPUPPCState *env) +{ + /* FIXME: choose CSRR1 or DSRR1 based on cpu type */ + do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]); +} + +void helper_rfmci(CPUPPCState *env) +{ + /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */ + do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]); +} + +/* Embedded.Processor Control */ +static int dbell2irq(target_ulong rb) +{ + int msg = rb & DBELL_TYPE_MASK; + int irq = -1; + + switch (msg) { + case DBELL_TYPE_DBELL: + irq = PPC_INTERRUPT_DOORBELL; + break; + case DBELL_TYPE_DBELL_CRIT: + irq = PPC_INTERRUPT_CDOORBELL; + break; + case DBELL_TYPE_G_DBELL: + case DBELL_TYPE_G_DBELL_CRIT: + case DBELL_TYPE_G_DBELL_MC: + /* XXX implement */ + default: + break; + } + + return irq; +} + +void helper_msgclr(CPUPPCState *env, target_ulong rb) +{ + int irq = dbell2irq(rb); + + if (irq < 0) { + return; + } + + ppc_set_irq(env_archcpu(env), irq, 0); +} + +void helper_msgsnd(target_ulong rb) +{ + int irq = dbell2irq(rb); + int pir = rb & DBELL_PIRTAG_MASK; + CPUState *cs; + + if (irq < 0) { + return; + } + + bql_lock(); + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *cenv = &cpu->env; + + if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { + ppc_set_irq(cpu, irq, 1); + } + } + bql_unlock(); +} + +/* Server Processor Control */ + +static bool dbell_type_server(target_ulong rb) +{ + /* + * A Directed Hypervisor Doorbell message is sent only if the + * message type is 5. All other types are reserved and the + * instruction is a no-op + */ + return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER; +} + +static inline bool dbell_bcast_core(target_ulong rb) +{ + return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE; +} + +static inline bool dbell_bcast_subproc(target_ulong rb) +{ + return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC; +} + +/* + * Send an interrupt to a thread in the same core as env). + */ +static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq) +{ + PowerPCCPU *cpu = env_archcpu(env); + CPUState *cs = env_cpu(env); + + if (ppc_cpu_lpar_single_threaded(cs)) { + if (target_tir == 0) { + ppc_set_irq(cpu, irq, 1); + } + } else { + CPUState *ccs; + + /* Does iothread need to be locked for walking CPU list? 
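
The embedded (BookE) doorbell path above splits into two decisions: dbell2irq() maps the message type in rB to an interrupt line (normal vs. critical doorbell, guest doorbells unimplemented), and helper_msgsnd() delivers to a CPU when either the broadcast bit is set or the PIR tag in rB matches that CPU's SPR_BOOKE_PIR. Distilled into a standalone sketch; the type constants are illustrative stand-ins, not the values behind DBELL_TYPE_MASK:

    #include <stdbool.h>
    #include <stdint.h>

    enum { T_DBELL = 0, T_DBELL_CRIT = 1, T_G_DBELL = 2 };   /* illustrative */

    static int dbell_irq(int type)
    {
        switch (type) {
        case T_DBELL:      return 1;    /* PPC_INTERRUPT_DOORBELL */
        case T_DBELL_CRIT: return 2;    /* PPC_INTERRUPT_CDOORBELL */
        default:           return -1;   /* guest doorbells: not implemented */
        }
    }

    static bool dbell_hits_cpu(bool broadcast, uint32_t rb_pir_tag, uint32_t cpu_pir)
    {
        return broadcast || rb_pir_tag == cpu_pir;
    }
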
*/ + bql_lock(); + THREAD_SIBLING_FOREACH(cs, ccs) { + PowerPCCPU *ccpu = POWERPC_CPU(ccs); + if (target_tir == ppc_cpu_tir(ccpu)) { + ppc_set_irq(ccpu, irq, 1); + break; + } + } + bql_unlock(); + } +} + +void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb) +{ + if (!dbell_type_server(rb)) { + return; + } + + ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0); +} + +void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb) +{ + int pir = rb & DBELL_PROCIDTAG_MASK; + bool brdcast = false; + CPUState *cs, *ccs; + PowerPCCPU *cpu; + + if (!dbell_type_server(rb)) { + return; + } + + /* POWER8 msgsnd is like msgsndp (targets a thread within core) */ + if (!(env->insns_flags2 & PPC2_ISA300)) { + msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL); + return; + } + + /* POWER9 and later msgsnd is a global (targets any thread) */ + cpu = ppc_get_vcpu_by_pir(pir); + if (!cpu) { + return; + } + cs = CPU(cpu); + + if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) && + (env->flags & POWERPC_FLAG_SMT_1LPAR))) { + brdcast = true; + } + + if (ppc_cpu_core_single_threaded(cs) || !brdcast) { + ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1); + return; + } + + /* + * Why is bql needed for walking CPU list? Answer seems to be because ppc + * irq handling needs it, but ppc_set_irq takes the lock itself if needed, + * so could this be removed? + */ + bql_lock(); + THREAD_SIBLING_FOREACH(cs, ccs) { + ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1); + } + bql_unlock(); +} + +#ifdef TARGET_PPC64 +void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb) +{ + helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP); + + if (!dbell_type_server(rb)) { + return; + } + + ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0); +} + +/* + * sends a message to another thread on the same + * multi-threaded processor + */ +void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) +{ + helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP); + + if (!dbell_type_server(rb)) { + return; + } + + msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL); +} +#endif /* TARGET_PPC64 */ + +/* Single-step tracing */ +void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip) +{ + uint32_t error_code = 0; + if (env->insns_flags2 & PPC2_ISA207S) { + /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. 
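
helper_book3s_msgsnd() above has three distinct outcomes depending on the ISA level and the broadcast field: pre-ISA v3.0 (POWER8) cores treat msgsnd like msgsndp and only reach a sibling thread selected by TIR, while POWER9 and later pick any vCPU by PIR and may fan out to the whole target core when a core or sub-processor broadcast is requested. A boiled-down routing sketch with illustrative names, not the QEMU data structures:

    /* Illustrative routing function mirroring the control flow above. */
    enum msgsnd_action { MSG_TO_SIBLING_TIR, MSG_TO_PIR_CPU, MSG_TO_WHOLE_CORE };

    static enum msgsnd_action msgsnd_route(bool isa300,
                                           bool bcast_core_or_subproc,
                                           bool target_single_threaded)
    {
        if (!isa300) {
            return MSG_TO_SIBLING_TIR;     /* POWER8: behaves like msgsndp */
        }
        if (bcast_core_or_subproc && !target_single_threaded) {
            return MSG_TO_WHOLE_CORE;      /* HDOORBELL on every sibling */
        }
        return MSG_TO_PIR_CPU;             /* single target selected by PIR */
    }
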
*/ + env->spr[SPR_POWER_SIAR] = prev_ip; + error_code = PPC_BIT(33); + } + raise_exception_err(env, POWERPC_EXCP_TRACE, error_code); +} +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c index 73120323b4e..7209b418fb6 100644 --- a/target/ppc/timebase_helper.c +++ b/target/ppc/timebase_helper.c @@ -20,7 +20,6 @@ #include "cpu.h" #include "hw/ppc/ppc.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" #include "qemu/log.h" #include "qemu/main-loop.h" diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 71513ba9646..27f90c3cc56 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -21,7 +21,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" -#include "exec/exec-all.h" +#include "exec/target_page.h" #include "tcg/tcg-op.h" #include "tcg/tcg-op-gvec.h" #include "qemu/host-utils.h" @@ -30,6 +30,7 @@ #include "exec/helper-gen.h" #include "exec/translator.h" +#include "exec/translation-block.h" #include "exec/log.h" #include "qemu/atomic128.h" #include "spr_common.h" @@ -636,6 +637,18 @@ void spr_write_dawrx0(DisasContext *ctx, int sprn, int gprn) translator_io_start(&ctx->base); gen_helper_store_dawrx0(tcg_env, cpu_gpr[gprn]); } + +void spr_write_dawr1(DisasContext *ctx, int sprn, int gprn) +{ + translator_io_start(&ctx->base); + gen_helper_store_dawr1(tcg_env, cpu_gpr[gprn]); +} + +void spr_write_dawrx1(DisasContext *ctx, int sprn, int gprn) +{ + translator_io_start(&ctx->base); + gen_helper_store_dawrx1(tcg_env, cpu_gpr[gprn]); +} #endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */ /* CTR */ @@ -1325,6 +1338,22 @@ void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn) translator_io_start(&ctx->base); gen_helper_store_lpcr(tcg_env, cpu_gpr[gprn]); } + +void spr_read_pmsr(DisasContext *ctx, int gprn, int sprn) +{ + translator_io_start(&ctx->base); + gen_helper_load_pmsr(cpu_gpr[gprn], tcg_env); +} + +void spr_write_pmcr(DisasContext *ctx, int sprn, int gprn) +{ + if (!gen_serialize_core_lpar(ctx)) { + return; + } + translator_io_start(&ctx->base); + gen_helper_store_pmcr(tcg_env, cpu_gpr[gprn]); +} + #endif /* !defined(CONFIG_USER_ONLY) */ void spr_read_tar(DisasContext *ctx, int gprn, int sprn) @@ -1588,16 +1617,13 @@ static opc_handler_t invalid_handler = { static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf) { TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); TCGv_i32 t = tcg_temp_new_i32(); - tcg_gen_movi_tl(t0, CRF_EQ); - tcg_gen_movi_tl(t1, CRF_LT); tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU), - t0, arg0, arg1, t1, t0); - tcg_gen_movi_tl(t1, CRF_GT); + t0, arg0, arg1, + tcg_constant_tl(CRF_LT), tcg_constant_tl(CRF_EQ)); tcg_gen_movcond_tl((s ? 
TCG_COND_GT : TCG_COND_GTU), - t0, arg0, arg1, t1, t0); + t0, arg0, arg1, tcg_constant_tl(CRF_GT), t0); tcg_gen_trunc_tl_i32(t, t0); tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so); @@ -1719,11 +1745,10 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_mov_tl(ca32, ca); } } else { - TCGv zero = tcg_constant_tl(0); if (add_ca) { - tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero); - tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero); + tcg_gen_addcio_tl(t0, ca, arg1, arg2, ca); } else { + TCGv zero = tcg_constant_tl(0); tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero); } gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0); @@ -1823,7 +1848,7 @@ static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); } - if (unlikely(Rc(ctx->opcode) != 0)) { + if (unlikely(compute_rc0)) { gen_set_Rc0(ctx, ret); } } @@ -1922,11 +1947,9 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_mov_tl(cpu_ca32, cpu_ca); } } else if (add_ca) { - TCGv zero, inv1 = tcg_temp_new(); + TCGv inv1 = tcg_temp_new(); tcg_gen_not_tl(inv1, arg1); - zero = tcg_constant_tl(0); - tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero); - tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero); + tcg_gen_addcio_tl(t0, cpu_ca, arg2, inv1, cpu_ca); gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0); } else { tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1); @@ -2974,8 +2997,8 @@ static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop, tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop); /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */ - tcg_gen_movi_tl(u, 1 << (memop_size(memop) * 8 - 1)); - tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u); + tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, + tcg_constant_tl(1 << (memop_size(memop) * 8 - 1))); } static void gen_ld_atomic(DisasContext *ctx, MemOp memop) @@ -3601,7 +3624,6 @@ static void pmu_count_insns(DisasContext *ctx) #else static void pmu_count_insns(DisasContext *ctx) { - return; } #endif /* #if defined(TARGET_PPC64) */ @@ -6426,8 +6448,6 @@ static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn) opc_handler_t **table, *handler; uint32_t inval; - ctx->opcode = insn; - LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n", insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn), ctx->le_mode ? 
"little" : "big"); @@ -6561,6 +6581,7 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) ctx->base.pc_next = pc += 4; if (!is_prefix_insn(ctx, insn)) { + ctx->opcode = insn; ok = (decode_insn32(ctx, insn) || decode_legacy(cpu, ctx, insn)); } else if ((pc & 63) == 0) { @@ -6672,8 +6693,8 @@ static const TranslatorOps ppc_tr_ops = { .tb_stop = ppc_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void ppc_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext ctx; diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc index 70d0ad2e71a..92d6e8c6032 100644 --- a/target/ppc/translate/vmx-impl.c.inc +++ b/target/ppc/translate/vmx-impl.c.inc @@ -994,8 +994,8 @@ static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask, { TCGv_i64 ah, al, vrb, n, t0, t1, zero = tcg_constant_i64(0); - REQUIRE_VECTOR(ctx); REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); ah = tcg_temp_new_i64(); al = tcg_temp_new_i64(); diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc index 40a87ddc4a3..00ad57c6282 100644 --- a/target/ppc/translate/vsx-impl.c.inc +++ b/target/ppc/translate/vsx-impl.c.inc @@ -61,8 +61,8 @@ static bool trans_LXVD2X(DisasContext *ctx, arg_LXVD2X *a) TCGv EA; TCGv_i64 t0; - REQUIRE_VSX(ctx); REQUIRE_INSNS_FLAGS2(ctx, VSX); + REQUIRE_VSX(ctx); t0 = tcg_temp_new_i64(); gen_set_access_type(ctx, ACCESS_INT); @@ -80,8 +80,8 @@ static bool trans_LXVW4X(DisasContext *ctx, arg_LXVW4X *a) TCGv EA; TCGv_i64 xth, xtl; - REQUIRE_VSX(ctx); REQUIRE_INSNS_FLAGS2(ctx, VSX); + REQUIRE_VSX(ctx); xth = tcg_temp_new_i64(); xtl = tcg_temp_new_i64(); @@ -113,12 +113,12 @@ static bool trans_LXVWSX(DisasContext *ctx, arg_LXVWSX *a) TCGv EA; TCGv_i32 data; + REQUIRE_INSNS_FLAGS2(ctx, ISA300); if (a->rt < 32) { REQUIRE_VSX(ctx); } else { REQUIRE_VECTOR(ctx); } - REQUIRE_INSNS_FLAGS2(ctx, ISA300); gen_set_access_type(ctx, ACCESS_INT); EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]); @@ -133,8 +133,8 @@ static bool trans_LXVDSX(DisasContext *ctx, arg_LXVDSX *a) TCGv EA; TCGv_i64 data; - REQUIRE_VSX(ctx); REQUIRE_INSNS_FLAGS2(ctx, VSX); + REQUIRE_VSX(ctx); gen_set_access_type(ctx, ACCESS_INT); EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]); @@ -185,8 +185,8 @@ static bool trans_LXVH8X(DisasContext *ctx, arg_LXVH8X *a) TCGv EA; TCGv_i64 xth, xtl; - REQUIRE_VSX(ctx); REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VSX(ctx); xth = tcg_temp_new_i64(); xtl = tcg_temp_new_i64(); @@ -208,8 +208,8 @@ static bool trans_LXVB16X(DisasContext *ctx, arg_LXVB16X *a) TCGv EA; TCGv_i128 data; - REQUIRE_VSX(ctx); REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VSX(ctx); data = tcg_temp_new_i128(); gen_set_access_type(ctx, ACCESS_INT); @@ -312,8 +312,8 @@ static bool trans_STXVD2X(DisasContext *ctx, arg_STXVD2X *a) TCGv EA; TCGv_i64 t0; - REQUIRE_VSX(ctx); REQUIRE_INSNS_FLAGS2(ctx, VSX); + REQUIRE_VSX(ctx); t0 = tcg_temp_new_i64(); gen_set_access_type(ctx, ACCESS_INT); @@ -331,8 +331,8 @@ static bool trans_STXVW4X(DisasContext *ctx, arg_STXVW4X *a) TCGv EA; TCGv_i64 xsh, xsl; - REQUIRE_VSX(ctx); REQUIRE_INSNS_FLAGS2(ctx, VSX); + REQUIRE_VSX(ctx); xsh = tcg_temp_new_i64(); xsl = tcg_temp_new_i64(); @@ -364,8 +364,8 @@ static bool trans_STXVH8X(DisasContext *ctx, arg_STXVH8X *a) TCGv EA; TCGv_i64 xsh, xsl; - REQUIRE_VSX(ctx); REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VSX(ctx); xsh = tcg_temp_new_i64(); xsl = 
tcg_temp_new_i64(); @@ -394,8 +394,8 @@ static bool trans_STXVB16X(DisasContext *ctx, arg_STXVB16X *a) TCGv EA; TCGv_i128 data; - REQUIRE_VSX(ctx); REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VSX(ctx); data = tcg_temp_new_i128(); gen_set_access_type(ctx, ACCESS_INT); @@ -2244,7 +2244,7 @@ static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a, static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired) { - if (paired || a->rt >= 32) { + if (paired || a->rt < 32) { REQUIRE_VSX(ctx); } else { REQUIRE_VECTOR(ctx); diff --git a/target/ppc/user_only_helper.c b/target/ppc/user_only_helper.c index a4d07a0d0dd..ae210eb8474 100644 --- a/target/ppc/user_only_helper.c +++ b/target/ppc/user_only_helper.c @@ -20,7 +20,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "internal.h" void ppc_cpu_record_sigsegv(CPUState *cs, vaddr address, diff --git a/target/riscv/Kconfig b/target/riscv/Kconfig index c332616d361..11bc09b4146 100644 --- a/target/riscv/Kconfig +++ b/target/riscv/Kconfig @@ -1,9 +1,9 @@ config RISCV32 bool - imply ARM_COMPATIBLE_SEMIHOSTING if TCG + select ARM_COMPATIBLE_SEMIHOSTING if TCG select DEVICE_TREE # needed by boot.c config RISCV64 bool - imply ARM_COMPATIBLE_SEMIHOSTING if TCG + select ARM_COMPATIBLE_SEMIHOSTING if TCG select DEVICE_TREE # needed by boot.c diff --git a/target/riscv/arch_dump.c b/target/riscv/arch_dump.c index 434c8a3dbb7..12b68799070 100644 --- a/target/riscv/arch_dump.c +++ b/target/riscv/arch_dump.c @@ -19,7 +19,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "elf.h" -#include "sysemu/dump.h" +#include "system/dump.h" /* struct user_regs_struct from arch/riscv/include/uapi/asm/ptrace.h */ struct riscv64_user_regs { diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c index b99c4a39a1f..e9c8d7f7780 100644 --- a/target/riscv/bitmanip_helper.c +++ b/target/riscv/bitmanip_helper.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu/host-utils.h" -#include "exec/exec-all.h" +#include "exec/target_long.h" #include "exec/helper-proto.h" #include "tcg/tcg.h" diff --git a/target/riscv/cpu-param.h b/target/riscv/cpu-param.h index 1fbd64939dc..cfdc67c258c 100644 --- a/target/riscv/cpu-param.h +++ b/target/riscv/cpu-param.h @@ -2,22 +2,28 @@ * RISC-V cpu parameters for qemu. * * Copyright (c) 2017-2018 SiFive, Inc. 
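
The VSX hunks above all make the same reordering: REQUIRE_INSNS_FLAGS2() rejects the opcode outright when the extension is absent (falling through to illegal-instruction handling), whereas REQUIRE_VSX()/REQUIRE_VECTOR() raise a facility-unavailable interrupt when MSR[VSX]/MSR[VEC] is simply switched off. Checking the flags first means a CPU that lacks the instruction reports it as illegal rather than implying the facility merely needs enabling. The resulting shape, sketched with an illustrative trans_FOO (not compilable outside the decode tree):

    static bool trans_FOO(DisasContext *ctx, arg_FOO *a)
    {
        REQUIRE_INSNS_FLAGS2(ctx, ISA300);  /* does this CPU have the insn?  */
        REQUIRE_VSX(ctx);                   /* is the VSX facility enabled?  */
        /* ... actual translation ... */
        return true;
    }
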
- * SPDX-License-Identifier: GPL-2.0+ + * SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef RISCV_CPU_PARAM_H #define RISCV_CPU_PARAM_H #if defined(TARGET_RISCV64) -# define TARGET_LONG_BITS 64 # define TARGET_PHYS_ADDR_SPACE_BITS 56 /* 44-bit PPN */ # define TARGET_VIRT_ADDR_SPACE_BITS 48 /* sv48 */ #elif defined(TARGET_RISCV32) -# define TARGET_LONG_BITS 32 # define TARGET_PHYS_ADDR_SPACE_BITS 34 /* 22-bit PPN */ # define TARGET_VIRT_ADDR_SPACE_BITS 32 /* sv32 */ #endif #define TARGET_PAGE_BITS 12 /* 4 KiB Pages */ + +/* + * RISC-V-specific extra insn start words: + * 1: Original instruction opcode + * 2: more information about instruction + */ +#define TARGET_INSN_START_EXTRA_WORDS 2 + /* * The current MMU Modes are: * - U mode 0b000 @@ -28,6 +34,4 @@ * - M mode HLV/HLVX/HSV 0b111 */ -#define TCG_GUEST_DEFAULT_MO 0 - #endif diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h index 3670cfe6d9a..75f4e434085 100644 --- a/target/riscv/cpu-qom.h +++ b/target/riscv/cpu-qom.h @@ -29,8 +29,8 @@ #define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU #define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX) -#define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any") #define TYPE_RISCV_CPU_MAX RISCV_CPU_TYPE_NAME("max") +#define TYPE_RISCV_CPU_MAX32 RISCV_CPU_TYPE_NAME("max32") #define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32") #define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64") #define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128") @@ -40,15 +40,22 @@ #define TYPE_RISCV_CPU_RV64E RISCV_CPU_TYPE_NAME("rv64e") #define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64") #define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64") +#define TYPE_RISCV_CPU_RVA23U64 RISCV_CPU_TYPE_NAME("rva23u64") +#define TYPE_RISCV_CPU_RVA23S64 RISCV_CPU_TYPE_NAME("rva23s64") #define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex") #define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c") +#define TYPE_RISCV_CPU_SIFIVE_E RISCV_CPU_TYPE_NAME("sifive-e") #define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31") #define TYPE_RISCV_CPU_SIFIVE_E34 RISCV_CPU_TYPE_NAME("sifive-e34") #define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51") +#define TYPE_RISCV_CPU_SIFIVE_U RISCV_CPU_TYPE_NAME("sifive-u") #define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34") #define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54") #define TYPE_RISCV_CPU_THEAD_C906 RISCV_CPU_TYPE_NAME("thead-c906") #define TYPE_RISCV_CPU_VEYRON_V1 RISCV_CPU_TYPE_NAME("veyron-v1") +#define TYPE_RISCV_CPU_TT_ASCALON RISCV_CPU_TYPE_NAME("tt-ascalon") +#define TYPE_RISCV_CPU_XIANGSHAN_NANHU RISCV_CPU_TYPE_NAME("xiangshan-nanhu") +#define TYPE_RISCV_CPU_XIANGSHAN_KMH RISCV_CPU_TYPE_NAME("xiangshan-kunminghu") #define TYPE_RISCV_CPU_HOST RISCV_CPU_TYPE_NAME("host") OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU) diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index a90808a3bac..d055ddf4623 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -24,7 +24,6 @@ #include "cpu.h" #include "cpu_vendorid.h" #include "internals.h" -#include "exec/exec-all.h" #include "qapi/error.h" #include "qapi/visitor.h" #include "qemu/error-report.h" @@ -32,9 +31,9 @@ #include "hw/core/qdev-prop-internal.h" #include "migration/vmstate.h" #include "fpu/softfloat-helpers.h" -#include "sysemu/device_tree.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" +#include "system/device_tree.h" +#include "system/kvm.h" +#include "system/tcg.h" #include "kvm/kvm_riscv.h" #include 
"tcg/tcg-cpu.h" #include "tcg/tcg.h" @@ -42,7 +41,7 @@ /* RISC-V CPU definitions */ static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH"; const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, - RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0}; + RVC, RVS, RVU, RVH, RVG, RVB, 0}; /* * From vector_helper.c @@ -74,6 +73,13 @@ bool riscv_cpu_option_set(const char *optname) return g_hash_table_contains(general_user_opts, optname); } +static void riscv_cpu_cfg_merge(RISCVCPUConfig *dest, const RISCVCPUConfig *src) +{ +#define BOOL_FIELD(x) dest->x |= src->x; +#define TYPED_FIELD(type, x, default_) if (src->x != default_) dest->x = src->x; +#include "cpu_cfg_fields.h.inc" +} + #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ {#_name, _min_ver, CPU_CFG_OFFSET(_prop)} @@ -105,7 +111,9 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11), ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11), ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11), - ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11), + ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse), + ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp), + ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss), ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr), ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr), @@ -115,12 +123,12 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop), ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), - ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11), + ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_12), ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha), ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), - ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b), ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), + ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b), ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), @@ -181,22 +189,50 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt), ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), + ISA_EXT_DATA_ENTRY(sdtrig, PRIV_VERSION_1_12_0, debug), + ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12), + ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha), + ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12), + ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12), + ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12), + ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12), + ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12), ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), + ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg), ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf), + ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind), + ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp), ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, 
ext_smepmp), + ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi), + ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm), + ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm), ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), + ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg), ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11), ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12), + ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind), + ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp), + ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm), + ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm), + ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen), + ISA_EXT_DATA_ENTRY(ssstrict, PRIV_VERSION_1_12_0, has_priv_1_12), ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12), ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12), + ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12), + ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm), ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade), + ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr), + ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr), ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), + ISA_EXT_DATA_ENTRY(svrsw60t59b, PRIV_VERSION_1_13_0, ext_svrsw60t59b), + ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte), + ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc), ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), @@ -210,7 +246,7 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), - DEFINE_PROP_END_OF_LIST(), + { }, }; bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset) @@ -283,7 +319,7 @@ static const char * const riscv_excp_names[] = { "load_page_fault", "reserved", "store_page_fault", - "reserved", + "double_trap", "reserved", "reserved", "reserved", @@ -330,7 +366,7 @@ void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) int riscv_cpu_max_xlen(RISCVCPUClass *mcc) { - return 16 << mcc->misa_mxl_max; + return 16 << mcc->def->misa_mxl_max; } #ifndef CONFIG_USER_ONLY @@ -363,7 +399,7 @@ static uint8_t satp_mode_from_str(const char *satp_mode_str) g_assert_not_reached(); } -uint8_t satp_mode_max_from_map(uint32_t map) +static uint8_t satp_mode_max_from_map(uint32_t map) { /* * 'map = 0' will make us return (31 - 32), which C will @@ -407,17 +443,23 @@ const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) g_assert_not_reached(); } -static void set_satp_mode_max_supported(RISCVCPU *cpu, - uint8_t satp_mode) +static bool get_satp_mode_supported(RISCVCPU *cpu, uint16_t *supported) { - bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; + bool rv32 = riscv_cpu_is_32bit(cpu); const bool *valid_vm = rv32 ? 
valid_vm_1_10_32 : valid_vm_1_10_64; + int satp_mode = cpu->cfg.max_satp_mode; + + if (satp_mode == -1) { + return false; + } + *supported = 0; for (int i = 0; i <= satp_mode; ++i) { if (valid_vm[i]) { - cpu->cfg.satp_mode.supported |= (1 << i); + *supported |= (1 << i); } } + return true; } /* Set the satp mode to the max supported */ @@ -426,310 +468,26 @@ static void set_satp_mode_default_map(RISCVCPU *cpu) /* * Bare CPUs do not default to the max available. * Users must set a valid satp_mode in the command - * line. + * line. Otherwise, leave the existing max_satp_mode + * in place. */ if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) { warn_report("No satp mode set. Defaulting to 'bare'"); - cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE); - return; + cpu->cfg.max_satp_mode = VM_1_10_MBARE; } - - cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; -} -#endif - -static void riscv_any_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - CPURISCVState *env = &cpu->env; - riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVU); - -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), - riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? - VM_1_10_SV32 : VM_1_10_SV57); -#endif - - env->priv_ver = PRIV_VERSION_LATEST; - - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; -} - -static void riscv_max_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - CPURISCVState *env = &cpu->env; - - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; - - env->priv_ver = PRIV_VERSION_LATEST; -#ifndef CONFIG_USER_ONLY -#ifdef TARGET_RISCV32 - set_satp_mode_max_supported(cpu, VM_1_10_SV32); -#else - set_satp_mode_max_supported(cpu, VM_1_10_SV57); -#endif -#endif -} - -#if defined(TARGET_RISCV64) -static void rv64_base_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - CPURISCVState *env = &cpu->env; - - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; - - /* Set latest version of privileged specification */ - env->priv_ver = PRIV_VERSION_LATEST; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); -#endif } - -static void rv64_sifive_u_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - CPURISCVState *env = &cpu->env; - riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); - env->priv_ver = PRIV_VERSION_1_10_0; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); #endif - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; -} - -static void rv64_sifive_e_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); - env->priv_ver = PRIV_VERSION_1_10_0; #ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_MBARE); -#endif - - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.pmp = true; -} - -static void rv64_thead_c906_cpu_init(Object *obj) +static void riscv_register_custom_csrs(RISCVCPU *cpu, const RISCVCSR *csr_list) { - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU); - env->priv_ver = PRIV_VERSION_1_11_0; - - cpu->cfg.ext_zfa = true; - cpu->cfg.ext_zfh = true; 
- cpu->cfg.mmu = true; - cpu->cfg.ext_xtheadba = true; - cpu->cfg.ext_xtheadbb = true; - cpu->cfg.ext_xtheadbs = true; - cpu->cfg.ext_xtheadcmo = true; - cpu->cfg.ext_xtheadcondmov = true; - cpu->cfg.ext_xtheadfmemidx = true; - cpu->cfg.ext_xtheadmac = true; - cpu->cfg.ext_xtheadmemidx = true; - cpu->cfg.ext_xtheadmempair = true; - cpu->cfg.ext_xtheadsync = true; - - cpu->cfg.mvendorid = THEAD_VENDOR_ID; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_SV39); - th_register_custom_csrs(cpu); -#endif - - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.pmp = true; -} - -static void rv64_veyron_v1_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH); - env->priv_ver = PRIV_VERSION_1_12_0; - - /* Enable ISA extensions */ - cpu->cfg.mmu = true; - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.pmp = true; - cpu->cfg.ext_zicbom = true; - cpu->cfg.cbom_blocksize = 64; - cpu->cfg.cboz_blocksize = 64; - cpu->cfg.ext_zicboz = true; - cpu->cfg.ext_smaia = true; - cpu->cfg.ext_ssaia = true; - cpu->cfg.ext_sscofpmf = true; - cpu->cfg.ext_sstc = true; - cpu->cfg.ext_svinval = true; - cpu->cfg.ext_svnapot = true; - cpu->cfg.ext_svpbmt = true; - cpu->cfg.ext_smstateen = true; - cpu->cfg.ext_zba = true; - cpu->cfg.ext_zbb = true; - cpu->cfg.ext_zbc = true; - cpu->cfg.ext_zbs = true; - cpu->cfg.ext_XVentanaCondOps = true; - - cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; - cpu->cfg.marchid = VEYRON_V1_MARCHID; - cpu->cfg.mimpid = VEYRON_V1_MIMPID; - -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_SV48); -#endif -} - -#ifdef CONFIG_TCG -static void rv128_base_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - CPURISCVState *env = &cpu->env; - - if (qemu_tcg_mttcg_enabled()) { - /* Missing 128-bit aligned atomics */ - error_report("128-bit RISC-V currently does not work with Multi " - "Threaded TCG. 
Please use: -accel tcg,thread=single"); - exit(EXIT_FAILURE); + for (size_t i = 0; csr_list[i].csr_ops.name; i++) { + int csrno = csr_list[i].csrno; + const riscv_csr_operations *csr_ops = &csr_list[i].csr_ops; + if (!csr_list[i].insertion_test || csr_list[i].insertion_test(cpu)) { + riscv_set_csr_ops(csrno, csr_ops); + } } - - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; - - /* Set latest version of privileged specification */ - env->priv_ver = PRIV_VERSION_LATEST; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); -#endif -} -#endif /* CONFIG_TCG */ - -static void rv64i_bare_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - riscv_cpu_set_misa_ext(env, RVI); -} - -static void rv64e_bare_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - riscv_cpu_set_misa_ext(env, RVE); -} - -#else /* !TARGET_RISCV64 */ - -static void rv32_base_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - CPURISCVState *env = &cpu->env; - - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; - - /* Set latest version of privileged specification */ - env->priv_ver = PRIV_VERSION_LATEST; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); -#endif -} - -static void rv32_sifive_u_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - CPURISCVState *env = &cpu->env; - riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); - env->priv_ver = PRIV_VERSION_1_10_0; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); -#endif - - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; -} - -static void rv32_sifive_e_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); - env->priv_ver = PRIV_VERSION_1_10_0; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_MBARE); -#endif - - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.pmp = true; -} - -static void rv32_ibex_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU); - env->priv_ver = PRIV_VERSION_1_12_0; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_MBARE); -#endif - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.pmp = true; - cpu->cfg.ext_smepmp = true; -} - -static void rv32_imafcu_nommu_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU); - env->priv_ver = PRIV_VERSION_1_10_0; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_MBARE); -#endif - - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.pmp = true; -} - -static void rv32i_bare_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - riscv_cpu_set_misa_ext(env, RVI); -} - -static void rv32e_bare_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - riscv_cpu_set_misa_ext(env, RVE); } #endif @@ -805,13 +563,6 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) CSR_MSCRATCH, 
CSR_SSCRATCH, CSR_SATP, - CSR_MMTE, - CSR_UPMBASE, - CSR_UPMMASK, - CSR_SPMBASE, - CSR_SPMMASK, - CSR_MPMBASE, - CSR_MPMMASK, }; for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { @@ -839,6 +590,12 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) } } if (flags & CPU_DUMP_FPU) { + target_ulong val = 0; + RISCVException res = riscv_csrrw_debug(env, CSR_FCSR, &val, 0, 0); + if (res == RISCV_EXCP_NONE) { + qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", + csr_ops[CSR_FCSR].name, val); + } for (i = 0; i < 32; i++) { qemu_fprintf(f, " %-8s %016" PRIx64, riscv_fpr_regnames[i], env->fpr[i]); @@ -908,9 +665,9 @@ static vaddr riscv_cpu_get_pc(CPUState *cs) return env->pc; } +#ifndef CONFIG_USER_ONLY bool riscv_cpu_has_work(CPUState *cs) { -#ifndef CONFIG_USER_ONLY RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; /* @@ -920,15 +677,8 @@ bool riscv_cpu_has_work(CPUState *cs) return riscv_cpu_all_pending(env) != 0 || riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE || riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE; -#else - return true; -#endif -} - -static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch) -{ - return riscv_env_mmu_index(cpu_env(cs), ifetch); } +#endif /* !CONFIG_USER_ONLY */ static void riscv_cpu_reset_hold(Object *obj, ResetType type) { @@ -945,7 +695,7 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type) mcc->parent_phases.hold(obj, type); } #ifndef CONFIG_USER_ONLY - env->misa_mxl = mcc->misa_mxl_max; + env->misa_mxl = mcc->def->misa_mxl_max; env->priv = PRV_M; env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); if (env->misa_mxl > MXL_RV32) { @@ -965,6 +715,9 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type) env->mstatus_hs = set_field(env->mstatus_hs, MSTATUS64_UXL, env->misa_mxl); } + if (riscv_cpu_cfg(env)->ext_smdbltrp) { + env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1); + } } env->mcause = 0; env->miclaim = MIP_SGEIP; @@ -991,8 +744,6 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type) } i++; } - /* mmte is supposed to have pm.current hardwired to 1 */ - env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); /* * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor @@ -1012,18 +763,35 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type) } pmp_unlock_entries(env); +#else + env->priv = PRV_U; + env->senvcfg = 0; + env->menvcfg = 0; #endif + + /* on reset elp is clear */ + env->elp = false; + /* on reset ssp is set to 0 */ + env->ssp = 0; + env->xl = riscv_cpu_mxl(env); - riscv_cpu_update_mask(env); cs->exception_index = RISCV_EXCP_NONE; env->load_res = -1; set_default_nan_mode(1, &env->fp_status); + /* Default NaN value: sign bit clear, frac msb set */ + set_float_default_nan_pattern(0b01000000, &env->fp_status); + env->vill = true; #ifndef CONFIG_USER_ONLY if (cpu->cfg.debug) { riscv_trigger_reset_hold(env); } + if (cpu->cfg.ext_smrnmi) { + env->rnmip = 0; + env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false); + } + if (kvm_enabled()) { kvm_riscv_reset_vcpu(cpu); } @@ -1036,6 +804,15 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) CPURISCVState *env = &cpu->env; info->target_info = &cpu->cfg; + /* + * A couple of bits in MSTATUS set the endianness: + * - MSTATUS_UBE (User-mode), + * - MSTATUS_SBE (Supervisor-mode), + * - MSTATUS_MBE (Machine-mode) + * but we don't implement that yet. 
+ */ + info->endian = BFD_ENDIAN_LITTLE; + switch (env->xl) { case MXL_RV32: info->print_insn = print_insn_riscv32; @@ -1055,18 +832,16 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) { bool rv32 = riscv_cpu_is_32bit(cpu); - uint8_t satp_mode_map_max, satp_mode_supported_max; + uint16_t supported; + uint8_t satp_mode_map_max; - /* The CPU wants the OS to decide which satp mode to use */ - if (cpu->cfg.satp_mode.supported == 0) { + if (!get_satp_mode_supported(cpu, &supported)) { + /* The CPU wants the hypervisor to decide which satp mode to allow */ return; } - satp_mode_supported_max = - satp_mode_max_from_map(cpu->cfg.satp_mode.supported); - - if (cpu->cfg.satp_mode.map == 0) { - if (cpu->cfg.satp_mode.init == 0) { + if (cpu->satp_modes.map == 0) { + if (cpu->satp_modes.init == 0) { /* If unset by the user, we fallback to the default satp mode. */ set_satp_mode_default_map(cpu); } else { @@ -1076,27 +851,27 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) * valid_vm_1_10_32/64. */ for (int i = 1; i < 16; ++i) { - if ((cpu->cfg.satp_mode.init & (1 << i)) && - (cpu->cfg.satp_mode.supported & (1 << i))) { + if ((cpu->satp_modes.init & (1 << i)) && + supported & (1 << i)) { for (int j = i - 1; j >= 0; --j) { - if (cpu->cfg.satp_mode.supported & (1 << j)) { - cpu->cfg.satp_mode.map |= (1 << j); - break; + if (supported & (1 << j)) { + cpu->cfg.max_satp_mode = j; + return; } } - break; } } } + return; } - satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); + satp_mode_map_max = satp_mode_max_from_map(cpu->satp_modes.map); /* Make sure the user asked for a supported configuration (HW and qemu) */ - if (satp_mode_map_max > satp_mode_supported_max) { + if (satp_mode_map_max > cpu->cfg.max_satp_mode) { error_setg(errp, "satp_mode %s is higher than hw max capability %s", satp_mode_str(satp_mode_map_max, rv32), - satp_mode_str(satp_mode_supported_max, rv32)); + satp_mode_str(cpu->cfg.max_satp_mode, rv32)); return; } @@ -1106,9 +881,9 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) */ if (!rv32) { for (int i = satp_mode_map_max - 1; i >= 0; --i) { - if (!(cpu->cfg.satp_mode.map & (1 << i)) && - (cpu->cfg.satp_mode.init & (1 << i)) && - (cpu->cfg.satp_mode.supported & (1 << i))) { + if (!(cpu->satp_modes.map & (1 << i)) && + (cpu->satp_modes.init & (1 << i)) && + (supported & (1 << i))) { error_setg(errp, "cannot disable %s satp mode if %s " "is enabled", satp_mode_str(i, false), satp_mode_str(satp_mode_map_max, false)); @@ -1117,12 +892,7 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) } } - /* Finally expand the map so that all valid modes are set */ - for (int i = satp_mode_map_max - 1; i >= 0; --i) { - if (cpu->cfg.satp_mode.supported & (1 << i)) { - cpu->cfg.satp_mode.map |= (1 << i); - } - } + cpu->cfg.max_satp_mode = satp_mode_map_max; } #endif @@ -1161,11 +931,6 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); Error *local_err = NULL; - if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_ANY) != NULL) { - warn_report("The 'any' CPU is deprecated and will be " - "removed in the future."); - } - cpu_exec_realizefn(cs, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); @@ -1205,11 +970,11 @@ bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu) static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, void 
*opaque, Error **errp) { - RISCVSATPMap *satp_map = opaque; + RISCVSATPModes *satp_modes = opaque; uint8_t satp = satp_mode_from_str(name); bool value; - value = satp_map->map & (1 << satp); + value = satp_modes->map & (1 << satp); visit_type_bool(v, name, &value, errp); } @@ -1217,7 +982,7 @@ static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - RISCVSATPMap *satp_map = opaque; + RISCVSATPModes *satp_modes = opaque; uint8_t satp = satp_mode_from_str(name); bool value; @@ -1225,8 +990,8 @@ static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, return; } - satp_map->map = deposit32(satp_map->map, satp, 1, value); - satp_map->init |= 1 << satp; + satp_modes->map = deposit32(satp_modes->map, satp, 1, value); + satp_modes->init |= 1 << satp; } void riscv_add_satp_mode_properties(Object *obj) @@ -1235,16 +1000,16 @@ void riscv_add_satp_mode_properties(Object *obj) if (cpu->env.misa_mxl == MXL_RV32) { object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, - cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + cpu_riscv_set_satp, NULL, &cpu->satp_modes); } else { object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, - cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + cpu_riscv_set_satp, NULL, &cpu->satp_modes); object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, - cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + cpu_riscv_set_satp, NULL, &cpu->satp_modes); object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, - cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + cpu_riscv_set_satp, NULL, &cpu->satp_modes); object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, - cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + cpu_riscv_set_satp, NULL, &cpu->satp_modes); } } @@ -1309,16 +1074,16 @@ static void riscv_cpu_set_irq(void *opaque, int irq, int level) g_assert_not_reached(); } } -#endif /* CONFIG_USER_ONLY */ -static bool riscv_cpu_is_dynamic(Object *cpu_obj) +static void riscv_cpu_set_nmi(void *opaque, int irq, int level) { - return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; + riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level); } +#endif /* CONFIG_USER_ONLY */ -static void riscv_cpu_post_init(Object *obj) +static bool riscv_cpu_is_dynamic(Object *cpu_obj) { - accel_cpu_instance_init(CPU(obj)); + return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; } static void riscv_cpu_init(Object *obj) @@ -1327,11 +1092,13 @@ static void riscv_cpu_init(Object *obj) RISCVCPU *cpu = RISCV_CPU(obj); CPURISCVState *env = &cpu->env; - env->misa_mxl = mcc->misa_mxl_max; + env->misa_mxl = mcc->def->misa_mxl_max; #ifndef CONFIG_USER_ONLY qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); + qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi, + "riscv.cpu.rnmi", RNMI_MAX); #endif /* CONFIG_USER_ONLY */ general_user_opts = g_hash_table_new(g_str_hash, g_str_equal); @@ -1343,8 +1110,8 @@ static void riscv_cpu_init(Object *obj) * for all CPUs. Each accelerator will decide what to do when * users disable them. 
*/ - RISCV_CPU(obj)->cfg.ext_zicntr = true; - RISCV_CPU(obj)->cfg.ext_zihpm = true; + RISCV_CPU(obj)->cfg.ext_zicntr = !mcc->def->bare; + RISCV_CPU(obj)->cfg.ext_zihpm = !mcc->def->bare; /* Default values for non-bool cpu properties */ cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16); @@ -1353,35 +1120,30 @@ static void riscv_cpu_init(Object *obj) cpu->cfg.cbom_blocksize = 64; cpu->cfg.cbop_blocksize = 64; cpu->cfg.cboz_blocksize = 64; + cpu->cfg.pmp_regions = 16; cpu->env.vext_ver = VEXT_VERSION_1_00_0; -} + cpu->cfg.max_satp_mode = -1; -static void riscv_bare_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - - /* - * Bare CPUs do not inherit the timer and performance - * counters from the parent class (see riscv_cpu_init() - * for info on why the parent enables them). - * - * Users have to explicitly enable these counters for - * bare CPUs. - */ - cpu->cfg.ext_zicntr = false; - cpu->cfg.ext_zihpm = false; + if (mcc->def->profile) { + mcc->def->profile->enabled = true; + } - /* Set to QEMU's first supported priv version */ - cpu->env.priv_ver = PRIV_VERSION_1_10_0; + env->misa_ext_mask = env->misa_ext = mcc->def->misa_ext; + riscv_cpu_cfg_merge(&cpu->cfg, &mcc->def->cfg); - /* - * Support all available satp_mode settings. The default - * value will be set to MBARE if the user doesn't set - * satp_mode manually (see set_satp_mode_default()). - */ + if (mcc->def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) { + cpu->env.priv_ver = mcc->def->priv_spec; + } + if (mcc->def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) { + cpu->env.vext_ver = mcc->def->vext_spec; + } #ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_SV64); + if (mcc->def->custom_csrs) { + riscv_register_custom_csrs(cpu, mcc->def->custom_csrs); + } #endif + + accel_cpu_instance_init(CPU(obj)); } typedef struct misa_ext_info { @@ -1406,7 +1168,6 @@ static const MISAExtInfo misa_ext_info_arr[] = { MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), MISA_EXT_INFO(RVU, "u", "User-level instructions"), MISA_EXT_INFO(RVH, "h", "Hypervisor"), - MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), MISA_EXT_INFO(RVV, "v", "Vector operations"), MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)") @@ -1417,7 +1178,7 @@ static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc) CPUClass *cc = CPU_CLASS(mcc); /* Validate that MISA_MXL is set properly. 
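The riscv_cpu_cfg_merge() calls above rely on an x-macro header; the following stand-alone sketch shows how that pattern expands, using made-up field names rather than the real list from cpu_cfg_fields.h.inc.

/*
 * Minimal, self-contained sketch of the x-macro merge used by
 * riscv_cpu_cfg_merge(). The FIELDS list below is an illustrative
 * stand-in for cpu_cfg_fields.h.inc; the field names are examples only.
 */
#include <stdbool.h>
#include <stdint.h>

#define FIELDS \
    BOOL_FIELD(ext_zicsr) \
    BOOL_FIELD(pmp) \
    TYPED_FIELD(uint16_t, vlenb, 0) \
    TYPED_FIELD(int, max_satp_mode, -1)

typedef struct {
#define BOOL_FIELD(x) bool x;
#define TYPED_FIELD(type, x, default_) type x;
    FIELDS
#undef BOOL_FIELD
#undef TYPED_FIELD
} DemoCfg;

/*
 * Booleans only ever accumulate (OR); typed fields are copied only when
 * the source differs from its declared default, so an unset value in a
 * child definition never clobbers what a parent already configured.
 */
static void demo_cfg_merge(DemoCfg *dest, const DemoCfg *src)
{
#define BOOL_FIELD(x) dest->x |= src->x;
#define TYPED_FIELD(type, x, default_) if (src->x != (default_)) dest->x = src->x;
    FIELDS
#undef BOOL_FIELD
#undef TYPED_FIELD
}

The same merge is reused for class defaults in riscv_cpu_class_base_init() and for the per-instance config in riscv_cpu_init(), which is why child definitions only need to list what they change.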
*/ - switch (mcc->misa_mxl_max) { + switch (mcc->def->misa_mxl_max) { #ifdef TARGET_RISCV64 case MXL_RV64: case MXL_RV128: @@ -1473,7 +1234,15 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { /* Defaults for standard extensions */ MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false), MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false), + MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false), + MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false), + MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false), + MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false), + MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false), + MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false), MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true), + MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false), + MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false), MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), @@ -1499,16 +1268,26 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false), MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false), MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true), + MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false), + MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false), + MULTI_EXT_CFG_BOOL("supm", ext_supm, false), MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false), + MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false), MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false), + MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false), + MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false), + MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false), MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false), MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false), + MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false), MULTI_EXT_CFG_BOOL("svade", ext_svade, false), MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true), MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), + MULTI_EXT_CFG_BOOL("svrsw60t59b", ext_svrsw60t59b, false), + MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true), MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true), @@ -1570,7 +1349,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false), MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false), - DEFINE_PROP_END_OF_LIST(), + { }, }; const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { @@ -1587,42 +1366,40 @@ const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = { MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false), MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false), - DEFINE_PROP_END_OF_LIST(), + { }, }; /* These are experimental so mark with 'x-' */ const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { - DEFINE_PROP_END_OF_LIST(), + MULTI_EXT_CFG_BOOL("x-svukte", ext_svukte, false), + + { }, }; /* * 'Named features' is the name we give to extensions that we * don't want to expose to users. They are either immutable * (always enabled/disable) or they'll vary depending on - * the resulting CPU state. They have riscv,isa strings - * and priv_ver like regular extensions. + * the resulting CPU state. + * + * Some of them are always enabled depending on priv version + * of the CPU and are declared directly in isa_edata_arr[]. + * The ones listed here have special checks during finalize() + * time and require their own flags like regular extensions. 
+ * See riscv_cpu_update_named_features() for more info. */ const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true), + MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true), + MULTI_EXT_CFG_BOOL("sha", ext_sha, true), - DEFINE_PROP_END_OF_LIST(), -}; - -/* Deprecated entries marked for future removal */ -const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { - MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), - MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), - MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), - MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), - MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), - MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), - MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), - MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), - MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), - MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), - MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), + /* + * 'ziccrse' has its own flag because the KVM driver + * wants to enable/disable it on its own accord. + */ + MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true), - DEFINE_PROP_END_OF_LIST(), + { }, }; static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname, @@ -1677,7 +1454,8 @@ static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo prop_pmu_num = { - .name = "pmu-num", + .type = "int8", + .description = "pmu-num", .get = prop_pmu_num_get, .set = prop_pmu_num_set, }; @@ -1718,7 +1496,8 @@ static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo prop_pmu_mask = { - .name = "pmu-mask", + .type = "int8", + .description = "pmu-mask", .get = prop_pmu_mask_get, .set = prop_pmu_mask_set, }; @@ -1749,7 +1528,8 @@ static void prop_mmu_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo prop_mmu = { - .name = "mmu", + .type = "bool", + .description = "mmu", .get = prop_mmu_get, .set = prop_mmu_set, }; @@ -1780,11 +1560,52 @@ static void prop_pmp_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo prop_pmp = { - .name = "pmp", + .type = "bool", + .description = "pmp", .get = prop_pmp_get, .set = prop_pmp_set, }; +static void prop_num_pmp_regions_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + uint8_t value; + + visit_type_uint8(v, name, &value, errp); + + if (cpu->cfg.pmp_regions != value && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + return; + } + + if (cpu->env.priv_ver < PRIV_VERSION_1_12_0 && value > OLD_MAX_RISCV_PMPS) { + error_setg(errp, "Number of PMP regions exceeds maximum available"); + return; + } else if (value > MAX_RISCV_PMPS) { + error_setg(errp, "Number of PMP regions exceeds maximum available"); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.pmp_regions = value; +} + +static void prop_num_pmp_regions_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint8_t value = RISCV_CPU(obj)->cfg.pmp_regions; + + visit_type_uint8(v, name, &value, errp); +} + +static const PropertyInfo prop_num_pmp_regions = { + .type = "uint8", + .description = "num-pmp-regions", + .get = prop_num_pmp_regions_get, + .set = prop_num_pmp_regions_set, +}; + static int priv_spec_from_str(const char *priv_spec_str) { int priv_version = -1; @@ -1854,7 +1675,9 @@ static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo 
prop_priv_spec = { - .name = "priv_spec", + .type = "str", + .description = "priv_spec", + /* FIXME enum? */ .get = prop_priv_spec_get, .set = prop_priv_spec_set, }; @@ -1885,7 +1708,9 @@ static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo prop_vext_spec = { - .name = "vext_spec", + .type = "str", + .description = "vext_spec", + /* FIXME enum? */ .get = prop_vext_spec_get, .set = prop_vext_spec_set, }; @@ -1894,6 +1719,7 @@ static void prop_vlen_set(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { RISCVCPU *cpu = RISCV_CPU(obj); + uint16_t cpu_vlen = cpu->cfg.vlenb << 3; uint16_t value; if (!visit_type_uint16(v, name, &value, errp)) { @@ -1905,10 +1731,10 @@ static void prop_vlen_set(Object *obj, Visitor *v, const char *name, return; } - if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) { + if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) { cpu_set_prop_err(cpu, name, errp); error_append_hint(errp, "Current '%s' val: %u\n", - name, cpu->cfg.vlenb << 3); + name, cpu_vlen); return; } @@ -1925,7 +1751,8 @@ static void prop_vlen_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo prop_vlen = { - .name = "vlen", + .type = "uint16", + .description = "vlen", .get = prop_vlen_get, .set = prop_vlen_set, }; @@ -1965,7 +1792,8 @@ static void prop_elen_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo prop_elen = { - .name = "elen", + .type = "uint16", + .description = "elen", .get = prop_elen_get, .set = prop_elen_set, }; @@ -2000,7 +1828,8 @@ static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo prop_cbom_blksize = { - .name = "cbom_blocksize", + .type = "uint16", + .description = "cbom_blocksize", .get = prop_cbom_blksize_get, .set = prop_cbom_blksize_set, }; @@ -2035,7 +1864,8 @@ static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo prop_cbop_blksize = { - .name = "cbop_blocksize", + .type = "uint16", + .description = "cbop_blocksize", .get = prop_cbop_blksize_get, .set = prop_cbop_blksize_set, }; @@ -2070,7 +1900,8 @@ static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo prop_cboz_blksize = { - .name = "cboz_blocksize", + .type = "uint16", + .description = "cboz_blocksize", .get = prop_cboz_blksize_get, .set = prop_cboz_blksize_set, }; @@ -2105,7 +1936,8 @@ static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo prop_mvendorid = { - .name = "mvendorid", + .type = "uint32", + .description = "mvendorid", .get = prop_mvendorid_get, .set = prop_mvendorid_set, }; @@ -2140,7 +1972,8 @@ static void prop_mimpid_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo prop_mimpid = { - .name = "mimpid", + .type = "uint64", + .description = "mimpid", .get = prop_mimpid_get, .set = prop_mimpid_set, }; @@ -2196,7 +2029,8 @@ static void prop_marchid_get(Object *obj, Visitor *v, const char *name, } static const PropertyInfo prop_marchid = { - .name = "marchid", + .type = "uint64", + .description = "marchid", .get = prop_marchid_get, .set = prop_marchid_set, }; @@ -2208,9 +2042,10 @@ static const PropertyInfo prop_marchid = { * doesn't need to be manually enabled by the profile. 
*/ static RISCVCPUProfile RVA22U64 = { - .parent = NULL, + .u_parent = NULL, + .s_parent = NULL, .name = "rva22u64", - .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU, + .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU, .priv_spec = RISCV_PROFILE_ATTR_UNUSED, .satp_mode = RISCV_PROFILE_ATTR_UNUSED, .ext_offsets = { @@ -2240,7 +2075,8 @@ static RISCVCPUProfile RVA22U64 = { * The remaining features/extensions comes from RVA22U64. */ static RISCVCPUProfile RVA22S64 = { - .parent = &RVA22U64, + .u_parent = &RVA22U64, + .s_parent = NULL, .name = "rva22s64", .misa_ext = RVS, .priv_spec = PRIV_VERSION_1_12_0, @@ -2254,9 +2090,65 @@ static RISCVCPUProfile RVA22S64 = { } }; +/* + * All mandatory extensions from RVA22U64 are present + * in RVA23U64 so set RVA22 as a parent. We need to + * declare just the newly added mandatory extensions. + */ +static RISCVCPUProfile RVA23U64 = { + .u_parent = &RVA22U64, + .s_parent = NULL, + .name = "rva23u64", + .misa_ext = RVV, + .priv_spec = RISCV_PROFILE_ATTR_UNUSED, + .satp_mode = RISCV_PROFILE_ATTR_UNUSED, + .ext_offsets = { + CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb), + CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl), + CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop), + CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb), + CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs), + CPU_CFG_OFFSET(ext_supm), + + RISCV_PROFILE_EXT_LIST_END + } +}; + +/* + * As with RVA23U64, RVA23S64 also defines 'named features'. + * + * Cache related features that we consider enabled since we don't + * implement cache: Ssccptr + * + * Other named features that we already implement: Sstvecd, Sstvala, + * Sscounterenw, Ssu64xl + * + * The remaining features/extensions comes from RVA23S64. + */ +static RISCVCPUProfile RVA23S64 = { + .u_parent = &RVA23U64, + .s_parent = &RVA22S64, + .name = "rva23s64", + .misa_ext = RVS, + .priv_spec = PRIV_VERSION_1_13_0, + .satp_mode = VM_1_10_SV39, + .ext_offsets = { + /* New in RVA23S64 */ + CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc), + CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm), + + /* Named features: Sha */ + CPU_CFG_OFFSET(ext_sha), + + RISCV_PROFILE_EXT_LIST_END + } +}; + RISCVCPUProfile *riscv_profiles[] = { &RVA22U64, &RVA22S64, + &RVA23U64, + &RVA23S64, NULL, }; @@ -2635,6 +2527,54 @@ static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = { }, }; +static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = { + .ext = CPU_CFG_OFFSET(ext_ssccfg), + .implied_multi_exts = { + CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind), + CPU_CFG_OFFSET(ext_smcdeleg), + + RISCV_IMPLIED_EXTS_RULE_END + }, +}; + +static RISCVCPUImpliedExtsRule SUPM_IMPLIED = { + .ext = CPU_CFG_OFFSET(ext_supm), + .implied_multi_exts = { + CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm), + + RISCV_IMPLIED_EXTS_RULE_END + }, +}; + +static RISCVCPUImpliedExtsRule SSPM_IMPLIED = { + .ext = CPU_CFG_OFFSET(ext_sspm), + .implied_multi_exts = { + CPU_CFG_OFFSET(ext_smnpm), + + RISCV_IMPLIED_EXTS_RULE_END + }, +}; + +static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = { + .ext = CPU_CFG_OFFSET(ext_smctr), + .implied_misa_exts = RVS, + .implied_multi_exts = { + CPU_CFG_OFFSET(ext_sscsrind), + + RISCV_IMPLIED_EXTS_RULE_END + }, +}; + +static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = { + .ext = CPU_CFG_OFFSET(ext_ssctr), + .implied_misa_exts = RVS, + .implied_multi_exts = { + CPU_CFG_OFFSET(ext_sscsrind), + + RISCV_IMPLIED_EXTS_RULE_END + }, +}; + RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = { &RVA_IMPLIED, &RVD_IMPLIED, 
&RVF_IMPLIED, &RVM_IMPLIED, &RVV_IMPLIED, NULL @@ -2652,11 +2592,12 @@ RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = { &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED, &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED, &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED, - &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, + &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED, + &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED, NULL }; -static Property riscv_cpu_properties[] = { +static const Property riscv_cpu_properties[] = { DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), {.name = "pmu-mask", .info = &prop_pmu_mask}, @@ -2664,6 +2605,7 @@ static Property riscv_cpu_properties[] = { {.name = "mmu", .info = &prop_mmu}, {.name = "pmp", .info = &prop_pmp}, + {.name = "num-pmp-regions", .info = &prop_num_pmp_regions}, {.name = "priv_spec", .info = &prop_priv_spec}, {.name = "vext_spec", .info = &prop_vext_spec}, @@ -2675,43 +2617,32 @@ static Property riscv_cpu_properties[] = { {.name = "cbop_blocksize", .info = &prop_cbop_blksize}, {.name = "cboz_blocksize", .info = &prop_cboz_blksize}, - {.name = "mvendorid", .info = &prop_mvendorid}, - {.name = "mimpid", .info = &prop_mimpid}, - {.name = "marchid", .info = &prop_marchid}, + {.name = "mvendorid", .info = &prop_mvendorid}, + {.name = "mimpid", .info = &prop_mimpid}, + {.name = "marchid", .info = &prop_marchid}, #ifndef CONFIG_USER_ONLY DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), + DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec, + DEFAULT_RNMI_IRQVEC), + DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec, + DEFAULT_RNMI_EXCPVEC), #endif DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), + DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), + DEFINE_PROP_BOOL("rvv_vsetvl_x0_vill", RISCVCPU, cfg.rvv_vsetvl_x0_vill, false), /* * write_misa() is marked as experimental for now so mark * it with -x and default to 'false'. 
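The implied-extension rule tables above only carry CPU_CFG_OFFSET() values; the code that walks them is outside this diff. The stand-alone sketch below (names are illustrative, not QEMU's) shows the offset-based access pattern that CPU_CFG_OFFSET()/isa_ext_is_enabled() make possible.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-in for a few RISCVCPUConfig booleans. */
typedef struct {
    bool ext_ssccfg;
    bool ext_smcsrind;
    bool ext_sscsrind;
    bool ext_smcdeleg;
} DemoRuleCfg;

#define DEMO_CFG_OFFSET(f)  ((uint32_t)offsetof(DemoRuleCfg, f))
#define DEMO_RULE_END       UINT32_MAX

static bool demo_ext_is_enabled(const DemoRuleCfg *cfg, uint32_t off)
{
    return *(const bool *)((const char *)cfg + off);
}

static void demo_ext_enable(DemoRuleCfg *cfg, uint32_t off)
{
    *(bool *)((char *)cfg + off) = true;
}

/*
 * Apply one rule: if the trigger extension is enabled, enable everything
 * it implies. Mirrors the shape of SSCFG_IMPLIED above, where ssccfg
 * pulls in smcsrind, sscsrind and smcdeleg.
 */
static void demo_apply_rule(DemoRuleCfg *cfg, uint32_t trigger,
                            const uint32_t *implied)
{
    if (!demo_ext_is_enabled(cfg, trigger)) {
        return;
    }
    for (size_t i = 0; implied[i] != DEMO_RULE_END; i++) {
        demo_ext_enable(cfg, implied[i]);
    }
}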
*/ DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), - DEFINE_PROP_END_OF_LIST(), }; -#if defined(TARGET_RISCV64) -static void rva22u64_profile_cpu_init(Object *obj) -{ - rv64i_bare_cpu_init(obj); - - RVA22U64.enabled = true; -} - -static void rva22s64_profile_cpu_init(Object *obj) -{ - rv64i_bare_cpu_init(obj); - - RVA22S64.enabled = true; -} -#endif - static const gchar *riscv_gdb_arch_name(CPUState *cs) { RISCVCPU *cpu = RISCV_CPU(cs); @@ -2739,6 +2670,7 @@ static int64_t riscv_get_arch_id(CPUState *cs) #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps riscv_sysemu_ops = { + .has_work = riscv_cpu_has_work, .get_phys_page_debug = riscv_cpu_get_phys_page_debug, .write_elf64_note = riscv_cpu_write_elf64_note, .write_elf32_note = riscv_cpu_write_elf32_note, @@ -2746,7 +2678,7 @@ static const struct SysemuCPUOps riscv_sysemu_ops = { }; #endif -static void riscv_cpu_common_class_init(ObjectClass *c, void *data) +static void riscv_cpu_common_class_init(ObjectClass *c, const void *data) { RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); @@ -2760,8 +2692,6 @@ static void riscv_cpu_common_class_init(ObjectClass *c, void *data) &mcc->parent_phases); cc->class_by_name = riscv_cpu_class_by_name; - cc->has_work = riscv_cpu_has_work; - cc->mmu_index = riscv_cpu_mmu_index; cc->dump_state = riscv_cpu_dump_state; cc->set_pc = riscv_cpu_set_pc; cc->get_pc = riscv_cpu_get_pc; @@ -2774,16 +2704,94 @@ static void riscv_cpu_common_class_init(ObjectClass *c, void *data) cc->get_arch_id = riscv_get_arch_id; #endif cc->gdb_arch_name = riscv_gdb_arch_name; +#ifdef CONFIG_TCG + cc->tcg_ops = &riscv_tcg_ops; +#endif /* CONFIG_TCG */ device_class_set_props(dc, riscv_cpu_properties); } -static void riscv_cpu_class_init(ObjectClass *c, void *data) +static bool profile_extends(RISCVCPUProfile *trial, RISCVCPUProfile *parent) +{ + RISCVCPUProfile *curr; + if (!parent) { + return true; + } + + curr = trial; + while (curr) { + if (curr == parent) { + return true; + } + curr = curr->u_parent; + } + + curr = trial; + while (curr) { + if (curr == parent) { + return true; + } + curr = curr->s_parent; + } + + return false; +} + +static void riscv_cpu_class_base_init(ObjectClass *c, const void *data) { RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); + RISCVCPUClass *pcc = RISCV_CPU_CLASS(object_class_get_parent(c)); + + if (pcc->def) { + mcc->def = g_memdup2(pcc->def, sizeof(*pcc->def)); + } else { + mcc->def = g_new0(RISCVCPUDef, 1); + } + + if (data) { + const RISCVCPUDef *def = data; + mcc->def->bare |= def->bare; + if (def->profile) { + assert(profile_extends(def->profile, mcc->def->profile)); + assert(mcc->def->bare); + mcc->def->profile = def->profile; + } + if (def->misa_mxl_max) { + assert(def->misa_mxl_max <= MXL_RV128); + mcc->def->misa_mxl_max = def->misa_mxl_max; + +#ifndef CONFIG_USER_ONLY + /* + * Hack to simplify CPU class hierarchies that include both 32- and + * 64-bit models: reduce SV39/48/57/64 to SV32 for 32-bit models. 
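To make the accumulation order concrete, here is an illustrative trace (values taken from the DEFINE_RISCV_CPU type table further below) of how a leaf CPU ends up with its merged RISCVCPUDef:

/*
 * Example: "sifive-e31" (see the type entries below).
 *
 *   TYPE_RISCV_CPU_SIFIVE_E (abstract vendor class):
 *       misa_ext = RVI | RVM | RVA | RVC | RVU, priv spec 1.10,
 *       Zifencei, Zicsr, pmp enabled with 8 regions, max satp mode MBARE
 *   sifive-e31 (leaf): adds misa_mxl_max = MXL_RV32
 *   sifive-e34 (leaf): adds misa_mxl_max = MXL_RV32 plus RVF ("IMAFCU")
 *
 * Each class_base_init() call duplicates the parent's def (g_memdup2)
 * and merges its own class_data on top, so booleans accumulate and
 * typed fields override only when a child sets them. Shared vendor
 * defaults therefore live once on the abstract class instead of being
 * repeated in every per-model instance_init() as before.
 */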
+ */ + if (mcc->def->misa_mxl_max == MXL_RV32 && + !valid_vm_1_10_32[mcc->def->cfg.max_satp_mode]) { + mcc->def->cfg.max_satp_mode = VM_1_10_SV32; + } +#endif + } + if (def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) { + assert(def->priv_spec <= PRIV_VERSION_LATEST); + mcc->def->priv_spec = def->priv_spec; + } + if (def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) { + assert(def->vext_spec != 0); + mcc->def->vext_spec = def->vext_spec; + } + mcc->def->misa_ext |= def->misa_ext; + + riscv_cpu_cfg_merge(&mcc->def->cfg, &def->cfg); - mcc->misa_mxl_max = (uint32_t)(uintptr_t)data; - riscv_cpu_validate_misa_mxl(mcc); + if (def->custom_csrs) { + assert(!mcc->def->custom_csrs); + mcc->def->custom_csrs = def->custom_csrs; + } + } + + if (!object_class_is_abstract(c)) { + riscv_cpu_validate_misa_mxl(mcc); + } } static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, @@ -2878,50 +2886,34 @@ void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) } #endif -#define DEFINE_CPU(type_name, misa_mxl_max, initfn) \ +#define DEFINE_ABSTRACT_RISCV_CPU(type_name, parent_type_name, ...) \ { \ .name = (type_name), \ - .parent = TYPE_RISCV_CPU, \ - .instance_init = (initfn), \ - .class_init = riscv_cpu_class_init, \ - .class_data = (void *)(misa_mxl_max) \ + .parent = (parent_type_name), \ + .abstract = true, \ + .class_data = &(const RISCVCPUDef) { \ + .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \ + .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \ + .cfg.max_satp_mode = -1, \ + __VA_ARGS__ \ + }, \ } -#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ +#define DEFINE_RISCV_CPU(type_name, parent_type_name, ...) \ { \ .name = (type_name), \ - .parent = TYPE_RISCV_DYNAMIC_CPU, \ - .instance_init = (initfn), \ - .class_init = riscv_cpu_class_init, \ - .class_data = (void *)(misa_mxl_max) \ + .parent = (parent_type_name), \ + .class_data = &(const RISCVCPUDef) { \ + .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \ + .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \ + .cfg.max_satp_mode = -1, \ + __VA_ARGS__ \ + }, \ } -#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ - { \ - .name = (type_name), \ - .parent = TYPE_RISCV_VENDOR_CPU, \ - .instance_init = (initfn), \ - .class_init = riscv_cpu_class_init, \ - .class_data = (void *)(misa_mxl_max) \ - } - -#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ - { \ - .name = (type_name), \ - .parent = TYPE_RISCV_BARE_CPU, \ - .instance_init = (initfn), \ - .class_init = riscv_cpu_class_init, \ - .class_data = (void *)(misa_mxl_max) \ - } - -#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ - { \ - .name = (type_name), \ - .parent = TYPE_RISCV_BARE_CPU, \ - .instance_init = (initfn), \ - .class_init = riscv_cpu_class_init, \ - .class_data = (void *)(misa_mxl_max) \ - } +#define DEFINE_PROFILE_CPU(type_name, parent_type_name, profile_) \ + DEFINE_RISCV_CPU(type_name, parent_type_name, \ + .profile = &(profile_)) static const TypeInfo riscv_cpu_type_infos[] = { { @@ -2930,53 +2922,370 @@ static const TypeInfo riscv_cpu_type_infos[] = { .instance_size = sizeof(RISCVCPU), .instance_align = __alignof(RISCVCPU), .instance_init = riscv_cpu_init, - .instance_post_init = riscv_cpu_post_init, .abstract = true, .class_size = sizeof(RISCVCPUClass), .class_init = riscv_cpu_common_class_init, + .class_base_init = riscv_cpu_class_base_init, }, - { - .name = TYPE_RISCV_DYNAMIC_CPU, - .parent = TYPE_RISCV_CPU, - .abstract = true, - }, - { - .name = TYPE_RISCV_VENDOR_CPU, - .parent = TYPE_RISCV_CPU, - .abstract = true, - }, - { - .name = TYPE_RISCV_BARE_CPU, - 
.parent = TYPE_RISCV_CPU, - .instance_init = riscv_bare_cpu_init, - .abstract = true, - }, + + DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_DYNAMIC_CPU, TYPE_RISCV_CPU, + .cfg.mmu = true, + .cfg.pmp = true, + .priv_spec = PRIV_VERSION_LATEST, + ), + + DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_VENDOR_CPU, TYPE_RISCV_CPU), + DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_BARE_CPU, TYPE_RISCV_CPU, + /* + * Bare CPUs do not inherit the timer and performance + * counters from the parent class (see riscv_cpu_init() + * for info on why the parent enables them). + * + * Users have to explicitly enable these counters for + * bare CPUs. + */ + .bare = true, + + /* Set to QEMU's first supported priv version */ + .priv_spec = PRIV_VERSION_1_10_0, + + /* + * Support all available satp_mode settings. By default + * only MBARE will be available if the user doesn't enable + * a mode manually (see riscv_cpu_satp_mode_finalize()). + */ +#ifdef TARGET_RISCV32 + .cfg.max_satp_mode = VM_1_10_SV32, +#else + .cfg.max_satp_mode = VM_1_10_SV57, +#endif + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX, TYPE_RISCV_DYNAMIC_CPU, #if defined(TARGET_RISCV32) - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV32, riscv_any_cpu_init), - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), - DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), - DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), + .misa_mxl_max = MXL_RV32, + .cfg.max_satp_mode = VM_1_10_SV32, #elif defined(TARGET_RISCV64) - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, MXL_RV64, riscv_any_cpu_init), - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), -#ifdef CONFIG_TCG - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), + .misa_mxl_max = MXL_RV64, + .cfg.max_satp_mode = VM_1_10_SV57, +#endif + ), + + DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E, TYPE_RISCV_VENDOR_CPU, + .misa_ext = RVI | RVM | RVA | RVC | RVU, + .priv_spec = PRIV_VERSION_1_10_0, + .cfg.max_satp_mode = VM_1_10_MBARE, + .cfg.ext_zifencei = true, + .cfg.ext_zicsr = true, + .cfg.pmp = true, + .cfg.pmp_regions = 8 + ), + + DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U, TYPE_RISCV_VENDOR_CPU, + .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU, + .priv_spec = PRIV_VERSION_1_10_0, + + .cfg.max_satp_mode = VM_1_10_SV39, + .cfg.ext_zifencei = true, + .cfg.ext_zicsr = true, + .cfg.mmu = true, + .cfg.pmp = true, + .cfg.pmp_regions = 8 + ), + +#if defined(TARGET_RISCV32) || \ + (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE32, TYPE_RISCV_DYNAMIC_CPU, + .cfg.max_satp_mode = VM_1_10_SV32, + .misa_mxl_max = 
MXL_RV32, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_IBEX, TYPE_RISCV_VENDOR_CPU, + .misa_mxl_max = MXL_RV32, + .misa_ext = RVI | RVM | RVC | RVU, + .priv_spec = PRIV_VERSION_1_12_0, + .cfg.max_satp_mode = VM_1_10_MBARE, + .cfg.ext_zifencei = true, + .cfg.ext_zicsr = true, + .cfg.pmp = true, + .cfg.ext_smepmp = true, + + .cfg.ext_zba = true, + .cfg.ext_zbb = true, + .cfg.ext_zbc = true, + .cfg.ext_zbs = true + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E31, TYPE_RISCV_CPU_SIFIVE_E, + .misa_mxl_max = MXL_RV32 + ), + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E34, TYPE_RISCV_CPU_SIFIVE_E, + .misa_mxl_max = MXL_RV32, + .misa_ext = RVF, /* IMAFCU */ + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U34, TYPE_RISCV_CPU_SIFIVE_U, + .misa_mxl_max = MXL_RV32, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32I, TYPE_RISCV_BARE_CPU, + .misa_mxl_max = MXL_RV32, + .misa_ext = RVI + ), + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32E, TYPE_RISCV_BARE_CPU, + .misa_mxl_max = MXL_RV32, + .misa_ext = RVE + ), +#endif + +#if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX32, TYPE_RISCV_DYNAMIC_CPU, + .cfg.max_satp_mode = VM_1_10_SV32, + .misa_mxl_max = MXL_RV32, + ), +#endif + +#if defined(TARGET_RISCV64) + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE64, TYPE_RISCV_DYNAMIC_CPU, + .cfg.max_satp_mode = VM_1_10_SV57, + .misa_mxl_max = MXL_RV64, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E51, TYPE_RISCV_CPU_SIFIVE_E, + .misa_mxl_max = MXL_RV64 + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U54, TYPE_RISCV_CPU_SIFIVE_U, + .misa_mxl_max = MXL_RV64, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SHAKTI_C, TYPE_RISCV_CPU_SIFIVE_U, + .misa_mxl_max = MXL_RV64, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_THEAD_C906, TYPE_RISCV_VENDOR_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVG | RVC | RVS | RVU, + .priv_spec = PRIV_VERSION_1_11_0, + + .cfg.ext_zfa = true, + .cfg.ext_zfh = true, + .cfg.mmu = true, + .cfg.ext_xtheadba = true, + .cfg.ext_xtheadbb = true, + .cfg.ext_xtheadbs = true, + .cfg.ext_xtheadcmo = true, + .cfg.ext_xtheadcondmov = true, + .cfg.ext_xtheadfmemidx = true, + .cfg.ext_xtheadmac = true, + .cfg.ext_xtheadmemidx = true, + .cfg.ext_xtheadmempair = true, + .cfg.ext_xtheadsync = true, + .cfg.pmp = true, + + .cfg.mvendorid = THEAD_VENDOR_ID, + + .cfg.max_satp_mode = VM_1_10_SV39, +#ifndef CONFIG_USER_ONLY + .custom_csrs = th_csr_list, +#endif + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_TT_ASCALON, TYPE_RISCV_VENDOR_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVG | RVC | RVS | RVU | RVH | RVV, + .priv_spec = PRIV_VERSION_1_13_0, + .vext_spec = VEXT_VERSION_1_00_0, + + /* ISA extensions */ + .cfg.mmu = true, + .cfg.vlenb = 256 >> 3, + .cfg.elen = 64, + .cfg.rvv_ma_all_1s = true, + .cfg.rvv_ta_all_1s = true, + .cfg.misa_w = true, + .cfg.pmp = true, + .cfg.cbom_blocksize = 64, + .cfg.cbop_blocksize = 64, + .cfg.cboz_blocksize = 64, + .cfg.ext_zic64b = true, + .cfg.ext_zicbom = true, + .cfg.ext_zicbop = true, + .cfg.ext_zicboz = true, + .cfg.ext_zicntr = true, + .cfg.ext_zicond = true, + .cfg.ext_zicsr = true, + .cfg.ext_zifencei = true, + .cfg.ext_zihintntl = true, + .cfg.ext_zihintpause = true, + .cfg.ext_zihpm = true, + .cfg.ext_zimop = true, + .cfg.ext_zawrs = true, + .cfg.ext_zfa = true, + .cfg.ext_zfbfmin = true, + .cfg.ext_zfh = true, + .cfg.ext_zfhmin = true, + .cfg.ext_zcb = true, + .cfg.ext_zcmop = true, + .cfg.ext_zba = true, + .cfg.ext_zbb = true, + .cfg.ext_zbs = true, + .cfg.ext_zkt = true, + .cfg.ext_zvbb = true, + .cfg.ext_zvbc = true, + .cfg.ext_zvfbfmin = true, + 
.cfg.ext_zvfbfwma = true, + .cfg.ext_zvfh = true, + .cfg.ext_zvfhmin = true, + .cfg.ext_zvkng = true, + .cfg.ext_smaia = true, + .cfg.ext_smstateen = true, + .cfg.ext_ssaia = true, + .cfg.ext_sscofpmf = true, + .cfg.ext_sstc = true, + .cfg.ext_svade = true, + .cfg.ext_svinval = true, + .cfg.ext_svnapot = true, + .cfg.ext_svpbmt = true, + + .cfg.max_satp_mode = VM_1_10_SV57, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_VEYRON_V1, TYPE_RISCV_VENDOR_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVG | RVC | RVS | RVU | RVH, + .priv_spec = PRIV_VERSION_1_12_0, + + /* ISA extensions */ + .cfg.mmu = true, + .cfg.ext_zifencei = true, + .cfg.ext_zicsr = true, + .cfg.pmp = true, + .cfg.ext_zicbom = true, + .cfg.cbom_blocksize = 64, + .cfg.cboz_blocksize = 64, + .cfg.ext_zicboz = true, + .cfg.ext_smaia = true, + .cfg.ext_ssaia = true, + .cfg.ext_sscofpmf = true, + .cfg.ext_sstc = true, + .cfg.ext_svinval = true, + .cfg.ext_svnapot = true, + .cfg.ext_svpbmt = true, + .cfg.ext_smstateen = true, + .cfg.ext_zba = true, + .cfg.ext_zbb = true, + .cfg.ext_zbc = true, + .cfg.ext_zbs = true, + .cfg.ext_XVentanaCondOps = true, + + .cfg.mvendorid = VEYRON_V1_MVENDORID, + .cfg.marchid = VEYRON_V1_MARCHID, + .cfg.mimpid = VEYRON_V1_MIMPID, + + .cfg.max_satp_mode = VM_1_10_SV48, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, TYPE_RISCV_VENDOR_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVG | RVC | RVB | RVS | RVU, + .priv_spec = PRIV_VERSION_1_12_0, + + /* ISA extensions */ + .cfg.ext_zbc = true, + .cfg.ext_zbkb = true, + .cfg.ext_zbkc = true, + .cfg.ext_zbkx = true, + .cfg.ext_zknd = true, + .cfg.ext_zkne = true, + .cfg.ext_zknh = true, + .cfg.ext_zksed = true, + .cfg.ext_zksh = true, + .cfg.ext_svinval = true, + + .cfg.mmu = true, + .cfg.pmp = true, + + .cfg.max_satp_mode = VM_1_10_SV39, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_XIANGSHAN_KMH, TYPE_RISCV_VENDOR_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVG | RVC | RVB | RVS | RVU | RVH | RVV, + .priv_spec = PRIV_VERSION_1_13_0, + /* + * The RISC-V Instruction Set Manual: Volume I + * Unprivileged Architecture + */ + .cfg.ext_zicntr = true, + .cfg.ext_zihpm = true, + .cfg.ext_zihintntl = true, + .cfg.ext_zihintpause = true, + .cfg.ext_zimop = true, + .cfg.ext_zcmop = true, + .cfg.ext_zicond = true, + .cfg.ext_zawrs = true, + .cfg.ext_zacas = true, + .cfg.ext_zfh = true, + .cfg.ext_zfa = true, + .cfg.ext_zcb = true, + .cfg.ext_zbc = true, + .cfg.ext_zvfh = true, + .cfg.ext_zkn = true, + .cfg.ext_zks = true, + .cfg.ext_zkt = true, + .cfg.ext_zvbb = true, + .cfg.ext_zvkt = true, + /* + * The RISC-V Instruction Set Manual: Volume II + * Privileged Architecture + */ + .cfg.ext_smstateen = true, + .cfg.ext_smcsrind = true, + .cfg.ext_sscsrind = true, + .cfg.ext_svnapot = true, + .cfg.ext_svpbmt = true, + .cfg.ext_svinval = true, + .cfg.ext_sstc = true, + .cfg.ext_sscofpmf = true, + .cfg.ext_ssdbltrp = true, + .cfg.ext_ssnpm = true, + .cfg.ext_smnpm = true, + .cfg.ext_smmpm = true, + .cfg.ext_sspm = true, + .cfg.ext_supm = true, + /* The RISC-V Advanced Interrupt Architecture */ + .cfg.ext_smaia = true, + .cfg.ext_ssaia = true, + /* RVA23 Profiles */ + .cfg.ext_zicbom = true, + .cfg.ext_zicbop = true, + .cfg.ext_zicboz = true, + .cfg.ext_svade = true, + .cfg.mmu = true, + .cfg.pmp = true, + .cfg.max_satp_mode = VM_1_10_SV48, + ), + +#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE128, TYPE_RISCV_DYNAMIC_CPU, + .cfg.max_satp_mode = VM_1_10_SV57, + .misa_mxl_max = MXL_RV128, + ), #endif /* CONFIG_TCG */ - 
DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), - DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), - DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), - DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64I, TYPE_RISCV_BARE_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVI + ), + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64E, TYPE_RISCV_BARE_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVE + ), + + DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, TYPE_RISCV_CPU_RV64I, RVA22U64), + DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, TYPE_RISCV_CPU_RV64I, RVA22S64), + DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, TYPE_RISCV_CPU_RV64I, RVA23U64), + DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, TYPE_RISCV_CPU_RV64I, RVA23S64), #endif /* TARGET_RISCV64 */ }; diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 1619c3acb66..4a862da6158 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -23,7 +23,9 @@ #include "hw/core/cpu.h" #include "hw/registerfields.h" #include "hw/qdev-properties.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #include "exec/gdbstub.h" #include "qemu/cpu-float.h" #include "qom/object.h" @@ -44,10 +46,9 @@ typedef struct CPUArchState CPURISCVState; #endif /* - * RISC-V-specific extra insn start words: - * 1: Original instruction opcode + * b0: Whether a instruction always raise a store AMO or not. */ -#define TARGET_INSN_START_EXTRA_WORDS 1 +#define RISCV_UW2_ALWAYS_STORE_AMO 1 #define RV(x) ((target_ulong)1 << (x - 'A')) @@ -66,7 +67,6 @@ typedef struct CPUArchState CPURISCVState; #define RVS RV('S') #define RVU RV('U') #define RVH RV('H') -#define RVJ RV('J') #define RVG RV('G') #define RVB RV('B') @@ -75,12 +75,29 @@ const char *riscv_get_misa_ext_name(uint32_t bit); const char *riscv_get_misa_ext_description(uint32_t bit); #define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop) +#define ENV_CSR_OFFSET(_csr) offsetof(CPURISCVState, _csr) typedef struct riscv_cpu_profile { - struct riscv_cpu_profile *parent; + struct riscv_cpu_profile *u_parent; + struct riscv_cpu_profile *s_parent; const char *name; uint32_t misa_ext; + /* + * The profile is enabled/disabled via command line or + * via cpu_init(). Enabling a profile will add all its + * mandatory extensions in the CPU during init(). + */ bool enabled; + /* + * The profile is present in the CPU, i.e. the current set of + * CPU extensions complies with it. A profile can be enabled + * and not present (e.g. the user disabled a mandatory extension) + * and the other way around (e.g. all mandatory extensions are + * present in a non-profile CPU). + * + * QMP uses this flag. 
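The enabled/present distinction described above is checked elsewhere in the target; the sketch below is only an illustrative reading of that comment, not the actual QEMU implementation.

/*
 * Illustrative only: a profile is "present" when the CPU's current
 * extension set satisfies it, independent of whether the user enabled
 * the profile itself. The real check lives outside this header.
 */
static bool demo_profile_is_present(RISCVCPU *cpu, const RISCVCPUProfile *p)
{
    if ((cpu->env.misa_ext & p->misa_ext) != p->misa_ext) {
        return false;
    }
    for (int i = 0; p->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        if (!isa_ext_is_enabled(cpu, p->ext_offsets[i])) {
            return false;
        }
    }
    return true;
}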
+ */ + bool present; bool user_set; int priv_spec; int satp_mode; @@ -124,6 +141,14 @@ typedef enum { EXT_STATUS_DIRTY, } RISCVExtStatus; +/* Enum holds PMM field values for Zjpm v1.0 extension */ +typedef enum { + PMM_FIELD_DISABLED = 0, + PMM_FIELD_RESERVED = 1, + PMM_FIELD_PMLEN7 = 2, + PMM_FIELD_PMLEN16 = 3, +} RISCVPmPmm; + typedef struct riscv_cpu_implied_exts_rule { #ifndef CONFIG_USER_ONLY /* @@ -149,7 +174,8 @@ extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[]; #define MMU_USER_IDX 3 -#define MAX_RISCV_PMPS (16) +#define MAX_RISCV_PMPS (64) +#define OLD_MAX_RISCV_PMPS (16) #if !defined(CONFIG_USER_ONLY) #include "pmp.h" @@ -230,12 +256,24 @@ struct CPUArchState { target_ulong jvt; + /* elp state for zicfilp extension */ + bool elp; + /* shadow stack register for zicfiss extension */ + target_ulong ssp; + /* env place holder for extra word 2 during unwind */ + target_ulong excp_uw2; + /* sw check code for sw check exception */ + target_ulong sw_check_code; #ifdef CONFIG_USER_ONLY uint32_t elf_flags; #endif -#ifndef CONFIG_USER_ONLY target_ulong priv; + /* CSRs for execution environment configuration */ + uint64_t menvcfg; + target_ulong senvcfg; + +#ifndef CONFIG_USER_ONLY /* This contains QEMU specific information about the virt state. */ bool virt_enabled; target_ulong geilen; @@ -288,6 +326,15 @@ struct CPUArchState { target_ulong mcause; target_ulong mtval; /* since: priv-1.10.0 */ + uint64_t mctrctl; + uint32_t sctrdepth; + uint32_t sctrstatus; + uint64_t vsctrctl; + + uint64_t ctr_src[16 << SCTRDEPTH_MAX]; + uint64_t ctr_dst[16 << SCTRDEPTH_MAX]; + uint64_t ctr_data[16 << SCTRDEPTH_MAX]; + /* Machine and Supervisor interrupt priorities */ uint8_t miprio[64]; uint8_t siprio[64]; @@ -368,6 +415,7 @@ struct CPUArchState { uint32_t scounteren; uint32_t mcounteren; + uint32_t scountinhibit; uint32_t mcountinhibit; /* PMU cycle & instret privilege mode filtering */ @@ -434,27 +482,11 @@ struct CPUArchState { /* True if in debugger mode. */ bool debugger; - /* - * CSRs for PointerMasking extension - */ - target_ulong mmte; - target_ulong mpmmask; - target_ulong mpmbase; - target_ulong spmmask; - target_ulong spmbase; - target_ulong upmmask; - target_ulong upmbase; - - /* CSRs for execution environment configuration */ - uint64_t menvcfg; uint64_t mstateen[SMSTATEEN_MAX_COUNT]; uint64_t hstateen[SMSTATEEN_MAX_COUNT]; uint64_t sstateen[SMSTATEEN_MAX_COUNT]; - target_ulong senvcfg; uint64_t henvcfg; #endif - target_ulong cur_pmmask; - target_ulong cur_pmbase; /* Fields from here on are preserved across CPU reset. */ QEMUTimer *stimer; /* Internal timer for S-mode interrupt */ @@ -472,8 +504,30 @@ struct CPUArchState { uint64_t kvm_timer_state; uint64_t kvm_timer_frequency; #endif /* CONFIG_KVM */ + + /* RNMI */ + target_ulong mnscratch; + target_ulong mnepc; + target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */ + target_ulong mnstatus; + target_ulong rnmip; + uint64_t rnmi_irqvec; + uint64_t rnmi_excpvec; }; +/* + * map is a 16-bit bitmap: the most significant set bit in map is the maximum + * satp mode that is supported. It may be chosen by the user and must respect + * what qemu implements (valid_1_10_32/64) and what the hw is capable of + * (supported bitmap below). + * + * init is a 16-bit bitmap used to make sure the user selected a correct + * configuration as per the specification. 
+ */ +typedef struct { + uint16_t map, init; +} RISCVSATPModes; + /* * RISCVCPU: * @env: #CPURISCVState @@ -490,6 +544,7 @@ struct ArchCPU { /* Configuration Settings */ RISCVCPUConfig cfg; + RISCVSATPModes satp_modes; QEMUTimer *pmu_timer; /* A bitmask of Available programmable counters */ @@ -499,6 +554,19 @@ struct ArchCPU { const GPtrArray *decoders; }; +typedef struct RISCVCSR RISCVCSR; + +typedef struct RISCVCPUDef { + RISCVMXL misa_mxl_max; /* max mxl for this cpu */ + RISCVCPUProfile *profile; + uint32_t misa_ext; + int priv_spec; + int32_t vext_spec; + RISCVCPUConfig cfg; + bool bare; + const RISCVCSR *custom_csrs; +} RISCVCPUDef; + /** * RISCVCPUClass: * @parent_realize: The parent class' realize handler. @@ -511,7 +579,7 @@ struct RISCVCPUClass { DeviceRealize parent_realize; ResettablePhases parent_phases; - uint32_t misa_mxl_max; /* max mxl for this cpu */ + RISCVCPUDef *def; }; static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) @@ -544,6 +612,9 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen); bool riscv_cpu_vector_enabled(CPURISCVState *env); void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable); int riscv_env_mmu_index(CPURISCVState *env, bool ifetch); +bool cpu_get_fcfien(CPURISCVState *env); +bool cpu_get_bcfien(CPURISCVState *env); +bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt); G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); @@ -568,6 +639,7 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env); int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts); uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value); +void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level); void riscv_cpu_interrupt(CPURISCVState *env); #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */ void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *), @@ -585,15 +657,21 @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit); void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en); +void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst, + enum CTRType type, target_ulong prev_priv, bool prev_virt); +void riscv_ctr_clear(CPURISCVState *env); + void riscv_translate_init(void); +void riscv_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); + G_NORETURN void riscv_raise_exception(CPURISCVState *env, - uint32_t exception, uintptr_t pc); + RISCVException exception, + uintptr_t pc); target_ulong riscv_cpu_get_fflags(CPURISCVState *env); void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong); -#include "exec/cpu-all.h" - FIELD(TB_FLAGS, MEM_IDX, 0, 3) FIELD(TB_FLAGS, FS, 3, 2) /* Vector flags */ @@ -608,14 +686,22 @@ FIELD(TB_FLAGS, XL, 16, 2) /* If PointerMasking should be applied */ FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1) FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1) -FIELD(TB_FLAGS, VTA, 20, 1) -FIELD(TB_FLAGS, VMA, 21, 1) +FIELD(TB_FLAGS, VTA, 18, 1) +FIELD(TB_FLAGS, VMA, 19, 1) /* Native debug itrigger */ -FIELD(TB_FLAGS, ITRIGGER, 22, 1) +FIELD(TB_FLAGS, ITRIGGER, 20, 1) /* Virtual mode enabled */ -FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1) -FIELD(TB_FLAGS, PRIV, 24, 2) -FIELD(TB_FLAGS, AXL, 26, 2) +FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1) +FIELD(TB_FLAGS, PRIV, 22, 2) +FIELD(TB_FLAGS, AXL, 24, 2) +/* zicfilp needs a TB flag to track indirect branches */ 
+FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1) +FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1) +/* zicfiss needs a TB flag so that correct TB is located based on tb flags */ +FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1) +/* If pointer masking should be applied and address sign extended */ +FIELD(TB_FLAGS, PM_PMM, 29, 2) +FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1) #ifdef TARGET_RISCV32 #define riscv_cpu_mxl(env) ((void)(env), MXL_RV32) @@ -709,11 +795,26 @@ static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env) #ifdef CONFIG_USER_ONLY return env->misa_mxl; #else - return get_field(env->mstatus, MSTATUS64_SXL); + if (env->misa_mxl != MXL_RV32) { + return get_field(env->mstatus, MSTATUS64_SXL); + } #endif + return MXL_RV32; } #endif +static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg, + target_long priv_ver, + uint32_t misa_ext) +{ + /* In priv spec version 1.12 or newer, C always implies Zca */ + if (priv_ver >= PRIV_VERSION_1_12_0) { + return cfg->ext_zca; + } else { + return misa_ext & RVC; + } +} + /* * Encode LMUL to lmul as follows: * LMUL vlmul lmul @@ -745,17 +846,19 @@ static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew, return vlen >> (vsew + 3 - lmul); } -void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *pflags); - -void riscv_cpu_update_mask(CPURISCVState *env); bool riscv_cpu_is_32bit(RISCVCPU *cpu); +bool riscv_cpu_virt_mem_enabled(CPURISCVState *env); +RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env); +RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env); +uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm); + RISCVException riscv_csrr(CPURISCVState *env, int csrno, target_ulong *ret_value); + RISCVException riscv_csrrw(CPURISCVState *env, int csrno, - target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask); + target_ulong *ret_value, target_ulong new_value, + target_ulong write_mask, uintptr_t ra); RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, @@ -764,13 +867,13 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, static inline void riscv_csr_write(CPURISCVState *env, int csrno, target_ulong val) { - riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS)); + riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0); } static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno) { target_ulong val = 0; - riscv_csrrw(env, csrno, &val, 0, 0); + riscv_csrrw(env, csrno, &val, 0, 0, 0); return val; } @@ -779,7 +882,8 @@ typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env, typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno, target_ulong *ret_value); typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno, - target_ulong new_value); + target_ulong new_value, + uintptr_t ra); typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, @@ -788,8 +892,8 @@ typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno, RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno, Int128 *ret_value); RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, - Int128 *ret_value, - Int128 new_value, Int128 write_mask); + Int128 *ret_value, Int128 new_value, + Int128 write_mask, uintptr_t ra); typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno, Int128 *ret_value); @@ -808,6 +912,12 @@ typedef struct { uint32_t min_priv_ver; } riscv_csr_operations; 
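/*
 * Editorial sketch (not part of the patch): riscv_csrrw() and the CSR write
 * accessor typedef above now carry a trailing host return address. A TCG
 * helper would presumably forward GETPC() so that a trapping CSR access
 * unwinds precisely to the guest instruction, while non-helper callers such
 * as riscv_csr_write()/riscv_csr_read() above pass 0. The function and
 * variable names below are hypothetical and used only for illustration.
 */
static target_ulong example_csrrw(CPURISCVState *env, int csr,
                                  target_ulong src, target_ulong write_mask,
                                  uintptr_t host_ra /* e.g. GETPC() */)
{
    target_ulong val = 0;
    RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask, host_ra);

    if (ret != RISCV_EXCP_NONE) {
        /* host_ra is forwarded so the exception unwinds to the faulting insn */
        riscv_raise_exception(env, ret, host_ra);
    }
    return val;
}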
+struct RISCVCSR { + int csrno; + bool (*insertion_test)(RISCVCPU *cpu); + riscv_csr_operations csr_ops; +}; + /* CSR function table constants */ enum { CSR_TABLE_SIZE = 0x1000 @@ -842,7 +952,6 @@ extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[]; extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[]; extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[]; extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[]; -extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[]; typedef struct isa_ext_data { const char *name; @@ -862,18 +971,17 @@ extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE]; extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[]; void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops); -void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops); +void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops); void riscv_cpu_register_gdb_regs_for_features(CPUState *cs); target_ulong riscv_new_csr_seed(target_ulong new_value, target_ulong write_mask); -uint8_t satp_mode_max_from_map(uint32_t map); const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit); -/* Implemented in th_csr.c */ -void th_register_custom_csrs(RISCVCPU *cpu); +/* In th_csr.c */ +extern const RISCVCSR th_csr_list[]; const char *priv_spec_to_str(int priv_version); #endif /* RISCV_CPU_H */ diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index 32b068f18aa..b62dd82fe7c 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -34,6 +34,9 @@ /* Control and Status Registers */ +/* zicfiss user ssp csr */ +#define CSR_SSP 0x011 + /* User Trap Setup */ #define CSR_USTATUS 0x000 #define CSR_UIE 0x004 @@ -170,6 +173,13 @@ #define CSR_MISELECT 0x350 #define CSR_MIREG 0x351 +/* Machine Indirect Register Alias */ +#define CSR_MIREG2 0x352 +#define CSR_MIREG3 0x353 +#define CSR_MIREG4 0x355 +#define CSR_MIREG5 0x356 +#define CSR_MIREG6 0x357 + /* Machine-Level Interrupts (AIA) */ #define CSR_MTOPEI 0x35c #define CSR_MTOPI 0xfb0 @@ -200,6 +210,9 @@ #define CSR_SSTATEEN2 0x10E #define CSR_SSTATEEN3 0x10F +/* Supervisor Counter Delegation */ +#define CSR_SCOUNTINHIBIT 0x120 + /* Supervisor Trap Handling */ #define CSR_SSCRATCH 0x140 #define CSR_SEPC 0x141 @@ -219,6 +232,13 @@ #define CSR_SISELECT 0x150 #define CSR_SIREG 0x151 +/* Supervisor Indirect Register Alias */ +#define CSR_SIREG2 0x152 +#define CSR_SIREG3 0x153 +#define CSR_SIREG4 0x155 +#define CSR_SIREG5 0x156 +#define CSR_SIREG6 0x157 + /* Supervisor-Level Interrupts (AIA) */ #define CSR_STOPEI 0x15c #define CSR_STOPI 0xdb0 @@ -227,6 +247,17 @@ #define CSR_SIEH 0x114 #define CSR_SIPH 0x154 +/* Machine-Level Control transfer records CSRs */ +#define CSR_MCTRCTL 0x34e + +/* Supervisor-Level Control transfer records CSRs */ +#define CSR_SCTRCTL 0x14e +#define CSR_SCTRSTATUS 0x14f +#define CSR_SCTRDEPTH 0x15f + +/* VS-Level Control transfer records CSRs */ +#define CSR_VSCTRCTL 0x24e + /* Hpervisor CSRs */ #define CSR_HSTATUS 0x600 #define CSR_HEDELEG 0x602 @@ -285,6 +316,13 @@ #define CSR_VSISELECT 0x250 #define CSR_VSIREG 0x251 +/* Virtual Supervisor Indirect Alias */ +#define CSR_VSIREG2 0x252 +#define CSR_VSIREG3 0x253 +#define CSR_VSIREG4 0x255 +#define CSR_VSIREG5 0x256 +#define CSR_VSIREG6 0x257 + /* VS-Level Interrupts (H-extension with AIA) */ #define CSR_VSTOPEI 0x25c #define CSR_VSTOPI 0xeb0 @@ -317,6 +355,7 @@ #define SMSTATEEN0_CS (1ULL << 0) #define SMSTATEEN0_FCSR (1ULL << 1) #define SMSTATEEN0_JVT (1ULL << 2) +#define SMSTATEEN0_CTR (1ULL << 54) #define 
SMSTATEEN0_P1P13 (1ULL << 56) #define SMSTATEEN0_HSCONTXT (1ULL << 57) #define SMSTATEEN0_IMSIC (1ULL << 58) @@ -333,6 +372,18 @@ #define CSR_PMPCFG1 0x3a1 #define CSR_PMPCFG2 0x3a2 #define CSR_PMPCFG3 0x3a3 +#define CSR_PMPCFG4 0x3a4 +#define CSR_PMPCFG5 0x3a5 +#define CSR_PMPCFG6 0x3a6 +#define CSR_PMPCFG7 0x3a7 +#define CSR_PMPCFG8 0x3a8 +#define CSR_PMPCFG9 0x3a9 +#define CSR_PMPCFG10 0x3aa +#define CSR_PMPCFG11 0x3ab +#define CSR_PMPCFG12 0x3ac +#define CSR_PMPCFG13 0x3ad +#define CSR_PMPCFG14 0x3ae +#define CSR_PMPCFG15 0x3af #define CSR_PMPADDR0 0x3b0 #define CSR_PMPADDR1 0x3b1 #define CSR_PMPADDR2 0x3b2 @@ -349,6 +400,60 @@ #define CSR_PMPADDR13 0x3bd #define CSR_PMPADDR14 0x3be #define CSR_PMPADDR15 0x3bf +#define CSR_PMPADDR16 0x3c0 +#define CSR_PMPADDR17 0x3c1 +#define CSR_PMPADDR18 0x3c2 +#define CSR_PMPADDR19 0x3c3 +#define CSR_PMPADDR20 0x3c4 +#define CSR_PMPADDR21 0x3c5 +#define CSR_PMPADDR22 0x3c6 +#define CSR_PMPADDR23 0x3c7 +#define CSR_PMPADDR24 0x3c8 +#define CSR_PMPADDR25 0x3c9 +#define CSR_PMPADDR26 0x3ca +#define CSR_PMPADDR27 0x3cb +#define CSR_PMPADDR28 0x3cc +#define CSR_PMPADDR29 0x3cd +#define CSR_PMPADDR30 0x3ce +#define CSR_PMPADDR31 0x3cf +#define CSR_PMPADDR32 0x3d0 +#define CSR_PMPADDR33 0x3d1 +#define CSR_PMPADDR34 0x3d2 +#define CSR_PMPADDR35 0x3d3 +#define CSR_PMPADDR36 0x3d4 +#define CSR_PMPADDR37 0x3d5 +#define CSR_PMPADDR38 0x3d6 +#define CSR_PMPADDR39 0x3d7 +#define CSR_PMPADDR40 0x3d8 +#define CSR_PMPADDR41 0x3d9 +#define CSR_PMPADDR42 0x3da +#define CSR_PMPADDR43 0x3db +#define CSR_PMPADDR44 0x3dc +#define CSR_PMPADDR45 0x3dd +#define CSR_PMPADDR46 0x3de +#define CSR_PMPADDR47 0x3df +#define CSR_PMPADDR48 0x3e0 +#define CSR_PMPADDR49 0x3e1 +#define CSR_PMPADDR50 0x3e2 +#define CSR_PMPADDR51 0x3e3 +#define CSR_PMPADDR52 0x3e4 +#define CSR_PMPADDR53 0x3e5 +#define CSR_PMPADDR54 0x3e6 +#define CSR_PMPADDR55 0x3e7 +#define CSR_PMPADDR56 0x3e8 +#define CSR_PMPADDR57 0x3e9 +#define CSR_PMPADDR58 0x3ea +#define CSR_PMPADDR59 0x3eb +#define CSR_PMPADDR60 0x3ec +#define CSR_PMPADDR61 0x3ed +#define CSR_PMPADDR62 0x3ee +#define CSR_PMPADDR63 0x3ef + +/* RNMI */ +#define CSR_MNSCRATCH 0x740 +#define CSR_MNEPC 0x741 +#define CSR_MNCAUSE 0x742 +#define CSR_MNSTATUS 0x744 /* Debug/Trace Registers (shared with Debug Mode) */ #define CSR_TSELECT 0x7a0 @@ -494,37 +599,6 @@ #define CSR_MHPMCOUNTER30H 0xb9e #define CSR_MHPMCOUNTER31H 0xb9f -/* - * User PointerMasking registers - * NB: actual CSR numbers might be changed in future - */ -#define CSR_UMTE 0x4c0 -#define CSR_UPMMASK 0x4c1 -#define CSR_UPMBASE 0x4c2 - -/* - * Machine PointerMasking registers - * NB: actual CSR numbers might be changed in future - */ -#define CSR_MMTE 0x3c0 -#define CSR_MPMMASK 0x3c1 -#define CSR_MPMBASE 0x3c2 - -/* - * Supervisor PointerMaster registers - * NB: actual CSR numbers might be changed in future - */ -#define CSR_SMTE 0x1c0 -#define CSR_SPMMASK 0x1c1 -#define CSR_SPMBASE 0x1c2 - -/* - * Hypervisor PointerMaster registers - * NB: actual CSR numbers might be changed in future - */ -#define CSR_VSMTE 0x2c0 -#define CSR_VSPMMASK 0x2c1 -#define CSR_VSPMBASE 0x2c2 #define CSR_SCOUNTOVF 0xda0 /* Crypto Extension */ @@ -552,8 +626,12 @@ #define MSTATUS_TVM 0x00100000 /* since: priv-1.10 */ #define MSTATUS_TW 0x00200000 /* since: priv-1.10 */ #define MSTATUS_TSR 0x00400000 /* since: priv-1.10 */ +#define MSTATUS_SPELP 0x00800000 /* zicfilp */ +#define MSTATUS_SDT 0x01000000 +#define MSTATUS_MPELP 0x020000000000 /* zicfilp */ #define MSTATUS_GVA 0x4000000000ULL #define MSTATUS_MPV 
0x8000000000ULL +#define MSTATUS_MDT 0x40000000000ULL /* Smdbltrp extension */ #define MSTATUS64_UXL 0x0000000300000000ULL #define MSTATUS64_SXL 0x0000000C00000000ULL @@ -582,6 +660,8 @@ typedef enum { #define SSTATUS_XS 0x00018000 #define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */ #define SSTATUS_MXR 0x00080000 +#define SSTATUS_SPELP MSTATUS_SPELP /* zicfilp */ +#define SSTATUS_SDT MSTATUS_SDT #define SSTATUS64_UXL 0x0000000300000000ULL @@ -598,7 +678,9 @@ typedef enum { #define HSTATUS_VTVM 0x00100000 #define HSTATUS_VTW 0x00200000 #define HSTATUS_VTSR 0x00400000 +#define HSTATUS_HUKTE 0x01000000 #define HSTATUS_VSXL 0x300000000 +#define HSTATUS_HUPMM 0x3000000000000 #define HSTATUS32_WPRI 0xFF8FF87E #define HSTATUS64_WPRI 0xFFFFFFFFFF8FF87EULL @@ -627,6 +709,12 @@ typedef enum { #define SATP64_ASID 0x0FFFF00000000000ULL #define SATP64_PPN 0x00000FFFFFFFFFFFULL +/* RNMI mnstatus CSR mask */ +#define MNSTATUS_NMIE 0x00000008 +#define MNSTATUS_MNPV 0x00000080 +#define MNSTATUS_MNPELP 0x00000200 +#define MNSTATUS_MNPP 0x00001800 + /* VM modes (satp.mode) privileged ISA 1.10 */ #define VM_1_10_MBARE 0 #define VM_1_10_SV32 1 @@ -647,7 +735,8 @@ typedef enum { #define PTE_SOFT 0x300 /* Reserved for Software */ #define PTE_PBMT 0x6000000000000000ULL /* Page-based memory types */ #define PTE_N 0x8000000000000000ULL /* NAPOT translation */ -#define PTE_RESERVED 0x1FC0000000000000ULL /* Reserved bits */ +#define PTE_RESERVED(svrsw60t59b) \ + (svrsw60t59b ? 0x07C0000000000000ULL : 0x1FC0000000000000ULL) /* Reserved bits */ #define PTE_ATTR (PTE_N | PTE_PBMT) /* All attributes bits */ /* Page table PPN shift amount */ @@ -662,6 +751,12 @@ typedef enum { /* Default Reset Vector address */ #define DEFAULT_RSTVEC 0x1000 +/* Default RNMI Interrupt Vector address */ +#define DEFAULT_RNMI_IRQVEC 0x0 + +/* Default RNMI Exception Vector address */ +#define DEFAULT_RNMI_EXCPVEC 0x0 + /* Exception causes */ typedef enum RISCVException { RISCV_EXCP_NONE = -1, /* sentinel value */ @@ -680,6 +775,7 @@ typedef enum RISCVException { RISCV_EXCP_INST_PAGE_FAULT = 0xc, /* since: priv-1.10.0 */ RISCV_EXCP_LOAD_PAGE_FAULT = 0xd, /* since: priv-1.10.0 */ RISCV_EXCP_STORE_PAGE_FAULT = 0xf, /* since: priv-1.10.0 */ + RISCV_EXCP_DOUBLE_TRAP = 0x10, RISCV_EXCP_SW_CHECK = 0x12, /* since: priv-1.13.0 */ RISCV_EXCP_HW_ERR = 0x13, /* since: priv-1.13.0 */ RISCV_EXCP_INST_GUEST_PAGE_FAULT = 0x14, @@ -689,6 +785,11 @@ typedef enum RISCVException { RISCV_EXCP_SEMIHOST = 0x3f, } RISCVException; +/* zicfilp defines lp violation results in sw check with tval = 2*/ +#define RISCV_EXCP_SW_CHECK_FCFI_TVAL 2 +/* zicfiss defines ss violation results in sw check with tval = 3*/ +#define RISCV_EXCP_SW_CHECK_BCFI_TVAL 3 + #define RISCV_EXCP_INT_FLAG 0x80000000 #define RISCV_EXCP_INT_MASK 0x7fffffff @@ -711,6 +812,9 @@ typedef enum RISCVException { /* -1 is due to bit zero of hgeip and hgeie being ROZ. 
*/ #define IRQ_LOCAL_GUEST_MAX (TARGET_LONG_BITS - 1) +/* RNMI causes */ +#define RNMI_MAX 16 + /* mip masks */ #define MIP_USIP (1 << IRQ_U_SOFT) #define MIP_SSIP (1 << IRQ_S_SOFT) @@ -747,39 +851,49 @@ typedef enum RISCVException { #define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)) #define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS)) -/* General PointerMasking CSR bits */ -#define PM_ENABLE 0x00000001ULL -#define PM_CURRENT 0x00000002ULL -#define PM_INSN 0x00000004ULL - /* Execution environment configuration bits */ #define MENVCFG_FIOM BIT(0) +#define MENVCFG_LPE BIT(2) /* zicfilp */ +#define MENVCFG_SSE BIT(3) /* zicfiss */ #define MENVCFG_CBIE (3UL << 4) #define MENVCFG_CBCFE BIT(6) #define MENVCFG_CBZE BIT(7) +#define MENVCFG_PMM (3ULL << 32) +#define MENVCFG_DTE (1ULL << 59) +#define MENVCFG_CDE (1ULL << 60) #define MENVCFG_ADUE (1ULL << 61) #define MENVCFG_PBMTE (1ULL << 62) #define MENVCFG_STCE (1ULL << 63) /* For RV32 */ +#define MENVCFGH_DTE BIT(27) #define MENVCFGH_ADUE BIT(29) #define MENVCFGH_PBMTE BIT(30) #define MENVCFGH_STCE BIT(31) #define SENVCFG_FIOM MENVCFG_FIOM +#define SENVCFG_LPE MENVCFG_LPE +#define SENVCFG_SSE MENVCFG_SSE #define SENVCFG_CBIE MENVCFG_CBIE #define SENVCFG_CBCFE MENVCFG_CBCFE #define SENVCFG_CBZE MENVCFG_CBZE +#define SENVCFG_UKTE BIT(8) +#define SENVCFG_PMM MENVCFG_PMM #define HENVCFG_FIOM MENVCFG_FIOM +#define HENVCFG_LPE MENVCFG_LPE +#define HENVCFG_SSE MENVCFG_SSE #define HENVCFG_CBIE MENVCFG_CBIE #define HENVCFG_CBCFE MENVCFG_CBCFE #define HENVCFG_CBZE MENVCFG_CBZE +#define HENVCFG_PMM MENVCFG_PMM +#define HENVCFG_DTE MENVCFG_DTE #define HENVCFG_ADUE MENVCFG_ADUE #define HENVCFG_PBMTE MENVCFG_PBMTE #define HENVCFG_STCE MENVCFG_STCE /* For RV32 */ +#define HENVCFGH_DTE MENVCFGH_DTE #define HENVCFGH_ADUE MENVCFGH_ADUE #define HENVCFGH_PBMTE MENVCFGH_PBMTE #define HENVCFGH_STCE MENVCFGH_STCE @@ -835,6 +949,88 @@ typedef enum RISCVException { #define UMTE_U_PM_INSN U_PM_INSN #define UMTE_MASK (UMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | UMTE_U_PM_INSN) +/* CTR control register commom fields */ +#define XCTRCTL_U BIT_ULL(0) +#define XCTRCTL_S BIT_ULL(1) +#define XCTRCTL_RASEMU BIT_ULL(7) +#define XCTRCTL_STE BIT_ULL(8) +#define XCTRCTL_BPFRZ BIT_ULL(11) +#define XCTRCTL_LCOFIFRZ BIT_ULL(12) +#define XCTRCTL_EXCINH BIT_ULL(33) +#define XCTRCTL_INTRINH BIT_ULL(34) +#define XCTRCTL_TRETINH BIT_ULL(35) +#define XCTRCTL_NTBREN BIT_ULL(36) +#define XCTRCTL_TKBRINH BIT_ULL(37) +#define XCTRCTL_INDCALLINH BIT_ULL(40) +#define XCTRCTL_DIRCALLINH BIT_ULL(41) +#define XCTRCTL_INDJMPINH BIT_ULL(42) +#define XCTRCTL_DIRJMPINH BIT_ULL(43) +#define XCTRCTL_CORSWAPINH BIT_ULL(44) +#define XCTRCTL_RETINH BIT_ULL(45) +#define XCTRCTL_INDLJMPINH BIT_ULL(46) +#define XCTRCTL_DIRLJMPINH BIT_ULL(47) + +#define XCTRCTL_MASK (XCTRCTL_U | XCTRCTL_S | XCTRCTL_RASEMU | \ + XCTRCTL_STE | XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ | \ + XCTRCTL_EXCINH | XCTRCTL_INTRINH | XCTRCTL_TRETINH | \ + XCTRCTL_NTBREN | XCTRCTL_TKBRINH | XCTRCTL_INDCALLINH | \ + XCTRCTL_DIRCALLINH | XCTRCTL_INDJMPINH | \ + XCTRCTL_DIRJMPINH | XCTRCTL_CORSWAPINH | \ + XCTRCTL_RETINH | XCTRCTL_INDLJMPINH | XCTRCTL_DIRLJMPINH) + +#define XCTRCTL_INH_START 32U + +/* CTR mctrctl bits */ +#define MCTRCTL_M BIT_ULL(2) +#define MCTRCTL_MTE BIT_ULL(9) + +#define MCTRCTL_MASK (XCTRCTL_MASK | MCTRCTL_M | MCTRCTL_MTE) +#define SCTRCTL_MASK XCTRCTL_MASK +#define VSCTRCTL_MASK XCTRCTL_MASK + +/* sctrstatus CSR bits. 
*/ +#define SCTRSTATUS_WRPTR_MASK 0xFF +#define SCTRSTATUS_FROZEN BIT(31) +#define SCTRSTATUS_MASK (SCTRSTATUS_WRPTR_MASK | SCTRSTATUS_FROZEN) + +/* sctrdepth CSR bits. */ +#define SCTRDEPTH_MASK 0x7 +#define SCTRDEPTH_MIN 0U /* 16 Entries. */ +#define SCTRDEPTH_MAX 4U /* 256 Entries. */ + +#define CTR_ENTRIES_FIRST 0x200 +#define CTR_ENTRIES_LAST 0x2ff + +#define CTRSOURCE_VALID BIT(0) +#define CTRTARGET_MISP BIT(0) + +#define CTRDATA_TYPE_MASK 0xF +#define CTRDATA_CCV BIT(15) +#define CTRDATA_CCM_MASK 0xFFF0000 +#define CTRDATA_CCE_MASK 0xF0000000 + +#define CTRDATA_MASK (CTRDATA_TYPE_MASK | CTRDATA_CCV | \ + CTRDATA_CCM_MASK | CTRDATA_CCE_MASK) + +typedef enum CTRType { + CTRDATA_TYPE_NONE = 0, + CTRDATA_TYPE_EXCEPTION = 1, + CTRDATA_TYPE_INTERRUPT = 2, + CTRDATA_TYPE_EXCEP_INT_RET = 3, + CTRDATA_TYPE_NONTAKEN_BRANCH = 4, + CTRDATA_TYPE_TAKEN_BRANCH = 5, + CTRDATA_TYPE_RESERVED_0 = 6, + CTRDATA_TYPE_RESERVED_1 = 7, + CTRDATA_TYPE_INDIRECT_CALL = 8, + CTRDATA_TYPE_DIRECT_CALL = 9, + CTRDATA_TYPE_INDIRECT_JUMP = 10, + CTRDATA_TYPE_DIRECT_JUMP = 11, + CTRDATA_TYPE_CO_ROUTINE_SWAP = 12, + CTRDATA_TYPE_RETURN = 13, + CTRDATA_TYPE_OTHER_INDIRECT_JUMP = 14, + CTRDATA_TYPE_OTHER_DIRECT_JUMP = 15, +} CTRType; + /* MISELECT, SISELECT, and VSISELECT bits (AIA) */ #define ISELECT_IPRIO0 0x30 #define ISELECT_IPRIO15 0x3f @@ -846,10 +1042,15 @@ typedef enum RISCVException { #define ISELECT_IMSIC_EIE63 0xff #define ISELECT_IMSIC_FIRST ISELECT_IMSIC_EIDELIVERY #define ISELECT_IMSIC_LAST ISELECT_IMSIC_EIE63 -#define ISELECT_MASK 0x1ff +#define ISELECT_MASK_AIA 0x1ff + +/* [M|S|VS]SELCT value for Indirect CSR Access Extension */ +#define ISELECT_CD_FIRST 0x40 +#define ISELECT_CD_LAST 0x5f +#define ISELECT_MASK_SXCSRIND 0xfff /* Dummy [M|S|VS]ISELECT value for emulating [M|S|VS]TOPEI CSRs */ -#define ISELECT_IMSIC_TOPEI (ISELECT_MASK + 1) +#define ISELECT_IMSIC_TOPEI (ISELECT_MASK_AIA + 1) /* IMSIC bits (AIA) */ #define IMSIC_TOPEI_IID_SHIFT 16 @@ -938,15 +1139,27 @@ typedef enum RISCVException { MHPMEVENTH_BIT_VSINH | \ MHPMEVENTH_BIT_VUINH) -#define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000) -#define MHPMEVENT_IDX_MASK 0xFFFFF -#define MHPMEVENT_SSCOF_RESVD 16 +#define MHPMEVENT_SSCOF_MASK MAKE_64BIT_MASK(63, 56) +#define MHPMEVENT_IDX_MASK (~MHPMEVENT_SSCOF_MASK) + +/* RISC-V-specific interrupt pending bits. */ +#define CPU_INTERRUPT_RNMI CPU_INTERRUPT_TGT_EXT_0 /* JVT CSR bits */ #define JVT_MODE 0x3F #define JVT_BASE (~0x3F) /* Debug Sdtrig CSR masks */ +#define TEXTRA32_MHVALUE 0xFC000000 +#define TEXTRA32_MHSELECT 0x03800000 +#define TEXTRA32_SBYTEMASK 0x000C0000 +#define TEXTRA32_SVALUE 0x0003FFFC +#define TEXTRA32_SSELECT 0x00000003 +#define TEXTRA64_MHVALUE 0xFFF8000000000000ULL +#define TEXTRA64_MHSELECT 0x0007000000000000ULL +#define TEXTRA64_SBYTEMASK 0x000000F000000000ULL +#define TEXTRA64_SVALUE 0x00000003FFFFFFFCULL +#define TEXTRA64_SSELECT 0x0000000000000003ULL #define MCONTEXT32 0x0000003F #define MCONTEXT64 0x0000000000001FFFULL #define MCONTEXT32_HCONTEXT 0x0000007F diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h index 8b272fb826e..aa28dc8d7e6 100644 --- a/target/riscv/cpu_cfg.h +++ b/target/riscv/cpu_cfg.h @@ -21,160 +21,10 @@ #ifndef RISCV_CPU_CFG_H #define RISCV_CPU_CFG_H -/* - * map is a 16-bit bitmap: the most significant set bit in map is the maximum - * satp mode that is supported. It may be chosen by the user and must respect - * what qemu implements (valid_1_10_32/64) and what the hw is capable of - * (supported bitmap below). 
- * - * init is a 16-bit bitmap used to make sure the user selected a correct - * configuration as per the specification. - * - * supported is a 16-bit bitmap used to reflect the hw capabilities. - */ -typedef struct { - uint16_t map, init, supported; -} RISCVSATPMap; - struct RISCVCPUConfig { - bool ext_zba; - bool ext_zbb; - bool ext_zbc; - bool ext_zbkb; - bool ext_zbkc; - bool ext_zbkx; - bool ext_zbs; - bool ext_zca; - bool ext_zcb; - bool ext_zcd; - bool ext_zce; - bool ext_zcf; - bool ext_zcmp; - bool ext_zcmt; - bool ext_zk; - bool ext_zkn; - bool ext_zknd; - bool ext_zkne; - bool ext_zknh; - bool ext_zkr; - bool ext_zks; - bool ext_zksed; - bool ext_zksh; - bool ext_zkt; - bool ext_zifencei; - bool ext_zicntr; - bool ext_zicsr; - bool ext_zicbom; - bool ext_zicbop; - bool ext_zicboz; - bool ext_zicond; - bool ext_zihintntl; - bool ext_zihintpause; - bool ext_zihpm; - bool ext_zimop; - bool ext_zcmop; - bool ext_ztso; - bool ext_smstateen; - bool ext_sstc; - bool ext_smcntrpmf; - bool ext_svadu; - bool ext_svinval; - bool ext_svnapot; - bool ext_svpbmt; - bool ext_zdinx; - bool ext_zaamo; - bool ext_zacas; - bool ext_zama16b; - bool ext_zabha; - bool ext_zalrsc; - bool ext_zawrs; - bool ext_zfa; - bool ext_zfbfmin; - bool ext_zfh; - bool ext_zfhmin; - bool ext_zfinx; - bool ext_zhinx; - bool ext_zhinxmin; - bool ext_zve32f; - bool ext_zve32x; - bool ext_zve64f; - bool ext_zve64d; - bool ext_zve64x; - bool ext_zvbb; - bool ext_zvbc; - bool ext_zvkb; - bool ext_zvkg; - bool ext_zvkned; - bool ext_zvknha; - bool ext_zvknhb; - bool ext_zvksed; - bool ext_zvksh; - bool ext_zvkt; - bool ext_zvkn; - bool ext_zvknc; - bool ext_zvkng; - bool ext_zvks; - bool ext_zvksc; - bool ext_zvksg; - bool ext_zmmul; - bool ext_zvfbfmin; - bool ext_zvfbfwma; - bool ext_zvfh; - bool ext_zvfhmin; - bool ext_smaia; - bool ext_ssaia; - bool ext_sscofpmf; - bool ext_smepmp; - bool rvv_ta_all_1s; - bool rvv_ma_all_1s; - - uint32_t mvendorid; - uint64_t marchid; - uint64_t mimpid; - - /* Named features */ - bool ext_svade; - bool ext_zic64b; - - /* - * Always 'true' booleans for named features - * TCG always implement/can't be user disabled, - * based on spec version. 
- */ - bool has_priv_1_13; - bool has_priv_1_12; - bool has_priv_1_11; - - /* Vendor-specific custom extensions */ - bool ext_xtheadba; - bool ext_xtheadbb; - bool ext_xtheadbs; - bool ext_xtheadcmo; - bool ext_xtheadcondmov; - bool ext_xtheadfmemidx; - bool ext_xtheadfmv; - bool ext_xtheadmac; - bool ext_xtheadmemidx; - bool ext_xtheadmempair; - bool ext_xtheadsync; - bool ext_XVentanaCondOps; - - uint32_t pmu_mask; - uint16_t vlenb; - uint16_t elen; - uint16_t cbom_blocksize; - uint16_t cbop_blocksize; - uint16_t cboz_blocksize; - bool mmu; - bool pmp; - bool debug; - bool misa_w; - - bool short_isa_string; - -#ifndef CONFIG_USER_ONLY - RISCVSATPMap satp_mode; -#endif +#define BOOL_FIELD(x) bool x; +#define TYPED_FIELD(type, x, default) type x; +#include "cpu_cfg_fields.h.inc" }; typedef struct RISCVCPUConfig RISCVCPUConfig; diff --git a/target/riscv/cpu_cfg_fields.h.inc b/target/riscv/cpu_cfg_fields.h.inc new file mode 100644 index 00000000000..e2d116f0dfb --- /dev/null +++ b/target/riscv/cpu_cfg_fields.h.inc @@ -0,0 +1,173 @@ +/* + * Required definitions before including this file: + * + * #define BOOL_FIELD(x) + * #define TYPED_FIELD(type, x, default) + */ + +BOOL_FIELD(ext_zba) +BOOL_FIELD(ext_zbb) +BOOL_FIELD(ext_zbc) +BOOL_FIELD(ext_zbkb) +BOOL_FIELD(ext_zbkc) +BOOL_FIELD(ext_zbkx) +BOOL_FIELD(ext_zbs) +BOOL_FIELD(ext_zca) +BOOL_FIELD(ext_zcb) +BOOL_FIELD(ext_zcd) +BOOL_FIELD(ext_zce) +BOOL_FIELD(ext_zcf) +BOOL_FIELD(ext_zcmp) +BOOL_FIELD(ext_zcmt) +BOOL_FIELD(ext_zk) +BOOL_FIELD(ext_zkn) +BOOL_FIELD(ext_zknd) +BOOL_FIELD(ext_zkne) +BOOL_FIELD(ext_zknh) +BOOL_FIELD(ext_zkr) +BOOL_FIELD(ext_zks) +BOOL_FIELD(ext_zksed) +BOOL_FIELD(ext_zksh) +BOOL_FIELD(ext_zkt) +BOOL_FIELD(ext_zifencei) +BOOL_FIELD(ext_zicntr) +BOOL_FIELD(ext_zicsr) +BOOL_FIELD(ext_zicbom) +BOOL_FIELD(ext_zicbop) +BOOL_FIELD(ext_zicboz) +BOOL_FIELD(ext_zicfilp) +BOOL_FIELD(ext_zicfiss) +BOOL_FIELD(ext_zicond) +BOOL_FIELD(ext_zihintntl) +BOOL_FIELD(ext_zihintpause) +BOOL_FIELD(ext_zihpm) +BOOL_FIELD(ext_zimop) +BOOL_FIELD(ext_zcmop) +BOOL_FIELD(ext_ztso) +BOOL_FIELD(ext_smstateen) +BOOL_FIELD(ext_sstc) +BOOL_FIELD(ext_smcdeleg) +BOOL_FIELD(ext_ssccfg) +BOOL_FIELD(ext_smcntrpmf) +BOOL_FIELD(ext_smcsrind) +BOOL_FIELD(ext_sscsrind) +BOOL_FIELD(ext_ssdbltrp) +BOOL_FIELD(ext_smdbltrp) +BOOL_FIELD(ext_svadu) +BOOL_FIELD(ext_svinval) +BOOL_FIELD(ext_svnapot) +BOOL_FIELD(ext_svpbmt) +BOOL_FIELD(ext_svrsw60t59b) +BOOL_FIELD(ext_svvptc) +BOOL_FIELD(ext_svukte) +BOOL_FIELD(ext_zdinx) +BOOL_FIELD(ext_zaamo) +BOOL_FIELD(ext_zacas) +BOOL_FIELD(ext_zama16b) +BOOL_FIELD(ext_zabha) +BOOL_FIELD(ext_zalrsc) +BOOL_FIELD(ext_zawrs) +BOOL_FIELD(ext_zfa) +BOOL_FIELD(ext_zfbfmin) +BOOL_FIELD(ext_zfh) +BOOL_FIELD(ext_zfhmin) +BOOL_FIELD(ext_zfinx) +BOOL_FIELD(ext_zhinx) +BOOL_FIELD(ext_zhinxmin) +BOOL_FIELD(ext_zve32f) +BOOL_FIELD(ext_zve32x) +BOOL_FIELD(ext_zve64f) +BOOL_FIELD(ext_zve64d) +BOOL_FIELD(ext_zve64x) +BOOL_FIELD(ext_zvbb) +BOOL_FIELD(ext_zvbc) +BOOL_FIELD(ext_zvkb) +BOOL_FIELD(ext_zvkg) +BOOL_FIELD(ext_zvkned) +BOOL_FIELD(ext_zvknha) +BOOL_FIELD(ext_zvknhb) +BOOL_FIELD(ext_zvksed) +BOOL_FIELD(ext_zvksh) +BOOL_FIELD(ext_zvkt) +BOOL_FIELD(ext_zvkn) +BOOL_FIELD(ext_zvknc) +BOOL_FIELD(ext_zvkng) +BOOL_FIELD(ext_zvks) +BOOL_FIELD(ext_zvksc) +BOOL_FIELD(ext_zvksg) +BOOL_FIELD(ext_zmmul) +BOOL_FIELD(ext_zvfbfmin) +BOOL_FIELD(ext_zvfbfwma) +BOOL_FIELD(ext_zvfh) +BOOL_FIELD(ext_zvfhmin) +BOOL_FIELD(ext_smaia) +BOOL_FIELD(ext_ssaia) +BOOL_FIELD(ext_smctr) +BOOL_FIELD(ext_ssctr) +BOOL_FIELD(ext_sscofpmf) +BOOL_FIELD(ext_smepmp) 
+BOOL_FIELD(ext_smrnmi) +BOOL_FIELD(ext_ssnpm) +BOOL_FIELD(ext_smnpm) +BOOL_FIELD(ext_smmpm) +BOOL_FIELD(ext_sspm) +BOOL_FIELD(ext_supm) +BOOL_FIELD(rvv_ta_all_1s) +BOOL_FIELD(rvv_ma_all_1s) +BOOL_FIELD(rvv_vl_half_avl) +BOOL_FIELD(rvv_vsetvl_x0_vill) +/* Named features */ +BOOL_FIELD(ext_svade) +BOOL_FIELD(ext_zic64b) +BOOL_FIELD(ext_ssstateen) +BOOL_FIELD(ext_sha) + +/* + * Always 'true' booleans for named features + * TCG always implement/can't be user disabled, + * based on spec version. + */ +BOOL_FIELD(has_priv_1_13) +BOOL_FIELD(has_priv_1_12) +BOOL_FIELD(has_priv_1_11) + +/* Always enabled for TCG if has_priv_1_11 */ +BOOL_FIELD(ext_ziccrse) + +/* Vendor-specific custom extensions */ +BOOL_FIELD(ext_xtheadba) +BOOL_FIELD(ext_xtheadbb) +BOOL_FIELD(ext_xtheadbs) +BOOL_FIELD(ext_xtheadcmo) +BOOL_FIELD(ext_xtheadcondmov) +BOOL_FIELD(ext_xtheadfmemidx) +BOOL_FIELD(ext_xtheadfmv) +BOOL_FIELD(ext_xtheadmac) +BOOL_FIELD(ext_xtheadmemidx) +BOOL_FIELD(ext_xtheadmempair) +BOOL_FIELD(ext_xtheadsync) +BOOL_FIELD(ext_XVentanaCondOps) + +BOOL_FIELD(mmu) +BOOL_FIELD(pmp) +BOOL_FIELD(debug) +BOOL_FIELD(misa_w) + +BOOL_FIELD(short_isa_string) + +TYPED_FIELD(uint32_t, mvendorid, 0) +TYPED_FIELD(uint64_t, marchid, 0) +TYPED_FIELD(uint64_t, mimpid, 0) + +TYPED_FIELD(uint32_t, pmu_mask, 0) +TYPED_FIELD(uint16_t, vlenb, 0) +TYPED_FIELD(uint16_t, elen, 0) +TYPED_FIELD(uint16_t, cbom_blocksize, 0) +TYPED_FIELD(uint16_t, cbop_blocksize, 0) +TYPED_FIELD(uint16_t, cboz_blocksize, 0) +TYPED_FIELD(uint8_t, pmp_regions, 0) + +TYPED_FIELD(int8_t, max_satp_mode, -1) + +#undef BOOL_FIELD +#undef TYPED_FIELD diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index 395a1d91406..3479a62cc7f 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -23,16 +23,19 @@ #include "cpu.h" #include "internals.h" #include "pmu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" +#include "exec/target_page.h" +#include "system/memory.h" #include "instmap.h" #include "tcg/tcg-op.h" +#include "accel/tcg/cpu-ops.h" #include "trace.h" #include "semihosting/common-semi.h" -#include "sysemu/cpu-timers.h" +#include "exec/icount.h" #include "cpu_bits.h" #include "debug.h" -#include "tcg/oversized-guest.h" +#include "pmp.h" int riscv_env_mmu_index(CPURISCVState *env, bool ifetch) { @@ -63,134 +66,169 @@ int riscv_env_mmu_index(CPURISCVState *env, bool ifetch) #endif } -void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *pflags) +bool cpu_get_fcfien(CPURISCVState *env) { - RISCVCPU *cpu = env_archcpu(env); - RISCVExtStatus fs, vs; - uint32_t flags = 0; + /* no cfi extension, return false */ + if (!env_archcpu(env)->cfg.ext_zicfilp) { + return false; + } - *pc = env->xl == MXL_RV32 ? 
env->pc & UINT32_MAX : env->pc; - *cs_base = 0; + switch (env->priv) { + case PRV_U: + if (riscv_has_ext(env, RVS)) { + return env->senvcfg & SENVCFG_LPE; + } + return env->menvcfg & MENVCFG_LPE; +#ifndef CONFIG_USER_ONLY + case PRV_S: + if (env->virt_enabled) { + return env->henvcfg & HENVCFG_LPE; + } + return env->menvcfg & MENVCFG_LPE; + case PRV_M: + return env->mseccfg & MSECCFG_MLPE; +#endif + default: + g_assert_not_reached(); + } +} - if (cpu->cfg.ext_zve32x) { +bool cpu_get_bcfien(CPURISCVState *env) +{ + /* no cfi extension, return false */ + if (!env_archcpu(env)->cfg.ext_zicfiss) { + return false; + } + + switch (env->priv) { + case PRV_U: /* - * If env->vl equals to VLMAX, we can use generic vector operation - * expanders (GVEC) to accerlate the vector operations. - * However, as LMUL could be a fractional number. The maximum - * vector size can be operated might be less than 8 bytes, - * which is not supported by GVEC. So we set vl_eq_vlmax flag to true - * only when maxsz >= 8 bytes. + * If S is not implemented then shadow stack for U can't be turned on + * It is checked in `riscv_cpu_validate_set_extensions`, so no need to + * check here or assert here */ - - /* lmul encoded as in DisasContext::lmul */ - int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3); - uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW); - uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul); - uint32_t maxsz = vlmax << vsew; - bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) && - (maxsz >= 8); - flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill); - flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew); - flags = FIELD_DP32(flags, TB_FLAGS, LMUL, - FIELD_EX64(env->vtype, VTYPE, VLMUL)); - flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); - flags = FIELD_DP32(flags, TB_FLAGS, VTA, - FIELD_EX64(env->vtype, VTYPE, VTA)); - flags = FIELD_DP32(flags, TB_FLAGS, VMA, - FIELD_EX64(env->vtype, VTYPE, VMA)); - flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0); - } else { - flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1); + return env->senvcfg & SENVCFG_SSE; +#ifndef CONFIG_USER_ONLY + case PRV_S: + if (env->virt_enabled) { + return env->henvcfg & HENVCFG_SSE; + } + return env->menvcfg & MENVCFG_SSE; + case PRV_M: /* M-mode shadow stack is always off */ + return false; +#endif + default: + g_assert_not_reached(); } +} +bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt) +{ #ifdef CONFIG_USER_ONLY - fs = EXT_STATUS_DIRTY; - vs = EXT_STATUS_DIRTY; + return false; #else - flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv); - - flags |= riscv_env_mmu_index(env, 0); - fs = get_field(env->mstatus, MSTATUS_FS); - vs = get_field(env->mstatus, MSTATUS_VS); - - if (env->virt_enabled) { - flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1); - /* - * Merge DISABLED and !DIRTY states using MIN. - * We will set both fields when dirtying. - */ - fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS)); - vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS)); + if (virt) { + return (env->henvcfg & HENVCFG_DTE) != 0; + } else { + return (env->menvcfg & MENVCFG_DTE) != 0; } +#endif +} - /* With Zfinx, floating point is enabled/disabled by Smstateen. */ - if (!riscv_has_ext(env, RVF)) { - fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE) - ? 
EXT_STATUS_DIRTY : EXT_STATUS_DISABLED; - } +RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env) +{ +#ifndef CONFIG_USER_ONLY + int priv_mode = cpu_address_mode(env); - if (cpu->cfg.debug && !icount_enabled()) { - flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled); + if (get_field(env->mstatus, MSTATUS_MPRV) && + get_field(env->mstatus, MSTATUS_MXR)) { + return PMM_FIELD_DISABLED; } -#endif - flags = FIELD_DP32(flags, TB_FLAGS, FS, fs); - flags = FIELD_DP32(flags, TB_FLAGS, VS, vs); - flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl); - flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env)); - if (env->cur_pmmask != 0) { - flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1); - } - if (env->cur_pmbase != 0) { - flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1); + /* Get current PMM field */ + switch (priv_mode) { + case PRV_M: + if (riscv_cpu_cfg(env)->ext_smmpm) { + return get_field(env->mseccfg, MSECCFG_PMM); + } + break; + case PRV_S: + if (riscv_cpu_cfg(env)->ext_smnpm) { + if (get_field(env->mstatus, MSTATUS_MPV)) { + return get_field(env->henvcfg, HENVCFG_PMM); + } else { + return get_field(env->menvcfg, MENVCFG_PMM); + } + } + break; + case PRV_U: + if (riscv_has_ext(env, RVS)) { + if (riscv_cpu_cfg(env)->ext_ssnpm) { + return get_field(env->senvcfg, SENVCFG_PMM); + } + } else { + if (riscv_cpu_cfg(env)->ext_smnpm) { + return get_field(env->menvcfg, MENVCFG_PMM); + } + } + break; + default: + g_assert_not_reached(); } - - *pflags = flags; + return PMM_FIELD_DISABLED; +#else + return PMM_FIELD_DISABLED; +#endif } -void riscv_cpu_update_mask(CPURISCVState *env) +RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env) { - target_ulong mask = 0, base = 0; - RISCVMXL xl = env->xl; - /* - * TODO: Current RVJ spec does not specify - * how the extension interacts with XLEN. 
- */ #ifndef CONFIG_USER_ONLY - int mode = cpu_address_mode(env); - xl = cpu_get_xl(env, mode); - if (riscv_has_ext(env, RVJ)) { - switch (mode) { - case PRV_M: - if (env->mmte & M_PM_ENABLE) { - mask = env->mpmmask; - base = env->mpmbase; - } - break; - case PRV_S: - if (env->mmte & S_PM_ENABLE) { - mask = env->spmmask; - base = env->spmbase; - } - break; - case PRV_U: - if (env->mmte & U_PM_ENABLE) { - mask = env->upmmask; - base = env->upmbase; - } - break; - default: - g_assert_not_reached(); + int priv_mode = cpu_address_mode(env); + + if (priv_mode == PRV_U) { + return get_field(env->hstatus, HSTATUS_HUPMM); + } else { + if (get_field(env->hstatus, HSTATUS_SPVP)) { + return get_field(env->henvcfg, HENVCFG_PMM); + } else { + return get_field(env->senvcfg, SENVCFG_PMM); } } +#else + return PMM_FIELD_DISABLED; #endif - if (xl == MXL_RV32) { - env->cur_pmmask = mask & UINT32_MAX; - env->cur_pmbase = base & UINT32_MAX; +} + +bool riscv_cpu_virt_mem_enabled(CPURISCVState *env) +{ +#ifndef CONFIG_USER_ONLY + int satp_mode = 0; + int priv_mode = cpu_address_mode(env); + + if (riscv_cpu_mxl(env) == MXL_RV32) { + satp_mode = get_field(env->satp, SATP32_MODE); } else { - env->cur_pmmask = mask; - env->cur_pmbase = base; + satp_mode = get_field(env->satp, SATP64_MODE); + } + + return ((satp_mode != VM_1_10_MBARE) && (priv_mode != PRV_M)); +#else + return false; +#endif +} + +uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm) +{ + switch (pmm) { + case PMM_FIELD_DISABLED: + return 0; + case PMM_FIELD_PMLEN7: + return 7; + case PMM_FIELD_PMLEN16: + return 16; + default: + g_assert_not_reached(); } } @@ -434,6 +472,18 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env) uint64_t vsbits, irq_delegated; int virq; + /* Priority: RNMI > Other interrupt. */ + if (riscv_cpu_cfg(env)->ext_smrnmi) { + /* If mnstatus.NMIE == 0, all interrupts are disabled. 
*/ + if (!get_field(env->mnstatus, MNSTATUS_NMIE)) { + return RISCV_EXCP_NONE; + } + + if (env->rnmip) { + return ctz64(env->rnmip); /* since non-zero */ + } + } + /* Determine interrupt enable state of all privilege modes */ if (env->virt_enabled) { mie = 1; @@ -496,7 +546,9 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env) bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request) { - if (interrupt_request & CPU_INTERRUPT_HARD) { + uint32_t mask = CPU_INTERRUPT_HARD | CPU_INTERRUPT_RNMI; + + if (interrupt_request & mask) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; int interruptno = riscv_cpu_local_irq_pending(env); @@ -546,8 +598,21 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env) } bool current_virt = env->virt_enabled; + /* + * If zicfilp extension available and henvcfg.LPE = 1, + * then apply SPELP mask on mstatus + */ + if (env_archcpu(env)->cfg.ext_zicfilp && + get_field(env->henvcfg, HENVCFG_LPE)) { + mstatus_mask |= SSTATUS_SPELP; + } + g_assert(riscv_has_ext(env, RVH)); + if (riscv_env_smode_dbltrp_enabled(env, current_virt)) { + mstatus_mask |= MSTATUS_SDT; + } + if (current_virt) { /* Current V=1 and we are about to change to V=0 */ env->vsstatus = env->mstatus & mstatus_mask; @@ -619,6 +684,30 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen) env->geilen = geilen; } +void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level) +{ + CPURISCVState *env = &cpu->env; + CPUState *cs = CPU(cpu); + bool release_lock = false; + + if (!bql_locked()) { + release_lock = true; + bql_lock(); + } + + if (level) { + env->rnmip |= 1 << irq; + cpu_interrupt(cs, CPU_INTERRUPT_RNMI); + } else { + env->rnmip &= ~(1 << irq); + cpu_reset_interrupt(cs, CPU_INTERRUPT_RNMI); + } + + if (release_lock) { + bql_unlock(); + } +} + int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts) { CPURISCVState *env = &cpu->env; @@ -691,6 +780,254 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv, } } +static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask, + bool virt) +{ + uint64_t ctl = virt ? env->vsctrctl : env->mctrctl; + + assert((freeze_mask & (~(XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ))) == 0); + + if (ctl & freeze_mask) { + env->sctrstatus |= SCTRSTATUS_FROZEN; + } +} + +void riscv_ctr_clear(CPURISCVState *env) +{ + memset(env->ctr_src, 0x0, sizeof(env->ctr_src)); + memset(env->ctr_dst, 0x0, sizeof(env->ctr_dst)); + memset(env->ctr_data, 0x0, sizeof(env->ctr_data)); +} + +static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt) +{ + switch (priv) { + case PRV_M: + return MCTRCTL_M; + case PRV_S: + if (virt) { + return XCTRCTL_S; + } + return XCTRCTL_S; + case PRV_U: + if (virt) { + return XCTRCTL_U; + } + return XCTRCTL_U; + } + + g_assert_not_reached(); +} + +static uint64_t riscv_ctr_get_control(CPURISCVState *env, target_long priv, + bool virt) +{ + switch (priv) { + case PRV_M: + return env->mctrctl; + case PRV_S: + case PRV_U: + if (virt) { + return env->vsctrctl; + } + return env->mctrctl; + } + + g_assert_not_reached(); +} + +/* + * This function assumes that src privilege and target privilege are not same + * and src privilege is less than target privilege. This includes the virtual + * state as well. + */ +static bool riscv_ctr_check_xte(CPURISCVState *env, target_long src_prv, + bool src_virt) +{ + target_long tgt_prv = env->priv; + bool res = true; + + /* + * VS and U mode are same in terms of xTE bits required to record an + * external trap. See 6.1.2. 
External Traps, table 8 External Trap Enable + * Requirements. This changes VS to U to simplify the logic a bit. + */ + if (src_virt && src_prv == PRV_S) { + src_prv = PRV_U; + } else if (env->virt_enabled && tgt_prv == PRV_S) { + tgt_prv = PRV_U; + } + + /* VU mode is an outlier here. */ + if (src_virt && src_prv == PRV_U) { + res &= !!(env->vsctrctl & XCTRCTL_STE); + } + + switch (src_prv) { + case PRV_U: + if (tgt_prv == PRV_U) { + break; + } + res &= !!(env->mctrctl & XCTRCTL_STE); + /* fall-through */ + case PRV_S: + if (tgt_prv == PRV_S) { + break; + } + res &= !!(env->mctrctl & MCTRCTL_MTE); + /* fall-through */ + case PRV_M: + break; + } + + return res; +} + +/* + * Special cases for traps and trap returns: + * + * 1- Traps, and trap returns, between enabled modes are recorded as normal. + * 2- Traps from an inhibited mode to an enabled mode, and trap returns from an + * enabled mode back to an inhibited mode, are partially recorded. In such + * cases, the PC from the inhibited mode (source PC for traps, and target PC + * for trap returns) is 0. + * + * 3- Trap returns from an inhibited mode to an enabled mode are not recorded. + * Traps from an enabled mode to an inhibited mode, known as external traps, + * receive special handling. + * By default external traps are not recorded, but a handshake mechanism exists + * to allow partial recording. Software running in the target mode of the trap + * can opt-in to allowing CTR to record traps into that mode even when the mode + * is inhibited. The MTE, STE, and VSTE bits allow M-mode, S-mode, and VS-mode, + * respectively, to opt-in. When an External Trap occurs, and xTE=1, such that + * x is the target privilege mode of the trap, will CTR record the trap. In such + * cases, the target PC is 0. + */ +/* + * CTR arrays are implemented as circular buffers and new entry is stored at + * sctrstatus.WRPTR, but they are presented to software as moving circular + * buffers. Which means, software get's the illusion that whenever a new entry + * is added the whole buffer is moved by one place and the new entry is added at + * the start keeping new entry at idx 0 and older ones follow. + * + * Depth = 16. + * + * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F] + * WRPTR W + * entry 7 6 5 4 3 2 1 0 F E D C B A 9 8 + * + * When a new entry is added: + * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F] + * WRPTR W + * entry 8 7 6 5 4 3 2 1 0 F E D C B A 9 + * + * entry here denotes the logical entry number that software can access + * using ctrsource, ctrtarget and ctrdata registers. So xiselect 0x200 + * will return entry 0 i-e buffer[8] and 0x201 will return entry 1 i-e + * buffer[7]. Here is how we convert entry to buffer idx. + * + * entry = isel - CTR_ENTRIES_FIRST; + * idx = (sctrstatus.WRPTR - entry - 1) & (depth - 1); + */ +void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst, + enum CTRType type, target_ulong src_priv, bool src_virt) +{ + bool tgt_virt = env->virt_enabled; + uint64_t src_mask = riscv_ctr_priv_to_mask(src_priv, src_virt); + uint64_t tgt_mask = riscv_ctr_priv_to_mask(env->priv, tgt_virt); + uint64_t src_ctrl = riscv_ctr_get_control(env, src_priv, src_virt); + uint64_t tgt_ctrl = riscv_ctr_get_control(env, env->priv, tgt_virt); + uint64_t depth, head; + bool ext_trap = false; + + /* + * Return immediately if both target and src recording is disabled or if + * CTR is in frozen state. 
+     */
+    if ((!(src_ctrl & src_mask) && !(tgt_ctrl & tgt_mask)) ||
+        env->sctrstatus & SCTRSTATUS_FROZEN) {
+        return;
+    }
+
+    /*
+     * With RAS emulation enabled, only allow indirect calls, direct calls,
+     * function returns and co-routine swap types.
+     */
+    if (tgt_ctrl & XCTRCTL_RASEMU &&
+        type != CTRDATA_TYPE_INDIRECT_CALL &&
+        type != CTRDATA_TYPE_DIRECT_CALL &&
+        type != CTRDATA_TYPE_RETURN &&
+        type != CTRDATA_TYPE_CO_ROUTINE_SWAP) {
+        return;
+    }
+
+    if (type == CTRDATA_TYPE_EXCEPTION || type == CTRDATA_TYPE_INTERRUPT) {
+        /* Case 2 for traps. */
+        if (!(src_ctrl & src_mask)) {
+            src = 0;
+        } else if (!(tgt_ctrl & tgt_mask)) {
+            /* Check if target priv-mode has allowed external trap recording. */
+            if (!riscv_ctr_check_xte(env, src_priv, src_virt)) {
+                return;
+            }
+
+            ext_trap = true;
+            dst = 0;
+        }
+    } else if (type == CTRDATA_TYPE_EXCEP_INT_RET) {
+        /*
+         * Case 3 for trap returns. Trap returns from an inhibited mode are
+         * not recorded.
+         */
+        if (!(src_ctrl & src_mask)) {
+            return;
+        }
+
+        /* Case 2 for trap returns. */
+        if (!(tgt_ctrl & tgt_mask)) {
+            dst = 0;
+        }
+    }
+
+    /* Ignore filters in case of RASEMU mode or an external trap. */
+    if (!(tgt_ctrl & XCTRCTL_RASEMU) && !ext_trap) {
+        /*
+         * Check if the specific type is inhibited. The not-taken branch
+         * filter is an enable bit and needs to be checked separately.
+         */
+        bool check = tgt_ctrl & BIT_ULL(type + XCTRCTL_INH_START);
+        if ((type == CTRDATA_TYPE_NONTAKEN_BRANCH && !check) ||
+            (type != CTRDATA_TYPE_NONTAKEN_BRANCH && check)) {
+            return;
+        }
+    }
+
+    head = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
+
+    depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
+    if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_RETURN) {
+        head = (head - 1) & (depth - 1);
+
+        env->ctr_src[head] &= ~CTRSOURCE_VALID;
+        env->sctrstatus =
+            set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
+        return;
+    }
+
+    /* In the case of a co-routine swap we overwrite the latest entry. */
+    if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_CO_ROUTINE_SWAP) {
+        head = (head - 1) & (depth - 1);
+    }
+
+    env->ctr_src[head] = src | CTRSOURCE_VALID;
+    env->ctr_dst[head] = dst & ~CTRTARGET_MISP;
+    env->ctr_data[head] = set_field(0, CTRDATA_TYPE_MASK, type);
+
+    head = (head + 1) & (depth - 1);
+
+    env->sctrstatus = set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
+}
+
 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
 {
     g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
@@ -706,7 +1043,6 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
     /* tlb_flush is unnecessary as mode is contained in mmu_idx */
     env->priv = newpriv;
     env->xl = cpu_recompute_xl(env);
-    riscv_cpu_update_mask(env);
 
     /*
      * Clear the load reservation - otherwise a reservation placed in one
@@ -777,6 +1113,55 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
     return TRANSLATE_SUCCESS;
 }
 
+/* Returns 'true' if an svukte address check is needed */
+static bool do_svukte_check(CPURISCVState *env, bool first_stage,
+                            int mode, bool virt)
+{
+    /* Svukte extension depends on Sv39. */
+    if (!(env_archcpu(env)->cfg.ext_svukte ||
+          !first_stage ||
+          VM_1_10_SV39 != get_field(env->satp, SATP64_MODE))) {
+        return false;
+    }
+
+    /*
+     * Check hstatus.HUKTE if the effective mode is switched to VU-mode by
+     * executing HLV/HLVX/HSV in U-mode.
+     * For other cases, check senvcfg.UKTE.
+ */ + if (env->priv == PRV_U && !env->virt_enabled && virt) { + if (!get_field(env->hstatus, HSTATUS_HUKTE)) { + return false; + } + } else if (!get_field(env->senvcfg, SENVCFG_UKTE)) { + return false; + } + + /* + * Svukte extension is qualified only in U or VU-mode. + * + * Effective mode can be switched to U or VU-mode by: + * - M-mode + mstatus.MPRV=1 + mstatus.MPP=U-mode. + * - Execute HLV/HLVX/HSV from HS-mode + hstatus.SPVP=0. + * - U-mode. + * - VU-mode. + * - Execute HLV/HLVX/HSV from U-mode + hstatus.HU=1. + */ + if (mode != PRV_U) { + return false; + } + + return true; +} + +static bool check_svukte_addr(CPURISCVState *env, vaddr addr) +{ + /* svukte extension excludes RV32 */ + uint32_t sxlen = 32 * riscv_cpu_sxl(env); + uint64_t high_bit = addr & (1UL << (sxlen - 1)); + return !high_bit; +} + /* * get_physical_address - get the physical address for this virtual address * @@ -804,7 +1189,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, target_ulong *fault_pte_addr, int access_type, int mmu_idx, bool first_stage, bool two_stage, - bool is_debug) + bool is_debug, bool is_probe) { /* * NOTE: the env->pc value visible here will not be @@ -814,10 +1199,18 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, MemTxResult res; MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; int mode = mmuidx_priv(mmu_idx); + bool virt = mmuidx_2stage(mmu_idx); bool use_background = false; hwaddr ppn; int napot_bits = 0; target_ulong napot_mask; + bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE); + bool sstack_page = false; + + if (do_svukte_check(env, first_stage, mode, virt) && + !check_svukte_addr(env, addr)) { + return TRANSLATE_FAIL; + } /* * Check if we should use the background registers for the two @@ -890,12 +1283,14 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, CPUState *cs = env_cpu(env); int va_bits = PGSHIFT + levels * ptidxbits + widened; + int sxlen = 16 << riscv_cpu_sxl(env); + int sxlen_bytes = sxlen / 8; if (first_stage == true) { target_ulong mask, masked_msbs; - if (TARGET_LONG_BITS > (va_bits - 1)) { - mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1; + if (sxlen > (va_bits - 1)) { + mask = (1L << (sxlen - (va_bits - 1))) - 1; } else { mask = 0; } @@ -914,6 +1309,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, bool svade = riscv_cpu_cfg(env)->ext_svade; bool svadu = riscv_cpu_cfg(env)->ext_svadu; bool adue = svadu ? 
env->menvcfg & MENVCFG_ADUE : !svade; + bool svrsw60t59b = riscv_cpu_cfg(env)->ext_svrsw60t59b; if (first_stage && two_stage && env->virt_enabled) { pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE); @@ -925,9 +1321,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, hwaddr pte_addr; int i; -#if !TCG_OVERSIZED_GUEST -restart: -#endif + restart: for (i = 0; i < levels; i++, ptshift -= ptidxbits) { target_ulong idx; if (i == 0) { @@ -948,7 +1342,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, int vbase_ret = get_physical_address(env, &vbase, &vbase_prot, base, NULL, MMU_DATA_LOAD, MMUIdx_U, false, true, - is_debug); + is_debug, false); if (vbase_ret != TRANSLATE_SUCCESS) { if (fault_pte_addr) { @@ -964,7 +1358,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, int pmp_prot; int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr, - sizeof(target_ulong), + sxlen_bytes, MMU_DATA_LOAD, PRV_S); if (pmp_ret != TRANSLATE_SUCCESS) { return TRANSLATE_PMP_FAIL; @@ -983,15 +1377,28 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, if (riscv_cpu_sxl(env) == MXL_RV32) { ppn = pte >> PTE_PPN_SHIFT; } else { - if (pte & PTE_RESERVED) { + if (pte & PTE_RESERVED(svrsw60t59b)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits set in PTE: " + "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n", + __func__, pte_addr, pte); return TRANSLATE_FAIL; } if (!pbmte && (pte & PTE_PBMT)) { + /* Reserved without Svpbmt. */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, " + "and Svpbmt extension is disabled: " + "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n", + __func__, pte_addr, pte); return TRANSLATE_FAIL; } if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) { + /* Reserved without Svnapot extension */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: N bit set in PTE, " + "and Svnapot extension is disabled: " + "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n", + __func__, pte_addr, pte); return TRANSLATE_FAIL; } @@ -1002,14 +1409,19 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, /* Invalid PTE */ return TRANSLATE_FAIL; } + if (pte & (PTE_R | PTE_W | PTE_X)) { goto leaf; } - /* Inner PTE, continue walking */ if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) { + /* D, A, and U bits are reserved in non-leaf/inner PTEs */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: D, A, or U bits set in non-leaf PTE: " + "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n", + __func__, pte_addr, pte); return TRANSLATE_FAIL; } + /* Inner PTE, continue walking */ base = ppn << PGSHIFT; } @@ -1019,28 +1431,57 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, leaf: if (ppn & ((1ULL << ptshift) - 1)) { /* Misaligned PPN */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: PPN bits in PTE is misaligned: " + "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n", + __func__, pte_addr, pte); return TRANSLATE_FAIL; } if (!pbmte && (pte & PTE_PBMT)) { /* Reserved without Svpbmt. */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, " + "and Svpbmt extension is disabled: " + "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n", + __func__, pte_addr, pte); return TRANSLATE_FAIL; } + target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X); /* Check for reserved combinations of RWX flags. 
*/ - switch (pte & (PTE_R | PTE_W | PTE_X)) { - case PTE_W: + switch (rwx) { case PTE_W | PTE_X: return TRANSLATE_FAIL; + case PTE_W: + /* if bcfi enabled, PTE_W is not reserved and shadow stack page */ + if (cpu_get_bcfien(env) && first_stage) { + sstack_page = true; + /* + * if ss index, read and write allowed. else if not a probe + * then only read allowed + */ + rwx = is_sstack_idx ? (PTE_R | PTE_W) : (is_probe ? 0 : PTE_R); + break; + } + return TRANSLATE_FAIL; + case PTE_R: + /* + * no matter what's the `access_type`, shadow stack access to readonly + * memory are always store page faults. During unwind, loads will be + * promoted as store fault. + */ + if (is_sstack_idx) { + return TRANSLATE_FAIL; + } + break; } int prot = 0; - if (pte & PTE_R) { + if (rwx & PTE_R) { prot |= PAGE_READ; } - if (pte & PTE_W) { + if (rwx & PTE_W) { prot |= PAGE_WRITE; } - if (pte & PTE_X) { + if (rwx & PTE_X) { bool mxr = false; /* @@ -1084,8 +1525,11 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, } if (!((prot >> access_type) & 1)) { - /* Access check failed */ - return TRANSLATE_FAIL; + /* + * Access check failed, access check failures for shadow stack are + * access faults. + */ + return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL; } target_ulong updated_pte = pte; @@ -1116,24 +1560,23 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, * it is no longer valid and we must re-walk the page table. */ MemoryRegion *mr; - hwaddr l = sizeof(target_ulong), addr1; + hwaddr l = sxlen_bytes, addr1; mr = address_space_translate(cs->as, pte_addr, &addr1, &l, false, MEMTXATTRS_UNSPECIFIED); if (memory_region_is_ram(mr)) { target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1); -#if TCG_OVERSIZED_GUEST - /* - * MTTCG is not enabled on oversized TCG guests so - * page table updates do not need to be atomic - */ - *pte_pa = pte = updated_pte; -#else - target_ulong old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte); + target_ulong old_pte; + if (riscv_cpu_sxl(env) == MXL_RV32) { + old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, cpu_to_le32(pte), cpu_to_le32(updated_pte)); + old_pte = le32_to_cpu(old_pte); + } else { + old_pte = qatomic_cmpxchg(pte_pa, cpu_to_le64(pte), cpu_to_le64(updated_pte)); + old_pte = le64_to_cpu(old_pte); + } if (old_pte != pte) { goto restart; } pte = updated_pte; -#endif } else { /* * Misconfigured PTE in ROM (AD bits are not preset) or @@ -1223,13 +1666,13 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) int mmu_idx = riscv_env_mmu_index(&cpu->env, false); if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx, - true, env->virt_enabled, true)) { + true, env->virt_enabled, true, false)) { return -1; } if (env->virt_enabled) { if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL, - 0, MMUIdx_U, false, true, true)) { + 0, MMUIdx_U, false, true, true, false)) { return -1; } } @@ -1272,9 +1715,17 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, break; case MMU_DATA_LOAD: cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS; + /* shadow stack mis aligned accesses are access faults */ + if (mmu_idx & MMU_IDX_SS_WRITE) { + cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT; + } break; case MMU_DATA_STORE: cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS; + /* shadow stack mis aligned accesses are access faults */ + if (mmu_idx & MMU_IDX_SS_WRITE) { + cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT; + } break; default: g_assert_not_reached(); @@ -1323,7 +1774,7 @@ bool 
riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, int ret = TRANSLATE_FAIL; int mode = mmuidx_priv(mmu_idx); /* default TLB page size */ - target_ulong tlb_size = TARGET_PAGE_SIZE; + hwaddr tlb_size = TARGET_PAGE_SIZE; env->guest_phys_fault_addr = 0; @@ -1335,7 +1786,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, /* Two stage lookup */ ret = get_physical_address(env, &pa, &prot, address, &env->guest_phys_fault_addr, access_type, - mmu_idx, true, true, false); + mmu_idx, true, true, false, probe); /* * A G-stage exception may be triggered during two state lookup. @@ -1358,7 +1809,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, ret = get_physical_address(env, &pa, &prot2, im_address, NULL, access_type, MMUIdx_U, false, true, - false); + false, probe); qemu_log_mask(CPU_LOG_MMU, "%s 2nd-stage address=%" VADDR_PRIx @@ -1375,7 +1826,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, qemu_log_mask(CPU_LOG_MMU, "%s PMP address=" HWADDR_FMT_plx " ret %d prot" - " %d tlb_size " TARGET_FMT_lu "\n", + " %d tlb_size %" HWADDR_PRIu "\n", __func__, pa, ret, prot_pmp, tlb_size); prot &= prot_pmp; @@ -1395,7 +1846,8 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } else { /* Single stage lookup */ ret = get_physical_address(env, &pa, &prot, address, NULL, - access_type, mmu_idx, true, false, false); + access_type, mmu_idx, true, false, false, + probe); qemu_log_mask(CPU_LOG_MMU, "%s address=%" VADDR_PRIx " ret %d physical " @@ -1409,7 +1861,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, qemu_log_mask(CPU_LOG_MMU, "%s PMP address=" HWADDR_FMT_plx " ret %d prot" - " %d tlb_size " TARGET_FMT_lu "\n", + " %d tlb_size %" HWADDR_PRIu "\n", __func__, pa, ret, prot_pmp, tlb_size); prot &= prot_pmp; @@ -1427,6 +1879,23 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } else if (probe) { return false; } else { + int wp_access = 0; + + if (access_type == MMU_DATA_LOAD) { + wp_access |= BP_MEM_READ; + } else if (access_type == MMU_DATA_STORE) { + wp_access |= BP_MEM_WRITE; + } + + /* + * If a watchpoint isn't found for 'addr' this will + * be a no-op and we'll resume the mmu_exception path. + * Otherwise we'll throw a debug exception and execution + * will continue elsewhere. 
+ */ + cpu_check_watchpoint(cs, address, size, MEMTXATTRS_UNSPECIFIED, + wp_access, retaddr); + raise_mmu_exception(env, address, access_type, pmp_violation, first_stage_error, two_stage_lookup, two_stage_indirect_error); @@ -1641,6 +2110,40 @@ static target_ulong riscv_transformed_insn(CPURISCVState *env, return xinsn; } +static target_ulong promote_load_fault(target_ulong orig_cause) +{ + switch (orig_cause) { + case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT: + return RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT; + + case RISCV_EXCP_LOAD_ACCESS_FAULT: + return RISCV_EXCP_STORE_AMO_ACCESS_FAULT; + + case RISCV_EXCP_LOAD_PAGE_FAULT: + return RISCV_EXCP_STORE_PAGE_FAULT; + } + + /* if no promotion, return original cause */ + return orig_cause; +} + +static void riscv_do_nmi(CPURISCVState *env, target_ulong cause, bool virt) +{ + env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false); + env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV, virt); + env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP, env->priv); + env->mncause = cause; + env->mnepc = env->pc; + env->pc = env->rnmi_irqvec; + + if (cpu_get_fcfien(env)) { + env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp); + } + + /* Trapping to M mode, virt is disabled */ + riscv_cpu_set_mode(env, PRV_M, false); +} + /* * Handle Traps * @@ -1653,7 +2156,10 @@ void riscv_cpu_do_interrupt(CPUState *cs) CPURISCVState *env = &cpu->env; bool virt = env->virt_enabled; bool write_gva = false; + bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO); + bool vsmode_exc; uint64_t s; + int mode; /* * cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide @@ -1662,22 +2168,38 @@ void riscv_cpu_do_interrupt(CPUState *cs) bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG); target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK; uint64_t deleg = async ? env->mideleg : env->medeleg; - bool s_injected = env->mvip & (1 << cause) & env->mvien && - !(env->mip & (1 << cause)); - bool vs_injected = env->hvip & (1 << cause) & env->hvien && - !(env->mip & (1 << cause)); + bool s_injected = env->mvip & (1ULL << cause) & env->mvien && + !(env->mip & (1ULL << cause)); + bool vs_injected = env->hvip & (1ULL << cause) & env->hvien && + !(env->mip & (1ULL << cause)); + bool smode_double_trap = false; + uint64_t hdeleg = async ? 
env->hideleg : env->hedeleg; + const bool prev_virt = env->virt_enabled; + const target_ulong prev_priv = env->priv; target_ulong tval = 0; target_ulong tinst = 0; target_ulong htval = 0; target_ulong mtval2 = 0; + target_ulong src; + int sxlen = 0; + int mxlen = 16 << riscv_cpu_mxl(env); + bool nnmi_excep = false; + + if (cpu->cfg.ext_smrnmi && env->rnmip && async) { + riscv_do_nmi(env, cause | ((target_ulong)1U << (mxlen - 1)), + env->virt_enabled); + return; + } if (!async) { /* set tval to badaddr for traps with address information */ switch (cause) { +#ifdef CONFIG_TCG case RISCV_EXCP_SEMIHOST: do_common_semihosting(cs); env->pc += 4; return; +#endif case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT: case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT: case RISCV_EXCP_LOAD_ADDR_MIS: @@ -1686,6 +2208,9 @@ void riscv_cpu_do_interrupt(CPUState *cs) case RISCV_EXCP_STORE_AMO_ACCESS_FAULT: case RISCV_EXCP_LOAD_PAGE_FAULT: case RISCV_EXCP_STORE_PAGE_FAULT: + if (always_storeamo) { + cause = promote_load_fault(cause); + } write_gva = env->two_stage_lookup; tval = env->badaddr; if (env->two_stage_indirect_lookup) { @@ -1727,6 +2252,9 @@ void riscv_cpu_do_interrupt(CPUState *cs) cs->watchpoint_hit = NULL; } break; + case RISCV_EXCP_SW_CHECK: + tval = env->sw_check_code; + break; default: break; } @@ -1755,14 +2283,44 @@ void riscv_cpu_do_interrupt(CPUState *cs) __func__, env->mhartid, async, cause, env->pc, tval, riscv_cpu_get_trap_name(cause, async)); - if (env->priv <= PRV_S && cause < 64 && - (((deleg >> cause) & 1) || s_injected || vs_injected)) { - /* handle the trap in S-mode */ + mode = env->priv <= PRV_S && cause < 64 && + (((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M; + + vsmode_exc = env->virt_enabled && cause < 64 && + (((hdeleg >> cause) & 1) || vs_injected); + + /* + * Check double trap condition only if already in S-mode and targeting + * S-mode + */ + if (cpu->cfg.ext_ssdbltrp && env->priv == PRV_S && mode == PRV_S) { + bool dte = (env->menvcfg & MENVCFG_DTE) != 0; + bool sdt = (env->mstatus & MSTATUS_SDT) != 0; + /* In VS or HS */ if (riscv_has_ext(env, RVH)) { - uint64_t hdeleg = async ? env->hideleg : env->hedeleg; + if (vsmode_exc) { + /* VS -> VS, use henvcfg instead of menvcfg*/ + dte = (env->henvcfg & HENVCFG_DTE) != 0; + } else if (env->virt_enabled) { + /* VS -> HS, use mstatus_hs */ + sdt = (env->mstatus_hs & MSTATUS_SDT) != 0; + } + } + smode_double_trap = dte && sdt; + if (smode_double_trap) { + mode = PRV_M; + } + } + + if (mode == PRV_S) { + /* handle the trap in S-mode */ + /* save elp status */ + if (cpu_get_fcfien(env)) { + env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, env->elp); + } - if (env->virt_enabled && - (((hdeleg >> cause) & 1) || vs_injected)) { + if (riscv_has_ext(env, RVH)) { + if (vsmode_exc) { /* Trap to VS mode */ /* * See if we need to adjust cause. 
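/*
 * Illustrative sketch, not part of the patch: the Ssdbltrp decision above
 * reduced to a pure predicate.  'dte' stands for menvcfg.DTE (or
 * henvcfg.DTE for a VS->VS trap) and 'sdt' for mstatus.SDT (or
 * mstatus_hs.SDT when a VS trap targets HS); the function and parameter
 * names are hypothetical.
 */
#include <stdbool.h>

/* True when a trap that would be delivered to S-mode must instead be
 * redirected to M-mode as an S-mode double trap. */
static bool is_smode_double_trap(bool trap_taken_in_smode,
                                 bool trap_targets_smode,
                                 bool dte, bool sdt)
{
    return trap_taken_in_smode && trap_targets_smode && dte && sdt;
}
/* With either dte or sdt clear the trap stays in S-mode; with both set
 * the code above forces the target mode to PRV_M. */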
Yes if its VS mode interrupt @@ -1795,8 +2353,12 @@ void riscv_cpu_do_interrupt(CPUState *cs) s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE)); s = set_field(s, MSTATUS_SPP, env->priv); s = set_field(s, MSTATUS_SIE, 0); + if (riscv_env_smode_dbltrp_enabled(env, virt)) { + s = set_field(s, MSTATUS_SDT, 1); + } env->mstatus = s; - env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1)); + sxlen = 16 << riscv_cpu_sxl(env); + env->scause = cause | ((target_ulong)async << (sxlen - 1)); env->sepc = env->pc; env->stval = tval; env->htval = htval; @@ -1804,8 +2366,28 @@ void riscv_cpu_do_interrupt(CPUState *cs) env->pc = (env->stvec >> 2 << 2) + ((async && (env->stvec & 3) == 1) ? cause * 4 : 0); riscv_cpu_set_mode(env, PRV_S, virt); + + src = env->sepc; } else { + /* + * If the hart encounters an exception while executing in M-mode + * with the mnstatus.NMIE bit clear, the exception is an RNMI exception. + */ + nnmi_excep = cpu->cfg.ext_smrnmi && + !get_field(env->mnstatus, MNSTATUS_NMIE) && + !async; + /* handle the trap in M-mode */ + /* save elp status */ + if (cpu_get_fcfien(env)) { + if (nnmi_excep) { + env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, + env->elp); + } else { + env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp); + } + } + if (riscv_has_ext(env, RVH)) { if (env->virt_enabled) { riscv_cpu_swap_hypervisor_regs(env); @@ -1821,22 +2403,77 @@ void riscv_cpu_do_interrupt(CPUState *cs) /* Trapping to M mode, virt is disabled */ virt = false; } + /* + * If the hart encounters an exception while executing in M-mode, + * with the mnstatus.NMIE bit clear, the program counter is set to + * the RNMI exception trap handler address. + */ + nnmi_excep = cpu->cfg.ext_smrnmi && + !get_field(env->mnstatus, MNSTATUS_NMIE) && + !async; s = env->mstatus; s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE)); s = set_field(s, MSTATUS_MPP, env->priv); s = set_field(s, MSTATUS_MIE, 0); + if (cpu->cfg.ext_smdbltrp) { + if (env->mstatus & MSTATUS_MDT) { + assert(env->priv == PRV_M); + if (!cpu->cfg.ext_smrnmi || nnmi_excep) { + cpu_abort(CPU(cpu), "M-mode double trap\n"); + } else { + riscv_do_nmi(env, cause, false); + return; + } + } + + s = set_field(s, MSTATUS_MDT, 1); + } env->mstatus = s; - env->mcause = cause | ~(((target_ulong)-1) >> async); + env->mcause = cause | ((target_ulong)async << (mxlen - 1)); + if (smode_double_trap) { + env->mtval2 = env->mcause; + env->mcause = RISCV_EXCP_DOUBLE_TRAP; + } else { + env->mtval2 = mtval2; + } env->mepc = env->pc; env->mtval = tval; - env->mtval2 = mtval2; env->mtinst = tinst; - env->pc = (env->mtvec >> 2 << 2) + - ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0); + + /* + * For RNMI exception, program counter is set to the RNMI exception + * trap handler address. + */ + if (nnmi_excep) { + env->pc = env->rnmi_excpvec; + } else { + env->pc = (env->mtvec >> 2 << 2) + + ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0); + } riscv_cpu_set_mode(env, PRV_M, virt); + src = env->mepc; + } + + if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) { + if (async && cause == IRQ_PMU_OVF) { + riscv_ctr_freeze(env, XCTRCTL_LCOFIFRZ, virt); + } else if (!async && cause == RISCV_EXCP_BREAKPOINT) { + riscv_ctr_freeze(env, XCTRCTL_BPFRZ, virt); + } + + riscv_ctr_add_entry(env, src, env->pc, + async ? CTRDATA_TYPE_INTERRUPT : CTRDATA_TYPE_EXCEPTION, + prev_priv, prev_virt); } + /* + * Interrupt/exception/trap delivery is asynchronous event and as per + * zicfilp spec CPU should clear up the ELP state. 
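/*
 * Illustrative sketch, not part of the patch: xcause is now built by
 * placing the Interrupt flag at bit MXLEN-1 (mxlen = 16 << mxl) instead
 * of deriving it from TARGET_LONG_BITS, so the bit position follows the
 * guest's XLEN.  Names below are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t make_xcause(uint64_t cause, bool is_interrupt, int mxlen)
{
    return cause | ((uint64_t)is_interrupt << (mxlen - 1));
}
/* make_xcause(9, true, 64) == 0x8000000000000009 and
 * make_xcause(9, true, 32) == 0x80000009 (supervisor external interrupt). */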
No harm in clearing + * unconditionally. + */ + env->elp = false; + /* * NOTE: it is not necessary to yield load reservations here. It is only * necessary for an SC from "another hart" to cause a load reservation diff --git a/target/riscv/cpu_user.h b/target/riscv/cpu_user.h index 02afad608b4..e6927ff847e 100644 --- a/target/riscv/cpu_user.h +++ b/target/riscv/cpu_user.h @@ -15,5 +15,6 @@ #define xA6 16 #define xA7 17 /* syscall number for RVI ABI */ #define xT0 5 /* syscall number for RVE ABI */ +#define xT2 7 #endif diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c index bb084e00efe..a0fb54bc50c 100644 --- a/target/riscv/crypto_helper.c +++ b/target/riscv/crypto_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "crypto/aes.h" #include "crypto/aes-round.h" diff --git a/target/riscv/csr.c b/target/riscv/csr.c index ea3560342c4..8842e07a735 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -24,11 +24,15 @@ #include "tcg/tcg-cpu.h" #include "pmu.h" #include "time_helper.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/tb-flush.h" -#include "sysemu/cpu-timers.h" +#include "exec/icount.h" +#include "accel/tcg/getpc.h" #include "qemu/guest-random.h" #include "qapi/error.h" +#include "tcg/insn-start-words.h" +#include "internals.h" +#include /* CSR function table public API */ void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops) @@ -36,7 +40,7 @@ void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops) *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)]; } -void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops) +void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops) { csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops; } @@ -184,6 +188,30 @@ static RISCVException zcmt(CPURISCVState *env, int csrno) return RISCV_EXCP_NONE; } +static RISCVException cfi_ss(CPURISCVState *env, int csrno) +{ + if (!env_archcpu(env)->cfg.ext_zicfiss) { + return RISCV_EXCP_ILLEGAL_INST; + } + + /* If ext implemented, M-mode always have access to SSP CSR */ + if (env->priv == PRV_M) { + return RISCV_EXCP_NONE; + } + + /* if bcfi not active for current env, access to csr is illegal */ + if (!cpu_get_bcfien(env)) { +#if !defined(CONFIG_USER_ONLY) + if (env->debugger) { + return RISCV_EXCP_NONE; + } +#endif + return RISCV_EXCP_ILLEGAL_INST; + } + + return RISCV_EXCP_NONE; +} + #if !defined(CONFIG_USER_ONLY) static RISCVException mctr(CPURISCVState *env, int csrno) { @@ -286,6 +314,24 @@ static RISCVException aia_any32(CPURISCVState *env, int csrno) return any32(env, csrno); } +static RISCVException csrind_any(CPURISCVState *env, int csrno) +{ + if (!riscv_cpu_cfg(env)->ext_smcsrind) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return RISCV_EXCP_NONE; +} + +static RISCVException csrind_or_aia_any(CPURISCVState *env, int csrno) +{ + if (!riscv_cpu_cfg(env)->ext_smaia && !riscv_cpu_cfg(env)->ext_smcsrind) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return any(env, csrno); +} + static RISCVException smode(CPURISCVState *env, int csrno) { if (riscv_has_ext(env, RVS)) { @@ -306,22 +352,92 @@ static RISCVException smode32(CPURISCVState *env, int csrno) static RISCVException aia_smode(CPURISCVState *env, int csrno) { + int ret; + if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } + if (csrno == CSR_STOPEI) { + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC); + } else { + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA); + } + + if (ret != RISCV_EXCP_NONE) 
{ + return ret; + } + return smode(env, csrno); } static RISCVException aia_smode32(CPURISCVState *env, int csrno) { - if (!riscv_cpu_cfg(env)->ext_ssaia) { + int ret; + int csr_priv = get_field(csrno, 0x300); + + if (csr_priv == PRV_M && !riscv_cpu_cfg(env)->ext_smaia) { + return RISCV_EXCP_ILLEGAL_INST; + } else if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + return smode32(env, csrno); } +static RISCVException scountinhibit_pred(CPURISCVState *env, int csrno) +{ + RISCVCPU *cpu = env_archcpu(env); + + if (!cpu->cfg.ext_ssccfg || !cpu->cfg.ext_smcdeleg) { + return RISCV_EXCP_ILLEGAL_INST; + } + + if (env->virt_enabled) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + + return smode(env, csrno); +} + +static bool csrind_extensions_present(CPURISCVState *env) +{ + return riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind; +} + +static bool aia_extensions_present(CPURISCVState *env) +{ + return riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_ssaia; +} + +static bool csrind_or_aia_extensions_present(CPURISCVState *env) +{ + return csrind_extensions_present(env) || aia_extensions_present(env); +} + +static RISCVException csrind_smode(CPURISCVState *env, int csrno) +{ + if (!csrind_extensions_present(env)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return smode(env, csrno); +} + +static RISCVException csrind_or_aia_smode(CPURISCVState *env, int csrno) +{ + if (!csrind_or_aia_extensions_present(env)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return smode(env, csrno); +} + static RISCVException hmode(CPURISCVState *env, int csrno) { if (riscv_has_ext(env, RVH)) { @@ -341,6 +457,24 @@ static RISCVException hmode32(CPURISCVState *env, int csrno) } +static RISCVException csrind_hmode(CPURISCVState *env, int csrno) +{ + if (!csrind_extensions_present(env)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return hmode(env, csrno); +} + +static RISCVException csrind_or_aia_hmode(CPURISCVState *env, int csrno) +{ + if (!csrind_or_aia_extensions_present(env)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return hmode(env, csrno); +} + static RISCVException umode(CPURISCVState *env, int csrno) { if (riscv_has_ext(env, RVU)) { @@ -512,27 +646,82 @@ static RISCVException hgatp(CPURISCVState *env, int csrno) return hmode(env, csrno); } -/* Checks if PointerMasking registers could be accessed */ -static RISCVException pointer_masking(CPURISCVState *env, int csrno) +/* + * M-mode: + * Without ext_smctr raise illegal inst excep. + * Otherwise everything is accessible to m-mode. + * + * S-mode: + * Without ext_ssctr or mstateen.ctr raise illegal inst excep. + * Otherwise everything other than mctrctl is accessible. + * + * VS-mode: + * Without ext_ssctr or mstateen.ctr raise illegal inst excep. + * Without hstateen.ctr raise virtual illegal inst excep. + * Otherwise allow sctrctl (vsctrctl), sctrstatus, 0x200-0x2ff entry range. + * Always raise illegal instruction exception for sctrdepth. 
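/*
 * Illustrative sketch, not part of the patch: get_field(csrno, 0x300)
 * above extracts bits [9:8] of the CSR address, which encode the lowest
 * privilege level allowed to access that CSR (0 = U, 1 = S, 2 = HS,
 * 3 = M).  The helper name is hypothetical.
 */
static inline int csr_min_priv(int csrno)
{
    return (csrno >> 8) & 3;
}
/* csr_min_priv(0xC00) == 0 (cycle, U), csr_min_priv(0x100) == 1 (sstatus, S),
 * csr_min_priv(0x300) == 3 (mstatus, M). */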
+ */ +static RISCVException ctr_mmode(CPURISCVState *env, int csrno) { - /* Check if j-ext is present */ - if (riscv_has_ext(env, RVJ)) { + /* Check if smctr-ext is present */ + if (riscv_cpu_cfg(env)->ext_smctr) { return RISCV_EXCP_NONE; } + return RISCV_EXCP_ILLEGAL_INST; } +static RISCVException ctr_smode(CPURISCVState *env, int csrno) +{ + const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); + + if (!cfg->ext_smctr && !cfg->ext_ssctr) { + return RISCV_EXCP_ILLEGAL_INST; + } + + RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR); + if (ret == RISCV_EXCP_NONE && csrno == CSR_SCTRDEPTH && + env->virt_enabled) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + + return ret; +} + static RISCVException aia_hmode(CPURISCVState *env, int csrno) { + int ret; + if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } - return hmode(env, csrno); + if (csrno == CSR_VSTOPEI) { + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC); + } else { + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA); + } + + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + return hmode(env, csrno); } static RISCVException aia_hmode32(CPURISCVState *env, int csrno) { + int ret; + + if (!riscv_cpu_cfg(env)->ext_ssaia) { + return RISCV_EXCP_ILLEGAL_INST; + } + + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -540,10 +729,22 @@ static RISCVException aia_hmode32(CPURISCVState *env, int csrno) return hmode32(env, csrno); } +static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno) +{ + if (riscv_cpu_cfg(env)->ext_ssdbltrp) { + return RISCV_EXCP_NONE; + } + + return hmode(env, csrno); +} + static RISCVException pmp(CPURISCVState *env, int csrno) { if (riscv_cpu_cfg(env)->pmp) { - if (csrno <= CSR_PMPCFG3) { + int max_pmpcfg = (env->priv_ver >= PRIV_VERSION_1_12_0) ? 
++ CSR_PMPCFG15 : CSR_PMPCFG3; + + if (csrno <= max_pmpcfg) { uint32_t reg_index = csrno - CSR_PMPCFG0; /* TODO: RV128 restriction check */ @@ -566,6 +767,9 @@ static RISCVException have_mseccfg(CPURISCVState *env, int csrno) if (riscv_cpu_cfg(env)->ext_zkr) { return RISCV_EXCP_NONE; } + if (riscv_cpu_cfg(env)->ext_smmpm) { + return RISCV_EXCP_NONE; + } return RISCV_EXCP_ILLEGAL_INST; } @@ -578,6 +782,17 @@ static RISCVException debug(CPURISCVState *env, int csrno) return RISCV_EXCP_ILLEGAL_INST; } + +static RISCVException rnmi(CPURISCVState *env, int csrno) +{ + RISCVCPU *cpu = env_archcpu(env); + + if (cpu->cfg.ext_smrnmi) { + return RISCV_EXCP_NONE; + } + + return RISCV_EXCP_ILLEGAL_INST; +} #endif static RISCVException seed(CPURISCVState *env, int csrno) @@ -622,6 +837,21 @@ static RISCVException seed(CPURISCVState *env, int csrno) #endif } +/* zicfiss CSR_SSP read and write */ +static RISCVException read_ssp(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->ssp; + return RISCV_EXCP_NONE; +} + +static RISCVException write_ssp(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) +{ + env->ssp = val; + return RISCV_EXCP_NONE; +} + /* User Floating-Point CSRs */ static RISCVException read_fflags(CPURISCVState *env, int csrno, target_ulong *val) @@ -631,7 +861,7 @@ static RISCVException read_fflags(CPURISCVState *env, int csrno, } static RISCVException write_fflags(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) if (riscv_has_ext(env, RVF)) { @@ -650,7 +880,7 @@ static RISCVException read_frm(CPURISCVState *env, int csrno, } static RISCVException write_frm(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) if (riscv_has_ext(env, RVF)) { @@ -670,7 +900,7 @@ static RISCVException read_fcsr(CPURISCVState *env, int csrno, } static RISCVException write_fcsr(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) if (riscv_has_ext(env, RVF)) { @@ -722,7 +952,7 @@ static RISCVException read_vxrm(CPURISCVState *env, int csrno, } static RISCVException write_vxrm(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) env->mstatus |= MSTATUS_VS; @@ -734,17 +964,17 @@ static RISCVException write_vxrm(CPURISCVState *env, int csrno, static RISCVException read_vxsat(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env->vxsat; + *val = env->vxsat & BIT(0); return RISCV_EXCP_NONE; } static RISCVException write_vxsat(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) env->mstatus |= MSTATUS_VS; #endif - env->vxsat = val; + env->vxsat = val & BIT(0); return RISCV_EXCP_NONE; } @@ -756,7 +986,7 @@ static RISCVException read_vstart(CPURISCVState *env, int csrno, } static RISCVException write_vstart(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) env->mstatus |= MSTATUS_VS; @@ -777,7 +1007,7 @@ static RISCVException read_vcsr(CPURISCVState *env, int csrno, } static RISCVException write_vcsr(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) env->mstatus |= MSTATUS_VS; @@ -835,7 +1065,7 @@ static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno, } static RISCVException write_mcyclecfg(CPURISCVState 
*env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint64_t inh_avail_mask; @@ -864,7 +1094,7 @@ static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno, } static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK | MCYCLECFGH_BIT_MINH); @@ -889,7 +1119,7 @@ static RISCVException read_minstretcfg(CPURISCVState *env, int csrno, } static RISCVException write_minstretcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint64_t inh_avail_mask; @@ -916,7 +1146,7 @@ static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno, } static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK | MINSTRETCFGH_BIT_MINH); @@ -943,7 +1173,7 @@ static RISCVException read_mhpmevent(CPURISCVState *env, int csrno, } static RISCVException write_mhpmevent(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { int evt_index = csrno - CSR_MCOUNTINHIBIT; uint64_t mhpmevt_val = val; @@ -981,7 +1211,7 @@ static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno, } static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { int evt_index = csrno - CSR_MHPMEVENT3H + 3; uint64_t mhpmevth_val; @@ -1072,10 +1302,9 @@ static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env, return result; } -static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno, - target_ulong val) +static RISCVException riscv_pmu_write_ctr(CPURISCVState *env, target_ulong val, + uint32_t ctr_idx) { - int ctr_idx = csrno - CSR_MCYCLE; PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; uint64_t mhpmctr_val = val; @@ -1100,10 +1329,9 @@ static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno, - target_ulong val) +static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val, + uint32_t ctr_idx) { - int ctr_idx = csrno - CSR_MCYCLEH; PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; uint64_t mhpmctr_val = counter->mhpmcounter_val; uint64_t mhpmctrh_val = val; @@ -1125,6 +1353,22 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) +{ + int ctr_idx = csrno - CSR_MCYCLE; + + return riscv_pmu_write_ctr(env, val, ctr_idx); +} + +static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) +{ + int ctr_idx = csrno - CSR_MCYCLEH; + + return riscv_pmu_write_ctrh(env, val, ctr_idx); +} + RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, bool upper_half, uint32_t ctr_idx) { @@ -1190,92 +1434,261 @@ static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno, return riscv_pmu_read_ctr(env, val, true, ctr_index); } -static RISCVException read_scountovf(CPURISCVState *env, int csrno, - target_ulong *val) +static int rmw_cd_mhpmcounter(CPURISCVState *env, int ctr_idx, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask) { - int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT; - int i; - *val = 0; - target_ulong 
*mhpm_evt_val; - uint64_t of_bit_mask; - - if (riscv_cpu_mxl(env) == MXL_RV32) { - mhpm_evt_val = env->mhpmeventh_val; - of_bit_mask = MHPMEVENTH_BIT_OF; - } else { - mhpm_evt_val = env->mhpmevent_val; - of_bit_mask = MHPMEVENT_BIT_OF; + if (wr_mask != 0 && wr_mask != -1) { + return -EINVAL; } - for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) { - if ((get_field(env->mcounteren, BIT(i))) && - (mhpm_evt_val[i] & of_bit_mask)) { - *val |= BIT(i); - } + if (!wr_mask && val) { + riscv_pmu_read_ctr(env, val, false, ctr_idx); + } else if (wr_mask) { + riscv_pmu_write_ctr(env, new_val, ctr_idx); + } else { + return -EINVAL; } - return RISCV_EXCP_NONE; + return 0; } -static RISCVException read_time(CPURISCVState *env, int csrno, - target_ulong *val) +static int rmw_cd_mhpmcounterh(CPURISCVState *env, int ctr_idx, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask) { - uint64_t delta = env->virt_enabled ? env->htimedelta : 0; + if (wr_mask != 0 && wr_mask != -1) { + return -EINVAL; + } - if (!env->rdtime_fn) { - return RISCV_EXCP_ILLEGAL_INST; + if (!wr_mask && val) { + riscv_pmu_read_ctr(env, val, true, ctr_idx); + } else if (wr_mask) { + riscv_pmu_write_ctrh(env, new_val, ctr_idx); + } else { + return -EINVAL; } - *val = env->rdtime_fn(env->rdtime_fn_arg) + delta; - return RISCV_EXCP_NONE; + return 0; } -static RISCVException read_timeh(CPURISCVState *env, int csrno, - target_ulong *val) +static int rmw_cd_mhpmevent(CPURISCVState *env, int evt_index, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask) { - uint64_t delta = env->virt_enabled ? env->htimedelta : 0; + uint64_t mhpmevt_val = new_val; - if (!env->rdtime_fn) { - return RISCV_EXCP_ILLEGAL_INST; + if (wr_mask != 0 && wr_mask != -1) { + return -EINVAL; } - *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32; - return RISCV_EXCP_NONE; -} - -static RISCVException read_vstimecmp(CPURISCVState *env, int csrno, - target_ulong *val) -{ - *val = env->vstimecmp; + if (!wr_mask && val) { + *val = env->mhpmevent_val[evt_index]; + if (riscv_cpu_cfg(env)->ext_sscofpmf) { + *val &= ~MHPMEVENT_BIT_MINH; + } + } else if (wr_mask) { + wr_mask &= ~MHPMEVENT_BIT_MINH; + mhpmevt_val = (new_val & wr_mask) | + (env->mhpmevent_val[evt_index] & ~wr_mask); + if (riscv_cpu_mxl(env) == MXL_RV32) { + mhpmevt_val = mhpmevt_val | + ((uint64_t)env->mhpmeventh_val[evt_index] << 32); + } + env->mhpmevent_val[evt_index] = mhpmevt_val; + riscv_pmu_update_event_map(env, mhpmevt_val, evt_index); + } else { + return -EINVAL; + } - return RISCV_EXCP_NONE; + return 0; } -static RISCVException read_vstimecmph(CPURISCVState *env, int csrno, - target_ulong *val) +static int rmw_cd_mhpmeventh(CPURISCVState *env, int evt_index, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask) { - *val = env->vstimecmp >> 32; + uint64_t mhpmevth_val; + uint64_t mhpmevt_val = env->mhpmevent_val[evt_index]; - return RISCV_EXCP_NONE; -} + if (wr_mask != 0 && wr_mask != -1) { + return -EINVAL; + } -static RISCVException write_vstimecmp(CPURISCVState *env, int csrno, - target_ulong val) -{ - if (riscv_cpu_mxl(env) == MXL_RV32) { - env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val); + if (!wr_mask && val) { + *val = env->mhpmeventh_val[evt_index]; + if (riscv_cpu_cfg(env)->ext_sscofpmf) { + *val &= ~MHPMEVENTH_BIT_MINH; + } + } else if (wr_mask) { + wr_mask &= ~MHPMEVENTH_BIT_MINH; + env->mhpmeventh_val[evt_index] = + (new_val & wr_mask) | (env->mhpmeventh_val[evt_index] & ~wr_mask); + mhpmevth_val = env->mhpmeventh_val[evt_index]; + 
mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32); + riscv_pmu_update_event_map(env, mhpmevt_val, evt_index); } else { - env->vstimecmp = val; + return -EINVAL; } - riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp, + return 0; +} + +static int rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + switch (cfg_index) { + case 0: /* CYCLECFG */ + if (wr_mask) { + wr_mask &= ~MCYCLECFG_BIT_MINH; + env->mcyclecfg = (new_val & wr_mask) | (env->mcyclecfg & ~wr_mask); + } else { + *val = env->mcyclecfg &= ~MHPMEVENTH_BIT_MINH; + } + break; + case 2: /* INSTRETCFG */ + if (wr_mask) { + wr_mask &= ~MINSTRETCFG_BIT_MINH; + env->minstretcfg = (new_val & wr_mask) | + (env->minstretcfg & ~wr_mask); + } else { + *val = env->minstretcfg &= ~MHPMEVENTH_BIT_MINH; + } + break; + default: + return -EINVAL; + } + return 0; +} + +static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + + if (riscv_cpu_mxl(env) != MXL_RV32) { + return RISCV_EXCP_ILLEGAL_INST; + } + + switch (cfg_index) { + case 0: /* CYCLECFGH */ + if (wr_mask) { + wr_mask &= ~MCYCLECFGH_BIT_MINH; + env->mcyclecfgh = (new_val & wr_mask) | + (env->mcyclecfgh & ~wr_mask); + } else { + *val = env->mcyclecfgh; + } + break; + case 2: /* INSTRETCFGH */ + if (wr_mask) { + wr_mask &= ~MINSTRETCFGH_BIT_MINH; + env->minstretcfgh = (new_val & wr_mask) | + (env->minstretcfgh & ~wr_mask); + } else { + *val = env->minstretcfgh; + } + break; + default: + return -EINVAL; + } + return 0; +} + + +static RISCVException read_scountovf(CPURISCVState *env, int csrno, + target_ulong *val) +{ + int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT; + int i; + *val = 0; + target_ulong *mhpm_evt_val; + uint64_t of_bit_mask; + + /* Virtualize scountovf for counter delegation */ + if (riscv_cpu_cfg(env)->ext_sscofpmf && + riscv_cpu_cfg(env)->ext_ssccfg && + get_field(env->menvcfg, MENVCFG_CDE) && + env->virt_enabled) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + + if (riscv_cpu_mxl(env) == MXL_RV32) { + mhpm_evt_val = env->mhpmeventh_val; + of_bit_mask = MHPMEVENTH_BIT_OF; + } else { + mhpm_evt_val = env->mhpmevent_val; + of_bit_mask = MHPMEVENT_BIT_OF; + } + + for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) { + if ((get_field(env->mcounteren, BIT(i))) && + (mhpm_evt_val[i] & of_bit_mask)) { + *val |= BIT(i); + } + } + + return RISCV_EXCP_NONE; +} + +static RISCVException read_time(CPURISCVState *env, int csrno, + target_ulong *val) +{ + uint64_t delta = env->virt_enabled ? env->htimedelta : 0; + + if (!env->rdtime_fn) { + return RISCV_EXCP_ILLEGAL_INST; + } + + *val = env->rdtime_fn(env->rdtime_fn_arg) + delta; + return RISCV_EXCP_NONE; +} + +static RISCVException read_timeh(CPURISCVState *env, int csrno, + target_ulong *val) +{ + uint64_t delta = env->virt_enabled ? 
env->htimedelta : 0; + + if (!env->rdtime_fn) { + return RISCV_EXCP_ILLEGAL_INST; + } + + *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32; + return RISCV_EXCP_NONE; +} + +static RISCVException read_vstimecmp(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->vstimecmp; + + return RISCV_EXCP_NONE; +} + +static RISCVException read_vstimecmph(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->vstimecmp >> 32; + + return RISCV_EXCP_NONE; +} + +static RISCVException write_vstimecmp(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) +{ + if (riscv_cpu_mxl(env) == MXL_RV32) { + env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val); + } else { + env->vstimecmp = val; + } + + riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp, env->htimedelta, MIP_VSTIP); return RISCV_EXCP_NONE; } static RISCVException write_vstimecmph(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val); riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp, @@ -1309,13 +1722,13 @@ static RISCVException read_stimecmph(CPURISCVState *env, int csrno, } static RISCVException write_stimecmp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { if (env->virt_enabled) { if (env->hvictl & HVICTL_VTI) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } - return write_vstimecmp(env, csrno, val); + return write_vstimecmp(env, csrno, val, ra); } if (riscv_cpu_mxl(env) == MXL_RV32) { @@ -1330,13 +1743,13 @@ static RISCVException write_stimecmp(CPURISCVState *env, int csrno, } static RISCVException write_stimecmph(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { if (env->virt_enabled) { if (env->hvictl & HVICTL_VTI) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } - return write_vstimecmph(env, csrno, val); + return write_vstimecmph(env, csrno, val, ra); } env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val); @@ -1377,6 +1790,7 @@ static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS | (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \ (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \ (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \ + (1ULL << (RISCV_EXCP_SW_CHECK)) | \ (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \ (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \ (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \ @@ -1440,7 +1854,7 @@ static RISCVException read_zero(CPURISCVState *env, int csrno, } static RISCVException write_ignore(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { return RISCV_EXCP_NONE; } @@ -1504,8 +1918,13 @@ static RISCVException read_mstatus(CPURISCVState *env, int csrno, static bool validate_vm(CPURISCVState *env, target_ulong vm) { - uint64_t mode_supported = riscv_cpu_cfg(env)->satp_mode.map; - return get_field(mode_supported, (1 << vm)); + bool rv32 = riscv_cpu_mxl(env) == MXL_RV32; + RISCVCPU *cpu = env_archcpu(env); + int satp_mode_supported_max = cpu->cfg.max_satp_mode; + const bool *valid_vm = rv32 ? 
valid_vm_1_10_32 : valid_vm_1_10_64; + + assert(satp_mode_supported_max >= 0); + return vm <= satp_mode_supported_max && valid_vm[vm]; } static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp, @@ -1561,7 +1980,7 @@ static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp, } static RISCVException write_mstatus(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint64_t mstatus = env->mstatus; uint64_t mask = 0; @@ -1589,6 +2008,20 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno, mask |= MSTATUS_VS; } + if (riscv_env_smode_dbltrp_enabled(env, env->virt_enabled)) { + mask |= MSTATUS_SDT; + if ((val & MSTATUS_SDT) != 0) { + val &= ~MSTATUS_SIE; + } + } + + if (riscv_cpu_cfg(env)->ext_smdbltrp) { + mask |= MSTATUS_MDT; + if ((val & MSTATUS_MDT) != 0) { + val &= ~MSTATUS_MIE; + } + } + if (xl != MXL_RV32 || env->debugger) { if (riscv_has_ext(env, RVH)) { mask |= MSTATUS_MPV | MSTATUS_GVA; @@ -1598,6 +2031,11 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno, } } + /* If cfi lp extension is available, then apply cfi lp mask */ + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= (MSTATUS_MPELP | MSTATUS_SPELP); + } + mstatus = (mstatus & ~mask) | (val & mask); env->mstatus = mstatus; @@ -1610,7 +2048,6 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno, env->xl = cpu_recompute_xl(env); } - riscv_cpu_update_mask(env); return RISCV_EXCP_NONE; } @@ -1622,11 +2059,17 @@ static RISCVException read_mstatush(CPURISCVState *env, int csrno, } static RISCVException write_mstatush(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint64_t valh = (uint64_t)val << 32; uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0; + if (riscv_cpu_cfg(env)->ext_smdbltrp) { + mask |= MSTATUS_MDT; + if ((valh & MSTATUS_MDT) != 0) { + mask |= MSTATUS_MIE; + } + } env->mstatus = (env->mstatus & ~mask) | (valh & mask); return RISCV_EXCP_NONE; @@ -1669,8 +2112,21 @@ static RISCVException read_misa(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static target_ulong get_next_pc(CPURISCVState *env, uintptr_t ra) +{ + uint64_t data[INSN_START_WORDS]; + + /* Outside of a running cpu, env contains the next pc. */ + if (ra == 0 || !cpu_unwind_state_data(env_cpu(env), ra, data)) { + return env->pc; + } + + /* Within unwind data, [0] is pc and [1] is the opcode. */ + return data[0] + insn_len(data[1]); +} + static RISCVException write_misa(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { RISCVCPU *cpu = env_archcpu(env); uint32_t orig_misa_ext = env->misa_ext; @@ -1684,11 +2140,8 @@ static RISCVException write_misa(CPURISCVState *env, int csrno, /* Mask extensions that are not supported by this hart */ val &= env->misa_ext_mask; - /* - * Suppress 'C' if next instruction is not aligned - * TODO: this should check next_pc - */ - if ((val & RVC) && (GETPC() & ~3) != 0) { + /* Suppress 'C' if next instruction is not aligned. 
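/*
 * Illustrative sketch, not part of the patch: get_next_pc() above
 * recovers the address of the next instruction from TCG unwind data;
 * the length of the current instruction follows from its two low opcode
 * bits (anything other than 0b11 is a 16-bit compressed encoding).
 * Standalone version with hypothetical names:
 */
#include <stdint.h>

static int rv_insn_len(uint32_t opcode)
{
    return (opcode & 0x3) == 0x3 ? 4 : 2;
}

static uint64_t rv_next_pc(uint64_t pc, uint32_t opcode)
{
    return pc + rv_insn_len(opcode);
}
/* The misa write path can then test rv_next_pc(pc, opcode) & 3 to decide
 * whether the following instruction is 4-byte aligned. */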
*/ + if ((val & RVC) && (get_next_pc(env, ra) & 3) != 0) { val &= ~RVC; } @@ -1734,7 +2187,7 @@ static RISCVException read_medeleg(CPURISCVState *env, int csrno, } static RISCVException write_medeleg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS); return RISCV_EXCP_NONE; @@ -1928,14 +2381,41 @@ static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno) }; } +static int csrind_xlate_vs_csrno(CPURISCVState *env, int csrno) +{ + if (!env->virt_enabled) { + return csrno; + } + + switch (csrno) { + case CSR_SISELECT: + return CSR_VSISELECT; + case CSR_SIREG: + case CSR_SIREG2: + case CSR_SIREG3: + case CSR_SIREG4: + case CSR_SIREG5: + case CSR_SIREG6: + return CSR_VSIREG + (csrno - CSR_SIREG); + default: + return csrno; + }; +} + static RISCVException rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val, target_ulong new_val, target_ulong wr_mask) { target_ulong *iselect; + int ret; + + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT); + if (ret != RISCV_EXCP_NONE) { + return ret; + } /* Translate CSR number for VS-mode */ - csrno = aia_xlate_vs_csrno(env, csrno); + csrno = csrind_xlate_vs_csrno(env, csrno); /* Find the iselect CSR based on CSR number */ switch (csrno) { @@ -1956,7 +2436,12 @@ static RISCVException rmw_xiselect(CPURISCVState *env, int csrno, *val = *iselect; } - wr_mask &= ISELECT_MASK; + if (riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind) { + wr_mask &= ISELECT_MASK_SXCSRIND; + } else { + wr_mask &= ISELECT_MASK_AIA; + } + if (wr_mask) { *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask); } @@ -1964,6 +2449,24 @@ static RISCVException rmw_xiselect(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static bool xiselect_aia_range(target_ulong isel) +{ + return (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) || + (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST); +} + +static bool xiselect_cd_range(target_ulong isel) +{ + return (ISELECT_CD_FIRST <= isel && isel <= ISELECT_CD_LAST); +} + +static bool xiselect_ctr_range(int csrno, target_ulong isel) +{ + /* MIREG-MIREG6 for the range 0x200-0x2ff are not used by CTR. */ + return CTR_ENTRIES_FIRST <= isel && isel <= CTR_ENTRIES_LAST && + csrno < CSR_MIREG; +} + static int rmw_iprio(target_ulong xlen, target_ulong iselect, uint8_t *iprio, target_ulong *val, target_ulong new_val, @@ -2009,45 +2512,162 @@ static int rmw_iprio(target_ulong xlen, return 0; } -static RISCVException rmw_xireg(CPURISCVState *env, int csrno, - target_ulong *val, target_ulong new_val, - target_ulong wr_mask) +static int rmw_ctrsource(CPURISCVState *env, int isel, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) { - bool virt, isel_reserved; - uint8_t *iprio; - int ret = -EINVAL; - target_ulong priv, isel, vgein; + /* + * CTR arrays are treated as circular buffers and TOS always points to next + * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry + * 0 is always the latest one, traversal is a bit different here. See the + * below example. + * + * Depth = 16. 
+ * + * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F] + * TOS H + * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7 + */ + const uint64_t entry = isel - CTR_ENTRIES_FIRST; + const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK); + uint64_t idx; + + /* Entry greater than depth-1 is read-only zero */ + if (entry >= depth) { + if (val) { + *val = 0; + } + return 0; + } - /* Translate CSR number for VS-mode */ - csrno = aia_xlate_vs_csrno(env, csrno); + idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK); + idx = (idx - entry - 1) & (depth - 1); - /* Decode register details from CSR number */ - virt = false; - isel_reserved = false; + if (val) { + *val = env->ctr_src[idx]; + } + + env->ctr_src[idx] = (env->ctr_src[idx] & ~wr_mask) | (new_val & wr_mask); + + return 0; +} + +static int rmw_ctrtarget(CPURISCVState *env, int isel, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + /* + * CTR arrays are treated as circular buffers and TOS always points to next + * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry + * 0 is always the latest one, traversal is a bit different here. See the + * below example. + * + * Depth = 16. + * + * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F] + * head H + * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7 + */ + const uint64_t entry = isel - CTR_ENTRIES_FIRST; + const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK); + uint64_t idx; + + /* Entry greater than depth-1 is read-only zero */ + if (entry >= depth) { + if (val) { + *val = 0; + } + return 0; + } + + idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK); + idx = (idx - entry - 1) & (depth - 1); + + if (val) { + *val = env->ctr_dst[idx]; + } + + env->ctr_dst[idx] = (env->ctr_dst[idx] & ~wr_mask) | (new_val & wr_mask); + + return 0; +} + +static int rmw_ctrdata(CPURISCVState *env, int isel, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + /* + * CTR arrays are treated as circular buffers and TOS always points to next + * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry + * 0 is always the latest one, traversal is a bit different here. See the + * below example. + * + * Depth = 16. 
+ * + * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F] + * head H + * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7 + */ + const uint64_t entry = isel - CTR_ENTRIES_FIRST; + const uint64_t mask = wr_mask & CTRDATA_MASK; + const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK); + uint64_t idx; + + /* Entry greater than depth-1 is read-only zero */ + if (entry >= depth) { + if (val) { + *val = 0; + } + return 0; + } + + idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK); + idx = (idx - entry - 1) & (depth - 1); + + if (val) { + *val = env->ctr_data[idx]; + } + + env->ctr_data[idx] = (env->ctr_data[idx] & ~mask) | (new_val & mask); + + return 0; +} + +static RISCVException rmw_xireg_aia(CPURISCVState *env, int csrno, + target_ulong isel, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + bool virt = false, isel_reserved = false; + int ret = -EINVAL; + uint8_t *iprio; + target_ulong priv, vgein; + + /* VS-mode CSR number passed in has already been translated */ switch (csrno) { case CSR_MIREG: + if (!riscv_cpu_cfg(env)->ext_smaia) { + goto done; + } iprio = env->miprio; - isel = env->miselect; priv = PRV_M; break; case CSR_SIREG: - if (env->priv == PRV_S && env->mvien & MIP_SEIP && + if (!riscv_cpu_cfg(env)->ext_ssaia || + (env->priv == PRV_S && env->mvien & MIP_SEIP && env->siselect >= ISELECT_IMSIC_EIDELIVERY && - env->siselect <= ISELECT_IMSIC_EIE63) { + env->siselect <= ISELECT_IMSIC_EIE63)) { goto done; } iprio = env->siprio; - isel = env->siselect; priv = PRV_S; break; case CSR_VSIREG: + if (!riscv_cpu_cfg(env)->ext_ssaia) { + goto done; + } iprio = env->hviprio; - isel = env->vsiselect; priv = PRV_S; virt = true; break; default: - goto done; + goto done; }; /* Find the selected guest interrupt file */ @@ -2078,13 +2698,224 @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno, } done: + /* + * If AIA is not enabled, illegal instruction exception is always + * returned regardless of whether we are in VS-mode or not + */ if (ret) { return (env->virt_enabled && virt && !isel_reserved) ? 
RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; } + return RISCV_EXCP_NONE; } +static int rmw_xireg_cd(CPURISCVState *env, int csrno, + target_ulong isel, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + int ret = -EINVAL; + int ctr_index = isel - ISELECT_CD_FIRST; + int isel_hpm_start = ISELECT_CD_FIRST + 3; + + if (!riscv_cpu_cfg(env)->ext_smcdeleg || !riscv_cpu_cfg(env)->ext_ssccfg) { + ret = RISCV_EXCP_ILLEGAL_INST; + goto done; + } + + /* Invalid siselect value for reserved */ + if (ctr_index == 1) { + goto done; + } + + /* sireg4 and sireg5 provides access RV32 only CSRs */ + if (((csrno == CSR_SIREG5) || (csrno == CSR_SIREG4)) && + (riscv_cpu_mxl(env) != MXL_RV32)) { + ret = RISCV_EXCP_ILLEGAL_INST; + goto done; + } + + /* Check Sscofpmf dependancy */ + if (!riscv_cpu_cfg(env)->ext_sscofpmf && csrno == CSR_SIREG5 && + (isel_hpm_start <= isel && isel <= ISELECT_CD_LAST)) { + goto done; + } + + /* Check smcntrpmf dependancy */ + if (!riscv_cpu_cfg(env)->ext_smcntrpmf && + (csrno == CSR_SIREG2 || csrno == CSR_SIREG5) && + (ISELECT_CD_FIRST <= isel && isel < isel_hpm_start)) { + goto done; + } + + if (!get_field(env->mcounteren, BIT(ctr_index)) || + !get_field(env->menvcfg, MENVCFG_CDE)) { + goto done; + } + + switch (csrno) { + case CSR_SIREG: + ret = rmw_cd_mhpmcounter(env, ctr_index, val, new_val, wr_mask); + break; + case CSR_SIREG4: + ret = rmw_cd_mhpmcounterh(env, ctr_index, val, new_val, wr_mask); + break; + case CSR_SIREG2: + if (ctr_index <= 2) { + ret = rmw_cd_ctr_cfg(env, ctr_index, val, new_val, wr_mask); + } else { + ret = rmw_cd_mhpmevent(env, ctr_index, val, new_val, wr_mask); + } + break; + case CSR_SIREG5: + if (ctr_index <= 2) { + ret = rmw_cd_ctr_cfgh(env, ctr_index, val, new_val, wr_mask); + } else { + ret = rmw_cd_mhpmeventh(env, ctr_index, val, new_val, wr_mask); + } + break; + default: + goto done; + } + +done: + return ret; +} + +static int rmw_xireg_ctr(CPURISCVState *env, int csrno, + target_ulong isel, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + if (!riscv_cpu_cfg(env)->ext_smctr && !riscv_cpu_cfg(env)->ext_ssctr) { + return -EINVAL; + } + + if (csrno == CSR_SIREG || csrno == CSR_VSIREG) { + return rmw_ctrsource(env, isel, val, new_val, wr_mask); + } else if (csrno == CSR_SIREG2 || csrno == CSR_VSIREG2) { + return rmw_ctrtarget(env, isel, val, new_val, wr_mask); + } else if (csrno == CSR_SIREG3 || csrno == CSR_VSIREG3) { + return rmw_ctrdata(env, isel, val, new_val, wr_mask); + } else if (val) { + *val = 0; + } + + return 0; +} + +/* + * rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6 + * + * Perform indirect access to xireg and xireg2-xireg6. + * This is a generic interface for all xireg CSRs. Apart from AIA, all other + * extension using csrind should be implemented here. + */ +static int rmw_xireg_csrind(CPURISCVState *env, int csrno, + target_ulong isel, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + bool virt = csrno == CSR_VSIREG ? true : false; + int ret = -EINVAL; + + if (xiselect_cd_range(isel)) { + ret = rmw_xireg_cd(env, csrno, isel, val, new_val, wr_mask); + } else if (xiselect_ctr_range(csrno, isel)) { + ret = rmw_xireg_ctr(env, csrno, isel, val, new_val, wr_mask); + } else { + /* + * As per the specification, access to unimplented region is undefined + * but recommendation is to raise illegal instruction exception. + */ + return RISCV_EXCP_ILLEGAL_INST; + } + + if (ret) { + return (env->virt_enabled && virt) ? 
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; + } + + return RISCV_EXCP_NONE; +} + +static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + int ret = -EINVAL; + target_ulong isel; + + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + /* Translate CSR number for VS-mode */ + csrno = csrind_xlate_vs_csrno(env, csrno); + + if (CSR_MIREG <= csrno && csrno <= CSR_MIREG6 && + csrno != CSR_MIREG4 - 1) { + isel = env->miselect; + } else if (CSR_SIREG <= csrno && csrno <= CSR_SIREG6 && + csrno != CSR_SIREG4 - 1) { + isel = env->siselect; + } else if (CSR_VSIREG <= csrno && csrno <= CSR_VSIREG6 && + csrno != CSR_VSIREG4 - 1) { + isel = env->vsiselect; + } else { + return RISCV_EXCP_ILLEGAL_INST; + } + + return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask); +} + +static RISCVException rmw_xireg(CPURISCVState *env, int csrno, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask) +{ + int ret = -EINVAL; + target_ulong isel; + + ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + /* Translate CSR number for VS-mode */ + csrno = csrind_xlate_vs_csrno(env, csrno); + + /* Decode register details from CSR number */ + switch (csrno) { + case CSR_MIREG: + isel = env->miselect; + break; + case CSR_SIREG: + isel = env->siselect; + break; + case CSR_VSIREG: + isel = env->vsiselect; + break; + default: + goto done; + }; + + /* + * Use the xiselect range to determine actual op on xireg. + * + * Since we only checked the existence of AIA or Indirect Access in the + * predicate, we should check the existence of the exact extension when + * we get to a specific range and return illegal instruction exception even + * in VS-mode. 
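/*
 * Illustrative sketch, not part of the patch: the CTR entry arrays above
 * are circular buffers addressed relative to the write pointer (TOS), so
 * logical entry 0 is always the most recently recorded record.  With
 * depth = 16 and WRPTR = 7, entry 0 maps to slot 6, entry 1 to slot 5,
 * and entry 15 wraps around to slot 7, matching the tables in the
 * comments above.  Names below are hypothetical.
 */
#include <stdint.h>

static uint64_t ctr_slot(uint64_t wrptr, uint64_t entry, uint64_t depth)
{
    /* depth is a power of two (16 << SCTRDEPTH), so the mask wraps the
     * subtraction exactly like a modulo would. */
    return (wrptr - entry - 1) & (depth - 1);
}
/* ctr_slot(7, 0, 16) == 6, ctr_slot(7, 6, 16) == 0, ctr_slot(7, 15, 16) == 7 */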
+ */ + if (xiselect_aia_range(isel)) { + return rmw_xireg_aia(env, csrno, isel, val, new_val, wr_mask); + } else if (riscv_cpu_cfg(env)->ext_smcsrind || + riscv_cpu_cfg(env)->ext_sscsrind) { + return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask); + } + +done: + return RISCV_EXCP_ILLEGAL_INST; +} + static RISCVException rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val, target_ulong new_val, target_ulong wr_mask) @@ -2151,7 +2982,7 @@ static RISCVException read_mtvec(CPURISCVState *env, int csrno, } static RISCVException write_mtvec(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ if ((val & 3) < 2) { @@ -2170,7 +3001,7 @@ static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno, } static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { int cidx; PMUCTRState *counter; @@ -2236,6 +3067,20 @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException read_scountinhibit(CPURISCVState *env, int csrno, + target_ulong *val) +{ + /* S-mode can only access the bits delegated by M-mode */ + *val = env->mcountinhibit & env->mcounteren; + return RISCV_EXCP_NONE; +} + +static RISCVException write_scountinhibit(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) +{ + return write_mcountinhibit(env, csrno, val & env->mcounteren, ra); +} + static RISCVException read_mcounteren(CPURISCVState *env, int csrno, target_ulong *val) { @@ -2244,7 +3089,7 @@ static RISCVException read_mcounteren(CPURISCVState *env, int csrno, } static RISCVException write_mcounteren(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { RISCVCPU *cpu = env_archcpu(env); @@ -2278,7 +3123,7 @@ static RISCVException read_mscratch(CPURISCVState *env, int csrno, } static RISCVException write_mscratch(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->mscratch = val; return RISCV_EXCP_NONE; @@ -2287,14 +3132,14 @@ static RISCVException write_mscratch(CPURISCVState *env, int csrno, static RISCVException read_mepc(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env->mepc; + *val = env->mepc & get_xepc_mask(env); return RISCV_EXCP_NONE; } static RISCVException write_mepc(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { - env->mepc = val; + env->mepc = val & get_xepc_mask(env); return RISCV_EXCP_NONE; } @@ -2306,7 +3151,7 @@ static RISCVException read_mcause(CPURISCVState *env, int csrno, } static RISCVException write_mcause(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->mcause = val; return RISCV_EXCP_NONE; @@ -2320,7 +3165,7 @@ static RISCVException read_mtval(CPURISCVState *env, int csrno, } static RISCVException write_mtval(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->mtval = val; return RISCV_EXCP_NONE; @@ -2334,20 +3179,53 @@ static RISCVException read_menvcfg(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException write_henvcfg(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra); static RISCVException write_menvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); - uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | 
MENVCFG_CBCFE | MENVCFG_CBZE; + uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | + MENVCFG_CBZE | MENVCFG_CDE; + bool stce_changed = false; if (riscv_cpu_mxl(env) == MXL_RV64) { mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | (cfg->ext_sstc ? MENVCFG_STCE : 0) | - (cfg->ext_svadu ? MENVCFG_ADUE : 0); + (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) | + (cfg->ext_svadu ? MENVCFG_ADUE : 0) | + (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0); + + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= MENVCFG_LPE; + } + + if (env_archcpu(env)->cfg.ext_zicfiss) { + mask |= MENVCFG_SSE; + } + + /* Update PMM field only if the value is valid according to Zjpm v1.0 */ + if (env_archcpu(env)->cfg.ext_smnpm && + get_field(val, MENVCFG_PMM) != PMM_FIELD_RESERVED) { + mask |= MENVCFG_PMM; + } + + if ((val & MENVCFG_DTE) == 0) { + env->mstatus &= ~MSTATUS_SDT; + } + + if (cfg->ext_sstc && + ((env->menvcfg & MENVCFG_STCE) != (val & MENVCFG_STCE))) { + stce_changed = true; + } } env->menvcfg = (env->menvcfg & ~mask) | (val & mask); - return RISCV_EXCP_NONE; + if (stce_changed) { + riscv_timer_stce_changed(env, true, !!(val & MENVCFG_STCE)); + } + + return write_henvcfg(env, CSR_HENVCFG, env->henvcfg, ra); } static RISCVException read_menvcfgh(CPURISCVState *env, int csrno, @@ -2357,18 +3235,36 @@ static RISCVException read_menvcfgh(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException write_henvcfgh(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra); static RISCVException write_menvcfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | (cfg->ext_sstc ? MENVCFG_STCE : 0) | - (cfg->ext_svadu ? MENVCFG_ADUE : 0); + (cfg->ext_svadu ? MENVCFG_ADUE : 0) | + (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) | + (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0); uint64_t valh = (uint64_t)val << 32; + bool stce_changed = false; + + if (cfg->ext_sstc && + ((env->menvcfg & MENVCFG_STCE) != (valh & MENVCFG_STCE))) { + stce_changed = true; + } + + if ((valh & MENVCFG_DTE) == 0) { + env->mstatus &= ~MSTATUS_SDT; + } env->menvcfg = (env->menvcfg & ~mask) | (valh & mask); - return RISCV_EXCP_NONE; + if (stce_changed) { + riscv_timer_stce_changed(env, true, !!(valh & MENVCFG_STCE)); + } + + return write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32, ra); } static RISCVException read_senvcfg(CPURISCVState *env, int csrno, @@ -2386,16 +3282,37 @@ static RISCVException read_senvcfg(CPURISCVState *env, int csrno, } static RISCVException write_senvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE; RISCVException ret; + /* Update PMM field only if the value is valid according to Zjpm v1.0 */ + if (env_archcpu(env)->cfg.ext_ssnpm && + riscv_cpu_mxl(env) == MXL_RV64 && + get_field(val, SENVCFG_PMM) != PMM_FIELD_RESERVED) { + mask |= SENVCFG_PMM; + } ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); if (ret != RISCV_EXCP_NONE) { return ret; } + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= SENVCFG_LPE; + } + + /* Higher mode SSE must be ON for next-less mode SSE to be ON */ + if (env_archcpu(env)->cfg.ext_zicfiss && + get_field(env->menvcfg, MENVCFG_SSE) && + (env->virt_enabled ? 
get_field(env->henvcfg, HENVCFG_SSE) : true)) { + mask |= SENVCFG_SSE; + } + + if (env_archcpu(env)->cfg.ext_svukte) { + mask |= SENVCFG_UKTE; + } + env->senvcfg = (env->senvcfg & ~mask) | (val & mask); return RISCV_EXCP_NONE; } @@ -2414,17 +3331,20 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno, * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0 * henvcfg.stce is read_only 0 when menvcfg.stce = 0 * henvcfg.adue is read_only 0 when menvcfg.adue = 0 + * henvcfg.dte is read_only 0 when menvcfg.dte = 0 */ - *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) | - env->menvcfg); + *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE | + HENVCFG_DTE) | env->menvcfg); return RISCV_EXCP_NONE; } static RISCVException write_henvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { + const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE; RISCVException ret; + bool stce_changed = false; ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); if (ret != RISCV_EXCP_NONE) { @@ -2432,10 +3352,39 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno, } if (riscv_cpu_mxl(env) == MXL_RV64) { - mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE); + mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE | + HENVCFG_DTE); + + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= HENVCFG_LPE; + } + + /* H can light up SSE for VS only if HS had it from menvcfg */ + if (env_archcpu(env)->cfg.ext_zicfiss && + get_field(env->menvcfg, MENVCFG_SSE)) { + mask |= HENVCFG_SSE; + } + + /* Update PMM field only if the value is valid according to Zjpm v1.0 */ + if (env_archcpu(env)->cfg.ext_ssnpm && + get_field(val, HENVCFG_PMM) != PMM_FIELD_RESERVED) { + mask |= HENVCFG_PMM; + } + + if (cfg->ext_sstc && + ((env->henvcfg & HENVCFG_STCE) != (val & HENVCFG_STCE))) { + stce_changed = true; + } } - env->henvcfg = (env->henvcfg & ~mask) | (val & mask); + env->henvcfg = val & mask; + if ((env->henvcfg & HENVCFG_DTE) == 0) { + env->vsstatus &= ~MSTATUS_SDT; + } + + if (stce_changed) { + riscv_timer_stce_changed(env, false, !!(val & HENVCFG_STCE)); + } return RISCV_EXCP_NONE; } @@ -2450,25 +3399,40 @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno, return ret; } - *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) | - env->menvcfg)) >> 32; + *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE | + HENVCFG_DTE) | env->menvcfg)) >> 32; return RISCV_EXCP_NONE; } static RISCVException write_henvcfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { + const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | - HENVCFG_ADUE); + HENVCFG_ADUE | HENVCFG_DTE); uint64_t valh = (uint64_t)val << 32; RISCVException ret; + bool stce_changed = false; ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); if (ret != RISCV_EXCP_NONE) { return ret; } - env->henvcfg = (env->henvcfg & ~mask) | (valh & mask); + if (cfg->ext_sstc && + ((env->henvcfg & HENVCFG_STCE) != (valh & HENVCFG_STCE))) { + stce_changed = true; + } + + env->henvcfg = (env->henvcfg & 0xFFFFFFFF) | (valh & mask); + if ((env->henvcfg & HENVCFG_DTE) == 0) { + env->vsstatus &= ~MSTATUS_SDT; + } + + if (stce_changed) { + riscv_timer_stce_changed(env, false, !!(val & HENVCFG_STCE)); + } + return RISCV_EXCP_NONE; } @@ -2492,7 +3456,7 @@ static 
RISCVException write_mstateen(CPURISCVState *env, int csrno, } static RISCVException write_mstateen0(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; if (!riscv_has_ext(env, RVF)) { @@ -2503,11 +3467,28 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno, wr_mask |= SMSTATEEN0_P1P13; } + if (riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_smcsrind) { + wr_mask |= SMSTATEEN0_SVSLCT; + } + + /* + * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is + * implemented. However, that information is with MachineState and we can't + * figure that out in csr.c. Just enable if Smaia is available. + */ + if (riscv_cpu_cfg(env)->ext_smaia) { + wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC); + } + + if (riscv_cpu_cfg(env)->ext_ssctr) { + wr_mask |= SMSTATEEN0_CTR; + } + return write_mstateen(env, csrno, wr_mask, new_val); } static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -2534,7 +3515,7 @@ static RISCVException write_mstateenh(CPURISCVState *env, int csrno, } static RISCVException write_mstateen0h(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; @@ -2542,11 +3523,15 @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno, wr_mask |= SMSTATEEN0_P1P13; } + if (riscv_cpu_cfg(env)->ext_ssctr) { + wr_mask |= SMSTATEEN0_CTR; + } + return write_mstateenh(env, csrno, wr_mask, new_val); } static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -2575,7 +3560,7 @@ static RISCVException write_hstateen(CPURISCVState *env, int csrno, } static RISCVException write_hstateen0(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; @@ -2583,11 +3568,28 @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno, wr_mask |= SMSTATEEN0_FCSR; } + if (riscv_cpu_cfg(env)->ext_ssaia || riscv_cpu_cfg(env)->ext_sscsrind) { + wr_mask |= SMSTATEEN0_SVSLCT; + } + + /* + * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is + * implemented. However, that information is with MachineState and we can't + * figure that out in csr.c. Just enable if Ssaia is available. 
+ */ + if (riscv_cpu_cfg(env)->ext_ssaia) { + wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC); + } + + if (riscv_cpu_cfg(env)->ext_ssctr) { + wr_mask |= SMSTATEEN0_CTR; + } + return write_hstateen(env, csrno, wr_mask, new_val); } static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -2618,15 +3620,19 @@ static RISCVException write_hstateenh(CPURISCVState *env, int csrno, } static RISCVException write_hstateen0h(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; + if (riscv_cpu_cfg(env)->ext_ssctr) { + wr_mask |= SMSTATEEN0_CTR; + } + return write_hstateenh(env, csrno, wr_mask, new_val); } static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -2665,7 +3671,7 @@ static RISCVException write_sstateen(CPURISCVState *env, int csrno, } static RISCVException write_sstateen0(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; @@ -2677,7 +3683,7 @@ static RISCVException write_sstateen0(CPURISCVState *env, int csrno, } static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -2697,7 +3703,14 @@ static RISCVException rmw_mip64(CPURISCVState *env, int csrno, if (riscv_cpu_cfg(env)->ext_sstc && (env->priv == PRV_M) && get_field(env->menvcfg, MENVCFG_STCE)) { /* sstc extension forbids STIP & VSTIP to be writeable in mip */ - mask = mask & ~(MIP_STIP | MIP_VSTIP); + + /* STIP is not writable when menvcfg.STCE is enabled. */ + mask = mask & ~MIP_STIP; + + /* VSTIP is not writable when both [mh]envcfg.STCE are enabled. */ + if (get_field(env->henvcfg, HENVCFG_STCE)) { + mask = mask & ~MIP_VSTIP; + } } if (mask) { @@ -2896,6 +3909,13 @@ static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno, if (env->xl != MXL_RV32 || env->debugger) { mask |= SSTATUS64_UXL; } + if (riscv_cpu_cfg(env)->ext_ssdbltrp) { + mask |= SSTATUS_SDT; + } + + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= SSTATUS_SPELP; + } *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus)); return RISCV_EXCP_NONE; @@ -2908,13 +3928,20 @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno, if (env->xl != MXL_RV32 || env->debugger) { mask |= SSTATUS64_UXL; } + + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= SSTATUS_SPELP; + } + if (riscv_cpu_cfg(env)->ext_ssdbltrp) { + mask |= SSTATUS_SDT; + } /* TODO: Use SXL not MXL. 
*/ *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask); return RISCV_EXCP_NONE; } static RISCVException write_sstatus(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { target_ulong mask = (sstatus_v1_10_mask); @@ -2923,8 +3950,15 @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno, mask |= SSTATUS64_UXL; } } + + if (env_archcpu(env)->cfg.ext_zicfilp) { + mask |= SSTATUS_SPELP; + } + if (riscv_cpu_cfg(env)->ext_ssdbltrp) { + mask |= SSTATUS_SDT; + } target_ulong newval = (env->mstatus & ~mask) | (val & mask); - return write_mstatus(env, CSR_MSTATUS, newval); + return write_mstatus(env, CSR_MSTATUS, newval, ra); } static RISCVException rmw_vsie64(CPURISCVState *env, int csrno, @@ -3076,7 +4110,7 @@ static RISCVException read_stvec(CPURISCVState *env, int csrno, } static RISCVException write_stvec(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ if ((val & 3) < 2) { @@ -3095,7 +4129,7 @@ static RISCVException read_scounteren(CPURISCVState *env, int csrno, } static RISCVException write_scounteren(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { RISCVCPU *cpu = env_archcpu(env); @@ -3129,7 +4163,7 @@ static RISCVException read_sscratch(CPURISCVState *env, int csrno, } static RISCVException write_sscratch(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->sscratch = val; return RISCV_EXCP_NONE; @@ -3138,14 +4172,14 @@ static RISCVException write_sscratch(CPURISCVState *env, int csrno, static RISCVException read_sepc(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env->sepc; + *val = env->sepc & get_xepc_mask(env); return RISCV_EXCP_NONE; } static RISCVException write_sepc(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { - env->sepc = val; + env->sepc = val & get_xepc_mask(env); return RISCV_EXCP_NONE; } @@ -3157,7 +4191,7 @@ static RISCVException read_scause(CPURISCVState *env, int csrno, } static RISCVException write_scause(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->scause = val; return RISCV_EXCP_NONE; @@ -3171,7 +4205,7 @@ static RISCVException read_stval(CPURISCVState *env, int csrno, } static RISCVException write_stval(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->stval = val; return RISCV_EXCP_NONE; @@ -3311,7 +4345,7 @@ static RISCVException read_satp(CPURISCVState *env, int csrno, } static RISCVException write_satp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { if (!riscv_cpu_cfg(env)->mmu) { return RISCV_EXCP_NONE; @@ -3321,6 +4355,86 @@ static RISCVException write_satp(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t mask = wr_mask & SCTRDEPTH_MASK; + + if (ret_val) { + *ret_val = env->sctrdepth; + } + + env->sctrdepth = (env->sctrdepth & ~mask) | (new_val & mask); + + /* Correct depth. 
*/ + if (mask) { + uint64_t depth = get_field(env->sctrdepth, SCTRDEPTH_MASK); + + if (depth > SCTRDEPTH_MAX) { + depth = SCTRDEPTH_MAX; + env->sctrdepth = set_field(env->sctrdepth, SCTRDEPTH_MASK, depth); + } + + /* Update sctrstatus.WRPTR with a legal value */ + depth = 16ULL << depth; + env->sctrstatus = + env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1)); + } + + return RISCV_EXCP_NONE; +} + +static RISCVException rmw_sctrstatus(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint32_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK); + uint32_t mask = wr_mask & SCTRSTATUS_MASK; + + if (ret_val) { + *ret_val = env->sctrstatus; + } + + env->sctrstatus = (env->sctrstatus & ~mask) | (new_val & mask); + + /* Update sctrstatus.WRPTR with a legal value */ + env->sctrstatus = env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1)); + + return RISCV_EXCP_NONE; +} + +static RISCVException rmw_xctrctl(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t csr_mask, mask = wr_mask; + uint64_t *ctl_ptr = &env->mctrctl; + + if (csrno == CSR_MCTRCTL) { + csr_mask = MCTRCTL_MASK; + } else if (csrno == CSR_SCTRCTL && !env->virt_enabled) { + csr_mask = SCTRCTL_MASK; + } else { + /* + * This is for csrno == CSR_SCTRCTL and env->virt_enabled == true + * or csrno == CSR_VSCTRCTL. + */ + csr_mask = VSCTRCTL_MASK; + ctl_ptr = &env->vsctrctl; + } + + mask &= csr_mask; + + if (ret_val) { + *ret_val = *ctl_ptr & csr_mask; + } + + *ctl_ptr = (*ctl_ptr & ~mask) | (new_val & mask); + + return RISCV_EXCP_NONE; +} + static RISCVException read_vstopi(CPURISCVState *env, int csrno, target_ulong *val) { @@ -3453,9 +4567,20 @@ static RISCVException read_hstatus(CPURISCVState *env, int csrno, } static RISCVException write_hstatus(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { - env->hstatus = val; + uint64_t mask = (target_ulong)-1; + if (!env_archcpu(env)->cfg.ext_svukte) { + mask &= ~HSTATUS_HUKTE; + } + /* Update PMM field only if the value is valid according to Zjpm v1.0 */ + if (!env_archcpu(env)->cfg.ext_ssnpm || + riscv_cpu_mxl(env) != MXL_RV64 || + get_field(val, HSTATUS_HUPMM) == PMM_FIELD_RESERVED) { + mask &= ~HSTATUS_HUPMM; + } + env->hstatus = (env->hstatus & ~mask) | (val & mask); + if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) { qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options."); @@ -3474,7 +4599,7 @@ static RISCVException read_hedeleg(CPURISCVState *env, int csrno, } static RISCVException write_hedeleg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->hedeleg = val & vs_delegable_excps; return RISCV_EXCP_NONE; @@ -3495,7 +4620,7 @@ static RISCVException read_hedelegh(CPURISCVState *env, int csrno, } static RISCVException write_hedelegh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { RISCVException ret; ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13); @@ -3758,7 +4883,7 @@ static RISCVException read_hcounteren(CPURISCVState *env, int csrno, } static RISCVException write_hcounteren(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { RISCVCPU *cpu = env_archcpu(env); @@ -3778,7 +4903,7 @@ static RISCVException read_hgeie(CPURISCVState *env, int csrno, } static RISCVException write_hgeie(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { /* 
Only GEILEN:1 bits implemented and BIT0 is never implemented */ val &= ((((target_ulong)1) << env->geilen) - 1) << 1; @@ -3797,7 +4922,7 @@ static RISCVException read_htval(CPURISCVState *env, int csrno, } static RISCVException write_htval(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->htval = val; return RISCV_EXCP_NONE; @@ -3811,7 +4936,7 @@ static RISCVException read_htinst(CPURISCVState *env, int csrno, } static RISCVException write_htinst(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { return RISCV_EXCP_NONE; } @@ -3833,7 +4958,7 @@ static RISCVException read_hgatp(CPURISCVState *env, int csrno, } static RISCVException write_hgatp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->hgatp = legalize_xatp(env, env->hgatp, val); return RISCV_EXCP_NONE; @@ -3851,7 +4976,7 @@ static RISCVException read_htimedelta(CPURISCVState *env, int csrno, } static RISCVException write_htimedelta(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { if (!env->rdtime_fn) { return RISCV_EXCP_ILLEGAL_INST; @@ -3883,7 +5008,7 @@ static RISCVException read_htimedeltah(CPURISCVState *env, int csrno, } static RISCVException write_htimedeltah(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { if (!env->rdtime_fn) { return RISCV_EXCP_ILLEGAL_INST; @@ -3907,7 +5032,7 @@ static RISCVException read_hvictl(CPURISCVState *env, int csrno, } static RISCVException write_hvictl(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->hvictl = val & HVICTL_VALID_MASK; return RISCV_EXCP_NONE; @@ -3972,7 +5097,7 @@ static RISCVException read_hviprio1(CPURISCVState *env, int csrno, } static RISCVException write_hviprio1(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { return write_hvipriox(env, 0, env->hviprio, val); } @@ -3984,7 +5109,7 @@ static RISCVException read_hviprio1h(CPURISCVState *env, int csrno, } static RISCVException write_hviprio1h(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { return write_hvipriox(env, 4, env->hviprio, val); } @@ -3996,7 +5121,7 @@ static RISCVException read_hviprio2(CPURISCVState *env, int csrno, } static RISCVException write_hviprio2(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { return write_hvipriox(env, 8, env->hviprio, val); } @@ -4008,7 +5133,7 @@ static RISCVException read_hviprio2h(CPURISCVState *env, int csrno, } static RISCVException write_hviprio2h(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { return write_hvipriox(env, 12, env->hviprio, val); } @@ -4022,12 +5147,19 @@ static RISCVException read_vsstatus(CPURISCVState *env, int csrno, } static RISCVException write_vsstatus(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint64_t mask = (target_ulong)-1; if ((val & VSSTATUS64_UXL) == 0) { mask &= ~VSSTATUS64_UXL; } + if ((env->henvcfg & HENVCFG_DTE)) { + if ((val & SSTATUS_SDT) != 0) { + val &= ~SSTATUS_SIE; + } + } else { + val &= ~SSTATUS_SDT; + } env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val; return RISCV_EXCP_NONE; } @@ -4040,7 +5172,7 @@ static RISCVException read_vstvec(CPURISCVState *env, int csrno, } static RISCVException write_vstvec(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { /* bits [1:0] encode 
mode; 0 = direct, 1 = vectored, 2 >= reserved */ if ((val & 3) < 2) { @@ -4059,7 +5191,7 @@ static RISCVException read_vsscratch(CPURISCVState *env, int csrno, } static RISCVException write_vsscratch(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->vsscratch = val; return RISCV_EXCP_NONE; @@ -4073,7 +5205,7 @@ static RISCVException read_vsepc(CPURISCVState *env, int csrno, } static RISCVException write_vsepc(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->vsepc = val; return RISCV_EXCP_NONE; @@ -4087,7 +5219,7 @@ static RISCVException read_vscause(CPURISCVState *env, int csrno, } static RISCVException write_vscause(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->vscause = val; return RISCV_EXCP_NONE; @@ -4101,7 +5233,7 @@ static RISCVException read_vstval(CPURISCVState *env, int csrno, } static RISCVException write_vstval(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->vstval = val; return RISCV_EXCP_NONE; @@ -4115,7 +5247,7 @@ static RISCVException read_vsatp(CPURISCVState *env, int csrno, } static RISCVException write_vsatp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->vsatp = legalize_xatp(env, env->vsatp, val); return RISCV_EXCP_NONE; @@ -4129,7 +5261,7 @@ static RISCVException read_mtval2(CPURISCVState *env, int csrno, } static RISCVException write_mtval2(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->mtval2 = val; return RISCV_EXCP_NONE; @@ -4143,7 +5275,7 @@ static RISCVException read_mtinst(CPURISCVState *env, int csrno, } static RISCVException write_mtinst(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->mtinst = val; return RISCV_EXCP_NONE; @@ -4158,7 +5290,7 @@ static RISCVException read_mseccfg(CPURISCVState *env, int csrno, } static RISCVException write_mseccfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { mseccfg_csr_write(env, val); return RISCV_EXCP_NONE; @@ -4174,7 +5306,7 @@ static RISCVException read_pmpcfg(CPURISCVState *env, int csrno, } static RISCVException write_pmpcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint32_t reg_index = csrno - CSR_PMPCFG0; @@ -4190,7 +5322,7 @@ static RISCVException read_pmpaddr(CPURISCVState *env, int csrno, } static RISCVException write_pmpaddr(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val); return RISCV_EXCP_NONE; @@ -4204,7 +5336,7 @@ static RISCVException read_tselect(CPURISCVState *env, int csrno, } static RISCVException write_tselect(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { tselect_csr_write(env, val); return RISCV_EXCP_NONE; @@ -4228,7 +5360,7 @@ static RISCVException read_tdata(CPURISCVState *env, int csrno, } static RISCVException write_tdata(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { if (!tdata_available(env, csrno - CSR_TDATA1)) { return RISCV_EXCP_ILLEGAL_INST; @@ -4253,7 +5385,7 @@ static RISCVException read_mcontext(CPURISCVState *env, int csrno, } static RISCVException write_mcontext(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? 
true : false; int32_t mask; @@ -4270,299 +5402,71 @@ static RISCVException write_mcontext(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -/* - * Functions to access Pointer Masking feature registers - * We have to check if current priv lvl could modify - * csr in given mode - */ -static bool check_pm_current_disabled(CPURISCVState *env, int csrno) -{ - int csr_priv = get_field(csrno, 0x300); - int pm_current; - - if (env->debugger) { - return false; - } - /* - * If priv lvls differ that means we're accessing csr from higher priv lvl, - * so allow the access - */ - if (env->priv != csr_priv) { - return false; - } - switch (env->priv) { - case PRV_M: - pm_current = get_field(env->mmte, M_PM_CURRENT); - break; - case PRV_S: - pm_current = get_field(env->mmte, S_PM_CURRENT); - break; - case PRV_U: - pm_current = get_field(env->mmte, U_PM_CURRENT); - break; - default: - g_assert_not_reached(); - } - /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */ - return !pm_current; -} - -static RISCVException read_mmte(CPURISCVState *env, int csrno, - target_ulong *val) -{ - *val = env->mmte & MMTE_MASK; - return RISCV_EXCP_NONE; -} - -static RISCVException write_mmte(CPURISCVState *env, int csrno, - target_ulong val) -{ - uint64_t mstatus; - target_ulong wpri_val = val & MMTE_MASK; - - if (val != wpri_val) { - qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" - TARGET_FMT_lx "\n", "MMTE: WPRI violation written 0x", - val, "vs expected 0x", wpri_val); - } - /* for machine mode pm.current is hardwired to 1 */ - wpri_val |= MMTE_M_PM_CURRENT; - - /* hardwiring pm.instruction bit to 0, since it's not supported yet */ - wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN); - env->mmte = wpri_val | EXT_STATUS_DIRTY; - riscv_cpu_update_mask(env); - - /* Set XS and SD bits, since PM CSRs are dirty */ - mstatus = env->mstatus | MSTATUS_XS; - write_mstatus(env, csrno, mstatus); - return RISCV_EXCP_NONE; -} - -static RISCVException read_smte(CPURISCVState *env, int csrno, - target_ulong *val) -{ - *val = env->mmte & SMTE_MASK; - return RISCV_EXCP_NONE; -} - -static RISCVException write_smte(CPURISCVState *env, int csrno, - target_ulong val) -{ - target_ulong wpri_val = val & SMTE_MASK; - - if (val != wpri_val) { - qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" - TARGET_FMT_lx "\n", "SMTE: WPRI violation written 0x", - val, "vs expected 0x", wpri_val); - } - - /* if pm.current==0 we can't modify current PM CSRs */ - if (check_pm_current_disabled(env, csrno)) { - return RISCV_EXCP_NONE; - } - - wpri_val |= (env->mmte & ~SMTE_MASK); - write_mmte(env, csrno, wpri_val); - return RISCV_EXCP_NONE; -} - -static RISCVException read_umte(CPURISCVState *env, int csrno, - target_ulong *val) -{ - *val = env->mmte & UMTE_MASK; - return RISCV_EXCP_NONE; -} - -static RISCVException write_umte(CPURISCVState *env, int csrno, - target_ulong val) -{ - target_ulong wpri_val = val & UMTE_MASK; - - if (val != wpri_val) { - qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" - TARGET_FMT_lx "\n", "UMTE: WPRI violation written 0x", - val, "vs expected 0x", wpri_val); - } - - if (check_pm_current_disabled(env, csrno)) { - return RISCV_EXCP_NONE; - } - - wpri_val |= (env->mmte & ~UMTE_MASK); - write_mmte(env, csrno, wpri_val); - return RISCV_EXCP_NONE; -} - -static RISCVException read_mpmmask(CPURISCVState *env, int csrno, - target_ulong *val) -{ - *val = env->mpmmask; - return RISCV_EXCP_NONE; -} - -static RISCVException write_mpmmask(CPURISCVState *env, int csrno, - 
target_ulong val) -{ - uint64_t mstatus; - - env->mpmmask = val; - if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) { - env->cur_pmmask = val; - } - env->mmte |= EXT_STATUS_DIRTY; - - /* Set XS and SD bits, since PM CSRs are dirty */ - mstatus = env->mstatus | MSTATUS_XS; - write_mstatus(env, csrno, mstatus); - return RISCV_EXCP_NONE; -} - -static RISCVException read_spmmask(CPURISCVState *env, int csrno, - target_ulong *val) +static RISCVException read_mnscratch(CPURISCVState *env, int csrno, + target_ulong *val) { - *val = env->spmmask; + *val = env->mnscratch; return RISCV_EXCP_NONE; } -static RISCVException write_spmmask(CPURISCVState *env, int csrno, - target_ulong val) +static RISCVException write_mnscratch(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) { - uint64_t mstatus; - - /* if pm.current==0 we can't modify current PM CSRs */ - if (check_pm_current_disabled(env, csrno)) { - return RISCV_EXCP_NONE; - } - env->spmmask = val; - if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) { - env->cur_pmmask = val; - if (cpu_get_xl(env, PRV_S) == MXL_RV32) { - env->cur_pmmask &= UINT32_MAX; - } - } - env->mmte |= EXT_STATUS_DIRTY; - - /* Set XS and SD bits, since PM CSRs are dirty */ - mstatus = env->mstatus | MSTATUS_XS; - write_mstatus(env, csrno, mstatus); + env->mnscratch = val; return RISCV_EXCP_NONE; } -static RISCVException read_upmmask(CPURISCVState *env, int csrno, - target_ulong *val) +static RISCVException read_mnepc(CPURISCVState *env, int csrno, + target_ulong *val) { - *val = env->upmmask; + *val = env->mnepc; return RISCV_EXCP_NONE; } -static RISCVException write_upmmask(CPURISCVState *env, int csrno, - target_ulong val) +static RISCVException write_mnepc(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) { - uint64_t mstatus; - - /* if pm.current==0 we can't modify current PM CSRs */ - if (check_pm_current_disabled(env, csrno)) { - return RISCV_EXCP_NONE; - } - env->upmmask = val; - if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) { - env->cur_pmmask = val; - if (cpu_get_xl(env, PRV_U) == MXL_RV32) { - env->cur_pmmask &= UINT32_MAX; - } - } - env->mmte |= EXT_STATUS_DIRTY; - - /* Set XS and SD bits, since PM CSRs are dirty */ - mstatus = env->mstatus | MSTATUS_XS; - write_mstatus(env, csrno, mstatus); + env->mnepc = val; return RISCV_EXCP_NONE; } -static RISCVException read_mpmbase(CPURISCVState *env, int csrno, +static RISCVException read_mncause(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env->mpmbase; + *val = env->mncause; return RISCV_EXCP_NONE; } -static RISCVException write_mpmbase(CPURISCVState *env, int csrno, - target_ulong val) +static RISCVException write_mncause(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) { - uint64_t mstatus; - - env->mpmbase = val; - if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) { - env->cur_pmbase = val; - } - env->mmte |= EXT_STATUS_DIRTY; - - /* Set XS and SD bits, since PM CSRs are dirty */ - mstatus = env->mstatus | MSTATUS_XS; - write_mstatus(env, csrno, mstatus); + env->mncause = val; return RISCV_EXCP_NONE; } -static RISCVException read_spmbase(CPURISCVState *env, int csrno, - target_ulong *val) +static RISCVException read_mnstatus(CPURISCVState *env, int csrno, + target_ulong *val) { - *val = env->spmbase; + *val = env->mnstatus; return RISCV_EXCP_NONE; } -static RISCVException write_spmbase(CPURISCVState *env, int csrno, - target_ulong val) +static RISCVException write_mnstatus(CPURISCVState 
*env, int csrno, + target_ulong val, uintptr_t ra) { - uint64_t mstatus; + target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP); - /* if pm.current==0 we can't modify current PM CSRs */ - if (check_pm_current_disabled(env, csrno)) { - return RISCV_EXCP_NONE; - } - env->spmbase = val; - if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) { - env->cur_pmbase = val; - if (cpu_get_xl(env, PRV_S) == MXL_RV32) { - env->cur_pmbase &= UINT32_MAX; + if (riscv_has_ext(env, RVH)) { + /* Flush tlb on mnstatus fields that affect VM. */ + if ((val ^ env->mnstatus) & MNSTATUS_MNPV) { + tlb_flush(env_cpu(env)); } - } - env->mmte |= EXT_STATUS_DIRTY; - - /* Set XS and SD bits, since PM CSRs are dirty */ - mstatus = env->mstatus | MSTATUS_XS; - write_mstatus(env, csrno, mstatus); - return RISCV_EXCP_NONE; -} - -static RISCVException read_upmbase(CPURISCVState *env, int csrno, - target_ulong *val) -{ - *val = env->upmbase; - return RISCV_EXCP_NONE; -} - -static RISCVException write_upmbase(CPURISCVState *env, int csrno, - target_ulong val) -{ - uint64_t mstatus; - /* if pm.current==0 we can't modify current PM CSRs */ - if (check_pm_current_disabled(env, csrno)) { - return RISCV_EXCP_NONE; - } - env->upmbase = val; - if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) { - env->cur_pmbase = val; - if (cpu_get_xl(env, PRV_U) == MXL_RV32) { - env->cur_pmbase &= UINT32_MAX; - } + mask |= MNSTATUS_MNPV; } - env->mmte |= EXT_STATUS_DIRTY; - /* Set XS and SD bits, since PM CSRs are dirty */ - mstatus = env->mstatus | MSTATUS_XS; - write_mstatus(env, csrno, mstatus); + /* mnstatus.mnie can only be cleared by hardware. */ + env->mnstatus = (env->mnstatus & MNSTATUS_NMIE) | (val & mask); return RISCV_EXCP_NONE; } @@ -4676,7 +5580,7 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env, csr_priv = get_field(csrno, 0x300); if (!env->debugger && (effective_priv < csr_priv)) { - if (csr_priv == (PRV_S + 1) && env->virt_enabled) { + if (csr_priv <= (PRV_S + 1) && env->virt_enabled) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } return RISCV_EXCP_ILLEGAL_INST; @@ -4688,7 +5592,8 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env, static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, - target_ulong write_mask) + target_ulong write_mask, + uintptr_t ra) { RISCVException ret; target_ulong old_value = 0; @@ -4718,7 +5623,7 @@ static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno, if (write_mask) { new_value = (old_value & ~write_mask) | (new_value & write_mask); if (csr_ops[csrno].write) { - ret = csr_ops[csrno].write(env, csrno, new_value); + ret = csr_ops[csrno].write(env, csrno, new_value, ra); if (ret != RISCV_EXCP_NONE) { return ret; } @@ -4741,25 +5646,25 @@ RISCVException riscv_csrr(CPURISCVState *env, int csrno, return ret; } - return riscv_csrrw_do64(env, csrno, ret_value, 0, 0); + return riscv_csrrw_do64(env, csrno, ret_value, 0, 0, 0); } RISCVException riscv_csrrw(CPURISCVState *env, int csrno, - target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) + target_ulong *ret_value, target_ulong new_value, + target_ulong write_mask, uintptr_t ra) { RISCVException ret = riscv_csrrw_check(env, csrno, true); if (ret != RISCV_EXCP_NONE) { return ret; } - return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask); + return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask, ra); } static RISCVException riscv_csrrw_do128(CPURISCVState *env, int 
csrno, Int128 *ret_value, Int128 new_value, - Int128 write_mask) + Int128 write_mask, uintptr_t ra) { RISCVException ret; Int128 old_value; @@ -4781,7 +5686,7 @@ static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno, } } else if (csr_ops[csrno].write) { /* avoids having to write wrappers for all registers */ - ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value)); + ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value), ra); if (ret != RISCV_EXCP_NONE) { return ret; } @@ -4808,7 +5713,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno, if (csr_ops[csrno].read128) { return riscv_csrrw_do128(env, csrno, ret_value, - int128_zero(), int128_zero()); + int128_zero(), int128_zero(), 0); } /* @@ -4819,9 +5724,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno, * accesses */ target_ulong old_value; - ret = riscv_csrrw_do64(env, csrno, &old_value, - (target_ulong)0, - (target_ulong)0); + ret = riscv_csrrw_do64(env, csrno, &old_value, 0, 0, 0); if (ret == RISCV_EXCP_NONE && ret_value) { *ret_value = int128_make64(old_value); } @@ -4829,8 +5732,8 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno, } RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, - Int128 *ret_value, - Int128 new_value, Int128 write_mask) + Int128 *ret_value, Int128 new_value, + Int128 write_mask, uintptr_t ra) { RISCVException ret; @@ -4840,7 +5743,8 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, } if (csr_ops[csrno].read128) { - return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask); + return riscv_csrrw_do128(env, csrno, ret_value, + new_value, write_mask, ra); } /* @@ -4853,7 +5757,7 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, target_ulong old_value; ret = riscv_csrrw_do64(env, csrno, &old_value, int128_getlo(new_value), - int128_getlo(write_mask)); + int128_getlo(write_mask), ra); if (ret == RISCV_EXCP_NONE && ret_value) { *ret_value = int128_make64(old_value); } @@ -4876,7 +5780,7 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, if (!write_mask) { ret = riscv_csrr(env, csrno, ret_value); } else { - ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask); + ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask, 0); } #if !defined(CONFIG_USER_ONLY) env->debugger = false; @@ -4892,7 +5796,7 @@ static RISCVException read_jvt(CPURISCVState *env, int csrno, } static RISCVException write_jvt(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->jvt = val; return RISCV_EXCP_NONE; @@ -4934,6 +5838,9 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { /* Zcmt Extension */ [CSR_JVT] = {"jvt", zcmt, read_jvt, write_jvt}, + /* zicfiss Extension, shadow stack register */ + [CSR_SSP] = { "ssp", cfi_ss, read_ssp, write_ssp }, + #if !defined(CONFIG_USER_ONLY) /* Machine Timers and Counters */ [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter, @@ -4958,8 +5865,8 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { NULL, read_mstatus_i128 }, [CSR_MISA] = { "misa", any, read_misa, write_misa, NULL, read_misa_i128 }, - [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg }, - [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg }, + [CSR_MIDELEG] = { "mideleg", smode, NULL, NULL, rmw_mideleg }, + [CSR_MEDELEG] = { "medeleg", smode, read_medeleg, write_medeleg }, [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie }, [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec }, [CSR_MCOUNTEREN] = { "mcounteren", umode, 
read_mcounteren, @@ -4967,7 +5874,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, write_mstatush }, - [CSR_MEDELEGH] = { "medelegh", any32, read_zero, write_ignore, + [CSR_MEDELEGH] = { "medelegh", smode32, read_zero, write_ignore, .min_priv_ver = PRIV_VERSION_1_13_0 }, [CSR_HEDELEGH] = { "hedelegh", hmode32, read_hedelegh, write_hedelegh, .min_priv_ver = PRIV_VERSION_1_13_0 }, @@ -4981,8 +5888,22 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip }, /* Machine-Level Window to Indirectly Accessed Registers (AIA) */ - [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect }, - [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg }, + [CSR_MISELECT] = { "miselect", csrind_or_aia_any, NULL, NULL, + rmw_xiselect }, + [CSR_MIREG] = { "mireg", csrind_or_aia_any, NULL, NULL, + rmw_xireg }, + + /* Machine Indirect Register Alias */ + [CSR_MIREG2] = { "mireg2", csrind_any, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MIREG3] = { "mireg3", csrind_any, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MIREG4] = { "mireg4", csrind_any, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MIREG5] = { "mireg5", csrind_any, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MIREG6] = { "mireg6", csrind_any, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, /* Machine-Level Interrupts (AIA) */ [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei }, @@ -4993,7 +5914,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_MVIP] = { "mvip", aia_any, NULL, NULL, rmw_mvip }, /* Machine-Level High-Half CSRs (AIA) */ - [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh }, + [CSR_MIDELEGH] = { "midelegh", aia_smode32, NULL, NULL, rmw_midelegh }, [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh }, [CSR_MVIENH] = { "mvienh", aia_any32, NULL, NULL, rmw_mvienh }, [CSR_MVIPH] = { "mviph", aia_any32, NULL, NULL, rmw_mviph }, @@ -5070,6 +5991,21 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { write_sstateen_1_3, .min_priv_ver = PRIV_VERSION_1_12_0 }, + /* RNMI */ + [CSR_MNSCRATCH] = { "mnscratch", rnmi, read_mnscratch, write_mnscratch, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MNEPC] = { "mnepc", rnmi, read_mnepc, write_mnepc, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MNCAUSE] = { "mncause", rnmi, read_mncause, write_mncause, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MNSTATUS] = { "mnstatus", rnmi, read_mnstatus, write_mnstatus, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + + /* Supervisor Counter Delegation */ + [CSR_SCOUNTINHIBIT] = {"scountinhibit", scountinhibit_pred, + read_scountinhibit, write_scountinhibit, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + /* Supervisor Trap Setup */ [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, NULL, read_sstatus_i128 }, @@ -5100,8 +6036,22 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_SATP] = { "satp", satp, read_satp, write_satp }, /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */ - [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect }, - [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg }, + [CSR_SISELECT] = { "siselect", csrind_or_aia_smode, NULL, NULL, + rmw_xiselect }, + [CSR_SIREG] = { "sireg", csrind_or_aia_smode, NULL, NULL, + rmw_xireg }, + + /* Supervisor Indirect Register Alias */ + [CSR_SIREG2] = { "sireg2", csrind_smode, NULL, NULL, 
rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_SIREG3] = { "sireg3", csrind_smode, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_SIREG4] = { "sireg4", csrind_smode, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_SIREG5] = { "sireg5", csrind_smode, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_SIREG6] = { "sireg6", csrind_smode, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, /* Supervisor-Level Interrupts (AIA) */ [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei }, @@ -5164,7 +6114,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2, + [CSR_MTVAL2] = { "mtval2", dbltrp_hmode, read_mtval2, write_mtval2, .min_priv_ver = PRIV_VERSION_1_12_0 }, [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst, .min_priv_ver = PRIV_VERSION_1_12_0 }, @@ -5180,9 +6130,22 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { /* * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) */ - [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL, - rmw_xiselect }, - [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg }, + [CSR_VSISELECT] = { "vsiselect", csrind_or_aia_hmode, NULL, NULL, + rmw_xiselect }, + [CSR_VSIREG] = { "vsireg", csrind_or_aia_hmode, NULL, NULL, + rmw_xireg }, + + /* Virtual Supervisor Indirect Alias */ + [CSR_VSIREG2] = { "vsireg2", csrind_hmode, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSIREG3] = { "vsireg3", csrind_hmode, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSIREG4] = { "vsireg4", csrind_hmode, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSIREG5] = { "vsireg5", csrind_hmode, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_VSIREG6] = { "vsireg6", csrind_hmode, NULL, NULL, rmw_xiregi, + .min_priv_ver = PRIV_VERSION_1_12_0 }, /* VS-Level Interrupts (H-extension with AIA) */ [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei }, @@ -5207,6 +6170,30 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg }, [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg }, [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg }, + [CSR_PMPCFG4] = { "pmpcfg4", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG5] = { "pmpcfg5", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG6] = { "pmpcfg6", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG7] = { "pmpcfg7", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG8] = { "pmpcfg8", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG9] = { "pmpcfg9", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG10] = { "pmpcfg10", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG11] = { "pmpcfg11", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG12] = { "pmpcfg12", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG13] = { "pmpcfg13", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG14] = { "pmpcfg14", pmp, read_pmpcfg, 
write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG15] = { "pmpcfg15", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr }, @@ -5221,8 +6208,104 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr }, - [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr }, - [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR16] = { "pmpaddr16", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR17] = { "pmpaddr17", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR18] = { "pmpaddr18", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR19] = { "pmpaddr19", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR20] = { "pmpaddr20", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR21] = { "pmpaddr21", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR22] = { "pmpaddr22", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR23] = { "pmpaddr23", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR24] = { "pmpaddr24", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR25] = { "pmpaddr25", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR26] = { "pmpaddr26", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR27] = { "pmpaddr27", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR28] = { "pmpaddr28", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR29] = { "pmpaddr29", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR30] = { "pmpaddr30", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR31] = { "pmpaddr31", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR32] = { "pmpaddr32", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR33] = { "pmpaddr33", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR34] = { "pmpaddr34", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR35] = { "pmpaddr35", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR36] = { "pmpaddr36", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR37] = { "pmpaddr37", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR38] = { "pmpaddr38", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR39] = { "pmpaddr39", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = 
PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR40] = { "pmpaddr40", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR41] = { "pmpaddr41", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR42] = { "pmpaddr42", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR43] = { "pmpaddr43", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR44] = { "pmpaddr44", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR45] = { "pmpaddr45", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR46] = { "pmpaddr46", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR47] = { "pmpaddr47", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR48] = { "pmpaddr48", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR49] = { "pmpaddr49", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR50] = { "pmpaddr50", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR51] = { "pmpaddr51", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR52] = { "pmpaddr52", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR53] = { "pmpaddr53", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR54] = { "pmpaddr54", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR55] = { "pmpaddr55", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR56] = { "pmpaddr56", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR57] = { "pmpaddr57", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR58] = { "pmpaddr58", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR59] = { "pmpaddr59", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR60] = { "pmpaddr60", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR61] = { "pmpaddr61", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR62] = { "pmpaddr62", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR63] = { "pmpaddr63", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, /* Debug CSRs */ [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect }, @@ -5232,24 +6315,11 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore }, [CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext }, - /* User Pointer Masking */ - [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte }, - [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask, - write_upmmask }, - [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase, - write_upmbase }, - /* Machine Pointer Masking */ - [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte }, - [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask, - write_mpmmask }, - [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase, - write_mpmbase }, - /* Supervisor Pointer Masking */ - [CSR_SMTE] = { "smte", pointer_masking, 
read_smte, write_smte }, - [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask, - write_spmmask }, - [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase, - write_spmbase }, + [CSR_MCTRCTL] = { "mctrctl", ctr_mmode, NULL, NULL, rmw_xctrctl }, + [CSR_SCTRCTL] = { "sctrctl", ctr_smode, NULL, NULL, rmw_xctrctl }, + [CSR_VSCTRCTL] = { "vsctrctl", ctr_smode, NULL, NULL, rmw_xctrctl }, + [CSR_SCTRDEPTH] = { "sctrdepth", ctr_smode, NULL, NULL, rmw_sctrdepth }, + [CSR_SCTRSTATUS] = { "sctrstatus", ctr_smode, NULL, NULL, rmw_sctrstatus }, /* Performance Counters */ [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter }, diff --git a/target/riscv/debug.c b/target/riscv/debug.c index 0b5099ff9ab..56644667497 100644 --- a/target/riscv/debug.c +++ b/target/riscv/debug.c @@ -28,9 +28,10 @@ #include "qapi/error.h" #include "cpu.h" #include "trace.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" -#include "sysemu/cpu-timers.h" +#include "exec/watchpoint.h" +#include "system/cpu-timers.h" +#include "exec/icount.h" /* * The following M-mode trigger CSRs are implemented: @@ -217,6 +218,66 @@ static inline void warn_always_zero_bit(target_ulong val, target_ulong mask, } } +static target_ulong textra_validate(CPURISCVState *env, target_ulong tdata3) +{ + target_ulong mhvalue, mhselect; + target_ulong mhselect_new; + target_ulong textra; + const uint32_t mhselect_no_rvh[8] = { 0, 0, 0, 0, 4, 4, 4, 4 }; + + switch (riscv_cpu_mxl(env)) { + case MXL_RV32: + mhvalue = get_field(tdata3, TEXTRA32_MHVALUE); + mhselect = get_field(tdata3, TEXTRA32_MHSELECT); + /* Validate unimplemented (always zero) bits */ + warn_always_zero_bit(tdata3, (target_ulong)TEXTRA32_SBYTEMASK, + "sbytemask"); + warn_always_zero_bit(tdata3, (target_ulong)TEXTRA32_SVALUE, + "svalue"); + warn_always_zero_bit(tdata3, (target_ulong)TEXTRA32_SSELECT, + "sselect"); + break; + case MXL_RV64: + case MXL_RV128: + mhvalue = get_field(tdata3, TEXTRA64_MHVALUE); + mhselect = get_field(tdata3, TEXTRA64_MHSELECT); + /* Validate unimplemented (always zero) bits */ + warn_always_zero_bit(tdata3, (target_ulong)TEXTRA64_SBYTEMASK, + "sbytemask"); + warn_always_zero_bit(tdata3, (target_ulong)TEXTRA64_SVALUE, + "svalue"); + warn_always_zero_bit(tdata3, (target_ulong)TEXTRA64_SSELECT, + "sselect"); + break; + default: + g_assert_not_reached(); + } + + /* Validate mhselect. 
*/ + mhselect_new = mhselect_no_rvh[mhselect]; + if (mhselect != mhselect_new) { + qemu_log_mask(LOG_UNIMP, "mhselect only supports 0 or 4 for now\n"); + } + + /* Write legal values into textra */ + textra = 0; + switch (riscv_cpu_mxl(env)) { + case MXL_RV32: + textra = set_field(textra, TEXTRA32_MHVALUE, mhvalue); + textra = set_field(textra, TEXTRA32_MHSELECT, mhselect_new); + break; + case MXL_RV64: + case MXL_RV128: + textra = set_field(textra, TEXTRA64_MHVALUE, mhvalue); + textra = set_field(textra, TEXTRA64_MHSELECT, mhselect_new); + break; + default: + g_assert_not_reached(); + } + + return textra; +} + static void do_trigger_action(CPURISCVState *env, target_ulong trigger_index) { trigger_action_t action = get_trigger_action(env, trigger_index); @@ -304,11 +365,54 @@ static bool trigger_priv_match(CPURISCVState *env, trigger_type_t type, return false; } +static bool trigger_textra_match(CPURISCVState *env, trigger_type_t type, + int trigger_index) +{ + target_ulong textra = env->tdata3[trigger_index]; + target_ulong mhvalue, mhselect; + + if (type < TRIGGER_TYPE_AD_MATCH || type > TRIGGER_TYPE_AD_MATCH6) { + /* textra checking is only applicable when type is 2, 3, 4, 5, or 6 */ + return true; + } + + switch (riscv_cpu_mxl(env)) { + case MXL_RV32: + mhvalue = get_field(textra, TEXTRA32_MHVALUE); + mhselect = get_field(textra, TEXTRA32_MHSELECT); + break; + case MXL_RV64: + case MXL_RV128: + mhvalue = get_field(textra, TEXTRA64_MHVALUE); + mhselect = get_field(textra, TEXTRA64_MHSELECT); + break; + default: + g_assert_not_reached(); + } + + /* Check mhvalue and mhselect. */ + switch (mhselect) { + case MHSELECT_IGNORE: + break; + case MHSELECT_MCONTEXT: + /* Match if the low bits of mcontext/hcontext equal mhvalue. */ + if (mhvalue != env->mcontext) { + return false; + } + break; + default: + break; + } + + return true; +} + /* Common matching conditions for all types of the triggers. */ static bool trigger_common_match(CPURISCVState *env, trigger_type_t type, int trigger_index) { - return trigger_priv_match(env, type, trigger_index); + return trigger_priv_match(env, type, trigger_index) && + trigger_textra_match(env, type, trigger_index); } /* type 2 trigger */ @@ -375,7 +479,7 @@ static void type2_breakpoint_insert(CPURISCVState *env, target_ulong index) bool enabled = type2_breakpoint_enabled(ctrl); CPUState *cs = env_cpu(env); int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; - uint32_t size; + uint32_t size, def_size; if (!enabled) { return; @@ -398,7 +502,9 @@ static void type2_breakpoint_insert(CPURISCVState *env, target_ulong index) cpu_watchpoint_insert(cs, addr, size, flags, &env->cpu_watchpoint[index]); } else { - cpu_watchpoint_insert(cs, addr, 8, flags, + def_size = riscv_cpu_mxl(env) == MXL_RV64 ? 
8 : 4; + + cpu_watchpoint_insert(cs, addr, def_size, flags, &env->cpu_watchpoint[index]); } } @@ -441,14 +547,11 @@ static void type2_reg_write(CPURISCVState *env, target_ulong index, } break; case TDATA3: - qemu_log_mask(LOG_UNIMP, - "tdata3 is not supported for type 2 trigger\n"); + env->tdata3[index] = textra_validate(env, val); break; default: g_assert_not_reached(); } - - return; } /* type 6 trigger */ @@ -558,14 +661,11 @@ static void type6_reg_write(CPURISCVState *env, target_ulong index, } break; case TDATA3: - qemu_log_mask(LOG_UNIMP, - "tdata3 is not supported for type 6 trigger\n"); + env->tdata3[index] = textra_validate(env, val); break; default: g_assert_not_reached(); } - - return; } /* icount trigger type */ @@ -741,14 +841,11 @@ static void itrigger_reg_write(CPURISCVState *env, target_ulong index, "tdata2 is not supported for icount trigger\n"); break; case TDATA3: - qemu_log_mask(LOG_UNIMP, - "tdata3 is not supported for icount trigger\n"); + env->tdata3[index] = textra_validate(env, val); break; default: g_assert_not_reached(); } - - return; } static int itrigger_get_adjust_count(CPURISCVState *env) diff --git a/target/riscv/debug.h b/target/riscv/debug.h index c3478635789..f76b8f944a2 100644 --- a/target/riscv/debug.h +++ b/target/riscv/debug.h @@ -131,6 +131,9 @@ enum { #define ITRIGGER_VU BIT(25) #define ITRIGGER_VS BIT(26) +#define MHSELECT_IGNORE 0 +#define MHSELECT_MCONTEXT 4 + bool tdata_available(CPURISCVState *env, int tdata_index); target_ulong tselect_csr_read(CPURISCVState *env); diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c index 91b1a56d10a..af40561b318 100644 --- a/target/riscv/fpu_helper.c +++ b/target/riscv/fpu_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" #include "qemu/host-utils.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" #include "internals.h" @@ -756,6 +755,6 @@ uint64_t helper_fcvt_bf16_s(CPURISCVState *env, uint64_t rs1) uint64_t helper_fcvt_s_bf16(CPURISCVState *env, uint64_t rs1) { - float16 frs1 = check_nanbox_h(env, rs1); + float16 frs1 = check_nanbox_bf16(env, rs1); return nanbox_s(env, bfloat16_to_float32(frs1, &env->fp_status)); } diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c index c07df972f1e..1934f919c01 100644 --- a/target/riscv/gdbstub.c +++ b/target/riscv/gdbstub.c @@ -62,7 +62,7 @@ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) return 0; } - switch (mcc->misa_mxl_max) { + switch (mcc->def->misa_mxl_max) { case MXL_RV32: return gdb_get_reg32(mem_buf, tmp); case MXL_RV64: @@ -82,7 +82,7 @@ int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) int length = 0; target_ulong tmp; - switch (mcc->misa_mxl_max) { + switch (mcc->def->misa_mxl_max) { case MXL_RV32: tmp = (int32_t)ldl_p(mem_buf); length = 4; @@ -213,7 +213,10 @@ static int riscv_gdb_get_virtual(CPUState *cs, GByteArray *buf, int n) RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; - return gdb_get_regl(buf, env->priv); + /* Per RiscV debug spec v1.0.0 rc4 */ + target_ulong vbit = (env->virt_enabled) ? 
BIT(2) : 0; + + return gdb_get_regl(buf, env->priv | vbit); #endif } return 0; @@ -226,10 +229,22 @@ static int riscv_gdb_set_virtual(CPUState *cs, uint8_t *mem_buf, int n) RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; - env->priv = ldtul_p(mem_buf) & 0x3; - if (env->priv == PRV_RESERVED) { - env->priv = PRV_S; + target_ulong new_priv = ldtul_p(mem_buf) & 0x3; + bool new_virt = 0; + + if (new_priv == PRV_RESERVED) { + new_priv = PRV_S; + } + + if (new_priv != PRV_M) { + new_virt = (ldtul_p(mem_buf) & BIT(2)) >> 2; } + + if (riscv_has_ext(env, RVH) && new_virt != env->virt_enabled) { + riscv_cpu_swap_hypervisor_regs(env); + } + + riscv_cpu_set_mode(env, new_priv, new_virt); #endif return sizeof(target_ulong); } @@ -344,7 +359,7 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs) ricsv_gen_dynamic_vector_feature(cs, cs->gdb_num_regs), 0); } - switch (mcc->misa_mxl_max) { + switch (mcc->def->misa_mxl_max) { case MXL_RV32: gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual, diff --git a/target/riscv/helper.h b/target/riscv/helper.h index 451261ce5a4..f712b1c368e 100644 --- a/target/riscv/helper.h +++ b/target/riscv/helper.h @@ -131,10 +131,13 @@ DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl) #ifndef CONFIG_USER_ONLY DEF_HELPER_1(sret, tl, env) DEF_HELPER_1(mret, tl, env) +DEF_HELPER_1(mnret, tl, env) +DEF_HELPER_1(ctr_clear, void, env) DEF_HELPER_1(wfi, void, env) DEF_HELPER_1(wrs_nto, void, env) DEF_HELPER_1(tlb_flush, void, env) DEF_HELPER_1(tlb_flush_all, void, env) +DEF_HELPER_4(ctr_add_entry, void, env, tl, tl, tl) /* Native Debug */ DEF_HELPER_1(itrigger_match, void, env) #endif @@ -156,7 +159,7 @@ DEF_HELPER_FLAGS_3(hyp_hsv_d, TCG_CALL_NO_WG, void, env, tl, tl) #endif /* Vector functions */ -DEF_HELPER_3(vsetvl, tl, env, tl, tl) +DEF_HELPER_4(vsetvl, tl, env, tl, tl, tl) DEF_HELPER_5(vle8_v, void, ptr, ptr, tl, env, i32) DEF_HELPER_5(vle16_v, void, ptr, ptr, tl, env, i32) DEF_HELPER_5(vle32_v, void, ptr, ptr, tl, env, i32) diff --git a/target/riscv/insn16.decode b/target/riscv/insn16.decode index 3953bcf82d6..bf893d1c2e9 100644 --- a/target/riscv/insn16.decode +++ b/target/riscv/insn16.decode @@ -140,6 +140,10 @@ sw 110 ... ... .. ... 00 @cs_w addi 000 . ..... ..... 01 @ci addi 010 . ..... ..... 01 @c_li { + # c.sspush x1 carving out of zcmops + sspush 011 0 00001 00000 01 &r2_s rs2=1 rs1=0 + # c.sspopchk x5 carving out of zcmops + sspopchk 011 0 00101 00000 01 &r2 rs1=5 rd=0 c_mop_n 011 0 0 n:3 1 00000 01 illegal 011 0 ----- 00000 01 # c.addi16sp and c.lui, RES nzimm=0 addi 011 . 00010 ..... 01 @c_addi16sp diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index c45b8fa1d80..cd23b1f3a9b 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -114,16 +114,22 @@ # *** Privileged Instructions *** ecall 000000000000 00000 000 00000 1110011 ebreak 000000000001 00000 000 00000 1110011 +sctrclr 000100000100 00000 000 00000 1110011 uret 0000000 00010 00000 000 00000 1110011 sret 0001000 00010 00000 000 00000 1110011 mret 0011000 00010 00000 000 00000 1110011 wfi 0001000 00101 00000 000 00000 1110011 sfence_vma 0001001 ..... ..... 000 00000 1110011 @sfence_vma -sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm + +# *** NMI *** +mnret 0111000 00010 00000 000 00000 1110011 # *** RV32I Base Instruction Set *** lui .................... ..... 0110111 @u -auipc .................... ..... 0010111 @u +{ + lpad label:20 00000 0010111 + auipc .................... ..... 
0010111 @u +} jal .................... ..... 1101111 @j jalr ............ ..... 000 ..... 1100111 @i beq ....... ..... ..... 000 ..... 1100011 @b @@ -243,6 +249,7 @@ remud 0000001 ..... ..... 111 ..... 1111011 @r lr_w 00010 . . 00000 ..... 010 ..... 0101111 @atom_ld sc_w 00011 . . ..... ..... 010 ..... 0101111 @atom_st amoswap_w 00001 . . ..... ..... 010 ..... 0101111 @atom_st +ssamoswap_w 01001 . . ..... ..... 010 ..... 0101111 @atom_st amoadd_w 00000 . . ..... ..... 010 ..... 0101111 @atom_st amoxor_w 00100 . . ..... ..... 010 ..... 0101111 @atom_st amoand_w 01100 . . ..... ..... 010 ..... 0101111 @atom_st @@ -256,6 +263,7 @@ amomaxu_w 11100 . . ..... ..... 010 ..... 0101111 @atom_st lr_d 00010 . . 00000 ..... 011 ..... 0101111 @atom_ld sc_d 00011 . . ..... ..... 011 ..... 0101111 @atom_st amoswap_d 00001 . . ..... ..... 011 ..... 0101111 @atom_st +ssamoswap_d 01001 . . ..... ..... 011 ..... 0101111 @atom_st amoadd_d 00000 . . ..... ..... 011 ..... 0101111 @atom_st amoxor_d 00100 . . ..... ..... 011 ..... 0101111 @atom_st amoand_d 01100 . . ..... ..... 011 ..... 0101111 @atom_st @@ -695,14 +703,14 @@ vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm # Vector widening ordered and unordered float reduction sum vfwredusum_vs 110001 . ..... ..... 001 ..... 1010111 @r_vm vfwredosum_vs 110011 . ..... ..... 001 ..... 1010111 @r_vm -vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r -vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r -vmandn_mm 011000 - ..... ..... 010 ..... 1010111 @r -vmxor_mm 011011 - ..... ..... 010 ..... 1010111 @r -vmor_mm 011010 - ..... ..... 010 ..... 1010111 @r -vmnor_mm 011110 - ..... ..... 010 ..... 1010111 @r -vmorn_mm 011100 - ..... ..... 010 ..... 1010111 @r -vmxnor_mm 011111 - ..... ..... 010 ..... 1010111 @r +vmand_mm 011001 1 ..... ..... 010 ..... 1010111 @r +vmnand_mm 011101 1 ..... ..... 010 ..... 1010111 @r +vmandn_mm 011000 1 ..... ..... 010 ..... 1010111 @r +vmxor_mm 011011 1 ..... ..... 010 ..... 1010111 @r +vmor_mm 011010 1 ..... ..... 010 ..... 1010111 @r +vmnor_mm 011110 1 ..... ..... 010 ..... 1010111 @r +vmorn_mm 011100 1 ..... ..... 010 ..... 1010111 @r +vmxnor_mm 011111 1 ..... ..... 010 ..... 1010111 @r vcpop_m 010000 . ..... 10000 010 ..... 1010111 @r2_vm vfirst_m 010000 . ..... 10001 010 ..... 1010111 @r2_vm vmsbf_m 010100 . ..... 00001 010 ..... 1010111 @r2_vm @@ -724,7 +732,7 @@ vrgather_vv 001100 . ..... ..... 000 ..... 1010111 @r_vm vrgatherei16_vv 001110 . ..... ..... 000 ..... 1010111 @r_vm vrgather_vx 001100 . ..... ..... 100 ..... 1010111 @r_vm vrgather_vi 001100 . ..... ..... 011 ..... 1010111 @r_vm -vcompress_vm 010111 - ..... ..... 010 ..... 1010111 @r +vcompress_vm 010111 1 ..... ..... 010 ..... 1010111 @r vmv1r_v 100111 1 ..... 00000 011 ..... 1010111 @r2rd vmv2r_v 100111 1 ..... 00001 011 ..... 1010111 @r2rd vmv4r_v 100111 1 ..... 00011 011 ..... 1010111 @r2rd @@ -1019,8 +1027,23 @@ amocas_d 00101 . . ..... ..... 011 ..... 0101111 @atom_st amocas_q 00101 . . ..... ..... 100 ..... 0101111 @atom_st # *** Zimop may-be-operation extension *** -mop_r_n 1 . 00 .. 0111 .. ..... 100 ..... 1110011 @mop5 -mop_rr_n 1 . 00 .. 1 ..... ..... 100 ..... 1110011 @mop3 +{ + # zicfiss instructions carved out of mop.r + [ + ssrdp 1100110 11100 00000 100 rd:5 1110011 + sspopchk 1100110 11100 00001 100 00000 1110011 &r2 rs1=1 rd=0 + sspopchk 1100110 11100 00101 100 00000 1110011 &r2 rs1=5 rd=0 + ] + mop_r_n 1 . 00 .. 0111 .. ..... 100 ..... 
1110011 @mop5 +} +{ + # zicfiss instruction carved out of mop.rr + [ + sspush 1100111 00001 00000 100 00000 1110011 &r2_s rs2=1 rs1=0 + sspush 1100111 00101 00000 100 00000 1110011 &r2_s rs2=5 rs1=0 + ] + mop_rr_n 1 . 00 .. 1 ..... ..... 100 ..... 1110011 @mop3 +} # *** Zabhb Standard Extension *** amoswap_b 00001 . . ..... ..... 000 ..... 0101111 @atom_st diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc index bc5263a4e0f..8a62b4cfcd9 100644 --- a/target/riscv/insn_trans/trans_privileged.c.inc +++ b/target/riscv/insn_trans/trans_privileged.c.inc @@ -18,6 +18,12 @@ * this program. If not, see . */ +#define REQUIRE_SMRNMI(ctx) do { \ + if (!ctx->cfg_ptr->ext_smrnmi) { \ + return false; \ + } \ +} while (0) + static bool trans_ecall(DisasContext *ctx, arg_ecall *a) { /* always generates U-level ECALL, fixed in do_interrupt handler */ @@ -69,6 +75,17 @@ static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a) return true; } +static bool trans_sctrclr(DisasContext *ctx, arg_sctrclr *a) +{ +#ifndef CONFIG_USER_ONLY + if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) { + gen_helper_ctr_clear(tcg_env); + return true; + } +#endif + return false; +} + static bool trans_uret(DisasContext *ctx, arg_uret *a) { return false; @@ -78,8 +95,9 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a) { #ifndef CONFIG_USER_ONLY if (has_ext(ctx, RVS)) { - decode_save_opc(ctx); + decode_save_opc(ctx, 0); translator_io_start(&ctx->base); + gen_update_pc(ctx, 0); gen_helper_sret(cpu_pc, tcg_env); exit_tb(ctx); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; @@ -95,8 +113,9 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a) static bool trans_mret(DisasContext *ctx, arg_mret *a) { #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); translator_io_start(&ctx->base); + gen_update_pc(ctx, 0); gen_helper_mret(cpu_pc, tcg_env); exit_tb(ctx); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; @@ -106,10 +125,24 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a) #endif } +static bool trans_mnret(DisasContext *ctx, arg_mnret *a) +{ +#ifndef CONFIG_USER_ONLY + REQUIRE_SMRNMI(ctx); + decode_save_opc(ctx, 0); + gen_helper_mnret(cpu_pc, tcg_env); + tcg_gen_exit_tb(NULL, 0); /* no chaining */ + ctx->base.is_jmp = DISAS_NORETURN; + return true; +#else + return false; +#endif +} + static bool trans_wfi(DisasContext *ctx, arg_wfi *a) { #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_update_pc(ctx, ctx->cur_insn_len); gen_helper_wfi(tcg_env); return true; @@ -121,14 +154,9 @@ static bool trans_wfi(DisasContext *ctx, arg_wfi *a) static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a) { #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_tlb_flush(tcg_env); return true; #endif return false; } - -static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a) -{ - return false; -} diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc index 39bbf60f3c3..9cf3ae8019b 100644 --- a/target/riscv/insn_trans/trans_rva.c.inc +++ b/target/riscv/insn_trans/trans_rva.c.inc @@ -34,7 +34,7 @@ static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) { TCGv src1; - decode_save_opc(ctx); + decode_save_opc(ctx, 0); src1 = get_address(ctx, a->rs1, 0); if (a->rl) { tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); @@ -61,7 +61,7 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) TCGLabel *l1 = gen_new_label(); 
TCGLabel *l2 = gen_new_label(); - decode_save_opc(ctx); + decode_save_opc(ctx, 0); src1 = get_address(ctx, a->rs1, 0); tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1); diff --git a/target/riscv/insn_trans/trans_rvbf16.c.inc b/target/riscv/insn_trans/trans_rvbf16.c.inc index 0a9cd1ec315..066dc364c5b 100644 --- a/target/riscv/insn_trans/trans_rvbf16.c.inc +++ b/target/riscv/insn_trans/trans_rvbf16.c.inc @@ -119,8 +119,11 @@ static bool trans_vfwmaccbf16_vv(DisasContext *ctx, arg_vfwmaccbf16_vv *a) REQUIRE_FPU; REQUIRE_ZVFBFWMA(ctx); + uint8_t sew = ctx->sew; if (require_rvv(ctx) && vext_check_isa_ill(ctx) && (ctx->sew == MO_16) && - vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm)) { + vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm) && + vext_check_input_eew(ctx, a->rd, sew + 1, a->rs1, sew, a->vm) && + vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) { uint32_t data = 0; gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN); @@ -146,8 +149,10 @@ static bool trans_vfwmaccbf16_vf(DisasContext *ctx, arg_vfwmaccbf16_vf *a) REQUIRE_FPU; REQUIRE_ZVFBFWMA(ctx); + uint8_t sew = ctx->sew; if (require_rvv(ctx) && (ctx->sew == MO_16) && vext_check_isa_ill(ctx) && - vext_check_ds(ctx, a->rd, a->rs2, a->vm)) { + vext_check_ds(ctx, a->rd, a->rs2, a->vm) && + vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) { uint32_t data = 0; gen_set_rm(ctx, RISCV_FRM_DYN); diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc index 1f5fac65a22..30883ea37c8 100644 --- a/target/riscv/insn_trans/trans_rvd.c.inc +++ b/target/riscv/insn_trans/trans_rvd.c.inc @@ -47,11 +47,21 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a) REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); - if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + /* + * FLD and FSD are only guaranteed to execute atomically if the effective + * address is naturally aligned and XLEN≥64. Also, zama16b applies to + * loads and stores of no more than MXLEN bits defined in the F, D, and + * Q extensions. 
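The atomicity choice spelled out in this comment reduces to a three-way policy: on RV32 a 64-bit FPR access can never be a single naturally aligned XLEN-sized access, so no atomicity is requested; with Zama16b the access only has to stay atomic within an aligned 16-byte window; otherwise it must be atomic only when naturally aligned. A minimal stand-alone sketch of that selection, using a local enum instead of QEMU's MemOp flags, follows.

#include <stdio.h>

/* Hypothetical stand-ins for QEMU's MO_ATOM_* flags, for illustration only. */
enum atomicity { ATOM_NONE, ATOM_WITHIN16, ATOM_IFALIGN };

static enum atomicity fpr64_atomicity(int xlen, int have_zama16b)
{
    if (xlen == 32) {
        return ATOM_NONE;       /* FLD/FSD can never be one XLEN-sized access */
    } else if (have_zama16b) {
        return ATOM_WITHIN16;   /* atomic within an aligned 16-byte region */
    }
    return ATOM_IFALIGN;        /* atomic only if naturally aligned */
}

int main(void)
{
    printf("%d %d %d\n",
           fpr64_atomicity(32, 1),   /* ATOM_NONE */
           fpr64_atomicity(64, 1),   /* ATOM_WITHIN16 */
           fpr64_atomicity(64, 0));  /* ATOM_IFALIGN */
    return 0;
}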
+ */ + if (get_xl_max(ctx) == MXL_RV32) { + memop |= MO_ATOM_NONE; + } else if (ctx->cfg_ptr->ext_zama16b) { memop |= MO_ATOM_WITHIN16; + } else { + memop |= MO_ATOM_IFALIGN; } - decode_save_opc(ctx); + decode_save_opc(ctx, 0); addr = get_address(ctx, a->rs1, a->imm); tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, memop); @@ -67,11 +77,15 @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a) REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); - if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + if (get_xl_max(ctx) == MXL_RV32) { + memop |= MO_ATOM_NONE; + } else if (ctx->cfg_ptr->ext_zama16b) { memop |= MO_ATOM_WITHIN16; + } else { + memop |= MO_ATOM_IFALIGN; } - decode_save_opc(ctx); + decode_save_opc(ctx, 0); addr = get_address(ctx, a->rs1, a->imm); tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop); return true; diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc index f771aa19391..ed73afe0894 100644 --- a/target/riscv/insn_trans/trans_rvf.c.inc +++ b/target/riscv/insn_trans/trans_rvf.c.inc @@ -48,11 +48,11 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a) REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); - if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + if (ctx->cfg_ptr->ext_zama16b) { memop |= MO_ATOM_WITHIN16; } - decode_save_opc(ctx); + decode_save_opc(ctx, 0); addr = get_address(ctx, a->rs1, a->imm); dest = cpu_fpr[a->rd]; tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop); @@ -70,11 +70,11 @@ static bool trans_fsw(DisasContext *ctx, arg_fsw *a) REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); - if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + if (ctx->cfg_ptr->ext_zama16b) { memop |= MO_ATOM_WITHIN16; } - decode_save_opc(ctx); + decode_save_opc(ctx, 0); addr = get_address(ctx, a->rs1, a->imm); tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop); return true; diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc index aa9d41c18c6..03c66944308 100644 --- a/target/riscv/insn_trans/trans_rvh.c.inc +++ b/target/riscv/insn_trans/trans_rvh.c.inc @@ -44,7 +44,7 @@ static bool do_hlv(DisasContext *ctx, arg_r2 *a, TCGv dest = dest_gpr(ctx, a->rd); TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE); - decode_save_opc(ctx); + decode_save_opc(ctx, 0); func(dest, tcg_env, addr); gen_set_gpr(ctx, a->rd, dest); return true; @@ -56,7 +56,7 @@ static bool do_hsv(DisasContext *ctx, arg_r2_s *a, TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE); TCGv data = get_gpr(ctx, a->rs2, EXT_NONE); - decode_save_opc(ctx); + decode_save_opc(ctx, 0); func(tcg_env, addr, data); return true; } @@ -147,7 +147,7 @@ static bool trans_hfence_gvma(DisasContext *ctx, arg_sfence_vma *a) { REQUIRE_EXT(ctx, RVH); #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_hyp_gvma_tlb_flush(tcg_env); return true; #endif @@ -158,7 +158,7 @@ static bool trans_hfence_vvma(DisasContext *ctx, arg_sfence_vma *a) { REQUIRE_EXT(ctx, RVH); #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_hyp_tlb_flush(tcg_env); return true; #endif diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc index 98e3806d5e5..b9c71604687 100644 --- a/target/riscv/insn_trans/trans_rvi.c.inc +++ b/target/riscv/insn_trans/trans_rvi.c.inc @@ -36,6 +36,49 @@ static bool trans_lui(DisasContext *ctx, arg_lui *a) return true; } +static bool trans_lpad(DisasContext *ctx, arg_lpad *a) +{ + /* + * fcfi_lp_expected can set only if fcfi was eanbled. 
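The landing-pad acceptance test implemented in the body below boils down to: reject a misaligned lpad, and when the instruction's 20-bit label is non-zero require it to equal bits 31:12 of x7/t2; either failure raises the software-check exception, success clears the expected-landing-pad flag. A small user-space sketch of that test, with the register value passed in directly and exception raising reduced to a boolean, is:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Minimal sketch of the lpad label check, assuming the 20-bit label is
 * matched against bits 31:12 of x7/t2, as the extract in the translator does.
 * Returns true when the landing pad is accepted.
 */
static bool lpad_ok(uint64_t pc, uint64_t x7, uint32_t insn_label)
{
    if (pc & 0x3) {
        return false;                      /* misaligned landing pad */
    }
    if (insn_label != 0) {
        uint32_t expected = (uint32_t)((x7 >> 12) & 0xfffff);
        if (expected != insn_label) {
            return false;                  /* label mismatch */
        }
    }
    return true;                           /* accepted: clear elp */
}

int main(void)
{
    printf("%d\n", lpad_ok(0x80001000, 0xabcde000, 0xabcde)); /* 1 */
    printf("%d\n", lpad_ok(0x80001002, 0xabcde000, 0xabcde)); /* 0: misaligned */
    printf("%d\n", lpad_ok(0x80001000, 0x12345000, 0xabcde)); /* 0: mismatch */
    return 0;
}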
+ * translate further only if fcfi_lp_expected set. + * lpad comes from NOP space anyways, so return true if + * fcfi_lp_expected is false. + */ + if (!ctx->fcfi_lp_expected) { + return true; + } + + ctx->fcfi_lp_expected = false; + if ((ctx->base.pc_next) & 0x3) { + /* + * misaligned, according to spec we should raise sw check exception + */ + tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL), + tcg_env, offsetof(CPURISCVState, sw_check_code)); + gen_helper_raise_exception(tcg_env, + tcg_constant_i32(RISCV_EXCP_SW_CHECK)); + return true; + } + + /* per spec, label check performed only when embedded label non-zero */ + if (a->label != 0) { + TCGLabel *skip = gen_new_label(); + TCGv tmp = tcg_temp_new(); + tcg_gen_extract_tl(tmp, get_gpr(ctx, xT2, EXT_NONE), 12, 20); + tcg_gen_brcondi_tl(TCG_COND_EQ, tmp, a->label, skip); + tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL), + tcg_env, offsetof(CPURISCVState, sw_check_code)); + gen_helper_raise_exception(tcg_env, + tcg_constant_i32(RISCV_EXCP_SW_CHECK)); + gen_set_label(skip); + } + + tcg_gen_st8_tl(tcg_constant_tl(0), tcg_env, + offsetof(CPURISCVState, elp)); + + return true; +} + static bool trans_auipc(DisasContext *ctx, arg_auipc *a) { TCGv target_pc = dest_gpr(ctx, a->rd); @@ -50,6 +93,51 @@ static bool trans_jal(DisasContext *ctx, arg_jal *a) return true; } +#ifndef CONFIG_USER_ONLY +/* + * Indirect calls + * - jalr x1, rs where rs != x5; + * - jalr x5, rs where rs != x1; + * - c.jalr rs1 where rs1 != x5; + * + * Indirect jumps + * - jalr x0, rs where rs != x1 and rs != x5; + * - c.jr rs1 where rs1 != x1 and rs1 != x5. + * + * Returns + * - jalr rd, rs where (rs == x1 or rs == x5) and rd != x1 and rd != x5; + * - c.jr rs1 where rs1 == x1 or rs1 == x5. + * + * Co-routine swap + * - jalr x1, x5; + * - jalr x5, x1; + * - c.jalr x5. + * + * Other indirect jumps + * - jalr rd, rs where rs != x1, rs != x5, rd != x0, rd != x1 and rd != x5. + */ +static void gen_ctr_jalr(DisasContext *ctx, arg_jalr *a, TCGv dest) +{ + TCGv src = tcg_temp_new(); + TCGv type; + + if ((a->rd == 1 && a->rs1 != 5) || (a->rd == 5 && a->rs1 != 1)) { + type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_CALL); + } else if (a->rd == 0 && a->rs1 != 1 && a->rs1 != 5) { + type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_JUMP); + } else if ((a->rs1 == 1 || a->rs1 == 5) && (a->rd != 1 && a->rd != 5)) { + type = tcg_constant_tl(CTRDATA_TYPE_RETURN); + } else if ((a->rs1 == 1 && a->rd == 5) || (a->rs1 == 5 && a->rd == 1)) { + type = tcg_constant_tl(CTRDATA_TYPE_CO_ROUTINE_SWAP); + } else { + type = tcg_constant_tl(CTRDATA_TYPE_OTHER_INDIRECT_JUMP); + } + + gen_pc_plus_diff(src, ctx, 0); + gen_helper_ctr_add_entry(tcg_env, src, dest, type); +} +#endif + static bool trans_jalr(DisasContext *ctx, arg_jalr *a) { TCGLabel *misaligned = NULL; @@ -63,7 +151,9 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a) tcg_gen_ext32s_tl(target_pc, target_pc); } - if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) { + if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr, + ctx->priv_ver, + ctx->misa_ext)) { TCGv t0 = tcg_temp_new(); misaligned = gen_new_label(); @@ -74,7 +164,25 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a) gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len); gen_set_gpr(ctx, a->rd, succ_pc); +#ifndef CONFIG_USER_ONLY + if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) { + gen_ctr_jalr(ctx, a, target_pc); + } +#endif + tcg_gen_mov_tl(cpu_pc, target_pc); + if (ctx->fcfi_enabled) { + /* + * return from functions (i.e. 
rs1 == xRA || rs1 == xT0) are not + * tracked. zicfilp introduces sw guarded branch as well. sw guarded + * branch are not tracked. rs1 == xT2 is a sw guarded branch. + */ + if (a->rs1 != xRA && a->rs1 != xT0 && a->rs1 != xT2) { + tcg_gen_st8_tl(tcg_constant_tl(1), + tcg_env, offsetof(CPURISCVState, elp)); + } + } + lookup_and_goto_ptr(ctx); if (misaligned) { @@ -176,18 +284,44 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond) } else { tcg_gen_brcond_tl(cond, src1, src2, l); } + +#ifndef CONFIG_USER_ONLY + if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) { + TCGv type = tcg_constant_tl(CTRDATA_TYPE_NONTAKEN_BRANCH); + TCGv dest = tcg_temp_new(); + TCGv src = tcg_temp_new(); + + gen_pc_plus_diff(src, ctx, 0); + gen_pc_plus_diff(dest, ctx, ctx->cur_insn_len); + gen_helper_ctr_add_entry(tcg_env, src, dest, type); + } +#endif + gen_goto_tb(ctx, 1, ctx->cur_insn_len); ctx->pc_save = orig_pc_save; gen_set_label(l); /* branch taken */ - if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca && + if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr, + ctx->priv_ver, + ctx->misa_ext) && (a->imm & 0x3)) { /* misaligned */ TCGv target_pc = tcg_temp_new(); gen_pc_plus_diff(target_pc, ctx, a->imm); gen_exception_inst_addr_mis(ctx, target_pc); } else { +#ifndef CONFIG_USER_ONLY + if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) { + TCGv type = tcg_constant_tl(CTRDATA_TYPE_TAKEN_BRANCH); + TCGv dest = tcg_temp_new(); + TCGv src = tcg_temp_new(); + + gen_pc_plus_diff(src, ctx, 0); + gen_pc_plus_diff(dest, ctx, a->imm); + gen_helper_ctr_add_entry(tcg_env, src, dest, type); + } +#endif gen_goto_tb(ctx, 0, a->imm); } ctx->pc_save = -1; @@ -268,10 +402,10 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) { bool out; - if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + if (ctx->cfg_ptr->ext_zama16b) { memop |= MO_ATOM_WITHIN16; } - decode_save_opc(ctx); + decode_save_opc(ctx, 0); if (get_xl(ctx) == MXL_RV128) { out = gen_load_i128(ctx, a, memop); } else { @@ -369,10 +503,10 @@ static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop) static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) { - if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + if (ctx->cfg_ptr->ext_zama16b) { memop |= MO_ATOM_WITHIN16; } - decode_save_opc(ctx); + decode_save_opc(ctx, 0); if (get_xl(ctx) == MXL_RV128) { return gen_store_i128(ctx, a, memop); } else { @@ -834,7 +968,7 @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a) static bool do_csr_post(DisasContext *ctx) { /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */ - decode_save_opc(ctx); + decode_save_opc(ctx, 0); /* We may have changed important cpu state -- exit to main loop. */ gen_update_pc(ctx, ctx->cur_insn_len); exit_tb(ctx); diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index 3a3896ba06c..71f98fb350b 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -100,10 +100,33 @@ static bool require_scale_rvfmin(DisasContext *s) } } -/* Destination vector register group cannot overlap source mask register. */ -static bool require_vm(int vm, int vd) +/* + * Source and destination vector register groups cannot overlap source mask + * register: + * + * A vector register cannot be used to provide source operands with more than + * one EEW for a single instruction. A mask register source is considered to + * have EEW=1 for this constraint. 
An encoding that would result in the same + * vector register being read with two or more different EEWs, including when + * the vector register appears at different positions within two or more vector + * register groups, is reserved. + * (Section 5.2) + * + * A destination vector register group can overlap a source vector + * register group only if one of the following holds: + * 1. The destination EEW equals the source EEW. + * 2. The destination EEW is smaller than the source EEW and the overlap + * is in the lowest-numbered part of the source register group. + * 3. The destination EEW is greater than the source EEW, the source EMUL + * is at least 1, and the overlap is in the highest-numbered part of + * the destination register group. + * For the purpose of determining register group overlap constraints, mask + * elements have EEW=1. + * (Section 5.2) + */ +static bool require_vm(int vm, int v) { - return (vm != 0 || vd != 0); + return (vm != 0 || v != 0); } static bool require_nf(int vd, int nf, int lmul) @@ -179,7 +202,7 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2) s1 = get_gpr(s, rs1, EXT_ZERO); } - gen_helper_vsetvl(dst, tcg_env, s1, s2); + gen_helper_vsetvl(dst, tcg_env, s1, s2, tcg_constant_tl((int) (rd == 0 && rs1 == 0))); gen_set_gpr(s, rd, dst); finalize_rvv_inst(s); @@ -199,7 +222,7 @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2) dst = dest_gpr(s, rd); - gen_helper_vsetvl(dst, tcg_env, s1, s2); + gen_helper_vsetvl(dst, tcg_env, s1, s2, tcg_constant_tl(0)); gen_set_gpr(s, rd, dst); finalize_rvv_inst(s); gen_update_pc(s, s->cur_insn_len); @@ -356,11 +379,41 @@ static bool vext_check_ld_index(DisasContext *s, int vd, int vs2, return ret; } +/* + * Check whether a vector register is used to provide source operands with + * more than one EEW for the vector instruction. 
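The helper introduced below derives each source operand's EMUL from its EEW in log2 form (emul = eew - sew + lmul) and rejects encodings where two sources read with different EEWs occupy overlapping register ranges. A self-contained sketch of that arithmetic is shown here; it covers only the second of the two checks (the v0 mask-overlap rule is omitted), a naive interval test stands in for the translator's is_overlapped(), and sew, lmul and the eew arguments are the same log2 encodings the translator uses (MO_8 = 0, MO_16 = 1, ...).

#include <stdio.h>
#include <stdbool.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Naive interval test standing in for the translator's is_overlapped(). */
static bool groups_overlap(int astart, int asize, int bstart, int bsize)
{
    return astart < bstart + bsize && bstart < astart + asize;
}

/*
 * Sketch: reject an encoding whose two vector sources are read with
 * different EEWs but share registers.  sew, lmul and the eew arguments
 * are log2 values, e.g. SEW=16 with LMUL=1 is sew=1, lmul=0.
 */
static bool input_eew_ok(int sew, int lmul,
                         int vs1, int eew_vs1, int vs2, int eew_vs2)
{
    int emul_vs1 = eew_vs1 - sew + lmul;
    int emul_vs2 = eew_vs2 - sew + lmul;

    if (eew_vs1 == eew_vs2) {
        return true;
    }
    return !groups_overlap(vs1, 1 << MAX(emul_vs1, 0),
                           vs2, 1 << MAX(emul_vs2, 0));
}

int main(void)
{
    /* vwmacc.vv-style case: vd is also a wide (EEW = 2*SEW) source. */
    printf("%d\n", input_eew_ok(1, 0, /* vd */ 8, 2, /* vs2 */ 9, 1));  /* 0 */
    printf("%d\n", input_eew_ok(1, 0, /* vd */ 8, 2, /* vs2 */ 12, 1)); /* 1 */
    return 0;
}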
+ * Returns true if the instruction has valid encoding + * Returns false if encoding violates the mismatched input EEWs constraint + */ +static bool vext_check_input_eew(DisasContext *s, int vs1, uint8_t eew_vs1, + int vs2, uint8_t eew_vs2, int vm) +{ + bool is_valid = true; + int8_t emul_vs1 = eew_vs1 - s->sew + s->lmul; + int8_t emul_vs2 = eew_vs2 - s->sew + s->lmul; + + /* When vm is 0, vs1 & vs2(EEW!=1) group can't overlap v0 (EEW=1) */ + if ((vs1 != -1 && !require_vm(vm, vs1)) || + (vs2 != -1 && !require_vm(vm, vs2))) { + is_valid = false; + } + + /* When eew_vs1 != eew_vs2, check whether vs1 and vs2 are overlapped */ + if ((vs1 != -1 && vs2 != -1) && (eew_vs1 != eew_vs2) && + is_overlapped(vs1, 1 << MAX(emul_vs1, 0), + vs2, 1 << MAX(emul_vs2, 0))) { + is_valid = false; + } + + return is_valid; +} + static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm) { return require_vm(vm, vd) && require_align(vd, s->lmul) && - require_align(vs, s->lmul); + require_align(vs, s->lmul) && + vext_check_input_eew(s, vs, s->sew, -1, s->sew, vm); } /* @@ -379,6 +432,7 @@ static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm) static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm) { return vext_check_ss(s, vd, vs2, vm) && + vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) && require_align(vs1, s->lmul); } @@ -474,6 +528,7 @@ static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2, static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm) { return vext_wide_check_common(s, vd, vm) && + vext_check_input_eew(s, vs, s->sew, -1, 0, vm) && require_align(vs, s->lmul) && require_noover(vd, s->lmul + 1, vs, s->lmul); } @@ -481,6 +536,7 @@ static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm) static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm) { return vext_wide_check_common(s, vd, vm) && + vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm) && require_align(vs, s->lmul + 1); } @@ -499,6 +555,7 @@ static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm) static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm) { return vext_check_ds(s, vd, vs2, vm) && + vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) && require_align(vs1, s->lmul) && require_noover(vd, s->lmul + 1, vs1, s->lmul); } @@ -521,12 +578,14 @@ static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm) static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm) { return vext_check_ds(s, vd, vs1, vm) && + vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) && require_align(vs2, s->lmul + 1); } static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm) { - bool ret = vext_narrow_check_common(s, vd, vs, vm); + bool ret = vext_narrow_check_common(s, vd, vs, vm) && + vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm); if (vd != vs) { ret &= require_noover(vd, s->lmul, vs, s->lmul + 1); } @@ -549,6 +608,7 @@ static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm) static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm) { return vext_check_sd(s, vd, vs2, vm) && + vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) && require_align(vs1, s->lmul); } @@ -584,7 +644,9 @@ static bool vext_check_slide(DisasContext *s, int vd, int vs2, { bool ret = require_align(vs2, s->lmul) && require_align(vd, s->lmul) && - require_vm(vm, vd); + require_vm(vm, vd) && + vext_check_input_eew(s, -1, 0, vs2, s->sew, vm); + if (is_over) { ret &= (vd != vs2); } @@ 
-770,6 +832,7 @@ static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew) /* Mask destination register are always tail-agnostic */ data = FIELD_DP32(data, VDATA, VTA, s->cfg_vta_all_1s); data = FIELD_DP32(data, VDATA, VMA, s->vma); + data = FIELD_DP32(data, VDATA, VM, 1); return ldst_us_trans(a->rd, a->rs1, data, fn, s, false); } @@ -787,6 +850,7 @@ static bool st_us_mask_op(DisasContext *s, arg_vsm_v *a, uint8_t eew) /* EMUL = 1, NFIELDS = 1 */ data = FIELD_DP32(data, VDATA, LMUL, 0); data = FIELD_DP32(data, VDATA, NF, 1); + data = FIELD_DP32(data, VDATA, VM, 1); return ldst_us_trans(a->rd, a->rs1, data, fn, s, true); } @@ -979,7 +1043,8 @@ static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) { return require_rvv(s) && vext_check_isa_ill(s) && - vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew); + vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew) && + vext_check_input_eew(s, -1, 0, a->rs2, eew, a->vm); } GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check) @@ -1031,7 +1096,8 @@ static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) { return require_rvv(s) && vext_check_isa_ill(s) && - vext_check_st_index(s, a->rd, a->rs2, a->nf, eew); + vext_check_st_index(s, a->rd, a->rs2, a->nf, eew) && + vext_check_input_eew(s, a->rd, s->sew, a->rs2, eew, a->vm); } GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check) @@ -1061,6 +1127,12 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data, fn(dest, mask, base, tcg_env, desc); finalize_rvv_inst(s); + + /* vector unit-stride fault-only-first load may modify vl CSR */ + gen_update_pc(s, s->cur_insn_len); + lookup_and_goto_ptr(s); + s->base.is_jmp = DISAS_NORETURN; + return true; } @@ -1098,24 +1170,86 @@ GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check) typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32); static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf, - gen_helper_ldst_whole *fn, - DisasContext *s) + uint32_t log2_esz, gen_helper_ldst_whole *fn, + DisasContext *s, bool is_load) { - TCGv_ptr dest; - TCGv base; - TCGv_i32 desc; - - uint32_t data = FIELD_DP32(0, VDATA, NF, nf); - dest = tcg_temp_new_ptr(); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, - s->cfg_ptr->vlenb, data)); - - base = get_gpr(s, rs1, EXT_NONE); - tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); - mark_vs_dirty(s); - fn(dest, base, tcg_env, desc); + /* + * Load/store multiple bytes per iteration. + * When possible do this atomically. + * Update vstart with the number of processed elements. + * Use the helper function if either: + * - vstart is not 0. + * - the target has 32 bit registers and we are loading/storing 64 bit long + * elements. This is to ensure that we process every element with a single + * memory instruction. 
+ */ + + bool use_helper_fn = !(s->vstart_eq_zero) || + (TCG_TARGET_REG_BITS == 32 && log2_esz == 3); + + if (!use_helper_fn) { + TCGv addr = tcg_temp_new(); + uint32_t size = s->cfg_ptr->vlenb * nf; + TCGv_i64 t8 = tcg_temp_new_i64(); + TCGv_i32 t4 = tcg_temp_new_i32(); + MemOp atomicity = MO_ATOM_NONE; + if (log2_esz == 0) { + atomicity = MO_ATOM_NONE; + } else { + atomicity = MO_ATOM_IFALIGN_PAIR; + } + if (TCG_TARGET_REG_BITS == 64) { + for (int i = 0; i < size; i += 8) { + addr = get_address(s, rs1, i); + if (is_load) { + tcg_gen_qemu_ld_i64(t8, addr, s->mem_idx, + MO_LE | MO_64 | atomicity); + tcg_gen_st_i64(t8, tcg_env, vreg_ofs(s, vd) + i); + } else { + tcg_gen_ld_i64(t8, tcg_env, vreg_ofs(s, vd) + i); + tcg_gen_qemu_st_i64(t8, addr, s->mem_idx, + MO_LE | MO_64 | atomicity); + } + if (i == size - 8) { + tcg_gen_movi_tl(cpu_vstart, 0); + } else { + tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 8 >> log2_esz); + } + } + } else { + for (int i = 0; i < size; i += 4) { + addr = get_address(s, rs1, i); + if (is_load) { + tcg_gen_qemu_ld_i32(t4, addr, s->mem_idx, + MO_LE | MO_32 | atomicity); + tcg_gen_st_i32(t4, tcg_env, vreg_ofs(s, vd) + i); + } else { + tcg_gen_ld_i32(t4, tcg_env, vreg_ofs(s, vd) + i); + tcg_gen_qemu_st_i32(t4, addr, s->mem_idx, + MO_LE | MO_32 | atomicity); + } + if (i == size - 4) { + tcg_gen_movi_tl(cpu_vstart, 0); + } else { + tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 4 >> log2_esz); + } + } + } + } else { + TCGv_ptr dest; + TCGv base; + TCGv_i32 desc; + uint32_t data = FIELD_DP32(0, VDATA, NF, nf); + data = FIELD_DP32(data, VDATA, VM, 1); + dest = tcg_temp_new_ptr(); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); + base = get_gpr(s, rs1, EXT_NONE); + tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); + fn(dest, base, tcg_env, desc); + } finalize_rvv_inst(s); return true; @@ -1125,42 +1259,42 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf, * load and store whole register instructions ignore vtype and vl setting. * Thus, we don't need to check vill bit. 
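When the fast path is taken, the generated code is effectively a chunked copy between guest memory and the vd backing store: vlenb * nf bytes moved eight bytes at a time on a 64-bit host (four at a time on a 32-bit host), with vstart advanced by the number of elements in each chunk and cleared after the last one. A stand-alone sketch of the 64-bit-host shape of that loop, assuming the total size is a multiple of eight bytes and using plain arrays in place of the guest address space and the register file, is below.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/*
 * Sketch of the inlined whole-register load: copy vlenb * nf bytes in
 * 8-byte chunks, advancing vstart by (8 >> log2_esz) elements per chunk
 * and clearing it after the last one.  guest_mem and vreg are stand-ins
 * for the guest address space and the vd register-file backing store.
 */
static void whole_reg_load(uint8_t *vreg, const uint8_t *guest_mem,
                           uint32_t vlenb, uint32_t nf, uint32_t log2_esz,
                           uint32_t *vstart)
{
    uint32_t size = vlenb * nf;

    for (uint32_t i = 0; i < size; i += 8) {
        memcpy(vreg + i, guest_mem + i, 8);
        *vstart = (i == size - 8) ? 0 : *vstart + (8 >> log2_esz);
    }
}

int main(void)
{
    uint8_t mem[32], reg[32];
    uint32_t vstart = 0;

    for (int i = 0; i < 32; i++) {
        mem[i] = (uint8_t)i;
    }
    /* vl2re16.v-style: VLEN=128 (vlenb=16), nf=2, 16-bit elements. */
    whole_reg_load(reg, mem, 16, 2, 1, &vstart);
    printf("%d %u\n", memcmp(reg, mem, 32), vstart); /* 0 0 */
    return 0;
}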
(Section 7.9) */ -#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF) \ -static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ -{ \ - if (require_rvv(s) && \ - QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \ - return ldst_whole_trans(a->rd, a->rs1, ARG_NF, \ - gen_helper_##NAME, s); \ - } \ - return false; \ -} - -GEN_LDST_WHOLE_TRANS(vl1re8_v, 1) -GEN_LDST_WHOLE_TRANS(vl1re16_v, 1) -GEN_LDST_WHOLE_TRANS(vl1re32_v, 1) -GEN_LDST_WHOLE_TRANS(vl1re64_v, 1) -GEN_LDST_WHOLE_TRANS(vl2re8_v, 2) -GEN_LDST_WHOLE_TRANS(vl2re16_v, 2) -GEN_LDST_WHOLE_TRANS(vl2re32_v, 2) -GEN_LDST_WHOLE_TRANS(vl2re64_v, 2) -GEN_LDST_WHOLE_TRANS(vl4re8_v, 4) -GEN_LDST_WHOLE_TRANS(vl4re16_v, 4) -GEN_LDST_WHOLE_TRANS(vl4re32_v, 4) -GEN_LDST_WHOLE_TRANS(vl4re64_v, 4) -GEN_LDST_WHOLE_TRANS(vl8re8_v, 8) -GEN_LDST_WHOLE_TRANS(vl8re16_v, 8) -GEN_LDST_WHOLE_TRANS(vl8re32_v, 8) -GEN_LDST_WHOLE_TRANS(vl8re64_v, 8) +#define GEN_LDST_WHOLE_TRANS(NAME, ETYPE, ARG_NF, IS_LOAD) \ +static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ +{ \ + if (require_rvv(s) && \ + QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \ + return ldst_whole_trans(a->rd, a->rs1, ARG_NF, ctzl(sizeof(ETYPE)), \ + gen_helper_##NAME, s, IS_LOAD); \ + } \ + return false; \ +} + +GEN_LDST_WHOLE_TRANS(vl1re8_v, int8_t, 1, true) +GEN_LDST_WHOLE_TRANS(vl1re16_v, int16_t, 1, true) +GEN_LDST_WHOLE_TRANS(vl1re32_v, int32_t, 1, true) +GEN_LDST_WHOLE_TRANS(vl1re64_v, int64_t, 1, true) +GEN_LDST_WHOLE_TRANS(vl2re8_v, int8_t, 2, true) +GEN_LDST_WHOLE_TRANS(vl2re16_v, int16_t, 2, true) +GEN_LDST_WHOLE_TRANS(vl2re32_v, int32_t, 2, true) +GEN_LDST_WHOLE_TRANS(vl2re64_v, int64_t, 2, true) +GEN_LDST_WHOLE_TRANS(vl4re8_v, int8_t, 4, true) +GEN_LDST_WHOLE_TRANS(vl4re16_v, int16_t, 4, true) +GEN_LDST_WHOLE_TRANS(vl4re32_v, int32_t, 4, true) +GEN_LDST_WHOLE_TRANS(vl4re64_v, int64_t, 4, true) +GEN_LDST_WHOLE_TRANS(vl8re8_v, int8_t, 8, true) +GEN_LDST_WHOLE_TRANS(vl8re16_v, int16_t, 8, true) +GEN_LDST_WHOLE_TRANS(vl8re32_v, int32_t, 8, true) +GEN_LDST_WHOLE_TRANS(vl8re64_v, int64_t, 8, true) /* * The vector whole register store instructions are encoded similar to * unmasked unit-stride store of elements with EEW=8. 
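The reworked macro now also passes the element C type so each instantiation can hand ldst_whole_trans its log2 element size via ctzl(sizeof(ETYPE)); the stores all pass int8_t because, as noted just above, they are encoded like unmasked EEW=8 stores. A tiny sketch of the values this yields per instantiation:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same value ctzl(sizeof(ETYPE)) produces in the macro: log2 of the element size. */
static int log2_esz(size_t esz)
{
    int n = 0;
    while (esz > 1) {
        esz >>= 1;
        n++;
    }
    return n;
}

int main(void)
{
    printf("%d %d %d %d\n",
           log2_esz(sizeof(int8_t)),   /* 0: vl*re8.v and every vs*r.v store */
           log2_esz(sizeof(int16_t)),  /* 1: vl*re16.v */
           log2_esz(sizeof(int32_t)),  /* 2: vl*re32.v */
           log2_esz(sizeof(int64_t))); /* 3: vl*re64.v */
    return 0;
}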
*/ -GEN_LDST_WHOLE_TRANS(vs1r_v, 1) -GEN_LDST_WHOLE_TRANS(vs2r_v, 2) -GEN_LDST_WHOLE_TRANS(vs4r_v, 4) -GEN_LDST_WHOLE_TRANS(vs8r_v, 8) +GEN_LDST_WHOLE_TRANS(vs1r_v, int8_t, 1, false) +GEN_LDST_WHOLE_TRANS(vs2r_v, int8_t, 2, false) +GEN_LDST_WHOLE_TRANS(vs4r_v, int8_t, 4, false) +GEN_LDST_WHOLE_TRANS(vs8r_v, int8_t, 8, false) /* *** Vector Integer Arithmetic Instructions @@ -1472,6 +1606,16 @@ static bool opivv_widen_check(DisasContext *s, arg_rmrr *a) vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm); } +/* OPIVV with overwrite and WIDEN */ +static bool opivv_overwrite_widen_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm); +} + static bool do_opivv_widen(DisasContext *s, arg_rmrr *a, gen_helper_gvec_4_ptr *fn, bool (*checkfn)(DisasContext *, arg_rmrr *)) @@ -1519,6 +1663,14 @@ static bool opivx_widen_check(DisasContext *s, arg_rmrr *a) vext_check_ds(s, a->rd, a->rs2, a->vm); } +static bool opivx_overwrite_widen_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_ds(s, a->rd, a->rs2, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm); +} + #define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ { \ @@ -1990,13 +2142,13 @@ GEN_OPIVX_TRANS(vmadd_vx, opivx_check) GEN_OPIVX_TRANS(vnmsub_vx, opivx_check) /* Vector Widening Integer Multiply-Add Instructions */ -GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check) -GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check) -GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check) -GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check) -GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check) -GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check) -GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_overwrite_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_overwrite_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_overwrite_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_overwrite_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_overwrite_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_overwrite_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_overwrite_widen_check) /* Vector Integer Merge and Move Instructions */ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a) @@ -2337,6 +2489,17 @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a) vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm); } +static bool opfvv_overwrite_widen_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + require_rvf(s) && + require_scale_rvf(s) && + vext_check_isa_ill(s) && + vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm); +} + /* OPFVV with WIDEN */ #define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ @@ -2376,11 +2539,21 @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a) vext_check_ds(s, a->rd, a->rs2, a->vm); } +static bool opfvf_overwrite_widen_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + require_rvf(s) && + require_scale_rvf(s) && + vext_check_isa_ill(s) && + vext_check_ds(s, a->rd, a->rs2, a->vm) && + 
vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm); +} + /* OPFVF with WIDEN */ -#define GEN_OPFVF_WIDEN_TRANS(NAME) \ +#define GEN_OPFVF_WIDEN_TRANS(NAME, CHECK) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ { \ - if (opfvf_widen_check(s, a)) { \ + if (CHECK(s, a)) { \ uint32_t data = 0; \ static gen_helper_opfvf *const fns[2] = { \ gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ @@ -2396,8 +2569,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ return false; \ } -GEN_OPFVF_WIDEN_TRANS(vfwadd_vf) -GEN_OPFVF_WIDEN_TRANS(vfwsub_vf) +GEN_OPFVF_WIDEN_TRANS(vfwadd_vf, opfvf_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwsub_vf, opfvf_widen_check) static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a) { @@ -2479,7 +2652,7 @@ GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check) /* Vector Widening Floating-Point Multiply */ GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check) -GEN_OPFVF_WIDEN_TRANS(vfwmul_vf) +GEN_OPFVF_WIDEN_TRANS(vfwmul_vf, opfvf_widen_check) /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */ GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check) @@ -2500,14 +2673,14 @@ GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check) GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check) /* Vector Widening Floating-Point Fused Multiply-Add Instructions */ -GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check) -GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check) -GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check) -GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check) -GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf) -GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf) -GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf) -GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf) +GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_overwrite_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_overwrite_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_overwrite_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_overwrite_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf, opfvf_overwrite_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf, opfvf_overwrite_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf, opfvf_overwrite_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf, opfvf_overwrite_widen_check) /* Vector Floating-Point Square-Root Instruction */ @@ -3172,7 +3345,6 @@ static void load_element(TCGv_i64 dest, TCGv_ptr base, break; default: g_assert_not_reached(); - break; } } @@ -3257,7 +3429,6 @@ static void store_element(TCGv_i64 val, TCGv_ptr base, break; default: g_assert_not_reached(); - break; } } @@ -3425,6 +3596,7 @@ static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a) { return require_rvv(s) && vext_check_isa_ill(s) && + vext_check_input_eew(s, a->rs1, s->sew, a->rs2, s->sew, a->vm) && require_align(a->rd, s->lmul) && require_align(a->rs1, s->lmul) && require_align(a->rs2, s->lmul) && @@ -3437,6 +3609,7 @@ static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a) int8_t emul = MO_16 - s->sew + s->lmul; return require_rvv(s) && vext_check_isa_ill(s) && + vext_check_input_eew(s, a->rs1, MO_16, a->rs2, s->sew, a->vm) && (emul >= -3 && emul <= 3) && require_align(a->rd, s->lmul) && require_align(a->rs1, emul) && @@ -3456,6 +3629,7 @@ static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a) { return require_rvv(s) && vext_check_isa_ill(s) && + vext_check_input_eew(s, -1, MO_64, a->rs2, s->sew, a->vm) && require_align(a->rd, s->lmul) && require_align(a->rs2, s->lmul) && (a->rd != a->rs2) && @@ -3599,7 +3773,9 @@ static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div) require_align(a->rd, s->lmul) && require_align(a->rs2, s->lmul - div) && 
require_vm(a->vm, a->rd) && - require_noover(a->rd, s->lmul, a->rs2, s->lmul - div); + require_noover(a->rd, s->lmul, a->rs2, s->lmul - div) && + vext_check_input_eew(s, -1, 0, a->rs2, s->sew, a->vm); + return ret; } diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc index ae1f40174a1..27bf3f0b684 100644 --- a/target/riscv/insn_trans/trans_rvvk.c.inc +++ b/target/riscv/insn_trans/trans_rvvk.c.inc @@ -249,7 +249,7 @@ GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check) \ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \ /* save opcode for unwinding in case we throw an exception */ \ - decode_save_opc(s); \ + decode_save_opc(s, 0); \ egs = tcg_constant_i32(EGS); \ gen_helper_egs_check(egs, tcg_env); \ } \ @@ -322,7 +322,7 @@ GEN_V_UNMASKED_TRANS(vaesem_vs, vaes_check_vs, ZVKNED_EGS) \ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \ /* save opcode for unwinding in case we throw an exception */ \ - decode_save_opc(s); \ + decode_save_opc(s, 0); \ egs = tcg_constant_i32(EGS); \ gen_helper_egs_check(egs, tcg_env); \ } \ @@ -389,7 +389,7 @@ GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS) \ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \ /* save opcode for unwinding in case we throw an exception */ \ - decode_save_opc(s); \ + decode_save_opc(s, 0); \ egs = tcg_constant_i32(EGS); \ gen_helper_egs_check(egs, tcg_env); \ } \ @@ -440,7 +440,7 @@ static bool trans_vsha2cl_vv(DisasContext *s, arg_rmrr *a) if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { /* save opcode for unwinding in case we throw an exception */ - decode_save_opc(s); + decode_save_opc(s, 0); egs = tcg_constant_i32(ZVKNH_EGS); gen_helper_egs_check(egs, tcg_env); } @@ -471,7 +471,7 @@ static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a) if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { /* save opcode for unwinding in case we throw an exception */ - decode_save_opc(s); + decode_save_opc(s, 0); egs = tcg_constant_i32(ZVKNH_EGS); gen_helper_egs_check(egs, tcg_env); } diff --git a/target/riscv/insn_trans/trans_rvzacas.c.inc b/target/riscv/insn_trans/trans_rvzacas.c.inc index fcced99fc74..15e688a0331 100644 --- a/target/riscv/insn_trans/trans_rvzacas.c.inc +++ b/target/riscv/insn_trans/trans_rvzacas.c.inc @@ -76,7 +76,7 @@ static bool gen_cmpxchg64(DisasContext *ctx, arg_atomic *a, MemOp mop) TCGv src1 = get_address(ctx, a->rs1, 0); TCGv_i64 src2 = get_gpr_pair(ctx, a->rs2); - decode_save_opc(ctx); + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); tcg_gen_atomic_cmpxchg_i64(dest, src1, dest, src2, ctx->mem_idx, mop); gen_set_gpr_pair(ctx, a->rd, dest); @@ -121,7 +121,7 @@ static bool trans_amocas_q(DisasContext *ctx, arg_amocas_q *a) tcg_gen_concat_i64_i128(src2, src2l, src2h); tcg_gen_concat_i64_i128(dest, destl, desth); - decode_save_opc(ctx); + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); tcg_gen_atomic_cmpxchg_i128(dest, src1, dest, src2, ctx->mem_idx, (MO_ALIGN | MO_TEUO)); diff --git a/target/riscv/insn_trans/trans_rvzce.c.inc b/target/riscv/insn_trans/trans_rvzce.c.inc index cd234ad9607..c77c2b927b0 100644 --- a/target/riscv/insn_trans/trans_rvzce.c.inc +++ b/target/riscv/insn_trans/trans_rvzce.c.inc @@ -203,6 +203,14 @@ static bool gen_pop(DisasContext *ctx, arg_cmpp *a, bool ret, bool ret_val) if (ret) { TCGv ret_addr = get_gpr(ctx, xRA, EXT_SIGN); +#ifndef CONFIG_USER_ONLY + if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) { + TCGv type = tcg_constant_tl(CTRDATA_TYPE_RETURN); + TCGv src = tcg_temp_new(); + gen_pc_plus_diff(src, ctx, 0); + 
gen_helper_ctr_add_entry(tcg_env, src, ret_addr, type); + } +#endif tcg_gen_mov_tl(cpu_pc, ret_addr); tcg_gen_lookup_and_goto_ptr(); ctx->base.is_jmp = DISAS_NORETURN; @@ -309,6 +317,19 @@ static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a) gen_set_gpr(ctx, xRA, succ_pc); } +#ifndef CONFIG_USER_ONLY + if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) { + if (a->index >= 32) { + TCGv type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_CALL); + gen_helper_ctr_add_entry(tcg_env, cpu_pc, addr, type); + } else { + TCGv type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_JUMP); + gen_helper_ctr_add_entry(tcg_env, cpu_pc, addr, type); + } + } +#endif + + tcg_gen_mov_tl(cpu_pc, addr); tcg_gen_lookup_and_goto_ptr(); diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc index 1eb458b491d..bece48e6009 100644 --- a/target/riscv/insn_trans/trans_rvzfh.c.inc +++ b/target/riscv/insn_trans/trans_rvzfh.c.inc @@ -48,7 +48,7 @@ static bool trans_flh(DisasContext *ctx, arg_flh *a) REQUIRE_FPU; REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx); - decode_save_opc(ctx); + decode_save_opc(ctx, 0); t0 = get_gpr(ctx, a->rs1, EXT_NONE); if (a->imm) { TCGv temp = tcg_temp_new(); @@ -71,7 +71,7 @@ static bool trans_fsh(DisasContext *ctx, arg_fsh *a) REQUIRE_FPU; REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx); - decode_save_opc(ctx); + decode_save_opc(ctx, 0); t0 = get_gpr(ctx, a->rs1, EXT_NONE); if (a->imm) { TCGv temp = tcg_temp_new(); diff --git a/target/riscv/insn_trans/trans_rvzicfiss.c.inc b/target/riscv/insn_trans/trans_rvzicfiss.c.inc new file mode 100644 index 00000000000..b0096adcd0e --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzicfiss.c.inc @@ -0,0 +1,131 @@ +/* + * RISC-V translation routines for the Control-Flow Integrity Extension + * + * Copyright (c) 2024 Rivos Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#define REQUIRE_ZICFISS(ctx) do { \ + if (!ctx->cfg_ptr->ext_zicfiss) { \ + return false; \ + } \ +} while (0) + +static bool trans_sspopchk(DisasContext *ctx, arg_sspopchk *a) +{ + if (!ctx->bcfi_enabled) { + return false; + } + + TCGv addr = tcg_temp_new(); + TCGLabel *skip = gen_new_label(); + uint32_t tmp = (get_xl(ctx) == MXL_RV64) ? 
8 : 4; + TCGv data = tcg_temp_new(); + tcg_gen_ld_tl(addr, tcg_env, offsetof(CPURISCVState, ssp)); + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); + tcg_gen_qemu_ld_tl(data, addr, SS_MMU_INDEX(ctx), + mxl_memop(ctx) | MO_ALIGN); + TCGv rs1 = get_gpr(ctx, a->rs1, EXT_NONE); + tcg_gen_brcond_tl(TCG_COND_EQ, data, rs1, skip); + tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_BCFI_TVAL), + tcg_env, offsetof(CPURISCVState, sw_check_code)); + gen_helper_raise_exception(tcg_env, + tcg_constant_i32(RISCV_EXCP_SW_CHECK)); + gen_set_label(skip); + tcg_gen_addi_tl(addr, addr, tmp); + tcg_gen_st_tl(addr, tcg_env, offsetof(CPURISCVState, ssp)); + + return true; +} + +static bool trans_sspush(DisasContext *ctx, arg_sspush *a) +{ + if (!ctx->bcfi_enabled) { + return false; + } + + TCGv addr = tcg_temp_new(); + int tmp = (get_xl(ctx) == MXL_RV64) ? -8 : -4; + TCGv data = get_gpr(ctx, a->rs2, EXT_NONE); + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); + tcg_gen_ld_tl(addr, tcg_env, offsetof(CPURISCVState, ssp)); + tcg_gen_addi_tl(addr, addr, tmp); + tcg_gen_qemu_st_tl(data, addr, SS_MMU_INDEX(ctx), + mxl_memop(ctx) | MO_ALIGN); + tcg_gen_st_tl(addr, tcg_env, offsetof(CPURISCVState, ssp)); + + return true; +} + +static bool trans_ssrdp(DisasContext *ctx, arg_ssrdp *a) +{ + if (!ctx->bcfi_enabled || a->rd == 0) { + return false; + } + + TCGv dest = dest_gpr(ctx, a->rd); + tcg_gen_ld_tl(dest, tcg_env, offsetof(CPURISCVState, ssp)); + gen_set_gpr(ctx, a->rd, dest); + + return true; +} + +static bool trans_ssamoswap_w(DisasContext *ctx, arg_amoswap_w *a) +{ + REQUIRE_A_OR_ZAAMO(ctx); + REQUIRE_ZICFISS(ctx); + if (ctx->priv == PRV_M) { + generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT); + } + + if (!ctx->bcfi_enabled) { + return false; + } + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); + src1 = get_address(ctx, a->rs1, 0); + + tcg_gen_atomic_xchg_tl(dest, src1, src2, SS_MMU_INDEX(ctx), + (MO_ALIGN | MO_TESL)); + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_ssamoswap_d(DisasContext *ctx, arg_amoswap_w *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_A_OR_ZAAMO(ctx); + REQUIRE_ZICFISS(ctx); + if (ctx->priv == PRV_M) { + generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT); + } + + if (!ctx->bcfi_enabled) { + return false; + } + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); + src1 = get_address(ctx, a->rs1, 0); + + tcg_gen_atomic_xchg_tl(dest, src1, src2, SS_MMU_INDEX(ctx), + (MO_ALIGN | MO_TESQ)); + gen_set_gpr(ctx, a->rd, dest); + return true; +} diff --git a/target/riscv/insn_trans/trans_svinval.c.inc b/target/riscv/insn_trans/trans_svinval.c.inc index 0f692a10885..a06c3b214f1 100644 --- a/target/riscv/insn_trans/trans_svinval.c.inc +++ b/target/riscv/insn_trans/trans_svinval.c.inc @@ -28,7 +28,7 @@ static bool trans_sinval_vma(DisasContext *ctx, arg_sinval_vma *a) /* Do the same as sfence.vma currently */ REQUIRE_EXT(ctx, RVS); #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_tlb_flush(tcg_env); return true; #endif @@ -57,7 +57,7 @@ static bool trans_hinval_vvma(DisasContext *ctx, arg_hinval_vvma *a) /* Do the same as hfence.vvma currently */ REQUIRE_EXT(ctx, RVH); #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_hyp_tlb_flush(tcg_env); return true; #endif @@ -70,7 +70,7 @@ static bool 
trans_hinval_gvma(DisasContext *ctx, arg_hinval_gvma *a) /* Do the same as hfence.gvma currently */ REQUIRE_EXT(ctx, RVH); #ifndef CONFIG_USER_ONLY - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_hyp_gvma_tlb_flush(tcg_env); return true; #endif diff --git a/target/riscv/internals.h b/target/riscv/internals.h index 0ac17bc5adb..172296f12e2 100644 --- a/target/riscv/internals.h +++ b/target/riscv/internals.h @@ -19,7 +19,10 @@ #ifndef RISCV_CPU_INTERNALS_H #define RISCV_CPU_INTERNALS_H +#include "exec/cpu-common.h" #include "hw/registerfields.h" +#include "fpu/softfloat-types.h" +#include "target/riscv/cpu_bits.h" /* * The current MMU Modes are: @@ -30,12 +33,15 @@ * - U+2STAGE 0b100 * - S+2STAGE 0b101 * - S+SUM+2STAGE 0b110 + * - Shadow stack+U 0b1000 + * - Shadow stack+S 0b1001 */ #define MMUIdx_U 0 #define MMUIdx_S 1 #define MMUIdx_S_SUM 2 #define MMUIdx_M 3 #define MMU_2STAGE_BIT (1 << 2) +#define MMU_IDX_SS_WRITE (1 << 3) static inline int mmuidx_priv(int mmu_idx) { @@ -136,7 +142,95 @@ static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f) } } -/* Our implementation of CPUClass::has_work */ +static inline float16 check_nanbox_bf16(CPURISCVState *env, uint64_t f) +{ + /* Disable nanbox check when enable zfinx */ + if (env_archcpu(env)->cfg.ext_zfinx) { + return (uint16_t)f; + } + + uint64_t mask = MAKE_64BIT_MASK(16, 48); + + if (likely((f & mask) == mask)) { + return (uint16_t)f; + } else { + return 0x7FC0u; /* default qnan */ + } +} + +static inline target_ulong get_xepc_mask(CPURISCVState *env) +{ + /* When IALIGN=32, both low bits must be zero. + * When IALIGN=16 (has C extension), only bit 0 must be zero. */ + if (riscv_has_ext(env, RVC)) { + return ~(target_ulong)1; + } else { + return ~(target_ulong)3; + } +} + +#ifndef CONFIG_USER_ONLY +/* Our implementation of SysemuCPUOps::has_work */ bool riscv_cpu_has_work(CPUState *cs); +#endif + +/* Zjpm addr masking routine */ +static inline target_ulong adjust_addr_body(CPURISCVState *env, + target_ulong addr, + bool is_virt_addr) +{ + RISCVPmPmm pmm = PMM_FIELD_DISABLED; + uint32_t pmlen = 0; + bool signext = false; + + /* do nothing for rv32 mode */ + if (riscv_cpu_mxl(env) == MXL_RV32) { + return addr; + } + + /* get pmm field depending on whether addr is */ + if (is_virt_addr) { + pmm = riscv_pm_get_virt_pmm(env); + } else { + pmm = riscv_pm_get_pmm(env); + } + + /* if pointer masking is disabled, return original addr */ + if (pmm == PMM_FIELD_DISABLED) { + return addr; + } + + if (!is_virt_addr) { + signext = riscv_cpu_virt_mem_enabled(env); + } + addr = addr << pmlen; + pmlen = riscv_pm_get_pmlen(pmm); + + /* sign/zero extend masked address by N-1 bit */ + if (signext) { + addr = (target_long)addr >> pmlen; + } else { + addr = addr >> pmlen; + } + + return addr; +} + +static inline target_ulong adjust_addr(CPURISCVState *env, + target_ulong addr) +{ + return adjust_addr_body(env, addr, false); +} + +static inline target_ulong adjust_addr_virt(CPURISCVState *env, + target_ulong addr) +{ + return adjust_addr_body(env, addr, true); +} + +static inline int insn_len(uint16_t first_word) +{ + return (first_word & 3) == 3 ? 
4 : 2; +} #endif diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c index f6e3156b8d2..5c19062c19b 100644 --- a/target/riscv/kvm/kvm-cpu.c +++ b/target/riscv/kvm/kvm-cpu.c @@ -27,15 +27,15 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qapi/visitor.h" -#include "sysemu/sysemu.h" -#include "sysemu/kvm.h" -#include "sysemu/kvm_int.h" +#include "system/system.h" +#include "system/kvm.h" +#include "system/kvm_int.h" #include "cpu.h" #include "trace.h" -#include "hw/core/accel-cpu.h" +#include "accel/accel-cpu-target.h" #include "hw/pci/pci.h" #include "exec/memattrs.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/boards.h" #include "hw/irq.h" #include "hw/intc/riscv_imsic.h" @@ -45,7 +45,7 @@ #include "sbi_ecall_interface.h" #include "chardev/char-fe.h" #include "migration/misc.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #include "hw/riscv/numa.h" #define PR_RISCV_V_SET_CONTROL 69 @@ -58,33 +58,17 @@ void riscv_kvm_aplic_request(void *opaque, int irq, int level) static bool cap_has_mp_state; -static uint64_t kvm_riscv_reg_id_ulong(CPURISCVState *env, uint64_t type, - uint64_t idx) -{ - uint64_t id = KVM_REG_RISCV | type | idx; +#define KVM_RISCV_REG_ID_U32(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U32 | \ + type | idx) - switch (riscv_cpu_mxl(env)) { - case MXL_RV32: - id |= KVM_REG_SIZE_U32; - break; - case MXL_RV64: - id |= KVM_REG_SIZE_U64; - break; - default: - g_assert_not_reached(); - } - return id; -} +#define KVM_RISCV_REG_ID_U64(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U64 | \ + type | idx) -static uint64_t kvm_riscv_reg_id_u32(uint64_t type, uint64_t idx) -{ - return KVM_REG_RISCV | KVM_REG_SIZE_U32 | type | idx; -} - -static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx) -{ - return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx; -} +#if defined(TARGET_RISCV64) +#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U64(type, idx) +#else +#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U32(type, idx) +#endif static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b) { @@ -107,45 +91,29 @@ static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu, return kvm_encode_reg_size_id(id, size_b); } -#define RISCV_CORE_REG(env, name) \ - kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \ +#define RISCV_CORE_REG(name) \ + KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, \ KVM_REG_RISCV_CORE_REG(name)) -#define RISCV_CSR_REG(env, name) \ - kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CSR, \ +#define RISCV_CSR_REG(name) \ + KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CSR, \ KVM_REG_RISCV_CSR_REG(name)) -#define RISCV_CONFIG_REG(env, name) \ - kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, \ +#define RISCV_CONFIG_REG(name) \ + KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, \ KVM_REG_RISCV_CONFIG_REG(name)) -#define RISCV_TIMER_REG(name) kvm_riscv_reg_id_u64(KVM_REG_RISCV_TIMER, \ +#define RISCV_TIMER_REG(name) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_TIMER, \ KVM_REG_RISCV_TIMER_REG(name)) -#define RISCV_FP_F_REG(idx) kvm_riscv_reg_id_u32(KVM_REG_RISCV_FP_F, idx) +#define RISCV_FP_F_REG(idx) KVM_RISCV_REG_ID_U32(KVM_REG_RISCV_FP_F, idx) -#define RISCV_FP_D_REG(idx) kvm_riscv_reg_id_u64(KVM_REG_RISCV_FP_D, idx) +#define RISCV_FP_D_REG(idx) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_FP_D, idx) -#define RISCV_VECTOR_CSR_REG(env, name) \ - kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_VECTOR, \ +#define RISCV_VECTOR_CSR_REG(name) \ + KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_VECTOR, \ 
KVM_REG_RISCV_VECTOR_CSR_REG(name)) -#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \ - do { \ - int _ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \ - if (_ret) { \ - return _ret; \ - } \ - } while (0) - -#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \ - do { \ - int _ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \ - if (_ret) { \ - return _ret; \ - } \ - } while (0) - #define KVM_RISCV_GET_TIMER(cs, name, reg) \ do { \ int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), &reg); \ @@ -167,6 +135,7 @@ typedef struct KVMCPUConfig { const char *description; target_ulong offset; uint64_t kvm_reg_id; + uint32_t prop_size; bool user_set; bool supported; } KVMCPUConfig; @@ -248,7 +217,7 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs) /* If we're here we're going to disable the MISA bit */ reg = 0; - id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT, misa_cfg->kvm_reg_id); ret = kvm_set_one_reg(cs, id, &reg); if (ret != 0) { @@ -267,6 +236,56 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs) } } +#define KVM_CSR_CFG(_name, _env_prop, reg_id) \ + {.name = _name, .offset = ENV_CSR_OFFSET(_env_prop), \ + .prop_size = sizeof(((CPURISCVState *)0)->_env_prop), \ + .kvm_reg_id = reg_id} + +static KVMCPUConfig kvm_csr_cfgs[] = { + KVM_CSR_CFG("sstatus", mstatus, RISCV_CSR_REG(sstatus)), + KVM_CSR_CFG("sie", mie, RISCV_CSR_REG(sie)), + KVM_CSR_CFG("stvec", stvec, RISCV_CSR_REG(stvec)), + KVM_CSR_CFG("sscratch", sscratch, RISCV_CSR_REG(sscratch)), + KVM_CSR_CFG("sepc", sepc, RISCV_CSR_REG(sepc)), + KVM_CSR_CFG("scause", scause, RISCV_CSR_REG(scause)), + KVM_CSR_CFG("stval", stval, RISCV_CSR_REG(stval)), + KVM_CSR_CFG("sip", mip, RISCV_CSR_REG(sip)), + KVM_CSR_CFG("satp", satp, RISCV_CSR_REG(satp)), + KVM_CSR_CFG("scounteren", scounteren, RISCV_CSR_REG(scounteren)), + KVM_CSR_CFG("senvcfg", senvcfg, RISCV_CSR_REG(senvcfg)), +}; + +static void *kvmconfig_get_env_addr(RISCVCPU *cpu, KVMCPUConfig *csr_cfg) +{ + return (void *)&cpu->env + csr_cfg->offset; +} + +static uint32_t kvm_cpu_csr_get_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg) +{ + uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg); + return *val32; +} + +static uint64_t kvm_cpu_csr_get_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg) +{ + uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg); + return *val64; +} + +static void kvm_cpu_csr_set_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg, + uint32_t val) +{ + uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg); + *val32 = val; +} + +static void kvm_cpu_csr_set_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg, + uint64_t val) +{ + uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg); + *val64 = val; +} + #define KVM_EXT_CFG(_name, _prop, _reg_id) \ {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \ .kvm_reg_id = _reg_id} @@ -274,6 +293,7 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs) static KVMCPUConfig kvm_multi_ext_cfgs[] = { KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM), KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ), + KVM_EXT_CFG("ziccrse", ext_ziccrse, KVM_RISCV_ISA_EXT_ZICCRSE), KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR), KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND), KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR), @@ -281,7 +301,11 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = { KVM_EXT_CFG("zihintntl", ext_zihintntl, KVM_RISCV_ISA_EXT_ZIHINTNTL), KVM_EXT_CFG("zihintpause", ext_zihintpause,
KVM_RISCV_ISA_EXT_ZIHINTPAUSE), KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM), + KVM_EXT_CFG("zimop", ext_zimop, KVM_RISCV_ISA_EXT_ZIMOP), + KVM_EXT_CFG("zcmop", ext_zcmop, KVM_RISCV_ISA_EXT_ZCMOP), + KVM_EXT_CFG("zabha", ext_zabha, KVM_RISCV_ISA_EXT_ZABHA), KVM_EXT_CFG("zacas", ext_zacas, KVM_RISCV_ISA_EXT_ZACAS), + KVM_EXT_CFG("zawrs", ext_zawrs, KVM_RISCV_ISA_EXT_ZAWRS), KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA), KVM_EXT_CFG("zfh", ext_zfh, KVM_RISCV_ISA_EXT_ZFH), KVM_EXT_CFG("zfhmin", ext_zfhmin, KVM_RISCV_ISA_EXT_ZFHMIN), @@ -292,6 +316,10 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = { KVM_EXT_CFG("zbkc", ext_zbkc, KVM_RISCV_ISA_EXT_ZBKC), KVM_EXT_CFG("zbkx", ext_zbkx, KVM_RISCV_ISA_EXT_ZBKX), KVM_EXT_CFG("zbs", ext_zbs, KVM_RISCV_ISA_EXT_ZBS), + KVM_EXT_CFG("zca", ext_zca, KVM_RISCV_ISA_EXT_ZCA), + KVM_EXT_CFG("zcb", ext_zcb, KVM_RISCV_ISA_EXT_ZCB), + KVM_EXT_CFG("zcd", ext_zcd, KVM_RISCV_ISA_EXT_ZCD), + KVM_EXT_CFG("zcf", ext_zcf, KVM_RISCV_ISA_EXT_ZCF), KVM_EXT_CFG("zknd", ext_zknd, KVM_RISCV_ISA_EXT_ZKND), KVM_EXT_CFG("zkne", ext_zkne, KVM_RISCV_ISA_EXT_ZKNE), KVM_EXT_CFG("zknh", ext_zknh, KVM_RISCV_ISA_EXT_ZKNH), @@ -312,12 +340,18 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = { KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED), KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH), KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT), + KVM_EXT_CFG("smnpm", ext_smnpm, KVM_RISCV_ISA_EXT_SMNPM), KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN), KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA), + KVM_EXT_CFG("sscofpmf", ext_sscofpmf, KVM_RISCV_ISA_EXT_SSCOFPMF), + KVM_EXT_CFG("ssnpm", ext_ssnpm, KVM_RISCV_ISA_EXT_SSNPM), KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC), + KVM_EXT_CFG("svade", ext_svade, KVM_RISCV_ISA_EXT_SVADE), + KVM_EXT_CFG("svadu", ext_svadu, KVM_RISCV_ISA_EXT_SVADU), KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL), KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT), KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT), + KVM_EXT_CFG("svvptc", ext_svvptc, KVM_RISCV_ISA_EXT_SVVPTC), }; static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg) @@ -419,7 +453,6 @@ static KVMCPUConfig kvm_sbi_dbcn = { static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs) { - CPURISCVState *env = &cpu->env; uint64_t id, reg; int i, ret; @@ -430,7 +463,7 @@ static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs) continue; } - id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT, multi_ext_cfg->kvm_reg_id); reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg); ret = kvm_set_one_reg(cs, id, &reg); @@ -555,14 +588,14 @@ static int kvm_riscv_get_regs_core(CPUState *cs) target_ulong reg; CPURISCVState *env = &RISCV_CPU(cs)->env; - ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg); + ret = kvm_get_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg); if (ret) { return ret; } env->pc = reg; for (i = 1; i < 32; i++) { - uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i); + uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i); ret = kvm_get_one_reg(cs, id, &reg); if (ret) { return ret; @@ -581,13 +614,13 @@ static int kvm_riscv_put_regs_core(CPUState *cs) CPURISCVState *env = &RISCV_CPU(cs)->env; reg = env->pc; - ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg); + ret = kvm_set_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg); if (ret) { return ret; } for (i = 1; i < 32; i++) { -
uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i); + uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i); reg = env->gpr[i]; ret = kvm_set_one_reg(cs, id, &reg); if (ret) { @@ -600,38 +633,79 @@ static int kvm_riscv_put_regs_core(CPUState *cs) static int kvm_riscv_get_regs_csr(CPUState *cs) { - CPURISCVState *env = &RISCV_CPU(cs)->env; + RISCVCPU *cpu = RISCV_CPU(cs); + uint64_t reg; + int i, ret; - KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus); - KVM_RISCV_GET_CSR(cs, env, sie, env->mie); - KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec); - KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch); - KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc); - KVM_RISCV_GET_CSR(cs, env, scause, env->scause); - KVM_RISCV_GET_CSR(cs, env, stval, env->stval); - KVM_RISCV_GET_CSR(cs, env, sip, env->mip); - KVM_RISCV_GET_CSR(cs, env, satp, env->satp); + for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) { + KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i]; + + if (!csr_cfg->supported) { + continue; + } + + ret = kvm_get_one_reg(cs, csr_cfg->kvm_reg_id, &reg); + if (ret) { + return ret; + } + + if (csr_cfg->prop_size == sizeof(uint32_t)) { + kvm_cpu_csr_set_u32(cpu, csr_cfg, (uint32_t)reg); + } else if (csr_cfg->prop_size == sizeof(uint64_t)) { + kvm_cpu_csr_set_u64(cpu, csr_cfg, reg); + } else { + g_assert_not_reached(); + } + } return 0; } static int kvm_riscv_put_regs_csr(CPUState *cs) { - CPURISCVState *env = &RISCV_CPU(cs)->env; + RISCVCPU *cpu = RISCV_CPU(cs); + uint64_t reg; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) { + KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i]; - KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus); - KVM_RISCV_SET_CSR(cs, env, sie, env->mie); - KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec); - KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch); - KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc); - KVM_RISCV_SET_CSR(cs, env, scause, env->scause); - KVM_RISCV_SET_CSR(cs, env, stval, env->stval); - KVM_RISCV_SET_CSR(cs, env, sip, env->mip); - KVM_RISCV_SET_CSR(cs, env, satp, env->satp); + if (!csr_cfg->supported) { + continue; + } + + if (csr_cfg->prop_size == sizeof(uint32_t)) { + reg = kvm_cpu_csr_get_u32(cpu, csr_cfg); + } else if (csr_cfg->prop_size == sizeof(uint64_t)) { + reg = kvm_cpu_csr_get_u64(cpu, csr_cfg); + } else { + g_assert_not_reached(); + } + + ret = kvm_set_one_reg(cs, csr_cfg->kvm_reg_id, &reg); + if (ret) { + return ret; + } + } return 0; } +static void kvm_riscv_reset_regs_csr(CPURISCVState *env) +{ + env->mstatus = 0; + env->mie = 0; + env->stvec = 0; + env->sscratch = 0; + env->sepc = 0; + env->scause = 0; + env->stval = 0; + env->mip = 0; + env->satp = 0; + env->scounteren = 0; + env->senvcfg = 0; +} + static int kvm_riscv_get_regs_fp(CPUState *cs) { int ret = 0; @@ -751,11 +825,11 @@ static void kvm_riscv_put_regs_timer(CPUState *cs) env->kvm_timer_dirty = false; } -uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs) +uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu) { uint64_t reg; - KVM_RISCV_GET_TIMER(cs, frequency, reg); + KVM_RISCV_GET_TIMER(CPU(cpu), frequency, reg); return reg; } @@ -772,26 +846,26 @@ static int kvm_riscv_get_regs_vector(CPUState *cs) return 0; } - ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), &reg); + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg); if (ret) { return ret; } env->vstart = reg; - ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), &reg); + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg); if (ret) { return ret; } env->vl = reg; - ret = kvm_get_one_reg(cs,
RISCV_VECTOR_CSR_REG(env, vtype), &reg); + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg); if (ret) { return ret; } env->vtype = reg; if (kvm_v_vlenb.supported) { - ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg); + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg); if (ret) { return ret; } @@ -829,26 +903,26 @@ static int kvm_riscv_put_regs_vector(CPUState *cs) } reg = env->vstart; - ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), &reg); + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg); if (ret) { return ret; } reg = env->vl; - ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), &reg); + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg); if (ret) { return ret; } reg = env->vtype; - ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), &reg); + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg); if (ret) { return ret; } if (kvm_v_vlenb.supported) { reg = cpu->cfg.vlenb; - ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg); + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg); for (int i = 0; i < 32; i++) { /* @@ -925,27 +999,39 @@ static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch) close(scratch->kvmfd); } +static void kvm_riscv_init_max_satp_mode(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) +{ + struct kvm_one_reg reg; + int ret; + + reg.id = RISCV_CONFIG_REG(satp_mode); + reg.addr = (uint64_t)&cpu->cfg.max_satp_mode; + ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg); + if (ret != 0) { + error_report("Unable to retrieve satp mode from host, error %d", ret); + } +} + static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) { - CPURISCVState *env = &cpu->env; struct kvm_one_reg reg; int ret; - reg.id = RISCV_CONFIG_REG(env, mvendorid); + reg.id = RISCV_CONFIG_REG(mvendorid); reg.addr = (uint64_t)&cpu->cfg.mvendorid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg); if (ret != 0) { error_report("Unable to retrieve mvendorid from host, error %d", ret); } - reg.id = RISCV_CONFIG_REG(env, marchid); + reg.id = RISCV_CONFIG_REG(marchid); reg.addr = (uint64_t)&cpu->cfg.marchid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg); if (ret != 0) { error_report("Unable to retrieve marchid from host, error %d", ret); } - reg.id = RISCV_CONFIG_REG(env, mimpid); + reg.id = RISCV_CONFIG_REG(mimpid); reg.addr = (uint64_t)&cpu->cfg.mimpid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg); if (ret != 0) { @@ -960,7 +1046,7 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu, struct kvm_one_reg reg; int ret; - reg.id = RISCV_CONFIG_REG(env, isa); + reg.id = RISCV_CONFIG_REG(isa); reg.addr = (uint64_t)&env->misa_ext_mask; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg); @@ -977,11 +1063,10 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu, static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, KVMCPUConfig *cbomz_cfg) { - CPURISCVState *env = &cpu->env; struct kvm_one_reg reg; int ret; - reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, cbomz_cfg->kvm_reg_id); reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg); ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg); @@ -995,7 +1080,6 @@ static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) { - CPURISCVState *env = &cpu->env; uint64_t val; int i, ret; @@ -1003,7 +1087,7 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu, KVMCPUConfig
*multi_ext_cfg = &kvm_multi_ext_cfgs[i]; struct kvm_one_reg reg; - reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT, multi_ext_cfg->kvm_reg_id); reg.addr = (uint64_t)&val; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg); @@ -1033,6 +1117,32 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu, } } +static void kvm_riscv_read_csr_cfg_legacy(KVMScratchCPU *kvmcpu) +{ + uint64_t val; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) { + KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i]; + struct kvm_one_reg reg; + + reg.id = csr_cfg->kvm_reg_id; + reg.addr = (uint64_t)&val; + ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg); + if (ret != 0) { + if (errno == EINVAL) { + csr_cfg->supported = false; + } else { + error_report("Unable to read KVM CSR %s: %s", + csr_cfg->name, strerror(errno)); + exit(EXIT_FAILURE); + } + } else { + csr_cfg->supported = true; + } + } +} + static int uint64_cmp(const void *a, const void *b) { uint64_t val1 = *(const uint64_t *)a; @@ -1050,7 +1160,6 @@ static int uint64_cmp(const void *a, const void *b) } static void kvm_riscv_check_sbi_dbcn_support(RISCVCPU *cpu, - KVMScratchCPU *kvmcpu, struct kvm_reg_list *reglist) { struct kvm_reg_list *reg_search; @@ -1090,12 +1199,31 @@ static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, } } -static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) +static void kvm_riscv_read_csr_cfg(struct kvm_reg_list *reglist) { + struct kvm_reg_list *reg_search; + uint64_t reg_id; + + for (int i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) { + KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i]; + + reg_id = csr_cfg->kvm_reg_id; + reg_search = bsearch(&reg_id, reglist->reg, reglist->n, + sizeof(uint64_t), uint64_cmp); + if (!reg_search) { + continue; + } + + csr_cfg->supported = true; + } +} + +static void kvm_riscv_init_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) +{ + g_autofree struct kvm_reg_list *reglist = NULL; KVMCPUConfig *multi_ext_cfg; struct kvm_one_reg reg; struct kvm_reg_list rl_struct; - struct kvm_reg_list *reglist; uint64_t val, reg_id, *reg_search; int i, ret; @@ -1107,7 +1235,9 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) * (EINVAL). Use read_legacy() in this case.
*/ if (errno == EINVAL) { - return kvm_riscv_read_multiext_legacy(cpu, kvmcpu); + kvm_riscv_read_multiext_legacy(cpu, kvmcpu); + kvm_riscv_read_csr_cfg_legacy(kvmcpu); + return; } else if (errno != E2BIG) { /* * E2BIG is an expected error message for the API since we @@ -1136,7 +1266,7 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) { multi_ext_cfg = &kvm_multi_ext_cfgs[i]; - reg_id = kvm_riscv_reg_id_ulong(&cpu->env, KVM_REG_RISCV_ISA_EXT, + reg_id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT, multi_ext_cfg->kvm_reg_id); reg_search = bsearch(&reg_id, reglist->reg, reglist->n, sizeof(uint64_t), uint64_cmp); @@ -1169,7 +1299,8 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) kvm_riscv_read_vlenb(cpu, kvmcpu, reglist); } - kvm_riscv_check_sbi_dbcn_support(cpu, kvmcpu, reglist); + kvm_riscv_check_sbi_dbcn_support(cpu, reglist); + kvm_riscv_read_csr_cfg(reglist); } static void riscv_init_kvm_registers(Object *cpu_obj) @@ -1183,7 +1314,8 @@ static void riscv_init_kvm_registers(Object *cpu_obj) kvm_riscv_init_machine_ids(cpu, &kvmcpu); kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu); - kvm_riscv_init_multiext_cfg(cpu, &kvmcpu); + kvm_riscv_init_cfg(cpu, &kvmcpu); + kvm_riscv_init_max_satp_mode(cpu, &kvmcpu); kvm_riscv_destroy_scratch_vcpu(&kvmcpu); } @@ -1192,7 +1324,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = { KVM_CAP_LAST_INFO }; -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { int ret = 0; @@ -1237,7 +1369,7 @@ int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state) return 0; } -int kvm_arch_put_registers(CPUState *cs, int level) +int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) { int ret = 0; @@ -1315,12 +1447,11 @@ void kvm_arch_init_irq_routing(KVMState *s) static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs) { - CPURISCVState *env = &cpu->env; target_ulong reg; uint64_t id; int ret; - id = RISCV_CONFIG_REG(env, mvendorid); + id = RISCV_CONFIG_REG(mvendorid); /* * cfg.mvendorid is an uint32 but a target_ulong will * be written. Assign it to a target_ulong var to avoid @@ -1332,13 +1463,13 @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs) return ret; } - id = RISCV_CONFIG_REG(env, marchid); + id = RISCV_CONFIG_REG(marchid); ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid); if (ret != 0) { return ret; } - id = RISCV_CONFIG_REG(env, mimpid); + id = RISCV_CONFIG_REG(mimpid); ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid); return ret; @@ -1355,6 +1486,11 @@ static int kvm_vcpu_enable_sbi_dbcn(RISCVCPU *cpu, CPUState *cs) return kvm_set_one_reg(cs, kvm_sbi_dbcn.kvm_reg_id, &reg); } +int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp) +{ + return 0; +} + int kvm_arch_init_vcpu(CPUState *cs) { int ret = 0; @@ -1401,11 +1537,6 @@ int kvm_arch_init(MachineState *ms, KVMState *s) int kvm_arch_irqchip_create(KVMState *s) { - if (kvm_kernel_irqchip_split()) { - error_report("-machine kernel_irqchip=split is not supported on RISC-V."); - exit(1); - } - /* * We can create the VAIA using the newer device control API.
*/ @@ -1488,7 +1619,7 @@ static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run) break; case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE: ch = run->riscv_sbi.args[0]; - ret = qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch)); + ret = qemu_chr_fe_write_all(serial_hd(0)->be, &ch, sizeof(ch)); if (ret < 0) { error_report("SBI_EXT_DBCN_CONSOLE_WRITE_BYTE: error when " @@ -1601,23 +1732,14 @@ void kvm_riscv_reset_vcpu(RISCVCPU *cpu) CPURISCVState *env = &cpu->env; int i; - if (!kvm_enabled()) { - return; - } for (i = 0; i < 32; i++) { env->gpr[i] = 0; } env->pc = cpu->env.kernel_addr; env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */ env->gpr[11] = cpu->env.fdt_addr; /* a1 */ - env->satp = 0; - env->mie = 0; - env->stvec = 0; - env->sscratch = 0; - env->sepc = 0; - env->scause = 0; - env->stval = 0; - env->mip = 0; + + kvm_riscv_reset_regs_csr(env); } void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level) @@ -1676,9 +1798,9 @@ void kvm_arch_accel_class_init(ObjectClass *oc) object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia, riscv_set_kvm_aia); object_class_property_set_description(oc, "riscv-aia", - "Set KVM AIA mode. Valid values are " - "emul, hwaccel, and auto. Default " - "is auto."); + "Set KVM AIA mode. Valid values are 'emul', 'hwaccel' and 'auto'. " + "Changing KVM AIA modes relies on host support. Defaults to 'auto' " + "if the host supports it"); object_property_set_default_str(object_class_property_find(oc, "riscv-aia"), "auto"); } @@ -1695,6 +1817,7 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, uint64_t max_hart_per_socket = 0; uint64_t socket, base_hart, hart_count, socket_imsic_base, imsic_addr; uint64_t socket_bits, hart_bits, guest_bits; + uint64_t max_group_id; aia_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_RISCV_AIA, false); @@ -1710,28 +1833,46 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, error_report("KVM AIA: failed to get current KVM AIA mode"); exit(1); } - qemu_log("KVM AIA: default mode is %s\n", - kvm_aia_mode_str(default_aia_mode)); if (default_aia_mode != aia_mode) { ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, KVM_DEV_RISCV_AIA_CONFIG_MODE, &aia_mode, true, NULL); - if (ret < 0) - warn_report("KVM AIA: failed to set KVM AIA mode"); - else - qemu_log("KVM AIA: set current mode to %s\n", - kvm_aia_mode_str(aia_mode)); - } + if (ret < 0) { + warn_report("KVM AIA: failed to set KVM AIA mode '%s', using " + "default host mode '%s'", + kvm_aia_mode_str(aia_mode), + kvm_aia_mode_str(default_aia_mode)); - ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, - KVM_DEV_RISCV_AIA_CONFIG_SRCS, - &aia_irq_num, true, NULL); - if (ret < 0) { - error_report("KVM AIA: failed to set number of input irq lines"); - exit(1); + /* failed to change AIA mode, use default */ + aia_mode = default_aia_mode; + } } + /* + * Skip APLIC creation in KVM if we're running split mode. + * This is done by leaving KVM_DEV_RISCV_AIA_CONFIG_SRCS + * unset. We can also skip KVM_DEV_RISCV_AIA_ADDR_APLIC + * since KVM won't be using it. 
+ */ + if (!kvm_kernel_irqchip_split()) { + ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, + KVM_DEV_RISCV_AIA_CONFIG_SRCS, + &aia_irq_num, true, NULL); + if (ret < 0) { + error_report("KVM AIA: failed to set number of input irq lines"); + exit(1); + } + + ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR, + KVM_DEV_RISCV_AIA_ADDR_APLIC, + &aplic_base, true, NULL); + if (ret < 0) { + error_report("KVM AIA: failed to set the base address of APLIC"); + exit(1); + } + } + ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, KVM_DEV_RISCV_AIA_CONFIG_IDS, &aia_msi_num, true, NULL); @@ -1742,7 +1883,8 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, if (socket_count > 1) { - socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1; + max_group_id = socket_count - 1; + socket_bits = find_last_bit(&max_group_id, BITS_PER_LONG) + 1; ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS, &socket_bits, true, NULL); @@ -1770,14 +1912,6 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, exit(1); } - ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR, - KVM_DEV_RISCV_AIA_ADDR_APLIC, - &aplic_base, true, NULL); - if (ret < 0) { - error_report("KVM AIA: failed to set the base address of APLIC"); - exit(1); - } - for (socket = 0; socket < socket_count; socket++) { socket_imsic_base = imsic_base + socket * (1U << group_shift); hart_count = riscv_socket_hart_count(machine, socket); @@ -1865,7 +1999,7 @@ static bool kvm_cpu_realize(CPUState *cs, Error **errp) } } - return true; + return true; } void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp) @@ -1890,7 +2024,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp) if (cpu->cfg.ext_zicbom && riscv_cpu_option_set(kvm_cbom_blocksize.name)) { - reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, kvm_cbom_blocksize.kvm_reg_id); reg.addr = (uint64_t)&val; ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg); @@ -1909,7 +2043,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp) if (cpu->cfg.ext_zicboz && riscv_cpu_option_set(kvm_cboz_blocksize.name)) { - reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, kvm_cboz_blocksize.kvm_reg_id); reg.addr = (uint64_t)&val; ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg); @@ -1950,7 +2084,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp) kvm_riscv_destroy_scratch_vcpu(&kvmcpu); } -static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data) +static void kvm_cpu_accel_class_init(ObjectClass *oc, const void *data) { AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); @@ -1971,22 +2105,25 @@ static void kvm_cpu_accel_register_types(void) } type_init(kvm_cpu_accel_register_types); -static void riscv_host_cpu_class_init(ObjectClass *c, void *data) -{ - RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); - -#if defined(TARGET_RISCV32) - mcc->misa_mxl_max = MXL_RV32; -#elif defined(TARGET_RISCV64) - mcc->misa_mxl_max = MXL_RV64; -#endif -} - static const TypeInfo riscv_kvm_cpu_type_infos[] = { { .name = TYPE_RISCV_CPU_HOST, .parent = TYPE_RISCV_CPU, - .class_init = riscv_host_cpu_class_init, +#if defined(TARGET_RISCV32) + .class_data = &(const RISCVCPUDef) { + .misa_mxl_max = MXL_RV32, + .priv_spec = RISCV_PROFILE_ATTR_UNUSED, + .vext_spec = RISCV_PROFILE_ATTR_UNUSED, + .cfg.max_satp_mode = -1, + }, +#elif defined(TARGET_RISCV64)
+ .class_data = &(const RISCVCPUDef) { + .misa_mxl_max = MXL_RV64, + .priv_spec = RISCV_PROFILE_ATTR_UNUSED, + .vext_spec = RISCV_PROFILE_ATTR_UNUSED, + .cfg.max_satp_mode = -1, + }, +#endif } }; diff --git a/target/riscv/kvm/kvm_riscv.h b/target/riscv/kvm/kvm_riscv.h index 58518988681..b2bcd1041f6 100644 --- a/target/riscv/kvm/kvm_riscv.h +++ b/target/riscv/kvm/kvm_riscv.h @@ -19,6 +19,8 @@ #ifndef QEMU_KVM_RISCV_H #define QEMU_KVM_RISCV_H +#include "target/riscv/cpu-qom.h" + void kvm_riscv_reset_vcpu(RISCVCPU *cpu); void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level); void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, @@ -28,6 +30,6 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, void riscv_kvm_aplic_request(void *opaque, int irq, int level); int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state); void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp); -uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs); +uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu); #endif diff --git a/target/riscv/m128_helper.c b/target/riscv/m128_helper.c index ec14aaa901a..7d9b83b1b2c 100644 --- a/target/riscv/m128_helper.c +++ b/target/riscv/m128_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" target_ulong HELPER(divu_i128)(CPURISCVState *env, diff --git a/target/riscv/machine.c b/target/riscv/machine.c index 492c2c6d9d7..1600ec44f0b 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -19,9 +19,9 @@ #include "qemu/osdep.h" #include "cpu.h" #include "qemu/error-report.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "migration/cpu.h" -#include "sysemu/cpu-timers.h" +#include "exec/icount.h" #include "debug.h" static bool pmp_needed(void *opaque) @@ -36,8 +36,9 @@ static int pmp_post_load(void *opaque, int version_id) RISCVCPU *cpu = opaque; CPURISCVState *env = &cpu->env; int i; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; - for (i = 0; i < MAX_RISCV_PMPS; i++) { + for (i = 0; i < pmp_regions; i++) { pmp_update_rule_addr(env, i); } pmp_update_rule_nums(env); @@ -152,25 +153,15 @@ static const VMStateDescription vmstate_vector = { static bool pointermasking_needed(void *opaque) { - RISCVCPU *cpu = opaque; - CPURISCVState *env = &cpu->env; - - return riscv_has_ext(env, RVJ); + return false; } static const VMStateDescription vmstate_pointermasking = { .name = "cpu/pointer_masking", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .needed = pointermasking_needed, .fields = (const VMStateField[]) { - VMSTATE_UINTTL(env.mmte, RISCVCPU), - VMSTATE_UINTTL(env.mpmmask, RISCVCPU), - VMSTATE_UINTTL(env.mpmbase, RISCVCPU), - VMSTATE_UINTTL(env.spmmask, RISCVCPU), - VMSTATE_UINTTL(env.spmbase, RISCVCPU), - VMSTATE_UINTTL(env.upmmask, RISCVCPU), - VMSTATE_UINTTL(env.upmbase, RISCVCPU), VMSTATE_END_OF_LIST() } @@ -180,7 +171,7 @@ static bool rv128_needed(void *opaque) { RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(opaque); - return mcc->misa_mxl_max == MXL_RV128; + return mcc->def->misa_mxl_max == MXL_RV128; } static const VMStateDescription vmstate_rv128 = { @@ -266,7 +257,6 @@ static int riscv_cpu_post_load(void *opaque, int version_id) CPURISCVState *env = &cpu->env; env->xl = cpu_recompute_xl(env); - riscv_cpu_update_mask(env); return 0; } @@ -311,6 +301,30 @@ static const VMStateDescription vmstate_envcfg = { } }; +static bool ctr_needed(void *opaque) +{ + RISCVCPU *cpu = opaque; + + 
return cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr; +} + +static const VMStateDescription vmstate_ctr = { + .name = "cpu/ctr", + .version_id = 1, + .minimum_version_id = 1, + .needed = ctr_needed, + .fields = (const VMStateField[]) { + VMSTATE_UINT64(env.mctrctl, RISCVCPU), + VMSTATE_UINT32(env.sctrdepth, RISCVCPU), + VMSTATE_UINT32(env.sctrstatus, RISCVCPU), + VMSTATE_UINT64(env.vsctrctl, RISCVCPU), + VMSTATE_UINT64_ARRAY(env.ctr_src, RISCVCPU, 16 << SCTRDEPTH_MAX), + VMSTATE_UINT64_ARRAY(env.ctr_dst, RISCVCPU, 16 << SCTRDEPTH_MAX), + VMSTATE_UINT64_ARRAY(env.ctr_data, RISCVCPU, 16 << SCTRDEPTH_MAX), + VMSTATE_END_OF_LIST() + } +}; + static bool pmu_needed(void *opaque) { RISCVCPU *cpu = opaque; @@ -350,6 +364,42 @@ static const VMStateDescription vmstate_jvt = { } }; +static bool elp_needed(void *opaque) +{ + RISCVCPU *cpu = opaque; + + return cpu->cfg.ext_zicfilp; +} + +static const VMStateDescription vmstate_elp = { + .name = "cpu/elp", + .version_id = 1, + .minimum_version_id = 1, + .needed = elp_needed, + .fields = (const VMStateField[]) { + VMSTATE_BOOL(env.elp, RISCVCPU), + VMSTATE_END_OF_LIST() + } +}; + +static bool ssp_needed(void *opaque) +{ + RISCVCPU *cpu = opaque; + + return cpu->cfg.ext_zicfiss; +} + +static const VMStateDescription vmstate_ssp = { + .name = "cpu/ssp", + .version_id = 1, + .minimum_version_id = 1, + .needed = ssp_needed, + .fields = (const VMStateField[]) { + VMSTATE_UINTTL(env.ssp, RISCVCPU), + VMSTATE_END_OF_LIST() + } +}; + const VMStateDescription vmstate_riscv_cpu = { .name = "cpu", .version_id = 10, @@ -398,6 +448,7 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_UINTTL(env.siselect, RISCVCPU), VMSTATE_UINT32(env.scounteren, RISCVCPU), VMSTATE_UINT32(env.mcounteren, RISCVCPU), + VMSTATE_UINT32(env.scountinhibit, RISCVCPU), VMSTATE_UINT32(env.mcountinhibit, RISCVCPU), VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0, vmstate_pmu_ctr_state, PMUCTRState), @@ -422,6 +473,9 @@ const VMStateDescription vmstate_riscv_cpu = { &vmstate_debug, &vmstate_smstateen, &vmstate_jvt, + &vmstate_elp, + &vmstate_ssp, + &vmstate_ctr, NULL } }; diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c index f5b1ffe6c3e..100005ea4e9 100644 --- a/target/riscv/monitor.c +++ b/target/riscv/monitor.c @@ -184,7 +184,6 @@ static void mem_info_svxx(Monitor *mon, CPUArchState *env) break; default: g_assert_not_reached(); - break; } /* calculate virtual address bits */ diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c index 25a5263573b..110292e84da 100644 --- a/target/riscv/op_helper.c +++ b/target/riscv/op_helper.c @@ -21,15 +21,24 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internals.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "exec/cputlb.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/probe.h" #include "exec/helper-proto.h" +#include "exec/tlb-flags.h" +#include "trace.h" /* Exceptions processing helpers */ G_NORETURN void riscv_raise_exception(CPURISCVState *env, - uint32_t exception, uintptr_t pc) + RISCVException exception, + uintptr_t pc) { CPUState *cs = env_cpu(env); + + trace_riscv_exception(exception, + riscv_cpu_get_trap_name(exception, false), + env->pc); + cs->exception_index = exception; cpu_loop_exit_restore(cs, pc); } @@ -62,7 +71,7 @@ target_ulong helper_csrr(CPURISCVState *env, int csr) void helper_csrw(CPURISCVState *env, int csr, target_ulong src) { target_ulong mask = env->xl == MXL_RV32 ? 
UINT32_MAX : (target_ulong)-1; - RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask); + RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask, GETPC()); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -73,7 +82,7 @@ target_ulong helper_csrrw(CPURISCVState *env, int csr, target_ulong src, target_ulong write_mask) { target_ulong val = 0; - RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask); + RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask, GETPC()); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -99,7 +108,7 @@ void helper_csrw_i128(CPURISCVState *env, int csr, { RISCVException ret = riscv_csrrw_i128(env, csr, NULL, int128_make128(srcl, srch), - UINT128_MAX); + UINT128_MAX, GETPC()); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -107,13 +116,14 @@ void helper_csrw_i128(CPURISCVState *env, int csr, } target_ulong helper_csrrw_i128(CPURISCVState *env, int csr, - target_ulong srcl, target_ulong srch, - target_ulong maskl, target_ulong maskh) + target_ulong srcl, target_ulong srch, + target_ulong maskl, target_ulong maskh) { Int128 rv = int128_zero(); RISCVException ret = riscv_csrrw_i128(env, csr, &rv, int128_make128(srcl, srch), - int128_make128(maskl, maskh)); + int128_make128(maskl, maskh), + GETPC()); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -263,13 +273,17 @@ target_ulong helper_sret(CPURISCVState *env) { uint64_t mstatus; target_ulong prev_priv, prev_virt = env->virt_enabled; + const target_ulong src_priv = env->priv; + const bool src_virt = env->virt_enabled; if (!(env->priv >= PRV_S)) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } - target_ulong retpc = env->sepc; - if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) { + target_ulong retpc = env->sepc & get_xepc_mask(env); + if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg, + env->priv_ver, + env->misa_ext) && (retpc & 0x3)) { riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC()); } @@ -287,6 +301,21 @@ target_ulong helper_sret(CPURISCVState *env) get_field(mstatus, MSTATUS_SPIE)); mstatus = set_field(mstatus, MSTATUS_SPIE, 1); mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U); + + if (riscv_cpu_cfg(env)->ext_ssdbltrp) { + if (riscv_has_ext(env, RVH)) { + target_ulong prev_vu = get_field(env->hstatus, HSTATUS_SPV) && + prev_priv == PRV_U; + /* Returning to VU from HS, vsstatus.sdt = 0 */ + if (!env->virt_enabled && prev_vu) { + env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0); + } + } + mstatus = set_field(mstatus, MSTATUS_SDT, 0); + } + if (riscv_cpu_cfg(env)->ext_smdbltrp && env->priv >= PRV_M) { + mstatus = set_field(mstatus, MSTATUS_MDT, 0); + } if (env->priv_ver >= PRIV_VERSION_1_12_0) { mstatus = set_field(mstatus, MSTATUS_MPRV, 0); } @@ -297,7 +326,6 @@ target_ulong helper_sret(CPURISCVState *env) target_ulong hstatus = env->hstatus; prev_virt = get_field(hstatus, HSTATUS_SPV); - hstatus = set_field(hstatus, HSTATUS_SPV, 0); env->hstatus = hstatus; @@ -309,27 +337,67 @@ target_ulong helper_sret(CPURISCVState *env) riscv_cpu_set_mode(env, prev_priv, prev_virt); + /* + * If forward cfi enabled for new priv, restore elp status + * and clear spelp in mstatus + */ + if (cpu_get_fcfien(env)) { + env->elp = get_field(env->mstatus, MSTATUS_SPELP); + } + env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, 0); + + if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) { + riscv_ctr_add_entry(env, env->pc, retpc, 
CTRDATA_TYPE_EXCEP_INT_RET, + src_priv, src_virt); + } + return retpc; } -target_ulong helper_mret(CPURISCVState *env) +static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc, + target_ulong prev_priv, + uintptr_t ra) { if (!(env->priv >= PRV_M)) { - riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra); } - target_ulong retpc = env->mepc; - if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) { - riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC()); + if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg, + env->priv_ver, + env->misa_ext) && (retpc & 0x3)) { + riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, ra); } - uint64_t mstatus = env->mstatus; - target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP); - if (riscv_cpu_cfg(env)->pmp && !pmp_get_num_rules(env) && (prev_priv != PRV_M)) { - riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC()); + riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, ra); } +} +static target_ulong ssdbltrp_mxret(CPURISCVState *env, target_ulong mstatus, + target_ulong prev_priv, + target_ulong prev_virt) +{ + /* If returning to U, VS or VU, sstatus.sdt = 0 */ + if (prev_priv == PRV_U || (prev_virt && + (prev_priv == PRV_S || prev_priv == PRV_U))) { + mstatus = set_field(mstatus, MSTATUS_SDT, 0); + /* If returning to VU, vsstatus.sdt = 0 */ + if (prev_virt && prev_priv == PRV_U) { + env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0); + } + } + + return mstatus; +} + +target_ulong helper_mret(CPURISCVState *env) +{ + target_ulong retpc = env->mepc & get_xepc_mask(env); + uint64_t mstatus = env->mstatus; + target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP); + uintptr_t ra = GETPC(); + + check_ret_from_m_mode(env, retpc, prev_priv, ra); target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV) && (prev_priv != PRV_M); @@ -339,6 +407,12 @@ target_ulong helper_mret(CPURISCVState *env) mstatus = set_field(mstatus, MSTATUS_MPP, riscv_has_ext(env, RVU) ? PRV_U : PRV_M); mstatus = set_field(mstatus, MSTATUS_MPV, 0); + if (riscv_cpu_cfg(env)->ext_ssdbltrp) { + mstatus = ssdbltrp_mxret(env, mstatus, prev_priv, prev_virt); + } + if (riscv_cpu_cfg(env)->ext_smdbltrp) { + mstatus = set_field(mstatus, MSTATUS_MDT, 0); + } if ((env->priv_ver >= PRIV_VERSION_1_12_0) && (prev_priv != PRV_M)) { mstatus = set_field(mstatus, MSTATUS_MPRV, 0); } @@ -349,10 +423,107 @@ target_ulong helper_mret(CPURISCVState *env) } riscv_cpu_set_mode(env, prev_priv, prev_virt); + /* + * If forward cfi enabled for new priv, restore elp status + * and clear mpelp in mstatus + */ + if (cpu_get_fcfien(env)) { + env->elp = get_field(env->mstatus, MSTATUS_MPELP); + } + env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, 0); + + if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) { + riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET, + PRV_M, false); + } return retpc; } +target_ulong helper_mnret(CPURISCVState *env) +{ + target_ulong retpc = env->mnepc; + target_ulong prev_priv = get_field(env->mnstatus, MNSTATUS_MNPP); + target_ulong prev_virt; + uintptr_t ra = GETPC(); + + check_ret_from_m_mode(env, retpc, prev_priv, ra); + + prev_virt = get_field(env->mnstatus, MNSTATUS_MNPV) && + (prev_priv != PRV_M); + env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, true); + + /* + * If MNRET changes the privilege mode to a mode + * less privileged than M, it also sets mstatus.MPRV to 0. 
+ */ + if (prev_priv < PRV_M) { + env->mstatus = set_field(env->mstatus, MSTATUS_MPRV, false); + } + if (riscv_cpu_cfg(env)->ext_ssdbltrp) { + env->mstatus = ssdbltrp_mxret(env, env->mstatus, prev_priv, prev_virt); + } + + if (riscv_cpu_cfg(env)->ext_smdbltrp) { + if (prev_priv < PRV_M) { + env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 0); + } + } + + if (riscv_has_ext(env, RVH) && prev_virt) { + riscv_cpu_swap_hypervisor_regs(env); + } + + riscv_cpu_set_mode(env, prev_priv, prev_virt); + + /* + * If forward cfi enabled for new priv, restore elp status + * and clear mnpelp in mnstatus + */ + if (cpu_get_fcfien(env)) { + env->elp = get_field(env->mnstatus, MNSTATUS_MNPELP); + } + env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, 0); + + return retpc; +} + +void helper_ctr_add_entry(CPURISCVState *env, target_ulong src, + target_ulong dest, target_ulong type) +{ + riscv_ctr_add_entry(env, src, dest, (enum CTRType)type, + env->priv, env->virt_enabled); +} + +void helper_ctr_clear(CPURISCVState *env) +{ + /* + * It's safe to call smstateen_acc_ok() for umode access regardless of the + * state of bit 54 (CTR bit in case of m/hstateen) of sstateen. If the bit + * is zero, smstateen_acc_ok() will return the correct exception code and + * if it's one, smstateen_acc_ok() will return RISCV_EXCP_NONE. In that + * scenario the U-mode check below will handle that case. + */ + RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR); + if (ret != RISCV_EXCP_NONE) { + riscv_raise_exception(env, ret, GETPC()); + } + + if (env->priv == PRV_U) { + /* + * One corner case is when sctrclr is executed from VU-mode and + * mstateen.CTR = 0, in which case we are supposed to raise + * RISCV_EXCP_ILLEGAL_INST. This case is already handled in + * smstateen_acc_ok(). + */ + uint32_t excep = env->virt_enabled ? 
RISCV_EXCP_VIRT_INSTRUCTION_FAULT : + RISCV_EXCP_ILLEGAL_INST; + riscv_raise_exception(env, excep, GETPC()); + } + + riscv_ctr_clear(env); +} + void helper_wfi(CPURISCVState *env) { CPUState *cs = env_cpu(env); @@ -455,7 +626,7 @@ target_ulong helper_hyp_hlv_bu(CPURISCVState *env, target_ulong addr) int mmu_idx = check_access_hlsv(env, false, ra); MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); - return cpu_ldb_mmu(env, addr, oi, ra); + return cpu_ldb_mmu(env, adjust_addr_virt(env, addr), oi, ra); } target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr) @@ -464,7 +635,7 @@ target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr) int mmu_idx = check_access_hlsv(env, false, ra); MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx); - return cpu_ldw_mmu(env, addr, oi, ra); + return cpu_ldw_mmu(env, adjust_addr_virt(env, addr), oi, ra); } target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr) @@ -473,7 +644,7 @@ target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr) int mmu_idx = check_access_hlsv(env, false, ra); MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx); - return cpu_ldl_mmu(env, addr, oi, ra); + return cpu_ldl_mmu(env, adjust_addr_virt(env, addr), oi, ra); } target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr) @@ -482,7 +653,7 @@ target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr) int mmu_idx = check_access_hlsv(env, false, ra); MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx); - return cpu_ldq_mmu(env, addr, oi, ra); + return cpu_ldq_mmu(env, adjust_addr_virt(env, addr), oi, ra); } void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val) @@ -491,7 +662,7 @@ void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val) int mmu_idx = check_access_hlsv(env, false, ra); MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); - cpu_stb_mmu(env, addr, val, oi, ra); + cpu_stb_mmu(env, adjust_addr_virt(env, addr), val, oi, ra); } void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val) @@ -500,7 +671,7 @@ void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val) int mmu_idx = check_access_hlsv(env, false, ra); MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx); - cpu_stw_mmu(env, addr, val, oi, ra); + cpu_stw_mmu(env, adjust_addr_virt(env, addr), val, oi, ra); } void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val) @@ -509,7 +680,7 @@ void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val) int mmu_idx = check_access_hlsv(env, false, ra); MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx); - cpu_stl_mmu(env, addr, val, oi, ra); + cpu_stl_mmu(env, adjust_addr_virt(env, addr), val, oi, ra); } void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val) @@ -518,7 +689,7 @@ void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val) int mmu_idx = check_access_hlsv(env, false, ra); MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx); - cpu_stq_mmu(env, addr, val, oi, ra); + cpu_stq_mmu(env, adjust_addr_virt(env, addr), val, oi, ra); } /* diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c index 9eea397e72c..72f1372a495 100644 --- a/target/riscv/pmp.c +++ b/target/riscv/pmp.c @@ -24,13 +24,23 @@ #include "qapi/error.h" #include "cpu.h" #include "trace.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" +#include "exec/target_page.h" static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index, uint8_t val); static uint8_t 
pmp_read_cfg(CPURISCVState *env, uint32_t addr_index); +/* + * Convert the PMP permissions to match the truth table in the Smepmp spec. + */ +static inline uint8_t pmp_get_smepmp_operation(uint8_t cfg) +{ + return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) | + (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2); +} + /* * Accessor method to extract address matching type 'a field' from cfg reg */ @@ -45,21 +55,58 @@ static inline uint8_t pmp_get_a_field(uint8_t cfg) */ static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index) { - /* mseccfg.RLB is set */ - if (MSECCFG_RLB_ISSET(env)) { - return 0; - } - if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) { return 1; } - /* Top PMP has no 'next' to check */ - if ((pmp_index + 1u) >= MAX_RISCV_PMPS) { + return 0; +} + +/* + * Check whether a PMP is locked for writing or not. + * (i.e. has LOCK flag and mseccfg.RLB is unset) + */ +static int pmp_is_readonly(CPURISCVState *env, uint32_t pmp_index) +{ + return pmp_is_locked(env, pmp_index) && !MSECCFG_RLB_ISSET(env); +} + +/* + * Check whether `val` is an invalid Smepmp config value + */ +static int pmp_is_invalid_smepmp_cfg(CPURISCVState *env, uint8_t val) +{ + /* No check if mseccfg.MML is not set or if mseccfg.RLB is set */ + if (!MSECCFG_MML_ISSET(env) || MSECCFG_RLB_ISSET(env)) { return 0; } - return 0; + /* + * Adding a rule with executable privileges that either is M-mode-only + * or a locked Shared-Region is not possible + */ + switch (pmp_get_smepmp_operation(val)) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + case 12: + case 14: + case 15: + return 0; + case 9: + case 10: + case 11: + case 13: + return 1; + default: + g_assert_not_reached(); + } } /* @@ -75,7 +122,9 @@ uint32_t pmp_get_num_rules(CPURISCVState *env) */ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index) { - if (pmp_index < MAX_RISCV_PMPS) { + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; + + if (pmp_index < pmp_regions) { return env->pmp_state.pmp[pmp_index].cfg_reg; } @@ -89,46 +138,21 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index) */ static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val) { - if (pmp_index < MAX_RISCV_PMPS) { - bool locked = true; - - if (riscv_cpu_cfg(env)->ext_smepmp) { - /* mseccfg.RLB is set */ - if (MSECCFG_RLB_ISSET(env)) { - locked = false; - } - - /* mseccfg.MML is not set */ - if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) { - locked = false; - } + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; - /* mseccfg.MML is set */ - if (MSECCFG_MML_ISSET(env)) { - /* not adding execute bit */ - if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) { - locked = false; - } - /* shared region and not adding X bit */ - if ((val & PMP_LOCK) != PMP_LOCK && - (val & 0x7) != (PMP_WRITE | PMP_EXEC)) { - locked = false; - } - } - } else { - if (!pmp_is_locked(env, pmp_index)) { - locked = false; - } + if (pmp_index < pmp_regions) { + if (env->pmp_state.pmp[pmp_index].cfg_reg == val) { + /* no change */ + return false; } - if (locked) { - qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n"); - } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) { - /* If !mseccfg.MML then ignore writes with encoding RW=01 */ - if ((val & PMP_WRITE) && !(val & PMP_READ) && - !MSECCFG_MML_ISSET(env)) { - return false; - } + if (pmp_is_readonly(env, pmp_index)) { + qemu_log_mask(LOG_GUEST_ERROR, + "ignoring pmpcfg write - read only\n"); + 
} else if (pmp_is_invalid_smepmp_cfg(env, val)) { + qemu_log_mask(LOG_GUEST_ERROR, + "ignoring pmpcfg write - invalid\n"); + } else { env->pmp_state.pmp[pmp_index].cfg_reg = val; pmp_update_rule_addr(env, pmp_index); return true; @@ -187,11 +211,12 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index) break; case PMP_AMATCH_TOR: - sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */ - ea = (this_addr << 2) - 1u; - if (sa > ea) { + if (prev_addr >= this_addr) { sa = ea = 0u; + break; } + sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */ + ea = (this_addr << 2) - 1u; break; case PMP_AMATCH_NA4: @@ -216,9 +241,10 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index) void pmp_update_rule_nums(CPURISCVState *env) { int i; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; env->pmp_state.num_rules = 0; - for (i = 0; i < MAX_RISCV_PMPS; i++) { + for (i = 0; i < pmp_regions; i++) { const uint8_t a_field = pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg); if (PMP_AMATCH_OFF != a_field) { @@ -312,6 +338,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, int pmp_size = 0; hwaddr s = 0; hwaddr e = 0; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; /* Short cut if no rules */ if (0 == pmp_get_num_rules(env)) { @@ -326,7 +353,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, */ pmp_size = -(addr | TARGET_PAGE_MASK); } else { - pmp_size = sizeof(target_ulong); + pmp_size = 2 << riscv_cpu_mxl(env); } } else { pmp_size = size; @@ -336,7 +363,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, * 1.10 draft priv spec states there is an implicit order * from low to high */ - for (i = 0; i < MAX_RISCV_PMPS; i++) { + for (i = 0; i < pmp_regions; i++) { s = pmp_is_in_range(env, i, addr); e = pmp_is_in_range(env, i, addr + pmp_size - 1); @@ -352,16 +379,6 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, const uint8_t a_field = pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg); - /* - * Convert the PMP permissions to match the truth table in the - * Smepmp spec. - */ - const uint8_t smepmp_operation = - ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) | - ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) | - (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) | - ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2); - if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) { /* * If the PMP entry is not off and the address is in range, @@ -380,6 +397,9 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, /* * If mseccfg.MML Bit set, do the enhanced pmp priv check */ + const uint8_t smepmp_operation = + pmp_get_smepmp_operation(env->pmp_state.pmp[i].cfg_reg); + if (mode == PRV_M) { switch (smepmp_operation) { case 0: @@ -514,35 +534,39 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index, { trace_pmpaddr_csr_write(env->mhartid, addr_index, val); bool is_next_cfg_tor = false; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; + + if (addr_index < pmp_regions) { + if (env->pmp_state.pmp[addr_index].addr_reg == val) { + /* no change */ + return; + } - if (addr_index < MAX_RISCV_PMPS) { /* * In TOR mode, need to check the lock bit of the next pmp * (if there is a next). 
*/ - if (addr_index + 1 < MAX_RISCV_PMPS) { + if (addr_index + 1 < pmp_regions) { uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg; is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg); - if (pmp_cfg & PMP_LOCK && is_next_cfg_tor) { + if (pmp_is_readonly(env, addr_index + 1) && is_next_cfg_tor) { qemu_log_mask(LOG_GUEST_ERROR, - "ignoring pmpaddr write - pmpcfg + 1 locked\n"); + "ignoring pmpaddr write - pmpcfg+1 read only\n"); return; } } - if (!pmp_is_locked(env, addr_index)) { - if (env->pmp_state.pmp[addr_index].addr_reg != val) { - env->pmp_state.pmp[addr_index].addr_reg = val; - pmp_update_rule_addr(env, addr_index); - if (is_next_cfg_tor) { - pmp_update_rule_addr(env, addr_index + 1); - } - tlb_flush(env_cpu(env)); + if (!pmp_is_readonly(env, addr_index)) { + env->pmp_state.pmp[addr_index].addr_reg = val; + pmp_update_rule_addr(env, addr_index); + if (is_next_cfg_tor) { + pmp_update_rule_addr(env, addr_index + 1); } + tlb_flush(env_cpu(env)); } else { qemu_log_mask(LOG_GUEST_ERROR, - "ignoring pmpaddr write - locked\n"); + "ignoring pmpaddr write - read only\n"); } } else { qemu_log_mask(LOG_GUEST_ERROR, @@ -557,8 +581,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index, target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index) { target_ulong val = 0; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; - if (addr_index < MAX_RISCV_PMPS) { + if (addr_index < pmp_regions) { val = env->pmp_state.pmp[addr_index].addr_reg; trace_pmpaddr_csr_read(env->mhartid, addr_index, val); } else { @@ -575,12 +600,20 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index) void mseccfg_csr_write(CPURISCVState *env, target_ulong val) { int i; + uint64_t mask = MSECCFG_MMWP | MSECCFG_MML; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; + /* Update PMM field only if the value is valid according to Zjpm v1.0 */ + if (riscv_cpu_cfg(env)->ext_smmpm && + riscv_cpu_mxl(env) == MXL_RV64 && + get_field(val, MSECCFG_PMM) != PMM_FIELD_RESERVED) { + mask |= MSECCFG_PMM; + } trace_mseccfg_csr_write(env->mhartid, val); /* RLB cannot be enabled if it's already 0 and if any regions are locked */ if (!MSECCFG_RLB_ISSET(env)) { - for (i = 0; i < MAX_RISCV_PMPS; i++) { + for (i = 0; i < pmp_regions; i++) { if (pmp_is_locked(env, i)) { val &= ~MSECCFG_RLB; break; @@ -590,12 +623,18 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val) if (riscv_cpu_cfg(env)->ext_smepmp) { /* Sticky bits */ - val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML)); - if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) { + val |= (env->mseccfg & mask); + if ((val ^ env->mseccfg) & mask) { tlb_flush(env_cpu(env)); } } else { - val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB); + mask |= MSECCFG_RLB; + val &= ~(mask); + } + + /* M-mode forward cfi to be enabled if cfi extension is implemented */ + if (env_archcpu(env)->cfg.ext_zicfilp) { + val |= (val & MSECCFG_MLPE); } env->mseccfg = val; @@ -630,6 +669,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr) hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1); hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1; int i; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; /* * If PMP is not supported or there are no PMP rules, the TLB page will not @@ -640,7 +680,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr) return TARGET_PAGE_SIZE; } - for (i = 0; i < MAX_RISCV_PMPS; i++) { + for (i = 0; i < pmp_regions; i++) { if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == 
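/*
 * Illustrative standalone sketch (not part of the patch): the Smepmp
 * "sticky bit" behaviour for mseccfg.MML and mseccfg.MMWP.  Bits that are
 * already latched are OR-ed back into every new value, so once set they
 * cannot be cleared by a later CSR write.  Field values match pmp.h.
 */
#include <stdint.h>
#include <stdio.h>

#define MSECCFG_MML  (1u << 0)
#define MSECCFG_MMWP (1u << 1)

static uint32_t mseccfg_write(uint32_t old, uint32_t val)
{
    const uint32_t sticky = MSECCFG_MML | MSECCFG_MMWP;

    val |= old & sticky;        /* re-assert already-latched bits */
    return val;
}

int main(void)
{
    uint32_t m = 0;

    m = mseccfg_write(m, MSECCFG_MML);   /* guest sets MML          */
    m = mseccfg_write(m, 0);             /* later tries to clear it */
    printf("mseccfg = 0x%x\n", m);       /* still 0x1: MML remains  */
    return 0;
}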
PMP_AMATCH_OFF) { continue; } diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h index f5c10ce85c9..271cf241699 100644 --- a/target/riscv/pmp.h +++ b/target/riscv/pmp.h @@ -44,7 +44,9 @@ typedef enum { MSECCFG_MMWP = 1 << 1, MSECCFG_RLB = 1 << 2, MSECCFG_USEED = 1 << 8, - MSECCFG_SSEED = 1 << 9 + MSECCFG_SSEED = 1 << 9, + MSECCFG_MLPE = 1 << 10, + MSECCFG_PMM = 3ULL << 32, } mseccfg_field_t; typedef struct { diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c index 3cc0b3648ca..a68809eef32 100644 --- a/target/riscv/pmu.c +++ b/target/riscv/pmu.c @@ -22,8 +22,8 @@ #include "qemu/timer.h" #include "cpu.h" #include "pmu.h" -#include "sysemu/cpu-timers.h" -#include "sysemu/device_tree.h" +#include "exec/icount.h" +#include "system/device_tree.h" #define RISCV_TIMEBASE_FREQ 1000000000 /* 1Ghz */ @@ -204,6 +204,7 @@ static void riscv_pmu_icount_update_priv(CPURISCVState *env, } if (env->virt_enabled) { + g_assert(env->priv <= PRV_S); counter_arr = env->pmu_fixed_ctrs[1].counter_virt; snapshot_prev = env->pmu_fixed_ctrs[1].counter_virt_prev; } else { @@ -212,6 +213,7 @@ static void riscv_pmu_icount_update_priv(CPURISCVState *env, } if (new_virt) { + g_assert(newpriv <= PRV_S); snapshot_new = env->pmu_fixed_ctrs[1].counter_virt_prev; } else { snapshot_new = env->pmu_fixed_ctrs[1].counter_prev; @@ -242,6 +244,7 @@ static void riscv_pmu_cycle_update_priv(CPURISCVState *env, } if (env->virt_enabled) { + g_assert(env->priv <= PRV_S); counter_arr = env->pmu_fixed_ctrs[0].counter_virt; snapshot_prev = env->pmu_fixed_ctrs[0].counter_virt_prev; } else { @@ -250,6 +253,7 @@ static void riscv_pmu_cycle_update_priv(CPURISCVState *env, } if (new_virt) { + g_assert(newpriv <= PRV_S); snapshot_new = env->pmu_fixed_ctrs[0].counter_virt_prev; } else { snapshot_new = env->pmu_fixed_ctrs[0].counter_prev; @@ -386,7 +390,7 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value, * Expected mhpmevent value is zero for reset case. Remove the current * mapping. 
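/*
 * Illustrative standalone sketch (not part of the patch): why the reset test
 * in riscv_pmu_update_event_map() now masks the written value with the
 * event-index field instead of comparing the whole mhpmevent value with
 * zero.  EVENT_IDX_MASK and EVENT_FILTER_BIT are stand-ins here; the real
 * field is MHPMEVENT_IDX_MASK.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EVENT_IDX_MASK   0xfffffull       /* stand-in for the index field */
#define EVENT_FILTER_BIT (1ull << 62)     /* stand-in for a control bit   */

static bool should_remove_mapping(uint64_t mhpmevent_val)
{
    /* only an all-zero event index means "unmap this counter" */
    return (mhpmevent_val & EVENT_IDX_MASK) == 0;
}

int main(void)
{
    /*
     * Prints "1 1": a write of control bits with a zero event index still
     * drops the mapping, which the old "!value" test would have missed.
     */
    printf("%d %d\n", should_remove_mapping(0),
           should_remove_mapping(EVENT_FILTER_BIT));
    return 0;
}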
*/ - if (!value) { + if (!(value & MHPMEVENT_IDX_MASK)) { g_hash_table_foreach_remove(cpu->pmu_event_ctr_map, pmu_remove_event_map, GUINT_TO_POINTER(ctr_idx)); diff --git a/target/riscv/riscv-qmp-cmds.c b/target/riscv/riscv-qmp-cmds.c index d363dc318d9..8a1856c50e0 100644 --- a/target/riscv/riscv-qmp-cmds.c +++ b/target/riscv/riscv-qmp-cmds.c @@ -25,14 +25,14 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qapi/qapi-commands-machine-target.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qdict.h" +#include "qapi/qapi-commands-machine.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" #include "qapi/qobject-input-visitor.h" #include "qapi/visitor.h" #include "qom/qom-qobject.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" +#include "system/kvm.h" +#include "system/tcg.h" #include "cpu-qom.h" #include "cpu.h" @@ -121,7 +121,7 @@ static void riscv_obj_add_profiles_qdict(Object *obj, QDict *qdict_out) for (int i = 0; riscv_profiles[i] != NULL; i++) { profile = riscv_profiles[i]; - value = QOBJECT(qbool_from_bool(profile->enabled)); + value = QOBJECT(qbool_from_bool(profile->present)); qdict_put_obj(qdict_out, profile->name, value); } diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c index b8814ab753b..78fb2791847 100644 --- a/target/riscv/tcg/tcg-cpu.c +++ b/target/riscv/tcg/tcg-cpu.c @@ -18,9 +18,10 @@ */ #include "qemu/osdep.h" -#include "exec/exec-all.h" +#include "exec/translation-block.h" #include "tcg-cpu.h" #include "cpu.h" +#include "exec/target_page.h" #include "internals.h" #include "pmu.h" #include "time_helper.h" @@ -29,11 +30,13 @@ #include "qemu/accel.h" #include "qemu/error-report.h" #include "qemu/log.h" -#include "hw/core/accel-cpu.h" -#include "hw/core/tcg-cpu-ops.h" +#include "accel/accel-cpu-target.h" +#include "accel/tcg/cpu-ops.h" #include "tcg/tcg.h" #ifndef CONFIG_USER_ONLY #include "hw/boards.h" +#include "system/tcg.h" +#include "exec/icount.h" #endif /* Hash that stores user set extensions */ @@ -90,6 +93,108 @@ static const char *cpu_priv_ver_to_str(int priv_ver) return priv_spec_str; } +static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return riscv_env_mmu_index(cpu_env(cs), ifetch); +} + +static TCGTBCPUState riscv_get_tb_cpu_state(CPUState *cs) +{ + CPURISCVState *env = cpu_env(cs); + RISCVCPU *cpu = env_archcpu(env); + RISCVExtStatus fs, vs; + uint32_t flags = 0; + bool pm_signext = riscv_cpu_virt_mem_enabled(env); + + if (cpu->cfg.ext_zve32x) { + /* + * If env->vl equals to VLMAX, we can use generic vector operation + * expanders (GVEC) to accerlate the vector operations. + * However, as LMUL could be a fractional number. The maximum + * vector size can be operated might be less than 8 bytes, + * which is not supported by GVEC. So we set vl_eq_vlmax flag to true + * only when maxsz >= 8 bytes. 
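/*
 * Illustrative standalone sketch (not part of the patch): when a block can
 * be flagged vl_eq_vlmax so GVEC expanders are usable.  The formula is the
 * architectural VLMAX = (VLEN / SEW) * LMUL; the fractional-LMUL case shows
 * why the extra "maxsz >= 8" guard is needed.
 */
#include <stdint.h>
#include <stdio.h>

/* vsew encodes SEW = 8 << vsew; lmul is signed, LMUL = 2^lmul */
static uint32_t vlmax(uint32_t vlen, uint32_t vsew, int lmul)
{
    int shift = (int)vsew + 3 - lmul;   /* VLEN / SEW * LMUL */
    return vlen >> shift;
}

int main(void)
{
    uint32_t vlen = 128;

    /* SEW=32, LMUL=1: VLMAX=4, operated size 4<<2 = 16 bytes -> GVEC ok */
    uint32_t v1 = vlmax(vlen, 2, 0);
    printf("VLMAX=%u maxsz=%u\n", v1, v1 << 2);

    /* SEW=8, LMUL=1/8: VLMAX=2, operated size 2 bytes (< 8) -> no GVEC */
    uint32_t v2 = vlmax(vlen, 0, -3);
    printf("VLMAX=%u maxsz=%u\n", v2, v2 << 0);
    return 0;
}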
+ */ + + /* lmul encoded as in DisasContext::lmul */ + int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3); + uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW); + uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul); + uint32_t maxsz = vlmax << vsew; + bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) && + (maxsz >= 8); + flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill); + flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew); + flags = FIELD_DP32(flags, TB_FLAGS, LMUL, + FIELD_EX64(env->vtype, VTYPE, VLMUL)); + flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); + flags = FIELD_DP32(flags, TB_FLAGS, VTA, + FIELD_EX64(env->vtype, VTYPE, VTA)); + flags = FIELD_DP32(flags, TB_FLAGS, VMA, + FIELD_EX64(env->vtype, VTYPE, VMA)); + flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0); + } else { + flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1); + } + + if (cpu_get_fcfien(env)) { + /* + * For Forward CFI, only the expectation of a lpad at + * the start of the block is tracked via env->elp. env->elp + * is turned on during jalr translation. + */ + flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp); + flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1); + } + + if (cpu_get_bcfien(env)) { + flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1); + } + +#ifdef CONFIG_USER_ONLY + fs = EXT_STATUS_DIRTY; + vs = EXT_STATUS_DIRTY; +#else + flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv); + + flags |= riscv_env_mmu_index(env, 0); + fs = get_field(env->mstatus, MSTATUS_FS); + vs = get_field(env->mstatus, MSTATUS_VS); + + if (env->virt_enabled) { + flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1); + /* + * Merge DISABLED and !DIRTY states using MIN. + * We will set both fields when dirtying. + */ + fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS)); + vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS)); + } + + /* With Zfinx, floating point is enabled/disabled by Smstateen. */ + if (!riscv_has_ext(env, RVF)) { + fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE) + ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED; + } + + if (cpu->cfg.debug && !icount_enabled()) { + flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled); + } +#endif + + flags = FIELD_DP32(flags, TB_FLAGS, FS, fs); + flags = FIELD_DP32(flags, TB_FLAGS, VS, vs); + flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl); + flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env)); + flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env)); + flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext); + + return (TCGTBCPUState){ + .pc = env->xl == MXL_RV32 ? 
env->pc & UINT32_MAX : env->pc, + .flags = flags + }; +} + static void riscv_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -129,17 +234,51 @@ static void riscv_restore_state_to_opc(CPUState *cs, env->pc = pc; } env->bins = data[1]; + env->excp_uw2 = data[2]; +} + +#ifndef CONFIG_USER_ONLY +static vaddr riscv_pointer_wrap(CPUState *cs, int mmu_idx, + vaddr result, vaddr base) +{ + CPURISCVState *env = cpu_env(cs); + uint32_t pm_len; + bool pm_signext; + + if (cpu_address_xl(env) == MXL_RV32) { + return (uint32_t)result; + } + + pm_len = riscv_pm_get_pmlen(riscv_pm_get_pmm(env)); + if (pm_len == 0) { + return result; + } + + pm_signext = riscv_cpu_virt_mem_enabled(env); + if (pm_signext) { + return sextract64(result, 0, 64 - pm_len); + } + return extract64(result, 0, 64 - pm_len); } +#endif + +const TCGCPUOps riscv_tcg_ops = { + .mttcg_supported = true, + .guest_default_memory_order = 0, -static const TCGCPUOps riscv_tcg_ops = { .initialize = riscv_translate_init, + .translate_code = riscv_translate_code, + .get_tb_cpu_state = riscv_get_tb_cpu_state, .synchronize_from_tb = riscv_cpu_synchronize_from_tb, .restore_state_to_opc = riscv_restore_state_to_opc, + .mmu_index = riscv_cpu_mmu_index, #ifndef CONFIG_USER_ONLY .tlb_fill = riscv_cpu_tlb_fill, + .pointer_wrap = riscv_pointer_wrap, .cpu_exec_interrupt = riscv_cpu_exec_interrupt, .cpu_exec_halt = riscv_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = riscv_cpu_do_interrupt, .do_transaction_failed = riscv_cpu_do_transaction_failed, .do_unaligned_access = riscv_cpu_do_unaligned_access, @@ -203,10 +342,20 @@ static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset) * All other named features are already enabled * in riscv_tcg_cpu_instance_init(). */ - if (feat_offset == CPU_CFG_OFFSET(ext_zic64b)) { + switch (feat_offset) { + case CPU_CFG_OFFSET(ext_zic64b): cpu->cfg.cbom_blocksize = 64; cpu->cfg.cbop_blocksize = 64; cpu->cfg.cboz_blocksize = 64; + break; + case CPU_CFG_OFFSET(ext_sha): + if (!cpu_misa_ext_is_user_set(RVH)) { + riscv_cpu_write_misa_bit(cpu, RVH, true); + } + /* fallthrough */ + case CPU_CFG_OFFSET(ext_ssstateen): + cpu->cfg.ext_smstateen = true; + break; } } @@ -302,7 +451,25 @@ static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) continue; } + /* + * cpu.debug = true is marked as 'sdtrig', priv spec 1.12. + * Skip this warning since existing CPUs with older priv + * spec and debug = true will be impacted. + */ + if (!strcmp(edata->name, "sdtrig")) { + continue; + } + isa_ext_update_enabled(cpu, edata->ext_enable_offset, false); + + /* + * Do not show user warnings for named features that users + * can't enable/disable in the command line. See commit + * 68c9e54bea for more info. 
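/*
 * Illustrative standalone sketch (not part of the patch): the effect of
 * riscv_pointer_wrap() under pointer masking, where the top PMLEN bits of a
 * 64-bit address are ignored and the effective address is either sign- or
 * zero-extended from the remaining low bits, depending on whether translated
 * (virtual-memory) addresses are in use.  PMLEN = 16 is just an example.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t zext_low(uint64_t v, unsigned bits)
{
    return v & ((UINT64_C(1) << bits) - 1);
}

static uint64_t sext_low(uint64_t v, unsigned bits)
{
    unsigned shift = 64 - bits;
    return (uint64_t)(((int64_t)(v << shift)) >> shift);
}

int main(void)
{
    unsigned pmlen = 16;
    uint64_t tagged = 0xabcdffffffff1000ull;   /* top 16 bits hold a tag */

    printf("zero-extended: 0x%016" PRIx64 "\n", zext_low(tagged, 64 - pmlen));
    printf("sign-extended: 0x%016" PRIx64 "\n", sext_low(tagged, 64 - pmlen));
    /* 0x0000ffffffff1000 vs 0xffffffffffff1000: bit 47 decides the top */
    return 0;
}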
+ */ + if (cpu_cfg_offset_is_named_feat(edata->ext_enable_offset)) { + continue; + } #ifndef CONFIG_USER_ONLY warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx " because privilege spec version does not match", @@ -330,11 +497,16 @@ static void riscv_cpu_update_named_features(RISCVCPU *cpu) cpu->cfg.has_priv_1_13 = true; } - /* zic64b is 1.12 or later */ cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 && cpu->cfg.cbop_blocksize == 64 && - cpu->cfg.cboz_blocksize == 64 && - cpu->cfg.has_priv_1_12; + cpu->cfg.cboz_blocksize == 64; + + cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen; + + cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) && + cpu->cfg.ext_ssstateen; + + cpu->cfg.ext_ziccrse = cpu->cfg.has_priv_1_11; } static void riscv_cpu_validate_g(RISCVCPU *cpu) @@ -554,7 +726,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) return; } - if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { + if (mcc->def->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { error_setg(errp, "Zcf extension is only relevant to RV32"); return; } @@ -618,11 +790,61 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) cpu->cfg.ext_zihpm = false; } + if (cpu->cfg.ext_zicfiss) { + if (!cpu->cfg.ext_zicsr) { + error_setg(errp, "zicfiss extension requires zicsr extension"); + return; + } + if (!riscv_has_ext(env, RVA)) { + error_setg(errp, "zicfiss extension requires A extension"); + return; + } + if (!riscv_has_ext(env, RVS)) { + error_setg(errp, "zicfiss extension requires S"); + return; + } + if (!cpu->cfg.ext_zimop) { + error_setg(errp, "zicfiss extension requires zimop extension"); + return; + } + if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) { + error_setg(errp, "zicfiss with zca requires zcmop extension"); + return; + } + } + if (!cpu->cfg.ext_zihpm) { cpu->cfg.pmu_mask = 0; cpu->pmu_avail_ctrs = 0; } + if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) { + error_setg(errp, "zicfilp extension requires zicsr extension"); + return; + } + + if (mcc->def->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) { + error_setg(errp, "svukte is not supported for RV32"); + return; + } + + if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) && + (!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) { + if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) || + cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) { + error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind"); + return; + } + cpu->cfg.ext_smctr = false; + cpu->cfg.ext_ssctr = false; + } + + if (cpu->cfg.ext_svrsw60t59b && + (!cpu->cfg.mmu || mcc->def->misa_mxl_max == MXL_RV32)) { + error_setg(errp, "svrsw60t59b is not supported on RV32 and MMU-less platforms"); + return; + } + /* * Disable isa extensions based on priv spec after we * validated and set everything we need. 
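/*
 * Illustrative standalone sketch (not part of the patch): the "named
 * feature" flags are recomputed from the effective configuration rather than
 * acting as user switches.  zic64b just means all three cache-block sizes
 * are 64 bytes, and sha additionally requires the H extension on top of the
 * ssstateen named feature.
 */
#include <stdbool.h>
#include <stdio.h>

struct cfg {
    int cbom_blocksize, cbop_blocksize, cboz_blocksize;
    bool has_rvh, ext_smstateen;
};

int main(void)
{
    struct cfg c = { 64, 64, 64, true, true };

    bool zic64b    = c.cbom_blocksize == 64 &&
                     c.cbop_blocksize == 64 &&
                     c.cboz_blocksize == 64;
    bool ssstateen = c.ext_smstateen;
    bool sha       = c.has_rvh && ssstateen;

    printf("zic64b=%d ssstateen=%d sha=%d\n", zic64b, ssstateen, sha);
    return 0;
}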
@@ -635,8 +857,9 @@ static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu, RISCVCPUProfile *profile, bool send_warn) { - int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported); + int satp_max = cpu->cfg.max_satp_mode; + assert(satp_max >= 0); if (profile->satp_mode > satp_max) { if (send_warn) { bool is_32bit = riscv_cpu_is_32bit(cpu); @@ -655,13 +878,24 @@ static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu, } #endif +static void riscv_cpu_check_parent_profile(RISCVCPU *cpu, + RISCVCPUProfile *profile, + RISCVCPUProfile *parent) +{ + if (!profile->present || !parent) { + return; + } + + profile->present = parent->present; +} + static void riscv_cpu_validate_profile(RISCVCPU *cpu, RISCVCPUProfile *profile) { CPURISCVState *env = &cpu->env; const char *warn_msg = "Profile %s mandates disabled extension %s"; bool send_warn = profile->user_set && profile->enabled; - bool parent_enabled, profile_impl = true; + bool profile_impl = true; int i; #ifndef CONFIG_USER_ONLY @@ -672,7 +906,7 @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu, #endif if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED && - profile->priv_spec != env->priv_ver) { + profile->priv_spec > env->priv_ver) { profile_impl = false; if (send_warn) { @@ -713,14 +947,10 @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu, } } - profile->enabled = profile_impl; + profile->present = profile_impl; - if (profile->parent != NULL) { - parent_enabled = object_property_get_bool(OBJECT(cpu), - profile->parent->name, - NULL); - profile->enabled = profile->enabled && parent_enabled; - } + riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent); + riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent); } static void riscv_cpu_validate_profiles(RISCVCPU *cpu) @@ -778,11 +1008,18 @@ static void cpu_enable_implied_rule(RISCVCPU *cpu, if (!enabled) { /* Enable the implied MISAs. */ if (rule->implied_misa_exts) { - riscv_cpu_set_misa_ext(env, - env->misa_ext | rule->implied_misa_exts); - for (i = 0; misa_bits[i] != 0; i++) { if (rule->implied_misa_exts & misa_bits[i]) { + /* + * If the user disabled the misa_bit do not re-enable it + * and do not apply any implied rules related to it. 
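/*
 * Illustrative standalone sketch (not part of the patch): the parent-profile
 * check.  A profile is only reported as "present" if everything it mandates
 * is implemented and all of its parent profiles are present as well.  The
 * struct below is a simplified stand-in for RISCVCPUProfile.
 */
#include <stdbool.h>
#include <stdio.h>

struct profile {
    const char *name;
    bool present;
    struct profile *u_parent;
    struct profile *s_parent;
};

static void check_parent(struct profile *p, struct profile *parent)
{
    if (!p->present || !parent) {
        return;
    }
    p->present = parent->present;
}

int main(void)
{
    struct profile u = { "rva22u64", true, NULL, NULL };
    struct profile s = { "rva22s64", true, &u,   NULL };

    u.present = false;          /* suppose a U-level extension is missing */
    check_parent(&s, s.u_parent);
    check_parent(&s, s.s_parent);
    printf("%s present: %d\n", s.name, s.present);   /* 0 */
    return 0;
}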
+ */ + if (cpu_misa_ext_is_user_set(misa_bits[i]) && + !(env->misa_ext & misa_bits[i])) { + continue; + } + + riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]); ir = g_hash_table_lookup(misa_ext_implied_rules, GUINT_TO_POINTER(misa_bits[i])); @@ -825,7 +1062,7 @@ static void cpu_enable_zc_implied_rules(RISCVCPU *cpu) cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true); cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true); - if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) { + if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) { cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); } } @@ -834,7 +1071,7 @@ static void cpu_enable_zc_implied_rules(RISCVCPU *cpu) if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); - if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) { + if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) { cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); } @@ -898,6 +1135,20 @@ void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp) error_propagate(errp, local_err); return; } +#ifndef CONFIG_USER_ONLY + if (cpu->cfg.pmu_mask) { + riscv_pmu_init(cpu, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } + + if (cpu->cfg.ext_sscofpmf) { + cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + riscv_pmu_timer_cb, cpu); + } + } +#endif } void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu) @@ -925,6 +1176,70 @@ static bool riscv_cpu_is_generic(Object *cpu_obj) return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; } +static void riscv_cpu_set_profile(RISCVCPU *cpu, + RISCVCPUProfile *profile, + bool enabled) +{ + int i, ext_offset; + + if (profile->u_parent != NULL) { + riscv_cpu_set_profile(cpu, profile->u_parent, enabled); + } + + if (profile->s_parent != NULL) { + riscv_cpu_set_profile(cpu, profile->s_parent, enabled); + } + + profile->enabled = enabled; + + if (profile->enabled) { + cpu->env.priv_ver = profile->priv_spec; + +#ifndef CONFIG_USER_ONLY + if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) { + object_property_set_bool(OBJECT(cpu), "mmu", true, NULL); + const char *satp_prop = satp_mode_str(profile->satp_mode, + riscv_cpu_is_32bit(cpu)); + object_property_set_bool(OBJECT(cpu), satp_prop, true, NULL); + } +#endif + } + + for (i = 0; misa_bits[i] != 0; i++) { + uint32_t bit = misa_bits[i]; + + if (!(profile->misa_ext & bit)) { + continue; + } + + if (bit == RVI && !profile->enabled) { + /* + * Disabling profiles will not disable the base + * ISA RV64I. 
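/*
 * Illustrative standalone sketch (not part of the patch): the implied-rule
 * change respects an explicit user opt-out, so a MISA bit the user disabled
 * on the command line (e.g. v=off) is not silently re-enabled just because
 * another enabled extension implies it.
 */
#include <stdbool.h>
#include <stdio.h>

#define RVV_BIT (1u << ('V' - 'A'))

int main(void)
{
    bool user_set_v   = true;    /* user passed an explicit v=off */
    unsigned misa_ext = 0;       /* so V is currently clear       */

    /* an implied rule wants to enable V; skip it if the user opted out */
    bool skip = user_set_v && !(misa_ext & RVV_BIT);
    if (!skip) {
        misa_ext |= RVV_BIT;
    }
    printf("V enabled: %d\n", !!(misa_ext & RVV_BIT));   /* 0 */
    return 0;
}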
+ */ + continue; + } + + cpu_misa_ext_add_user_opt(bit, profile->enabled); + riscv_cpu_write_misa_bit(cpu, bit, profile->enabled); + } + + for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) { + ext_offset = profile->ext_offsets[i]; + + if (profile->enabled) { + if (cpu_cfg_offset_is_named_feat(ext_offset)) { + riscv_cpu_enable_named_feat(cpu, ext_offset); + } + + cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset); + } + + cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled); + isa_ext_update_enabled(cpu, ext_offset, profile->enabled); + } +} + /* * We'll get here via the following path: * @@ -944,8 +1259,17 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp) } #ifndef CONFIG_USER_ONLY + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); + + if (mcc->def->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) { + /* Missing 128-bit aligned atomics */ + error_setg(errp, + "128-bit RISC-V currently does not work with Multi " + "Threaded TCG. Please use: -accel tcg,thread=single"); + return false; + } + CPURISCVState *env = &cpu->env; - Error *local_err = NULL; tcg_cflags_set(CPU(cs), CF_PCREL); @@ -953,19 +1277,6 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp) riscv_timer_init(cpu); } - if (cpu->cfg.pmu_mask) { - riscv_pmu_init(cpu, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return false; - } - - if (cpu->cfg.ext_sscofpmf) { - cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, - riscv_pmu_timer_cb, cpu); - } - } - /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */ if (riscv_has_ext(env, RVH)) { env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP; @@ -1050,7 +1361,6 @@ static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = { MISA_CFG(RVS, true), MISA_CFG(RVU, true), MISA_CFG(RVH, true), - MISA_CFG(RVJ, false), MISA_CFG(RVV, false), MISA_CFG(RVG, false), MISA_CFG(RVB, false), @@ -1096,7 +1406,6 @@ static void cpu_set_profile(Object *obj, Visitor *v, const char *name, RISCVCPUProfile *profile = opaque; RISCVCPU *cpu = RISCV_CPU(obj); bool value; - int i, ext_offset; if (riscv_cpu_is_vendor(obj)) { error_setg(errp, "Profile %s is not available for vendor CPUs", @@ -1115,59 +1424,8 @@ static void cpu_set_profile(Object *obj, Visitor *v, const char *name, } profile->user_set = true; - profile->enabled = value; - - if (profile->parent != NULL) { - object_property_set_bool(obj, profile->parent->name, - profile->enabled, NULL); - } - - if (profile->enabled) { - cpu->env.priv_ver = profile->priv_spec; - } - -#ifndef CONFIG_USER_ONLY - if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) { - object_property_set_bool(obj, "mmu", true, NULL); - const char *satp_prop = satp_mode_str(profile->satp_mode, - riscv_cpu_is_32bit(cpu)); - object_property_set_bool(obj, satp_prop, profile->enabled, NULL); - } -#endif - - for (i = 0; misa_bits[i] != 0; i++) { - uint32_t bit = misa_bits[i]; - - if (!(profile->misa_ext & bit)) { - continue; - } - - if (bit == RVI && !profile->enabled) { - /* - * Disabling profiles will not disable the base - * ISA RV64I. 
- */ - continue; - } - - cpu_misa_ext_add_user_opt(bit, profile->enabled); - riscv_cpu_write_misa_bit(cpu, bit, profile->enabled); - } - - for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) { - ext_offset = profile->ext_offsets[i]; - - if (profile->enabled) { - if (cpu_cfg_offset_is_named_feat(ext_offset)) { - riscv_cpu_enable_named_feat(cpu, ext_offset); - } - - cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset); - } - cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled); - isa_ext_update_enabled(cpu, ext_offset, profile->enabled); - } + riscv_cpu_set_profile(cpu, profile, value); } static void cpu_get_profile(Object *obj, Visitor *v, const char *name, @@ -1182,7 +1440,7 @@ static void cpu_get_profile(Object *obj, Visitor *v, const char *name, static void riscv_cpu_add_profiles(Object *cpu_obj) { for (int i = 0; riscv_profiles[i] != NULL; i++) { - const RISCVCPUProfile *profile = riscv_profiles[i]; + RISCVCPUProfile *profile = riscv_profiles[i]; object_property_add(cpu_obj, profile->name, "bool", cpu_get_profile, cpu_set_profile, @@ -1194,30 +1452,11 @@ static void riscv_cpu_add_profiles(Object *cpu_obj) * case. */ if (profile->enabled) { - object_property_set_bool(cpu_obj, profile->name, true, NULL); + riscv_cpu_set_profile(RISCV_CPU(cpu_obj), profile, true); } } } -static bool cpu_ext_is_deprecated(const char *ext_name) -{ - return isupper(ext_name[0]); -} - -/* - * String will be allocated in the heap. Caller is responsible - * for freeing it. - */ -static char *cpu_ext_to_lower(const char *ext_name) -{ - char *ret = g_malloc0(strlen(ext_name) + 1); - - strcpy(ret, ext_name); - ret[0] = tolower(ret[0]); - - return ret; -} - static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { @@ -1230,13 +1469,6 @@ static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, return; } - if (cpu_ext_is_deprecated(multi_ext_cfg->name)) { - g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name); - - warn_report("CPU property '%s' is deprecated. 
Please use '%s' instead", - multi_ext_cfg->name, lower); - } - cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value); prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset); @@ -1272,14 +1504,13 @@ static void cpu_add_multi_ext_prop(Object *cpu_obj, const RISCVCPUMultiExtConfig *multi_cfg) { bool generic_cpu = riscv_cpu_is_generic(cpu_obj); - bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name); object_property_add(cpu_obj, multi_cfg->name, "bool", cpu_get_multi_ext_cfg, cpu_set_multi_ext_cfg, NULL, (void *)multi_cfg); - if (!generic_cpu || deprecated_ext) { + if (!generic_cpu) { return; } @@ -1322,8 +1553,6 @@ static void riscv_cpu_add_user_properties(Object *obj) riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts); riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts); - riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts); - riscv_cpu_add_profiles(obj); } @@ -1337,8 +1566,8 @@ static void riscv_init_max_cpu_extensions(Object *obj) CPURISCVState *env = &cpu->env; const RISCVCPUMultiExtConfig *prop; - /* Enable RVG, RVJ and RVV that are disabled by default */ - riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVJ | RVV); + /* Enable RVG and RVV that are disabled by default */ + riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV); for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { isa_ext_update_enabled(cpu, prop->offset, true); @@ -1365,6 +1594,25 @@ static void riscv_init_max_cpu_extensions(Object *obj) if (env->misa_mxl != MXL_RV32) { isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false); + } else { + isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_svrsw60t59b), false); + } + + /* + * TODO: ext_smrnmi requires OpenSBI changes that our current + * image does not have. Disable it for now. + */ + if (cpu->cfg.ext_smrnmi) { + isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false); + } + + /* + * TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup + * to avoid generating a double trap. OpenSBI does not currently support it, + * disable it for now. + */ + if (cpu->cfg.ext_smdbltrp) { + isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false); } } @@ -1396,24 +1644,10 @@ static void riscv_tcg_cpu_instance_init(CPUState *cs) } } -static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) -{ - /* - * All cpus use the same set of operations. 
- */ - cc->tcg_ops = &riscv_tcg_ops; -} - -static void riscv_tcg_cpu_class_init(CPUClass *cc) -{ - cc->init_accel_cpu = riscv_tcg_cpu_init_ops; -} - -static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data) +static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, const void *data) { AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); - acc->cpu_class_init = riscv_tcg_cpu_class_init; acc->cpu_instance_init = riscv_tcg_cpu_instance_init; acc->cpu_target_realize = riscv_tcg_cpu_realize; } diff --git a/target/riscv/tcg/tcg-cpu.h b/target/riscv/tcg/tcg-cpu.h index ce94253fe42..a23716a5acf 100644 --- a/target/riscv/tcg/tcg-cpu.h +++ b/target/riscv/tcg/tcg-cpu.h @@ -26,6 +26,8 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp); void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp); bool riscv_cpu_tcg_compatible(RISCVCPU *cpu); +extern const TCGCPUOps riscv_tcg_ops; + struct DisasContext; struct RISCVCPUConfig; typedef struct RISCVDecoder { diff --git a/target/riscv/th_csr.c b/target/riscv/th_csr.c index 6c970d4e813..49eb7bbab5f 100644 --- a/target/riscv/th_csr.c +++ b/target/riscv/th_csr.c @@ -27,12 +27,6 @@ #define TH_SXSTATUS_MAEE BIT(21) #define TH_SXSTATUS_THEADISAEE BIT(22) -typedef struct { - int csrno; - int (*insertion_test)(RISCVCPU *cpu); - riscv_csr_operations csr_ops; -} riscv_csr; - static RISCVException smode(CPURISCVState *env, int csrno) { if (riscv_has_ext(env, RVS)) { @@ -42,13 +36,9 @@ static RISCVException smode(CPURISCVState *env, int csrno) return RISCV_EXCP_ILLEGAL_INST; } -static int test_thead_mvendorid(RISCVCPU *cpu) +static bool test_thead_mvendorid(RISCVCPU *cpu) { - if (cpu->cfg.mvendorid != THEAD_VENDOR_ID) { - return -1; - } - - return 0; + return cpu->cfg.mvendorid == THEAD_VENDOR_ID; } static RISCVException read_th_sxstatus(CPURISCVState *env, int csrno, @@ -59,21 +49,11 @@ static RISCVException read_th_sxstatus(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static riscv_csr th_csr_list[] = { +const RISCVCSR th_csr_list[] = { { .csrno = CSR_TH_SXSTATUS, .insertion_test = test_thead_mvendorid, .csr_ops = { "th.sxstatus", smode, read_th_sxstatus } - } + }, + { } }; - -void th_register_custom_csrs(RISCVCPU *cpu) -{ - for (size_t i = 0; i < ARRAY_SIZE(th_csr_list); i++) { - int csrno = th_csr_list[i].csrno; - riscv_csr_operations *csr_ops = &th_csr_list[i].csr_ops; - if (!th_csr_list[i].insertion_test(cpu)) { - riscv_set_csr_ops(csrno, csr_ops); - } - } -} diff --git a/target/riscv/time_helper.c b/target/riscv/time_helper.c index 8d245bed3ae..400e9173542 100644 --- a/target/riscv/time_helper.c +++ b/target/riscv/time_helper.c @@ -46,8 +46,23 @@ void riscv_timer_write_timecmp(CPURISCVState *env, QEMUTimer *timer, { uint64_t diff, ns_diff, next; RISCVAclintMTimerState *mtimer = env->rdtime_fn_arg; - uint32_t timebase_freq = mtimer->timebase_freq; - uint64_t rtc_r = env->rdtime_fn(env->rdtime_fn_arg) + delta; + uint32_t timebase_freq; + uint64_t rtc_r; + + if (!riscv_cpu_cfg(env)->ext_sstc || !env->rdtime_fn || + !env->rdtime_fn_arg || !get_field(env->menvcfg, MENVCFG_STCE)) { + /* S/VS Timer IRQ depends on sstc extension, rdtime_fn(), and STCE. */ + return; + } + + if (timer_irq == MIP_VSTIP && + (!riscv_has_ext(env, RVH) || !get_field(env->henvcfg, HENVCFG_STCE))) { + /* VS Timer IRQ also depends on RVH and henvcfg.STCE. 
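/*
 * Illustrative standalone sketch (not part of the patch): consuming a
 * sentinel-terminated custom-CSR table now that the vendor file only exports
 * the list.  Walk entries until the empty terminator and register those
 * whose insertion test accepts the CPU.  Types, CSR numbers and the
 * registration step are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct cpu { unsigned mvendorid; };

struct csr_entry {
    int csrno;
    const char *name;
    bool (*insertion_test)(struct cpu *);
};

#define THEAD_VENDOR_ID 0x5b7

static bool test_thead(struct cpu *c)
{
    return c->mvendorid == THEAD_VENDOR_ID;
}

static const struct csr_entry csr_list[] = {
    { 0x5c0, "th.sxstatus", test_thead },
    { }                                     /* sentinel */
};

int main(void)
{
    struct cpu c = { THEAD_VENDOR_ID };

    for (const struct csr_entry *e = csr_list; e->csrno; e++) {
        if (e->insertion_test(&c)) {
            printf("registering csr 0x%x (%s)\n", e->csrno, e->name);
        }
    }
    return 0;
}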
*/ + return; + } + + timebase_freq = mtimer->timebase_freq; + rtc_r = env->rdtime_fn(env->rdtime_fn_arg) + delta; if (timecmp <= rtc_r) { /* @@ -92,6 +107,7 @@ void riscv_timer_write_timecmp(CPURISCVState *env, QEMUTimer *timer, * equals UINT64_MAX. */ if (timecmp == UINT64_MAX) { + timer_del(timer); return; } @@ -124,6 +140,52 @@ void riscv_timer_write_timecmp(CPURISCVState *env, QEMUTimer *timer, timer_mod(timer, next); } +/* + * When disabling xenvcfg.STCE, the S/VS Timer may be disabled at the same time. + * It is safe to call this function regardless of whether the timer has been + * deleted or not. timer_del() will do nothing if the timer has already + * been deleted. + */ +static void riscv_timer_disable_timecmp(CPURISCVState *env, QEMUTimer *timer, + uint32_t timer_irq) +{ + /* Disable S-mode Timer IRQ and HW-based STIP */ + if ((timer_irq == MIP_STIP) && !get_field(env->menvcfg, MENVCFG_STCE)) { + riscv_cpu_update_mip(env, timer_irq, BOOL_TO_MASK(0)); + timer_del(timer); + return; + } + + /* Disable VS-mode Timer IRQ and HW-based VSTIP */ + if ((timer_irq == MIP_VSTIP) && + (!get_field(env->menvcfg, MENVCFG_STCE) || + !get_field(env->henvcfg, HENVCFG_STCE))) { + env->vstime_irq = 0; + riscv_cpu_update_mip(env, 0, BOOL_TO_MASK(0)); + timer_del(timer); + return; + } +} + +/* Enable or disable S/VS-mode Timer when xenvcfg.STCE is changed */ +void riscv_timer_stce_changed(CPURISCVState *env, bool is_m_mode, bool enable) +{ + if (enable) { + riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp, + env->htimedelta, MIP_VSTIP); + } else { + riscv_timer_disable_timecmp(env, env->vstimer, MIP_VSTIP); + } + + if (is_m_mode) { + if (enable) { + riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP); + } else { + riscv_timer_disable_timecmp(env, env->stimer, MIP_STIP); + } + } +} + void riscv_timer_init(RISCVCPU *cpu) { CPURISCVState *env; diff --git a/target/riscv/time_helper.h b/target/riscv/time_helper.h index cacd79b80cd..af1f634f890 100644 --- a/target/riscv/time_helper.h +++ b/target/riscv/time_helper.h @@ -25,6 +25,7 @@ void riscv_timer_write_timecmp(CPURISCVState *env, QEMUTimer *timer, uint64_t timecmp, uint64_t delta, uint32_t timer_irq); +void riscv_timer_stce_changed(CPURISCVState *env, bool is_m_mode, bool enable); void riscv_timer_init(RISCVCPU *cpu); #endif diff --git a/target/riscv/trace-events b/target/riscv/trace-events index 49ec4d3b7d7..93837f82a19 100644 --- a/target/riscv/trace-events +++ b/target/riscv/trace-events @@ -9,3 +9,6 @@ pmpaddr_csr_write(uint64_t mhartid, uint32_t addr_index, uint64_t val) "hart %" mseccfg_csr_read(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": read mseccfg, val: 0x%" PRIx64 mseccfg_csr_write(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": write mseccfg, val: 0x%" PRIx64 + +# op_helper.c +riscv_exception(uint32_t exception, const char *desc, uint64_t epc) "%u (%s) on epc 0x%"PRIx64"" diff --git a/target/riscv/translate.c b/target/riscv/translate.c index acba90f1702..9ddef2d6e2a 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -20,11 +20,11 @@ #include "qemu/log.h" #include "cpu.h" #include "tcg/tcg-op.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" - +#include "exec/target_page.h" #include "exec/translator.h" +#include "exec/translation-block.h" #include "exec/log.h" #include "semihosting/semihost.h" @@ -41,9 +41,6 @@ static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart; static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */ 
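/*
 * Illustrative standalone sketch (not part of the patch): the gating added
 * to the sstc timer path.  The S-mode timer may only be armed when the sstc
 * extension, an rdtime backend and menvcfg.STCE are all in place; the
 * VS-mode timer additionally needs the H extension and henvcfg.STCE.
 */
#include <stdbool.h>
#include <stdio.h>

struct timer_env {
    bool ext_sstc, has_rdtime_fn, has_rvh;
    bool menvcfg_stce, henvcfg_stce;
};

static bool can_arm_s_timer(const struct timer_env *e)
{
    return e->ext_sstc && e->has_rdtime_fn && e->menvcfg_stce;
}

static bool can_arm_vs_timer(const struct timer_env *e)
{
    return can_arm_s_timer(e) && e->has_rvh && e->henvcfg_stce;
}

int main(void)
{
    struct timer_env e = {
        .ext_sstc = true, .has_rdtime_fn = true, .has_rvh = true,
        .menvcfg_stce = true, .henvcfg_stce = false,
    };

    printf("S timer: %d, VS timer: %d\n",
           can_arm_s_timer(&e), can_arm_vs_timer(&e));   /* 1, 0 */
    return 0;
}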
static TCGv load_res; static TCGv load_val; -/* globals for PM CSRs */ -static TCGv pm_mask; -static TCGv pm_base; /* * If an operation is being performed on less than TARGET_LONG_BITS, @@ -105,9 +102,9 @@ typedef struct DisasContext { bool vl_eq_vlmax; CPUState *cs; TCGv zero; - /* PointerMasking extension */ - bool pm_mask_enabled; - bool pm_base_enabled; + /* actual address width */ + uint8_t addr_xl; + bool addr_signed; /* Ztso */ bool ztso; /* Use icount trigger for native debug */ @@ -116,6 +113,11 @@ typedef struct DisasContext { bool frm_valid; bool insn_start_updated; const GPtrArray *decoders; + /* zicfilp extension. fcfi_enabled, lp expected or not */ + bool fcfi_enabled; + bool fcfi_lp_expected; + /* zicfiss extension, if shadow stack was enabled during TB gen */ + bool bcfi_enabled; } DisasContext; static inline bool has_ext(DisasContext *ctx, uint32_t ext) @@ -139,6 +141,8 @@ static inline bool has_ext(DisasContext *ctx, uint32_t ext) #define get_address_xl(ctx) ((ctx)->address_xl) #endif +#define mxl_memop(ctx) ((get_xl(ctx) + 1) | MO_TE) + /* The word size for this machine mode. */ static inline int __attribute__((unused)) get_xlen(DisasContext *ctx) { @@ -204,11 +208,12 @@ static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in) tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan); } -static void decode_save_opc(DisasContext *ctx) +static void decode_save_opc(DisasContext *ctx, target_ulong excp_uw2) { assert(!ctx->insn_start_updated); ctx->insn_start_updated = true; tcg_set_insn_start_param(ctx->base.insn_start, 1, ctx->opcode); + tcg_set_insn_start_param(ctx->base.insn_start, 2, excp_uw2); } static void gen_pc_plus_diff(TCGv target, DisasContext *ctx, @@ -236,7 +241,7 @@ static void gen_update_pc(DisasContext *ctx, target_long diff) ctx->pc_save = ctx->base.pc_next + diff; } -static void generate_exception(DisasContext *ctx, int excp) +static void generate_exception(DisasContext *ctx, RISCVException excp) { gen_update_pc(ctx, 0); gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp)); @@ -555,12 +560,54 @@ static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t) } } +#ifndef CONFIG_USER_ONLY +/* + * Direct calls + * - jal x1; + * - jal x5; + * - c.jal. + * - cm.jalt. + * + * Direct jumps + * - jal x0; + * - c.j; + * - cm.jt. + * + * Other direct jumps + * - jal rd where rd != x1 and rd != x5 and rd != x0; + */ +static void gen_ctr_jal(DisasContext *ctx, int rd, target_ulong imm) +{ + TCGv dest = tcg_temp_new(); + TCGv src = tcg_temp_new(); + TCGv type; + + /* + * If rd is x1 or x5 link registers, treat this as direct call otherwise + * its a direct jump. 
+ */ + if (rd == 1 || rd == 5) { + type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_CALL); + } else if (rd == 0) { + type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_JUMP); + } else { + type = tcg_constant_tl(CTRDATA_TYPE_OTHER_DIRECT_JUMP); + } + + gen_pc_plus_diff(dest, ctx, imm); + gen_pc_plus_diff(src, ctx, 0); + gen_helper_ctr_add_entry(tcg_env, src, dest, type); +} +#endif + static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) { TCGv succ_pc = dest_gpr(ctx, rd); /* check misaligned: */ - if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) { + if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr, + ctx->priv_ver, + ctx->misa_ext)) { if ((imm & 0x3) != 0) { TCGv target_pc = tcg_temp_new(); gen_pc_plus_diff(target_pc, ctx, imm); @@ -569,6 +616,12 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) } } +#ifndef CONFIG_USER_ONLY + if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) { + gen_ctr_jal(ctx, rd, imm); + } +#endif + gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len); gen_set_gpr(ctx, rd, succ_pc); @@ -583,13 +636,10 @@ static TCGv get_address(DisasContext *ctx, int rs1, int imm) TCGv src1 = get_gpr(ctx, rs1, EXT_NONE); tcg_gen_addi_tl(addr, src1, imm); - if (ctx->pm_mask_enabled) { - tcg_gen_andc_tl(addr, addr, pm_mask); - } else if (get_address_xl(ctx) == MXL_RV32) { - tcg_gen_ext32u_tl(addr, addr); - } - if (ctx->pm_base_enabled) { - tcg_gen_or_tl(addr, addr, pm_base); + if (ctx->addr_signed) { + tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl); + } else { + tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl); } return addr; @@ -602,14 +652,12 @@ static TCGv get_address_indexed(DisasContext *ctx, int rs1, TCGv offs) TCGv src1 = get_gpr(ctx, rs1, EXT_NONE); tcg_gen_add_tl(addr, src1, offs); - if (ctx->pm_mask_enabled) { - tcg_gen_andc_tl(addr, addr, pm_mask); - } else if (get_xl(ctx) == MXL_RV32) { - tcg_gen_ext32u_tl(addr, addr); - } - if (ctx->pm_base_enabled) { - tcg_gen_or_tl(addr, addr, pm_base); + if (ctx->addr_signed) { + tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl); + } else { + tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl); } + return addr; } @@ -694,7 +742,7 @@ static void gen_set_rm(DisasContext *ctx, int rm) } /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */ - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_set_rounding_mode(tcg_env, tcg_constant_i32(rm)); } @@ -707,7 +755,7 @@ static void gen_set_rm_chkfrm(DisasContext *ctx, int rm) ctx->frm_valid = true; /* The helper may raise ILLEGAL_INSN -- record binv for unwind. 
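/*
 * Illustrative standalone sketch (not part of the patch): the CTR entry-type
 * classification used for jal.  The destination register decides whether the
 * recorded transfer is a direct call (link registers x1/x5), a plain direct
 * jump (x0), or an "other" direct jump.
 */
#include <stdio.h>

enum ctr_type { CTR_DIRECT_CALL, CTR_DIRECT_JUMP, CTR_OTHER_DIRECT_JUMP };

static enum ctr_type classify_jal(int rd)
{
    if (rd == 1 || rd == 5) {       /* ra or t0 used as link register */
        return CTR_DIRECT_CALL;
    } else if (rd == 0) {
        return CTR_DIRECT_JUMP;
    }
    return CTR_OTHER_DIRECT_JUMP;
}

int main(void)
{
    printf("jal ra, f -> %d\n", classify_jal(1));    /* CTR_DIRECT_CALL       */
    printf("j   label -> %d\n", classify_jal(0));    /* CTR_DIRECT_JUMP       */
    printf("jal s2, f -> %d\n", classify_jal(18));   /* CTR_OTHER_DIRECT_JUMP */
    return 0;
}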
*/ - decode_save_opc(ctx); + decode_save_opc(ctx, 0); gen_helper_set_rounding_mode_chkfrm(tcg_env, tcg_constant_i32(rm)); } @@ -1091,7 +1139,7 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a, mop |= MO_ALIGN; } - decode_save_opc(ctx); + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); src1 = get_address(ctx, a->rs1, 0); func(dest, src1, src2, ctx->mem_idx, mop); @@ -1105,7 +1153,7 @@ static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop) TCGv src1 = get_address(ctx, a->rs1, 0); TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); - decode_save_opc(ctx); + decode_save_opc(ctx, RISCV_UW2_ALWAYS_STORE_AMO); tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop); gen_set_gpr(ctx, a->rd, dest); @@ -1121,6 +1169,8 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) return translator_ldl(env, &ctx->base, pc); } +#define SS_MMU_INDEX(ctx) (ctx->mem_idx | MMU_IDX_SS_WRITE) + /* Include insn module translation function */ #include "insn_trans/trans_rvi.c.inc" #include "insn_trans/trans_rvm.c.inc" @@ -1151,6 +1201,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) #include "decode-insn16.c.inc" #include "insn_trans/trans_rvzce.c.inc" #include "insn_trans/trans_rvzcmop.c.inc" +#include "insn_trans/trans_rvzicfiss.c.inc" /* Include decoders for factored-out extensions */ #include "decode-XVentanaCondOps.c.inc" @@ -1158,11 +1209,6 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) /* The specification allows for longer insns, but not supported by qemu. */ #define MAX_INSN_LEN 4 -static inline int insn_len(uint16_t first_word) -{ - return (first_word & 3) == 3 ? 4 : 2; -} - const RISCVDecoder decoder_table[] = { { always_true_p, decode_insn32 }, { has_xthead_p, decode_xthead}, @@ -1171,13 +1217,35 @@ const RISCVDecoder decoder_table[] = { const size_t decoder_table_size = ARRAY_SIZE(decoder_table); -static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode) +static void decode_opc(CPURISCVState *env, DisasContext *ctx) { + uint32_t opcode; + bool pc_is_4byte_align = ((ctx->base.pc_next % 4) == 0); + ctx->virt_inst_excp = false; - ctx->cur_insn_len = insn_len(opcode); + if (pc_is_4byte_align) { + /* + * Load 4 bytes at once to make instruction fetch atomically. + * + * Note: When pc is 4-byte aligned, 4-byte instruction wouldn't be + * across pages. We could preload 4 bytes instruction no matter + * real one is 2 or 4 bytes. Instruction preload wouldn't trigger + * additional page fault. + */ + opcode = translator_ldl(env, &ctx->base, ctx->base.pc_next); + } else { + /* + * For unaligned pc, instruction preload may trigger additional + * page fault so we only load 2 bytes here. 
+ */ + opcode = (uint32_t) translator_lduw(env, &ctx->base, ctx->base.pc_next); + } + ctx->ol = ctx->xl; + + ctx->cur_insn_len = insn_len((uint16_t)opcode); /* Check for compressed insn */ if (ctx->cur_insn_len == 2) { - ctx->opcode = opcode; + ctx->opcode = (uint16_t)opcode; /* * The Zca extension is added as way to refer to instructions in the C * extension that do not include the floating-point loads and stores @@ -1187,15 +1255,17 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode) return; } } else { - uint32_t opcode32 = opcode; - opcode32 = deposit32(opcode32, 16, 16, - translator_lduw(env, &ctx->base, - ctx->base.pc_next + 2)); - ctx->opcode = opcode32; + if (!pc_is_4byte_align) { + /* Load last 2 bytes of instruction here */ + opcode = deposit32(opcode, 16, 16, + translator_lduw(env, &ctx->base, + ctx->base.pc_next + 2)); + } + ctx->opcode = opcode; for (guint i = 0; i < ctx->decoders->len; ++i) { riscv_cpu_decode_fn func = g_ptr_array_index(ctx->decoders, i); - if (func(ctx, opcode32)) { + if (func(ctx, opcode)) { return; } } @@ -1230,14 +1300,23 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s; ctx->vstart_eq_zero = FIELD_EX32(tb_flags, TB_FLAGS, VSTART_EQ_ZERO); ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX); - ctx->misa_mxl_max = mcc->misa_mxl_max; + ctx->misa_mxl_max = mcc->def->misa_mxl_max; ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL); ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL); ctx->cs = cs; - ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED); - ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED); + if (get_xl(ctx) == MXL_RV32) { + ctx->addr_xl = 32; + ctx->addr_signed = false; + } else { + int pm_pmm = FIELD_EX32(tb_flags, TB_FLAGS, PM_PMM); + ctx->addr_xl = 64 - riscv_pm_get_pmlen(pm_pmm); + ctx->addr_signed = FIELD_EX32(tb_flags, TB_FLAGS, PM_SIGNEXTEND); + } ctx->ztso = cpu->cfg.ext_ztso; ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER); + ctx->bcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, BCFI_ENABLED); + ctx->fcfi_lp_expected = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_LP_EXPECTED); + ctx->fcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_ENABLED); ctx->zero = tcg_constant_tl(0); ctx->virt_inst_excp = false; ctx->decoders = cpu->decoders; @@ -1256,7 +1335,7 @@ static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) pc_next &= ~TARGET_PAGE_MASK; } - tcg_gen_insn_start(pc_next, 0); + tcg_gen_insn_start(pc_next, 0, 0); ctx->insn_start_updated = false; } @@ -1264,15 +1343,31 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *ctx = container_of(dcbase, DisasContext, base); CPURISCVState *env = cpu_env(cpu); - uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next); - ctx->ol = ctx->xl; - decode_opc(env, ctx, opcode16); + decode_opc(env, ctx); ctx->base.pc_next += ctx->cur_insn_len; + /* + * If 'fcfi_lp_expected' is still true after processing the instruction, + * then we did not see an 'lpad' instruction, and must raise an exception. + * Insert code to raise the exception at the start of the insn; any other + * code the insn may have emitted will be deleted as dead code following + * the noreturn exception + */ + if (ctx->fcfi_lp_expected) { + /* Emit after insn_start, i.e. before the op following insn_start. 
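/*
 * Illustrative standalone sketch (not part of the patch): the fetch strategy
 * in decode_opc().  The low two bits of the first halfword give the
 * instruction length (0b11 -> 32-bit, anything else -> 16-bit compressed),
 * and a 4-byte-aligned pc can be fetched with a single 32-bit load because
 * such a load never straddles a page boundary.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int insn_len(uint16_t first_word)
{
    return (first_word & 3) == 3 ? 4 : 2;
}

static bool can_preload_word(uint64_t pc)
{
    /*
     * Page sizes are multiples of 4, so a naturally aligned 32-bit fetch
     * stays inside one page; an unaligned pc has to be fetched in halves.
     */
    return (pc % 4) == 0;
}

int main(void)
{
    printf("len(0x0001)=%d len(0x0003)=%d\n",
           insn_len(0x0001), insn_len(0x0003));            /* 2 and 4 */
    printf("preload at 0x1000: %d, at 0x1ffe: %d\n",
           can_preload_word(0x1000), can_preload_word(0x1ffe));   /* 1 and 0 */
    return 0;
}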
*/ + tcg_ctx->emit_before_op = QTAILQ_NEXT(ctx->base.insn_start, link); + tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL), + tcg_env, offsetof(CPURISCVState, sw_check_code)); + gen_helper_raise_exception(tcg_env, + tcg_constant_i32(RISCV_EXCP_SW_CHECK)); + tcg_ctx->emit_before_op = NULL; + ctx->base.is_jmp = DISAS_NORETURN; + } + /* Only the first insn within a TB is allowed to cross a page boundary. */ if (ctx->base.is_jmp == DISAS_NEXT) { - if (ctx->itrigger || !is_same_page(&ctx->base, ctx->base.pc_next)) { + if (ctx->itrigger || !translator_is_same_page(&ctx->base, ctx->base.pc_next)) { ctx->base.is_jmp = DISAS_TOO_MANY; } else { unsigned page_ofs = ctx->base.pc_next & ~TARGET_PAGE_MASK; @@ -1282,7 +1377,7 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) translator_lduw(env, &ctx->base, ctx->base.pc_next); int len = insn_len(next_insn); - if (!is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) { + if (!translator_is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) { ctx->base.is_jmp = DISAS_TOO_MANY; } } @@ -1313,8 +1408,8 @@ static const TranslatorOps riscv_tr_ops = { .tb_stop = riscv_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void riscv_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext ctx; @@ -1353,9 +1448,4 @@ void riscv_translate_init(void) "load_res"); load_val = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_val), "load_val"); - /* Assign PM CSRs to tcg globals */ - pm_mask = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmmask), - "pmmask"); - pm_base = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmbase), - "pmbase"); } diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c index f7423df2264..9a0d9b4f536 100644 --- a/target/riscv/vcrypto_helper.c +++ b/target/riscv/vcrypto_helper.c @@ -26,7 +26,6 @@ #include "crypto/aes-round.h" #include "crypto/sm4.h" #include "exec/memop.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "internals.h" #include "vector_internals.h" @@ -222,7 +221,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key) uint32_t total_elems = vext_get_total_elems(env, desc, 4); \ uint32_t vta = vext_vta(desc); \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \ AESState round_key; \ @@ -248,7 +247,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key) uint32_t total_elems = vext_get_total_elems(env, desc, 4); \ uint32_t vta = vext_vta(desc); \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \ AESState round_key; \ @@ -309,7 +308,7 @@ void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm, uint32_t total_elems = vext_get_total_elems(env, desc, 4); uint32_t vta = vext_vta(desc); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, vl); uimm &= 0b1111; if (uimm > 10 || uimm == 0) { @@ -357,7 +356,7 @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm, uint32_t total_elems = vext_get_total_elems(env, desc, 4); uint32_t vta = vext_vta(desc); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, vl); uimm &= 0b1111; if (uimm > 14 || uimm < 2) { @@ -465,7 +464,7 @@ void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState 
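/*
 * Illustrative standalone sketch (not part of the patch): the forward-CFI
 * expectation tracked for Zicfilp.  An indirect jump arms an "lpad expected"
 * flag; if the next translated instruction is not a landing pad, the front
 * end raises a software-check exception instead of executing it.  The real
 * extension only requires a landing pad for jumps through non-exempt
 * registers; this sketch ignores that detail.
 */
#include <stdbool.h>
#include <stdio.h>

enum insn { INSN_LPAD, INSN_ADD, INSN_JALR };

static bool lp_expected;

static const char *translate(enum insn i)
{
    if (lp_expected && i != INSN_LPAD) {
        lp_expected = false;
        return "raise software-check (missing lpad)";
    }
    lp_expected = (i == INSN_JALR);     /* indirect jump arms the check */
    return "ok";
}

int main(void)
{
    printf("%s\n", translate(INSN_JALR));   /* ok, arms expectation  */
    printf("%s\n", translate(INSN_LPAD));   /* ok, expectation met   */
    printf("%s\n", translate(INSN_JALR));   /* ok, arms expectation  */
    printf("%s\n", translate(INSN_ADD));    /* software-check raised */
    return 0;
}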
*env, uint32_t total_elems; uint32_t vta = vext_vta(desc); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { if (sew == MO_32) { @@ -582,7 +581,7 @@ void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env, uint32_t total_elems; uint32_t vta = vext_vta(desc); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i, @@ -602,7 +601,7 @@ void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env, uint32_t total_elems; uint32_t vta = vext_vta(desc); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i, @@ -622,7 +621,7 @@ void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env, uint32_t total_elems; uint32_t vta = vext_vta(desc); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i, @@ -642,7 +641,7 @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env, uint32_t total_elems; uint32_t vta = vext_vta(desc); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i, @@ -676,7 +675,7 @@ void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr, uint32_t *vs1 = vs1_vptr; uint32_t *vs2 = vs2_vptr; - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); for (int i = env->vstart / 8; i < env->vl / 8; i++) { uint32_t w[24]; @@ -777,7 +776,7 @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm, uint32_t *vs2 = vs2_vptr; uint32_t v1[8], v2[8], v3[8]; - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); for (int i = env->vstart / 8; i < env->vl / 8; i++) { for (int k = 0; k < 8; k++) { @@ -802,7 +801,7 @@ void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr, uint32_t vta = vext_vta(desc); uint32_t total_elems = vext_get_total_elems(env, desc, 4); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { uint64_t Y[2] = {vd[i * 2 + 0], vd[i * 2 + 1]}; @@ -841,7 +840,7 @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env, uint32_t vta = vext_vta(desc); uint32_t total_elems = vext_get_total_elems(env, desc, 4); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { uint64_t Y[2] = {brev8(vd[i * 2 + 0]), brev8(vd[i * 2 + 1])}; @@ -879,7 +878,7 @@ void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env, uint32_t esz = sizeof(uint32_t); uint32_t total_elems = vext_get_total_elems(env, desc, esz); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); for (uint32_t i = group_start; i < group_end; ++i) { uint32_t vstart = i * egs; @@ -937,7 +936,7 @@ void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc) uint32_t esz = sizeof(uint32_t); uint32_t total_elems = vext_get_total_elems(env, desc, esz); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); for (uint32_t i = 
group_start; i < group_end; ++i) { uint32_t vstart = i * egs; @@ -973,7 +972,7 @@ void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc) uint32_t esz = sizeof(uint32_t); uint32_t total_elems = vext_get_total_elems(env, desc, esz); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); for (uint32_t i = group_start; i < group_end; ++i) { uint32_t vstart = i * egs; diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index 10a52ceb5b1..7c67d67a13f 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -21,10 +21,12 @@ #include "qemu/bitops.h" #include "cpu.h" #include "exec/memop.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/probe.h" #include "exec/page-protection.h" #include "exec/helper-proto.h" +#include "exec/tlb-flags.h" +#include "exec/target_page.h" #include "fpu/softfloat.h" #include "tcg/tcg-gvec-desc.h" #include "internals.h" @@ -32,7 +34,7 @@ #include target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, - target_ulong s2) + target_ulong s2, target_ulong x0) { int vlmax, vl; RISCVCPU *cpu = env_archcpu(env); @@ -75,9 +77,21 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul); if (s1 <= vlmax) { vl = s1; + } else if (s1 < 2 * vlmax && cpu->cfg.rvv_vl_half_avl) { + vl = (s1 + 1) >> 1; } else { vl = vlmax; } + + if (cpu->cfg.rvv_vsetvl_x0_vill && x0 && (env->vl != vl)) { + /* only set vill bit. */ + env->vill = 1; + env->vtype = 0; + env->vl = 0; + env->vstart = 0; + return 0; + } + env->vl = vl; env->vtype = s2; env->vstart = 0; @@ -103,11 +117,6 @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz) return scale < 0 ? vlenb >> -scale : vlenb << scale; } -static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr) -{ - return (addr & ~env->cur_pmmask) | env->cur_pmbase; -} - /* * This function checks watchpoint before real load operation. * @@ -117,25 +126,42 @@ static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr) * It will trigger an exception if there is no mapping in TLB * and page table walk can't fill the TLB entry. Then the guest * software can return here after process the exception or never return. + * + * This function can also be used when direct access to probe_access_flags is + * needed in order to access the flags. If a pointer to a flags operand is + * provided the function will call probe_access_flags instead, use nonfault + * and update host and flags. 
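/*
 * Illustrative standalone sketch (not part of the patch): the AVL-to-VL
 * policy in the vsetvl helper.  Requests up to VLMAX are honoured exactly;
 * with the rvv_vl_half_avl tuning an AVL below 2*VLMAX is split as
 * ceil(AVL/2), which the V spec permits; anything larger clamps to VLMAX.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t choose_vl(uint32_t avl, uint32_t vlmax, bool half_avl)
{
    if (avl <= vlmax) {
        return avl;
    } else if (avl < 2 * vlmax && half_avl) {
        return (avl + 1) >> 1;          /* ceil(avl / 2) */
    }
    return vlmax;
}

int main(void)
{
    uint32_t vlmax = 8;

    printf("avl=5  -> vl=%u\n", choose_vl(5, vlmax, true));    /* 5 */
    printf("avl=13 -> vl=%u\n", choose_vl(13, vlmax, true));   /* 7 */
    printf("avl=13 -> vl=%u\n", choose_vl(13, vlmax, false));  /* 8 */
    printf("avl=40 -> vl=%u\n", choose_vl(40, vlmax, true));   /* 8 */
    return 0;
}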
*/ -static void probe_pages(CPURISCVState *env, target_ulong addr, - target_ulong len, uintptr_t ra, - MMUAccessType access_type) +static void probe_pages(CPURISCVState *env, target_ulong addr, target_ulong len, + uintptr_t ra, MMUAccessType access_type, int mmu_index, + void **host, int *flags, bool nonfault) { target_ulong pagelen = -(addr | TARGET_PAGE_MASK); target_ulong curlen = MIN(pagelen, len); - int mmu_index = riscv_env_mmu_index(env, false); - probe_access(env, adjust_addr(env, addr), curlen, access_type, - mmu_index, ra); + if (flags != NULL) { + *flags = probe_access_flags(env, adjust_addr(env, addr), curlen, + access_type, mmu_index, nonfault, host, ra); + } else { + probe_access(env, adjust_addr(env, addr), curlen, access_type, + mmu_index, ra); + } + if (len > curlen) { addr += curlen; curlen = len - curlen; - probe_access(env, adjust_addr(env, addr), curlen, access_type, - mmu_index, ra); + if (flags != NULL) { + *flags = probe_access_flags(env, adjust_addr(env, addr), curlen, + access_type, mmu_index, nonfault, + host, ra); + } else { + probe_access(env, adjust_addr(env, addr), curlen, access_type, + mmu_index, ra); + } } } + static inline void vext_set_elem_mask(void *v0, int index, uint8_t value) { @@ -146,34 +172,90 @@ static inline void vext_set_elem_mask(void *v0, int index, } /* elements operations for load and store */ -typedef void vext_ldst_elem_fn(CPURISCVState *env, abi_ptr addr, - uint32_t idx, void *vd, uintptr_t retaddr); +typedef void vext_ldst_elem_fn_tlb(CPURISCVState *env, abi_ptr addr, + uint32_t idx, void *vd, uintptr_t retaddr); +typedef void vext_ldst_elem_fn_host(void *vd, uint32_t idx, void *host); + +#define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF) \ +static inline QEMU_ALWAYS_INLINE \ +void NAME##_tlb(CPURISCVState *env, abi_ptr addr, \ + uint32_t idx, void *vd, uintptr_t retaddr) \ +{ \ + ETYPE *cur = ((ETYPE *)vd + H(idx)); \ + *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr); \ +} \ + \ +static inline QEMU_ALWAYS_INLINE \ +void NAME##_host(void *vd, uint32_t idx, void *host) \ +{ \ + ETYPE *cur = ((ETYPE *)vd + H(idx)); \ + *cur = (ETYPE)LDSUF##_p(host); \ +} -#define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF) \ -static void NAME(CPURISCVState *env, abi_ptr addr, \ - uint32_t idx, void *vd, uintptr_t retaddr)\ -{ \ - ETYPE *cur = ((ETYPE *)vd + H(idx)); \ - *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr); \ -} \ - -GEN_VEXT_LD_ELEM(lde_b, int8_t, H1, ldsb) -GEN_VEXT_LD_ELEM(lde_h, int16_t, H2, ldsw) -GEN_VEXT_LD_ELEM(lde_w, int32_t, H4, ldl) -GEN_VEXT_LD_ELEM(lde_d, int64_t, H8, ldq) - -#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \ -static void NAME(CPURISCVState *env, abi_ptr addr, \ - uint32_t idx, void *vd, uintptr_t retaddr)\ -{ \ - ETYPE data = *((ETYPE *)vd + H(idx)); \ - cpu_##STSUF##_data_ra(env, addr, data, retaddr); \ +GEN_VEXT_LD_ELEM(lde_b, uint8_t, H1, ldub) +GEN_VEXT_LD_ELEM(lde_h, uint16_t, H2, lduw) +GEN_VEXT_LD_ELEM(lde_w, uint32_t, H4, ldl) +GEN_VEXT_LD_ELEM(lde_d, uint64_t, H8, ldq) + +#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \ +static inline QEMU_ALWAYS_INLINE \ +void NAME##_tlb(CPURISCVState *env, abi_ptr addr, \ + uint32_t idx, void *vd, uintptr_t retaddr) \ +{ \ + ETYPE data = *((ETYPE *)vd + H(idx)); \ + cpu_##STSUF##_data_ra(env, addr, data, retaddr); \ +} \ + \ +static inline QEMU_ALWAYS_INLINE \ +void NAME##_host(void *vd, uint32_t idx, void *host) \ +{ \ + ETYPE data = *((ETYPE *)vd + H(idx)); \ + STSUF##_p(host, data); \ +} + +GEN_VEXT_ST_ELEM(ste_b, uint8_t, H1, stb) +GEN_VEXT_ST_ELEM(ste_h, uint16_t, H2, 
stw) +GEN_VEXT_ST_ELEM(ste_w, uint32_t, H4, stl) +GEN_VEXT_ST_ELEM(ste_d, uint64_t, H8, stq) + +static inline QEMU_ALWAYS_INLINE void +vext_continuous_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb, + void *vd, uint32_t evl, target_ulong addr, + uint32_t reg_start, uintptr_t ra, uint32_t esz, + bool is_load) +{ + uint32_t i; + for (i = env->vstart; i < evl; env->vstart = ++i, addr += esz) { + ldst_tlb(env, adjust_addr(env, addr), i, vd, ra); + } } -GEN_VEXT_ST_ELEM(ste_b, int8_t, H1, stb) -GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw) -GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl) -GEN_VEXT_ST_ELEM(ste_d, int64_t, H8, stq) +static inline QEMU_ALWAYS_INLINE void +vext_continuous_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host, + void *vd, uint32_t evl, uint32_t reg_start, void *host, + uint32_t esz, bool is_load) +{ +#if HOST_BIG_ENDIAN + for (; reg_start < evl; reg_start++, host += esz) { + ldst_host(vd, reg_start, host); + } +#else + if (esz == 1) { + uint32_t byte_offset = reg_start * esz; + uint32_t size = (evl - reg_start) * esz; + + if (is_load) { + memcpy(vd + byte_offset, host, size); + } else { + memcpy(host, vd + byte_offset, size); + } + } else { + for (; reg_start < evl; reg_start++, host += esz) { + ldst_host(vd, reg_start, host); + } + } +#endif +} static void vext_set_tail_elems_1s(target_ulong vl, void *vd, uint32_t desc, uint32_t nf, @@ -196,11 +278,10 @@ static void vext_set_tail_elems_1s(target_ulong vl, void *vd, * stride: access vector element from strided memory */ static void -vext_ldst_stride(void *vd, void *v0, target_ulong base, - target_ulong stride, CPURISCVState *env, - uint32_t desc, uint32_t vm, - vext_ldst_elem_fn *ldst_elem, - uint32_t log2_esz, uintptr_t ra) +vext_ldst_stride(void *vd, void *v0, target_ulong base, target_ulong stride, + CPURISCVState *env, uint32_t desc, uint32_t vm, + vext_ldst_elem_fn_tlb *ldst_elem, uint32_t log2_esz, + uintptr_t ra) { uint32_t i, k; uint32_t nf = vext_nf(desc); @@ -208,7 +289,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base, uint32_t esz = 1 << log2_esz; uint32_t vma = vext_vma(desc); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); for (i = env->vstart; i < env->vl; env->vstart = ++i) { k = 0; @@ -240,10 +321,10 @@ void HELPER(NAME)(void *vd, void * v0, target_ulong base, \ ctzl(sizeof(ETYPE)), GETPC()); \ } -GEN_VEXT_LD_STRIDE(vlse8_v, int8_t, lde_b) -GEN_VEXT_LD_STRIDE(vlse16_v, int16_t, lde_h) -GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w) -GEN_VEXT_LD_STRIDE(vlse64_v, int64_t, lde_d) +GEN_VEXT_LD_STRIDE(vlse8_v, int8_t, lde_b_tlb) +GEN_VEXT_LD_STRIDE(vlse16_v, int16_t, lde_h_tlb) +GEN_VEXT_LD_STRIDE(vlse32_v, int32_t, lde_w_tlb) +GEN_VEXT_LD_STRIDE(vlse64_v, int64_t, lde_d_tlb) #define GEN_VEXT_ST_STRIDE(NAME, ETYPE, STORE_FN) \ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ @@ -255,39 +336,137 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ ctzl(sizeof(ETYPE)), GETPC()); \ } -GEN_VEXT_ST_STRIDE(vsse8_v, int8_t, ste_b) -GEN_VEXT_ST_STRIDE(vsse16_v, int16_t, ste_h) -GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w) -GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d) +GEN_VEXT_ST_STRIDE(vsse8_v, int8_t, ste_b_tlb) +GEN_VEXT_ST_STRIDE(vsse16_v, int16_t, ste_h_tlb) +GEN_VEXT_ST_STRIDE(vsse32_v, int32_t, ste_w_tlb) +GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d_tlb) /* * unit-stride: access elements stored contiguously in memory */ /* unmasked unit-stride load and store operation */ -static void +static inline QEMU_ALWAYS_INLINE void 
+vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr, + uint32_t elems, uint32_t nf, uint32_t max_elems, + uint32_t log2_esz, bool is_load, int mmu_index, + vext_ldst_elem_fn_tlb *ldst_tlb, + vext_ldst_elem_fn_host *ldst_host, uintptr_t ra) +{ + void *host; + int i, k, flags; + uint32_t esz = 1 << log2_esz; + uint32_t size = (elems * nf) << log2_esz; + uint32_t evl = env->vstart + elems; + MMUAccessType access_type = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE; + + /* Check page permission/pmp/watchpoint/etc. */ + probe_pages(env, addr, size, ra, access_type, mmu_index, &host, &flags, + true); + + if (flags == 0) { + if (nf == 1) { + vext_continuous_ldst_host(env, ldst_host, vd, evl, env->vstart, + host, esz, is_load); + } else { + for (i = env->vstart; i < evl; ++i) { + k = 0; + while (k < nf) { + ldst_host(vd, i + k * max_elems, host); + host += esz; + k++; + } + } + } + env->vstart += elems; + } else { + if (nf == 1) { + vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart, + ra, esz, is_load); + } else { + /* load bytes from guest memory */ + for (i = env->vstart; i < evl; env->vstart = ++i) { + k = 0; + while (k < nf) { + ldst_tlb(env, adjust_addr(env, addr), i + k * max_elems, + vd, ra); + addr += esz; + k++; + } + } + } + } +} + +static inline QEMU_ALWAYS_INLINE void vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, - vext_ldst_elem_fn *ldst_elem, uint32_t log2_esz, uint32_t evl, - uintptr_t ra) + vext_ldst_elem_fn_tlb *ldst_tlb, + vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz, + uint32_t evl, uintptr_t ra, bool is_load) { - uint32_t i, k; + uint32_t k; + target_ulong page_split, elems, addr; uint32_t nf = vext_nf(desc); uint32_t max_elems = vext_max_elems(desc, log2_esz); uint32_t esz = 1 << log2_esz; + uint32_t msize = nf * esz; + int mmu_index = riscv_env_mmu_index(env, false); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, evl); - /* load bytes from guest memory */ - for (i = env->vstart; i < evl; env->vstart = ++i) { - k = 0; - while (k < nf) { - target_ulong addr = base + ((i * nf + k) << log2_esz); - ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra); - k++; +#if defined(CONFIG_USER_ONLY) + /* + * For data sizes <= 6 bytes we get better performance by simply calling + * vext_continuous_ldst_tlb + */ + if (nf == 1 && (evl << log2_esz) <= 6) { + addr = base + (env->vstart << log2_esz); + vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart, ra, + esz, is_load); + + env->vstart = 0; + vext_set_tail_elems_1s(evl, vd, desc, nf, esz, max_elems); + return; + } +#endif + + /* Calculate the page range of first page */ + addr = base + ((env->vstart * nf) << log2_esz); + page_split = -(addr | TARGET_PAGE_MASK); + /* Get number of elements */ + elems = page_split / msize; + if (unlikely(env->vstart + elems >= evl)) { + elems = evl - env->vstart; + } + + /* Load/store elements in the first page */ + if (likely(elems)) { + vext_page_ldst_us(env, vd, addr, elems, nf, max_elems, log2_esz, + is_load, mmu_index, ldst_tlb, ldst_host, ra); + } + + /* Load/store elements in the second page */ + if (unlikely(env->vstart < evl)) { + /* Cross page element */ + if (unlikely(page_split % msize)) { + for (k = 0; k < nf; k++) { + addr = base + ((env->vstart * nf + k) << log2_esz); + ldst_tlb(env, adjust_addr(env, addr), + env->vstart + k * max_elems, vd, ra); + } + env->vstart++; } + + addr = base + ((env->vstart * nf) << log2_esz); + /* Get number of elements of second page */ + elems = evl - 
env->vstart; + + /* Load/store elements in the second page */ + vext_page_ldst_us(env, vd, addr, elems, nf, max_elems, log2_esz, + is_load, mmu_index, ldst_tlb, ldst_host, ra); } - env->vstart = 0; + env->vstart = 0; vext_set_tail_elems_1s(evl, vd, desc, nf, esz, max_elems); } @@ -296,47 +475,47 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, * stride, stride = NF * sizeof (ETYPE) */ -#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN) \ -void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \ - vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN, \ - ctzl(sizeof(ETYPE)), GETPC()); \ -} \ - \ -void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - vext_ldst_us(vd, base, env, desc, LOAD_FN, \ - ctzl(sizeof(ETYPE)), env->vl, GETPC()); \ +#define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST) \ +void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \ + vext_ldst_stride(vd, v0, base, stride, env, desc, false, \ + LOAD_FN_TLB, ctzl(sizeof(ETYPE)), GETPC()); \ +} \ + \ +void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vext_ldst_us(vd, base, env, desc, LOAD_FN_TLB, LOAD_FN_HOST, \ + ctzl(sizeof(ETYPE)), env->vl, GETPC(), true); \ } -GEN_VEXT_LD_US(vle8_v, int8_t, lde_b) -GEN_VEXT_LD_US(vle16_v, int16_t, lde_h) -GEN_VEXT_LD_US(vle32_v, int32_t, lde_w) -GEN_VEXT_LD_US(vle64_v, int64_t, lde_d) +GEN_VEXT_LD_US(vle8_v, int8_t, lde_b_tlb, lde_b_host) +GEN_VEXT_LD_US(vle16_v, int16_t, lde_h_tlb, lde_h_host) +GEN_VEXT_LD_US(vle32_v, int32_t, lde_w_tlb, lde_w_host) +GEN_VEXT_LD_US(vle64_v, int64_t, lde_d_tlb, lde_d_host) -#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN) \ +#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN_TLB, STORE_FN_HOST) \ void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \ CPURISCVState *env, uint32_t desc) \ { \ uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \ - vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \ - ctzl(sizeof(ETYPE)), GETPC()); \ + vext_ldst_stride(vd, v0, base, stride, env, desc, false, \ + STORE_FN_TLB, ctzl(sizeof(ETYPE)), GETPC()); \ } \ \ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ CPURISCVState *env, uint32_t desc) \ { \ - vext_ldst_us(vd, base, env, desc, STORE_FN, \ - ctzl(sizeof(ETYPE)), env->vl, GETPC()); \ + vext_ldst_us(vd, base, env, desc, STORE_FN_TLB, STORE_FN_HOST, \ + ctzl(sizeof(ETYPE)), env->vl, GETPC(), false); \ } -GEN_VEXT_ST_US(vse8_v, int8_t, ste_b) -GEN_VEXT_ST_US(vse16_v, int16_t, ste_h) -GEN_VEXT_ST_US(vse32_v, int32_t, ste_w) -GEN_VEXT_ST_US(vse64_v, int64_t, ste_d) +GEN_VEXT_ST_US(vse8_v, int8_t, ste_b_tlb, ste_b_host) +GEN_VEXT_ST_US(vse16_v, int16_t, ste_h_tlb, ste_h_host) +GEN_VEXT_ST_US(vse32_v, int32_t, ste_w_tlb, ste_w_host) +GEN_VEXT_ST_US(vse64_v, int64_t, ste_d_tlb, ste_d_host) /* * unit stride mask load and store, EEW = 1 @@ -346,8 +525,8 @@ void HELPER(vlm_v)(void *vd, void *v0, target_ulong base, { /* evl = ceil(vl/8) */ uint8_t evl = (env->vl + 7) >> 3; - vext_ldst_us(vd, base, env, desc, lde_b, - 0, evl, GETPC()); + vext_ldst_us(vd, base, env, desc, lde_b_tlb, lde_b_host, + 0, evl, GETPC(), true); } void HELPER(vsm_v)(void *vd, void *v0, target_ulong base, @@ -355,8 +534,8 @@ void HELPER(vsm_v)(void *vd, 
void *v0, target_ulong base, { /* evl = ceil(vl/8) */ uint8_t evl = (env->vl + 7) >> 3; - vext_ldst_us(vd, base, env, desc, ste_b, - 0, evl, GETPC()); + vext_ldst_us(vd, base, env, desc, ste_b_tlb, ste_b_host, + 0, evl, GETPC(), false); } /* @@ -381,7 +560,7 @@ static inline void vext_ldst_index(void *vd, void *v0, target_ulong base, void *vs2, CPURISCVState *env, uint32_t desc, vext_get_index_addr get_index_addr, - vext_ldst_elem_fn *ldst_elem, + vext_ldst_elem_fn_tlb *ldst_elem, uint32_t log2_esz, uintptr_t ra) { uint32_t i, k; @@ -391,7 +570,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base, uint32_t esz = 1 << log2_esz; uint32_t vma = vext_vma(desc); - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); /* load bytes from guest memory */ for (i = env->vstart; i < env->vl; env->vstart = ++i) { @@ -422,22 +601,22 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ LOAD_FN, ctzl(sizeof(ETYPE)), GETPC()); \ } -GEN_VEXT_LD_INDEX(vlxei8_8_v, int8_t, idx_b, lde_b) -GEN_VEXT_LD_INDEX(vlxei8_16_v, int16_t, idx_b, lde_h) -GEN_VEXT_LD_INDEX(vlxei8_32_v, int32_t, idx_b, lde_w) -GEN_VEXT_LD_INDEX(vlxei8_64_v, int64_t, idx_b, lde_d) -GEN_VEXT_LD_INDEX(vlxei16_8_v, int8_t, idx_h, lde_b) -GEN_VEXT_LD_INDEX(vlxei16_16_v, int16_t, idx_h, lde_h) -GEN_VEXT_LD_INDEX(vlxei16_32_v, int32_t, idx_h, lde_w) -GEN_VEXT_LD_INDEX(vlxei16_64_v, int64_t, idx_h, lde_d) -GEN_VEXT_LD_INDEX(vlxei32_8_v, int8_t, idx_w, lde_b) -GEN_VEXT_LD_INDEX(vlxei32_16_v, int16_t, idx_w, lde_h) -GEN_VEXT_LD_INDEX(vlxei32_32_v, int32_t, idx_w, lde_w) -GEN_VEXT_LD_INDEX(vlxei32_64_v, int64_t, idx_w, lde_d) -GEN_VEXT_LD_INDEX(vlxei64_8_v, int8_t, idx_d, lde_b) -GEN_VEXT_LD_INDEX(vlxei64_16_v, int16_t, idx_d, lde_h) -GEN_VEXT_LD_INDEX(vlxei64_32_v, int32_t, idx_d, lde_w) -GEN_VEXT_LD_INDEX(vlxei64_64_v, int64_t, idx_d, lde_d) +GEN_VEXT_LD_INDEX(vlxei8_8_v, int8_t, idx_b, lde_b_tlb) +GEN_VEXT_LD_INDEX(vlxei8_16_v, int16_t, idx_b, lde_h_tlb) +GEN_VEXT_LD_INDEX(vlxei8_32_v, int32_t, idx_b, lde_w_tlb) +GEN_VEXT_LD_INDEX(vlxei8_64_v, int64_t, idx_b, lde_d_tlb) +GEN_VEXT_LD_INDEX(vlxei16_8_v, int8_t, idx_h, lde_b_tlb) +GEN_VEXT_LD_INDEX(vlxei16_16_v, int16_t, idx_h, lde_h_tlb) +GEN_VEXT_LD_INDEX(vlxei16_32_v, int32_t, idx_h, lde_w_tlb) +GEN_VEXT_LD_INDEX(vlxei16_64_v, int64_t, idx_h, lde_d_tlb) +GEN_VEXT_LD_INDEX(vlxei32_8_v, int8_t, idx_w, lde_b_tlb) +GEN_VEXT_LD_INDEX(vlxei32_16_v, int16_t, idx_w, lde_h_tlb) +GEN_VEXT_LD_INDEX(vlxei32_32_v, int32_t, idx_w, lde_w_tlb) +GEN_VEXT_LD_INDEX(vlxei32_64_v, int64_t, idx_w, lde_d_tlb) +GEN_VEXT_LD_INDEX(vlxei64_8_v, int8_t, idx_d, lde_b_tlb) +GEN_VEXT_LD_INDEX(vlxei64_16_v, int16_t, idx_d, lde_h_tlb) +GEN_VEXT_LD_INDEX(vlxei64_32_v, int32_t, idx_d, lde_w_tlb) +GEN_VEXT_LD_INDEX(vlxei64_64_v, int64_t, idx_d, lde_d_tlb) #define GEN_VEXT_ST_INDEX(NAME, ETYPE, INDEX_FN, STORE_FN) \ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ @@ -448,79 +627,101 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ GETPC()); \ } -GEN_VEXT_ST_INDEX(vsxei8_8_v, int8_t, idx_b, ste_b) -GEN_VEXT_ST_INDEX(vsxei8_16_v, int16_t, idx_b, ste_h) -GEN_VEXT_ST_INDEX(vsxei8_32_v, int32_t, idx_b, ste_w) -GEN_VEXT_ST_INDEX(vsxei8_64_v, int64_t, idx_b, ste_d) -GEN_VEXT_ST_INDEX(vsxei16_8_v, int8_t, idx_h, ste_b) -GEN_VEXT_ST_INDEX(vsxei16_16_v, int16_t, idx_h, ste_h) -GEN_VEXT_ST_INDEX(vsxei16_32_v, int32_t, idx_h, ste_w) -GEN_VEXT_ST_INDEX(vsxei16_64_v, int64_t, idx_h, ste_d) -GEN_VEXT_ST_INDEX(vsxei32_8_v, int8_t, idx_w, ste_b) -GEN_VEXT_ST_INDEX(vsxei32_16_v, int16_t, idx_w, 
ste_h) -GEN_VEXT_ST_INDEX(vsxei32_32_v, int32_t, idx_w, ste_w) -GEN_VEXT_ST_INDEX(vsxei32_64_v, int64_t, idx_w, ste_d) -GEN_VEXT_ST_INDEX(vsxei64_8_v, int8_t, idx_d, ste_b) -GEN_VEXT_ST_INDEX(vsxei64_16_v, int16_t, idx_d, ste_h) -GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w) -GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d) +GEN_VEXT_ST_INDEX(vsxei8_8_v, int8_t, idx_b, ste_b_tlb) +GEN_VEXT_ST_INDEX(vsxei8_16_v, int16_t, idx_b, ste_h_tlb) +GEN_VEXT_ST_INDEX(vsxei8_32_v, int32_t, idx_b, ste_w_tlb) +GEN_VEXT_ST_INDEX(vsxei8_64_v, int64_t, idx_b, ste_d_tlb) +GEN_VEXT_ST_INDEX(vsxei16_8_v, int8_t, idx_h, ste_b_tlb) +GEN_VEXT_ST_INDEX(vsxei16_16_v, int16_t, idx_h, ste_h_tlb) +GEN_VEXT_ST_INDEX(vsxei16_32_v, int32_t, idx_h, ste_w_tlb) +GEN_VEXT_ST_INDEX(vsxei16_64_v, int64_t, idx_h, ste_d_tlb) +GEN_VEXT_ST_INDEX(vsxei32_8_v, int8_t, idx_w, ste_b_tlb) +GEN_VEXT_ST_INDEX(vsxei32_16_v, int16_t, idx_w, ste_h_tlb) +GEN_VEXT_ST_INDEX(vsxei32_32_v, int32_t, idx_w, ste_w_tlb) +GEN_VEXT_ST_INDEX(vsxei32_64_v, int64_t, idx_w, ste_d_tlb) +GEN_VEXT_ST_INDEX(vsxei64_8_v, int8_t, idx_d, ste_b_tlb) +GEN_VEXT_ST_INDEX(vsxei64_16_v, int16_t, idx_d, ste_h_tlb) +GEN_VEXT_ST_INDEX(vsxei64_32_v, int32_t, idx_d, ste_w_tlb) +GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d_tlb) /* * unit-stride fault-only-fisrt load instructions */ static inline void -vext_ldff(void *vd, void *v0, target_ulong base, - CPURISCVState *env, uint32_t desc, - vext_ldst_elem_fn *ldst_elem, - uint32_t log2_esz, uintptr_t ra) +vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env, + uint32_t desc, vext_ldst_elem_fn_tlb *ldst_tlb, + vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz, uintptr_t ra) { uint32_t i, k, vl = 0; uint32_t nf = vext_nf(desc); uint32_t vm = vext_vm(desc); uint32_t max_elems = vext_max_elems(desc, log2_esz); uint32_t esz = 1 << log2_esz; + uint32_t msize = nf * esz; uint32_t vma = vext_vma(desc); - target_ulong addr, offset, remain; + target_ulong addr, addr_probe, addr_i, offset, remain, page_split, elems; int mmu_index = riscv_env_mmu_index(env, false); + int flags, probe_flags; + void *host; - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, env->vl); - /* probe every access */ - for (i = env->vstart; i < env->vl; i++) { - if (!vm && !vext_elem_mask(v0, i)) { - continue; - } - addr = adjust_addr(env, base + i * (nf << log2_esz)); - if (i == 0) { - /* Allow fault on first element. */ - probe_pages(env, addr, nf << log2_esz, ra, MMU_DATA_LOAD); - } else { - remain = nf << log2_esz; - while (remain > 0) { - void *host; - int flags; - - offset = -(addr | TARGET_PAGE_MASK); - - /* Probe nonfault on subsequent elements. */ - flags = probe_access_flags(env, addr, offset, MMU_DATA_LOAD, - mmu_index, true, &host, 0); - - /* - * Stop if invalid (unmapped) or mmio (transaction may fail). - * Do not stop if watchpoint, as the spec says that - * first-fault should continue to access the same - * elements regardless of any watchpoint. - */ - if (flags & ~TLB_WATCHPOINT) { - vl = i; - goto ProbeSuccess; - } - if (remain <= offset) { - break; + addr = base + ((env->vstart * nf) << log2_esz); + page_split = -(addr | TARGET_PAGE_MASK); + /* Get number of elements */ + elems = page_split / msize; + if (unlikely(env->vstart + elems >= env->vl)) { + elems = env->vl - env->vstart; + } + + /* Check page permission/pmp/watchpoint/etc. */ + probe_pages(env, addr, elems * msize, ra, MMU_DATA_LOAD, mmu_index, &host, + &flags, true); + + /* If we are crossing a page check also the second page. 
*/ + if (env->vl > elems) { + addr_probe = addr + (elems << log2_esz); + probe_pages(env, addr_probe, elems * msize, ra, MMU_DATA_LOAD, + mmu_index, &host, &probe_flags, true); + flags |= probe_flags; + } + + if (flags & ~TLB_WATCHPOINT) { + /* probe every access */ + for (i = env->vstart; i < env->vl; i++) { + if (!vm && !vext_elem_mask(v0, i)) { + continue; + } + addr_i = adjust_addr(env, base + i * (nf << log2_esz)); + if (i == 0) { + /* Allow fault on first element. */ + probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD, + mmu_index, &host, NULL, false); + } else { + remain = nf << log2_esz; + while (remain > 0) { + offset = -(addr_i | TARGET_PAGE_MASK); + + /* Probe nonfault on subsequent elements. */ + probe_pages(env, addr_i, offset, 0, MMU_DATA_LOAD, + mmu_index, &host, &flags, true); + + /* + * Stop if invalid (unmapped) or mmio (transaction may + * fail). Do not stop if watchpoint, as the spec says that + * first-fault should continue to access the same + * elements regardless of any watchpoint. + */ + if (flags & ~TLB_WATCHPOINT) { + vl = i; + goto ProbeSuccess; + } + if (remain <= offset) { + break; + } + remain -= offset; + addr_i = adjust_addr(env, addr_i + offset); } - remain -= offset; - addr = adjust_addr(env, addr + offset); } } } @@ -529,19 +730,54 @@ vext_ldff(void *vd, void *v0, target_ulong base, if (vl != 0) { env->vl = vl; } - for (i = env->vstart; i < env->vl; i++) { - k = 0; - while (k < nf) { - if (!vm && !vext_elem_mask(v0, i)) { - /* set masked-off elements to 1s */ - vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz, - (i + k * max_elems + 1) * esz); - k++; - continue; + + if (env->vstart < env->vl) { + if (vm) { + /* Load/store elements in the first page */ + if (likely(elems)) { + vext_page_ldst_us(env, vd, addr, elems, nf, max_elems, + log2_esz, true, mmu_index, ldst_tlb, + ldst_host, ra); + } + + /* Load/store elements in the second page */ + if (unlikely(env->vstart < env->vl)) { + /* Cross page element */ + if (unlikely(page_split % msize)) { + for (k = 0; k < nf; k++) { + addr = base + ((env->vstart * nf + k) << log2_esz); + ldst_tlb(env, adjust_addr(env, addr), + env->vstart + k * max_elems, vd, ra); + } + env->vstart++; + } + + addr = base + ((env->vstart * nf) << log2_esz); + /* Get number of elements of second page */ + elems = env->vl - env->vstart; + + /* Load/store elements in the second page */ + vext_page_ldst_us(env, vd, addr, elems, nf, max_elems, + log2_esz, true, mmu_index, ldst_tlb, + ldst_host, ra); + } + } else { + for (i = env->vstart; i < env->vl; i++) { + k = 0; + while (k < nf) { + if (!vext_elem_mask(v0, i)) { + /* set masked-off elements to 1s */ + vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz, + (i + k * max_elems + 1) * esz); + k++; + continue; + } + addr = base + ((i * nf + k) << log2_esz); + ldst_tlb(env, adjust_addr(env, addr), i + k * max_elems, + vd, ra); + k++; + } } - addr = base + ((i * nf + k) << log2_esz); - ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra); - k++; } } env->vstart = 0; @@ -549,18 +785,18 @@ vext_ldff(void *vd, void *v0, target_ulong base, vext_set_tail_elems_1s(env->vl, vd, desc, nf, esz, max_elems); } -#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN) \ -void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - vext_ldff(vd, v0, base, env, desc, LOAD_FN, \ - ctzl(sizeof(ETYPE)), GETPC()); \ +#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ + 
CPURISCVState *env, uint32_t desc) \ +{ \ + vext_ldff(vd, v0, base, env, desc, LOAD_FN_TLB, \ + LOAD_FN_HOST, ctzl(sizeof(ETYPE)), GETPC()); \ } -GEN_VEXT_LDFF(vle8ff_v, int8_t, lde_b) -GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h) -GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w) -GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d) +GEN_VEXT_LDFF(vle8ff_v, int8_t, lde_b_tlb, lde_b_host) +GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h_tlb, lde_h_host) +GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w_tlb, lde_w_host) +GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d_tlb, lde_d_host) #define DO_SWAP(N, M) (M) #define DO_AND(N, M) (N & M) @@ -575,81 +811,93 @@ GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d) /* * load and store whole register instructions */ -static void +static inline QEMU_ALWAYS_INLINE void vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, - vext_ldst_elem_fn *ldst_elem, uint32_t log2_esz, uintptr_t ra) + vext_ldst_elem_fn_tlb *ldst_tlb, + vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz, + uintptr_t ra, bool is_load) { - uint32_t i, k, off, pos; + target_ulong page_split, elems, addr; uint32_t nf = vext_nf(desc); uint32_t vlenb = riscv_cpu_cfg(env)->vlenb; uint32_t max_elems = vlenb >> log2_esz; + uint32_t evl = nf * max_elems; + uint32_t esz = 1 << log2_esz; + int mmu_index = riscv_env_mmu_index(env, false); - if (env->vstart >= ((vlenb * nf) >> log2_esz)) { - env->vstart = 0; - return; + /* Calculate the page range of first page */ + addr = base + (env->vstart << log2_esz); + page_split = -(addr | TARGET_PAGE_MASK); + /* Get number of elements */ + elems = page_split / esz; + if (unlikely(env->vstart + elems >= evl)) { + elems = evl - env->vstart; } - k = env->vstart / max_elems; - off = env->vstart % max_elems; - - if (off) { - /* load/store rest of elements of current segment pointed by vstart */ - for (pos = off; pos < max_elems; pos++, env->vstart++) { - target_ulong addr = base + ((pos + k * max_elems) << log2_esz); - ldst_elem(env, adjust_addr(env, addr), pos + k * max_elems, vd, - ra); - } - k++; + /* Load/store elements in the first page */ + if (likely(elems)) { + vext_page_ldst_us(env, vd, addr, elems, 1, max_elems, log2_esz, + is_load, mmu_index, ldst_tlb, ldst_host, ra); } - /* load/store elements for rest of segments */ - for (; k < nf; k++) { - for (i = 0; i < max_elems; i++, env->vstart++) { - target_ulong addr = base + ((i + k * max_elems) << log2_esz); - ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra); + /* Load/store elements in the second page */ + if (unlikely(env->vstart < evl)) { + /* Cross page element */ + if (unlikely(page_split % esz)) { + addr = base + (env->vstart << log2_esz); + ldst_tlb(env, adjust_addr(env, addr), env->vstart, vd, ra); + env->vstart++; } + + addr = base + (env->vstart << log2_esz); + /* Get number of elements of second page */ + elems = evl - env->vstart; + + /* Load/store elements in the second page */ + vext_page_ldst_us(env, vd, addr, elems, 1, max_elems, log2_esz, + is_load, mmu_index, ldst_tlb, ldst_host, ra); } env->vstart = 0; } -#define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN) \ -void HELPER(NAME)(void *vd, target_ulong base, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - vext_ldst_whole(vd, base, env, desc, LOAD_FN, \ - ctzl(sizeof(ETYPE)), GETPC()); \ -} - -GEN_VEXT_LD_WHOLE(vl1re8_v, int8_t, lde_b) -GEN_VEXT_LD_WHOLE(vl1re16_v, int16_t, lde_h) -GEN_VEXT_LD_WHOLE(vl1re32_v, int32_t, lde_w) -GEN_VEXT_LD_WHOLE(vl1re64_v, int64_t, lde_d) -GEN_VEXT_LD_WHOLE(vl2re8_v, int8_t, lde_b) -GEN_VEXT_LD_WHOLE(vl2re16_v, int16_t, 
lde_h) -GEN_VEXT_LD_WHOLE(vl2re32_v, int32_t, lde_w) -GEN_VEXT_LD_WHOLE(vl2re64_v, int64_t, lde_d) -GEN_VEXT_LD_WHOLE(vl4re8_v, int8_t, lde_b) -GEN_VEXT_LD_WHOLE(vl4re16_v, int16_t, lde_h) -GEN_VEXT_LD_WHOLE(vl4re32_v, int32_t, lde_w) -GEN_VEXT_LD_WHOLE(vl4re64_v, int64_t, lde_d) -GEN_VEXT_LD_WHOLE(vl8re8_v, int8_t, lde_b) -GEN_VEXT_LD_WHOLE(vl8re16_v, int16_t, lde_h) -GEN_VEXT_LD_WHOLE(vl8re32_v, int32_t, lde_w) -GEN_VEXT_LD_WHOLE(vl8re64_v, int64_t, lde_d) - -#define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN) \ -void HELPER(NAME)(void *vd, target_ulong base, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - vext_ldst_whole(vd, base, env, desc, STORE_FN, \ - ctzl(sizeof(ETYPE)), GETPC()); \ -} - -GEN_VEXT_ST_WHOLE(vs1r_v, int8_t, ste_b) -GEN_VEXT_ST_WHOLE(vs2r_v, int8_t, ste_b) -GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b) -GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b) +#define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST) \ +void HELPER(NAME)(void *vd, target_ulong base, CPURISCVState *env, \ + uint32_t desc) \ +{ \ + vext_ldst_whole(vd, base, env, desc, LOAD_FN_TLB, LOAD_FN_HOST, \ + ctzl(sizeof(ETYPE)), GETPC(), true); \ +} + +GEN_VEXT_LD_WHOLE(vl1re8_v, int8_t, lde_b_tlb, lde_b_host) +GEN_VEXT_LD_WHOLE(vl1re16_v, int16_t, lde_h_tlb, lde_h_host) +GEN_VEXT_LD_WHOLE(vl1re32_v, int32_t, lde_w_tlb, lde_w_host) +GEN_VEXT_LD_WHOLE(vl1re64_v, int64_t, lde_d_tlb, lde_d_host) +GEN_VEXT_LD_WHOLE(vl2re8_v, int8_t, lde_b_tlb, lde_b_host) +GEN_VEXT_LD_WHOLE(vl2re16_v, int16_t, lde_h_tlb, lde_h_host) +GEN_VEXT_LD_WHOLE(vl2re32_v, int32_t, lde_w_tlb, lde_w_host) +GEN_VEXT_LD_WHOLE(vl2re64_v, int64_t, lde_d_tlb, lde_d_host) +GEN_VEXT_LD_WHOLE(vl4re8_v, int8_t, lde_b_tlb, lde_b_host) +GEN_VEXT_LD_WHOLE(vl4re16_v, int16_t, lde_h_tlb, lde_h_host) +GEN_VEXT_LD_WHOLE(vl4re32_v, int32_t, lde_w_tlb, lde_w_host) +GEN_VEXT_LD_WHOLE(vl4re64_v, int64_t, lde_d_tlb, lde_d_host) +GEN_VEXT_LD_WHOLE(vl8re8_v, int8_t, lde_b_tlb, lde_b_host) +GEN_VEXT_LD_WHOLE(vl8re16_v, int16_t, lde_h_tlb, lde_h_host) +GEN_VEXT_LD_WHOLE(vl8re32_v, int32_t, lde_w_tlb, lde_w_host) +GEN_VEXT_LD_WHOLE(vl8re64_v, int64_t, lde_d_tlb, lde_d_host) + +#define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN_TLB, STORE_FN_HOST) \ +void HELPER(NAME)(void *vd, target_ulong base, CPURISCVState *env, \ + uint32_t desc) \ +{ \ + vext_ldst_whole(vd, base, env, desc, STORE_FN_TLB, STORE_FN_HOST, \ + ctzl(sizeof(ETYPE)), GETPC(), false); \ +} + +GEN_VEXT_ST_WHOLE(vs1r_v, int8_t, ste_b_tlb, ste_b_host) +GEN_VEXT_ST_WHOLE(vs2r_v, int8_t, ste_b_tlb, ste_b_host) +GEN_VEXT_ST_WHOLE(vs4r_v, int8_t, ste_b_tlb, ste_b_host) +GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b_tlb, ste_b_host) /* * Vector Integer Arithmetic Instructions @@ -894,7 +1142,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ @@ -928,7 +1176,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ @@ -965,7 +1213,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); 
\ @@ -1005,7 +1253,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ @@ -1103,7 +1351,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -1152,7 +1400,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -1216,7 +1464,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ @@ -1283,7 +1531,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ @@ -1832,7 +2080,7 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ @@ -1858,7 +2106,7 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ *((ETYPE *)vd + H(i)) = (ETYPE)s1; \ @@ -1883,7 +2131,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ ETYPE *vt = (!vext_elem_mask(v0, i) ? 
vs2 : vs1); \ @@ -1909,7 +2157,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ @@ -1956,8 +2204,6 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2, uint32_t vl, uint32_t vm, int vxrm, opivv2_rm_fn *fn, uint32_t vma, uint32_t esz) { - VSTART_CHECK_EARLY_EXIT(env); - for (uint32_t i = env->vstart; i < vl; i++) { if (!vm && !vext_elem_mask(v0, i)) { /* set masked-off elements to 1s */ @@ -1981,6 +2227,8 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2, uint32_t vta = vext_vta(desc); uint32_t vma = vext_vma(desc); + VSTART_CHECK_EARLY_EXIT(env, vl); + switch (env->vxrm) { case 0: /* rnu */ vext_vv_rm_1(vd, v0, vs1, vs2, @@ -2083,8 +2331,6 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2, uint32_t vl, uint32_t vm, int vxrm, opivx2_rm_fn *fn, uint32_t vma, uint32_t esz) { - VSTART_CHECK_EARLY_EXIT(env); - for (uint32_t i = env->vstart; i < vl; i++) { if (!vm && !vext_elem_mask(v0, i)) { /* set masked-off elements to 1s */ @@ -2108,6 +2354,8 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2, uint32_t vta = vext_vta(desc); uint32_t vma = vext_vma(desc); + VSTART_CHECK_EARLY_EXIT(env, vl); + switch (env->vxrm) { case 0: /* rnu */ vext_vx_rm_1(vd, v0, s1, vs2, @@ -2882,7 +3130,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -2927,7 +3175,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -3515,7 +3763,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ if (vl == 0) { \ return; \ @@ -4038,7 +4286,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ @@ -4080,7 +4328,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ @@ -4275,7 +4523,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ uint32_t vta = vext_vta(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ @@ -4443,6 +4691,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ uint32_t i; \ TD s1 = *((TD *)vs1 + HD(0)); \ \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ + \ for (i = env->vstart; i < vl; i++) { \ TS2 s2 = *((TS2 *)vs2 + HS2(i)); \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -4450,7 +4700,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ } \ s1 = OP(s1, (TD)s2); \ } \ - *((TD *)vd + HD(0)) = s1; \ + if (vl > 0) { \ + *((TD *)vd + HD(0)) = s1; \ + } \ env->vstart = 0; \ /* set tail elements 
to 1s */ \ vext_set_elems_1s(vd, vta, esz, vlenb); \ @@ -4529,6 +4781,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ uint32_t i; \ TD s1 = *((TD *)vs1 + HD(0)); \ \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ + \ for (i = env->vstart; i < vl; i++) { \ TS2 s2 = *((TS2 *)vs2 + HS2(i)); \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -4536,7 +4790,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ } \ s1 = OP(s1, (TD)s2, &env->fp_status); \ } \ - *((TD *)vd + HD(0)) = s1; \ + if (vl > 0) { \ + *((TD *)vd + HD(0)) = s1; \ + } \ env->vstart = 0; \ /* set tail elements to 1s */ \ vext_set_elems_1s(vd, vta, esz, vlenb); \ @@ -4601,7 +4857,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ uint32_t i; \ int a, b; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ a = vext_elem_mask(vs1, i); \ @@ -4691,6 +4947,8 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env, int i; bool first_mask_bit = false; + VSTART_CHECK_EARLY_EXIT(env, vl); + for (i = env->vstart; i < vl; i++) { if (!vm && !vext_elem_mask(v0, i)) { /* set masked-off elements to 1s */ @@ -4763,6 +5021,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \ uint32_t sum = 0; \ int i; \ \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ + \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ /* set masked-off elements to 1s */ \ @@ -4796,7 +5056,7 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \ uint32_t vma = vext_vma(desc); \ int i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -4833,7 +5093,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ uint32_t vma = vext_vma(desc); \ target_ulong offset = s1, i_min, i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ i_min = MAX(env->vstart, offset); \ for (i = i_min; i < vl; i++) { \ @@ -4868,7 +5128,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ uint32_t vma = vext_vma(desc); \ target_ulong i_max, i_min, i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ i_min = MIN(s1 < vlmax ? 
vlmax - s1 : 0, vl); \ i_max = MAX(i_min, env->vstart); \ @@ -4882,9 +5142,11 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ } \ \ for (i = i_max; i < vl; ++i) { \ - if (vm || vext_elem_mask(v0, i)) { \ - *((ETYPE *)vd + H(i)) = 0; \ + if (!vm && !vext_elem_mask(v0, i)) { \ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \ + continue; \ } \ + *((ETYPE *)vd + H(i)) = 0; \ } \ \ env->vstart = 0; \ @@ -4912,7 +5174,7 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -4963,7 +5225,7 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -5040,7 +5302,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ uint64_t index; \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -5085,7 +5347,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ uint64_t index = s1; \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ @@ -5121,6 +5383,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ uint32_t vta = vext_vta(desc); \ uint32_t num = 0, i; \ \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ + \ for (i = env->vstart; i < vl; i++) { \ if (!vext_elem_mask(vs1, i)) { \ continue; \ @@ -5130,7 +5394,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ } \ env->vstart = 0; \ /* set tail elements to 1s */ \ - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \ + vext_set_elems_1s(vd, vta, num * esz, total_elems * esz); \ } /* Compress into vd elements of vs2 where vs1 is enabled */ @@ -5181,7 +5445,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c index 05b2d01e58d..b490b1d3989 100644 --- a/target/riscv/vector_internals.c +++ b/target/riscv/vector_internals.c @@ -66,7 +66,7 @@ void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2, uint32_t vma = vext_vma(desc); uint32_t i; - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, vl); for (i = env->vstart; i < vl; i++) { if (!vm && !vext_elem_mask(v0, i)) { @@ -92,7 +92,7 @@ void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2, uint32_t vma = vext_vma(desc); uint32_t i; - VSTART_CHECK_EARLY_EXIT(env); + VSTART_CHECK_EARLY_EXIT(env, vl); for (i = env->vstart; i < vl; i++) { if (!vm && !vext_elem_mask(v0, i)) { diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h index 9e1e15b5750..8eee7e5c31c 100644 --- a/target/riscv/vector_internals.h +++ b/target/riscv/vector_internals.h @@ -20,15 +20,16 @@ #define TARGET_RISCV_VECTOR_INTERNALS_H #include "qemu/bitops.h" +#include "hw/registerfields.h" #include "cpu.h" #include "tcg/tcg-gvec-desc.h" #include "internals.h" -#define 
VSTART_CHECK_EARLY_EXIT(env) do { \ - if (env->vstart >= env->vl) { \ - env->vstart = 0; \ - return; \ - } \ +#define VSTART_CHECK_EARLY_EXIT(env, vl) do { \ + if (env->vstart >= vl) { \ + env->vstart = 0; \ + return; \ + } \ } while (0) static inline uint32_t vext_nf(uint32_t desc) @@ -158,7 +159,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ \ - VSTART_CHECK_EARLY_EXIT(env); \ + VSTART_CHECK_EARLY_EXIT(env, vl); \ \ for (i = env->vstart; i < vl; i++) { \ if (!vm && !vext_elem_mask(v0, i)) { \ diff --git a/target/riscv/zce_helper.c b/target/riscv/zce_helper.c index b433bda16dc..55221f5f375 100644 --- a/target/riscv/zce_helper.c +++ b/target/riscv/zce_helper.c @@ -18,9 +18,8 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" target_ulong HELPER(cm_jalt)(CPURISCVState *env, uint32_t index) { diff --git a/target/rx/cpu-param.h b/target/rx/cpu-param.h index 521d669bdf7..84934f3bcaf 100644 --- a/target/rx/cpu-param.h +++ b/target/rx/cpu-param.h @@ -19,10 +19,11 @@ #ifndef RX_CPU_PARAM_H #define RX_CPU_PARAM_H -#define TARGET_LONG_BITS 32 #define TARGET_PAGE_BITS 12 #define TARGET_PHYS_ADDR_SPACE_BITS 32 #define TARGET_VIRT_ADDR_SPACE_BITS 32 +#define TARGET_INSN_START_EXTRA_WORDS 0 + #endif diff --git a/target/rx/cpu.c b/target/rx/cpu.c index 36d2a6f1890..c6dd5d6f832 100644 --- a/target/rx/cpu.c +++ b/target/rx/cpu.c @@ -21,11 +21,14 @@ #include "qapi/error.h" #include "cpu.h" #include "migration/vmstate.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" +#include "exec/translation-block.h" +#include "exec/target_page.h" #include "hw/loader.h" #include "fpu/softfloat.h" #include "tcg/debug-assert.h" +#include "accel/tcg/cpu-ops.h" static void rx_cpu_set_pc(CPUState *cs, vaddr value) { @@ -41,6 +44,17 @@ static vaddr rx_cpu_get_pc(CPUState *cs) return cpu->env.pc; } +static TCGTBCPUState rx_get_tb_cpu_state(CPUState *cs) +{ + CPURXState *env = cpu_env(cs); + uint32_t flags = 0; + + flags = FIELD_DP32(flags, PSW, PM, env->psw_pm); + flags = FIELD_DP32(flags, PSW, U, env->psw_u); + + return (TCGTBCPUState){ .pc = env->pc, .flags = flags }; +} + static void rx_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -65,7 +79,7 @@ static bool rx_cpu_has_work(CPUState *cs) (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR); } -static int riscv_cpu_mmu_index(CPUState *cs, bool ifunc) +static int rx_cpu_mmu_index(CPUState *cs, bool ifunc) { return 0; } @@ -93,6 +107,23 @@ static void rx_cpu_reset_hold(Object *obj, ResetType type) env->fpsw = 0; set_flush_to_zero(1, &env->fp_status); set_flush_inputs_to_zero(1, &env->fp_status); + /* + * TODO: this is not the correct NaN propagation rule for this + * architecture. The "RX Family User's Manual: Software" table 1.6 + * defines the propagation rules as "prefer SNaN over QNaN; + * then prefer dest over source", which is float_2nan_prop_s_ab. + */ + set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status); + /* Default NaN value: sign bit clear, set frac msb */ + set_float_default_nan_pattern(0b01000000, &env->fp_status); + /* + * TODO: "RX Family RXv1 Instruction Set Architecture" is not 100% clear + * on whether flush-to-zero should happen before or after rounding, but + * section 1.3.2 says that it happens when underflow is detected, and + * implies that underflow is detected after rounding. So this may not + * be the correct setting. 
+ */ + set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status); } static ObjectClass *rx_cpu_class_by_name(const char *cpu_model) @@ -150,6 +181,7 @@ static void rx_cpu_set_irq(void *opaque, int no, int request) static void rx_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) { + info->endian = BFD_ENDIAN_LITTLE; info->mach = bfd_mach_rx; info->print_insn = print_insn_rx; } @@ -174,30 +206,34 @@ static void rx_cpu_init(Object *obj) qdev_init_gpio_in(DEVICE(cpu), rx_cpu_set_irq, 2); } -#ifndef CONFIG_USER_ONLY #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps rx_sysemu_ops = { + .has_work = rx_cpu_has_work, .get_phys_page_debug = rx_cpu_get_phys_page_debug, }; -#endif - -#include "hw/core/tcg-cpu-ops.h" static const TCGCPUOps rx_tcg_ops = { + /* MTTCG not yet supported: require strict ordering */ + .guest_default_memory_order = TCG_MO_ALL, + .mttcg_supported = false, + .initialize = rx_translate_init, + .translate_code = rx_translate_code, + .get_tb_cpu_state = rx_get_tb_cpu_state, .synchronize_from_tb = rx_cpu_synchronize_from_tb, .restore_state_to_opc = rx_restore_state_to_opc, + .mmu_index = rx_cpu_mmu_index, .tlb_fill = rx_cpu_tlb_fill, + .pointer_wrap = cpu_pointer_wrap_uint32, -#ifndef CONFIG_USER_ONLY .cpu_exec_interrupt = rx_cpu_exec_interrupt, .cpu_exec_halt = rx_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = rx_cpu_do_interrupt, -#endif /* !CONFIG_USER_ONLY */ }; -static void rx_cpu_class_init(ObjectClass *klass, void *data) +static void rx_cpu_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); CPUClass *cc = CPU_CLASS(klass); @@ -210,15 +246,11 @@ static void rx_cpu_class_init(ObjectClass *klass, void *data) &rcc->parent_phases); cc->class_by_name = rx_cpu_class_by_name; - cc->has_work = rx_cpu_has_work; - cc->mmu_index = riscv_cpu_mmu_index; cc->dump_state = rx_cpu_dump_state; cc->set_pc = rx_cpu_set_pc; cc->get_pc = rx_cpu_get_pc; -#ifndef CONFIG_USER_ONLY cc->sysemu_ops = &rx_sysemu_ops; -#endif cc->gdb_read_register = rx_cpu_gdb_read_register; cc->gdb_write_register = rx_cpu_gdb_write_register; cc->disas_set_info = rx_cpu_disas_set_info; diff --git a/target/rx/cpu.h b/target/rx/cpu.h index c53593d7aa0..ba5761b6472 100644 --- a/target/rx/cpu.h +++ b/target/rx/cpu.h @@ -23,9 +23,15 @@ #include "hw/registerfields.h" #include "cpu-qom.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #include "qemu/cpu-float.h" +#ifdef CONFIG_USER_ONLY +#error "RX does not support user mode emulation" +#endif + /* PSW define */ REG32(PSW, 0) FIELD(PSW, C, 0, 1) @@ -129,35 +135,24 @@ struct RXCPUClass { #define CPU_RESOLVING_TYPE TYPE_RX_CPU const char *rx_crname(uint8_t cr); -#ifndef CONFIG_USER_ONLY void rx_cpu_do_interrupt(CPUState *cpu); bool rx_cpu_exec_interrupt(CPUState *cpu, int int_req); hwaddr rx_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); -#endif /* !CONFIG_USER_ONLY */ void rx_cpu_dump_state(CPUState *cpu, FILE *f, int flags); int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void rx_translate_init(void); +void rx_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte); -#include "exec/cpu-all.h" - #define CPU_INTERRUPT_SOFT CPU_INTERRUPT_TGT_INT_0 #define CPU_INTERRUPT_FIR CPU_INTERRUPT_TGT_INT_1 #define RX_CPU_IRQ 0 #define RX_CPU_FIR 1 -static inline 
void cpu_get_tb_cpu_state(CPURXState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags) -{ - *pc = env->pc; - *cs_base = 0; - *flags = FIELD_DP32(0, PSW, PM, env->psw_pm); - *flags = FIELD_DP32(*flags, PSW, U, env->psw_u); -} - static inline uint32_t rx_cpu_pack_psw(CPURXState *env) { uint32_t psw = 0; diff --git a/target/rx/helper.c b/target/rx/helper.c index 80912e8dcb4..0640ab322b5 100644 --- a/target/rx/helper.c +++ b/target/rx/helper.c @@ -20,7 +20,7 @@ #include "qemu/bitops.h" #include "cpu.h" #include "exec/log.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "hw/irq.h" void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte) @@ -40,8 +40,6 @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte) env->psw_c = FIELD_EX32(psw, PSW, C); } -#ifndef CONFIG_USER_ONLY - #define INT_FLAGS (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR) void rx_cpu_do_interrupt(CPUState *cs) { @@ -90,7 +88,7 @@ void rx_cpu_do_interrupt(CPUState *cs) cpu_stl_data(env, env->isp, env->pc); if (vec < 0x100) { - env->pc = cpu_ldl_data(env, 0xffffffc0 + vec * 4); + env->pc = cpu_ldl_data(env, 0xffffff80 + vec * 4); } else { env->pc = cpu_ldl_data(env, env->intb + (vec & 0xff) * 4); } @@ -146,5 +144,3 @@ hwaddr rx_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { return addr; } - -#endif /* !CONFIG_USER_ONLY */ diff --git a/target/rx/helper.h b/target/rx/helper.h index ebb47394744..8cc38b0cb71 100644 --- a/target/rx/helper.h +++ b/target/rx/helper.h @@ -4,27 +4,27 @@ DEF_HELPER_1(raise_privilege_violation, noreturn, env) DEF_HELPER_1(wait, noreturn, env) DEF_HELPER_2(rxint, noreturn, env, i32) DEF_HELPER_1(rxbrk, noreturn, env) -DEF_HELPER_FLAGS_3(fadd, TCG_CALL_NO_WG, f32, env, f32, f32) -DEF_HELPER_FLAGS_3(fsub, TCG_CALL_NO_WG, f32, env, f32, f32) -DEF_HELPER_FLAGS_3(fmul, TCG_CALL_NO_WG, f32, env, f32, f32) -DEF_HELPER_FLAGS_3(fdiv, TCG_CALL_NO_WG, f32, env, f32, f32) -DEF_HELPER_FLAGS_3(fcmp, TCG_CALL_NO_WG, void, env, f32, f32) -DEF_HELPER_FLAGS_2(ftoi, TCG_CALL_NO_WG, i32, env, f32) -DEF_HELPER_FLAGS_2(round, TCG_CALL_NO_WG, i32, env, f32) -DEF_HELPER_FLAGS_2(itof, TCG_CALL_NO_WG, f32, env, i32) +DEF_HELPER_3(fadd, f32, env, f32, f32) +DEF_HELPER_3(fsub, f32, env, f32, f32) +DEF_HELPER_3(fmul, f32, env, f32, f32) +DEF_HELPER_3(fdiv, f32, env, f32, f32) +DEF_HELPER_3(fcmp, void, env, f32, f32) +DEF_HELPER_2(ftoi, i32, env, f32) +DEF_HELPER_2(round, i32, env, f32) +DEF_HELPER_2(itof, f32, env, i32) DEF_HELPER_2(set_fpsw, void, env, i32) -DEF_HELPER_FLAGS_2(racw, TCG_CALL_NO_WG, void, env, i32) -DEF_HELPER_FLAGS_2(set_psw_rte, TCG_CALL_NO_WG, void, env, i32) -DEF_HELPER_FLAGS_2(set_psw, TCG_CALL_NO_WG, void, env, i32) +DEF_HELPER_2(racw, void, env, i32) +DEF_HELPER_2(set_psw_rte, void, env, i32) +DEF_HELPER_2(set_psw, void, env, i32) DEF_HELPER_1(pack_psw, i32, env) -DEF_HELPER_FLAGS_3(div, TCG_CALL_NO_WG, i32, env, i32, i32) -DEF_HELPER_FLAGS_3(divu, TCG_CALL_NO_WG, i32, env, i32, i32) -DEF_HELPER_FLAGS_1(scmpu, TCG_CALL_NO_WG, void, env) +DEF_HELPER_3(div, i32, env, i32, i32) +DEF_HELPER_3(divu, i32, env, i32, i32) +DEF_HELPER_1(scmpu, void, env) DEF_HELPER_1(smovu, void, env) DEF_HELPER_1(smovf, void, env) DEF_HELPER_1(smovb, void, env) DEF_HELPER_2(sstr, void, env, i32) -DEF_HELPER_FLAGS_2(swhile, TCG_CALL_NO_WG, void, env, i32) -DEF_HELPER_FLAGS_2(suntil, TCG_CALL_NO_WG, void, env, i32) -DEF_HELPER_FLAGS_2(rmpa, TCG_CALL_NO_WG, void, env, i32) +DEF_HELPER_2(swhile, void, env, i32) +DEF_HELPER_2(suntil, void, env, i32) +DEF_HELPER_2(rmpa, void, env, i32) 
DEF_HELPER_1(satr, void, env) diff --git a/target/rx/op_helper.c b/target/rx/op_helper.c index 691a12b2be1..2b190a4b2cf 100644 --- a/target/rx/op_helper.c +++ b/target/rx/op_helper.c @@ -19,9 +19,8 @@ #include "qemu/osdep.h" #include "qemu/bitops.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "fpu/softfloat.h" #include "tcg/debug-assert.h" @@ -99,8 +98,8 @@ static void update_fpsw(CPURXState *env, float32 ret, uintptr_t retaddr) if (xcpt & float_flag_inexact) { SET_FPSW(X); } - if ((xcpt & (float_flag_input_denormal - | float_flag_output_denormal)) + if ((xcpt & (float_flag_input_denormal_flushed + | float_flag_output_denormal_flushed)) && !FIELD_EX32(env->fpsw, FPSW, DN)) { env->fpsw = FIELD_DP32(env->fpsw, FPSW, CE, 1); } diff --git a/target/rx/translate.c b/target/rx/translate.c index 9aade2b6e5c..19a9584a829 100644 --- a/target/rx/translate.c +++ b/target/rx/translate.c @@ -20,11 +20,11 @@ #include "qemu/bswap.h" #include "qemu/qemu-print.h" #include "cpu.h" -#include "exec/exec-all.h" #include "tcg/tcg-op.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "exec/translator.h" +#include "exec/translation-block.h" #include "exec/log.h" #define HELPER_H "helper.h" @@ -2257,8 +2257,8 @@ static const TranslatorOps rx_tr_ops = { .tb_stop = rx_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void rx_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc; diff --git a/target/s390x/arch_dump.c b/target/s390x/arch_dump.c index 029d91d93a2..2c26e992959 100644 --- a/target/s390x/arch_dump.c +++ b/target/s390x/arch_dump.c @@ -16,7 +16,7 @@ #include "cpu.h" #include "s390x-internal.h" #include "elf.h" -#include "sysemu/dump.h" +#include "system/dump.h" #include "kvm/kvm_s390x.h" #include "target/s390x/kvm/pv.h" diff --git a/target/s390x/cpu-dump.c b/target/s390x/cpu-dump.c index 69cc9f77464..869d3a4ad54 100644 --- a/target/s390x/cpu-dump.c +++ b/target/s390x/cpu-dump.c @@ -23,7 +23,7 @@ #include "cpu.h" #include "s390x-internal.h" #include "qemu/qemu-print.h" -#include "sysemu/tcg.h" +#include "system/tcg.h" void s390_cpu_dump_state(CPUState *cs, FILE *f, int flags) { diff --git a/target/s390x/cpu-param.h b/target/s390x/cpu-param.h index 11d23b600d2..abfae3bedfb 100644 --- a/target/s390x/cpu-param.h +++ b/target/s390x/cpu-param.h @@ -2,21 +2,16 @@ * S/390 cpu parameters for qemu. * * Copyright (c) 2009 Ulrich Hecht - * SPDX-License-Identifier: GPL-2.0+ + * SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef S390_CPU_PARAM_H #define S390_CPU_PARAM_H -#define TARGET_LONG_BITS 64 #define TARGET_PAGE_BITS 12 #define TARGET_PHYS_ADDR_SPACE_BITS 64 #define TARGET_VIRT_ADDR_SPACE_BITS 64 -/* - * The z/Architecture has a strong memory model with some - * store-after-load re-ordering. - */ -#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) +#define TARGET_INSN_START_EXTRA_WORDS 2 #endif diff --git a/target/s390x/cpu-sysemu.c b/target/s390x/cpu-sysemu.c deleted file mode 100644 index 1cd30c1d84f..00000000000 --- a/target/s390x/cpu-sysemu.c +++ /dev/null @@ -1,322 +0,0 @@ -/* - * QEMU S/390 CPU - System Emulation-only code - * - * Copyright (c) 2009 Ulrich Hecht - * Copyright (c) 2011 Alexander Graf - * Copyright (c) 2012 SUSE LINUX Products GmbH - * Copyright (c) 2012 IBM Corp. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - */ - -#include "qemu/osdep.h" -#include "qemu/error-report.h" -#include "qapi/error.h" -#include "cpu.h" -#include "s390x-internal.h" -#include "kvm/kvm_s390x.h" -#include "sysemu/kvm.h" -#include "sysemu/reset.h" -#include "qemu/timer.h" -#include "trace.h" -#include "qapi/qapi-visit-run-state.h" -#include "sysemu/hw_accel.h" - -#include "target/s390x/kvm/pv.h" -#include "hw/boards.h" -#include "sysemu/sysemu.h" -#include "sysemu/tcg.h" -#include "hw/core/sysemu-cpu-ops.h" - -/* S390CPUClass::load_normal() */ -static void s390_cpu_load_normal(CPUState *s) -{ - S390CPU *cpu = S390_CPU(s); - uint64_t spsw; - - if (!s390_is_pv()) { - spsw = ldq_phys(s->as, 0); - cpu->env.psw.mask = spsw & PSW_MASK_SHORT_CTRL; - /* - * Invert short psw indication, so SIE will report a specification - * exception if it was not set. - */ - cpu->env.psw.mask ^= PSW_MASK_SHORTPSW; - cpu->env.psw.addr = spsw & PSW_MASK_SHORT_ADDR; - } else { - /* - * Firmware requires us to set the load state before we set - * the cpu to operating on protected guests. - */ - s390_cpu_set_state(S390_CPU_STATE_LOAD, cpu); - } - s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu); -} - -void s390_cpu_machine_reset_cb(void *opaque) -{ - S390CPU *cpu = opaque; - - run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, RUN_ON_CPU_NULL); -} - -static GuestPanicInformation *s390_cpu_get_crash_info(CPUState *cs) -{ - GuestPanicInformation *panic_info; - S390CPU *cpu = S390_CPU(cs); - - cpu_synchronize_state(cs); - panic_info = g_new0(GuestPanicInformation, 1); - - panic_info->type = GUEST_PANIC_INFORMATION_TYPE_S390; - panic_info->u.s390.core = cpu->env.core_id; - panic_info->u.s390.psw_mask = cpu->env.psw.mask; - panic_info->u.s390.psw_addr = cpu->env.psw.addr; - panic_info->u.s390.reason = cpu->env.crash_reason; - - return panic_info; -} - -static void s390_cpu_get_crash_info_qom(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) -{ - CPUState *cs = CPU(obj); - GuestPanicInformation *panic_info; - - if (!cs->crash_occurred) { - error_setg(errp, "No crash occurred"); - return; - } - - panic_info = s390_cpu_get_crash_info(cs); - - visit_type_GuestPanicInformation(v, "crash-information", &panic_info, - errp); - qapi_free_GuestPanicInformation(panic_info); -} - -void s390_cpu_init_sysemu(Object *obj) -{ - CPUState *cs = CPU(obj); - S390CPU *cpu = S390_CPU(obj); - - cs->start_powered_off = true; - object_property_add(obj, "crash-information", "GuestPanicInformation", - s390_cpu_get_crash_info_qom, NULL, NULL, NULL); - cpu->env.tod_timer = - timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu); - cpu->env.cpu_timer = - timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu); - s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu); -} - -bool s390_cpu_realize_sysemu(DeviceState *dev, Error **errp) -{ - S390CPU *cpu = S390_CPU(dev); - MachineState *ms = MACHINE(qdev_get_machine()); - unsigned int max_cpus = 
ms->smp.max_cpus; - - if (cpu->env.core_id >= max_cpus) { - error_setg(errp, "Unable to add CPU with core-id: %" PRIu32 - ", maximum core-id: %d", cpu->env.core_id, - max_cpus - 1); - return false; - } - - if (cpu_exists(cpu->env.core_id)) { - error_setg(errp, "Unable to add CPU with core-id: %" PRIu32 - ", it already exists", cpu->env.core_id); - return false; - } - - /* sync cs->cpu_index and env->core_id. The latter is needed for TCG. */ - CPU(cpu)->cpu_index = cpu->env.core_id; - return true; -} - -void s390_cpu_finalize(Object *obj) -{ - S390CPU *cpu = S390_CPU(obj); - - timer_free(cpu->env.tod_timer); - timer_free(cpu->env.cpu_timer); - - qemu_unregister_reset(s390_cpu_machine_reset_cb, cpu); - g_free(cpu->irqstate); -} - -static const struct SysemuCPUOps s390_sysemu_ops = { - .get_phys_page_debug = s390_cpu_get_phys_page_debug, - .get_crash_info = s390_cpu_get_crash_info, - .write_elf64_note = s390_cpu_write_elf64_note, - .legacy_vmsd = &vmstate_s390_cpu, -}; - -void s390_cpu_class_init_sysemu(CPUClass *cc) -{ - S390CPUClass *scc = S390_CPU_CLASS(cc); - - scc->load_normal = s390_cpu_load_normal; - cc->sysemu_ops = &s390_sysemu_ops; -} - -static bool disabled_wait(CPUState *cpu) -{ - return cpu->halted && !(S390_CPU(cpu)->env.psw.mask & - (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK)); -} - -static unsigned s390_count_running_cpus(void) -{ - CPUState *cpu; - int nr_running = 0; - - CPU_FOREACH(cpu) { - uint8_t state = S390_CPU(cpu)->env.cpu_state; - if (state == S390_CPU_STATE_OPERATING || - state == S390_CPU_STATE_LOAD) { - if (!disabled_wait(cpu)) { - nr_running++; - } - } - } - - return nr_running; -} - -unsigned int s390_cpu_halt(S390CPU *cpu) -{ - CPUState *cs = CPU(cpu); - trace_cpu_halt(cs->cpu_index); - - if (!cs->halted) { - cs->halted = 1; - cs->exception_index = EXCP_HLT; - } - - return s390_count_running_cpus(); -} - -void s390_cpu_unhalt(S390CPU *cpu) -{ - CPUState *cs = CPU(cpu); - trace_cpu_unhalt(cs->cpu_index); - - if (cs->halted) { - cs->halted = 0; - cs->exception_index = -1; - } -} - -unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu) - { - trace_cpu_set_state(CPU(cpu)->cpu_index, cpu_state); - - switch (cpu_state) { - case S390_CPU_STATE_STOPPED: - case S390_CPU_STATE_CHECK_STOP: - /* halt the cpu for common infrastructure */ - s390_cpu_halt(cpu); - break; - case S390_CPU_STATE_OPERATING: - case S390_CPU_STATE_LOAD: - /* - * Starting a CPU with a PSW WAIT bit set: - * KVM: handles this internally and triggers another WAIT exit. - * TCG: will actually try to continue to run. Don't unhalt, will - * be done when the CPU actually has work (an interrupt). 
- */ - if (!tcg_enabled() || !(cpu->env.psw.mask & PSW_MASK_WAIT)) { - s390_cpu_unhalt(cpu); - } - break; - default: - error_report("Requested CPU state is not a valid S390 CPU state: %u", - cpu_state); - exit(1); - } - if (kvm_enabled() && cpu->env.cpu_state != cpu_state) { - kvm_s390_set_cpu_state(cpu, cpu_state); - } - cpu->env.cpu_state = cpu_state; - - return s390_count_running_cpus(); -} - -int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit) -{ - if (kvm_enabled()) { - return kvm_s390_set_mem_limit(new_limit, hw_limit); - } - return 0; -} - -void s390_set_max_pagesize(uint64_t pagesize, Error **errp) -{ - if (kvm_enabled()) { - kvm_s390_set_max_pagesize(pagesize, errp); - } -} - -void s390_cmma_reset(void) -{ - if (kvm_enabled()) { - kvm_s390_cmma_reset(); - } -} - -int s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch_id, - int vq, bool assign) -{ - if (kvm_enabled()) { - return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign); - } else { - return 0; - } -} - -void s390_crypto_reset(void) -{ - if (kvm_enabled()) { - kvm_s390_crypto_reset(); - } -} - -void s390_enable_css_support(S390CPU *cpu) -{ - if (kvm_enabled()) { - kvm_s390_enable_css_support(cpu); - } -} - -void s390_do_cpu_set_diag318(CPUState *cs, run_on_cpu_data arg) -{ - if (kvm_enabled()) { - kvm_s390_set_diag318(cs, arg.host_ulong); - } -} - -void s390_cpu_topology_set_changed(bool changed) -{ - int ret; - - if (kvm_enabled()) { - ret = kvm_s390_topology_set_mtcr(changed); - if (ret) { - error_report("Failed to set Modified Topology Change Report: %s", - strerror(-ret)); - } - } -} diff --git a/target/s390x/cpu-system.c b/target/s390x/cpu-system.c new file mode 100644 index 00000000000..709ccd52992 --- /dev/null +++ b/target/s390x/cpu-system.c @@ -0,0 +1,323 @@ +/* + * QEMU S/390 CPU - System-only code + * + * Copyright (c) 2009 Ulrich Hecht + * Copyright (c) 2011 Alexander Graf + * Copyright (c) 2012 SUSE LINUX Products GmbH + * Copyright (c) 2012 IBM Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "cpu.h" +#include "s390x-internal.h" +#include "kvm/kvm_s390x.h" +#include "system/kvm.h" +#include "system/reset.h" +#include "qemu/timer.h" +#include "trace.h" +#include "qapi/qapi-visit-run-state.h" +#include "system/hw_accel.h" + +#include "target/s390x/kvm/pv.h" +#include "hw/boards.h" +#include "system/system.h" +#include "system/tcg.h" +#include "hw/core/sysemu-cpu-ops.h" + +bool s390_cpu_has_work(CPUState *cs) +{ + S390CPU *cpu = S390_CPU(cs); + + /* STOPPED cpus can never wake up */ + if (s390_cpu_get_state(cpu) != S390_CPU_STATE_LOAD && + s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) { + return false; + } + + if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { + return false; + } + + return s390_cpu_has_int(cpu); +} + +/* S390CPUClass::load_normal() */ +static void s390_cpu_load_normal(CPUState *s) +{ + S390CPU *cpu = S390_CPU(s); + uint64_t spsw; + + if (!s390_is_pv()) { + spsw = ldq_phys(s->as, 0); + cpu->env.psw.mask = spsw & PSW_MASK_SHORT_CTRL; + /* + * Invert short psw indication, so SIE will report a specification + * exception if it was not set. + */ + cpu->env.psw.mask ^= PSW_MASK_SHORTPSW; + cpu->env.psw.addr = spsw & PSW_MASK_SHORT_ADDR; + } else { + /* + * Firmware requires us to set the load state before we set + * the cpu to operating on protected guests. + */ + s390_cpu_set_state(S390_CPU_STATE_LOAD, cpu); + } + s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu); +} + +void s390_cpu_machine_reset_cb(void *opaque) +{ + S390CPU *cpu = opaque; + + run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, RUN_ON_CPU_NULL); +} + +static GuestPanicInformation *s390_cpu_get_crash_info(CPUState *cs) +{ + GuestPanicInformation *panic_info; + S390CPU *cpu = S390_CPU(cs); + + cpu_synchronize_state(cs); + panic_info = g_new0(GuestPanicInformation, 1); + + panic_info->type = GUEST_PANIC_INFORMATION_TYPE_S390; + panic_info->u.s390.core = cpu->env.core_id; + panic_info->u.s390.psw_mask = cpu->env.psw.mask; + panic_info->u.s390.psw_addr = cpu->env.psw.addr; + panic_info->u.s390.reason = cpu->env.crash_reason; + + return panic_info; +} + +static void s390_cpu_get_crash_info_qom(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + CPUState *cs = CPU(obj); + GuestPanicInformation *panic_info; + + if (!cs->crash_occurred) { + error_setg(errp, "No crash occurred"); + return; + } + + panic_info = s390_cpu_get_crash_info(cs); + + visit_type_GuestPanicInformation(v, "crash-information", &panic_info, + errp); + qapi_free_GuestPanicInformation(panic_info); +} + +void s390_cpu_system_init(Object *obj) +{ + CPUState *cs = CPU(obj); + S390CPU *cpu = S390_CPU(obj); + + cs->start_powered_off = true; + object_property_add(obj, "crash-information", "GuestPanicInformation", + s390_cpu_get_crash_info_qom, NULL, NULL, NULL); + cpu->env.tod_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu); + cpu->env.cpu_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu); + s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu); +} + +bool s390_cpu_system_realize(DeviceState *dev, Error **errp) +{ + S390CPU *cpu = S390_CPU(dev); + MachineState *ms = MACHINE(qdev_get_machine()); + unsigned int max_cpus = ms->smp.max_cpus; + + if (cpu->env.core_id >= max_cpus) { + error_setg(errp, "Unable to add CPU with core-id: %" PRIu32 + ", maximum core-id: %d", cpu->env.core_id, + max_cpus - 1); + return false; + } + + if (cpu_exists(cpu->env.core_id)) { + error_setg(errp, "Unable to add CPU 
with core-id: %" PRIu32 + ", it already exists", cpu->env.core_id); + return false; + } + + /* sync cs->cpu_index and env->core_id. The latter is needed for TCG. */ + CPU(cpu)->cpu_index = cpu->env.core_id; + return true; +} + +void s390_cpu_finalize(Object *obj) +{ + S390CPU *cpu = S390_CPU(obj); + + timer_free(cpu->env.tod_timer); + timer_free(cpu->env.cpu_timer); + + qemu_unregister_reset(s390_cpu_machine_reset_cb, cpu); + g_free(cpu->irqstate); +} + +static const struct SysemuCPUOps s390_sysemu_ops = { + .has_work = s390_cpu_has_work, + .get_phys_page_debug = s390_cpu_get_phys_page_debug, + .get_crash_info = s390_cpu_get_crash_info, + .write_elf64_note = s390_cpu_write_elf64_note, + .legacy_vmsd = &vmstate_s390_cpu, +}; + +void s390_cpu_system_class_init(CPUClass *cc) +{ + S390CPUClass *scc = S390_CPU_CLASS(cc); + + scc->load_normal = s390_cpu_load_normal; + cc->sysemu_ops = &s390_sysemu_ops; +} + +static bool disabled_wait(CPUState *cpu) +{ + return cpu->halted && !(S390_CPU(cpu)->env.psw.mask & + (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK)); +} + +unsigned s390_count_running_cpus(void) +{ + CPUState *cpu; + int nr_running = 0; + + CPU_FOREACH(cpu) { + uint8_t state = S390_CPU(cpu)->env.cpu_state; + if (state == S390_CPU_STATE_OPERATING || + state == S390_CPU_STATE_LOAD) { + if (!disabled_wait(cpu)) { + nr_running++; + } + } + } + + return nr_running; +} + +void s390_cpu_halt(S390CPU *cpu) +{ + CPUState *cs = CPU(cpu); + trace_cpu_halt(cs->cpu_index); + + if (!cs->halted) { + cs->halted = 1; + cs->exception_index = EXCP_HLT; + } +} + +void s390_cpu_unhalt(S390CPU *cpu) +{ + CPUState *cs = CPU(cpu); + trace_cpu_unhalt(cs->cpu_index); + + if (cs->halted) { + cs->halted = 0; + cs->exception_index = -1; + } +} + +unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu) + { + trace_cpu_set_state(CPU(cpu)->cpu_index, cpu_state); + + switch (cpu_state) { + case S390_CPU_STATE_STOPPED: + case S390_CPU_STATE_CHECK_STOP: + /* halt the cpu for common infrastructure */ + s390_cpu_halt(cpu); + break; + case S390_CPU_STATE_OPERATING: + case S390_CPU_STATE_LOAD: + /* + * Starting a CPU with a PSW WAIT bit set: + * KVM: handles this internally and triggers another WAIT exit. + * TCG: will actually try to continue to run. Don't unhalt, will + * be done when the CPU actually has work (an interrupt). 
+ */ + if (!tcg_enabled() || !(cpu->env.psw.mask & PSW_MASK_WAIT)) { + s390_cpu_unhalt(cpu); + } + break; + default: + error_report("Requested CPU state is not a valid S390 CPU state: %u", + cpu_state); + exit(1); + } + if (kvm_enabled() && cpu->env.cpu_state != cpu_state) { + kvm_s390_set_cpu_state(cpu, cpu_state); + } + cpu->env.cpu_state = cpu_state; + + return s390_count_running_cpus(); +} + +void s390_cmma_reset(void) +{ + if (kvm_enabled()) { + kvm_s390_cmma_reset(); + } +} + +int s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch_id, + int vq, bool assign) +{ + if (kvm_enabled()) { + return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign); + } else { + return 0; + } +} + +void s390_crypto_reset(void) +{ + if (kvm_enabled()) { + kvm_s390_crypto_reset(); + } +} + +void s390_enable_css_support(S390CPU *cpu) +{ + if (kvm_enabled()) { + kvm_s390_enable_css_support(cpu); + } +} + +void s390_do_cpu_set_diag318(CPUState *cs, run_on_cpu_data arg) +{ + if (kvm_enabled()) { + kvm_s390_set_diag318(cs, arg.host_ulong); + } +} + +void s390_cpu_topology_set_changed(bool changed) +{ + int ret; + + if (kvm_enabled()) { + ret = kvm_s390_topology_set_mtcr(changed); + if (ret) { + error_report("Failed to set Modified Topology Change Report: %s", + strerror(-ret)); + } + } +} diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c index 0fbfcd35d83..f05ce317da1 100644 --- a/target/s390x/cpu.c +++ b/target/s390x/cpu.c @@ -25,18 +25,19 @@ #include "cpu.h" #include "s390x-internal.h" #include "kvm/kvm_s390x.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "qemu/module.h" #include "trace.h" #include "qapi/qapi-types-machine.h" -#include "sysemu/hw_accel.h" +#include "system/hw_accel.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" +#include "hw/resettable.h" #include "fpu/softfloat-helpers.h" #include "disas/capstone.h" -#include "sysemu/tcg.h" +#include "system/tcg.h" #ifndef CONFIG_USER_ONLY -#include "sysemu/reset.h" +#include "system/reset.h" #endif #include "hw/s390x/cpu-topology.h" @@ -125,28 +126,6 @@ static vaddr s390_cpu_get_pc(CPUState *cs) return cpu->env.psw.addr; } -static bool s390_cpu_has_work(CPUState *cs) -{ - S390CPU *cpu = S390_CPU(cs); - - /* STOPPED cpus can never wake up */ - if (s390_cpu_get_state(cpu) != S390_CPU_STATE_LOAD && - s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) { - return false; - } - - if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { - return false; - } - - return s390_cpu_has_int(cpu); -} - -static int s390x_cpu_mmu_index(CPUState *cs, bool ifetch) -{ - return s390x_env_mmu_index(cpu_env(cs), ifetch); -} - static void s390_query_cpu_fast(CPUState *cpu, CpuInfoFast *value) { S390CPU *s390_cpu = S390_CPU(cpu); @@ -162,23 +141,25 @@ static void s390_query_cpu_fast(CPUState *cpu, CpuInfoFast *value) #endif } -/* S390CPUClass::reset() */ -static void s390_cpu_reset(CPUState *s, cpu_reset_type type) +/* S390CPUClass Resettable reset_hold phase method */ +static void s390_cpu_reset_hold(Object *obj, ResetType type) { - S390CPU *cpu = S390_CPU(s); + S390CPU *cpu = S390_CPU(obj); S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); CPUS390XState *env = &cpu->env; - DeviceState *dev = DEVICE(s); - scc->parent_reset(dev); + if (scc->parent_phases.hold) { + scc->parent_phases.hold(obj, type); + } cpu->env.sigp_order = 0; s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu); switch (type) { - case S390_CPU_RESET_CLEAR: + default: + /* RESET_TYPE_COLD: power on or "clear" reset */ memset(env, 0, offsetof(CPUS390XState, 
start_initial_reset_fields)); /* fall through */ - case S390_CPU_RESET_INITIAL: + case RESET_TYPE_S390_CPU_INITIAL: /* initial reset does not clear everything! */ memset(&env->start_initial_reset_fields, 0, offsetof(CPUS390XState, start_normal_reset_fields) - @@ -202,8 +183,14 @@ static void s390_cpu_reset(CPUState *s, cpu_reset_type type) /* tininess for underflow is detected before rounding */ set_float_detect_tininess(float_tininess_before_rounding, &env->fpu_status); + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fpu_status); + set_float_3nan_prop_rule(float_3nan_prop_s_abc, &env->fpu_status); + set_float_infzeronan_rule(float_infzeronan_dnan_always, + &env->fpu_status); + /* Default NaN value: sign bit clear, frac msb set */ + set_float_default_nan_pattern(0b01000000, &env->fpu_status); /* fall through */ - case S390_CPU_RESET_NORMAL: + case RESET_TYPE_S390_CPU_NORMAL: env->psw.mask &= ~PSW_MASK_RI; memset(&env->start_normal_reset_fields, 0, offsetof(CPUS390XState, end_reset_fields) - @@ -212,20 +199,18 @@ static void s390_cpu_reset(CPUState *s, cpu_reset_type type) env->pfault_token = -1UL; env->bpbc = false; break; - default: - g_assert_not_reached(); } /* Reset state inside the kernel that we cannot access yet from QEMU. */ if (kvm_enabled()) { switch (type) { - case S390_CPU_RESET_CLEAR: + default: kvm_s390_reset_vcpu_clear(cpu); break; - case S390_CPU_RESET_INITIAL: + case RESET_TYPE_S390_CPU_INITIAL: kvm_s390_reset_vcpu_initial(cpu); break; - case S390_CPU_RESET_NORMAL: + case RESET_TYPE_S390_CPU_NORMAL: kvm_s390_reset_vcpu_normal(cpu); break; } @@ -236,6 +221,7 @@ static void s390_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) { info->mach = bfd_mach_s390_64; info->cap_arch = CS_ARCH_SYSZ; + info->endian = BFD_ENDIAN_BIG; info->cap_insn_unit = 2; info->cap_insn_split = 6; } @@ -253,7 +239,7 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp) } #if !defined(CONFIG_USER_ONLY) - if (!s390_cpu_realize_sysemu(dev, &err)) { + if (!s390_cpu_system_realize(dev, &err)) { goto out; } #endif @@ -293,7 +279,7 @@ static void s390_cpu_initfn(Object *obj) cs->exception_index = EXCP_HLT; #if !defined(CONFIG_USER_ONLY) - s390_cpu_init_sysemu(obj); + s390_cpu_system_init(obj); #endif } @@ -302,8 +288,8 @@ static const gchar *s390_gdb_arch_name(CPUState *cs) return "s390:64-bit"; } -static Property s390x_cpu_properties[] = { -#if !defined(CONFIG_USER_ONLY) +#ifndef CONFIG_USER_ONLY +static const Property s390x_cpu_properties[] = { DEFINE_PROP_UINT32("core-id", S390CPU, env.core_id, 0), DEFINE_PROP_INT32("socket-id", S390CPU, env.socket_id, -1), DEFINE_PROP_INT32("book-id", S390CPU, env.book_id, -1), @@ -311,22 +297,21 @@ static Property s390x_cpu_properties[] = { DEFINE_PROP_BOOL("dedicated", S390CPU, env.dedicated, false), DEFINE_PROP_CPUS390ENTITLEMENT("entitlement", S390CPU, env.entitlement, S390_CPU_ENTITLEMENT_AUTO), -#endif - DEFINE_PROP_END_OF_LIST() }; +#endif -static void s390_cpu_reset_full(DeviceState *dev) +#ifdef CONFIG_TCG +#include "accel/tcg/cpu-ops.h" +#include "tcg/tcg_s390x.h" + +static int s390x_cpu_mmu_index(CPUState *cs, bool ifetch) { - CPUState *s = CPU(dev); - return s390_cpu_reset(s, S390_CPU_RESET_CLEAR); + return s390x_env_mmu_index(cpu_env(cs), ifetch); } -#ifdef CONFIG_TCG -#include "hw/core/tcg-cpu-ops.h" - -void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *pflags) +static TCGTBCPUState s390x_get_tb_cpu_state(CPUState *cs) { + CPUS390XState *env = cpu_env(cs); uint32_t flags; if (env->psw.addr & 
1) { @@ -338,9 +323,6 @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc, tcg_s390_program_interrupt(env, PGM_SPECIFICATION, 0); } - *pc = env->psw.addr; - *cs_base = env->ex_value; - flags = (env->psw.mask >> FLAG_MASK_PSW_SHIFT) & FLAG_MASK_PSW; if (env->psw.mask & PSW_MASK_PER) { flags |= env->cregs[9] & (FLAG_MASK_PER_BRANCH | @@ -357,20 +339,46 @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc, if (env->cregs[0] & CR0_VECTOR) { flags |= FLAG_MASK_VECTOR; } - *pflags = flags; + + return (TCGTBCPUState){ + .pc = env->psw.addr, + .flags = flags, + .cs_base = env->ex_value, + }; } +#ifndef CONFIG_USER_ONLY +static vaddr s390_pointer_wrap(CPUState *cs, int mmu_idx, + vaddr result, vaddr base) +{ + return wrap_address(cpu_env(cs), result); +} +#endif + static const TCGCPUOps s390_tcg_ops = { + .mttcg_supported = true, + .precise_smc = true, + /* + * The z/Architecture has a strong memory model with some + * store-after-load re-ordering. + */ + .guest_default_memory_order = TCG_MO_ALL & ~TCG_MO_ST_LD, + .initialize = s390x_translate_init, + .translate_code = s390x_translate_code, + .get_tb_cpu_state = s390x_get_tb_cpu_state, .restore_state_to_opc = s390x_restore_state_to_opc, + .mmu_index = s390x_cpu_mmu_index, #ifdef CONFIG_USER_ONLY .record_sigsegv = s390_cpu_record_sigsegv, .record_sigbus = s390_cpu_record_sigbus, #else .tlb_fill = s390_cpu_tlb_fill, + .pointer_wrap = s390_pointer_wrap, .cpu_exec_interrupt = s390_cpu_exec_interrupt, .cpu_exec_halt = s390_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = s390_cpu_do_interrupt, .debug_excp_handler = s390x_cpu_debug_excp_handler, .do_unaligned_access = s390x_cpu_do_unaligned_access, @@ -378,23 +386,22 @@ static const TCGCPUOps s390_tcg_ops = { }; #endif /* CONFIG_TCG */ -static void s390_cpu_class_init(ObjectClass *oc, void *data) +static void s390_cpu_class_init(ObjectClass *oc, const void *data) { S390CPUClass *scc = S390_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(scc); DeviceClass *dc = DEVICE_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); device_class_set_parent_realize(dc, s390_cpu_realizefn, &scc->parent_realize); - device_class_set_props(dc, s390x_cpu_properties); dc->user_creatable = true; - device_class_set_parent_reset(dc, s390_cpu_reset_full, &scc->parent_reset); + resettable_class_set_parent_phases(rc, NULL, s390_cpu_reset_hold, NULL, + &scc->parent_phases); - scc->reset = s390_cpu_reset; - cc->class_by_name = s390_cpu_class_by_name, - cc->has_work = s390_cpu_has_work; - cc->mmu_index = s390x_cpu_mmu_index; + cc->class_by_name = s390_cpu_class_by_name; + cc->list_cpus = s390_cpu_list; cc->dump_state = s390_cpu_dump_state; cc->query_cpu_fast = s390_query_cpu_fast; cc->set_pc = s390_cpu_set_pc; @@ -402,7 +409,8 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data) cc->gdb_read_register = s390_cpu_gdb_read_register; cc->gdb_write_register = s390_cpu_gdb_write_register; #ifndef CONFIG_USER_ONLY - s390_cpu_class_init_sysemu(cc); + device_class_set_props(dc, s390x_cpu_properties); + s390_cpu_system_class_init(cc); #endif cc->disas_set_info = s390_cpu_disas_set_info; cc->gdb_core_xml_file = "s390x-core64.xml"; diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h index d6b75ad0e08..aa931cb6748 100644 --- a/target/s390x/cpu.h +++ b/target/s390x/cpu.h @@ -27,16 +27,14 @@ #include "cpu-qom.h" #include "cpu_models.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #include "qemu/cpu-float.h" #include "qapi/qapi-types-machine-common.h" #define 
ELF_MACHINE_UNAME "S390X" -#define TARGET_HAS_PRECISE_SMC - -#define TARGET_INSN_START_EXTRA_WORDS 2 - #define MMU_USER_IDX 0 #define S390_MAX_CPUS 248 @@ -133,7 +131,7 @@ typedef struct CPUArchState { int32_t book_id; int32_t drawer_id; bool dedicated; - CpuS390Entitlement entitlement; /* Used only for vertical polarization */ + S390CpuEntitlement entitlement; /* Used only for vertical polarization */ uint64_t cpuid; #endif @@ -177,19 +175,11 @@ struct ArchCPU { uint32_t irqstate_saved_size; }; -typedef enum cpu_reset_type { - S390_CPU_RESET_NORMAL, - S390_CPU_RESET_INITIAL, - S390_CPU_RESET_CLEAR, -} cpu_reset_type; - /** * S390CPUClass: * @parent_realize: The parent class' realize handler. - * @parent_reset: The parent class' reset handler. + * @parent_phases: The parent class' reset phase handlers. * @load_normal: Performs a load normal. - * @cpu_reset: Performs a CPU reset. - * @initial_cpu_reset: Performs an initial CPU reset. * * An S/390 CPU model. */ @@ -203,9 +193,8 @@ struct S390CPUClass { const char *desc; DeviceRealize parent_realize; - DeviceReset parent_reset; + ResettablePhases parent_phases; void (*load_normal)(CPUState *cpu); - void (*reset)(CPUState *cpu, cpu_reset_type type); }; #ifndef CONFIG_USER_ONLY @@ -422,15 +411,6 @@ static inline int s390x_env_mmu_index(CPUS390XState *env, bool ifetch) #endif } -#ifdef CONFIG_TCG - -#include "tcg/tcg_s390x.h" - -void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags); - -#endif /* CONFIG_TCG */ - /* PER bits from control register 9 */ #define PER_CR9_EVENT_BRANCH 0x80000000 #define PER_CR9_EVENT_IFETCH 0x40000000 @@ -872,16 +852,12 @@ static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg) static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg) { - S390CPUClass *scc = S390_CPU_GET_CLASS(cs); - - scc->reset(cs, S390_CPU_RESET_NORMAL); + resettable_reset(OBJECT(cs), RESET_TYPE_S390_CPU_NORMAL); } static inline void s390_do_cpu_initial_reset(CPUState *cs, run_on_cpu_data arg) { - S390CPUClass *scc = S390_CPU_GET_CLASS(cs); - - scc->reset(cs, S390_CPU_RESET_INITIAL); + resettable_reset(OBJECT(cs), RESET_TYPE_S390_CPU_INITIAL); } static inline void s390_do_cpu_load_normal(CPUState *cs, run_on_cpu_data arg) @@ -894,8 +870,6 @@ static inline void s390_do_cpu_load_normal(CPUState *cs, run_on_cpu_data arg) /* cpu.c */ void s390_crypto_reset(void); -int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit); -void s390_set_max_pagesize(uint64_t pagesize, Error **errp); void s390_cmma_reset(void); void s390_enable_css_support(S390CPU *cpu); void s390_do_cpu_set_diag318(CPUState *cs, run_on_cpu_data arg); @@ -915,13 +889,6 @@ static inline uint8_t s390_cpu_get_state(S390CPU *cpu) } -/* cpu_models.c */ -void s390_cpu_list(void); -#define cpu_list s390_cpu_list -void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga, - const S390FeatInit feat_init); - - /* helper.c */ #define CPU_RESOLVING_TYPE TYPE_S390_CPU @@ -961,6 +928,4 @@ uint64_t s390_cpu_get_psw_mask(CPUS390XState *env); /* outside of target/s390x/ */ S390CPU *s390_cpu_addr2state(uint16_t cpu_addr); -#include "exec/cpu-all.h" - #endif diff --git a/target/s390x/cpu_features.c b/target/s390x/cpu_features.c index cb4e2b89208..4b5be6798ef 100644 --- a/target/s390x/cpu_features.c +++ b/target/s390x/cpu_features.c @@ -93,6 +93,7 @@ void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type, case S390_FEAT_TYPE_KDSA: case S390_FEAT_TYPE_SORTL: case S390_FEAT_TYPE_DFLTCC: + case 
S390_FEAT_TYPE_PFCR: set_be_bit(0, data); /* query is always available */ break; default: @@ -239,8 +240,10 @@ void s390_get_deprecated_features(S390FeatBitmap features) /* indexed by feature group number for easy lookup */ static S390FeatGroupDef s390_feature_groups[] = { FEAT_GROUP_INIT("plo", PLO, "Perform-locked-operation facility"), + FEAT_GROUP_INIT("plo_ext", PLO_EXT, "PLO-extension facility"), FEAT_GROUP_INIT("tods", TOD_CLOCK_STEERING, "Tod-clock-steering facility"), FEAT_GROUP_INIT("gen13ptff", GEN13_PTFF, "PTFF enhancements introduced with z13"), + FEAT_GROUP_INIT("gen17ptff", GEN17_PTFF, "PTFF enhancements introduced with gen17"), FEAT_GROUP_INIT("msa", MSA, "Message-security-assist facility"), FEAT_GROUP_INIT("msa1", MSA_EXT_1, "Message-security-assist-extension 1 facility"), FEAT_GROUP_INIT("msa2", MSA_EXT_2, "Message-security-assist-extension 2 facility"), @@ -252,9 +255,17 @@ static S390FeatGroupDef s390_feature_groups[] = { FEAT_GROUP_INIT("msa8", MSA_EXT_8, "Message-security-assist-extension 8 facility"), FEAT_GROUP_INIT("msa9", MSA_EXT_9, "Message-security-assist-extension 9 facility"), FEAT_GROUP_INIT("msa9_pckmo", MSA_EXT_9_PCKMO, "Message-security-assist-extension 9 PCKMO subfunctions"), + FEAT_GROUP_INIT("msa10", MSA_EXT_10, "Message-security-assist-extension 10 facility"), + FEAT_GROUP_INIT("msa10_pckmo", MSA_EXT_10_PCKMO, "Message-security-assist-extension 10 PCKMO subfunctions"), + FEAT_GROUP_INIT("msa11", MSA_EXT_11, "Message-security-assist-extension 11 facility"), + FEAT_GROUP_INIT("msa11_pckmo", MSA_EXT_11_PCKMO, "Message-security-assist-extension 11 PCKMO subfunctions"), + FEAT_GROUP_INIT("msa12", MSA_EXT_12, "Message-security-assist-extension 12 facility"), + FEAT_GROUP_INIT("msa13", MSA_EXT_13, "Message-security-assist-extension 13 facility"), + FEAT_GROUP_INIT("msa13_pckmo", MSA_EXT_13_PCKMO, "Message-security-assist-extension 13 PCKMO subfunctions"), FEAT_GROUP_INIT("mepochptff", MULTIPLE_EPOCH_PTFF, "PTFF enhancements introduced with Multiple-epoch facility"), FEAT_GROUP_INIT("esort", ENH_SORT, "Enhanced-sort facility"), FEAT_GROUP_INIT("deflate", DEFLATE_CONVERSION, "Deflate-conversion facility"), + FEAT_GROUP_INIT("ccf", CONCURRENT_FUNCTIONS, "Concurrent-functions facility"), }; const S390FeatGroupDef *s390_feat_group_def(S390FeatGroup group) diff --git a/target/s390x/cpu_features.h b/target/s390x/cpu_features.h index 661a8cd6dbd..5635839d032 100644 --- a/target/s390x/cpu_features.h +++ b/target/s390x/cpu_features.h @@ -44,6 +44,7 @@ typedef enum { S390_FEAT_TYPE_SORTL, S390_FEAT_TYPE_DFLTCC, S390_FEAT_TYPE_UV_FEAT_GUEST, + S390_FEAT_TYPE_PFCR, } S390FeatType; /* Definition of a CPU feature */ diff --git a/target/s390x/cpu_features_def.h.inc b/target/s390x/cpu_features_def.h.inc index c53ac133528..c017bffcdc4 100644 --- a/target/s390x/cpu_features_def.h.inc +++ b/target/s390x/cpu_features_def.h.inc @@ -90,6 +90,10 @@ DEF_FEAT(EDAT_2, "edat2", STFL, 78, "Enhanced-DAT facility 2") DEF_FEAT(DFP_PACKED_CONVERSION, "dfppc", STFL, 80, "Decimal-floating-point packed-conversion facility") DEF_FEAT(PPA15, "ppa15", STFL, 81, "PPA15 is installed") DEF_FEAT(BPB, "bpb", STFL, 82, "Branch prediction blocking") +DEF_FEAT(MISC_INSTRUCTION_EXT4, "minste4", STFL, 84, "Miscellaneous-Instruction-Extensions Facility 4") +DEF_FEAT(SIF, "sif", STFL, 85, "Sequential-instruction-fetching facility") +DEF_FEAT(MSA_EXT_12, "msa12-base", STFL, 86, "Message-security-assist-extension-12 facility (excluding subfunctions)") +DEF_FEAT(PLO_EXT, "plo-ext", STFL, 87, "PLO-extension 
facility") DEF_FEAT(VECTOR, "vx", STFL, 129, "Vector facility") DEF_FEAT(INSTRUCTION_EXEC_PROT, "iep", STFL, 130, "Instruction-execution-protection facility") DEF_FEAT(SIDE_EFFECT_ACCESS_ESOP2, "sea_esop2", STFL, 131, "Side-effect-access facility and Enhanced-suppression-on-protection facility 2") @@ -110,11 +114,15 @@ DEF_FEAT(MSA_EXT_9, "msa9-base", STFL, 155, "Message-security-assist-extension-9 DEF_FEAT(ETOKEN, "etoken", STFL, 156, "Etoken facility") DEF_FEAT(UNPACK, "unpack", STFL, 161, "Unpack facility") DEF_FEAT(NNPA, "nnpa", STFL, 165, "NNPA facility") +DEF_FEAT(INEFF_NC_TX, "ineff_nc_tx", STFL, 170, "Ineffective-nonconstrained-transaction facility") DEF_FEAT(VECTOR_PACKED_DECIMAL_ENH2, "vxpdeh2", STFL, 192, "Vector-Packed-Decimal-Enhancement facility 2") DEF_FEAT(BEAR_ENH, "beareh", STFL, 193, "BEAR-enhancement facility") DEF_FEAT(RDP, "rdp", STFL, 194, "Reset-DAT-protection facility") DEF_FEAT(PAI, "pai", STFL, 196, "Processor-Activity-Instrumentation facility") DEF_FEAT(PAIE, "paie", STFL, 197, "Processor-Activity-Instrumentation extension-1") +DEF_FEAT(VECTOR_ENH3, "vxeh3", STFL, 198, "Vector Enhancements facility 3") +DEF_FEAT(VECTOR_PACKED_DECIMAL_ENH3, "vxpdeh3", STFL, 199, "Vector-Packed-Decimal-Enhancement facility 3") +DEF_FEAT(CCF_BASE, "ccf-base", STFL, 201, "Concurrent-Functions facility") /* Features exposed via SCLP SCCB Byte 80 - 98 (bit numbers relative to byte-80) */ DEF_FEAT(SIE_GSLS, "gsls", SCLP_CONF_CHAR, 40, "SIE: Guest-storage-limit-suppression facility") @@ -151,28 +159,66 @@ DEF_FEAT(AP, "ap", MISC, 0, "AP instructions installed") /* Features exposed via the PLO instruction. */ DEF_FEAT(PLO_CL, "plo-cl", PLO, 0, "PLO Compare and load (32 bit in general registers)") DEF_FEAT(PLO_CLG, "plo-clg", PLO, 1, "PLO Compare and load (64 bit in parameter list)") -DEF_FEAT(PLO_CLGR, "plo-clgr", PLO, 2, "PLO Compare and load (32 bit in general registers)") +DEF_FEAT(PLO_CLGR, "plo-clgr", PLO, 2, "PLO Compare and load (64 bit in general registers)") DEF_FEAT(PLO_CLX, "plo-clx", PLO, 3, "PLO Compare and load (128 bit in parameter list)") DEF_FEAT(PLO_CS, "plo-cs", PLO, 4, "PLO Compare and swap (32 bit in general registers)") DEF_FEAT(PLO_CSG, "plo-csg", PLO, 5, "PLO Compare and swap (64 bit in parameter list)") -DEF_FEAT(PLO_CSGR, "plo-csgr", PLO, 6, "PLO Compare and swap (32 bit in general registers)") +DEF_FEAT(PLO_CSGR, "plo-csgr", PLO, 6, "PLO Compare and swap (64 bit in general registers)") DEF_FEAT(PLO_CSX, "plo-csx", PLO, 7, "PLO Compare and swap (128 bit in parameter list)") DEF_FEAT(PLO_DCS, "plo-dcs", PLO, 8, "PLO Double compare and swap (32 bit in general registers)") DEF_FEAT(PLO_DCSG, "plo-dcsg", PLO, 9, "PLO Double compare and swap (64 bit in parameter list)") -DEF_FEAT(PLO_DCSGR, "plo-dcsgr", PLO, 10, "PLO Double compare and swap (32 bit in general registers)") +DEF_FEAT(PLO_DCSGR, "plo-dcsgr", PLO, 10, "PLO Double compare and swap (64 bit in general registers)") DEF_FEAT(PLO_DCSX, "plo-dcsx", PLO, 11, "PLO Double compare and swap (128 bit in parameter list)") DEF_FEAT(PLO_CSST, "plo-csst", PLO, 12, "PLO Compare and swap and store (32 bit in general registers)") DEF_FEAT(PLO_CSSTG, "plo-csstg", PLO, 13, "PLO Compare and swap and store (64 bit in parameter list)") -DEF_FEAT(PLO_CSSTGR, "plo-csstgr", PLO, 14, "PLO Compare and swap and store (32 bit in general registers)") +DEF_FEAT(PLO_CSSTGR, "plo-csstgr", PLO, 14, "PLO Compare and swap and store (64 bit in general registers)") DEF_FEAT(PLO_CSSTX, "plo-csstx", PLO, 15, "PLO Compare and swap and store (128 
bit in parameter list)") DEF_FEAT(PLO_CSDST, "plo-csdst", PLO, 16, "PLO Compare and swap and double store (32 bit in general registers)") DEF_FEAT(PLO_CSDSTG, "plo-csdstg", PLO, 17, "PLO Compare and swap and double store (64 bit in parameter list)") -DEF_FEAT(PLO_CSDSTGR, "plo-csdstgr", PLO, 18, "PLO Compare and swap and double store (32 bit in general registers)") +DEF_FEAT(PLO_CSDSTGR, "plo-csdstgr", PLO, 18, "PLO Compare and swap and double store (64 bit in general registers)") DEF_FEAT(PLO_CSDSTX, "plo-csdstx", PLO, 19, "PLO Compare and swap and double store (128 bit in parameter list)") DEF_FEAT(PLO_CSTST, "plo-cstst", PLO, 20, "PLO Compare and swap and triple store (32 bit in general registers)") DEF_FEAT(PLO_CSTSTG, "plo-cststg", PLO, 21, "PLO Compare and swap and triple store (64 bit in parameter list)") -DEF_FEAT(PLO_CSTSTGR, "plo-cststgr", PLO, 22, "PLO Compare and swap and triple store (32 bit in general registers)") +DEF_FEAT(PLO_CSTSTGR, "plo-cststgr", PLO, 22, "PLO Compare and swap and triple store (64 bit in general registers)") DEF_FEAT(PLO_CSTSTX, "plo-cststx", PLO, 23, "PLO Compare and swap and triple store (128 bit in parameter list)") +DEF_FEAT(PLO_CLO, "plo-clo", PLO, 24, "PLO Compare and load (256 bit in parameter list)") +DEF_FEAT(PLO_CSO, "plo-cso", PLO, 25, "PLO Compare and swap (256 bit in parameter list)") +DEF_FEAT(PLO_DCSO, "plo-dcso", PLO, 26, "PLO Double compare and swap (256 bit in parameter list)") +DEF_FEAT(PLO_CSSTO, "plo-cssto", PLO, 27, "PLO Compare and swap and store (256 bit in parameter list)") +DEF_FEAT(PLO_CSDSTO, "plo-csdsto", PLO, 28, "PLO Compare and swap and double store (256 bit in parameter list)") +DEF_FEAT(PLO_CSTSTO, "plo-cststo", PLO, 29, "PLO Compare and swap and triple store (256 bit in parameter list)") +DEF_FEAT(PLO_TCS, "plo-tcs", PLO, 30, "Triple compare and swap (32 bit in parameter list)") +DEF_FEAT(PLO_TCSG, "plo-tcsg", PLO, 31, "Triple compare and swap (64 bit in parameter list)") +DEF_FEAT(PLO_TCSX, "plo-tcsx", PLO, 32, "Triple compare and swap (128 bit in parameter list)") +DEF_FEAT(PLO_TCSO, "plo-tcso", PLO, 33, "Triple compare and swap (256 bit in parameter list)") +DEF_FEAT(PLO_QCS, "plo-qcs", PLO, 34, "Quadruple compare and swap (32 bit in parameter list)") +DEF_FEAT(PLO_QCSG, "plo-qcsg", PLO, 35, "Quadruple compare and swap (64 bit in parameter list)") +DEF_FEAT(PLO_QCSX, "plo-qcsx", PLO, 36, "Quadruple compare and swap (128 bit in parameter list)") +DEF_FEAT(PLO_QCSO, "plo-qcso", PLO, 37, "Quadruple compare and swap (256 bit in parameter list)") +DEF_FEAT(PLO_LO, "plo-lo", PLO, 38, "Load (256 bit in parameter list)") +DEF_FEAT(PLO_DLX, "plo-dlx", PLO, 39, "Double load (128 bit in parameter list)") +DEF_FEAT(PLO_DLO, "plo-dlo", PLO, 40, "Double load (256 bit in parameter list)") +DEF_FEAT(PLO_TL, "plo-tl", PLO, 41, "Triple load (32 bit in parameter list)") +DEF_FEAT(PLO_TLG, "plo-tlg", PLO, 42, "Triple load (64 bit in parameter list)") +DEF_FEAT(PLO_TLX, "plo-tlx", PLO, 43, "Triple load (128 bit in parameter list)") +DEF_FEAT(PLO_TLO, "plo-tlo", PLO, 44, "Triple load (256 bit in parameter list)") +DEF_FEAT(PLO_QL, "plo-ql", PLO, 45, "Quadruple load (32 bit in parameter list)") +DEF_FEAT(PLO_QLG, "plo-qlg", PLO, 46, "Quadruple load (64 bit in parameter list)") +DEF_FEAT(PLO_QLX, "plo-qlx", PLO, 47, "Quadruple load (128 bit in parameter list)") +DEF_FEAT(PLO_QLO, "plo-qlo", PLO, 48, "Quadruple load (256 bit in parameter list)") +DEF_FEAT(PLO_STO, "plo-sto", PLO, 49, "Store (256 bit in parameter list)") +DEF_FEAT(PLO_DST, 
"plo-dst", PLO, 50, "Double store (32 bit in parameter list)") +DEF_FEAT(PLO_DSTG, "plo-dstg", PLO, 51, "Double store (64 bit in parameter list)") +DEF_FEAT(PLO_DSTX, "plo-dstx", PLO, 52, "Double store (128 bit in parameter list)") +DEF_FEAT(PLO_DSTO, "plo-dsto", PLO, 53, "Double store (256 bit in parameter list)") +DEF_FEAT(PLO_TST, "plo-tst", PLO, 54, "Triple store (32 bit in parameter list)") +DEF_FEAT(PLO_TSTG, "plo-tstg", PLO, 55, "Triple store (64 bit in parameter list)") +DEF_FEAT(PLO_TSTX, "plo-tstx", PLO, 56, "Triple store (128 bit in parameter list)") +DEF_FEAT(PLO_TSTO, "plo-tsto", PLO, 57, "Triple store (256 bit in parameter list)") +DEF_FEAT(PLO_QST, "plo-qst", PLO, 58, "Quadruple store (32 bit in parameter list)") +DEF_FEAT(PLO_QSTG, "plo-qstg", PLO, 59, "Quadruple store (64 bit in parameter list)") +DEF_FEAT(PLO_QSTX, "plo-qstx", PLO, 60, "Quadruple store (128 bit in parameter list)") +DEF_FEAT(PLO_QSTO, "plo-qsto", PLO, 61, "Quadruple store (256 bit in parameter list)") /* Features exposed via the PTFF instruction. */ DEF_FEAT(PTFF_QTO, "ptff-qto", PTFF, 1, "PTFF Query TOD Offset") @@ -180,6 +226,7 @@ DEF_FEAT(PTFF_QSI, "ptff-qsi", PTFF, 2, "PTFF Query Steering Information") DEF_FEAT(PTFF_QPT, "ptff-qpc", PTFF, 3, "PTFF Query Physical Clock") DEF_FEAT(PTFF_QUI, "ptff-qui", PTFF, 4, "PTFF Query UTC Information") DEF_FEAT(PTFF_QTOU, "ptff-qtou", PTFF, 5, "PTFF Query TOD Offset User") +DEF_FEAT(PTFF_QTSE, "ptff-qtse", PTFF, 6, "PTFF Query Time-Stamp Event") DEF_FEAT(PTFF_QSIE, "ptff-qsie", PTFF, 10, "PTFF Query Steering Information Extended") DEF_FEAT(PTFF_QTOUE, "ptff-qtoue", PTFF, 13, "PTFF Query TOD Offset User Extended") DEF_FEAT(PTFF_STO, "ptff-sto", PTFF, 65, "PTFF Set TOD Offset") @@ -200,6 +247,15 @@ DEF_FEAT(KMAC_AES_256, "kmac-aes-256", KMAC, 20, "KMAC AES-256") DEF_FEAT(KMAC_EAES_128, "kmac-eaes-128", KMAC, 26, "KMAC Encrypted-AES-128") DEF_FEAT(KMAC_EAES_192, "kmac-eaes-192", KMAC, 27, "KMAC Encrypted-AES-192") DEF_FEAT(KMAC_EAES_256, "kmac-eaes-256", KMAC, 28, "KMAC Encrypted-AES-256") +DEF_FEAT(KMAC_HMAC_SHA_224, "kmac-hmac-sha-224", KMAC, 112, "KMAC HMAC-SHA-224") +DEF_FEAT(KMAC_HMAC_SHA_256, "kmac-hmac-sha-246", KMAC, 113, "KMAC HMAC-SHA-256") +DEF_FEAT(KMAC_HMAC_SHA_384, "kmac-hmac-sha-384", KMAC, 114, "KMAC HMAC-SHA-384") +DEF_FEAT(KMAC_HMAC_SHA_512, "kmac-hmac-sha-512", KMAC, 115, "KMAC HMAC-SHA-512") +DEF_FEAT(KMAC_HMAC_ESHA_224, "kmac-hmac-esha-224", KMAC, 120, "KMAC HMAC-Encrypted-SHA-224") +DEF_FEAT(KMAC_HMAC_ESHA_256, "kmac-hmac-esha-246", KMAC, 121, "KMAC HMAC-Encrypted-SHA-256") +DEF_FEAT(KMAC_HMAC_ESHA_384, "kmac-hmac-esha-384", KMAC, 122, "KMAC HMAC-Encrypted-SHA-384") +DEF_FEAT(KMAC_HMAC_ESHA_512, "kmac-hmac-esha-512", KMAC, 123, "KMAC HMAC-Encrypted-SHA-512") +DEF_FEAT(KMAC_QAI, "kmac-qai", KMAC, 127, "KMAC Query-Authentication-Information") /* Features exposed via the KMC instruction. 
*/ DEF_FEAT(KMC_DEA, "kmc-dea", KMC, 1, "KMC DEA") @@ -233,6 +289,11 @@ DEF_FEAT(KM_XTS_AES_128, "km-xts-aes-128", KM, 50, "KM XTS-AES-128") DEF_FEAT(KM_XTS_AES_256, "km-xts-aes-256", KM, 52, "KM XTS-AES-256") DEF_FEAT(KM_XTS_EAES_128, "km-xts-eaes-128", KM, 58, "KM XTS-Encrypted-AES-128") DEF_FEAT(KM_XTS_EAES_256, "km-xts-eaes-256", KM, 60, "KM XTS-Encrypted-AES-256") +DEF_FEAT(KM_FULL_XTS_AES_128, "km-full-xts-aes-128", KM, 82, "KM Full-XTS-AES-128") +DEF_FEAT(KM_FULL_XTS_AES_256, "km-full-xts-aes-256", KM, 84, "KM Full-XTS-AES-256") +DEF_FEAT(KM_FULL_XTS_EAES_128, "km-full-xts-eaes-128", KM, 90, "KM Full-XTS-Encrypted-AES-128") +DEF_FEAT(KM_FULL_XTS_EAES_256, "km-full-xts-eaes-256", KM, 92, "KM Full-XTS-Encrypted-AES-256") +DEF_FEAT(KM_QAI, "km-qai", KM, 127, "KM Query-Authentication-Information") /* Features exposed via the KIMD instruction. */ DEF_FEAT(KIMD_SHA_1, "kimd-sha-1", KIMD, 1, "KIMD SHA-1") @@ -245,6 +306,7 @@ DEF_FEAT(KIMD_SHA3_512, "kimd-sha3-512", KIMD, 35, "KIMD SHA3-512") DEF_FEAT(KIMD_SHAKE_128, "kimd-shake-128", KIMD, 36, "KIMD SHAKE-128") DEF_FEAT(KIMD_SHAKE_256, "kimd-shake-256", KIMD, 37, "KIMD SHAKE-256") DEF_FEAT(KIMD_GHASH, "kimd-ghash", KIMD, 65, "KIMD GHASH") +DEF_FEAT(KIMD_QAI, "kimd-qai", KIMD, 127, "KIMD Query-Authentication-Information") /* Features exposed via the KLMD instruction. */ DEF_FEAT(KLMD_SHA_1, "klmd-sha-1", KLMD, 1, "KLMD SHA-1") @@ -256,6 +318,7 @@ DEF_FEAT(KLMD_SHA3_384, "klmd-sha3-384", KLMD, 34, "KLMD SHA3-384") DEF_FEAT(KLMD_SHA3_512, "klmd-sha3-512", KLMD, 35, "KLMD SHA3-512") DEF_FEAT(KLMD_SHAKE_128, "klmd-shake-128", KLMD, 36, "KLMD SHAKE-128") DEF_FEAT(KLMD_SHAKE_256, "klmd-shake-256", KLMD, 37, "KLMD SHAKE-256") +DEF_FEAT(KLMD_QAI, "klmd-qai", KLMD, 127, "KLMD Query-Authentication-Information") /* Features exposed via the PCKMO instruction. */ DEF_FEAT(PCKMO_EDEA, "pckmo-edea", PCKMO, 1, "PCKMO Encrypted-DEA-Key") @@ -264,11 +327,16 @@ DEF_FEAT(PCKMO_ETDEA_256, "pckmo-etdea-192", PCKMO, 3, "PCKMO Encrypted-TDEA-192 DEF_FEAT(PCKMO_AES_128, "pckmo-aes-128", PCKMO, 18, "PCKMO Encrypted-AES-128-Key") DEF_FEAT(PCKMO_AES_192, "pckmo-aes-192", PCKMO, 19, "PCKMO Encrypted-AES-192-Key") DEF_FEAT(PCKMO_AES_256, "pckmo-aes-256", PCKMO, 20, "PCKMO Encrypted-AES-256-Key") +DEF_FEAT(PCKMO_AES_XTS_128_DK, "pckmo-aes-xts-128-dk", PCKMO, 21, "PCKMO Encrypt-AES-XTS-128-Double-Key") +DEF_FEAT(PCKMO_AES_XTS_256_DK, "pckmo-aes-xts-256-dk", PCKMO, 22, "PCKMO Encrypt-AES-XTS-256-Double-Key") DEF_FEAT(PCKMO_ECC_P256, "pckmo-ecc-p256", PCKMO, 32, "PCKMO Encrypt-ECC-P256-Key") DEF_FEAT(PCKMO_ECC_P384, "pckmo-ecc-p384", PCKMO, 33, "PCKMO Encrypt-ECC-P384-Key") DEF_FEAT(PCKMO_ECC_P521, "pckmo-ecc-p521", PCKMO, 34, "PCKMO Encrypt-ECC-P521-Key") DEF_FEAT(PCKMO_ECC_ED25519, "pckmo-ecc-ed25519", PCKMO, 40 , "PCKMO Encrypt-ECC-Ed25519-Key") DEF_FEAT(PCKMO_ECC_ED448, "pckmo-ecc-ed448", PCKMO, 41 , "PCKMO Encrypt-ECC-Ed448-Key") +DEF_FEAT(PCKMO_HMAC_512, "pckmo-hmac-512", PCKMO, 118, "PCKMO Encrypt-HMAC-512-Key") +DEF_FEAT(PCKMO_HMAC_1024, "pckmo-hmac-1024", PCKMO, 122, "PCKMO Encrypt-HMAC-1024-Key") +DEF_FEAT(PCKMO_QAI, "pckmo-qai", PCKMO, 127, "PCKMO Query-Authentication-Information") /* Features exposed via the KMCTR instruction. 
*/ DEF_FEAT(KMCTR_DEA, "kmctr-dea", KMCTR, 1, "KMCTR DEA") @@ -283,6 +351,7 @@ DEF_FEAT(KMCTR_AES_256, "kmctr-aes-256", KMCTR, 20, "KMCTR AES-256") DEF_FEAT(KMCTR_EAES_128, "kmctr-eaes-128", KMCTR, 26, "KMCTR Encrypted-AES-128") DEF_FEAT(KMCTR_EAES_192, "kmctr-eaes-192", KMCTR, 27, "KMCTR Encrypted-AES-192") DEF_FEAT(KMCTR_EAES_256, "kmctr-eaes-256", KMCTR, 28, "KMCTR Encrypted-AES-256") +DEF_FEAT(KMCTR_QAI, "kmctr-qai", KMCTR, 127, "KMCTR Query-Authentication-Information") /* Features exposed via the KMF instruction. */ DEF_FEAT(KMF_DEA, "kmf-dea", KMF, 1, "KMF DEA") @@ -297,6 +366,7 @@ DEF_FEAT(KMF_AES_256, "kmf-aes-256", KMF, 20, "KMF AES-256") DEF_FEAT(KMF_EAES_128, "kmf-eaes-128", KMF, 26, "KMF Encrypted-AES-128") DEF_FEAT(KMF_EAES_192, "kmf-eaes-192", KMF, 27, "KMF Encrypted-AES-192") DEF_FEAT(KMF_EAES_256, "kmf-eaes-256", KMF, 28, "KMF Encrypted-AES-256") +DEF_FEAT(KMF_QAI, "kmf-qai", KMF, 127, "KMF Query-Authentication-Information") /* Features exposed via the KMO instruction. */ DEF_FEAT(KMO_DEA, "kmo-dea", KMO, 1, "KMO DEA") @@ -311,6 +381,7 @@ DEF_FEAT(KMO_AES_256, "kmo-aes-256", KMO, 20, "KMO AES-256") DEF_FEAT(KMO_EAES_128, "kmo-eaes-128", KMO, 26, "KMO Encrypted-AES-128") DEF_FEAT(KMO_EAES_192, "kmo-eaes-192", KMO, 27, "KMO Encrypted-AES-192") DEF_FEAT(KMO_EAES_256, "kmo-eaes-256", KMO, 28, "KMO Encrypted-AES-256") +DEF_FEAT(KMO_QAI, "kmo-qai", KMO, 127, "KMO Query-Authentication-Information") /* Features exposed via the PCC instruction. */ DEF_FEAT(PCC_CMAC_DEA, "pcc-cmac-dea", PCC, 1, "PCC Compute-Last-Block-CMAC-Using-DEA") @@ -336,11 +407,13 @@ DEF_FEAT(PCC_SCALAR_MULT_ED25519, "pcc-scalar-mult-ed25519", PCC, 72, "PCC Scala DEF_FEAT(PCC_SCALAR_MULT_ED448, "pcc-scalar-mult-ed448", PCC, 73, "PCC Scalar-Multiply-Ed448") DEF_FEAT(PCC_SCALAR_MULT_X25519, "pcc-scalar-mult-x25519", PCC, 80, "PCC Scalar-Multiply-X25519") DEF_FEAT(PCC_SCALAR_MULT_X448, "pcc-scalar-mult-x448", PCC, 81, "PCC Scalar-Multiply-X448") +DEF_FEAT(PCC_QAI, "pcc-qai", PCC, 127, "PCC Query-Authentication-Information") /* Features exposed via the PPNO/PRNO instruction. */ DEF_FEAT(PPNO_SHA_512_DRNG, "ppno-sha-512-drng", PPNO, 3, "PPNO SHA-512-DRNG") DEF_FEAT(PRNO_TRNG_QRTCR, "prno-trng-qrtcr", PPNO, 112, "PRNO TRNG-Query-Raw-to-Conditioned-Ratio") DEF_FEAT(PRNO_TRNG, "prno-trng", PPNO, 114, "PRNO TRNG") +DEF_FEAT(PRNO_QAI, "prno-qai", PPNO, 127, "PRNO Query-Authentication-Information") /* Features exposed via the KMA instruction. */ DEF_FEAT(KMA_GCM_AES_128, "kma-gcm-aes-128", KMA, 18, "KMA GCM-AES-128") @@ -349,6 +422,7 @@ DEF_FEAT(KMA_GCM_AES_256, "kma-gcm-aes-256", KMA, 20, "KMA GCM-AES-256") DEF_FEAT(KMA_GCM_EAES_128, "kma-gcm-eaes-128", KMA, 26, "KMA GCM-Encrypted-AES-128") DEF_FEAT(KMA_GCM_EAES_192, "kma-gcm-eaes-192", KMA, 27, "KMA GCM-Encrypted-AES-192") DEF_FEAT(KMA_GCM_EAES_256, "kma-gcm-eaes-256", KMA, 28, "KMA GCM-Encrypted-AES-256") +DEF_FEAT(KMA_QAI, "kma-qai", KMA, 127, "KMA Query-Authentication-Information") /* Features exposed via the KDSA instruction. 
*/ DEF_FEAT(KDSA_ECDSA_VERIFY_P256, "kdsa-ecdsa-verify-p256", KDSA, 1, "KDSA ECDSA-Verify-P256") @@ -366,6 +440,7 @@ DEF_FEAT(KDSA_EDDSA_SIGN_ED25519, "kdsa-eddsa-sign-ed25519", KDSA, 40, "KDSA EdD DEF_FEAT(KDSA_EDDSA_SIGN_ED448, "kdsa-eddsa-sign-ed448", KDSA, 44, "KDSA EdDSA-Sign-Ed448") DEF_FEAT(KDSA_EEDDSA_SIGN_ED25519, "kdsa-eeddsa-sign-ed25519", KDSA, 48, "KDSA Encrypted-EdDSA-Sign-Ed25519") DEF_FEAT(KDSA_EEDDSA_SIGN_ED448, "kdsa-eeddsa-sign-ed448", KDSA, 52, "KDSA Encrypted-EdDSA-Sign-Ed448") +DEF_FEAT(KDSA_QAI, "kdsa-qai", KDSA, 127, "KDSA Query-Authentication-Information") /* Features exposed via the SORTL instruction. */ DEF_FEAT(SORTL_SFLR, "sortl-sflr", SORTL, 1, "SORTL SFLR") @@ -383,3 +458,10 @@ DEF_FEAT(DEFLATE_F0, "dfltcc-f0", DFLTCC, 192, "DFLTCC format 0 parameter-block" /* Features exposed via the UV-CALL instruction */ DEF_FEAT(UV_FEAT_AP, "appv", UV_FEAT_GUEST, 4, "AP instructions installed for secure guests") DEF_FEAT(UV_FEAT_AP_INTR, "appvi", UV_FEAT_GUEST, 5, "AP instructions interruption support for secure guests") + +/* Features exposed via the PFCR instruction (concurrent-functions facility). */ +DEF_FEAT(PFCR_QAF, "pfcr-qaf", PFCR, 0, "PFCR Query-Available-Functions") +DEF_FEAT(PFCR_CSDST, "pfcr-csdst", PFCR, 1, "PFCR Compare-and-Swap-and-Double-Store (32)") +DEF_FEAT(PFCR_CSDSTG, "pfcr-csdstg", PFCR, 2, "PFCR Compare-and-Swap-and-Double-Store (64)") +DEF_FEAT(PFCR_CSTST, "pfcr-cstst", PFCR, 3, "PFCR Compare-and-Swap-and-Triple-Store (32)") +DEF_FEAT(PFCR_CSTSTG, "pfcr-cststg", PFCR, 4, "PFCR Compare-and-Swap-and-Triple-Store (64)") diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c index a27f4b6f79b..954a7a99a9e 100644 --- a/target/s390x/cpu_models.c +++ b/target/s390x/cpu_models.c @@ -14,8 +14,8 @@ #include "cpu.h" #include "s390x-internal.h" #include "kvm/kvm_s390x.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" +#include "system/kvm.h" +#include "system/tcg.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "qapi/visitor.h" @@ -23,7 +23,7 @@ #include "qemu/hw-version.h" #include "qemu/qemu-print.h" #ifndef CONFIG_USER_ONLY -#include "sysemu/sysemu.h" +#include "system/system.h" #include "target/s390x/kvm/pv.h" #include CONFIG_DEVICES #endif @@ -94,6 +94,8 @@ static S390CPUDef s390_cpu_defs[] = { CPUDEF_INIT(0x8562, 15, 1, 47, 0x08000000U, "gen15b", "IBM z15 T02 GA1"), CPUDEF_INIT(0x3931, 16, 1, 47, 0x08000000U, "gen16a", "IBM 3931 GA1"), CPUDEF_INIT(0x3932, 16, 1, 47, 0x08000000U, "gen16b", "IBM 3932 GA1"), + CPUDEF_INIT(0x9175, 17, 1, 47, 0x08000000U, "gen17a", "IBM 9175 GA1"), + CPUDEF_INIT(0x9176, 17, 1, 47, 0x08000000U, "gen17b", "IBM 9176 GA1"), }; #define QEMU_MAX_CPU_TYPE 0x8561 @@ -371,7 +373,7 @@ static void s390_print_cpu_model_list_entry(gpointer data, gpointer user_data) g_free(name); } -static gint s390_cpu_list_compare(gconstpointer a, gconstpointer b) +static gint s390_cpu_list_compare(gconstpointer a, gconstpointer b, gpointer d) { const S390CPUClass *cc_a = S390_CPU_CLASS((ObjectClass *)a); const S390CPUClass *cc_b = S390_CPU_CLASS((ObjectClass *)b); @@ -413,7 +415,7 @@ void s390_cpu_list(void) qemu_printf("Available CPUs:\n"); list = object_class_get_list(TYPE_S390_CPU, false); - list = g_slist_sort(list, s390_cpu_list_compare); + list = g_slist_sort_with_data(list, s390_cpu_list_compare, NULL); g_slist_foreach(list, s390_print_cpu_model_list_entry, NULL); g_slist_free(list); @@ -457,7 +459,10 @@ static void check_consistency(const S390CPUModel *model) { S390_FEAT_VECTOR_PACKED_DECIMAL, S390_FEAT_VECTOR 
}, { S390_FEAT_VECTOR_PACKED_DECIMAL_ENH, S390_FEAT_VECTOR_PACKED_DECIMAL }, { S390_FEAT_VECTOR_PACKED_DECIMAL_ENH2, S390_FEAT_VECTOR_PACKED_DECIMAL_ENH }, + { S390_FEAT_VECTOR_PACKED_DECIMAL_ENH3, S390_FEAT_VECTOR_PACKED_DECIMAL_ENH2 }, { S390_FEAT_VECTOR_ENH, S390_FEAT_VECTOR }, + { S390_FEAT_VECTOR_ENH2, S390_FEAT_VECTOR_ENH }, + { S390_FEAT_VECTOR_ENH3, S390_FEAT_VECTOR_ENH2 }, { S390_FEAT_INSTRUCTION_EXEC_PROT, S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2 }, { S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2, S390_FEAT_ESOP }, { S390_FEAT_CMM_NT, S390_FEAT_CMM }, @@ -477,6 +482,18 @@ static void check_consistency(const S390CPUModel *model) { S390_FEAT_KLMD_SHA3_512, S390_FEAT_MSA }, { S390_FEAT_KLMD_SHAKE_128, S390_FEAT_MSA }, { S390_FEAT_KLMD_SHAKE_256, S390_FEAT_MSA }, + { S390_FEAT_KMAC_HMAC_SHA_224, S390_FEAT_MSA_EXT_3 }, + { S390_FEAT_KMAC_HMAC_SHA_256, S390_FEAT_MSA_EXT_3 }, + { S390_FEAT_KMAC_HMAC_SHA_384, S390_FEAT_MSA_EXT_3 }, + { S390_FEAT_KMAC_HMAC_SHA_512, S390_FEAT_MSA_EXT_3 }, + { S390_FEAT_KMAC_HMAC_ESHA_224, S390_FEAT_MSA_EXT_3 }, + { S390_FEAT_KMAC_HMAC_ESHA_256, S390_FEAT_MSA_EXT_3 }, + { S390_FEAT_KMAC_HMAC_ESHA_384, S390_FEAT_MSA_EXT_3 }, + { S390_FEAT_KMAC_HMAC_ESHA_512, S390_FEAT_MSA_EXT_3 }, + { S390_FEAT_KM_FULL_XTS_AES_128, S390_FEAT_MSA_EXT_4 }, + { S390_FEAT_KM_FULL_XTS_AES_256, S390_FEAT_MSA_EXT_4 }, + { S390_FEAT_KM_FULL_XTS_EAES_128, S390_FEAT_MSA_EXT_4 }, + { S390_FEAT_KM_FULL_XTS_EAES_256, S390_FEAT_MSA_EXT_4 }, { S390_FEAT_PRNO_TRNG_QRTCR, S390_FEAT_MSA_EXT_5 }, { S390_FEAT_PRNO_TRNG, S390_FEAT_MSA_EXT_5 }, { S390_FEAT_SIE_KSS, S390_FEAT_SIE_F2 }, @@ -492,6 +509,50 @@ static void check_consistency(const S390CPUModel *model) { S390_FEAT_RDP, S390_FEAT_LOCAL_TLB_CLEARING }, { S390_FEAT_UV_FEAT_AP, S390_FEAT_AP }, { S390_FEAT_UV_FEAT_AP_INTR, S390_FEAT_UV_FEAT_AP }, + { S390_FEAT_PFCR_QAF, S390_FEAT_CCF_BASE }, + { S390_FEAT_PFCR_CSDST, S390_FEAT_CCF_BASE }, + { S390_FEAT_PFCR_CSDSTG, S390_FEAT_CCF_BASE }, + { S390_FEAT_PFCR_CSTST, S390_FEAT_CCF_BASE }, + { S390_FEAT_PFCR_CSTSTG, S390_FEAT_CCF_BASE }, + { S390_FEAT_INEFF_NC_TX, S390_FEAT_TRANSACTIONAL_EXE }, + { S390_FEAT_PLO_CLO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_CSO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_DCSO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_CSSTO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_CSDSTO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_CSTSTO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_TCS, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_TCSG, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_TCSX, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_TCSO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_QCS, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_QCSG, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_QCSX, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_QCSO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_LO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_DLX, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_DLO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_TL, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_TLG, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_TLX, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_TLO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_QL, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_QLG, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_QLX, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_QLO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_STO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_DST, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_DSTG, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_DSTX, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_DSTO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_TST, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_TSTG, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_TSTX, 
S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_TSTO, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_QST, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_QSTG, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_QSTX, S390_FEAT_PLO_EXT }, + { S390_FEAT_PLO_QSTO, S390_FEAT_PLO_EXT }, }; int i; @@ -517,7 +578,6 @@ static void check_compat_model_failed(Error **errp, error_setg(errp, "%s. Maximum supported model in the current configuration: \'%s\'", msg, max_model->def->name); error_append_hint(errp, "Consider a different accelerator, try \"-accel help\"\n"); - return; } static bool check_compatibility(const S390CPUModel *max_model, @@ -859,7 +919,7 @@ void s390_cpu_model_class_register_props(ObjectClass *oc) } #ifdef CONFIG_KVM -static void s390_host_cpu_model_class_init(ObjectClass *oc, void *data) +static void s390_host_cpu_model_class_init(ObjectClass *oc, const void *data) { S390CPUClass *xcc = S390_CPU_CLASS(oc); @@ -868,7 +928,7 @@ static void s390_host_cpu_model_class_init(ObjectClass *oc, void *data) } #endif -static void s390_base_cpu_model_class_init(ObjectClass *oc, void *data) +static void s390_base_cpu_model_class_init(ObjectClass *oc, const void *data) { S390CPUClass *xcc = S390_CPU_CLASS(oc); @@ -879,7 +939,7 @@ static void s390_base_cpu_model_class_init(ObjectClass *oc, void *data) xcc->desc = xcc->cpu_def->desc; } -static void s390_cpu_model_class_init(ObjectClass *oc, void *data) +static void s390_cpu_model_class_init(ObjectClass *oc, const void *data) { S390CPUClass *xcc = S390_CPU_CLASS(oc); @@ -889,7 +949,7 @@ static void s390_cpu_model_class_init(ObjectClass *oc, void *data) xcc->desc = xcc->cpu_def->desc; } -static void s390_qemu_cpu_model_class_init(ObjectClass *oc, void *data) +static void s390_qemu_cpu_model_class_init(ObjectClass *oc, const void *data) { S390CPUClass *xcc = S390_CPU_CLASS(oc); @@ -898,7 +958,7 @@ static void s390_qemu_cpu_model_class_init(ObjectClass *oc, void *data) qemu_hw_version()); } -static void s390_max_cpu_model_class_init(ObjectClass *oc, void *data) +static void s390_max_cpu_model_class_init(ObjectClass *oc, const void *data) { S390CPUClass *xcc = S390_CPU_CLASS(oc); @@ -1012,7 +1072,7 @@ static void register_types(void) .instance_init = s390_cpu_model_initfn, .instance_finalize = s390_cpu_model_finalize, .class_init = s390_base_cpu_model_class_init, - .class_data = (void *) &s390_cpu_defs[i], + .class_data = &s390_cpu_defs[i], }; char *name = s390_cpu_type_name(s390_cpu_defs[i].name); TypeInfo ti = { @@ -1021,7 +1081,7 @@ static void register_types(void) .instance_init = s390_cpu_model_initfn, .instance_finalize = s390_cpu_model_finalize, .class_init = s390_cpu_model_class_init, - .class_data = (void *) &s390_cpu_defs[i], + .class_data = &s390_cpu_defs[i], }; type_register_static(&ti_base); diff --git a/target/s390x/cpu_models.h b/target/s390x/cpu_models.h index 71d4bc2dd4a..f701bc0b532 100644 --- a/target/s390x/cpu_models.h +++ b/target/s390x/cpu_models.h @@ -113,6 +113,9 @@ static inline uint64_t s390_cpuid_from_cpu_model(const S390CPUModel *model) } S390CPUDef const *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga, S390FeatBitmap features); +void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga, + const S390FeatInit feat_init); +void s390_cpu_list(void); bool kvm_s390_cpu_models_supported(void); bool kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp); diff --git a/target/s390x/cpu_models_sysemu.c b/target/s390x/cpu_models_sysemu.c deleted file mode 100644 index f6df691b665..00000000000 --- a/target/s390x/cpu_models_sysemu.c +++ 
/dev/null @@ -1,433 +0,0 @@ -/* - * CPU models for s390x - System Emulation-only - * - * Copyright 2016 IBM Corp. - * - * Author(s): David Hildenbrand - * - * This work is licensed under the terms of the GNU GPL, version 2 or (at - * your option) any later version. See the COPYING file in the top-level - * directory. - */ - -#include "qemu/osdep.h" -#include "cpu.h" -#include "s390x-internal.h" -#include "kvm/kvm_s390x.h" -#include "sysemu/kvm.h" -#include "qapi/error.h" -#include "qapi/visitor.h" -#include "qapi/qobject-input-visitor.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qapi-commands-machine-target.h" - -static void list_add_feat(const char *name, void *opaque); - -static void check_unavailable_features(const S390CPUModel *max_model, - const S390CPUModel *model, - strList **unavailable) -{ - S390FeatBitmap missing; - - /* check general model compatibility */ - if (max_model->def->gen < model->def->gen || - (max_model->def->gen == model->def->gen && - max_model->def->ec_ga < model->def->ec_ga)) { - list_add_feat("type", unavailable); - } - - /* detect missing features if any to properly report them */ - bitmap_andnot(missing, model->features, max_model->features, - S390_FEAT_MAX); - if (!bitmap_empty(missing, S390_FEAT_MAX)) { - s390_feat_bitmap_to_ascii(missing, unavailable, list_add_feat); - } -} - -struct CpuDefinitionInfoListData { - CpuDefinitionInfoList *list; - S390CPUModel *model; -}; - -static void create_cpu_model_list(ObjectClass *klass, void *opaque) -{ - struct CpuDefinitionInfoListData *cpu_list_data = opaque; - CpuDefinitionInfoList **cpu_list = &cpu_list_data->list; - CpuDefinitionInfo *info; - char *name = g_strdup(object_class_get_name(klass)); - S390CPUClass *scc = S390_CPU_CLASS(klass); - - /* strip off the -s390x-cpu */ - g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0; - info = g_new0(CpuDefinitionInfo, 1); - info->name = name; - info->has_migration_safe = true; - info->migration_safe = scc->is_migration_safe; - info->q_static = scc->is_static; - info->q_typename = g_strdup(object_class_get_name(klass)); - /* check for unavailable features */ - if (cpu_list_data->model) { - Object *obj; - S390CPU *sc; - obj = object_new_with_class(klass); - sc = S390_CPU(obj); - if (sc->model) { - info->has_unavailable_features = true; - check_unavailable_features(cpu_list_data->model, sc->model, - &info->unavailable_features); - } - object_unref(obj); - } - - QAPI_LIST_PREPEND(*cpu_list, info); -} - -CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) -{ - struct CpuDefinitionInfoListData list_data = { - .list = NULL, - }; - - list_data.model = get_max_cpu_model(NULL); - - object_class_foreach(create_cpu_model_list, TYPE_S390_CPU, false, - &list_data); - - return list_data.list; -} - -static void cpu_model_from_info(S390CPUModel *model, const CpuModelInfo *info, - const char *info_arg_name, Error **errp) -{ - Error *err = NULL; - const QDict *qdict; - const QDictEntry *e; - Visitor *visitor; - ObjectClass *oc; - S390CPU *cpu; - Object *obj; - - oc = cpu_class_by_name(TYPE_S390_CPU, info->name); - if (!oc) { - error_setg(errp, "The CPU definition \'%s\' is unknown.", info->name); - return; - } - if (S390_CPU_CLASS(oc)->kvm_required && !kvm_enabled()) { - error_setg(errp, "The CPU definition '%s' requires KVM", info->name); - return; - } - obj = object_new_with_class(oc); - cpu = S390_CPU(obj); - - if (!cpu->model) { - error_setg(errp, "Details about the host CPU model are not available, " - "it cannot be used."); - object_unref(obj); - return; - } - - if (info->props) 
{ - g_autofree const char *props_name = g_strdup_printf("%s.props", - info_arg_name); - - visitor = qobject_input_visitor_new(info->props); - if (!visit_start_struct(visitor, props_name, NULL, 0, errp)) { - visit_free(visitor); - object_unref(obj); - return; - } - qdict = qobject_to(QDict, info->props); - for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) { - if (!object_property_set(obj, e->key, visitor, &err)) { - break; - } - } - if (!err) { - visit_check_struct(visitor, &err); - } - visit_end_struct(visitor, NULL); - visit_free(visitor); - if (err) { - error_propagate(errp, err); - object_unref(obj); - return; - } - } - - /* copy the model and throw the cpu away */ - memcpy(model, cpu->model, sizeof(*model)); - object_unref(obj); -} - -static void qdict_add_disabled_feat(const char *name, void *opaque) -{ - qdict_put_bool(opaque, name, false); -} - -static void qdict_add_enabled_feat(const char *name, void *opaque) -{ - qdict_put_bool(opaque, name, true); -} - -/* convert S390CPUDef into a static CpuModelInfo */ -static void cpu_info_from_model(CpuModelInfo *info, const S390CPUModel *model, - bool delta_changes) -{ - QDict *qdict = qdict_new(); - S390FeatBitmap bitmap; - - /* always fallback to the static base model */ - info->name = g_strdup_printf("%s-base", model->def->name); - - if (delta_changes) { - /* features deleted from the base feature set */ - bitmap_andnot(bitmap, model->def->base_feat, model->features, - S390_FEAT_MAX); - if (!bitmap_empty(bitmap, S390_FEAT_MAX)) { - s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat); - } - - /* features added to the base feature set */ - bitmap_andnot(bitmap, model->features, model->def->base_feat, - S390_FEAT_MAX); - if (!bitmap_empty(bitmap, S390_FEAT_MAX)) { - s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_enabled_feat); - } - } else { - /* expand all features */ - s390_feat_bitmap_to_ascii(model->features, qdict, - qdict_add_enabled_feat); - bitmap_complement(bitmap, model->features, S390_FEAT_MAX); - s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat); - } - - if (!qdict_size(qdict)) { - qobject_unref(qdict); - } else { - info->props = QOBJECT(qdict); - } -} - -CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, - CpuModelInfo *model, - Error **errp) -{ - Error *err = NULL; - CpuModelExpansionInfo *expansion_info = NULL; - S390CPUModel s390_model; - bool delta_changes = false; - S390FeatBitmap deprecated_feats; - - /* convert it to our internal representation */ - cpu_model_from_info(&s390_model, model, "model", &err); - if (err) { - error_propagate(errp, err); - return NULL; - } - - if (type == CPU_MODEL_EXPANSION_TYPE_STATIC) { - delta_changes = true; - } else if (type != CPU_MODEL_EXPANSION_TYPE_FULL) { - error_setg(errp, "The requested expansion type is not supported."); - return NULL; - } - - /* convert it back to a static representation */ - expansion_info = g_new0(CpuModelExpansionInfo, 1); - expansion_info->model = g_malloc0(sizeof(*expansion_info->model)); - cpu_info_from_model(expansion_info->model, &s390_model, delta_changes); - - /* populate list of deprecated features */ - bitmap_zero(deprecated_feats, S390_FEAT_MAX); - s390_get_deprecated_features(deprecated_feats); - - if (delta_changes) { - /* - * Only populate deprecated features that are a - * subset of the features enabled on the CPU model. 
- */ - bitmap_and(deprecated_feats, deprecated_feats, - s390_model.features, S390_FEAT_MAX); - } - - s390_feat_bitmap_to_ascii(deprecated_feats, - &expansion_info->deprecated_props, list_add_feat); - return expansion_info; -} - -static void list_add_feat(const char *name, void *opaque) -{ - strList **last = (strList **) opaque; - - QAPI_LIST_PREPEND(*last, g_strdup(name)); -} - -CpuModelCompareInfo *qmp_query_cpu_model_comparison(CpuModelInfo *infoa, - CpuModelInfo *infob, - Error **errp) -{ - Error *err = NULL; - CpuModelCompareResult feat_result, gen_result; - CpuModelCompareInfo *compare_info; - S390FeatBitmap missing, added; - S390CPUModel modela, modelb; - - /* convert both models to our internal representation */ - cpu_model_from_info(&modela, infoa, "modela", &err); - if (err) { - error_propagate(errp, err); - return NULL; - } - cpu_model_from_info(&modelb, infob, "modelb", &err); - if (err) { - error_propagate(errp, err); - return NULL; - } - compare_info = g_new0(CpuModelCompareInfo, 1); - - /* check the cpu generation and ga level */ - if (modela.def->gen == modelb.def->gen) { - if (modela.def->ec_ga == modelb.def->ec_ga) { - /* ec and corresponding bc are identical */ - gen_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL; - } else if (modela.def->ec_ga < modelb.def->ec_ga) { - gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET; - } else { - gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET; - } - } else if (modela.def->gen < modelb.def->gen) { - gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET; - } else { - gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET; - } - if (gen_result != CPU_MODEL_COMPARE_RESULT_IDENTICAL) { - /* both models cannot be made identical */ - list_add_feat("type", &compare_info->responsible_properties); - } - - /* check the feature set */ - if (bitmap_equal(modela.features, modelb.features, S390_FEAT_MAX)) { - feat_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL; - } else { - bitmap_andnot(missing, modela.features, modelb.features, S390_FEAT_MAX); - s390_feat_bitmap_to_ascii(missing, - &compare_info->responsible_properties, - list_add_feat); - bitmap_andnot(added, modelb.features, modela.features, S390_FEAT_MAX); - s390_feat_bitmap_to_ascii(added, &compare_info->responsible_properties, - list_add_feat); - if (bitmap_empty(missing, S390_FEAT_MAX)) { - feat_result = CPU_MODEL_COMPARE_RESULT_SUBSET; - } else if (bitmap_empty(added, S390_FEAT_MAX)) { - feat_result = CPU_MODEL_COMPARE_RESULT_SUPERSET; - } else { - feat_result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE; - } - } - - /* combine the results */ - if (gen_result == feat_result) { - compare_info->result = gen_result; - } else if (feat_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) { - compare_info->result = gen_result; - } else if (gen_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) { - compare_info->result = feat_result; - } else { - compare_info->result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE; - } - return compare_info; -} - -CpuModelBaselineInfo *qmp_query_cpu_model_baseline(CpuModelInfo *infoa, - CpuModelInfo *infob, - Error **errp) -{ - Error *err = NULL; - CpuModelBaselineInfo *baseline_info; - S390CPUModel modela, modelb, model; - uint16_t cpu_type; - uint8_t max_gen_ga; - uint8_t max_gen; - - /* convert both models to our internal representation */ - cpu_model_from_info(&modela, infoa, "modela", &err); - if (err) { - error_propagate(errp, err); - return NULL; - } - - cpu_model_from_info(&modelb, infob, "modelb", &err); - if (err) { - error_propagate(errp, err); - return NULL; - } - - /* features both models support */ - 
bitmap_and(model.features, modela.features, modelb.features, S390_FEAT_MAX); - - /* detect the maximum model not regarding features */ - if (modela.def->gen == modelb.def->gen) { - if (modela.def->type == modelb.def->type) { - cpu_type = modela.def->type; - } else { - cpu_type = 0; - } - max_gen = modela.def->gen; - max_gen_ga = MIN(modela.def->ec_ga, modelb.def->ec_ga); - } else if (modela.def->gen > modelb.def->gen) { - cpu_type = modelb.def->type; - max_gen = modelb.def->gen; - max_gen_ga = modelb.def->ec_ga; - } else { - cpu_type = modela.def->type; - max_gen = modela.def->gen; - max_gen_ga = modela.def->ec_ga; - } - - model.def = s390_find_cpu_def(cpu_type, max_gen, max_gen_ga, - model.features); - - /* models without early base features (esan3) are bad */ - if (!model.def) { - error_setg(errp, "No compatible CPU model could be created as" - " important base features are disabled"); - return NULL; - } - - /* strip off features not part of the max model */ - bitmap_and(model.features, model.features, model.def->full_feat, - S390_FEAT_MAX); - - baseline_info = g_new0(CpuModelBaselineInfo, 1); - baseline_info->model = g_malloc0(sizeof(*baseline_info->model)); - cpu_info_from_model(baseline_info->model, &model, true); - return baseline_info; -} - -void apply_cpu_model(const S390CPUModel *model, Error **errp) -{ - static S390CPUModel applied_model; - static bool applied; - - /* - * We have the same model for all VCPUs. KVM can only be configured before - * any VCPUs are defined in KVM. - */ - if (applied) { - if (model && memcmp(&applied_model, model, sizeof(S390CPUModel))) { - error_setg(errp, "Mixed CPU models are not supported on s390x."); - } - return; - } - - if (kvm_enabled()) { - if (!kvm_s390_apply_cpu_model(model, errp)) { - return; - } - } - - applied = true; - if (model) { - applied_model = *model; - } -} diff --git a/target/s390x/cpu_models_system.c b/target/s390x/cpu_models_system.c new file mode 100644 index 00000000000..5b846048675 --- /dev/null +++ b/target/s390x/cpu_models_system.c @@ -0,0 +1,436 @@ +/* + * CPU models for s390x - System-only + * + * Copyright 2016 IBM Corp. + * + * Author(s): David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "s390x-internal.h" +#include "kvm/kvm_s390x.h" +#include "system/kvm.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qapi/qobject-input-visitor.h" +#include "qobject/qdict.h" +#include "qapi/qapi-commands-machine.h" + +static void list_add_feat(const char *name, void *opaque); + +static void check_unavailable_features(const S390CPUModel *max_model, + const S390CPUModel *model, + strList **unavailable) +{ + S390FeatBitmap missing; + + /* check general model compatibility */ + if (max_model->def->gen < model->def->gen || + (max_model->def->gen == model->def->gen && + max_model->def->ec_ga < model->def->ec_ga)) { + list_add_feat("type", unavailable); + } + + /* detect missing features if any to properly report them */ + bitmap_andnot(missing, model->features, max_model->features, + S390_FEAT_MAX); + if (!bitmap_empty(missing, S390_FEAT_MAX)) { + s390_feat_bitmap_to_ascii(missing, unavailable, list_add_feat); + } +} + +struct CpuDefinitionInfoListData { + CpuDefinitionInfoList *list; + S390CPUModel *model; +}; + +static void create_cpu_model_list(ObjectClass *klass, void *opaque) +{ + struct CpuDefinitionInfoListData *cpu_list_data = opaque; + CpuDefinitionInfoList **cpu_list = &cpu_list_data->list; + CpuDefinitionInfo *info; + char *name = g_strdup(object_class_get_name(klass)); + S390CPUClass *scc = S390_CPU_CLASS(klass); + + /* strip off the -s390x-cpu */ + g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0; + info = g_new0(CpuDefinitionInfo, 1); + info->name = name; + info->has_migration_safe = true; + info->migration_safe = scc->is_migration_safe; + info->q_static = scc->is_static; + info->q_typename = g_strdup(object_class_get_name(klass)); + /* check for unavailable features */ + if (cpu_list_data->model) { + Object *obj; + S390CPU *sc; + obj = object_new_with_class(klass); + sc = S390_CPU(obj); + if (sc->model) { + info->has_unavailable_features = true; + check_unavailable_features(cpu_list_data->model, sc->model, + &info->unavailable_features); + } + object_unref(obj); + } + + QAPI_LIST_PREPEND(*cpu_list, info); +} + +CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) +{ + struct CpuDefinitionInfoListData list_data = { + .list = NULL, + }; + + list_data.model = get_max_cpu_model(NULL); + + object_class_foreach(create_cpu_model_list, TYPE_S390_CPU, false, + &list_data); + + return list_data.list; +} + +static void cpu_model_from_info(S390CPUModel *model, const CpuModelInfo *info, + const char *info_arg_name, Error **errp) +{ + Error *err = NULL; + const QDict *qdict; + const QDictEntry *e; + Visitor *visitor; + ObjectClass *oc; + S390CPU *cpu; + Object *obj; + + oc = cpu_class_by_name(TYPE_S390_CPU, info->name); + if (!oc) { + error_setg(errp, "The CPU definition \'%s\' is unknown.", info->name); + return; + } + if (S390_CPU_CLASS(oc)->kvm_required && !kvm_enabled()) { + error_setg(errp, "The CPU definition '%s' requires KVM", info->name); + return; + } + obj = object_new_with_class(oc); + cpu = S390_CPU(obj); + + if (!cpu->model) { + error_setg(errp, "Details about the host CPU model are not available, " + "it cannot be used."); + object_unref(obj); + return; + } + + if (info->props) { + g_autofree const char *props_name = g_strdup_printf("%s.props", + info_arg_name); + + visitor = qobject_input_visitor_new(info->props); + if (!visit_start_struct(visitor, props_name, NULL, 0, errp)) { + visit_free(visitor); + object_unref(obj); + return; + } + qdict = qobject_to(QDict, info->props); + for (e = 
qdict_first(qdict); e; e = qdict_next(qdict, e)) { + if (!object_property_set(obj, e->key, visitor, &err)) { + break; + } + } + if (!err) { + visit_check_struct(visitor, &err); + } + visit_end_struct(visitor, NULL); + visit_free(visitor); + if (err) { + error_propagate(errp, err); + object_unref(obj); + return; + } + } + + /* copy the model and throw the cpu away */ + memcpy(model, cpu->model, sizeof(*model)); + object_unref(obj); +} + +static void qdict_add_disabled_feat(const char *name, void *opaque) +{ + qdict_put_bool(opaque, name, false); +} + +static void qdict_add_enabled_feat(const char *name, void *opaque) +{ + qdict_put_bool(opaque, name, true); +} + +/* convert S390CPUDef into a static CpuModelInfo */ +static void cpu_info_from_model(CpuModelInfo *info, const S390CPUModel *model, + bool delta_changes) +{ + QDict *qdict = qdict_new(); + S390FeatBitmap bitmap; + + /* always fallback to the static base model */ + info->name = g_strdup_printf("%s-base", model->def->name); + + if (delta_changes) { + /* features deleted from the base feature set */ + bitmap_andnot(bitmap, model->def->base_feat, model->features, + S390_FEAT_MAX); + if (!bitmap_empty(bitmap, S390_FEAT_MAX)) { + s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat); + } + + /* features added to the base feature set */ + bitmap_andnot(bitmap, model->features, model->def->base_feat, + S390_FEAT_MAX); + if (!bitmap_empty(bitmap, S390_FEAT_MAX)) { + s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_enabled_feat); + } + } else { + /* expand all features */ + s390_feat_bitmap_to_ascii(model->features, qdict, + qdict_add_enabled_feat); + bitmap_complement(bitmap, model->features, S390_FEAT_MAX); + s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat); + } + + if (!qdict_size(qdict)) { + qobject_unref(qdict); + } else { + info->props = QOBJECT(qdict); + } +} + +CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, + CpuModelInfo *model, + Error **errp) +{ + Error *err = NULL; + CpuModelExpansionInfo *expansion_info = NULL; + S390CPUModel s390_model; + bool delta_changes = false; + S390FeatBitmap deprecated_feats; + + /* convert it to our internal representation */ + cpu_model_from_info(&s390_model, model, "model", &err); + if (err) { + error_propagate(errp, err); + return NULL; + } + + if (type == CPU_MODEL_EXPANSION_TYPE_STATIC) { + delta_changes = true; + } else if (type != CPU_MODEL_EXPANSION_TYPE_FULL) { + error_setg(errp, "The requested expansion type is not supported."); + return NULL; + } + + /* convert it back to a static representation */ + expansion_info = g_new0(CpuModelExpansionInfo, 1); + expansion_info->model = g_malloc0(sizeof(*expansion_info->model)); + cpu_info_from_model(expansion_info->model, &s390_model, delta_changes); + + /* populate list of deprecated features */ + bitmap_zero(deprecated_feats, S390_FEAT_MAX); + s390_get_deprecated_features(deprecated_feats); + + if (delta_changes) { + /* + * Only populate deprecated features that are a + * subset of the features enabled on the CPU model. 
+ */ + bitmap_and(deprecated_feats, deprecated_feats, + s390_model.features, S390_FEAT_MAX); + } + + s390_feat_bitmap_to_ascii(deprecated_feats, + &expansion_info->deprecated_props, list_add_feat); + + expansion_info->has_deprecated_props = !!expansion_info->deprecated_props; + + return expansion_info; +} + +static void list_add_feat(const char *name, void *opaque) +{ + strList **last = (strList **) opaque; + + QAPI_LIST_PREPEND(*last, g_strdup(name)); +} + +CpuModelCompareInfo *qmp_query_cpu_model_comparison(CpuModelInfo *infoa, + CpuModelInfo *infob, + Error **errp) +{ + Error *err = NULL; + CpuModelCompareResult feat_result, gen_result; + CpuModelCompareInfo *compare_info; + S390FeatBitmap missing, added; + S390CPUModel modela, modelb; + + /* convert both models to our internal representation */ + cpu_model_from_info(&modela, infoa, "modela", &err); + if (err) { + error_propagate(errp, err); + return NULL; + } + cpu_model_from_info(&modelb, infob, "modelb", &err); + if (err) { + error_propagate(errp, err); + return NULL; + } + compare_info = g_new0(CpuModelCompareInfo, 1); + + /* check the cpu generation and ga level */ + if (modela.def->gen == modelb.def->gen) { + if (modela.def->ec_ga == modelb.def->ec_ga) { + /* ec and corresponding bc are identical */ + gen_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL; + } else if (modela.def->ec_ga < modelb.def->ec_ga) { + gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET; + } else { + gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET; + } + } else if (modela.def->gen < modelb.def->gen) { + gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET; + } else { + gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET; + } + if (gen_result != CPU_MODEL_COMPARE_RESULT_IDENTICAL) { + /* both models cannot be made identical */ + list_add_feat("type", &compare_info->responsible_properties); + } + + /* check the feature set */ + if (bitmap_equal(modela.features, modelb.features, S390_FEAT_MAX)) { + feat_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL; + } else { + bitmap_andnot(missing, modela.features, modelb.features, S390_FEAT_MAX); + s390_feat_bitmap_to_ascii(missing, + &compare_info->responsible_properties, + list_add_feat); + bitmap_andnot(added, modelb.features, modela.features, S390_FEAT_MAX); + s390_feat_bitmap_to_ascii(added, &compare_info->responsible_properties, + list_add_feat); + if (bitmap_empty(missing, S390_FEAT_MAX)) { + feat_result = CPU_MODEL_COMPARE_RESULT_SUBSET; + } else if (bitmap_empty(added, S390_FEAT_MAX)) { + feat_result = CPU_MODEL_COMPARE_RESULT_SUPERSET; + } else { + feat_result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE; + } + } + + /* combine the results */ + if (gen_result == feat_result) { + compare_info->result = gen_result; + } else if (feat_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) { + compare_info->result = gen_result; + } else if (gen_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) { + compare_info->result = feat_result; + } else { + compare_info->result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE; + } + return compare_info; +} + +CpuModelBaselineInfo *qmp_query_cpu_model_baseline(CpuModelInfo *infoa, + CpuModelInfo *infob, + Error **errp) +{ + Error *err = NULL; + CpuModelBaselineInfo *baseline_info; + S390CPUModel modela, modelb, model; + uint16_t cpu_type; + uint8_t max_gen_ga; + uint8_t max_gen; + + /* convert both models to our internal representation */ + cpu_model_from_info(&modela, infoa, "modela", &err); + if (err) { + error_propagate(errp, err); + return NULL; + } + + cpu_model_from_info(&modelb, infob, "modelb", &err); + if (err) { + 
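The qmp_query_cpu_model_comparison() code carried over above classifies two models by computing two set differences over their feature bitmaps: what only model A has and what only model B has. The same classification over plain 64-bit masks looks like the sketch below; it illustrates the logic only, while the real code works on S390FeatBitmap with bitmap_andnot() and also folds in the generation/GA comparison.

#include <stdint.h>
#include <stdio.h>

enum cmp_result { IDENTICAL, SUBSET, SUPERSET, INCOMPATIBLE };

/* Classify feature set 'a' against feature set 'b' (one bit per feature). */
static enum cmp_result compare_features(uint64_t a, uint64_t b)
{
    uint64_t missing = a & ~b;   /* features only 'a' has */
    uint64_t added   = b & ~a;   /* features only 'b' has */

    if (!missing && !added) {
        return IDENTICAL;
    } else if (!missing) {
        return SUBSET;           /* 'a' is contained in 'b' */
    } else if (!added) {
        return SUPERSET;         /* 'b' is contained in 'a' */
    }
    return INCOMPATIBLE;         /* each side has bits the other lacks */
}

int main(void)
{
    /* a = {bit0, bit1}, b = {bit0, bit1, bit2}: a is a subset of b */
    printf("%d\n", compare_features(0x3, 0x7));   /* prints 1 (SUBSET) */
    return 0;
}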
error_propagate(errp, err); + return NULL; + } + + /* features both models support */ + bitmap_and(model.features, modela.features, modelb.features, S390_FEAT_MAX); + + /* detect the maximum model not regarding features */ + if (modela.def->gen == modelb.def->gen) { + if (modela.def->type == modelb.def->type) { + cpu_type = modela.def->type; + } else { + cpu_type = 0; + } + max_gen = modela.def->gen; + max_gen_ga = MIN(modela.def->ec_ga, modelb.def->ec_ga); + } else if (modela.def->gen > modelb.def->gen) { + cpu_type = modelb.def->type; + max_gen = modelb.def->gen; + max_gen_ga = modelb.def->ec_ga; + } else { + cpu_type = modela.def->type; + max_gen = modela.def->gen; + max_gen_ga = modela.def->ec_ga; + } + + model.def = s390_find_cpu_def(cpu_type, max_gen, max_gen_ga, + model.features); + + /* models without early base features (esan3) are bad */ + if (!model.def) { + error_setg(errp, "No compatible CPU model could be created as" + " important base features are disabled"); + return NULL; + } + + /* strip off features not part of the max model */ + bitmap_and(model.features, model.features, model.def->full_feat, + S390_FEAT_MAX); + + baseline_info = g_new0(CpuModelBaselineInfo, 1); + baseline_info->model = g_malloc0(sizeof(*baseline_info->model)); + cpu_info_from_model(baseline_info->model, &model, true); + return baseline_info; +} + +void apply_cpu_model(const S390CPUModel *model, Error **errp) +{ + static S390CPUModel applied_model; + static bool applied; + + /* + * We have the same model for all VCPUs. KVM can only be configured before + * any VCPUs are defined in KVM. + */ + if (applied) { + if (model && memcmp(&applied_model, model, sizeof(S390CPUModel))) { + error_setg(errp, "Mixed CPU models are not supported on s390x."); + } + return; + } + + if (kvm_enabled()) { + if (!kvm_s390_apply_cpu_model(model, errp)) { + return; + } + } + + applied = true; + if (model) { + applied_model = *model; + } +} diff --git a/target/s390x/diag.c b/target/s390x/diag.c index 27ffd48576d..da44b0133ed 100644 --- a/target/s390x/diag.c +++ b/target/s390x/diag.c @@ -16,10 +16,10 @@ #include "cpu.h" #include "s390x-internal.h" #include "hw/watchdog/wdt_diag288.h" -#include "sysemu/cpus.h" +#include "system/cpus.h" #include "hw/s390x/ipl.h" #include "hw/s390x/s390-virtio-ccw.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "kvm/kvm_s390x.h" #include "target/s390x/kvm/pv.h" #include "qemu/error-report.h" @@ -133,7 +133,14 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra) valid = subcode == DIAG308_PV_SET ? 
iplb_valid_pv(iplb) : iplb_valid(iplb); if (!valid) { - env->regs[r1 + 1] = DIAG_308_RC_INVALID; + if (subcode == DIAG308_SET && iplb->pbt == S390_IPL_TYPE_QEMU_SCSI) { + s390_rebuild_iplb(iplb->devno, iplb); + s390_ipl_update_diag308(iplb); + env->regs[r1 + 1] = DIAG_308_RC_OK; + } else { + env->regs[r1 + 1] = DIAG_308_RC_INVALID; + } + goto out; } diff --git a/target/s390x/gdbstub.c b/target/s390x/gdbstub.c index a9f4eb92adf..6bca376f2b6 100644 --- a/target/s390x/gdbstub.c +++ b/target/s390x/gdbstub.c @@ -21,12 +21,12 @@ #include "qemu/osdep.h" #include "cpu.h" #include "s390x-internal.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/gdbstub.h" #include "gdbstub/helpers.h" #include "qemu/bitops.h" -#include "sysemu/hw_accel.h" -#include "sysemu/tcg.h" +#include "system/hw_accel.h" +#include "system/tcg.h" int s390_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) { @@ -46,7 +46,7 @@ int s390_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int s390_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { CPUS390XState *env = cpu_env(cs); - target_ulong tmpl = ldtul_p(mem_buf); + target_ulong tmpl = ldq_be_p(mem_buf); switch (n) { case S390_PSWM_REGNUM: @@ -88,7 +88,7 @@ static int cpu_write_ac_reg(CPUState *cs, uint8_t *mem_buf, int n) switch (n) { case S390_A0_REGNUM ... S390_A15_REGNUM: - env->aregs[n] = ldl_p(mem_buf); + env->aregs[n] = ldl_be_p(mem_buf); cpu_synchronize_post_init(env_cpu(env)); return 4; default: @@ -123,10 +123,10 @@ static int cpu_write_fp_reg(CPUState *cs, uint8_t *mem_buf, int n) switch (n) { case S390_FPC_REGNUM: - env->fpc = ldl_p(mem_buf); + env->fpc = ldl_be_p(mem_buf); return 4; case S390_F0_REGNUM ... S390_F15_REGNUM: - *get_freg(env, n - S390_F0_REGNUM) = ldtul_p(mem_buf); + *get_freg(env, n - S390_F0_REGNUM) = ldq_be_p(mem_buf); return 8; default: return 0; @@ -167,11 +167,11 @@ static int cpu_write_vreg(CPUState *cs, uint8_t *mem_buf, int n) switch (n) { case S390_V0L_REGNUM ... S390_V15L_REGNUM: - env->vregs[n][1] = ldtul_p(mem_buf + 8); + env->vregs[n][1] = ldq_be_p(mem_buf + 8); return 8; case S390_V16_REGNUM ... S390_V31_REGNUM: - env->vregs[n][0] = ldtul_p(mem_buf); - env->vregs[n][1] = ldtul_p(mem_buf + 8); + env->vregs[n][0] = ldq_be_p(mem_buf); + env->vregs[n][1] = ldq_be_p(mem_buf + 8); return 16; default: return 0; @@ -203,7 +203,7 @@ static int cpu_write_c_reg(CPUState *cs, uint8_t *mem_buf, int n) switch (n) { case S390_C0_REGNUM ... 
S390_C15_REGNUM: - env->cregs[n] = ldtul_p(mem_buf); + env->cregs[n] = ldq_be_p(mem_buf); if (tcg_enabled()) { tlb_flush(env_cpu(env)); } @@ -246,19 +246,19 @@ static int cpu_write_virt_reg(CPUState *cs, uint8_t *mem_buf, int n) switch (n) { case S390_VIRT_CKC_REGNUM: - env->ckc = ldtul_p(mem_buf); + env->ckc = ldq_be_p(mem_buf); cpu_synchronize_post_init(cs); return 8; case S390_VIRT_CPUTM_REGNUM: - env->cputm = ldtul_p(mem_buf); + env->cputm = ldq_be_p(mem_buf); cpu_synchronize_post_init(cs); return 8; case S390_VIRT_BEA_REGNUM: - env->gbea = ldtul_p(mem_buf); + env->gbea = ldq_be_p(mem_buf); cpu_synchronize_post_init(cs); return 8; case S390_VIRT_PREFIX_REGNUM: - env->psa = ldtul_p(mem_buf); + env->psa = ldq_be_p(mem_buf); cpu_synchronize_post_init(cs); return 8; default: @@ -298,19 +298,19 @@ static int cpu_write_virt_kvm_reg(CPUState *cs, uint8_t *mem_buf, int n) switch (n) { case S390_VIRT_KVM_PP_REGNUM: - env->pp = ldtul_p(mem_buf); + env->pp = ldq_be_p(mem_buf); cpu_synchronize_post_init(env_cpu(env)); return 8; case S390_VIRT_KVM_PFT_REGNUM: - env->pfault_token = ldtul_p(mem_buf); + env->pfault_token = ldq_be_p(mem_buf); cpu_synchronize_post_init(env_cpu(env)); return 8; case S390_VIRT_KVM_PFS_REGNUM: - env->pfault_select = ldtul_p(mem_buf); + env->pfault_select = ldq_be_p(mem_buf); cpu_synchronize_post_init(env_cpu(env)); return 8; case S390_VIRT_KVM_PFC_REGNUM: - env->pfault_compare = ldtul_p(mem_buf); + env->pfault_compare = ldq_be_p(mem_buf); cpu_synchronize_post_init(env_cpu(env)); return 8; default: @@ -338,7 +338,7 @@ static int cpu_write_gs_reg(CPUState *cs, uint8_t *mem_buf, int n) S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; - env->gscb[n] = ldtul_p(mem_buf); + env->gscb[n] = ldq_be_p(mem_buf); cpu_synchronize_post_init(env_cpu(env)); return 8; } diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c index 2b2bfc3736b..8218e6470ec 100644 --- a/target/s390x/gen-features.c +++ b/target/s390x/gen-features.c @@ -46,6 +46,47 @@ S390_FEAT_PLO_CSTSTGR, \ S390_FEAT_PLO_CSTSTX +#define S390_FEAT_GROUP_PLO_EXT \ + S390_FEAT_PLO_EXT, \ + S390_FEAT_PLO_CLO, \ + S390_FEAT_PLO_CSO, \ + S390_FEAT_PLO_DCSO, \ + S390_FEAT_PLO_CSSTO, \ + S390_FEAT_PLO_CSDSTO, \ + S390_FEAT_PLO_CSTSTO, \ + S390_FEAT_PLO_TCS, \ + S390_FEAT_PLO_TCSG, \ + S390_FEAT_PLO_TCSX, \ + S390_FEAT_PLO_TCSO, \ + S390_FEAT_PLO_QCS, \ + S390_FEAT_PLO_QCSG, \ + S390_FEAT_PLO_QCSX, \ + S390_FEAT_PLO_QCSO, \ + S390_FEAT_PLO_LO, \ + S390_FEAT_PLO_DLX, \ + S390_FEAT_PLO_DLO, \ + S390_FEAT_PLO_TL, \ + S390_FEAT_PLO_TLG, \ + S390_FEAT_PLO_TLX, \ + S390_FEAT_PLO_TLO, \ + S390_FEAT_PLO_QL, \ + S390_FEAT_PLO_QLG, \ + S390_FEAT_PLO_QLX, \ + S390_FEAT_PLO_QLO, \ + S390_FEAT_PLO_STO, \ + S390_FEAT_PLO_DST, \ + S390_FEAT_PLO_DSTG, \ + S390_FEAT_PLO_DSTX, \ + S390_FEAT_PLO_DSTO, \ + S390_FEAT_PLO_TST, \ + S390_FEAT_PLO_TSTG, \ + S390_FEAT_PLO_TSTX, \ + S390_FEAT_PLO_TSTO, \ + S390_FEAT_PLO_QST, \ + S390_FEAT_PLO_QSTG, \ + S390_FEAT_PLO_QSTX, \ + S390_FEAT_PLO_QSTO + #define S390_FEAT_GROUP_TOD_CLOCK_STEERING \ S390_FEAT_TOD_CLOCK_STEERING, \ S390_FEAT_PTFF_QTO, \ @@ -64,6 +105,9 @@ S390_FEAT_PTFF_STOE, \ S390_FEAT_PTFF_STOUE +#define S390_FEAT_GROUP_GEN17_PTFF \ + S390_FEAT_PTFF_QTSE + #define S390_FEAT_GROUP_MSA \ S390_FEAT_MSA, \ S390_FEAT_KMAC_DEA, \ @@ -246,6 +290,49 @@ S390_FEAT_PCKMO_ECC_ED25519, \ S390_FEAT_PCKMO_ECC_ED448 +#define S390_FEAT_GROUP_MSA_EXT_10 \ + S390_FEAT_KM_FULL_XTS_AES_128, \ + S390_FEAT_KM_FULL_XTS_AES_256, \ + S390_FEAT_KM_FULL_XTS_EAES_128, \ + S390_FEAT_KM_FULL_XTS_EAES_256 
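Each feature group added to gen-features.c, such as S390_FEAT_GROUP_MSA_EXT_10 just above, is written as a comma-separated macro so the same list can be spliced into the per-generation base/default/full tables later in the file and also materialised as a standalone uint16_t group_* array. A stripped-down sketch of that double use, with placeholder identifiers and values rather than QEMU's:

#include <stdint.h>
#include <stdio.h>

/* The group is written once as a macro ... */
#define FEAT_GROUP_EXAMPLE \
    10, \
    11, \
    12

/* ... so it can be spliced into a larger per-generation list ... */
static const uint16_t full_gen_example[] = {
    1,
    2,
    FEAT_GROUP_EXAMPLE,
};

/* ... and also turned into a standalone group array. */
static const uint16_t group_example[] = {
    FEAT_GROUP_EXAMPLE,
};

int main(void)
{
    printf("group: %zu features, generation list: %zu entries\n",
           sizeof(group_example) / sizeof(group_example[0]),
           sizeof(full_gen_example) / sizeof(full_gen_example[0]));
    return 0;
}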
+ +#define S390_FEAT_GROUP_MSA_EXT_10_PCKMO \ + S390_FEAT_PCKMO_AES_XTS_128_DK, \ + S390_FEAT_PCKMO_AES_XTS_256_DK + +#define S390_FEAT_GROUP_MSA_EXT_11 \ + S390_FEAT_KMAC_HMAC_SHA_224, \ + S390_FEAT_KMAC_HMAC_SHA_256, \ + S390_FEAT_KMAC_HMAC_SHA_384, \ + S390_FEAT_KMAC_HMAC_SHA_512, \ + S390_FEAT_KMAC_HMAC_ESHA_224, \ + S390_FEAT_KMAC_HMAC_ESHA_256, \ + S390_FEAT_KMAC_HMAC_ESHA_384, \ + S390_FEAT_KMAC_HMAC_ESHA_512 + +#define S390_FEAT_GROUP_MSA_EXT_11_PCKMO \ + S390_FEAT_PCKMO_HMAC_512, \ + S390_FEAT_PCKMO_HMAC_1024 + +#define S390_FEAT_GROUP_MSA_EXT_12 \ + S390_FEAT_MSA_EXT_12 + +#define S390_FEAT_GROUP_MSA_EXT_13 \ + S390_FEAT_KDSA_QAI, \ + S390_FEAT_KIMD_QAI, \ + S390_FEAT_KLMD_QAI, \ + S390_FEAT_KMAC_QAI, \ + S390_FEAT_KMA_QAI, \ + S390_FEAT_KMCTR_QAI, \ + S390_FEAT_KMF_QAI, \ + S390_FEAT_KMO_QAI, \ + S390_FEAT_KM_QAI, \ + S390_FEAT_PCC_QAI, \ + S390_FEAT_PRNO_QAI + +#define S390_FEAT_GROUP_MSA_EXT_13_PCKMO \ + S390_FEAT_PCKMO_QAI + #define S390_FEAT_GROUP_ENH_SORT \ S390_FEAT_ESORT_BASE, \ S390_FEAT_SORTL_SFLR, \ @@ -262,10 +349,21 @@ S390_FEAT_DEFLATE_XPND, \ S390_FEAT_DEFLATE_F0 +#define S390_FEAT_GROUP_CONCURRENT_FUNCTIONS \ + S390_FEAT_CCF_BASE, \ + S390_FEAT_PFCR_QAF, \ + S390_FEAT_PFCR_CSDST, \ + S390_FEAT_PFCR_CSDSTG, \ + S390_FEAT_PFCR_CSTST, \ + S390_FEAT_PFCR_CSTSTG + /* cpu feature groups */ static uint16_t group_PLO[] = { S390_FEAT_GROUP_PLO, }; +static uint16_t group_PLO_EXT[] = { + S390_FEAT_GROUP_PLO_EXT, +}; static uint16_t group_TOD_CLOCK_STEERING[] = { S390_FEAT_GROUP_TOD_CLOCK_STEERING, }; @@ -275,6 +373,11 @@ static uint16_t group_GEN13_PTFF[] = { static uint16_t group_MULTIPLE_EPOCH_PTFF[] = { S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF, }; + +static uint16_t group_GEN17_PTFF[] = { + S390_FEAT_GROUP_GEN17_PTFF, +}; + static uint16_t group_MSA[] = { S390_FEAT_GROUP_MSA, }; @@ -307,10 +410,38 @@ static uint16_t group_MSA_EXT_9[] = { S390_FEAT_GROUP_MSA_EXT_9, }; +static uint16_t group_MSA_EXT_10[] = { + S390_FEAT_GROUP_MSA_EXT_10, +}; + +static uint16_t group_MSA_EXT_11[] = { + S390_FEAT_GROUP_MSA_EXT_11, +}; + +static uint16_t group_MSA_EXT_12[] = { + S390_FEAT_GROUP_MSA_EXT_12, +}; + +static uint16_t group_MSA_EXT_13[] = { + S390_FEAT_GROUP_MSA_EXT_13, +}; + static uint16_t group_MSA_EXT_9_PCKMO[] = { S390_FEAT_GROUP_MSA_EXT_9_PCKMO, }; +static uint16_t group_MSA_EXT_10_PCKMO[] = { + S390_FEAT_GROUP_MSA_EXT_10_PCKMO, +}; + +static uint16_t group_MSA_EXT_11_PCKMO[] = { + S390_FEAT_GROUP_MSA_EXT_11_PCKMO, +}; + +static uint16_t group_MSA_EXT_13_PCKMO[] = { + S390_FEAT_GROUP_MSA_EXT_13_PCKMO, +}; + static uint16_t group_ENH_SORT[] = { S390_FEAT_GROUP_ENH_SORT, }; @@ -319,6 +450,10 @@ static uint16_t group_DEFLATE_CONVERSION[] = { S390_FEAT_GROUP_DEFLATE_CONVERSION, }; +static uint16_t group_CONCURRENT_FUNCTIONS[] = { + S390_FEAT_GROUP_CONCURRENT_FUNCTIONS, +}; + /* Base features (in order of release) * Only non-hypervisor managed features belong here. * Base feature sets are static meaning they do not change in future QEMU @@ -426,6 +561,13 @@ static uint16_t base_GEN15_GA1[] = { #define base_GEN16_GA1 EmptyFeat +static uint16_t base_GEN17_GA1[] = { + S390_FEAT_MISC_INSTRUCTION_EXT4, + S390_FEAT_SIF, + S390_FEAT_GROUP_MSA_EXT_12, + S390_FEAT_GROUP_PLO_EXT, +}; + /* Full features (in order of release) * Automatically includes corresponding base features. 
* Full features are all features this hardware supports even if kvm/QEMU do not @@ -580,6 +722,20 @@ static uint16_t full_GEN16_GA1[] = { S390_FEAT_UV_FEAT_AP_INTR, }; +static uint16_t full_GEN17_GA1[] = { + S390_FEAT_VECTOR_ENH3, + S390_FEAT_VECTOR_PACKED_DECIMAL_ENH3, + S390_FEAT_INEFF_NC_TX, + S390_FEAT_GROUP_GEN17_PTFF, + S390_FEAT_GROUP_MSA_EXT_10, + S390_FEAT_GROUP_MSA_EXT_10_PCKMO, + S390_FEAT_GROUP_MSA_EXT_11, + S390_FEAT_GROUP_MSA_EXT_11_PCKMO, + S390_FEAT_GROUP_MSA_EXT_13, + S390_FEAT_GROUP_MSA_EXT_13_PCKMO, + S390_FEAT_GROUP_CONCURRENT_FUNCTIONS, +}; + /* Default features (in order of release) * Automatically includes corresponding base features. @@ -675,15 +831,24 @@ static uint16_t default_GEN16_GA1[] = { S390_FEAT_PAIE, }; +static uint16_t default_GEN17_GA1[] = { + S390_FEAT_VECTOR_ENH3, + S390_FEAT_VECTOR_PACKED_DECIMAL_ENH3, + S390_FEAT_GROUP_MSA_EXT_10, + S390_FEAT_GROUP_MSA_EXT_10_PCKMO, + S390_FEAT_GROUP_MSA_EXT_11, + S390_FEAT_GROUP_MSA_EXT_11_PCKMO, + S390_FEAT_GROUP_MSA_EXT_13, + S390_FEAT_GROUP_MSA_EXT_13_PCKMO, +}; + /* QEMU (CPU model) features */ -static uint16_t qemu_V2_11[] = { +static uint16_t qemu_MIN[] = { + /* Features supported by the default CPU of the oldest machine type */ S390_FEAT_GROUP_PLO, S390_FEAT_ESAN3, S390_FEAT_ZARCH, -}; - -static uint16_t qemu_V3_1[] = { S390_FEAT_DAT_ENH, S390_FEAT_IDTE_SEGMENT, S390_FEAT_STFLE, @@ -713,18 +878,12 @@ static uint16_t qemu_V3_1[] = { S390_FEAT_ADAPTER_INT_SUPPRESSION, S390_FEAT_MSA_EXT_3, S390_FEAT_MSA_EXT_4, -}; - -static uint16_t qemu_V4_0[] = { /* * Only BFP bits are implemented (HFP, DFP, PFPO and DIVIDE TO INTEGER not * implemented yet). */ S390_FEAT_FLOATING_POINT_EXT, S390_FEAT_ZPCI, -}; - -static uint16_t qemu_V4_1[] = { S390_FEAT_STFLE_53, S390_FEAT_VECTOR, }; @@ -823,6 +982,7 @@ static CpuFeatDefSpec CpuFeatDef[] = { CPU_FEAT_INITIALIZER(GEN14_GA2), CPU_FEAT_INITIALIZER(GEN15_GA1), CPU_FEAT_INITIALIZER(GEN16_GA1), + CPU_FEAT_INITIALIZER(GEN17_GA1), }; #define FEAT_GROUP_INITIALIZER(_name) \ @@ -845,8 +1005,10 @@ typedef struct { *******************************/ static FeatGroupDefSpec FeatGroupDef[] = { FEAT_GROUP_INITIALIZER(PLO), + FEAT_GROUP_INITIALIZER(PLO_EXT), FEAT_GROUP_INITIALIZER(TOD_CLOCK_STEERING), FEAT_GROUP_INITIALIZER(GEN13_PTFF), + FEAT_GROUP_INITIALIZER(GEN17_PTFF), FEAT_GROUP_INITIALIZER(MSA), FEAT_GROUP_INITIALIZER(MSA_EXT_1), FEAT_GROUP_INITIALIZER(MSA_EXT_2), @@ -858,9 +1020,17 @@ static FeatGroupDefSpec FeatGroupDef[] = { FEAT_GROUP_INITIALIZER(MSA_EXT_8), FEAT_GROUP_INITIALIZER(MSA_EXT_9), FEAT_GROUP_INITIALIZER(MSA_EXT_9_PCKMO), + FEAT_GROUP_INITIALIZER(MSA_EXT_10), + FEAT_GROUP_INITIALIZER(MSA_EXT_10_PCKMO), + FEAT_GROUP_INITIALIZER(MSA_EXT_11), + FEAT_GROUP_INITIALIZER(MSA_EXT_11_PCKMO), + FEAT_GROUP_INITIALIZER(MSA_EXT_12), + FEAT_GROUP_INITIALIZER(MSA_EXT_13), + FEAT_GROUP_INITIALIZER(MSA_EXT_13_PCKMO), FEAT_GROUP_INITIALIZER(MULTIPLE_EPOCH_PTFF), FEAT_GROUP_INITIALIZER(ENH_SORT), FEAT_GROUP_INITIALIZER(DEFLATE_CONVERSION), + FEAT_GROUP_INITIALIZER(CONCURRENT_FUNCTIONS), }; #define QEMU_FEAT_INITIALIZER(_name) \ @@ -875,10 +1045,7 @@ static FeatGroupDefSpec FeatGroupDef[] = { * QEMU (CPU model) features *******************************/ static FeatGroupDefSpec QemuFeatDef[] = { - QEMU_FEAT_INITIALIZER(V2_11), - QEMU_FEAT_INITIALIZER(V3_1), - QEMU_FEAT_INITIALIZER(V4_0), - QEMU_FEAT_INITIALIZER(V4_1), + QEMU_FEAT_INITIALIZER(MIN), QEMU_FEAT_INITIALIZER(V6_0), QEMU_FEAT_INITIALIZER(V6_2), QEMU_FEAT_INITIALIZER(V7_0), diff --git a/target/s390x/helper.c b/target/s390x/helper.c 
index 00d5d403f31..5c127da1a6a 100644 --- a/target/s390x/helper.c +++ b/target/s390x/helper.c @@ -1,5 +1,5 @@ /* - * S/390 helpers - sysemu only + * S/390 helpers - system only * * Copyright (c) 2009 Ulrich Hecht * Copyright (c) 2011 Alexander Graf @@ -25,8 +25,10 @@ #include "qemu/timer.h" #include "hw/s390x/ioinst.h" #include "target/s390x/kvm/pv.h" -#include "sysemu/hw_accel.h" -#include "sysemu/runstate.h" +#include "system/hw_accel.h" +#include "system/runstate.h" +#include "exec/target_page.h" +#include "exec/watchpoint.h" void s390x_tod_timer(void *opaque) { @@ -89,7 +91,9 @@ void s390_handle_wait(S390CPU *cpu) { CPUState *cs = CPU(cpu); - if (s390_cpu_halt(cpu) == 0) { + s390_cpu_halt(cpu); + + if (s390_count_running_cpus() == 0) { if (is_special_wait_psw(cpu->env.psw.addr)) { qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); } else { diff --git a/target/s390x/interrupt.c b/target/s390x/interrupt.c index 5195f060ecb..1dca835c5d8 100644 --- a/target/s390x/interrupt.c +++ b/target/s390x/interrupt.c @@ -11,9 +11,8 @@ #include "cpu.h" #include "kvm/kvm_s390x.h" #include "s390x-internal.h" -#include "exec/exec-all.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" +#include "system/kvm.h" +#include "system/tcg.h" #include "hw/s390x/ioinst.h" #include "tcg/tcg_s390x.h" #if !defined(CONFIG_USER_ONLY) @@ -30,6 +29,7 @@ void trigger_pgm_exception(CPUS390XState *env, uint32_t code) /* env->int_pgm_ilen is already set, or will be set during unwinding */ } +#if !defined(CONFIG_USER_ONLY) void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra) { if (kvm_enabled()) { @@ -41,7 +41,6 @@ void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra) } } -#if !defined(CONFIG_USER_ONLY) void cpu_inject_clock_comparator(S390CPU *cpu) { CPUS390XState *env = &cpu->env; @@ -225,11 +224,9 @@ bool s390_cpu_has_stop_int(S390CPU *cpu) return env->pending_int & INTERRUPT_STOP; } -#endif bool s390_cpu_has_int(S390CPU *cpu) { -#ifndef CONFIG_USER_ONLY if (!tcg_enabled()) { return false; } @@ -238,7 +235,5 @@ bool s390_cpu_has_int(S390CPU *cpu) s390_cpu_has_io_int(cpu) || s390_cpu_has_restart_int(cpu) || s390_cpu_has_stop_int(cpu); -#else - return false; -#endif } +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/s390x/ioinst.c b/target/s390x/ioinst.c index bbe45a497a8..2320dd4c122 100644 --- a/target/s390x/ioinst.c +++ b/target/s390x/ioinst.c @@ -12,11 +12,13 @@ #include "qemu/osdep.h" #include "cpu.h" +#include "exec/target_page.h" #include "s390x-internal.h" #include "hw/s390x/ioinst.h" #include "trace.h" #include "hw/s390x/s390-pci-bus.h" #include "target/s390x/kvm/pv.h" +#include "hw/s390x/ap-bridge.h" /* All I/O instructions but chsc use the s format */ static uint64_t get_address_from_regs(CPUS390XState *env, uint32_t ipb, @@ -573,13 +575,19 @@ static void ioinst_handle_chsc_sda(ChscReq *req, ChscResp *res) static int chsc_sei_nt0_get_event(void *res) { - /* no events yet */ + if (s390_has_feat(S390_FEAT_AP)) { + return ap_chsc_sei_nt0_get_event(res); + } + return 1; } static int chsc_sei_nt0_have_event(void) { - /* no events yet */ + if (s390_has_feat(S390_FEAT_AP)) { + return ap_chsc_sei_nt0_have_event(); + } + return 0; } @@ -603,7 +611,7 @@ static int chsc_sei_nt2_have_event(void) #define CHSC_SEI_NT2 (1ULL << 61) static void ioinst_handle_chsc_sei(ChscReq *req, ChscResp *res) { - uint64_t selection_mask = ldq_p(&req->param1); + uint64_t selection_mask = ldq_be_p(&req->param1); uint8_t *res_flags = (uint8_t *)res->data; int have_event = 0; int 
have_more = 0; diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c index 94181d9281f..491cc5f9756 100644 --- a/target/s390x/kvm/kvm.c +++ b/target/s390x/kvm/kvm.c @@ -27,7 +27,7 @@ #include "cpu.h" #include "s390x-internal.h" #include "kvm_s390x.h" -#include "sysemu/kvm_int.h" +#include "system/kvm_int.h" #include "qemu/cutils.h" #include "qapi/error.h" #include "qemu/error-report.h" @@ -36,12 +36,12 @@ #include "qemu/main-loop.h" #include "qemu/mmap-alloc.h" #include "qemu/log.h" -#include "sysemu/sysemu.h" -#include "sysemu/hw_accel.h" -#include "sysemu/runstate.h" -#include "sysemu/device_tree.h" +#include "system/system.h" +#include "system/hw_accel.h" +#include "system/runstate.h" +#include "system/device_tree.h" #include "gdbstub/enums.h" -#include "exec/ram_addr.h" +#include "system/ram_addr.h" #include "trace.h" #include "hw/s390x/s390-pci-inst.h" #include "hw/s390x/s390-pci-bus.h" @@ -49,8 +49,9 @@ #include "hw/s390x/ebcdic.h" #include "exec/memattrs.h" #include "hw/s390x/s390-virtio-ccw.h" -#include "hw/s390x/s390-virtio-hcall.h" +#include "hw/s390x/s390-hypercall.h" #include "target/s390x/kvm/pv.h" +#include CONFIG_DEVICES #define kvm_vm_check_mem_attr(s, attr) \ kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr) @@ -297,12 +298,6 @@ void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp) return; } - if (!hpage_1m_allowed()) { - error_setg(errp, "This QEMU machine does not support huge page " - "mappings"); - return; - } - if (pagesize != 1 * MiB) { error_setg(errp, "Memory backing with 2G pages was specified, " "but KVM does not support this memory backing"); @@ -373,13 +368,9 @@ int kvm_arch_init(MachineState *ms, KVMState *s) kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0); kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0); kvm_vm_enable_cap(s, KVM_CAP_S390_CPU_TOPOLOGY, 0); - if (ri_allowed()) { - if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) { - cap_ri = 1; - } - } - if (cpu_model_allowed()) { - kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0); + kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0); + if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) { + cap_ri = 1; } /* @@ -388,7 +379,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s) * support is considered necessary, we only try to enable this for * newer machine types if KVM_CAP_S390_AIS_MIGRATION is available. 
*/ - if (cpu_model_allowed() && kvm_kernel_irqchip_allowed() && + if (kvm_kernel_irqchip_allowed() && kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) { kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); } @@ -407,6 +398,11 @@ unsigned long kvm_arch_vcpu_id(CPUState *cpu) return cpu->cpu_index; } +int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp) +{ + return 0; +} + int kvm_arch_init_vcpu(CPUState *cs) { unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus; @@ -472,7 +468,7 @@ static int can_sync_regs(CPUState *cs, int regs) #define KVM_SYNC_REQUIRED_REGS (KVM_SYNC_GPRS | KVM_SYNC_ACRS | \ KVM_SYNC_CRS | KVM_SYNC_PREFIX) -int kvm_arch_put_registers(CPUState *cs, int level) +int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) { CPUS390XState *env = cpu_env(cs); struct kvm_fpu fpu = {}; @@ -598,7 +594,7 @@ int kvm_arch_put_registers(CPUState *cs, int level) return 0; } -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { CPUS390XState *env = cpu_env(cs); struct kvm_fpu fpu; @@ -893,7 +889,7 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) return 0; } -static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr, +static struct kvm_hw_breakpoint *find_hw_breakpoint(vaddr addr, int len, int type) { int n; @@ -908,7 +904,7 @@ static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr, return NULL; } -static int insert_hw_breakpoint(target_ulong addr, int len, int type) +static int insert_hw_breakpoint(vaddr addr, int len, int type) { int size; @@ -1491,20 +1487,6 @@ static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl) return r; } -static int handle_hypercall(S390CPU *cpu, struct kvm_run *run) -{ - CPUS390XState *env = &cpu->env; - int ret; - - ret = s390_virtio_hypercall(env); - if (ret == -EINVAL) { - kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION); - return 0; - } - - return ret; -} - static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run) { uint64_t r1, r3; @@ -1600,9 +1582,11 @@ static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb) case DIAG_SET_CONTROL_PROGRAM_CODES: handle_diag_318(cpu, run); break; +#ifdef CONFIG_S390_CCW_VIRTIO case DIAG_KVM_HYPERCALL: - r = handle_hypercall(cpu, run); + handle_diag_500(cpu, RA_IGNORED); break; +#endif /* CONFIG_S390_CCW_VIRTIO */ case DIAG_KVM_BREAKPOINT: r = handle_sw_breakpoint(cpu, run); break; @@ -2195,6 +2179,9 @@ static int query_cpu_subfunc(S390FeatBitmap features) if (test_bit(S390_FEAT_DEFLATE_BASE, features)) { s390_add_from_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc); } + if (test_bit(S390_FEAT_CCF_BASE, features)) { + s390_add_from_feat_block(features, S390_FEAT_TYPE_PFCR, prop.pfcr); + } return 0; } @@ -2248,6 +2235,9 @@ static int configure_cpu_subfunc(const S390FeatBitmap features) if (test_bit(S390_FEAT_DEFLATE_BASE, features)) { s390_fill_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc); } + if (test_bit(S390_FEAT_CCF_BASE, features)) { + s390_fill_feat_block(features, S390_FEAT_TYPE_PFCR, prop.pfcr); + } return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); } @@ -2359,10 +2349,6 @@ static int configure_cpu_feat(const S390FeatBitmap features) bool kvm_s390_cpu_models_supported(void) { - if (!cpu_model_allowed()) { - /* compatibility machines interfere with the cpu model */ - return false; - } return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, KVM_S390_VM_CPU_MACHINE) && kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, diff 
--git a/target/s390x/kvm/pv.c b/target/s390x/kvm/pv.c index dde836d21ae..2bc916a5455 100644 --- a/target/s390x/kvm/pv.c +++ b/target/s390x/kvm/pv.c @@ -16,10 +16,10 @@ #include "qemu/units.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "sysemu/kvm.h" -#include "sysemu/cpus.h" +#include "system/kvm.h" +#include "system/cpus.h" #include "qom/object_interfaces.h" -#include "exec/confidential-guest-support.h" +#include "system/confidential-guest-support.h" #include "hw/s390x/ipl.h" #include "hw/s390x/sclp.h" #include "target/s390x/kvm/kvm_s390x.h" @@ -30,7 +30,7 @@ static struct kvm_s390_pv_info_vm info_vm; static struct kvm_s390_pv_info_dump info_dump; static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data, - int *pvrc) + struct S390PVResponse *pv_resp) { struct kvm_pv_cmd pv_cmd = { .cmd = cmd, @@ -47,8 +47,10 @@ static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data, "IOCTL rc: %d", cmd, cmdname, pv_cmd.rc, pv_cmd.rrc, rc); } - if (pvrc) { - *pvrc = pv_cmd.rc; + if (pv_resp) { + pv_resp->cmd = cmd; + pv_resp->rc = pv_cmd.rc; + pv_resp->rrc = pv_cmd.rrc; } return rc; } @@ -57,16 +59,15 @@ static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data, * This macro lets us pass the command as a string to the function so * we can print it on an error. */ -#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data, NULL) -#define s390_pv_cmd_pvrc(cmd, data, pvrc) __s390_pv_cmd(cmd, #cmd, data, pvrc) -#define s390_pv_cmd_exit(cmd, data) \ -{ \ - int rc; \ - \ - rc = __s390_pv_cmd(cmd, #cmd, data, NULL); \ - if (rc) { \ - exit(1); \ - } \ +#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data, NULL) +#define s390_pv_cmd_pv_resp(cmd, data, pv_resp) \ + __s390_pv_cmd(cmd, #cmd, data, pv_resp) + +static void s390_pv_cmd_exit(uint32_t cmd, void *data) +{ + if (s390_pv_cmd(cmd, data)) { + exit(1); + } } int s390_pv_query_info(void) @@ -133,7 +134,7 @@ bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) * If the feature is not present or if the VM is not larger than 2 GiB, * KVM_PV_ASYNC_CLEANUP_PREPARE fill fail; no point in attempting it. */ - if ((MACHINE(ms)->maxram_size <= 2 * GiB) || + if (s390_get_memory_limit(ms) <= 2 * GiB || !kvm_check_extension(kvm_state, KVM_CAP_S390_PROTECTED_ASYNC_DISABLE)) { return false; } @@ -147,18 +148,20 @@ bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) return true; } -int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, Error **errp) +#define UV_RC_SSC_INVAL_HOSTKEY 0x0108 +int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, + struct S390PVResponse *pv_resp, Error **errp) { - int ret, pvrc; + int ret; struct kvm_s390_pv_sec_parm args = { .origin = origin, .length = length, }; - ret = s390_pv_cmd_pvrc(KVM_PV_SET_SEC_PARMS, &args, &pvrc); + ret = s390_pv_cmd_pv_resp(KVM_PV_SET_SEC_PARMS, &args, pv_resp); if (ret) { error_setg(errp, "Failed to set secure execution parameters"); - if (pvrc == 0x108) { + if (pv_resp->rc == UV_RC_SSC_INVAL_HOSTKEY) { error_append_hint(errp, "Please check whether the image is " "correctly encrypted for this host\n"); } @@ -170,7 +173,8 @@ int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, Error **errp) /* * Called for each component in the SE type IPL parameter block 0. 
*/ -int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak) +int s390_pv_unpack(uint64_t addr, uint64_t size, + uint64_t tweak, struct S390PVResponse *pv_resp) { struct kvm_s390_pv_unp args = { .addr = addr, @@ -178,7 +182,7 @@ int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak) .tweak = tweak, }; - return s390_pv_cmd(KVM_PV_UNPACK, &args); + return s390_pv_cmd_pv_resp(KVM_PV_UNPACK, &args, pv_resp); } void s390_pv_prep_reset(void) @@ -186,9 +190,9 @@ void s390_pv_prep_reset(void) s390_pv_cmd_exit(KVM_PV_PREP_RESET, NULL); } -int s390_pv_verify(void) +int s390_pv_verify(struct S390PVResponse *pv_resp) { - return s390_pv_cmd(KVM_PV_VERIFY, NULL); + return s390_pv_cmd_pv_resp(KVM_PV_VERIFY, NULL, pv_resp); } void s390_pv_unshare(void) @@ -196,13 +200,29 @@ void s390_pv_unshare(void) s390_pv_cmd_exit(KVM_PV_UNSHARE_ALL, NULL); } -void s390_pv_inject_reset_error(CPUState *cs) +void s390_pv_inject_reset_error(CPUState *cs, + struct S390PVResponse pv_resp) { int r1 = (cs->kvm_run->s390_sieic.ipa & 0x00f0) >> 4; CPUS390XState *env = &S390_CPU(cs)->env; + union { + struct { + uint16_t pv_cmd; + uint16_t pv_rrc; + uint16_t pv_rc; + uint16_t diag_rc; + }; + uint64_t regs; + } resp = { + .pv_cmd = pv_resp.cmd, + .pv_rrc = pv_resp.rrc, + .pv_rc = pv_resp.rc, + .diag_rc = DIAG_308_RC_INVAL_FOR_PV + }; + /* Report that we are unable to enter protected mode */ - env->regs[r1 + 1] = DIAG_308_RC_INVAL_FOR_PV; + env->regs[r1 + 1] = resp.regs; } uint64_t kvm_s390_pv_dmp_get_size_cpu(void) @@ -367,7 +387,7 @@ OBJECT_DEFINE_TYPE_WITH_INTERFACES(S390PVGuest, { TYPE_USER_CREATABLE }, { NULL }) -static void s390_pv_guest_class_init(ObjectClass *oc, void *data) +static void s390_pv_guest_class_init(ObjectClass *oc, const void *data) { ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc); diff --git a/target/s390x/kvm/pv.h b/target/s390x/kvm/pv.h index 4b408174391..94e885e9335 100644 --- a/target/s390x/kvm/pv.h +++ b/target/s390x/kvm/pv.h @@ -13,9 +13,15 @@ #define HW_S390_PV_H #include "qapi/error.h" -#include "sysemu/kvm.h" +#include "system/kvm.h" #include "hw/s390x/s390-virtio-ccw.h" +struct S390PVResponse { + uint16_t cmd; + uint16_t rrc; + uint16_t rc; +}; + #ifdef CONFIG_KVM #include "cpu.h" @@ -42,12 +48,15 @@ int s390_pv_query_info(void); int s390_pv_vm_enable(void); void s390_pv_vm_disable(void); bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms); -int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, Error **errp); -int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak); +int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, + struct S390PVResponse *pv_resp, Error **errp); +int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak, + struct S390PVResponse *pv_resp); void s390_pv_prep_reset(void); -int s390_pv_verify(void); +int s390_pv_verify(struct S390PVResponse *pv_resp); void s390_pv_unshare(void); -void s390_pv_inject_reset_error(CPUState *cs); +void s390_pv_inject_reset_error(CPUState *cs, + struct S390PVResponse pv_resp); uint64_t kvm_s390_pv_dmp_get_size_cpu(void); uint64_t kvm_s390_pv_dmp_get_size_mem_state(void); uint64_t kvm_s390_pv_dmp_get_size_completion_data(void); @@ -63,12 +72,15 @@ static inline int s390_pv_vm_enable(void) { return 0; } static inline void s390_pv_vm_disable(void) {} static inline bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) { return false; } static inline int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, + struct S390PVResponse *pv_resp, Error **errp) { return 0; } 
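In the pv.c hunk above, s390_pv_inject_reset_error() now reports the failing Ultravisor call back to the guest by packing the UV command, rrc and rc together with DIAG_308_RC_INVAL_FOR_PV into register r1+1, overlaying four 16-bit fields on a uint64_t. The sketch below does the same packing with explicit shifts; it assumes the big-endian field order that the union in the patch yields on an s390x host, and the function name and example values are placeholders rather than QEMU code.

#include <stdint.h>
#include <stdio.h>

/* Pack four 16-bit codes into one 64-bit register image, most significant
 * halfword first: cmd | rrc | rc | diag_rc (big-endian struct layout). */
static uint64_t pack_pv_response(uint16_t cmd, uint16_t rrc,
                                 uint16_t rc, uint16_t diag_rc)
{
    return ((uint64_t)cmd << 48) |
           ((uint64_t)rrc << 32) |
           ((uint64_t)rc  << 16) |
            (uint64_t)diag_rc;
}

int main(void)
{
    /* illustrative values; 0x0108 mirrors UV_RC_SSC_INVAL_HOSTKEY above */
    uint64_t reg = pack_pv_response(0x0002, 0x0001, 0x0108, 0x0a02);

    printf("0x%016llx\n", (unsigned long long)reg);   /* 0x0002000101080a02 */
    return 0;
}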
-static inline int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak) { return 0; } +static inline int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak, + struct S390PVResponse *pv_resp) { return 0; } static inline void s390_pv_prep_reset(void) {} -static inline int s390_pv_verify(void) { return 0; } +static inline int s390_pv_verify(struct S390PVResponse *pv_resp) { return 0; } static inline void s390_pv_unshare(void) {} -static inline void s390_pv_inject_reset_error(CPUState *cs) {}; +static inline void s390_pv_inject_reset_error(CPUState *cs, + struct S390PVResponse pv_resp) {}; static inline uint64_t kvm_s390_pv_dmp_get_size_cpu(void) { return 0; } static inline uint64_t kvm_s390_pv_dmp_get_size_mem_state(void) { return 0; } static inline uint64_t kvm_s390_pv_dmp_get_size_completion_data(void) { return 0; } diff --git a/target/s390x/machine.c b/target/s390x/machine.c index a125ebcc2fa..3bea6103ffb 100644 --- a/target/s390x/machine.c +++ b/target/s390x/machine.c @@ -20,8 +20,8 @@ #include "kvm/kvm_s390x.h" #include "migration/vmstate.h" #include "tcg/tcg_s390x.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" +#include "system/kvm.h" +#include "system/tcg.h" static int cpu_post_load(void *opaque, int version_id) { diff --git a/target/s390x/meson.build b/target/s390x/meson.build index 02ca43d9f00..3b34ae034cb 100644 --- a/target/s390x/meson.build +++ b/target/s390x/meson.build @@ -27,8 +27,8 @@ s390x_system_ss.add(files( 'machine.c', 'mmu_helper.c', 'sigp.c', - 'cpu-sysemu.c', - 'cpu_models_sysemu.c', + 'cpu-system.c', + 'cpu_models_system.c', )) s390x_user_ss = ss.source_set() diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c index 6c59d0d216e..00946e9c0fe 100644 --- a/target/s390x/mmu_helper.c +++ b/target/s390x/mmu_helper.c @@ -17,14 +17,14 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "cpu.h" #include "s390x-internal.h" #include "kvm/kvm_s390x.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" -#include "exec/exec-all.h" +#include "system/kvm.h" +#include "system/tcg.h" #include "exec/page-protection.h" +#include "exec/target_page.h" #include "hw/hw.h" #include "hw/s390x/storage-keys.h" #include "hw/boards.h" diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h index 825252d7284..56cce2e7f50 100644 --- a/target/s390x/s390x-internal.h +++ b/target/s390x/s390x-internal.h @@ -238,23 +238,15 @@ uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, /* cpu.c */ #ifndef CONFIG_USER_ONLY -unsigned int s390_cpu_halt(S390CPU *cpu); +unsigned int s390_count_running_cpus(void); +void s390_cpu_halt(S390CPU *cpu); void s390_cpu_unhalt(S390CPU *cpu); -void s390_cpu_init_sysemu(Object *obj); -bool s390_cpu_realize_sysemu(DeviceState *dev, Error **errp); +void s390_cpu_system_init(Object *obj); +bool s390_cpu_system_realize(DeviceState *dev, Error **errp); void s390_cpu_finalize(Object *obj); -void s390_cpu_class_init_sysemu(CPUClass *cc); +void s390_cpu_system_class_init(CPUClass *cc); void s390_cpu_machine_reset_cb(void *opaque); - -#else -static inline unsigned int s390_cpu_halt(S390CPU *cpu) -{ - return 0; -} - -static inline void s390_cpu_unhalt(S390CPU *cpu) -{ -} +bool s390_cpu_has_work(CPUState *cs); #endif /* CONFIG_USER_ONLY */ @@ -341,6 +333,7 @@ void cpu_unmap_lowcore(LowCore *lowcore); /* interrupt.c */ void trigger_pgm_exception(CPUS390XState *env, uint32_t code); +#ifndef CONFIG_USER_ONLY void 
cpu_inject_clock_comparator(S390CPU *cpu); void cpu_inject_cpu_timer(S390CPU *cpu); void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr); @@ -353,9 +346,11 @@ bool s390_cpu_has_restart_int(S390CPU *cpu); bool s390_cpu_has_stop_int(S390CPU *cpu); void cpu_inject_restart(S390CPU *cpu); void cpu_inject_stop(S390CPU *cpu); +#endif /* CONFIG_USER_ONLY */ /* ioinst.c */ +#ifndef CONFIG_USER_ONLY void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); @@ -373,6 +368,7 @@ void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2, void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra); void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra); +#endif /* CONFIG_USER_ONLY */ /* mem_helper.c */ @@ -399,6 +395,8 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, /* translate.c */ void s390x_translate_init(void); +void s390x_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); void s390x_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data); diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c index ad0ad61177d..5e95c4978f9 100644 --- a/target/s390x/sigp.c +++ b/target/s390x/sigp.c @@ -12,11 +12,11 @@ #include "cpu.h" #include "s390x-internal.h" #include "hw/boards.h" -#include "sysemu/hw_accel.h" -#include "sysemu/runstate.h" -#include "exec/address-spaces.h" -#include "exec/exec-all.h" -#include "sysemu/tcg.h" +#include "system/hw_accel.h" +#include "system/runstate.h" +#include "system/address-spaces.h" +#include "exec/cputlb.h" +#include "system/tcg.h" #include "trace.h" #include "qapi/qapi-types-machine.h" @@ -251,24 +251,20 @@ static void sigp_restart(CPUState *cs, run_on_cpu_data arg) static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg) { - S390CPU *cpu = S390_CPU(cs); - S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); SigpInfo *si = arg.host_ptr; cpu_synchronize_state(cs); - scc->reset(cs, S390_CPU_RESET_INITIAL); + resettable_reset(OBJECT(cs), RESET_TYPE_S390_CPU_INITIAL); cpu_synchronize_post_reset(cs); si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg) { - S390CPU *cpu = S390_CPU(cs); - S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); SigpInfo *si = arg.host_ptr; cpu_synchronize_state(cs); - scc->reset(cs, S390_CPU_RESET_NORMAL); + resettable_reset(OBJECT(cs), RESET_TYPE_S390_CPU_NORMAL); cpu_synchronize_post_reset(cs); si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } diff --git a/target/s390x/tcg/cc_helper.c b/target/s390x/tcg/cc_helper.c index b36f8cdc8b9..6595ac763c3 100644 --- a/target/s390x/tcg/cc_helper.c +++ b/target/s390x/tcg/cc_helper.c @@ -22,7 +22,6 @@ #include "cpu.h" #include "s390x-internal.h" #include "tcg_s390x.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" diff --git a/target/s390x/tcg/crypto_helper.c b/target/s390x/tcg/crypto_helper.c index 93aabd236f4..4447bb66eee 100644 --- a/target/s390x/tcg/crypto_helper.c +++ b/target/s390x/tcg/crypto_helper.c @@ -17,8 +17,7 @@ #include "s390x-internal.h" #include "tcg_s390x.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" static uint64_t R(uint64_t x, int c) { diff --git a/target/s390x/tcg/excp_helper.c 
b/target/s390x/tcg/excp_helper.c index 4c0b692c9e8..e4c75d0ce01 100644 --- a/target/s390x/tcg/excp_helper.c +++ b/target/s390x/tcg/excp_helper.c @@ -22,12 +22,14 @@ #include "qemu/log.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" +#include "exec/target_page.h" +#include "exec/watchpoint.h" #include "s390x-internal.h" #include "tcg_s390x.h" #ifndef CONFIG_USER_ONLY #include "qemu/timer.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/s390x/ioinst.h" #include "hw/s390x/s390_flic.h" #include "hw/boards.h" diff --git a/target/s390x/tcg/fpu_helper.c b/target/s390x/tcg/fpu_helper.c index d8bd5748faf..1ba43715ac1 100644 --- a/target/s390x/tcg/fpu_helper.c +++ b/target/s390x/tcg/fpu_helper.c @@ -22,7 +22,6 @@ #include "cpu.h" #include "s390x-internal.h" #include "tcg_s390x.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" @@ -780,7 +779,7 @@ uint32_t HELPER(kxb)(CPUS390XState *env, Int128 a, Int128 b) uint64_t HELPER(maeb)(CPUS390XState *env, uint64_t f1, uint64_t f2, uint64_t f3) { - float32 ret = float32_muladd(f2, f3, f1, 0, &env->fpu_status); + float32 ret = float32_muladd(f3, f2, f1, 0, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } @@ -789,7 +788,7 @@ uint64_t HELPER(maeb)(CPUS390XState *env, uint64_t f1, uint64_t HELPER(madb)(CPUS390XState *env, uint64_t f1, uint64_t f2, uint64_t f3) { - float64 ret = float64_muladd(f2, f3, f1, 0, &env->fpu_status); + float64 ret = float64_muladd(f3, f2, f1, 0, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; } @@ -798,7 +797,7 @@ uint64_t HELPER(madb)(CPUS390XState *env, uint64_t f1, uint64_t HELPER(mseb)(CPUS390XState *env, uint64_t f1, uint64_t f2, uint64_t f3) { - float32 ret = float32_muladd(f2, f3, f1, float_muladd_negate_c, + float32 ret = float32_muladd(f3, f2, f1, float_muladd_negate_c, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; @@ -808,7 +807,7 @@ uint64_t HELPER(mseb)(CPUS390XState *env, uint64_t f1, uint64_t HELPER(msdb)(CPUS390XState *env, uint64_t f1, uint64_t f2, uint64_t f3) { - float64 ret = float64_muladd(f2, f3, f1, float_muladd_negate_c, + float64 ret = float64_muladd(f3, f2, f1, float_muladd_negate_c, &env->fpu_status); handle_exceptions(env, false, GETPC()); return ret; diff --git a/target/s390x/tcg/insn-data.h.inc b/target/s390x/tcg/insn-data.h.inc index e7d61cdec28..ec730ee0919 100644 --- a/target/s390x/tcg/insn-data.h.inc +++ b/target/s390x/tcg/insn-data.h.inc @@ -1012,7 +1012,7 @@ D(0xb92e, KM, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KM) D(0xb92f, KMC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMC) D(0xb929, KMA, RRF_b, MSA8, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMA) - D(0xb93c, PPNO, RRE, MSA5, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PPNO) + E(0xb93c, PPNO, RRE, MSA5, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PPNO, IF_IO) D(0xb93e, KIMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KIMD) D(0xb93f, KLMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KLMD) diff --git a/target/s390x/tcg/int_helper.c b/target/s390x/tcg/int_helper.c index 2af970f2c8b..fbda396f5b4 100644 --- a/target/s390x/tcg/int_helper.c +++ b/target/s390x/tcg/int_helper.c @@ -22,10 +22,9 @@ #include "cpu.h" #include "s390x-internal.h" #include "tcg_s390x.h" -#include "exec/exec-all.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" /* #define DEBUG_HELPER */ #ifdef DEBUG_HELPER diff --git 
a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c index 0e12dae2aa8..f1acb1618f7 100644 --- a/target/s390x/tcg/mem_helper.c +++ b/target/s390x/tcg/mem_helper.c @@ -24,14 +24,21 @@ #include "s390x-internal.h" #include "tcg_s390x.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" +#include "exec/cpu-common.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" -#include "exec/cpu_ldst.h" -#include "hw/core/tcg-cpu-ops.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/probe.h" +#include "exec/target_page.h" +#include "exec/tlb-flags.h" +#include "accel/tcg/cpu-ops.h" +#include "accel/tcg/helper-retaddr.h" #include "qemu/int128.h" #include "qemu/atomic128.h" -#if !defined(CONFIG_USER_ONLY) +#if defined(CONFIG_USER_ONLY) +#include "user/page-protection.h" +#else #include "hw/s390x/storage-keys.h" #include "hw/boards.h" #endif @@ -119,8 +126,8 @@ static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr, /* An access covers at most 4096 bytes and therefore at most two pages. */ typedef struct S390Access { - target_ulong vaddr1; - target_ulong vaddr2; + vaddr vaddr1; + vaddr vaddr2; void *haddr1; void *haddr2; uint16_t size1; @@ -141,12 +148,12 @@ typedef struct S390Access { * For !CONFIG_USER_ONLY, the TEC is stored stored to env->tlb_fill_tec. * For CONFIG_USER_ONLY, the faulting address is stored to env->__excp_addr. */ -static inline int s390_probe_access(CPUArchState *env, target_ulong addr, +static inline int s390_probe_access(CPUArchState *env, vaddr addr, int size, MMUAccessType access_type, int mmu_idx, bool nonfault, void **phost, uintptr_t ra) { - int flags = probe_access_flags(env, addr, 0, access_type, mmu_idx, + int flags = probe_access_flags(env, addr, size, access_type, mmu_idx, nonfault, phost, ra); if (unlikely(flags & TLB_INVALID_MASK)) { @@ -251,7 +258,7 @@ static void access_memset(CPUS390XState *env, S390Access *desta, static uint8_t access_get_byte(CPUS390XState *env, S390Access *access, int offset, uintptr_t ra) { - target_ulong vaddr = access->vaddr1; + vaddr vaddr = access->vaddr1; void *haddr = access->haddr1; if (unlikely(offset >= access->size1)) { @@ -271,7 +278,7 @@ static uint8_t access_get_byte(CPUS390XState *env, S390Access *access, static void access_set_byte(CPUS390XState *env, S390Access *access, int offset, uint8_t byte, uintptr_t ra) { - target_ulong vaddr = access->vaddr1; + vaddr vaddr = access->vaddr1; void *haddr = access->haddr1; if (unlikely(offset >= access->size1)) { diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c index 303f86d363a..f7101be5745 100644 --- a/target/s390x/tcg/misc_helper.c +++ b/target/s390x/tcg/misc_helper.c @@ -26,23 +26,25 @@ #include "qemu/host-utils.h" #include "exec/helper-proto.h" #include "qemu/timer.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "exec/cputlb.h" +#include "accel/tcg/cpu-ldst.h" +#include "exec/target_page.h" #include "qapi/error.h" #include "tcg_s390x.h" #include "s390-tod.h" #if !defined(CONFIG_USER_ONLY) -#include "sysemu/cpus.h" -#include "sysemu/sysemu.h" +#include "system/cpus.h" +#include "system/system.h" #include "hw/s390x/ebcdic.h" -#include "hw/s390x/s390-virtio-hcall.h" +#include "hw/s390x/s390-hypercall.h" #include "hw/s390x/sclp.h" #include "hw/s390x/s390_flic.h" #include "hw/s390x/ioinst.h" #include "hw/s390x/s390-pci-inst.h" #include "hw/boards.h" #include "hw/s390x/tod.h" +#include CONFIG_DEVICES #endif /* #define DEBUG_HELPER */ @@ -116,12 +118,15 @@ void HELPER(diag)(CPUS390XState *env, 
uint32_t r1, uint32_t r3, uint32_t num) uint64_t r; switch (num) { +#ifdef CONFIG_S390_CCW_VIRTIO case 0x500: - /* KVM hypercall */ + /* QEMU/KVM hypercall */ bql_lock(); - r = s390_virtio_hypercall(env); + handle_diag_500(env_archcpu(env), GETPC()); bql_unlock(); + r = 0; break; +#endif /* CONFIG_S390_CCW_VIRTIO */ case 0x44: /* yield */ r = 0; diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index c81e035dea4..c7e8574438c 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -31,7 +31,6 @@ #include "qemu/osdep.h" #include "cpu.h" #include "s390x-internal.h" -#include "exec/exec-all.h" #include "tcg/tcg-op.h" #include "tcg/tcg-op-gvec.h" #include "qemu/log.h" @@ -40,6 +39,7 @@ #include "exec/helper-gen.h" #include "exec/translator.h" +#include "exec/translation-block.h" #include "exec/log.h" #include "qemu/atomic128.h" @@ -392,7 +392,6 @@ static int get_mem_index(DisasContext *s) return MMU_HOME_IDX; default: g_assert_not_reached(); - break; } #endif } @@ -1250,11 +1249,7 @@ static DisasJumpType op_addc32(DisasContext *s, DisasOps *o) static DisasJumpType op_addc64(DisasContext *s, DisasOps *o) { compute_carry(s); - - TCGv_i64 zero = tcg_constant_i64(0); - tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero); - tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero); - + tcg_gen_addcio_i64(o->out, cc_src, o->in1, o->in2, cc_src); return DISAS_NEXT; } @@ -6423,8 +6418,8 @@ static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) dc->base.is_jmp = translate_one(env, dc); if (dc->base.is_jmp == DISAS_NEXT) { if (dc->ex_value || - !is_same_page(dcbase, dc->base.pc_next) || - !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) { + !translator_is_same_page(dcbase, dc->base.pc_next) || + !translator_is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) { dc->base.is_jmp = DISAS_TOO_MANY; } } @@ -6481,8 +6476,8 @@ static const TranslatorOps s390x_tr_ops = { .disas_log = s390x_tr_disas_log, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void s390x_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc; diff --git a/target/s390x/tcg/vec_fpu_helper.c b/target/s390x/tcg/vec_fpu_helper.c index 75cf605b9f4..744f800fb6c 100644 --- a/target/s390x/tcg/vec_fpu_helper.c +++ b/target/s390x/tcg/vec_fpu_helper.c @@ -15,7 +15,6 @@ #include "vec.h" #include "tcg_s390x.h" #include "tcg/tcg-gvec-desc.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" @@ -621,8 +620,8 @@ static void vfma32(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, int i; for (i = 0; i < 4; i++) { - const float32 a = s390_vec_read_float32(v2, i); - const float32 b = s390_vec_read_float32(v3, i); + const float32 a = s390_vec_read_float32(v3, i); + const float32 b = s390_vec_read_float32(v2, i); const float32 c = s390_vec_read_float32(v4, i); float32 ret = float32_muladd(a, b, c, flags, &env->fpu_status); @@ -645,8 +644,8 @@ static void vfma64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, int i; for (i = 0; i < 2; i++) { - const float64 a = s390_vec_read_float64(v2, i); - const float64 b = s390_vec_read_float64(v3, i); + const float64 a = s390_vec_read_float64(v3, i); + const float64 b = s390_vec_read_float64(v2, i); const float64 c = s390_vec_read_float64(v4, i); const float64 ret = float64_muladd(a, b, c, flags, &env->fpu_status); @@ -664,8 +663,8 @@ static 
void vfma128(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, const S390Vector *v4, CPUS390XState *env, bool s, int flags, uintptr_t retaddr) { - const float128 a = s390_vec_read_float128(v2); - const float128 b = s390_vec_read_float128(v3); + const float128 a = s390_vec_read_float128(v3); + const float128 b = s390_vec_read_float128(v2); const float128 c = s390_vec_read_float128(v4); uint8_t vxc, vec_exc = 0; float128 ret; diff --git a/target/s390x/tcg/vec_helper.c b/target/s390x/tcg/vec_helper.c index dafc4c3582c..46ec4a947dd 100644 --- a/target/s390x/tcg/vec_helper.c +++ b/target/s390x/tcg/vec_helper.c @@ -16,8 +16,7 @@ #include "tcg/tcg.h" #include "tcg/tcg-gvec-desc.h" #include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-ldst.h" void HELPER(gvec_vbperm)(void *v1, const void *v2, const void *v3, uint32_t desc) diff --git a/target/s390x/trace-events b/target/s390x/trace-events index d371ef71b9a..ef3120d3b16 100644 --- a/target/s390x/trace-events +++ b/target/s390x/trace-events @@ -6,7 +6,7 @@ ioinst_sch_id(const char *insn, int cssid, int ssid, int schid) "IOINST: %s (%x. ioinst_chp_id(const char *insn, int cssid, int chpid) "IOINST: %s (%x.%02x)" ioinst_chsc_cmd(uint16_t cmd, uint16_t len) "IOINST: chsc command 0x%04x, len 0x%04x" -# cpu-sysemu.c +# cpu-system.c cpu_set_state(int cpu_index, uint8_t state) "setting cpu %d state to %" PRIu8 cpu_halt(int cpu_index) "halting cpu %d" cpu_unhalt(int cpu_index) "unhalting cpu %d" diff --git a/target/sh4/cpu-param.h b/target/sh4/cpu-param.h index a7cdb7edb66..f328715ee86 100644 --- a/target/sh4/cpu-param.h +++ b/target/sh4/cpu-param.h @@ -2,13 +2,12 @@ * SH4 cpu parameters for qemu. * * Copyright (c) 2005 Samuel Tardieu - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #ifndef SH4_CPU_PARAM_H #define SH4_CPU_PARAM_H -#define TARGET_LONG_BITS 32 #define TARGET_PAGE_BITS 12 /* 4k */ #define TARGET_PHYS_ADDR_SPACE_BITS 32 #ifdef CONFIG_USER_ONLY @@ -17,4 +16,6 @@ # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif +#define TARGET_INSN_START_EXTRA_WORDS 1 + #endif diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index 8f07261dcfd..4f561e8c912 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -24,8 +24,9 @@ #include "qemu/qemu-print.h" #include "cpu.h" #include "migration/vmstate.h" -#include "exec/exec-all.h" +#include "exec/translation-block.h" #include "fpu/softfloat-helpers.h" +#include "accel/tcg/cpu-ops.h" #include "tcg/tcg.h" static void superh_cpu_set_pc(CPUState *cs, vaddr value) @@ -42,6 +43,29 @@ static vaddr superh_cpu_get_pc(CPUState *cs) return cpu->env.pc; } +static TCGTBCPUState superh_get_tb_cpu_state(CPUState *cs) +{ + CPUSH4State *env = cpu_env(cs); + uint32_t flags; + + flags = env->flags + | (env->fpscr & TB_FLAG_FPSCR_MASK) + | (env->sr & TB_FLAG_SR_MASK) + | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */ +#ifdef CONFIG_USER_ONLY + flags |= TB_FLAG_UNALIGN * !cs->prctl_unalign_sigbus; +#endif + + return (TCGTBCPUState){ + .pc = env->pc, + .flags = flags, +#ifdef CONFIG_USER_ONLY + /* For a gUSA region, notice the end of the region. */ + .cs_base = flags & TB_FLAG_GUSA_MASK ? 
env->gregs[0] : 0, +#endif + }; +} + static void superh_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -81,12 +105,12 @@ static bool superh_io_recompile_replay_branch(CPUState *cs, } return false; } -#endif static bool superh_cpu_has_work(CPUState *cs) { return cs->interrupt_request & CPU_INTERRUPT_HARD; } +#endif /* !CONFIG_USER_ONLY */ static int sh4_cpu_mmu_index(CPUState *cs, bool ifetch) { @@ -127,10 +151,23 @@ static void superh_cpu_reset_hold(Object *obj, ResetType type) set_flush_to_zero(1, &env->fp_status); #endif set_default_nan_mode(1, &env->fp_status); + set_snan_bit_is_one(true, &env->fp_status); + /* sign bit clear, set all frac bits other than msb */ + set_float_default_nan_pattern(0b00111111, &env->fp_status); + /* + * TODO: "SH-4 CPU Core Architecture ADCS 7182230F" doesn't say whether + * it detects tininess before or after rounding. Section 6.4 is clear + * that flush-to-zero happens when the result underflows, though, so + * either this should be "detect ftz after rounding" or else we should + * be setting "detect tininess before rounding". + */ + set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status); } static void superh_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) { + info->endian = TARGET_BIG_ENDIAN ? BFD_ENDIAN_BIG + : BFD_ENDIAN_LITTLE; info->mach = bfd_mach_sh4; info->print_insn = print_insn_sh; } @@ -163,7 +200,7 @@ static void sh7750r_cpu_initfn(Object *obj) env->features = SH_FEATURE_BCR3_AND_BCR4; } -static void sh7750r_class_init(ObjectClass *oc, void *data) +static void sh7750r_class_init(ObjectClass *oc, const void *data) { SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc); @@ -180,7 +217,7 @@ static void sh7751r_cpu_initfn(Object *obj) env->features = SH_FEATURE_BCR3_AND_BCR4; } -static void sh7751r_class_init(ObjectClass *oc, void *data) +static void sh7751r_class_init(ObjectClass *oc, const void *data) { SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc); @@ -197,7 +234,7 @@ static void sh7785_cpu_initfn(Object *obj) env->features = SH_FEATURE_SH4A; } -static void sh7785_class_init(ObjectClass *oc, void *data) +static void sh7785_class_init(ObjectClass *oc, const void *data) { SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc); @@ -240,28 +277,36 @@ static const VMStateDescription vmstate_sh_cpu = { #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps sh4_sysemu_ops = { + .has_work = superh_cpu_has_work, .get_phys_page_debug = superh_cpu_get_phys_page_debug, }; #endif -#include "hw/core/tcg-cpu-ops.h" - static const TCGCPUOps superh_tcg_ops = { + /* MTTCG not yet supported: require strict ordering */ + .guest_default_memory_order = TCG_MO_ALL, + .mttcg_supported = false, + .initialize = sh4_translate_init, + .translate_code = sh4_translate_code, + .get_tb_cpu_state = superh_get_tb_cpu_state, .synchronize_from_tb = superh_cpu_synchronize_from_tb, .restore_state_to_opc = superh_restore_state_to_opc, + .mmu_index = sh4_cpu_mmu_index, #ifndef CONFIG_USER_ONLY .tlb_fill = superh_cpu_tlb_fill, + .pointer_wrap = cpu_pointer_wrap_notreached, .cpu_exec_interrupt = superh_cpu_exec_interrupt, .cpu_exec_halt = superh_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = superh_cpu_do_interrupt, .do_unaligned_access = superh_cpu_do_unaligned_access, .io_recompile_replay_branch = superh_io_recompile_replay_branch, #endif /* !CONFIG_USER_ONLY */ }; -static void superh_cpu_class_init(ObjectClass *oc, void *data) +static void superh_cpu_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); 
CPUClass *cc = CPU_CLASS(oc); @@ -275,8 +320,6 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data) &scc->parent_phases); cc->class_by_name = superh_cpu_class_by_name; - cc->has_work = superh_cpu_has_work; - cc->mmu_index = sh4_cpu_mmu_index; cc->dump_state = superh_cpu_dump_state; cc->set_pc = superh_cpu_set_pc; cc->get_pc = superh_cpu_get_pc; diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h index d928bcf0067..c41ab70dd7c 100644 --- a/target/sh4/cpu.h +++ b/target/sh4/cpu.h @@ -21,7 +21,9 @@ #define SH4_CPU_H #include "cpu-qom.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #include "qemu/cpu-float.h" /* CPU Subtypes */ @@ -125,8 +127,6 @@ typedef struct tlb_t { #define UTLB_SIZE 64 #define ITLB_SIZE 4 -#define TARGET_INSN_START_EXTRA_WORDS 1 - enum sh_features { SH_FEATURE_SH4A = 1, SH_FEATURE_BCR3_AND_BCR4 = 2, @@ -248,6 +248,8 @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, uintptr_t retaddr); void sh4_translate_init(void); +void sh4_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); #if !defined(CONFIG_USER_ONLY) hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); @@ -284,8 +286,6 @@ void cpu_load_tlb(CPUSH4State * env); /* MMU modes definitions */ #define MMU_USER_IDX 1 -#include "exec/cpu-all.h" - /* MMU control register */ #define MMUCR 0x1F000010 #define MMUCR_AT (1<<0) @@ -380,19 +380,4 @@ static inline void cpu_write_sr(CPUSH4State *env, target_ulong sr) env->sr = sr & ~((1u << SR_M) | (1u << SR_Q) | (1u << SR_T)); } -static inline void cpu_get_tb_cpu_state(CPUSH4State *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags) -{ - *pc = env->pc; - /* For a gUSA region, notice the end of the region. */ - *cs_base = env->flags & TB_FLAG_GUSA_MASK ? env->gregs[0] : 0; - *flags = env->flags - | (env->fpscr & TB_FLAG_FPSCR_MASK) - | (env->sr & TB_FLAG_SR_MASK) - | (env->movcal_backup ? 
TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */ -#ifdef CONFIG_USER_ONLY - *flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus; -#endif -} - #endif /* SH4_CPU_H */ diff --git a/target/sh4/helper.c b/target/sh4/helper.c index 9659c695504..fb7642bda1b 100644 --- a/target/sh4/helper.c +++ b/target/sh4/helper.c @@ -20,13 +20,14 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" +#include "exec/target_page.h" #include "exec/log.h" #if !defined(CONFIG_USER_ONLY) #include "hw/sh4/sh_intc.h" -#include "sysemu/runstate.h" +#include "system/runstate.h" #endif #define MMU_OK 0 diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c index 99394b714c9..557b1bf4972 100644 --- a/target/sh4/op_helper.c +++ b/target/sh4/op_helper.c @@ -19,8 +19,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "fpu/softfloat.h" #ifndef CONFIG_USER_ONLY diff --git a/target/sh4/translate.c b/target/sh4/translate.c index 53b092175dc..70fd13aa3f5 100644 --- a/target/sh4/translate.c +++ b/target/sh4/translate.c @@ -19,11 +19,12 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "tcg/tcg-op.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" +#include "exec/translation-block.h" #include "exec/translator.h" +#include "exec/target_page.h" #include "exec/log.h" #include "qemu/qemu-print.h" @@ -53,7 +54,7 @@ typedef struct DisasContext { #define UNALIGN(C) (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN) #else #define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD))) -#define UNALIGN(C) 0 +#define UNALIGN(C) MO_ALIGN #endif /* Target-specific values for ctx->base.is_jmp. */ @@ -693,14 +694,8 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0x300e: /* addc Rm,Rn */ - { - TCGv t0, t1; - t0 = tcg_constant_tl(0); - t1 = tcg_temp_new(); - tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0); - tcg_gen_add2_i32(REG(B11_8), cpu_sr_t, - REG(B11_8), t0, t1, cpu_sr_t); - } + tcg_gen_addcio_i32(REG(B11_8), cpu_sr_t, + REG(B11_8), REG(B7_4), cpu_sr_t); return; case 0x300f: /* addv Rm,Rn */ { @@ -1791,7 +1786,6 @@ static void _decode_opc(DisasContext * ctx) gen_helper_raise_fpu_disable(tcg_env); } ctx->base.is_jmp = DISAS_NORETURN; - return; } static void decode_opc(DisasContext * ctx) @@ -1939,16 +1933,16 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) NEXT_INSN; switch (ctx->opcode & 0xf00f) { case 0x300c: /* add Rm,Rn */ - op_opc = INDEX_op_add_i32; + op_opc = INDEX_op_add; goto do_reg_op; case 0x2009: /* and Rm,Rn */ - op_opc = INDEX_op_and_i32; + op_opc = INDEX_op_and; goto do_reg_op; case 0x200a: /* xor Rm,Rn */ - op_opc = INDEX_op_xor_i32; + op_opc = INDEX_op_xor; goto do_reg_op; case 0x200b: /* or Rm,Rn */ - op_opc = INDEX_op_or_i32; + op_opc = INDEX_op_or; do_reg_op: /* The operation register should be as expected, and the other input cannot depend on the load. 
*/ @@ -1975,7 +1969,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) goto fail; } op_dst = B11_8; - op_opc = INDEX_op_xor_i32; + op_opc = INDEX_op_xor; op_arg = tcg_constant_i32(-1); break; @@ -1983,7 +1977,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) if (op_dst != B11_8 || mv_src >= 0) { goto fail; } - op_opc = INDEX_op_add_i32; + op_opc = INDEX_op_add; op_arg = tcg_constant_i32(B7_0s); break; @@ -1994,7 +1988,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) { goto fail; } - op_opc = INDEX_op_setcond_i32; /* placeholder */ + op_opc = INDEX_op_setcond; /* placeholder */ op_src = (ld_dst == B11_8 ? B7_4 : B11_8); op_arg = REG(op_src); @@ -2029,7 +2023,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) { goto fail; } - op_opc = INDEX_op_setcond_i32; + op_opc = INDEX_op_setcond; op_arg = tcg_constant_i32(0); NEXT_INSN; @@ -2086,7 +2080,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) ctx->memidx, ld_mop); break; - case INDEX_op_add_i32: + case INDEX_op_add: if (op_dst != st_src) { goto fail; } @@ -2104,7 +2098,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) } break; - case INDEX_op_and_i32: + case INDEX_op_and: if (op_dst != st_src) { goto fail; } @@ -2118,7 +2112,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) } break; - case INDEX_op_or_i32: + case INDEX_op_or: if (op_dst != st_src) { goto fail; } @@ -2132,7 +2126,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) } break; - case INDEX_op_xor_i32: + case INDEX_op_xor: if (op_dst != st_src) { goto fail; } @@ -2146,7 +2140,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) } break; - case INDEX_op_setcond_i32: + case INDEX_op_setcond: if (st_src == ld_dst) { goto fail; } @@ -2317,8 +2311,8 @@ static const TranslatorOps sh4_tr_ops = { .tb_stop = sh4_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void sh4_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext ctx; diff --git a/target/sparc/cpu-param.h b/target/sparc/cpu-param.h index 82293fb8449..45eea9d6bac 100644 --- a/target/sparc/cpu-param.h +++ b/target/sparc/cpu-param.h @@ -1,14 +1,13 @@ /* * Sparc cpu parameters for qemu. * - * SPDX-License-Identifier: LGPL-2.0+ + * SPDX-License-Identifier: LGPL-2.0-or-later */ #ifndef SPARC_CPU_PARAM_H #define SPARC_CPU_PARAM_H #ifdef TARGET_SPARC64 -# define TARGET_LONG_BITS 64 # define TARGET_PAGE_BITS 13 /* 8k */ # define TARGET_PHYS_ADDR_SPACE_BITS 41 # ifdef TARGET_ABI32 @@ -17,33 +16,11 @@ # define TARGET_VIRT_ADDR_SPACE_BITS 44 # endif #else -# define TARGET_LONG_BITS 32 # define TARGET_PAGE_BITS 12 /* 4k */ # define TARGET_PHYS_ADDR_SPACE_BITS 36 # define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif -/* - * From Oracle SPARC Architecture 2015: - * - * Compatibility notes: The PSO memory model described in SPARC V8 and - * SPARC V9 compatibility architecture specifications was never implemented - * in a SPARC V9 implementation and is not included in the Oracle SPARC - * Architecture specification. - * - * The RMO memory model described in the SPARC V9 specification was - * implemented in some non-Sun SPARC V9 implementations, but is not - * directly supported in Oracle SPARC Architecture 2015 implementations. - * - * Therefore always use TSO in QEMU. 
- * - * D.5 Specification of Partial Store Order (PSO) - * ... [loads] are followed by an implied MEMBAR #LoadLoad | #LoadStore. - * - * D.6 Specification of Total Store Order (TSO) - * ... PSO with the additional requirement that all [stores] are followed - * by an implied MEMBAR #StoreStore. - */ -#define TCG_GUEST_DEFAULT_MO (TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST) +#define TARGET_INSN_START_EXTRA_WORDS 1 #endif diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c index 54cb269e0af..245caf2de02 100644 --- a/target/sparc/cpu.c +++ b/target/sparc/cpu.c @@ -22,10 +22,13 @@ #include "cpu.h" #include "qemu/module.h" #include "qemu/qemu-print.h" -#include "exec/exec-all.h" +#include "accel/tcg/cpu-mmu-index.h" +#include "exec/translation-block.h" #include "hw/qdev-properties.h" #include "qapi/visitor.h" #include "tcg/tcg.h" +#include "fpu/softfloat.h" +#include "target/sparc/translate.h" //#define DEBUG_FEATURES @@ -76,6 +79,7 @@ static void sparc_cpu_reset_hold(Object *obj, ResetType type) env->npc = env->pc + 4; #endif env->cache_control = 0; + cpu_put_fsr(env, 0); } #ifndef CONFIG_USER_ONLY @@ -102,6 +106,7 @@ static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) static void cpu_sparc_disas_set_info(CPUState *cpu, disassemble_info *info) { info->print_insn = print_insn_sparc; + info->endian = BFD_ENDIAN_BIG; #ifdef TARGET_SPARC64 info->mach = bfd_mach_sparc_v9b; #endif @@ -574,7 +579,7 @@ static void print_features(uint32_t features, const char *prefix) } } -void sparc_cpu_list(void) +static void sparc_cpu_list(void) { unsigned int i; @@ -711,11 +716,77 @@ static void sparc_cpu_synchronize_from_tb(CPUState *cs, cpu->env.npc = tb->cs_base; } +static TCGTBCPUState sparc_get_tb_cpu_state(CPUState *cs) +{ + CPUSPARCState *env = cpu_env(cs); + uint32_t flags = cpu_mmu_index(cs, false); + +#ifndef CONFIG_USER_ONLY + if (cpu_supervisor_mode(env)) { + flags |= TB_FLAG_SUPER; + } +#endif +#ifdef TARGET_SPARC64 +#ifndef CONFIG_USER_ONLY + if (cpu_hypervisor_mode(env)) { + flags |= TB_FLAG_HYPER; + } +#endif + if (env->pstate & PS_AM) { + flags |= TB_FLAG_AM_ENABLED; + } + if ((env->pstate & PS_PEF) && (env->fprs & FPRS_FEF)) { + flags |= TB_FLAG_FPU_ENABLED; + } + flags |= env->asi << TB_FLAG_ASI_SHIFT; +#else + if (env->psref) { + flags |= TB_FLAG_FPU_ENABLED; + } +#ifndef CONFIG_USER_ONLY + if (env->fsr_qne) { + flags |= TB_FLAG_FSR_QNE; + } +#endif /* !CONFIG_USER_ONLY */ +#endif /* TARGET_SPARC64 */ + + return (TCGTBCPUState){ + .pc = env->pc, + .flags = flags, + .cs_base = env->npc, + }; +} + +static void sparc_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + CPUSPARCState *env = cpu_env(cs); + target_ulong pc = data[0]; + target_ulong npc = data[1]; + + env->pc = pc; + if (npc == DYNAMIC_PC) { + /* dynamic NPC: already stored */ + } else if (npc & JUMP_PC) { + /* jump PC: use 'cond' and the jump targets of the translation */ + if (env->cond) { + env->npc = npc & ~3; + } else { + env->npc = pc + 4; + } + } else { + env->npc = npc; + } +} + +#ifndef CONFIG_USER_ONLY static bool sparc_cpu_has_work(CPUState *cs) { return (cs->interrupt_request & CPU_INTERRUPT_HARD) && cpu_interrupts_enabled(cpu_env(cs)); } +#endif /* !CONFIG_USER_ONLY */ static int sparc_cpu_mmu_index(CPUState *cs, bool ifetch) { @@ -805,7 +876,19 @@ static void sparc_cpu_realizefn(DeviceState *dev, Error **errp) env->version |= env->def.maxtl << 8; env->version |= env->def.nwindows - 1; #endif - cpu_put_fsr(env, 0); + + /* + * Prefer SNaN over QNaN, order B then 
A. It's OK to do this in realize + * rather than reset, because fp_status is after 'end_reset_fields' in + * the CPU state struct so it won't get zeroed on reset. + */ + set_float_2nan_prop_rule(float_2nan_prop_s_ba, &env->fp_status); + /* For fused-multiply add, prefer SNaN over QNaN, then C->B->A */ + set_float_3nan_prop_rule(float_3nan_prop_s_cba, &env->fp_status); + /* For inf * 0 + NaN, return the input NaN */ + set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status); + /* Default NaN value: sign bit clear, all frac bits set */ + set_float_default_nan_pattern(0b01111111, &env->fp_status); cpu_exec_realizefn(cs, &local_err); if (local_err != NULL) { @@ -860,14 +943,15 @@ static void sparc_set_nwindows(Object *obj, Visitor *v, const char *name, cpu->env.def.nwindows = value; } -static PropertyInfo qdev_prop_nwindows = { - .name = "int", +static const PropertyInfo qdev_prop_nwindows = { + .type = "int", + .description = "Number of register windows", .get = sparc_get_nwindows, .set = sparc_set_nwindows, }; /* This must match feature_name[]. */ -static Property sparc_cpu_properties[] = { +static const Property sparc_cpu_properties[] = { DEFINE_PROP_BIT("float128", SPARCCPU, env.def.features, CPU_FEATURE_BIT_FLOAT128, false), #ifdef TARGET_SPARC64 @@ -903,30 +987,71 @@ static Property sparc_cpu_properties[] = { DEFINE_PROP_UINT32("mmu-version", SPARCCPU, env.def.mmu_version, 0), DEFINE_PROP("nwindows", SPARCCPU, env.def.nwindows, qdev_prop_nwindows, uint32_t), - DEFINE_PROP_END_OF_LIST() }; #ifndef CONFIG_USER_ONLY #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps sparc_sysemu_ops = { + .has_work = sparc_cpu_has_work, .get_phys_page_debug = sparc_cpu_get_phys_page_debug, .legacy_vmsd = &vmstate_sparc_cpu, }; #endif #ifdef CONFIG_TCG -#include "hw/core/tcg-cpu-ops.h" +#include "accel/tcg/cpu-ops.h" + +#ifndef CONFIG_USER_ONLY +static vaddr sparc_pointer_wrap(CPUState *cs, int mmu_idx, + vaddr result, vaddr base) +{ +#ifdef TARGET_SPARC64 + return cpu_env(cs)->pstate & PS_AM ? (uint32_t)result : result; +#else + return (uint32_t)result; +#endif +} +#endif static const TCGCPUOps sparc_tcg_ops = { + /* + * From Oracle SPARC Architecture 2015: + * + * Compatibility notes: The PSO memory model described in SPARC V8 and + * SPARC V9 compatibility architecture specifications was never + * implemented in a SPARC V9 implementation and is not included in the + * Oracle SPARC Architecture specification. + * + * The RMO memory model described in the SPARC V9 specification was + * implemented in some non-Sun SPARC V9 implementations, but is not + * directly supported in Oracle SPARC Architecture 2015 implementations. + * + * Therefore always use TSO in QEMU. + * + * D.5 Specification of Partial Store Order (PSO) + * ... [loads] are followed by an implied MEMBAR #LoadLoad | #LoadStore. + * + * D.6 Specification of Total Store Order (TSO) + * ... PSO with the additional requirement that all [stores] are followed + * by an implied MEMBAR #StoreStore. 
+ */ + .guest_default_memory_order = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST, + .mttcg_supported = true, + .initialize = sparc_tcg_init, + .translate_code = sparc_translate_code, + .get_tb_cpu_state = sparc_get_tb_cpu_state, .synchronize_from_tb = sparc_cpu_synchronize_from_tb, .restore_state_to_opc = sparc_restore_state_to_opc, + .mmu_index = sparc_cpu_mmu_index, #ifndef CONFIG_USER_ONLY .tlb_fill = sparc_cpu_tlb_fill, + .pointer_wrap = sparc_pointer_wrap, .cpu_exec_interrupt = sparc_cpu_exec_interrupt, .cpu_exec_halt = sparc_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = sparc_cpu_do_interrupt, .do_transaction_failed = sparc_cpu_do_transaction_failed, .do_unaligned_access = sparc_cpu_do_unaligned_access, @@ -934,7 +1059,7 @@ static const TCGCPUOps sparc_tcg_ops = { }; #endif /* CONFIG_TCG */ -static void sparc_cpu_class_init(ObjectClass *oc, void *data) +static void sparc_cpu_class_init(ObjectClass *oc, const void *data) { SPARCCPUClass *scc = SPARC_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); @@ -949,9 +1074,8 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data) &scc->parent_phases); cc->class_by_name = sparc_cpu_class_by_name; + cc->list_cpus = sparc_cpu_list, cc->parse_features = sparc_cpu_parse_features; - cc->has_work = sparc_cpu_has_work; - cc->mmu_index = sparc_cpu_mmu_index; cc->dump_state = sparc_cpu_dump_state; #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) cc->memory_rw_debug = sparc_cpu_memory_rw_debug; @@ -966,6 +1090,7 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data) cc->disas_set_info = cpu_sparc_disas_set_info; #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) + cc->gdb_core_xml_file = "sparc64-core.xml"; cc->gdb_num_core_regs = 86; #else cc->gdb_num_core_regs = 72; @@ -984,7 +1109,7 @@ static const TypeInfo sparc_cpu_type_info = { .class_init = sparc_cpu_class_init, }; -static void sparc_cpu_cpudef_class_init(ObjectClass *oc, void *data) +static void sparc_cpu_cpudef_class_init(ObjectClass *oc, const void *data) { SPARCCPUClass *scc = SPARC_CPU_CLASS(oc); scc->cpu_def = data; @@ -997,10 +1122,10 @@ static void sparc_register_cpudef_type(const struct sparc_def_t *def) .name = typename, .parent = TYPE_SPARC_CPU, .class_init = sparc_cpu_cpudef_class_init, - .class_data = (void *)def, + .class_data = def, }; - type_register(&ti); + type_register_static(&ti); g_free(typename); } diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h index dfd9512a216..31cb3d97eb1 100644 --- a/target/sparc/cpu.h +++ b/target/sparc/cpu.h @@ -3,7 +3,9 @@ #include "qemu/bswap.h" #include "cpu-qom.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #include "qemu/cpu-float.h" #if !defined(TARGET_SPARC64) @@ -184,6 +186,8 @@ enum { #define FSR_FTT_SEQ_ERROR (4ULL << 14) #define FSR_FTT_INVAL_FPR (6ULL << 14) +#define FSR_QNE (1ULL << 13) + #define FSR_FCC0_SHIFT 10 #define FSR_FCC1_SHIFT 32 #define FSR_FCC2_SHIFT 34 @@ -219,7 +223,6 @@ typedef struct trap_state { uint32_t tt; } trap_state; #endif -#define TARGET_INSN_START_EXTRA_WORDS 1 typedef struct sparc_def_t { const char *name; @@ -438,6 +441,26 @@ struct CPUArchState { uint32_t fsr_cexc_ftt; /* cexc, ftt */ uint32_t fcc[TARGET_FCCREGS]; /* fcc* */ +#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) + /* + * Single-element FPU fault queue, with address and insn, + * packaged into the double-word with which it is stored. 
+ */ + uint32_t fsr_qne; /* qne */ + union { + uint64_t d; + struct { +#if HOST_BIG_ENDIAN + uint32_t addr; + uint32_t insn; +#else + uint32_t insn; + uint32_t addr; +#endif + } s; + } fq; +#endif + CPU_DoubleU fpr[TARGET_DPREGS]; /* floating point registers */ uint32_t cwp; /* index of current register window (extracted from PSR) */ @@ -552,7 +575,7 @@ struct SPARCCPUClass { DeviceRealize parent_realize; ResettablePhases parent_phases; - sparc_def_t *cpu_def; + const sparc_def_t *cpu_def; }; #ifndef CONFIG_USER_ONLY @@ -572,7 +595,6 @@ G_NORETURN void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t); /* cpu_init.c */ void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu); -void sparc_cpu_list(void); /* mmu_helper.c */ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, @@ -582,15 +604,13 @@ void dump_mmu(CPUSPARCState *env); #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr, - uint8_t *buf, int len, bool is_write); + uint8_t *buf, size_t len, bool is_write); #endif - /* translate.c */ void sparc_tcg_init(void); -void sparc_restore_state_to_opc(CPUState *cs, - const TranslationBlock *tb, - const uint64_t *data); +void sparc_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); /* fop_helper.c */ target_ulong cpu_get_fsr(CPUSPARCState *); @@ -645,8 +665,6 @@ hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, #define CPU_RESOLVING_TYPE TYPE_SPARC_CPU -#define cpu_list sparc_cpu_list - /* MMU modes definitions */ #if defined (TARGET_SPARC64) #define MMU_USER_IDX 0 @@ -707,8 +725,6 @@ static inline int cpu_pil_allowed(CPUSPARCState *env1, int pil) #endif } -#include "exec/cpu-all.h" - #ifdef TARGET_SPARC64 /* sun4u.c */ void cpu_tick_set_count(CPUTimer *timer, uint64_t count); @@ -722,41 +738,9 @@ trap_state* cpu_tsptr(CPUSPARCState* env); #define TB_FLAG_AM_ENABLED (1 << 5) #define TB_FLAG_SUPER (1 << 6) #define TB_FLAG_HYPER (1 << 7) +#define TB_FLAG_FSR_QNE (1 << 8) #define TB_FLAG_ASI_SHIFT 24 -static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *pflags) -{ - uint32_t flags; - *pc = env->pc; - *cs_base = env->npc; - flags = cpu_mmu_index(env_cpu(env), false); -#ifndef CONFIG_USER_ONLY - if (cpu_supervisor_mode(env)) { - flags |= TB_FLAG_SUPER; - } -#endif -#ifdef TARGET_SPARC64 -#ifndef CONFIG_USER_ONLY - if (cpu_hypervisor_mode(env)) { - flags |= TB_FLAG_HYPER; - } -#endif - if (env->pstate & PS_AM) { - flags |= TB_FLAG_AM_ENABLED; - } - if ((env->pstate & PS_PEF) && (env->fprs & FPRS_FEF)) { - flags |= TB_FLAG_FPU_ENABLED; - } - flags |= env->asi << TB_FLAG_ASI_SHIFT; -#else - if (env->psref) { - flags |= TB_FLAG_FPU_ENABLED; - } -#endif - *pflags = flags; -} - static inline bool tb_fpu_enabled(int tb_flags) { #if defined(CONFIG_USER_ONLY) diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c index 0b30665b511..29fd1664384 100644 --- a/target/sparc/fop_helper.c +++ b/target/sparc/fop_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" @@ -344,17 +343,17 @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src) } float32 helper_fmadds(CPUSPARCState *env, float32 s1, - float32 s2, float32 s3, uint32_t op) + float32 s2, float32 s3, int32_t sc, uint32_t op) { - float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status); + float32 ret = 
float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status); check_ieee_exceptions(env, GETPC()); return ret; } float64 helper_fmaddd(CPUSPARCState *env, float64 s1, - float64 s2, float64 s3, uint32_t op) + float64 s2, float64 s3, int32_t sc, uint32_t op) { - float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status); + float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status); check_ieee_exceptions(env, GETPC()); return ret; } @@ -446,7 +445,6 @@ static uint32_t finish_fcmp(CPUSPARCState *env, FloatRelation r, uintptr_t ra) case float_relation_greater: return 2; case float_relation_unordered: - env->fsr |= FSR_NVA; return 3; } g_assert_not_reached(); @@ -490,14 +488,17 @@ uint32_t helper_fcmpeq(CPUSPARCState *env, Int128 src1, Int128 src2) return finish_fcmp(env, r, GETPC()); } -uint32_t helper_flcmps(float32 src1, float32 src2) +uint32_t helper_flcmps(CPUSPARCState *env, float32 src1, float32 src2) { /* * FLCMP never raises an exception nor modifies any FSR fields. * Perform the comparison with a dummy fp environment. */ - float_status discard = { }; - FloatRelation r = float32_compare_quiet(src1, src2, &discard); + float_status discard = env->fp_status; + FloatRelation r; + + set_float_2nan_prop_rule(float_2nan_prop_s_ba, &discard); + r = float32_compare_quiet(src1, src2, &discard); switch (r) { case float_relation_equal: @@ -515,10 +516,13 @@ uint32_t helper_flcmps(float32 src1, float32 src2) g_assert_not_reached(); } -uint32_t helper_flcmpd(float64 src1, float64 src2) +uint32_t helper_flcmpd(CPUSPARCState *env, float64 src1, float64 src2) { - float_status discard = { }; - FloatRelation r = float64_compare_quiet(src1, src2, &discard); + float_status discard = env->fp_status; + FloatRelation r; + + set_float_2nan_prop_rule(float_2nan_prop_s_ba, &discard); + r = float64_compare_quiet(src1, src2, &discard); switch (r) { case float_relation_equal: @@ -545,6 +549,8 @@ target_ulong cpu_get_fsr(CPUSPARCState *env) fsr |= (uint64_t)env->fcc[1] << FSR_FCC1_SHIFT; fsr |= (uint64_t)env->fcc[2] << FSR_FCC2_SHIFT; fsr |= (uint64_t)env->fcc[3] << FSR_FCC3_SHIFT; +#elif !defined(CONFIG_USER_ONLY) + fsr |= env->fsr_qne; #endif /* VER is kept completely separate until re-assembly. */ @@ -591,6 +597,8 @@ void cpu_put_fsr(CPUSPARCState *env, target_ulong fsr) env->fcc[1] = extract64(fsr, FSR_FCC1_SHIFT, 2); env->fcc[2] = extract64(fsr, FSR_FCC2_SHIFT, 2); env->fcc[3] = extract64(fsr, FSR_FCC3_SHIFT, 2); +#elif !defined(CONFIG_USER_ONLY) + env->fsr_qne = fsr & FSR_QNE; #endif set_fsr_nonsplit(env, fsr); diff --git a/target/sparc/gdbstub.c b/target/sparc/gdbstub.c index ec0036e9ef6..134617fb232 100644 --- a/target/sparc/gdbstub.c +++ b/target/sparc/gdbstub.c @@ -79,8 +79,13 @@ int sparc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) } } if (n < 80) { - /* f32-f62 (double width, even numbers only) */ - return gdb_get_reg64(mem_buf, env->fpr[(n - 32) / 2].ll); + /* f32-f62 (16 double width registers, even register numbers only) + * n == 64: f32 : env->fpr[16] + * n == 65: f34 : env->fpr[17] + * etc... + * n == 79: f62 : env->fpr[31] + */ + return gdb_get_reg64(mem_buf, env->fpr[(n - 64) + 16].ll); } switch (n) { case 80: @@ -173,8 +178,13 @@ int sparc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) } return 4; } else if (n < 80) { - /* f32-f62 (double width, even numbers only) */ - env->fpr[(n - 32) / 2].ll = tmp; + /* f32-f62 (16 double width registers, even register numbers only) + * n == 64: f32 : env->fpr[16] + * n == 65: f34 : env->fpr[17] + * etc... 
+ * n == 79: f62 : env->fpr[31] + */ + env->fpr[(n - 64) + 16].ll = tmp; } else { switch (n) { case 80: diff --git a/target/sparc/helper.c b/target/sparc/helper.c index 7846ddd6f62..9163b9d46ad 100644 --- a/target/sparc/helper.c +++ b/target/sparc/helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "qemu/timer.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" diff --git a/target/sparc/helper.h b/target/sparc/helper.h index 134e519a377..3a7f7dc1296 100644 --- a/target/sparc/helper.h +++ b/target/sparc/helper.h @@ -51,15 +51,15 @@ DEF_HELPER_FLAGS_3(fcmpd, TCG_CALL_NO_WG, i32, env, f64, f64) DEF_HELPER_FLAGS_3(fcmped, TCG_CALL_NO_WG, i32, env, f64, f64) DEF_HELPER_FLAGS_3(fcmpq, TCG_CALL_NO_WG, i32, env, i128, i128) DEF_HELPER_FLAGS_3(fcmpeq, TCG_CALL_NO_WG, i32, env, i128, i128) -DEF_HELPER_FLAGS_2(flcmps, TCG_CALL_NO_RWG_SE, i32, f32, f32) -DEF_HELPER_FLAGS_2(flcmpd, TCG_CALL_NO_RWG_SE, i32, f64, f64) +DEF_HELPER_FLAGS_3(flcmps, TCG_CALL_NO_RWG_SE, i32, env, f32, f32) +DEF_HELPER_FLAGS_3(flcmpd, TCG_CALL_NO_RWG_SE, i32, env, f64, f64) DEF_HELPER_2(raise_exception, noreturn, env, int) DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64) DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64) DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64) DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64) -DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32) +DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32) DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64) DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64) @@ -72,7 +72,7 @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32) -DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32) +DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32) DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32) DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32) diff --git a/target/sparc/insns.decode b/target/sparc/insns.decode index fbcb4f7aef7..9e39d232735 100644 --- a/target/sparc/insns.decode +++ b/target/sparc/insns.decode @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: LGPL-2.0+ +# SPDX-License-Identifier: LGPL-2.0-or-later # # Sparc instruction decode definitions. # Copyright (c) 2023 Richard Henderson @@ -96,7 +96,10 @@ CALL 01 i:s30 RDTICK 10 rd:5 101000 00100 0 0000000000000 RDPC 10 rd:5 101000 00101 0 0000000000000 RDFPRS 10 rd:5 101000 00110 0 0000000000000 - RDASR17 10 rd:5 101000 10001 0 0000000000000 + { + RDASR17 10 rd:5 101000 10001 0 0000000000000 + RDPIC 10 rd:5 101000 10001 0 0000000000000 + } RDGSR 10 rd:5 101000 10011 0 0000000000000 RDSOFTINT 10 rd:5 101000 10110 0 0000000000000 RDTICK_CMPR 10 rd:5 101000 10111 0 0000000000000 @@ -114,6 +117,8 @@ CALL 01 i:s30 WRCCR 10 00010 110000 ..... . ............. @n_r_ri WRASI 10 00011 110000 ..... . ............. @n_r_ri WRFPRS 10 00110 110000 ..... . ............. @n_r_ri + WRPCR 10 10000 110000 01000 0 0000000000000 + WRPIC 10 10001 110000 01000 0 0000000000000 { WRGSR 10 10011 110000 ..... . ............. @n_r_ri WRPOWERDOWN 10 10011 110000 ..... . ............. @n_r_ri @@ -321,12 +326,12 @@ FdMULq 10 ..... 110100 ..... 0 0110 1110 ..... @q_d_d FNHADDs 10 ..... 110100 ..... 
0 0111 0001 ..... @r_r_r FNHADDd 10 ..... 110100 ..... 0 0111 0010 ..... @d_d_d FNsMULd 10 ..... 110100 ..... 0 0111 1001 ..... @d_r_r -FsTOx 10 ..... 110100 00000 0 1000 0001 ..... @r_r2 -FdTOx 10 ..... 110100 00000 0 1000 0010 ..... @r_d2 -FqTOx 10 ..... 110100 00000 0 1000 0011 ..... @r_q2 -FxTOs 10 ..... 110100 00000 0 1000 0100 ..... @r_r2 -FxTOd 10 ..... 110100 00000 0 1000 1000 ..... @d_r2 -FxTOq 10 ..... 110100 00000 0 1000 1100 ..... @q_r2 +FsTOx 10 ..... 110100 00000 0 1000 0001 ..... @d_r2 +FdTOx 10 ..... 110100 00000 0 1000 0010 ..... @d_d2 +FqTOx 10 ..... 110100 00000 0 1000 0011 ..... @d_q2 +FxTOs 10 ..... 110100 00000 0 1000 0100 ..... @r_d2 +FxTOd 10 ..... 110100 00000 0 1000 1000 ..... @d_d2 +FxTOq 10 ..... 110100 00000 0 1000 1100 ..... @q_d2 FiTOs 10 ..... 110100 00000 0 1100 0100 ..... @r_r2 FdTOs 10 ..... 110100 00000 0 1100 0110 ..... @r_d2 FqTOs 10 ..... 110100 00000 0 1100 0111 ..... @r_q2 @@ -644,8 +649,8 @@ STF 11 ..... 100100 ..... . ............. @r_r_ri_na STFSR 11 00000 100101 ..... . ............. @n_r_ri STXFSR 11 00001 100101 ..... . ............. @n_r_ri { - STQF 11 ..... 100110 ..... . ............. @q_r_ri_na - STDFQ 11 ----- 100110 ----- - ------------- + STQF 11 ..... 100110 ..... . ............. @q_r_ri_na # v9 + STDFQ 11 ..... 100110 ..... . ............. @r_r_ri # v7,v8 } STDF 11 ..... 100111 ..... . ............. @d_r_ri_na diff --git a/target/sparc/int32_helper.c b/target/sparc/int32_helper.c index 6b7d65b0314..39db4ffa70a 100644 --- a/target/sparc/int32_helper.c +++ b/target/sparc/int32_helper.c @@ -21,9 +21,9 @@ #include "qemu/main-loop.h" #include "cpu.h" #include "trace.h" +#include "accel/tcg/cpu-ldst.h" #include "exec/log.h" -#include "sysemu/runstate.h" - +#include "system/runstate.h" static const char * const excp_names[0x80] = { [TT_TFAULT] = "Instruction Access Fault", @@ -116,22 +116,9 @@ void sparc_cpu_do_interrupt(CPUState *cs) qemu_log("%6d: %s (v=%02x)\n", count, name, intno); log_cpu_state(cs, 0); -#if 0 - { - int i; - uint8_t *ptr; - - qemu_log(" code="); - ptr = (uint8_t *)env->pc; - for (i = 0; i < 16; i++) { - qemu_log(" %02x", ldub(ptr + i)); - } - qemu_log("\n"); - } -#endif count++; } -#if !defined(CONFIG_USER_ONLY) +#ifndef CONFIG_USER_ONLY if (env->psret == 0) { if (cs->exception_index == 0x80 && env->def.features & CPU_FEATURE_TA0_SHUTDOWN) { @@ -143,6 +130,29 @@ void sparc_cpu_do_interrupt(CPUState *cs) } return; } + if (intno == TT_FP_EXCP) { + /* + * The sparc32 fpu has three states related to exception handling. + * The FPop that signals an exception transitions from fp_execute + * to fp_exception_pending. A subsequent FPop transitions from + * fp_exception_pending to fp_exception, which forces the trap. + * + * If the queue is not empty, this trap is due to execution of an + * illegal FPop while in fp_exception state. Here we are to + * re-enter fp_exception_pending state without queuing the insn. + * + * We do not model the fp_exception_pending state, but instead + * skip directly to fp_exception state. We advance pc/npc to + * mimic delayed trap delivery as if by the subsequent insn. 
+ */ + if (!env->fsr_qne) { + env->fsr_qne = FSR_QNE; + env->fq.s.addr = env->pc; + env->fq.s.insn = cpu_ldl_code(env, env->pc); + } + env->pc = env->npc; + env->npc = env->npc + 4; + } #endif env->psret = 0; cwp = cpu_cwp_dec(env, env->cwp - 1); diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c index d92c9f15934..2c63eb9e036 100644 --- a/target/sparc/ldst_helper.c +++ b/target/sparc/ldst_helper.c @@ -23,9 +23,14 @@ #include "cpu.h" #include "tcg/tcg.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" #include "exec/page-protection.h" -#include "exec/cpu_ldst.h" +#include "exec/target_page.h" +#include "accel/tcg/cpu-ldst.h" +#include "system/memory.h" +#ifdef CONFIG_USER_ONLY +#include "user/page-protection.h" +#endif #include "asi.h" //#define DEBUG_MMU @@ -596,6 +601,9 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, case 0x0C: /* Leon3 Date Cache config */ if (env->def.features & CPU_FEATURE_CACHE_CTRL) { ret = leon3_cache_control_ld(env, addr, size); + } else { + qemu_log_mask(LOG_UNIMP, "0x" TARGET_FMT_lx ": unimplemented" + " address, size: %d\n", addr, size); } break; case 0x01c00a00: /* MXCC control register */ @@ -812,6 +820,9 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, case 0x0C: /* Leon3 Date Cache config */ if (env->def.features & CPU_FEATURE_CACHE_CTRL) { leon3_cache_control_st(env, addr, val, size); + } else { + qemu_log_mask(LOG_UNIMP, "0x" TARGET_FMT_lx ": unimplemented" + " address, size: %d\n", addr, size); } break; diff --git a/target/sparc/machine.c b/target/sparc/machine.c index 48e0cf22f30..4dd75aff74a 100644 --- a/target/sparc/machine.c +++ b/target/sparc/machine.c @@ -1,6 +1,5 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "qemu/timer.h" #include "migration/cpu.h" @@ -143,6 +142,24 @@ static const VMStateInfo vmstate_xcc = { .get = get_xcc, .put = put_xcc, }; +#else +static bool fq_needed(void *opaque) +{ + SPARCCPU *cpu = opaque; + return cpu->env.fsr_qne; +} + +static const VMStateDescription vmstate_fq = { + .name = "cpu/fq", + .version_id = 1, + .minimum_version_id = 1, + .needed = fq_needed, + .fields = (const VMStateField[]) { + VMSTATE_UINT32(env.fq.s.addr, SPARCCPU), + VMSTATE_UINT32(env.fq.s.insn, SPARCCPU), + VMSTATE_END_OF_LIST() + }, +}; #endif static int cpu_pre_save(void *opaque) @@ -265,4 +282,11 @@ const VMStateDescription vmstate_sparc_cpu = { #endif VMSTATE_END_OF_LIST() }, +#ifndef TARGET_SPARC64 + .subsections = (const VMStateDescription * const []) { + &vmstate_fq, + NULL + }, +#endif + }; diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c index 9ff06026b8c..217580a4d8c 100644 --- a/target/sparc/mmu_helper.c +++ b/target/sparc/mmu_helper.c @@ -20,8 +20,12 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" +#include "accel/tcg/cpu-mmu-index.h" #include "exec/page-protection.h" +#include "exec/target_page.h" +#include "exec/tlb-flags.h" +#include "system/memory.h" #include "qemu/qemu-print.h" #include "trace.h" @@ -389,7 +393,7 @@ void dump_mmu(CPUSPARCState *env) * that the sparc ABI is followed. 
*/ int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address, - uint8_t *buf, int len, bool is_write) + uint8_t *buf, size_t len, bool is_write) { CPUSPARCState *env = cpu_env(cs); target_ulong addr = address; diff --git a/target/sparc/translate.c b/target/sparc/translate.c index 113639083b5..b922e53bf12 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -22,14 +22,16 @@ #include "cpu.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" +#include "exec/target_page.h" #include "tcg/tcg-op.h" #include "tcg/tcg-op-gvec.h" #include "exec/helper-gen.h" #include "exec/translator.h" +#include "exec/translation-block.h" #include "exec/log.h" #include "fpu/softfloat.h" #include "asi.h" +#include "target/sparc/translate.h" #define HELPER_H "helper.h" #include "exec/helper-info.c.inc" @@ -101,13 +103,6 @@ # define MAXTL_MASK 0 #endif -/* Dynamic PC, must exit to main loop. */ -#define DYNAMIC_PC 1 -/* Dynamic PC, one of two values according to jump_pc[T2]. */ -#define JUMP_PC 2 -/* Dynamic PC, may lookup next TB. */ -#define DYNAMIC_PC_LOOKUP 3 - #define DISAS_EXIT DISAS_TARGET_0 /* global register indexes */ @@ -185,6 +180,8 @@ typedef struct DisasContext { bool supervisor; #ifdef TARGET_SPARC64 bool hypervisor; +#else + bool fsr_qne; #endif #endif @@ -398,8 +395,7 @@ static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin) TCGv z = tcg_constant_tl(0); if (cin) { - tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z); - tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z); + tcg_gen_addcio_tl(cpu_cc_N, cpu_cc_C, src1, src2, cin); } else { tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z); } @@ -1362,93 +1358,109 @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src) static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3) { - gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0)); + TCGv_i32 z = tcg_constant_i32(0); + gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z); } static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3) { - gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0)); + TCGv_i32 z = tcg_constant_i32(0); + gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z); } static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3) { - int op = float_muladd_negate_c; - gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op)); + TCGv_i32 z = tcg_constant_i32(0); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c); + gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op); } static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3) { - int op = float_muladd_negate_c; - gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op)); + TCGv_i32 z = tcg_constant_i32(0); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c); + gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op); } static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3) { - int op = float_muladd_negate_c | float_muladd_negate_result; - gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op)); + TCGv_i32 z = tcg_constant_i32(0); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c | + float_muladd_negate_result); + gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op); } static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3) { - int op = float_muladd_negate_c | float_muladd_negate_result; - gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op)); + TCGv_i32 z = tcg_constant_i32(0); + TCGv_i32 op = 
tcg_constant_i32(float_muladd_negate_c | + float_muladd_negate_result); + gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op); } static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3) { - int op = float_muladd_negate_result; - gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op)); + TCGv_i32 z = tcg_constant_i32(0); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result); + gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op); } static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3) { - int op = float_muladd_negate_result; - gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op)); + TCGv_i32 z = tcg_constant_i32(0); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result); + gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op); } /* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */ static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2) { - TCGv_i32 one = tcg_constant_i32(float32_one); - int op = float_muladd_halve_result; - gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op)); + TCGv_i32 fone = tcg_constant_i32(float32_one); + TCGv_i32 mone = tcg_constant_i32(-1); + TCGv_i32 op = tcg_constant_i32(0); + gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op); } static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2) { - TCGv_i64 one = tcg_constant_i64(float64_one); - int op = float_muladd_halve_result; - gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op)); + TCGv_i64 fone = tcg_constant_i64(float64_one); + TCGv_i32 mone = tcg_constant_i32(-1); + TCGv_i32 op = tcg_constant_i32(0); + gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op); } /* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */ static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2) { - TCGv_i32 one = tcg_constant_i32(float32_one); - int op = float_muladd_negate_c | float_muladd_halve_result; - gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op)); + TCGv_i32 fone = tcg_constant_i32(float32_one); + TCGv_i32 mone = tcg_constant_i32(-1); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c); + gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op); } static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2) { - TCGv_i64 one = tcg_constant_i64(float64_one); - int op = float_muladd_negate_c | float_muladd_halve_result; - gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op)); + TCGv_i64 fone = tcg_constant_i64(float64_one); + TCGv_i32 mone = tcg_constant_i32(-1); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c); + gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op); } /* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. 
*/ static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2) { - TCGv_i32 one = tcg_constant_i32(float32_one); - int op = float_muladd_negate_result | float_muladd_halve_result; - gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op)); + TCGv_i32 fone = tcg_constant_i32(float32_one); + TCGv_i32 mone = tcg_constant_i32(-1); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result); + gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op); } static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2) { - TCGv_i64 one = tcg_constant_i64(float64_one); - int op = float_muladd_negate_result | float_muladd_halve_result; - gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op)); + TCGv_i64 fone = tcg_constant_i64(float64_one); + TCGv_i32 mone = tcg_constant_i32(-1); + TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result); + gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op); } static void gen_op_fpexception_im(DisasContext *dc, int ftt) @@ -1463,15 +1475,48 @@ static void gen_op_fpexception_im(DisasContext *dc, int ftt) gen_exception(dc, TT_FP_EXCP); } -static int gen_trap_ifnofpu(DisasContext *dc) +static bool gen_trap_ifnofpu(DisasContext *dc) { #if !defined(CONFIG_USER_ONLY) if (!dc->fpu_enabled) { gen_exception(dc, TT_NFPU_INSN); - return 1; + return true; } #endif - return 0; + return false; +} + +static bool gen_trap_iffpexception(DisasContext *dc) +{ +#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) + /* + * There are 3 states for the sparc32 fpu: + * Normally the fpu is in fp_execute, and all insns are allowed. + * When an exception is signaled, it moves to fp_exception_pending state. + * Upon seeing the next FPop, the fpu moves to fp_exception state, + * populates the FQ, and generates an fp_exception trap. + * The fpu remains in fp_exception state until FQ becomes empty + * after execution of a STDFQ instruction. While the fpu is in + * fp_exception state, and FPop, fp load or fp branch insn will + * return to fp_exception_pending state, set FSR.FTT to sequence_error, + * and the insn will not be entered into the FQ. + * + * In QEMU, we do not model the fp_exception_pending state and + * instead populate FQ and raise the exception immediately. + * But we can still honor fp_exception state by noticing when + * the FQ is not empty. 
+ */ + if (dc->fsr_qne) { + gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR); + return true; + } +#endif + return false; +} + +static bool gen_trap_if_nofpu_fpexception(DisasContext *dc) +{ + return gen_trap_ifnofpu(dc) || gen_trap_iffpexception(dc); } /* asi moves */ @@ -2641,7 +2686,7 @@ static bool do_fbpfcc(DisasContext *dc, arg_bcc *a) { DisasCompare cmp; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } gen_fcompare(&cmp, a->cc, a->cond); @@ -2836,6 +2881,14 @@ static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst) TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config) +static TCGv do_rdpic(DisasContext *dc, TCGv dst) +{ + return tcg_constant_tl(0); +} + +TRANS(RDPIC, HYPV, do_rd_special, supervisor(dc), a->rd, do_rdpic) + + static TCGv do_rdccr(DisasContext *dc, TCGv dst) { gen_helper_rdccr(dst, tcg_env); @@ -3269,6 +3322,17 @@ static void do_wrfprs(DisasContext *dc, TCGv src) TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs) +static bool do_priv_nop(DisasContext *dc, bool priv) +{ + if (!priv) { + return raise_priv(dc); + } + return advance_pc(dc); +} + +TRANS(WRPCR, HYPV, do_priv_nop, supervisor(dc)) +TRANS(WRPIC, HYPV, do_priv_nop, supervisor(dc)) + static void do_wrgsr(DisasContext *dc, TCGv src) { gen_trap_ifnofpu(dc); @@ -4480,7 +4544,7 @@ static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz) if (addr == NULL) { return false; } - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } if (sz == MO_128 && gen_trap_float128(dc)) { @@ -4508,6 +4572,7 @@ static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz) if (addr == NULL) { return false; } + /* Store insns are ok in fp_exception_pending state. */ if (gen_trap_ifnofpu(dc)) { return true; } @@ -4521,7 +4586,7 @@ static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz) TRANS(STF, ALL, do_st_fpr, a, MO_32) TRANS(STDF, ALL, do_st_fpr, a, MO_64) -TRANS(STQF, ALL, do_st_fpr, a, MO_128) +TRANS(STQF, 64, do_st_fpr, a, MO_128) TRANS(STFA, 64, do_st_fpr, a, MO_32) TRANS(STDFA, 64, do_st_fpr, a, MO_64) @@ -4529,17 +4594,41 @@ TRANS(STQFA, 64, do_st_fpr, a, MO_128) static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a) { + TCGv addr; + if (!avail_32(dc)) { return false; } + addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); + if (addr == NULL) { + return false; + } if (!supervisor(dc)) { return raise_priv(dc); } +#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) if (gen_trap_ifnofpu(dc)) { return true; } - gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR); - return true; + if (!dc->fsr_qne) { + gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR); + return true; + } + + /* Store the single element from the queue. */ + TCGv_i64 fq = tcg_temp_new_i64(); + tcg_gen_ld_i64(fq, tcg_env, offsetof(CPUSPARCState, fq.d)); + tcg_gen_qemu_st_i64(fq, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN_4); + + /* Mark the queue empty, transitioning to fp_execute state. 
*/ + tcg_gen_st_i32(tcg_constant_i32(0), tcg_env, + offsetof(CPUSPARCState, fsr_qne)); + dc->fsr_qne = 0; + + return advance_pc(dc); +#else + qemu_build_not_reached(); +#endif } static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a) @@ -4550,7 +4639,7 @@ static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a) if (addr == NULL) { return false; } - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } @@ -4574,7 +4663,7 @@ static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire) if (addr == NULL) { return false; } - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } @@ -4611,6 +4700,7 @@ static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop) if (addr == NULL) { return false; } + /* Store insns are ok in fp_exception_pending state. */ if (gen_trap_ifnofpu(dc)) { return true; } @@ -4653,7 +4743,7 @@ static bool do_ff(DisasContext *dc, arg_r_r *a, { TCGv_i32 tmp; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } @@ -4694,7 +4784,7 @@ static bool do_env_ff(DisasContext *dc, arg_r_r *a, { TCGv_i32 tmp; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } @@ -4714,7 +4804,7 @@ static bool do_env_fd(DisasContext *dc, arg_r_r *a, TCGv_i32 dst; TCGv_i64 src; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } @@ -4734,7 +4824,7 @@ static bool do_dd(DisasContext *dc, arg_r_r *a, { TCGv_i64 dst, src; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } @@ -4756,7 +4846,7 @@ static bool do_env_dd(DisasContext *dc, arg_r_r *a, { TCGv_i64 dst, src; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } @@ -4796,7 +4886,7 @@ static bool do_env_df(DisasContext *dc, arg_r_r *a, TCGv_i64 dst; TCGv_i32 src; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } @@ -4839,7 +4929,7 @@ static bool do_env_qq(DisasContext *dc, arg_r_r *a, { TCGv_i128 t; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } if (gen_trap_float128(dc)) { @@ -4860,7 +4950,7 @@ static bool do_env_fq(DisasContext *dc, arg_r_r *a, TCGv_i128 src; TCGv_i32 dst; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } if (gen_trap_float128(dc)) { @@ -4883,7 +4973,7 @@ static bool do_env_dq(DisasContext *dc, arg_r_r *a, TCGv_i128 src; TCGv_i64 dst; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } if (gen_trap_float128(dc)) { @@ -4906,7 +4996,7 @@ static bool do_env_qf(DisasContext *dc, arg_r_r *a, TCGv_i32 src; TCGv_i128 dst; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } if (gen_trap_float128(dc)) { @@ -4929,10 +5019,7 @@ static bool do_env_qd(DisasContext *dc, arg_r_r *a, TCGv_i64 src; TCGv_i128 dst; - if (gen_trap_ifnofpu(dc)) { - return true; - } - if (gen_trap_float128(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } @@ -4989,7 +5076,7 @@ static bool do_env_fff(DisasContext *dc, arg_r_r_r *a, { TCGv_i32 src1, src2; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } @@ -5198,7 +5285,7 @@ static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a, { TCGv_i64 dst, src1, src2; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } @@ -5222,7 +5309,7 @@ static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a) TCGv_i64 dst; 
TCGv_i32 src1, src2; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } if (!(dc->def->features & CPU_FEATURE_FSMULD)) { @@ -5331,7 +5418,7 @@ static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a, { TCGv_i128 src1, src2; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } if (gen_trap_float128(dc)) { @@ -5355,7 +5442,7 @@ static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a) TCGv_i64 src1, src2; TCGv_i128 dst; - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } if (gen_trap_float128(dc)) { @@ -5445,7 +5532,7 @@ static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e) if (avail_32(dc) && a->cc != 0) { return false; } - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } @@ -5469,7 +5556,7 @@ static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e) if (avail_32(dc) && a->cc != 0) { return false; } - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } @@ -5493,7 +5580,7 @@ static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e) if (avail_32(dc) && a->cc != 0) { return false; } - if (gen_trap_ifnofpu(dc)) { + if (gen_trap_if_nofpu_fpexception(dc)) { return true; } if (gen_trap_float128(dc)) { @@ -5526,7 +5613,7 @@ static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a) src1 = gen_load_fpr_F(dc, a->rs1); src2 = gen_load_fpr_F(dc, a->rs2); - gen_helper_flcmps(cpu_fcc[a->cc], src1, src2); + gen_helper_flcmps(cpu_fcc[a->cc], tcg_env, src1, src2); return advance_pc(dc); } @@ -5543,7 +5630,7 @@ static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a) src1 = gen_load_fpr_D(dc, a->rs1); src2 = gen_load_fpr_D(dc, a->rs2); - gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2); + gen_helper_flcmpd(cpu_fcc[a->cc], tcg_env, src1, src2); return advance_pc(dc); } @@ -5596,13 +5683,15 @@ static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags); #ifndef CONFIG_USER_ONLY dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0; +# ifdef TARGET_SPARC64 + dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0; +# else + dc->fsr_qne = (dc->base.tb->flags & TB_FLAG_FSR_QNE) != 0; +# endif #endif #ifdef TARGET_SPARC64 dc->fprs_dirty = 0; dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff; -#ifndef CONFIG_USER_ONLY - dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0; -#endif #endif /* * if we reach a page boundary, we stop generation so that the @@ -5748,8 +5837,8 @@ static const TranslatorOps sparc_tr_ops = { .tb_stop = sparc_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void sparc_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc = {}; @@ -5821,26 +5910,3 @@ void sparc_tcg_init(void) gregnames[i]); } } - -void sparc_restore_state_to_opc(CPUState *cs, - const TranslationBlock *tb, - const uint64_t *data) -{ - CPUSPARCState *env = cpu_env(cs); - target_ulong pc = data[0]; - target_ulong npc = data[1]; - - env->pc = pc; - if (npc == DYNAMIC_PC) { - /* dynamic NPC: already stored */ - } else if (npc & JUMP_PC) { - /* jump PC: use 'cond' and the jump targets of the translation */ - if (env->cond) { - env->npc = npc & ~3; - } else { - env->npc = pc + 4; - } - } else { - env->npc = npc; - } -} diff --git a/target/sparc/translate.h b/target/sparc/translate.h new file mode 100644 index 
00000000000..a46fa4f124b --- /dev/null +++ b/target/sparc/translate.h @@ -0,0 +1,17 @@ +/* + * QEMU translation definitions for SPARC + * + * Copyright (c) 2024 Linaro, Ltd + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef SPARC_TRANSLATION_H +#define SPARC_TRANSLATION_H + +/* Dynamic PC, must exit to main loop. */ +#define DYNAMIC_PC 1 +/* Dynamic PC, one of two values according to jump_pc[T2]. */ +#define JUMP_PC 2 +/* Dynamic PC, may lookup next TB. */ +#define DYNAMIC_PC_LOOKUP 3 + +#endif diff --git a/target/sparc/win_helper.c b/target/sparc/win_helper.c index b53fc9ce940..9ad9d01e8b9 100644 --- a/target/sparc/win_helper.c +++ b/target/sparc/win_helper.c @@ -20,33 +20,22 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "trace.h" -static inline void memcpy32(target_ulong *dst, const target_ulong *src) -{ - dst[0] = src[0]; - dst[1] = src[1]; - dst[2] = src[2]; - dst[3] = src[3]; - dst[4] = src[4]; - dst[5] = src[5]; - dst[6] = src[6]; - dst[7] = src[7]; -} - void cpu_set_cwp(CPUSPARCState *env, int new_cwp) { /* put the modified wrap registers at their proper location */ if (env->cwp == env->nwindows - 1) { - memcpy32(env->regbase, env->regbase + env->nwindows * 16); + memcpy(env->regbase, env->regbase + env->nwindows * 16, + sizeof(env->gregs)); } env->cwp = new_cwp; /* put the wrap registers at their temporary location */ if (new_cwp == env->nwindows - 1) { - memcpy32(env->regbase + env->nwindows * 16, env->regbase); + memcpy(env->regbase + env->nwindows * 16, env->regbase, + sizeof(env->gregs)); } env->regwptr = env->regbase + (new_cwp * 16); } @@ -361,8 +350,8 @@ void cpu_gl_switch_gregs(CPUSPARCState *env, uint32_t new_gl) dst = get_gl_gregset(env, env->gl); if (src != dst) { - memcpy32(dst, env->gregs); - memcpy32(env->gregs, src); + memcpy(dst, env->gregs, sizeof(env->gregs)); + memcpy(env->gregs, src, sizeof(env->gregs)); } } @@ -393,8 +382,8 @@ void cpu_change_pstate(CPUSPARCState *env, uint32_t new_pstate) /* Switch global register bank */ src = get_gregset(env, new_pstate_regs); dst = get_gregset(env, pstate_regs); - memcpy32(dst, env->gregs); - memcpy32(env->gregs, src); + memcpy(dst, env->gregs, sizeof(env->gregs)); + memcpy(env->gregs, src, sizeof(env->gregs)); } else { trace_win_helper_no_switch_pstate(new_pstate_regs); } diff --git a/target/tricore/cpu-param.h b/target/tricore/cpu-param.h index e29d551dd67..eb33a67c419 100644 --- a/target/tricore/cpu-param.h +++ b/target/tricore/cpu-param.h @@ -8,9 +8,10 @@ #ifndef TRICORE_CPU_PARAM_H #define TRICORE_CPU_PARAM_H -#define TARGET_LONG_BITS 32 #define TARGET_PAGE_BITS 14 #define TARGET_PHYS_ADDR_SPACE_BITS 32 #define TARGET_VIRT_ADDR_SPACE_BITS 32 +#define TARGET_INSN_START_EXTRA_WORDS 0 + #endif diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c index 1a261715907..4f035b6f768 100644 --- a/target/tricore/cpu.c +++ b/target/tricore/cpu.c @@ -20,9 +20,10 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/translation-block.h" #include "qemu/error-report.h" #include "tcg/debug-assert.h" +#include "accel/tcg/cpu-ops.h" static inline void set_feature(CPUTriCoreState *env, int feature) { @@ -44,6 +45,16 @@ static vaddr tricore_cpu_get_pc(CPUState *cs) return cpu_env(cs)->PC; } +static TCGTBCPUState tricore_get_tb_cpu_state(CPUState *cs) +{ + CPUTriCoreState *env = cpu_env(cs); + + return (TCGTBCPUState){ + .pc = env->PC, + .flags = FIELD_DP32(0, TB_FLAGS, PRIV, 
extract32(env->PSW, 10, 2)), + }; +} + static void tricore_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -164,21 +175,28 @@ static bool tricore_cpu_exec_interrupt(CPUState *cs, int interrupt_request) #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps tricore_sysemu_ops = { + .has_work = tricore_cpu_has_work, .get_phys_page_debug = tricore_cpu_get_phys_page_debug, }; -#include "hw/core/tcg-cpu-ops.h" - static const TCGCPUOps tricore_tcg_ops = { + /* MTTCG not yet supported: require strict ordering */ + .guest_default_memory_order = TCG_MO_ALL, + .mttcg_supported = false, .initialize = tricore_tcg_init, + .translate_code = tricore_translate_code, + .get_tb_cpu_state = tricore_get_tb_cpu_state, .synchronize_from_tb = tricore_cpu_synchronize_from_tb, .restore_state_to_opc = tricore_restore_state_to_opc, + .mmu_index = tricore_cpu_mmu_index, .tlb_fill = tricore_cpu_tlb_fill, + .pointer_wrap = cpu_pointer_wrap_uint32, .cpu_exec_interrupt = tricore_cpu_exec_interrupt, .cpu_exec_halt = tricore_cpu_has_work, + .cpu_exec_reset = cpu_reset, }; -static void tricore_cpu_class_init(ObjectClass *c, void *data) +static void tricore_cpu_class_init(ObjectClass *c, const void *data) { TriCoreCPUClass *mcc = TRICORE_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); @@ -191,8 +209,6 @@ static void tricore_cpu_class_init(ObjectClass *c, void *data) resettable_class_set_parent_phases(rc, NULL, tricore_cpu_reset_hold, NULL, &mcc->parent_phases); cc->class_by_name = tricore_cpu_class_by_name; - cc->has_work = tricore_cpu_has_work; - cc->mmu_index = tricore_cpu_mmu_index; cc->gdb_read_register = tricore_cpu_gdb_read_register; cc->gdb_write_register = tricore_cpu_gdb_write_register; diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h index 220af69fc25..82085fbc32f 100644 --- a/target/tricore/cpu.h +++ b/target/tricore/cpu.h @@ -22,10 +22,15 @@ #include "cpu-qom.h" #include "hw/registerfields.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" #include "qemu/cpu-float.h" #include "tricore-defs.h" +#ifdef CONFIG_USER_ONLY +#error "TriCore does not support user mode emulation" +#endif + typedef struct CPUArchState { /* GPR Register */ uint32_t gpr_a[16]; @@ -246,24 +251,12 @@ void fpu_set_state(CPUTriCoreState *env); #define MMU_USER_IDX 2 -#include "exec/cpu-all.h" - FIELD(TB_FLAGS, PRIV, 0, 2) void cpu_state_reset(CPUTriCoreState *s); void tricore_tcg_init(void); - -static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags) -{ - uint32_t new_flags = 0; - *pc = env->PC; - *cs_base = 0; - - new_flags |= FIELD_DP32(new_flags, TB_FLAGS, PRIV, - extract32(env->PSW, 10, 2)); - *flags = new_flags; -} +void tricore_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); #define CPU_RESOLVING_TYPE TYPE_TRICORE_CPU diff --git a/target/tricore/fpu_helper.c b/target/tricore/fpu_helper.c index 5d38aea143a..1b72dcc5f5c 100644 --- a/target/tricore/fpu_helper.c +++ b/target/tricore/fpu_helper.c @@ -43,7 +43,7 @@ static inline uint8_t f_get_excp_flags(CPUTriCoreState *env) & (float_flag_invalid | float_flag_overflow | float_flag_underflow - | float_flag_output_denormal + | float_flag_output_denormal_flushed | float_flag_divbyzero | float_flag_inexact); } @@ -99,7 +99,7 @@ static void f_update_psw_flags(CPUTriCoreState *env, uint8_t flags) some_excp = 1; } - if (flags & float_flag_underflow || flags & float_flag_output_denormal) { + if (flags & float_flag_underflow || flags & 
float_flag_output_denormal_flushed) { env->FPU_FU = 1 << 31; some_excp = 1; } @@ -109,7 +109,7 @@ static void f_update_psw_flags(CPUTriCoreState *env, uint8_t flags) some_excp = 1; } - if (flags & float_flag_inexact || flags & float_flag_output_denormal) { + if (flags & float_flag_inexact || flags & float_flag_output_denormal_flushed) { env->PSW |= 1 << 26; some_excp = 1; } diff --git a/target/tricore/gdbstub.c b/target/tricore/gdbstub.c index 29a70051ffe..0b73b1280e0 100644 --- a/target/tricore/gdbstub.c +++ b/target/tricore/gdbstub.c @@ -124,7 +124,7 @@ int tricore_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) CPUTriCoreState *env = cpu_env(cs); uint32_t tmp; - tmp = ldl_p(mem_buf); + tmp = ldl_le_p(mem_buf); if (n < 16) { /* data registers */ env->gpr_d[n] = tmp; diff --git a/target/tricore/helper.c b/target/tricore/helper.c index 7014255f77c..e4c53d453dd 100644 --- a/target/tricore/helper.c +++ b/target/tricore/helper.c @@ -19,8 +19,10 @@ #include "qemu/log.h" #include "hw/registerfields.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" +#include "accel/tcg/cpu-mmu-index.h" #include "exec/page-protection.h" +#include "exec/target_page.h" #include "fpu/softfloat-helpers.h" #include "qemu/qemu-print.h" @@ -116,7 +118,10 @@ void fpu_set_state(CPUTriCoreState *env) set_flush_inputs_to_zero(1, &env->fp_status); set_flush_to_zero(1, &env->fp_status); set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status); + set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status); set_default_nan_mode(1, &env->fp_status); + /* Default NaN pattern: sign bit clear, frac msb set */ + set_float_default_nan_pattern(0b01000000, &env->fp_status); } uint32_t psw_read(CPUTriCoreState *env) diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c index a0d5a0da1df..9910c13f4b5 100644 --- a/target/tricore/op_helper.c +++ b/target/tricore/op_helper.c @@ -18,8 +18,7 @@ #include "cpu.h" #include "qemu/host-utils.h" #include "exec/helper-proto.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include /* for crc32 */ diff --git a/target/tricore/translate.c b/target/tricore/translate.c index a46a03e1fd8..3d0e7a10bd8 100644 --- a/target/tricore/translate.c +++ b/target/tricore/translate.c @@ -20,9 +20,8 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "tcg/tcg-op.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" #include "qemu/qemu-print.h" #include "exec/helper-proto.h" @@ -30,6 +29,8 @@ #include "tricore-opcodes.h" #include "exec/translator.h" +#include "exec/translation-block.h" +#include "exec/target_page.h" #include "exec/log.h" #define HELPER_H "helper.h" @@ -1344,15 +1345,11 @@ static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con) static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2) { - TCGv carry = tcg_temp_new_i32(); - TCGv t0 = tcg_temp_new_i32(); + TCGv t0 = tcg_temp_new_i32(); TCGv result = tcg_temp_new_i32(); - tcg_gen_movi_tl(t0, 0); - tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0); /* Addition, carry and set C/V/SV bits */ - tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0); - tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0); + tcg_gen_addcio_i32(result, cpu_PSW_C, r1, r2, cpu_PSW_C); /* calc V bit */ tcg_gen_xor_tl(cpu_PSW_V, result, r1); tcg_gen_xor_tl(t0, r1, r2); @@ -2732,8 +2729,7 @@ static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos) TCGv temp = tcg_temp_new(); TCGv 
temp2 = tcg_temp_new(); - tcg_gen_movi_tl(mask, 1); - tcg_gen_shl_tl(mask, mask, width); + tcg_gen_shl_tl(mask, tcg_constant_tl(1), width); tcg_gen_subi_tl(mask, mask, 1); tcg_gen_shl_tl(mask, mask, pos); @@ -3980,7 +3976,7 @@ static void decode_bit_andacc(DisasContext *ctx) pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl); break; case OPC2_32_BIT_AND_NOR_T: - if (TCG_TARGET_HAS_andc_i32) { + if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0)) { gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl); } else { @@ -4113,7 +4109,7 @@ static void decode_bit_orand(DisasContext *ctx) pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl); break; case OPC2_32_BIT_OR_NOR_T: - if (TCG_TARGET_HAS_orc_i32) { + if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I32, 0)) { gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl); } else { @@ -8460,9 +8456,8 @@ static const TranslatorOps tricore_tr_ops = { .tb_stop = tricore_tr_tb_stop, }; - -void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void tricore_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext ctx; translator_loop(cs, tb, max_insns, pc, host_pc, diff --git a/target/xtensa/core-dc232b/gdb-config.c.inc b/target/xtensa/core-dc232b/gdb-config.c.inc index d87168628be..8c88caef59d 100644 --- a/target/xtensa/core-dc232b/gdb-config.c.inc +++ b/target/xtensa/core-dc232b/gdb-config.c.inc @@ -15,9 +15,8 @@ GNU General Public License for more details. You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, - Boston, MA 02110-1301, USA. */ + along with this program; if not, see + . */ XTREG(0, 0, 32, 4, 4, 0x0020, 0x0006, -2, 9, 0x0100, pc, 0, 0, 0, 0, 0, 0) diff --git a/target/xtensa/core-dc232b/xtensa-modules.c.inc b/target/xtensa/core-dc232b/xtensa-modules.c.inc index 164df3b1a47..bb9ebd24b83 100644 --- a/target/xtensa/core-dc232b/xtensa-modules.c.inc +++ b/target/xtensa/core-dc232b/xtensa-modules.c.inc @@ -14,9 +14,8 @@ General Public License for more details. You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with this program; if not, see + . */ #include "qemu/osdep.h" #include "xtensa-isa.h" diff --git a/target/xtensa/core-fsf/xtensa-modules.c.inc b/target/xtensa/core-fsf/xtensa-modules.c.inc index c32683ff77c..531f5e2b7e6 100644 --- a/target/xtensa/core-fsf/xtensa-modules.c.inc +++ b/target/xtensa/core-fsf/xtensa-modules.c.inc @@ -14,9 +14,8 @@ General Public License for more details. You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA - 02110-1301, USA. */ + along with this program; if not, see + . 
*/ #include "qemu/osdep.h" #include "xtensa-isa.h" diff --git a/target/xtensa/cpu-param.h b/target/xtensa/cpu-param.h index 0000725f2f9..7a0c22c9005 100644 --- a/target/xtensa/cpu-param.h +++ b/target/xtensa/cpu-param.h @@ -8,7 +8,6 @@ #ifndef XTENSA_CPU_PARAM_H #define XTENSA_CPU_PARAM_H -#define TARGET_LONG_BITS 32 #define TARGET_PAGE_BITS 12 #define TARGET_PHYS_ADDR_SPACE_BITS 32 #ifdef CONFIG_USER_ONLY @@ -17,7 +16,6 @@ #define TARGET_VIRT_ADDR_SPACE_BITS 32 #endif -/* Xtensa processors have a weak memory model */ -#define TCG_GUEST_DEFAULT_MO (0) +#define TARGET_INSN_START_EXTRA_WORDS 0 #endif diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c index a08c7a0b1f2..ea9b6df3aa2 100644 --- a/target/xtensa/cpu.c +++ b/target/xtensa/cpu.c @@ -35,8 +35,9 @@ #include "qemu/module.h" #include "migration/vmstate.h" #include "hw/qdev-clock.h" +#include "accel/tcg/cpu-ops.h" #ifndef CONFIG_USER_ONLY -#include "exec/memory.h" +#include "system/memory.h" #endif @@ -54,6 +55,80 @@ static vaddr xtensa_cpu_get_pc(CPUState *cs) return cpu->env.pc; } +static TCGTBCPUState xtensa_get_tb_cpu_state(CPUState *cs) +{ + CPUXtensaState *env = cpu_env(cs); + uint32_t flags = 0; + target_ulong cs_base = 0; + + flags |= xtensa_get_ring(env); + if (env->sregs[PS] & PS_EXCM) { + flags |= XTENSA_TBFLAG_EXCM; + } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_LOOP)) { + target_ulong lend_dist = + env->sregs[LEND] - (env->pc & -(1u << TARGET_PAGE_BITS)); + + /* + * 0 in the csbase_lend field means that there may not be a loopback + * for any instruction that starts inside this page. Any other value + * means that an instruction that ends at this offset from the page + * start may loop back and will need loopback code to be generated. + * + * lend_dist is 0 when LEND points to the start of the page, but + * no instruction that starts inside this page may end at offset 0, + * so it's still correct. + * + * When an instruction ends at a page boundary it may only start in + * the previous page. lend_dist will be encoded as TARGET_PAGE_SIZE + * for the TB that contains this instruction. 
+ */ + if (lend_dist < (1u << TARGET_PAGE_BITS) + env->config->max_insn_size) { + target_ulong lbeg_off = env->sregs[LEND] - env->sregs[LBEG]; + + cs_base = lend_dist; + if (lbeg_off < 256) { + cs_base |= lbeg_off << XTENSA_CSBASE_LBEG_OFF_SHIFT; + } + } + } + if (xtensa_option_enabled(env->config, XTENSA_OPTION_EXTENDED_L32R) && + (env->sregs[LITBASE] & 1)) { + flags |= XTENSA_TBFLAG_LITBASE; + } + if (xtensa_option_enabled(env->config, XTENSA_OPTION_DEBUG)) { + if (xtensa_get_cintlevel(env) < env->config->debug_level) { + flags |= XTENSA_TBFLAG_DEBUG; + } + if (xtensa_get_cintlevel(env) < env->sregs[ICOUNTLEVEL]) { + flags |= XTENSA_TBFLAG_ICOUNT; + } + } + if (xtensa_option_enabled(env->config, XTENSA_OPTION_COPROCESSOR)) { + flags |= env->sregs[CPENABLE] << XTENSA_TBFLAG_CPENABLE_SHIFT; + } + if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER) && + (env->sregs[PS] & (PS_WOE | PS_EXCM)) == PS_WOE) { + uint32_t windowstart = xtensa_replicate_windowstart(env) >> + (env->sregs[WINDOW_BASE] + 1); + uint32_t w = ctz32(windowstart | 0x8); + + flags |= (w << XTENSA_TBFLAG_WINDOW_SHIFT) | XTENSA_TBFLAG_CWOE; + flags |= extract32(env->sregs[PS], PS_CALLINC_SHIFT, + PS_CALLINC_LEN) << XTENSA_TBFLAG_CALLINC_SHIFT; + } else { + flags |= 3 << XTENSA_TBFLAG_WINDOW_SHIFT; + } + if (env->yield_needed) { + flags |= XTENSA_TBFLAG_YIELD; + } + + return (TCGTBCPUState){ + .pc = env->pc, + .flags = flags, + .cs_base = cs_base, + }; +} + static void xtensa_restore_state_to_opc(CPUState *cs, const TranslationBlock *tb, const uint64_t *data) @@ -63,16 +138,14 @@ static void xtensa_restore_state_to_opc(CPUState *cs, cpu->env.pc = data[0]; } +#ifndef CONFIG_USER_ONLY static bool xtensa_cpu_has_work(CPUState *cs) { -#ifndef CONFIG_USER_ONLY - XtensaCPU *cpu = XTENSA_CPU(cs); + CPUXtensaState *env = cpu_env(cs); - return !cpu->env.runstall && cpu->env.pending_irq_level; -#else - return true; -#endif + return !env->runstall && env->pending_irq_level; } +#endif /* !CONFIG_USER_ONLY */ static int xtensa_cpu_mmu_index(CPUState *cs, bool ifetch) { @@ -133,8 +206,12 @@ static void xtensa_cpu_reset_hold(Object *obj, ResetType type) reset_mmu(env); cs->halted = env->runstall; #endif + /* For inf * 0 + NaN, return the input NaN */ + set_float_infzeronan_rule(float_infzeronan_dnan_never, &env->fp_status); set_no_signaling_nans(!dfpu, &env->fp_status); - set_use_first_nan(!dfpu, &env->fp_status); + /* Default NaN value: sign bit clear, set frac msb */ + set_float_default_nan_pattern(0b01000000, &env->fp_status); + xtensa_use_first_nan(env, !dfpu); } static ObjectClass *xtensa_cpu_class_by_name(const char *cpu_model) @@ -155,6 +232,8 @@ static void xtensa_cpu_disas_set_info(CPUState *cs, disassemble_info *info) info->private_data = cpu->env.config->isa; info->print_insn = print_insn_xtensa; + info->endian = TARGET_BIG_ENDIAN ? 
BFD_ENDIAN_BIG + : BFD_ENDIAN_LITTLE; } static void xtensa_cpu_realizefn(DeviceState *dev, Error **errp) @@ -204,7 +283,7 @@ XtensaCPU *xtensa_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk) { DeviceState *cpu; - cpu = DEVICE(object_new(cpu_type)); + cpu = qdev_new(cpu_type); qdev_connect_clock_in(cpu, "clk-in", cpu_refclk); qdev_realize(cpu, NULL, &error_abort); @@ -220,21 +299,29 @@ static const VMStateDescription vmstate_xtensa_cpu = { #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps xtensa_sysemu_ops = { + .has_work = xtensa_cpu_has_work, .get_phys_page_debug = xtensa_cpu_get_phys_page_debug, }; #endif -#include "hw/core/tcg-cpu-ops.h" - static const TCGCPUOps xtensa_tcg_ops = { + /* Xtensa processors have a weak memory model */ + .guest_default_memory_order = 0, + .mttcg_supported = true, + .initialize = xtensa_translate_init, + .translate_code = xtensa_translate_code, .debug_excp_handler = xtensa_breakpoint_handler, + .get_tb_cpu_state = xtensa_get_tb_cpu_state, .restore_state_to_opc = xtensa_restore_state_to_opc, + .mmu_index = xtensa_cpu_mmu_index, #ifndef CONFIG_USER_ONLY .tlb_fill = xtensa_cpu_tlb_fill, + .pointer_wrap = cpu_pointer_wrap_uint32, .cpu_exec_interrupt = xtensa_cpu_exec_interrupt, .cpu_exec_halt = xtensa_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = xtensa_cpu_do_interrupt, .do_transaction_failed = xtensa_cpu_do_transaction_failed, .do_unaligned_access = xtensa_cpu_do_unaligned_access, @@ -242,7 +329,7 @@ static const TCGCPUOps xtensa_tcg_ops = { #endif /* !CONFIG_USER_ONLY */ }; -static void xtensa_cpu_class_init(ObjectClass *oc, void *data) +static void xtensa_cpu_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); @@ -256,8 +343,6 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data) &xcc->parent_phases); cc->class_by_name = xtensa_cpu_class_by_name; - cc->has_work = xtensa_cpu_has_work; - cc->mmu_index = xtensa_cpu_mmu_index; cc->dump_state = xtensa_cpu_dump_state; cc->set_pc = xtensa_cpu_set_pc; cc->get_pc = xtensa_cpu_get_pc; diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h index 9f2341d8563..74122ebe15c 100644 --- a/target/xtensa/cpu.h +++ b/target/xtensa/cpu.h @@ -30,7 +30,9 @@ #include "cpu-qom.h" #include "qemu/cpu-float.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #include "hw/clock.h" #include "xtensa-isa.h" @@ -490,7 +492,7 @@ typedef struct XtensaConfig { } XtensaConfig; typedef struct XtensaConfigList { - const XtensaConfig *config; + XtensaConfig *config; struct XtensaConfigList *next; } XtensaConfigList; @@ -617,6 +619,8 @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, void xtensa_collect_sr_names(const XtensaConfig *config); void xtensa_translate_init(void); +void xtensa_translate_code(CPUState *cs, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc); void **xtensa_get_regfile_by_name(const char *name, int entries, int bits); void xtensa_breakpoint_handler(CPUState *cs); void xtensa_register_core(XtensaConfigList *node); @@ -729,77 +733,13 @@ static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env) #define XTENSA_CSBASE_LBEG_OFF_MASK 0x00ff0000 #define XTENSA_CSBASE_LBEG_OFF_SHIFT 16 -#include "exec/cpu-all.h" - -static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *flags) -{ - *pc = env->pc; - *cs_base = 0; - *flags = 0; - *flags |= xtensa_get_ring(env); - if 
(env->sregs[PS] & PS_EXCM) { - *flags |= XTENSA_TBFLAG_EXCM; - } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_LOOP)) { - target_ulong lend_dist = - env->sregs[LEND] - (env->pc & -(1u << TARGET_PAGE_BITS)); - - /* - * 0 in the csbase_lend field means that there may not be a loopback - * for any instruction that starts inside this page. Any other value - * means that an instruction that ends at this offset from the page - * start may loop back and will need loopback code to be generated. - * - * lend_dist is 0 when LEND points to the start of the page, but - * no instruction that starts inside this page may end at offset 0, - * so it's still correct. - * - * When an instruction ends at a page boundary it may only start in - * the previous page. lend_dist will be encoded as TARGET_PAGE_SIZE - * for the TB that contains this instruction. - */ - if (lend_dist < (1u << TARGET_PAGE_BITS) + env->config->max_insn_size) { - target_ulong lbeg_off = env->sregs[LEND] - env->sregs[LBEG]; - - *cs_base = lend_dist; - if (lbeg_off < 256) { - *cs_base |= lbeg_off << XTENSA_CSBASE_LBEG_OFF_SHIFT; - } - } - } - if (xtensa_option_enabled(env->config, XTENSA_OPTION_EXTENDED_L32R) && - (env->sregs[LITBASE] & 1)) { - *flags |= XTENSA_TBFLAG_LITBASE; - } - if (xtensa_option_enabled(env->config, XTENSA_OPTION_DEBUG)) { - if (xtensa_get_cintlevel(env) < env->config->debug_level) { - *flags |= XTENSA_TBFLAG_DEBUG; - } - if (xtensa_get_cintlevel(env) < env->sregs[ICOUNTLEVEL]) { - *flags |= XTENSA_TBFLAG_ICOUNT; - } - } - if (xtensa_option_enabled(env->config, XTENSA_OPTION_COPROCESSOR)) { - *flags |= env->sregs[CPENABLE] << XTENSA_TBFLAG_CPENABLE_SHIFT; - } - if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER) && - (env->sregs[PS] & (PS_WOE | PS_EXCM)) == PS_WOE) { - uint32_t windowstart = xtensa_replicate_windowstart(env) >> - (env->sregs[WINDOW_BASE] + 1); - uint32_t w = ctz32(windowstart | 0x8); - - *flags |= (w << XTENSA_TBFLAG_WINDOW_SHIFT) | XTENSA_TBFLAG_CWOE; - *flags |= extract32(env->sregs[PS], PS_CALLINC_SHIFT, - PS_CALLINC_LEN) << XTENSA_TBFLAG_CALLINC_SHIFT; - } else { - *flags |= 3 << XTENSA_TBFLAG_WINDOW_SHIFT; - } - if (env->yield_needed) { - *flags |= XTENSA_TBFLAG_YIELD; - } -} - XtensaCPU *xtensa_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk); +/* + * Set the NaN propagation rule for future FPU operations: + * use_first is true to pick the first NaN as the result if both + * inputs are NaNs, false to pick the second. 
+ */ +void xtensa_use_first_nan(CPUXtensaState *env, bool use_first); #endif diff --git a/target/xtensa/dbg_helper.c b/target/xtensa/dbg_helper.c index 5546c82ecda..3b91f7c38ac 100644 --- a/target/xtensa/dbg_helper.c +++ b/target/xtensa/dbg_helper.c @@ -30,8 +30,8 @@ #include "cpu.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" -#include "exec/exec-all.h" -#include "exec/address-spaces.h" +#include "exec/watchpoint.h" +#include "system/address-spaces.h" void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v) { diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c index ca629f071d1..b611c9bf97c 100644 --- a/target/xtensa/exc_helper.c +++ b/target/xtensa/exc_helper.c @@ -32,7 +32,6 @@ #include "exec/helper-proto.h" #include "qemu/host-utils.h" #include "qemu/atomic.h" -#include "exec/exec-all.h" void HELPER(exception)(CPUXtensaState *env, uint32_t excp) { diff --git a/target/xtensa/fpu_helper.c b/target/xtensa/fpu_helper.c index 381e83ded83..5358060c50a 100644 --- a/target/xtensa/fpu_helper.c +++ b/target/xtensa/fpu_helper.c @@ -30,7 +30,6 @@ #include "cpu.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" -#include "exec/exec-all.h" #include "fpu/softfloat.h" enum { @@ -57,6 +56,14 @@ static const struct { { XTENSA_FP_V, float_flag_invalid, }, }; +void xtensa_use_first_nan(CPUXtensaState *env, bool use_first) +{ + set_float_2nan_prop_rule(use_first ? float_2nan_prop_ab : float_2nan_prop_ba, + &env->fp_status); + set_float_3nan_prop_rule(use_first ? float_3nan_prop_abc : float_3nan_prop_cba, + &env->fp_status); +} + void HELPER(wur_fpu2k_fcr)(CPUXtensaState *env, uint32_t v) { static const int rounding_mode[] = { @@ -171,87 +178,87 @@ float32 HELPER(fpu2k_msub_s)(CPUXtensaState *env, float64 HELPER(add_d)(CPUXtensaState *env, float64 a, float64 b) { - set_use_first_nan(true, &env->fp_status); + xtensa_use_first_nan(env, true); return float64_add(a, b, &env->fp_status); } float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_add(a, b, &env->fp_status); } float64 HELPER(sub_d)(CPUXtensaState *env, float64 a, float64 b) { - set_use_first_nan(true, &env->fp_status); + xtensa_use_first_nan(env, true); return float64_sub(a, b, &env->fp_status); } float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_sub(a, b, &env->fp_status); } float64 HELPER(mul_d)(CPUXtensaState *env, float64 a, float64 b) { - set_use_first_nan(true, &env->fp_status); + xtensa_use_first_nan(env, true); return float64_mul(a, b, &env->fp_status); } float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_mul(a, b, &env->fp_status); } float64 HELPER(madd_d)(CPUXtensaState *env, float64 a, float64 b, float64 c) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float64_muladd(b, c, a, 0, &env->fp_status); } float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_muladd(b, c, a, 0, &env->fp_status); } 
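/*
 * A minimal sketch of what the helpers above now rely on: instead of a
 * per-operation set_use_first_nan() toggle, xtensa_use_first_nan() (defined
 * earlier in this file) selects the softfloat propagation rules directly:
 *
 *     xtensa_use_first_nan(env, true);
 *     //   -> set_float_2nan_prop_rule(float_2nan_prop_ab,  &env->fp_status);
 *     //   -> set_float_3nan_prop_rule(float_3nan_prop_abc, &env->fp_status);
 *
 * i.e. "use first NaN" propagates the first operand's NaN when more than
 * one input is a NaN.
 */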
float64 HELPER(msub_d)(CPUXtensaState *env, float64 a, float64 b, float64 c) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float64_muladd(b, c, a, float_muladd_negate_product, &env->fp_status); } float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_muladd(b, c, a, float_muladd_negate_product, &env->fp_status); } float64 HELPER(mkdadj_d)(CPUXtensaState *env, float64 a, float64 b) { - set_use_first_nan(true, &env->fp_status); + xtensa_use_first_nan(env, true); return float64_div(b, a, &env->fp_status); } float32 HELPER(mkdadj_s)(CPUXtensaState *env, float32 a, float32 b) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_div(b, a, &env->fp_status); } float64 HELPER(mksadj_d)(CPUXtensaState *env, float64 v) { - set_use_first_nan(true, &env->fp_status); + xtensa_use_first_nan(env, true); return float64_sqrt(v, &env->fp_status); } float32 HELPER(mksadj_s)(CPUXtensaState *env, float32 v) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_sqrt(v, &env->fp_status); } diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c index ca214b948a9..2d93b45036d 100644 --- a/target/xtensa/helper.c +++ b/target/xtensa/helper.c @@ -28,7 +28,8 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "cpu.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" +#include "exec/target_page.h" #include "gdbstub/helpers.h" #include "exec/helper-proto.h" #include "qemu/error-report.h" @@ -169,13 +170,12 @@ static void xtensa_finalize_config(XtensaConfig *config) } } -static void xtensa_core_class_init(ObjectClass *oc, void *data) +static void xtensa_core_class_init(ObjectClass *oc, const void *data) { CPUClass *cc = CPU_CLASS(oc); XtensaCPUClass *xcc = XTENSA_CPU_CLASS(oc); - XtensaConfig *config = data; + const XtensaConfig *config = data; - xtensa_finalize_config(config); xcc->config = config; /* @@ -192,13 +192,15 @@ void xtensa_register_core(XtensaConfigList *node) TypeInfo type = { .parent = TYPE_XTENSA_CPU, .class_init = xtensa_core_class_init, - .class_data = (void *)node->config, + .class_data = node->config, }; + xtensa_finalize_config(node->config); + node->next = xtensa_cores; xtensa_cores = node; type.name = g_strdup_printf(XTENSA_CPU_TYPE_NAME("%s"), node->config->name); - type_register(&type); + type_register_static(&type); g_free((gpointer)type.name); } diff --git a/target/xtensa/mmu_helper.c b/target/xtensa/mmu_helper.c index 29b84d5dbf6..71330fc84b9 100644 --- a/target/xtensa/mmu_helper.c +++ b/target/xtensa/mmu_helper.c @@ -32,8 +32,12 @@ #include "cpu.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" -#include "exec/exec-all.h" +#include "exec/cputlb.h" +#include "accel/tcg/cpu-mmu-index.h" +#include "accel/tcg/probe.h" #include "exec/page-protection.h" +#include "exec/target_page.h" +#include "system/memory.h" #define XTENSA_MPU_SEGMENT_MASK 0x0000001f #define XTENSA_MPU_ACC_RIGHTS_MASK 0x00000f00 diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c index 028d4e0a1c7..fc47ebaaf50 100644 --- a/target/xtensa/op_helper.c +++ b/target/xtensa/op_helper.c @@ -30,7 +30,7 @@ #include "exec/helper-proto.h" #include "exec/page-protection.h" #include 
"qemu/host-utils.h" -#include "exec/exec-all.h" +#include "system/memory.h" #include "qemu/atomic.h" #include "qemu/timer.h" diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c index 75b7bfda4c8..34ae2f4e162 100644 --- a/target/xtensa/translate.c +++ b/target/xtensa/translate.c @@ -31,17 +31,18 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "tcg/tcg-op.h" #include "qemu/log.h" #include "qemu/qemu-print.h" -#include "semihosting/semihost.h" #include "exec/translator.h" - +#include "exec/translation-block.h" +#include "exec/target_page.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" - #include "exec/log.h" +#ifndef CONFIG_USER_ONLY +#include "semihosting/semihost.h" +#endif #define HELPER_H "helper.h" #include "exec/helper-info.c.inc" @@ -521,7 +522,7 @@ static MemOp gen_load_store_alignment(DisasContext *dc, MemOp mop, mop |= MO_ALIGN; } if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) { - tcg_gen_andi_i32(addr, addr, ~0 << get_alignment_bits(mop)); + tcg_gen_andi_i32(addr, addr, ~0 << memop_alignment_bits(mop)); } return mop; } @@ -1227,8 +1228,8 @@ static const TranslatorOps xtensa_translator_ops = { .tb_stop = xtensa_tr_tb_stop, }; -void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns, - vaddr pc, void *host_pc) +void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb, + int *max_insns, vaddr pc, void *host_pc) { DisasContext dc = {}; translator_loop(cpu, tb, max_insns, pc, host_pc, @@ -1393,11 +1394,11 @@ static void translate_bbi(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_temp_new_i32(); -#if TARGET_BIG_ENDIAN - tcg_gen_andi_i32(tmp, arg[0].in, 0x80000000u >> arg[1].imm); -#else - tcg_gen_andi_i32(tmp, arg[0].in, 0x00000001u << arg[1].imm); -#endif + if (TARGET_BIG_ENDIAN) { + tcg_gen_andi_i32(tmp, arg[0].in, 0x80000000u >> arg[1].imm); + } else { + tcg_gen_andi_i32(tmp, arg[0].in, 0x00000001u << arg[1].imm); + } gen_brcondi(dc, par[0], tmp, 0, arg[2].imm); } @@ -2240,17 +2241,15 @@ static uint32_t test_exceptions_simcall(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - bool is_semi = semihosting_enabled(dc->cring != 0); -#ifdef CONFIG_USER_ONLY - bool ill = true; -#else - /* Between RE.2 and RE.3 simcall opcode's become nop for the hardware. */ - bool ill = dc->config->hw_version <= 250002 && !is_semi; -#endif - if (ill || !is_semi) { - qemu_log_mask(LOG_GUEST_ERROR, "SIMCALL but semihosting is disabled\n"); +#ifndef CONFIG_USER_ONLY + if (semihosting_enabled(dc->cring != 0)) { + return 0; } - return ill ? XTENSA_OP_ILL : 0; +#endif + qemu_log_mask(LOG_GUEST_ERROR, "SIMCALL but semihosting is disabled\n"); + + /* Between RE.2 and RE.3 simcall opcode's become nop for the hardware. */ + return dc->config->hw_version <= 250002 ? 
XTENSA_OP_ILL : 0; } static void translate_simcall(DisasContext *dc, const OpcodeArg arg[], diff --git a/target/xtensa/win_helper.c b/target/xtensa/win_helper.c index ec9ff44db05..4b25f8f4de7 100644 --- a/target/xtensa/win_helper.c +++ b/target/xtensa/win_helper.c @@ -30,7 +30,6 @@ #include "cpu.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" -#include "exec/exec-all.h" static void copy_window_from_phys(CPUXtensaState *env, uint32_t window, uint32_t phys, uint32_t n) diff --git a/target/xtensa/xtensa-semi.c b/target/xtensa/xtensa-semi.c index fa21b7e11fc..636f421da2b 100644 --- a/target/xtensa/xtensa-semi.c +++ b/target/xtensa/xtensa-semi.c @@ -29,7 +29,9 @@ #include "cpu.h" #include "chardev/char-fe.h" #include "exec/helper-proto.h" +#include "exec/target_page.h" #include "semihosting/semihost.h" +#include "semihosting/uaccess.h" #include "qapi/error.h" #include "qemu/log.h" @@ -323,15 +325,12 @@ void HELPER(simcall)(CPUXtensaState *env) uint32_t fd = regs[3]; uint32_t rq = regs[4]; uint32_t target_tv = regs[5]; - uint32_t target_tvv[2]; struct timeval tv = {0}; if (target_tv) { - cpu_memory_rw_debug(cs, target_tv, - (uint8_t *)target_tvv, sizeof(target_tvv), 0); - tv.tv_sec = (int32_t)tswap32(target_tvv[0]); - tv.tv_usec = (int32_t)tswap32(target_tvv[1]); + get_user_u32(tv.tv_sec, target_tv); + get_user_u32(tv.tv_sec, target_tv + 4); } if (fd < 3 && sim_console) { if ((fd == 1 || fd == 2) && rq == SELECT_ONE_WRITE) { @@ -387,11 +386,8 @@ void HELPER(simcall)(CPUXtensaState *env) const char *str = semihosting_get_arg(i); int str_size = strlen(str) + 1; - argptr = tswap32(regs[3] + str_offset); - - cpu_memory_rw_debug(cs, - regs[3] + i * sizeof(uint32_t), - (uint8_t *)&argptr, sizeof(argptr), 1); + put_user_u32(regs[3] + str_offset, + regs[3] + i * sizeof(uint32_t)); cpu_memory_rw_debug(cs, regs[3] + str_offset, (uint8_t *)str, str_size, 1); diff --git a/tcg/aarch64/tcg-target-con-set.h b/tcg/aarch64/tcg-target-con-set.h index 44fcc1206e0..d0622e65fb0 100644 --- a/tcg/aarch64/tcg-target-con-set.h +++ b/tcg/aarch64/tcg-target-con-set.h @@ -11,20 +11,22 @@ */ C_O0_I1(r) C_O0_I2(r, rC) -C_O0_I2(rZ, r) +C_O0_I2(rz, r) C_O0_I2(w, r) -C_O0_I3(rZ, rZ, r) +C_O0_I3(rz, rz, r) C_O1_I1(r, r) C_O1_I1(w, r) C_O1_I1(w, w) C_O1_I1(w, wr) -C_O1_I2(r, 0, rZ) C_O1_I2(r, r, r) C_O1_I2(r, r, rA) C_O1_I2(r, r, rAL) C_O1_I2(r, r, rC) C_O1_I2(r, r, ri) C_O1_I2(r, r, rL) +C_O1_I2(r, rZ, rA) +C_O1_I2(r, rz, rMZ) +C_O1_I2(r, rz, rz) C_O1_I2(r, rZ, rZ) C_O1_I2(w, 0, w) C_O1_I2(w, w, w) @@ -32,6 +34,5 @@ C_O1_I2(w, w, wN) C_O1_I2(w, w, wO) C_O1_I2(w, w, wZ) C_O1_I3(w, w, w, w) -C_O1_I4(r, r, rC, rZ, rZ) +C_O1_I4(r, r, rC, rz, rz) C_O2_I1(r, r, r) -C_O2_I4(r, r, rZ, rZ, rA, rMZ) diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h new file mode 100644 index 00000000000..69e83efb69d --- /dev/null +++ b/tcg/aarch64/tcg-target-has.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Define target-specific opcode support + * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH + */ + +#ifndef TCG_TARGET_HAS_H +#define TCG_TARGET_HAS_H + +#include "host/cpuinfo.h" + +#define have_lse (cpuinfo & CPUINFO_LSE) +#define have_lse2 (cpuinfo & CPUINFO_LSE2) + +/* optional instructions */ +#define TCG_TARGET_HAS_extr_i64_i32 0 + +/* + * Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load, + * which requires writable pages. We must defer to the helper for user-only, + * but in system mode all ram is writable for the host. 
+ */ +#ifdef CONFIG_USER_ONLY +#define TCG_TARGET_HAS_qemu_ldst_i128 have_lse2 +#else +#define TCG_TARGET_HAS_qemu_ldst_i128 1 +#endif + +#define TCG_TARGET_HAS_tst 1 + +#define TCG_TARGET_HAS_v64 1 +#define TCG_TARGET_HAS_v128 1 +#define TCG_TARGET_HAS_v256 0 + +#define TCG_TARGET_HAS_andc_vec 1 +#define TCG_TARGET_HAS_orc_vec 1 +#define TCG_TARGET_HAS_nand_vec 0 +#define TCG_TARGET_HAS_nor_vec 0 +#define TCG_TARGET_HAS_eqv_vec 0 +#define TCG_TARGET_HAS_not_vec 1 +#define TCG_TARGET_HAS_neg_vec 1 +#define TCG_TARGET_HAS_abs_vec 1 +#define TCG_TARGET_HAS_roti_vec 0 +#define TCG_TARGET_HAS_rots_vec 0 +#define TCG_TARGET_HAS_rotv_vec 0 +#define TCG_TARGET_HAS_shi_vec 1 +#define TCG_TARGET_HAS_shs_vec 0 +#define TCG_TARGET_HAS_shv_vec 1 +#define TCG_TARGET_HAS_mul_vec 1 +#define TCG_TARGET_HAS_sat_vec 1 +#define TCG_TARGET_HAS_minmax_vec 1 +#define TCG_TARGET_HAS_bitsel_vec 1 +#define TCG_TARGET_HAS_cmpsel_vec 0 +#define TCG_TARGET_HAS_tst_vec 1 + +#define TCG_TARGET_extract_valid(type, ofs, len) 1 +#define TCG_TARGET_sextract_valid(type, ofs, len) 1 +#define TCG_TARGET_deposit_valid(type, ofs, len) 1 + +#endif diff --git a/tcg/aarch64/tcg-target-mo.h b/tcg/aarch64/tcg-target-mo.h new file mode 100644 index 00000000000..e8e89230142 --- /dev/null +++ b/tcg/aarch64/tcg-target-mo.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Define target-specific memory model + * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH + */ + +#ifndef TCG_TARGET_MO_H +#define TCG_TARGET_MO_H + +#define TCG_TARGET_DEFAULT_MO 0 + +#endif diff --git a/tcg/aarch64/tcg-target-opc.h.inc b/tcg/aarch64/tcg-target-opc.h.inc new file mode 100644 index 00000000000..5382315c414 --- /dev/null +++ b/tcg/aarch64/tcg-target-opc.h.inc @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2019 Linaro + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. + * + * See the COPYING file in the top-level directory for details. + * + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, + * consider these to be UNSPEC with names. + */ + +DEF(aa64_sshl_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(aa64_sli_vec, 1, 2, 1, TCG_OPF_VECTOR) diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index ffa8a3e5193..3b088b7bd97 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -10,10 +10,21 @@ * See the COPYING file in the top-level directory for details. */ -#include "../tcg-ldst.c.inc" -#include "../tcg-pool.c.inc" #include "qemu/bitops.h" +/* Used for function call generation. */ +#define TCG_REG_CALL_STACK TCG_REG_SP +#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_CALL_STACK_OFFSET 0 +#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL +#ifdef CONFIG_DARWIN +# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL +#else +# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN +#endif +#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL + /* We're going to re-use TCGType in setting of the SF bit, which controls the size of the operation performed. If we know the values match, it makes things much cleaner. */ @@ -497,7 +508,9 @@ typedef enum { /* Add/subtract with carry instructions. */ I3503_ADC = 0x1a000000, + I3503_ADCS = 0x3a000000, I3503_SBC = 0x5a000000, + I3503_SBCS = 0x7a000000, /* Conditional select instructions. 
*/ I3506_CSEL = 0x1a800000, @@ -1336,70 +1349,37 @@ static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd, tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a); } -static inline void tcg_out_shl(TCGContext *s, TCGType ext, - TCGReg rd, TCGReg rn, unsigned int m) -{ - int bits = ext ? 64 : 32; - int max = bits - 1; - tcg_out_ubfm(s, ext, rd, rn, (bits - m) & max, (max - m) & max); -} - -static inline void tcg_out_shr(TCGContext *s, TCGType ext, - TCGReg rd, TCGReg rn, unsigned int m) -{ - int max = ext ? 63 : 31; - tcg_out_ubfm(s, ext, rd, rn, m & max, max); -} - -static inline void tcg_out_sar(TCGContext *s, TCGType ext, - TCGReg rd, TCGReg rn, unsigned int m) -{ - int max = ext ? 63 : 31; - tcg_out_sbfm(s, ext, rd, rn, m & max, max); -} - -static inline void tcg_out_rotr(TCGContext *s, TCGType ext, - TCGReg rd, TCGReg rn, unsigned int m) -{ - int max = ext ? 63 : 31; - tcg_out_extr(s, ext, rd, rn, rn, m & max); -} - -static inline void tcg_out_rotl(TCGContext *s, TCGType ext, - TCGReg rd, TCGReg rn, unsigned int m) +static void tgen_cmp(TCGContext *s, TCGType ext, TCGCond cond, + TCGReg a, TCGReg b) { - int max = ext ? 63 : 31; - tcg_out_extr(s, ext, rd, rn, rn, -m & max); + if (is_tst_cond(cond)) { + tcg_out_insn(s, 3510, ANDS, ext, TCG_REG_XZR, a, b); + } else { + tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b); + } } -static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd, - TCGReg rn, unsigned lsb, unsigned width) +static void tgen_cmpi(TCGContext *s, TCGType ext, TCGCond cond, + TCGReg a, tcg_target_long b) { - unsigned size = ext ? 64 : 32; - unsigned a = (size - lsb) & (size - 1); - unsigned b = width - 1; - tcg_out_bfm(s, ext, rd, rn, a, b); + if (is_tst_cond(cond)) { + tcg_out_logicali(s, I3404_ANDSI, ext, TCG_REG_XZR, a, b); + } else if (b >= 0) { + tcg_debug_assert(is_aimm(b)); + tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b); + } else { + tcg_debug_assert(is_aimm(-b)); + tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b); + } } static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGCond cond, TCGReg a, tcg_target_long b, bool const_b) { - if (is_tst_cond(cond)) { - if (!const_b) { - tcg_out_insn(s, 3510, ANDS, ext, TCG_REG_XZR, a, b); - } else { - tcg_out_logicali(s, I3404_ANDSI, ext, TCG_REG_XZR, a, b); - } + if (const_b) { + tgen_cmpi(s, ext, cond, a, b); } else { - if (!const_b) { - tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b); - } else if (b >= 0) { - tcg_debug_assert(is_aimm(b)); - tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b); - } else { - tcg_debug_assert(is_aimm(-b)); - tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b); - } + tgen_cmp(s, ext, cond, a, b); } } @@ -1427,7 +1407,7 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target, tcg_out_call_int(s, target); } -static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l) +static void tcg_out_br(TCGContext *s, TCGLabel *l) { if (!l->has_value) { tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, l, 0); @@ -1437,8 +1417,16 @@ static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l) } } -static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a, - TCGArg b, bool b_const, TCGLabel *l) +static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c, + TCGReg a, TCGReg b, TCGLabel *l) +{ + tgen_cmp(s, type, c, a, b); + tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0); + tcg_out_insn(s, 3202, B_C, c, 0); +} + +static void tgen_brcondi(TCGContext *s, TCGType ext, TCGCond c, + TCGReg a, tcg_target_long b, TCGLabel *l) { 
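/*
 * Where the immediate allows it, fold the compare into a single branch
 * instruction: EQ/NE against 0 use CBZ/CBNZ, LT/GE against 0 and
 * single-bit TST conditions use TBZ/TBNZ; anything else falls back to
 * a CMP/TST followed by B.cond (the need_cmp path below).
 */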
int tbit = -1; bool need_cmp = true; @@ -1447,14 +1435,14 @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a, case TCG_COND_EQ: case TCG_COND_NE: /* cmp xN,0; b.ne L -> cbnz xN,L */ - if (b_const && b == 0) { + if (b == 0) { need_cmp = false; } break; case TCG_COND_LT: case TCG_COND_GE: /* cmp xN,0; b.mi L -> tbnz xN,63,L */ - if (b_const && b == 0) { + if (b == 0) { c = (c == TCG_COND_LT ? TCG_COND_TSTNE : TCG_COND_TSTEQ); tbit = ext ? 63 : 31; need_cmp = false; @@ -1463,14 +1451,14 @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a, case TCG_COND_TSTEQ: case TCG_COND_TSTNE: /* tst xN,0xffffffff; b.ne L -> cbnz wN,L */ - if (b_const && b == UINT32_MAX) { + if (b == UINT32_MAX) { c = tcg_tst_eqne_cond(c); ext = TCG_TYPE_I32; need_cmp = false; break; } /* tst xN,1<<B; b.ne L -> tbnz xN,B,L */ - if (b_const && is_power_of_2(b)) { + if (is_power_of_2(b)) { tbit = ctz64(b); need_cmp = false; } @@ -1480,7 +1468,7 @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a, } if (need_cmp) { - tcg_out_cmp(s, ext, c, a, b, b_const); + tgen_cmpi(s, ext, c, a, b); tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0); tcg_out_insn(s, 3202, B_C, c, 0); return; @@ -1513,6 +1501,12 @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a, } } +static const TCGOutOpBrcond outop_brcond = { + .base.static_constraint = C_O0_I2(r, rC), + .out_rr = tgen_brcond, + .out_ri = tgen_brcondi, +}; + static inline void tcg_out_rev(TCGContext *s, int ext, MemOp s_bits, TCGReg rd, TCGReg rn) { @@ -1581,67 +1575,7 @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn) tcg_out_mov(s, TCG_TYPE_I32, rd, rn); } -static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd, - TCGReg rn, int64_t aimm) -{ - if (aimm >= 0) { - tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm); - } else { - tcg_out_insn(s, 3401, SUBI, ext, rd, rn, -aimm); - } -} - -static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl, - TCGReg rh, TCGReg al, TCGReg ah, - tcg_target_long bl, tcg_target_long bh, - bool const_bl, bool const_bh, bool sub) -{ - TCGReg orig_rl = rl; - AArch64Insn insn; - - if (rl == ah || (!const_bh && rl == bh)) { - rl = TCG_REG_TMP0; - } - - if (const_bl) { - if (bl < 0) { - bl = -bl; - insn = sub ? I3401_ADDSI : I3401_SUBSI; - } else { - insn = sub ? I3401_SUBSI : I3401_ADDSI; - } - - if (unlikely(al == TCG_REG_XZR)) { - /* ??? We want to allow al to be zero for the benefit of - negation via subtraction. However, that leaves open the - possibility of adding 0+const in the low part, and the - immediate add instructions encode XSP not XZR. Don't try - anything more elaborate here than loading another zero. */ - al = TCG_REG_TMP0; - tcg_out_movi(s, ext, al, 0); - } - tcg_out_insn_3401(s, insn, ext, rl, al, bl); - } else { - tcg_out_insn_3502(s, sub ? I3502_SUBS : I3502_ADDS, ext, rl, al, bl); - } - - insn = I3503_ADC; - if (const_bh) { - /* Note that the only two constants we support are 0 and -1, and - that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa. */ - if ((bh != 0) ^ sub) { - insn = I3503_SBC; - } - bh = TCG_REG_XZR; - } else if (sub) { - insn = I3503_SBC; - } - tcg_out_insn_3503(s, insn, ext, rh, ah, bh); - - tcg_out_mov(s, ext, orig_rl, rl); -} - -static inline void tcg_out_mb(TCGContext *s, TCGArg a0) +static void tcg_out_mb(TCGContext *s, unsigned a0) { static const uint32_t sync[] = { [0 ... 
TCG_MO_ALL] = DMB_ISH | DMB_LD | DMB_ST, @@ -1653,37 +1587,6 @@ static inline void tcg_out_mb(TCGContext *s, TCGArg a0) tcg_out32(s, sync[a0 & TCG_MO_ALL]); } -static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d, - TCGReg a0, TCGArg b, bool const_b, bool is_ctz) -{ - TCGReg a1 = a0; - if (is_ctz) { - a1 = TCG_REG_TMP0; - tcg_out_insn(s, 3507, RBIT, ext, a1, a0); - } - if (const_b && b == (ext ? 64 : 32)) { - tcg_out_insn(s, 3507, CLZ, ext, d, a1); - } else { - AArch64Insn sel = I3506_CSEL; - - tcg_out_cmp(s, ext, TCG_COND_NE, a0, 0, 1); - tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP0, a1); - - if (const_b) { - if (b == -1) { - b = TCG_REG_XZR; - sel = I3506_CSINV; - } else if (b == 0) { - b = TCG_REG_XZR; - } else { - tcg_out_movi(s, ext, d, b); - b = d; - } - } - tcg_out_insn_3506(s, sel, ext, d, TCG_REG_TMP0, b, TCG_COND_NE); - } -} - typedef struct { TCGReg base; TCGReg index; @@ -1758,16 +1661,12 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, unsigned s_mask = (1u << s_bits) - 1; unsigned mem_index = get_mmuidx(oi); TCGReg addr_adj; - TCGType mask_type; uint64_t compare_mask; ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addr_reg; - - mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32 - ? TCG_TYPE_I64 : TCG_TYPE_I32); + ldst->addr_reg = addr_reg; /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); @@ -1776,9 +1675,9 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tlb_mask_table_ofs(s, mem_index), 1, 0); /* Extract the TLB index from the address into X0. */ - tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64, + tcg_out_insn(s, 3502S, AND_LSR, TCG_TYPE_I64, TCG_REG_TMP0, TCG_REG_TMP0, addr_reg, - s->page_bits - CPU_TLB_ENTRY_BITS); + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); /* Add the tlb_table pointer, forming the CPUTLBEntry address. */ tcg_out_insn(s, 3502, ADD, 1, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP0); @@ -1804,7 +1703,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_insn(s, 3401, ADDI, addr_type, addr_adj, addr_reg, s_mask - a_mask); } - compare_mask = (uint64_t)s->page_mask | a_mask; + compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; /* Store the page mask part of the address into TMP2. 
*/ tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_TMP2, @@ -1826,7 +1725,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addr_reg; + ldst->addr_reg = addr_reg; /* tst addr, #mask */ tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask); @@ -1903,8 +1802,8 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, } } -static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - MemOpIdx oi, TCGType data_type) +static void tgen_qemu_ld(TCGContext *s, TCGType data_type, TCGReg data_reg, + TCGReg addr_reg, MemOpIdx oi) { TCGLabelQemuLdst *ldst; HostAddress h; @@ -1919,8 +1818,13 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, } } -static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - MemOpIdx oi, TCGType data_type) +static const TCGOutOpQemuLdSt outop_qemu_ld = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_qemu_ld, +}; + +static void tgen_qemu_st(TCGContext *s, TCGType data_type, TCGReg data_reg, + TCGReg addr_reg, MemOpIdx oi) { TCGLabelQemuLdst *ldst; HostAddress h; @@ -1935,6 +1839,11 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, } } +static const TCGOutOpQemuLdSt outop_qemu_st = { + .base.static_constraint = C_O0_I2(rz, r), + .out = tgen_qemu_st, +}; + static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi, TCGReg addr_reg, MemOpIdx oi, bool is_ld) { @@ -2037,6 +1946,28 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi, } } +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr_reg, MemOpIdx oi) +{ + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, true); +} + +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = { + .base.static_constraint = C_O2_I1(r, r, r), + .out = tgen_qemu_ld2, +}; + +static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr_reg, MemOpIdx oi) +{ + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, false); +} + +static const TCGOutOpQemuLdSt2 outop_qemu_st2 = { + .base.static_constraint = C_O0_I3(rz, rz, r), + .out = tgen_qemu_st2, +}; + static const tcg_insn_unit *tb_ret_addr; static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) @@ -2083,6 +2014,11 @@ static void tcg_out_goto_tb(TCGContext *s, int which) tcg_out_bti(s, BTI_J); } +static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0) +{ + tcg_out_insn(s, 3207, BR, a0); +} + void tb_target_set_jmp_target(const TranslationBlock *tb, int n, uintptr_t jmp_rx, uintptr_t jmp_rw) { @@ -2104,413 +2040,859 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n, flush_idcache_range(jmp_rx, jmp_rw, 4); } -static void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) + +static void tgen_add(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) { - /* 99% of the time, we can signal the use of extension registers - by looking to see if the opcode handles 64-bit data. */ - TCGType ext = (tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0; + tcg_out_insn(s, 3502, ADD, type, a0, a1, a2); +} - /* Hoist the loads of the most common arguments. 
*/ - TCGArg a0 = args[0]; - TCGArg a1 = args[1]; - TCGArg a2 = args[2]; - int c2 = const_args[2]; +static void tgen_addi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (a2 >= 0) { + tcg_out_insn(s, 3401, ADDI, type, a0, a1, a2); + } else { + tcg_out_insn(s, 3401, SUBI, type, a0, a1, -a2); + } +} - /* Some operands are defined with "rZ" constraint, a register or - the zero register. These need not actually test args[I] == 0. */ -#define REG0(I) (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I]) +static const TCGOutOpBinary outop_add = { + .base.static_constraint = C_O1_I2(r, r, rA), + .out_rrr = tgen_add, + .out_rri = tgen_addi, +}; - switch (opc) { - case INDEX_op_goto_ptr: - tcg_out_insn(s, 3207, BR, a0); - break; +static void tgen_addco(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3502, ADDS, type, a0, a1, a2); +} - case INDEX_op_br: - tcg_out_goto_label(s, arg_label(a0)); - break; +static void tgen_addco_imm(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (a2 >= 0) { + tcg_out_insn(s, 3401, ADDSI, type, a0, a1, a2); + } else { + tcg_out_insn(s, 3401, SUBSI, type, a0, a1, -a2); + } +} - case INDEX_op_ld8u_i32: - case INDEX_op_ld8u_i64: - tcg_out_ldst(s, I3312_LDRB, a0, a1, a2, 0); - break; - case INDEX_op_ld8s_i32: - tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2, 0); - break; - case INDEX_op_ld8s_i64: - tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2, 0); - break; - case INDEX_op_ld16u_i32: - case INDEX_op_ld16u_i64: - tcg_out_ldst(s, I3312_LDRH, a0, a1, a2, 1); - break; - case INDEX_op_ld16s_i32: - tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2, 1); - break; - case INDEX_op_ld16s_i64: - tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2, 1); - break; - case INDEX_op_ld_i32: - case INDEX_op_ld32u_i64: - tcg_out_ldst(s, I3312_LDRW, a0, a1, a2, 2); - break; - case INDEX_op_ld32s_i64: - tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2, 2); - break; - case INDEX_op_ld_i64: - tcg_out_ldst(s, I3312_LDRX, a0, a1, a2, 3); - break; +static const TCGOutOpBinary outop_addco = { + .base.static_constraint = C_O1_I2(r, r, rA), + .out_rrr = tgen_addco, + .out_rri = tgen_addco_imm, +}; - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2, 0); - break; - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2, 1); +static void tgen_addci_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3503, ADC, type, a0, a1, a2); +} + +static void tgen_addci_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + /* + * Note that the only two constants we support are 0 and -1, and + * that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa. + */ + if (a2) { + tcg_out_insn(s, 3503, SBC, type, a0, a1, TCG_REG_XZR); + } else { + tcg_out_insn(s, 3503, ADC, type, a0, a1, TCG_REG_XZR); + } +} + +static const TCGOutOpAddSubCarry outop_addci = { + .base.static_constraint = C_O1_I2(r, rz, rMZ), + .out_rrr = tgen_addci_rrr, + .out_rri = tgen_addci_rri, +}; + +static void tgen_addcio(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3503, ADCS, type, a0, a1, a2); +} + +static void tgen_addcio_imm(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + /* Use SBCS w/0 for ADCS w/-1 -- see above. 
*/ + if (a2) { + tcg_out_insn(s, 3503, SBCS, type, a0, a1, TCG_REG_XZR); + } else { + tcg_out_insn(s, 3503, ADCS, type, a0, a1, TCG_REG_XZR); + } +} + +static const TCGOutOpBinary outop_addcio = { + .base.static_constraint = C_O1_I2(r, rz, rMZ), + .out_rrr = tgen_addcio, + .out_rri = tgen_addcio_imm, +}; + +static void tcg_out_set_carry(TCGContext *s) +{ + tcg_out_insn(s, 3502, SUBS, TCG_TYPE_I32, + TCG_REG_XZR, TCG_REG_XZR, TCG_REG_XZR); +} + +static void tgen_and(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3510, AND, type, a0, a1, a2); +} + +static void tgen_andi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_logicali(s, I3404_ANDI, type, a0, a1, a2); +} + +static const TCGOutOpBinary outop_and = { + .base.static_constraint = C_O1_I2(r, r, rL), + .out_rrr = tgen_and, + .out_rri = tgen_andi, +}; + +static void tgen_andc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3510, BIC, type, a0, a1, a2); +} + +static const TCGOutOpBinary outop_andc = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_andc, +}; + +static void tgen_clz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_cmp(s, type, TCG_COND_NE, a1, 0, true); + tcg_out_insn(s, 3507, CLZ, type, TCG_REG_TMP0, a1); + tcg_out_insn(s, 3506, CSEL, type, a0, TCG_REG_TMP0, a2, TCG_COND_NE); +} + +static void tgen_clzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (a2 == (type == TCG_TYPE_I32 ? 32 : 64)) { + tcg_out_insn(s, 3507, CLZ, type, a0, a1); + return; + } + + tcg_out_cmp(s, type, TCG_COND_NE, a1, 0, true); + tcg_out_insn(s, 3507, CLZ, type, a0, a1); + + switch (a2) { + case -1: + tcg_out_insn(s, 3506, CSINV, type, a0, a0, TCG_REG_XZR, TCG_COND_NE); break; - case INDEX_op_st_i32: - case INDEX_op_st32_i64: - tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2, 2); + case 0: + tcg_out_insn(s, 3506, CSEL, type, a0, a0, TCG_REG_XZR, TCG_COND_NE); break; - case INDEX_op_st_i64: - tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2, 3); + default: + tcg_out_movi(s, type, TCG_REG_TMP0, a2); + tcg_out_insn(s, 3506, CSEL, type, a0, a0, TCG_REG_TMP0, TCG_COND_NE); break; + } +} - case INDEX_op_add_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_add_i64: - if (c2) { - tcg_out_addsubi(s, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3502, ADD, ext, a0, a1, a2); - } - break; +static const TCGOutOpBinary outop_clz = { + .base.static_constraint = C_O1_I2(r, r, rAL), + .out_rrr = tgen_clz, + .out_rri = tgen_clzi, +}; - case INDEX_op_sub_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_sub_i64: - if (c2) { - tcg_out_addsubi(s, ext, a0, a1, -a2); - } else { - tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2); - } - break; +static const TCGOutOpUnary outop_ctpop = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_neg_i64: - case INDEX_op_neg_i32: - tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1); - break; +static void tgen_ctz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3507, RBIT, type, TCG_REG_TMP0, a1); + tgen_clz(s, type, a0, TCG_REG_TMP0, a2); +} - case INDEX_op_and_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_and_i64: - if (c2) { - tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3510, AND, ext, a0, a1, a2); - } - break; +static void tgen_ctzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_insn(s, 3507, RBIT, type, 
TCG_REG_TMP0, a1); + tgen_clzi(s, type, a0, TCG_REG_TMP0, a2); +} - case INDEX_op_andc_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_andc_i64: - if (c2) { - tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, ~a2); - } else { - tcg_out_insn(s, 3510, BIC, ext, a0, a1, a2); - } - break; +static const TCGOutOpBinary outop_ctz = { + .base.static_constraint = C_O1_I2(r, r, rAL), + .out_rrr = tgen_ctz, + .out_rri = tgen_ctzi, +}; - case INDEX_op_or_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_or_i64: - if (c2) { - tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3510, ORR, ext, a0, a1, a2); - } - break; +static void tgen_divs(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3508, SDIV, type, a0, a1, a2); +} - case INDEX_op_orc_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_orc_i64: - if (c2) { - tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, ~a2); - } else { - tcg_out_insn(s, 3510, ORN, ext, a0, a1, a2); - } - break; +static const TCGOutOpBinary outop_divs = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_divs, +}; - case INDEX_op_xor_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_xor_i64: - if (c2) { - tcg_out_logicali(s, I3404_EORI, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3510, EOR, ext, a0, a1, a2); - } - break; +static const TCGOutOpDivRem outop_divs2 = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_eqv_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_eqv_i64: - if (c2) { - tcg_out_logicali(s, I3404_EORI, ext, a0, a1, ~a2); - } else { - tcg_out_insn(s, 3510, EON, ext, a0, a1, a2); - } - break; +static void tgen_divu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3508, UDIV, type, a0, a1, a2); +} - case INDEX_op_not_i64: - case INDEX_op_not_i32: - tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1); - break; +static const TCGOutOpBinary outop_divu = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_divu, +}; - case INDEX_op_mul_i64: - case INDEX_op_mul_i32: - tcg_out_insn(s, 3509, MADD, ext, a0, a1, a2, TCG_REG_XZR); - break; +static const TCGOutOpDivRem outop_divu2 = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_div_i64: - case INDEX_op_div_i32: - tcg_out_insn(s, 3508, SDIV, ext, a0, a1, a2); - break; - case INDEX_op_divu_i64: - case INDEX_op_divu_i32: - tcg_out_insn(s, 3508, UDIV, ext, a0, a1, a2); - break; +static void tgen_eqv(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3510, EON, type, a0, a1, a2); +} - case INDEX_op_rem_i64: - case INDEX_op_rem_i32: - tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP0, a1, a2); - tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP0, a2, a1); - break; - case INDEX_op_remu_i64: - case INDEX_op_remu_i32: - tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP0, a1, a2); - tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP0, a2, a1); - break; +static const TCGOutOpBinary outop_eqv = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_eqv, +}; - case INDEX_op_shl_i64: - case INDEX_op_shl_i32: - if (c2) { - tcg_out_shl(s, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3508, LSLV, ext, a0, a1, a2); - } - break; +static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) +{ + tcg_out_ubfm(s, TCG_TYPE_I64, a0, a1, 32, 63); +} - case INDEX_op_shr_i64: - case INDEX_op_shr_i32: - if (c2) { - tcg_out_shr(s, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3508, LSRV, ext, a0, a1, 
a2); - } - break; +static const TCGOutOpUnary outop_extrh_i64_i32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extrh_i64_i32, +}; - case INDEX_op_sar_i64: - case INDEX_op_sar_i32: - if (c2) { - tcg_out_sar(s, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3508, ASRV, ext, a0, a1, a2); - } - break; +static void tgen_mul(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3509, MADD, type, a0, a1, a2, TCG_REG_XZR); +} - case INDEX_op_rotr_i64: - case INDEX_op_rotr_i32: - if (c2) { - tcg_out_rotr(s, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3508, RORV, ext, a0, a1, a2); - } - break; +static const TCGOutOpBinary outop_mul = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_mul, +}; - case INDEX_op_rotl_i64: - case INDEX_op_rotl_i32: - if (c2) { - tcg_out_rotl(s, ext, a0, a1, a2); - } else { - tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP0, TCG_REG_XZR, a2); - tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP0); - } - break; +static const TCGOutOpMul2 outop_muls2 = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_clz_i64: - case INDEX_op_clz_i32: - tcg_out_cltz(s, ext, a0, a1, a2, c2, false); - break; - case INDEX_op_ctz_i64: - case INDEX_op_ctz_i32: - tcg_out_cltz(s, ext, a0, a1, a2, c2, true); - break; +static TCGConstraintSetIndex cset_mulh(TCGType type, unsigned flags) +{ + return type == TCG_TYPE_I64 ? C_O1_I2(r, r, r) : C_NotImplemented; +} - case INDEX_op_brcond_i32: - a1 = (int32_t)a1; - /* FALLTHRU */ - case INDEX_op_brcond_i64: - tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3])); - break; +static void tgen_mulsh(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2); +} - case INDEX_op_setcond_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_setcond_i64: - tcg_out_cmp(s, ext, args[3], a1, a2, c2); - /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). */ - tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR, - TCG_REG_XZR, tcg_invert_cond(args[3])); - break; +static const TCGOutOpBinary outop_mulsh = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mulh, + .out_rrr = tgen_mulsh, +}; - case INDEX_op_negsetcond_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_negsetcond_i64: - tcg_out_cmp(s, ext, args[3], a1, a2, c2); - /* Use CSETM alias of CSINV Wd, WZR, WZR, invert(cond). 
*/ - tcg_out_insn(s, 3506, CSINV, ext, a0, TCG_REG_XZR, - TCG_REG_XZR, tcg_invert_cond(args[3])); - break; +static const TCGOutOpMul2 outop_mulu2 = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_movcond_i32: - a2 = (int32_t)a2; - /* FALLTHRU */ - case INDEX_op_movcond_i64: - tcg_out_cmp(s, ext, args[5], a1, a2, c2); - tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]); - break; +static void tgen_muluh(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2); +} - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_ld_a64_i64: - tcg_out_qemu_ld(s, a0, a1, a2, ext); - break; - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st_a64_i32: - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_st_a64_i64: - tcg_out_qemu_st(s, REG0(0), a1, a2, ext); - break; - case INDEX_op_qemu_ld_a32_i128: - case INDEX_op_qemu_ld_a64_i128: - tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], true); - break; - case INDEX_op_qemu_st_a32_i128: - case INDEX_op_qemu_st_a64_i128: - tcg_out_qemu_ldst_i128(s, REG0(0), REG0(1), a2, args[3], false); - break; +static const TCGOutOpBinary outop_muluh = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mulh, + .out_rrr = tgen_muluh, +}; - case INDEX_op_bswap64_i64: - tcg_out_rev(s, TCG_TYPE_I64, MO_64, a0, a1); - break; - case INDEX_op_bswap32_i64: - tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1); - if (a2 & TCG_BSWAP_OS) { - tcg_out_ext32s(s, a0, a0); - } - break; - case INDEX_op_bswap32_i32: - tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1); - break; - case INDEX_op_bswap16_i64: - case INDEX_op_bswap16_i32: - tcg_out_rev(s, TCG_TYPE_I32, MO_16, a0, a1); - if (a2 & TCG_BSWAP_OS) { - /* Output must be sign-extended. */ - tcg_out_ext16s(s, ext, a0, a0); - } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { - /* Output must be zero-extended, but input isn't. 
*/ - tcg_out_ext16u(s, a0, a0); - } - break; +static const TCGOutOpBinary outop_nand = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_deposit_i64: - case INDEX_op_deposit_i32: - tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]); - break; +static const TCGOutOpBinary outop_nor = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_extract_i64: - case INDEX_op_extract_i32: - tcg_out_ubfm(s, ext, a0, a1, a2, a2 + args[3] - 1); - break; +static void tgen_or(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3510, ORR, type, a0, a1, a2); +} - case INDEX_op_sextract_i64: - case INDEX_op_sextract_i32: - tcg_out_sbfm(s, ext, a0, a1, a2, a2 + args[3] - 1); - break; +static void tgen_ori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_logicali(s, I3404_ORRI, type, a0, a1, a2); +} - case INDEX_op_extract2_i64: - case INDEX_op_extract2_i32: - tcg_out_extr(s, ext, a0, REG0(2), REG0(1), args[3]); - break; +static const TCGOutOpBinary outop_or = { + .base.static_constraint = C_O1_I2(r, r, rL), + .out_rrr = tgen_or, + .out_rri = tgen_ori, +}; - case INDEX_op_add2_i32: - tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3), - (int32_t)args[4], args[5], const_args[4], - const_args[5], false); - break; - case INDEX_op_add2_i64: - tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4], - args[5], const_args[4], const_args[5], false); - break; - case INDEX_op_sub2_i32: - tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3), - (int32_t)args[4], args[5], const_args[4], - const_args[5], true); - break; - case INDEX_op_sub2_i64: - tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4], - args[5], const_args[4], const_args[5], true); - break; +static void tgen_orc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3510, ORN, type, a0, a1, a2); +} - case INDEX_op_muluh_i64: - tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2); - break; - case INDEX_op_mulsh_i64: - tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2); - break; +static const TCGOutOpBinary outop_orc = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_orc, +}; - case INDEX_op_mb: - tcg_out_mb(s, a0); - break; +static void tgen_rems(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3508, SDIV, type, TCG_REG_TMP0, a1, a2); + tcg_out_insn(s, 3509, MSUB, type, a0, TCG_REG_TMP0, a2, a1); +} - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_mov_i64: - case INDEX_op_call: /* Always emitted via tcg_out_call. */ - case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ - case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ - case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. 
*/ - case INDEX_op_ext8s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16s_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16u_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extrl_i64_i32: - default: - g_assert_not_reached(); +static const TCGOutOpBinary outop_rems = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_rems, +}; + +static void tgen_remu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3508, UDIV, type, TCG_REG_TMP0, a1, a2); + tcg_out_insn(s, 3509, MSUB, type, a0, TCG_REG_TMP0, a2, a1); +} + +static const TCGOutOpBinary outop_remu = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_remu, +}; + +static const TCGOutOpBinary outop_rotl = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_rotr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3508, RORV, type, a0, a1, a2); +} + +static void tgen_rotri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int max = type == TCG_TYPE_I32 ? 31 : 63; + tcg_out_extr(s, type, a0, a1, a1, a2 & max); +} + +static const TCGOutOpBinary outop_rotr = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_rotr, + .out_rri = tgen_rotri, +}; + +static void tgen_sar(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3508, ASRV, type, a0, a1, a2); +} + +static void tgen_sari(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int max = type == TCG_TYPE_I32 ? 31 : 63; + tcg_out_sbfm(s, type, a0, a1, a2 & max, max); +} + +static const TCGOutOpBinary outop_sar = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_sar, + .out_rri = tgen_sari, +}; + +static void tgen_shl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3508, LSLV, type, a0, a1, a2); +} + +static void tgen_shli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int max = type == TCG_TYPE_I32 ? 31 : 63; + tcg_out_ubfm(s, type, a0, a1, -a2 & max, ~a2 & max); +} + +static const TCGOutOpBinary outop_shl = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shl, + .out_rri = tgen_shli, +}; + +static void tgen_shr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3508, LSRV, type, a0, a1, a2); +} + +static void tgen_shri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int max = type == TCG_TYPE_I32 ? 
31 : 63; + tcg_out_ubfm(s, type, a0, a1, a2 & max, max); +} + +static const TCGOutOpBinary outop_shr = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shr, + .out_rri = tgen_shri, +}; + +static void tgen_sub(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3502, SUB, type, a0, a1, a2); +} + +static const TCGOutOpSubtract outop_sub = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_sub, +}; + +static void tgen_subbo_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3502, SUBS, type, a0, a1, a2); +} + +static void tgen_subbo_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (a2 >= 0) { + tcg_out_insn(s, 3401, SUBSI, type, a0, a1, a2); + } else { + tcg_out_insn(s, 3401, ADDSI, type, a0, a1, -a2); + } +} + +static void tgen_subbo_rir(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, TCGReg a2) +{ + tgen_subbo_rrr(s, type, a0, TCG_REG_XZR, a2); +} + +static void tgen_subbo_rii(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, tcg_target_long a2) +{ + if (a2 == 0) { + tgen_subbo_rrr(s, type, a0, TCG_REG_XZR, TCG_REG_XZR); + return; } -#undef REG0 + /* + * We want to allow a1 to be zero for the benefit of negation via + * subtraction. However, that leaves open the possibility of + * adding 0 +/- const, and the immediate add/sub instructions + * encode XSP not XZR. Since we have 0 - non-zero, borrow is + * always set. + */ + tcg_out_movi(s, type, a0, -a2); + tcg_out_set_borrow(s); } +static const TCGOutOpAddSubCarry outop_subbo = { + .base.static_constraint = C_O1_I2(r, rZ, rA), + .out_rrr = tgen_subbo_rrr, + .out_rri = tgen_subbo_rri, + .out_rir = tgen_subbo_rir, + .out_rii = tgen_subbo_rii, +}; + +static void tgen_subbi_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3503, SBC, type, a0, a1, a2); +} + +static void tgen_subbi_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tgen_addci_rri(s, type, a0, a1, ~a2); +} + +static const TCGOutOpAddSubCarry outop_subbi = { + .base.static_constraint = C_O1_I2(r, rz, rMZ), + .out_rrr = tgen_subbi_rrr, + .out_rri = tgen_subbi_rri, +}; + +static void tgen_subbio_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3503, SBCS, type, a0, a1, a2); +} + +static void tgen_subbio_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tgen_addcio_imm(s, type, a0, a1, ~a2); +} + +static const TCGOutOpAddSubCarry outop_subbio = { + .base.static_constraint = C_O1_I2(r, rz, rMZ), + .out_rrr = tgen_subbio_rrr, + .out_rri = tgen_subbio_rri, +}; + +static void tcg_out_set_borrow(TCGContext *s) +{ + tcg_out_insn(s, 3502, ADDS, TCG_TYPE_I32, + TCG_REG_XZR, TCG_REG_XZR, TCG_REG_XZR); +} + +static void tgen_xor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_insn(s, 3510, EOR, type, a0, a1, a2); +} + +static void tgen_xori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_logicali(s, I3404_EORI, type, a0, a1, a2); +} + +static const TCGOutOpBinary outop_xor = { + .base.static_constraint = C_O1_I2(r, r, rL), + .out_rrr = tgen_xor, + .out_rri = tgen_xori, +}; + +static void tgen_bswap16(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, unsigned flags) +{ + tcg_out_rev(s, TCG_TYPE_I32, MO_16, a0, a1); + if (flags & TCG_BSWAP_OS) { + /* Output must be sign-extended. 
*/ + tcg_out_ext16s(s, type, a0, a0); + } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + /* Output must be zero-extended, but input isn't. */ + tcg_out_ext16u(s, a0, a0); + } +} + +static const TCGOutOpBswap outop_bswap16 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap16, +}; + +static void tgen_bswap32(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, unsigned flags) +{ + tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1); + if (flags & TCG_BSWAP_OS) { + tcg_out_ext32s(s, a0, a0); + } +} + +static const TCGOutOpBswap outop_bswap32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap32, +}; + +static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tcg_out_rev(s, TCG_TYPE_I64, MO_64, a0, a1); +} + +static const TCGOutOpUnary outop_bswap64 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap64, +}; + +static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tgen_sub(s, type, a0, TCG_REG_XZR, a1); +} + +static const TCGOutOpUnary outop_neg = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_neg, +}; + +static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tgen_orc(s, type, a0, TCG_REG_XZR, a1); +} + +static const TCGOutOpUnary outop_not = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_not, +}; + +static void tgen_cset(TCGContext *s, TCGCond cond, TCGReg ret) +{ + /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). */ + tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, ret, TCG_REG_XZR, + TCG_REG_XZR, tcg_invert_cond(cond)); +} + +static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tgen_cmp(s, type, cond, a1, a2); + tgen_cset(s, cond, a0); +} + +static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tgen_cmpi(s, type, cond, a1, a2); + tgen_cset(s, cond, a0); +} + +static const TCGOutOpSetcond outop_setcond = { + .base.static_constraint = C_O1_I2(r, r, rC), + .out_rrr = tgen_setcond, + .out_rri = tgen_setcondi, +}; + +static void tgen_csetm(TCGContext *s, TCGType ext, TCGCond cond, TCGReg ret) +{ + /* Use CSETM alias of CSINV Wd, WZR, WZR, invert(cond). */ + tcg_out_insn(s, 3506, CSINV, ext, ret, TCG_REG_XZR, + TCG_REG_XZR, tcg_invert_cond(cond)); +} + +static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tgen_cmp(s, type, cond, a1, a2); + tgen_csetm(s, type, cond, a0); +} + +static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tgen_cmpi(s, type, cond, a1, a2); + tgen_csetm(s, type, cond, a0); +} + +static const TCGOutOpSetcond outop_negsetcond = { + .base.static_constraint = C_O1_I2(r, r, rC), + .out_rrr = tgen_negsetcond, + .out_rri = tgen_negsetcondi, +}; + +static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2, + TCGArg vt, bool const_vt, TCGArg vf, bool const_vf) +{ + tcg_out_cmp(s, type, cond, c1, c2, const_c2); + tcg_out_insn(s, 3506, CSEL, type, ret, vt, vf, cond); +} + +static const TCGOutOpMovcond outop_movcond = { + .base.static_constraint = C_O1_I4(r, r, rC, rz, rz), + .out = tgen_movcond, +}; + +static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + TCGReg a2, unsigned ofs, unsigned len) +{ + unsigned mask = type == TCG_TYPE_I32 ? 
31 : 63; + + /* + * Since we can't support "0Z" as a constraint, we allow a1 in + * any register. Fix things up as if a matching constraint. + */ + if (a0 != a1) { + if (a0 == a2) { + tcg_out_mov(s, type, TCG_REG_TMP0, a2); + a2 = TCG_REG_TMP0; + } + tcg_out_mov(s, type, a0, a1); + } + tcg_out_bfm(s, type, a0, a2, -ofs & mask, len - 1); +} + +static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + tcg_target_long a2, unsigned ofs, unsigned len) +{ + tgen_andi(s, type, a0, a1, ~MAKE_64BIT_MASK(ofs, len)); +} + +static void tgen_depositz(TCGContext *s, TCGType type, TCGReg a0, TCGReg a2, + unsigned ofs, unsigned len) +{ + int max = type == TCG_TYPE_I32 ? 31 : 63; + tcg_out_ubfm(s, type, a0, a2, -ofs & max, len - 1); +} + +static const TCGOutOpDeposit outop_deposit = { + .base.static_constraint = C_O1_I2(r, rZ, rZ), + .out_rrr = tgen_deposit, + .out_rri = tgen_depositi, + .out_rzr = tgen_depositz, +}; + +static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + if (ofs == 0) { + uint64_t mask = MAKE_64BIT_MASK(0, len); + tcg_out_logicali(s, I3404_ANDI, type, a0, a1, mask); + } else { + tcg_out_ubfm(s, type, a0, a1, ofs, ofs + len - 1); + } +} + +static const TCGOutOpExtract outop_extract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extract, +}; + +static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + tcg_out_sbfm(s, type, a0, a1, ofs, ofs + len - 1); +} + +static const TCGOutOpExtract outop_sextract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_sextract, +}; + +static void tgen_extract2(TCGContext *s, TCGType type, TCGReg a0, + TCGReg a1, TCGReg a2, unsigned shr) +{ + tcg_out_extr(s, type, a0, a2, a1, shr); +} + +static const TCGOutOpExtract2 outop_extract2 = { + .base.static_constraint = C_O1_I2(r, rz, rz), + .out_rrr = tgen_extract2, +}; + +static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, I3312_LDRB, dest, base, offset, 0); +} + +static const TCGOutOpLoad outop_ld8u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8u, +}; + +static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + AArch64Insn insn = type == TCG_TYPE_I32 ? I3312_LDRSBW : I3312_LDRSBX; + tcg_out_ldst(s, insn, dest, base, offset, 0); +} + +static const TCGOutOpLoad outop_ld8s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8s, +}; + +static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, I3312_LDRH, dest, base, offset, 1); +} + +static const TCGOutOpLoad outop_ld16u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16u, +}; + +static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + AArch64Insn insn = type == TCG_TYPE_I32 ? 
I3312_LDRSHW : I3312_LDRSHX; + tcg_out_ldst(s, insn, dest, base, offset, 1); +} + +static const TCGOutOpLoad outop_ld16s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16s, +}; + +static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, I3312_LDRW, dest, base, offset, 2); +} + +static const TCGOutOpLoad outop_ld32u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32u, +}; + +static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, I3312_LDRSWX, dest, base, offset, 2); +} + +static const TCGOutOpLoad outop_ld32s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32s, +}; + +static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, I3312_STRB, data, base, offset, 0); +} + +static const TCGOutOpStore outop_st8 = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tgen_st8_r, +}; + +static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, I3312_STRH, data, base, offset, 1); +} + +static const TCGOutOpStore outop_st16 = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tgen_st16_r, +}; + +static const TCGOutOpStore outop_st = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tcg_out_st, +}; + static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl, unsigned vece, const TCGArg args[TCG_MAX_OP_ARGS], @@ -2951,157 +3333,10 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, } } -static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) +static TCGConstraintSetIndex +tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) { switch (op) { - case INDEX_op_goto_ptr: - return C_O0_I1(r); - - case INDEX_op_ld8u_i32: - case INDEX_op_ld8s_i32: - case INDEX_op_ld16u_i32: - case INDEX_op_ld16s_i32: - case INDEX_op_ld_i32: - case INDEX_op_ld8u_i64: - case INDEX_op_ld8s_i64: - case INDEX_op_ld16u_i64: - case INDEX_op_ld16s_i64: - case INDEX_op_ld32u_i64: - case INDEX_op_ld32s_i64: - case INDEX_op_ld_i64: - case INDEX_op_neg_i32: - case INDEX_op_neg_i64: - case INDEX_op_not_i32: - case INDEX_op_not_i64: - case INDEX_op_bswap16_i32: - case INDEX_op_bswap32_i32: - case INDEX_op_bswap16_i64: - case INDEX_op_bswap32_i64: - case INDEX_op_bswap64_i64: - case INDEX_op_ext8s_i32: - case INDEX_op_ext16s_i32: - case INDEX_op_ext8u_i32: - case INDEX_op_ext16u_i32: - case INDEX_op_ext8s_i64: - case INDEX_op_ext16s_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16u_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extract_i32: - case INDEX_op_extract_i64: - case INDEX_op_sextract_i32: - case INDEX_op_sextract_i64: - return C_O1_I1(r, r); - - case INDEX_op_st8_i32: - case INDEX_op_st16_i32: - case INDEX_op_st_i32: - case INDEX_op_st8_i64: - case INDEX_op_st16_i64: - case INDEX_op_st32_i64: - case INDEX_op_st_i64: - return C_O0_I2(rZ, r); - - case INDEX_op_add_i32: - case INDEX_op_add_i64: - case INDEX_op_sub_i32: - case INDEX_op_sub_i64: - return C_O1_I2(r, r, rA); - - case INDEX_op_setcond_i32: - case INDEX_op_setcond_i64: - case INDEX_op_negsetcond_i32: - case INDEX_op_negsetcond_i64: - return C_O1_I2(r, r, rC); - - case INDEX_op_mul_i32: - case INDEX_op_mul_i64: - case INDEX_op_div_i32: - case INDEX_op_div_i64: - case INDEX_op_divu_i32: - case INDEX_op_divu_i64: - case INDEX_op_rem_i32: - 
case INDEX_op_rem_i64: - case INDEX_op_remu_i32: - case INDEX_op_remu_i64: - case INDEX_op_muluh_i64: - case INDEX_op_mulsh_i64: - return C_O1_I2(r, r, r); - - case INDEX_op_and_i32: - case INDEX_op_and_i64: - case INDEX_op_or_i32: - case INDEX_op_or_i64: - case INDEX_op_xor_i32: - case INDEX_op_xor_i64: - case INDEX_op_andc_i32: - case INDEX_op_andc_i64: - case INDEX_op_orc_i32: - case INDEX_op_orc_i64: - case INDEX_op_eqv_i32: - case INDEX_op_eqv_i64: - return C_O1_I2(r, r, rL); - - case INDEX_op_shl_i32: - case INDEX_op_shr_i32: - case INDEX_op_sar_i32: - case INDEX_op_rotl_i32: - case INDEX_op_rotr_i32: - case INDEX_op_shl_i64: - case INDEX_op_shr_i64: - case INDEX_op_sar_i64: - case INDEX_op_rotl_i64: - case INDEX_op_rotr_i64: - return C_O1_I2(r, r, ri); - - case INDEX_op_clz_i32: - case INDEX_op_ctz_i32: - case INDEX_op_clz_i64: - case INDEX_op_ctz_i64: - return C_O1_I2(r, r, rAL); - - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: - return C_O0_I2(r, rC); - - case INDEX_op_movcond_i32: - case INDEX_op_movcond_i64: - return C_O1_I4(r, r, rC, rZ, rZ); - - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_ld_a64_i64: - return C_O1_I1(r, r); - case INDEX_op_qemu_ld_a32_i128: - case INDEX_op_qemu_ld_a64_i128: - return C_O2_I1(r, r, r); - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st_a64_i32: - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_st_a64_i64: - return C_O0_I2(rZ, r); - case INDEX_op_qemu_st_a32_i128: - case INDEX_op_qemu_st_a64_i128: - return C_O0_I3(rZ, rZ, r); - - case INDEX_op_deposit_i32: - case INDEX_op_deposit_i64: - return C_O1_I2(r, 0, rZ); - - case INDEX_op_extract2_i32: - case INDEX_op_extract2_i64: - return C_O1_I2(r, rZ, rZ); - - case INDEX_op_add2_i32: - case INDEX_op_add2_i64: - case INDEX_op_sub2_i32: - case INDEX_op_sub2_i64: - return C_O2_I4(r, r, rZ, rZ, rA, rMZ); - case INDEX_op_add_vec: case INDEX_op_sub_vec: case INDEX_op_mul_vec: @@ -3147,7 +3382,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) return C_O1_I2(w, 0, w); default: - g_assert_not_reached(); + return C_NotImplemented; } } diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h index 8bd9e6a5eb4..3f3df5176d9 100644 --- a/tcg/aarch64/tcg-target.h +++ b/tcg/aarch64/tcg-target.h @@ -13,8 +13,6 @@ #ifndef AARCH64_TCG_TARGET_H #define AARCH64_TCG_TARGET_H -#include "host/cpuinfo.h" - #define TCG_TARGET_INSN_UNIT_SIZE 4 #define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) @@ -47,130 +45,8 @@ typedef enum { TCG_AREG0 = TCG_REG_X19, } TCGReg; -#define TCG_TARGET_NB_REGS 64 - -/* used for function call generation */ -#define TCG_REG_CALL_STACK TCG_REG_SP -#define TCG_TARGET_STACK_ALIGN 16 -#define TCG_TARGET_CALL_STACK_OFFSET 0 -#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL -#ifdef CONFIG_DARWIN -# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL -#else -# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN -#endif -#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL - -#define have_lse (cpuinfo & CPUINFO_LSE) -#define have_lse2 (cpuinfo & CPUINFO_LSE2) +#define TCG_REG_ZERO TCG_REG_XZR -/* optional instructions */ -#define TCG_TARGET_HAS_div_i32 1 -#define TCG_TARGET_HAS_rem_i32 1 -#define TCG_TARGET_HAS_ext8s_i32 1 -#define TCG_TARGET_HAS_ext16s_i32 1 -#define TCG_TARGET_HAS_ext8u_i32 1 -#define TCG_TARGET_HAS_ext16u_i32 1 -#define TCG_TARGET_HAS_bswap16_i32 1 -#define TCG_TARGET_HAS_bswap32_i32 1 -#define TCG_TARGET_HAS_not_i32 1 
-#define TCG_TARGET_HAS_rot_i32 1 -#define TCG_TARGET_HAS_andc_i32 1 -#define TCG_TARGET_HAS_orc_i32 1 -#define TCG_TARGET_HAS_eqv_i32 1 -#define TCG_TARGET_HAS_nand_i32 0 -#define TCG_TARGET_HAS_nor_i32 0 -#define TCG_TARGET_HAS_clz_i32 1 -#define TCG_TARGET_HAS_ctz_i32 1 -#define TCG_TARGET_HAS_ctpop_i32 0 -#define TCG_TARGET_HAS_deposit_i32 1 -#define TCG_TARGET_HAS_extract_i32 1 -#define TCG_TARGET_HAS_sextract_i32 1 -#define TCG_TARGET_HAS_extract2_i32 1 -#define TCG_TARGET_HAS_negsetcond_i32 1 -#define TCG_TARGET_HAS_add2_i32 1 -#define TCG_TARGET_HAS_sub2_i32 1 -#define TCG_TARGET_HAS_mulu2_i32 0 -#define TCG_TARGET_HAS_muls2_i32 0 -#define TCG_TARGET_HAS_muluh_i32 0 -#define TCG_TARGET_HAS_mulsh_i32 0 -#define TCG_TARGET_HAS_extr_i64_i32 0 -#define TCG_TARGET_HAS_qemu_st8_i32 0 - -#define TCG_TARGET_HAS_div_i64 1 -#define TCG_TARGET_HAS_rem_i64 1 -#define TCG_TARGET_HAS_ext8s_i64 1 -#define TCG_TARGET_HAS_ext16s_i64 1 -#define TCG_TARGET_HAS_ext32s_i64 1 -#define TCG_TARGET_HAS_ext8u_i64 1 -#define TCG_TARGET_HAS_ext16u_i64 1 -#define TCG_TARGET_HAS_ext32u_i64 1 -#define TCG_TARGET_HAS_bswap16_i64 1 -#define TCG_TARGET_HAS_bswap32_i64 1 -#define TCG_TARGET_HAS_bswap64_i64 1 -#define TCG_TARGET_HAS_not_i64 1 -#define TCG_TARGET_HAS_rot_i64 1 -#define TCG_TARGET_HAS_andc_i64 1 -#define TCG_TARGET_HAS_orc_i64 1 -#define TCG_TARGET_HAS_eqv_i64 1 -#define TCG_TARGET_HAS_nand_i64 0 -#define TCG_TARGET_HAS_nor_i64 0 -#define TCG_TARGET_HAS_clz_i64 1 -#define TCG_TARGET_HAS_ctz_i64 1 -#define TCG_TARGET_HAS_ctpop_i64 0 -#define TCG_TARGET_HAS_deposit_i64 1 -#define TCG_TARGET_HAS_extract_i64 1 -#define TCG_TARGET_HAS_sextract_i64 1 -#define TCG_TARGET_HAS_extract2_i64 1 -#define TCG_TARGET_HAS_negsetcond_i64 1 -#define TCG_TARGET_HAS_add2_i64 1 -#define TCG_TARGET_HAS_sub2_i64 1 -#define TCG_TARGET_HAS_mulu2_i64 0 -#define TCG_TARGET_HAS_muls2_i64 0 -#define TCG_TARGET_HAS_muluh_i64 1 -#define TCG_TARGET_HAS_mulsh_i64 1 - -/* - * Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load, - * which requires writable pages. We must defer to the helper for user-only, - * but in system mode all ram is writable for the host. 
- */ -#ifdef CONFIG_USER_ONLY -#define TCG_TARGET_HAS_qemu_ldst_i128 have_lse2 -#else -#define TCG_TARGET_HAS_qemu_ldst_i128 1 -#endif - -#define TCG_TARGET_HAS_tst 1 - -#define TCG_TARGET_HAS_v64 1 -#define TCG_TARGET_HAS_v128 1 -#define TCG_TARGET_HAS_v256 0 - -#define TCG_TARGET_HAS_andc_vec 1 -#define TCG_TARGET_HAS_orc_vec 1 -#define TCG_TARGET_HAS_nand_vec 0 -#define TCG_TARGET_HAS_nor_vec 0 -#define TCG_TARGET_HAS_eqv_vec 0 -#define TCG_TARGET_HAS_not_vec 1 -#define TCG_TARGET_HAS_neg_vec 1 -#define TCG_TARGET_HAS_abs_vec 1 -#define TCG_TARGET_HAS_roti_vec 0 -#define TCG_TARGET_HAS_rots_vec 0 -#define TCG_TARGET_HAS_rotv_vec 0 -#define TCG_TARGET_HAS_shi_vec 1 -#define TCG_TARGET_HAS_shs_vec 0 -#define TCG_TARGET_HAS_shv_vec 1 -#define TCG_TARGET_HAS_mul_vec 1 -#define TCG_TARGET_HAS_sat_vec 1 -#define TCG_TARGET_HAS_minmax_vec 1 -#define TCG_TARGET_HAS_bitsel_vec 1 -#define TCG_TARGET_HAS_cmpsel_vec 0 -#define TCG_TARGET_HAS_tst_vec 1 - -#define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_NEED_LDST_LABELS -#define TCG_TARGET_NEED_POOL_LABELS +#define TCG_TARGET_NB_REGS 64 #endif /* AARCH64_TCG_TARGET_H */ diff --git a/tcg/aarch64/tcg-target.opc.h b/tcg/aarch64/tcg-target.opc.h deleted file mode 100644 index bce30accd93..00000000000 --- a/tcg/aarch64/tcg-target.opc.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright (c) 2019 Linaro - * - * This work is licensed under the terms of the GNU GPL, version 2 or - * (at your option) any later version. - * - * See the COPYING file in the top-level directory for details. - * - * Target-specific opcodes for host vector expansion. These will be - * emitted by tcg_expand_vec_op. For those familiar with GCC internals, - * consider these to be UNSPEC with names. - */ - -DEF(aa64_sshl_vec, 1, 2, 0, IMPLVEC) -DEF(aa64_sli_vec, 1, 2, 1, IMPLVEC) diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h index 229ae258ac1..16b1193228c 100644 --- a/tcg/arm/tcg-target-con-set.h +++ b/tcg/arm/tcg-target-con-set.h @@ -30,6 +30,9 @@ C_O1_I2(r, r, rI) C_O1_I2(r, r, rIK) C_O1_I2(r, r, rIN) C_O1_I2(r, r, ri) +C_O1_I2(r, rI, r) +C_O1_I2(r, rI, rIK) +C_O1_I2(r, rI, rIN) C_O1_I2(r, rZ, rZ) C_O1_I2(w, 0, w) C_O1_I2(w, w, w) @@ -42,5 +45,3 @@ C_O1_I4(r, r, rIN, rIK, 0) C_O2_I1(e, p, q) C_O2_I2(e, p, q, q) C_O2_I2(r, r, r, r) -C_O2_I4(r, r, r, r, rIN, rIK) -C_O2_I4(r, r, rI, rI, rIN, rIK) diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h new file mode 100644 index 00000000000..3bbbde5d59e --- /dev/null +++ b/tcg/arm/tcg-target-has.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific opcode support + * Copyright (c) 2008 Fabrice Bellard + * Copyright (c) 2008 Andrzej Zaborowski + */ + +#ifndef TCG_TARGET_HAS_H +#define TCG_TARGET_HAS_H + +extern int arm_arch; + +#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7) + +#ifdef __ARM_ARCH_EXT_IDIV__ +#define use_idiv_instructions 1 +#else +extern bool use_idiv_instructions; +#endif +#ifdef __ARM_NEON__ +#define use_neon_instructions 1 +#else +extern bool use_neon_instructions; +#endif + +/* optional instructions */ +#define TCG_TARGET_HAS_qemu_ldst_i128 0 +#define TCG_TARGET_HAS_tst 1 + +#define TCG_TARGET_HAS_v64 use_neon_instructions +#define TCG_TARGET_HAS_v128 use_neon_instructions +#define TCG_TARGET_HAS_v256 0 + +#define TCG_TARGET_HAS_andc_vec 1 +#define TCG_TARGET_HAS_orc_vec 1 +#define TCG_TARGET_HAS_nand_vec 0 +#define TCG_TARGET_HAS_nor_vec 0 +#define TCG_TARGET_HAS_eqv_vec 0 +#define TCG_TARGET_HAS_not_vec 1 +#define 
TCG_TARGET_HAS_neg_vec 1 +#define TCG_TARGET_HAS_abs_vec 1 +#define TCG_TARGET_HAS_roti_vec 0 +#define TCG_TARGET_HAS_rots_vec 0 +#define TCG_TARGET_HAS_rotv_vec 0 +#define TCG_TARGET_HAS_shi_vec 1 +#define TCG_TARGET_HAS_shs_vec 0 +#define TCG_TARGET_HAS_shv_vec 0 +#define TCG_TARGET_HAS_mul_vec 1 +#define TCG_TARGET_HAS_sat_vec 1 +#define TCG_TARGET_HAS_minmax_vec 1 +#define TCG_TARGET_HAS_bitsel_vec 1 +#define TCG_TARGET_HAS_cmpsel_vec 0 +#define TCG_TARGET_HAS_tst_vec 1 + +static inline bool +tcg_target_extract_valid(TCGType type, unsigned ofs, unsigned len) +{ + if (use_armv7_instructions) { + return true; /* SBFX or UBFX */ + } + switch (len) { + case 8: /* SXTB or UXTB */ + case 16: /* SXTH or UXTH */ + return (ofs % 8) == 0; + } + return false; +} + +#define TCG_TARGET_extract_valid tcg_target_extract_valid +#define TCG_TARGET_sextract_valid tcg_target_extract_valid +#define TCG_TARGET_deposit_valid(type, ofs, len) use_armv7_instructions + +#endif diff --git a/tcg/arm/tcg-target-mo.h b/tcg/arm/tcg-target-mo.h new file mode 100644 index 00000000000..12542dfd1cc --- /dev/null +++ b/tcg/arm/tcg-target-mo.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific memory model + * Copyright (c) 2008 Fabrice Bellard + * Copyright (c) 2008 Andrzej Zaborowski + */ + +#ifndef TCG_TARGET_MO_H +#define TCG_TARGET_MO_H + +#define TCG_TARGET_DEFAULT_MO 0 + +#endif diff --git a/tcg/arm/tcg-target-opc.h.inc b/tcg/arm/tcg-target-opc.h.inc new file mode 100644 index 00000000000..70394e0282a --- /dev/null +++ b/tcg/arm/tcg-target-opc.h.inc @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2019 Linaro + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. + * + * See the COPYING file in the top-level directory for details. + * + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, + * consider these to be UNSPEC with names. + */ + +DEF(arm_sli_vec, 1, 2, 1, TCG_OPF_VECTOR) +DEF(arm_sshl_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(arm_ushl_vec, 1, 2, 0, TCG_OPF_VECTOR) diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index 3de5f50b62d..836894b16ad 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -23,8 +23,6 @@ */ #include "elf.h" -#include "../tcg-ldst.c.inc" -#include "../tcg-pool.c.inc" int arm_arch = __ARM_ARCH; @@ -35,6 +33,14 @@ bool use_idiv_instructions; bool use_neon_instructions; #endif +/* Used for function call generation. */ +#define TCG_TARGET_STACK_ALIGN 8 +#define TCG_TARGET_CALL_STACK_OFFSET 0 +#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN +#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN +#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF + #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", @@ -172,6 +178,8 @@ typedef enum { INSN_DMB_ISH = 0xf57ff05b, INSN_DMB_MCR = 0xee070fba, + INSN_MSRI_CPSR = 0x0360f000, + /* Architected nop introduced in v6k. */ /* ??? This is an MSR (imm) 0,0,0 insn. 
Anyone know if this also Just So Happened to do nothing on pre-v6k so that we @@ -670,14 +678,8 @@ static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt, tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0); } -static void __attribute__((unused)) -tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm) -{ - tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1); -} - -static void __attribute__((unused)) -tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, int imm8) +static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, + TCGReg rn, int imm8) { tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0); } @@ -874,22 +876,39 @@ static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc, * Emit either the reg,imm or reg,reg form of a data-processing insn. * rhs must satisfy the "rIK" constraint. */ +static void tcg_out_dat_IK(TCGContext *s, ARMCond cond, ARMInsn opc, + ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs) +{ + int imm12 = encode_imm(rhs); + if (imm12 < 0) { + imm12 = encode_imm_nofail(~rhs); + opc = opinv; + } + tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); +} + static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc, ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs, bool rhs_is_const) { if (rhs_is_const) { - int imm12 = encode_imm(rhs); - if (imm12 < 0) { - imm12 = encode_imm_nofail(~rhs); - opc = opinv; - } - tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); + tcg_out_dat_IK(s, cond, opc, opinv, dst, lhs, rhs); } else { tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); } } +static void tcg_out_dat_IN(TCGContext *s, ARMCond cond, ARMInsn opc, + ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs) +{ + int imm12 = encode_imm(rhs); + if (imm12 < 0) { + imm12 = encode_imm_nofail(-rhs); + opc = opneg; + } + tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); +} + static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc, ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs, bool rhs_is_const) @@ -898,52 +917,12 @@ static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc, * rhs must satisfy the "rIN" constraint. 
*/ if (rhs_is_const) { - int imm12 = encode_imm(rhs); - if (imm12 < 0) { - imm12 = encode_imm_nofail(-rhs); - opc = opneg; - } - tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12); + tcg_out_dat_IN(s, cond, opc, opneg, dst, lhs, rhs); } else { tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0)); } } -static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd, - TCGReg rn, TCGReg rm) -{ - /* mul */ - tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn); -} - -static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0, - TCGReg rd1, TCGReg rn, TCGReg rm) -{ - /* umull */ - tcg_out32(s, (cond << 28) | 0x00800090 | - (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); -} - -static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0, - TCGReg rd1, TCGReg rn, TCGReg rm) -{ - /* smull */ - tcg_out32(s, (cond << 28) | 0x00c00090 | - (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); -} - -static void tcg_out_sdiv(TCGContext *s, ARMCond cond, - TCGReg rd, TCGReg rn, TCGReg rm) -{ - tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); -} - -static void tcg_out_udiv(TCGContext *s, ARMCond cond, - TCGReg rd, TCGReg rn, TCGReg rm) -{ - tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8)); -} - static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn) { /* sxtb */ @@ -992,56 +971,98 @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn) g_assert_not_reached(); } -static void tcg_out_bswap16(TCGContext *s, ARMCond cond, - TCGReg rd, TCGReg rn, int flags) +static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + TCGReg a2, unsigned ofs, unsigned len) { - if (flags & TCG_BSWAP_OS) { - /* revsh */ - tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn); + /* bfi/bfc */ + tcg_out32(s, 0x07c00010 | (COND_AL << 28) | (a0 << 12) | a1 + | (ofs << 7) | ((ofs + len - 1) << 16)); +} + +static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + tcg_target_long a2, unsigned ofs, unsigned len) +{ + /* bfi becomes bfc with rn == 15. */ + tgen_deposit(s, type, a0, a1, 15, ofs, len); +} + +static const TCGOutOpDeposit outop_deposit = { + .base.static_constraint = C_O1_I2(r, 0, rZ), + .out_rrr = tgen_deposit, + .out_rri = tgen_depositi, +}; + +static void tgen_extract(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn, + unsigned ofs, unsigned len) +{ + /* According to gcc, AND can be faster. 
*/ + if (ofs == 0 && len <= 8) { + tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, + encode_imm_nofail((1 << len) - 1)); return; } - /* rev16 */ - tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn); - if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + if (use_armv7_instructions) { + /* ubfx */ + tcg_out32(s, 0x07e00050 | (COND_AL << 28) | (rd << 12) | rn + | (ofs << 7) | ((len - 1) << 16)); + return; + } + + assert(ofs % 8 == 0); + switch (len) { + case 8: + /* uxtb */ + tcg_out32(s, 0x06ef0070 | (COND_AL << 28) | + (rd << 12) | (ofs << 7) | rn); + break; + case 16: /* uxth */ - tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd); + tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | + (rd << 12) | (ofs << 7) | rn); + break; + default: + g_assert_not_reached(); } } -static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn) -{ - /* rev */ - tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn); -} +static const TCGOutOpExtract outop_extract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extract, +}; -static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd, - TCGArg a1, int ofs, int len, bool const_a1) +static void tgen_sextract(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn, + unsigned ofs, unsigned len) { - if (const_a1) { - /* bfi becomes bfc with rn == 15. */ - a1 = 15; + if (use_armv7_instructions) { + /* sbfx */ + tcg_out32(s, 0x07a00050 | (COND_AL << 28) | (rd << 12) | rn + | (ofs << 7) | ((len - 1) << 16)); + return; } - /* bfi/bfc */ - tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1 - | (ofs << 7) | ((ofs + len - 1) << 16)); -} -static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd, - TCGReg rn, int ofs, int len) -{ - /* ubfx */ - tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn - | (ofs << 7) | ((len - 1) << 16)); + assert(ofs % 8 == 0); + switch (len) { + case 8: + /* sxtb */ + tcg_out32(s, 0x06af0070 | (COND_AL << 28) | + (rd << 12) | (ofs << 7) | rn); + break; + case 16: + /* sxth */ + tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | + (rd << 12) | (ofs << 7) | rn); + break; + default: + g_assert_not_reached(); + } } -static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd, - TCGReg rn, int ofs, int len) -{ - /* sbfx */ - tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn - | (ofs << 7) | ((len - 1) << 16)); -} +static const TCGOutOpExtract outop_sextract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_sextract, +}; + static void tcg_out_ld32u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn, int32_t offset) @@ -1063,66 +1084,6 @@ static void tcg_out_st32(TCGContext *s, ARMCond cond, tcg_out_st32_12(s, cond, rd, rn, offset); } -static void tcg_out_ld16u(TCGContext *s, ARMCond cond, - TCGReg rd, TCGReg rn, int32_t offset) -{ - if (offset > 0xff || offset < -0xff) { - tcg_out_movi32(s, cond, TCG_REG_TMP, offset); - tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP); - } else - tcg_out_ld16u_8(s, cond, rd, rn, offset); -} - -static void tcg_out_ld16s(TCGContext *s, ARMCond cond, - TCGReg rd, TCGReg rn, int32_t offset) -{ - if (offset > 0xff || offset < -0xff) { - tcg_out_movi32(s, cond, TCG_REG_TMP, offset); - tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP); - } else - tcg_out_ld16s_8(s, cond, rd, rn, offset); -} - -static void tcg_out_st16(TCGContext *s, ARMCond cond, - TCGReg rd, TCGReg rn, int32_t offset) -{ - if (offset > 0xff || offset < -0xff) { - tcg_out_movi32(s, cond, TCG_REG_TMP, offset); - tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP); - } else - 
tcg_out_st16_8(s, cond, rd, rn, offset); -} - -static void tcg_out_ld8u(TCGContext *s, ARMCond cond, - TCGReg rd, TCGReg rn, int32_t offset) -{ - if (offset > 0xfff || offset < -0xfff) { - tcg_out_movi32(s, cond, TCG_REG_TMP, offset); - tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP); - } else - tcg_out_ld8_12(s, cond, rd, rn, offset); -} - -static void tcg_out_ld8s(TCGContext *s, ARMCond cond, - TCGReg rd, TCGReg rn, int32_t offset) -{ - if (offset > 0xff || offset < -0xff) { - tcg_out_movi32(s, cond, TCG_REG_TMP, offset); - tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP); - } else - tcg_out_ld8s_8(s, cond, rd, rn, offset); -} - -static void tcg_out_st8(TCGContext *s, ARMCond cond, - TCGReg rd, TCGReg rn, int32_t offset) -{ - if (offset > 0xfff || offset < -0xfff) { - tcg_out_movi32(s, cond, TCG_REG_TMP, offset); - tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP); - } else - tcg_out_st8_12(s, cond, rd, rn, offset); -} - /* * The _goto case is normally between TBs within the same code buffer, and * with the code buffer limited to 16MB we wouldn't need the long case. @@ -1182,7 +1143,12 @@ static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l) } } -static void tcg_out_mb(TCGContext *s, TCGArg a0) +static void tcg_out_br(TCGContext *s, TCGLabel *l) +{ + tcg_out_goto_label(s, COND_AL, l); +} + +static void tcg_out_mb(TCGContext *s, unsigned a0) { if (use_armv7_instructions) { tcg_out32(s, INSN_DMB_ISH); @@ -1191,44 +1157,53 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) } } -static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a, - TCGArg b, int b_const) +static TCGCond tgen_cmp(TCGContext *s, TCGCond cond, TCGReg a, TCGReg b) +{ + if (is_tst_cond(cond)) { + tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0)); + return tcg_tst_eqne_cond(cond); + } + tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, a, b, SHIFT_IMM_LSL(0)); + return cond; +} + +static TCGCond tgen_cmpi(TCGContext *s, TCGCond cond, TCGReg a, TCGArg b) { + int imm12; + if (!is_tst_cond(cond)) { - tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const); + tcg_out_dat_IN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b); return cond; } - cond = tcg_tst_eqne_cond(cond); - if (b_const) { - int imm12 = encode_imm(b); - - /* - * The compare constraints allow rIN, but TST does not support N. - * Be prepared to load the constant into a scratch register. - */ - if (imm12 >= 0) { - tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12); - return cond; - } + /* + * The compare constraints allow rIN, but TST does not support N. + * Be prepared to load the constant into a scratch register. 
+ */ + imm12 = encode_imm(b); + if (imm12 >= 0) { + tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12); + } else { tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b); - b = TCG_REG_TMP; + tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, + a, TCG_REG_TMP, SHIFT_IMM_LSL(0)); } - tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0)); - return cond; + return tcg_tst_eqne_cond(cond); } -static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args, - const int *const_args) +static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a, + TCGArg b, int b_const) { - TCGReg al = args[0]; - TCGReg ah = args[1]; - TCGArg bl = args[2]; - TCGArg bh = args[3]; - TCGCond cond = args[4]; - int const_bl = const_args[2]; - int const_bh = const_args[3]; + if (b_const) { + return tgen_cmpi(s, cond, a, b); + } else { + return tgen_cmp(s, cond, a, b); + } +} +static TCGCond tcg_out_cmp2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, + TCGArg bl, bool const_bl, TCGArg bh, bool const_bh) +{ switch (cond) { case TCG_COND_EQ: case TCG_COND_NE: @@ -1407,8 +1382,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) #define MIN_TLB_MASK_TABLE_OFS -256 static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, - TCGReg addrlo, TCGReg addrhi, - MemOpIdx oi, bool is_ld) + TCGReg addr, MemOpIdx oi, bool is_ld) { TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); @@ -1417,14 +1391,14 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, if (tcg_use_softmmu) { *h = (HostAddress){ .cond = COND_AL, - .base = addrlo, + .base = addr, .index = TCG_REG_R1, .index_scratch = true, }; } else { *h = (HostAddress){ .cond = COND_AL, - .base = addrlo, + .base = addr, .index = guest_base ? TCG_REG_GUEST_BASE : -1, .index_scratch = false, }; @@ -1444,8 +1418,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addrlo; - ldst->addrhi_reg = addrhi; + ldst->addr_reg = addr; /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); @@ -1453,30 +1426,19 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); /* Extract the tlb index from the address into R0. */ - tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, - SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS)); + tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addr, + SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)); /* * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. - * Load the tlb comparator into R2/R3 and the fast path addend into R1. + * Load the tlb comparator into R2 and the fast path addend into R1. */ - QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN); if (cmp_off == 0) { - if (s->addr_type == TCG_TYPE_I32) { - tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, - TCG_REG_R1, TCG_REG_R0); - } else { - tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, - TCG_REG_R1, TCG_REG_R0); - } + tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); } else { tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); - if (s->addr_type == TCG_TYPE_I32) { - tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); - } else { - tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); - } + tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); } /* Load the tlb addend. 
*/ @@ -1495,14 +1457,14 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, * This leaves the least significant alignment bits unchanged, and of * course must be zero. */ - t_addr = addrlo; + t_addr = addr; if (a_mask < s_mask) { t_addr = TCG_REG_R0; tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr, - addrlo, s_mask - a_mask); + addr, s_mask - a_mask); } - if (use_armv7_instructions && s->page_bits <= 16) { - tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask)); + if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) { + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask)); tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, t_addr, TCG_REG_TMP, 0); tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, @@ -1510,29 +1472,24 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, } else { if (a_mask) { tcg_debug_assert(a_mask <= 0xff); - tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); + tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask); } tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr, - SHIFT_IMM_LSR(s->page_bits)); + SHIFT_IMM_LSR(TARGET_PAGE_BITS)); tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, - SHIFT_IMM_LSL(s->page_bits)); - } - - if (s->addr_type != TCG_TYPE_I32) { - tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); + SHIFT_IMM_LSL(TARGET_PAGE_BITS)); } } else if (a_mask) { ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addrlo; - ldst->addrhi_reg = addrhi; + ldst->addr_reg = addr; /* We are expecting alignment to max out at 7 */ tcg_debug_assert(a_mask <= 0xff); /* tst addr, #mask */ - tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask); + tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask); } return ldst; @@ -1587,7 +1544,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, tcg_debug_assert((datalo & 1) == 0); tcg_debug_assert(datahi == datalo + 1); /* LDRD requires alignment; double-check that. */ - if (get_alignment_bits(opc) >= MO_64) { + if (memop_alignment_bits(opc) >= MO_64) { if (h.index < 0) { tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0); break; @@ -1629,17 +1586,50 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, } } -static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addrhi, - MemOpIdx oi, TCGType data_type) +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data, + TCGReg addr, MemOpIdx oi) +{ + MemOp opc = get_memop(oi); + TCGLabelQemuLdst *ldst; + HostAddress h; + + ldst = prepare_host_addr(s, &h, addr, oi, true); + if (ldst) { + ldst->type = type; + ldst->datalo_reg = data; + ldst->datahi_reg = -1; + + /* + * This a conditional BL only to load a pointer within this + * opcode into LR for the slow path. We will not be using + * the value for a tail call. 
+ */ + ldst->label_ptr[0] = s->code_ptr; + tcg_out_bl_imm(s, COND_NE, 0); + } + + tcg_out_qemu_ld_direct(s, opc, data, -1, h); + + if (ldst) { + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); + } +} + +static const TCGOutOpQemuLdSt outop_qemu_ld = { + .base.static_constraint = C_O1_I1(r, q), + .out = tgen_qemu_ld, +}; + +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr, MemOpIdx oi) { MemOp opc = get_memop(oi); TCGLabelQemuLdst *ldst; HostAddress h; - ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true); + ldst = prepare_host_addr(s, &h, addr, oi, true); if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = datalo; ldst->datahi_reg = datahi; @@ -1650,14 +1640,20 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, */ ldst->label_ptr[0] = s->code_ptr; tcg_out_bl_imm(s, COND_NE, 0); + } - tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); + tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); + + if (ldst) { ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); - } else { - tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h); } } +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = { + .base.static_constraint = C_O2_I1(e, p, q), + .out = tgen_qemu_ld2, +}; + static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, TCGReg datahi, HostAddress h) { @@ -1691,7 +1687,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, tcg_debug_assert((datalo & 1) == 0); tcg_debug_assert(datahi == datalo + 1); /* STRD requires alignment; double-check that. */ - if (get_alignment_bits(opc) >= MO_64) { + if (memop_alignment_bits(opc) >= MO_64) { if (h.index < 0) { tcg_out_strd_8(s, h.cond, datalo, h.base, 0); } else { @@ -1715,17 +1711,46 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, } } -static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addrhi, - MemOpIdx oi, TCGType data_type) +static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data, + TCGReg addr, MemOpIdx oi) +{ + MemOp opc = get_memop(oi); + TCGLabelQemuLdst *ldst; + HostAddress h; + + ldst = prepare_host_addr(s, &h, addr, oi, false); + if (ldst) { + ldst->type = type; + ldst->datalo_reg = data; + ldst->datahi_reg = -1; + + h.cond = COND_EQ; + tcg_out_qemu_st_direct(s, opc, data, -1, h); + + /* The conditional call is last, as we're going to return here. 
*/ + ldst->label_ptr[0] = s->code_ptr; + tcg_out_bl_imm(s, COND_NE, 0); + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); + } else { + tcg_out_qemu_st_direct(s, opc, data, -1, h); + } +} + +static const TCGOutOpQemuLdSt outop_qemu_st = { + .base.static_constraint = C_O0_I2(q, q), + .out = tgen_qemu_st, +}; + +static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr, MemOpIdx oi) { MemOp opc = get_memop(oi); TCGLabelQemuLdst *ldst; HostAddress h; - ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false); + ldst = prepare_host_addr(s, &h, addr, oi, false); if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = datalo; ldst->datahi_reg = datahi; @@ -1741,6 +1766,11 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, } } +static const TCGOutOpQemuLdSt2 outop_qemu_st2 = { + .base.static_constraint = C_O0_I3(Q, p, q), + .out = tgen_qemu_st2, +}; + static void tcg_out_epilogue(TCGContext *s); static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg) @@ -1780,6 +1810,11 @@ static void tcg_out_goto_tb(TCGContext *s, int which) set_jmp_reset_offset(s, which); } +static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0) +{ + tcg_out_b_reg(s, COND_AL, a0); +} + void tb_target_set_jmp_target(const TranslationBlock *tb, int n, uintptr_t jmp_rx, uintptr_t jmp_rw) { @@ -1799,418 +1834,816 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n, flush_idcache_range(jmp_rx, jmp_rw, 4); } -static void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) -{ - TCGArg a0, a1, a2, a3, a4, a5; - int c; - switch (opc) { - case INDEX_op_goto_ptr: - tcg_out_b_reg(s, COND_AL, args[0]); - break; - case INDEX_op_br: - tcg_out_goto_label(s, COND_AL, arg_label(args[0])); - break; +static void tgen_add(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_ADD, a0, a1, a2, SHIFT_IMM_LSL(0)); +} - case INDEX_op_ld8u_i32: - tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]); - break; - case INDEX_op_ld8s_i32: - tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]); - break; - case INDEX_op_ld16u_i32: - tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]); - break; - case INDEX_op_ld16s_i32: - tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]); - break; - case INDEX_op_ld_i32: - tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]); - break; - case INDEX_op_st8_i32: - tcg_out_st8(s, COND_AL, args[0], args[1], args[2]); - break; - case INDEX_op_st16_i32: - tcg_out_st16(s, COND_AL, args[0], args[1], args[2]); - break; - case INDEX_op_st_i32: - tcg_out_st32(s, COND_AL, args[0], args[1], args[2]); - break; +static void tgen_addi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_IN(s, COND_AL, ARITH_ADD, ARITH_SUB, a0, a1, a2); +} - case INDEX_op_movcond_i32: - /* Constraints mean that v2 is always in the same register as dest, - * so we only need to do "if condition passed, move v1 to dest". 
- */ - c = tcg_out_cmp(s, args[5], args[1], args[2], const_args[2]); - tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV, - ARITH_MVN, args[0], 0, args[3], const_args[3]); - break; - case INDEX_op_add_i32: - tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB, - args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_sub_i32: - if (const_args[1]) { - if (const_args[2]) { - tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]); - } else { - tcg_out_dat_rI(s, COND_AL, ARITH_RSB, - args[0], args[2], args[1], 1); - } - } else { - tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD, - args[0], args[1], args[2], const_args[2]); - } - break; - case INDEX_op_and_i32: - tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC, - args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_andc_i32: - tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND, - args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_or_i32: - c = ARITH_ORR; - goto gen_arith; - case INDEX_op_xor_i32: - c = ARITH_EOR; - /* Fall through. */ - gen_arith: - tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]); - break; - case INDEX_op_add2_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - a3 = args[3], a4 = args[4], a5 = args[5]; - if (a0 == a3 || (a0 == a5 && !const_args[5])) { - a0 = TCG_REG_TMP; - } - tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR, - a0, a2, a4, const_args[4]); - tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC, - a1, a3, a5, const_args[5]); - tcg_out_mov_reg(s, COND_AL, args[0], a0); - break; - case INDEX_op_sub2_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - a3 = args[3], a4 = args[4], a5 = args[5]; - if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) { - a0 = TCG_REG_TMP; - } - if (const_args[2]) { - if (const_args[4]) { - tcg_out_movi32(s, COND_AL, a0, a4); - a4 = a0; - } - tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1); - } else { - tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR, - ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]); - } - if (const_args[3]) { - if (const_args[5]) { - tcg_out_movi32(s, COND_AL, a1, a5); - a5 = a1; - } - tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1); - } else { - tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC, - a1, a3, a5, const_args[5]); - } - tcg_out_mov_reg(s, COND_AL, args[0], a0); - break; - case INDEX_op_neg_i32: - tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0); - break; - case INDEX_op_not_i32: - tcg_out_dat_reg(s, COND_AL, - ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0)); - break; - case INDEX_op_mul_i32: - tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]); - break; - case INDEX_op_mulu2_i32: - tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]); - break; - case INDEX_op_muls2_i32: - tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]); - break; - /* XXX: Perhaps args[2] & 0x1f is wrong */ - case INDEX_op_shl_i32: - c = const_args[2] ? - SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]); - goto gen_shift32; - case INDEX_op_shr_i32: - c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) : - SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]); - goto gen_shift32; - case INDEX_op_sar_i32: - c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) : - SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]); - goto gen_shift32; - case INDEX_op_rotr_i32: - c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) : - SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]); - /* Fall through. 
*/ - gen_shift32: - tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c); - break; +static const TCGOutOpBinary outop_add = { + .base.static_constraint = C_O1_I2(r, r, rIN), + .out_rrr = tgen_add, + .out_rri = tgen_addi, +}; - case INDEX_op_rotl_i32: - if (const_args[2]) { - tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], - ((0x20 - args[2]) & 0x1f) ? - SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) : - SHIFT_IMM_LSL(0)); - } else { - tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20); - tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], - SHIFT_REG_ROR(TCG_REG_TMP)); - } - break; +static void tgen_addco(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_ADD | TO_CPSR, + a0, a1, a2, SHIFT_IMM_LSL(0)); +} - case INDEX_op_ctz_i32: - tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0); - a1 = TCG_REG_TMP; - goto do_clz; - - case INDEX_op_clz_i32: - a1 = args[1]; - do_clz: - a0 = args[0]; - a2 = args[2]; - c = const_args[2]; - if (c && a2 == 32) { - tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0); - break; - } +static void tgen_addco_imm(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_IN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR, + a0, a1, a2); +} + +static const TCGOutOpBinary outop_addco = { + .base.static_constraint = C_O1_I2(r, r, rIN), + .out_rrr = tgen_addco, + .out_rri = tgen_addco_imm, +}; + +static void tgen_addci(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_ADC, a0, a1, a2, SHIFT_IMM_LSL(0)); +} + +static void tgen_addci_imm(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_IK(s, COND_AL, ARITH_ADC, ARITH_SBC, a0, a1, a2); +} + +static const TCGOutOpAddSubCarry outop_addci = { + .base.static_constraint = C_O1_I2(r, r, rIK), + .out_rrr = tgen_addci, + .out_rri = tgen_addci_imm, +}; + +static void tgen_addcio(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_ADC | TO_CPSR, + a0, a1, a2, SHIFT_IMM_LSL(0)); +} + +static void tgen_addcio_imm(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_IK(s, COND_AL, ARITH_ADC | TO_CPSR, ARITH_SBC | TO_CPSR, + a0, a1, a2); +} + +static const TCGOutOpBinary outop_addcio = { + .base.static_constraint = C_O1_I2(r, r, rIK), + .out_rrr = tgen_addcio, + .out_rri = tgen_addcio_imm, +}; + +/* Set C to @c; NZVQ all set to 0. 
*/ +static void tcg_out_movi_apsr_c(TCGContext *s, bool c) +{ + int imm12 = encode_imm_nofail(c << 29); + tcg_out32(s, (COND_AL << 28) | INSN_MSRI_CPSR | 0x80000 | imm12); +} + +static void tcg_out_set_carry(TCGContext *s) +{ + tcg_out_movi_apsr_c(s, 1); +} + +static void tgen_and(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_AND, a0, a1, a2, SHIFT_IMM_LSL(0)); +} + +static void tgen_andi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_IK(s, COND_AL, ARITH_AND, ARITH_BIC, a0, a1, a2); +} + +static const TCGOutOpBinary outop_and = { + .base.static_constraint = C_O1_I2(r, r, rIK), + .out_rrr = tgen_and, + .out_rri = tgen_andi, +}; + +static void tgen_andc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_BIC, a0, a1, a2, SHIFT_IMM_LSL(0)); +} + +static const TCGOutOpBinary outop_andc = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_andc, +}; + +static void tgen_clz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0); + tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0); + tcg_out_mov_reg(s, COND_EQ, a0, a2); +} + +static void tgen_clzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (a2 == 32) { + tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0); + } else { tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0); tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0); - if (c || a0 != a2) { - tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c); - } - break; + tcg_out_movi32(s, COND_EQ, a0, a2); + } +} - case INDEX_op_brcond_i32: - c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]); - tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[3])); - break; - case INDEX_op_setcond_i32: - c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]); - tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], - ARITH_MOV, args[0], 0, 1); - tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], - ARITH_MOV, args[0], 0, 0); - break; - case INDEX_op_negsetcond_i32: - c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]); - tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], - ARITH_MVN, args[0], 0, 0); - tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], - ARITH_MOV, args[0], 0, 0); - break; +static const TCGOutOpBinary outop_clz = { + .base.static_constraint = C_O1_I2(r, r, rIK), + .out_rrr = tgen_clz, + .out_rri = tgen_clzi, +}; - case INDEX_op_brcond2_i32: - c = tcg_out_cmp2(s, args, const_args); - tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5])); - break; - case INDEX_op_setcond2_i32: - c = tcg_out_cmp2(s, args + 1, const_args + 1); - tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1); - tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)], - ARITH_MOV, args[0], 0, 0); - break; +static const TCGOutOpUnary outop_ctpop = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_qemu_ld_a32_i32: - tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); - break; - case INDEX_op_qemu_ld_a64_i32: - tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], - args[3], TCG_TYPE_I32); - break; - case INDEX_op_qemu_ld_a32_i64: - tcg_out_qemu_ld(s, args[0], args[1], args[2], -1, - args[3], TCG_TYPE_I64); - break; - case INDEX_op_qemu_ld_a64_i64: - tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], - args[4], TCG_TYPE_I64); - break; +static void 
tgen_ctz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, a1, 0); + tgen_clz(s, TCG_TYPE_I32, a0, TCG_REG_TMP, a2); +} - case INDEX_op_qemu_st_a32_i32: - tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); - break; - case INDEX_op_qemu_st_a64_i32: - tcg_out_qemu_st(s, args[0], -1, args[1], args[2], - args[3], TCG_TYPE_I32); - break; - case INDEX_op_qemu_st_a32_i64: - tcg_out_qemu_st(s, args[0], args[1], args[2], -1, - args[3], TCG_TYPE_I64); - break; - case INDEX_op_qemu_st_a64_i64: - tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], - args[4], TCG_TYPE_I64); - break; +static void tgen_ctzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, a1, 0); + tgen_clzi(s, TCG_TYPE_I32, a0, TCG_REG_TMP, a2); +} - case INDEX_op_bswap16_i32: - tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]); - break; - case INDEX_op_bswap32_i32: - tcg_out_bswap32(s, COND_AL, args[0], args[1]); - break; +static TCGConstraintSetIndex cset_ctz(TCGType type, unsigned flags) +{ + return use_armv7_instructions ? C_O1_I2(r, r, rIK) : C_NotImplemented; +} - case INDEX_op_deposit_i32: - tcg_out_deposit(s, COND_AL, args[0], args[2], - args[3], args[4], const_args[2]); - break; - case INDEX_op_extract_i32: - tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]); - break; - case INDEX_op_sextract_i32: - tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]); - break; - case INDEX_op_extract2_i32: - /* ??? These optimization vs zero should be generic. */ - /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */ - if (const_args[1]) { - if (const_args[2]) { - tcg_out_movi(s, TCG_TYPE_REG, args[0], 0); - } else { - tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, - args[2], SHIFT_IMM_LSL(32 - args[3])); - } - } else if (const_args[2]) { - tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, - args[1], SHIFT_IMM_LSR(args[3])); - } else { - /* We can do extract2 in 2 insns, vs the 3 required otherwise. */ - tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, - args[2], SHIFT_IMM_LSL(32 - args[3])); - tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP, - args[1], SHIFT_IMM_LSR(args[3])); - } - break; +static const TCGOutOpBinary outop_ctz = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_ctz, + .out_rrr = tgen_ctz, + .out_rri = tgen_ctzi, +}; - case INDEX_op_div_i32: - tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]); - break; - case INDEX_op_divu_i32: - tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]); - break; +static TCGConstraintSetIndex cset_idiv(TCGType type, unsigned flags) +{ + return use_idiv_instructions ? C_O1_I2(r, r, r) : C_NotImplemented; +} - case INDEX_op_mb: - tcg_out_mb(s, args[0]); - break; +static void tgen_divs(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + /* sdiv */ + tcg_out32(s, 0x0710f010 | (COND_AL << 28) | (a0 << 16) | a1 | (a2 << 8)); +} - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_call: /* Always emitted via tcg_out_call. */ - case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ - case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ - case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. 
*/ - case INDEX_op_ext8u_i32: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16u_i32: - default: - g_assert_not_reached(); +static const TCGOutOpBinary outop_divs = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_idiv, + .out_rrr = tgen_divs, +}; + +static const TCGOutOpDivRem outop_divs2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_divu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + /* udiv */ + tcg_out32(s, 0x0730f010 | (COND_AL << 28) | (a0 << 16) | a1 | (a2 << 8)); +} + +static const TCGOutOpBinary outop_divu = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_idiv, + .out_rrr = tgen_divu, +}; + +static const TCGOutOpDivRem outop_divu2 = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_eqv = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_mul(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + /* mul */ + tcg_out32(s, (COND_AL << 28) | 0x90 | (a0 << 16) | (a1 << 8) | a2); +} + +static const TCGOutOpBinary outop_mul = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_mul, +}; + +static void tgen_muls2(TCGContext *s, TCGType type, + TCGReg rd0, TCGReg rd1, TCGReg rn, TCGReg rm) +{ + /* smull */ + tcg_out32(s, (COND_AL << 28) | 0x00c00090 | + (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); +} + +static const TCGOutOpMul2 outop_muls2 = { + .base.static_constraint = C_O2_I2(r, r, r, r), + .out_rrrr = tgen_muls2, +}; + +static const TCGOutOpBinary outop_mulsh = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_mulu2(TCGContext *s, TCGType type, + TCGReg rd0, TCGReg rd1, TCGReg rn, TCGReg rm) +{ + /* umull */ + tcg_out32(s, (COND_AL << 28) | 0x00800090 | + (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn); +} + +static const TCGOutOpMul2 outop_mulu2 = { + .base.static_constraint = C_O2_I2(r, r, r, r), + .out_rrrr = tgen_mulu2, +}; + +static const TCGOutOpBinary outop_muluh = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_nand = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_nor = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_or(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_ORR, a0, a1, a2, SHIFT_IMM_LSL(0)); +} + +static void tgen_ori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_imm(s, COND_AL, ARITH_ORR, a0, a1, encode_imm_nofail(a2)); +} + +static const TCGOutOpBinary outop_or = { + .base.static_constraint = C_O1_I2(r, r, rI), + .out_rrr = tgen_or, + .out_rri = tgen_ori, +}; + +static const TCGOutOpBinary outop_orc = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_rems = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_remu = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_rotl = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_rotr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_ROR(a2)); +} + +static void tgen_rotri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_IMM_ROR(a2 & 0x1f)); +} + +static const TCGOutOpBinary outop_rotr = { + .base.static_constraint = 
C_O1_I2(r, r, ri), + .out_rrr = tgen_rotr, + .out_rri = tgen_rotri, +}; + +static void tgen_sar(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_ASR(a2)); +} + +static void tgen_sari(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, + SHIFT_IMM_ASR(a2 & 0x1f)); +} + +static const TCGOutOpBinary outop_sar = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_sar, + .out_rri = tgen_sari, +}; + +static void tgen_shl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_LSL(a2)); +} + +static void tgen_shli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, + SHIFT_IMM_LSL(a2 & 0x1f)); +} + +static const TCGOutOpBinary outop_shl = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shl, + .out_rri = tgen_shli, +}; + +static void tgen_shr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_LSR(a2)); +} + +static void tgen_shri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, + SHIFT_IMM_LSR(a2 & 0x1f)); +} + +static const TCGOutOpBinary outop_shr = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shr, + .out_rri = tgen_shri, +}; + +static void tgen_sub(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_SUB, a0, a1, a2, SHIFT_IMM_LSL(0)); +} + +static void tgen_subfi(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, TCGReg a2) +{ + tcg_out_dat_imm(s, COND_AL, ARITH_RSB, a0, a2, encode_imm_nofail(a1)); +} + +static const TCGOutOpSubtract outop_sub = { + .base.static_constraint = C_O1_I2(r, rI, r), + .out_rrr = tgen_sub, + .out_rir = tgen_subfi, +}; + +static void tgen_subbo_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_SUB | TO_CPSR, + a0, a1, a2, SHIFT_IMM_LSL(0)); +} + +static void tgen_subbo_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_IN(s, COND_AL, ARITH_SUB | TO_CPSR, ARITH_ADD | TO_CPSR, + a0, a1, a2); +} + +static void tgen_subbo_rir(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, TCGReg a2) +{ + tcg_out_dat_imm(s, COND_AL, ARITH_RSB | TO_CPSR, + a0, a2, encode_imm_nofail(a1)); +} + +static void tgen_subbo_rii(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, tcg_target_long a2) +{ + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, a2); + tgen_subbo_rir(s, TCG_TYPE_I32, a0, a1, TCG_REG_TMP); +} + +static const TCGOutOpAddSubCarry outop_subbo = { + .base.static_constraint = C_O1_I2(r, rI, rIN), + .out_rrr = tgen_subbo_rrr, + .out_rri = tgen_subbo_rri, + .out_rir = tgen_subbo_rir, + .out_rii = tgen_subbo_rii, +}; + +static void tgen_subbi_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_SBC, + a0, a1, a2, SHIFT_IMM_LSL(0)); +} + +static void tgen_subbi_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_IK(s, COND_AL, ARITH_SBC, ARITH_ADC, a0, a1, a2); +} + +static void tgen_subbi_rir(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, TCGReg a2) +{ + tcg_out_dat_imm(s, COND_AL, 
ARITH_RSC, a0, a2, encode_imm_nofail(a1)); +} + +static void tgen_subbi_rii(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, tcg_target_long a2) +{ + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, a2); + tgen_subbi_rir(s, TCG_TYPE_I32, a0, a1, TCG_REG_TMP); +} + +static const TCGOutOpAddSubCarry outop_subbi = { + .base.static_constraint = C_O1_I2(r, rI, rIK), + .out_rrr = tgen_subbi_rrr, + .out_rri = tgen_subbi_rri, + .out_rir = tgen_subbi_rir, + .out_rii = tgen_subbi_rii, +}; + +static void tgen_subbio_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_SBC | TO_CPSR, + a0, a1, a2, SHIFT_IMM_LSL(0)); +} + +static void tgen_subbio_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_IK(s, COND_AL, ARITH_SBC | TO_CPSR, ARITH_ADC | TO_CPSR, + a0, a1, a2); +} + +static void tgen_subbio_rir(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, TCGReg a2) +{ + tcg_out_dat_imm(s, COND_AL, ARITH_RSC | TO_CPSR, + a0, a2, encode_imm_nofail(a1)); +} + +static void tgen_subbio_rii(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, tcg_target_long a2) +{ + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, a2); + tgen_subbio_rir(s, TCG_TYPE_I32, a0, a1, TCG_REG_TMP); +} + +static const TCGOutOpAddSubCarry outop_subbio = { + .base.static_constraint = C_O1_I2(r, rI, rIK), + .out_rrr = tgen_subbio_rrr, + .out_rri = tgen_subbio_rri, + .out_rir = tgen_subbio_rir, + .out_rii = tgen_subbio_rii, +}; + +static void tcg_out_set_borrow(TCGContext *s) +{ + tcg_out_movi_apsr_c(s, 0); /* borrow = !carry */ +} + +static void tgen_xor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_EOR, a0, a1, a2, SHIFT_IMM_LSL(0)); +} + +static void tgen_xori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_dat_imm(s, COND_AL, ARITH_EOR, a0, a1, encode_imm_nofail(a2)); +} + +static const TCGOutOpBinary outop_xor = { + .base.static_constraint = C_O1_I2(r, r, rI), + .out_rrr = tgen_xor, + .out_rri = tgen_xori, +}; + +static void tgen_bswap16(TCGContext *s, TCGType type, + TCGReg rd, TCGReg rn, unsigned flags) +{ + if (flags & TCG_BSWAP_OS) { + /* revsh */ + tcg_out32(s, 0x06ff0fb0 | (COND_AL << 28) | (rd << 12) | rn); + return; + } + + /* rev16 */ + tcg_out32(s, 0x06bf0fb0 | (COND_AL << 28) | (rd << 12) | rn); + if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + tcg_out_ext16u(s, rd, rd); } } -static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) +static const TCGOutOpBswap outop_bswap16 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap16, +}; + +static void tgen_bswap32(TCGContext *s, TCGType type, + TCGReg rd, TCGReg rn, unsigned flags) { - switch (op) { - case INDEX_op_goto_ptr: - return C_O0_I1(r); - - case INDEX_op_ld8u_i32: - case INDEX_op_ld8s_i32: - case INDEX_op_ld16u_i32: - case INDEX_op_ld16s_i32: - case INDEX_op_ld_i32: - case INDEX_op_neg_i32: - case INDEX_op_not_i32: - case INDEX_op_bswap16_i32: - case INDEX_op_bswap32_i32: - case INDEX_op_ext8s_i32: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16u_i32: - case INDEX_op_extract_i32: - case INDEX_op_sextract_i32: - return C_O1_I1(r, r); - - case INDEX_op_st8_i32: - case INDEX_op_st16_i32: - case INDEX_op_st_i32: - return C_O0_I2(r, r); - - case INDEX_op_add_i32: - case INDEX_op_sub_i32: - case INDEX_op_setcond_i32: - case INDEX_op_negsetcond_i32: - return C_O1_I2(r, r, rIN); - - case INDEX_op_and_i32: - case 
INDEX_op_andc_i32: - case INDEX_op_clz_i32: - case INDEX_op_ctz_i32: - return C_O1_I2(r, r, rIK); - - case INDEX_op_mul_i32: - case INDEX_op_div_i32: - case INDEX_op_divu_i32: - return C_O1_I2(r, r, r); - - case INDEX_op_mulu2_i32: - case INDEX_op_muls2_i32: - return C_O2_I2(r, r, r, r); - - case INDEX_op_or_i32: - case INDEX_op_xor_i32: - return C_O1_I2(r, r, rI); - - case INDEX_op_shl_i32: - case INDEX_op_shr_i32: - case INDEX_op_sar_i32: - case INDEX_op_rotl_i32: - case INDEX_op_rotr_i32: - return C_O1_I2(r, r, ri); - - case INDEX_op_brcond_i32: - return C_O0_I2(r, rIN); - case INDEX_op_deposit_i32: - return C_O1_I2(r, 0, rZ); - case INDEX_op_extract2_i32: - return C_O1_I2(r, rZ, rZ); - case INDEX_op_movcond_i32: - return C_O1_I4(r, r, rIN, rIK, 0); - case INDEX_op_add2_i32: - return C_O2_I4(r, r, r, r, rIN, rIK); - case INDEX_op_sub2_i32: - return C_O2_I4(r, r, rI, rI, rIN, rIK); - case INDEX_op_brcond2_i32: - return C_O0_I4(r, r, rI, rI); - case INDEX_op_setcond2_i32: - return C_O1_I4(r, r, r, rI, rI); - - case INDEX_op_qemu_ld_a32_i32: - return C_O1_I1(r, q); - case INDEX_op_qemu_ld_a64_i32: - return C_O1_I2(r, q, q); - case INDEX_op_qemu_ld_a32_i64: - return C_O2_I1(e, p, q); - case INDEX_op_qemu_ld_a64_i64: - return C_O2_I2(e, p, q, q); - case INDEX_op_qemu_st_a32_i32: - return C_O0_I2(q, q); - case INDEX_op_qemu_st_a64_i32: - return C_O0_I3(q, q, q); - case INDEX_op_qemu_st_a32_i64: - return C_O0_I3(Q, p, q); - case INDEX_op_qemu_st_a64_i64: - return C_O0_I4(Q, p, q, q); + /* rev */ + tcg_out32(s, 0x06bf0f30 | (COND_AL << 28) | (rd << 12) | rn); +} + +static const TCGOutOpBswap outop_bswap32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap32, +}; + +static const TCGOutOpUnary outop_bswap64 = { + .base.static_constraint = C_NotImplemented, +}; +static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tgen_subfi(s, type, a0, 0, a1); +} + +static const TCGOutOpUnary outop_neg = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_neg, +}; + +static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tcg_out_dat_reg(s, COND_AL, ARITH_MVN, a0, 0, a1, SHIFT_IMM_LSL(0)); +} + +static const TCGOutOpUnary outop_not = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_not, +}; + +static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg a0, TCGReg a1, TCGLabel *l) +{ + cond = tgen_cmp(s, cond, a0, a1); + tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l); +} + +static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg a0, tcg_target_long a1, TCGLabel *l) +{ + cond = tgen_cmpi(s, cond, a0, a1); + tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l); +} + +static const TCGOutOpBrcond outop_brcond = { + .base.static_constraint = C_O0_I2(r, rIN), + .out_rr = tgen_brcond, + .out_ri = tgen_brcondi, +}; + +static void finish_setcond(TCGContext *s, TCGCond cond, TCGReg ret, bool neg) +{ + tcg_out_movi32(s, tcg_cond_to_arm_cond[tcg_invert_cond(cond)], ret, 0); + tcg_out_movi32(s, tcg_cond_to_arm_cond[cond], ret, neg ? 
-1 : 1); +} + +static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + cond = tgen_cmp(s, cond, a1, a2); + finish_setcond(s, cond, a0, false); +} + +static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + cond = tgen_cmpi(s, cond, a1, a2); + finish_setcond(s, cond, a0, false); +} + +static const TCGOutOpSetcond outop_setcond = { + .base.static_constraint = C_O1_I2(r, r, rIN), + .out_rrr = tgen_setcond, + .out_rri = tgen_setcondi, +}; + +static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + cond = tgen_cmp(s, cond, a1, a2); + finish_setcond(s, cond, a0, true); +} + +static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + cond = tgen_cmpi(s, cond, a1, a2); + finish_setcond(s, cond, a0, true); +} + +static const TCGOutOpSetcond outop_negsetcond = { + .base.static_constraint = C_O1_I2(r, r, rIN), + .out_rrr = tgen_negsetcond, + .out_rri = tgen_negsetcondi, +}; + +static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2, + TCGArg vt, bool const_vt, TCGArg vf, bool consf_vf) +{ + cond = tcg_out_cmp(s, cond, c1, c2, const_c2); + tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[cond], ARITH_MOV, ARITH_MVN, + ret, 0, vt, const_vt); +} + +static const TCGOutOpMovcond outop_movcond = { + .base.static_constraint = C_O1_I4(r, r, rIN, rIK, 0), + .out = tgen_movcond, +}; + +static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, + TCGArg bl, bool const_bl, TCGArg bh, bool const_bh, + TCGLabel *l) +{ + cond = tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh); + tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l); +} + +static const TCGOutOpBrcond2 outop_brcond2 = { + .base.static_constraint = C_O0_I4(r, r, rI, rI), + .out = tgen_brcond2, +}; + +static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg al, TCGReg ah, + TCGArg bl, bool const_bl, + TCGArg bh, bool const_bh) +{ + cond = tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh); + finish_setcond(s, cond, ret, false); +} + +static const TCGOutOpSetcond2 outop_setcond2 = { + .base.static_constraint = C_O1_I4(r, r, r, rI, rI), + .out = tgen_setcond2, +}; + +static void tgen_extract2(TCGContext *s, TCGType type, TCGReg a0, + TCGReg a1, TCGReg a2, unsigned shr) +{ + /* We can do extract2 in 2 insns, vs the 3 required otherwise. 
*/ + tgen_shli(s, TCG_TYPE_I32, TCG_REG_TMP, a2, 32 - shr); + tcg_out_dat_reg(s, COND_AL, ARITH_ORR, a0, TCG_REG_TMP, + a1, SHIFT_IMM_LSR(shr)); +} + +static const TCGOutOpExtract2 outop_extract2 = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_extract2, +}; + +static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg rd, + TCGReg rn, ptrdiff_t offset) +{ + if (offset > 0xfff || offset < -0xfff) { + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset); + tcg_out_ld8_r(s, COND_AL, rd, rn, TCG_REG_TMP); + } else { + tcg_out_ld8_12(s, COND_AL, rd, rn, offset); + } +} + +static const TCGOutOpLoad outop_ld8u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8u, +}; + +static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg rd, + TCGReg rn, ptrdiff_t offset) +{ + if (offset > 0xff || offset < -0xff) { + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset); + tcg_out_ld8s_r(s, COND_AL, rd, rn, TCG_REG_TMP); + } else { + tcg_out_ld8s_8(s, COND_AL, rd, rn, offset); + } +} + +static const TCGOutOpLoad outop_ld8s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8s, +}; + +static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg rd, + TCGReg rn, ptrdiff_t offset) +{ + if (offset > 0xff || offset < -0xff) { + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset); + tcg_out_ld16u_r(s, COND_AL, rd, rn, TCG_REG_TMP); + } else { + tcg_out_ld16u_8(s, COND_AL, rd, rn, offset); + } +} + +static const TCGOutOpLoad outop_ld16u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16u, +}; + +static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg rd, + TCGReg rn, ptrdiff_t offset) +{ + if (offset > 0xff || offset < -0xff) { + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset); + tcg_out_ld16s_r(s, COND_AL, rd, rn, TCG_REG_TMP); + } else { + tcg_out_ld16s_8(s, COND_AL, rd, rn, offset); + } +} + +static const TCGOutOpLoad outop_ld16s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16s, +}; + +static void tgen_st8(TCGContext *s, TCGType type, TCGReg rd, + TCGReg rn, ptrdiff_t offset) +{ + if (offset > 0xfff || offset < -0xfff) { + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset); + tcg_out_st8_r(s, COND_AL, rd, rn, TCG_REG_TMP); + } else { + tcg_out_st8_12(s, COND_AL, rd, rn, offset); + } +} + +static const TCGOutOpStore outop_st8 = { + .base.static_constraint = C_O0_I2(r, r), + .out_r = tgen_st8, +}; + +static void tgen_st16(TCGContext *s, TCGType type, TCGReg rd, + TCGReg rn, ptrdiff_t offset) +{ + if (offset > 0xff || offset < -0xff) { + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset); + tcg_out_st16_r(s, COND_AL, rd, rn, TCG_REG_TMP); + } else { + tcg_out_st16_8(s, COND_AL, rd, rn, offset); + } +} + +static const TCGOutOpStore outop_st16 = { + .base.static_constraint = C_O0_I2(r, r), + .out_r = tgen_st16, +}; + +static const TCGOutOpStore outop_st = { + .base.static_constraint = C_O0_I2(r, r), + .out_r = tcg_out_st, +}; + +static TCGConstraintSetIndex +tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) +{ + switch (op) { case INDEX_op_st_vec: return C_O0_I2(w, r); case INDEX_op_ld_vec: @@ -2254,7 +2687,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_bitsel_vec: return C_O1_I3(w, w, w, w); default: - g_assert_not_reached(); + return C_NotImplemented; } } diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h index fb7261499b3..4f9f877121e 100644 --- a/tcg/arm/tcg-target.h +++ b/tcg/arm/tcg-target.h @@ -26,10 +26,6 @@ #ifndef ARM_TCG_TARGET_H #define ARM_TCG_TARGET_H -extern int arm_arch; - -#define 
use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7) - #define TCG_TARGET_INSN_UNIT_SIZE 4 #define MAX_CODE_GEN_BUFFER_SIZE UINT32_MAX @@ -74,86 +70,4 @@ typedef enum { #define TCG_TARGET_NB_REGS 32 -#ifdef __ARM_ARCH_EXT_IDIV__ -#define use_idiv_instructions 1 -#else -extern bool use_idiv_instructions; -#endif -#ifdef __ARM_NEON__ -#define use_neon_instructions 1 -#else -extern bool use_neon_instructions; -#endif - -/* used for function call generation */ -#define TCG_TARGET_STACK_ALIGN 8 -#define TCG_TARGET_CALL_STACK_OFFSET 0 -#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN -#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN -#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF - -/* optional instructions */ -#define TCG_TARGET_HAS_ext8s_i32 1 -#define TCG_TARGET_HAS_ext16s_i32 1 -#define TCG_TARGET_HAS_ext8u_i32 0 /* and r0, r1, #0xff */ -#define TCG_TARGET_HAS_ext16u_i32 1 -#define TCG_TARGET_HAS_bswap16_i32 1 -#define TCG_TARGET_HAS_bswap32_i32 1 -#define TCG_TARGET_HAS_not_i32 1 -#define TCG_TARGET_HAS_rot_i32 1 -#define TCG_TARGET_HAS_andc_i32 1 -#define TCG_TARGET_HAS_orc_i32 0 -#define TCG_TARGET_HAS_eqv_i32 0 -#define TCG_TARGET_HAS_nand_i32 0 -#define TCG_TARGET_HAS_nor_i32 0 -#define TCG_TARGET_HAS_clz_i32 1 -#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions -#define TCG_TARGET_HAS_ctpop_i32 0 -#define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions -#define TCG_TARGET_HAS_extract_i32 use_armv7_instructions -#define TCG_TARGET_HAS_sextract_i32 use_armv7_instructions -#define TCG_TARGET_HAS_extract2_i32 1 -#define TCG_TARGET_HAS_negsetcond_i32 1 -#define TCG_TARGET_HAS_mulu2_i32 1 -#define TCG_TARGET_HAS_muls2_i32 1 -#define TCG_TARGET_HAS_muluh_i32 0 -#define TCG_TARGET_HAS_mulsh_i32 0 -#define TCG_TARGET_HAS_div_i32 use_idiv_instructions -#define TCG_TARGET_HAS_rem_i32 0 -#define TCG_TARGET_HAS_qemu_st8_i32 0 - -#define TCG_TARGET_HAS_qemu_ldst_i128 0 - -#define TCG_TARGET_HAS_tst 1 - -#define TCG_TARGET_HAS_v64 use_neon_instructions -#define TCG_TARGET_HAS_v128 use_neon_instructions -#define TCG_TARGET_HAS_v256 0 - -#define TCG_TARGET_HAS_andc_vec 1 -#define TCG_TARGET_HAS_orc_vec 1 -#define TCG_TARGET_HAS_nand_vec 0 -#define TCG_TARGET_HAS_nor_vec 0 -#define TCG_TARGET_HAS_eqv_vec 0 -#define TCG_TARGET_HAS_not_vec 1 -#define TCG_TARGET_HAS_neg_vec 1 -#define TCG_TARGET_HAS_abs_vec 1 -#define TCG_TARGET_HAS_roti_vec 0 -#define TCG_TARGET_HAS_rots_vec 0 -#define TCG_TARGET_HAS_rotv_vec 0 -#define TCG_TARGET_HAS_shi_vec 1 -#define TCG_TARGET_HAS_shs_vec 0 -#define TCG_TARGET_HAS_shv_vec 0 -#define TCG_TARGET_HAS_mul_vec 1 -#define TCG_TARGET_HAS_sat_vec 1 -#define TCG_TARGET_HAS_minmax_vec 1 -#define TCG_TARGET_HAS_bitsel_vec 1 -#define TCG_TARGET_HAS_cmpsel_vec 0 -#define TCG_TARGET_HAS_tst_vec 1 - -#define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_NEED_LDST_LABELS -#define TCG_TARGET_NEED_POOL_LABELS - #endif diff --git a/tcg/arm/tcg-target.opc.h b/tcg/arm/tcg-target.opc.h deleted file mode 100644 index d38af9a8088..00000000000 --- a/tcg/arm/tcg-target.opc.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright (c) 2019 Linaro - * - * This work is licensed under the terms of the GNU GPL, version 2 or - * (at your option) any later version. - * - * See the COPYING file in the top-level directory for details. - * - * Target-specific opcodes for host vector expansion. These will be - * emitted by tcg_expand_vec_op. For those familiar with GCC internals, - * consider these to be UNSPEC with names. 
- */ - -DEF(arm_sli_vec, 1, 2, 1, IMPLVEC) -DEF(arm_sshl_vec, 1, 2, 0, IMPLVEC) -DEF(arm_ushl_vec, 1, 2, 0, IMPLVEC) diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h index e24241cfa2d..458d69c3c07 100644 --- a/tcg/i386/tcg-target-con-set.h +++ b/tcg/i386/tcg-target-con-set.h @@ -42,18 +42,19 @@ C_O1_I2(r, 0, reZ) C_O1_I2(r, 0, ri) C_O1_I2(r, 0, rI) C_O1_I2(r, L, L) +C_O1_I2(r, r, r) C_O1_I2(r, r, re) C_O1_I2(r, r, ri) -C_O1_I2(r, r, rI) +C_O1_I2(r, rO, re) C_O1_I2(x, x, x) C_N1_I2(r, r, r) C_N1_I2(r, r, rW) C_O1_I3(x, 0, x, x) C_O1_I3(x, x, x, x) +C_O1_I4(x, x, x, xO, x) C_O1_I4(r, r, reT, r, 0) C_O1_I4(r, r, r, ri, ri) C_O2_I1(r, r, L) C_O2_I2(a, d, a, r) C_O2_I2(r, r, L, L) C_O2_I3(a, d, 0, 1, r) -C_N1_O1_I4(r, r, 0, 1, re, re) diff --git a/tcg/i386/tcg-target-con-str.h b/tcg/i386/tcg-target-con-str.h index cc22db227ba..dbedff1f541 100644 --- a/tcg/i386/tcg-target-con-str.h +++ b/tcg/i386/tcg-target-con-str.h @@ -20,7 +20,7 @@ REGS('r', ALL_GENERAL_REGS) REGS('x', ALL_VECTOR_REGS) REGS('q', ALL_BYTEL_REGS) /* regs that can be used as a byte operand */ REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_ld/st */ -REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_st8_i32 data */ +REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_st MO_8 data */ /* * Define constraint letters for constants: @@ -28,6 +28,7 @@ REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_st8_i32 data */ */ CONST('e', TCG_CT_CONST_S32) CONST('I', TCG_CT_CONST_I32) +CONST('O', TCG_CT_CONST_ZERO) CONST('T', TCG_CT_CONST_TST) CONST('W', TCG_CT_CONST_WSZ) CONST('Z', TCG_CT_CONST_U32) diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h new file mode 100644 index 00000000000..42647fabbd7 --- /dev/null +++ b/tcg/i386/tcg-target-has.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific opcode support + * Copyright (c) 2008 Fabrice Bellard + */ + +#ifndef TCG_TARGET_HAS_H +#define TCG_TARGET_HAS_H + +#include "host/cpuinfo.h" + +#define have_bmi1 (cpuinfo & CPUINFO_BMI1) +#define have_popcnt (cpuinfo & CPUINFO_POPCNT) +#define have_avx1 (cpuinfo & CPUINFO_AVX1) +#define have_avx2 (cpuinfo & CPUINFO_AVX2) +#define have_movbe (cpuinfo & CPUINFO_MOVBE) + +/* + * There are interesting instructions in AVX512, so long as we have AVX512VL, + * which indicates support for EVEX on sizes smaller than 512 bits. + */ +#define have_avx512vl ((cpuinfo & CPUINFO_AVX512VL) && \ + (cpuinfo & CPUINFO_AVX512F)) +#define have_avx512bw ((cpuinfo & CPUINFO_AVX512BW) && have_avx512vl) +#define have_avx512dq ((cpuinfo & CPUINFO_AVX512DQ) && have_avx512vl) +#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl) + +/* optional instructions */ +#if TCG_TARGET_REG_BITS == 64 +/* Keep 32-bit values zero-extended in a register. */ +#define TCG_TARGET_HAS_extr_i64_i32 1 +#endif + +#define TCG_TARGET_HAS_qemu_ldst_i128 \ + (TCG_TARGET_REG_BITS == 64 && (cpuinfo & CPUINFO_ATOMIC_VMOVDQA)) + +#define TCG_TARGET_HAS_tst 1 + +/* We do not support older SSE systems, only beginning with AVX1. 
*/ +#define TCG_TARGET_HAS_v64 have_avx1 +#define TCG_TARGET_HAS_v128 have_avx1 +#define TCG_TARGET_HAS_v256 have_avx2 + +#define TCG_TARGET_HAS_andc_vec 1 +#define TCG_TARGET_HAS_orc_vec have_avx512vl +#define TCG_TARGET_HAS_nand_vec have_avx512vl +#define TCG_TARGET_HAS_nor_vec have_avx512vl +#define TCG_TARGET_HAS_eqv_vec have_avx512vl +#define TCG_TARGET_HAS_not_vec have_avx512vl +#define TCG_TARGET_HAS_neg_vec 0 +#define TCG_TARGET_HAS_abs_vec 1 +#define TCG_TARGET_HAS_roti_vec have_avx512vl +#define TCG_TARGET_HAS_rots_vec 0 +#define TCG_TARGET_HAS_rotv_vec have_avx512vl +#define TCG_TARGET_HAS_shi_vec 1 +#define TCG_TARGET_HAS_shs_vec 1 +#define TCG_TARGET_HAS_shv_vec have_avx2 +#define TCG_TARGET_HAS_mul_vec 1 +#define TCG_TARGET_HAS_sat_vec 1 +#define TCG_TARGET_HAS_minmax_vec 1 +#define TCG_TARGET_HAS_bitsel_vec have_avx512vl +#define TCG_TARGET_HAS_cmpsel_vec 1 +#define TCG_TARGET_HAS_tst_vec have_avx512bw + +#define TCG_TARGET_deposit_valid(type, ofs, len) \ + (((ofs) == 0 && ((len) == 8 || (len) == 16)) || \ + (TCG_TARGET_REG_BITS == 32 && (ofs) == 8 && (len) == 8)) + +/* + * Check for the possibility of low byte/word extraction, high-byte extraction + * and zero-extending 32-bit right-shift. + * + * We cannot sign-extend from high byte to 64-bits without using the + * REX prefix that explicitly excludes access to the high-byte registers. + */ +static inline bool +tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len) +{ + switch (ofs) { + case 0: + switch (len) { + case 8: + case 16: + return true; + case 32: + return type == TCG_TYPE_I64; + } + return false; + case 8: + return len == 8 && type == TCG_TYPE_I32; + } + return false; +} +#define TCG_TARGET_sextract_valid tcg_target_sextract_valid + +static inline bool +tcg_target_extract_valid(TCGType type, unsigned ofs, unsigned len) +{ + if (type == TCG_TYPE_I64 && ofs + len == 32) { + return true; + } + switch (ofs) { + case 0: + return len == 8 || len == 16; + case 8: + return len == 8; + } + return false; +} +#define TCG_TARGET_extract_valid tcg_target_extract_valid + +#endif diff --git a/tcg/i386/tcg-target-mo.h b/tcg/i386/tcg-target-mo.h new file mode 100644 index 00000000000..7567dc7248b --- /dev/null +++ b/tcg/i386/tcg-target-mo.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific memory model + * Copyright (c) 2008 Fabrice Bellard + */ + +#ifndef TCG_TARGET_MO_H +#define TCG_TARGET_MO_H + +/* + * This defines the natural memory order supported by this architecture + * before guarantees made by various barrier instructions. + * + * The x86 has a pretty strong memory ordering which only really + * allows for some stores to be re-ordered after loads. 
+ */ +#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) + +#endif diff --git a/tcg/i386/tcg-target-opc.h.inc b/tcg/i386/tcg-target-opc.h.inc new file mode 100644 index 00000000000..8cc0dbaeafe --- /dev/null +++ b/tcg/i386/tcg-target-opc.h.inc @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019 Linaro + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, + * consider these to be UNSPEC with names. + */ + +DEF(x86_shufps_vec, 1, 2, 1, TCG_OPF_VECTOR) +DEF(x86_blend_vec, 1, 2, 1, TCG_OPF_VECTOR) +DEF(x86_packss_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(x86_packus_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(x86_psrldq_vec, 1, 1, 1, TCG_OPF_VECTOR) +DEF(x86_vperm2i128_vec, 1, 2, 1, TCG_OPF_VECTOR) +DEF(x86_punpckl_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(x86_punpckh_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(x86_vpshldi_vec, 1, 2, 1, TCG_OPF_VECTOR) +DEF(x86_vpshldv_vec, 1, 3, 0, TCG_OPF_VECTOR) +DEF(x86_vpshrdv_vec, 1, 3, 0, TCG_OPF_VECTOR) diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index 9a54ef7f8db..088c6c9264b 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -22,8 +22,25 @@ * THE SOFTWARE. */ -#include "../tcg-ldst.c.inc" -#include "../tcg-pool.c.inc" +/* Used for function call generation. 
*/ +#define TCG_TARGET_STACK_ALIGN 16 +#if defined(_WIN64) +#define TCG_TARGET_CALL_STACK_OFFSET 32 +#else +#define TCG_TARGET_CALL_STACK_OFFSET 0 +#endif +#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL +#if defined(_WIN64) +# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF +# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_VEC +#elif TCG_TARGET_REG_BITS == 64 +# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL +# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL +#else +# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL +# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF +#endif #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { @@ -133,6 +150,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) #define TCG_CT_CONST_I32 0x400 #define TCG_CT_CONST_WSZ 0x800 #define TCG_CT_CONST_TST 0x1000 +#define TCG_CT_CONST_ZERO 0x2000 /* Registers used with L constraint, which are the first argument registers on x86_64, and two random call clobbered registers on @@ -226,6 +244,9 @@ static bool tcg_target_const_match(int64_t val, int ct, if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) { return 1; } + if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + return 1; + } return 0; } @@ -403,12 +424,25 @@ static bool tcg_target_const_match(int64_t val, int ct, #define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16) #define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2) #define OPC_SHRD_Ib (0xac | P_EXT) +#define OPC_STC (0xf9) #define OPC_TESTB (0x84) #define OPC_TESTL (0x85) #define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3) #define OPC_UD2 (0x0b | P_EXT) #define OPC_VPBLENDD (0x02 | P_EXT3A | P_DATA16) #define OPC_VPBLENDVB (0x4c | P_EXT3A | P_DATA16) +#define OPC_VPBLENDMB (0x66 | P_EXT38 | P_DATA16 | P_EVEX) +#define OPC_VPBLENDMW (0x66 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPBLENDMD (0x64 | P_EXT38 | P_DATA16 | P_EVEX) +#define OPC_VPBLENDMQ (0x64 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPCMPB (0x3f | P_EXT3A | P_DATA16 | P_EVEX) +#define OPC_VPCMPUB (0x3e | P_EXT3A | P_DATA16 | P_EVEX) +#define OPC_VPCMPW (0x3f | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPCMPUW (0x3e | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPCMPD (0x1f | P_EXT3A | P_DATA16 | P_EVEX) +#define OPC_VPCMPUD (0x1e | P_EXT3A | P_DATA16 | P_EVEX) +#define OPC_VPCMPQ (0x1f | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPCMPUQ (0x1e | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX) #define OPC_VPINSRB (0x20 | P_EXT3A | P_DATA16) #define OPC_VPINSRW (0xc4 | P_EXT | P_DATA16) #define OPC_VBROADCASTSS (0x18 | P_EXT38 | P_DATA16) @@ -417,6 +451,10 @@ static bool tcg_target_const_match(int64_t val, int ct, #define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16) #define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16) #define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16) +#define OPC_VPMOVM2B (0x28 | P_EXT38 | P_SIMDF3 | P_EVEX) +#define OPC_VPMOVM2W (0x28 | P_EXT38 | P_SIMDF3 | P_VEXW | P_EVEX) +#define OPC_VPMOVM2D (0x38 | P_EXT38 | P_SIMDF3 | P_EVEX) +#define OPC_VPMOVM2Q (0x38 | P_EXT38 | P_SIMDF3 | P_VEXW | P_EVEX) #define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_VEXW) #define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL) #define OPC_VPROLVD (0x15 | P_EXT38 | P_DATA16 | P_EVEX) @@ -442,6 +480,14 @@ static bool tcg_target_const_match(int64_t val, int ct, #define OPC_VPSRLVD (0x45 | P_EXT38 | P_DATA16) #define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_VEXW) #define 
OPC_VPTERNLOGQ (0x25 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPTESTMB (0x26 | P_EXT38 | P_DATA16 | P_EVEX) +#define OPC_VPTESTMW (0x26 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPTESTMD (0x27 | P_EXT38 | P_DATA16 | P_EVEX) +#define OPC_VPTESTMQ (0x27 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX) +#define OPC_VPTESTNMB (0x26 | P_EXT38 | P_SIMDF3 | P_EVEX) +#define OPC_VPTESTNMW (0x26 | P_EXT38 | P_SIMDF3 | P_VEXW | P_EVEX) +#define OPC_VPTESTNMD (0x27 | P_EXT38 | P_SIMDF3 | P_EVEX) +#define OPC_VPTESTNMQ (0x27 | P_EXT38 | P_SIMDF3 | P_VEXW | P_EVEX) #define OPC_VZEROUPPER (0x77 | P_EXT) #define OPC_XCHG_ax_r32 (0x90) #define OPC_XCHG_EvGv (0x87) @@ -658,7 +704,7 @@ static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v, } static void tcg_out_evex_opc(TCGContext *s, int opc, int r, int v, - int rm, int index) + int rm, int index, int aaa, bool z) { /* The entire 4-byte evex prefix; with R' and V' set. */ uint32_t p = 0x08041062; @@ -695,7 +741,9 @@ static void tcg_out_evex_opc(TCGContext *s, int opc, int r, int v, p = deposit32(p, 16, 2, pp); p = deposit32(p, 19, 4, ~v); p = deposit32(p, 23, 1, (opc & P_VEXW) != 0); + p = deposit32(p, 24, 3, aaa); p = deposit32(p, 29, 2, (opc & P_VEXL) != 0); + p = deposit32(p, 31, 1, z); tcg_out32(s, p); tcg_out8(s, opc); @@ -704,13 +752,32 @@ static void tcg_out_evex_opc(TCGContext *s, int opc, int r, int v, static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm) { if (opc & P_EVEX) { - tcg_out_evex_opc(s, opc, r, v, rm, 0); + tcg_out_evex_opc(s, opc, r, v, rm, 0, 0, false); } else { tcg_out_vex_opc(s, opc, r, v, rm, 0); } tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); } +static void tcg_out_vex_modrm_type(TCGContext *s, int opc, + int r, int v, int rm, TCGType type) +{ + if (type == TCG_TYPE_V256) { + opc |= P_VEXL; + } + tcg_out_vex_modrm(s, opc, r, v, rm); +} + +static void tcg_out_evex_modrm_type(TCGContext *s, int opc, int r, int v, + int rm, int aaa, bool z, TCGType type) +{ + if (type == TCG_TYPE_V256) { + opc |= P_VEXL; + } + tcg_out_evex_opc(s, opc, r, v, rm, 0, aaa, z); + tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); +} + /* Output an opcode with a full "rm + (index<carry_live) { tgen_arithr(s, ARITH_XOR, ret, ret); return; } @@ -1102,7 +1168,7 @@ static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val) } } -static inline void tcg_out_mb(TCGContext *s, TCGArg a0) +static void tcg_out_mb(TCGContext *s, unsigned a0) { /* Given the strength of x86 memory ordering, we only need care for store-load ordering. Experimentally, "lock orl $0,0(%esp)" is @@ -1264,16 +1330,31 @@ static inline void tcg_out_rolw_8(TCGContext *s, int reg) static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src) { - /* movzbl */ - tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64); + if (TCG_TARGET_REG_BITS == 32 && src >= 4) { + tcg_out_mov(s, TCG_TYPE_I32, dest, src); + if (dest >= 4) { + tcg_out_modrm(s, OPC_ARITH_EvIz, ARITH_AND, dest); + tcg_out32(s, 0xff); + return; + } + src = dest; + } tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src); } static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { int rexw = type == TCG_TYPE_I32 ? 
0 : P_REXW; - /* movsbl */ - tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64); + + if (TCG_TARGET_REG_BITS == 32 && src >= 4) { + tcg_out_mov(s, TCG_TYPE_I32, dest, src); + if (dest >= 4) { + tcg_out_shifti(s, SHIFT_SHL, dest, 24); + tcg_out_shifti(s, SHIFT_SAR, dest, 24); + return; + } + src = dest; + } tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src); } @@ -1465,6 +1546,11 @@ static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, bool small) } } +static void tcg_out_br(TCGContext *s, TCGLabel *l) +{ + tcg_out_jxx(s, JCC_JMP, l, 0); +} + static int tcg_out_cmp(TCGContext *s, TCGCond cond, TCGArg arg1, TCGArg arg2, int const_arg2, int rexw) { @@ -1562,96 +1648,78 @@ static void tcg_out_brcond(TCGContext *s, int rexw, TCGCond cond, tcg_out_jxx(s, jcc, label, small); } -#if TCG_TARGET_REG_BITS == 32 -static void tcg_out_brcond2(TCGContext *s, const TCGArg *args, - const int *const_args, bool small) +static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg arg1, TCGReg arg2, TCGLabel *label) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_brcond(s, rexw, cond, arg1, arg2, false, label, false); +} + +static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg arg1, tcg_target_long arg2, TCGLabel *label) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_brcond(s, rexw, cond, arg1, arg2, true, label, false); +} + +static const TCGOutOpBrcond outop_brcond = { + .base.static_constraint = C_O0_I2(r, reT), + .out_rr = tgen_brcond, + .out_ri = tgen_brcondi, +}; + +static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, + TCGReg ah, TCGArg bl, bool blconst, + TCGArg bh, bool bhconst, + TCGLabel *label_this, bool small) { TCGLabel *label_next = gen_new_label(); - TCGLabel *label_this = arg_label(args[5]); - TCGCond cond = args[4]; switch (cond) { case TCG_COND_EQ: case TCG_COND_TSTEQ: tcg_out_brcond(s, 0, tcg_invert_cond(cond), - args[0], args[2], const_args[2], label_next, 1); - tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3], - label_this, small); + al, bl, blconst, label_next, true); + tcg_out_brcond(s, 0, cond, ah, bh, bhconst, label_this, small); break; + case TCG_COND_NE: case TCG_COND_TSTNE: - tcg_out_brcond(s, 0, cond, args[0], args[2], const_args[2], - label_this, small); - tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3], - label_this, small); - break; - case TCG_COND_LT: - tcg_out_brcond(s, 0, TCG_COND_LT, args[1], args[3], const_args[3], - label_this, small); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond(s, 0, TCG_COND_LTU, args[0], args[2], const_args[2], - label_this, small); - break; - case TCG_COND_LE: - tcg_out_brcond(s, 0, TCG_COND_LT, args[1], args[3], const_args[3], - label_this, small); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond(s, 0, TCG_COND_LEU, args[0], args[2], const_args[2], - label_this, small); - break; - case TCG_COND_GT: - tcg_out_brcond(s, 0, TCG_COND_GT, args[1], args[3], const_args[3], - label_this, small); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond(s, 0, TCG_COND_GTU, args[0], args[2], const_args[2], - label_this, small); - break; - case TCG_COND_GE: - tcg_out_brcond(s, 0, TCG_COND_GT, args[1], args[3], const_args[3], - label_this, small); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond(s, 0, TCG_COND_GEU, args[0], args[2], const_args[2], - label_this, small); - break; - case TCG_COND_LTU: - tcg_out_brcond(s, 0, TCG_COND_LTU, args[1], args[3], const_args[3], - label_this, small); - tcg_out_jxx(s, 
JCC_JNE, label_next, 1); - tcg_out_brcond(s, 0, TCG_COND_LTU, args[0], args[2], const_args[2], - label_this, small); + tcg_out_brcond(s, 0, cond, al, bl, blconst, label_this, small); + tcg_out_brcond(s, 0, cond, ah, bh, bhconst, label_this, small); break; - case TCG_COND_LEU: - tcg_out_brcond(s, 0, TCG_COND_LTU, args[1], args[3], const_args[3], - label_this, small); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond(s, 0, TCG_COND_LEU, args[0], args[2], const_args[2], - label_this, small); - break; - case TCG_COND_GTU: - tcg_out_brcond(s, 0, TCG_COND_GTU, args[1], args[3], const_args[3], - label_this, small); - tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond(s, 0, TCG_COND_GTU, args[0], args[2], const_args[2], - label_this, small); - break; - case TCG_COND_GEU: - tcg_out_brcond(s, 0, TCG_COND_GTU, args[1], args[3], const_args[3], - label_this, small); + + default: + tcg_out_brcond(s, 0, tcg_high_cond(cond), + ah, bh, bhconst, label_this, small); tcg_out_jxx(s, JCC_JNE, label_next, 1); - tcg_out_brcond(s, 0, TCG_COND_GEU, args[0], args[2], const_args[2], - label_this, small); + tcg_out_brcond(s, 0, tcg_unsigned_cond(cond), + al, bl, blconst, label_this, small); break; - default: - g_assert_not_reached(); } tcg_out_label(s, label_next); } + +static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, + TCGReg ah, TCGArg bl, bool blconst, + TCGArg bh, bool bhconst, TCGLabel *l) +{ + tcg_out_brcond2(s, cond, al, ah, bl, blconst, bh, bhconst, l, false); +} + +#if TCG_TARGET_REG_BITS != 32 +__attribute__((unused)) #endif +static const TCGOutOpBrcond2 outop_brcond2 = { + .base.static_constraint = C_O0_I4(r, r, ri, ri), + .out = tgen_brcond2, +}; -static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond, - TCGArg dest, TCGArg arg1, TCGArg arg2, - int const_arg2, bool neg) +static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGArg arg2, + bool const_arg2, bool neg) { + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; int cmp_rexw = rexw; bool inv = false; bool cleared; @@ -1726,7 +1794,7 @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond, case TCG_COND_LT: /* If arg2 is 0, extract the sign bit. */ if (const_arg2 && arg2 == 0) { - tcg_out_mov(s, rexw ? 
TCG_TYPE_I64 : TCG_TYPE_I32, dest, arg1); + tcg_out_mov(s, type, dest, arg1); if (inv) { tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, dest); } @@ -1762,49 +1830,89 @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond, } } -#if TCG_TARGET_REG_BITS == 32 -static void tcg_out_setcond2(TCGContext *s, const TCGArg *args, - const int *const_args) +static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) +{ + tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, false); +} + +static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, tcg_target_long arg2) +{ + tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, false); +} + +static const TCGOutOpSetcond outop_setcond = { + .base.static_constraint = C_O1_I2(q, r, reT), + .out_rrr = tgen_setcond, + .out_rri = tgen_setcondi, +}; + +static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) +{ + tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, true); +} + +static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, tcg_target_long arg2) { - TCGArg new_args[6]; - TCGLabel *label_true, *label_over; + tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, true); +} + +static const TCGOutOpSetcond outop_negsetcond = { + .base.static_constraint = C_O1_I2(q, r, reT), + .out_rrr = tgen_negsetcond, + .out_rri = tgen_negsetcondi, +}; - memcpy(new_args, args+1, 5*sizeof(TCGArg)); +static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg al, TCGReg ah, + TCGArg bl, bool const_bl, + TCGArg bh, bool const_bh) +{ + TCGLabel *label_over = gen_new_label(); - if (args[0] == args[1] || args[0] == args[2] - || (!const_args[3] && args[0] == args[3]) - || (!const_args[4] && args[0] == args[4])) { - /* When the destination overlaps with one of the argument - registers, don't do anything tricky. */ - label_true = gen_new_label(); - label_over = gen_new_label(); + if (ret == al || ret == ah + || (!const_bl && ret == bl) + || (!const_bh && ret == bh)) { + /* + * When the destination overlaps with one of the argument + * registers, don't do anything tricky. + */ + TCGLabel *label_true = gen_new_label(); - new_args[5] = label_arg(label_true); - tcg_out_brcond2(s, new_args, const_args+1, 1); + tcg_out_brcond2(s, cond, al, ah, bl, const_bl, + bh, const_bh, label_true, true); - tcg_out_movi(s, TCG_TYPE_I32, args[0], 0); + tcg_out_movi(s, TCG_TYPE_I32, ret, 0); tcg_out_jxx(s, JCC_JMP, label_over, 1); tcg_out_label(s, label_true); - tcg_out_movi(s, TCG_TYPE_I32, args[0], 1); - tcg_out_label(s, label_over); + tcg_out_movi(s, TCG_TYPE_I32, ret, 1); } else { - /* When the destination does not overlap one of the arguments, - clear the destination first, jump if cond false, and emit an - increment in the true case. This results in smaller code. */ - - tcg_out_movi(s, TCG_TYPE_I32, args[0], 0); + /* + * When the destination does not overlap one of the arguments, + * clear the destination first, jump if cond false, and emit an + * increment in the true case. This results in smaller code. 
+ */ + tcg_out_movi(s, TCG_TYPE_I32, ret, 0); - label_over = gen_new_label(); - new_args[4] = tcg_invert_cond(new_args[4]); - new_args[5] = label_arg(label_over); - tcg_out_brcond2(s, new_args, const_args+1, 1); + tcg_out_brcond2(s, tcg_invert_cond(cond), al, ah, bl, const_bl, + bh, const_bh, label_over, true); - tgen_arithi(s, ARITH_ADD, args[0], 1, 0); - tcg_out_label(s, label_over); + tgen_arithi(s, ARITH_ADD, ret, 1, 0); } + tcg_out_label(s, label_over); } + +#if TCG_TARGET_REG_BITS != 32 +__attribute__((unused)) #endif +static const TCGOutOpSetcond2 outop_setcond2 = { + .base.static_constraint = C_O1_I4(r, r, r, ri, ri), + .out = tgen_setcond2, +}; static void tcg_out_cmov(TCGContext *s, int jcc, int rexw, TCGReg dest, TCGReg v1) @@ -1812,57 +1920,20 @@ static void tcg_out_cmov(TCGContext *s, int jcc, int rexw, tcg_out_modrm(s, OPC_CMOVCC | jcc | rexw, dest, v1); } -static void tcg_out_movcond(TCGContext *s, int rexw, TCGCond cond, - TCGReg dest, TCGReg c1, TCGArg c2, int const_c2, - TCGReg v1) +static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg c1, TCGArg c2, bool const_c2, + TCGArg vt, bool const_vt, + TCGArg vf, bool consf_vf) { + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; int jcc = tcg_out_cmp(s, cond, c1, c2, const_c2, rexw); - tcg_out_cmov(s, jcc, rexw, dest, v1); -} - -static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1, - TCGArg arg2, bool const_a2) -{ - if (have_bmi1) { - tcg_out_modrm(s, OPC_TZCNT + rexw, dest, arg1); - if (const_a2) { - tcg_debug_assert(arg2 == (rexw ? 64 : 32)); - } else { - tcg_debug_assert(dest != arg2); - tcg_out_cmov(s, JCC_JB, rexw, dest, arg2); - } - } else { - tcg_debug_assert(dest != arg2); - tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1); - tcg_out_cmov(s, JCC_JE, rexw, dest, arg2); - } + tcg_out_cmov(s, jcc, rexw, dest, vt); } -static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1, - TCGArg arg2, bool const_a2) -{ - if (have_lzcnt) { - tcg_out_modrm(s, OPC_LZCNT + rexw, dest, arg1); - if (const_a2) { - tcg_debug_assert(arg2 == (rexw ? 64 : 32)); - } else { - tcg_debug_assert(dest != arg2); - tcg_out_cmov(s, JCC_JB, rexw, dest, arg2); - } - } else { - tcg_debug_assert(!const_a2); - tcg_debug_assert(dest != arg1); - tcg_debug_assert(dest != arg2); - - /* Recall that the output of BSR is the index not the count. */ - tcg_out_modrm(s, OPC_BSR + rexw, dest, arg1); - tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0); - - /* Since we have destroyed the flags from BSR, we have to re-test. */ - int jcc = tcg_out_cmp(s, TCG_COND_EQ, arg1, 0, 1, rexw); - tcg_out_cmov(s, jcc, rexw, dest, arg2); - } -} +static const TCGOutOpMovcond outop_movcond = { + .base.static_constraint = C_O1_I4(r, r, reT, r, 0), + .out = tgen_movcond, +}; static void tcg_out_branch(TCGContext *s, int call, const tcg_insn_unit *dest) { @@ -2089,8 +2160,7 @@ static inline int setup_guest_base_seg(void) * is required and fill in @h with the host address for the fast path. 
*/ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, - TCGReg addrlo, TCGReg addrhi, - MemOpIdx oi, bool is_ld) + TCGReg addr, MemOpIdx oi, bool is_ld) { TCGLabelQemuLdst *ldst = NULL; MemOp opc = get_memop(oi); @@ -2104,7 +2174,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, } else { *h = x86_guest_base; } - h->base = addrlo; + h->base = addr; h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128); a_mask = (1 << h->aa.align) - 1; @@ -2122,24 +2192,21 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addrlo; - ldst->addrhi_reg = addrhi; + ldst->addr_reg = addr; if (TCG_TARGET_REG_BITS == 64) { ttype = s->addr_type; trexw = (ttype == TCG_TYPE_I32 ? 0 : P_REXW); if (TCG_TYPE_PTR == TCG_TYPE_I64) { hrexw = P_REXW; - if (s->page_bits + s->tlb_dyn_max_bits > 32) { - tlbtype = TCG_TYPE_I64; - tlbrexw = P_REXW; - } + tlbtype = TCG_TYPE_I64; + tlbrexw = P_REXW; } } - tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo); + tcg_out_mov(s, tlbtype, TCG_REG_L0, addr); tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0, - s->page_bits - CPU_TLB_ENTRY_BITS); + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0, fast_ofs + offsetof(CPUTLBDescFast, mask)); @@ -2153,12 +2220,12 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, * check that we don't cross pages for the complete access. */ if (a_mask >= s_mask) { - tcg_out_mov(s, ttype, TCG_REG_L1, addrlo); + tcg_out_mov(s, ttype, TCG_REG_L1, addr); } else { tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1, - addrlo, s_mask - a_mask); + addr, s_mask - a_mask); } - tlb_mask = s->page_mask | a_mask; + tlb_mask = TARGET_PAGE_MASK | a_mask; tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0); /* cmp 0(TCG_REG_L0), TCG_REG_L1 */ @@ -2170,17 +2237,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->label_ptr[0] = s->code_ptr; s->code_ptr += 4; - if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) { - /* cmp 4(TCG_REG_L0), addrhi */ - tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, - TCG_REG_L0, cmp_ofs + 4); - - /* jne slow_path */ - tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); - ldst->label_ptr[1] = s->code_ptr; - s->code_ptr += 4; - } - /* TLB Hit. 
*/ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0, offsetof(CPUTLBEntry, addend)); @@ -2190,11 +2246,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addrlo; - ldst->addrhi_reg = addrhi; + ldst->addr_reg = addr; /* jne slow_path */ - jcc = tcg_out_cmp(s, TCG_COND_TSTNE, addrlo, a_mask, true, false); + jcc = tcg_out_cmp(s, TCG_COND_TSTNE, addr, a_mask, true, false); tcg_out_opc(s, OPC_JCC_long + jcc, 0, 0, 0); ldst->label_ptr[0] = s->code_ptr; s->code_ptr += 4; @@ -2365,24 +2420,50 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, } } -static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addrhi, - MemOpIdx oi, TCGType data_type) +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data, + TCGReg addr, MemOpIdx oi) +{ + TCGLabelQemuLdst *ldst; + HostAddress h; + + ldst = prepare_host_addr(s, &h, addr, oi, true); + tcg_out_qemu_ld_direct(s, data, -1, h, type, get_memop(oi)); + + if (ldst) { + ldst->type = type; + ldst->datalo_reg = data; + ldst->datahi_reg = -1; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); + } +} + +static const TCGOutOpQemuLdSt outop_qemu_ld = { + .base.static_constraint = C_O1_I1(r, L), + .out = tgen_qemu_ld, +}; + +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr, MemOpIdx oi) { TCGLabelQemuLdst *ldst; HostAddress h; - ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true); - tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, get_memop(oi)); + ldst = prepare_host_addr(s, &h, addr, oi, true); + tcg_out_qemu_ld_direct(s, datalo, datahi, h, type, get_memop(oi)); if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = datalo; ldst->datahi_reg = datahi; ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } } +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = { + .base.static_constraint = C_O2_I1(r, r, L), + .out = tgen_qemu_ld2, +}; + static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, HostAddress h, MemOp memop) { @@ -2401,7 +2482,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, switch (memop & MO_SIZE) { case MO_8: - /* This is handled with constraints on INDEX_op_qemu_st8_i32. */ + /* This is handled with constraints in cset_qemu_st(). */ tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4); tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + h.seg, datalo, h.base, h.index, 0, h.ofs); @@ -2493,24 +2574,58 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, } } -static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addrhi, - MemOpIdx oi, TCGType data_type) +static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data, + TCGReg addr, MemOpIdx oi) +{ + TCGLabelQemuLdst *ldst; + HostAddress h; + + ldst = prepare_host_addr(s, &h, addr, oi, false); + tcg_out_qemu_st_direct(s, data, -1, h, get_memop(oi)); + + if (ldst) { + ldst->type = type; + ldst->datalo_reg = data; + ldst->datahi_reg = -1; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); + } +} + +static TCGConstraintSetIndex cset_qemu_st(TCGType type, unsigned flags) +{ + return flags == MO_8 ? C_O0_I2(s, L) : C_O0_I2(L, L); +} + +static const TCGOutOpQemuLdSt outop_qemu_st = { + .base.static_constraint = + TCG_TARGET_REG_BITS == 32 ? 
C_Dynamic : C_O0_I2(L, L), + .base.dynamic_constraint = + TCG_TARGET_REG_BITS == 32 ? cset_qemu_st : NULL, + .out = tgen_qemu_st, +}; + +static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr, MemOpIdx oi) { TCGLabelQemuLdst *ldst; HostAddress h; - ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false); + ldst = prepare_host_addr(s, &h, addr, oi, false); tcg_out_qemu_st_direct(s, datalo, datahi, h, get_memop(oi)); if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = datalo; ldst->datahi_reg = datahi; ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } } +static const TCGOutOpQemuLdSt2 outop_qemu_st2 = { + .base.static_constraint = C_O0_I3(L, L, L), + .out = tgen_qemu_st2, +}; + static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) { /* Reuse the zeroing that exists for goto_ptr. */ @@ -2538,6 +2653,12 @@ static void tcg_out_goto_tb(TCGContext *s, int which) set_jmp_reset_offset(s, which); } +static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0) +{ + /* Jump to the given host address (could be epilogue) */ + tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0); +} + void tb_target_set_jmp_target(const TranslationBlock *tb, int n, uintptr_t jmp_rx, uintptr_t jmp_rw) { @@ -2547,478 +2668,1144 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n, /* no need to flush icache explicitly */ } -static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) + +static void tgen_add(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) { - TCGArg a0, a1, a2; - int c, const_a2, vexop, rexw = 0; + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; -#if TCG_TARGET_REG_BITS == 64 -# define OP_32_64(x) \ - case glue(glue(INDEX_op_, x), _i64): \ - rexw = P_REXW; /* FALLTHRU */ \ - case glue(glue(INDEX_op_, x), _i32) -#else -# define OP_32_64(x) \ - case glue(glue(INDEX_op_, x), _i32) -#endif + if (a0 == a1) { + tgen_arithr(s, ARITH_ADD + rexw, a0, a2); + } else if (a0 == a2) { + tgen_arithr(s, ARITH_ADD + rexw, a0, a1); + } else { + tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, 0); + } +} - /* Hoist the loads of the most common arguments. */ - a0 = args[0]; - a1 = args[1]; - a2 = args[2]; - const_a2 = const_args[2]; +static void tgen_addi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; - switch (opc) { - case INDEX_op_goto_ptr: - /* jmp to the given host address (could be epilogue) */ - tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0); - break; - case INDEX_op_br: - tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0); - break; - OP_32_64(ld8u): - /* Note that we can ignore REXW for the zero-extend to 64-bit. */ - tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2); - break; - OP_32_64(ld8s): - tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, a0, a1, a2); - break; - OP_32_64(ld16u): - /* Note that we can ignore REXW for the zero-extend to 64-bit. 
*/ - tcg_out_modrm_offset(s, OPC_MOVZWL, a0, a1, a2); - break; - OP_32_64(ld16s): - tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, a0, a1, a2); - break; -#if TCG_TARGET_REG_BITS == 64 - case INDEX_op_ld32u_i64: -#endif - case INDEX_op_ld_i32: - tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2); - break; + if (a0 == a1) { + tgen_arithi(s, ARITH_ADD + rexw, a0, a2, false); + } else { + tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, -1, 0, a2); + } +} - OP_32_64(st8): - if (const_args[0]) { - tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2); - tcg_out8(s, a0); - } else { - tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, a0, a1, a2); - } - break; - OP_32_64(st16): - if (const_args[0]) { - tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, a1, a2); - tcg_out16(s, a0); - } else { - tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, a0, a1, a2); - } - break; -#if TCG_TARGET_REG_BITS == 64 - case INDEX_op_st32_i64: -#endif - case INDEX_op_st_i32: - if (const_args[0]) { - tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, a1, a2); - tcg_out32(s, a0); - } else { - tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2); - } - break; +static const TCGOutOpBinary outop_add = { + .base.static_constraint = C_O1_I2(r, r, re), + .out_rrr = tgen_add, + .out_rri = tgen_addi, +}; - OP_32_64(add): - /* For 3-operand addition, use LEA. */ - if (a0 != a1) { - TCGArg c3 = 0; - if (const_a2) { - c3 = a2, a2 = -1; - } else if (a0 == a2) { - /* Watch out for dest = src + dest, since we've removed - the matching constraint on the add. */ - tgen_arithr(s, ARITH_ADD + rexw, a0, a1); - break; - } +static void tgen_addco(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tgen_arithr(s, ARITH_ADD + rexw, a0, a2); +} - tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3); - break; - } - c = ARITH_ADD; - goto gen_arith; - OP_32_64(sub): - c = ARITH_SUB; - goto gen_arith; - OP_32_64(and): - c = ARITH_AND; - goto gen_arith; - OP_32_64(or): - c = ARITH_OR; - goto gen_arith; - OP_32_64(xor): - c = ARITH_XOR; - goto gen_arith; - gen_arith: - if (const_a2) { - tgen_arithi(s, c + rexw, a0, a2, 0); - } else { - tgen_arithr(s, c + rexw, a0, a2); - } - break; +static void tgen_addco_imm(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tgen_arithi(s, ARITH_ADD + rexw, a0, a2, true); +} - OP_32_64(andc): - if (const_a2) { - tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1); - tgen_arithi(s, ARITH_AND + rexw, a0, ~a2, 0); - } else { - tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1); - } - break; +static const TCGOutOpBinary outop_addco = { + .base.static_constraint = C_O1_I2(r, 0, re), + .out_rrr = tgen_addco, + .out_rri = tgen_addco_imm, +}; - OP_32_64(mul): - if (const_a2) { - int32_t val; - val = a2; - if (val == (int8_t)val) { - tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0); - tcg_out8(s, val); - } else { - tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0); - tcg_out32(s, val); - } - } else { - tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2); - } - break; +static void tgen_addcio(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 
0 : P_REXW; + tgen_arithr(s, ARITH_ADC + rexw, a0, a2); +} - OP_32_64(div2): - tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]); - break; - OP_32_64(divu2): - tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]); - break; +static void tgen_addcio_imm(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tgen_arithi(s, ARITH_ADC + rexw, a0, a2, true); +} - OP_32_64(shl): - /* For small constant 3-operand shift, use LEA. */ - if (const_a2 && a0 != a1 && (a2 - 1) < 3) { - if (a2 - 1 == 0) { - /* shl $1,a1,a0 -> lea (a1,a1),a0 */ - tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0); - } else { - /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */ - tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0); - } - break; - } - c = SHIFT_SHL; - vexop = OPC_SHLX; - goto gen_shift_maybe_vex; - OP_32_64(shr): - c = SHIFT_SHR; - vexop = OPC_SHRX; - goto gen_shift_maybe_vex; - OP_32_64(sar): - c = SHIFT_SAR; - vexop = OPC_SARX; - goto gen_shift_maybe_vex; - OP_32_64(rotl): - c = SHIFT_ROL; - goto gen_shift; - OP_32_64(rotr): - c = SHIFT_ROR; - goto gen_shift; - gen_shift_maybe_vex: - if (have_bmi2) { - if (!const_a2) { - tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1); - break; - } - tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1); - } - /* FALLTHRU */ - gen_shift: - if (const_a2) { - tcg_out_shifti(s, c + rexw, a0, a2); - } else { - tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, a0); - } - break; +static const TCGOutOpBinary outop_addcio = { + .base.static_constraint = C_O1_I2(r, 0, re), + .out_rrr = tgen_addcio, + .out_rri = tgen_addcio_imm, +}; - OP_32_64(ctz): - tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]); - break; - OP_32_64(clz): - tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]); - break; - OP_32_64(ctpop): - tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1); - break; +static void tgen_addci_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + /* Because "0O" is not a valid constraint, we must match ourselves. */ + if (a0 == a2) { + tgen_addcio(s, type, a0, a0, a1); + } else { + tcg_out_mov(s, type, a0, a1); + tgen_addcio(s, type, a0, a0, a2); + } +} - OP_32_64(brcond): - tcg_out_brcond(s, rexw, a2, a0, a1, const_args[1], - arg_label(args[3]), 0); - break; - OP_32_64(setcond): - tcg_out_setcond(s, rexw, args[3], a0, a1, a2, const_a2, false); - break; - OP_32_64(negsetcond): - tcg_out_setcond(s, rexw, args[3], a0, a1, a2, const_a2, true); - break; - OP_32_64(movcond): - tcg_out_movcond(s, rexw, args[5], a0, a1, a2, const_a2, args[3]); - break; +static void tgen_addci_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_mov(s, type, a0, a1); + tgen_addcio_imm(s, type, a0, a0, a2); +} - OP_32_64(bswap16): - if (a2 & TCG_BSWAP_OS) { - /* Output must be sign-extended. */ - if (rexw) { - tcg_out_bswap64(s, a0); - tcg_out_shifti(s, SHIFT_SAR + rexw, a0, 48); - } else { - tcg_out_bswap32(s, a0); - tcg_out_shifti(s, SHIFT_SAR, a0, 16); - } - } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { - /* Output must be zero-extended, but input isn't. 
*/ - tcg_out_bswap32(s, a0); - tcg_out_shifti(s, SHIFT_SHR, a0, 16); - } else { - tcg_out_rolw_8(s, a0); - } - break; - OP_32_64(bswap32): - tcg_out_bswap32(s, a0); - if (rexw && (a2 & TCG_BSWAP_OS)) { - tcg_out_ext32s(s, a0, a0); +static void tgen_addci_rir(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, TCGReg a2) +{ + tgen_addci_rri(s, type, a0, a2, a1); +} + +static void tgen_addci_rii(TCGContext *s, TCGType type, TCGReg a0, + tcg_target_long a1, tcg_target_long a2) +{ + if (a2 == 0) { + /* Implement 0 + 0 + C with -(x - x - c). */ + tgen_arithr(s, ARITH_SBB, a0, a0); + tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NEG, a0); + } else { + tcg_out_movi(s, type, a0, a2); + tgen_addcio_imm(s, type, a0, a0, a1); + } +} + +static const TCGOutOpAddSubCarry outop_addci = { + .base.static_constraint = C_O1_I2(r, rO, re), + .out_rrr = tgen_addci_rrr, + .out_rri = tgen_addci_rri, + .out_rir = tgen_addci_rir, + .out_rii = tgen_addci_rii, +}; + +static void tcg_out_set_carry(TCGContext *s) +{ + tcg_out8(s, OPC_STC); +} + +static void tgen_and(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tgen_arithr(s, ARITH_AND + rexw, a0, a2); +} + +static void tgen_andi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tgen_arithi(s, ARITH_AND + rexw, a0, a2, false); +} + +static const TCGOutOpBinary outop_and = { + .base.static_constraint = C_O1_I2(r, 0, reZ), + .out_rrr = tgen_and, + .out_rri = tgen_andi, +}; + +static void tgen_andc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1); +} + +static TCGConstraintSetIndex cset_andc(TCGType type, unsigned flags) +{ + return have_bmi1 ? C_O1_I2(r, r, r) : C_NotImplemented; +} + +static const TCGOutOpBinary outop_andc = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_andc, + .out_rrr = tgen_andc, +}; + +static void tgen_clz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + int jcc; + + if (have_lzcnt) { + tcg_out_modrm(s, OPC_LZCNT + rexw, a0, a1); + jcc = JCC_JB; + } else { + /* Recall that the output of BSR is the index not the count. */ + tcg_out_modrm(s, OPC_BSR + rexw, a0, a1); + tgen_arithi(s, ARITH_XOR + rexw, a0, rexw ? 63 : 31, 0); + + /* Since we have destroyed the flags from BSR, we have to re-test. */ + jcc = tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, rexw); + } + tcg_out_cmov(s, jcc, rexw, a0, a2); +} + +static void tgen_clzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_modrm(s, OPC_LZCNT + rexw, a0, a1); +} + +static TCGConstraintSetIndex cset_clz(TCGType type, unsigned flags) +{ + return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r); +} + +static const TCGOutOpBinary outop_clz = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_clz, + .out_rrr = tgen_clz, + .out_rri = tgen_clzi, +}; + +static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1); +} + +static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags) +{ + return have_popcnt ? 
C_O1_I1(r, r) : C_NotImplemented; +} + +static const TCGOutOpUnary outop_ctpop = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_ctpop, + .out_rr = tgen_ctpop, +}; + +static void tgen_ctz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + int jcc; + + if (have_bmi1) { + tcg_out_modrm(s, OPC_TZCNT + rexw, a0, a1); + jcc = JCC_JB; + } else { + tcg_out_modrm(s, OPC_BSF + rexw, a0, a1); + jcc = JCC_JE; + } + tcg_out_cmov(s, jcc, rexw, a0, a2); +} + +static void tgen_ctzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_modrm(s, OPC_TZCNT + rexw, a0, a1); +} + +static TCGConstraintSetIndex cset_ctz(TCGType type, unsigned flags) +{ + return have_bmi1 ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r); +} + +static const TCGOutOpBinary outop_ctz = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_ctz, + .out_rrr = tgen_ctz, + .out_rri = tgen_ctzi, +}; + +static const TCGOutOpBinary outop_divs = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_divs2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a4) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, a4); +} + +static const TCGOutOpDivRem outop_divs2 = { + .base.static_constraint = C_O2_I3(a, d, 0, 1, r), + .out_rr01r = tgen_divs2, +}; + +static const TCGOutOpBinary outop_divu = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_divu2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a4) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, a4); +} + +static const TCGOutOpDivRem outop_divu2 = { + .base.static_constraint = C_O2_I3(a, d, 0, 1, r), + .out_rr01r = tgen_divu2, +}; + +static const TCGOutOpBinary outop_eqv = { + .base.static_constraint = C_NotImplemented, +}; + +#if TCG_TARGET_REG_BITS == 64 +static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) +{ + tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32); +} + +static const TCGOutOpUnary outop_extrh_i64_i32 = { + .base.static_constraint = C_O1_I1(r, 0), + .out_rr = tgen_extrh_i64_i32, +}; +#endif /* TCG_TARGET_REG_BITS == 64 */ + +static void tgen_mul(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2); +} + +static void tgen_muli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + + if (a2 == (int8_t)a2) { + tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0); + tcg_out8(s, a2); + } else { + tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0); + tcg_out32(s, a2); + } +} + +static const TCGOutOpBinary outop_mul = { + .base.static_constraint = C_O1_I2(r, 0, re), + .out_rrr = tgen_mul, + .out_rri = tgen_muli, +}; + +static void tgen_muls2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3) +{ + int rexw = type == TCG_TYPE_I32 ? 
0 : P_REXW; + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, a3); +} + +static const TCGOutOpMul2 outop_muls2 = { + .base.static_constraint = C_O2_I2(a, d, a, r), + .out_rrrr = tgen_muls2, +}; + +static const TCGOutOpBinary outop_mulsh = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_muluh = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_mulu2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, a3); +} + +static const TCGOutOpMul2 outop_mulu2 = { + .base.static_constraint = C_O2_I2(a, d, a, r), + .out_rrrr = tgen_mulu2, +}; + +static const TCGOutOpBinary outop_nand = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_nor = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_or(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tgen_arithr(s, ARITH_OR + rexw, a0, a2); +} + +static void tgen_ori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tgen_arithi(s, ARITH_OR + rexw, a0, a2, false); +} + +static const TCGOutOpBinary outop_or = { + .base.static_constraint = C_O1_I2(r, 0, re), + .out_rrr = tgen_or, + .out_rri = tgen_ori, +}; + +static const TCGOutOpBinary outop_orc = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_rems = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_remu = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_rotl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_ROL, a0); +} + +static void tgen_rotli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_shifti(s, SHIFT_ROL + rexw, a0, a2); +} + +static const TCGOutOpBinary outop_rotl = { + .base.static_constraint = C_O1_I2(r, 0, ci), + .out_rrr = tgen_rotl, + .out_rri = tgen_rotli, +}; + +static void tgen_rotr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_ROR, a0); +} + +static void tgen_rotri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_shifti(s, SHIFT_ROR + rexw, a0, a2); +} + +static const TCGOutOpBinary outop_rotr = { + .base.static_constraint = C_O1_I2(r, 0, ci), + .out_rrr = tgen_rotr, + .out_rri = tgen_rotri, +}; + +static TCGConstraintSetIndex cset_shift(TCGType type, unsigned flags) +{ + return have_bmi2 ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ci); +} + +static void tgen_sar(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + if (have_bmi2) { + tcg_out_vex_modrm(s, OPC_SARX + rexw, a0, a2, a1); + } else { + tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_SAR, a0); + } +} + +static void tgen_sari(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 
0 : P_REXW; + + tcg_out_mov(s, type, a0, a1); + tcg_out_shifti(s, SHIFT_SAR + rexw, a0, a2); +} + +static const TCGOutOpBinary outop_sar = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_shift, + .out_rrr = tgen_sar, + .out_rri = tgen_sari, +}; + +static void tgen_shl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + if (have_bmi2) { + tcg_out_vex_modrm(s, OPC_SHLX + rexw, a0, a2, a1); + } else { + tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_SHL, a0); + } +} + +static void tgen_shli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + + /* For small constant 3-operand shift, use LEA. */ + if (a0 != a1 && a2 >= 1 && a2 <= 3) { + if (a2 == 1) { + /* shl $1,a1,a0 -> lea (a1,a1),a0 */ + tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0); + } else { + /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */ + tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0); } - break; + return; + } + tcg_out_mov(s, type, a0, a1); + tcg_out_shifti(s, SHIFT_SHL + rexw, a0, a2); +} + +static const TCGOutOpBinary outop_shl = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_shift, + .out_rrr = tgen_shl, + .out_rri = tgen_shli, +}; + +static void tgen_shr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + if (have_bmi2) { + tcg_out_vex_modrm(s, OPC_SHRX + rexw, a0, a2, a1); + } else { + tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_SHR, a0); + } +} + +static void tgen_shri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + + tcg_out_mov(s, type, a0, a1); + tcg_out_shifti(s, SHIFT_SHR + rexw, a0, a2); +} + +static const TCGOutOpBinary outop_shr = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_shift, + .out_rrr = tgen_shr, + .out_rri = tgen_shri, +}; + +static void tgen_sub(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tgen_arithr(s, ARITH_SUB + rexw, a0, a2); +} + +static const TCGOutOpSubtract outop_sub = { + .base.static_constraint = C_O1_I2(r, 0, r), + .out_rrr = tgen_sub, +}; + +static void tgen_subbo_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tgen_arithi(s, ARITH_SUB + rexw, a0, a2, 1); +} + +static const TCGOutOpAddSubCarry outop_subbo = { + .base.static_constraint = C_O1_I2(r, 0, re), + .out_rrr = tgen_sub, + .out_rri = tgen_subbo_rri, +}; + +static void tgen_subbio_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tgen_arithr(s, ARITH_SBB + rexw, a0, a2); +} + +static void tgen_subbio_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tgen_arithi(s, ARITH_SBB + rexw, a0, a2, 1); +} + +static const TCGOutOpAddSubCarry outop_subbio = { + .base.static_constraint = C_O1_I2(r, 0, re), + .out_rrr = tgen_subbio_rrr, + .out_rri = tgen_subbio_rri, +}; + +#define outop_subbi outop_subbio + +static void tcg_out_set_borrow(TCGContext *s) +{ + tcg_out8(s, OPC_STC); +} + +static void tgen_xor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + int rexw = type == TCG_TYPE_I32 ? 
0 : P_REXW; + tgen_arithr(s, ARITH_XOR + rexw, a0, a2); +} + +static void tgen_xori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tgen_arithi(s, ARITH_XOR + rexw, a0, a2, false); +} + +static const TCGOutOpBinary outop_xor = { + .base.static_constraint = C_O1_I2(r, 0, re), + .out_rrr = tgen_xor, + .out_rri = tgen_xori, +}; + +static void tgen_bswap16(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, unsigned flags) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + + if (flags & TCG_BSWAP_OS) { + /* Output must be sign-extended. */ + if (rexw) { + tcg_out_bswap64(s, a0); + tcg_out_shifti(s, SHIFT_SAR + rexw, a0, 48); + } else { + tcg_out_bswap32(s, a0); + tcg_out_shifti(s, SHIFT_SAR, a0, 16); + } + } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + /* Output must be zero-extended, but input isn't. */ + tcg_out_bswap32(s, a0); + tcg_out_shifti(s, SHIFT_SHR, a0, 16); + } else { + tcg_out_rolw_8(s, a0); + } +} + +static const TCGOutOpBswap outop_bswap16 = { + .base.static_constraint = C_O1_I1(r, 0), + .out_rr = tgen_bswap16, +}; + +static void tgen_bswap32(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, unsigned flags) +{ + tcg_out_bswap32(s, a0); + if (flags & TCG_BSWAP_OS) { + tcg_out_ext32s(s, a0, a0); + } +} + +static const TCGOutOpBswap outop_bswap32 = { + .base.static_constraint = C_O1_I1(r, 0), + .out_rr = tgen_bswap32, +}; + +#if TCG_TARGET_REG_BITS == 64 +static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tcg_out_bswap64(s, a0); +} + +static const TCGOutOpUnary outop_bswap64 = { + .base.static_constraint = C_O1_I1(r, 0), + .out_rr = tgen_bswap64, +}; +#endif + +static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0); +} + +static const TCGOutOpUnary outop_neg = { + .base.static_constraint = C_O1_I1(r, 0), + .out_rr = tgen_neg, +}; + +static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + int rexw = type == TCG_TYPE_I32 ? 
0 : P_REXW; + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0); +} + +static const TCGOutOpUnary outop_not = { + .base.static_constraint = C_O1_I1(r, 0), + .out_rr = tgen_not, +}; + +static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + TCGReg a2, unsigned ofs, unsigned len) +{ + if (ofs == 0 && len == 8) { + tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0); + } else if (ofs == 0 && len == 16) { + tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0); + } else if (TCG_TARGET_REG_BITS == 32 && ofs == 8 && len == 8) { + tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4); + } else { + g_assert_not_reached(); + } +} + +static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + tcg_target_long a2, unsigned ofs, unsigned len) +{ + if (ofs == 0 && len == 8) { + tcg_out_opc(s, OPC_MOVB_Ib | P_REXB_RM | LOWREGMASK(a0), 0, a0, 0); + tcg_out8(s, a2); + } else if (ofs == 0 && len == 16) { + tcg_out_opc(s, OPC_MOVL_Iv | P_DATA16 | LOWREGMASK(a0), 0, a0, 0); + tcg_out16(s, a2); + } else if (TCG_TARGET_REG_BITS == 32 && ofs == 8 && len == 8) { + tcg_out8(s, OPC_MOVB_Ib + a0 + 4); + tcg_out8(s, a2); + } else { + g_assert_not_reached(); + } +} + +static const TCGOutOpDeposit outop_deposit = { + .base.static_constraint = C_O1_I2(q, 0, qi), + .out_rrr = tgen_deposit, + .out_rri = tgen_depositi, +}; + +static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + if (ofs == 0) { + switch (len) { + case 8: + tcg_out_ext8u(s, a0, a1); + return; + case 16: + tcg_out_ext16u(s, a0, a1); + return; + case 32: + tcg_out_ext32u(s, a0, a1); + return; + } + } else if (TCG_TARGET_REG_BITS == 64 && ofs + len == 32) { + /* This is a 32-bit zero-extending right shift. */ + tcg_out_mov(s, TCG_TYPE_I32, a0, a1); + tcg_out_shifti(s, SHIFT_SHR, a0, ofs); + return; + } else if (ofs == 8 && len == 8) { + /* + * On the off-chance that we can use the high-byte registers. + * Otherwise we emit the same ext16 + shift pattern that we + * would have gotten from the normal tcg-op.c expansion. + */ + if (a1 < 4 && (TCG_TARGET_REG_BITS == 32 || a0 < 8)) { + tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4); + } else { + tcg_out_ext16u(s, a0, a1); + tcg_out_shifti(s, SHIFT_SHR, a0, 8); + } + return; + } + g_assert_not_reached(); +} + +static const TCGOutOpExtract outop_extract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extract, +}; + +static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + if (ofs == 0) { + switch (len) { + case 8: + tcg_out_ext8s(s, type, a0, a1); + return; + case 16: + tcg_out_ext16s(s, type, a0, a1); + return; + case 32: + tcg_out_ext32s(s, a0, a1); + return; + } + } else if (ofs == 8 && len == 8) { + if (type == TCG_TYPE_I32 && a1 < 4 && a0 < 8) { + tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4); + } else { + tcg_out_ext16s(s, type, a0, a1); + tgen_sari(s, type, a0, a0, 8); + } + return; + } + g_assert_not_reached(); +} + +static const TCGOutOpExtract outop_sextract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_sextract, +}; + +static void tgen_extract2(TCGContext *s, TCGType type, TCGReg a0, + TCGReg a1, TCGReg a2, unsigned shr) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + + /* Note that SHRD outputs to the r/m operand. 
*/ + tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0); + tcg_out8(s, shr); +} + +static const TCGOutOpExtract2 outop_extract2 = { + .base.static_constraint = C_O1_I2(r, 0, r), + .out_rrr = tgen_extract2, +}; + +static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_modrm_offset(s, OPC_MOVZBL, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld8u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8u, +}; + +static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld8s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8s, +}; + +static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_modrm_offset(s, OPC_MOVZWL, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld16u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16u, +}; + +static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW; + tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld16s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16s, +}; + +#if TCG_TARGET_REG_BITS == 64 +static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_modrm_offset(s, OPC_MOVL_GvEv, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld32u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32u, +}; + +static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_modrm_offset(s, OPC_MOVSLQ, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld32s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32s, +}; +#endif + +static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, data, base, offset); +} + +static void tgen_st8_i(TCGContext *s, TCGType type, tcg_target_long data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, base, offset); + tcg_out8(s, data); +} + +static const TCGOutOpStore outop_st8 = { + .base.static_constraint = C_O0_I2(qi, r), + .out_r = tgen_st8_r, + .out_i = tgen_st8_i, +}; + +static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, data, base, offset); +} + +static void tgen_st16_i(TCGContext *s, TCGType type, tcg_target_long data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, base, offset); + tcg_out16(s, data); +} + +static const TCGOutOpStore outop_st16 = { + .base.static_constraint = C_O0_I2(ri, r), + .out_r = tgen_st16_r, + .out_i = tgen_st16_i, +}; + +static void tgen_st_i(TCGContext *s, TCGType type, tcg_target_long data, + TCGReg base, ptrdiff_t offset) +{ + bool ok = tcg_out_sti(s, type, data, base, offset); + tcg_debug_assert(ok); +} - OP_32_64(neg): - tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0); - break; - OP_32_64(not): - tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0); - break; +static const TCGOutOpStore outop_st = { + .base.static_constraint = C_O0_I2(re, r), + .out_r = tcg_out_st, + .out_i = tgen_st_i, 
+}; - case INDEX_op_qemu_ld_a64_i32: - if (TCG_TARGET_REG_BITS == 32) { - tcg_out_qemu_ld(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32); - break; - } - /* fall through */ - case INDEX_op_qemu_ld_a32_i32: - tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32); - break; - case INDEX_op_qemu_ld_a32_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); - } else { - tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64); - } - break; - case INDEX_op_qemu_ld_a64_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); - } else { - tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); - } - break; - case INDEX_op_qemu_ld_a32_i128: - case INDEX_op_qemu_ld_a64_i128: - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); - tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128); - break; +static int const umin_insn[4] = { + OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_VPMINUQ +}; - case INDEX_op_qemu_st_a64_i32: - case INDEX_op_qemu_st8_a64_i32: - if (TCG_TARGET_REG_BITS == 32) { - tcg_out_qemu_st(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32); - break; - } - /* fall through */ - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st8_a32_i32: - tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32); - break; - case INDEX_op_qemu_st_a32_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); - } else { - tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64); - } - break; - case INDEX_op_qemu_st_a64_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64); - } else { - tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); - } - break; - case INDEX_op_qemu_st_a32_i128: - case INDEX_op_qemu_st_a64_i128: - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); - tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128); - break; +static int const umax_insn[4] = { + OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_VPMAXUQ +}; - OP_32_64(mulu2): - tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]); - break; - OP_32_64(muls2): - tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]); - break; - OP_32_64(add2): - if (const_args[4]) { - tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1); - } else { - tgen_arithr(s, ARITH_ADD + rexw, a0, args[4]); - } - if (const_args[5]) { - tgen_arithi(s, ARITH_ADC + rexw, a1, args[5], 1); - } else { - tgen_arithr(s, ARITH_ADC + rexw, a1, args[5]); - } - break; - OP_32_64(sub2): - if (const_args[4]) { - tgen_arithi(s, ARITH_SUB + rexw, a0, args[4], 1); - } else { - tgen_arithr(s, ARITH_SUB + rexw, a0, args[4]); - } - if (const_args[5]) { - tgen_arithi(s, ARITH_SBB + rexw, a1, args[5], 1); - } else { - tgen_arithr(s, ARITH_SBB + rexw, a1, args[5]); - } - break; +static bool tcg_out_cmp_vec_noinv(TCGContext *s, TCGType type, unsigned vece, + TCGReg v0, TCGReg v1, TCGReg v2, TCGCond cond) +{ + static int const cmpeq_insn[4] = { + OPC_PCMPEQB, OPC_PCMPEQW, OPC_PCMPEQD, OPC_PCMPEQQ + }; + static int const cmpgt_insn[4] = { + OPC_PCMPGTB, OPC_PCMPGTW, OPC_PCMPGTD, OPC_PCMPGTQ + }; -#if TCG_TARGET_REG_BITS == 32 - case INDEX_op_brcond2_i32: - tcg_out_brcond2(s, args, const_args, 0); + enum { + NEED_INV = 1, + NEED_SWAP = 2, + NEED_UMIN = 4, + NEED_UMAX = 8, + INVALID = 16, + }; + static const uint8_t cond_fixup[16] = { + [0 ... 
15] = INVALID, + [TCG_COND_EQ] = 0, + [TCG_COND_GT] = 0, + [TCG_COND_NE] = NEED_INV, + [TCG_COND_LE] = NEED_INV, + [TCG_COND_LT] = NEED_SWAP, + [TCG_COND_GE] = NEED_SWAP | NEED_INV, + [TCG_COND_LEU] = NEED_UMIN, + [TCG_COND_GTU] = NEED_UMIN | NEED_INV, + [TCG_COND_GEU] = NEED_UMAX, + [TCG_COND_LTU] = NEED_UMAX | NEED_INV, + }; + int fixup = cond_fixup[cond]; + + assert(!(fixup & INVALID)); + + if (fixup & NEED_INV) { + cond = tcg_invert_cond(cond); + } + + if (fixup & NEED_SWAP) { + TCGReg swap = v1; + v1 = v2; + v2 = swap; + cond = tcg_swap_cond(cond); + } + + if (fixup & (NEED_UMIN | NEED_UMAX)) { + int op = (fixup & NEED_UMIN ? umin_insn[vece] : umax_insn[vece]); + + /* avx2 does not have 64-bit min/max; adjusted during expand. */ + assert(vece <= MO_32); + + tcg_out_vex_modrm_type(s, op, TCG_TMP_VEC, v1, v2, type); + v2 = TCG_TMP_VEC; + cond = TCG_COND_EQ; + } + + switch (cond) { + case TCG_COND_EQ: + tcg_out_vex_modrm_type(s, cmpeq_insn[vece], v0, v1, v2, type); break; - case INDEX_op_setcond2_i32: - tcg_out_setcond2(s, args, const_args); + case TCG_COND_GT: + tcg_out_vex_modrm_type(s, cmpgt_insn[vece], v0, v1, v2, type); break; -#else /* TCG_TARGET_REG_BITS == 64 */ - case INDEX_op_ld32s_i64: - tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2); + default: + g_assert_not_reached(); + } + return fixup & NEED_INV; +} + +static void tcg_out_cmp_vec_k1(TCGContext *s, TCGType type, unsigned vece, + TCGReg v1, TCGReg v2, TCGCond cond) +{ + static const int cmpm_insn[2][4] = { + { OPC_VPCMPB, OPC_VPCMPW, OPC_VPCMPD, OPC_VPCMPQ }, + { OPC_VPCMPUB, OPC_VPCMPUW, OPC_VPCMPUD, OPC_VPCMPUQ } + }; + static const int testm_insn[4] = { + OPC_VPTESTMB, OPC_VPTESTMW, OPC_VPTESTMD, OPC_VPTESTMQ + }; + static const int testnm_insn[4] = { + OPC_VPTESTNMB, OPC_VPTESTNMW, OPC_VPTESTNMD, OPC_VPTESTNMQ + }; + + static const int cond_ext[16] = { + [TCG_COND_EQ] = 0, + [TCG_COND_NE] = 4, + [TCG_COND_LT] = 1, + [TCG_COND_LTU] = 1, + [TCG_COND_LE] = 2, + [TCG_COND_LEU] = 2, + [TCG_COND_NEVER] = 3, + [TCG_COND_GE] = 5, + [TCG_COND_GEU] = 5, + [TCG_COND_GT] = 6, + [TCG_COND_GTU] = 6, + [TCG_COND_ALWAYS] = 7, + }; + + switch (cond) { + case TCG_COND_TSTNE: + tcg_out_vex_modrm_type(s, testm_insn[vece], /* k1 */ 1, v1, v2, type); break; - case INDEX_op_ld_i64: - tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2); + case TCG_COND_TSTEQ: + tcg_out_vex_modrm_type(s, testnm_insn[vece], /* k1 */ 1, v1, v2, type); break; - case INDEX_op_st_i64: - if (const_args[0]) { - tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2); - tcg_out32(s, a0); - } else { - tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2); - } + default: + tcg_out_vex_modrm_type(s, cmpm_insn[is_unsigned_cond(cond)][vece], + /* k1 */ 1, v1, v2, type); + tcg_out8(s, cond_ext[cond]); break; + } +} - case INDEX_op_bswap64_i64: - tcg_out_bswap64(s, a0); - break; - case INDEX_op_extrh_i64_i32: - tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32); - break; -#endif +static void tcg_out_k1_to_vec(TCGContext *s, TCGType type, + unsigned vece, TCGReg dest) +{ + static const int movm_insn[] = { + OPC_VPMOVM2B, OPC_VPMOVM2W, OPC_VPMOVM2D, OPC_VPMOVM2Q + }; + tcg_out_vex_modrm_type(s, movm_insn[vece], dest, 0, /* k1 */ 1, type); +} - OP_32_64(deposit): - if (args[3] == 0 && args[4] == 8) { - /* load bits 0..7 */ - if (const_a2) { - tcg_out_opc(s, OPC_MOVB_Ib | P_REXB_RM | LOWREGMASK(a0), - 0, a0, 0); - tcg_out8(s, a2); - } else { - tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0); - } - } else if (TCG_TARGET_REG_BITS == 32 && args[3] == 8 && args[4] == 8) { - /* load bits 8..15 */ 
- if (const_a2) { - tcg_out8(s, OPC_MOVB_Ib + a0 + 4); - tcg_out8(s, a2); - } else { - tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4); - } - } else if (args[3] == 0 && args[4] == 16) { - /* load bits 0..15 */ - if (const_a2) { - tcg_out_opc(s, OPC_MOVL_Iv | P_DATA16 | LOWREGMASK(a0), - 0, a0, 0); - tcg_out16(s, a2); - } else { - tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0); - } - } else { - g_assert_not_reached(); - } - break; +static void tcg_out_cmp_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg v0, TCGReg v1, TCGReg v2, TCGCond cond) +{ + /* + * With avx512, we have a complete set of comparisons into mask. + * Unless there's a single insn expansion for the comparison, + * expand via a mask in k1. + */ + if ((vece <= MO_16 ? have_avx512bw : have_avx512dq) + && cond != TCG_COND_EQ + && cond != TCG_COND_LT + && cond != TCG_COND_GT) { + tcg_out_cmp_vec_k1(s, type, vece, v1, v2, cond); + tcg_out_k1_to_vec(s, type, vece, v0); + return; + } - case INDEX_op_extract_i64: - if (a2 + args[3] == 32) { - /* This is a 32-bit zero-extending right shift. */ - tcg_out_mov(s, TCG_TYPE_I32, a0, a1); - tcg_out_shifti(s, SHIFT_SHR, a0, a2); - break; - } - /* FALLTHRU */ - case INDEX_op_extract_i32: - /* On the off-chance that we can use the high-byte registers. - Otherwise we emit the same ext16 + shift pattern that we - would have gotten from the normal tcg-op.c expansion. */ - tcg_debug_assert(a2 == 8 && args[3] == 8); - if (a1 < 4 && a0 < 8) { - tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4); - } else { - tcg_out_ext16u(s, a0, a1); - tcg_out_shifti(s, SHIFT_SHR, a0, 8); - } - break; + if (tcg_out_cmp_vec_noinv(s, type, vece, v0, v1, v2, cond)) { + tcg_out_dupi_vec(s, type, vece, TCG_TMP_VEC, -1); + tcg_out_vex_modrm_type(s, OPC_PXOR, v0, v0, TCG_TMP_VEC, type); + } +} - case INDEX_op_sextract_i32: - /* We don't implement sextract_i64, as we cannot sign-extend to - 64-bits without using the REX prefix that explicitly excludes - access to the high-byte registers. */ - tcg_debug_assert(a2 == 8 && args[3] == 8); - if (a1 < 4 && a0 < 8) { - tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4); - } else { - tcg_out_ext16s(s, TCG_TYPE_I32, a0, a1); - tcg_out_shifti(s, SHIFT_SAR, a0, 8); - } - break; +static void tcg_out_cmpsel_vec_k1(TCGContext *s, TCGType type, unsigned vece, + TCGReg v0, TCGReg c1, TCGReg c2, + TCGReg v3, TCGReg v4, TCGCond cond) +{ + static const int vpblendm_insn[] = { + OPC_VPBLENDMB, OPC_VPBLENDMW, OPC_VPBLENDMD, OPC_VPBLENDMQ + }; + bool z = false; - OP_32_64(extract2): - /* Note that SHRD outputs to the r/m operand. */ - tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0); - tcg_out8(s, args[3]); - break; + /* Swap to place constant in V4 to take advantage of zero-masking. */ + if (!v3) { + z = true; + v3 = v4; + cond = tcg_invert_cond(cond); + } - case INDEX_op_mb: - tcg_out_mb(s, a0); - break; - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_mov_i64: - case INDEX_op_call: /* Always emitted via tcg_out_call. */ - case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ - case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ - case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. 
*/ - case INDEX_op_ext8s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extrl_i64_i32: - default: - g_assert_not_reached(); + tcg_out_cmp_vec_k1(s, type, vece, c1, c2, cond); + tcg_out_evex_modrm_type(s, vpblendm_insn[vece], v0, v4, v3, + /* k1 */1, z, type); +} + +static void tcg_out_cmpsel_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg v0, TCGReg c1, TCGReg c2, + TCGReg v3, TCGReg v4, TCGCond cond) +{ + bool inv; + + if (vece <= MO_16 ? have_avx512bw : have_avx512vl) { + tcg_out_cmpsel_vec_k1(s, type, vece, v0, c1, c2, v3, v4, cond); + return; } -#undef OP_32_64 + inv = tcg_out_cmp_vec_noinv(s, type, vece, TCG_TMP_VEC, c1, c2, cond); + + /* + * Since XMM0 is 16, the only way we get 0 into V3 + * is via the constant zero constraint. + */ + if (!v3) { + if (inv) { + tcg_out_vex_modrm_type(s, OPC_PAND, v0, TCG_TMP_VEC, v4, type); + } else { + tcg_out_vex_modrm_type(s, OPC_PANDN, v0, TCG_TMP_VEC, v4, type); + } + } else { + if (inv) { + TCGReg swap = v3; + v3 = v4; + v4 = swap; + } + tcg_out_vex_modrm_type(s, OPC_VPBLENDVB, v0, v4, v3, type); + tcg_out8(s, (TCG_TMP_VEC - TCG_REG_XMM0) << 4); + } } static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, @@ -3050,12 +3837,6 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, static int const shift_imm_insn[4] = { OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib }; - static int const cmpeq_insn[4] = { - OPC_PCMPEQB, OPC_PCMPEQW, OPC_PCMPEQD, OPC_PCMPEQQ - }; - static int const cmpgt_insn[4] = { - OPC_PCMPGTB, OPC_PCMPGTW, OPC_PCMPGTD, OPC_PCMPGTQ - }; static int const punpckl_insn[4] = { OPC_PUNPCKLBW, OPC_PUNPCKLWD, OPC_PUNPCKLDQ, OPC_PUNPCKLQDQ }; @@ -3074,12 +3855,6 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, static int const smax_insn[4] = { OPC_PMAXSB, OPC_PMAXSW, OPC_PMAXSD, OPC_VPMAXSQ }; - static int const umin_insn[4] = { - OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_VPMINUQ - }; - static int const umax_insn[4] = { - OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_VPMAXUQ - }; static int const rotlv_insn[4] = { OPC_UD2, OPC_UD2, OPC_VPROLVD, OPC_VPROLVQ }; @@ -3231,29 +4006,21 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, goto gen_simd; gen_simd: tcg_debug_assert(insn != OPC_UD2); - if (type == TCG_TYPE_V256) { - insn |= P_VEXL; - } - tcg_out_vex_modrm(s, insn, a0, a1, a2); + tcg_out_vex_modrm_type(s, insn, a0, a1, a2, type); break; case INDEX_op_cmp_vec: - sub = args[3]; - if (sub == TCG_COND_EQ) { - insn = cmpeq_insn[vece]; - } else if (sub == TCG_COND_GT) { - insn = cmpgt_insn[vece]; - } else { - g_assert_not_reached(); - } - goto gen_simd; + tcg_out_cmp_vec(s, type, vece, a0, a1, a2, args[3]); + break; + + case INDEX_op_cmpsel_vec: + tcg_out_cmpsel_vec(s, type, vece, a0, a1, a2, + args[3], args[4], args[5]); + break; case INDEX_op_andc_vec: insn = OPC_PANDN; - if (type == TCG_TYPE_V256) { - insn |= P_VEXL; - } - tcg_out_vex_modrm(s, insn, a0, a2, a1); + tcg_out_vex_modrm_type(s, insn, a0, a2, a1, type); break; case INDEX_op_shli_vec: @@ -3281,10 +4048,7 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, goto gen_shift; gen_shift: tcg_debug_assert(vece != MO_8); - if (type == TCG_TYPE_V256) { - insn |= P_VEXL; - } - tcg_out_vex_modrm(s, insn, sub, a0, a1); + tcg_out_vex_modrm_type(s, insn, sub, a0, a1, type); tcg_out8(s, a2); 
break; @@ -3361,22 +4125,10 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, gen_simd_imm8: tcg_debug_assert(insn != OPC_UD2); - if (type == TCG_TYPE_V256) { - insn |= P_VEXL; - } - tcg_out_vex_modrm(s, insn, a0, a1, a2); + tcg_out_vex_modrm_type(s, insn, a0, a1, a2, type); tcg_out8(s, sub); break; - case INDEX_op_x86_vpblendvb_vec: - insn = OPC_VPBLENDVB; - if (type == TCG_TYPE_V256) { - insn |= P_VEXL; - } - tcg_out_vex_modrm(s, insn, a0, a1, a2); - tcg_out8(s, args[3] << 4); - break; - case INDEX_op_x86_psrldq_vec: tcg_out_vex_modrm(s, OPC_GRP14, 3, a0, a1); tcg_out8(s, a2); @@ -3389,196 +4141,10 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, } } -static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) +static TCGConstraintSetIndex +tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) { switch (op) { - case INDEX_op_goto_ptr: - return C_O0_I1(r); - - case INDEX_op_ld8u_i32: - case INDEX_op_ld8u_i64: - case INDEX_op_ld8s_i32: - case INDEX_op_ld8s_i64: - case INDEX_op_ld16u_i32: - case INDEX_op_ld16u_i64: - case INDEX_op_ld16s_i32: - case INDEX_op_ld16s_i64: - case INDEX_op_ld_i32: - case INDEX_op_ld32u_i64: - case INDEX_op_ld32s_i64: - case INDEX_op_ld_i64: - return C_O1_I1(r, r); - - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - return C_O0_I2(qi, r); - - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - case INDEX_op_st_i32: - case INDEX_op_st32_i64: - return C_O0_I2(ri, r); - - case INDEX_op_st_i64: - return C_O0_I2(re, r); - - case INDEX_op_add_i32: - case INDEX_op_add_i64: - return C_O1_I2(r, r, re); - - case INDEX_op_sub_i32: - case INDEX_op_sub_i64: - case INDEX_op_mul_i32: - case INDEX_op_mul_i64: - case INDEX_op_or_i32: - case INDEX_op_or_i64: - case INDEX_op_xor_i32: - case INDEX_op_xor_i64: - return C_O1_I2(r, 0, re); - - case INDEX_op_and_i32: - case INDEX_op_and_i64: - return C_O1_I2(r, 0, reZ); - - case INDEX_op_andc_i32: - case INDEX_op_andc_i64: - return C_O1_I2(r, r, rI); - - case INDEX_op_shl_i32: - case INDEX_op_shl_i64: - case INDEX_op_shr_i32: - case INDEX_op_shr_i64: - case INDEX_op_sar_i32: - case INDEX_op_sar_i64: - return have_bmi2 ? 
C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ci); - - case INDEX_op_rotl_i32: - case INDEX_op_rotl_i64: - case INDEX_op_rotr_i32: - case INDEX_op_rotr_i64: - return C_O1_I2(r, 0, ci); - - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: - return C_O0_I2(r, reT); - - case INDEX_op_bswap16_i32: - case INDEX_op_bswap16_i64: - case INDEX_op_bswap32_i32: - case INDEX_op_bswap32_i64: - case INDEX_op_bswap64_i64: - case INDEX_op_neg_i32: - case INDEX_op_neg_i64: - case INDEX_op_not_i32: - case INDEX_op_not_i64: - case INDEX_op_extrh_i64_i32: - return C_O1_I1(r, 0); - - case INDEX_op_ext8s_i32: - case INDEX_op_ext8s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - return C_O1_I1(r, q); - - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extrl_i64_i32: - case INDEX_op_extract_i32: - case INDEX_op_extract_i64: - case INDEX_op_sextract_i32: - case INDEX_op_ctpop_i32: - case INDEX_op_ctpop_i64: - return C_O1_I1(r, r); - - case INDEX_op_extract2_i32: - case INDEX_op_extract2_i64: - return C_O1_I2(r, 0, r); - - case INDEX_op_deposit_i32: - case INDEX_op_deposit_i64: - return C_O1_I2(q, 0, qi); - - case INDEX_op_setcond_i32: - case INDEX_op_setcond_i64: - case INDEX_op_negsetcond_i32: - case INDEX_op_negsetcond_i64: - return C_O1_I2(q, r, reT); - - case INDEX_op_movcond_i32: - case INDEX_op_movcond_i64: - return C_O1_I4(r, r, reT, r, 0); - - case INDEX_op_div2_i32: - case INDEX_op_div2_i64: - case INDEX_op_divu2_i32: - case INDEX_op_divu2_i64: - return C_O2_I3(a, d, 0, 1, r); - - case INDEX_op_mulu2_i32: - case INDEX_op_mulu2_i64: - case INDEX_op_muls2_i32: - case INDEX_op_muls2_i64: - return C_O2_I2(a, d, a, r); - - case INDEX_op_add2_i32: - case INDEX_op_add2_i64: - case INDEX_op_sub2_i32: - case INDEX_op_sub2_i64: - return C_N1_O1_I4(r, r, 0, 1, re, re); - - case INDEX_op_ctz_i32: - case INDEX_op_ctz_i64: - return have_bmi1 ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r); - - case INDEX_op_clz_i32: - case INDEX_op_clz_i64: - return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r); - - case INDEX_op_qemu_ld_a32_i32: - return C_O1_I1(r, L); - case INDEX_op_qemu_ld_a64_i32: - return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O1_I2(r, L, L); - - case INDEX_op_qemu_st_a32_i32: - return C_O0_I2(L, L); - case INDEX_op_qemu_st_a64_i32: - return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I3(L, L, L); - case INDEX_op_qemu_st8_a32_i32: - return C_O0_I2(s, L); - case INDEX_op_qemu_st8_a64_i32: - return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(s, L) : C_O0_I3(s, L, L); - - case INDEX_op_qemu_ld_a32_i64: - return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I1(r, r, L); - case INDEX_op_qemu_ld_a64_i64: - return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I2(r, r, L, L); - - case INDEX_op_qemu_st_a32_i64: - return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I3(L, L, L); - case INDEX_op_qemu_st_a64_i64: - return TCG_TARGET_REG_BITS == 64 ? 
C_O0_I2(L, L) : C_O0_I4(L, L, L, L); - - case INDEX_op_qemu_ld_a32_i128: - case INDEX_op_qemu_ld_a64_i128: - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); - return C_O2_I1(r, r, L); - case INDEX_op_qemu_st_a32_i128: - case INDEX_op_qemu_st_a64_i128: - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); - return C_O0_I3(L, L, L); - - case INDEX_op_brcond2_i32: - return C_O0_I4(r, r, ri, ri); - - case INDEX_op_setcond2_i32: - return C_O1_I4(r, r, r, ri, ri); - case INDEX_op_ld_vec: case INDEX_op_dupm_vec: return C_O1_I1(x, r); @@ -3642,11 +4208,12 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) return C_O1_I3(x, 0, x, x); case INDEX_op_bitsel_vec: - case INDEX_op_x86_vpblendvb_vec: return C_O1_I3(x, x, x, x); + case INDEX_op_cmpsel_vec: + return C_O1_I4(x, x, x, xO, x); default: - g_assert_not_reached(); + return C_NotImplemented; } } @@ -3979,145 +4546,59 @@ static void expand_vec_mul(TCGType type, unsigned vece, } } -static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0, - TCGv_vec v1, TCGv_vec v2, TCGCond cond) +static TCGCond expand_vec_cond(TCGType type, unsigned vece, + TCGArg *a1, TCGArg *a2, TCGCond cond) { - enum { - NEED_INV = 1, - NEED_SWAP = 2, - NEED_BIAS = 4, - NEED_UMIN = 8, - NEED_UMAX = 16, - }; - TCGv_vec t1, t2, t3; - uint8_t fixup; - - switch (cond) { - case TCG_COND_EQ: - case TCG_COND_GT: - fixup = 0; - break; - case TCG_COND_NE: - case TCG_COND_LE: - fixup = NEED_INV; - break; - case TCG_COND_LT: - fixup = NEED_SWAP; - break; - case TCG_COND_GE: - fixup = NEED_SWAP | NEED_INV; - break; - case TCG_COND_LEU: - if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) { - fixup = NEED_UMIN; - } else { - fixup = NEED_BIAS | NEED_INV; - } - break; - case TCG_COND_GTU: - if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) { - fixup = NEED_UMIN | NEED_INV; - } else { - fixup = NEED_BIAS; - } - break; - case TCG_COND_GEU: - if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) { - fixup = NEED_UMAX; - } else { - fixup = NEED_BIAS | NEED_SWAP | NEED_INV; - } - break; - case TCG_COND_LTU: - if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) { - fixup = NEED_UMAX | NEED_INV; - } else { - fixup = NEED_BIAS | NEED_SWAP; - } - break; - default: - g_assert_not_reached(); - } - - if (fixup & NEED_INV) { - cond = tcg_invert_cond(cond); - } - if (fixup & NEED_SWAP) { - t1 = v1, v1 = v2, v2 = t1; - cond = tcg_swap_cond(cond); - } + /* + * Without AVX512, there are no 64-bit unsigned comparisons. + * We must bias the inputs so that they become signed. + * All other swapping and inversion are handled during code generation. 
+ */ + if (vece == MO_64 && !have_avx512dq && is_unsigned_cond(cond)) { + TCGv_vec v1 = temp_tcgv_vec(arg_temp(*a1)); + TCGv_vec v2 = temp_tcgv_vec(arg_temp(*a2)); + TCGv_vec t1 = tcg_temp_new_vec(type); + TCGv_vec t2 = tcg_temp_new_vec(type); + TCGv_vec t3 = tcg_constant_vec(type, vece, 1ull << ((8 << vece) - 1)); - t1 = t2 = NULL; - if (fixup & (NEED_UMIN | NEED_UMAX)) { - t1 = tcg_temp_new_vec(type); - if (fixup & NEED_UMIN) { - tcg_gen_umin_vec(vece, t1, v1, v2); - } else { - tcg_gen_umax_vec(vece, t1, v1, v2); - } - v2 = t1; - cond = TCG_COND_EQ; - } else if (fixup & NEED_BIAS) { - t1 = tcg_temp_new_vec(type); - t2 = tcg_temp_new_vec(type); - t3 = tcg_constant_vec(type, vece, 1ull << ((8 << vece) - 1)); tcg_gen_sub_vec(vece, t1, v1, t3); tcg_gen_sub_vec(vece, t2, v2, t3); - v1 = t1; - v2 = t2; + *a1 = tcgv_vec_arg(t1); + *a2 = tcgv_vec_arg(t2); cond = tcg_signed_cond(cond); } - - tcg_debug_assert(cond == TCG_COND_EQ || cond == TCG_COND_GT); - /* Expand directly; do not recurse. */ - vec_gen_4(INDEX_op_cmp_vec, type, vece, - tcgv_vec_arg(v0), tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond); - - if (t1) { - tcg_temp_free_vec(t1); - if (t2) { - tcg_temp_free_vec(t2); - } - } - return fixup & NEED_INV; + return cond; } -static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0, - TCGv_vec v1, TCGv_vec v2, TCGCond cond) +static void expand_vec_cmp(TCGType type, unsigned vece, TCGArg a0, + TCGArg a1, TCGArg a2, TCGCond cond) { - if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) { - tcg_gen_not_vec(vece, v0, v0); - } + cond = expand_vec_cond(type, vece, &a1, &a2, cond); + /* Expand directly; do not recurse. */ + vec_gen_4(INDEX_op_cmp_vec, type, vece, a0, a1, a2, cond); } -static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0, - TCGv_vec c1, TCGv_vec c2, - TCGv_vec v3, TCGv_vec v4, TCGCond cond) +static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGArg a0, + TCGArg a1, TCGArg a2, + TCGArg a3, TCGArg a4, TCGCond cond) { - TCGv_vec t = tcg_temp_new_vec(type); - - if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) { - /* Invert the sense of the compare by swapping arguments. */ - TCGv_vec x; - x = v3, v3 = v4, v4 = x; - } - vec_gen_4(INDEX_op_x86_vpblendvb_vec, type, vece, - tcgv_vec_arg(v0), tcgv_vec_arg(v4), - tcgv_vec_arg(v3), tcgv_vec_arg(t)); - tcg_temp_free_vec(t); + cond = expand_vec_cond(type, vece, &a1, &a2, cond); + /* Expand directly; do not recurse. */ + vec_gen_6(INDEX_op_cmpsel_vec, type, vece, a0, a1, a2, a3, a4, cond); } void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, TCGArg a0, ...) 
{ va_list va; - TCGArg a2; - TCGv_vec v0, v1, v2, v3, v4; + TCGArg a1, a2, a3, a4, a5; + TCGv_vec v0, v1, v2; va_start(va, a0); - v0 = temp_tcgv_vec(arg_temp(a0)); - v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); + a1 = va_arg(va, TCGArg); a2 = va_arg(va, TCGArg); + v0 = temp_tcgv_vec(arg_temp(a0)); + v1 = temp_tcgv_vec(arg_temp(a1)); switch (opc) { case INDEX_op_shli_vec: @@ -4153,15 +4634,15 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, break; case INDEX_op_cmp_vec: - v2 = temp_tcgv_vec(arg_temp(a2)); - expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg)); + a3 = va_arg(va, TCGArg); + expand_vec_cmp(type, vece, a0, a1, a2, a3); break; case INDEX_op_cmpsel_vec: - v2 = temp_tcgv_vec(arg_temp(a2)); - v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); - v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); - expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg)); + a3 = va_arg(va, TCGArg); + a4 = va_arg(va, TCGArg); + a5 = va_arg(va, TCGArg); + expand_vec_cmpsel(type, vece, a0, a1, a2, a3, a4, a5); break; default: diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h index 2f67a97e059..3cbdfbca52e 100644 --- a/tcg/i386/tcg-target.h +++ b/tcg/i386/tcg-target.h @@ -25,8 +25,6 @@ #ifndef I386_TCG_TARGET_H #define I386_TCG_TARGET_H -#include "host/cpuinfo.h" - #define TCG_TARGET_INSN_UNIT_SIZE 1 #ifdef __x86_64__ @@ -90,164 +88,4 @@ typedef enum { TCG_REG_CALL_STACK = TCG_REG_ESP } TCGReg; -/* used for function call generation */ -#define TCG_TARGET_STACK_ALIGN 16 -#if defined(_WIN64) -#define TCG_TARGET_CALL_STACK_OFFSET 32 -#else -#define TCG_TARGET_CALL_STACK_OFFSET 0 -#endif -#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL -#if defined(_WIN64) -# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF -# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_VEC -#elif TCG_TARGET_REG_BITS == 64 -# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL -# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL -#else -# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL -# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF -#endif - -#define have_bmi1 (cpuinfo & CPUINFO_BMI1) -#define have_popcnt (cpuinfo & CPUINFO_POPCNT) -#define have_avx1 (cpuinfo & CPUINFO_AVX1) -#define have_avx2 (cpuinfo & CPUINFO_AVX2) -#define have_movbe (cpuinfo & CPUINFO_MOVBE) - -/* - * There are interesting instructions in AVX512, so long as we have AVX512VL, - * which indicates support for EVEX on sizes smaller than 512 bits. 
- */ -#define have_avx512vl ((cpuinfo & CPUINFO_AVX512VL) && \ - (cpuinfo & CPUINFO_AVX512F)) -#define have_avx512bw ((cpuinfo & CPUINFO_AVX512BW) && have_avx512vl) -#define have_avx512dq ((cpuinfo & CPUINFO_AVX512DQ) && have_avx512vl) -#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl) - -/* optional instructions */ -#define TCG_TARGET_HAS_div2_i32 1 -#define TCG_TARGET_HAS_rot_i32 1 -#define TCG_TARGET_HAS_ext8s_i32 1 -#define TCG_TARGET_HAS_ext16s_i32 1 -#define TCG_TARGET_HAS_ext8u_i32 1 -#define TCG_TARGET_HAS_ext16u_i32 1 -#define TCG_TARGET_HAS_bswap16_i32 1 -#define TCG_TARGET_HAS_bswap32_i32 1 -#define TCG_TARGET_HAS_not_i32 1 -#define TCG_TARGET_HAS_andc_i32 have_bmi1 -#define TCG_TARGET_HAS_orc_i32 0 -#define TCG_TARGET_HAS_eqv_i32 0 -#define TCG_TARGET_HAS_nand_i32 0 -#define TCG_TARGET_HAS_nor_i32 0 -#define TCG_TARGET_HAS_clz_i32 1 -#define TCG_TARGET_HAS_ctz_i32 1 -#define TCG_TARGET_HAS_ctpop_i32 have_popcnt -#define TCG_TARGET_HAS_deposit_i32 1 -#define TCG_TARGET_HAS_extract_i32 1 -#define TCG_TARGET_HAS_sextract_i32 1 -#define TCG_TARGET_HAS_extract2_i32 1 -#define TCG_TARGET_HAS_negsetcond_i32 1 -#define TCG_TARGET_HAS_add2_i32 1 -#define TCG_TARGET_HAS_sub2_i32 1 -#define TCG_TARGET_HAS_mulu2_i32 1 -#define TCG_TARGET_HAS_muls2_i32 1 -#define TCG_TARGET_HAS_muluh_i32 0 -#define TCG_TARGET_HAS_mulsh_i32 0 - -#if TCG_TARGET_REG_BITS == 64 -/* Keep 32-bit values zero-extended in a register. */ -#define TCG_TARGET_HAS_extr_i64_i32 1 -#define TCG_TARGET_HAS_div2_i64 1 -#define TCG_TARGET_HAS_rot_i64 1 -#define TCG_TARGET_HAS_ext8s_i64 1 -#define TCG_TARGET_HAS_ext16s_i64 1 -#define TCG_TARGET_HAS_ext32s_i64 1 -#define TCG_TARGET_HAS_ext8u_i64 1 -#define TCG_TARGET_HAS_ext16u_i64 1 -#define TCG_TARGET_HAS_ext32u_i64 1 -#define TCG_TARGET_HAS_bswap16_i64 1 -#define TCG_TARGET_HAS_bswap32_i64 1 -#define TCG_TARGET_HAS_bswap64_i64 1 -#define TCG_TARGET_HAS_not_i64 1 -#define TCG_TARGET_HAS_andc_i64 have_bmi1 -#define TCG_TARGET_HAS_orc_i64 0 -#define TCG_TARGET_HAS_eqv_i64 0 -#define TCG_TARGET_HAS_nand_i64 0 -#define TCG_TARGET_HAS_nor_i64 0 -#define TCG_TARGET_HAS_clz_i64 1 -#define TCG_TARGET_HAS_ctz_i64 1 -#define TCG_TARGET_HAS_ctpop_i64 have_popcnt -#define TCG_TARGET_HAS_deposit_i64 1 -#define TCG_TARGET_HAS_extract_i64 1 -#define TCG_TARGET_HAS_sextract_i64 0 -#define TCG_TARGET_HAS_extract2_i64 1 -#define TCG_TARGET_HAS_negsetcond_i64 1 -#define TCG_TARGET_HAS_add2_i64 1 -#define TCG_TARGET_HAS_sub2_i64 1 -#define TCG_TARGET_HAS_mulu2_i64 1 -#define TCG_TARGET_HAS_muls2_i64 1 -#define TCG_TARGET_HAS_muluh_i64 0 -#define TCG_TARGET_HAS_mulsh_i64 0 -#define TCG_TARGET_HAS_qemu_st8_i32 0 -#else -#define TCG_TARGET_HAS_qemu_st8_i32 1 -#endif - -#define TCG_TARGET_HAS_qemu_ldst_i128 \ - (TCG_TARGET_REG_BITS == 64 && (cpuinfo & CPUINFO_ATOMIC_VMOVDQA)) - -#define TCG_TARGET_HAS_tst 1 - -/* We do not support older SSE systems, only beginning with AVX1. 
*/ -#define TCG_TARGET_HAS_v64 have_avx1 -#define TCG_TARGET_HAS_v128 have_avx1 -#define TCG_TARGET_HAS_v256 have_avx2 - -#define TCG_TARGET_HAS_andc_vec 1 -#define TCG_TARGET_HAS_orc_vec have_avx512vl -#define TCG_TARGET_HAS_nand_vec have_avx512vl -#define TCG_TARGET_HAS_nor_vec have_avx512vl -#define TCG_TARGET_HAS_eqv_vec have_avx512vl -#define TCG_TARGET_HAS_not_vec have_avx512vl -#define TCG_TARGET_HAS_neg_vec 0 -#define TCG_TARGET_HAS_abs_vec 1 -#define TCG_TARGET_HAS_roti_vec have_avx512vl -#define TCG_TARGET_HAS_rots_vec 0 -#define TCG_TARGET_HAS_rotv_vec have_avx512vl -#define TCG_TARGET_HAS_shi_vec 1 -#define TCG_TARGET_HAS_shs_vec 1 -#define TCG_TARGET_HAS_shv_vec have_avx2 -#define TCG_TARGET_HAS_mul_vec 1 -#define TCG_TARGET_HAS_sat_vec 1 -#define TCG_TARGET_HAS_minmax_vec 1 -#define TCG_TARGET_HAS_bitsel_vec have_avx512vl -#define TCG_TARGET_HAS_cmpsel_vec -1 -#define TCG_TARGET_HAS_tst_vec 0 - -#define TCG_TARGET_deposit_i32_valid(ofs, len) \ - (((ofs) == 0 && ((len) == 8 || (len) == 16)) || \ - (TCG_TARGET_REG_BITS == 32 && (ofs) == 8 && (len) == 8)) -#define TCG_TARGET_deposit_i64_valid TCG_TARGET_deposit_i32_valid - -/* Check for the possibility of high-byte extraction and, for 64-bit, - zero-extending 32-bit right-shift. */ -#define TCG_TARGET_extract_i32_valid(ofs, len) ((ofs) == 8 && (len) == 8) -#define TCG_TARGET_extract_i64_valid(ofs, len) \ - (((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32) - -/* This defines the natural memory order supported by this - * architecture before guarantees made by various barrier - * instructions. - * - * The x86 has a pretty strong memory ordering which only really - * allows for some stores to be re-ordered after loads. - */ -#include "tcg/tcg-mo.h" - -#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) -#define TCG_TARGET_NEED_LDST_LABELS -#define TCG_TARGET_NEED_POOL_LABELS - #endif diff --git a/tcg/i386/tcg-target.opc.h b/tcg/i386/tcg-target.opc.h deleted file mode 100644 index b5f403e35e1..00000000000 --- a/tcg/i386/tcg-target.opc.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2019 Linaro - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - * - * Target-specific opcodes for host vector expansion. These will be - * emitted by tcg_expand_vec_op. For those familiar with GCC internals, - * consider these to be UNSPEC with names. 
- */ - -DEF(x86_shufps_vec, 1, 2, 1, IMPLVEC) -DEF(x86_vpblendvb_vec, 1, 3, 0, IMPLVEC) -DEF(x86_blend_vec, 1, 2, 1, IMPLVEC) -DEF(x86_packss_vec, 1, 2, 0, IMPLVEC) -DEF(x86_packus_vec, 1, 2, 0, IMPLVEC) -DEF(x86_psrldq_vec, 1, 1, 1, IMPLVEC) -DEF(x86_vperm2i128_vec, 1, 2, 1, IMPLVEC) -DEF(x86_punpckl_vec, 1, 2, 0, IMPLVEC) -DEF(x86_punpckh_vec, 1, 2, 0, IMPLVEC) -DEF(x86_vpshldi_vec, 1, 2, 1, IMPLVEC) -DEF(x86_vpshldv_vec, 1, 3, 0, IMPLVEC) -DEF(x86_vpshrdv_vec, 1, 3, 0, IMPLVEC) diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h index cae6c2aad64..fd731c0c0fc 100644 --- a/tcg/loongarch64/tcg-target-con-set.h +++ b/tcg/loongarch64/tcg-target-con-set.h @@ -15,27 +15,23 @@ * tcg-target-con-str.h; the constraint combination is inclusive or. */ C_O0_I1(r) -C_O0_I2(rZ, r) -C_O0_I2(rZ, rZ) +C_O0_I2(rz, r) +C_O0_I2(r, rz) C_O0_I2(w, r) C_O0_I3(r, r, r) C_O1_I1(r, r) C_O1_I1(w, r) C_O1_I1(w, w) -C_O1_I2(r, r, rC) +C_O1_I2(r, r, r) C_O1_I2(r, r, ri) C_O1_I2(r, r, rI) C_O1_I2(r, r, rJ) C_O1_I2(r, r, rU) C_O1_I2(r, r, rW) -C_O1_I2(r, r, rZ) -C_O1_I2(r, 0, rZ) -C_O1_I2(r, rZ, ri) -C_O1_I2(r, rZ, rJ) -C_O1_I2(r, rZ, rZ) +C_O1_I2(r, 0, rz) C_O1_I2(w, w, w) C_O1_I2(w, w, wM) C_O1_I2(w, w, wA) C_O1_I3(w, w, w, w) -C_O1_I4(r, rZ, rJ, rZ, rZ) +C_O1_I4(r, r, rJ, rz, rz) C_N2_I1(r, r, r) diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h index 2ba9c135ac1..e5e57452d6d 100644 --- a/tcg/loongarch64/tcg-target-con-str.h +++ b/tcg/loongarch64/tcg-target-con-str.h @@ -23,8 +23,6 @@ REGS('w', ALL_VECTOR_REGS) CONST('I', TCG_CT_CONST_S12) CONST('J', TCG_CT_CONST_S32) CONST('U', TCG_CT_CONST_U12) -CONST('Z', TCG_CT_CONST_ZERO) -CONST('C', TCG_CT_CONST_C12) CONST('W', TCG_CT_CONST_WSZ) CONST('M', TCG_CT_CONST_VCMP) CONST('A', TCG_CT_CONST_VADD) diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h new file mode 100644 index 00000000000..32abc6f4574 --- /dev/null +++ b/tcg/loongarch64/tcg-target-has.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific opcode support + * Copyright (c) 2021 WANG Xuerui + */ + +#ifndef TCG_TARGET_HAS_H +#define TCG_TARGET_HAS_H + +#include "host/cpuinfo.h" + +/* 64-bit operations */ +#define TCG_TARGET_HAS_extr_i64_i32 1 + +#define TCG_TARGET_HAS_qemu_ldst_i128 (cpuinfo & CPUINFO_LSX) + +#define TCG_TARGET_HAS_tst 0 + +#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_LSX) +#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_LSX) +#define TCG_TARGET_HAS_v256 (cpuinfo & CPUINFO_LASX) + +#define TCG_TARGET_HAS_not_vec 1 +#define TCG_TARGET_HAS_neg_vec 1 +#define TCG_TARGET_HAS_abs_vec 0 +#define TCG_TARGET_HAS_andc_vec 1 +#define TCG_TARGET_HAS_orc_vec 1 +#define TCG_TARGET_HAS_nand_vec 0 +#define TCG_TARGET_HAS_nor_vec 1 +#define TCG_TARGET_HAS_eqv_vec 0 +#define TCG_TARGET_HAS_mul_vec 1 +#define TCG_TARGET_HAS_shi_vec 1 +#define TCG_TARGET_HAS_shs_vec 0 +#define TCG_TARGET_HAS_shv_vec 1 +#define TCG_TARGET_HAS_roti_vec 1 +#define TCG_TARGET_HAS_rots_vec 0 +#define TCG_TARGET_HAS_rotv_vec 1 +#define TCG_TARGET_HAS_sat_vec 1 +#define TCG_TARGET_HAS_minmax_vec 1 +#define TCG_TARGET_HAS_bitsel_vec 1 +#define TCG_TARGET_HAS_cmpsel_vec 0 +#define TCG_TARGET_HAS_tst_vec 0 + +#define TCG_TARGET_extract_valid(type, ofs, len) 1 +#define TCG_TARGET_deposit_valid(type, ofs, len) 1 + +static inline bool +tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len) +{ + if (type == TCG_TYPE_I64 && ofs + len == 32) { + return true; + } + return ofs == 0 && (len == 8 || 
len == 16); +} +#define TCG_TARGET_sextract_valid tcg_target_sextract_valid + +#endif diff --git a/tcg/loongarch64/tcg-target-mo.h b/tcg/loongarch64/tcg-target-mo.h new file mode 100644 index 00000000000..d35506957ff --- /dev/null +++ b/tcg/loongarch64/tcg-target-mo.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific memory model + * Copyright (c) 2021 WANG Xuerui + */ + +#ifndef TCG_TARGET_MO_H +#define TCG_TARGET_MO_H + +#define TCG_TARGET_DEFAULT_MO 0 + +#endif diff --git a/tcg/loongarch64/tcg-target.opc.h b/tcg/loongarch64/tcg-target-opc.h.inc similarity index 100% rename from tcg/loongarch64/tcg-target.opc.h rename to tcg/loongarch64/tcg-target-opc.h.inc diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc index 5b7ed5c176b..10c69211ac5 100644 --- a/tcg/loongarch64/tcg-target.c.inc +++ b/tcg/loongarch64/tcg-target.c.inc @@ -29,9 +29,17 @@ * THE SOFTWARE. */ -#include "../tcg-ldst.c.inc" #include +/* used for function call generation */ +#define TCG_REG_CALL_STACK TCG_REG_SP +#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_CALL_STACK_OFFSET 0 +#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL + #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { "zero", @@ -165,14 +173,12 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) #define TCG_GUEST_BASE_REG TCG_REG_S1 -#define TCG_CT_CONST_ZERO 0x100 -#define TCG_CT_CONST_S12 0x200 -#define TCG_CT_CONST_S32 0x400 -#define TCG_CT_CONST_U12 0x800 -#define TCG_CT_CONST_C12 0x1000 -#define TCG_CT_CONST_WSZ 0x2000 -#define TCG_CT_CONST_VCMP 0x4000 -#define TCG_CT_CONST_VADD 0x8000 +#define TCG_CT_CONST_S12 0x100 +#define TCG_CT_CONST_S32 0x200 +#define TCG_CT_CONST_U12 0x400 +#define TCG_CT_CONST_WSZ 0x800 +#define TCG_CT_CONST_VCMP 0x1000 +#define TCG_CT_CONST_VADD 0x2000 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) #define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) @@ -189,9 +195,6 @@ static bool tcg_target_const_match(int64_t val, int ct, if (ct & TCG_CT_CONST) { return true; } - if ((ct & TCG_CT_CONST_ZERO) && val == 0) { - return true; - } if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) { return true; } @@ -201,18 +204,27 @@ static bool tcg_target_const_match(int64_t val, int ct, if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) { return true; } - if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) { - return true; - } if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 
32 : 64)) { return true; } - int64_t vec_val = sextract64(val, 0, 8 << vece); - if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) { - return true; - } - if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) { - return true; + if (ct & (TCG_CT_CONST_VCMP | TCG_CT_CONST_VADD)) { + int64_t vec_val = sextract64(val, 0, 8 << vece); + if (ct & TCG_CT_CONST_VCMP) { + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_LE: + case TCG_COND_LT: + return -0x10 <= vec_val && vec_val <= 0x0f; + case TCG_COND_LEU: + case TCG_COND_LTU: + return 0x00 <= vec_val && vec_val <= 0x1f; + default: + return false; + } + } + if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) { + return true; + } } return false; } @@ -289,7 +301,7 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, * TCG intrinsics */ -static void tcg_out_mb(TCGContext *s, TCGArg a0) +static void tcg_out_mb(TCGContext *s, unsigned a0) { /* Baseline LoongArch only has the full barrier, unfortunately. */ tcg_out_opc_dbar(s, 0); @@ -534,28 +546,6 @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg) tcg_out_ext32s(s, ret, arg); } -static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc, - TCGReg a0, TCGReg a1, TCGReg a2, - bool c2, bool is_32bit) -{ - if (c2) { - /* - * Fast path: semantics already satisfied due to constraint and - * insn behavior, single instruction is enough. - */ - tcg_debug_assert(a2 == (is_32bit ? 32 : 64)); - /* all clz/ctz insns belong to DJ-format */ - tcg_out32(s, encode_dj_insn(opc, a0, a1)); - return; - } - - tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1)); - /* a0 = a1 ? REG_TMP0 : a2 */ - tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); - tcg_out_opc_masknez(s, a0, a2, a1); - tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0); -} - #define SETCOND_INV TCG_TARGET_NB_REGS #define SETCOND_NEZ (SETCOND_INV << 1) #define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ) @@ -650,21 +640,35 @@ static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret, default: g_assert_not_reached(); - break; } return ret | flags; } static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg arg1, tcg_target_long arg2, bool c2) + TCGReg arg1, tcg_target_long arg2, + bool c2, bool neg) { int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2); + TCGReg tmp = tmpflags & ~SETCOND_FLAGS; - if (tmpflags != ret) { - TCGReg tmp = tmpflags & ~SETCOND_FLAGS; - + if (neg) { + /* If intermediate result is zero/non-zero: test != 0. */ + if (tmpflags & SETCOND_NEZ) { + tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp); + tmp = ret; + } + /* Produce the 0/-1 result. */ + if (tmpflags & SETCOND_INV) { + tcg_out_opc_addi_d(s, ret, tmp, -1); + } else { + tcg_out_opc_sub_d(s, ret, TCG_REG_ZERO, tmp); + } + } else { switch (tmpflags & SETCOND_FLAGS) { + case 0: + tcg_debug_assert(tmp == ret); + break; case SETCOND_INV: /* Intermediate result is boolean: simply invert. 
*/ tcg_out_opc_xori(s, ret, tmp, 1); @@ -683,11 +687,47 @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, } } -static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg c1, tcg_target_long c2, bool const2, - TCGReg v1, TCGReg v2) +static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) { - int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2); + tcg_out_setcond(s, cond, dest, arg1, arg2, false, false); +} + +static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, tcg_target_long arg2) +{ + tcg_out_setcond(s, cond, dest, arg1, arg2, true, false); +} + +static const TCGOutOpSetcond outop_setcond = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_setcond, + .out_rri = tgen_setcondi, +}; + +static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) +{ + tcg_out_setcond(s, cond, dest, arg1, arg2, false, true); +} + +static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, tcg_target_long arg2) +{ + tcg_out_setcond(s, cond, dest, arg1, arg2, true, true); +} + +static const TCGOutOpSetcond outop_negsetcond = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_negsetcond, + .out_rri = tgen_negsetcondi, +}; + +static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2, + TCGArg v1, bool const_v1, TCGArg v2, bool const_v2) +{ + int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const_c2); TCGReg t; /* Standardize the test below to t != 0. */ @@ -707,10 +747,21 @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, } } +static const TCGOutOpMovcond outop_movcond = { + .base.static_constraint = C_O1_I4(r, r, rJ, rz, rz), + .out = tgen_movcond, +}; + /* * Branch helpers */ +static void tcg_out_br(TCGContext *s, TCGLabel *l) +{ + tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, l, 0); + tcg_out_opc_b(s, 0); +} + static const struct { LoongArchInsn op; bool swap; @@ -727,8 +778,8 @@ static const struct { [TCG_COND_GTU] = { OPC_BGTU, false } }; -static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, - TCGReg arg2, TCGLabel *l) +static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg arg1, TCGReg arg2, TCGLabel *l) { LoongArchInsn op = tcg_brcond_to_loongarch[cond].op; @@ -745,6 +796,11 @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0)); } +static const TCGOutOpBrcond outop_brcond = { + .base.static_constraint = C_O0_I2(r, rz), + .out_rr = tgen_brcond, +}; + static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) { TCGReg link = tail ? 
TCG_REG_ZERO : TCG_REG_RA; @@ -1003,13 +1059,13 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addr_reg; + ldst->addr_reg = addr_reg; tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg, - s->page_bits - CPU_TLB_ENTRY_BITS); + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); @@ -1035,7 +1091,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg); } tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO, - a_bits, s->page_bits - 1); + a_bits, TARGET_PAGE_BITS - 1); /* Compare masked address with the TLB entry. */ ldst->label_ptr[0] = s->code_ptr; @@ -1048,7 +1104,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addr_reg; + ldst->addr_reg = addr_reg; /* * Without micro-architecture details, we don't know which of @@ -1111,22 +1167,27 @@ static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type, } } -static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - MemOpIdx oi, TCGType data_type) +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data_reg, + TCGReg addr_reg, MemOpIdx oi) { TCGLabelQemuLdst *ldst; HostAddress h; ldst = prepare_host_addr(s, &h, addr_reg, oi, true); - tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h); + tcg_out_qemu_ld_indexed(s, get_memop(oi), type, data_reg, h); if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = data_reg; ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } } +static const TCGOutOpQemuLdSt outop_qemu_ld = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_qemu_ld, +}; + static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc, TCGReg rd, HostAddress h) { @@ -1151,8 +1212,8 @@ static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc, } } -static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - MemOpIdx oi, TCGType data_type) +static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data_reg, + TCGReg addr_reg, MemOpIdx oi) { TCGLabelQemuLdst *ldst; HostAddress h; @@ -1161,12 +1222,17 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h); if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = data_reg; ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } } +static const TCGOutOpQemuLdSt outop_qemu_st = { + .base.static_constraint = C_O0_I2(rz, r), + .out = tgen_qemu_st, +}; + static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi, TCGReg addr_reg, MemOpIdx oi, bool is_ld) { @@ -1214,6 +1280,28 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi } } +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr_reg, MemOpIdx oi) +{ + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, true); +} + +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = { + .base.static_constraint = C_N2_I1(r, r, r), + .out = tgen_qemu_ld2, +}; + +static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr_reg, MemOpIdx 
oi) +{ + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, false); +} + +static const TCGOutOpQemuLdSt2 outop_qemu_st2 = { + .base.static_constraint = C_O0_I3(r, r, r), + .out = tgen_qemu_st2, +}; + /* * Entry-points */ @@ -1251,6 +1339,11 @@ static void tcg_out_goto_tb(TCGContext *s, int which) set_jmp_reset_offset(s, which); } +static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0) +{ + tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0); +} + void tb_target_set_jmp_target(const TranslationBlock *tb, int n, uintptr_t jmp_rx, uintptr_t jmp_rw) { @@ -1271,423 +1364,684 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n, flush_idcache_range(jmp_rx, jmp_rw, 4); } -static void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) -{ - TCGArg a0 = args[0]; - TCGArg a1 = args[1]; - TCGArg a2 = args[2]; - TCGArg a3 = args[3]; - int c2 = const_args[2]; - switch (opc) { - case INDEX_op_mb: - tcg_out_mb(s, a0); - break; +static void tgen_add(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_add_w(s, a0, a1, a2); + } else { + tcg_out_opc_add_d(s, a0, a1, a2); + } +} - case INDEX_op_goto_ptr: - tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0); - break; +static const TCGOutOpBinary outop_add = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_add, + .out_rri = tcg_out_addi, +}; - case INDEX_op_br: - tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0), - 0); - tcg_out_opc_b(s, 0); - break; +static const TCGOutOpBinary outop_addco = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: - tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); - break; +static const TCGOutOpAddSubCarry outop_addci = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_extrh_i64_i32: - tcg_out_opc_srai_d(s, a0, a1, 32); - break; +static const TCGOutOpBinary outop_addcio = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_not_i32: - case INDEX_op_not_i64: - tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO); - break; +static void tcg_out_set_carry(TCGContext *s) +{ + g_assert_not_reached(); +} - case INDEX_op_nor_i32: - case INDEX_op_nor_i64: - if (c2) { - tcg_out_opc_ori(s, a0, a1, a2); - tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO); - } else { - tcg_out_opc_nor(s, a0, a1, a2); - } - break; +static void tgen_and(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_and(s, a0, a1, a2); +} - case INDEX_op_andc_i32: - case INDEX_op_andc_i64: - if (c2) { - /* guaranteed to fit due to constraint */ - tcg_out_opc_andi(s, a0, a1, ~a2); - } else { - tcg_out_opc_andn(s, a0, a1, a2); - } - break; +static void tgen_andi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_opc_andi(s, a0, a1, a2); +} - case INDEX_op_orc_i32: - case INDEX_op_orc_i64: - if (c2) { - /* guaranteed to fit due to constraint */ - tcg_out_opc_ori(s, a0, a1, ~a2); - } else { - tcg_out_opc_orn(s, a0, a1, a2); - } - break; +static const TCGOutOpBinary outop_and = { + .base.static_constraint = C_O1_I2(r, r, rU), + .out_rrr = tgen_and, + .out_rri = tgen_andi, +}; - case INDEX_op_and_i32: - case INDEX_op_and_i64: - if (c2) { - tcg_out_opc_andi(s, a0, a1, a2); - } else { - tcg_out_opc_and(s, a0, a1, a2); - } - break; +static void tgen_andc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_andn(s, a0, a1, a2); +} - case INDEX_op_or_i32: - case 
INDEX_op_or_i64: - if (c2) { - tcg_out_opc_ori(s, a0, a1, a2); - } else { - tcg_out_opc_or(s, a0, a1, a2); - } - break; +static const TCGOutOpBinary outop_andc = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_andc, +}; - case INDEX_op_xor_i32: - case INDEX_op_xor_i64: - if (c2) { - tcg_out_opc_xori(s, a0, a1, a2); - } else { - tcg_out_opc_xor(s, a0, a1, a2); - } - break; +static void tgen_clzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + /* a2 is constrained to exactly the type width. */ + if (type == TCG_TYPE_I32) { + tcg_out_opc_clz_w(s, a0, a1); + } else { + tcg_out_opc_clz_d(s, a0, a1); + } +} - case INDEX_op_extract_i32: - tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1); - break; - case INDEX_op_extract_i64: - tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1); - break; +static void tgen_clz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tgen_clzi(s, type, TCG_REG_TMP0, a1, /* ignored */ 0); + /* a0 = a1 ? REG_TMP0 : a2 */ + tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); + tcg_out_opc_masknez(s, a0, a2, a1); + tcg_out_opc_or(s, a0, a0, TCG_REG_TMP0); +} - case INDEX_op_deposit_i32: - tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1); - break; - case INDEX_op_deposit_i64: - tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1); - break; +static const TCGOutOpBinary outop_clz = { + .base.static_constraint = C_O1_I2(r, r, rW), + .out_rrr = tgen_clz, + .out_rri = tgen_clzi, +}; - case INDEX_op_bswap16_i32: - case INDEX_op_bswap16_i64: - tcg_out_opc_revb_2h(s, a0, a1); - if (a2 & TCG_BSWAP_OS) { - tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0); - } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { - tcg_out_ext16u(s, a0, a0); - } - break; +static const TCGOutOpUnary outop_ctpop = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_bswap32_i32: - /* All 32-bit values are computed sign-extended in the register. */ - a2 = TCG_BSWAP_OS; - /* fallthrough */ - case INDEX_op_bswap32_i64: - tcg_out_opc_revb_2w(s, a0, a1); - if (a2 & TCG_BSWAP_OS) { - tcg_out_ext32s(s, a0, a0); - } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { - tcg_out_ext32u(s, a0, a0); - } - break; +static void tgen_ctzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + /* a2 is constrained to exactly the type width. */ + if (type == TCG_TYPE_I32) { + tcg_out_opc_ctz_w(s, a0, a1); + } else { + tcg_out_opc_ctz_d(s, a0, a1); + } +} - case INDEX_op_bswap64_i64: - tcg_out_opc_revb_d(s, a0, a1); - break; +static void tgen_ctz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tgen_ctzi(s, type, TCG_REG_TMP0, a1, /* ignored */ 0); + /* a0 = a1 ? 
REG_TMP0 : a2 */ + tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); + tcg_out_opc_masknez(s, a0, a2, a1); + tcg_out_opc_or(s, a0, a0, TCG_REG_TMP0); +} - case INDEX_op_clz_i32: - tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true); - break; - case INDEX_op_clz_i64: - tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false); - break; +static const TCGOutOpBinary outop_ctz = { + .base.static_constraint = C_O1_I2(r, r, rW), + .out_rrr = tgen_ctz, + .out_rri = tgen_ctzi, +}; - case INDEX_op_ctz_i32: - tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true); - break; - case INDEX_op_ctz_i64: - tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false); - break; +static void tgen_divs(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_div_w(s, a0, a1, a2); + } else { + tcg_out_opc_div_d(s, a0, a1, a2); + } +} - case INDEX_op_shl_i32: - if (c2) { - tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f); - } else { - tcg_out_opc_sll_w(s, a0, a1, a2); - } - break; - case INDEX_op_shl_i64: - if (c2) { - tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f); - } else { - tcg_out_opc_sll_d(s, a0, a1, a2); - } - break; +static const TCGOutOpBinary outop_divs = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_divs, +}; - case INDEX_op_shr_i32: - if (c2) { - tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f); - } else { - tcg_out_opc_srl_w(s, a0, a1, a2); - } - break; - case INDEX_op_shr_i64: - if (c2) { - tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f); - } else { - tcg_out_opc_srl_d(s, a0, a1, a2); - } - break; +static const TCGOutOpDivRem outop_divs2 = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_sar_i32: - if (c2) { - tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f); - } else { - tcg_out_opc_sra_w(s, a0, a1, a2); - } - break; - case INDEX_op_sar_i64: - if (c2) { - tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f); - } else { - tcg_out_opc_sra_d(s, a0, a1, a2); - } - break; +static void tgen_divu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_div_wu(s, a0, a1, a2); + } else { + tcg_out_opc_div_du(s, a0, a1, a2); + } +} - case INDEX_op_rotl_i32: - /* transform into equivalent rotr/rotri */ - if (c2) { - tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f); - } else { - tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); - tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0); - } - break; - case INDEX_op_rotl_i64: - /* transform into equivalent rotr/rotri */ - if (c2) { - tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f); - } else { - tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); - tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0); - } - break; +static const TCGOutOpBinary outop_divu = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_divu, +}; - case INDEX_op_rotr_i32: - if (c2) { - tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f); - } else { - tcg_out_opc_rotr_w(s, a0, a1, a2); - } - break; - case INDEX_op_rotr_i64: - if (c2) { - tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f); - } else { - tcg_out_opc_rotr_d(s, a0, a1, a2); - } - break; +static const TCGOutOpDivRem outop_divu2 = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_add_i32: - if (c2) { - tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2); - } else { - tcg_out_opc_add_w(s, a0, a1, a2); - } - break; - case INDEX_op_add_i64: - if (c2) { - tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2); - } else { - tcg_out_opc_add_d(s, a0, a1, a2); - } - break; +static const TCGOutOpBinary outop_eqv = { + .base.static_constraint = C_NotImplemented, +}; - case 
INDEX_op_sub_i32: - if (c2) { - tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2); - } else { - tcg_out_opc_sub_w(s, a0, a1, a2); - } - break; - case INDEX_op_sub_i64: - if (c2) { - tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2); - } else { - tcg_out_opc_sub_d(s, a0, a1, a2); - } - break; +static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) +{ + tcg_out_opc_srai_d(s, a0, a1, 32); +} - case INDEX_op_neg_i32: - tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1); - break; - case INDEX_op_neg_i64: - tcg_out_opc_sub_d(s, a0, TCG_REG_ZERO, a1); - break; +static const TCGOutOpUnary outop_extrh_i64_i32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extrh_i64_i32, +}; - case INDEX_op_mul_i32: +static void tgen_mul(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { tcg_out_opc_mul_w(s, a0, a1, a2); - break; - case INDEX_op_mul_i64: + } else { tcg_out_opc_mul_d(s, a0, a1, a2); - break; + } +} + +static const TCGOutOpBinary outop_mul = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_mul, +}; + +static const TCGOutOpMul2 outop_muls2 = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_mulsh_i32: +static void tgen_mulsh(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { tcg_out_opc_mulh_w(s, a0, a1, a2); - break; - case INDEX_op_mulsh_i64: + } else { tcg_out_opc_mulh_d(s, a0, a1, a2); - break; + } +} - case INDEX_op_muluh_i32: +static const TCGOutOpBinary outop_mulsh = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_mulsh, +}; + +static const TCGOutOpMul2 outop_mulu2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_muluh(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { tcg_out_opc_mulh_wu(s, a0, a1, a2); - break; - case INDEX_op_muluh_i64: + } else { tcg_out_opc_mulh_du(s, a0, a1, a2); - break; + } +} - case INDEX_op_div_i32: - tcg_out_opc_div_w(s, a0, a1, a2); - break; - case INDEX_op_div_i64: - tcg_out_opc_div_d(s, a0, a1, a2); - break; +static const TCGOutOpBinary outop_muluh = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_muluh, +}; - case INDEX_op_divu_i32: - tcg_out_opc_div_wu(s, a0, a1, a2); - break; - case INDEX_op_divu_i64: - tcg_out_opc_div_du(s, a0, a1, a2); - break; +static const TCGOutOpBinary outop_nand = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_nor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_nor(s, a0, a1, a2); +} + +static const TCGOutOpBinary outop_nor = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_nor, +}; + +static void tgen_or(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_or(s, a0, a1, a2); +} + +static void tgen_ori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_opc_ori(s, a0, a1, a2); +} + +static const TCGOutOpBinary outop_or = { + .base.static_constraint = C_O1_I2(r, r, rU), + .out_rrr = tgen_or, + .out_rri = tgen_ori, +}; - case INDEX_op_rem_i32: +static void tgen_orc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_orn(s, a0, a1, a2); +} + +static const TCGOutOpBinary outop_orc = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_orc, +}; + +static void tgen_rems(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { tcg_out_opc_mod_w(s, a0, a1, a2); - break; - case 
INDEX_op_rem_i64: + } else { tcg_out_opc_mod_d(s, a0, a1, a2); - break; + } +} + +static const TCGOutOpBinary outop_rems = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_rems, +}; - case INDEX_op_remu_i32: +static void tgen_remu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { tcg_out_opc_mod_wu(s, a0, a1, a2); - break; - case INDEX_op_remu_i64: + } else { tcg_out_opc_mod_du(s, a0, a1, a2); - break; + } +} - case INDEX_op_setcond_i32: - case INDEX_op_setcond_i64: - tcg_out_setcond(s, args[3], a0, a1, a2, c2); - break; +static const TCGOutOpBinary outop_remu = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_remu, +}; - case INDEX_op_movcond_i32: - case INDEX_op_movcond_i64: - tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]); - break; +static const TCGOutOpBinary outop_rotl = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_ld8s_i32: - case INDEX_op_ld8s_i64: - tcg_out_ldst(s, OPC_LD_B, a0, a1, a2); - break; - case INDEX_op_ld8u_i32: - case INDEX_op_ld8u_i64: - tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2); - break; - case INDEX_op_ld16s_i32: - case INDEX_op_ld16s_i64: - tcg_out_ldst(s, OPC_LD_H, a0, a1, a2); - break; - case INDEX_op_ld16u_i32: - case INDEX_op_ld16u_i64: - tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2); - break; - case INDEX_op_ld_i32: - case INDEX_op_ld32s_i64: - tcg_out_ldst(s, OPC_LD_W, a0, a1, a2); - break; - case INDEX_op_ld32u_i64: - tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2); - break; - case INDEX_op_ld_i64: - tcg_out_ldst(s, OPC_LD_D, a0, a1, a2); - break; +static void tgen_rotr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_rotr_w(s, a0, a1, a2); + } else { + tcg_out_opc_rotr_d(s, a0, a1, a2); + } +} - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - tcg_out_ldst(s, OPC_ST_B, a0, a1, a2); - break; - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - tcg_out_ldst(s, OPC_ST_H, a0, a1, a2); - break; - case INDEX_op_st_i32: - case INDEX_op_st32_i64: - tcg_out_ldst(s, OPC_ST_W, a0, a1, a2); - break; - case INDEX_op_st_i64: - tcg_out_ldst(s, OPC_ST_D, a0, a1, a2); - break; +static void tgen_rotri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f); + } else { + tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f); + } +} - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: - tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); - break; - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_ld_a64_i64: - tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); - break; - case INDEX_op_qemu_ld_a32_i128: - case INDEX_op_qemu_ld_a64_i128: - tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true); - break; - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st_a64_i32: - tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); - break; - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_st_a64_i64: - tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); - break; - case INDEX_op_qemu_st_a32_i128: - case INDEX_op_qemu_st_a64_i128: - tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false); - break; +static const TCGOutOpBinary outop_rotr = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_rotr, + .out_rri = tgen_rotri, +}; - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_mov_i64: - case INDEX_op_call: /* Always emitted via tcg_out_call. */ - case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. 
*/ - case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ - case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ - case INDEX_op_ext8s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extrl_i64_i32: - default: - g_assert_not_reached(); +static void tgen_sar(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_sra_w(s, a0, a1, a2); + } else { + tcg_out_opc_sra_d(s, a0, a1, a2); + } +} + +static void tgen_sari(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f); + } else { + tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f); } } +static const TCGOutOpBinary outop_sar = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_sar, + .out_rri = tgen_sari, +}; + +static void tgen_shl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_sll_w(s, a0, a1, a2); + } else { + tcg_out_opc_sll_d(s, a0, a1, a2); + } +} + +static void tgen_shli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f); + } else { + tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f); + } +} + +static const TCGOutOpBinary outop_shl = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shl, + .out_rri = tgen_shli, +}; + +static void tgen_shr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_srl_w(s, a0, a1, a2); + } else { + tcg_out_opc_srl_d(s, a0, a1, a2); + } +} + +static void tgen_shri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f); + } else { + tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f); + } +} + +static const TCGOutOpBinary outop_shr = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shr, + .out_rri = tgen_shri, +}; + +static void tgen_sub(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_sub_w(s, a0, a1, a2); + } else { + tcg_out_opc_sub_d(s, a0, a1, a2); + } +} + +static const TCGOutOpSubtract outop_sub = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_sub, +}; + +static const TCGOutOpAddSubCarry outop_subbo = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpAddSubCarry outop_subbi = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpAddSubCarry outop_subbio = { + .base.static_constraint = C_NotImplemented, +}; + +static void tcg_out_set_borrow(TCGContext *s) +{ + g_assert_not_reached(); +} + +static void tgen_xor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_xor(s, a0, a1, a2); +} + +static void tgen_xori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_opc_xori(s, a0, a1, a2); +} + +static const TCGOutOpBinary outop_xor = { + .base.static_constraint = C_O1_I2(r, r, rU), + .out_rrr = tgen_xor, + .out_rri = tgen_xori, +}; + +static void tgen_bswap16(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, unsigned flags) +{ + tcg_out_opc_revb_2h(s, a0, a1); + if 
(flags & TCG_BSWAP_OS) { + tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0); + } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + tcg_out_ext16u(s, a0, a0); + } +} + +static const TCGOutOpBswap outop_bswap16 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap16, +}; + +static void tgen_bswap32(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, unsigned flags) +{ + tcg_out_opc_revb_2w(s, a0, a1); + + /* All 32-bit values are computed sign-extended in the register. */ + if (type == TCG_TYPE_I32 || (flags & TCG_BSWAP_OS)) { + tcg_out_ext32s(s, a0, a0); + } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + tcg_out_ext32u(s, a0, a0); + } +} + +static const TCGOutOpBswap outop_bswap32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap32, +}; + +static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tcg_out_opc_revb_d(s, a0, a1); +} + +static const TCGOutOpUnary outop_bswap64 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap64, +}; + +static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tgen_sub(s, type, a0, TCG_REG_ZERO, a1); +} + +static const TCGOutOpUnary outop_neg = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_neg, +}; + +static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tgen_nor(s, type, a0, a1, TCG_REG_ZERO); +} + +static const TCGOutOpUnary outop_not = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_not, +}; + +static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + TCGReg a2, unsigned ofs, unsigned len) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_bstrins_w(s, a0, a2, ofs, ofs + len - 1); + } else { + tcg_out_opc_bstrins_d(s, a0, a2, ofs, ofs + len - 1); + } +} + +static const TCGOutOpDeposit outop_deposit = { + .base.static_constraint = C_O1_I2(r, 0, rz), + .out_rrr = tgen_deposit, +}; + +static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + if (ofs == 0 && len <= 12) { + tcg_out_opc_andi(s, a0, a1, (1 << len) - 1); + } else if (type == TCG_TYPE_I32) { + tcg_out_opc_bstrpick_w(s, a0, a1, ofs, ofs + len - 1); + } else { + tcg_out_opc_bstrpick_d(s, a0, a1, ofs, ofs + len - 1); + } +} + +static const TCGOutOpExtract outop_extract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extract, +}; + +static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + if (ofs == 0) { + switch (len) { + case 8: + tcg_out_ext8s(s, type, a0, a1); + return; + case 16: + tcg_out_ext16s(s, type, a0, a1); + return; + case 32: + tcg_out_ext32s(s, a0, a1); + return; + } + } else if (ofs + len == 32) { + tcg_out_opc_srai_w(s, a0, a1, ofs); + return; + } + g_assert_not_reached(); +} + +static const TCGOutOpExtract outop_sextract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_sextract, +}; + +static const TCGOutOpExtract2 outop_extract2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LD_BU, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld8u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8u, +}; + +static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LD_B, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld8s = { + 
.base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8s, +}; + +static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LD_HU, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld16u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16u, +}; + +static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LD_H, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld16s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16s, +}; + +static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LD_WU, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld32u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32u, +}; + +static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LD_W, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld32s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32s, +}; + +static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_ST_B, data, base, offset); +} + +static const TCGOutOpStore outop_st8 = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tgen_st8_r, +}; + +static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_ST_H, data, base, offset); +} + +static const TCGOutOpStore outop_st16 = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tgen_st16_r, +}; + +static const TCGOutOpStore outop_st = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tcg_out_st, +}; + static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg rd, TCGReg rs) { @@ -2002,28 +2356,22 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, * Try vseqi/vslei/vslti */ int64_t value = sextract64(a2, 0, 8 << vece); - if ((cond == TCG_COND_EQ || - cond == TCG_COND_LE || - cond == TCG_COND_LT) && - (-0x10 <= value && value <= 0x0f)) { + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_LE: + case TCG_COND_LT: insn = cmp_vec_imm_insn[cond][lasx][vece]; tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value)); break; - } else if ((cond == TCG_COND_LEU || - cond == TCG_COND_LTU) && - (0x00 <= value && value <= 0x1f)) { + case TCG_COND_LEU: + case TCG_COND_LTU: insn = cmp_vec_imm_insn[cond][lasx][vece]; tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value)); break; + default: + g_assert_not_reached(); } - - /* - * Fallback to: - * dupi_vec temp, a2 - * cmp_vec a0, a1, temp, cond - */ - tcg_out_dupi_vec(s, type, vece, TCG_VEC_TMP0, a2); - a2 = TCG_VEC_TMP0; + break; } insn = cmp_vec_insn[cond][lasx][vece]; @@ -2184,157 +2532,10 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, g_assert_not_reached(); } -static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) +static TCGConstraintSetIndex +tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) { switch (op) { - case INDEX_op_goto_ptr: - return C_O0_I1(r); - - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - case INDEX_op_st32_i64: - case INDEX_op_st_i32: - case INDEX_op_st_i64: - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st_a64_i32: - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_st_a64_i64: - return C_O0_I2(rZ, r); - - case 
INDEX_op_qemu_ld_a32_i128: - case INDEX_op_qemu_ld_a64_i128: - return C_N2_I1(r, r, r); - - case INDEX_op_qemu_st_a32_i128: - case INDEX_op_qemu_st_a64_i128: - return C_O0_I3(r, r, r); - - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: - return C_O0_I2(rZ, rZ); - - case INDEX_op_ext8s_i32: - case INDEX_op_ext8s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extrl_i64_i32: - case INDEX_op_extrh_i64_i32: - case INDEX_op_ext_i32_i64: - case INDEX_op_neg_i32: - case INDEX_op_neg_i64: - case INDEX_op_not_i32: - case INDEX_op_not_i64: - case INDEX_op_extract_i32: - case INDEX_op_extract_i64: - case INDEX_op_bswap16_i32: - case INDEX_op_bswap16_i64: - case INDEX_op_bswap32_i32: - case INDEX_op_bswap32_i64: - case INDEX_op_bswap64_i64: - case INDEX_op_ld8s_i32: - case INDEX_op_ld8s_i64: - case INDEX_op_ld8u_i32: - case INDEX_op_ld8u_i64: - case INDEX_op_ld16s_i32: - case INDEX_op_ld16s_i64: - case INDEX_op_ld16u_i32: - case INDEX_op_ld16u_i64: - case INDEX_op_ld32s_i64: - case INDEX_op_ld32u_i64: - case INDEX_op_ld_i32: - case INDEX_op_ld_i64: - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_ld_a64_i64: - return C_O1_I1(r, r); - - case INDEX_op_andc_i32: - case INDEX_op_andc_i64: - case INDEX_op_orc_i32: - case INDEX_op_orc_i64: - /* - * LoongArch insns for these ops don't have reg-imm forms, but we - * can express using andi/ori if ~constant satisfies - * TCG_CT_CONST_U12. - */ - return C_O1_I2(r, r, rC); - - case INDEX_op_shl_i32: - case INDEX_op_shl_i64: - case INDEX_op_shr_i32: - case INDEX_op_shr_i64: - case INDEX_op_sar_i32: - case INDEX_op_sar_i64: - case INDEX_op_rotl_i32: - case INDEX_op_rotl_i64: - case INDEX_op_rotr_i32: - case INDEX_op_rotr_i64: - return C_O1_I2(r, r, ri); - - case INDEX_op_add_i32: - return C_O1_I2(r, r, ri); - case INDEX_op_add_i64: - return C_O1_I2(r, r, rJ); - - case INDEX_op_and_i32: - case INDEX_op_and_i64: - case INDEX_op_nor_i32: - case INDEX_op_nor_i64: - case INDEX_op_or_i32: - case INDEX_op_or_i64: - case INDEX_op_xor_i32: - case INDEX_op_xor_i64: - /* LoongArch reg-imm bitops have their imms ZERO-extended */ - return C_O1_I2(r, r, rU); - - case INDEX_op_clz_i32: - case INDEX_op_clz_i64: - case INDEX_op_ctz_i32: - case INDEX_op_ctz_i64: - return C_O1_I2(r, r, rW); - - case INDEX_op_deposit_i32: - case INDEX_op_deposit_i64: - /* Must deposit into the same register as input */ - return C_O1_I2(r, 0, rZ); - - case INDEX_op_sub_i32: - case INDEX_op_setcond_i32: - return C_O1_I2(r, rZ, ri); - case INDEX_op_sub_i64: - case INDEX_op_setcond_i64: - return C_O1_I2(r, rZ, rJ); - - case INDEX_op_mul_i32: - case INDEX_op_mul_i64: - case INDEX_op_mulsh_i32: - case INDEX_op_mulsh_i64: - case INDEX_op_muluh_i32: - case INDEX_op_muluh_i64: - case INDEX_op_div_i32: - case INDEX_op_div_i64: - case INDEX_op_divu_i32: - case INDEX_op_divu_i64: - case INDEX_op_rem_i32: - case INDEX_op_rem_i64: - case INDEX_op_remu_i32: - case INDEX_op_remu_i64: - return C_O1_I2(r, rZ, rZ); - - case INDEX_op_movcond_i32: - case INDEX_op_movcond_i64: - return C_O1_I4(r, rZ, rJ, rZ, rZ); - case INDEX_op_ld_vec: case INDEX_op_dupm_vec: case INDEX_op_dup_vec: @@ -2384,7 +2585,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) return C_O1_I3(w, w, w, w); default: - g_assert_not_reached(); 
+ return C_NotImplemented; } } @@ -2457,6 +2658,14 @@ static void tcg_out_tb_start(TCGContext *s) /* nothing to do */ } +static void tcg_out_nop_fill(tcg_insn_unit *p, int count) +{ + for (int i = 0; i < count; ++i) { + /* Canonical nop is andi r0,r0,0 */ + p[i] = OPC_ANDI; + } +} + static void tcg_target_init(TCGContext *s) { unsigned long hwcap = qemu_getauxval(AT_HWCAP); diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h index 58bd7d258e7..6a206fb97e6 100644 --- a/tcg/loongarch64/tcg-target.h +++ b/tcg/loongarch64/tcg-target.h @@ -29,8 +29,6 @@ #ifndef LOONGARCH_TCG_TARGET_H #define LOONGARCH_TCG_TARGET_H -#include "host/cpuinfo.h" - #define TCG_TARGET_INSN_UNIT_SIZE 4 #define TCG_TARGET_NB_REGS 64 @@ -87,117 +85,6 @@ typedef enum { TCG_VEC_TMP0 = TCG_REG_V23, } TCGReg; -/* used for function call generation */ -#define TCG_REG_CALL_STACK TCG_REG_SP -#define TCG_TARGET_STACK_ALIGN 16 -#define TCG_TARGET_CALL_STACK_OFFSET 0 -#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL - -/* optional instructions */ -#define TCG_TARGET_HAS_negsetcond_i32 0 -#define TCG_TARGET_HAS_div_i32 1 -#define TCG_TARGET_HAS_rem_i32 1 -#define TCG_TARGET_HAS_div2_i32 0 -#define TCG_TARGET_HAS_rot_i32 1 -#define TCG_TARGET_HAS_deposit_i32 1 -#define TCG_TARGET_HAS_extract_i32 1 -#define TCG_TARGET_HAS_sextract_i32 0 -#define TCG_TARGET_HAS_extract2_i32 0 -#define TCG_TARGET_HAS_add2_i32 0 -#define TCG_TARGET_HAS_sub2_i32 0 -#define TCG_TARGET_HAS_mulu2_i32 0 -#define TCG_TARGET_HAS_muls2_i32 0 -#define TCG_TARGET_HAS_muluh_i32 1 -#define TCG_TARGET_HAS_mulsh_i32 1 -#define TCG_TARGET_HAS_ext8s_i32 1 -#define TCG_TARGET_HAS_ext16s_i32 1 -#define TCG_TARGET_HAS_ext8u_i32 1 -#define TCG_TARGET_HAS_ext16u_i32 1 -#define TCG_TARGET_HAS_bswap16_i32 1 -#define TCG_TARGET_HAS_bswap32_i32 1 -#define TCG_TARGET_HAS_not_i32 1 -#define TCG_TARGET_HAS_andc_i32 1 -#define TCG_TARGET_HAS_orc_i32 1 -#define TCG_TARGET_HAS_eqv_i32 0 -#define TCG_TARGET_HAS_nand_i32 0 -#define TCG_TARGET_HAS_nor_i32 1 -#define TCG_TARGET_HAS_clz_i32 1 -#define TCG_TARGET_HAS_ctz_i32 1 -#define TCG_TARGET_HAS_ctpop_i32 0 -#define TCG_TARGET_HAS_brcond2 0 -#define TCG_TARGET_HAS_setcond2 0 -#define TCG_TARGET_HAS_qemu_st8_i32 0 - -/* 64-bit operations */ -#define TCG_TARGET_HAS_negsetcond_i64 0 -#define TCG_TARGET_HAS_div_i64 1 -#define TCG_TARGET_HAS_rem_i64 1 -#define TCG_TARGET_HAS_div2_i64 0 -#define TCG_TARGET_HAS_rot_i64 1 -#define TCG_TARGET_HAS_deposit_i64 1 -#define TCG_TARGET_HAS_extract_i64 1 -#define TCG_TARGET_HAS_sextract_i64 0 -#define TCG_TARGET_HAS_extract2_i64 0 -#define TCG_TARGET_HAS_extr_i64_i32 1 -#define TCG_TARGET_HAS_ext8s_i64 1 -#define TCG_TARGET_HAS_ext16s_i64 1 -#define TCG_TARGET_HAS_ext32s_i64 1 -#define TCG_TARGET_HAS_ext8u_i64 1 -#define TCG_TARGET_HAS_ext16u_i64 1 -#define TCG_TARGET_HAS_ext32u_i64 1 -#define TCG_TARGET_HAS_bswap16_i64 1 -#define TCG_TARGET_HAS_bswap32_i64 1 -#define TCG_TARGET_HAS_bswap64_i64 1 -#define TCG_TARGET_HAS_not_i64 1 -#define TCG_TARGET_HAS_andc_i64 1 -#define TCG_TARGET_HAS_orc_i64 1 -#define TCG_TARGET_HAS_eqv_i64 0 -#define TCG_TARGET_HAS_nand_i64 0 -#define TCG_TARGET_HAS_nor_i64 1 -#define TCG_TARGET_HAS_clz_i64 1 -#define TCG_TARGET_HAS_ctz_i64 1 -#define TCG_TARGET_HAS_ctpop_i64 0 -#define TCG_TARGET_HAS_add2_i64 0 -#define TCG_TARGET_HAS_sub2_i64 0 -#define TCG_TARGET_HAS_mulu2_i64 0 -#define 
TCG_TARGET_HAS_muls2_i64 0 -#define TCG_TARGET_HAS_muluh_i64 1 -#define TCG_TARGET_HAS_mulsh_i64 1 - -#define TCG_TARGET_HAS_qemu_ldst_i128 (cpuinfo & CPUINFO_LSX) - -#define TCG_TARGET_HAS_tst 0 - -#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_LSX) -#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_LSX) -#define TCG_TARGET_HAS_v256 (cpuinfo & CPUINFO_LASX) - -#define TCG_TARGET_HAS_not_vec 1 -#define TCG_TARGET_HAS_neg_vec 1 -#define TCG_TARGET_HAS_abs_vec 0 -#define TCG_TARGET_HAS_andc_vec 1 -#define TCG_TARGET_HAS_orc_vec 1 -#define TCG_TARGET_HAS_nand_vec 0 -#define TCG_TARGET_HAS_nor_vec 1 -#define TCG_TARGET_HAS_eqv_vec 0 -#define TCG_TARGET_HAS_mul_vec 1 -#define TCG_TARGET_HAS_shi_vec 1 -#define TCG_TARGET_HAS_shs_vec 0 -#define TCG_TARGET_HAS_shv_vec 1 -#define TCG_TARGET_HAS_roti_vec 1 -#define TCG_TARGET_HAS_rots_vec 0 -#define TCG_TARGET_HAS_rotv_vec 1 -#define TCG_TARGET_HAS_sat_vec 1 -#define TCG_TARGET_HAS_minmax_vec 1 -#define TCG_TARGET_HAS_bitsel_vec 1 -#define TCG_TARGET_HAS_cmpsel_vec 0 -#define TCG_TARGET_HAS_tst_vec 0 - -#define TCG_TARGET_DEFAULT_MO (0) - -#define TCG_TARGET_NEED_LDST_LABELS +#define TCG_REG_ZERO TCG_REG_ZERO #endif /* LOONGARCH_TCG_TARGET_H */ diff --git a/tcg/meson.build b/tcg/meson.build index 69ebb4908a6..706a6eb260e 100644 --- a/tcg/meson.build +++ b/tcg/meson.build @@ -1,4 +1,4 @@ -if not get_option('tcg').allowed() +if not have_tcg subdir_done() endif @@ -27,24 +27,5 @@ if host_os == 'linux' tcg_ss.add(files('perf.c')) endif -tcg_ss = tcg_ss.apply({}) - -libtcg_user = static_library('tcg_user', - tcg_ss.sources() + genh, - dependencies: tcg_ss.dependencies(), - c_args: '-DCONFIG_USER_ONLY', - build_by_default: false) - -tcg_user = declare_dependency(objects: libtcg_user.extract_all_objects(recursive: false), - dependencies: tcg_ss.dependencies()) -user_ss.add(tcg_user) - -libtcg_system = static_library('tcg_system', - tcg_ss.sources() + genh, - dependencies: tcg_ss.dependencies(), - c_args: '-DCONFIG_SOFTMMU', - build_by_default: false) - -tcg_system = declare_dependency(objects: libtcg_system.extract_all_objects(recursive: false), - dependencies: tcg_ss.dependencies()) -system_ss.add(tcg_system) +user_ss.add_all(tcg_ss) +system_ss.add_all(tcg_ss) diff --git a/tcg/mips/tcg-target-con-set.h b/tcg/mips/tcg-target-con-set.h index 864034f4687..5304691dc18 100644 --- a/tcg/mips/tcg-target-con-set.h +++ b/tcg/mips/tcg-target-con-set.h @@ -10,24 +10,21 @@ * tcg-target-con-str.h; the constraint combination is inclusive or. 
*/ C_O0_I1(r) -C_O0_I2(rZ, r) -C_O0_I2(rZ, rZ) -C_O0_I3(rZ, r, r) -C_O0_I3(rZ, rZ, r) -C_O0_I4(rZ, rZ, rZ, rZ) -C_O0_I4(rZ, rZ, r, r) +C_O0_I2(r, rz) +C_O0_I2(rz, r) +C_O0_I3(rz, rz, r) +C_O0_I4(r, r, rz, rz) C_O1_I1(r, r) -C_O1_I2(r, 0, rZ) +C_O1_I2(r, 0, rz) C_O1_I2(r, r, r) C_O1_I2(r, r, ri) C_O1_I2(r, r, rI) C_O1_I2(r, r, rIK) C_O1_I2(r, r, rJ) -C_O1_I2(r, r, rWZ) -C_O1_I2(r, rZ, rN) -C_O1_I2(r, rZ, rZ) -C_O1_I4(r, rZ, rZ, rZ, 0) -C_O1_I4(r, rZ, rZ, rZ, rZ) +C_O1_I2(r, r, rz) +C_O1_I2(r, r, rzW) +C_O1_I4(r, r, rz, rz, 0) +C_O1_I4(r, r, rz, rz, rz) +C_O1_I4(r, r, r, rz, rz) C_O2_I1(r, r, r) C_O2_I2(r, r, r, r) -C_O2_I4(r, r, rZ, rZ, rN, rN) diff --git a/tcg/mips/tcg-target-con-str.h b/tcg/mips/tcg-target-con-str.h index 413c280a7a9..db2b225e4a2 100644 --- a/tcg/mips/tcg-target-con-str.h +++ b/tcg/mips/tcg-target-con-str.h @@ -17,6 +17,4 @@ REGS('r', ALL_GENERAL_REGS) CONST('I', TCG_CT_CONST_U16) CONST('J', TCG_CT_CONST_S16) CONST('K', TCG_CT_CONST_P2M1) -CONST('N', TCG_CT_CONST_N16) CONST('W', TCG_CT_CONST_WSZ) -CONST('Z', TCG_CT_CONST_ZERO) diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h new file mode 100644 index 00000000000..b9eb3385288 --- /dev/null +++ b/tcg/mips/tcg-target-has.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific opcode support + * Copyright (c) 2008-2009 Arnaud Patard + * Copyright (c) 2009 Aurelien Jarno + */ + +#ifndef TCG_TARGET_HAS_H +#define TCG_TARGET_HAS_H + +/* MOVN/MOVZ instructions detection */ +#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \ + defined(_MIPS_ARCH_LOONGSON2E) || defined(_MIPS_ARCH_LOONGSON2F) || \ + defined(_MIPS_ARCH_MIPS4) +#define use_movnz_instructions 1 +#else +extern bool use_movnz_instructions; +#endif + +/* MIPS32 instruction set detection */ +#if defined(__mips_isa_rev) && (__mips_isa_rev >= 1) +#define use_mips32_instructions 1 +#else +extern bool use_mips32_instructions; +#endif + +/* MIPS32R2 instruction set detection */ +#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) +#define use_mips32r2_instructions 1 +#else +extern bool use_mips32r2_instructions; +#endif + +/* MIPS32R6 instruction set detection */ +#if defined(__mips_isa_rev) && (__mips_isa_rev >= 6) +#define use_mips32r6_instructions 1 +#else +#define use_mips32r6_instructions 0 +#endif + +/* optional instructions */ +#if TCG_TARGET_REG_BITS == 64 +#define TCG_TARGET_HAS_extr_i64_i32 1 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext32u_i64 1 +#endif + +/* optional instructions detected at runtime */ +#define TCG_TARGET_HAS_qemu_ldst_i128 0 +#define TCG_TARGET_HAS_tst 0 + +#define TCG_TARGET_extract_valid(type, ofs, len) use_mips32r2_instructions +#define TCG_TARGET_deposit_valid(type, ofs, len) use_mips32r2_instructions + +static inline bool +tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len) +{ + if (ofs == 0) { + switch (len) { + case 8: + case 16: + return use_mips32r2_instructions; + case 32: + return type == TCG_TYPE_I64; + } + } + return false; +} +#define TCG_TARGET_sextract_valid tcg_target_sextract_valid + +#endif diff --git a/tcg/mips/tcg-target-mo.h b/tcg/mips/tcg-target-mo.h new file mode 100644 index 00000000000..50cefc222d9 --- /dev/null +++ b/tcg/mips/tcg-target-mo.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific memory model + * Copyright (c) 2008-2009 Arnaud Patard + * Copyright (c) 2009 Aurelien Jarno + */ + +#ifndef TCG_TARGET_MO_H +#define TCG_TARGET_MO_H + +#define TCG_TARGET_DEFAULT_MO 0 + +#endif diff --git 
a/tcg/mips/tcg-target-opc.h.inc b/tcg/mips/tcg-target-opc.h.inc new file mode 100644 index 00000000000..84e777bfe53 --- /dev/null +++ b/tcg/mips/tcg-target-opc.h.inc @@ -0,0 +1 @@ +/* No target specific opcodes. */ diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index 3b5b5c6d5ba..400eafbab4b 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -24,8 +24,19 @@ * THE SOFTWARE. */ -#include "../tcg-ldst.c.inc" -#include "../tcg-pool.c.inc" +/* used for function call generation */ +#define TCG_TARGET_STACK_ALIGN 16 +#if _MIPS_SIM == _ABIO32 +# define TCG_TARGET_CALL_STACK_OFFSET 16 +# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN +# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF +#else +# define TCG_TARGET_CALL_STACK_OFFSET 0 +# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL +# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL +#endif +#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN #if TCG_TARGET_REG_BITS == 32 # define LO_OFF (HOST_BIG_ENDIAN * 4) @@ -173,12 +184,10 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, g_assert_not_reached(); } -#define TCG_CT_CONST_ZERO 0x100 -#define TCG_CT_CONST_U16 0x200 /* Unsigned 16-bit: 0 - 0xffff. */ -#define TCG_CT_CONST_S16 0x400 /* Signed 16-bit: -32768 - 32767 */ -#define TCG_CT_CONST_P2M1 0x800 /* Power of 2 minus 1. */ -#define TCG_CT_CONST_N16 0x1000 /* "Negatable" 16-bit: -32767 - 32767 */ -#define TCG_CT_CONST_WSZ 0x2000 /* word size */ +#define TCG_CT_CONST_U16 0x100 /* Unsigned 16-bit: 0 - 0xffff. */ +#define TCG_CT_CONST_S16 0x200 /* Signed 16-bit: -32768 - 32767 */ +#define TCG_CT_CONST_P2M1 0x400 /* Power of 2 minus 1. */ +#define TCG_CT_CONST_WSZ 0x800 /* word size */ #define ALL_GENERAL_REGS 0xffffffffu @@ -193,14 +202,10 @@ static bool tcg_target_const_match(int64_t val, int ct, { if (ct & TCG_CT_CONST) { return 1; - } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { - return 1; } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) { return 1; } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) { return 1; - } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) { - return 1; } else if ((ct & TCG_CT_CONST_P2M1) && use_mips32r2_instructions && is_p2m1(val)) { return 1; @@ -639,7 +644,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type, static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) { - tcg_debug_assert(TCG_TARGET_HAS_ext8s_i32); + tcg_debug_assert(use_mips32r2_instructions); tcg_out_opc_reg(s, OPC_SEB, rd, TCG_REG_ZERO, rs); } @@ -650,7 +655,7 @@ static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs) static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) { - tcg_debug_assert(TCG_TARGET_HAS_ext16s_i32); + tcg_debug_assert(use_mips32r2_instructions); tcg_out_opc_reg(s, OPC_SEH, rd, TCG_REG_ZERO, rs); } @@ -694,39 +699,6 @@ static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, g_assert_not_reached(); } -static void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg, int flags) -{ - /* ret and arg can't be register tmp0 */ - tcg_debug_assert(ret != TCG_TMP0); - tcg_debug_assert(arg != TCG_TMP0); - - /* With arg = abcd: */ - if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); /* badc */ - if (flags & TCG_BSWAP_OS) { - tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret); /* ssdc */ - } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { - tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xffff); 
/* 00dc */ - } - return; - } - - tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); /* 0abc */ - if (!(flags & TCG_BSWAP_IZ)) { - tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0x00ff); /* 000c */ - } - if (flags & TCG_BSWAP_OS) { - tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); /* d000 */ - tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); /* ssd0 */ - } else { - tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8); /* bcd0 */ - if (flags & TCG_BSWAP_OZ) { - tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00); /* 00d0 */ - } - } - tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); /* ssdc */ -} - static void tcg_out_bswap_subr(TCGContext *s, const tcg_insn_unit *sub) { if (!tcg_out_opc_jmp(s, OPC_JAL, sub)) { @@ -735,39 +707,6 @@ static void tcg_out_bswap_subr(TCGContext *s, const tcg_insn_unit *sub) } } -static void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg, int flags) -{ - if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); - tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16); - if (flags & TCG_BSWAP_OZ) { - tcg_out_opc_bf(s, OPC_DEXT, ret, ret, 31, 0); - } - } else { - if (flags & TCG_BSWAP_OZ) { - tcg_out_bswap_subr(s, bswap32u_addr); - } else { - tcg_out_bswap_subr(s, bswap32_addr); - } - /* delay slot -- never omit the insn, like tcg_out_mov might. */ - tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO); - tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3); - } -} - -static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg) -{ - if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg); - tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret); - } else { - tcg_out_bswap_subr(s, bswap64_addr); - /* delay slot -- never omit the insn, like tcg_out_mov might. */ - tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO); - tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3); - } -} - static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) { tcg_debug_assert(TCG_TARGET_REG_BITS == 64); @@ -823,55 +762,6 @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, return false; } -static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al, - TCGReg ah, TCGArg bl, TCGArg bh, bool cbl, - bool cbh, bool is_sub) -{ - TCGReg th = TCG_TMP1; - - /* If we have a negative constant such that negating it would - make the high part zero, we can (usually) eliminate one insn. */ - if (cbl && cbh && bh == -1 && bl != 0) { - bl = -bl; - bh = 0; - is_sub = !is_sub; - } - - /* By operating on the high part first, we get to use the final - carry operation to move back from the temporary. */ - if (!cbh) { - tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh); - } else if (bh != 0 || ah == rl) { - tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh)); - } else { - th = ah; - } - - /* Note that tcg optimization should eliminate the bl == 0 case. */ - if (is_sub) { - if (cbl) { - tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl); - tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl); - } else { - tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl); - tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl); - } - tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0); - } else { - if (cbl) { - tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl); - tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl); - } else if (rl == al && rl == bl) { - tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, TCG_TARGET_REG_BITS - 1); - tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl); - } else { - tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl); - tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? 
al : bl)); - } - tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0); - } -} - #define SETCOND_INV TCG_TARGET_NB_REGS #define SETCOND_NEZ (SETCOND_INV << 1) #define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ) @@ -944,15 +834,44 @@ static void tcg_out_setcond_end(TCGContext *s, TCGReg ret, int tmpflags) } } -static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg arg1, TCGReg arg2) +static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg ret, TCGReg arg1, TCGReg arg2) { int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2); tcg_out_setcond_end(s, ret, tmpflags); } -static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, - TCGReg arg2, TCGLabel *l) +static const TCGOutOpSetcond outop_setcond = { + .base.static_constraint = C_O1_I2(r, r, rz), + .out_rrr = tgen_setcond, +}; + +static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg ret, TCGReg arg1, TCGReg arg2) +{ + int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2); + TCGReg tmp = tmpflags & ~SETCOND_FLAGS; + + /* If intermediate result is zero/non-zero: test != 0. */ + if (tmpflags & SETCOND_NEZ) { + tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp); + tmp = ret; + } + /* Produce the 0/-1 result. */ + if (tmpflags & SETCOND_INV) { + tcg_out_opc_imm(s, OPC_ADDIU, ret, tmp, -1); + } else { + tcg_out_opc_reg(s, OPC_SUBU, ret, TCG_REG_ZERO, tmp); + } +} + +static const TCGOutOpSetcond outop_negsetcond = { + .base.static_constraint = C_O1_I2(r, r, rz), + .out_rrr = tgen_negsetcond, +}; + +static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg arg1, TCGReg arg2, TCGLabel *l) { static const MIPSInsn b_zero[16] = { [TCG_COND_LT] = OPC_BLTZ, @@ -997,6 +916,16 @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, tcg_out_nop(s); } +static const TCGOutOpBrcond outop_brcond = { + .base.static_constraint = C_O0_I2(r, rz), + .out_rr = tgen_brcond, +}; + +void tcg_out_br(TCGContext *s, TCGLabel *l) +{ + tgen_brcond(s, TCG_TYPE_I32, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO, l); +} + static int tcg_out_setcond2_int(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh) { @@ -1014,25 +943,37 @@ static int tcg_out_setcond2_int(TCGContext *s, TCGCond cond, TCGReg ret, break; default: - tcg_out_setcond(s, TCG_COND_EQ, TCG_TMP0, ah, bh); - tcg_out_setcond(s, tcg_unsigned_cond(cond), TCG_TMP1, al, bl); + tgen_setcond(s, TCG_TYPE_I32, TCG_COND_EQ, TCG_TMP0, ah, bh); + tgen_setcond(s, TCG_TYPE_I32, tcg_unsigned_cond(cond), + TCG_TMP1, al, bl); tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP0); - tcg_out_setcond(s, tcg_high_cond(cond), TCG_TMP0, ah, bh); + tgen_setcond(s, TCG_TYPE_I32, tcg_high_cond(cond), TCG_TMP0, ah, bh); tcg_out_opc_reg(s, OPC_OR, ret, TCG_TMP0, TCG_TMP1); break; } return ret | flags; } -static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh) +static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg al, TCGReg ah, + TCGArg bl, bool const_bl, + TCGArg bh, bool const_bh) { int tmpflags = tcg_out_setcond2_int(s, cond, ret, al, ah, bl, bh); tcg_out_setcond_end(s, ret, tmpflags); } -static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, - TCGReg bl, TCGReg bh, TCGLabel *l) +#if TCG_TARGET_REG_BITS != 32 +__attribute__((unused)) +#endif +static const TCGOutOpSetcond2 outop_setcond2 = { + .base.static_constraint = C_O1_I4(r, r, r, rz, rz), + .out = tgen_setcond2, +}; + +static void 
tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, + TCGArg bl, bool const_bl, + TCGArg bh, bool const_bh, TCGLabel *l) { int tmpflags = tcg_out_setcond2_int(s, cond, TCG_TMP0, al, ah, bl, bh); TCGReg tmp = tmpflags & ~SETCOND_FLAGS; @@ -1043,8 +984,17 @@ static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, tcg_out_nop(s); } -static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg c1, TCGReg c2, TCGReg v1, TCGReg v2) +#if TCG_TARGET_REG_BITS != 32 +__attribute__((unused)) +#endif +static const TCGOutOpBrcond2 outop_brcond2 = { + .base.static_constraint = C_O0_I4(r, r, rz, rz), + .out = tgen_brcond2, +}; + +static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2, + TCGArg v1, bool const_v1, TCGArg v2, bool const_v2) { int tmpflags; bool eqz; @@ -1090,6 +1040,13 @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, } } +static const TCGOutOpMovcond outop_movcond = { + .base.static_constraint = (use_mips32r6_instructions + ? C_O1_I4(r, r, rz, rz, rz) + : C_O1_I4(r, r, rz, rz, 0)), + .out = tgen_movcond, +}; + static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) { /* @@ -1206,8 +1163,7 @@ bool tcg_target_has_memory_bswap(MemOp memop) * is required and fill in @h with the host address for the fast path. */ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, - TCGReg addrlo, TCGReg addrhi, - MemOpIdx oi, bool is_ld) + TCGReg addr, MemOpIdx oi, bool is_ld) { TCGType addr_type = s->addr_type; TCGLabelQemuLdst *ldst = NULL; @@ -1234,8 +1190,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addrlo; - ldst->addrhi_reg = addrhi; + ldst->addr_reg = addr; /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off); @@ -1243,82 +1198,66 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, /* Extract the TLB index from the address into TMP3. */ if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { - tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo, - s->page_bits - CPU_TLB_ENTRY_BITS); + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addr, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); } else { - tcg_out_dsrl(s, TCG_TMP3, addrlo, - s->page_bits - CPU_TLB_ENTRY_BITS); + tcg_out_dsrl(s, TCG_TMP3, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); } tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0); /* Add the tlb_table pointer, creating the CPUTLBEntry address. */ tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1); - if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { - /* Load the (low half) tlb comparator. */ + /* Load the tlb comparator. */ + if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + HOST_BIG_ENDIAN * 4); } else { - tcg_out_ld(s, TCG_TYPE_I64, TCG_TMP0, TCG_TMP3, cmp_off); + tcg_out_ld(s, TCG_TYPE_REG, TCG_TMP0, TCG_TMP3, cmp_off); } - if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) { - /* Load the tlb addend for the fast path. */ - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off); - } + /* Load the tlb addend for the fast path. */ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off); /* * Mask the page bits, keeping the alignment bits to compare against. 
* For unaligned accesses, compare against the end of the access to * verify that it does not cross a page boundary. */ - tcg_out_movi(s, addr_type, TCG_TMP1, s->page_mask | a_mask); + tcg_out_movi(s, addr_type, TCG_TMP1, TARGET_PAGE_MASK | a_mask); if (a_mask < s_mask) { tcg_out_opc_imm(s, (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32 ? OPC_ADDIU : OPC_DADDIU), - TCG_TMP2, addrlo, s_mask - a_mask); + TCG_TMP2, addr, s_mask - a_mask); tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2); } else { - tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo); + tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addr); } /* Zero extend a 32-bit guest address for a 64-bit host. */ if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { - tcg_out_ext32u(s, TCG_TMP2, addrlo); - addrlo = TCG_TMP2; + tcg_out_ext32u(s, TCG_TMP2, addr); + addr = TCG_TMP2; } ldst->label_ptr[0] = s->code_ptr; tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0); - /* Load and test the high half tlb comparator. */ - if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) { - /* delay slot */ - tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF); - - /* Load the tlb addend for the fast path. */ - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off); - - ldst->label_ptr[1] = s->code_ptr; - tcg_out_opc_br(s, OPC_BNE, addrhi, TCG_TMP0); - } - /* delay slot */ base = TCG_TMP3; - tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addrlo); + tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addr); } else { if (a_mask && (use_mips32r6_instructions || a_bits != s_bits)) { ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addrlo; - ldst->addrhi_reg = addrhi; + ldst->addr_reg = addr; /* We are expecting a_bits to max out at 7, much lower than ANDI. 
*/ tcg_debug_assert(a_bits < 16); - tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask); + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addr, a_mask); ldst->label_ptr[0] = s->code_ptr; if (use_mips32r6_instructions) { @@ -1329,7 +1268,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, } } - base = addrlo; + base = addr; if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { tcg_out_ext32u(s, TCG_REG_A0, base); base = TCG_REG_A0; @@ -1448,30 +1387,66 @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi, } } -static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addrhi, - MemOpIdx oi, TCGType data_type) +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data, + TCGReg addr, MemOpIdx oi) +{ + MemOp opc = get_memop(oi); + TCGLabelQemuLdst *ldst; + HostAddress h; + + ldst = prepare_host_addr(s, &h, addr, oi, true); + + if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) { + tcg_out_qemu_ld_direct(s, data, 0, h.base, opc, type); + } else { + tcg_out_qemu_ld_unalign(s, data, 0, h.base, opc, type); + } + + if (ldst) { + ldst->type = type; + ldst->datalo_reg = data; + ldst->datahi_reg = 0; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); + } +} + +static const TCGOutOpQemuLdSt outop_qemu_ld = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_qemu_ld, +}; + +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr, MemOpIdx oi) { MemOp opc = get_memop(oi); TCGLabelQemuLdst *ldst; HostAddress h; - ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true); + tcg_debug_assert(TCG_TARGET_REG_BITS == 32); + ldst = prepare_host_addr(s, &h, addr, oi, true); if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) { - tcg_out_qemu_ld_direct(s, datalo, datahi, h.base, opc, data_type); + tcg_out_qemu_ld_direct(s, datalo, datahi, h.base, opc, type); } else { - tcg_out_qemu_ld_unalign(s, datalo, datahi, h.base, opc, data_type); + tcg_out_qemu_ld_unalign(s, datalo, datahi, h.base, opc, type); } if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = datalo; ldst->datahi_reg = datahi; ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } } +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = { + /* Ensure that the mips32 code is compiled but discarded for mips64. */ + .base.static_constraint = + TCG_TARGET_REG_BITS == 32 ? C_O2_I1(r, r, r) : C_NotImplemented, + .out = + TCG_TARGET_REG_BITS == 32 ? 
tgen_qemu_ld2 : NULL, +}; + static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, TCGReg base, MemOp opc) { @@ -1535,15 +1510,43 @@ static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi, } } -static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addrhi, - MemOpIdx oi, TCGType data_type) +static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data, + TCGReg addr, MemOpIdx oi) +{ + MemOp opc = get_memop(oi); + TCGLabelQemuLdst *ldst; + HostAddress h; + + ldst = prepare_host_addr(s, &h, addr, oi, false); + + if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) { + tcg_out_qemu_st_direct(s, data, 0, h.base, opc); + } else { + tcg_out_qemu_st_unalign(s, data, 0, h.base, opc); + } + + if (ldst) { + ldst->type = type; + ldst->datalo_reg = data; + ldst->datahi_reg = 0; + ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); + } +} + +static const TCGOutOpQemuLdSt outop_qemu_st = { + .base.static_constraint = C_O0_I2(rz, r), + .out = tgen_qemu_st, +}; + +static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr, MemOpIdx oi) { MemOp opc = get_memop(oi); TCGLabelQemuLdst *ldst; HostAddress h; - ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false); + tcg_debug_assert(TCG_TARGET_REG_BITS == 32); + ldst = prepare_host_addr(s, &h, addr, oi, false); if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) { tcg_out_qemu_st_direct(s, datalo, datahi, h.base, opc); @@ -1552,14 +1555,22 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, } if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = datalo; ldst->datahi_reg = datahi; ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } } -static void tcg_out_mb(TCGContext *s, TCGArg a0) +static const TCGOutOpQemuLdSt2 outop_qemu_st2 = { + /* Ensure that the mips32 code is compiled but discarded for mips64. */ + .base.static_constraint = + TCG_TARGET_REG_BITS == 32 ? C_O0_I3(rz, rz, r) : C_NotImplemented, + .out = + TCG_TARGET_REG_BITS == 32 ? 
tgen_qemu_st2 : NULL, +}; + +static void tcg_out_mb(TCGContext *s, unsigned a0) { static const MIPSInsn sync[] = { /* Note that SYNC_MB is a slightly weaker than SYNC 0, @@ -1575,33 +1586,6 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) tcg_out32(s, sync[a0 & TCG_MO_ALL]); } -static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6, - int width, TCGReg a0, TCGReg a1, TCGArg a2) -{ - if (use_mips32r6_instructions) { - if (a2 == width) { - tcg_out_opc_reg(s, opcv6, a0, a1, 0); - } else { - tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0); - tcg_out_movcond(s, TCG_COND_EQ, a0, a1, 0, a2, TCG_TMP0); - } - } else { - if (a2 == width) { - tcg_out_opc_reg(s, opcv2, a0, a1, a1); - } else if (a0 == a2) { - tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1); - tcg_out_opc_reg(s, OPC_MOVN, a0, TCG_TMP0, a1); - } else if (a0 != a1) { - tcg_out_opc_reg(s, opcv2, a0, a1, a1); - tcg_out_opc_reg(s, OPC_MOVZ, a0, a2, a1); - } else { - tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1); - tcg_out_opc_reg(s, OPC_MOVZ, TCG_TMP0, a2, a1); - tcg_out_mov(s, TCG_TYPE_REG, a0, TCG_TMP0); - } - } -} - static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) { TCGReg base = TCG_REG_ZERO; @@ -1661,630 +1645,811 @@ static void tcg_out_goto_tb(TCGContext *s, int which) } } +static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0) +{ + tcg_out_opc_reg(s, OPC_JR, 0, a0, 0); + if (TCG_TARGET_REG_BITS == 64) { + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0); + } else { + tcg_out_nop(s); + } +} + void tb_target_set_jmp_target(const TranslationBlock *tb, int n, uintptr_t jmp_rx, uintptr_t jmp_rw) { /* Always indirect, nothing to do */ } -static void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) + +static void tgen_add(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_ADDU : OPC_DADDU; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static void tgen_addi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) { - MIPSInsn i1, i2; - TCGArg a0, a1, a2; - int c2; + MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_ADDIU : OPC_DADDIU; + tcg_out_opc_imm(s, insn, a0, a1, a2); +} - /* - * Note that many operands use the constraint set "rZ". - * We make use of the fact that 0 is the ZERO register, - * and hence such cases need not check for const_args. 
- */ - a0 = args[0]; - a1 = args[1]; - a2 = args[2]; - c2 = const_args[2]; - - switch (opc) { - case INDEX_op_goto_ptr: - /* jmp to the given host address (could be epilogue) */ - tcg_out_opc_reg(s, OPC_JR, 0, a0, 0); - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0); +static const TCGOutOpBinary outop_add = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_add, + .out_rri = tgen_addi, +}; + +static const TCGOutOpBinary outop_addco = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpAddSubCarry outop_addci = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_addcio = { + .base.static_constraint = C_NotImplemented, +}; + +static void tcg_out_set_carry(TCGContext *s) +{ + g_assert_not_reached(); +} + +static void tgen_and(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_reg(s, OPC_AND, a0, a1, a2); +} + +static void tgen_andi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + int msb; + + if (a2 == (uint16_t)a2) { + tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2); + return; + } + + tcg_debug_assert(use_mips32r2_instructions); + tcg_debug_assert(is_p2m1(a2)); + msb = ctz64(~a2) - 1; + if (type == TCG_TYPE_I32) { + tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0); + } else { + tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0); + } +} + +static const TCGOutOpBinary outop_and = { + .base.static_constraint = C_O1_I2(r, r, rIK), + .out_rrr = tgen_and, + .out_rri = tgen_andi, +}; + +static const TCGOutOpBinary outop_andc = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_clz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (use_mips32r6_instructions) { + MIPSInsn opcv6 = type == TCG_TYPE_I32 ? OPC_CLZ_R6 : OPC_DCLZ_R6; + tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0); + tgen_movcond(s, TCG_TYPE_REG, TCG_COND_EQ, a0, a1, a2, false, + TCG_TMP0, false, TCG_REG_ZERO, false); + } else { + MIPSInsn opcv2 = type == TCG_TYPE_I32 ? 
OPC_CLZ : OPC_DCLZ; + if (a0 == a2) { + tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1); + tcg_out_opc_reg(s, OPC_MOVN, a0, TCG_TMP0, a1); + } else if (a0 != a1) { + tcg_out_opc_reg(s, opcv2, a0, a1, a1); + tcg_out_opc_reg(s, OPC_MOVZ, a0, a2, a1); } else { - tcg_out_nop(s); + tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1); + tcg_out_opc_reg(s, OPC_MOVZ, TCG_TMP0, a2, a1); + tcg_out_mov(s, type, a0, TCG_TMP0); } - break; - case INDEX_op_br: - tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO, - arg_label(a0)); - break; + } +} - case INDEX_op_ld8u_i32: - case INDEX_op_ld8u_i64: - i1 = OPC_LBU; - goto do_ldst; - case INDEX_op_ld8s_i32: - case INDEX_op_ld8s_i64: - i1 = OPC_LB; - goto do_ldst; - case INDEX_op_ld16u_i32: - case INDEX_op_ld16u_i64: - i1 = OPC_LHU; - goto do_ldst; - case INDEX_op_ld16s_i32: - case INDEX_op_ld16s_i64: - i1 = OPC_LH; - goto do_ldst; - case INDEX_op_ld_i32: - case INDEX_op_ld32s_i64: - i1 = OPC_LW; - goto do_ldst; - case INDEX_op_ld32u_i64: - i1 = OPC_LWU; - goto do_ldst; - case INDEX_op_ld_i64: - i1 = OPC_LD; - goto do_ldst; - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - i1 = OPC_SB; - goto do_ldst; - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - i1 = OPC_SH; - goto do_ldst; - case INDEX_op_st_i32: - case INDEX_op_st32_i64: - i1 = OPC_SW; - goto do_ldst; - case INDEX_op_st_i64: - i1 = OPC_SD; - do_ldst: - tcg_out_ldst(s, i1, a0, a1, a2); - break; +static void tgen_clzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (a2 == 0) { + tgen_clz(s, type, a0, a1, TCG_REG_ZERO); + } else if (use_mips32r6_instructions) { + MIPSInsn opcv6 = type == TCG_TYPE_I32 ? OPC_CLZ_R6 : OPC_DCLZ_R6; + tcg_out_opc_reg(s, opcv6, a0, a1, 0); + } else { + MIPSInsn opcv2 = type == TCG_TYPE_I32 ? OPC_CLZ : OPC_DCLZ; + tcg_out_opc_reg(s, opcv2, a0, a1, a1); + } +} - case INDEX_op_add_i32: - i1 = OPC_ADDU, i2 = OPC_ADDIU; - goto do_binary; - case INDEX_op_add_i64: - i1 = OPC_DADDU, i2 = OPC_DADDIU; - goto do_binary; - case INDEX_op_or_i32: - case INDEX_op_or_i64: - i1 = OPC_OR, i2 = OPC_ORI; - goto do_binary; - case INDEX_op_xor_i32: - case INDEX_op_xor_i64: - i1 = OPC_XOR, i2 = OPC_XORI; - do_binary: - if (c2) { - tcg_out_opc_imm(s, i2, a0, a1, a2); - break; - } - do_binaryv: - tcg_out_opc_reg(s, i1, a0, a1, a2); - break; +static TCGConstraintSetIndex cset_clz(TCGType type, unsigned flags) +{ + return use_mips32r2_instructions ? 
C_O1_I2(r, r, rzW) : C_NotImplemented; +} - case INDEX_op_sub_i32: - i1 = OPC_SUBU, i2 = OPC_ADDIU; - goto do_subtract; - case INDEX_op_sub_i64: - i1 = OPC_DSUBU, i2 = OPC_DADDIU; - do_subtract: - if (c2) { - tcg_out_opc_imm(s, i2, a0, a1, -a2); - break; - } - goto do_binaryv; - case INDEX_op_and_i32: - if (c2 && a2 != (uint16_t)a2) { - int msb = ctz32(~a2) - 1; - tcg_debug_assert(use_mips32r2_instructions); - tcg_debug_assert(is_p2m1(a2)); - tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0); - break; - } - i1 = OPC_AND, i2 = OPC_ANDI; - goto do_binary; - case INDEX_op_and_i64: - if (c2 && a2 != (uint16_t)a2) { - int msb = ctz64(~a2) - 1; - tcg_debug_assert(use_mips32r2_instructions); - tcg_debug_assert(is_p2m1(a2)); - tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0); - break; - } - i1 = OPC_AND, i2 = OPC_ANDI; - goto do_binary; - case INDEX_op_nor_i32: - case INDEX_op_nor_i64: - i1 = OPC_NOR; - goto do_binaryv; - - case INDEX_op_mul_i32: - if (use_mips32_instructions) { - tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2); - break; +static const TCGOutOpBinary outop_clz = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_clz, + .out_rrr = tgen_clz, + .out_rri = tgen_clzi, +}; + +static const TCGOutOpUnary outop_ctpop = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_ctz = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_divs(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (use_mips32r6_instructions) { + if (type == TCG_TYPE_I32) { + tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2); + } else { + tcg_out_opc_reg(s, OPC_DDIV_R6, a0, a1, a2); } - i1 = OPC_MULT, i2 = OPC_MFLO; - goto do_hilo1; - case INDEX_op_mulsh_i32: - if (use_mips32r6_instructions) { - tcg_out_opc_reg(s, OPC_MUH, a0, a1, a2); - break; - } - i1 = OPC_MULT, i2 = OPC_MFHI; - goto do_hilo1; - case INDEX_op_muluh_i32: - if (use_mips32r6_instructions) { - tcg_out_opc_reg(s, OPC_MUHU, a0, a1, a2); - break; - } - i1 = OPC_MULTU, i2 = OPC_MFHI; - goto do_hilo1; - case INDEX_op_div_i32: - if (use_mips32r6_instructions) { - tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2); - break; - } - i1 = OPC_DIV, i2 = OPC_MFLO; - goto do_hilo1; - case INDEX_op_divu_i32: - if (use_mips32r6_instructions) { + } else { + MIPSInsn insn = type == TCG_TYPE_I32 ? 
OPC_DIV : OPC_DDIV; + tcg_out_opc_reg(s, insn, 0, a1, a2); + tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0); + } +} + +static const TCGOutOpBinary outop_divs = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_divs, +}; + +static const TCGOutOpDivRem outop_divs2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_divu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (use_mips32r6_instructions) { + if (type == TCG_TYPE_I32) { tcg_out_opc_reg(s, OPC_DIVU_R6, a0, a1, a2); - break; - } - i1 = OPC_DIVU, i2 = OPC_MFLO; - goto do_hilo1; - case INDEX_op_rem_i32: - if (use_mips32r6_instructions) { - tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2); - break; - } - i1 = OPC_DIV, i2 = OPC_MFHI; - goto do_hilo1; - case INDEX_op_remu_i32: - if (use_mips32r6_instructions) { - tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2); - break; - } - i1 = OPC_DIVU, i2 = OPC_MFHI; - goto do_hilo1; - case INDEX_op_mul_i64: - if (use_mips32r6_instructions) { - tcg_out_opc_reg(s, OPC_DMUL, a0, a1, a2); - break; - } - i1 = OPC_DMULT, i2 = OPC_MFLO; - goto do_hilo1; - case INDEX_op_mulsh_i64: - if (use_mips32r6_instructions) { - tcg_out_opc_reg(s, OPC_DMUH, a0, a1, a2); - break; - } - i1 = OPC_DMULT, i2 = OPC_MFHI; - goto do_hilo1; - case INDEX_op_muluh_i64: - if (use_mips32r6_instructions) { - tcg_out_opc_reg(s, OPC_DMUHU, a0, a1, a2); - break; - } - i1 = OPC_DMULTU, i2 = OPC_MFHI; - goto do_hilo1; - case INDEX_op_div_i64: - if (use_mips32r6_instructions) { - tcg_out_opc_reg(s, OPC_DDIV_R6, a0, a1, a2); - break; - } - i1 = OPC_DDIV, i2 = OPC_MFLO; - goto do_hilo1; - case INDEX_op_divu_i64: - if (use_mips32r6_instructions) { + } else { tcg_out_opc_reg(s, OPC_DDIVU_R6, a0, a1, a2); - break; } - i1 = OPC_DDIVU, i2 = OPC_MFLO; - goto do_hilo1; - case INDEX_op_rem_i64: - if (use_mips32r6_instructions) { - tcg_out_opc_reg(s, OPC_DMOD, a0, a1, a2); - break; + } else { + MIPSInsn insn = type == TCG_TYPE_I32 ? 
OPC_DIVU : OPC_DDIVU; + tcg_out_opc_reg(s, insn, 0, a1, a2); + tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0); + } +} + +static const TCGOutOpBinary outop_divu = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_divu, +}; + +static const TCGOutOpDivRem outop_divu2 = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_eqv = { + .base.static_constraint = C_NotImplemented, +}; + +#if TCG_TARGET_REG_BITS == 64 +static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) +{ + tcg_out_dsra(s, a0, a1, 32); +} + +static const TCGOutOpUnary outop_extrh_i64_i32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extrh_i64_i32, +}; +#endif + +static void tgen_mul(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + MIPSInsn insn; + + if (type == TCG_TYPE_I32) { + if (use_mips32_instructions) { + tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2); + return; } - i1 = OPC_DDIV, i2 = OPC_MFHI; - goto do_hilo1; - case INDEX_op_remu_i64: + insn = OPC_MULT; + } else { if (use_mips32r6_instructions) { - tcg_out_opc_reg(s, OPC_DMODU, a0, a1, a2); - break; + tcg_out_opc_reg(s, OPC_DMUL, a0, a1, a2); + return; } - i1 = OPC_DDIVU, i2 = OPC_MFHI; - do_hilo1: - tcg_out_opc_reg(s, i1, 0, a1, a2); - tcg_out_opc_reg(s, i2, a0, 0, 0); - break; + insn = OPC_DMULT; + } + tcg_out_opc_reg(s, insn, 0, a1, a2); + tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0); +} - case INDEX_op_muls2_i32: - i1 = OPC_MULT; - goto do_hilo2; - case INDEX_op_mulu2_i32: - i1 = OPC_MULTU; - goto do_hilo2; - case INDEX_op_muls2_i64: - i1 = OPC_DMULT; - goto do_hilo2; - case INDEX_op_mulu2_i64: - i1 = OPC_DMULTU; - do_hilo2: - tcg_out_opc_reg(s, i1, 0, a2, args[3]); - tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0); - tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0); - break; +static const TCGOutOpBinary outop_mul = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_mul, +}; - case INDEX_op_neg_i32: - i1 = OPC_SUBU; - goto do_unary; - case INDEX_op_neg_i64: - i1 = OPC_DSUBU; - goto do_unary; - case INDEX_op_not_i32: - case INDEX_op_not_i64: - i1 = OPC_NOR; - goto do_unary; - do_unary: - tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1); - break; +static void tgen_muls2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3) +{ + MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MULT : OPC_DMULT; + tcg_out_opc_reg(s, insn, 0, a2, a3); + tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0); + tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0); +} - case INDEX_op_bswap16_i32: - case INDEX_op_bswap16_i64: - tcg_out_bswap16(s, a0, a1, a2); - break; - case INDEX_op_bswap32_i32: - tcg_out_bswap32(s, a0, a1, 0); - break; - case INDEX_op_bswap32_i64: - tcg_out_bswap32(s, a0, a1, a2); - break; - case INDEX_op_bswap64_i64: - tcg_out_bswap64(s, a0, a1); - break; - case INDEX_op_extrh_i64_i32: - tcg_out_dsra(s, a0, a1, 32); - break; +static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags) +{ + return use_mips32r6_instructions ? 
C_NotImplemented : C_O2_I2(r, r, r, r); +} - case INDEX_op_sar_i32: - i1 = OPC_SRAV, i2 = OPC_SRA; - goto do_shift; - case INDEX_op_shl_i32: - i1 = OPC_SLLV, i2 = OPC_SLL; - goto do_shift; - case INDEX_op_shr_i32: - i1 = OPC_SRLV, i2 = OPC_SRL; - goto do_shift; - case INDEX_op_rotr_i32: - i1 = OPC_ROTRV, i2 = OPC_ROTR; - do_shift: - if (c2) { - tcg_out_opc_sa(s, i2, a0, a1, a2); - break; - } - do_shiftv: - tcg_out_opc_reg(s, i1, a0, a2, a1); - break; - case INDEX_op_rotl_i32: - if (c2) { - tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2); +static const TCGOutOpMul2 outop_muls2 = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mul2, + .out_rrrr = tgen_muls2, +}; + +static void tgen_mulsh(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (use_mips32r6_instructions) { + MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MUH : OPC_DMUH; + tcg_out_opc_reg(s, insn, a0, a1, a2); + } else { + MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MULT : OPC_DMULT; + tcg_out_opc_reg(s, insn, 0, a1, a2); + tcg_out_opc_reg(s, OPC_MFHI, a0, 0, 0); + } +} + +static const TCGOutOpBinary outop_mulsh = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_mulsh, +}; + +static void tgen_mulu2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3) +{ + MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MULTU : OPC_DMULTU; + tcg_out_opc_reg(s, insn, 0, a2, a3); + tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0); + tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0); +} + +static const TCGOutOpMul2 outop_mulu2 = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mul2, + .out_rrrr = tgen_mulu2, +}; + +static void tgen_muluh(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (use_mips32r6_instructions) { + MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MUHU : OPC_DMUHU; + tcg_out_opc_reg(s, insn, a0, a1, a2); + } else { + MIPSInsn insn = type == TCG_TYPE_I32 ? 
OPC_MULTU : OPC_DMULTU; + tcg_out_opc_reg(s, insn, 0, a1, a2); + tcg_out_opc_reg(s, OPC_MFHI, a0, 0, 0); + } +} + +static const TCGOutOpBinary outop_muluh = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_muluh, +}; + +static const TCGOutOpBinary outop_nand = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_nor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_reg(s, OPC_NOR, a0, a1, a2); +} + +static const TCGOutOpBinary outop_nor = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_nor, +}; + +static void tgen_or(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_reg(s, OPC_OR, a0, a1, a2); +} + +static void tgen_ori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2); +} + +static const TCGOutOpBinary outop_or = { + .base.static_constraint = C_O1_I2(r, r, rI), + .out_rrr = tgen_or, + .out_rri = tgen_ori, +}; + +static const TCGOutOpBinary outop_orc = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_rems(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (use_mips32r6_instructions) { + if (type == TCG_TYPE_I32) { + tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2); } else { - tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2); - tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1); - } - break; - case INDEX_op_sar_i64: - if (c2) { - tcg_out_dsra(s, a0, a1, a2); - break; - } - i1 = OPC_DSRAV; - goto do_shiftv; - case INDEX_op_shl_i64: - if (c2) { - tcg_out_dsll(s, a0, a1, a2); - break; - } - i1 = OPC_DSLLV; - goto do_shiftv; - case INDEX_op_shr_i64: - if (c2) { - tcg_out_dsrl(s, a0, a1, a2); - break; - } - i1 = OPC_DSRLV; - goto do_shiftv; - case INDEX_op_rotr_i64: - if (c2) { - tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2); - break; + tcg_out_opc_reg(s, OPC_DMOD, a0, a1, a2); } - i1 = OPC_DROTRV; - goto do_shiftv; - case INDEX_op_rotl_i64: - if (c2) { - tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, 64 - a2); + } else { + MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_DIV : OPC_DDIV; + tcg_out_opc_reg(s, insn, 0, a1, a2); + tcg_out_opc_reg(s, OPC_MFHI, a0, 0, 0); + } +} + +static const TCGOutOpBinary outop_rems = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_rems, +}; + +static void tgen_remu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (use_mips32r6_instructions) { + if (type == TCG_TYPE_I32) { + tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2); } else { - tcg_out_opc_reg(s, OPC_DSUBU, TCG_TMP0, TCG_REG_ZERO, a2); - tcg_out_opc_reg(s, OPC_DROTRV, a0, TCG_TMP0, a1); + tcg_out_opc_reg(s, OPC_DMODU, a0, a1, a2); } - break; + } else { + MIPSInsn insn = type == TCG_TYPE_I32 ? 
OPC_DIVU : OPC_DDIVU; + tcg_out_opc_reg(s, insn, 0, a1, a2); + tcg_out_opc_reg(s, OPC_MFHI, a0, 0, 0); + } +} - case INDEX_op_clz_i32: - tcg_out_clz(s, OPC_CLZ, OPC_CLZ_R6, 32, a0, a1, a2); - break; - case INDEX_op_clz_i64: - tcg_out_clz(s, OPC_DCLZ, OPC_DCLZ_R6, 64, a0, a1, a2); - break; +static const TCGOutOpBinary outop_remu = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_remu, +}; - case INDEX_op_deposit_i32: - tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]); - break; - case INDEX_op_deposit_i64: - tcg_out_opc_bf64(s, OPC_DINS, OPC_DINSM, OPC_DINSU, a0, a2, - args[3] + args[4] - 1, args[3]); - break; - case INDEX_op_extract_i32: - tcg_out_opc_bf(s, OPC_EXT, a0, a1, args[3] - 1, a2); - break; - case INDEX_op_extract_i64: - tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, - args[3] - 1, a2); - break; +static const TCGOutOpBinary outop_rotl = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: - tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); - break; - case INDEX_op_brcond2_i32: - tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5])); - break; +static TCGConstraintSetIndex cset_rotr(TCGType type, unsigned flags) +{ + return use_mips32r2_instructions ? C_O1_I2(r, r, ri) : C_NotImplemented; +} - case INDEX_op_movcond_i32: - case INDEX_op_movcond_i64: - tcg_out_movcond(s, args[5], a0, a1, a2, args[3], args[4]); - break; +static void tgen_rotr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_ROTRV : OPC_DROTRV; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} - case INDEX_op_setcond_i32: - case INDEX_op_setcond_i64: - tcg_out_setcond(s, args[3], a0, a1, a2); - break; - case INDEX_op_setcond2_i32: - tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]); - break; +static void tgen_rotri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_sa(s, OPC_ROTR, a0, a1, a2); + } else { + tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2); + } +} - case INDEX_op_qemu_ld_a64_i32: - if (TCG_TARGET_REG_BITS == 32) { - tcg_out_qemu_ld(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32); - break; - } - /* fall through */ - case INDEX_op_qemu_ld_a32_i32: - tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I32); - break; - case INDEX_op_qemu_ld_a32_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64); - } else { - tcg_out_qemu_ld(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64); - } - break; - case INDEX_op_qemu_ld_a64_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64); - } else { - tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); +static const TCGOutOpBinary outop_rotr = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_rotr, + .out_rrr = tgen_rotr, + .out_rri = tgen_rotri, +}; + +static void tgen_sar(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + MIPSInsn insn = type == TCG_TYPE_I32 ? 
OPC_SRAV : OPC_DSRAV; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static void tgen_sari(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_sa(s, OPC_SRA, a0, a1, a2); + } else { + tcg_out_dsra(s, a0, a1, a2); + } +} + +static const TCGOutOpBinary outop_sar = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_sar, + .out_rri = tgen_sari, +}; + +static void tgen_shl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_SLLV : OPC_DSLLV; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static void tgen_shli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_sa(s, OPC_SLL, a0, a1, a2); + } else { + tcg_out_dsll(s, a0, a1, a2); + } +} + +static const TCGOutOpBinary outop_shl = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shl, + .out_rri = tgen_shli, +}; + +static void tgen_shr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_SRLV : OPC_DSRLV; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static void tgen_shri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_sa(s, OPC_SRL, a0, a1, a2); + } else { + tcg_out_dsrl(s, a0, a1, a2); + } +} + +static const TCGOutOpBinary outop_shr = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shr, + .out_rri = tgen_shri, +}; + +static void tgen_sub(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_SUBU : OPC_DSUBU; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static const TCGOutOpSubtract outop_sub = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_sub, +}; + +static const TCGOutOpAddSubCarry outop_subbo = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpAddSubCarry outop_subbi = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpAddSubCarry outop_subbio = { + .base.static_constraint = C_NotImplemented, +}; + +static void tcg_out_set_borrow(TCGContext *s) +{ + g_assert_not_reached(); +} + +static void tgen_xor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2); +} + +static void tgen_xori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2); +} + +static const TCGOutOpBinary outop_xor = { + .base.static_constraint = C_O1_I2(r, r, rI), + .out_rrr = tgen_xor, + .out_rri = tgen_xori, +}; + +static void tgen_bswap16(TCGContext *s, TCGType type, + TCGReg ret, TCGReg arg, unsigned flags) +{ + /* With arg = abcd: */ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); /* badc */ + if (flags & TCG_BSWAP_OS) { + tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret); /* ssdc */ + } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xffff); /* 00dc */ } - break; + return; + } - case INDEX_op_qemu_st_a64_i32: - if (TCG_TARGET_REG_BITS == 32) { - tcg_out_qemu_st(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32); - break; + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); /* 0abc */ + if (!(flags & TCG_BSWAP_IZ)) { + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0x00ff); /* 000c */ + } + if (flags & TCG_BSWAP_OS) { + tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); /* d000 */ + 
tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); /* ssd0 */ + } else { + tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8); /* bcd0 */ + if (flags & TCG_BSWAP_OZ) { + tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00); /* 00d0 */ } - /* fall through */ - case INDEX_op_qemu_st_a32_i32: - tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I32); - break; - case INDEX_op_qemu_st_a32_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64); - } else { - tcg_out_qemu_st(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64); + } + tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); /* ssdc */ +} + +static const TCGOutOpBswap outop_bswap16 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap16, +}; + +static void tgen_bswap32(TCGContext *s, TCGType type, + TCGReg ret, TCGReg arg, unsigned flags) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); + tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16); + if (flags & TCG_BSWAP_OZ) { + tcg_out_opc_bf(s, OPC_DEXT, ret, ret, 31, 0); } - break; - case INDEX_op_qemu_st_a64_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64); + } else { + if (flags & TCG_BSWAP_OZ) { + tcg_out_bswap_subr(s, bswap32u_addr); } else { - tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64); + tcg_out_bswap_subr(s, bswap32_addr); } - break; + /* delay slot -- never omit the insn, like tcg_out_mov might. */ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO); + tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3); + } +} - case INDEX_op_add2_i32: - tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], - const_args[4], const_args[5], false); - break; - case INDEX_op_sub2_i32: - tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], - const_args[4], const_args[5], true); - break; +static const TCGOutOpBswap outop_bswap32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap32, +}; - case INDEX_op_mb: - tcg_out_mb(s, a0); - break; - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_mov_i64: - case INDEX_op_call: /* Always emitted via tcg_out_call. */ - case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ - case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ - case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ - case INDEX_op_ext8s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extrl_i64_i32: - default: - g_assert_not_reached(); +#if TCG_TARGET_REG_BITS == 64 +static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg); + tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret); + } else { + tcg_out_bswap_subr(s, bswap64_addr); + /* delay slot -- never omit the insn, like tcg_out_mov might. 
*/ + tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO); + tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3); } } -static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) -{ - switch (op) { - case INDEX_op_goto_ptr: - return C_O0_I1(r); - - case INDEX_op_ld8u_i32: - case INDEX_op_ld8s_i32: - case INDEX_op_ld16u_i32: - case INDEX_op_ld16s_i32: - case INDEX_op_ld_i32: - case INDEX_op_neg_i32: - case INDEX_op_not_i32: - case INDEX_op_bswap16_i32: - case INDEX_op_bswap32_i32: - case INDEX_op_ext8s_i32: - case INDEX_op_ext16s_i32: - case INDEX_op_extract_i32: - case INDEX_op_ld8u_i64: - case INDEX_op_ld8s_i64: - case INDEX_op_ld16u_i64: - case INDEX_op_ld16s_i64: - case INDEX_op_ld32s_i64: - case INDEX_op_ld32u_i64: - case INDEX_op_ld_i64: - case INDEX_op_neg_i64: - case INDEX_op_not_i64: - case INDEX_op_bswap16_i64: - case INDEX_op_bswap32_i64: - case INDEX_op_bswap64_i64: - case INDEX_op_ext8s_i64: - case INDEX_op_ext16s_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extrl_i64_i32: - case INDEX_op_extrh_i64_i32: - case INDEX_op_extract_i64: - return C_O1_I1(r, r); - - case INDEX_op_st8_i32: - case INDEX_op_st16_i32: - case INDEX_op_st_i32: - case INDEX_op_st8_i64: - case INDEX_op_st16_i64: - case INDEX_op_st32_i64: - case INDEX_op_st_i64: - return C_O0_I2(rZ, r); - - case INDEX_op_add_i32: - case INDEX_op_add_i64: - return C_O1_I2(r, r, rJ); - case INDEX_op_sub_i32: - case INDEX_op_sub_i64: - return C_O1_I2(r, rZ, rN); - case INDEX_op_mul_i32: - case INDEX_op_mulsh_i32: - case INDEX_op_muluh_i32: - case INDEX_op_div_i32: - case INDEX_op_divu_i32: - case INDEX_op_rem_i32: - case INDEX_op_remu_i32: - case INDEX_op_nor_i32: - case INDEX_op_setcond_i32: - case INDEX_op_mul_i64: - case INDEX_op_mulsh_i64: - case INDEX_op_muluh_i64: - case INDEX_op_div_i64: - case INDEX_op_divu_i64: - case INDEX_op_rem_i64: - case INDEX_op_remu_i64: - case INDEX_op_nor_i64: - case INDEX_op_setcond_i64: - return C_O1_I2(r, rZ, rZ); - case INDEX_op_muls2_i32: - case INDEX_op_mulu2_i32: - case INDEX_op_muls2_i64: - case INDEX_op_mulu2_i64: - return C_O2_I2(r, r, r, r); - case INDEX_op_and_i32: - case INDEX_op_and_i64: - return C_O1_I2(r, r, rIK); - case INDEX_op_or_i32: - case INDEX_op_xor_i32: - case INDEX_op_or_i64: - case INDEX_op_xor_i64: - return C_O1_I2(r, r, rI); - case INDEX_op_shl_i32: - case INDEX_op_shr_i32: - case INDEX_op_sar_i32: - case INDEX_op_rotr_i32: - case INDEX_op_rotl_i32: - case INDEX_op_shl_i64: - case INDEX_op_shr_i64: - case INDEX_op_sar_i64: - case INDEX_op_rotr_i64: - case INDEX_op_rotl_i64: - return C_O1_I2(r, r, ri); - case INDEX_op_clz_i32: - case INDEX_op_clz_i64: - return C_O1_I2(r, r, rWZ); - - case INDEX_op_deposit_i32: - case INDEX_op_deposit_i64: - return C_O1_I2(r, 0, rZ); - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: - return C_O0_I2(rZ, rZ); - case INDEX_op_movcond_i32: - case INDEX_op_movcond_i64: - return (use_mips32r6_instructions - ? C_O1_I4(r, rZ, rZ, rZ, rZ) - : C_O1_I4(r, rZ, rZ, rZ, 0)); - case INDEX_op_add2_i32: - case INDEX_op_sub2_i32: - return C_O2_I4(r, r, rZ, rZ, rN, rN); - case INDEX_op_setcond2_i32: - return C_O1_I4(r, rZ, rZ, rZ, rZ); - case INDEX_op_brcond2_i32: - return C_O0_I4(rZ, rZ, rZ, rZ); - - case INDEX_op_qemu_ld_a32_i32: - return C_O1_I1(r, r); - case INDEX_op_qemu_ld_a64_i32: - return TCG_TARGET_REG_BITS == 64 ? 
C_O1_I1(r, r) : C_O1_I2(r, r, r); - case INDEX_op_qemu_st_a32_i32: - return C_O0_I2(rZ, r); - case INDEX_op_qemu_st_a64_i32: - return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, r, r); - case INDEX_op_qemu_ld_a32_i64: - return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r); - case INDEX_op_qemu_ld_a64_i64: - return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r); - case INDEX_op_qemu_st_a32_i64: - return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, rZ, r); - case INDEX_op_qemu_st_a64_i64: - return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) - : C_O0_I4(rZ, rZ, r, r)); +static const TCGOutOpUnary outop_bswap64 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap64, +}; +#endif /* TCG_TARGET_REG_BITS == 64 */ - default: - g_assert_not_reached(); +static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tgen_sub(s, type, a0, TCG_REG_ZERO, a1); +} + +static const TCGOutOpUnary outop_neg = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_neg, +}; + +static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tgen_nor(s, type, a0, TCG_REG_ZERO, a1); +} + +static const TCGOutOpUnary outop_not = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_not, +}; + +static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + TCGReg a2, unsigned ofs, unsigned len) +{ + if (type == TCG_TYPE_I32) { + tcg_out_opc_bf(s, OPC_INS, a0, a2, ofs + len - 1, ofs); + } else { + tcg_out_opc_bf64(s, OPC_DINS, OPC_DINSM, OPC_DINSU, a0, a2, + ofs + len - 1, ofs); } } +static const TCGOutOpDeposit outop_deposit = { + .base.static_constraint = C_O1_I2(r, 0, rz), + .out_rrr = tgen_deposit, +}; + +static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + if (ofs == 0 && len <= 16) { + tcg_out_opc_imm(s, OPC_ANDI, a0, a1, (1 << len) - 1); + } else if (type == TCG_TYPE_I32) { + tcg_out_opc_bf(s, OPC_EXT, a0, a1, len - 1, ofs); + } else { + tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, + a0, a1, len - 1, ofs); + } +} + +static const TCGOutOpExtract outop_extract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extract, +}; + +static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + if (ofs == 0) { + switch (len) { + case 8: + tcg_out_ext8s(s, type, a0, a1); + return; + case 16: + tcg_out_ext16s(s, type, a0, a1); + return; + case 32: + tcg_out_ext32s(s, a0, a1); + return; + } + } + g_assert_not_reached(); +} + +static const TCGOutOpExtract outop_sextract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_sextract, +}; + +static const TCGOutOpExtract2 outop_extract2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LBU, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld8u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8u, +}; + +static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LB, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld8s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8s, +}; + +static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LHU, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld16u = { + 
.base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16u, +}; + +static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LH, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld16s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16s, +}; + +#if TCG_TARGET_REG_BITS == 64 +static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LWU, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld32u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32u, +}; + +static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LW, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld32s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32s, +}; +#endif + +static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_SB, data, base, offset); +} + +static const TCGOutOpStore outop_st8 = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tgen_st8_r, +}; + +static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_SH, data, base, offset); +} + +static const TCGOutOpStore outop_st16 = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tgen_st16_r, +}; + +static const TCGOutOpStore outop_st = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tcg_out_st, +}; + + +static TCGConstraintSetIndex +tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) +{ + return C_NotImplemented; +} + static const int tcg_target_callee_save_regs[] = { TCG_REG_S0, TCG_REG_S1, diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h index a996aa171dc..bd4ca5f852d 100644 --- a/tcg/mips/tcg-target.h +++ b/tcg/mips/tcg-target.h @@ -70,134 +70,6 @@ typedef enum { TCG_AREG0 = TCG_REG_S8, } TCGReg; -/* used for function call generation */ -#define TCG_TARGET_STACK_ALIGN 16 -#if _MIPS_SIM == _ABIO32 -# define TCG_TARGET_CALL_STACK_OFFSET 16 -# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN -# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF -#else -# define TCG_TARGET_CALL_STACK_OFFSET 0 -# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL -# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL -#endif -#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN - -/* MOVN/MOVZ instructions detection */ -#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \ - defined(_MIPS_ARCH_LOONGSON2E) || defined(_MIPS_ARCH_LOONGSON2F) || \ - defined(_MIPS_ARCH_MIPS4) -#define use_movnz_instructions 1 -#else -extern bool use_movnz_instructions; -#endif - -/* MIPS32 instruction set detection */ -#if defined(__mips_isa_rev) && (__mips_isa_rev >= 1) -#define use_mips32_instructions 1 -#else -extern bool use_mips32_instructions; -#endif - -/* MIPS32R2 instruction set detection */ -#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) -#define use_mips32r2_instructions 1 -#else -extern bool use_mips32r2_instructions; -#endif - -/* MIPS32R6 instruction set detection */ -#if defined(__mips_isa_rev) && (__mips_isa_rev >= 6) -#define use_mips32r6_instructions 1 -#else -#define use_mips32r6_instructions 0 -#endif - -/* optional instructions */ -#define TCG_TARGET_HAS_div_i32 1 -#define TCG_TARGET_HAS_rem_i32 1 -#define TCG_TARGET_HAS_not_i32 1 -#define TCG_TARGET_HAS_nor_i32 1 -#define 
TCG_TARGET_HAS_andc_i32 0 -#define TCG_TARGET_HAS_orc_i32 0 -#define TCG_TARGET_HAS_eqv_i32 0 -#define TCG_TARGET_HAS_nand_i32 0 -#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions) -#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions) -#define TCG_TARGET_HAS_muluh_i32 1 -#define TCG_TARGET_HAS_mulsh_i32 1 -#define TCG_TARGET_HAS_bswap32_i32 1 -#define TCG_TARGET_HAS_negsetcond_i32 0 - -#if TCG_TARGET_REG_BITS == 64 -#define TCG_TARGET_HAS_add2_i32 0 -#define TCG_TARGET_HAS_sub2_i32 0 -#define TCG_TARGET_HAS_extr_i64_i32 1 -#define TCG_TARGET_HAS_div_i64 1 -#define TCG_TARGET_HAS_rem_i64 1 -#define TCG_TARGET_HAS_not_i64 1 -#define TCG_TARGET_HAS_nor_i64 1 -#define TCG_TARGET_HAS_andc_i64 0 -#define TCG_TARGET_HAS_orc_i64 0 -#define TCG_TARGET_HAS_eqv_i64 0 -#define TCG_TARGET_HAS_nand_i64 0 -#define TCG_TARGET_HAS_add2_i64 0 -#define TCG_TARGET_HAS_sub2_i64 0 -#define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions) -#define TCG_TARGET_HAS_muls2_i64 (!use_mips32r6_instructions) -#define TCG_TARGET_HAS_muluh_i64 1 -#define TCG_TARGET_HAS_mulsh_i64 1 -#define TCG_TARGET_HAS_ext32s_i64 1 -#define TCG_TARGET_HAS_ext32u_i64 1 -#define TCG_TARGET_HAS_negsetcond_i64 0 -#endif - -/* optional instructions detected at runtime */ -#define TCG_TARGET_HAS_bswap16_i32 use_mips32r2_instructions -#define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions -#define TCG_TARGET_HAS_extract_i32 use_mips32r2_instructions -#define TCG_TARGET_HAS_sextract_i32 0 -#define TCG_TARGET_HAS_extract2_i32 0 -#define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions -#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions -#define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions -#define TCG_TARGET_HAS_clz_i32 use_mips32r2_instructions -#define TCG_TARGET_HAS_ctz_i32 0 -#define TCG_TARGET_HAS_ctpop_i32 0 -#define TCG_TARGET_HAS_qemu_st8_i32 0 - -#if TCG_TARGET_REG_BITS == 64 -#define TCG_TARGET_HAS_bswap16_i64 use_mips32r2_instructions -#define TCG_TARGET_HAS_bswap32_i64 use_mips32r2_instructions -#define TCG_TARGET_HAS_bswap64_i64 use_mips32r2_instructions -#define TCG_TARGET_HAS_deposit_i64 use_mips32r2_instructions -#define TCG_TARGET_HAS_extract_i64 use_mips32r2_instructions -#define TCG_TARGET_HAS_sextract_i64 0 -#define TCG_TARGET_HAS_extract2_i64 0 -#define TCG_TARGET_HAS_ext8s_i64 use_mips32r2_instructions -#define TCG_TARGET_HAS_ext16s_i64 use_mips32r2_instructions -#define TCG_TARGET_HAS_rot_i64 use_mips32r2_instructions -#define TCG_TARGET_HAS_clz_i64 use_mips32r2_instructions -#define TCG_TARGET_HAS_ctz_i64 0 -#define TCG_TARGET_HAS_ctpop_i64 0 -#endif - -/* optional instructions automatically implemented */ -#define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */ -#define TCG_TARGET_HAS_ext16u_i32 0 /* andi rt, rs, 0xffff */ - -#if TCG_TARGET_REG_BITS == 64 -#define TCG_TARGET_HAS_ext8u_i64 0 /* andi rt, rs, 0xff */ -#define TCG_TARGET_HAS_ext16u_i64 0 /* andi rt, rs, 0xffff */ -#endif - -#define TCG_TARGET_HAS_qemu_ldst_i128 0 - -#define TCG_TARGET_HAS_tst 0 - -#define TCG_TARGET_DEFAULT_MO 0 -#define TCG_TARGET_NEED_LDST_LABELS -#define TCG_TARGET_NEED_POOL_LABELS +#define TCG_REG_ZERO TCG_REG_ZERO #endif diff --git a/tcg/optimize.c b/tcg/optimize.c index ba16ec27e24..3638ab9fea0 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -28,15 +28,8 @@ #include "qemu/interval-tree.h" #include "tcg/tcg-op-common.h" #include "tcg-internal.h" +#include "tcg-has.h" -#define CASE_OP_32_64(x) \ - glue(glue(case INDEX_op_, x), _i32): \ - glue(glue(case INDEX_op_, x), _i64) - -#define 
CASE_OP_32_64_VEC(x) \ - glue(glue(case INDEX_op_, x), _i32): \ - glue(glue(case INDEX_op_, x), _i64): \ - glue(glue(case INDEX_op_, x), _vec) typedef struct MemCopyInfo { IntervalTreeNode itree; @@ -46,13 +39,12 @@ typedef struct MemCopyInfo { } MemCopyInfo; typedef struct TempOptInfo { - bool is_const; TCGTemp *prev_copy; TCGTemp *next_copy; QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy; - uint64_t val; uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */ - uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */ + uint64_t o_mask; /* mask bit is 1 if and only if value bit is 1 */ + uint64_t s_mask; /* mask bit is 1 if value bit matches msb */ } TempOptInfo; typedef struct OptContext { @@ -64,70 +56,45 @@ typedef struct OptContext { QSIMPLEQ_HEAD(, MemCopyInfo) mem_free; /* In flight values from optimization. */ - uint64_t a_mask; /* mask bit is 0 iff value identical to first input */ - uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */ - uint64_t s_mask; /* mask of clrsb(value) bits */ TCGType type; + int carry_state; /* -1 = non-constant, {0,1} = constant carry-in */ } OptContext; -/* Calculate the smask for a specific value. */ -static uint64_t smask_from_value(uint64_t value) +static inline TempOptInfo *ts_info(TCGTemp *ts) { - int rep = clrsb64(value); - return ~(~0ull >> rep); + return ts->state_ptr; } -/* - * Calculate the smask for a given set of known-zeros. - * If there are lots of zeros on the left, we can consider the remainder - * an unsigned field, and thus the corresponding signed field is one bit - * larger. - */ -static uint64_t smask_from_zmask(uint64_t zmask) +static inline TempOptInfo *arg_info(TCGArg arg) { - /* - * Only the 0 bits are significant for zmask, thus the msb itself - * must be zero, else we have no sign information. - */ - int rep = clz64(zmask); - if (rep == 0) { - return 0; - } - rep -= 1; - return ~(~0ull >> rep); + return ts_info(arg_temp(arg)); } -/* - * Recreate a properly left-aligned smask after manipulation. - * Some bit-shuffling, particularly shifts and rotates, may - * retain sign bits on the left, but may scatter disconnected - * sign bits on the right. Retain only what remains to the left. - */ -static uint64_t smask_from_smask(int64_t smask) +static inline bool ti_is_const(TempOptInfo *ti) { - /* Only the 1 bits are significant for smask */ - return smask_from_zmask(~smask); + /* If all bits that are not known zeros are known ones, it's constant. */ + return ti->z_mask == ti->o_mask; } -static inline TempOptInfo *ts_info(TCGTemp *ts) +static inline uint64_t ti_const_val(TempOptInfo *ti) { - return ts->state_ptr; + /* If constant, both z_mask and o_mask contain the value. 
*/ + return ti->z_mask; } -static inline TempOptInfo *arg_info(TCGArg arg) +static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val) { - return ts_info(arg_temp(arg)); + return ti_is_const(ti) && ti_const_val(ti) == val; } static inline bool ts_is_const(TCGTemp *ts) { - return ts_info(ts)->is_const; + return ti_is_const(ts_info(ts)); } static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val) { - TempOptInfo *ti = ts_info(ts); - return ti->is_const && ti->val == val; + return ti_is_const_val(ts_info(ts), val); } static inline bool arg_is_const(TCGArg arg) @@ -135,6 +102,11 @@ static inline bool arg_is_const(TCGArg arg) return ts_is_const(arg_temp(arg)); } +static inline uint64_t arg_const_val(TCGArg arg) +{ + return ti_const_val(arg_info(arg)); +} + static inline bool arg_is_const_val(TCGArg arg, uint64_t val) { return ts_is_const_val(arg_temp(arg), val); @@ -171,13 +143,12 @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts) ti->prev_copy = ts; QSIMPLEQ_INIT(&ti->mem_copy); if (ts->kind == TEMP_CONST) { - ti->is_const = true; - ti->val = ts->val; ti->z_mask = ts->val; - ti->s_mask = smask_from_value(ts->val); + ti->o_mask = ts->val; + ti->s_mask = INT64_MIN >> clrsb64(ts->val); } else { - ti->is_const = false; ti->z_mask = -1; + ti->o_mask = 0; ti->s_mask = 0; } } @@ -263,8 +234,8 @@ static void reset_ts(OptContext *ctx, TCGTemp *ts) pi->next_copy = ti->next_copy; ti->next_copy = ts; ti->prev_copy = ts; - ti->is_const = false; ti->z_mask = -1; + ti->o_mask = 0; ti->s_mask = 0; if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) { @@ -371,6 +342,18 @@ static TCGArg arg_new_temp(OptContext *ctx) return temp_arg(ts); } +static TCGOp *opt_insert_after(OptContext *ctx, TCGOp *op, + TCGOpcode opc, unsigned narg) +{ + return tcg_op_insert_after(ctx->tcg, op, opc, ctx->type, narg); +} + +static TCGOp *opt_insert_before(OptContext *ctx, TCGOp *op, + TCGOpcode opc, unsigned narg) +{ + return tcg_op_insert_before(ctx->tcg, op, opc, ctx->type, narg); +} + static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) { TCGTemp *dst_ts = arg_temp(dst); @@ -390,15 +373,13 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) switch (ctx->type) { case TCG_TYPE_I32: - new_op = INDEX_op_mov_i32; - break; case TCG_TYPE_I64: - new_op = INDEX_op_mov_i64; + new_op = INDEX_op_mov; break; case TCG_TYPE_V64: case TCG_TYPE_V128: case TCG_TYPE_V256: - /* TCGOP_VECL and TCGOP_VECE remain unchanged. */ + /* TCGOP_TYPE and TCGOP_VECE remain unchanged. 
*/ new_op = INDEX_op_mov_vec; break; default: @@ -409,6 +390,7 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) op->args[1] = src; di->z_mask = si->z_mask; + di->o_mask = si->o_mask; di->s_mask = si->s_mask; if (src_ts->type == dst_ts->type) { @@ -418,13 +400,19 @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) di->prev_copy = src_ts; ni->prev_copy = dst_ts; si->next_copy = dst_ts; - di->is_const = si->is_const; - di->val = si->val; if (!QSIMPLEQ_EMPTY(&si->mem_copy) && cmp_better_copy(src_ts, dst_ts) == dst_ts) { move_mem_copies(dst_ts, src_ts); } + } else if (dst_ts->type == TCG_TYPE_I32) { + di->z_mask = (int32_t)di->z_mask; + di->o_mask = (int32_t)di->o_mask; + di->s_mask |= INT32_MIN; + } else { + di->z_mask |= MAKE_64BIT_MASK(32, 32); + di->o_mask = (uint32_t)di->o_mask; + di->s_mask = INT64_MIN; } return true; } @@ -436,162 +424,163 @@ static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op, return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val)); } -static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) +static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type, + uint64_t x, uint64_t y) { uint64_t l64, h64; switch (op) { - CASE_OP_32_64(add): + case INDEX_op_add: return x + y; - CASE_OP_32_64(sub): + case INDEX_op_sub: return x - y; - CASE_OP_32_64(mul): + case INDEX_op_mul: return x * y; - CASE_OP_32_64_VEC(and): + case INDEX_op_and: + case INDEX_op_and_vec: return x & y; - CASE_OP_32_64_VEC(or): + case INDEX_op_or: + case INDEX_op_or_vec: return x | y; - CASE_OP_32_64_VEC(xor): + case INDEX_op_xor: + case INDEX_op_xor_vec: return x ^ y; - case INDEX_op_shl_i32: - return (uint32_t)x << (y & 31); - - case INDEX_op_shl_i64: + case INDEX_op_shl: + if (type == TCG_TYPE_I32) { + return (uint32_t)x << (y & 31); + } return (uint64_t)x << (y & 63); - case INDEX_op_shr_i32: - return (uint32_t)x >> (y & 31); - - case INDEX_op_shr_i64: + case INDEX_op_shr: + if (type == TCG_TYPE_I32) { + return (uint32_t)x >> (y & 31); + } return (uint64_t)x >> (y & 63); - case INDEX_op_sar_i32: - return (int32_t)x >> (y & 31); - - case INDEX_op_sar_i64: + case INDEX_op_sar: + if (type == TCG_TYPE_I32) { + return (int32_t)x >> (y & 31); + } return (int64_t)x >> (y & 63); - case INDEX_op_rotr_i32: - return ror32(x, y & 31); - - case INDEX_op_rotr_i64: + case INDEX_op_rotr: + if (type == TCG_TYPE_I32) { + return ror32(x, y & 31); + } return ror64(x, y & 63); - case INDEX_op_rotl_i32: - return rol32(x, y & 31); - - case INDEX_op_rotl_i64: + case INDEX_op_rotl: + if (type == TCG_TYPE_I32) { + return rol32(x, y & 31); + } return rol64(x, y & 63); - CASE_OP_32_64_VEC(not): + case INDEX_op_not: + case INDEX_op_not_vec: return ~x; - CASE_OP_32_64(neg): + case INDEX_op_neg: return -x; - CASE_OP_32_64_VEC(andc): + case INDEX_op_andc: + case INDEX_op_andc_vec: return x & ~y; - CASE_OP_32_64_VEC(orc): + case INDEX_op_orc: + case INDEX_op_orc_vec: return x | ~y; - CASE_OP_32_64_VEC(eqv): + case INDEX_op_eqv: + case INDEX_op_eqv_vec: return ~(x ^ y); - CASE_OP_32_64_VEC(nand): + case INDEX_op_nand: + case INDEX_op_nand_vec: return ~(x & y); - CASE_OP_32_64_VEC(nor): + case INDEX_op_nor: + case INDEX_op_nor_vec: return ~(x | y); - case INDEX_op_clz_i32: - return (uint32_t)x ? clz32(x) : y; - - case INDEX_op_clz_i64: + case INDEX_op_clz: + if (type == TCG_TYPE_I32) { + return (uint32_t)x ? clz32(x) : y; + } return x ? clz64(x) : y; - case INDEX_op_ctz_i32: - return (uint32_t)x ? 
ctz32(x) : y; - - case INDEX_op_ctz_i64: + case INDEX_op_ctz: + if (type == TCG_TYPE_I32) { + return (uint32_t)x ? ctz32(x) : y; + } return x ? ctz64(x) : y; - case INDEX_op_ctpop_i32: - return ctpop32(x); - - case INDEX_op_ctpop_i64: - return ctpop64(x); - - CASE_OP_32_64(ext8s): - return (int8_t)x; - - CASE_OP_32_64(ext16s): - return (int16_t)x; - - CASE_OP_32_64(ext8u): - return (uint8_t)x; + case INDEX_op_ctpop: + return type == TCG_TYPE_I32 ? ctpop32(x) : ctpop64(x); - CASE_OP_32_64(ext16u): - return (uint16_t)x; - - CASE_OP_32_64(bswap16): + case INDEX_op_bswap16: x = bswap16(x); return y & TCG_BSWAP_OS ? (int16_t)x : x; - CASE_OP_32_64(bswap32): + case INDEX_op_bswap32: x = bswap32(x); return y & TCG_BSWAP_OS ? (int32_t)x : x; - case INDEX_op_bswap64_i64: + case INDEX_op_bswap64: return bswap64(x); case INDEX_op_ext_i32_i64: - case INDEX_op_ext32s_i64: return (int32_t)x; case INDEX_op_extu_i32_i64: case INDEX_op_extrl_i64_i32: - case INDEX_op_ext32u_i64: return (uint32_t)x; case INDEX_op_extrh_i64_i32: return (uint64_t)x >> 32; - case INDEX_op_muluh_i32: - return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32; - case INDEX_op_mulsh_i32: - return ((int64_t)(int32_t)x * (int32_t)y) >> 32; - - case INDEX_op_muluh_i64: + case INDEX_op_muluh: + if (type == TCG_TYPE_I32) { + return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32; + } mulu64(&l64, &h64, x, y); return h64; - case INDEX_op_mulsh_i64: + + case INDEX_op_mulsh: + if (type == TCG_TYPE_I32) { + return ((int64_t)(int32_t)x * (int32_t)y) >> 32; + } muls64(&l64, &h64, x, y); return h64; - case INDEX_op_div_i32: + case INDEX_op_divs: /* Avoid crashing on divide by zero, otherwise undefined. */ - return (int32_t)x / ((int32_t)y ? : 1); - case INDEX_op_divu_i32: - return (uint32_t)x / ((uint32_t)y ? : 1); - case INDEX_op_div_i64: + if (type == TCG_TYPE_I32) { + return (int32_t)x / ((int32_t)y ? : 1); + } return (int64_t)x / ((int64_t)y ? : 1); - case INDEX_op_divu_i64: + + case INDEX_op_divu: + if (type == TCG_TYPE_I32) { + return (uint32_t)x / ((uint32_t)y ? : 1); + } return (uint64_t)x / ((uint64_t)y ? : 1); - case INDEX_op_rem_i32: - return (int32_t)x % ((int32_t)y ? : 1); - case INDEX_op_remu_i32: - return (uint32_t)x % ((uint32_t)y ? : 1); - case INDEX_op_rem_i64: + case INDEX_op_rems: + if (type == TCG_TYPE_I32) { + return (int32_t)x % ((int32_t)y ? : 1); + } return (int64_t)x % ((int64_t)y ? : 1); - case INDEX_op_remu_i64: + + case INDEX_op_remu: + if (type == TCG_TYPE_I32) { + return (uint32_t)x % ((uint32_t)y ? : 1); + } return (uint64_t)x % ((uint64_t)y ? : 1); default: @@ -602,7 +591,7 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) static uint64_t do_constant_folding(TCGOpcode op, TCGType type, uint64_t x, uint64_t y) { - uint64_t res = do_constant_folding_2(op, x, y); + uint64_t res = do_constant_folding_2(op, type, x, y); if (type == TCG_TYPE_I32) { res = (int32_t)res; } @@ -710,8 +699,8 @@ static int do_constant_folding_cond(TCGType type, TCGArg x, TCGArg y, TCGCond c) { if (arg_is_const(x) && arg_is_const(y)) { - uint64_t xv = arg_info(x)->val; - uint64_t yv = arg_info(y)->val; + uint64_t xv = arg_const_val(x); + uint64_t yv = arg_const_val(y); switch (type) { case TCG_TYPE_I32: @@ -752,12 +741,18 @@ static int do_constant_folding_cond(TCGType type, TCGArg x, #define NO_DEST temp_arg(NULL) +static int pref_commutative(TempOptInfo *ti) +{ + /* Slight preference for non-zero constants second. */ + return !ti_is_const(ti) ? 0 : ti_const_val(ti) ? 
3 : 2; +} + static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2) { TCGArg a1 = *p1, a2 = *p2; int sum = 0; - sum += arg_is_const(a1); - sum -= arg_is_const(a2); + sum += pref_commutative(arg_info(a1)); + sum -= pref_commutative(arg_info(a2)); /* Prefer the constant in second argument, and then the form op a, a, b, which is better handled on non-RISC hosts. */ @@ -772,10 +767,10 @@ static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2) static bool swap_commutative2(TCGArg *p1, TCGArg *p2) { int sum = 0; - sum += arg_is_const(p1[0]); - sum += arg_is_const(p1[1]); - sum -= arg_is_const(p2[0]); - sum -= arg_is_const(p2[1]); + sum += pref_commutative(arg_info(p1[0])); + sum += pref_commutative(arg_info(p1[1])); + sum -= pref_commutative(arg_info(p2[0])); + sum -= pref_commutative(arg_info(p2[1])); if (sum > 0) { TCGArg t; t = p1[0], p1[0] = p2[0], p2[0] = t; @@ -789,10 +784,12 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2) * Return -1 if the condition can't be simplified, * and the result of the condition (0 or 1) if it can. */ +static bool fold_and(OptContext *ctx, TCGOp *op); static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest, TCGArg *p1, TCGArg *p2, TCGArg *pcond) { TCGCond cond; + TempOptInfo *i1; bool swap; int r; @@ -810,19 +807,21 @@ static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest, return -1; } + i1 = arg_info(*p1); + /* * TSTNE x,x -> NE x,0 - * TSTNE x,-1 -> NE x,0 + * TSTNE x,i -> NE x,0 if i includes all nonzero bits of x */ - if (args_are_copies(*p1, *p2) || arg_is_const_val(*p2, -1)) { + if (args_are_copies(*p1, *p2) || + (arg_is_const(*p2) && (i1->z_mask & ~arg_const_val(*p2)) == 0)) { *p2 = arg_new_constant(ctx, 0); *pcond = tcg_tst_eqne_cond(cond); return -1; } - /* TSTNE x,sign -> LT x,0 */ - if (arg_is_const_val(*p2, (ctx->type == TCG_TYPE_I32 - ? INT32_MIN : INT64_MIN))) { + /* TSTNE x,i -> LT x,0 if i only includes sign bit copies */ + if (arg_is_const(*p2) && (arg_const_val(*p2) & ~i1->s_mask) == 0) { *p2 = arg_new_constant(ctx, 0); *pcond = tcg_tst_ltge_cond(cond); return -1; @@ -830,14 +829,13 @@ static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest, /* Expand to AND with a temporary if no backend support. */ if (!TCG_TARGET_HAS_tst) { - TCGOpcode and_opc = (ctx->type == TCG_TYPE_I32 - ? INDEX_op_and_i32 : INDEX_op_and_i64); - TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, and_opc, 3); + TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3); TCGArg tmp = arg_new_temp(ctx); op2->args[0] = tmp; op2->args[1] = *p1; op2->args[2] = *p2; + fold_and(ctx, op2); *p1 = tmp; *p2 = arg_new_constant(ctx, 0); @@ -865,13 +863,13 @@ static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args) bh = args[3]; if (arg_is_const(bl) && arg_is_const(bh)) { - tcg_target_ulong blv = arg_info(bl)->val; - tcg_target_ulong bhv = arg_info(bh)->val; + tcg_target_ulong blv = arg_const_val(bl); + tcg_target_ulong bhv = arg_const_val(bh); uint64_t b = deposit64(blv, 32, 32, bhv); if (arg_is_const(al) && arg_is_const(ah)) { - tcg_target_ulong alv = arg_info(al)->val; - tcg_target_ulong ahv = arg_info(ah)->val; + tcg_target_ulong alv = arg_const_val(al); + tcg_target_ulong ahv = arg_const_val(ah); uint64_t a = deposit64(alv, 32, 32, ahv); r = do_constant_folding_cond_64(a, b, c); @@ -925,17 +923,20 @@ static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args) /* Expand to AND with a temporary if no backend support. 
*/ if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) { - TCGOp *op1 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3); - TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3); + TCGOp *op1 = opt_insert_before(ctx, op, INDEX_op_and, 3); + TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3); TCGArg t1 = arg_new_temp(ctx); TCGArg t2 = arg_new_temp(ctx); op1->args[0] = t1; op1->args[1] = al; op1->args[2] = bl; + fold_and(ctx, op1); + op2->args[0] = t2; op2->args[1] = ah; op2->args[2] = bh; + fold_and(ctx, op2); args[0] = t1; args[1] = t2; @@ -964,37 +965,31 @@ static void copy_propagate(OptContext *ctx, TCGOp *op, } } -static void finish_folding(OptContext *ctx, TCGOp *op) +static void finish_bb(OptContext *ctx) +{ + /* We only optimize memory barriers across basic blocks. */ + ctx->prev_mb = NULL; +} + +static void finish_ebb(OptContext *ctx) +{ + finish_bb(ctx); + /* We only optimize across extended basic blocks. */ + memset(&ctx->temps_used, 0, sizeof(ctx->temps_used)); + remove_mem_copy_all(ctx); +} + +static bool finish_folding(OptContext *ctx, TCGOp *op) { const TCGOpDef *def = &tcg_op_defs[op->opc]; int i, nb_oargs; - /* - * We only optimize extended basic blocks. If the opcode ends a BB - * and is not a conditional branch, reset all temp data. - */ - if (def->flags & TCG_OPF_BB_END) { - ctx->prev_mb = NULL; - if (!(def->flags & TCG_OPF_COND_BRANCH)) { - memset(&ctx->temps_used, 0, sizeof(ctx->temps_used)); - remove_mem_copy_all(ctx); - } - return; - } - nb_oargs = def->nb_oargs; for (i = 0; i < nb_oargs; i++) { TCGTemp *ts = arg_temp(op->args[i]); reset_ts(ctx, ts); - /* - * Save the corresponding known-zero/sign bits mask for the - * first output argument (only one supported so far). - */ - if (i == 0) { - ts_info(ts)->z_mask = ctx->z_mask; - ts_info(ts)->s_mask = ctx->s_mask; - } } + return true; } /* @@ -1011,9 +1006,8 @@ static void finish_folding(OptContext *ctx, TCGOp *op) static bool fold_const1(OptContext *ctx, TCGOp *op) { if (arg_is_const(op->args[1])) { - uint64_t t; + uint64_t t = arg_const_val(op->args[1]); - t = arg_info(op->args[1])->val; t = do_constant_folding(op->opc, ctx->type, t, 0); return tcg_opt_gen_movi(ctx, op, op->args[0], t); } @@ -1023,8 +1017,8 @@ static bool fold_const1(OptContext *ctx, TCGOp *op) static bool fold_const2(OptContext *ctx, TCGOp *op) { if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { - uint64_t t1 = arg_info(op->args[1])->val; - uint64_t t2 = arg_info(op->args[2])->val; + uint64_t t1 = arg_const_val(op->args[1]); + uint64_t t2 = arg_const_val(op->args[2]); t1 = do_constant_folding(op->opc, ctx->type, t1, t2); return tcg_opt_gen_movi(ctx, op, op->args[0], t1); @@ -1044,11 +1038,23 @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op) return fold_const2(ctx, op); } -static bool fold_masks(OptContext *ctx, TCGOp *op) +/* + * Record "zero", "one" and "sign" masks for the single output of @op. + * See TempOptInfo definition of z_mask, o_mask and s_mask. + * If the masks describe a constant, fold the output to that constant. + * The passed s_mask may be augmented by z_mask and o_mask. + */ +static bool fold_masks_zosa_int(OptContext *ctx, TCGOp *op, + uint64_t z_mask, uint64_t o_mask, + int64_t s_mask, uint64_t a_mask) { - uint64_t a_mask = ctx->a_mask; - uint64_t z_mask = ctx->z_mask; - uint64_t s_mask = ctx->s_mask; + const TCGOpDef *def = &tcg_op_defs[op->opc]; + TCGTemp *ts; + TempOptInfo *ti; + int rep; + + /* Only single-output opcodes are supported here.
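+     * A reminder of the conventions used below: z_mask has a 1 for every bit
+     * that may still be one, o_mask for every bit known to be one, s_mask
+     * marks bits that are copies of the sign bit, and a_mask marks bits at
+     * which the result may differ from the first input operand.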
*/ + tcg_debug_assert(def->nb_oargs == 1); /* * 32-bit ops generate 32-bit results, which for the purpose of @@ -1058,22 +1064,76 @@ static bool fold_masks(OptContext *ctx, TCGOp *op) * type changing opcodes. */ if (ctx->type == TCG_TYPE_I32) { - a_mask = (int32_t)a_mask; z_mask = (int32_t)z_mask; - s_mask |= MAKE_64BIT_MASK(32, 32); - ctx->z_mask = z_mask; - ctx->s_mask = s_mask; + o_mask = (int32_t)o_mask; + s_mask |= INT32_MIN; + a_mask = (uint32_t)a_mask; } - if (z_mask == 0) { - return tcg_opt_gen_movi(ctx, op, op->args[0], 0); + /* Bits that are known 1 and bits that are known 0 must not overlap. */ + tcg_debug_assert((o_mask & ~z_mask) == 0); + + /* If all bits that are not known zero are known one, this is a constant. */ + if (z_mask == o_mask) { + return tcg_opt_gen_movi(ctx, op, op->args[0], o_mask); } + + /* If no bits are affected, the operation devolves to a copy. */ if (a_mask == 0) { return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); } + + ts = arg_temp(op->args[0]); + reset_ts(ctx, ts); + + ti = ts_info(ts); + ti->z_mask = z_mask; + ti->o_mask = o_mask; + + /* Canonicalize s_mask and incorporate data from z_mask. */ + rep = clz64(~s_mask); + rep = MAX(rep, clz64(z_mask)); + rep = MAX(rep, clz64(~o_mask)); + rep = MAX(rep - 1, 0); + ti->s_mask = INT64_MIN >> rep; + return false; } +static bool fold_masks_zosa(OptContext *ctx, TCGOp *op, uint64_t z_mask, + uint64_t o_mask, int64_t s_mask, uint64_t a_mask) +{ + fold_masks_zosa_int(ctx, op, z_mask, o_mask, s_mask, a_mask); + return true; +} + +static bool fold_masks_zos(OptContext *ctx, TCGOp *op, + uint64_t z_mask, uint64_t o_mask, uint64_t s_mask) +{ + return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, -1); +} + +static bool fold_masks_zo(OptContext *ctx, TCGOp *op, + uint64_t z_mask, uint64_t o_mask) +{ + return fold_masks_zosa(ctx, op, z_mask, o_mask, 0, -1); +} + +static bool fold_masks_zs(OptContext *ctx, TCGOp *op, + uint64_t z_mask, uint64_t s_mask) +{ + return fold_masks_zosa(ctx, op, z_mask, 0, s_mask, -1); +} + +static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask) +{ + return fold_masks_zosa(ctx, op, z_mask, 0, 0, -1); +} + +static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask) +{ + return fold_masks_zosa(ctx, op, -1, 0, s_mask, -1); +} + /* * Convert @op to NOT, if NOT is supported by the host. * Return true if the conversion is successful, which will still @@ -1087,12 +1147,9 @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx) switch (ctx->type) { case TCG_TYPE_I32: - not_op = INDEX_op_not_i32; - have_not = TCG_TARGET_HAS_not_i32; - break; case TCG_TYPE_I64: - not_op = INDEX_op_not_i64; - have_not = TCG_TARGET_HAS_not_i64; + not_op = INDEX_op_not; + have_not = tcg_op_supported(INDEX_op_not, ctx->type, 0); break; case TCG_TYPE_V64: case TCG_TYPE_V128: @@ -1183,13 +1240,19 @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op) * 3) those that produce information about the result value. */ +static bool fold_addco(OptContext *ctx, TCGOp *op); +static bool fold_or(OptContext *ctx, TCGOp *op); +static bool fold_orc(OptContext *ctx, TCGOp *op); +static bool fold_subbo(OptContext *ctx, TCGOp *op); +static bool fold_xor(OptContext *ctx, TCGOp *op); + static bool fold_add(OptContext *ctx, TCGOp *op) { if (fold_const2_commutative(ctx, op) || fold_xi_to_x(ctx, op, 0)) { return true; } - return false; + return finish_folding(ctx, op); } /* We cannot as yet do_constant_folding with vectors.
*/ @@ -1199,145 +1262,319 @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op) fold_xi_to_x(ctx, op, 0)) { return true; } - return false; + return finish_folding(ctx, op); } -static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add) +static void squash_prev_carryout(OptContext *ctx, TCGOp *op) { - bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]); - bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]); + TempOptInfo *t2; - if (a_const && b_const) { - uint64_t al = arg_info(op->args[2])->val; - uint64_t ah = arg_info(op->args[3])->val; - uint64_t bl = arg_info(op->args[4])->val; - uint64_t bh = arg_info(op->args[5])->val; - TCGArg rl, rh; - TCGOp *op2; + op = QTAILQ_PREV(op, link); + switch (op->opc) { + case INDEX_op_addco: + op->opc = INDEX_op_add; + fold_add(ctx, op); + break; + case INDEX_op_addcio: + op->opc = INDEX_op_addci; + break; + case INDEX_op_addc1o: + op->opc = INDEX_op_add; + t2 = arg_info(op->args[2]); + if (ti_is_const(t2)) { + op->args[2] = arg_new_constant(ctx, ti_const_val(t2) + 1); + /* Perform other constant folding, if needed. */ + fold_add(ctx, op); + } else { + TCGArg ret = op->args[0]; + op = opt_insert_after(ctx, op, INDEX_op_add, 3); + op->args[0] = ret; + op->args[1] = ret; + op->args[2] = arg_new_constant(ctx, 1); + } + break; + default: + g_assert_not_reached(); + } +} - if (ctx->type == TCG_TYPE_I32) { - uint64_t a = deposit64(al, 32, 32, ah); - uint64_t b = deposit64(bl, 32, 32, bh); +static bool fold_addci(OptContext *ctx, TCGOp *op) +{ + fold_commutative(ctx, op); - if (add) { - a += b; - } else { - a -= b; - } + if (ctx->carry_state < 0) { + return finish_folding(ctx, op); + } + + squash_prev_carryout(ctx, op); + op->opc = INDEX_op_add; + + if (ctx->carry_state > 0) { + TempOptInfo *t2 = arg_info(op->args[2]); - al = sextract64(a, 0, 32); - ah = sextract64(a, 32, 32); + /* + * Propagate the known carry-in into a constant, if possible. + * Otherwise emit a second add +1. + */ + if (ti_is_const(t2)) { + op->args[2] = arg_new_constant(ctx, ti_const_val(t2) + 1); } else { - Int128 a = int128_make128(al, ah); - Int128 b = int128_make128(bl, bh); + TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_add, 3); - if (add) { - a = int128_add(a, b); - } else { - a = int128_sub(a, b); - } + op2->args[0] = op->args[0]; + op2->args[1] = op->args[1]; + op2->args[2] = op->args[2]; + fold_add(ctx, op2); - al = int128_getlo(a); - ah = int128_gethi(a); + op->args[1] = op->args[0]; + op->args[2] = arg_new_constant(ctx, 1); } + } - rl = op->args[0]; - rh = op->args[1]; + ctx->carry_state = -1; + return fold_add(ctx, op); +} - /* The proper opcode is supplied by tcg_opt_gen_mov. */ - op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2); +static bool fold_addcio(OptContext *ctx, TCGOp *op) +{ + TempOptInfo *t1, *t2; + int carry_out = -1; + uint64_t sum, max; - tcg_opt_gen_movi(ctx, op, rl, al); - tcg_opt_gen_movi(ctx, op2, rh, ah); - return true; + fold_commutative(ctx, op); + t1 = arg_info(op->args[1]); + t2 = arg_info(op->args[2]); + + /* + * The z_mask value is >= the maximum value that can be represented + * with the known zero bits. So adding the z_mask values will not + * overflow if and only if the true values cannot overflow. 
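+     * For example, with t1->z_mask == 0xff and t2->z_mask == 0x0f the true
+     * values are at most 255 and 15, so even with a carry-in the sum cannot
+     * wrap a 64-bit value and the carry-out is provably zero.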
+ */ + if (!uadd64_overflow(t1->z_mask, t2->z_mask, &sum) && + !uadd64_overflow(sum, ctx->carry_state != 0, &sum)) { + carry_out = 0; } - /* Fold sub2 r,x,i to add2 r,x,-i */ - if (!add && b_const) { - uint64_t bl = arg_info(op->args[4])->val; - uint64_t bh = arg_info(op->args[5])->val; + if (ctx->carry_state < 0) { + ctx->carry_state = carry_out; + return finish_folding(ctx, op); + } - /* Negate the two parts without assembling and disassembling. */ - bl = -bl; - bh = ~bh + !bl; + squash_prev_carryout(ctx, op); + if (ctx->carry_state == 0) { + goto do_addco; + } - op->opc = (ctx->type == TCG_TYPE_I32 - ? INDEX_op_add2_i32 : INDEX_op_add2_i64); - op->args[4] = arg_new_constant(ctx, bl); - op->args[5] = arg_new_constant(ctx, bh); + /* Propagate the known carry-in into a constant, if possible. */ + max = ctx->type == TCG_TYPE_I32 ? UINT32_MAX : UINT64_MAX; + if (ti_is_const(t2)) { + uint64_t v = ti_const_val(t2) & max; + if (v < max) { + op->args[2] = arg_new_constant(ctx, v + 1); + goto do_addco; + } + /* max + known carry in produces known carry out. */ + carry_out = 1; + } + if (ti_is_const(t1)) { + uint64_t v = ti_const_val(t1) & max; + if (v < max) { + op->args[1] = arg_new_constant(ctx, v + 1); + goto do_addco; + } + carry_out = 1; } - return false; + + /* Adjust the opcode to remember the known carry-in. */ + op->opc = INDEX_op_addc1o; + ctx->carry_state = carry_out; + return finish_folding(ctx, op); + + do_addco: + op->opc = INDEX_op_addco; + return fold_addco(ctx, op); } -static bool fold_add2(OptContext *ctx, TCGOp *op) +static bool fold_addco(OptContext *ctx, TCGOp *op) { - /* Note that the high and low parts may be independently swapped. */ - swap_commutative(op->args[0], &op->args[2], &op->args[4]); - swap_commutative(op->args[1], &op->args[3], &op->args[5]); + TempOptInfo *t1, *t2; + int carry_out = -1; + uint64_t ign; + + fold_commutative(ctx, op); + t1 = arg_info(op->args[1]); + t2 = arg_info(op->args[2]); - return fold_addsub2(ctx, op, true); + if (ti_is_const(t2)) { + uint64_t v2 = ti_const_val(t2); + + if (ti_is_const(t1)) { + uint64_t v1 = ti_const_val(t1); + /* Given sign-extension of z_mask for I32, we need not truncate. */ + carry_out = uadd64_overflow(v1, v2, &ign); + } else if (v2 == 0) { + carry_out = 0; + } + } else { + /* + * The z_mask value is >= the maximum value that can be represented + * with the known zero bits. So adding the z_mask values will not + * overflow if and only if the true values cannot overflow. + */ + if (!uadd64_overflow(t1->z_mask, t2->z_mask, &ign)) { + carry_out = 0; + } + } + ctx->carry_state = carry_out; + return finish_folding(ctx, op); } static bool fold_and(OptContext *ctx, TCGOp *op) { - uint64_t z1, z2; + uint64_t z_mask, o_mask, s_mask, a_mask; + TempOptInfo *t1, *t2; - if (fold_const2_commutative(ctx, op) || - fold_xi_to_i(ctx, op, 0) || - fold_xi_to_x(ctx, op, -1) || - fold_xx_to_x(ctx, op)) { + if (fold_const2_commutative(ctx, op)) { return true; } - z1 = arg_info(op->args[1])->z_mask; - z2 = arg_info(op->args[2])->z_mask; - ctx->z_mask = z1 & z2; + t1 = arg_info(op->args[1]); + t2 = arg_info(op->args[2]); + + z_mask = t1->z_mask & t2->z_mask; + o_mask = t1->o_mask & t2->o_mask; /* * Sign repetitions are perforce all identical, whether they are 1 or 0. * Bitwise operations preserve the relative quantity of the repetitions. */ - ctx->s_mask = arg_info(op->args[1])->s_mask - & arg_info(op->args[2])->s_mask; - - /* - * Known-zeros does not imply known-ones. 
Therefore unless - * arg2 is constant, we can't infer affected bits from it. - */ - if (arg_is_const(op->args[2])) { - ctx->a_mask = z1 & ~z2; + s_mask = t1->s_mask & t2->s_mask; + + /* Affected bits are those not known zero, masked by those known one. */ + a_mask = t1->z_mask & ~t2->o_mask; + + if (!fold_masks_zosa_int(ctx, op, z_mask, o_mask, s_mask, a_mask)) { + if (op->opc == INDEX_op_and && ti_is_const(t2)) { + /* + * Canonicalize on extract, if valid. This aids x86 with its + * 2 operand MOVZBL and 2 operand AND, selecting the TCGOpcode + * which does not require matching operands. Other backends can + * trivially expand the extract to AND during code generation. + */ + uint64_t val = ti_const_val(t2); + if (!(val & (val + 1))) { + unsigned len = ctz64(~val); + if (TCG_TARGET_extract_valid(ctx->type, 0, len)) { + op->opc = INDEX_op_extract; + op->args[2] = 0; + op->args[3] = len; + } + } + } else { + fold_xx_to_x(ctx, op); + } } - - return fold_masks(ctx, op); + return true; } static bool fold_andc(OptContext *ctx, TCGOp *op) { - uint64_t z1; + uint64_t z_mask, o_mask, s_mask, a_mask; + TempOptInfo *t1, *t2; - if (fold_const2(ctx, op) || - fold_xx_to_i(ctx, op, 0) || - fold_xi_to_x(ctx, op, 0) || + if (fold_const2(ctx, op)) { + return true; + } + + t1 = arg_info(op->args[1]); + t2 = arg_info(op->args[2]); + + if (ti_is_const(t2)) { + /* Fold andc r,x,i to and r,x,~i. */ + switch (ctx->type) { + case TCG_TYPE_I32: + case TCG_TYPE_I64: + op->opc = INDEX_op_and; + break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + op->opc = INDEX_op_and_vec; + break; + default: + g_assert_not_reached(); + } + op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2)); + return fold_and(ctx, op); + } + if (fold_xx_to_i(ctx, op, 0) || fold_ix_to_not(ctx, op, -1)) { return true; } - z1 = arg_info(op->args[1])->z_mask; + z_mask = t1->z_mask & ~t2->o_mask; + o_mask = t1->o_mask & ~t2->z_mask; + s_mask = t1->s_mask & t2->s_mask; - /* - * Known-zeros does not imply known-ones. Therefore unless - * arg2 is constant, we can't infer anything from it. - */ - if (arg_is_const(op->args[2])) { - uint64_t z2 = ~arg_info(op->args[2])->z_mask; - ctx->a_mask = z1 & ~z2; - z1 &= z2; + /* Affected bits are those not known zero, masked by those known zero. */ + a_mask = t1->z_mask & t2->z_mask; + + return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask); +} + +static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op) +{ + /* If true and false values are the same, eliminate the cmp. 
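+     * (bitsel_vec computes (arg1 & arg2) | (~arg1 & arg3), so when the
+     * "true" and "false" operands are copies the selector is irrelevant.)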
*/ + if (args_are_copies(op->args[2], op->args[3])) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]); } - ctx->z_mask = z1; - ctx->s_mask = arg_info(op->args[1])->s_mask - & arg_info(op->args[2])->s_mask; - return fold_masks(ctx, op); + if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { + uint64_t tv = arg_const_val(op->args[2]); + uint64_t fv = arg_const_val(op->args[3]); + + if (tv == -1 && fv == 0) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); + } + if (tv == 0 && fv == -1) { + if (TCG_TARGET_HAS_not_vec) { + op->opc = INDEX_op_not_vec; + return fold_not(ctx, op); + } else { + op->opc = INDEX_op_xor_vec; + op->args[2] = arg_new_constant(ctx, -1); + return fold_xor(ctx, op); + } + } + } + if (arg_is_const(op->args[2])) { + uint64_t tv = arg_const_val(op->args[2]); + if (tv == -1) { + op->opc = INDEX_op_or_vec; + op->args[2] = op->args[3]; + return fold_or(ctx, op); + } + if (tv == 0 && TCG_TARGET_HAS_andc_vec) { + op->opc = INDEX_op_andc_vec; + op->args[2] = op->args[1]; + op->args[1] = op->args[3]; + return fold_andc(ctx, op); + } + } + if (arg_is_const(op->args[3])) { + uint64_t fv = arg_const_val(op->args[3]); + if (fv == 0) { + op->opc = INDEX_op_and_vec; + return fold_and(ctx, op); + } + if (fv == -1 && TCG_TARGET_HAS_orc_vec) { + op->opc = INDEX_op_orc_vec; + op->args[2] = op->args[1]; + op->args[1] = op->args[3]; + return fold_orc(ctx, op); + } + } + return finish_folding(ctx, op); } static bool fold_brcond(OptContext *ctx, TCGOp *op) @@ -1351,8 +1588,11 @@ static bool fold_brcond(OptContext *ctx, TCGOp *op) if (i > 0) { op->opc = INDEX_op_br; op->args[0] = op->args[3]; + finish_ebb(ctx); + } else { + finish_bb(ctx); } - return false; + return true; } static bool fold_brcond2(OptContext *ctx, TCGOp *op) @@ -1422,14 +1662,14 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op) break; do_brcond_low: - op->opc = INDEX_op_brcond_i32; + op->opc = INDEX_op_brcond; op->args[1] = op->args[2]; op->args[2] = cond; op->args[3] = label; return fold_brcond(ctx, op); do_brcond_high: - op->opc = INDEX_op_brcond_i32; + op->opc = INDEX_op_brcond; op->args[0] = op->args[1]; op->args[1] = op->args[3]; op->args[2] = cond; @@ -1443,64 +1683,62 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op) } op->opc = INDEX_op_br; op->args[0] = label; - break; + finish_ebb(ctx); + return true; } - return false; + + finish_bb(ctx); + return true; } static bool fold_bswap(OptContext *ctx, TCGOp *op) { - uint64_t z_mask, s_mask, sign; - - if (arg_is_const(op->args[1])) { - uint64_t t = arg_info(op->args[1])->val; + uint64_t z_mask, o_mask, s_mask; + TempOptInfo *t1 = arg_info(op->args[1]); + int flags = op->args[2]; - t = do_constant_folding(op->opc, ctx->type, t, op->args[2]); - return tcg_opt_gen_movi(ctx, op, op->args[0], t); + if (ti_is_const(t1)) { + return tcg_opt_gen_movi(ctx, op, op->args[0], + do_constant_folding(op->opc, ctx->type, + ti_const_val(t1), flags)); } - z_mask = arg_info(op->args[1])->z_mask; + z_mask = t1->z_mask; + o_mask = t1->o_mask; + s_mask = 0; switch (op->opc) { - case INDEX_op_bswap16_i32: - case INDEX_op_bswap16_i64: + case INDEX_op_bswap16: z_mask = bswap16(z_mask); - sign = INT16_MIN; + o_mask = bswap16(o_mask); + if (flags & TCG_BSWAP_OS) { + z_mask = (int16_t)z_mask; + o_mask = (int16_t)o_mask; + s_mask = INT16_MIN; + } else if (!(flags & TCG_BSWAP_OZ)) { + z_mask |= MAKE_64BIT_MASK(16, 48); + } break; - case INDEX_op_bswap32_i32: - case INDEX_op_bswap32_i64: + case INDEX_op_bswap32: z_mask = bswap32(z_mask); - sign = INT32_MIN; + 
o_mask = bswap32(o_mask); + if (flags & TCG_BSWAP_OS) { + z_mask = (int32_t)z_mask; + o_mask = (int32_t)o_mask; + s_mask = INT32_MIN; + } else if (!(flags & TCG_BSWAP_OZ)) { + z_mask |= MAKE_64BIT_MASK(32, 32); + } break; - case INDEX_op_bswap64_i64: + case INDEX_op_bswap64: z_mask = bswap64(z_mask); - sign = INT64_MIN; + o_mask = bswap64(o_mask); break; default: g_assert_not_reached(); } - s_mask = smask_from_zmask(z_mask); - - switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { - case TCG_BSWAP_OZ: - break; - case TCG_BSWAP_OS: - /* If the sign bit may be 1, force all the bits above to 1. */ - if (z_mask & sign) { - z_mask |= sign; - s_mask = sign << 1; - } - break; - default: - /* The high bits are undefined: force all bits above the sign to 1. */ - z_mask |= sign << 1; - s_mask = 0; - break; - } - ctx->z_mask = z_mask; - ctx->s_mask = s_mask; - return fold_masks(ctx, op); + return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask); } static bool fold_call(OptContext *ctx, TCGOp *op) @@ -1540,12 +1778,44 @@ static bool fold_call(OptContext *ctx, TCGOp *op) return true; } +static bool fold_cmp_vec(OptContext *ctx, TCGOp *op) +{ + /* Canonicalize the comparison to put immediate second. */ + if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) { + op->args[3] = tcg_swap_cond(op->args[3]); + } + return finish_folding(ctx, op); +} + +static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op) +{ + /* If true and false values are the same, eliminate the cmp. */ + if (args_are_copies(op->args[3], op->args[4])) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]); + } + + /* Canonicalize the comparison to put immediate second. */ + if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) { + op->args[5] = tcg_swap_cond(op->args[5]); + } + /* + * Canonicalize the "false" input reg to match the destination, + * so that the tcg backend can implement "move if true". 
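+     * If the destination currently matches the "true" operand instead, the
+     * swap below exchanges the two data operands and inverts the condition,
+     * which preserves the overall result.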
+ */ + if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { + op->args[5] = tcg_invert_cond(op->args[5]); + } + return finish_folding(ctx, op); +} + static bool fold_count_zeros(OptContext *ctx, TCGOp *op) { - uint64_t z_mask; + uint64_t z_mask, s_mask; + TempOptInfo *t1 = arg_info(op->args[1]); + TempOptInfo *t2 = arg_info(op->args[2]); - if (arg_is_const(op->args[1])) { - uint64_t t = arg_info(op->args[1])->val; + if (ti_is_const(t1)) { + uint64_t t = ti_const_val(t1); if (t != 0) { t = do_constant_folding(op->opc, ctx->type, t, 0); @@ -1564,79 +1834,79 @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op) default: g_assert_not_reached(); } - ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask; - ctx->s_mask = smask_from_zmask(ctx->z_mask); - return false; + s_mask = ~z_mask; + z_mask |= t2->z_mask; + s_mask &= t2->s_mask; + + return fold_masks_zs(ctx, op, z_mask, s_mask); } static bool fold_ctpop(OptContext *ctx, TCGOp *op) { + uint64_t z_mask; + if (fold_const1(ctx, op)) { return true; } switch (ctx->type) { case TCG_TYPE_I32: - ctx->z_mask = 32 | 31; + z_mask = 32 | 31; break; case TCG_TYPE_I64: - ctx->z_mask = 64 | 63; + z_mask = 64 | 63; break; default: g_assert_not_reached(); } - ctx->s_mask = smask_from_zmask(ctx->z_mask); - return false; + return fold_masks_z(ctx, op, z_mask); } static bool fold_deposit(OptContext *ctx, TCGOp *op) { - TCGOpcode and_opc; + TempOptInfo *t1 = arg_info(op->args[1]); + TempOptInfo *t2 = arg_info(op->args[2]); + int ofs = op->args[3]; + int len = op->args[4]; + int width = 8 * tcg_type_size(ctx->type); + uint64_t z_mask, o_mask, s_mask; - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { - uint64_t t1 = arg_info(op->args[1])->val; - uint64_t t2 = arg_info(op->args[2])->val; - - t1 = deposit64(t1, op->args[3], op->args[4], t2); - return tcg_opt_gen_movi(ctx, op, op->args[0], t1); - } - - switch (ctx->type) { - case TCG_TYPE_I32: - and_opc = INDEX_op_and_i32; - break; - case TCG_TYPE_I64: - and_opc = INDEX_op_and_i64; - break; - default: - g_assert_not_reached(); + if (ti_is_const(t1) && ti_is_const(t2)) { + return tcg_opt_gen_movi(ctx, op, op->args[0], + deposit64(ti_const_val(t1), ofs, len, + ti_const_val(t2))); } /* Inserting a value into zero at offset 0. */ - if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) { - uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]); + if (ti_is_const_val(t1, 0) && ofs == 0) { + uint64_t mask = MAKE_64BIT_MASK(0, len); - op->opc = and_opc; + op->opc = INDEX_op_and; op->args[1] = op->args[2]; op->args[2] = arg_new_constant(ctx, mask); - ctx->z_mask = mask & arg_info(op->args[1])->z_mask; - return false; + return fold_and(ctx, op); } /* Inserting zero into a value. */ - if (arg_is_const_val(op->args[2], 0)) { - uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0); + if (ti_is_const_val(t2, 0)) { + uint64_t mask = deposit64(-1, ofs, len, 0); - op->opc = and_opc; + op->opc = INDEX_op_and; op->args[2] = arg_new_constant(ctx, mask); - ctx->z_mask = mask & arg_info(op->args[1])->z_mask; - return false; + return fold_and(ctx, op); } - ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask, - op->args[3], op->args[4], - arg_info(op->args[2])->z_mask); - return false; + /* The s_mask from the top portion of the deposit is still valid. 
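+     * For example, an 8-bit deposit at offset 0 into a 32-bit value leaves
+     * bits 8..31 of arg1, and with them any sign repetitions above bit 7,
+     * untouched; only a field that reaches the top bit takes its sign
+     * from arg2.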
*/ + if (ofs + len == width) { + s_mask = t2->s_mask << ofs; + } else { + s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len); + } + + z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask); + o_mask = deposit64(t1->o_mask, ofs, len, t2->o_mask); + + return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask); } static bool fold_divide(OptContext *ctx, TCGOp *op) @@ -1645,24 +1915,24 @@ static bool fold_divide(OptContext *ctx, TCGOp *op) fold_xi_to_x(ctx, op, 1)) { return true; } - return false; + return finish_folding(ctx, op); } static bool fold_dup(OptContext *ctx, TCGOp *op) { if (arg_is_const(op->args[1])) { - uint64_t t = arg_info(op->args[1])->val; + uint64_t t = arg_const_val(op->args[1]); t = dup_const(TCGOP_VECE(op), t); return tcg_opt_gen_movi(ctx, op, op->args[0], t); } - return false; + return finish_folding(ctx, op); } static bool fold_dup2(OptContext *ctx, TCGOp *op) { if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { - uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32, - arg_info(op->args[2])->val); + uint64_t t = deposit64(arg_const_val(op->args[1]), 32, 32, + arg_const_val(op->args[2])); return tcg_opt_gen_movi(ctx, op, op->args[0], t); } @@ -1670,152 +1940,144 @@ static bool fold_dup2(OptContext *ctx, TCGOp *op) op->opc = INDEX_op_dup_vec; TCGOP_VECE(op) = MO_32; } - return false; + return finish_folding(ctx, op); } static bool fold_eqv(OptContext *ctx, TCGOp *op) { - if (fold_const2_commutative(ctx, op) || - fold_xi_to_x(ctx, op, -1) || - fold_xi_to_not(ctx, op, 0)) { + uint64_t z_mask, o_mask, s_mask; + TempOptInfo *t1, *t2; + + if (fold_const2_commutative(ctx, op)) { return true; } - ctx->s_mask = arg_info(op->args[1])->s_mask - & arg_info(op->args[2])->s_mask; - return false; + t2 = arg_info(op->args[2]); + if (ti_is_const(t2)) { + /* Fold eqv r,x,i to xor r,x,~i. */ + switch (ctx->type) { + case TCG_TYPE_I32: + case TCG_TYPE_I64: + op->opc = INDEX_op_xor; + break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + op->opc = INDEX_op_xor_vec; + break; + default: + g_assert_not_reached(); + } + op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2)); + return fold_xor(ctx, op); + } + + t1 = arg_info(op->args[1]); + + z_mask = (t1->z_mask | ~t2->o_mask) & (t2->z_mask | ~t1->o_mask); + o_mask = ~(t1->z_mask | t2->z_mask) | (t1->o_mask & t2->o_mask); + s_mask = t1->s_mask & t2->s_mask; + + return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask); } static bool fold_extract(OptContext *ctx, TCGOp *op) { - uint64_t z_mask_old, z_mask; + uint64_t z_mask, o_mask, a_mask; + TempOptInfo *t1 = arg_info(op->args[1]); int pos = op->args[2]; int len = op->args[3]; - if (arg_is_const(op->args[1])) { - uint64_t t; - - t = arg_info(op->args[1])->val; - t = extract64(t, pos, len); - return tcg_opt_gen_movi(ctx, op, op->args[0], t); + if (ti_is_const(t1)) { + return tcg_opt_gen_movi(ctx, op, op->args[0], + extract64(ti_const_val(t1), pos, len)); } - z_mask_old = arg_info(op->args[1])->z_mask; - z_mask = extract64(z_mask_old, pos, len); - if (pos == 0) { - ctx->a_mask = z_mask_old ^ z_mask; - } - ctx->z_mask = z_mask; - ctx->s_mask = smask_from_zmask(z_mask); + z_mask = extract64(t1->z_mask, pos, len); + o_mask = extract64(t1->o_mask, pos, len); + a_mask = pos ? 
-1 : t1->z_mask ^ z_mask; - return fold_masks(ctx, op); + return fold_masks_zosa(ctx, op, z_mask, o_mask, 0, a_mask); } static bool fold_extract2(OptContext *ctx, TCGOp *op) { - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { - uint64_t v1 = arg_info(op->args[1])->val; - uint64_t v2 = arg_info(op->args[2])->val; - int shr = op->args[3]; + TempOptInfo *t1 = arg_info(op->args[1]); + TempOptInfo *t2 = arg_info(op->args[2]); + uint64_t z1 = t1->z_mask; + uint64_t z2 = t2->z_mask; + uint64_t o1 = t1->o_mask; + uint64_t o2 = t2->o_mask; + int shr = op->args[3]; - if (op->opc == INDEX_op_extract2_i64) { - v1 >>= shr; - v2 <<= 64 - shr; - } else { - v1 = (uint32_t)v1 >> shr; - v2 = (uint64_t)((int32_t)v2 << (32 - shr)); - } - return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2); + if (ctx->type == TCG_TYPE_I32) { + z1 = (uint32_t)z1 >> shr; + o1 = (uint32_t)o1 >> shr; + z2 = (uint64_t)((int32_t)z2 << (32 - shr)); + o2 = (uint64_t)((int32_t)o2 << (32 - shr)); + } else { + z1 >>= shr; + o1 >>= shr; + z2 <<= 64 - shr; + o2 <<= 64 - shr; } - return false; + + return fold_masks_zo(ctx, op, z1 | z2, o1 | o2); } static bool fold_exts(OptContext *ctx, TCGOp *op) { - uint64_t s_mask_old, s_mask, z_mask, sign; - bool type_change = false; + uint64_t z_mask, o_mask, s_mask; + TempOptInfo *t1; if (fold_const1(ctx, op)) { return true; } - z_mask = arg_info(op->args[1])->z_mask; - s_mask = arg_info(op->args[1])->s_mask; - s_mask_old = s_mask; + t1 = arg_info(op->args[1]); + z_mask = t1->z_mask; + o_mask = t1->o_mask; + s_mask = t1->s_mask; switch (op->opc) { - CASE_OP_32_64(ext8s): - sign = INT8_MIN; - z_mask = (uint8_t)z_mask; - break; - CASE_OP_32_64(ext16s): - sign = INT16_MIN; - z_mask = (uint16_t)z_mask; - break; case INDEX_op_ext_i32_i64: - type_change = true; - QEMU_FALLTHROUGH; - case INDEX_op_ext32s_i64: - sign = INT32_MIN; - z_mask = (uint32_t)z_mask; + s_mask |= INT32_MIN; + z_mask = (int32_t)z_mask; + o_mask = (int32_t)o_mask; break; default: g_assert_not_reached(); } - - if (z_mask & sign) { - z_mask |= sign; - } - s_mask |= sign << 1; - - ctx->z_mask = z_mask; - ctx->s_mask = s_mask; - if (!type_change) { - ctx->a_mask = s_mask & ~s_mask_old; - } - - return fold_masks(ctx, op); + return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask); } static bool fold_extu(OptContext *ctx, TCGOp *op) { - uint64_t z_mask_old, z_mask; - bool type_change = false; + uint64_t z_mask, o_mask; + TempOptInfo *t1; if (fold_const1(ctx, op)) { return true; } - z_mask_old = z_mask = arg_info(op->args[1])->z_mask; + t1 = arg_info(op->args[1]); + z_mask = t1->z_mask; + o_mask = t1->o_mask; switch (op->opc) { - CASE_OP_32_64(ext8u): - z_mask = (uint8_t)z_mask; - break; - CASE_OP_32_64(ext16u): - z_mask = (uint16_t)z_mask; - break; case INDEX_op_extrl_i64_i32: case INDEX_op_extu_i32_i64: - type_change = true; - QEMU_FALLTHROUGH; - case INDEX_op_ext32u_i64: z_mask = (uint32_t)z_mask; + o_mask = (uint32_t)o_mask; break; case INDEX_op_extrh_i64_i32: - type_change = true; z_mask >>= 32; + o_mask >>= 32; break; default: g_assert_not_reached(); } - - ctx->z_mask = z_mask; - ctx->s_mask = smask_from_zmask(z_mask); - if (!type_change) { - ctx->a_mask = z_mask_old ^ z_mask; - } - return fold_masks(ctx, op); + return fold_masks_zo(ctx, op, z_mask, o_mask); } static bool fold_mb(OptContext *ctx, TCGOp *op) @@ -1849,8 +2111,15 @@ static bool fold_mov(OptContext *ctx, TCGOp *op) static bool fold_movcond(OptContext *ctx, TCGOp *op) { + uint64_t z_mask, o_mask, s_mask; + TempOptInfo *tt, *ft; int i; + /* If true and false 
values are the same, eliminate the cmp. */ + if (args_are_copies(op->args[3], op->args[4])) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]); + } + /* * Canonicalize the "false" input reg to match the destination reg so * that the tcg backend can implement a "move if true" operation. @@ -1865,53 +2134,33 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op) return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]); } - ctx->z_mask = arg_info(op->args[3])->z_mask - | arg_info(op->args[4])->z_mask; - ctx->s_mask = arg_info(op->args[3])->s_mask - & arg_info(op->args[4])->s_mask; + tt = arg_info(op->args[3]); + ft = arg_info(op->args[4]); + z_mask = tt->z_mask | ft->z_mask; + o_mask = tt->o_mask & ft->o_mask; + s_mask = tt->s_mask & ft->s_mask; - if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { - uint64_t tv = arg_info(op->args[3])->val; - uint64_t fv = arg_info(op->args[4])->val; - TCGOpcode opc, negopc = 0; + if (ti_is_const(tt) && ti_is_const(ft)) { + uint64_t tv = ti_const_val(tt); + uint64_t fv = ti_const_val(ft); TCGCond cond = op->args[5]; - switch (ctx->type) { - case TCG_TYPE_I32: - opc = INDEX_op_setcond_i32; - if (TCG_TARGET_HAS_negsetcond_i32) { - negopc = INDEX_op_negsetcond_i32; - } - tv = (int32_t)tv; - fv = (int32_t)fv; - break; - case TCG_TYPE_I64: - opc = INDEX_op_setcond_i64; - if (TCG_TARGET_HAS_negsetcond_i64) { - negopc = INDEX_op_negsetcond_i64; - } - break; - default: - g_assert_not_reached(); - } - if (tv == 1 && fv == 0) { - op->opc = opc; + op->opc = INDEX_op_setcond; op->args[3] = cond; } else if (fv == 1 && tv == 0) { - op->opc = opc; + op->opc = INDEX_op_setcond; + op->args[3] = tcg_invert_cond(cond); + } else if (tv == -1 && fv == 0) { + op->opc = INDEX_op_negsetcond; + op->args[3] = cond; + } else if (fv == -1 && tv == 0) { + op->opc = INDEX_op_negsetcond; op->args[3] = tcg_invert_cond(cond); - } else if (negopc) { - if (tv == -1 && fv == 0) { - op->opc = negopc; - op->args[3] = cond; - } else if (fv == -1 && tv == 0) { - op->opc = negopc; - op->args[3] = tcg_invert_cond(cond); - } } } - return false; + + return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask); } static bool fold_mul(OptContext *ctx, TCGOp *op) @@ -1921,7 +2170,7 @@ static bool fold_mul(OptContext *ctx, TCGOp *op) fold_xi_to_x(ctx, op, 1)) { return true; } - return false; + return finish_folding(ctx, op); } static bool fold_mul_highpart(OptContext *ctx, TCGOp *op) @@ -1930,7 +2179,7 @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op) fold_xi_to_i(ctx, op, 0)) { return true; } - return false; + return finish_folding(ctx, op); } static bool fold_multiply2(OptContext *ctx, TCGOp *op) @@ -1938,28 +2187,30 @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op) swap_commutative(op->args[0], &op->args[2], &op->args[3]); if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { - uint64_t a = arg_info(op->args[2])->val; - uint64_t b = arg_info(op->args[3])->val; + uint64_t a = arg_const_val(op->args[2]); + uint64_t b = arg_const_val(op->args[3]); uint64_t h, l; TCGArg rl, rh; TCGOp *op2; switch (op->opc) { - case INDEX_op_mulu2_i32: - l = (uint64_t)(uint32_t)a * (uint32_t)b; - h = (int32_t)(l >> 32); - l = (int32_t)l; - break; - case INDEX_op_muls2_i32: - l = (int64_t)(int32_t)a * (int32_t)b; - h = l >> 32; - l = (int32_t)l; - break; - case INDEX_op_mulu2_i64: - mulu64(&l, &h, a, b); + case INDEX_op_mulu2: + if (ctx->type == TCG_TYPE_I32) { + l = (uint64_t)(uint32_t)a * (uint32_t)b; + h = (int32_t)(l >> 32); + l = (int32_t)l; + } else { + mulu64(&l, &h, a, 
b); + } break; - case INDEX_op_muls2_i64: - muls64(&l, &h, a, b); + case INDEX_op_muls2: + if (ctx->type == TCG_TYPE_I32) { + l = (int64_t)(int32_t)a * (int32_t)b; + h = l >> 32; + l = (int32_t)l; + } else { + muls64(&l, &h, a, b); + } break; default: g_assert_not_reached(); @@ -1969,39 +2220,42 @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op) rh = op->args[1]; /* The proper opcode is supplied by tcg_opt_gen_mov. */ - op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2); + op2 = opt_insert_before(ctx, op, 0, 2); tcg_opt_gen_movi(ctx, op, rl, l); tcg_opt_gen_movi(ctx, op2, rh, h); return true; } - return false; + return finish_folding(ctx, op); } static bool fold_nand(OptContext *ctx, TCGOp *op) { + uint64_t z_mask, o_mask, s_mask; + TempOptInfo *t1, *t2; + if (fold_const2_commutative(ctx, op) || fold_xi_to_not(ctx, op, -1)) { return true; } - ctx->s_mask = arg_info(op->args[1])->s_mask - & arg_info(op->args[2])->s_mask; - return false; + t1 = arg_info(op->args[1]); + t2 = arg_info(op->args[2]); + + z_mask = ~(t1->o_mask & t2->o_mask); + o_mask = ~(t1->z_mask & t2->z_mask); + s_mask = t1->s_mask & t2->s_mask; + + return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask); } static bool fold_neg_no_const(OptContext *ctx, TCGOp *op) { /* Set to 1 all bits to the left of the rightmost. */ uint64_t z_mask = arg_info(op->args[1])->z_mask; - ctx->z_mask = -(z_mask & -z_mask); + z_mask = -(z_mask & -z_mask); - /* - * Because of fold_sub_to_neg, we want to always return true, - * via finish_folding. - */ - finish_folding(ctx, op); - return true; + return fold_masks_z(ctx, op, z_mask); } static bool fold_neg(OptContext *ctx, TCGOp *op) @@ -2011,83 +2265,138 @@ static bool fold_neg(OptContext *ctx, TCGOp *op) static bool fold_nor(OptContext *ctx, TCGOp *op) { + uint64_t z_mask, o_mask, s_mask; + TempOptInfo *t1, *t2; + if (fold_const2_commutative(ctx, op) || fold_xi_to_not(ctx, op, 0)) { return true; } - ctx->s_mask = arg_info(op->args[1])->s_mask - & arg_info(op->args[2])->s_mask; - return false; + t1 = arg_info(op->args[1]); + t2 = arg_info(op->args[2]); + + z_mask = ~(t1->o_mask | t2->o_mask); + o_mask = ~(t1->z_mask | t2->z_mask); + s_mask = t1->s_mask & t2->s_mask; + + return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask); } static bool fold_not(OptContext *ctx, TCGOp *op) { + TempOptInfo *t1; + if (fold_const1(ctx, op)) { return true; } - ctx->s_mask = arg_info(op->args[1])->s_mask; - - /* Because of fold_to_not, we want to always return true, via finish. */ - finish_folding(ctx, op); - return true; + t1 = arg_info(op->args[1]); + return fold_masks_zos(ctx, op, ~t1->o_mask, ~t1->z_mask, t1->s_mask); } static bool fold_or(OptContext *ctx, TCGOp *op) { + uint64_t z_mask, o_mask, s_mask, a_mask; + TempOptInfo *t1, *t2; + if (fold_const2_commutative(ctx, op) || fold_xi_to_x(ctx, op, 0) || fold_xx_to_x(ctx, op)) { return true; } - ctx->z_mask = arg_info(op->args[1])->z_mask - | arg_info(op->args[2])->z_mask; - ctx->s_mask = arg_info(op->args[1])->s_mask - & arg_info(op->args[2])->s_mask; - return fold_masks(ctx, op); + t1 = arg_info(op->args[1]); + t2 = arg_info(op->args[2]); + + z_mask = t1->z_mask | t2->z_mask; + o_mask = t1->o_mask | t2->o_mask; + s_mask = t1->s_mask & t2->s_mask; + + /* Affected bits are those not known one, masked by those known zero. 
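+     * E.g. if every bit that may be set in arg2 is already known one in
+     * arg1, a_mask is zero and the OR folds to a copy of arg1.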
*/ + a_mask = ~t1->o_mask & t2->z_mask; + + return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask); } static bool fold_orc(OptContext *ctx, TCGOp *op) { - if (fold_const2(ctx, op) || - fold_xx_to_i(ctx, op, -1) || - fold_xi_to_x(ctx, op, -1) || + uint64_t z_mask, o_mask, s_mask, a_mask; + TempOptInfo *t1, *t2; + + if (fold_const2(ctx, op)) { + return true; + } + + t2 = arg_info(op->args[2]); + if (ti_is_const(t2)) { + /* Fold orc r,x,i to or r,x,~i. */ + switch (ctx->type) { + case TCG_TYPE_I32: + case TCG_TYPE_I64: + op->opc = INDEX_op_or; + break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + op->opc = INDEX_op_or_vec; + break; + default: + g_assert_not_reached(); + } + op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2)); + return fold_or(ctx, op); + } + if (fold_xx_to_i(ctx, op, -1) || fold_ix_to_not(ctx, op, 0)) { return true; } + t1 = arg_info(op->args[1]); - ctx->s_mask = arg_info(op->args[1])->s_mask - & arg_info(op->args[2])->s_mask; - return false; + z_mask = t1->z_mask | ~t2->o_mask; + o_mask = t1->o_mask | ~t2->z_mask; + s_mask = t1->s_mask & t2->s_mask; + + /* Affected bits are those not known one, masked by those known one. */ + a_mask = ~t1->o_mask & ~t2->o_mask; + + return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask); } -static bool fold_qemu_ld(OptContext *ctx, TCGOp *op) +static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op) { const TCGOpDef *def = &tcg_op_defs[op->opc]; MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs]; MemOp mop = get_memop(oi); int width = 8 * memop_size(mop); + uint64_t z_mask = -1, s_mask = 0; if (width < 64) { - ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width); - if (!(mop & MO_SIGN)) { - ctx->z_mask = MAKE_64BIT_MASK(0, width); - ctx->s_mask <<= 1; + if (mop & MO_SIGN) { + s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1)); + } else { + z_mask = MAKE_64BIT_MASK(0, width); } } /* Opcodes that touch guest memory stop the mb optimization. */ ctx->prev_mb = NULL; - return false; + + return fold_masks_zs(ctx, op, z_mask, s_mask); +} + +static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op) +{ + /* Opcodes that touch guest memory stop the mb optimization. */ + ctx->prev_mb = NULL; + return finish_folding(ctx, op); } static bool fold_qemu_st(OptContext *ctx, TCGOp *op) { /* Opcodes that touch guest memory stop the mb optimization. */ ctx->prev_mb = NULL; - return false; + return true; } static bool fold_remainder(OptContext *ctx, TCGOp *op) @@ -2096,10 +2405,11 @@ static bool fold_remainder(OptContext *ctx, TCGOp *op) fold_xx_to_i(ctx, op, 0)) { return true; } - return false; + return finish_folding(ctx, op); } -static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg) +/* Return 1 if finished, -1 if simplified, 0 if unchanged.
*/ +static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg) { uint64_t a_zmask, b_val; TCGCond cond; @@ -2109,7 +2419,7 @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg) } a_zmask = arg_info(op->args[1])->z_mask; - b_val = arg_info(op->args[2])->val; + b_val = arg_const_val(op->args[2]); cond = op->args[3]; if (ctx->type == TCG_TYPE_I32) { @@ -2164,47 +2474,27 @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg) break; } if (convert) { - TCGOpcode add_opc, xor_opc, neg_opc; - if (!inv && !neg) { return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); } - switch (ctx->type) { - case TCG_TYPE_I32: - add_opc = INDEX_op_add_i32; - neg_opc = INDEX_op_neg_i32; - xor_opc = INDEX_op_xor_i32; - break; - case TCG_TYPE_I64: - add_opc = INDEX_op_add_i64; - neg_opc = INDEX_op_neg_i64; - xor_opc = INDEX_op_xor_i64; - break; - default: - g_assert_not_reached(); - } - if (!inv) { - op->opc = neg_opc; + op->opc = INDEX_op_neg; } else if (neg) { - op->opc = add_opc; + op->opc = INDEX_op_add; op->args[2] = arg_new_constant(ctx, -1); } else { - op->opc = xor_opc; + op->opc = INDEX_op_xor; op->args[2] = arg_new_constant(ctx, 1); } - return false; + return -1; } } - - return false; + return 0; } static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg) { - TCGOpcode and_opc, sub_opc, xor_opc, neg_opc, shr_opc; - TCGOpcode uext_opc = 0, sext_opc = 0; TCGCond cond = op->args[3]; TCGArg ret, src1, src2; TCGOp *op2; @@ -2217,79 +2507,52 @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg) } src2 = op->args[2]; - val = arg_info(src2)->val; + val = arg_const_val(src2); if (!is_power_of_2(val)) { return; } sh = ctz64(val); - switch (ctx->type) { - case TCG_TYPE_I32: - and_opc = INDEX_op_and_i32; - sub_opc = INDEX_op_sub_i32; - xor_opc = INDEX_op_xor_i32; - shr_opc = INDEX_op_shr_i32; - neg_opc = INDEX_op_neg_i32; - if (TCG_TARGET_extract_i32_valid(sh, 1)) { - uext_opc = TCG_TARGET_HAS_extract_i32 ? INDEX_op_extract_i32 : 0; - sext_opc = TCG_TARGET_HAS_sextract_i32 ? INDEX_op_sextract_i32 : 0; - } - break; - case TCG_TYPE_I64: - and_opc = INDEX_op_and_i64; - sub_opc = INDEX_op_sub_i64; - xor_opc = INDEX_op_xor_i64; - shr_opc = INDEX_op_shr_i64; - neg_opc = INDEX_op_neg_i64; - if (TCG_TARGET_extract_i64_valid(sh, 1)) { - uext_opc = TCG_TARGET_HAS_extract_i64 ? INDEX_op_extract_i64 : 0; - sext_opc = TCG_TARGET_HAS_sextract_i64 ? 
INDEX_op_sextract_i64 : 0; - } - break; - default: - g_assert_not_reached(); - } - ret = op->args[0]; src1 = op->args[1]; inv = cond == TCG_COND_TSTEQ; - if (sh && sext_opc && neg && !inv) { - op->opc = sext_opc; + if (sh && neg && !inv && TCG_TARGET_sextract_valid(ctx->type, sh, 1)) { + op->opc = INDEX_op_sextract; op->args[1] = src1; op->args[2] = sh; op->args[3] = 1; return; - } else if (sh && uext_opc) { - op->opc = uext_opc; + } else if (sh && TCG_TARGET_extract_valid(ctx->type, sh, 1)) { + op->opc = INDEX_op_extract; op->args[1] = src1; op->args[2] = sh; op->args[3] = 1; } else { if (sh) { - op2 = tcg_op_insert_before(ctx->tcg, op, shr_opc, 3); + op2 = opt_insert_before(ctx, op, INDEX_op_shr, 3); op2->args[0] = ret; op2->args[1] = src1; op2->args[2] = arg_new_constant(ctx, sh); src1 = ret; } - op->opc = and_opc; + op->opc = INDEX_op_and; op->args[1] = src1; op->args[2] = arg_new_constant(ctx, 1); } if (neg && inv) { - op2 = tcg_op_insert_after(ctx->tcg, op, sub_opc, 3); + op2 = opt_insert_after(ctx, op, INDEX_op_add, 3); op2->args[0] = ret; op2->args[1] = ret; - op2->args[2] = arg_new_constant(ctx, 1); + op2->args[2] = arg_new_constant(ctx, -1); } else if (inv) { - op2 = tcg_op_insert_after(ctx->tcg, op, xor_opc, 3); + op2 = opt_insert_after(ctx, op, INDEX_op_xor, 3); op2->args[0] = ret; op2->args[1] = ret; op2->args[2] = arg_new_constant(ctx, 1); } else if (neg) { - op2 = tcg_op_insert_after(ctx->tcg, op, neg_opc, 2); + op2 = opt_insert_after(ctx, op, INDEX_op_neg, 2); op2->args[0] = ret; op2->args[1] = ret; } @@ -2303,14 +2566,15 @@ static bool fold_setcond(OptContext *ctx, TCGOp *op) return tcg_opt_gen_movi(ctx, op, op->args[0], i); } - if (fold_setcond_zmask(ctx, op, false)) { + i = fold_setcond_zmask(ctx, op, false); + if (i > 0) { return true; } - fold_setcond_tst_pow2(ctx, op, false); + if (i == 0) { + fold_setcond_tst_pow2(ctx, op, false); + } - ctx->z_mask = 1; - ctx->s_mask = smask_from_zmask(1); - return false; + return fold_masks_z(ctx, op, 1); } static bool fold_negsetcond(OptContext *ctx, TCGOp *op) @@ -2321,14 +2585,16 @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op) return tcg_opt_gen_movi(ctx, op, op->args[0], -i); } - if (fold_setcond_zmask(ctx, op, true)) { + i = fold_setcond_zmask(ctx, op, true); + if (i > 0) { return true; } - fold_setcond_tst_pow2(ctx, op, true); + if (i == 0) { + fold_setcond_tst_pow2(ctx, op, true); + } /* Value is {0,-1} so all bits are repetitions of the sign. 
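     * The value itself stays unknown, so only an all-ones s_mask is recorded.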
*/ - ctx->s_mask = -1; - return false; + return fold_masks_s(ctx, op, -1); } static bool fold_setcond2(OptContext *ctx, TCGOp *op) @@ -2398,20 +2664,18 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op) do_setcond_low: op->args[2] = op->args[3]; op->args[3] = cond; - op->opc = INDEX_op_setcond_i32; + op->opc = INDEX_op_setcond; return fold_setcond(ctx, op); do_setcond_high: op->args[1] = op->args[2]; op->args[2] = op->args[4]; op->args[3] = cond; - op->opc = INDEX_op_setcond_i32; + op->opc = INDEX_op_setcond; return fold_setcond(ctx, op); } - ctx->z_mask = 1; - ctx->s_mask = smask_from_zmask(1); - return false; + return fold_masks_z(ctx, op, 1); do_setcond_const: return tcg_opt_gen_movi(ctx, op, op->args[0], i); @@ -2419,37 +2683,30 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op) static bool fold_sextract(OptContext *ctx, TCGOp *op) { - uint64_t z_mask, s_mask, s_mask_old; + uint64_t z_mask, o_mask, s_mask, a_mask; + TempOptInfo *t1 = arg_info(op->args[1]); int pos = op->args[2]; int len = op->args[3]; - if (arg_is_const(op->args[1])) { - uint64_t t; - - t = arg_info(op->args[1])->val; - t = sextract64(t, pos, len); - return tcg_opt_gen_movi(ctx, op, op->args[0], t); + if (ti_is_const(t1)) { + return tcg_opt_gen_movi(ctx, op, op->args[0], + sextract64(ti_const_val(t1), pos, len)); } - z_mask = arg_info(op->args[1])->z_mask; - z_mask = sextract64(z_mask, pos, len); - ctx->z_mask = z_mask; + s_mask = t1->s_mask >> pos; + s_mask |= -1ull << (len - 1); + a_mask = pos ? -1 : s_mask & ~t1->s_mask; - s_mask_old = arg_info(op->args[1])->s_mask; - s_mask = sextract64(s_mask_old, pos, len); - s_mask |= MAKE_64BIT_MASK(len, 64 - len); - ctx->s_mask = s_mask; + z_mask = sextract64(t1->z_mask, pos, len); + o_mask = sextract64(t1->o_mask, pos, len); - if (pos == 0) { - ctx->a_mask = s_mask & ~s_mask_old; - } - - return fold_masks(ctx, op); + return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask); } static bool fold_shift(OptContext *ctx, TCGOp *op) { - uint64_t s_mask, z_mask, sign; + uint64_t s_mask, z_mask, o_mask; + TempOptInfo *t1, *t2; if (fold_const2(ctx, op) || fold_ix_to_i(ctx, op, 0) || @@ -2457,43 +2714,43 @@ static bool fold_shift(OptContext *ctx, TCGOp *op) return true; } - s_mask = arg_info(op->args[1])->s_mask; - z_mask = arg_info(op->args[1])->z_mask; - - if (arg_is_const(op->args[2])) { - int sh = arg_info(op->args[2])->val; + t1 = arg_info(op->args[1]); + t2 = arg_info(op->args[2]); + s_mask = t1->s_mask; + z_mask = t1->z_mask; + o_mask = t1->o_mask; - ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh); + if (ti_is_const(t2)) { + int sh = ti_const_val(t2); + z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh); + o_mask = do_constant_folding(op->opc, ctx->type, o_mask, sh); s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh); - ctx->s_mask = smask_from_smask(s_mask); - return fold_masks(ctx, op); + return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask); } switch (op->opc) { - CASE_OP_32_64(sar): + case INDEX_op_sar: /* * Arithmetic right shift will not reduce the number of * input sign repetitions. */ - ctx->s_mask = s_mask; - break; - CASE_OP_32_64(shr): + return fold_masks_s(ctx, op, s_mask); + case INDEX_op_shr: /* * If the sign bit is known zero, then logical right shift - * will not reduced the number of input sign repetitions. + * will not reduce the number of input sign repetitions. 
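+     * Since s_mask is kept in the canonical form of a high run of ones,
+     * -s_mask isolates the lowest bit of that run; the test below checks
+     * that this bit is known zero, i.e. the value is known non-negative.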
*/ - sign = (s_mask & -s_mask) >> 1; - if (sign && !(z_mask & sign)) { - ctx->s_mask = s_mask; + if (~z_mask & -s_mask) { + return fold_masks_s(ctx, op, s_mask); } break; default: break; } - return false; + return finish_folding(ctx, op); } static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op) @@ -2501,17 +2758,14 @@ static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op) TCGOpcode neg_op; bool have_neg; - if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) { + if (!arg_is_const_val(op->args[1], 0)) { return false; } switch (ctx->type) { case TCG_TYPE_I32: - neg_op = INDEX_op_neg_i32; - have_neg = true; - break; case TCG_TYPE_I64: - neg_op = INDEX_op_neg_i64; + neg_op = INDEX_op_neg; have_neg = true; break; case TCG_TYPE_V64: @@ -2540,60 +2794,195 @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op) fold_sub_to_neg(ctx, op)) { return true; } - return false; + return finish_folding(ctx, op); } static bool fold_sub(OptContext *ctx, TCGOp *op) { - if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) { + if (fold_const2(ctx, op) || + fold_xx_to_i(ctx, op, 0) || + fold_xi_to_x(ctx, op, 0) || + fold_sub_to_neg(ctx, op)) { return true; } /* Fold sub r,x,i to add r,x,-i */ if (arg_is_const(op->args[2])) { - uint64_t val = arg_info(op->args[2])->val; + uint64_t val = arg_const_val(op->args[2]); - op->opc = (ctx->type == TCG_TYPE_I32 - ? INDEX_op_add_i32 : INDEX_op_add_i64); + op->opc = INDEX_op_add; op->args[2] = arg_new_constant(ctx, -val); } - return false; + return finish_folding(ctx, op); +} + +static void squash_prev_borrowout(OptContext *ctx, TCGOp *op) +{ + TempOptInfo *t2; + + op = QTAILQ_PREV(op, link); + switch (op->opc) { + case INDEX_op_subbo: + op->opc = INDEX_op_sub; + fold_sub(ctx, op); + break; + case INDEX_op_subbio: + op->opc = INDEX_op_subbi; + break; + case INDEX_op_subb1o: + t2 = arg_info(op->args[2]); + if (ti_is_const(t2)) { + op->opc = INDEX_op_add; + op->args[2] = arg_new_constant(ctx, -(ti_const_val(t2) + 1)); + /* Perform other constant folding, if needed. */ + fold_add(ctx, op); + } else { + TCGArg ret = op->args[0]; + op->opc = INDEX_op_sub; + op = opt_insert_after(ctx, op, INDEX_op_add, 3); + op->args[0] = ret; + op->args[1] = ret; + op->args[2] = arg_new_constant(ctx, -1); + } + break; + default: + g_assert_not_reached(); + } +} + +static bool fold_subbi(OptContext *ctx, TCGOp *op) +{ + TempOptInfo *t2; + int borrow_in = ctx->carry_state; + + if (borrow_in < 0) { + return finish_folding(ctx, op); + } + ctx->carry_state = -1; + + squash_prev_borrowout(ctx, op); + if (borrow_in == 0) { + op->opc = INDEX_op_sub; + return fold_sub(ctx, op); + } + + /* + * Propagate the known carry-in into any constant, then negate to + * transform from sub to add. If there is no constant, emit a + * separate add -1. 
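+     * For example, with a known borrow-in of 1 and a constant subtrahend
+     * of 5, sub becomes add r,a,-6.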
+ */ + t2 = arg_info(op->args[2]); + if (ti_is_const(t2)) { + op->args[2] = arg_new_constant(ctx, -(ti_const_val(t2) + 1)); + } else { + TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_sub, 3); + + op2->args[0] = op->args[0]; + op2->args[1] = op->args[1]; + op2->args[2] = op->args[2]; + fold_sub(ctx, op2); + + op->args[1] = op->args[0]; + op->args[2] = arg_new_constant(ctx, -1); + } + op->opc = INDEX_op_add; + return fold_add(ctx, op); } -static bool fold_sub2(OptContext *ctx, TCGOp *op) +static bool fold_subbio(OptContext *ctx, TCGOp *op) { - return fold_addsub2(ctx, op, false); + TempOptInfo *t1, *t2; + int borrow_out = -1; + + if (ctx->carry_state < 0) { + return finish_folding(ctx, op); + } + + squash_prev_borrowout(ctx, op); + if (ctx->carry_state == 0) { + goto do_subbo; + } + + t1 = arg_info(op->args[1]); + t2 = arg_info(op->args[2]); + + /* Propagate the known borrow-in into a constant, if possible. */ + if (ti_is_const(t2)) { + uint64_t max = ctx->type == TCG_TYPE_I32 ? UINT32_MAX : UINT64_MAX; + uint64_t v = ti_const_val(t2) & max; + + if (v < max) { + op->args[2] = arg_new_constant(ctx, v + 1); + goto do_subbo; + } + /* subtracting max + 1 produces known borrow out. */ + borrow_out = 1; + } + if (ti_is_const(t1)) { + uint64_t v = ti_const_val(t1); + if (v != 0) { + op->args[1] = arg_new_constant(ctx, v - 1); + goto do_subbo; + } + } + + /* Adjust the opcode to remember the known borrow-in. */ + op->opc = INDEX_op_subb1o; + ctx->carry_state = borrow_out; + return finish_folding(ctx, op); + + do_subbo: + op->opc = INDEX_op_subbo; + return fold_subbo(ctx, op); +} + +static bool fold_subbo(OptContext *ctx, TCGOp *op) +{ + TempOptInfo *t1 = arg_info(op->args[1]); + TempOptInfo *t2 = arg_info(op->args[2]); + int borrow_out = -1; + + if (ti_is_const(t2)) { + uint64_t v2 = ti_const_val(t2); + if (v2 == 0) { + borrow_out = 0; + } else if (ti_is_const(t1)) { + uint64_t v1 = ti_const_val(t1); + borrow_out = v1 < v2; + } + } + ctx->carry_state = borrow_out; + return finish_folding(ctx, op); } static bool fold_tcg_ld(OptContext *ctx, TCGOp *op) { + uint64_t z_mask = -1, s_mask = 0; + /* We can't do any folding with a load, but we can record bits.
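+     * The knowledge recorded mirrors the extension performed by the load:
+     * e.g. ld8s makes bits 8..63 copies of bit 7, while ld8u clears them. */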
*/ switch (op->opc) { - CASE_OP_32_64(ld8s): - ctx->s_mask = MAKE_64BIT_MASK(8, 56); + case INDEX_op_ld8s: + s_mask = INT8_MIN; break; - CASE_OP_32_64(ld8u): - ctx->z_mask = MAKE_64BIT_MASK(0, 8); - ctx->s_mask = MAKE_64BIT_MASK(9, 55); + case INDEX_op_ld8u: + z_mask = MAKE_64BIT_MASK(0, 8); break; - CASE_OP_32_64(ld16s): - ctx->s_mask = MAKE_64BIT_MASK(16, 48); + case INDEX_op_ld16s: + s_mask = INT16_MIN; break; - CASE_OP_32_64(ld16u): - ctx->z_mask = MAKE_64BIT_MASK(0, 16); - ctx->s_mask = MAKE_64BIT_MASK(17, 47); + case INDEX_op_ld16u: + z_mask = MAKE_64BIT_MASK(0, 16); break; - case INDEX_op_ld32s_i64: - ctx->s_mask = MAKE_64BIT_MASK(32, 32); + case INDEX_op_ld32s: + s_mask = INT32_MIN; break; - case INDEX_op_ld32u_i64: - ctx->z_mask = MAKE_64BIT_MASK(0, 32); - ctx->s_mask = MAKE_64BIT_MASK(33, 31); + case INDEX_op_ld32u: + z_mask = MAKE_64BIT_MASK(0, 32); break; default: g_assert_not_reached(); } - return false; + return fold_masks_zs(ctx, op, z_mask, s_mask); } static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op) @@ -2603,7 +2992,7 @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op) TCGType type; if (op->args[1] != tcgv_ptr_arg(tcg_env)) { - return false; + return finish_folding(ctx, op); } type = ctx->type; @@ -2626,23 +3015,20 @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op) if (op->args[1] != tcgv_ptr_arg(tcg_env)) { remove_mem_copy_all(ctx); - return false; + return true; } switch (op->opc) { - CASE_OP_32_64(st8): + case INDEX_op_st8: lm1 = 0; break; - CASE_OP_32_64(st16): + case INDEX_op_st16: lm1 = 1; break; - case INDEX_op_st32_i64: - case INDEX_op_st_i32: + case INDEX_op_st32: lm1 = 3; break; - case INDEX_op_st_i64: - lm1 = 7; - break; + case INDEX_op_st: case INDEX_op_st_vec: lm1 = tcg_type_size(ctx->type) - 1; break; @@ -2650,7 +3036,7 @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op) g_assert_not_reached(); } remove_mem_copy_in(ctx, ofs, ofs + lm1); - return false; + return true; } static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op) @@ -2660,8 +3046,7 @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op) TCGType type; if (op->args[1] != tcgv_ptr_arg(tcg_env)) { - fold_tcg_st(ctx, op); - return false; + return fold_tcg_st(ctx, op); } src = arg_temp(op->args[0]); @@ -2683,11 +3068,14 @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op) last = ofs + tcg_type_size(type) - 1; remove_mem_copy_in(ctx, ofs, last); record_mem_copy(ctx, type, src, ofs, last); - return false; + return true; } static bool fold_xor(OptContext *ctx, TCGOp *op) { + uint64_t z_mask, o_mask, s_mask; + TempOptInfo *t1, *t2; + if (fold_const2_commutative(ctx, op) || fold_xx_to_i(ctx, op, 0) || fold_xi_to_x(ctx, op, 0) || @@ -2695,11 +3083,14 @@ static bool fold_xor(OptContext *ctx, TCGOp *op) return true; } - ctx->z_mask = arg_info(op->args[1])->z_mask - | arg_info(op->args[2])->z_mask; - ctx->s_mask = arg_info(op->args[1])->s_mask - & arg_info(op->args[2])->s_mask; - return fold_masks(ctx, op); + t1 = arg_info(op->args[1]); + t2 = arg_info(op->args[2]); + + z_mask = (t1->z_mask | t2->z_mask) & ~(t1->o_mask & t2->o_mask); + o_mask = (t1->o_mask & ~t2->z_mask) | (t2->o_mask & ~t1->z_mask); + s_mask = t1->s_mask & t2->s_mask; + + return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask); } /* Propagate constants and copies, fold constant expressions. */ @@ -2737,62 +3128,59 @@ void tcg_optimize(TCGContext *s) copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs); /* Pre-compute the type of the operation. 
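     * The type was recorded on the op when it was created (TCGOP_TYPE), so
     * it no longer needs to be reconstructed from the TCG_OPF_VECTOR and
     * TCG_OPF_64BIT flags.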
*/ - if (def->flags & TCG_OPF_VECTOR) { - ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op); - } else if (def->flags & TCG_OPF_64BIT) { - ctx.type = TCG_TYPE_I64; - } else { - ctx.type = TCG_TYPE_I32; - } - - /* Assume all bits affected, no bits known zero, no sign reps. */ - ctx.a_mask = -1; - ctx.z_mask = -1; - ctx.s_mask = 0; + ctx.type = TCGOP_TYPE(op); /* * Process each opcode. * Sorted alphabetically by opcode as much as possible. */ switch (opc) { - CASE_OP_32_64(add): + case INDEX_op_add: done = fold_add(&ctx, op); break; case INDEX_op_add_vec: done = fold_add_vec(&ctx, op); break; - CASE_OP_32_64(add2): - done = fold_add2(&ctx, op); + case INDEX_op_addci: + done = fold_addci(&ctx, op); + break; + case INDEX_op_addcio: + done = fold_addcio(&ctx, op); + break; + case INDEX_op_addco: + done = fold_addco(&ctx, op); break; - CASE_OP_32_64_VEC(and): + case INDEX_op_and: + case INDEX_op_and_vec: done = fold_and(&ctx, op); break; - CASE_OP_32_64_VEC(andc): + case INDEX_op_andc: + case INDEX_op_andc_vec: done = fold_andc(&ctx, op); break; - CASE_OP_32_64(brcond): + case INDEX_op_brcond: done = fold_brcond(&ctx, op); break; case INDEX_op_brcond2_i32: done = fold_brcond2(&ctx, op); break; - CASE_OP_32_64(bswap16): - CASE_OP_32_64(bswap32): - case INDEX_op_bswap64_i64: + case INDEX_op_bswap16: + case INDEX_op_bswap32: + case INDEX_op_bswap64: done = fold_bswap(&ctx, op); break; - CASE_OP_32_64(clz): - CASE_OP_32_64(ctz): + case INDEX_op_clz: + case INDEX_op_ctz: done = fold_count_zeros(&ctx, op); break; - CASE_OP_32_64(ctpop): + case INDEX_op_ctpop: done = fold_ctpop(&ctx, op); break; - CASE_OP_32_64(deposit): + case INDEX_op_deposit: done = fold_deposit(&ctx, op); break; - CASE_OP_32_64(div): - CASE_OP_32_64(divu): + case INDEX_op_divs: + case INDEX_op_divu: done = fold_divide(&ctx, op); break; case INDEX_op_dup_vec: @@ -2801,149 +3189,162 @@ void tcg_optimize(TCGContext *s) case INDEX_op_dup2_vec: done = fold_dup2(&ctx, op); break; - CASE_OP_32_64_VEC(eqv): + case INDEX_op_eqv: + case INDEX_op_eqv_vec: done = fold_eqv(&ctx, op); break; - CASE_OP_32_64(extract): + case INDEX_op_extract: done = fold_extract(&ctx, op); break; - CASE_OP_32_64(extract2): + case INDEX_op_extract2: done = fold_extract2(&ctx, op); break; - CASE_OP_32_64(ext8s): - CASE_OP_32_64(ext16s): - case INDEX_op_ext32s_i64: case INDEX_op_ext_i32_i64: done = fold_exts(&ctx, op); break; - CASE_OP_32_64(ext8u): - CASE_OP_32_64(ext16u): - case INDEX_op_ext32u_i64: case INDEX_op_extu_i32_i64: case INDEX_op_extrl_i64_i32: case INDEX_op_extrh_i64_i32: done = fold_extu(&ctx, op); break; - CASE_OP_32_64(ld8s): - CASE_OP_32_64(ld8u): - CASE_OP_32_64(ld16s): - CASE_OP_32_64(ld16u): - case INDEX_op_ld32s_i64: - case INDEX_op_ld32u_i64: + case INDEX_op_ld8s: + case INDEX_op_ld8u: + case INDEX_op_ld16s: + case INDEX_op_ld16u: + case INDEX_op_ld32s: + case INDEX_op_ld32u: done = fold_tcg_ld(&ctx, op); break; - case INDEX_op_ld_i32: - case INDEX_op_ld_i64: + case INDEX_op_ld: case INDEX_op_ld_vec: done = fold_tcg_ld_memcopy(&ctx, op); break; - CASE_OP_32_64(st8): - CASE_OP_32_64(st16): - case INDEX_op_st32_i64: + case INDEX_op_st8: + case INDEX_op_st16: + case INDEX_op_st32: done = fold_tcg_st(&ctx, op); break; - case INDEX_op_st_i32: - case INDEX_op_st_i64: + case INDEX_op_st: case INDEX_op_st_vec: done = fold_tcg_st_memcopy(&ctx, op); break; case INDEX_op_mb: done = fold_mb(&ctx, op); break; - CASE_OP_32_64_VEC(mov): + case INDEX_op_mov: + case INDEX_op_mov_vec: done = fold_mov(&ctx, op); break; - CASE_OP_32_64(movcond): + case INDEX_op_movcond: 
done = fold_movcond(&ctx, op); break; - CASE_OP_32_64(mul): + case INDEX_op_mul: done = fold_mul(&ctx, op); break; - CASE_OP_32_64(mulsh): - CASE_OP_32_64(muluh): + case INDEX_op_mulsh: + case INDEX_op_muluh: done = fold_mul_highpart(&ctx, op); break; - CASE_OP_32_64(muls2): - CASE_OP_32_64(mulu2): + case INDEX_op_muls2: + case INDEX_op_mulu2: done = fold_multiply2(&ctx, op); break; - CASE_OP_32_64_VEC(nand): + case INDEX_op_nand: + case INDEX_op_nand_vec: done = fold_nand(&ctx, op); break; - CASE_OP_32_64(neg): + case INDEX_op_neg: done = fold_neg(&ctx, op); break; - CASE_OP_32_64_VEC(nor): + case INDEX_op_nor: + case INDEX_op_nor_vec: done = fold_nor(&ctx, op); break; - CASE_OP_32_64_VEC(not): + case INDEX_op_not: + case INDEX_op_not_vec: done = fold_not(&ctx, op); break; - CASE_OP_32_64_VEC(or): + case INDEX_op_or: + case INDEX_op_or_vec: done = fold_or(&ctx, op); break; - CASE_OP_32_64_VEC(orc): + case INDEX_op_orc: + case INDEX_op_orc_vec: done = fold_orc(&ctx, op); break; - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_ld_a64_i64: - case INDEX_op_qemu_ld_a32_i128: - case INDEX_op_qemu_ld_a64_i128: - done = fold_qemu_ld(&ctx, op); - break; - case INDEX_op_qemu_st8_a32_i32: - case INDEX_op_qemu_st8_a64_i32: - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st_a64_i32: - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_st_a64_i64: - case INDEX_op_qemu_st_a32_i128: - case INDEX_op_qemu_st_a64_i128: + case INDEX_op_qemu_ld: + done = fold_qemu_ld_1reg(&ctx, op); + break; + case INDEX_op_qemu_ld2: + done = fold_qemu_ld_2reg(&ctx, op); + break; + case INDEX_op_qemu_st: + case INDEX_op_qemu_st2: done = fold_qemu_st(&ctx, op); break; - CASE_OP_32_64(rem): - CASE_OP_32_64(remu): + case INDEX_op_rems: + case INDEX_op_remu: done = fold_remainder(&ctx, op); break; - CASE_OP_32_64(rotl): - CASE_OP_32_64(rotr): - CASE_OP_32_64(sar): - CASE_OP_32_64(shl): - CASE_OP_32_64(shr): + case INDEX_op_rotl: + case INDEX_op_rotr: + case INDEX_op_sar: + case INDEX_op_shl: + case INDEX_op_shr: done = fold_shift(&ctx, op); break; - CASE_OP_32_64(setcond): + case INDEX_op_setcond: done = fold_setcond(&ctx, op); break; - CASE_OP_32_64(negsetcond): + case INDEX_op_negsetcond: done = fold_negsetcond(&ctx, op); break; case INDEX_op_setcond2_i32: done = fold_setcond2(&ctx, op); break; - CASE_OP_32_64(sextract): + case INDEX_op_cmp_vec: + done = fold_cmp_vec(&ctx, op); + break; + case INDEX_op_cmpsel_vec: + done = fold_cmpsel_vec(&ctx, op); + break; + case INDEX_op_bitsel_vec: + done = fold_bitsel_vec(&ctx, op); + break; + case INDEX_op_sextract: done = fold_sextract(&ctx, op); break; - CASE_OP_32_64(sub): + case INDEX_op_sub: done = fold_sub(&ctx, op); break; + case INDEX_op_subbi: + done = fold_subbi(&ctx, op); + break; + case INDEX_op_subbio: + done = fold_subbio(&ctx, op); + break; + case INDEX_op_subbo: + done = fold_subbo(&ctx, op); + break; case INDEX_op_sub_vec: done = fold_sub_vec(&ctx, op); break; - CASE_OP_32_64(sub2): - done = fold_sub2(&ctx, op); - break; - CASE_OP_32_64_VEC(xor): + case INDEX_op_xor: + case INDEX_op_xor_vec: done = fold_xor(&ctx, op); break; + case INDEX_op_set_label: + case INDEX_op_br: + case INDEX_op_exit_tb: + case INDEX_op_goto_tb: + case INDEX_op_goto_ptr: + finish_ebb(&ctx); + done = true; + break; default: + done = finish_folding(&ctx, op); break; } - - if (!done) { - finish_folding(&ctx, op); - } + tcg_debug_assert(done); } } diff --git a/tcg/perf.c b/tcg/perf.c index 412a987d956..8fa5fa99917 100644 --- 
a/tcg/perf.c +++ b/tcg/perf.c @@ -313,7 +313,7 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb, const void *start) { struct debuginfo_query *q; - size_t insn, start_words; + size_t insn; uint64_t *gen_insn_data; if (!perfmap && !jitdump) { @@ -329,13 +329,12 @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb, /* Query debuginfo for each guest instruction. */ gen_insn_data = tcg_ctx->gen_insn_data; - start_words = tcg_ctx->insn_start_words; for (insn = 0; insn < tb->icount; insn++) { /* FIXME: This replicates the restore_state_to_opc() logic. */ - q[insn].address = gen_insn_data[insn * start_words + 0]; + q[insn].address = gen_insn_data[insn * INSN_START_WORDS + 0]; if (tb_cflags(tb) & CF_PCREL) { - q[insn].address |= (guest_pc & qemu_target_page_mask()); + q[insn].address |= guest_pc & TARGET_PAGE_MASK; } q[insn].flags = DEBUGINFO_SYMBOL | (jitdump ? DEBUGINFO_LINE : 0); } diff --git a/tcg/ppc/tcg-target-con-set.h b/tcg/ppc/tcg-target-con-set.h index 9f99bde505b..da7a383bff5 100644 --- a/tcg/ppc/tcg-target-con-set.h +++ b/tcg/ppc/tcg-target-con-set.h @@ -15,28 +15,29 @@ C_O0_I2(r, rC) C_O0_I2(v, r) C_O0_I3(r, r, r) C_O0_I3(o, m, r) -C_O0_I4(r, r, ri, ri) +C_O0_I4(r, r, rU, rC) C_O0_I4(r, r, r, r) C_O1_I1(r, r) C_O1_I1(v, r) C_O1_I1(v, v) C_O1_I1(v, vr) C_O1_I2(r, 0, rZ) -C_O1_I2(r, rI, ri) -C_O1_I2(r, rI, rT) +C_O1_I2(r, rI, r) C_O1_I2(r, r, r) C_O1_I2(r, r, ri) C_O1_I2(r, r, rC) C_O1_I2(r, r, rI) C_O1_I2(r, r, rT) C_O1_I2(r, r, rU) +C_O1_I2(r, r, rZM) C_O1_I2(r, r, rZW) +C_O1_I2(r, rI, rN) +C_O1_I2(r, rZM, rZM) C_O1_I2(v, v, v) C_O1_I3(v, v, v, v) +C_O1_I4(v, v, v, vZM, v) C_O1_I4(r, r, rC, rZ, rZ) -C_O1_I4(r, r, r, ri, ri) +C_O1_I4(r, r, r, rU, rC) C_O2_I1(r, r, r) C_N1O1_I1(o, m, r) C_O2_I2(r, r, r, r) -C_O2_I4(r, r, rI, rZM, r, r) -C_O2_I4(r, r, r, r, rI, rZM) diff --git a/tcg/ppc/tcg-target-con-str.h b/tcg/ppc/tcg-target-con-str.h index 16b687216e0..faf92da47f4 100644 --- a/tcg/ppc/tcg-target-con-str.h +++ b/tcg/ppc/tcg-target-con-str.h @@ -19,6 +19,7 @@ REGS('v', ALL_VECTOR_REGS) CONST('C', TCG_CT_CONST_CMP) CONST('I', TCG_CT_CONST_S16) CONST('M', TCG_CT_CONST_MONE) +CONST('N', TCG_CT_CONST_N16) CONST('T', TCG_CT_CONST_S32) CONST('U', TCG_CT_CONST_U32) CONST('W', TCG_CT_CONST_WSZ) diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h new file mode 100644 index 00000000000..81ec5aece7a --- /dev/null +++ b/tcg/ppc/tcg-target-has.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific opcode support + * Copyright (c) 2008 Fabrice Bellard + */ + +#ifndef TCG_TARGET_HAS_H +#define TCG_TARGET_HAS_H + +#include "host/cpuinfo.h" + +#define have_isa_2_06 (cpuinfo & CPUINFO_V2_06) +#define have_isa_2_07 (cpuinfo & CPUINFO_V2_07) +#define have_isa_3_00 (cpuinfo & CPUINFO_V3_0) +#define have_isa_3_10 (cpuinfo & CPUINFO_V3_1) +#define have_altivec (cpuinfo & CPUINFO_ALTIVEC) +#define have_vsx (cpuinfo & CPUINFO_VSX) + +/* optional instructions */ +#if TCG_TARGET_REG_BITS == 64 +#define TCG_TARGET_HAS_extr_i64_i32 0 +#endif + +#define TCG_TARGET_HAS_qemu_ldst_i128 \ + (TCG_TARGET_REG_BITS == 64 && have_isa_2_07) + +#define TCG_TARGET_HAS_tst 1 + +/* + * While technically Altivec could support V64, it has no 64-bit store + * instruction and substituting two 32-bit stores makes the generated + * code quite large. 
+ */ +#define TCG_TARGET_HAS_v64 have_vsx +#define TCG_TARGET_HAS_v128 have_altivec +#define TCG_TARGET_HAS_v256 0 + +#define TCG_TARGET_HAS_andc_vec 1 +#define TCG_TARGET_HAS_orc_vec have_isa_2_07 +#define TCG_TARGET_HAS_nand_vec have_isa_2_07 +#define TCG_TARGET_HAS_nor_vec 1 +#define TCG_TARGET_HAS_eqv_vec have_isa_2_07 +#define TCG_TARGET_HAS_not_vec 1 +#define TCG_TARGET_HAS_neg_vec have_isa_3_00 +#define TCG_TARGET_HAS_abs_vec 0 +#define TCG_TARGET_HAS_roti_vec 0 +#define TCG_TARGET_HAS_rots_vec 0 +#define TCG_TARGET_HAS_rotv_vec 1 +#define TCG_TARGET_HAS_shi_vec 0 +#define TCG_TARGET_HAS_shs_vec 0 +#define TCG_TARGET_HAS_shv_vec 1 +#define TCG_TARGET_HAS_mul_vec 1 +#define TCG_TARGET_HAS_sat_vec 1 +#define TCG_TARGET_HAS_minmax_vec 1 +#define TCG_TARGET_HAS_bitsel_vec have_vsx +#define TCG_TARGET_HAS_cmpsel_vec 1 +#define TCG_TARGET_HAS_tst_vec 0 + +#define TCG_TARGET_extract_valid(type, ofs, len) 1 +#define TCG_TARGET_deposit_valid(type, ofs, len) 1 + +static inline bool +tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len) +{ + if (type == TCG_TYPE_I64 && ofs + len == 32) { + return true; + } + return ofs == 0 && (len == 8 || len == 16); +} +#define TCG_TARGET_sextract_valid tcg_target_sextract_valid + +#endif diff --git a/tcg/ppc/tcg-target-mo.h b/tcg/ppc/tcg-target-mo.h new file mode 100644 index 00000000000..98bfe03b7ac --- /dev/null +++ b/tcg/ppc/tcg-target-mo.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific memory model + * Copyright (c) 2008 Fabrice Bellard + */ + +#ifndef TCG_TARGET_MO_H +#define TCG_TARGET_MO_H + +#define TCG_TARGET_DEFAULT_MO 0 + +#endif diff --git a/tcg/ppc/tcg-target-opc.h.inc b/tcg/ppc/tcg-target-opc.h.inc new file mode 100644 index 00000000000..c3635831b57 --- /dev/null +++ b/tcg/ppc/tcg-target-opc.h.inc @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2019 Linaro Limited + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, + * consider these to be UNSPEC with names. 
+ */ + +DEF(ppc_mrgh_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(ppc_mrgl_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(ppc_msum_vec, 1, 3, 0, TCG_OPF_VECTOR) +DEF(ppc_muleu_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(ppc_mulou_vec, 1, 2, 0, TCG_OPF_VECTOR) +DEF(ppc_pkum_vec, 1, 2, 0, TCG_OPF_VECTOR) diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc index 7f3829beeb8..b8b23d44d5e 100644 --- a/tcg/ppc/tcg-target.c.inc +++ b/tcg/ppc/tcg-target.c.inc @@ -23,8 +23,6 @@ */ #include "elf.h" -#include "../tcg-pool.c.inc" -#include "../tcg-ldst.c.inc" /* * Standardize on the _CALL_FOO symbols used by GCC: @@ -91,14 +89,15 @@ /* Shorthand for size of a register. */ #define SZR (TCG_TARGET_REG_BITS / 8) -#define TCG_CT_CONST_S16 0x100 -#define TCG_CT_CONST_U16 0x200 -#define TCG_CT_CONST_S32 0x400 -#define TCG_CT_CONST_U32 0x800 -#define TCG_CT_CONST_ZERO 0x1000 -#define TCG_CT_CONST_MONE 0x2000 -#define TCG_CT_CONST_WSZ 0x4000 -#define TCG_CT_CONST_CMP 0x8000 +#define TCG_CT_CONST_S16 0x00100 +#define TCG_CT_CONST_U16 0x00200 +#define TCG_CT_CONST_N16 0x00400 +#define TCG_CT_CONST_S32 0x00800 +#define TCG_CT_CONST_U32 0x01000 +#define TCG_CT_CONST_ZERO 0x02000 +#define TCG_CT_CONST_MONE 0x04000 +#define TCG_CT_CONST_WSZ 0x08000 +#define TCG_CT_CONST_CMP 0x10000 #define ALL_GENERAL_REGS 0xffffffffu #define ALL_VECTOR_REGS 0xffffffff00000000ull @@ -325,9 +324,11 @@ static bool tcg_target_const_match(int64_t sval, int ct, if ((uval & ~0xffff) == 0 || (uval & ~0xffff0000ull) == 0) { return 1; } - if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32 - ? mask_operand(uval, &mb, &me) - : mask64_operand(uval << clz64(uval), &mb, &me)) { + if (uval == (uint32_t)uval && mask_operand(uval, &mb, &me)) { + return 1; + } + if (TCG_TARGET_REG_BITS == 64 && + mask64_operand(uval << clz64(uval), &mb, &me)) { return 1; } return 0; @@ -342,6 +343,9 @@ static bool tcg_target_const_match(int64_t sval, int ct, if ((ct & TCG_CT_CONST_U16) && uval == (uint16_t)uval) { return 1; } + if ((ct & TCG_CT_CONST_N16) && -sval == (int16_t)-sval) { + return 1; + } if ((ct & TCG_CT_CONST_S32) && sval == (int32_t)sval) { return 1; } @@ -909,7 +913,9 @@ static void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs, static void tcg_out_rlw_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs, int sh, int mb, int me, bool rc) { - tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me) | rc); + tcg_debug_assert((mb & 0x1f) == mb); + tcg_debug_assert((me & 0x1f) == me); + tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh & 0x1f) | MB(mb) | ME(me) | rc); } static void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs, @@ -1010,111 +1016,6 @@ static void tcg_out_addpcis(TCGContext *s, TCGReg dst, intptr_t imm) tcg_out32(s, ADDPCIS | RT(dst) | (d1 << 16) | (d0 << 6) | d2); } -static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags) -{ - TCGReg tmp = dst == src ? 
TCG_REG_R0 : dst; - - if (have_isa_3_10) { - tcg_out32(s, BRH | RA(dst) | RS(src)); - if (flags & TCG_BSWAP_OS) { - tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst); - } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { - tcg_out_ext16u(s, dst, dst); - } - return; - } - - /* - * In the following, - * dep(a, b, m) -> (a & ~m) | (b & m) - * - * Begin with: src = xxxxabcd - */ - /* tmp = rol32(src, 24) & 0x000000ff = 0000000c */ - tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31); - /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00) = 000000dc */ - tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23); - - if (flags & TCG_BSWAP_OS) { - tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp); - } else { - tcg_out_mov(s, TCG_TYPE_REG, dst, tmp); - } -} - -static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags) -{ - TCGReg tmp = dst == src ? TCG_REG_R0 : dst; - - if (have_isa_3_10) { - tcg_out32(s, BRW | RA(dst) | RS(src)); - if (flags & TCG_BSWAP_OS) { - tcg_out_ext32s(s, dst, dst); - } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { - tcg_out_ext32u(s, dst, dst); - } - return; - } - - /* - * Stolen from gcc's builtin_bswap32. - * In the following, - * dep(a, b, m) -> (a & ~m) | (b & m) - * - * Begin with: src = xxxxabcd - */ - /* tmp = rol32(src, 8) & 0xffffffff = 0000bcda */ - tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31); - /* tmp = dep(tmp, rol32(src, 24), 0xff000000) = 0000dcda */ - tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7); - /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00) = 0000dcba */ - tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23); - - if (flags & TCG_BSWAP_OS) { - tcg_out_ext32s(s, dst, tmp); - } else { - tcg_out_mov(s, TCG_TYPE_REG, dst, tmp); - } -} - -static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src) -{ - TCGReg t0 = dst == src ? TCG_REG_R0 : dst; - TCGReg t1 = dst == src ? dst : TCG_REG_R0; - - if (have_isa_3_10) { - tcg_out32(s, BRD | RA(dst) | RS(src)); - return; - } - - /* - * In the following, - * dep(a, b, m) -> (a & ~m) | (b & m) - * - * Begin with: src = abcdefgh - */ - /* t0 = rol32(src, 8) & 0xffffffff = 0000fghe */ - tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31); - /* t0 = dep(t0, rol32(src, 24), 0xff000000) = 0000hghe */ - tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7); - /* t0 = dep(t0, rol32(src, 24), 0x0000ff00) = 0000hgfe */ - tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23); - - /* t0 = rol64(t0, 32) = hgfe0000 */ - tcg_out_rld(s, RLDICL, t0, t0, 32, 0); - /* t1 = rol64(src, 32) = efghabcd */ - tcg_out_rld(s, RLDICL, t1, src, 32, 0); - - /* t0 = dep(t0, rol32(t1, 24), 0xffffffff) = hgfebcda */ - tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31); - /* t0 = dep(t0, rol32(t1, 24), 0xff000000) = hgfedcda */ - tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7); - /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00) = hgfedcba */ - tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23); - - tcg_out_mov(s, TCG_TYPE_REG, dst, t0); -} - /* Emit a move into ret of arg, if it can be done in one insn. 
*/ static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg) { @@ -1749,8 +1650,6 @@ static void tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2, if (type == TCG_TYPE_I32) { arg2 = (uint32_t)arg2; - } else if (arg2 == (uint32_t)arg2) { - type = TCG_TYPE_I32; } if ((arg2 & ~0xffff) == 0) { @@ -1761,12 +1660,11 @@ static void tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2, tcg_out32(s, ANDIS | SAI(arg1, dest, arg2 >> 16)); return; } - if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) { - if (mask_operand(arg2, &mb, &me)) { - tcg_out_rlw_rc(s, RLWINM, dest, arg1, 0, mb, me, rc); - return; - } - } else { + if (arg2 == (uint32_t)arg2 && mask_operand(arg2, &mb, &me)) { + tcg_out_rlw_rc(s, RLWINM, dest, arg1, 0, mb, me, rc); + return; + } + if (TCG_TARGET_REG_BITS == 64) { int sh = clz64(arg2); if (mask64_operand(arg2 << sh, &mb, &me)) { tcg_out_rld_rc(s, RLDICR, dest, arg1, sh, me, rc); @@ -1778,9 +1676,8 @@ static void tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2, } static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2, - int const_arg2, int cr, TCGType type) + bool const_arg2, int cr, TCGType type) { - int imm; uint32_t op; tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); @@ -1797,18 +1694,15 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2, case TCG_COND_EQ: case TCG_COND_NE: if (const_arg2) { - if ((int16_t) arg2 == arg2) { + if ((int16_t)arg2 == arg2) { op = CMPI; - imm = 1; - break; - } else if ((uint16_t) arg2 == arg2) { - op = CMPLI; - imm = 1; break; } + tcg_debug_assert((uint16_t)arg2 == arg2); + op = CMPLI; + break; } op = CMPL; - imm = 0; break; case TCG_COND_TSTEQ: @@ -1822,14 +1716,11 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2, case TCG_COND_LE: case TCG_COND_GT: if (const_arg2) { - if ((int16_t) arg2 == arg2) { - op = CMPI; - imm = 1; - break; - } + tcg_debug_assert((int16_t)arg2 == arg2); + op = CMPI; + break; } op = CMP; - imm = 0; break; case TCG_COND_LTU: @@ -1837,30 +1728,20 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2, case TCG_COND_LEU: case TCG_COND_GTU: if (const_arg2) { - if ((uint16_t) arg2 == arg2) { - op = CMPLI; - imm = 1; - break; - } + tcg_debug_assert((uint16_t)arg2 == arg2); + op = CMPLI; + break; } op = CMPL; - imm = 0; break; default: g_assert_not_reached(); } op |= BF(cr) | ((type == TCG_TYPE_I64) << 21); - - if (imm) { - tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff)); - } else { - if (const_arg2) { - tcg_out_movi(s, type, TCG_REG_R0, arg2); - arg2 = TCG_REG_R0; - } - tcg_out32(s, op | RA(arg1) | RB(arg2)); - } + op |= RA(arg1); + op |= const_arg2 ? 
arg2 & 0xffff : RB(arg2); + tcg_out32(s, op); } static void tcg_out_setcond_eq0(TCGContext *s, TCGType type, @@ -1927,8 +1808,8 @@ static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2, } static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, - TCGArg arg0, TCGArg arg1, TCGArg arg2, - int const_arg2, bool neg) + TCGReg arg0, TCGReg arg1, TCGArg arg2, + bool const_arg2, bool neg) { int sh; bool inv; @@ -2073,6 +1954,54 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, } } +static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) +{ + tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, false); +} + +static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, tcg_target_long arg2) +{ + tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, false); +} + +static const TCGOutOpSetcond outop_setcond = { + .base.static_constraint = C_O1_I2(r, r, rC), + .out_rrr = tgen_setcond, + .out_rri = tgen_setcondi, +}; + +static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) +{ + tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, true); +} + +static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, tcg_target_long arg2) +{ + tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, true); +} + +static const TCGOutOpSetcond outop_negsetcond = { + .base.static_constraint = C_O1_I2(r, r, rC), + .out_rrr = tgen_negsetcond, + .out_rri = tgen_negsetcondi, +}; + +void tcg_out_br(TCGContext *s, TCGLabel *l) +{ + uint32_t insn = B; + + if (l->has_value) { + insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr); + } else { + tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0); + } + tcg_out32(s, insn); +} + static void tcg_out_bc(TCGContext *s, TCGCond cond, int bd) { tcg_out32(s, tcg_to_bc[cond] | bd); @@ -2089,17 +2018,29 @@ static void tcg_out_bc_lab(TCGContext *s, TCGCond cond, TCGLabel *l) tcg_out_bc(s, cond, bd); } -static void tcg_out_brcond(TCGContext *s, TCGCond cond, - TCGArg arg1, TCGArg arg2, int const_arg2, - TCGLabel *l, TCGType type) +static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg arg1, TCGReg arg2, TCGLabel *l) +{ + tcg_out_cmp(s, cond, arg1, arg2, false, 0, type); + tcg_out_bc_lab(s, cond, l); +} + +static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg arg1, tcg_target_long arg2, TCGLabel *l) { - tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type); + tcg_out_cmp(s, cond, arg1, arg2, true, 0, type); tcg_out_bc_lab(s, cond, l); } -static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond, - TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1, - TCGArg v2, bool const_c2) +static const TCGOutOpBrcond outop_brcond = { + .base.static_constraint = C_O0_I2(r, rC), + .out_rr = tgen_brcond, + .out_ri = tgen_brcondi, +}; + +static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg c1, TCGArg c2, bool const_c2, + TCGArg v1, bool const_v1, TCGArg v2, bool const_v2) { /* If for some reason both inputs are zero, don't produce bad code. 
*/ if (v1 == 0 && v2 == 0) { @@ -2145,6 +2086,11 @@ static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond, } } +static const TCGOutOpMovcond outop_movcond = { + .base.static_constraint = C_O1_I4(r, r, rC, rZ, rZ), + .out = tgen_movcond, +}; + static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc, TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2) { @@ -2171,8 +2117,8 @@ static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc, } } -static void tcg_out_cmp2(TCGContext *s, const TCGArg *args, - const int *const_args) +static void tcg_out_cmp2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, + TCGArg bl, bool blconst, TCGArg bh, bool bhconst) { static const struct { uint8_t bit1, bit2; } bits[] = { [TCG_COND_LT ] = { CR_LT, CR_LT }, @@ -2185,18 +2131,9 @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args, [TCG_COND_GEU] = { CR_GT, CR_LT }, }; - TCGCond cond = args[4], cond2; - TCGArg al, ah, bl, bh; - int blconst, bhconst; + TCGCond cond2; int op, bit1, bit2; - al = args[0]; - ah = args[1]; - bl = args[2]; - bh = args[3]; - blconst = const_args[2]; - bhconst = const_args[3]; - switch (cond) { case TCG_COND_EQ: op = CRAND; @@ -2248,22 +2185,42 @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args, } } -static void tcg_out_setcond2(TCGContext *s, const TCGArg *args, - const int *const_args) +static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg al, TCGReg ah, + TCGArg bl, bool const_bl, + TCGArg bh, bool const_bh) { - tcg_out_cmp2(s, args + 1, const_args + 1); + tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh); tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(0)); - tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31); + tcg_out_rlw(s, RLWINM, ret, TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31); } -static void tcg_out_brcond2(TCGContext *s, const TCGArg *args, - const int *const_args) +#if TCG_TARGET_REG_BITS != 32 +__attribute__((unused)) +#endif +static const TCGOutOpSetcond2 outop_setcond2 = { + .base.static_constraint = C_O1_I4(r, r, r, rU, rC), + .out = tgen_setcond2, +}; + +static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, + TCGArg bl, bool const_bl, + TCGArg bh, bool const_bh, TCGLabel *l) { - tcg_out_cmp2(s, args, const_args); - tcg_out_bc_lab(s, TCG_COND_EQ, arg_label(args[5])); + assert(TCG_TARGET_REG_BITS == 32); + tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh); + tcg_out_bc_lab(s, TCG_COND_EQ, l); } -static void tcg_out_mb(TCGContext *s, TCGArg a0) +#if TCG_TARGET_REG_BITS != 32 +__attribute__((unused)) +#endif +static const TCGOutOpBrcond2 outop_brcond2 = { + .base.static_constraint = C_O0_I4(r, r, rU, rC), + .out = tgen_brcond2, +}; + +static void tcg_out_mb(TCGContext *s, unsigned a0) { uint32_t insn; @@ -2439,8 +2396,7 @@ bool tcg_target_has_memory_bswap(MemOp memop) * is required and fill in @h with the host address for the fast path. */ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, - TCGReg addrlo, TCGReg addrhi, - MemOpIdx oi, bool is_ld) + TCGReg addr, MemOpIdx oi, bool is_ld) { TCGType addr_type = s->addr_type; TCGLabelQemuLdst *ldst = NULL; @@ -2475,8 +2431,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addrlo; - ldst->addrhi_reg = addrhi; + ldst->addr_reg = addr; /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. 
*/ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off); @@ -2484,36 +2439,25 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, /* Extract the page index, shifted into place for tlb index. */ if (TCG_TARGET_REG_BITS == 32) { - tcg_out_shri32(s, TCG_REG_R0, addrlo, - s->page_bits - CPU_TLB_ENTRY_BITS); + tcg_out_shri32(s, TCG_REG_R0, addr, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); } else { - tcg_out_shri64(s, TCG_REG_R0, addrlo, - s->page_bits - CPU_TLB_ENTRY_BITS); + tcg_out_shri64(s, TCG_REG_R0, addr, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); } tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0)); /* - * Load the (low part) TLB comparator into TMP2. + * Load the TLB comparator into TMP2. * For 64-bit host, always load the entire 64-bit slot for simplicity. * We will ignore the high bits with tcg_out_cmp(..., addr_type). */ - if (TCG_TARGET_REG_BITS == 64) { - if (cmp_off == 0) { - tcg_out32(s, LDUX | TAB(TCG_REG_TMP2, - TCG_REG_TMP1, TCG_REG_TMP2)); - } else { - tcg_out32(s, ADD | TAB(TCG_REG_TMP1, - TCG_REG_TMP1, TCG_REG_TMP2)); - tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2, - TCG_REG_TMP1, cmp_off); - } - } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) { - tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2, - TCG_REG_TMP1, TCG_REG_TMP2)); + if (cmp_off == 0) { + tcg_out32(s, (TCG_TARGET_REG_BITS == 64 ? LDUX : LWZUX) + | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2)); } else { tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2)); - tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1, - cmp_off + 4 * HOST_BIG_ENDIAN); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off); } /* @@ -2535,10 +2479,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, if (a_bits < s_bits) { a_bits = s_bits; } - tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0, - (32 - a_bits) & 31, 31 - s->page_bits); + tcg_out_rlw(s, RLWINM, TCG_REG_R0, addr, 0, + (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS); } else { - TCGReg t = addrlo; + TCGReg t = addr; /* * If the access is unaligned, we need to make sure we fail if we @@ -2557,40 +2501,18 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, /* Mask the address for the requested alignment. */ if (addr_type == TCG_TYPE_I32) { tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0, - (32 - a_bits) & 31, 31 - s->page_bits); + (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS); } else if (a_bits == 0) { - tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits); + tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS); } else { tcg_out_rld(s, RLDICL, TCG_REG_R0, t, - 64 - s->page_bits, s->page_bits - a_bits); - tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0); + 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits); + tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0); } } - if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) { - /* Low part comparison into cr7. */ - tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, - 0, 7, TCG_TYPE_I32); - - /* Load the high part TLB comparator into TMP2. */ - tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1, - cmp_off + 4 * !HOST_BIG_ENDIAN); - - /* Load addend, deferred for this case. */ - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, - offsetof(CPUTLBEntry, addend)); - - /* High part comparison into cr6. */ - tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2, - 0, 6, TCG_TYPE_I32); - - /* Combine comparisons into cr0. 
*/ - tcg_out32(s, CRAND | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ)); - } else { - /* Full comparison into cr0. */ - tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, - 0, 0, addr_type); - } + /* Full comparison into cr0. */ + tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, 0, 0, addr_type); /* Load a pointer into the current opcode w/conditional branch-link. */ ldst->label_ptr[0] = s->code_ptr; @@ -2602,12 +2524,11 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addrlo; - ldst->addrhi_reg = addrhi; + ldst->addr_reg = addr; /* We are expecting a_bits to max out at 7, much lower than ANDI. */ tcg_debug_assert(a_bits < 16); - tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1)); + tcg_out32(s, ANDI | SAI(addr, TCG_REG_R0, (1 << a_bits) - 1)); ldst->label_ptr[0] = s->code_ptr; tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK); @@ -2618,24 +2539,23 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { /* Zero-extend the guest address for use in the host address. */ - tcg_out_ext32u(s, TCG_REG_R0, addrlo); - h->index = TCG_REG_R0; + tcg_out_ext32u(s, TCG_REG_TMP2, addr); + h->index = TCG_REG_TMP2; } else { - h->index = addrlo; + h->index = addr; } return ldst; } static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addrhi, - MemOpIdx oi, TCGType data_type) + TCGReg addr, MemOpIdx oi, TCGType data_type) { MemOp opc = get_memop(oi); TCGLabelQemuLdst *ldst; HostAddress h; - ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true); + ldst = prepare_host_addr(s, &h, addr, oi, true); if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) { if (opc & MO_BSWAP) { @@ -2679,14 +2599,13 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, } static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, - TCGReg addrlo, TCGReg addrhi, - MemOpIdx oi, TCGType data_type) + TCGReg addr, MemOpIdx oi, TCGType data_type) { MemOp opc = get_memop(oi); TCGLabelQemuLdst *ldst; HostAddress h; - ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false); + ldst = prepare_host_addr(s, &h, addr, oi, false); if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) { if (opc & MO_BSWAP) { @@ -2705,9 +2624,9 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)]; if (!have_isa_2_06 && insn == STDBRX) { tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index)); - tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, h.index, 4)); + tcg_out32(s, ADDI | TAI(TCG_REG_TMP2, h.index, 4)); tcg_out_shri64(s, TCG_REG_R0, datalo, 32); - tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP1)); + tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP2)); } else { tcg_out32(s, insn | SAB(datalo, h.base, h.index)); } @@ -2730,7 +2649,7 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi, uint32_t insn; TCGReg index; - ldst = prepare_host_addr(s, &h, addr_reg, -1, oi, is_ld); + ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld); /* Compose the final address, as LQ/STQ have no indexing. 
*/ index = h.index; @@ -2776,6 +2695,60 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi, } } +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data, + TCGReg addr, MemOpIdx oi) +{ + tcg_out_qemu_ld(s, data, -1, addr, oi, type); +} + +static const TCGOutOpQemuLdSt outop_qemu_ld = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_qemu_ld, +}; + +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr, MemOpIdx oi) +{ + if (TCG_TARGET_REG_BITS == 32) { + tcg_out_qemu_ld(s, datalo, datahi, addr, oi, type); + } else { + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr, oi, true); + } +} + +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = { + .base.static_constraint = + TCG_TARGET_REG_BITS == 64 ? C_N1O1_I1(o, m, r) : C_O2_I1(r, r, r), + .out = tgen_qemu_ld2, +}; + +static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data, + TCGReg addr, MemOpIdx oi) +{ + tcg_out_qemu_st(s, data, -1, addr, oi, type); +} + +static const TCGOutOpQemuLdSt outop_qemu_st = { + .base.static_constraint = C_O0_I2(r, r), + .out = tgen_qemu_st, +}; + +static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr, MemOpIdx oi) +{ + if (TCG_TARGET_REG_BITS == 32) { + tcg_out_qemu_st(s, datalo, datahi, addr, oi, type); + } else { + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr, oi, false); + } +} + +static const TCGOutOpQemuLdSt2 outop_qemu_st2 = { + .base.static_constraint = + TCG_TARGET_REG_BITS == 64 ? C_O0_I3(o, m, r) : C_O0_I3(r, r, r), + .out = tgen_qemu_st2, +}; + static void tcg_out_nop_fill(tcg_insn_unit *p, int count) { int i; @@ -2924,6 +2897,13 @@ static void tcg_out_goto_tb(TCGContext *s, int which) set_jmp_reset_offset(s, which); } +static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0) +{ + tcg_out32(s, MTSPR | RS(a0) | CTR); + tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0)); + tcg_out32(s, BCCTR | BO_ALWAYS); +} + void tb_target_set_jmp_target(const TranslationBlock *tb, int n, uintptr_t jmp_rx, uintptr_t jmp_rw) { @@ -2941,603 +2921,913 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n, flush_idcache_range(jmp_rx, jmp_rw, 4); } -static void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) + +static void tgen_add(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) { - TCGArg a0, a1, a2; + tcg_out32(s, ADD | TAB(a0, a1, a2)); +} - switch (opc) { - case INDEX_op_goto_ptr: - tcg_out32(s, MTSPR | RS(args[0]) | CTR); - tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0)); - tcg_out32(s, BCCTR | BO_ALWAYS); - break; - case INDEX_op_br: - { - TCGLabel *l = arg_label(args[0]); - uint32_t insn = B; - - if (l->has_value) { - insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr), - l->u.value_ptr); - } else { - tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0); - } - tcg_out32(s, insn); - } - break; - case INDEX_op_ld8u_i32: - case INDEX_op_ld8u_i64: - tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]); - break; - case INDEX_op_ld8s_i32: - case INDEX_op_ld8s_i64: - tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]); - tcg_out_ext8s(s, TCG_TYPE_REG, args[0], args[0]); - break; - case INDEX_op_ld16u_i32: - case INDEX_op_ld16u_i64: - tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]); - break; - case INDEX_op_ld16s_i32: - case INDEX_op_ld16s_i64: - tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]); - break; - case INDEX_op_ld_i32: - case INDEX_op_ld32u_i64: - 
tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]); - break; - case INDEX_op_ld32s_i64: - tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]); - break; - case INDEX_op_ld_i64: - tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]); - break; - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]); - break; - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]); - break; - case INDEX_op_st_i32: - case INDEX_op_st32_i64: - tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]); - break; - case INDEX_op_st_i64: - tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]); - break; +static void tgen_addi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2); +} - case INDEX_op_add_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - do_addi_32: - tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2); - } else { - tcg_out32(s, ADD | TAB(a0, a1, a2)); - } - break; - case INDEX_op_sub_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[1]) { - if (const_args[2]) { - tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2); - } else { - tcg_out32(s, SUBFIC | TAI(a0, a2, a1)); - } - } else if (const_args[2]) { - a2 = -a2; - goto do_addi_32; - } else { - tcg_out32(s, SUBF | TAB(a0, a2, a1)); - } - break; +static const TCGOutOpBinary outop_add = { + .base.static_constraint = C_O1_I2(r, r, rT), + .out_rrr = tgen_add, + .out_rri = tgen_addi, +}; - case INDEX_op_and_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_andi32(s, a0, a1, a2); - } else { - tcg_out32(s, AND | SAB(a1, a0, a2)); - } - break; - case INDEX_op_and_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_andi64(s, a0, a1, a2); - } else { - tcg_out32(s, AND | SAB(a1, a0, a2)); - } - break; - case INDEX_op_or_i64: - case INDEX_op_or_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_ori32(s, a0, a1, a2); - } else { - tcg_out32(s, OR | SAB(a1, a0, a2)); - } - break; - case INDEX_op_xor_i64: - case INDEX_op_xor_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_xori32(s, a0, a1, a2); - } else { - tcg_out32(s, XOR | SAB(a1, a0, a2)); - } - break; - case INDEX_op_andc_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_andi32(s, a0, a1, ~a2); - } else { - tcg_out32(s, ANDC | SAB(a1, a0, a2)); - } - break; - case INDEX_op_andc_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_andi64(s, a0, a1, ~a2); - } else { - tcg_out32(s, ANDC | SAB(a1, a0, a2)); - } - break; - case INDEX_op_orc_i32: - if (const_args[2]) { - tcg_out_ori32(s, args[0], args[1], ~args[2]); - break; - } - /* FALLTHRU */ - case INDEX_op_orc_i64: - tcg_out32(s, ORC | SAB(args[1], args[0], args[2])); - break; - case INDEX_op_eqv_i32: - if (const_args[2]) { - tcg_out_xori32(s, args[0], args[1], ~args[2]); - break; - } - /* FALLTHRU */ - case INDEX_op_eqv_i64: - tcg_out32(s, EQV | SAB(args[1], args[0], args[2])); - break; - case INDEX_op_nand_i32: - case INDEX_op_nand_i64: - tcg_out32(s, NAND | SAB(args[1], args[0], args[2])); - break; - case INDEX_op_nor_i32: - case INDEX_op_nor_i64: - tcg_out32(s, NOR | SAB(args[1], args[0], args[2])); - break; +static void tgen_addco_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, ADDC | TAB(a0, a1, a2)); +} - case 
INDEX_op_clz_i32: - tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1], - args[2], const_args[2]); - break; - case INDEX_op_ctz_i32: - tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1], - args[2], const_args[2]); - break; - case INDEX_op_ctpop_i32: - tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0)); - break; +static void tgen_addco_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out32(s, ADDIC | TAI(a0, a1, a2)); +} - case INDEX_op_clz_i64: - tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1], - args[2], const_args[2]); - break; - case INDEX_op_ctz_i64: - tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1], - args[2], const_args[2]); - break; - case INDEX_op_ctpop_i64: - tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0)); - break; +static TCGConstraintSetIndex cset_addco(TCGType type, unsigned flags) +{ + /* + * Note that the CA bit is defined based on the word size of the + * environment. So in 64-bit mode it's always carry-out of bit 63. + * The fallback code using deposit works just as well for TCG_TYPE_I32. + */ + return type == TCG_TYPE_REG ? C_O1_I2(r, r, rI) : C_NotImplemented; +} - case INDEX_op_mul_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out32(s, MULLI | TAI(a0, a1, a2)); - } else { - tcg_out32(s, MULLW | TAB(a0, a1, a2)); - } - break; +static const TCGOutOpBinary outop_addco = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_addco, + .out_rrr = tgen_addco_rrr, + .out_rri = tgen_addco_rri, +}; - case INDEX_op_div_i32: - tcg_out32(s, DIVW | TAB(args[0], args[1], args[2])); - break; +static void tgen_addcio_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, ADDE | TAB(a0, a1, a2)); +} - case INDEX_op_divu_i32: - tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2])); - break; +static void tgen_addcio_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out32(s, (a2 ? ADDME : ADDZE) | RT(a0) | RA(a1)); +} - case INDEX_op_rem_i32: - tcg_out32(s, MODSW | TAB(args[0], args[1], args[2])); - break; +static TCGConstraintSetIndex cset_addcio(TCGType type, unsigned flags) +{ + return type == TCG_TYPE_REG ? C_O1_I2(r, r, rZM) : C_NotImplemented; +} - case INDEX_op_remu_i32: - tcg_out32(s, MODUW | TAB(args[0], args[1], args[2])); - break; +static const TCGOutOpBinary outop_addcio = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_addcio, + .out_rrr = tgen_addcio_rrr, + .out_rri = tgen_addcio_rri, +}; - case INDEX_op_shl_i32: - if (const_args[2]) { - /* Limit immediate shift count lest we create an illegal insn. */ - tcg_out_shli32(s, args[0], args[1], args[2] & 31); - } else { - tcg_out32(s, SLW | SAB(args[1], args[0], args[2])); - } - break; - case INDEX_op_shr_i32: - if (const_args[2]) { - /* Limit immediate shift count lest we create an illegal insn. 
*/ - tcg_out_shri32(s, args[0], args[1], args[2] & 31); - } else { - tcg_out32(s, SRW | SAB(args[1], args[0], args[2])); - } - break; - case INDEX_op_sar_i32: - if (const_args[2]) { - tcg_out_sari32(s, args[0], args[1], args[2]); - } else { - tcg_out32(s, SRAW | SAB(args[1], args[0], args[2])); - } - break; - case INDEX_op_rotl_i32: - if (const_args[2]) { - tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31); - } else { - tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2]) - | MB(0) | ME(31)); - } - break; - case INDEX_op_rotr_i32: - if (const_args[2]) { - tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31); - } else { - tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32)); - tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0) - | MB(0) | ME(31)); - } - break; +static const TCGOutOpAddSubCarry outop_addci = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_addcio, + .out_rrr = tgen_addcio_rrr, + .out_rri = tgen_addcio_rri, +}; - case INDEX_op_brcond_i32: - tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], - arg_label(args[3]), TCG_TYPE_I32); - break; - case INDEX_op_brcond_i64: - tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], - arg_label(args[3]), TCG_TYPE_I64); - break; - case INDEX_op_brcond2_i32: - tcg_out_brcond2(s, args, const_args); - break; +static void tcg_out_set_carry(TCGContext *s) +{ + tcg_out32(s, SUBFC | TAB(TCG_REG_R0, TCG_REG_R0, TCG_REG_R0)); +} - case INDEX_op_neg_i32: - case INDEX_op_neg_i64: - tcg_out32(s, NEG | RT(args[0]) | RA(args[1])); - break; +static void tgen_and(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, AND | SAB(a1, a0, a2)); +} - case INDEX_op_not_i32: - case INDEX_op_not_i64: - tcg_out32(s, NOR | SAB(args[1], args[0], args[1])); - break; +static void tgen_andi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_andi32(s, a0, a1, a2); + } else { + tcg_out_andi64(s, a0, a1, a2); + } +} - case INDEX_op_add_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - do_addi_64: - tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2); - } else { - tcg_out32(s, ADD | TAB(a0, a1, a2)); - } - break; - case INDEX_op_sub_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[1]) { - if (const_args[2]) { - tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2); - } else { - tcg_out32(s, SUBFIC | TAI(a0, a2, a1)); - } - } else if (const_args[2]) { - a2 = -a2; - goto do_addi_64; - } else { - tcg_out32(s, SUBF | TAB(a0, a2, a1)); - } - break; +static const TCGOutOpBinary outop_and = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_and, + .out_rri = tgen_andi, +}; - case INDEX_op_shl_i64: - if (const_args[2]) { - /* Limit immediate shift count lest we create an illegal insn. */ - tcg_out_shli64(s, args[0], args[1], args[2] & 63); - } else { - tcg_out32(s, SLD | SAB(args[1], args[0], args[2])); - } - break; - case INDEX_op_shr_i64: - if (const_args[2]) { - /* Limit immediate shift count lest we create an illegal insn. 
*/ - tcg_out_shri64(s, args[0], args[1], args[2] & 63); - } else { - tcg_out32(s, SRD | SAB(args[1], args[0], args[2])); - } - break; - case INDEX_op_sar_i64: - if (const_args[2]) { - tcg_out_sari64(s, args[0], args[1], args[2]); - } else { - tcg_out32(s, SRAD | SAB(args[1], args[0], args[2])); - } - break; - case INDEX_op_rotl_i64: - if (const_args[2]) { - tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0); - } else { - tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0)); - } - break; - case INDEX_op_rotr_i64: - if (const_args[2]) { - tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0); - } else { - tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64)); - tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0)); - } - break; +static void tgen_andc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, ANDC | SAB(a1, a0, a2)); +} - case INDEX_op_mul_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out32(s, MULLI | TAI(a0, a1, a2)); - } else { - tcg_out32(s, MULLD | TAB(a0, a1, a2)); - } - break; - case INDEX_op_div_i64: - tcg_out32(s, DIVD | TAB(args[0], args[1], args[2])); - break; - case INDEX_op_divu_i64: - tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2])); - break; - case INDEX_op_rem_i64: - tcg_out32(s, MODSD | TAB(args[0], args[1], args[2])); - break; - case INDEX_op_remu_i64: - tcg_out32(s, MODUD | TAB(args[0], args[1], args[2])); - break; +static const TCGOutOpBinary outop_andc = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_andc, +}; - case INDEX_op_qemu_ld_a64_i32: - if (TCG_TARGET_REG_BITS == 32) { - tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], - args[3], TCG_TYPE_I32); - break; - } - /* fall through */ - case INDEX_op_qemu_ld_a32_i32: - tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); - break; - case INDEX_op_qemu_ld_a32_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_qemu_ld(s, args[0], -1, args[1], -1, - args[2], TCG_TYPE_I64); - } else { - tcg_out_qemu_ld(s, args[0], args[1], args[2], -1, - args[3], TCG_TYPE_I64); - } - break; - case INDEX_op_qemu_ld_a64_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_qemu_ld(s, args[0], -1, args[1], -1, - args[2], TCG_TYPE_I64); - } else { - tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], - args[4], TCG_TYPE_I64); - } - break; - case INDEX_op_qemu_ld_a32_i128: - case INDEX_op_qemu_ld_a64_i128: - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); - tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true); - break; +static void tgen_clz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? CNTLZW : CNTLZD; + tcg_out_cntxz(s, type, insn, a0, a1, a2, false); +} - case INDEX_op_qemu_st_a64_i32: - if (TCG_TARGET_REG_BITS == 32) { - tcg_out_qemu_st(s, args[0], -1, args[1], args[2], - args[3], TCG_TYPE_I32); - break; +static void tgen_clzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? CNTLZW : CNTLZD; + tcg_out_cntxz(s, type, insn, a0, a1, a2, true); +} + +static const TCGOutOpBinary outop_clz = { + .base.static_constraint = C_O1_I2(r, r, rZW), + .out_rrr = tgen_clz, + .out_rri = tgen_clzi, +}; + +static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + uint32_t insn = type == TCG_TYPE_I32 ? CNTPOPW : CNTPOPD; + tcg_out32(s, insn | SAB(a1, a0, 0)); +} + +static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags) +{ + return have_isa_2_06 ? 
C_O1_I1(r, r) : C_NotImplemented; +} + +static const TCGOutOpUnary outop_ctpop = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_ctpop, + .out_rr = tgen_ctpop, +}; + +static void tgen_ctz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? CNTTZW : CNTTZD; + tcg_out_cntxz(s, type, insn, a0, a1, a2, false); +} + +static void tgen_ctzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? CNTTZW : CNTTZD; + tcg_out_cntxz(s, type, insn, a0, a1, a2, true); +} + +static TCGConstraintSetIndex cset_ctz(TCGType type, unsigned flags) +{ + return have_isa_3_00 ? C_O1_I2(r, r, rZW) : C_NotImplemented; +} + +static const TCGOutOpBinary outop_ctz = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_ctz, + .out_rrr = tgen_ctz, + .out_rri = tgen_ctzi, +}; + +static void tgen_eqv(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, EQV | SAB(a1, a0, a2)); +} + +#if TCG_TARGET_REG_BITS == 64 +static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) +{ + tcg_out_shri64(s, a0, a1, 32); +} + +static const TCGOutOpUnary outop_extrh_i64_i32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extrh_i64_i32, +}; +#endif + +static void tgen_divs(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? DIVW : DIVD; + tcg_out32(s, insn | TAB(a0, a1, a2)); +} + +static const TCGOutOpBinary outop_divs = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_divs, +}; + +static const TCGOutOpDivRem outop_divs2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_divu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? DIVWU : DIVDU; + tcg_out32(s, insn | TAB(a0, a1, a2)); +} + +static const TCGOutOpBinary outop_divu = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_divu, +}; + +static const TCGOutOpDivRem outop_divu2 = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_eqv = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_eqv, +}; + +static void tgen_mul(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? MULLW : MULLD; + tcg_out32(s, insn | TAB(a0, a1, a2)); +} + +static void tgen_muli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out32(s, MULLI | TAI(a0, a1, a2)); +} + +static const TCGOutOpBinary outop_mul = { + .base.static_constraint = C_O1_I2(r, r, rI), + .out_rrr = tgen_mul, + .out_rri = tgen_muli, +}; + +static const TCGOutOpMul2 outop_muls2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_mulsh(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? MULHW : MULHD; + tcg_out32(s, insn | TAB(a0, a1, a2)); +} + +static const TCGOutOpBinary outop_mulsh = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_mulsh, +}; + +static const TCGOutOpMul2 outop_mulu2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_muluh(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? 
MULHWU : MULHDU; + tcg_out32(s, insn | TAB(a0, a1, a2)); +} + +static const TCGOutOpBinary outop_muluh = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_muluh, +}; + +static void tgen_nand(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, NAND | SAB(a1, a0, a2)); +} + +static const TCGOutOpBinary outop_nand = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_nand, +}; + +static void tgen_nor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, NOR | SAB(a1, a0, a2)); +} + +static const TCGOutOpBinary outop_nor = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_nor, +}; + +static void tgen_or(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, OR | SAB(a1, a0, a2)); +} + +static void tgen_ori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_ori32(s, a0, a1, a2); +} + +static const TCGOutOpBinary outop_or = { + .base.static_constraint = C_O1_I2(r, r, rU), + .out_rrr = tgen_or, + .out_rri = tgen_ori, +}; + +static void tgen_orc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, ORC | SAB(a1, a0, a2)); +} + +static const TCGOutOpBinary outop_orc = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_orc, +}; + +static TCGConstraintSetIndex cset_mod(TCGType type, unsigned flags) +{ + return have_isa_3_00 ? C_O1_I2(r, r, r) : C_NotImplemented; +} + +static void tgen_rems(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? MODSW : MODSD; + tcg_out32(s, insn | TAB(a0, a1, a2)); +} + +static const TCGOutOpBinary outop_rems = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mod, + .out_rrr = tgen_rems, +}; + +static void tgen_remu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? MODUW : MODUD; + tcg_out32(s, insn | TAB(a0, a1, a2)); +} + +static const TCGOutOpBinary outop_remu = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mod, + .out_rrr = tgen_remu, +}; + +static void tgen_rotl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out32(s, RLWNM | SAB(a1, a0, a2) | MB(0) | ME(31)); + } else { + tcg_out32(s, RLDCL | SAB(a1, a0, a2) | MB64(0)); + } +} + +static void tgen_rotli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_rlw(s, RLWINM, a0, a1, a2, 0, 31); + } else { + tcg_out_rld(s, RLDICL, a0, a1, a2, 0); + } +} + +static const TCGOutOpBinary outop_rotl = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_rotl, + .out_rri = tgen_rotli, +}; + +static const TCGOutOpBinary outop_rotr = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_sar(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? SRAW : SRAD; + tcg_out32(s, insn | SAB(a1, a0, a2)); +} + +static void tgen_sari(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + /* Limit immediate shift count lest we create an illegal insn. 
*/ + if (type == TCG_TYPE_I32) { + tcg_out_sari32(s, a0, a1, a2 & 31); + } else { + tcg_out_sari64(s, a0, a1, a2 & 63); + } +} + +static const TCGOutOpBinary outop_sar = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_sar, + .out_rri = tgen_sari, +}; + +static void tgen_shl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? SLW : SLD; + tcg_out32(s, insn | SAB(a1, a0, a2)); +} + +static void tgen_shli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + /* Limit immediate shift count lest we create an illegal insn. */ + if (type == TCG_TYPE_I32) { + tcg_out_shli32(s, a0, a1, a2 & 31); + } else { + tcg_out_shli64(s, a0, a1, a2 & 63); + } +} + +static const TCGOutOpBinary outop_shl = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shl, + .out_rri = tgen_shli, +}; + +static void tgen_shr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? SRW : SRD; + tcg_out32(s, insn | SAB(a1, a0, a2)); +} + +static void tgen_shri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + /* Limit immediate shift count lest we create an illegal insn. */ + if (type == TCG_TYPE_I32) { + tcg_out_shri32(s, a0, a1, a2 & 31); + } else { + tcg_out_shri64(s, a0, a1, a2 & 63); + } +} + +static const TCGOutOpBinary outop_shr = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shr, + .out_rri = tgen_shri, +}; + +static void tgen_sub(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, SUBF | TAB(a0, a2, a1)); +} + +static void tgen_subfi(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, TCGReg a2) +{ + tcg_out32(s, SUBFIC | TAI(a0, a2, a1)); +} + +static const TCGOutOpSubtract outop_sub = { + .base.static_constraint = C_O1_I2(r, rI, r), + .out_rrr = tgen_sub, + .out_rir = tgen_subfi, +}; + +static void tgen_subbo_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, SUBFC | TAB(a0, a2, a1)); +} + +static void tgen_subbo_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (a2 == 0) { + tcg_out_movi(s, type, TCG_REG_R0, 0); + tgen_subbo_rrr(s, type, a0, a1, TCG_REG_R0); + } else { + tgen_addco_rri(s, type, a0, a1, -a2); + } +} + +/* The underlying insn for subfi is subfic. */ +#define tgen_subbo_rir tgen_subfi + +static void tgen_subbo_rii(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, tcg_target_long a2) +{ + tcg_out_movi(s, type, TCG_REG_R0, a2); + tgen_subbo_rir(s, type, a0, a1, TCG_REG_R0); +} + +static TCGConstraintSetIndex cset_subbo(TCGType type, unsigned flags) +{ + /* Recall that the CA bit is defined based on the host word size. */ + return type == TCG_TYPE_REG ? 
C_O1_I2(r, rI, rN) : C_NotImplemented; +} + +static const TCGOutOpAddSubCarry outop_subbo = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_subbo, + .out_rrr = tgen_subbo_rrr, + .out_rri = tgen_subbo_rri, + .out_rir = tgen_subbo_rir, + .out_rii = tgen_subbo_rii, +}; + +static void tgen_subbio_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, SUBFE | TAB(a0, a2, a1)); +} + +static void tgen_subbio_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tgen_addcio_rri(s, type, a0, a1, ~a2); +} + +static void tgen_subbio_rir(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, TCGReg a2) +{ + tcg_debug_assert(a1 == 0 || a1 == -1); + tcg_out32(s, (a1 ? SUBFME : SUBFZE) | RT(a0) | RA(a2)); +} + +static void tgen_subbio_rii(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, tcg_target_long a2) +{ + tcg_out_movi(s, type, TCG_REG_R0, a2); + tgen_subbio_rir(s, type, a0, a1, TCG_REG_R0); +} + +static TCGConstraintSetIndex cset_subbio(TCGType type, unsigned flags) +{ + return type == TCG_TYPE_REG ? C_O1_I2(r, rZM, rZM) : C_NotImplemented; +} + +static const TCGOutOpAddSubCarry outop_subbio = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_subbio, + .out_rrr = tgen_subbio_rrr, + .out_rri = tgen_subbio_rri, + .out_rir = tgen_subbio_rir, + .out_rii = tgen_subbio_rii, +}; + +#define outop_subbi outop_subbio + +static void tcg_out_set_borrow(TCGContext *s) +{ + /* borrow = !carry */ + tcg_out32(s, ADDIC | TAI(TCG_REG_R0, TCG_REG_R0, 0)); +} + +static void tgen_xor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, XOR | SAB(a1, a0, a2)); +} + +static void tgen_xori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_xori32(s, a0, a1, a2); +} + +static const TCGOutOpBinary outop_xor = { + .base.static_constraint = C_O1_I2(r, r, rU), + .out_rrr = tgen_xor, + .out_rri = tgen_xori, +}; + +static void tgen_bswap16(TCGContext *s, TCGType type, + TCGReg dst, TCGReg src, unsigned flags) +{ + TCGReg tmp = dst == src ? TCG_REG_R0 : dst; + + if (have_isa_3_10) { + tcg_out32(s, BRH | RA(dst) | RS(src)); + if (flags & TCG_BSWAP_OS) { + tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst); + } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + tcg_out_ext16u(s, dst, dst); } - /* fall through */ - case INDEX_op_qemu_st_a32_i32: - tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32); - break; - case INDEX_op_qemu_st_a32_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_qemu_st(s, args[0], -1, args[1], -1, - args[2], TCG_TYPE_I64); - } else { - tcg_out_qemu_st(s, args[0], args[1], args[2], -1, - args[3], TCG_TYPE_I64); + return; + } + + /* + * In the following, + * dep(a, b, m) -> (a & ~m) | (b & m) + * + * Begin with: src = xxxxabcd + */ + /* tmp = rol32(src, 24) & 0x000000ff = 0000000c */ + tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31); + /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00) = 000000dc */ + tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23); + + if (flags & TCG_BSWAP_OS) { + tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp); + } else { + tcg_out_mov(s, TCG_TYPE_REG, dst, tmp); + } +} + +static const TCGOutOpBswap outop_bswap16 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap16, +}; + +static void tgen_bswap32(TCGContext *s, TCGType type, + TCGReg dst, TCGReg src, unsigned flags) +{ + TCGReg tmp = dst == src ? 
TCG_REG_R0 : dst; + + if (have_isa_3_10) { + tcg_out32(s, BRW | RA(dst) | RS(src)); + if (flags & TCG_BSWAP_OS) { + tcg_out_ext32s(s, dst, dst); + } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + tcg_out_ext32u(s, dst, dst); } - break; - case INDEX_op_qemu_st_a64_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_qemu_st(s, args[0], -1, args[1], -1, - args[2], TCG_TYPE_I64); - } else { - tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], - args[4], TCG_TYPE_I64); + return; + } + + /* + * Stolen from gcc's builtin_bswap32. + * In the following, + * dep(a, b, m) -> (a & ~m) | (b & m) + * + * Begin with: src = xxxxabcd + */ + /* tmp = rol32(src, 8) & 0xffffffff = 0000bcda */ + tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31); + /* tmp = dep(tmp, rol32(src, 24), 0xff000000) = 0000dcda */ + tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7); + /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00) = 0000dcba */ + tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23); + + if (flags & TCG_BSWAP_OS) { + tcg_out_ext32s(s, dst, tmp); + } else { + tcg_out_mov(s, TCG_TYPE_REG, dst, tmp); + } +} + +static const TCGOutOpBswap outop_bswap32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap32, +}; + +#if TCG_TARGET_REG_BITS == 64 +static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg dst, TCGReg src) +{ + TCGReg t0 = dst == src ? TCG_REG_R0 : dst; + TCGReg t1 = dst == src ? dst : TCG_REG_R0; + + if (have_isa_3_10) { + tcg_out32(s, BRD | RA(dst) | RS(src)); + return; + } + + /* + * In the following, + * dep(a, b, m) -> (a & ~m) | (b & m) + * + * Begin with: src = abcdefgh + */ + /* t0 = rol32(src, 8) & 0xffffffff = 0000fghe */ + tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31); + /* t0 = dep(t0, rol32(src, 24), 0xff000000) = 0000hghe */ + tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7); + /* t0 = dep(t0, rol32(src, 24), 0x0000ff00) = 0000hgfe */ + tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23); + + /* t0 = rol64(t0, 32) = hgfe0000 */ + tcg_out_rld(s, RLDICL, t0, t0, 32, 0); + /* t1 = rol64(src, 32) = efghabcd */ + tcg_out_rld(s, RLDICL, t1, src, 32, 0); + + /* t0 = dep(t0, rol32(t1, 8), 0xffffffff) = hgfebcda */ + tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31); + /* t0 = dep(t0, rol32(t1, 24), 0xff000000) = hgfedcda */ + tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7); + /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00) = hgfedcba */ + tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23); + + tcg_out_mov(s, TCG_TYPE_REG, dst, t0); +} + +static const TCGOutOpUnary outop_bswap64 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap64, +}; +#endif /* TCG_TARGET_REG_BITS == 64 */ + +static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tcg_out32(s, NEG | RT(a0) | RA(a1)); +} + +static const TCGOutOpUnary outop_neg = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_neg, +}; + +static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tgen_nor(s, type, a0, a1, a1); +} + +static const TCGOutOpUnary outop_not = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_not, +}; + +static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + TCGReg a2, unsigned ofs, unsigned len) +{ + if (type == TCG_TYPE_I32) { + tcg_out_rlw(s, RLWIMI, a0, a2, ofs, 32 - ofs - len, 31 - ofs); + } else { + tcg_out_rld(s, RLDIMI, a0, a2, ofs, 64 - ofs - len); + } +} + +static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + tcg_target_long a2, unsigned ofs, unsigned len) +{ + tgen_andi(s, type, a0, a1, ~MAKE_64BIT_MASK(ofs, len)); +} + +static 
const TCGOutOpDeposit outop_deposit = { + .base.static_constraint = C_O1_I2(r, 0, rZ), + .out_rrr = tgen_deposit, + .out_rri = tgen_depositi, +}; + +static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + if (ofs == 0 && len <= 16) { + tgen_andi(s, TCG_TYPE_I32, a0, a1, (1 << len) - 1); + } else if (type == TCG_TYPE_I32) { + tcg_out_rlw(s, RLWINM, a0, a1, 32 - ofs, 32 - len, 31); + } else { + tcg_out_rld(s, RLDICL, a0, a1, 64 - ofs, 64 - len); + } +} + +static const TCGOutOpExtract outop_extract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extract, +}; + +static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + if (ofs == 0) { + switch (len) { + case 8: + tcg_out_ext8s(s, type, a0, a1); + return; + case 16: + tcg_out_ext16s(s, type, a0, a1); + return; + case 32: + tcg_out_ext32s(s, a0, a1); + return; } - break; - case INDEX_op_qemu_st_a32_i128: - case INDEX_op_qemu_st_a64_i128: - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); - tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false); - break; + } else if (ofs + len == 32) { + tcg_out_sari32(s, a0, a1, ofs); + return; + } + g_assert_not_reached(); +} + +static const TCGOutOpExtract outop_sextract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_sextract, +}; - case INDEX_op_setcond_i32: - tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2], - const_args[2], false); - break; - case INDEX_op_setcond_i64: - tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2], - const_args[2], false); - break; - case INDEX_op_negsetcond_i32: - tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2], - const_args[2], true); - break; - case INDEX_op_negsetcond_i64: - tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2], - const_args[2], true); - break; - case INDEX_op_setcond2_i32: - tcg_out_setcond2(s, args, const_args); - break; +static const TCGOutOpExtract2 outop_extract2 = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_bswap16_i32: - case INDEX_op_bswap16_i64: - tcg_out_bswap16(s, args[0], args[1], args[2]); - break; - case INDEX_op_bswap32_i32: - tcg_out_bswap32(s, args[0], args[1], 0); - break; - case INDEX_op_bswap32_i64: - tcg_out_bswap32(s, args[0], args[1], args[2]); - break; - case INDEX_op_bswap64_i64: - tcg_out_bswap64(s, args[0], args[1]); - break; +static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem_long(s, LBZ, LBZX, dest, base, offset); +} - case INDEX_op_deposit_i32: - if (const_args[2]) { - uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3]; - tcg_out_andi32(s, args[0], args[0], ~mask); - } else { - tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3], - 32 - args[3] - args[4], 31 - args[3]); - } - break; - case INDEX_op_deposit_i64: - if (const_args[2]) { - uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3]; - tcg_out_andi64(s, args[0], args[0], ~mask); - } else { - tcg_out_rld(s, RLDIMI, args[0], args[2], args[3], - 64 - args[3] - args[4]); - } - break; +static const TCGOutOpLoad outop_ld8u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8u, +}; - case INDEX_op_extract_i32: - tcg_out_rlw(s, RLWINM, args[0], args[1], - 32 - args[2], 32 - args[3], 31); - break; - case INDEX_op_extract_i64: - tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]); - break; +static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg 
dest, + TCGReg base, ptrdiff_t offset) +{ + tgen_ld8u(s, type, dest, base, offset); + tcg_out_ext8s(s, type, dest, dest); +} - case INDEX_op_movcond_i32: - tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2], - args[3], args[4], const_args[2]); - break; - case INDEX_op_movcond_i64: - tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2], - args[3], args[4], const_args[2]); - break; +static const TCGOutOpLoad outop_ld8s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8s, +}; -#if TCG_TARGET_REG_BITS == 64 - case INDEX_op_add2_i64: -#else - case INDEX_op_add2_i32: -#endif - /* Note that the CA bit is defined based on the word size of the - environment. So in 64-bit mode it's always carry-out of bit 63. - The fallback code using deposit works just as well for 32-bit. */ - a0 = args[0], a1 = args[1]; - if (a0 == args[3] || (!const_args[5] && a0 == args[5])) { - a0 = TCG_REG_R0; - } - if (const_args[4]) { - tcg_out32(s, ADDIC | TAI(a0, args[2], args[4])); - } else { - tcg_out32(s, ADDC | TAB(a0, args[2], args[4])); - } - if (const_args[5]) { - tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3])); - } else { - tcg_out32(s, ADDE | TAB(a1, args[3], args[5])); - } - if (a0 != args[0]) { - tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); - } - break; +static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem_long(s, LHZ, LHZX, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld16u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16u, +}; + +static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem_long(s, LHA, LHAX, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld16s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16s, +}; #if TCG_TARGET_REG_BITS == 64 - case INDEX_op_sub2_i64: -#else - case INDEX_op_sub2_i32: +static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem_long(s, LWZ, LWZX, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld32u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32u, +}; + +static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem_long(s, LWA, LWAX, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld32s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32s, +}; #endif - a0 = args[0], a1 = args[1]; - if (a0 == args[5] || (!const_args[3] && a0 == args[3])) { - a0 = TCG_REG_R0; - } - if (const_args[2]) { - tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2])); - } else { - tcg_out32(s, SUBFC | TAB(a0, args[4], args[2])); - } - if (const_args[3]) { - tcg_out32(s, (args[3] ? 
SUBFME : SUBFZE) | RT(a1) | RA(args[5])); - } else { - tcg_out32(s, SUBFE | TAB(a1, args[5], args[3])); - } - if (a0 != args[0]) { - tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); - } - break; - case INDEX_op_muluh_i32: - tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2])); - break; - case INDEX_op_mulsh_i32: - tcg_out32(s, MULHW | TAB(args[0], args[1], args[2])); - break; - case INDEX_op_muluh_i64: - tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2])); - break; - case INDEX_op_mulsh_i64: - tcg_out32(s, MULHD | TAB(args[0], args[1], args[2])); - break; +static void tgen_st8(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem_long(s, STB, STBX, data, base, offset); +} - case INDEX_op_mb: - tcg_out_mb(s, args[0]); - break; +static const TCGOutOpStore outop_st8 = { + .base.static_constraint = C_O0_I2(r, r), + .out_r = tgen_st8, +}; - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_mov_i64: - case INDEX_op_call: /* Always emitted via tcg_out_call. */ - case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ - case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ - case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ - case INDEX_op_ext8s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extrl_i64_i32: - default: - g_assert_not_reached(); - } +static void tgen_st16(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem_long(s, STH, STHX, data, base, offset); } +static const TCGOutOpStore outop_st16 = { + .base.static_constraint = C_O0_I2(r, r), + .out_r = tgen_st16, +}; + +static const TCGOutOpStore outop_st = { + .base.static_constraint = C_O0_I2(r, r), + .out_r = tcg_out_st, +}; + + int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) { switch (opc) { @@ -3568,12 +3858,14 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_usadd_vec: case INDEX_op_ussub_vec: return vece <= MO_32; - case INDEX_op_cmp_vec: case INDEX_op_shli_vec: case INDEX_op_shri_vec: case INDEX_op_sari_vec: case INDEX_op_rotli_vec: return vece <= MO_32 || have_isa_2_07 ? -1 : 0; + case INDEX_op_cmp_vec: + case INDEX_op_cmpsel_vec: + return vece <= MO_32 || have_isa_2_07 ? 
1 : 0; case INDEX_op_neg_vec: return vece >= MO_32 && have_isa_3_00; case INDEX_op_mul_vec: @@ -3714,6 +4006,149 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, return true; } +static void tcg_out_not_vec(TCGContext *s, TCGReg a0, TCGReg a1) +{ + tcg_out32(s, VNOR | VRT(a0) | VRA(a1) | VRB(a1)); +} + +static void tcg_out_or_vec(TCGContext *s, TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, VOR | VRT(a0) | VRA(a1) | VRB(a2)); +} + +static void tcg_out_orc_vec(TCGContext *s, TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, VORC | VRT(a0) | VRA(a1) | VRB(a2)); +} + +static void tcg_out_and_vec(TCGContext *s, TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, VAND | VRT(a0) | VRA(a1) | VRB(a2)); +} + +static void tcg_out_andc_vec(TCGContext *s, TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out32(s, VANDC | VRT(a0) | VRA(a1) | VRB(a2)); +} + +static void tcg_out_bitsel_vec(TCGContext *s, TCGReg d, + TCGReg c, TCGReg t, TCGReg f) +{ + if (TCG_TARGET_HAS_bitsel_vec) { + tcg_out32(s, XXSEL | VRT(d) | VRC(c) | VRB(t) | VRA(f)); + } else { + tcg_out_and_vec(s, TCG_VEC_TMP2, t, c); + tcg_out_andc_vec(s, d, f, c); + tcg_out_or_vec(s, d, d, TCG_VEC_TMP2); + } +} + +static bool tcg_out_cmp_vec_noinv(TCGContext *s, unsigned vece, TCGReg a0, + TCGReg a1, TCGReg a2, TCGCond cond) +{ + static const uint32_t + eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD }, + ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 }, + gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD }, + gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD }; + uint32_t insn; + + bool need_swap = false, need_inv = false; + + tcg_debug_assert(vece <= MO_32 || have_isa_2_07); + + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_GT: + case TCG_COND_GTU: + break; + case TCG_COND_NE: + if (have_isa_3_00 && vece <= MO_32) { + break; + } + /* fall through */ + case TCG_COND_LE: + case TCG_COND_LEU: + need_inv = true; + break; + case TCG_COND_LT: + case TCG_COND_LTU: + need_swap = true; + break; + case TCG_COND_GE: + case TCG_COND_GEU: + need_swap = need_inv = true; + break; + default: + g_assert_not_reached(); + } + + if (need_inv) { + cond = tcg_invert_cond(cond); + } + if (need_swap) { + TCGReg swap = a1; + a1 = a2; + a2 = swap; + cond = tcg_swap_cond(cond); + } + + switch (cond) { + case TCG_COND_EQ: + insn = eq_op[vece]; + break; + case TCG_COND_NE: + insn = ne_op[vece]; + break; + case TCG_COND_GT: + insn = gts_op[vece]; + break; + case TCG_COND_GTU: + insn = gtu_op[vece]; + break; + default: + g_assert_not_reached(); + } + tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2)); + + return need_inv; +} + +static void tcg_out_cmp_vec(TCGContext *s, unsigned vece, TCGReg a0, + TCGReg a1, TCGReg a2, TCGCond cond) +{ + if (tcg_out_cmp_vec_noinv(s, vece, a0, a1, a2, cond)) { + tcg_out_not_vec(s, a0, a0); + } +} + +static void tcg_out_cmpsel_vec(TCGContext *s, unsigned vece, TCGReg a0, + TCGReg c1, TCGReg c2, TCGArg v3, int const_v3, + TCGReg v4, TCGCond cond) +{ + bool inv = tcg_out_cmp_vec_noinv(s, vece, TCG_VEC_TMP1, c1, c2, cond); + + if (!const_v3) { + if (inv) { + tcg_out_bitsel_vec(s, a0, TCG_VEC_TMP1, v4, v3); + } else { + tcg_out_bitsel_vec(s, a0, TCG_VEC_TMP1, v3, v4); + } + } else if (v3) { + if (inv) { + tcg_out_orc_vec(s, a0, v4, TCG_VEC_TMP1); + } else { + tcg_out_or_vec(s, a0, v4, TCG_VEC_TMP1); + } + } else { + if (inv) { + tcg_out_and_vec(s, a0, v4, TCG_VEC_TMP1); + } else { + tcg_out_andc_vec(s, a0, v4, TCG_VEC_TMP1); + } + } +} + static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl, 
unsigned vece, const TCGArg args[TCG_MAX_OP_ARGS], @@ -3724,10 +4159,6 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM }, mul_op[4] = { 0, 0, VMULUWM, VMULLD }, neg_op[4] = { 0, 0, VNEGW, VNEGD }, - eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD }, - ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 }, - gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD }, - gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD }, ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 }, usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 }, sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 }, @@ -3809,24 +4240,23 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, insn = sarv_op[vece]; break; case INDEX_op_and_vec: - insn = VAND; - break; + tcg_out_and_vec(s, a0, a1, a2); + return; case INDEX_op_or_vec: - insn = VOR; - break; + tcg_out_or_vec(s, a0, a1, a2); + return; case INDEX_op_xor_vec: insn = VXOR; break; case INDEX_op_andc_vec: - insn = VANDC; - break; + tcg_out_andc_vec(s, a0, a1, a2); + return; case INDEX_op_not_vec: - insn = VNOR; - a2 = a1; - break; + tcg_out_not_vec(s, a0, a1); + return; case INDEX_op_orc_vec: - insn = VORC; - break; + tcg_out_orc_vec(s, a0, a1, a2); + return; case INDEX_op_nand_vec: insn = VNAND; break; @@ -3838,26 +4268,14 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_cmp_vec: - switch (args[3]) { - case TCG_COND_EQ: - insn = eq_op[vece]; - break; - case TCG_COND_NE: - insn = ne_op[vece]; - break; - case TCG_COND_GT: - insn = gts_op[vece]; - break; - case TCG_COND_GTU: - insn = gtu_op[vece]; - break; - default: - g_assert_not_reached(); - } - break; - + tcg_out_cmp_vec(s, vece, a0, a1, a2, args[3]); + return; + case INDEX_op_cmpsel_vec: + tcg_out_cmpsel_vec(s, vece, a0, a1, a2, + args[3], const_args[3], args[4], args[5]); + return; case INDEX_op_bitsel_vec: - tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3])); + tcg_out_bitsel_vec(s, a0, a1, a2, args[3]); return; case INDEX_op_dup2_vec: @@ -3922,56 +4340,6 @@ static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0, tcgv_vec_arg(v1), tcgv_vec_arg(t1)); } -static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0, - TCGv_vec v1, TCGv_vec v2, TCGCond cond) -{ - bool need_swap = false, need_inv = false; - - tcg_debug_assert(vece <= MO_32 || have_isa_2_07); - - switch (cond) { - case TCG_COND_EQ: - case TCG_COND_GT: - case TCG_COND_GTU: - break; - case TCG_COND_NE: - if (have_isa_3_00 && vece <= MO_32) { - break; - } - /* fall through */ - case TCG_COND_LE: - case TCG_COND_LEU: - need_inv = true; - break; - case TCG_COND_LT: - case TCG_COND_LTU: - need_swap = true; - break; - case TCG_COND_GE: - case TCG_COND_GEU: - need_swap = need_inv = true; - break; - default: - g_assert_not_reached(); - } - - if (need_inv) { - cond = tcg_invert_cond(cond); - } - if (need_swap) { - TCGv_vec t1; - t1 = v1, v1 = v2, v2 = t1; - cond = tcg_swap_cond(cond); - } - - vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0), - tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond); - - if (need_inv) { - tcg_gen_not_vec(vece, v0, v0); - } -} - static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGv_vec v2) { @@ -4046,10 +4414,6 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, case INDEX_op_rotli_vec: expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec); break; - case INDEX_op_cmp_vec: - v2 = temp_tcgv_vec(arg_temp(a2)); - expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg)); - break; 
case INDEX_op_mul_vec: v2 = temp_tcgv_vec(arg_temp(a2)); expand_vec_mul(type, vece, v0, v1, v2); @@ -4067,165 +4431,15 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, va_end(va); } -static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) +static TCGConstraintSetIndex +tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) { switch (op) { - case INDEX_op_goto_ptr: - return C_O0_I1(r); - - case INDEX_op_ld8u_i32: - case INDEX_op_ld8s_i32: - case INDEX_op_ld16u_i32: - case INDEX_op_ld16s_i32: - case INDEX_op_ld_i32: - case INDEX_op_ctpop_i32: - case INDEX_op_neg_i32: - case INDEX_op_not_i32: - case INDEX_op_ext8s_i32: - case INDEX_op_ext16s_i32: - case INDEX_op_bswap16_i32: - case INDEX_op_bswap32_i32: - case INDEX_op_extract_i32: - case INDEX_op_ld8u_i64: - case INDEX_op_ld8s_i64: - case INDEX_op_ld16u_i64: - case INDEX_op_ld16s_i64: - case INDEX_op_ld32u_i64: - case INDEX_op_ld32s_i64: - case INDEX_op_ld_i64: - case INDEX_op_ctpop_i64: - case INDEX_op_neg_i64: - case INDEX_op_not_i64: - case INDEX_op_ext8s_i64: - case INDEX_op_ext16s_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_bswap16_i64: - case INDEX_op_bswap32_i64: - case INDEX_op_bswap64_i64: - case INDEX_op_extract_i64: - return C_O1_I1(r, r); - - case INDEX_op_st8_i32: - case INDEX_op_st16_i32: - case INDEX_op_st_i32: - case INDEX_op_st8_i64: - case INDEX_op_st16_i64: - case INDEX_op_st32_i64: - case INDEX_op_st_i64: - return C_O0_I2(r, r); - - case INDEX_op_add_i32: - case INDEX_op_and_i32: - case INDEX_op_or_i32: - case INDEX_op_xor_i32: - case INDEX_op_andc_i32: - case INDEX_op_orc_i32: - case INDEX_op_eqv_i32: - case INDEX_op_shl_i32: - case INDEX_op_shr_i32: - case INDEX_op_sar_i32: - case INDEX_op_rotl_i32: - case INDEX_op_rotr_i32: - case INDEX_op_and_i64: - case INDEX_op_andc_i64: - case INDEX_op_shl_i64: - case INDEX_op_shr_i64: - case INDEX_op_sar_i64: - case INDEX_op_rotl_i64: - case INDEX_op_rotr_i64: - return C_O1_I2(r, r, ri); - - case INDEX_op_mul_i32: - case INDEX_op_mul_i64: - return C_O1_I2(r, r, rI); - - case INDEX_op_div_i32: - case INDEX_op_divu_i32: - case INDEX_op_rem_i32: - case INDEX_op_remu_i32: - case INDEX_op_nand_i32: - case INDEX_op_nor_i32: - case INDEX_op_muluh_i32: - case INDEX_op_mulsh_i32: - case INDEX_op_orc_i64: - case INDEX_op_eqv_i64: - case INDEX_op_nand_i64: - case INDEX_op_nor_i64: - case INDEX_op_div_i64: - case INDEX_op_divu_i64: - case INDEX_op_rem_i64: - case INDEX_op_remu_i64: - case INDEX_op_mulsh_i64: - case INDEX_op_muluh_i64: - return C_O1_I2(r, r, r); - - case INDEX_op_sub_i32: - return C_O1_I2(r, rI, ri); - case INDEX_op_add_i64: - return C_O1_I2(r, r, rT); - case INDEX_op_or_i64: - case INDEX_op_xor_i64: - return C_O1_I2(r, r, rU); - case INDEX_op_sub_i64: - return C_O1_I2(r, rI, rT); - case INDEX_op_clz_i32: - case INDEX_op_ctz_i32: - case INDEX_op_clz_i64: - case INDEX_op_ctz_i64: - return C_O1_I2(r, r, rZW); - - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: - return C_O0_I2(r, rC); - case INDEX_op_setcond_i32: - case INDEX_op_setcond_i64: - case INDEX_op_negsetcond_i32: - case INDEX_op_negsetcond_i64: - return C_O1_I2(r, r, rC); - case INDEX_op_movcond_i32: - case INDEX_op_movcond_i64: - return C_O1_I4(r, r, rC, rZ, rZ); - - case INDEX_op_deposit_i32: - case INDEX_op_deposit_i64: - return C_O1_I2(r, 0, rZ); - case INDEX_op_brcond2_i32: - return C_O0_I4(r, r, ri, ri); - case INDEX_op_setcond2_i32: - return C_O1_I4(r, r, r, ri, ri); - case INDEX_op_add2_i64: - case 
INDEX_op_add2_i32: - return C_O2_I4(r, r, r, r, rI, rZM); - case INDEX_op_sub2_i64: - case INDEX_op_sub2_i32: - return C_O2_I4(r, r, rI, rZM, r, r); - - case INDEX_op_qemu_ld_a32_i32: - return C_O1_I1(r, r); - case INDEX_op_qemu_ld_a64_i32: - return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r); - case INDEX_op_qemu_ld_a32_i64: - return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r); - case INDEX_op_qemu_ld_a64_i64: - return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r); - - case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st: return C_O0_I2(r, r); - case INDEX_op_qemu_st_a64_i32: - return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r); - case INDEX_op_qemu_st_a32_i64: - return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r); - case INDEX_op_qemu_st_a64_i64: - return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r); - - case INDEX_op_qemu_ld_a32_i128: - case INDEX_op_qemu_ld_a64_i128: - return C_N1O1_I1(o, m, r); - case INDEX_op_qemu_st_a32_i128: - case INDEX_op_qemu_st_a64_i128: - return C_O0_I3(o, m, r); + case INDEX_op_qemu_st2: + return TCG_TARGET_REG_BITS == 64 + ? C_O0_I3(o, m, r) : C_O0_I3(r, r, r); case INDEX_op_add_vec: case INDEX_op_sub_vec: @@ -4277,9 +4491,11 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_bitsel_vec: case INDEX_op_ppc_msum_vec: return C_O1_I3(v, v, v, v); + case INDEX_op_cmpsel_vec: + return C_O1_I4(v, v, v, vZM, v); default: - g_assert_not_reached(); + return C_NotImplemented; } } diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index e154fb14df0..5607634e995 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -25,8 +25,6 @@ #ifndef PPC_TCG_TARGET_H #define PPC_TCG_TARGET_H -#include "host/cpuinfo.h" - #define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) #define TCG_TARGET_NB_REGS 64 @@ -55,128 +53,4 @@ typedef enum { TCG_AREG0 = TCG_REG_R27 } TCGReg; -typedef enum { - tcg_isa_base, - tcg_isa_2_06, - tcg_isa_2_07, - tcg_isa_3_00, - tcg_isa_3_10, -} TCGPowerISA; - -#define have_isa_2_06 (cpuinfo & CPUINFO_V2_06) -#define have_isa_2_07 (cpuinfo & CPUINFO_V2_07) -#define have_isa_3_00 (cpuinfo & CPUINFO_V3_0) -#define have_isa_3_10 (cpuinfo & CPUINFO_V3_1) -#define have_altivec (cpuinfo & CPUINFO_ALTIVEC) -#define have_vsx (cpuinfo & CPUINFO_VSX) - -/* optional instructions automatically implemented */ -#define TCG_TARGET_HAS_ext8u_i32 0 /* andi */ -#define TCG_TARGET_HAS_ext16u_i32 0 - -/* optional instructions */ -#define TCG_TARGET_HAS_div_i32 1 -#define TCG_TARGET_HAS_rem_i32 have_isa_3_00 -#define TCG_TARGET_HAS_rot_i32 1 -#define TCG_TARGET_HAS_ext8s_i32 1 -#define TCG_TARGET_HAS_ext16s_i32 1 -#define TCG_TARGET_HAS_bswap16_i32 1 -#define TCG_TARGET_HAS_bswap32_i32 1 -#define TCG_TARGET_HAS_not_i32 1 -#define TCG_TARGET_HAS_andc_i32 1 -#define TCG_TARGET_HAS_orc_i32 1 -#define TCG_TARGET_HAS_eqv_i32 1 -#define TCG_TARGET_HAS_nand_i32 1 -#define TCG_TARGET_HAS_nor_i32 1 -#define TCG_TARGET_HAS_clz_i32 1 -#define TCG_TARGET_HAS_ctz_i32 have_isa_3_00 -#define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06 -#define TCG_TARGET_HAS_deposit_i32 1 -#define TCG_TARGET_HAS_extract_i32 1 -#define TCG_TARGET_HAS_sextract_i32 0 -#define TCG_TARGET_HAS_extract2_i32 0 -#define TCG_TARGET_HAS_negsetcond_i32 1 -#define TCG_TARGET_HAS_mulu2_i32 0 -#define TCG_TARGET_HAS_muls2_i32 0 -#define TCG_TARGET_HAS_muluh_i32 1 -#define TCG_TARGET_HAS_mulsh_i32 1 -#define TCG_TARGET_HAS_qemu_st8_i32 0 - -#if TCG_TARGET_REG_BITS == 64 -#define 
TCG_TARGET_HAS_add2_i32 0 -#define TCG_TARGET_HAS_sub2_i32 0 -#define TCG_TARGET_HAS_extr_i64_i32 0 -#define TCG_TARGET_HAS_div_i64 1 -#define TCG_TARGET_HAS_rem_i64 have_isa_3_00 -#define TCG_TARGET_HAS_rot_i64 1 -#define TCG_TARGET_HAS_ext8s_i64 1 -#define TCG_TARGET_HAS_ext16s_i64 1 -#define TCG_TARGET_HAS_ext32s_i64 1 -#define TCG_TARGET_HAS_ext8u_i64 0 -#define TCG_TARGET_HAS_ext16u_i64 0 -#define TCG_TARGET_HAS_ext32u_i64 0 -#define TCG_TARGET_HAS_bswap16_i64 1 -#define TCG_TARGET_HAS_bswap32_i64 1 -#define TCG_TARGET_HAS_bswap64_i64 1 -#define TCG_TARGET_HAS_not_i64 1 -#define TCG_TARGET_HAS_andc_i64 1 -#define TCG_TARGET_HAS_orc_i64 1 -#define TCG_TARGET_HAS_eqv_i64 1 -#define TCG_TARGET_HAS_nand_i64 1 -#define TCG_TARGET_HAS_nor_i64 1 -#define TCG_TARGET_HAS_clz_i64 1 -#define TCG_TARGET_HAS_ctz_i64 have_isa_3_00 -#define TCG_TARGET_HAS_ctpop_i64 have_isa_2_06 -#define TCG_TARGET_HAS_deposit_i64 1 -#define TCG_TARGET_HAS_extract_i64 1 -#define TCG_TARGET_HAS_sextract_i64 0 -#define TCG_TARGET_HAS_extract2_i64 0 -#define TCG_TARGET_HAS_negsetcond_i64 1 -#define TCG_TARGET_HAS_add2_i64 1 -#define TCG_TARGET_HAS_sub2_i64 1 -#define TCG_TARGET_HAS_mulu2_i64 0 -#define TCG_TARGET_HAS_muls2_i64 0 -#define TCG_TARGET_HAS_muluh_i64 1 -#define TCG_TARGET_HAS_mulsh_i64 1 -#endif - -#define TCG_TARGET_HAS_qemu_ldst_i128 \ - (TCG_TARGET_REG_BITS == 64 && have_isa_2_07) - -#define TCG_TARGET_HAS_tst 1 - -/* - * While technically Altivec could support V64, it has no 64-bit store - * instruction and substituting two 32-bit stores makes the generated - * code quite large. - */ -#define TCG_TARGET_HAS_v64 have_vsx -#define TCG_TARGET_HAS_v128 have_altivec -#define TCG_TARGET_HAS_v256 0 - -#define TCG_TARGET_HAS_andc_vec 1 -#define TCG_TARGET_HAS_orc_vec have_isa_2_07 -#define TCG_TARGET_HAS_nand_vec have_isa_2_07 -#define TCG_TARGET_HAS_nor_vec 1 -#define TCG_TARGET_HAS_eqv_vec have_isa_2_07 -#define TCG_TARGET_HAS_not_vec 1 -#define TCG_TARGET_HAS_neg_vec have_isa_3_00 -#define TCG_TARGET_HAS_abs_vec 0 -#define TCG_TARGET_HAS_roti_vec 0 -#define TCG_TARGET_HAS_rots_vec 0 -#define TCG_TARGET_HAS_rotv_vec 1 -#define TCG_TARGET_HAS_shi_vec 0 -#define TCG_TARGET_HAS_shs_vec 0 -#define TCG_TARGET_HAS_shv_vec 1 -#define TCG_TARGET_HAS_mul_vec 1 -#define TCG_TARGET_HAS_sat_vec 1 -#define TCG_TARGET_HAS_minmax_vec 1 -#define TCG_TARGET_HAS_bitsel_vec have_vsx -#define TCG_TARGET_HAS_cmpsel_vec 0 -#define TCG_TARGET_HAS_tst_vec 0 - -#define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_NEED_LDST_LABELS -#define TCG_TARGET_NEED_POOL_LABELS - #endif diff --git a/tcg/ppc/tcg-target.opc.h b/tcg/ppc/tcg-target.opc.h deleted file mode 100644 index db514403c3f..00000000000 --- a/tcg/ppc/tcg-target.opc.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2019 Linaro Limited - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - * - * Target-specific opcodes for host vector expansion. These will be - * emitted by tcg_expand_vec_op. For those familiar with GCC internals, - * consider these to be UNSPEC with names. - */ - -DEF(ppc_mrgh_vec, 1, 2, 0, IMPLVEC) -DEF(ppc_mrgl_vec, 1, 2, 0, IMPLVEC) -DEF(ppc_msum_vec, 1, 3, 0, IMPLVEC) -DEF(ppc_muleu_vec, 1, 2, 0, IMPLVEC) -DEF(ppc_mulou_vec, 1, 2, 0, IMPLVEC) -DEF(ppc_pkum_vec, 1, 2, 0, IMPLVEC) diff --git a/tcg/region.c b/tcg/region.c index 478ec051c4b..7ea0b37a84c 100644 --- a/tcg/region.c +++ b/tcg/region.c @@ -422,7 +422,7 @@ void tcg_region_reset_all(void) tcg_region_tree_reset_all(); } -static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus) +static size_t tcg_n_regions(size_t tb_size, unsigned max_threads) { #ifdef CONFIG_USER_ONLY return 1; @@ -431,24 +431,25 @@ static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus) /* * It is likely that some vCPUs will translate more code than others, - * so we first try to set more regions than max_cpus, with those regions + * so we first try to set more regions than threads, with those regions * being of reasonable size. If that's not possible we make do by evenly * dividing the code_gen_buffer among the vCPUs. + * + * Use a single region if all we have is one vCPU thread. */ - /* Use a single region if all we have is one vCPU thread */ - if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) { + if (max_threads == 1) { return 1; } /* - * Try to have more regions than max_cpus, with each region being >= 2 MB. + * Try to have more regions than threads, with each region being >= 2 MB. * If we can't, then just allocate one region per vCPU thread. */ n_regions = tb_size / (2 * MiB); - if (n_regions <= max_cpus) { - return max_cpus; + if (n_regions <= max_threads) { + return max_threads; } - return MIN(n_regions, max_cpus * 8); + return MIN(n_regions, max_threads * 8); #endif } @@ -731,11 +732,7 @@ static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp) * and then assigning regions to TCG threads so that the threads can translate * code in parallel without synchronization. * - * In system-mode the number of TCG threads is bounded by max_cpus, so we use at - * least max_cpus regions in MTTCG. In !MTTCG we use a single region. - * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...]) - * must have been parsed before calling this function, since it calls - * qemu_tcg_mttcg_enabled(). + * In system-mode the number of TCG threads is bounded by max_threads, * * In user-mode we use a single region. Having multiple regions in user-mode * is not supported, because the number of vCPU threads (recall that each thread @@ -749,7 +746,7 @@ static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp) * in practice. 
Multi-threaded guests share most if not all of their translated * code, which makes parallel code generation less appealing than in system-mode */ -void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus) +void tcg_region_init(size_t tb_size, int splitwx, unsigned max_threads) { const size_t page_size = qemu_real_host_page_size(); size_t region_size; @@ -787,7 +784,7 @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus) * As a result of this we might end up with a few extra pages at the end of * the buffer; we will assign those to the last region. */ - region.n = tcg_n_regions(tb_size, max_cpus); + region.n = tcg_n_regions(tb_size, max_threads); region_size = tb_size / region.n; region_size = QEMU_ALIGN_DOWN(region_size, page_size); diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h index aac5ceee2b6..0fc26d3f98b 100644 --- a/tcg/riscv/tcg-target-con-set.h +++ b/tcg/riscv/tcg-target-con-set.h @@ -10,14 +10,20 @@ * tcg-target-con-str.h; the constraint combination is inclusive or. */ C_O0_I1(r) -C_O0_I2(rZ, r) -C_O0_I2(rZ, rZ) +C_O0_I2(rz, r) +C_O0_I2(r, rz) C_O1_I1(r, r) +C_O1_I2(r, r, r) C_O1_I2(r, r, ri) C_O1_I2(r, r, rI) -C_O1_I2(r, r, rJ) -C_O1_I2(r, rZ, rN) -C_O1_I2(r, rZ, rZ) C_N1_I2(r, r, rM) C_O1_I4(r, r, rI, rM, rM) -C_O2_I4(r, r, rZ, rZ, rM, rM) +C_O0_I2(v, r) +C_O1_I1(v, r) +C_O1_I1(v, v) +C_O1_I2(v, v, r) +C_O1_I2(v, v, v) +C_O1_I2(v, vK, v) +C_O1_I2(v, v, vK) +C_O1_I2(v, v, vL) +C_O1_I4(v, v, vL, vK, vK) diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h index d5c419dff19..c04e15ddfa3 100644 --- a/tcg/riscv/tcg-target-con-str.h +++ b/tcg/riscv/tcg-target-con-str.h @@ -9,13 +9,13 @@ * REGS(letter, register_mask) */ REGS('r', ALL_GENERAL_REGS) +REGS('v', ALL_VECTOR_REGS) /* * Define constraint letters for constants: * CONST(letter, TCG_CT_CONST_* bit set) */ CONST('I', TCG_CT_CONST_S12) -CONST('J', TCG_CT_CONST_J12) -CONST('N', TCG_CT_CONST_N12) +CONST('K', TCG_CT_CONST_S5) +CONST('L', TCG_CT_CONST_CMP_VI) CONST('M', TCG_CT_CONST_M12) -CONST('Z', TCG_CT_CONST_ZERO) diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h new file mode 100644 index 00000000000..aef10c2d9d6 --- /dev/null +++ b/tcg/riscv/tcg-target-has.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific opcode support + * Copyright (c) 2018 SiFive, Inc + */ + +#ifndef TCG_TARGET_HAS_H +#define TCG_TARGET_HAS_H + +#include "host/cpuinfo.h" + +/* optional instructions */ +#define TCG_TARGET_HAS_extr_i64_i32 1 +#define TCG_TARGET_HAS_qemu_ldst_i128 0 +#define TCG_TARGET_HAS_tst 0 + +/* vector instructions */ +#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_ZVE64X) +#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_ZVE64X) +#define TCG_TARGET_HAS_v256 (cpuinfo & CPUINFO_ZVE64X) +#define TCG_TARGET_HAS_andc_vec 0 +#define TCG_TARGET_HAS_orc_vec 0 +#define TCG_TARGET_HAS_nand_vec 0 +#define TCG_TARGET_HAS_nor_vec 0 +#define TCG_TARGET_HAS_eqv_vec 0 +#define TCG_TARGET_HAS_not_vec 1 +#define TCG_TARGET_HAS_neg_vec 1 +#define TCG_TARGET_HAS_abs_vec 0 +#define TCG_TARGET_HAS_roti_vec 1 +#define TCG_TARGET_HAS_rots_vec 1 +#define TCG_TARGET_HAS_rotv_vec 1 +#define TCG_TARGET_HAS_shi_vec 1 +#define TCG_TARGET_HAS_shs_vec 1 +#define TCG_TARGET_HAS_shv_vec 1 +#define TCG_TARGET_HAS_mul_vec 1 +#define TCG_TARGET_HAS_sat_vec 1 +#define TCG_TARGET_HAS_minmax_vec 1 +#define TCG_TARGET_HAS_bitsel_vec 0 +#define TCG_TARGET_HAS_cmpsel_vec 1 + +#define TCG_TARGET_HAS_tst_vec 0 + +static inline bool 
+tcg_target_extract_valid(TCGType type, unsigned ofs, unsigned len) +{ + if (type == TCG_TYPE_I64 && ofs + len == 32) { + /* ofs > 0 uses SRLIW; ofs == 0 uses add.uw. */ + return ofs || (cpuinfo & CPUINFO_ZBA); + } + switch (len) { + case 1: + return (cpuinfo & CPUINFO_ZBS) && ofs != 0; + case 16: + return (cpuinfo & CPUINFO_ZBB) && ofs == 0; + } + return false; +} +#define TCG_TARGET_extract_valid tcg_target_extract_valid + +static inline bool +tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len) +{ + if (type == TCG_TYPE_I64 && ofs + len == 32) { + return true; + } + return (cpuinfo & CPUINFO_ZBB) && ofs == 0 && (len == 8 || len == 16); +} +#define TCG_TARGET_sextract_valid tcg_target_sextract_valid + +#define TCG_TARGET_deposit_valid(type, ofs, len) 0 + +#endif diff --git a/tcg/riscv/tcg-target-mo.h b/tcg/riscv/tcg-target-mo.h new file mode 100644 index 00000000000..691b5d0da80 --- /dev/null +++ b/tcg/riscv/tcg-target-mo.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific memory model + * Copyright (c) 2018 SiFive, Inc + */ + +#ifndef TCG_TARGET_MO_H +#define TCG_TARGET_MO_H + +#define TCG_TARGET_DEFAULT_MO 0 + +#endif diff --git a/tcg/riscv/tcg-target-opc.h.inc b/tcg/riscv/tcg-target-opc.h.inc new file mode 100644 index 00000000000..b80b39e1e5d --- /dev/null +++ b/tcg/riscv/tcg-target-opc.h.inc @@ -0,0 +1,12 @@ +/* + * Copyright (c) C-SKY Microsystems Co., Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. + * + * See the COPYING file in the top-level directory for details. + * + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, + * consider these to be UNSPEC with names. + */ diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index d334857226f..31b9f7d87a0 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -27,43 +27,25 @@ * THE SOFTWARE. */ -#include "../tcg-ldst.c.inc" -#include "../tcg-pool.c.inc" +/* Used for function call generation. */ +#define TCG_REG_CALL_STACK TCG_REG_SP +#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_CALL_STACK_OFFSET 0 +#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { - "zero", - "ra", - "sp", - "gp", - "tp", - "t0", - "t1", - "t2", - "s0", - "s1", - "a0", - "a1", - "a2", - "a3", - "a4", - "a5", - "a6", - "a7", - "s2", - "s3", - "s4", - "s5", - "s6", - "s7", - "s8", - "s9", - "s10", - "s11", - "t3", - "t4", - "t5", - "t6" + "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", + "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", + "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", + "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6", + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", }; #endif @@ -100,6 +82,16 @@ static const int tcg_target_reg_alloc_order[] = { TCG_REG_A5, TCG_REG_A6, TCG_REG_A7, + + /* Vector registers and TCG_REG_V0 reserved for mask. 
*/ + TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, TCG_REG_V4, + TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, TCG_REG_V8, + TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, TCG_REG_V12, + TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, TCG_REG_V16, + TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, TCG_REG_V20, + TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, TCG_REG_V24, + TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, TCG_REG_V28, + TCG_REG_V29, TCG_REG_V30, TCG_REG_V31, }; static const int tcg_target_call_iarg_regs[] = { @@ -120,62 +112,47 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) return TCG_REG_A0 + slot; } -#define TCG_CT_CONST_ZERO 0x100 -#define TCG_CT_CONST_S12 0x200 -#define TCG_CT_CONST_N12 0x400 -#define TCG_CT_CONST_M12 0x800 -#define TCG_CT_CONST_J12 0x1000 +#define TCG_CT_CONST_S12 0x100 +#define TCG_CT_CONST_M12 0x200 +#define TCG_CT_CONST_S5 0x400 +#define TCG_CT_CONST_CMP_VI 0x800 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) +#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) +#define ALL_DVECTOR_REG_GROUPS 0x5555555500000000 +#define ALL_QVECTOR_REG_GROUPS 0x1111111100000000 #define sextreg sextract64 -/* test if a constant matches the constraint */ -static bool tcg_target_const_match(int64_t val, int ct, - TCGType type, TCGCond cond, int vece) -{ - if (ct & TCG_CT_CONST) { - return 1; - } - if ((ct & TCG_CT_CONST_ZERO) && val == 0) { - return 1; - } - /* - * Sign extended from 12 bits: [-0x800, 0x7ff]. - * Used for most arithmetic, as this is the isa field. - */ - if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) { - return 1; - } - /* - * Sign extended from 12 bits, negated: [-0x7ff, 0x800]. - * Used for subtraction, where a constant must be handled by ADDI. - */ - if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) { - return 1; - } - /* - * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff]. - * Used by addsub2 and movcond, which may need the negative value, - * and requires the modified constant to be representable. - */ - if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) { - return 1; - } - /* - * Inverse of sign extended from 12 bits: ~[-0x800, 0x7ff]. - * Used to map ANDN back to ANDI, etc. 
- */ - if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) { - return 1; - } - return 0; -} - /* * RISC-V Base ISA opcodes (IM) */ +#define V_OPIVV (0x0 << 12) +#define V_OPFVV (0x1 << 12) +#define V_OPMVV (0x2 << 12) +#define V_OPIVI (0x3 << 12) +#define V_OPIVX (0x4 << 12) +#define V_OPFVF (0x5 << 12) +#define V_OPMVX (0x6 << 12) +#define V_OPCFG (0x7 << 12) + +/* NF <= 7 && NF >= 0 */ +#define V_NF(x) (x << 29) +#define V_UNIT_STRIDE (0x0 << 20) +#define V_UNIT_STRIDE_WHOLE_REG (0x8 << 20) + +typedef enum { + VLMUL_M1 = 0, /* LMUL=1 */ + VLMUL_M2, /* LMUL=2 */ + VLMUL_M4, /* LMUL=4 */ + VLMUL_M8, /* LMUL=8 */ + VLMUL_RESERVED, + VLMUL_MF8, /* LMUL=1/8 */ + VLMUL_MF4, /* LMUL=1/4 */ + VLMUL_MF2, /* LMUL=1/2 */ +} RISCVVlmul; + typedef enum { OPC_ADD = 0x33, OPC_ADDI = 0x13, @@ -183,6 +160,7 @@ typedef enum { OPC_ANDI = 0x7013, OPC_AUIPC = 0x17, OPC_BEQ = 0x63, + OPC_BEXTI = 0x48005013, OPC_BGE = 0x5063, OPC_BGEU = 0x7063, OPC_BLT = 0x4063, @@ -271,8 +249,182 @@ typedef enum { /* Zicond: integer conditional operations */ OPC_CZERO_EQZ = 0x0e005033, OPC_CZERO_NEZ = 0x0e007033, + + /* V: Vector extension 1.0 */ + OPC_VSETVLI = 0x57 | V_OPCFG, + OPC_VSETIVLI = 0xc0000057 | V_OPCFG, + OPC_VSETVL = 0x80000057 | V_OPCFG, + + OPC_VLE8_V = 0x7 | V_UNIT_STRIDE, + OPC_VLE16_V = 0x5007 | V_UNIT_STRIDE, + OPC_VLE32_V = 0x6007 | V_UNIT_STRIDE, + OPC_VLE64_V = 0x7007 | V_UNIT_STRIDE, + OPC_VSE8_V = 0x27 | V_UNIT_STRIDE, + OPC_VSE16_V = 0x5027 | V_UNIT_STRIDE, + OPC_VSE32_V = 0x6027 | V_UNIT_STRIDE, + OPC_VSE64_V = 0x7027 | V_UNIT_STRIDE, + + OPC_VL1RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0), + OPC_VL2RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1), + OPC_VL4RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3), + OPC_VL8RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7), + + OPC_VS1R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0), + OPC_VS2R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1), + OPC_VS4R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3), + OPC_VS8R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7), + + OPC_VMERGE_VIM = 0x5c000057 | V_OPIVI, + OPC_VMERGE_VVM = 0x5c000057 | V_OPIVV, + + OPC_VADD_VV = 0x57 | V_OPIVV, + OPC_VADD_VI = 0x57 | V_OPIVI, + OPC_VSUB_VV = 0x8000057 | V_OPIVV, + OPC_VRSUB_VI = 0xc000057 | V_OPIVI, + OPC_VAND_VV = 0x24000057 | V_OPIVV, + OPC_VAND_VI = 0x24000057 | V_OPIVI, + OPC_VOR_VV = 0x28000057 | V_OPIVV, + OPC_VOR_VI = 0x28000057 | V_OPIVI, + OPC_VXOR_VV = 0x2c000057 | V_OPIVV, + OPC_VXOR_VI = 0x2c000057 | V_OPIVI, + + OPC_VMUL_VV = 0x94000057 | V_OPMVV, + OPC_VSADD_VV = 0x84000057 | V_OPIVV, + OPC_VSADD_VI = 0x84000057 | V_OPIVI, + OPC_VSSUB_VV = 0x8c000057 | V_OPIVV, + OPC_VSSUB_VI = 0x8c000057 | V_OPIVI, + OPC_VSADDU_VV = 0x80000057 | V_OPIVV, + OPC_VSADDU_VI = 0x80000057 | V_OPIVI, + OPC_VSSUBU_VV = 0x88000057 | V_OPIVV, + OPC_VSSUBU_VI = 0x88000057 | V_OPIVI, + + OPC_VMAX_VV = 0x1c000057 | V_OPIVV, + OPC_VMAX_VI = 0x1c000057 | V_OPIVI, + OPC_VMAXU_VV = 0x18000057 | V_OPIVV, + OPC_VMAXU_VI = 0x18000057 | V_OPIVI, + OPC_VMIN_VV = 0x14000057 | V_OPIVV, + OPC_VMIN_VI = 0x14000057 | V_OPIVI, + OPC_VMINU_VV = 0x10000057 | V_OPIVV, + OPC_VMINU_VI = 0x10000057 | V_OPIVI, + + OPC_VMSEQ_VV = 0x60000057 | V_OPIVV, + OPC_VMSEQ_VI = 0x60000057 | V_OPIVI, + OPC_VMSEQ_VX = 0x60000057 | V_OPIVX, + OPC_VMSNE_VV = 0x64000057 | V_OPIVV, + OPC_VMSNE_VI = 0x64000057 | V_OPIVI, + OPC_VMSNE_VX = 0x64000057 | V_OPIVX, + + OPC_VMSLTU_VV = 0x68000057 | V_OPIVV, + OPC_VMSLTU_VX = 0x68000057 | V_OPIVX, + OPC_VMSLT_VV = 0x6c000057 | V_OPIVV, + 
OPC_VMSLT_VX = 0x6c000057 | V_OPIVX, + OPC_VMSLEU_VV = 0x70000057 | V_OPIVV, + OPC_VMSLEU_VX = 0x70000057 | V_OPIVX, + OPC_VMSLE_VV = 0x74000057 | V_OPIVV, + OPC_VMSLE_VX = 0x74000057 | V_OPIVX, + + OPC_VMSLEU_VI = 0x70000057 | V_OPIVI, + OPC_VMSLE_VI = 0x74000057 | V_OPIVI, + OPC_VMSGTU_VI = 0x78000057 | V_OPIVI, + OPC_VMSGTU_VX = 0x78000057 | V_OPIVX, + OPC_VMSGT_VI = 0x7c000057 | V_OPIVI, + OPC_VMSGT_VX = 0x7c000057 | V_OPIVX, + + OPC_VSLL_VV = 0x94000057 | V_OPIVV, + OPC_VSLL_VI = 0x94000057 | V_OPIVI, + OPC_VSLL_VX = 0x94000057 | V_OPIVX, + OPC_VSRL_VV = 0xa0000057 | V_OPIVV, + OPC_VSRL_VI = 0xa0000057 | V_OPIVI, + OPC_VSRL_VX = 0xa0000057 | V_OPIVX, + OPC_VSRA_VV = 0xa4000057 | V_OPIVV, + OPC_VSRA_VI = 0xa4000057 | V_OPIVI, + OPC_VSRA_VX = 0xa4000057 | V_OPIVX, + + OPC_VMV_V_V = 0x5e000057 | V_OPIVV, + OPC_VMV_V_I = 0x5e000057 | V_OPIVI, + OPC_VMV_V_X = 0x5e000057 | V_OPIVX, + + OPC_VMVNR_V = 0x9e000057 | V_OPIVI, } RISCVInsn; +static const struct { + RISCVInsn op; + bool swap; +} tcg_cmpcond_to_rvv_vv[] = { + [TCG_COND_EQ] = { OPC_VMSEQ_VV, false }, + [TCG_COND_NE] = { OPC_VMSNE_VV, false }, + [TCG_COND_LT] = { OPC_VMSLT_VV, false }, + [TCG_COND_GE] = { OPC_VMSLE_VV, true }, + [TCG_COND_GT] = { OPC_VMSLT_VV, true }, + [TCG_COND_LE] = { OPC_VMSLE_VV, false }, + [TCG_COND_LTU] = { OPC_VMSLTU_VV, false }, + [TCG_COND_GEU] = { OPC_VMSLEU_VV, true }, + [TCG_COND_GTU] = { OPC_VMSLTU_VV, true }, + [TCG_COND_LEU] = { OPC_VMSLEU_VV, false } +}; + +static const struct { + RISCVInsn op; + int min; + int max; + bool adjust; +} tcg_cmpcond_to_rvv_vi[] = { + [TCG_COND_EQ] = { OPC_VMSEQ_VI, -16, 15, false }, + [TCG_COND_NE] = { OPC_VMSNE_VI, -16, 15, false }, + [TCG_COND_GT] = { OPC_VMSGT_VI, -16, 15, false }, + [TCG_COND_LE] = { OPC_VMSLE_VI, -16, 15, false }, + [TCG_COND_LT] = { OPC_VMSLE_VI, -15, 16, true }, + [TCG_COND_GE] = { OPC_VMSGT_VI, -15, 16, true }, + [TCG_COND_LEU] = { OPC_VMSLEU_VI, 0, 15, false }, + [TCG_COND_GTU] = { OPC_VMSGTU_VI, 0, 15, false }, + [TCG_COND_LTU] = { OPC_VMSLEU_VI, 1, 16, true }, + [TCG_COND_GEU] = { OPC_VMSGTU_VI, 1, 16, true }, +}; + +/* test if a constant matches the constraint */ +static bool tcg_target_const_match(int64_t val, int ct, + TCGType type, TCGCond cond, int vece) +{ + if (ct & TCG_CT_CONST) { + return 1; + } + if (type >= TCG_TYPE_V64) { + /* Val is replicated by VECE; extract the highest element. */ + val >>= (-8 << vece) & 63; + } + /* + * Sign extended from 12 bits: [-0x800, 0x7ff]. + * Used for most arithmetic, as this is the isa field. + */ + if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) { + return 1; + } + /* + * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff]. + * Used by movcond, which may need the negative value, + * and requires the modified constant to be representable. + */ + if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) { + return 1; + } + /* + * Sign extended from 5 bits: [-0x10, 0x0f]. + * Used for vector-immediate. + */ + if ((ct & TCG_CT_CONST_S5) && val >= -0x10 && val <= 0x0f) { + return 1; + } + /* + * Used for vector compare OPIVI instructions. 
+ */ + if ((ct & TCG_CT_CONST_CMP_VI) && + val >= tcg_cmpcond_to_rvv_vi[cond].min && + val <= tcg_cmpcond_to_rvv_vi[cond].max) { + return true; + } + return 0; +} + /* * RISC-V immediate and instruction encoders (excludes 16-bit RVC) */ @@ -363,6 +515,45 @@ static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm) return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm); } + +/* Type-OPIVI */ + +static int32_t encode_vi(RISCVInsn opc, TCGReg rd, int32_t imm, + TCGReg vs2, bool vm) +{ + return opc | (rd & 0x1f) << 7 | (imm & 0x1f) << 15 | + (vs2 & 0x1f) << 20 | (vm << 25); +} + +/* Type-OPIVV/OPMVV/OPIVX/OPMVX, Vector load and store */ + +static int32_t encode_v(RISCVInsn opc, TCGReg d, TCGReg s1, + TCGReg s2, bool vm) +{ + return opc | (d & 0x1f) << 7 | (s1 & 0x1f) << 15 | + (s2 & 0x1f) << 20 | (vm << 25); +} + +/* Vector vtype */ + +static uint32_t encode_vtype(bool vta, bool vma, + MemOp vsew, RISCVVlmul vlmul) +{ + return vma << 7 | vta << 6 | vsew << 3 | vlmul; +} + +static int32_t encode_vset(RISCVInsn opc, TCGReg rd, + TCGArg rs1, uint32_t vtype) +{ + return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (vtype & 0x7ff) << 20; +} + +static int32_t encode_vseti(RISCVInsn opc, TCGReg rd, + uint32_t uimm, uint32_t vtype) +{ + return opc | (rd & 0x1f) << 7 | (uimm & 0x1f) << 15 | (vtype & 0x3ff) << 20; +} + /* * RISC-V instruction emitters */ @@ -475,6 +666,91 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, } } +/* + * RISC-V vector instruction emitters + */ + +/* + * Vector registers uses the same 5 lower bits as GPR registers, + * and vm=0 (vm = false) means vector masking ENABLED. + * With RVV 1.0, vs2 is the first operand, while rs1/imm is the + * second operand. + */ +static void tcg_out_opc_vv(TCGContext *s, RISCVInsn opc, + TCGReg vd, TCGReg vs2, TCGReg vs1) +{ + tcg_out32(s, encode_v(opc, vd, vs1, vs2, true)); +} + +static void tcg_out_opc_vx(TCGContext *s, RISCVInsn opc, + TCGReg vd, TCGReg vs2, TCGReg rs1) +{ + tcg_out32(s, encode_v(opc, vd, rs1, vs2, true)); +} + +static void tcg_out_opc_vi(TCGContext *s, RISCVInsn opc, + TCGReg vd, TCGReg vs2, int32_t imm) +{ + tcg_out32(s, encode_vi(opc, vd, imm, vs2, true)); +} + +static void tcg_out_opc_vv_vi(TCGContext *s, RISCVInsn o_vv, RISCVInsn o_vi, + TCGReg vd, TCGReg vs2, TCGArg vi1, int c_vi1) +{ + if (c_vi1) { + tcg_out_opc_vi(s, o_vi, vd, vs2, vi1); + } else { + tcg_out_opc_vv(s, o_vv, vd, vs2, vi1); + } +} + +static void tcg_out_opc_vim_mask(TCGContext *s, RISCVInsn opc, TCGReg vd, + TCGReg vs2, int32_t imm) +{ + tcg_out32(s, encode_vi(opc, vd, imm, vs2, false)); +} + +static void tcg_out_opc_vvm_mask(TCGContext *s, RISCVInsn opc, TCGReg vd, + TCGReg vs2, TCGReg vs1) +{ + tcg_out32(s, encode_v(opc, vd, vs1, vs2, false)); +} + +typedef struct VsetCache { + uint32_t movi_insn; + uint32_t vset_insn; +} VsetCache; + +static VsetCache riscv_vset_cache[3][4]; + +static void set_vtype(TCGContext *s, TCGType type, MemOp vsew) +{ + const VsetCache *p = &riscv_vset_cache[type - TCG_TYPE_V64][vsew]; + + s->riscv_cur_type = type; + s->riscv_cur_vsew = vsew; + + if (p->movi_insn) { + tcg_out32(s, p->movi_insn); + } + tcg_out32(s, p->vset_insn); +} + +static MemOp set_vtype_len(TCGContext *s, TCGType type) +{ + if (type != s->riscv_cur_type) { + set_vtype(s, type, MO_64); + } + return s->riscv_cur_vsew; +} + +static void set_vtype_len_sew(TCGContext *s, TCGType type, MemOp vsew) +{ + if (type != s->riscv_cur_type || vsew != s->riscv_cur_vsew) { + set_vtype(s, type, vsew); + } +} + /* * TCG intrinsics */ @@ -489,6 +765,15 @@ 
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) case TCG_TYPE_I64: tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0); break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + { + int lmul = type - riscv_lg2_vlenb; + int nf = 1 << MAX(lmul, 0); + tcg_out_opc_vi(s, OPC_VMVNR_V, ret, arg, nf - 1); + } + break; default: g_assert_not_reached(); } @@ -681,18 +966,101 @@ static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data, } } +static void tcg_out_vec_ldst(TCGContext *s, RISCVInsn opc, TCGReg data, + TCGReg addr, intptr_t offset) +{ + tcg_debug_assert(data >= TCG_REG_V0); + tcg_debug_assert(addr < TCG_REG_V0); + + if (offset) { + tcg_debug_assert(addr != TCG_REG_ZERO); + if (offset == sextreg(offset, 0, 12)) { + tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, addr, offset); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset); + tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, addr); + } + addr = TCG_REG_TMP0; + } + tcg_out32(s, encode_v(opc, data, addr, 0, true)); +} + static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { - RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD; - tcg_out_ldst(s, insn, arg, arg1, arg2); + RISCVInsn insn; + + switch (type) { + case TCG_TYPE_I32: + tcg_out_ldst(s, OPC_LW, arg, arg1, arg2); + break; + case TCG_TYPE_I64: + tcg_out_ldst(s, OPC_LD, arg, arg1, arg2); + break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + if (type >= riscv_lg2_vlenb) { + static const RISCVInsn whole_reg_ld[] = { + OPC_VL1RE64_V, OPC_VL2RE64_V, OPC_VL4RE64_V, OPC_VL8RE64_V + }; + unsigned idx = type - riscv_lg2_vlenb; + + tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_ld)); + insn = whole_reg_ld[idx]; + } else { + static const RISCVInsn unit_stride_ld[] = { + OPC_VLE8_V, OPC_VLE16_V, OPC_VLE32_V, OPC_VLE64_V + }; + MemOp prev_vsew = set_vtype_len(s, type); + + tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_ld)); + insn = unit_stride_ld[prev_vsew]; + } + tcg_out_vec_ldst(s, insn, arg, arg1, arg2); + break; + default: + g_assert_not_reached(); + } } static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { - RISCVInsn insn = type == TCG_TYPE_I32 ? 
OPC_SW : OPC_SD; - tcg_out_ldst(s, insn, arg, arg1, arg2); + RISCVInsn insn; + + switch (type) { + case TCG_TYPE_I32: + tcg_out_ldst(s, OPC_SW, arg, arg1, arg2); + break; + case TCG_TYPE_I64: + tcg_out_ldst(s, OPC_SD, arg, arg1, arg2); + break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + if (type >= riscv_lg2_vlenb) { + static const RISCVInsn whole_reg_st[] = { + OPC_VS1R_V, OPC_VS2R_V, OPC_VS4R_V, OPC_VS8R_V + }; + unsigned idx = type - riscv_lg2_vlenb; + + tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_st)); + insn = whole_reg_st[idx]; + } else { + static const RISCVInsn unit_stride_st[] = { + OPC_VSE8_V, OPC_VSE16_V, OPC_VSE32_V, OPC_VSE64_V + }; + MemOp prev_vsew = set_vtype_len(s, type); + + tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_st)); + insn = unit_stride_st[prev_vsew]; + } + tcg_out_vec_ldst(s, insn, arg, arg1, arg2); + break; + default: + g_assert_not_reached(); + } } static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, @@ -705,65 +1073,44 @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, return false; } -static void tcg_out_addsub2(TCGContext *s, - TCGReg rl, TCGReg rh, - TCGReg al, TCGReg ah, - TCGArg bl, TCGArg bh, - bool cbl, bool cbh, bool is_sub, bool is32bit) +static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg src) { - const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD; - const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI; - const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB; - TCGReg th = TCG_REG_TMP1; + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vx(s, OPC_VMV_V_X, dst, 0, src); + return true; +} - /* If we have a negative constant such that negating it would - make the high part zero, we can (usually) eliminate one insn. */ - if (cbl && cbh && bh == -1 && bl != 0) { - bl = -bl; - bh = 0; - is_sub = !is_sub; - } +static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg base, intptr_t offset) +{ + tcg_out_ld(s, TCG_TYPE_REG, TCG_REG_TMP0, base, offset); + return tcg_out_dup_vec(s, type, vece, dst, TCG_REG_TMP0); +} - /* By operating on the high part first, we get to use the final - carry operation to move back from the temporary. */ - if (!cbh) { - tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh); - } else if (bh != 0 || ah == rl) { - tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh)); - } else { - th = ah; - } +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, int64_t arg) +{ + /* Arg is replicated by VECE; extract the highest element. */ + arg >>= (-8 << vece) & 63; - /* Note that tcg optimization should eliminate the bl == 0 case. */ - if (is_sub) { - if (cbl) { - tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl); - tcg_out_opc_imm(s, opc_addi, rl, al, -bl); - } else { - tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl); - tcg_out_opc_reg(s, opc_sub, rl, al, bl); - } - tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0); - } else { - if (cbl) { - tcg_out_opc_imm(s, opc_addi, rl, al, bl); - tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl); - } else if (al == bl) { - /* - * If the input regs overlap, this is a simple doubling - * and carry-out is the input msb. This special case is - * required when the output reg overlaps the input, - * but we might as well use it always. 
- */ - tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0); - tcg_out_opc_reg(s, opc_add, rl, al, al); + if (arg >= -16 && arg < 16) { + if (arg == 0 || arg == -1) { + set_vtype_len(s, type); } else { - tcg_out_opc_reg(s, opc_add, rl, al, bl); - tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, - rl, (rl == bl ? al : bl)); + set_vtype_len_sew(s, type, vece); } - tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0); + tcg_out_opc_vi(s, OPC_VMV_V_I, dst, 0, arg); + return; } + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, arg); + tcg_out_dup_vec(s, type, vece, dst, TCG_REG_TMP0); +} + +static void tcg_out_br(TCGContext *s, TCGLabel *l) +{ + tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, l, 0); + tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0); } static const struct { @@ -782,8 +1129,8 @@ static const struct { [TCG_COND_GTU] = { OPC_BLTU, true } }; -static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, - TCGReg arg2, TCGLabel *l) +static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg arg1, TCGReg arg2, TCGLabel *l) { RISCVInsn op = tcg_brcond_to_riscv[cond].op; @@ -799,6 +1146,11 @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, tcg_out_opc_branch(s, op, arg1, arg2, 0); } +static const TCGOutOpBrcond outop_brcond = { + .base.static_constraint = C_O0_I2(r, rz), + .out_rr = tgen_brcond, +}; + #define SETCOND_INV TCG_TARGET_NB_REGS #define SETCOND_NEZ (SETCOND_INV << 1) #define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ) @@ -923,6 +1275,24 @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, } } +static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) +{ + tcg_out_setcond(s, cond, dest, arg1, arg2, false); +} + +static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, tcg_target_long arg2) +{ + tcg_out_setcond(s, cond, dest, arg1, arg2, true); +} + +static const TCGOutOpSetcond outop_setcond = { + .base.static_constraint = C_O1_I2(r, r, rI), + .out_rrr = tgen_setcond, + .out_rri = tgen_setcondi, +}; + static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg arg1, tcg_target_long arg2, bool c2) { @@ -961,6 +1331,24 @@ static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret, } } +static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) +{ + tcg_out_negsetcond(s, cond, dest, arg1, arg2, false); +} + +static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, tcg_target_long arg2) +{ + tcg_out_negsetcond(s, cond, dest, arg1, arg2, true); +} + +static const TCGOutOpSetcond outop_negsetcond = { + .base.static_constraint = C_O1_I2(r, r, rI), + .out_rrr = tgen_negsetcond, + .out_rri = tgen_negsetcondi, +}; + static void tcg_out_movcond_zicond(TCGContext *s, TCGReg ret, TCGReg test_ne, int val1, bool c_val1, int val2, bool c_val2) @@ -1058,10 +1446,10 @@ static void tcg_out_movcond_br2(TCGContext *s, TCGCond cond, TCGReg ret, tcg_out_mov(s, TCG_TYPE_REG, ret, tmp); } -static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg cmp1, int cmp2, bool c_cmp2, - TCGReg val1, bool c_val1, - TCGReg val2, bool c_val2) +static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg ret, TCGReg cmp1, TCGArg cmp2, bool c_cmp2, + TCGArg val1, bool c_val1, + TCGArg val2, bool c_val2) { int tmpflags; TCGReg t; @@ -1088,6 +1476,11 @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, } } 
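Aside: both tcg_target_const_match() and tcg_out_dupi_vec() above reduce a constant that TCG has already replicated per element (VECE) back to a single element before checking it against the signed 5-bit vmv.v.i immediate range. The standalone C sketch below only mirrors that one expression, "val >> ((-8 << vece) & 63)"; the sample value is illustrative and not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* vece: 0 = 8-bit, 1 = 16-bit, 2 = 32-bit, 3 = 64-bit elements. */
static int64_t high_element(int64_t val, unsigned vece)
{
    /*
     * 8 << vece is the element width in bits, so (-8 << vece) & 63 is
     * 64 minus that width (0 for 64-bit elements).  The arithmetic
     * shift therefore keeps only the most significant element,
     * sign-extended to 64 bits.
     */
    return val >> ((-8 << vece) & 63);
}

int main(void)
{
    /* 0xfe replicated into every byte: each 8-bit element is -2. */
    int64_t rep8 = (int64_t)0xfefefefefefefefeull;

    /* Prints -2, which fits the signed 5-bit vmv.v.i immediate. */
    printf("%lld\n", (long long)high_element(rep8, 0));
    /* With 64-bit elements the value is returned unchanged. */
    printf("%lld\n", (long long)high_element(rep8, 3));
    return 0;
}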
+static const TCGOutOpMovcond outop_movcond = { + .base.static_constraint = C_O1_I4(r, r, rI, rM, rM), + .out = tcg_out_movcond, +}; + static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn, TCGReg ret, TCGReg src1, int src2, bool c_src2) { @@ -1099,17 +1492,77 @@ static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn, * Note that constraints put 'ret' in a new register, so the * computation above did not clobber either 'src1' or 'src2'. */ - tcg_out_movcond(s, TCG_COND_EQ, ret, src1, 0, true, + tcg_out_movcond(s, type, TCG_COND_EQ, ret, src1, 0, true, src2, c_src2, ret, false); } } +static void tcg_out_cmpsel(TCGContext *s, TCGType type, unsigned vece, + TCGCond cond, TCGReg ret, + TCGReg cmp1, TCGReg cmp2, bool c_cmp2, + TCGReg val1, bool c_val1, + TCGReg val2, bool c_val2) +{ + set_vtype_len_sew(s, type, vece); + + /* Use only vmerge_vim if possible, by inverting the test. */ + if (c_val2 && !c_val1) { + TCGArg temp = val1; + cond = tcg_invert_cond(cond); + val1 = val2; + val2 = temp; + c_val1 = true; + c_val2 = false; + } + + /* Perform the comparison into V0 mask. */ + if (c_cmp2) { + tcg_out_opc_vi(s, tcg_cmpcond_to_rvv_vi[cond].op, TCG_REG_V0, cmp1, + cmp2 - tcg_cmpcond_to_rvv_vi[cond].adjust); + } else if (tcg_cmpcond_to_rvv_vv[cond].swap) { + tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op, + TCG_REG_V0, cmp2, cmp1); + } else { + tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op, + TCG_REG_V0, cmp1, cmp2); + } + if (c_val1) { + if (c_val2) { + tcg_out_opc_vi(s, OPC_VMV_V_I, ret, 0, val2); + val2 = ret; + } + /* vd[i] == v0.mask[i] ? imm : vs2[i] */ + tcg_out_opc_vim_mask(s, OPC_VMERGE_VIM, ret, val2, val1); + } else { + /* vd[i] == v0.mask[i] ? vs1[i] : vs2[i] */ + tcg_out_opc_vvm_mask(s, OPC_VMERGE_VVM, ret, val2, val1); + } +} + +static void tcg_out_vshifti(TCGContext *s, RISCVInsn opc_vi, RISCVInsn opc_vx, + TCGReg dst, TCGReg src, unsigned imm) +{ + if (imm < 32) { + tcg_out_opc_vi(s, opc_vi, dst, src, imm); + } else { + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP0, imm); + tcg_out_opc_vx(s, opc_vx, dst, src, TCG_REG_TMP0); + } +} + +static void init_setting_vtype(TCGContext *s) +{ + s->riscv_cur_type = TCG_TYPE_COUNT; +} + static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) { TCGReg link = tail ? 
TCG_REG_ZERO : TCG_REG_RA; ptrdiff_t offset = tcg_pcrel_diff(s, arg); int ret; + init_setting_vtype(s); + tcg_debug_assert((offset & 1) == 0); if (offset == sextreg(offset, 0, 20)) { /* short jump: -2097150 to 2097152 */ @@ -1135,7 +1588,7 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg, tcg_out_call_int(s, arg, false); } -static void tcg_out_mb(TCGContext *s, TCGArg a0) +static void tcg_out_mb(TCGContext *s, unsigned a0) { tcg_insn_unit insn = OPC_FENCE; @@ -1149,7 +1602,7 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) insn |= 0x02100000; } if (a0 & TCG_MO_ST_ST) { - insn |= 0x02200000; + insn |= 0x01100000; } tcg_out32(s, insn); } @@ -1245,13 +1698,15 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addr_reg; + ldst->addr_reg = addr_reg; + + init_setting_vtype(s); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg, - s->page_bits - CPU_TLB_ENTRY_BITS); + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); @@ -1267,7 +1722,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI, addr_adj, addr_reg, s_mask - a_mask); } - compare_mask = s->page_mask | a_mask; + compare_mask = TARGET_PAGE_MASK | a_mask; if (compare_mask == sextreg(compare_mask, 0, 12)) { tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask); } else { @@ -1306,7 +1761,9 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase, ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addr_reg; + ldst->addr_reg = addr_reg; + + init_setting_vtype(s); /* We are expecting alignment max 7, so we can always use andi. 
*/ tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12)); @@ -1376,22 +1833,31 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val, } } -static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - MemOpIdx oi, TCGType data_type) +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data_reg, + TCGReg addr_reg, MemOpIdx oi) { TCGLabelQemuLdst *ldst; TCGReg base; ldst = prepare_host_addr(s, &base, addr_reg, oi, true); - tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type); + tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), type); if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = data_reg; ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } } +static const TCGOutOpQemuLdSt outop_qemu_ld = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_qemu_ld, +}; + +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = { + .base.static_constraint = C_NotImplemented, +}; + static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val, TCGReg base, MemOp opc) { @@ -1416,8 +1882,8 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val, } } -static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - MemOpIdx oi, TCGType data_type) +static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data_reg, + TCGReg addr_reg, MemOpIdx oi) { TCGLabelQemuLdst *ldst; TCGReg base; @@ -1426,12 +1892,21 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, tcg_out_qemu_st_direct(s, data_reg, base, get_memop(oi)); if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = data_reg; ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } } +static const TCGOutOpQemuLdSt outop_qemu_st = { + .base.static_constraint = C_O0_I2(rz, r), + .out = tgen_qemu_st, +}; + +static const TCGOutOpQemuLdSt2 outop_qemu_st2 = { + .base.static_constraint = C_NotImplemented, +}; + static const tcg_insn_unit *tb_ret_addr; static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) @@ -1458,6 +1933,11 @@ static void tcg_out_goto_tb(TCGContext *s, int which) set_jmp_reset_offset(s, which); } +static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0) +{ + tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0); +} + void tb_target_set_jmp_target(const TranslationBlock *tb, int n, uintptr_t jmp_rx, uintptr_t jmp_rw) { @@ -1475,553 +1955,960 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n, flush_idcache_range(jmp_rx, jmp_rw, 4); } -static void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) + +static void tgen_add(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) { - TCGArg a0 = args[0]; - TCGArg a1 = args[1]; - TCGArg a2 = args[2]; - int c2 = const_args[2]; + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDW : OPC_ADD; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} - switch (opc) { - case INDEX_op_goto_ptr: - tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0); - break; +static void tgen_addi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? 
OPC_ADDIW : OPC_ADDI; + tcg_out_opc_imm(s, insn, a0, a1, a2); +} - case INDEX_op_br: - tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0); - tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0); - break; +static const TCGOutOpBinary outop_add = { + .base.static_constraint = C_O1_I2(r, r, rI), + .out_rrr = tgen_add, + .out_rri = tgen_addi, +}; - case INDEX_op_ld8u_i32: - case INDEX_op_ld8u_i64: - tcg_out_ldst(s, OPC_LBU, a0, a1, a2); - break; - case INDEX_op_ld8s_i32: - case INDEX_op_ld8s_i64: - tcg_out_ldst(s, OPC_LB, a0, a1, a2); - break; - case INDEX_op_ld16u_i32: - case INDEX_op_ld16u_i64: - tcg_out_ldst(s, OPC_LHU, a0, a1, a2); - break; - case INDEX_op_ld16s_i32: - case INDEX_op_ld16s_i64: - tcg_out_ldst(s, OPC_LH, a0, a1, a2); - break; - case INDEX_op_ld32u_i64: - tcg_out_ldst(s, OPC_LWU, a0, a1, a2); - break; - case INDEX_op_ld_i32: - case INDEX_op_ld32s_i64: - tcg_out_ldst(s, OPC_LW, a0, a1, a2); - break; - case INDEX_op_ld_i64: - tcg_out_ldst(s, OPC_LD, a0, a1, a2); - break; +static const TCGOutOpBinary outop_addco = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - tcg_out_ldst(s, OPC_SB, a0, a1, a2); - break; - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - tcg_out_ldst(s, OPC_SH, a0, a1, a2); - break; - case INDEX_op_st_i32: - case INDEX_op_st32_i64: - tcg_out_ldst(s, OPC_SW, a0, a1, a2); - break; - case INDEX_op_st_i64: - tcg_out_ldst(s, OPC_SD, a0, a1, a2); - break; +static const TCGOutOpAddSubCarry outop_addci = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_add_i32: - if (c2) { - tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2); - } else { - tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2); - } - break; - case INDEX_op_add_i64: - if (c2) { - tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2); - } else { - tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2); - } - break; +static const TCGOutOpBinary outop_addcio = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_sub_i32: - if (c2) { - tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2); - } else { - tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2); - } - break; - case INDEX_op_sub_i64: - if (c2) { - tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2); - } else { - tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2); - } - break; +static void tcg_out_set_carry(TCGContext *s) +{ + g_assert_not_reached(); +} - case INDEX_op_and_i32: - case INDEX_op_and_i64: - if (c2) { - tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2); - } else { - tcg_out_opc_reg(s, OPC_AND, a0, a1, a2); - } - break; +static void tgen_and(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_reg(s, OPC_AND, a0, a1, a2); +} - case INDEX_op_or_i32: - case INDEX_op_or_i64: - if (c2) { - tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2); - } else { - tcg_out_opc_reg(s, OPC_OR, a0, a1, a2); - } - break; +static void tgen_andi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2); +} - case INDEX_op_xor_i32: - case INDEX_op_xor_i64: - if (c2) { - tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2); - } else { - tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2); - } - break; +static const TCGOutOpBinary outop_and = { + .base.static_constraint = C_O1_I2(r, r, rI), + .out_rrr = tgen_and, + .out_rri = tgen_andi, +}; - case INDEX_op_andc_i32: - case INDEX_op_andc_i64: - if (c2) { - tcg_out_opc_imm(s, OPC_ANDI, a0, a1, ~a2); - } else { - tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2); - } - break; - case INDEX_op_orc_i32: - case INDEX_op_orc_i64: - if (c2) { - tcg_out_opc_imm(s, OPC_ORI, a0, a1, 
~a2); - } else { - tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2); - } - break; - case INDEX_op_eqv_i32: - case INDEX_op_eqv_i64: - if (c2) { - tcg_out_opc_imm(s, OPC_XORI, a0, a1, ~a2); - } else { - tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2); - } - break; +static void tgen_andc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2); +} - case INDEX_op_not_i32: - case INDEX_op_not_i64: - tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1); - break; +static TCGConstraintSetIndex cset_zbb_rrr(TCGType type, unsigned flags) +{ + return cpuinfo & CPUINFO_ZBB ? C_O1_I2(r, r, r) : C_NotImplemented; +} - case INDEX_op_neg_i32: - tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1); - break; - case INDEX_op_neg_i64: - tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1); - break; +static const TCGOutOpBinary outop_andc = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_zbb_rrr, + .out_rrr = tgen_andc, +}; - case INDEX_op_mul_i32: - tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2); - break; - case INDEX_op_mul_i64: - tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2); - break; +static void tgen_clz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CLZW : OPC_CLZ; + tcg_out_cltz(s, type, insn, a0, a1, a2, false); +} - case INDEX_op_div_i32: - tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2); - break; - case INDEX_op_div_i64: - tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2); - break; +static void tgen_clzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CLZW : OPC_CLZ; + tcg_out_cltz(s, type, insn, a0, a1, a2, true); +} - case INDEX_op_divu_i32: - tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2); - break; - case INDEX_op_divu_i64: - tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2); - break; +static TCGConstraintSetIndex cset_clzctz(TCGType type, unsigned flags) +{ + return cpuinfo & CPUINFO_ZBB ? C_N1_I2(r, r, rM) : C_NotImplemented; +} - case INDEX_op_rem_i32: - tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2); - break; - case INDEX_op_rem_i64: - tcg_out_opc_reg(s, OPC_REM, a0, a1, a2); - break; +static const TCGOutOpBinary outop_clz = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_clzctz, + .out_rrr = tgen_clz, + .out_rri = tgen_clzi, +}; - case INDEX_op_remu_i32: - tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2); - break; - case INDEX_op_remu_i64: - tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2); - break; +static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CPOPW : OPC_CPOP; + tcg_out_opc_imm(s, insn, a0, a1, 0); +} - case INDEX_op_shl_i32: - if (c2) { - tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f); - } else { - tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2); - } - break; - case INDEX_op_shl_i64: - if (c2) { - tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f); - } else { - tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2); - } - break; +static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags) +{ + return cpuinfo & CPUINFO_ZBB ? 
C_O1_I1(r, r) : C_NotImplemented; +} - case INDEX_op_shr_i32: - if (c2) { - tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f); - } else { - tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2); - } - break; - case INDEX_op_shr_i64: - if (c2) { - tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f); - } else { - tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2); - } - break; +static const TCGOutOpUnary outop_ctpop = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_ctpop, + .out_rr = tgen_ctpop, +}; - case INDEX_op_sar_i32: - if (c2) { - tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f); - } else { - tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2); - } - break; - case INDEX_op_sar_i64: - if (c2) { - tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f); - } else { - tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2); - } - break; +static void tgen_ctz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CTZW : OPC_CTZ; + tcg_out_cltz(s, type, insn, a0, a1, a2, false); +} - case INDEX_op_rotl_i32: - if (c2) { - tcg_out_opc_imm(s, OPC_RORIW, a0, a1, -a2 & 0x1f); - } else { - tcg_out_opc_reg(s, OPC_ROLW, a0, a1, a2); - } - break; - case INDEX_op_rotl_i64: - if (c2) { - tcg_out_opc_imm(s, OPC_RORI, a0, a1, -a2 & 0x3f); - } else { - tcg_out_opc_reg(s, OPC_ROL, a0, a1, a2); - } - break; +static void tgen_ctzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CTZW : OPC_CTZ; + tcg_out_cltz(s, type, insn, a0, a1, a2, true); +} - case INDEX_op_rotr_i32: - if (c2) { - tcg_out_opc_imm(s, OPC_RORIW, a0, a1, a2 & 0x1f); - } else { - tcg_out_opc_reg(s, OPC_RORW, a0, a1, a2); - } - break; - case INDEX_op_rotr_i64: - if (c2) { - tcg_out_opc_imm(s, OPC_RORI, a0, a1, a2 & 0x3f); - } else { - tcg_out_opc_reg(s, OPC_ROR, a0, a1, a2); - } - break; +static const TCGOutOpBinary outop_ctz = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_clzctz, + .out_rrr = tgen_ctz, + .out_rri = tgen_ctzi, +}; - case INDEX_op_bswap64_i64: - tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0); - break; - case INDEX_op_bswap32_i32: - a2 = 0; - /* fall through */ - case INDEX_op_bswap32_i64: - tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0); - if (a2 & TCG_BSWAP_OZ) { - tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32); - } else { - tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32); +static void tgen_divs(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_DIVW : OPC_DIV; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static const TCGOutOpBinary outop_divs = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_divs, +}; + +static const TCGOutOpDivRem outop_divs2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_divu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? 
OPC_DIVUW : OPC_DIVU; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static const TCGOutOpBinary outop_divu = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_divu, +}; + +static const TCGOutOpDivRem outop_divu2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_eqv(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2); +} + +static const TCGOutOpBinary outop_eqv = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_zbb_rrr, + .out_rrr = tgen_eqv, +}; + +static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) +{ + tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32); +} + +static const TCGOutOpUnary outop_extrh_i64_i32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extrh_i64_i32, +}; + +static void tgen_mul(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_MULW : OPC_MUL; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static const TCGOutOpBinary outop_mul = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_mul, +}; + +static const TCGOutOpMul2 outop_muls2 = { + .base.static_constraint = C_NotImplemented, +}; + +static TCGConstraintSetIndex cset_mulh(TCGType type, unsigned flags) +{ + return type == TCG_TYPE_I32 ? C_NotImplemented : C_O1_I2(r, r, r); +} + +static void tgen_mulsh(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2); +} + +static const TCGOutOpBinary outop_mulsh = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mulh, + .out_rrr = tgen_mulsh, +}; + +static const TCGOutOpMul2 outop_mulu2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_muluh(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2); +} + +static const TCGOutOpBinary outop_muluh = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mulh, + .out_rrr = tgen_muluh, +}; + +static const TCGOutOpBinary outop_nand = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_nor = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_or(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_reg(s, OPC_OR, a0, a1, a2); +} + +static void tgen_ori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2); +} + +static const TCGOutOpBinary outop_or = { + .base.static_constraint = C_O1_I2(r, r, rI), + .out_rrr = tgen_or, + .out_rri = tgen_ori, +}; + +static void tgen_orc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2); +} + +static const TCGOutOpBinary outop_orc = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_zbb_rrr, + .out_rrr = tgen_orc, +}; + +static void tgen_rems(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_REMW : OPC_REM; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static const TCGOutOpBinary outop_rems = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_rems, +}; + +static void tgen_remu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? 
OPC_REMUW : OPC_REMU; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static const TCGOutOpBinary outop_remu = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_remu, +}; + +static TCGConstraintSetIndex cset_rot(TCGType type, unsigned flags) +{ + return cpuinfo & CPUINFO_ZBB ? C_O1_I2(r, r, ri) : C_NotImplemented; +} + +static void tgen_rotr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_RORW : OPC_ROR; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static void tgen_rotri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_RORIW : OPC_RORI; + unsigned mask = type == TCG_TYPE_I32 ? 31 : 63; + tcg_out_opc_imm(s, insn, a0, a1, a2 & mask); +} + +static const TCGOutOpBinary outop_rotr = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_rot, + .out_rrr = tgen_rotr, + .out_rri = tgen_rotri, +}; + +static void tgen_rotl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ROLW : OPC_ROL; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static void tgen_rotli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tgen_rotri(s, type, a0, a1, -a2); +} + +static const TCGOutOpBinary outop_rotl = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_rot, + .out_rrr = tgen_rotl, + .out_rri = tgen_rotli, +}; + +static void tgen_sar(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRAW : OPC_SRA; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static void tgen_sari(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRAIW : OPC_SRAI; + unsigned mask = type == TCG_TYPE_I32 ? 31 : 63; + tcg_out_opc_imm(s, insn, a0, a1, a2 & mask); +} + +static const TCGOutOpBinary outop_sar = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_sar, + .out_rri = tgen_sari, +}; + +static void tgen_shl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SLLW : OPC_SLL; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static void tgen_shli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SLLIW : OPC_SLLI; + unsigned mask = type == TCG_TYPE_I32 ? 31 : 63; + tcg_out_opc_imm(s, insn, a0, a1, a2 & mask); +} + +static const TCGOutOpBinary outop_shl = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shl, + .out_rri = tgen_shli, +}; + +static void tgen_shr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRLW : OPC_SRL; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static void tgen_shri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRLIW : OPC_SRLI; + unsigned mask = type == TCG_TYPE_I32 ? 31 : 63; + tcg_out_opc_imm(s, insn, a0, a1, a2 & mask); +} + +static const TCGOutOpBinary outop_shr = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shr, + .out_rri = tgen_shri, +}; + +static void tgen_sub(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + RISCVInsn insn = type == TCG_TYPE_I32 ? 
OPC_SUBW : OPC_SUB; + tcg_out_opc_reg(s, insn, a0, a1, a2); +} + +static const TCGOutOpSubtract outop_sub = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_sub, +}; + +static const TCGOutOpAddSubCarry outop_subbo = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpAddSubCarry outop_subbi = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpAddSubCarry outop_subbio = { + .base.static_constraint = C_NotImplemented, +}; + +static void tcg_out_set_borrow(TCGContext *s) +{ + g_assert_not_reached(); +} + +static void tgen_xor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2); +} + +static void tgen_xori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2); +} + +static const TCGOutOpBinary outop_xor = { + .base.static_constraint = C_O1_I2(r, r, rI), + .out_rrr = tgen_xor, + .out_rri = tgen_xori, +}; + +static TCGConstraintSetIndex cset_bswap(TCGType type, unsigned flags) +{ + return cpuinfo & CPUINFO_ZBB ? C_O1_I1(r, r) : C_NotImplemented; +} + +static void tgen_bswap16(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, unsigned flags) +{ + tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0); + if (flags & TCG_BSWAP_OZ) { + tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48); + } else { + tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48); + } +} + +static const TCGOutOpBswap outop_bswap16 = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_bswap, + .out_rr = tgen_bswap16, +}; + +static void tgen_bswap32(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, unsigned flags) +{ + tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0); + if (flags & TCG_BSWAP_OZ) { + tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32); + } else { + tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32); + } +} + +static const TCGOutOpBswap outop_bswap32 = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_bswap, + .out_rr = tgen_bswap32, +}; + +static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0); +} + +static const TCGOutOpUnary outop_bswap64 = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_bswap, + .out_rr = tgen_bswap64, +}; + +static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tgen_sub(s, type, a0, TCG_REG_ZERO, a1); +} + +static const TCGOutOpUnary outop_neg = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_neg, +}; + +static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tgen_xori(s, type, a0, a1, -1); +} + +static const TCGOutOpUnary outop_not = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_not, +}; + +static const TCGOutOpDeposit outop_deposit = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + if (ofs == 0) { + switch (len) { + case 16: + tcg_out_ext16u(s, a0, a1); + return; + case 32: + tcg_out_ext32u(s, a0, a1); + return; } - break; - case INDEX_op_bswap16_i64: - case INDEX_op_bswap16_i32: - tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0); - if (a2 & TCG_BSWAP_OZ) { - tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48); - } else { - tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48); + } + if (ofs + len == 32) { + tgen_shri(s, TCG_TYPE_I32, a0, a1, ofs); + return; + } + if (len == 1) { + tcg_out_opc_imm(s, OPC_BEXTI, a0, a1, ofs); + return; + } + g_assert_not_reached(); +} + 
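Aside: the bswap16/bswap32 helpers above rely on the Zbb rev8 instruction reversing all eight bytes of the 64-bit register, after which a right shift by 48 or 32 moves the swapped halfword or word back down (srli for TCG_BSWAP_OZ, srai otherwise). A standalone C sketch of that equivalence; rev8() here is only a plain C model of the instruction and the test value is illustrative.

#include <assert.h>
#include <stdint.h>

/* Model of Zbb rev8: reverse the byte order of a 64-bit value. */
static uint64_t rev8(uint64_t x)
{
    uint64_t r = 0;
    for (int i = 0; i < 8; i++) {
        r = (r << 8) | ((x >> (i * 8)) & 0xff);
    }
    return r;
}

int main(void)
{
    uint64_t x = 0x1122334455667788ull;

    /* bswap16 with zero-extension: rev8, then logical shift right by 48. */
    assert((rev8(x) >> 48) == 0x8877);
    /* bswap32 with zero-extension: rev8, then logical shift right by 32. */
    assert((rev8(x) >> 32) == 0x88776655);
    /* The srai form instead sign-extends the swapped result. */
    return 0;
}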
+static const TCGOutOpExtract outop_extract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extract, +}; + +static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + if (ofs == 0) { + switch (len) { + case 8: + tcg_out_ext8s(s, type, a0, a1); + return; + case 16: + tcg_out_ext16s(s, type, a0, a1); + return; + case 32: + tcg_out_ext32s(s, a0, a1); + return; } - break; + } else if (ofs + len == 32) { + tgen_sari(s, TCG_TYPE_I32, a0, a1, ofs); + return; + } + g_assert_not_reached(); +} - case INDEX_op_ctpop_i32: - tcg_out_opc_imm(s, OPC_CPOPW, a0, a1, 0); - break; - case INDEX_op_ctpop_i64: - tcg_out_opc_imm(s, OPC_CPOP, a0, a1, 0); - break; +static const TCGOutOpExtract outop_sextract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_sextract, +}; - case INDEX_op_clz_i32: - tcg_out_cltz(s, TCG_TYPE_I32, OPC_CLZW, a0, a1, a2, c2); - break; - case INDEX_op_clz_i64: - tcg_out_cltz(s, TCG_TYPE_I64, OPC_CLZ, a0, a1, a2, c2); - break; - case INDEX_op_ctz_i32: - tcg_out_cltz(s, TCG_TYPE_I32, OPC_CTZW, a0, a1, a2, c2); - break; - case INDEX_op_ctz_i64: - tcg_out_cltz(s, TCG_TYPE_I64, OPC_CTZ, a0, a1, a2, c2); - break; +static const TCGOutOpExtract2 outop_extract2 = { + .base.static_constraint = C_NotImplemented, +}; - case INDEX_op_add2_i32: - tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], - const_args[4], const_args[5], false, true); - break; - case INDEX_op_add2_i64: - tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], - const_args[4], const_args[5], false, false); - break; - case INDEX_op_sub2_i32: - tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], - const_args[4], const_args[5], true, true); - break; - case INDEX_op_sub2_i64: - tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], - const_args[4], const_args[5], true, false); - break; +static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LBU, dest, base, offset); +} - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: - tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); - break; +static const TCGOutOpLoad outop_ld8u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8u, +}; - case INDEX_op_setcond_i32: - case INDEX_op_setcond_i64: - tcg_out_setcond(s, args[3], a0, a1, a2, c2); - break; +static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LB, dest, base, offset); +} - case INDEX_op_negsetcond_i32: - case INDEX_op_negsetcond_i64: - tcg_out_negsetcond(s, args[3], a0, a1, a2, c2); - break; +static const TCGOutOpLoad outop_ld8s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8s, +}; - case INDEX_op_movcond_i32: - case INDEX_op_movcond_i64: - tcg_out_movcond(s, args[5], a0, a1, a2, c2, - args[3], const_args[3], args[4], const_args[4]); - break; +static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LHU, dest, base, offset); +} - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: - tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); - break; - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_ld_a64_i64: - tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); - break; - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st_a64_i32: - tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); +static const TCGOutOpLoad outop_ld16u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16u, +}; + +static void 
tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LH, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld16s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16s, +}; + +static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LWU, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld32u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32u, +}; + +static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_LW, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld32s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32s, +}; + +static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_SB, data, base, offset); +} + +static const TCGOutOpStore outop_st8 = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tgen_st8_r, +}; + +static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, OPC_SH, data, base, offset); +} + +static const TCGOutOpStore outop_st16 = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tgen_st16_r, +}; + +static const TCGOutOpStore outop_st = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tcg_out_st, +}; + + +static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, + unsigned vecl, unsigned vece, + const TCGArg args[TCG_MAX_OP_ARGS], + const int const_args[TCG_MAX_OP_ARGS]) +{ + TCGType type = vecl + TCG_TYPE_V64; + TCGArg a0, a1, a2; + int c2; + + a0 = args[0]; + a1 = args[1]; + a2 = args[2]; + c2 = const_args[2]; + + switch (opc) { + case INDEX_op_dupm_vec: + tcg_out_dupm_vec(s, type, vece, a0, a1, a2); break; - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_st_a64_i64: - tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); + case INDEX_op_ld_vec: + tcg_out_ld(s, type, a0, a1, a2); break; - - case INDEX_op_extrh_i64_i32: - tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32); + case INDEX_op_st_vec: + tcg_out_st(s, type, a0, a1, a2); break; - - case INDEX_op_mulsh_i32: - case INDEX_op_mulsh_i64: - tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2); + case INDEX_op_add_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VADD_VV, OPC_VADD_VI, a0, a1, a2, c2); break; - - case INDEX_op_muluh_i32: - case INDEX_op_muluh_i64: - tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2); + case INDEX_op_sub_vec: + set_vtype_len_sew(s, type, vece); + if (const_args[1]) { + tcg_out_opc_vi(s, OPC_VRSUB_VI, a0, a2, a1); + } else { + tcg_out_opc_vv(s, OPC_VSUB_VV, a0, a1, a2); + } break; + case INDEX_op_and_vec: + set_vtype_len(s, type); + tcg_out_opc_vv_vi(s, OPC_VAND_VV, OPC_VAND_VI, a0, a1, a2, c2); + break; + case INDEX_op_or_vec: + set_vtype_len(s, type); + tcg_out_opc_vv_vi(s, OPC_VOR_VV, OPC_VOR_VI, a0, a1, a2, c2); + break; + case INDEX_op_xor_vec: + set_vtype_len(s, type); + tcg_out_opc_vv_vi(s, OPC_VXOR_VV, OPC_VXOR_VI, a0, a1, a2, c2); + break; + case INDEX_op_not_vec: + set_vtype_len(s, type); + tcg_out_opc_vi(s, OPC_VXOR_VI, a0, a1, -1); + break; + case INDEX_op_neg_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vi(s, OPC_VRSUB_VI, a0, a1, 0); + break; + case INDEX_op_mul_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv(s, OPC_VMUL_VV, a0, a1, a2); + break; + case INDEX_op_ssadd_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VSADD_VV, OPC_VSADD_VI, a0, a1, a2, 
c2); + break; + case INDEX_op_sssub_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VSSUB_VV, OPC_VSSUB_VI, a0, a1, a2, c2); + break; + case INDEX_op_usadd_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VSADDU_VV, OPC_VSADDU_VI, a0, a1, a2, c2); + break; + case INDEX_op_ussub_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VSSUBU_VV, OPC_VSSUBU_VI, a0, a1, a2, c2); + break; + case INDEX_op_smax_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VMAX_VV, OPC_VMAX_VI, a0, a1, a2, c2); + break; + case INDEX_op_smin_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VMIN_VV, OPC_VMIN_VI, a0, a1, a2, c2); + break; + case INDEX_op_umax_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VMAXU_VV, OPC_VMAXU_VI, a0, a1, a2, c2); + break; + case INDEX_op_umin_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv_vi(s, OPC_VMINU_VV, OPC_VMINU_VI, a0, a1, a2, c2); + break; + case INDEX_op_shls_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vx(s, OPC_VSLL_VX, a0, a1, a2); + break; + case INDEX_op_shrs_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, a2); + break; + case INDEX_op_sars_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vx(s, OPC_VSRA_VX, a0, a1, a2); + break; + case INDEX_op_shlv_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv(s, OPC_VSLL_VV, a0, a1, a2); + break; + case INDEX_op_shrv_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv(s, OPC_VSRL_VV, a0, a1, a2); + break; + case INDEX_op_sarv_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vv(s, OPC_VSRA_VV, a0, a1, a2); + break; + case INDEX_op_shli_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, a0, a1, a2); + break; + case INDEX_op_shri_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1, a2); + break; + case INDEX_op_sari_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_vshifti(s, OPC_VSRA_VI, OPC_VSRA_VX, a0, a1, a2); + break; + case INDEX_op_rotli_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, TCG_REG_V0, a1, a2); + tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1, + -a2 & ((8 << vece) - 1)); + tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0); + break; + case INDEX_op_rotls_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vx(s, OPC_VSLL_VX, TCG_REG_V0, a1, a2); + tcg_out_opc_reg(s, OPC_SUBW, TCG_REG_TMP0, TCG_REG_ZERO, a2); + tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, TCG_REG_TMP0); + tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0); + break; + case INDEX_op_rotlv_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vi(s, OPC_VRSUB_VI, TCG_REG_V0, a2, 0); + tcg_out_opc_vv(s, OPC_VSRL_VV, TCG_REG_V0, a1, TCG_REG_V0); + tcg_out_opc_vv(s, OPC_VSLL_VV, a0, a1, a2); + tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0); + break; + case INDEX_op_rotrv_vec: + set_vtype_len_sew(s, type, vece); + tcg_out_opc_vi(s, OPC_VRSUB_VI, TCG_REG_V0, a2, 0); + tcg_out_opc_vv(s, OPC_VSLL_VV, TCG_REG_V0, a1, TCG_REG_V0); + tcg_out_opc_vv(s, OPC_VSRL_VV, a0, a1, a2); + tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0); + break; + case INDEX_op_cmp_vec: + tcg_out_cmpsel(s, type, vece, args[3], a0, a1, a2, c2, + -1, true, 0, true); + break; + case INDEX_op_cmpsel_vec: + tcg_out_cmpsel(s, type, vece, args[5], a0, a1, a2, c2, + args[3], const_args[3], args[4], const_args[4]); + break; + case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. 
*/ + case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ + default: + g_assert_not_reached(); + } +} - case INDEX_op_mb: - tcg_out_mb(s, a0); - break; +void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, + TCGArg a0, ...) +{ + g_assert_not_reached(); +} - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_mov_i64: - case INDEX_op_call: /* Always emitted via tcg_out_call. */ - case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ - case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ - case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ - case INDEX_op_ext8s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extrl_i64_i32: +int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) +{ + switch (opc) { + case INDEX_op_add_vec: + case INDEX_op_sub_vec: + case INDEX_op_and_vec: + case INDEX_op_or_vec: + case INDEX_op_xor_vec: + case INDEX_op_not_vec: + case INDEX_op_neg_vec: + case INDEX_op_mul_vec: + case INDEX_op_ssadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_usadd_vec: + case INDEX_op_ussub_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: + case INDEX_op_shls_vec: + case INDEX_op_shrs_vec: + case INDEX_op_sars_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + case INDEX_op_shri_vec: + case INDEX_op_shli_vec: + case INDEX_op_sari_vec: + case INDEX_op_rotls_vec: + case INDEX_op_rotlv_vec: + case INDEX_op_rotrv_vec: + case INDEX_op_rotli_vec: + case INDEX_op_cmp_vec: + case INDEX_op_cmpsel_vec: + return 1; default: - g_assert_not_reached(); + return 0; } } -static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) +static TCGConstraintSetIndex +tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) { switch (op) { - case INDEX_op_goto_ptr: - return C_O0_I1(r); - - case INDEX_op_ld8u_i32: - case INDEX_op_ld8s_i32: - case INDEX_op_ld16u_i32: - case INDEX_op_ld16s_i32: - case INDEX_op_ld_i32: - case INDEX_op_not_i32: - case INDEX_op_neg_i32: - case INDEX_op_ld8u_i64: - case INDEX_op_ld8s_i64: - case INDEX_op_ld16u_i64: - case INDEX_op_ld16s_i64: - case INDEX_op_ld32s_i64: - case INDEX_op_ld32u_i64: - case INDEX_op_ld_i64: - case INDEX_op_not_i64: - case INDEX_op_neg_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_ext8s_i32: - case INDEX_op_ext8s_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_extrl_i64_i32: - case INDEX_op_extrh_i64_i32: - case INDEX_op_ext_i32_i64: - case INDEX_op_bswap16_i32: - case INDEX_op_bswap32_i32: - case INDEX_op_bswap16_i64: - case INDEX_op_bswap32_i64: - case INDEX_op_bswap64_i64: - case INDEX_op_ctpop_i32: - case INDEX_op_ctpop_i64: - return C_O1_I1(r, r); - - case INDEX_op_st8_i32: - case INDEX_op_st16_i32: - case INDEX_op_st_i32: - case INDEX_op_st8_i64: - case INDEX_op_st16_i64: - case INDEX_op_st32_i64: - case INDEX_op_st_i64: - return C_O0_I2(rZ, r); - - case INDEX_op_add_i32: - case INDEX_op_and_i32: - case INDEX_op_or_i32: - case INDEX_op_xor_i32: - case INDEX_op_add_i64: - case INDEX_op_and_i64: - case 
INDEX_op_or_i64: - case INDEX_op_xor_i64: - case INDEX_op_setcond_i32: - case INDEX_op_setcond_i64: - case INDEX_op_negsetcond_i32: - case INDEX_op_negsetcond_i64: - return C_O1_I2(r, r, rI); - - case INDEX_op_andc_i32: - case INDEX_op_andc_i64: - case INDEX_op_orc_i32: - case INDEX_op_orc_i64: - case INDEX_op_eqv_i32: - case INDEX_op_eqv_i64: - return C_O1_I2(r, r, rJ); - - case INDEX_op_sub_i32: - case INDEX_op_sub_i64: - return C_O1_I2(r, rZ, rN); - - case INDEX_op_mul_i32: - case INDEX_op_mulsh_i32: - case INDEX_op_muluh_i32: - case INDEX_op_div_i32: - case INDEX_op_divu_i32: - case INDEX_op_rem_i32: - case INDEX_op_remu_i32: - case INDEX_op_mul_i64: - case INDEX_op_mulsh_i64: - case INDEX_op_muluh_i64: - case INDEX_op_div_i64: - case INDEX_op_divu_i64: - case INDEX_op_rem_i64: - case INDEX_op_remu_i64: - return C_O1_I2(r, rZ, rZ); - - case INDEX_op_shl_i32: - case INDEX_op_shr_i32: - case INDEX_op_sar_i32: - case INDEX_op_rotl_i32: - case INDEX_op_rotr_i32: - case INDEX_op_shl_i64: - case INDEX_op_shr_i64: - case INDEX_op_sar_i64: - case INDEX_op_rotl_i64: - case INDEX_op_rotr_i64: - return C_O1_I2(r, r, ri); - - case INDEX_op_clz_i32: - case INDEX_op_clz_i64: - case INDEX_op_ctz_i32: - case INDEX_op_ctz_i64: - return C_N1_I2(r, r, rM); - - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: - return C_O0_I2(rZ, rZ); - - case INDEX_op_movcond_i32: - case INDEX_op_movcond_i64: - return C_O1_I4(r, r, rI, rM, rM); - - case INDEX_op_add2_i32: - case INDEX_op_add2_i64: - case INDEX_op_sub2_i32: - case INDEX_op_sub2_i64: - return C_O2_I4(r, r, rZ, rZ, rM, rM); - - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_ld_a64_i64: - return C_O1_I1(r, r); - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st_a64_i32: - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_st_a64_i64: - return C_O0_I2(rZ, r); - + case INDEX_op_st_vec: + return C_O0_I2(v, r); + case INDEX_op_dup_vec: + case INDEX_op_dupm_vec: + case INDEX_op_ld_vec: + return C_O1_I1(v, r); + case INDEX_op_neg_vec: + case INDEX_op_not_vec: + case INDEX_op_shli_vec: + case INDEX_op_shri_vec: + case INDEX_op_sari_vec: + case INDEX_op_rotli_vec: + return C_O1_I1(v, v); + case INDEX_op_add_vec: + case INDEX_op_and_vec: + case INDEX_op_or_vec: + case INDEX_op_xor_vec: + case INDEX_op_ssadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_usadd_vec: + case INDEX_op_ussub_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: + return C_O1_I2(v, v, vK); + case INDEX_op_sub_vec: + return C_O1_I2(v, vK, v); + case INDEX_op_mul_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + case INDEX_op_rotlv_vec: + case INDEX_op_rotrv_vec: + return C_O1_I2(v, v, v); + case INDEX_op_shls_vec: + case INDEX_op_shrs_vec: + case INDEX_op_sars_vec: + case INDEX_op_rotls_vec: + return C_O1_I2(v, v, r); + case INDEX_op_cmp_vec: + return C_O1_I2(v, v, vL); + case INDEX_op_cmpsel_vec: + return C_O1_I4(v, v, vL, vK, vK); default: - g_assert_not_reached(); + return C_NotImplemented; } } @@ -2093,7 +2980,65 @@ static void tcg_target_qemu_prologue(TCGContext *s) static void tcg_out_tb_start(TCGContext *s) { - /* nothing to do */ + init_setting_vtype(s); +} + +static bool vtype_check(unsigned vtype) +{ + unsigned long tmp; + + /* vsetvl tmp, zero, vtype */ + asm(".insn r 0x57, 7, 0x40, %0, zero, %1" : "=r"(tmp) : "r"(vtype)); + return tmp != 0; +} + +static void probe_frac_lmul_1(TCGType type, MemOp vsew) +{ + 
VsetCache *p = &riscv_vset_cache[type - TCG_TYPE_V64][vsew]; + unsigned avl = tcg_type_size(type) >> vsew; + int lmul = type - riscv_lg2_vlenb; + unsigned vtype = encode_vtype(true, true, vsew, lmul & 7); + bool lmul_eq_avl = true; + + /* Guaranteed by Zve64x. */ + assert(lmul < 3); + + /* + * For LMUL < -3, the host vector size is so large that TYPE + * is smaller than the minimum 1/8 fraction. + * + * For other fractional LMUL settings, implementations must + * support SEW settings between SEW_MIN and LMUL * ELEN, inclusive. + * So if ELEN = 64, LMUL = 1/2, then SEW will support e8, e16, e32, + * but e64 may not be supported. In other words, the hardware only + * guarantees SEW_MIN <= SEW <= LMUL * ELEN. Check. + */ + if (lmul < 0 && (lmul < -3 || !vtype_check(vtype))) { + vtype = encode_vtype(true, true, vsew, VLMUL_M1); + lmul_eq_avl = false; + } + + if (avl < 32) { + p->vset_insn = encode_vseti(OPC_VSETIVLI, TCG_REG_ZERO, avl, vtype); + } else if (lmul_eq_avl) { + /* rd != 0 and rs1 == 0 uses vlmax */ + p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_TMP0, TCG_REG_ZERO, vtype); + } else { + p->movi_insn = encode_i(OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, avl); + p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_ZERO, TCG_REG_TMP0, vtype); + } +} + +static void probe_frac_lmul(void) +{ + /* Match riscv_lg2_vlenb to TCG_TYPE_V64. */ + QEMU_BUILD_BUG_ON(TCG_TYPE_V64 != 3); + + for (TCGType t = TCG_TYPE_V64; t <= TCG_TYPE_V256; t++) { + for (MemOp e = MO_8; e <= MO_64; e++) { + probe_frac_lmul_1(t, e); + } + } } static void tcg_target_init(TCGContext *s) @@ -2101,7 +3046,7 @@ static void tcg_target_init(TCGContext *s) tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; - tcg_target_call_clobber_regs = -1u; + tcg_target_call_clobber_regs = -1; tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); @@ -2123,6 +3068,32 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); + + if (cpuinfo & CPUINFO_ZVE64X) { + switch (riscv_lg2_vlenb) { + case TCG_TYPE_V64: + tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; + tcg_target_available_regs[TCG_TYPE_V128] = ALL_DVECTOR_REG_GROUPS; + tcg_target_available_regs[TCG_TYPE_V256] = ALL_QVECTOR_REG_GROUPS; + s->reserved_regs |= (~ALL_QVECTOR_REG_GROUPS & ALL_VECTOR_REGS); + break; + case TCG_TYPE_V128: + tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; + tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; + tcg_target_available_regs[TCG_TYPE_V256] = ALL_DVECTOR_REG_GROUPS; + s->reserved_regs |= (~ALL_DVECTOR_REG_GROUPS & ALL_VECTOR_REGS); + break; + default: + /* Guaranteed by Zve64x. 
*/ + tcg_debug_assert(riscv_lg2_vlenb >= TCG_TYPE_V256); + tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS; + tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS; + tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS; + break; + } + tcg_regset_set_reg(s->reserved_regs, TCG_REG_V0); + probe_frac_lmul(); + } } typedef struct { diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h index 1a347eaf6ec..6dc77d944ba 100644 --- a/tcg/riscv/tcg-target.h +++ b/tcg/riscv/tcg-target.h @@ -25,45 +25,29 @@ #ifndef RISCV_TCG_TARGET_H #define RISCV_TCG_TARGET_H -#include "host/cpuinfo.h" - #define TCG_TARGET_INSN_UNIT_SIZE 4 -#define TCG_TARGET_NB_REGS 32 +#define TCG_TARGET_NB_REGS 64 #define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) typedef enum { - TCG_REG_ZERO, - TCG_REG_RA, - TCG_REG_SP, - TCG_REG_GP, - TCG_REG_TP, - TCG_REG_T0, - TCG_REG_T1, - TCG_REG_T2, - TCG_REG_S0, - TCG_REG_S1, - TCG_REG_A0, - TCG_REG_A1, - TCG_REG_A2, - TCG_REG_A3, - TCG_REG_A4, - TCG_REG_A5, - TCG_REG_A6, - TCG_REG_A7, - TCG_REG_S2, - TCG_REG_S3, - TCG_REG_S4, - TCG_REG_S5, - TCG_REG_S6, - TCG_REG_S7, - TCG_REG_S8, - TCG_REG_S9, - TCG_REG_S10, - TCG_REG_S11, - TCG_REG_T3, - TCG_REG_T4, - TCG_REG_T5, - TCG_REG_T6, + TCG_REG_ZERO, TCG_REG_RA, TCG_REG_SP, TCG_REG_GP, + TCG_REG_TP, TCG_REG_T0, TCG_REG_T1, TCG_REG_T2, + TCG_REG_S0, TCG_REG_S1, TCG_REG_A0, TCG_REG_A1, + TCG_REG_A2, TCG_REG_A3, TCG_REG_A4, TCG_REG_A5, + TCG_REG_A6, TCG_REG_A7, TCG_REG_S2, TCG_REG_S3, + TCG_REG_S4, TCG_REG_S5, TCG_REG_S6, TCG_REG_S7, + TCG_REG_S8, TCG_REG_S9, TCG_REG_S10, TCG_REG_S11, + TCG_REG_T3, TCG_REG_T4, TCG_REG_T5, TCG_REG_T6, + + /* RISC-V V Extension registers */ + TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, + TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, + TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, + TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, + TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, + TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, + TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, + TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31, /* aliases */ TCG_AREG0 = TCG_REG_S0, @@ -73,92 +57,6 @@ typedef enum { TCG_REG_TMP2 = TCG_REG_T4, } TCGReg; -/* used for function call generation */ -#define TCG_REG_CALL_STACK TCG_REG_SP -#define TCG_TARGET_STACK_ALIGN 16 -#define TCG_TARGET_CALL_STACK_OFFSET 0 -#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL - -/* optional instructions */ -#define TCG_TARGET_HAS_negsetcond_i32 1 -#define TCG_TARGET_HAS_div_i32 1 -#define TCG_TARGET_HAS_rem_i32 1 -#define TCG_TARGET_HAS_div2_i32 0 -#define TCG_TARGET_HAS_rot_i32 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_deposit_i32 0 -#define TCG_TARGET_HAS_extract_i32 0 -#define TCG_TARGET_HAS_sextract_i32 0 -#define TCG_TARGET_HAS_extract2_i32 0 -#define TCG_TARGET_HAS_add2_i32 1 -#define TCG_TARGET_HAS_sub2_i32 1 -#define TCG_TARGET_HAS_mulu2_i32 0 -#define TCG_TARGET_HAS_muls2_i32 0 -#define TCG_TARGET_HAS_muluh_i32 0 -#define TCG_TARGET_HAS_mulsh_i32 0 -#define TCG_TARGET_HAS_ext8s_i32 1 -#define TCG_TARGET_HAS_ext16s_i32 1 -#define TCG_TARGET_HAS_ext8u_i32 1 -#define TCG_TARGET_HAS_ext16u_i32 1 -#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_not_i32 1 -#define TCG_TARGET_HAS_andc_i32 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_orc_i32 
(cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_eqv_i32 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_nand_i32 0 -#define TCG_TARGET_HAS_nor_i32 0 -#define TCG_TARGET_HAS_clz_i32 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_ctz_i32 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_ctpop_i32 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_brcond2 1 -#define TCG_TARGET_HAS_setcond2 1 -#define TCG_TARGET_HAS_qemu_st8_i32 0 - -#define TCG_TARGET_HAS_negsetcond_i64 1 -#define TCG_TARGET_HAS_div_i64 1 -#define TCG_TARGET_HAS_rem_i64 1 -#define TCG_TARGET_HAS_div2_i64 0 -#define TCG_TARGET_HAS_rot_i64 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_deposit_i64 0 -#define TCG_TARGET_HAS_extract_i64 0 -#define TCG_TARGET_HAS_sextract_i64 0 -#define TCG_TARGET_HAS_extract2_i64 0 -#define TCG_TARGET_HAS_extr_i64_i32 1 -#define TCG_TARGET_HAS_ext8s_i64 1 -#define TCG_TARGET_HAS_ext16s_i64 1 -#define TCG_TARGET_HAS_ext32s_i64 1 -#define TCG_TARGET_HAS_ext8u_i64 1 -#define TCG_TARGET_HAS_ext16u_i64 1 -#define TCG_TARGET_HAS_ext32u_i64 1 -#define TCG_TARGET_HAS_bswap16_i64 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_not_i64 1 -#define TCG_TARGET_HAS_andc_i64 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_orc_i64 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_eqv_i64 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_nand_i64 0 -#define TCG_TARGET_HAS_nor_i64 0 -#define TCG_TARGET_HAS_clz_i64 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_ctz_i64 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_ctpop_i64 (cpuinfo & CPUINFO_ZBB) -#define TCG_TARGET_HAS_add2_i64 1 -#define TCG_TARGET_HAS_sub2_i64 1 -#define TCG_TARGET_HAS_mulu2_i64 0 -#define TCG_TARGET_HAS_muls2_i64 0 -#define TCG_TARGET_HAS_muluh_i64 1 -#define TCG_TARGET_HAS_mulsh_i64 1 - -#define TCG_TARGET_HAS_qemu_ldst_i128 0 - -#define TCG_TARGET_HAS_tst 0 - -#define TCG_TARGET_DEFAULT_MO (0) - -#define TCG_TARGET_NEED_LDST_LABELS -#define TCG_TARGET_NEED_POOL_LABELS +#define TCG_REG_ZERO TCG_REG_ZERO #endif diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h index f75955eaa87..f67fd7898ed 100644 --- a/tcg/s390x/tcg-target-con-set.h +++ b/tcg/s390x/tcg-target-con-set.h @@ -22,6 +22,7 @@ C_O1_I1(r, r) C_O1_I1(v, r) C_O1_I1(v, v) C_O1_I1(v, vr) +C_O1_I2(r, 0, r) C_O1_I2(r, 0, ri) C_O1_I2(r, 0, rI) C_O1_I2(r, 0, rJ) @@ -31,18 +32,16 @@ C_O1_I2(r, r, rC) C_O1_I2(r, r, rI) C_O1_I2(r, r, rJ) C_O1_I2(r, r, rK) -C_O1_I2(r, r, rKR) -C_O1_I2(r, r, rNK) C_O1_I2(r, r, rNKR) +C_O1_I2(r, r, rUV) C_O1_I2(r, rZ, r) C_O1_I2(v, v, r) C_O1_I2(v, v, v) C_O1_I3(v, v, v, v) -C_O1_I4(r, r, ri, rI, r) +C_O1_I4(v, v, v, vZ, v) +C_O1_I4(v, v, v, vZM, v) C_O1_I4(r, r, rC, rI, r) C_O2_I1(o, m, r) C_O2_I2(o, m, 0, r) C_O2_I2(o, m, r, r) C_O2_I3(o, m, 0, 1, r) -C_N1_O1_I4(r, r, 0, 1, ri, r) -C_N1_O1_I4(r, r, 0, 1, rJU, r) diff --git a/tcg/s390x/tcg-target-con-str.h b/tcg/s390x/tcg-target-con-str.h index 745f6c0df51..636a38a168a 100644 --- a/tcg/s390x/tcg-target-con-str.h +++ b/tcg/s390x/tcg-target-con-str.h @@ -20,7 +20,9 @@ CONST('C', TCG_CT_CONST_CMP) CONST('I', TCG_CT_CONST_S16) CONST('J', TCG_CT_CONST_S32) CONST('K', TCG_CT_CONST_P32) +CONST('M', TCG_CT_CONST_M1) CONST('N', TCG_CT_CONST_INV) CONST('R', TCG_CT_CONST_INVRISBG) CONST('U', TCG_CT_CONST_U32) +CONST('V', TCG_CT_CONST_N32) CONST('Z', TCG_CT_CONST_ZERO) diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h new file mode 100644 index 
00000000000..0aeb5ba01a9 --- /dev/null +++ b/tcg/s390x/tcg-target-has.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific opcode support + * Copyright (c) 2009 Ulrich Hecht + */ + +#ifndef TCG_TARGET_HAS_H +#define TCG_TARGET_HAS_H + +/* Facilities required for proper operation; checked at startup. */ + +#define FACILITY_ZARCH_ACTIVE 2 +#define FACILITY_LONG_DISP 18 +#define FACILITY_EXT_IMM 21 +#define FACILITY_GEN_INST_EXT 34 +#define FACILITY_45 45 + +/* Facilities that are checked at runtime. */ + +#define FACILITY_LOAD_ON_COND2 53 +#define FACILITY_MISC_INSN_EXT2 58 +#define FACILITY_MISC_INSN_EXT3 61 +#define FACILITY_VECTOR 129 +#define FACILITY_VECTOR_ENH1 135 + +extern uint64_t s390_facilities[3]; + +#define HAVE_FACILITY(X) \ + ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1) + +/* optional instructions */ +#define TCG_TARGET_HAS_extr_i64_i32 0 +#define TCG_TARGET_HAS_qemu_ldst_i128 1 +#define TCG_TARGET_HAS_tst 1 + +#define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR) +#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR) +#define TCG_TARGET_HAS_v256 0 + +#define TCG_TARGET_HAS_andc_vec 1 +#define TCG_TARGET_HAS_orc_vec HAVE_FACILITY(VECTOR_ENH1) +#define TCG_TARGET_HAS_nand_vec HAVE_FACILITY(VECTOR_ENH1) +#define TCG_TARGET_HAS_nor_vec 1 +#define TCG_TARGET_HAS_eqv_vec HAVE_FACILITY(VECTOR_ENH1) +#define TCG_TARGET_HAS_not_vec 1 +#define TCG_TARGET_HAS_neg_vec 1 +#define TCG_TARGET_HAS_abs_vec 1 +#define TCG_TARGET_HAS_roti_vec 1 +#define TCG_TARGET_HAS_rots_vec 1 +#define TCG_TARGET_HAS_rotv_vec 1 +#define TCG_TARGET_HAS_shi_vec 1 +#define TCG_TARGET_HAS_shs_vec 1 +#define TCG_TARGET_HAS_shv_vec 1 +#define TCG_TARGET_HAS_mul_vec 1 +#define TCG_TARGET_HAS_sat_vec 0 +#define TCG_TARGET_HAS_minmax_vec 1 +#define TCG_TARGET_HAS_bitsel_vec 1 +#define TCG_TARGET_HAS_cmpsel_vec 1 +#define TCG_TARGET_HAS_tst_vec 0 + +#define TCG_TARGET_extract_valid(type, ofs, len) 1 +#define TCG_TARGET_deposit_valid(type, ofs, len) 1 + +static inline bool +tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len) +{ + if (ofs == 0) { + switch (len) { + case 8: + case 16: + return true; + case 32: + return type == TCG_TYPE_I64; + } + } + return false; +} +#define TCG_TARGET_sextract_valid tcg_target_sextract_valid + +#endif diff --git a/tcg/s390x/tcg-target-mo.h b/tcg/s390x/tcg-target-mo.h new file mode 100644 index 00000000000..962295ed513 --- /dev/null +++ b/tcg/s390x/tcg-target-mo.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific memory model + * Copyright (c) 2009 Ulrich Hecht + */ + +#ifndef TCG_TARGET_MO_H +#define TCG_TARGET_MO_H + +#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) + +#endif diff --git a/tcg/s390x/tcg-target-opc.h.inc b/tcg/s390x/tcg-target-opc.h.inc new file mode 100644 index 00000000000..61237b39cd2 --- /dev/null +++ b/tcg/s390x/tcg-target-opc.h.inc @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2021 Linaro + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. + * + * See the COPYING file in the top-level directory for details. + * + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, + * consider these to be UNSPEC with names. 
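+ *
+ * Each entry is DEF(name, output-args, input-args, const-args, flags).
+ * These wrap the z/Architecture vector unpack high/low (VUPH/VUPL) and
+ * saturating pack (VPKS) instructions.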
+ */ +DEF(s390_vuph_vec, 1, 1, 0, TCG_OPF_VECTOR) +DEF(s390_vupl_vec, 1, 1, 0, TCG_OPF_VECTOR) +DEF(s390_vpks_vec, 1, 2, 0, TCG_OPF_VECTOR) diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc index ad587325fc8..84a9e73a46e 100644 --- a/tcg/s390x/tcg-target.c.inc +++ b/tcg/s390x/tcg-target.c.inc @@ -24,10 +24,16 @@ * THE SOFTWARE. */ -#include "../tcg-ldst.c.inc" -#include "../tcg-pool.c.inc" #include "elf.h" +/* Used for function call generation. */ +#define TCG_TARGET_STACK_ALIGN 8 +#define TCG_TARGET_CALL_STACK_OFFSET 160 +#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND +#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF +#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF + #define TCG_CT_CONST_S16 (1 << 8) #define TCG_CT_CONST_S32 (1 << 9) #define TCG_CT_CONST_U32 (1 << 10) @@ -36,6 +42,8 @@ #define TCG_CT_CONST_INV (1 << 13) #define TCG_CT_CONST_INVRISBG (1 << 14) #define TCG_CT_CONST_CMP (1 << 15) +#define TCG_CT_CONST_M1 (1 << 16) +#define TCG_CT_CONST_N32 (1 << 17) #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16) #define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) @@ -46,6 +54,7 @@ /* A scratch register that may be be used throughout the backend. */ #define TCG_TMP0 TCG_REG_R1 +#define TCG_VEC_TMP0 TCG_REG_V31 #define TCG_GUEST_BASE_REG TCG_REG_R13 @@ -126,6 +135,9 @@ typedef enum S390Opcode { RIEc_CLGIJ = 0xec7d, RIEc_CLIJ = 0xec7f, + RIEd_ALHSIK = 0xecda, + RIEd_ALGHSIK = 0xecdb, + RIEf_RISBG = 0xec55, RIEg_LOCGHI = 0xec46, @@ -164,6 +176,8 @@ typedef enum S390Opcode { RRE_SLBGR = 0xb989, RRE_XGR = 0xb982, + RRFa_ALRK = 0xb9fa, + RRFa_ALGRK = 0xb9ea, RRFa_MGRK = 0xb9ec, RRFa_MSRKC = 0xb9fd, RRFa_MSGRKC = 0xb9ed, @@ -563,6 +577,20 @@ static bool tcg_target_const_match(int64_t val, int ct, } if (ct & TCG_CT_CONST_CMP) { + if (is_tst_cond(cond)) { + if (is_const_p16(uval) >= 0) { + return true; /* TMxx */ + } + if (risbg_mask(uval)) { + return true; /* RISBG */ + } + return false; + } + + if (type == TCG_TYPE_I32) { + return true; + } + switch (cond) { case TCG_COND_EQ: case TCG_COND_NE: @@ -582,13 +610,7 @@ static bool tcg_target_const_match(int64_t val, int ct, break; case TCG_COND_TSTNE: case TCG_COND_TSTEQ: - if (is_const_p16(uval) >= 0) { - return true; /* TMxx */ - } - if (risbg_mask(uval)) { - return true; /* RISBG */ - } - break; + /* checked above, fallthru */ default: g_assert_not_reached(); } @@ -597,7 +619,10 @@ static bool tcg_target_const_match(int64_t val, int ct, if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { return true; } - if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) { + if ((ct & TCG_CT_CONST_U32) && uval <= UINT32_MAX) { + return true; + } + if ((ct & TCG_CT_CONST_N32) && -uval <= UINT32_MAX) { return true; } if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) { @@ -606,6 +631,9 @@ static bool tcg_target_const_match(int64_t val, int ct, if ((ct & TCG_CT_CONST_ZERO) && val == 0) { return true; } + if ((ct & TCG_CT_CONST_M1) && val == -1) { + return true; + } if (ct & TCG_CT_CONST_INV) { val = ~val; @@ -657,8 +685,16 @@ static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2) tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff)); } +static void tcg_out_insn_RIEd(TCGContext *s, S390Opcode op, + TCGReg r1, TCGReg r3, int i2) +{ + tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3); + tcg_out16(s, i2); + tcg_out16(s, op & 0xff); +} + static void tcg_out_insn_RIEg(TCGContext *s, S390Opcode op, TCGReg r1, - int i2, int m3) + int i2, int m3) { tcg_out16(s, (op & 0xff00) | 
(r1 << 4) | m3); tcg_out32(s, (i2 << 16) | (op & 0xff)); @@ -932,25 +968,32 @@ static void tcg_out_movi(TCGContext *s, TCGType type, if (pc_off == (int32_t)pc_off) { tcg_out_insn(s, RIL, LARL, ret, pc_off); if (sval & 1) { - tcg_out_insn(s, RI, AGHI, ret, 1); + tcg_out_insn(s, RX, LA, ret, ret, TCG_REG_NONE, 1); } return; } - /* Otherwise, load it by parts. */ - i = is_const_p16((uint32_t)uval); - if (i >= 0) { - tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16)); - } else { - tcg_out_insn(s, RIL, LLILF, ret, uval); - } - uval >>= 32; - i = is_const_p16(uval); - if (i >= 0) { - tcg_out_insn_RI(s, oi_insns[i + 2], ret, uval >> (i * 16)); - } else { - tcg_out_insn(s, RIL, OIHF, ret, uval); + if (!s->carry_live) { + /* Load by parts, at most 2 instructions. */ + i = is_const_p16((uint32_t)uval); + if (i >= 0) { + tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16)); + } else { + tcg_out_insn(s, RIL, LLILF, ret, uval); + } + uval >>= 32; + i = is_const_p16(uval); + if (i >= 0) { + tcg_out_insn_RI(s, oi_insns[i + 2], ret, uval >> (i * 16)); + } else { + tcg_out_insn(s, RIL, OIHF, ret, uval); + } + return; } + + /* Otherwise, stuff it in the constant pool. */ + tcg_out_insn(s, RIL, LGRL, ret, 0); + new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2); } /* Emit a load/store type instruction. Inputs are: @@ -1351,9 +1394,9 @@ static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1, return tgen_cmp2(s, type, c, r1, c2, c2const, need_carry, &inv_cc); } -static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, - TCGReg dest, TCGReg c1, TCGArg c2, - bool c2const, bool neg) +static void tgen_setcond_int(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg c1, TCGArg c2, + bool c2const, bool neg) { int cc; @@ -1445,6 +1488,42 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, tcg_out_insn(s, RRFc, LOCGR, dest, TCG_TMP0, cc); } +static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) +{ + tgen_setcond_int(s, type, cond, dest, arg1, arg2, false, false); +} + +static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, tcg_target_long arg2) +{ + tgen_setcond_int(s, type, cond, dest, arg1, arg2, true, false); +} + +static const TCGOutOpSetcond outop_setcond = { + .base.static_constraint = C_O1_I2(r, r, rC), + .out_rrr = tgen_setcond, + .out_rri = tgen_setcondi, +}; + +static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) +{ + tgen_setcond_int(s, type, cond, dest, arg1, arg2, false, true); +} + +static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, tcg_target_long arg2) +{ + tgen_setcond_int(s, type, cond, dest, arg1, arg2, true, true); +} + +static const TCGOutOpSetcond outop_negsetcond = { + .base.static_constraint = C_O1_I2(r, r, rC), + .out_rrr = tgen_negsetcond, + .out_rri = tgen_negsetcondi, +}; + static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest, TCGArg v3, int v3const, TCGReg v4, int cc, int inv_cc) @@ -1485,9 +1564,9 @@ static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest, tcg_out_insn(s, RRFc, LOCGR, dest, src, cc); } -static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest, - TCGReg c1, TCGArg c2, int c2const, - TCGArg v3, int v3const, TCGReg v4) +static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, + TCGReg dest, TCGReg c1, TCGArg c2, bool c2const, + TCGArg v3, bool 
v3const, TCGArg v4, bool v4const) { int cc, inv_cc; @@ -1495,66 +1574,96 @@ static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest, tgen_movcond_int(s, type, dest, v3, v3const, v4, cc, inv_cc); } -static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1, - TCGArg a2, int a2const) -{ - /* Since this sets both R and R+1, we have no choice but to store the - result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */ - QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1); - tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1); +static const TCGOutOpMovcond outop_movcond = { + .base.static_constraint = C_O1_I4(r, r, rC, rI, r), + .out = tgen_movcond, +}; - if (a2const && a2 == 64) { - tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0); - return; - } +static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + TCGReg a2, unsigned ofs, unsigned len) +{ + unsigned lsb = (63 - ofs); + unsigned msb = lsb - (len - 1); /* - * Conditions from FLOGR are: - * 2 -> one bit found - * 8 -> no one bit found + * Since we can't support "0Z" as a constraint, we allow a1 in + * any register. Fix things up as if a matching constraint. */ - tgen_movcond_int(s, TCG_TYPE_I64, dest, a2, a2const, TCG_REG_R0, 8, 2); + if (a0 != a1) { + if (a0 == a2) { + tcg_out_mov(s, type, TCG_TMP0, a2); + a2 = TCG_TMP0; + } + tcg_out_mov(s, type, a0, a1); + } + tcg_out_risbg(s, a0, a2, msb, lsb, ofs, false); } -static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) +static void tgen_depositz(TCGContext *s, TCGType type, TCGReg a0, TCGReg a2, + unsigned ofs, unsigned len) { - /* With MIE3, and bit 0 of m4 set, we get the complete result. */ - if (HAVE_FACILITY(MISC_INSN_EXT3)) { - if (type == TCG_TYPE_I32) { + unsigned lsb = (63 - ofs); + unsigned msb = lsb - (len - 1); + tcg_out_risbg(s, a0, a2, msb, lsb, ofs, true); +} + +static const TCGOutOpDeposit outop_deposit = { + .base.static_constraint = C_O1_I2(r, rZ, r), + .out_rrr = tgen_deposit, + .out_rzr = tgen_depositz, +}; + +static void tgen_extract(TCGContext *s, TCGType type, TCGReg dest, + TCGReg src, unsigned ofs, unsigned len) +{ + if (ofs == 0) { + switch (len) { + case 8: + tcg_out_ext8u(s, dest, src); + return; + case 16: + tcg_out_ext16u(s, dest, src); + return; + case 32: tcg_out_ext32u(s, dest, src); - src = dest; + return; } - tcg_out_insn(s, RRFc, POPCNT, dest, src, 8); - return; } + tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1); +} - /* Without MIE3, each byte gets the count of bits for the byte. */ - tcg_out_insn(s, RRFc, POPCNT, dest, src, 0); +static const TCGOutOpExtract outop_extract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extract, +}; - /* Multiply to sum each byte at the top of the word. 
*/ - if (type == TCG_TYPE_I32) { - tcg_out_insn(s, RIL, MSFI, dest, 0x01010101); - tcg_out_sh32(s, RS_SRL, dest, TCG_REG_NONE, 24); - } else { - tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0x0101010101010101ull); - tcg_out_insn(s, RRE, MSGR, dest, TCG_TMP0); - tcg_out_sh64(s, RSY_SRLG, dest, dest, TCG_REG_NONE, 56); +static void tgen_sextract(TCGContext *s, TCGType type, TCGReg dest, + TCGReg src, unsigned ofs, unsigned len) +{ + if (ofs == 0) { + switch (len) { + case 8: + tcg_out_ext8s(s, TCG_TYPE_REG, dest, src); + return; + case 16: + tcg_out_ext16s(s, TCG_TYPE_REG, dest, src); + return; + case 32: + tcg_out_ext32s(s, dest, src); + return; + } } + g_assert_not_reached(); } -static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src, - int ofs, int len, int z) -{ - int lsb = (63 - ofs); - int msb = lsb - (len - 1); - tcg_out_risbg(s, dest, src, msb, lsb, ofs, z); -} +static const TCGOutOpExtract outop_sextract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_sextract, +}; -static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src, - int ofs, int len) -{ - tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1); -} +static const TCGOutOpExtract2 outop_extract2 = { + .base.static_constraint = C_NotImplemented, +}; static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest) { @@ -1580,6 +1689,11 @@ static void tgen_branch(TCGContext *s, int cc, TCGLabel *l) } } +static void tcg_out_br(TCGContext *s, TCGLabel *l) +{ + tgen_branch(s, S390_CC_ALWAYS, l); +} + static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc, TCGReg r1, TCGReg r2, TCGLabel *l) { @@ -1653,6 +1767,24 @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c, tgen_branch(s, cc, l); } +static void tgen_brcondr(TCGContext *s, TCGType type, TCGCond c, + TCGReg a0, TCGReg a1, TCGLabel *l) +{ + tgen_brcond(s, type, c, a0, a1, false, l); +} + +static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond c, + TCGReg a0, tcg_target_long a1, TCGLabel *l) +{ + tgen_brcond(s, type, c, a0, a1, true, l); +} + +static const TCGOutOpBrcond outop_brcond = { + .base.static_constraint = C_O0_I2(r, rC), + .out_rr = tgen_brcondr, + .out_ri = tgen_brcondi, +}; + static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *dest) { ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1; @@ -1869,10 +2001,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addr_reg; + ldst->addr_reg = addr_reg; tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE, - s->page_bits - CPU_TLB_ENTRY_BITS); + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off); tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off); @@ -1884,7 +2016,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, * byte of the access. */ a_off = (a_mask >= s_mask ? 
0 : s_mask - a_mask); - tlb_mask = (uint64_t)s->page_mask | a_mask; + tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; if (a_off == 0) { tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask); } else { @@ -1923,7 +2055,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addr_reg; + ldst->addr_reg = addr_reg; tcg_debug_assert(a_mask <= 0xffff); tcg_out_insn(s, RI, TMLL, addr_reg, a_mask); @@ -1949,8 +2081,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, return ldst; } -static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, - MemOpIdx oi, TCGType data_type) +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data_reg, + TCGReg addr_reg, MemOpIdx oi) { TCGLabelQemuLdst *ldst; HostAddress h; @@ -1959,14 +2091,19 @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, tcg_out_qemu_ld_direct(s, get_memop(oi), data_reg, h); if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = data_reg; ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } } -static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, - MemOpIdx oi, TCGType data_type) +static const TCGOutOpQemuLdSt outop_qemu_ld = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_qemu_ld, +}; + +static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data_reg, + TCGReg addr_reg, MemOpIdx oi) { TCGLabelQemuLdst *ldst; HostAddress h; @@ -1975,12 +2112,17 @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, tcg_out_qemu_st_direct(s, get_memop(oi), data_reg, h); if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = data_reg; ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } } +static const TCGOutOpQemuLdSt outop_qemu_st = { + .base.static_constraint = C_O0_I2(r, r), + .out = tgen_qemu_st, +}; + static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi, TCGReg addr_reg, MemOpIdx oi, bool is_ld) { @@ -2055,6 +2197,28 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi, } } +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr_reg, MemOpIdx oi) +{ + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, true); +} + +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = { + .base.static_constraint = C_O2_I1(o, m, r), + .out = tgen_qemu_ld2, +}; + +static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr_reg, MemOpIdx oi) +{ + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, false); +} + +static const TCGOutOpQemuLdSt2 outop_qemu_st2 = { + .base.static_constraint = C_O0_I3(o, m, r), + .out = tgen_qemu_st2, +}; + static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) { /* Reuse the zeroing that exists for goto_ptr. 
*/ @@ -2081,6 +2245,11 @@ static void tcg_out_goto_tb(TCGContext *s, int which) set_jmp_reset_offset(s, which); } +static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0) +{ + tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0); +} + void tb_target_set_jmp_target(const TranslationBlock *tb, int n, uintptr_t jmp_rx, uintptr_t jmp_rw) { @@ -2094,664 +2263,903 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n, /* no need to flush icache explicitly */ } -# define OP_32_64(x) \ - case glue(glue(INDEX_op_,x),_i32): \ - case glue(glue(INDEX_op_,x),_i64) -static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) +static void tgen_add(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) { - S390Opcode op, op2; - TCGArg a0, a1, a2; - - switch (opc) { - case INDEX_op_goto_ptr: - a0 = args[0]; - tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0); - break; - - OP_32_64(ld8u): - /* ??? LLC (RXY format) is only present with the extended-immediate - facility, whereas LLGC is always present. */ - tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]); - break; - - OP_32_64(ld8s): - /* ??? LB is no smaller than LGB, so no point to using it. */ - tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]); - break; - - OP_32_64(ld16u): - /* ??? LLH (RXY format) is only present with the extended-immediate - facility, whereas LLGH is always present. */ - tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]); - break; - - case INDEX_op_ld16s_i32: - tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]); - break; - - case INDEX_op_ld_i32: - tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]); - break; - - OP_32_64(st8): - tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1], - TCG_REG_NONE, args[2]); - break; - - OP_32_64(st16): - tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1], - TCG_REG_NONE, args[2]); - break; - - case INDEX_op_st_i32: - tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]); - break; + if (a0 != a1) { + tcg_out_insn(s, RX, LA, a0, a1, a2, 0); + } else if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RR, AR, a0, a2); + } else { + tcg_out_insn(s, RRE, AGR, a0, a2); + } +} - case INDEX_op_add_i32: - a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; - if (const_args[2]) { - do_addi_32: - if (a0 == a1) { - if (a2 == (int16_t)a2) { - tcg_out_insn(s, RI, AHI, a0, a2); - break; - } +static void tgen_addi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (a0 == a1) { + if (type == TCG_TYPE_I32) { + if (a2 == (int16_t)a2) { + tcg_out_insn(s, RI, AHI, a0, a2); + } else { tcg_out_insn(s, RIL, AFI, a0, a2); - break; } - tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2); - } else if (a0 == a1) { - tcg_out_insn(s, RR, AR, a0, a2); - } else { - tcg_out_insn(s, RX, LA, a0, a1, a2, 0); + return; } - break; - case INDEX_op_sub_i32: - a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; - if (const_args[2]) { - a2 = -a2; - goto do_addi_32; - } else if (a0 == a1) { - tcg_out_insn(s, RR, SR, a0, a2); - } else { - tcg_out_insn(s, RRFa, SRK, a0, a1, a2); + if (a2 == (int16_t)a2) { + tcg_out_insn(s, RI, AGHI, a0, a2); + return; } - break; - - case INDEX_op_and_i32: - a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I32, a0, a1); - tgen_andi(s, TCG_TYPE_I32, a0, a2); - } else if (a0 == a1) { - tcg_out_insn(s, RR, NR, a0, a2); - } else { - tcg_out_insn(s, RRFa, NRK, a0, a1, a2); + if 
(a2 == (int32_t)a2) { + tcg_out_insn(s, RIL, AGFI, a0, a2); + return; } - break; - case INDEX_op_or_i32: - a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I32, a0, a1); - tgen_ori(s, a0, a2); - } else if (a0 == a1) { - tcg_out_insn(s, RR, OR, a0, a2); - } else { - tcg_out_insn(s, RRFa, ORK, a0, a1, a2); + if (a2 == (uint32_t)a2) { + tcg_out_insn(s, RIL, ALGFI, a0, a2); + return; } - break; - case INDEX_op_xor_i32: - a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I32, a0, a1); - tcg_out_insn(s, RIL, XILF, a0, a2); - } else if (a0 == a1) { - tcg_out_insn(s, RR, XR, args[0], args[2]); - } else { - tcg_out_insn(s, RRFa, XRK, a0, a1, a2); + if (-a2 == (uint32_t)-a2) { + tcg_out_insn(s, RIL, SLGFI, a0, -a2); + return; } - break; + } + tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2); +} - case INDEX_op_andc_i32: - a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I32, a0, a1); - tgen_andi(s, TCG_TYPE_I32, a0, (uint32_t)~a2); - } else { - tcg_out_insn(s, RRFa, NCRK, a0, a1, a2); - } - break; - case INDEX_op_orc_i32: - a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I32, a0, a1); - tgen_ori(s, a0, (uint32_t)~a2); - } else { - tcg_out_insn(s, RRFa, OCRK, a0, a1, a2); - } - break; - case INDEX_op_eqv_i32: - a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I32, a0, a1); - tcg_out_insn(s, RIL, XILF, a0, ~a2); - } else { - tcg_out_insn(s, RRFa, NXRK, a0, a1, a2); - } - break; - case INDEX_op_nand_i32: - tcg_out_insn(s, RRFa, NNRK, args[0], args[1], args[2]); - break; - case INDEX_op_nor_i32: - tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[2]); - break; +static const TCGOutOpBinary outop_add = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_add, + .out_rri = tgen_addi, +}; - case INDEX_op_neg_i32: - tcg_out_insn(s, RR, LCR, args[0], args[1]); - break; - case INDEX_op_not_i32: - tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[1]); - break; +static void tgen_addco_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type != TCG_TYPE_I32) { + tcg_out_insn(s, RRFa, ALGRK, a0, a1, a2); + } else if (a0 == a1) { + tcg_out_insn(s, RR, ALR, a0, a2); + } else { + tcg_out_insn(s, RRFa, ALRK, a0, a1, a2); + } +} - case INDEX_op_mul_i32: - a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I32, a0, a1); - if (a2 == (int16_t)a2) { - tcg_out_insn(s, RI, MHI, a0, a2); - } else { - tcg_out_insn(s, RIL, MSFI, a0, a2); - } - } else if (a0 == a1) { - tcg_out_insn(s, RRE, MSR, a0, a2); +static void tgen_addco_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (a2 == (int16_t)a2) { + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RIEd, ALHSIK, a0, a1, a2); } else { - tcg_out_insn(s, RRFa, MSRKC, a0, a1, a2); + tcg_out_insn(s, RIEd, ALGHSIK, a0, a1, a2); } - break; + return; + } - case INDEX_op_div2_i32: - tcg_debug_assert(args[0] == args[2]); - tcg_debug_assert(args[1] == args[3]); - tcg_debug_assert((args[1] & 1) == 0); - tcg_debug_assert(args[0] == args[1] + 1); - tcg_out_insn(s, RR, DR, args[1], args[4]); - break; - case INDEX_op_divu2_i32: - tcg_debug_assert(args[0] == args[2]); - tcg_debug_assert(args[1] == args[3]); - tcg_debug_assert((args[1] & 1) == 0); - tcg_debug_assert(args[0] == args[1] + 1); - tcg_out_insn(s, RRE, 
DLR, args[1], args[4]); - break; + tcg_out_mov(s, type, a0, a1); + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RIL, ALFI, a0, a2); + } else if (a2 >= 0) { + tcg_out_insn(s, RIL, ALGFI, a0, a2); + } else { + tcg_out_insn(s, RIL, SLGFI, a0, -a2); + } +} - case INDEX_op_shl_i32: - op = RS_SLL; - op2 = RSY_SLLK; - do_shift32: - a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; - if (a0 == a1) { - if (const_args[2]) { - tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2); - } else { - tcg_out_sh32(s, op, a0, a2, 0); - } - } else { - /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */ - if (const_args[2]) { - tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2); - } else { - tcg_out_sh64(s, op2, a0, a1, a2, 0); - } - } - break; - case INDEX_op_shr_i32: - op = RS_SRL; - op2 = RSY_SRLK; - goto do_shift32; - case INDEX_op_sar_i32: - op = RS_SRA; - op2 = RSY_SRAK; - goto do_shift32; +static const TCGOutOpBinary outop_addco = { + .base.static_constraint = C_O1_I2(r, r, rUV), + .out_rrr = tgen_addco_rrr, + .out_rri = tgen_addco_rri, +}; - case INDEX_op_rotl_i32: - /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */ - if (const_args[2]) { - tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]); - } else { - tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0); - } - break; - case INDEX_op_rotr_i32: - if (const_args[2]) { - tcg_out_sh64(s, RSY_RLL, args[0], args[1], - TCG_REG_NONE, (32 - args[2]) & 31); - } else { - tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]); - tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0); - } - break; +static void tgen_addcio(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RRE, ALCR, a0, a2); + } else { + tcg_out_insn(s, RRE, ALCGR, a0, a2); + } +} - case INDEX_op_bswap16_i32: - a0 = args[0], a1 = args[1], a2 = args[2]; - tcg_out_insn(s, RRE, LRVR, a0, a1); - if (a2 & TCG_BSWAP_OS) { - tcg_out_sh32(s, RS_SRA, a0, TCG_REG_NONE, 16); - } else { - tcg_out_sh32(s, RS_SRL, a0, TCG_REG_NONE, 16); - } - break; - case INDEX_op_bswap16_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - tcg_out_insn(s, RRE, LRVGR, a0, a1); - if (a2 & TCG_BSWAP_OS) { - tcg_out_sh64(s, RSY_SRAG, a0, a0, TCG_REG_NONE, 48); - } else { - tcg_out_sh64(s, RSY_SRLG, a0, a0, TCG_REG_NONE, 48); - } - break; +static const TCGOutOpBinary outop_addcio = { + .base.static_constraint = C_O1_I2(r, 0, r), + .out_rrr = tgen_addcio, +}; - case INDEX_op_bswap32_i32: - tcg_out_insn(s, RRE, LRVR, args[0], args[1]); - break; - case INDEX_op_bswap32_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - tcg_out_insn(s, RRE, LRVR, a0, a1); - if (a2 & TCG_BSWAP_OS) { - tcg_out_ext32s(s, a0, a0); - } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { - tcg_out_ext32u(s, a0, a0); +static const TCGOutOpAddSubCarry outop_addci = { + .base.static_constraint = C_O1_I2(r, 0, r), + .out_rrr = tgen_addcio, +}; + +static void tcg_out_set_carry(TCGContext *s) +{ + tcg_out_insn(s, RR, SLR, TCG_REG_R0, TCG_REG_R0); /* cc = 2 */ +} + +static void tgen_and(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type != TCG_TYPE_I32) { + tcg_out_insn(s, RRFa, NGRK, a0, a1, a2); + } else if (a0 == a1) { + tcg_out_insn(s, RR, NR, a0, a2); + } else { + tcg_out_insn(s, RRFa, NRK, a0, a1, a2); + } +} + +static void tgen_andi_3(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_mov(s, type, a0, a1); + tgen_andi(s, type, a0, a2); +} + +static const TCGOutOpBinary outop_and = { + 
.base.static_constraint = C_O1_I2(r, r, rNKR), + .out_rrr = tgen_and, + .out_rri = tgen_andi_3, +}; + +static void tgen_andc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RRFa, NCRK, a0, a1, a2); + } else { + tcg_out_insn(s, RRFa, NCGRK, a0, a1, a2); + } +} + +static TCGConstraintSetIndex cset_misc3_rrr(TCGType type, unsigned flags) +{ + return HAVE_FACILITY(MISC_INSN_EXT3) ? C_O1_I2(r, r, r) : C_NotImplemented; +} + +static const TCGOutOpBinary outop_andc = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_misc3_rrr, + .out_rrr = tgen_andc, +}; + +static void tgen_clz_int(TCGContext *s, TCGReg dest, TCGReg a1, + TCGArg a2, int a2const) +{ + /* + * Since this sets both R and R+1, we have no choice but to store the + * result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. + */ + QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1); + tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1); + + if (a2const && a2 == 64) { + tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0); + return; + } + + /* + * Conditions from FLOGR are: + * 2 -> one bit found + * 8 -> no one bit found + */ + tgen_movcond_int(s, TCG_TYPE_I64, dest, a2, a2const, TCG_REG_R0, 8, 2); +} + +static void tgen_clz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tgen_clz_int(s, a0, a1, a2, false); +} + +static void tgen_clzi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tgen_clz_int(s, a0, a1, a2, true); +} + +static TCGConstraintSetIndex cset_clz(TCGType type, unsigned flags) +{ + return type == TCG_TYPE_I64 ? C_O1_I2(r, r, rI) : C_NotImplemented; +} + +static const TCGOutOpBinary outop_clz = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_clz, + .out_rrr = tgen_clz, + .out_rri = tgen_clzi, +}; + +static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) +{ + /* With MIE3, and bit 0 of m4 set, we get the complete result. */ + if (HAVE_FACILITY(MISC_INSN_EXT3)) { + if (type == TCG_TYPE_I32) { + tcg_out_ext32u(s, dest, src); + src = dest; } - break; + tcg_out_insn(s, RRFc, POPCNT, dest, src, 8); + return; + } - case INDEX_op_add2_i32: - if (const_args[4]) { - tcg_out_insn(s, RIL, ALFI, args[0], args[4]); + /* Without MIE3, each byte gets the count of bits for the byte. */ + tcg_out_insn(s, RRFc, POPCNT, dest, src, 0); + + /* Multiply to sum each byte at the top of the word. */ + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RIL, MSFI, dest, 0x01010101); + tcg_out_sh32(s, RS_SRL, dest, TCG_REG_NONE, 24); + } else { + tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0x0101010101010101ull); + tcg_out_insn(s, RRE, MSGR, dest, TCG_TMP0); + tcg_out_sh64(s, RSY_SRLG, dest, dest, TCG_REG_NONE, 56); + } +} + +static const TCGOutOpUnary outop_ctpop = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_ctpop, +}; + +static const TCGOutOpBinary outop_ctz = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_divs = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_divs2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a4) +{ + tcg_debug_assert((a1 & 1) == 0); + tcg_debug_assert(a0 == a1 + 1); + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RR, DR, a1, a4); + } else { + /* + * TODO: Move the sign-extend of the numerator from a2 into a3 + * into the tcg backend, instead of in early expansion. It is + * required for 32-bit DR, but not 64-bit DSGR. 
+ */ + tcg_out_insn(s, RRE, DSGR, a1, a4); + } +} + +static const TCGOutOpDivRem outop_divs2 = { + .base.static_constraint = C_O2_I3(o, m, 0, 1, r), + .out_rr01r = tgen_divs2, +}; + +static const TCGOutOpBinary outop_divu = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_divu2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a4) +{ + tcg_debug_assert((a1 & 1) == 0); + tcg_debug_assert(a0 == a1 + 1); + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RRE, DLR, a1, a4); + } else { + tcg_out_insn(s, RRE, DLGR, a1, a4); + } +} + +static const TCGOutOpDivRem outop_divu2 = { + .base.static_constraint = C_O2_I3(o, m, 0, 1, r), + .out_rr01r = tgen_divu2, +}; + +static void tgen_eqv(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RRFa, NXRK, a0, a1, a2); + } else { + tcg_out_insn(s, RRFa, NXGRK, a0, a1, a2); + } +} + +static const TCGOutOpBinary outop_eqv = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_misc3_rrr, + .out_rrr = tgen_eqv, +}; + +static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) +{ + tcg_out_sh64(s, RSY_SRLG, a0, a1, TCG_REG_NONE, 32); +} + +static const TCGOutOpUnary outop_extrh_i64_i32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extrh_i64_i32, +}; + +static void tgen_mul(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + if (a0 == a1) { + tcg_out_insn(s, RRE, MSR, a0, a2); } else { - tcg_out_insn(s, RR, ALR, args[0], args[4]); + tcg_out_insn(s, RRFa, MSRKC, a0, a1, a2); } - tcg_out_insn(s, RRE, ALCR, args[1], args[5]); - break; - case INDEX_op_sub2_i32: - if (const_args[4]) { - tcg_out_insn(s, RIL, SLFI, args[0], args[4]); + } else { + if (a0 == a1) { + tcg_out_insn(s, RRE, MSGR, a0, a2); } else { - tcg_out_insn(s, RR, SLR, args[0], args[4]); + tcg_out_insn(s, RRFa, MSGRKC, a0, a1, a2); } - tcg_out_insn(s, RRE, SLBR, args[1], args[5]); - break; + } +} - case INDEX_op_br: - tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0])); - break; +static void tgen_muli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_mov(s, type, a0, a1); + if (type == TCG_TYPE_I32) { + if (a2 == (int16_t)a2) { + tcg_out_insn(s, RI, MHI, a0, a2); + } else { + tcg_out_insn(s, RIL, MSFI, a0, a2); + } + } else { + if (a2 == (int16_t)a2) { + tcg_out_insn(s, RI, MGHI, a0, a2); + } else { + tcg_out_insn(s, RIL, MSGFI, a0, a2); + } + } +} - case INDEX_op_brcond_i32: - tgen_brcond(s, TCG_TYPE_I32, args[2], args[0], - args[1], const_args[1], arg_label(args[3])); - break; - case INDEX_op_setcond_i32: - tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], - args[2], const_args[2], false); - break; - case INDEX_op_negsetcond_i32: - tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], - args[2], const_args[2], true); - break; - case INDEX_op_movcond_i32: - tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], - args[2], const_args[2], args[3], const_args[3], args[4]); - break; +static TCGConstraintSetIndex cset_mul(TCGType type, unsigned flags) +{ + return (HAVE_FACILITY(MISC_INSN_EXT2) + ? 
C_O1_I2(r, r, rJ) + : C_O1_I2(r, 0, rJ)); +} - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: - tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32); - break; - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_ld_a64_i64: - tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64); - break; - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st_a64_i32: - tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32); - break; - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_st_a64_i64: - tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64); - break; - case INDEX_op_qemu_ld_a32_i128: - case INDEX_op_qemu_ld_a64_i128: - tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true); - break; - case INDEX_op_qemu_st_a32_i128: - case INDEX_op_qemu_st_a64_i128: - tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false); - break; +static const TCGOutOpBinary outop_mul = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mul, + .out_rrr = tgen_mul, + .out_rri = tgen_muli, +}; + +static void tgen_muls2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3) +{ + tcg_debug_assert((a1 & 1) == 0); + tcg_debug_assert(a0 == a1 + 1); + tcg_out_insn(s, RRFa, MGRK, a1, a2, a3); +} + +static TCGConstraintSetIndex cset_muls2(TCGType type, unsigned flags) +{ + return (type == TCG_TYPE_I64 && HAVE_FACILITY(MISC_INSN_EXT2) + ? C_O2_I2(o, m, r, r) : C_NotImplemented); +} + +static const TCGOutOpMul2 outop_muls2 = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_muls2, + .out_rrrr = tgen_muls2, +}; + +static const TCGOutOpBinary outop_mulsh = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_mulu2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3) +{ + tcg_debug_assert(a0 == a2); + tcg_debug_assert((a1 & 1) == 0); + tcg_debug_assert(a0 == a1 + 1); + tcg_out_insn(s, RRE, MLGR, a1, a3); +} + +static TCGConstraintSetIndex cset_mulu2(TCGType type, unsigned flags) +{ + return (type == TCG_TYPE_I64 && HAVE_FACILITY(MISC_INSN_EXT2) + ? 
C_O2_I2(o, m, 0, r) : C_NotImplemented); +} + +static const TCGOutOpMul2 outop_mulu2 = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mulu2, + .out_rrrr = tgen_mulu2, +}; + +static const TCGOutOpBinary outop_muluh = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_nand(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RRFa, NNRK, a0, a1, a2); + } else { + tcg_out_insn(s, RRFa, NNGRK, a0, a1, a2); + } +} + +static const TCGOutOpBinary outop_nand = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_misc3_rrr, + .out_rrr = tgen_nand, +}; + +static void tgen_nor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RRFa, NORK, a0, a1, a2); + } else { + tcg_out_insn(s, RRFa, NOGRK, a0, a1, a2); + } +} + +static const TCGOutOpBinary outop_nor = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_misc3_rrr, + .out_rrr = tgen_nor, +}; + +static void tgen_or(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type != TCG_TYPE_I32) { + tcg_out_insn(s, RRFa, OGRK, a0, a1, a2); + } else if (a0 == a1) { + tcg_out_insn(s, RR, OR, a0, a2); + } else { + tcg_out_insn(s, RRFa, ORK, a0, a1, a2); + } +} + +static void tgen_ori_3(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_mov(s, type, a0, a1); + tgen_ori(s, a0, type == TCG_TYPE_I32 ? (uint32_t)a2 : a2); +} + +static const TCGOutOpBinary outop_or = { + .base.static_constraint = C_O1_I2(r, r, rK), + .out_rrr = tgen_or, + .out_rri = tgen_ori_3, +}; + +static void tgen_orc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RRFa, OCRK, a0, a1, a2); + } else { + tcg_out_insn(s, RRFa, OCGRK, a0, a1, a2); + } +} + +static const TCGOutOpBinary outop_orc = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_misc3_rrr, + .out_rrr = tgen_orc, +}; + +static const TCGOutOpBinary outop_rems = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_remu = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_rotl_int(TCGContext *s, TCGType type, TCGReg dst, + TCGReg src, TCGReg v, tcg_target_long i) +{ + S390Opcode insn = type == TCG_TYPE_I32 ? 
RSY_RLL : RSY_RLLG; + tcg_out_sh64(s, insn, dst, src, v, i); +} + +static void tgen_rotl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tgen_rotl_int(s, type, a0, a1, a2, 0); +} + +static void tgen_rotli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tgen_rotl_int(s, type, a0, a1, TCG_REG_NONE, a2); +} + +static const TCGOutOpBinary outop_rotl = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_rotl, + .out_rri = tgen_rotli, +}; + +static const TCGOutOpBinary outop_rotr = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_sar_int(TCGContext *s, TCGType type, TCGReg dst, + TCGReg src, TCGReg v, tcg_target_long i) +{ + if (type != TCG_TYPE_I32) { + tcg_out_sh64(s, RSY_SRAG, dst, src, v, i); + } else if (dst == src) { + tcg_out_sh32(s, RS_SRA, dst, v, i); + } else { + tcg_out_sh64(s, RSY_SRAK, dst, src, v, i); + } +} + +static void tgen_sar(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tgen_sar_int(s, type, a0, a1, a2, 0); +} + +static void tgen_sari(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tgen_sar_int(s, type, a0, a1, TCG_REG_NONE, a2); +} + +static const TCGOutOpBinary outop_sar = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_sar, + .out_rri = tgen_sari, +}; + +static void tgen_shl_int(TCGContext *s, TCGType type, TCGReg dst, + TCGReg src, TCGReg v, tcg_target_long i) +{ + if (type != TCG_TYPE_I32) { + tcg_out_sh64(s, RSY_SLLG, dst, src, v, i); + } else if (dst == src) { + tcg_out_sh32(s, RS_SLL, dst, v, i); + } else { + tcg_out_sh64(s, RSY_SLLK, dst, src, v, i); + } +} + +static void tgen_shl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tgen_shl_int(s, type, a0, a1, a2, 0); +} + +static void tgen_shli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tgen_shl_int(s, type, a0, a1, TCG_REG_NONE, a2); +} + +static const TCGOutOpBinary outop_shl = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shl, + .out_rri = tgen_shli, +}; + +static void tgen_shr_int(TCGContext *s, TCGType type, TCGReg dst, + TCGReg src, TCGReg v, tcg_target_long i) +{ + if (type != TCG_TYPE_I32) { + tcg_out_sh64(s, RSY_SRLG, dst, src, v, i); + } else if (dst == src) { + tcg_out_sh32(s, RS_SRL, dst, v, i); + } else { + tcg_out_sh64(s, RSY_SRLK, dst, src, v, i); + } +} + +static void tgen_shr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tgen_shr_int(s, type, a0, a1, a2, 0); +} + +static void tgen_shri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tgen_shr_int(s, type, a0, a1, TCG_REG_NONE, a2); +} + +static const TCGOutOpBinary outop_shr = { + .base.static_constraint = C_O1_I2(r, r, ri), + .out_rrr = tgen_shr, + .out_rri = tgen_shri, +}; + +static void tgen_sub(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type != TCG_TYPE_I32) { + tcg_out_insn(s, RRFa, SGRK, a0, a1, a2); + } else if (a0 == a1) { + tcg_out_insn(s, RR, SR, a0, a2); + } else { + tcg_out_insn(s, RRFa, SRK, a0, a1, a2); + } +} + +static const TCGOutOpSubtract outop_sub = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_sub, +}; + +static void tgen_subbo_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type != TCG_TYPE_I32) { + tcg_out_insn(s, RRFa, SLGRK, a0, a1, a2); + } else if (a0 == a1) { + tcg_out_insn(s, RR, SLR, a0, a2); + } else { + tcg_out_insn(s, RRFa, SLRK, a0, a1, a2); + } +} + 
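+/*
+ * Immediate form of subtract with borrow-out: copy the source, then
+ * use SLFI/SLGFI, or ALGFI of the negated value when the 64-bit
+ * immediate is negative.
+ */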
+static void tgen_subbo_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_mov(s, type, a0, a1); + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RIL, SLFI, a0, a2); + } else if (a2 >= 0) { + tcg_out_insn(s, RIL, SLGFI, a0, a2); + } else { + tcg_out_insn(s, RIL, ALGFI, a0, -a2); + } +} + +static const TCGOutOpAddSubCarry outop_subbo = { + .base.static_constraint = C_O1_I2(r, r, rUV), + .out_rrr = tgen_subbo_rrr, + .out_rri = tgen_subbo_rri, +}; + +static void tgen_subbio(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RRE, SLBR, a0, a2); + } else { + tcg_out_insn(s, RRE, SLBGR, a0, a2); + } +} + +static const TCGOutOpAddSubCarry outop_subbio = { + .base.static_constraint = C_O1_I2(r, 0, r), + .out_rrr = tgen_subbio, +}; + +#define outop_subbi outop_subbio + +static void tcg_out_set_borrow(TCGContext *s) +{ + tcg_out_insn(s, RR, CLR, TCG_REG_R0, TCG_REG_R0); /* cc = 0 */ +} + +static void tgen_xor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type != TCG_TYPE_I32) { + tcg_out_insn(s, RRFa, XGRK, a0, a1, a2); + } else if (a0 == a1) { + tcg_out_insn(s, RR, XR, a0, a2); + } else { + tcg_out_insn(s, RRFa, XRK, a0, a1, a2); + } +} + +static void tgen_xori_3(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_mov(s, type, a0, a1); + tgen_xori(s, a0, type == TCG_TYPE_I32 ? (uint32_t)a2 : a2); +} + +static const TCGOutOpBinary outop_xor = { + .base.static_constraint = C_O1_I2(r, r, rK), + .out_rrr = tgen_xor, + .out_rri = tgen_xori_3, +}; + +static void tgen_bswap16(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, unsigned flags) +{ + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RRE, LRVR, a0, a1); + tcg_out_sh32(s, (flags & TCG_BSWAP_OS ? RS_SRA : RS_SRL), + a0, TCG_REG_NONE, 16); + } else { + tcg_out_insn(s, RRE, LRVGR, a0, a1); + tcg_out_sh64(s, (flags & TCG_BSWAP_OS ? 
RSY_SRAG : RSY_SRLG), + a0, a0, TCG_REG_NONE, 48); + } +} + +static const TCGOutOpBswap outop_bswap16 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap16, +}; + +static void tgen_bswap32(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, unsigned flags) +{ + tcg_out_insn(s, RRE, LRVR, a0, a1); + if (flags & TCG_BSWAP_OS) { + tcg_out_ext32s(s, a0, a0); + } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + tcg_out_ext32u(s, a0, a0); + } +} + +static const TCGOutOpBswap outop_bswap32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap32, +}; + +static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tcg_out_insn(s, RRE, LRVGR, a0, a1); +} + +static const TCGOutOpUnary outop_bswap64 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap64, +}; + +static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RR, LCR, a0, a1); + } else { + tcg_out_insn(s, RRE, LCGR, a0, a1); + } +} + +static const TCGOutOpUnary outop_neg = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_neg, +}; + +static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tgen_nor(s, type, a0, a1, a1); +} - case INDEX_op_ld16s_i64: - tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]); - break; - case INDEX_op_ld32u_i64: - tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]); - break; - case INDEX_op_ld32s_i64: - tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]); - break; - case INDEX_op_ld_i64: - tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]); - break; +static TCGConstraintSetIndex cset_not(TCGType type, unsigned flags) +{ + return HAVE_FACILITY(MISC_INSN_EXT3) ? C_O1_I1(r, r) : C_NotImplemented; +} - case INDEX_op_st32_i64: - tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]); - break; - case INDEX_op_st_i64: - tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]); - break; +static const TCGOutOpUnary outop_not = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_not, + .out_rr = tgen_not, +}; - case INDEX_op_add_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - do_addi_64: - if (a0 == a1) { - if (a2 == (int16_t)a2) { - tcg_out_insn(s, RI, AGHI, a0, a2); - break; - } - if (a2 == (int32_t)a2) { - tcg_out_insn(s, RIL, AGFI, a0, a2); - break; - } - if (a2 == (uint32_t)a2) { - tcg_out_insn(s, RIL, ALGFI, a0, a2); - break; - } - if (-a2 == (uint32_t)-a2) { - tcg_out_insn(s, RIL, SLGFI, a0, -a2); - break; - } - } - tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2); - } else if (a0 == a1) { - tcg_out_insn(s, RRE, AGR, a0, a2); - } else { - tcg_out_insn(s, RX, LA, a0, a1, a2, 0); - } - break; - case INDEX_op_sub_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - a2 = -a2; - goto do_addi_64; - } else { - tcg_out_insn(s, RRFa, SGRK, a0, a1, a2); - } - break; +static void tcg_out_mb(TCGContext *s, unsigned a0) +{ + /* + * The host memory model is quite strong, we simply need to + * serialize the instruction stream. 
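+     * Only store-load ordering is not already implied by
+     * TCG_TARGET_DEFAULT_MO, so a serializing BCR is emitted just for
+     * TCG_MO_ST_LD.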
+ */ + if (a0 & TCG_MO_ST_LD) { + /* fast-bcr-serialization facility (45) is present */ + tcg_out_insn(s, RR, BCR, 14, 0); + } +} - case INDEX_op_and_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I64, a0, a1); - tgen_andi(s, TCG_TYPE_I64, args[0], args[2]); - } else { - tcg_out_insn(s, RRFa, NGRK, a0, a1, a2); - } - break; - case INDEX_op_or_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I64, a0, a1); - tgen_ori(s, a0, a2); - } else { - tcg_out_insn(s, RRFa, OGRK, a0, a1, a2); - } - break; - case INDEX_op_xor_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I64, a0, a1); - tgen_xori(s, a0, a2); - } else { - tcg_out_insn(s, RRFa, XGRK, a0, a1, a2); - } - break; +static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem(s, 0, RXY_LLGC, dest, base, TCG_REG_NONE, offset); +} - case INDEX_op_andc_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I64, a0, a1); - tgen_andi(s, TCG_TYPE_I64, a0, ~a2); - } else { - tcg_out_insn(s, RRFa, NCGRK, a0, a1, a2); - } - break; - case INDEX_op_orc_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I64, a0, a1); - tgen_ori(s, a0, ~a2); - } else { - tcg_out_insn(s, RRFa, OCGRK, a0, a1, a2); - } - break; - case INDEX_op_eqv_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I64, a0, a1); - tgen_xori(s, a0, ~a2); - } else { - tcg_out_insn(s, RRFa, NXGRK, a0, a1, a2); - } - break; - case INDEX_op_nand_i64: - tcg_out_insn(s, RRFa, NNGRK, args[0], args[1], args[2]); - break; - case INDEX_op_nor_i64: - tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[2]); - break; +static const TCGOutOpLoad outop_ld8u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8u, +}; - case INDEX_op_neg_i64: - tcg_out_insn(s, RRE, LCGR, args[0], args[1]); - break; - case INDEX_op_not_i64: - tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[1]); - break; - case INDEX_op_bswap64_i64: - tcg_out_insn(s, RRE, LRVGR, args[0], args[1]); - break; +static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem(s, 0, RXY_LGB, dest, base, TCG_REG_NONE, offset); +} - case INDEX_op_mul_i64: - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[2]) { - tcg_out_mov(s, TCG_TYPE_I64, a0, a1); - if (a2 == (int16_t)a2) { - tcg_out_insn(s, RI, MGHI, a0, a2); - } else { - tcg_out_insn(s, RIL, MSGFI, a0, a2); - } - } else if (a0 == a1) { - tcg_out_insn(s, RRE, MSGR, a0, a2); - } else { - tcg_out_insn(s, RRFa, MSGRKC, a0, a1, a2); - } - break; +static const TCGOutOpLoad outop_ld8s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8s, +}; - case INDEX_op_div2_i64: - /* - * ??? We get an unnecessary sign-extension of the dividend - * into op0 with this definition, but as we do in fact always - * produce both quotient and remainder using INDEX_op_div_i64 - * instead requires jumping through even more hoops. 
- */ - tcg_debug_assert(args[0] == args[2]); - tcg_debug_assert(args[1] == args[3]); - tcg_debug_assert((args[1] & 1) == 0); - tcg_debug_assert(args[0] == args[1] + 1); - tcg_out_insn(s, RRE, DSGR, args[1], args[4]); - break; - case INDEX_op_divu2_i64: - tcg_debug_assert(args[0] == args[2]); - tcg_debug_assert(args[1] == args[3]); - tcg_debug_assert((args[1] & 1) == 0); - tcg_debug_assert(args[0] == args[1] + 1); - tcg_out_insn(s, RRE, DLGR, args[1], args[4]); - break; - case INDEX_op_mulu2_i64: - tcg_debug_assert(args[0] == args[2]); - tcg_debug_assert((args[1] & 1) == 0); - tcg_debug_assert(args[0] == args[1] + 1); - tcg_out_insn(s, RRE, MLGR, args[1], args[3]); - break; - case INDEX_op_muls2_i64: - tcg_debug_assert((args[1] & 1) == 0); - tcg_debug_assert(args[0] == args[1] + 1); - tcg_out_insn(s, RRFa, MGRK, args[1], args[2], args[3]); - break; - - case INDEX_op_shl_i64: - op = RSY_SLLG; - do_shift64: - if (const_args[2]) { - tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]); - } else { - tcg_out_sh64(s, op, args[0], args[1], args[2], 0); - } - break; - case INDEX_op_shr_i64: - op = RSY_SRLG; - goto do_shift64; - case INDEX_op_sar_i64: - op = RSY_SRAG; - goto do_shift64; +static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem(s, 0, RXY_LLGH, dest, base, TCG_REG_NONE, offset); +} - case INDEX_op_rotl_i64: - if (const_args[2]) { - tcg_out_sh64(s, RSY_RLLG, args[0], args[1], - TCG_REG_NONE, args[2]); - } else { - tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0); - } - break; - case INDEX_op_rotr_i64: - if (const_args[2]) { - tcg_out_sh64(s, RSY_RLLG, args[0], args[1], - TCG_REG_NONE, (64 - args[2]) & 63); - } else { - /* We can use the smaller 32-bit negate because only the - low 6 bits are examined for the rotate. 
*/ - tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]); - tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0); - } - break; +static const TCGOutOpLoad outop_ld16u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16u, +}; - case INDEX_op_add2_i64: - if (const_args[4]) { - if ((int64_t)args[4] >= 0) { - tcg_out_insn(s, RIL, ALGFI, args[0], args[4]); - } else { - tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]); - } - } else { - tcg_out_insn(s, RRE, ALGR, args[0], args[4]); - } - tcg_out_insn(s, RRE, ALCGR, args[1], args[5]); - break; - case INDEX_op_sub2_i64: - if (const_args[4]) { - if ((int64_t)args[4] >= 0) { - tcg_out_insn(s, RIL, SLGFI, args[0], args[4]); - } else { - tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]); - } - } else { - tcg_out_insn(s, RRE, SLGR, args[0], args[4]); - } - tcg_out_insn(s, RRE, SLBGR, args[1], args[5]); - break; +static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + if (type == TCG_TYPE_I32) { + tcg_out_mem(s, RX_LH, RXY_LHY, dest, base, TCG_REG_NONE, offset); + } else { + tcg_out_mem(s, 0, RXY_LGH, dest, base, TCG_REG_NONE, offset); + } +} - case INDEX_op_brcond_i64: - tgen_brcond(s, TCG_TYPE_I64, args[2], args[0], - args[1], const_args[1], arg_label(args[3])); - break; - case INDEX_op_setcond_i64: - tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], - args[2], const_args[2], false); - break; - case INDEX_op_negsetcond_i64: - tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], - args[2], const_args[2], true); - break; - case INDEX_op_movcond_i64: - tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], - args[2], const_args[2], args[3], const_args[3], args[4]); - break; +static const TCGOutOpLoad outop_ld16s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16s, +}; - OP_32_64(deposit): - a0 = args[0], a1 = args[1], a2 = args[2]; - if (const_args[1]) { - tgen_deposit(s, a0, a2, args[3], args[4], 1); - } else { - /* Since we can't support "0Z" as a constraint, we allow a1 in - any register. Fix things up as if a matching constraint. */ - if (a0 != a1) { - TCGType type = (opc == INDEX_op_deposit_i64); - if (a0 == a2) { - tcg_out_mov(s, type, TCG_TMP0, a2); - a2 = TCG_TMP0; - } - tcg_out_mov(s, type, a0, a1); - } - tgen_deposit(s, a0, a2, args[3], args[4], 0); - } - break; +static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem(s, 0, RXY_LLGF, dest, base, TCG_REG_NONE, offset); +} - OP_32_64(extract): - tgen_extract(s, args[0], args[1], args[2], args[3]); - break; +static const TCGOutOpLoad outop_ld32u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32u, +}; - case INDEX_op_clz_i64: - tgen_clz(s, args[0], args[1], args[2], const_args[2]); - break; +static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem(s, 0, RXY_LGF, dest, base, TCG_REG_NONE, offset); +} - case INDEX_op_ctpop_i32: - tgen_ctpop(s, TCG_TYPE_I32, args[0], args[1]); - break; - case INDEX_op_ctpop_i64: - tgen_ctpop(s, TCG_TYPE_I64, args[0], args[1]); - break; +static const TCGOutOpLoad outop_ld32s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32s, +}; - case INDEX_op_mb: - /* The host memory model is quite strong, we simply need to - serialize the instruction stream. 
*/ - if (args[0] & TCG_MO_ST_LD) { - /* fast-bcr-serialization facility (45) is present */ - tcg_out_insn(s, RR, BCR, 14, 0); - } - break; +static void tgen_st8(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem(s, RX_STC, RXY_STCY, data, base, TCG_REG_NONE, offset); +} - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_mov_i64: - case INDEX_op_call: /* Always emitted via tcg_out_call. */ - case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ - case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ - case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ - case INDEX_op_ext8s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extrl_i64_i32: - default: - g_assert_not_reached(); - } +static const TCGOutOpStore outop_st8 = { + .base.static_constraint = C_O0_I2(r, r), + .out_r = tgen_st8, +}; + +static void tgen_st16(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_mem(s, RX_STH, RXY_STHY, data, base, TCG_REG_NONE, offset); } +static const TCGOutOpStore outop_st16 = { + .base.static_constraint = C_O0_I2(r, r), + .out_r = tgen_st16, +}; + +static const TCGOutOpStore outop_st = { + .base.static_constraint = C_O0_I2(r, r), + .out_r = tcg_out_st, +}; + + static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg dst, TCGReg src) { @@ -2841,6 +3249,94 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, tcg_out_insn(s, VRX, VLREP, dst, TCG_TMP0, TCG_REG_NONE, 0, MO_64); } +static bool tcg_out_cmp_vec_noinv(TCGContext *s, unsigned vece, TCGReg a0, + TCGReg a1, TCGReg a2, TCGCond cond) +{ + bool need_swap = false, need_inv = false; + + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_GT: + case TCG_COND_GTU: + break; + case TCG_COND_NE: + case TCG_COND_LE: + case TCG_COND_LEU: + need_inv = true; + break; + case TCG_COND_LT: + case TCG_COND_LTU: + need_swap = true; + break; + case TCG_COND_GE: + case TCG_COND_GEU: + need_swap = need_inv = true; + break; + default: + g_assert_not_reached(); + } + + if (need_inv) { + cond = tcg_invert_cond(cond); + } + if (need_swap) { + TCGReg swap = a1; + a1 = a2; + a2 = swap; + cond = tcg_swap_cond(cond); + } + + switch (cond) { + case TCG_COND_EQ: + tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece); + break; + case TCG_COND_GT: + tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece); + break; + case TCG_COND_GTU: + tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece); + break; + default: + g_assert_not_reached(); + } + return need_inv; +} + +static void tcg_out_cmp_vec(TCGContext *s, unsigned vece, TCGReg a0, + TCGReg a1, TCGReg a2, TCGCond cond) +{ + if (tcg_out_cmp_vec_noinv(s, vece, a0, a1, a2, cond)) { + tcg_out_insn(s, VRRc, VNO, a0, a0, a0, 0); + } +} + +static void tcg_out_cmpsel_vec(TCGContext *s, unsigned vece, TCGReg a0, + TCGReg c1, TCGReg c2, TCGArg v3, + int const_v3, TCGReg v4, TCGCond cond) +{ + bool inv = tcg_out_cmp_vec_noinv(s, vece, TCG_VEC_TMP0, c1, c2, cond); + + if (!const_v3) { + if (inv) { + tcg_out_insn(s, VRRe, VSEL, a0, v4, v3, TCG_VEC_TMP0); + } else { + tcg_out_insn(s, VRRe, VSEL, a0, v3, v4, TCG_VEC_TMP0); + } + } else if (v3) { + if (inv) { + tcg_out_insn(s, VRRc, VOC, a0, v4, TCG_VEC_TMP0, 0); + } else { + tcg_out_insn(s, 
VRRc, VO, a0, v4, TCG_VEC_TMP0, 0); + } + } else { + if (inv) { + tcg_out_insn(s, VRRc, VN, a0, v4, TCG_VEC_TMP0, 0); + } else { + tcg_out_insn(s, VRRc, VNC, a0, v4, TCG_VEC_TMP0, 0); + } + } +} + static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl, unsigned vece, const TCGArg args[TCG_MAX_OP_ARGS], @@ -2959,19 +3455,11 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, break; case INDEX_op_cmp_vec: - switch ((TCGCond)args[3]) { - case TCG_COND_EQ: - tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece); - break; - case TCG_COND_GT: - tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece); - break; - case TCG_COND_GTU: - tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece); - break; - default: - g_assert_not_reached(); - } + tcg_out_cmp_vec(s, vece, a0, a1, a2, args[3]); + break; + case INDEX_op_cmpsel_vec: + tcg_out_cmpsel_vec(s, vece, a0, a1, a2, args[3], const_args[3], + args[4], args[5]); break; case INDEX_op_s390_vuph_vec: @@ -3024,9 +3512,9 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_umax_vec: case INDEX_op_umin_vec: case INDEX_op_xor_vec: - return 1; case INDEX_op_cmp_vec: case INDEX_op_cmpsel_vec: + return 1; case INDEX_op_rotrv_vec: return -1; case INDEX_op_mul_vec: @@ -3039,71 +3527,6 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) } } -static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0, - TCGv_vec v1, TCGv_vec v2, TCGCond cond) -{ - bool need_swap = false, need_inv = false; - - switch (cond) { - case TCG_COND_EQ: - case TCG_COND_GT: - case TCG_COND_GTU: - break; - case TCG_COND_NE: - case TCG_COND_LE: - case TCG_COND_LEU: - need_inv = true; - break; - case TCG_COND_LT: - case TCG_COND_LTU: - need_swap = true; - break; - case TCG_COND_GE: - case TCG_COND_GEU: - need_swap = need_inv = true; - break; - default: - g_assert_not_reached(); - } - - if (need_inv) { - cond = tcg_invert_cond(cond); - } - if (need_swap) { - TCGv_vec t1; - t1 = v1, v1 = v2, v2 = t1; - cond = tcg_swap_cond(cond); - } - - vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0), - tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond); - - return need_inv; -} - -static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0, - TCGv_vec v1, TCGv_vec v2, TCGCond cond) -{ - if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) { - tcg_gen_not_vec(vece, v0, v0); - } -} - -static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0, - TCGv_vec c1, TCGv_vec c2, - TCGv_vec v3, TCGv_vec v4, TCGCond cond) -{ - TCGv_vec t = tcg_temp_new_vec(type); - - if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) { - /* Invert the sense of the compare by swapping arguments. */ - tcg_gen_bitsel_vec(vece, v0, t, v4, v3); - } else { - tcg_gen_bitsel_vec(vece, v0, t, v3, v4); - } - tcg_temp_free_vec(t); -} - static void expand_vec_sat(TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGv_vec v2, TCGOpcode add_sub_opc) { @@ -3145,7 +3568,7 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, TCGArg a0, ...) 
{ va_list va; - TCGv_vec v0, v1, v2, v3, v4, t0; + TCGv_vec v0, v1, v2, t0; va_start(va, a0); v0 = temp_tcgv_vec(arg_temp(a0)); @@ -3153,16 +3576,6 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); switch (opc) { - case INDEX_op_cmp_vec: - expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg)); - break; - - case INDEX_op_cmpsel_vec: - v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); - v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); - expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg)); - break; - case INDEX_op_rotrv_vec: t0 = tcg_temp_new_vec(type); tcg_gen_neg_vec(vece, t0, v2); @@ -3183,173 +3596,10 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, va_end(va); } -static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) +static TCGConstraintSetIndex +tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) { switch (op) { - case INDEX_op_goto_ptr: - return C_O0_I1(r); - - case INDEX_op_ld8u_i32: - case INDEX_op_ld8u_i64: - case INDEX_op_ld8s_i32: - case INDEX_op_ld8s_i64: - case INDEX_op_ld16u_i32: - case INDEX_op_ld16u_i64: - case INDEX_op_ld16s_i32: - case INDEX_op_ld16s_i64: - case INDEX_op_ld_i32: - case INDEX_op_ld32u_i64: - case INDEX_op_ld32s_i64: - case INDEX_op_ld_i64: - return C_O1_I1(r, r); - - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - case INDEX_op_st_i32: - case INDEX_op_st32_i64: - case INDEX_op_st_i64: - return C_O0_I2(r, r); - - case INDEX_op_add_i32: - case INDEX_op_add_i64: - case INDEX_op_shl_i64: - case INDEX_op_shr_i64: - case INDEX_op_sar_i64: - case INDEX_op_rotl_i32: - case INDEX_op_rotl_i64: - case INDEX_op_rotr_i32: - case INDEX_op_rotr_i64: - case INDEX_op_setcond_i32: - case INDEX_op_negsetcond_i32: - return C_O1_I2(r, r, ri); - case INDEX_op_setcond_i64: - case INDEX_op_negsetcond_i64: - return C_O1_I2(r, r, rC); - - case INDEX_op_clz_i64: - return C_O1_I2(r, r, rI); - - case INDEX_op_sub_i32: - case INDEX_op_sub_i64: - case INDEX_op_and_i32: - case INDEX_op_or_i32: - case INDEX_op_xor_i32: - return C_O1_I2(r, r, ri); - case INDEX_op_and_i64: - return C_O1_I2(r, r, rNKR); - case INDEX_op_or_i64: - case INDEX_op_xor_i64: - return C_O1_I2(r, r, rK); - - case INDEX_op_andc_i32: - case INDEX_op_orc_i32: - case INDEX_op_eqv_i32: - return C_O1_I2(r, r, ri); - case INDEX_op_andc_i64: - return C_O1_I2(r, r, rKR); - case INDEX_op_orc_i64: - case INDEX_op_eqv_i64: - return C_O1_I2(r, r, rNK); - - case INDEX_op_nand_i32: - case INDEX_op_nand_i64: - case INDEX_op_nor_i32: - case INDEX_op_nor_i64: - return C_O1_I2(r, r, r); - - case INDEX_op_mul_i32: - return (HAVE_FACILITY(MISC_INSN_EXT2) - ? C_O1_I2(r, r, ri) - : C_O1_I2(r, 0, ri)); - case INDEX_op_mul_i64: - return (HAVE_FACILITY(MISC_INSN_EXT2) - ? 
C_O1_I2(r, r, rJ) - : C_O1_I2(r, 0, rJ)); - - case INDEX_op_shl_i32: - case INDEX_op_shr_i32: - case INDEX_op_sar_i32: - return C_O1_I2(r, r, ri); - - case INDEX_op_brcond_i32: - return C_O0_I2(r, ri); - case INDEX_op_brcond_i64: - return C_O0_I2(r, rC); - - case INDEX_op_bswap16_i32: - case INDEX_op_bswap16_i64: - case INDEX_op_bswap32_i32: - case INDEX_op_bswap32_i64: - case INDEX_op_bswap64_i64: - case INDEX_op_neg_i32: - case INDEX_op_neg_i64: - case INDEX_op_not_i32: - case INDEX_op_not_i64: - case INDEX_op_ext8s_i32: - case INDEX_op_ext8s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extract_i32: - case INDEX_op_extract_i64: - case INDEX_op_ctpop_i32: - case INDEX_op_ctpop_i64: - return C_O1_I1(r, r); - - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_ld_a64_i64: - return C_O1_I1(r, r); - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_st_a64_i64: - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st_a64_i32: - return C_O0_I2(r, r); - case INDEX_op_qemu_ld_a32_i128: - case INDEX_op_qemu_ld_a64_i128: - return C_O2_I1(o, m, r); - case INDEX_op_qemu_st_a32_i128: - case INDEX_op_qemu_st_a64_i128: - return C_O0_I3(o, m, r); - - case INDEX_op_deposit_i32: - case INDEX_op_deposit_i64: - return C_O1_I2(r, rZ, r); - - case INDEX_op_movcond_i32: - return C_O1_I4(r, r, ri, rI, r); - case INDEX_op_movcond_i64: - return C_O1_I4(r, r, rC, rI, r); - - case INDEX_op_div2_i32: - case INDEX_op_div2_i64: - case INDEX_op_divu2_i32: - case INDEX_op_divu2_i64: - return C_O2_I3(o, m, 0, 1, r); - - case INDEX_op_mulu2_i64: - return C_O2_I2(o, m, 0, r); - case INDEX_op_muls2_i64: - return C_O2_I2(o, m, r, r); - - case INDEX_op_add2_i32: - case INDEX_op_sub2_i32: - return C_N1_O1_I4(r, r, 0, 1, ri, r); - - case INDEX_op_add2_i64: - case INDEX_op_sub2_i64: - return C_N1_O1_I4(r, r, 0, 1, rJU, r); - case INDEX_op_st_vec: return C_O0_I2(v, r); case INDEX_op_ld_vec: @@ -3397,9 +3647,13 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) return C_O1_I2(v, v, r); case INDEX_op_bitsel_vec: return C_O1_I3(v, v, v, v); + case INDEX_op_cmpsel_vec: + return (TCG_TARGET_HAS_orc_vec + ? C_O1_I4(v, v, v, vZM, v) + : C_O1_I4(v, v, v, vZ, v)); default: - g_assert_not_reached(); + return C_NotImplemented; } } @@ -3521,6 +3775,7 @@ static void tcg_target_init(TCGContext *s) s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); + tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0); /* XXX many insns can't be used with R0, so we better avoid it for now */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h index 62ce9d792a4..0ef5a6d3ddc 100644 --- a/tcg/s390x/tcg-target.h +++ b/tcg/s390x/tcg-target.h @@ -51,130 +51,4 @@ typedef enum TCGReg { #define TCG_TARGET_NB_REGS 64 -/* Facilities required for proper operation; checked at startup. */ - -#define FACILITY_ZARCH_ACTIVE 2 -#define FACILITY_LONG_DISP 18 -#define FACILITY_EXT_IMM 21 -#define FACILITY_GEN_INST_EXT 34 -#define FACILITY_45 45 - -/* Facilities that are checked at runtime. 
*/ - -#define FACILITY_LOAD_ON_COND2 53 -#define FACILITY_MISC_INSN_EXT2 58 -#define FACILITY_MISC_INSN_EXT3 61 -#define FACILITY_VECTOR 129 -#define FACILITY_VECTOR_ENH1 135 - -extern uint64_t s390_facilities[3]; - -#define HAVE_FACILITY(X) \ - ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1) - -/* optional instructions */ -#define TCG_TARGET_HAS_div2_i32 1 -#define TCG_TARGET_HAS_rot_i32 1 -#define TCG_TARGET_HAS_ext8s_i32 1 -#define TCG_TARGET_HAS_ext16s_i32 1 -#define TCG_TARGET_HAS_ext8u_i32 1 -#define TCG_TARGET_HAS_ext16u_i32 1 -#define TCG_TARGET_HAS_bswap16_i32 1 -#define TCG_TARGET_HAS_bswap32_i32 1 -#define TCG_TARGET_HAS_not_i32 HAVE_FACILITY(MISC_INSN_EXT3) -#define TCG_TARGET_HAS_andc_i32 HAVE_FACILITY(MISC_INSN_EXT3) -#define TCG_TARGET_HAS_orc_i32 HAVE_FACILITY(MISC_INSN_EXT3) -#define TCG_TARGET_HAS_eqv_i32 HAVE_FACILITY(MISC_INSN_EXT3) -#define TCG_TARGET_HAS_nand_i32 HAVE_FACILITY(MISC_INSN_EXT3) -#define TCG_TARGET_HAS_nor_i32 HAVE_FACILITY(MISC_INSN_EXT3) -#define TCG_TARGET_HAS_clz_i32 0 -#define TCG_TARGET_HAS_ctz_i32 0 -#define TCG_TARGET_HAS_ctpop_i32 1 -#define TCG_TARGET_HAS_deposit_i32 1 -#define TCG_TARGET_HAS_extract_i32 1 -#define TCG_TARGET_HAS_sextract_i32 0 -#define TCG_TARGET_HAS_extract2_i32 0 -#define TCG_TARGET_HAS_negsetcond_i32 1 -#define TCG_TARGET_HAS_add2_i32 1 -#define TCG_TARGET_HAS_sub2_i32 1 -#define TCG_TARGET_HAS_mulu2_i32 0 -#define TCG_TARGET_HAS_muls2_i32 0 -#define TCG_TARGET_HAS_muluh_i32 0 -#define TCG_TARGET_HAS_mulsh_i32 0 -#define TCG_TARGET_HAS_extr_i64_i32 0 -#define TCG_TARGET_HAS_qemu_st8_i32 0 - -#define TCG_TARGET_HAS_div2_i64 1 -#define TCG_TARGET_HAS_rot_i64 1 -#define TCG_TARGET_HAS_ext8s_i64 1 -#define TCG_TARGET_HAS_ext16s_i64 1 -#define TCG_TARGET_HAS_ext32s_i64 1 -#define TCG_TARGET_HAS_ext8u_i64 1 -#define TCG_TARGET_HAS_ext16u_i64 1 -#define TCG_TARGET_HAS_ext32u_i64 1 -#define TCG_TARGET_HAS_bswap16_i64 1 -#define TCG_TARGET_HAS_bswap32_i64 1 -#define TCG_TARGET_HAS_bswap64_i64 1 -#define TCG_TARGET_HAS_not_i64 HAVE_FACILITY(MISC_INSN_EXT3) -#define TCG_TARGET_HAS_andc_i64 HAVE_FACILITY(MISC_INSN_EXT3) -#define TCG_TARGET_HAS_orc_i64 HAVE_FACILITY(MISC_INSN_EXT3) -#define TCG_TARGET_HAS_eqv_i64 HAVE_FACILITY(MISC_INSN_EXT3) -#define TCG_TARGET_HAS_nand_i64 HAVE_FACILITY(MISC_INSN_EXT3) -#define TCG_TARGET_HAS_nor_i64 HAVE_FACILITY(MISC_INSN_EXT3) -#define TCG_TARGET_HAS_clz_i64 1 -#define TCG_TARGET_HAS_ctz_i64 0 -#define TCG_TARGET_HAS_ctpop_i64 1 -#define TCG_TARGET_HAS_deposit_i64 1 -#define TCG_TARGET_HAS_extract_i64 1 -#define TCG_TARGET_HAS_sextract_i64 0 -#define TCG_TARGET_HAS_extract2_i64 0 -#define TCG_TARGET_HAS_negsetcond_i64 1 -#define TCG_TARGET_HAS_add2_i64 1 -#define TCG_TARGET_HAS_sub2_i64 1 -#define TCG_TARGET_HAS_mulu2_i64 1 -#define TCG_TARGET_HAS_muls2_i64 HAVE_FACILITY(MISC_INSN_EXT2) -#define TCG_TARGET_HAS_muluh_i64 0 -#define TCG_TARGET_HAS_mulsh_i64 0 - -#define TCG_TARGET_HAS_qemu_ldst_i128 1 - -#define TCG_TARGET_HAS_tst 1 - -#define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR) -#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR) -#define TCG_TARGET_HAS_v256 0 - -#define TCG_TARGET_HAS_andc_vec 1 -#define TCG_TARGET_HAS_orc_vec HAVE_FACILITY(VECTOR_ENH1) -#define TCG_TARGET_HAS_nand_vec HAVE_FACILITY(VECTOR_ENH1) -#define TCG_TARGET_HAS_nor_vec 1 -#define TCG_TARGET_HAS_eqv_vec HAVE_FACILITY(VECTOR_ENH1) -#define TCG_TARGET_HAS_not_vec 1 -#define TCG_TARGET_HAS_neg_vec 1 -#define TCG_TARGET_HAS_abs_vec 1 -#define TCG_TARGET_HAS_roti_vec 1 -#define TCG_TARGET_HAS_rots_vec 1 
-#define TCG_TARGET_HAS_rotv_vec 1 -#define TCG_TARGET_HAS_shi_vec 1 -#define TCG_TARGET_HAS_shs_vec 1 -#define TCG_TARGET_HAS_shv_vec 1 -#define TCG_TARGET_HAS_mul_vec 1 -#define TCG_TARGET_HAS_sat_vec 0 -#define TCG_TARGET_HAS_minmax_vec 1 -#define TCG_TARGET_HAS_bitsel_vec 1 -#define TCG_TARGET_HAS_cmpsel_vec 0 -#define TCG_TARGET_HAS_tst_vec 0 - -/* used for function call generation */ -#define TCG_TARGET_STACK_ALIGN 8 -#define TCG_TARGET_CALL_STACK_OFFSET 160 -#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND -#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF -#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF - -#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) -#define TCG_TARGET_NEED_LDST_LABELS -#define TCG_TARGET_NEED_POOL_LABELS - #endif diff --git a/tcg/s390x/tcg-target.opc.h b/tcg/s390x/tcg-target.opc.h deleted file mode 100644 index 0eb2350fb39..00000000000 --- a/tcg/s390x/tcg-target.opc.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright (c) 2021 Linaro - * - * This work is licensed under the terms of the GNU GPL, version 2 or - * (at your option) any later version. - * - * See the COPYING file in the top-level directory for details. - * - * Target-specific opcodes for host vector expansion. These will be - * emitted by tcg_expand_vec_op. For those familiar with GCC internals, - * consider these to be UNSPEC with names. - */ -DEF(s390_vuph_vec, 1, 1, 0, IMPLVEC) -DEF(s390_vupl_vec, 1, 1, 0, IMPLVEC) -DEF(s390_vpks_vec, 1, 2, 0, IMPLVEC) diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h index 434bf25072d..1a57adc0e8c 100644 --- a/tcg/sparc64/tcg-target-con-set.h +++ b/tcg/sparc64/tcg-target-con-set.h @@ -10,11 +10,12 @@ * tcg-target-con-str.h; the constraint combination is inclusive or. 
*/ C_O0_I1(r) -C_O0_I2(rZ, r) -C_O0_I2(rZ, rJ) +C_O0_I2(rz, r) +C_O0_I2(r, rJ) C_O1_I1(r, r) C_O1_I2(r, r, r) -C_O1_I2(r, rZ, rJ) -C_O1_I4(r, rZ, rJ, rI, 0) -C_O2_I2(r, r, rZ, rJ) -C_O2_I4(r, r, rZ, rZ, rJ, rJ) +C_O1_I2(r, r, rJ) +C_O1_I2(r, rz, rJ) +C_O1_I2(r, rz, rz) +C_O1_I4(r, r, rJ, rI, 0) +C_O2_I2(r, r, r, r) diff --git a/tcg/sparc64/tcg-target-con-str.h b/tcg/sparc64/tcg-target-con-str.h index 0577ec49420..2f033b3ac24 100644 --- a/tcg/sparc64/tcg-target-con-str.h +++ b/tcg/sparc64/tcg-target-con-str.h @@ -16,4 +16,3 @@ REGS('r', ALL_GENERAL_REGS) */ CONST('I', TCG_CT_CONST_S11) CONST('J', TCG_CT_CONST_S13) -CONST('Z', TCG_CT_CONST_ZERO) diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h new file mode 100644 index 00000000000..b29fd177f69 --- /dev/null +++ b/tcg/sparc64/tcg-target-has.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific opcode support + * Copyright (c) 2008 Fabrice Bellard + */ + +#ifndef TCG_TARGET_HAS_H +#define TCG_TARGET_HAS_H + +/* optional instructions */ +#define TCG_TARGET_HAS_extr_i64_i32 0 +#define TCG_TARGET_HAS_qemu_ldst_i128 0 +#define TCG_TARGET_HAS_tst 1 + +#define TCG_TARGET_extract_valid(type, ofs, len) \ + ((type) == TCG_TYPE_I64 && (ofs) + (len) == 32) + +#define TCG_TARGET_sextract_valid TCG_TARGET_extract_valid + +#define TCG_TARGET_deposit_valid(type, ofs, len) 0 + +#endif diff --git a/tcg/sparc64/tcg-target-mo.h b/tcg/sparc64/tcg-target-mo.h new file mode 100644 index 00000000000..98bfe03b7ac --- /dev/null +++ b/tcg/sparc64/tcg-target-mo.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific memory model + * Copyright (c) 2008 Fabrice Bellard + */ + +#ifndef TCG_TARGET_MO_H +#define TCG_TARGET_MO_H + +#define TCG_TARGET_DEFAULT_MO 0 + +#endif diff --git a/tcg/sparc64/tcg-target-opc.h.inc b/tcg/sparc64/tcg-target-opc.h.inc new file mode 100644 index 00000000000..84e777bfe53 --- /dev/null +++ b/tcg/sparc64/tcg-target-opc.h.inc @@ -0,0 +1 @@ +/* No target specific opcodes. */ diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc index 176c98740bc..5e5c3f1cda5 100644 --- a/tcg/sparc64/tcg-target.c.inc +++ b/tcg/sparc64/tcg-target.c.inc @@ -27,8 +27,15 @@ #error "unsupported code generation mode" #endif -#include "../tcg-ldst.c.inc" -#include "../tcg-pool.c.inc" +/* Used for function call generation. 
*/ +#define TCG_REG_CALL_STACK TCG_REG_O6 +#define TCG_TARGET_STACK_BIAS 2047 +#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_CALL_STACK_OFFSET (128 + 6 * 8 + TCG_TARGET_STACK_BIAS) +#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND +#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { @@ -69,7 +76,6 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { #define TCG_CT_CONST_S11 0x100 #define TCG_CT_CONST_S13 0x200 -#define TCG_CT_CONST_ZERO 0x400 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) @@ -193,7 +199,9 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) #define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04)) #define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14)) #define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08)) +#define ARITH_ADDCCC (INSN_OP(2) | INSN_OP3(0x18)) #define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c)) +#define ARITH_SUBCCC (INSN_OP(2) | INSN_OP3(0x1c)) #define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a)) #define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b)) #define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e)) @@ -202,9 +210,11 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) #define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d)) #define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d)) #define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c)) +#define ARITH_POPC (INSN_OP(2) | INSN_OP3(0x2e)) #define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f)) #define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11)) +#define ARITH_ADDXCCC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x13)) #define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16)) #define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25)) @@ -217,6 +227,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) #define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0)) #define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0)) +#define WRCCR (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(2)) #define JMPL (INSN_OP(2) | INSN_OP3(0x38)) #define RETURN (INSN_OP(2) | INSN_OP3(0x39)) #define SAVE (INSN_OP(2) | INSN_OP3(0x3c)) @@ -264,8 +275,11 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) #define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE)) #define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE)) -#ifndef use_vis3_instructions -bool use_vis3_instructions; +static bool use_popc_instructions; +#if defined(__VIS__) && __VIS__ >= 0x300 +#define use_vis3_instructions 1 +#else +static bool use_vis3_instructions; #endif static bool check_fit_i64(int64_t val, unsigned int bits) @@ -333,9 +347,7 @@ static bool tcg_target_const_match(int64_t val, int ct, val = (int32_t)val; } - if ((ct & TCG_CT_CONST_ZERO) && val == 0) { - return 1; - } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) { + if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) { return 1; } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) { return 1; @@ -362,7 +374,7 @@ static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1, } static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1, - int32_t val2, int val2const, int op) + int32_t val2, int val2const, int op) { tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | (val2const ? 
INSN_IMM13(val2) : INSN_RS2(val2))); @@ -592,21 +604,6 @@ static void tcg_out_sety(TCGContext *s, TCGReg rs) tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs)); } -static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1, - int32_t val2, int val2const, int uns) -{ - /* Load Y with the sign/zero extension of RS1 to 64-bits. */ - if (uns) { - tcg_out_sety(s, TCG_REG_G0); - } else { - tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA); - tcg_out_sety(s, TCG_REG_T1); - } - - tcg_out_arithc(s, rd, rs1, val2, val2const, - uns ? ARITH_UDIV : ARITH_SDIV); -} - static const uint8_t tcg_cond_to_bcond[16] = { [TCG_COND_EQ] = COND_E, [TCG_COND_NE] = COND_NE, @@ -648,6 +645,12 @@ static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l) tcg_out_bpcc0(s, scond, flags, off19); } +static void tcg_out_br(TCGContext *s, TCGLabel *l) +{ + tcg_out_bpcc(s, COND_A, BPCC_PT, l); + tcg_out_nop(s); +} + static void tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg c1, int32_t c2, int c2const) { @@ -663,11 +666,10 @@ static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1, tcg_out_nop(s); } -static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret, +static void tcg_out_movcc(TCGContext *s, int scond, int cc, TCGReg ret, int32_t v1, int v1const) { - tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret) - | INSN_RS1(tcg_cond_to_bcond[cond]) + tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret) | INSN_RS1(scond) | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1))); } @@ -676,7 +678,7 @@ static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, int32_t v1, int v1const) { tcg_out_cmp(s, cond, c1, c2, c2const); - tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const); + tcg_out_movcc(s, tcg_cond_to_bcond[cond], MOVCC_ICC, ret, v1, v1const); } static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1, @@ -720,12 +722,12 @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, tcg_out_movr(s, rcond, ret, c1, v1, v1const); } else { tcg_out_cmp(s, cond, c1, c2, c2const); - tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const); + tcg_out_movcc(s, tcg_cond_to_bcond[cond], MOVCC_XCC, ret, v1, v1const); } } static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg c1, int32_t c2, int c2const, bool neg) + TCGReg c1, int32_t c2, bool c2const, bool neg) { /* For 32-bit comparisons, we can play games with ADDC/SUBC. */ switch (cond) { @@ -745,7 +747,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, } c1 = TCG_REG_G0, c2const = 0; cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU); - break; + break; case TCG_COND_TSTEQ: case TCG_COND_TSTNE: @@ -754,7 +756,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, c1 = TCG_REG_G0; c2 = TCG_REG_T1, c2const = 0; cond = (cond == TCG_COND_TSTEQ ? TCG_COND_GEU : TCG_COND_LTU); - break; + break; case TCG_COND_GTU: case TCG_COND_LEU: @@ -774,7 +776,8 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, default: tcg_out_cmp(s, cond, c1, c2, c2const); tcg_out_movi_s13(s, ret, 0); - tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1); + tcg_out_movcc(s, tcg_cond_to_bcond[cond], + MOVCC_ICC, ret, neg ? 
-1 : 1, 1); return; } @@ -799,7 +802,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, } static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg c1, int32_t c2, int c2const, bool neg) + TCGReg c1, int32_t c2, bool c2const, bool neg) { int rcond; @@ -829,78 +832,103 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, } else { tcg_out_cmp(s, cond, c1, c2, c2const); tcg_out_movi_s13(s, ret, 0); - tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1); + tcg_out_movcc(s, tcg_cond_to_bcond[cond], + MOVCC_XCC, ret, neg ? -1 : 1, 1); } } -static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh, - TCGReg al, TCGReg ah, int32_t bl, int blconst, - int32_t bh, int bhconst, int opl, int oph) +static void tcg_out_brcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg arg1, TCGArg arg2, bool const_arg2, + TCGLabel *l) { - TCGReg tmp = TCG_REG_T1; - - /* Note that the low parts are fully consumed before tmp is set. */ - if (rl != ah && (bhconst || rl != bh)) { - tmp = rl; + if (type == TCG_TYPE_I32) { + tcg_out_brcond_i32(s, cond, arg1, arg2, const_arg2, l); + } else { + tcg_out_brcond_i64(s, cond, arg1, arg2, const_arg2, l); } +} - tcg_out_arithc(s, tmp, al, bl, blconst, opl); - tcg_out_arithc(s, rh, ah, bh, bhconst, oph); - tcg_out_mov(s, TCG_TYPE_I32, rl, tmp); +static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg arg1, TCGReg arg2, TCGLabel *l) +{ + tcg_out_brcond(s, type, cond, arg1, arg2, false, l); } -static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, - TCGReg al, TCGReg ah, int32_t bl, int blconst, - int32_t bh, int bhconst, bool is_sub) +static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg arg1, tcg_target_long arg2, TCGLabel *l) { - TCGReg tmp = TCG_REG_T1; + tcg_out_brcond(s, type, cond, arg1, arg2, true, l); +} + +static const TCGOutOpBrcond outop_brcond = { + .base.static_constraint = C_O0_I2(r, rJ), + .out_rr = tgen_brcond, + .out_ri = tgen_brcondi, +}; - /* Note that the low parts are fully consumed before tmp is set. */ - if (rl != ah && (bhconst || rl != bh)) { - tmp = rl; +static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg ret, TCGReg c1, + TCGArg c2, bool c2const, bool neg) +{ + if (type == TCG_TYPE_I32) { + tcg_out_setcond_i32(s, cond, ret, c1, c2, c2const, neg); + } else { + tcg_out_setcond_i64(s, cond, ret, c1, c2, c2const, neg); } +} - tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC); +static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) +{ + tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, false); +} - if (use_vis3_instructions && !is_sub) { - /* Note that ADDXC doesn't accept immediates. */ - if (bhconst && bh != 0) { - tcg_out_movi_s13(s, TCG_REG_T2, bh); - bh = TCG_REG_T2; - } - tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC); - } else if (bh == TCG_REG_G0) { - /* If we have a zero, we can perform the operation in two insns, - with the arithmetic first, and a conditional move into place. */ - if (rh == ah) { - tcg_out_arithi(s, TCG_REG_T2, ah, 1, - is_sub ? ARITH_SUB : ARITH_ADD); - tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0); - } else { - tcg_out_arithi(s, rh, ah, 1, is_sub ? 
ARITH_SUB : ARITH_ADD); - tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0); - } +static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, tcg_target_long arg2) +{ + tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, false); +} + +static const TCGOutOpSetcond outop_setcond = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_setcond, + .out_rri = tgen_setcondi, +}; + +static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) +{ + tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, true); +} + +static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, tcg_target_long arg2) +{ + tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, true); +} + +static const TCGOutOpSetcond outop_negsetcond = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_negsetcond, + .out_rri = tgen_negsetcondi, +}; + +static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg ret, TCGReg c1, TCGArg c2, bool c2const, + TCGArg v1, bool v1const, TCGArg v2, bool v2consf) +{ + if (type == TCG_TYPE_I32) { + tcg_out_movcond_i32(s, cond, ret, c1, c2, c2const, v1, v1const); } else { - /* - * Otherwise adjust BH as if there is carry into T2. - * Note that constant BH is constrained to 11 bits for the MOVCC, - * so the adjustment fits 12 bits. - */ - if (bhconst) { - tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1)); - } else { - tcg_out_arithi(s, TCG_REG_T2, bh, 1, - is_sub ? ARITH_SUB : ARITH_ADD); - } - /* ... smoosh T2 back to original BH if carry is clear ... */ - tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst); - /* ... and finally perform the arithmetic with the new operand. */ - tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD); + tcg_out_movcond_i64(s, cond, ret, c1, c2, c2const, v1, v1const); } - - tcg_out_mov(s, TCG_TYPE_I64, rl, tmp); } +static const TCGOutOpMovcond outop_movcond = { + .base.static_constraint = C_O1_I4(r, r, rJ, rI, 0), + .out = tgen_movcond, +}; + static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest, bool in_prologue, bool tail_call) { @@ -931,7 +959,7 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest, tcg_out_nop(s); } -static void tcg_out_mb(TCGContext *s, TCGArg a0) +static void tcg_out_mb(TCGContext *s, unsigned a0) { /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */ tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL)); @@ -1092,7 +1120,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, /* Extract the page index, shifted into place for tlb index. */ tcg_out_arithi(s, TCG_REG_T1, addr_reg, - s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL); + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL); tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND); /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */ @@ -1108,7 +1136,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, h->base = TCG_REG_T1; /* Mask out the page offset, except for the required alignment. 
*/ - compare_mask = s->page_mask | a_mask; + compare_mask = TARGET_PAGE_MASK | a_mask; if (check_fit_tl(compare_mask, 13)) { tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND); } else { @@ -1120,7 +1148,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addr_reg; + ldst->addr_reg = addr_reg; ldst->label_ptr[0] = s->code_ptr; /* bne,pn %[xi]cc, label0 */ @@ -1133,14 +1161,14 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, * Otherwise, test for at least natural alignment and defer * everything else to the helper functions. */ - if (s_bits != get_alignment_bits(opc)) { + if (s_bits != memop_alignment_bits(opc)) { tcg_debug_assert(check_fit_tl(a_mask, 13)); tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC); ldst = new_ldst_label(s); ldst->is_ld = is_ld; ldst->oi = oi; - ldst->addrlo_reg = addr_reg; + ldst->addr_reg = addr_reg; ldst->label_ptr[0] = s->code_ptr; /* bne,pn %icc, label0 */ @@ -1162,8 +1190,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, return ldst; } -static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, - MemOpIdx oi, TCGType data_type) +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data, + TCGReg addr, MemOpIdx oi) { static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = { [MO_UB] = LDUB, @@ -1195,14 +1223,23 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]); if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = data; ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } } -static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, - MemOpIdx oi, TCGType data_type) +static const TCGOutOpQemuLdSt outop_qemu_ld = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_qemu_ld, +}; + +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data, + TCGReg addr, MemOpIdx oi) { static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = STB, @@ -1225,12 +1262,21 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]); if (ldst) { - ldst->type = data_type; + ldst->type = type; ldst->datalo_reg = data; ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); } } +static const TCGOutOpQemuLdSt outop_qemu_st = { + .base.static_constraint = C_O0_I2(rz, r), + .out = tgen_qemu_st, +}; + +static const TCGOutOpQemuLdSt2 outop_qemu_st2 = { + .base.static_constraint = C_NotImplemented, +}; + static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) { if (check_fit_ptr(a0, 13)) { @@ -1276,365 +1322,794 @@ static void tcg_out_goto_tb(TCGContext *s, int which) } } +static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0) +{ + tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL); + tcg_out_mov_delay(s, TCG_REG_TB, a0); +} + void tb_target_set_jmp_target(const TranslationBlock *tb, int n, uintptr_t jmp_rx, uintptr_t jmp_rw) { } -static void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) + +static void tgen_add(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) { - TCGArg a0, a1, a2; - int c, c2; + tcg_out_arith(s, a0, a1, a2, ARITH_ADD); +} - /* Hoist the loads of the most common arguments. 
*/ - a0 = args[0]; - a1 = args[1]; - a2 = args[2]; - c2 = const_args[2]; +static void tgen_addi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_arithi(s, a0, a1, a2, ARITH_ADD); +} - switch (opc) { - case INDEX_op_goto_ptr: - tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL); - tcg_out_mov_delay(s, TCG_REG_TB, a0); - break; - case INDEX_op_br: - tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0)); - tcg_out_nop(s); - break; +static const TCGOutOpBinary outop_add = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_add, + .out_rri = tgen_addi, +}; -#define OP_32_64(x) \ - glue(glue(case INDEX_op_, x), _i32): \ - glue(glue(case INDEX_op_, x), _i64) +static void tgen_addco_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_arith(s, a0, a1, a2, ARITH_ADDCC); +} - OP_32_64(ld8u): - tcg_out_ldst(s, a0, a1, a2, LDUB); - break; - OP_32_64(ld8s): - tcg_out_ldst(s, a0, a1, a2, LDSB); - break; - OP_32_64(ld16u): - tcg_out_ldst(s, a0, a1, a2, LDUH); - break; - OP_32_64(ld16s): - tcg_out_ldst(s, a0, a1, a2, LDSH); - break; - case INDEX_op_ld_i32: - case INDEX_op_ld32u_i64: - tcg_out_ldst(s, a0, a1, a2, LDUW); - break; - OP_32_64(st8): - tcg_out_ldst(s, a0, a1, a2, STB); - break; - OP_32_64(st16): - tcg_out_ldst(s, a0, a1, a2, STH); - break; - case INDEX_op_st_i32: - case INDEX_op_st32_i64: - tcg_out_ldst(s, a0, a1, a2, STW); - break; - OP_32_64(add): - c = ARITH_ADD; - goto gen_arith; - OP_32_64(sub): - c = ARITH_SUB; - goto gen_arith; - OP_32_64(and): - c = ARITH_AND; - goto gen_arith; - OP_32_64(andc): - c = ARITH_ANDN; - goto gen_arith; - OP_32_64(or): - c = ARITH_OR; - goto gen_arith; - OP_32_64(orc): - c = ARITH_ORN; - goto gen_arith; - OP_32_64(xor): - c = ARITH_XOR; - goto gen_arith; - case INDEX_op_shl_i32: - c = SHIFT_SLL; - do_shift32: - /* Limit immediate shift count lest we create an illegal insn. 
*/ - tcg_out_arithc(s, a0, a1, a2 & 31, c2, c); - break; - case INDEX_op_shr_i32: - c = SHIFT_SRL; - goto do_shift32; - case INDEX_op_sar_i32: - c = SHIFT_SRA; - goto do_shift32; - case INDEX_op_mul_i32: - c = ARITH_UMUL; - goto gen_arith; - - OP_32_64(neg): - c = ARITH_SUB; - goto gen_arith1; - OP_32_64(not): - c = ARITH_ORN; - goto gen_arith1; - - case INDEX_op_div_i32: - tcg_out_div32(s, a0, a1, a2, c2, 0); - break; - case INDEX_op_divu_i32: - tcg_out_div32(s, a0, a1, a2, c2, 1); - break; +static void tgen_addco_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_arithi(s, a0, a1, a2, ARITH_ADDCC); +} - case INDEX_op_brcond_i32: - tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3])); - break; - case INDEX_op_setcond_i32: - tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, false); - break; - case INDEX_op_negsetcond_i32: - tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, true); - break; - case INDEX_op_movcond_i32: - tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]); - break; +static const TCGOutOpBinary outop_addco = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_addco_rrr, + .out_rri = tgen_addco_rri, +}; - case INDEX_op_add2_i32: - tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3], - args[4], const_args[4], args[5], const_args[5], - ARITH_ADDCC, ARITH_ADDC); - break; - case INDEX_op_sub2_i32: - tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3], - args[4], const_args[4], args[5], const_args[5], - ARITH_SUBCC, ARITH_SUBC); - break; - case INDEX_op_mulu2_i32: - c = ARITH_UMUL; - goto do_mul2; - case INDEX_op_muls2_i32: - c = ARITH_SMUL; - do_mul2: - /* The 32-bit multiply insns produce a full 64-bit result. */ - tcg_out_arithc(s, a0, a2, args[3], const_args[3], c); - tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX); - break; +static void tgen_addci_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_arith(s, a0, a1, a2, ARITH_ADDC); + } else if (use_vis3_instructions) { + tcg_out_arith(s, a0, a1, a2, ARITH_ADDXC); + } else { + tcg_out_arith(s, TCG_REG_T1, a1, a2, ARITH_ADD); /* for CC */ + tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_ADD); /* for CS */ + /* Select the correct result based on actual carry value. 
*/ + tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false); + } +} - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: - tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); - break; - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_ld_a64_i64: - tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); - break; - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st_a64_i32: - tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32); - break; - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_st_a64_i64: - tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64); - break; +static void tgen_addci_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_arithi(s, a0, a1, a2, ARITH_ADDC); + return; + } + /* !use_vis3_instructions */ + if (a2 != 0) { + tcg_out_arithi(s, TCG_REG_T1, a1, a2, ARITH_ADD); /* for CC */ + tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_ADD); /* for CS */ + tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false); + } else if (a0 == a1) { + tcg_out_arithi(s, TCG_REG_T1, a1, 1, ARITH_ADD); + tcg_out_movcc(s, COND_CS, MOVCC_XCC, a0, TCG_REG_T1, false); + } else { + tcg_out_arithi(s, a0, a1, 1, ARITH_ADD); + tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, a1, false); + } +} - case INDEX_op_ld32s_i64: - tcg_out_ldst(s, a0, a1, a2, LDSW); - break; - case INDEX_op_ld_i64: - tcg_out_ldst(s, a0, a1, a2, LDX); - break; - case INDEX_op_st_i64: - tcg_out_ldst(s, a0, a1, a2, STX); - break; - case INDEX_op_shl_i64: - c = SHIFT_SLLX; - do_shift64: - /* Limit immediate shift count lest we create an illegal insn. */ - tcg_out_arithc(s, a0, a1, a2 & 63, c2, c); - break; - case INDEX_op_shr_i64: - c = SHIFT_SRLX; - goto do_shift64; - case INDEX_op_sar_i64: - c = SHIFT_SRAX; - goto do_shift64; - case INDEX_op_mul_i64: - c = ARITH_MULX; - goto gen_arith; - case INDEX_op_div_i64: - c = ARITH_SDIVX; - goto gen_arith; - case INDEX_op_divu_i64: - c = ARITH_UDIVX; - goto gen_arith; - - case INDEX_op_brcond_i64: - tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3])); - break; - case INDEX_op_setcond_i64: - tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, false); - break; - case INDEX_op_negsetcond_i64: - tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, true); - break; - case INDEX_op_movcond_i64: - tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]); - break; - case INDEX_op_add2_i64: - tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4], - const_args[4], args[5], const_args[5], false); - break; - case INDEX_op_sub2_i64: - tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4], - const_args[4], args[5], const_args[5], true); - break; - case INDEX_op_muluh_i64: - tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI); - break; +static TCGConstraintSetIndex cset_addci(TCGType type, unsigned flags) +{ + if (use_vis3_instructions && type == TCG_TYPE_I64) { + /* Note that ADDXC doesn't accept immediates. 
*/ + return C_O1_I2(r, rz, rz); + } + return C_O1_I2(r, rz, rJ); +} - gen_arith: - tcg_out_arithc(s, a0, a1, a2, c2, c); - break; +static const TCGOutOpAddSubCarry outop_addci = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_addci, + .out_rrr = tgen_addci_rrr, + .out_rri = tgen_addci_rri, +}; - gen_arith1: - tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c); - break; +/* Copy %xcc.c to %icc.c */ +static void tcg_out_dup_xcc_c(TCGContext *s) +{ + if (use_vis3_instructions) { + tcg_out_arith(s, TCG_REG_T1, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC); + } else { + tcg_out_movi_s13(s, TCG_REG_T1, 0); + tcg_out_movcc(s, COND_CS, MOVCC_XCC, TCG_REG_T1, 1, true); + } + /* Write carry-in into %icc via {0,1} + -1. */ + tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, -1, ARITH_ADDCC); +} - case INDEX_op_mb: - tcg_out_mb(s, a0); - break; +static void tgen_addcio_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type != TCG_TYPE_I32) { + if (use_vis3_instructions) { + tcg_out_arith(s, a0, a1, a2, ARITH_ADDXCCC); + return; + } + tcg_out_dup_xcc_c(s); + } + tcg_out_arith(s, a0, a1, a2, ARITH_ADDCCC); +} - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_mov_i64: - case INDEX_op_call: /* Always emitted via tcg_out_call. */ - case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ - case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ - case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ - case INDEX_op_ext8s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - default: - g_assert_not_reached(); +static void tgen_addcio_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type != TCG_TYPE_I32) { + /* !use_vis3_instructions */ + tcg_out_dup_xcc_c(s); + } + tcg_out_arithi(s, a0, a1, a2, ARITH_ADDCCC); +} + +static TCGConstraintSetIndex cset_addcio(TCGType type, unsigned flags) +{ + if (use_vis3_instructions && type == TCG_TYPE_I64) { + /* Note that ADDXCCC doesn't accept immediates. 
*/ + return C_O1_I2(r, rz, rz); } + return C_O1_I2(r, rz, rJ); +} + +static const TCGOutOpBinary outop_addcio = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_addcio, + .out_rrr = tgen_addcio_rrr, + .out_rri = tgen_addcio_rri, +}; + +static void tcg_out_set_carry(TCGContext *s) +{ + /* 0x11 -> xcc = nzvC, icc = nzvC */ + tcg_out_arithi(s, 0, TCG_REG_G0, 0x11, WRCCR); +} + +static void tgen_and(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_arith(s, a0, a1, a2, ARITH_AND); +} + +static void tgen_andi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_arithi(s, a0, a1, a2, ARITH_AND); +} + +static const TCGOutOpBinary outop_and = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_and, + .out_rri = tgen_andi, +}; + +static void tgen_andc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_arith(s, a0, a1, a2, ARITH_ANDN); +} + +static const TCGOutOpBinary outop_andc = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_andc, +}; + +static const TCGOutOpBinary outop_clz = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tcg_out_arith(s, a0, TCG_REG_G0, a1, ARITH_POPC); } -static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) -{ - switch (op) { - case INDEX_op_goto_ptr: - return C_O0_I1(r); - - case INDEX_op_ld8u_i32: - case INDEX_op_ld8u_i64: - case INDEX_op_ld8s_i32: - case INDEX_op_ld8s_i64: - case INDEX_op_ld16u_i32: - case INDEX_op_ld16u_i64: - case INDEX_op_ld16s_i32: - case INDEX_op_ld16s_i64: - case INDEX_op_ld_i32: - case INDEX_op_ld32u_i64: - case INDEX_op_ld32s_i64: - case INDEX_op_ld_i64: - case INDEX_op_neg_i32: - case INDEX_op_neg_i64: - case INDEX_op_not_i32: - case INDEX_op_not_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_ld_a64_i64: +static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags) +{ + if (use_popc_instructions && type == TCG_TYPE_I64) { return C_O1_I1(r, r); + } + return C_NotImplemented; +} - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - case INDEX_op_st_i32: - case INDEX_op_st32_i64: - case INDEX_op_st_i64: - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st_a64_i32: - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_st_a64_i64: - return C_O0_I2(rZ, r); - - case INDEX_op_add_i32: - case INDEX_op_add_i64: - case INDEX_op_mul_i32: - case INDEX_op_mul_i64: - case INDEX_op_div_i32: - case INDEX_op_div_i64: - case INDEX_op_divu_i32: - case INDEX_op_divu_i64: - case INDEX_op_sub_i32: - case INDEX_op_sub_i64: - case INDEX_op_and_i32: - case INDEX_op_and_i64: - case INDEX_op_andc_i32: - case INDEX_op_andc_i64: - case INDEX_op_or_i32: - case INDEX_op_or_i64: - case INDEX_op_orc_i32: - case INDEX_op_orc_i64: - case INDEX_op_xor_i32: - case INDEX_op_xor_i64: - case INDEX_op_shl_i32: - case INDEX_op_shl_i64: - case INDEX_op_shr_i32: - case INDEX_op_shr_i64: - case INDEX_op_sar_i32: - case INDEX_op_sar_i64: - case INDEX_op_setcond_i32: - case INDEX_op_setcond_i64: - case INDEX_op_negsetcond_i32: - case INDEX_op_negsetcond_i64: - return C_O1_I2(r, rZ, rJ); - - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: - return C_O0_I2(rZ, rJ); - case INDEX_op_movcond_i32: - case 
INDEX_op_movcond_i64: - return C_O1_I4(r, rZ, rJ, rI, 0); - case INDEX_op_add2_i32: - case INDEX_op_add2_i64: - case INDEX_op_sub2_i32: - case INDEX_op_sub2_i64: - return C_O2_I4(r, r, rZ, rZ, rJ, rJ); - case INDEX_op_mulu2_i32: - case INDEX_op_muls2_i32: - return C_O2_I2(r, r, rZ, rJ); - case INDEX_op_muluh_i64: - return C_O1_I2(r, r, r); +static const TCGOutOpUnary outop_ctpop = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_ctpop, + .out_rr = tgen_ctpop, +}; - default: - g_assert_not_reached(); +static const TCGOutOpBinary outop_ctz = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_divs_rJ(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGArg a2, bool c2) +{ + uint32_t insn; + + if (type == TCG_TYPE_I32) { + /* Load Y with the sign extension of a1 to 64-bits. */ + tcg_out_arithi(s, TCG_REG_T1, a1, 31, SHIFT_SRA); + tcg_out_sety(s, TCG_REG_T1); + insn = ARITH_SDIV; + } else { + insn = ARITH_SDIVX; + } + tcg_out_arithc(s, a0, a1, a2, c2, insn); +} + +static void tgen_divs(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tgen_divs_rJ(s, type, a0, a1, a2, false); +} + +static void tgen_divsi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tgen_divs_rJ(s, type, a0, a1, a2, true); +} + +static const TCGOutOpBinary outop_divs = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_divs, + .out_rri = tgen_divsi, +}; + +static const TCGOutOpDivRem outop_divs2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_divu_rJ(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGArg a2, bool c2) +{ + uint32_t insn; + + if (type == TCG_TYPE_I32) { + /* Load Y with the zero extension to 64-bits. */ + tcg_out_sety(s, TCG_REG_G0); + insn = ARITH_UDIV; + } else { + insn = ARITH_UDIVX; + } + tcg_out_arithc(s, a0, a1, a2, c2, insn); +} + +static void tgen_divu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tgen_divu_rJ(s, type, a0, a1, a2, false); +} + +static void tgen_divui(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tgen_divu_rJ(s, type, a0, a1, a2, true); +} + +static const TCGOutOpBinary outop_divu = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_divu, + .out_rri = tgen_divui, +}; + +static const TCGOutOpDivRem outop_divu2 = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_eqv = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) +{ + tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX); +} + +static const TCGOutOpUnary outop_extrh_i64_i32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extrh_i64_i32, +}; + +static void tgen_mul(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX; + tcg_out_arith(s, a0, a1, a2, insn); +} + +static void tgen_muli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX; + tcg_out_arithi(s, a0, a1, a2, insn); +} + +static const TCGOutOpBinary outop_mul = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_mul, + .out_rri = tgen_muli, +}; + +/* + * The 32-bit multiply insns produce a full 64-bit result. 
+ * Supporting 32-bit mul[us]2 opcodes avoids sign/zero-extensions + * before the actual multiply; we only need extract the high part + * into the separate operand. + */ +static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags) +{ + return type == TCG_TYPE_I32 ? C_O2_I2(r, r, r, r) : C_NotImplemented; +} + +static void tgen_muls2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3) +{ + tcg_out_arith(s, a0, a2, a3, ARITH_SMUL); + tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX); +} + +static const TCGOutOpMul2 outop_muls2 = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mul2, + .out_rrrr = tgen_muls2, +}; + +static const TCGOutOpBinary outop_mulsh = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_mulu2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3) +{ + tcg_out_arith(s, a0, a2, a3, ARITH_UMUL); + tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX); +} + +static const TCGOutOpMul2 outop_mulu2 = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mul2, + .out_rrrr = tgen_mulu2, +}; + +static void tgen_muluh(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_arith(s, a0, a1, a2, ARITH_UMULXHI); +} + +static TCGConstraintSetIndex cset_muluh(TCGType type, unsigned flags) +{ + return (type == TCG_TYPE_I64 && use_vis3_instructions + ? C_O1_I2(r, r, r) : C_NotImplemented); +} + +static const TCGOutOpBinary outop_muluh = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_muluh, + .out_rrr = tgen_muluh, +}; + +static const TCGOutOpBinary outop_nand = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_nor = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_or(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_arith(s, a0, a1, a2, ARITH_OR); +} + +static void tgen_ori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_arithi(s, a0, a1, a2, ARITH_OR); +} + +static const TCGOutOpBinary outop_or = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_or, + .out_rri = tgen_ori, +}; + +static void tgen_orc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_arith(s, a0, a1, a2, ARITH_ORN); +} + +static const TCGOutOpBinary outop_orc = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_orc, +}; + +static const TCGOutOpBinary outop_rems = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_remu = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_rotl = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBinary outop_rotr = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_sar(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRA : SHIFT_SRAX; + tcg_out_arith(s, a0, a1, a2, insn); +} + +static void tgen_sari(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRA : SHIFT_SRAX; + uint32_t mask = type == TCG_TYPE_I32 ? 
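tgen_muls2/tgen_mulu2 above rely on the property described in that comment: the 32-bit SMUL/UMUL instructions write the full 64-bit product to the destination register, so the high half of a 32-bit multiply is obtained with a single SRLX by 32. A small C model of the unsigned case (names are illustrative, not part of the patch); the signed variant is the same except that the product is formed as int64_t:

    #include <stdint.h>

    static void mulu2_model(uint32_t a, uint32_t b, uint32_t *lo, uint32_t *hi)
    {
        uint64_t prod = (uint64_t)a * b;  /* UMUL: full 64-bit product */
        *lo = (uint32_t)prod;
        *hi = (uint32_t)(prod >> 32);     /* the SRLX #32 in tgen_mul[us]2 */
    }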
31 : 63; + tcg_out_arithi(s, a0, a1, a2 & mask, insn); +} + +static const TCGOutOpBinary outop_sar = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_sar, + .out_rri = tgen_sari, +}; + +static void tgen_shl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SLL : SHIFT_SLLX; + tcg_out_arith(s, a0, a1, a2, insn); +} + +static void tgen_shli(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SLL : SHIFT_SLLX; + uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63; + tcg_out_arithi(s, a0, a1, a2 & mask, insn); +} + +static const TCGOutOpBinary outop_shl = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_shl, + .out_rri = tgen_shli, +}; + +static void tgen_shr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRL : SHIFT_SRLX; + tcg_out_arith(s, a0, a1, a2, insn); +} + +static void tgen_shri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRL : SHIFT_SRLX; + uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63; + tcg_out_arithi(s, a0, a1, a2 & mask, insn); +} + +static const TCGOutOpBinary outop_shr = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_shr, + .out_rri = tgen_shri, +}; + +static void tgen_sub(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_arith(s, a0, a1, a2, ARITH_SUB); +} + +static const TCGOutOpSubtract outop_sub = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_sub, +}; + +static void tgen_subbo_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_arith(s, a0, a1, a2, ARITH_SUBCC); +} + +static void tgen_subbo_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_arithi(s, a0, a1, a2, ARITH_SUBCC); +} + +static const TCGOutOpAddSubCarry outop_subbo = { + .base.static_constraint = C_O1_I2(r, rz, rJ), + .out_rrr = tgen_subbo_rrr, + .out_rri = tgen_subbo_rri, +}; + +static void tgen_subbi_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + /* TODO: OSA 2015 added SUBXC */ + if (type == TCG_TYPE_I32) { + tcg_out_arith(s, a0, a1, a2, ARITH_SUBC); + } else { + tcg_out_arith(s, TCG_REG_T1, a1, a2, ARITH_SUB); /* for CC */ + tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_SUB); /* for CS */ + /* Select the correct result based on actual borrow value. 
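Both candidates are computed up front: T1 = a1 - a2 for the borrow-clear case and a0 = a1 - a2 - 1 for the borrow-set case; the MOVcc on %xcc below then overwrites a0 with T1 when the carry (borrow) flag is clear, giving a branch-free select in place of the SUBXC instruction mentioned in the TODO above.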
*/ + tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false); + } +} + +static void tgen_subbi_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_arithi(s, a0, a1, a2, ARITH_SUBC); + } else if (a2 != 0) { + tcg_out_arithi(s, TCG_REG_T1, a1, a2, ARITH_SUB); /* for CC */ + tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_SUB); /* for CS */ + tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false); + } else if (a0 == a1) { + tcg_out_arithi(s, TCG_REG_T1, a1, 1, ARITH_SUB); + tcg_out_movcc(s, COND_CS, MOVCC_XCC, a0, TCG_REG_T1, false); + } else { + tcg_out_arithi(s, a0, a1, 1, ARITH_SUB); + tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, a1, false); + } +} + +static const TCGOutOpAddSubCarry outop_subbi = { + .base.static_constraint = C_O1_I2(r, rz, rJ), + .out_rrr = tgen_subbi_rrr, + .out_rri = tgen_subbi_rri, +}; + +static void tgen_subbio_rrr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type != TCG_TYPE_I32) { + /* TODO: OSA 2015 added SUBXCCC */ + tcg_out_dup_xcc_c(s); } + tcg_out_arith(s, a0, a1, a2, ARITH_SUBCCC); +} + +static void tgen_subbio_rri(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + if (type != TCG_TYPE_I32) { + tcg_out_dup_xcc_c(s); + } + tcg_out_arithi(s, a0, a1, a2, ARITH_SUBCCC); +} + +static const TCGOutOpAddSubCarry outop_subbio = { + .base.static_constraint = C_O1_I2(r, rz, rJ), + .out_rrr = tgen_subbio_rrr, + .out_rri = tgen_subbio_rri, +}; + +static void tcg_out_set_borrow(TCGContext *s) +{ + tcg_out_set_carry(s); /* borrow == carry */ +} + +static void tgen_xor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_arith(s, a0, a1, a2, ARITH_XOR); +} + +static void tgen_xori(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2) +{ + tcg_out_arithi(s, a0, a1, a2, ARITH_XOR); +} + +static const TCGOutOpBinary outop_xor = { + .base.static_constraint = C_O1_I2(r, r, rJ), + .out_rrr = tgen_xor, + .out_rri = tgen_xori, +}; + +static const TCGOutOpBswap outop_bswap16 = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpBswap outop_bswap32 = { + .base.static_constraint = C_NotImplemented, +}; + +static const TCGOutOpUnary outop_bswap64 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tgen_sub(s, type, a0, TCG_REG_G0, a1); +} + +static const TCGOutOpUnary outop_neg = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_neg, +}; + +static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tgen_orc(s, type, a0, TCG_REG_G0, a1); +} + +static const TCGOutOpUnary outop_not = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_not, +}; + +static const TCGOutOpDeposit outop_deposit = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + tcg_debug_assert(ofs + len == 32); + tcg_out_arithi(s, a0, a1, ofs, SHIFT_SRL); +} + +static const TCGOutOpExtract outop_extract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extract, +}; + +static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len) +{ + tcg_debug_assert(ofs + len == 32); + tcg_out_arithi(s, a0, a1, ofs, SHIFT_SRA); +} + +static const TCGOutOpExtract outop_sextract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_sextract, +}; + +static 
const TCGOutOpExtract2 outop_extract2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, dest, base, offset, LDUB); +} + +static const TCGOutOpLoad outop_ld8u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8u, +}; + +static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, dest, base, offset, LDSB); +} + +static const TCGOutOpLoad outop_ld8s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8s, +}; + +static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, dest, base, offset, LDUH); +} + +static const TCGOutOpLoad outop_ld16u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16u, +}; + +static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, dest, base, offset, LDSH); +} + +static const TCGOutOpLoad outop_ld16s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16s, +}; + +static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, dest, base, offset, LDUW); +} + +static const TCGOutOpLoad outop_ld32u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32u, +}; + +static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, dest, base, offset, LDSW); +} + +static const TCGOutOpLoad outop_ld32s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32s, +}; + +static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, data, base, offset, STB); +} + +static const TCGOutOpStore outop_st8 = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tgen_st8_r, +}; + +static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, data, base, offset, STH); +} + +static const TCGOutOpStore outop_st16 = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tgen_st16_r, +}; + +static const TCGOutOpStore outop_st = { + .base.static_constraint = C_O0_I2(rz, r), + .out_r = tcg_out_st, +}; + + +static TCGConstraintSetIndex +tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) +{ + return C_NotImplemented; } static void tcg_target_init(TCGContext *s) { + unsigned long hwcap = qemu_getauxval(AT_HWCAP); + /* * Only probe for the platform and capabilities if we haven't already * determined maximum values at compile time. 
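* When the compiler already guarantees VIS 3 (__VIS__ >= 0x300 turns use_vis3_instructions into a compile-time constant 1), the #ifndef below removes the runtime AT_HWCAP test for it.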
*/ + use_popc_instructions = (hwcap & HWCAP_SPARC_POPC) != 0; #ifndef use_vis3_instructions - { - unsigned long hwcap = qemu_getauxval(AT_HWCAP); - use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0; - } + use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0; #endif tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h index a18906a14ec..1b9adccd85f 100644 --- a/tcg/sparc64/tcg-target.h +++ b/tcg/sparc64/tcg-target.h @@ -64,97 +64,7 @@ typedef enum { TCG_REG_I7, } TCGReg; -/* used for function call generation */ -#define TCG_REG_CALL_STACK TCG_REG_O6 - -#define TCG_TARGET_STACK_BIAS 2047 -#define TCG_TARGET_STACK_ALIGN 16 -#define TCG_TARGET_CALL_STACK_OFFSET (128 + 6*8 + TCG_TARGET_STACK_BIAS) -#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND -#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL -#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL - -#if defined(__VIS__) && __VIS__ >= 0x300 -#define use_vis3_instructions 1 -#else -extern bool use_vis3_instructions; -#endif - -/* optional instructions */ -#define TCG_TARGET_HAS_div_i32 1 -#define TCG_TARGET_HAS_rem_i32 0 -#define TCG_TARGET_HAS_rot_i32 0 -#define TCG_TARGET_HAS_ext8s_i32 0 -#define TCG_TARGET_HAS_ext16s_i32 0 -#define TCG_TARGET_HAS_ext8u_i32 0 -#define TCG_TARGET_HAS_ext16u_i32 0 -#define TCG_TARGET_HAS_bswap16_i32 0 -#define TCG_TARGET_HAS_bswap32_i32 0 -#define TCG_TARGET_HAS_not_i32 1 -#define TCG_TARGET_HAS_andc_i32 1 -#define TCG_TARGET_HAS_orc_i32 1 -#define TCG_TARGET_HAS_eqv_i32 0 -#define TCG_TARGET_HAS_nand_i32 0 -#define TCG_TARGET_HAS_nor_i32 0 -#define TCG_TARGET_HAS_clz_i32 0 -#define TCG_TARGET_HAS_ctz_i32 0 -#define TCG_TARGET_HAS_ctpop_i32 0 -#define TCG_TARGET_HAS_deposit_i32 0 -#define TCG_TARGET_HAS_extract_i32 0 -#define TCG_TARGET_HAS_sextract_i32 0 -#define TCG_TARGET_HAS_extract2_i32 0 -#define TCG_TARGET_HAS_negsetcond_i32 1 -#define TCG_TARGET_HAS_add2_i32 1 -#define TCG_TARGET_HAS_sub2_i32 1 -#define TCG_TARGET_HAS_mulu2_i32 1 -#define TCG_TARGET_HAS_muls2_i32 1 -#define TCG_TARGET_HAS_muluh_i32 0 -#define TCG_TARGET_HAS_mulsh_i32 0 -#define TCG_TARGET_HAS_qemu_st8_i32 0 - -#define TCG_TARGET_HAS_extr_i64_i32 0 -#define TCG_TARGET_HAS_div_i64 1 -#define TCG_TARGET_HAS_rem_i64 0 -#define TCG_TARGET_HAS_rot_i64 0 -#define TCG_TARGET_HAS_ext8s_i64 0 -#define TCG_TARGET_HAS_ext16s_i64 0 -#define TCG_TARGET_HAS_ext32s_i64 1 -#define TCG_TARGET_HAS_ext8u_i64 0 -#define TCG_TARGET_HAS_ext16u_i64 0 -#define TCG_TARGET_HAS_ext32u_i64 1 -#define TCG_TARGET_HAS_bswap16_i64 0 -#define TCG_TARGET_HAS_bswap32_i64 0 -#define TCG_TARGET_HAS_bswap64_i64 0 -#define TCG_TARGET_HAS_not_i64 1 -#define TCG_TARGET_HAS_andc_i64 1 -#define TCG_TARGET_HAS_orc_i64 1 -#define TCG_TARGET_HAS_eqv_i64 0 -#define TCG_TARGET_HAS_nand_i64 0 -#define TCG_TARGET_HAS_nor_i64 0 -#define TCG_TARGET_HAS_clz_i64 0 -#define TCG_TARGET_HAS_ctz_i64 0 -#define TCG_TARGET_HAS_ctpop_i64 0 -#define TCG_TARGET_HAS_deposit_i64 0 -#define TCG_TARGET_HAS_extract_i64 0 -#define TCG_TARGET_HAS_sextract_i64 0 -#define TCG_TARGET_HAS_extract2_i64 0 -#define TCG_TARGET_HAS_negsetcond_i64 1 -#define TCG_TARGET_HAS_add2_i64 1 -#define TCG_TARGET_HAS_sub2_i64 1 -#define TCG_TARGET_HAS_mulu2_i64 0 -#define TCG_TARGET_HAS_muls2_i64 0 -#define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions -#define TCG_TARGET_HAS_mulsh_i64 0 - -#define TCG_TARGET_HAS_qemu_ldst_i128 0 - -#define TCG_TARGET_HAS_tst 1 - -#define TCG_AREG0 
TCG_REG_I0 - -#define TCG_TARGET_DEFAULT_MO (0) -#define TCG_TARGET_NEED_LDST_LABELS -#define TCG_TARGET_NEED_POOL_LABELS +#define TCG_AREG0 TCG_REG_I0 +#define TCG_REG_ZERO TCG_REG_G0 #endif diff --git a/tcg/tcg-common.c b/tcg/tcg-common.c index 35e7616ae95..e98b3e5fddf 100644 --- a/tcg/tcg-common.c +++ b/tcg/tcg-common.c @@ -24,10 +24,11 @@ #include "qemu/osdep.h" #include "tcg/tcg.h" +#include "tcg-has.h" -TCGOpDef tcg_op_defs[] = { +const TCGOpDef tcg_op_defs[] = { #define DEF(s, oargs, iargs, cargs, flags) \ - { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags, NULL }, + { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags }, #include "tcg/tcg-opc.h" #undef DEF }; diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h new file mode 100644 index 00000000000..2fc0e50d203 --- /dev/null +++ b/tcg/tcg-has.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific opcode support + * Copyright (c) 2024 Linaro, Ltd. + */ + +#ifndef TCG_HAS_H +#define TCG_HAS_H + +#include "tcg-target-has.h" + +#if TCG_TARGET_REG_BITS == 32 +/* Turn some undef macros into false macros. */ +#define TCG_TARGET_HAS_extr_i64_i32 0 +#endif + +#if !defined(TCG_TARGET_HAS_v64) \ + && !defined(TCG_TARGET_HAS_v128) \ + && !defined(TCG_TARGET_HAS_v256) +#define TCG_TARGET_MAYBE_vec 0 +#define TCG_TARGET_HAS_abs_vec 0 +#define TCG_TARGET_HAS_neg_vec 0 +#define TCG_TARGET_HAS_not_vec 0 +#define TCG_TARGET_HAS_andc_vec 0 +#define TCG_TARGET_HAS_orc_vec 0 +#define TCG_TARGET_HAS_nand_vec 0 +#define TCG_TARGET_HAS_nor_vec 0 +#define TCG_TARGET_HAS_eqv_vec 0 +#define TCG_TARGET_HAS_roti_vec 0 +#define TCG_TARGET_HAS_rots_vec 0 +#define TCG_TARGET_HAS_rotv_vec 0 +#define TCG_TARGET_HAS_shi_vec 0 +#define TCG_TARGET_HAS_shs_vec 0 +#define TCG_TARGET_HAS_shv_vec 0 +#define TCG_TARGET_HAS_mul_vec 0 +#define TCG_TARGET_HAS_sat_vec 0 +#define TCG_TARGET_HAS_minmax_vec 0 +#define TCG_TARGET_HAS_bitsel_vec 0 +#define TCG_TARGET_HAS_cmpsel_vec 0 +#define TCG_TARGET_HAS_tst_vec 0 +#else +#define TCG_TARGET_MAYBE_vec 1 +#endif +#ifndef TCG_TARGET_HAS_v64 +#define TCG_TARGET_HAS_v64 0 +#endif +#ifndef TCG_TARGET_HAS_v128 +#define TCG_TARGET_HAS_v128 0 +#endif +#ifndef TCG_TARGET_HAS_v256 +#define TCG_TARGET_HAS_v256 0 +#endif + +#endif diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h index 9b0d982f659..d6a12afe064 100644 --- a/tcg/tcg-internal.h +++ b/tcg/tcg-internal.h @@ -34,7 +34,7 @@ extern TCGContext **tcg_ctxs; extern unsigned int tcg_cur_ctxs; extern unsigned int tcg_max_ctxs; -void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus); +void tcg_region_init(size_t tb_size, int splitwx, unsigned max_threads); bool tcg_region_alloc(TCGContext *s); void tcg_region_initial_alloc(TCGContext *s); void tcg_region_prologue_set(TCGContext *s); @@ -92,15 +92,23 @@ TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind); */ TCGTemp *tcg_constant_internal(TCGType type, int64_t val); -void tcg_gen_op1(TCGOpcode, TCGArg); -void tcg_gen_op2(TCGOpcode, TCGArg, TCGArg); -void tcg_gen_op3(TCGOpcode, TCGArg, TCGArg, TCGArg); -void tcg_gen_op4(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg); -void tcg_gen_op5(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg); -void tcg_gen_op6(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg); +TCGOp *tcg_gen_op1(TCGOpcode, TCGType, TCGArg); +TCGOp *tcg_gen_op2(TCGOpcode, TCGType, TCGArg, TCGArg); +TCGOp *tcg_gen_op3(TCGOpcode, TCGType, TCGArg, TCGArg, TCGArg); +TCGOp *tcg_gen_op4(TCGOpcode, TCGType, TCGArg, TCGArg, TCGArg, TCGArg); +TCGOp *tcg_gen_op5(TCGOpcode, 
TCGType, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg); +TCGOp *tcg_gen_op6(TCGOpcode, TCGType, TCGArg, TCGArg, + TCGArg, TCGArg, TCGArg, TCGArg); void vec_gen_2(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg); void vec_gen_3(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg); void vec_gen_4(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg, TCGArg); +void vec_gen_6(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, + TCGArg a, TCGArg b, TCGArg c, TCGArg d, TCGArg e); + +TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, + TCGOpcode, TCGType, unsigned nargs); +TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, + TCGOpcode, TCGType, unsigned nargs); #endif /* TCG_INTERNAL_H */ diff --git a/tcg/tcg-ldst.c.inc b/tcg/tcg-ldst.c.inc deleted file mode 100644 index ffada04af09..00000000000 --- a/tcg/tcg-ldst.c.inc +++ /dev/null @@ -1,65 +0,0 @@ -/* - * TCG Backend Data: load-store optimization only. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -/* - * Generate TB finalization at the end of block - */ - -static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l); -static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l); - -static int tcg_out_ldst_finalize(TCGContext *s) -{ - TCGLabelQemuLdst *lb; - - /* qemu_ld/st slow paths */ - QSIMPLEQ_FOREACH(lb, &s->ldst_labels, next) { - if (lb->is_ld - ? !tcg_out_qemu_ld_slow_path(s, lb) - : !tcg_out_qemu_st_slow_path(s, lb)) { - return -2; - } - - /* Test for (pending) buffer overflow. The assumption is that any - one operation beginning below the high water mark cannot overrun - the buffer completely. Thus we can test for overflow after - generating code without having to check during generation. */ - if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) { - return -1; - } - } - return 0; -} - -/* - * Allocate a new TCGLabelQemuLdst entry. 
- */ - -static inline TCGLabelQemuLdst *new_ldst_label(TCGContext *s) -{ - TCGLabelQemuLdst *l = tcg_malloc(sizeof(*l)); - - memset(l, 0, sizeof(*l)); - QSIMPLEQ_INSERT_TAIL(&s->ldst_labels, l, next); - - return l; -} diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c index 0308732d9b5..2d184547ba0 100644 --- a/tcg/tcg-op-gvec.c +++ b/tcg/tcg-op-gvec.c @@ -23,6 +23,7 @@ #include "tcg/tcg-op-common.h" #include "tcg/tcg-op-gvec-common.h" #include "tcg/tcg-gvec-desc.h" +#include "tcg-has.h" #define MAX_UNROLL 4 @@ -56,30 +57,39 @@ static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs) tcg_debug_assert((ofs & max_align) == 0); } -/* Verify vector overlap rules for two operands. */ -static void check_overlap_2(uint32_t d, uint32_t a, uint32_t s) +/* + * Verify vector overlap rules for two operands. + * When dbase and abase are not the same pointer, we cannot check for + * overlap at compile-time, but the runtime restrictions remain. + */ +static void check_overlap_2(TCGv_ptr dbase, uint32_t d, + TCGv_ptr abase, uint32_t a, uint32_t s) { - tcg_debug_assert(d == a || d + s <= a || a + s <= d); + tcg_debug_assert(dbase != abase || d == a || d + s <= a || a + s <= d); } /* Verify vector overlap rules for three operands. */ -static void check_overlap_3(uint32_t d, uint32_t a, uint32_t b, uint32_t s) +static void check_overlap_3(TCGv_ptr dbase, uint32_t d, + TCGv_ptr abase, uint32_t a, + TCGv_ptr bbase, uint32_t b, uint32_t s) { - check_overlap_2(d, a, s); - check_overlap_2(d, b, s); - check_overlap_2(a, b, s); + check_overlap_2(dbase, d, abase, a, s); + check_overlap_2(dbase, d, bbase, b, s); + check_overlap_2(abase, a, bbase, b, s); } /* Verify vector overlap rules for four operands. */ -static void check_overlap_4(uint32_t d, uint32_t a, uint32_t b, - uint32_t c, uint32_t s) +static void check_overlap_4(TCGv_ptr dbase, uint32_t d, + TCGv_ptr abase, uint32_t a, + TCGv_ptr bbase, uint32_t b, + TCGv_ptr cbase, uint32_t c, uint32_t s) { - check_overlap_2(d, a, s); - check_overlap_2(d, b, s); - check_overlap_2(d, c, s); - check_overlap_2(a, b, s); - check_overlap_2(a, c, s); - check_overlap_2(b, c, s); + check_overlap_2(dbase, d, abase, a, s); + check_overlap_2(dbase, d, bbase, b, s); + check_overlap_2(dbase, d, cbase, c, s); + check_overlap_2(abase, a, bbase, b, s); + check_overlap_2(abase, a, cbase, c, s); + check_overlap_2(bbase, b, cbase, c, s); } /* Create a descriptor from components. */ @@ -88,7 +98,20 @@ uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data) uint32_t desc = 0; check_size_align(oprsz, maxsz, 0); - tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS)); + + /* + * We want to check that 'data' will fit into SIMD_DATA_BITS. + * However, some callers want to treat the data as a signed + * value (which they can later get back with simd_data()) + * and some want to treat it as an unsigned value. + * So here we assert only that the data will fit into the + * field in at least one way. This means that some invalid + * values from the caller will not be detected, e.g. if the + * caller wants to handle the value as a signed integer but + * incorrectly passes us 1 << (SIMD_DATA_BITS - 1). + */ + tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS) || + data == extract32(data, 0, SIMD_DATA_BITS)); oprsz = (oprsz / 8) - 1; maxsz = (maxsz / 8) - 1; @@ -110,9 +133,10 @@ uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data) } /* Generate a call to a gvec-style helper with two vector operands. 
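The relaxed assertion in simd_desc() above accepts 'data' whenever it round-trips through either sextract32() or extract32(). A self-contained sketch of what those two checks amount to, using DATA_BITS as a stand-in for SIMD_DATA_BITS (whose real value is defined in tcg/tcg-gvec-desc.h):

    #include <stdbool.h>
    #include <stdint.h>

    enum { DATA_BITS = 22 };   /* stand-in only, not the real width */

    static bool fits_signed(int32_t data)
    {
        return data >= -(1 << (DATA_BITS - 1)) && data < (1 << (DATA_BITS - 1));
    }

    static bool fits_unsigned(int32_t data)
    {
        return data >= 0 && data < (1 << DATA_BITS);
    }

    /*
     * The assert passes iff fits_signed(data) || fits_unsigned(data).
     * For example data = 1 << (DATA_BITS - 1) fails the signed check but
     * passes the unsigned one, so it is accepted even though a caller that
     * later reads it back with simd_data() sees a negative value.
     */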
*/ -void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs, - uint32_t oprsz, uint32_t maxsz, int32_t data, - gen_helper_gvec_2 *fn) +static void expand_2_ool(TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz, + int32_t data, gen_helper_gvec_2 *fn) { TCGv_ptr a0, a1; TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); @@ -120,8 +144,8 @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs, a0 = tcg_temp_ebb_new_ptr(); a1 = tcg_temp_ebb_new_ptr(); - tcg_gen_addi_ptr(a0, tcg_env, dofs); - tcg_gen_addi_ptr(a1, tcg_env, aofs); + tcg_gen_addi_ptr(a0, dbase, dofs); + tcg_gen_addi_ptr(a1, abase, aofs); fn(a0, a1, desc); @@ -129,6 +153,13 @@ void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs, tcg_temp_free_ptr(a1); } +void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz, int32_t data, + gen_helper_gvec_2 *fn) +{ + expand_2_ool(tcg_env, dofs, tcg_env, aofs, oprsz, maxsz, data, fn); +} + /* Generate a call to a gvec-style helper with two vector operands and one scalar operand. */ void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c, @@ -151,9 +182,11 @@ void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c, } /* Generate a call to a gvec-style helper with three vector operands. */ -void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, - uint32_t oprsz, uint32_t maxsz, int32_t data, - gen_helper_gvec_3 *fn) +static void expand_3_ool(TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + TCGv_ptr bbase, uint32_t bofs, + uint32_t oprsz, uint32_t maxsz, + int32_t data, gen_helper_gvec_3 *fn) { TCGv_ptr a0, a1, a2; TCGv_i32 desc = tcg_constant_i32(simd_desc(oprsz, maxsz, data)); @@ -162,9 +195,9 @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, a1 = tcg_temp_ebb_new_ptr(); a2 = tcg_temp_ebb_new_ptr(); - tcg_gen_addi_ptr(a0, tcg_env, dofs); - tcg_gen_addi_ptr(a1, tcg_env, aofs); - tcg_gen_addi_ptr(a2, tcg_env, bofs); + tcg_gen_addi_ptr(a0, dbase, dofs); + tcg_gen_addi_ptr(a1, abase, aofs); + tcg_gen_addi_ptr(a2, bbase, bofs); fn(a0, a1, a2, desc); @@ -173,6 +206,14 @@ void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, tcg_temp_free_ptr(a2); } +void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t oprsz, uint32_t maxsz, int32_t data, + gen_helper_gvec_3 *fn) +{ + expand_3_ool(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs, + oprsz, maxsz, data, fn); +} + /* Generate a call to a gvec-style helper with four vector operands. */ void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t oprsz, uint32_t maxsz, @@ -366,7 +407,7 @@ static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz) return q <= MAX_UNROLL; } -static void expand_clr(uint32_t dofs, uint32_t maxsz); +static void expand_clr(TCGv_ptr dbase, uint32_t dofs, uint32_t maxsz); /* Duplicate C as per VECE. */ uint64_t (dup_const)(unsigned vece, uint64_t c) @@ -469,8 +510,8 @@ static TCGType choose_vector_type(const TCGOpcode *list, unsigned vece, return 0; } -static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz, - uint32_t maxsz, TCGv_vec t_vec) +static void do_dup_store(TCGType type, TCGv_ptr dbase, uint32_t dofs, + uint32_t oprsz, uint32_t maxsz, TCGv_vec t_vec) { uint32_t i = 0; @@ -482,7 +523,7 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz, * are misaligned wrt the maximum vector size, so do that first. 
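For context, the expand_*_ool() paths above only materialise the dbase + dofs and abase + aofs pointers and call a gen_helper_gvec_* function; the descriptor built by simd_desc() is unpacked again inside the helper. A sketch of the usual shape of such an out-of-line helper (the helper name is made up; real ones live in the per-target code):

    #include <stdint.h>
    #include "tcg/tcg-gvec-desc.h"    /* simd_oprsz(), simd_maxsz() */

    void helper_gvec_example_not(void *d, void *a, uint32_t desc)
    {
        intptr_t oprsz = simd_oprsz(desc);
        intptr_t maxsz = simd_maxsz(desc);
        intptr_t i;

        for (i = 0; i < oprsz; i += 8) {
            *(uint64_t *)(d + i) = ~*(uint64_t *)(a + i);
        }
        for (; i < maxsz; i += 8) {
            *(uint64_t *)(d + i) = 0;  /* clear the tail, as expand_clr does inline */
        }
    }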
*/ if (dofs & 8) { - tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64); + tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V64); i += 8; } @@ -494,17 +535,17 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz, * that e.g. size == 80 would be expanded with 2x32 + 1x16. */ for (; i + 32 <= oprsz; i += 32) { - tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V256); + tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V256); } /* fallthru */ case TCG_TYPE_V128: for (; i + 16 <= oprsz; i += 16) { - tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V128); + tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V128); } break; case TCG_TYPE_V64: for (; i < oprsz; i += 8) { - tcg_gen_stl_vec(t_vec, tcg_env, dofs + i, TCG_TYPE_V64); + tcg_gen_stl_vec(t_vec, dbase, dofs + i, TCG_TYPE_V64); } break; default: @@ -512,17 +553,18 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz, } if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(dbase, dofs + oprsz, maxsz - oprsz); } } -/* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C. +/* + * Set OPRSZ bytes at DBASE + DOFS to replications of IN_32, IN_64 or IN_C. * Only one of IN_32 or IN_64 may be set; * IN_C is used if IN_32 and IN_64 are unset. */ -static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz, - uint32_t maxsz, TCGv_i32 in_32, TCGv_i64 in_64, - uint64_t in_c) +static void do_dup(unsigned vece, TCGv_ptr dbase, uint32_t dofs, + uint32_t oprsz, uint32_t maxsz, + TCGv_i32 in_32, TCGv_i64 in_64, uint64_t in_c) { TCGType type; TCGv_i64 t_64; @@ -560,7 +602,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz, } else { tcg_gen_dupi_vec(vece, t_vec, in_c); } - do_dup_store(type, dofs, oprsz, maxsz, t_vec); + do_dup_store(type, dbase, dofs, oprsz, maxsz, t_vec); return; } @@ -604,14 +646,14 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz, /* Implement inline if we picked an implementation size above. */ if (t_32) { for (i = 0; i < oprsz; i += 4) { - tcg_gen_st_i32(t_32, tcg_env, dofs + i); + tcg_gen_st_i32(t_32, dbase, dofs + i); } tcg_temp_free_i32(t_32); goto done; } if (t_64) { for (i = 0; i < oprsz; i += 8) { - tcg_gen_st_i64(t_64, tcg_env, dofs + i); + tcg_gen_st_i64(t_64, dbase, dofs + i); } tcg_temp_free_i64(t_64); goto done; @@ -620,7 +662,7 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz, /* Otherwise implement out of line. */ t_ptr = tcg_temp_ebb_new_ptr(); - tcg_gen_addi_ptr(t_ptr, tcg_env, dofs); + tcg_gen_addi_ptr(t_ptr, dbase, dofs); /* * This may be expand_clr for the tail of an operation, e.g. @@ -689,31 +731,32 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz, done: if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(dbase, dofs + oprsz, maxsz - oprsz); } } /* Likewise, but with zero. */ -static void expand_clr(uint32_t dofs, uint32_t maxsz) +static void expand_clr(TCGv_ptr dbase, uint32_t dofs, uint32_t maxsz) { - do_dup(MO_8, dofs, maxsz, maxsz, NULL, NULL, 0); + do_dup(MO_8, dbase, dofs, maxsz, maxsz, NULL, NULL, 0); } /* Expand OPSZ bytes worth of two-operand operations using i32 elements. 
*/ -static void expand_2_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz, - bool load_dest, void (*fni)(TCGv_i32, TCGv_i32)) +static void expand_2_i32(TCGv_ptr dbase, uint32_t dofs, TCGv_ptr abase, + uint32_t aofs, uint32_t oprsz, bool load_dest, + void (*fni)(TCGv_i32, TCGv_i32)) { TCGv_i32 t0 = tcg_temp_new_i32(); TCGv_i32 t1 = tcg_temp_new_i32(); uint32_t i; for (i = 0; i < oprsz; i += 4) { - tcg_gen_ld_i32(t0, tcg_env, aofs + i); + tcg_gen_ld_i32(t0, abase, aofs + i); if (load_dest) { - tcg_gen_ld_i32(t1, tcg_env, dofs + i); + tcg_gen_ld_i32(t1, dbase, dofs + i); } fni(t1, t0); - tcg_gen_st_i32(t1, tcg_env, dofs + i); + tcg_gen_st_i32(t1, dbase, dofs + i); } tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); @@ -761,8 +804,10 @@ static void expand_2s_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz, } /* Expand OPSZ bytes worth of three-operand operations using i32 elements. */ -static void expand_3_i32(uint32_t dofs, uint32_t aofs, - uint32_t bofs, uint32_t oprsz, bool load_dest, +static void expand_3_i32(TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + TCGv_ptr bbase, uint32_t bofs, + uint32_t oprsz, bool load_dest, void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32)) { TCGv_i32 t0 = tcg_temp_new_i32(); @@ -771,13 +816,13 @@ static void expand_3_i32(uint32_t dofs, uint32_t aofs, uint32_t i; for (i = 0; i < oprsz; i += 4) { - tcg_gen_ld_i32(t0, tcg_env, aofs + i); - tcg_gen_ld_i32(t1, tcg_env, bofs + i); + tcg_gen_ld_i32(t0, abase, aofs + i); + tcg_gen_ld_i32(t1, bbase, bofs + i); if (load_dest) { - tcg_gen_ld_i32(t2, tcg_env, dofs + i); + tcg_gen_ld_i32(t2, dbase, dofs + i); } fni(t2, t0, t1); - tcg_gen_st_i32(t2, tcg_env, dofs + i); + tcg_gen_st_i32(t2, dbase, dofs + i); } tcg_temp_free_i32(t2); tcg_temp_free_i32(t1); @@ -863,20 +908,21 @@ static void expand_4i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs, } /* Expand OPSZ bytes worth of two-operand operations using i64 elements. */ -static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz, - bool load_dest, void (*fni)(TCGv_i64, TCGv_i64)) +static void expand_2_i64(TCGv_ptr dbase, uint32_t dofs, TCGv_ptr abase, + uint32_t aofs, uint32_t oprsz, bool load_dest, + void (*fni)(TCGv_i64, TCGv_i64)) { TCGv_i64 t0 = tcg_temp_new_i64(); TCGv_i64 t1 = tcg_temp_new_i64(); uint32_t i; for (i = 0; i < oprsz; i += 8) { - tcg_gen_ld_i64(t0, tcg_env, aofs + i); + tcg_gen_ld_i64(t0, abase, aofs + i); if (load_dest) { - tcg_gen_ld_i64(t1, tcg_env, dofs + i); + tcg_gen_ld_i64(t1, dbase, dofs + i); } fni(t1, t0); - tcg_gen_st_i64(t1, tcg_env, dofs + i); + tcg_gen_st_i64(t1, dbase, dofs + i); } tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); @@ -924,8 +970,10 @@ static void expand_2s_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz, } /* Expand OPSZ bytes worth of three-operand operations using i64 elements. 
*/ -static void expand_3_i64(uint32_t dofs, uint32_t aofs, - uint32_t bofs, uint32_t oprsz, bool load_dest, +static void expand_3_i64(TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + TCGv_ptr bbase, uint32_t bofs, + uint32_t oprsz, bool load_dest, void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64)) { TCGv_i64 t0 = tcg_temp_new_i64(); @@ -934,13 +982,13 @@ static void expand_3_i64(uint32_t dofs, uint32_t aofs, uint32_t i; for (i = 0; i < oprsz; i += 8) { - tcg_gen_ld_i64(t0, tcg_env, aofs + i); - tcg_gen_ld_i64(t1, tcg_env, bofs + i); + tcg_gen_ld_i64(t0, abase, aofs + i); + tcg_gen_ld_i64(t1, bbase, bofs + i); if (load_dest) { - tcg_gen_ld_i64(t2, tcg_env, dofs + i); + tcg_gen_ld_i64(t2, dbase, dofs + i); } fni(t2, t0, t1); - tcg_gen_st_i64(t2, tcg_env, dofs + i); + tcg_gen_st_i64(t2, dbase, dofs + i); } tcg_temp_free_i64(t2); tcg_temp_free_i64(t1); @@ -1026,7 +1074,8 @@ static void expand_4i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs, } /* Expand OPSZ bytes worth of two-operand operations using host vectors. */ -static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs, +static void expand_2_vec(unsigned vece, TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, uint32_t oprsz, uint32_t tysz, TCGType type, bool load_dest, void (*fni)(unsigned, TCGv_vec, TCGv_vec)) @@ -1035,12 +1084,12 @@ static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_vec t0 = tcg_temp_new_vec(type); TCGv_vec t1 = tcg_temp_new_vec(type); - tcg_gen_ld_vec(t0, tcg_env, aofs + i); + tcg_gen_ld_vec(t0, abase, aofs + i); if (load_dest) { - tcg_gen_ld_vec(t1, tcg_env, dofs + i); + tcg_gen_ld_vec(t1, dbase, dofs + i); } fni(vece, t1, t0); - tcg_gen_st_vec(t1, tcg_env, dofs + i); + tcg_gen_st_vec(t1, dbase, dofs + i); } } @@ -1084,8 +1133,9 @@ static void expand_2s_vec(unsigned vece, uint32_t dofs, uint32_t aofs, } /* Expand OPSZ bytes worth of three-operand operations using host vectors. */ -static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs, - uint32_t bofs, uint32_t oprsz, +static void expand_3_vec(unsigned vece, TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + TCGv_ptr bbase, uint32_t bofs, uint32_t oprsz, uint32_t tysz, TCGType type, bool load_dest, void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec)) { @@ -1094,13 +1144,13 @@ static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_vec t1 = tcg_temp_new_vec(type); TCGv_vec t2 = tcg_temp_new_vec(type); - tcg_gen_ld_vec(t0, tcg_env, aofs + i); - tcg_gen_ld_vec(t1, tcg_env, bofs + i); + tcg_gen_ld_vec(t0, abase, aofs + i); + tcg_gen_ld_vec(t1, bbase, bofs + i); if (load_dest) { - tcg_gen_ld_vec(t2, tcg_env, dofs + i); + tcg_gen_ld_vec(t2, dbase, dofs + i); } fni(vece, t2, t0, t1); - tcg_gen_st_vec(t2, tcg_env, dofs + i); + tcg_gen_st_vec(t2, dbase, dofs + i); } } @@ -1182,8 +1232,9 @@ static void expand_4i_vec(unsigned vece, uint32_t dofs, uint32_t aofs, } /* Expand a vector two-operand operation. */ -void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs, - uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g) +void tcg_gen_gvec_2_var(TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g) { const TCGOpcode *this_list = g->opt_opc ? 
: vecop_list_empty; const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); @@ -1191,7 +1242,7 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs, uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs); - check_overlap_2(dofs, aofs, maxsz); + check_overlap_2(dbase, dofs, abase, aofs, maxsz); type = 0; if (g->fniv) { @@ -1204,8 +1255,8 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs, * that e.g. size == 80 would be expanded with 2x32 + 1x16. */ some = QEMU_ALIGN_DOWN(oprsz, 32); - expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, - g->load_dest, g->fniv); + expand_2_vec(g->vece, dbase, dofs, abase, aofs, some, 32, + TCG_TYPE_V256, g->load_dest, g->fniv); if (some == oprsz) { break; } @@ -1215,22 +1266,25 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs, maxsz -= some; /* fallthru */ case TCG_TYPE_V128: - expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, - g->load_dest, g->fniv); + expand_2_vec(g->vece, dbase, dofs, abase, aofs, oprsz, 16, + TCG_TYPE_V128, g->load_dest, g->fniv); break; case TCG_TYPE_V64: - expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, - g->load_dest, g->fniv); + expand_2_vec(g->vece, dbase, dofs, abase, aofs, oprsz, 8, + TCG_TYPE_V64, g->load_dest, g->fniv); break; case 0: if (g->fni8 && check_size_impl(oprsz, 8)) { - expand_2_i64(dofs, aofs, oprsz, g->load_dest, g->fni8); + expand_2_i64(dbase, dofs, abase, aofs, + oprsz, g->load_dest, g->fni8); } else if (g->fni4 && check_size_impl(oprsz, 4)) { - expand_2_i32(dofs, aofs, oprsz, g->load_dest, g->fni4); + expand_2_i32(dbase, dofs, abase, aofs, + oprsz, g->load_dest, g->fni4); } else { assert(g->fno != NULL); - tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno); + expand_2_ool(dbase, dofs, abase, aofs, + oprsz, maxsz, g->data, g->fno); oprsz = maxsz; } break; @@ -1241,10 +1295,16 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs, tcg_swap_vecop_list(hold_list); if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(dbase, dofs + oprsz, maxsz - oprsz); } } +void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g) +{ + tcg_gen_gvec_2_var(tcg_env, dofs, tcg_env, aofs, oprsz, maxsz, g); +} + /* Expand a vector operation with two vectors and an immediate. */ void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t maxsz, int64_t c, const GVecGen2i *g) @@ -1255,7 +1315,7 @@ void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz, uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs); - check_overlap_2(dofs, aofs, maxsz); + check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz); type = 0; if (g->fniv) { @@ -1310,7 +1370,7 @@ void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz, tcg_swap_vecop_list(hold_list); if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz); } } @@ -1321,7 +1381,7 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz, TCGType type; check_size_align(oprsz, maxsz, dofs | aofs); - check_overlap_2(dofs, aofs, maxsz); + check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz); type = 0; if (g->fniv) { @@ -1387,13 +1447,15 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz, } if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz); } } /* Expand a vector three-operand operation. 
*/ -void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs, - uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g) +void tcg_gen_gvec_3_var(TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + TCGv_ptr bbase, uint32_t bofs, + uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g) { const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty; const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list); @@ -1401,7 +1463,7 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs | bofs); - check_overlap_3(dofs, aofs, bofs, maxsz); + check_overlap_3(dbase, dofs, abase, aofs, bbase, bofs, maxsz); type = 0; if (g->fniv) { @@ -1414,8 +1476,8 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs, * that e.g. size == 80 would be expanded with 2x32 + 1x16. */ some = QEMU_ALIGN_DOWN(oprsz, 32); - expand_3_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, - g->load_dest, g->fniv); + expand_3_vec(g->vece, dbase, dofs, abase, aofs, bbase, bofs, + some, 32, TCG_TYPE_V256, g->load_dest, g->fniv); if (some == oprsz) { break; } @@ -1426,23 +1488,25 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs, maxsz -= some; /* fallthru */ case TCG_TYPE_V128: - expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, - g->load_dest, g->fniv); + expand_3_vec(g->vece, dbase, dofs, abase, aofs, bbase, bofs, + oprsz, 16, TCG_TYPE_V128, g->load_dest, g->fniv); break; case TCG_TYPE_V64: - expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, - g->load_dest, g->fniv); + expand_3_vec(g->vece, dbase, dofs, abase, aofs, bbase, bofs, + oprsz, 8, TCG_TYPE_V64, g->load_dest, g->fniv); break; case 0: if (g->fni8 && check_size_impl(oprsz, 8)) { - expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8); + expand_3_i64(dbase, dofs, abase, aofs, bbase, bofs, + oprsz, g->load_dest, g->fni8); } else if (g->fni4 && check_size_impl(oprsz, 4)) { - expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4); + expand_3_i32(dbase, dofs, abase, aofs, bbase, bofs, + oprsz, g->load_dest, g->fni4); } else { assert(g->fno != NULL); - tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, - maxsz, g->data, g->fno); + expand_3_ool(dbase, dofs, abase, aofs, bbase, bofs, + oprsz, maxsz, g->data, g->fno); oprsz = maxsz; } break; @@ -1453,10 +1517,17 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs, tcg_swap_vecop_list(hold_list); if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(dbase, dofs + oprsz, maxsz - oprsz); } } +void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs, + uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g) +{ + tcg_gen_gvec_3_var(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs, + oprsz, maxsz, g); +} + /* Expand a vector operation with three vectors and an immediate. 
*/ void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t oprsz, uint32_t maxsz, int64_t c, @@ -1468,7 +1539,7 @@ void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs | bofs); - check_overlap_3(dofs, aofs, bofs, maxsz); + check_overlap_3(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs, maxsz); type = 0; if (g->fniv) { @@ -1522,7 +1593,7 @@ void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs, tcg_swap_vecop_list(hold_list); if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz); } } @@ -1536,7 +1607,8 @@ void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs); - check_overlap_4(dofs, aofs, bofs, cofs, maxsz); + check_overlap_4(tcg_env, dofs, tcg_env, aofs, + tcg_env, bofs, tcg_env, cofs, maxsz); type = 0; if (g->fniv) { @@ -1591,7 +1663,7 @@ void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, tcg_swap_vecop_list(hold_list); if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz); } } @@ -1606,7 +1678,8 @@ void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs); - check_overlap_4(dofs, aofs, bofs, cofs, maxsz); + check_overlap_4(tcg_env, dofs, tcg_env, aofs, + tcg_env, bofs, tcg_env, cofs, maxsz); type = 0; if (g->fniv) { @@ -1660,7 +1733,7 @@ void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs, tcg_swap_vecop_list(hold_list); if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz); } } @@ -1673,8 +1746,9 @@ static void vec_mov2(unsigned vece, TCGv_vec a, TCGv_vec b) tcg_gen_mov_vec(a, b); } -void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs, - uint32_t oprsz, uint32_t maxsz) +void tcg_gen_gvec_mov_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz) { static const GVecGen2 g = { .fni8 = tcg_gen_mov_i64, @@ -1682,14 +1756,22 @@ void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs, .fno = gen_helper_gvec_mov, .prefer_i64 = TCG_TARGET_REG_BITS == 64, }; - if (dofs != aofs) { - tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g); - } else { + + if (dofs == aofs && dbase == abase) { check_size_align(oprsz, maxsz, dofs); if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(dbase, dofs + oprsz, maxsz - oprsz); } + return; } + + tcg_gen_gvec_2_var(dbase, dofs, abase, aofs, oprsz, maxsz, &g); +} + +void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t oprsz, uint32_t maxsz) +{ + tcg_gen_gvec_mov_var(vece, tcg_env, dofs, tcg_env, aofs, oprsz, maxsz); } void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t oprsz, @@ -1697,7 +1779,7 @@ void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t oprsz, { check_size_align(oprsz, maxsz, dofs); tcg_debug_assert(vece <= MO_32); - do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0); + do_dup(vece, tcg_env, dofs, oprsz, maxsz, in, NULL, 0); } void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t oprsz, @@ -1705,7 +1787,7 @@ void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t oprsz, { check_size_align(oprsz, maxsz, dofs); tcg_debug_assert(vece <= MO_64); - do_dup(vece, dofs, 
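The new *_var entry points take an explicit base pointer for each operand instead of hard-coding tcg_env, and the existing functions become thin wrappers that pass tcg_env for every base. A hypothetical use, operating on vector state reached through a runtime pointer rather than at a fixed tcg_env offset (SomeCPUState and ext_vec_storage are made-up names; the usual tcg-op headers are assumed):

    static void gen_copy_ext_vec(uint32_t dofs, uint32_t aofs,
                                 uint32_t oprsz, uint32_t maxsz)
    {
        TCGv_ptr base = tcg_temp_new_ptr();

        /* Load a pointer to externally allocated vector storage. */
        tcg_gen_ld_ptr(base, tcg_env, offsetof(SomeCPUState, ext_vec_storage));

        /* Copy one vector within that storage; tcg_env is not the base. */
        tcg_gen_gvec_mov_var(MO_8, base, dofs, base, aofs, oprsz, maxsz);

        tcg_temp_free_ptr(base);
    }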
oprsz, maxsz, NULL, in, 0); + do_dup(vece, tcg_env, dofs, oprsz, maxsz, NULL, in, 0); } void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs, @@ -1717,7 +1799,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs, if (type != 0) { TCGv_vec t_vec = tcg_temp_new_vec(type); tcg_gen_dup_mem_vec(vece, t_vec, tcg_env, aofs); - do_dup_store(type, dofs, oprsz, maxsz, t_vec); + do_dup_store(type, tcg_env, dofs, oprsz, maxsz, t_vec); } else if (vece <= MO_32) { TCGv_i32 in = tcg_temp_ebb_new_i32(); switch (vece) { @@ -1731,12 +1813,12 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs, tcg_gen_ld_i32(in, tcg_env, aofs); break; } - do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0); + do_dup(vece, tcg_env, dofs, oprsz, maxsz, in, NULL, 0); tcg_temp_free_i32(in); } else { TCGv_i64 in = tcg_temp_ebb_new_i64(); tcg_gen_ld_i64(in, tcg_env, aofs); - do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0); + do_dup(vece, tcg_env, dofs, oprsz, maxsz, NULL, in, 0); tcg_temp_free_i64(in); } } else if (vece == 4) { @@ -1765,7 +1847,7 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs, tcg_temp_free_i64(in1); } if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz); } } else if (vece == 5) { /* 256-bit duplicate. */ @@ -1808,18 +1890,24 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs, } } if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz); } } else { g_assert_not_reached(); } } +void tcg_gen_gvec_dup_imm_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs, + uint32_t oprsz, uint32_t maxsz, uint64_t x) +{ + check_size_align(oprsz, maxsz, dofs); + do_dup(vece, dbase, dofs, oprsz, maxsz, NULL, NULL, x); +} + void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t oprsz, uint32_t maxsz, uint64_t x) { - check_size_align(oprsz, maxsz, dofs); - do_dup(vece, dofs, oprsz, maxsz, NULL, NULL, x); + tcg_gen_gvec_dup_imm_var(vece, tcg_env, dofs, oprsz, maxsz, x); } void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs, @@ -1917,8 +2005,10 @@ void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) static const TCGOpcode vecop_list_add[] = { INDEX_op_add_vec, 0 }; -void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs, - uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +void tcg_gen_gvec_add_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + TCGv_ptr bbase, uint32_t bofs, + uint32_t oprsz, uint32_t maxsz) { static const GVecGen3 g[4] = { { .fni8 = tcg_gen_vec_add8_i64, @@ -1945,7 +2035,15 @@ void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs, }; tcg_debug_assert(vece <= MO_64); - tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]); + tcg_gen_gvec_3_var(dbase, dofs, abase, aofs, bbase, bofs, + oprsz, maxsz, &g[vece]); +} + +void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + tcg_gen_gvec_add_var(vece, tcg_env, dofs, tcg_env, aofs, tcg_env, bofs, + oprsz, maxsz); } void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs, @@ -2098,8 +2196,10 @@ void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) tcg_temp_free_i64(t2); } -void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs, - uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +void tcg_gen_gvec_sub_var(unsigned vece, TCGv_ptr dbase, uint32_t dofs, + TCGv_ptr abase, uint32_t aofs, + 
TCGv_ptr bbase, uint32_t bofs, + uint32_t oprsz, uint32_t maxsz) { static const GVecGen3 g[4] = { { .fni8 = tcg_gen_vec_sub8_i64, @@ -2126,7 +2226,15 @@ void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs, }; tcg_debug_assert(vece <= MO_64); - tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]); + tcg_gen_gvec_3_var(dbase, dofs, abase, aofs, bbase, bofs, + oprsz, maxsz, &g[vece]); +} + +void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs, + uint32_t bofs, uint32_t oprsz, uint32_t maxsz) +{ + tcg_gen_gvec_sub_var(vece, tcg_env, dofs, tcg_env, aofs, tcg_env, bofs, + oprsz, maxsz); } static const TCGOpcode vecop_list_mul[] = { INDEX_op_mul_vec, 0 }; @@ -3135,7 +3243,7 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift, uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs); - check_overlap_2(dofs, aofs, maxsz); + check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz); /* If the backend has a scalar expansion, great. */ type = choose_vector_type(g->s_list, vece, oprsz, vece == MO_64); @@ -3241,7 +3349,7 @@ do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift, clear_tail: if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz); } } @@ -3755,10 +3863,10 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs, uint32_t some; check_size_align(oprsz, maxsz, dofs | aofs | bofs); - check_overlap_3(dofs, aofs, bofs, maxsz); + check_overlap_3(tcg_env, dofs, tcg_env, aofs, tcg_env, bofs, maxsz); if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) { - do_dup(MO_8, dofs, oprsz, maxsz, + do_dup(MO_8, tcg_env, dofs, oprsz, maxsz, NULL, NULL, -(cond == TCG_COND_ALWAYS)); return; } @@ -3820,7 +3928,7 @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs, tcg_swap_vecop_list(hold_list); if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz); } } @@ -3875,10 +3983,10 @@ void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs, TCGType type; check_size_align(oprsz, maxsz, dofs | aofs); - check_overlap_2(dofs, aofs, maxsz); + check_overlap_2(tcg_env, dofs, tcg_env, aofs, maxsz); if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) { - do_dup(MO_8, dofs, oprsz, maxsz, + do_dup(MO_8, tcg_env, dofs, oprsz, maxsz, NULL, NULL, -(cond == TCG_COND_ALWAYS)); return; } @@ -3939,7 +4047,7 @@ void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs, uint32_t i; tcg_gen_extrl_i64_i32(t1, c); - for (i = 0; i < oprsz; i += 8) { + for (i = 0; i < oprsz; i += 4) { tcg_gen_ld_i32(t0, tcg_env, aofs + i); tcg_gen_negsetcond_i32(cond, t0, t0, t1); tcg_gen_st_i32(t0, tcg_env, dofs + i); @@ -3961,7 +4069,7 @@ void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs, } if (oprsz < maxsz) { - expand_clr(dofs + oprsz, maxsz - oprsz); + expand_clr(tcg_env, dofs + oprsz, maxsz - oprsz); } } diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c index 85101602581..548496002d7 100644 --- a/tcg/tcg-op-ldst.c +++ b/tcg/tcg-op-ldst.c @@ -27,25 +27,27 @@ #include "tcg/tcg-temp-internal.h" #include "tcg/tcg-op-common.h" #include "tcg/tcg-mo.h" +#include "exec/target_page.h" #include "exec/translation-block.h" #include "exec/plugin-gen.h" #include "tcg-internal.h" - +#include "tcg-has.h" +#include "tcg-target-mo.h" static void check_max_alignment(unsigned a_bits) { /* * The requested alignment cannot overlap the TLB flags. - * FIXME: Must keep the count up-to-date with "exec/cpu-all.h". 
+ * FIXME: Must keep the count up-to-date with "exec/tlb-flags.h". */ if (tcg_use_softmmu) { - tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits); + tcg_debug_assert(a_bits + 5 <= TARGET_PAGE_BITS); } } static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) { - unsigned a_bits = get_alignment_bits(op); + unsigned a_bits = memop_alignment_bits(op); check_max_alignment(a_bits); @@ -87,37 +89,40 @@ static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) return op; } -static void gen_ldst(TCGOpcode opc, TCGTemp *vl, TCGTemp *vh, - TCGTemp *addr, MemOpIdx oi) +static void gen_ldst1(TCGOpcode opc, TCGType type, TCGTemp *v, + TCGTemp *addr, MemOpIdx oi) { - if (TCG_TARGET_REG_BITS == 64 || tcg_ctx->addr_type == TCG_TYPE_I32) { - if (vh) { - tcg_gen_op4(opc, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi); - } else { - tcg_gen_op3(opc, temp_arg(vl), temp_arg(addr), oi); - } - } else { - /* See TCGV_LOW/HIGH. */ - TCGTemp *al = addr + HOST_BIG_ENDIAN; - TCGTemp *ah = addr + !HOST_BIG_ENDIAN; + TCGOp *op = tcg_gen_op3(opc, type, temp_arg(v), temp_arg(addr), oi); + TCGOP_FLAGS(op) = get_memop(oi) & MO_SIZE; +} - if (vh) { - tcg_gen_op5(opc, temp_arg(vl), temp_arg(vh), - temp_arg(al), temp_arg(ah), oi); - } else { - tcg_gen_op4(opc, temp_arg(vl), temp_arg(al), temp_arg(ah), oi); - } +static void gen_ldst2(TCGOpcode opc, TCGType type, TCGTemp *vl, TCGTemp *vh, + TCGTemp *addr, MemOpIdx oi) +{ + TCGOp *op = tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh), + temp_arg(addr), oi); + TCGOP_FLAGS(op) = get_memop(oi) & MO_SIZE; +} + +static void gen_ld_i64(TCGv_i64 v, TCGTemp *addr, MemOpIdx oi) +{ + if (TCG_TARGET_REG_BITS == 32) { + gen_ldst2(INDEX_op_qemu_ld2, TCG_TYPE_I64, + tcgv_i32_temp(TCGV_LOW(v)), tcgv_i32_temp(TCGV_HIGH(v)), + addr, oi); + } else { + gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi); } } -static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi) +static void gen_st_i64(TCGv_i64 v, TCGTemp *addr, MemOpIdx oi) { if (TCG_TARGET_REG_BITS == 32) { - TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v)); - TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v)); - gen_ldst(opc, vl, vh, addr, oi); + gen_ldst2(INDEX_op_qemu_st2, TCG_TYPE_I64, + tcgv_i32_temp(TCGV_LOW(v)), tcgv_i32_temp(TCGV_HIGH(v)), + addr, oi); } else { - gen_ldst(opc, tcgv_i64_temp(v), NULL, addr, oi); + gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi); } } @@ -148,11 +153,11 @@ static TCGv_i64 plugin_maybe_preserve_addr(TCGTemp *addr) return NULL; } +#ifdef CONFIG_PLUGIN static void plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi, enum qemu_plugin_mem_rw rw) { -#ifdef CONFIG_PLUGIN if (tcg_ctx->plugin_insn != NULL) { qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw); @@ -172,6 +177,54 @@ plugin_gen_mem_callbacks(TCGv_i64 copy_addr, TCGTemp *orig_addr, MemOpIdx oi, } } } +} +#endif + +static void +plugin_gen_mem_callbacks_i32(TCGv_i32 val, + TCGv_i64 copy_addr, TCGTemp *orig_addr, + MemOpIdx oi, enum qemu_plugin_mem_rw rw) +{ +#ifdef CONFIG_PLUGIN + if (tcg_ctx->plugin_insn != NULL) { + tcg_gen_st_i32(val, tcg_env, + offsetof(CPUState, neg.plugin_mem_value_low) - + sizeof(CPUState) + (HOST_BIG_ENDIAN * 4)); + plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw); + } +#endif +} + +static void +plugin_gen_mem_callbacks_i64(TCGv_i64 val, + TCGv_i64 copy_addr, TCGTemp *orig_addr, + MemOpIdx oi, enum qemu_plugin_mem_rw rw) +{ +#ifdef CONFIG_PLUGIN + if (tcg_ctx->plugin_insn != NULL) { + tcg_gen_st_i64(val, tcg_env, + 
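/* In plugin_gen_mem_callbacks_i32() above, the 32-bit value is stored into
 * the low half of the 64-bit plugin_mem_value_low slot; on a big-endian host
 * that half lies 4 bytes further into the field, which is what the
 * HOST_BIG_ENDIAN * 4 term accounts for. */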
offsetof(CPUState, neg.plugin_mem_value_low) - + sizeof(CPUState)); + plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw); + } +#endif +} + +static void +plugin_gen_mem_callbacks_i128(TCGv_i128 val, + TCGv_i64 copy_addr, TCGTemp *orig_addr, + MemOpIdx oi, enum qemu_plugin_mem_rw rw) +{ +#ifdef CONFIG_PLUGIN + if (tcg_ctx->plugin_insn != NULL) { + tcg_gen_st_i64(TCGV128_LOW(val), tcg_env, + offsetof(CPUState, neg.plugin_mem_value_low) - + sizeof(CPUState)); + tcg_gen_st_i64(TCGV128_HIGH(val), tcg_env, + offsetof(CPUState, neg.plugin_mem_value_high) - + sizeof(CPUState)); + plugin_gen_mem_callbacks(copy_addr, orig_addr, oi, rw); + } #endif } @@ -181,7 +234,6 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr, MemOp orig_memop; MemOpIdx orig_oi, oi; TCGv_i64 copy_addr; - TCGOpcode opc; tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0); @@ -197,13 +249,9 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr, } copy_addr = plugin_maybe_preserve_addr(addr); - if (tcg_ctx->addr_type == TCG_TYPE_I32) { - opc = INDEX_op_qemu_ld_a32_i32; - } else { - opc = INDEX_op_qemu_ld_a64_i32; - } - gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi); - plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R); + gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I32, tcgv_i32_temp(val), addr, oi); + plugin_gen_mem_callbacks_i32(val, copy_addr, addr, orig_oi, + QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { switch (orig_memop & MO_SIZE) { @@ -234,7 +282,6 @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr, { TCGv_i32 swap = NULL; MemOpIdx orig_oi, oi; - TCGOpcode opc; tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 0, 1); @@ -257,21 +304,8 @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr, oi = make_memop_idx(memop, idx); } - if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) { - if (tcg_ctx->addr_type == TCG_TYPE_I32) { - opc = INDEX_op_qemu_st8_a32_i32; - } else { - opc = INDEX_op_qemu_st8_a64_i32; - } - } else { - if (tcg_ctx->addr_type == TCG_TYPE_I32) { - opc = INDEX_op_qemu_st_a32_i32; - } else { - opc = INDEX_op_qemu_st_a64_i32; - } - } - gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi); - plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W); + gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I32, tcgv_i32_temp(val), addr, oi); + plugin_gen_mem_callbacks_i32(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W); if (swap) { tcg_temp_free_i32(swap); @@ -292,7 +326,6 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr, MemOp orig_memop; MemOpIdx orig_oi, oi; TCGv_i64 copy_addr; - TCGOpcode opc; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop); @@ -318,13 +351,9 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr, } copy_addr = plugin_maybe_preserve_addr(addr); - if (tcg_ctx->addr_type == TCG_TYPE_I32) { - opc = INDEX_op_qemu_ld_a32_i64; - } else { - opc = INDEX_op_qemu_ld_a64_i64; - } - gen_ldst_i64(opc, val, addr, oi); - plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R); + gen_ld_i64(val, addr, oi); + plugin_gen_mem_callbacks_i64(val, copy_addr, addr, orig_oi, + QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { int flags = (orig_memop & MO_SIGN @@ -359,7 +388,6 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr, { TCGv_i64 swap = NULL; MemOpIdx orig_oi, oi; - TCGOpcode opc; if (TCG_TARGET_REG_BITS == 32 
&& (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop); @@ -390,13 +418,8 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr, oi = make_memop_idx(memop, idx); } - if (tcg_ctx->addr_type == TCG_TYPE_I32) { - opc = INDEX_op_qemu_st_a32_i64; - } else { - opc = INDEX_op_qemu_st_a64_i64; - } - gen_ldst_i64(opc, val, addr, oi); - plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W); + gen_st_i64(val, addr, oi); + plugin_gen_mem_callbacks_i64(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W); if (swap) { tcg_temp_free_i64(swap); @@ -507,9 +530,8 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr, { MemOpIdx orig_oi; TCGv_i64 ext_addr = NULL; - TCGOpcode opc; - check_max_alignment(get_alignment_bits(memop)); + check_max_alignment(memop_alignment_bits(memop)); tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); /* In serial mode, reduce atomicity. */ @@ -535,12 +557,8 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr, hi = TCGV128_HIGH(val); } - if (tcg_ctx->addr_type == TCG_TYPE_I32) { - opc = INDEX_op_qemu_ld_a32_i128; - } else { - opc = INDEX_op_qemu_ld_a64_i128; - } - gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi); + gen_ldst2(INDEX_op_qemu_ld2, TCG_TYPE_I128, tcgv_i64_temp(lo), + tcgv_i64_temp(hi), addr, oi); if (need_bswap) { tcg_gen_bswap64_i64(lo, lo); @@ -555,12 +573,6 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr, canonicalize_memop_i128_as_i64(mop, memop); need_bswap = (mop[0] ^ memop) & MO_BSWAP; - if (tcg_ctx->addr_type == TCG_TYPE_I32) { - opc = INDEX_op_qemu_ld_a32_i64; - } else { - opc = INDEX_op_qemu_ld_a64_i64; - } - /* * Since there are no global TCGv_i128, there is no visible state * changed if the second load faults. Load directly into the two @@ -574,7 +586,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr, y = TCGV128_LOW(val); } - gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx)); + gen_ld_i64(x, addr, make_memop_idx(mop[0], idx)); if (need_bswap) { tcg_gen_bswap64_i64(x, x); @@ -590,7 +602,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr, addr_p8 = tcgv_i64_temp(t); } - gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx)); + gen_ld_i64(y, addr_p8, make_memop_idx(mop[1], idx)); tcg_temp_free_internal(addr_p8); if (need_bswap) { @@ -606,7 +618,8 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr, tcg_constant_i32(orig_oi)); } - plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R); + plugin_gen_mem_callbacks_i128(val, ext_addr, addr, orig_oi, + QEMU_PLUGIN_MEM_R); } void tcg_gen_qemu_ld_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx, @@ -623,9 +636,8 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr, { MemOpIdx orig_oi; TCGv_i64 ext_addr = NULL; - TCGOpcode opc; - check_max_alignment(get_alignment_bits(memop)); + check_max_alignment(memop_alignment_bits(memop)); tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST); /* In serial mode, reduce atomicity. 
*/ @@ -654,12 +666,8 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr, hi = TCGV128_HIGH(val); } - if (tcg_ctx->addr_type == TCG_TYPE_I32) { - opc = INDEX_op_qemu_st_a32_i128; - } else { - opc = INDEX_op_qemu_st_a64_i128; - } - gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi); + gen_ldst2(INDEX_op_qemu_st2, TCG_TYPE_I128, + tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi); if (need_bswap) { tcg_temp_free_i64(lo); @@ -672,12 +680,6 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr, canonicalize_memop_i128_as_i64(mop, memop); - if (tcg_ctx->addr_type == TCG_TYPE_I32) { - opc = INDEX_op_qemu_st_a32_i64; - } else { - opc = INDEX_op_qemu_st_a64_i64; - } - if ((memop & MO_BSWAP) == MO_LE) { x = TCGV128_LOW(val); y = TCGV128_HIGH(val); @@ -692,7 +694,7 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr, x = b; } - gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx)); + gen_st_i64(x, addr, make_memop_idx(mop[0], idx)); if (tcg_ctx->addr_type == TCG_TYPE_I32) { TCGv_i32 t = tcg_temp_ebb_new_i32(); @@ -706,10 +708,10 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr, if (b) { tcg_gen_bswap64_i64(b, y); - gen_ldst_i64(opc, b, addr_p8, make_memop_idx(mop[1], idx)); + gen_st_i64(b, addr_p8, make_memop_idx(mop[1], idx)); tcg_temp_free_i64(b); } else { - gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx)); + gen_st_i64(y, addr_p8, make_memop_idx(mop[1], idx)); } tcg_temp_free_internal(addr_p8); } else { @@ -722,7 +724,8 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr, tcg_constant_i32(orig_oi)); } - plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_W); + plugin_gen_mem_callbacks_i128(val, ext_addr, addr, orig_oi, + QEMU_PLUGIN_MEM_W); } void tcg_gen_qemu_st_i128_chk(TCGv_i128 val, TCGTemp *addr, TCGArg idx, diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c index 84af210bc0b..893d68e7d80 100644 --- a/tcg/tcg-op-vec.c +++ b/tcg/tcg-op-vec.c @@ -23,6 +23,7 @@ #include "tcg/tcg-op-common.h" #include "tcg/tcg-mo.h" #include "tcg-internal.h" +#include "tcg-has.h" /* * Vector optional opcode tracking. 
@@ -143,7 +144,7 @@ bool tcg_can_emit_vecop_list(const TCGOpcode *list, void vec_gen_2(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a) { TCGOp *op = tcg_emit_op(opc, 2); - TCGOP_VECL(op) = type - TCG_TYPE_V64; + TCGOP_TYPE(op) = type; TCGOP_VECE(op) = vece; op->args[0] = r; op->args[1] = a; @@ -153,7 +154,7 @@ void vec_gen_3(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a, TCGArg b) { TCGOp *op = tcg_emit_op(opc, 3); - TCGOP_VECL(op) = type - TCG_TYPE_V64; + TCGOP_TYPE(op) = type; TCGOP_VECE(op) = vece; op->args[0] = r; op->args[1] = a; @@ -164,7 +165,7 @@ void vec_gen_4(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a, TCGArg b, TCGArg c) { TCGOp *op = tcg_emit_op(opc, 4); - TCGOP_VECL(op) = type - TCG_TYPE_V64; + TCGOP_TYPE(op) = type; TCGOP_VECE(op) = vece; op->args[0] = r; op->args[1] = a; @@ -172,11 +173,11 @@ void vec_gen_4(TCGOpcode opc, TCGType type, unsigned vece, op->args[3] = c; } -static void vec_gen_6(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, - TCGArg a, TCGArg b, TCGArg c, TCGArg d, TCGArg e) +void vec_gen_6(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, + TCGArg a, TCGArg b, TCGArg c, TCGArg d, TCGArg e) { TCGOp *op = tcg_emit_op(opc, 6); - TCGOP_VECL(op) = type - TCG_TYPE_V64; + TCGOP_TYPE(op) = type; TCGOP_VECE(op) = vece; op->args[0] = r; op->args[1] = a; diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index eff37286220..dfa5c387284 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -29,7 +29,7 @@ #include "exec/translation-block.h" #include "exec/plugin-gen.h" #include "tcg-internal.h" - +#include "tcg-has.h" /* * Encourage the compiler to tail-call to a function, rather than inlining. @@ -37,57 +37,71 @@ */ #define NI __attribute__((noinline)) -void NI tcg_gen_op1(TCGOpcode opc, TCGArg a1) +TCGOp * NI tcg_gen_op1(TCGOpcode opc, TCGType type, TCGArg a1) { TCGOp *op = tcg_emit_op(opc, 1); + TCGOP_TYPE(op) = type; op->args[0] = a1; + return op; } -void NI tcg_gen_op2(TCGOpcode opc, TCGArg a1, TCGArg a2) +TCGOp * NI tcg_gen_op2(TCGOpcode opc, TCGType type, TCGArg a1, TCGArg a2) { TCGOp *op = tcg_emit_op(opc, 2); + TCGOP_TYPE(op) = type; op->args[0] = a1; op->args[1] = a2; + return op; } -void NI tcg_gen_op3(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3) +TCGOp * NI tcg_gen_op3(TCGOpcode opc, TCGType type, TCGArg a1, + TCGArg a2, TCGArg a3) { TCGOp *op = tcg_emit_op(opc, 3); + TCGOP_TYPE(op) = type; op->args[0] = a1; op->args[1] = a2; op->args[2] = a3; + return op; } -void NI tcg_gen_op4(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4) +TCGOp * NI tcg_gen_op4(TCGOpcode opc, TCGType type, TCGArg a1, TCGArg a2, + TCGArg a3, TCGArg a4) { TCGOp *op = tcg_emit_op(opc, 4); + TCGOP_TYPE(op) = type; op->args[0] = a1; op->args[1] = a2; op->args[2] = a3; op->args[3] = a4; + return op; } -void NI tcg_gen_op5(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, - TCGArg a4, TCGArg a5) +TCGOp * NI tcg_gen_op5(TCGOpcode opc, TCGType type, TCGArg a1, TCGArg a2, + TCGArg a3, TCGArg a4, TCGArg a5) { TCGOp *op = tcg_emit_op(opc, 5); + TCGOP_TYPE(op) = type; op->args[0] = a1; op->args[1] = a2; op->args[2] = a3; op->args[3] = a4; op->args[4] = a5; + return op; } -void NI tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, - TCGArg a4, TCGArg a5, TCGArg a6) +TCGOp * NI tcg_gen_op6(TCGOpcode opc, TCGType type, TCGArg a1, TCGArg a2, + TCGArg a3, TCGArg a4, TCGArg a5, TCGArg a6) { TCGOp *op = tcg_emit_op(opc, 6); + TCGOP_TYPE(op) = type; op->args[0] = a1; op->args[1] = a2; op->args[2] = a3; op->args[3] = a4; op->args[4] = a5; 
op->args[5] = a6; + return op; } /* @@ -100,158 +114,146 @@ void NI tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, # define DNI #endif -static void DNI tcg_gen_op1_i32(TCGOpcode opc, TCGv_i32 a1) +static void DNI tcg_gen_op1_i32(TCGOpcode opc, TCGType type, TCGv_i32 a1) { - tcg_gen_op1(opc, tcgv_i32_arg(a1)); + tcg_gen_op1(opc, type, tcgv_i32_arg(a1)); } -static void DNI tcg_gen_op1_i64(TCGOpcode opc, TCGv_i64 a1) +static void DNI tcg_gen_op1_i64(TCGOpcode opc, TCGType type, TCGv_i64 a1) { - tcg_gen_op1(opc, tcgv_i64_arg(a1)); + tcg_gen_op1(opc, type, tcgv_i64_arg(a1)); } -static void DNI tcg_gen_op1i(TCGOpcode opc, TCGArg a1) +static TCGOp * DNI tcg_gen_op1i(TCGOpcode opc, TCGType type, TCGArg a1) { - tcg_gen_op1(opc, a1); + return tcg_gen_op1(opc, type, a1); } static void DNI tcg_gen_op2_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2) { - tcg_gen_op2(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2)); + tcg_gen_op2(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2)); } static void DNI tcg_gen_op2_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2) { - tcg_gen_op2(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2)); + tcg_gen_op2(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2)); } static void DNI tcg_gen_op3_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3) { - tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3)); + tcg_gen_op3(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), + tcgv_i32_arg(a2), tcgv_i32_arg(a3)); } static void DNI tcg_gen_op3_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3) { - tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3)); + tcg_gen_op3(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), + tcgv_i64_arg(a2), tcgv_i64_arg(a3)); } static void DNI tcg_gen_op3i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGArg a3) { - tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3); + tcg_gen_op3(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3); } static void DNI tcg_gen_op3i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGArg a3) { - tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3); + tcg_gen_op3(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3); } static void DNI tcg_gen_ldst_op_i32(TCGOpcode opc, TCGv_i32 val, TCGv_ptr base, TCGArg offset) { - tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_ptr_arg(base), offset); + tcg_gen_op3(opc, TCG_TYPE_I32, tcgv_i32_arg(val), + tcgv_ptr_arg(base), offset); } static void DNI tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val, TCGv_ptr base, TCGArg offset) { - tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_ptr_arg(base), offset); + tcg_gen_op3(opc, TCG_TYPE_I64, tcgv_i64_arg(val), + tcgv_ptr_arg(base), offset); } static void DNI tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4) { - tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), + tcg_gen_op4(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3), tcgv_i32_arg(a4)); } static void DNI tcg_gen_op4_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4) { - tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), + tcg_gen_op4(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3), tcgv_i64_arg(a4)); } static void DNI tcg_gen_op4i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3, TCGArg a4) { - tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), + tcg_gen_op4(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3), a4); } static void DNI tcg_gen_op4i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3, TCGArg a4) { - tcg_gen_op4(opc, 
tcgv_i64_arg(a1), tcgv_i64_arg(a2), + tcg_gen_op4(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3), a4); } -static void DNI tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, - TCGArg a3, TCGArg a4) +static TCGOp * DNI tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, + TCGArg a3, TCGArg a4) { - tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3, a4); + return tcg_gen_op4(opc, TCG_TYPE_I32, + tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3, a4); } -static void DNI tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, - TCGArg a3, TCGArg a4) +static TCGOp * DNI tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, + TCGArg a3, TCGArg a4) { - tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3, a4); + return tcg_gen_op4(opc, TCG_TYPE_I64, + tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3, a4); } static void DNI tcg_gen_op5_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5) { - tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), + tcg_gen_op5(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5)); } static void DNI tcg_gen_op5_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5) { - tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), + tcg_gen_op5(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5)); } static void DNI tcg_gen_op5ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3, TCGArg a4, TCGArg a5) { - tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), + tcg_gen_op5(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3), a4, a5); } static void DNI tcg_gen_op5ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3, TCGArg a4, TCGArg a5) { - tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), + tcg_gen_op5(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3), a4, a5); } -static void DNI tcg_gen_op6_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, - TCGv_i32 a3, TCGv_i32 a4, - TCGv_i32 a5, TCGv_i32 a6) -{ - tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), - tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5), - tcgv_i32_arg(a6)); -} - -static void DNI tcg_gen_op6_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, - TCGv_i64 a3, TCGv_i64 a4, - TCGv_i64 a5, TCGv_i64 a6) -{ - tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), - tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5), - tcgv_i64_arg(a6)); -} - static void DNI tcg_gen_op6i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5, TCGArg a6) { - tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), + tcg_gen_op6(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5), a6); } @@ -259,16 +261,16 @@ static void DNI tcg_gen_op6i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2, TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5, TCGArg a6) { - tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), + tcg_gen_op6(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5), a6); } -static void DNI tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, - TCGv_i32 a3, TCGv_i32 a4, - TCGArg a5, TCGArg a6) +static TCGOp * DNI tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, + TCGv_i32 a3, TCGv_i32 a4, + TCGArg a5, TCGArg a6) { - tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), - tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5, a6); + return tcg_gen_op6(opc, TCG_TYPE_I32, 
tcgv_i32_arg(a1), tcgv_i32_arg(a2), + tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5, a6); } /* Generic ops. */ @@ -276,21 +278,20 @@ static void DNI tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2, void gen_set_label(TCGLabel *l) { l->present = 1; - tcg_gen_op1(INDEX_op_set_label, label_arg(l)); + tcg_gen_op1(INDEX_op_set_label, 0, label_arg(l)); } -static void add_last_as_label_use(TCGLabel *l) +static void add_as_label_use(TCGLabel *l, TCGOp *op) { TCGLabelUse *u = tcg_malloc(sizeof(TCGLabelUse)); - u->op = tcg_last_op(); + u->op = op; QSIMPLEQ_INSERT_TAIL(&l->branches, u, next); } void tcg_gen_br(TCGLabel *l) { - tcg_gen_op1(INDEX_op_br, label_arg(l)); - add_last_as_label_use(l); + add_as_label_use(l, tcg_gen_op1(INDEX_op_br, 0, label_arg(l))); } void tcg_gen_mb(TCGBar mb_type) @@ -308,31 +309,31 @@ void tcg_gen_mb(TCGBar mb_type) #endif if (parallel) { - tcg_gen_op1(INDEX_op_mb, mb_type); + tcg_gen_op1(INDEX_op_mb, 0, mb_type); } } void tcg_gen_plugin_cb(unsigned from) { - tcg_gen_op1(INDEX_op_plugin_cb, from); + tcg_gen_op1(INDEX_op_plugin_cb, 0, from); } void tcg_gen_plugin_mem_cb(TCGv_i64 addr, unsigned meminfo) { - tcg_gen_op2(INDEX_op_plugin_mem_cb, tcgv_i64_arg(addr), meminfo); + tcg_gen_op2(INDEX_op_plugin_mem_cb, 0, tcgv_i64_arg(addr), meminfo); } /* 32 bit ops */ void tcg_gen_discard_i32(TCGv_i32 arg) { - tcg_gen_op1_i32(INDEX_op_discard, arg); + tcg_gen_op1_i32(INDEX_op_discard, TCG_TYPE_I32, arg); } void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg) { if (ret != arg) { - tcg_gen_op2_i32(INDEX_op_mov_i32, ret, arg); + tcg_gen_op2_i32(INDEX_op_mov, ret, arg); } } @@ -343,7 +344,7 @@ void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg) void tcg_gen_add_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - tcg_gen_op3_i32(INDEX_op_add_i32, ret, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_add, ret, arg1, arg2); } void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) @@ -358,7 +359,7 @@ void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) void tcg_gen_sub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - tcg_gen_op3_i32(INDEX_op_sub_i32, ret, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_sub, ret, arg1, arg2); } void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2) @@ -377,12 +378,12 @@ void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg) { - tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg); + tcg_gen_op2_i32(INDEX_op_neg, ret, arg); } void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_and, ret, arg1, arg2); } void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) @@ -395,17 +396,19 @@ void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) case -1: tcg_gen_mov_i32(ret, arg1); return; - case 0xff: - /* Don't recurse with tcg_gen_ext8u_i32. */ - if (TCG_TARGET_HAS_ext8u_i32) { - tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1); - return; - } - break; - case 0xffff: - if (TCG_TARGET_HAS_ext16u_i32) { - tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1); - return; + default: + /* + * Canonicalize on extract, if valid. This aids x86 with its + * 2 operand MOVZBL and 2 operand AND, selecting the TCGOpcode + * which does not require matching operands. Other backends can + * trivially expand the extract to AND during code generation. 
+ */ + if (!(arg2 & (arg2 + 1))) { + unsigned len = ctz32(~arg2); + if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, len)) { + tcg_gen_extract_i32(ret, arg1, 0, len); + return; + } } break; } @@ -415,7 +418,7 @@ void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) void tcg_gen_or_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_or, ret, arg1, arg2); } void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) @@ -432,7 +435,7 @@ void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) void tcg_gen_xor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_xor, ret, arg1, arg2); } void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) @@ -440,9 +443,10 @@ void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) /* Some cases can be optimized here. */ if (arg2 == 0) { tcg_gen_mov_i32(ret, arg1); - } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) { + } else if (arg2 == -1 && + tcg_op_supported(INDEX_op_not, TCG_TYPE_I32, 0)) { /* Don't recurse with tcg_gen_not_i32. */ - tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1); + tcg_gen_op2_i32(INDEX_op_not, ret, arg1); } else { tcg_gen_xor_i32(ret, arg1, tcg_constant_i32(arg2)); } @@ -450,8 +454,8 @@ void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg) { - if (TCG_TARGET_HAS_not_i32) { - tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg); + if (tcg_op_supported(INDEX_op_not, TCG_TYPE_I32, 0)) { + tcg_gen_op2_i32(INDEX_op_not, ret, arg); } else { tcg_gen_xori_i32(ret, arg, -1); } @@ -459,7 +463,7 @@ void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg) void tcg_gen_shl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - tcg_gen_op3_i32(INDEX_op_shl_i32, ret, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_shl, ret, arg1, arg2); } void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) @@ -474,7 +478,7 @@ void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) void tcg_gen_shr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - tcg_gen_op3_i32(INDEX_op_shr_i32, ret, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_shr, ret, arg1, arg2); } void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) @@ -489,7 +493,7 @@ void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) void tcg_gen_sar_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - tcg_gen_op3_i32(INDEX_op_sar_i32, ret, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_sar, ret, arg1, arg2); } void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) @@ -507,8 +511,9 @@ void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l) if (cond == TCG_COND_ALWAYS) { tcg_gen_br(l); } else if (cond != TCG_COND_NEVER) { - tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l)); - add_last_as_label_use(l); + TCGOp *op = tcg_gen_op4ii_i32(INDEX_op_brcond, + arg1, arg2, cond, label_arg(l)); + add_as_label_use(l, op); } } @@ -529,7 +534,7 @@ void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret, } else if (cond == TCG_COND_NEVER) { tcg_gen_movi_i32(ret, 0); } else { - tcg_gen_op4i_i32(INDEX_op_setcond_i32, ret, arg1, arg2, cond); + tcg_gen_op4i_i32(INDEX_op_setcond, ret, arg1, arg2, cond); } } @@ -546,11 +551,8 @@ void tcg_gen_negsetcond_i32(TCGCond cond, TCGv_i32 ret, tcg_gen_movi_i32(ret, -1); } else if (cond == TCG_COND_NEVER) { tcg_gen_movi_i32(ret, 0); - } else if (TCG_TARGET_HAS_negsetcond_i32) { - 
tcg_gen_op4i_i32(INDEX_op_negsetcond_i32, ret, arg1, arg2, cond); } else { - tcg_gen_setcond_i32(cond, ret, arg1, arg2); - tcg_gen_neg_i32(ret, ret); + tcg_gen_op4i_i32(INDEX_op_negsetcond, ret, arg1, arg2, cond); } } @@ -562,7 +564,7 @@ void tcg_gen_negsetcondi_i32(TCGCond cond, TCGv_i32 ret, void tcg_gen_mul_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - tcg_gen_op3_i32(INDEX_op_mul_i32, ret, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_mul, ret, arg1, arg2); } void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) @@ -578,12 +580,12 @@ void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_div_i32) { - tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div2_i32) { + if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I32, 0)) { + tcg_gen_op3_i32(INDEX_op_divs, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_divs2, TCG_TYPE_I32, 0)) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_sari_i32(t0, arg1, 31); - tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2); + tcg_gen_op5_i32(INDEX_op_divs2, ret, t0, arg1, t0, arg2); tcg_temp_free_i32(t0); } else { gen_helper_div_i32(ret, arg1, arg2); @@ -592,18 +594,18 @@ void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_rem_i32) { - tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div_i32) { + if (tcg_op_supported(INDEX_op_rems, TCG_TYPE_I32, 0)) { + tcg_gen_op3_i32(INDEX_op_rems, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I32, 0)) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); - tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_divs, t0, arg1, arg2); tcg_gen_mul_i32(t0, t0, arg2); tcg_gen_sub_i32(ret, arg1, t0); tcg_temp_free_i32(t0); - } else if (TCG_TARGET_HAS_div2_i32) { + } else if (tcg_op_supported(INDEX_op_divs2, TCG_TYPE_I32, 0)) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_sari_i32(t0, arg1, 31); - tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2); + tcg_gen_op5_i32(INDEX_op_divs2, t0, ret, arg1, t0, arg2); tcg_temp_free_i32(t0); } else { gen_helper_rem_i32(ret, arg1, arg2); @@ -612,12 +614,12 @@ void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_div_i32) { - tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div2_i32) { + if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I32, 0)) { + tcg_gen_op3_i32(INDEX_op_divu, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_divu2, TCG_TYPE_I32, 0)) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); TCGv_i32 zero = tcg_constant_i32(0); - tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, zero, arg2); + tcg_gen_op5_i32(INDEX_op_divu2, ret, t0, arg1, zero, arg2); tcg_temp_free_i32(t0); } else { gen_helper_divu_i32(ret, arg1, arg2); @@ -626,18 +628,18 @@ void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_rem_i32) { - tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div_i32) { + if (tcg_op_supported(INDEX_op_remu, TCG_TYPE_I32, 0)) { + tcg_gen_op3_i32(INDEX_op_remu, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I32, 0)) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); - 
tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_divu, t0, arg1, arg2); tcg_gen_mul_i32(t0, t0, arg2); tcg_gen_sub_i32(ret, arg1, t0); tcg_temp_free_i32(t0); - } else if (TCG_TARGET_HAS_div2_i32) { + } else if (tcg_op_supported(INDEX_op_divu2, TCG_TYPE_I32, 0)) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); TCGv_i32 zero = tcg_constant_i32(0); - tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, zero, arg2); + tcg_gen_op5_i32(INDEX_op_divu2, t0, ret, arg1, zero, arg2); tcg_temp_free_i32(t0); } else { gen_helper_remu_i32(ret, arg1, arg2); @@ -646,8 +648,8 @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_andc_i32) { - tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2); + if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0)) { + tcg_gen_op3_i32(INDEX_op_andc, ret, arg1, arg2); } else { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_not_i32(t0, arg2); @@ -658,8 +660,8 @@ void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_eqv_i32) { - tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2); + if (tcg_op_supported(INDEX_op_eqv, TCG_TYPE_I32, 0)) { + tcg_gen_op3_i32(INDEX_op_eqv, ret, arg1, arg2); } else { tcg_gen_xor_i32(ret, arg1, arg2); tcg_gen_not_i32(ret, ret); @@ -668,8 +670,8 @@ void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_nand_i32) { - tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2); + if (tcg_op_supported(INDEX_op_nand, TCG_TYPE_I32, 0)) { + tcg_gen_op3_i32(INDEX_op_nand, ret, arg1, arg2); } else { tcg_gen_and_i32(ret, arg1, arg2); tcg_gen_not_i32(ret, ret); @@ -678,8 +680,8 @@ void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_nor_i32) { - tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2); + if (tcg_op_supported(INDEX_op_nor, TCG_TYPE_I32, 0)) { + tcg_gen_op3_i32(INDEX_op_nor, ret, arg1, arg2); } else { tcg_gen_or_i32(ret, arg1, arg2); tcg_gen_not_i32(ret, ret); @@ -688,8 +690,8 @@ void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_orc_i32) { - tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2); + if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I32, 0)) { + tcg_gen_op3_i32(INDEX_op_orc, ret, arg1, arg2); } else { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_not_i32(t0, arg2); @@ -700,9 +702,9 @@ void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_clz_i32) { - tcg_gen_op3_i32(INDEX_op_clz_i32, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_clz_i64) { + if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I32, 0)) { + tcg_gen_op3_i32(INDEX_op_clz, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I64, 0)) { TCGv_i64 t1 = tcg_temp_ebb_new_i64(); TCGv_i64 t2 = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(t1, arg1); @@ -725,9 +727,13 @@ void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2) void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_ctz_i32) { - tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_ctz_i64) { + TCGv_i32 z, t; + + if (tcg_op_supported(INDEX_op_ctz, 
TCG_TYPE_I32, 0)) { + tcg_gen_op3_i32(INDEX_op_ctz, ret, arg1, arg2); + return; + } + if (tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I64, 0)) { TCGv_i64 t1 = tcg_temp_ebb_new_i64(); TCGv_i64 t2 = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(t1, arg1); @@ -736,34 +742,34 @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) tcg_gen_extrl_i64_i32(ret, t1); tcg_temp_free_i64(t1); tcg_temp_free_i64(t2); - } else if (TCG_TARGET_HAS_ctpop_i32 - || TCG_TARGET_HAS_ctpop_i64 - || TCG_TARGET_HAS_clz_i32 - || TCG_TARGET_HAS_clz_i64) { - TCGv_i32 z, t = tcg_temp_ebb_new_i32(); - - if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) { - tcg_gen_subi_i32(t, arg1, 1); - tcg_gen_andc_i32(t, t, arg1); - tcg_gen_ctpop_i32(t, t); - } else { - /* Since all non-x86 hosts have clz(0) == 32, don't fight it. */ - tcg_gen_neg_i32(t, arg1); - tcg_gen_and_i32(t, t, arg1); - tcg_gen_clzi_i32(t, t, 32); - tcg_gen_xori_i32(t, t, 31); - } - z = tcg_constant_i32(0); - tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t); - tcg_temp_free_i32(t); + return; + } + if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_REG, 0)) { + t = tcg_temp_ebb_new_i32(); + tcg_gen_subi_i32(t, arg1, 1); + tcg_gen_andc_i32(t, t, arg1); + tcg_gen_ctpop_i32(t, t); + } else if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_REG, 0)) { + t = tcg_temp_ebb_new_i32(); + tcg_gen_neg_i32(t, arg1); + tcg_gen_and_i32(t, t, arg1); + tcg_gen_clzi_i32(t, t, 32); + tcg_gen_xori_i32(t, t, 31); } else { gen_helper_ctz_i32(ret, arg1, arg2); + return; } + + z = tcg_constant_i32(0); + tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t); + tcg_temp_free_i32(t); } void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2) { - if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) { + if (arg2 == 32 + && !tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I32, 0) + && tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_REG, 0)) { /* This equivalence has the advantage of not requiring a fixup. 
*/ TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_subi_i32(t, arg1, 1); @@ -777,7 +783,7 @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2) void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg) { - if (TCG_TARGET_HAS_clz_i32) { + if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_REG, 0)) { TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_sari_i32(t, arg, 31); tcg_gen_xor_i32(t, t, arg); @@ -791,9 +797,9 @@ void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg) void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1) { - if (TCG_TARGET_HAS_ctpop_i32) { - tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1); - } else if (TCG_TARGET_HAS_ctpop_i64) { + if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I32, 0)) { + tcg_gen_op2_i32(INDEX_op_ctpop, ret, arg1); + } else if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I64, 0)) { TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(t, arg1); tcg_gen_ctpop_i64(t, t); @@ -806,15 +812,18 @@ void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1) void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_rot_i32) { - tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2); + if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I32, 0)) { + tcg_gen_op3_i32(INDEX_op_rotl, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I32, 0)) { + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + tcg_gen_neg_i32(t0, arg2); + tcg_gen_op3_i32(INDEX_op_rotr, ret, arg1, t0); + tcg_temp_free_i32(t0); } else { - TCGv_i32 t0, t1; - - t0 = tcg_temp_ebb_new_i32(); - t1 = tcg_temp_ebb_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); tcg_gen_shl_i32(t0, arg1, arg2); - tcg_gen_subfi_i32(t1, 32, arg2); + tcg_gen_neg_i32(t1, arg2); tcg_gen_shr_i32(t1, arg1, t1); tcg_gen_or_i32(ret, t0, t1); tcg_temp_free_i32(t0); @@ -828,12 +837,15 @@ void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i32(ret, arg1); - } else if (TCG_TARGET_HAS_rot_i32) { - tcg_gen_rotl_i32(ret, arg1, tcg_constant_i32(arg2)); + } else if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I32, 0)) { + TCGv_i32 t0 = tcg_constant_i32(arg2); + tcg_gen_op3_i32(INDEX_op_rotl, ret, arg1, t0); + } else if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I32, 0)) { + TCGv_i32 t0 = tcg_constant_i32(32 - arg2); + tcg_gen_op3_i32(INDEX_op_rotr, ret, arg1, t0); } else { - TCGv_i32 t0, t1; - t0 = tcg_temp_ebb_new_i32(); - t1 = tcg_temp_ebb_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); tcg_gen_shli_i32(t0, arg1, arg2); tcg_gen_shri_i32(t1, arg1, 32 - arg2); tcg_gen_or_i32(ret, t0, t1); @@ -844,15 +856,18 @@ void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_rot_i32) { - tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2); + if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I32, 0)) { + tcg_gen_op3_i32(INDEX_op_rotr, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I32, 0)) { + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + tcg_gen_neg_i32(t0, arg2); + tcg_gen_op3_i32(INDEX_op_rotl, ret, arg1, t0); + tcg_temp_free_i32(t0); } else { - TCGv_i32 t0, t1; - - t0 = tcg_temp_ebb_new_i32(); - t1 = tcg_temp_ebb_new_i32(); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); tcg_gen_shr_i32(t0, arg1, arg2); - tcg_gen_subfi_i32(t1, 32, arg2); + tcg_gen_neg_i32(t1, arg2); tcg_gen_shl_i32(t1, arg1, t1); tcg_gen_or_i32(ret, t0, t1); 
tcg_temp_free_i32(t0); @@ -863,12 +878,7 @@ void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 32); - /* some cases can be optimized here */ - if (arg2 == 0) { - tcg_gen_mov_i32(ret, arg1); - } else { - tcg_gen_rotli_i32(ret, arg1, 32 - arg2); - } + tcg_gen_rotli_i32(ret, arg1, -arg2 & 31); } void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, @@ -886,14 +896,14 @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, tcg_gen_mov_i32(ret, arg2); return; } - if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) { - tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len); + if (TCG_TARGET_deposit_valid(TCG_TYPE_I32, ofs, len)) { + tcg_gen_op5ii_i32(INDEX_op_deposit, ret, arg1, arg2, ofs, len); return; } t1 = tcg_temp_ebb_new_i32(); - if (TCG_TARGET_HAS_extract2_i32) { + if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) { if (ofs + len == 32) { tcg_gen_shli_i32(t1, arg1, len); tcg_gen_extract2_i32(ret, t1, arg2, len); @@ -931,45 +941,24 @@ void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg, tcg_gen_shli_i32(ret, arg, ofs); } else if (ofs == 0) { tcg_gen_andi_i32(ret, arg, (1u << len) - 1); - } else if (TCG_TARGET_HAS_deposit_i32 - && TCG_TARGET_deposit_i32_valid(ofs, len)) { + } else if (TCG_TARGET_deposit_valid(TCG_TYPE_I32, ofs, len)) { TCGv_i32 zero = tcg_constant_i32(0); - tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len); - } else { - /* To help two-operand hosts we prefer to zero-extend first, - which allows ARG to stay live. */ - switch (len) { - case 16: - if (TCG_TARGET_HAS_ext16u_i32) { - tcg_gen_ext16u_i32(ret, arg); - tcg_gen_shli_i32(ret, ret, ofs); - return; - } - break; - case 8: - if (TCG_TARGET_HAS_ext8u_i32) { - tcg_gen_ext8u_i32(ret, arg); - tcg_gen_shli_i32(ret, ret, ofs); - return; - } - break; + tcg_gen_op5ii_i32(INDEX_op_deposit, ret, zero, arg, ofs, len); + } else { + /* + * To help two-operand hosts we prefer to zero-extend first, + * which allows ARG to stay live. + */ + if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, len)) { + tcg_gen_extract_i32(ret, arg, 0, len); + tcg_gen_shli_i32(ret, ret, ofs); + return; } /* Otherwise prefer zero-extension over AND for code size. */ - switch (ofs + len) { - case 16: - if (TCG_TARGET_HAS_ext16u_i32) { - tcg_gen_shli_i32(ret, arg, ofs); - tcg_gen_ext16u_i32(ret, ret); - return; - } - break; - case 8: - if (TCG_TARGET_HAS_ext8u_i32) { - tcg_gen_shli_i32(ret, arg, ofs); - tcg_gen_ext8u_i32(ret, ret); - return; - } - break; + if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, ofs + len)) { + tcg_gen_shli_i32(ret, arg, ofs); + tcg_gen_extract_i32(ret, ret, 0, ofs + len); + return; } tcg_gen_andi_i32(ret, arg, (1u << len) - 1); tcg_gen_shli_i32(ret, ret, ofs); @@ -989,33 +978,21 @@ void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg, tcg_gen_shri_i32(ret, arg, 32 - len); return; } - if (ofs == 0) { - tcg_gen_andi_i32(ret, arg, (1u << len) - 1); + + if (TCG_TARGET_extract_valid(TCG_TYPE_I32, ofs, len)) { + tcg_gen_op4ii_i32(INDEX_op_extract, ret, arg, ofs, len); return; } - - if (TCG_TARGET_HAS_extract_i32 - && TCG_TARGET_extract_i32_valid(ofs, len)) { - tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len); + if (ofs == 0) { + tcg_gen_andi_i32(ret, arg, (1u << len) - 1); return; } /* Assume that zero-extension, if available, is cheaper than a shift. 
*/ - switch (ofs + len) { - case 16: - if (TCG_TARGET_HAS_ext16u_i32) { - tcg_gen_ext16u_i32(ret, arg); - tcg_gen_shri_i32(ret, ret, ofs); - return; - } - break; - case 8: - if (TCG_TARGET_HAS_ext8u_i32) { - tcg_gen_ext8u_i32(ret, arg); - tcg_gen_shri_i32(ret, ret, ofs); - return; - } - break; + if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, ofs + len)) { + tcg_gen_op4ii_i32(INDEX_op_extract, ret, arg, 0, ofs + len); + tcg_gen_shri_i32(ret, ret, ofs); + return; } /* ??? Ideally we'd know what values are available for immediate AND. @@ -1046,55 +1023,22 @@ void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg, tcg_gen_sari_i32(ret, arg, 32 - len); return; } - if (ofs == 0) { - switch (len) { - case 16: - tcg_gen_ext16s_i32(ret, arg); - return; - case 8: - tcg_gen_ext8s_i32(ret, arg); - return; - } - } - if (TCG_TARGET_HAS_sextract_i32 - && TCG_TARGET_extract_i32_valid(ofs, len)) { - tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, ofs, len); + if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, ofs, len)) { + tcg_gen_op4ii_i32(INDEX_op_sextract, ret, arg, ofs, len); return; } /* Assume that sign-extension, if available, is cheaper than a shift. */ - switch (ofs + len) { - case 16: - if (TCG_TARGET_HAS_ext16s_i32) { - tcg_gen_ext16s_i32(ret, arg); - tcg_gen_sari_i32(ret, ret, ofs); - return; - } - break; - case 8: - if (TCG_TARGET_HAS_ext8s_i32) { - tcg_gen_ext8s_i32(ret, arg); - tcg_gen_sari_i32(ret, ret, ofs); - return; - } - break; + if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, 0, ofs + len)) { + tcg_gen_op4ii_i32(INDEX_op_sextract, ret, arg, 0, ofs + len); + tcg_gen_sari_i32(ret, ret, ofs); + return; } - switch (len) { - case 16: - if (TCG_TARGET_HAS_ext16s_i32) { - tcg_gen_shri_i32(ret, arg, ofs); - tcg_gen_ext16s_i32(ret, ret); - return; - } - break; - case 8: - if (TCG_TARGET_HAS_ext8s_i32) { - tcg_gen_shri_i32(ret, arg, ofs); - tcg_gen_ext8s_i32(ret, ret); - return; - } - break; + if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, 0, len)) { + tcg_gen_shri_i32(ret, arg, ofs); + tcg_gen_op4ii_i32(INDEX_op_sextract, ret, ret, 0, len); + return; } tcg_gen_shli_i32(ret, arg, 32 - len - ofs); @@ -1115,8 +1059,8 @@ void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah, tcg_gen_mov_i32(ret, ah); } else if (al == ah) { tcg_gen_rotri_i32(ret, al, ofs); - } else if (TCG_TARGET_HAS_extract2_i32) { - tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs); + } else if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) { + tcg_gen_op4i_i32(INDEX_op_extract2, ret, al, ah, ofs); } else { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_shri_i32(t0, al, ofs); @@ -1133,52 +1077,89 @@ void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1, } else if (cond == TCG_COND_NEVER) { tcg_gen_mov_i32(ret, v2); } else { - tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond); + tcg_gen_op6i_i32(INDEX_op_movcond, ret, c1, c2, v1, v2, cond); } } void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh) { - if (TCG_TARGET_HAS_add2_i32) { - tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh); + if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_I32, 0)) { + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + tcg_gen_op3_i32(INDEX_op_addco, t0, al, bl); + tcg_gen_op3_i32(INDEX_op_addci, rh, ah, bh); + tcg_gen_mov_i32(rl, t0); + tcg_temp_free_i32(t0); } else { - TCGv_i64 t0 = tcg_temp_ebb_new_i64(); - TCGv_i64 t1 = tcg_temp_ebb_new_i64(); - tcg_gen_concat_i32_i64(t0, al, ah); - tcg_gen_concat_i32_i64(t1, bl, bh); - tcg_gen_add_i64(t0, t0, t1); - 
tcg_gen_extr_i64_i32(rl, rh, t0); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); + tcg_gen_add_i32(t0, al, bl); + tcg_gen_setcond_i32(TCG_COND_LTU, t1, t0, al); + tcg_gen_add_i32(rh, ah, bh); + tcg_gen_add_i32(rh, rh, t1); + tcg_gen_mov_i32(rl, t0); + tcg_temp_free_i32(t0); + tcg_temp_free_i32(t1); + } +} + +void tcg_gen_addcio_i32(TCGv_i32 r, TCGv_i32 co, + TCGv_i32 a, TCGv_i32 b, TCGv_i32 ci) +{ + if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_I32, 0)) { + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 zero = tcg_constant_i32(0); + TCGv_i32 mone = tcg_constant_i32(-1); + + tcg_gen_op3_i32(INDEX_op_addco, t0, ci, mone); + tcg_gen_op3_i32(INDEX_op_addcio, r, a, b); + tcg_gen_op3_i32(INDEX_op_addci, co, zero, zero); + tcg_temp_free_i32(t0); + } else { + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); + + tcg_gen_add_i32(t0, a, b); + tcg_gen_setcond_i32(TCG_COND_LTU, t1, t0, a); + tcg_gen_add_i32(r, t0, ci); + tcg_gen_setcond_i32(TCG_COND_LTU, t0, r, t0); + tcg_gen_or_i32(co, t0, t1); + + tcg_temp_free_i32(t0); + tcg_temp_free_i32(t1); } } void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh) { - if (TCG_TARGET_HAS_sub2_i32) { - tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh); + if (tcg_op_supported(INDEX_op_subbi, TCG_TYPE_I32, 0)) { + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + tcg_gen_op3_i32(INDEX_op_subbo, t0, al, bl); + tcg_gen_op3_i32(INDEX_op_subbi, rh, ah, bh); + tcg_gen_mov_i32(rl, t0); + tcg_temp_free_i32(t0); } else { - TCGv_i64 t0 = tcg_temp_ebb_new_i64(); - TCGv_i64 t1 = tcg_temp_ebb_new_i64(); - tcg_gen_concat_i32_i64(t0, al, ah); - tcg_gen_concat_i32_i64(t1, bl, bh); - tcg_gen_sub_i64(t0, t0, t1); - tcg_gen_extr_i64_i32(rl, rh, t0); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 t1 = tcg_temp_ebb_new_i32(); + tcg_gen_sub_i32(t0, al, bl); + tcg_gen_setcond_i32(TCG_COND_LTU, t1, al, bl); + tcg_gen_sub_i32(rh, ah, bh); + tcg_gen_sub_i32(rh, rh, t1); + tcg_gen_mov_i32(rl, t0); + tcg_temp_free_i32(t0); + tcg_temp_free_i32(t1); } } void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_mulu2_i32) { - tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2); - } else if (TCG_TARGET_HAS_muluh_i32) { + if (tcg_op_supported(INDEX_op_mulu2, TCG_TYPE_I32, 0)) { + tcg_gen_op4_i32(INDEX_op_mulu2, rl, rh, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I32, 0)) { TCGv_i32 t = tcg_temp_ebb_new_i32(); - tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2); - tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_muluh, rh, arg1, arg2); tcg_gen_mov_i32(rl, t); tcg_temp_free_i32(t); } else if (TCG_TARGET_REG_BITS == 64) { @@ -1191,18 +1172,18 @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } else { - qemu_build_not_reached(); + g_assert_not_reached(); } } void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) { - if (TCG_TARGET_HAS_muls2_i32) { - tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2); - } else if (TCG_TARGET_HAS_mulsh_i32) { + if (tcg_op_supported(INDEX_op_muls2, TCG_TYPE_I32, 0)) { + tcg_gen_op4_i32(INDEX_op_muls2, rl, rh, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_mulsh, TCG_TYPE_I32, 0)) { TCGv_i32 t = 
tcg_temp_ebb_new_i32(); - tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2); - tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2); + tcg_gen_op3_i32(INDEX_op_mulsh, rh, arg1, arg2); tcg_gen_mov_i32(rl, t); tcg_temp_free_i32(t); } else if (TCG_TARGET_REG_BITS == 32) { @@ -1264,40 +1245,22 @@ void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg) { - if (TCG_TARGET_HAS_ext8s_i32) { - tcg_gen_op2_i32(INDEX_op_ext8s_i32, ret, arg); - } else { - tcg_gen_shli_i32(ret, arg, 24); - tcg_gen_sari_i32(ret, ret, 24); - } + tcg_gen_sextract_i32(ret, arg, 0, 8); } void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg) { - if (TCG_TARGET_HAS_ext16s_i32) { - tcg_gen_op2_i32(INDEX_op_ext16s_i32, ret, arg); - } else { - tcg_gen_shli_i32(ret, arg, 16); - tcg_gen_sari_i32(ret, ret, 16); - } + tcg_gen_sextract_i32(ret, arg, 0, 16); } void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg) { - if (TCG_TARGET_HAS_ext8u_i32) { - tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg); - } else { - tcg_gen_andi_i32(ret, arg, 0xffu); - } + tcg_gen_extract_i32(ret, arg, 0, 8); } void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg) { - if (TCG_TARGET_HAS_ext16u_i32) { - tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg); - } else { - tcg_gen_andi_i32(ret, arg, 0xffffu); - } + tcg_gen_extract_i32(ret, arg, 0, 16); } /* @@ -1313,8 +1276,8 @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags) /* Only one extension flag may be present. */ tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ)); - if (TCG_TARGET_HAS_bswap16_i32) { - tcg_gen_op3i_i32(INDEX_op_bswap16_i32, ret, arg, flags); + if (tcg_op_supported(INDEX_op_bswap16, TCG_TYPE_I32, 0)) { + tcg_gen_op3i_i32(INDEX_op_bswap16, ret, arg, flags); } else { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); TCGv_i32 t1 = tcg_temp_ebb_new_i32(); @@ -1350,8 +1313,8 @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags) */ void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg) { - if (TCG_TARGET_HAS_bswap32_i32) { - tcg_gen_op3i_i32(INDEX_op_bswap32_i32, ret, arg, 0); + if (tcg_op_supported(INDEX_op_bswap32, TCG_TYPE_I32, 0)) { + tcg_gen_op3i_i32(INDEX_op_bswap32, ret, arg, 0); } else { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); TCGv_i32 t1 = tcg_temp_ebb_new_i32(); @@ -1416,42 +1379,42 @@ void tcg_gen_abs_i32(TCGv_i32 ret, TCGv_i32 a) void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) { - tcg_gen_ldst_op_i32(INDEX_op_ld8u_i32, ret, arg2, offset); + tcg_gen_ldst_op_i32(INDEX_op_ld8u, ret, arg2, offset); } void tcg_gen_ld8s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) { - tcg_gen_ldst_op_i32(INDEX_op_ld8s_i32, ret, arg2, offset); + tcg_gen_ldst_op_i32(INDEX_op_ld8s, ret, arg2, offset); } void tcg_gen_ld16u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) { - tcg_gen_ldst_op_i32(INDEX_op_ld16u_i32, ret, arg2, offset); + tcg_gen_ldst_op_i32(INDEX_op_ld16u, ret, arg2, offset); } void tcg_gen_ld16s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) { - tcg_gen_ldst_op_i32(INDEX_op_ld16s_i32, ret, arg2, offset); + tcg_gen_ldst_op_i32(INDEX_op_ld16s, ret, arg2, offset); } void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) { - tcg_gen_ldst_op_i32(INDEX_op_ld_i32, ret, arg2, offset); + tcg_gen_ldst_op_i32(INDEX_op_ld, ret, arg2, offset); } void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset) { - tcg_gen_ldst_op_i32(INDEX_op_st8_i32, arg1, arg2, offset); + 
tcg_gen_ldst_op_i32(INDEX_op_st8, arg1, arg2, offset); } void tcg_gen_st16_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset) { - tcg_gen_ldst_op_i32(INDEX_op_st16_i32, arg1, arg2, offset); + tcg_gen_ldst_op_i32(INDEX_op_st16, arg1, arg2, offset); } void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset) { - tcg_gen_ldst_op_i32(INDEX_op_st_i32, arg1, arg2, offset); + tcg_gen_ldst_op_i32(INDEX_op_st, arg1, arg2, offset); } @@ -1460,7 +1423,7 @@ void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset) void tcg_gen_discard_i64(TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_op1_i64(INDEX_op_discard, arg); + tcg_gen_op1_i64(INDEX_op_discard, TCG_TYPE_I64, arg); } else { tcg_gen_discard_i32(TCGV_LOW(arg)); tcg_gen_discard_i32(TCGV_HIGH(arg)); @@ -1473,7 +1436,7 @@ void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg) return; } if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_op2_i64(INDEX_op_mov_i64, ret, arg); + tcg_gen_op2_i64(INDEX_op_mov, ret, arg); } else { TCGTemp *ts = tcgv_i64_temp(arg); @@ -1500,7 +1463,7 @@ void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg) void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_ldst_op_i64(INDEX_op_ld8u_i64, ret, arg2, offset); + tcg_gen_ldst_op_i64(INDEX_op_ld8u, ret, arg2, offset); } else { tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); @@ -1510,7 +1473,7 @@ void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_ldst_op_i64(INDEX_op_ld8s_i64, ret, arg2, offset); + tcg_gen_ldst_op_i64(INDEX_op_ld8s, ret, arg2, offset); } else { tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset); tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); @@ -1520,7 +1483,7 @@ void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_ldst_op_i64(INDEX_op_ld16u_i64, ret, arg2, offset); + tcg_gen_ldst_op_i64(INDEX_op_ld16u, ret, arg2, offset); } else { tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); @@ -1530,7 +1493,7 @@ void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_ldst_op_i64(INDEX_op_ld16s_i64, ret, arg2, offset); + tcg_gen_ldst_op_i64(INDEX_op_ld16s, ret, arg2, offset); } else { tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset); tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); @@ -1540,7 +1503,7 @@ void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_ldst_op_i64(INDEX_op_ld32u_i64, ret, arg2, offset); + tcg_gen_ldst_op_i64(INDEX_op_ld32u, ret, arg2, offset); } else { tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); @@ -1550,7 +1513,7 @@ void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_ldst_op_i64(INDEX_op_ld32s_i64, ret, arg2, offset); + tcg_gen_ldst_op_i64(INDEX_op_ld32s, ret, arg2, offset); } else { tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset); 
tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); @@ -1564,7 +1527,7 @@ void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) * they cannot be the same temporary -- no chance of overlap. */ if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_ldst_op_i64(INDEX_op_ld_i64, ret, arg2, offset); + tcg_gen_ldst_op_i64(INDEX_op_ld, ret, arg2, offset); } else if (HOST_BIG_ENDIAN) { tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset); tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4); @@ -1577,7 +1540,7 @@ void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_ldst_op_i64(INDEX_op_st8_i64, arg1, arg2, offset); + tcg_gen_ldst_op_i64(INDEX_op_st8, arg1, arg2, offset); } else { tcg_gen_st8_i32(TCGV_LOW(arg1), arg2, offset); } @@ -1586,7 +1549,7 @@ void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_ldst_op_i64(INDEX_op_st16_i64, arg1, arg2, offset); + tcg_gen_ldst_op_i64(INDEX_op_st16, arg1, arg2, offset); } else { tcg_gen_st16_i32(TCGV_LOW(arg1), arg2, offset); } @@ -1595,7 +1558,7 @@ void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_ldst_op_i64(INDEX_op_st32_i64, arg1, arg2, offset); + tcg_gen_ldst_op_i64(INDEX_op_st32, arg1, arg2, offset); } else { tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset); } @@ -1604,7 +1567,7 @@ void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_ldst_op_i64(INDEX_op_st_i64, arg1, arg2, offset); + tcg_gen_ldst_op_i64(INDEX_op_st, arg1, arg2, offset); } else if (HOST_BIG_ENDIAN) { tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset); tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4); @@ -1617,7 +1580,7 @@ void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_op3_i64(INDEX_op_add_i64, ret, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_add, ret, arg1, arg2); } else { tcg_gen_add2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2)); @@ -1627,7 +1590,7 @@ void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_op3_i64(INDEX_op_sub_i64, ret, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_sub, ret, arg1, arg2); } else { tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2)); @@ -1637,7 +1600,7 @@ void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_and, ret, arg1, arg2); } else { tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); @@ -1647,7 +1610,7 @@ void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_REG_BITS == 64) { - 
tcg_gen_op3_i64(INDEX_op_or_i64, ret, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_or, ret, arg1, arg2); } else { tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); @@ -1657,7 +1620,7 @@ void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_op3_i64(INDEX_op_xor_i64, ret, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_xor, ret, arg1, arg2); } else { tcg_gen_xor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_xor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); @@ -1667,7 +1630,7 @@ void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_op3_i64(INDEX_op_shl_i64, ret, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_shl, ret, arg1, arg2); } else { gen_helper_shl_i64(ret, arg1, arg2); } @@ -1676,7 +1639,7 @@ void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_op3_i64(INDEX_op_shr_i64, ret, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_shr, ret, arg1, arg2); } else { gen_helper_shr_i64(ret, arg1, arg2); } @@ -1685,7 +1648,7 @@ void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_op3_i64(INDEX_op_sar_i64, ret, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_sar, ret, arg1, arg2); } else { gen_helper_sar_i64(ret, arg1, arg2); } @@ -1697,7 +1660,7 @@ void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) TCGv_i32 t1; if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_op3_i64(INDEX_op_mul_i64, ret, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_mul, ret, arg1, arg2); return; } @@ -1753,7 +1716,7 @@ void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) void tcg_gen_neg_i64(TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg); + tcg_gen_op2_i64(INDEX_op_neg, ret, arg); } else { TCGv_i32 zero = tcg_constant_i32(0); tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), @@ -1777,23 +1740,19 @@ void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) case -1: tcg_gen_mov_i64(ret, arg1); return; - case 0xff: - /* Don't recurse with tcg_gen_ext8u_i64. */ - if (TCG_TARGET_HAS_ext8u_i64) { - tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1); - return; - } - break; - case 0xffff: - if (TCG_TARGET_HAS_ext16u_i64) { - tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1); - return; - } - break; - case 0xffffffffu: - if (TCG_TARGET_HAS_ext32u_i64) { - tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1); - return; + default: + /* + * Canonicalize on extract, if valid. This aids x86 with its + * 2 operand MOVZBL and 2 operand AND, selecting the TCGOpcode + * which does not require matching operands. Other backends can + * trivially expand the extract to AND during code generation. + */ + if (!(arg2 & (arg2 + 1))) { + unsigned len = ctz64(~arg2); + if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, len)) { + tcg_gen_extract_i64(ret, arg1, 0, len); + return; + } } break; } @@ -1828,9 +1787,10 @@ void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) /* Some cases can be optimized here. 
*/ if (arg2 == 0) { tcg_gen_mov_i64(ret, arg1); - } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) { + } else if (arg2 == -1 && + tcg_op_supported(INDEX_op_not, TCG_TYPE_I64, 0)) { /* Don't recurse with tcg_gen_not_i64. */ - tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1); + tcg_gen_op2_i64(INDEX_op_not, ret, arg1); } else { tcg_gen_xor_i64(ret, arg1, tcg_constant_i64(arg2)); } @@ -1858,7 +1818,7 @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1, tcg_gen_movi_i32(TCGV_LOW(ret), 0); } } else if (right) { - if (TCG_TARGET_HAS_extract2_i32) { + if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) { tcg_gen_extract2_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), c); } else { @@ -1872,7 +1832,7 @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1, tcg_gen_shri_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c); } } else { - if (TCG_TARGET_HAS_extract2_i32) { + if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) { tcg_gen_extract2_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c); } else { @@ -1927,15 +1887,16 @@ void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l) if (cond == TCG_COND_ALWAYS) { tcg_gen_br(l); } else if (cond != TCG_COND_NEVER) { + TCGOp *op; if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1), - TCGV_HIGH(arg1), TCGV_LOW(arg2), - TCGV_HIGH(arg2), cond, label_arg(l)); + op = tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1), + TCGV_HIGH(arg1), TCGV_LOW(arg2), + TCGV_HIGH(arg2), cond, label_arg(l)); } else { - tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond, - label_arg(l)); + op = tcg_gen_op4ii_i64(INDEX_op_brcond, arg1, arg2, cond, + label_arg(l)); } - add_last_as_label_use(l); + add_as_label_use(l, op); } } @@ -1946,12 +1907,12 @@ void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l) } else if (cond == TCG_COND_ALWAYS) { tcg_gen_br(l); } else if (cond != TCG_COND_NEVER) { - tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, - TCGV_LOW(arg1), TCGV_HIGH(arg1), - tcg_constant_i32(arg2), - tcg_constant_i32(arg2 >> 32), - cond, label_arg(l)); - add_last_as_label_use(l); + TCGOp *op = tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, + TCGV_LOW(arg1), TCGV_HIGH(arg1), + tcg_constant_i32(arg2), + tcg_constant_i32(arg2 >> 32), + cond, label_arg(l)); + add_as_label_use(l, op); } } @@ -1969,7 +1930,7 @@ void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret, TCGV_LOW(arg2), TCGV_HIGH(arg2), cond); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } else { - tcg_gen_op4i_i64(INDEX_op_setcond_i64, ret, arg1, arg2, cond); + tcg_gen_op4i_i64(INDEX_op_setcond, ret, arg1, arg2, cond); } } } @@ -2005,17 +1966,14 @@ void tcg_gen_negsetcond_i64(TCGCond cond, TCGv_i64 ret, tcg_gen_movi_i64(ret, -1); } else if (cond == TCG_COND_NEVER) { tcg_gen_movi_i64(ret, 0); - } else if (TCG_TARGET_HAS_negsetcond_i64) { - tcg_gen_op4i_i64(INDEX_op_negsetcond_i64, ret, arg1, arg2, cond); - } else if (TCG_TARGET_REG_BITS == 32) { + } else if (TCG_TARGET_REG_BITS == 64) { + tcg_gen_op4i_i64(INDEX_op_negsetcond, ret, arg1, arg2, cond); + } else { tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2), cond); tcg_gen_neg_i32(TCGV_LOW(ret), TCGV_LOW(ret)); tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_LOW(ret)); - } else { - tcg_gen_setcond_i64(cond, ret, arg1, arg2); - tcg_gen_neg_i64(ret, ret); } } @@ -2032,12 +1990,12 @@ void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 
arg2) { - if (TCG_TARGET_HAS_div_i64) { - tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div2_i64) { + if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I64, 0)) { + tcg_gen_op3_i64(INDEX_op_divs, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_divs2, TCG_TYPE_I64, 0)) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_sari_i64(t0, arg1, 63); - tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2); + tcg_gen_op5_i64(INDEX_op_divs2, ret, t0, arg1, t0, arg2); tcg_temp_free_i64(t0); } else { gen_helper_div_i64(ret, arg1, arg2); @@ -2046,18 +2004,18 @@ void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { - if (TCG_TARGET_HAS_rem_i64) { - tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div_i64) { + if (tcg_op_supported(INDEX_op_rems, TCG_TYPE_I64, 0)) { + tcg_gen_op3_i64(INDEX_op_rems, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I64, 0)) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); - tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_divs, t0, arg1, arg2); tcg_gen_mul_i64(t0, t0, arg2); tcg_gen_sub_i64(ret, arg1, t0); tcg_temp_free_i64(t0); - } else if (TCG_TARGET_HAS_div2_i64) { + } else if (tcg_op_supported(INDEX_op_divs2, TCG_TYPE_I64, 0)) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_sari_i64(t0, arg1, 63); - tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2); + tcg_gen_op5_i64(INDEX_op_divs2, t0, ret, arg1, t0, arg2); tcg_temp_free_i64(t0); } else { gen_helper_rem_i64(ret, arg1, arg2); @@ -2066,12 +2024,12 @@ void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { - if (TCG_TARGET_HAS_div_i64) { - tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div2_i64) { + if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I64, 0)) { + tcg_gen_op3_i64(INDEX_op_divu, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_divu2, TCG_TYPE_I64, 0)) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 zero = tcg_constant_i64(0); - tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, zero, arg2); + tcg_gen_op5_i64(INDEX_op_divu2, ret, t0, arg1, zero, arg2); tcg_temp_free_i64(t0); } else { gen_helper_divu_i64(ret, arg1, arg2); @@ -2080,18 +2038,18 @@ void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { - if (TCG_TARGET_HAS_rem_i64) { - tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_div_i64) { + if (tcg_op_supported(INDEX_op_remu, TCG_TYPE_I64, 0)) { + tcg_gen_op3_i64(INDEX_op_remu, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I64, 0)) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); - tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_divu, t0, arg1, arg2); tcg_gen_mul_i64(t0, t0, arg2); tcg_gen_sub_i64(ret, arg1, t0); tcg_temp_free_i64(t0); - } else if (TCG_TARGET_HAS_div2_i64) { + } else if (tcg_op_supported(INDEX_op_divu2, TCG_TYPE_I64, 0)) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 zero = tcg_constant_i64(0); - tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, zero, arg2); + tcg_gen_op5_i64(INDEX_op_divu2, t0, ret, arg1, zero, arg2); tcg_temp_free_i64(t0); } else { gen_helper_remu_i64(ret, arg1, arg2); @@ -2100,77 +2058,32 @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 
arg) { - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_ext8s_i32(TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); - } else if (TCG_TARGET_HAS_ext8s_i64) { - tcg_gen_op2_i64(INDEX_op_ext8s_i64, ret, arg); - } else { - tcg_gen_shli_i64(ret, arg, 56); - tcg_gen_sari_i64(ret, ret, 56); - } + tcg_gen_sextract_i64(ret, arg, 0, 8); } void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg) { - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_ext16s_i32(TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); - } else if (TCG_TARGET_HAS_ext16s_i64) { - tcg_gen_op2_i64(INDEX_op_ext16s_i64, ret, arg); - } else { - tcg_gen_shli_i64(ret, arg, 48); - tcg_gen_sari_i64(ret, ret, 48); - } + tcg_gen_sextract_i64(ret, arg, 0, 16); } void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg) { - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); - } else if (TCG_TARGET_HAS_ext32s_i64) { - tcg_gen_op2_i64(INDEX_op_ext32s_i64, ret, arg); - } else { - tcg_gen_shli_i64(ret, arg, 32); - tcg_gen_sari_i64(ret, ret, 32); - } + tcg_gen_sextract_i64(ret, arg, 0, 32); } void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg) { - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_ext8u_i32(TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_movi_i32(TCGV_HIGH(ret), 0); - } else if (TCG_TARGET_HAS_ext8u_i64) { - tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg); - } else { - tcg_gen_andi_i64(ret, arg, 0xffu); - } + tcg_gen_extract_i64(ret, arg, 0, 8); } void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg) { - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_ext16u_i32(TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_movi_i32(TCGV_HIGH(ret), 0); - } else if (TCG_TARGET_HAS_ext16u_i64) { - tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg); - } else { - tcg_gen_andi_i64(ret, arg, 0xffffu); - } + tcg_gen_extract_i64(ret, arg, 0, 16); } void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg) { - if (TCG_TARGET_REG_BITS == 32) { - tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg)); - tcg_gen_movi_i32(TCGV_HIGH(ret), 0); - } else if (TCG_TARGET_HAS_ext32u_i64) { - tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg); - } else { - tcg_gen_andi_i64(ret, arg, 0xffffffffu); - } + tcg_gen_extract_i64(ret, arg, 0, 32); } /* @@ -2193,8 +2106,8 @@ void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags) } else { tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } - } else if (TCG_TARGET_HAS_bswap16_i64) { - tcg_gen_op3i_i64(INDEX_op_bswap16_i64, ret, arg, flags); + } else if (tcg_op_supported(INDEX_op_bswap16, TCG_TYPE_I64, 0)) { + tcg_gen_op3i_i64(INDEX_op_bswap16, ret, arg, flags); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); @@ -2243,8 +2156,8 @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags) } else { tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } - } else if (TCG_TARGET_HAS_bswap32_i64) { - tcg_gen_op3i_i64(INDEX_op_bswap32_i64, ret, arg, flags); + } else if (tcg_op_supported(INDEX_op_bswap32, TCG_TYPE_I64, 0)) { + tcg_gen_op3i_i64(INDEX_op_bswap32, ret, arg, flags); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); @@ -2290,8 +2203,8 @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg) tcg_gen_mov_i32(TCGV_HIGH(ret), t0); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); - } else if (TCG_TARGET_HAS_bswap64_i64) { - tcg_gen_op3i_i64(INDEX_op_bswap64_i64, ret, arg, 0); + } else if (tcg_op_supported(INDEX_op_bswap64, TCG_TYPE_I64, 0)) { + tcg_gen_op3i_i64(INDEX_op_bswap64, ret, arg, 0); } 
else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); @@ -2362,8 +2275,8 @@ void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg) if (TCG_TARGET_REG_BITS == 32) { tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg)); tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg)); - } else if (TCG_TARGET_HAS_not_i64) { - tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg); + } else if (tcg_op_supported(INDEX_op_not, TCG_TYPE_I64, 0)) { + tcg_gen_op2_i64(INDEX_op_not, ret, arg); } else { tcg_gen_xori_i64(ret, arg, -1); } @@ -2374,8 +2287,8 @@ void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) if (TCG_TARGET_REG_BITS == 32) { tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); - } else if (TCG_TARGET_HAS_andc_i64) { - tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I64, 0)) { + tcg_gen_op3_i64(INDEX_op_andc, ret, arg1, arg2); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_not_i64(t0, arg2); @@ -2389,8 +2302,8 @@ void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) if (TCG_TARGET_REG_BITS == 32) { tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); - } else if (TCG_TARGET_HAS_eqv_i64) { - tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_eqv, TCG_TYPE_I64, 0)) { + tcg_gen_op3_i64(INDEX_op_eqv, ret, arg1, arg2); } else { tcg_gen_xor_i64(ret, arg1, arg2); tcg_gen_not_i64(ret, ret); @@ -2402,8 +2315,8 @@ void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) if (TCG_TARGET_REG_BITS == 32) { tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); - } else if (TCG_TARGET_HAS_nand_i64) { - tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_nand, TCG_TYPE_I64, 0)) { + tcg_gen_op3_i64(INDEX_op_nand, ret, arg1, arg2); } else { tcg_gen_and_i64(ret, arg1, arg2); tcg_gen_not_i64(ret, ret); @@ -2415,8 +2328,8 @@ void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) if (TCG_TARGET_REG_BITS == 32) { tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); - } else if (TCG_TARGET_HAS_nor_i64) { - tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_nor, TCG_TYPE_I64, 0)) { + tcg_gen_op3_i64(INDEX_op_nor, ret, arg1, arg2); } else { tcg_gen_or_i64(ret, arg1, arg2); tcg_gen_not_i64(ret, ret); @@ -2428,8 +2341,8 @@ void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) if (TCG_TARGET_REG_BITS == 32) { tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); - } else if (TCG_TARGET_HAS_orc_i64) { - tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I64, 0)) { + tcg_gen_op3_i64(INDEX_op_orc, ret, arg1, arg2); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_not_i64(t0, arg2); @@ -2440,8 +2353,8 @@ void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { - if (TCG_TARGET_HAS_clz_i64) { - tcg_gen_op3_i64(INDEX_op_clz_i64, ret, arg1, arg2); + if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I64, 0)) { + tcg_gen_op3_i64(INDEX_op_clz, ret, arg1, arg2); } else { 
gen_helper_clz_i64(ret, arg1, arg2); } @@ -2450,8 +2363,8 @@ void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) { if (TCG_TARGET_REG_BITS == 32 - && TCG_TARGET_HAS_clz_i32 - && arg2 <= 0xffffffffu) { + && arg2 <= 0xffffffffu + && tcg_op_supported(INDEX_op_clz, TCG_TYPE_I32, 0)) { TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_clzi_i32(t, TCGV_LOW(arg1), arg2 - 32); tcg_gen_addi_i32(t, t, 32); @@ -2465,45 +2378,47 @@ void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { - if (TCG_TARGET_HAS_ctz_i64) { - tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2); - } else if (TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64) { - TCGv_i64 z, t = tcg_temp_ebb_new_i64(); + TCGv_i64 z, t; - if (TCG_TARGET_HAS_ctpop_i64) { - tcg_gen_subi_i64(t, arg1, 1); - tcg_gen_andc_i64(t, t, arg1); - tcg_gen_ctpop_i64(t, t); - } else { - /* Since all non-x86 hosts have clz(0) == 64, don't fight it. */ - tcg_gen_neg_i64(t, arg1); - tcg_gen_and_i64(t, t, arg1); - tcg_gen_clzi_i64(t, t, 64); - tcg_gen_xori_i64(t, t, 63); - } - z = tcg_constant_i64(0); - tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t); - tcg_temp_free_i64(t); - tcg_temp_free_i64(z); + if (tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I64, 0)) { + tcg_gen_op3_i64(INDEX_op_ctz, ret, arg1, arg2); + return; + } + if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I64, 0)) { + t = tcg_temp_ebb_new_i64(); + tcg_gen_subi_i64(t, arg1, 1); + tcg_gen_andc_i64(t, t, arg1); + tcg_gen_ctpop_i64(t, t); + } else if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I64, 0)) { + t = tcg_temp_ebb_new_i64(); + tcg_gen_neg_i64(t, arg1); + tcg_gen_and_i64(t, t, arg1); + tcg_gen_clzi_i64(t, t, 64); + tcg_gen_xori_i64(t, t, 63); } else { gen_helper_ctz_i64(ret, arg1, arg2); + return; } + + z = tcg_constant_i64(0); + tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t); + tcg_temp_free_i64(t); } void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) { if (TCG_TARGET_REG_BITS == 32 - && TCG_TARGET_HAS_ctz_i32 - && arg2 <= 0xffffffffu) { + && arg2 <= 0xffffffffu + && tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I32, 0)) { TCGv_i32 t32 = tcg_temp_ebb_new_i32(); tcg_gen_ctzi_i32(t32, TCGV_HIGH(arg1), arg2 - 32); tcg_gen_addi_i32(t32, t32, 32); tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); tcg_temp_free_i32(t32); - } else if (!TCG_TARGET_HAS_ctz_i64 - && TCG_TARGET_HAS_ctpop_i64 - && arg2 == 64) { + } else if (arg2 == 64 + && !tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I64, 0) + && tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I64, 0)) { /* This equivalence has the advantage of not requiring a fixup. 
*/ TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_subi_i64(t, arg1, 1); @@ -2517,7 +2432,7 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg) { - if (TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32) { + if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I64, 0)) { TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_sari_i64(t, arg, 63); tcg_gen_xor_i64(t, t, arg); @@ -2531,28 +2446,37 @@ void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg) void tcg_gen_ctpop_i64(TCGv_i64 ret, TCGv_i64 arg1) { - if (TCG_TARGET_HAS_ctpop_i64) { - tcg_gen_op2_i64(INDEX_op_ctpop_i64, ret, arg1); - } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctpop_i32) { - tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1)); - tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1)); - tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret)); - tcg_gen_movi_i32(TCGV_HIGH(ret), 0); + if (TCG_TARGET_REG_BITS == 64) { + if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I64, 0)) { + tcg_gen_op2_i64(INDEX_op_ctpop, ret, arg1); + return; + } } else { - gen_helper_ctpop_i64(ret, arg1); + if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I32, 0)) { + tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1)); + tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1)); + tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret)); + tcg_gen_movi_i32(TCGV_HIGH(ret), 0); + return; + } } + gen_helper_ctpop_i64(ret, arg1); } void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { - if (TCG_TARGET_HAS_rot_i64) { - tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2); + if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I64, 0)) { + tcg_gen_op3_i64(INDEX_op_rotl, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I64, 0)) { + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + tcg_gen_neg_i64(t0, arg2); + tcg_gen_op3_i64(INDEX_op_rotr, ret, arg1, t0); + tcg_temp_free_i64(t0); } else { - TCGv_i64 t0, t1; - t0 = tcg_temp_ebb_new_i64(); - t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_shl_i64(t0, arg1, arg2); - tcg_gen_subfi_i64(t1, 64, arg2); + tcg_gen_neg_i64(t1, arg2); tcg_gen_shr_i64(t1, arg1, t1); tcg_gen_or_i64(ret, t0, t1); tcg_temp_free_i64(t0); @@ -2566,12 +2490,15 @@ void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i64(ret, arg1); - } else if (TCG_TARGET_HAS_rot_i64) { - tcg_gen_rotl_i64(ret, arg1, tcg_constant_i64(arg2)); + } else if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I64, 0)) { + TCGv_i64 t0 = tcg_constant_i64(arg2); + tcg_gen_op3_i64(INDEX_op_rotl, ret, arg1, t0); + } else if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I64, 0)) { + TCGv_i64 t0 = tcg_constant_i64(64 - arg2); + tcg_gen_op3_i64(INDEX_op_rotr, ret, arg1, t0); } else { - TCGv_i64 t0, t1; - t0 = tcg_temp_ebb_new_i64(); - t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_shli_i64(t0, arg1, arg2); tcg_gen_shri_i64(t1, arg1, 64 - arg2); tcg_gen_or_i64(ret, t0, t1); @@ -2582,14 +2509,18 @@ void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { - if (TCG_TARGET_HAS_rot_i64) { - tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2); + if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I64, 0)) { + tcg_gen_op3_i64(INDEX_op_rotr, ret, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I64, 0)) { + TCGv_i64 t0 =
tcg_temp_ebb_new_i64(); + tcg_gen_neg_i64(t0, arg2); + tcg_gen_op3_i64(INDEX_op_rotl, ret, arg1, t0); + tcg_temp_free_i64(t0); } else { - TCGv_i64 t0, t1; - t0 = tcg_temp_ebb_new_i64(); - t1 = tcg_temp_ebb_new_i64(); + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_shr_i64(t0, arg1, arg2); - tcg_gen_subfi_i64(t1, 64, arg2); + tcg_gen_neg_i64(t1, arg2); tcg_gen_shl_i64(t1, arg1, t1); tcg_gen_or_i64(ret, t0, t1); tcg_temp_free_i64(t0); @@ -2600,12 +2531,7 @@ void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 64); - /* some cases can be optimized here */ - if (arg2 == 0) { - tcg_gen_mov_i64(ret, arg1); - } else { - tcg_gen_rotli_i64(ret, arg1, 64 - arg2); - } + tcg_gen_rotli_i64(ret, arg1, -arg2 & 63); } void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2, @@ -2623,12 +2549,13 @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2, tcg_gen_mov_i64(ret, arg2); return; } - if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) { - tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len); - return; - } - if (TCG_TARGET_REG_BITS == 32) { + if (TCG_TARGET_REG_BITS == 64) { + if (TCG_TARGET_deposit_valid(TCG_TYPE_I64, ofs, len)) { + tcg_gen_op5ii_i64(INDEX_op_deposit, ret, arg1, arg2, ofs, len); + return; + } + } else { if (ofs >= 32) { tcg_gen_deposit_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_LOW(arg2), ofs - 32, len); @@ -2645,7 +2572,7 @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2, t1 = tcg_temp_ebb_new_i64(); - if (TCG_TARGET_HAS_extract2_i64) { + if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I64, 0)) { if (ofs + len == 64) { tcg_gen_shli_i64(t1, arg1, len); tcg_gen_extract2_i64(ret, t1, arg2, len); @@ -2683,10 +2610,10 @@ void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg, tcg_gen_shli_i64(ret, arg, ofs); } else if (ofs == 0) { tcg_gen_andi_i64(ret, arg, (1ull << len) - 1); - } else if (TCG_TARGET_HAS_deposit_i64 - && TCG_TARGET_deposit_i64_valid(ofs, len)) { + } else if (TCG_TARGET_REG_BITS == 64 && + TCG_TARGET_deposit_valid(TCG_TYPE_I64, ofs, len)) { TCGv_i64 zero = tcg_constant_i64(0); - tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, zero, arg, ofs, len); + tcg_gen_op5ii_i64(INDEX_op_deposit, ret, zero, arg, ofs, len); } else { if (TCG_TARGET_REG_BITS == 32) { if (ofs >= 32) { @@ -2701,54 +2628,20 @@ void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg, return; } } - /* To help two-operand hosts we prefer to zero-extend first, - which allows ARG to stay live. */ - switch (len) { - case 32: - if (TCG_TARGET_HAS_ext32u_i64) { - tcg_gen_ext32u_i64(ret, arg); - tcg_gen_shli_i64(ret, ret, ofs); - return; - } - break; - case 16: - if (TCG_TARGET_HAS_ext16u_i64) { - tcg_gen_ext16u_i64(ret, arg); - tcg_gen_shli_i64(ret, ret, ofs); - return; - } - break; - case 8: - if (TCG_TARGET_HAS_ext8u_i64) { - tcg_gen_ext8u_i64(ret, arg); - tcg_gen_shli_i64(ret, ret, ofs); - return; - } - break; + /* + * To help two-operand hosts we prefer to zero-extend first, + * which allows ARG to stay live. + */ + if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, len)) { + tcg_gen_extract_i64(ret, arg, 0, len); + tcg_gen_shli_i64(ret, ret, ofs); + return; } /* Otherwise prefer zero-extension over AND for code size. 
*/ - switch (ofs + len) { - case 32: - if (TCG_TARGET_HAS_ext32u_i64) { - tcg_gen_shli_i64(ret, arg, ofs); - tcg_gen_ext32u_i64(ret, ret); - return; - } - break; - case 16: - if (TCG_TARGET_HAS_ext16u_i64) { - tcg_gen_shli_i64(ret, arg, ofs); - tcg_gen_ext16u_i64(ret, ret); - return; - } - break; - case 8: - if (TCG_TARGET_HAS_ext8u_i64) { - tcg_gen_shli_i64(ret, arg, ofs); - tcg_gen_ext8u_i64(ret, ret); - return; - } - break; + if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, ofs + len)) { + tcg_gen_shli_i64(ret, arg, ofs); + tcg_gen_extract_i64(ret, ret, 0, ofs + len); + return; } tcg_gen_andi_i64(ret, arg, (1ull << len) - 1); tcg_gen_shli_i64(ret, ret, ofs); @@ -2768,10 +2661,6 @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg, tcg_gen_shri_i64(ret, arg, 64 - len); return; } - if (ofs == 0) { - tcg_gen_andi_i64(ret, arg, (1ull << len) - 1); - return; - } if (TCG_TARGET_REG_BITS == 32) { /* Look for a 32-bit extract within one of the two words. */ @@ -2785,40 +2674,34 @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg, tcg_gen_movi_i32(TCGV_HIGH(ret), 0); return; } - /* The field is split across two words. One double-word - shift is better than two double-word shifts. */ - goto do_shift_and; + + /* The field is split across two words. */ + tcg_gen_extract2_i32(TCGV_LOW(ret), TCGV_LOW(arg), + TCGV_HIGH(arg), ofs); + if (len <= 32) { + tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_LOW(ret), 0, len); + tcg_gen_movi_i32(TCGV_HIGH(ret), 0); + } else { + tcg_gen_extract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg), + ofs, len - 32); + } + return; } - if (TCG_TARGET_HAS_extract_i64 - && TCG_TARGET_extract_i64_valid(ofs, len)) { - tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len); + if (TCG_TARGET_extract_valid(TCG_TYPE_I64, ofs, len)) { + tcg_gen_op4ii_i64(INDEX_op_extract, ret, arg, ofs, len); + return; + } + if (ofs == 0) { + tcg_gen_andi_i64(ret, arg, (1ull << len) - 1); return; } /* Assume that zero-extension, if available, is cheaper than a shift. */ - switch (ofs + len) { - case 32: - if (TCG_TARGET_HAS_ext32u_i64) { - tcg_gen_ext32u_i64(ret, arg); - tcg_gen_shri_i64(ret, ret, ofs); - return; - } - break; - case 16: - if (TCG_TARGET_HAS_ext16u_i64) { - tcg_gen_ext16u_i64(ret, arg); - tcg_gen_shri_i64(ret, ret, ofs); - return; - } - break; - case 8: - if (TCG_TARGET_HAS_ext8u_i64) { - tcg_gen_ext8u_i64(ret, arg); - tcg_gen_shri_i64(ret, ret, ofs); - return; - } - break; + if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, ofs + len)) { + tcg_gen_op4ii_i64(INDEX_op_extract, ret, arg, 0, ofs + len); + tcg_gen_shri_i64(ret, ret, ofs); + return; } /* ??? Ideally we'd know what values are available for immediate AND. @@ -2826,7 +2709,6 @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg, so that we get ext8u, ext16u, and ext32u. */ switch (len) { case 1 ... 8: case 16: case 32: - do_shift_and: tcg_gen_shri_i64(ret, arg, ofs); tcg_gen_andi_i64(ret, ret, (1ull << len) - 1); break; @@ -2850,19 +2732,6 @@ void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg, tcg_gen_sari_i64(ret, arg, 64 - len); return; } - if (ofs == 0) { - switch (len) { - case 32: - tcg_gen_ext32s_i64(ret, arg); - return; - case 16: - tcg_gen_ext16s_i64(ret, arg); - return; - case 8: - tcg_gen_ext8s_i64(ret, arg); - return; - } - } if (TCG_TARGET_REG_BITS == 32) { /* Look for a 32-bit extract within one of the two words. 
*/ @@ -2896,59 +2765,23 @@ void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg, return; } - if (TCG_TARGET_HAS_sextract_i64 - && TCG_TARGET_extract_i64_valid(ofs, len)) { - tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, ofs, len); + if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, ofs, len)) { + tcg_gen_op4ii_i64(INDEX_op_sextract, ret, arg, ofs, len); return; } /* Assume that sign-extension, if available, is cheaper than a shift. */ - switch (ofs + len) { - case 32: - if (TCG_TARGET_HAS_ext32s_i64) { - tcg_gen_ext32s_i64(ret, arg); - tcg_gen_sari_i64(ret, ret, ofs); - return; - } - break; - case 16: - if (TCG_TARGET_HAS_ext16s_i64) { - tcg_gen_ext16s_i64(ret, arg); - tcg_gen_sari_i64(ret, ret, ofs); - return; - } - break; - case 8: - if (TCG_TARGET_HAS_ext8s_i64) { - tcg_gen_ext8s_i64(ret, arg); - tcg_gen_sari_i64(ret, ret, ofs); - return; - } - break; + if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, 0, ofs + len)) { + tcg_gen_op4ii_i64(INDEX_op_sextract, ret, arg, 0, ofs + len); + tcg_gen_sari_i64(ret, ret, ofs); + return; } - switch (len) { - case 32: - if (TCG_TARGET_HAS_ext32s_i64) { - tcg_gen_shri_i64(ret, arg, ofs); - tcg_gen_ext32s_i64(ret, ret); - return; - } - break; - case 16: - if (TCG_TARGET_HAS_ext16s_i64) { - tcg_gen_shri_i64(ret, arg, ofs); - tcg_gen_ext16s_i64(ret, ret); - return; - } - break; - case 8: - if (TCG_TARGET_HAS_ext8s_i64) { - tcg_gen_shri_i64(ret, arg, ofs); - tcg_gen_ext8s_i64(ret, ret); - return; - } - break; + if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, 0, len)) { + tcg_gen_shri_i64(ret, arg, ofs); + tcg_gen_op4ii_i64(INDEX_op_sextract, ret, ret, 0, len); + return; } + tcg_gen_shli_i64(ret, arg, 64 - len - ofs); tcg_gen_sari_i64(ret, ret, 64 - len); } @@ -2967,8 +2800,8 @@ void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah, tcg_gen_mov_i64(ret, ah); } else if (al == ah) { tcg_gen_rotri_i64(ret, al, ofs); - } else if (TCG_TARGET_HAS_extract2_i64) { - tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs); + } else if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I64, 0)) { + tcg_gen_op4i_i64(INDEX_op_extract2, ret, al, ah, ofs); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_shri_i64(t0, al, ofs); @@ -2985,7 +2818,7 @@ void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1, } else if (cond == TCG_COND_NEVER) { tcg_gen_mov_i64(ret, v2); } else if (TCG_TARGET_REG_BITS == 64) { - tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond); + tcg_gen_op6i_i64(INDEX_op_movcond, ret, c1, c2, v1, v2, cond); } else { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); TCGv_i32 zero = tcg_constant_i32(0); @@ -3006,8 +2839,25 @@ void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1, void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) { - if (TCG_TARGET_HAS_add2_i64) { - tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh); + if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_REG, 0)) { + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + + if (TCG_TARGET_REG_BITS == 32) { + tcg_gen_op3_i32(INDEX_op_addco, TCGV_LOW(t0), + TCGV_LOW(al), TCGV_LOW(bl)); + tcg_gen_op3_i32(INDEX_op_addcio, TCGV_HIGH(t0), + TCGV_HIGH(al), TCGV_HIGH(bl)); + tcg_gen_op3_i32(INDEX_op_addcio, TCGV_LOW(rh), + TCGV_LOW(ah), TCGV_LOW(bh)); + tcg_gen_op3_i32(INDEX_op_addci, TCGV_HIGH(rh), + TCGV_HIGH(ah), TCGV_HIGH(bh)); + } else { + tcg_gen_op3_i64(INDEX_op_addco, t0, al, bl); + tcg_gen_op3_i64(INDEX_op_addci, rh, ah, bh); + } + + tcg_gen_mov_i64(rl, t0); + tcg_temp_free_i64(t0); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); 
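/*
 * (Editorial annotation, not part of the patch.) The preferred path
 * above threads a host carry flag through the wide addition: addco
 * produces a carry from the low part, addcio consumes and re-produces
 * it for middle parts, and addci consumes it for the final part. In
 * scalar terms for the 64-bit-host case: t0 = al + bl with
 * carry = (t0 < al), then rh = ah + bh + carry. The pre-existing
 * fallback below rebuilds the same carry with a TCG_COND_LTU setcond.
 */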
TCGv_i64 t1 = tcg_temp_ebb_new_i64(); @@ -3021,11 +2871,96 @@ void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, } } +void tcg_gen_addcio_i64(TCGv_i64 r, TCGv_i64 co, + TCGv_i64 a, TCGv_i64 b, TCGv_i64 ci) +{ + if (TCG_TARGET_REG_BITS == 64) { + if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_I64, 0)) { + TCGv_i64 discard = tcg_temp_ebb_new_i64(); + TCGv_i64 zero = tcg_constant_i64(0); + TCGv_i64 mone = tcg_constant_i64(-1); + + tcg_gen_op3_i64(INDEX_op_addco, discard, ci, mone); + tcg_gen_op3_i64(INDEX_op_addcio, r, a, b); + tcg_gen_op3_i64(INDEX_op_addci, co, zero, zero); + tcg_temp_free_i64(discard); + } else { + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + TCGv_i64 t1 = tcg_temp_ebb_new_i64(); + + tcg_gen_add_i64(t0, a, b); + tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, a); + tcg_gen_add_i64(r, t0, ci); + tcg_gen_setcond_i64(TCG_COND_LTU, t0, r, t0); + tcg_gen_or_i64(co, t0, t1); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + } + } else { + if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_I32, 0)) { + TCGv_i32 discard = tcg_temp_ebb_new_i32(); + TCGv_i32 zero = tcg_constant_i32(0); + TCGv_i32 mone = tcg_constant_i32(-1); + + tcg_gen_op3_i32(INDEX_op_addco, discard, TCGV_LOW(ci), mone); + tcg_gen_op3_i32(INDEX_op_addcio, discard, TCGV_HIGH(ci), mone); + tcg_gen_op3_i32(INDEX_op_addcio, TCGV_LOW(r), + TCGV_LOW(a), TCGV_LOW(b)); + tcg_gen_op3_i32(INDEX_op_addcio, TCGV_HIGH(r), + TCGV_HIGH(a), TCGV_HIGH(b)); + tcg_gen_op3_i32(INDEX_op_addci, TCGV_LOW(co), zero, zero); + tcg_temp_free_i32(discard); + } else { + TCGv_i32 t0 = tcg_temp_ebb_new_i32(); + TCGv_i32 c0 = tcg_temp_ebb_new_i32(); + TCGv_i32 c1 = tcg_temp_ebb_new_i32(); + + tcg_gen_or_i32(c1, TCGV_LOW(ci), TCGV_HIGH(ci)); + tcg_gen_setcondi_i32(TCG_COND_NE, c1, c1, 0); + + tcg_gen_add_i32(t0, TCGV_LOW(a), TCGV_LOW(b)); + tcg_gen_setcond_i32(TCG_COND_LTU, c0, t0, TCGV_LOW(a)); + tcg_gen_add_i32(TCGV_LOW(r), t0, c1); + tcg_gen_setcond_i32(TCG_COND_LTU, c1, TCGV_LOW(r), c1); + tcg_gen_or_i32(c1, c1, c0); + + tcg_gen_add_i32(t0, TCGV_HIGH(a), TCGV_HIGH(b)); + tcg_gen_setcond_i32(TCG_COND_LTU, c0, t0, TCGV_HIGH(a)); + tcg_gen_add_i32(TCGV_HIGH(r), t0, c1); + tcg_gen_setcond_i32(TCG_COND_LTU, c1, TCGV_HIGH(r), c1); + tcg_gen_or_i32(TCGV_LOW(co), c0, c1); + + tcg_temp_free_i32(t0); + tcg_temp_free_i32(c0); + tcg_temp_free_i32(c1); + } + tcg_gen_movi_i32(TCGV_HIGH(co), 0); + } +} + void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) { - if (TCG_TARGET_HAS_sub2_i64) { - tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh); + if (tcg_op_supported(INDEX_op_subbi, TCG_TYPE_REG, 0)) { + TCGv_i64 t0 = tcg_temp_ebb_new_i64(); + + if (TCG_TARGET_REG_BITS == 32) { + tcg_gen_op3_i32(INDEX_op_subbo, TCGV_LOW(t0), + TCGV_LOW(al), TCGV_LOW(bl)); + tcg_gen_op3_i32(INDEX_op_subbio, TCGV_HIGH(t0), + TCGV_HIGH(al), TCGV_HIGH(bl)); + tcg_gen_op3_i32(INDEX_op_subbio, TCGV_LOW(rh), + TCGV_LOW(ah), TCGV_LOW(bh)); + tcg_gen_op3_i32(INDEX_op_subbi, TCGV_HIGH(rh), + TCGV_HIGH(ah), TCGV_HIGH(bh)); + } else { + tcg_gen_op3_i64(INDEX_op_subbo, t0, al, bl); + tcg_gen_op3_i64(INDEX_op_subbi, rh, ah, bh); + } + + tcg_gen_mov_i64(rl, t0); + tcg_temp_free_i64(t0); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); @@ -3041,12 +2976,12 @@ void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) { - if (TCG_TARGET_HAS_mulu2_i64) { - tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2); - } else if 
(TCG_TARGET_HAS_muluh_i64) { + if (tcg_op_supported(INDEX_op_mulu2, TCG_TYPE_I64, 0)) { + tcg_gen_op4_i64(INDEX_op_mulu2, rl, rh, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) { TCGv_i64 t = tcg_temp_ebb_new_i64(); - tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2); - tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_muluh, rh, arg1, arg2); tcg_gen_mov_i64(rl, t); tcg_temp_free_i64(t); } else { @@ -3060,15 +2995,16 @@ void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) { - if (TCG_TARGET_HAS_muls2_i64) { - tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2); - } else if (TCG_TARGET_HAS_mulsh_i64) { + if (tcg_op_supported(INDEX_op_muls2, TCG_TYPE_I64, 0)) { + tcg_gen_op4_i64(INDEX_op_muls2, rl, rh, arg1, arg2); + } else if (tcg_op_supported(INDEX_op_mulsh, TCG_TYPE_I64, 0)) { TCGv_i64 t = tcg_temp_ebb_new_i64(); - tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2); - tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2); + tcg_gen_op3_i64(INDEX_op_mulsh, rh, arg1, arg2); tcg_gen_mov_i64(rl, t); tcg_temp_free_i64(t); - } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) { + } else if (tcg_op_supported(INDEX_op_mulu2, TCG_TYPE_I64, 0) || + tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); TCGv_i64 t2 = tcg_temp_ebb_new_i64(); @@ -3147,11 +3083,9 @@ void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_mov_i32(ret, TCGV_LOW(arg)); - } else if (TCG_TARGET_HAS_extr_i64_i32) { - tcg_gen_op2(INDEX_op_extrl_i64_i32, - tcgv_i32_arg(ret), tcgv_i64_arg(arg)); } else { - tcg_gen_mov_i32(ret, (TCGv_i32)arg); + tcg_gen_op2(INDEX_op_extrl_i64_i32, TCG_TYPE_I32, + tcgv_i32_arg(ret), tcgv_i64_arg(arg)); } } @@ -3159,14 +3093,9 @@ void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_mov_i32(ret, TCGV_HIGH(arg)); - } else if (TCG_TARGET_HAS_extr_i64_i32) { - tcg_gen_op2(INDEX_op_extrh_i64_i32, - tcgv_i32_arg(ret), tcgv_i64_arg(arg)); } else { - TCGv_i64 t = tcg_temp_ebb_new_i64(); - tcg_gen_shri_i64(t, arg, 32); - tcg_gen_mov_i32(ret, (TCGv_i32)t); - tcg_temp_free_i64(t); + tcg_gen_op2(INDEX_op_extrh_i64_i32, TCG_TYPE_I32, + tcgv_i32_arg(ret), tcgv_i64_arg(arg)); } } @@ -3176,7 +3105,7 @@ void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg) tcg_gen_mov_i32(TCGV_LOW(ret), arg); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } else { - tcg_gen_op2(INDEX_op_extu_i32_i64, + tcg_gen_op2(INDEX_op_extu_i32_i64, TCG_TYPE_I64, tcgv_i64_arg(ret), tcgv_i32_arg(arg)); } } @@ -3187,7 +3116,7 @@ void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg) tcg_gen_mov_i32(TCGV_LOW(ret), arg); tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); } else { - tcg_gen_op2(INDEX_op_ext_i32_i64, + tcg_gen_op2(INDEX_op_ext_i32_i64, TCG_TYPE_I64, tcgv_i64_arg(ret), tcgv_i32_arg(arg)); } } @@ -3209,7 +3138,7 @@ void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high) tcg_gen_extu_i32_i64(dest, low); /* If deposit is available, use it. Otherwise use the extra knowledge that we have of the zero-extensions above. 
*/ - if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) { + if (TCG_TARGET_deposit_valid(TCG_TYPE_I64, 32, 32)) { tcg_gen_deposit_i64(dest, dest, tmp, 32, 32); } else { tcg_gen_shli_i64(tmp, tmp, 32); @@ -3312,7 +3241,7 @@ void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx) tcg_debug_assert(idx == TB_EXIT_REQUESTED); } - tcg_gen_op1i(INDEX_op_exit_tb, val); + tcg_gen_op1i(INDEX_op_exit_tb, 0, val); } void tcg_gen_goto_tb(unsigned idx) @@ -3327,7 +3256,7 @@ void tcg_gen_goto_tb(unsigned idx) tcg_ctx->goto_tb_issue_mask |= 1 << idx; #endif plugin_gen_disable_mem_helpers(); - tcg_gen_op1i(INDEX_op_goto_tb, idx); + tcg_gen_op1i(INDEX_op_goto_tb, 0, idx); } void tcg_gen_lookup_and_goto_ptr(void) @@ -3342,6 +3271,6 @@ void tcg_gen_lookup_and_goto_ptr(void) plugin_gen_disable_mem_helpers(); ptr = tcg_temp_ebb_new_ptr(); gen_helper_lookup_tb_ptr(ptr, tcg_env); - tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr)); + tcg_gen_op1i(INDEX_op_goto_ptr, TCG_TYPE_PTR, tcgv_ptr_arg(ptr)); tcg_temp_free_ptr(ptr); } diff --git a/tcg/tcg-pool.c.inc b/tcg/tcg-pool.c.inc deleted file mode 100644 index 90c2e63b7f3..00000000000 --- a/tcg/tcg-pool.c.inc +++ /dev/null @@ -1,162 +0,0 @@ -/* - * TCG Backend Data: constant pool. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -typedef struct TCGLabelPoolData { - struct TCGLabelPoolData *next; - tcg_insn_unit *label; - intptr_t addend; - int rtype; - unsigned nlong; - tcg_target_ulong data[]; -} TCGLabelPoolData; - - -static TCGLabelPoolData *new_pool_alloc(TCGContext *s, int nlong, int rtype, - tcg_insn_unit *label, intptr_t addend) -{ - TCGLabelPoolData *n = tcg_malloc(sizeof(TCGLabelPoolData) - + sizeof(tcg_target_ulong) * nlong); - - n->label = label; - n->addend = addend; - n->rtype = rtype; - n->nlong = nlong; - return n; -} - -static void new_pool_insert(TCGContext *s, TCGLabelPoolData *n) -{ - TCGLabelPoolData *i, **pp; - int nlong = n->nlong; - - /* Insertion sort on the pool. */ - for (pp = &s->pool_labels; (i = *pp) != NULL; pp = &i->next) { - if (nlong > i->nlong) { - break; - } - if (nlong < i->nlong) { - continue; - } - if (memcmp(n->data, i->data, sizeof(tcg_target_ulong) * nlong) >= 0) { - break; - } - } - n->next = *pp; - *pp = n; -} - -/* The "usual" for generic integer code. 
*/ -static inline void new_pool_label(TCGContext *s, tcg_target_ulong d, int rtype, - tcg_insn_unit *label, intptr_t addend) -{ - TCGLabelPoolData *n = new_pool_alloc(s, 1, rtype, label, addend); - n->data[0] = d; - new_pool_insert(s, n); -} - -/* For v64 or v128, depending on the host. */ -static inline void new_pool_l2(TCGContext *s, int rtype, tcg_insn_unit *label, - intptr_t addend, tcg_target_ulong d0, - tcg_target_ulong d1) -{ - TCGLabelPoolData *n = new_pool_alloc(s, 2, rtype, label, addend); - n->data[0] = d0; - n->data[1] = d1; - new_pool_insert(s, n); -} - -/* For v128 or v256, depending on the host. */ -static inline void new_pool_l4(TCGContext *s, int rtype, tcg_insn_unit *label, - intptr_t addend, tcg_target_ulong d0, - tcg_target_ulong d1, tcg_target_ulong d2, - tcg_target_ulong d3) -{ - TCGLabelPoolData *n = new_pool_alloc(s, 4, rtype, label, addend); - n->data[0] = d0; - n->data[1] = d1; - n->data[2] = d2; - n->data[3] = d3; - new_pool_insert(s, n); -} - -/* For v256, for 32-bit host. */ -static inline void new_pool_l8(TCGContext *s, int rtype, tcg_insn_unit *label, - intptr_t addend, tcg_target_ulong d0, - tcg_target_ulong d1, tcg_target_ulong d2, - tcg_target_ulong d3, tcg_target_ulong d4, - tcg_target_ulong d5, tcg_target_ulong d6, - tcg_target_ulong d7) -{ - TCGLabelPoolData *n = new_pool_alloc(s, 8, rtype, label, addend); - n->data[0] = d0; - n->data[1] = d1; - n->data[2] = d2; - n->data[3] = d3; - n->data[4] = d4; - n->data[5] = d5; - n->data[6] = d6; - n->data[7] = d7; - new_pool_insert(s, n); -} - -/* To be provided by cpu/tcg-target.c.inc. */ -static void tcg_out_nop_fill(tcg_insn_unit *p, int count); - -static int tcg_out_pool_finalize(TCGContext *s) -{ - TCGLabelPoolData *p = s->pool_labels; - TCGLabelPoolData *l = NULL; - void *a; - - if (p == NULL) { - return 0; - } - - /* ??? Round up to qemu_icache_linesize, but then do not round - again when allocating the next TranslationBlock structure. 
*/ - a = (void *)ROUND_UP((uintptr_t)s->code_ptr, - sizeof(tcg_target_ulong) * p->nlong); - tcg_out_nop_fill(s->code_ptr, (tcg_insn_unit *)a - s->code_ptr); - s->data_gen_ptr = a; - - for (; p != NULL; p = p->next) { - size_t size = sizeof(tcg_target_ulong) * p->nlong; - uintptr_t value; - - if (!l || l->nlong != p->nlong || memcmp(l->data, p->data, size)) { - if (unlikely(a > s->code_gen_highwater)) { - return -1; - } - memcpy(a, p->data, size); - a += size; - l = p; - } - - value = (uintptr_t)tcg_splitwx_to_rx(a) - size; - if (!patch_reloc(p->label, p->rtype, value, p->addend)) { - return -2; - } - } - - s->code_ptr = a; - return 0; -} diff --git a/tcg/tcg.c b/tcg/tcg.c index 34e3056380d..afac55a203a 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -34,6 +34,7 @@ #include "qemu/cacheflush.h" #include "qemu/cacheinfo.h" #include "qemu/timer.h" +#include "exec/target_page.h" #include "exec/translation-block.h" #include "exec/tlb-common.h" #include "tcg/startup.h" @@ -56,6 +57,7 @@ #include "tcg/tcg-temp-internal.h" #include "tcg-internal.h" #include "tcg/perf.h" +#include "tcg-has.h" #ifdef CONFIG_USER_ONLY #include "user/guest-base.h" #endif @@ -66,6 +68,11 @@ static void tcg_target_init(TCGContext *s); static void tcg_target_qemu_prologue(TCGContext *s); static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend); +static void tcg_out_nop_fill(tcg_insn_unit *p, int count); + +typedef struct TCGLabelQemuLdst TCGLabelQemuLdst; +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l); +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l); /* The CIE and FDE header definitions will be common to all hosts. */ typedef struct { @@ -90,18 +97,17 @@ typedef struct QEMU_PACKED { DebugFrameFDEHeader fde; } DebugFrameHeader; -typedef struct TCGLabelQemuLdst { +struct TCGLabelQemuLdst { bool is_ld; /* qemu_ld: true, qemu_st: false */ MemOpIdx oi; TCGType type; /* result type of a load */ - TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */ - TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */ + TCGReg addr_reg; /* reg index for guest virtual addr */ TCGReg datalo_reg; /* reg index for low word to be loaded or stored */ TCGReg datahi_reg; /* reg index for high word to be loaded or stored */ const tcg_insn_unit *raddr; /* addr of the next IR of qemu_ld/st IR */ tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */ QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next; -} TCGLabelQemuLdst; +}; static void tcg_register_jit_int(const void *buf, size_t size, const void *debug_frame, @@ -128,9 +134,11 @@ static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long); static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2); static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg); static void tcg_out_goto_tb(TCGContext *s, int which); -static void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]); +static void tcg_out_goto_ptr(TCGContext *s, TCGReg dest); +static void tcg_out_mb(TCGContext *s, unsigned bar); +static void tcg_out_br(TCGContext *s, TCGLabel *l); +static void tcg_out_set_carry(TCGContext *s); +static void tcg_out_set_borrow(TCGContext *s); #if TCG_TARGET_MAYBE_vec static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg dst, TCGReg src); @@ -165,6 +173,10 @@ static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, { g_assert_not_reached(); } +int 
tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve) +{ + return 0; +} #endif static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2); @@ -175,9 +187,6 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target, static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot); static bool tcg_target_const_match(int64_t val, int ct, TCGType type, TCGCond cond, int vece); -#ifdef TCG_TARGET_NEED_LDST_LABELS -static int tcg_out_ldst_finalize(TCGContext *s); -#endif #ifndef CONFIG_USER_ONLY #define guest_base ({ qemu_build_not_reached(); (uintptr_t)0; }) @@ -634,6 +643,197 @@ static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1, } } +/* + * Allocate a new TCGLabelQemuLdst entry. + */ + +__attribute__((unused)) +static TCGLabelQemuLdst *new_ldst_label(TCGContext *s) +{ + TCGLabelQemuLdst *l = tcg_malloc(sizeof(*l)); + + memset(l, 0, sizeof(*l)); + QSIMPLEQ_INSERT_TAIL(&s->ldst_labels, l, next); + + return l; +} + +/* + * Allocate new constant pool entries. + */ + +typedef struct TCGLabelPoolData { + struct TCGLabelPoolData *next; + tcg_insn_unit *label; + intptr_t addend; + int rtype; + unsigned nlong; + tcg_target_ulong data[]; +} TCGLabelPoolData; + +static TCGLabelPoolData *new_pool_alloc(TCGContext *s, int nlong, int rtype, + tcg_insn_unit *label, intptr_t addend) +{ + TCGLabelPoolData *n = tcg_malloc(sizeof(TCGLabelPoolData) + + sizeof(tcg_target_ulong) * nlong); + + n->label = label; + n->addend = addend; + n->rtype = rtype; + n->nlong = nlong; + return n; +} + +static void new_pool_insert(TCGContext *s, TCGLabelPoolData *n) +{ + TCGLabelPoolData *i, **pp; + int nlong = n->nlong; + + /* Insertion sort on the pool. */ + for (pp = &s->pool_labels; (i = *pp) != NULL; pp = &i->next) { + if (nlong > i->nlong) { + break; + } + if (nlong < i->nlong) { + continue; + } + if (memcmp(n->data, i->data, sizeof(tcg_target_ulong) * nlong) >= 0) { + break; + } + } + n->next = *pp; + *pp = n; +} + +/* The "usual" for generic integer code. */ +__attribute__((unused)) +static void new_pool_label(TCGContext *s, tcg_target_ulong d, int rtype, + tcg_insn_unit *label, intptr_t addend) +{ + TCGLabelPoolData *n = new_pool_alloc(s, 1, rtype, label, addend); + n->data[0] = d; + new_pool_insert(s, n); +} + +/* For v64 or v128, depending on the host. */ +__attribute__((unused)) +static void new_pool_l2(TCGContext *s, int rtype, tcg_insn_unit *label, + intptr_t addend, tcg_target_ulong d0, + tcg_target_ulong d1) +{ + TCGLabelPoolData *n = new_pool_alloc(s, 2, rtype, label, addend); + n->data[0] = d0; + n->data[1] = d1; + new_pool_insert(s, n); +} + +/* For v128 or v256, depending on the host. */ +__attribute__((unused)) +static void new_pool_l4(TCGContext *s, int rtype, tcg_insn_unit *label, + intptr_t addend, tcg_target_ulong d0, + tcg_target_ulong d1, tcg_target_ulong d2, + tcg_target_ulong d3) +{ + TCGLabelPoolData *n = new_pool_alloc(s, 4, rtype, label, addend); + n->data[0] = d0; + n->data[1] = d1; + n->data[2] = d2; + n->data[3] = d3; + new_pool_insert(s, n); +} + +/* For v256, for 32-bit host. 
*/ +__attribute__((unused)) +static void new_pool_l8(TCGContext *s, int rtype, tcg_insn_unit *label, + intptr_t addend, tcg_target_ulong d0, + tcg_target_ulong d1, tcg_target_ulong d2, + tcg_target_ulong d3, tcg_target_ulong d4, + tcg_target_ulong d5, tcg_target_ulong d6, + tcg_target_ulong d7) +{ + TCGLabelPoolData *n = new_pool_alloc(s, 8, rtype, label, addend); + n->data[0] = d0; + n->data[1] = d1; + n->data[2] = d2; + n->data[3] = d3; + n->data[4] = d4; + n->data[5] = d5; + n->data[6] = d6; + n->data[7] = d7; + new_pool_insert(s, n); +} + +/* + * Generate TB finalization at the end of block + */ + +static int tcg_out_ldst_finalize(TCGContext *s) +{ + TCGLabelQemuLdst *lb; + + /* qemu_ld/st slow paths */ + QSIMPLEQ_FOREACH(lb, &s->ldst_labels, next) { + if (lb->is_ld + ? !tcg_out_qemu_ld_slow_path(s, lb) + : !tcg_out_qemu_st_slow_path(s, lb)) { + return -2; + } + + /* + * Test for (pending) buffer overflow. The assumption is that any + * one operation beginning below the high water mark cannot overrun + * the buffer completely. Thus we can test for overflow after + * generating code without having to check during generation. + */ + if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) { + return -1; + } + } + return 0; +} + +static int tcg_out_pool_finalize(TCGContext *s) +{ + TCGLabelPoolData *p = s->pool_labels; + TCGLabelPoolData *l = NULL; + void *a; + + if (p == NULL) { + return 0; + } + + /* + * ??? Round up to qemu_icache_linesize, but then do not round + * again when allocating the next TranslationBlock structure. + */ + a = (void *)ROUND_UP((uintptr_t)s->code_ptr, + sizeof(tcg_target_ulong) * p->nlong); + tcg_out_nop_fill(s->code_ptr, (tcg_insn_unit *)a - s->code_ptr); + s->data_gen_ptr = a; + + for (; p != NULL; p = p->next) { + size_t size = sizeof(tcg_target_ulong) * p->nlong; + uintptr_t value; + + if (!l || l->nlong != p->nlong || memcmp(l->data, p->data, size)) { + if (unlikely(a > s->code_gen_highwater)) { + return -1; + } + memcpy(a, p->data, size); + a += size; + l = p; + } + + value = (uintptr_t)tcg_splitwx_to_rx(a) - size; + if (!patch_reloc(p->label, p->rtype, value, p->addend)) { + return -2; + } + } + + s->code_ptr = a; + return 0; +} + #define C_PFX1(P, A) P##A #define C_PFX2(P, A, B) P##A##_##B #define C_PFX3(P, A, B, C) P##A##_##B##_##C @@ -664,10 +864,12 @@ static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1, #define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4), typedef enum { + C_Dynamic = -2, + C_NotImplemented = -1, #include "tcg-target-con-set.h" } TCGConstraintSetIndex; -static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode); +static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode, TCGType, unsigned); #undef C_O0_I1 #undef C_O0_I2 @@ -688,31 +890,35 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode); /* Put all of the constraint sets into an array, indexed by the enum. 
*/ -#define C_O0_I1(I1) { .args_ct_str = { #I1 } }, -#define C_O0_I2(I1, I2) { .args_ct_str = { #I1, #I2 } }, -#define C_O0_I3(I1, I2, I3) { .args_ct_str = { #I1, #I2, #I3 } }, -#define C_O0_I4(I1, I2, I3, I4) { .args_ct_str = { #I1, #I2, #I3, #I4 } }, +typedef struct TCGConstraintSet { + uint8_t nb_oargs, nb_iargs; + const char *args_ct_str[TCG_MAX_OP_ARGS]; +} TCGConstraintSet; + +#define C_O0_I1(I1) { 0, 1, { #I1 } }, +#define C_O0_I2(I1, I2) { 0, 2, { #I1, #I2 } }, +#define C_O0_I3(I1, I2, I3) { 0, 3, { #I1, #I2, #I3 } }, +#define C_O0_I4(I1, I2, I3, I4) { 0, 4, { #I1, #I2, #I3, #I4 } }, -#define C_O1_I1(O1, I1) { .args_ct_str = { #O1, #I1 } }, -#define C_O1_I2(O1, I1, I2) { .args_ct_str = { #O1, #I1, #I2 } }, -#define C_O1_I3(O1, I1, I2, I3) { .args_ct_str = { #O1, #I1, #I2, #I3 } }, -#define C_O1_I4(O1, I1, I2, I3, I4) { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } }, +#define C_O1_I1(O1, I1) { 1, 1, { #O1, #I1 } }, +#define C_O1_I2(O1, I1, I2) { 1, 2, { #O1, #I1, #I2 } }, +#define C_O1_I3(O1, I1, I2, I3) { 1, 3, { #O1, #I1, #I2, #I3 } }, +#define C_O1_I4(O1, I1, I2, I3, I4) { 1, 4, { #O1, #I1, #I2, #I3, #I4 } }, -#define C_N1_I2(O1, I1, I2) { .args_ct_str = { "&" #O1, #I1, #I2 } }, -#define C_N1O1_I1(O1, O2, I1) { .args_ct_str = { "&" #O1, #O2, #I1 } }, -#define C_N2_I1(O1, O2, I1) { .args_ct_str = { "&" #O1, "&" #O2, #I1 } }, +#define C_N1_I2(O1, I1, I2) { 1, 2, { "&" #O1, #I1, #I2 } }, +#define C_N1O1_I1(O1, O2, I1) { 2, 1, { "&" #O1, #O2, #I1 } }, +#define C_N2_I1(O1, O2, I1) { 2, 1, { "&" #O1, "&" #O2, #I1 } }, -#define C_O2_I1(O1, O2, I1) { .args_ct_str = { #O1, #O2, #I1 } }, -#define C_O2_I2(O1, O2, I1, I2) { .args_ct_str = { #O1, #O2, #I1, #I2 } }, -#define C_O2_I3(O1, O2, I1, I2, I3) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } }, -#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } }, -#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { "&" #O1, #O2, #I1, #I2, #I3, #I4 } }, +#define C_O2_I1(O1, O2, I1) { 2, 1, { #O1, #O2, #I1 } }, +#define C_O2_I2(O1, O2, I1, I2) { 2, 2, { #O1, #O2, #I1, #I2 } }, +#define C_O2_I3(O1, O2, I1, I2, I3) { 2, 3, { #O1, #O2, #I1, #I2, #I3 } }, +#define C_O2_I4(O1, O2, I1, I2, I3, I4) { 2, 4, { #O1, #O2, #I1, #I2, #I3, #I4 } }, +#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { 2, 4, { "&" #O1, #O2, #I1, #I2, #I3, #I4 } }, -static const TCGTargetOpDef constraint_sets[] = { +static const TCGConstraintSet constraint_sets[] = { #include "tcg-target-con-set.h" }; - #undef C_O0_I1 #undef C_O0_I2 #undef C_O0_I3 @@ -752,6 +958,164 @@ static const TCGTargetOpDef constraint_sets[] = { #define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4) #define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4) +/* + * TCGOutOp is the base class for a set of structures that describe how + * to generate code for a given TCGOpcode. + * + * @static_constraint: + * C_NotImplemented: The TCGOpcode is not supported by the backend. + * C_Dynamic: Use @dynamic_constraint to select a constraint set + * based on any of @type, @flags, or host isa. + * Otherwise: The register allocation constrains for the TCGOpcode. + * + * Subclasses of TCGOutOp will define a set of output routines that may + * be used. Such routines will often be selected by the set of registers + * and constants that come out of register allocation. The set of + * routines that are provided will guide the set of constraints that are + * legal. 
In particular, assume that tcg_optimize() has done its job in + * swapping commutative operands and folding operations for which all + * operands are constant. + */ +typedef struct TCGOutOp { + TCGConstraintSetIndex static_constraint; + TCGConstraintSetIndex (*dynamic_constraint)(TCGType type, unsigned flags); +} TCGOutOp; + +typedef struct TCGOutOpAddSubCarry { + TCGOutOp base; + void (*out_rrr)(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2); + void (*out_rri)(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2); + void (*out_rir)(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, TCGReg a2); + void (*out_rii)(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, tcg_target_long a2); +} TCGOutOpAddSubCarry; + +typedef struct TCGOutOpBinary { + TCGOutOp base; + void (*out_rrr)(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2); + void (*out_rri)(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, tcg_target_long a2); +} TCGOutOpBinary; + +typedef struct TCGOutOpBrcond { + TCGOutOp base; + void (*out_rr)(TCGContext *s, TCGType type, TCGCond cond, + TCGReg a1, TCGReg a2, TCGLabel *label); + void (*out_ri)(TCGContext *s, TCGType type, TCGCond cond, + TCGReg a1, tcg_target_long a2, TCGLabel *label); +} TCGOutOpBrcond; + +typedef struct TCGOutOpBrcond2 { + TCGOutOp base; + void (*out)(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, + TCGArg bl, bool const_bl, + TCGArg bh, bool const_bh, TCGLabel *l); +} TCGOutOpBrcond2; + +typedef struct TCGOutOpBswap { + TCGOutOp base; + void (*out_rr)(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, unsigned flags); +} TCGOutOpBswap; + +typedef struct TCGOutOpDeposit { + TCGOutOp base; + void (*out_rrr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + TCGReg a2, unsigned ofs, unsigned len); + void (*out_rri)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + tcg_target_long a2, unsigned ofs, unsigned len); + void (*out_rzr)(TCGContext *s, TCGType type, TCGReg a0, + TCGReg a2, unsigned ofs, unsigned len); +} TCGOutOpDeposit; + +typedef struct TCGOutOpDivRem { + TCGOutOp base; + void (*out_rr01r)(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a4); +} TCGOutOpDivRem; + +typedef struct TCGOutOpExtract { + TCGOutOp base; + void (*out_rr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + unsigned ofs, unsigned len); +} TCGOutOpExtract; + +typedef struct TCGOutOpExtract2 { + TCGOutOp base; + void (*out_rrr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + TCGReg a2, unsigned shr); +} TCGOutOpExtract2; + +typedef struct TCGOutOpLoad { + TCGOutOp base; + void (*out)(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, intptr_t offset); +} TCGOutOpLoad; + +typedef struct TCGOutOpMovcond { + TCGOutOp base; + void (*out)(TCGContext *s, TCGType type, TCGCond cond, + TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2, + TCGArg vt, bool const_vt, TCGArg vf, bool consf_vf); +} TCGOutOpMovcond; + +typedef struct TCGOutOpMul2 { + TCGOutOp base; + void (*out_rrrr)(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3); +} TCGOutOpMul2; + +typedef struct TCGOutOpQemuLdSt { + TCGOutOp base; + void (*out)(TCGContext *s, TCGType type, TCGReg dest, + TCGReg addr, MemOpIdx oi); +} TCGOutOpQemuLdSt; + +typedef struct TCGOutOpQemuLdSt2 { + TCGOutOp base; + void (*out)(TCGContext *s, TCGType type, TCGReg dlo, TCGReg dhi, + TCGReg addr, MemOpIdx oi); +} TCGOutOpQemuLdSt2; + +typedef struct TCGOutOpUnary { + TCGOutOp base; + void (*out_rr)(TCGContext *s, TCGType 
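/*
 * Illustrative sketch, not part of the patch: TCGOutOp above acts as a
 * base "class" embedded as the first member of per-shape structures
 * such as TCGOutOpBinary, and the register allocator later downcasts
 * with container_of().  The standalone code below reproduces that
 * embedding and downcast with invented names; the emitters just print
 * instead of generating host code.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct ExOutOp {
    int static_constraint;
} ExOutOp;

typedef struct ExOutOpBinary {
    ExOutOp base;
    void (*out_rrr)(int a0, int a1, int a2);
    void (*out_rri)(int a0, int a1, long a2);
} ExOutOpBinary;

#define ex_container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void ex_add_rrr(int a0, int a1, int a2)
{
    printf("add r%d, r%d, r%d\n", a0, a1, a2);
}

static void ex_add_rri(int a0, int a1, long a2)
{
    printf("addi r%d, r%d, %ld\n", a0, a1, a2);
}

static ExOutOpBinary ex_outop_add = {
    .base.static_constraint = 0,
    .out_rrr = ex_add_rrr,
    .out_rri = ex_add_rri,
};

int main(void)
{
    /* A table entry holds only the base pointer ... */
    ExOutOp *op = &ex_outop_add.base;

    /* ... and emission downcasts to the shape it expects. */
    ExOutOpBinary *bin = ex_container_of(op, ExOutOpBinary, base);

    bin->out_rrr(0, 1, 2);   /* both sources in registers */
    bin->out_rri(0, 1, 42);  /* immediate second source   */
    return 0;
}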
type, TCGReg a0, TCGReg a1); +} TCGOutOpUnary; + +typedef struct TCGOutOpSetcond { + TCGOutOp base; + void (*out_rrr)(TCGContext *s, TCGType type, TCGCond cond, + TCGReg ret, TCGReg a1, TCGReg a2); + void (*out_rri)(TCGContext *s, TCGType type, TCGCond cond, + TCGReg ret, TCGReg a1, tcg_target_long a2); +} TCGOutOpSetcond; + +typedef struct TCGOutOpSetcond2 { + TCGOutOp base; + void (*out)(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg al, TCGReg ah, + TCGArg bl, bool const_bl, TCGArg bh, bool const_bh); +} TCGOutOpSetcond2; + +typedef struct TCGOutOpStore { + TCGOutOp base; + void (*out_r)(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, intptr_t offset); + void (*out_i)(TCGContext *s, TCGType type, tcg_target_long data, + TCGReg base, intptr_t offset); +} TCGOutOpStore; + +typedef struct TCGOutOpSubtract { + TCGOutOp base; + void (*out_rrr)(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2); + void (*out_rir)(TCGContext *s, TCGType type, + TCGReg a0, tcg_target_long a1, TCGReg a2); +} TCGOutOpSubtract; + #include "tcg-target.c.inc" #ifndef CONFIG_TCG_INTERPRETER @@ -761,6 +1125,144 @@ QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) - < MIN_TLB_MASK_TABLE_OFS); #endif +#if TCG_TARGET_REG_BITS == 64 +/* + * We require these functions for slow-path function calls. + * Adapt them generically for opcode output. + */ + +static void tgen_exts_i32_i64(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) +{ + tcg_out_exts_i32_i64(s, a0, a1); +} + +static const TCGOutOpUnary outop_exts_i32_i64 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_exts_i32_i64, +}; + +static void tgen_extu_i32_i64(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) +{ + tcg_out_extu_i32_i64(s, a0, a1); +} + +static const TCGOutOpUnary outop_extu_i32_i64 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extu_i32_i64, +}; + +static void tgen_extrl_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) +{ + tcg_out_extrl_i64_i32(s, a0, a1); +} + +static const TCGOutOpUnary outop_extrl_i64_i32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = TCG_TARGET_HAS_extr_i64_i32 ? tgen_extrl_i64_i32 : NULL, +}; +#endif + +static const TCGOutOp outop_goto_ptr = { + .static_constraint = C_O0_I1(r), +}; + +static const TCGOutOpLoad outop_ld = { + .base.static_constraint = C_O1_I1(r, r), + .out = tcg_out_ld, +}; + +/* + * Register V as the TCGOutOp for O. + * This verifies that V is of type T, otherwise give a nice compiler error. + * This prevents trivial mistakes within each arch/tcg-target.c.inc. + */ +#define OUTOP(O, T, V) [O] = _Generic(V, T: &V.base) + +/* Register allocation descriptions for every TCGOpcode. 
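/*
 * Illustrative sketch, not part of the patch: the OUTOP() macro above
 * uses C11 _Generic so that registering a descriptor of the wrong
 * structure type fails to compile rather than silently storing a bad
 * pointer.  A minimal standalone reproduction with invented types:
 */
#include <stdio.h>

typedef struct ExBase { int id; } ExBase;
typedef struct ExBinary { ExBase base; } ExBinary;
typedef struct ExUnary  { ExBase base; } ExUnary;

static ExBinary ex_add = { .base.id = 1 };
static ExUnary  ex_neg = { .base.id = 2 };

/* Compiles only when V really has type T; otherwise _Generic has no match. */
#define EX_OUTOP(T, V)  _Generic(V, T: &(V).base)

static ExBase *const ex_table[] = {
    EX_OUTOP(ExBinary, ex_add),
    EX_OUTOP(ExUnary,  ex_neg),
    /* EX_OUTOP(ExBinary, ex_neg) would be rejected at compile time. */
};

int main(void)
{
    for (size_t i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++) {
        printf("entry %zu has id %d\n", i, ex_table[i]->id);
    }
    return 0;
}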
*/ +static const TCGOutOp * const all_outop[NB_OPS] = { + OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add), + OUTOP(INDEX_op_addci, TCGOutOpAddSubCarry, outop_addci), + OUTOP(INDEX_op_addcio, TCGOutOpBinary, outop_addcio), + OUTOP(INDEX_op_addco, TCGOutOpBinary, outop_addco), + /* addc1o is implemented with set_carry + addcio */ + OUTOP(INDEX_op_addc1o, TCGOutOpBinary, outop_addcio), + OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and), + OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc), + OUTOP(INDEX_op_brcond, TCGOutOpBrcond, outop_brcond), + OUTOP(INDEX_op_bswap16, TCGOutOpBswap, outop_bswap16), + OUTOP(INDEX_op_bswap32, TCGOutOpBswap, outop_bswap32), + OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz), + OUTOP(INDEX_op_ctpop, TCGOutOpUnary, outop_ctpop), + OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz), + OUTOP(INDEX_op_deposit, TCGOutOpDeposit, outop_deposit), + OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs), + OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu), + OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2), + OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2), + OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv), + OUTOP(INDEX_op_extract, TCGOutOpExtract, outop_extract), + OUTOP(INDEX_op_extract2, TCGOutOpExtract2, outop_extract2), + OUTOP(INDEX_op_ld8u, TCGOutOpLoad, outop_ld8u), + OUTOP(INDEX_op_ld8s, TCGOutOpLoad, outop_ld8s), + OUTOP(INDEX_op_ld16u, TCGOutOpLoad, outop_ld16u), + OUTOP(INDEX_op_ld16s, TCGOutOpLoad, outop_ld16s), + OUTOP(INDEX_op_ld, TCGOutOpLoad, outop_ld), + OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond), + OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul), + OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2), + OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh), + OUTOP(INDEX_op_mulu2, TCGOutOpMul2, outop_mulu2), + OUTOP(INDEX_op_muluh, TCGOutOpBinary, outop_muluh), + OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand), + OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg), + OUTOP(INDEX_op_negsetcond, TCGOutOpSetcond, outop_negsetcond), + OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor), + OUTOP(INDEX_op_not, TCGOutOpUnary, outop_not), + OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or), + OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc), + OUTOP(INDEX_op_qemu_ld, TCGOutOpQemuLdSt, outop_qemu_ld), + OUTOP(INDEX_op_qemu_ld2, TCGOutOpQemuLdSt2, outop_qemu_ld2), + OUTOP(INDEX_op_qemu_st, TCGOutOpQemuLdSt, outop_qemu_st), + OUTOP(INDEX_op_qemu_st2, TCGOutOpQemuLdSt2, outop_qemu_st2), + OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems), + OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu), + OUTOP(INDEX_op_rotl, TCGOutOpBinary, outop_rotl), + OUTOP(INDEX_op_rotr, TCGOutOpBinary, outop_rotr), + OUTOP(INDEX_op_sar, TCGOutOpBinary, outop_sar), + OUTOP(INDEX_op_setcond, TCGOutOpSetcond, outop_setcond), + OUTOP(INDEX_op_sextract, TCGOutOpExtract, outop_sextract), + OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl), + OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr), + OUTOP(INDEX_op_st, TCGOutOpStore, outop_st), + OUTOP(INDEX_op_st8, TCGOutOpStore, outop_st8), + OUTOP(INDEX_op_st16, TCGOutOpStore, outop_st16), + OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub), + OUTOP(INDEX_op_subbi, TCGOutOpAddSubCarry, outop_subbi), + OUTOP(INDEX_op_subbio, TCGOutOpAddSubCarry, outop_subbio), + OUTOP(INDEX_op_subbo, TCGOutOpAddSubCarry, outop_subbo), + /* subb1o is implemented with set_borrow + subbio */ + OUTOP(INDEX_op_subb1o, TCGOutOpAddSubCarry, outop_subbio), + OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor), + + [INDEX_op_goto_ptr] = &outop_goto_ptr, + +#if TCG_TARGET_REG_BITS == 32 + 
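/*
 * Illustrative sketch, not part of the patch: INDEX_op_addc1o above is
 * routed to the same descriptor as addcio because "add with carry-in
 * forced to 1" is just add-with-carry after setting the carry flag.
 * The standalone check below verifies that identity on 32-bit values
 * using 64-bit arithmetic; no QEMU interfaces are involved.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Add with carry-in: returns the low 32 bits, updates *carry. */
static uint32_t ex_addcio(uint32_t a, uint32_t b, unsigned *carry)
{
    uint64_t sum = (uint64_t)a + b + *carry;

    *carry = (unsigned)(sum >> 32);
    return (uint32_t)sum;
}

int main(void)
{
    uint32_t a = 0xffffffffu, b = 0x00000001u;

    /* addc1o: behave as if a "set carry" instruction ran first. */
    unsigned carry = 1;
    uint32_t lo = ex_addcio(a, b, &carry);

    /* The same sum computed directly. */
    uint64_t ref = (uint64_t)a + b + 1;

    assert(lo == (uint32_t)ref);
    assert(carry == (unsigned)(ref >> 32));
    printf("low=0x%08" PRIx32 " carry-out=%u\n", lo, carry);
    return 0;
}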
OUTOP(INDEX_op_brcond2_i32, TCGOutOpBrcond2, outop_brcond2), + OUTOP(INDEX_op_setcond2_i32, TCGOutOpSetcond2, outop_setcond2), +#else + OUTOP(INDEX_op_bswap64, TCGOutOpUnary, outop_bswap64), + OUTOP(INDEX_op_ext_i32_i64, TCGOutOpUnary, outop_exts_i32_i64), + OUTOP(INDEX_op_extu_i32_i64, TCGOutOpUnary, outop_extu_i32_i64), + OUTOP(INDEX_op_extrl_i64_i32, TCGOutOpUnary, outop_extrl_i64_i32), + OUTOP(INDEX_op_extrh_i64_i32, TCGOutOpUnary, outop_extrh_i64_i32), + OUTOP(INDEX_op_ld32u, TCGOutOpLoad, outop_ld32u), + OUTOP(INDEX_op_ld32s, TCGOutOpLoad, outop_ld32s), + OUTOP(INDEX_op_st32, TCGOutOpStore, outop_st), +#endif +}; + +#undef OUTOP + /* * All TCG threads except the parent (i.e. the one that called tcg_context_init * and registered the target's TCG globals) must register with this function @@ -829,8 +1331,9 @@ void *tcg_malloc_internal(TCGContext *s, int size) p = s->pool_current; if (!p) { p = s->pool_first; - if (!p) + if (!p) { goto new_pool; + } } else { if (!p->next) { new_pool: @@ -849,8 +1352,8 @@ void *tcg_malloc_internal(TCGContext *s, int size) } } s->pool_current = p; - s->pool_cur = p->data + size; - s->pool_end = p->data + p->size; + s->pool_cur = (uintptr_t)p->data + size; + s->pool_end = (uintptr_t)p->data + p->size; return p->data; } @@ -862,7 +1365,7 @@ void tcg_pool_reset(TCGContext *s) g_free(p); } s->pool_first_large = NULL; - s->pool_cur = s->pool_end = NULL; + s->pool_cur = s->pool_end = 0; s->pool_current = NULL; } @@ -1293,39 +1796,19 @@ static void init_call_layout(TCGHelperInfo *info) } static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)]; -static void process_op_defs(TCGContext *s); +static void process_constraint_sets(void); static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type, TCGReg reg, const char *name); -static void tcg_context_init(unsigned max_cpus) +static void tcg_context_init(unsigned max_threads) { TCGContext *s = &tcg_init_ctx; - int op, total_args, n, i; - TCGOpDef *def; - TCGArgConstraint *args_ct; + int n, i; TCGTemp *ts; memset(s, 0, sizeof(*s)); s->nb_globals = 0; - /* Count total number of arguments and allocate the corresponding - space */ - total_args = 0; - for(op = 0; op < NB_OPS; op++) { - def = &tcg_op_defs[op]; - n = def->nb_iargs + def->nb_oargs; - total_args += n; - } - - args_ct = g_new0(TCGArgConstraint, total_args); - - for(op = 0; op < NB_OPS; op++) { - def = &tcg_op_defs[op]; - def->args_ct = args_ct; - n = def->nb_iargs + def->nb_oargs; - args_ct += n; - } - init_call_layout(&info_helper_ld32_mmu); init_call_layout(&info_helper_ld64_mmu); init_call_layout(&info_helper_ld128_mmu); @@ -1334,7 +1817,7 @@ static void tcg_context_init(unsigned max_cpus) init_call_layout(&info_helper_st128_mmu); tcg_target_init(s); - process_op_defs(s); + process_constraint_sets(); /* Reverse the order of the saved registers, assuming they're all at the start of tcg_target_reg_alloc_order. */ @@ -1356,15 +1839,15 @@ static void tcg_context_init(unsigned max_cpus) * In user-mode we simply share the init context among threads, since we * use a single region. See the documentation tcg_region_init() for the * reasoning behind this. - * In system-mode we will have at most max_cpus TCG threads. + * In system-mode we will have at most max_threads TCG threads. 
*/ #ifdef CONFIG_USER_ONLY tcg_ctxs = &tcg_ctx; tcg_cur_ctxs = 1; tcg_max_ctxs = 1; #else - tcg_max_ctxs = max_cpus; - tcg_ctxs = g_new0(TCGContext *, max_cpus); + tcg_max_ctxs = max_threads; + tcg_ctxs = g_new0(TCGContext *, max_threads); #endif tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0)); @@ -1372,10 +1855,10 @@ static void tcg_context_init(unsigned max_cpus) tcg_env = temp_tcgv_ptr(ts); } -void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus) +void tcg_init(size_t tb_size, int splitwx, unsigned max_threads) { - tcg_context_init(max_cpus); - tcg_region_init(tb_size, splitwx, max_cpus); + tcg_context_init(max_threads); + tcg_region_init(tb_size, splitwx, max_threads); } /* @@ -1399,7 +1882,6 @@ TranslationBlock *tcg_tb_alloc(TCGContext *s) goto retry; } qatomic_set(&s->code_gen_ptr, next); - s->data_gen_ptr = NULL; return tb; } @@ -1416,21 +1898,17 @@ void tcg_prologue_init(void) tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr); #endif -#ifdef TCG_TARGET_NEED_POOL_LABELS s->pool_labels = NULL; -#endif qemu_thread_jit_write(); /* Generate the prologue. */ tcg_target_qemu_prologue(s); -#ifdef TCG_TARGET_NEED_POOL_LABELS /* Allow the prologue to put e.g. guest_base into a pool entry. */ { int result = tcg_out_pool_finalize(s); tcg_debug_assert(result == 0); } -#endif prologue_size = tcg_current_code_size(s); perf_report_prologue(s->code_gen_ptr, prologue_size); @@ -1490,7 +1968,7 @@ void tcg_func_start(TCGContext *s) s->nb_temps = s->nb_globals; /* No temps have been previously allocated for size or locality. */ - memset(s->free_temps, 0, sizeof(s->free_temps)); + tcg_temp_ebb_reset_freed(s); /* No constant temps have been previously allocated. */ for (int i = 0; i < TCG_TYPE_COUNT; ++i) { @@ -1512,10 +1990,7 @@ void tcg_func_start(TCGContext *s) s->emit_before_op = NULL; QSIMPLEQ_INIT(&s->labels); - tcg_debug_assert(s->addr_type == TCG_TYPE_I32 || - s->addr_type == TCG_TYPE_I64); - - tcg_debug_assert(s->insn_start_words > 0); + tcg_debug_assert(s->addr_type <= TCG_TYPE_REG); } static TCGTemp *tcg_temp_alloc(TCGContext *s) @@ -1894,6 +2369,11 @@ TCGv_i64 tcg_constant_i64(int64_t val) return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val)); } +TCGv_vaddr tcg_constant_vaddr(uintptr_t val) +{ + return temp_tcgv_vaddr(tcg_constant_internal(TCG_TYPE_PTR, val)); +} + TCGv_ptr tcg_constant_ptr_int(intptr_t val) { return temp_tcgv_ptr(tcg_constant_internal(TCG_TYPE_PTR, val)); @@ -1932,12 +2412,34 @@ TCGTemp *tcgv_i32_temp(TCGv_i32 v) } #endif /* CONFIG_DEBUG_TCG */ -/* Return true if OP may appear in the opcode stream. - Test the runtime variable that controls each opcode. */ -bool tcg_op_supported(TCGOpcode op) +/* + * Return true if OP may appear in the opcode stream with TYPE. + * Test the runtime variable that controls each opcode. 
+ */ +bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags) { - const bool have_vec - = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256; + bool has_type; + + switch (type) { + case TCG_TYPE_I32: + has_type = true; + break; + case TCG_TYPE_I64: + has_type = TCG_TARGET_REG_BITS == 64; + break; + case TCG_TYPE_V64: + has_type = TCG_TARGET_HAS_v64; + break; + case TCG_TYPE_V128: + has_type = TCG_TARGET_HAS_v128; + break; + case TCG_TYPE_V256: + has_type = TCG_TARGET_HAS_v256; + break; + default: + has_type = false; + break; + } switch (op) { case INDEX_op_discard: @@ -1949,221 +2451,56 @@ bool tcg_op_supported(TCGOpcode op) case INDEX_op_exit_tb: case INDEX_op_goto_tb: case INDEX_op_goto_ptr: - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st_a64_i32: - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_ld_a64_i64: - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_st_a64_i64: return true; - case INDEX_op_qemu_st8_a32_i32: - case INDEX_op_qemu_st8_a64_i32: - return TCG_TARGET_HAS_qemu_st8_i32; - - case INDEX_op_qemu_ld_a32_i128: - case INDEX_op_qemu_ld_a64_i128: - case INDEX_op_qemu_st_a32_i128: - case INDEX_op_qemu_st_a64_i128: - return TCG_TARGET_HAS_qemu_ldst_i128; - - case INDEX_op_mov_i32: - case INDEX_op_setcond_i32: - case INDEX_op_brcond_i32: - case INDEX_op_movcond_i32: - case INDEX_op_ld8u_i32: - case INDEX_op_ld8s_i32: - case INDEX_op_ld16u_i32: - case INDEX_op_ld16s_i32: - case INDEX_op_ld_i32: - case INDEX_op_st8_i32: - case INDEX_op_st16_i32: - case INDEX_op_st_i32: - case INDEX_op_add_i32: - case INDEX_op_sub_i32: - case INDEX_op_neg_i32: - case INDEX_op_mul_i32: - case INDEX_op_and_i32: - case INDEX_op_or_i32: - case INDEX_op_xor_i32: - case INDEX_op_shl_i32: - case INDEX_op_shr_i32: - case INDEX_op_sar_i32: + case INDEX_op_qemu_ld: + case INDEX_op_qemu_st: + tcg_debug_assert(type <= TCG_TYPE_REG); return true; - case INDEX_op_negsetcond_i32: - return TCG_TARGET_HAS_negsetcond_i32; - case INDEX_op_div_i32: - case INDEX_op_divu_i32: - return TCG_TARGET_HAS_div_i32; - case INDEX_op_rem_i32: - case INDEX_op_remu_i32: - return TCG_TARGET_HAS_rem_i32; - case INDEX_op_div2_i32: - case INDEX_op_divu2_i32: - return TCG_TARGET_HAS_div2_i32; - case INDEX_op_rotl_i32: - case INDEX_op_rotr_i32: - return TCG_TARGET_HAS_rot_i32; - case INDEX_op_deposit_i32: - return TCG_TARGET_HAS_deposit_i32; - case INDEX_op_extract_i32: - return TCG_TARGET_HAS_extract_i32; - case INDEX_op_sextract_i32: - return TCG_TARGET_HAS_sextract_i32; - case INDEX_op_extract2_i32: - return TCG_TARGET_HAS_extract2_i32; - case INDEX_op_add2_i32: - return TCG_TARGET_HAS_add2_i32; - case INDEX_op_sub2_i32: - return TCG_TARGET_HAS_sub2_i32; - case INDEX_op_mulu2_i32: - return TCG_TARGET_HAS_mulu2_i32; - case INDEX_op_muls2_i32: - return TCG_TARGET_HAS_muls2_i32; - case INDEX_op_muluh_i32: - return TCG_TARGET_HAS_muluh_i32; - case INDEX_op_mulsh_i32: - return TCG_TARGET_HAS_mulsh_i32; - case INDEX_op_ext8s_i32: - return TCG_TARGET_HAS_ext8s_i32; - case INDEX_op_ext16s_i32: - return TCG_TARGET_HAS_ext16s_i32; - case INDEX_op_ext8u_i32: - return TCG_TARGET_HAS_ext8u_i32; - case INDEX_op_ext16u_i32: - return TCG_TARGET_HAS_ext16u_i32; - case INDEX_op_bswap16_i32: - return TCG_TARGET_HAS_bswap16_i32; - case INDEX_op_bswap32_i32: - return TCG_TARGET_HAS_bswap32_i32; - case INDEX_op_not_i32: - return TCG_TARGET_HAS_not_i32; - case INDEX_op_andc_i32: - return TCG_TARGET_HAS_andc_i32; - case INDEX_op_orc_i32: - return 
TCG_TARGET_HAS_orc_i32; - case INDEX_op_eqv_i32: - return TCG_TARGET_HAS_eqv_i32; - case INDEX_op_nand_i32: - return TCG_TARGET_HAS_nand_i32; - case INDEX_op_nor_i32: - return TCG_TARGET_HAS_nor_i32; - case INDEX_op_clz_i32: - return TCG_TARGET_HAS_clz_i32; - case INDEX_op_ctz_i32: - return TCG_TARGET_HAS_ctz_i32; - case INDEX_op_ctpop_i32: - return TCG_TARGET_HAS_ctpop_i32; + case INDEX_op_qemu_ld2: + case INDEX_op_qemu_st2: + if (TCG_TARGET_REG_BITS == 32) { + tcg_debug_assert(type == TCG_TYPE_I64); + return true; + } + tcg_debug_assert(type == TCG_TYPE_I128); + goto do_lookup; + + case INDEX_op_add: + case INDEX_op_and: + case INDEX_op_brcond: + case INDEX_op_deposit: + case INDEX_op_extract: + case INDEX_op_ld8u: + case INDEX_op_ld8s: + case INDEX_op_ld16u: + case INDEX_op_ld16s: + case INDEX_op_ld: + case INDEX_op_mov: + case INDEX_op_movcond: + case INDEX_op_negsetcond: + case INDEX_op_or: + case INDEX_op_setcond: + case INDEX_op_sextract: + case INDEX_op_st8: + case INDEX_op_st16: + case INDEX_op_st: + case INDEX_op_xor: + return has_type; case INDEX_op_brcond2_i32: case INDEX_op_setcond2_i32: return TCG_TARGET_REG_BITS == 32; - case INDEX_op_mov_i64: - case INDEX_op_setcond_i64: - case INDEX_op_brcond_i64: - case INDEX_op_movcond_i64: - case INDEX_op_ld8u_i64: - case INDEX_op_ld8s_i64: - case INDEX_op_ld16u_i64: - case INDEX_op_ld16s_i64: - case INDEX_op_ld32u_i64: - case INDEX_op_ld32s_i64: - case INDEX_op_ld_i64: - case INDEX_op_st8_i64: - case INDEX_op_st16_i64: - case INDEX_op_st32_i64: - case INDEX_op_st_i64: - case INDEX_op_add_i64: - case INDEX_op_sub_i64: - case INDEX_op_neg_i64: - case INDEX_op_mul_i64: - case INDEX_op_and_i64: - case INDEX_op_or_i64: - case INDEX_op_xor_i64: - case INDEX_op_shl_i64: - case INDEX_op_shr_i64: - case INDEX_op_sar_i64: + case INDEX_op_ld32u: + case INDEX_op_ld32s: + case INDEX_op_st32: case INDEX_op_ext_i32_i64: case INDEX_op_extu_i32_i64: - return TCG_TARGET_REG_BITS == 64; - - case INDEX_op_negsetcond_i64: - return TCG_TARGET_HAS_negsetcond_i64; - case INDEX_op_div_i64: - case INDEX_op_divu_i64: - return TCG_TARGET_HAS_div_i64; - case INDEX_op_rem_i64: - case INDEX_op_remu_i64: - return TCG_TARGET_HAS_rem_i64; - case INDEX_op_div2_i64: - case INDEX_op_divu2_i64: - return TCG_TARGET_HAS_div2_i64; - case INDEX_op_rotl_i64: - case INDEX_op_rotr_i64: - return TCG_TARGET_HAS_rot_i64; - case INDEX_op_deposit_i64: - return TCG_TARGET_HAS_deposit_i64; - case INDEX_op_extract_i64: - return TCG_TARGET_HAS_extract_i64; - case INDEX_op_sextract_i64: - return TCG_TARGET_HAS_sextract_i64; - case INDEX_op_extract2_i64: - return TCG_TARGET_HAS_extract2_i64; case INDEX_op_extrl_i64_i32: case INDEX_op_extrh_i64_i32: - return TCG_TARGET_HAS_extr_i64_i32; - case INDEX_op_ext8s_i64: - return TCG_TARGET_HAS_ext8s_i64; - case INDEX_op_ext16s_i64: - return TCG_TARGET_HAS_ext16s_i64; - case INDEX_op_ext32s_i64: - return TCG_TARGET_HAS_ext32s_i64; - case INDEX_op_ext8u_i64: - return TCG_TARGET_HAS_ext8u_i64; - case INDEX_op_ext16u_i64: - return TCG_TARGET_HAS_ext16u_i64; - case INDEX_op_ext32u_i64: - return TCG_TARGET_HAS_ext32u_i64; - case INDEX_op_bswap16_i64: - return TCG_TARGET_HAS_bswap16_i64; - case INDEX_op_bswap32_i64: - return TCG_TARGET_HAS_bswap32_i64; - case INDEX_op_bswap64_i64: - return TCG_TARGET_HAS_bswap64_i64; - case INDEX_op_not_i64: - return TCG_TARGET_HAS_not_i64; - case INDEX_op_andc_i64: - return TCG_TARGET_HAS_andc_i64; - case INDEX_op_orc_i64: - return TCG_TARGET_HAS_orc_i64; - case INDEX_op_eqv_i64: - return TCG_TARGET_HAS_eqv_i64; - case 
INDEX_op_nand_i64: - return TCG_TARGET_HAS_nand_i64; - case INDEX_op_nor_i64: - return TCG_TARGET_HAS_nor_i64; - case INDEX_op_clz_i64: - return TCG_TARGET_HAS_clz_i64; - case INDEX_op_ctz_i64: - return TCG_TARGET_HAS_ctz_i64; - case INDEX_op_ctpop_i64: - return TCG_TARGET_HAS_ctpop_i64; - case INDEX_op_add2_i64: - return TCG_TARGET_HAS_add2_i64; - case INDEX_op_sub2_i64: - return TCG_TARGET_HAS_sub2_i64; - case INDEX_op_mulu2_i64: - return TCG_TARGET_HAS_mulu2_i64; - case INDEX_op_muls2_i64: - return TCG_TARGET_HAS_muls2_i64; - case INDEX_op_muluh_i64: - return TCG_TARGET_HAS_muluh_i64; - case INDEX_op_mulsh_i64: - return TCG_TARGET_HAS_mulsh_i64; + return TCG_TARGET_REG_BITS == 64; case INDEX_op_mov_vec: case INDEX_op_dup_vec: @@ -2176,67 +2513,106 @@ bool tcg_op_supported(TCGOpcode op) case INDEX_op_or_vec: case INDEX_op_xor_vec: case INDEX_op_cmp_vec: - return have_vec; + return has_type; case INDEX_op_dup2_vec: - return have_vec && TCG_TARGET_REG_BITS == 32; + return has_type && TCG_TARGET_REG_BITS == 32; case INDEX_op_not_vec: - return have_vec && TCG_TARGET_HAS_not_vec; + return has_type && TCG_TARGET_HAS_not_vec; case INDEX_op_neg_vec: - return have_vec && TCG_TARGET_HAS_neg_vec; + return has_type && TCG_TARGET_HAS_neg_vec; case INDEX_op_abs_vec: - return have_vec && TCG_TARGET_HAS_abs_vec; + return has_type && TCG_TARGET_HAS_abs_vec; case INDEX_op_andc_vec: - return have_vec && TCG_TARGET_HAS_andc_vec; + return has_type && TCG_TARGET_HAS_andc_vec; case INDEX_op_orc_vec: - return have_vec && TCG_TARGET_HAS_orc_vec; + return has_type && TCG_TARGET_HAS_orc_vec; case INDEX_op_nand_vec: - return have_vec && TCG_TARGET_HAS_nand_vec; + return has_type && TCG_TARGET_HAS_nand_vec; case INDEX_op_nor_vec: - return have_vec && TCG_TARGET_HAS_nor_vec; + return has_type && TCG_TARGET_HAS_nor_vec; case INDEX_op_eqv_vec: - return have_vec && TCG_TARGET_HAS_eqv_vec; + return has_type && TCG_TARGET_HAS_eqv_vec; case INDEX_op_mul_vec: - return have_vec && TCG_TARGET_HAS_mul_vec; + return has_type && TCG_TARGET_HAS_mul_vec; case INDEX_op_shli_vec: case INDEX_op_shri_vec: case INDEX_op_sari_vec: - return have_vec && TCG_TARGET_HAS_shi_vec; + return has_type && TCG_TARGET_HAS_shi_vec; case INDEX_op_shls_vec: case INDEX_op_shrs_vec: case INDEX_op_sars_vec: - return have_vec && TCG_TARGET_HAS_shs_vec; + return has_type && TCG_TARGET_HAS_shs_vec; case INDEX_op_shlv_vec: case INDEX_op_shrv_vec: case INDEX_op_sarv_vec: - return have_vec && TCG_TARGET_HAS_shv_vec; + return has_type && TCG_TARGET_HAS_shv_vec; case INDEX_op_rotli_vec: - return have_vec && TCG_TARGET_HAS_roti_vec; + return has_type && TCG_TARGET_HAS_roti_vec; case INDEX_op_rotls_vec: - return have_vec && TCG_TARGET_HAS_rots_vec; + return has_type && TCG_TARGET_HAS_rots_vec; case INDEX_op_rotlv_vec: case INDEX_op_rotrv_vec: - return have_vec && TCG_TARGET_HAS_rotv_vec; + return has_type && TCG_TARGET_HAS_rotv_vec; case INDEX_op_ssadd_vec: case INDEX_op_usadd_vec: case INDEX_op_sssub_vec: case INDEX_op_ussub_vec: - return have_vec && TCG_TARGET_HAS_sat_vec; + return has_type && TCG_TARGET_HAS_sat_vec; case INDEX_op_smin_vec: case INDEX_op_umin_vec: case INDEX_op_smax_vec: case INDEX_op_umax_vec: - return have_vec && TCG_TARGET_HAS_minmax_vec; + return has_type && TCG_TARGET_HAS_minmax_vec; case INDEX_op_bitsel_vec: - return have_vec && TCG_TARGET_HAS_bitsel_vec; + return has_type && TCG_TARGET_HAS_bitsel_vec; case INDEX_op_cmpsel_vec: - return have_vec && TCG_TARGET_HAS_cmpsel_vec; + return has_type && TCG_TARGET_HAS_cmpsel_vec; default: - 
tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS); + if (op < INDEX_op_last_generic) { + const TCGOutOp *outop; + TCGConstraintSetIndex con_set; + + if (!has_type) { + return false; + } + + do_lookup: + outop = all_outop[op]; + tcg_debug_assert(outop != NULL); + + con_set = outop->static_constraint; + if (con_set == C_Dynamic) { + con_set = outop->dynamic_constraint(type, flags); + } + if (con_set >= 0) { + return true; + } + tcg_debug_assert(con_set == C_NotImplemented); + return false; + } + tcg_debug_assert(op < NB_OPS); return true; + + case INDEX_op_last_generic: + g_assert_not_reached(); } } +bool tcg_op_deposit_valid(TCGType type, unsigned ofs, unsigned len) +{ + unsigned width; + + tcg_debug_assert(type == TCG_TYPE_I32 || type == TCG_TYPE_I64); + width = (type == TCG_TYPE_I32 ? 32 : 64); + + tcg_debug_assert(ofs < width); + tcg_debug_assert(len > 0); + tcg_debug_assert(len <= width - ofs); + + return TCG_TARGET_deposit_valid(type, ofs, len); +} + static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs); static void tcg_gen_callN(void *func, TCGHelperInfo *info, @@ -2573,7 +2949,7 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) nb_oargs = 0; col += ne_fprintf(f, "\n ----"); - for (i = 0, k = s->insn_start_words; i < k; ++i) { + for (i = 0, k = INSN_START_WORDS; i < k; ++i) { col += ne_fprintf(f, " %016" PRIx64, tcg_get_insn_start_param(op, i)); } @@ -2610,17 +2986,23 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) col += ne_fprintf(f, ",%s", t); } } else { - col += ne_fprintf(f, " %s ", def->name); + if (def->flags & TCG_OPF_INT) { + col += ne_fprintf(f, " %s_i%d ", + def->name, + 8 * tcg_type_size(TCGOP_TYPE(op))); + } else if (def->flags & TCG_OPF_VECTOR) { + col += ne_fprintf(f, "%s v%d,e%d,", + def->name, + 8 * tcg_type_size(TCGOP_TYPE(op)), + 8 << TCGOP_VECE(op)); + } else { + col += ne_fprintf(f, " %s ", def->name); + } nb_oargs = def->nb_oargs; nb_iargs = def->nb_iargs; nb_cargs = def->nb_cargs; - if (def->flags & TCG_OPF_VECTOR) { - col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op), - 8 << TCGOP_VECE(op)); - } - k = 0; for (i = 0; i < nb_oargs; i++) { const char *sep = k ? 
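/*
 * Illustrative sketch, not part of the patch: tcg_op_deposit_valid()
 * above only range-checks (ofs, len) and then defers to the backend's
 * TCG_TARGET_deposit_valid().  The standalone helper below is a
 * reference model of what a 32-bit deposit computes, which is why those
 * particular range checks are the right ones; it is not QEMU code.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Replace bits [ofs, ofs+len) of "base" with the low "len" bits of "val". */
static uint32_t ex_deposit32(uint32_t base, uint32_t val,
                             unsigned ofs, unsigned len)
{
    assert(ofs < 32);          /* field must start inside the word   */
    assert(len > 0);           /* empty fields are rejected up front */
    assert(len <= 32 - ofs);   /* field must not run off the top     */

    uint32_t mask = (len == 32 ? ~0u : ((1u << len) - 1)) << ofs;

    return (base & ~mask) | ((val << ofs) & mask);
}

int main(void)
{
    /* Insert the 8-bit value 0xab at bit 16 of 0x11223344. */
    uint32_t r = ex_deposit32(0x11223344u, 0xabu, 16, 8);

    assert(r == 0x11ab3344u);
    printf("0x%08" PRIx32 "\n", r);
    return 0;
}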
"," : ""; @@ -2635,16 +3017,12 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) op->args[k++])); } switch (c) { - case INDEX_op_brcond_i32: - case INDEX_op_setcond_i32: - case INDEX_op_negsetcond_i32: - case INDEX_op_movcond_i32: + case INDEX_op_brcond: + case INDEX_op_setcond: + case INDEX_op_negsetcond: + case INDEX_op_movcond: case INDEX_op_brcond2_i32: case INDEX_op_setcond2_i32: - case INDEX_op_brcond_i64: - case INDEX_op_setcond_i64: - case INDEX_op_negsetcond_i64: - case INDEX_op_movcond_i64: case INDEX_op_cmp_vec: case INDEX_op_cmpsel_vec: if (op->args[k] < ARRAY_SIZE(cond_name) @@ -2655,20 +3033,10 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) } i = 1; break; - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_ld_a64_i32: - case INDEX_op_qemu_st_a32_i32: - case INDEX_op_qemu_st_a64_i32: - case INDEX_op_qemu_st8_a32_i32: - case INDEX_op_qemu_st8_a64_i32: - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_ld_a64_i64: - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_st_a64_i64: - case INDEX_op_qemu_ld_a32_i128: - case INDEX_op_qemu_ld_a64_i128: - case INDEX_op_qemu_st_a32_i128: - case INDEX_op_qemu_st_a64_i128: + case INDEX_op_qemu_ld: + case INDEX_op_qemu_st: + case INDEX_op_qemu_ld2: + case INDEX_op_qemu_st2: { const char *s_al, *s_op, *s_at; MemOpIdx oi = op->args[k++]; @@ -2691,11 +3059,9 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) i = 1; } break; - case INDEX_op_bswap16_i32: - case INDEX_op_bswap16_i64: - case INDEX_op_bswap32_i32: - case INDEX_op_bswap32_i64: - case INDEX_op_bswap64_i64: + case INDEX_op_bswap16: + case INDEX_op_bswap32: + case INDEX_op_bswap64: { TCGArg flags = op->args[k]; const char *name = NULL; @@ -2736,8 +3102,7 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) switch (c) { case INDEX_op_set_label: case INDEX_op_br: - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: + case INDEX_op_brcond: case INDEX_op_brcond2_i32: col += ne_fprintf(f, "%s$L%d", k ? 
"," : "", arg_label(op->args[k])->id); @@ -2890,10 +3255,12 @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) } /* we give more priority to constraints with less registers */ -static int get_constraint_priority(const TCGOpDef *def, int k) +static int get_constraint_priority(const TCGArgConstraint *arg_ct, int k) { - const TCGArgConstraint *arg_ct = &def->args_ct[k]; - int n = ctpop64(arg_ct->regs); + int n; + + arg_ct += k; + n = ctpop64(arg_ct->regs); /* * Sort constraints of a single register first, which includes output @@ -2922,10 +3289,9 @@ static int get_constraint_priority(const TCGOpDef *def, int k) } /* sort from highest priority to lowest */ -static void sort_constraints(TCGOpDef *def, int start, int n) +static void sort_constraints(TCGArgConstraint *a, int start, int n) { int i, j; - TCGArgConstraint *a = def->args_ct; for (i = 0; i < n; i++) { a[start + i].sort_index = start + i; @@ -2935,8 +3301,8 @@ static void sort_constraints(TCGOpDef *def, int start, int n) } for (i = 0; i < n - 1; i++) { for (j = i + 1; j < n; j++) { - int p1 = get_constraint_priority(def, a[start + i].sort_index); - int p2 = get_constraint_priority(def, a[start + j].sort_index); + int p1 = get_constraint_priority(a, a[start + i].sort_index); + int p2 = get_constraint_priority(a, a[start + j].sort_index); if (p1 < p2) { int tmp = a[start + i].sort_index; a[start + i].sort_index = a[start + j].sort_index; @@ -2946,56 +3312,39 @@ static void sort_constraints(TCGOpDef *def, int start, int n) } } -static void process_op_defs(TCGContext *s) -{ - TCGOpcode op; +static const TCGArgConstraint empty_cts[TCG_MAX_OP_ARGS]; +static TCGArgConstraint all_cts[ARRAY_SIZE(constraint_sets)][TCG_MAX_OP_ARGS]; - for (op = 0; op < NB_OPS; op++) { - TCGOpDef *def = &tcg_op_defs[op]; - const TCGTargetOpDef *tdefs; +static void process_constraint_sets(void) +{ + for (size_t c = 0; c < ARRAY_SIZE(constraint_sets); ++c) { + const TCGConstraintSet *tdefs = &constraint_sets[c]; + TCGArgConstraint *args_ct = all_cts[c]; + int nb_oargs = tdefs->nb_oargs; + int nb_iargs = tdefs->nb_iargs; + int nb_args = nb_oargs + nb_iargs; bool saw_alias_pair = false; - int i, o, i2, o2, nb_args; - - if (def->flags & TCG_OPF_NOT_PRESENT) { - continue; - } - - nb_args = def->nb_iargs + def->nb_oargs; - if (nb_args == 0) { - continue; - } - - /* - * Macro magic should make it impossible, but double-check that - * the array index is in range. Since the signness of an enum - * is implementation defined, force the result to unsigned. - */ - unsigned con_set = tcg_target_op_def(op); - tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets)); - tdefs = &constraint_sets[con_set]; - for (i = 0; i < nb_args; i++) { + for (int i = 0; i < nb_args; i++) { const char *ct_str = tdefs->args_ct_str[i]; - bool input_p = i >= def->nb_oargs; - - /* Incomplete TCGTargetOpDef entry. */ - tcg_debug_assert(ct_str != NULL); + bool input_p = i >= nb_oargs; + int o; switch (*ct_str) { case '0' ... '9': o = *ct_str - '0'; tcg_debug_assert(input_p); - tcg_debug_assert(o < def->nb_oargs); - tcg_debug_assert(def->args_ct[o].regs != 0); - tcg_debug_assert(!def->args_ct[o].oalias); - def->args_ct[i] = def->args_ct[o]; + tcg_debug_assert(o < nb_oargs); + tcg_debug_assert(args_ct[o].regs != 0); + tcg_debug_assert(!args_ct[o].oalias); + args_ct[i] = args_ct[o]; /* The output sets oalias. */ - def->args_ct[o].oalias = 1; - def->args_ct[o].alias_index = i; + args_ct[o].oalias = 1; + args_ct[o].alias_index = i; /* The input sets ialias. 
*/ - def->args_ct[i].ialias = 1; - def->args_ct[i].alias_index = o; - if (def->args_ct[i].pair) { + args_ct[i].ialias = 1; + args_ct[i].alias_index = o; + if (args_ct[i].pair) { saw_alias_pair = true; } tcg_debug_assert(ct_str[1] == '\0'); @@ -3003,41 +3352,41 @@ static void process_op_defs(TCGContext *s) case '&': tcg_debug_assert(!input_p); - def->args_ct[i].newreg = true; + args_ct[i].newreg = true; ct_str++; break; case 'p': /* plus */ /* Allocate to the register after the previous. */ - tcg_debug_assert(i > (input_p ? def->nb_oargs : 0)); + tcg_debug_assert(i > (input_p ? nb_oargs : 0)); o = i - 1; - tcg_debug_assert(!def->args_ct[o].pair); - tcg_debug_assert(!def->args_ct[o].ct); - def->args_ct[i] = (TCGArgConstraint){ + tcg_debug_assert(!args_ct[o].pair); + tcg_debug_assert(!args_ct[o].ct); + args_ct[i] = (TCGArgConstraint){ .pair = 2, .pair_index = o, - .regs = def->args_ct[o].regs << 1, - .newreg = def->args_ct[o].newreg, + .regs = args_ct[o].regs << 1, + .newreg = args_ct[o].newreg, }; - def->args_ct[o].pair = 1; - def->args_ct[o].pair_index = i; + args_ct[o].pair = 1; + args_ct[o].pair_index = i; tcg_debug_assert(ct_str[1] == '\0'); continue; case 'm': /* minus */ /* Allocate to the register before the previous. */ - tcg_debug_assert(i > (input_p ? def->nb_oargs : 0)); + tcg_debug_assert(i > (input_p ? nb_oargs : 0)); o = i - 1; - tcg_debug_assert(!def->args_ct[o].pair); - tcg_debug_assert(!def->args_ct[o].ct); - def->args_ct[i] = (TCGArgConstraint){ + tcg_debug_assert(!args_ct[o].pair); + tcg_debug_assert(!args_ct[o].ct); + args_ct[i] = (TCGArgConstraint){ .pair = 1, .pair_index = o, - .regs = def->args_ct[o].regs >> 1, - .newreg = def->args_ct[o].newreg, + .regs = args_ct[o].regs >> 1, + .newreg = args_ct[o].newreg, }; - def->args_ct[o].pair = 2; - def->args_ct[o].pair_index = i; + args_ct[o].pair = 2; + args_ct[o].pair_index = i; tcg_debug_assert(ct_str[1] == '\0'); continue; } @@ -3045,16 +3394,21 @@ static void process_op_defs(TCGContext *s) do { switch (*ct_str) { case 'i': - def->args_ct[i].ct |= TCG_CT_CONST; + args_ct[i].ct |= TCG_CT_CONST; + break; +#ifdef TCG_REG_ZERO + case 'z': + args_ct[i].ct |= TCG_CT_REG_ZERO; break; +#endif /* Include all of the target-specific constraints. */ #undef CONST #define CONST(CASE, MASK) \ - case CASE: def->args_ct[i].ct |= MASK; break; + case CASE: args_ct[i].ct |= MASK; break; #define REGS(CASE, MASK) \ - case CASE: def->args_ct[i].regs |= MASK; break; + case CASE: args_ct[i].regs |= MASK; break; #include "tcg-target-con-str.h" @@ -3065,15 +3419,12 @@ static void process_op_defs(TCGContext *s) case '&': case 'p': case 'm': - /* Typo in TCGTargetOpDef constraint. */ + /* Typo in TCGConstraintSet constraint. */ g_assert_not_reached(); } } while (*++ct_str != '\0'); } - /* TCGTargetOpDef entry with too much information? */ - tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL); - /* * Fix up output pairs that are aliased with inputs. * When we created the alias, we copied pair from the output. @@ -3094,51 +3445,53 @@ static void process_op_defs(TCGContext *s) * first output to pair=3, and the pair_index'es to match. */ if (saw_alias_pair) { - for (i = def->nb_oargs; i < nb_args; i++) { + for (int i = nb_oargs; i < nb_args; i++) { + int o, o2, i2; + /* * Since [0-9pm] must be alone in the constraint string, * the only way they can both be set is if the pair comes * from the output alias. 
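/*
 * Illustrative sketch, not part of the patch: process_constraint_sets()
 * above walks constraint strings such as "r", "ri", "0" or "&r" (a
 * leading digit aliases an input to an output, '&' marks an
 * early-clobber output, remaining letters accumulate register-set and
 * constant flags), and sort_constraints() then orders operands so the
 * most restrictive ones, i.e. the fewest candidate registers, are
 * allocated first.  The toy program below does both with invented flag
 * values and register masks.
 */
#include <stdio.h>
#include <string.h>

typedef struct {
    const char *str;     /* original constraint string      */
    unsigned regs;       /* allowed register mask           */
    int ct_const;        /* immediate operand accepted      */
    int newreg;          /* early-clobber output            */
    int alias_index;     /* >= 0: input aliased to output N */
} ExArgConstraint;

static void ex_parse(const char *str, ExArgConstraint *c)
{
    memset(c, 0, sizeof(*c));
    c->str = str;
    c->alias_index = -1;

    if (*str >= '0' && *str <= '9') {
        c->alias_index = *str - '0';     /* shares output N's register */
        c->regs = 1u << c->alias_index;  /* stand-in: a single register */
        return;
    }
    if (*str == '&') {
        c->newreg = 1;
        str++;
    }
    for (; *str; str++) {
        switch (*str) {
        case 'r':
            c->regs |= 0xffffu;  /* stand-in for "any general register" */
            break;
        case 'i':
            c->ct_const = 1;
            break;
        }
    }
}

static int ex_popcount(unsigned x)
{
    int n = 0;

    for (; x; x &= x - 1) {
        n++;
    }
    return n;
}

int main(void)
{
    const char *strs[] = { "ri", "0", "&r" };
    ExArgConstraint c[3];

    for (int i = 0; i < 3; i++) {
        ex_parse(strs[i], &c[i]);
    }

    /* Selection sort: fewest candidate registers allocated first. */
    for (int i = 0; i < 2; i++) {
        for (int j = i + 1; j < 3; j++) {
            if (ex_popcount(c[j].regs) < ex_popcount(c[i].regs)) {
                ExArgConstraint tmp = c[i];
                c[i] = c[j];
                c[j] = tmp;
            }
        }
    }

    for (int i = 0; i < 3; i++) {
        printf("step %d: %-3s regs=0x%04x const=%d newreg=%d alias=%d\n",
               i, c[i].str, c[i].regs, c[i].ct_const, c[i].newreg,
               c[i].alias_index);
    }
    return 0;
}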
*/ - if (!def->args_ct[i].ialias) { + if (!args_ct[i].ialias) { continue; } - switch (def->args_ct[i].pair) { + switch (args_ct[i].pair) { case 0: break; case 1: - o = def->args_ct[i].alias_index; - o2 = def->args_ct[o].pair_index; - tcg_debug_assert(def->args_ct[o].pair == 1); - tcg_debug_assert(def->args_ct[o2].pair == 2); - if (def->args_ct[o2].oalias) { + o = args_ct[i].alias_index; + o2 = args_ct[o].pair_index; + tcg_debug_assert(args_ct[o].pair == 1); + tcg_debug_assert(args_ct[o2].pair == 2); + if (args_ct[o2].oalias) { /* Case 1a */ - i2 = def->args_ct[o2].alias_index; - tcg_debug_assert(def->args_ct[i2].pair == 2); - def->args_ct[i2].pair_index = i; - def->args_ct[i].pair_index = i2; + i2 = args_ct[o2].alias_index; + tcg_debug_assert(args_ct[i2].pair == 2); + args_ct[i2].pair_index = i; + args_ct[i].pair_index = i2; } else { /* Case 1b */ - def->args_ct[i].pair_index = i; + args_ct[i].pair_index = i; } break; case 2: - o = def->args_ct[i].alias_index; - o2 = def->args_ct[o].pair_index; - tcg_debug_assert(def->args_ct[o].pair == 2); - tcg_debug_assert(def->args_ct[o2].pair == 1); - if (def->args_ct[o2].oalias) { + o = args_ct[i].alias_index; + o2 = args_ct[o].pair_index; + tcg_debug_assert(args_ct[o].pair == 2); + tcg_debug_assert(args_ct[o2].pair == 1); + if (args_ct[o2].oalias) { /* Case 1a */ - i2 = def->args_ct[o2].alias_index; - tcg_debug_assert(def->args_ct[i2].pair == 1); - def->args_ct[i2].pair_index = i; - def->args_ct[i].pair_index = i2; + i2 = args_ct[o2].alias_index; + tcg_debug_assert(args_ct[i2].pair == 1); + args_ct[i2].pair_index = i; + args_ct[i].pair_index = i2; } else { /* Case 2 */ - def->args_ct[i].pair = 3; - def->args_ct[o2].pair = 3; - def->args_ct[i].pair_index = o2; - def->args_ct[o2].pair_index = i; + args_ct[i].pair = 3; + args_ct[o2].pair = 3; + args_ct[i].pair_index = o2; + args_ct[o2].pair_index = i; } break; default: @@ -3148,9 +3501,40 @@ static void process_op_defs(TCGContext *s) } /* sort the constraints (XXX: this is just an heuristic) */ - sort_constraints(def, 0, def->nb_oargs); - sort_constraints(def, def->nb_oargs, def->nb_iargs); + sort_constraints(args_ct, 0, nb_oargs); + sort_constraints(args_ct, nb_oargs, nb_iargs); + } +} + +static const TCGArgConstraint *opcode_args_ct(const TCGOp *op) +{ + TCGOpcode opc = op->opc; + TCGType type = TCGOP_TYPE(op); + unsigned flags = TCGOP_FLAGS(op); + const TCGOpDef *def = &tcg_op_defs[opc]; + const TCGOutOp *outop = all_outop[opc]; + TCGConstraintSetIndex con_set; + + if (def->flags & TCG_OPF_NOT_PRESENT) { + return empty_cts; + } + + if (outop) { + con_set = outop->static_constraint; + if (con_set == C_Dynamic) { + con_set = outop->dynamic_constraint(type, flags); + } + } else { + con_set = tcg_target_op_def(opc, type, flags); } + tcg_debug_assert(con_set >= 0); + tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets)); + + /* The constraint arguments must match TCGOpcode arguments. 
*/ + tcg_debug_assert(constraint_sets[con_set].nb_oargs == def->nb_oargs); + tcg_debug_assert(constraint_sets[con_set].nb_iargs == def->nb_iargs); + + return all_cts[con_set]; } static void remove_label_use(TCGOp *op, int idx) @@ -3173,8 +3557,7 @@ void tcg_op_remove(TCGContext *s, TCGOp *op) case INDEX_op_br: remove_label_use(op, 0); break; - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: + case INDEX_op_brcond: remove_label_use(op, 3); break; case INDEX_op_brcond2_i32: @@ -3246,17 +3629,21 @@ TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs) } TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, - TCGOpcode opc, unsigned nargs) + TCGOpcode opc, TCGType type, unsigned nargs) { TCGOp *new_op = tcg_op_alloc(opc, nargs); + + TCGOP_TYPE(new_op) = type; QTAILQ_INSERT_BEFORE(old_op, new_op, link); return new_op; } TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, - TCGOpcode opc, unsigned nargs) + TCGOpcode opc, TCGType type, unsigned nargs) { TCGOp *new_op = tcg_op_alloc(opc, nargs); + + TCGOP_TYPE(new_op) = type; QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link); return new_op; } @@ -3271,8 +3658,7 @@ static void move_label_uses(TCGLabel *to, TCGLabel *from) case INDEX_op_br: op->args[0] = label_arg(to); break; - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: + case INDEX_op_brcond: op->args[3] = label_arg(to); break; case INDEX_op_brcond2_i32: @@ -3591,6 +3977,17 @@ liveness_pass_0(TCGContext *s) } } +static void assert_carry_dead(TCGContext *s) +{ + /* + * Carry operations can be separated by a few insns like mov, + * load or store, but they should always be "close", and + * carry-out operations should always be paired with carry-in. + * At various boundaries, carry must have been consumed. + */ + tcg_debug_assert(!s->carry_live); +} + /* Liveness analysis : update the opc_arg_life array to tell if a given input arguments is dead. Instructions updating dead temporaries are removed. */ @@ -3601,27 +3998,28 @@ liveness_pass_1(TCGContext *s) int nb_temps = s->nb_temps; TCGOp *op, *op_prev; TCGRegSet *prefs; - int i; prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps); - for (i = 0; i < nb_temps; ++i) { + for (int i = 0; i < nb_temps; ++i) { s->temps[i].state_ptr = prefs + i; } /* ??? Should be redundant with the exit_tb that ends the TB. */ la_func_end(s, nb_globals, nb_temps); + s->carry_live = false; QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) { int nb_iargs, nb_oargs; TCGOpcode opc_new, opc_new2; - bool have_opc_new2; TCGLifeData arg_life = 0; TCGTemp *ts; TCGOpcode opc = op->opc; - const TCGOpDef *def = &tcg_op_defs[opc]; + const TCGOpDef *def; + const TCGArgConstraint *args_ct; switch (opc) { case INDEX_op_call: + assert_carry_dead(s); { const TCGHelperInfo *info = tcg_call_info(op); int call_flags = tcg_call_flags(op); @@ -3631,7 +4029,7 @@ liveness_pass_1(TCGContext *s) /* pure functions can be removed if their result is unused */ if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) { - for (i = 0; i < nb_oargs; i++) { + for (int i = 0; i < nb_oargs; i++) { ts = arg_temp(op->args[i]); if (ts->state != TS_DEAD) { goto do_not_remove_call; @@ -3642,7 +4040,7 @@ liveness_pass_1(TCGContext *s) do_not_remove_call: /* Output args are dead. */ - for (i = 0; i < nb_oargs; i++) { + for (int i = 0; i < nb_oargs; i++) { ts = arg_temp(op->args[i]); if (ts->state & TS_DEAD) { arg_life |= DEAD_ARG << i; @@ -3665,7 +4063,7 @@ liveness_pass_1(TCGContext *s) } /* Record arguments that die in this helper. 
*/ - for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { + for (int i = nb_oargs; i < nb_iargs + nb_oargs; i++) { ts = arg_temp(op->args[i]); if (ts->state & TS_DEAD) { arg_life |= DEAD_ARG << i; @@ -3685,7 +4083,7 @@ liveness_pass_1(TCGContext *s) * order so that if a temp is used more than once, the stack * reset to max happens before the register reset to 0. */ - for (i = nb_iargs - 1; i >= 0; i--) { + for (int i = nb_iargs - 1; i >= 0; i--) { const TCGCallArgumentLoc *loc = &info->in[i]; ts = arg_temp(op->args[nb_oargs + i]); @@ -3713,7 +4111,7 @@ liveness_pass_1(TCGContext *s) * If a temp is used once, this produces a single set bit; * if a temp is used multiple times, this produces a set. */ - for (i = 0; i < nb_iargs; i++) { + for (int i = 0; i < nb_iargs; i++) { const TCGCallArgumentLoc *loc = &info->in[i]; ts = arg_temp(op->args[nb_oargs + i]); @@ -3733,6 +4131,7 @@ liveness_pass_1(TCGContext *s) } break; case INDEX_op_insn_start: + assert_carry_dead(s); break; case INDEX_op_discard: /* mark the temporary as dead */ @@ -3741,62 +4140,15 @@ liveness_pass_1(TCGContext *s) la_reset_pref(ts); break; - case INDEX_op_add2_i32: - opc_new = INDEX_op_add_i32; - goto do_addsub2; - case INDEX_op_sub2_i32: - opc_new = INDEX_op_sub_i32; - goto do_addsub2; - case INDEX_op_add2_i64: - opc_new = INDEX_op_add_i64; - goto do_addsub2; - case INDEX_op_sub2_i64: - opc_new = INDEX_op_sub_i64; - do_addsub2: - nb_iargs = 4; - nb_oargs = 2; - /* Test if the high part of the operation is dead, but not - the low part. The result can be optimized to a simple - add or sub. This happens often for x86_64 guest when the - cpu mode is set to 32 bit. */ - if (arg_temp(op->args[1])->state == TS_DEAD) { - if (arg_temp(op->args[0])->state == TS_DEAD) { - goto do_remove; - } - /* Replace the opcode and adjust the args in place, - leaving 3 unused args at the end. */ - op->opc = opc = opc_new; - op->args[1] = op->args[2]; - op->args[2] = op->args[4]; - /* Fall through and mark the single-word operation live. */ - nb_iargs = 2; - nb_oargs = 1; - } - goto do_not_remove; - - case INDEX_op_mulu2_i32: - opc_new = INDEX_op_mul_i32; - opc_new2 = INDEX_op_muluh_i32; - have_opc_new2 = TCG_TARGET_HAS_muluh_i32; - goto do_mul2; - case INDEX_op_muls2_i32: - opc_new = INDEX_op_mul_i32; - opc_new2 = INDEX_op_mulsh_i32; - have_opc_new2 = TCG_TARGET_HAS_mulsh_i32; - goto do_mul2; - case INDEX_op_mulu2_i64: - opc_new = INDEX_op_mul_i64; - opc_new2 = INDEX_op_muluh_i64; - have_opc_new2 = TCG_TARGET_HAS_muluh_i64; - goto do_mul2; - case INDEX_op_muls2_i64: - opc_new = INDEX_op_mul_i64; - opc_new2 = INDEX_op_mulsh_i64; - have_opc_new2 = TCG_TARGET_HAS_mulsh_i64; + case INDEX_op_muls2: + opc_new = INDEX_op_mul; + opc_new2 = INDEX_op_mulsh; goto do_mul2; + case INDEX_op_mulu2: + opc_new = INDEX_op_mul; + opc_new2 = INDEX_op_muluh; do_mul2: - nb_iargs = 2; - nb_oargs = 2; + assert_carry_dead(s); if (arg_temp(op->args[1])->state == TS_DEAD) { if (arg_temp(op->args[0])->state == TS_DEAD) { /* Both parts of the operation are dead. */ @@ -3806,7 +4158,8 @@ liveness_pass_1(TCGContext *s) op->opc = opc = opc_new; op->args[1] = op->args[2]; op->args[2] = op->args[3]; - } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) { + } else if (arg_temp(op->args[0])->state == TS_DEAD && + tcg_op_supported(opc_new2, TCGOP_TYPE(op), 0)) { /* The low part of the operation is dead; generate the high. 
*/ op->opc = opc = opc_new2; op->args[0] = op->args[1]; @@ -3816,19 +4169,94 @@ liveness_pass_1(TCGContext *s) goto do_not_remove; } /* Mark the single-word operation live. */ - nb_oargs = 1; goto do_not_remove; - default: - /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */ - nb_iargs = def->nb_iargs; - nb_oargs = def->nb_oargs; + case INDEX_op_addco: + if (s->carry_live) { + goto do_not_remove; + } + op->opc = opc = INDEX_op_add; + goto do_default; + + case INDEX_op_addcio: + if (s->carry_live) { + goto do_not_remove; + } + op->opc = opc = INDEX_op_addci; + goto do_default; + + case INDEX_op_subbo: + if (s->carry_live) { + goto do_not_remove; + } + /* Lower to sub, but this may also require canonicalization. */ + op->opc = opc = INDEX_op_sub; + ts = arg_temp(op->args[2]); + if (ts->kind == TEMP_CONST) { + ts = tcg_constant_internal(ts->type, -ts->val); + if (ts->state_ptr == NULL) { + tcg_debug_assert(temp_idx(ts) == nb_temps); + nb_temps++; + ts->state_ptr = tcg_malloc(sizeof(TCGRegSet)); + ts->state = TS_DEAD; + la_reset_pref(ts); + } + op->args[2] = temp_arg(ts); + op->opc = opc = INDEX_op_add; + } + goto do_default; + + case INDEX_op_subbio: + if (s->carry_live) { + goto do_not_remove; + } + op->opc = opc = INDEX_op_subbi; + goto do_default; + + case INDEX_op_addc1o: + if (s->carry_live) { + goto do_not_remove; + } + /* Lower to add, add +1. */ + op_prev = tcg_op_insert_before(s, op, INDEX_op_add, + TCGOP_TYPE(op), 3); + op_prev->args[0] = op->args[0]; + op_prev->args[1] = op->args[1]; + op_prev->args[2] = op->args[2]; + op->opc = opc = INDEX_op_add; + op->args[1] = op->args[0]; + ts = arg_temp(op->args[0]); + ts = tcg_constant_internal(ts->type, 1); + op->args[2] = temp_arg(ts); + goto do_default; + + case INDEX_op_subb1o: + if (s->carry_live) { + goto do_not_remove; + } + /* Lower to sub, add -1. */ + op_prev = tcg_op_insert_before(s, op, INDEX_op_sub, + TCGOP_TYPE(op), 3); + op_prev->args[0] = op->args[0]; + op_prev->args[1] = op->args[1]; + op_prev->args[2] = op->args[2]; + op->opc = opc = INDEX_op_add; + op->args[1] = op->args[0]; + ts = arg_temp(op->args[0]); + ts = tcg_constant_internal(ts->type, -1); + op->args[2] = temp_arg(ts); + goto do_default; - /* Test if the operation can be removed because all - its outputs are dead. We assume that nb_oargs == 0 - implies side effects */ - if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) { - for (i = 0; i < nb_oargs; i++) { + default: + do_default: + /* + * Test if the operation can be removed because all + * its outputs are dead. We assume that nb_oargs == 0 + * implies side effects. + */ + def = &tcg_op_defs[opc]; + if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && def->nb_oargs != 0) { + for (int i = def->nb_oargs - 1; i >= 0; i--) { if (arg_temp(op->args[i])->state != TS_DEAD) { goto do_not_remove; } @@ -3842,7 +4270,11 @@ liveness_pass_1(TCGContext *s) break; do_not_remove: - for (i = 0; i < nb_oargs; i++) { + def = &tcg_op_defs[opc]; + nb_iargs = def->nb_iargs; + nb_oargs = def->nb_oargs; + + for (int i = 0; i < nb_oargs; i++) { ts = arg_temp(op->args[i]); /* Remember the preference of the uses that followed. */ @@ -3863,12 +4295,16 @@ liveness_pass_1(TCGContext *s) /* If end of basic block, update. 
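/*
 * Illustrative sketch, not part of the patch: the do_mul2 block above
 * rewrites mulu2/muls2 to a plain mul when the high half of the result
 * is dead, or to muluh/mulsh when only the high half is live.  The
 * standalone check below shows that the two halves really are
 * independent single-output operations on a 32-bit example.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ex_mul(uint32_t a, uint32_t b)      /* low 32 bits  */
{
    return a * b;
}

static uint32_t ex_muluh(uint32_t a, uint32_t b)    /* high 32 bits */
{
    return (uint32_t)(((uint64_t)a * b) >> 32);
}

int main(void)
{
    uint32_t a = 0xdeadbeefu, b = 0x12345678u;
    uint64_t full = (uint64_t)a * b;                /* what mulu2 produces */

    assert(ex_mul(a, b) == (uint32_t)full);           /* low half only  */
    assert(ex_muluh(a, b) == (uint32_t)(full >> 32)); /* high half only */

    printf("mulu2 -> lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n",
           ex_mul(a, b), ex_muluh(a, b));
    return 0;
}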
*/ if (def->flags & TCG_OPF_BB_EXIT) { + assert_carry_dead(s); la_func_end(s, nb_globals, nb_temps); } else if (def->flags & TCG_OPF_COND_BRANCH) { + assert_carry_dead(s); la_bb_sync(s, nb_globals, nb_temps); } else if (def->flags & TCG_OPF_BB_END) { + assert_carry_dead(s); la_bb_end(s, nb_globals, nb_temps); } else if (def->flags & TCG_OPF_SIDE_EFFECTS) { + assert_carry_dead(s); la_global_sync(s, nb_globals); if (def->flags & TCG_OPF_CALL_CLOBBER) { la_cross_call(s, nb_temps); @@ -3876,15 +4312,18 @@ liveness_pass_1(TCGContext *s) } /* Record arguments that die in this opcode. */ - for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { + for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) { ts = arg_temp(op->args[i]); if (ts->state & TS_DEAD) { arg_life |= DEAD_ARG << i; } } + if (def->flags & TCG_OPF_CARRY_OUT) { + s->carry_live = false; + } /* Input arguments are live for preceding opcodes. */ - for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { + for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) { ts = arg_temp(op->args[i]); if (ts->state & TS_DEAD) { /* For operands that were dead, initially allow @@ -3893,11 +4332,13 @@ liveness_pass_1(TCGContext *s) ts->state &= ~TS_DEAD; } } + if (def->flags & TCG_OPF_CARRY_IN) { + s->carry_live = true; + } /* Incorporate constraints for this operand. */ switch (opc) { - case INDEX_op_mov_i32: - case INDEX_op_mov_i64: + case INDEX_op_mov: /* Note that these are TCG_OPF_NOT_PRESENT and do not have proper constraints. That said, special case moves to propagate preferences backward. */ @@ -3908,8 +4349,9 @@ liveness_pass_1(TCGContext *s) break; default: - for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { - const TCGArgConstraint *ct = &def->args_ct[i]; + args_ct = opcode_args_ct(op); + for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) { + const TCGArgConstraint *ct = &args_ct[i]; TCGRegSet set, *pset; ts = arg_temp(op->args[i]); @@ -3932,6 +4374,7 @@ liveness_pass_1(TCGContext *s) } op->life = arg_life; } + assert_carry_dead(s); } /* Liveness analysis: Convert indirect regs to direct temporaries. */ @@ -4002,10 +4445,8 @@ liveness_pass_2(TCGContext *s) arg_ts = arg_temp(op->args[i]); dir_ts = arg_ts->state_ptr; if (dir_ts && arg_ts->state == TS_DEAD) { - TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32 - ? INDEX_op_ld_i32 - : INDEX_op_ld_i64); - TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3); + TCGOp *lop = tcg_op_insert_before(s, op, INDEX_op_ld, + arg_ts->type, 3); lop->args[0] = temp_arg(dir_ts); lop->args[1] = temp_arg(arg_ts->mem_base); @@ -4054,7 +4495,7 @@ liveness_pass_2(TCGContext *s) } /* Outputs become available. */ - if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) { + if (opc == INDEX_op_mov) { arg_ts = arg_temp(op->args[0]); dir_ts = arg_ts->state_ptr; if (dir_ts) { @@ -4065,10 +4506,8 @@ liveness_pass_2(TCGContext *s) arg_ts->state = 0; if (NEED_SYNC_ARG(0)) { - TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32 - ? INDEX_op_st_i32 - : INDEX_op_st_i64); - TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3); + TCGOp *sop = tcg_op_insert_after(s, op, INDEX_op_st, + arg_ts->type, 3); TCGTemp *out_ts = dir_ts; if (IS_DEAD_ARG(0)) { @@ -4101,10 +4540,8 @@ liveness_pass_2(TCGContext *s) /* Sync outputs upon their last write. */ if (NEED_SYNC_ARG(i)) { - TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32 - ? 
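/*
 * Illustrative sketch, not part of the patch: when liveness finds the
 * borrow output dead (tracked by the TCG_OPF_CARRY_OUT / CARRY_IN
 * handling just above), subbo degrades to a plain sub, a constant
 * subtrahend is folded into an add of its negation, and subb1o becomes
 * a sub followed by an add of -1.  The identities behind those
 * rewrites, checked in 32-bit modular arithmetic:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t a = 0x80000000u, c = 0x1234u;

    /* subbo with a dead borrow-out and a constant: a - c == a + (-c). */
    assert(a - c == a + (0u - c));

    /* subb1o with a dead borrow-out: a - c - 1 == (a - c) + (-1). */
    assert(a - c - 1 == (a - c) + 0xffffffffu);

    return 0;
}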
INDEX_op_st_i32 - : INDEX_op_st_i64); - TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3); + TCGOp *sop = tcg_op_insert_after(s, op, INDEX_op_st, + arg_ts->type, 3); sop->args[0] = temp_arg(dir_ts); sop->args[1] = temp_arg(arg_ts->mem_base); @@ -4462,6 +4899,9 @@ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs, ts->mem_coherent = 0; break; case TEMP_VAL_MEM: + if (!ts->mem_allocated) { + temp_allocate_frame(s, ts); + } reg = tcg_reg_alloc(s, desired_regs, allocated_regs, preferred_regs, ts->indirect_base); tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset); @@ -4514,9 +4954,8 @@ static void sync_globals(TCGContext *s, TCGRegSet allocated_regs) all globals are stored at their canonical location. */ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs) { - int i; - - for (i = s->nb_globals; i < s->nb_temps; i++) { + assert_carry_dead(s); + for (int i = s->nb_globals; i < s->nb_temps; i++) { TCGTemp *ts = &s->temps[i]; switch (ts->kind) { @@ -4547,6 +4986,7 @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs) */ static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs) { + assert_carry_dead(s); sync_globals(s, allocated_regs); for (int i = s->nb_globals; i < s->nb_temps; i++) { @@ -4696,6 +5136,7 @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op) { const TCGLifeData arg_life = op->life; TCGRegSet dup_out_regs, dup_in_regs; + const TCGArgConstraint *dup_args_ct; TCGTemp *its, *ots; TCGType itype, vtype; unsigned vece; @@ -4710,11 +5151,11 @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op) itype = its->type; vece = TCGOP_VECE(op); - vtype = TCGOP_VECL(op) + TCG_TYPE_V64; + vtype = TCGOP_TYPE(op); if (its->val_type == TEMP_VAL_CONST) { /* Propagate constant via movi -> dupi. */ - tcg_target_ulong val = its->val; + tcg_target_ulong val = dup_const(vece, its->val); if (IS_DEAD_ARG(1)) { temp_dead(s, its); } @@ -4722,8 +5163,9 @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op) return; } - dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs; - dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs; + dup_args_ct = opcode_args_ct(op); + dup_out_regs = dup_args_ct[0].regs; + dup_in_regs = dup_args_ct[1].regs; /* Allocate the output register now. 
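/*
 * Illustrative sketch, not part of the patch: tcg_reg_alloc_dup() above
 * now replicates a constant scalar across every vector lane (via
 * dup_const) before propagating it as a vector immediate.  A standalone
 * reference for that replication, where "vece" is log2 of the element
 * size in bytes:
 */
#include <assert.h>
#include <stdint.h>

static uint64_t ex_dup_const(unsigned vece, uint64_t val)
{
    switch (vece) {
    case 0: /*  8-bit elements */
        return (val & 0xffull) * 0x0101010101010101ull;
    case 1: /* 16-bit elements */
        return (val & 0xffffull) * 0x0001000100010001ull;
    case 2: /* 32-bit elements */
        return (val & 0xffffffffull) * 0x0000000100000001ull;
    default: /* 64-bit elements */
        return val;
    }
}

int main(void)
{
    assert(ex_dup_const(0, 0xab) == 0xababababababababull);
    assert(ex_dup_const(1, 0x1234) == 0x1234123412341234ull);
    assert(ex_dup_const(2, 0xdeadbeef) == 0xdeadbeefdeadbeefull);
    return 0;
}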
*/ if (ots->val_type != TEMP_VAL_REG) { @@ -4809,12 +5251,17 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) int i, k, nb_iargs, nb_oargs; TCGReg reg; TCGArg arg; + const TCGArgConstraint *args_ct; const TCGArgConstraint *arg_ct; TCGTemp *ts; TCGArg new_args[TCG_MAX_OP_ARGS]; int const_args[TCG_MAX_OP_ARGS]; TCGCond op_cond; + if (def->flags & TCG_OPF_CARRY_IN) { + tcg_debug_assert(s->carry_live); + } + nb_oargs = def->nb_oargs; nb_iargs = def->nb_iargs; @@ -4827,22 +5274,18 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) o_allocated_regs = s->reserved_regs; switch (op->opc) { - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: + case INDEX_op_brcond: op_cond = op->args[2]; break; - case INDEX_op_setcond_i32: - case INDEX_op_setcond_i64: - case INDEX_op_negsetcond_i32: - case INDEX_op_negsetcond_i64: + case INDEX_op_setcond: + case INDEX_op_negsetcond: case INDEX_op_cmp_vec: op_cond = op->args[3]; break; case INDEX_op_brcond2_i32: op_cond = op->args[4]; break; - case INDEX_op_movcond_i32: - case INDEX_op_movcond_i64: + case INDEX_op_movcond: case INDEX_op_setcond2_i32: case INDEX_op_cmpsel_vec: op_cond = op->args[5]; @@ -4853,6 +5296,8 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) break; } + args_ct = opcode_args_ct(op); + /* satisfy input constraints */ for (k = 0; k < nb_iargs; k++) { TCGRegSet i_preferred_regs, i_required_regs; @@ -4860,18 +5305,28 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) TCGTemp *ts2; int i1, i2; - i = def->args_ct[nb_oargs + k].sort_index; + i = args_ct[nb_oargs + k].sort_index; arg = op->args[i]; - arg_ct = &def->args_ct[i]; + arg_ct = &args_ct[i]; ts = arg_temp(arg); - if (ts->val_type == TEMP_VAL_CONST - && tcg_target_const_match(ts->val, arg_ct->ct, ts->type, - op_cond, TCGOP_VECE(op))) { - /* constant is OK for instruction */ - const_args[i] = 1; - new_args[i] = ts->val; - continue; + if (ts->val_type == TEMP_VAL_CONST) { +#ifdef TCG_REG_ZERO + if (ts->val == 0 && (arg_ct->ct & TCG_CT_REG_ZERO)) { + /* Hardware zero register: indicate register via non-const. */ + const_args[i] = 0; + new_args[i] = TCG_REG_ZERO; + continue; + } +#endif + + if (tcg_target_const_match(ts->val, arg_ct->ct, ts->type, + op_cond, TCGOP_VECE(op))) { + /* constant is OK for instruction */ + const_args[i] = 1; + new_args[i] = ts->val; + continue; + } } reg = ts->reg; @@ -4892,7 +5347,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) * register and move it. */ if (temp_readonly(ts) || !IS_DEAD_ARG(i) - || def->args_ct[arg_ct->alias_index].newreg) { + || args_ct[arg_ct->alias_index].newreg) { allocate_new_reg = true; } else if (ts->val_type == TEMP_VAL_REG) { /* @@ -5063,6 +5518,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) tcg_reg_alloc_bb_end(s, i_allocated_regs); } else { if (def->flags & TCG_OPF_CALL_CLOBBER) { + assert_carry_dead(s); /* XXX: permit generic clobber register list ? */ for (i = 0; i < TCG_TARGET_NB_REGS; i++) { if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) { @@ -5077,10 +5533,10 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) } /* satisfy the output constraints */ - for(k = 0; k < nb_oargs; k++) { - i = def->args_ct[k].sort_index; + for (k = 0; k < nb_oargs; k++) { + i = args_ct[k].sort_index; arg = op->args[i]; - arg_ct = &def->args_ct[i]; + arg_ct = &args_ct[i]; ts = arg_temp(arg); /* ENV should not be modified. 
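/*
 * Illustrative sketch, not part of the patch: the input-constraint loop
 * above adds a TCG_CT_REG_ZERO path where a constant zero is satisfied
 * by the host's hardware zero register instead of an immediate or a
 * freshly loaded register.  The toy operand selector below shows that
 * three-way decision; the flag bits and register numbers are invented.
 */
#include <stdio.h>

enum { EX_CT_CONST = 1, EX_CT_REG_ZERO = 2 };
enum { EX_REG_ZERO = 31, EX_REG_SCRATCH = 7 };

static void ex_pick_input(long value, int is_const, int ct)
{
    if (is_const && value == 0 && (ct & EX_CT_REG_ZERO)) {
        printf("use zero register r%d (non-constant operand)\n", EX_REG_ZERO);
    } else if (is_const && (ct & EX_CT_CONST)) {
        printf("encode immediate %ld\n", value);
    } else {
        printf("load into scratch register r%d\n", EX_REG_SCRATCH);
    }
}

int main(void)
{
    ex_pick_input(0, 1, EX_CT_REG_ZERO);   /* constant zero      */
    ex_pick_input(42, 1, EX_CT_CONST);     /* small immediate    */
    ex_pick_input(42, 1, EX_CT_REG_ZERO);  /* must be a register */
    return 0;
}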
*/ @@ -5139,50 +5595,345 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) } /* emit instruction */ + TCGType type = TCGOP_TYPE(op); switch (op->opc) { - case INDEX_op_ext8s_i32: - tcg_out_ext8s(s, TCG_TYPE_I32, new_args[0], new_args[1]); + case INDEX_op_addc1o: + tcg_out_set_carry(s); + /* fall through */ + case INDEX_op_add: + case INDEX_op_addcio: + case INDEX_op_addco: + case INDEX_op_and: + case INDEX_op_andc: + case INDEX_op_clz: + case INDEX_op_ctz: + case INDEX_op_divs: + case INDEX_op_divu: + case INDEX_op_eqv: + case INDEX_op_mul: + case INDEX_op_mulsh: + case INDEX_op_muluh: + case INDEX_op_nand: + case INDEX_op_nor: + case INDEX_op_or: + case INDEX_op_orc: + case INDEX_op_rems: + case INDEX_op_remu: + case INDEX_op_rotl: + case INDEX_op_rotr: + case INDEX_op_sar: + case INDEX_op_shl: + case INDEX_op_shr: + case INDEX_op_xor: + { + const TCGOutOpBinary *out = + container_of(all_outop[op->opc], TCGOutOpBinary, base); + + /* Constants should never appear in the first source operand. */ + tcg_debug_assert(!const_args[1]); + if (const_args[2]) { + out->out_rri(s, type, new_args[0], new_args[1], new_args[2]); + } else { + out->out_rrr(s, type, new_args[0], new_args[1], new_args[2]); + } + } + break; + + case INDEX_op_sub: + { + const TCGOutOpSubtract *out = &outop_sub; + + /* + * Constants should never appear in the second source operand. + * These are folded to add with negative constant. + */ + tcg_debug_assert(!const_args[2]); + if (const_args[1]) { + out->out_rir(s, type, new_args[0], new_args[1], new_args[2]); + } else { + out->out_rrr(s, type, new_args[0], new_args[1], new_args[2]); + } + } + break; + + case INDEX_op_subb1o: + tcg_out_set_borrow(s); + /* fall through */ + case INDEX_op_addci: + case INDEX_op_subbi: + case INDEX_op_subbio: + case INDEX_op_subbo: + { + const TCGOutOpAddSubCarry *out = + container_of(all_outop[op->opc], TCGOutOpAddSubCarry, base); + + if (const_args[2]) { + if (const_args[1]) { + out->out_rii(s, type, new_args[0], + new_args[1], new_args[2]); + } else { + out->out_rri(s, type, new_args[0], + new_args[1], new_args[2]); + } + } else if (const_args[1]) { + out->out_rir(s, type, new_args[0], new_args[1], new_args[2]); + } else { + out->out_rrr(s, type, new_args[0], new_args[1], new_args[2]); + } + } + break; + + case INDEX_op_bswap64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: + case INDEX_op_extrh_i64_i32: + assert(TCG_TARGET_REG_BITS == 64); + /* fall through */ + case INDEX_op_ctpop: + case INDEX_op_neg: + case INDEX_op_not: + { + const TCGOutOpUnary *out = + container_of(all_outop[op->opc], TCGOutOpUnary, base); + + /* Constants should have been folded. 
*/ + tcg_debug_assert(!const_args[1]); + out->out_rr(s, type, new_args[0], new_args[1]); + } + break; + + case INDEX_op_bswap16: + case INDEX_op_bswap32: + { + const TCGOutOpBswap *out = + container_of(all_outop[op->opc], TCGOutOpBswap, base); + + tcg_debug_assert(!const_args[1]); + out->out_rr(s, type, new_args[0], new_args[1], new_args[2]); + } break; - case INDEX_op_ext8s_i64: - tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]); + + case INDEX_op_deposit: + { + const TCGOutOpDeposit *out = &outop_deposit; + + if (const_args[2]) { + tcg_debug_assert(!const_args[1]); + out->out_rri(s, type, new_args[0], new_args[1], + new_args[2], new_args[3], new_args[4]); + } else if (const_args[1]) { + tcg_debug_assert(new_args[1] == 0); + tcg_debug_assert(!const_args[2]); + out->out_rzr(s, type, new_args[0], new_args[2], + new_args[3], new_args[4]); + } else { + out->out_rrr(s, type, new_args[0], new_args[1], + new_args[2], new_args[3], new_args[4]); + } + } break; - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - tcg_out_ext8u(s, new_args[0], new_args[1]); + + case INDEX_op_divs2: + case INDEX_op_divu2: + { + const TCGOutOpDivRem *out = + container_of(all_outop[op->opc], TCGOutOpDivRem, base); + + /* Only used by x86 and s390x, which use matching constraints. */ + tcg_debug_assert(new_args[0] == new_args[2]); + tcg_debug_assert(new_args[1] == new_args[3]); + tcg_debug_assert(!const_args[4]); + out->out_rr01r(s, type, new_args[0], new_args[1], new_args[4]); + } break; - case INDEX_op_ext16s_i32: - tcg_out_ext16s(s, TCG_TYPE_I32, new_args[0], new_args[1]); + + case INDEX_op_extract: + case INDEX_op_sextract: + { + const TCGOutOpExtract *out = + container_of(all_outop[op->opc], TCGOutOpExtract, base); + + tcg_debug_assert(!const_args[1]); + out->out_rr(s, type, new_args[0], new_args[1], + new_args[2], new_args[3]); + } break; - case INDEX_op_ext16s_i64: - tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]); + + case INDEX_op_extract2: + { + const TCGOutOpExtract2 *out = &outop_extract2; + + tcg_debug_assert(!const_args[1]); + tcg_debug_assert(!const_args[2]); + out->out_rrr(s, type, new_args[0], new_args[1], + new_args[2], new_args[3]); + } break; - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - tcg_out_ext16u(s, new_args[0], new_args[1]); + + case INDEX_op_ld8u: + case INDEX_op_ld8s: + case INDEX_op_ld16u: + case INDEX_op_ld16s: + case INDEX_op_ld32u: + case INDEX_op_ld32s: + case INDEX_op_ld: + { + const TCGOutOpLoad *out = + container_of(all_outop[op->opc], TCGOutOpLoad, base); + + tcg_debug_assert(!const_args[1]); + out->out(s, type, new_args[0], new_args[1], new_args[2]); + } break; - case INDEX_op_ext32s_i64: - tcg_out_ext32s(s, new_args[0], new_args[1]); + + case INDEX_op_muls2: + case INDEX_op_mulu2: + { + const TCGOutOpMul2 *out = + container_of(all_outop[op->opc], TCGOutOpMul2, base); + + tcg_debug_assert(!const_args[2]); + tcg_debug_assert(!const_args[3]); + out->out_rrrr(s, type, new_args[0], new_args[1], + new_args[2], new_args[3]); + } break; - case INDEX_op_ext32u_i64: - tcg_out_ext32u(s, new_args[0], new_args[1]); + + case INDEX_op_st32: + /* Use tcg_op_st w/ I32. 
*/ + type = TCG_TYPE_I32; + /* fall through */ + case INDEX_op_st: + case INDEX_op_st8: + case INDEX_op_st16: + { + const TCGOutOpStore *out = + container_of(all_outop[op->opc], TCGOutOpStore, base); + + if (const_args[0]) { + out->out_i(s, type, new_args[0], new_args[1], new_args[2]); + } else { + out->out_r(s, type, new_args[0], new_args[1], new_args[2]); + } + } break; - case INDEX_op_ext_i32_i64: - tcg_out_exts_i32_i64(s, new_args[0], new_args[1]); + + case INDEX_op_qemu_ld: + case INDEX_op_qemu_st: + { + const TCGOutOpQemuLdSt *out = + container_of(all_outop[op->opc], TCGOutOpQemuLdSt, base); + + out->out(s, type, new_args[0], new_args[1], new_args[2]); + } break; - case INDEX_op_extu_i32_i64: - tcg_out_extu_i32_i64(s, new_args[0], new_args[1]); + + case INDEX_op_qemu_ld2: + case INDEX_op_qemu_st2: + { + const TCGOutOpQemuLdSt2 *out = + container_of(all_outop[op->opc], TCGOutOpQemuLdSt2, base); + + out->out(s, type, new_args[0], new_args[1], + new_args[2], new_args[3]); + } break; - case INDEX_op_extrl_i64_i32: - tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]); + + case INDEX_op_brcond: + { + const TCGOutOpBrcond *out = &outop_brcond; + TCGCond cond = new_args[2]; + TCGLabel *label = arg_label(new_args[3]); + + tcg_debug_assert(!const_args[0]); + if (const_args[1]) { + out->out_ri(s, type, cond, new_args[0], new_args[1], label); + } else { + out->out_rr(s, type, cond, new_args[0], new_args[1], label); + } + } break; - default: - if (def->flags & TCG_OPF_VECTOR) { - tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op), - new_args, const_args); - } else { - tcg_out_op(s, op->opc, new_args, const_args); + + case INDEX_op_movcond: + { + const TCGOutOpMovcond *out = &outop_movcond; + TCGCond cond = new_args[5]; + + tcg_debug_assert(!const_args[1]); + out->out(s, type, cond, new_args[0], + new_args[1], new_args[2], const_args[2], + new_args[3], const_args[3], + new_args[4], const_args[4]); + } + break; + + case INDEX_op_setcond: + case INDEX_op_negsetcond: + { + const TCGOutOpSetcond *out = + container_of(all_outop[op->opc], TCGOutOpSetcond, base); + TCGCond cond = new_args[3]; + + tcg_debug_assert(!const_args[1]); + if (const_args[2]) { + out->out_rri(s, type, cond, + new_args[0], new_args[1], new_args[2]); + } else { + out->out_rrr(s, type, cond, + new_args[0], new_args[1], new_args[2]); + } + } + break; + +#if TCG_TARGET_REG_BITS == 32 + case INDEX_op_brcond2_i32: + { + const TCGOutOpBrcond2 *out = &outop_brcond2; + TCGCond cond = new_args[4]; + TCGLabel *label = arg_label(new_args[5]); + + tcg_debug_assert(!const_args[0]); + tcg_debug_assert(!const_args[1]); + out->out(s, cond, new_args[0], new_args[1], + new_args[2], const_args[2], + new_args[3], const_args[3], label); } break; + case INDEX_op_setcond2_i32: + { + const TCGOutOpSetcond2 *out = &outop_setcond2; + TCGCond cond = new_args[5]; + + tcg_debug_assert(!const_args[1]); + tcg_debug_assert(!const_args[2]); + out->out(s, cond, new_args[0], new_args[1], new_args[2], + new_args[3], const_args[3], new_args[4], const_args[4]); + } + break; +#else + case INDEX_op_brcond2_i32: + case INDEX_op_setcond2_i32: + g_assert_not_reached(); +#endif + + case INDEX_op_goto_ptr: + tcg_debug_assert(!const_args[0]); + tcg_out_goto_ptr(s, new_args[0]); + break; + + default: + tcg_debug_assert(def->flags & TCG_OPF_VECTOR); + tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64, + TCGOP_VECE(op), new_args, const_args); + break; + } + + if (def->flags & TCG_OPF_CARRY_IN) { + s->carry_live = false; + } + if (def->flags & TCG_OPF_CARRY_OUT) { + 
s->carry_live = true; } /* move the outputs in the correct register if needed */ @@ -5204,7 +5955,7 @@ static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op) { const TCGLifeData arg_life = op->life; TCGTemp *ots, *itsl, *itsh; - TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64; + TCGType vtype = TCGOP_TYPE(op); /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */ tcg_debug_assert(TCG_TARGET_REG_BITS == 32); @@ -5220,8 +5971,7 @@ static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op) /* Allocate the output register now. */ if (ots->val_type != TEMP_VAL_REG) { TCGRegSet allocated_regs = s->reserved_regs; - TCGRegSet dup_out_regs = - tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs; + TCGRegSet dup_out_regs = opcode_args_ct(op)[0].regs; TCGReg oreg; /* Make sure to not spill the input registers. */ @@ -5506,7 +6256,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc, MemOp host_atom, bool allow_two_ops) { - MemOp align = get_alignment_bits(opc); + MemOp align = memop_alignment_bits(opc); MemOp size = opc & MO_SIZE; MemOp half = size ? size - 1 : 0; MemOp atom = opc & MO_ATOM_MASK; @@ -5852,7 +6602,7 @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, */ tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN, TCG_TYPE_I32, TCG_TYPE_I32, - ldst->addrlo_reg, -1); + ldst->addr_reg, -1); tcg_out_helper_load_slots(s, 1, mov, parm); tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot, @@ -5860,7 +6610,7 @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, next_arg += 2; } else { nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type, - ldst->addrlo_reg, ldst->addrhi_reg); + ldst->addr_reg, -1); tcg_out_helper_load_slots(s, nmov, mov, parm); next_arg += nmov; } @@ -6017,21 +6767,22 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, /* Handle addr argument. */ loc = &info->in[next_arg]; - if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) { + tcg_debug_assert(s->addr_type <= TCG_TYPE_REG); + if (TCG_TARGET_REG_BITS == 32) { /* - * 32-bit host with 32-bit guest: zero-extend the guest address + * 32-bit host (and thus 32-bit guest): zero-extend the guest address * to 64-bits for the helper by storing the low part. Later, * after we have processed the register inputs, we will load a * zero for the high part. */ tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN, TCG_TYPE_I32, TCG_TYPE_I32, - ldst->addrlo_reg, -1); + ldst->addr_reg, -1); next_arg += 2; nmov += 1; } else { n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type, - ldst->addrlo_reg, ldst->addrhi_reg); + ldst->addr_reg, -1); next_arg += n; nmov += n; } @@ -6079,7 +6830,7 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, g_assert_not_reached(); } - if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) { + if (TCG_TARGET_REG_BITS == 32) { /* Zero extend the address by loading a zero for the high part. 
*/ loc = &info->in[1 + !HOST_BIG_ENDIAN]; tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm); @@ -6090,7 +6841,7 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst, int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start) { - int i, start_words, num_insns; + int i, num_insns; TCGOp *op; if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP) @@ -6121,6 +6872,9 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start) } #endif + /* Do not reuse any EBB that may be allocated within the TB. */ + tcg_temp_ebb_reset_freed(s); + tcg_optimize(s); reachable_code_pass(s); @@ -6172,27 +6926,39 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start) */ s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr); s->code_ptr = s->code_buf; + s->data_gen_ptr = NULL; -#ifdef TCG_TARGET_NEED_LDST_LABELS QSIMPLEQ_INIT(&s->ldst_labels); -#endif -#ifdef TCG_TARGET_NEED_POOL_LABELS s->pool_labels = NULL; -#endif - start_words = s->insn_start_words; s->gen_insn_data = - tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * start_words); + tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * INSN_START_WORDS); tcg_out_tb_start(s); num_insns = -1; + s->carry_live = false; QTAILQ_FOREACH(op, &s->ops, link) { TCGOpcode opc = op->opc; switch (opc) { - case INDEX_op_mov_i32: - case INDEX_op_mov_i64: + case INDEX_op_extrl_i64_i32: + assert(TCG_TARGET_REG_BITS == 64); + /* + * If TCG_TYPE_I32 is represented in some canonical form, + * e.g. zero or sign-extended, then emit as a unary op. + * Otherwise we can treat this as a plain move. + * If the output dies, treat this as a plain move, because + * this will be implemented with a store. + */ + if (TCG_TARGET_HAS_extr_i64_i32) { + TCGLifeData arg_life = op->life; + if (!IS_DEAD_ARG(0)) { + goto do_default; + } + } + /* fall through */ + case INDEX_op_mov: case INDEX_op_mov_vec: tcg_reg_alloc_mov(s, op); break; @@ -6200,6 +6966,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start) tcg_reg_alloc_dup(s, op); break; case INDEX_op_insn_start: + assert_carry_dead(s); if (num_insns >= 0) { size_t off = tcg_current_code_size(s); s->gen_insn_end_off[num_insns] = off; @@ -6207,8 +6974,8 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start) assert(s->gen_insn_end_off[num_insns] == off); } num_insns++; - for (i = 0; i < start_words; ++i) { - s->gen_insn_data[num_insns * start_words + i] = + for (i = 0; i < INSN_START_WORDS; ++i) { + s->gen_insn_data[num_insns * INSN_START_WORDS + i] = tcg_get_insn_start_param(op, i); } break; @@ -6220,6 +6987,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start) tcg_out_label(s, arg_label(op->args[0])); break; case INDEX_op_call: + assert_carry_dead(s); tcg_reg_alloc_call(s, op); break; case INDEX_op_exit_tb: @@ -6228,14 +6996,22 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start) case INDEX_op_goto_tb: tcg_out_goto_tb(s, op->args[0]); break; + case INDEX_op_br: + tcg_out_br(s, arg_label(op->args[0])); + break; + case INDEX_op_mb: + tcg_out_mb(s, op->args[0]); + break; case INDEX_op_dup2_vec: if (tcg_reg_alloc_dup2(s, op)) { break; } /* fall through */ default: + do_default: /* Sanity check that we've not introduced any unhandled opcodes. 
*/ - tcg_debug_assert(tcg_op_supported(opc)); + tcg_debug_assert(tcg_op_supported(opc, TCGOP_TYPE(op), + TCGOP_FLAGS(op))); /* Note: in order to speed up the code, it would be much faster to have specialized register allocator functions for some common argument patterns */ @@ -6254,22 +7030,20 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start) return -2; } } + assert_carry_dead(s); + tcg_debug_assert(num_insns + 1 == s->gen_tb->icount); s->gen_insn_end_off[num_insns] = tcg_current_code_size(s); /* Generate TB finalization at the end of block */ -#ifdef TCG_TARGET_NEED_LDST_LABELS i = tcg_out_ldst_finalize(s); if (i < 0) { return i; } -#endif -#ifdef TCG_TARGET_NEED_POOL_LABELS i = tcg_out_pool_finalize(s); if (i < 0) { return i; } -#endif if (!tcg_resolve_relocs(s)) { return -2; } diff --git a/tcg/tci.c b/tcg/tci.c index 3afb2235285..700e6726169 100644 --- a/tcg/tci.c +++ b/tcg/tci.c @@ -21,9 +21,16 @@ #include "tcg/tcg.h" #include "tcg/helper-info.h" #include "tcg/tcg-ldst.h" +#include "disas/dis-asm.h" +#include "tcg-has.h" #include +#define ctpop_tr glue(ctpop, TCG_TARGET_REG_BITS) +#define deposit_tr glue(deposit, TCG_TARGET_REG_BITS) +#define extract_tr glue(extract, TCG_TARGET_REG_BITS) +#define sextract_tr glue(sextract, TCG_TARGET_REG_BITS) + /* * Enable TCI assertions only when debugging TCG (and without NDEBUG defined). * Without assertions, the interpreter runs much faster. @@ -152,16 +159,6 @@ static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1, *i4 = extract32(insn, 26, 6); } -static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1, - TCGReg *r2, TCGReg *r3, TCGReg *r4) -{ - *r0 = extract32(insn, 8, 4); - *r1 = extract32(insn, 12, 4); - *r2 = extract32(insn, 16, 4); - *r3 = extract32(insn, 20, 4); - *r4 = extract32(insn, 24, 4); -} - static void tci_args_rrrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3) { @@ -182,17 +179,6 @@ static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1, *c5 = extract32(insn, 28, 4); } -static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1, - TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5) -{ - *r0 = extract32(insn, 8, 4); - *r1 = extract32(insn, 12, 4); - *r2 = extract32(insn, 16, 4); - *r3 = extract32(insn, 20, 4); - *r4 = extract32(insn, 24, 4); - *r5 = extract32(insn, 28, 4); -} - static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition) { bool result = false; @@ -339,18 +325,6 @@ static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val, } } -#if TCG_TARGET_REG_BITS == 64 -# define CASE_32_64(x) \ - case glue(glue(INDEX_op_, x), _i64): \ - case glue(glue(INDEX_op_, x), _i32): -# define CASE_64(x) \ - case glue(glue(INDEX_op_, x), _i64): -#else -# define CASE_32_64(x) \ - case glue(glue(INDEX_op_, x), _i32): -# define CASE_64(x) -#endif - /* Interpret pseudo code in tb. */ /* * Disable CFI checks. 
@@ -364,6 +338,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, tcg_target_ulong regs[TCG_TARGET_NB_REGS]; uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE) / sizeof(uint64_t)]; + bool carry = false; regs[TCG_AREG0] = (tcg_target_ulong)env; regs[TCG_REG_CALL_STACK] = (uintptr_t)stack; @@ -372,13 +347,12 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, for (;;) { uint32_t insn; TCGOpcode opc; - TCGReg r0, r1, r2, r3, r4, r5; + TCGReg r0, r1, r2, r3, r4; tcg_target_ulong t1; TCGCond condition; uint8_t pos, len; uint32_t tmp32; uint64_t tmp64, taddr; - uint64_t T1, T2; MemOpIdx oi; int32_t ofs; void *ptr; @@ -444,34 +418,25 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, tci_args_l(insn, tb_ptr, &ptr); tb_ptr = ptr; continue; - case INDEX_op_setcond_i32: - tci_args_rrrc(insn, &r0, &r1, &r2, &condition); - regs[r0] = tci_compare32(regs[r1], regs[r2], condition); - break; - case INDEX_op_movcond_i32: - tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); - tmp32 = tci_compare32(regs[r1], regs[r2], condition); - regs[r0] = regs[tmp32 ? r3 : r4]; - break; #if TCG_TARGET_REG_BITS == 32 case INDEX_op_setcond2_i32: tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); - T1 = tci_uint64(regs[r2], regs[r1]); - T2 = tci_uint64(regs[r4], regs[r3]); - regs[r0] = tci_compare64(T1, T2, condition); + regs[r0] = tci_compare64(tci_uint64(regs[r2], regs[r1]), + tci_uint64(regs[r4], regs[r3]), + condition); break; #elif TCG_TARGET_REG_BITS == 64 - case INDEX_op_setcond_i64: + case INDEX_op_setcond: tci_args_rrrc(insn, &r0, &r1, &r2, &condition); regs[r0] = tci_compare64(regs[r1], regs[r2], condition); break; - case INDEX_op_movcond_i64: + case INDEX_op_movcond: tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); tmp32 = tci_compare64(regs[r1], regs[r2], condition); regs[r0] = regs[tmp32 ? r3 : r4]; break; #endif - CASE_32_64(mov) + case INDEX_op_mov: tci_args_rr(insn, &r0, &r1); regs[r0] = regs[r1]; break; @@ -483,423 +448,325 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, tci_args_rl(insn, tb_ptr, &r0, &ptr); regs[r0] = *(tcg_target_ulong *)ptr; break; + case INDEX_op_tci_setcarry: + carry = true; + break; /* Load/store operations (32 bit). 
*/ - CASE_32_64(ld8u) + case INDEX_op_ld8u: tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); regs[r0] = *(uint8_t *)ptr; break; - CASE_32_64(ld8s) + case INDEX_op_ld8s: tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); regs[r0] = *(int8_t *)ptr; break; - CASE_32_64(ld16u) + case INDEX_op_ld16u: tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); regs[r0] = *(uint16_t *)ptr; break; - CASE_32_64(ld16s) + case INDEX_op_ld16s: tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); regs[r0] = *(int16_t *)ptr; break; - case INDEX_op_ld_i32: - CASE_64(ld32u) + case INDEX_op_ld: tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); - regs[r0] = *(uint32_t *)ptr; + regs[r0] = *(tcg_target_ulong *)ptr; break; - CASE_32_64(st8) + case INDEX_op_st8: tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); *(uint8_t *)ptr = regs[r0]; break; - CASE_32_64(st16) + case INDEX_op_st16: tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); *(uint16_t *)ptr = regs[r0]; break; - case INDEX_op_st_i32: - CASE_64(st32) + case INDEX_op_st: tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); - *(uint32_t *)ptr = regs[r0]; + *(tcg_target_ulong *)ptr = regs[r0]; break; /* Arithmetic operations (mixed 32/64 bit). */ - CASE_32_64(add) + case INDEX_op_add: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] + regs[r2]; break; - CASE_32_64(sub) + case INDEX_op_sub: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] - regs[r2]; break; - CASE_32_64(mul) + case INDEX_op_mul: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] * regs[r2]; break; - CASE_32_64(and) + case INDEX_op_and: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] & regs[r2]; break; - CASE_32_64(or) + case INDEX_op_or: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] | regs[r2]; break; - CASE_32_64(xor) + case INDEX_op_xor: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] ^ regs[r2]; break; -#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64 - CASE_32_64(andc) + case INDEX_op_andc: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] & ~regs[r2]; break; -#endif -#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64 - CASE_32_64(orc) + case INDEX_op_orc: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] | ~regs[r2]; break; -#endif -#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64 - CASE_32_64(eqv) + case INDEX_op_eqv: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = ~(regs[r1] ^ regs[r2]); break; -#endif -#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64 - CASE_32_64(nand) + case INDEX_op_nand: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = ~(regs[r1] & regs[r2]); break; -#endif -#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64 - CASE_32_64(nor) + case INDEX_op_nor: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = ~(regs[r1] | regs[r2]); break; + case INDEX_op_neg: + tci_args_rr(insn, &r0, &r1); + regs[r0] = -regs[r1]; + break; + case INDEX_op_not: + tci_args_rr(insn, &r0, &r1); + regs[r0] = ~regs[r1]; + break; + case INDEX_op_ctpop: + tci_args_rr(insn, &r0, &r1); + regs[r0] = ctpop_tr(regs[r1]); + break; + case INDEX_op_addco: + tci_args_rrr(insn, &r0, &r1, &r2); + t1 = regs[r1] + regs[r2]; + carry = t1 < regs[r1]; + regs[r0] = t1; + break; + case INDEX_op_addci: + tci_args_rrr(insn, &r0, &r1, &r2); + regs[r0] = regs[r1] + regs[r2] + carry; + break; + case INDEX_op_addcio: + tci_args_rrr(insn, &r0, &r1, &r2); + if (carry) { + t1 = regs[r1] + regs[r2] + 1; + carry = t1 <= regs[r1]; + } else { + 
t1 = regs[r1] + regs[r2]; + carry = t1 < regs[r1]; + } + regs[r0] = t1; + break; + case INDEX_op_subbo: + tci_args_rrr(insn, &r0, &r1, &r2); + carry = regs[r1] < regs[r2]; + regs[r0] = regs[r1] - regs[r2]; + break; + case INDEX_op_subbi: + tci_args_rrr(insn, &r0, &r1, &r2); + regs[r0] = regs[r1] - regs[r2] - carry; + break; + case INDEX_op_subbio: + tci_args_rrr(insn, &r0, &r1, &r2); + if (carry) { + carry = regs[r1] <= regs[r2]; + regs[r0] = regs[r1] - regs[r2] - 1; + } else { + carry = regs[r1] < regs[r2]; + regs[r0] = regs[r1] - regs[r2]; + } + break; + case INDEX_op_muls2: + tci_args_rrrr(insn, &r0, &r1, &r2, &r3); +#if TCG_TARGET_REG_BITS == 32 + tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3]; + tci_write_reg64(regs, r1, r0, tmp64); +#else + muls64(®s[r0], ®s[r1], regs[r2], regs[r3]); #endif + break; + case INDEX_op_mulu2: + tci_args_rrrr(insn, &r0, &r1, &r2, &r3); +#if TCG_TARGET_REG_BITS == 32 + tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3]; + tci_write_reg64(regs, r1, r0, tmp64); +#else + mulu64(®s[r0], ®s[r1], regs[r2], regs[r3]); +#endif + break; /* Arithmetic operations (32 bit). */ - case INDEX_op_div_i32: + case INDEX_op_tci_divs32: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2]; break; - case INDEX_op_divu_i32: + case INDEX_op_tci_divu32: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2]; break; - case INDEX_op_rem_i32: + case INDEX_op_tci_rems32: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2]; break; - case INDEX_op_remu_i32: + case INDEX_op_tci_remu32: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2]; break; -#if TCG_TARGET_HAS_clz_i32 - case INDEX_op_clz_i32: + case INDEX_op_tci_clz32: tci_args_rrr(insn, &r0, &r1, &r2); tmp32 = regs[r1]; regs[r0] = tmp32 ? clz32(tmp32) : regs[r2]; break; -#endif -#if TCG_TARGET_HAS_ctz_i32 - case INDEX_op_ctz_i32: + case INDEX_op_tci_ctz32: tci_args_rrr(insn, &r0, &r1, &r2); tmp32 = regs[r1]; regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2]; break; -#endif -#if TCG_TARGET_HAS_ctpop_i32 - case INDEX_op_ctpop_i32: - tci_args_rr(insn, &r0, &r1); - regs[r0] = ctpop32(regs[r1]); + case INDEX_op_tci_setcond32: + tci_args_rrrc(insn, &r0, &r1, &r2, &condition); + regs[r0] = tci_compare32(regs[r1], regs[r2], condition); + break; + case INDEX_op_tci_movcond32: + tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); + tmp32 = tci_compare32(regs[r1], regs[r2], condition); + regs[r0] = regs[tmp32 ? r3 : r4]; break; -#endif - /* Shift/rotate operations (32 bit). */ + /* Shift/rotate operations. 
*/ - case INDEX_op_shl_i32: + case INDEX_op_shl: tci_args_rrr(insn, &r0, &r1, &r2); - regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31); + regs[r0] = regs[r1] << (regs[r2] % TCG_TARGET_REG_BITS); break; - case INDEX_op_shr_i32: + case INDEX_op_shr: tci_args_rrr(insn, &r0, &r1, &r2); - regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31); + regs[r0] = regs[r1] >> (regs[r2] % TCG_TARGET_REG_BITS); break; - case INDEX_op_sar_i32: + case INDEX_op_sar: tci_args_rrr(insn, &r0, &r1, &r2); - regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31); + regs[r0] = ((tcg_target_long)regs[r1] + >> (regs[r2] % TCG_TARGET_REG_BITS)); break; -#if TCG_TARGET_HAS_rot_i32 - case INDEX_op_rotl_i32: + case INDEX_op_tci_rotl32: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = rol32(regs[r1], regs[r2] & 31); break; - case INDEX_op_rotr_i32: + case INDEX_op_tci_rotr32: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = ror32(regs[r1], regs[r2] & 31); break; -#endif -#if TCG_TARGET_HAS_deposit_i32 - case INDEX_op_deposit_i32: + case INDEX_op_deposit: tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); - regs[r0] = deposit32(regs[r1], pos, len, regs[r2]); + regs[r0] = deposit_tr(regs[r1], pos, len, regs[r2]); break; -#endif -#if TCG_TARGET_HAS_extract_i32 - case INDEX_op_extract_i32: + case INDEX_op_extract: tci_args_rrbb(insn, &r0, &r1, &pos, &len); - regs[r0] = extract32(regs[r1], pos, len); + regs[r0] = extract_tr(regs[r1], pos, len); break; -#endif -#if TCG_TARGET_HAS_sextract_i32 - case INDEX_op_sextract_i32: + case INDEX_op_sextract: tci_args_rrbb(insn, &r0, &r1, &pos, &len); - regs[r0] = sextract32(regs[r1], pos, len); + regs[r0] = sextract_tr(regs[r1], pos, len); break; -#endif - case INDEX_op_brcond_i32: + case INDEX_op_brcond: tci_args_rl(insn, tb_ptr, &r0, &ptr); - if ((uint32_t)regs[r0]) { + if (regs[r0]) { tb_ptr = ptr; } break; -#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32 - case INDEX_op_add2_i32: - tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); - T1 = tci_uint64(regs[r3], regs[r2]); - T2 = tci_uint64(regs[r5], regs[r4]); - tci_write_reg64(regs, r1, r0, T1 + T2); - break; -#endif -#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32 - case INDEX_op_sub2_i32: - tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); - T1 = tci_uint64(regs[r3], regs[r2]); - T2 = tci_uint64(regs[r5], regs[r4]); - tci_write_reg64(regs, r1, r0, T1 - T2); - break; -#endif -#if TCG_TARGET_HAS_mulu2_i32 - case INDEX_op_mulu2_i32: - tci_args_rrrr(insn, &r0, &r1, &r2, &r3); - tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3]; - tci_write_reg64(regs, r1, r0, tmp64); - break; -#endif -#if TCG_TARGET_HAS_muls2_i32 - case INDEX_op_muls2_i32: - tci_args_rrrr(insn, &r0, &r1, &r2, &r3); - tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3]; - tci_write_reg64(regs, r1, r0, tmp64); - break; -#endif -#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 - CASE_32_64(ext8s) - tci_args_rr(insn, &r0, &r1); - regs[r0] = (int8_t)regs[r1]; - break; -#endif -#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \ - TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64 - CASE_32_64(ext16s) - tci_args_rr(insn, &r0, &r1); - regs[r0] = (int16_t)regs[r1]; - break; -#endif -#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64 - CASE_32_64(ext8u) - tci_args_rr(insn, &r0, &r1); - regs[r0] = (uint8_t)regs[r1]; - break; -#endif -#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64 - CASE_32_64(ext16u) - tci_args_rr(insn, &r0, &r1); - regs[r0] = (uint16_t)regs[r1]; - break; -#endif -#if TCG_TARGET_HAS_bswap16_i32 || 
TCG_TARGET_HAS_bswap16_i64 - CASE_32_64(bswap16) + case INDEX_op_bswap16: tci_args_rr(insn, &r0, &r1); regs[r0] = bswap16(regs[r1]); break; -#endif -#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64 - CASE_32_64(bswap32) + case INDEX_op_bswap32: tci_args_rr(insn, &r0, &r1); regs[r0] = bswap32(regs[r1]); break; -#endif -#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64 - CASE_32_64(not) - tci_args_rr(insn, &r0, &r1); - regs[r0] = ~regs[r1]; - break; -#endif - CASE_32_64(neg) - tci_args_rr(insn, &r0, &r1); - regs[r0] = -regs[r1]; - break; #if TCG_TARGET_REG_BITS == 64 /* Load/store operations (64 bit). */ - case INDEX_op_ld32s_i64: + case INDEX_op_ld32u: tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); - regs[r0] = *(int32_t *)ptr; + regs[r0] = *(uint32_t *)ptr; break; - case INDEX_op_ld_i64: + case INDEX_op_ld32s: tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); - regs[r0] = *(uint64_t *)ptr; + regs[r0] = *(int32_t *)ptr; break; - case INDEX_op_st_i64: + case INDEX_op_st32: tci_args_rrs(insn, &r0, &r1, &ofs); ptr = (void *)(regs[r1] + ofs); - *(uint64_t *)ptr = regs[r0]; + *(uint32_t *)ptr = regs[r0]; break; /* Arithmetic operations (64 bit). */ - case INDEX_op_div_i64: + case INDEX_op_divs: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2]; break; - case INDEX_op_divu_i64: + case INDEX_op_divu: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2]; break; - case INDEX_op_rem_i64: + case INDEX_op_rems: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2]; break; - case INDEX_op_remu_i64: + case INDEX_op_remu: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2]; break; -#if TCG_TARGET_HAS_clz_i64 - case INDEX_op_clz_i64: + case INDEX_op_clz: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2]; break; -#endif -#if TCG_TARGET_HAS_ctz_i64 - case INDEX_op_ctz_i64: + case INDEX_op_ctz: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2]; break; -#endif -#if TCG_TARGET_HAS_ctpop_i64 - case INDEX_op_ctpop_i64: - tci_args_rr(insn, &r0, &r1); - regs[r0] = ctpop64(regs[r1]); - break; -#endif -#if TCG_TARGET_HAS_mulu2_i64 - case INDEX_op_mulu2_i64: - tci_args_rrrr(insn, &r0, &r1, &r2, &r3); - mulu64(®s[r0], ®s[r1], regs[r2], regs[r3]); - break; -#endif -#if TCG_TARGET_HAS_muls2_i64 - case INDEX_op_muls2_i64: - tci_args_rrrr(insn, &r0, &r1, &r2, &r3); - muls64(®s[r0], ®s[r1], regs[r2], regs[r3]); - break; -#endif -#if TCG_TARGET_HAS_add2_i64 - case INDEX_op_add2_i64: - tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); - T1 = regs[r2] + regs[r4]; - T2 = regs[r3] + regs[r5] + (T1 < regs[r2]); - regs[r0] = T1; - regs[r1] = T2; - break; -#endif -#if TCG_TARGET_HAS_add2_i64 - case INDEX_op_sub2_i64: - tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); - T1 = regs[r2] - regs[r4]; - T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]); - regs[r0] = T1; - regs[r1] = T2; - break; -#endif /* Shift/rotate operations (64 bit). 
*/ - case INDEX_op_shl_i64: - tci_args_rrr(insn, &r0, &r1, &r2); - regs[r0] = regs[r1] << (regs[r2] & 63); - break; - case INDEX_op_shr_i64: - tci_args_rrr(insn, &r0, &r1, &r2); - regs[r0] = regs[r1] >> (regs[r2] & 63); - break; - case INDEX_op_sar_i64: - tci_args_rrr(insn, &r0, &r1, &r2); - regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63); - break; -#if TCG_TARGET_HAS_rot_i64 - case INDEX_op_rotl_i64: + case INDEX_op_rotl: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = rol64(regs[r1], regs[r2] & 63); break; - case INDEX_op_rotr_i64: + case INDEX_op_rotr: tci_args_rrr(insn, &r0, &r1, &r2); regs[r0] = ror64(regs[r1], regs[r2] & 63); break; -#endif -#if TCG_TARGET_HAS_deposit_i64 - case INDEX_op_deposit_i64: - tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); - regs[r0] = deposit64(regs[r1], pos, len, regs[r2]); - break; -#endif -#if TCG_TARGET_HAS_extract_i64 - case INDEX_op_extract_i64: - tci_args_rrbb(insn, &r0, &r1, &pos, &len); - regs[r0] = extract64(regs[r1], pos, len); - break; -#endif -#if TCG_TARGET_HAS_sextract_i64 - case INDEX_op_sextract_i64: - tci_args_rrbb(insn, &r0, &r1, &pos, &len); - regs[r0] = sextract64(regs[r1], pos, len); - break; -#endif - case INDEX_op_brcond_i64: - tci_args_rl(insn, tb_ptr, &r0, &ptr); - if (regs[r0]) { - tb_ptr = ptr; - } - break; - case INDEX_op_ext32s_i64: case INDEX_op_ext_i32_i64: tci_args_rr(insn, &r0, &r1); regs[r0] = (int32_t)regs[r1]; break; - case INDEX_op_ext32u_i64: case INDEX_op_extu_i32_i64: tci_args_rr(insn, &r0, &r1); regs[r0] = (uint32_t)regs[r1]; break; -#if TCG_TARGET_HAS_bswap64_i64 - case INDEX_op_bswap64_i64: + case INDEX_op_bswap64: tci_args_rr(insn, &r0, &r1); regs[r0] = bswap64(regs[r1]); break; -#endif #endif /* TCG_TARGET_REG_BITS == 64 */ /* QEMU specific operations. */ @@ -922,92 +789,33 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, tb_ptr = ptr; break; - case INDEX_op_qemu_ld_a32_i32: + case INDEX_op_qemu_ld: tci_args_rrm(insn, &r0, &r1, &oi); - taddr = (uint32_t)regs[r1]; - goto do_ld_i32; - case INDEX_op_qemu_ld_a64_i32: - if (TCG_TARGET_REG_BITS == 64) { - tci_args_rrm(insn, &r0, &r1, &oi); - taddr = regs[r1]; - } else { - tci_args_rrrr(insn, &r0, &r1, &r2, &r3); - taddr = tci_uint64(regs[r2], regs[r1]); - oi = regs[r3]; - } - do_ld_i32: + taddr = regs[r1]; regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr); break; - case INDEX_op_qemu_ld_a32_i64: - if (TCG_TARGET_REG_BITS == 64) { - tci_args_rrm(insn, &r0, &r1, &oi); - taddr = (uint32_t)regs[r1]; - } else { - tci_args_rrrr(insn, &r0, &r1, &r2, &r3); - taddr = (uint32_t)regs[r2]; - oi = regs[r3]; - } - goto do_ld_i64; - case INDEX_op_qemu_ld_a64_i64: - if (TCG_TARGET_REG_BITS == 64) { - tci_args_rrm(insn, &r0, &r1, &oi); - taddr = regs[r1]; - } else { - tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); - taddr = tci_uint64(regs[r3], regs[r2]); - oi = regs[r4]; - } - do_ld_i64: - tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr); - if (TCG_TARGET_REG_BITS == 32) { - tci_write_reg64(regs, r1, r0, tmp64); - } else { - regs[r0] = tmp64; - } - break; - - case INDEX_op_qemu_st_a32_i32: + case INDEX_op_qemu_st: tci_args_rrm(insn, &r0, &r1, &oi); - taddr = (uint32_t)regs[r1]; - goto do_st_i32; - case INDEX_op_qemu_st_a64_i32: - if (TCG_TARGET_REG_BITS == 64) { - tci_args_rrm(insn, &r0, &r1, &oi); - taddr = regs[r1]; - } else { - tci_args_rrrr(insn, &r0, &r1, &r2, &r3); - taddr = tci_uint64(regs[r2], regs[r1]); - oi = regs[r3]; - } - do_st_i32: + taddr = regs[r1]; tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr); break; - case INDEX_op_qemu_st_a32_i64: - if 
(TCG_TARGET_REG_BITS == 64) { - tci_args_rrm(insn, &r0, &r1, &oi); - tmp64 = regs[r0]; - taddr = (uint32_t)regs[r1]; - } else { - tci_args_rrrr(insn, &r0, &r1, &r2, &r3); - tmp64 = tci_uint64(regs[r1], regs[r0]); - taddr = (uint32_t)regs[r2]; - oi = regs[r3]; - } - goto do_st_i64; - case INDEX_op_qemu_st_a64_i64: - if (TCG_TARGET_REG_BITS == 64) { - tci_args_rrm(insn, &r0, &r1, &oi); - tmp64 = regs[r0]; - taddr = regs[r1]; - } else { - tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); - tmp64 = tci_uint64(regs[r1], regs[r0]); - taddr = tci_uint64(regs[r3], regs[r2]); - oi = regs[r4]; - } - do_st_i64: + case INDEX_op_qemu_ld2: + tcg_debug_assert(TCG_TARGET_REG_BITS == 32); + tci_args_rrrr(insn, &r0, &r1, &r2, &r3); + taddr = regs[r2]; + oi = regs[r3]; + tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr); + tci_write_reg64(regs, r1, r0, tmp64); + break; + + case INDEX_op_qemu_st2: + tcg_debug_assert(TCG_TARGET_REG_BITS == 32); + tci_args_rrrr(insn, &r0, &r1, &r2, &r3); + tmp64 = tci_uint64(regs[r1], regs[r0]); + taddr = regs[r2]; + oi = regs[r3]; tci_qemu_st(env, taddr, tmp64, oi, tb_ptr); break; @@ -1071,7 +879,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info) const char *op_name; uint32_t insn; TCGOpcode op; - TCGReg r0, r1, r2, r3, r4, r5; + TCGReg r0, r1, r2, r3, r4; tcg_target_ulong i1; int32_t s2; TCGCond c; @@ -1106,15 +914,14 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info) info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr); break; - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: + case INDEX_op_brcond: tci_args_rl(insn, tb_ptr, &r0, &ptr); info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p", op_name, str_r(r0), ptr); break; - case INDEX_op_setcond_i32: - case INDEX_op_setcond_i64: + case INDEX_op_setcond: + case INDEX_op_tci_setcond32: tci_args_rrrc(insn, &r0, &r1, &r2, &c); info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c)); @@ -1132,126 +939,95 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info) op_name, str_r(r0), ptr); break; - case INDEX_op_ld8u_i32: - case INDEX_op_ld8u_i64: - case INDEX_op_ld8s_i32: - case INDEX_op_ld8s_i64: - case INDEX_op_ld16u_i32: - case INDEX_op_ld16u_i64: - case INDEX_op_ld16s_i32: - case INDEX_op_ld16s_i64: - case INDEX_op_ld32u_i64: - case INDEX_op_ld32s_i64: - case INDEX_op_ld_i32: - case INDEX_op_ld_i64: - case INDEX_op_st8_i32: - case INDEX_op_st8_i64: - case INDEX_op_st16_i32: - case INDEX_op_st16_i64: - case INDEX_op_st32_i64: - case INDEX_op_st_i32: - case INDEX_op_st_i64: + case INDEX_op_tci_setcarry: + info->fprintf_func(info->stream, "%-12s", op_name); + break; + + case INDEX_op_ld8u: + case INDEX_op_ld8s: + case INDEX_op_ld16u: + case INDEX_op_ld16s: + case INDEX_op_ld32u: + case INDEX_op_ld: + case INDEX_op_st8: + case INDEX_op_st16: + case INDEX_op_st32: + case INDEX_op_st: tci_args_rrs(insn, &r0, &r1, &s2); info->fprintf_func(info->stream, "%-12s %s, %s, %d", op_name, str_r(r0), str_r(r1), s2); break; - case INDEX_op_mov_i32: - case INDEX_op_mov_i64: - case INDEX_op_ext8s_i32: - case INDEX_op_ext8s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: + case INDEX_op_bswap16: + case INDEX_op_bswap32: + case INDEX_op_ctpop: + case INDEX_op_mov: + case INDEX_op_neg: + case INDEX_op_not: case INDEX_op_ext_i32_i64: case INDEX_op_extu_i32_i64: - case INDEX_op_bswap16_i32: - case 
INDEX_op_bswap16_i64: - case INDEX_op_bswap32_i32: - case INDEX_op_bswap32_i64: - case INDEX_op_bswap64_i64: - case INDEX_op_not_i32: - case INDEX_op_not_i64: - case INDEX_op_neg_i32: - case INDEX_op_neg_i64: - case INDEX_op_ctpop_i32: - case INDEX_op_ctpop_i64: + case INDEX_op_bswap64: tci_args_rr(insn, &r0, &r1); info->fprintf_func(info->stream, "%-12s %s, %s", op_name, str_r(r0), str_r(r1)); break; - case INDEX_op_add_i32: - case INDEX_op_add_i64: - case INDEX_op_sub_i32: - case INDEX_op_sub_i64: - case INDEX_op_mul_i32: - case INDEX_op_mul_i64: - case INDEX_op_and_i32: - case INDEX_op_and_i64: - case INDEX_op_or_i32: - case INDEX_op_or_i64: - case INDEX_op_xor_i32: - case INDEX_op_xor_i64: - case INDEX_op_andc_i32: - case INDEX_op_andc_i64: - case INDEX_op_orc_i32: - case INDEX_op_orc_i64: - case INDEX_op_eqv_i32: - case INDEX_op_eqv_i64: - case INDEX_op_nand_i32: - case INDEX_op_nand_i64: - case INDEX_op_nor_i32: - case INDEX_op_nor_i64: - case INDEX_op_div_i32: - case INDEX_op_div_i64: - case INDEX_op_rem_i32: - case INDEX_op_rem_i64: - case INDEX_op_divu_i32: - case INDEX_op_divu_i64: - case INDEX_op_remu_i32: - case INDEX_op_remu_i64: - case INDEX_op_shl_i32: - case INDEX_op_shl_i64: - case INDEX_op_shr_i32: - case INDEX_op_shr_i64: - case INDEX_op_sar_i32: - case INDEX_op_sar_i64: - case INDEX_op_rotl_i32: - case INDEX_op_rotl_i64: - case INDEX_op_rotr_i32: - case INDEX_op_rotr_i64: - case INDEX_op_clz_i32: - case INDEX_op_clz_i64: - case INDEX_op_ctz_i32: - case INDEX_op_ctz_i64: + case INDEX_op_add: + case INDEX_op_addci: + case INDEX_op_addcio: + case INDEX_op_addco: + case INDEX_op_and: + case INDEX_op_andc: + case INDEX_op_clz: + case INDEX_op_ctz: + case INDEX_op_divs: + case INDEX_op_divu: + case INDEX_op_eqv: + case INDEX_op_mul: + case INDEX_op_nand: + case INDEX_op_nor: + case INDEX_op_or: + case INDEX_op_orc: + case INDEX_op_rems: + case INDEX_op_remu: + case INDEX_op_rotl: + case INDEX_op_rotr: + case INDEX_op_sar: + case INDEX_op_shl: + case INDEX_op_shr: + case INDEX_op_sub: + case INDEX_op_subbi: + case INDEX_op_subbio: + case INDEX_op_subbo: + case INDEX_op_xor: + case INDEX_op_tci_ctz32: + case INDEX_op_tci_clz32: + case INDEX_op_tci_divs32: + case INDEX_op_tci_divu32: + case INDEX_op_tci_rems32: + case INDEX_op_tci_remu32: + case INDEX_op_tci_rotl32: + case INDEX_op_tci_rotr32: tci_args_rrr(insn, &r0, &r1, &r2); info->fprintf_func(info->stream, "%-12s %s, %s, %s", op_name, str_r(r0), str_r(r1), str_r(r2)); break; - case INDEX_op_deposit_i32: - case INDEX_op_deposit_i64: + case INDEX_op_deposit: tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d", op_name, str_r(r0), str_r(r1), str_r(r2), pos, len); break; - case INDEX_op_extract_i32: - case INDEX_op_extract_i64: - case INDEX_op_sextract_i32: - case INDEX_op_sextract_i64: + case INDEX_op_extract: + case INDEX_op_sextract: tci_args_rrbb(insn, &r0, &r1, &pos, &len); info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d", op_name, str_r(r0), str_r(r1), pos, len); break; - case INDEX_op_movcond_i32: - case INDEX_op_movcond_i64: + case INDEX_op_tci_movcond32: + case INDEX_op_movcond: case INDEX_op_setcond2_i32: tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c); info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s", @@ -1259,62 +1035,27 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info) str_r(r3), str_r(r4), str_c(c)); break; - case INDEX_op_mulu2_i32: - case INDEX_op_mulu2_i64: - case INDEX_op_muls2_i32: - case INDEX_op_muls2_i64: + case 
INDEX_op_muls2: + case INDEX_op_mulu2: tci_args_rrrr(insn, &r0, &r1, &r2, &r3); info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", op_name, str_r(r0), str_r(r1), str_r(r2), str_r(r3)); break; - case INDEX_op_add2_i32: - case INDEX_op_add2_i64: - case INDEX_op_sub2_i32: - case INDEX_op_sub2_i64: - tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); - info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s", - op_name, str_r(r0), str_r(r1), str_r(r2), - str_r(r3), str_r(r4), str_r(r5)); + case INDEX_op_qemu_ld: + case INDEX_op_qemu_st: + tci_args_rrm(insn, &r0, &r1, &oi); + info->fprintf_func(info->stream, "%-12s %s, %s, %x", + op_name, str_r(r0), str_r(r1), oi); break; - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_st_a32_i32: - len = 1 + 1; - goto do_qemu_ldst; - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_st_a32_i64: - case INDEX_op_qemu_ld_a64_i32: - case INDEX_op_qemu_st_a64_i32: - len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS); - goto do_qemu_ldst; - case INDEX_op_qemu_ld_a64_i64: - case INDEX_op_qemu_st_a64_i64: - len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS); - goto do_qemu_ldst; - do_qemu_ldst: - switch (len) { - case 2: - tci_args_rrm(insn, &r0, &r1, &oi); - info->fprintf_func(info->stream, "%-12s %s, %s, %x", - op_name, str_r(r0), str_r(r1), oi); - break; - case 3: - tci_args_rrrr(insn, &r0, &r1, &r2, &r3); - info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", - op_name, str_r(r0), str_r(r1), - str_r(r2), str_r(r3)); - break; - case 4: - tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); - info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s", - op_name, str_r(r0), str_r(r1), - str_r(r2), str_r(r3), str_r(r4)); - break; - default: - g_assert_not_reached(); - } + case INDEX_op_qemu_ld2: + case INDEX_op_qemu_st2: + tci_args_rrrr(insn, &r0, &r1, &r2, &r3); + info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", + op_name, str_r(r0), str_r(r1), + str_r(r2), str_r(r3)); break; case 0: diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h new file mode 100644 index 00000000000..ab07ce1fcbc --- /dev/null +++ b/tcg/tci/tcg-target-has.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific opcode support + * Copyright (c) 2009, 2011 Stefan Weil + */ + +#ifndef TCG_TARGET_HAS_H +#define TCG_TARGET_HAS_H + +#if TCG_TARGET_REG_BITS == 64 +#define TCG_TARGET_HAS_extr_i64_i32 0 +#endif /* TCG_TARGET_REG_BITS == 64 */ + +#define TCG_TARGET_HAS_qemu_ldst_i128 0 + +#define TCG_TARGET_HAS_tst 1 + +#define TCG_TARGET_extract_valid(type, ofs, len) 1 +#define TCG_TARGET_sextract_valid(type, ofs, len) 1 +#define TCG_TARGET_deposit_valid(type, ofs, len) 1 + +#endif diff --git a/tcg/tci/tcg-target-mo.h b/tcg/tci/tcg-target-mo.h new file mode 100644 index 00000000000..779872e39a0 --- /dev/null +++ b/tcg/tci/tcg-target-mo.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Define target-specific memory model + * Copyright (c) 2009, 2011 Stefan Weil + */ + +#ifndef TCG_TARGET_MO_H +#define TCG_TARGET_MO_H + +/* + * We could notice __i386__ or __s390x__ and reduce the barriers depending + * on the host. But if you want performance, you use the normal backend. + * We prefer consistency across hosts on this. 
+ */ +#define TCG_TARGET_DEFAULT_MO 0 + +#endif diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc new file mode 100644 index 00000000000..4eb32ed7361 --- /dev/null +++ b/tcg/tci/tcg-target-opc.h.inc @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* These opcodes for use between the tci generator and interpreter. */ +DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT) +DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT) +DEF(tci_setcarry, 0, 0, 0, TCG_OPF_NOT_PRESENT) +DEF(tci_clz32, 1, 2, 0, TCG_OPF_NOT_PRESENT) +DEF(tci_ctz32, 1, 2, 0, TCG_OPF_NOT_PRESENT) +DEF(tci_divs32, 1, 2, 0, TCG_OPF_NOT_PRESENT) +DEF(tci_divu32, 1, 2, 0, TCG_OPF_NOT_PRESENT) +DEF(tci_rems32, 1, 2, 0, TCG_OPF_NOT_PRESENT) +DEF(tci_remu32, 1, 2, 0, TCG_OPF_NOT_PRESENT) +DEF(tci_rotl32, 1, 2, 0, TCG_OPF_NOT_PRESENT) +DEF(tci_rotr32, 1, 2, 0, TCG_OPF_NOT_PRESENT) +DEF(tci_setcond32, 1, 2, 1, TCG_OPF_NOT_PRESENT) +DEF(tci_movcond32, 1, 2, 1, TCG_OPF_NOT_PRESENT) diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc index c740864b96d..35c66a48369 100644 --- a/tcg/tci/tcg-target.c.inc +++ b/tcg/tci/tcg-target.c.inc @@ -22,160 +22,24 @@ * THE SOFTWARE. */ -#include "../tcg-pool.c.inc" - -static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) -{ - switch (op) { - case INDEX_op_goto_ptr: - return C_O0_I1(r); - - case INDEX_op_ld8u_i32: - case INDEX_op_ld8s_i32: - case INDEX_op_ld16u_i32: - case INDEX_op_ld16s_i32: - case INDEX_op_ld_i32: - case INDEX_op_ld8u_i64: - case INDEX_op_ld8s_i64: - case INDEX_op_ld16u_i64: - case INDEX_op_ld16s_i64: - case INDEX_op_ld32u_i64: - case INDEX_op_ld32s_i64: - case INDEX_op_ld_i64: - case INDEX_op_not_i32: - case INDEX_op_not_i64: - case INDEX_op_neg_i32: - case INDEX_op_neg_i64: - case INDEX_op_ext8s_i32: - case INDEX_op_ext8s_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_bswap16_i32: - case INDEX_op_bswap16_i64: - case INDEX_op_bswap32_i32: - case INDEX_op_bswap32_i64: - case INDEX_op_bswap64_i64: - case INDEX_op_extract_i32: - case INDEX_op_extract_i64: - case INDEX_op_sextract_i32: - case INDEX_op_sextract_i64: - case INDEX_op_ctpop_i32: - case INDEX_op_ctpop_i64: - return C_O1_I1(r, r); - - case INDEX_op_st8_i32: - case INDEX_op_st16_i32: - case INDEX_op_st_i32: - case INDEX_op_st8_i64: - case INDEX_op_st16_i64: - case INDEX_op_st32_i64: - case INDEX_op_st_i64: - return C_O0_I2(r, r); - - case INDEX_op_div_i32: - case INDEX_op_div_i64: - case INDEX_op_divu_i32: - case INDEX_op_divu_i64: - case INDEX_op_rem_i32: - case INDEX_op_rem_i64: - case INDEX_op_remu_i32: - case INDEX_op_remu_i64: - case INDEX_op_add_i32: - case INDEX_op_add_i64: - case INDEX_op_sub_i32: - case INDEX_op_sub_i64: - case INDEX_op_mul_i32: - case INDEX_op_mul_i64: - case INDEX_op_and_i32: - case INDEX_op_and_i64: - case INDEX_op_andc_i32: - case INDEX_op_andc_i64: - case INDEX_op_eqv_i32: - case INDEX_op_eqv_i64: - case INDEX_op_nand_i32: - case INDEX_op_nand_i64: - case INDEX_op_nor_i32: - case INDEX_op_nor_i64: - case INDEX_op_or_i32: - case INDEX_op_or_i64: - case INDEX_op_orc_i32: - case INDEX_op_orc_i64: - case INDEX_op_xor_i32: - case INDEX_op_xor_i64: - case INDEX_op_shl_i32: - case INDEX_op_shl_i64: - case INDEX_op_shr_i32: - case INDEX_op_shr_i64: - case INDEX_op_sar_i32: - case INDEX_op_sar_i64: - case INDEX_op_rotl_i32: - 
case INDEX_op_rotl_i64: - case INDEX_op_rotr_i32: - case INDEX_op_rotr_i64: - case INDEX_op_setcond_i32: - case INDEX_op_setcond_i64: - case INDEX_op_deposit_i32: - case INDEX_op_deposit_i64: - case INDEX_op_clz_i32: - case INDEX_op_clz_i64: - case INDEX_op_ctz_i32: - case INDEX_op_ctz_i64: - return C_O1_I2(r, r, r); - - case INDEX_op_brcond_i32: - case INDEX_op_brcond_i64: - return C_O0_I2(r, r); - - case INDEX_op_add2_i32: - case INDEX_op_add2_i64: - case INDEX_op_sub2_i32: - case INDEX_op_sub2_i64: - return C_O2_I4(r, r, r, r, r, r); - +/* Used for function call generation. */ +#define TCG_TARGET_CALL_STACK_OFFSET 0 +#define TCG_TARGET_STACK_ALIGN 8 #if TCG_TARGET_REG_BITS == 32 - case INDEX_op_brcond2_i32: - return C_O0_I4(r, r, r, r); +# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EVEN +# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN +# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN +#else +# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL +# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL +# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL #endif +#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL - case INDEX_op_mulu2_i32: - case INDEX_op_mulu2_i64: - case INDEX_op_muls2_i32: - case INDEX_op_muls2_i64: - return C_O2_I2(r, r, r, r); - - case INDEX_op_movcond_i32: - case INDEX_op_movcond_i64: - case INDEX_op_setcond2_i32: - return C_O1_I4(r, r, r, r, r); - - case INDEX_op_qemu_ld_a32_i32: - return C_O1_I1(r, r); - case INDEX_op_qemu_ld_a64_i32: - return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r); - case INDEX_op_qemu_ld_a32_i64: - return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r); - case INDEX_op_qemu_ld_a64_i64: - return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r); - case INDEX_op_qemu_st_a32_i32: - return C_O0_I2(r, r); - case INDEX_op_qemu_st_a64_i32: - return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r); - case INDEX_op_qemu_st_a32_i64: - return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r); - case INDEX_op_qemu_st_a64_i64: - return TCG_TARGET_REG_BITS == 64 ? 
C_O0_I2(r, r) : C_O0_I4(r, r, r, r); - - default: - g_assert_not_reached(); - } +static TCGConstraintSetIndex +tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) +{ + return C_NotImplemented; } static const int tcg_target_reg_alloc_order[] = { @@ -409,20 +273,6 @@ static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0, tcg_out32(s, insn); } -static void tcg_out_op_rrrrr(TCGContext *s, TCGOpcode op, TCGReg r0, - TCGReg r1, TCGReg r2, TCGReg r3, TCGReg r4) -{ - tcg_insn_unit insn = 0; - - insn = deposit32(insn, 0, 8, op); - insn = deposit32(insn, 8, 4, r0); - insn = deposit32(insn, 12, 4, r1); - insn = deposit32(insn, 16, 4, r2); - insn = deposit32(insn, 20, 4, r3); - insn = deposit32(insn, 24, 4, r4); - tcg_out32(s, insn); -} - static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3) { @@ -452,31 +302,13 @@ static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op, tcg_out32(s, insn); } -static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op, - TCGReg r0, TCGReg r1, TCGReg r2, - TCGReg r3, TCGReg r4, TCGReg r5) -{ - tcg_insn_unit insn = 0; - - insn = deposit32(insn, 0, 8, op); - insn = deposit32(insn, 8, 4, r0); - insn = deposit32(insn, 12, 4, r1); - insn = deposit32(insn, 16, 4, r2); - insn = deposit32(insn, 20, 4, r3); - insn = deposit32(insn, 24, 4, r4); - insn = deposit32(insn, 28, 4, r5); - tcg_out32(s, insn); -} - static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val, TCGReg base, intptr_t offset) { stack_bounds_check(base, offset); if (offset != sextract32(offset, 0, 16)) { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset); - tcg_out_op_rrr(s, (TCG_TARGET_REG_BITS == 32 - ? INDEX_op_add_i32 : INDEX_op_add_i64), - TCG_REG_TMP, TCG_REG_TMP, base); + tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base); base = TCG_REG_TMP; offset = 0; } @@ -486,34 +318,17 @@ static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val, static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base, intptr_t offset) { - switch (type) { - case TCG_TYPE_I32: - tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset); - break; -#if TCG_TARGET_REG_BITS == 64 - case TCG_TYPE_I64: - tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset); - break; -#endif - default: - g_assert_not_reached(); + TCGOpcode op = INDEX_op_ld; + + if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { + op = INDEX_op_ld32u; } + tcg_out_ldst(s, op, val, base, offset); } static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { - switch (type) { - case TCG_TYPE_I32: - tcg_out_op_rr(s, INDEX_op_mov_i32, ret, arg); - break; -#if TCG_TARGET_REG_BITS == 64 - case TCG_TYPE_I64: - tcg_out_op_rr(s, INDEX_op_mov_i64, ret, arg); - break; -#endif - default: - g_assert_not_reached(); - } + tcg_out_op_rr(s, INDEX_op_mov, ret, arg); return true; } @@ -544,76 +359,62 @@ static void tcg_out_movi(TCGContext *s, TCGType type, } } +static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd, + TCGReg rs, unsigned pos, unsigned len) +{ + tcg_out_op_rrbb(s, INDEX_op_extract, rd, rs, pos, len); +} + +static const TCGOutOpExtract outop_extract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tcg_out_extract, +}; + +static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd, + TCGReg rs, unsigned pos, unsigned len) +{ + tcg_out_op_rrbb(s, INDEX_op_sextract, rd, rs, pos, len); +} + +static const TCGOutOpExtract outop_sextract = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tcg_out_sextract, +}; + 
+static const TCGOutOpExtract2 outop_extract2 = { + .base.static_constraint = C_NotImplemented, +}; + static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) { - switch (type) { - case TCG_TYPE_I32: - tcg_debug_assert(TCG_TARGET_HAS_ext8s_i32); - tcg_out_op_rr(s, INDEX_op_ext8s_i32, rd, rs); - break; -#if TCG_TARGET_REG_BITS == 64 - case TCG_TYPE_I64: - tcg_debug_assert(TCG_TARGET_HAS_ext8s_i64); - tcg_out_op_rr(s, INDEX_op_ext8s_i64, rd, rs); - break; -#endif - default: - g_assert_not_reached(); - } + tcg_out_sextract(s, type, rd, rs, 0, 8); } static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs) { - if (TCG_TARGET_REG_BITS == 64) { - tcg_debug_assert(TCG_TARGET_HAS_ext8u_i64); - tcg_out_op_rr(s, INDEX_op_ext8u_i64, rd, rs); - } else { - tcg_debug_assert(TCG_TARGET_HAS_ext8u_i32); - tcg_out_op_rr(s, INDEX_op_ext8u_i32, rd, rs); - } + tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8); } static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) { - switch (type) { - case TCG_TYPE_I32: - tcg_debug_assert(TCG_TARGET_HAS_ext16s_i32); - tcg_out_op_rr(s, INDEX_op_ext16s_i32, rd, rs); - break; -#if TCG_TARGET_REG_BITS == 64 - case TCG_TYPE_I64: - tcg_debug_assert(TCG_TARGET_HAS_ext16s_i64); - tcg_out_op_rr(s, INDEX_op_ext16s_i64, rd, rs); - break; -#endif - default: - g_assert_not_reached(); - } + tcg_out_sextract(s, type, rd, rs, 0, 16); } static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs) { - if (TCG_TARGET_REG_BITS == 64) { - tcg_debug_assert(TCG_TARGET_HAS_ext16u_i64); - tcg_out_op_rr(s, INDEX_op_ext16u_i64, rd, rs); - } else { - tcg_debug_assert(TCG_TARGET_HAS_ext16u_i32); - tcg_out_op_rr(s, INDEX_op_ext16u_i32, rd, rs); - } + tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16); } static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs) { tcg_debug_assert(TCG_TARGET_REG_BITS == 64); - tcg_debug_assert(TCG_TARGET_HAS_ext32s_i64); - tcg_out_op_rr(s, INDEX_op_ext32s_i64, rd, rs); + tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32); } static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs) { tcg_debug_assert(TCG_TARGET_REG_BITS == 64); - tcg_debug_assert(TCG_TARGET_HAS_ext32u_i64); - tcg_out_op_rr(s, INDEX_op_ext32u_i64, rd, rs); + tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32); } static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs) @@ -665,18 +466,6 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func, tcg_out32(s, insn); } -#if TCG_TARGET_REG_BITS == 64 -# define CASE_32_64(x) \ - case glue(glue(INDEX_op_, x), _i64): \ - case glue(glue(INDEX_op_, x), _i32): -# define CASE_64(x) \ - case glue(glue(INDEX_op_, x), _i64): -#else -# define CASE_32_64(x) \ - case glue(glue(INDEX_op_, x), _i32): -# define CASE_64(x) -#endif - static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg) { tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg); @@ -689,221 +478,772 @@ static void tcg_out_goto_tb(TCGContext *s, int which) set_jmp_reset_offset(s, which); } +static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0) +{ + tcg_out_op_r(s, INDEX_op_goto_ptr, a0); +} + void tb_target_set_jmp_target(const TranslationBlock *tb, int n, uintptr_t jmp_rx, uintptr_t jmp_rw) { /* Always indirect, nothing to do */ } -static void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) +static void tgen_add(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) { - TCGOpcode exts; + tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2); +} - switch (opc) { - case 
INDEX_op_goto_ptr: - tcg_out_op_r(s, opc, args[0]); - break; +static const TCGOutOpBinary outop_add = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_add, +}; - case INDEX_op_br: - tcg_out_op_l(s, opc, arg_label(args[0])); - break; +static TCGConstraintSetIndex cset_addsubcarry(TCGType type, unsigned flags) +{ + return type == TCG_TYPE_REG ? C_O1_I2(r, r, r) : C_NotImplemented; +} - CASE_32_64(setcond) - tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]); - break; +static void tgen_addco(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_addco, a0, a1, a2); +} - CASE_32_64(movcond) - case INDEX_op_setcond2_i32: - tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2], - args[3], args[4], args[5]); - break; +static const TCGOutOpBinary outop_addco = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_addsubcarry, + .out_rrr = tgen_addco, +}; - CASE_32_64(ld8u) - CASE_32_64(ld8s) - CASE_32_64(ld16u) - CASE_32_64(ld16s) - case INDEX_op_ld_i32: - CASE_64(ld32u) - CASE_64(ld32s) - CASE_64(ld) - CASE_32_64(st8) - CASE_32_64(st16) - case INDEX_op_st_i32: - CASE_64(st32) - CASE_64(st) - tcg_out_ldst(s, opc, args[0], args[1], args[2]); - break; +static void tgen_addci(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_addci, a0, a1, a2); +} - CASE_32_64(add) - CASE_32_64(sub) - CASE_32_64(mul) - CASE_32_64(and) - CASE_32_64(or) - CASE_32_64(xor) - CASE_32_64(andc) /* Optional (TCG_TARGET_HAS_andc_*). */ - CASE_32_64(orc) /* Optional (TCG_TARGET_HAS_orc_*). */ - CASE_32_64(eqv) /* Optional (TCG_TARGET_HAS_eqv_*). */ - CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */ - CASE_32_64(nor) /* Optional (TCG_TARGET_HAS_nor_*). */ - CASE_32_64(shl) - CASE_32_64(shr) - CASE_32_64(sar) - CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */ - CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */ - CASE_32_64(div) /* Optional (TCG_TARGET_HAS_div_*). */ - CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */ - CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */ - CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */ - CASE_32_64(clz) /* Optional (TCG_TARGET_HAS_clz_*). */ - CASE_32_64(ctz) /* Optional (TCG_TARGET_HAS_ctz_*). */ - tcg_out_op_rrr(s, opc, args[0], args[1], args[2]); - break; +static const TCGOutOpAddSubCarry outop_addci = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_addsubcarry, + .out_rrr = tgen_addci, +}; - CASE_32_64(deposit) /* Optional (TCG_TARGET_HAS_deposit_*). */ - { - TCGArg pos = args[3], len = args[4]; - TCGArg max = opc == INDEX_op_deposit_i32 ? 32 : 64; +static void tgen_addcio(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_addcio, a0, a1, a2); +} - tcg_debug_assert(pos < max); - tcg_debug_assert(pos + len <= max); +static const TCGOutOpBinary outop_addcio = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_addsubcarry, + .out_rrr = tgen_addcio, +}; - tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], pos, len); - } - break; +static void tcg_out_set_carry(TCGContext *s) +{ + tcg_out_op_v(s, INDEX_op_tci_setcarry); +} - CASE_32_64(extract) /* Optional (TCG_TARGET_HAS_extract_*). */ - CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */ - { - TCGArg pos = args[2], len = args[3]; - TCGArg max = tcg_op_defs[opc].flags & TCG_OPF_64BIT ? 
64 : 32; +static void tgen_and(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2); +} - tcg_debug_assert(pos < max); - tcg_debug_assert(pos + len <= max); +static const TCGOutOpBinary outop_and = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_and, +}; - tcg_out_op_rrbb(s, opc, args[0], args[1], pos, len); - } - break; +static void tgen_andc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2); +} - CASE_32_64(brcond) - tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32 - ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64), - TCG_REG_TMP, args[0], args[1], args[2]); - tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3])); - break; +static const TCGOutOpBinary outop_andc = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_andc, +}; - CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */ - CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */ - CASE_32_64(ctpop) /* Optional (TCG_TARGET_HAS_ctpop_*). */ - case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */ - case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */ - tcg_out_op_rr(s, opc, args[0], args[1]); - break; +static void tgen_clz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + TCGOpcode opc = (type == TCG_TYPE_I32 + ? INDEX_op_tci_clz32 + : INDEX_op_clz); + tcg_out_op_rrr(s, opc, a0, a1, a2); +} - case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */ - exts = INDEX_op_ext16s_i32; - goto do_bswap; - case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */ - exts = INDEX_op_ext16s_i64; - goto do_bswap; - case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */ - exts = INDEX_op_ext32s_i64; - do_bswap: - /* The base tci bswaps zero-extend, and ignore high bits. */ - tcg_out_op_rr(s, opc, args[0], args[1]); - if (args[2] & TCG_BSWAP_OS) { - tcg_out_op_rr(s, exts, args[0], args[0]); - } - break; +static const TCGOutOpBinary outop_clz = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_clz, +}; - CASE_32_64(add2) - CASE_32_64(sub2) - tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2], - args[3], args[4], args[5]); - break; +static void tgen_ctz(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + TCGOpcode opc = (type == TCG_TYPE_I32 + ? INDEX_op_tci_ctz32 + : INDEX_op_ctz); + tcg_out_op_rrr(s, opc, a0, a1, a2); +} -#if TCG_TARGET_REG_BITS == 32 - case INDEX_op_brcond2_i32: - tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP, - args[0], args[1], args[2], args[3], args[4]); - tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5])); - break; +static const TCGOutOpBinary outop_ctz = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_ctz, +}; + +static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, + TCGReg a2, unsigned ofs, unsigned len) +{ + tcg_out_op_rrrbb(s, INDEX_op_deposit, a0, a1, a2, ofs, len); +} + +static const TCGOutOpDeposit outop_deposit = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_deposit, +}; + +static void tgen_divs(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + TCGOpcode opc = (type == TCG_TYPE_I32 + ? 
INDEX_op_tci_divs32 + : INDEX_op_divs); + tcg_out_op_rrr(s, opc, a0, a1, a2); +} + +static const TCGOutOpBinary outop_divs = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_divs, +}; + +static const TCGOutOpDivRem outop_divs2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_divu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + TCGOpcode opc = (type == TCG_TYPE_I32 + ? INDEX_op_tci_divu32 + : INDEX_op_divu); + tcg_out_op_rrr(s, opc, a0, a1, a2); +} + +static const TCGOutOpBinary outop_divu = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_divu, +}; + +static const TCGOutOpDivRem outop_divu2 = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_eqv(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2); +} + +static const TCGOutOpBinary outop_eqv = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_eqv, +}; + +#if TCG_TARGET_REG_BITS == 64 +static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) +{ + tcg_out_extract(s, TCG_TYPE_I64, a0, a1, 32, 32); +} + +static const TCGOutOpUnary outop_extrh_i64_i32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_extrh_i64_i32, +}; #endif - CASE_32_64(mulu2) - CASE_32_64(muls2) - tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]); - break; +static void tgen_mul(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2); +} - case INDEX_op_qemu_ld_a32_i32: - case INDEX_op_qemu_st_a32_i32: - tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); - break; - case INDEX_op_qemu_ld_a64_i32: - case INDEX_op_qemu_st_a64_i32: - case INDEX_op_qemu_ld_a32_i64: - case INDEX_op_qemu_st_a32_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); - } else { - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]); - tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP); - } - break; - case INDEX_op_qemu_ld_a64_i64: - case INDEX_op_qemu_st_a64_i64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); - } else { - tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[4]); - tcg_out_op_rrrrr(s, opc, args[0], args[1], - args[2], args[3], TCG_REG_TMP); - } - break; +static const TCGOutOpBinary outop_mul = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_mul, +}; - case INDEX_op_mb: - tcg_out_op_v(s, opc); - break; +static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags) +{ + return type == TCG_TYPE_REG ? C_O2_I2(r, r, r, r) : C_NotImplemented; +} - case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ - case INDEX_op_mov_i64: - case INDEX_op_call: /* Always emitted via tcg_out_call. */ - case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ - case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ - case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. 
*/ - case INDEX_op_ext8s_i64: - case INDEX_op_ext8u_i32: - case INDEX_op_ext8u_i64: - case INDEX_op_ext16s_i32: - case INDEX_op_ext16s_i64: - case INDEX_op_ext16u_i32: - case INDEX_op_ext16u_i64: - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extrl_i64_i32: - default: - g_assert_not_reached(); +static void tgen_muls2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3) +{ + tcg_out_op_rrrr(s, INDEX_op_muls2, a0, a1, a2, a3); +} + +static const TCGOutOpMul2 outop_muls2 = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mul2, + .out_rrrr = tgen_muls2, +}; + +static const TCGOutOpBinary outop_mulsh = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_mulu2(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3) +{ + tcg_out_op_rrrr(s, INDEX_op_mulu2, a0, a1, a2, a3); +} + +static const TCGOutOpMul2 outop_mulu2 = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_mul2, + .out_rrrr = tgen_mulu2, +}; + +static const TCGOutOpBinary outop_muluh = { + .base.static_constraint = C_NotImplemented, +}; + +static void tgen_nand(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2); +} + +static const TCGOutOpBinary outop_nand = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_nand, +}; + +static void tgen_nor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2); +} + +static const TCGOutOpBinary outop_nor = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_nor, +}; + +static void tgen_or(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2); +} + +static const TCGOutOpBinary outop_or = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_or, +}; + +static void tgen_orc(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2); +} + +static const TCGOutOpBinary outop_orc = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_orc, +}; + +static void tgen_rems(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + TCGOpcode opc = (type == TCG_TYPE_I32 + ? INDEX_op_tci_rems32 + : INDEX_op_rems); + tcg_out_op_rrr(s, opc, a0, a1, a2); +} + +static const TCGOutOpBinary outop_rems = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_rems, +}; + +static void tgen_remu(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + TCGOpcode opc = (type == TCG_TYPE_I32 + ? INDEX_op_tci_remu32 + : INDEX_op_remu); + tcg_out_op_rrr(s, opc, a0, a1, a2); +} + +static const TCGOutOpBinary outop_remu = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_remu, +}; + +static void tgen_rotl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + TCGOpcode opc = (type == TCG_TYPE_I32 + ? INDEX_op_tci_rotl32 + : INDEX_op_rotl); + tcg_out_op_rrr(s, opc, a0, a1, a2); +} + +static const TCGOutOpBinary outop_rotl = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_rotl, +}; + +static void tgen_rotr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + TCGOpcode opc = (type == TCG_TYPE_I32 + ? 
INDEX_op_tci_rotr32 + : INDEX_op_rotr); + tcg_out_op_rrr(s, opc, a0, a1, a2); +} + +static const TCGOutOpBinary outop_rotr = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_rotr, +}; + +static void tgen_sar(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type < TCG_TYPE_REG) { + tcg_out_ext32s(s, TCG_REG_TMP, a1); + a1 = TCG_REG_TMP; } + tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2); } -static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base, - intptr_t offset) +static const TCGOutOpBinary outop_sar = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_sar, +}; + +static void tgen_shl(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) { - switch (type) { - case TCG_TYPE_I32: - tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset); - break; + tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2); +} + +static const TCGOutOpBinary outop_shl = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_shl, +}; + +static void tgen_shr(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + if (type < TCG_TYPE_REG) { + tcg_out_ext32u(s, TCG_REG_TMP, a1); + a1 = TCG_REG_TMP; + } + tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2); +} + +static const TCGOutOpBinary outop_shr = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_shr, +}; + +static void tgen_sub(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2); +} + +static const TCGOutOpSubtract outop_sub = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_sub, +}; + +static void tgen_subbo(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_subbo, a0, a1, a2); +} + +static const TCGOutOpAddSubCarry outop_subbo = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_addsubcarry, + .out_rrr = tgen_subbo, +}; + +static void tgen_subbi(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_subbi, a0, a1, a2); +} + +static const TCGOutOpAddSubCarry outop_subbi = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_addsubcarry, + .out_rrr = tgen_subbi, +}; + +static void tgen_subbio(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_subbio, a0, a1, a2); +} + +static const TCGOutOpAddSubCarry outop_subbio = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_addsubcarry, + .out_rrr = tgen_subbio, +}; + +static void tcg_out_set_borrow(TCGContext *s) +{ + tcg_out_op_v(s, INDEX_op_tci_setcarry); /* borrow == carry */ +} + +static void tgen_xor(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, TCGReg a2) +{ + tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2); +} + +static const TCGOutOpBinary outop_xor = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_xor, +}; + +static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tcg_out_op_rr(s, INDEX_op_ctpop, a0, a1); +} + +static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags) +{ + return type == TCG_TYPE_REG ? 
C_O1_I1(r, r) : C_NotImplemented; +} + +static const TCGOutOpUnary outop_ctpop = { + .base.static_constraint = C_Dynamic, + .base.dynamic_constraint = cset_ctpop, + .out_rr = tgen_ctpop, +}; + +static void tgen_bswap16(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, unsigned flags) +{ + tcg_out_op_rr(s, INDEX_op_bswap16, a0, a1); + if (flags & TCG_BSWAP_OS) { + tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 16); + } +} + +static const TCGOutOpBswap outop_bswap16 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap16, +}; + +static void tgen_bswap32(TCGContext *s, TCGType type, + TCGReg a0, TCGReg a1, unsigned flags) +{ + tcg_out_op_rr(s, INDEX_op_bswap32, a0, a1); + if (flags & TCG_BSWAP_OS) { + tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 32); + } +} + +static const TCGOutOpBswap outop_bswap32 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap32, +}; + #if TCG_TARGET_REG_BITS == 64 - case TCG_TYPE_I64: - tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset); - break; +static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tcg_out_op_rr(s, INDEX_op_bswap64, a0, a1); +} + +static const TCGOutOpUnary outop_bswap64 = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_bswap64, +}; #endif - default: - g_assert_not_reached(); + +static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tcg_out_op_rr(s, INDEX_op_neg, a0, a1); +} + +static const TCGOutOpUnary outop_neg = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_neg, +}; + +static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) +{ + tcg_out_op_rr(s, INDEX_op_not, a0, a1); +} + +static const TCGOutOpUnary outop_not = { + .base.static_constraint = C_O1_I1(r, r), + .out_rr = tgen_not, +}; + +static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) +{ + TCGOpcode opc = (type == TCG_TYPE_I32 + ? INDEX_op_tci_setcond32 + : INDEX_op_setcond); + tcg_out_op_rrrc(s, opc, dest, arg1, arg2, cond); +} + +static const TCGOutOpSetcond outop_setcond = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_setcond, +}; + +static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg arg1, TCGReg arg2) +{ + tgen_setcond(s, type, cond, dest, arg1, arg2); + tgen_neg(s, type, dest, dest); +} + +static const TCGOutOpSetcond outop_negsetcond = { + .base.static_constraint = C_O1_I2(r, r, r), + .out_rrr = tgen_negsetcond, +}; + +static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg arg0, TCGReg arg1, TCGLabel *l) +{ + tgen_setcond(s, type, cond, TCG_REG_TMP, arg0, arg1); + tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l); +} + +static const TCGOutOpBrcond outop_brcond = { + .base.static_constraint = C_O0_I2(r, r), + .out_rr = tgen_brcond, +}; + +static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2, + TCGArg vt, bool const_vt, TCGArg vf, bool consf_vf) +{ + TCGOpcode opc = (type == TCG_TYPE_I32 + ? 
INDEX_op_tci_movcond32 + : INDEX_op_movcond); + tcg_out_op_rrrrrc(s, opc, ret, c1, c2, vt, vf, cond); +} + +static const TCGOutOpMovcond outop_movcond = { + .base.static_constraint = C_O1_I4(r, r, r, r, r), + .out = tgen_movcond, +}; + +static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, + TCGArg bl, bool const_bl, + TCGArg bh, bool const_bh, TCGLabel *l) +{ + tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP, + al, ah, bl, bh, cond); + tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l); +} + +#if TCG_TARGET_REG_BITS != 32 +__attribute__((unused)) +#endif +static const TCGOutOpBrcond2 outop_brcond2 = { + .base.static_constraint = C_O0_I4(r, r, r, r), + .out = tgen_brcond2, +}; + +static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg al, TCGReg ah, + TCGArg bl, bool const_bl, + TCGArg bh, bool const_bh) +{ + tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, ret, al, ah, bl, bh, cond); +} + +#if TCG_TARGET_REG_BITS != 32 +__attribute__((unused)) +#endif +static const TCGOutOpSetcond2 outop_setcond2 = { + .base.static_constraint = C_O1_I4(r, r, r, r, r), + .out = tgen_setcond2, +}; + +static void tcg_out_mb(TCGContext *s, unsigned a0) +{ + tcg_out_op_v(s, INDEX_op_mb); +} + +static void tcg_out_br(TCGContext *s, TCGLabel *l) +{ + tcg_out_op_l(s, INDEX_op_br, l); +} + +static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, INDEX_op_ld8u, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld8u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8u, +}; + +static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, INDEX_op_ld8s, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld8s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld8s, +}; + +static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, INDEX_op_ld16u, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld16u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16u, +}; + +static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, INDEX_op_ld16s, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld16s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld16s, +}; + +#if TCG_TARGET_REG_BITS == 64 +static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, INDEX_op_ld32u, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld32u = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32u, +}; + +static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, INDEX_op_ld32s, dest, base, offset); +} + +static const TCGOutOpLoad outop_ld32s = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_ld32s, +}; +#endif + +static void tgen_st8(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, INDEX_op_st8, data, base, offset); +} + +static const TCGOutOpStore outop_st8 = { + .base.static_constraint = C_O0_I2(r, r), + .out_r = tgen_st8, +}; + +static void tgen_st16(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, ptrdiff_t offset) +{ + tcg_out_ldst(s, INDEX_op_st16, data, base, offset); +} + +static const TCGOutOpStore outop_st16 = { + .base.static_constraint = C_O0_I2(r, r), + .out_r = tgen_st16, +}; + 
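Illustration, not part of the patch: the register-operand emitters used throughout this backend (tcg_out_op_rr, _rrr, _rrrr, _rrrrrc, ...) pack one TCI instruction into a single 32-bit word, with the opcode in the low 8 bits and successive 4-bit operand fields above it, as visible in the removed tcg_out_op_rrrrr helper earlier in this file's diff. A self-contained sketch of that packing follows; deposit32 is re-implemented only to keep the example standalone, and the opcode and register numbers are made-up values:

/*
 * Sketch only: packing a three-register TCI instruction word.
 * deposit32 mirrors QEMU's helper from include/qemu/bitops.h; the opcode
 * and register numbers are arbitrary example values.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t deposit32(uint32_t value, int start, int length, uint32_t field)
{
    uint32_t mask = (~0u >> (32 - length)) << start;
    return (value & ~mask) | ((field << start) & mask);
}

int main(void)
{
    uint32_t op = 0x2a;                 /* example opcode number */
    uint32_t r0 = 3, r1 = 4, r2 = 5;    /* example register numbers */
    uint32_t insn = 0;

    insn = deposit32(insn, 0, 8, op);    /* bits  0..7 : opcode  */
    insn = deposit32(insn, 8, 4, r0);    /* bits  8..11: output  */
    insn = deposit32(insn, 12, 4, r1);   /* bits 12..15: input 1 */
    insn = deposit32(insn, 16, 4, r2);   /* bits 16..19: input 2 */

    printf("encoded insn word: 0x%08x\n", insn);    /* prints 0x0005432a */
    return 0;
}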
+static const TCGOutOpStore outop_st = { + .base.static_constraint = C_O0_I2(r, r), + .out_r = tcg_out_st, +}; + +static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data, + TCGReg addr, MemOpIdx oi) +{ + tcg_out_op_rrm(s, INDEX_op_qemu_ld, data, addr, oi); +} + +static const TCGOutOpQemuLdSt outop_qemu_ld = { + .base.static_constraint = C_O1_I1(r, r), + .out = tgen_qemu_ld, +}; + +static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr, MemOpIdx oi) +{ + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, oi); + tcg_out_op_rrrr(s, INDEX_op_qemu_ld2, datalo, datahi, addr, TCG_REG_TMP); +} + +static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = { + .base.static_constraint = + TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O2_I1(r, r, r), + .out = + TCG_TARGET_REG_BITS == 64 ? NULL : tgen_qemu_ld2, +}; + +static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data, + TCGReg addr, MemOpIdx oi) +{ + tcg_out_op_rrm(s, INDEX_op_qemu_st, data, addr, oi); +} + +static const TCGOutOpQemuLdSt outop_qemu_st = { + .base.static_constraint = C_O0_I2(r, r), + .out = tgen_qemu_st, +}; + +static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo, + TCGReg datahi, TCGReg addr, MemOpIdx oi) +{ + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, oi); + tcg_out_op_rrrr(s, INDEX_op_qemu_st2, datalo, datahi, addr, TCG_REG_TMP); +} + +static const TCGOutOpQemuLdSt2 outop_qemu_st2 = { + .base.static_constraint = + TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O0_I3(r, r, r), + .out = + TCG_TARGET_REG_BITS == 64 ? NULL : tgen_qemu_st2, +}; + +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base, + intptr_t offset) +{ + TCGOpcode op = INDEX_op_st; + + if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { + op = INDEX_op_st32; } + tcg_out_ldst(s, op, val, base, offset); } static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, @@ -965,3 +1305,13 @@ bool tcg_target_has_memory_bswap(MemOp memop) { return true; } + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + g_assert_not_reached(); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + g_assert_not_reached(); +} diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h index a076f401d21..bd03aa1bc4a 100644 --- a/tcg/tci/tcg-target.h +++ b/tcg/tci/tcg-target.h @@ -44,81 +44,6 @@ #define TCG_TARGET_INSN_UNIT_SIZE 4 #define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) -/* Optional instructions. 
*/ - -#define TCG_TARGET_HAS_bswap16_i32 1 -#define TCG_TARGET_HAS_bswap32_i32 1 -#define TCG_TARGET_HAS_div_i32 1 -#define TCG_TARGET_HAS_rem_i32 1 -#define TCG_TARGET_HAS_ext8s_i32 1 -#define TCG_TARGET_HAS_ext16s_i32 1 -#define TCG_TARGET_HAS_ext8u_i32 1 -#define TCG_TARGET_HAS_ext16u_i32 1 -#define TCG_TARGET_HAS_andc_i32 1 -#define TCG_TARGET_HAS_deposit_i32 1 -#define TCG_TARGET_HAS_extract_i32 1 -#define TCG_TARGET_HAS_sextract_i32 1 -#define TCG_TARGET_HAS_extract2_i32 0 -#define TCG_TARGET_HAS_eqv_i32 1 -#define TCG_TARGET_HAS_nand_i32 1 -#define TCG_TARGET_HAS_nor_i32 1 -#define TCG_TARGET_HAS_clz_i32 1 -#define TCG_TARGET_HAS_ctz_i32 1 -#define TCG_TARGET_HAS_ctpop_i32 1 -#define TCG_TARGET_HAS_not_i32 1 -#define TCG_TARGET_HAS_orc_i32 1 -#define TCG_TARGET_HAS_rot_i32 1 -#define TCG_TARGET_HAS_negsetcond_i32 0 -#define TCG_TARGET_HAS_muls2_i32 1 -#define TCG_TARGET_HAS_muluh_i32 0 -#define TCG_TARGET_HAS_mulsh_i32 0 -#define TCG_TARGET_HAS_qemu_st8_i32 0 - -#if TCG_TARGET_REG_BITS == 64 -#define TCG_TARGET_HAS_extr_i64_i32 0 -#define TCG_TARGET_HAS_bswap16_i64 1 -#define TCG_TARGET_HAS_bswap32_i64 1 -#define TCG_TARGET_HAS_bswap64_i64 1 -#define TCG_TARGET_HAS_deposit_i64 1 -#define TCG_TARGET_HAS_extract_i64 1 -#define TCG_TARGET_HAS_sextract_i64 1 -#define TCG_TARGET_HAS_extract2_i64 0 -#define TCG_TARGET_HAS_div_i64 1 -#define TCG_TARGET_HAS_rem_i64 1 -#define TCG_TARGET_HAS_ext8s_i64 1 -#define TCG_TARGET_HAS_ext16s_i64 1 -#define TCG_TARGET_HAS_ext32s_i64 1 -#define TCG_TARGET_HAS_ext8u_i64 1 -#define TCG_TARGET_HAS_ext16u_i64 1 -#define TCG_TARGET_HAS_ext32u_i64 1 -#define TCG_TARGET_HAS_andc_i64 1 -#define TCG_TARGET_HAS_eqv_i64 1 -#define TCG_TARGET_HAS_nand_i64 1 -#define TCG_TARGET_HAS_nor_i64 1 -#define TCG_TARGET_HAS_clz_i64 1 -#define TCG_TARGET_HAS_ctz_i64 1 -#define TCG_TARGET_HAS_ctpop_i64 1 -#define TCG_TARGET_HAS_not_i64 1 -#define TCG_TARGET_HAS_orc_i64 1 -#define TCG_TARGET_HAS_rot_i64 1 -#define TCG_TARGET_HAS_negsetcond_i64 0 -#define TCG_TARGET_HAS_muls2_i64 1 -#define TCG_TARGET_HAS_add2_i32 1 -#define TCG_TARGET_HAS_sub2_i32 1 -#define TCG_TARGET_HAS_mulu2_i32 1 -#define TCG_TARGET_HAS_add2_i64 1 -#define TCG_TARGET_HAS_sub2_i64 1 -#define TCG_TARGET_HAS_mulu2_i64 1 -#define TCG_TARGET_HAS_muluh_i64 0 -#define TCG_TARGET_HAS_mulsh_i64 0 -#else -#define TCG_TARGET_HAS_mulu2_i32 1 -#endif /* TCG_TARGET_REG_BITS == 64 */ - -#define TCG_TARGET_HAS_qemu_ldst_i128 0 - -#define TCG_TARGET_HAS_tst 1 - /* Number of registers available. */ #define TCG_TARGET_NB_REGS 16 @@ -146,26 +71,6 @@ typedef enum { TCG_REG_CALL_STACK = TCG_REG_R15, } TCGReg; -/* Used for function call generation. */ -#define TCG_TARGET_CALL_STACK_OFFSET 0 -#define TCG_TARGET_STACK_ALIGN 8 -#if TCG_TARGET_REG_BITS == 32 -# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EVEN -# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN -# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN -#else -# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL -# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL -# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL -#endif -#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL - #define HAVE_TCG_QEMU_TB_EXEC -#define TCG_TARGET_NEED_POOL_LABELS - -/* We could notice __i386__ or __s390x__ and reduce the barriers depending - on the host. But if you want performance, you use the normal backend. - We prefer consistency across hosts on this. 
*/ -#define TCG_TARGET_DEFAULT_MO (0) #endif /* TCG_TARGET_H */ diff --git a/tests/Makefile.include b/tests/Makefile.include index 6618bfed702..23fb722d426 100644 --- a/tests/Makefile.include +++ b/tests/Makefile.include @@ -3,28 +3,28 @@ .PHONY: check-help check-help: @echo "Regression testing targets:" - @echo " $(MAKE) check Run block, qapi-schema, unit, softfloat, qtest and decodetree tests" - @echo " $(MAKE) bench Run speed tests" + @echo " $(MAKE) check Run block, qapi-schema, unit, softfloat, qtest and decodetree tests" + @echo " $(MAKE) bench Run speed tests" @echo @echo "Individual test suites:" - @echo " $(MAKE) check-qtest-TARGET Run qtest tests for given target" - @echo " $(MAKE) check-qtest Run qtest tests" - @echo " $(MAKE) check-unit Run qobject tests" - @echo " $(MAKE) check-qapi-schema Run QAPI schema tests" - @echo " $(MAKE) check-block Run block tests" + @echo " $(MAKE) check-qtest-TARGET Run qtest tests for given target" + @echo " $(MAKE) check-qtest Run qtest tests" + @echo " $(MAKE) check-functional Run python-based functional tests" + @echo " $(MAKE) check-functional-TARGET Run functional tests for a given target" + @echo " $(MAKE) check-unit Run qobject tests" + @echo " $(MAKE) check-qapi-schema Run QAPI schema tests" + @echo " $(MAKE) check-block Run block tests" ifneq ($(filter $(all-check-targets), check-softfloat),) - @echo " $(MAKE) check-tcg Run TCG tests" - @echo " $(MAKE) check-softfloat Run FPU emulation tests" + @echo " $(MAKE) check-tcg Run TCG tests" + @echo " $(MAKE) check-softfloat Run FPU emulation tests" endif - @echo " $(MAKE) check-avocado Run avocado (integration) tests for currently configured targets" @echo - @echo " $(MAKE) check-report.junit.xml Generates an aggregated XML test report" - @echo " $(MAKE) check-venv Creates a Python venv for tests" - @echo " $(MAKE) check-clean Clean the tests and related data" + @echo " $(MAKE) check-report.junit.xml Generates an aggregated XML test report" + @echo " $(MAKE) check-venv Creates a Python venv for tests" + @echo " $(MAKE) check-clean Clean the tests and related data" @echo @echo "The following are useful for CI builds" - @echo " $(MAKE) check-build Build most test binaries" - @echo " $(MAKE) get-vm-images Downloads all images used by avocado tests, according to configured targets (~350 MB each, 1.5 GB max)" + @echo " $(MAKE) check-build Build most test binaries" @echo @echo @echo "The variable SPEED can be set to control the gtester speed setting." @@ -84,26 +84,12 @@ distclean-tcg: $(DISTCLEAN_TCG_TARGET_RULES) # Python venv for running tests -.PHONY: check-venv check-avocado check-acceptance check-acceptance-deprecated-warning +.PHONY: check-venv # Build up our target list from the filtered list of ninja targets TARGETS=$(patsubst libqemu-%.a, %, $(filter libqemu-%.a, $(ninja-targets))) TESTS_VENV_TOKEN=$(BUILD_DIR)/pyvenv/tests.group -TESTS_RESULTS_DIR=$(BUILD_DIR)/tests/results -ifndef AVOCADO_TESTS - AVOCADO_TESTS=tests/avocado -endif -# Controls the output generated by Avocado when running tests. -# Any number of command separated loggers are accepted. For more -# information please refer to "avocado --help". 
-AVOCADO_SHOW?=app -ifndef AVOCADO_TAGS - AVOCADO_CMDLINE_TAGS=$(patsubst %-softmmu,-t arch:%, \ - $(filter %-softmmu,$(TARGETS))) -else - AVOCADO_CMDLINE_TAGS=$(addprefix -t , $(AVOCADO_TAGS)) -endif quiet-venv-pip = $(quiet-@)$(call quiet-command-run, \ $(PYTHON) -m pip -q --disable-pip-version-check $1, \ @@ -111,56 +97,30 @@ quiet-venv-pip = $(quiet-@)$(call quiet-command-run, \ $(TESTS_VENV_TOKEN): $(SRC_PATH)/pythondeps.toml $(call quiet-venv-pip,install -e "$(SRC_PATH)/python/") - $(MKVENV_ENSUREGROUP) $< avocado + $(MKVENV_ENSUREGROUP) $< testdeps $(call quiet-command, touch $@) -$(TESTS_RESULTS_DIR): - $(call quiet-command, mkdir -p $@, \ - MKDIR, $@) - check-venv: $(TESTS_VENV_TOKEN) -FEDORA_31_ARCHES_TARGETS=$(patsubst %-softmmu,%, $(filter %-softmmu,$(TARGETS))) -FEDORA_31_ARCHES_CANDIDATES=$(patsubst ppc64,ppc64le,$(FEDORA_31_ARCHES_TARGETS)) -FEDORA_31_ARCHES := x86_64 aarch64 ppc64le s390x -FEDORA_31_DOWNLOAD=$(filter $(FEDORA_31_ARCHES),$(FEDORA_31_ARCHES_CANDIDATES)) - -# download one specific Fedora 31 image -get-vm-image-fedora-31-%: check-venv - $(call quiet-command, \ - $(PYTHON) -m avocado vmimage get \ - --distro=fedora --distro-version=31 --arch=$*, \ - "AVOCADO", "Downloading avocado tests VM image for $*") - -# download all vm images, according to defined targets -get-vm-images: check-venv $(patsubst %,get-vm-image-fedora-31-%, $(FEDORA_31_DOWNLOAD)) - -check-avocado: check-venv $(TESTS_RESULTS_DIR) get-vm-images - $(call quiet-command, \ - $(PYTHON) -m avocado \ - --show=$(AVOCADO_SHOW) run --job-results-dir=$(TESTS_RESULTS_DIR) \ - $(if $(AVOCADO_TAGS),, --filter-by-tags-include-empty \ - --filter-by-tags-include-empty-key) \ - $(AVOCADO_CMDLINE_TAGS) \ - $(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS), \ - "AVOCADO", "tests/avocado") - -check-acceptance-deprecated-warning: - @echo - @echo "Note '$(MAKE) check-acceptance' is deprecated, use '$(MAKE) check-avocado' instead." - @echo +FUNCTIONAL_TARGETS=$(patsubst %-softmmu,check-functional-%, $(filter %-softmmu,$(TARGETS))) +.PHONY: $(FUNCTIONAL_TARGETS) +$(FUNCTIONAL_TARGETS): + @$(MAKE) SPEED=thorough $(subst -functional,-func,$@) -check-acceptance: check-acceptance-deprecated-warning | check-avocado +.PHONY: check-functional +check-functional: + @$(NINJA) precache-functional + @QEMU_TEST_NO_DOWNLOAD=1 $(MAKE) SPEED=thorough check-func check-func-quick # Consolidated targets -.PHONY: check check-clean get-vm-images +.PHONY: check check-clean check: check-build: run-ninja check-clean: - rm -rf $(TESTS_RESULTS_DIR) + rm -rf $(BUILD_DIR)/tests/functional clean: check-clean clean-tcg distclean: distclean-tcg diff --git a/tests/avocado/README.rst b/tests/avocado/README.rst deleted file mode 100644 index 94488371bbe..00000000000 --- a/tests/avocado/README.rst +++ /dev/null @@ -1,10 +0,0 @@ -============================================= -Integration tests using the Avocado Framework -============================================= - -This directory contains integration tests. They're usually higher -level, and may interact with external resources and with various -guest operating systems. - -For more information, please refer to ``docs/devel/testing.rst``, -section "Integration tests using the Avocado Framework". 
diff --git a/tests/avocado/acpi-bits.py b/tests/avocado/acpi-bits.py deleted file mode 100644 index efe4f52ee05..00000000000 --- a/tests/avocado/acpi-bits.py +++ /dev/null @@ -1,409 +0,0 @@ -#!/usr/bin/env python3 -# group: rw quick -# Exercise QEMU generated ACPI/SMBIOS tables using biosbits, -# https://biosbits.org/ -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -# -# Author: -# Ani Sinha - -# pylint: disable=invalid-name -# pylint: disable=consider-using-f-string - -""" -This is QEMU ACPI/SMBIOS avocado tests using biosbits. -Biosbits is available originally at https://biosbits.org/. -This test uses a fork of the upstream bits and has numerous fixes -including an upgraded acpica. The fork is located here: -https://gitlab.com/qemu-project/biosbits-bits . -""" - -import logging -import os -import platform -import re -import shutil -import subprocess -import tarfile -import tempfile -import time -import zipfile -from typing import ( - List, - Optional, - Sequence, -) -from qemu.machine import QEMUMachine -from avocado import skipIf -from avocado.utils import datadrainer as drainer -from avocado_qemu import QemuBaseTest - -deps = ["xorriso", "mformat"] # dependent tools needed in the test setup/box. -supported_platforms = ['x86_64'] # supported test platforms. - -# default timeout of 120 secs is sometimes not enough for bits test. -BITS_TIMEOUT = 200 - -def which(tool): - """ looks up the full path for @tool, returns None if not found - or if @tool does not have executable permissions. - """ - paths=os.getenv('PATH') - for p in paths.split(os.path.pathsep): - p = os.path.join(p, tool) - if os.path.exists(p) and os.access(p, os.X_OK): - return p - return None - -def missing_deps(): - """ returns True if any of the test dependent tools are absent. - """ - for dep in deps: - if which(dep) is None: - return True - return False - -def supported_platform(): - """ checks if the test is running on a supported platform. - """ - return platform.machine() in supported_platforms - -class QEMUBitsMachine(QEMUMachine): # pylint: disable=too-few-public-methods - """ - A QEMU VM, with isa-debugcon enabled and bits iso passed - using -cdrom to QEMU commandline. 
- - """ - def __init__(self, - binary: str, - args: Sequence[str] = (), - wrapper: Sequence[str] = (), - name: Optional[str] = None, - base_temp_dir: str = "/var/tmp", - debugcon_log: str = "debugcon-log.txt", - debugcon_addr: str = "0x403", - qmp_timer: Optional[float] = None): - # pylint: disable=too-many-arguments - - if name is None: - name = "qemu-bits-%d" % os.getpid() - super().__init__(binary, args, wrapper=wrapper, name=name, - base_temp_dir=base_temp_dir, - qmp_timer=qmp_timer) - self.debugcon_log = debugcon_log - self.debugcon_addr = debugcon_addr - self.base_temp_dir = base_temp_dir - - @property - def _base_args(self) -> List[str]: - args = super()._base_args - args.extend([ - '-chardev', - 'file,path=%s,id=debugcon' %os.path.join(self.base_temp_dir, - self.debugcon_log), - '-device', - 'isa-debugcon,iobase=%s,chardev=debugcon' %self.debugcon_addr, - ]) - return args - - def base_args(self): - """return the base argument to QEMU binary""" - return self._base_args - -@skipIf(not supported_platform() or missing_deps(), - 'unsupported platform or dependencies (%s) not installed' \ - % ','.join(deps)) -class AcpiBitsTest(QemuBaseTest): #pylint: disable=too-many-instance-attributes - """ - ACPI and SMBIOS tests using biosbits. - - :avocado: tags=arch:x86_64 - :avocado: tags=acpi - - """ - # in slower systems the test can take as long as 3 minutes to complete. - timeout = BITS_TIMEOUT - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._vm = None - self._workDir = None - self._baseDir = None - - # following are some standard configuration constants - self._bitsInternalVer = 2020 # gitlab CI does shallow clones of depth 20 - self._bitsCommitHash = 'c7920d2b' # commit hash must match - # the artifact tag below - self._bitsTag = "qemu-bits-10262023" # this is the latest bits - # release as of today. - self._bitsArtSHA1Hash = 'b22cdfcfc7453875297d06d626f5474ee36a343f' - self._bitsArtURL = ("https://gitlab.com/qemu-project/" - "biosbits-bits/-/jobs/artifacts/%s/" - "download?job=qemu-bits-build" %self._bitsTag) - self._debugcon_addr = '0x403' - self._debugcon_log = 'debugcon-log.txt' - logging.basicConfig(level=logging.INFO) - self.logger = logging.getLogger('acpi-bits') - - def _print_log(self, log): - self.logger.info('\nlogs from biosbits follows:') - self.logger.info('==========================================\n') - self.logger.info(log) - self.logger.info('==========================================\n') - - def copy_bits_config(self): - """ copies the bios bits config file into bits. - """ - config_file = 'bits-cfg.txt' - bits_config_dir = os.path.join(self._baseDir, 'acpi-bits', - 'bits-config') - target_config_dir = os.path.join(self._workDir, - 'bits-%d' %self._bitsInternalVer, - 'boot') - self.assertTrue(os.path.exists(bits_config_dir)) - self.assertTrue(os.path.exists(target_config_dir)) - self.assertTrue(os.access(os.path.join(bits_config_dir, - config_file), os.R_OK)) - shutil.copy2(os.path.join(bits_config_dir, config_file), - target_config_dir) - self.logger.info('copied config file %s to %s', - config_file, target_config_dir) - - def copy_test_scripts(self): - """copies the python test scripts into bits. 
""" - - bits_test_dir = os.path.join(self._baseDir, 'acpi-bits', - 'bits-tests') - target_test_dir = os.path.join(self._workDir, - 'bits-%d' %self._bitsInternalVer, - 'boot', 'python') - - self.assertTrue(os.path.exists(bits_test_dir)) - self.assertTrue(os.path.exists(target_test_dir)) - - for filename in os.listdir(bits_test_dir): - if os.path.isfile(os.path.join(bits_test_dir, filename)) and \ - filename.endswith('.py2'): - # all test scripts are named with extension .py2 so that - # avocado does not try to load them. These scripts are - # written for python 2.7 not python 3 and hence if avocado - # loaded them, it would complain about python 3 specific - # syntaxes. - newfilename = os.path.splitext(filename)[0] + '.py' - shutil.copy2(os.path.join(bits_test_dir, filename), - os.path.join(target_test_dir, newfilename)) - self.logger.info('copied test file %s to %s', - filename, target_test_dir) - - # now remove the pyc test file if it exists, otherwise the - # changes in the python test script won't be executed. - testfile_pyc = os.path.splitext(filename)[0] + '.pyc' - if os.access(os.path.join(target_test_dir, testfile_pyc), - os.F_OK): - os.remove(os.path.join(target_test_dir, testfile_pyc)) - self.logger.info('removed compiled file %s', - os.path.join(target_test_dir, - testfile_pyc)) - - def fix_mkrescue(self, mkrescue): - """ grub-mkrescue is a bash script with two variables, 'prefix' and - 'libdir'. They must be pointed to the right location so that the - iso can be generated appropriately. We point the two variables to - the directory where we have extracted our pre-built bits grub - tarball. - """ - grub_x86_64_mods = os.path.join(self._workDir, 'grub-inst-x86_64-efi') - grub_i386_mods = os.path.join(self._workDir, 'grub-inst') - - self.assertTrue(os.path.exists(grub_x86_64_mods)) - self.assertTrue(os.path.exists(grub_i386_mods)) - - new_script = "" - with open(mkrescue, 'r', encoding='utf-8') as filehandle: - orig_script = filehandle.read() - new_script = re.sub('(^prefix=)(.*)', - r'\1"%s"' %grub_x86_64_mods, - orig_script, flags=re.M) - new_script = re.sub('(^libdir=)(.*)', r'\1"%s/lib"' %grub_i386_mods, - new_script, flags=re.M) - - with open(mkrescue, 'w', encoding='utf-8') as filehandle: - filehandle.write(new_script) - - def generate_bits_iso(self): - """ Uses grub-mkrescue to generate a fresh bits iso with the python - test scripts - """ - bits_dir = os.path.join(self._workDir, - 'bits-%d' %self._bitsInternalVer) - iso_file = os.path.join(self._workDir, - 'bits-%d.iso' %self._bitsInternalVer) - mkrescue_script = os.path.join(self._workDir, - 'grub-inst-x86_64-efi', 'bin', - 'grub-mkrescue') - - self.assertTrue(os.access(mkrescue_script, - os.R_OK | os.W_OK | os.X_OK)) - - self.fix_mkrescue(mkrescue_script) - - self.logger.info('using grub-mkrescue for generating biosbits iso ...') - - try: - if os.getenv('V') or os.getenv('BITS_DEBUG'): - subprocess.check_call([mkrescue_script, '-o', iso_file, - bits_dir], stderr=subprocess.STDOUT) - else: - subprocess.check_call([mkrescue_script, '-o', - iso_file, bits_dir], - stderr=subprocess.DEVNULL, - stdout=subprocess.DEVNULL) - except Exception as e: # pylint: disable=broad-except - self.skipTest("Error while generating the bits iso. " - "Pass V=1 in the environment to get more details. 
" - + str(e)) - - self.assertTrue(os.access(iso_file, os.R_OK)) - - self.logger.info('iso file %s successfully generated.', iso_file) - - def setUp(self): # pylint: disable=arguments-differ - super().setUp('qemu-system-') - - self._baseDir = os.getenv('AVOCADO_TEST_BASEDIR') - - # workdir could also be avocado's own workdir in self.workdir. - # At present, I prefer to maintain my own temporary working - # directory. It gives us more control over the generated bits - # log files and also for debugging, we may chose not to remove - # this working directory so that the logs and iso can be - # inspected manually and archived if needed. - self._workDir = tempfile.mkdtemp(prefix='acpi-bits-', - suffix='.tmp') - self.logger.info('working dir: %s', self._workDir) - - prebuiltDir = os.path.join(self._workDir, 'prebuilt') - if not os.path.isdir(prebuiltDir): - os.mkdir(prebuiltDir, mode=0o775) - - bits_zip_file = os.path.join(prebuiltDir, 'bits-%d-%s.zip' - %(self._bitsInternalVer, - self._bitsCommitHash)) - grub_tar_file = os.path.join(prebuiltDir, - 'bits-%d-%s-grub.tar.gz' - %(self._bitsInternalVer, - self._bitsCommitHash)) - - bitsLocalArtLoc = self.fetch_asset(self._bitsArtURL, - asset_hash=self._bitsArtSHA1Hash) - self.logger.info("downloaded bits artifacts to %s", bitsLocalArtLoc) - - # extract the bits artifact in the temp working directory - with zipfile.ZipFile(bitsLocalArtLoc, 'r') as zref: - zref.extractall(prebuiltDir) - - # extract the bits software in the temp working directory - with zipfile.ZipFile(bits_zip_file, 'r') as zref: - zref.extractall(self._workDir) - - with tarfile.open(grub_tar_file, 'r', encoding='utf-8') as tarball: - tarball.extractall(self._workDir) - - self.copy_test_scripts() - self.copy_bits_config() - self.generate_bits_iso() - - def parse_log(self): - """parse the log generated by running bits tests and - check for failures. - """ - debugconf = os.path.join(self._workDir, self._debugcon_log) - log = "" - with open(debugconf, 'r', encoding='utf-8') as filehandle: - log = filehandle.read() - - matchiter = re.finditer(r'(.*Summary: )(\d+ passed), (\d+ failed).*', - log) - for match in matchiter: - # verify that no test cases failed. - try: - self.assertEqual(match.group(3).split()[0], '0', - 'Some bits tests seems to have failed. ' \ - 'Please check the test logs for more info.') - except AssertionError as e: - self._print_log(log) - raise e - else: - if os.getenv('V') or os.getenv('BITS_DEBUG'): - self._print_log(log) - - def tearDown(self): - """ - Lets do some cleanups. - """ - if self._vm: - self.assertFalse(not self._vm.is_running) - if not os.getenv('BITS_DEBUG') and self._workDir: - self.logger.info('removing the work directory %s', self._workDir) - shutil.rmtree(self._workDir) - else: - self.logger.info('not removing the work directory %s ' \ - 'as BITS_DEBUG is ' \ - 'passed in the environment', self._workDir) - super().tearDown() - - def test_acpi_smbios_bits(self): - """The main test case implementation.""" - - iso_file = os.path.join(self._workDir, - 'bits-%d.iso' %self._bitsInternalVer) - - self.assertTrue(os.access(iso_file, os.R_OK)) - - self._vm = QEMUBitsMachine(binary=self.qemu_bin, - base_temp_dir=self._workDir, - debugcon_log=self._debugcon_log, - debugcon_addr=self._debugcon_addr) - - self._vm.add_args('-cdrom', '%s' %iso_file) - # the vm needs to be run under icount so that TCG emulation is - # consistent in terms of timing. smilatency tests have consistent - # timing requirements. 
- self._vm.add_args('-icount', 'auto') - # currently there is no support in bits for recognizing 64-bit SMBIOS - # entry points. QEMU defaults to 64-bit entry points since the - # upstream commit bf376f3020 ("hw/i386/pc: Default to use SMBIOS 3.0 - # for newer machine models"). Therefore, enforce 32-bit entry point. - self._vm.add_args('-machine', 'smbios-entry-point-type=32') - - # enable console logging - self._vm.set_console() - self._vm.launch() - - self.logger.debug("Console output from bits VM follows ...") - c_drainer = drainer.LineLogger(self._vm.console_socket.fileno(), - logger=self.logger.getChild("console"), - stop_check=(lambda : - not self._vm.is_running())) - c_drainer.start() - - # biosbits has been configured to run all the specified test suites - # in batch mode and then automatically initiate a vm shutdown. - # Set timeout to BITS_TIMEOUT for SHUTDOWN event from bits VM at par - # with the avocado test timeout. - self._vm.event_wait('SHUTDOWN', timeout=BITS_TIMEOUT) - self._vm.wait(timeout=None) - self.parse_log() diff --git a/tests/avocado/avocado_qemu/__init__.py b/tests/avocado/avocado_qemu/__init__.py deleted file mode 100644 index a3da2a96bb4..00000000000 --- a/tests/avocado/avocado_qemu/__init__.py +++ /dev/null @@ -1,444 +0,0 @@ -# Test class and utilities for functional tests -# -# Copyright (c) 2018 Red Hat, Inc. -# -# Author: -# Cleber Rosa -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import logging -import os -import subprocess -import sys -import tempfile -import time -import uuid - -import avocado -from avocado.utils import ssh -from avocado.utils.path import find_command - -from qemu.machine import QEMUMachine -from qemu.utils import (get_info_usernet_hostfwd_port, kvm_available, - tcg_available) - - -#: The QEMU build root directory. It may also be the source directory -#: if building from the source dir, but it's safer to use BUILD_DIR for -#: that purpose. Be aware that if this code is moved outside of a source -#: and build tree, it will not be accurate. -BUILD_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) - - -def has_cmd(name, args=None): - """ - This function is for use in a @avocado.skipUnless decorator, e.g.: - - @skipUnless(*has_cmd('sudo -n', ('sudo', '-n', 'true'))) - def test_something_that_needs_sudo(self): - ... - """ - - if args is None: - args = ('which', name) - - try: - _, stderr, exitcode = run_cmd(args) - except Exception as e: - exitcode = -1 - stderr = str(e) - - if exitcode != 0: - cmd_line = ' '.join(args) - err = f'{name} required, but "{cmd_line}" failed: {stderr.strip()}' - return (False, err) - else: - return (True, '') - -def has_cmds(*cmds): - """ - This function is for use in a @avocado.skipUnless decorator and - allows checking for the availability of multiple commands, e.g.: - - @skipUnless(*has_cmds(('cmd1', ('cmd1', '--some-parameter')), - 'cmd2', 'cmd3')) - def test_something_that_needs_cmd1_and_cmd2(self): - ... 
- """ - - for cmd in cmds: - if isinstance(cmd, str): - cmd = (cmd,) - - ok, errstr = has_cmd(*cmd) - if not ok: - return (False, errstr) - - return (True, '') - -def run_cmd(args): - subp = subprocess.Popen(args, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True) - stdout, stderr = subp.communicate() - ret = subp.returncode - - return (stdout, stderr, ret) - -def is_readable_executable_file(path): - return os.path.isfile(path) and os.access(path, os.R_OK | os.X_OK) - - -def pick_default_qemu_bin(bin_prefix='qemu-system-', arch=None): - """ - Picks the path of a QEMU binary, starting either in the current working - directory or in the source tree root directory. - - :param arch: the arch to use when looking for a QEMU binary (the target - will match the arch given). If None (the default), arch - will be the current host system arch (as given by - :func:`os.uname`). - :type arch: str - :returns: the path to the default QEMU binary or None if one could not - be found - :rtype: str or None - """ - if arch is None: - arch = os.uname()[4] - # qemu binary path does not match arch for powerpc, handle it - if 'ppc64le' in arch: - arch = 'ppc64' - qemu_bin_name = bin_prefix + arch - qemu_bin_paths = [ - os.path.join(".", qemu_bin_name), - os.path.join(BUILD_DIR, qemu_bin_name), - os.path.join(BUILD_DIR, "build", qemu_bin_name), - ] - for path in qemu_bin_paths: - if is_readable_executable_file(path): - return path - return None - - -def _console_interaction(test, success_message, failure_message, - send_string, keep_sending=False, vm=None): - assert not keep_sending or send_string - if vm is None: - vm = test.vm - console = vm.console_file - console_logger = logging.getLogger('console') - while True: - if send_string: - vm.console_socket.sendall(send_string.encode()) - if not keep_sending: - send_string = None # send only once - try: - msg = console.readline().decode().strip() - except UnicodeDecodeError: - msg = None - if not msg: - continue - console_logger.debug(msg) - if success_message is None or success_message in msg: - break - if failure_message and failure_message in msg: - console.close() - fail = 'Failure message found in console: "%s". Expected: "%s"' % \ - (failure_message, success_message) - test.fail(fail) - -def interrupt_interactive_console_until_pattern(test, success_message, - failure_message=None, - interrupt_string='\r'): - """ - Keep sending a string to interrupt a console prompt, while logging the - console output. Typical use case is to break a boot loader prompt, such: - - Press a key within 5 seconds to interrupt boot process. - 5 - 4 - 3 - 2 - 1 - Booting default image... 
- - :param test: an Avocado test containing a VM that will have its console - read and probed for a success or failure message - :type test: :class:`avocado_qemu.QemuSystemTest` - :param success_message: if this message appears, test succeeds - :param failure_message: if this message appears, test fails - :param interrupt_string: a string to send to the console before trying - to read a new line - """ - _console_interaction(test, success_message, failure_message, - interrupt_string, True) - -def wait_for_console_pattern(test, success_message, failure_message=None, - vm=None): - """ - Waits for messages to appear on the console, while logging the content - - :param test: an Avocado test containing a VM that will have its console - read and probed for a success or failure message - :type test: :class:`avocado_qemu.QemuSystemTest` - :param success_message: if this message appears, test succeeds - :param failure_message: if this message appears, test fails - """ - _console_interaction(test, success_message, failure_message, None, vm=vm) - -def exec_command(test, command): - """ - Send a command to a console (appending CRLF characters), while logging - the content. - - :param test: an Avocado test containing a VM. - :type test: :class:`avocado_qemu.QemuSystemTest` - :param command: the command to send - :type command: str - """ - _console_interaction(test, None, None, command + '\r') - -def exec_command_and_wait_for_pattern(test, command, - success_message, failure_message=None): - """ - Send a command to a console (appending CRLF characters), then wait - for success_message to appear on the console, while logging the. - content. Mark the test as failed if failure_message is found instead. - - :param test: an Avocado test containing a VM that will have its console - read and probed for a success or failure message - :type test: :class:`avocado_qemu.QemuSystemTest` - :param command: the command to send - :param success_message: if this message appears, test succeeds - :param failure_message: if this message appears, test fails - """ - _console_interaction(test, success_message, failure_message, command + '\r') - -class QemuBaseTest(avocado.Test): - - # default timeout for all tests, can be overridden - timeout = 120 - - def _get_unique_tag_val(self, tag_name): - """ - Gets a tag value, if unique for a key - """ - vals = self.tags.get(tag_name, []) - if len(vals) == 1: - return vals.pop() - return None - - def setUp(self, bin_prefix): - self.arch = self.params.get('arch', - default=self._get_unique_tag_val('arch')) - - self.cpu = self.params.get('cpu', - default=self._get_unique_tag_val('cpu')) - - default_qemu_bin = pick_default_qemu_bin(bin_prefix, arch=self.arch) - self.qemu_bin = self.params.get('qemu_bin', - default=default_qemu_bin) - if self.qemu_bin is None: - self.cancel("No QEMU binary defined or found in the build tree") - - def fetch_asset(self, name, - asset_hash, algorithm=None, - locations=None, expire=None, - find_only=False, cancel_on_missing=True): - return super().fetch_asset(name, - asset_hash=asset_hash, - algorithm=algorithm, - locations=locations, - expire=expire, - find_only=find_only, - cancel_on_missing=cancel_on_missing) - - -class QemuSystemTest(QemuBaseTest): - """Facilitates system emulation tests.""" - - def setUp(self): - self._vms = {} - - super().setUp('qemu-system-') - - accel_required = self._get_unique_tag_val('accel') - if accel_required: - self.require_accelerator(accel_required) - - self.machine = self.params.get('machine', - 
default=self._get_unique_tag_val('machine')) - - def require_accelerator(self, accelerator): - """ - Requires an accelerator to be available for the test to continue - - It takes into account the currently set qemu binary. - - If the check fails, the test is canceled. If the check itself - for the given accelerator is not available, the test is also - canceled. - - :param accelerator: name of the accelerator, such as "kvm" or "tcg" - :type accelerator: str - """ - checker = {'tcg': tcg_available, - 'kvm': kvm_available}.get(accelerator) - if checker is None: - self.cancel("Don't know how to check for the presence " - "of accelerator %s" % accelerator) - if not checker(qemu_bin=self.qemu_bin): - self.cancel("%s accelerator does not seem to be " - "available" % accelerator) - - def require_netdev(self, netdevname): - netdevhelp = run_cmd([self.qemu_bin, - '-M', 'none', '-netdev', 'help'])[0]; - if netdevhelp.find('\n' + netdevname + '\n') < 0: - self.cancel('no support for user networking') - - def require_multiprocess(self): - """ - Test for the presence of the x-pci-proxy-dev which is required - to support multiprocess. - """ - devhelp = run_cmd([self.qemu_bin, - '-M', 'none', '-device', 'help'])[0]; - if devhelp.find('x-pci-proxy-dev') < 0: - self.cancel('no support for multiprocess device emulation') - - def _new_vm(self, name, *args): - self._sd = tempfile.TemporaryDirectory(prefix="qemu_") - vm = QEMUMachine(self.qemu_bin, base_temp_dir=self.workdir, - log_dir=self.logdir) - self.log.debug('QEMUMachine "%s" created', name) - self.log.debug('QEMUMachine "%s" temp_dir: %s', name, vm.temp_dir) - self.log.debug('QEMUMachine "%s" log_dir: %s', name, vm.log_dir) - if args: - vm.add_args(*args) - return vm - - def get_qemu_img(self): - self.log.debug('Looking for and selecting a qemu-img binary') - - # If qemu-img has been built, use it, otherwise the system wide one - # will be used. - qemu_img = os.path.join(BUILD_DIR, 'qemu-img') - if not os.path.exists(qemu_img): - qemu_img = find_command('qemu-img', False) - if qemu_img is False: - self.cancel('Could not find "qemu-img"') - - return qemu_img - - @property - def vm(self): - return self.get_vm(name='default') - - def get_vm(self, *args, name=None): - if not name: - name = str(uuid.uuid4()) - if self._vms.get(name) is None: - self._vms[name] = self._new_vm(name, *args) - if self.cpu is not None: - self._vms[name].add_args('-cpu', self.cpu) - if self.machine is not None: - self._vms[name].set_machine(self.machine) - return self._vms[name] - - def set_vm_arg(self, arg, value): - """ - Set an argument to list of extra arguments to be given to the QEMU - binary. If the argument already exists then its value is replaced. 
- - :param arg: the QEMU argument, such as "-cpu" in "-cpu host" - :type arg: str - :param value: the argument value, such as "host" in "-cpu host" - :type value: str - """ - if not arg or not value: - return - if arg not in self.vm.args: - self.vm.args.extend([arg, value]) - else: - idx = self.vm.args.index(arg) + 1 - if idx < len(self.vm.args): - self.vm.args[idx] = value - else: - self.vm.args.append(value) - - def tearDown(self): - for vm in self._vms.values(): - vm.shutdown() - self._sd = None - super().tearDown() - - -class QemuUserTest(QemuBaseTest): - """Facilitates user-mode emulation tests.""" - - def setUp(self): - self._ldpath = [] - super().setUp('qemu-') - - def add_ldpath(self, ldpath): - self._ldpath.append(os.path.abspath(ldpath)) - - def run(self, bin_path, args=[]): - qemu_args = " ".join(["-L %s" % ldpath for ldpath in self._ldpath]) - bin_args = " ".join(args) - return process.run("%s %s %s %s" % (self.qemu_bin, qemu_args, - bin_path, bin_args)) - - -class LinuxSSHMixIn: - """Contains utility methods for interacting with a guest via SSH.""" - - def ssh_connect(self, username, credential, credential_is_key=True): - self.ssh_logger = logging.getLogger('ssh') - res = self.vm.cmd('human-monitor-command', - command_line='info usernet') - port = get_info_usernet_hostfwd_port(res) - self.assertIsNotNone(port) - self.assertGreater(port, 0) - self.log.debug('sshd listening on port: %d', port) - if credential_is_key: - self.ssh_session = ssh.Session('127.0.0.1', port=port, - user=username, key=credential) - else: - self.ssh_session = ssh.Session('127.0.0.1', port=port, - user=username, password=credential) - for i in range(10): - try: - self.ssh_session.connect() - return - except: - time.sleep(i) - self.fail('ssh connection timeout') - - def ssh_command(self, command): - self.ssh_logger.info(command) - result = self.ssh_session.cmd(command) - stdout_lines = [line.rstrip() for line - in result.stdout_text.splitlines()] - for line in stdout_lines: - self.ssh_logger.info(line) - stderr_lines = [line.rstrip() for line - in result.stderr_text.splitlines()] - for line in stderr_lines: - self.ssh_logger.warning(line) - - self.assertEqual(result.exit_status, 0, - f'Guest command failed: {command}') - return stdout_lines, stderr_lines - - def ssh_command_output_contains(self, cmd, exp): - stdout, _ = self.ssh_command(cmd) - for line in stdout: - if exp in line: - break - else: - self.fail('"%s" output does not contain "%s"' % (cmd, exp)) diff --git a/tests/avocado/avocado_qemu/linuxtest.py b/tests/avocado/avocado_qemu/linuxtest.py deleted file mode 100644 index e1dc838b1cb..00000000000 --- a/tests/avocado/avocado_qemu/linuxtest.py +++ /dev/null @@ -1,253 +0,0 @@ -# Test class and utilities for functional Linux-based tests -# -# Copyright (c) 2018 Red Hat, Inc. -# -# Author: -# Cleber Rosa -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os -import shutil - -from avocado.utils import cloudinit, datadrainer, process, vmimage - -from . import LinuxSSHMixIn -from . import QemuSystemTest - -if os.path.islink(os.path.dirname(os.path.dirname(__file__))): - # The link to the avocado tests dir in the source code directory - lnk = os.path.dirname(os.path.dirname(__file__)) - #: The QEMU root source directory - SOURCE_DIR = os.path.dirname(os.path.dirname(os.readlink(lnk))) -else: - SOURCE_DIR = BUILD_DIR - -class LinuxDistro: - """Represents a Linux distribution - - Holds information of known distros. 
- """ - #: A collection of known distros and their respective image checksum - KNOWN_DISTROS = { - 'fedora': { - '31': { - 'x86_64': - {'checksum': ('e3c1b309d9203604922d6e255c2c5d09' - '8a309c2d46215d8fc026954f3c5c27a0'), - 'pxeboot_url': ('https://archives.fedoraproject.org/' - 'pub/archive/fedora/linux/releases/31/' - 'Everything/x86_64/os/images/pxeboot/'), - 'kernel_params': ('root=UUID=b1438b9b-2cab-4065-a99a-' - '08a96687f73c ro no_timer_check ' - 'net.ifnames=0 console=tty1 ' - 'console=ttyS0,115200n8'), - }, - 'aarch64': - {'checksum': ('1e18d9c0cf734940c4b5d5ec592facae' - 'd2af0ad0329383d5639c997fdf16fe49'), - 'pxeboot_url': 'https://archives.fedoraproject.org/' - 'pub/archive/fedora/linux/releases/31/' - 'Everything/aarch64/os/images/pxeboot/', - 'kernel_params': ('root=UUID=b6950a44-9f3c-4076-a9c2-' - '355e8475b0a7 ro earlyprintk=pl011,0x9000000' - ' ignore_loglevel no_timer_check' - ' printk.time=1 rd_NO_PLYMOUTH' - ' console=ttyAMA0'), - }, - 'ppc64': - {'checksum': ('7c3528b85a3df4b2306e892199a9e1e4' - '3f991c506f2cc390dc4efa2026ad2f58')}, - 's390x': - {'checksum': ('4caaab5a434fd4d1079149a072fdc789' - '1e354f834d355069ca982fdcaf5a122d')}, - }, - '32': { - 'aarch64': - {'checksum': ('b367755c664a2d7a26955bbfff985855' - 'adfa2ca15e908baf15b4b176d68d3967'), - 'pxeboot_url': ('http://dl.fedoraproject.org/pub/fedora/linux/' - 'releases/32/Server/aarch64/os/images/' - 'pxeboot/'), - 'kernel_params': ('root=UUID=3df75b65-be8d-4db4-8655-' - '14d95c0e90c5 ro no_timer_check net.ifnames=0' - ' console=tty1 console=ttyS0,115200n8'), - }, - }, - '33': { - 'aarch64': - {'checksum': ('e7f75cdfd523fe5ac2ca9eeece68edc1' - 'a81f386a17f969c1d1c7c87031008a6b'), - 'pxeboot_url': ('http://dl.fedoraproject.org/pub/fedora/linux/' - 'releases/33/Server/aarch64/os/images/' - 'pxeboot/'), - 'kernel_params': ('root=UUID=d20b3ffa-6397-4a63-a734-' - '1126a0208f8a ro no_timer_check net.ifnames=0' - ' console=tty1 console=ttyS0,115200n8' - ' console=tty0'), - }, - }, - } - } - - def __init__(self, name, version, arch): - self.name = name - self.version = version - self.arch = arch - try: - info = self.KNOWN_DISTROS.get(name).get(version).get(arch) - except AttributeError: - # Unknown distro - info = None - self._info = info or {} - - @property - def checksum(self): - """Gets the cloud-image file checksum""" - return self._info.get('checksum', None) - - @checksum.setter - def checksum(self, value): - self._info['checksum'] = value - - @property - def pxeboot_url(self): - """Gets the repository url where pxeboot files can be found""" - return self._info.get('pxeboot_url', None) - - @property - def default_kernel_params(self): - """Gets the default kernel parameters""" - return self._info.get('kernel_params', None) - - -class LinuxTest(LinuxSSHMixIn, QemuSystemTest): - """Facilitates having a cloud-image Linux based available. - - For tests that intend to interact with guests, this is a better choice - to start with than the more vanilla `QemuSystemTest` class. 
- """ - - distro = None - username = 'root' - password = 'password' - smp = '2' - memory = '1024' - - def _set_distro(self): - distro_name = self.params.get( - 'distro', - default=self._get_unique_tag_val('distro')) - if not distro_name: - distro_name = 'fedora' - - distro_version = self.params.get( - 'distro_version', - default=self._get_unique_tag_val('distro_version')) - if not distro_version: - distro_version = '31' - - self.distro = LinuxDistro(distro_name, distro_version, self.arch) - - # The distro checksum behaves differently than distro name and - # version. First, it does not respect a tag with the same - # name, given that it's not expected to be used for filtering - # (distro name versions are the natural choice). Second, the - # order of precedence is: parameter, attribute and then value - # from KNOWN_DISTROS. - distro_checksum = self.params.get('distro_checksum', - default=None) - if distro_checksum: - self.distro.checksum = distro_checksum - - def setUp(self, ssh_pubkey=None, network_device_type='virtio-net'): - super().setUp() - self.require_netdev('user') - self._set_distro() - self.vm.add_args('-smp', self.smp) - self.vm.add_args('-m', self.memory) - # The following network device allows for SSH connections - self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22', - '-device', '%s,netdev=vnet' % network_device_type) - self.set_up_boot() - if ssh_pubkey is None: - ssh_pubkey, self.ssh_key = self.set_up_existing_ssh_keys() - self.set_up_cloudinit(ssh_pubkey) - - def set_up_existing_ssh_keys(self): - ssh_public_key = os.path.join(SOURCE_DIR, 'tests', 'keys', 'id_rsa.pub') - source_private_key = os.path.join(SOURCE_DIR, 'tests', 'keys', 'id_rsa') - ssh_dir = os.path.join(self.workdir, '.ssh') - os.mkdir(ssh_dir, mode=0o700) - ssh_private_key = os.path.join(ssh_dir, - os.path.basename(source_private_key)) - shutil.copyfile(source_private_key, ssh_private_key) - os.chmod(ssh_private_key, 0o600) - return (ssh_public_key, ssh_private_key) - - def download_boot(self): - # Set the qemu-img binary. - # If none is available, the test will cancel. 
- vmimage.QEMU_IMG = super().get_qemu_img() - - self.log.info('Downloading/preparing boot image') - # Fedora 31 only provides ppc64le images - image_arch = self.arch - if self.distro.name == 'fedora': - if image_arch == 'ppc64': - image_arch = 'ppc64le' - - try: - boot = vmimage.get( - self.distro.name, arch=image_arch, version=self.distro.version, - checksum=self.distro.checksum, - algorithm='sha256', - cache_dir=self.cache_dirs[0], - snapshot_dir=self.workdir) - except: - self.cancel('Failed to download/prepare boot image') - return boot.path - - def prepare_cloudinit(self, ssh_pubkey=None): - self.log.info('Preparing cloudinit image') - try: - cloudinit_iso = os.path.join(self.workdir, 'cloudinit.iso') - pubkey_content = None - if ssh_pubkey: - with open(ssh_pubkey) as pubkey: - pubkey_content = pubkey.read() - cloudinit.iso(cloudinit_iso, self.name, - username=self.username, - password=self.password, - # QEMU's hard coded usermode router address - phone_home_host='10.0.2.2', - phone_home_port=self.phone_server.server_port, - authorized_key=pubkey_content) - except Exception: - self.cancel('Failed to prepare the cloudinit image') - return cloudinit_iso - - def set_up_boot(self): - path = self.download_boot() - self.vm.add_args('-drive', 'file=%s' % path) - - def set_up_cloudinit(self, ssh_pubkey=None): - self.phone_server = cloudinit.PhoneHomeServer(('0.0.0.0', 0), - self.name) - cloudinit_iso = self.prepare_cloudinit(ssh_pubkey) - self.vm.add_args('-drive', 'file=%s,format=raw' % cloudinit_iso) - - def launch_and_wait(self, set_up_ssh_connection=True): - self.vm.set_console() - self.vm.launch() - console_drainer = datadrainer.LineLogger(self.vm.console_socket.fileno(), - logger=self.log.getChild('console')) - console_drainer.start() - self.log.info('VM launched, waiting for boot confirmation from guest') - while not self.phone_server.instance_phoned_back: - self.phone_server.handle_request() - - if set_up_ssh_connection: - self.log.info('Setting up the SSH connection') - self.ssh_connect(self.username, self.ssh_key) diff --git a/tests/avocado/boot_linux.py b/tests/avocado/boot_linux.py deleted file mode 100644 index a029ef4ad1e..00000000000 --- a/tests/avocado/boot_linux.py +++ /dev/null @@ -1,132 +0,0 @@ -# Functional test that boots a complete Linux system via a cloud image -# -# Copyright (c) 2018-2020 Red Hat, Inc. -# -# Author: -# Cleber Rosa -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. 
- -import os - -from avocado_qemu.linuxtest import LinuxTest -from avocado_qemu import BUILD_DIR - -from avocado import skipUnless - - -class BootLinuxX8664(LinuxTest): - """ - :avocado: tags=arch:x86_64 - """ - timeout = 480 - - def test_pc_i440fx_tcg(self): - """ - :avocado: tags=machine:pc - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - self.vm.add_args("-accel", "tcg") - self.launch_and_wait(set_up_ssh_connection=False) - - def test_pc_i440fx_kvm(self): - """ - :avocado: tags=machine:pc - :avocado: tags=accel:kvm - """ - self.require_accelerator("kvm") - self.vm.add_args("-accel", "kvm") - self.launch_and_wait(set_up_ssh_connection=False) - - def test_pc_q35_tcg(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - self.vm.add_args("-accel", "tcg") - self.launch_and_wait(set_up_ssh_connection=False) - - def test_pc_q35_kvm(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=accel:kvm - """ - self.require_accelerator("kvm") - self.vm.add_args("-accel", "kvm") - self.launch_and_wait(set_up_ssh_connection=False) - - -# For Aarch64 we only boot KVM tests in CI as booting the current -# Fedora OS in TCG tests is very heavyweight. There are lighter weight -# distros which we use in the machine_aarch64_virt.py tests. -class BootLinuxAarch64(LinuxTest): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=machine:virt - """ - timeout = 720 - - def test_virt_kvm(self): - """ - :avocado: tags=accel:kvm - :avocado: tags=cpu:host - """ - self.require_accelerator("kvm") - self.vm.add_args("-accel", "kvm") - self.vm.add_args("-machine", "virt,gic-version=host") - self.vm.add_args('-bios', - os.path.join(BUILD_DIR, 'pc-bios', - 'edk2-aarch64-code.fd')) - self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0') - self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom') - self.launch_and_wait(set_up_ssh_connection=False) - - -# See the tux_baseline.py tests for almost the same coverage in a lot -# less time. -class BootLinuxPPC64(LinuxTest): - """ - :avocado: tags=arch:ppc64 - """ - - timeout = 360 - - @skipUnless(os.getenv('SPEED') == 'slow', 'runtime limited') - def test_pseries_tcg(self): - """ - :avocado: tags=machine:pseries - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - self.vm.add_args("-accel", "tcg") - self.launch_and_wait(set_up_ssh_connection=False) - - def test_pseries_kvm(self): - """ - :avocado: tags=machine:pseries - :avocado: tags=accel:kvm - """ - self.require_accelerator("kvm") - self.vm.add_args("-accel", "kvm") - self.vm.add_args("-machine", "cap-ccf-assist=off") - self.launch_and_wait(set_up_ssh_connection=False) - -class BootLinuxS390X(LinuxTest): - """ - :avocado: tags=arch:s390x - """ - - timeout = 240 - - @skipUnless(os.getenv('SPEED') == 'slow', 'runtime limited') - def test_s390_ccw_virtio_tcg(self): - """ - :avocado: tags=machine:s390-ccw-virtio - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - self.vm.add_args("-accel", "tcg") - self.launch_and_wait(set_up_ssh_connection=False) diff --git a/tests/avocado/boot_linux_console.py b/tests/avocado/boot_linux_console.py deleted file mode 100644 index 2929aa042da..00000000000 --- a/tests/avocado/boot_linux_console.py +++ /dev/null @@ -1,1551 +0,0 @@ -# Functional test that boots a Linux kernel and checks the console -# -# Copyright (c) 2018 Red Hat, Inc. -# -# Author: -# Cleber Rosa -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. 
See the COPYING file in the top-level directory. - -import os -import lzma -import gzip -import shutil - -from avocado import skip -from avocado import skipUnless -from avocado import skipUnless -from avocado_qemu import QemuSystemTest -from avocado_qemu import exec_command -from avocado_qemu import exec_command_and_wait_for_pattern -from avocado_qemu import interrupt_interactive_console_until_pattern -from avocado_qemu import wait_for_console_pattern -from avocado.utils import process -from avocado.utils import archive - -""" -Round up to next power of 2 -""" -def pow2ceil(x): - return 1 if x == 0 else 2**(x - 1).bit_length() - -def file_truncate(path, size): - if size != os.path.getsize(path): - with open(path, 'ab+') as fd: - fd.truncate(size) - -""" -Expand file size to next power of 2 -""" -def image_pow2ceil_expand(path): - size = os.path.getsize(path) - size_aligned = pow2ceil(size) - if size != size_aligned: - with open(path, 'ab+') as fd: - fd.truncate(size_aligned) - -class LinuxKernelTest(QemuSystemTest): - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' - - def wait_for_console_pattern(self, success_message, vm=None): - wait_for_console_pattern(self, success_message, - failure_message='Kernel panic - not syncing', - vm=vm) - - def extract_from_deb(self, deb, path): - """ - Extracts a file from a deb package into the test workdir - - :param deb: path to the deb archive - :param path: path within the deb archive of the file to be extracted - :returns: path of the extracted file - """ - cwd = os.getcwd() - os.chdir(self.workdir) - file_path = process.run("ar t %s" % deb).stdout_text.split()[2] - process.run("ar x %s %s" % (deb, file_path)) - archive.extract(file_path, self.workdir) - os.chdir(cwd) - # Return complete path to extracted file. Because callers to - # extract_from_deb() specify 'path' with a leading slash, it is - # necessary to use os.path.relpath() as otherwise os.path.join() - # interprets it as an absolute path and drops the self.workdir part. - return os.path.normpath(os.path.join(self.workdir, - os.path.relpath(path, '/'))) - - def extract_from_rpm(self, rpm, path): - """ - Extracts a file from an RPM package into the test workdir. - - :param rpm: path to the rpm archive - :param path: path within the rpm archive of the file to be extracted - needs to be a relative path (starting with './') because - cpio(1), which is used to extract the file, expects that. 
- :returns: path of the extracted file - """ - cwd = os.getcwd() - os.chdir(self.workdir) - process.run("rpm2cpio %s | cpio -id %s" % (rpm, path), shell=True) - os.chdir(cwd) - return os.path.normpath(os.path.join(self.workdir, path)) - -class BootLinuxConsole(LinuxKernelTest): - """ - Boots a Linux kernel and checks that the console is operational and the - kernel command line is properly passed from QEMU to the kernel - """ - timeout = 90 - - def test_x86_64_pc(self): - """ - :avocado: tags=arch:x86_64 - :avocado: tags=machine:pc - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' - '/linux/releases/29/Everything/x86_64/os/images/pxeboot' - '/vmlinuz') - kernel_hash = '23bebd2680757891cf7adedb033532163a792495' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - self.vm.set_console() - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' - self.vm.add_args('-kernel', kernel_path, - '-append', kernel_command_line) - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - - def test_mips_malta(self): - """ - :avocado: tags=arch:mips - :avocado: tags=machine:malta - :avocado: tags=endian:big - """ - deb_url = ('http://snapshot.debian.org/archive/debian/' - '20130217T032700Z/pool/main/l/linux-2.6/' - 'linux-image-2.6.32-5-4kc-malta_2.6.32-48_mips.deb') - deb_hash = 'a8cfc28ad8f45f54811fc6cf74fc43ffcfe0ba04' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinux-2.6.32-5-4kc-malta') - - self.vm.set_console() - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' - self.vm.add_args('-kernel', kernel_path, - '-append', kernel_command_line) - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - - def test_mips64el_malta(self): - """ - This test requires the ar tool to extract "data.tar.gz" from - the Debian package. - - The kernel can be rebuilt using this Debian kernel source [1] and - following the instructions on [2]. 
- - [1] http://snapshot.debian.org/package/linux-2.6/2.6.32-48/ - #linux-source-2.6.32_2.6.32-48 - [2] https://kernel-team.pages.debian.net/kernel-handbook/ - ch-common-tasks.html#s-common-official - - :avocado: tags=arch:mips64el - :avocado: tags=machine:malta - """ - deb_url = ('http://snapshot.debian.org/archive/debian/' - '20130217T032700Z/pool/main/l/linux-2.6/' - 'linux-image-2.6.32-5-5kc-malta_2.6.32-48_mipsel.deb') - deb_hash = '1aaec92083bf22fda31e0d27fa8d9a388e5fc3d5' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinux-2.6.32-5-5kc-malta') - - self.vm.set_console() - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' - self.vm.add_args('-kernel', kernel_path, - '-append', kernel_command_line) - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - - def test_mips64el_fuloong2e(self): - """ - :avocado: tags=arch:mips64el - :avocado: tags=machine:fuloong2e - :avocado: tags=endian:little - """ - deb_url = ('http://archive.debian.org/debian/pool/main/l/linux/' - 'linux-image-3.16.0-6-loongson-2e_3.16.56-1+deb8u1_mipsel.deb') - deb_hash = 'd04d446045deecf7b755ef576551de0c4184dd44' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinux-3.16.0-6-loongson-2e') - - self.vm.set_console() - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' - self.vm.add_args('-kernel', kernel_path, - '-append', kernel_command_line) - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - - def test_mips_malta_cpio(self): - """ - :avocado: tags=arch:mips - :avocado: tags=machine:malta - :avocado: tags=endian:big - """ - deb_url = ('http://snapshot.debian.org/archive/debian/' - '20160601T041800Z/pool/main/l/linux/' - 'linux-image-4.5.0-2-4kc-malta_4.5.5-1_mips.deb') - deb_hash = 'a3c84f3e88b54e06107d65a410d1d1e8e0f340f8' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinux-4.5.0-2-4kc-malta') - initrd_url = ('https://github.com/groeck/linux-build-test/raw/' - '8584a59ed9e5eb5ee7ca91f6d74bbb06619205b8/rootfs/' - 'mips/rootfs.cpio.gz') - initrd_hash = 'bf806e17009360a866bf537f6de66590de349a99' - initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - initrd_path = self.workdir + "rootfs.cpio" - archive.gzip_uncompress(initrd_path_gz, initrd_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE - + 'console=ttyS0 console=tty ' - + 'rdinit=/sbin/init noreboot') - self.vm.add_args('-kernel', kernel_path, - '-initrd', initrd_path, - '-append', kernel_command_line, - '-no-reboot') - self.vm.launch() - self.wait_for_console_pattern('Boot successful.') - - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'BogoMIPS') - exec_command_and_wait_for_pattern(self, 'uname -a', - 'Debian') - exec_command_and_wait_for_pattern(self, 'reboot', - 'reboot: Restarting system') - # Wait for VM to shut down gracefully - self.vm.wait() - - @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') - def test_mips64el_malta_5KEc_cpio(self): - """ - :avocado: tags=arch:mips64el - :avocado: tags=machine:malta - :avocado: tags=endian:little - :avocado: tags=cpu:5KEc - """ - kernel_url = ('https://github.com/philmd/qemu-testing-blob/' - 
'raw/9ad2df38/mips/malta/mips64el/' - 'vmlinux-3.19.3.mtoman.20150408') - kernel_hash = '00d1d268fb9f7d8beda1de6bebcc46e884d71754' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - initrd_url = ('https://github.com/groeck/linux-build-test/' - 'raw/8584a59e/rootfs/' - 'mipsel64/rootfs.mipsel64r1.cpio.gz') - initrd_hash = '1dbb8a396e916847325284dbe2151167' - initrd_path_gz = self.fetch_asset(initrd_url, algorithm='md5', - asset_hash=initrd_hash) - initrd_path = self.workdir + "rootfs.cpio" - archive.gzip_uncompress(initrd_path_gz, initrd_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE - + 'console=ttyS0 console=tty ' - + 'rdinit=/sbin/init noreboot') - self.vm.add_args('-kernel', kernel_path, - '-initrd', initrd_path, - '-append', kernel_command_line, - '-no-reboot') - self.vm.launch() - wait_for_console_pattern(self, 'Boot successful.') - - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'MIPS 5KE') - exec_command_and_wait_for_pattern(self, 'uname -a', - '3.19.3.mtoman.20150408') - exec_command_and_wait_for_pattern(self, 'reboot', - 'reboot: Restarting system') - # Wait for VM to shut down gracefully - self.vm.wait() - - def do_test_mips_malta32el_nanomips(self, kernel_path_xz): - kernel_path = self.workdir + "kernel" - with lzma.open(kernel_path_xz, 'rb') as f_in: - with open(kernel_path, 'wb') as f_out: - shutil.copyfileobj(f_in, f_out) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE - + 'mem=256m@@0x0 ' - + 'console=ttyS0') - self.vm.add_args('-no-reboot', - '-kernel', kernel_path, - '-append', kernel_command_line) - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - - def test_mips_malta32el_nanomips_4k(self): - """ - :avocado: tags=arch:mipsel - :avocado: tags=machine:malta - :avocado: tags=endian:little - :avocado: tags=cpu:I7200 - """ - kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' - 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' - 'generic_nano32r6el_page4k.xz') - kernel_hash = '477456aafd2a0f1ddc9482727f20fe9575565dd6' - kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - self.do_test_mips_malta32el_nanomips(kernel_path_xz) - - def test_mips_malta32el_nanomips_16k_up(self): - """ - :avocado: tags=arch:mipsel - :avocado: tags=machine:malta - :avocado: tags=endian:little - :avocado: tags=cpu:I7200 - """ - kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' - 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' - 'generic_nano32r6el_page16k_up.xz') - kernel_hash = 'e882868f944c71c816e832e2303b7874d044a7bc' - kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - self.do_test_mips_malta32el_nanomips(kernel_path_xz) - - def test_mips_malta32el_nanomips_64k_dbg(self): - """ - :avocado: tags=arch:mipsel - :avocado: tags=machine:malta - :avocado: tags=endian:little - :avocado: tags=cpu:I7200 - """ - kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' - 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' - 'generic_nano32r6el_page64k_dbg.xz') - kernel_hash = '18d1c68f2e23429e266ca39ba5349ccd0aeb7180' - kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - self.do_test_mips_malta32el_nanomips(kernel_path_xz) - - def test_aarch64_xlnx_versal_virt(self): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=machine:xlnx-versal-virt - :avocado: tags=device:pl011 - :avocado: 
tags=device:arm_gicv3 - :avocado: tags=accel:tcg - """ - images_url = ('http://ports.ubuntu.com/ubuntu-ports/dists/' - 'bionic-updates/main/installer-arm64/' - '20101020ubuntu543.19/images/') - kernel_url = images_url + 'netboot/ubuntu-installer/arm64/linux' - kernel_hash = 'e167757620640eb26de0972f578741924abb3a82' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - initrd_url = images_url + 'netboot/ubuntu-installer/arm64/initrd.gz' - initrd_hash = 'cab5cb3fcefca8408aa5aae57f24574bfce8bdb9' - initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - - self.vm.set_console() - self.vm.add_args('-m', '2G', - '-accel', 'tcg', - '-kernel', kernel_path, - '-initrd', initrd_path) - self.vm.launch() - self.wait_for_console_pattern('Checked W+X mappings: passed') - - def test_arm_virt(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:virt - :avocado: tags=accel:tcg - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' - '/linux/releases/29/Everything/armhfp/os/images/pxeboot' - '/vmlinuz') - kernel_hash = 'e9826d741b4fb04cadba8d4824d1ed3b7fb8b4d4' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyAMA0') - self.vm.add_args('-kernel', kernel_path, - '-append', kernel_command_line) - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - - def test_arm_emcraft_sf2(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:emcraft-sf2 - :avocado: tags=endian:little - :avocado: tags=u-boot - :avocado: tags=accel:tcg - """ - self.require_netdev('user') - - uboot_url = ('https://raw.githubusercontent.com/' - 'Subbaraya-Sundeep/qemu-test-binaries/' - 'fe371d32e50ca682391e1e70ab98c2942aeffb01/u-boot') - uboot_hash = 'cbb8cbab970f594bf6523b9855be209c08374ae2' - uboot_path = self.fetch_asset(uboot_url, asset_hash=uboot_hash) - spi_url = ('https://raw.githubusercontent.com/' - 'Subbaraya-Sundeep/qemu-test-binaries/' - 'fe371d32e50ca682391e1e70ab98c2942aeffb01/spi.bin') - spi_hash = '65523a1835949b6f4553be96dec1b6a38fb05501' - spi_path = self.fetch_asset(spi_url, asset_hash=spi_hash) - spi_path_rw = os.path.join(self.workdir, os.path.basename(spi_path)) - shutil.copy(spi_path, spi_path_rw) - - file_truncate(spi_path_rw, 16 << 20) # Spansion S25FL128SDPBHICO is 16 MiB - - self.vm.set_console() - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE - self.vm.add_args('-kernel', uboot_path, - '-append', kernel_command_line, - '-drive', 'file=' + spi_path_rw + ',if=mtd,format=raw', - '-no-reboot') - self.vm.launch() - self.wait_for_console_pattern('Enter \'help\' for a list') - - exec_command_and_wait_for_pattern(self, 'ifconfig eth0 10.0.2.15', - 'eth0: link becomes ready') - exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2', - '3 packets transmitted, 3 packets received, 0% packet loss') - - def do_test_arm_raspi2(self, uart_id): - """ - :avocado: tags=accel:tcg - - The kernel can be rebuilt using the kernel source referenced - and following the instructions on the on: - https://www.raspberrypi.org/documentation/linux/kernel/building.md - """ - serial_kernel_cmdline = { - 0: 'earlycon=pl011,0x3f201000 console=ttyAMA0', - } - deb_url = ('http://archive.raspberrypi.org/debian/' - 'pool/main/r/raspberrypi-firmware/' - 'raspberrypi-kernel_1.20190215-1_armhf.deb') - deb_hash = 'cd284220b32128c5084037553db3c482426f3972' - deb_path = 
self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, '/boot/kernel7.img') - dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2709-rpi-2-b.dtb') - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - serial_kernel_cmdline[uart_id] + - ' root=/dev/mmcblk0p2 rootwait ' + - 'dwc_otg.fiq_fsm_enable=0') - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-append', kernel_command_line, - '-device', 'usb-kbd') - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - console_pattern = 'Product: QEMU USB Keyboard' - self.wait_for_console_pattern(console_pattern) - - def test_arm_raspi2_uart0(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:raspi2b - :avocado: tags=device:pl011 - :avocado: tags=accel:tcg - """ - self.do_test_arm_raspi2(0) - - def test_arm_raspi2_initrd(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:raspi2b - """ - deb_url = ('http://archive.raspberrypi.org/debian/' - 'pool/main/r/raspberrypi-firmware/' - 'raspberrypi-kernel_1.20190215-1_armhf.deb') - deb_hash = 'cd284220b32128c5084037553db3c482426f3972' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, '/boot/kernel7.img') - dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2709-rpi-2-b.dtb') - - initrd_url = ('https://github.com/groeck/linux-build-test/raw/' - '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' - 'arm/rootfs-armv7a.cpio.gz') - initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c' - initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - initrd_path = os.path.join(self.workdir, 'rootfs.cpio') - archive.gzip_uncompress(initrd_path_gz, initrd_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'earlycon=pl011,0x3f201000 console=ttyAMA0 ' - 'panic=-1 noreboot ' + - 'dwc_otg.fiq_fsm_enable=0') - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-initrd', initrd_path, - '-append', kernel_command_line, - '-no-reboot') - self.vm.launch() - self.wait_for_console_pattern('Boot successful.') - - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'BCM2835') - exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', - '/soc/cprman@7e101000') - exec_command_and_wait_for_pattern(self, 'halt', 'reboot: System halted') - # Wait for VM to shut down gracefully - self.vm.wait() - - def test_arm_raspi4(self): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=machine:raspi4b - :avocado: tags=device:pl011 - :avocado: tags=accel:tcg - :avocado: tags=rpi4b - - The kernel can be rebuilt using the kernel source referenced - and following the instructions on the on: - https://www.raspberrypi.org/documentation/linux/kernel/building.md - """ - - deb_url = ('http://archive.raspberrypi.org/debian/' - 'pool/main/r/raspberrypi-firmware/' - 'raspberrypi-kernel_1.20230106-1_arm64.deb') - deb_hash = '08dc55696535b18a6d4fe6fa10d4c0d905cbb2ed' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, '/boot/kernel8.img') - dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2711-rpi-4-b.dtb') - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'earlycon=pl011,mmio32,0xfe201000 ' + - 'console=ttyAMA0,115200 ' + - 'root=/dev/mmcblk1p2 rootwait ' + - 'dwc_otg.fiq_fsm_enable=0') - self.vm.add_args('-kernel', kernel_path, - 
'-dtb', dtb_path, - '-append', kernel_command_line) - # When PCI is supported we can add a USB controller: - # '-device', 'qemu-xhci,bus=pcie.1,id=xhci', - # '-device', 'usb-kbd,bus=xhci.0', - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - # When USB is enabled we can look for this - # console_pattern = 'Product: QEMU USB Keyboard' - # self.wait_for_console_pattern(console_pattern) - console_pattern = 'Waiting for root device' - self.wait_for_console_pattern(console_pattern) - - - def test_arm_raspi4_initrd(self): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=machine:raspi4b - :avocado: tags=device:pl011 - :avocado: tags=accel:tcg - :avocado: tags=rpi4b - - The kernel can be rebuilt using the kernel source referenced - and following the instructions on the on: - https://www.raspberrypi.org/documentation/linux/kernel/building.md - """ - deb_url = ('http://archive.raspberrypi.org/debian/' - 'pool/main/r/raspberrypi-firmware/' - 'raspberrypi-kernel_1.20230106-1_arm64.deb') - deb_hash = '08dc55696535b18a6d4fe6fa10d4c0d905cbb2ed' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, '/boot/kernel8.img') - dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2711-rpi-4-b.dtb') - - initrd_url = ('https://github.com/groeck/linux-build-test/raw/' - '86b2be1384d41c8c388e63078a847f1e1c4cb1de/rootfs/' - 'arm64/rootfs.cpio.gz') - initrd_hash = 'f3d4f9fa92a49aa542f1b44d34be77bbf8ca5b9d' - initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - initrd_path = os.path.join(self.workdir, 'rootfs.cpio') - archive.gzip_uncompress(initrd_path_gz, initrd_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'earlycon=pl011,mmio32,0xfe201000 ' + - 'console=ttyAMA0,115200 ' + - 'panic=-1 noreboot ' + - 'dwc_otg.fiq_fsm_enable=0') - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-initrd', initrd_path, - '-append', kernel_command_line, - '-no-reboot') - # When PCI is supported we can add a USB controller: - # '-device', 'qemu-xhci,bus=pcie.1,id=xhci', - # '-device', 'usb-kbd,bus=xhci.0', - self.vm.launch() - self.wait_for_console_pattern('Boot successful.') - - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'BCM2835') - exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', - 'cprman@7e101000') - exec_command_and_wait_for_pattern(self, 'halt', 'reboot: System halted') - # TODO: Raspberry Pi4 doesn't shut down properly with recent kernels - # Wait for VM to shut down gracefully - #self.vm.wait() - - def test_arm_exynos4210_initrd(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:smdkc210 - :avocado: tags=accel:tcg - """ - deb_url = ('https://snapshot.debian.org/archive/debian/' - '20190928T224601Z/pool/main/l/linux/' - 'linux-image-4.19.0-6-armmp_4.19.67-2+deb10u1_armhf.deb') - deb_hash = 'fa9df4a0d38936cb50084838f2cb933f570d7d82' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinuz-4.19.0-6-armmp') - dtb_path = '/usr/lib/linux-image-4.19.0-6-armmp/exynos4210-smdkv310.dtb' - dtb_path = self.extract_from_deb(deb_path, dtb_path) - - initrd_url = ('https://github.com/groeck/linux-build-test/raw/' - '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' - 'arm/rootfs-armv5.cpio.gz') - initrd_hash = '2b50f1873e113523967806f4da2afe385462ff9b' - initrd_path_gz = self.fetch_asset(initrd_url, 
asset_hash=initrd_hash) - initrd_path = os.path.join(self.workdir, 'rootfs.cpio') - archive.gzip_uncompress(initrd_path_gz, initrd_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'earlycon=exynos4210,0x13800000 earlyprintk ' + - 'console=ttySAC0,115200n8 ' + - 'random.trust_cpu=off cryptomgr.notests ' + - 'cpuidle.off=1 panic=-1 noreboot') - - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-initrd', initrd_path, - '-append', kernel_command_line, - '-no-reboot') - self.vm.launch() - - self.wait_for_console_pattern('Boot successful.') - # TODO user command, for now the uart is stuck - - def test_arm_cubieboard_initrd(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:cubieboard - :avocado: tags=accel:tcg - """ - deb_url = ('https://apt.armbian.com/pool/main/l/' - 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb') - deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinuz-6.6.16-current-sunxi') - dtb_path = '/usr/lib/linux-image-6.6.16-current-sunxi/sun4i-a10-cubieboard.dtb' - dtb_path = self.extract_from_deb(deb_path, dtb_path) - initrd_url = ('https://github.com/groeck/linux-build-test/raw/' - '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' - 'arm/rootfs-armv5.cpio.gz') - initrd_hash = '2b50f1873e113523967806f4da2afe385462ff9b' - initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - initrd_path = os.path.join(self.workdir, 'rootfs.cpio') - archive.gzip_uncompress(initrd_path_gz, initrd_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0,115200 ' - 'usbcore.nousb ' - 'panic=-1 noreboot') - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-initrd', initrd_path, - '-append', kernel_command_line, - '-no-reboot') - self.vm.launch() - self.wait_for_console_pattern('Boot successful.') - - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'Allwinner sun4i/sun5i') - exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', - 'system-control@1c00000') - exec_command_and_wait_for_pattern(self, 'reboot', - 'reboot: Restarting system') - # Wait for VM to shut down gracefully - self.vm.wait() - - def test_arm_cubieboard_sata(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:cubieboard - :avocado: tags=accel:tcg - """ - deb_url = ('https://apt.armbian.com/pool/main/l/' - 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb') - deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinuz-6.6.16-current-sunxi') - dtb_path = '/usr/lib/linux-image-6.6.16-current-sunxi/sun4i-a10-cubieboard.dtb' - dtb_path = self.extract_from_deb(deb_path, dtb_path) - rootfs_url = ('https://github.com/groeck/linux-build-test/raw/' - '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' - 'arm/rootfs-armv5.ext2.gz') - rootfs_hash = '093e89d2b4d982234bf528bc9fb2f2f17a9d1f93' - rootfs_path_gz = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash) - rootfs_path = os.path.join(self.workdir, 'rootfs.cpio') - archive.gzip_uncompress(rootfs_path_gz, rootfs_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0,115200 ' - 
'usbcore.nousb ' - 'root=/dev/sda ro ' - 'panic=-1 noreboot') - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-drive', 'if=none,format=raw,id=disk0,file=' - + rootfs_path, - '-device', 'ide-hd,bus=ide.0,drive=disk0', - '-append', kernel_command_line, - '-no-reboot') - self.vm.launch() - self.wait_for_console_pattern('Boot successful.') - - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'Allwinner sun4i/sun5i') - exec_command_and_wait_for_pattern(self, 'cat /proc/partitions', - 'sda') - exec_command_and_wait_for_pattern(self, 'reboot', - 'reboot: Restarting system') - # Wait for VM to shut down gracefully - self.vm.wait() - - @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited') - def test_arm_cubieboard_openwrt_22_03_2(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:cubieboard - :avocado: tags=device:sd - """ - - # This test download a 7.5 MiB compressed image and expand it - # to 126 MiB. - image_url = ('https://downloads.openwrt.org/releases/22.03.2/targets/' - 'sunxi/cortexa8/openwrt-22.03.2-sunxi-cortexa8-' - 'cubietech_a10-cubieboard-ext4-sdcard.img.gz') - image_hash = ('94b5ecbfbc0b3b56276e5146b899eafa' - '2ac5dc2d08733d6705af9f144f39f554') - image_path_gz = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - image_path = archive.extract(image_path_gz, self.workdir) - image_pow2ceil_expand(image_path) - - self.vm.set_console() - self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw', - '-nic', 'user', - '-no-reboot') - self.vm.launch() - - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'usbcore.nousb ' - 'noreboot') - - self.wait_for_console_pattern('U-Boot SPL') - - interrupt_interactive_console_until_pattern( - self, 'Hit any key to stop autoboot:', '=>') - exec_command_and_wait_for_pattern(self, "setenv extraargs '" + - kernel_command_line + "'", '=>') - exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...'); - - self.wait_for_console_pattern( - 'Please press Enter to activate this console.') - - exec_command_and_wait_for_pattern(self, ' ', 'root@') - - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'Allwinner sun4i/sun5i') - exec_command_and_wait_for_pattern(self, 'reboot', - 'reboot: Restarting system') - # Wait for VM to shut down gracefully - self.vm.wait() - - @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') - def test_arm_quanta_gsj(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:quanta-gsj - :avocado: tags=accel:tcg - """ - # 25 MiB compressed, 32 MiB uncompressed. - image_url = ( - 'https://github.com/hskinnemoen/openbmc/releases/download/' - '20200711-gsj-qemu-0/obmc-phosphor-image-gsj.static.mtd.gz') - image_hash = '14895e634923345cb5c8776037ff7876df96f6b1' - image_path_gz = self.fetch_asset(image_url, asset_hash=image_hash) - image_name = 'obmc.mtd' - image_path = os.path.join(self.workdir, image_name) - archive.gzip_uncompress(image_path_gz, image_path) - - self.vm.set_console() - drive_args = 'file=' + image_path + ',if=mtd,bus=0,unit=0' - self.vm.add_args('-drive', drive_args) - self.vm.launch() - - # Disable drivers and services that stall for a long time during boot, - # to avoid running past the 90-second timeout. These may be removed - # as the corresponding device support is added. 
- kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + ( - 'console=${console} ' - 'mem=${mem} ' - 'initcall_blacklist=npcm_i2c_bus_driver_init ' - 'systemd.mask=systemd-random-seed.service ' - 'systemd.mask=dropbearkey.service ' - ) - - self.wait_for_console_pattern('> BootBlock by Nuvoton') - self.wait_for_console_pattern('>Device: Poleg BMC NPCM730') - self.wait_for_console_pattern('>Skip DDR init.') - self.wait_for_console_pattern('U-Boot ') - interrupt_interactive_console_until_pattern( - self, 'Hit any key to stop autoboot:', 'U-Boot>') - exec_command_and_wait_for_pattern( - self, "setenv bootargs ${bootargs} " + kernel_command_line, - 'U-Boot>') - exec_command_and_wait_for_pattern( - self, 'run romboot', 'Booting Kernel from flash') - self.wait_for_console_pattern('Booting Linux on physical CPU 0x0') - self.wait_for_console_pattern('CPU1: thread -1, cpu 1, socket 0') - self.wait_for_console_pattern('OpenBMC Project Reference Distro') - self.wait_for_console_pattern('gsj login:') - - def test_arm_quanta_gsj_initrd(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:quanta-gsj - :avocado: tags=accel:tcg - """ - initrd_url = ( - 'https://github.com/hskinnemoen/openbmc/releases/download/' - '20200711-gsj-qemu-0/obmc-phosphor-initramfs-gsj.cpio.xz') - initrd_hash = '98fefe5d7e56727b1eb17d5c00311b1b5c945300' - initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - kernel_url = ( - 'https://github.com/hskinnemoen/openbmc/releases/download/' - '20200711-gsj-qemu-0/uImage-gsj.bin') - kernel_hash = 'fa67b2f141d56d39b3c54305c0e8a899c99eb2c7' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - dtb_url = ( - 'https://github.com/hskinnemoen/openbmc/releases/download/' - '20200711-gsj-qemu-0/nuvoton-npcm730-gsj.dtb') - dtb_hash = '18315f7006d7b688d8312d5c727eecd819aa36a4' - dtb_path = self.fetch_asset(dtb_url, asset_hash=dtb_hash) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0,115200n8 ' - 'earlycon=uart8250,mmio32,0xf0001000') - self.vm.add_args('-kernel', kernel_path, - '-initrd', initrd_path, - '-dtb', dtb_path, - '-append', kernel_command_line) - self.vm.launch() - - self.wait_for_console_pattern('Booting Linux on physical CPU 0x0') - self.wait_for_console_pattern('CPU1: thread -1, cpu 1, socket 0') - self.wait_for_console_pattern( - 'Give root password for system maintenance') - - def test_arm_bpim2u(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:bpim2u - :avocado: tags=accel:tcg - """ - deb_url = ('https://apt.armbian.com/pool/main/l/' - 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb') - deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinuz-6.6.16-current-sunxi') - dtb_path = ('/usr/lib/linux-image-6.6.16-current-sunxi/' - 'sun8i-r40-bananapi-m2-ultra.dtb') - dtb_path = self.extract_from_deb(deb_path, dtb_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0,115200n8 ' - 'earlycon=uart,mmio32,0x1c28000') - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-append', kernel_command_line) - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - - def test_arm_bpim2u_initrd(self): - """ - :avocado: tags=arch:arm - :avocado: tags=accel:tcg 
- :avocado: tags=machine:bpim2u - """ - deb_url = ('https://apt.armbian.com/pool/main/l/' - 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb') - deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinuz-6.6.16-current-sunxi') - dtb_path = ('/usr/lib/linux-image-6.6.16-current-sunxi/' - 'sun8i-r40-bananapi-m2-ultra.dtb') - dtb_path = self.extract_from_deb(deb_path, dtb_path) - initrd_url = ('https://github.com/groeck/linux-build-test/raw/' - '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' - 'arm/rootfs-armv7a.cpio.gz') - initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c' - initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - initrd_path = os.path.join(self.workdir, 'rootfs.cpio') - archive.gzip_uncompress(initrd_path_gz, initrd_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0,115200 ' - 'panic=-1 noreboot') - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-initrd', initrd_path, - '-append', kernel_command_line, - '-no-reboot') - self.vm.launch() - self.wait_for_console_pattern('Boot successful.') - - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'Allwinner sun8i Family') - exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', - 'system-control@1c00000') - exec_command_and_wait_for_pattern(self, 'reboot', - 'reboot: Restarting system') - # Wait for VM to shut down gracefully - self.vm.wait() - - def test_arm_bpim2u_gmac(self): - """ - :avocado: tags=arch:arm - :avocado: tags=accel:tcg - :avocado: tags=machine:bpim2u - :avocado: tags=device:sd - """ - self.require_netdev('user') - - deb_url = ('https://apt.armbian.com/pool/main/l/' - 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb') - deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinuz-6.6.16-current-sunxi') - dtb_path = ('/usr/lib/linux-image-6.6.16-current-sunxi/' - 'sun8i-r40-bananapi-m2-ultra.dtb') - dtb_path = self.extract_from_deb(deb_path, dtb_path) - rootfs_url = ('http://storage.kernelci.org/images/rootfs/buildroot/' - 'buildroot-baseline/20221116.0/armel/rootfs.ext2.xz') - rootfs_hash = 'fae32f337c7b87547b10f42599acf109da8b6d9a' - rootfs_path_xz = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash) - rootfs_path = os.path.join(self.workdir, 'rootfs.cpio') - archive.lzma_uncompress(rootfs_path_xz, rootfs_path) - image_pow2ceil_expand(rootfs_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0,115200 ' - 'root=b300 rootwait rw ' - 'panic=-1 noreboot') - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-drive', 'file=' + rootfs_path + ',if=sd,format=raw', - '-net', 'nic,model=gmac,netdev=host_gmac', - '-netdev', 'user,id=host_gmac', - '-append', kernel_command_line, - '-no-reboot') - self.vm.launch() - shell_ready = "/bin/sh: can't access tty; job control turned off" - self.wait_for_console_pattern(shell_ready) - - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'Allwinner sun8i Family') - exec_command_and_wait_for_pattern(self, 'cat /proc/partitions', - 'mmcblk') - exec_command_and_wait_for_pattern(self, 'ifconfig eth0 up', - 'eth0: Link is Up') 
- exec_command_and_wait_for_pattern(self, 'udhcpc eth0', - 'udhcpc: lease of 10.0.2.15 obtained') - exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2', - '3 packets transmitted, 3 packets received, 0% packet loss') - exec_command_and_wait_for_pattern(self, 'reboot', - 'reboot: Restarting system') - # Wait for VM to shut down gracefully - self.vm.wait() - - @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited') - def test_arm_bpim2u_openwrt_22_03_3(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:bpim2u - :avocado: tags=device:sd - """ - - # This test download a 8.9 MiB compressed image and expand it - # to 127 MiB. - image_url = ('https://downloads.openwrt.org/releases/22.03.3/targets/' - 'sunxi/cortexa7/openwrt-22.03.3-sunxi-cortexa7-' - 'sinovoip_bananapi-m2-ultra-ext4-sdcard.img.gz') - image_hash = ('5b41b4e11423e562c6011640f9a7cd3b' - 'dd0a3d42b83430f7caa70a432e6cd82c') - image_path_gz = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - image_path = archive.extract(image_path_gz, self.workdir) - image_pow2ceil_expand(image_path) - - self.vm.set_console() - self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw', - '-nic', 'user', - '-no-reboot') - self.vm.launch() - - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'usbcore.nousb ' - 'noreboot') - - self.wait_for_console_pattern('U-Boot SPL') - - interrupt_interactive_console_until_pattern( - self, 'Hit any key to stop autoboot:', '=>') - exec_command_and_wait_for_pattern(self, "setenv extraargs '" + - kernel_command_line + "'", '=>') - exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...'); - - self.wait_for_console_pattern( - 'Please press Enter to activate this console.') - - exec_command_and_wait_for_pattern(self, ' ', 'root@') - - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'Allwinner sun8i Family') - exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', - 'system-control@1c00000') - - def test_arm_orangepi(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:orangepi-pc - :avocado: tags=accel:tcg - """ - deb_url = ('https://apt.armbian.com/pool/main/l/' - 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb') - deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinuz-6.6.16-current-sunxi') - dtb_path = '/usr/lib/linux-image-6.6.16-current-sunxi/sun8i-h3-orangepi-pc.dtb' - dtb_path = self.extract_from_deb(deb_path, dtb_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0,115200n8 ' - 'earlycon=uart,mmio32,0x1c28000') - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-append', kernel_command_line) - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - - def test_arm_orangepi_initrd(self): - """ - :avocado: tags=arch:arm - :avocado: tags=accel:tcg - :avocado: tags=machine:orangepi-pc - """ - deb_url = ('https://apt.armbian.com/pool/main/l/' - 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb') - deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - 
'/boot/vmlinuz-6.6.16-current-sunxi') - dtb_path = '/usr/lib/linux-image-6.6.16-current-sunxi/sun8i-h3-orangepi-pc.dtb' - dtb_path = self.extract_from_deb(deb_path, dtb_path) - initrd_url = ('https://github.com/groeck/linux-build-test/raw/' - '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' - 'arm/rootfs-armv7a.cpio.gz') - initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c' - initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - initrd_path = os.path.join(self.workdir, 'rootfs.cpio') - archive.gzip_uncompress(initrd_path_gz, initrd_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0,115200 ' - 'panic=-1 noreboot') - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-initrd', initrd_path, - '-append', kernel_command_line, - '-no-reboot') - self.vm.launch() - self.wait_for_console_pattern('Boot successful.') - - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'Allwinner sun8i Family') - exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', - 'system-control@1c00000') - exec_command_and_wait_for_pattern(self, 'reboot', - 'reboot: Restarting system') - # Wait for VM to shut down gracefully - self.vm.wait() - - def test_arm_orangepi_sd(self): - """ - :avocado: tags=arch:arm - :avocado: tags=accel:tcg - :avocado: tags=machine:orangepi-pc - :avocado: tags=device:sd - """ - self.require_netdev('user') - - deb_url = ('https://apt.armbian.com/pool/main/l/' - 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb') - deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinuz-6.6.16-current-sunxi') - dtb_path = '/usr/lib/linux-image-6.6.16-current-sunxi/sun8i-h3-orangepi-pc.dtb' - dtb_path = self.extract_from_deb(deb_path, dtb_path) - rootfs_url = ('http://storage.kernelci.org/images/rootfs/buildroot/' - 'buildroot-baseline/20221116.0/armel/rootfs.ext2.xz') - rootfs_hash = 'fae32f337c7b87547b10f42599acf109da8b6d9a' - rootfs_path_xz = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash) - rootfs_path = os.path.join(self.workdir, 'rootfs.cpio') - archive.lzma_uncompress(rootfs_path_xz, rootfs_path) - image_pow2ceil_expand(rootfs_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0,115200 ' - 'root=/dev/mmcblk0 rootwait rw ' - 'panic=-1 noreboot') - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-drive', 'file=' + rootfs_path + ',if=sd,format=raw', - '-append', kernel_command_line, - '-no-reboot') - self.vm.launch() - shell_ready = "/bin/sh: can't access tty; job control turned off" - self.wait_for_console_pattern(shell_ready) - - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'Allwinner sun8i Family') - exec_command_and_wait_for_pattern(self, 'cat /proc/partitions', - 'mmcblk0') - exec_command_and_wait_for_pattern(self, 'ifconfig eth0 up', - 'eth0: Link is Up') - exec_command_and_wait_for_pattern(self, 'udhcpc eth0', - 'udhcpc: lease of 10.0.2.15 obtained') - exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2', - '3 packets transmitted, 3 packets received, 0% packet loss') - exec_command_and_wait_for_pattern(self, 'reboot', - 'reboot: Restarting system') - # Wait for VM to shut down gracefully - self.vm.wait() - - @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited') - def 
test_arm_orangepi_bionic_20_08(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:orangepi-pc - :avocado: tags=device:sd - """ - - # This test download a 275 MiB compressed image and expand it - # to 1036 MiB, but the underlying filesystem is 1552 MiB... - # As we expand it to 2 GiB we are safe. - - image_url = ('https://archive.armbian.com/orangepipc/archive/' - 'Armbian_20.08.1_Orangepipc_bionic_current_5.8.5.img.xz') - image_hash = ('b4d6775f5673486329e45a0586bf06b6' - 'dbe792199fd182ac6b9c7bb6c7d3e6dd') - image_path_xz = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - image_path = archive.extract(image_path_xz, self.workdir) - image_pow2ceil_expand(image_path) - - self.vm.set_console() - self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw', - '-nic', 'user', - '-no-reboot') - self.vm.launch() - - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0,115200 ' - 'loglevel=7 ' - 'nosmp ' - 'systemd.default_timeout_start_sec=9000 ' - 'systemd.mask=armbian-zram-config.service ' - 'systemd.mask=armbian-ramlog.service') - - self.wait_for_console_pattern('U-Boot SPL') - self.wait_for_console_pattern('Autoboot in ') - exec_command_and_wait_for_pattern(self, ' ', '=>') - exec_command_and_wait_for_pattern(self, "setenv extraargs '" + - kernel_command_line + "'", '=>') - exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...'); - - self.wait_for_console_pattern('systemd[1]: Set hostname ' + - 'to ') - self.wait_for_console_pattern('Starting Load Kernel Modules...') - - @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited') - def test_arm_orangepi_uboot_netbsd9(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:orangepi-pc - :avocado: tags=device:sd - :avocado: tags=os:netbsd - """ - # This test download a 304MB compressed image and expand it to 2GB - deb_url = ('http://snapshot.debian.org/archive/debian/' - '20200108T145233Z/pool/main/u/u-boot/' - 'u-boot-sunxi_2020.01%2Bdfsg-1_armhf.deb') - deb_hash = 'f67f404a80753ca3d1258f13e38f2b060e13db99' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - # We use the common OrangePi PC 'plus' build of U-Boot for our secondary - # program loader (SPL). We will then set the path to the more specific - # OrangePi "PC" device tree blob with 'setenv fdtfile' in U-Boot prompt, - # before to boot NetBSD. 
- uboot_path = '/usr/lib/u-boot/orangepi_plus/u-boot-sunxi-with-spl.bin' - uboot_path = self.extract_from_deb(deb_path, uboot_path) - image_url = ('https://cdn.netbsd.org/pub/NetBSD/NetBSD-9.0/' - 'evbarm-earmv7hf/binary/gzimg/armv7.img.gz') - image_hash = '2babb29d36d8360adcb39c09e31060945259917a' - image_path_gz = self.fetch_asset(image_url, asset_hash=image_hash) - image_path = os.path.join(self.workdir, 'armv7.img') - archive.gzip_uncompress(image_path_gz, image_path) - image_pow2ceil_expand(image_path) - image_drive_args = 'if=sd,format=raw,snapshot=on,file=' + image_path - - # dd if=u-boot-sunxi-with-spl.bin of=armv7.img bs=1K seek=8 conv=notrunc - with open(uboot_path, 'rb') as f_in: - with open(image_path, 'r+b') as f_out: - f_out.seek(8 * 1024) - shutil.copyfileobj(f_in, f_out) - - self.vm.set_console() - self.vm.add_args('-nic', 'user', - '-drive', image_drive_args, - '-global', 'allwinner-rtc.base-year=2000', - '-no-reboot') - self.vm.launch() - wait_for_console_pattern(self, 'U-Boot 2020.01+dfsg-1') - interrupt_interactive_console_until_pattern(self, - 'Hit any key to stop autoboot:', - 'switch to partitions #0, OK') - - exec_command_and_wait_for_pattern(self, '', '=>') - cmd = 'setenv bootargs root=ld0a' - exec_command_and_wait_for_pattern(self, cmd, '=>') - cmd = 'setenv kernel netbsd-GENERIC.ub' - exec_command_and_wait_for_pattern(self, cmd, '=>') - cmd = 'setenv fdtfile dtb/sun8i-h3-orangepi-pc.dtb' - exec_command_and_wait_for_pattern(self, cmd, '=>') - cmd = ("setenv bootcmd 'fatload mmc 0:1 ${kernel_addr_r} ${kernel}; " - "fatload mmc 0:1 ${fdt_addr_r} ${fdtfile}; " - "fdt addr ${fdt_addr_r}; " - "bootm ${kernel_addr_r} - ${fdt_addr_r}'") - exec_command_and_wait_for_pattern(self, cmd, '=>') - - exec_command_and_wait_for_pattern(self, 'boot', - 'Booting kernel from Legacy Image') - wait_for_console_pattern(self, 'Starting kernel ...') - wait_for_console_pattern(self, 'NetBSD 9.0 (GENERIC)') - # Wait for user-space - wait_for_console_pattern(self, 'Starting root file system check') - - def test_aarch64_raspi3_atf(self): - """ - :avocado: tags=accel:tcg - :avocado: tags=arch:aarch64 - :avocado: tags=machine:raspi3b - :avocado: tags=cpu:cortex-a53 - :avocado: tags=device:pl011 - :avocado: tags=atf - """ - zip_url = ('https://github.com/pbatard/RPi3/releases/download/' - 'v1.15/RPi3_UEFI_Firmware_v1.15.zip') - zip_hash = '74b3bd0de92683cadb14e008a7575e1d0c3cafb9' - zip_path = self.fetch_asset(zip_url, asset_hash=zip_hash) - - archive.extract(zip_path, self.workdir) - efi_fd = os.path.join(self.workdir, 'RPI_EFI.fd') - - self.vm.set_console(console_index=1) - self.vm.add_args('-nodefaults', - '-device', 'loader,file=%s,force-raw=true' % efi_fd) - self.vm.launch() - self.wait_for_console_pattern('version UEFI Firmware v1.15') - - def test_s390x_s390_ccw_virtio(self): - """ - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive' - '/fedora-secondary/releases/29/Everything/s390x/os/images' - '/kernel.img') - kernel_hash = 'e8e8439103ef8053418ef062644ffd46a7919313' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - self.vm.set_console() - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=sclp0' - self.vm.add_args('-nodefaults', - '-kernel', kernel_path, - '-append', kernel_command_line) - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - - def test_alpha_clipper(self): - """ - 
:avocado: tags=arch:alpha - :avocado: tags=machine:clipper - """ - kernel_url = ('http://archive.debian.org/debian/dists/lenny/main/' - 'installer-alpha/20090123lenny10/images/cdrom/vmlinuz') - kernel_hash = '3a943149335529e2ed3e74d0d787b85fb5671ba3' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - uncompressed_kernel = archive.uncompress(kernel_path, self.workdir) - - self.vm.set_console() - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' - self.vm.add_args('-nodefaults', - '-kernel', uncompressed_kernel, - '-append', kernel_command_line) - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - - def test_m68k_q800(self): - """ - :avocado: tags=arch:m68k - :avocado: tags=machine:q800 - """ - deb_url = ('https://snapshot.debian.org/archive/debian-ports' - '/20191021T083923Z/pool-m68k/main' - '/l/linux/kernel-image-5.3.0-1-m68k-di_5.3.7-1_m68k.udeb') - deb_hash = '044954bb9be4160a3ce81f8bc1b5e856b75cccd1' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinux-5.3.0-1-m68k') - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0 vga=off') - self.vm.add_args('-kernel', kernel_path, - '-append', kernel_command_line) - self.vm.launch() - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.wait_for_console_pattern(console_pattern) - console_pattern = 'No filesystem could mount root' - self.wait_for_console_pattern(console_pattern) - - def do_test_advcal_2018(self, day, tar_hash, kernel_name, console=0): - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day' + day + '.tar.xz') - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - archive.extract(file_path, self.workdir) - self.vm.set_console(console_index=console) - self.vm.add_args('-kernel', - self.workdir + '/day' + day + '/' + kernel_name) - self.vm.launch() - self.wait_for_console_pattern('QEMU advent calendar') - - def test_arm_vexpressa9(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:vexpress-a9 - """ - tar_hash = '32b7677ce8b6f1471fb0059865f451169934245b' - self.vm.add_args('-dtb', self.workdir + '/day16/vexpress-v2p-ca9.dtb') - self.do_test_advcal_2018('16', tar_hash, 'winter.zImage') - - def test_arm_ast2600_debian(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:rainier-bmc - """ - deb_url = ('http://snapshot.debian.org/archive/debian/' - '20220606T211338Z/' - 'pool/main/l/linux/' - 'linux-image-5.17.0-2-armmp_5.17.6-1%2Bb1_armhf.deb') - deb_hash = '8acb2b4439faedc2f3ed4bdb2847ad4f6e0491f73debaeb7f660c8abe4dcdc0e' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash, - algorithm='sha256') - kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinuz-5.17.0-2-armmp') - dtb_path = self.extract_from_deb(deb_path, - '/usr/lib/linux-image-5.17.0-2-armmp/aspeed-bmc-ibm-rainier.dtb') - - self.vm.set_console() - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-net', 'nic') - self.vm.launch() - self.wait_for_console_pattern("Booting Linux on physical CPU 0xf00") - self.wait_for_console_pattern("SMP: Total of 2 processors activated") - self.wait_for_console_pattern("No filesystem could mount root") - - def test_m68k_mcf5208evb(self): - """ - :avocado: tags=arch:m68k - :avocado: tags=machine:mcf5208evb - """ - tar_hash = 'ac688fd00561a2b6ce1359f9ff6aa2b98c9a570c' - 
self.do_test_advcal_2018('07', tar_hash, 'sanity-clause.elf') - - def test_or1k_sim(self): - """ - :avocado: tags=arch:or1k - :avocado: tags=machine:or1k-sim - """ - tar_hash = '20334cdaf386108c530ff0badaecc955693027dd' - self.do_test_advcal_2018('20', tar_hash, 'vmlinux') - - def test_ppc64_e500(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:ppce500 - :avocado: tags=cpu:e5500 - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - tar_hash = '6951d86d644b302898da2fd701739c9406527fe1' - self.do_test_advcal_2018('19', tar_hash, 'uImage') - - def do_test_ppc64_powernv(self, proc): - self.require_accelerator("tcg") - images_url = ('https://github.com/open-power/op-build/releases/download/v2.7/') - - kernel_url = images_url + 'zImage.epapr' - kernel_hash = '0ab237df661727e5392cee97460e8674057a883c5f74381a128fa772588d45cd' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash, - algorithm='sha256') - self.vm.set_console() - self.vm.add_args('-kernel', kernel_path, - '-append', 'console=tty0 console=hvc0', - '-device', 'pcie-pci-bridge,id=bridge1,bus=pcie.1,addr=0x0', - '-device', 'nvme,bus=pcie.2,addr=0x0,serial=1234', - '-device', 'e1000e,bus=bridge1,addr=0x3', - '-device', 'nec-usb-xhci,bus=bridge1,addr=0x2') - self.vm.launch() - - self.wait_for_console_pattern("CPU: " + proc + " generation processor") - self.wait_for_console_pattern("zImage starting: loaded") - self.wait_for_console_pattern("Run /init as init process") - # Device detection output driven by udev probing is sometimes cut off - # from console output, suspect S14silence-console init script. - - def test_ppc_powernv8(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:powernv8 - :avocado: tags=accel:tcg - """ - self.do_test_ppc64_powernv('P8') - - def test_ppc_powernv9(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:powernv9 - :avocado: tags=accel:tcg - """ - self.do_test_ppc64_powernv('P9') - - def test_ppc_powernv10(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:powernv10 - :avocado: tags=accel:tcg - """ - self.do_test_ppc64_powernv('P10') - - def test_ppc_g3beige(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:g3beige - :avocado: tags=accel:tcg - """ - # TODO: g3beige works with kvm_pr but we don't have a - # reliable way ATM (e.g. looking at /proc/modules) to detect - # whether we're running kvm_hv or kvm_pr. For now let's - # disable this test if we don't have TCG support. - self.require_accelerator("tcg") - tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc' - self.vm.add_args('-M', 'graphics=off') - self.do_test_advcal_2018('15', tar_hash, 'invaders.elf') - - def test_ppc_mac99(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:mac99 - :avocado: tags=accel:tcg - """ - # TODO: mac99 works with kvm_pr but we don't have a - # reliable way ATM (e.g. looking at /proc/modules) to detect - # whether we're running kvm_hv or kvm_pr. For now let's - # disable this test if we don't have TCG support. - self.require_accelerator("tcg") - tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc' - self.vm.add_args('-M', 'graphics=off') - self.do_test_advcal_2018('15', tar_hash, 'invaders.elf') - - # This test has a 6-10% failure rate on various hosts that look - # like issues with a buggy kernel. As a result we don't want it - # gating releases on Gitlab. 
- @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - - def test_sh4_r2d(self): - """ - :avocado: tags=arch:sh4 - :avocado: tags=machine:r2d - :avocado: tags=flaky - """ - tar_hash = 'fe06a4fd8ccbf2e27928d64472939d47829d4c7e' - self.vm.add_args('-append', 'console=ttySC1') - self.do_test_advcal_2018('09', tar_hash, 'zImage', console=1) - - def test_sparc_ss20(self): - """ - :avocado: tags=arch:sparc - :avocado: tags=machine:SS-20 - """ - tar_hash = 'b18550d5d61c7615d989a06edace051017726a9f' - self.do_test_advcal_2018('11', tar_hash, 'zImage.elf') - - def test_xtensa_lx60(self): - """ - :avocado: tags=arch:xtensa - :avocado: tags=machine:lx60 - :avocado: tags=cpu:dc233c - """ - tar_hash = '49e88d9933742f0164b60839886c9739cb7a0d34' - self.do_test_advcal_2018('02', tar_hash, 'santas-sleigh-ride.elf') diff --git a/tests/avocado/boot_xen.py b/tests/avocado/boot_xen.py deleted file mode 100644 index f29bc58b9e1..00000000000 --- a/tests/avocado/boot_xen.py +++ /dev/null @@ -1,96 +0,0 @@ -# Functional test that boots a Xen hypervisor with a domU kernel and -# checks the console output is vaguely sane . -# -# Copyright (c) 2020 Linaro -# -# Author: -# Alex Bennée -# -# SPDX-License-Identifier: GPL-2.0-or-later -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os - -from avocado_qemu import wait_for_console_pattern -from boot_linux_console import LinuxKernelTest - - -class BootXen(LinuxKernelTest): - """ - Boots a Xen hypervisor with a Linux DomU kernel. - - :avocado: tags=arch:aarch64 - :avocado: tags=accel:tcg - :avocado: tags=cpu:cortex-a57 - :avocado: tags=machine:virt - """ - - timeout = 90 - XEN_COMMON_COMMAND_LINE = 'dom0_mem=128M loglvl=all guest_loglvl=all' - - def fetch_guest_kernel(self): - # Using my own built kernel - which works - kernel_url = ('https://fileserver.linaro.org/' - 's/JSsewXGZ6mqxPr5/download?path=%2F&files=' - 'linux-5.9.9-arm64-ajb') - kernel_sha1 = '4f92bc4b9f88d5ab792fa7a43a68555d344e1b83' - kernel_path = self.fetch_asset(kernel_url, - asset_hash=kernel_sha1) - - return kernel_path - - def launch_xen(self, xen_path): - """ - Launch Xen with a dom0 guest kernel - """ - self.log.info("launch with xen_path: %s", xen_path) - kernel_path = self.fetch_guest_kernel() - - self.vm.set_console() - - self.vm.add_args('-machine', 'virtualization=on', - '-m', '768', - '-kernel', xen_path, - '-append', self.XEN_COMMON_COMMAND_LINE, - '-device', - 'guest-loader,addr=0x47000000,kernel=%s,bootargs=console=hvc0' - % (kernel_path)) - - self.vm.launch() - - console_pattern = 'VFS: Cannot open root device' - wait_for_console_pattern(self, console_pattern, "Panic on CPU 0:") - - def test_arm64_xen_411_and_dom0(self): - # archive of file from https://deb.debian.org/debian/pool/main/x/xen/ - xen_url = ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/' - 'download?path=%2F&files=' - 'xen-hypervisor-4.11-arm64_4.11.4%2B37-g3263f257ca-1_arm64.deb') - xen_sha1 = '034e634d4416adbad1212d59b62bccdcda63e62a' - xen_deb = self.fetch_asset(xen_url, asset_hash=xen_sha1) - xen_path = self.extract_from_deb(xen_deb, "/boot/xen-4.11-arm64") - - self.launch_xen(xen_path) - - def test_arm64_xen_414_and_dom0(self): - # archive of file from https://deb.debian.org/debian/pool/main/x/xen/ - xen_url = ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/' - 'download?path=%2F&files=' - 'xen-hypervisor-4.14-arm64_4.14.0%2B80-gd101b417b7-1_arm64.deb') - xen_sha1 = 
'b9d209dd689ed2b393e625303a225badefec1160' - xen_deb = self.fetch_asset(xen_url, asset_hash=xen_sha1) - xen_path = self.extract_from_deb(xen_deb, "/boot/xen-4.14-arm64") - - self.launch_xen(xen_path) - - def test_arm64_xen_415_and_dom0(self): - xen_url = ('https://fileserver.linaro.org/' - 's/JSsewXGZ6mqxPr5/download' - '?path=%2F&files=xen-upstream-4.15-unstable.deb') - xen_sha1 = 'fc191172b85cf355abb95d275a24cc0f6d6579d8' - xen_deb = self.fetch_asset(xen_url, asset_hash=xen_sha1) - xen_path = self.extract_from_deb(xen_deb, "/boot/xen-4.15-unstable") - - self.launch_xen(xen_path) diff --git a/tests/avocado/cpu_queries.py b/tests/avocado/cpu_queries.py deleted file mode 100644 index d3faa14720c..00000000000 --- a/tests/avocado/cpu_queries.py +++ /dev/null @@ -1,35 +0,0 @@ -# Sanity check of query-cpu-* results -# -# Copyright (c) 2019 Red Hat, Inc. -# -# Author: -# Eduardo Habkost -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -from avocado_qemu import QemuSystemTest - -class QueryCPUModelExpansion(QemuSystemTest): - """ - Run query-cpu-model-expansion for each CPU model, and validate results - """ - - def test(self): - """ - :avocado: tags=arch:x86_64 - :avocado: tags=machine:none - """ - self.vm.add_args('-S') - self.vm.launch() - - cpus = self.vm.cmd('query-cpu-definitions') - for c in cpus: - self.log.info("Checking CPU: %s", c) - self.assertNotIn('', c['unavailable-features'], c['name']) - - for c in cpus: - model = {'name': c['name']} - e = self.vm.cmd('query-cpu-model-expansion', model=model, - type='full') - self.assertEqual(e['model']['name'], c['name']) diff --git a/tests/avocado/empty_cpu_model.py b/tests/avocado/empty_cpu_model.py deleted file mode 100644 index d906ef3d3cf..00000000000 --- a/tests/avocado/empty_cpu_model.py +++ /dev/null @@ -1,19 +0,0 @@ -# Check for crash when using empty -cpu option -# -# Copyright (c) 2019 Red Hat, Inc. -# -# Author: -# Eduardo Habkost -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. -from avocado_qemu import QemuSystemTest - -class EmptyCPUModel(QemuSystemTest): - def test(self): - self.vm.add_args('-S', '-display', 'none', '-machine', 'none', '-cpu', '') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - self.vm.wait() - self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") - self.assertRegex(self.vm.get_log(), r'-cpu option cannot be empty') diff --git a/tests/avocado/hotplug_blk.py b/tests/avocado/hotplug_blk.py deleted file mode 100644 index d55ded1c1d7..00000000000 --- a/tests/avocado/hotplug_blk.py +++ /dev/null @@ -1,69 +0,0 @@ -# Functional test that hotplugs a virtio blk disk and checks it on a Linux -# guest -# -# Copyright (c) 2021 Red Hat, Inc. -# Copyright (c) Yandex -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. 
- -import time - -from avocado_qemu.linuxtest import LinuxTest - - -class HotPlug(LinuxTest): - def blockdev_add(self) -> None: - self.vm.cmd('blockdev-add', **{ - 'driver': 'null-co', - 'size': 1073741824, - 'node-name': 'disk' - }) - - def assert_vda(self) -> None: - self.ssh_command('test -e /sys/block/vda') - - def assert_no_vda(self) -> None: - with self.assertRaises(AssertionError): - self.assert_vda() - - def plug(self) -> None: - args = { - 'driver': 'virtio-blk-pci', - 'drive': 'disk', - 'id': 'virtio-disk0', - 'bus': 'pci.1', - 'addr': 1 - } - - self.assert_no_vda() - self.vm.cmd('device_add', args) - try: - self.assert_vda() - except AssertionError: - time.sleep(1) - self.assert_vda() - - def unplug(self) -> None: - self.vm.cmd('device_del', id='virtio-disk0') - - self.vm.event_wait('DEVICE_DELETED', 1.0, - match={'data': {'device': 'virtio-disk0'}}) - - self.assert_no_vda() - - def test(self) -> None: - """ - :avocado: tags=arch:x86_64 - :avocado: tags=machine:q35 - :avocado: tags=accel:kvm - """ - self.require_accelerator('kvm') - self.vm.add_args('-accel', 'kvm') - self.vm.add_args('-device', 'pcie-pci-bridge,id=pci.1,bus=pcie.0') - - self.launch_and_wait() - self.blockdev_add() - - self.plug() - self.unplug() diff --git a/tests/avocado/hotplug_cpu.py b/tests/avocado/hotplug_cpu.py deleted file mode 100644 index 342c8385398..00000000000 --- a/tests/avocado/hotplug_cpu.py +++ /dev/null @@ -1,37 +0,0 @@ -# Functional test that hotplugs a CPU and checks it on a Linux guest -# -# Copyright (c) 2021 Red Hat, Inc. -# -# Author: -# Cleber Rosa -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -from avocado_qemu.linuxtest import LinuxTest - - -class HotPlugCPU(LinuxTest): - - def test(self): - """ - :avocado: tags=arch:x86_64 - :avocado: tags=machine:q35 - :avocado: tags=accel:kvm - """ - self.require_accelerator('kvm') - self.vm.add_args('-accel', 'kvm') - self.vm.add_args('-cpu', 'Haswell') - self.vm.add_args('-smp', '1,sockets=1,cores=2,threads=1,maxcpus=2') - self.launch_and_wait() - - self.ssh_command('test -e /sys/devices/system/cpu/cpu0') - with self.assertRaises(AssertionError): - self.ssh_command('test -e /sys/devices/system/cpu/cpu1') - - self.vm.cmd('device_add', - driver='Haswell-x86_64-cpu', - socket_id=0, - core_id=1, - thread_id=0) - self.ssh_command('test -e /sys/devices/system/cpu/cpu1') diff --git a/tests/avocado/info_usernet.py b/tests/avocado/info_usernet.py deleted file mode 100644 index e1aa7a6e0a0..00000000000 --- a/tests/avocado/info_usernet.py +++ /dev/null @@ -1,33 +0,0 @@ -# Test for the hmp command "info usernet" -# -# Copyright (c) 2021 Red Hat, Inc. -# -# Author: -# Cleber Rosa -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. 
- -from avocado_qemu import QemuSystemTest - -from qemu.utils import get_info_usernet_hostfwd_port - - -class InfoUsernet(QemuSystemTest): - """ - :avocado: tags=machine:none - """ - - def test_hostfwd(self): - self.require_netdev('user') - self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22') - self.vm.launch() - res = self.vm.cmd('human-monitor-command', - command_line='info usernet') - port = get_info_usernet_hostfwd_port(res) - self.assertIsNotNone(port, - ('"info usernet" output content does not seem to ' - 'contain the redirected port')) - self.assertGreater(port, 0, - ('Found a redirected port that is not greater than' - ' zero')) diff --git a/tests/avocado/intel_iommu.py b/tests/avocado/intel_iommu.py deleted file mode 100644 index 008f214397a..00000000000 --- a/tests/avocado/intel_iommu.py +++ /dev/null @@ -1,123 +0,0 @@ -# INTEL_IOMMU Functional tests -# -# Copyright (c) 2021 Red Hat, Inc. -# -# Author: -# Eric Auger -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. -import os - -from avocado import skipUnless -from avocado_qemu.linuxtest import LinuxTest - -@skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - -class IntelIOMMU(LinuxTest): - """ - :avocado: tags=arch:x86_64 - :avocado: tags=distro:fedora - :avocado: tags=distro_version:31 - :avocado: tags=machine:q35 - :avocado: tags=accel:kvm - :avocado: tags=intel_iommu - :avocado: tags=flaky - """ - - IOMMU_ADDON = ',iommu_platform=on,disable-modern=off,disable-legacy=on' - kernel_path = None - initrd_path = None - kernel_params = None - - def set_up_boot(self): - path = self.download_boot() - self.vm.add_args('-device', 'virtio-blk-pci,bus=pcie.0,' + - 'drive=drv0,id=virtio-disk0,bootindex=1,' - 'werror=stop,rerror=stop' + self.IOMMU_ADDON) - self.vm.add_args('-device', 'virtio-gpu-pci' + self.IOMMU_ADDON) - self.vm.add_args('-drive', - 'file=%s,if=none,cache=writethrough,id=drv0' % path) - - def setUp(self): - super(IntelIOMMU, self).setUp(None, 'virtio-net-pci' + self.IOMMU_ADDON) - - def add_common_args(self): - self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0') - self.vm.add_args('-object', - 'rng-random,id=rng0,filename=/dev/urandom') - - def common_vm_setup(self, custom_kernel=None): - self.require_accelerator("kvm") - self.add_common_args() - self.vm.add_args("-accel", "kvm") - - if custom_kernel is None: - return - - kernel_url = self.distro.pxeboot_url + 'vmlinuz' - kernel_hash = '5b6f6876e1b5bda314f93893271da0d5777b1f3c' - initrd_url = self.distro.pxeboot_url + 'initrd.img' - initrd_hash = 'dd0340a1b39bd28f88532babd4581c67649ec5b1' - self.kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - self.initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - - def run_and_check(self): - if self.kernel_path: - self.vm.add_args('-kernel', self.kernel_path, - '-append', self.kernel_params, - '-initrd', self.initrd_path) - self.launch_and_wait() - self.ssh_command('cat /proc/cmdline') - self.ssh_command('dmesg | grep -e DMAR -e IOMMU') - self.ssh_command('find /sys/kernel/iommu_groups/ -type l') - self.ssh_command('dnf -y install numactl-devel') - - def test_intel_iommu(self): - """ - :avocado: tags=intel_iommu_intremap - """ - - self.common_vm_setup(True) - self.vm.add_args('-device', 'intel-iommu,intremap=on') - self.vm.add_args('-machine', 'kernel_irqchip=split') - - self.kernel_params = (self.distro.default_kernel_params + - ' quiet intel_iommu=on') - 
self.run_and_check() - - def test_intel_iommu_strict(self): - """ - :avocado: tags=intel_iommu_strict - """ - - self.common_vm_setup(True) - self.vm.add_args('-device', 'intel-iommu,intremap=on') - self.vm.add_args('-machine', 'kernel_irqchip=split') - self.kernel_params = (self.distro.default_kernel_params + - ' quiet intel_iommu=on,strict') - self.run_and_check() - - def test_intel_iommu_strict_cm(self): - """ - :avocado: tags=intel_iommu_strict_cm - """ - - self.common_vm_setup(True) - self.vm.add_args('-device', 'intel-iommu,intremap=on,caching-mode=on') - self.vm.add_args('-machine', 'kernel_irqchip=split') - self.kernel_params = (self.distro.default_kernel_params + - ' quiet intel_iommu=on,strict') - self.run_and_check() - - def test_intel_iommu_pt(self): - """ - :avocado: tags=intel_iommu_pt - """ - - self.common_vm_setup(True) - self.vm.add_args('-device', 'intel-iommu,intremap=on') - self.vm.add_args('-machine', 'kernel_irqchip=split') - self.kernel_params = (self.distro.default_kernel_params + - ' quiet intel_iommu=on iommu=pt') - self.run_and_check() diff --git a/tests/avocado/kvm_xen_guest.py b/tests/avocado/kvm_xen_guest.py deleted file mode 100644 index f8cb458d5db..00000000000 --- a/tests/avocado/kvm_xen_guest.py +++ /dev/null @@ -1,171 +0,0 @@ -# KVM Xen guest functional tests -# -# Copyright © 2021 Red Hat, Inc. -# Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Author: -# David Woodhouse -# Alex Bennée -# -# SPDX-License-Identifier: GPL-2.0-or-later - -import os - -from qemu.machine import machine - -from avocado_qemu import LinuxSSHMixIn -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - -class KVMXenGuest(QemuSystemTest, LinuxSSHMixIn): - """ - :avocado: tags=arch:x86_64 - :avocado: tags=machine:q35 - :avocado: tags=accel:kvm - :avocado: tags=kvm_xen_guest - """ - - KERNEL_DEFAULT = 'printk.time=0 root=/dev/xvda console=ttyS0' - - kernel_path = None - kernel_params = None - - # Fetch assets from the kvm-xen-guest subdir of my shared test - # images directory on fileserver.linaro.org where you can find - # build instructions for how they where assembled. - def get_asset(self, name, sha1): - base_url = ('https://fileserver.linaro.org/s/' - 'kE4nCFLdQcoBF9t/download?' 
- 'path=%2Fkvm-xen-guest&files=' ) - url = base_url + name - # use explicit name rather than failing to neatly parse the - # URL into a unique one - return self.fetch_asset(name=name, locations=(url), asset_hash=sha1) - - def common_vm_setup(self): - # We also catch lack of KVM_XEN support if we fail to launch - self.require_accelerator("kvm") - - self.vm.set_console() - - self.vm.add_args("-accel", "kvm,xen-version=0x4000a,kernel-irqchip=split") - self.vm.add_args("-smp", "2") - - self.kernel_path = self.get_asset("bzImage", - "367962983d0d32109998a70b45dcee4672d0b045") - self.rootfs = self.get_asset("rootfs.ext4", - "f1478401ea4b3fa2ea196396be44315bab2bb5e4") - - def run_and_check(self): - self.vm.add_args('-kernel', self.kernel_path, - '-append', self.kernel_params, - '-drive', f"file={self.rootfs},if=none,snapshot=on,format=raw,id=drv0", - '-device', 'xen-disk,drive=drv0,vdev=xvda', - '-device', 'virtio-net-pci,netdev=unet', - '-netdev', 'user,id=unet,hostfwd=:127.0.0.1:0-:22') - - try: - self.vm.launch() - except machine.VMLaunchFailure as e: - if "Xen HVM guest support not present" in e.output: - self.cancel("KVM Xen support is not present " - "(need v5.12+ kernel with CONFIG_KVM_XEN)") - elif "Property 'kvm-accel.xen-version' not found" in e.output: - self.cancel("QEMU not built with CONFIG_XEN_EMU support") - else: - raise e - - self.log.info('VM launched, waiting for sshd') - console_pattern = 'Starting dropbear sshd: OK' - wait_for_console_pattern(self, console_pattern, 'Oops') - self.log.info('sshd ready') - self.ssh_connect('root', '', False) - - self.ssh_command('cat /proc/cmdline') - self.ssh_command('dmesg | grep -e "Grant table initialized"') - - def test_kvm_xen_guest(self): - """ - :avocado: tags=kvm_xen_guest - """ - - self.common_vm_setup() - - self.kernel_params = (self.KERNEL_DEFAULT + - ' xen_emul_unplug=ide-disks') - self.run_and_check() - self.ssh_command('grep xen-pirq.*msi /proc/interrupts') - - def test_kvm_xen_guest_nomsi(self): - """ - :avocado: tags=kvm_xen_guest_nomsi - """ - - self.common_vm_setup() - - self.kernel_params = (self.KERNEL_DEFAULT + - ' xen_emul_unplug=ide-disks pci=nomsi') - self.run_and_check() - self.ssh_command('grep xen-pirq.* /proc/interrupts') - - def test_kvm_xen_guest_noapic_nomsi(self): - """ - :avocado: tags=kvm_xen_guest_noapic_nomsi - """ - - self.common_vm_setup() - - self.kernel_params = (self.KERNEL_DEFAULT + - ' xen_emul_unplug=ide-disks noapic pci=nomsi') - self.run_and_check() - self.ssh_command('grep xen-pirq /proc/interrupts') - - def test_kvm_xen_guest_vapic(self): - """ - :avocado: tags=kvm_xen_guest_vapic - """ - - self.common_vm_setup() - self.vm.add_args('-cpu', 'host,+xen-vapic') - self.kernel_params = (self.KERNEL_DEFAULT + - ' xen_emul_unplug=ide-disks') - self.run_and_check() - self.ssh_command('grep xen-pirq /proc/interrupts') - self.ssh_command('grep PCI-MSI /proc/interrupts') - - def test_kvm_xen_guest_novector(self): - """ - :avocado: tags=kvm_xen_guest_novector - """ - - self.common_vm_setup() - self.kernel_params = (self.KERNEL_DEFAULT + - ' xen_emul_unplug=ide-disks' + - ' xen_no_vector_callback') - self.run_and_check() - self.ssh_command('grep xen-platform-pci /proc/interrupts') - - def test_kvm_xen_guest_novector_nomsi(self): - """ - :avocado: tags=kvm_xen_guest_novector_nomsi - """ - - self.common_vm_setup() - - self.kernel_params = (self.KERNEL_DEFAULT + - ' xen_emul_unplug=ide-disks pci=nomsi' + - ' xen_no_vector_callback') - self.run_and_check() - self.ssh_command('grep xen-platform-pci 
/proc/interrupts') - - def test_kvm_xen_guest_novector_noapic(self): - """ - :avocado: tags=kvm_xen_guest_novector_noapic - """ - - self.common_vm_setup() - self.kernel_params = (self.KERNEL_DEFAULT + - ' xen_emul_unplug=ide-disks' + - ' xen_no_vector_callback noapic') - self.run_and_check() - self.ssh_command('grep xen-platform-pci /proc/interrupts') diff --git a/tests/avocado/linux_initrd.py b/tests/avocado/linux_initrd.py deleted file mode 100644 index aad5b19bd94..00000000000 --- a/tests/avocado/linux_initrd.py +++ /dev/null @@ -1,92 +0,0 @@ -# Linux initrd integration test. -# -# Copyright (c) 2018 Red Hat, Inc. -# -# Author: -# Wainer dos Santos Moschetta -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os -import logging -import tempfile - -from avocado_qemu import QemuSystemTest -from avocado import skipUnless - - -class LinuxInitrd(QemuSystemTest): - """ - Checks QEMU evaluates correctly the initrd file passed as -initrd option. - - :avocado: tags=arch:x86_64 - :avocado: tags=machine:pc - """ - - timeout = 300 - - def test_with_2gib_file_should_exit_error_msg_with_linux_v3_6(self): - """ - Pretends to boot QEMU with an initrd file with size of 2GiB - and expect it exits with error message. - Fedora-18 shipped with linux-3.6 which have not supported xloadflags - cannot support more than 2GiB initrd. - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora/li' - 'nux/releases/18/Fedora/x86_64/os/images/pxeboot/vmlinuz') - kernel_hash = '41464f68efe42b9991250bed86c7081d2ccdbb21' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - max_size = 2 * (1024 ** 3) - 1 - - with tempfile.NamedTemporaryFile() as initrd: - initrd.seek(max_size) - initrd.write(b'\0') - initrd.flush() - self.vm.add_args('-kernel', kernel_path, '-initrd', initrd.name, - '-m', '4096') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - self.vm.wait() - self.assertEqual(self.vm.exitcode(), 1) - expected_msg = r'.*initrd is too large.*max: \d+, need %s.*' % ( - max_size + 1) - self.assertRegex(self.vm.get_log(), expected_msg) - - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - - def test_with_2gib_file_should_work_with_linux_v4_16(self): - """ - :avocado: tags=flaky - - QEMU has supported up to 4 GiB initrd for recent kernel - Expect guest can reach 'Unpacking initramfs...' - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' - '/linux/releases/28/Everything/x86_64/os/images/pxeboot/' - 'vmlinuz') - kernel_hash = '238e083e114c48200f80d889f7e32eeb2793e02a' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - max_size = 2 * (1024 ** 3) + 1 - - with tempfile.NamedTemporaryFile() as initrd: - initrd.seek(max_size) - initrd.write(b'\0') - initrd.flush() - - self.vm.set_console() - kernel_command_line = 'console=ttyS0' - self.vm.add_args('-kernel', kernel_path, - '-append', kernel_command_line, - '-initrd', initrd.name, - '-m', '5120') - self.vm.launch() - console = self.vm.console_socket.makefile() - console_logger = logging.getLogger('console') - while True: - msg = console.readline() - console_logger.debug(msg.strip()) - if 'Unpacking initramfs...' 
in msg: - break - if 'Kernel panic - not syncing' in msg: - self.fail("Kernel panic reached") diff --git a/tests/avocado/linux_ssh_mips_malta.py b/tests/avocado/linux_ssh_mips_malta.py deleted file mode 100644 index d9bb525ad9c..00000000000 --- a/tests/avocado/linux_ssh_mips_malta.py +++ /dev/null @@ -1,205 +0,0 @@ -# Functional test that boots a VM and run commands via a SSH session -# -# Copyright (c) Philippe Mathieu-Daudé -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os -import re -import base64 -import logging -import time - -from avocado import skipUnless -from avocado_qemu import LinuxSSHMixIn -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern -from avocado.utils import process -from avocado.utils import archive -from avocado.utils import ssh - - -@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') -@skipUnless(ssh.SSH_CLIENT_BINARY, 'No SSH client available') -class LinuxSSH(QemuSystemTest, LinuxSSHMixIn): - """ - :avocado: tags=accel:tcg - """ - - timeout = 150 # Not for 'configure --enable-debug --enable-debug-tcg' - - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' - VM_IP = '127.0.0.1' - - BASE_URL = 'https://people.debian.org/~aurel32/qemu/' - IMAGE_INFO = { - 'be': {'base_url': 'mips', - 'image_name': 'debian_wheezy_mips_standard.qcow2', - 'image_hash': '8987a63270df67345b2135a6b7a4885a35e392d5', - 'kernel_hash': { - 32: '592e384a4edc16dade52a6cd5c785c637bcbc9ad', - 64: 'db6eea7de35d36c77d8c165b6bcb222e16eb91db'} - }, - 'le': {'base_url': 'mipsel', - 'image_name': 'debian_wheezy_mipsel_standard.qcow2', - 'image_hash': '7866764d9de3ef536ffca24c9fb9f04ffdb45802', - 'kernel_hash': { - 32: 'a66bea5a8adaa2cb3d36a1d4e0ccdb01be8f6c2a', - 64: '6a7f77245acf231415a0e8b725d91ed2f3487794'} - } - } - CPU_INFO = { - 32: {'cpu': 'MIPS 24Kc', 'kernel_release': '3.2.0-4-4kc-malta'}, - 64: {'cpu': 'MIPS 20Kc', 'kernel_release': '3.2.0-4-5kc-malta'} - } - - def get_url(self, endianess, path=''): - qkey = {'le': 'el', 'be': ''} - return '%s/mips%s/%s' % (self.BASE_URL, qkey[endianess], path) - - def get_image_info(self, endianess): - dinfo = self.IMAGE_INFO[endianess] - image_url = self.get_url(endianess, dinfo['image_name']) - image_hash = dinfo['image_hash'] - return (image_url, image_hash) - - def get_kernel_info(self, endianess, wordsize): - minfo = self.CPU_INFO[wordsize] - kernel_url = self.get_url(endianess, - 'vmlinux-%s' % minfo['kernel_release']) - kernel_hash = self.IMAGE_INFO[endianess]['kernel_hash'][wordsize] - return kernel_url, kernel_hash - - def ssh_disconnect_vm(self): - self.ssh_session.quit() - - def boot_debian_wheezy_image_and_ssh_login(self, endianess, kernel_path): - image_url, image_hash = self.get_image_info(endianess) - image_path = self.fetch_asset(image_url, asset_hash=image_hash) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE - + 'console=ttyS0 root=/dev/sda1') - self.vm.add_args('-no-reboot', - '-kernel', kernel_path, - '-append', kernel_command_line, - '-drive', 'file=%s,snapshot=on' % image_path, - '-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22', - '-device', 'pcnet,netdev=vnet') - self.vm.launch() - - self.log.info('VM launched, waiting for sshd') - console_pattern = 'Starting OpenBSD Secure Shell server: sshd' - wait_for_console_pattern(self, console_pattern, 'Oops') - self.log.info('sshd ready') - - self.ssh_connect('root', 'root', False) - - def 
shutdown_via_ssh(self): - self.ssh_command('poweroff') - self.ssh_disconnect_vm() - wait_for_console_pattern(self, 'Power down', 'Oops') - - def run_common_commands(self, wordsize): - self.ssh_command_output_contains( - 'cat /proc/cpuinfo', - self.CPU_INFO[wordsize]['cpu']) - self.ssh_command_output_contains( - 'uname -m', - 'mips') - self.ssh_command_output_contains( - 'uname -r', - self.CPU_INFO[wordsize]['kernel_release']) - self.ssh_command_output_contains( - 'cat /proc/interrupts', - 'XT-PIC timer') - self.ssh_command_output_contains( - 'cat /proc/interrupts', - 'XT-PIC i8042') - self.ssh_command_output_contains( - 'cat /proc/interrupts', - 'XT-PIC serial') - self.ssh_command_output_contains( - 'cat /proc/interrupts', - 'XT-PIC ata_piix') - self.ssh_command_output_contains( - 'cat /proc/interrupts', - 'XT-PIC eth0') - self.ssh_command_output_contains( - 'cat /proc/devices', - 'input') - self.ssh_command_output_contains( - 'cat /proc/devices', - 'usb') - self.ssh_command_output_contains( - 'cat /proc/devices', - 'fb') - self.ssh_command_output_contains( - 'cat /proc/ioports', - ' : serial') - self.ssh_command_output_contains( - 'cat /proc/ioports', - ' : ata_piix') - self.ssh_command_output_contains( - 'cat /proc/ioports', - ' : piix4_smbus') - self.ssh_command_output_contains( - 'lspci -d 11ab:4620', - 'GT-64120') - self.ssh_command_output_contains( - 'cat /sys/bus/i2c/devices/i2c-0/name', - 'SMBus PIIX4 adapter') - self.ssh_command_output_contains( - 'cat /proc/mtd', - 'YAMON') - # Empty 'Board Config' (64KB) - self.ssh_command_output_contains( - 'md5sum /dev/mtd2ro', - '0dfbe8aa4c20b52e1b8bf3cb6cbdf193') - - def check_mips_malta(self, uname_m, endianess): - wordsize = 64 if '64' in uname_m else 32 - kernel_url, kernel_hash = self.get_kernel_info(endianess, wordsize) - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - self.boot_debian_wheezy_image_and_ssh_login(endianess, kernel_path) - - stdout, _ = self.ssh_command('uname -a') - self.assertIn(True, [uname_m + " GNU/Linux" in line for line in stdout]) - - self.run_common_commands(wordsize) - self.shutdown_via_ssh() - # Wait for VM to shut down gracefully - self.vm.wait() - - def test_mips_malta32eb_kernel3_2_0(self): - """ - :avocado: tags=arch:mips - :avocado: tags=endian:big - :avocado: tags=device:pcnet32 - """ - self.check_mips_malta('mips', 'be') - - def test_mips_malta32el_kernel3_2_0(self): - """ - :avocado: tags=arch:mipsel - :avocado: tags=endian:little - :avocado: tags=device:pcnet32 - """ - self.check_mips_malta('mips', 'le') - - def test_mips_malta64eb_kernel3_2_0(self): - """ - :avocado: tags=arch:mips64 - :avocado: tags=endian:big - :avocado: tags=device:pcnet32 - """ - self.check_mips_malta('mips64', 'be') - - def test_mips_malta64el_kernel3_2_0(self): - """ - :avocado: tags=arch:mips64el - :avocado: tags=endian:little - :avocado: tags=device:pcnet32 - """ - self.check_mips_malta('mips64', 'le') diff --git a/tests/avocado/load_bflt.py b/tests/avocado/load_bflt.py deleted file mode 100644 index bb50cec1ee8..00000000000 --- a/tests/avocado/load_bflt.py +++ /dev/null @@ -1,54 +0,0 @@ -# Test the bFLT loader format -# -# Copyright (C) 2019 Philippe Mathieu-Daudé -# -# SPDX-License-Identifier: GPL-2.0-or-later - -import os -import bz2 -import subprocess - -from avocado import skipUnless -from avocado_qemu import QemuUserTest -from avocado_qemu import has_cmd - - -class LoadBFLT(QemuUserTest): - - def extract_cpio(self, cpio_path): - """ - Extracts a cpio archive into the test workdir - - :param 
cpio_path: path to the cpio archive - """ - cwd = os.getcwd() - os.chdir(self.workdir) - with bz2.open(cpio_path, 'rb') as archive_cpio: - subprocess.run(['cpio', '-i'], input=archive_cpio.read(), - stderr=subprocess.DEVNULL) - os.chdir(cwd) - - @skipUnless(*has_cmd('cpio')) - @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') - def test_stm32(self): - """ - :avocado: tags=arch:arm - :avocado: tags=linux_user - :avocado: tags=quick - """ - # See https://elinux.org/STM32#User_Space - rootfs_url = ('https://elinux.org/images/5/51/' - 'Stm32_mini_rootfs.cpio.bz2') - rootfs_hash = '9f065e6ba40cce7411ba757f924f30fcc57951e6' - rootfs_path_bz2 = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash) - busybox_path = os.path.join(self.workdir, "/bin/busybox") - - self.extract_cpio(rootfs_path_bz2) - - res = self.run(busybox_path) - ver = 'BusyBox v1.24.0.git (2015-02-03 22:17:13 CET) multi-call binary.' - self.assertIn(ver, res.stdout_text) - - res = self.run(busybox_path, ['uname', '-a']) - unm = 'armv7l GNU/Linux' - self.assertIn(unm, res.stdout_text) diff --git a/tests/avocado/machine_aarch64_sbsaref.py b/tests/avocado/machine_aarch64_sbsaref.py deleted file mode 100644 index e920bbf08c1..00000000000 --- a/tests/avocado/machine_aarch64_sbsaref.py +++ /dev/null @@ -1,236 +0,0 @@ -# Functional test that boots a Linux kernel and checks the console -# -# SPDX-FileCopyrightText: 2023-2024 Linaro Ltd. -# SPDX-FileContributor: Philippe Mathieu-Daudé -# SPDX-FileContributor: Marcin Juszkiewicz -# -# SPDX-License-Identifier: GPL-2.0-or-later - -import os - -from avocado import skipUnless -from avocado.utils import archive - -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern -from avocado_qemu import interrupt_interactive_console_until_pattern - - -class Aarch64SbsarefMachine(QemuSystemTest): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=machine:sbsa-ref - :avocado: tags=accel:tcg - - As firmware runs at a higher privilege level than the hypervisor we - can only run these tests under TCG emulation. 
- """ - - timeout = 180 - - def fetch_firmware(self): - """ - Flash volumes generated using: - - Toolchain from Debian: - aarch64-linux-gnu-gcc (Debian 12.2.0-14) 12.2.0 - - Used components: - - - Trusted Firmware v2.11.0 - - Tianocore EDK2 4d4f569924 - - Tianocore EDK2-platforms 3f08401 - - """ - - # Secure BootRom (TF-A code) - fs0_xz_url = ( - "https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/" - "20240619-148232/edk2/SBSA_FLASH0.fd.xz" - ) - fs0_xz_hash = "0c954842a590988f526984de22e21ae0ab9cb351a0c99a8a58e928f0c7359cf7" - tar_xz_path = self.fetch_asset(fs0_xz_url, asset_hash=fs0_xz_hash, - algorithm='sha256') - archive.extract(tar_xz_path, self.workdir) - fs0_path = os.path.join(self.workdir, "SBSA_FLASH0.fd") - - # Non-secure rom (UEFI and EFI variables) - fs1_xz_url = ( - "https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/" - "20240619-148232/edk2/SBSA_FLASH1.fd.xz" - ) - fs1_xz_hash = "c6ec39374c4d79bb9e9cdeeb6db44732d90bb4a334cec92002b3f4b9cac4b5ee" - tar_xz_path = self.fetch_asset(fs1_xz_url, asset_hash=fs1_xz_hash, - algorithm='sha256') - archive.extract(tar_xz_path, self.workdir) - fs1_path = os.path.join(self.workdir, "SBSA_FLASH1.fd") - - for path in [fs0_path, fs1_path]: - with open(path, "ab+") as fd: - fd.truncate(256 << 20) # Expand volumes to 256MiB - - self.vm.set_console() - self.vm.add_args( - "-drive", - f"if=pflash,file={fs0_path},format=raw", - "-drive", - f"if=pflash,file={fs1_path},format=raw", - "-machine", - "sbsa-ref", - ) - - def test_sbsaref_edk2_firmware(self): - """ - :avocado: tags=cpu:cortex-a57 - """ - - self.fetch_firmware() - self.vm.launch() - - # TF-A boot sequence: - # - # https://github.com/ARM-software/arm-trusted-firmware/blob/v2.8.0/\ - # docs/design/trusted-board-boot.rst#trusted-board-boot-sequence - # https://trustedfirmware-a.readthedocs.io/en/v2.8/\ - # design/firmware-design.html#cold-boot - - # AP Trusted ROM - wait_for_console_pattern(self, "Booting Trusted Firmware") - wait_for_console_pattern(self, "BL1: v2.11.0(release):") - wait_for_console_pattern(self, "BL1: Booting BL2") - - # Trusted Boot Firmware - wait_for_console_pattern(self, "BL2: v2.11.0(release)") - wait_for_console_pattern(self, "Booting BL31") - - # EL3 Runtime Software - wait_for_console_pattern(self, "BL31: v2.11.0(release)") - - # Non-trusted Firmware - wait_for_console_pattern(self, "UEFI firmware (version 1.0") - interrupt_interactive_console_until_pattern(self, "QEMU SBSA-REF Machine") - - # This tests the whole boot chain from EFI to Userspace - # We only boot a whole OS for the current top level CPU and GIC - # Other test profiles should use more minimal boots - def boot_alpine_linux(self, cpu): - self.fetch_firmware() - - iso_url = ( - "https://dl-cdn.alpinelinux.org/" - "alpine/v3.17/releases/aarch64/alpine-standard-3.17.2-aarch64.iso" - ) - - iso_hash = "5a36304ecf039292082d92b48152a9ec21009d3a62f459de623e19c4bd9dc027" - iso_path = self.fetch_asset(iso_url, algorithm="sha256", asset_hash=iso_hash) - - self.vm.set_console() - self.vm.add_args( - "-cpu", - cpu, - "-drive", - f"file={iso_path},format=raw", - ) - - self.vm.launch() - wait_for_console_pattern(self, "Welcome to Alpine Linux 3.17") - - def test_sbsaref_alpine_linux_cortex_a57(self): - """ - :avocado: tags=cpu:cortex-a57 - :avocado: tags=os:linux - """ - self.boot_alpine_linux("cortex-a57") - - def test_sbsaref_alpine_linux_neoverse_n1(self): - """ - :avocado: tags=cpu:neoverse-n1 - :avocado: tags=os:linux - """ - self.boot_alpine_linux("neoverse-n1") - - def 
test_sbsaref_alpine_linux_max_pauth_off(self): - """ - :avocado: tags=cpu:max - :avocado: tags=os:linux - """ - self.boot_alpine_linux("max,pauth=off") - - def test_sbsaref_alpine_linux_max_pauth_impdef(self): - """ - :avocado: tags=cpu:max - :avocado: tags=os:linux - """ - self.boot_alpine_linux("max,pauth-impdef=on") - - @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') - def test_sbsaref_alpine_linux_max(self): - """ - :avocado: tags=cpu:max - :avocado: tags=os:linux - """ - self.boot_alpine_linux("max") - - - # This tests the whole boot chain from EFI to Userspace - # We only boot a whole OS for the current top level CPU and GIC - # Other test profiles should use more minimal boots - def boot_openbsd73(self, cpu): - self.fetch_firmware() - - img_url = ( - "https://cdn.openbsd.org/pub/OpenBSD/7.3/arm64/miniroot73.img" - ) - - img_hash = "7fc2c75401d6f01fbfa25f4953f72ad7d7c18650056d30755c44b9c129b707e5" - img_path = self.fetch_asset(img_url, algorithm="sha256", asset_hash=img_hash) - - self.vm.set_console() - self.vm.add_args( - "-cpu", - cpu, - "-drive", - f"file={img_path},format=raw", - ) - - self.vm.launch() - wait_for_console_pattern(self, - "Welcome to the OpenBSD/arm64" - " 7.3 installation program.") - - def test_sbsaref_openbsd73_cortex_a57(self): - """ - :avocado: tags=cpu:cortex-a57 - :avocado: tags=os:openbsd - """ - self.boot_openbsd73("cortex-a57") - - def test_sbsaref_openbsd73_neoverse_n1(self): - """ - :avocado: tags=cpu:neoverse-n1 - :avocado: tags=os:openbsd - """ - self.boot_openbsd73("neoverse-n1") - - def test_sbsaref_openbsd73_max_pauth_off(self): - """ - :avocado: tags=cpu:max - :avocado: tags=os:openbsd - """ - self.boot_openbsd73("max,pauth=off") - - @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') - def test_sbsaref_openbsd73_max_pauth_impdef(self): - """ - :avocado: tags=cpu:max - :avocado: tags=os:openbsd - """ - self.boot_openbsd73("max,pauth-impdef=on") - - @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') - def test_sbsaref_openbsd73_max(self): - """ - :avocado: tags=cpu:max - :avocado: tags=os:openbsd - """ - self.boot_openbsd73("max") diff --git a/tests/avocado/machine_aarch64_virt.py b/tests/avocado/machine_aarch64_virt.py deleted file mode 100644 index a90dc6ff4b4..00000000000 --- a/tests/avocado/machine_aarch64_virt.py +++ /dev/null @@ -1,146 +0,0 @@ -# Functional test that boots a various Linux systems and checks the -# console output. -# -# Copyright (c) 2022 Linaro Ltd. 
-# -# Author: -# Alex Bennée -# -# SPDX-License-Identifier: GPL-2.0-or-later - -import time -import os -import logging - -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern -from avocado_qemu import exec_command -from avocado_qemu import BUILD_DIR -from avocado.utils import process -from avocado.utils.path import find_command - -class Aarch64VirtMachine(QemuSystemTest): - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' - timeout = 360 - - def wait_for_console_pattern(self, success_message, vm=None): - wait_for_console_pattern(self, success_message, - failure_message='Kernel panic - not syncing', - vm=vm) - - # This tests the whole boot chain from EFI to Userspace - # We only boot a whole OS for the current top level CPU and GIC - # Other test profiles should use more minimal boots - def test_alpine_virt_tcg_gic_max(self): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=machine:virt - :avocado: tags=accel:tcg - """ - iso_url = ('https://dl-cdn.alpinelinux.org/' - 'alpine/v3.17/releases/aarch64/' - 'alpine-standard-3.17.2-aarch64.iso') - - # Alpine use sha256 so I recalculated this myself - iso_sha1 = '76284fcd7b41fe899b0c2375ceb8470803eea839' - iso_path = self.fetch_asset(iso_url, asset_hash=iso_sha1) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyAMA0') - self.require_accelerator("tcg") - - self.vm.add_args("-accel", "tcg") - self.vm.add_args("-cpu", "max,pauth-impdef=on") - self.vm.add_args("-machine", - "virt,acpi=on," - "virtualization=on," - "mte=on," - "gic-version=max,iommu=smmuv3") - self.vm.add_args("-smp", "2", "-m", "1024") - self.vm.add_args('-bios', os.path.join(BUILD_DIR, 'pc-bios', - 'edk2-aarch64-code.fd')) - self.vm.add_args("-drive", f"file={iso_path},format=raw") - self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0') - self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom') - - self.vm.launch() - self.wait_for_console_pattern('Welcome to Alpine Linux 3.17') - - - def common_aarch64_virt(self, machine): - """ - Common code to launch basic virt machine with kernel+initrd - and a scratch disk. 
- """ - logger = logging.getLogger('aarch64_virt') - - kernel_url = ('https://fileserver.linaro.org/s/' - 'z6B2ARM7DQT3HWN/download') - kernel_hash = 'ed11daab50c151dde0e1e9c9cb8b2d9bd3215347' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyAMA0') - self.require_accelerator("tcg") - self.vm.add_args('-cpu', 'max,pauth-impdef=on', - '-machine', machine, - '-accel', 'tcg', - '-kernel', kernel_path, - '-append', kernel_command_line) - - # A RNG offers an easy way to generate a few IRQs - self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0') - self.vm.add_args('-object', - 'rng-random,id=rng0,filename=/dev/urandom') - - # Also add a scratch block device - logger.info('creating scratch qcow2 image') - image_path = os.path.join(self.workdir, 'scratch.qcow2') - qemu_img = os.path.join(BUILD_DIR, 'qemu-img') - if not os.path.exists(qemu_img): - qemu_img = find_command('qemu-img', False) - if qemu_img is False: - self.cancel('Could not find "qemu-img", which is required to ' - 'create the temporary qcow2 image') - cmd = '%s create -f qcow2 %s 8M' % (qemu_img, image_path) - process.run(cmd) - - # Add the device - self.vm.add_args('-blockdev', - f"driver=qcow2,file.driver=file,file.filename={image_path},node-name=scratch") - self.vm.add_args('-device', - 'virtio-blk-device,drive=scratch') - - self.vm.launch() - self.wait_for_console_pattern('Welcome to Buildroot') - time.sleep(0.1) - exec_command(self, 'root') - time.sleep(0.1) - exec_command(self, 'dd if=/dev/hwrng of=/dev/vda bs=512 count=4') - time.sleep(0.1) - exec_command(self, 'md5sum /dev/vda') - time.sleep(0.1) - exec_command(self, 'cat /proc/interrupts') - time.sleep(0.1) - exec_command(self, 'cat /proc/self/maps') - time.sleep(0.1) - - def test_aarch64_virt_gicv3(self): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=machine:virt - :avocado: tags=accel:tcg - :avocado: tags=cpu:max - """ - self.common_aarch64_virt("virt,gic_version=3") - - def test_aarch64_virt_gicv2(self): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=machine:virt - :avocado: tags=accel:tcg - :avocado: tags=cpu:max - """ - self.common_aarch64_virt("virt,gic-version=2") diff --git a/tests/avocado/machine_arm_canona1100.py b/tests/avocado/machine_arm_canona1100.py deleted file mode 100644 index a42d8b0f2b0..00000000000 --- a/tests/avocado/machine_arm_canona1100.py +++ /dev/null @@ -1,35 +0,0 @@ -# Functional test that boots the canon-a1100 machine with firmware -# -# Copyright (c) 2020 Red Hat, Inc. -# -# Author: -# Thomas Huth -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. 
- -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern -from avocado.utils import archive - -class CanonA1100Machine(QemuSystemTest): - """Boots the barebox firmware and checks that the console is operational""" - - timeout = 90 - - def test_arm_canona1100(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:canon-a1100 - :avocado: tags=device:pflash_cfi02 - """ - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day18.tar.xz') - tar_hash = '068b5fc4242b29381acee94713509f8a876e9db6' - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - archive.extract(file_path, self.workdir) - self.vm.set_console() - self.vm.add_args('-bios', - self.workdir + '/day18/barebox.canon-a1100.bin') - self.vm.launch() - wait_for_console_pattern(self, 'running /env/bin/init') diff --git a/tests/avocado/machine_arm_integratorcp.py b/tests/avocado/machine_arm_integratorcp.py deleted file mode 100644 index 87f5cf3953b..00000000000 --- a/tests/avocado/machine_arm_integratorcp.py +++ /dev/null @@ -1,99 +0,0 @@ -# Functional test that boots a Linux kernel and checks the console -# -# Copyright (c) 2020 Red Hat, Inc. -# -# Author: -# Thomas Huth -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os -import logging - -from avocado import skipUnless -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - - -NUMPY_AVAILABLE = True -try: - import numpy as np -except ImportError: - NUMPY_AVAILABLE = False - -CV2_AVAILABLE = True -try: - import cv2 -except ImportError: - CV2_AVAILABLE = False - - -class IntegratorMachine(QemuSystemTest): - - timeout = 90 - - def boot_integratorcp(self): - kernel_url = ('https://github.com/zayac/qemu-arm/raw/master/' - 'arm-test/kernel/zImage.integrator') - kernel_hash = '0d7adba893c503267c946a3cbdc63b4b54f25468' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - initrd_url = ('https://github.com/zayac/qemu-arm/raw/master/' - 'arm-test/kernel/arm_root.img') - initrd_hash = 'b51e4154285bf784e017a37586428332d8c7bd8b' - initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - - self.vm.set_console() - self.vm.add_args('-kernel', kernel_path, - '-initrd', initrd_path, - '-append', 'printk.time=0 console=ttyAMA0') - self.vm.launch() - - @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') - def test_integratorcp_console(self): - """ - Boots the Linux kernel and checks that the console is operational - :avocado: tags=arch:arm - :avocado: tags=machine:integratorcp - :avocado: tags=device:pl011 - """ - self.boot_integratorcp() - wait_for_console_pattern(self, 'Log in as root') - - @skipUnless(NUMPY_AVAILABLE, 'Python NumPy not installed') - @skipUnless(CV2_AVAILABLE, 'Python OpenCV not installed') - @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') - def test_framebuffer_tux_logo(self): - """ - Boot Linux and verify the Tux logo is displayed on the framebuffer. 
- :avocado: tags=arch:arm - :avocado: tags=machine:integratorcp - :avocado: tags=device:pl110 - :avocado: tags=device:framebuffer - """ - screendump_path = os.path.join(self.workdir, "screendump.pbm") - tuxlogo_url = ('https://github.com/torvalds/linux/raw/v2.6.12/' - 'drivers/video/logo/logo_linux_vga16.ppm') - tuxlogo_hash = '3991c2ddbd1ddaecda7601f8aafbcf5b02dc86af' - tuxlogo_path = self.fetch_asset(tuxlogo_url, asset_hash=tuxlogo_hash) - - self.boot_integratorcp() - framebuffer_ready = 'Console: switching to colour frame buffer device' - wait_for_console_pattern(self, framebuffer_ready) - self.vm.cmd('human-monitor-command', command_line='stop') - self.vm.cmd('human-monitor-command', - command_line='screendump %s' % screendump_path) - logger = logging.getLogger('framebuffer') - - cpu_count = 1 - match_threshold = 0.92 - screendump_bgr = cv2.imread(screendump_path) - screendump_gray = cv2.cvtColor(screendump_bgr, cv2.COLOR_BGR2GRAY) - result = cv2.matchTemplate(screendump_gray, cv2.imread(tuxlogo_path, 0), - cv2.TM_CCOEFF_NORMED) - loc = np.where(result >= match_threshold) - tux_count = 0 - for tux_count, pt in enumerate(zip(*loc[::-1]), start=1): - logger.debug('found Tux at position [x, y] = %s', pt) - self.assertGreaterEqual(tux_count, cpu_count) diff --git a/tests/avocado/machine_arm_n8x0.py b/tests/avocado/machine_arm_n8x0.py deleted file mode 100644 index 12e9a6803ba..00000000000 --- a/tests/avocado/machine_arm_n8x0.py +++ /dev/null @@ -1,49 +0,0 @@ -# Functional test that boots a Linux kernel and checks the console -# -# Copyright (c) 2020 Red Hat, Inc. -# -# Author: -# Thomas Huth -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os - -from avocado import skipUnless -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - -class N8x0Machine(QemuSystemTest): - """Boots the Linux kernel and checks that the console is operational""" - - timeout = 90 - - def __do_test_n8x0(self): - kernel_url = ('http://stskeeps.subnetmask.net/meego-n8x0/' - 'meego-arm-n8x0-1.0.80.20100712.1431-' - 'vmlinuz-2.6.35~rc4-129.1-n8x0') - kernel_hash = 'e9d5ab8d7548923a0061b6fbf601465e479ed269' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - self.vm.set_console(console_index=1) - self.vm.add_args('-kernel', kernel_path, - '-append', 'printk.time=0 console=ttyS1') - self.vm.launch() - wait_for_console_pattern(self, 'TSC2005 driver initializing') - - @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') - def test_n800(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:n800 - """ - self.__do_test_n8x0() - - @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') - def test_n810(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:n810 - """ - self.__do_test_n8x0() diff --git a/tests/avocado/machine_aspeed.py b/tests/avocado/machine_aspeed.py deleted file mode 100644 index f8e263d37ea..00000000000 --- a/tests/avocado/machine_aspeed.py +++ /dev/null @@ -1,480 +0,0 @@ -# Functional test that boots the ASPEED SoCs with firmware -# -# Copyright (C) 2022 ASPEED Technology Inc -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. 
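# The Tux-logo check in the integratorcp test above is plain OpenCV template
# matching: every location whose normalised correlation exceeds a threshold
# counts as one rendered logo.  A minimal sketch of the same technique (file
# names are placeholders; the 0.92 threshold is restated from the test):

import cv2
import numpy as np

def count_template_matches(screendump_path, template_path, threshold=0.92):
    """Count occurrences of a grayscale template inside a screendump."""
    screendump_gray = cv2.cvtColor(cv2.imread(screendump_path),
                                   cv2.COLOR_BGR2GRAY)
    template_gray = cv2.imread(template_path, 0)      # 0 == load as grayscale
    result = cv2.matchTemplate(screendump_gray, template_gray,
                               cv2.TM_CCOEFF_NORMED)
    # np.where() yields (rows, cols); reverse to iterate (x, y) match points
    return sum(1 for _ in zip(*np.where(result >= threshold)[::-1]))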
- -import time -import os -import tempfile -import subprocess - -from avocado_qemu import LinuxSSHMixIn -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern -from avocado_qemu import exec_command -from avocado_qemu import exec_command_and_wait_for_pattern -from avocado_qemu import interrupt_interactive_console_until_pattern -from avocado_qemu import has_cmd -from avocado.utils import archive -from avocado import skipUnless -from avocado import skipUnless - - -class AST1030Machine(QemuSystemTest): - """Boots the zephyr os and checks that the console is operational""" - - timeout = 10 - - def test_ast1030_zephyros_1_04(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:ast1030-evb - :avocado: tags=os:zephyr - """ - tar_url = ('https://github.com/AspeedTech-BMC' - '/zephyr/releases/download/v00.01.04/ast1030-evb-demo.zip') - tar_hash = '4c6a8ce3a8ba76ef1a65dae419ae3409343c4b20' - tar_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - archive.extract(tar_path, self.workdir) - kernel_file = self.workdir + "/ast1030-evb-demo/zephyr.elf" - self.vm.set_console() - self.vm.add_args('-kernel', kernel_file, - '-nographic') - self.vm.launch() - wait_for_console_pattern(self, "Booting Zephyr OS") - exec_command_and_wait_for_pattern(self, "help", - "Available commands") - - def test_ast1030_zephyros_1_07(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:ast1030-evb - :avocado: tags=os:zephyr - """ - tar_url = ('https://github.com/AspeedTech-BMC' - '/zephyr/releases/download/v00.01.07/ast1030-evb-demo.zip') - tar_hash = '40ac87eabdcd3b3454ce5aad11fedc72a33ecda2' - tar_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - archive.extract(tar_path, self.workdir) - kernel_file = self.workdir + "/ast1030-evb-demo/zephyr.bin" - self.vm.set_console() - self.vm.add_args('-kernel', kernel_file, - '-nographic') - self.vm.launch() - wait_for_console_pattern(self, "Booting Zephyr OS") - for shell_cmd in [ - 'kernel stacks', - 'otp info conf', - 'otp info scu', - 'hwinfo devid', - 'crypto aes256_cbc_vault', - 'random get', - 'jtag JTAG1 sw_xfer high TMS', - 'adc ADC0 resolution 12', - 'adc ADC0 read 42', - 'adc ADC1 read 69', - 'i2c scan I2C_0', - 'i3c attach I3C_0', - 'hash test', - 'kernel uptime', - 'kernel reboot warm', - 'kernel uptime', - 'kernel reboot cold', - 'kernel uptime', - ]: exec_command_and_wait_for_pattern(self, shell_cmd, "uart:~$") - -class AST2x00Machine(QemuSystemTest): - - timeout = 180 - - def wait_for_console_pattern(self, success_message, vm=None): - wait_for_console_pattern(self, success_message, - failure_message='Kernel panic - not syncing', - vm=vm) - - def do_test_arm_aspeed(self, image): - self.vm.set_console() - self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', - '-net', 'nic') - self.vm.launch() - - self.wait_for_console_pattern("U-Boot 2016.07") - self.wait_for_console_pattern("## Loading kernel from FIT Image at 20080000") - self.wait_for_console_pattern("Starting kernel ...") - self.wait_for_console_pattern("Booting Linux on physical CPU 0x0") - wait_for_console_pattern(self, - "aspeed-smc 1e620000.spi: read control register: 203b0641") - self.wait_for_console_pattern("ftgmac100 1e660000.ethernet eth0: irq ") - self.wait_for_console_pattern("systemd[1]: Set hostname to") - - def test_arm_ast2400_palmetto_openbmc_v2_9_0(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:palmetto-bmc - """ - - image_url = ('https://github.com/openbmc/openbmc/releases/download/2.9.0/' - 
'obmc-phosphor-image-palmetto.static.mtd') - image_hash = ('3e13bbbc28e424865dc42f35ad672b10f2e82cdb11846bb28fa625b48beafd0d') - image_path = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - - self.do_test_arm_aspeed(image_path) - - def test_arm_ast2500_romulus_openbmc_v2_9_0(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:romulus-bmc - """ - - image_url = ('https://github.com/openbmc/openbmc/releases/download/2.9.0/' - 'obmc-phosphor-image-romulus.static.mtd') - image_hash = ('820341076803f1955bc31e647a512c79f9add4f5233d0697678bab4604c7bb25') - image_path = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - - self.do_test_arm_aspeed(image_path) - - def do_test_arm_aspeed_buildroot_start(self, image, cpu_id, pattern='Aspeed EVB'): - self.require_netdev('user') - - self.vm.set_console() - self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', - '-net', 'nic', '-net', 'user') - self.vm.launch() - - self.wait_for_console_pattern('U-Boot 2019.04') - self.wait_for_console_pattern('## Loading kernel from FIT Image') - self.wait_for_console_pattern('Starting kernel ...') - self.wait_for_console_pattern('Booting Linux on physical CPU ' + cpu_id) - self.wait_for_console_pattern('lease of 10.0.2.15') - # the line before login: - self.wait_for_console_pattern(pattern) - time.sleep(0.1) - exec_command(self, 'root') - time.sleep(0.1) - exec_command(self, "passw0rd") - - def do_test_arm_aspeed_buildroot_poweroff(self): - exec_command_and_wait_for_pattern(self, 'poweroff', - 'reboot: System halted'); - - def test_arm_ast2500_evb_buildroot(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:ast2500-evb - """ - - image_url = ('https://github.com/legoater/qemu-aspeed-boot/raw/master/' - 'images/ast2500-evb/buildroot-2023.11/flash.img') - image_hash = ('c23db6160cf77d0258397eb2051162c8473a56c441417c52a91ba217186e715f') - image_path = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - - self.vm.add_args('-device', - 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test'); - self.do_test_arm_aspeed_buildroot_start(image_path, '0x0', 'Aspeed AST2500 EVB') - - exec_command_and_wait_for_pattern(self, - 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device', - 'i2c i2c-3: new_device: Instantiated device lm75 at 0x4d'); - exec_command_and_wait_for_pattern(self, - 'cat /sys/class/hwmon/hwmon1/temp1_input', '0') - self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test', - property='temperature', value=18000); - exec_command_and_wait_for_pattern(self, - 'cat /sys/class/hwmon/hwmon1/temp1_input', '18000') - - self.do_test_arm_aspeed_buildroot_poweroff() - - def test_arm_ast2600_evb_buildroot(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:ast2600-evb - """ - - image_url = ('https://github.com/legoater/qemu-aspeed-boot/raw/master/' - 'images/ast2600-evb/buildroot-2023.11/flash.img') - image_hash = ('b62808daef48b438d0728ee07662290490ecfa65987bb91294cafb1bb7ad1a68') - image_path = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - - self.vm.add_args('-device', - 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test'); - self.vm.add_args('-device', - 'ds1338,bus=aspeed.i2c.bus.3,address=0x32'); - self.vm.add_args('-device', - 'i2c-echo,bus=aspeed.i2c.bus.3,address=0x42'); - self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00', 'Aspeed AST2600 EVB') - - exec_command_and_wait_for_pattern(self, - 'echo lm75 0x4d > 
/sys/class/i2c-dev/i2c-3/device/new_device', - 'i2c i2c-3: new_device: Instantiated device lm75 at 0x4d'); - exec_command_and_wait_for_pattern(self, - 'cat /sys/class/hwmon/hwmon1/temp1_input', '0') - self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test', - property='temperature', value=18000); - exec_command_and_wait_for_pattern(self, - 'cat /sys/class/hwmon/hwmon1/temp1_input', '18000') - - exec_command_and_wait_for_pattern(self, - 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-3/device/new_device', - 'i2c i2c-3: new_device: Instantiated device ds1307 at 0x32'); - year = time.strftime("%Y") - exec_command_and_wait_for_pattern(self, 'hwclock -f /dev/rtc1', year); - - exec_command_and_wait_for_pattern(self, - 'echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-3/new_device', - 'i2c i2c-3: new_device: Instantiated device slave-24c02 at 0x64'); - exec_command(self, 'i2cset -y 3 0x42 0x64 0x00 0xaa i'); - time.sleep(0.1) - exec_command_and_wait_for_pattern(self, - 'hexdump /sys/bus/i2c/devices/3-1064/slave-eeprom', - '0000000 ffaa ffff ffff ffff ffff ffff ffff ffff'); - self.do_test_arm_aspeed_buildroot_poweroff() - - @skipUnless(*has_cmd('swtpm')) - def test_arm_ast2600_evb_buildroot_tpm(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:ast2600-evb - """ - - image_url = ('https://github.com/legoater/qemu-aspeed-boot/raw/master/' - 'images/ast2600-evb/buildroot-2023.02-tpm/flash.img') - image_hash = ('a46009ae8a5403a0826d607215e731a8c68d27c14c41e55331706b8f9c7bd997') - image_path = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - - # force creation of VM object, which also defines self._sd - vm = self.vm - - socket = os.path.join(self._sd.name, 'swtpm-socket') - - subprocess.run(['swtpm', 'socket', '-d', '--tpm2', - '--tpmstate', f'dir={self.vm.temp_dir}', - '--ctrl', f'type=unixio,path={socket}']) - - self.vm.add_args('-chardev', f'socket,id=chrtpm,path={socket}') - self.vm.add_args('-tpmdev', 'emulator,id=tpm0,chardev=chrtpm') - self.vm.add_args('-device', - 'tpm-tis-i2c,tpmdev=tpm0,bus=aspeed.i2c.bus.12,address=0x2e') - self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00', 'Aspeed AST2600 EVB') - - exec_command_and_wait_for_pattern(self, - 'echo tpm_tis_i2c 0x2e > /sys/bus/i2c/devices/i2c-12/new_device', - 'tpm_tis_i2c 12-002e: 2.0 TPM (device-id 0x1, rev-id 1)'); - exec_command_and_wait_for_pattern(self, - 'cat /sys/class/tpm/tpm0/pcr-sha256/0', - 'B804724EA13F52A9072BA87FE8FDCC497DFC9DF9AA15B9088694639C431688E0'); - - self.do_test_arm_aspeed_buildroot_poweroff() - -class AST2x00MachineSDK(QemuSystemTest, LinuxSSHMixIn): - - EXTRA_BOOTARGS = ( - 'quiet ' - 'systemd.mask=org.openbmc.HostIpmi.service ' - 'systemd.mask=xyz.openbmc_project.Chassis.Control.Power@0.service ' - 'systemd.mask=modprobe@fuse.service ' - 'systemd.mask=rngd.service ' - 'systemd.mask=obmc-console@ttyS2.service ' - ) - - # FIXME: Although these tests boot a whole distro they are still - # slower than comparable machine models. There may be some - # optimisations which bring down the runtime. In the meantime they - # have generous timeouts and are disable for CI which aims for all - # tests to run in less than 60 seconds. 
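# The TPM variant above only needs an external swtpm daemon listening on a
# UNIX socket, which QEMU then consumes through a chardev/tpmdev pair.  A
# condensed sketch of that wiring, with the state directory and socket path
# as placeholder assumptions:

import os
import subprocess
import tempfile

def start_swtpm(state_dir=None):
    """Launch a background swtpm 'socket' daemon; return its control socket."""
    state_dir = state_dir or tempfile.mkdtemp()
    socket_path = os.path.join(state_dir, "swtpm-socket")
    subprocess.run(["swtpm", "socket", "-d", "--tpm2",
                    "--tpmstate", f"dir={state_dir}",
                    "--ctrl", f"type=unixio,path={socket_path}"],
                   check=True)
    return socket_path

# QEMU is then started with (mirroring the add_args calls above):
#   -chardev socket,id=chrtpm,path=<socket>
#   -tpmdev emulator,id=tpm0,chardev=chrtpm
#   -device tpm-tis-i2c,tpmdev=tpm0,bus=aspeed.i2c.bus.12,address=0x2e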
- timeout = 240 - - def wait_for_console_pattern(self, success_message, vm=None): - wait_for_console_pattern(self, success_message, - failure_message='Kernel panic - not syncing', - vm=vm) - - def do_test_arm_aspeed_sdk_start(self, image): - self.require_netdev('user') - self.vm.set_console() - self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', - '-net', 'nic', '-net', 'user,hostfwd=:127.0.0.1:0-:22') - self.vm.launch() - - self.wait_for_console_pattern('U-Boot 2019.04') - interrupt_interactive_console_until_pattern( - self, 'Hit any key to stop autoboot:', 'ast#') - exec_command_and_wait_for_pattern( - self, 'setenv bootargs ${bootargs} ' + self.EXTRA_BOOTARGS, 'ast#') - exec_command_and_wait_for_pattern( - self, 'boot', '## Loading kernel from FIT Image') - self.wait_for_console_pattern('Starting kernel ...') - - def do_test_aarch64_aspeed_sdk_start(self, image): - self.vm.set_console() - self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', - '-net', 'nic', '-net', 'user,hostfwd=:127.0.0.1:0-:22') - - self.vm.launch() - - self.wait_for_console_pattern('U-Boot 2023.10') - self.wait_for_console_pattern('## Loading kernel from FIT Image') - self.wait_for_console_pattern('Starting kernel ...') - - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - - def test_arm_ast2500_evb_sdk(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:ast2500-evb - :avocado: tags=flaky - """ - - image_url = ('https://github.com/AspeedTech-BMC/openbmc/releases/' - 'download/v08.06/ast2500-default-obmc.tar.gz') - image_hash = ('e1755f3cadff69190438c688d52dd0f0d399b70a1e14b1d3d5540fc4851d38ca') - image_path = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - archive.extract(image_path, self.workdir) - - self.do_test_arm_aspeed_sdk_start( - self.workdir + '/ast2500-default/image-bmc') - self.wait_for_console_pattern('nodistro.0 ast2500-default ttyS4') - - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - - def test_arm_ast2600_evb_sdk(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:ast2600-evb - :avocado: tags=flaky - """ - - image_url = ('https://github.com/AspeedTech-BMC/openbmc/releases/' - 'download/v08.06/ast2600-a2-obmc.tar.gz') - image_hash = ('9083506135f622d5e7351fcf7d4e1c7125cee5ba16141220c0ba88931f3681a4') - image_path = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - archive.extract(image_path, self.workdir) - - self.vm.add_args('-device', - 'tmp105,bus=aspeed.i2c.bus.5,address=0x4d,id=tmp-test'); - self.vm.add_args('-device', - 'ds1338,bus=aspeed.i2c.bus.5,address=0x32'); - self.do_test_arm_aspeed_sdk_start( - self.workdir + '/ast2600-a2/image-bmc') - self.wait_for_console_pattern('nodistro.0 ast2600-a2 ttyS4') - - self.ssh_connect('root', '0penBmc', False) - self.ssh_command('dmesg -c > /dev/null') - - self.ssh_command_output_contains( - 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-5/device/new_device ; ' - 'dmesg -c', - 'i2c i2c-5: new_device: Instantiated device lm75 at 0x4d'); - self.ssh_command_output_contains( - 'cat /sys/class/hwmon/hwmon19/temp1_input', '0') - self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test', - property='temperature', value=18000); - self.ssh_command_output_contains( - 'cat /sys/class/hwmon/hwmon19/temp1_input', '18000') - - self.ssh_command_output_contains( - 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-5/device/new_device ; ' - 'dmesg -c', - 'i2c i2c-5: new_device: Instantiated device ds1307 at 0x32'); - year 
= time.strftime("%Y") - self.ssh_command_output_contains('/sbin/hwclock -f /dev/rtc1', year); - - def test_aarch64_ast2700_evb_sdk_v09_02(self): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=machine:ast2700-evb - """ - - image_url = ('https://github.com/AspeedTech-BMC/openbmc/releases/' - 'download/v09.02/ast2700-default-obmc.tar.gz') - image_hash = 'ac969c2602f4e6bdb69562ff466b89ae3fe1d86e1f6797bb7969d787f82116a7' - image_path = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - archive.extract(image_path, self.workdir) - - num_cpu = 4 - image_dir = self.workdir + '/ast2700-default/' - uboot_size = os.path.getsize(image_dir + 'u-boot-nodtb.bin') - uboot_dtb_load_addr = hex(0x400000000 + uboot_size) - - load_images_list = [ - { - 'addr': '0x400000000', - 'file': image_dir + 'u-boot-nodtb.bin' - }, - { - 'addr': str(uboot_dtb_load_addr), - 'file': image_dir + 'u-boot.dtb' - }, - { - 'addr': '0x430000000', - 'file': image_dir + 'bl31.bin' - }, - { - 'addr': '0x430080000', - 'file': image_dir + 'optee/tee-raw.bin' - } - ] - - for load_image in load_images_list: - addr = load_image['addr'] - file = load_image['file'] - self.vm.add_args('-device', - f'loader,force-raw=on,addr={addr},file={file}') - - for i in range(num_cpu): - self.vm.add_args('-device', - f'loader,addr=0x430000000,cpu-num={i}') - - self.vm.add_args('-smp', str(num_cpu)) - self.do_test_aarch64_aspeed_sdk_start(image_dir + 'image-bmc') - self.wait_for_console_pattern('nodistro.0 ast2700-default ttyS12') - self.ssh_connect('root', '0penBmc', False) - -class AST2x00MachineMMC(QemuSystemTest): - - timeout = 240 - - def wait_for_console_pattern(self, success_message, vm=None): - wait_for_console_pattern(self, success_message, - failure_message='Kernel panic - not syncing', - vm=vm) - - def test_arm_aspeed_emmc_boot(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:rainier-bmc - :avocado: tags=device:emmc - """ - - image_url = ('https://fileserver.linaro.org/s/B6pJTwWEkzSDi36/download/' - 'mmc-p10bmc-20240617.qcow2') - image_hash = ('d523fb478d2b84d5adc5658d08502bc64b1486955683814f89c6137518acd90b') - image_path = self.fetch_asset(image_url, asset_hash=image_hash, - algorithm='sha256') - - self.require_netdev('user') - - self.vm.set_console() - self.vm.add_args('-drive', - 'file=' + image_path + ',if=sd,id=sd2,index=2', - '-net', 'nic', '-net', 'user') - self.vm.launch() - - self.wait_for_console_pattern('U-Boot SPL 2019.04') - self.wait_for_console_pattern('Trying to boot from MMC1') - self.wait_for_console_pattern('U-Boot 2019.04') - self.wait_for_console_pattern('eMMC 2nd Boot') - self.wait_for_console_pattern('## Loading kernel from FIT Image') - self.wait_for_console_pattern('Starting kernel ...') - self.wait_for_console_pattern('Booting Linux on physical CPU 0xf00') - self.wait_for_console_pattern('mmcblk0: p1 p2 p3 p4 p5 p6 p7') - self.wait_for_console_pattern('IBM eBMC (OpenBMC for IBM Enterprise') diff --git a/tests/avocado/machine_avr6.py b/tests/avocado/machine_avr6.py deleted file mode 100644 index 5485db79c68..00000000000 --- a/tests/avocado/machine_avr6.py +++ /dev/null @@ -1,50 +0,0 @@ -# -# QEMU AVR integration tests -# -# Copyright (c) 2019-2020 Michael Rolnik -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# - -import time - -from avocado_qemu import QemuSystemTest - -class AVR6Machine(QemuSystemTest): - timeout = 5 - - def test_freertos(self): - """ - :avocado: tags=arch:avr - :avocado: tags=machine:arduino-mega-2560-v3 - """ - """ - https://github.com/seharris/qemu-avr-tests/raw/master/free-rtos/Demo/AVR_ATMega2560_GCC/demo.elf - constantly prints out 'ABCDEFGHIJKLMNOPQRSTUVWXABCDEFGHIJKLMNOPQRSTUVWX' - """ - rom_url = ('https://github.com/seharris/qemu-avr-tests' - '/raw/36c3e67b8755dcf/free-rtos/Demo' - '/AVR_ATMega2560_GCC/demo.elf') - rom_hash = '7eb521f511ca8f2622e0a3c5e8dd686efbb911d4' - rom_path = self.fetch_asset(rom_url, asset_hash=rom_hash) - - self.vm.add_args('-bios', rom_path) - self.vm.add_args('-nographic') - self.vm.launch() - - time.sleep(2) - self.vm.shutdown() - - self.assertIn('ABCDEFGHIJKLMNOPQRSTUVWXABCDEFGHIJKLMNOPQRSTUVWX', - self.vm.get_log()) diff --git a/tests/avocado/machine_loongarch.py b/tests/avocado/machine_loongarch.py deleted file mode 100644 index 8de308f2d65..00000000000 --- a/tests/avocado/machine_loongarch.py +++ /dev/null @@ -1,58 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-or-later -# -# LoongArch virt test. -# -# Copyright (c) 2023 Loongson Technology Corporation Limited -# - -from avocado_qemu import QemuSystemTest -from avocado_qemu import exec_command_and_wait_for_pattern -from avocado_qemu import wait_for_console_pattern - -class LoongArchMachine(QemuSystemTest): - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' - - timeout = 120 - - def wait_for_console_pattern(self, success_message, vm=None): - wait_for_console_pattern(self, success_message, - failure_message='Kernel panic - not syncing', - vm=vm) - - def test_loongarch64_devices(self): - - """ - :avocado: tags=arch:loongarch64 - :avocado: tags=machine:virt - """ - - kernel_url = ('https://github.com/yangxiaojuan-loongson/qemu-binary/' - 'releases/download/2024-05-30/vmlinuz.efi') - kernel_hash = '951b485b16e3788b6db03a3e1793c067009e31a2' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - initrd_url = ('https://github.com/yangxiaojuan-loongson/qemu-binary/' - 'releases/download/2024-05-30/ramdisk') - initrd_hash = 'c67658d9b2a447ce7db2f73ba3d373c9b2b90ab2' - initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - - bios_url = ('https://github.com/yangxiaojuan-loongson/qemu-binary/' - 'releases/download/2024-05-30/QEMU_EFI.fd') - bios_hash = ('f4d0966b5117d4cd82327c050dd668741046be69') - bios_path = self.fetch_asset(bios_url, asset_hash=bios_hash) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'root=/dev/ram rdinit=/sbin/init console=ttyS0,115200') - self.vm.add_args('-nographic', - '-smp', '4', - '-m', '1024', - '-cpu', 'la464', - '-kernel', kernel_path, - '-initrd', initrd_path, - '-bios', bios_path, - '-append', kernel_command_line) - self.vm.launch() - self.wait_for_console_pattern('Run /sbin/init as init process') - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'processor : 3') diff --git a/tests/avocado/machine_m68k_nextcube.py b/tests/avocado/machine_m68k_nextcube.py deleted file mode 100644 index 1f3c883910d..00000000000 --- 
a/tests/avocado/machine_m68k_nextcube.py +++ /dev/null @@ -1,70 +0,0 @@ -# Functional test that boots a VM and run OCR on the framebuffer -# -# Copyright (c) 2019 Philippe Mathieu-Daudé -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os -import time - -from avocado_qemu import QemuSystemTest -from avocado import skipUnless - -from tesseract_utils import tesseract_available, tesseract_ocr - -PIL_AVAILABLE = True -try: - from PIL import Image -except ImportError: - PIL_AVAILABLE = False - - -class NextCubeMachine(QemuSystemTest): - """ - :avocado: tags=arch:m68k - :avocado: tags=machine:next-cube - :avocado: tags=device:framebuffer - """ - - timeout = 15 - - def check_bootrom_framebuffer(self, screenshot_path): - rom_url = ('https://sourceforge.net/p/previous/code/1350/tree/' - 'trunk/src/Rev_2.5_v66.BIN?format=raw') - rom_hash = 'b3534796abae238a0111299fc406a9349f7fee24' - rom_path = self.fetch_asset(rom_url, asset_hash=rom_hash) - - self.vm.add_args('-bios', rom_path) - self.vm.launch() - - self.log.info('VM launched, waiting for display') - # TODO: Use avocado.utils.wait.wait_for to catch the - # 'displaysurface_create 1120x832' trace-event. - time.sleep(2) - - self.vm.cmd('human-monitor-command', - command_line='screendump %s' % screenshot_path) - - @skipUnless(PIL_AVAILABLE, 'Python PIL not installed') - def test_bootrom_framebuffer_size(self): - screenshot_path = os.path.join(self.workdir, "dump.ppm") - self.check_bootrom_framebuffer(screenshot_path) - - width, height = Image.open(screenshot_path).size - self.assertEqual(width, 1120) - self.assertEqual(height, 832) - - # Tesseract 4 adds a new OCR engine based on LSTM neural networks. The - # new version is faster and more accurate than version 3. The drawback is - # that it is still alpha-level software. - @skipUnless(tesseract_available(4), 'tesseract OCR tool not available') - def test_bootrom_framebuffer_ocr_with_tesseract(self): - screenshot_path = os.path.join(self.workdir, "dump.ppm") - self.check_bootrom_framebuffer(screenshot_path) - lines = tesseract_ocr(screenshot_path, tesseract_version=4) - text = '\n'.join(lines) - self.assertIn('Testing the FPU', text) - self.assertIn('System test failed. Error code', text) - self.assertIn('Boot command', text) - self.assertIn('Next>', text) diff --git a/tests/avocado/machine_microblaze.py b/tests/avocado/machine_microblaze.py deleted file mode 100644 index 807709cd11e..00000000000 --- a/tests/avocado/machine_microblaze.py +++ /dev/null @@ -1,61 +0,0 @@ -# Functional test that boots a microblaze Linux kernel and checks the console -# -# Copyright (c) 2018, 2021 Red Hat, Inc. -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. 
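# The next-cube OCR test above screendumps the framebuffer and greps the
# recognised text.  The tesseract_utils helper it imports is not part of this
# hunk, so the following is only a rough approximation of that step, assuming
# the plain 'tesseract' CLI is installed and on PATH:

import subprocess

def ocr_lines(screenshot_path):
    """Run tesseract on an image and return the recognised lines of text."""
    proc = subprocess.run(["tesseract", screenshot_path, "stdout"],
                          capture_output=True, text=True, check=True)
    return proc.stdout.splitlines()

# e.g. assert any('Boot command' in line for line in ocr_lines('dump.ppm'))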
- -import time -from avocado_qemu import exec_command, exec_command_and_wait_for_pattern -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern -from avocado.utils import archive - -class MicroblazeMachine(QemuSystemTest): - - timeout = 90 - - def test_microblaze_s3adsp1800(self): - """ - :avocado: tags=arch:microblaze - :avocado: tags=machine:petalogix-s3adsp1800 - """ - - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day17.tar.xz') - tar_hash = '08bf3e3bfb6b6c7ce1e54ab65d54e189f2caf13f' - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - archive.extract(file_path, self.workdir) - self.vm.set_console() - self.vm.add_args('-kernel', self.workdir + '/day17/ballerina.bin') - self.vm.launch() - wait_for_console_pattern(self, 'This architecture does not have ' - 'kernel memory protection') - # Note: - # The kernel sometimes gets stuck after the "This architecture ..." - # message, that's why we don't test for a later string here. This - # needs some investigation by a microblaze wizard one day... - - def test_microblazeel_s3adsp1800(self): - """ - :avocado: tags=arch:microblazeel - :avocado: tags=machine:petalogix-s3adsp1800 - """ - - self.require_netdev('user') - tar_url = ('http://www.qemu-advent-calendar.org/2023/download/' - 'day13.tar.gz') - tar_hash = '6623d5fff5f84cfa8f34e286f32eff6a26546f44' - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - archive.extract(file_path, self.workdir) - self.vm.set_console() - self.vm.add_args('-kernel', self.workdir + '/day13/xmaton.bin') - self.vm.add_args('-nic', 'user,tftp=' + self.workdir + '/day13/') - self.vm.launch() - wait_for_console_pattern(self, 'QEMU Advent Calendar 2023') - time.sleep(0.1) - exec_command(self, 'root') - time.sleep(0.1) - exec_command_and_wait_for_pattern(self, - 'tftp -g -r xmaton.png 10.0.2.2 ; md5sum xmaton.png', - '821cd3cab8efd16ad6ee5acc3642a8ea') diff --git a/tests/avocado/machine_mips_fuloong2e.py b/tests/avocado/machine_mips_fuloong2e.py deleted file mode 100644 index 89291f47b24..00000000000 --- a/tests/avocado/machine_mips_fuloong2e.py +++ /dev/null @@ -1,42 +0,0 @@ -# Functional tests for the Lemote Fuloong-2E machine. -# -# Copyright (c) 2019 Philippe Mathieu-Daudé -# -# This work is licensed under the terms of the GNU GPL, version 2 or later. -# See the COPYING file in the top-level directory. 
-# -# SPDX-License-Identifier: GPL-2.0-or-later - -import os - -from avocado import skipUnless -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - -class MipsFuloong2e(QemuSystemTest): - - timeout = 60 - - @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') - @skipUnless(os.getenv('RESCUE_YL_PATH'), 'RESCUE_YL_PATH not available') - def test_linux_kernel_isa_serial(self): - """ - :avocado: tags=arch:mips64el - :avocado: tags=machine:fuloong2e - :avocado: tags=endian:little - :avocado: tags=device:bonito64 - :avocado: tags=device:via686b - """ - # Recovery system for the Yeeloong laptop - # (enough to test the fuloong2e southbridge, accessing its ISA bus) - # http://dev.lemote.com/files/resource/download/rescue/rescue-yl - kernel_hash = 'ec4d1bd89a8439c41033ca63db60160cc6d6f09a' - kernel_path = self.fetch_asset('file://' + os.getenv('RESCUE_YL_PATH'), - asset_hash=kernel_hash) - - self.vm.set_console() - self.vm.add_args('-kernel', kernel_path) - self.vm.launch() - wait_for_console_pattern(self, 'Linux version 2.6.27.7lemote') - cpu_revision = 'CPU revision is: 00006302 (ICT Loongson-2)' - wait_for_console_pattern(self, cpu_revision) diff --git a/tests/avocado/machine_mips_loongson3v.py b/tests/avocado/machine_mips_loongson3v.py deleted file mode 100644 index 5194cf18c9c..00000000000 --- a/tests/avocado/machine_mips_loongson3v.py +++ /dev/null @@ -1,39 +0,0 @@ -# Functional tests for the Generic Loongson-3 Platform. -# -# Copyright (c) 2021 Jiaxun Yang -# -# This work is licensed under the terms of the GNU GPL, version 2 or later. -# See the COPYING file in the top-level directory. -# -# SPDX-License-Identifier: GPL-2.0-or-later - -import os -import time - -from avocado import skipUnless -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - -class MipsLoongson3v(QemuSystemTest): - timeout = 60 - - @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') - def test_pmon_serial_console(self): - """ - :avocado: tags=arch:mips64el - :avocado: tags=endian:little - :avocado: tags=machine:loongson3-virt - :avocado: tags=cpu:Loongson-3A1000 - :avocado: tags=device:liointc - :avocado: tags=device:goldfish_rtc - """ - - pmon_hash = '7c8b45dd81ccfc55ff28f5aa267a41c3' - pmon_path = self.fetch_asset('https://github.com/loongson-community/pmon/' - 'releases/download/20210112/pmon-3avirt.bin', - asset_hash=pmon_hash, algorithm='md5') - - self.vm.set_console() - self.vm.add_args('-bios', pmon_path) - self.vm.launch() - wait_for_console_pattern(self, 'CPU GODSON3 BogoMIPS:') diff --git a/tests/avocado/machine_mips_malta.py b/tests/avocado/machine_mips_malta.py deleted file mode 100644 index 8cf84bd8050..00000000000 --- a/tests/avocado/machine_mips_malta.py +++ /dev/null @@ -1,164 +0,0 @@ -# Functional tests for the MIPS Malta board -# -# Copyright (c) Philippe Mathieu-Daudé -# -# This work is licensed under the terms of the GNU GPL, version 2 or later. -# See the COPYING file in the top-level directory. 
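# fetch_asset() verifies every download against the checksum given in the
# test (sha1-length strings where no algorithm is named, sha256 or md5 where
# 'algorithm' says so).  Recomputing such a digest by hand is a short
# exercise; a sketch with the file path as a placeholder:

import hashlib

def file_digest(path, algorithm="sha1", chunk_size=1 << 20):
    """Return the hex digest of a file, reading it in chunks."""
    h = hashlib.new(algorithm)
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# e.g. file_digest('pmon-3avirt.bin', 'md5') should equal the pmon_hash above.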
-# -# SPDX-License-Identifier: GPL-2.0-or-later - -import os -import gzip -import logging - -from avocado import skipUnless -from avocado import skipUnless -from avocado.utils import archive -from avocado_qemu import QemuSystemTest -from avocado_qemu import exec_command_and_wait_for_pattern -from avocado_qemu import interrupt_interactive_console_until_pattern -from avocado_qemu import wait_for_console_pattern - - -NUMPY_AVAILABLE = True -try: - import numpy as np -except ImportError: - NUMPY_AVAILABLE = False - -CV2_AVAILABLE = True -try: - import cv2 -except ImportError: - CV2_AVAILABLE = False - - -@skipUnless(NUMPY_AVAILABLE, 'Python NumPy not installed') -@skipUnless(CV2_AVAILABLE, 'Python OpenCV not installed') -class MaltaMachineFramebuffer(QemuSystemTest): - - timeout = 30 - - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' - - def do_test_i6400_framebuffer_logo(self, cpu_cores_count): - """ - Boot Linux kernel and check Tux logo is displayed on the framebuffer. - """ - screendump_path = os.path.join(self.workdir, 'screendump.pbm') - - kernel_url = ('https://github.com/philmd/qemu-testing-blob/raw/' - 'a5966ca4b5/mips/malta/mips64el/' - 'vmlinux-4.7.0-rc1.I6400.gz') - kernel_hash = '096f50c377ec5072e6a366943324622c312045f6' - kernel_path_gz = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - kernel_path = self.workdir + "vmlinux" - archive.gzip_uncompress(kernel_path_gz, kernel_path) - - tuxlogo_url = ('https://github.com/torvalds/linux/raw/v2.6.12/' - 'drivers/video/logo/logo_linux_vga16.ppm') - tuxlogo_hash = '3991c2ddbd1ddaecda7601f8aafbcf5b02dc86af' - tuxlogo_path = self.fetch_asset(tuxlogo_url, asset_hash=tuxlogo_hash) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'clocksource=GIC console=tty0 console=ttyS0') - self.vm.add_args('-kernel', kernel_path, - '-smp', '%u' % cpu_cores_count, - '-vga', 'std', - '-append', kernel_command_line) - self.vm.launch() - framebuffer_ready = 'Console: switching to colour frame buffer device' - wait_for_console_pattern(self, framebuffer_ready, - failure_message='Kernel panic - not syncing') - self.vm.cmd('human-monitor-command', command_line='stop') - self.vm.cmd('human-monitor-command', - command_line='screendump %s' % screendump_path) - logger = logging.getLogger('framebuffer') - - match_threshold = 0.95 - screendump_bgr = cv2.imread(screendump_path, cv2.IMREAD_COLOR) - tuxlogo_bgr = cv2.imread(tuxlogo_path, cv2.IMREAD_COLOR) - result = cv2.matchTemplate(screendump_bgr, tuxlogo_bgr, - cv2.TM_CCOEFF_NORMED) - loc = np.where(result >= match_threshold) - tuxlogo_count = 0 - h, w = tuxlogo_bgr.shape[:2] - debug_png = os.getenv('AVOCADO_CV2_SCREENDUMP_PNG_PATH') - for tuxlogo_count, pt in enumerate(zip(*loc[::-1]), start=1): - logger.debug('found Tux at position (x, y) = %s', pt) - cv2.rectangle(screendump_bgr, pt, - (pt[0] + w, pt[1] + h), (0, 0, 255), 2) - if debug_png: - cv2.imwrite(debug_png, screendump_bgr) - self.assertGreaterEqual(tuxlogo_count, cpu_cores_count) - - def test_mips_malta_i6400_framebuffer_logo_1core(self): - """ - :avocado: tags=arch:mips64el - :avocado: tags=machine:malta - :avocado: tags=cpu:I6400 - """ - self.do_test_i6400_framebuffer_logo(1) - - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - - def test_mips_malta_i6400_framebuffer_logo_7cores(self): - """ - :avocado: tags=arch:mips64el - :avocado: tags=machine:malta - :avocado: tags=cpu:I6400 - :avocado: tags=mips:smp - :avocado: tags=flaky - """ - self.do_test_i6400_framebuffer_logo(7) - - 
@skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - - def test_mips_malta_i6400_framebuffer_logo_8cores(self): - """ - :avocado: tags=arch:mips64el - :avocado: tags=machine:malta - :avocado: tags=cpu:I6400 - :avocado: tags=mips:smp - :avocado: tags=flaky - """ - self.do_test_i6400_framebuffer_logo(8) - -class MaltaMachine(QemuSystemTest): - - def do_test_yamon(self): - rom_url = ('https://s3-eu-west-1.amazonaws.com/' - 'downloads-mips/mips-downloads/' - 'YAMON/yamon-bin-02.22.zip') - rom_hash = '8da7ecddbc5312704b8b324341ee238189bde480' - zip_path = self.fetch_asset(rom_url, asset_hash=rom_hash) - - archive.extract(zip_path, self.workdir) - yamon_path = os.path.join(self.workdir, 'yamon-02.22.bin') - - self.vm.set_console() - self.vm.add_args('-bios', yamon_path) - self.vm.launch() - - prompt = 'YAMON>' - pattern = 'YAMON ROM Monitor' - interrupt_interactive_console_until_pattern(self, pattern, prompt) - wait_for_console_pattern(self, prompt) - self.vm.shutdown() - - def test_mipsel_malta_yamon(self): - """ - :avocado: tags=arch:mipsel - :avocado: tags=machine:malta - :avocado: tags=endian:little - """ - self.do_test_yamon() - - def test_mips64el_malta_yamon(self): - """ - :avocado: tags=arch:mips64el - :avocado: tags=machine:malta - :avocado: tags=endian:little - """ - self.do_test_yamon() diff --git a/tests/avocado/machine_rx_gdbsim.py b/tests/avocado/machine_rx_gdbsim.py deleted file mode 100644 index 412a7a5089d..00000000000 --- a/tests/avocado/machine_rx_gdbsim.py +++ /dev/null @@ -1,77 +0,0 @@ -# Functional test that boots a Linux kernel and checks the console -# -# Copyright (c) 2018 Red Hat, Inc. -# -# Author: -# Cleber Rosa -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os - -from avocado import skipUnless -from avocado_qemu import QemuSystemTest -from avocado_qemu import exec_command_and_wait_for_pattern -from avocado_qemu import wait_for_console_pattern -from avocado.utils import archive - - -class RxGdbSimMachine(QemuSystemTest): - - timeout = 30 - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' - - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - - def test_uboot(self): - """ - U-Boot and checks that the console is operational. - - :avocado: tags=arch:rx - :avocado: tags=machine:gdbsim-r5f562n8 - :avocado: tags=endian:little - :avocado: tags=flaky - """ - uboot_url = ('https://acc.dl.osdn.jp/users/23/23888/u-boot.bin.gz') - uboot_hash = '9b78dbd43b40b2526848c0b1ce9de02c24f4dcdb' - uboot_path = self.fetch_asset(uboot_url, asset_hash=uboot_hash) - uboot_path = archive.uncompress(uboot_path, self.workdir) - - self.vm.set_console() - self.vm.add_args('-bios', uboot_path, - '-no-reboot') - self.vm.launch() - uboot_version = 'U-Boot 2016.05-rc3-23705-ga1ef3c71cb-dirty' - wait_for_console_pattern(self, uboot_version) - gcc_version = 'rx-unknown-linux-gcc (GCC) 9.0.0 20181105 (experimental)' - # FIXME limit baudrate on chardev, else we type too fast - #exec_command_and_wait_for_pattern(self, 'version', gcc_version) - - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - - def test_linux_sash(self): - """ - Boots a Linux kernel and checks that the console is operational. 
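# Some assets above (the rx U-Boot binary, the malta kernel) ship
# gzip-compressed and are unpacked with avocado's archive helpers before use.
# The same step with only the standard library, paths being placeholders:

import gzip
import shutil

def gunzip(src, dst):
    """Decompress a single .gz file to dst."""
    with gzip.open(src, "rb") as f_in, open(dst, "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)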
- - :avocado: tags=arch:rx - :avocado: tags=machine:gdbsim-r5f562n7 - :avocado: tags=endian:little - :avocado: tags=flaky - """ - dtb_url = ('https://acc.dl.osdn.jp/users/23/23887/rx-virt.dtb') - dtb_hash = '7b4e4e2c71905da44e86ce47adee2210b026ac18' - dtb_path = self.fetch_asset(dtb_url, asset_hash=dtb_hash) - kernel_url = ('http://acc.dl.osdn.jp/users/23/23845/zImage') - kernel_hash = '39a81067f8d72faad90866ddfefa19165d68fc99' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - self.vm.set_console() - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'earlycon' - self.vm.add_args('-kernel', kernel_path, - '-dtb', dtb_path, - '-no-reboot') - self.vm.launch() - wait_for_console_pattern(self, 'Sash command shell (version 1.1.1)', - failure_message='Kernel panic - not syncing') - exec_command_and_wait_for_pattern(self, 'printenv', 'TERM=linux') diff --git a/tests/avocado/machine_s390_ccw_virtio.py b/tests/avocado/machine_s390_ccw_virtio.py deleted file mode 100644 index 26e938c9e98..00000000000 --- a/tests/avocado/machine_s390_ccw_virtio.py +++ /dev/null @@ -1,277 +0,0 @@ -# Functional test that boots an s390x Linux guest with ccw and PCI devices -# attached and checks whether the devices are recognized by Linux -# -# Copyright (c) 2020 Red Hat, Inc. -# -# Author: -# Cornelia Huck -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os -import tempfile - -from avocado import skipUnless -from avocado_qemu import QemuSystemTest -from avocado_qemu import exec_command_and_wait_for_pattern -from avocado_qemu import wait_for_console_pattern -from avocado.utils import archive - -class S390CCWVirtioMachine(QemuSystemTest): - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' - - timeout = 120 - - def wait_for_console_pattern(self, success_message, vm=None): - wait_for_console_pattern(self, success_message, - failure_message='Kernel panic - not syncing', - vm=vm) - - def wait_for_crw_reports(self): - exec_command_and_wait_for_pattern(self, - 'while ! 
(dmesg -c | grep CRW) ; do sleep 1 ; done', - 'CRW reports') - - dmesg_clear_count = 1 - def clear_guest_dmesg(self): - exec_command_and_wait_for_pattern(self, 'dmesg -c > /dev/null; ' - r'echo dm_clear\ ' + str(self.dmesg_clear_count), - r'dm_clear ' + str(self.dmesg_clear_count)) - self.dmesg_clear_count += 1 - - def test_s390x_devices(self): - - """ - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - """ - - kernel_url = ('https://snapshot.debian.org/archive/debian/' - '20201126T092837Z/dists/buster/main/installer-s390x/' - '20190702+deb10u6/images/generic/kernel.debian') - kernel_hash = '5821fbee57d6220a067a8b967d24595621aa1eb6' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - initrd_url = ('https://snapshot.debian.org/archive/debian/' - '20201126T092837Z/dists/buster/main/installer-s390x/' - '20190702+deb10u6/images/generic/initrd.debian') - initrd_hash = '81ba09c97bef46e8f4660ac25b4ac0a5be3a94d6' - initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=sclp0 root=/dev/ram0 BOOT_DEBUG=3') - self.vm.add_args('-nographic', - '-kernel', kernel_path, - '-initrd', initrd_path, - '-append', kernel_command_line, - '-cpu', 'max,prno-trng=off', - '-device', 'virtio-net-ccw,devno=fe.1.1111', - '-device', - 'virtio-rng-ccw,devno=fe.2.0000,max_revision=0,id=rn1', - '-device', - 'virtio-rng-ccw,devno=fe.3.1234,max_revision=2,id=rn2', - '-device', 'zpci,uid=5,target=zzz', - '-device', 'virtio-net-pci,id=zzz', - '-device', 'zpci,uid=0xa,fid=12,target=serial', - '-device', 'virtio-serial-pci,id=serial', - '-device', 'virtio-balloon-ccw') - self.vm.launch() - - shell_ready = "sh: can't access tty; job control turned off" - self.wait_for_console_pattern(shell_ready) - # first debug shell is too early, we need to wait for device detection - exec_command_and_wait_for_pattern(self, 'exit', shell_ready) - - ccw_bus_ids="0.1.1111 0.2.0000 0.3.1234" - pci_bus_ids="0005:00:00.0 000a:00:00.0" - exec_command_and_wait_for_pattern(self, 'ls /sys/bus/ccw/devices/', - ccw_bus_ids) - exec_command_and_wait_for_pattern(self, 'ls /sys/bus/pci/devices/', - pci_bus_ids) - # check that the device at 0.2.0000 is in legacy mode, while the - # device at 0.3.1234 has the virtio-1 feature bit set - virtio_rng_features="00000000000000000000000000001100" + \ - "10000000000000000000000000000000" - virtio_rng_features_legacy="00000000000000000000000000001100" + \ - "00000000000000000000000000000000" - exec_command_and_wait_for_pattern(self, - 'cat /sys/bus/ccw/devices/0.2.0000/virtio?/features', - virtio_rng_features_legacy) - exec_command_and_wait_for_pattern(self, - 'cat /sys/bus/ccw/devices/0.3.1234/virtio?/features', - virtio_rng_features) - # check that /dev/hwrng works - and that it's gone after ejecting - exec_command_and_wait_for_pattern(self, - 'dd if=/dev/hwrng of=/dev/null bs=1k count=10', - '10+0 records out') - self.clear_guest_dmesg() - self.vm.cmd('device_del', id='rn1') - self.wait_for_crw_reports() - self.clear_guest_dmesg() - self.vm.cmd('device_del', id='rn2') - self.wait_for_crw_reports() - exec_command_and_wait_for_pattern(self, - 'dd if=/dev/hwrng of=/dev/null bs=1k count=10', - 'dd: /dev/hwrng: No such device') - # verify that we indeed have virtio-net devices (without having the - # virtio-net driver handy) - exec_command_and_wait_for_pattern(self, - 'cat /sys/bus/ccw/devices/0.1.1111/cutype', - '3832/01') - exec_command_and_wait_for_pattern(self, - r'cat 
/sys/bus/pci/devices/0005\:00\:00.0/subsystem_vendor', - r'0x1af4') - exec_command_and_wait_for_pattern(self, - r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_device', - r'0x0001') - # check fid propagation - exec_command_and_wait_for_pattern(self, - r'cat /sys/bus/pci/devices/000a\:00\:00.0/function_id', - r'0x0000000c') - # add another device - self.clear_guest_dmesg() - self.vm.cmd('device_add', driver='virtio-net-ccw', - devno='fe.0.4711', id='net_4711') - self.wait_for_crw_reports() - exec_command_and_wait_for_pattern(self, 'for i in 1 2 3 4 5 6 7 ; do ' - 'if [ -e /sys/bus/ccw/devices/*4711 ]; then break; fi ;' - 'sleep 1 ; done ; ls /sys/bus/ccw/devices/', - '0.0.4711') - # and detach it again - self.clear_guest_dmesg() - self.vm.cmd('device_del', id='net_4711') - self.vm.event_wait(name='DEVICE_DELETED', - match={'data': {'device': 'net_4711'}}) - self.wait_for_crw_reports() - exec_command_and_wait_for_pattern(self, - 'ls /sys/bus/ccw/devices/0.0.4711', - 'No such file or directory') - # test the virtio-balloon device - exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo', - 'MemTotal: 115640 kB') - self.vm.cmd('human-monitor-command', command_line='balloon 96') - exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo', - 'MemTotal: 82872 kB') - self.vm.cmd('human-monitor-command', command_line='balloon 128') - exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo', - 'MemTotal: 115640 kB') - - - def test_s390x_fedora(self): - - """ - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - :avocado: tags=device:virtio-gpu - :avocado: tags=device:virtio-crypto - :avocado: tags=device:virtio-net - :avocado: tags=flaky - """ - - kernel_url = ('https://archives.fedoraproject.org/pub/archive' - '/fedora-secondary/releases/31/Server/s390x/os' - '/images/kernel.img') - kernel_hash = 'b93d1efcafcf29c1673a4ce371a1f8b43941cfeb' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - initrd_url = ('https://archives.fedoraproject.org/pub/archive' - '/fedora-secondary/releases/31/Server/s390x/os' - '/images/initrd.img') - initrd_hash = '3de45d411df5624b8d8ef21cd0b44419ab59b12f' - initrd_path_xz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - initrd_path = os.path.join(self.workdir, 'initrd-raw.img') - archive.lzma_uncompress(initrd_path_xz, initrd_path) - - self.vm.set_console() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + ' audit=0 ' - 'rd.plymouth=0 plymouth.enable=0 rd.rescue') - self.vm.add_args('-nographic', - '-smp', '4', - '-m', '512', - '-name', 'Some Guest Name', - '-uuid', '30de4fd9-b4d5-409e-86a5-09b387f70bfa', - '-kernel', kernel_path, - '-initrd', initrd_path, - '-append', kernel_command_line, - '-device', 'zpci,uid=7,target=n', - '-device', 'virtio-net-pci,id=n,mac=02:ca:fe:fa:ce:12', - '-device', 'virtio-rng-ccw,devno=fe.1.9876', - '-device', 'virtio-gpu-ccw,devno=fe.2.5432') - self.vm.launch() - self.wait_for_console_pattern('Entering emergency mode') - - # Some tests to see whether the CLI options have been considered: - self.log.info("Test whether QEMU CLI options have been considered") - exec_command_and_wait_for_pattern(self, - 'while ! 
(dmesg | grep enP7p0s0) ; do sleep 1 ; done', - 'virtio_net virtio0 enP7p0s0: renamed') - exec_command_and_wait_for_pattern(self, 'lspci', - '0007:00:00.0 Class 0200: Device 1af4:1000') - exec_command_and_wait_for_pattern(self, - 'cat /sys/class/net/enP7p0s0/address', - '02:ca:fe:fa:ce:12') - exec_command_and_wait_for_pattern(self, 'lscss', '0.1.9876') - exec_command_and_wait_for_pattern(self, 'lscss', '0.2.5432') - exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', - 'processors : 4') - exec_command_and_wait_for_pattern(self, 'grep MemTotal /proc/meminfo', - 'MemTotal: 499848 kB') - exec_command_and_wait_for_pattern(self, 'grep Name /proc/sysinfo', - 'Extended Name: Some Guest Name') - exec_command_and_wait_for_pattern(self, 'grep UUID /proc/sysinfo', - '30de4fd9-b4d5-409e-86a5-09b387f70bfa') - - # Disable blinking cursor, then write some stuff into the framebuffer. - # QEMU's PPM screendumps contain uncompressed 24-bit values, while the - # framebuffer uses 32-bit, so we pad our text with some spaces when - # writing to the framebuffer. Since the PPM is uncompressed, we then - # can simply read the written "magic bytes" back from the PPM file to - # check whether the framebuffer is working as expected. - # Unfortunately, this test is flaky, so we don't run it by default - if os.getenv('QEMU_TEST_FLAKY_TESTS'): - self.log.info("Test screendump of virtio-gpu device") - exec_command_and_wait_for_pattern(self, - 'while ! (dmesg | grep gpudrmfb) ; do sleep 1 ; done', - 'virtio_gpudrmfb frame buffer device') - exec_command_and_wait_for_pattern(self, - r'echo -e "\e[?25l" > /dev/tty0', ':/#') - exec_command_and_wait_for_pattern(self, 'for ((i=0;i<250;i++)); do ' - 'echo " The qu ick fo x j ump s o ver a laz y d og" >> fox.txt;' - 'done', - ':/#') - exec_command_and_wait_for_pattern(self, - 'dd if=fox.txt of=/dev/fb0 bs=1000 oflag=sync,nocache ; rm fox.txt', - '12+0 records out') - with tempfile.NamedTemporaryFile(suffix='.ppm', - prefix='qemu-scrdump-') as ppmfile: - self.vm.cmd('screendump', filename=ppmfile.name) - ppmfile.seek(0) - line = ppmfile.readline() - self.assertEqual(line, b"P6\n") - line = ppmfile.readline() - self.assertEqual(line, b"1280 800\n") - line = ppmfile.readline() - self.assertEqual(line, b"255\n") - line = ppmfile.readline(256) - self.assertEqual(line, b"The quick fox jumps over a lazy dog\n") - else: - self.log.info("Skipped flaky screendump of virtio-gpu device test") - - # Hot-plug a virtio-crypto device and see whether it gets accepted - self.log.info("Test hot-plug virtio-crypto device") - self.clear_guest_dmesg() - self.vm.cmd('object-add', qom_type='cryptodev-backend-builtin', - id='cbe0') - self.vm.cmd('device_add', driver='virtio-crypto-ccw', id='crypdev0', - cryptodev='cbe0', devno='fe.0.2342') - exec_command_and_wait_for_pattern(self, - 'while ! (dmesg -c | grep Accelerator.device) ; do' - ' sleep 1 ; done', 'Accelerator device is ready') - exec_command_and_wait_for_pattern(self, 'lscss', '0.0.2342') - self.vm.cmd('device_del', id='crypdev0') - self.vm.cmd('object-del', id='cbe0') - exec_command_and_wait_for_pattern(self, - 'while ! 
(dmesg -c | grep Start.virtcrypto_remove) ; do' - ' sleep 1 ; done', 'Start virtcrypto_remove.') diff --git a/tests/avocado/machine_sparc64_sun4u.py b/tests/avocado/machine_sparc64_sun4u.py deleted file mode 100644 index d333c0ae91c..00000000000 --- a/tests/avocado/machine_sparc64_sun4u.py +++ /dev/null @@ -1,36 +0,0 @@ -# Functional test that boots a Linux kernel and checks the console -# -# Copyright (c) 2020 Red Hat, Inc. -# -# Author: -# Thomas Huth -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os - -from avocado_qemu import wait_for_console_pattern -from avocado.utils import archive -from boot_linux_console import LinuxKernelTest - -class Sun4uMachine(LinuxKernelTest): - """Boots the Linux kernel and checks that the console is operational""" - - timeout = 90 - - def test_sparc64_sun4u(self): - """ - :avocado: tags=arch:sparc64 - :avocado: tags=machine:sun4u - """ - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day23.tar.xz') - tar_hash = '142db83cd974ffadc4f75c8a5cad5bcc5722c240' - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - archive.extract(file_path, self.workdir) - self.vm.set_console() - self.vm.add_args('-kernel', self.workdir + '/day23/vmlinux', - '-append', self.KERNEL_COMMON_COMMAND_LINE) - self.vm.launch() - wait_for_console_pattern(self, 'Starting logging: OK') diff --git a/tests/avocado/mem-addr-space-check.py b/tests/avocado/mem-addr-space-check.py deleted file mode 100644 index d3974599f45..00000000000 --- a/tests/avocado/mem-addr-space-check.py +++ /dev/null @@ -1,354 +0,0 @@ -# Check for crash when using memory beyond the available guest processor -# address space. -# -# Copyright (c) 2023 Red Hat, Inc. -# -# Author: -# Ani Sinha -# -# SPDX-License-Identifier: GPL-2.0-or-later - -from avocado_qemu import QemuSystemTest -import time - -class MemAddrCheck(QemuSystemTest): - # after launch, in order to generate the logs from QEMU we need to - # wait for some time. Launching and then immediately shutting down - # the VM generates empty logs. A delay of 1 second is added for - # this reason. - DELAY_Q35_BOOT_SEQUENCE = 1 - - # first, lets test some 32-bit processors. - # for all 32-bit cases, pci64_hole_size is 0. - def test_phybits_low_pse36(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - With pse36 feature ON, a processor has 36 bits of addressing. So it can - access up to a maximum of 64GiB of memory. Memory hotplug region begins - at 4 GiB boundary when "above_4g_mem_size" is 0 (this would be true when - we have 0.5 GiB of VM memory, see pc_q35_init()). This means total - hotpluggable memory size is 60 GiB. Per slot, we reserve 1 GiB of memory - for dimm alignment for all machines. That leaves total hotpluggable - actual memory size of 59 GiB. If the VM is started with 0.5 GiB of - memory, maxmem should be set to a maximum value of 59.5 GiB to ensure - that the processor can address all memory directly. - Note that 64-bit pci hole size is 0 in this case. If maxmem is set to - 59.6G, QEMU should fail to start with a message "phy-bits are too low". - If maxmem is set to 59.5G with all other QEMU parameters identical, QEMU - should start fine. 
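# The virtio-gpu framebuffer check in the s390x test above leans on the
# layout of QEMU's PPM screendumps: a "P6" magic line, a "width height" line,
# a maxval line, then raw 24-bit pixel data.  A small header reader covering
# just what that test assumes (the file name is a placeholder):

def read_ppm_header(path):
    """Return (width, height, maxval) of a binary PPM screendump."""
    with open(path, "rb") as f:
        assert f.readline().strip() == b"P6"
        width, height = map(int, f.readline().split())
        maxval = int(f.readline())
    return width, height, maxval

# e.g. read_ppm_header('screendump.ppm') == (1280, 800, 255) for the
# virtio-gpu-ccw display configured in that test.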
- """ - self.vm.add_args('-S', '-machine', 'q35', '-m', - '512,slots=1,maxmem=59.6G', - '-cpu', 'pentium,pse36=on', '-display', 'none', - '-object', 'memory-backend-ram,id=mem1,size=1G', - '-device', 'pc-dimm,id=vm0,memdev=mem1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - self.vm.wait() - self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") - self.assertRegex(self.vm.get_log(), r'phys-bits too low') - - def test_phybits_low_pae(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - With pae feature ON, a processor has 36 bits of addressing. So it can - access up to a maximum of 64GiB of memory. Rest is the same as the case - with pse36 above. - """ - self.vm.add_args('-S', '-machine', 'q35', '-m', - '512,slots=1,maxmem=59.6G', - '-cpu', 'pentium,pae=on', '-display', 'none', - '-object', 'memory-backend-ram,id=mem1,size=1G', - '-device', 'pc-dimm,id=vm0,memdev=mem1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - self.vm.wait() - self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") - self.assertRegex(self.vm.get_log(), r'phys-bits too low') - - def test_phybits_ok_pentium_pse36(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - Setting maxmem to 59.5G and making sure that QEMU can start with the - same options as the failing case above with pse36 cpu feature. - """ - self.vm.add_args('-machine', 'q35', '-m', - '512,slots=1,maxmem=59.5G', - '-cpu', 'pentium,pse36=on', '-display', 'none', - '-object', 'memory-backend-ram,id=mem1,size=1G', - '-device', 'pc-dimm,id=vm0,memdev=mem1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) - self.vm.shutdown() - self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') - - def test_phybits_ok_pentium_pae(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - Test is same as above but now with pae cpu feature turned on. - Setting maxmem to 59.5G and making sure that QEMU can start fine - with the same options as the case above. - """ - self.vm.add_args('-machine', 'q35', '-m', - '512,slots=1,maxmem=59.5G', - '-cpu', 'pentium,pae=on', '-display', 'none', - '-object', 'memory-backend-ram,id=mem1,size=1G', - '-device', 'pc-dimm,id=vm0,memdev=mem1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) - self.vm.shutdown() - self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') - - def test_phybits_ok_pentium2(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - Pentium2 has 36 bits of addressing, so its same as pentium - with pse36 ON. - """ - self.vm.add_args('-machine', 'q35', '-m', - '512,slots=1,maxmem=59.5G', - '-cpu', 'pentium2', '-display', 'none', - '-object', 'memory-backend-ram,id=mem1,size=1G', - '-device', 'pc-dimm,id=vm0,memdev=mem1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) - self.vm.shutdown() - self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') - - def test_phybits_low_nonpse36(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - Pentium processor has 32 bits of addressing without pse36 or pae - so it can access physical address up to 4 GiB. Setting maxmem to - 4 GiB should make QEMU fail to start with "phys-bits too low" - message because the region for memory hotplug is always placed - above 4 GiB due to the PCI hole and simplicity. 
- """ - self.vm.add_args('-S', '-machine', 'q35', '-m', - '512,slots=1,maxmem=4G', - '-cpu', 'pentium', '-display', 'none', - '-object', 'memory-backend-ram,id=mem1,size=1G', - '-device', 'pc-dimm,id=vm0,memdev=mem1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - self.vm.wait() - self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") - self.assertRegex(self.vm.get_log(), r'phys-bits too low') - - # now lets test some 64-bit CPU cases. - def test_phybits_low_tcg_q35_70_amd(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - For q35 7.1 machines and above, there is a HT window that starts at - 1024 GiB and ends at 1 TiB - 1. If the max GPA falls in this range, - "above_4G" memory is adjusted to start at 1 TiB boundary for AMD cpus - in the default case. Lets test without that case for machines 7.0. - For q35-7.0 machines, "above 4G" memory starts are 4G. - pci64_hole size is 32 GiB. Since TCG_PHYS_ADDR_BITS is defined to - be 40, TCG emulated CPUs have maximum of 1 TiB (1024 GiB) of - directly addressable memory. - Hence, maxmem value at most can be - 1024 GiB - 4 GiB - 1 GiB per slot for alignment - 32 GiB + 0.5 GiB - which is equal to 987.5 GiB. Setting the value to 988 GiB should - make QEMU fail with the error message. - """ - self.vm.add_args('-S', '-machine', 'pc-q35-7.0', '-m', - '512,slots=1,maxmem=988G', - '-display', 'none', - '-object', 'memory-backend-ram,id=mem1,size=1G', - '-device', 'pc-dimm,id=vm0,memdev=mem1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - self.vm.wait() - self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") - self.assertRegex(self.vm.get_log(), r'phys-bits too low') - - def test_phybits_low_tcg_q35_71_amd(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - AMD_HT_START is defined to be at 1012 GiB. So for q35 machines - version > 7.0 and AMD cpus, instead of 1024 GiB limit for 40 bit - processor address space, it has to be 1012 GiB , that is 12 GiB - less than the case above in order to accommodate HT hole. - Make sure QEMU fails when maxmem size is 976 GiB (12 GiB less - than 988 GiB). - """ - self.vm.add_args('-S', '-machine', 'pc-q35-7.1', '-m', - '512,slots=1,maxmem=976G', - '-display', 'none', - '-object', 'memory-backend-ram,id=mem1,size=1G', - '-device', 'pc-dimm,id=vm0,memdev=mem1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - self.vm.wait() - self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") - self.assertRegex(self.vm.get_log(), r'phys-bits too low') - - def test_phybits_ok_tcg_q35_70_amd(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - Same as q35-7.0 AMD case except that here we check that QEMU can - successfully start when maxmem is < 988G. - """ - self.vm.add_args('-S', '-machine', 'pc-q35-7.0', '-m', - '512,slots=1,maxmem=987.5G', - '-display', 'none', - '-object', 'memory-backend-ram,id=mem1,size=1G', - '-device', 'pc-dimm,id=vm0,memdev=mem1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) - self.vm.shutdown() - self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') - - def test_phybits_ok_tcg_q35_71_amd(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - Same as q35-7.1 AMD case except that here we check that QEMU can - successfully start when maxmem is < 976G. 
- """ - self.vm.add_args('-S', '-machine', 'pc-q35-7.1', '-m', - '512,slots=1,maxmem=975.5G', - '-display', 'none', - '-object', 'memory-backend-ram,id=mem1,size=1G', - '-device', 'pc-dimm,id=vm0,memdev=mem1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) - self.vm.shutdown() - self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') - - def test_phybits_ok_tcg_q35_71_intel(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - Same parameters as test_phybits_low_tcg_q35_71_amd() but use - Intel cpu instead. QEMU should start fine in this case as - "above_4G" memory starts at 4G. - """ - self.vm.add_args('-S', '-cpu', 'Skylake-Server', - '-machine', 'pc-q35-7.1', '-m', - '512,slots=1,maxmem=976G', - '-display', 'none', - '-object', 'memory-backend-ram,id=mem1,size=1G', - '-device', 'pc-dimm,id=vm0,memdev=mem1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) - self.vm.shutdown() - self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') - - def test_phybits_low_tcg_q35_71_amd_41bits(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - AMD processor with 41 bits. Max cpu hw address = 2 TiB. - By setting maxram above 1012 GiB - 32 GiB - 4 GiB = 976 GiB, we can - force "above_4G" memory to start at 1 TiB for q35-7.1 machines - (max GPA will be above AMD_HT_START which is defined as 1012 GiB). - - With pci_64_hole size at 32 GiB, in this case, maxmem should be 991.5 - GiB with 1 GiB per slot for alignment and 0.5 GiB as non-hotplug - memory for the VM (1024 - 32 - 1 + 0.5). With 992 GiB, QEMU should - fail to start. - """ - self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41', - '-machine', 'pc-q35-7.1', '-m', - '512,slots=1,maxmem=992G', - '-display', 'none', - '-object', 'memory-backend-ram,id=mem1,size=1G', - '-device', 'pc-dimm,id=vm0,memdev=mem1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - self.vm.wait() - self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") - self.assertRegex(self.vm.get_log(), r'phys-bits too low') - - def test_phybits_ok_tcg_q35_71_amd_41bits(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - AMD processor with 41 bits. Max cpu hw address = 2 TiB. - Same as above but by setting maxram between 976 GiB and 992 Gib, - QEMU should start fine. - """ - self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41', - '-machine', 'pc-q35-7.1', '-m', - '512,slots=1,maxmem=990G', - '-display', 'none', - '-object', 'memory-backend-ram,id=mem1,size=1G', - '-device', 'pc-dimm,id=vm0,memdev=mem1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) - self.vm.shutdown() - self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') - - def test_phybits_low_tcg_q35_intel_cxl(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - cxl memory window starts after memory device range. Here, we use 1 GiB - of cxl window memory. 4G_mem end aligns at 4G. pci64_hole is 32 GiB and - starts after the cxl memory window. - So maxmem here should be at most 986 GiB considering all memory boundary - alignment constraints with 40 bits (1 TiB) of processor physical bits. 
- """ - self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40', - '-machine', 'q35,cxl=on', '-m', - '512,slots=1,maxmem=987G', - '-display', 'none', - '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1', - '-M', 'cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=1G') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - self.vm.wait() - self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") - self.assertRegex(self.vm.get_log(), r'phys-bits too low') - - def test_phybits_ok_tcg_q35_intel_cxl(self): - """ - :avocado: tags=machine:q35 - :avocado: tags=arch:x86_64 - - Same as above but here we do not reserve any cxl memory window. Hence, - with the exact same parameters as above, QEMU should start fine even - with cxl enabled. - """ - self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40', - '-machine', 'q35,cxl=on', '-m', - '512,slots=1,maxmem=987G', - '-display', 'none', - '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1') - self.vm.set_qmp_monitor(enabled=False) - self.vm.launch() - time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) - self.vm.shutdown() - self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') diff --git a/tests/avocado/migration.py b/tests/avocado/migration.py deleted file mode 100644 index be6234b3c24..00000000000 --- a/tests/avocado/migration.py +++ /dev/null @@ -1,135 +0,0 @@ -# Migration test -# -# Copyright (c) 2019 Red Hat, Inc. -# -# Authors: -# Cleber Rosa -# Caio Carrara -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - - -import tempfile -import os - -from avocado_qemu import QemuSystemTest -from avocado import skipUnless - -from avocado.utils.network import ports -from avocado.utils import wait -from avocado.utils.path import find_command - - -class MigrationTest(QemuSystemTest): - """ - :avocado: tags=migration - """ - - timeout = 10 - - @staticmethod - def migration_finished(vm): - return vm.cmd('query-migrate')['status'] in ('completed', 'failed') - - def assert_migration(self, src_vm, dst_vm): - wait.wait_for(self.migration_finished, - timeout=self.timeout, - step=0.1, - args=(src_vm,)) - wait.wait_for(self.migration_finished, - timeout=self.timeout, - step=0.1, - args=(dst_vm,)) - self.assertEqual(src_vm.cmd('query-migrate')['status'], 'completed') - self.assertEqual(dst_vm.cmd('query-migrate')['status'], 'completed') - self.assertEqual(dst_vm.cmd('query-status')['status'], 'running') - self.assertEqual(src_vm.cmd('query-status')['status'],'postmigrate') - - def do_migrate(self, dest_uri, src_uri=None): - dest_vm = self.get_vm('-incoming', dest_uri) - dest_vm.add_args('-nodefaults') - dest_vm.launch() - if src_uri is None: - src_uri = dest_uri - source_vm = self.get_vm() - source_vm.add_args('-nodefaults') - source_vm.launch() - source_vm.qmp('migrate', uri=src_uri) - self.assert_migration(source_vm, dest_vm) - - def _get_free_port(self): - port = ports.find_free_port() - if port is None: - self.cancel('Failed to find a free port') - return port - - def migration_with_tcp_localhost(self): - dest_uri = 'tcp:localhost:%u' % self._get_free_port() - self.do_migrate(dest_uri) - - def migration_with_unix(self): - with tempfile.TemporaryDirectory(prefix='socket_') as socket_path: - dest_uri = 'unix:%s/qemu-test.sock' % socket_path - self.do_migrate(dest_uri) - - @skipUnless(find_command('nc', default=False), "'nc' command not found") - def migration_with_exec(self): - """The test works for both netcat-traditional and netcat-openbsd packages.""" - free_port = 
self._get_free_port() - dest_uri = 'exec:nc -l localhost %u' % free_port - src_uri = 'exec:nc localhost %u' % free_port - self.do_migrate(dest_uri, src_uri) - - -@skipUnless('aarch64' in os.uname()[4], "host != target") -class Aarch64(MigrationTest): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=machine:virt - :avocado: tags=cpu:max - """ - - def test_migration_with_tcp_localhost(self): - self.migration_with_tcp_localhost() - - def test_migration_with_unix(self): - self.migration_with_unix() - - def test_migration_with_exec(self): - self.migration_with_exec() - - -@skipUnless('x86_64' in os.uname()[4], "host != target") -class X86_64(MigrationTest): - """ - :avocado: tags=arch:x86_64 - :avocado: tags=machine:pc - :avocado: tags=cpu:qemu64 - """ - - def test_migration_with_tcp_localhost(self): - self.migration_with_tcp_localhost() - - def test_migration_with_unix(self): - self.migration_with_unix() - - def test_migration_with_exec(self): - self.migration_with_exec() - - -@skipUnless('ppc64le' in os.uname()[4], "host != target") -class PPC64(MigrationTest): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - """ - - def test_migration_with_tcp_localhost(self): - self.migration_with_tcp_localhost() - - def test_migration_with_unix(self): - self.migration_with_unix() - - def test_migration_with_exec(self): - self.migration_with_exec() diff --git a/tests/avocado/multiprocess.py b/tests/avocado/multiprocess.py deleted file mode 100644 index ee7490ae084..00000000000 --- a/tests/avocado/multiprocess.py +++ /dev/null @@ -1,102 +0,0 @@ -# Test for multiprocess qemu -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - - -import os -import socket - -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern -from avocado_qemu import exec_command -from avocado_qemu import exec_command_and_wait_for_pattern - -class Multiprocess(QemuSystemTest): - """ - :avocado: tags=multiprocess - """ - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' - - def do_test(self, kernel_url, kernel_hash, initrd_url, initrd_hash, - kernel_command_line, machine_type): - """Main test method""" - self.require_accelerator('kvm') - self.require_multiprocess() - - # Create socketpair to connect proxy and remote processes - proxy_sock, remote_sock = socket.socketpair(socket.AF_UNIX, - socket.SOCK_STREAM) - os.set_inheritable(proxy_sock.fileno(), True) - os.set_inheritable(remote_sock.fileno(), True) - - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - - # Create remote process - remote_vm = self.get_vm() - remote_vm.add_args('-machine', 'x-remote') - remote_vm.add_args('-nodefaults') - remote_vm.add_args('-device', 'lsi53c895a,id=lsi1') - remote_vm.add_args('-object', 'x-remote-object,id=robj1,' - 'devid=lsi1,fd='+str(remote_sock.fileno())) - remote_vm.launch() - - # Create proxy process - self.vm.set_console() - self.vm.add_args('-machine', machine_type) - self.vm.add_args('-accel', 'kvm') - self.vm.add_args('-cpu', 'host') - self.vm.add_args('-object', - 'memory-backend-memfd,id=sysmem-file,size=2G') - self.vm.add_args('--numa', 'node,memdev=sysmem-file') - self.vm.add_args('-m', '2048') - self.vm.add_args('-kernel', kernel_path, - '-initrd', initrd_path, - '-append', kernel_command_line) - self.vm.add_args('-device', - 'x-pci-proxy-dev,' - 'id=lsi1,fd='+str(proxy_sock.fileno())) - self.vm.launch() - 
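# --- Editor's illustrative sketch (not part of the original test) ---
# Minimal stand-alone illustration of the mechanism used above: an inheritable
# AF_UNIX socketpair whose fd numbers are handed to the remote (device-holding)
# and proxy QEMU processes on their command lines.  The argv lists, binary name
# and machine type are assumptions of this sketch, not launched here.
import os
import socket

proxy_sock, remote_sock = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
os.set_inheritable(proxy_sock.fileno(), True)
os.set_inheritable(remote_sock.fileno(), True)

remote_argv = ['qemu-system-x86_64', '-machine', 'x-remote', '-nodefaults',
               '-device', 'lsi53c895a,id=lsi1',
               '-object', 'x-remote-object,id=robj1,devid=lsi1,fd=%d'
                          % remote_sock.fileno()]
proxy_argv = ['qemu-system-x86_64', '-machine', 'pc',
              '-device', 'x-pci-proxy-dev,id=lsi1,fd=%d' % proxy_sock.fileno()]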
wait_for_console_pattern(self, 'as init process', - 'Kernel panic - not syncing') - exec_command(self, 'mount -t sysfs sysfs /sys') - exec_command_and_wait_for_pattern(self, - 'cat /sys/bus/pci/devices/*/uevent', - 'PCI_ID=1000:0012') - - def test_multiprocess_x86_64(self): - """ - :avocado: tags=arch:x86_64 - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' - '/linux/releases/31/Everything/x86_64/os/images' - '/pxeboot/vmlinuz') - kernel_hash = '5b6f6876e1b5bda314f93893271da0d5777b1f3c' - initrd_url = ('https://archives.fedoraproject.org/pub/archive/fedora' - '/linux/releases/31/Everything/x86_64/os/images' - '/pxeboot/initrd.img') - initrd_hash = 'dd0340a1b39bd28f88532babd4581c67649ec5b1' - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0 rdinit=/bin/bash') - machine_type = 'pc' - self.do_test(kernel_url, kernel_hash, initrd_url, initrd_hash, - kernel_command_line, machine_type) - - def test_multiprocess_aarch64(self): - """ - :avocado: tags=arch:aarch64 - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' - '/linux/releases/31/Everything/aarch64/os/images' - '/pxeboot/vmlinuz') - kernel_hash = '3505f2751e2833c681de78cee8dda1e49cabd2e8' - initrd_url = ('https://archives.fedoraproject.org/pub/archive/fedora' - '/linux/releases/31/Everything/aarch64/os/images' - '/pxeboot/initrd.img') - initrd_hash = '519a1962daf17d67fc3a9c89d45affcb399607db' - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'rdinit=/bin/bash console=ttyAMA0') - machine_type = 'virt,gic-version=3' - self.do_test(kernel_url, kernel_hash, initrd_url, initrd_hash, - kernel_command_line, machine_type) diff --git a/tests/avocado/netdev-ethtool.py b/tests/avocado/netdev-ethtool.py deleted file mode 100644 index 5f33288f811..00000000000 --- a/tests/avocado/netdev-ethtool.py +++ /dev/null @@ -1,101 +0,0 @@ -# ethtool tests for emulated network devices -# -# This test leverages ethtool's --test sequence to validate network -# device behaviour. -# -# SPDX-License-Identifier: GPL-2.0-or-late - -from avocado import skip -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - -class NetDevEthtool(QemuSystemTest): - """ - :avocado: tags=arch:x86_64 - :avocado: tags=machine:q35 - """ - - # Runs in about 17s under KVM, 19s under TCG, 25s under GCOV - timeout = 45 - - # Fetch assets from the netdev-ethtool subdir of my shared test - # images directory on fileserver.linaro.org. - def get_asset(self, name, sha1): - base_url = ('https://fileserver.linaro.org/s/' - 'kE4nCFLdQcoBF9t/download?' 
- 'path=%2Fnetdev-ethtool&files=' ) - url = base_url + name - # use explicit name rather than failing to neatly parse the - # URL into a unique one - return self.fetch_asset(name=name, locations=(url), asset_hash=sha1) - - def common_test_code(self, netdev, extra_args=None): - - # This custom kernel has drivers for all the supported network - # devices we can emulate in QEMU - kernel = self.get_asset("bzImage", - "33469d7802732d5815226166581442395cb289e2") - - rootfs = self.get_asset("rootfs.squashfs", - "9793cea7021414ae844bda51f558bd6565b50cdc") - - append = 'printk.time=0 console=ttyS0 ' - append += 'root=/dev/sr0 rootfstype=squashfs ' - - # any additional kernel tweaks for the test - if extra_args: - append += extra_args - - # finally invoke ethtool directly - append += ' init=/usr/sbin/ethtool -- -t eth1 offline' - - # add the rootfs via a readonly cdrom image - drive = f"file={rootfs},if=ide,index=0,media=cdrom" - - self.vm.add_args('-kernel', kernel, - '-append', append, - '-drive', drive, - '-device', netdev) - - self.vm.set_console(console_index=0) - self.vm.launch() - - wait_for_console_pattern(self, - "The test result is PASS", - "The test result is FAIL", - vm=None) - # no need to gracefully shutdown, just finish - self.vm.kill() - - def test_igb(self): - """ - :avocado: tags=device:igb - """ - self.common_test_code("igb") - - def test_igb_nomsi(self): - """ - :avocado: tags=device:igb - """ - self.common_test_code("igb", "pci=nomsi") - - # It seems the other popular cards we model in QEMU currently fail - # the pattern test with: - # - # pattern test failed (reg 0x00178): got 0x00000000 expected 0x00005A5A - # - # So for now we skip them. - - @skip("Incomplete reg 0x00178 support") - def test_e1000(self): - """ - :avocado: tags=device:e1000 - """ - self.common_test_code("e1000") - - @skip("Incomplete reg 0x00178 support") - def test_i82550(self): - """ - :avocado: tags=device:i82550 - """ - self.common_test_code("i82550") diff --git a/tests/avocado/pc_cpu_hotplug_props.py b/tests/avocado/pc_cpu_hotplug_props.py deleted file mode 100644 index 4bd3e026651..00000000000 --- a/tests/avocado/pc_cpu_hotplug_props.py +++ /dev/null @@ -1,35 +0,0 @@ -# -# Ensure CPU die-id can be omitted on -device -# -# Copyright (c) 2019 Red Hat Inc -# -# Author: -# Eduardo Habkost -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, see . 
-# - -from avocado_qemu import QemuSystemTest - -class OmittedCPUProps(QemuSystemTest): - """ - :avocado: tags=arch:x86_64 - :avocado: tags=cpu:qemu64 - """ - def test_no_die_id(self): - self.vm.add_args('-nodefaults', '-S') - self.vm.add_args('-smp', '1,sockets=2,cores=2,threads=2,maxcpus=8') - self.vm.add_args('-device', 'qemu64-x86_64-cpu,socket-id=1,core-id=0,thread-id=0') - self.vm.launch() - self.assertEqual(len(self.vm.cmd('query-cpus-fast')), 2) diff --git a/tests/avocado/ppc_405.py b/tests/avocado/ppc_405.py deleted file mode 100644 index 4e7e01aa762..00000000000 --- a/tests/avocado/ppc_405.py +++ /dev/null @@ -1,36 +0,0 @@ -# Test that the U-Boot firmware boots on ppc 405 machines and check the console -# -# Copyright (c) 2021 Red Hat, Inc. -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -from avocado.utils import archive -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern -from avocado_qemu import exec_command_and_wait_for_pattern - -class Ppc405Machine(QemuSystemTest): - - timeout = 90 - - def do_test_ppc405(self): - uboot_url = ('https://gitlab.com/huth/u-boot/-/raw/' - 'taihu-2021-10-09/u-boot-taihu.bin') - uboot_hash = ('3208940e908a5edc7c03eab072c60f0dcfadc2ab'); - file_path = self.fetch_asset(uboot_url, asset_hash=uboot_hash) - self.vm.set_console(console_index=1) - self.vm.add_args('-bios', file_path) - self.vm.launch() - wait_for_console_pattern(self, 'AMCC PPC405EP Evaluation Board') - exec_command_and_wait_for_pattern(self, 'reset', 'AMCC PowerPC 405EP') - - def test_ppc_ref405ep(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:ref405ep - :avocado: tags=cpu:405ep - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - self.do_test_ppc405() diff --git a/tests/avocado/ppc_74xx.py b/tests/avocado/ppc_74xx.py deleted file mode 100644 index f54757c243f..00000000000 --- a/tests/avocado/ppc_74xx.py +++ /dev/null @@ -1,136 +0,0 @@ -# Smoke tests for 74xx cpus (aka G4). -# -# Copyright (c) 2021, IBM Corp. -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. 
- -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - -class ppc74xxCpu(QemuSystemTest): - """ - :avocado: tags=arch:ppc - :avocado: tags=accel:tcg - """ - timeout = 5 - - def test_ppc_7400(self): - """ - :avocado: tags=cpu:7400 - """ - self.require_accelerator("tcg") - self.vm.set_console() - self.vm.launch() - wait_for_console_pattern(self, '>> OpenBIOS') - wait_for_console_pattern(self, '>> CPU type PowerPC,G4') - - def test_ppc_7410(self): - """ - :avocado: tags=cpu:7410 - """ - self.require_accelerator("tcg") - self.vm.set_console() - self.vm.launch() - wait_for_console_pattern(self, '>> OpenBIOS') - wait_for_console_pattern(self, '>> CPU type PowerPC,74xx') - - def test_ppc_7441(self): - """ - :avocado: tags=cpu:7441 - """ - self.require_accelerator("tcg") - self.vm.set_console() - self.vm.launch() - wait_for_console_pattern(self, '>> OpenBIOS') - wait_for_console_pattern(self, '>> CPU type PowerPC,G4') - - def test_ppc_7445(self): - """ - :avocado: tags=cpu:7445 - """ - self.require_accelerator("tcg") - self.vm.set_console() - self.vm.launch() - wait_for_console_pattern(self, '>> OpenBIOS') - wait_for_console_pattern(self, '>> CPU type PowerPC,G4') - - def test_ppc_7447(self): - """ - :avocado: tags=cpu:7447 - """ - self.require_accelerator("tcg") - self.vm.set_console() - self.vm.launch() - wait_for_console_pattern(self, '>> OpenBIOS') - wait_for_console_pattern(self, '>> CPU type PowerPC,G4') - - def test_ppc_7447a(self): - """ - :avocado: tags=cpu:7447a - """ - self.require_accelerator("tcg") - self.vm.set_console() - self.vm.launch() - wait_for_console_pattern(self, '>> OpenBIOS') - wait_for_console_pattern(self, '>> CPU type PowerPC,G4') - - def test_ppc_7448(self): - """ - :avocado: tags=cpu:7448 - """ - self.require_accelerator("tcg") - self.vm.set_console() - self.vm.launch() - wait_for_console_pattern(self, '>> OpenBIOS') - wait_for_console_pattern(self, '>> CPU type PowerPC,MPC86xx') - - def test_ppc_7450(self): - """ - :avocado: tags=cpu:7450 - """ - self.require_accelerator("tcg") - self.vm.set_console() - self.vm.launch() - wait_for_console_pattern(self, '>> OpenBIOS') - wait_for_console_pattern(self, '>> CPU type PowerPC,G4') - - def test_ppc_7451(self): - """ - :avocado: tags=cpu:7451 - """ - self.require_accelerator("tcg") - self.vm.set_console() - self.vm.launch() - wait_for_console_pattern(self, '>> OpenBIOS') - wait_for_console_pattern(self, '>> CPU type PowerPC,G4') - - def test_ppc_7455(self): - """ - :avocado: tags=cpu:7455 - """ - self.require_accelerator("tcg") - self.vm.set_console() - self.vm.launch() - wait_for_console_pattern(self, '>> OpenBIOS') - wait_for_console_pattern(self, '>> CPU type PowerPC,G4') - - def test_ppc_7457(self): - """ - :avocado: tags=cpu:7457 - """ - self.require_accelerator("tcg") - self.vm.set_console() - self.vm.launch() - wait_for_console_pattern(self, '>> OpenBIOS') - wait_for_console_pattern(self, '>> CPU type PowerPC,G4') - - def test_ppc_7457a(self): - """ - :avocado: tags=cpu:7457a - """ - self.require_accelerator("tcg") - self.vm.set_console() - self.vm.launch() - wait_for_console_pattern(self, '>> OpenBIOS') - wait_for_console_pattern(self, '>> CPU type PowerPC,G4') diff --git a/tests/avocado/ppc_amiga.py b/tests/avocado/ppc_amiga.py deleted file mode 100644 index b6f866f91d9..00000000000 --- a/tests/avocado/ppc_amiga.py +++ /dev/null @@ -1,38 +0,0 @@ -# Test AmigaNG boards -# -# Copyright (c) 2023 BALATON Zoltan -# -# This work is licensed under the terms of the GNU GPL, version 2 
or -# later. See the COPYING file in the top-level directory. - -from avocado.utils import archive -from avocado.utils import process -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - -class AmigaOneMachine(QemuSystemTest): - - timeout = 90 - - def test_ppc_amigaone(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:amigaone - :avocado: tags=device:articia - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - tar_name = 'A1Firmware_Floppy_05-Mar-2005.zip' - tar_url = ('https://www.hyperion-entertainment.com/index.php/' - 'downloads?view=download&format=raw&file=25') - tar_hash = 'c52e59bc73e31d8bcc3cc2106778f7ac84f6c755' - zip_file = self.fetch_asset(tar_name, locations=tar_url, - asset_hash=tar_hash) - archive.extract(zip_file, self.workdir) - cmd = f"tail -c 524288 {self.workdir}/floppy_edition/updater.image >{self.workdir}/u-boot-amigaone.bin" - process.run(cmd, shell=True) - - self.vm.set_console() - self.vm.add_args('-bios', self.workdir + '/u-boot-amigaone.bin') - self.vm.launch() - wait_for_console_pattern(self, 'FLASH:') diff --git a/tests/avocado/ppc_bamboo.py b/tests/avocado/ppc_bamboo.py deleted file mode 100644 index a81be3d6088..00000000000 --- a/tests/avocado/ppc_bamboo.py +++ /dev/null @@ -1,42 +0,0 @@ -# Test that Linux kernel boots on the ppc bamboo board and check the console -# -# Copyright (c) 2021 Red Hat -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -from avocado.utils import archive -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern -from avocado_qemu import exec_command_and_wait_for_pattern - -class BambooMachine(QemuSystemTest): - - timeout = 90 - - def test_ppc_bamboo(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:bamboo - :avocado: tags=cpu:440epb - :avocado: tags=device:rtl8139 - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - self.require_netdev('user') - tar_url = ('http://landley.net/aboriginal/downloads/binaries/' - 'system-image-powerpc-440fp.tar.gz') - tar_hash = '53e5f16414b195b82d2c70272f81c2eedb39bad9' - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - archive.extract(file_path, self.workdir) - self.vm.set_console() - self.vm.add_args('-kernel', self.workdir + - '/system-image-powerpc-440fp/linux', - '-initrd', self.workdir + - '/system-image-powerpc-440fp/rootfs.cpio.gz', - '-nic', 'user,model=rtl8139,restrict=on') - self.vm.launch() - wait_for_console_pattern(self, 'Type exit when done') - exec_command_and_wait_for_pattern(self, 'ping 10.0.2.2', - '10.0.2.2 is alive!') - exec_command_and_wait_for_pattern(self, 'halt', 'System Halted') diff --git a/tests/avocado/ppc_hv_tests.py b/tests/avocado/ppc_hv_tests.py deleted file mode 100644 index bf8822bb97b..00000000000 --- a/tests/avocado/ppc_hv_tests.py +++ /dev/null @@ -1,206 +0,0 @@ -# Tests that specifically try to exercise hypervisor features of the -# target machines. powernv supports the Power hypervisor ISA, and -# pseries supports the nested-HV hypervisor spec. -# -# Copyright (c) 2023 IBM Corporation -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. 
- -from avocado import skipIf, skipUnless -from avocado.utils import archive -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern, exec_command -import os -import time -import subprocess -from datetime import datetime - -deps = ["xorriso"] # dependent tools needed in the test setup/box. - -def which(tool): - """ looks up the full path for @tool, returns None if not found - or if @tool does not have executable permissions. - """ - paths=os.getenv('PATH') - for p in paths.split(os.path.pathsep): - p = os.path.join(p, tool) - if os.path.exists(p) and os.access(p, os.X_OK): - return p - return None - -def missing_deps(): - """ returns True if any of the test dependent tools are absent. - """ - for dep in deps: - if which(dep) is None: - return True - return False - -# Alpine is a light weight distro that supports QEMU. These tests boot -# that on the machine then run a QEMU guest inside it in KVM mode, -# that runs the same Alpine distro image. -# QEMU packages are downloaded and installed on each test. That's not a -# large download, but it may be more polite to create qcow2 image with -# QEMU already installed and use that. -# XXX: The order of these tests seems to matter, see git blame. -@skipIf(missing_deps(), 'dependencies (%s) not installed' % ','.join(deps)) -@skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test sometimes gets stuck due to console handling problem') -@skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited') -@skipUnless(os.getenv('SPEED') == 'slow', 'runtime limited') -class HypervisorTest(QemuSystemTest): - - timeout = 1000 - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 ' - panic_message = 'Kernel panic - not syncing' - good_message = 'VFS: Cannot open root device' - - def extract_from_iso(self, iso, path): - """ - Extracts a file from an iso file into the test workdir - - :param iso: path to the iso file - :param path: path within the iso file of the file to be extracted - :returns: path of the extracted file - """ - filename = os.path.basename(path) - - cwd = os.getcwd() - os.chdir(self.workdir) - - with open(filename, "w") as outfile: - cmd = "xorriso -osirrox on -indev %s -cpx %s %s" % (iso, path, filename) - subprocess.run(cmd.split(), - stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) - - os.chdir(cwd) - - # Return complete path to extracted file. Because callers to - # extract_from_iso() specify 'path' with a leading slash, it is - # necessary to use os.path.relpath() as otherwise os.path.join() - # interprets it as an absolute path and drops the self.workdir part. 
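# --- Editor's illustrative sketch (not part of the original test) ---
# The comment above relies on standard POSIX os.path.join() behaviour: a
# component with a leading slash is treated as absolute, so everything before
# it is dropped.  The directory and file names are placeholders for this sketch.
import os.path
assert os.path.join('/tmp/workdir', '/boot/vmlinuz-lts') == '/boot/vmlinuz-lts'
assert os.path.join('/tmp/workdir', 'vmlinuz-lts') == '/tmp/workdir/vmlinuz-lts'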
- return os.path.normpath(os.path.join(self.workdir, filename)) - - def setUp(self): - super().setUp() - - iso_url = ('https://dl-cdn.alpinelinux.org/alpine/v3.18/releases/ppc64le/alpine-standard-3.18.4-ppc64le.iso') - - # Alpine use sha256 so I recalculated this myself - iso_sha256 = 'c26b8d3e17c2f3f0fed02b4b1296589c2390e6d5548610099af75300edd7b3ff' - iso_path = self.fetch_asset(iso_url, asset_hash=iso_sha256, - algorithm = "sha256") - - self.iso_path = iso_path - self.vmlinuz = self.extract_from_iso(iso_path, '/boot/vmlinuz-lts') - self.initramfs = self.extract_from_iso(iso_path, '/boot/initramfs-lts') - - def do_start_alpine(self): - self.vm.set_console() - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE - self.vm.add_args("-kernel", self.vmlinuz) - self.vm.add_args("-initrd", self.initramfs) - self.vm.add_args("-smp", "4", "-m", "2g") - self.vm.add_args("-drive", f"file={self.iso_path},format=raw,if=none,id=drive0") - - self.vm.launch() - wait_for_console_pattern(self, 'Welcome to Alpine Linux 3.18') - exec_command(self, 'root') - wait_for_console_pattern(self, 'localhost login:') - wait_for_console_pattern(self, 'You may change this message by editing /etc/motd.') - # If the time is wrong, SSL certificates can fail. - exec_command(self, 'date -s "' + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S' + '"')) - exec_command(self, 'setup-alpine -qe') - wait_for_console_pattern(self, 'Updating repository indexes... done.') - - def do_stop_alpine(self): - exec_command(self, 'poweroff') - wait_for_console_pattern(self, 'alpine:~#') - self.vm.wait() - - def do_setup_kvm(self): - exec_command(self, 'echo http://dl-cdn.alpinelinux.org/alpine/v3.18/main > /etc/apk/repositories') - wait_for_console_pattern(self, 'alpine:~#') - exec_command(self, 'echo http://dl-cdn.alpinelinux.org/alpine/v3.18/community >> /etc/apk/repositories') - wait_for_console_pattern(self, 'alpine:~#') - exec_command(self, 'apk update') - wait_for_console_pattern(self, 'alpine:~#') - exec_command(self, 'apk add qemu-system-ppc64') - wait_for_console_pattern(self, 'alpine:~#') - exec_command(self, 'modprobe kvm-hv') - wait_for_console_pattern(self, 'alpine:~#') - - # This uses the host's block device as the source file for guest block - # device for install media. This is a bit hacky but allows reuse of the - # iso without having a passthrough filesystem configured. - def do_test_kvm(self, hpt=False): - if hpt: - append = 'disable_radix' - else: - append = '' - exec_command(self, 'qemu-system-ppc64 -nographic -smp 2 -m 1g ' - '-machine pseries,x-vof=on,accel=kvm ' - '-machine cap-cfpc=broken,cap-sbbc=broken,' - 'cap-ibs=broken,cap-ccf-assist=off ' - '-drive file=/dev/nvme0n1,format=raw,readonly=on ' - '-initrd /media/nvme0n1/boot/initramfs-lts ' - '-kernel /media/nvme0n1/boot/vmlinuz-lts ' - '-append \'usbcore.nousb ' + append + '\'') - # Alpine 3.18 kernel seems to crash in XHCI USB driver. 
- wait_for_console_pattern(self, 'Welcome to Alpine Linux 3.18') - exec_command(self, 'root') - wait_for_console_pattern(self, 'localhost login:') - wait_for_console_pattern(self, 'You may change this message by editing /etc/motd.') - exec_command(self, 'poweroff >& /dev/null') - wait_for_console_pattern(self, 'localhost:~#') - wait_for_console_pattern(self, 'reboot: Power down') - time.sleep(1) - exec_command(self, '') - wait_for_console_pattern(self, 'alpine:~#') - - def test_hv_pseries(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - self.vm.add_args("-accel", "tcg,thread=multi") - self.vm.add_args('-device', 'nvme,serial=1234,drive=drive0') - self.vm.add_args("-machine", "x-vof=on,cap-nested-hv=on") - self.do_start_alpine() - self.do_setup_kvm() - self.do_test_kvm() - self.do_stop_alpine() - - def test_hv_pseries_kvm(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - :avocado: tags=accel:kvm - """ - self.require_accelerator("kvm") - self.vm.add_args("-accel", "kvm") - self.vm.add_args('-device', 'nvme,serial=1234,drive=drive0') - self.vm.add_args("-machine", "x-vof=on,cap-nested-hv=on,cap-ccf-assist=off") - self.do_start_alpine() - self.do_setup_kvm() - self.do_test_kvm() - self.do_stop_alpine() - - def test_hv_powernv(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:powernv - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - self.vm.add_args("-accel", "tcg,thread=multi") - self.vm.add_args('-device', 'nvme,bus=pcie.2,addr=0x0,serial=1234,drive=drive0', - '-device', 'e1000e,netdev=net0,mac=C0:FF:EE:00:00:02,bus=pcie.0,addr=0x0', - '-netdev', 'user,id=net0,hostfwd=::20022-:22,hostname=alpine') - self.do_start_alpine() - self.do_setup_kvm() - self.do_test_kvm() - self.do_test_kvm(True) - self.do_stop_alpine() diff --git a/tests/avocado/ppc_mpc8544ds.py b/tests/avocado/ppc_mpc8544ds.py deleted file mode 100644 index b599fb1cc9b..00000000000 --- a/tests/avocado/ppc_mpc8544ds.py +++ /dev/null @@ -1,34 +0,0 @@ -# Test that Linux kernel boots on ppc machines and check the console -# -# Copyright (c) 2018, 2020 Red Hat, Inc. -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. 
- -from avocado.utils import archive -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - -class Mpc8544dsMachine(QemuSystemTest): - - timeout = 90 - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' - panic_message = 'Kernel panic - not syncing' - - def test_ppc_mpc8544ds(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:mpc8544ds - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day04.tar.xz') - tar_hash = 'f46724d281a9f30fa892d458be7beb7d34dc25f9' - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - archive.extract(file_path, self.workdir) - self.vm.set_console() - self.vm.add_args('-kernel', self.workdir + '/creek/creek.bin') - self.vm.launch() - wait_for_console_pattern(self, 'QEMU advent calendar 2020', - self.panic_message) diff --git a/tests/avocado/ppc_powernv.py b/tests/avocado/ppc_powernv.py deleted file mode 100644 index 4342941d5db..00000000000 --- a/tests/avocado/ppc_powernv.py +++ /dev/null @@ -1,102 +0,0 @@ -# Test that Linux kernel boots on ppc powernv machines and check the console -# -# Copyright (c) 2018, 2020 Red Hat, Inc. -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -from avocado.utils import archive -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - -class powernvMachine(QemuSystemTest): - - timeout = 90 - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 ' - panic_message = 'Kernel panic - not syncing' - good_message = 'VFS: Cannot open root device' - - def do_test_linux_boot(self, command_line = KERNEL_COMMON_COMMAND_LINE): - self.require_accelerator("tcg") - kernel_url = ('https://archives.fedoraproject.org/pub/archive' - '/fedora-secondary/releases/29/Everything/ppc64le/os' - '/ppc/ppc64/vmlinuz') - kernel_hash = '3fe04abfc852b66653b8c3c897a59a689270bc77' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - self.vm.set_console() - self.vm.add_args('-kernel', kernel_path, - '-append', command_line) - self.vm.launch() - - def test_linux_boot(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:powernv - :avocado: tags=accel:tcg - """ - - self.do_test_linux_boot() - console_pattern = 'VFS: Cannot open root device' - wait_for_console_pattern(self, console_pattern, self.panic_message) - - def test_linux_smp_boot(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:powernv - :avocado: tags=accel:tcg - """ - - self.vm.add_args('-smp', '4') - self.do_test_linux_boot() - console_pattern = 'smp: Brought up 1 node, 4 CPUs' - wait_for_console_pattern(self, console_pattern, self.panic_message) - wait_for_console_pattern(self, self.good_message, self.panic_message) - - def test_linux_smp_hpt_boot(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:powernv - :avocado: tags=accel:tcg - """ - - self.vm.add_args('-smp', '4') - self.do_test_linux_boot(self.KERNEL_COMMON_COMMAND_LINE + - 'disable_radix') - console_pattern = 'smp: Brought up 1 node, 4 CPUs' - wait_for_console_pattern(self, 'hash-mmu: Initializing hash mmu', - self.panic_message) - wait_for_console_pattern(self, console_pattern, self.panic_message) - wait_for_console_pattern(self, self.good_message, self.panic_message) - - def test_linux_smt_boot(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:powernv - :avocado: tags=accel:tcg - """ - - 
self.vm.add_args('-smp', '4,threads=4') - self.do_test_linux_boot() - console_pattern = 'CPU maps initialized for 4 threads per core' - wait_for_console_pattern(self, console_pattern, self.panic_message) - console_pattern = 'smp: Brought up 1 node, 4 CPUs' - wait_for_console_pattern(self, console_pattern, self.panic_message) - wait_for_console_pattern(self, self.good_message, self.panic_message) - - def test_linux_big_boot(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:powernv - :avocado: tags=accel:tcg - """ - - self.vm.add_args('-smp', '16,threads=4,cores=2,sockets=2') - - # powernv does not support NUMA - self.do_test_linux_boot() - console_pattern = 'CPU maps initialized for 4 threads per core' - wait_for_console_pattern(self, console_pattern, self.panic_message) - console_pattern = 'smp: Brought up 2 nodes, 16 CPUs' - wait_for_console_pattern(self, console_pattern, self.panic_message) - wait_for_console_pattern(self, self.good_message, self.panic_message) diff --git a/tests/avocado/ppc_prep_40p.py b/tests/avocado/ppc_prep_40p.py deleted file mode 100644 index d4f1eb7e1d8..00000000000 --- a/tests/avocado/ppc_prep_40p.py +++ /dev/null @@ -1,85 +0,0 @@ -# Functional test that boots a PReP/40p machine and checks its serial console. -# -# Copyright (c) Philippe Mathieu-Daudé -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os - -from avocado import skipUnless -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - - -class IbmPrep40pMachine(QemuSystemTest): - - timeout = 60 - - # 12H0455 PPS Firmware Licensed Materials - # Property of IBM (C) Copyright IBM Corp. 1994. - # All rights reserved. - # U.S. Government Users Restricted Rights - Use, duplication or disclosure - # restricted by GSA ADP Schedule Contract with IBM Corp. 
- @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') - def test_factory_firmware_and_netbsd(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:40p - :avocado: tags=os:netbsd - :avocado: tags=slowness:high - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - bios_url = ('http://ftpmirror.your.org/pub/misc/' - 'ftp.software.ibm.com/rs6000/firmware/' - '7020-40p/P12H0456.IMG') - bios_hash = '1775face4e6dc27f3a6ed955ef6eb331bf817f03' - bios_path = self.fetch_asset(bios_url, asset_hash=bios_hash) - drive_url = ('https://archive.netbsd.org/pub/NetBSD-archive/' - 'NetBSD-4.0/prep/installation/floppy/generic_com0.fs') - drive_hash = 'dbcfc09912e71bd5f0d82c7c1ee43082fb596ceb' - drive_path = self.fetch_asset(drive_url, asset_hash=drive_hash) - - self.vm.set_console() - self.vm.add_args('-bios', bios_path, - '-fda', drive_path) - self.vm.launch() - os_banner = 'NetBSD 4.0 (GENERIC) #0: Sun Dec 16 00:49:40 PST 2007' - wait_for_console_pattern(self, os_banner) - wait_for_console_pattern(self, 'Model: IBM PPS Model 6015') - - def test_openbios_192m(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:40p - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - self.vm.set_console() - self.vm.add_args('-m', '192') # test fw_cfg - - self.vm.launch() - wait_for_console_pattern(self, '>> OpenBIOS') - wait_for_console_pattern(self, '>> Memory: 192M') - wait_for_console_pattern(self, '>> CPU type PowerPC,604') - - def test_openbios_and_netbsd(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:40p - :avocado: tags=os:netbsd - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - drive_url = ('https://archive.netbsd.org/pub/NetBSD-archive/' - 'NetBSD-7.1.2/iso/NetBSD-7.1.2-prep.iso') - drive_hash = 'ac6fa2707d888b36d6fa64de6e7fe48e' - drive_path = self.fetch_asset(drive_url, asset_hash=drive_hash, - algorithm='md5') - self.vm.set_console() - self.vm.add_args('-cdrom', drive_path, - '-boot', 'd') - - self.vm.launch() - wait_for_console_pattern(self, 'NetBSD/prep BOOT, Revision 1.9') diff --git a/tests/avocado/ppc_pseries.py b/tests/avocado/ppc_pseries.py deleted file mode 100644 index 74aaa4ac4ad..00000000000 --- a/tests/avocado/ppc_pseries.py +++ /dev/null @@ -1,110 +0,0 @@ -# Test that Linux kernel boots on ppc machines and check the console -# -# Copyright (c) 2018, 2020 Red Hat, Inc. -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. 
- -from avocado.utils import archive -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - -class pseriesMachine(QemuSystemTest): - - timeout = 90 - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 ' - panic_message = 'Kernel panic - not syncing' - good_message = 'VFS: Cannot open root device' - - def do_test_ppc64_linux_boot(self, kernel_command_line = KERNEL_COMMON_COMMAND_LINE): - kernel_url = ('https://archives.fedoraproject.org/pub/archive' - '/fedora-secondary/releases/29/Everything/ppc64le/os' - '/ppc/ppc64/vmlinuz') - kernel_hash = '3fe04abfc852b66653b8c3c897a59a689270bc77' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - self.vm.set_console() - self.vm.add_args('-kernel', kernel_path, - '-append', kernel_command_line) - self.vm.launch() - - def test_ppc64_vof_linux_boot(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - """ - - self.vm.add_args('-machine', 'x-vof=on') - self.do_test_ppc64_linux_boot() - console_pattern = 'VFS: Cannot open root device' - wait_for_console_pattern(self, console_pattern, self.panic_message) - - def test_ppc64_linux_boot(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - """ - - self.do_test_ppc64_linux_boot() - console_pattern = 'VFS: Cannot open root device' - wait_for_console_pattern(self, console_pattern, self.panic_message) - - def test_ppc64_linux_smp_boot(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - """ - - self.vm.add_args('-smp', '4') - self.do_test_ppc64_linux_boot() - console_pattern = 'smp: Brought up 1 node, 4 CPUs' - wait_for_console_pattern(self, console_pattern, self.panic_message) - wait_for_console_pattern(self, self.good_message, self.panic_message) - - def test_ppc64_linux_hpt_smp_boot(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - """ - - self.vm.add_args('-smp', '4') - self.do_test_ppc64_linux_boot(self.KERNEL_COMMON_COMMAND_LINE + - 'disable_radix') - console_pattern = 'smp: Brought up 1 node, 4 CPUs' - wait_for_console_pattern(self, 'hash-mmu: Initializing hash mmu', - self.panic_message) - wait_for_console_pattern(self, console_pattern, self.panic_message) - wait_for_console_pattern(self, self.good_message, self.panic_message) - - def test_ppc64_linux_smt_boot(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - """ - - self.vm.add_args('-smp', '4,threads=4') - self.do_test_ppc64_linux_boot() - console_pattern = 'CPU maps initialized for 4 threads per core' - wait_for_console_pattern(self, console_pattern, self.panic_message) - console_pattern = 'smp: Brought up 1 node, 4 CPUs' - wait_for_console_pattern(self, console_pattern, self.panic_message) - wait_for_console_pattern(self, self.good_message, self.panic_message) - - def test_ppc64_linux_big_boot(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - """ - - self.vm.add_args('-smp', '16,threads=4,cores=2,sockets=2') - self.vm.add_args('-m', '512M', - '-object', 'memory-backend-ram,size=256M,id=m0', - '-object', 'memory-backend-ram,size=256M,id=m1') - self.vm.add_args('-numa', 'node,nodeid=0,memdev=m0') - self.vm.add_args('-numa', 'node,nodeid=1,memdev=m1') - self.do_test_ppc64_linux_boot() - console_pattern = 'CPU maps initialized for 4 threads per core' - wait_for_console_pattern(self, console_pattern, self.panic_message) - console_pattern = 'smp: Brought up 2 nodes, 16 CPUs' - wait_for_console_pattern(self, console_pattern, self.panic_message) - 
wait_for_console_pattern(self, self.good_message, self.panic_message) diff --git a/tests/avocado/ppc_virtex_ml507.py b/tests/avocado/ppc_virtex_ml507.py deleted file mode 100644 index a73f8ae396a..00000000000 --- a/tests/avocado/ppc_virtex_ml507.py +++ /dev/null @@ -1,36 +0,0 @@ -# Test that Linux kernel boots on ppc machines and check the console -# -# Copyright (c) 2018, 2020 Red Hat, Inc. -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -from avocado.utils import archive -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - -class VirtexMl507Machine(QemuSystemTest): - - timeout = 90 - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' - panic_message = 'Kernel panic - not syncing' - - def test_ppc_virtex_ml507(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:virtex-ml507 - :avocado: tags=accel:tcg - """ - self.require_accelerator("tcg") - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day08.tar.xz') - tar_hash = '74c68f5af7a7b8f21c03097b298f3bb77ff52c1f' - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - archive.extract(file_path, self.workdir) - self.vm.set_console() - self.vm.add_args('-kernel', self.workdir + '/hippo/hippo.linux', - '-dtb', self.workdir + '/hippo/virtex440-ml507.dtb', - '-m', '512') - self.vm.launch() - wait_for_console_pattern(self, 'QEMU advent calendar 2020', - self.panic_message) diff --git a/tests/avocado/replay_kernel.py b/tests/avocado/replay_kernel.py deleted file mode 100644 index 232d287c272..00000000000 --- a/tests/avocado/replay_kernel.py +++ /dev/null @@ -1,550 +0,0 @@ -# Record/replay test that boots a Linux kernel -# -# Copyright (c) 2020 ISP RAS -# -# Author: -# Pavel Dovgalyuk -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os -import lzma -import shutil -import logging -import time - -from avocado import skip -from avocado import skipUnless -from avocado import skipUnless -from avocado_qemu import wait_for_console_pattern -from avocado.utils import archive -from avocado.utils import process -from boot_linux_console import LinuxKernelTest - -class ReplayKernelBase(LinuxKernelTest): - """ - Boots a Linux kernel in record mode and checks that the console - is operational and the kernel command line is properly passed - from QEMU to the kernel. - Then replays the same scenario and verifies, that QEMU correctly - terminates. 
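# --- Editor's illustrative sketch (not part of the original test) ---
# Stripped of the harness, run_vm() below boots the same kernel twice with
# identical arguments, changing only the rr= mode of -icount.  The kernel
# path, command line, rrfile name and shift value here are placeholders.
common = ['-kernel', 'vmlinuz',
          '-append', 'printk.time=1 panic=-1 console=ttyS0',
          '-net', 'none', '-no-reboot']
record_args = ['-icount', 'shift=7,rr=record,rrfile=replay.bin'] + common
replay_args = ['-icount', 'shift=7,rr=replay,rrfile=replay.bin'] + common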
- """ - - timeout = 120 - KERNEL_COMMON_COMMAND_LINE = 'printk.time=1 panic=-1 ' - - def run_vm(self, kernel_path, kernel_command_line, console_pattern, - record, shift, args, replay_path): - # icount requires TCG to be available - self.require_accelerator('tcg') - - logger = logging.getLogger('replay') - start_time = time.time() - vm = self.get_vm() - vm.set_console() - if record: - logger.info('recording the execution...') - mode = 'record' - else: - logger.info('replaying the execution...') - mode = 'replay' - vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s' % - (shift, mode, replay_path), - '-kernel', kernel_path, - '-append', kernel_command_line, - '-net', 'none', - '-no-reboot') - if args: - vm.add_args(*args) - vm.launch() - self.wait_for_console_pattern(console_pattern, vm) - if record: - vm.shutdown() - logger.info('finished the recording with log size %s bytes' - % os.path.getsize(replay_path)) - else: - vm.wait() - logger.info('successfully finished the replay') - elapsed = time.time() - start_time - logger.info('elapsed time %.2f sec' % elapsed) - return elapsed - - def run_rr(self, kernel_path, kernel_command_line, console_pattern, - shift=7, args=None): - replay_path = os.path.join(self.workdir, 'replay.bin') - t1 = self.run_vm(kernel_path, kernel_command_line, console_pattern, - True, shift, args, replay_path) - t2 = self.run_vm(kernel_path, kernel_command_line, console_pattern, - False, shift, args, replay_path) - logger = logging.getLogger('replay') - logger.info('replay overhead {:.2%}'.format(t2 / t1 - 1)) - -class ReplayKernelNormal(ReplayKernelBase): - - def test_i386_pc(self): - """ - :avocado: tags=arch:i386 - :avocado: tags=machine:pc - """ - kernel_url = ('https://storage.tuxboot.com/20230331/i386/bzImage') - kernel_hash = 'a3e5b32a354729e65910f5a1ffcda7c14a6c12a55e8213fb86e277f1b76ed956' - kernel_path = self.fetch_asset(kernel_url, - asset_hash=kernel_hash, - algorithm = "sha256") - - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' - console_pattern = 'VFS: Cannot open root device' - - self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) - - # See https://gitlab.com/qemu-project/qemu/-/issues/2094 - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test sometimes gets stuck') - def test_x86_64_pc(self): - """ - :avocado: tags=arch:x86_64 - :avocado: tags=machine:pc - :avocado: tags=flaky - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' - '/linux/releases/29/Everything/x86_64/os/images/pxeboot' - '/vmlinuz') - kernel_hash = '23bebd2680757891cf7adedb033532163a792495' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' - console_pattern = 'VFS: Cannot open root device' - - self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) - - def test_mips_malta(self): - """ - :avocado: tags=arch:mips - :avocado: tags=machine:malta - :avocado: tags=endian:big - """ - deb_url = ('http://snapshot.debian.org/archive/debian/' - '20130217T032700Z/pool/main/l/linux-2.6/' - 'linux-image-2.6.32-5-4kc-malta_2.6.32-48_mips.deb') - deb_hash = 'a8cfc28ad8f45f54811fc6cf74fc43ffcfe0ba04' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinux-2.6.32-5-4kc-malta') - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' - console_pattern = 'Kernel command line: %s' % kernel_command_line - - self.run_rr(kernel_path, 
kernel_command_line, console_pattern, shift=5) - - def test_mips64el_malta(self): - """ - This test requires the ar tool to extract "data.tar.gz" from - the Debian package. - - The kernel can be rebuilt using this Debian kernel source [1] and - following the instructions on [2]. - - [1] http://snapshot.debian.org/package/linux-2.6/2.6.32-48/ - #linux-source-2.6.32_2.6.32-48 - [2] https://kernel-team.pages.debian.net/kernel-handbook/ - ch-common-tasks.html#s-common-official - - :avocado: tags=arch:mips64el - :avocado: tags=machine:malta - """ - deb_url = ('http://snapshot.debian.org/archive/debian/' - '20130217T032700Z/pool/main/l/linux-2.6/' - 'linux-image-2.6.32-5-5kc-malta_2.6.32-48_mipsel.deb') - deb_hash = '1aaec92083bf22fda31e0d27fa8d9a388e5fc3d5' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinux-2.6.32-5-5kc-malta') - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) - - def test_aarch64_virt(self): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=machine:virt - :avocado: tags=cpu:cortex-a53 - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' - '/linux/releases/29/Everything/aarch64/os/images/pxeboot' - '/vmlinuz') - kernel_hash = '8c73e469fc6ea06a58dc83a628fc695b693b8493' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyAMA0') - console_pattern = 'VFS: Cannot open root device' - - self.run_rr(kernel_path, kernel_command_line, console_pattern) - - def test_arm_virt(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:virt - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' - '/linux/releases/29/Everything/armhfp/os/images/pxeboot' - '/vmlinuz') - kernel_hash = 'e9826d741b4fb04cadba8d4824d1ed3b7fb8b4d4' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyAMA0') - console_pattern = 'VFS: Cannot open root device' - - self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1) - - def test_arm_cubieboard_initrd(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:cubieboard - """ - deb_url = ('https://apt.armbian.com/pool/main/l/' - 'linux-6.6.16/linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb') - deb_hash = 'f7c3c8c5432f765445dc6e7eab02f3bbe668256b' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinuz-6.6.16-current-sunxi') - dtb_path = '/usr/lib/linux-image-6.6.16-current-sunxi/sun4i-a10-cubieboard.dtb' - dtb_path = self.extract_from_deb(deb_path, dtb_path) - initrd_url = ('https://github.com/groeck/linux-build-test/raw/' - '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' - 'arm/rootfs-armv5.cpio.gz') - initrd_hash = '2b50f1873e113523967806f4da2afe385462ff9b' - initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - initrd_path = os.path.join(self.workdir, 'rootfs.cpio') - archive.gzip_uncompress(initrd_path_gz, initrd_path) - - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0,115200 ' - 'usbcore.nousb ' - 'panic=-1 noreboot') - console_pattern = 'Boot successful.' 
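For reference, a minimal sketch (illustrative only, not part of the deleted test) of how the record and replay invocations assembled by run_vm() above differ solely in the rr= mode of the -icount option; the shift value and rrfile path must be identical across both runs:

# Build the -icount arguments for a record/replay pair; only rr= changes.
def icount_args(shift, rrfile, record):
    mode = 'record' if record else 'replay'
    return ['-icount', 'shift=%s,rr=%s,rrfile=%s' % (shift, mode, rrfile)]

print(icount_args(5, 'replay.bin', record=True))   # recording pass
print(icount_args(5, 'replay.bin', record=False))  # replaying pass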
- self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1, - args=('-dtb', dtb_path, - '-initrd', initrd_path, - '-no-reboot')) - - def test_s390x_s390_ccw_virtio(self): - """ - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive' - '/fedora-secondary/releases/29/Everything/s390x/os/images' - '/kernel.img') - kernel_hash = 'e8e8439103ef8053418ef062644ffd46a7919313' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=sclp0' - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=9) - - def test_alpha_clipper(self): - """ - :avocado: tags=arch:alpha - :avocado: tags=machine:clipper - """ - kernel_url = ('http://archive.debian.org/debian/dists/lenny/main/' - 'installer-alpha/20090123lenny10/images/cdrom/vmlinuz') - kernel_hash = '3a943149335529e2ed3e74d0d787b85fb5671ba3' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - uncompressed_kernel = archive.uncompress(kernel_path, self.workdir) - - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.run_rr(uncompressed_kernel, kernel_command_line, console_pattern, shift=9, - args=('-nodefaults', )) - - def test_ppc64_pseries(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - :avocado: tags=accel:tcg - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive' - '/fedora-secondary/releases/29/Everything/ppc64le/os' - '/ppc/ppc64/vmlinuz') - kernel_hash = '3fe04abfc852b66653b8c3c897a59a689270bc77' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=hvc0' - console_pattern = 'VFS: Cannot open root device' - self.run_rr(kernel_path, kernel_command_line, console_pattern) - - def test_ppc64_powernv(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:powernv - :avocado: tags=accel:tcg - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive' - '/fedora-secondary/releases/29/Everything/ppc64le/os' - '/ppc/ppc64/vmlinuz') - kernel_hash = '3fe04abfc852b66653b8c3c897a59a689270bc77' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + \ - 'console=tty0 console=hvc0' - console_pattern = 'VFS: Cannot open root device' - self.run_rr(kernel_path, kernel_command_line, console_pattern) - - def test_m68k_q800(self): - """ - :avocado: tags=arch:m68k - :avocado: tags=machine:q800 - """ - deb_url = ('https://snapshot.debian.org/archive/debian-ports' - '/20191021T083923Z/pool-m68k/main' - '/l/linux/kernel-image-5.3.0-1-m68k-di_5.3.7-1_m68k.udeb') - deb_hash = '044954bb9be4160a3ce81f8bc1b5e856b75cccd1' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinux-5.3.0-1-m68k') - - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0 vga=off') - console_pattern = 'No filesystem could mount root' - self.run_rr(kernel_path, kernel_command_line, console_pattern) - - def do_test_advcal_2018(self, file_path, kernel_name, args=None): - archive.extract(file_path, self.workdir) - - for entry in os.scandir(self.workdir): - if entry.name.startswith('day') and entry.is_dir(): - kernel_path = 
os.path.join(entry.path, kernel_name) - break - - kernel_command_line = '' - console_pattern = 'QEMU advent calendar' - self.run_rr(kernel_path, kernel_command_line, console_pattern, - args=args) - - def test_arm_vexpressa9(self): - """ - :avocado: tags=arch:arm - :avocado: tags=machine:vexpress-a9 - """ - tar_hash = '32b7677ce8b6f1471fb0059865f451169934245b' - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day16.tar.xz') - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - dtb_path = self.workdir + '/day16/vexpress-v2p-ca9.dtb' - self.do_test_advcal_2018(file_path, 'winter.zImage', - args=('-dtb', dtb_path)) - - def test_m68k_mcf5208evb(self): - """ - :avocado: tags=arch:m68k - :avocado: tags=machine:mcf5208evb - """ - tar_hash = 'ac688fd00561a2b6ce1359f9ff6aa2b98c9a570c' - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day07.tar.xz') - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - self.do_test_advcal_2018(file_path, 'sanity-clause.elf') - - def test_microblaze_s3adsp1800(self): - """ - :avocado: tags=arch:microblaze - :avocado: tags=machine:petalogix-s3adsp1800 - """ - tar_hash = '08bf3e3bfb6b6c7ce1e54ab65d54e189f2caf13f' - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day17.tar.xz') - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - self.do_test_advcal_2018(file_path, 'ballerina.bin') - - def test_ppc64_e500(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:ppce500 - :avocado: tags=cpu:e5500 - """ - tar_hash = '6951d86d644b302898da2fd701739c9406527fe1' - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day19.tar.xz') - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - self.do_test_advcal_2018(file_path, 'uImage') - - def test_or1k_sim(self): - """ - :avocado: tags=arch:or1k - :avocado: tags=machine:or1k-sim - """ - tar_hash = '20334cdaf386108c530ff0badaecc955693027dd' - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day20.tar.xz') - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - self.do_test_advcal_2018(file_path, 'vmlinux') - - def test_ppc_g3beige(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:g3beige - """ - tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc' - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day15.tar.xz') - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - self.do_test_advcal_2018(file_path, 'invaders.elf', - args=('-M', 'graphics=off')) - - def test_ppc_mac99(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:mac99 - """ - tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc' - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day15.tar.xz') - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - self.do_test_advcal_2018(file_path, 'invaders.elf', - args=('-M', 'graphics=off')) - - def test_sparc_ss20(self): - """ - :avocado: tags=arch:sparc - :avocado: tags=machine:SS-20 - """ - tar_hash = 'b18550d5d61c7615d989a06edace051017726a9f' - tar_url = ('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day11.tar.xz') - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - self.do_test_advcal_2018(file_path, 'zImage.elf') - - def test_xtensa_lx60(self): - """ - :avocado: tags=arch:xtensa - :avocado: tags=machine:lx60 - :avocado: tags=cpu:dc233c - """ - tar_hash = '49e88d9933742f0164b60839886c9739cb7a0d34' - tar_url = 
('https://qemu-advcal.gitlab.io' - '/qac-best-of-multiarch/download/day02.tar.xz') - file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) - self.do_test_advcal_2018(file_path, 'santas-sleigh-ride.elf') - -@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') -class ReplayKernelSlow(ReplayKernelBase): - # Override the timeout, because this kernel includes an inner - # loop which is executed with TB recompilings during replay, - # making it very slow. - timeout = 180 - - def test_mips_malta_cpio(self): - """ - :avocado: tags=arch:mips - :avocado: tags=machine:malta - :avocado: tags=endian:big - :avocado: tags=slowness:high - """ - deb_url = ('http://snapshot.debian.org/archive/debian/' - '20160601T041800Z/pool/main/l/linux/' - 'linux-image-4.5.0-2-4kc-malta_4.5.5-1_mips.deb') - deb_hash = 'a3c84f3e88b54e06107d65a410d1d1e8e0f340f8' - deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) - kernel_path = self.extract_from_deb(deb_path, - '/boot/vmlinux-4.5.0-2-4kc-malta') - initrd_url = ('https://github.com/groeck/linux-build-test/raw/' - '8584a59ed9e5eb5ee7ca91f6d74bbb06619205b8/rootfs/' - 'mips/rootfs.cpio.gz') - initrd_hash = 'bf806e17009360a866bf537f6de66590de349a99' - initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) - initrd_path = self.workdir + "rootfs.cpio" - archive.gzip_uncompress(initrd_path_gz, initrd_path) - - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0 console=tty ' - 'rdinit=/sbin/init noreboot') - console_pattern = 'Boot successful.' - self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5, - args=('-initrd', initrd_path)) - - @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') - def test_mips64el_malta_5KEc_cpio(self): - """ - :avocado: tags=arch:mips64el - :avocado: tags=machine:malta - :avocado: tags=endian:little - :avocado: tags=slowness:high - :avocado: tags=cpu:5KEc - """ - kernel_url = ('https://github.com/philmd/qemu-testing-blob/' - 'raw/9ad2df38/mips/malta/mips64el/' - 'vmlinux-3.19.3.mtoman.20150408') - kernel_hash = '00d1d268fb9f7d8beda1de6bebcc46e884d71754' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - initrd_url = ('https://github.com/groeck/linux-build-test/' - 'raw/8584a59e/rootfs/' - 'mipsel64/rootfs.mipsel64r1.cpio.gz') - initrd_hash = '1dbb8a396e916847325284dbe2151167' - initrd_path_gz = self.fetch_asset(initrd_url, algorithm='md5', - asset_hash=initrd_hash) - initrd_path = self.workdir + "rootfs.cpio" - archive.gzip_uncompress(initrd_path_gz, initrd_path) - - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyS0 console=tty ' - 'rdinit=/sbin/init noreboot') - console_pattern = 'Boot successful.' 
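As an aside, a rough standard-library equivalent of the archive.gzip_uncompress() helper used above (a sketch under the assumption that the avocado utility simply inflates the .gz file to the given destination path):

# Sketch: decompress a .gz initrd to a plain file.
import gzip
import shutil

def gzip_uncompress(src_gz, dst):
    with gzip.open(src_gz, 'rb') as f_in, open(dst, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)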
- self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5, - args=('-initrd', initrd_path)) - - def do_test_mips_malta32el_nanomips(self, kernel_path_xz): - kernel_path = self.workdir + "kernel" - with lzma.open(kernel_path_xz, 'rb') as f_in: - with open(kernel_path, 'wb') as f_out: - shutil.copyfileobj(f_in, f_out) - - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'mem=256m@@0x0 ' - 'console=ttyS0') - console_pattern = 'Kernel command line: %s' % kernel_command_line - self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) - - def test_mips_malta32el_nanomips_4k(self): - """ - :avocado: tags=arch:mipsel - :avocado: tags=machine:malta - :avocado: tags=endian:little - :avocado: tags=cpu:I7200 - """ - kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' - 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' - 'generic_nano32r6el_page4k.xz') - kernel_hash = '477456aafd2a0f1ddc9482727f20fe9575565dd6' - kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - self.do_test_mips_malta32el_nanomips(kernel_path_xz) - - def test_mips_malta32el_nanomips_16k_up(self): - """ - :avocado: tags=arch:mipsel - :avocado: tags=machine:malta - :avocado: tags=endian:little - :avocado: tags=cpu:I7200 - """ - kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' - 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' - 'generic_nano32r6el_page16k_up.xz') - kernel_hash = 'e882868f944c71c816e832e2303b7874d044a7bc' - kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - self.do_test_mips_malta32el_nanomips(kernel_path_xz) - - def test_mips_malta32el_nanomips_64k_dbg(self): - """ - :avocado: tags=arch:mipsel - :avocado: tags=machine:malta - :avocado: tags=endian:little - :avocado: tags=cpu:I7200 - """ - kernel_url = ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' - 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' - 'generic_nano32r6el_page64k_dbg.xz') - kernel_hash = '18d1c68f2e23429e266ca39ba5349ccd0aeb7180' - kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - self.do_test_mips_malta32el_nanomips(kernel_path_xz) diff --git a/tests/avocado/replay_linux.py b/tests/avocado/replay_linux.py deleted file mode 100644 index b4673261ceb..00000000000 --- a/tests/avocado/replay_linux.py +++ /dev/null @@ -1,196 +0,0 @@ -# Record/replay test that boots a complete Linux system via a cloud image -# -# Copyright (c) 2020 ISP RAS -# -# Author: -# Pavel Dovgalyuk -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os -import logging -import time - -from avocado import skipUnless -from avocado_qemu import BUILD_DIR -from avocado.utils import cloudinit -from avocado.utils import network -from avocado.utils import vmimage -from avocado.utils import datadrainer -from avocado.utils.path import find_command -from avocado_qemu.linuxtest import LinuxTest - -class ReplayLinux(LinuxTest): - """ - Boots a Linux system, checking for a successful initialization - """ - - timeout = 1800 - chksum = None - hdd = 'ide-hd' - cd = 'ide-cd' - bus = 'ide' - - def setUp(self): - # LinuxTest does many replay-incompatible things, but includes - # useful methods. Do not setup LinuxTest here and just - # call some functions. 
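A condensed, self-contained sketch of the blkreplay wiring that vm_add_disk() below produces: the guest-visible device is attached to a blkreplay node that wraps the real (snapshotted) image, so block I/O can be recorded and replayed deterministically (helper and argument names here are arbitrary):

# Illustrative helper mirroring vm_add_disk() below.
def blkreplay_drive_args(path, disk_id, device, bus=None):
    bus_suffix = ',bus=%s.%d' % (bus, disk_id) if bus else ''
    return [
        '-drive', 'file=%s,snapshot=on,id=disk%s,if=none' % (path, disk_id),
        '-drive', 'driver=blkreplay,id=disk%s-rr,if=none,image=disk%s'
                  % (disk_id, disk_id),
        '-device', '%s,drive=disk%s-rr%s' % (device, disk_id, bus_suffix),
    ]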
- super(LinuxTest, self).setUp() - self._set_distro() - self.boot_path = self.download_boot() - self.phone_server = cloudinit.PhoneHomeServer(('0.0.0.0', 0), - self.name) - ssh_pubkey, self.ssh_key = self.set_up_existing_ssh_keys() - self.cloudinit_path = self.prepare_cloudinit(ssh_pubkey) - - def vm_add_disk(self, vm, path, id, device): - bus_string = '' - if self.bus: - bus_string = ',bus=%s.%d' % (self.bus, id,) - vm.add_args('-drive', 'file=%s,snapshot=on,id=disk%s,if=none' % (path, id)) - vm.add_args('-drive', - 'driver=blkreplay,id=disk%s-rr,if=none,image=disk%s' % (id, id)) - vm.add_args('-device', - '%s,drive=disk%s-rr%s' % (device, id, bus_string)) - - def vm_add_cdrom(self, vm, path, id, device): - vm.add_args('-drive', 'file=%s,id=disk%s,if=none,media=cdrom' % (path, id)) - - def launch_and_wait(self, record, args, shift): - self.require_netdev('user') - vm = self.get_vm() - vm.add_args('-smp', '1') - vm.add_args('-m', '1024') - vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22', - '-device', 'virtio-net,netdev=vnet') - vm.add_args('-object', 'filter-replay,id=replay,netdev=vnet') - if args: - vm.add_args(*args) - self.vm_add_disk(vm, self.boot_path, 0, self.hdd) - self.vm_add_cdrom(vm, self.cloudinit_path, 1, self.cd) - logger = logging.getLogger('replay') - if record: - logger.info('recording the execution...') - mode = 'record' - else: - logger.info('replaying the execution...') - mode = 'replay' - replay_path = os.path.join(self.workdir, 'replay.bin') - vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s' % - (shift, mode, replay_path)) - - start_time = time.time() - - vm.set_console() - vm.launch() - console_drainer = datadrainer.LineLogger(vm.console_socket.fileno(), - logger=self.log.getChild('console'), - stop_check=(lambda : not vm.is_running())) - console_drainer.start() - if record: - while not self.phone_server.instance_phoned_back: - self.phone_server.handle_request() - vm.shutdown() - logger.info('finished the recording with log size %s bytes' - % os.path.getsize(replay_path)) - else: - vm.event_wait('SHUTDOWN', self.timeout) - vm.wait() - logger.info('successfully finished the replay') - elapsed = time.time() - start_time - logger.info('elapsed time %.2f sec' % elapsed) - return elapsed - - def run_rr(self, args=None, shift=7): - t1 = self.launch_and_wait(True, args, shift) - t2 = self.launch_and_wait(False, args, shift) - logger = logging.getLogger('replay') - logger.info('replay overhead {:.2%}'.format(t2 / t1 - 1)) - -@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') -class ReplayLinuxX8664(ReplayLinux): - """ - :avocado: tags=arch:x86_64 - :avocado: tags=accel:tcg - """ - - chksum = 'e3c1b309d9203604922d6e255c2c5d098a309c2d46215d8fc026954f3c5c27a0' - - def test_pc_i440fx(self): - """ - :avocado: tags=machine:pc - """ - self.run_rr(shift=1) - - def test_pc_q35(self): - """ - :avocado: tags=machine:q35 - """ - self.run_rr(shift=3) - -@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') -class ReplayLinuxX8664Virtio(ReplayLinux): - """ - :avocado: tags=arch:x86_64 - :avocado: tags=virtio - :avocado: tags=accel:tcg - """ - - hdd = 'virtio-blk-pci' - cd = 'virtio-blk-pci' - bus = None - - chksum = 'e3c1b309d9203604922d6e255c2c5d098a309c2d46215d8fc026954f3c5c27a0' - - def test_pc_i440fx(self): - """ - :avocado: tags=machine:pc - """ - self.run_rr(shift=1) - - def test_pc_q35(self): - """ - :avocado: tags=machine:q35 - """ - self.run_rr(shift=3) - -@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might 
timeout') -class ReplayLinuxAarch64(ReplayLinux): - """ - :avocado: tags=accel:tcg - :avocado: tags=arch:aarch64 - :avocado: tags=machine:virt - :avocado: tags=cpu:max - """ - - chksum = '1e18d9c0cf734940c4b5d5ec592facaed2af0ad0329383d5639c997fdf16fe49' - - hdd = 'virtio-blk-device' - cd = 'virtio-blk-device' - bus = None - - def get_common_args(self): - return ('-bios', - os.path.join(BUILD_DIR, 'pc-bios', 'edk2-aarch64-code.fd'), - "-cpu", "max,lpa2=off", - '-device', 'virtio-rng-pci,rng=rng0', - '-object', 'rng-builtin,id=rng0') - - def test_virt_gicv2(self): - """ - :avocado: tags=machine:gic-version=2 - """ - - self.run_rr(shift=3, - args=(*self.get_common_args(), - "-machine", "virt,gic-version=2")) - - def test_virt_gicv3(self): - """ - :avocado: tags=machine:gic-version=3 - """ - - self.run_rr(shift=3, - args=(*self.get_common_args(), - "-machine", "virt,gic-version=3")) diff --git a/tests/avocado/reverse_debugging.py b/tests/avocado/reverse_debugging.py deleted file mode 100644 index 92855a02a54..00000000000 --- a/tests/avocado/reverse_debugging.py +++ /dev/null @@ -1,276 +0,0 @@ -# Reverse debugging test -# -# Copyright (c) 2020 ISP RAS -# -# Author: -# Pavel Dovgalyuk -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. -import os -import logging - -from avocado import skipUnless -from avocado_qemu import BUILD_DIR -from avocado.utils import datadrainer -from avocado.utils import gdb -from avocado.utils import process -from avocado.utils.network.ports import find_free_port -from avocado.utils.path import find_command -from boot_linux_console import LinuxKernelTest - -class ReverseDebugging(LinuxKernelTest): - """ - Test GDB reverse debugging commands: reverse step and reverse continue. - Recording saves the execution of some instructions and makes an initial - VM snapshot to allow reverse execution. - Replay saves the order of the first instructions and then checks that they - are executed backwards in the correct order. - After that the execution is replayed to the end, and reverse continue - command is checked by setting several breakpoints, and asserting - that the execution is stopped at the last of them. 
- """ - - timeout = 10 - STEPS = 10 - endian_is_le = True - - def run_vm(self, record, shift, args, replay_path, image_path, port): - logger = logging.getLogger('replay') - vm = self.get_vm() - vm.set_console() - if record: - logger.info('recording the execution...') - mode = 'record' - else: - logger.info('replaying the execution...') - mode = 'replay' - vm.add_args('-gdb', 'tcp::%d' % port, '-S') - vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s,rrsnapshot=init' % - (shift, mode, replay_path), - '-net', 'none') - vm.add_args('-drive', 'file=%s,if=none' % image_path) - if args: - vm.add_args(*args) - vm.launch() - console_drainer = datadrainer.LineLogger(vm.console_socket.fileno(), - logger=self.log.getChild('console'), - stop_check=(lambda : not vm.is_running())) - console_drainer.start() - return vm - - @staticmethod - def get_reg_le(g, reg): - res = g.cmd(b'p%x' % reg) - num = 0 - for i in range(len(res))[-2::-2]: - num = 0x100 * num + int(res[i:i + 2], 16) - return num - - @staticmethod - def get_reg_be(g, reg): - res = g.cmd(b'p%x' % reg) - return int(res, 16) - - def get_reg(self, g, reg): - # value may be encoded in BE or LE order - if self.endian_is_le: - return self.get_reg_le(g, reg) - else: - return self.get_reg_be(g, reg) - - def get_pc(self, g): - return self.get_reg(g, self.REG_PC) - - def check_pc(self, g, addr): - pc = self.get_pc(g) - if pc != addr: - self.fail('Invalid PC (read %x instead of %x)' % (pc, addr)) - - @staticmethod - def gdb_step(g): - g.cmd(b's', b'T05thread:01;') - - @staticmethod - def gdb_bstep(g): - g.cmd(b'bs', b'T05thread:01;') - - @staticmethod - def vm_get_icount(vm): - return vm.qmp('query-replay')['return']['icount'] - - def reverse_debugging(self, shift=7, args=None): - logger = logging.getLogger('replay') - - # create qcow2 for snapshots - logger.info('creating qcow2 image for VM snapshots') - image_path = os.path.join(self.workdir, 'disk.qcow2') - qemu_img = os.path.join(BUILD_DIR, 'qemu-img') - if not os.path.exists(qemu_img): - qemu_img = find_command('qemu-img', False) - if qemu_img is False: - self.cancel('Could not find "qemu-img", which is required to ' - 'create the temporary qcow2 image') - cmd = '%s create -f qcow2 %s 128M' % (qemu_img, image_path) - process.run(cmd) - - replay_path = os.path.join(self.workdir, 'replay.bin') - port = find_free_port() - - # record the log - vm = self.run_vm(True, shift, args, replay_path, image_path, port) - while self.vm_get_icount(vm) <= self.STEPS: - pass - last_icount = self.vm_get_icount(vm) - vm.shutdown() - - logger.info("recorded log with %s+ steps" % last_icount) - - # replay and run debug commands - vm = self.run_vm(False, shift, args, replay_path, image_path, port) - logger.info('connecting to gdbstub') - g = gdb.GDBRemote('127.0.0.1', port, False, False) - g.connect() - r = g.cmd(b'qSupported') - if b'qXfer:features:read+' in r: - g.cmd(b'qXfer:features:read:target.xml:0,ffb') - if b'ReverseStep+' not in r: - self.fail('Reverse step is not supported by QEMU') - if b'ReverseContinue+' not in r: - self.fail('Reverse continue is not supported by QEMU') - - logger.info('stepping forward') - steps = [] - # record first instruction addresses - for _ in range(self.STEPS): - pc = self.get_pc(g) - logger.info('saving position %x' % pc) - steps.append(pc) - self.gdb_step(g) - - # visit the recorded instruction in reverse order - logger.info('stepping backward') - for addr in steps[::-1]: - self.gdb_bstep(g) - self.check_pc(g, addr) - logger.info('found position %x' % addr) - - # visit the recorded 
instruction in forward order - logger.info('stepping forward') - for addr in steps: - self.check_pc(g, addr) - self.gdb_step(g) - logger.info('found position %x' % addr) - - # set breakpoints for the instructions just stepped over - logger.info('setting breakpoints') - for addr in steps: - # hardware breakpoint at addr with len=1 - g.cmd(b'Z1,%x,1' % addr, b'OK') - - # this may hit a breakpoint if first instructions are executed - # again - logger.info('continuing execution') - vm.qmp('replay-break', icount=last_icount - 1) - # continue - will return after pausing - # This could stop at the end and get a T02 return, or by - # re-executing one of the breakpoints and get a T05 return. - g.cmd(b'c') - if self.vm_get_icount(vm) == last_icount - 1: - logger.info('reached the end (icount %s)' % (last_icount - 1)) - else: - logger.info('hit a breakpoint again at %x (icount %s)' % - (self.get_pc(g), self.vm_get_icount(vm))) - - logger.info('running reverse continue to reach %x' % steps[-1]) - # reverse continue - will return after stopping at the breakpoint - g.cmd(b'bc', b'T05thread:01;') - - # assume that none of the first instructions is executed again - # breaking the order of the breakpoints - self.check_pc(g, steps[-1]) - logger.info('successfully reached %x' % steps[-1]) - - logger.info('exiting gdb and qemu') - vm.shutdown() - -class ReverseDebugging_X86_64(ReverseDebugging): - """ - :avocado: tags=accel:tcg - """ - - REG_PC = 0x10 - REG_CS = 0x12 - def get_pc(self, g): - return self.get_reg_le(g, self.REG_PC) \ - + self.get_reg_le(g, self.REG_CS) * 0x10 - - # unidentified gitlab timeout problem - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - - def test_x86_64_pc(self): - """ - :avocado: tags=arch:x86_64 - :avocado: tags=machine:pc - """ - # start with BIOS only - self.reverse_debugging() - -class ReverseDebugging_AArch64(ReverseDebugging): - """ - :avocado: tags=accel:tcg - """ - - REG_PC = 32 - - # unidentified gitlab timeout problem - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - - def test_aarch64_virt(self): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=machine:virt - :avocado: tags=cpu:cortex-a53 - """ - kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' - '/linux/releases/29/Everything/aarch64/os/images/pxeboot' - '/vmlinuz') - kernel_hash = '8c73e469fc6ea06a58dc83a628fc695b693b8493' - kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) - - self.reverse_debugging( - args=('-kernel', kernel_path)) - -class ReverseDebugging_ppc64(ReverseDebugging): - """ - :avocado: tags=accel:tcg - """ - - REG_PC = 0x40 - - # unidentified gitlab timeout problem - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - - def test_ppc64_pseries(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - :avocado: tags=flaky - """ - # SLOF branches back to its entry point, which causes this test - # to take the 'hit a breakpoint again' path. That's not a problem, - # just slightly different than the other machines. 
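For context, a self-contained sketch of the byte-order handling implemented by get_reg_le()/get_reg_be() above: a GDB 'p' reply is a hex string in target byte order, so little-endian targets need the byte pairs reversed before parsing (the register values below are made up for illustration):

# Decode a register value from a GDB remote 'p' reply.
def decode_reg(reply, little_endian=True):
    if little_endian:
        # reverse the hex string two characters (one byte) at a time
        reply = b''.join(reply[i:i + 2] for i in range(len(reply) - 2, -2, -2))
    return int(reply, 16)

assert decode_reg(b'78563412') == 0x12345678                        # LE reply
assert decode_reg(b'12345678', little_endian=False) == 0x12345678   # BE reply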
- self.endian_is_le = False - self.reverse_debugging() - - # See https://gitlab.com/qemu-project/qemu/-/issues/1992 - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - - def test_ppc64_powernv(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:powernv - :avocado: tags=flaky - """ - self.endian_is_le = False - self.reverse_debugging() diff --git a/tests/avocado/riscv_opensbi.py b/tests/avocado/riscv_opensbi.py deleted file mode 100644 index bfff9cc3c33..00000000000 --- a/tests/avocado/riscv_opensbi.py +++ /dev/null @@ -1,63 +0,0 @@ -# OpenSBI boot test for RISC-V machines -# -# Copyright (c) 2022, Ventana Micro -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern - -class RiscvOpenSBI(QemuSystemTest): - """ - :avocado: tags=accel:tcg - """ - timeout = 5 - - def boot_opensbi(self): - self.vm.set_console() - self.vm.launch() - wait_for_console_pattern(self, 'Platform Name') - wait_for_console_pattern(self, 'Boot HART MEDELEG') - - def test_riscv32_spike(self): - """ - :avocado: tags=arch:riscv32 - :avocado: tags=machine:spike - """ - self.boot_opensbi() - - def test_riscv64_spike(self): - """ - :avocado: tags=arch:riscv64 - :avocado: tags=machine:spike - """ - self.boot_opensbi() - - def test_riscv32_sifive_u(self): - """ - :avocado: tags=arch:riscv32 - :avocado: tags=machine:sifive_u - """ - self.boot_opensbi() - - def test_riscv64_sifive_u(self): - """ - :avocado: tags=arch:riscv64 - :avocado: tags=machine:sifive_u - """ - self.boot_opensbi() - - def test_riscv32_virt(self): - """ - :avocado: tags=arch:riscv32 - :avocado: tags=machine:virt - """ - self.boot_opensbi() - - def test_riscv64_virt(self): - """ - :avocado: tags=arch:riscv64 - :avocado: tags=machine:virt - """ - self.boot_opensbi() diff --git a/tests/avocado/s390_topology.py b/tests/avocado/s390_topology.py deleted file mode 100644 index 9154ac87762..00000000000 --- a/tests/avocado/s390_topology.py +++ /dev/null @@ -1,439 +0,0 @@ -# Functional test that boots a Linux kernel and checks the console -# -# Copyright IBM Corp. 2023 -# -# Author: -# Pierre Morel -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import os -import shutil -import time - -from avocado_qemu import QemuSystemTest -from avocado_qemu import exec_command -from avocado_qemu import exec_command_and_wait_for_pattern -from avocado_qemu import interrupt_interactive_console_until_pattern -from avocado_qemu import wait_for_console_pattern -from avocado.utils import process -from avocado.utils import archive - - -class S390CPUTopology(QemuSystemTest): - """ - S390x CPU topology consists of 4 topology layers, from bottom to top, - the cores, sockets, books and drawers and 2 modifiers attributes, - the entitlement and the dedication. - See: docs/system/s390x/cpu-topology.rst. - - S390x CPU topology is setup in different ways: - - implicitly from the '-smp' argument by completing each topology - level one after the other beginning with drawer 0, book 0 and - socket 0. - - explicitly from the '-device' argument on the QEMU command line - - explicitly by hotplug of a new CPU using QMP or HMP - - it is modified by using QMP 'set-cpu-topology' - - The S390x modifier attribute entitlement depends on the machine - polarization, which can be horizontal or vertical. 
- The polarization is changed on a request from the guest. - """ - timeout = 90 - event_timeout = 10 - - KERNEL_COMMON_COMMAND_LINE = ('printk.time=0 ' - 'root=/dev/ram ' - 'selinux=0 ' - 'rdinit=/bin/sh') - - def wait_until_booted(self): - wait_for_console_pattern(self, 'no job control', - failure_message='Kernel panic - not syncing', - vm=None) - - def check_topology(self, c, s, b, d, e, t): - res = self.vm.qmp('query-cpus-fast') - cpus = res['return'] - for cpu in cpus: - core = cpu['props']['core-id'] - socket = cpu['props']['socket-id'] - book = cpu['props']['book-id'] - drawer = cpu['props']['drawer-id'] - entitlement = cpu.get('entitlement') - dedicated = cpu.get('dedicated') - if core == c: - self.assertEqual(drawer, d) - self.assertEqual(book, b) - self.assertEqual(socket, s) - self.assertEqual(entitlement, e) - self.assertEqual(dedicated, t) - - def kernel_init(self): - """ - We need a VM that supports CPU topology, - currently this only the case when using KVM, not TCG. - We need a kernel supporting the CPU topology. - We need a minimal root filesystem with a shell. - """ - self.require_accelerator("kvm") - kernel_url = ('https://archives.fedoraproject.org/pub/archive' - '/fedora-secondary/releases/35/Server/s390x/os' - '/images/kernel.img') - kernel_hash = '0d1aaaf303f07cf0160c8c48e56fe638' - kernel_path = self.fetch_asset(kernel_url, algorithm='md5', - asset_hash=kernel_hash) - - initrd_url = ('https://archives.fedoraproject.org/pub/archive' - '/fedora-secondary/releases/35/Server/s390x/os' - '/images/initrd.img') - initrd_hash = 'a122057d95725ac030e2ec51df46e172' - initrd_path_xz = self.fetch_asset(initrd_url, algorithm='md5', - asset_hash=initrd_hash) - initrd_path = os.path.join(self.workdir, 'initrd-raw.img') - archive.lzma_uncompress(initrd_path_xz, initrd_path) - - self.vm.set_console() - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE - self.vm.add_args('-nographic', - '-enable-kvm', - '-cpu', 'max,ctop=on', - '-m', '512', - '-kernel', kernel_path, - '-initrd', initrd_path, - '-append', kernel_command_line) - - def system_init(self): - self.log.info("System init") - exec_command_and_wait_for_pattern(self, - """ mount proc -t proc /proc; - mount sys -t sysfs /sys; - cat /sys/devices/system/cpu/dispatching """, - '0') - - def test_single(self): - """ - This test checks the simplest topology with a single CPU. - - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - """ - self.kernel_init() - self.vm.launch() - self.wait_until_booted() - self.check_topology(0, 0, 0, 0, 'medium', False) - - def test_default(self): - """ - This test checks the implicit topology. 
- - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - """ - self.kernel_init() - self.vm.add_args('-smp', - '13,drawers=2,books=2,sockets=3,cores=2,maxcpus=24') - self.vm.launch() - self.wait_until_booted() - self.check_topology(0, 0, 0, 0, 'medium', False) - self.check_topology(1, 0, 0, 0, 'medium', False) - self.check_topology(2, 1, 0, 0, 'medium', False) - self.check_topology(3, 1, 0, 0, 'medium', False) - self.check_topology(4, 2, 0, 0, 'medium', False) - self.check_topology(5, 2, 0, 0, 'medium', False) - self.check_topology(6, 0, 1, 0, 'medium', False) - self.check_topology(7, 0, 1, 0, 'medium', False) - self.check_topology(8, 1, 1, 0, 'medium', False) - self.check_topology(9, 1, 1, 0, 'medium', False) - self.check_topology(10, 2, 1, 0, 'medium', False) - self.check_topology(11, 2, 1, 0, 'medium', False) - self.check_topology(12, 0, 0, 1, 'medium', False) - - def test_move(self): - """ - This test checks the topology modification by moving a CPU - to another socket: CPU 0 is moved from socket 0 to socket 2. - - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - """ - self.kernel_init() - self.vm.add_args('-smp', - '1,drawers=2,books=2,sockets=3,cores=2,maxcpus=24') - self.vm.launch() - self.wait_until_booted() - - self.check_topology(0, 0, 0, 0, 'medium', False) - res = self.vm.qmp('set-cpu-topology', - {'core-id': 0, 'socket-id': 2, 'entitlement': 'low'}) - self.assertEqual(res['return'], {}) - self.check_topology(0, 2, 0, 0, 'low', False) - - def test_dash_device(self): - """ - This test verifies that a CPU defined with the '-device' - command line option finds its right place inside the topology. - - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - """ - self.kernel_init() - self.vm.add_args('-smp', - '1,drawers=2,books=2,sockets=3,cores=2,maxcpus=24') - self.vm.add_args('-device', 'max-s390x-cpu,core-id=10') - self.vm.add_args('-device', - 'max-s390x-cpu,' - 'core-id=1,socket-id=0,book-id=1,drawer-id=1,entitlement=low') - self.vm.add_args('-device', - 'max-s390x-cpu,' - 'core-id=2,socket-id=0,book-id=1,drawer-id=1,entitlement=medium') - self.vm.add_args('-device', - 'max-s390x-cpu,' - 'core-id=3,socket-id=1,book-id=1,drawer-id=1,entitlement=high') - self.vm.add_args('-device', - 'max-s390x-cpu,' - 'core-id=4,socket-id=1,book-id=1,drawer-id=1') - self.vm.add_args('-device', - 'max-s390x-cpu,' - 'core-id=5,socket-id=2,book-id=1,drawer-id=1,dedicated=true') - - self.vm.launch() - self.wait_until_booted() - - self.check_topology(10, 2, 1, 0, 'medium', False) - self.check_topology(1, 0, 1, 1, 'low', False) - self.check_topology(2, 0, 1, 1, 'medium', False) - self.check_topology(3, 1, 1, 1, 'high', False) - self.check_topology(4, 1, 1, 1, 'medium', False) - self.check_topology(5, 2, 1, 1, 'high', True) - - - def guest_set_dispatching(self, dispatching): - exec_command(self, - f'echo {dispatching} > /sys/devices/system/cpu/dispatching') - self.vm.event_wait('CPU_POLARIZATION_CHANGE', self.event_timeout) - exec_command_and_wait_for_pattern(self, - 'cat /sys/devices/system/cpu/dispatching', dispatching) - - - def test_polarization(self): - """ - This test verifies that QEMU modifies the entitlement change after - several guest polarization change requests. 
- - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - """ - self.kernel_init() - self.vm.launch() - self.wait_until_booted() - - self.system_init() - res = self.vm.qmp('query-s390x-cpu-polarization') - self.assertEqual(res['return']['polarization'], 'horizontal') - self.check_topology(0, 0, 0, 0, 'medium', False) - - self.guest_set_dispatching('1'); - res = self.vm.qmp('query-s390x-cpu-polarization') - self.assertEqual(res['return']['polarization'], 'vertical') - self.check_topology(0, 0, 0, 0, 'medium', False) - - self.guest_set_dispatching('0'); - res = self.vm.qmp('query-s390x-cpu-polarization') - self.assertEqual(res['return']['polarization'], 'horizontal') - self.check_topology(0, 0, 0, 0, 'medium', False) - - - def check_polarization(self, polarization): - #We need to wait for the change to have been propagated to the kernel - exec_command_and_wait_for_pattern(self, - "\n".join([ - "timeout 1 sh -c 'while true", - 'do', - ' syspath="/sys/devices/system/cpu/cpu0/polarization"', - ' polarization="$(cat "$syspath")" || exit', - f' if [ "$polarization" = "{polarization}" ]; then', - ' exit 0', - ' fi', - ' sleep 0.01', - #searched for strings mustn't show up in command, '' to obfuscate - "done' && echo succ''ess || echo fail''ure", - ]), - "success", "failure") - - - def test_entitlement(self): - """ - This test verifies that QEMU modifies the entitlement - after a guest request and that the guest sees the change. - - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - """ - self.kernel_init() - self.vm.launch() - self.wait_until_booted() - - self.system_init() - - self.check_polarization('horizontal') - self.check_topology(0, 0, 0, 0, 'medium', False) - - self.guest_set_dispatching('1') - self.check_polarization('vertical:medium') - self.check_topology(0, 0, 0, 0, 'medium', False) - - res = self.vm.qmp('set-cpu-topology', - {'core-id': 0, 'entitlement': 'low'}) - self.assertEqual(res['return'], {}) - self.check_polarization('vertical:low') - self.check_topology(0, 0, 0, 0, 'low', False) - - res = self.vm.qmp('set-cpu-topology', - {'core-id': 0, 'entitlement': 'medium'}) - self.assertEqual(res['return'], {}) - self.check_polarization('vertical:medium') - self.check_topology(0, 0, 0, 0, 'medium', False) - - res = self.vm.qmp('set-cpu-topology', - {'core-id': 0, 'entitlement': 'high'}) - self.assertEqual(res['return'], {}) - self.check_polarization('vertical:high') - self.check_topology(0, 0, 0, 0, 'high', False) - - self.guest_set_dispatching('0'); - self.check_polarization("horizontal") - self.check_topology(0, 0, 0, 0, 'high', False) - - - def test_dedicated(self): - """ - This test verifies that QEMU adjusts the entitlement correctly when a - CPU is made dedicated. - QEMU retains the entitlement value when horizontal polarization is in effect. - For the guest, the field shows the effective value of the entitlement. 
- - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - """ - self.kernel_init() - self.vm.launch() - self.wait_until_booted() - - self.system_init() - - self.check_polarization("horizontal") - - res = self.vm.qmp('set-cpu-topology', - {'core-id': 0, 'dedicated': True}) - self.assertEqual(res['return'], {}) - self.check_topology(0, 0, 0, 0, 'high', True) - self.check_polarization("horizontal") - - self.guest_set_dispatching('1'); - self.check_topology(0, 0, 0, 0, 'high', True) - self.check_polarization("vertical:high") - - self.guest_set_dispatching('0'); - self.check_topology(0, 0, 0, 0, 'high', True) - self.check_polarization("horizontal") - - - def test_socket_full(self): - """ - This test verifies that QEMU does not accept to overload a socket. - The socket-id 0 on book-id 0 already contains CPUs 0 and 1 and can - not accept any new CPU while socket-id 0 on book-id 1 is free. - - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - """ - self.kernel_init() - self.vm.add_args('-smp', - '3,drawers=2,books=2,sockets=3,cores=2,maxcpus=24') - self.vm.launch() - self.wait_until_booted() - - self.system_init() - - res = self.vm.qmp('set-cpu-topology', - {'core-id': 2, 'socket-id': 0, 'book-id': 0}) - self.assertEqual(res['error']['class'], 'GenericError') - - res = self.vm.qmp('set-cpu-topology', - {'core-id': 2, 'socket-id': 0, 'book-id': 1}) - self.assertEqual(res['return'], {}) - - def test_dedicated_error(self): - """ - This test verifies that QEMU refuses to lower the entitlement - of a dedicated CPU - - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - """ - self.kernel_init() - self.vm.launch() - self.wait_until_booted() - - self.system_init() - - res = self.vm.qmp('set-cpu-topology', - {'core-id': 0, 'dedicated': True}) - self.assertEqual(res['return'], {}) - - self.check_topology(0, 0, 0, 0, 'high', True) - - self.guest_set_dispatching('1'); - - self.check_topology(0, 0, 0, 0, 'high', True) - - res = self.vm.qmp('set-cpu-topology', - {'core-id': 0, 'entitlement': 'low', 'dedicated': True}) - self.assertEqual(res['error']['class'], 'GenericError') - - res = self.vm.qmp('set-cpu-topology', - {'core-id': 0, 'entitlement': 'low'}) - self.assertEqual(res['error']['class'], 'GenericError') - - res = self.vm.qmp('set-cpu-topology', - {'core-id': 0, 'entitlement': 'medium', 'dedicated': True}) - self.assertEqual(res['error']['class'], 'GenericError') - - res = self.vm.qmp('set-cpu-topology', - {'core-id': 0, 'entitlement': 'medium'}) - self.assertEqual(res['error']['class'], 'GenericError') - - res = self.vm.qmp('set-cpu-topology', - {'core-id': 0, 'entitlement': 'low', 'dedicated': False}) - self.assertEqual(res['return'], {}) - - res = self.vm.qmp('set-cpu-topology', - {'core-id': 0, 'entitlement': 'medium', 'dedicated': False}) - self.assertEqual(res['return'], {}) - - def test_move_error(self): - """ - This test verifies that QEMU refuses to move a CPU to an - nonexistent location - - :avocado: tags=arch:s390x - :avocado: tags=machine:s390-ccw-virtio - """ - self.kernel_init() - self.vm.launch() - self.wait_until_booted() - - self.system_init() - - res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'drawer-id': 1}) - self.assertEqual(res['error']['class'], 'GenericError') - - res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'book-id': 1}) - self.assertEqual(res['error']['class'], 'GenericError') - - res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'socket-id': 1}) - self.assertEqual(res['error']['class'], 'GenericError') - - 
self.check_topology(0, 0, 0, 0, 'medium', False) diff --git a/tests/avocado/smmu.py b/tests/avocado/smmu.py deleted file mode 100644 index aadda71e4b2..00000000000 --- a/tests/avocado/smmu.py +++ /dev/null @@ -1,140 +0,0 @@ -# SMMUv3 Functional tests -# -# Copyright (c) 2021 Red Hat, Inc. -# -# Author: -# Eric Auger -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. -import os - -from avocado import skipUnless -from avocado_qemu import BUILD_DIR -from avocado_qemu.linuxtest import LinuxTest - -@skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - -class SMMU(LinuxTest): - """ - :avocado: tags=accel:kvm - :avocado: tags=cpu:host - :avocado: tags=arch:aarch64 - :avocado: tags=machine:virt - :avocado: tags=distro:fedora - :avocado: tags=smmu - :avocado: tags=flaky - """ - - IOMMU_ADDON = ',iommu_platform=on,disable-modern=off,disable-legacy=on' - kernel_path = None - initrd_path = None - kernel_params = None - - def set_up_boot(self): - path = self.download_boot() - self.vm.add_args('-device', 'virtio-blk-pci,bus=pcie.0,' + - 'drive=drv0,id=virtio-disk0,bootindex=1,' - 'werror=stop,rerror=stop' + self.IOMMU_ADDON) - self.vm.add_args('-drive', - 'file=%s,if=none,cache=writethrough,id=drv0' % path) - - def setUp(self): - super(SMMU, self).setUp(None, 'virtio-net-pci' + self.IOMMU_ADDON) - - def common_vm_setup(self, custom_kernel=False): - self.require_accelerator("kvm") - self.vm.add_args("-accel", "kvm") - self.vm.add_args("-cpu", "host") - self.vm.add_args("-machine", "iommu=smmuv3") - self.vm.add_args("-d", "guest_errors") - self.vm.add_args('-bios', os.path.join(BUILD_DIR, 'pc-bios', - 'edk2-aarch64-code.fd')) - self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0') - self.vm.add_args('-object', - 'rng-random,id=rng0,filename=/dev/urandom') - - if custom_kernel is False: - return - - kernel_url = self.distro.pxeboot_url + 'vmlinuz' - initrd_url = self.distro.pxeboot_url + 'initrd.img' - self.kernel_path = self.fetch_asset(kernel_url) - self.initrd_path = self.fetch_asset(initrd_url) - - def run_and_check(self): - if self.kernel_path: - self.vm.add_args('-kernel', self.kernel_path, - '-append', self.kernel_params, - '-initrd', self.initrd_path) - self.launch_and_wait() - self.ssh_command('cat /proc/cmdline') - self.ssh_command('dnf -y install numactl-devel') - - - # 5.3 kernel without RIL # - - def test_smmu_noril(self): - """ - :avocado: tags=smmu_noril - :avocado: tags=smmu_noril_tests - :avocado: tags=distro_version:31 - """ - self.common_vm_setup() - self.run_and_check() - - def test_smmu_noril_passthrough(self): - """ - :avocado: tags=smmu_noril_passthrough - :avocado: tags=smmu_noril_tests - :avocado: tags=distro_version:31 - """ - self.common_vm_setup(True) - self.kernel_params = (self.distro.default_kernel_params + - ' iommu.passthrough=on') - self.run_and_check() - - def test_smmu_noril_nostrict(self): - """ - :avocado: tags=smmu_noril_nostrict - :avocado: tags=smmu_noril_tests - :avocado: tags=distro_version:31 - """ - self.common_vm_setup(True) - self.kernel_params = (self.distro.default_kernel_params + - ' iommu.strict=0') - self.run_and_check() - - # 5.8 kernel featuring range invalidation - # >= v5.7 kernel - - def test_smmu_ril(self): - """ - :avocado: tags=smmu_ril - :avocado: tags=smmu_ril_tests - :avocado: tags=distro_version:33 - """ - self.common_vm_setup() - self.run_and_check() - - def test_smmu_ril_passthrough(self): - """ - :avocado: tags=smmu_ril_passthrough - 
:avocado: tags=smmu_ril_tests - :avocado: tags=distro_version:33 - """ - self.common_vm_setup(True) - self.kernel_params = (self.distro.default_kernel_params + - ' iommu.passthrough=on') - self.run_and_check() - - def test_smmu_ril_nostrict(self): - """ - :avocado: tags=smmu_ril_nostrict - :avocado: tags=smmu_ril_tests - :avocado: tags=distro_version:33 - """ - self.common_vm_setup(True) - self.kernel_params = (self.distro.default_kernel_params + - ' iommu.strict=0') - self.run_and_check() diff --git a/tests/avocado/tcg_plugins.py b/tests/avocado/tcg_plugins.py deleted file mode 100644 index a6ff457e272..00000000000 --- a/tests/avocado/tcg_plugins.py +++ /dev/null @@ -1,122 +0,0 @@ -# TCG Plugins tests -# -# These are a little more involved than the basic tests run by check-tcg. -# -# Copyright (c) 2021 Linaro -# -# Author: -# Alex Bennée -# -# SPDX-License-Identifier: GPL-2.0-or-later - -import tempfile -import mmap -import re - -from boot_linux_console import LinuxKernelTest - - -class PluginKernelBase(LinuxKernelTest): - """ - Boots a Linux kernel with a TCG plugin enabled. - """ - - timeout = 120 - KERNEL_COMMON_COMMAND_LINE = 'printk.time=1 panic=-1 ' - - def run_vm(self, kernel_path, kernel_command_line, - plugin, plugin_log, console_pattern, args=None): - - vm = self.get_vm() - vm.set_console() - vm.add_args('-kernel', kernel_path, - '-append', kernel_command_line, - '-plugin', plugin, - '-d', 'plugin', - '-D', plugin_log, - '-net', 'none', - '-no-reboot') - if args: - vm.add_args(*args) - - try: - vm.launch() - except: - # TODO: probably fails because plugins not enabled but we - # can't currently probe for the feature. - self.cancel("TCG Plugins not enabled?") - - self.wait_for_console_pattern(console_pattern, vm) - # ensure logs are flushed - vm.shutdown() - - -class PluginKernelNormal(PluginKernelBase): - - def _grab_aarch64_kernel(self): - kernel_url = ('https://storage.tuxboot.com/20230331/arm64/Image') - kernel_sha256 = 'ce95a7101a5fecebe0fe630deee6bd97b32ba41bc8754090e9ad8961ea8674c7' - kernel_path = self.fetch_asset(kernel_url, - asset_hash=kernel_sha256, - algorithm = "sha256") - return kernel_path - - def test_aarch64_virt_insn(self): - """ - :avocado: tags=accel:tcg - :avocado: tags=arch:aarch64 - :avocado: tags=machine:virt - :avocado: tags=cpu:cortex-a53 - """ - kernel_path = self._grab_aarch64_kernel() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyAMA0') - console_pattern = 'Kernel panic - not syncing: VFS:' - - plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin", - suffix=".log") - - self.run_vm(kernel_path, kernel_command_line, - "tests/tcg/plugins/libinsn.so", plugin_log.name, - console_pattern) - - with plugin_log as lf, \ - mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s: - - m = re.search(br"insns: (?P\d+)", s) - if "count" not in m.groupdict(): - self.fail("Failed to find instruction count") - else: - count = int(m.group("count")) - self.log.info(f"Counted: {count} instructions") - - - def test_aarch64_virt_insn_icount(self): - """ - :avocado: tags=accel:tcg - :avocado: tags=arch:aarch64 - :avocado: tags=machine:virt - :avocado: tags=cpu:cortex-a53 - """ - kernel_path = self._grab_aarch64_kernel() - kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + - 'console=ttyAMA0') - console_pattern = 'Kernel panic - not syncing: VFS:' - - plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin", - suffix=".log") - - self.run_vm(kernel_path, kernel_command_line, - "tests/tcg/plugins/libinsn.so", 
plugin_log.name, - console_pattern, - args=('-icount', 'shift=1')) - - with plugin_log as lf, \ - mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s: - - m = re.search(br"insns: (?P\d+)", s) - if "count" not in m.groupdict(): - self.fail("Failed to find instruction count") - else: - count = int(m.group("count")) - self.log.info(f"Counted: {count} instructions") diff --git a/tests/avocado/tesseract_utils.py b/tests/avocado/tesseract_utils.py deleted file mode 100644 index 476f528147c..00000000000 --- a/tests/avocado/tesseract_utils.py +++ /dev/null @@ -1,46 +0,0 @@ -# ... -# -# Copyright (c) 2019 Philippe Mathieu-Daudé -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import re -import logging - -from avocado.utils import process -from avocado.utils.path import find_command, CmdNotFoundError - -def tesseract_available(expected_version): - try: - find_command('tesseract') - except CmdNotFoundError: - return False - res = process.run('tesseract --version') - try: - version = res.stdout_text.split()[1] - except IndexError: - version = res.stderr_text.split()[1] - return int(version.split('.')[0]) >= expected_version - - match = re.match(r'tesseract\s(\d)', res) - if match is None: - return False - # now this is guaranteed to be a digit - return int(match.groups()[0]) >= expected_version - - -def tesseract_ocr(image_path, tesseract_args='', tesseract_version=3): - console_logger = logging.getLogger('tesseract') - console_logger.debug(image_path) - if tesseract_version == 4: - tesseract_args += ' --oem 1' - proc = process.run("tesseract {} {} stdout".format(tesseract_args, - image_path)) - lines = [] - for line in proc.stdout_text.split('\n'): - sline = line.strip() - if len(sline): - console_logger.debug(sline) - lines += [sline] - return lines diff --git a/tests/avocado/tuxrun_baselines.py b/tests/avocado/tuxrun_baselines.py deleted file mode 100644 index 736e4aa289c..00000000000 --- a/tests/avocado/tuxrun_baselines.py +++ /dev/null @@ -1,620 +0,0 @@ -# Functional test that boots known good tuxboot images the same way -# that tuxrun (www.tuxrun.org) does. This tool is used by things like -# the LKFT project to run regression tests on kernels. -# -# Copyright (c) 2023 Linaro Ltd. -# -# Author: -# Alex Bennée -# -# SPDX-License-Identifier: GPL-2.0-or-later - -import os -import time -import tempfile - -from avocado import skip, skipUnless -from avocado_qemu import QemuSystemTest -from avocado_qemu import exec_command, exec_command_and_wait_for_pattern -from avocado_qemu import wait_for_console_pattern -from avocado.utils import process -from avocado.utils.path import find_command - -class TuxRunBaselineTest(QemuSystemTest): - """ - :avocado: tags=accel:tcg - """ - - KERNEL_COMMON_COMMAND_LINE = 'printk.time=0' - # Tests are ~10-40s, allow for --debug/--enable-gcov overhead - timeout = 100 - - def get_tag(self, tagname, default=None): - """ - Get the metadata tag or return the default. 
- """ - utag = self._get_unique_tag_val(tagname) - print(f"{tagname}/{default} -> {utag}") - if utag: - return utag - - return default - - def setUp(self): - super().setUp() - - # We need zstd for all the tuxrun tests - # See https://github.com/avocado-framework/avocado/issues/5609 - zstd = find_command('zstd', False) - if zstd is False: - self.cancel('Could not find "zstd", which is required to ' - 'decompress rootfs') - self.zstd = zstd - - # Process the TuxRun specific tags, most machines work with - # reasonable defaults but we sometimes need to tweak the - # config. To avoid open coding everything we store all these - # details in the metadata for each test. - - # The tuxboot tag matches the root directory - self.tuxboot = self.get_tag('tuxboot') - - # Most Linux's use ttyS0 for their serial port - self.console = self.get_tag('console', "ttyS0") - - # Does the machine shutdown QEMU nicely on "halt" - self.shutdown = self.get_tag('shutdown') - - # The name of the kernel Image file - self.image = self.get_tag('image', "Image") - - self.root = self.get_tag('root', "vda") - - # Occasionally we need extra devices to hook things up - self.extradev = self.get_tag('extradev') - - self.qemu_img = super().get_qemu_img() - - def wait_for_console_pattern(self, success_message, vm=None): - wait_for_console_pattern(self, success_message, - failure_message='Kernel panic - not syncing', - vm=vm) - - def fetch_tuxrun_assets(self, csums=None, dt=None): - """ - Fetch the TuxBoot assets. They are stored in a standard way so we - use the per-test tags to fetch details. - """ - base_url = f"https://storage.tuxboot.com/20230331/{self.tuxboot}/" - - # empty hash if we weren't passed one - csums = {} if csums is None else csums - ksum = csums.get(self.image, None) - isum = csums.get("rootfs.ext4.zst", None) - - kernel_image = self.fetch_asset(base_url + self.image, - asset_hash = ksum, - algorithm = "sha256") - disk_image_zst = self.fetch_asset(base_url + "rootfs.ext4.zst", - asset_hash = isum, - algorithm = "sha256") - - cmd = f"{self.zstd} -d {disk_image_zst} -o {self.workdir}/rootfs.ext4" - process.run(cmd) - - if dt: - dsum = csums.get(dt, None) - dtb = self.fetch_asset(base_url + dt, - asset_hash = dsum, - algorithm = "sha256") - else: - dtb = None - - return (kernel_image, self.workdir + "/rootfs.ext4", dtb) - - def prepare_run(self, kernel, disk, drive, dtb=None, console_index=0): - """ - Setup to run and add the common parameters to the system - """ - self.vm.set_console(console_index=console_index) - - # all block devices are raw ext4's - blockdev = "driver=raw,file.driver=file," \ - + f"file.filename={disk},node-name=hd0" - - kcmd_line = self.KERNEL_COMMON_COMMAND_LINE - kcmd_line += f" root=/dev/{self.root}" - kcmd_line += f" console={self.console}" - - self.vm.add_args('-kernel', kernel, - '-append', kcmd_line, - '-blockdev', blockdev) - - # Sometimes we need extra devices attached - if self.extradev: - self.vm.add_args('-device', self.extradev) - - self.vm.add_args('-device', - f"{drive},drive=hd0") - - # Some machines need an explicit DTB - if dtb: - self.vm.add_args('-dtb', dtb) - - def run_tuxtest_tests(self, haltmsg): - """ - Wait for the system to boot up, wait for the login prompt and - then do a few things on the console. Trigger a shutdown and - wait to exit cleanly. 
- """ - self.wait_for_console_pattern("Welcome to TuxTest") - time.sleep(0.2) - exec_command(self, 'root') - time.sleep(0.2) - exec_command(self, 'cat /proc/interrupts') - time.sleep(0.1) - exec_command(self, 'cat /proc/self/maps') - time.sleep(0.1) - exec_command(self, 'uname -a') - time.sleep(0.1) - exec_command_and_wait_for_pattern(self, 'halt', haltmsg) - - # Wait for VM to shut down gracefully if it can - if self.shutdown == "nowait": - self.vm.shutdown() - else: - self.vm.wait() - - def common_tuxrun(self, - csums=None, - dt=None, - drive="virtio-blk-device", - haltmsg="reboot: System halted", - console_index=0): - """ - Common path for LKFT tests. Unless we need to do something - special with the command line we can process most things using - the tag metadata. - """ - (kernel, disk, dtb) = self.fetch_tuxrun_assets(csums, dt) - - self.prepare_run(kernel, disk, drive, dtb, console_index) - self.vm.launch() - self.run_tuxtest_tests(haltmsg) - - def ppc64_common_tuxrun(self, sums, prefix): - # add device args to command line. - self.require_netdev('user') - self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22', - '-device', 'virtio-net,netdev=vnet') - self.vm.add_args('-netdev', '{"type":"user","id":"hostnet0"}', - '-device', '{"driver":"virtio-net-pci","netdev":' - '"hostnet0","id":"net0","mac":"52:54:00:4c:e3:86",' - '"bus":"pci.0","addr":"0x9"}') - self.vm.add_args('-device', '{"driver":"qemu-xhci","p2":15,"p3":15,' - '"id":"usb","bus":"pci.0","addr":"0x2"}') - self.vm.add_args('-device', '{"driver":"virtio-scsi-pci","id":"scsi0"' - ',"bus":"pci.0","addr":"0x3"}') - self.vm.add_args('-device', '{"driver":"virtio-serial-pci","id":' - '"virtio-serial0","bus":"pci.0","addr":"0x4"}') - self.vm.add_args('-device', '{"driver":"scsi-cd","bus":"scsi0.0"' - ',"channel":0,"scsi-id":0,"lun":0,"device_id":' - '"drive-scsi0-0-0-0","id":"scsi0-0-0-0"}') - self.vm.add_args('-device', '{"driver":"virtio-balloon-pci",' - '"id":"balloon0","bus":"pci.0","addr":"0x6"}') - self.vm.add_args('-audiodev', '{"id":"audio1","driver":"none"}') - self.vm.add_args('-device', '{"driver":"usb-tablet","id":"input0"' - ',"bus":"usb.0","port":"1"}') - self.vm.add_args('-device', '{"driver":"usb-kbd","id":"input1"' - ',"bus":"usb.0","port":"2"}') - self.vm.add_args('-device', '{"driver":"VGA","id":"video0",' - '"vgamem_mb":16,"bus":"pci.0","addr":"0x7"}') - self.vm.add_args('-object', '{"qom-type":"rng-random","id":"objrng0"' - ',"filename":"/dev/urandom"}', - '-device', '{"driver":"virtio-rng-pci","rng":"objrng0"' - ',"id":"rng0","bus":"pci.0","addr":"0x8"}') - self.vm.add_args('-object', '{"qom-type":"cryptodev-backend-builtin",' - '"id":"objcrypto0","queues":1}', - '-device', '{"driver":"virtio-crypto-pci",' - '"cryptodev":"objcrypto0","id":"crypto0","bus"' - ':"pci.0","addr":"0xa"}') - self.vm.add_args('-device', '{"driver":"spapr-pci-host-bridge"' - ',"index":1,"id":"pci.1"}') - self.vm.add_args('-device', '{"driver":"spapr-vscsi","id":"scsi1"' - ',"reg":12288}') - self.vm.add_args('-m', '2G,slots=32,maxmem=4G', - '-object', 'memory-backend-ram,id=ram1,size=1G', - '-device', 'pc-dimm,id=dimm1,memdev=ram1') - - # Create a temporary qcow2 and launch the test-case - with tempfile.NamedTemporaryFile(prefix=prefix, - suffix='.qcow2') as qcow2: - process.run(self.qemu_img + ' create -f qcow2 ' + - qcow2.name + ' 1G') - - self.vm.add_args('-drive', 'file=' + qcow2.name + - ',format=qcow2,if=none,id=' - 'drive-virtio-disk1', - '-device', 'virtio-blk-pci,bus=pci.0,' - 
'addr=0xb,drive=drive-virtio-disk1,id=virtio-disk1' - ',bootindex=2') - self.common_tuxrun(csums=sums, drive="scsi-hd") - - # - # The tests themselves. The configuration is derived from how - # tuxrun invokes qemu (with minor tweaks like using -blockdev - # consistently). The tuxrun equivalent is something like: - # - # tuxrun --device qemu-{ARCH} \ - # --kernel https://storage.tuxboot.com/{TUXBOOT}/{IMAGE} - # - - def test_arm64(self): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=cpu:cortex-a57 - :avocado: tags=machine:virt - :avocado: tags=tuxboot:arm64 - :avocado: tags=console:ttyAMA0 - :avocado: tags=shutdown:nowait - """ - sums = {"Image" : - "ce95a7101a5fecebe0fe630deee6bd97b32ba41bc8754090e9ad8961ea8674c7", - "rootfs.ext4.zst" : - "bbd5ed4b9c7d3f4ca19ba71a323a843c6b585e880115df3b7765769dbd9dd061"} - self.common_tuxrun(csums=sums) - - def test_arm64be(self): - """ - :avocado: tags=arch:aarch64 - :avocado: tags=cpu:cortex-a57 - :avocado: tags=endian:big - :avocado: tags=machine:virt - :avocado: tags=tuxboot:arm64be - :avocado: tags=console:ttyAMA0 - :avocado: tags=shutdown:nowait - """ - sums = { "Image" : - "e0df4425eb2cd9ea9a283e808037f805641c65d8fcecc8f6407d8f4f339561b4", - "rootfs.ext4.zst" : - "e6ffd8813c8a335bc15728f2835f90539c84be7f8f5f691a8b01451b47fb4bd7"} - self.common_tuxrun(csums=sums) - - def test_armv5(self): - """ - :avocado: tags=arch:arm - :avocado: tags=cpu:arm926 - :avocado: tags=machine:versatilepb - :avocado: tags=tuxboot:armv5 - :avocado: tags=image:zImage - :avocado: tags=console:ttyAMA0 - :avocado: tags=shutdown:nowait - """ - sums = { "rootfs.ext4.zst" : - "17177afa74e7294da0642861f08c88ca3c836764299a54bf6d1ce276cb9712a5", - "versatile-pb.dtb" : - "0bc0c0b0858cefd3c32b385c0d66d97142ded29472a496f4f490e42fc7615b25", - "zImage" : - "c95af2f27647c12265d75e9df44c22ff5228c59855f54aaa70f41ec2842e3a4d" } - - self.common_tuxrun(csums=sums, - drive="virtio-blk-pci", - dt="versatile-pb.dtb") - - def test_armv7(self): - """ - :avocado: tags=arch:arm - :avocado: tags=cpu:cortex-a15 - :avocado: tags=machine:virt - :avocado: tags=tuxboot:armv7 - :avocado: tags=image:zImage - :avocado: tags=console:ttyAMA0 - :avocado: tags=shutdown:nowait - """ - sums = { "rootfs.ext4.zst" : - "ab1fbbeaddda1ffdd45c9405a28cd5370c20f23a7cbc809cc90dc9f243a8eb5a", - "zImage" : - "4c7a22e9f15875bec06bd2a29d822496571eb297d4f22694099ffcdb19077572" } - - self.common_tuxrun(csums=sums) - - def test_armv7be(self): - """ - :avocado: tags=arch:arm - :avocado: tags=cpu:cortex-a15 - :avocado: tags=endian:big - :avocado: tags=machine:virt - :avocado: tags=tuxboot:armv7be - :avocado: tags=image:zImage - :avocado: tags=console:ttyAMA0 - :avocado: tags=shutdown:nowait - """ - sums = {"rootfs.ext4.zst" : - "42ed46dd2d59986206c5b1f6cf35eab58fe3fd20c96b41aaa16b32f3f90a9835", - "zImage" : - "7facc62082b57af12015b08f7fdbaf2f123ba07a478367853ae12b219afc9f2f" } - - self.common_tuxrun(csums=sums) - - def test_i386(self): - """ - :avocado: tags=arch:i386 - :avocado: tags=cpu:coreduo - :avocado: tags=machine:q35 - :avocado: tags=tuxboot:i386 - :avocado: tags=image:bzImage - :avocado: tags=shutdown:nowait - """ - sums = {"bzImage" : - "a3e5b32a354729e65910f5a1ffcda7c14a6c12a55e8213fb86e277f1b76ed956", - "rootfs.ext4.zst" : - "f15e66b2bf673a210ec2a4b2e744a80530b36289e04f5388aab812b97f69754a" } - - self.common_tuxrun(csums=sums, drive="virtio-blk-pci") - - def test_mips32(self): - """ - :avocado: tags=arch:mips - :avocado: tags=machine:malta - :avocado: tags=cpu:mips32r6-generic - :avocado: tags=endian:big - 
:avocado: tags=tuxboot:mips32 - :avocado: tags=image:vmlinux - :avocado: tags=root:sda - :avocado: tags=shutdown:nowait - """ - sums = { "rootfs.ext4.zst" : - "fc3da0b4c2f38d74c6d705123bb0f633c76ed953128f9d0859378c328a6d11a0", - "vmlinux" : - "bfd2172f8b17fb32970ca0c8c58f59c5a4ca38aa5855d920be3a69b5d16e52f0" } - - self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0") - - def test_mips32el(self): - """ - :avocado: tags=arch:mipsel - :avocado: tags=machine:malta - :avocado: tags=cpu:mips32r6-generic - :avocado: tags=tuxboot:mips32el - :avocado: tags=image:vmlinux - :avocado: tags=root:sda - :avocado: tags=shutdown:nowait - """ - sums = { "rootfs.ext4.zst" : - "e799768e289fd69209c21f4dacffa11baea7543d5db101e8ce27e3bc2c41d90e", - "vmlinux" : - "8573867c68a8443db8de6d08bb33fb291c189ca2ca671471d3973a3e712096a3" } - - self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0") - - def test_mips64(self): - """ - :avocado: tags=arch:mips64 - :avocado: tags=machine:malta - :avocado: tags=tuxboot:mips64 - :avocado: tags=endian:big - :avocado: tags=image:vmlinux - :avocado: tags=root:sda - :avocado: tags=shutdown:nowait - """ - sums = { "rootfs.ext4.zst" : - "69d91eeb04df3d8d172922c6993bb37d4deeb6496def75d8580f6f9de3e431da", - "vmlinux" : - "09010e51e4b8bcbbd2494786ffb48eca78f228e96e5c5438344b0eac4029dc61" } - - self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0") - - def test_mips64el(self): - """ - :avocado: tags=arch:mips64el - :avocado: tags=machine:malta - :avocado: tags=tuxboot:mips64el - :avocado: tags=image:vmlinux - :avocado: tags=root:sda - :avocado: tags=shutdown:nowait - """ - sums = { "rootfs.ext4.zst" : - "fba585368f5915b1498ed081863474b2d7ec4e97cdd46d21bdcb2f9698f83de4", - "vmlinux" : - "d4e08965e2155c4cccce7c5f34d18fe34c636cda2f2c9844387d614950155266" } - - self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0") - - def test_ppc32(self): - """ - :avocado: tags=arch:ppc - :avocado: tags=machine:ppce500 - :avocado: tags=cpu:e500mc - :avocado: tags=tuxboot:ppc32 - :avocado: tags=image:uImage - :avocado: tags=shutdown:nowait - """ - sums = { "rootfs.ext4.zst" : - "8885b9d999cc24d679542a02e9b6aaf48f718f2050ece6b8347074b6ee41dd09", - "uImage" : - "1a68f74b860fda022fb12e03c5efece8c2b8b590d96cca37a8481a3ae0b3f81f" } - - self.common_tuxrun(csums=sums, drive="virtio-blk-pci") - - def test_ppc64(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - :avocado: tags=cpu:POWER10 - :avocado: tags=endian:big - :avocado: tags=console:hvc0 - :avocado: tags=tuxboot:ppc64 - :avocado: tags=image:vmlinux - :avocado: tags=extradev:driver=spapr-vscsi - :avocado: tags=root:sda - """ - sums = { "rootfs.ext4.zst" : - "1d953e81a4379e537fc8e41e05a0a59d9b453eef97aa03d47866c6c45b00bdff", - "vmlinux" : - "f22a9b9e924174a4c199f4c7e5d91a2339fcfe51c6eafd0907dc3e09b64ab728" } - self.ppc64_common_tuxrun(sums, prefix='tuxrun_ppc64_') - - def test_ppc64le(self): - """ - :avocado: tags=arch:ppc64 - :avocado: tags=machine:pseries - :avocado: tags=cpu:POWER10 - :avocado: tags=console:hvc0 - :avocado: tags=tuxboot:ppc64le - :avocado: tags=image:vmlinux - :avocado: tags=extradev:driver=spapr-vscsi - :avocado: tags=root:sda - """ - sums = { "rootfs.ext4.zst" : - "b442678c93fb8abe1f7d3bfa20556488de6b475c22c8fed363f42cf81a0a3906", - "vmlinux" : - "979eb61b445a010fb13e2b927126991f8ceef9c590fa2be0996c00e293e80cf2" } - self.ppc64_common_tuxrun(sums, prefix='tuxrun_ppc64le_') - - def test_riscv32(self): - """ - :avocado: tags=arch:riscv32 - :avocado: 
tags=machine:virt - :avocado: tags=tuxboot:riscv32 - """ - sums = { "Image" : - "89599407d7334de629a40e7ad6503c73670359eb5f5ae9d686353a3d6deccbd5", - "fw_jump.elf" : - "f2ef28a0b77826f79d085d3e4aa686f1159b315eff9099a37046b18936676985", - "rootfs.ext4.zst" : - "7168d296d0283238ea73cd5a775b3dd608e55e04c7b92b76ecce31bb13108cba" } - - self.common_tuxrun(csums=sums) - - def test_riscv64(self): - """ - :avocado: tags=arch:riscv64 - :avocado: tags=machine:virt - :avocado: tags=tuxboot:riscv64 - """ - sums = { "Image" : - "cd634badc65e52fb63465ec99e309c0de0369f0841b7d9486f9729e119bac25e", - "fw_jump.elf" : - "6e3373abcab4305fe151b564a4c71110d833c21f2c0a1753b7935459e36aedcf", - "rootfs.ext4.zst" : - "b18e3a3bdf27be03da0b285e84cb71bf09eca071c3a087b42884b6982ed679eb" } - - self.common_tuxrun(csums=sums) - - def test_riscv32_maxcpu(self): - """ - :avocado: tags=arch:riscv32 - :avocado: tags=machine:virt - :avocado: tags=cpu:max - :avocado: tags=tuxboot:riscv32 - """ - sums = { "Image" : - "89599407d7334de629a40e7ad6503c73670359eb5f5ae9d686353a3d6deccbd5", - "fw_jump.elf" : - "f2ef28a0b77826f79d085d3e4aa686f1159b315eff9099a37046b18936676985", - "rootfs.ext4.zst" : - "7168d296d0283238ea73cd5a775b3dd608e55e04c7b92b76ecce31bb13108cba" } - - self.common_tuxrun(csums=sums) - - def test_riscv64_maxcpu(self): - """ - :avocado: tags=arch:riscv64 - :avocado: tags=machine:virt - :avocado: tags=cpu:max - :avocado: tags=tuxboot:riscv64 - """ - sums = { "Image" : - "cd634badc65e52fb63465ec99e309c0de0369f0841b7d9486f9729e119bac25e", - "fw_jump.elf" : - "6e3373abcab4305fe151b564a4c71110d833c21f2c0a1753b7935459e36aedcf", - "rootfs.ext4.zst" : - "b18e3a3bdf27be03da0b285e84cb71bf09eca071c3a087b42884b6982ed679eb" } - - self.common_tuxrun(csums=sums) - - def test_s390(self): - """ - :avocado: tags=arch:s390x - :avocado: tags=endian:big - :avocado: tags=tuxboot:s390 - :avocado: tags=image:bzImage - :avocado: tags=shutdown:nowait - """ - sums = { "bzImage" : - "0414e98dd1c3dafff8496c9cd9c28a5f8d04553bb5ba37e906a812b48d442ef0", - "rootfs.ext4.zst" : - "88c37c32276677f873a25ab9ec6247895b8e3e6f8259134de2a616080b8ab3fc" } - - self.common_tuxrun(csums=sums, - drive="virtio-blk-ccw", - haltmsg="Requesting system halt") - - # Note: some segfaults caused by unaligned userspace access - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - def test_sh4(self): - """ - :avocado: tags=arch:sh4 - :avocado: tags=machine:r2d - :avocado: tags=cpu:sh7785 - :avocado: tags=tuxboot:sh4 - :avocado: tags=image:zImage - :avocado: tags=root:sda - :avocado: tags=console:ttySC1 - :avocado: tags=flaky - """ - sums = { "rootfs.ext4.zst" : - "3592a7a3d5a641e8b9821449e77bc43c9904a56c30d45da0694349cfd86743fd", - "zImage" : - "29d9b2aba604a0f53a5dc3b5d0f2b8e35d497de1129f8ee5139eb6fdf0db692f" } - - # The test is currently too unstable to do much in userspace - # so we skip common_tuxrun and do a minimal boot and shutdown. 
- (kernel, disk, dtb) = self.fetch_tuxrun_assets(csums=sums) - - # the console comes on the second serial port - self.prepare_run(kernel, disk, - "driver=ide-hd,bus=ide.0,unit=0", - console_index=1) - self.vm.launch() - - self.wait_for_console_pattern("Welcome to TuxTest") - time.sleep(0.1) - exec_command(self, 'root') - time.sleep(0.1) - exec_command_and_wait_for_pattern(self, 'halt', - "reboot: System halted") - - def test_sparc64(self): - """ - :avocado: tags=arch:sparc64 - :avocado: tags=tuxboot:sparc64 - :avocado: tags=image:vmlinux - :avocado: tags=root:sda - :avocado: tags=shutdown:nowait - """ - - sums = { "rootfs.ext4.zst" : - "ad2f1dc436ab51583543d25d2c210cab478645d47078d30d129a66ab0e281d76", - "vmlinux" : - "e34313e4325ff21deaa3d38a502aa09a373ef62b9bd4d7f8f29388b688225c55" } - - self.common_tuxrun(csums=sums, drive="driver=ide-hd,bus=ide.0,unit=0") - - def test_x86_64(self): - """ - :avocado: tags=arch:x86_64 - :avocado: tags=machine:q35 - :avocado: tags=cpu:Nehalem - :avocado: tags=tuxboot:x86_64 - :avocado: tags=image:bzImage - :avocado: tags=root:sda - :avocado: tags=shutdown:nowait - """ - sums = { "bzImage" : - "2bc7480a669ee9b6b82500a236aba0c54233debe98cb968268fa230f52f03461", - "rootfs.ext4.zst" : - "b72ac729769b8f51c6dffb221113c9a063c774dbe1d66af30eb593c4e9999b4b" } - - self.common_tuxrun(csums=sums, - drive="driver=ide-hd,bus=ide.0,unit=0") diff --git a/tests/avocado/version.py b/tests/avocado/version.py deleted file mode 100644 index c6139568a16..00000000000 --- a/tests/avocado/version.py +++ /dev/null @@ -1,25 +0,0 @@ -# Version check example test -# -# Copyright (c) 2018 Red Hat, Inc. -# -# Author: -# Cleber Rosa -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - - -from avocado_qemu import QemuSystemTest - - -class Version(QemuSystemTest): - """ - :avocado: tags=quick - :avocado: tags=machine:none - """ - def test_qmp_human_info_version(self): - self.vm.add_args('-nodefaults') - self.vm.launch() - res = self.vm.cmd('human-monitor-command', - command_line='info version') - self.assertRegex(res, r'^(\d+\.\d+\.\d)') diff --git a/tests/avocado/virtio-gpu.py b/tests/avocado/virtio-gpu.py deleted file mode 100644 index 6091f614a4f..00000000000 --- a/tests/avocado/virtio-gpu.py +++ /dev/null @@ -1,157 +0,0 @@ -# virtio-gpu tests -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. 
- - -from avocado_qemu import BUILD_DIR -from avocado_qemu import QemuSystemTest -from avocado_qemu import wait_for_console_pattern -from avocado_qemu import exec_command_and_wait_for_pattern -from avocado_qemu import is_readable_executable_file - -from qemu.utils import kvm_available - -import os -import socket -import subprocess - - -def pick_default_vug_bin(): - relative_path = "./contrib/vhost-user-gpu/vhost-user-gpu" - if is_readable_executable_file(relative_path): - return relative_path - - bld_dir_path = os.path.join(BUILD_DIR, relative_path) - if is_readable_executable_file(bld_dir_path): - return bld_dir_path - - -class VirtioGPUx86(QemuSystemTest): - """ - :avocado: tags=virtio-gpu - :avocado: tags=arch:x86_64 - :avocado: tags=cpu:host - """ - - KERNEL_COMMAND_LINE = "printk.time=0 console=ttyS0 rdinit=/bin/bash" - KERNEL_URL = ( - "https://archives.fedoraproject.org/pub/archive/fedora" - "/linux/releases/33/Everything/x86_64/os/images" - "/pxeboot/vmlinuz" - ) - KERNEL_HASH = '1433cfe3f2ffaa44de4ecfb57ec25dc2399cdecf' - INITRD_URL = ( - "https://archives.fedoraproject.org/pub/archive/fedora" - "/linux/releases/33/Everything/x86_64/os/images" - "/pxeboot/initrd.img" - ) - INITRD_HASH = 'c828d68a027b53e5220536585efe03412332c2d9' - - def wait_for_console_pattern(self, success_message, vm=None): - wait_for_console_pattern( - self, - success_message, - failure_message="Kernel panic - not syncing", - vm=vm, - ) - - def test_virtio_vga_virgl(self): - """ - :avocado: tags=device:virtio-vga-gl - """ - # FIXME: should check presence of virtio, virgl etc - self.require_accelerator('kvm') - - kernel_path = self.fetch_asset(self.KERNEL_URL, self.KERNEL_HASH) - initrd_path = self.fetch_asset(self.INITRD_URL, self.INITRD_HASH) - - self.vm.set_console() - self.vm.add_args("-m", "2G") - self.vm.add_args("-machine", "pc,accel=kvm") - self.vm.add_args("-device", "virtio-vga-gl") - self.vm.add_args("-display", "egl-headless") - self.vm.add_args( - "-kernel", - kernel_path, - "-initrd", - initrd_path, - "-append", - self.KERNEL_COMMAND_LINE, - ) - try: - self.vm.launch() - except: - # TODO: probably fails because we are missing the VirGL features - self.cancel("VirGL not enabled?") - - self.wait_for_console_pattern("as init process") - exec_command_and_wait_for_pattern( - self, "/usr/sbin/modprobe virtio_gpu", "" - ) - self.wait_for_console_pattern("features: +virgl +edid") - - def test_vhost_user_vga_virgl(self): - """ - :avocado: tags=device:vhost-user-vga - """ - # FIXME: should check presence of vhost-user-gpu, virgl, memfd etc - self.require_accelerator('kvm') - - vug = pick_default_vug_bin() - if not vug: - self.cancel("Could not find vhost-user-gpu") - - kernel_path = self.fetch_asset(self.KERNEL_URL, self.KERNEL_HASH) - initrd_path = self.fetch_asset(self.INITRD_URL, self.INITRD_HASH) - - # Create socketpair to connect proxy and remote processes - qemu_sock, vug_sock = socket.socketpair( - socket.AF_UNIX, socket.SOCK_STREAM - ) - os.set_inheritable(qemu_sock.fileno(), True) - os.set_inheritable(vug_sock.fileno(), True) - - self._vug_log_path = os.path.join( - self.logdir, "vhost-user-gpu.log" - ) - self._vug_log_file = open(self._vug_log_path, "wb") - self.log.info('Complete vhost-user-gpu.log file can be ' - 'found at %s', self._vug_log_path) - - vugp = subprocess.Popen( - [vug, "--virgl", "--fd=%d" % vug_sock.fileno()], - stdin=subprocess.DEVNULL, - stdout=self._vug_log_file, - stderr=subprocess.STDOUT, - shell=False, - close_fds=False, - ) - - self.vm.set_console() - 
self.vm.add_args("-m", "2G") - self.vm.add_args("-object", "memory-backend-memfd,id=mem,size=2G") - self.vm.add_args("-machine", "pc,memory-backend=mem,accel=kvm") - self.vm.add_args("-chardev", "socket,id=vug,fd=%d" % qemu_sock.fileno()) - self.vm.add_args("-device", "vhost-user-vga,chardev=vug") - self.vm.add_args("-display", "egl-headless") - self.vm.add_args( - "-kernel", - kernel_path, - "-initrd", - initrd_path, - "-append", - self.KERNEL_COMMAND_LINE, - ) - try: - self.vm.launch() - except: - # TODO: probably fails because we are missing the VirGL features - self.cancel("VirGL not enabled?") - self.wait_for_console_pattern("as init process") - exec_command_and_wait_for_pattern(self, "/usr/sbin/modprobe virtio_gpu", - "features: +virgl +edid") - self.vm.shutdown() - qemu_sock.close() - vugp.terminate() - vugp.wait() diff --git a/tests/avocado/virtio_version.py b/tests/avocado/virtio_version.py deleted file mode 100644 index afe5e828b55..00000000000 --- a/tests/avocado/virtio_version.py +++ /dev/null @@ -1,175 +0,0 @@ -""" -Check compatibility of virtio device types -""" -# Copyright (c) 2018 Red Hat, Inc. -# -# Author: -# Eduardo Habkost -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. -import sys -import os - -from qemu.machine import QEMUMachine -from avocado_qemu import QemuSystemTest - -# Virtio Device IDs: -VIRTIO_NET = 1 -VIRTIO_BLOCK = 2 -VIRTIO_CONSOLE = 3 -VIRTIO_RNG = 4 -VIRTIO_BALLOON = 5 -VIRTIO_RPMSG = 7 -VIRTIO_SCSI = 8 -VIRTIO_9P = 9 -VIRTIO_RPROC_SERIAL = 11 -VIRTIO_CAIF = 12 -VIRTIO_GPU = 16 -VIRTIO_INPUT = 18 -VIRTIO_VSOCK = 19 -VIRTIO_CRYPTO = 20 - -PCI_VENDOR_ID_REDHAT_QUMRANET = 0x1af4 - -# Device IDs for legacy/transitional devices: -PCI_LEGACY_DEVICE_IDS = { - VIRTIO_NET: 0x1000, - VIRTIO_BLOCK: 0x1001, - VIRTIO_BALLOON: 0x1002, - VIRTIO_CONSOLE: 0x1003, - VIRTIO_SCSI: 0x1004, - VIRTIO_RNG: 0x1005, - VIRTIO_9P: 0x1009, - VIRTIO_VSOCK: 0x1012, -} - -def pci_modern_device_id(virtio_devid): - return virtio_devid + 0x1040 - -def devtype_implements(vm, devtype, implements): - return devtype in [d['name'] for d in - vm.cmd('qom-list-types', implements=implements)] - -def get_pci_interfaces(vm, devtype): - interfaces = ('pci-express-device', 'conventional-pci-device') - return [i for i in interfaces if devtype_implements(vm, devtype, i)] - -class VirtioVersionCheck(QemuSystemTest): - """ - Check if virtio-version-specific device types result in the - same device tree created by `disable-modern` and - `disable-legacy`. 
- - :avocado: tags=arch:x86_64 - """ - - # just in case there are failures, show larger diff: - maxDiff = 4096 - - def run_device(self, devtype, opts=None, machine='pc'): - """ - Run QEMU with `-device DEVTYPE`, return device info from `query-pci` - """ - with QEMUMachine(self.qemu_bin) as vm: - vm.set_machine(machine) - if opts: - devtype += ',' + opts - vm.add_args('-device', '%s,id=devfortest' % (devtype)) - vm.add_args('-S') - vm.launch() - - pcibuses = vm.cmd('query-pci') - alldevs = [dev for bus in pcibuses for dev in bus['devices']] - devfortest = [dev for dev in alldevs - if dev['qdev_id'] == 'devfortest'] - return devfortest[0], get_pci_interfaces(vm, devtype) - - - def assert_devids(self, dev, devid, non_transitional=False): - self.assertEqual(dev['id']['vendor'], PCI_VENDOR_ID_REDHAT_QUMRANET) - self.assertEqual(dev['id']['device'], devid) - if non_transitional: - self.assertTrue(0x1040 <= dev['id']['device'] <= 0x107f) - self.assertGreaterEqual(dev['id']['subsystem'], 0x40) - - def check_all_variants(self, qemu_devtype, virtio_devid): - """Check if a virtio device type and its variants behave as expected""" - # Force modern mode: - dev_modern, _ = self.run_device(qemu_devtype, - 'disable-modern=off,disable-legacy=on') - self.assert_devids(dev_modern, pci_modern_device_id(virtio_devid), - non_transitional=True) - - # -non-transitional device types should be 100% equivalent to - # ,disable-modern=off,disable-legacy=on - dev_1_0, nt_ifaces = self.run_device('%s-non-transitional' % (qemu_devtype)) - self.assertEqual(dev_modern, dev_1_0) - - # Force transitional mode: - dev_trans, _ = self.run_device(qemu_devtype, - 'disable-modern=off,disable-legacy=off') - self.assert_devids(dev_trans, PCI_LEGACY_DEVICE_IDS[virtio_devid]) - - # Force legacy mode: - dev_legacy, _ = self.run_device(qemu_devtype, - 'disable-modern=on,disable-legacy=off') - self.assert_devids(dev_legacy, PCI_LEGACY_DEVICE_IDS[virtio_devid]) - - # No options: default to transitional on PC machine-type: - no_opts_pc, generic_ifaces = self.run_device(qemu_devtype) - self.assertEqual(dev_trans, no_opts_pc) - - #TODO: check if plugging on a PCI Express bus will make the - # device non-transitional - #no_opts_q35 = self.run_device(qemu_devtype, machine='q35') - #self.assertEqual(dev_modern, no_opts_q35) - - # -transitional device types should be 100% equivalent to - # ,disable-modern=off,disable-legacy=off - dev_trans, trans_ifaces = self.run_device('%s-transitional' % (qemu_devtype)) - self.assertEqual(dev_trans, dev_trans) - - # ensure the interface information is correct: - self.assertIn('conventional-pci-device', generic_ifaces) - self.assertIn('pci-express-device', generic_ifaces) - - self.assertIn('conventional-pci-device', nt_ifaces) - self.assertIn('pci-express-device', nt_ifaces) - - self.assertIn('conventional-pci-device', trans_ifaces) - self.assertNotIn('pci-express-device', trans_ifaces) - - - def test_conventional_devs(self): - self.check_all_variants('virtio-net-pci', VIRTIO_NET) - # virtio-blk requires 'driver' parameter - #self.check_all_variants('virtio-blk-pci', VIRTIO_BLOCK) - self.check_all_variants('virtio-serial-pci', VIRTIO_CONSOLE) - self.check_all_variants('virtio-rng-pci', VIRTIO_RNG) - self.check_all_variants('virtio-balloon-pci', VIRTIO_BALLOON) - self.check_all_variants('virtio-scsi-pci', VIRTIO_SCSI) - # virtio-9p requires 'fsdev' parameter - #self.check_all_variants('virtio-9p-pci', VIRTIO_9P) - - def check_modern_only(self, qemu_devtype, virtio_devid): - """Check if a modern-only virtio 
device type behaves as expected""" - # Force modern mode: - dev_modern, _ = self.run_device(qemu_devtype, - 'disable-modern=off,disable-legacy=on') - self.assert_devids(dev_modern, pci_modern_device_id(virtio_devid), - non_transitional=True) - - # No options: should be modern anyway - dev_no_opts, ifaces = self.run_device(qemu_devtype) - self.assertEqual(dev_modern, dev_no_opts) - - self.assertIn('conventional-pci-device', ifaces) - self.assertIn('pci-express-device', ifaces) - - def test_modern_only_devs(self): - self.check_modern_only('virtio-vga', VIRTIO_GPU) - self.check_modern_only('virtio-gpu-pci', VIRTIO_GPU) - self.check_modern_only('virtio-mouse-pci', VIRTIO_INPUT) - self.check_modern_only('virtio-tablet-pci', VIRTIO_INPUT) - self.check_modern_only('virtio-keyboard-pci', VIRTIO_INPUT) diff --git a/tests/avocado/vnc.py b/tests/avocado/vnc.py deleted file mode 100644 index 862c8996a83..00000000000 --- a/tests/avocado/vnc.py +++ /dev/null @@ -1,115 +0,0 @@ -# Simple functional tests for VNC functionality -# -# Copyright (c) 2018 Red Hat, Inc. -# -# Author: -# Cleber Rosa -# -# This work is licensed under the terms of the GNU GPL, version 2 or -# later. See the COPYING file in the top-level directory. - -import socket -from typing import List - -from avocado_qemu import QemuSystemTest - - -VNC_ADDR = '127.0.0.1' -VNC_PORT_START = 32768 -VNC_PORT_END = VNC_PORT_START + 1024 - - -def check_bind(port: int) -> bool: - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: - try: - sock.bind((VNC_ADDR, port)) - except OSError: - return False - - return True - - -def check_connect(port: int) -> bool: - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: - try: - sock.connect((VNC_ADDR, port)) - except ConnectionRefusedError: - return False - - return True - - -def find_free_ports(count: int) -> List[int]: - result = [] - for port in range(VNC_PORT_START, VNC_PORT_END): - if check_bind(port): - result.append(port) - if len(result) >= count: - break - assert len(result) == count - return result - - -class Vnc(QemuSystemTest): - """ - :avocado: tags=vnc,quick - :avocado: tags=machine:none - """ - def test_no_vnc(self): - self.vm.add_args('-nodefaults', '-S') - self.vm.launch() - self.assertFalse(self.vm.qmp('query-vnc')['return']['enabled']) - - def test_no_vnc_change_password(self): - self.vm.add_args('-nodefaults', '-S') - self.vm.launch() - self.assertFalse(self.vm.qmp('query-vnc')['return']['enabled']) - set_password_response = self.vm.qmp('change-vnc-password', - password='new_password') - self.assertIn('error', set_password_response) - self.assertEqual(set_password_response['error']['class'], - 'GenericError') - self.assertEqual(set_password_response['error']['desc'], - 'Could not set password') - - def test_change_password_requires_a_password(self): - self.vm.add_args('-nodefaults', '-S', '-vnc', ':0') - self.vm.launch() - self.assertTrue(self.vm.qmp('query-vnc')['return']['enabled']) - set_password_response = self.vm.qmp('change-vnc-password', - password='new_password') - self.assertIn('error', set_password_response) - self.assertEqual(set_password_response['error']['class'], - 'GenericError') - self.assertEqual(set_password_response['error']['desc'], - 'Could not set password') - - def test_change_password(self): - self.vm.add_args('-nodefaults', '-S', '-vnc', ':0,password=on') - self.vm.launch() - self.assertTrue(self.vm.qmp('query-vnc')['return']['enabled']) - self.vm.cmd('change-vnc-password', - password='new_password') - - def test_change_listen(self): - a, b, 
c = find_free_ports(3) - self.assertFalse(check_connect(a)) - self.assertFalse(check_connect(b)) - self.assertFalse(check_connect(c)) - - self.vm.add_args('-nodefaults', '-S', '-vnc', f'{VNC_ADDR}:{a - 5900}') - self.vm.launch() - self.assertEqual(self.vm.qmp('query-vnc')['return']['service'], str(a)) - self.assertTrue(check_connect(a)) - self.assertFalse(check_connect(b)) - self.assertFalse(check_connect(c)) - - self.vm.cmd('display-update', type='vnc', - addresses=[{'type': 'inet', 'host': VNC_ADDR, - 'port': str(b)}, - {'type': 'inet', 'host': VNC_ADDR, - 'port': str(c)}]) - self.assertEqual(self.vm.qmp('query-vnc')['return']['service'], str(b)) - self.assertFalse(check_connect(a)) - self.assertTrue(check_connect(b)) - self.assertTrue(check_connect(c)) diff --git a/tests/avocado/x86_cpu_model_versions.py b/tests/avocado/x86_cpu_model_versions.py deleted file mode 100644 index 11101e02b96..00000000000 --- a/tests/avocado/x86_cpu_model_versions.py +++ /dev/null @@ -1,362 +0,0 @@ -# -# Basic validation of x86 versioned CPU models and CPU model aliases -# -# Copyright (c) 2019 Red Hat Inc -# -# Author: -# Eduardo Habkost -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, see . -# - - -import avocado_qemu -import re - -class X86CPUModelAliases(avocado_qemu.QemuSystemTest): - """ - Validation of PC CPU model versions and CPU model aliases - - :avocado: tags=arch:x86_64 - """ - def validate_aliases(self, cpus): - for c in cpus.values(): - if 'alias-of' in c: - # all aliases must point to a valid CPU model name: - self.assertIn(c['alias-of'], cpus, - '%s.alias-of (%s) is not a valid CPU model name' % (c['name'], c['alias-of'])) - # aliases must not point to aliases - self.assertNotIn('alias-of', cpus[c['alias-of']], - '%s.alias-of (%s) points to another alias' % (c['name'], c['alias-of'])) - - # aliases must not be static - self.assertFalse(c['static']) - - def validate_variant_aliases(self, cpus): - # -noTSX, -IBRS and -IBPB variants of CPU models are special: - # they shouldn't have their own versions: - self.assertNotIn("Haswell-noTSX-v1", cpus, - "Haswell-noTSX shouldn't be versioned") - self.assertNotIn("Broadwell-noTSX-v1", cpus, - "Broadwell-noTSX shouldn't be versioned") - self.assertNotIn("Nehalem-IBRS-v1", cpus, - "Nehalem-IBRS shouldn't be versioned") - self.assertNotIn("Westmere-IBRS-v1", cpus, - "Westmere-IBRS shouldn't be versioned") - self.assertNotIn("SandyBridge-IBRS-v1", cpus, - "SandyBridge-IBRS shouldn't be versioned") - self.assertNotIn("IvyBridge-IBRS-v1", cpus, - "IvyBridge-IBRS shouldn't be versioned") - self.assertNotIn("Haswell-noTSX-IBRS-v1", cpus, - "Haswell-noTSX-IBRS shouldn't be versioned") - self.assertNotIn("Haswell-IBRS-v1", cpus, - "Haswell-IBRS shouldn't be versioned") - self.assertNotIn("Broadwell-noTSX-IBRS-v1", cpus, - "Broadwell-noTSX-IBRS shouldn't be versioned") - self.assertNotIn("Broadwell-IBRS-v1", cpus, - "Broadwell-IBRS shouldn't be versioned") - 
self.assertNotIn("Skylake-Client-IBRS-v1", cpus, - "Skylake-Client-IBRS shouldn't be versioned") - self.assertNotIn("Skylake-Server-IBRS-v1", cpus, - "Skylake-Server-IBRS shouldn't be versioned") - self.assertNotIn("EPYC-IBPB-v1", cpus, - "EPYC-IBPB shouldn't be versioned") - - def test_4_0_alias_compatibility(self): - """ - Check if pc-*-4.0 unversioned CPU model won't be reported as aliases - - :avocado: tags=machine:pc-i440fx-4.0 - """ - # pc-*-4.0 won't expose non-versioned CPU models as aliases - # We do this to help management software to keep compatibility - # with older QEMU versions that didn't have the versioned CPU model - self.vm.add_args('-S') - self.vm.launch() - cpus = dict((m['name'], m) for m in - self.vm.cmd('query-cpu-definitions')) - - self.assertFalse(cpus['Cascadelake-Server']['static'], - 'unversioned Cascadelake-Server CPU model must not be static') - self.assertNotIn('alias-of', cpus['Cascadelake-Server'], - 'Cascadelake-Server must not be an alias') - self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'], - 'Cascadelake-Server-v1 must not be an alias') - - self.assertFalse(cpus['qemu64']['static'], - 'unversioned qemu64 CPU model must not be static') - self.assertNotIn('alias-of', cpus['qemu64'], - 'qemu64 must not be an alias') - self.assertNotIn('alias-of', cpus['qemu64-v1'], - 'qemu64-v1 must not be an alias') - - self.validate_variant_aliases(cpus) - - # On pc-*-4.0, no CPU model should be reported as an alias: - for name,c in cpus.items(): - self.assertNotIn('alias-of', c, "%s shouldn't be an alias" % (name)) - - def test_4_1_alias(self): - """ - Check if unversioned CPU model is an alias pointing to right version - - :avocado: tags=machine:pc-i440fx-4.1 - """ - self.vm.add_args('-S') - self.vm.launch() - - cpus = dict((m['name'], m) for m in - self.vm.cmd('query-cpu-definitions')) - - self.assertFalse(cpus['Cascadelake-Server']['static'], - 'unversioned Cascadelake-Server CPU model must not be static') - self.assertEqual(cpus['Cascadelake-Server'].get('alias-of'), - 'Cascadelake-Server-v1', - 'Cascadelake-Server must be an alias of Cascadelake-Server-v1') - self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'], - 'Cascadelake-Server-v1 must not be an alias') - - self.assertFalse(cpus['qemu64']['static'], - 'unversioned qemu64 CPU model must not be static') - self.assertEqual(cpus['qemu64'].get('alias-of'), 'qemu64-v1', - 'qemu64 must be an alias of qemu64-v1') - self.assertNotIn('alias-of', cpus['qemu64-v1'], - 'qemu64-v1 must not be an alias') - - self.validate_variant_aliases(cpus) - - # On pc-*-4.1, -noTSX and -IBRS models should be aliases: - self.assertEqual(cpus["Haswell"].get('alias-of'), - "Haswell-v1", - "Haswell must be an alias") - self.assertEqual(cpus["Haswell-noTSX"].get('alias-of'), - "Haswell-v2", - "Haswell-noTSX must be an alias") - self.assertEqual(cpus["Haswell-IBRS"].get('alias-of'), - "Haswell-v3", - "Haswell-IBRS must be an alias") - self.assertEqual(cpus["Haswell-noTSX-IBRS"].get('alias-of'), - "Haswell-v4", - "Haswell-noTSX-IBRS must be an alias") - - self.assertEqual(cpus["Broadwell"].get('alias-of'), - "Broadwell-v1", - "Broadwell must be an alias") - self.assertEqual(cpus["Broadwell-noTSX"].get('alias-of'), - "Broadwell-v2", - "Broadwell-noTSX must be an alias") - self.assertEqual(cpus["Broadwell-IBRS"].get('alias-of'), - "Broadwell-v3", - "Broadwell-IBRS must be an alias") - self.assertEqual(cpus["Broadwell-noTSX-IBRS"].get('alias-of'), - "Broadwell-v4", - "Broadwell-noTSX-IBRS must be an alias") - - 
self.assertEqual(cpus["Nehalem"].get('alias-of'), - "Nehalem-v1", - "Nehalem must be an alias") - self.assertEqual(cpus["Nehalem-IBRS"].get('alias-of'), - "Nehalem-v2", - "Nehalem-IBRS must be an alias") - - self.assertEqual(cpus["Westmere"].get('alias-of'), - "Westmere-v1", - "Westmere must be an alias") - self.assertEqual(cpus["Westmere-IBRS"].get('alias-of'), - "Westmere-v2", - "Westmere-IBRS must be an alias") - - self.assertEqual(cpus["SandyBridge"].get('alias-of'), - "SandyBridge-v1", - "SandyBridge must be an alias") - self.assertEqual(cpus["SandyBridge-IBRS"].get('alias-of'), - "SandyBridge-v2", - "SandyBridge-IBRS must be an alias") - - self.assertEqual(cpus["IvyBridge"].get('alias-of'), - "IvyBridge-v1", - "IvyBridge must be an alias") - self.assertEqual(cpus["IvyBridge-IBRS"].get('alias-of'), - "IvyBridge-v2", - "IvyBridge-IBRS must be an alias") - - self.assertEqual(cpus["Skylake-Client"].get('alias-of'), - "Skylake-Client-v1", - "Skylake-Client must be an alias") - self.assertEqual(cpus["Skylake-Client-IBRS"].get('alias-of'), - "Skylake-Client-v2", - "Skylake-Client-IBRS must be an alias") - - self.assertEqual(cpus["Skylake-Server"].get('alias-of'), - "Skylake-Server-v1", - "Skylake-Server must be an alias") - self.assertEqual(cpus["Skylake-Server-IBRS"].get('alias-of'), - "Skylake-Server-v2", - "Skylake-Server-IBRS must be an alias") - - self.assertEqual(cpus["EPYC"].get('alias-of'), - "EPYC-v1", - "EPYC must be an alias") - self.assertEqual(cpus["EPYC-IBPB"].get('alias-of'), - "EPYC-v2", - "EPYC-IBPB must be an alias") - - self.validate_aliases(cpus) - - def test_none_alias(self): - """ - Check if unversioned CPU model is an alias pointing to some version - - :avocado: tags=machine:none - """ - self.vm.add_args('-S') - self.vm.launch() - - cpus = dict((m['name'], m) for m in - self.vm.cmd('query-cpu-definitions')) - - self.assertFalse(cpus['Cascadelake-Server']['static'], - 'unversioned Cascadelake-Server CPU model must not be static') - self.assertTrue(re.match('Cascadelake-Server-v[0-9]+', cpus['Cascadelake-Server']['alias-of']), - 'Cascadelake-Server must be an alias of versioned CPU model') - self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'], - 'Cascadelake-Server-v1 must not be an alias') - - self.assertFalse(cpus['qemu64']['static'], - 'unversioned qemu64 CPU model must not be static') - self.assertTrue(re.match('qemu64-v[0-9]+', cpus['qemu64']['alias-of']), - 'qemu64 must be an alias of versioned CPU model') - self.assertNotIn('alias-of', cpus['qemu64-v1'], - 'qemu64-v1 must not be an alias') - - self.validate_aliases(cpus) - - -class CascadelakeArchCapabilities(avocado_qemu.QemuSystemTest): - """ - Validation of Cascadelake arch-capabilities - - :avocado: tags=arch:x86_64 - """ - def get_cpu_prop(self, prop): - cpu_path = self.vm.cmd('query-cpus-fast')[0].get('qom-path') - return self.vm.cmd('qom-get', path=cpu_path, property=prop) - - def test_4_1(self): - """ - :avocado: tags=machine:pc-i440fx-4.1 - :avocado: tags=cpu:Cascadelake-Server - """ - # machine-type only: - self.vm.add_args('-S') - self.set_vm_arg('-cpu', - 'Cascadelake-Server,x-force-features=on,check=off,' - 'enforce=off') - self.vm.launch() - self.assertFalse(self.get_cpu_prop('arch-capabilities'), - 'pc-i440fx-4.1 + Cascadelake-Server should not have arch-capabilities') - - def test_4_0(self): - """ - :avocado: tags=machine:pc-i440fx-4.0 - :avocado: tags=cpu:Cascadelake-Server - """ - self.vm.add_args('-S') - self.set_vm_arg('-cpu', - 'Cascadelake-Server,x-force-features=on,check=off,' - 
'enforce=off') - self.vm.launch() - self.assertFalse(self.get_cpu_prop('arch-capabilities'), - 'pc-i440fx-4.0 + Cascadelake-Server should not have arch-capabilities') - - def test_set_4_0(self): - """ - :avocado: tags=machine:pc-i440fx-4.0 - :avocado: tags=cpu:Cascadelake-Server - """ - # command line must override machine-type if CPU model is not versioned: - self.vm.add_args('-S') - self.set_vm_arg('-cpu', - 'Cascadelake-Server,x-force-features=on,check=off,' - 'enforce=off,+arch-capabilities') - self.vm.launch() - self.assertTrue(self.get_cpu_prop('arch-capabilities'), - 'pc-i440fx-4.0 + Cascadelake-Server,+arch-capabilities should have arch-capabilities') - - def test_unset_4_1(self): - """ - :avocado: tags=machine:pc-i440fx-4.1 - :avocado: tags=cpu:Cascadelake-Server - """ - self.vm.add_args('-S') - self.set_vm_arg('-cpu', - 'Cascadelake-Server,x-force-features=on,check=off,' - 'enforce=off,-arch-capabilities') - self.vm.launch() - self.assertFalse(self.get_cpu_prop('arch-capabilities'), - 'pc-i440fx-4.1 + Cascadelake-Server,-arch-capabilities should not have arch-capabilities') - - def test_v1_4_0(self): - """ - :avocado: tags=machine:pc-i440fx-4.0 - :avocado: tags=cpu:Cascadelake-Server - """ - # versioned CPU model overrides machine-type: - self.vm.add_args('-S') - self.set_vm_arg('-cpu', - 'Cascadelake-Server-v1,x-force-features=on,check=off,' - 'enforce=off') - self.vm.launch() - self.assertFalse(self.get_cpu_prop('arch-capabilities'), - 'pc-i440fx-4.0 + Cascadelake-Server-v1 should not have arch-capabilities') - - def test_v2_4_0(self): - """ - :avocado: tags=machine:pc-i440fx-4.0 - :avocado: tags=cpu:Cascadelake-Server - """ - self.vm.add_args('-S') - self.set_vm_arg('-cpu', - 'Cascadelake-Server-v2,x-force-features=on,check=off,' - 'enforce=off') - self.vm.launch() - self.assertTrue(self.get_cpu_prop('arch-capabilities'), - 'pc-i440fx-4.0 + Cascadelake-Server-v2 should have arch-capabilities') - - def test_v1_set_4_0(self): - """ - :avocado: tags=machine:pc-i440fx-4.0 - :avocado: tags=cpu:Cascadelake-Server - """ - # command line must override machine-type and versioned CPU model: - self.vm.add_args('-S') - self.set_vm_arg('-cpu', - 'Cascadelake-Server-v1,x-force-features=on,check=off,' - 'enforce=off,+arch-capabilities') - self.vm.launch() - self.assertTrue(self.get_cpu_prop('arch-capabilities'), - 'pc-i440fx-4.0 + Cascadelake-Server-v1,+arch-capabilities should have arch-capabilities') - - def test_v2_unset_4_1(self): - """ - :avocado: tags=machine:pc-i440fx-4.1 - :avocado: tags=cpu:Cascadelake-Server - """ - self.vm.add_args('-S') - self.set_vm_arg('-cpu', - 'Cascadelake-Server-v2,x-force-features=on,check=off,' - 'enforce=off,-arch-capabilities') - self.vm.launch() - self.assertFalse(self.get_cpu_prop('arch-capabilities'), - 'pc-i440fx-4.1 + Cascadelake-Server-v2,-arch-capabilities should not have arch-capabilities') diff --git a/tests/bench/benchmark-crypto-akcipher.c b/tests/bench/benchmark-crypto-akcipher.c index 5e68cb0a1c4..0a6e5db1d6c 100644 --- a/tests/bench/benchmark-crypto-akcipher.c +++ b/tests/bench/benchmark-crypto-akcipher.c @@ -16,19 +16,19 @@ #include "crypto/akcipher.h" #include "standard-headers/linux/virtio_crypto.h" -#include "test_akcipher_keys.inc" +#include "test_akcipher_keys.c.inc" static QCryptoAkCipher *create_rsa_akcipher(const uint8_t *priv_key, size_t keylen, - QCryptoRSAPaddingAlgorithm padding, - QCryptoHashAlgorithm hash) + QCryptoRSAPaddingAlgo padding, + QCryptoHashAlgo hash) { QCryptoAkCipherOptions opt; - opt.alg = 
QCRYPTO_AKCIPHER_ALG_RSA; + opt.alg = QCRYPTO_AK_CIPHER_ALGO_RSA; opt.u.rsa.padding_alg = padding; opt.u.rsa.hash_alg = hash; - return qcrypto_akcipher_new(&opt, QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + return qcrypto_akcipher_new(&opt, QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE, priv_key, keylen, &error_abort); } @@ -39,8 +39,8 @@ static void test_rsa_speed(const uint8_t *priv_key, size_t keylen, #define SHA1_DGST_LEN 20 #define SIGN_TIMES 10000 #define VERIFY_TIMES 100000 -#define PADDING QCRYPTO_RSA_PADDING_ALG_PKCS1 -#define HASH QCRYPTO_HASH_ALG_SHA1 +#define PADDING QCRYPTO_RSA_PADDING_ALGO_PKCS1 +#define HASH QCRYPTO_HASH_ALGO_SHA1 g_autoptr(QCryptoAkCipher) rsa = create_rsa_akcipher(priv_key, keylen, PADDING, HASH); @@ -53,8 +53,8 @@ static void test_rsa_speed(const uint8_t *priv_key, size_t keylen, signature = g_new0(uint8_t, key_size / BYTE); g_test_message("benchmark rsa%zu (%s-%s) sign...", key_size, - QCryptoRSAPaddingAlgorithm_str(PADDING), - QCryptoHashAlgorithm_str(HASH)); + QCryptoRSAPaddingAlgo_str(PADDING), + QCryptoHashAlgo_str(HASH)); g_test_timer_start(); for (count = 0; count < SIGN_TIMES; ++count) { g_assert(qcrypto_akcipher_sign(rsa, dgst, SHA1_DGST_LEN, @@ -64,14 +64,14 @@ static void test_rsa_speed(const uint8_t *priv_key, size_t keylen, g_test_timer_elapsed(); g_test_message("rsa%zu (%s-%s) sign %zu times in %.2f seconds," " %.2f times/sec ", - key_size, QCryptoRSAPaddingAlgorithm_str(PADDING), - QCryptoHashAlgorithm_str(HASH), + key_size, QCryptoRSAPaddingAlgo_str(PADDING), + QCryptoHashAlgo_str(HASH), count, g_test_timer_last(), (double)count / g_test_timer_last()); g_test_message("benchmark rsa%zu (%s-%s) verification...", key_size, - QCryptoRSAPaddingAlgorithm_str(PADDING), - QCryptoHashAlgorithm_str(HASH)); + QCryptoRSAPaddingAlgo_str(PADDING), + QCryptoHashAlgo_str(HASH)); g_test_timer_start(); for (count = 0; count < VERIFY_TIMES; ++count) { g_assert(qcrypto_akcipher_verify(rsa, signature, key_size / BYTE, @@ -81,8 +81,8 @@ static void test_rsa_speed(const uint8_t *priv_key, size_t keylen, g_test_timer_elapsed(); g_test_message("rsa%zu (%s-%s) verify %zu times in %.2f seconds," " %.2f times/sec ", - key_size, QCryptoRSAPaddingAlgorithm_str(PADDING), - QCryptoHashAlgorithm_str(HASH), + key_size, QCryptoRSAPaddingAlgo_str(PADDING), + QCryptoHashAlgo_str(HASH), count, g_test_timer_last(), (double)count / g_test_timer_last()); } diff --git a/tests/bench/benchmark-crypto-cipher.c b/tests/bench/benchmark-crypto-cipher.c index c04f0a0fba3..889a29ba5c2 100644 --- a/tests/bench/benchmark-crypto-cipher.c +++ b/tests/bench/benchmark-crypto-cipher.c @@ -17,7 +17,7 @@ static void test_cipher_speed(size_t chunk_size, QCryptoCipherMode mode, - QCryptoCipherAlgorithm alg) + QCryptoCipherAlgo alg) { QCryptoCipher *cipher; Error *err = NULL; @@ -71,7 +71,7 @@ static void test_cipher_speed(size_t chunk_size, g_test_timer_elapsed(); g_test_message("enc(%s-%s) chunk %zu bytes %.2f MB/sec ", - QCryptoCipherAlgorithm_str(alg), + QCryptoCipherAlgo_str(alg), QCryptoCipherMode_str(mode), chunk_size, (double)total / MiB / g_test_timer_last()); @@ -88,7 +88,7 @@ static void test_cipher_speed(size_t chunk_size, g_test_timer_elapsed(); g_test_message("dec(%s-%s) chunk %zu bytes %.2f MB/sec ", - QCryptoCipherAlgorithm_str(alg), + QCryptoCipherAlgo_str(alg), QCryptoCipherMode_str(mode), chunk_size, (double)total / MiB / g_test_timer_last()); @@ -105,7 +105,7 @@ static void test_cipher_speed_ecb_aes_128(const void *opaque) size_t chunk_size = (size_t)opaque; test_cipher_speed(chunk_size, 
QCRYPTO_CIPHER_MODE_ECB, - QCRYPTO_CIPHER_ALG_AES_128); + QCRYPTO_CIPHER_ALGO_AES_128); } static void test_cipher_speed_ecb_aes_256(const void *opaque) @@ -113,7 +113,7 @@ static void test_cipher_speed_ecb_aes_256(const void *opaque) size_t chunk_size = (size_t)opaque; test_cipher_speed(chunk_size, QCRYPTO_CIPHER_MODE_ECB, - QCRYPTO_CIPHER_ALG_AES_256); + QCRYPTO_CIPHER_ALGO_AES_256); } static void test_cipher_speed_cbc_aes_128(const void *opaque) @@ -121,7 +121,7 @@ static void test_cipher_speed_cbc_aes_128(const void *opaque) size_t chunk_size = (size_t)opaque; test_cipher_speed(chunk_size, QCRYPTO_CIPHER_MODE_CBC, - QCRYPTO_CIPHER_ALG_AES_128); + QCRYPTO_CIPHER_ALGO_AES_128); } static void test_cipher_speed_cbc_aes_256(const void *opaque) @@ -129,7 +129,7 @@ static void test_cipher_speed_cbc_aes_256(const void *opaque) size_t chunk_size = (size_t)opaque; test_cipher_speed(chunk_size, QCRYPTO_CIPHER_MODE_CBC, - QCRYPTO_CIPHER_ALG_AES_256); + QCRYPTO_CIPHER_ALGO_AES_256); } static void test_cipher_speed_ctr_aes_128(const void *opaque) @@ -137,7 +137,7 @@ static void test_cipher_speed_ctr_aes_128(const void *opaque) size_t chunk_size = (size_t)opaque; test_cipher_speed(chunk_size, QCRYPTO_CIPHER_MODE_CTR, - QCRYPTO_CIPHER_ALG_AES_128); + QCRYPTO_CIPHER_ALGO_AES_128); } static void test_cipher_speed_ctr_aes_256(const void *opaque) @@ -145,7 +145,7 @@ static void test_cipher_speed_ctr_aes_256(const void *opaque) size_t chunk_size = (size_t)opaque; test_cipher_speed(chunk_size, QCRYPTO_CIPHER_MODE_CTR, - QCRYPTO_CIPHER_ALG_AES_256); + QCRYPTO_CIPHER_ALGO_AES_256); } static void test_cipher_speed_xts_aes_128(const void *opaque) @@ -153,7 +153,7 @@ static void test_cipher_speed_xts_aes_128(const void *opaque) size_t chunk_size = (size_t)opaque; test_cipher_speed(chunk_size, QCRYPTO_CIPHER_MODE_XTS, - QCRYPTO_CIPHER_ALG_AES_128); + QCRYPTO_CIPHER_ALGO_AES_128); } static void test_cipher_speed_xts_aes_256(const void *opaque) @@ -161,7 +161,7 @@ static void test_cipher_speed_xts_aes_256(const void *opaque) size_t chunk_size = (size_t)opaque; test_cipher_speed(chunk_size, QCRYPTO_CIPHER_MODE_XTS, - QCRYPTO_CIPHER_ALG_AES_256); + QCRYPTO_CIPHER_ALGO_AES_256); } diff --git a/tests/bench/benchmark-crypto-hash.c b/tests/bench/benchmark-crypto-hash.c index 927b00bb4d6..252098a69d2 100644 --- a/tests/bench/benchmark-crypto-hash.c +++ b/tests/bench/benchmark-crypto-hash.c @@ -17,7 +17,7 @@ typedef struct QCryptoHashOpts { size_t chunk_size; - QCryptoHashAlgorithm alg; + QCryptoHashAlgo alg; } QCryptoHashOpts; static void test_hash_speed(const void *opaque) @@ -49,7 +49,7 @@ static void test_hash_speed(const void *opaque) g_test_timer_elapsed(); g_test_message("hash(%s): chunk %zu bytes %.2f MB/sec", - QCryptoHashAlgorithm_str(opts->alg), + QCryptoHashAlgo_str(opts->alg), opts->chunk_size, total / g_test_timer_last()); g_free(out); @@ -65,14 +65,14 @@ int main(int argc, char **argv) #define TEST_ONE(a, c) \ QCryptoHashOpts opts ## a ## c = { \ - .alg = QCRYPTO_HASH_ALG_ ## a, .chunk_size = c, \ + .alg = QCRYPTO_HASH_ALGO_ ## a, .chunk_size = c, \ }; \ memset(name, 0 , sizeof(name)); \ snprintf(name, sizeof(name), \ "/crypto/benchmark/hash/%s/bufsize-%d", \ - QCryptoHashAlgorithm_str(QCRYPTO_HASH_ALG_ ## a), \ + QCryptoHashAlgo_str(QCRYPTO_HASH_ALGO_ ## a), \ c); \ - if (qcrypto_hash_supports(QCRYPTO_HASH_ALG_ ## a)) \ + if (qcrypto_hash_supports(QCRYPTO_HASH_ALGO_ ## a)) \ g_test_add_data_func(name, \ &opts ## a ## c, \ test_hash_speed); diff --git a/tests/bench/benchmark-crypto-hmac.c 
b/tests/bench/benchmark-crypto-hmac.c index 5cca6367897..d51de98f472 100644 --- a/tests/bench/benchmark-crypto-hmac.c +++ b/tests/bench/benchmark-crypto-hmac.c @@ -28,7 +28,7 @@ static void test_hmac_speed(const void *opaque) Error *err = NULL; int ret; - if (!qcrypto_hmac_supports(QCRYPTO_HASH_ALG_SHA256)) { + if (!qcrypto_hmac_supports(QCRYPTO_HASH_ALGO_SHA256)) { return; } @@ -40,7 +40,7 @@ static void test_hmac_speed(const void *opaque) g_test_timer_start(); do { - hmac = qcrypto_hmac_new(QCRYPTO_HASH_ALG_SHA256, + hmac = qcrypto_hmac_new(QCRYPTO_HASH_ALGO_SHA256, (const uint8_t *)KEY, strlen(KEY), &err); g_assert(err == NULL); g_assert(hmac != NULL); @@ -56,7 +56,7 @@ static void test_hmac_speed(const void *opaque) total /= MiB; g_test_message("hmac(%s): chunk %zu bytes %.2f MB/sec", - QCryptoHashAlgorithm_str(QCRYPTO_HASH_ALG_SHA256), + QCryptoHashAlgo_str(QCRYPTO_HASH_ALGO_SHA256), chunk_size, total / g_test_timer_last()); g_free(out); diff --git a/tests/bench/test_akcipher_keys.inc b/tests/bench/test_akcipher_keys.c.inc similarity index 100% rename from tests/bench/test_akcipher_keys.inc rename to tests/bench/test_akcipher_keys.c.inc diff --git a/tests/data/acpi/aarch64/virt/APIC.its_off b/tests/data/acpi/aarch64/virt/APIC.its_off new file mode 100644 index 00000000000..6130cb7d071 Binary files /dev/null and b/tests/data/acpi/aarch64/virt/APIC.its_off differ diff --git a/tests/data/acpi/aarch64/virt/DSDT b/tests/data/acpi/aarch64/virt/DSDT index 36d3e5d5a5e..18d97e8f229 100644 Binary files a/tests/data/acpi/aarch64/virt/DSDT and b/tests/data/acpi/aarch64/virt/DSDT differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.acpihmatvirt b/tests/data/acpi/aarch64/virt/DSDT.acpihmatvirt index e6154d0355f..2cef095bcc1 100644 Binary files a/tests/data/acpi/aarch64/virt/DSDT.acpihmatvirt and b/tests/data/acpi/aarch64/virt/DSDT.acpihmatvirt differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.acpipcihp b/tests/data/acpi/aarch64/virt/DSDT.acpipcihp new file mode 100644 index 00000000000..8d55a877a40 Binary files /dev/null and b/tests/data/acpi/aarch64/virt/DSDT.acpipcihp differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.hpoffacpiindex b/tests/data/acpi/aarch64/virt/DSDT.hpoffacpiindex new file mode 100644 index 00000000000..970d43f68bc Binary files /dev/null and b/tests/data/acpi/aarch64/virt/DSDT.hpoffacpiindex differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.memhp b/tests/data/acpi/aarch64/virt/DSDT.memhp index 33f011d6b63..372ca3d7fb1 100644 Binary files a/tests/data/acpi/aarch64/virt/DSDT.memhp and b/tests/data/acpi/aarch64/virt/DSDT.memhp differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.pxb b/tests/data/acpi/aarch64/virt/DSDT.pxb index c0fdc6e9c13..c2779882494 100644 Binary files a/tests/data/acpi/aarch64/virt/DSDT.pxb and b/tests/data/acpi/aarch64/virt/DSDT.pxb differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.smmuv3-dev b/tests/data/acpi/aarch64/virt/DSDT.smmuv3-dev new file mode 100644 index 00000000000..53d4c07f423 Binary files /dev/null and b/tests/data/acpi/aarch64/virt/DSDT.smmuv3-dev differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.smmuv3-legacy b/tests/data/acpi/aarch64/virt/DSDT.smmuv3-legacy new file mode 100644 index 00000000000..53d4c07f423 Binary files /dev/null and b/tests/data/acpi/aarch64/virt/DSDT.smmuv3-legacy differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.topology b/tests/data/acpi/aarch64/virt/DSDT.topology index 029d03eecc4..ebbeedc1ed3 100644 Binary files a/tests/data/acpi/aarch64/virt/DSDT.topology and 
b/tests/data/acpi/aarch64/virt/DSDT.topology differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.viot b/tests/data/acpi/aarch64/virt/DSDT.viot new file mode 100644 index 00000000000..b897d667971 Binary files /dev/null and b/tests/data/acpi/aarch64/virt/DSDT.viot differ diff --git a/tests/data/acpi/aarch64/virt/HEST b/tests/data/acpi/aarch64/virt/HEST new file mode 100644 index 00000000000..4c5d8c5b5da Binary files /dev/null and b/tests/data/acpi/aarch64/virt/HEST differ diff --git a/tests/data/acpi/aarch64/virt/IORT.its_off b/tests/data/acpi/aarch64/virt/IORT.its_off new file mode 100644 index 00000000000..c10da4e61dd Binary files /dev/null and b/tests/data/acpi/aarch64/virt/IORT.its_off differ diff --git a/tests/data/acpi/aarch64/virt/IORT.smmuv3-dev b/tests/data/acpi/aarch64/virt/IORT.smmuv3-dev new file mode 100644 index 00000000000..67be268f62a Binary files /dev/null and b/tests/data/acpi/aarch64/virt/IORT.smmuv3-dev differ diff --git a/tests/data/acpi/aarch64/virt/IORT.smmuv3-legacy b/tests/data/acpi/aarch64/virt/IORT.smmuv3-legacy new file mode 100644 index 00000000000..41981a449fc Binary files /dev/null and b/tests/data/acpi/aarch64/virt/IORT.smmuv3-legacy differ diff --git a/tests/data/acpi/aarch64/virt/PPTT b/tests/data/acpi/aarch64/virt/PPTT index 7a1258ecf12..15598a9b8a3 100644 Binary files a/tests/data/acpi/aarch64/virt/PPTT and b/tests/data/acpi/aarch64/virt/PPTT differ diff --git a/tests/data/acpi/aarch64/virt/PPTT.acpihmatvirt b/tests/data/acpi/aarch64/virt/PPTT.acpihmatvirt index 4eef303a5b6..7b613ddaf4b 100644 Binary files a/tests/data/acpi/aarch64/virt/PPTT.acpihmatvirt and b/tests/data/acpi/aarch64/virt/PPTT.acpihmatvirt differ diff --git a/tests/data/acpi/aarch64/virt/PPTT.topology b/tests/data/acpi/aarch64/virt/PPTT.topology index 3fbcae5ff08..6b864f035c9 100644 Binary files a/tests/data/acpi/aarch64/virt/PPTT.topology and b/tests/data/acpi/aarch64/virt/PPTT.topology differ diff --git a/tests/data/acpi/aarch64/virt/SSDT.memhp b/tests/data/acpi/aarch64/virt/SSDT.memhp index fb3dcde5a10..1deb1d28322 100644 Binary files a/tests/data/acpi/aarch64/virt/SSDT.memhp and b/tests/data/acpi/aarch64/virt/SSDT.memhp differ diff --git a/tests/data/acpi/disassemle-aml.sh b/tests/data/acpi/disassemle-aml.sh index 253b7620a07..89561d233d2 100755 --- a/tests/data/acpi/disassemle-aml.sh +++ b/tests/data/acpi/disassemle-aml.sh @@ -14,7 +14,7 @@ while getopts "o:" arg; do esac done -for machine in tests/data/acpi/* +for machine in tests/data/acpi/*/* do if [[ ! 
-d "$machine" ]]; then diff --git a/tests/data/acpi/loongarch64/virt/APIC b/tests/data/acpi/loongarch64/virt/APIC new file mode 100644 index 00000000000..3477789f422 Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/APIC differ diff --git a/tests/data/acpi/loongarch64/virt/APIC.topology b/tests/data/acpi/loongarch64/virt/APIC.topology new file mode 100644 index 00000000000..da0089d57f6 Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/APIC.topology differ diff --git a/tests/data/acpi/loongarch64/virt/DSDT b/tests/data/acpi/loongarch64/virt/DSDT new file mode 100644 index 00000000000..b31841aec6e Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/DSDT differ diff --git a/tests/data/acpi/loongarch64/virt/DSDT.memhp b/tests/data/acpi/loongarch64/virt/DSDT.memhp new file mode 100644 index 00000000000..e291200fc91 Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/DSDT.memhp differ diff --git a/tests/data/acpi/loongarch64/virt/DSDT.numamem b/tests/data/acpi/loongarch64/virt/DSDT.numamem new file mode 100644 index 00000000000..07923ac3958 Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/DSDT.numamem differ diff --git a/tests/data/acpi/loongarch64/virt/DSDT.topology b/tests/data/acpi/loongarch64/virt/DSDT.topology new file mode 100644 index 00000000000..6dfbb495f88 Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/DSDT.topology differ diff --git a/tests/data/acpi/loongarch64/virt/FACP b/tests/data/acpi/loongarch64/virt/FACP new file mode 100644 index 00000000000..04d8d4c26fa Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/FACP differ diff --git a/tests/data/acpi/loongarch64/virt/MCFG b/tests/data/acpi/loongarch64/virt/MCFG new file mode 100644 index 00000000000..5f93b05abe1 Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/MCFG differ diff --git a/tests/data/acpi/loongarch64/virt/PPTT b/tests/data/acpi/loongarch64/virt/PPTT new file mode 100644 index 00000000000..15598a9b8a3 Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/PPTT differ diff --git a/tests/data/acpi/loongarch64/virt/PPTT.topology b/tests/data/acpi/loongarch64/virt/PPTT.topology new file mode 100644 index 00000000000..7fc92984694 Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/PPTT.topology differ diff --git a/tests/qapi-schema/doc-bad-section.out b/tests/data/acpi/loongarch64/virt/SLIT similarity index 100% rename from tests/qapi-schema/doc-bad-section.out rename to tests/data/acpi/loongarch64/virt/SLIT diff --git a/tests/data/acpi/loongarch64/virt/SLIT.numamem b/tests/data/acpi/loongarch64/virt/SLIT.numamem new file mode 100644 index 00000000000..67f00813af7 Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/SLIT.numamem differ diff --git a/tests/data/acpi/loongarch64/virt/SPCR b/tests/data/acpi/loongarch64/virt/SPCR new file mode 100644 index 00000000000..3cc9bbcfb80 Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/SPCR differ diff --git a/tests/data/acpi/loongarch64/virt/SRAT b/tests/data/acpi/loongarch64/virt/SRAT new file mode 100644 index 00000000000..ff234ce45cb Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/SRAT differ diff --git a/tests/data/acpi/loongarch64/virt/SRAT.memhp b/tests/data/acpi/loongarch64/virt/SRAT.memhp new file mode 100644 index 00000000000..52532189013 Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/SRAT.memhp differ diff --git a/tests/data/acpi/loongarch64/virt/SRAT.numamem b/tests/data/acpi/loongarch64/virt/SRAT.numamem new file 
mode 100644 index 00000000000..2972a9abdcf Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/SRAT.numamem differ diff --git a/tests/data/acpi/loongarch64/virt/SRAT.topology b/tests/data/acpi/loongarch64/virt/SRAT.topology new file mode 100644 index 00000000000..4a44831f475 Binary files /dev/null and b/tests/data/acpi/loongarch64/virt/SRAT.topology differ diff --git a/tests/data/acpi/rebuild-expected-aml.sh b/tests/data/acpi/rebuild-expected-aml.sh index c1092fb8ba6..cbf9ffe0ddd 100755 --- a/tests/data/acpi/rebuild-expected-aml.sh +++ b/tests/data/acpi/rebuild-expected-aml.sh @@ -12,7 +12,7 @@ # This work is licensed under the terms of the GNU GPLv2. # See the COPYING.LIB file in the top-level directory. -qemu_arches="x86_64 aarch64 riscv64" +qemu_arches="x86_64 aarch64 riscv64 loongarch64" if [ ! -e "tests/qtest/bios-tables-test" ]; then echo "Test: bios-tables-test is required! Run make check before this script." @@ -37,7 +37,7 @@ if [ -z "$qemu_bins" ]; then echo "Only the following architectures are currently supported: $qemu_arches" echo "None of these configured!" echo "To fix, run configure \ - --target-list=x86_64-softmmu,aarch64-softmmu,riscv64-softmmu" + --target-list=x86_64-softmmu,aarch64-softmmu,riscv64-softmmu,loongarch64-softmmu" exit 1; fi diff --git a/tests/data/acpi/riscv64/virt/APIC b/tests/data/acpi/riscv64/virt/APIC index 66a25dfd2d6..3fb5b753596 100644 Binary files a/tests/data/acpi/riscv64/virt/APIC and b/tests/data/acpi/riscv64/virt/APIC differ diff --git a/tests/data/acpi/riscv64/virt/DSDT b/tests/data/acpi/riscv64/virt/DSDT index 6a33f5647dd..527f239dab1 100644 Binary files a/tests/data/acpi/riscv64/virt/DSDT and b/tests/data/acpi/riscv64/virt/DSDT differ diff --git a/tests/data/acpi/riscv64/virt/FACP b/tests/data/acpi/riscv64/virt/FACP index a5276b65ea8..78e1b14b1d4 100644 Binary files a/tests/data/acpi/riscv64/virt/FACP and b/tests/data/acpi/riscv64/virt/FACP differ diff --git a/tests/data/acpi/riscv64/virt/RHCT b/tests/data/acpi/riscv64/virt/RHCT index 4f231735aba..52a4cc4b638 100644 Binary files a/tests/data/acpi/riscv64/virt/RHCT and b/tests/data/acpi/riscv64/virt/RHCT differ diff --git a/tests/data/acpi/riscv64/virt/SPCR b/tests/data/acpi/riscv64/virt/SPCR index 4da9daf65f7..09617f8793a 100644 Binary files a/tests/data/acpi/riscv64/virt/SPCR and b/tests/data/acpi/riscv64/virt/SPCR differ diff --git a/tests/data/acpi/riscv64/virt/SRAT.numamem b/tests/data/acpi/riscv64/virt/SRAT.numamem new file mode 100644 index 00000000000..2b6467364b7 Binary files /dev/null and b/tests/data/acpi/riscv64/virt/SRAT.numamem differ diff --git a/tests/data/acpi/x86/microvm/DSDT.pcie b/tests/data/acpi/x86/microvm/DSDT.pcie index 8eacd21d6ec..ba258f454dc 100644 Binary files a/tests/data/acpi/x86/microvm/DSDT.pcie and b/tests/data/acpi/x86/microvm/DSDT.pcie differ diff --git a/tests/data/acpi/x86/pc/DSDT b/tests/data/acpi/x86/pc/DSDT index c93ad6b7f83..4beb5194b84 100644 Binary files a/tests/data/acpi/x86/pc/DSDT and b/tests/data/acpi/x86/pc/DSDT differ diff --git a/tests/data/acpi/x86/pc/DSDT.acpierst b/tests/data/acpi/x86/pc/DSDT.acpierst index f643fa2d034..abda6863b64 100644 Binary files a/tests/data/acpi/x86/pc/DSDT.acpierst and b/tests/data/acpi/x86/pc/DSDT.acpierst differ diff --git a/tests/data/acpi/x86/pc/DSDT.acpihmat b/tests/data/acpi/x86/pc/DSDT.acpihmat index 9d3695ff289..d081db26d7b 100644 Binary files a/tests/data/acpi/x86/pc/DSDT.acpihmat and b/tests/data/acpi/x86/pc/DSDT.acpihmat differ diff --git a/tests/data/acpi/x86/pc/DSDT.bridge 
b/tests/data/acpi/x86/pc/DSDT.bridge index 840b45f354a..e16897dc5f0 100644 Binary files a/tests/data/acpi/x86/pc/DSDT.bridge and b/tests/data/acpi/x86/pc/DSDT.bridge differ diff --git a/tests/data/acpi/x86/pc/DSDT.cphp b/tests/data/acpi/x86/pc/DSDT.cphp index dbc0141b2bb..e95711cd9cd 100644 Binary files a/tests/data/acpi/x86/pc/DSDT.cphp and b/tests/data/acpi/x86/pc/DSDT.cphp differ diff --git a/tests/data/acpi/x86/pc/DSDT.dimmpxm b/tests/data/acpi/x86/pc/DSDT.dimmpxm index 1294f655d41..90ba66b9164 100644 Binary files a/tests/data/acpi/x86/pc/DSDT.dimmpxm and b/tests/data/acpi/x86/pc/DSDT.dimmpxm differ diff --git a/tests/data/acpi/x86/pc/DSDT.hpbridge b/tests/data/acpi/x86/pc/DSDT.hpbridge index 8012b5eb315..0eafe5fbf3d 100644 Binary files a/tests/data/acpi/x86/pc/DSDT.hpbridge and b/tests/data/acpi/x86/pc/DSDT.hpbridge differ diff --git a/tests/data/acpi/x86/pc/DSDT.hpbrroot b/tests/data/acpi/x86/pc/DSDT.hpbrroot index 4fa0c6fe720..077a4cc988d 100644 Binary files a/tests/data/acpi/x86/pc/DSDT.hpbrroot and b/tests/data/acpi/x86/pc/DSDT.hpbrroot differ diff --git a/tests/data/acpi/x86/pc/DSDT.ipmikcs b/tests/data/acpi/x86/pc/DSDT.ipmikcs index 0a891baf458..8d465f02777 100644 Binary files a/tests/data/acpi/x86/pc/DSDT.ipmikcs and b/tests/data/acpi/x86/pc/DSDT.ipmikcs differ diff --git a/tests/data/acpi/x86/pc/DSDT.memhp b/tests/data/acpi/x86/pc/DSDT.memhp index 9b442a64cf7..e3b49757cb7 100644 Binary files a/tests/data/acpi/x86/pc/DSDT.memhp and b/tests/data/acpi/x86/pc/DSDT.memhp differ diff --git a/tests/data/acpi/x86/pc/DSDT.nohpet b/tests/data/acpi/x86/pc/DSDT.nohpet index 1754c687883..9e772c1316d 100644 Binary files a/tests/data/acpi/x86/pc/DSDT.nohpet and b/tests/data/acpi/x86/pc/DSDT.nohpet differ diff --git a/tests/data/acpi/x86/pc/DSDT.numamem b/tests/data/acpi/x86/pc/DSDT.numamem index 9fc731d3d2b..9bfbfc28213 100644 Binary files a/tests/data/acpi/x86/pc/DSDT.numamem and b/tests/data/acpi/x86/pc/DSDT.numamem differ diff --git a/tests/data/acpi/x86/pc/DSDT.roothp b/tests/data/acpi/x86/pc/DSDT.roothp index e654c83ebe4..efbee6d8aa5 100644 Binary files a/tests/data/acpi/x86/pc/DSDT.roothp and b/tests/data/acpi/x86/pc/DSDT.roothp differ diff --git a/tests/data/acpi/x86/q35/APIC.acpihmat-generic-x b/tests/data/acpi/x86/q35/APIC.acpihmat-generic-x new file mode 100644 index 00000000000..317ddb3fbed Binary files /dev/null and b/tests/data/acpi/x86/q35/APIC.acpihmat-generic-x differ diff --git a/tests/data/acpi/x86/q35/CEDT.acpihmat-generic-x b/tests/data/acpi/x86/q35/CEDT.acpihmat-generic-x new file mode 100644 index 00000000000..31c90116636 Binary files /dev/null and b/tests/data/acpi/x86/q35/CEDT.acpihmat-generic-x differ diff --git a/tests/data/acpi/x86/q35/DMAR.dmar b/tests/data/acpi/x86/q35/DMAR.dmar index 0dca6e68ad8..0c05976715c 100644 Binary files a/tests/data/acpi/x86/q35/DMAR.dmar and b/tests/data/acpi/x86/q35/DMAR.dmar differ diff --git a/tests/data/acpi/x86/q35/DSDT b/tests/data/acpi/x86/q35/DSDT index fb89ae0ac6d..e5e8d1e041e 100644 Binary files a/tests/data/acpi/x86/q35/DSDT and b/tests/data/acpi/x86/q35/DSDT differ diff --git a/tests/data/acpi/x86/q35/DSDT.acpierst b/tests/data/acpi/x86/q35/DSDT.acpierst index 46fd25400b7..072a3fe2cd1 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.acpierst and b/tests/data/acpi/x86/q35/DSDT.acpierst differ diff --git a/tests/data/acpi/x86/q35/DSDT.acpihmat b/tests/data/acpi/x86/q35/DSDT.acpihmat index 61c5bd52a42..2a4f2fc1d5c 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.acpihmat and b/tests/data/acpi/x86/q35/DSDT.acpihmat differ 
diff --git a/tests/data/acpi/x86/q35/DSDT.acpihmat-generic-x b/tests/data/acpi/x86/q35/DSDT.acpihmat-generic-x new file mode 100644 index 00000000000..7911c058bba Binary files /dev/null and b/tests/data/acpi/x86/q35/DSDT.acpihmat-generic-x differ diff --git a/tests/data/acpi/x86/q35/DSDT.acpihmat-noinitiator b/tests/data/acpi/x86/q35/DSDT.acpihmat-noinitiator index 3aaa2bbdf54..580b4a456a2 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.acpihmat-noinitiator and b/tests/data/acpi/x86/q35/DSDT.acpihmat-noinitiator differ diff --git a/tests/data/acpi/x86/q35/DSDT.applesmc b/tests/data/acpi/x86/q35/DSDT.applesmc index 944209adeaa..5e8220e38d6 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.applesmc and b/tests/data/acpi/x86/q35/DSDT.applesmc differ diff --git a/tests/data/acpi/x86/q35/DSDT.bridge b/tests/data/acpi/x86/q35/DSDT.bridge index d9938dba8fa..ee039453af1 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.bridge and b/tests/data/acpi/x86/q35/DSDT.bridge differ diff --git a/tests/data/acpi/x86/q35/DSDT.core-count b/tests/data/acpi/x86/q35/DSDT.core-count index a24b04cbdbf..7ebfceeb664 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.core-count and b/tests/data/acpi/x86/q35/DSDT.core-count differ diff --git a/tests/data/acpi/x86/q35/DSDT.core-count2 b/tests/data/acpi/x86/q35/DSDT.core-count2 index 3a0cb8c581c..d0394558a1f 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.core-count2 and b/tests/data/acpi/x86/q35/DSDT.core-count2 differ diff --git a/tests/data/acpi/x86/q35/DSDT.cphp b/tests/data/acpi/x86/q35/DSDT.cphp index 20955d0aa30..a055c2e7d3c 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.cphp and b/tests/data/acpi/x86/q35/DSDT.cphp differ diff --git a/tests/data/acpi/x86/q35/DSDT.cxl b/tests/data/acpi/x86/q35/DSDT.cxl index afcdc0d0ba8..20843549f54 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.cxl and b/tests/data/acpi/x86/q35/DSDT.cxl differ diff --git a/tests/data/acpi/x86/q35/DSDT.dimmpxm b/tests/data/acpi/x86/q35/DSDT.dimmpxm index 228374b55bd..664e926e907 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.dimmpxm and b/tests/data/acpi/x86/q35/DSDT.dimmpxm differ diff --git a/tests/data/acpi/x86/q35/DSDT.ipmibt b/tests/data/acpi/x86/q35/DSDT.ipmibt index 45f911ada56..4066a76d26a 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.ipmibt and b/tests/data/acpi/x86/q35/DSDT.ipmibt differ diff --git a/tests/data/acpi/x86/q35/DSDT.ipmismbus b/tests/data/acpi/x86/q35/DSDT.ipmismbus index e5d6811bee1..6d0b6b95c2a 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.ipmismbus and b/tests/data/acpi/x86/q35/DSDT.ipmismbus differ diff --git a/tests/data/acpi/x86/q35/DSDT.ivrs b/tests/data/acpi/x86/q35/DSDT.ivrs index 46fd25400b7..072a3fe2cd1 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.ivrs and b/tests/data/acpi/x86/q35/DSDT.ivrs differ diff --git a/tests/data/acpi/x86/q35/DSDT.memhp b/tests/data/acpi/x86/q35/DSDT.memhp index 5ce081187a5..4f2f9bcfcef 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.memhp and b/tests/data/acpi/x86/q35/DSDT.memhp differ diff --git a/tests/data/acpi/x86/q35/DSDT.mmio64 b/tests/data/acpi/x86/q35/DSDT.mmio64 index bdf36c4d575..0fb6aab16f1 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.mmio64 and b/tests/data/acpi/x86/q35/DSDT.mmio64 differ diff --git a/tests/data/acpi/x86/q35/DSDT.multi-bridge b/tests/data/acpi/x86/q35/DSDT.multi-bridge index 1db43a69e4c..f6afa6d96d2 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.multi-bridge and b/tests/data/acpi/x86/q35/DSDT.multi-bridge differ diff --git 
a/tests/data/acpi/x86/q35/DSDT.noacpihp b/tests/data/acpi/x86/q35/DSDT.noacpihp index 8bc16887e1c..9f7261d1b06 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.noacpihp and b/tests/data/acpi/x86/q35/DSDT.noacpihp differ diff --git a/tests/data/acpi/x86/q35/DSDT.nohpet b/tests/data/acpi/x86/q35/DSDT.nohpet index c13e45e3612..99ad629c917 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.nohpet and b/tests/data/acpi/x86/q35/DSDT.nohpet differ diff --git a/tests/data/acpi/x86/q35/DSDT.numamem b/tests/data/acpi/x86/q35/DSDT.numamem index ba6669437e6..fd1d8a79d3d 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.numamem and b/tests/data/acpi/x86/q35/DSDT.numamem differ diff --git a/tests/data/acpi/x86/q35/DSDT.pvpanic-isa b/tests/data/acpi/x86/q35/DSDT.pvpanic-isa index 6ad42873e91..89032fa0290 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.pvpanic-isa and b/tests/data/acpi/x86/q35/DSDT.pvpanic-isa differ diff --git a/tests/data/acpi/x86/q35/DSDT.thread-count b/tests/data/acpi/x86/q35/DSDT.thread-count index a24b04cbdbf..7ebfceeb664 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.thread-count and b/tests/data/acpi/x86/q35/DSDT.thread-count differ diff --git a/tests/data/acpi/x86/q35/DSDT.thread-count2 b/tests/data/acpi/x86/q35/DSDT.thread-count2 index 3a0cb8c581c..d0394558a1f 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.thread-count2 and b/tests/data/acpi/x86/q35/DSDT.thread-count2 differ diff --git a/tests/data/acpi/x86/q35/DSDT.tis.tpm12 b/tests/data/acpi/x86/q35/DSDT.tis.tpm12 index e381ce4cbf2..f2ed40ca70c 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.tis.tpm12 and b/tests/data/acpi/x86/q35/DSDT.tis.tpm12 differ diff --git a/tests/data/acpi/x86/q35/DSDT.tis.tpm2 b/tests/data/acpi/x86/q35/DSDT.tis.tpm2 index a09253042ce..5c975d2162d 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.tis.tpm2 and b/tests/data/acpi/x86/q35/DSDT.tis.tpm2 differ diff --git a/tests/data/acpi/x86/q35/DSDT.type4-count b/tests/data/acpi/x86/q35/DSDT.type4-count index edc23198cdb..3194a82b8b4 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.type4-count and b/tests/data/acpi/x86/q35/DSDT.type4-count differ diff --git a/tests/data/acpi/x86/q35/DSDT.viot b/tests/data/acpi/x86/q35/DSDT.viot index 64e81f57112..129d43e1e56 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.viot and b/tests/data/acpi/x86/q35/DSDT.viot differ diff --git a/tests/data/acpi/x86/q35/DSDT.xapic b/tests/data/acpi/x86/q35/DSDT.xapic index d4acd851c62..b37ab591110 100644 Binary files a/tests/data/acpi/x86/q35/DSDT.xapic and b/tests/data/acpi/x86/q35/DSDT.xapic differ diff --git a/tests/data/acpi/x86/q35/HMAT.acpihmat-generic-x b/tests/data/acpi/x86/q35/HMAT.acpihmat-generic-x new file mode 100644 index 00000000000..0e5765f6ee4 Binary files /dev/null and b/tests/data/acpi/x86/q35/HMAT.acpihmat-generic-x differ diff --git a/tests/data/acpi/x86/q35/SRAT.acpihmat-generic-x b/tests/data/acpi/x86/q35/SRAT.acpihmat-generic-x new file mode 100644 index 00000000000..b45838adb7d Binary files /dev/null and b/tests/data/acpi/x86/q35/SRAT.acpihmat-generic-x differ diff --git a/tests/data/qobject/qdict.txt b/tests/data/qobject/qdict.txt index e2edc88161e..888f3431b53 100644 --- a/tests/data/qobject/qdict.txt +++ b/tests/data/qobject/qdict.txt @@ -3487,12 +3487,6 @@ cred-internals.h: 559 CREDITS: 603 crime.c: 2833 crime.h: 5271 -cris: 4096 -cris_defs_asm.h: 3805 -crisksyms.c: 472 -cris_supp_reg.h: 198 -crisv10.c: 129158 -crisv10.h: 4289 crm_regs.h: 1700 cr_pll.c: 4842 crt0_ram.S: 2152 diff --git 
a/tests/data/uefi-boot-images/bios-tables-test.loongarch64.iso.qcow2 b/tests/data/uefi-boot-images/bios-tables-test.loongarch64.iso.qcow2 new file mode 100644 index 00000000000..18daee0c52f Binary files /dev/null and b/tests/data/uefi-boot-images/bios-tables-test.loongarch64.iso.qcow2 differ diff --git a/tests/decode/meson.build b/tests/decode/meson.build index b13fada9800..63405ca08fd 100644 --- a/tests/decode/meson.build +++ b/tests/decode/meson.build @@ -41,6 +41,7 @@ succ_tests = [ 'succ_argset_type1.decode', 'succ_function.decode', 'succ_ident1.decode', + 'succ_infer1.decode', 'succ_named_field.decode', 'succ_pattern_group_nest1.decode', 'succ_pattern_group_nest2.decode', diff --git a/tests/decode/succ_infer1.decode b/tests/decode/succ_infer1.decode new file mode 100644 index 00000000000..6fa40bada5c --- /dev/null +++ b/tests/decode/succ_infer1.decode @@ -0,0 +1,4 @@ +&rprr_load rd pg rn rm dtype nreg +@rprr_load .... .... ... rm:5 ... pg:3 rn:5 rd:5 &rprr_load + +LD1Q 1100 0100 000 rm:5 101 pg:3 rn:5 rd:5 diff --git a/tests/docker/Makefile.include b/tests/docker/Makefile.include index 708e3a72fb8..3959d8a028a 100644 --- a/tests/docker/Makefile.include +++ b/tests/docker/Makefile.include @@ -92,10 +92,10 @@ endif docker-image-alpine: NOUSER=1 debian-toolchain-run = \ - $(if $(NOCACHE), \ + $(if $(NOCACHE)$(NOFETCH), \ $(call quiet-command, \ $(DOCKER_SCRIPT) build -t qemu/$1 -f $< \ - $(if $V,,--quiet) --no-cache \ + $(if $V,,--quiet) $(if $(NOCACHE),--no-cache) \ --registry $(DOCKER_REGISTRY) --extra-files \ $(DOCKER_FILES_DIR)/$1.d/build-toolchain.sh, \ "BUILD", $1), \ @@ -117,7 +117,6 @@ docker-image-debian-microblaze-cross: $(DOCKER_FILES_DIR)/debian-toolchain.docke # These images may be good enough for building tests but not for test builds DOCKER_PARTIAL_IMAGES += debian-microblaze-cross DOCKER_PARTIAL_IMAGES += debian-xtensa-cross -DOCKER_PARTIAL_IMAGES += fedora-cris-cross # images that are only used to build other images DOCKER_VIRTUAL_IMAGES := debian-bootstrap debian-toolchain @@ -178,6 +177,7 @@ docker: @echo ' NETWORK=$$BACKEND Enable virtual network interface with $$BACKEND.' @echo ' NOUSER=1 Define to disable adding current user to containers passwd.' @echo ' NOCACHE=1 Ignore cache when build images.' + @echo ' NOFETCH=1 Do not fetch from the registry.' @echo ' EXECUTABLE=<path> Include executable in image.' @echo ' EXTRA_FILES="<path/to/file1> [... <path/to/fileN>]"' @echo ' Include extra files in image.' @@ -185,8 +185,10 @@ docker: docker-help: docker +# Where QEMU caches build artefacts +DOCKER_QEMU_CACHE_DIR := $$HOME/.cache/qemu # Use a global constant ccache directory to speed up repetitive builds -DOCKER_CCACHE_DIR := $$HOME/.cache/qemu-docker-ccache +DOCKER_QEMU_CCACHE_DIR := DOCKER_QEMU_CACHE_DIR/docker-ccache # This rule if for directly running against an arbitrary docker target. # It is called by the expanded docker targets (e.g. 
make @@ -195,7 +197,7 @@ DOCKER_CCACHE_DIR := $$HOME/.cache/qemu-docker-ccache # For example: make docker-run TEST="test-quick" IMAGE="debian:arm64" EXECUTABLE=./aarch64-linux-user/qemu-aarch64 # docker-run: docker-qemu-src - @mkdir -p "$(DOCKER_CCACHE_DIR)" + @mkdir -p "$(DOCKER_QEMU_CCACHE_DIR)" @if test -z "$(IMAGE)" || test -z "$(TEST)"; \ then echo "Invalid target $(IMAGE)/$(TEST)"; exit 1; \ fi @@ -222,8 +224,8 @@ docker-run: docker-qemu-src -e V=$V -e J=$J -e DEBUG=$(DEBUG) \ -e SHOW_ENV=$(SHOW_ENV) \ $(if $(NOUSER),, \ - -e CCACHE_DIR=/var/tmp/ccache \ - -v $(DOCKER_CCACHE_DIR):/var/tmp/ccache:z \ + -v $(DOCKER_QEMU_CACHE_DIR):$(DOCKER_QEMU_CACHE_DIR) \ + -e CCACHE_DIR=$(DOCKER_QEMU_CCACHE_DIR) \ ) \ -v $$(readlink -e $(DOCKER_SRC_COPY)):/var/tmp/qemu:z$(COMMA)ro \ $(IMAGE) \ @@ -236,3 +238,6 @@ docker-image: ${DOCKER_IMAGES:%=docker-image-%} docker-clean: $(call quiet-command, $(DOCKER_SCRIPT) clean) + +# Overrides +docker-test-rust%: NETWORK=1 diff --git a/tests/docker/dockerfiles/alpine.docker b/tests/docker/dockerfiles/alpine.docker index b079a83fe2e..bf3bd5a30dd 100644 --- a/tests/docker/dockerfiles/alpine.docker +++ b/tests/docker/dockerfiles/alpine.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all alpine-319 qemu +# $ lcitool dockerfile --layers all alpine-321 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM docker.io/library/alpine:3.19 +FROM docker.io/library/alpine:3.21 RUN apk update && \ apk upgrade && \ @@ -40,10 +40,12 @@ RUN apk update && \ glib-static \ gnutls-dev \ gtk+3.0-dev \ + gtk-vnc-dev \ json-c-dev \ libaio-dev \ libbpf-dev \ libcap-ng-dev \ + libcbor-dev \ libdrm-dev \ libepoxy-dev \ libffi-dev \ @@ -89,6 +91,8 @@ RUN apk update && \ py3-yaml \ python3 \ rpm2cpio \ + rust \ + rust-bindgen \ samurai \ sdl2-dev \ sdl2_image-dev \ @@ -107,6 +111,7 @@ RUN apk update && \ vde2-dev \ virglrenderer-dev \ vte3-dev \ + vulkan-tools \ which \ xen-dev \ xorriso \ diff --git a/tests/docker/dockerfiles/centos9.docker b/tests/docker/dockerfiles/centos9.docker index 0256865b9e9..a942835a1d2 100644 --- a/tests/docker/dockerfiles/centos9.docker +++ b/tests/docker/dockerfiles/centos9.docker @@ -16,6 +16,7 @@ RUN dnf distro-sync -y && \ alsa-lib-devel \ bash \ bc \ + bindgen-cli \ bison \ brlapi-devel \ bzip2 \ @@ -102,6 +103,7 @@ RUN dnf distro-sync -y && \ python3-sphinx_rtd_theme \ python3-tomli \ rdma-core-devel \ + rust \ sed \ snappy-devel \ socat \ @@ -113,6 +115,7 @@ RUN dnf distro-sync -y && \ usbredir-devel \ util-linux \ vte291-devel \ + vulkan-tools \ which \ xorriso \ zlib-devel \ diff --git a/tests/docker/dockerfiles/debian-all-test-cross.docker b/tests/docker/dockerfiles/debian-all-test-cross.docker index 8ab244e018a..420a4e33e60 100644 --- a/tests/docker/dockerfiles/debian-all-test-cross.docker +++ b/tests/docker/dockerfiles/debian-all-test-cross.docker @@ -15,7 +15,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ apt-get install -y eatmydata && \ eatmydata apt-get dist-upgrade -y && \ - apt build-dep -yy qemu + apt build-dep -yy --arch-only qemu # Add extra build tools and as many cross compilers as we can for testing RUN DEBIAN_FRONTEND=noninteractive eatmydata \ @@ -23,7 +23,9 @@ RUN DEBIAN_FRONTEND=noninteractive eatmydata \ bison \ ccache \ clang \ + dpkg-dev \ flex \ + gcc \ git \ libclang-rt-dev \ ninja-build \ @@ -33,16 +35,11 @@ RUN DEBIAN_FRONTEND=noninteractive eatmydata \ python3-venv \ python3-wheel -RUN DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y 
--no-install-recommends \ - gcc-aarch64-linux-gnu \ +# All the generally available compilers +ENV AVAILABLE_COMPILERS gcc-aarch64-linux-gnu \ libc6-dev-arm64-cross \ gcc-arm-linux-gnueabihf \ libc6-dev-armhf-cross \ - gcc-hppa-linux-gnu \ - libc6-dev-hppa-cross \ - gcc-m68k-linux-gnu \ - libc6-dev-m68k-cross \ gcc-mips-linux-gnu \ libc6-dev-mips-cross \ gcc-mips64-linux-gnuabi64 \ @@ -51,18 +48,23 @@ RUN DEBIAN_FRONTEND=noninteractive eatmydata \ libc6-dev-mips64el-cross \ gcc-mipsel-linux-gnu \ libc6-dev-mipsel-cross \ - gcc-powerpc-linux-gnu \ - libc6-dev-powerpc-cross \ - gcc-powerpc64-linux-gnu \ - libc6-dev-ppc64-cross \ gcc-powerpc64le-linux-gnu \ libc6-dev-ppc64el-cross \ gcc-riscv64-linux-gnu \ libc6-dev-riscv64-cross \ gcc-s390x-linux-gnu \ - libc6-dev-s390x-cross \ - gcc-sparc64-linux-gnu \ - libc6-dev-sparc64-cross && \ + libc6-dev-s390x-cross + +RUN if dpkg-architecture -e amd64; then \ + export AVAILABLE_COMPILERS="${AVAILABLE_COMPILERS} gcc-hppa-linux-gnu libc6-dev-hppa-cross"; \ + export AVAILABLE_COMPILERS="${AVAILABLE_COMPILERS} gcc-m68k-linux-gnu libc6-dev-m68k-cross"; \ + export AVAILABLE_COMPILERS="${AVAILABLE_COMPILERS} gcc-powerpc-linux-gnu libc6-dev-powerpc-cross"; \ + export AVAILABLE_COMPILERS="${AVAILABLE_COMPILERS} gcc-powerpc64-linux-gnu libc6-dev-ppc64-cross"; \ + export AVAILABLE_COMPILERS="${AVAILABLE_COMPILERS} gcc-sparc64-linux-gnu libc6-dev-sparc64-cross"; \ + fi && \ + DEBIAN_FRONTEND=noninteractive eatmydata \ + apt install -y --no-install-recommends \ + ${AVAILABLE_COMPILERS} && \ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt diff --git a/tests/docker/dockerfiles/debian-amd64-cross.docker b/tests/docker/dockerfiles/debian-amd64-cross.docker index 8058695979c..081f3e00f7b 100644 --- a/tests/docker/dockerfiles/debian-amd64-cross.docker +++ b/tests/docker/dockerfiles/debian-amd64-cross.docker @@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ bash \ bc \ + bindgen \ bison \ bsdextrautils \ bzip2 \ @@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ git \ hostname \ libglib2.0-dev \ - libpcre2-dev \ - libsndio-dev \ - libspice-protocol-dev \ llvm \ locales \ make \ @@ -52,6 +50,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-venv \ python3-yaml \ rpm2cpio \ + rustc-web \ sed \ socat \ sparse \ @@ -59,6 +58,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ tar \ tesseract-ocr \ tesseract-ocr-eng \ + vulkan-tools \ xorriso \ zstd && \ eatmydata apt-get autoremove -y && \ @@ -91,6 +91,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libcacard-dev:amd64 \ libcap-ng-dev:amd64 \ libcapstone-dev:amd64 \ + libcbor-dev:amd64 \ libcmocka-dev:amd64 \ libcurl4-gnutls-dev:amd64 \ libdaxctl-dev:amd64 \ @@ -105,6 +106,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libglusterfs-dev:amd64 \ libgnutls28-dev:amd64 \ libgtk-3-dev:amd64 \ + libgtk-vnc-2.0-dev:amd64 \ libibverbs-dev:amd64 \ libiscsi-dev:amd64 \ libjemalloc-dev:amd64 \ @@ -116,6 +118,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libnfs-dev:amd64 \ libnuma-dev:amd64 \ libpam0g-dev:amd64 \ + libpcre2-dev:amd64 \ libpipewire-0.3-dev:amd64 \ libpixman-1-dev:amd64 \ libpmem-dev:amd64 \ @@ -130,8 +133,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libselinux1-dev:amd64 \ libslirp-dev:amd64 \ libsnappy-dev:amd64 \ + libsndio-dev:amd64 \ + libspice-protocol-dev:amd64 \ libspice-server-dev:amd64 \ - libssh-gcrypt-dev:amd64 \ + libssh-dev:amd64 \ libsystemd-dev:amd64 
\ libtasn1-6-dev:amd64 \ libubsan1:amd64 \ @@ -169,6 +174,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/x86_64-linux-gnu && \ ENV ABI "x86_64-linux-gnu" ENV MESON_OPTS "--cross-file=x86_64-linux-gnu" +ENV RUST_TARGET "x86_64-unknown-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=x86_64-linux-gnu- ENV DEF_TARGET_LIST x86_64-softmmu,x86_64-linux-user,i386-softmmu,i386-linux-user # As a final step configure the user (if env is defined) diff --git a/tests/docker/dockerfiles/debian-arm64-cross.docker b/tests/docker/dockerfiles/debian-arm64-cross.docker index 15457d7657f..91c555a36e9 100644 --- a/tests/docker/dockerfiles/debian-arm64-cross.docker +++ b/tests/docker/dockerfiles/debian-arm64-cross.docker @@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ bash \ bc \ + bindgen \ bison \ bsdextrautils \ bzip2 \ @@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ git \ hostname \ libglib2.0-dev \ - libpcre2-dev \ - libsndio-dev \ - libspice-protocol-dev \ llvm \ locales \ make \ @@ -52,6 +50,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-venv \ python3-yaml \ rpm2cpio \ + rustc-web \ sed \ socat \ sparse \ @@ -59,6 +58,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ tar \ tesseract-ocr \ tesseract-ocr-eng \ + vulkan-tools \ xorriso \ zstd && \ eatmydata apt-get autoremove -y && \ @@ -91,6 +91,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libcacard-dev:arm64 \ libcap-ng-dev:arm64 \ libcapstone-dev:arm64 \ + libcbor-dev:arm64 \ libcmocka-dev:arm64 \ libcurl4-gnutls-dev:arm64 \ libdaxctl-dev:arm64 \ @@ -105,6 +106,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libglusterfs-dev:arm64 \ libgnutls28-dev:arm64 \ libgtk-3-dev:arm64 \ + libgtk-vnc-2.0-dev:arm64 \ libibverbs-dev:arm64 \ libiscsi-dev:arm64 \ libjemalloc-dev:arm64 \ @@ -116,6 +118,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libnfs-dev:arm64 \ libnuma-dev:arm64 \ libpam0g-dev:arm64 \ + libpcre2-dev:arm64 \ libpipewire-0.3-dev:arm64 \ libpixman-1-dev:arm64 \ libpng-dev:arm64 \ @@ -129,8 +132,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libselinux1-dev:arm64 \ libslirp-dev:arm64 \ libsnappy-dev:arm64 \ + libsndio-dev:arm64 \ + libspice-protocol-dev:arm64 \ libspice-server-dev:arm64 \ - libssh-gcrypt-dev:arm64 \ + libssh-dev:arm64 \ libsystemd-dev:arm64 \ libtasn1-6-dev:arm64 \ libubsan1:arm64 \ @@ -168,6 +173,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/aarch64-linux-gnu && \ ENV ABI "aarch64-linux-gnu" ENV MESON_OPTS "--cross-file=aarch64-linux-gnu" +ENV RUST_TARGET "aarch64-unknown-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=aarch64-linux-gnu- ENV DEF_TARGET_LIST aarch64-softmmu,aarch64-linux-user # As a final step configure the user (if env is defined) diff --git a/tests/docker/dockerfiles/debian-armel-cross.docker b/tests/docker/dockerfiles/debian-armel-cross.docker deleted file mode 100644 index c26ffc2e9e2..00000000000 --- a/tests/docker/dockerfiles/debian-armel-cross.docker +++ /dev/null @@ -1,178 +0,0 @@ -# THIS FILE WAS AUTO-GENERATED -# -# $ lcitool dockerfile --layers all --cross-arch armv6l debian-11 qemu -# -# https://gitlab.com/libvirt/libvirt-ci - -FROM docker.io/library/debian:11-slim - -RUN export DEBIAN_FRONTEND=noninteractive && \ - apt-get update && \ - apt-get install -y eatmydata && \ - eatmydata apt-get dist-upgrade -y && \ - eatmydata apt-get install --no-install-recommends -y \ - bash \ - bc \ - bison \ - bsdextrautils \ - bzip2 \ - ca-certificates \ - ccache \ - dbus 
\ - debianutils \ - diffutils \ - exuberant-ctags \ - findutils \ - flex \ - gcc \ - gcovr \ - gettext \ - git \ - hostname \ - libglib2.0-dev \ - libpcre2-dev \ - libsndio-dev \ - libspice-protocol-dev \ - llvm \ - locales \ - make \ - meson \ - mtools \ - ncat \ - ninja-build \ - openssh-client \ - pkgconf \ - python3 \ - python3-numpy \ - python3-opencv \ - python3-pillow \ - python3-pip \ - python3-setuptools \ - python3-sphinx \ - python3-sphinx-rtd-theme \ - python3-venv \ - python3-wheel \ - python3-yaml \ - rpm2cpio \ - sed \ - socat \ - sparse \ - tar \ - tesseract-ocr \ - tesseract-ocr-eng \ - xorriso \ - zstd && \ - eatmydata apt-get autoremove -y && \ - eatmydata apt-get autoclean -y && \ - sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ - dpkg-reconfigure locales && \ - rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED - -RUN /usr/bin/pip3 install tomli - -ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" -ENV LANG "en_US.UTF-8" -ENV MAKE "/usr/bin/make" -ENV NINJA "/usr/bin/ninja" -ENV PYTHON "/usr/bin/python3" - -RUN export DEBIAN_FRONTEND=noninteractive && \ - dpkg --add-architecture armel && \ - eatmydata apt-get update && \ - eatmydata apt-get dist-upgrade -y && \ - eatmydata apt-get install --no-install-recommends -y dpkg-dev && \ - eatmydata apt-get install --no-install-recommends -y \ - gcc-arm-linux-gnueabi \ - libaio-dev:armel \ - libasan6:armel \ - libasound2-dev:armel \ - libattr1-dev:armel \ - libbpf-dev:armel \ - libbrlapi-dev:armel \ - libbz2-dev:armel \ - libc6-dev:armel \ - libcacard-dev:armel \ - libcap-ng-dev:armel \ - libcapstone-dev:armel \ - libcmocka-dev:armel \ - libcurl4-gnutls-dev:armel \ - libdaxctl-dev:armel \ - libdrm-dev:armel \ - libepoxy-dev:armel \ - libfdt-dev:armel \ - libffi-dev:armel \ - libfuse3-dev:armel \ - libgbm-dev:armel \ - libgcrypt20-dev:armel \ - libglib2.0-dev:armel \ - libglusterfs-dev:armel \ - libgnutls28-dev:armel \ - libgtk-3-dev:armel \ - libibverbs-dev:armel \ - libiscsi-dev:armel \ - libjemalloc-dev:armel \ - libjpeg62-turbo-dev:armel \ - libjson-c-dev:armel \ - liblttng-ust-dev:armel \ - liblzo2-dev:armel \ - libncursesw5-dev:armel \ - libnfs-dev:armel \ - libnuma-dev:armel \ - libpam0g-dev:armel \ - libpipewire-0.3-dev:armel \ - libpixman-1-dev:armel \ - libpng-dev:armel \ - libpulse-dev:armel \ - librbd-dev:armel \ - librdmacm-dev:armel \ - libsasl2-dev:armel \ - libsdl2-dev:armel \ - libsdl2-image-dev:armel \ - libseccomp-dev:armel \ - libselinux1-dev:armel \ - libslirp-dev:armel \ - libsnappy-dev:armel \ - libspice-server-dev:armel \ - libssh-gcrypt-dev:armel \ - libsystemd-dev:armel \ - libtasn1-6-dev:armel \ - libubsan1:armel \ - libudev-dev:armel \ - liburing-dev:armel \ - libusb-1.0-0-dev:armel \ - libusbredirhost-dev:armel \ - libvdeplug-dev:armel \ - libvirglrenderer-dev:armel \ - libvte-2.91-dev:armel \ - libzstd-dev:armel \ - nettle-dev:armel \ - systemtap-sdt-dev:armel \ - zlib1g-dev:armel && \ - eatmydata apt-get autoremove -y && \ - eatmydata apt-get autoclean -y && \ - mkdir -p /usr/local/share/meson/cross && \ - printf "[binaries]\n\ -c = '/usr/bin/arm-linux-gnueabi-gcc'\n\ -ar = '/usr/bin/arm-linux-gnueabi-gcc-ar'\n\ -strip = '/usr/bin/arm-linux-gnueabi-strip'\n\ -pkgconfig = '/usr/bin/arm-linux-gnueabi-pkg-config'\n\ -\n\ -[host_machine]\n\ -system = 'linux'\n\ -cpu_family = 'arm'\n\ -cpu = 'arm'\n\ -endian = 'little'\n" > /usr/local/share/meson/cross/arm-linux-gnueabi && \ - dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt && \ - mkdir -p 
/usr/libexec/ccache-wrappers && \ - ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabi-cc && \ - ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/arm-linux-gnueabi-gcc - -ENV ABI "arm-linux-gnueabi" -ENV MESON_OPTS "--cross-file=arm-linux-gnueabi" -ENV QEMU_CONFIGURE_OPTS --cross-prefix=arm-linux-gnueabi- -ENV DEF_TARGET_LIST arm-softmmu,arm-linux-user,armeb-linux-user -# As a final step configure the user (if env is defined) -ARG USER -ARG UID -RUN if [ "${USER}" ]; then \ - id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/debian-armhf-cross.docker b/tests/docker/dockerfiles/debian-armhf-cross.docker index 8f87656d897..f0e2efcda09 100644 --- a/tests/docker/dockerfiles/debian-armhf-cross.docker +++ b/tests/docker/dockerfiles/debian-armhf-cross.docker @@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ bash \ bc \ + bindgen \ bison \ bsdextrautils \ bzip2 \ @@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ git \ hostname \ libglib2.0-dev \ - libpcre2-dev \ - libsndio-dev \ - libspice-protocol-dev \ llvm \ locales \ make \ @@ -52,6 +50,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-venv \ python3-yaml \ rpm2cpio \ + rustc-web \ sed \ socat \ sparse \ @@ -59,6 +58,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ tar \ tesseract-ocr \ tesseract-ocr-eng \ + vulkan-tools \ xorriso \ zstd && \ eatmydata apt-get autoremove -y && \ @@ -91,6 +91,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libcacard-dev:armhf \ libcap-ng-dev:armhf \ libcapstone-dev:armhf \ + libcbor-dev:armhf \ libcmocka-dev:armhf \ libcurl4-gnutls-dev:armhf \ libdaxctl-dev:armhf \ @@ -105,6 +106,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libglusterfs-dev:armhf \ libgnutls28-dev:armhf \ libgtk-3-dev:armhf \ + libgtk-vnc-2.0-dev:armhf \ libibverbs-dev:armhf \ libiscsi-dev:armhf \ libjemalloc-dev:armhf \ @@ -116,6 +118,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libnfs-dev:armhf \ libnuma-dev:armhf \ libpam0g-dev:armhf \ + libpcre2-dev:armhf \ libpipewire-0.3-dev:armhf \ libpixman-1-dev:armhf \ libpng-dev:armhf \ @@ -129,8 +132,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libselinux1-dev:armhf \ libslirp-dev:armhf \ libsnappy-dev:armhf \ + libsndio-dev:armhf \ + libspice-protocol-dev:armhf \ libspice-server-dev:armhf \ - libssh-gcrypt-dev:armhf \ + libssh-dev:armhf \ libsystemd-dev:armhf \ libtasn1-6-dev:armhf \ libubsan1:armhf \ @@ -168,6 +173,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/arm-linux-gnueabihf && \ ENV ABI "arm-linux-gnueabihf" ENV MESON_OPTS "--cross-file=arm-linux-gnueabihf" +ENV RUST_TARGET "armv7-unknown-linux-gnueabihf" ENV QEMU_CONFIGURE_OPTS --cross-prefix=arm-linux-gnueabihf- ENV DEF_TARGET_LIST arm-softmmu,arm-linux-user # As a final step configure the user (if env is defined) diff --git a/tests/docker/dockerfiles/debian-i686-cross.docker b/tests/docker/dockerfiles/debian-i686-cross.docker index f4ef054a2e8..025beb1ce25 100644 --- a/tests/docker/dockerfiles/debian-i686-cross.docker +++ b/tests/docker/dockerfiles/debian-i686-cross.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all --cross-arch i686 debian-11 qemu +# $ lcitool dockerfile --layers all --cross-arch i686 debian-12 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM docker.io/library/debian:11-slim +FROM docker.io/library/debian:12-slim RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update 
&& \ @@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ bash \ bc \ + bindgen \ bison \ bsdextrautils \ bzip2 \ @@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ git \ hostname \ libglib2.0-dev \ - libpcre2-dev \ - libsndio-dev \ - libspice-protocol-dev \ llvm \ locales \ make \ @@ -47,19 +45,20 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-opencv \ python3-pillow \ python3-pip \ - python3-setuptools \ python3-sphinx \ python3-sphinx-rtd-theme \ python3-venv \ - python3-wheel \ python3-yaml \ rpm2cpio \ + rustc-web \ sed \ socat \ sparse \ + swtpm \ tar \ tesseract-ocr \ tesseract-ocr-eng \ + vulkan-tools \ xorriso \ zstd && \ eatmydata apt-get autoremove -y && \ @@ -68,8 +67,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ dpkg-reconfigure locales && \ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED -RUN /usr/bin/pip3 install tomli - ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -94,6 +91,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libcacard-dev:i386 \ libcap-ng-dev:i386 \ libcapstone-dev:i386 \ + libcbor-dev:i386 \ libcmocka-dev:i386 \ libcurl4-gnutls-dev:i386 \ libdaxctl-dev:i386 \ @@ -108,6 +106,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libglusterfs-dev:i386 \ libgnutls28-dev:i386 \ libgtk-3-dev:i386 \ + libgtk-vnc-2.0-dev:i386 \ libibverbs-dev:i386 \ libiscsi-dev:i386 \ libjemalloc-dev:i386 \ @@ -119,6 +118,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libnfs-dev:i386 \ libnuma-dev:i386 \ libpam0g-dev:i386 \ + libpcre2-dev:i386 \ libpipewire-0.3-dev:i386 \ libpixman-1-dev:i386 \ libpng-dev:i386 \ @@ -132,8 +132,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libselinux1-dev:i386 \ libslirp-dev:i386 \ libsnappy-dev:i386 \ + libsndio-dev:i386 \ + libspice-protocol-dev:i386 \ libspice-server-dev:i386 \ - libssh-gcrypt-dev:i386 \ + libssh-dev:i386 \ libsystemd-dev:i386 \ libtasn1-6-dev:i386 \ libubsan1:i386 \ @@ -144,6 +146,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libvdeplug-dev:i386 \ libvirglrenderer-dev:i386 \ libvte-2.91-dev:i386 \ + libxdp-dev:i386 \ libzstd-dev:i386 \ nettle-dev:i386 \ systemtap-sdt-dev:i386 \ @@ -169,6 +172,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/i686-linux-gnu && \ ENV ABI "i686-linux-gnu" ENV MESON_OPTS "--cross-file=i686-linux-gnu" +ENV RUST_TARGET "i686-unknown-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=i686-linux-gnu- ENV DEF_TARGET_LIST x86_64-softmmu,x86_64-linux-user,i386-softmmu,i386-linux-user # As a final step configure the user (if env is defined) diff --git a/tests/docker/dockerfiles/debian-loongarch-cross.docker b/tests/docker/dockerfiles/debian-loongarch-cross.docker index 79eab5621ef..538ab534902 100644 --- a/tests/docker/dockerfiles/debian-loongarch-cross.docker +++ b/tests/docker/dockerfiles/debian-loongarch-cross.docker @@ -43,8 +43,8 @@ RUN curl -#SL https://github.com/loongson/build-tools/releases/download/2023.08. 
ENV PATH $PATH:/opt/cross-tools/bin ENV LD_LIBRARY_PATH /opt/cross-tools/lib:/opt/cross-tools/loongarch64-unknown-linux-gnu/lib:$LD_LIBRARY_PATH -ENV QEMU_CONFIGURE_OPTS --disable-system --disable-docs --disable-tools -ENV DEF_TARGET_LIST loongarch64-linux-user,loongarch-softmmu +ENV QEMU_CONFIGURE_OPTS --disable-docs --disable-tools +ENV DEF_TARGET_LIST loongarch64-linux-user,loongarch64-softmmu ENV MAKE /usr/bin/make # As a final step configure the user (if env is defined) diff --git a/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh b/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh index 23ec0aa9a72..c5cd0aa931c 100755 --- a/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh +++ b/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh @@ -10,6 +10,8 @@ TOOLCHAIN_INSTALL=/usr/local TOOLCHAIN_BIN=${TOOLCHAIN_INSTALL}/bin CROSS_SYSROOT=${TOOLCHAIN_INSTALL}/$TARGET/sys-root +GCC_PATCH0_URL=https://raw.githubusercontent.com/Xilinx/meta-xilinx/refs/tags/xlnx-rel-v2024.1/meta-microblaze/recipes-devtools/gcc/gcc-12/0009-Patch-microblaze-Fix-atomic-boolean-return-value.patch + export PATH=${TOOLCHAIN_BIN}:$PATH # @@ -31,6 +33,12 @@ mv gcc-11.2.0 src-gcc mv musl-1.2.2 src-musl mv linux-5.10.70 src-linux +# +# Patch gcc +# + +wget -O - ${GCC_PATCH0_URL} | patch -d src-gcc -p1 + mkdir -p bld-hdr bld-binu bld-gcc bld-musl mkdir -p ${CROSS_SYSROOT}/usr/include diff --git a/tests/docker/dockerfiles/debian-mips64el-cross.docker b/tests/docker/dockerfiles/debian-mips64el-cross.docker index 59c4c68dce6..4a941dd870e 100644 --- a/tests/docker/dockerfiles/debian-mips64el-cross.docker +++ b/tests/docker/dockerfiles/debian-mips64el-cross.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all --cross-arch mips64el debian-11 qemu +# $ lcitool dockerfile --layers all --cross-arch mips64el debian-12 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM docker.io/library/debian:11-slim +FROM docker.io/library/debian:12-slim RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ @@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ bash \ bc \ + bindgen \ bison \ bsdextrautils \ bzip2 \ @@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ git \ hostname \ libglib2.0-dev \ - libpcre2-dev \ - libsndio-dev \ - libspice-protocol-dev \ llvm \ locales \ make \ @@ -47,19 +45,20 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-opencv \ python3-pillow \ python3-pip \ - python3-setuptools \ python3-sphinx \ python3-sphinx-rtd-theme \ python3-venv \ - python3-wheel \ python3-yaml \ rpm2cpio \ + rustc-web \ sed \ socat \ sparse \ + swtpm \ tar \ tesseract-ocr \ tesseract-ocr-eng \ + vulkan-tools \ xorriso \ zstd && \ eatmydata apt-get autoremove -y && \ @@ -68,8 +67,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ dpkg-reconfigure locales && \ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED -RUN /usr/bin/pip3 install tomli - ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -93,6 +90,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libcacard-dev:mips64el \ libcap-ng-dev:mips64el \ libcapstone-dev:mips64el \ + libcbor-dev:mips64el \ libcmocka-dev:mips64el \ libcurl4-gnutls-dev:mips64el \ libdaxctl-dev:mips64el \ @@ -107,6 +105,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libglusterfs-dev:mips64el \ libgnutls28-dev:mips64el \ libgtk-3-dev:mips64el \ + 
libgtk-vnc-2.0-dev:mips64el \ libibverbs-dev:mips64el \ libiscsi-dev:mips64el \ libjemalloc-dev:mips64el \ @@ -118,6 +117,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libnfs-dev:mips64el \ libnuma-dev:mips64el \ libpam0g-dev:mips64el \ + libpcre2-dev:mips64el \ libpipewire-0.3-dev:mips64el \ libpixman-1-dev:mips64el \ libpng-dev:mips64el \ @@ -131,8 +131,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libselinux1-dev:mips64el \ libslirp-dev:mips64el \ libsnappy-dev:mips64el \ + libsndio-dev:mips64el \ + libspice-protocol-dev:mips64el \ libspice-server-dev:mips64el \ - libssh-gcrypt-dev:mips64el \ + libssh-dev:mips64el \ libsystemd-dev:mips64el \ libtasn1-6-dev:mips64el \ libudev-dev:mips64el \ @@ -142,6 +144,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libvdeplug-dev:mips64el \ libvirglrenderer-dev:mips64el \ libvte-2.91-dev:mips64el \ + libxdp-dev:mips64el \ libzstd-dev:mips64el \ nettle-dev:mips64el \ systemtap-sdt-dev:mips64el \ @@ -167,6 +170,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/mips64el-linux-gnuabi64 && \ ENV ABI "mips64el-linux-gnuabi64" ENV MESON_OPTS "--cross-file=mips64el-linux-gnuabi64" +ENV RUST_TARGET "mips64el-unknown-linux-gnuabi64" ENV QEMU_CONFIGURE_OPTS --cross-prefix=mips64el-linux-gnuabi64- ENV DEF_TARGET_LIST mips64el-softmmu,mips64el-linux-user # As a final step configure the user (if env is defined) diff --git a/tests/docker/dockerfiles/debian-mipsel-cross.docker b/tests/docker/dockerfiles/debian-mipsel-cross.docker index 880c774f1c4..4d3e5d711bd 100644 --- a/tests/docker/dockerfiles/debian-mipsel-cross.docker +++ b/tests/docker/dockerfiles/debian-mipsel-cross.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all --cross-arch mipsel debian-11 qemu +# $ lcitool dockerfile --layers all --cross-arch mipsel debian-12 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM docker.io/library/debian:11-slim +FROM docker.io/library/debian:12-slim RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ @@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ bash \ bc \ + bindgen \ bison \ bsdextrautils \ bzip2 \ @@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ git \ hostname \ libglib2.0-dev \ - libpcre2-dev \ - libsndio-dev \ - libspice-protocol-dev \ llvm \ locales \ make \ @@ -47,19 +45,20 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-opencv \ python3-pillow \ python3-pip \ - python3-setuptools \ python3-sphinx \ python3-sphinx-rtd-theme \ python3-venv \ - python3-wheel \ python3-yaml \ rpm2cpio \ + rustc-web \ sed \ socat \ sparse \ + swtpm \ tar \ tesseract-ocr \ tesseract-ocr-eng \ + vulkan-tools \ xorriso \ zstd && \ eatmydata apt-get autoremove -y && \ @@ -68,8 +67,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ dpkg-reconfigure locales && \ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED -RUN /usr/bin/pip3 install tomli - ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -93,6 +90,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libcacard-dev:mipsel \ libcap-ng-dev:mipsel \ libcapstone-dev:mipsel \ + libcbor-dev:mipsel \ libcmocka-dev:mipsel \ libcurl4-gnutls-dev:mipsel \ libdaxctl-dev:mipsel \ @@ -107,6 +105,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libglusterfs-dev:mipsel \ libgnutls28-dev:mipsel \ libgtk-3-dev:mipsel \ + libgtk-vnc-2.0-dev:mipsel \ libibverbs-dev:mipsel \ libiscsi-dev:mipsel \ libjemalloc-dev:mipsel \ @@ 
-118,6 +117,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libnfs-dev:mipsel \ libnuma-dev:mipsel \ libpam0g-dev:mipsel \ + libpcre2-dev:mipsel \ libpipewire-0.3-dev:mipsel \ libpixman-1-dev:mipsel \ libpng-dev:mipsel \ @@ -131,8 +131,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libselinux1-dev:mipsel \ libslirp-dev:mipsel \ libsnappy-dev:mipsel \ + libsndio-dev:mipsel \ + libspice-protocol-dev:mipsel \ libspice-server-dev:mipsel \ - libssh-gcrypt-dev:mipsel \ + libssh-dev:mipsel \ libsystemd-dev:mipsel \ libtasn1-6-dev:mipsel \ libudev-dev:mipsel \ @@ -142,6 +144,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libvdeplug-dev:mipsel \ libvirglrenderer-dev:mipsel \ libvte-2.91-dev:mipsel \ + libxdp-dev:mipsel \ libzstd-dev:mipsel \ nettle-dev:mipsel \ systemtap-sdt-dev:mipsel \ @@ -167,6 +170,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/mipsel-linux-gnu && \ ENV ABI "mipsel-linux-gnu" ENV MESON_OPTS "--cross-file=mipsel-linux-gnu" +ENV RUST_TARGET "mipsel-unknown-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=mipsel-linux-gnu- ENV DEF_TARGET_LIST mipsel-softmmu,mipsel-linux-user # As a final step configure the user (if env is defined) diff --git a/tests/docker/dockerfiles/debian-ppc64el-cross.docker b/tests/docker/dockerfiles/debian-ppc64el-cross.docker index 1d55b9514c8..22b4457ba99 100644 --- a/tests/docker/dockerfiles/debian-ppc64el-cross.docker +++ b/tests/docker/dockerfiles/debian-ppc64el-cross.docker @@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ bash \ bc \ + bindgen \ bison \ bsdextrautils \ bzip2 \ @@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ git \ hostname \ libglib2.0-dev \ - libpcre2-dev \ - libsndio-dev \ - libspice-protocol-dev \ llvm \ locales \ make \ @@ -52,6 +50,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-venv \ python3-yaml \ rpm2cpio \ + rustc-web \ sed \ socat \ sparse \ @@ -59,6 +58,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ tar \ tesseract-ocr \ tesseract-ocr-eng \ + vulkan-tools \ xorriso \ zstd && \ eatmydata apt-get autoremove -y && \ @@ -91,6 +91,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libcacard-dev:ppc64el \ libcap-ng-dev:ppc64el \ libcapstone-dev:ppc64el \ + libcbor-dev:ppc64el \ libcmocka-dev:ppc64el \ libcurl4-gnutls-dev:ppc64el \ libdaxctl-dev:ppc64el \ @@ -105,6 +106,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libglusterfs-dev:ppc64el \ libgnutls28-dev:ppc64el \ libgtk-3-dev:ppc64el \ + libgtk-vnc-2.0-dev:ppc64el \ libibverbs-dev:ppc64el \ libiscsi-dev:ppc64el \ libjemalloc-dev:ppc64el \ @@ -116,6 +118,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libnfs-dev:ppc64el \ libnuma-dev:ppc64el \ libpam0g-dev:ppc64el \ + libpcre2-dev:ppc64el \ libpipewire-0.3-dev:ppc64el \ libpixman-1-dev:ppc64el \ libpng-dev:ppc64el \ @@ -129,8 +132,10 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libselinux1-dev:ppc64el \ libslirp-dev:ppc64el \ libsnappy-dev:ppc64el \ + libsndio-dev:ppc64el \ + libspice-protocol-dev:ppc64el \ libspice-server-dev:ppc64el \ - libssh-gcrypt-dev:ppc64el \ + libssh-dev:ppc64el \ libsystemd-dev:ppc64el \ libtasn1-6-dev:ppc64el \ libubsan1:ppc64el \ @@ -167,6 +172,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/powerpc64le-linux-gnu && \ ENV ABI "powerpc64le-linux-gnu" ENV MESON_OPTS "--cross-file=powerpc64le-linux-gnu" +ENV RUST_TARGET "powerpc64le-unknown-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=powerpc64le-linux-gnu- ENV DEF_TARGET_LIST 
ppc64-softmmu,ppc64-linux-user # As a final step configure the user (if env is defined) diff --git a/tests/docker/dockerfiles/debian-riscv64-cross.docker b/tests/docker/dockerfiles/debian-riscv64-cross.docker index 4d8ca83cb37..b0386cd3a1f 100644 --- a/tests/docker/dockerfiles/debian-riscv64-cross.docker +++ b/tests/docker/dockerfiles/debian-riscv64-cross.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all --cross-arch riscv64 debian-sid qemu-minimal +# $ lcitool dockerfile --layers all --cross-arch riscv64 debian-13 qemu-minimal # # https://gitlab.com/libvirt/libvirt-ci -FROM docker.io/library/debian:sid-slim +FROM docker.io/library/debian:trixie-slim RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ diff --git a/tests/docker/dockerfiles/debian-s390x-cross.docker b/tests/docker/dockerfiles/debian-s390x-cross.docker index 62ccda6ab18..13ec52c8ad0 100644 --- a/tests/docker/dockerfiles/debian-s390x-cross.docker +++ b/tests/docker/dockerfiles/debian-s390x-cross.docker @@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ bash \ bc \ + bindgen \ bison \ bsdextrautils \ bzip2 \ @@ -30,9 +31,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ git \ hostname \ libglib2.0-dev \ - libpcre2-dev \ - libsndio-dev \ - libspice-protocol-dev \ llvm \ locales \ make \ @@ -52,6 +50,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-venv \ python3-yaml \ rpm2cpio \ + rustc-web \ sed \ socat \ sparse \ @@ -59,6 +58,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ tar \ tesseract-ocr \ tesseract-ocr-eng \ + vulkan-tools \ xorriso \ zstd && \ eatmydata apt-get autoremove -y && \ @@ -91,6 +91,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libcacard-dev:s390x \ libcap-ng-dev:s390x \ libcapstone-dev:s390x \ + libcbor-dev:s390x \ libcmocka-dev:s390x \ libcurl4-gnutls-dev:s390x \ libdaxctl-dev:s390x \ @@ -105,6 +106,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libglusterfs-dev:s390x \ libgnutls28-dev:s390x \ libgtk-3-dev:s390x \ + libgtk-vnc-2.0-dev:s390x \ libibverbs-dev:s390x \ libiscsi-dev:s390x \ libjemalloc-dev:s390x \ @@ -116,6 +118,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libnfs-dev:s390x \ libnuma-dev:s390x \ libpam0g-dev:s390x \ + libpcre2-dev:s390x \ libpipewire-0.3-dev:s390x \ libpixman-1-dev:s390x \ libpng-dev:s390x \ @@ -129,7 +132,9 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libselinux1-dev:s390x \ libslirp-dev:s390x \ libsnappy-dev:s390x \ - libssh-gcrypt-dev:s390x \ + libsndio-dev:s390x \ + libspice-protocol-dev:s390x \ + libssh-dev:s390x \ libsystemd-dev:s390x \ libtasn1-6-dev:s390x \ libubsan1:s390x \ @@ -166,6 +171,7 @@ endian = 'big'\n" > /usr/local/share/meson/cross/s390x-linux-gnu && \ ENV ABI "s390x-linux-gnu" ENV MESON_OPTS "--cross-file=s390x-linux-gnu" +ENV RUST_TARGET "s390x-unknown-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=s390x-linux-gnu- ENV DEF_TARGET_LIST s390x-softmmu,s390x-linux-user # As a final step configure the user (if env is defined) diff --git a/tests/docker/dockerfiles/debian-toolchain.docker b/tests/docker/dockerfiles/debian-toolchain.docker index 687a97fec4a..ab4ce29533d 100644 --- a/tests/docker/dockerfiles/debian-toolchain.docker +++ b/tests/docker/dockerfiles/debian-toolchain.docker @@ -10,6 +10,8 @@ FROM docker.io/library/debian:11-slim # ??? The build-dep isn't working, missing a number of # minimal build dependiencies, e.g. libmpc. 
+RUN sed 's/^deb /deb-src /' /etc/apt/sources.list.d/deb-src.list + RUN apt update && \ DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \ DEBIAN_FRONTEND=noninteractive eatmydata \ @@ -33,6 +35,11 @@ RUN cd /root && ./build-toolchain.sh # and the build trees by restoring the original image, # then copying the built toolchain from stage 0. FROM docker.io/library/debian:11-slim +RUN apt update && \ + DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \ + DEBIAN_FRONTEND=noninteractive eatmydata \ + apt install -y --no-install-recommends \ + libmpc3 COPY --from=0 /usr/local /usr/local # As a final step configure the user (if env is defined) ARG USER diff --git a/tests/docker/dockerfiles/debian-tricore-cross.docker b/tests/docker/dockerfiles/debian-tricore-cross.docker index 479b4d6ebab..7e00e870ceb 100644 --- a/tests/docker/dockerfiles/debian-tricore-cross.docker +++ b/tests/docker/dockerfiles/debian-tricore-cross.docker @@ -11,8 +11,6 @@ # FROM docker.io/library/debian:11-slim -MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org> - RUN apt update && \ DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \ DEBIAN_FRONTEND=noninteractive eatmydata apt install -yy \ diff --git a/tests/docker/dockerfiles/debian.docker b/tests/docker/dockerfiles/debian.docker index 0d1d401eb81..0a57c1a1d37 100644 --- a/tests/docker/dockerfiles/debian.docker +++ b/tests/docker/dockerfiles/debian.docker @@ -13,6 +13,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ bash \ bc \ + bindgen \ bison \ bsdextrautils \ bzip2 \ @@ -41,6 +42,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libcacard-dev \ libcap-ng-dev \ libcapstone-dev \ + libcbor-dev \ libcmocka-dev \ libcurl4-gnutls-dev \ libdaxctl-dev \ @@ -55,6 +57,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libglusterfs-dev \ libgnutls28-dev \ libgtk-3-dev \ + libgtk-vnc-2.0-dev \ libibverbs-dev \ libiscsi-dev \ libjemalloc-dev \ @@ -84,7 +87,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libsndio-dev \ libspice-protocol-dev \ libspice-server-dev \ - libssh-gcrypt-dev \ + libssh-dev \ libsystemd-dev \ libtasn1-6-dev \ libubsan1 \ @@ -119,6 +122,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-venv \ python3-yaml \ rpm2cpio \ + rustc-web \ sed \ socat \ sparse \ @@ -127,6 +131,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ tar \ tesseract-ocr \ tesseract-ocr-eng \ + vulkan-tools \ xorriso \ zlib1g-dev \ zstd && \ diff --git a/tests/docker/dockerfiles/emsdk-wasm32-cross.docker b/tests/docker/dockerfiles/emsdk-wasm32-cross.docker new file mode 100644 index 00000000000..60a7d02f561 --- /dev/null +++ b/tests/docker/dockerfiles/emsdk-wasm32-cross.docker @@ -0,0 +1,145 @@ +# syntax = docker/dockerfile:1.5 + +ARG EMSDK_VERSION_QEMU=3.1.50 +ARG ZLIB_VERSION=1.3.1 +ARG GLIB_MINOR_VERSION=2.84 +ARG GLIB_VERSION=${GLIB_MINOR_VERSION}.0 +ARG PIXMAN_VERSION=0.44.2 +ARG FFI_VERSION=v3.4.7 +ARG MESON_VERSION=1.5.0 + +FROM emscripten/emsdk:$EMSDK_VERSION_QEMU AS build-base +ARG MESON_VERSION +ENV TARGET=/builddeps/target +ENV CPATH="$TARGET/include" +ENV PKG_CONFIG_PATH="$TARGET/lib/pkgconfig" +ENV EM_PKG_CONFIG_PATH="$PKG_CONFIG_PATH" +ENV CFLAGS="-O3 -pthread -DWASM_BIGINT" +ENV CXXFLAGS="$CFLAGS" +ENV LDFLAGS="-sWASM_BIGINT -sASYNCIFY=1 -L$TARGET/lib" +RUN apt-get update && apt-get install -y \ + autoconf \ + build-essential \ + libglib2.0-dev \ + libtool \ + pkgconf \ + ninja-build \ + python3-pip +RUN pip3 install meson==${MESON_VERSION} tomli +RUN mkdir /build +WORKDIR /build +RUN 
mkdir -p $TARGET +RUN <<EOF +cat <<EOT > /cross.meson +[host_machine] +system = 'emscripten' +cpu_family = 'wasm32' +cpu = 'wasm32' +endian = 'little' + +[binaries] +c = 'emcc' +cpp = 'em++' +ar = 'emar' +ranlib = 'emranlib' +pkgconfig = ['pkg-config', '--static'] +EOT +EOF + +FROM build-base AS zlib-dev +ARG ZLIB_VERSION +RUN mkdir -p /zlib +RUN curl -Ls https://zlib.net/zlib-$ZLIB_VERSION.tar.xz | \ + tar xJC /zlib --strip-components=1 +WORKDIR /zlib +RUN emconfigure ./configure --prefix=$TARGET --static +RUN emmake make install -j$(nproc) + +FROM build-base AS libffi-dev +ARG FFI_VERSION +RUN mkdir -p /libffi +RUN git clone https://github.com/libffi/libffi /libffi +WORKDIR /libffi +RUN git checkout $FFI_VERSION +RUN autoreconf -fiv +RUN emconfigure ./configure --host=wasm32-unknown-linux \ + --prefix=$TARGET --enable-static \ + --disable-shared --disable-dependency-tracking \ + --disable-builddir --disable-multi-os-directory \ + --disable-raw-api --disable-docs +RUN emmake make install SUBDIRS='include' -j$(nproc) + +FROM build-base AS pixman-dev +ARG PIXMAN_VERSION +RUN mkdir /pixman/ +RUN git clone https://gitlab.freedesktop.org/pixman/pixman /pixman/ +WORKDIR /pixman +RUN git checkout pixman-$PIXMAN_VERSION +RUN <<EOF +cat <<EOT >> /cross.meson +[built-in options] +c_args = [$(printf "'%s', " $CFLAGS | sed 's/, $//')] +cpp_args = [$(printf "'%s', " $CFLAGS | sed 's/, $//')] +objc_args = [$(printf "'%s', " $CFLAGS | sed 's/, $//')] +c_link_args = [$(printf "'%s', " $LDFLAGS | sed 's/, $//')] +cpp_link_args = [$(printf "'%s', " $LDFLAGS | sed 's/, $//')] +EOT +EOF +RUN meson setup _build --prefix=$TARGET --cross-file=/cross.meson \ + --default-library=static \ + --buildtype=release -Dtests=disabled -Ddemos=disabled +RUN meson install -C _build + +FROM build-base AS glib-dev +ARG GLIB_VERSION +ARG GLIB_MINOR_VERSION +RUN mkdir -p /stub +WORKDIR /stub +RUN <<EOF +cat <<EOT > res_query.c +#include <netdb.h> +int res_query(const char *name, int class, + int type, unsigned char *dest, int len) +{ + h_errno = HOST_NOT_FOUND; + return -1; +} +EOT +EOF +RUN emcc ${CFLAGS} -c res_query.c -fPIC -o libresolv.o +RUN ar rcs libresolv.a libresolv.o +RUN mkdir -p $TARGET/lib/ +RUN cp libresolv.a $TARGET/lib/ + +RUN mkdir -p /glib +RUN curl -Lks https://download.gnome.org/sources/glib/${GLIB_MINOR_VERSION}/glib-$GLIB_VERSION.tar.xz | \ + tar xJC /glib --strip-components=1 + +COPY --link --from=zlib-dev /builddeps/ /builddeps/ +COPY --link --from=libffi-dev /builddeps/ /builddeps/ + +WORKDIR /glib +RUN <<EOF +cat <<EOT >> /cross.meson +[built-in options] +c_args = [$(printf "'%s', " $CFLAGS | sed 's/, $//')] +cpp_args = [$(printf "'%s', " $CFLAGS | sed 's/, $//')] +objc_args = [$(printf "'%s', " $CFLAGS | sed 's/, $//')] +c_link_args = [$(printf "'%s', " $LDFLAGS | sed 's/, $//')] +cpp_link_args = [$(printf "'%s', " $LDFLAGS | sed 's/, $//')] +EOT +EOF +RUN meson setup _build --prefix=$TARGET --cross-file=/cross.meson \ + --default-library=static --buildtype=release --force-fallback-for=pcre2 \ + -Dselinux=disabled -Dxattr=false -Dlibmount=disabled -Dnls=disabled \ + -Dtests=false -Dglib_debug=disabled -Dglib_assert=false -Dglib_checks=false +# FIXME: emscripten doesn't provide some pthread functions in the final link, +# which isn't detected during meson setup. 
+RUN sed -i -E "/#define HAVE_POSIX_SPAWN 1/d" ./_build/config.h +RUN sed -i -E "/#define HAVE_PTHREAD_GETNAME_NP 1/d" ./_build/config.h +RUN meson install -C _build + +FROM build-base +COPY --link --from=glib-dev /builddeps/ /builddeps/ +COPY --link --from=pixman-dev /builddeps/ /builddeps/ diff --git a/tests/docker/dockerfiles/fedora-cris-cross.docker b/tests/docker/dockerfiles/fedora-cris-cross.docker deleted file mode 100644 index 97c9d37ede2..00000000000 --- a/tests/docker/dockerfiles/fedora-cris-cross.docker +++ /dev/null @@ -1,14 +0,0 @@ -# -# Cross compiler for cris system tests -# - -FROM registry.fedoraproject.org/fedora:33 -ENV PACKAGES gcc-cris-linux-gnu -ENV MAKE /usr/bin/make -RUN dnf install -y $PACKAGES -RUN rpm -q $PACKAGES | sort > /packages.txt -# As a final step configure the user (if env is defined) -ARG USER -ARG UID -RUN if [ "${USER}" ]; then \ - id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/fedora-rust-nightly.docker b/tests/docker/dockerfiles/fedora-rust-nightly.docker new file mode 100644 index 00000000000..4a033309b38 --- /dev/null +++ b/tests/docker/dockerfiles/fedora-rust-nightly.docker @@ -0,0 +1,183 @@ +# THIS FILE WAS AUTO-GENERATED +# +# $ lcitool dockerfile --layers all fedora-40 qemu +# +# https://gitlab.com/libvirt/libvirt-ci + +FROM registry.fedoraproject.org/fedora:40 + +RUN dnf install -y nosync && \ + printf '#!/bin/sh\n\ +if test -d /usr/lib64\n\ +then\n\ + export LD_PRELOAD=/usr/lib64/nosync/nosync.so\n\ +else\n\ + export LD_PRELOAD=/usr/lib/nosync/nosync.so\n\ +fi\n\ +exec "$@"\n' > /usr/bin/nosync && \ + chmod +x /usr/bin/nosync && \ + nosync dnf update -y && \ + nosync dnf install -y \ + SDL2-devel \ + SDL2_image-devel \ + alsa-lib-devel \ + bash \ + bc \ + bindgen-cli \ + bison \ + brlapi-devel \ + bzip2 \ + bzip2-devel \ + ca-certificates \ + capstone-devel \ + ccache \ + clang \ + ctags \ + cyrus-sasl-devel \ + daxctl-devel \ + dbus-daemon \ + device-mapper-multipath-devel \ + diffutils \ + findutils \ + flex \ + fuse3-devel \ + gcc \ + gcovr \ + gettext \ + git \ + glib2-devel \ + glib2-static \ + glibc-langpack-en \ + glibc-static \ + glusterfs-api-devel \ + gnutls-devel \ + gtk-vnc2-devel \ + gtk3-devel \ + hostname \ + jemalloc-devel \ + json-c-devel \ + libaio-devel \ + libasan \ + libattr-devel \ + libbpf-devel \ + libcacard-devel \ + libcap-ng-devel \ + libcbor-devel \ + libcmocka-devel \ + libcurl-devel \ + libdrm-devel \ + libepoxy-devel \ + libfdt-devel \ + libffi-devel \ + libgcrypt-devel \ + libiscsi-devel \ + libjpeg-devel \ + libnfs-devel \ + libpmem-devel \ + libpng-devel \ + librbd-devel \ + libseccomp-devel \ + libselinux-devel \ + libslirp-devel \ + libssh-devel \ + libtasn1-devel \ + libubsan \ + liburing-devel \ + libusbx-devel \ + libxdp-devel \ + libzstd-devel \ + llvm \ + lttng-ust-devel \ + lzo-devel \ + make \ + mesa-libgbm-devel \ + meson \ + mtools \ + ncurses-devel \ + nettle-devel \ + ninja-build \ + nmap-ncat \ + numactl-devel \ + openssh-clients \ + pam-devel \ + pcre-static \ + pipewire-devel \ + pixman-devel \ + pkgconfig \ + pulseaudio-libs-devel \ + python3 \ + python3-PyYAML \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-sphinx \ + python3-sphinx_rtd_theme \ + python3-zombie-imp \ + rdma-core-devel \ + rust \ + sed \ + snappy-devel \ + socat \ + sparse \ + spice-protocol \ + spice-server-devel \ + swtpm \ + systemd-devel \ + systemtap-sdt-devel \ + tar \ + tesseract \ + tesseract-langpack-eng \ + usbredir-devel \ + 
util-linux \ + virglrenderer-devel \ + vte291-devel \ + vulkan-tools \ + which \ + xen-devel \ + xorriso \ + zlib-devel \ + zlib-static \ + zstd && \ + nosync dnf autoremove -y && \ + nosync dnf clean all -y && \ + rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED && \ + rpm -qa | sort > /packages.txt && \ + mkdir -p /usr/libexec/ccache-wrappers && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/cc && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \ + ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc + +ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" +ENV LANG "en_US.UTF-8" +ENV MAKE "/usr/bin/make" +ENV NINJA "/usr/bin/ninja" +ENV PYTHON "/usr/bin/python3" +RUN dnf install -y wget +ENV RUSTUP_HOME=/usr/local/rustup CARGO_HOME=/usr/local/cargo +ENV RUSTC=/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/rustc +ENV RUSTDOC=/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/rustdoc +ENV CARGO=/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/cargo +RUN set -eux && \ + rustArch='x86_64-unknown-linux-gnu' && \ + rustupSha256='6aeece6993e902708983b209d04c0d1dbb14ebb405ddb87def578d41f920f56d' && \ + url="https://static.rust-lang.org/rustup/archive/1.27.1/${rustArch}/rustup-init" && \ + wget "$url" && \ + echo "${rustupSha256} *rustup-init" | sha256sum -c - && \ + chmod +x rustup-init && \ + ./rustup-init -y --no-modify-path --profile default --default-toolchain nightly --default-host ${rustArch} && \ + chmod -R a+w $RUSTUP_HOME $CARGO_HOME && \ + /usr/local/cargo/bin/rustup --version && \ + /usr/local/cargo/bin/rustup run nightly cargo --version && \ + /usr/local/cargo/bin/rustup run nightly rustc --version && \ + test "$CARGO" = "$(/usr/local/cargo/bin/rustup +nightly which cargo)" && \ + test "$RUSTDOC" = "$(/usr/local/cargo/bin/rustup +nightly which rustdoc)" && \ + test "$RUSTC" = "$(/usr/local/cargo/bin/rustup +nightly which rustc)" +ENV PATH=$CARGO_HOME/bin:$PATH +RUN /usr/local/cargo/bin/rustup run nightly cargo install bindgen-cli +RUN $CARGO --list +# As a final step configure the user (if env is defined) +ARG USER +ARG UID +RUN if [ "${USER}" ]; then \ + id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi diff --git a/tests/docker/dockerfiles/fedora-win64-cross.docker b/tests/docker/dockerfiles/fedora-win64-cross.docker index 007e1574bd9..a9503444027 100644 --- a/tests/docker/dockerfiles/fedora-win64-cross.docker +++ b/tests/docker/dockerfiles/fedora-win64-cross.docker @@ -20,6 +20,7 @@ exec "$@"\n' > /usr/bin/nosync && \ nosync dnf install -y \ bash \ bc \ + bindgen-cli \ bison \ bzip2 \ ca-certificates \ @@ -42,7 +43,6 @@ exec "$@"\n' > /usr/bin/nosync && \ ninja-build \ nmap-ncat \ openssh-clients \ - pcre-static \ python3 \ python3-PyYAML \ python3-numpy \ @@ -52,15 +52,16 @@ exec "$@"\n' > /usr/bin/nosync && \ python3-sphinx \ python3-sphinx_rtd_theme \ python3-zombie-imp \ + rust \ sed \ socat \ sparse \ - spice-protocol \ swtpm \ tar \ tesseract \ tesseract-langpack-eng \ util-linux \ + vulkan-tools \ which \ xorriso \ zstd && \ @@ -86,6 +87,7 @@ RUN nosync dnf install -y \ mingw64-gettext \ mingw64-glib2 \ mingw64-gnutls \ + mingw64-gtk-vnc2 \ mingw64-gtk3 \ mingw64-libepoxy \ mingw64-libgcrypt \ diff --git a/tests/docker/dockerfiles/fedora.docker b/tests/docker/dockerfiles/fedora.docker index 44f239c088b..014e3ccf17d 100644 --- a/tests/docker/dockerfiles/fedora.docker +++ b/tests/docker/dockerfiles/fedora.docker @@ -23,6 +23,7 @@ exec "$@"\n' > /usr/bin/nosync && \ alsa-lib-devel \ bash \ bc \ + 
bindgen-cli \ bison \ brlapi-devel \ bzip2 \ @@ -50,6 +51,7 @@ exec "$@"\n' > /usr/bin/nosync && \ glibc-static \ glusterfs-api-devel \ gnutls-devel \ + gtk-vnc2-devel \ gtk3-devel \ hostname \ jemalloc-devel \ @@ -60,6 +62,7 @@ exec "$@"\n' > /usr/bin/nosync && \ libbpf-devel \ libcacard-devel \ libcap-ng-devel \ + libcbor-devel \ libcmocka-devel \ libcurl-devel \ libdrm-devel \ @@ -112,6 +115,7 @@ exec "$@"\n' > /usr/bin/nosync && \ python3-sphinx_rtd_theme \ python3-zombie-imp \ rdma-core-devel \ + rust \ sed \ snappy-devel \ socat \ @@ -128,6 +132,7 @@ exec "$@"\n' > /usr/bin/nosync && \ util-linux \ virglrenderer-devel \ vte291-devel \ + vulkan-tools \ which \ xen-devel \ xorriso \ diff --git a/tests/docker/dockerfiles/opensuse-leap.docker b/tests/docker/dockerfiles/opensuse-leap.docker index 836f531ac1c..e90225dc235 100644 --- a/tests/docker/dockerfiles/opensuse-leap.docker +++ b/tests/docker/dockerfiles/opensuse-leap.docker @@ -4,9 +4,10 @@ # # https://gitlab.com/libvirt/libvirt-ci -FROM registry.opensuse.org/opensuse/leap:15.5 +FROM registry.opensuse.org/opensuse/leap:15.6 RUN zypper update -y && \ + zypper addrepo -fc https://download.opensuse.org/update/leap/15.6/backports/openSUSE:Backports:SLE-15-SP6:Update.repo && \ zypper install -y \ Mesa-devel \ alsa-lib-devel \ @@ -33,6 +34,7 @@ RUN zypper update -y && \ glibc-locale \ glibc-static \ glusterfs-devel \ + gtk-vnc-devel \ gtk3-devel \ hostname \ jemalloc-devel \ @@ -45,6 +47,7 @@ RUN zypper update -y && \ libbz2-devel \ libcacard-devel \ libcap-ng-devel \ + libcbor-devel \ libcmocka-devel \ libcurl-devel \ libdrm-devel \ @@ -94,6 +97,8 @@ RUN zypper update -y && \ python311-pip \ python311-setuptools \ rdma-core-devel \ + rust \ + rust-bindgen \ sed \ snappy-devel \ sndio-devel \ @@ -110,6 +115,7 @@ RUN zypper update -y && \ util-linux \ virglrenderer-devel \ vte-devel \ + vulkan-tools \ which \ xen-devel \ xorriso \ @@ -126,7 +132,7 @@ RUN zypper update -y && \ RUN /usr/bin/pip3.11 install \ PyYAML \ - meson==0.63.2 \ + meson==1.5.0 \ pillow \ sphinx \ sphinx-rtd-theme diff --git a/tests/docker/dockerfiles/python.docker b/tests/docker/dockerfiles/python.docker index 8f0af9ef25f..59e70a02484 100644 --- a/tests/docker/dockerfiles/python.docker +++ b/tests/docker/dockerfiles/python.docker @@ -15,7 +15,6 @@ ENV PACKAGES \ python3.11 \ python3.12 \ python3.13 \ - python3.8 \ python3.9 RUN dnf install -y $PACKAGES diff --git a/tests/docker/dockerfiles/ubuntu2204.docker b/tests/docker/dockerfiles/ubuntu2204.docker index beeb44fc283..28a6f932430 100644 --- a/tests/docker/dockerfiles/ubuntu2204.docker +++ b/tests/docker/dockerfiles/ubuntu2204.docker @@ -41,6 +41,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libcacard-dev \ libcap-ng-dev \ libcapstone-dev \ + libcbor-dev \ libcmocka-dev \ libcurl4-gnutls-dev \ libdaxctl-dev \ @@ -55,6 +56,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libglusterfs-dev \ libgnutls28-dev \ libgtk-3-dev \ + libgtk-vnc-2.0-dev \ libibverbs-dev \ libiscsi-dev \ libjemalloc-dev \ @@ -119,6 +121,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-venv \ python3-yaml \ rpm2cpio \ + rustc-1.77 \ sed \ socat \ sparse \ @@ -127,6 +130,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ tar \ tesseract-ocr \ tesseract-ocr-eng \ + vulkan-tools \ xorriso \ zlib1g-dev \ zstd && \ @@ -146,6 +150,13 @@ ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" ENV NINJA "/usr/bin/ninja" ENV PYTHON "/usr/bin/python3" +ENV RUSTC=/usr/bin/rustc-1.77 +ENV RUSTDOC=/usr/bin/rustdoc-1.77 +ENV 
CARGO_HOME=/usr/local/cargo +ENV PATH=$CARGO_HOME/bin:$PATH +RUN DEBIAN_FRONTEND=noninteractive eatmydata \ + apt install -y --no-install-recommends cargo +RUN cargo install bindgen-cli # As a final step configure the user (if env is defined) ARG USER ARG UID diff --git a/tests/docker/test-debug b/tests/docker/test-debug index f52f16328ca..678ceccc274 100755 --- a/tests/docker/test-debug +++ b/tests/docker/test-debug @@ -1,6 +1,6 @@ #!/bin/bash -e # -# Compile and check with clang & --enable-debug --enable-sanitizers. +# Compile and check with clang & debug & sanitizers # # Copyright (c) 2016-2018 Red Hat Inc. # @@ -19,7 +19,7 @@ requires_binary clang cd "$BUILD_DIR" OPTS="--cxx=clang++ --cc=clang --host-cc=clang" -OPTS="--enable-debug --enable-sanitizers $OPTS" +OPTS="--enable-debug --enable-asan --enable-ubsan $OPTS" export ASAN_OPTIONS=detect_leaks=0 build_qemu $OPTS diff --git a/tests/docker/test-rust b/tests/docker/test-rust new file mode 100755 index 00000000000..e7e3e94a553 --- /dev/null +++ b/tests/docker/test-rust @@ -0,0 +1,21 @@ +#!/bin/bash -e +# +# Run the rust code checks (a.k.a. check-rust-tools-nightly) +# +# Copyright (c) 2025 Linaro Ltd +# +# Authors: +# Alex Bennée +# +# This work is licensed under the terms of the GNU GPL, version 2 +# or (at your option) any later version. See the COPYING file in +# the top-level directory. + +. common.rc + +cd "$BUILD_DIR" + +configure_qemu --disable-user --disable-docs --enable-rust +pyvenv/bin/meson devenv -w $QEMU_SRC/rust ${CARGO-cargo} fmt --check +make clippy +make rustdoc diff --git a/tests/fp/fp-bench.c b/tests/fp/fp-bench.c index 8ce0ca1545d..d90f542ea25 100644 --- a/tests/fp/fp-bench.c +++ b/tests/fp/fp-bench.c @@ -488,6 +488,16 @@ static void run_bench(void) { bench_func_t f; + /* + * These implementation-defined choices for various things IEEE + * doesn't specify match those used by the Arm architecture. + */ + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &soft_status); + set_float_3nan_prop_rule(float_3nan_prop_s_cab, &soft_status); + set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, &soft_status); + set_float_default_nan_pattern(0b01000000, &soft_status); + set_float_ftz_detection(float_ftz_before_rounding, &soft_status); + f = bench_funcs[operation][precision]; g_assert(f); f(); diff --git a/tests/fp/fp-test-log2.c b/tests/fp/fp-test-log2.c index 4eae93eb7cc..79f619cdea9 100644 --- a/tests/fp/fp-test-log2.c +++ b/tests/fp/fp-test-log2.c @@ -70,6 +70,8 @@ int main(int ac, char **av) float_status qsf = {0}; int i; + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &qsf); + set_float_default_nan_pattern(0b01000000, &qsf); set_float_rounding_mode(float_round_nearest_even, &qsf); test.d = 0.0; diff --git a/tests/fp/fp-test.c b/tests/fp/fp-test.c index 36b5712cda0..c619e5dbf72 100644 --- a/tests/fp/fp-test.c +++ b/tests/fp/fp-test.c @@ -935,6 +935,15 @@ void run_test(void) { unsigned int i; + /* + * These implementation-defined choices for various things IEEE + * doesn't specify match those used by the Arm architecture. 
+ */ + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &qsf); + set_float_3nan_prop_rule(float_3nan_prop_s_cab, &qsf); + set_float_default_nan_pattern(0b01000000, &qsf); + set_float_infzeronan_rule(float_infzeronan_dnan_if_qnan, &qsf); + genCases_setLevel(test_level); verCases_maxErrorCount = n_max_errors; diff --git a/tests/fp/meson.build b/tests/fp/meson.build index 114b4b483ea..9059a247521 100644 --- a/tests/fp/meson.build +++ b/tests/fp/meson.build @@ -7,6 +7,16 @@ if host_os == 'windows' subdir_done() endif +# By default tests run with the usual 30s timeout; particularly +# slow tests can have that overridden here. The keys here are +# the testnames without their fp-test- prefix. +slow_fp_tests = { + 'rem': 60, + 'div': 60, + 'mul': 60, + 'mulAdd': 180, +} + sfcflags = [ # softfloat defines '-DSOFTFLOAT_ROUND_ODD', @@ -109,6 +119,7 @@ fptest_rounding_args = ['-r', 'all'] foreach k, v : softfloat_conv_tests test('fp-test-' + k, fptest, args: fptest_args + fptest_rounding_args + v.split(), + timeout: slow_fp_tests.get(k, 30), suite: ['softfloat', 'softfloat-conv']) endforeach @@ -116,6 +127,7 @@ foreach k, v : softfloat_tests test('fp-test-' + k, fptest, args: fptest_args + fptest_rounding_args + ['f16_' + k, 'f32_' + k, 'f64_' + k, 'f128_' + k, 'extF80_' + k], + timeout: slow_fp_tests.get(k, 30), suite: ['softfloat', 'softfloat-' + v]) endforeach @@ -124,7 +136,8 @@ test('fp-test-mulAdd', fptest, # no fptest_rounding_args args: fptest_args + ['f16_mulAdd', 'f32_mulAdd', 'f64_mulAdd', 'f128_mulAdd'], - suite: ['softfloat-slow', 'softfloat-ops-slow', 'slow'], timeout: 180) + timeout: slow_fp_tests.get('mulAdd', 30), + suite: ['softfloat-slow', 'softfloat-ops-slow', 'slow']) executable( 'fp-bench', @@ -140,4 +153,5 @@ fptestlog2 = executable( c_args: fpcflags, ) test('fp-test-log2', fptestlog2, + timeout: slow_fp_tests.get('log2', 30), suite: ['softfloat', 'softfloat-ops']) diff --git a/tests/avocado/acpi-bits/bits-config/bits-cfg.txt b/tests/functional/acpi-bits/bits-config/bits-cfg.txt similarity index 100% rename from tests/avocado/acpi-bits/bits-config/bits-cfg.txt rename to tests/functional/acpi-bits/bits-config/bits-cfg.txt diff --git a/tests/avocado/acpi-bits/bits-tests/smbios.py2 b/tests/functional/acpi-bits/bits-tests/smbios.py2 similarity index 100% rename from tests/avocado/acpi-bits/bits-tests/smbios.py2 rename to tests/functional/acpi-bits/bits-tests/smbios.py2 diff --git a/tests/avocado/acpi-bits/bits-tests/smilatency.py2 b/tests/functional/acpi-bits/bits-tests/smilatency.py2 similarity index 100% rename from tests/avocado/acpi-bits/bits-tests/smilatency.py2 rename to tests/functional/acpi-bits/bits-tests/smilatency.py2 diff --git a/tests/avocado/acpi-bits/bits-tests/testacpi.py2 b/tests/functional/acpi-bits/bits-tests/testacpi.py2 similarity index 100% rename from tests/avocado/acpi-bits/bits-tests/testacpi.py2 rename to tests/functional/acpi-bits/bits-tests/testacpi.py2 diff --git a/tests/avocado/acpi-bits/bits-tests/testcpuid.py2 b/tests/functional/acpi-bits/bits-tests/testcpuid.py2 similarity index 100% rename from tests/avocado/acpi-bits/bits-tests/testcpuid.py2 rename to tests/functional/acpi-bits/bits-tests/testcpuid.py2 diff --git a/tests/functional/aspeed.py b/tests/functional/aspeed.py new file mode 100644 index 00000000000..b131703c528 --- /dev/null +++ b/tests/functional/aspeed.py @@ -0,0 +1,63 @@ +# Test class to boot aspeed machines +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import exec_command_and_wait_for_pattern +from qemu_test import 
LinuxKernelTest + +class AspeedTest(LinuxKernelTest): + + def do_test_arm_aspeed_openbmc(self, machine, image, uboot='2019.04', + cpu_id='0x0', soc='AST2500 rev A1', + image_hostname=None): + # Allow for the image hostname to not end in "-bmc" + if image_hostname is not None: + hostname = image_hostname + else: + hostname = machine.removesuffix('-bmc') + + self.set_machine(machine) + self.vm.set_console() + self.vm.add_args('-drive', f'file={image},if=mtd,format=raw', + '-snapshot') + self.vm.launch() + + self.wait_for_console_pattern(f'U-Boot {uboot}') + self.wait_for_console_pattern('## Loading kernel from FIT Image') + self.wait_for_console_pattern('Starting kernel ...') + self.wait_for_console_pattern(f'Booting Linux on physical CPU {cpu_id}') + self.wait_for_console_pattern(f'ASPEED {soc}') + self.wait_for_console_pattern('/init as init process') + self.wait_for_console_pattern(f'systemd[1]: Hostname set to <{hostname}>.') + + def do_test_arm_aspeed_buildroot_start(self, image, cpu_id, pattern='Aspeed EVB'): + self.require_netdev('user') + self.vm.set_console() + self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw,read-only=true', + '-net', 'nic', '-net', 'user') + self.vm.launch() + + self.wait_for_console_pattern('U-Boot 2019.04') + self.wait_for_console_pattern('## Loading kernel from FIT Image') + self.wait_for_console_pattern('Starting kernel ...') + self.wait_for_console_pattern('Booting Linux on physical CPU ' + cpu_id) + self.wait_for_console_pattern('lease of 10.0.2.15') + # the line before login: + self.wait_for_console_pattern(pattern) + exec_command_and_wait_for_pattern(self, 'root', 'Password:') + exec_command_and_wait_for_pattern(self, 'passw0rd', '#') + + def do_test_arm_aspeed_buildroot_poweroff(self): + exec_command_and_wait_for_pattern(self, 'poweroff', + 'System halted') + + def do_test_arm_aspeed_sdk_start(self, image): + self.require_netdev('user') + self.vm.set_console() + self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', + '-net', 'nic', '-net', 'user', '-snapshot') + self.vm.launch() + + self.wait_for_console_pattern('U-Boot 2019.04') + self.wait_for_console_pattern('## Loading kernel from FIT Image') + self.wait_for_console_pattern('Starting kernel ...') diff --git a/tests/functional/meson.build b/tests/functional/meson.build new file mode 100644 index 00000000000..311c6f18065 --- /dev/null +++ b/tests/functional/meson.build @@ -0,0 +1,431 @@ +# QEMU functional tests: +# Tests that are put in the 'quick' category are run by default during +# 'make check'. Everything that should not be run during 'make check' +# (e.g. tests that fetch assets from the internet) should be put into +# the 'thorough' category instead. + +# Most tests run too slow with TCI enabled, so skip the functional tests there +if get_option('tcg_interpreter') + subdir_done() +endif + +# Timeouts for individual tests that can be slow e.g. 
with debugging enabled +test_timeouts = { + 'aarch64_aspeed_ast2700' : 600, + 'aarch64_aspeed_ast2700fc' : 600, + 'aarch64_device_passthrough' : 720, + 'aarch64_imx8mp_evk' : 240, + 'aarch64_raspi4' : 480, + 'aarch64_reverse_debug' : 180, + 'aarch64_rme_virt' : 1200, + 'aarch64_rme_sbsaref' : 1200, + 'aarch64_sbsaref_alpine' : 1200, + 'aarch64_sbsaref_freebsd' : 720, + 'aarch64_smmu' : 720, + 'aarch64_tuxrun' : 240, + 'aarch64_virt' : 360, + 'aarch64_virt_gpu' : 480, + 'acpi_bits' : 420, + 'arm_aspeed_palmetto' : 120, + 'arm_aspeed_romulus' : 120, + 'arm_aspeed_witherspoon' : 120, + 'arm_aspeed_ast2500' : 720, + 'arm_aspeed_ast2600' : 1200, + 'arm_aspeed_bletchley' : 480, + 'arm_aspeed_catalina' : 480, + 'arm_aspeed_gb200nvl_bmc' : 480, + 'arm_aspeed_rainier' : 480, + 'arm_bpim2u' : 500, + 'arm_collie' : 180, + 'arm_cubieboard' : 360, + 'arm_orangepi' : 540, + 'arm_quanta_gsj' : 240, + 'arm_raspi2' : 120, + 'arm_replay' : 240, + 'arm_tuxrun' : 240, + 'arm_sx1' : 360, + 'intel_iommu': 300, + 'mips_malta' : 480, + 'mipsel_malta' : 420, + 'mipsel_replay' : 480, + 'mips64_malta' : 240, + 'mips64el_malta' : 420, + 'mips64el_replay' : 180, + 'netdev_ethtool' : 180, + 'ppc_40p' : 240, + 'ppc64_hv' : 1000, + 'ppc64_powernv' : 480, + 'ppc64_pseries' : 480, + 'ppc64_replay' : 210, + 'ppc64_tuxrun' : 420, + 'ppc64_mac99' : 120, + 'riscv64_tuxrun' : 120, + 's390x_ccw_virtio' : 420, + 'sh4_tuxrun' : 240, + 'virtio_balloon': 120, + 'x86_64_kvm_xen' : 180, + 'x86_64_replay' : 480, +} + +tests_generic_system = [ + 'empty_cpu_model', + 'info_usernet', + 'version', +] + +tests_generic_linuxuser = [ +] + +tests_generic_bsduser = [ +] + +tests_aarch64_system_quick = [ + 'migration', +] + +tests_aarch64_system_thorough = [ + 'aarch64_aspeed_ast2700', + 'aarch64_aspeed_ast2700fc', + 'aarch64_device_passthrough', + 'aarch64_hotplug_pci', + 'aarch64_imx8mp_evk', + 'aarch64_kvm', + 'aarch64_raspi3', + 'aarch64_raspi4', + 'aarch64_replay', + 'aarch64_reverse_debug', + 'aarch64_rme_virt', + 'aarch64_rme_sbsaref', + 'aarch64_sbsaref', + 'aarch64_sbsaref_alpine', + 'aarch64_sbsaref_freebsd', + 'aarch64_smmu', + 'aarch64_tcg_plugins', + 'aarch64_tuxrun', + 'aarch64_virt', + 'aarch64_virt_gpu', + 'aarch64_xen', + 'aarch64_xlnx_versal', + 'multiprocess', +] + +tests_alpha_system_quick = [ + 'migration', +] + +tests_alpha_system_thorough = [ + 'alpha_clipper', + 'alpha_replay', +] + +tests_arm_system_quick = [ + 'migration', +] + +tests_arm_system_thorough = [ + 'arm_aspeed_ast1030', + 'arm_aspeed_palmetto', + 'arm_aspeed_romulus', + 'arm_aspeed_witherspoon', + 'arm_aspeed_ast2500', + 'arm_aspeed_ast2600', + 'arm_aspeed_bletchley', + 'arm_aspeed_catalina', + 'arm_aspeed_gb200nvl_bmc', + 'arm_aspeed_rainier', + 'arm_bpim2u', + 'arm_canona1100', + 'arm_collie', + 'arm_cubieboard', + 'arm_emcraft_sf2', + 'arm_integratorcp', + 'arm_max78000fthr', + 'arm_microbit', + 'arm_orangepi', + 'arm_quanta_gsj', + 'arm_raspi2', + 'arm_realview', + 'arm_replay', + 'arm_smdkc210', + 'arm_stellaris', + 'arm_sx1', + 'arm_vexpress', + 'arm_virt', + 'arm_tuxrun', +] + +tests_arm_linuxuser_thorough = [ + 'arm_bflt', +] + +tests_avr_system_thorough = [ + 'avr_mega2560', + 'avr_uno', +] + +tests_hppa_system_quick = [ + 'hppa_seabios', +] + +tests_i386_system_quick = [ + 'migration', +] + +tests_i386_system_thorough = [ + 'i386_replay', + 'i386_tuxrun', +] + +tests_loongarch64_system_thorough = [ + 'loongarch64_virt', +] + +tests_m68k_system_thorough = [ + 'm68k_mcf5208evb', + 'm68k_nextcube', + 'm68k_replay', + 'm68k_q800', + 'm68k_tuxrun', +] + 
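As an editorial aside on how the per-target lists above and below are consumed by the harness further down in this file: each name corresponds to a Python module tests/functional/test_<name>.py built on the qemu_test package added later in this series. A minimal sketch of such a module is shown here; the test name, machine type and QMP query are invented for illustration and assume the QEMUMachine wrapper's cmd() helper for issuing QMP commands.

    # Hypothetical sketch only, not part of this patch.
    from qemu_test import QemuSystemTest

    class ExampleTest(QemuSystemTest):

        def test_version_query(self):
            self.set_machine('none')            # helper also used by e.g. aspeed.py
            self.vm.launch()
            res = self.vm.cmd('query-version')  # QMP round-trip via the machine wrapper
            self.assertIn('qemu', res)

    if __name__ == '__main__':
        QemuSystemTest.main()

Registering such a test would mean adding its name to the matching tests_<target>_system_* list and, if it is slow, an entry in test_timeouts above.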
+tests_microblaze_system_thorough = [ + 'microblaze_replay', + 'microblaze_s3adsp1800' +] + +tests_microblazeel_system_thorough = [ + 'microblazeel_s3adsp1800' +] + +tests_mips_system_thorough = [ + 'mips_malta', + 'mips_replay', + 'mips_tuxrun', +] + +tests_mipsel_system_thorough = [ + 'mipsel_malta', + 'mipsel_replay', + 'mipsel_tuxrun', +] + +tests_mips64_system_thorough = [ + 'mips64_malta', + 'mips64_tuxrun', +] + +tests_mips64el_system_thorough = [ + 'mips64el_fuloong2e', + 'mips64el_loongson3v', + 'mips64el_malta', + 'mips64el_replay', + 'mips64el_tuxrun', +] + +tests_or1k_system_thorough = [ + 'or1k_replay', + 'or1k_sim', +] + +tests_ppc_system_quick = [ + 'migration', + 'ppc_74xx', +] + +tests_ppc_system_thorough = [ + 'ppc_40p', + 'ppc_amiga', + 'ppc_bamboo', + 'ppc_mac', + 'ppc_mpc8544ds', + 'ppc_replay', + 'ppc_sam460ex', + 'ppc_tuxrun', + 'ppc_virtex_ml507', +] + +tests_ppc64_system_quick = [ + 'migration', +] + +tests_ppc64_system_thorough = [ + 'ppc64_e500', + 'ppc64_hv', + 'ppc64_powernv', + 'ppc64_pseries', + 'ppc64_replay', + 'ppc64_reverse_debug', + 'ppc64_tuxrun', + 'ppc64_mac99', +] + +tests_riscv32_system_quick = [ + 'migration', + 'riscv_opensbi', +] + +tests_riscv32_system_thorough = [ + 'riscv32_tuxrun', +] + +tests_riscv64_system_quick = [ + 'migration', + 'riscv_opensbi', +] + +tests_riscv64_system_thorough = [ + 'riscv64_sifive_u', + 'riscv64_tuxrun', +] + +tests_rx_system_thorough = [ + 'rx_gdbsim', +] + +tests_s390x_system_thorough = [ + 's390x_ccw_virtio', + 's390x_pxelinux', + 's390x_replay', + 's390x_topology', + 's390x_tuxrun', +] + +tests_sh4_system_thorough = [ + 'sh4_r2d', + 'sh4_tuxrun', +] + +tests_sh4eb_system_thorough = [ + 'sh4eb_r2d', +] + +tests_sparc_system_quick = [ + 'migration', +] + +tests_sparc_system_thorough = [ + 'sparc_replay', + 'sparc_sun4m', +] + +tests_sparc64_system_quick = [ + 'migration', +] + +tests_sparc64_system_thorough = [ + 'sparc64_sun4u', + 'sparc64_tuxrun', +] + +tests_x86_64_system_quick = [ + 'cpu_queries', + 'mem_addr_space', + 'migration', + 'pc_cpu_hotplug_props', + 'virtio_version', + 'x86_cpu_model_versions', + 'vnc', + 'memlock', +] + +tests_x86_64_system_thorough = [ + 'acpi_bits', + 'intel_iommu', + 'linux_initrd', + 'multiprocess', + 'netdev_ethtool', + 'virtio_balloon', + 'virtio_gpu', + 'x86_64_hotplug_blk', + 'x86_64_hotplug_cpu', + 'x86_64_kvm_xen', + 'x86_64_replay', + 'x86_64_reverse_debug', + 'x86_64_tuxrun', +] + +tests_xtensa_system_thorough = [ + 'xtensa_lx60', + 'xtensa_replay', +] + +precache_all = [] +foreach speed : ['quick', 'thorough'] + foreach dir : target_dirs + + target_base = dir.split('-')[0] + + if dir.endswith('-softmmu') + sysmode = 'system' + test_emulator = emulators['qemu-system-' + target_base] + elif dir.endswith('-linux-user') + sysmode = 'linuxuser' + test_emulator = emulators['qemu-' + target_base] + elif dir.endswith('-bsd-user') + sysmode = 'bsduser' + test_emulator = emulators['qemu-' + target_base] + else + continue + endif + + if speed == 'quick' + suites = ['func-quick', 'func-' + target_base] + target_tests = get_variable('tests_' + target_base + '_' + sysmode + '_quick', []) \ + + get_variable('tests_generic_' + sysmode) + else + suites = ['func-' + speed, 'func-' + target_base + '-' + speed, speed] + target_tests = get_variable('tests_' + target_base + '_' + sysmode + '_' + speed, []) + endif + + test_deps = [roms, keymap_targets] + test_env = environment() + if have_tools + test_env.set('QEMU_TEST_QEMU_IMG', meson.global_build_root() / 'qemu-img') + test_deps += 
[qemu_img] + endif + test_env.set('QEMU_TEST_QEMU_BINARY', test_emulator.full_path()) + test_env.set('QEMU_BUILD_ROOT', meson.project_build_root()) + test_env.set('PYTHONPATH', meson.project_source_root() / 'python:' + + meson.current_source_dir()) + + foreach test : target_tests + testname = '@0@-@1@'.format(target_base, test) + testfile = 'test_' + test + '.py' + testpath = meson.current_source_dir() / testfile + teststamp = testname + '.tstamp' + test_precache_env = environment() + test_precache_env.set('QEMU_TEST_PRECACHE', meson.current_build_dir() / teststamp) + test_precache_env.set('PYTHONPATH', meson.project_source_root() / 'python:' + + meson.current_source_dir()) + precache = custom_target('func-precache-' + testname, + output: teststamp, + command: [python, testpath], + depend_files: files(testpath), + build_by_default: false, + env: test_precache_env) + precache_all += precache + + # Ideally we would add 'precache' to 'depends' here, such that + # 'build_by_default: false' lets the pre-caching automatically + # run immediately before the test runs. In practice this is + # broken in meson, with it running the pre-caching in the normal + # compile phase https://github.com/mesonbuild/meson/issues/2518 + # If the above bug ever gets fixed, when QEMU changes the min + # meson version, add the 'depends' and remove the custom + # 'run_target' logic below & in Makefile.include + test('func-' + testname, + python, + depends: [test_deps, test_emulator, emulator_modules, plugin_modules], + env: test_env, + args: [testpath], + protocol: 'tap', + timeout: test_timeouts.get(test, 90), + priority: test_timeouts.get(test, 90), + suite: suites) + endforeach + endforeach +endforeach + +run_target('precache-functional', + depends: precache_all, + command: [python, '-c', '']) diff --git a/tests/functional/qemu_test/__init__.py b/tests/functional/qemu_test/__init__.py new file mode 100644 index 00000000000..6e666a059fc --- /dev/null +++ b/tests/functional/qemu_test/__init__.py @@ -0,0 +1,20 @@ +# Test class and utilities for functional tests +# +# Copyright 2024 Red Hat, Inc. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + + +from .asset import Asset +from .config import BUILD_DIR, dso_suffix +from .cmd import is_readable_executable_file, \ + interrupt_interactive_console_until_pattern, wait_for_console_pattern, \ + exec_command, exec_command_and_wait_for_pattern, get_qemu_img, which +from .testcase import QemuBaseTest, QemuUserTest, QemuSystemTest +from .linuxkernel import LinuxKernelTest +from .decorators import skipIfMissingCommands, skipIfNotMachine, \ + skipFlakyTest, skipUntrustedTest, skipBigDataTest, skipSlowTest, \ + skipIfMissingImports, skipIfOperatingSystem, skipLockedMemoryTest +from .archive import archive_extract +from .uncompress import uncompress diff --git a/tests/functional/qemu_test/archive.py b/tests/functional/qemu_test/archive.py new file mode 100644 index 00000000000..c803fdaf6dc --- /dev/null +++ b/tests/functional/qemu_test/archive.py @@ -0,0 +1,117 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Utilities for python-based QEMU tests +# +# Copyright 2024 Red Hat, Inc. 
+# +# Authors: +# Thomas Huth + +import os +from subprocess import check_call, run, DEVNULL +import tarfile +from urllib.parse import urlparse +import zipfile + +from .asset import Asset + + +def tar_extract(archive, dest_dir, member=None): + with tarfile.open(archive) as tf: + if hasattr(tarfile, 'data_filter'): + tf.extraction_filter = getattr(tarfile, 'data_filter', + (lambda member, path: member)) + if member: + tf.extract(member=member, path=dest_dir) + else: + tf.extractall(path=dest_dir) + +def cpio_extract(archive, output_path): + cwd = os.getcwd() + os.chdir(output_path) + # Not passing 'check=True' as cpio exits with non-zero + # status if the archive contains any device nodes :-( + if type(archive) == str: + run(['cpio', '-i', '-F', archive], + stdout=DEVNULL, stderr=DEVNULL) + else: + run(['cpio', '-i'], + input=archive.read(), + stdout=DEVNULL, stderr=DEVNULL) + os.chdir(cwd) + +def zip_extract(archive, dest_dir, member=None): + with zipfile.ZipFile(archive, 'r') as zf: + if member: + zf.extract(member=member, path=dest_dir) + else: + zf.extractall(path=dest_dir) + +def deb_extract(archive, dest_dir, member=None): + cwd = os.getcwd() + os.chdir(dest_dir) + try: + proc = run(['ar', 't', archive], + check=True, capture_output=True, encoding='utf8') + file_path = proc.stdout.split()[2] + check_call(['ar', 'x', archive, file_path], + stdout=DEVNULL, stderr=DEVNULL) + tar_extract(file_path, dest_dir, member) + finally: + os.chdir(cwd) + +''' +@params archive: filename, Asset, or file-like object to extract +@params dest_dir: target directory to extract into +@params member: optional member file to limit extraction to + +Extracts @archive into @dest_dir. All files are extracted +unless @member specifies a limit. + +If @format is None, heuristics will be applied to guess the format +from the filename or Asset URL. @format must be non-None if @archive +is a file-like object. +''' +def archive_extract(archive, dest_dir, format=None, member=None): + if format is None: + format = guess_archive_format(archive) + if type(archive) == Asset: + archive = str(archive) + + if format == "tar": + tar_extract(archive, dest_dir, member) + elif format == "zip": + zip_extract(archive, dest_dir, member) + elif format == "cpio": + if member is not None: + raise Exception("Unable to filter cpio extraction") + cpio_extract(archive, dest_dir) + elif format == "deb": + if type(archive) != str: + raise Exception("Unable to use file-like object with deb archives") + deb_extract(archive, dest_dir, "./" + member) + else: + raise Exception(f"Unknown archive format {format}") + +''' +@params archive: filename, or Asset to guess + +Guess the format of @compressed, raising an exception if +no format can be determined +''' +def guess_archive_format(archive): + if type(archive) == Asset: + archive = urlparse(archive.url).path + elif type(archive) != str: + raise Exception(f"Unable to guess archive format for {archive}") + + if ".tar." in archive or archive.endswith("tgz"): + return "tar" + elif archive.endswith(".zip"): + return "zip" + elif archive.endswith(".cpio"): + return "cpio" + elif archive.endswith(".deb") or archive.endswith(".udeb"): + return "deb" + else: + raise Exception(f"Unknown archive format for {archive}") diff --git a/tests/functional/qemu_test/asset.py b/tests/functional/qemu_test/asset.py new file mode 100644 index 00000000000..704b84d0ea6 --- /dev/null +++ b/tests/functional/qemu_test/asset.py @@ -0,0 +1,230 @@ +# Test utilities for fetching & caching assets +# +# Copyright 2024 Red Hat, Inc. 
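To make the calling convention of the archive helpers above concrete, here is a small usage sketch; the file names are placeholders and only archive_extract() and its parameters are taken from the code above.

    from qemu_test.archive import archive_extract

    # format is guessed from the suffix ("tar"); everything is unpacked
    archive_extract('/tmp/rootfs.tar.gz', '/tmp/scratch')

    # pull a single member out of a zip archive instead of unpacking it all
    archive_extract('/tmp/firmware.zip', '/tmp/scratch', member='bios.bin')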
+# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +import hashlib +import logging +import os +import stat +import sys +import unittest +import urllib.request +from time import sleep +from pathlib import Path +from shutil import copyfileobj +from urllib.error import HTTPError + +class AssetError(Exception): + def __init__(self, asset, msg, transient=False): + self.url = asset.url + self.msg = msg + self.transient = transient + + def __str__(self): + return "%s: %s" % (self.url, self.msg) + +# Instances of this class must be declared as class level variables +# starting with a name "ASSET_". This enables the pre-caching logic +# to easily find all referenced assets and download them prior to +# execution of the tests. +class Asset: + + def __init__(self, url, hashsum): + self.url = url + self.hash = hashsum + cache_dir_env = os.getenv('QEMU_TEST_CACHE_DIR') + if cache_dir_env: + self.cache_dir = Path(cache_dir_env, "download") + else: + self.cache_dir = Path(Path("~").expanduser(), + ".cache", "qemu", "download") + self.cache_file = Path(self.cache_dir, hashsum) + self.log = logging.getLogger('qemu-test') + + def __repr__(self): + return "Asset: url=%s hash=%s cache=%s" % ( + self.url, self.hash, self.cache_file) + + def __str__(self): + return str(self.cache_file) + + def _check(self, cache_file): + if self.hash is None: + return True + if len(self.hash) == 64: + hl = hashlib.sha256() + elif len(self.hash) == 128: + hl = hashlib.sha512() + else: + raise AssetError(self, "unknown hash type") + + # Calculate the hash of the file: + with open(cache_file, 'rb') as file: + while True: + chunk = file.read(1 << 20) + if not chunk: + break + hl.update(chunk) + + return self.hash == hl.hexdigest() + + def valid(self): + return self.cache_file.exists() and self._check(self.cache_file) + + def fetchable(self): + return not os.environ.get("QEMU_TEST_NO_DOWNLOAD", False) + + def available(self): + return self.valid() or self.fetchable() + + def _wait_for_other_download(self, tmp_cache_file): + # Another thread already seems to download the asset, so wait until + # it is done, while also checking the size to see whether it is stuck + try: + current_size = tmp_cache_file.stat().st_size + new_size = current_size + except: + if os.path.exists(self.cache_file): + return True + raise + waittime = lastchange = 600 + while waittime > 0: + sleep(1) + waittime -= 1 + try: + new_size = tmp_cache_file.stat().st_size + except: + if os.path.exists(self.cache_file): + return True + raise + if new_size != current_size: + lastchange = waittime + current_size = new_size + elif lastchange - waittime > 90: + return False + + self.log.debug("Time out while waiting for %s!", tmp_cache_file) + raise + + def fetch(self): + if not self.cache_dir.exists(): + self.cache_dir.mkdir(parents=True, exist_ok=True) + + if self.valid(): + self.log.debug("Using cached asset %s for %s", + self.cache_file, self.url) + return str(self.cache_file) + + if not self.fetchable(): + raise AssetError(self, + "Asset cache is invalid and downloads disabled") + + self.log.info("Downloading %s to %s...", self.url, self.cache_file) + tmp_cache_file = self.cache_file.with_suffix(".download") + + for retries in range(3): + try: + with tmp_cache_file.open("xb") as dst: + with urllib.request.urlopen(self.url) as resp: + copyfileobj(resp, dst) + length_hdr = resp.getheader("Content-Length") + + # Verify downloaded file size against length metadata, if + # available. 
+ if length_hdr is not None: + length = int(length_hdr) + fsize = tmp_cache_file.stat().st_size + if fsize != length: + self.log.error("Unable to download %s: " + "connection closed before " + "transfer complete (%d/%d)", + self.url, fsize, length) + tmp_cache_file.unlink() + continue + break + except FileExistsError: + self.log.debug("%s already exists, " + "waiting for other thread to finish...", + tmp_cache_file) + if self._wait_for_other_download(tmp_cache_file): + return str(self.cache_file) + self.log.debug("%s seems to be stale, " + "deleting and retrying download...", + tmp_cache_file) + tmp_cache_file.unlink() + continue + except HTTPError as e: + tmp_cache_file.unlink() + self.log.error("Unable to download %s: HTTP error %d", + self.url, e.code) + # Treat 404 as fatal, since it is highly likely to + # indicate a broken test rather than a transient + # server or networking problem + if e.code == 404: + raise AssetError(self, "Unable to download: " + "HTTP error %d" % e.code) + continue + except Exception as e: + tmp_cache_file.unlink() + raise AssetError(self, "Unable to download: " % e) + + if not os.path.exists(tmp_cache_file): + raise AssetError(self, "Download retries exceeded", transient=True) + + try: + # Set these just for informational purposes + os.setxattr(str(tmp_cache_file), "user.qemu-asset-url", + self.url.encode('utf8')) + os.setxattr(str(tmp_cache_file), "user.qemu-asset-hash", + self.hash.encode('utf8')) + except Exception as e: + self.log.debug("Unable to set xattr on %s: %s", tmp_cache_file, e) + pass + + if not self._check(tmp_cache_file): + tmp_cache_file.unlink() + raise AssetError(self, "Hash does not match %s" % self.hash) + tmp_cache_file.replace(self.cache_file) + # Remove write perms to stop tests accidentally modifying them + os.chmod(self.cache_file, stat.S_IRUSR | stat.S_IRGRP) + + self.log.info("Cached %s at %s" % (self.url, self.cache_file)) + return str(self.cache_file) + + def precache_test(test): + log = logging.getLogger('qemu-test') + log.setLevel(logging.DEBUG) + handler = logging.StreamHandler(sys.stdout) + handler.setLevel(logging.DEBUG) + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + handler.setFormatter(formatter) + log.addHandler(handler) + for name, asset in vars(test.__class__).items(): + if name.startswith("ASSET_") and type(asset) == Asset: + log.info("Attempting to cache '%s'" % asset) + try: + asset.fetch() + except AssetError as e: + if not e.transient: + raise + log.error("%s: skipping asset precache" % e) + + log.removeHandler(handler) + + def precache_suite(suite): + for test in suite: + if isinstance(test, unittest.TestSuite): + Asset.precache_suite(test) + elif isinstance(test, unittest.TestCase): + Asset.precache_test(test) + + def precache_suites(path, cacheTstamp): + loader = unittest.loader.defaultTestLoader + tests = loader.loadTestsFromNames([path], None) + + with open(cacheTstamp, "w") as fh: + Asset.precache_suite(tests) diff --git a/tests/functional/qemu_test/cmd.py b/tests/functional/qemu_test/cmd.py new file mode 100644 index 00000000000..dc5f422b77d --- /dev/null +++ b/tests/functional/qemu_test/cmd.py @@ -0,0 +1,202 @@ +# Test class and utilities for functional tests +# +# Copyright 2018, 2024 Red Hat, Inc. +# +# Original Author (Avocado-based tests): +# Cleber Rosa +# +# Adaption for standalone version: +# Thomas Huth +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
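As a usage illustration for the Asset class above (the URL and hash below are placeholders, not real artifacts): tests declare downloads as class-level ASSET_ attributes so the precache machinery can discover them, then call fetch() to obtain the cached path.

    from qemu_test import Asset, QemuSystemTest

    class ExampleAssetTest(QemuSystemTest):
        # 64 hex digits selects SHA-256 verification in Asset._check()
        ASSET_KERNEL = Asset(
            'https://example.org/images/vmlinuz',
            'aa' * 32)

        def test_kernel_present(self):
            path = self.ASSET_KERNEL.fetch()   # downloads once, then reuses the cache
            self.assertTrue(path)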
+ +import logging +import os +import os.path + + +def which(tool): + """ looks up the full path for @tool, returns None if not found + or if @tool does not have executable permissions. + """ + paths=os.getenv('PATH') + for p in paths.split(os.path.pathsep): + p = os.path.join(p, tool) + if os.access(p, os.X_OK): + return p + return None + +def is_readable_executable_file(path): + return os.path.isfile(path) and os.access(path, os.R_OK | os.X_OK) + +# @test: functional test to fail if @failure is seen +# @vm: the VM whose console to process +# @success: a non-None string to look for +# @failure: a string to look for that triggers test failure, or None +# +# Read up to 1 line of text from @vm, looking for @success +# and optionally @failure. +# +# If @success or @failure are seen, immediately return True, +# even if end of line is not yet seen. ie remainder of the +# line is left unread. +# +# If end of line is seen, with neither @success or @failure +# return False +# +# If @failure is seen, then mark @test as failed +def _console_read_line_until_match(test, vm, success, failure): + msg = bytes([]) + done = False + while True: + c = vm.console_socket.recv(1) + if c is None: + done = True + test.fail( + f"EOF in console, expected '{success}'") + break + msg += c + + if success in msg: + done = True + break + if failure and failure in msg: + done = True + vm.console_socket.close() + test.fail( + f"'{failure}' found in console, expected '{success}'") + + if c == b'\n': + break + + console_logger = logging.getLogger('console') + try: + console_logger.debug(msg.decode().strip()) + except: + console_logger.debug(msg) + + return done + +def _console_interaction(test, success_message, failure_message, + send_string, keep_sending=False, vm=None): + assert not keep_sending or send_string + assert success_message or send_string + + if vm is None: + vm = test.vm + + test.log.debug( + f"Console interaction: success_msg='{success_message}' " + + f"failure_msg='{failure_message}' send_string='{send_string}'") + + # We'll process console in bytes, to avoid having to + # deal with unicode decode errors from receiving + # partial utf8 byte sequences + success_message_b = None + if success_message is not None: + success_message_b = success_message.encode() + + failure_message_b = None + if failure_message is not None: + failure_message_b = failure_message.encode() + + while True: + if send_string: + vm.console_socket.sendall(send_string.encode()) + if not keep_sending: + send_string = None # send only once + + # Only consume console output if waiting for something + if success_message is None: + if send_string is None: + break + continue + + if _console_read_line_until_match(test, vm, + success_message_b, + failure_message_b): + break + +def interrupt_interactive_console_until_pattern(test, success_message, + failure_message=None, + interrupt_string='\r'): + """ + Keep sending a string to interrupt a console prompt, while logging the + console output. Typical use case is to break a boot loader prompt, such: + + Press a key within 5 seconds to interrupt boot process. + 5 + 4 + 3 + 2 + 1 + Booting default image... 
+ + :param test: a test containing a VM that will have its console + read and probed for a success or failure message + :type test: :class:`qemu_test.QemuSystemTest` + :param success_message: if this message appears, test succeeds + :param failure_message: if this message appears, test fails + :param interrupt_string: a string to send to the console before trying + to read a new line + """ + assert success_message + _console_interaction(test, success_message, failure_message, + interrupt_string, True) + +def wait_for_console_pattern(test, success_message, failure_message=None, + vm=None): + """ + Waits for messages to appear on the console, while logging the content + + :param test: a test containing a VM that will have its console + read and probed for a success or failure message + :type test: :class:`qemu_test.QemuSystemTest` + :param success_message: if this message appears, test succeeds + :param failure_message: if this message appears, test fails + """ + assert success_message + _console_interaction(test, success_message, failure_message, None, vm=vm) + +def exec_command(test, command): + """ + Send a command to a console (appending CRLF characters), while logging + the content. + + :param test: a test containing a VM. + :type test: :class:`qemu_test.QemuSystemTest` + :param command: the command to send + :type command: str + """ + _console_interaction(test, None, None, command + '\r') + +def exec_command_and_wait_for_pattern(test, command, + success_message, failure_message=None): + """ + Send a command to a console (appending CRLF characters), then wait + for success_message to appear on the console, while logging the. + content. Mark the test as failed if failure_message is found instead. + + :param test: a test containing a VM that will have its console + read and probed for a success or failure message + :type test: :class:`qemu_test.QemuSystemTest` + :param command: the command to send + :param success_message: if this message appears, test succeeds + :param failure_message: if this message appears, test fails + """ + assert success_message + _console_interaction(test, success_message, failure_message, command + '\r') + +def get_qemu_img(test): + test.log.debug('Looking for and selecting a qemu-img binary') + + # If qemu-img has been built, use it, otherwise the system wide one + # will be used. + qemu_img = test.build_file('qemu-img') + if os.path.exists(qemu_img): + return qemu_img + qemu_img = which('qemu-img') + if qemu_img is not None: + return qemu_img + test.skipTest(f"qemu-img not found in build dir or '$PATH'") diff --git a/tests/functional/qemu_test/config.py b/tests/functional/qemu_test/config.py new file mode 100644 index 00000000000..6d4c9c3ce1d --- /dev/null +++ b/tests/functional/qemu_test/config.py @@ -0,0 +1,48 @@ +# Test class and utilities for functional tests +# +# Copyright 2018, 2024 Red Hat, Inc. +# +# Original Author (Avocado-based tests): +# Cleber Rosa +# +# Adaption for standalone version: +# Thomas Huth +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
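For orientation, this is how the console helpers defined in cmd.py above are typically chained inside a test method once the guest console is up; the prompt strings are illustrative and mirror the AspeedTest usage earlier in this series.

    # inside a QemuSystemTest subclass, after self.vm.launch():
    wait_for_console_pattern(self, 'login:')
    exec_command_and_wait_for_pattern(self, 'root', 'Password:')
    exec_command_and_wait_for_pattern(self, 'passw0rd', '#')
    exec_command(self, 'poweroff')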
+ +import os +from pathlib import Path +import platform + + +def _source_dir(): + # Determine top-level directory of the QEMU sources + return Path(__file__).parent.parent.parent.parent + +def _build_dir(): + root = os.getenv('QEMU_BUILD_ROOT') + if root is not None: + return Path(root) + # Makefile.mtest only exists in build dir, so if it is available, use CWD + if os.path.exists('Makefile.mtest'): + return Path(os.getcwd()) + + root = os.path.join(_source_dir(), 'build') + if os.path.exists(root): + return Path(root) + + raise Exception("Cannot identify build dir, set QEMU_BUILD_ROOT") + +BUILD_DIR = _build_dir() + +def dso_suffix(): + '''Return the dynamic libraries suffix for the current platform''' + + if platform.system() == "Darwin": + return "dylib" + + if platform.system() == "Windows": + return "dll" + + return "so" diff --git a/tests/functional/qemu_test/decorators.py b/tests/functional/qemu_test/decorators.py new file mode 100644 index 00000000000..c0d1567b142 --- /dev/null +++ b/tests/functional/qemu_test/decorators.py @@ -0,0 +1,151 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Decorators useful in functional tests + +import importlib +import os +import platform +import resource +from unittest import skipIf, skipUnless + +from .cmd import which + +''' +Decorator to skip execution of a test if the list +of command binaries is not available in $PATH. +Example: + + @skipIfMissingCommands("mkisofs", "losetup") +''' +def skipIfMissingCommands(*args): + has_cmds = True + for cmd in args: + if not which(cmd): + has_cmds = False + break + + return skipUnless(has_cmds, 'required command(s) "%s" not installed' % + ", ".join(args)) + +''' +Decorator to skip execution of a test if the current +host operating system does match one of the prohibited +ones. +Example + + @skipIfOperatingSystem("Linux", "Darwin") +''' +def skipIfOperatingSystem(*args): + return skipIf(platform.system() in args, + 'running on an OS (%s) that is not able to run this test' % + ", ".join(args)) + +''' +Decorator to skip execution of a test if the current +host machine does not match one of the permitted +machines. +Example + + @skipIfNotMachine("x86_64", "aarch64") +''' +def skipIfNotMachine(*args): + return skipUnless(platform.machine() in args, + 'not running on one of the required machine(s) "%s"' % + ", ".join(args)) + +''' +Decorator to skip execution of flaky tests, unless +the $QEMU_TEST_FLAKY_TESTS environment variable is set. +A bug URL must be provided that documents the observed +failure behaviour, so it can be tracked & re-evaluated +in future. + +Historical tests may be providing "None" as the bug_url +but this should not be done for new test. + +Example: + + @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/NNN") +''' +def skipFlakyTest(bug_url): + if bug_url is None: + bug_url = "FIXME: reproduce flaky test and file bug report or remove" + return skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), + f'Test is unstable: {bug_url}') + +''' +Decorator to skip execution of tests which are likely +to execute untrusted commands on the host, or commands +which process untrusted code, unless the +$QEMU_TEST_ALLOW_UNTRUSTED_CODE env var is set. 
+Example: + + @skipUntrustedTest() +''' +def skipUntrustedTest(): + return skipUnless(os.getenv('QEMU_TEST_ALLOW_UNTRUSTED_CODE'), + 'Test runs untrusted code / processes untrusted data') + +''' +Decorator to skip execution of tests which need large +data storage (over around 500MB-1GB mark) on the host, +unless the $QEMU_TEST_ALLOW_LARGE_STORAGE environment +variable is set + +Example: + + @skipBigDataTest() +''' +def skipBigDataTest(): + return skipUnless(os.getenv('QEMU_TEST_ALLOW_LARGE_STORAGE'), + 'Test requires large host storage space') + +''' +Decorator to skip execution of tests which have a really long +runtime (and might e.g. time out if QEMU has been compiled with +debugging enabled) unless the $QEMU_TEST_ALLOW_SLOW +environment variable is set + +Example: + + @skipSlowTest() +''' +def skipSlowTest(): + return skipUnless(os.getenv('QEMU_TEST_ALLOW_SLOW'), + 'Test has a very long runtime and might time out') + +''' +Decorator to skip execution of a test if the list +of python imports is not available. +Example: + + @skipIfMissingImports("numpy", "cv2") +''' +def skipIfMissingImports(*args): + has_imports = True + for impname in args: + try: + importlib.import_module(impname) + except ImportError: + has_imports = False + break + + return skipUnless(has_imports, 'required import(s) "%s" not installed' % + ", ".join(args)) + +''' +Decorator to skip execution of a test if the system's +locked memory limit is below the required threshold. +Takes required locked memory threshold in kB. +Example: + + @skipLockedMemoryTest(2_097_152) +''' +def skipLockedMemoryTest(locked_memory): + # get memlock hard limit in bytes + _, ulimit_memory = resource.getrlimit(resource.RLIMIT_MEMLOCK) + + return skipUnless( + ulimit_memory == resource.RLIM_INFINITY or ulimit_memory >= locked_memory * 1024, + f'Test required {locked_memory} kB of available locked memory', + ) diff --git a/tests/functional/qemu_test/linuxkernel.py b/tests/functional/qemu_test/linuxkernel.py new file mode 100644 index 00000000000..2aca0ee3cd0 --- /dev/null +++ b/tests/functional/qemu_test/linuxkernel.py @@ -0,0 +1,52 @@ +# Test class for testing the boot process of a Linux kernel +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
+ +import hashlib +import urllib.request + +from .cmd import wait_for_console_pattern, exec_command_and_wait_for_pattern +from .testcase import QemuSystemTest +from .utils import get_usernet_hostfwd_port + + +class LinuxKernelTest(QemuSystemTest): + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + + def wait_for_console_pattern(self, success_message, vm=None): + wait_for_console_pattern(self, success_message, + failure_message='Kernel panic - not syncing', + vm=vm) + + def launch_kernel(self, kernel, initrd=None, dtb=None, console_index=0, + wait_for=None): + self.vm.set_console(console_index=console_index) + self.vm.add_args('-kernel', kernel) + if initrd: + self.vm.add_args('-initrd', initrd) + if dtb: + self.vm.add_args('-dtb', dtb) + self.vm.launch() + if wait_for: + self.wait_for_console_pattern(wait_for) + + def check_http_download(self, filename, hashsum, guestport=8080, + pythoncmd='python3 -m http.server'): + exec_command_and_wait_for_pattern(self, + f'{pythoncmd} {guestport} & sleep 1', + f'Serving HTTP on 0.0.0.0 port {guestport}') + hl = hashlib.sha256() + hostport = get_usernet_hostfwd_port(self.vm) + url = f'http://localhost:{hostport}{filename}' + self.log.info(f'Downloading {url} ...') + with urllib.request.urlopen(url) as response: + while True: + chunk = response.read(1 << 20) + if not chunk: + break + hl.update(chunk) + + digest = hl.hexdigest() + self.log.info(f'sha256sum of download is {digest}.') + self.assertEqual(digest, hashsum) diff --git a/tests/functional/qemu_test/ports.py b/tests/functional/qemu_test/ports.py new file mode 100644 index 00000000000..631b77abf6b --- /dev/null +++ b/tests/functional/qemu_test/ports.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +# +# Simple functional tests for VNC functionality +# +# Copyright 2018, 2024 Red Hat, Inc. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +import fcntl +import os +import socket + +from .config import BUILD_DIR +from typing import List + + +class Ports(): + + PORTS_ADDR = '127.0.0.1' + PORTS_RANGE_SIZE = 1024 + PORTS_START = 49152 + ((os.getpid() * PORTS_RANGE_SIZE) % 16384) + PORTS_END = PORTS_START + PORTS_RANGE_SIZE + + def __enter__(self): + lock_file = os.path.join(BUILD_DIR, "tests", "functional", "port_lock") + self.lock_fh = os.open(lock_file, os.O_CREAT) + fcntl.flock(self.lock_fh, fcntl.LOCK_EX) + return self + + def __exit__(self, exc_type, exc_value, traceback): + fcntl.flock(self.lock_fh, fcntl.LOCK_UN) + os.close(self.lock_fh) + + def check_bind(self, port: int) -> bool: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + try: + sock.bind((self.PORTS_ADDR, port)) + except OSError: + return False + + return True + + def find_free_ports(self, count: int) -> List[int]: + result = [] + for port in range(self.PORTS_START, self.PORTS_END): + if self.check_bind(port): + result.append(port) + if len(result) >= count: + break + assert len(result) == count + return result + + def find_free_port(self) -> int: + return self.find_free_ports(1)[0] diff --git a/tests/functional/qemu_test/tesseract.py b/tests/functional/qemu_test/tesseract.py new file mode 100644 index 00000000000..ede6c6501e2 --- /dev/null +++ b/tests/functional/qemu_test/tesseract.py @@ -0,0 +1,25 @@ +# ... +# +# Copyright (c) 2019 Philippe Mathieu-Daudé +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
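A hedged sketch of the Ports helper defined in ports.py above (the -serial argument is illustrative, not from this patch): the flock-protected context manager is held while a port is picked and handed to QEMU, so concurrently running tests do not race for the same port.

    from qemu_test.ports import Ports

    # inside a test method, once a free TCP port is needed:
    with Ports() as ports:
        port = ports.find_free_port()
        # keep the lock held until QEMU has the port, e.g.
        self.vm.add_args('-serial', f'tcp:127.0.0.1:{port},server=on,wait=off')
        self.vm.launch()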
+ +import logging +from subprocess import run + + +def tesseract_ocr(image_path, tesseract_args=''): + console_logger = logging.getLogger('console') + console_logger.debug(image_path) + proc = run(['tesseract', image_path, 'stdout'], + capture_output=True, encoding='utf8') + if proc.returncode: + return None + lines = [] + for line in proc.stdout.split('\n'): + sline = line.strip() + if len(sline): + console_logger.debug(sline) + lines += [sline] + return lines diff --git a/tests/functional/qemu_test/testcase.py b/tests/functional/qemu_test/testcase.py new file mode 100644 index 00000000000..5caf7b13fe3 --- /dev/null +++ b/tests/functional/qemu_test/testcase.py @@ -0,0 +1,408 @@ +# Test class and utilities for functional tests +# +# Copyright 2018, 2024 Red Hat, Inc. +# +# Original Author (Avocado-based tests): +# Cleber Rosa +# +# Adaption for standalone version: +# Thomas Huth +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +import logging +import os +from pathlib import Path +import pycotap +import shutil +from subprocess import run +import sys +import tempfile +import warnings +import unittest +import uuid + +from qemu.machine import QEMUMachine +from qemu.utils import hvf_available, kvm_available, tcg_available + +from .archive import archive_extract +from .asset import Asset +from .config import BUILD_DIR, dso_suffix +from .uncompress import uncompress + + +class QemuBaseTest(unittest.TestCase): + + ''' + @params compressed: filename, Asset, or file-like object to uncompress + @params format: optional compression format (gzip, lzma) + + Uncompresses @compressed into the scratch directory. + + If @format is None, heuristics will be applied to guess the format + from the filename or Asset URL. @format must be non-None if @uncompressed + is a file-like object. + + Returns the fully qualified path to the uncompressed file + ''' + def uncompress(self, compressed, format=None): + self.log.debug(f"Uncompress {compressed} format={format}") + if type(compressed) == Asset: + compressed.fetch() + + (name, ext) = os.path.splitext(str(compressed)) + uncompressed = self.scratch_file(os.path.basename(name)) + + uncompress(compressed, uncompressed, format) + + return uncompressed + + ''' + @params archive: filename, Asset, or file-like object to extract + @params format: optional archive format (tar, zip, deb, cpio) + @params sub_dir: optional sub-directory to extract into + @params member: optional member file to limit extraction to + + Extracts @archive into the scratch directory, or a directory beneath + named by @sub_dir. All files are extracted unless @member specifies + a limit. + + If @format is None, heuristics will be applied to guess the format + from the filename or Asset URL. @format must be non-None if @archive + is a file-like object. + + If @member is non-None, returns the fully qualified path to @member + ''' + def archive_extract(self, archive, format=None, sub_dir=None, member=None): + self.log.debug(f"Extract {archive} format={format}" + + f"sub_dir={sub_dir} member={member}") + if type(archive) == Asset: + archive.fetch() + if sub_dir is None: + archive_extract(archive, self.scratch_file(), format, member) + else: + archive_extract(archive, self.scratch_file(sub_dir), + format, member) + + if member is not None: + return self.scratch_file(member) + return None + + ''' + Create a temporary directory suitable for storing UNIX + socket paths. 
+ + Returns: a tempfile.TemporaryDirectory instance + ''' + def socket_dir(self): + if self.socketdir is None: + self.socketdir = tempfile.TemporaryDirectory( + prefix="qemu_func_test_sock_") + return self.socketdir + + ''' + @params args list of zero or more subdirectories or file + + Construct a path for accessing a data file located + relative to the source directory that is the root for + functional tests. + + @args may be an empty list to reference the root dir + itself, may be a single element to reference a file in + the root directory, or may be multiple elements to + reference a file nested below. The path components + will be joined using the platform appropriate path + separator. + + Returns: string representing a file path + ''' + def data_file(self, *args): + return str(Path(Path(__file__).parent.parent, *args)) + + ''' + @params args list of zero or more subdirectories or file + + Construct a path for accessing a data file located + relative to the build directory root. + + @args may be an empty list to reference the build dir + itself, may be a single element to reference a file in + the build directory, or may be multiple elements to + reference a file nested below. The path components + will be joined using the platform appropriate path + separator. + + Returns: string representing a file path + ''' + def build_file(self, *args): + return str(Path(BUILD_DIR, *args)) + + ''' + @params args list of zero or more subdirectories or file + + Construct a path for accessing/creating a scratch file + located relative to a temporary directory dedicated to + this test case. The directory and its contents will be + purged upon completion of the test. + + @args may be an empty list to reference the scratch dir + itself, may be a single element to reference a file in + the scratch directory, or may be multiple elements to + reference a file nested below. The path components + will be joined using the platform appropriate path + separator. + + Returns: string representing a file path + ''' + def scratch_file(self, *args): + return str(Path(self.workdir, *args)) + + ''' + @params args list of zero or more subdirectories or file + + Construct a path for accessing/creating a log file + located relative to a temporary directory dedicated to + this test case. The directory and its log files will be + preserved upon completion of the test. + + @args may be an empty list to reference the log dir + itself, may be a single element to reference a file in + the log directory, or may be multiple elements to + reference a file nested below. The path components + will be joined using the platform appropriate path + separator. + + Returns: string representing a file path + ''' + def log_file(self, *args): + return str(Path(self.outputdir, *args)) + + ''' + @params plugin name + + Return the full path to the plugin taking into account any host OS + specific suffixes. 
+ ''' + def plugin_file(self, plugin_name): + sfx = dso_suffix() + return os.path.join('tests', 'tcg', 'plugins', f'{plugin_name}.{sfx}') + + def assets_available(self): + for name, asset in vars(self.__class__).items(): + if name.startswith("ASSET_") and type(asset) == Asset: + if not asset.available(): + self.log.debug(f"Asset {asset.url} not available") + return False + return True + + def setUp(self): + self.qemu_bin = os.getenv('QEMU_TEST_QEMU_BINARY') + self.assertIsNotNone(self.qemu_bin, 'QEMU_TEST_QEMU_BINARY must be set') + self.arch = self.qemu_bin.split('-')[-1] + self.socketdir = None + + self.outputdir = self.build_file('tests', 'functional', + self.arch, self.id()) + self.workdir = os.path.join(self.outputdir, 'scratch') + os.makedirs(self.workdir, exist_ok=True) + + self.log_filename = self.log_file('base.log') + self.log = logging.getLogger('qemu-test') + self.log.setLevel(logging.DEBUG) + self._log_fh = logging.FileHandler(self.log_filename, mode='w') + self._log_fh.setLevel(logging.DEBUG) + fileFormatter = logging.Formatter( + '%(asctime)s - %(levelname)s: %(message)s') + self._log_fh.setFormatter(fileFormatter) + self.log.addHandler(self._log_fh) + + # Capture QEMUMachine logging + self.machinelog = logging.getLogger('qemu.machine') + self.machinelog.setLevel(logging.DEBUG) + self.machinelog.addHandler(self._log_fh) + + if not self.assets_available(): + self.skipTest('One or more assets is not available') + + def tearDown(self): + if "QEMU_TEST_KEEP_SCRATCH" not in os.environ: + shutil.rmtree(self.workdir) + if self.socketdir is not None: + shutil.rmtree(self.socketdir.name) + self.socketdir = None + self.machinelog.removeHandler(self._log_fh) + self.log.removeHandler(self._log_fh) + self._log_fh.close() + + def main(): + warnings.simplefilter("default") + os.environ["PYTHONWARNINGS"] = "default" + + path = os.path.basename(sys.argv[0])[:-3] + + cache = os.environ.get("QEMU_TEST_PRECACHE", None) + if cache is not None: + Asset.precache_suites(path, cache) + return + + tr = pycotap.TAPTestRunner(message_log = pycotap.LogMode.LogToError, + test_output_log = pycotap.LogMode.LogToError) + res = unittest.main(module = None, testRunner = tr, exit = False, + argv=[sys.argv[0], path] + sys.argv[1:]) + for (test, message) in res.result.errors + res.result.failures: + + if hasattr(test, "log_filename"): + print('More information on ' + test.id() + ' could be found here:' + '\n %s' % test.log_filename, file=sys.stderr) + if hasattr(test, 'console_log_name'): + print(' %s' % test.console_log_name, file=sys.stderr) + sys.exit(not res.result.wasSuccessful()) + + +class QemuUserTest(QemuBaseTest): + + def setUp(self): + super().setUp() + self._ldpath = [] + + def add_ldpath(self, ldpath): + self._ldpath.append(os.path.abspath(ldpath)) + + def run_cmd(self, bin_path, args=[]): + return run([self.qemu_bin] + + ["-L %s" % ldpath for ldpath in self._ldpath] + + [bin_path] + + args, + text=True, capture_output=True) + +class QemuSystemTest(QemuBaseTest): + """Facilitates system emulation tests.""" + + cpu = None + machine = None + _machinehelp = None + + def setUp(self): + self._vms = {} + + super().setUp() + + console_log = logging.getLogger('console') + console_log.setLevel(logging.DEBUG) + self.console_log_name = self.log_file('console.log') + self._console_log_fh = logging.FileHandler(self.console_log_name, + mode='w') + self._console_log_fh.setLevel(logging.DEBUG) + fileFormatter = logging.Formatter('%(asctime)s: %(message)s') + self._console_log_fh.setFormatter(fileFormatter) + 
console_log.addHandler(self._console_log_fh) + + def set_machine(self, machinename): + # TODO: We should use QMP to get the list of available machines + if not self._machinehelp: + self._machinehelp = run( + [self.qemu_bin, '-M', 'help'], + capture_output=True, check=True, encoding='utf8').stdout + if self._machinehelp.find(machinename) < 0: + self.skipTest('no support for machine ' + machinename) + self.machine = machinename + + def require_accelerator(self, accelerator): + """ + Requires an accelerator to be available for the test to continue + + It takes into account the currently set qemu binary. + + If the check fails, the test is canceled. If the check itself + for the given accelerator is not available, the test is also + canceled. + + :param accelerator: name of the accelerator, such as "kvm" or "tcg" + :type accelerator: str + """ + checker = {'tcg': tcg_available, + 'kvm': kvm_available, + 'hvf': hvf_available, + }.get(accelerator) + if checker is None: + self.skipTest("Don't know how to check for the presence " + "of accelerator %s" % accelerator) + if not checker(qemu_bin=self.qemu_bin): + self.skipTest("%s accelerator does not seem to be " + "available" % accelerator) + + def require_netdev(self, netdevname): + help = run([self.qemu_bin, + '-M', 'none', '-netdev', 'help'], + capture_output=True, check=True, encoding='utf8').stdout; + if help.find('\n' + netdevname + '\n') < 0: + self.skipTest('no support for " + netdevname + " networking') + + def require_device(self, devicename): + help = run([self.qemu_bin, + '-M', 'none', '-device', 'help'], + capture_output=True, check=True, encoding='utf8').stdout; + if help.find(devicename) < 0: + self.skipTest('no support for device ' + devicename) + + def _new_vm(self, name, *args): + vm = QEMUMachine(self.qemu_bin, + name=name, + base_temp_dir=self.workdir, + log_dir=self.log_file()) + self.log.debug('QEMUMachine "%s" created', name) + self.log.debug('QEMUMachine "%s" temp_dir: %s', name, vm.temp_dir) + + sockpath = os.environ.get("QEMU_TEST_QMP_BACKDOOR", None) + if sockpath is not None: + vm.add_args("-chardev", + f"socket,id=backdoor,path={sockpath},server=on,wait=off", + "-mon", "chardev=backdoor,mode=control") + + if args: + vm.add_args(*args) + return vm + + @property + def vm(self): + return self.get_vm(name='default') + + def get_vm(self, *args, name=None): + if not name: + name = str(uuid.uuid4()) + if self._vms.get(name) is None: + self._vms[name] = self._new_vm(name, *args) + if self.cpu is not None: + self._vms[name].add_args('-cpu', self.cpu) + if self.machine is not None: + self._vms[name].set_machine(self.machine) + return self._vms[name] + + def set_vm_arg(self, arg, value): + """ + Set an argument to list of extra arguments to be given to the QEMU + binary. If the argument already exists then its value is replaced. 
+ + :param arg: the QEMU argument, such as "-cpu" in "-cpu host" + :type arg: str + :param value: the argument value, such as "host" in "-cpu host" + :type value: str + """ + if not arg or not value: + return + if arg not in self.vm.args: + self.vm.args.extend([arg, value]) + else: + idx = self.vm.args.index(arg) + 1 + if idx < len(self.vm.args): + self.vm.args[idx] = value + else: + self.vm.args.append(value) + + def tearDown(self): + for vm in self._vms.values(): + vm.shutdown() + logging.getLogger('console').removeHandler(self._console_log_fh) + self._console_log_fh.close() + super().tearDown() diff --git a/tests/functional/qemu_test/tuxruntest.py b/tests/functional/qemu_test/tuxruntest.py new file mode 100644 index 00000000000..6c442ff0dc4 --- /dev/null +++ b/tests/functional/qemu_test/tuxruntest.py @@ -0,0 +1,136 @@ +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +from qemu_test import QemuSystemTest +from qemu_test import exec_command_and_wait_for_pattern +from qemu_test import wait_for_console_pattern +from qemu_test import which, get_qemu_img + +class TuxRunBaselineTest(QemuSystemTest): + + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0' + # Tests are ~10-40s, allow for --debug/--enable-gcov overhead + timeout = 100 + + def setUp(self): + super().setUp() + + # We need zstd for all the tuxrun tests + if which('zstd') is None: + self.skipTest("zstd not found in $PATH") + + # Pre-init TuxRun specific settings: Most machines work with + # reasonable defaults but we sometimes need to tweak the + # config. To avoid open coding everything we store all these + # details in the metadata for each test. + + # The tuxboot tag matches the root directory + self.tuxboot = self.arch + + # Most Linux's use ttyS0 for their serial port + self.console = "ttyS0" + + # Does the machine shutdown QEMU nicely on "halt" + self.wait_for_shutdown = True + + self.root = "vda" + + # Occasionally we need extra devices to hook things up + self.extradev = None + + self.qemu_img = get_qemu_img(self) + + def wait_for_console_pattern(self, success_message, vm=None): + wait_for_console_pattern(self, success_message, + failure_message='Kernel panic - not syncing', + vm=vm) + + def fetch_tuxrun_assets(self, kernel_asset, rootfs_asset, dtb_asset=None): + """ + Fetch the TuxBoot assets. 
+ """ + kernel_image = kernel_asset.fetch() + disk_image = self.uncompress(rootfs_asset) + dtb = dtb_asset.fetch() if dtb_asset is not None else None + + return (kernel_image, disk_image, dtb) + + def prepare_run(self, kernel, disk, drive, dtb=None, console_index=0): + """ + Setup to run and add the common parameters to the system + """ + self.vm.set_console(console_index=console_index) + + # all block devices are raw ext4's + blockdev = "driver=raw,file.driver=file," \ + + f"file.filename={disk},node-name=hd0" + + self.kcmd_line = self.KERNEL_COMMON_COMMAND_LINE + self.kcmd_line += f" root=/dev/{self.root}" + self.kcmd_line += f" console={self.console}" + + self.vm.add_args('-kernel', kernel, + '-append', self.kcmd_line, + '-blockdev', blockdev) + + # Sometimes we need extra devices attached + if self.extradev: + self.vm.add_args('-device', self.extradev) + + self.vm.add_args('-device', + f"{drive},drive=hd0") + + # Some machines need an explicit DTB + if dtb: + self.vm.add_args('-dtb', dtb) + + def run_tuxtest_tests(self, haltmsg): + """ + Wait for the system to boot up, wait for the login prompt and + then do a few things on the console. Trigger a shutdown and + wait to exit cleanly. + """ + ps1='root@tuxtest:~#' + self.wait_for_console_pattern(self.kcmd_line) + self.wait_for_console_pattern('tuxtest login:') + exec_command_and_wait_for_pattern(self, 'root', ps1) + exec_command_and_wait_for_pattern(self, 'cat /proc/interrupts', ps1) + exec_command_and_wait_for_pattern(self, 'cat /proc/self/maps', ps1) + exec_command_and_wait_for_pattern(self, 'uname -a', ps1) + exec_command_and_wait_for_pattern(self, 'halt', haltmsg) + + # Wait for VM to shut down gracefully if it can + if self.wait_for_shutdown: + self.vm.wait() + else: + self.vm.shutdown() + + def common_tuxrun(self, + kernel_asset, + rootfs_asset, + dtb_asset=None, + drive="virtio-blk-device", + haltmsg="reboot: System halted", + console_index=0): + """ + Common path for LKFT tests. Unless we need to do something + special with the command line we can process most things using + the tag metadata. + """ + (kernel, disk, dtb) = self.fetch_tuxrun_assets(kernel_asset, rootfs_asset, + dtb_asset) + + self.prepare_run(kernel, disk, drive, dtb, console_index) + self.vm.launch() + self.run_tuxtest_tests(haltmsg) + os.remove(disk) diff --git a/tests/functional/qemu_test/uncompress.py b/tests/functional/qemu_test/uncompress.py new file mode 100644 index 00000000000..b7ef8f759b7 --- /dev/null +++ b/tests/functional/qemu_test/uncompress.py @@ -0,0 +1,107 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Utilities for python-based QEMU tests +# +# Copyright 2024 Red Hat, Inc. 
+# +# Authors: +# Thomas Huth + +import gzip +import lzma +import os +import stat +import shutil +from urllib.parse import urlparse +from subprocess import run, CalledProcessError + +from .asset import Asset + + +def gzip_uncompress(gz_path, output_path): + if os.path.exists(output_path): + return + with gzip.open(gz_path, 'rb') as gz_in: + try: + with open(output_path, 'wb') as raw_out: + shutil.copyfileobj(gz_in, raw_out) + except: + os.remove(output_path) + raise + +def lzma_uncompress(xz_path, output_path): + if os.path.exists(output_path): + return + with lzma.open(xz_path, 'rb') as lzma_in: + try: + with open(output_path, 'wb') as raw_out: + shutil.copyfileobj(lzma_in, raw_out) + except: + os.remove(output_path) + raise + + +def zstd_uncompress(zstd_path, output_path): + if os.path.exists(output_path): + return + + try: + run(['zstd', "-f", "-d", zstd_path, + "-o", output_path], capture_output=True, check=True) + except CalledProcessError as e: + os.remove(output_path) + raise Exception( + f"Unable to decompress zstd file {zstd_path} with {e}") from e + + # zstd copies source archive permissions for the output + # file, so must make this writable for QEMU + os.chmod(output_path, stat.S_IRUSR | stat.S_IWUSR) + + +''' +@params compressed: filename, Asset, or file-like object to uncompress +@params uncompressed: filename to uncompress into +@params format: optional compression format (gzip, lzma) + +Uncompresses @compressed into @uncompressed + +If @format is None, heuristics will be applied to guess the format +from the filename or Asset URL. @format must be non-None if @uncompressed +is a file-like object. + +Returns the fully qualified path to the uncompessed file +''' +def uncompress(compressed, uncompressed, format=None): + if format is None: + format = guess_uncompress_format(compressed) + + if format == "xz": + lzma_uncompress(str(compressed), uncompressed) + elif format == "gz": + gzip_uncompress(str(compressed), uncompressed) + elif format == "zstd": + zstd_uncompress(str(compressed), uncompressed) + else: + raise Exception(f"Unknown compression format {format}") + +''' +@params compressed: filename, Asset, or file-like object to guess + +Guess the format of @compressed, raising an exception if +no format can be determined +''' +def guess_uncompress_format(compressed): + if type(compressed) == Asset: + compressed = urlparse(compressed.url).path + elif type(compressed) != str: + raise Exception(f"Unable to guess compression cformat for {compressed}") + + (name, ext) = os.path.splitext(compressed) + if ext == ".xz": + return "xz" + elif ext == ".gz": + return "gz" + elif ext in [".zstd", ".zst"]: + return 'zstd' + else: + raise Exception(f"Unknown compression format for {compressed}") diff --git a/tests/functional/qemu_test/utils.py b/tests/functional/qemu_test/utils.py new file mode 100644 index 00000000000..e7c8de81654 --- /dev/null +++ b/tests/functional/qemu_test/utils.py @@ -0,0 +1,39 @@ +# Utilities for python-based QEMU tests +# +# Copyright 2024 Red Hat, Inc. +# +# Authors: +# Thomas Huth +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
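A short usage sketch for the uncompress helpers above (the file names and asset attribute are placeholders, not part of the patch). Tests normally call the QemuBaseTest.uncompress() wrapper shown earlier, which fetches the Asset and writes the result into the per-test scratch directory; the module-level function is useful when the format cannot be guessed from the name:

    from qemu_test.uncompress import uncompress

    # Extension heuristics: ".xz" -> lzma, ".gz" -> gzip, ".zst"/".zstd" -> zstd
    uncompress('vmlinuz.xz', 'vmlinuz')

    # Explicit format when the input name carries no usable suffix
    uncompress('initrd.download', 'initrd.img', format='gz')

Inside a test the wrapper form is simply, e.g., initrd = self.uncompress(self.ASSET_INITRD).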
+ +import os + +from qemu.utils import get_info_usernet_hostfwd_port + + +def get_usernet_hostfwd_port(vm): + res = vm.cmd('human-monitor-command', command_line='info usernet') + return get_info_usernet_hostfwd_port(res) + +""" +Round up to next power of 2 +""" +def pow2ceil(x): + return 1 if x == 0 else 2**(x - 1).bit_length() + +def file_truncate(path, size): + if size != os.path.getsize(path): + with open(path, 'ab+') as fd: + fd.truncate(size) + +""" +Expand file size to next power of 2 +""" +def image_pow2ceil_expand(path): + size = os.path.getsize(path) + size_aligned = pow2ceil(size) + if size != size_aligned: + with open(path, 'ab+') as fd: + fd.truncate(size_aligned) diff --git a/tests/functional/replay_kernel.py b/tests/functional/replay_kernel.py new file mode 100644 index 00000000000..80795eb0520 --- /dev/null +++ b/tests/functional/replay_kernel.py @@ -0,0 +1,84 @@ +# Record/replay test that boots a Linux kernel +# +# Copyright (c) 2020 ISP RAS +# +# Author: +# Pavel Dovgalyuk +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +import os +import logging +import time +import subprocess + +from qemu_test.linuxkernel import LinuxKernelTest + +class ReplayKernelBase(LinuxKernelTest): + """ + Boots a Linux kernel in record mode and checks that the console + is operational and the kernel command line is properly passed + from QEMU to the kernel. + Then replays the same scenario and verifies, that QEMU correctly + terminates. + """ + + timeout = 180 + REPLAY_KERNEL_COMMAND_LINE = 'printk.time=1 panic=-1 ' + + def run_vm(self, kernel_path, kernel_command_line, console_pattern, + record, shift, args, replay_path): + # icount requires TCG to be available + self.require_accelerator('tcg') + + logger = logging.getLogger('replay') + start_time = time.time() + vm = self.get_vm(name='recording' if record else 'replay') + vm.set_console() + if record: + logger.info('recording the execution...') + mode = 'record' + else: + logger.info('replaying the execution...') + mode = 'replay' + vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s' % + (shift, mode, replay_path), + '-kernel', kernel_path, + '-append', kernel_command_line, + '-net', 'none', + '-no-reboot') + if args: + vm.add_args(*args) + vm.launch() + self.wait_for_console_pattern(console_pattern, vm) + if record: + vm.shutdown() + logger.info('finished the recording with log size %s bytes' + % os.path.getsize(replay_path)) + self.run_replay_dump(replay_path) + logger.info('successfully tested replay-dump.py') + else: + vm.wait() + logger.info('successfully finished the replay') + elapsed = time.time() - start_time + logger.info('elapsed time %.2f sec' % elapsed) + return elapsed + + def run_replay_dump(self, replay_path): + try: + subprocess.check_call(["./scripts/replay-dump.py", + "-f", replay_path], + stdout=subprocess.DEVNULL) + except subprocess.CalledProcessError: + self.fail('replay-dump.py failed') + + def run_rr(self, kernel_path, kernel_command_line, console_pattern, + shift=7, args=None): + replay_path = os.path.join(self.workdir, 'replay.bin') + t1 = self.run_vm(kernel_path, kernel_command_line, console_pattern, + True, shift, args, replay_path) + t2 = self.run_vm(kernel_path, kernel_command_line, console_pattern, + False, shift, args, replay_path) + logger = logging.getLogger('replay') + logger.info('replay overhead {:.2%}'.format(t2 / t1 - 1)) diff --git a/tests/functional/reverse_debugging.py b/tests/functional/reverse_debugging.py new file 
mode 100644 index 00000000000..f9a1d395f1d --- /dev/null +++ b/tests/functional/reverse_debugging.py @@ -0,0 +1,196 @@ +# Reverse debugging test +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Copyright (c) 2020 ISP RAS +# +# Author: +# Pavel Dovgalyuk +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. +import os +import logging + +from qemu_test import LinuxKernelTest, get_qemu_img +from qemu_test.ports import Ports + + +class ReverseDebugging(LinuxKernelTest): + """ + Test GDB reverse debugging commands: reverse step and reverse continue. + Recording saves the execution of some instructions and makes an initial + VM snapshot to allow reverse execution. + Replay saves the order of the first instructions and then checks that they + are executed backwards in the correct order. + After that the execution is replayed to the end, and reverse continue + command is checked by setting several breakpoints, and asserting + that the execution is stopped at the last of them. + """ + + timeout = 10 + STEPS = 10 + endian_is_le = True + + def run_vm(self, record, shift, args, replay_path, image_path, port): + from avocado.utils import datadrainer + + logger = logging.getLogger('replay') + vm = self.get_vm(name='record' if record else 'replay') + vm.set_console() + if record: + logger.info('recording the execution...') + mode = 'record' + else: + logger.info('replaying the execution...') + mode = 'replay' + vm.add_args('-gdb', 'tcp::%d' % port, '-S') + vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s,rrsnapshot=init' % + (shift, mode, replay_path), + '-net', 'none') + vm.add_args('-drive', 'file=%s,if=none' % image_path) + if args: + vm.add_args(*args) + vm.launch() + console_drainer = datadrainer.LineLogger(vm.console_socket.fileno(), + logger=self.log.getChild('console'), + stop_check=(lambda : not vm.is_running())) + console_drainer.start() + return vm + + @staticmethod + def get_reg_le(g, reg): + res = g.cmd(b'p%x' % reg) + num = 0 + for i in range(len(res))[-2::-2]: + num = 0x100 * num + int(res[i:i + 2], 16) + return num + + @staticmethod + def get_reg_be(g, reg): + res = g.cmd(b'p%x' % reg) + return int(res, 16) + + def get_reg(self, g, reg): + # value may be encoded in BE or LE order + if self.endian_is_le: + return self.get_reg_le(g, reg) + else: + return self.get_reg_be(g, reg) + + def get_pc(self, g): + return self.get_reg(g, self.REG_PC) + + def check_pc(self, g, addr): + pc = self.get_pc(g) + if pc != addr: + self.fail('Invalid PC (read %x instead of %x)' % (pc, addr)) + + @staticmethod + def gdb_step(g): + g.cmd(b's', b'T05thread:01;') + + @staticmethod + def gdb_bstep(g): + g.cmd(b'bs', b'T05thread:01;') + + @staticmethod + def vm_get_icount(vm): + return vm.qmp('query-replay')['return']['icount'] + + def reverse_debugging(self, shift=7, args=None): + from avocado.utils import gdb + from avocado.utils import process + + logger = logging.getLogger('replay') + + # create qcow2 for snapshots + logger.info('creating qcow2 image for VM snapshots') + image_path = os.path.join(self.workdir, 'disk.qcow2') + qemu_img = get_qemu_img(self) + if qemu_img is None: + self.skipTest('Could not find "qemu-img", which is required to ' + 'create the temporary qcow2 image') + cmd = '%s create -f qcow2 %s 128M' % (qemu_img, image_path) + process.run(cmd) + + replay_path = os.path.join(self.workdir, 'replay.bin') + + # record the log + vm = self.run_vm(True, shift, args, replay_path, image_path, -1) + while 
self.vm_get_icount(vm) <= self.STEPS: + pass + last_icount = self.vm_get_icount(vm) + vm.shutdown() + + logger.info("recorded log with %s+ steps" % last_icount) + + # replay and run debug commands + with Ports() as ports: + port = ports.find_free_port() + vm = self.run_vm(False, shift, args, replay_path, image_path, port) + logger.info('connecting to gdbstub') + g = gdb.GDBRemote('127.0.0.1', port, False, False) + g.connect() + r = g.cmd(b'qSupported') + if b'qXfer:features:read+' in r: + g.cmd(b'qXfer:features:read:target.xml:0,ffb') + if b'ReverseStep+' not in r: + self.fail('Reverse step is not supported by QEMU') + if b'ReverseContinue+' not in r: + self.fail('Reverse continue is not supported by QEMU') + + logger.info('stepping forward') + steps = [] + # record first instruction addresses + for _ in range(self.STEPS): + pc = self.get_pc(g) + logger.info('saving position %x' % pc) + steps.append(pc) + self.gdb_step(g) + + # visit the recorded instruction in reverse order + logger.info('stepping backward') + for addr in steps[::-1]: + self.gdb_bstep(g) + self.check_pc(g, addr) + logger.info('found position %x' % addr) + + # visit the recorded instruction in forward order + logger.info('stepping forward') + for addr in steps: + self.check_pc(g, addr) + self.gdb_step(g) + logger.info('found position %x' % addr) + + # set breakpoints for the instructions just stepped over + logger.info('setting breakpoints') + for addr in steps: + # hardware breakpoint at addr with len=1 + g.cmd(b'Z1,%x,1' % addr, b'OK') + + # this may hit a breakpoint if first instructions are executed + # again + logger.info('continuing execution') + vm.qmp('replay-break', icount=last_icount - 1) + # continue - will return after pausing + # This could stop at the end and get a T02 return, or by + # re-executing one of the breakpoints and get a T05 return. 
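+            # (GDB stop replies carry a signal number: T05 is SIGTRAP from
+            # one of the hardware breakpoints set above, T02 is SIGINT from
+            # QEMU pausing when the replay-break icount is reached.)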
+ g.cmd(b'c') + if self.vm_get_icount(vm) == last_icount - 1: + logger.info('reached the end (icount %s)' % (last_icount - 1)) + else: + logger.info('hit a breakpoint again at %x (icount %s)' % + (self.get_pc(g), self.vm_get_icount(vm))) + + logger.info('running reverse continue to reach %x' % steps[-1]) + # reverse continue - will return after stopping at the breakpoint + g.cmd(b'bc', b'T05thread:01;') + + # assume that none of the first instructions is executed again + # breaking the order of the breakpoints + self.check_pc(g, steps[-1]) + logger.info('successfully reached %x' % steps[-1]) + + logger.info('exiting gdb and qemu') + vm.shutdown() diff --git a/tests/functional/test_aarch64_aspeed_ast2700.py b/tests/functional/test_aarch64_aspeed_ast2700.py new file mode 100755 index 00000000000..d02dc7991c1 --- /dev/null +++ b/tests/functional/test_aarch64_aspeed_ast2700.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +# +# Functional test that boots the ASPEED SoCs with firmware +# +# Copyright (C) 2022 ASPEED Technology Inc +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern +from qemu_test import exec_command_and_wait_for_pattern + + +class AST2x00MachineSDK(QemuSystemTest): + + def do_test_aarch64_aspeed_sdk_start(self, image): + self.require_netdev('user') + self.vm.set_console() + self.vm.add_args('-device', + 'tmp105,bus=aspeed.i2c.bus.1,address=0x4d,id=tmp-test') + self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', + '-net', 'nic', '-net', 'user', '-snapshot') + + self.vm.launch() + + def verify_vbootrom_firmware_flow(self): + wait_for_console_pattern(self, 'Found valid FIT image') + wait_for_console_pattern(self, '[uboot] loading') + wait_for_console_pattern(self, 'done') + wait_for_console_pattern(self, '[fdt] loading') + wait_for_console_pattern(self, 'done') + wait_for_console_pattern(self, '[tee] loading') + wait_for_console_pattern(self, 'done') + wait_for_console_pattern(self, '[atf] loading') + wait_for_console_pattern(self, 'done') + wait_for_console_pattern(self, 'Jumping to BL31 (Trusted Firmware-A)') + + def verify_openbmc_boot_and_login(self, name): + wait_for_console_pattern(self, 'U-Boot 2023.10') + wait_for_console_pattern(self, '## Loading kernel from FIT Image') + wait_for_console_pattern(self, 'Starting kernel ...') + + wait_for_console_pattern(self, f'{name} login:') + exec_command_and_wait_for_pattern(self, 'root', 'Password:') + exec_command_and_wait_for_pattern(self, '0penBmc', f'root@{name}:~#') + + ASSET_SDK_V906_AST2700 = Asset( + 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2700-a0-default-obmc.tar.gz', + '7247b6f19dbfb700686f8d9f723ac23f3eb229226c0589cb9b06b80d1b61f3cb') + + ASSET_SDK_V906_AST2700A1 = Asset( + 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2700-default-obmc.tar.gz', + 'f1d53e0be8a404ecce3e105f72bc50fa4e090ad13160ffa91b10a6e0233a9dc6') + + def do_ast2700_i2c_test(self): + exec_command_and_wait_for_pattern(self, + 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-1/device/new_device ', + 'i2c i2c-1: new_device: Instantiated device lm75 at 0x4d') + exec_command_and_wait_for_pattern(self, + 'cat /sys/bus/i2c/devices/1-004d/hwmon/hwmon*/temp1_input', '0') + self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test', + property='temperature', value=18000) + exec_command_and_wait_for_pattern(self, + 'cat /sys/bus/i2c/devices/1-004d/hwmon/hwmon*/temp1_input', '18000') + + def 
start_ast2700_test(self, name): + num_cpu = 4 + uboot_size = os.path.getsize(self.scratch_file(name, + 'u-boot-nodtb.bin')) + uboot_dtb_load_addr = hex(0x400000000 + uboot_size) + + load_images_list = [ + { + 'addr': '0x400000000', + 'file': self.scratch_file(name, + 'u-boot-nodtb.bin') + }, + { + 'addr': str(uboot_dtb_load_addr), + 'file': self.scratch_file(name, 'u-boot.dtb') + }, + { + 'addr': '0x430000000', + 'file': self.scratch_file(name, 'bl31.bin') + }, + { + 'addr': '0x430080000', + 'file': self.scratch_file(name, 'optee', + 'tee-raw.bin') + } + ] + + for load_image in load_images_list: + addr = load_image['addr'] + file = load_image['file'] + self.vm.add_args('-device', + f'loader,force-raw=on,addr={addr},file={file}') + + for i in range(num_cpu): + self.vm.add_args('-device', + f'loader,addr=0x430000000,cpu-num={i}') + + self.vm.add_args('-smp', str(num_cpu)) + self.do_test_aarch64_aspeed_sdk_start( + self.scratch_file(name, 'image-bmc')) + + def start_ast2700_test_vbootrom(self, name): + self.vm.add_args('-bios', 'ast27x0_bootrom.bin') + self.do_test_aarch64_aspeed_sdk_start( + self.scratch_file(name, 'image-bmc')) + + def test_aarch64_ast2700_evb_sdk_v09_06(self): + self.set_machine('ast2700-evb') + + self.archive_extract(self.ASSET_SDK_V906_AST2700) + self.start_ast2700_test('ast2700-a0-default') + self.verify_openbmc_boot_and_login('ast2700-a0-default') + self.do_ast2700_i2c_test() + + def test_aarch64_ast2700a1_evb_sdk_v09_06(self): + self.set_machine('ast2700a1-evb') + + self.archive_extract(self.ASSET_SDK_V906_AST2700A1) + self.start_ast2700_test('ast2700-default') + self.verify_openbmc_boot_and_login('ast2700-default') + self.do_ast2700_i2c_test() + + def test_aarch64_ast2700a1_evb_sdk_vbootrom_v09_06(self): + self.set_machine('ast2700a1-evb') + + self.archive_extract(self.ASSET_SDK_V906_AST2700A1) + self.start_ast2700_test_vbootrom('ast2700-default') + self.verify_vbootrom_firmware_flow() + self.verify_openbmc_boot_and_login('ast2700-default') + self.do_ast2700_i2c_test() + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_aarch64_aspeed_ast2700fc.py b/tests/functional/test_aarch64_aspeed_ast2700fc.py new file mode 100755 index 00000000000..b85370e182e --- /dev/null +++ b/tests/functional/test_aarch64_aspeed_ast2700fc.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python3 +# +# Functional test that boots the ASPEED SoCs with firmware +# +# Copyright (C) 2022 ASPEED Technology Inc +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern +from qemu_test import exec_command_and_wait_for_pattern + + +class AST2x00MachineSDK(QemuSystemTest): + + def do_test_aarch64_aspeed_sdk_start(self, image): + self.require_netdev('user') + self.vm.set_console() + self.vm.add_args('-device', + 'tmp105,bus=aspeed.i2c.bus.1,address=0x4d,id=tmp-test') + self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', + '-net', 'nic', '-net', 'user', '-snapshot') + + self.vm.launch() + + def verify_openbmc_boot_and_login(self, name): + wait_for_console_pattern(self, 'U-Boot 2023.10') + wait_for_console_pattern(self, '## Loading kernel from FIT Image') + wait_for_console_pattern(self, 'Starting kernel ...') + + wait_for_console_pattern(self, f'{name} login:') + exec_command_and_wait_for_pattern(self, 'root', 'Password:') + exec_command_and_wait_for_pattern(self, '0penBmc', f'root@{name}:~#') + + ASSET_SDK_V906_AST2700 = Asset( + 
'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2700-default-obmc.tar.gz', + 'f1d53e0be8a404ecce3e105f72bc50fa4e090ad13160ffa91b10a6e0233a9dc6') + + def do_ast2700_i2c_test(self): + exec_command_and_wait_for_pattern(self, + 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-1/device/new_device ', + 'i2c i2c-1: new_device: Instantiated device lm75 at 0x4d') + exec_command_and_wait_for_pattern(self, + 'cat /sys/bus/i2c/devices/1-004d/hwmon/hwmon*/temp1_input', '0') + self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test', + property='temperature', value=18000) + exec_command_and_wait_for_pattern(self, + 'cat /sys/bus/i2c/devices/1-004d/hwmon/hwmon*/temp1_input', '18000') + + def do_ast2700fc_ssp_test(self): + self.vm.shutdown() + self.vm.set_console(console_index=1) + self.vm.launch() + + exec_command_and_wait_for_pattern(self, '\012', 'ssp:~$') + exec_command_and_wait_for_pattern(self, 'version', + 'Zephyr version 3.7.1') + exec_command_and_wait_for_pattern(self, 'md 72c02000 1', + '[72c02000] 06010103') + + def do_ast2700fc_tsp_test(self): + self.vm.shutdown() + self.vm.set_console(console_index=2) + self.vm.launch() + + exec_command_and_wait_for_pattern(self, '\012', 'tsp:~$') + exec_command_and_wait_for_pattern(self, 'version', + 'Zephyr version 3.7.1') + exec_command_and_wait_for_pattern(self, 'md 72c02000 1', + '[72c02000] 06010103') + + def start_ast2700fc_test(self, name): + ca35_core = 4 + uboot_size = os.path.getsize(self.scratch_file(name, + 'u-boot-nodtb.bin')) + uboot_dtb_load_addr = hex(0x400000000 + uboot_size) + + load_images_list = [ + { + 'addr': '0x400000000', + 'file': self.scratch_file(name, + 'u-boot-nodtb.bin') + }, + { + 'addr': str(uboot_dtb_load_addr), + 'file': self.scratch_file(name, 'u-boot.dtb') + }, + { + 'addr': '0x430000000', + 'file': self.scratch_file(name, 'bl31.bin') + }, + { + 'addr': '0x430080000', + 'file': self.scratch_file(name, 'optee', + 'tee-raw.bin') + } + ] + + for load_image in load_images_list: + addr = load_image['addr'] + file = load_image['file'] + self.vm.add_args('-device', + f'loader,force-raw=on,addr={addr},file={file}') + + for i in range(ca35_core): + self.vm.add_args('-device', + f'loader,addr=0x430000000,cpu-num={i}') + + load_elf_list = { + 'ssp': self.scratch_file(name, 'zephyr-aspeed-ssp.elf'), + 'tsp': self.scratch_file(name, 'zephyr-aspeed-tsp.elf') + } + + for cpu_num, key in enumerate(load_elf_list, start=4): + file = load_elf_list[key] + self.vm.add_args('-device', + f'loader,file={file},cpu-num={cpu_num}') + + self.do_test_aarch64_aspeed_sdk_start( + self.scratch_file(name, 'image-bmc')) + + def test_aarch64_ast2700fc_sdk_v09_06(self): + self.set_machine('ast2700fc') + + self.archive_extract(self.ASSET_SDK_V906_AST2700) + self.start_ast2700fc_test('ast2700-default') + self.verify_openbmc_boot_and_login('ast2700-default') + self.do_ast2700_i2c_test() + self.do_ast2700fc_ssp_test() + self.do_ast2700fc_tsp_test() + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_aarch64_device_passthrough.py b/tests/functional/test_aarch64_device_passthrough.py new file mode 100755 index 00000000000..1f3f158a9ff --- /dev/null +++ b/tests/functional/test_aarch64_device_passthrough.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python3 +# +# Boots a nested guest and compare content of a device (passthrough) to a +# reference image. Both vfio group and iommufd passthrough methods are tested. +# +# Copyright (c) 2025 Linaro Ltd. 
+# +# Author: Pierrick Bouvier +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +from qemu_test import QemuSystemTest, Asset +from qemu_test import exec_command, wait_for_console_pattern +from qemu_test import exec_command_and_wait_for_pattern +from random import randbytes + +guest_script = ''' +#!/usr/bin/env bash + +set -euo pipefail +set -x + +# find disks from nvme serial +dev_vfio=$(lsblk --nvme | grep vfio | cut -f 1 -d ' ') +dev_iommufd=$(lsblk --nvme | grep iommufd | cut -f 1 -d ' ') +pci_vfio=$(basename $(readlink -f /sys/block/$dev_vfio/../../../)) +pci_iommufd=$(basename $(readlink -f /sys/block/$dev_iommufd/../../../)) + +# bind disks to vfio +for p in "$pci_vfio" "$pci_iommufd"; do + if [ "$(cat /sys/bus/pci/devices/$p/driver_override)" == vfio-pci ]; then + continue + fi + echo $p > /sys/bus/pci/drivers/nvme/unbind + echo vfio-pci > /sys/bus/pci/devices/$p/driver_override + echo $p > /sys/bus/pci/drivers/vfio-pci/bind +done + +# boot nested guest and execute /host/nested_guest.sh +# one disk is passed through vfio group, the other, through iommufd +qemu-system-aarch64 \ +-M virt \ +-display none \ +-serial stdio \ +-cpu host \ +-enable-kvm \ +-m 1G \ +-kernel /host/Image.gz \ +-drive format=raw,file=/host/guest.ext4,if=virtio \ +-append "root=/dev/vda init=/init -- bash /host/nested_guest.sh" \ +-virtfs local,path=/host,mount_tag=host,security_model=mapped,readonly=off \ +-device vfio-pci,host=$pci_vfio \ +-object iommufd,id=iommufd0 \ +-device vfio-pci,host=$pci_iommufd,iommufd=iommufd0 +''' + +nested_guest_script = ''' +#!/usr/bin/env bash + +set -euo pipefail +set -x + +image_vfio=/host/disk_vfio +image_iommufd=/host/disk_iommufd + +dev_vfio=$(lsblk --nvme | grep vfio | cut -f 1 -d ' ') +dev_iommufd=$(lsblk --nvme | grep iommufd | cut -f 1 -d ' ') + +# compare if devices are identical to original images +diff $image_vfio /dev/$dev_vfio +diff $image_iommufd /dev/$dev_iommufd + +echo device_passthrough_test_ok +''' + +class Aarch64DevicePassthrough(QemuSystemTest): + + # https://github.com/pbo-linaro/qemu-linux-stack + # + # Linux kernel is compiled with defconfig + + # IOMMUFD + VFIO_DEVICE_CDEV + ARM_SMMU_V3_IOMMUFD + # https://docs.kernel.org/driver-api/vfio.html#vfio-device-cde + ASSET_DEVICE_PASSTHROUGH_STACK = Asset( + ('https://fileserver.linaro.org/s/fx5DXxBYme8dw2G/' + 'download/device_passthrough.tar.xz'), + '812750b664d61c2986f2b149939ae28cafbd60d53e9c7e4b16e97143845e196d') + + # This tests the device passthrough implementation, by booting a VM + # supporting it with two nvme disks attached, and launching a nested VM + # reading their content. 
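+    # The nested guest (nested_guest_script above) diffs each passed-through
+    # NVMe disk against the reference images shared over the virtio-9p mount
+    # and prints 'device_passthrough_test_ok', which this test waits for on
+    # the console.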
+ def test_aarch64_device_passthrough(self): + self.set_machine('virt') + self.require_accelerator('tcg') + + self.vm.set_console() + + stack_path_tar_gz = self.ASSET_DEVICE_PASSTHROUGH_STACK.fetch() + self.archive_extract(stack_path_tar_gz, format="tar") + + stack = self.scratch_file('out') + kernel = os.path.join(stack, 'Image.gz') + rootfs_host = os.path.join(stack, 'host.ext4') + disk_vfio = os.path.join(stack, 'disk_vfio') + disk_iommufd = os.path.join(stack, 'disk_iommufd') + guest_cmd = os.path.join(stack, 'guest.sh') + nested_guest_cmd = os.path.join(stack, 'nested_guest.sh') + # we generate two random disks + with open(disk_vfio, "wb") as d: d.write(randbytes(512)) + with open(disk_iommufd, "wb") as d: d.write(randbytes(1024)) + with open(guest_cmd, 'w') as s: s.write(guest_script) + with open(nested_guest_cmd, 'w') as s: s.write(nested_guest_script) + + self.vm.add_args('-cpu', 'max') + self.vm.add_args('-m', '2G') + self.vm.add_args('-M', 'virt,' + 'virtualization=on,' + 'gic-version=max,' + 'iommu=smmuv3') + self.vm.add_args('-kernel', kernel) + self.vm.add_args('-drive', f'format=raw,file={rootfs_host}') + self.vm.add_args('-drive', + f'file={disk_vfio},if=none,id=vfio,format=raw') + self.vm.add_args('-device', 'nvme,serial=vfio,drive=vfio') + self.vm.add_args('-drive', + f'file={disk_iommufd},if=none,id=iommufd,format=raw') + self.vm.add_args('-device', 'nvme,serial=iommufd,drive=iommufd') + self.vm.add_args('-virtfs', + f'local,path={stack}/,mount_tag=host,' + 'security_model=mapped,readonly=off') + # boot and execute guest script + # init will trigger a kernel panic if script fails + self.vm.add_args('-append', + 'root=/dev/vda init=/init -- bash /host/guest.sh') + + self.vm.launch() + wait_for_console_pattern(self, 'device_passthrough_test_ok', + failure_message='Kernel panic') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_aarch64_hotplug_pci.py b/tests/functional/test_aarch64_hotplug_pci.py new file mode 100755 index 00000000000..0c67991b89e --- /dev/null +++ b/tests/functional/test_aarch64_hotplug_pci.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 +# +# The test hotplugs a PCI device and checks it on a Linux guest. +# +# Copyright (c) 2025 Linaro Ltd. +# +# Author: +# Gustavo Romero +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern +from qemu_test import BUILD_DIR + +class HotplugPCI(LinuxKernelTest): + + ASSET_KERNEL = Asset( + ('https://ftp.debian.org/debian/dists/bookworm/main/installer-arm64/' + '20230607+deb12u11/images/netboot/debian-installer/arm64/linux'), + 'd92a60392ce1e379ca198a1a820899f8f0d39a62d047c41ab79492f81541a9d9') + + ASSET_INITRD = Asset( + ('https://ftp.debian.org/debian/dists/bookworm/main/installer-arm64/' + '20230607+deb12u11/images/netboot/debian-installer/arm64/initrd.gz'), + '9f817f76951f3237bca8216bee35267bfb826815687f4b2fcdd5e6c2a917790c') + + def test_hotplug_pci(self): + + self.set_machine('virt') + + self.vm.add_args('-m', '512M', + '-cpu', 'cortex-a57', + '-append', + 'console=ttyAMA0,115200 init=/bin/sh', + '-device', + 'pcie-root-port,bus=pcie.0,chassis=1,slot=1,id=pcie.1', + '-bios', + self.build_file('pc-bios', 'edk2-aarch64-code.fd')) + + # BusyBox prompt + prompt = "~ #" + self.launch_kernel(self.ASSET_KERNEL.fetch(), + self.ASSET_INITRD.fetch(), + wait_for=prompt) + + # Check for initial state: 2 network adapters, lo and enp0s1. 
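+        # ('lo' plus the default network adapter, enp0s1; hot-plugging the
+        # virtio-net-pci device below raises the count to 3 and device_del
+        # brings it back to 2.)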
+ exec_command_and_wait_for_pattern(self, + 'ls /sys/class/net | wc -l', + '2') + + # Hotplug one network adapter to the root port, i.e. pcie.1 bus. + self.vm.cmd('device_add', + driver='virtio-net-pci', + bus='pcie.1', + addr=0, + id='na') + # Wait for the kernel to recognize the new device. + self.wait_for_console_pattern('virtio-pci') + self.wait_for_console_pattern('virtio_net') + + # Check if there is a new network adapter. + exec_command_and_wait_for_pattern(self, + 'ls /sys/class/net | wc -l', + '3') + + self.vm.cmd('device_del', id='na') + exec_command_and_wait_for_pattern(self, + 'ls /sys/class/net | wc -l', + '2') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_aarch64_imx8mp_evk.py b/tests/functional/test_aarch64_imx8mp_evk.py new file mode 100755 index 00000000000..99ddcdef835 --- /dev/null +++ b/tests/functional/test_aarch64_imx8mp_evk.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + + +class Imx8mpEvkMachine(LinuxKernelTest): + + ASSET_IMAGE = Asset( + ('https://cloud.debian.org/images/cloud/bookworm/20231210-1590/' + 'debian-12-generic-arm64-20231210-1590.tar.xz'), + '7ebf1577b32d5af6204df74b54ca2e4675de9b5a9fa14f3ff70b88eeb7b3b359') + + KERNEL_OFFSET = 0x51000000 + KERNEL_SIZE = 32622528 + INITRD_OFFSET = 0x76000000 + INITRD_SIZE = 30987766 + DTB_OFFSET = 0x64F51000 + DTB_SIZE = 45 * 1024 + + def extract(self, in_path, out_path, offset, size): + try: + with open(in_path, "rb") as source: + source.seek(offset) + data = source.read(size) + with open(out_path, "wb") as target: + target.write(data) + except (IOError, ValueError) as e: + self.log.error(f"Failed to extract {out_path}: {e}") + raise + + def setUp(self): + super().setUp() + + self.image_path = self.scratch_file("disk.raw") + self.kernel_path = self.scratch_file("linux") + self.initrd_path = self.scratch_file("initrd.zstd") + self.dtb_path = self.scratch_file("imx8mp-evk.dtb") + + self.archive_extract(self.ASSET_IMAGE) + self.extract(self.image_path, self.kernel_path, + self.KERNEL_OFFSET, self.KERNEL_SIZE) + self.extract(self.image_path, self.initrd_path, + self.INITRD_OFFSET, self.INITRD_SIZE) + self.extract(self.image_path, self.dtb_path, + self.DTB_OFFSET, self.DTB_SIZE) + + def test_aarch64_imx8mp_evk_usdhc(self): + self.require_accelerator("tcg") + self.set_machine('imx8mp-evk') + self.vm.set_console(console_index=1) + self.vm.add_args('-m', '2G', + '-smp', '4', + '-kernel', self.kernel_path, + '-initrd', self.initrd_path, + '-dtb', self.dtb_path, + '-append', 'root=/dev/mmcblk2p1', + '-drive', f'file={self.image_path},if=sd,bus=2,' + 'format=raw,id=mmcblk2,snapshot=on') + + self.vm.launch() + self.wait_for_console_pattern('Welcome to ') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_aarch64_kvm.py b/tests/functional/test_aarch64_kvm.py new file mode 100755 index 00000000000..9fb9286139f --- /dev/null +++ b/tests/functional/test_aarch64_kvm.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +# +# Functional test that runs subsets of kvm-unit-tests on Aarch64. +# These can run on TCG and any accelerator supporting nested +# virtualisation. 
+# +# Copyright (c) 2025 Linaro +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test import exec_command_and_wait_for_pattern as ec_and_wait +from qemu_test.linuxkernel import LinuxKernelTest + + +class Aarch64VirtKVMTests(LinuxKernelTest): + + ASSET_KVM_TEST_KERNEL = Asset( + 'https://fileserver.linaro.org/s/HmjaxXXYHYSqbes/' + 'download?path=%2F&files=' + 'image-with-kvm-tool-and-unit-tests.gz', + '34de4aaea90db5da42729e7d28b77f392c37a2f4da859f889a5234aaf0970696') + + # make it easier to detect successful return to shell + PS1 = 'RES=[$?] # ' + OK_CMD = 'RES=[0] # ' + + # base of tests + KUT_BASE = "/usr/share/kvm-unit-tests/" + + def _launch_guest(self, kvm_mode="nvhe"): + + self.set_machine('virt') + kernel_path = self.ASSET_KVM_TEST_KERNEL.fetch() + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + f"console=ttyAMA0 kvm-arm.mode={kvm_mode}") + + self.vm.add_args("-cpu", "cortex-a72") + self.vm.add_args("-machine", "virt,gic-version=3,virtualization=on", + '-kernel', kernel_path, + '-append', kernel_command_line) + self.vm.add_args("-smp", "2", "-m", "320") + + self.vm.launch() + + self.wait_for_console_pattern('buildroot login:') + ec_and_wait(self, 'root', '#') + ec_and_wait(self, f"export PS1='{self.PS1}'", self.OK_CMD) + + # this is just a smoketest, we don't run all the tests in the image + def _smoketest_kvm(self): + ec_and_wait(self, f"{self.KUT_BASE}/selftest-setup", self.OK_CMD) + ec_and_wait(self, f"{self.KUT_BASE}/selftest-smp", self.OK_CMD) + ec_and_wait(self, f"{self.KUT_BASE}/selftest-vectors-kernel", self.OK_CMD) + ec_and_wait(self, f"{self.KUT_BASE}/selftest-vectors-user", self.OK_CMD) + + def test_aarch64_nvhe_selftest(self): + self._launch_guest("nvhe") + self._smoketest_kvm() + + def test_aarch64_vhe_selftest(self): + self._launch_guest("vhe") + self._smoketest_kvm() + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_aarch64_raspi3.py b/tests/functional/test_aarch64_raspi3.py new file mode 100755 index 00000000000..74f6630ed26 --- /dev/null +++ b/tests/functional/test_aarch64_raspi3.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on a Raspberry Pi machine +# and checks the console +# +# Copyright (c) 2020 Philippe Mathieu-Daudé +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + + +class Aarch64Raspi3Machine(LinuxKernelTest): + + ASSET_RPI3_UEFI = Asset( + ('https://github.com/pbatard/RPi3/releases/download/' + 'v1.15/RPi3_UEFI_Firmware_v1.15.zip'), + '8cff2e979560048b4c84921f41a91893240b9fb71a88f0b5c5d6c8edd994bd5b') + + def test_aarch64_raspi3_atf(self): + efi_name = 'RPI_EFI.fd' + efi_fd = self.archive_extract(self.ASSET_RPI3_UEFI, member=efi_name) + + self.set_machine('raspi3b') + self.vm.set_console(console_index=1) + self.vm.add_args('-cpu', 'cortex-a53', + '-nodefaults', + '-device', f'loader,file={efi_fd},force-raw=true') + self.vm.launch() + self.wait_for_console_pattern('version UEFI Firmware v1.15') + + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_aarch64_raspi4.py b/tests/functional/test_aarch64_raspi4.py new file mode 100755 index 00000000000..7a4302b0c5a --- /dev/null +++ b/tests/functional/test_aarch64_raspi4.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on a Raspberry Pi machine +# and checks the console +# +# 
SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset +from qemu_test import exec_command_and_wait_for_pattern + + +class Aarch64Raspi4Machine(LinuxKernelTest): + + """ + The kernel can be rebuilt using the kernel source referenced + and following the instructions on the on: + https://www.raspberrypi.org/documentation/linux/kernel/building.md + """ + ASSET_KERNEL_20190215 = Asset( + ('http://archive.raspberrypi.org/debian/' + 'pool/main/r/raspberrypi-firmware/' + 'raspberrypi-kernel_1.20230106-1_arm64.deb'), + '56d5713c8f6eee8a0d3f0e73600ec11391144fef318b08943e9abd94c0a9baf7') + + ASSET_INITRD = Asset( + ('https://github.com/groeck/linux-build-test/raw/' + '86b2be1384d41c8c388e63078a847f1e1c4cb1de/rootfs/' + 'arm64/rootfs.cpio.gz'), + '7c0b16d1853772f6f4c3ca63e789b3b9ff4936efac9c8a01fb0c98c05c7a7648') + + def test_arm_raspi4(self): + kernel_path = self.archive_extract(self.ASSET_KERNEL_20190215, + member='boot/kernel8.img') + dtb_path = self.archive_extract(self.ASSET_KERNEL_20190215, + member='boot/bcm2711-rpi-4-b.dtb') + + self.set_machine('raspi4b') + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'earlycon=pl011,mmio32,0xfe201000 ' + + 'console=ttyAMA0,115200 ' + + 'root=/dev/mmcblk1p2 rootwait ' + + 'dwc_otg.fiq_fsm_enable=0') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-append', kernel_command_line) + # When PCI is supported we can add a USB controller: + # '-device', 'qemu-xhci,bus=pcie.1,id=xhci', + # '-device', 'usb-kbd,bus=xhci.0', + self.vm.launch() + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.wait_for_console_pattern(console_pattern) + # When USB is enabled we can look for this + # console_pattern = 'Product: QEMU USB Keyboard' + # self.wait_for_console_pattern(console_pattern) + console_pattern = 'Waiting for root device' + self.wait_for_console_pattern(console_pattern) + + + def test_arm_raspi4_initrd(self): + kernel_path = self.archive_extract(self.ASSET_KERNEL_20190215, + member='boot/kernel8.img') + dtb_path = self.archive_extract(self.ASSET_KERNEL_20190215, + member='boot/bcm2711-rpi-4-b.dtb') + initrd_path = self.uncompress(self.ASSET_INITRD) + + self.set_machine('raspi4b') + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'earlycon=pl011,mmio32,0xfe201000 ' + + 'console=ttyAMA0,115200 ' + + 'panic=-1 noreboot ' + + 'dwc_otg.fiq_fsm_enable=0') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-initrd', initrd_path, + '-append', kernel_command_line, + '-no-reboot') + # When PCI is supported we can add a USB controller: + # '-device', 'qemu-xhci,bus=pcie.1,id=xhci', + # '-device', 'usb-kbd,bus=xhci.0', + self.vm.launch() + self.wait_for_console_pattern('Boot successful.') + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'BCM2835') + exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', + 'cprman@7e101000') + exec_command_and_wait_for_pattern(self, 'halt', 'reboot: System halted') + # TODO: Raspberry Pi4 doesn't shut down properly with recent kernels + # Wait for VM to shut down gracefully + #self.vm.wait() + + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_aarch64_replay.py b/tests/functional/test_aarch64_replay.py new file mode 100755 index 00000000000..db12e76603f --- /dev/null +++ b/tests/functional/test_aarch64_replay.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +# +# Replay test that boots a Linux kernel on an aarch64 machine +# and 
checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from subprocess import check_call, DEVNULL + +from qemu_test import Asset, skipIfOperatingSystem, get_qemu_img +from replay_kernel import ReplayKernelBase + + +class Aarch64Replay(ReplayKernelBase): + + ASSET_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/arm64/Image', + 'b74743c5e89e1cea0f73368d24ae0ae85c5204ff84be3b5e9610417417d2f235') + + ASSET_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/arm64/rootfs.ext4.zst', + 'a1acaaae2068df4648d04ff75f532aaa8c5edcd6b936122b6f0db4848a07b465') + + def test_aarch64_virt(self): + self.require_netdev('user') + self.set_machine('virt') + self.cpu = 'cortex-a57' + kernel_path = self.ASSET_KERNEL.fetch() + + raw_disk = self.uncompress(self.ASSET_ROOTFS) + disk = self.scratch_file('scratch.qcow2') + qemu_img = get_qemu_img(self) + check_call([qemu_img, 'create', '-f', 'qcow2', '-b', raw_disk, + '-F', 'raw', disk], stdout=DEVNULL, stderr=DEVNULL) + + args = ('-drive', 'file=%s,snapshot=on,id=hd0,if=none' % disk, + '-drive', 'driver=blkreplay,id=hd0-rr,if=none,image=hd0', + '-device', 'virtio-blk-device,drive=hd0-rr', + '-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22', + '-device', 'virtio-net,netdev=vnet', + '-object', 'filter-replay,id=replay,netdev=vnet') + + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyAMA0 root=/dev/vda') + console_pattern = 'Welcome to TuxTest' + self.run_rr(kernel_path, kernel_command_line, console_pattern, + args=args) + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_aarch64_reverse_debug.py b/tests/functional/test_aarch64_reverse_debug.py new file mode 100755 index 00000000000..58d45328350 --- /dev/null +++ b/tests/functional/test_aarch64_reverse_debug.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Reverse debugging test +# +# Copyright (c) 2020 ISP RAS +# +# Author: +# Pavel Dovgalyuk +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import Asset, skipIfMissingImports, skipFlakyTest +from reverse_debugging import ReverseDebugging + + +@skipIfMissingImports('avocado.utils') +class ReverseDebugging_AArch64(ReverseDebugging): + + REG_PC = 32 + + KERNEL_ASSET = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/' + 'releases/29/Everything/aarch64/os/images/pxeboot/vmlinuz'), + '7e1430b81c26bdd0da025eeb8fbd77b5dc961da4364af26e771bd39f379cbbf7') + + @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/2921") + def test_aarch64_virt(self): + self.set_machine('virt') + self.cpu = 'cortex-a53' + kernel_path = self.KERNEL_ASSET.fetch() + self.reverse_debugging(args=('-kernel', kernel_path)) + + +if __name__ == '__main__': + ReverseDebugging.main() diff --git a/tests/functional/test_aarch64_rme_sbsaref.py b/tests/functional/test_aarch64_rme_sbsaref.py new file mode 100755 index 00000000000..746770e776d --- /dev/null +++ b/tests/functional/test_aarch64_rme_sbsaref.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Realms environment on sbsa-ref machine and a +# nested guest VM using it. +# +# Copyright (c) 2024 Linaro Ltd. 
+# +# Author: Pierrick Bouvier +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern +from qemu_test import exec_command_and_wait_for_pattern +from test_aarch64_rme_virt import test_realms_guest + + +class Aarch64RMESbsaRefMachine(QemuSystemTest): + + # Stack is built with OP-TEE build environment from those instructions: + # https://linaro.atlassian.net/wiki/spaces/QEMU/pages/29051027459/ + # https://github.com/pbo-linaro/qemu-rme-stack + ASSET_RME_STACK_SBSA = Asset( + ('https://fileserver.linaro.org/s/KJyeBxL82mz2r7F/' + 'download/rme-stack-op-tee-4.2.0-cca-v4-sbsa.tar.gz'), + 'dd9ab28ec869bdf3b5376116cb3689103b43433fd5c4bca0f4a8d8b3c104999e') + + # This tests the FEAT_RME cpu implementation, by booting a VM supporting it, + # and launching a nested VM using it. + def test_aarch64_rme_sbsaref(self): + self.set_machine('sbsa-ref') + self.require_accelerator('tcg') + self.require_netdev('user') + + self.vm.set_console() + + stack_path_tar_gz = self.ASSET_RME_STACK_SBSA.fetch() + self.archive_extract(stack_path_tar_gz, format="tar") + + rme_stack = self.scratch_file('rme-stack-op-tee-4.2.0-cca-v4-sbsa') + pflash0 = os.path.join(rme_stack, 'images', 'SBSA_FLASH0.fd') + pflash1 = os.path.join(rme_stack, 'images', 'SBSA_FLASH1.fd') + virtual = os.path.join(rme_stack, 'images', 'disks', 'virtual') + drive = os.path.join(rme_stack, 'out-br', 'images', 'rootfs.ext4') + + self.vm.add_args('-cpu', 'max,x-rme=on,pauth-impdef=on') + self.vm.add_args('-m', '2G') + self.vm.add_args('-M', 'sbsa-ref') + self.vm.add_args('-drive', f'file={pflash0},format=raw,if=pflash') + self.vm.add_args('-drive', f'file={pflash1},format=raw,if=pflash') + self.vm.add_args('-drive', f'file=fat:rw:{virtual},format=raw') + self.vm.add_args('-drive', f'format=raw,if=none,file={drive},id=hd0') + self.vm.add_args('-device', 'virtio-blk-pci,drive=hd0') + self.vm.add_args('-device', 'virtio-9p-pci,fsdev=shr0,mount_tag=shr0') + self.vm.add_args('-fsdev', f'local,security_model=none,path={rme_stack},id=shr0') + self.vm.add_args('-device', 'virtio-net-pci,netdev=net0') + self.vm.add_args('-netdev', 'user,id=net0') + + self.vm.launch() + # Wait for host VM boot to complete. + wait_for_console_pattern(self, 'Welcome to Buildroot', + failure_message='Synchronous Exception at') + exec_command_and_wait_for_pattern(self, 'root', '#') + + test_realms_guest(self) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_aarch64_rme_virt.py b/tests/functional/test_aarch64_rme_virt.py new file mode 100755 index 00000000000..8452d27928f --- /dev/null +++ b/tests/functional/test_aarch64_rme_virt.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Realms environment on virt machine and a nested +# guest VM using it. +# +# Copyright (c) 2024 Linaro Ltd. 
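The Asset definitions used throughout these tests pair a download URL with the expected SHA-256 of the file; the class itself is provided by the existing qemu_test package. Conceptually the fetch step amounts to something like the following sketch (the cache location and function name are assumptions, not the real code):

    import hashlib
    import urllib.request
    from pathlib import Path

    def fetch_asset(url, sha256,
                    cache=Path.home() / ".cache" / "qemu" / "download"):
        dest = cache / sha256           # cache keyed by content hash
        if not dest.exists():
            cache.mkdir(parents=True, exist_ok=True)
            urllib.request.urlretrieve(url, dest)
        if hashlib.sha256(dest.read_bytes()).hexdigest() != sha256:
            raise ValueError(f"checksum mismatch for {url}")
        return str(dest)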
+# +# Author: Pierrick Bouvier +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +from qemu_test import QemuSystemTest, Asset +from qemu_test import exec_command, wait_for_console_pattern +from qemu_test import exec_command_and_wait_for_pattern + +def test_realms_guest(test_rme_instance): + + # Boot the (nested) guest VM + exec_command(test_rme_instance, + 'qemu-system-aarch64 -M virt,gic-version=3 ' + '-cpu host -enable-kvm -m 512M ' + '-M confidential-guest-support=rme0 ' + '-object rme-guest,id=rme0 ' + '-device virtio-net-pci,netdev=net0,romfile= ' + '-netdev user,id=net0 ' + '-kernel /mnt/out/bin/Image ' + '-initrd /mnt/out-br/images/rootfs.cpio ' + '-serial stdio') + # Detect Realm activation during (nested) guest boot. + wait_for_console_pattern(test_rme_instance, + 'SMC_RMI_REALM_ACTIVATE') + # Wait for (nested) guest boot to complete. + wait_for_console_pattern(test_rme_instance, + 'Welcome to Buildroot') + exec_command_and_wait_for_pattern(test_rme_instance, 'root', '#') + # query (nested) guest cca report + exec_command(test_rme_instance, 'cca-workload-attestation report') + wait_for_console_pattern(test_rme_instance, + '"cca-platform-hash-algo-id": "sha-256"') + wait_for_console_pattern(test_rme_instance, + '"cca-realm-hash-algo-id": "sha-512"') + wait_for_console_pattern(test_rme_instance, + '"cca-realm-public-key-hash-algo-id": "sha-256"') + +class Aarch64RMEVirtMachine(QemuSystemTest): + + # Stack is built with OP-TEE build environment from those instructions: + # https://linaro.atlassian.net/wiki/spaces/QEMU/pages/29051027459/ + # https://github.com/pbo-linaro/qemu-rme-stack + ASSET_RME_STACK_VIRT = Asset( + ('https://fileserver.linaro.org/s/iaRsNDJp2CXHMSJ/' + 'download/rme-stack-op-tee-4.2.0-cca-v4-qemu_v8.tar.gz'), + '1851adc232b094384d8b879b9a2cfff07ef3d6205032b85e9b3a4a9ae6b0b7ad') + + # This tests the FEAT_RME cpu implementation, by booting a VM supporting it, + # and launching a nested VM using it. + def test_aarch64_rme_virt(self): + self.set_machine('virt') + self.require_accelerator('tcg') + self.require_netdev('user') + + self.vm.set_console() + + stack_path_tar_gz = self.ASSET_RME_STACK_VIRT.fetch() + self.archive_extract(stack_path_tar_gz, format="tar") + + rme_stack = self.scratch_file('rme-stack-op-tee-4.2.0-cca-v4-qemu_v8') + kernel = os.path.join(rme_stack, 'out', 'bin', 'Image') + bios = os.path.join(rme_stack, 'out', 'bin', 'flash.bin') + drive = os.path.join(rme_stack, 'out-br', 'images', 'rootfs.ext4') + + self.vm.add_args('-cpu', 'max,x-rme=on,pauth-impdef=on') + self.vm.add_args('-m', '2G') + self.vm.add_args('-M', 'virt,acpi=off,' + 'virtualization=on,' + 'secure=on,' + 'gic-version=3') + self.vm.add_args('-bios', bios) + self.vm.add_args('-kernel', kernel) + self.vm.add_args('-drive', f'format=raw,if=none,file={drive},id=hd0') + self.vm.add_args('-device', 'virtio-blk-pci,drive=hd0') + self.vm.add_args('-device', 'virtio-9p-device,fsdev=shr0,mount_tag=shr0') + self.vm.add_args('-fsdev', f'local,security_model=none,path={rme_stack},id=shr0') + self.vm.add_args('-device', 'virtio-net-pci,netdev=net0') + self.vm.add_args('-netdev', 'user,id=net0') + # We need to add nokaslr to avoid triggering this sporadic bug: + # https://gitlab.com/qemu-project/qemu/-/issues/2823 + self.vm.add_args('-append', 'root=/dev/vda nokaslr') + + self.vm.launch() + # Wait for host VM boot to complete. 
+ wait_for_console_pattern(self, 'Welcome to Buildroot', + failure_message='Synchronous Exception at') + exec_command_and_wait_for_pattern(self, 'root', '#') + + test_realms_guest(self) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_aarch64_sbsaref.py b/tests/functional/test_aarch64_sbsaref.py new file mode 100755 index 00000000000..d3402f5080a --- /dev/null +++ b/tests/functional/test_aarch64_sbsaref.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a kernel and checks the console +# +# Copyright (c) 2023-2024 Linaro Ltd. +# +# Authors: +# Philippe Mathieu-Daudé +# Marcin Juszkiewicz +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern +from qemu_test import interrupt_interactive_console_until_pattern + + +def fetch_firmware(test): + """ + Flash volumes generated using: + + Toolchain from Debian: + aarch64-linux-gnu-gcc (Debian 12.2.0-14) 12.2.0 + + Used components: + + - Trusted Firmware v2.12.0 + - Tianocore EDK2 edk2-stable202411 + - Tianocore EDK2-platforms 4b3530d + + """ + + # Secure BootRom (TF-A code) + fs0_path = test.uncompress(Aarch64SbsarefMachine.ASSET_FLASH0) + + # Non-secure rom (UEFI and EFI variables) + fs1_path = test.uncompress(Aarch64SbsarefMachine.ASSET_FLASH1) + + for path in [fs0_path, fs1_path]: + with open(path, "ab+") as fd: + fd.truncate(256 << 20) # Expand volumes to 256MiB + + test.vm.add_args( + "-drive", f"if=pflash,file={fs0_path},format=raw", + "-drive", f"if=pflash,file={fs1_path},format=raw", + ) + + +class Aarch64SbsarefMachine(QemuSystemTest): + """ + As firmware runs at a higher privilege level than the hypervisor we + can only run these tests under TCG emulation. 
+ """ + + timeout = 180 + + ASSET_FLASH0 = Asset( + ('https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/' + '20241122-189881/edk2/SBSA_FLASH0.fd.xz'), + '76eb89d42eebe324e4395329f47447cda9ac920aabcf99aca85424609c3384a5') + + ASSET_FLASH1 = Asset( + ('https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/' + '20241122-189881/edk2/SBSA_FLASH1.fd.xz'), + 'f850f243bd8dbd49c51e061e0f79f1697546938f454aeb59ab7d93e5f0d412fc') + + def test_sbsaref_edk2_firmware(self): + + self.set_machine('sbsa-ref') + + fetch_firmware(self) + + self.vm.set_console() + self.vm.add_args('-cpu', 'cortex-a57') + self.vm.launch() + + # TF-A boot sequence: + # + # https://github.com/ARM-software/arm-trusted-firmware/blob/v2.8.0/\ + # docs/design/trusted-board-boot.rst#trusted-board-boot-sequence + # https://trustedfirmware-a.readthedocs.io/en/v2.8/\ + # design/firmware-design.html#cold-boot + + # AP Trusted ROM + wait_for_console_pattern(self, "Booting Trusted Firmware") + wait_for_console_pattern(self, "BL1: v2.12.0(release):") + wait_for_console_pattern(self, "BL1: Booting BL2") + + # Trusted Boot Firmware + wait_for_console_pattern(self, "BL2: v2.12.0(release)") + wait_for_console_pattern(self, "Booting BL31") + + # EL3 Runtime Software + wait_for_console_pattern(self, "BL31: v2.12.0(release)") + + # Non-trusted Firmware + wait_for_console_pattern(self, "UEFI firmware (version 1.0") + interrupt_interactive_console_until_pattern(self, "QEMU SBSA-REF Machine") + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_aarch64_sbsaref_alpine.py b/tests/functional/test_aarch64_sbsaref_alpine.py new file mode 100755 index 00000000000..87769993831 --- /dev/null +++ b/tests/functional/test_aarch64_sbsaref_alpine.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a kernel and checks the console +# +# Copyright (c) 2023-2024 Linaro Ltd. 
+# +# Authors: +# Philippe Mathieu-Daudé +# Marcin Juszkiewicz +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import QemuSystemTest, Asset, skipSlowTest +from qemu_test import wait_for_console_pattern +from test_aarch64_sbsaref import fetch_firmware + + +class Aarch64SbsarefAlpine(QemuSystemTest): + + ASSET_ALPINE_ISO = Asset( + ('https://dl-cdn.alpinelinux.org/' + 'alpine/v3.17/releases/aarch64/alpine-standard-3.17.2-aarch64.iso'), + '5a36304ecf039292082d92b48152a9ec21009d3a62f459de623e19c4bd9dc027') + + # This tests the whole boot chain from EFI to Userspace + # We only boot a whole OS for the current top level CPU and GIC + # Other test profiles should use more minimal boots + def boot_alpine_linux(self, cpu=None): + self.set_machine('sbsa-ref') + + fetch_firmware(self) + iso_path = self.ASSET_ALPINE_ISO.fetch() + + self.vm.set_console() + self.vm.add_args( + "-drive", f"file={iso_path},media=cdrom,format=raw", + ) + if cpu: + self.vm.add_args("-cpu", cpu) + + self.vm.launch() + wait_for_console_pattern(self, "Welcome to Alpine Linux 3.17") + + def test_sbsaref_alpine_linux_cortex_a57(self): + self.boot_alpine_linux("cortex-a57") + + def test_sbsaref_alpine_linux_default_cpu(self): + self.boot_alpine_linux() + + def test_sbsaref_alpine_linux_max_pauth_off(self): + self.boot_alpine_linux("max,pauth=off") + + def test_sbsaref_alpine_linux_max_pauth_impdef(self): + self.boot_alpine_linux("max,pauth-impdef=on") + + @skipSlowTest() # Test might timeout due to PAuth emulation + def test_sbsaref_alpine_linux_max(self): + self.boot_alpine_linux("max") + + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_aarch64_sbsaref_freebsd.py b/tests/functional/test_aarch64_sbsaref_freebsd.py new file mode 100755 index 00000000000..7ef016fba62 --- /dev/null +++ b/tests/functional/test_aarch64_sbsaref_freebsd.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a kernel and checks the console +# +# Copyright (c) 2023-2024 Linaro Ltd. 
+# +# Authors: +# Philippe Mathieu-Daudé +# Marcin Juszkiewicz +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import QemuSystemTest, Asset, skipSlowTest +from qemu_test import wait_for_console_pattern +from test_aarch64_sbsaref import fetch_firmware + + +class Aarch64SbsarefFreeBSD(QemuSystemTest): + + ASSET_FREEBSD_ISO = Asset( + ('http://ftp-archive.freebsd.org/pub/FreeBSD-Archive/old-releases/arm64' + '/aarch64/ISO-IMAGES/14.1/FreeBSD-14.1-RELEASE-arm64-aarch64-bootonly.iso.xz'), + '7313a4495ffd71ab77b49b1e83f571521c32756e1d75bf48bd890e0ab0f75827') + + # This tests the whole boot chain from EFI to Userspace + # We only boot a whole OS for the current top level CPU and GIC + # Other test profiles should use more minimal boots + def boot_freebsd14(self, cpu=None): + self.set_machine('sbsa-ref') + + fetch_firmware(self) + img_path = self.uncompress(self.ASSET_FREEBSD_ISO) + + self.vm.set_console() + self.vm.add_args( + "-drive", f"file={img_path},format=raw,snapshot=on", + ) + if cpu: + self.vm.add_args("-cpu", cpu) + + self.vm.launch() + wait_for_console_pattern(self, 'Welcome to FreeBSD!') + + def test_sbsaref_freebsd14_cortex_a57(self): + self.boot_freebsd14("cortex-a57") + + def test_sbsaref_freebsd14_default_cpu(self): + self.boot_freebsd14() + + def test_sbsaref_freebsd14_max_pauth_off(self): + self.boot_freebsd14("max,pauth=off") + + @skipSlowTest() # Test might timeout due to PAuth emulation + def test_sbsaref_freebsd14_max_pauth_impdef(self): + self.boot_freebsd14("max,pauth-impdef=on") + + @skipSlowTest() # Test might timeout due to PAuth emulation + def test_sbsaref_freebsd14_max(self): + self.boot_freebsd14("max") + + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_aarch64_smmu.py b/tests/functional/test_aarch64_smmu.py new file mode 100755 index 00000000000..e0f4a922176 --- /dev/null +++ b/tests/functional/test_aarch64_smmu.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# SMMUv3 Functional tests +# +# Copyright (c) 2021 Red Hat, Inc. +# +# Author: +# Eric Auger +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
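The wait_for_console_pattern() and exec_command_and_wait_for_pattern() helpers used by all of these tests come from qemu_test rather than this patch. The underlying idea is simply to scan the guest serial console until a success pattern (or an optional failure pattern) shows up; a simplified sketch, assuming a file-like object yielding decoded console lines:

    def wait_for_pattern(console_lines, success, failure=None):
        for line in console_lines:
            if failure and failure in line:
                raise AssertionError(f"failure pattern seen: {line!r}")
            if success in line:
                return line
        raise AssertionError(f"console closed before {success!r} appeared")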
+ +import os +import time + +from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern +from qemu_test import BUILD_DIR +from qemu.utils import kvm_available, hvf_available + + +class SMMU(LinuxKernelTest): + + default_kernel_params = ('earlyprintk=pl011,0x9000000 no_timer_check ' + 'printk.time=1 rd_NO_PLYMOUTH net.ifnames=0 ' + 'console=ttyAMA0 rd.rescue') + IOMMU_ADDON = ',iommu_platform=on,disable-modern=off,disable-legacy=on' + kernel_path = None + initrd_path = None + kernel_params = None + + GUEST_PORT = 8080 + + def set_up_boot(self, path): + self.vm.add_args('-device', 'virtio-blk-pci,bus=pcie.0,' + + 'drive=drv0,id=virtio-disk0,bootindex=1,' + 'werror=stop,rerror=stop' + self.IOMMU_ADDON) + self.vm.add_args('-drive', + f'file={path},if=none,cache=writethrough,id=drv0,snapshot=on') + + self.vm.add_args('-netdev', + 'user,id=n1,hostfwd=tcp:127.0.0.1:0-:%d' % + self.GUEST_PORT) + self.vm.add_args('-device', 'virtio-net,netdev=n1' + self.IOMMU_ADDON) + + def common_vm_setup(self, kernel, initrd, disk): + if hvf_available(self.qemu_bin): + accel = "hvf" + elif kvm_available(self.qemu_bin): + accel = "kvm" + else: + self.skipTest("Neither HVF nor KVM accelerator is available") + self.require_accelerator(accel) + self.require_netdev('user') + self.set_machine("virt") + self.vm.add_args('-m', '1G') + self.vm.add_args("-accel", accel) + self.vm.add_args("-cpu", "host") + self.vm.add_args("-machine", "iommu=smmuv3") + self.vm.add_args("-d", "guest_errors") + self.vm.add_args('-bios', os.path.join(BUILD_DIR, 'pc-bios', + 'edk2-aarch64-code.fd')) + self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0') + self.vm.add_args('-object', + 'rng-random,id=rng0,filename=/dev/urandom') + + self.kernel_path = kernel.fetch() + self.initrd_path = initrd.fetch() + self.set_up_boot(disk.fetch()) + + def run_and_check(self, filename, hashsum): + self.vm.add_args('-initrd', self.initrd_path) + self.vm.add_args('-append', self.kernel_params) + self.launch_kernel(self.kernel_path, initrd=self.initrd_path, + wait_for='attach it to a bug report.') + prompt = '# ' + # Fedora 33 requires 'return' to be pressed to enter the shell. + # There seems to be a small race between detecting the previous ':' + # and sending the newline, so we need to add a small delay here. + self.wait_for_console_pattern(':') + time.sleep(0.2) + exec_command_and_wait_for_pattern(self, '\n', prompt) + exec_command_and_wait_for_pattern(self, 'cat /proc/cmdline', + self.kernel_params) + + # Checking for SMMU enablement: + self.log.info("Checking whether SMMU has been enabled...") + exec_command_and_wait_for_pattern(self, 'dmesg | grep smmu', + 'arm-smmu-v3') + self.wait_for_console_pattern(prompt) + exec_command_and_wait_for_pattern(self, + 'find /sys/kernel/iommu_groups/ -type l', + 'devices/0000:00:') + self.wait_for_console_pattern(prompt) + + # Copy a file (checked later), umount afterwards to drop disk cache: + self.log.info("Checking hard disk...") + exec_command_and_wait_for_pattern(self, + "while ! 
(dmesg -c | grep vda:) ; do sleep 1 ; done", + "vda2") + exec_command_and_wait_for_pattern(self, 'mount /dev/vda2 /sysroot', + 'mounted filesystem') + exec_command_and_wait_for_pattern(self, 'cp /bin/vi /sysroot/root/vi', + prompt) + exec_command_and_wait_for_pattern(self, 'umount /sysroot', prompt) + # Switch from initrd to the cloud image filesystem: + exec_command_and_wait_for_pattern(self, 'mount /dev/vda2 /sysroot', + prompt) + exec_command_and_wait_for_pattern(self, + ('for d in dev proc sys run ; do ' + 'mount -o bind /$d /sysroot/$d ; done'), prompt) + exec_command_and_wait_for_pattern(self, 'chroot /sysroot', prompt) + # Check files on the hard disk: + exec_command_and_wait_for_pattern(self, + ('if diff -q /root/vi /usr/bin/vi ; then echo "file" "ok" ; ' + 'else echo "files differ"; fi'), 'file ok') + self.wait_for_console_pattern(prompt) + exec_command_and_wait_for_pattern(self, f'sha256sum {filename}', + hashsum) + + # Check virtio-net via HTTP: + exec_command_and_wait_for_pattern(self, 'dhclient eth0', prompt) + self.check_http_download(filename, hashsum, self.GUEST_PORT) + + + # 5.3 kernel without RIL # + + ASSET_KERNEL_F31 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/' + 'releases/31/Server/aarch64/os/images/pxeboot/vmlinuz'), + '3ae07fcafbfc8e4abeb693035a74fe10698faae15e9ccd48882a9167800c1527') + + ASSET_INITRD_F31 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/' + 'releases/31/Server/aarch64/os/images/pxeboot/initrd.img'), + '9f3146b28bc531c689f3c5f114cb74e4bd7bd548e0ba19fa77921d8bd256755a') + + ASSET_DISK_F31 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases' + '/31/Cloud/aarch64/images/Fedora-Cloud-Base-31-1.9.aarch64.qcow2'), + '1e18d9c0cf734940c4b5d5ec592facaed2af0ad0329383d5639c997fdf16fe49') + + F31_FILENAME = '/boot/initramfs-5.3.7-301.fc31.aarch64.img' + F31_HSUM = '1a4beec6607d94df73d9dd1b4985c9c23dd0fdcf4e6ca1351d477f190df7bef9' + + def test_smmu_noril(self): + self.common_vm_setup(self.ASSET_KERNEL_F31, self.ASSET_INITRD_F31, + self.ASSET_DISK_F31) + self.kernel_params = self.default_kernel_params + self.run_and_check(self.F31_FILENAME, self.F31_HSUM) + + def test_smmu_noril_passthrough(self): + self.common_vm_setup(self.ASSET_KERNEL_F31, self.ASSET_INITRD_F31, + self.ASSET_DISK_F31) + self.kernel_params = (self.default_kernel_params + + ' iommu.passthrough=on') + self.run_and_check(self.F31_FILENAME, self.F31_HSUM) + + def test_smmu_noril_nostrict(self): + self.common_vm_setup(self.ASSET_KERNEL_F31, self.ASSET_INITRD_F31, + self.ASSET_DISK_F31) + self.kernel_params = (self.default_kernel_params + + ' iommu.strict=0') + self.run_and_check(self.F31_FILENAME, self.F31_HSUM) + + + # 5.8 kernel featuring range invalidation + # >= v5.7 kernel + + ASSET_KERNEL_F33 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/' + 'releases/33/Server/aarch64/os/images/pxeboot/vmlinuz'), + 'd8b1e6f7241f339d8e7609c456cf0461ffa4583ed07e0b55c7d1d8a0c154aa89') + + ASSET_INITRD_F33 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/' + 'releases/33/Server/aarch64/os/images/pxeboot/initrd.img'), + '92513f55295c2c16a777f7b6c35ccd70a438e9e1e40b6ba39e0e60900615b3df') + + ASSET_DISK_F33 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases' + '/33/Cloud/aarch64/images/Fedora-Cloud-Base-33-1.2.aarch64.qcow2'), + 'e7f75cdfd523fe5ac2ca9eeece68edc1a81f386a17f969c1d1c7c87031008a6b') + + F33_FILENAME = 
'/boot/initramfs-5.8.15-301.fc33.aarch64.img'
+    F33_HSUM = '079cfad0caa82e84c8ca1fb0897a4999dd769f262216099f518619e807a550d9'
+
+    def test_smmu_ril(self):
+        self.common_vm_setup(self.ASSET_KERNEL_F33, self.ASSET_INITRD_F33,
+                             self.ASSET_DISK_F33)
+        self.kernel_params = self.default_kernel_params
+        self.run_and_check(self.F33_FILENAME, self.F33_HSUM)
+
+    def test_smmu_ril_passthrough(self):
+        self.common_vm_setup(self.ASSET_KERNEL_F33, self.ASSET_INITRD_F33,
+                             self.ASSET_DISK_F33)
+        self.kernel_params = (self.default_kernel_params +
+                              ' iommu.passthrough=on')
+        self.run_and_check(self.F33_FILENAME, self.F33_HSUM)
+
+    def test_smmu_ril_nostrict(self):
+        self.common_vm_setup(self.ASSET_KERNEL_F33, self.ASSET_INITRD_F33,
+                             self.ASSET_DISK_F33)
+        self.kernel_params = (self.default_kernel_params +
+                              ' iommu.strict=0')
+        self.run_and_check(self.F33_FILENAME, self.F33_HSUM)
+
+
+if __name__ == '__main__':
+    LinuxKernelTest.main()
diff --git a/tests/functional/test_aarch64_tcg_plugins.py b/tests/functional/test_aarch64_tcg_plugins.py
new file mode 100755
index 00000000000..cb7e9298fb8
--- /dev/null
+++ b/tests/functional/test_aarch64_tcg_plugins.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+#
+# TCG Plugins tests
+#
+# These are a little more involved than the basic tests run by check-tcg.
+#
+# Copyright (c) 2021 Linaro
+#
+# Author:
+#  Alex Bennée
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import tempfile
+import mmap
+import re
+
+from qemu.machine.machine import VMLaunchFailure
+from qemu_test import LinuxKernelTest, Asset
+
+
+class PluginKernelBase(LinuxKernelTest):
+    """
+    Boots a Linux kernel with a TCG plugin enabled.
+    """
+
+    timeout = 120
+    KERNEL_COMMON_COMMAND_LINE = 'printk.time=1 panic=-1 '
+
+    def run_vm(self, kernel_path, kernel_command_line,
+               plugin, plugin_log, console_pattern, args=None):
+
+        vm = self.get_vm()
+        vm.set_console()
+        vm.add_args('-kernel', kernel_path,
+                    '-append', kernel_command_line,
+                    '-plugin', plugin,
+                    '-d', 'plugin',
+                    '-D', plugin_log,
+                    '-net', 'none',
+                    '-no-reboot')
+        if args:
+            vm.add_args(*args)
+
+        try:
+            vm.launch()
+        except VMLaunchFailure as excp:
+            if "plugin interface not enabled in this build" in excp.output:
+                self.skipTest("TCG plugins not enabled")
+            else:
+                self.log.info(f"unhandled launch failure: {excp.output}")
+                raise excp
+
+        self.wait_for_console_pattern(console_pattern, vm)
+        # ensure logs are flushed
+        vm.shutdown()
+
+
+class PluginKernelNormal(PluginKernelBase):
+
+    ASSET_KERNEL = Asset(
+        ('https://storage.tuxboot.com/20230331/arm64/Image'),
+        'ce95a7101a5fecebe0fe630deee6bd97b32ba41bc8754090e9ad8961ea8674c7')
+
+    def test_aarch64_virt_insn(self):
+        self.set_machine('virt')
+        self.cpu='cortex-a53'
+        kernel_path = self.ASSET_KERNEL.fetch()
+        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+                               'console=ttyAMA0')
+        console_pattern = 'Please append a correct "root=" boot option'
+
+        plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin",
+                                                 suffix=".log")
+
+        self.run_vm(kernel_path, kernel_command_line,
+                    self.plugin_file('libinsn'), plugin_log.name,
+                    console_pattern)
+
+        with plugin_log as lf, \
+             mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s:
+
+            m = re.search(br"insns: (?P<count>\d+)", s)
+            if "count" not in m.groupdict():
+                self.fail("Failed to find instruction count")
+            else:
+                count = int(m.group("count"))
+                self.log.info(f"Counted: {count} instructions")
+
+
+    def test_aarch64_virt_insn_icount(self):
+        self.set_machine('virt')
+        self.cpu='cortex-a53'
+        kernel_path = self.ASSET_KERNEL.fetch()
+        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+                               'console=ttyAMA0')
+        console_pattern = 'Please append a correct "root=" boot option'
+
+        plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin",
+                                                 suffix=".log")
+
+        self.run_vm(kernel_path, kernel_command_line,
+                    self.plugin_file('libinsn'), plugin_log.name,
+                    console_pattern,
+                    args=('-icount', 'shift=1'))
+
+        with plugin_log as lf, \
+             mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s:
+
+            m = re.search(br"insns: (?P<count>\d+)", s)
+            if "count" not in m.groupdict():
+                self.fail("Failed to find instruction count")
+            else:
+                count = int(m.group("count"))
+                self.log.info(f"Counted: {count} instructions")
+
+if __name__ == '__main__':
+    LinuxKernelTest.main()
diff --git a/tests/functional/test_aarch64_tuxrun.py b/tests/functional/test_aarch64_tuxrun.py
new file mode 100755
index 00000000000..75adc8acb83
--- /dev/null
+++ b/tests/functional/test_aarch64_tuxrun.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots known good tuxboot images the same way
+# that tuxrun (www.tuxrun.org) does. This tool is used by things like
+# the LKFT project to run regression tests on kernels.
+#
+# Copyright (c) 2023 Linaro Ltd.
+#
+# Author:
+#  Alex Bennée
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import Asset
+from qemu_test.tuxruntest import TuxRunBaselineTest
+
+class TuxRunAarch64Test(TuxRunBaselineTest):
+
+    ASSET_ARM64_KERNEL = Asset(
+        'https://storage.tuxboot.com/buildroot/20241119/arm64/Image',
+        'b74743c5e89e1cea0f73368d24ae0ae85c5204ff84be3b5e9610417417d2f235')
+    ASSET_ARM64_ROOTFS = Asset(
+        'https://storage.tuxboot.com/buildroot/20241119/arm64/rootfs.ext4.zst',
+        'a1acaaae2068df4648d04ff75f532aaa8c5edcd6b936122b6f0db4848a07b465')
+
+    def test_arm64(self):
+        self.set_machine('virt')
+        self.cpu='cortex-a57'
+        self.console='ttyAMA0'
+        self.wait_for_shutdown=False
+        self.common_tuxrun(kernel_asset=self.ASSET_ARM64_KERNEL,
+                           rootfs_asset=self.ASSET_ARM64_ROOTFS)
+
+    ASSET_ARM64BE_KERNEL = Asset(
+        'https://storage.tuxboot.com/buildroot/20241119/arm64be/Image',
+        'fd6af4f16689d17a2c24fe0053cc212edcdf77abdcaf301800b8d38fa9f6e109')
+    ASSET_ARM64BE_ROOTFS = Asset(
+        'https://storage.tuxboot.com/buildroot/20241119/arm64be/rootfs.ext4.zst',
+        'f5e9371b62701aab8dead52592ca7488c8a9e255c9be8d7635c7f30f477c2c21')
+
+    def test_arm64be(self):
+        self.set_machine('virt')
+        self.cpu='cortex-a57'
+        self.console='ttyAMA0'
+        self.wait_for_shutdown=False
+        self.common_tuxrun(kernel_asset=self.ASSET_ARM64BE_KERNEL,
+                           rootfs_asset=self.ASSET_ARM64BE_ROOTFS)
+
+if __name__ == '__main__':
+    TuxRunBaselineTest.main()
diff --git a/tests/functional/test_aarch64_virt.py b/tests/functional/test_aarch64_virt.py
new file mode 100755
index 00000000000..4d0ad90ff89
--- /dev/null
+++ b/tests/functional/test_aarch64_virt.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots various Linux systems and checks the
+# console output.
+#
+# Copyright (c) 2022 Linaro Ltd.
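For reference, the TCG plugin tests above (test_aarch64_tcg_plugins.py) assemble a fairly plain QEMU command line; written out by hand it would look roughly like the sketch below, with the plugin path being illustrative. The icount variant additionally passes '-icount shift=1', which makes virtual time advance by a fixed 2^shift nanoseconds per instruction so the counted instructions are reproducible.

    cmd = [
        "qemu-system-aarch64", "-M", "virt", "-cpu", "cortex-a53",
        "-kernel", "Image", "-append", "printk.time=1 panic=-1 console=ttyAMA0",
        "-plugin", "tests/tcg/plugins/libinsn.so",  # illustrative path
        "-d", "plugin", "-D", "plugin.log",         # plugin output file
        "-net", "none", "-no-reboot",
    ]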
+# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import logging +from subprocess import check_call, DEVNULL + +from qemu_test import QemuSystemTest, Asset, exec_command_and_wait_for_pattern +from qemu_test import wait_for_console_pattern, get_qemu_img + + +class Aarch64VirtMachine(QemuSystemTest): + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + timeout = 360 + + def wait_for_console_pattern(self, success_message, vm=None): + wait_for_console_pattern(self, success_message, + failure_message='Kernel panic - not syncing', + vm=vm) + + ASSET_ALPINE_ISO = Asset( + ('https://dl-cdn.alpinelinux.org/' + 'alpine/v3.17/releases/aarch64/alpine-standard-3.17.2-aarch64.iso'), + '5a36304ecf039292082d92b48152a9ec21009d3a62f459de623e19c4bd9dc027') + + # This tests the whole boot chain from EFI to Userspace + # We only boot a whole OS for the current top level CPU and GIC + # Other test profiles should use more minimal boots + def test_alpine_virt_tcg_gic_max(self): + iso_path = self.ASSET_ALPINE_ISO.fetch() + + self.set_machine('virt') + self.require_accelerator("tcg") + + self.vm.set_console() + self.vm.add_args("-accel", "tcg") + self.vm.add_args("-cpu", "max,pauth-impdef=on") + self.vm.add_args("-machine", + "virt,acpi=on," + "virtualization=on," + "mte=on," + "gic-version=max,iommu=smmuv3") + self.vm.add_args("-smp", "2", "-m", "1024") + self.vm.add_args('-bios', self.build_file('pc-bios', + 'edk2-aarch64-code.fd')) + self.vm.add_args("-drive", f"file={iso_path},media=cdrom,format=raw") + self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0') + self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom') + + self.vm.launch() + self.wait_for_console_pattern('Welcome to Alpine Linux 3.17') + + + ASSET_KERNEL = Asset( + ('https://fileserver.linaro.org/s/' + 'z6B2ARM7DQT3HWN/download'), + '12a54d4805cda6ab647cb7c7bbdb16fafb3df400e0d6f16445c1a0436100ef8d') + + def common_aarch64_virt(self, machine): + """ + Common code to launch basic virt machine with kernel+initrd + and a scratch disk. 
+ """ + self.set_machine('virt') + self.require_accelerator("tcg") + + logger = logging.getLogger('aarch64_virt') + + kernel_path = self.ASSET_KERNEL.fetch() + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyAMA0') + self.vm.add_args('-cpu', 'max,pauth-impdef=on', + '-machine', machine, + '-accel', 'tcg', + '-kernel', kernel_path, + '-append', kernel_command_line) + + # A RNG offers an easy way to generate a few IRQs + self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0') + self.vm.add_args('-object', + 'rng-random,id=rng0,filename=/dev/urandom') + + # Also add a scratch block device + logger.info('creating scratch qcow2 image') + image_path = self.scratch_file('scratch.qcow2') + qemu_img = get_qemu_img(self) + check_call([qemu_img, 'create', '-f', 'qcow2', image_path, '8M'], + stdout=DEVNULL, stderr=DEVNULL) + + # Add the device + self.vm.add_args('-blockdev', + "driver=qcow2," + "file.driver=file," + f"file.filename={image_path},node-name=scratch") + self.vm.add_args('-device', + 'virtio-blk-device,drive=scratch') + + self.vm.launch() + + ps1='#' + self.wait_for_console_pattern('login:') + + commands = [ + ('root', ps1), + ('cat /proc/interrupts', ps1), + ('cat /proc/self/maps', ps1), + ('uname -a', ps1), + ('dd if=/dev/hwrng of=/dev/vda bs=512 count=4', ps1), + ('md5sum /dev/vda', ps1), + ('halt -n', 'reboot: System halted') + ] + + for cmd, pattern in commands: + exec_command_and_wait_for_pattern(self, cmd, pattern) + + def test_aarch64_virt_gicv3(self): + self.common_aarch64_virt("virt,gic_version=3") + + def test_aarch64_virt_gicv2(self): + self.common_aarch64_virt("virt,gic-version=2") + + + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_aarch64_virt_gpu.py b/tests/functional/test_aarch64_virt_gpu.py new file mode 100755 index 00000000000..38447278579 --- /dev/null +++ b/tests/functional/test_aarch64_virt_gpu.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +# +# Functional tests for the various graphics modes we can support. +# +# Copyright (c) 2024, 2025 Linaro Ltd. 
+# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu.machine.machine import VMLaunchFailure + +from qemu_test import Asset +from qemu_test import exec_command_and_wait_for_pattern as ec_and_wait +from qemu_test import skipIfMissingCommands + +from qemu_test.linuxkernel import LinuxKernelTest + +from re import search +from subprocess import check_output, CalledProcessError + +class Aarch64VirtGPUMachine(LinuxKernelTest): + + ASSET_VIRT_GPU_KERNEL = Asset( + 'https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/' + 'download?path=%2F&files=' + 'Image.6.12.16.aarch64', + '7888c51c55d37e86bbbdeb5acea9f08c34e6b0f03c1f5b2463285f6a6f6eec8b') + + ASSET_VIRT_GPU_ROOTFS = Asset( + 'https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/' + 'download?path=%2F&files=' + 'rootfs.aarch64.ext2.zstd', + 'd45118c899420b7e673f1539a37a35480134b3e36e3a59e2cb69b1781cbb14ef') + + def _launch_virt_gpu(self, gpu_device): + + self.set_machine('virt') + self.require_accelerator("tcg") + + kernel_path = self.ASSET_VIRT_GPU_KERNEL.fetch() + image_path = self.uncompress(self.ASSET_VIRT_GPU_ROOTFS, format="zstd") + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyAMA0 root=/dev/vda') + + self.vm.add_args("-accel", "tcg") + self.vm.add_args("-cpu", "cortex-a72") + self.vm.add_args("-machine", "virt,gic-version=max", + '-kernel', kernel_path, + '-append', kernel_command_line) + self.vm.add_args("-smp", "2", "-m", "2048") + self.vm.add_args("-device", gpu_device) + self.vm.add_args("-display", "egl-headless") + self.vm.add_args("-display", "dbus,gl=on") + + self.vm.add_args("-device", "virtio-blk-device,drive=hd0") + self.vm.add_args("-blockdev", + "driver=raw,file.driver=file," + "node-name=hd0,read-only=on," + f"file.filename={image_path}") + self.vm.add_args("-snapshot") + + try: + self.vm.launch() + except VMLaunchFailure as excp: + if "old virglrenderer, blob resources unsupported" in excp.output: + self.skipTest("No blob support for virtio-gpu") + elif "old virglrenderer, venus unsupported" in excp.output: + self.skipTest("No venus support for virtio-gpu") + elif "egl: no drm render node available" in excp.output: + self.skipTest("Can't access host DRM render node") + elif "'type' does not accept value 'egl-headless'" in excp.output: + self.skipTest("egl-headless support is not available") + elif "'type' does not accept value 'dbus'" in excp.output: + self.skipTest("dbus display support is not available") + else: + self.log.info("unhandled launch failure: %s", excp.output) + raise excp + + self.wait_for_console_pattern('buildroot login:') + ec_and_wait(self, 'root', '#') + + def _run_virt_weston_test(self, cmd, fail = None): + + # make it easier to detect successful return to shell + PS1 = 'RES=[$?] 
# ' + OK_CMD = 'RES=[0] # ' + + ec_and_wait(self, 'export XDG_RUNTIME_DIR=/tmp', '#') + ec_and_wait(self, f"export PS1='{PS1}'", OK_CMD) + full_cmd = f"weston -B headless --renderer gl --shell kiosk -- {cmd}" + ec_and_wait(self, full_cmd, OK_CMD, fail) + + @skipIfMissingCommands('zstd') + def test_aarch64_virt_with_virgl_gpu(self): + + self.require_device('virtio-gpu-gl-pci') + + self._launch_virt_gpu("virtio-gpu-gl-pci") + + # subset of the glmark tests + tests = " ".join([f"-b {test}" for test in + ["build", "texture", "shading", + "bump", "desktop", "buffer"]]) + + self._run_virt_weston_test("glmark2-wayland --validate " + tests) + + @skipIfMissingCommands('zstd') + def test_aarch64_virt_with_virgl_blobs_gpu(self): + + self.require_device('virtio-gpu-gl-pci') + + self._launch_virt_gpu("virtio-gpu-gl-pci,hostmem=4G,blob=on") + self._run_virt_weston_test("glmark2-wayland -b:duration=1.0") + + @skipIfMissingCommands('zstd') + @skipIfMissingCommands('vulkaninfo') + def test_aarch64_virt_with_vulkan_gpu(self): + + self.require_device('virtio-gpu-gl-pci') + + try: + vk_info = check_output(["vulkaninfo", "--summary"], + encoding="utf-8") + except CalledProcessError as excp: + self.skipTest(f"Miss-configured host Vulkan: {excp.output}") + + if search(r"driverID\s+=\s+DRIVER_ID_NVIDIA_PROPRIETARY", vk_info): + self.skipTest("Test skipped on NVIDIA proprietary driver") + + self._launch_virt_gpu("virtio-gpu-gl-pci,hostmem=4G,blob=on,venus=on") + self._run_virt_weston_test("vkmark -b:duration=1.0", + "debug: stuck in fence wait with iter at") + + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_aarch64_xen.py b/tests/functional/test_aarch64_xen.py new file mode 100755 index 00000000000..261d796540d --- /dev/null +++ b/tests/functional/test_aarch64_xen.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Xen hypervisor with a domU kernel and +# checks the console output is vaguely sane . +# +# Copyright (c) 2020 Linaro +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import Asset, LinuxKernelTest, wait_for_console_pattern + + +class BootXen(LinuxKernelTest): + """ + Boots a Xen hypervisor with a Linux DomU kernel. 
+ """ + + timeout = 90 + XEN_COMMON_COMMAND_LINE = 'dom0_mem=128M loglvl=all guest_loglvl=all' + + ASSET_KERNEL = Asset( + ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/' + 'download?path=%2F&files=linux-5.9.9-arm64-ajb'), + '00366fa51ea957c19462d2e2aefd480bef80ce727120e714ae48e0c88f261edb') + + def launch_xen(self, xen_path): + """ + Launch Xen with a dom0 guest kernel + """ + self.require_accelerator("tcg") # virtualization=on + self.set_machine('virt') + self.cpu = "cortex-a57" + self.kernel_path = self.ASSET_KERNEL.fetch() + self.log.info("launch with xen_path: %s", xen_path) + + self.vm.set_console() + + self.vm.add_args('-machine', 'virtualization=on', + '-m', '768', + '-kernel', xen_path, + '-append', self.XEN_COMMON_COMMAND_LINE, + '-device', + 'guest-loader,addr=0x47000000,kernel=%s,bootargs=console=hvc0' + % (self.kernel_path)) + + self.vm.launch() + + console_pattern = 'VFS: Cannot open root device' + wait_for_console_pattern(self, console_pattern, "Panic on CPU 0:") + + ASSET_XEN_4_11 = Asset( + ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/download?path=%2F&' + 'files=xen-hypervisor-4.11-arm64_4.11.4%2B37-g3263f257ca-1_arm64.deb'), + 'b745c2631342f9fcc0147ddc364edb62c20ecfebd430e5a3546e7d7c6891c0bc') + + def test_arm64_xen_411_and_dom0(self): + # archive of file from https://deb.debian.org/debian/pool/main/x/xen/ + xen_path = self.archive_extract(self.ASSET_XEN_4_11, format='deb', + member="boot/xen-4.11-arm64") + self.launch_xen(xen_path) + + ASSET_XEN_4_14 = Asset( + ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/download?path=%2F&' + 'files=xen-hypervisor-4.14-arm64_4.14.0%2B80-gd101b417b7-1_arm64.deb'), + 'e930a3293248edabd367d5b4b3b6448b9c99c057096ea8b47228a7870661d5cb') + + def test_arm64_xen_414_and_dom0(self): + # archive of file from https://deb.debian.org/debian/pool/main/x/xen/ + xen_path = self.archive_extract(self.ASSET_XEN_4_14, format='deb', + member="boot/xen-4.14-arm64") + self.launch_xen(xen_path) + + ASSET_XEN_4_15 = Asset( + ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/download?path=%2F&' + 'files=xen-upstream-4.15-unstable.deb'), + '2a9a8af8acf0231844657cc28baab95bd918b0ee2d493ee4ee6f8846e1358bc9') + + def test_arm64_xen_415_and_dom0(self): + xen_path = self.archive_extract(self.ASSET_XEN_4_15, format='deb', + member="boot/xen-4.15-unstable") + self.launch_xen(xen_path) + + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_aarch64_xlnx_versal.py b/tests/functional/test_aarch64_xlnx_versal.py new file mode 100755 index 00000000000..4b9c49e5d64 --- /dev/null +++ b/tests/functional/test_aarch64_xlnx_versal.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + +class XlnxVersalVirtMachine(LinuxKernelTest): + + ASSET_KERNEL = Asset( + ('http://ports.ubuntu.com/ubuntu-ports/dists/bionic-updates/main/' + 'installer-arm64/20101020ubuntu543.19/images/netboot/' + 'ubuntu-installer/arm64/linux'), + 'ce54f74ab0b15cfd13d1a293f2d27ffd79d8a85b7bb9bf21093ae9513864ac79') + + ASSET_INITRD = Asset( + ('http://ports.ubuntu.com/ubuntu-ports/dists/bionic-updates/main/' + 'installer-arm64/20101020ubuntu543.19/images/netboot/' + '/ubuntu-installer/arm64/initrd.gz'), + 'e7a5e716b6f516d8be315c06e7331aaf16994fe4222e0e7cfb34bc015698929e') + + def test_aarch64_xlnx_versal_virt(self): + self.set_machine('xlnx-versal-virt') + kernel_path = self.ASSET_KERNEL.fetch() + 
initrd_path = self.ASSET_INITRD.fetch() + + self.vm.set_console() + self.vm.add_args('-m', '2G', + '-accel', 'tcg', + '-kernel', kernel_path, + '-initrd', initrd_path) + self.vm.launch() + self.wait_for_console_pattern('Checked W+X mappings: passed') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_acpi_bits.py b/tests/functional/test_acpi_bits.py new file mode 100755 index 00000000000..8e0563a97b1 --- /dev/null +++ b/tests/functional/test_acpi_bits.py @@ -0,0 +1,340 @@ +#!/usr/bin/env python3 +# +# Exercise QEMU generated ACPI/SMBIOS tables using biosbits, +# https://biosbits.org/ +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# +# Author: +# Ani Sinha + +# pylint: disable=invalid-name +# pylint: disable=consider-using-f-string + +""" +This is QEMU ACPI/SMBIOS functional tests using biosbits. +Biosbits is available originally at https://biosbits.org/. +This test uses a fork of the upstream bits and has numerous fixes +including an upgraded acpica. The fork is located here: +https://gitlab.com/qemu-project/biosbits-bits . +""" + +import os +import re +import shutil +import subprocess + +from typing import ( + List, + Optional, + Sequence, +) +from qemu.machine import QEMUMachine +from qemu_test import (QemuSystemTest, Asset, skipIfMissingCommands, + skipIfNotMachine) + + +# default timeout of 120 secs is sometimes not enough for bits test. +BITS_TIMEOUT = 200 + +class QEMUBitsMachine(QEMUMachine): # pylint: disable=too-few-public-methods + """ + A QEMU VM, with isa-debugcon enabled and bits iso passed + using -cdrom to QEMU commandline. + + """ + def __init__(self, + binary: str, + args: Sequence[str] = (), + wrapper: Sequence[str] = (), + name: Optional[str] = None, + base_temp_dir: str = "/var/tmp", + debugcon_log: str = "debugcon-log.txt", + debugcon_addr: str = "0x403", + qmp_timer: Optional[float] = None): + # pylint: disable=too-many-arguments + + if name is None: + name = "qemu-bits-%d" % os.getpid() + super().__init__(binary, args, wrapper=wrapper, name=name, + base_temp_dir=base_temp_dir, + qmp_timer=qmp_timer) + self.debugcon_log = debugcon_log + self.debugcon_addr = debugcon_addr + self.base_temp_dir = base_temp_dir + + @property + def _base_args(self) -> List[str]: + args = super()._base_args + args.extend([ + '-chardev', + 'file,path=%s,id=debugcon' %os.path.join(self.base_temp_dir, + self.debugcon_log), + '-device', + 'isa-debugcon,iobase=%s,chardev=debugcon' %self.debugcon_addr, + ]) + return args + + def base_args(self): + """return the base argument to QEMU binary""" + return self._base_args + +@skipIfMissingCommands("xorriso", "mformat") +@skipIfNotMachine("x86_64") +class AcpiBitsTest(QemuSystemTest): #pylint: disable=too-many-instance-attributes + """ + ACPI and SMBIOS tests using biosbits. + """ + # in slower systems the test can take as long as 3 minutes to complete. 
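QEMUBitsMachine above only layers a debug-console capture on top of the normal QEMUMachine arguments: biosbits writes its results to I/O port 0x403, and the extra options route that output into a log file which parse_log() later reads. The equivalent hand-written options (log path illustrative):

    debugcon_args = [
        "-chardev", "file,path=/var/tmp/debugcon-log.txt,id=debugcon",
        "-device", "isa-debugcon,iobase=0x403,chardev=debugcon",
    ]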
+ timeout = BITS_TIMEOUT + + # following are some standard configuration constants + # gitlab CI does shallow clones of depth 20 + BITS_INTERNAL_VER = 2020 + # commit hash must match the artifact tag below + BITS_COMMIT_HASH = 'c7920d2b' + # this is the latest bits release as of today. + BITS_TAG = "qemu-bits-10262023" + + ASSET_BITS = Asset(("https://gitlab.com/qemu-project/" + "biosbits-bits/-/jobs/artifacts/%s/" + "download?job=qemu-bits-build" % BITS_TAG), + '1b8dd612c6831a6b491716a77acc486666aaa867051cdc34f7ce169c2e25f487') + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._vm = None + + self._debugcon_addr = '0x403' + self._debugcon_log = 'debugcon-log.txt' + + def _print_log(self, log): + self.logger.info('\nlogs from biosbits follows:') + self.logger.info('==========================================\n') + self.logger.info(log) + self.logger.info('==========================================\n') + + def copy_bits_config(self): + """ copies the bios bits config file into bits. + """ + bits_config_file = self.data_file('acpi-bits', + 'bits-config', + 'bits-cfg.txt') + target_config_dir = self.scratch_file('bits-%d' % + self.BITS_INTERNAL_VER, + 'boot') + self.assertTrue(os.path.exists(bits_config_file)) + self.assertTrue(os.path.exists(target_config_dir)) + shutil.copy2(bits_config_file, target_config_dir) + self.logger.info('copied config file %s to %s', + bits_config_file, target_config_dir) + + def copy_test_scripts(self): + """copies the python test scripts into bits. """ + + bits_test_dir = self.data_file('acpi-bits', 'bits-tests') + target_test_dir = self.scratch_file('bits-%d' % self.BITS_INTERNAL_VER, + 'boot', 'python') + + self.assertTrue(os.path.exists(bits_test_dir)) + self.assertTrue(os.path.exists(target_test_dir)) + + for filename in os.listdir(bits_test_dir): + if os.path.isfile(os.path.join(bits_test_dir, filename)) and \ + filename.endswith('.py2'): + # All test scripts are named with extension .py2 so that + # they are not run by accident. + # + # These scripts are intended to run inside the test VM + # and are written for python 2.7 not python 3, hence + # would cause syntax errors if loaded ouside the VM. + newfilename = os.path.splitext(filename)[0] + '.py' + shutil.copy2(os.path.join(bits_test_dir, filename), + os.path.join(target_test_dir, newfilename)) + self.logger.info('copied test file %s to %s', + filename, target_test_dir) + + # now remove the pyc test file if it exists, otherwise the + # changes in the python test script won't be executed. + testfile_pyc = os.path.splitext(filename)[0] + '.pyc' + if os.access(os.path.join(target_test_dir, testfile_pyc), + os.F_OK): + os.remove(os.path.join(target_test_dir, testfile_pyc)) + self.logger.info('removed compiled file %s', + os.path.join(target_test_dir, + testfile_pyc)) + + def fix_mkrescue(self, mkrescue): + """ grub-mkrescue is a bash script with two variables, 'prefix' and + 'libdir'. They must be pointed to the right location so that the + iso can be generated appropriately. We point the two variables to + the directory where we have extracted our pre-built bits grub + tarball. 
+ """ + grub_x86_64_mods = self.scratch_file('grub-inst-x86_64-efi') + grub_i386_mods = self.scratch_file('grub-inst') + + self.assertTrue(os.path.exists(grub_x86_64_mods)) + self.assertTrue(os.path.exists(grub_i386_mods)) + + new_script = "" + with open(mkrescue, 'r', encoding='utf-8') as filehandle: + orig_script = filehandle.read() + new_script = re.sub('(^prefix=)(.*)', + r'\1"%s"' %grub_x86_64_mods, + orig_script, flags=re.M) + new_script = re.sub('(^libdir=)(.*)', r'\1"%s/lib"' %grub_i386_mods, + new_script, flags=re.M) + + with open(mkrescue, 'w', encoding='utf-8') as filehandle: + filehandle.write(new_script) + + def generate_bits_iso(self): + """ Uses grub-mkrescue to generate a fresh bits iso with the python + test scripts + """ + bits_dir = self.scratch_file('bits-%d' % self.BITS_INTERNAL_VER) + iso_file = self.scratch_file('bits-%d.iso' % self.BITS_INTERNAL_VER) + mkrescue_script = self.scratch_file('grub-inst-x86_64-efi', + 'bin', + 'grub-mkrescue') + + self.assertTrue(os.access(mkrescue_script, + os.R_OK | os.W_OK | os.X_OK)) + + self.fix_mkrescue(mkrescue_script) + + self.logger.info('using grub-mkrescue for generating biosbits iso ...') + + try: + if os.getenv('V') or os.getenv('BITS_DEBUG'): + proc = subprocess.run([mkrescue_script, '-o', iso_file, + bits_dir], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + check=True) + self.logger.info("grub-mkrescue output %s" % proc.stdout) + else: + subprocess.check_call([mkrescue_script, '-o', + iso_file, bits_dir], + stderr=subprocess.DEVNULL, + stdout=subprocess.DEVNULL) + except Exception as e: # pylint: disable=broad-except + self.skipTest("Error while generating the bits iso. " + "Pass V=1 in the environment to get more details. " + + str(e)) + + self.assertTrue(os.access(iso_file, os.R_OK)) + + self.logger.info('iso file %s successfully generated.', iso_file) + + def setUp(self): # pylint: disable=arguments-differ + super().setUp() + self.logger = self.log + + prebuiltDir = self.scratch_file('prebuilt') + if not os.path.isdir(prebuiltDir): + os.mkdir(prebuiltDir, mode=0o775) + + bits_zip_file = self.scratch_file('prebuilt', + 'bits-%d-%s.zip' + %(self.BITS_INTERNAL_VER, + self.BITS_COMMIT_HASH)) + grub_tar_file = self.scratch_file('prebuilt', + 'bits-%d-%s-grub.tar.gz' + %(self.BITS_INTERNAL_VER, + self.BITS_COMMIT_HASH)) + + # extract the bits artifact in the temp working directory + self.archive_extract(self.ASSET_BITS, sub_dir='prebuilt', format='zip') + + # extract the bits software in the temp working directory + self.archive_extract(bits_zip_file) + self.archive_extract(grub_tar_file) + + self.copy_test_scripts() + self.copy_bits_config() + self.generate_bits_iso() + + def parse_log(self): + """parse the log generated by running bits tests and + check for failures. + """ + debugconf = self.scratch_file(self._debugcon_log) + log = "" + with open(debugconf, 'r', encoding='utf-8') as filehandle: + log = filehandle.read() + + matchiter = re.finditer(r'(.*Summary: )(\d+ passed), (\d+ failed).*', + log) + for match in matchiter: + # verify that no test cases failed. + try: + self.assertEqual(match.group(3).split()[0], '0', + 'Some bits tests seems to have failed. ' \ + 'Please check the test logs for more info.') + except AssertionError as e: + self._print_log(log) + raise e + else: + if os.getenv('V') or os.getenv('BITS_DEBUG'): + self._print_log(log) + + def tearDown(self): + """ + Lets do some cleanups. 
+ """ + if self._vm: + self.assertFalse(not self._vm.is_running) + super().tearDown() + + def test_acpi_smbios_bits(self): + """The main test case implementation.""" + + self.set_machine('pc') + iso_file = self.scratch_file('bits-%d.iso' % self.BITS_INTERNAL_VER) + + self.assertTrue(os.access(iso_file, os.R_OK)) + + self._vm = QEMUBitsMachine(binary=self.qemu_bin, + base_temp_dir=self.workdir, + debugcon_log=self._debugcon_log, + debugcon_addr=self._debugcon_addr) + + self._vm.add_args('-cdrom', '%s' %iso_file) + # the vm needs to be run under icount so that TCG emulation is + # consistent in terms of timing. smilatency tests have consistent + # timing requirements. + self._vm.add_args('-icount', 'auto') + # currently there is no support in bits for recognizing 64-bit SMBIOS + # entry points. QEMU defaults to 64-bit entry points since the + # upstream commit bf376f3020 ("hw/i386/pc: Default to use SMBIOS 3.0 + # for newer machine models"). Therefore, enforce 32-bit entry point. + self._vm.add_args('-machine', 'smbios-entry-point-type=32') + + # enable console logging + self._vm.set_console() + self._vm.launch() + + + # biosbits has been configured to run all the specified test suites + # in batch mode and then automatically initiate a vm shutdown. + self._vm.event_wait('SHUTDOWN', timeout=BITS_TIMEOUT) + self._vm.wait(timeout=None) + self.logger.debug("Checking console output ...") + self.parse_log() + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_alpha_clipper.py b/tests/functional/test_alpha_clipper.py new file mode 100755 index 00000000000..c5d71819531 --- /dev/null +++ b/tests/functional/test_alpha_clipper.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on an Alpha Clipper machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + + +class AlphaClipperTest(LinuxKernelTest): + + ASSET_KERNEL = Asset( + ('http://archive.debian.org/debian/dists/lenny/main/' + 'installer-alpha/20090123lenny10/images/cdrom/vmlinuz'), + '34f53da3fa32212e4f00b03cb944b2ad81c06bc8faaf9b7193b2e544ceeca576') + + def test_alpha_clipper(self): + self.set_machine('clipper') + kernel_path = self.ASSET_KERNEL.fetch() + + uncompressed_kernel = self.uncompress(self.ASSET_KERNEL, format="gz") + + self.vm.set_console() + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' + self.vm.add_args('-nodefaults', + '-kernel', uncompressed_kernel, + '-append', kernel_command_line) + self.vm.launch() + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.wait_for_console_pattern(console_pattern) + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_alpha_replay.py b/tests/functional/test_alpha_replay.py new file mode 100755 index 00000000000..24a17ef5904 --- /dev/null +++ b/tests/functional/test_alpha_replay.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# +# Replay test that boots a Linux kernel on an Alpha machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from replay_kernel import ReplayKernelBase + + +class AlphaReplay(ReplayKernelBase): + + ASSET_KERNEL = Asset( + ('http://archive.debian.org/debian/dists/lenny/main/installer-alpha/' + '20090123lenny10/images/cdrom/vmlinuz'), + '34f53da3fa32212e4f00b03cb944b2ad81c06bc8faaf9b7193b2e544ceeca576') + + def test_clipper(self): + self.set_machine('clipper') + kernel_path = 
self.uncompress(self.ASSET_KERNEL, format='gz') + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=9, + args=('-nodefaults', )) + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_arm_aspeed_ast1030.py b/tests/functional/test_arm_aspeed_ast1030.py new file mode 100755 index 00000000000..77037f01793 --- /dev/null +++ b/tests/functional/test_arm_aspeed_ast1030.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 +# +# Functional test that boots the ASPEED SoCs with firmware +# +# Copyright (C) 2022 ASPEED Technology Inc +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset +from qemu_test import exec_command_and_wait_for_pattern + + +class AST1030Machine(LinuxKernelTest): + + ASSET_ZEPHYR_3_00 = Asset( + ('https://github.com/AspeedTech-BMC' + '/zephyr/releases/download/v00.03.00/ast1030-evb-demo.zip'), + '37fe3ecd4a1b9d620971a15b96492a81093435396eeac69b6f3e384262ff555f') + + def test_ast1030_zephyros_3_00(self): + self.set_machine('ast1030-evb') + + kernel_name = "ast1030-evb-demo/zephyr.elf" + kernel_file = self.archive_extract( + self.ASSET_ZEPHYR_3_00, member=kernel_name) + + self.vm.set_console() + self.vm.add_args('-kernel', kernel_file, '-nographic') + self.vm.launch() + self.wait_for_console_pattern("Booting Zephyr OS") + exec_command_and_wait_for_pattern(self, "help", + "Available commands") + + ASSET_ZEPHYR_1_07 = Asset( + ('https://github.com/AspeedTech-BMC' + '/zephyr/releases/download/v00.01.07/ast1030-evb-demo.zip'), + 'ad52e27959746988afaed8429bf4e12ab988c05c4d07c9d90e13ec6f7be4574c') + + def test_ast1030_zephyros_1_07(self): + self.set_machine('ast1030-evb') + + kernel_name = "ast1030-evb-demo/zephyr.bin" + kernel_file = self.archive_extract( + self.ASSET_ZEPHYR_1_07, member=kernel_name) + + self.vm.set_console() + self.vm.add_args('-kernel', kernel_file, '-nographic') + self.vm.launch() + self.wait_for_console_pattern("Booting Zephyr OS") + for shell_cmd in [ + 'kernel stacks', + 'otp info conf', + 'otp info scu', + 'hwinfo devid', + 'crypto aes256_cbc_vault', + 'random get', + 'jtag JTAG1 sw_xfer high TMS', + 'adc ADC0 resolution 12', + 'adc ADC0 read 42', + 'adc ADC1 read 69', + 'i2c scan I2C_0', + 'i3c attach I3C_0', + 'hash test', + 'kernel uptime', + 'kernel reboot warm', + 'kernel uptime', + 'kernel reboot cold', + 'kernel uptime', + ]: exec_command_and_wait_for_pattern(self, shell_cmd, "uart:~$") + + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_arm_aspeed_ast2500.py b/tests/functional/test_arm_aspeed_ast2500.py new file mode 100755 index 00000000000..6923fe87017 --- /dev/null +++ b/tests/functional/test_arm_aspeed_ast2500.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 +# +# Functional test that boots the ASPEED machines +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset, exec_command_and_wait_for_pattern +from aspeed import AspeedTest + + +class AST2500Machine(AspeedTest): + + ASSET_BR2_202411_AST2500_FLASH = Asset( + ('https://github.com/legoater/qemu-aspeed-boot/raw/master/' + 'images/ast2500-evb/buildroot-2024.11/flash.img'), + '641e6906c18c0f19a2aeb48099d66d4771929c361001d554d0d45c667413e13a') + + def test_arm_ast2500_evb_buildroot(self): + self.set_machine('ast2500-evb') + + image_path = self.ASSET_BR2_202411_AST2500_FLASH.fetch() + + self.vm.add_args('-device', + 
'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test') + self.do_test_arm_aspeed_buildroot_start(image_path, '0x0', + 'ast2500-evb login:') + + exec_command_and_wait_for_pattern(self, + 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device', + 'i2c i2c-3: new_device: Instantiated device lm75 at 0x4d') + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon1/temp1_input', '0') + self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test', + property='temperature', value=18000) + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon1/temp1_input', '18000') + + self.do_test_arm_aspeed_buildroot_poweroff() + + ASSET_SDK_V906_AST2500 = Asset( + 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2500-default-obmc.tar.gz', + '542db84645b4efd8aed50385d7f4dd1caff379a987032311cfa7b563a3addb2a') + + def test_arm_ast2500_evb_sdk(self): + self.set_machine('ast2500-evb') + + self.archive_extract(self.ASSET_SDK_V906_AST2500) + + self.do_test_arm_aspeed_sdk_start( + self.scratch_file("ast2500-default", "image-bmc")) + + self.wait_for_console_pattern('ast2500-default login:') + + +if __name__ == '__main__': + AspeedTest.main() diff --git a/tests/functional/test_arm_aspeed_ast2600.py b/tests/functional/test_arm_aspeed_ast2600.py new file mode 100755 index 00000000000..fdae4c939d8 --- /dev/null +++ b/tests/functional/test_arm_aspeed_ast2600.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +# +# Functional test that boots the ASPEED machines +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import time +import tempfile +import subprocess + +from qemu_test import Asset +from aspeed import AspeedTest +from qemu_test import exec_command_and_wait_for_pattern, skipIfMissingCommands + + +class AST2600Machine(AspeedTest): + + ASSET_BR2_202411_AST2600_FLASH = Asset( + ('https://github.com/legoater/qemu-aspeed-boot/raw/master/' + 'images/ast2600-evb/buildroot-2024.11/flash.img'), + '4bb2f3dfdea31199b51d66b42f686dc5374c144a7346fdc650194a5578b73609') + + def test_arm_ast2600_evb_buildroot(self): + self.set_machine('ast2600-evb') + + image_path = self.ASSET_BR2_202411_AST2600_FLASH.fetch() + + self.vm.add_args('-device', + 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test') + self.vm.add_args('-device', + 'ds1338,bus=aspeed.i2c.bus.3,address=0x32') + self.vm.add_args('-device', + 'i2c-echo,bus=aspeed.i2c.bus.3,address=0x42') + self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00', + 'ast2600-evb login:') + + exec_command_and_wait_for_pattern(self, + 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device', + 'i2c i2c-3: new_device: Instantiated device lm75 at 0x4d') + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon1/temp1_input', '0') + self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test', + property='temperature', value=18000) + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon1/temp1_input', '18000') + + exec_command_and_wait_for_pattern(self, + 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-3/device/new_device', + 'i2c i2c-3: new_device: Instantiated device ds1307 at 0x32') + year = time.strftime("%Y") + exec_command_and_wait_for_pattern(self, 'hwclock -f /dev/rtc1', year) + + exec_command_and_wait_for_pattern(self, + 'echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-3/new_device', + 'i2c i2c-3: new_device: Instantiated device slave-24c02 at 0x64') + exec_command_and_wait_for_pattern(self, + 'i2cset -y 3 0x42 0x64 0x00 0xaa i', '#') + exec_command_and_wait_for_pattern(self, + 'hexdump 
/sys/bus/i2c/devices/3-1064/slave-eeprom', + '0000000 ffaa ffff ffff ffff ffff ffff ffff ffff') + self.do_test_arm_aspeed_buildroot_poweroff() + + ASSET_BR2_202302_AST2600_TPM_FLASH = Asset( + ('https://github.com/legoater/qemu-aspeed-boot/raw/master/' + 'images/ast2600-evb/buildroot-2023.02-tpm/flash.img'), + 'a46009ae8a5403a0826d607215e731a8c68d27c14c41e55331706b8f9c7bd997') + + @skipIfMissingCommands('swtpm') + def test_arm_ast2600_evb_buildroot_tpm(self): + self.set_machine('ast2600-evb') + + image_path = self.ASSET_BR2_202302_AST2600_TPM_FLASH.fetch() + + tpmstate_dir = tempfile.TemporaryDirectory(prefix="qemu_") + socket = os.path.join(tpmstate_dir.name, 'swtpm-socket') + + # We must put the TPM state dir in /tmp/, not the build dir, + # because some distros use AppArmor to lock down swtpm and + # restrict the set of locations it can access files in. + subprocess.run(['swtpm', 'socket', '-d', '--tpm2', + '--tpmstate', f'dir={tpmstate_dir.name}', + '--ctrl', f'type=unixio,path={socket}']) + + self.vm.add_args('-chardev', f'socket,id=chrtpm,path={socket}') + self.vm.add_args('-tpmdev', 'emulator,id=tpm0,chardev=chrtpm') + self.vm.add_args('-device', + 'tpm-tis-i2c,tpmdev=tpm0,bus=aspeed.i2c.bus.12,address=0x2e') + self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00', 'Aspeed AST2600 EVB') + + exec_command_and_wait_for_pattern(self, + 'echo tpm_tis_i2c 0x2e > /sys/bus/i2c/devices/i2c-12/new_device', + 'tpm_tis_i2c 12-002e: 2.0 TPM (device-id 0x1, rev-id 1)') + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/tpm/tpm0/pcr-sha256/0', + 'B804724EA13F52A9072BA87FE8FDCC497DFC9DF9AA15B9088694639C431688E0') + + self.do_test_arm_aspeed_buildroot_poweroff() + + ASSET_SDK_V906_AST2600 = Asset( + 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2600-default-obmc.tar.gz', + '768d76e247896ad78c154b9cff4f766da2ce65f217d620b286a4a03a8a4f68f5') + + def test_arm_ast2600_evb_sdk(self): + self.set_machine('ast2600-evb') + + self.archive_extract(self.ASSET_SDK_V906_AST2600) + + self.vm.add_args('-device', + 'tmp105,bus=aspeed.i2c.bus.5,address=0x4d,id=tmp-test') + self.vm.add_args('-device', + 'ds1338,bus=aspeed.i2c.bus.5,address=0x32') + self.do_test_arm_aspeed_sdk_start( + self.scratch_file("ast2600-default", "image-bmc")) + + self.wait_for_console_pattern('ast2600-default login:') + + exec_command_and_wait_for_pattern(self, 'root', 'Password:') + exec_command_and_wait_for_pattern(self, '0penBmc', + 'root@ast2600-default:~#') + + exec_command_and_wait_for_pattern(self, + 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-5/device/new_device', + 'i2c i2c-5: new_device: Instantiated device lm75 at 0x4d') + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon19/temp1_input', '0') + self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test', + property='temperature', value=18000) + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon19/temp1_input', '18000') + + exec_command_and_wait_for_pattern(self, + 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-5/device/new_device', + 'i2c i2c-5: new_device: Instantiated device ds1307 at 0x32') + year = time.strftime("%Y") + exec_command_and_wait_for_pattern(self, + '/sbin/hwclock -f /dev/rtc1', year) + +if __name__ == '__main__': + AspeedTest.main() diff --git a/tests/functional/test_arm_aspeed_bletchley.py b/tests/functional/test_arm_aspeed_bletchley.py new file mode 100644 index 00000000000..5a60b24b3d2 --- /dev/null +++ b/tests/functional/test_arm_aspeed_bletchley.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 
+# +# Functional test that boots the ASPEED machines +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from aspeed import AspeedTest + + +class BletchleyMachine(AspeedTest): + + ASSET_BLETCHLEY_FLASH = Asset( + 'https://github.com/legoater/qemu-aspeed-boot/raw/master/images/bletchley-bmc/openbmc-20250128071329/obmc-phosphor-image-bletchley-20250128071329.static.mtd.xz', + 'db21d04d47d7bb2a276f59d308614b4dfb70b9c7c81facbbca40a3977a2d8844') + + def test_arm_ast2600_bletchley_openbmc(self): + image_path = self.uncompress(self.ASSET_BLETCHLEY_FLASH) + + self.do_test_arm_aspeed_openbmc('bletchley-bmc', image=image_path, + uboot='2019.04', cpu_id='0xf00', + soc='AST2600 rev A3') + +if __name__ == '__main__': + AspeedTest.main() diff --git a/tests/functional/test_arm_aspeed_catalina.py b/tests/functional/test_arm_aspeed_catalina.py new file mode 100755 index 00000000000..dc2f24e7b43 --- /dev/null +++ b/tests/functional/test_arm_aspeed_catalina.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +# +# Functional test that boots the ASPEED machines +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from aspeed import AspeedTest + + +class CatalinaMachine(AspeedTest): + + ASSET_CATALINA_FLASH = Asset( + 'https://github.com/legoater/qemu-aspeed-boot/raw/a866feb5ef81245b4827a214584bf6bcc72939f6/images/catalina-bmc/obmc-phosphor-image-catalina-20250619123021.static.mtd.xz', + '287402e1ba021991e06be1d098f509444a02a3d81a73a932f66528b159e864f9') + + def test_arm_ast2600_catalina_openbmc(self): + image_path = self.uncompress(self.ASSET_CATALINA_FLASH) + + self.do_test_arm_aspeed_openbmc('catalina-bmc', image=image_path, + uboot='2019.04', cpu_id='0xf00', + soc='AST2600 rev A3') + +if __name__ == '__main__': + AspeedTest.main() diff --git a/tests/functional/test_arm_aspeed_gb200nvl_bmc.py b/tests/functional/test_arm_aspeed_gb200nvl_bmc.py new file mode 100644 index 00000000000..8e8e3f05c1b --- /dev/null +++ b/tests/functional/test_arm_aspeed_gb200nvl_bmc.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# Functional test that boots the ASPEED machines +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from aspeed import AspeedTest + + +class GB200Machine(AspeedTest): + + ASSET_GB200_FLASH = Asset( + 'https://github.com/legoater/qemu-aspeed-boot/raw/refs/heads/master/images/gb200nvl-obmc/obmc-phosphor-image-gb200nvl-obmc-20250702182348.static.mtd.xz', + 'b84819317cb3dc762895ad507705978ef000bfc77c50c33a63bdd37921db0dbc') + + def test_arm_aspeed_gb200_openbmc(self): + image_path = self.uncompress(self.ASSET_GB200_FLASH) + + self.do_test_arm_aspeed_openbmc('gb200nvl-bmc', image=image_path, + uboot='2019.04', cpu_id='0xf00', + soc='AST2600 rev A3', + image_hostname='gb200nvl-obmc') + +if __name__ == '__main__': + AspeedTest.main() diff --git a/tests/functional/test_arm_aspeed_palmetto.py b/tests/functional/test_arm_aspeed_palmetto.py new file mode 100755 index 00000000000..ff0b821be65 --- /dev/null +++ b/tests/functional/test_arm_aspeed_palmetto.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +# +# Functional test that boots the ASPEED machines +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from aspeed import AspeedTest + + +class PalmettoMachine(AspeedTest): + + ASSET_PALMETTO_FLASH = Asset( + 'https://github.com/legoater/qemu-aspeed-boot/raw/master/images/palmetto-bmc/openbmc-20250128071432/obmc-phosphor-image-palmetto-20250128071432.static.mtd', + 
'bce7c392eec75c707a91cfc8fad7ca9a69d7e4f10df936930d65c1cb9897ac81') + + def test_arm_ast2400_palmetto_openbmc(self): + image_path = self.ASSET_PALMETTO_FLASH.fetch() + + self.do_test_arm_aspeed_openbmc('palmetto-bmc', image=image_path, + uboot='2019.04', cpu_id='0x0', + soc='AST2400 rev A1') + +if __name__ == '__main__': + AspeedTest.main() diff --git a/tests/functional/test_arm_aspeed_rainier.py b/tests/functional/test_arm_aspeed_rainier.py new file mode 100755 index 00000000000..602d6194ac8 --- /dev/null +++ b/tests/functional/test_arm_aspeed_rainier.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 +# +# Functional test that boots the ASPEED machines +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from aspeed import AspeedTest + +class RainierMachine(AspeedTest): + + ASSET_RAINIER_EMMC = Asset( + ('https://fileserver.linaro.org/s/B6pJTwWEkzSDi36/download/' + 'mmc-p10bmc-20240617.qcow2'), + 'd523fb478d2b84d5adc5658d08502bc64b1486955683814f89c6137518acd90b') + + def test_arm_aspeed_emmc_boot(self): + self.set_machine('rainier-bmc') + self.require_netdev('user') + + image_path = self.ASSET_RAINIER_EMMC.fetch() + + self.vm.set_console() + self.vm.add_args('-drive', + 'file=' + image_path + ',if=sd,id=sd2,index=2', + '-net', 'nic', '-net', 'user', '-snapshot') + self.vm.launch() + + self.wait_for_console_pattern('U-Boot SPL 2019.04') + self.wait_for_console_pattern('Trying to boot from MMC1') + self.wait_for_console_pattern('U-Boot 2019.04') + self.wait_for_console_pattern('eMMC 2nd Boot') + self.wait_for_console_pattern('## Loading kernel from FIT Image') + self.wait_for_console_pattern('Starting kernel ...') + self.wait_for_console_pattern('Booting Linux on physical CPU 0xf00') + self.wait_for_console_pattern('mmcblk0: p1 p2 p3 p4 p5 p6 p7') + self.wait_for_console_pattern('IBM eBMC (OpenBMC for IBM Enterprise') + + ASSET_DEBIAN_LINUX_ARMHF_DEB = Asset( + ('http://snapshot.debian.org/archive/debian/20220606T211338Z/pool/main/l/linux/linux-image-5.17.0-2-armmp_5.17.6-1%2Bb1_armhf.deb'), + '8acb2b4439faedc2f3ed4bdb2847ad4f6e0491f73debaeb7f660c8abe4dcdc0e') + + def test_arm_debian_kernel_boot(self): + self.set_machine('rainier-bmc') + + kernel_path = self.archive_extract( + self.ASSET_DEBIAN_LINUX_ARMHF_DEB, + member='boot/vmlinuz-5.17.0-2-armmp') + dtb_path = self.archive_extract( + self.ASSET_DEBIAN_LINUX_ARMHF_DEB, + member='usr/lib/linux-image-5.17.0-2-armmp/aspeed-bmc-ibm-rainier.dtb') + + self.vm.set_console() + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-net', 'nic') + self.vm.launch() + + self.wait_for_console_pattern("Booting Linux on physical CPU 0xf00") + self.wait_for_console_pattern("SMP: Total of 2 processors activated") + self.wait_for_console_pattern("No filesystem could mount root") + + +if __name__ == '__main__': + AspeedTest.main() diff --git a/tests/functional/test_arm_aspeed_romulus.py b/tests/functional/test_arm_aspeed_romulus.py new file mode 100755 index 00000000000..0447212bbf0 --- /dev/null +++ b/tests/functional/test_arm_aspeed_romulus.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +# +# Functional test that boots the ASPEED machines +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from aspeed import AspeedTest + + +class RomulusMachine(AspeedTest): + + ASSET_ROMULUS_FLASH = Asset( + 'https://github.com/legoater/qemu-aspeed-boot/raw/master/images/romulus-bmc/openbmc-20250128071340/obmc-phosphor-image-romulus-20250128071340.static.mtd', + 
'6d031376440c82ed9d087d25e9fa76aea75b42f80daa252ec402c0bc3cf6cf5b') + + def test_arm_ast2500_romulus_openbmc(self): + image_path = self.ASSET_ROMULUS_FLASH.fetch() + + self.do_test_arm_aspeed_openbmc('romulus-bmc', image=image_path, + uboot='2019.04', cpu_id='0x0', + soc='AST2500 rev A1') + +if __name__ == '__main__': + AspeedTest.main() diff --git a/tests/functional/test_arm_aspeed_witherspoon.py b/tests/functional/test_arm_aspeed_witherspoon.py new file mode 100644 index 00000000000..51a2d47af28 --- /dev/null +++ b/tests/functional/test_arm_aspeed_witherspoon.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +# +# Functional test that boots the ASPEED machines +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from aspeed import AspeedTest + + +class WitherspoonMachine(AspeedTest): + + ASSET_WITHERSPOON_FLASH = Asset( + 'https://github.com/legoater/qemu-aspeed-boot/raw/master/images/witherspoon-bmc/openbmc-20240618035022/obmc-phosphor-image-witherspoon-20240618035022.ubi.mtd', + '937d9ed449ea6c6cbed983519088a42d0cafe276bcfe4fce07772ca6673f9213') + + def test_arm_ast2500_witherspoon_openbmc(self): + image_path = self.ASSET_WITHERSPOON_FLASH.fetch() + + self.do_test_arm_aspeed_openbmc('witherspoon-bmc', image=image_path, + uboot='2016.07', cpu_id='0x0', + soc='AST2500 rev A1') + +if __name__ == '__main__': + AspeedTest.main() diff --git a/tests/functional/test_arm_bflt.py b/tests/functional/test_arm_bflt.py new file mode 100755 index 00000000000..f273fc83546 --- /dev/null +++ b/tests/functional/test_arm_bflt.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 +# +# Test the bFLT loader format +# +# Copyright (C) 2019 Philippe Mathieu-Daudé +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import bz2 + +from qemu_test import QemuUserTest, Asset +from qemu_test import skipIfMissingCommands, skipUntrustedTest + + +class LoadBFLT(QemuUserTest): + + ASSET_ROOTFS = Asset( + ('https://elinux.org/images/5/51/Stm32_mini_rootfs.cpio.bz2'), + 'eefb788e4980c9e8d6c9d60ce7d15d4da6bf4fbc6a80f487673824600d5ba9cc') + + @skipIfMissingCommands('cpio') + @skipUntrustedTest() + def test_stm32(self): + # See https://elinux.org/STM32#User_Space + rootfs_path_bz2 = self.ASSET_ROOTFS.fetch() + busybox_path = self.scratch_file("bin", "busybox") + + with bz2.open(rootfs_path_bz2, 'rb') as cpio_handle: + self.archive_extract(cpio_handle, format="cpio") + + res = self.run_cmd(busybox_path) + ver = 'BusyBox v1.24.0.git (2015-02-03 22:17:13 CET) multi-call binary.' 
+ self.assertIn(ver, res.stdout) + + res = self.run_cmd(busybox_path, ['uname', '-a']) + unm = 'armv7l GNU/Linux' + self.assertIn(unm, res.stdout) + + +if __name__ == '__main__': + QemuUserTest.main() diff --git a/tests/functional/test_arm_bpim2u.py b/tests/functional/test_arm_bpim2u.py new file mode 100755 index 00000000000..8bed64b702f --- /dev/null +++ b/tests/functional/test_arm_bpim2u.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on a Banana Pi machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +from qemu_test import LinuxKernelTest, exec_command_and_wait_for_pattern +from qemu_test import Asset, interrupt_interactive_console_until_pattern +from qemu_test import skipBigDataTest +from qemu_test.utils import image_pow2ceil_expand + + +class BananaPiMachine(LinuxKernelTest): + + ASSET_DEB = Asset( + ('https://apt.armbian.com/pool/main/l/linux-6.6.16/' + 'linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb'), + '3d968c15b121ede871dce49d13ee7644d6f74b6b121b84c9a40f51b0c80d6d22') + + ASSET_INITRD = Asset( + ('https://github.com/groeck/linux-build-test/raw/' + '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' + 'arm/rootfs-armv7a.cpio.gz'), + '2c8dbdb16ea7af2dfbcbea96044dde639fb07d09fd3c4fb31f2027ef71e55ddd') + + ASSET_ROOTFS = Asset( + ('http://storage.kernelci.org/images/rootfs/buildroot/' + 'buildroot-baseline/20230703.0/armel/rootfs.ext2.xz'), + '42b44a12965ac0afe9a88378527fb698a7dc76af50495efc2361ee1595b4e5c6') + + ASSET_SD_IMAGE = Asset( + ('https://downloads.openwrt.org/releases/22.03.3/targets/sunxi/cortexa7/' + 'openwrt-22.03.3-sunxi-cortexa7-sinovoip_bananapi-m2-ultra-ext4-sdcard.img.gz'), + '5b41b4e11423e562c6011640f9a7cd3bdd0a3d42b83430f7caa70a432e6cd82c') + + def test_arm_bpim2u(self): + self.set_machine('bpim2u') + kernel_path = self.archive_extract( + self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi') + dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/' + 'sun8i-r40-bananapi-m2-ultra.dtb') + dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0,115200n8 ' + 'earlycon=uart,mmio32,0x1c28000') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-append', kernel_command_line) + self.vm.launch() + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.wait_for_console_pattern(console_pattern) + os.remove(kernel_path) + os.remove(dtb_path) + + def test_arm_bpim2u_initrd(self): + self.set_machine('bpim2u') + kernel_path = self.archive_extract( + self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi') + dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/' + 'sun8i-r40-bananapi-m2-ultra.dtb') + dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path) + initrd_path = self.uncompress(self.ASSET_INITRD) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0,115200 ' + 'panic=-1 noreboot') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-initrd', initrd_path, + '-append', kernel_command_line, + '-no-reboot') + self.vm.launch() + self.wait_for_console_pattern('Boot successful.') + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'Allwinner sun8i Family') + exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', + 'system-control@1c00000') + exec_command_and_wait_for_pattern(self, 'reboot', + 
'reboot: Restarting system') + # Wait for VM to shut down gracefully + self.vm.wait() + os.remove(kernel_path) + os.remove(dtb_path) + os.remove(initrd_path) + + def test_arm_bpim2u_gmac(self): + self.set_machine('bpim2u') + self.require_netdev('user') + + deb_path = self.ASSET_DEB.fetch() + kernel_path = self.archive_extract( + self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi') + dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/' + 'sun8i-r40-bananapi-m2-ultra.dtb') + dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path) + rootfs_path = self.uncompress(self.ASSET_ROOTFS) + image_pow2ceil_expand(rootfs_path) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0,115200 ' + 'root=b300 rootwait rw ' + 'panic=-1 noreboot') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-drive', 'file=' + rootfs_path + ',if=sd,format=raw', + '-net', 'nic,model=gmac,netdev=host_gmac', + '-netdev', 'user,id=host_gmac', + '-append', kernel_command_line, + '-no-reboot') + self.vm.launch() + shell_ready = "/bin/sh: can't access tty; job control turned off" + self.wait_for_console_pattern(shell_ready) + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'Allwinner sun8i Family') + exec_command_and_wait_for_pattern(self, 'cat /proc/partitions', + 'mmcblk') + exec_command_and_wait_for_pattern(self, 'ifconfig eth0 up', + 'eth0: Link is Up') + exec_command_and_wait_for_pattern(self, 'udhcpc eth0', + 'udhcpc: lease of 10.0.2.15 obtained') + exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2', + '3 packets transmitted, 3 packets received, 0% packet loss') + exec_command_and_wait_for_pattern(self, 'reboot', + 'reboot: Restarting system') + # Wait for VM to shut down gracefully + self.vm.wait() + os.remove(kernel_path) + os.remove(dtb_path) + os.remove(rootfs_path) + + @skipBigDataTest() + def test_arm_bpim2u_openwrt_22_03_3(self): + self.set_machine('bpim2u') + self.require_netdev('user') + + # This test download a 8.9 MiB compressed image and expand it + # to 127 MiB. + image_path = self.uncompress(self.ASSET_SD_IMAGE) + image_pow2ceil_expand(image_path) + + self.vm.set_console() + self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw', + '-nic', 'user', + '-no-reboot') + self.vm.launch() + + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'usbcore.nousb ' + 'noreboot') + + self.wait_for_console_pattern('U-Boot SPL') + + interrupt_interactive_console_until_pattern( + self, 'Hit any key to stop autoboot:', '=>') + exec_command_and_wait_for_pattern(self, "setenv extraargs '" + + kernel_command_line + "'", '=>') + exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...') + + self.wait_for_console_pattern( + 'Please press Enter to activate this console.') + + exec_command_and_wait_for_pattern(self, ' ', 'root@') + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'Allwinner sun8i Family') + exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', + 'system-control@1c00000') + os.remove(image_path) + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_arm_canona1100.py b/tests/functional/test_arm_canona1100.py new file mode 100755 index 00000000000..21a1a596a0c --- /dev/null +++ b/tests/functional/test_arm_canona1100.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +# +# Functional test that boots the canon-a1100 machine with firmware +# +# Copyright (c) 2020 Red Hat, Inc. 
+# +# Author: +# Thomas Huth +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern + + +class CanonA1100Machine(QemuSystemTest): + """Boots the barebox firmware and checks that the console is operational""" + + timeout = 90 + + ASSET_BIOS = Asset(('https://qemu-advcal.gitlab.io' + '/qac-best-of-multiarch/download/day18.tar.xz'), + '28e71874ce985be66b7fd1345ed88cb2523b982f899c8d2900d6353054a1be49') + + def test_arm_canona1100(self): + self.set_machine('canon-a1100') + + bios = self.archive_extract(self.ASSET_BIOS, + member="day18/barebox.canon-a1100.bin") + self.vm.set_console() + self.vm.add_args('-bios', bios) + self.vm.launch() + wait_for_console_pattern(self, 'running /env/bin/init') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_arm_collie.py b/tests/functional/test_arm_collie.py new file mode 100755 index 00000000000..fe1be3d0792 --- /dev/null +++ b/tests/functional/test_arm_collie.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on a collie machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + + +class CollieTest(LinuxKernelTest): + + ASSET_ZIMAGE = Asset( + 'https://github.com/groeck/linux-test-downloads/raw/225223f2ad7d637b34426810bf6c3b727b76a718/collie/zImage', + '10ace8abf9e0875ef8a83b8829cc3b5b50bc6d7bc3ca29f19f49f5673a43c13b') + + ASSET_ROOTFS = Asset( + 'https://github.com/groeck/linux-test-downloads/raw/225223f2ad7d637b34426810bf6c3b727b76a718/collie/rootfs-sa110.cpio', + '89ccaaa5c6b33331887047e1618ffe81b0f55909173944347d5d2426f3bcc1f2') + + def test_arm_collie(self): + self.set_machine('collie') + zimage_path = self.ASSET_ZIMAGE.fetch() + rootfs_path = self.ASSET_ROOTFS.fetch() + self.vm.add_args('-append', 'rdinit=/sbin/init console=ttySA1') + self.launch_kernel(zimage_path, + initrd=rootfs_path, + wait_for='reboot: Restarting system') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_arm_cubieboard.py b/tests/functional/test_arm_cubieboard.py new file mode 100755 index 00000000000..b536c2f77a0 --- /dev/null +++ b/tests/functional/test_arm_cubieboard.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern +from qemu_test import interrupt_interactive_console_until_pattern +from qemu_test import skipBigDataTest +from qemu_test.utils import image_pow2ceil_expand + + +class CubieboardMachine(LinuxKernelTest): + + ASSET_DEB = Asset( + ('https://apt.armbian.com/pool/main/l/linux-6.6.16/' + 'linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb'), + '3d968c15b121ede871dce49d13ee7644d6f74b6b121b84c9a40f51b0c80d6d22') + + ASSET_INITRD = Asset( + ('https://github.com/groeck/linux-build-test/raw/' + '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' + 'arm/rootfs-armv5.cpio.gz'), + '334b8d256db67a3f2b3ad070aa08b5ade39624e0e7e35b02f4359a577bc8f39b') + + ASSET_SATA_ROOTFS = Asset( + ('https://github.com/groeck/linux-build-test/raw/' + '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' + 'arm/rootfs-armv5.ext2.gz'), + 
'17fc750da568580b39372133051ef2f0a963c0c0b369b845614442d025701745') + + ASSET_OPENWRT = Asset( + ('https://downloads.openwrt.org/releases/22.03.2/targets/sunxi/cortexa8/' + 'openwrt-22.03.2-sunxi-cortexa8-cubietech_a10-cubieboard-ext4-sdcard.img.gz'), + '94b5ecbfbc0b3b56276e5146b899eafa2ac5dc2d08733d6705af9f144f39f554') + + def test_arm_cubieboard_initrd(self): + self.set_machine('cubieboard') + kernel_path = self.archive_extract( + self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi') + dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/' + + 'sun4i-a10-cubieboard.dtb') + dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path) + initrd_path = self.uncompress(self.ASSET_INITRD) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0,115200 ' + 'usbcore.nousb ' + 'panic=-1 noreboot') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-initrd', initrd_path, + '-append', kernel_command_line, + '-no-reboot') + self.vm.launch() + self.wait_for_console_pattern('Boot successful.') + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'Allwinner sun4i/sun5i') + exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', + 'system-control@1c00000') + exec_command_and_wait_for_pattern(self, 'reboot', + 'reboot: Restarting system') + # Wait for VM to shut down gracefully + self.vm.wait() + + def test_arm_cubieboard_sata(self): + self.set_machine('cubieboard') + kernel_path = self.archive_extract( + self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi') + dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/' + + 'sun4i-a10-cubieboard.dtb') + dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path) + + rootfs_path = self.uncompress(self.ASSET_SATA_ROOTFS) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0,115200 ' + 'usbcore.nousb ' + 'root=/dev/sda ro ' + 'panic=-1 noreboot') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-drive', 'if=none,format=raw,id=disk0,file=' + + rootfs_path, + '-device', 'ide-hd,bus=ide.0,drive=disk0', + '-append', kernel_command_line, + '-no-reboot') + self.vm.launch() + self.wait_for_console_pattern('Boot successful.') + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'Allwinner sun4i/sun5i') + exec_command_and_wait_for_pattern(self, 'cat /proc/partitions', + 'sda') + exec_command_and_wait_for_pattern(self, 'reboot', + 'reboot: Restarting system') + # Wait for VM to shut down gracefully + self.vm.wait() + + @skipBigDataTest() + def test_arm_cubieboard_openwrt_22_03_2(self): + # This test download a 7.5 MiB compressed image and expand it + # to 126 MiB. 
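+        # Note on the helper used below: image_pow2ceil_expand() (imported
+        # from qemu_test.utils) pads the downloaded image up to the next
+        # power-of-two size, since QEMU's SD card emulation expects
+        # power-of-two card sizes. Roughly, as an illustrative sketch only
+        # (the real helper may differ):
+        #
+        #   size = os.path.getsize(path)
+        #   if size & (size - 1):               # not already a power of two
+        #       with open(path, 'r+b') as fd:
+        #           fd.truncate(1 << size.bit_length())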
+ self.set_machine('cubieboard') + self.require_netdev('user') + + image_path = self.uncompress(self.ASSET_OPENWRT) + image_pow2ceil_expand(image_path) + + self.vm.set_console() + self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw', + '-nic', 'user', + '-no-reboot') + self.vm.launch() + + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'usbcore.nousb ' + 'noreboot') + + self.wait_for_console_pattern('U-Boot SPL') + + interrupt_interactive_console_until_pattern( + self, 'Hit any key to stop autoboot:', '=>') + exec_command_and_wait_for_pattern(self, "setenv extraargs '" + + kernel_command_line + "'", '=>') + exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...') + + self.wait_for_console_pattern( + 'Please press Enter to activate this console.') + + exec_command_and_wait_for_pattern(self, ' ', 'root@') + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'Allwinner sun4i/sun5i') + exec_command_and_wait_for_pattern(self, 'reboot', + 'reboot: Restarting system') + # Wait for VM to shut down gracefully + self.vm.wait() + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_arm_emcraft_sf2.py b/tests/functional/test_arm_emcraft_sf2.py new file mode 100755 index 00000000000..f9f3f069e2c --- /dev/null +++ b/tests/functional/test_arm_emcraft_sf2.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import shutil + +from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern +from qemu_test.utils import file_truncate + +class EmcraftSf2Machine(LinuxKernelTest): + + ASSET_UBOOT = Asset( + ('https://raw.githubusercontent.com/Subbaraya-Sundeep/qemu-test-binaries/' + 'fe371d32e50ca682391e1e70ab98c2942aeffb01/u-boot'), + '5c6a15103375db11b21f2236473679a9dbbed6d89652bfcdd501c263d68ab725') + + ASSET_SPI = Asset( + ('https://raw.githubusercontent.com/Subbaraya-Sundeep/qemu-test-binaries/' + 'fe371d32e50ca682391e1e70ab98c2942aeffb01/spi.bin'), + 'cd9bdd2c4cb55a59c3adb6bcf74881667c4500dde0570a43aa3be2b17eecfdb6') + + def test_arm_emcraft_sf2(self): + self.set_machine('emcraft-sf2') + self.require_netdev('user') + + uboot_path = self.ASSET_UBOOT.fetch() + spi_path = self.ASSET_SPI.fetch() + spi_path_rw = self.scratch_file('spi.bin') + shutil.copy(spi_path, spi_path_rw) + os.chmod(spi_path_rw, 0o600) + + file_truncate(spi_path_rw, 16 << 20) # Spansion S25FL128SDPBHICO is 16 MiB + + self.vm.set_console() + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + self.vm.add_args('-kernel', uboot_path, + '-append', kernel_command_line, + '-drive', 'file=' + spi_path_rw + ',if=mtd,format=raw', + '-no-reboot') + self.vm.launch() + self.wait_for_console_pattern('Enter \'help\' for a list') + + exec_command_and_wait_for_pattern(self, 'ifconfig eth0 10.0.2.15', + 'eth0: link becomes ready') + exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2', + '3 packets transmitted, 3 packets received, 0% packet loss') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_arm_integratorcp.py b/tests/functional/test_arm_integratorcp.py new file mode 100755 index 00000000000..4f00924aa03 --- /dev/null +++ b/tests/functional/test_arm_integratorcp.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel and checks the console +# +# Copyright (c) 2020 Red Hat, Inc. 
+# +# Author: +# Thomas Huth +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import logging + +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern +from qemu_test import skipIfMissingImports, skipUntrustedTest + + +class IntegratorMachine(QemuSystemTest): + + timeout = 90 + + ASSET_KERNEL = Asset( + ('https://github.com/zayac/qemu-arm/raw/master/' + 'arm-test/kernel/zImage.integrator'), + '26e7c7e8f943de785d95bd3c74d66451604a9b6a7a3d25dceb279e7548fd8e78') + + ASSET_INITRD = Asset( + ('https://github.com/zayac/qemu-arm/raw/master/' + 'arm-test/kernel/arm_root.img'), + 'e187c27fb342ad148c7f33475fbed124933e0b3f4be8c74bc4f3426a4793373a') + + ASSET_TUXLOGO = Asset( + ('https://github.com/torvalds/linux/raw/v2.6.12/' + 'drivers/video/logo/logo_linux_vga16.ppm'), + 'b762f0d91ec018887ad1b334543c2fdf9be9fdfc87672b409211efaa3ea0ef79') + + def boot_integratorcp(self): + kernel_path = self.ASSET_KERNEL.fetch() + initrd_path = self.ASSET_INITRD.fetch() + + self.set_machine('integratorcp') + self.vm.set_console() + self.vm.add_args('-kernel', kernel_path, + '-initrd', initrd_path, + '-append', 'printk.time=0 console=ttyAMA0') + self.vm.launch() + + @skipUntrustedTest() + def test_integratorcp_console(self): + """ + Boots the Linux kernel and checks that the console is operational + """ + self.boot_integratorcp() + wait_for_console_pattern(self, 'Log in as root') + + @skipIfMissingImports("numpy", "cv2") + @skipUntrustedTest() + def test_framebuffer_tux_logo(self): + """ + Boot Linux and verify the Tux logo is displayed on the framebuffer. + """ + import numpy as np + import cv2 + + screendump_path = self.scratch_file("screendump.pbm") + tuxlogo_path = self.ASSET_TUXLOGO.fetch() + + self.boot_integratorcp() + framebuffer_ready = 'Console: switching to colour frame buffer device' + wait_for_console_pattern(self, framebuffer_ready) + self.vm.cmd('human-monitor-command', command_line='stop') + res = self.vm.cmd('human-monitor-command', + command_line='screendump %s' % screendump_path) + if 'unknown command' in res: + self.skipTest('screendump not available') + logger = logging.getLogger('framebuffer') + + cpu_count = 1 + match_threshold = 0.92 + screendump_bgr = cv2.imread(screendump_path) + screendump_gray = cv2.cvtColor(screendump_bgr, cv2.COLOR_BGR2GRAY) + result = cv2.matchTemplate(screendump_gray, cv2.imread(tuxlogo_path, 0), + cv2.TM_CCOEFF_NORMED) + loc = np.where(result >= match_threshold) + tux_count = 0 + for tux_count, pt in enumerate(zip(*loc[::-1]), start=1): + logger.debug('found Tux at position [x, y] = %s', pt) + self.assertGreaterEqual(tux_count, cpu_count) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_arm_max78000fthr.py b/tests/functional/test_arm_max78000fthr.py new file mode 100755 index 00000000000..a82980b0f7c --- /dev/null +++ b/tests/functional/test_arm_max78000fthr.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 +# +# Functional test that checks the max78000fthr machine. 
+# Tests ICC, GCR, TRNG, AES, and UART +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import QemuSystemTest, Asset, exec_command_and_wait_for_pattern +from qemu_test import wait_for_console_pattern + + +class Max78000Machine(QemuSystemTest): + + ASSET_FW = Asset( + 'https://github.com/JacksonDonaldson/max78000Test/raw/main/build/max78000.bin', + '86940b4bf60931bc6a8aa5db4b9f7f3cf8f64dbbd7ac534647980e536cf3adf7') + + def test_fthr(self): + self.set_machine('max78000fthr') + fw_path = self.ASSET_FW.fetch() + self.vm.set_console() + self.vm.add_args('-kernel', fw_path) + self.vm.add_args('-device', "loader,file=" + fw_path + ",addr=0x10000000") + self.vm.launch() + + wait_for_console_pattern(self, 'started') + + # i -> prints instruction cache values + exec_command_and_wait_for_pattern(self, 'i', 'CTRL: 00010001') + + # r -> gcr resets the machine + exec_command_and_wait_for_pattern(self, 'r', 'started') + + # z -> sets some memory, then has gcr zero it + exec_command_and_wait_for_pattern(self, 'z', 'initial value: 12345678') + wait_for_console_pattern(self, "after memz: 00000000") + + # t -> runs trng + exec_command_and_wait_for_pattern(self, 't', 'random data:') + + # a -> runs aes + exec_command_and_wait_for_pattern(self, 'a', + 'encrypted to : a47ca9dd e0df4c86 a070af6e 91710dec') + wait_for_console_pattern(self, + 'encrypted to : cab7a28e bf456751 9049fcea 8960494b') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_arm_microbit.py b/tests/functional/test_arm_microbit.py new file mode 100755 index 00000000000..68ea4e73d62 --- /dev/null +++ b/tests/functional/test_arm_microbit.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Copyright 2025, The QEMU Project Developers. +# +# A functional test that runs MicroPython on the arm microbit machine. 
+ +from qemu_test import QemuSystemTest, Asset, exec_command_and_wait_for_pattern +from qemu_test import wait_for_console_pattern + + +class MicrobitMachine(QemuSystemTest): + + ASSET_MICRO = Asset('https://ozlabs.org/~joel/microbit-micropython.hex', + '021641f93dfb11767d4978dbb3ca7f475d1b13c69e7f4aec3382f212636bffd6') + + def test_arm_microbit(self): + self.set_machine('microbit') + + micropython = self.ASSET_MICRO.fetch() + self.vm.set_console() + self.vm.add_args('-device', f'loader,file={micropython}') + self.vm.launch() + wait_for_console_pattern(self, 'Type "help()" for more information.') + exec_command_and_wait_for_pattern(self, 'import machine as mch', '>>>') + exec_command_and_wait_for_pattern(self, 'mch.reset()', 'MicroPython') + wait_for_console_pattern(self, '>>>') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_arm_orangepi.py b/tests/functional/test_arm_orangepi.py new file mode 100755 index 00000000000..f9bfa8c78d9 --- /dev/null +++ b/tests/functional/test_arm_orangepi.py @@ -0,0 +1,237 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on an Orange Pi machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import shutil + +from qemu_test import LinuxKernelTest, exec_command_and_wait_for_pattern +from qemu_test import Asset, interrupt_interactive_console_until_pattern +from qemu_test import wait_for_console_pattern, skipBigDataTest +from qemu_test.utils import image_pow2ceil_expand + + +class OrangePiMachine(LinuxKernelTest): + + ASSET_DEB = Asset( + ('https://apt.armbian.com/pool/main/l/linux-6.6.16/' + 'linux-image-current-sunxi_24.2.1_armhf__6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb'), + '3d968c15b121ede871dce49d13ee7644d6f74b6b121b84c9a40f51b0c80d6d22') + + ASSET_INITRD = Asset( + ('https://github.com/groeck/linux-build-test/raw/' + '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' + 'arm/rootfs-armv7a.cpio.gz'), + '2c8dbdb16ea7af2dfbcbea96044dde639fb07d09fd3c4fb31f2027ef71e55ddd') + + ASSET_ROOTFS = Asset( + ('http://storage.kernelci.org/images/rootfs/buildroot/' + 'buildroot-baseline/20230703.0/armel/rootfs.ext2.xz'), + '42b44a12965ac0afe9a88378527fb698a7dc76af50495efc2361ee1595b4e5c6') + + ASSET_ARMBIAN = Asset( + ('https://k-space.ee.armbian.com/archive/orangepipc/archive/' + 'Armbian_23.8.1_Orangepipc_jammy_current_6.1.47.img.xz'), + 'b386dff6552513b5f164ea00f94814a6b0f1da9fb90b83725e949cf797e11afb') + + ASSET_UBOOT = Asset( + ('http://snapshot.debian.org/archive/debian/20200108T145233Z/pool/' + 'main/u/u-boot/u-boot-sunxi_2020.01%2Bdfsg-1_armhf.deb'), + '9223d94dc283ab54df41ce9d6f69025a5b47fece29fb67a714e23aa0cdf3bdfa') + + ASSET_NETBSD = Asset( + ('https://archive.netbsd.org/pub/NetBSD-archive/NetBSD-9.0/' + 'evbarm-earmv7hf/binary/gzimg/armv7.img.gz'), + '20d3e07dc057e15c12452620e90ecab2047f0f7940d9cba8182ebc795927177f') + + def test_arm_orangepi(self): + self.set_machine('orangepi-pc') + kernel_path = self.archive_extract( + self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi') + dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/' + + 'sun8i-h3-orangepi-pc.dtb') + dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0,115200n8 ' + 'earlycon=uart,mmio32,0x1c28000') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-append', kernel_command_line) + self.vm.launch() + console_pattern = 'Kernel command 
line: %s' % kernel_command_line + self.wait_for_console_pattern(console_pattern) + os.remove(kernel_path) + os.remove(dtb_path) + + def test_arm_orangepi_initrd(self): + self.set_machine('orangepi-pc') + kernel_path = self.archive_extract( + self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi') + dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/' + + 'sun8i-h3-orangepi-pc.dtb') + dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path) + initrd_path = self.uncompress(self.ASSET_INITRD) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0,115200 ' + 'panic=-1 noreboot') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-initrd', initrd_path, + '-append', kernel_command_line, + '-no-reboot') + self.vm.launch() + self.wait_for_console_pattern('Boot successful.') + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'Allwinner sun8i Family') + exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', + 'system-control@1c00000') + exec_command_and_wait_for_pattern(self, 'reboot', + 'reboot: Restarting system') + # Wait for VM to shut down gracefully + self.vm.wait() + os.remove(kernel_path) + os.remove(dtb_path) + os.remove(initrd_path) + + def test_arm_orangepi_sd(self): + self.set_machine('orangepi-pc') + self.require_netdev('user') + kernel_path = self.archive_extract( + self.ASSET_DEB, member='boot/vmlinuz-6.6.16-current-sunxi') + dtb_path = ('usr/lib/linux-image-6.6.16-current-sunxi/' + + 'sun8i-h3-orangepi-pc.dtb') + dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path) + rootfs_path = self.uncompress(self.ASSET_ROOTFS) + image_pow2ceil_expand(rootfs_path) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0,115200 ' + 'root=/dev/mmcblk0 rootwait rw ' + 'panic=-1 noreboot') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-drive', 'file=' + rootfs_path + ',if=sd,format=raw', + '-append', kernel_command_line, + '-no-reboot') + self.vm.launch() + shell_ready = "/bin/sh: can't access tty; job control turned off" + self.wait_for_console_pattern(shell_ready) + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'Allwinner sun8i Family') + exec_command_and_wait_for_pattern(self, 'cat /proc/partitions', + 'mmcblk0') + exec_command_and_wait_for_pattern(self, 'ifconfig eth0 up', + 'eth0: Link is Up') + exec_command_and_wait_for_pattern(self, 'udhcpc eth0', + 'udhcpc: lease of 10.0.2.15 obtained') + exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2', + '3 packets transmitted, 3 packets received, 0% packet loss') + exec_command_and_wait_for_pattern(self, 'reboot', + 'reboot: Restarting system') + # Wait for VM to shut down gracefully + self.vm.wait() + os.remove(kernel_path) + os.remove(dtb_path) + os.remove(rootfs_path) + + @skipBigDataTest() + def test_arm_orangepi_armbian(self): + self.set_machine('orangepi-pc') + self.require_netdev('user') + + # This test download a 275 MiB compressed image and expand it + # to 1036 MiB, but the underlying filesystem is 1552 MiB... + # As we expand it to 2 GiB we are safe. 
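+        # The boot flow below drives U-Boot interactively: the test waits
+        # for the autoboot countdown, drops to the U-Boot prompt and sets
+        # the 'extraargs' environment variable, which the image's boot
+        # script (boot.scr) is expected to append to the kernel bootargs
+        # before 'boot' starts the kernel. (Assumption based on the stock
+        # Armbian boot.cmd; the exact variable handling may vary between
+        # image versions.)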
+ image_path = self.uncompress(self.ASSET_ARMBIAN) + image_pow2ceil_expand(image_path) + + self.vm.set_console() + self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw', + '-nic', 'user', + '-no-reboot') + self.vm.launch() + + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0,115200 ' + 'loglevel=7 ' + 'nosmp ' + 'systemd.default_timeout_start_sec=9000 ' + 'systemd.mask=armbian-zram-config.service ' + 'systemd.mask=armbian-ramlog.service') + + self.wait_for_console_pattern('U-Boot SPL') + self.wait_for_console_pattern('Autoboot in ') + exec_command_and_wait_for_pattern(self, ' ', '=>') + exec_command_and_wait_for_pattern(self, "setenv extraargs '" + + kernel_command_line + "'", '=>') + exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...') + + self.wait_for_console_pattern('systemd[1]: Hostname set ' + + 'to ') + self.wait_for_console_pattern('Starting Load Kernel Modules...') + + @skipBigDataTest() + def test_arm_orangepi_uboot_netbsd9(self): + self.set_machine('orangepi-pc') + self.require_netdev('user') + + # This test download a 304MB compressed image and expand it to 2GB + # We use the common OrangePi PC 'plus' build of U-Boot for our secondary + # program loader (SPL). We will then set the path to the more specific + # OrangePi "PC" device tree blob with 'setenv fdtfile' in U-Boot prompt, + # before to boot NetBSD. + uboot_path = 'usr/lib/u-boot/orangepi_plus/u-boot-sunxi-with-spl.bin' + uboot_path = self.archive_extract(self.ASSET_UBOOT, member=uboot_path) + image_path = self.uncompress(self.ASSET_NETBSD) + image_pow2ceil_expand(image_path) + image_drive_args = 'if=sd,format=raw,snapshot=on,file=' + image_path + + # dd if=u-boot-sunxi-with-spl.bin of=armv7.img bs=1K seek=8 conv=notrunc + with open(uboot_path, 'rb') as f_in: + with open(image_path, 'r+b') as f_out: + f_out.seek(8 * 1024) + shutil.copyfileobj(f_in, f_out) + + self.vm.set_console() + self.vm.add_args('-nic', 'user', + '-drive', image_drive_args, + '-global', 'allwinner-rtc.base-year=2000', + '-no-reboot') + self.vm.launch() + wait_for_console_pattern(self, 'U-Boot 2020.01+dfsg-1') + interrupt_interactive_console_until_pattern(self, + 'Hit any key to stop autoboot:', + 'switch to partitions #0, OK') + + exec_command_and_wait_for_pattern(self, '', '=>') + cmd = 'setenv bootargs root=ld0a' + exec_command_and_wait_for_pattern(self, cmd, '=>') + cmd = 'setenv kernel netbsd-GENERIC.ub' + exec_command_and_wait_for_pattern(self, cmd, '=>') + cmd = 'setenv fdtfile dtb/sun8i-h3-orangepi-pc.dtb' + exec_command_and_wait_for_pattern(self, cmd, '=>') + cmd = ("setenv bootcmd 'fatload mmc 0:1 ${kernel_addr_r} ${kernel}; " + "fatload mmc 0:1 ${fdt_addr_r} ${fdtfile}; " + "fdt addr ${fdt_addr_r}; " + "bootm ${kernel_addr_r} - ${fdt_addr_r}'") + exec_command_and_wait_for_pattern(self, cmd, '=>') + + exec_command_and_wait_for_pattern(self, 'boot', + 'Booting kernel from Legacy Image') + wait_for_console_pattern(self, 'Starting kernel ...') + wait_for_console_pattern(self, 'NetBSD 9.0 (GENERIC)') + # Wait for user-space + wait_for_console_pattern(self, 'Starting root file system check') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_arm_quanta_gsj.py b/tests/functional/test_arm_quanta_gsj.py new file mode 100755 index 00000000000..cb0545f7bfa --- /dev/null +++ b/tests/functional/test_arm_quanta_gsj.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel and checks the console +# +# SPDX-License-Identifier: 
GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern +from qemu_test import interrupt_interactive_console_until_pattern, skipSlowTest + + +class EmcraftSf2Machine(LinuxKernelTest): + + ASSET_IMAGE = Asset( + ('https://github.com/hskinnemoen/openbmc/releases/download/' + '20200711-gsj-qemu-0/obmc-phosphor-image-gsj.static.mtd.gz'), + 'eccd4e375cde53034c84aece5c511932cacf838d9fd3f63da368a511757da72b') + + ASSET_INITRD = Asset( + ('https://github.com/hskinnemoen/openbmc/releases/download/' + '20200711-gsj-qemu-0/obmc-phosphor-initramfs-gsj.cpio.xz'), + '37b05009fc54db1434beac12bd7ff99a2e751a2f032ee18d9042f991dd0cdeaa') + + ASSET_KERNEL = Asset( + ('https://github.com/hskinnemoen/openbmc/releases/download/' + '20200711-gsj-qemu-0/uImage-gsj.bin'), + 'ce6d6b37bff46c74fc7b1e90da10a431cc37a62cdb35ec199fa73473d0790110') + + ASSET_DTB = Asset( + ('https://github.com/hskinnemoen/openbmc/releases/download/' + '20200711-gsj-qemu-0/nuvoton-npcm730-gsj.dtb'), + '3249b2da787d4b9ad4e61f315b160abfceb87b5e1895a7ce898ce7f40c8d4045') + + @skipSlowTest() + def test_arm_quanta_gsj(self): + self.set_machine('quanta-gsj') + image_path = self.uncompress(self.ASSET_IMAGE, format='gz') + + self.vm.set_console() + drive_args = 'file=' + image_path + ',if=mtd,bus=0,unit=0' + self.vm.add_args('-drive', drive_args) + self.vm.launch() + + # Disable drivers and services that stall for a long time during boot, + # to avoid running past the 90-second timeout. These may be removed + # as the corresponding device support is added. + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + ( + 'console=${console} ' + 'mem=${mem} ' + 'initcall_blacklist=npcm_i2c_bus_driver_init ' + 'systemd.mask=systemd-random-seed.service ' + 'systemd.mask=dropbearkey.service ' + ) + + self.wait_for_console_pattern('> BootBlock by Nuvoton') + self.wait_for_console_pattern('>Device: Poleg BMC NPCM730') + self.wait_for_console_pattern('>Skip DDR init.') + self.wait_for_console_pattern('U-Boot ') + interrupt_interactive_console_until_pattern( + self, 'Hit any key to stop autoboot:', 'U-Boot>') + exec_command_and_wait_for_pattern( + self, "setenv bootargs ${bootargs} " + kernel_command_line, + 'U-Boot>') + exec_command_and_wait_for_pattern( + self, 'run romboot', 'Booting Kernel from flash') + self.wait_for_console_pattern('Booting Linux on physical CPU 0x0') + self.wait_for_console_pattern('CPU1: thread -1, cpu 1, socket 0') + self.wait_for_console_pattern('OpenBMC Project Reference Distro') + self.wait_for_console_pattern('gsj login:') + + def test_arm_quanta_gsj_initrd(self): + self.set_machine('quanta-gsj') + initrd_path = self.ASSET_INITRD.fetch() + kernel_path = self.ASSET_KERNEL.fetch() + dtb_path = self.ASSET_DTB.fetch() + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0,115200n8 ' + 'earlycon=uart8250,mmio32,0xf0001000') + self.vm.add_args('-kernel', kernel_path, + '-initrd', initrd_path, + '-dtb', dtb_path, + '-append', kernel_command_line) + self.vm.launch() + + self.wait_for_console_pattern('Booting Linux on physical CPU 0x0') + self.wait_for_console_pattern('CPU1: thread -1, cpu 1, socket 0') + self.wait_for_console_pattern( + 'Give root password for system maintenance') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_arm_raspi2.py b/tests/functional/test_arm_raspi2.py new file mode 100755 index 00000000000..d3c7aaa39b0 --- /dev/null +++ b/tests/functional/test_arm_raspi2.py @@ -0,0 +1,92 @@ 
+#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on a Raspberry Pi machine +# and checks the console +# +# Copyright (c) 2019 Philippe Mathieu-Daudé +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset +from qemu_test import exec_command_and_wait_for_pattern + + +class ArmRaspi2Machine(LinuxKernelTest): + + ASSET_KERNEL_20190215 = Asset( + ('http://archive.raspberrypi.org/debian/' + 'pool/main/r/raspberrypi-firmware/' + 'raspberrypi-kernel_1.20190215-1_armhf.deb'), + '9f1759f7228113da24f5ee2aa6312946ec09a83e076aba9406c46ff776dfb291') + + ASSET_INITRD = Asset( + ('https://github.com/groeck/linux-build-test/raw/' + '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' + 'arm/rootfs-armv7a.cpio.gz'), + '2c8dbdb16ea7af2dfbcbea96044dde639fb07d09fd3c4fb31f2027ef71e55ddd') + + def do_test_arm_raspi2(self, uart_id): + """ + The kernel can be rebuilt using the kernel source referenced + and following the instructions on the on: + https://www.raspberrypi.org/documentation/linux/kernel/building.md + """ + serial_kernel_cmdline = { + 0: 'earlycon=pl011,0x3f201000 console=ttyAMA0', + } + kernel_path = self.archive_extract(self.ASSET_KERNEL_20190215, + member='boot/kernel7.img') + dtb_path = self.archive_extract(self.ASSET_KERNEL_20190215, + member='boot/bcm2709-rpi-2-b.dtb') + + self.set_machine('raspi2b') + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + serial_kernel_cmdline[uart_id] + + ' root=/dev/mmcblk0p2 rootwait ' + + 'dwc_otg.fiq_fsm_enable=0') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-append', kernel_command_line, + '-device', 'usb-kbd') + self.vm.launch() + + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.wait_for_console_pattern(console_pattern) + self.wait_for_console_pattern('Product: QEMU USB Keyboard') + + def test_arm_raspi2_uart0(self): + self.do_test_arm_raspi2(0) + + def test_arm_raspi2_initrd(self): + kernel_path = self.archive_extract(self.ASSET_KERNEL_20190215, + member='boot/kernel7.img') + dtb_path = self.archive_extract(self.ASSET_KERNEL_20190215, + member='boot/bcm2709-rpi-2-b.dtb') + initrd_path = self.uncompress(self.ASSET_INITRD) + + self.set_machine('raspi2b') + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'earlycon=pl011,0x3f201000 console=ttyAMA0 ' + 'panic=-1 noreboot ' + + 'dwc_otg.fiq_fsm_enable=0') + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-initrd', initrd_path, + '-append', kernel_command_line, + '-no-reboot') + self.vm.launch() + self.wait_for_console_pattern('Boot successful.') + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'BCM2835') + exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', + '/soc/cprman@7e101000') + exec_command_and_wait_for_pattern(self, 'halt', 'reboot: System halted') + # Wait for VM to shut down gracefully + self.vm.wait() + + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_arm_realview.py b/tests/functional/test_arm_realview.py new file mode 100755 index 00000000000..82cc964333e --- /dev/null +++ b/tests/functional/test_arm_realview.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on a realview arm machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, exec_command_and_wait_for_pattern +from qemu_test import Asset + + +class RealviewMachine(LinuxKernelTest): + 
+ ASSET_REALVIEW_MPCORE = Asset( + ('https://archive.openwrt.org/chaos_calmer/15.05.1/realview/generic/' + 'openwrt-15.05.1-realview-vmlinux-initramfs.elf'), + 'd3a01037f33e7512d46d50975588d5c3a0e0cbf25f37afab44775c2a2be523e6') + + def test_realview_ep_mpcore(self): + self.require_netdev('user') + self.set_machine('realview-eb-mpcore') + kernel_path = self.ASSET_REALVIEW_MPCORE.fetch() + self.vm.set_console() + kernel_param = 'console=ttyAMA0 mem=128M quiet' + self.vm.add_args('-kernel', kernel_path, + '-append', kernel_param) + self.vm.launch() + self.wait_for_console_pattern('Please press Enter to activate') + prompt = ':/#' + exec_command_and_wait_for_pattern(self, '', prompt) + exec_command_and_wait_for_pattern(self, 'dmesg', kernel_param) + self.wait_for_console_pattern(prompt) + exec_command_and_wait_for_pattern(self, + ('while ! dmesg | grep "br-lan: port 1(eth0) entered" ;' + ' do sleep 1 ; done'), + 'entered forwarding state') + self.wait_for_console_pattern(prompt) + exec_command_and_wait_for_pattern(self, + 'while ! ifconfig | grep "10.0.2.15" ; do sleep 1 ; done', + 'addr:10.0.2.15') + self.wait_for_console_pattern(prompt) + exec_command_and_wait_for_pattern(self, 'ping -c 1 10.0.2.2', + '1 packets received, 0% packet loss') + + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_arm_replay.py b/tests/functional/test_arm_replay.py new file mode 100755 index 00000000000..e002e6a2647 --- /dev/null +++ b/tests/functional/test_arm_replay.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +# +# Replay test that boots a Linux kernel on arm machines and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from replay_kernel import ReplayKernelBase + + +class ArmReplay(ReplayKernelBase): + + ASSET_VIRT = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/' + 'releases/29/Everything/armhfp/os/images/pxeboot/vmlinuz'), + '18dd5f1a9a28bd539f9d047f7c0677211bae528e8712b40ca5a229a4ad8e2591') + + def test_virt(self): + self.set_machine('virt') + kernel_path = self.ASSET_VIRT.fetch() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyAMA0') + console_pattern = 'VFS: Cannot open root device' + self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1) + + ASSET_CUBIE_KERNEL = Asset( + ('https://apt.armbian.com/pool/main/l/linux-6.6.16/' + 'linux-image-current-sunxi_24.2.1_armhf_' + '_6.6.16-Seb3e-D6b4a-P2359-Ce96bHfe66-HK01ba-V014b-B067e-R448a.deb'), + '3d968c15b121ede871dce49d13ee7644d6f74b6b121b84c9a40f51b0c80d6d22') + + ASSET_CUBIE_INITRD = Asset( + ('https://github.com/groeck/linux-build-test/raw/' + '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/arm/rootfs-armv5.cpio.gz'), + '334b8d256db67a3f2b3ad070aa08b5ade39624e0e7e35b02f4359a577bc8f39b') + + def test_cubieboard(self): + self.set_machine('cubieboard') + kernel_path = self.archive_extract(self.ASSET_CUBIE_KERNEL, + member='boot/vmlinuz-6.6.16-current-sunxi') + dtb_path = self.archive_extract(self.ASSET_CUBIE_KERNEL, + member='usr/lib/linux-image-6.6.16-current-sunxi/sun4i-a10-cubieboard.dtb') + initrd_path = self.uncompress(self.ASSET_CUBIE_INITRD) + + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0,115200 ' + 'usbcore.nousb ' + 'panic=-1 noreboot') + console_pattern = 'Boot successful.' 
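+        # run_rr() (from replay_kernel.py) boots the kernel twice: first
+        # with '-icount ...,rr=record,rrfile=...' to record a deterministic
+        # trace, then again with rr=replay, checking that the console
+        # pattern appears in both runs. The 'shift' argument is the icount
+        # shift, i.e. each executed guest instruction advances the virtual
+        # clock by 2^shift ns. (Summary of the helper's expected behaviour;
+        # see replay_kernel.py for the authoritative implementation.)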
+ self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1, + args=('-dtb', dtb_path, + '-initrd', initrd_path, + '-no-reboot')) + + ASSET_DAY16 = Asset( + 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day16.tar.xz', + '63311adb2d4c4e7a73214a86d29988add87266a909719c56acfadd026b4110a7') + + def test_vexpressa9(self): + self.set_machine('vexpress-a9') + self.archive_extract(self.ASSET_DAY16) + kernel_path = self.scratch_file('day16', 'winter.zImage') + dtb_path = self.scratch_file('day16', 'vexpress-v2p-ca9.dtb') + self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE, + 'QEMU advent calendar', args=('-dtb', dtb_path)) + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_arm_smdkc210.py b/tests/functional/test_arm_smdkc210.py new file mode 100755 index 00000000000..3154e7f7322 --- /dev/null +++ b/tests/functional/test_arm_smdkc210.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + + +class Smdkc210Machine(LinuxKernelTest): + + ASSET_DEB = Asset( + ('https://snapshot.debian.org/archive/debian/20190928T224601Z/pool/' + 'main/l/linux/linux-image-4.19.0-6-armmp_4.19.67-2+deb10u1_armhf.deb'), + '421804e7579ef40d554c962850dbdf1bfc79f7fa7faec9d391397170dc806c3e') + + ASSET_ROOTFS = Asset( + ('https://github.com/groeck/linux-build-test/raw/' + '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/arm/' + 'rootfs-armv5.cpio.gz'), + '334b8d256db67a3f2b3ad070aa08b5ade39624e0e7e35b02f4359a577bc8f39b') + + def test_arm_exynos4210_initrd(self): + self.set_machine('smdkc210') + + kernel_path = self.archive_extract(self.ASSET_DEB, + member='boot/vmlinuz-4.19.0-6-armmp') + dtb_path = 'usr/lib/linux-image-4.19.0-6-armmp/exynos4210-smdkv310.dtb' + dtb_path = self.archive_extract(self.ASSET_DEB, member=dtb_path) + + initrd_path = self.uncompress(self.ASSET_ROOTFS) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'earlycon=exynos4210,0x13800000 earlyprintk ' + + 'console=ttySAC0,115200n8 ' + + 'random.trust_cpu=off cryptomgr.notests ' + + 'cpuidle.off=1 panic=-1 noreboot') + + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-initrd', initrd_path, + '-append', kernel_command_line, + '-no-reboot') + self.vm.launch() + + self.wait_for_console_pattern('Boot successful.') + # TODO user command, for now the uart is stuck + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_arm_stellaris.py b/tests/functional/test_arm_stellaris.py new file mode 100755 index 00000000000..cbd21cb1a0b --- /dev/null +++ b/tests/functional/test_arm_stellaris.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 +# +# Functional test that checks the serial console of the stellaris machines +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import QemuSystemTest, Asset, exec_command_and_wait_for_pattern +from qemu_test import wait_for_console_pattern + + +class StellarisMachine(QemuSystemTest): + + ASSET_DAY22 = Asset( + 'https://www.qemu-advent-calendar.org/2023/download/day22.tar.gz', + 'ae3a63ef4b7a22c21bfc7fc0d85e402fe95e223308ed23ac854405016431ff51') + + def test_lm3s6965evb(self): + self.set_machine('lm3s6965evb') + kernel_path = self.archive_extract(self.ASSET_DAY22, + member='day22/day22.bin') + self.vm.set_console() + self.vm.add_args('-kernel', kernel_path) + self.vm.launch() + + wait_for_console_pattern(self, 
'In a one horse open') + + ASSET_NOTMAIN = Asset( + 'https://github.com/Ahelion/QemuArmM4FDemoSw/raw/master/build/notmain.bin', + '6ceda031aa081a420fca2fca9e137fa681d6e3820d820ad1917736cb265e611a') + + def test_lm3s811evb(self): + self.set_machine('lm3s811evb') + kernel_path = self.ASSET_NOTMAIN.fetch() + + self.vm.set_console() + self.vm.add_args('-cpu', 'cortex-m4') + self.vm.add_args('-kernel', kernel_path) + self.vm.launch() + + # The test kernel emits an initial '!' and then waits for input. + # For each character that we send it responds with a certain + # other ASCII character. + wait_for_console_pattern(self, '!') + exec_command_and_wait_for_pattern(self, '789', 'cdf') + + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_arm_sx1.py b/tests/functional/test_arm_sx1.py new file mode 100755 index 00000000000..25800b388c9 --- /dev/null +++ b/tests/functional/test_arm_sx1.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 +# +# Copyright (c) 2024 Linaro Ltd. +# +# Functional test that boots a Linux kernel on an sx1 machine +# and checks the console. We have three variants: +# * just boot initrd +# * boot with filesystem on SD card +# * boot from flash +# In all cases these images have a userspace that is configured +# to immediately reboot the system on successful boot, so we +# only need to wait for QEMU to exit (via -no-reboot). +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + + +class SX1Test(LinuxKernelTest): + + ASSET_ZIMAGE = Asset( + 'https://github.com/groeck/linux-test-downloads/raw/225223f2ad7d637b34426810bf6c3b727b76a718/sx1/zImage', + 'a0271899a8dc2165f9e0adb2d0a57fc839ae3a469722ffc56c77e108a8887615') + + ASSET_INITRD = Asset( + 'https://github.com/groeck/linux-test-downloads/raw/225223f2ad7d637b34426810bf6c3b727b76a718/sx1/rootfs-armv4.cpio', + '35b0721249821aa544cd85b85d3cb8901db4c6d128eed86ab261e5d9e37d58f8') + + ASSET_SD_FS = Asset( + 'https://github.com/groeck/linux-test-downloads/raw/225223f2ad7d637b34426810bf6c3b727b76a718/sx1/rootfs-armv4.ext2', + 'c1db7f43ef92469ebc8605013728c8950e7608439f01d13678994f0ce101c3a8') + + ASSET_FLASH = Asset( + 'https://github.com/groeck/linux-test-downloads/raw/225223f2ad7d637b34426810bf6c3b727b76a718/sx1/flash', + '17e6a2758fa38efd2666be0879d4751fd37d194f25168a8deede420df519b676') + + CONSOLE_ARGS = 'console=ttyS0,115200 earlycon=uart8250,mmio32,0xfffb0000,115200n8' + + def test_arm_sx1_initrd(self): + self.set_machine('sx1') + zimage_path = self.ASSET_ZIMAGE.fetch() + initrd_path = self.ASSET_INITRD.fetch() + self.vm.add_args('-append', f'kunit.enable=0 rdinit=/sbin/init {self.CONSOLE_ARGS}') + self.vm.add_args('-no-reboot') + self.launch_kernel(zimage_path, + initrd=initrd_path, + wait_for='Boot successful') + self.vm.wait(timeout=120) + + def test_arm_sx1_sd(self): + self.set_machine('sx1') + zimage_path = self.ASSET_ZIMAGE.fetch() + sd_fs_path = self.ASSET_SD_FS.fetch() + self.vm.add_args('-append', f'kunit.enable=0 root=/dev/mmcblk0 rootwait {self.CONSOLE_ARGS}') + self.vm.add_args('-no-reboot') + self.vm.add_args('-snapshot') + self.vm.add_args('-drive', f'format=raw,if=sd,file={sd_fs_path}') + self.launch_kernel(zimage_path, wait_for='Boot successful') + self.vm.wait(timeout=120) + + def test_arm_sx1_flash(self): + self.set_machine('sx1') + zimage_path = self.ASSET_ZIMAGE.fetch() + flash_path = self.ASSET_FLASH.fetch() + self.vm.add_args('-append', f'kunit.enable=0 root=/dev/mtdblock3 rootwait {self.CONSOLE_ARGS}') + self.vm.add_args('-no-reboot') + 
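+ # -snapshot sends writes to a temporary overlay, so the cached flash
+ # image fetched above is left untouched between test runs.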
self.vm.add_args('-snapshot') + self.vm.add_args('-drive', f'format=raw,if=pflash,file={flash_path}') + self.launch_kernel(zimage_path, wait_for='Boot successful') + self.vm.wait(timeout=120) + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_arm_tuxrun.py b/tests/functional/test_arm_tuxrun.py new file mode 100755 index 00000000000..4ac85f48ac1 --- /dev/null +++ b/tests/functional/test_arm_tuxrun.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunArmTest(TuxRunBaselineTest): + + ASSET_ARMV5_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/armv5/zImage', + '3931a3908dbcf0ec0fe292d035ffc4dfed95f797dedd4a59ccfcf7a46e6f92d4') + ASSET_ARMV5_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/armv5/rootfs.ext4.zst', + '60ff78b68c7021df378e4fc2d66d3b016484d1acc7e07fb8920c1d8e30f4571f') + ASSET_ARMV5_DTB = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/armv5/versatile-pb.dtb', + '50988e69ef3f3b08bfb9146e8fe414129990029e8dfbed444953b7e14809530a') + + def test_armv5(self): + self.set_machine('versatilepb') + self.cpu='arm926' + self.console='ttyAMA0' + self.wait_for_shutdown=False + self.common_tuxrun(kernel_asset=self.ASSET_ARMV5_KERNEL, + rootfs_asset=self.ASSET_ARMV5_ROOTFS, + dtb_asset=self.ASSET_ARMV5_DTB, + drive="virtio-blk-pci") + + ASSET_ARMV7_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/armv7/zImage', + '1377bc3d90de5ce57ab17cd67429fe8b15c2e9964248c775c682b67e6299b991') + ASSET_ARMV7_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/armv7/rootfs.ext4.zst', + 'ed2cbc69bd6b3fbd5cafb5ee961393c7cfbe726446f14301c67d6b1f28bfdb51') + + def test_armv7(self): + self.set_machine('virt') + self.cpu='cortex-a15' + self.console='ttyAMA0' + self.wait_for_shutdown=False + self.common_tuxrun(kernel_asset=self.ASSET_ARMV7_KERNEL, + rootfs_asset=self.ASSET_ARMV7_ROOTFS) + + ASSET_ARMV7BE_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/armv7be/zImage', + 'a244e6da99f1bbd254827ec7681bd4aac9eb1aa05aaebc6b15e5d289ebb683f3') + ASSET_ARMV7BE_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/armv7be/rootfs.ext4.zst', + 'd4f9c57860a512163f30ecc69b2174d1a1bdeb853a43dc49a09cfcfe84e428ea') + + def test_armv7be(self): + self.set_machine('virt') + self.cpu='cortex-a15' + self.console='ttyAMA0' + self.wait_for_shutdown=False + self.common_tuxrun(kernel_asset=self.ASSET_ARMV7BE_KERNEL, + rootfs_asset=self.ASSET_ARMV7BE_ROOTFS) + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_arm_vexpress.py b/tests/functional/test_arm_vexpress.py new file mode 100755 index 00000000000..6b115528947 --- /dev/null +++ b/tests/functional/test_arm_vexpress.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on an versatile express machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + + +class VExpressTest(LinuxKernelTest): + + ASSET_DAY16 = Asset( + 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day16.tar.xz', + 
'63311adb2d4c4e7a73214a86d29988add87266a909719c56acfadd026b4110a7') + + def test_arm_vexpressa9(self): + self.set_machine('vexpress-a9') + self.archive_extract(self.ASSET_DAY16) + self.launch_kernel(self.scratch_file('day16', 'winter.zImage'), + dtb=self.scratch_file('day16', + 'vexpress-v2p-ca9.dtb'), + wait_for='QEMU advent calendar') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_arm_virt.py b/tests/functional/test_arm_virt.py new file mode 100755 index 00000000000..7b6549176f5 --- /dev/null +++ b/tests/functional/test_arm_virt.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + +class ArmVirtMachine(LinuxKernelTest): + + ASSET_KERNEL = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/' + 'releases/29/Everything/armhfp/os/images/pxeboot/vmlinuz'), + '18dd5f1a9a28bd539f9d047f7c0677211bae528e8712b40ca5a229a4ad8e2591') + + def test_arm_virt(self): + self.set_machine('virt') + kernel_path = self.ASSET_KERNEL.fetch() + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyAMA0') + self.vm.add_args('-kernel', kernel_path, + '-append', kernel_command_line) + self.vm.launch() + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.wait_for_console_pattern(console_pattern) + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_avr_mega2560.py b/tests/functional/test_avr_mega2560.py new file mode 100755 index 00000000000..6359b72af39 --- /dev/null +++ b/tests/functional/test_avr_mega2560.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +# +# QEMU AVR integration tests +# +# Copyright (c) 2019-2020 Michael Rolnik +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern + + +class AVR6Machine(QemuSystemTest): + + ASSET_ROM = Asset(('https://github.com/seharris/qemu-avr-tests' + '/raw/36c3e67b8755dcf/free-rtos/Demo' + '/AVR_ATMega2560_GCC/demo.elf'), + 'ee4833bd65fc69e84a79ed1c608affddbd499a60e63acf87d9113618401904e4') + + def test_freertos(self): + """ + https://github.com/seharris/qemu-avr-tests/raw/master/free-rtos/Demo/AVR_ATMega2560_GCC/demo.elf + constantly prints out 'ABCDEFGHIJKLMNOPQRSTUVWXABCDEFGHIJKLMNOPQRSTUVWX' + """ + rom_path = self.ASSET_ROM.fetch() + + self.set_machine('arduino-mega-2560-v3') + self.vm.add_args('-bios', rom_path) + self.vm.add_args('-nographic') + self.vm.set_console() + self.vm.launch() + + wait_for_console_pattern(self, + 'XABCDEFGHIJKLMNOPQRSTUVWXABCDEFGHIJKLMNOPQRSTUVWXA') + + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_avr_uno.py b/tests/functional/test_avr_uno.py new file mode 100755 index 00000000000..adb3b73da4f --- /dev/null +++ b/tests/functional/test_avr_uno.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +# +# QEMU AVR Arduino UNO functional test +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern + + +class UnoMachine(QemuSystemTest): + + ASSET_UNO = Asset( + ('https://github.com/RahulRNandan/LED_Blink_AVR/raw/' + 'c6d602cbb974a193/build/main.elf'), + '3009a4e2cf5c5b65142f538abdf66d4dc6bc6beab7e552fff9ae314583761b72') + + def test_uno(self): + """ + The binary constantly prints out 'LED Blink' + """ + self.set_machine('arduino-uno') + rom_path = self.ASSET_UNO.fetch() + + self.vm.add_args('-bios', rom_path) + self.vm.set_console() + self.vm.launch() + + wait_for_console_pattern(self, 'LED Blink') + + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_cpu_queries.py b/tests/functional/test_cpu_queries.py new file mode 100755 index 00000000000..b1122a0e8f7 --- /dev/null +++ b/tests/functional/test_cpu_queries.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +# +# Sanity check of query-cpu-* results +# +# Copyright (c) 2019 Red Hat, Inc. +# +# Author: +# Eduardo Habkost +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import QemuSystemTest + +class QueryCPUModelExpansion(QemuSystemTest): + """ + Run query-cpu-model-expansion for each CPU model, and validate results + """ + + def test(self): + self.set_machine('none') + self.vm.add_args('-S') + self.vm.launch() + + cpus = self.vm.cmd('query-cpu-definitions') + for c in cpus: + self.log.info("Checking CPU: %s", c) + self.assertNotIn('', c['unavailable-features'], c['name']) + + for c in cpus: + model = {'name': c['name']} + e = self.vm.cmd('query-cpu-model-expansion', model=model, + type='full') + self.assertEqual(e['model']['name'], c['name']) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_empty_cpu_model.py b/tests/functional/test_empty_cpu_model.py new file mode 100755 index 00000000000..0081b06d85a --- /dev/null +++ b/tests/functional/test_empty_cpu_model.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python3 +# +# Check for crash when using empty -cpu option +# +# Copyright (c) 2019 Red Hat, Inc. +# +# Author: +# Eduardo Habkost +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
+from qemu_test import QemuSystemTest + +class EmptyCPUModel(QemuSystemTest): + def test(self): + self.vm.add_args('-S', '-display', 'none', '-machine', 'none', '-cpu', '') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + self.vm.wait() + self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") + self.assertRegex(self.vm.get_log(), r'-cpu option cannot be empty') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_hppa_seabios.py b/tests/functional/test_hppa_seabios.py new file mode 100755 index 00000000000..661b2464e13 --- /dev/null +++ b/tests/functional/test_hppa_seabios.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# +# SeaBIOS boot test for HPPA machines +# +# Copyright (c) 2024 Linaro, Ltd +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import QemuSystemTest +from qemu_test import wait_for_console_pattern + +class HppaSeabios(QemuSystemTest): + + timeout = 5 + MACH_BITS = {'B160L': 32, 'C3700': 64} + + def boot_seabios(self): + mach = self.machine + bits = self.MACH_BITS[mach] + self.vm.add_args('-no-shutdown') + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, f'SeaBIOS PA-RISC {bits}-bit Firmware') + wait_for_console_pattern(self, f'Emulated machine: HP {mach} ({bits}-bit') + + def test_hppa_32(self): + self.set_machine('B160L') + self.boot_seabios() + + def test_hppa_64(self): + self.set_machine('C3700') + self.boot_seabios() + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_i386_replay.py b/tests/functional/test_i386_replay.py new file mode 100755 index 00000000000..7c4c2602da9 --- /dev/null +++ b/tests/functional/test_i386_replay.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# +# Replay test that boots a Linux kernel on a i386 machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from replay_kernel import ReplayKernelBase + + +class I386Replay(ReplayKernelBase): + + ASSET_KERNEL = Asset( + 'https://storage.tuxboot.com/20230331/i386/bzImage', + 'a3e5b32a354729e65910f5a1ffcda7c14a6c12a55e8213fb86e277f1b76ed956') + + def test_pc(self): + self.set_machine('pc') + kernel_url = () + kernel_path = self.ASSET_KERNEL.fetch() + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' + console_pattern = 'VFS: Cannot open root device' + self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_i386_tuxrun.py b/tests/functional/test_i386_tuxrun.py new file mode 100755 index 00000000000..f3ccf11ae8b --- /dev/null +++ b/tests/functional/test_i386_tuxrun.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. 
+# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunI386Test(TuxRunBaselineTest): + + ASSET_I386_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/i386/bzImage', + '47fb44e38e34101eb0f71a2a01742b959d40ed5fd67cefb5608a39be11d3b74e') + ASSET_I386_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/i386/rootfs.ext4.zst', + 'a1a3b3b4c9dccd6475b58db95c107b468b736b700f6620985a8ed050a73d51c8') + + def test_i386(self): + self.set_machine('q35') + self.cpu="coreduo" + self.wait_for_shutdown=False + self.common_tuxrun(kernel_asset=self.ASSET_I386_KERNEL, + rootfs_asset=self.ASSET_I386_ROOTFS, + drive="virtio-blk-pci") + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_info_usernet.py b/tests/functional/test_info_usernet.py new file mode 100755 index 00000000000..e8cbc37eed6 --- /dev/null +++ b/tests/functional/test_info_usernet.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# +# Test for the hmp command "info usernet" +# +# Copyright (c) 2021 Red Hat, Inc. +# +# Author: +# Cleber Rosa +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import QemuSystemTest +from qemu_test.utils import get_usernet_hostfwd_port + + +class InfoUsernet(QemuSystemTest): + + def test_hostfwd(self): + self.require_netdev('user') + self.set_machine('none') + self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22') + self.vm.launch() + + port = get_usernet_hostfwd_port(self.vm) + self.assertIsNotNone(port, + ('"info usernet" output content does not seem to ' + 'contain the redirected port')) + self.assertGreater(port, 0, + ('Found a redirected port that is not greater than' + ' zero')) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_intel_iommu.py b/tests/functional/test_intel_iommu.py new file mode 100755 index 00000000000..62268d6f278 --- /dev/null +++ b/tests/functional/test_intel_iommu.py @@ -0,0 +1,155 @@ +#!/usr/bin/env python3 +# +# INTEL_IOMMU Functional tests +# +# Copyright (c) 2021 Red Hat, Inc. +# +# Author: +# Eric Auger +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
+ +from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern + + +class IntelIOMMU(LinuxKernelTest): + + ASSET_KERNEL = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases' + '/31/Server/x86_64/os/images/pxeboot/vmlinuz'), + 'd4738d03dbbe083ca610d0821d0a8f1488bebbdccef54ce33e3adb35fda00129') + + ASSET_INITRD = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases' + '/31/Server/x86_64/os/images/pxeboot/initrd.img'), + '277cd6c7adf77c7e63d73bbb2cded8ef9e2d3a2f100000e92ff1f8396513cd8b') + + ASSET_DISKIMAGE = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases' + '/31/Cloud/x86_64/images/Fedora-Cloud-Base-31-1.9.x86_64.qcow2'), + 'e3c1b309d9203604922d6e255c2c5d098a309c2d46215d8fc026954f3c5c27a0') + + DEFAULT_KERNEL_PARAMS = ('root=/dev/vda1 console=ttyS0 net.ifnames=0 ' + 'quiet rd.rescue ') + GUEST_PORT = 8080 + IOMMU_ADDON = ',iommu_platform=on,disable-modern=off,disable-legacy=on' + kernel_path = None + initrd_path = None + kernel_params = None + + def add_common_args(self, path): + self.vm.add_args('-drive', f'file={path},if=none,id=drv0,snapshot=on') + self.vm.add_args('-device', 'virtio-blk-pci,bus=pcie.0,' + + 'drive=drv0,id=virtio-disk0,bootindex=1,' + 'werror=stop,rerror=stop' + self.IOMMU_ADDON) + self.vm.add_args('-device', 'virtio-gpu-pci' + self.IOMMU_ADDON) + + self.vm.add_args('-netdev', + 'user,id=n1,hostfwd=tcp:127.0.0.1:0-:%d' % + self.GUEST_PORT) + self.vm.add_args('-device', + 'virtio-net-pci,netdev=n1' + self.IOMMU_ADDON) + + self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0') + self.vm.add_args('-object', + 'rng-random,id=rng0,filename=/dev/urandom') + self.vm.add_args("-m", "1G") + self.vm.add_args("-accel", "kvm") + + def common_vm_setup(self): + self.set_machine('q35') + self.require_accelerator("kvm") + self.require_netdev('user') + + self.kernel_path = self.ASSET_KERNEL.fetch() + self.initrd_path = self.ASSET_INITRD.fetch() + image_path = self.ASSET_DISKIMAGE.fetch() + self.add_common_args(image_path) + self.kernel_params = self.DEFAULT_KERNEL_PARAMS + + def run_and_check(self): + if self.kernel_path: + self.vm.add_args('-kernel', self.kernel_path, + '-append', self.kernel_params, + '-initrd', self.initrd_path) + self.vm.set_console() + self.vm.launch() + self.wait_for_console_pattern('Entering emergency mode.') + prompt = '# ' + self.wait_for_console_pattern(prompt) + + # Copy a file (checked later), umount afterwards to drop disk cache: + exec_command_and_wait_for_pattern(self, 'mount /dev/vda1 /sysroot', + prompt) + filename = '/boot/initramfs-5.3.7-301.fc31.x86_64.img' + exec_command_and_wait_for_pattern(self, (f'cp /sysroot{filename}' + ' /sysroot/root/data'), + prompt) + exec_command_and_wait_for_pattern(self, 'umount /sysroot', prompt) + + # Switch from initrd to the cloud image filesystem: + exec_command_and_wait_for_pattern(self, 'mount /dev/vda1 /sysroot', + prompt) + exec_command_and_wait_for_pattern(self, + ('for d in dev proc sys run ; do ' + 'mount -o bind /$d /sysroot/$d ; done'), prompt) + exec_command_and_wait_for_pattern(self, 'chroot /sysroot', prompt) + + # Checking for IOMMU enablement: + self.log.info("Checking whether IOMMU has been enabled...") + exec_command_and_wait_for_pattern(self, 'cat /proc/cmdline', + 'intel_iommu=on') + self.wait_for_console_pattern(prompt) + exec_command_and_wait_for_pattern(self, 'dmesg | grep DMAR:', + 'IOMMU enabled') + self.wait_for_console_pattern(prompt) + exec_command_and_wait_for_pattern(self, + 'find 
/sys/kernel/iommu_groups/ -type l', + 'devices/0000:00:') + self.wait_for_console_pattern(prompt) + + # Check hard disk device via sha256sum: + self.log.info("Checking hard disk...") + hashsum = '0dc7472f879be70b2f3daae279e3ae47175ffe249691e7d97f47222b65b8a720' + exec_command_and_wait_for_pattern(self, 'sha256sum ' + filename, + hashsum) + self.wait_for_console_pattern(prompt) + exec_command_and_wait_for_pattern(self, 'sha256sum /root/data', + hashsum) + self.wait_for_console_pattern(prompt) + + # Check virtio-net via HTTP: + exec_command_and_wait_for_pattern(self, 'dhclient eth0', prompt) + self.check_http_download(filename, hashsum, self.GUEST_PORT) + + def test_intel_iommu(self): + self.common_vm_setup() + self.vm.add_args('-device', 'intel-iommu,intremap=on') + self.vm.add_args('-machine', 'kernel_irqchip=split') + self.kernel_params += 'intel_iommu=on' + self.run_and_check() + + def test_intel_iommu_strict(self): + self.common_vm_setup() + self.vm.add_args('-device', 'intel-iommu,intremap=on') + self.vm.add_args('-machine', 'kernel_irqchip=split') + self.kernel_params += 'intel_iommu=on,strict' + self.run_and_check() + + def test_intel_iommu_strict_cm(self): + self.common_vm_setup() + self.vm.add_args('-device', 'intel-iommu,intremap=on,caching-mode=on') + self.vm.add_args('-machine', 'kernel_irqchip=split') + self.kernel_params += 'intel_iommu=on,strict' + self.run_and_check() + + def test_intel_iommu_pt(self): + self.common_vm_setup() + self.vm.add_args('-device', 'intel-iommu,intremap=on') + self.vm.add_args('-machine', 'kernel_irqchip=split') + self.kernel_params += 'intel_iommu=on iommu=pt' + self.run_and_check() + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_linux_initrd.py b/tests/functional/test_linux_initrd.py new file mode 100755 index 00000000000..2207f83fbfd --- /dev/null +++ b/tests/functional/test_linux_initrd.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python3 +# +# Linux initrd integration test. +# +# Copyright (c) 2018 Red Hat, Inc. +# +# Author: +# Wainer dos Santos Moschetta +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +import logging +import tempfile + +from qemu_test import QemuSystemTest, Asset, skipFlakyTest + + +class LinuxInitrd(QemuSystemTest): + """ + Checks QEMU evaluates correctly the initrd file passed as -initrd option. + """ + + timeout = 300 + + ASSET_F18_KERNEL = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/' + 'releases/18/Fedora/x86_64/os/images/pxeboot/vmlinuz'), + '1a27cb42559ce29237ac186699d063556ad69c8349d732bb1bd8d614e5a8cc2e') + + ASSET_F28_KERNEL = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/' + 'releases/28/Everything/x86_64/os/images/pxeboot/vmlinuz'), + 'd05909c9d4a742a6fcc84dcc0361009e4611769619cc187a07107579a035f24e') + + def test_with_2gib_file_should_exit_error_msg_with_linux_v3_6(self): + """ + Pretends to boot QEMU with an initrd file with size of 2GiB + and expect it exits with error message. + Fedora-18 shipped with linux-3.6 which have not supported xloadflags + cannot support more than 2GiB initrd. 
+ """ + self.set_machine('pc') + kernel_path = self.ASSET_F18_KERNEL.fetch() + max_size = 2 * (1024 ** 3) - 1 + + with tempfile.NamedTemporaryFile() as initrd: + initrd.seek(max_size) + initrd.write(b'\0') + initrd.flush() + self.vm.add_args('-kernel', kernel_path, '-initrd', initrd.name, + '-m', '4096') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + self.vm.wait() + self.assertEqual(self.vm.exitcode(), 1) + expected_msg = r'.*initrd is too large.*max: \d+, need %s.*' % ( + max_size + 1) + self.assertRegex(self.vm.get_log(), expected_msg) + + # XXX file tracking bug + @skipFlakyTest(bug_url=None) + def test_with_2gib_file_should_work_with_linux_v4_16(self): + """ + QEMU has supported up to 4 GiB initrd for recent kernel + Expect guest can reach 'Unpacking initramfs...' + """ + self.set_machine('pc') + kernel_path = self.ASSET_F28_KERNEL.fetch() + max_size = 2 * (1024 ** 3) + 1 + + with tempfile.NamedTemporaryFile() as initrd: + initrd.seek(max_size) + initrd.write(b'\0') + initrd.flush() + + self.vm.set_console() + kernel_command_line = 'console=ttyS0' + self.vm.add_args('-kernel', kernel_path, + '-append', kernel_command_line, + '-initrd', initrd.name, + '-m', '5120') + self.vm.launch() + console = self.vm.console_socket.makefile() + console_logger = logging.getLogger('console') + while True: + msg = console.readline() + console_logger.debug(msg.strip()) + if 'Unpacking initramfs...' in msg: + break + if 'Kernel panic - not syncing' in msg: + self.fail("Kernel panic reached") + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_loongarch64_virt.py b/tests/functional/test_loongarch64_virt.py new file mode 100755 index 00000000000..b7d9abf933f --- /dev/null +++ b/tests/functional/test_loongarch64_virt.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# LoongArch virt test. 
+# +# Copyright (c) 2023 Loongson Technology Corporation Limited +# + +from qemu_test import QemuSystemTest, Asset +from qemu_test import exec_command_and_wait_for_pattern +from qemu_test import wait_for_console_pattern + +class LoongArchMachine(QemuSystemTest): + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + + timeout = 120 + + ASSET_KERNEL = Asset( + ('https://github.com/yangxiaojuan-loongson/qemu-binary/' + 'releases/download/2024-11-26/vmlinuz.efi'), + '08b88a45f48a5fd92260bae895be4e5175be2397481a6f7821b9f39b2965b79e') + ASSET_INITRD = Asset( + ('https://github.com/yangxiaojuan-loongson/qemu-binary/' + 'releases/download/2024-11-26/ramdisk'), + '03d6fb6f8ee64ecac961120a0bdacf741f17b3bee2141f17fa01908c8baf176a') + ASSET_BIOS = Asset( + ('https://github.com/yangxiaojuan-loongson/qemu-binary/' + 'releases/download/2024-11-26/QEMU_EFI.fd'), + 'f55fbf5d92e885844631ae9bfa8887f659bbb4f6ef2beea9e9ff8bc0603b6697') + + def wait_for_console_pattern(self, success_message, vm=None): + wait_for_console_pattern(self, success_message, + failure_message='Kernel panic - not syncing', + vm=vm) + + def test_loongarch64_devices(self): + + self.set_machine('virt') + + kernel_path = self.ASSET_KERNEL.fetch() + initrd_path = self.ASSET_INITRD.fetch() + bios_path = self.ASSET_BIOS.fetch() + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'root=/dev/ram rdinit=/sbin/init console=ttyS0,115200') + self.vm.add_args('-nographic', + '-smp', '4', + '-m', '1024', + '-cpu', 'la464', + '-kernel', kernel_path, + '-initrd', initrd_path, + '-bios', bios_path, + '-append', kernel_command_line) + self.vm.launch() + self.wait_for_console_pattern('Run /sbin/init as init process') + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'processor : 3') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_m68k_mcf5208evb.py b/tests/functional/test_m68k_mcf5208evb.py new file mode 100755 index 00000000000..c7d19989334 --- /dev/null +++ b/tests/functional/test_m68k_mcf5208evb.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on an MCF5208EVB machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + + +class Mcf5208EvbTest(LinuxKernelTest): + + ASSET_DAY07 = Asset( + 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day07.tar.xz', + '753c2f3837126b7c6ba92d0b1e0b156e8a2c5131d2d576bb0b9a763fae73c08a') + + def test_m68k_mcf5208evb(self): + self.set_machine('mcf5208evb') + self.archive_extract(self.ASSET_DAY07) + self.vm.set_console() + self.vm.add_args('-kernel', + self.scratch_file('day07', 'sanity-clause.elf')) + self.vm.launch() + self.wait_for_console_pattern('QEMU advent calendar') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_m68k_nextcube.py b/tests/functional/test_m68k_nextcube.py new file mode 100755 index 00000000000..13c72bd136a --- /dev/null +++ b/tests/functional/test_m68k_nextcube.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a VM and run OCR on the framebuffer +# +# Copyright (c) 2019 Philippe Mathieu-Daudé +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
+ +import time + +from qemu_test import QemuSystemTest, Asset +from qemu_test import skipIfMissingImports, skipIfMissingCommands +from qemu_test.tesseract import tesseract_ocr + + +class NextCubeMachine(QemuSystemTest): + + timeout = 15 + + ASSET_ROM = Asset(('https://sourceforge.net/p/previous/code/1350/tree/' + 'trunk/src/Rev_2.5_v66.BIN?format=raw'), + '1b753890b67095b73e104c939ddf62eca9e7d0aedde5108e3893b0ed9d8000a4') + + def check_bootrom_framebuffer(self, screenshot_path): + rom_path = self.ASSET_ROM.fetch() + + self.vm.add_args('-bios', rom_path) + self.vm.launch() + + self.log.info('VM launched, waiting for display') + # TODO: wait for the 'displaysurface_create 1120x832' trace-event. + time.sleep(2) + + res = self.vm.cmd('human-monitor-command', + command_line='screendump %s' % screenshot_path) + if 'unknown command' in res: + self.skipTest('screendump not available') + + @skipIfMissingImports("PIL") + def test_bootrom_framebuffer_size(self): + self.set_machine('next-cube') + screenshot_path = self.scratch_file("dump.ppm") + self.check_bootrom_framebuffer(screenshot_path) + + from PIL import Image + width, height = Image.open(screenshot_path).size + self.assertEqual(width, 1120) + self.assertEqual(height, 832) + + @skipIfMissingCommands('tesseract') + def test_bootrom_framebuffer_ocr_with_tesseract(self): + self.set_machine('next-cube') + screenshot_path = self.scratch_file("dump.ppm") + self.check_bootrom_framebuffer(screenshot_path) + lines = tesseract_ocr(screenshot_path) + text = '\n'.join(lines) + self.assertIn('Testing the FPU', text) + self.assertIn('System test failed. Error code', text) + self.assertIn('Boot command', text) + self.assertIn('Next>', text) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_m68k_q800.py b/tests/functional/test_m68k_q800.py new file mode 100755 index 00000000000..b3e655346c4 --- /dev/null +++ b/tests/functional/test_m68k_q800.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +# +# Functional test for testing the q800 m68k machine +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
+ +from qemu_test import LinuxKernelTest, Asset + +class Q800MachineTest(LinuxKernelTest): + + ASSET_KERNEL = Asset( + ('https://snapshot.debian.org/' + 'archive/debian-ports/20191021T083923Z/pool-m68k/main/l/linux/' + 'kernel-image-5.3.0-1-m68k-di_5.3.7-1_m68k.udeb'), + '949e50d74d4b9bc15d26c06d402717b7a4c0e32ff8100014f5930d8024de7b73') + + def test_m68k_q800(self): + self.set_machine('q800') + + kernel_path = self.archive_extract(self.ASSET_KERNEL, + member='boot/vmlinux-5.3.0-1-m68k') + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0 vga=off') + self.vm.add_args('-kernel', kernel_path, + '-append', kernel_command_line, + '-audio', 'none') + self.vm.launch() + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.wait_for_console_pattern(console_pattern) + console_pattern = 'No filesystem could mount root' + self.wait_for_console_pattern(console_pattern) + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_m68k_replay.py b/tests/functional/test_m68k_replay.py new file mode 100755 index 00000000000..213d6ae07e0 --- /dev/null +++ b/tests/functional/test_m68k_replay.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +# +# Replay test that boots a Linux kernel on an m68k machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from replay_kernel import ReplayKernelBase + + +class M68kReplay(ReplayKernelBase): + + ASSET_Q800 = Asset( + ('https://snapshot.debian.org/' + 'archive/debian-ports/20191021T083923Z/pool-m68k/main/l/linux/' + 'kernel-image-5.3.0-1-m68k-di_5.3.7-1_m68k.udeb'), + '949e50d74d4b9bc15d26c06d402717b7a4c0e32ff8100014f5930d8024de7b73') + + def test_q800(self): + self.set_machine('q800') + kernel_path = self.archive_extract(self.ASSET_Q800, + member='boot/vmlinux-5.3.0-1-m68k') + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0 vga=off') + console_pattern = 'No filesystem could mount root' + self.run_rr(kernel_path, kernel_command_line, console_pattern, + args=('-audio', 'none')) + + ASSET_MCF5208 = Asset( + 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day07.tar.xz', + '753c2f3837126b7c6ba92d0b1e0b156e8a2c5131d2d576bb0b9a763fae73c08a') + + def test_mcf5208evb(self): + self.set_machine('mcf5208evb') + kernel_path = self.archive_extract(self.ASSET_MCF5208, + member='day07/sanity-clause.elf') + self.run_rr(kernel_path, self.KERNEL_COMMON_COMMAND_LINE, + 'QEMU advent calendar') + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_m68k_tuxrun.py b/tests/functional/test_m68k_tuxrun.py new file mode 100755 index 00000000000..7eacba135f6 --- /dev/null +++ b/tests/functional/test_m68k_tuxrun.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2024 Linaro Ltd. 
+# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunM68KTest(TuxRunBaselineTest): + + ASSET_M68K_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/m68k/vmlinux', + '7754e1d5cec753ccf1dc6894729a7f54c1a4965631ebf56df8e4ce1163ad19d8') + ASSET_M68K_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/m68k/rootfs.ext4.zst', + '557962ffff265607912e82232cf21adbe0e4e5a88e1e1d411ce848c37f0213e9') + + def test_m68k(self): + self.set_machine('virt') + self.cpu="m68040" + self.common_tuxrun(kernel_asset=self.ASSET_M68K_KERNEL, + rootfs_asset=self.ASSET_M68K_ROOTFS, + drive="virtio-blk-device") + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_mem_addr_space.py b/tests/functional/test_mem_addr_space.py new file mode 100755 index 00000000000..61b4a190b41 --- /dev/null +++ b/tests/functional/test_mem_addr_space.py @@ -0,0 +1,349 @@ +#!/usr/bin/env python3 +# +# Check for crash when using memory beyond the available guest processor +# address space. +# +# Copyright (c) 2023 Red Hat, Inc. +# +# Author: +# Ani Sinha +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import QemuSystemTest +import time + +class MemAddrCheck(QemuSystemTest): + # after launch, in order to generate the logs from QEMU we need to + # wait for some time. Launching and then immediately shutting down + # the VM generates empty logs. A delay of 1 second is added for + # this reason. + DELAY_Q35_BOOT_SEQUENCE = 1 + + # This helper can go away when the 32-bit host deprecation + # turns into full & final removal of support. + def ensure_64bit_binary(self): + with open(self.qemu_bin, "rb") as fh: + ident = fh.read(4) + + # "\x7fELF" + if ident != bytes([0x7f, 0x45, 0x4C, 0x46]): + # Non-ELF file implies macOS or Windows which + # we already assume to be 64-bit only + return + + # bits == 1 -> 32-bit; bits == 2 -> 64-bit + bits = int.from_bytes(fh.read(1), byteorder='little') + if bits != 2: + # 32-bit ELF builds won't be able to address sufficient + # RAM to run the tests + self.skipTest("64-bit build host is required") + + # first, lets test some 32-bit processors. + # for all 32-bit cases, pci64_hole_size is 0. + def test_phybits_low_pse36(self): + """ + With pse36 feature ON, a processor has 36 bits of addressing. So it can + access up to a maximum of 64GiB of memory. Memory hotplug region begins + at 4 GiB boundary when "above_4g_mem_size" is 0 (this would be true when + we have 0.5 GiB of VM memory, see pc_q35_init()). This means total + hotpluggable memory size is 60 GiB. Per slot, we reserve 1 GiB of memory + for dimm alignment for all machines. That leaves total hotpluggable + actual memory size of 59 GiB. If the VM is started with 0.5 GiB of + memory, maxmem should be set to a maximum value of 59.5 GiB to ensure + that the processor can address all memory directly. + Note that 64-bit pci hole size is 0 in this case. If maxmem is set to + 59.6G, QEMU should fail to start with a message "phy-bits are too low". + If maxmem is set to 59.5G with all other QEMU parameters identical, QEMU + should start fine. 
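+ In other words: 64 GiB - 4 GiB (RAM below the hotplug region)
+ - 1 GiB (per-slot alignment) + 0.5 GiB (base VM memory) = 59.5 GiB.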
+ """ + self.ensure_64bit_binary() + self.set_machine('q35') + self.vm.add_args('-S', '-m', '512,slots=1,maxmem=59.6G', + '-cpu', 'pentium,pse36=on', '-display', 'none', + '-object', 'memory-backend-ram,id=mem1,size=1G', + '-device', 'pc-dimm,id=vm0,memdev=mem1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + self.vm.wait() + self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") + self.assertRegex(self.vm.get_log(), r'phys-bits too low') + + def test_phybits_low_pae(self): + """ + With pae feature ON, a processor has 36 bits of addressing. So it can + access up to a maximum of 64GiB of memory. Rest is the same as the case + with pse36 above. + """ + self.ensure_64bit_binary() + self.set_machine('q35') + self.vm.add_args('-S', '-m', '512,slots=1,maxmem=59.6G', + '-cpu', 'pentium,pae=on', '-display', 'none', + '-object', 'memory-backend-ram,id=mem1,size=1G', + '-device', 'pc-dimm,id=vm0,memdev=mem1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + self.vm.wait() + self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") + self.assertRegex(self.vm.get_log(), r'phys-bits too low') + + def test_phybits_ok_pentium_pse36(self): + """ + Setting maxmem to 59.5G and making sure that QEMU can start with the + same options as the failing case above with pse36 cpu feature. + """ + self.ensure_64bit_binary() + self.set_machine('q35') + self.vm.add_args('-m', '512,slots=1,maxmem=59.5G', + '-cpu', 'pentium,pse36=on', '-display', 'none', + '-object', 'memory-backend-ram,id=mem1,size=1G', + '-device', 'pc-dimm,id=vm0,memdev=mem1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) + self.vm.shutdown() + self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') + + def test_phybits_ok_pentium_pae(self): + """ + Test is same as above but now with pae cpu feature turned on. + Setting maxmem to 59.5G and making sure that QEMU can start fine + with the same options as the case above. + """ + self.ensure_64bit_binary() + self.set_machine('q35') + self.vm.add_args('-m', '512,slots=1,maxmem=59.5G', + '-cpu', 'pentium,pae=on', '-display', 'none', + '-object', 'memory-backend-ram,id=mem1,size=1G', + '-device', 'pc-dimm,id=vm0,memdev=mem1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) + self.vm.shutdown() + self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') + + def test_phybits_ok_pentium2(self): + """ + Pentium2 has 36 bits of addressing, so its same as pentium + with pse36 ON. + """ + self.ensure_64bit_binary() + self.set_machine('q35') + self.vm.add_args('-m', '512,slots=1,maxmem=59.5G', + '-cpu', 'pentium2', '-display', 'none', + '-object', 'memory-backend-ram,id=mem1,size=1G', + '-device', 'pc-dimm,id=vm0,memdev=mem1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) + self.vm.shutdown() + self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') + + def test_phybits_low_nonpse36(self): + """ + Pentium processor has 32 bits of addressing without pse36 or pae + so it can access physical address up to 4 GiB. Setting maxmem to + 4 GiB should make QEMU fail to start with "phys-bits too low" + message because the region for memory hotplug is always placed + above 4 GiB due to the PCI hole and simplicity. 
+ """ + self.ensure_64bit_binary() + self.set_machine('q35') + self.vm.add_args('-S', '-m', '512,slots=1,maxmem=4G', + '-cpu', 'pentium', '-display', 'none', + '-object', 'memory-backend-ram,id=mem1,size=1G', + '-device', 'pc-dimm,id=vm0,memdev=mem1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + self.vm.wait() + self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") + self.assertRegex(self.vm.get_log(), r'phys-bits too low') + + # now lets test some 64-bit CPU cases. + def test_phybits_low_tcg_q35_70_amd(self): + """ + For q35 7.1 machines and above, there is a HT window that starts at + 1024 GiB and ends at 1 TiB - 1. If the max GPA falls in this range, + "above_4G" memory is adjusted to start at 1 TiB boundary for AMD cpus + in the default case. Lets test without that case for machines 7.0. + For q35-7.0 machines, "above 4G" memory starts are 4G. + pci64_hole size is 32 GiB. Since TCG_PHYS_ADDR_BITS is defined to + be 40, TCG emulated CPUs have maximum of 1 TiB (1024 GiB) of + directly addressable memory. + Hence, maxmem value at most can be + 1024 GiB - 4 GiB - 1 GiB per slot for alignment - 32 GiB + 0.5 GiB + which is equal to 987.5 GiB. Setting the value to 988 GiB should + make QEMU fail with the error message. + """ + self.ensure_64bit_binary() + self.set_machine('pc-q35-7.0') + self.vm.add_args('-S', '-m', '512,slots=1,maxmem=988G', + '-display', 'none', + '-object', 'memory-backend-ram,id=mem1,size=1G', + '-device', 'pc-dimm,id=vm0,memdev=mem1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + self.vm.wait() + self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") + self.assertRegex(self.vm.get_log(), r'phys-bits too low') + + def test_phybits_low_tcg_q35_71_amd(self): + """ + AMD_HT_START is defined to be at 1012 GiB. So for q35 machines + version > 7.0 and AMD cpus, instead of 1024 GiB limit for 40 bit + processor address space, it has to be 1012 GiB , that is 12 GiB + less than the case above in order to accommodate HT hole. + Make sure QEMU fails when maxmem size is 976 GiB (12 GiB less + than 988 GiB). + """ + self.ensure_64bit_binary() + self.set_machine('pc-q35-7.1') + self.vm.add_args('-S', '-m', '512,slots=1,maxmem=976G', + '-display', 'none', + '-object', 'memory-backend-ram,id=mem1,size=1G', + '-device', 'pc-dimm,id=vm0,memdev=mem1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + self.vm.wait() + self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") + self.assertRegex(self.vm.get_log(), r'phys-bits too low') + + def test_phybits_ok_tcg_q35_70_amd(self): + """ + Same as q35-7.0 AMD case except that here we check that QEMU can + successfully start when maxmem is < 988G. + """ + self.ensure_64bit_binary() + self.set_machine('pc-q35-7.0') + self.vm.add_args('-S', '-m', '512,slots=1,maxmem=987.5G', + '-display', 'none', + '-object', 'memory-backend-ram,id=mem1,size=1G', + '-device', 'pc-dimm,id=vm0,memdev=mem1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) + self.vm.shutdown() + self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') + + def test_phybits_ok_tcg_q35_71_amd(self): + """ + Same as q35-7.1 AMD case except that here we check that QEMU can + successfully start when maxmem is < 976G. 
+ """ + self.ensure_64bit_binary() + self.set_machine('pc-q35-7.1') + self.vm.add_args('-S', '-m', '512,slots=1,maxmem=975.5G', + '-display', 'none', + '-object', 'memory-backend-ram,id=mem1,size=1G', + '-device', 'pc-dimm,id=vm0,memdev=mem1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) + self.vm.shutdown() + self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') + + def test_phybits_ok_tcg_q35_71_intel(self): + """ + Same parameters as test_phybits_low_tcg_q35_71_amd() but use + Intel cpu instead. QEMU should start fine in this case as + "above_4G" memory starts at 4G. + """ + self.ensure_64bit_binary() + self.set_machine('pc-q35-7.1') + self.vm.add_args('-S', '-cpu', 'Skylake-Server', + '-m', '512,slots=1,maxmem=976G', + '-display', 'none', + '-object', 'memory-backend-ram,id=mem1,size=1G', + '-device', 'pc-dimm,id=vm0,memdev=mem1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) + self.vm.shutdown() + self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') + + def test_phybits_low_tcg_q35_71_amd_41bits(self): + """ + AMD processor with 41 bits. Max cpu hw address = 2 TiB. + By setting maxram above 1012 GiB - 32 GiB - 4 GiB = 976 GiB, we can + force "above_4G" memory to start at 1 TiB for q35-7.1 machines + (max GPA will be above AMD_HT_START which is defined as 1012 GiB). + + With pci_64_hole size at 32 GiB, in this case, maxmem should be 991.5 + GiB with 1 GiB per slot for alignment and 0.5 GiB as non-hotplug + memory for the VM (1024 - 32 - 1 + 0.5). With 992 GiB, QEMU should + fail to start. + """ + self.ensure_64bit_binary() + self.set_machine('pc-q35-7.1') + self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41', + '-m', '512,slots=1,maxmem=992G', + '-display', 'none', + '-object', 'memory-backend-ram,id=mem1,size=1G', + '-device', 'pc-dimm,id=vm0,memdev=mem1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + self.vm.wait() + self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") + self.assertRegex(self.vm.get_log(), r'phys-bits too low') + + def test_phybits_ok_tcg_q35_71_amd_41bits(self): + """ + AMD processor with 41 bits. Max cpu hw address = 2 TiB. + Same as above but by setting maxram between 976 GiB and 992 Gib, + QEMU should start fine. + """ + self.ensure_64bit_binary() + self.set_machine('pc-q35-7.1') + self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41', + '-m', '512,slots=1,maxmem=990G', + '-display', 'none', + '-object', 'memory-backend-ram,id=mem1,size=1G', + '-device', 'pc-dimm,id=vm0,memdev=mem1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) + self.vm.shutdown() + self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') + + def test_phybits_low_tcg_q35_intel_cxl(self): + """ + cxl memory window starts after memory device range. Here, we use 1 GiB + of cxl window memory. 4G_mem end aligns at 4G. pci64_hole is 32 GiB and + starts after the cxl memory window. + So maxmem here should be at most 986 GiB considering all memory boundary + alignment constraints with 40 bits (1 TiB) of processor physical bits. 
+ """ + self.ensure_64bit_binary() + self.set_machine('q35') + self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40', + '-m', '512,slots=1,maxmem=987G', + '-display', 'none', + '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1', + '-M', 'cxl=on,cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=1G') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + self.vm.wait() + self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1") + self.assertRegex(self.vm.get_log(), r'phys-bits too low') + + def test_phybits_ok_tcg_q35_intel_cxl(self): + """ + Same as above but here we do not reserve any cxl memory window. Hence, + with the exact same parameters as above, QEMU should start fine even + with cxl enabled. + """ + self.ensure_64bit_binary() + self.set_machine('q35') + self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40', + '-machine', 'cxl=on', + '-m', '512,slots=1,maxmem=987G', + '-display', 'none', + '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1') + self.vm.set_qmp_monitor(enabled=False) + self.vm.launch() + time.sleep(self.DELAY_Q35_BOOT_SEQUENCE) + self.vm.shutdown() + self.assertNotRegex(self.vm.get_log(), r'phys-bits too low') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_memlock.py b/tests/functional/test_memlock.py new file mode 100755 index 00000000000..2b515ff979f --- /dev/null +++ b/tests/functional/test_memlock.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 +# +# Functional test that check overcommit memlock options +# +# Copyright (c) Yandex Technologies LLC, 2025 +# +# Author: +# Alexandr Moshkov +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import re + +from typing import Dict + +from qemu_test import QemuSystemTest +from qemu_test import skipLockedMemoryTest + + +STATUS_VALUE_PATTERN = re.compile(r'^(\w+):\s+(\d+) kB', re.MULTILINE) + + +@skipLockedMemoryTest(2_097_152) # 2GB +class MemlockTest(QemuSystemTest): + """ + Runs a guest with memlock options. + Then verify, that this options is working correctly + by checking the status file of the QEMU process. 
+ """ + + def common_vm_setup_with_memlock(self, memlock): + self.vm.add_args('-overcommit', f'mem-lock={memlock}') + self.vm.launch() + + def test_memlock_off(self): + self.common_vm_setup_with_memlock('off') + + status = self.get_process_status_values(self.vm.get_pid()) + + self.assertTrue(status['VmLck'] == 0) + + def test_memlock_on(self): + self.common_vm_setup_with_memlock('on') + + status = self.get_process_status_values(self.vm.get_pid()) + + # VmLck > 0 kB and almost all memory is resident + self.assertTrue(status['VmLck'] > 0) + self.assertTrue(status['VmRSS'] >= status['VmSize'] * 0.70) + + def test_memlock_onfault(self): + self.common_vm_setup_with_memlock('on-fault') + + status = self.get_process_status_values(self.vm.get_pid()) + + # VmLck > 0 kB and only few memory is resident + self.assertTrue(status['VmLck'] > 0) + self.assertTrue(status['VmRSS'] <= status['VmSize'] * 0.30) + + def get_process_status_values(self, pid: int) -> Dict[str, int]: + result = {} + raw_status = self._get_raw_process_status(pid) + + for line in raw_status.split('\n'): + if m := STATUS_VALUE_PATTERN.match(line): + result[m.group(1)] = int(m.group(2)) + + return result + + def _get_raw_process_status(self, pid: int) -> str: + try: + with open(f'/proc/{pid}/status', 'r') as f: + return f.read() + except FileNotFoundError: + self.skipTest("Can't open status file of the process") + + +if __name__ == '__main__': + MemlockTest.main() diff --git a/tests/functional/test_microblaze_replay.py b/tests/functional/test_microblaze_replay.py new file mode 100755 index 00000000000..7484c4186f3 --- /dev/null +++ b/tests/functional/test_microblaze_replay.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# +# Replay test that boots a Linux kernel on an microblaze machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from replay_kernel import ReplayKernelBase + + +class MicroblazeReplay(ReplayKernelBase): + + ASSET_DAY17 = Asset( + ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/' + 'day17.tar.xz'), + '3ba7439dfbea7af4876662c97f8e1f0cdad9231fc166e4861d17042489270057') + + def test_microblaze_s3adsp1800(self): + self.set_machine('petalogix-s3adsp1800') + kernel_path = self.archive_extract(self.ASSET_DAY17, + member='day17/ballerina.bin') + self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE, + 'QEMU advent calendar') + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_microblaze_s3adsp1800.py b/tests/functional/test_microblaze_s3adsp1800.py new file mode 100755 index 00000000000..f093b162c0a --- /dev/null +++ b/tests/functional/test_microblaze_s3adsp1800.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a microblaze Linux kernel and checks the console +# +# Copyright (c) 2018, 2021 Red Hat, Inc. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
+ +from qemu_test import exec_command_and_wait_for_pattern +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern + + +class MicroblazeMachine(QemuSystemTest): + + timeout = 90 + + ASSET_IMAGE_BE = Asset( + ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/' + 'day17.tar.xz'), + '3ba7439dfbea7af4876662c97f8e1f0cdad9231fc166e4861d17042489270057') + + ASSET_IMAGE_LE = Asset( + ('http://www.qemu-advent-calendar.org/2023/download/day13.tar.gz'), + 'b9b3d43c5dd79db88ada495cc6e0d1f591153fe41355e925d791fbf44de50c22') + + def do_ballerina_be_test(self, force_endianness=False): + self.set_machine('petalogix-s3adsp1800') + self.archive_extract(self.ASSET_IMAGE_BE) + self.vm.set_console() + self.vm.add_args('-kernel', + self.scratch_file('day17', 'ballerina.bin')) + if force_endianness: + self.vm.add_args('-M', 'endianness=big') + self.vm.launch() + wait_for_console_pattern(self, 'This architecture does not have ' + 'kernel memory protection') + # Note: + # The kernel sometimes gets stuck after the "This architecture ..." + # message, that's why we don't test for a later string here. This + # needs some investigation by a microblaze wizard one day... + + def do_xmaton_le_test(self, force_endianness=False): + self.require_netdev('user') + self.set_machine('petalogix-s3adsp1800') + self.archive_extract(self.ASSET_IMAGE_LE) + self.vm.set_console() + self.vm.add_args('-kernel', self.scratch_file('day13', 'xmaton.bin')) + if force_endianness: + self.vm.add_args('-M', 'endianness=little') + tftproot = self.scratch_file('day13') + self.vm.add_args('-nic', f'user,tftp={tftproot}') + self.vm.launch() + wait_for_console_pattern(self, 'QEMU Advent Calendar 2023') + wait_for_console_pattern(self, 'buildroot login:') + exec_command_and_wait_for_pattern(self, 'root', '#') + exec_command_and_wait_for_pattern(self, + 'tftp -g -r xmaton.png 10.0.2.2 ; md5sum xmaton.png', + '821cd3cab8efd16ad6ee5acc3642a8ea') + + +class MicroblazeBigEndianMachine(MicroblazeMachine): + + ASSET_IMAGE_BE = MicroblazeMachine.ASSET_IMAGE_BE + ASSET_IMAGE_LE = MicroblazeMachine.ASSET_IMAGE_LE + + def test_microblaze_s3adsp1800_legacy_be(self): + self.do_ballerina_be_test() + + def test_microblaze_s3adsp1800_legacy_le(self): + self.do_xmaton_le_test(force_endianness=True) + + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_microblazeel_s3adsp1800.py b/tests/functional/test_microblazeel_s3adsp1800.py new file mode 100755 index 00000000000..915902d48bd --- /dev/null +++ b/tests/functional/test_microblazeel_s3adsp1800.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a microblaze Linux kernel and checks the console +# +# Copyright (c) 2018, 2021 Red Hat, Inc. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
+ +from test_microblaze_s3adsp1800 import MicroblazeMachine + + +class MicroblazeLittleEndianMachine(MicroblazeMachine): + + ASSET_IMAGE_LE = MicroblazeMachine.ASSET_IMAGE_LE + ASSET_IMAGE_BE = MicroblazeMachine.ASSET_IMAGE_BE + + def test_microblaze_s3adsp1800_legacy_le(self): + self.do_xmaton_le_test() + + def test_microblaze_s3adsp1800_legacy_be(self): + self.do_ballerina_be_test(force_endianness=True) + + +if __name__ == '__main__': + MicroblazeMachine.main() diff --git a/tests/functional/test_migration.py b/tests/functional/test_migration.py new file mode 100755 index 00000000000..c4393c35434 --- /dev/null +++ b/tests/functional/test_migration.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 +# +# Migration test +# +# Copyright (c) 2019 Red Hat, Inc. +# +# Authors: +# Cleber Rosa +# Caio Carrara +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +import tempfile +import time + +from qemu_test import QemuSystemTest, skipIfMissingCommands +from qemu_test.ports import Ports + + +class MigrationTest(QemuSystemTest): + + timeout = 10 + + @staticmethod + def migration_finished(vm): + return vm.cmd('query-migrate')['status'] in ('completed', 'failed') + + def assert_migration(self, src_vm, dst_vm): + + end = time.monotonic() + self.timeout + while time.monotonic() < end and not self.migration_finished(src_vm): + time.sleep(0.1) + + end = time.monotonic() + self.timeout + while time.monotonic() < end and not self.migration_finished(dst_vm): + time.sleep(0.1) + + self.assertEqual(src_vm.cmd('query-migrate')['status'], 'completed') + self.assertEqual(dst_vm.cmd('query-migrate')['status'], 'completed') + self.assertEqual(dst_vm.cmd('query-status')['status'], 'running') + self.assertEqual(src_vm.cmd('query-status')['status'],'postmigrate') + + def select_machine(self): + target_machine = { + 'aarch64': 'quanta-gsj', + 'alpha': 'clipper', + 'arm': 'npcm750-evb', + 'i386': 'isapc', + 'ppc': 'sam460ex', + 'ppc64': 'mac99', + 'riscv32': 'spike', + 'riscv64': 'virt', + 'sparc': 'SS-4', + 'sparc64': 'sun4u', + 'x86_64': 'microvm', + } + self.set_machine(target_machine[self.arch]) + + def do_migrate(self, dest_uri, src_uri=None): + self.select_machine() + dest_vm = self.get_vm('-incoming', dest_uri, name="dest-qemu") + dest_vm.add_args('-nodefaults') + dest_vm.launch() + if src_uri is None: + src_uri = dest_uri + source_vm = self.get_vm(name="source-qemu") + source_vm.add_args('-nodefaults') + source_vm.launch() + source_vm.qmp('migrate', uri=src_uri) + self.assert_migration(source_vm, dest_vm) + + def _get_free_port(self, ports): + port = ports.find_free_port() + if port is None: + self.skipTest('Failed to find a free port') + return port + + def test_migration_with_tcp_localhost(self): + with Ports() as ports: + dest_uri = 'tcp:localhost:%u' % self._get_free_port(ports) + self.do_migrate(dest_uri) + + def test_migration_with_unix(self): + with tempfile.TemporaryDirectory(prefix='socket_') as socket_path: + dest_uri = 'unix:%s/qemu-test.sock' % socket_path + self.do_migrate(dest_uri) + + @skipIfMissingCommands('ncat') + def test_migration_with_exec(self): + with Ports() as ports: + free_port = self._get_free_port(ports) + dest_uri = 'exec:ncat -l localhost %u' % free_port + src_uri = 'exec:ncat localhost %u' % free_port + self.do_migrate(dest_uri, src_uri) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_mips64_malta.py b/tests/functional/test_mips64_malta.py new file mode 100755 index 
00000000000..53c3e0c1221 --- /dev/null +++ b/tests/functional/test_mips64_malta.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# +# Functional tests for the big-endian 64-bit MIPS Malta board +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset +from test_mips_malta import mips_check_wheezy + + +class MaltaMachineConsole(LinuxKernelTest): + + ASSET_WHEEZY_KERNEL = Asset( + ('https://people.debian.org/~aurel32/qemu/mips/' + 'vmlinux-3.2.0-4-5kc-malta'), + '3e4ec154db080b3f1839f04dde83120654a33e5e1716863de576c47cb94f68f6') + + ASSET_WHEEZY_DISK = Asset( + ('https://people.debian.org/~aurel32/qemu/mips/' + 'debian_wheezy_mips_standard.qcow2'), + 'de03599285b8382ad309309a6c4869f6c6c42a5cfc983342bab9ec0dfa7849a2') + + def test_wheezy(self): + kernel_path = self.ASSET_WHEEZY_KERNEL.fetch() + image_path = self.ASSET_WHEEZY_DISK.fetch() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0 root=/dev/sda1') + mips_check_wheezy(self, + kernel_path, image_path, kernel_command_line, cpuinfo='MIPS 20Kc', + dl_file='/boot/initrd.img-3.2.0-4-5kc-malta', + hsum='d98b953bb4a41c0fc0fd8d19bbc691c08989ac52568c1d3054d92dfd890d3f06') + + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_mips64_tuxrun.py b/tests/functional/test_mips64_tuxrun.py new file mode 100755 index 00000000000..0e4c65961d2 --- /dev/null +++ b/tests/functional/test_mips64_tuxrun.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunMips64Test(TuxRunBaselineTest): + + ASSET_MIPS64_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/mips64/vmlinux', + 'fe2882d216898ba2c56b49ba59f46ad392f36871f7fe325373cd926848b9dbdc') + ASSET_MIPS64_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/mips64/rootfs.ext4.zst', + 'b8c98400216b6d4fb3b3ff05e9929aa015948b596cf0b82234813c84a4f7f4d5') + + def test_mips64(self): + self.set_machine('malta') + self.root="sda" + self.wait_for_shutdown=False + self.common_tuxrun(kernel_asset=self.ASSET_MIPS64_KERNEL, + rootfs_asset=self.ASSET_MIPS64_ROOTFS, + drive="driver=ide-hd,bus=ide.0,unit=0") + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_mips64el_fuloong2e.py b/tests/functional/test_mips64el_fuloong2e.py new file mode 100755 index 00000000000..35e500b0221 --- /dev/null +++ b/tests/functional/test_mips64el_fuloong2e.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# +# Functional tests for the Lemote Fuloong-2E machine. +# +# Copyright (c) 2019 Philippe Mathieu-Daudé +# +# This work is licensed under the terms of the GNU GPL, version 2 or later. +# See the COPYING file in the top-level directory. 
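The migration test earlier in this patch drives everything through QMP helpers; spelled out as raw QMP payloads, the exchange looks roughly like this (the port number is arbitrary and the response is abridged):

    # Sent to the source VM after the destination was started with
    # '-incoming tcp:localhost:4444':
    migrate_cmd = {"execute": "migrate",
                   "arguments": {"uri": "tcp:localhost:4444"}}
    # Polled on both sides until the status settles:
    query_cmd = {"execute": "query-migrate"}
    # e.g. {"return": {"status": "completed", ...}} on success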
+# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import subprocess + +from qemu_test import LinuxKernelTest, Asset +from qemu_test import wait_for_console_pattern, skipUntrustedTest +from unittest import skipUnless + +class MipsFuloong2e(LinuxKernelTest): + + timeout = 60 + + ASSET_KERNEL = Asset( + ('http://archive.debian.org/debian/pool/main/l/linux/' + 'linux-image-3.16.0-6-loongson-2e_3.16.56-1+deb8u1_mipsel.deb'), + '2a70f15b397f4ced632b0c15cb22660394190644146d804d60a4796eefbe1f50') + + def test_linux_kernel_3_16(self): + kernel_path = self.archive_extract( + self.ASSET_KERNEL, + member='boot/vmlinux-3.16.0-6-loongson-2e') + + self.set_machine('fuloong2e') + self.vm.set_console() + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' + self.vm.add_args('-kernel', kernel_path, + '-append', kernel_command_line) + self.vm.launch() + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.wait_for_console_pattern(console_pattern) + + @skipUntrustedTest() + @skipUnless(os.getenv('RESCUE_YL_PATH'), 'RESCUE_YL_PATH not available') + def test_linux_kernel_2_6_27_isa_serial(self): + # Recovery system for the Yeeloong laptop + # (enough to test the fuloong2e southbridge, accessing its ISA bus) + # http://dev.lemote.com/files/resource/download/rescue/rescue-yl + sha = 'ab588d3316777c62cc81baa20ac92e98b01955c244dff3794b711bc34e26e51d' + kernel_path = os.getenv('RESCUE_YL_PATH') + output = subprocess.check_output(['sha256sum', kernel_path]) + checksum = output.split()[0] + assert checksum.decode("utf-8") == sha + + self.set_machine('fuloong2e') + self.vm.set_console() + self.vm.add_args('-kernel', kernel_path) + self.vm.launch() + wait_for_console_pattern(self, 'Linux version 2.6.27.7lemote') + cpu_revision = 'CPU revision is: 00006302 (ICT Loongson-2)' + wait_for_console_pattern(self, cpu_revision) + + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_mips64el_loongson3v.py b/tests/functional/test_mips64el_loongson3v.py new file mode 100755 index 00000000000..f85371e50cd --- /dev/null +++ b/tests/functional/test_mips64el_loongson3v.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# +# Functional tests for the Generic Loongson-3 Platform. +# +# Copyright (c) 2021 Jiaxun Yang +# +# This work is licensed under the terms of the GNU GPL, version 2 or later. +# See the COPYING file in the top-level directory. 
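The RESCUE_YL_PATH check above shells out to sha256sum; purely as a sketch, the same verification could be done in-process with hashlib, reading in chunks so large images need not fit in memory:

    import hashlib

    def sha256_of(path: str, chunk: int = 1 << 20) -> str:
        h = hashlib.sha256()
        with open(path, 'rb') as f:
            while block := f.read(chunk):
                h.update(block)
        return h.hexdigest()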
+# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern, skipUntrustedTest + + +class MipsLoongson3v(QemuSystemTest): + timeout = 60 + + ASSET_PMON = Asset( + ('https://github.com/loongson-community/pmon/' + 'releases/download/20210112/pmon-3avirt.bin'), + 'fcdf6bb2cb7885a4a62f31fcb0d5e368bac7b6cea28f40c6dfa678af22fea20a') + + @skipUntrustedTest() + def test_pmon_serial_console(self): + self.set_machine('loongson3-virt') + + pmon_path = self.ASSET_PMON.fetch() + + self.vm.set_console() + self.vm.add_args('-bios', pmon_path) + self.vm.launch() + wait_for_console_pattern(self, 'CPU GODSON3 BogoMIPS:') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_mips64el_malta.py b/tests/functional/test_mips64el_malta.py new file mode 100755 index 00000000000..3cc79b74c18 --- /dev/null +++ b/tests/functional/test_mips64el_malta.py @@ -0,0 +1,197 @@ +#!/usr/bin/env python3 +# +# Functional tests for the little-endian 64-bit MIPS Malta board +# +# Copyright (c) Philippe Mathieu-Daudé +# +# This work is licensed under the terms of the GNU GPL, version 2 or later. +# See the COPYING file in the top-level directory. +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import logging + +from qemu_test import LinuxKernelTest, Asset +from qemu_test import exec_command_and_wait_for_pattern +from qemu_test import skipIfMissingImports, skipFlakyTest, skipUntrustedTest + +from test_mips_malta import mips_check_wheezy + + +class MaltaMachineConsole(LinuxKernelTest): + + ASSET_KERNEL_2_63_2 = Asset( + ('http://snapshot.debian.org/archive/debian/' + '20130217T032700Z/pool/main/l/linux-2.6/' + 'linux-image-2.6.32-5-5kc-malta_2.6.32-48_mipsel.deb'), + '35eb476f03be589824b0310358f1c447d85e645b88cbcd2ac02b97ef560f9f8d') + + def test_mips64el_malta(self): + """ + This test requires the ar tool to extract "data.tar.gz" from + the Debian package. + + The kernel can be rebuilt using this Debian kernel source [1] and + following the instructions on [2]. 
+ + [1] http://snapshot.debian.org/package/linux-2.6/2.6.32-48/ + #linux-source-2.6.32_2.6.32-48 + [2] https://kernel-team.pages.debian.net/kernel-handbook/ + ch-common-tasks.html#s-common-official + """ + kernel_path = self.archive_extract( + self.ASSET_KERNEL_2_63_2, + member='boot/vmlinux-2.6.32-5-5kc-malta') + + self.set_machine('malta') + self.vm.set_console() + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' + self.vm.add_args('-kernel', kernel_path, + '-append', kernel_command_line) + self.vm.launch() + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.wait_for_console_pattern(console_pattern) + + ASSET_KERNEL_3_19_3 = Asset( + ('https://github.com/philmd/qemu-testing-blob/' + 'raw/9ad2df38/mips/malta/mips64el/' + 'vmlinux-3.19.3.mtoman.20150408'), + '8d3beb003bc66051ead98e7172139017fcf9ce2172576541c57e86418dfa5ab8') + + ASSET_CPIO_R1 = Asset( + ('https://github.com/groeck/linux-build-test/' + 'raw/8584a59e/rootfs/mipsel64/' + 'rootfs.mipsel64r1.cpio.gz'), + '75ba10cd35fb44e32948eeb26974f061b703c81c4ba2fab1ebcacf1d1bec3b61') + + @skipUntrustedTest() + def test_mips64el_malta_5KEc_cpio(self): + kernel_path = self.ASSET_KERNEL_3_19_3.fetch() + initrd_path = self.uncompress(self.ASSET_CPIO_R1) + + self.set_machine('malta') + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0 console=tty ' + + 'rdinit=/sbin/init noreboot') + self.vm.add_args('-cpu', '5KEc', + '-kernel', kernel_path, + '-initrd', initrd_path, + '-append', kernel_command_line, + '-no-reboot') + self.vm.launch() + self.wait_for_console_pattern('Boot successful.') + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'MIPS 5KE') + exec_command_and_wait_for_pattern(self, 'uname -a', + '3.19.3.mtoman.20150408') + exec_command_and_wait_for_pattern(self, 'reboot', + 'reboot: Restarting system') + # Wait for VM to shut down gracefully + self.vm.wait() + + ASSET_WHEEZY_KERNEL = Asset( + ('https://people.debian.org/~aurel32/qemu/mipsel/' + 'vmlinux-3.2.0-4-5kc-malta'), + '5e8b725244c59745bb8b64f5d8f49f25fecfa549f3395fb6d19a3b9e5065b85b') + + ASSET_WHEEZY_DISK = Asset( + ('https://people.debian.org/~aurel32/qemu/mipsel/' + 'debian_wheezy_mipsel_standard.qcow2'), + '454f09ae39f7e6461c84727b927100d2c7813841f2a0a5dce328114887ecf914') + + def test_wheezy(self): + kernel_path = self.ASSET_WHEEZY_KERNEL.fetch() + image_path = self.ASSET_WHEEZY_DISK.fetch() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0 root=/dev/sda1') + mips_check_wheezy(self, + kernel_path, image_path, kernel_command_line, cpuinfo='MIPS 20Kc', + dl_file='/boot/initrd.img-3.2.0-4-5kc-malta', + hsum='7579f8b56c1187c7c04d0dc3c0c56c7a6314c5ddd3a9bf8803ecc7cf8a3be9f8') + + +@skipIfMissingImports('numpy', 'cv2') +class MaltaMachineFramebuffer(LinuxKernelTest): + + timeout = 30 + + ASSET_KERNEL_4_7_0 = Asset( + ('https://github.com/philmd/qemu-testing-blob/raw/a5966ca4b5/' + 'mips/malta/mips64el/vmlinux-4.7.0-rc1.I6400.gz'), + '1f64efc59968a3c328672e6b10213fe574bb2308d9d2ed44e75e40be59e9fbc2') + + ASSET_TUXLOGO = Asset( + ('https://github.com/torvalds/linux/raw/v2.6.12/' + 'drivers/video/logo/logo_linux_vga16.ppm'), + 'b762f0d91ec018887ad1b334543c2fdf9be9fdfc87672b409211efaa3ea0ef79') + + def do_test_i6400_framebuffer_logo(self, cpu_cores_count): + """ + Boot Linux kernel and check Tux logo is displayed on the framebuffer. 
+ """ + + import numpy as np + import cv2 + + screendump_path = self.scratch_file('screendump.pbm') + + kernel_path = self.uncompress(self.ASSET_KERNEL_4_7_0) + + tuxlogo_path = self.ASSET_TUXLOGO.fetch() + + self.set_machine('malta') + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'clocksource=GIC console=tty0 console=ttyS0') + self.vm.add_args('-kernel', kernel_path, + '-cpu', 'I6400', + '-smp', '%u' % cpu_cores_count, + '-vga', 'std', + '-append', kernel_command_line) + self.vm.launch() + framebuffer_ready = 'Console: switching to colour frame buffer device' + self.wait_for_console_pattern(framebuffer_ready) + self.vm.cmd('human-monitor-command', command_line='stop') + res = self.vm.cmd('human-monitor-command', + command_line='screendump %s' % screendump_path) + if 'unknown command' in res: + self.skipTest('screendump not available') + logger = logging.getLogger('framebuffer') + + match_threshold = 0.95 + screendump_bgr = cv2.imread(screendump_path, cv2.IMREAD_COLOR) + tuxlogo_bgr = cv2.imread(tuxlogo_path, cv2.IMREAD_COLOR) + result = cv2.matchTemplate(screendump_bgr, tuxlogo_bgr, + cv2.TM_CCOEFF_NORMED) + loc = np.where(result >= match_threshold) + tuxlogo_count = 0 + h, w = tuxlogo_bgr.shape[:2] + debug_png = os.getenv('QEMU_TEST_CV2_SCREENDUMP_PNG_PATH') + for tuxlogo_count, pt in enumerate(zip(*loc[::-1]), start=1): + logger.debug('found Tux at position (x, y) = %s', pt) + cv2.rectangle(screendump_bgr, pt, + (pt[0] + w, pt[1] + h), (0, 0, 255), 2) + if debug_png: + cv2.imwrite(debug_png, screendump_bgr) + self.assertGreaterEqual(tuxlogo_count, cpu_cores_count) + + def test_mips_malta_i6400_framebuffer_logo_1core(self): + self.do_test_i6400_framebuffer_logo(1) + + # XXX file tracking bug + @skipFlakyTest(bug_url=None) + def test_mips_malta_i6400_framebuffer_logo_7cores(self): + self.do_test_i6400_framebuffer_logo(7) + + @skipFlakyTest(bug_url=None) + def test_mips_malta_i6400_framebuffer_logo_8cores(self): + self.do_test_i6400_framebuffer_logo(8) + + +from test_mipsel_malta import MaltaMachineYAMON + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_mips64el_replay.py b/tests/functional/test_mips64el_replay.py new file mode 100755 index 00000000000..26a6ccff3f7 --- /dev/null +++ b/tests/functional/test_mips64el_replay.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 +# +# Replay tests for the little-endian 64-bit MIPS Malta board +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset, skipUntrustedTest +from replay_kernel import ReplayKernelBase + + +class Mips64elReplay(ReplayKernelBase): + + ASSET_KERNEL_2_63_2 = Asset( + ('http://snapshot.debian.org/archive/debian/' + '20130217T032700Z/pool/main/l/linux-2.6/' + 'linux-image-2.6.32-5-5kc-malta_2.6.32-48_mipsel.deb'), + '35eb476f03be589824b0310358f1c447d85e645b88cbcd2ac02b97ef560f9f8d') + + def test_replay_mips64el_malta(self): + self.set_machine('malta') + kernel_path = self.archive_extract(self.ASSET_KERNEL_2_63_2, + member='boot/vmlinux-2.6.32-5-5kc-malta') + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) + + + ASSET_KERNEL_3_19_3 = Asset( + ('https://github.com/philmd/qemu-testing-blob/' + 'raw/9ad2df38/mips/malta/mips64el/' + 'vmlinux-3.19.3.mtoman.20150408'), + '8d3beb003bc66051ead98e7172139017fcf9ce2172576541c57e86418dfa5ab8') + + ASSET_CPIO_R1 = Asset( + 
('https://github.com/groeck/linux-build-test/' + 'raw/8584a59e/rootfs/mipsel64/' + 'rootfs.mipsel64r1.cpio.gz'), + '75ba10cd35fb44e32948eeb26974f061b703c81c4ba2fab1ebcacf1d1bec3b61') + + @skipUntrustedTest() + def test_replay_mips64el_malta_5KEc_cpio(self): + self.set_machine('malta') + self.cpu = '5KEc' + kernel_path = self.ASSET_KERNEL_3_19_3.fetch() + initrd_path = self.uncompress(self.ASSET_CPIO_R1) + + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0 console=tty ' + 'rdinit=/sbin/init noreboot') + console_pattern = 'Boot successful.' + self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5, + args=('-initrd', initrd_path)) + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_mips64el_tuxrun.py b/tests/functional/test_mips64el_tuxrun.py new file mode 100755 index 00000000000..0a24757c518 --- /dev/null +++ b/tests/functional/test_mips64el_tuxrun.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunMips64ELTest(TuxRunBaselineTest): + + ASSET_MIPS64EL_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/mips64el/vmlinux', + '0d2829a96f005229839c4cd586d4d8a136ea4b488d29821611c8e97f2266bfa9') + ASSET_MIPS64EL_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/mips64el/rootfs.ext4.zst', + '69c8b69a4f1582ce4c6f01a994968f5d73bffb2fc99cbeeeb26c8b5a28eaeb84') + + def test_mips64el(self): + self.set_machine('malta') + self.root="sda" + self.wait_for_shutdown=False + self.common_tuxrun(kernel_asset=self.ASSET_MIPS64EL_KERNEL, + rootfs_asset=self.ASSET_MIPS64EL_ROOTFS, + drive="driver=ide-hd,bus=ide.0,unit=0") + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_mips_malta.py b/tests/functional/test_mips_malta.py new file mode 100755 index 00000000000..30279f0ff21 --- /dev/null +++ b/tests/functional/test_mips_malta.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python3 +# +# Functional tests for the big-endian 32-bit MIPS Malta board +# +# Copyright (c) Philippe Mathieu-Daudé +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +from qemu_test import LinuxKernelTest, Asset, wait_for_console_pattern +from qemu_test import exec_command_and_wait_for_pattern + + +def mips_run_common_commands(test, prompt='#'): + exec_command_and_wait_for_pattern(test, + 'uname -m', + 'mips') + exec_command_and_wait_for_pattern(test, + 'grep XT-PIC /proc/interrupts', + 'timer') + wait_for_console_pattern(test, prompt) + exec_command_and_wait_for_pattern(test, + 'grep XT-PIC /proc/interrupts', + 'serial') + wait_for_console_pattern(test, prompt) + exec_command_and_wait_for_pattern(test, + 'grep XT-PIC /proc/interrupts', + 'ata_piix') + wait_for_console_pattern(test, prompt) + exec_command_and_wait_for_pattern(test, + 'grep XT-PIC /proc/interrupts', + 'rtc') + wait_for_console_pattern(test, prompt) + exec_command_and_wait_for_pattern(test, + 'cat /proc/devices', + 'input') + wait_for_console_pattern(test, prompt) + exec_command_and_wait_for_pattern(test, + 'cat /proc/devices', + 'fb') + wait_for_console_pattern(test, prompt) + exec_command_and_wait_for_pattern(test, + 'cat 
/proc/ioports', + ' : serial') + wait_for_console_pattern(test, prompt) + exec_command_and_wait_for_pattern(test, + 'cat /proc/ioports', + ' : ata_piix') + wait_for_console_pattern(test, prompt) + +def mips_check_wheezy(test, kernel_path, image_path, kernel_command_line, + dl_file, hsum, nic='pcnet', cpuinfo='MIPS 24Kc'): + test.require_netdev('user') + test.require_device(nic) + test.set_machine('malta') + + port=8080 + test.vm.add_args('-kernel', kernel_path, + '-append', kernel_command_line, + '-drive', 'file=%s,snapshot=on' % image_path, + '-netdev', 'user,id=n1' + + ',tftp=' + os.path.basename(kernel_path) + + ',hostfwd=tcp:127.0.0.1:0-:%d' % port, + '-device', f'{nic},netdev=n1', + '-no-reboot') + test.vm.set_console() + test.vm.launch() + + wait_for_console_pattern(test, 'login: ', 'Oops') + exec_command_and_wait_for_pattern(test, 'root', 'Password:') + exec_command_and_wait_for_pattern(test, 'root', ':~# ') + mips_run_common_commands(test) + + exec_command_and_wait_for_pattern(test, 'cd /', '# ') + test.check_http_download(dl_file, hsum, port, + pythoncmd='python -m SimpleHTTPServer') + + exec_command_and_wait_for_pattern(test, 'cat /proc/cpuinfo', cpuinfo) + exec_command_and_wait_for_pattern(test, 'cat /proc/devices', 'usb') + exec_command_and_wait_for_pattern(test, 'cat /proc/ioports', + ' : piix4_smbus') + exec_command_and_wait_for_pattern(test, 'lspci -d 11ab:4620', + 'GT-64120') + exec_command_and_wait_for_pattern(test, + 'cat /sys/bus/i2c/devices/i2c-0/name', + 'SMBus PIIX4 adapter') + exec_command_and_wait_for_pattern(test, 'cat /proc/mtd', 'YAMON') + # Empty 'Board Config' (64KB) + exec_command_and_wait_for_pattern(test, 'md5sum /dev/mtd2ro', + '0dfbe8aa4c20b52e1b8bf3cb6cbdf193') + + +class MaltaMachineConsole(LinuxKernelTest): + + ASSET_KERNEL_2_63_2 = Asset( + ('http://snapshot.debian.org/archive/debian/' + '20130217T032700Z/pool/main/l/linux-2.6/' + 'linux-image-2.6.32-5-4kc-malta_2.6.32-48_mips.deb'), + '16ca524148afb0626f483163e5edf352bc1ab0e4fc7b9f9d473252762f2c7a43') + + def test_mips_malta(self): + kernel_path = self.archive_extract( + self.ASSET_KERNEL_2_63_2, + member='boot/vmlinux-2.6.32-5-4kc-malta') + + self.set_machine('malta') + self.vm.set_console() + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' + self.vm.add_args('-kernel', kernel_path, + '-append', kernel_command_line) + self.vm.launch() + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.wait_for_console_pattern(console_pattern) + + ASSET_KERNEL_4_5_0 = Asset( + ('http://snapshot.debian.org/archive/debian/' + '20160601T041800Z/pool/main/l/linux/' + 'linux-image-4.5.0-2-4kc-malta_4.5.5-1_mips.deb'), + '526b17d5889840888b76fc2c36a0ebde182c9b1410a3a1e68203c3b160eb2027') + + ASSET_INITRD = Asset( + ('https://github.com/groeck/linux-build-test/raw/' + '8584a59ed9e5eb5ee7ca91f6d74bbb06619205b8/rootfs/' + 'mips/rootfs.cpio.gz'), + 'dcfe3a7fe3200da3a00d176b95caaa086495eb158f2bff64afc67d7e1eb2cddc') + + def test_mips_malta_cpio(self): + self.require_netdev('user') + self.set_machine('malta') + self.require_device('pcnet') + + kernel_path = self.archive_extract( + self.ASSET_KERNEL_4_5_0, + member='boot/vmlinux-4.5.0-2-4kc-malta') + initrd_path = self.uncompress(self.ASSET_INITRD) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0 console=tty ' + + 'rdinit=/sbin/init noreboot') + self.vm.add_args('-kernel', kernel_path, + '-initrd', initrd_path, + '-append', kernel_command_line, + '-netdev', 'user,id=n1,tftp=' + 
self.scratch_file('boot'), + '-device', 'pcnet,netdev=n1', + '-no-reboot') + self.vm.launch() + self.wait_for_console_pattern('Boot successful.') + + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'BogoMIPS') + exec_command_and_wait_for_pattern(self, 'uname -a', + '4.5.0-2-4kc-malta #1 Debian') + mips_run_common_commands(self) + + exec_command_and_wait_for_pattern(self, 'ip link set eth0 up', + 'eth0: link up') + exec_command_and_wait_for_pattern(self, + 'ip addr add 10.0.2.15 dev eth0', + '#') + exec_command_and_wait_for_pattern(self, 'route add default eth0', '#') + exec_command_and_wait_for_pattern(self, + 'tftp -g -r vmlinux-4.5.0-2-4kc-malta 10.0.2.2', '#') + exec_command_and_wait_for_pattern(self, + 'md5sum vmlinux-4.5.0-2-4kc-malta', + 'a98218a7efbdefb2dfdf9ecd08c98318') + + exec_command_and_wait_for_pattern(self, 'reboot', + 'reboot: Restarting system') + # Wait for VM to shut down gracefully + self.vm.wait() + + ASSET_WHEEZY_KERNEL = Asset( + ('https://people.debian.org/~aurel32/qemu/mips/' + 'vmlinux-3.2.0-4-4kc-malta'), + '0377fcda31299213c10b8e5babe7260ef99188b3ae1aca6f56594abb71e7f67e') + + ASSET_WHEEZY_DISK = Asset( + ('https://people.debian.org/~aurel32/qemu/mips/' + 'debian_wheezy_mips_standard.qcow2'), + 'de03599285b8382ad309309a6c4869f6c6c42a5cfc983342bab9ec0dfa7849a2') + + def test_wheezy(self): + kernel_path = self.ASSET_WHEEZY_KERNEL.fetch() + image_path = self.ASSET_WHEEZY_DISK.fetch() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0 root=/dev/sda1') + mips_check_wheezy(self, + kernel_path, image_path, kernel_command_line, nic='e1000', + dl_file='/boot/initrd.img-3.2.0-4-4kc-malta', + hsum='ff0c0369143d9bbb9a6e6bc79322a2be535619df639e84103237f406e87493dc') + + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_mips_replay.py b/tests/functional/test_mips_replay.py new file mode 100755 index 00000000000..4327481e35b --- /dev/null +++ b/tests/functional/test_mips_replay.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +# +# Replay tests for the big-endian 32-bit MIPS Malta board +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset, skipSlowTest +from replay_kernel import ReplayKernelBase + + +class MipsReplay(ReplayKernelBase): + + ASSET_KERNEL_2_63_2 = Asset( + ('http://snapshot.debian.org/archive/debian/' + '20130217T032700Z/pool/main/l/linux-2.6/' + 'linux-image-2.6.32-5-4kc-malta_2.6.32-48_mips.deb'), + '16ca524148afb0626f483163e5edf352bc1ab0e4fc7b9f9d473252762f2c7a43') + + def test_replay_mips_malta(self): + self.set_machine('malta') + kernel_path = self.archive_extract(self.ASSET_KERNEL_2_63_2, + member='boot/vmlinux-2.6.32-5-4kc-malta') + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) + + ASSET_KERNEL_4_5_0 = Asset( + ('http://snapshot.debian.org/archive/debian/' + '20160601T041800Z/pool/main/l/linux/' + 'linux-image-4.5.0-2-4kc-malta_4.5.5-1_mips.deb'), + '526b17d5889840888b76fc2c36a0ebde182c9b1410a3a1e68203c3b160eb2027') + + ASSET_INITRD = Asset( + ('https://github.com/groeck/linux-build-test/raw/' + '8584a59ed9e5eb5ee7ca91f6d74bbb06619205b8/rootfs/' + 'mips/rootfs.cpio.gz'), + 'dcfe3a7fe3200da3a00d176b95caaa086495eb158f2bff64afc67d7e1eb2cddc') + + @skipSlowTest() + def test_replay_mips_malta_cpio(self): + self.set_machine('malta') + kernel_path = self.archive_extract(self.ASSET_KERNEL_4_5_0, + 
member='boot/vmlinux-4.5.0-2-4kc-malta') + initrd_path = self.uncompress(self.ASSET_INITRD) + + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0 console=tty ' + 'rdinit=/sbin/init noreboot') + console_pattern = 'Boot successful.' + self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5, + args=('-initrd', initrd_path)) + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_mips_tuxrun.py b/tests/functional/test_mips_tuxrun.py new file mode 100755 index 00000000000..6771dbd57ea --- /dev/null +++ b/tests/functional/test_mips_tuxrun.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunMipsTest(TuxRunBaselineTest): + + ASSET_MIPS_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/mips32/vmlinux', + 'b6f97fc698ae8c96456ad8c996c7454228074df0d7520dedd0a15e2913700a19') + ASSET_MIPS_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/mips32/rootfs.ext4.zst', + '87055cf3cbde3fd134e5039e7b87feb03231d8c4b21ee712b8ba3308dfa72f50') + + def test_mips32(self): + self.set_machine('malta') + self.cpu="mips32r6-generic" + self.root="sda" + self.wait_for_shutdown=False + self.common_tuxrun(kernel_asset=self.ASSET_MIPS_KERNEL, + rootfs_asset=self.ASSET_MIPS_ROOTFS, + drive="driver=ide-hd,bus=ide.0,unit=0") + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_mipsel_malta.py b/tests/functional/test_mipsel_malta.py new file mode 100755 index 00000000000..9ee2884da8e --- /dev/null +++ b/tests/functional/test_mipsel_malta.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 +# +# Functional tests for the little-endian 32-bit MIPS Malta board +# +# Copyright (c) Philippe Mathieu-Daudé +# +# This work is licensed under the terms of the GNU GPL, version 2 or later. +# See the COPYING file in the top-level directory. 
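The mips_check_wheezy helper in test_mips_malta.py above builds its -netdev option so that QEMU forwards a host loopback port into the guest before check_http_download fetches a file from it. A short sketch of the hostfwd part (the guest port comes from the test; the host port is chosen by the OS):

    guest_port = 8080
    # hostfwd syntax: tcp:<host-addr>:<host-port>-<guest-addr>:<guest-port>;
    # a host port of 0 lets the operating system pick a free one.
    netdev_opt = f'user,id=n1,hostfwd=tcp:127.0.0.1:0-:{guest_port}'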
+# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import QemuSystemTest, LinuxKernelTest, Asset +from qemu_test import interrupt_interactive_console_until_pattern +from qemu_test import wait_for_console_pattern + +from test_mips_malta import mips_check_wheezy + + +class MaltaMachineConsole(LinuxKernelTest): + + ASSET_KERNEL_4K = Asset( + ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' + 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' + 'generic_nano32r6el_page4k.xz'), + '019e034094ac6cf3aa77df5e130fb023ce4dbc804b04bfcc560c6403e1ae6bdb') + ASSET_KERNEL_16K = Asset( + ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' + 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' + 'generic_nano32r6el_page16k_up.xz'), + '3a54a10b3108c16a448dca9ea3db378733a27423befc2a45a5bdf990bd85e12c') + ASSET_KERNEL_64K = Asset( + ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' + 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' + 'generic_nano32r6el_page64k_dbg.xz'), + 'ce21ff4b07a981ecb8a39db2876616f5a2473eb2ab459c6f67465b9914b0c6b6') + + def do_test_mips_malta32el_nanomips(self, kernel): + kernel_path = self.uncompress(kernel) + + self.set_machine('malta') + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'mem=256m@@0x0 ' + + 'console=ttyS0') + self.vm.add_args('-cpu', 'I7200', + '-no-reboot', + '-kernel', kernel_path, + '-append', kernel_command_line) + self.vm.launch() + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.wait_for_console_pattern(console_pattern) + + def test_mips_malta32el_nanomips_4k(self): + self.do_test_mips_malta32el_nanomips(self.ASSET_KERNEL_4K) + + def test_mips_malta32el_nanomips_16k_up(self): + self.do_test_mips_malta32el_nanomips(self.ASSET_KERNEL_16K) + + def test_mips_malta32el_nanomips_64k_dbg(self): + self.do_test_mips_malta32el_nanomips(self.ASSET_KERNEL_64K) + + ASSET_WHEEZY_KERNEL = Asset( + ('https://people.debian.org/~aurel32/qemu/mipsel/' + 'vmlinux-3.2.0-4-4kc-malta'), + 'dc8a3648305b0201ca7a5cd135fe2890067a65d93c38728022bb0e656ad2bf9a') + + ASSET_WHEEZY_DISK = Asset( + ('https://people.debian.org/~aurel32/qemu/mipsel/' + 'debian_wheezy_mipsel_standard.qcow2'), + '454f09ae39f7e6461c84727b927100d2c7813841f2a0a5dce328114887ecf914') + + def test_wheezy(self): + kernel_path = self.ASSET_WHEEZY_KERNEL.fetch() + image_path = self.ASSET_WHEEZY_DISK.fetch() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0 root=/dev/sda1') + mips_check_wheezy(self, + kernel_path, image_path, kernel_command_line, + dl_file='/boot/initrd.img-3.2.0-4-4kc-malta', + hsum='9fc9f250ed56a74e35e704ddfd5a1c5a5625adefc5c9da91f649288d3ca000f0') + + +class MaltaMachineYAMON(QemuSystemTest): + + ASSET_YAMON_ROM = Asset( + ('https://s3-eu-west-1.amazonaws.com/downloads-mips/mips-downloads/' + 'YAMON/yamon-bin-02.22.zip'), + 'eef86f0eed0ef554f041dcd47b87eebea0e6f9f1184ed31f7e9e8b4a803860ab') + + def test_mipsel_malta_yamon(self): + yamon_bin = 'yamon-02.22.bin' + self.archive_extract(self.ASSET_YAMON_ROM) + yamon_path = self.scratch_file(yamon_bin) + + self.set_machine('malta') + self.vm.set_console() + self.vm.add_args('-bios', yamon_path) + self.vm.launch() + + prompt = 'YAMON>' + pattern = 'YAMON ROM Monitor' + interrupt_interactive_console_until_pattern(self, pattern, prompt) + wait_for_console_pattern(self, prompt) + self.vm.shutdown() + + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_mipsel_replay.py b/tests/functional/test_mipsel_replay.py new file 
mode 100644 index 00000000000..5f4796cf899 --- /dev/null +++ b/tests/functional/test_mipsel_replay.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# +# Replay tests for the little-endian 32-bit MIPS Malta board +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset, skipSlowTest +from replay_kernel import ReplayKernelBase + + +class MipselReplay(ReplayKernelBase): + + ASSET_KERNEL_4K = Asset( + ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' + 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' + 'generic_nano32r6el_page4k.xz'), + '019e034094ac6cf3aa77df5e130fb023ce4dbc804b04bfcc560c6403e1ae6bdb') + ASSET_KERNEL_16K = Asset( + ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' + 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' + 'generic_nano32r6el_page16k_up.xz'), + '3a54a10b3108c16a448dca9ea3db378733a27423befc2a45a5bdf990bd85e12c') + ASSET_KERNEL_64K = Asset( + ('http://mipsdistros.mips.com/LinuxDistro/nanomips/' + 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' + 'generic_nano32r6el_page64k_dbg.xz'), + 'ce21ff4b07a981ecb8a39db2876616f5a2473eb2ab459c6f67465b9914b0c6b6') + + def do_test_replay_mips_malta32el_nanomips(self, kernel_asset): + self.set_machine('malta') + self.cpu = 'I7200' + kernel_path = self.uncompress(kernel_asset) + + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'mem=256m@@0x0 ' + 'console=ttyS0') + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) + + @skipSlowTest() + def test_replay_mips_malta32el_nanomips_4k(self): + self.do_test_replay_mips_malta32el_nanomips(self.ASSET_KERNEL_4K) + + @skipSlowTest() + def test_replay_mips_malta32el_nanomips_16k_up(self): + self.do_test_replay_mips_malta32el_nanomips(self.ASSET_KERNEL_16K) + + @skipSlowTest() + def test_replay_mips_malta32el_nanomips_64k_dbg(self): + self.do_test_replay_mips_malta32el_nanomips(self.ASSET_KERNEL_64K) + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_mipsel_tuxrun.py b/tests/functional/test_mipsel_tuxrun.py new file mode 100755 index 00000000000..d4b39baab59 --- /dev/null +++ b/tests/functional/test_mipsel_tuxrun.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. 
+# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunMipsELTest(TuxRunBaselineTest): + + ASSET_MIPSEL_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/mips32el/vmlinux', + '660dd8c7a6ca7a32d37b4e6348865532ab0edb66802e8cc07869338444cf4929') + ASSET_MIPSEL_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/mips32el/rootfs.ext4.zst', + 'c5d69542bcaed54a4f34671671eb4be5c608ee02671d4d0436544367816a73b1') + + def test_mips32el(self): + self.set_machine('malta') + self.cpu="mips32r6-generic" + self.root="sda" + self.wait_for_shutdown=False + self.common_tuxrun(kernel_asset=self.ASSET_MIPSEL_KERNEL, + rootfs_asset=self.ASSET_MIPSEL_ROOTFS, + drive="driver=ide-hd,bus=ide.0,unit=0") + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_multiprocess.py b/tests/functional/test_multiprocess.py new file mode 100755 index 00000000000..92d5207b0eb --- /dev/null +++ b/tests/functional/test_multiprocess.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +# +# Test for multiprocess qemu +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + + +import os +import socket + +from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern +from qemu_test import exec_command, exec_command_and_wait_for_pattern + +class Multiprocess(QemuSystemTest): + + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + + ASSET_KERNEL_X86 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux' + '/releases/31/Everything/x86_64/os/images/pxeboot/vmlinuz'), + 'd4738d03dbbe083ca610d0821d0a8f1488bebbdccef54ce33e3adb35fda00129') + + ASSET_INITRD_X86 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux' + '/releases/31/Everything/x86_64/os/images/pxeboot/initrd.img'), + '3b6cb5c91a14c42e2f61520f1689264d865e772a1f0069e660a800d31dd61fb9') + + ASSET_KERNEL_AARCH64 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux' + '/releases/31/Everything/aarch64/os/images/pxeboot/vmlinuz'), + '3ae07fcafbfc8e4abeb693035a74fe10698faae15e9ccd48882a9167800c1527') + + ASSET_INITRD_AARCH64 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux' + '/releases/31/Everything/aarch64/os/images/pxeboot/initrd.img'), + '9fd230cab10b1dafea41cf00150e6669d37051fad133bd618d2130284e16d526') + + def do_test(self, kernel_asset, initrd_asset, + kernel_command_line, machine_type): + """Main test method""" + self.require_accelerator('kvm') + self.require_device('x-pci-proxy-dev') + + # Create socketpair to connect proxy and remote processes + proxy_sock, remote_sock = socket.socketpair(socket.AF_UNIX, + socket.SOCK_STREAM) + os.set_inheritable(proxy_sock.fileno(), True) + os.set_inheritable(remote_sock.fileno(), True) + + kernel_path = kernel_asset.fetch() + initrd_path = initrd_asset.fetch() + + # Create remote process + remote_vm = self.get_vm() + remote_vm.add_args('-machine', 'x-remote') + remote_vm.add_args('-nodefaults') + remote_vm.add_args('-device', 'lsi53c895a,id=lsi1') + remote_vm.add_args('-object', 'x-remote-object,id=robj1,' + 'devid=lsi1,fd='+str(remote_sock.fileno())) + remote_vm.launch() + + # Create proxy process + self.vm.set_console() + self.vm.add_args('-machine', machine_type) + self.vm.add_args('-accel', 'kvm') + self.vm.add_args('-cpu', 'host') + self.vm.add_args('-object', + 
'memory-backend-memfd,id=sysmem-file,size=2G') + self.vm.add_args('--numa', 'node,memdev=sysmem-file') + self.vm.add_args('-m', '2048') + self.vm.add_args('-kernel', kernel_path, + '-initrd', initrd_path, + '-append', kernel_command_line) + self.vm.add_args('-device', + 'x-pci-proxy-dev,' + 'id=lsi1,fd='+str(proxy_sock.fileno())) + self.vm.launch() + wait_for_console_pattern(self, 'as init process', + 'Kernel panic - not syncing') + exec_command(self, 'mount -t sysfs sysfs /sys') + exec_command_and_wait_for_pattern(self, + 'cat /sys/bus/pci/devices/*/uevent', + 'PCI_ID=1000:0012') + + proxy_sock.close() + remote_sock.close() + + def test_multiprocess(self): + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + if self.arch == 'x86_64': + kernel_command_line += 'console=ttyS0 rdinit=/bin/bash' + self.do_test(self.ASSET_KERNEL_X86, self.ASSET_INITRD_X86, + kernel_command_line, 'pc') + elif self.arch == 'aarch64': + kernel_command_line += 'rdinit=/bin/bash console=ttyAMA0' + self.do_test(self.ASSET_KERNEL_AARCH64, self.ASSET_INITRD_AARCH64, + kernel_command_line, 'virt,gic-version=3') + else: + assert False + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_netdev_ethtool.py b/tests/functional/test_netdev_ethtool.py new file mode 100755 index 00000000000..ee1a397bd24 --- /dev/null +++ b/tests/functional/test_netdev_ethtool.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 +# +# ethtool tests for emulated network devices +# +# This test leverages ethtool's --test sequence to validate network +# device behaviour. +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from unittest import skip +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern + +class NetDevEthtool(QemuSystemTest): + + # Runs in about 17s under KVM, 19s under TCG, 25s under GCOV + timeout = 45 + + # Fetch assets from the netdev-ethtool subdir of my shared test + # images directory on fileserver.linaro.org. 
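The multiprocess test above hands one end of a socketpair to each QEMU process by file-descriptor number; since Python 3.4 new descriptors are non-inheritable by default, hence the explicit os.set_inheritable() calls. A minimal sketch of the mechanism, with a stand-in child command instead of the real QEMU invocation:

    import os
    import socket
    import subprocess

    proxy_sock, remote_sock = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    os.set_inheritable(proxy_sock.fileno(), True)
    os.set_inheritable(remote_sock.fileno(), True)

    # Stand-in for launching the remote QEMU with
    # '-object x-remote-object,...,fd=N'; /bin/true just exits.
    subprocess.run(['/bin/true', f'fd={remote_sock.fileno()}'],
                   close_fds=False)  # keep inherited fds open in the child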
+ ASSET_BASEURL = ('https://fileserver.linaro.org/s/kE4nCFLdQcoBF9t/' + 'download?path=%2Fnetdev-ethtool&files=') + ASSET_BZIMAGE = Asset( + ASSET_BASEURL + "bzImage", + "ed62ee06ea620b1035747f3f66a5e9fc5d3096b29f75562ada888b04cd1c4baf") + ASSET_ROOTFS = Asset( + ASSET_BASEURL + "rootfs.squashfs", + "8f0207e3c4d40832ae73c1a927e42ca30ccb1e71f047acb6ddb161ba422934e6") + + def common_test_code(self, netdev, extra_args=None): + self.set_machine('q35') + + # This custom kernel has drivers for all the supported network + # devices we can emulate in QEMU + kernel = self.ASSET_BZIMAGE.fetch() + rootfs = self.ASSET_ROOTFS.fetch() + + append = 'printk.time=0 console=ttyS0 ' + append += 'root=/dev/sr0 rootfstype=squashfs ' + + # any additional kernel tweaks for the test + if extra_args: + append += extra_args + + # finally invoke ethtool directly + append += ' init=/usr/sbin/ethtool -- -t eth1 offline' + + # add the rootfs via a readonly cdrom image + drive = f"file={rootfs},if=ide,index=0,media=cdrom" + + self.vm.add_args('-kernel', kernel, + '-append', append, + '-drive', drive, + '-device', netdev) + + self.vm.set_console(console_index=0) + self.vm.launch() + + wait_for_console_pattern(self, + "The test result is PASS", + "The test result is FAIL", + vm=None) + # no need to gracefully shutdown, just finish + self.vm.kill() + + def test_igb(self): + self.common_test_code("igb") + + def test_igb_nomsi(self): + self.common_test_code("igb", "pci=nomsi") + + # It seems the other popular cards we model in QEMU currently fail + # the pattern test with: + # + # pattern test failed (reg 0x00178): got 0x00000000 expected 0x00005A5A + # + # So for now we skip them. + + @skip("Incomplete reg 0x00178 support") + def test_e1000(self): + self.common_test_code("e1000") + + @skip("Incomplete reg 0x00178 support") + def test_i82550(self): + self.common_test_code("i82550") + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_or1k_replay.py b/tests/functional/test_or1k_replay.py new file mode 100755 index 00000000000..2b60a9372c5 --- /dev/null +++ b/tests/functional/test_or1k_replay.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# +# Replay test that boots a Linux kernel on an OpenRISC-1000 SIM machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from replay_kernel import ReplayKernelBase + + +class Or1kReplay(ReplayKernelBase): + + ASSET_DAY20 = Asset( + 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day20.tar.xz', + 'ff9d7dd7c6bdba325bd85ee85c02db61ff653e129558aeffe6aff55bffb6763a') + + def test_sim(self): + self.set_machine('or1k-sim') + kernel_path = self.archive_extract(self.ASSET_DAY20, + member='day20/vmlinux') + self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE, + 'QEMU advent calendar') + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_or1k_sim.py b/tests/functional/test_or1k_sim.py new file mode 100755 index 00000000000..f9f0b690a0a --- /dev/null +++ b/tests/functional/test_or1k_sim.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on an OpenRISC-1000 SIM machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + + +class OpenRISC1kSimTest(LinuxKernelTest): + + ASSET_DAY20 = Asset( + 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day20.tar.xz', + 'ff9d7dd7c6bdba325bd85ee85c02db61ff653e129558aeffe6aff55bffb6763a') + + def 
test_or1k_sim(self): + self.set_machine('or1k-sim') + self.archive_extract(self.ASSET_DAY20) + self.vm.set_console() + self.vm.add_args('-kernel', self.scratch_file('day20', 'vmlinux')) + self.vm.launch() + self.wait_for_console_pattern('QEMU advent calendar') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_pc_cpu_hotplug_props.py b/tests/functional/test_pc_cpu_hotplug_props.py new file mode 100755 index 00000000000..2bed8ada023 --- /dev/null +++ b/tests/functional/test_pc_cpu_hotplug_props.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +# +# Ensure CPU die-id can be omitted on -device +# +# Copyright (c) 2019 Red Hat Inc +# +# Author: +# Eduardo Habkost +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, see . +# + +from qemu_test import QemuSystemTest + +class OmittedCPUProps(QemuSystemTest): + + def test_no_die_id(self): + self.set_machine('pc') + self.vm.add_args('-nodefaults', '-S') + self.vm.add_args('-smp', '1,sockets=2,cores=2,threads=2,maxcpus=8') + self.vm.add_args('-device', 'qemu64-x86_64-cpu,socket-id=1,core-id=0,thread-id=0') + self.vm.launch() + self.assertEqual(len(self.vm.cmd('query-cpus-fast')), 2) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_ppc64_e500.py b/tests/functional/test_ppc64_e500.py new file mode 100755 index 00000000000..f5fcad9f6b6 --- /dev/null +++ b/tests/functional/test_ppc64_e500.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 +# +# Boot a Linux kernel on a e500 ppc64 machine and check the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset +from qemu_test import exec_command_and_wait_for_pattern + + +class E500Test(LinuxKernelTest): + + ASSET_BR2_E5500_UIMAGE = Asset( + 'https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main/buildroot/qemu_ppc64_e5500-2023.11-8-gdcd9f0f6eb-20240104/uImage', + '2478187c455d6cca3984e9dfde9c635d824ea16236b85fd6b4809f744706deda') + + ASSET_BR2_E5500_ROOTFS = Asset( + 'https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main//buildroot/qemu_ppc64_e5500-2023.11-8-gdcd9f0f6eb-20240104/rootfs.ext2', + '9035ef97237c84c7522baaff17d25cdfca4bb7a053d5e296e902919473423d76') + + def test_ppc64_e500_buildroot(self): + self.set_machine('ppce500') + self.require_netdev('user') + self.cpu = 'e5500' + + uimage_path = self.ASSET_BR2_E5500_UIMAGE.fetch() + rootfs_path = self.ASSET_BR2_E5500_ROOTFS.fetch() + + self.vm.set_console() + self.vm.add_args('-kernel', uimage_path, + '-append', 'root=/dev/vda', + '-drive', f'file={rootfs_path},if=virtio,format=raw', + '-snapshot', '-no-shutdown') + self.vm.launch() + + self.wait_for_console_pattern('Linux version') + self.wait_for_console_pattern('/init as init process') + self.wait_for_console_pattern('lease of 10.0.2.15') + self.wait_for_console_pattern('buildroot login:') + exec_command_and_wait_for_pattern(self, 'root', '#') + exec_command_and_wait_for_pattern(self, 'poweroff', 'Power 
down') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_ppc64_hv.py b/tests/functional/test_ppc64_hv.py new file mode 100755 index 00000000000..d87f440fa79 --- /dev/null +++ b/tests/functional/test_ppc64_hv.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 +# +# Tests that specifically try to exercise hypervisor features of the +# target machines. powernv supports the Power hypervisor ISA, and +# pseries supports the nested-HV hypervisor spec. +# +# Copyright (c) 2023 IBM Corporation +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +import os +import subprocess + +from datetime import datetime +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern, exec_command +from qemu_test import skipIfMissingCommands, skipBigDataTest +from qemu_test import exec_command_and_wait_for_pattern + +# Alpine is a light weight distro that supports QEMU. These tests boot +# that on the machine then run a QEMU guest inside it in KVM mode, +# that runs the same Alpine distro image. +# QEMU packages are downloaded and installed on each test. That's not a +# large download, but it may be more polite to create qcow2 image with +# QEMU already installed and use that. +# XXX: The order of these tests seems to matter, see git blame. +@skipIfMissingCommands("xorriso") +@skipBigDataTest() +class HypervisorTest(QemuSystemTest): + + timeout = 1000 + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 ' + panic_message = 'Kernel panic - not syncing' + good_message = 'VFS: Cannot open root device' + + ASSET_ISO = Asset( + ('https://dl-cdn.alpinelinux.org/alpine/v3.21/' + 'releases/ppc64le/alpine-standard-3.21.0-ppc64le.iso'), + '7651ab4e3027604535c0b36e86c901b4695bf8fe97b908f5b48590f6baae8f30') + + def extract_from_iso(self, iso, path): + """ + Extracts a file from an iso file into the test workdir + + :param iso: path to the iso file + :param path: path within the iso file of the file to be extracted + :returns: path of the extracted file + """ + filename = self.scratch_file(os.path.basename(path)) + + cmd = "xorriso -osirrox on -indev %s -cpx %s %s" % (iso, path, filename) + subprocess.run(cmd.split(), + stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + + os.chmod(filename, 0o600) + + return filename + + def setUp(self): + super().setUp() + + self.iso_path = self.ASSET_ISO.fetch() + self.vmlinuz = self.extract_from_iso(self.iso_path, '/boot/vmlinuz-lts') + self.initramfs = self.extract_from_iso(self.iso_path, '/boot/initramfs-lts') + + def do_start_alpine(self): + self.vm.set_console() + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + self.vm.add_args("-kernel", self.vmlinuz) + self.vm.add_args("-initrd", self.initramfs) + self.vm.add_args("-smp", "4", "-m", "2g") + self.vm.add_args("-drive", f"file={self.iso_path},format=raw,if=none," + "id=drive0,read-only=true") + + self.vm.launch() + ps1='localhost:~#' + wait_for_console_pattern(self, 'localhost login:') + exec_command_and_wait_for_pattern(self, 'root', ps1) + # If the time is wrong, SSL certificates can fail. 
+ exec_command_and_wait_for_pattern(self, 'date -s "' + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S' + '"'), ps1) + ps1='alpine:~#' + exec_command_and_wait_for_pattern(self, 'setup-alpine -qe', ps1) + exec_command_and_wait_for_pattern(self, 'setup-apkrepos -c1', ps1) + exec_command_and_wait_for_pattern(self, 'apk update', ps1) + # Could upgrade here but it usually should not be necessary + # exec_command_and_wait_for_pattern(self, 'apk upgrade --available', ps1) + + def do_stop_alpine(self): + exec_command(self, 'echo "TEST ME"') + wait_for_console_pattern(self, 'alpine:~#') + exec_command(self, 'poweroff') + wait_for_console_pattern(self, 'reboot: Power down') + self.vm.wait() + + def do_setup_kvm(self): + ps1='alpine:~#' + exec_command_and_wait_for_pattern(self, 'apk add qemu-system-ppc64', ps1) + exec_command_and_wait_for_pattern(self, 'modprobe kvm-hv', ps1) + + # This uses the host's block device as the source file for guest block + # device for install media. This is a bit hacky but allows reuse of the + # iso without having a passthrough filesystem configured. + def do_test_kvm(self, hpt=False): + if hpt: + append = 'disable_radix' + else: + append = '' + exec_command(self, 'qemu-system-ppc64 -nographic -smp 2 -m 1g ' + '-machine pseries,x-vof=on,accel=kvm ' + '-machine cap-cfpc=broken,cap-sbbc=broken,' + 'cap-ibs=broken,cap-ccf-assist=off ' + '-drive file=/dev/nvme0n1,format=raw,readonly=on ' + '-initrd /media/nvme0n1/boot/initramfs-lts ' + '-kernel /media/nvme0n1/boot/vmlinuz-lts ' + '-append \'usbcore.nousb ' + append + '\'') + # Alpine 3.21 kernel seems to crash in XHCI USB driver. + ps1='localhost:~#' + wait_for_console_pattern(self, 'localhost login:') + exec_command_and_wait_for_pattern(self, 'root', ps1) + exec_command(self, 'poweroff') + wait_for_console_pattern(self, 'reboot: Power down') + # Now wait for the host's prompt to come back + wait_for_console_pattern(self, 'alpine:~#') + + def test_hv_pseries(self): + self.require_accelerator("tcg") + self.require_netdev('user') + self.set_machine('pseries') + self.vm.add_args("-accel", "tcg,thread=multi") + self.vm.add_args('-device', 'nvme,serial=1234,drive=drive0') + self.vm.add_args("-machine", "x-vof=on,cap-nested-hv=on") + self.do_start_alpine() + self.do_setup_kvm() + self.do_test_kvm() + self.do_stop_alpine() + + def test_hv_pseries_kvm(self): + self.require_accelerator("kvm") + self.require_netdev('user') + self.set_machine('pseries') + self.vm.add_args("-accel", "kvm") + self.vm.add_args('-device', 'nvme,serial=1234,drive=drive0') + self.vm.add_args("-machine", "x-vof=on,cap-nested-hv=on,cap-ccf-assist=off") + self.do_start_alpine() + self.do_setup_kvm() + self.do_test_kvm() + self.do_stop_alpine() + + def test_hv_powernv(self): + self.require_accelerator("tcg") + self.require_netdev('user') + self.set_machine('powernv') + self.vm.add_args("-accel", "tcg,thread=multi") + self.vm.add_args('-device', 'nvme,bus=pcie.2,addr=0x0,serial=1234,drive=drive0', + '-device', 'e1000e,netdev=net0,mac=C0:FF:EE:00:00:02,bus=pcie.0,addr=0x0', + '-netdev', 'user,id=net0,hostfwd=::20022-:22,hostname=alpine') + self.do_start_alpine() + self.do_setup_kvm() + self.do_test_kvm() + self.do_test_kvm(True) + self.do_stop_alpine() + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_ppc64_mac99.py b/tests/functional/test_ppc64_mac99.py new file mode 100755 index 00000000000..dfd9c01371d --- /dev/null +++ b/tests/functional/test_ppc64_mac99.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 +# +# Functional test 
that boots a mac99 machine with a PPC970 CPU +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset +from qemu_test import exec_command_and_wait_for_pattern + +class mac99Test(LinuxKernelTest): + + ASSET_BR2_MAC99_LINUX = Asset( + 'https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main/buildroot/qemu_ppc64_mac99-2023.11-8-gdcd9f0f6eb-20240105/vmlinux', + 'd59307437e4365f2cced0bbd1b04949f7397b282ef349b7cafd894d74aadfbff') + + ASSET_BR2_MAC99_ROOTFS = Asset( + 'https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main//buildroot/qemu_ppc64_mac99-2023.11-8-gdcd9f0f6eb-20240105/rootfs.ext2', + 'bbd5fd8af62f580bc4e585f326fe584e22856572633a8333178ea6d4ed4955a4') + + def test_ppc64_mac99_buildroot(self): + self.set_machine('mac99') + + linux_path = self.ASSET_BR2_MAC99_LINUX.fetch() + rootfs_path = self.ASSET_BR2_MAC99_ROOTFS.fetch() + + self.vm.set_console() + + # Note: We need '-nographic' to get a serial console + self.vm.add_args('-kernel', linux_path, + '-append', 'root=/dev/sda', + '-drive', f'file={rootfs_path},format=raw', + '-snapshot', '-nographic') + self.vm.launch() + + self.wait_for_console_pattern('>> OpenBIOS') + self.wait_for_console_pattern('Linux version') + self.wait_for_console_pattern('/init as init process') + self.wait_for_console_pattern('gem 0000:f0:0e.0 eth0: Link is up at 100 Mbps') + self.wait_for_console_pattern('buildroot login:') + exec_command_and_wait_for_pattern(self, 'root', '#') + exec_command_and_wait_for_pattern(self, 'poweroff', 'Power down') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_ppc64_powernv.py b/tests/functional/test_ppc64_powernv.py new file mode 100755 index 00000000000..685e2178ed7 --- /dev/null +++ b/tests/functional/test_ppc64_powernv.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# +# Test that Linux kernel boots on ppc powernv machines and check the console +# +# Copyright (c) 2018, 2020 Red Hat, Inc. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
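# These boot tests supply only a kernel and no root filesystem, so the
# expected end state is the root-mount failure: reaching good_message
# ('VFS: Cannot open root device') before panic_message appears is what
# counts as a successful boot of the kernel itself.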
+ +from qemu_test import LinuxKernelTest, Asset +from qemu_test import wait_for_console_pattern + +class powernvMachine(LinuxKernelTest): + + timeout = 90 + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 ' + panic_message = 'Kernel panic - not syncing' + good_message = 'VFS: Cannot open root device' + + ASSET_KERNEL = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora-secondary/' + 'releases/29/Everything/ppc64le/os/ppc/ppc64/vmlinuz'), + '383c2f5c23bc0d9d32680c3924d3fd7ee25cc5ef97091ac1aa5e1d853422fc5f') + + def do_test_linux_boot(self, command_line = KERNEL_COMMON_COMMAND_LINE): + self.require_accelerator("tcg") + kernel_path = self.ASSET_KERNEL.fetch() + + self.vm.set_console() + self.vm.add_args('-kernel', kernel_path, + '-append', command_line) + self.vm.launch() + + def test_linux_boot(self): + self.set_machine('powernv') + self.do_test_linux_boot() + console_pattern = 'VFS: Cannot open root device' + wait_for_console_pattern(self, console_pattern, self.panic_message) + + def test_linux_smp_boot(self): + self.set_machine('powernv') + self.vm.add_args('-smp', '4') + self.do_test_linux_boot() + console_pattern = 'smp: Brought up 1 node, 4 CPUs' + wait_for_console_pattern(self, console_pattern, self.panic_message) + wait_for_console_pattern(self, self.good_message, self.panic_message) + + def test_linux_smp_hpt_boot(self): + self.set_machine('powernv') + self.vm.add_args('-smp', '4') + self.do_test_linux_boot(self.KERNEL_COMMON_COMMAND_LINE + + 'disable_radix') + console_pattern = 'smp: Brought up 1 node, 4 CPUs' + wait_for_console_pattern(self, 'hash-mmu: Initializing hash mmu', + self.panic_message) + wait_for_console_pattern(self, console_pattern, self.panic_message) + wait_for_console_pattern(self, self.good_message, self.panic_message) + + def test_linux_smt_boot(self): + self.set_machine('powernv') + self.vm.add_args('-smp', '4,threads=4') + self.do_test_linux_boot() + console_pattern = 'CPU maps initialized for 4 threads per core' + wait_for_console_pattern(self, console_pattern, self.panic_message) + console_pattern = 'smp: Brought up 1 node, 4 CPUs' + wait_for_console_pattern(self, console_pattern, self.panic_message) + wait_for_console_pattern(self, self.good_message, self.panic_message) + + def test_linux_big_boot(self): + self.set_machine('powernv') + self.vm.add_args('-smp', '16,threads=4,cores=2,sockets=2') + + # powernv does not support NUMA + self.do_test_linux_boot() + console_pattern = 'CPU maps initialized for 4 threads per core' + wait_for_console_pattern(self, console_pattern, self.panic_message) + console_pattern = 'smp: Brought up 2 nodes, 16 CPUs' + wait_for_console_pattern(self, console_pattern, self.panic_message) + wait_for_console_pattern(self, self.good_message, self.panic_message) + + + ASSET_EPAPR_KERNEL = Asset( + ('https://github.com/open-power/op-build/releases/download/v2.7/' + 'zImage.epapr'), + '0ab237df661727e5392cee97460e8674057a883c5f74381a128fa772588d45cd') + + def do_test_ppc64_powernv(self, proc): + self.require_accelerator("tcg") + kernel_path = self.ASSET_EPAPR_KERNEL.fetch() + self.vm.set_console() + self.vm.add_args('-kernel', kernel_path, + '-append', 'console=tty0 console=hvc0', + '-device', 'pcie-pci-bridge,id=bridge1,bus=pcie.1,addr=0x0', + '-device', 'nvme,bus=pcie.2,addr=0x0,serial=1234', + '-device', 'e1000e,bus=bridge1,addr=0x3', + '-device', 'nec-usb-xhci,bus=bridge1,addr=0x2') + self.vm.launch() + + self.wait_for_console_pattern("CPU: " + proc + " generation processor") + 
self.wait_for_console_pattern("zImage starting: loaded") + self.wait_for_console_pattern("Run /init as init process") + # Device detection output driven by udev probing is sometimes cut off + # from console output, suspect S14silence-console init script. + + def test_powernv8(self): + self.set_machine('powernv8') + self.do_test_ppc64_powernv('P8') + + def test_powernv9(self): + self.set_machine('powernv9') + self.do_test_ppc64_powernv('P9') + + def test_powernv10(self): + self.set_machine('powernv10') + self.do_test_ppc64_powernv('P10') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_ppc64_pseries.py b/tests/functional/test_ppc64_pseries.py new file mode 100755 index 00000000000..67057934e8d --- /dev/null +++ b/tests/functional/test_ppc64_pseries.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +# +# Test that Linux kernel boots on ppc machines and check the console +# +# Copyright (c) 2018, 2020 Red Hat, Inc. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern + +class pseriesMachine(QemuSystemTest): + + timeout = 90 + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 ' + panic_message = 'Kernel panic - not syncing' + good_message = 'VFS: Cannot open root device' + + ASSET_KERNEL = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora-secondary/' + 'releases/29/Everything/ppc64le/os/ppc/ppc64/vmlinuz'), + '383c2f5c23bc0d9d32680c3924d3fd7ee25cc5ef97091ac1aa5e1d853422fc5f') + + def do_test_ppc64_linux_boot(self, kernel_command_line = KERNEL_COMMON_COMMAND_LINE): + kernel_path = self.ASSET_KERNEL.fetch() + + self.vm.set_console() + self.vm.add_args('-kernel', kernel_path, + '-append', kernel_command_line) + self.vm.launch() + + def test_ppc64_vof_linux_boot(self): + self.set_machine('pseries') + self.vm.add_args('-machine', 'x-vof=on') + self.do_test_ppc64_linux_boot() + console_pattern = 'VFS: Cannot open root device' + wait_for_console_pattern(self, console_pattern, self.panic_message) + + def test_ppc64_linux_boot(self): + self.set_machine('pseries') + self.do_test_ppc64_linux_boot() + console_pattern = 'VFS: Cannot open root device' + wait_for_console_pattern(self, console_pattern, self.panic_message) + + def test_ppc64_linux_smp_boot(self): + self.set_machine('pseries') + self.vm.add_args('-smp', '4') + self.do_test_ppc64_linux_boot() + console_pattern = 'smp: Brought up 1 node, 4 CPUs' + wait_for_console_pattern(self, console_pattern, self.panic_message) + wait_for_console_pattern(self, self.good_message, self.panic_message) + + def test_ppc64_linux_hpt_smp_boot(self): + self.set_machine('pseries') + self.vm.add_args('-smp', '4') + self.do_test_ppc64_linux_boot(self.KERNEL_COMMON_COMMAND_LINE + + 'disable_radix') + console_pattern = 'smp: Brought up 1 node, 4 CPUs' + wait_for_console_pattern(self, 'hash-mmu: Initializing hash mmu', + self.panic_message) + wait_for_console_pattern(self, console_pattern, self.panic_message) + wait_for_console_pattern(self, self.good_message, self.panic_message) + + def test_ppc64_linux_smt_boot(self): + self.set_machine('pseries') + self.vm.add_args('-smp', '4,threads=4') + self.do_test_ppc64_linux_boot() + console_pattern = 'CPU maps initialized for 4 threads per core' + wait_for_console_pattern(self, console_pattern, self.panic_message) + console_pattern = 'smp: Brought up 1 node, 4 CPUs' + wait_for_console_pattern(self, 
console_pattern, self.panic_message) + wait_for_console_pattern(self, self.good_message, self.panic_message) + + def test_ppc64_linux_big_boot(self): + self.set_machine('pseries') + self.vm.add_args('-smp', '16,threads=4,cores=2,sockets=2') + self.vm.add_args('-m', '512M', + '-object', 'memory-backend-ram,size=256M,id=m0', + '-object', 'memory-backend-ram,size=256M,id=m1') + self.vm.add_args('-numa', 'node,nodeid=0,memdev=m0') + self.vm.add_args('-numa', 'node,nodeid=1,memdev=m1') + self.do_test_ppc64_linux_boot() + console_pattern = 'CPU maps initialized for 4 threads per core' + wait_for_console_pattern(self, console_pattern, self.panic_message) + console_pattern = 'smp: Brought up 2 nodes, 16 CPUs' + wait_for_console_pattern(self, console_pattern, self.panic_message) + wait_for_console_pattern(self, self.good_message, self.panic_message) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_ppc64_replay.py b/tests/functional/test_ppc64_replay.py new file mode 100755 index 00000000000..e8c9c4bcbf8 --- /dev/null +++ b/tests/functional/test_ppc64_replay.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +# +# Replay test that boots a Linux kernel on ppc64 machines +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset, skipFlakyTest +from replay_kernel import ReplayKernelBase + + +class Ppc64Replay(ReplayKernelBase): + + ASSET_DAY19 = Asset( + ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/' + 'day19.tar.xz'), + '20b1bb5a8488c664defbb5d283addc91a05335a936c63b3f5ff7eee74b725755') + + @skipFlakyTest('https://gitlab.com/qemu-project/qemu/-/issues/2523') + def test_ppc64_e500(self): + self.set_machine('ppce500') + self.cpu = 'e5500' + kernel_path = self.archive_extract(self.ASSET_DAY19, + member='day19/uImage') + self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE, + 'QEMU advent calendar') + + ASSET_KERNEL = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora-secondary/' + 'releases/29/Everything/ppc64le/os/ppc/ppc64/vmlinuz'), + '383c2f5c23bc0d9d32680c3924d3fd7ee25cc5ef97091ac1aa5e1d853422fc5f') + + def test_ppc64_pseries(self): + self.set_machine('pseries') + kernel_path = self.ASSET_KERNEL.fetch() + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=hvc0' + console_pattern = 'VFS: Cannot open root device' + self.run_rr(kernel_path, kernel_command_line, console_pattern) + + def test_ppc64_powernv(self): + self.set_machine('powernv') + kernel_path = self.ASSET_KERNEL.fetch() + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + \ + 'console=tty0 console=hvc0' + console_pattern = 'VFS: Cannot open root device' + self.run_rr(kernel_path, kernel_command_line, console_pattern) + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_ppc64_reverse_debug.py b/tests/functional/test_ppc64_reverse_debug.py new file mode 100755 index 00000000000..5931adef5a9 --- /dev/null +++ b/tests/functional/test_ppc64_reverse_debug.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Reverse debugging test +# +# Copyright (c) 2020 ISP RAS +# +# Author: +# Pavel Dovgalyuk +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
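# REG_PC below appears to be the GDB remote-protocol register number of the
# program counter (64, i.e. 0x40, in gdb's ppc64 numbering); the
# ReverseDebugging base class reads it to compare instruction addresses while
# stepping forwards and then backwards through the recorded execution.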
+ +from qemu_test import skipIfMissingImports, skipFlakyTest +from reverse_debugging import ReverseDebugging + + +@skipIfMissingImports('avocado.utils') +class ReverseDebugging_ppc64(ReverseDebugging): + + REG_PC = 0x40 + + @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/1992") + def test_ppc64_pseries(self): + self.set_machine('pseries') + # SLOF branches back to its entry point, which causes this test + # to take the 'hit a breakpoint again' path. That's not a problem, + # just slightly different than the other machines. + self.endian_is_le = False + self.reverse_debugging() + + @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/1992") + def test_ppc64_powernv(self): + self.set_machine('powernv') + self.endian_is_le = False + self.reverse_debugging() + + +if __name__ == '__main__': + ReverseDebugging.main() diff --git a/tests/functional/test_ppc64_tuxrun.py b/tests/functional/test_ppc64_tuxrun.py new file mode 100755 index 00000000000..e8f79c676e5 --- /dev/null +++ b/tests/functional/test_ppc64_tuxrun.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from subprocess import check_call, DEVNULL +import tempfile + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunPPC64Test(TuxRunBaselineTest): + + def ppc64_common_tuxrun(self, kernel_asset, rootfs_asset, prefix): + self.set_machine('pseries') + self.cpu='POWER10' + self.console='hvc0' + self.root='sda' + self.extradev='spapr-vscsi' + # add device args to command line. 
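# The JSON-form -device/-netdev/-object arguments below reproduce the kind of
# command line that tuxrun/libvirt generate for a pseries guest; QEMU accepts
# JSON whenever the option argument starts with '{', so this also exercises
# the JSON parsing path alongside the traditional key=value syntax.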
+ self.require_netdev('user') + self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22', + '-device', 'virtio-net,netdev=vnet') + self.vm.add_args('-netdev', '{"type":"user","id":"hostnet0"}', + '-device', '{"driver":"virtio-net-pci","netdev":' + '"hostnet0","id":"net0","mac":"52:54:00:4c:e3:86",' + '"bus":"pci.0","addr":"0x9"}') + self.vm.add_args('-device', '{"driver":"qemu-xhci","p2":15,"p3":15,' + '"id":"usb","bus":"pci.0","addr":"0x2"}') + self.vm.add_args('-device', '{"driver":"virtio-scsi-pci","id":"scsi0"' + ',"bus":"pci.0","addr":"0x3"}') + self.vm.add_args('-device', '{"driver":"virtio-serial-pci","id":' + '"virtio-serial0","bus":"pci.0","addr":"0x4"}') + self.vm.add_args('-device', '{"driver":"scsi-cd","bus":"scsi0.0"' + ',"channel":0,"scsi-id":0,"lun":0,"device_id":' + '"drive-scsi0-0-0-0","id":"scsi0-0-0-0"}') + self.vm.add_args('-device', '{"driver":"virtio-balloon-pci",' + '"id":"balloon0","bus":"pci.0","addr":"0x6"}') + self.vm.add_args('-audiodev', '{"id":"audio1","driver":"none"}') + self.vm.add_args('-device', '{"driver":"usb-tablet","id":"input0"' + ',"bus":"usb.0","port":"1"}') + self.vm.add_args('-device', '{"driver":"usb-kbd","id":"input1"' + ',"bus":"usb.0","port":"2"}') + self.vm.add_args('-device', '{"driver":"VGA","id":"video0",' + '"vgamem_mb":16,"bus":"pci.0","addr":"0x7"}') + self.vm.add_args('-object', '{"qom-type":"rng-random","id":"objrng0"' + ',"filename":"/dev/urandom"}', + '-device', '{"driver":"virtio-rng-pci","rng":"objrng0"' + ',"id":"rng0","bus":"pci.0","addr":"0x8"}') + self.vm.add_args('-object', '{"qom-type":"cryptodev-backend-builtin",' + '"id":"objcrypto0","queues":1}', + '-device', '{"driver":"virtio-crypto-pci",' + '"cryptodev":"objcrypto0","id":"crypto0","bus"' + ':"pci.0","addr":"0xa"}') + self.vm.add_args('-device', '{"driver":"spapr-pci-host-bridge"' + ',"index":1,"id":"pci.1"}') + self.vm.add_args('-device', '{"driver":"spapr-vscsi","id":"scsi1"' + ',"reg":12288}') + self.vm.add_args('-m', '1G,slots=32,maxmem=2G', + '-object', 'memory-backend-ram,id=ram1,size=1G', + '-device', 'pc-dimm,id=dimm1,memdev=ram1') + + # Create a temporary qcow2 and launch the test-case + with tempfile.NamedTemporaryFile(prefix=prefix, + suffix='.qcow2') as qcow2: + check_call([self.qemu_img, 'create', '-f', 'qcow2', + qcow2.name, ' 1G'], + stdout=DEVNULL, stderr=DEVNULL) + + self.vm.add_args('-drive', 'file=' + qcow2.name + + ',format=qcow2,if=none,id=' + 'drive-virtio-disk1', + '-device', 'virtio-blk-pci,bus=pci.0,' + 'addr=0xb,drive=drive-virtio-disk1,id=virtio-disk1' + ',bootindex=2') + self.common_tuxrun(kernel_asset, rootfs_asset=rootfs_asset, + drive="scsi-hd") + + ASSET_PPC64_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/ppc64/vmlinux', + '8219d5cb26e7654ad7826fe8aee6290f7c01eef44f2cd6d26c15fe8f99e1c17c') + ASSET_PPC64_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/ppc64/rootfs.ext4.zst', + 'b68e12314303c5dd0fef37ae98021299a206085ae591893e73557af99a02d373') + + def test_ppc64(self): + self.ppc64_common_tuxrun(kernel_asset=self.ASSET_PPC64_KERNEL, + rootfs_asset=self.ASSET_PPC64_ROOTFS, + prefix='tuxrun_ppc64_') + + ASSET_PPC64LE_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/ppc64le/vmlinux', + '21aea1fbc18bf6fa7d8ca4ea48d4940b2c8363c077acd564eb47d769b7495279') + ASSET_PPC64LE_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/ppc64le/rootfs.ext4.zst', + '67d36a3f9597b738e8b7359bdf04500f4d9bb82fc35eaa65aa439d888b2392f4') + + def test_ppc64le(self): + 
self.ppc64_common_tuxrun(kernel_asset=self.ASSET_PPC64LE_KERNEL, + rootfs_asset=self.ASSET_PPC64LE_ROOTFS, + prefix='tuxrun_ppc64le_') + + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_ppc_40p.py b/tests/functional/test_ppc_40p.py new file mode 100755 index 00000000000..614972a7eb3 --- /dev/null +++ b/tests/functional/test_ppc_40p.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a PReP/40p machine and checks its serial console. +# +# Copyright (c) Philippe Mathieu-Daudé +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern, skipUntrustedTest +from qemu_test import exec_command_and_wait_for_pattern + + +class IbmPrep40pMachine(QemuSystemTest): + + timeout = 60 + + ASSET_BIOS = Asset( + ('http://ftpmirror.your.org/pub/misc/' + 'ftp.software.ibm.com/rs6000/firmware/' + '7020-40p/P12H0456.IMG'), + 'd957f79c73f760d1455d2286fcd901ed6d06167320eb73511b478a939be25b3f') + ASSET_NETBSD40 = Asset( + ('https://archive.netbsd.org/pub/NetBSD-archive/' + 'NetBSD-4.0/prep/installation/floppy/generic_com0.fs'), + 'f86236e9d01b3f0dd0f5d3b8d5bbd40c68e78b4db560a108358f5ad58e636619') + ASSET_NETBSD71 = Asset( + ('https://archive.netbsd.org/pub/NetBSD-archive/' + 'NetBSD-7.1.2/iso/NetBSD-7.1.2-prep.iso'), + 'cc7cb290b06aaa839362deb7bd9f417ac5015557db24088508330f76c3f825ec') + + # 12H0455 PPS Firmware Licensed Materials + # Property of IBM (C) Copyright IBM Corp. 1994. + # All rights reserved. + # U.S. Government Users Restricted Rights - Use, duplication or disclosure + # restricted by GSA ADP Schedule Contract with IBM Corp. + @skipUntrustedTest() + def test_factory_firmware_and_netbsd(self): + self.set_machine('40p') + self.require_accelerator("tcg") + bios_path = self.ASSET_BIOS.fetch() + drive_path = self.ASSET_NETBSD40.fetch() + + self.vm.set_console() + self.vm.add_args('-bios', bios_path, + '-drive', + f"file={drive_path},format=raw,if=floppy,read-only=true") + self.vm.launch() + os_banner = 'NetBSD 4.0 (GENERIC) #0: Sun Dec 16 00:49:40 PST 2007' + wait_for_console_pattern(self, os_banner) + wait_for_console_pattern(self, 'Model: IBM PPS Model 6015') + + def test_openbios_192m(self): + self.set_machine('40p') + self.require_accelerator("tcg") + self.vm.set_console() + self.vm.add_args('-m', '192') # test fw_cfg + + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> Memory: 192M') + wait_for_console_pattern(self, '>> CPU type PowerPC,604') + + def test_openbios_and_netbsd(self): + self.set_machine('40p') + self.require_accelerator("tcg") + drive_path = self.ASSET_NETBSD71.fetch() + self.vm.set_console() + self.vm.add_args('-cdrom', drive_path, + '-boot', 'd') + + self.vm.launch() + wait_for_console_pattern(self, 'NetBSD/prep BOOT, Revision 1.9') + + ASSET_40P_SANDALFOOT = Asset( + 'http://www.juneau-lug.org/zImage.initrd.sandalfoot', + '749ab02f576c6dc8f33b9fb022ecb44bf6a35a0472f2ea6a5e9956bc15933901') + + def test_openbios_and_linux(self): + self.set_machine('40p') + self.require_accelerator("tcg") + drive_path = self.ASSET_40P_SANDALFOOT.fetch() + self.vm.set_console() + self.vm.add_args('-cdrom', drive_path, + '-boot', 'd') + + self.vm.launch() + wait_for_console_pattern(self, 'Please press Enter to activate this console.') + exec_command_and_wait_for_pattern(self, '\012', '#') + 
exec_command_and_wait_for_pattern(self, 'uname -a', 'Linux ppc 2.4.18') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_ppc_74xx.py b/tests/functional/test_ppc_74xx.py new file mode 100755 index 00000000000..5386016f261 --- /dev/null +++ b/tests/functional/test_ppc_74xx.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +# +# Smoke tests for 74xx cpus (aka G4). +# +# Copyright (c) 2021, IBM Corp. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import QemuSystemTest +from qemu_test import wait_for_console_pattern + +class ppc74xxCpu(QemuSystemTest): + + timeout = 5 + + def test_ppc_7400(self): + self.require_accelerator("tcg") + self.set_machine('g3beige') + self.vm.set_console() + self.vm.add_args('-cpu', '7400') + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7410(self): + self.require_accelerator("tcg") + self.set_machine('g3beige') + self.vm.set_console() + self.vm.add_args('-cpu', '7410') + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,74xx') + + def test_ppc_7441(self): + self.require_accelerator("tcg") + self.set_machine('g3beige') + self.vm.set_console() + self.vm.add_args('-cpu', '7441') + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7445(self): + self.require_accelerator("tcg") + self.set_machine('g3beige') + self.vm.set_console() + self.vm.add_args('-cpu', '7445') + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7447(self): + self.require_accelerator("tcg") + self.set_machine('g3beige') + self.vm.set_console() + self.vm.add_args('-cpu', '7447') + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7447a(self): + self.require_accelerator("tcg") + self.set_machine('g3beige') + self.vm.set_console() + self.vm.add_args('-cpu', '7447a') + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7448(self): + self.require_accelerator("tcg") + self.set_machine('g3beige') + self.vm.set_console() + self.vm.add_args('-cpu', '7448') + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,MPC86xx') + + def test_ppc_7450(self): + self.require_accelerator("tcg") + self.set_machine('g3beige') + self.vm.set_console() + self.vm.add_args('-cpu', '7450') + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7451(self): + self.require_accelerator("tcg") + self.set_machine('g3beige') + self.vm.set_console() + self.vm.add_args('-cpu', '7451') + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7455(self): + self.require_accelerator("tcg") + self.set_machine('g3beige') + self.vm.set_console() + self.vm.add_args('-cpu', '7455') + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7457(self): + 
self.require_accelerator("tcg") + self.set_machine('g3beige') + self.vm.set_console() + self.vm.add_args('-cpu', '7457') + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + + def test_ppc_7457a(self): + self.require_accelerator("tcg") + self.set_machine('g3beige') + self.vm.set_console() + self.vm.add_args('-cpu', '7457a') + self.vm.launch() + wait_for_console_pattern(self, '>> OpenBIOS') + wait_for_console_pattern(self, '>> CPU type PowerPC,G4') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_ppc_amiga.py b/tests/functional/test_ppc_amiga.py new file mode 100755 index 00000000000..8600e2e9633 --- /dev/null +++ b/tests/functional/test_ppc_amiga.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +# +# Test AmigaNG boards +# +# Copyright (c) 2023 BALATON Zoltan +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +import subprocess + +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern + + +class AmigaOneMachine(QemuSystemTest): + + timeout = 90 + + ASSET_IMAGE = Asset( + ('https://www.hyperion-entertainment.com/index.php/' + 'downloads?view=download&format=raw&file=25'), + '8ff39330ba47d4f64de4ee8fd6809e9c010a9ef17fe51e95c3c1d53437cb481f') + + def test_ppc_amigaone(self): + self.require_accelerator("tcg") + self.set_machine('amigaone') + tar_name = 'A1Firmware_Floppy_05-Mar-2005.zip' + self.archive_extract(self.ASSET_IMAGE, format="zip") + bios = self.scratch_file("u-boot-amigaone.bin") + with open(bios, "wb") as bios_fh: + subprocess.run(['tail', '-c', '524288', + self.scratch_file("floppy_edition", + "updater.image")], + stdout=bios_fh) + + self.vm.set_console() + self.vm.add_args('-bios', bios) + self.vm.launch() + wait_for_console_pattern(self, 'FLASH:') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_ppc_bamboo.py b/tests/functional/test_ppc_bamboo.py new file mode 100755 index 00000000000..c634ae7b4a7 --- /dev/null +++ b/tests/functional/test_ppc_bamboo.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 +# +# Test that Linux kernel boots on the ppc bamboo board and check the console +# +# Copyright (c) 2021 Red Hat +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
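# The ping of 10.0.2.2 below targets the built-in gateway of QEMU's user-mode
# (slirp) network stack; restrict=on cuts the guest off from the outside
# world, but slirp's internal services still answer, so a single reply is
# enough to show the virtio-net datapath works.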
+ +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern +from qemu_test import exec_command_and_wait_for_pattern + + +class BambooMachine(QemuSystemTest): + + timeout = 90 + + ASSET_KERNEL = Asset( + ('https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main/' + 'buildroot/qemu_ppc_bamboo-2023.11-8-gdcd9f0f6eb-20240105/vmlinux'), + 'a2e12eb45b73491ac62fc0bbeb68dead0dc5c0f22cf83146558389209b420ad1') + ASSET_INITRD = Asset( + ('https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main/' + 'buildroot/qemu_ppc_bamboo-2023.11-8-gdcd9f0f6eb-20240105/rootfs.cpio'), + 'd2a36bdb8763b389765dc8c29d4904cec2bd001c587f92e85ab9eb10d5ddda54') + + def test_ppc_bamboo(self): + self.set_machine('bamboo') + self.require_accelerator("tcg") + self.require_netdev('user') + + kernel = self.ASSET_KERNEL.fetch() + initrd = self.ASSET_INITRD.fetch() + + self.vm.set_console() + self.vm.add_args('-kernel', kernel, + '-initrd', initrd, + '-nic', 'user,model=virtio-net-pci,restrict=on') + self.vm.launch() + wait_for_console_pattern(self, 'buildroot login:') + exec_command_and_wait_for_pattern(self, 'root', '#') + exec_command_and_wait_for_pattern(self, 'ping -c1 10.0.2.2', + '1 packets transmitted, 1 packets received, 0% packet loss') + exec_command_and_wait_for_pattern(self, 'halt', 'System Halted') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_ppc_mac.py b/tests/functional/test_ppc_mac.py new file mode 100755 index 00000000000..9e4bc1a52c7 --- /dev/null +++ b/tests/functional/test_ppc_mac.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# +# Boot Linux kernel on a mac99 and g3beige ppc machine and check the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + + +class MacTest(LinuxKernelTest): + + ASSET_DAY15 = Asset( + 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day15.tar.xz', + '03e0757c131d2959decf293a3572d3b96c5a53587165bf05ce41b2818a2bccd5') + + def do_day15_test(self): + # mac99 also works with kvm_pr but we don't have a reliable way at + # the moment (e.g. by looking at /proc/modules) to detect whether + # we're running kvm_hv or kvm_pr. For now let's disable this test + # if we don't have TCG support. + self.require_accelerator("tcg") + self.archive_extract(self.ASSET_DAY15) + self.vm.add_args('-M', 'graphics=off') + self.launch_kernel(self.scratch_file('day15', 'invaders.elf'), + wait_for='QEMU advent calendar') + + def test_ppc_g3beige(self): + self.set_machine('g3beige') + self.do_day15_test() + + def test_ppc_mac99(self): + self.set_machine('mac99') + self.do_day15_test() + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_ppc_mpc8544ds.py b/tests/functional/test_ppc_mpc8544ds.py new file mode 100755 index 00000000000..0715410d7a2 --- /dev/null +++ b/tests/functional/test_ppc_mpc8544ds.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +# +# Test that Linux kernel boots on ppc machines and check the console +# +# Copyright (c) 2018, 2020 Red Hat, Inc. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
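# A possible host-side check for the kvm_pr/kvm_hv ambiguity mentioned in
# MacTest above would be to look for the loaded module, e.g.
#   any(line.startswith('kvm_hv') for line in open('/proc/modules'))
# on the host; MacTest sidesteps this by requiring TCG instead.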
+ +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern + + +class Mpc8544dsMachine(QemuSystemTest): + + timeout = 90 + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + panic_message = 'Kernel panic - not syncing' + + ASSET_IMAGE = Asset( + ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/' + 'day04.tar.xz'), + '88bc83f3c9f3d633bcfc108a6342d677abca247066a2fb8d4636744a0d319f94') + + def test_ppc_mpc8544ds(self): + self.require_accelerator("tcg") + self.set_machine('mpc8544ds') + kernel_file = self.archive_extract(self.ASSET_IMAGE, + member='creek/creek.bin') + self.vm.set_console() + self.vm.add_args('-kernel', kernel_file) + self.vm.launch() + wait_for_console_pattern(self, 'QEMU advent calendar 2020', + self.panic_message) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_ppc_replay.py b/tests/functional/test_ppc_replay.py new file mode 100755 index 00000000000..8382070abd1 --- /dev/null +++ b/tests/functional/test_ppc_replay.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# +# Replay tests for ppc machines +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from replay_kernel import ReplayKernelBase + + +class PpcReplay(ReplayKernelBase): + + ASSET_DAY15 = Asset( + 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day15.tar.xz', + '03e0757c131d2959decf293a3572d3b96c5a53587165bf05ce41b2818a2bccd5') + + def do_day15_test(self): + self.require_accelerator("tcg") + kernel_path = self.archive_extract(self.ASSET_DAY15, + member='day15/invaders.elf') + self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE, + 'QEMU advent calendar', args=('-M', 'graphics=off')) + + def test_g3beige(self): + self.set_machine('g3beige') + self.do_day15_test() + + def test_mac99(self): + self.set_machine('mac99') + self.do_day15_test() + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_ppc_sam460ex.py b/tests/functional/test_ppc_sam460ex.py new file mode 100644 index 00000000000..31cf9dd6de8 --- /dev/null +++ b/tests/functional/test_ppc_sam460ex.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a sam460ex machine with a PPC 460EX CPU +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset +from qemu_test import exec_command_and_wait_for_pattern + + +class sam460exTest(LinuxKernelTest): + + ASSET_BR2_SAM460EX_LINUX = Asset( + 'https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main/buildroot/qemu_ppc_sam460ex-2023.11-8-gdcd9f0f6eb-20240105/vmlinux', + '6f46346f3e20e8b5fc050ff363f350f8b9d76a051b9e0bd7ea470cc680c14df2') + + def test_ppc_sam460ex_buildroot(self): + self.set_machine('sam460ex') + self.require_netdev('user') + + linux_path = self.ASSET_BR2_SAM460EX_LINUX.fetch() + + self.vm.set_console() + self.vm.add_args('-kernel', linux_path, + '-device', 'virtio-net-pci,netdev=net0', + '-netdev', 'user,id=net0') + self.vm.launch() + + self.wait_for_console_pattern('Linux version') + self.wait_for_console_pattern('Hardware name: amcc,canyonlands 460EX') + self.wait_for_console_pattern('/init as init process') + self.wait_for_console_pattern('lease of 10.0.2.15 obtained') + self.wait_for_console_pattern('buildroot login:') + exec_command_and_wait_for_pattern(self, 'root', '#') + exec_command_and_wait_for_pattern(self, 'poweroff', 'System Halted') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_ppc_tuxrun.py 
b/tests/functional/test_ppc_tuxrun.py new file mode 100755 index 00000000000..5458a7fb716 --- /dev/null +++ b/tests/functional/test_ppc_tuxrun.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunPPC32Test(TuxRunBaselineTest): + + ASSET_PPC32_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/ppc32/uImage', + 'aa5d81deabdb255a318c4bc5ffd6fdd2b5da1ef39f1955dcc35b671d258b68e9') + ASSET_PPC32_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/ppc32/rootfs.ext4.zst', + '67554f830269d6bf53b67c7dd206bcc821e463993d526b1644066fea8117019b') + + def test_ppc32(self): + self.set_machine('ppce500') + self.cpu='e500mc' + self.wait_for_shutdown=False + self.common_tuxrun(kernel_asset=self.ASSET_PPC32_KERNEL, + rootfs_asset=self.ASSET_PPC32_ROOTFS, + drive="virtio-blk-pci") + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_ppc_virtex_ml507.py b/tests/functional/test_ppc_virtex_ml507.py new file mode 100755 index 00000000000..8fe43549b78 --- /dev/null +++ b/tests/functional/test_ppc_virtex_ml507.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 +# +# Test that Linux kernel boots on ppc machines and check the console +# +# Copyright (c) 2018, 2020 Red Hat, Inc. +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern + + +class VirtexMl507Machine(QemuSystemTest): + + timeout = 90 + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + panic_message = 'Kernel panic - not syncing' + + ASSET_IMAGE = Asset( + ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/' + 'day08.tar.xz'), + 'cefe5b8aeb5e9d2d1d4fd22dcf48d917d68d5a765132bf2ddd6332dc393b824c') + + def test_ppc_virtex_ml507(self): + self.require_accelerator("tcg") + self.set_machine('virtex-ml507') + self.archive_extract(self.ASSET_IMAGE) + self.vm.set_console() + self.vm.add_args('-kernel', self.scratch_file('hippo', 'hippo.linux'), + '-dtb', self.scratch_file('hippo', + 'virtex440-ml507.dtb'), + '-m', '512') + self.vm.launch() + wait_for_console_pattern(self, 'QEMU advent calendar 2020', + self.panic_message) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_riscv32_tuxrun.py b/tests/functional/test_riscv32_tuxrun.py new file mode 100755 index 00000000000..3c570208d03 --- /dev/null +++ b/tests/functional/test_riscv32_tuxrun.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. 
+# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunRiscV32Test(TuxRunBaselineTest): + + ASSET_RISCV32_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/riscv32/Image', + '872bc8f8e0d4661825d5f47f7bec64988e9d0a8bd5db8917d57e16f66d83b329') + ASSET_RISCV32_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/riscv32/rootfs.ext4.zst', + '511ad34e63222db08d6c1da16fad224970de36517a784110956ba6a24a0ee5f6') + + def test_riscv32(self): + self.set_machine('virt') + self.common_tuxrun(kernel_asset=self.ASSET_RISCV32_KERNEL, + rootfs_asset=self.ASSET_RISCV32_ROOTFS) + + def test_riscv32_maxcpu(self): + self.set_machine('virt') + self.cpu='max' + self.common_tuxrun(kernel_asset=self.ASSET_RISCV32_KERNEL, + rootfs_asset=self.ASSET_RISCV32_ROOTFS) + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_riscv64_sifive_u.py b/tests/functional/test_riscv64_sifive_u.py new file mode 100755 index 00000000000..358ff0d1f60 --- /dev/null +++ b/tests/functional/test_riscv64_sifive_u.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on a Sifive U machine +# and checks the console +# +# Copyright (c) Linaro Ltd. +# +# Author: +# Philippe Mathieu-Daudé +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +from qemu_test import Asset, LinuxKernelTest +from qemu_test import skipIfMissingCommands + + +class SifiveU(LinuxKernelTest): + + ASSET_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/riscv64/Image', + '2bd8132a3bf21570290042324fff48c987f42f2a00c08de979f43f0662ebadba') + ASSET_ROOTFS = Asset( + ('https://github.com/groeck/linux-build-test/raw/' + '9819da19e6eef291686fdd7b029ea00e764dc62f/rootfs/riscv64/' + 'rootfs.ext2.gz'), + 'b6ed95610310b7956f9bf20c4c9c0c05fea647900df441da9dfe767d24e8b28b') + + def do_test_riscv64_sifive_u_mmc_spi(self, connect_card): + self.set_machine('sifive_u') + kernel_path = self.ASSET_KERNEL.fetch() + rootfs_path = self.uncompress(self.ASSET_ROOTFS) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'earlycon=sbi console=ttySIF0 ' + 'root=/dev/mmcblk0 ') + self.vm.add_args('-kernel', kernel_path, + '-append', kernel_command_line, + '-no-reboot') + if connect_card: + kernel_command_line += 'panic=-1 noreboot rootwait ' + self.vm.add_args('-drive', f'file={rootfs_path},if=sd,format=raw') + pattern = 'Boot successful.' + else: + kernel_command_line += 'panic=0 noreboot ' + pattern = 'Cannot open root device "mmcblk0" or unknown-block(0,0)' + + self.vm.launch() + self.wait_for_console_pattern(pattern) + + os.remove(rootfs_path) + + def test_riscv64_sifive_u_nommc_spi(self): + self.do_test_riscv64_sifive_u_mmc_spi(False) + + def test_riscv64_sifive_u_mmc_spi(self): + self.do_test_riscv64_sifive_u_mmc_spi(True) + + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_riscv64_tuxrun.py b/tests/functional/test_riscv64_tuxrun.py new file mode 100755 index 00000000000..0d8de36204a --- /dev/null +++ b/tests/functional/test_riscv64_tuxrun.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. 
+# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunRiscV64Test(TuxRunBaselineTest): + + ASSET_RISCV64_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/riscv64/Image', + '2bd8132a3bf21570290042324fff48c987f42f2a00c08de979f43f0662ebadba') + ASSET_RISCV64_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/riscv64/rootfs.ext4.zst', + 'aa4736a9872651dfc0d95e709465eedf1134fd19d42b8cb305bfd776f9801004') + + ASSET_RISCV32_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/riscv32/Image', + '872bc8f8e0d4661825d5f47f7bec64988e9d0a8bd5db8917d57e16f66d83b329') + ASSET_RISCV32_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/riscv32/rootfs.ext4.zst', + '511ad34e63222db08d6c1da16fad224970de36517a784110956ba6a24a0ee5f6') + + def test_riscv64(self): + self.set_machine('virt') + self.common_tuxrun(kernel_asset=self.ASSET_RISCV64_KERNEL, + rootfs_asset=self.ASSET_RISCV64_ROOTFS) + + def test_riscv64_maxcpu(self): + self.set_machine('virt') + self.cpu='max' + self.common_tuxrun(kernel_asset=self.ASSET_RISCV64_KERNEL, + rootfs_asset=self.ASSET_RISCV64_ROOTFS) + + def test_riscv64_rv32(self): + self.set_machine('virt') + self.cpu='rv32' + self.common_tuxrun(kernel_asset=self.ASSET_RISCV32_KERNEL, + rootfs_asset=self.ASSET_RISCV32_ROOTFS) + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_riscv_opensbi.py b/tests/functional/test_riscv_opensbi.py new file mode 100755 index 00000000000..d077e40f427 --- /dev/null +++ b/tests/functional/test_riscv_opensbi.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# +# OpenSBI boot test for RISC-V machines +# +# Copyright (c) 2022, Ventana Micro +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import QemuSystemTest +from qemu_test import wait_for_console_pattern + +class RiscvOpenSBI(QemuSystemTest): + + timeout = 5 + + def boot_opensbi(self): + self.vm.set_console() + self.vm.launch() + wait_for_console_pattern(self, 'Platform Name') + wait_for_console_pattern(self, 'Boot HART MEDELEG') + + def test_riscv_spike(self): + self.set_machine('spike') + self.boot_opensbi() + + def test_riscv_sifive_u(self): + self.set_machine('sifive_u') + self.boot_opensbi() + + def test_riscv_virt(self): + self.set_machine('virt') + self.boot_opensbi() + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_rx_gdbsim.py b/tests/functional/test_rx_gdbsim.py new file mode 100755 index 00000000000..49245793e17 --- /dev/null +++ b/tests/functional/test_rx_gdbsim.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel and checks the console +# +# Copyright (c) 2018 Red Hat, Inc. +# +# Author: +# Cleber Rosa +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
+ +from qemu_test import QemuSystemTest, Asset +from qemu_test import exec_command_and_wait_for_pattern +from qemu_test import wait_for_console_pattern, skipFlakyTest + + +class RxGdbSimMachine(QemuSystemTest): + + timeout = 30 + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + + ASSET_UBOOT = Asset( + ('https://github.com/philmd/qemu-testing-blob/raw/rx-gdbsim/rx/gdbsim/' + 'u-boot.bin'), + 'dd7dd4220cccf7aeb32227b26233bf39600db05c3f8e26005bcc2bf6c927207d') + ASSET_DTB = Asset( + ('https://github.com/philmd/qemu-testing-blob/raw/rx-gdbsim/rx/gdbsim/' + 'rx-gdbsim.dtb'), + 'aa278d9c1907a4501741d7ee57e7f65c02dd1b3e0323b33c6d4247f1b32cf29a') + ASSET_KERNEL = Asset( + ('https://github.com/philmd/qemu-testing-blob/raw/rx-gdbsim/rx/gdbsim/' + 'zImage'), + 'baa43205e74a7220ed8482188c5e9ce497226712abb7f4e7e4f825ce19ff9656') + + def test_uboot(self): + """ + U-Boot and checks that the console is operational. + """ + self.set_machine('gdbsim-r5f562n8') + + uboot_path = self.ASSET_UBOOT.fetch() + + self.vm.set_console() + self.vm.add_args('-bios', uboot_path, + '-no-reboot') + self.vm.launch() + uboot_version = 'U-Boot 2016.05-rc3-23705-ga1ef3c71cb-dirty' + wait_for_console_pattern(self, uboot_version) + gcc_version = 'rx-unknown-linux-gcc (GCC) 9.0.0 20181105 (experimental)' + # FIXME limit baudrate on chardev, else we type too fast + # https://gitlab.com/qemu-project/qemu/-/issues/2691 + #exec_command_and_wait_for_pattern(self, 'version', gcc_version) + + @skipFlakyTest(bug_url="https://gitlab.com/qemu-project/qemu/-/issues/2691") + def test_linux_sash(self): + """ + Boots a Linux kernel and checks that the console is operational. + """ + self.set_machine('gdbsim-r5f562n7') + + dtb_path = self.ASSET_DTB.fetch() + kernel_path = self.ASSET_KERNEL.fetch() + + self.vm.set_console() + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'earlycon' + self.vm.add_args('-kernel', kernel_path, + '-dtb', dtb_path, + '-no-reboot') + self.vm.launch() + wait_for_console_pattern(self, 'Sash command shell (version 1.1.1)', + failure_message='Kernel panic - not syncing') + exec_command_and_wait_for_pattern(self, 'printenv', 'TERM=linux') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_s390x_ccw_virtio.py b/tests/functional/test_s390x_ccw_virtio.py new file mode 100755 index 00000000000..453711aa0f5 --- /dev/null +++ b/tests/functional/test_s390x_ccw_virtio.py @@ -0,0 +1,274 @@ +#!/usr/bin/env python3 +# +# Functional test that boots an s390x Linux guest with ccw and PCI devices +# attached and checks whether the devices are recognized by Linux +# +# Copyright (c) 2020 Red Hat, Inc. +# +# Author: +# Cornelia Huck +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
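# Conventions relied on by the device checks below:
# - virtio-ccw devices are created as devno=fe.<ssid>.<devno> (0xfe being the
#   virtual channel subsystem QEMU uses); the guest enumerates them as
#   0.<ssid>.<devno>, hence the expected "0.1.1111 0.2.0000 0.3.1234".
# - for zpci devices the uid becomes the PCI domain seen by Linux, so uid=5
#   and uid=0xa show up as 0005:00:00.0 and 000a:00:00.0.
# - the sysfs "features" file is a string of '0'/'1' characters indexed by
#   virtio feature bit: bits 28/29 (ring indirect descriptors / event index)
#   are set for both rng devices, while bit 32 (VIRTIO_F_VERSION_1) is only
#   set for the device created with max_revision=2, which is how legacy vs.
#   virtio-1 operation is told apart.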
+ +import os +import tempfile + +from qemu_test import QemuSystemTest, Asset +from qemu_test import exec_command_and_wait_for_pattern +from qemu_test import wait_for_console_pattern + + +class S390CCWVirtioMachine(QemuSystemTest): + KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' + + timeout = 120 + + ASSET_BUSTER_KERNEL = Asset( + ('https://snapshot.debian.org/archive/debian/' + '20201126T092837Z/dists/buster/main/installer-s390x/' + '20190702+deb10u6/images/generic/kernel.debian'), + 'd411d17c39ae7ad38d27534376cbe88b68b403c325739364122c2e6f1537e818') + ASSET_BUSTER_INITRD = Asset( + ('https://snapshot.debian.org/archive/debian/' + '20201126T092837Z/dists/buster/main/installer-s390x/' + '20190702+deb10u6/images/generic/initrd.debian'), + '836bbd0fe6a5ca81274c28c2b063ea315ce1868660866e9b60180c575fef9fd5') + + ASSET_F31_KERNEL = Asset( + ('https://archives.fedoraproject.org/pub/archive' + '/fedora-secondary/releases/31/Server/s390x/os' + '/images/kernel.img'), + '480859574f3f44caa6cd35c62d70e1ac0609134e22ce2a954bbed9b110c06e0b') + ASSET_F31_INITRD = Asset( + ('https://archives.fedoraproject.org/pub/archive' + '/fedora-secondary/releases/31/Server/s390x/os' + '/images/initrd.img'), + '04c46095b2c49020b1c2327158898b7db747e4892ae319726192fb949716aa9c') + + def wait_for_console_pattern(self, success_message, vm=None): + wait_for_console_pattern(self, success_message, + failure_message='Kernel panic - not syncing', + vm=vm) + + def wait_for_crw_reports(self): + exec_command_and_wait_for_pattern(self, + 'while ! (dmesg -c | grep CRW) ; do sleep 1 ; done', + 'CRW reports') + + dmesg_clear_count = 1 + def clear_guest_dmesg(self): + exec_command_and_wait_for_pattern(self, 'dmesg -c > /dev/null; ' + r'echo dm_clear\ ' + str(self.dmesg_clear_count), + r'dm_clear ' + str(self.dmesg_clear_count)) + self.dmesg_clear_count += 1 + + def test_s390x_devices(self): + self.set_machine('s390-ccw-virtio') + + kernel_path = self.ASSET_BUSTER_KERNEL.fetch() + initrd_path = self.ASSET_BUSTER_INITRD.fetch() + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=sclp0 root=/dev/ram0 BOOT_DEBUG=3') + self.vm.add_args('-nographic', + '-kernel', kernel_path, + '-initrd', initrd_path, + '-append', kernel_command_line, + '-cpu', 'max,prno-trng=off', + '-device', 'virtio-net-ccw,devno=fe.1.1111', + '-device', + 'virtio-rng-ccw,devno=fe.2.0000,max_revision=0,id=rn1', + '-device', + 'virtio-rng-ccw,devno=fe.3.1234,max_revision=2,id=rn2', + '-device', 'zpci,uid=5,target=zzz', + '-device', 'virtio-net-pci,id=zzz', + '-device', 'zpci,uid=0xa,fid=12,target=serial', + '-device', 'virtio-serial-pci,id=serial', + '-device', 'virtio-balloon-ccw') + self.vm.launch() + + shell_ready = "sh: can't access tty; job control turned off" + self.wait_for_console_pattern(shell_ready) + # first debug shell is too early, we need to wait for device detection + exec_command_and_wait_for_pattern(self, 'exit', shell_ready) + + ccw_bus_ids="0.1.1111 0.2.0000 0.3.1234" + pci_bus_ids="0005:00:00.0 000a:00:00.0" + exec_command_and_wait_for_pattern(self, 'ls /sys/bus/ccw/devices/', + ccw_bus_ids) + exec_command_and_wait_for_pattern(self, 'ls /sys/bus/pci/devices/', + pci_bus_ids) + # check that the device at 0.2.0000 is in legacy mode, while the + # device at 0.3.1234 has the virtio-1 feature bit set + virtio_rng_features="00000000000000000000000000001100" + \ + "10000000000000000000000000000000" + virtio_rng_features_legacy="00000000000000000000000000001100" + \ + "00000000000000000000000000000000" + 
exec_command_and_wait_for_pattern(self, + 'cat /sys/bus/ccw/devices/0.2.0000/virtio?/features', + virtio_rng_features_legacy) + exec_command_and_wait_for_pattern(self, + 'cat /sys/bus/ccw/devices/0.3.1234/virtio?/features', + virtio_rng_features) + # check that /dev/hwrng works - and that it's gone after ejecting + exec_command_and_wait_for_pattern(self, + 'dd if=/dev/hwrng of=/dev/null bs=1k count=10', + '10+0 records out') + self.clear_guest_dmesg() + self.vm.cmd('device_del', id='rn1') + self.wait_for_crw_reports() + self.clear_guest_dmesg() + self.vm.cmd('device_del', id='rn2') + self.wait_for_crw_reports() + exec_command_and_wait_for_pattern(self, + 'dd if=/dev/hwrng of=/dev/null bs=1k count=10', + 'dd: /dev/hwrng: No such device') + # verify that we indeed have virtio-net devices (without having the + # virtio-net driver handy) + exec_command_and_wait_for_pattern(self, + 'cat /sys/bus/ccw/devices/0.1.1111/cutype', + '3832/01') + exec_command_and_wait_for_pattern(self, + r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_vendor', + r'0x1af4') + exec_command_and_wait_for_pattern(self, + r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_device', + r'0x0001') + # check fid propagation + exec_command_and_wait_for_pattern(self, + r'cat /sys/bus/pci/devices/000a\:00\:00.0/function_id', + r'0x0000000c') + # add another device + self.clear_guest_dmesg() + self.vm.cmd('device_add', driver='virtio-net-ccw', + devno='fe.0.4711', id='net_4711') + self.wait_for_crw_reports() + exec_command_and_wait_for_pattern(self, 'for i in 1 2 3 4 5 6 7 ; do ' + 'if [ -e /sys/bus/ccw/devices/*4711 ]; then break; fi ;' + 'sleep 1 ; done ; ls /sys/bus/ccw/devices/', + '0.0.4711') + # and detach it again + self.clear_guest_dmesg() + self.vm.cmd('device_del', id='net_4711') + self.vm.event_wait(name='DEVICE_DELETED', + match={'data': {'device': 'net_4711'}}) + self.wait_for_crw_reports() + exec_command_and_wait_for_pattern(self, + 'ls /sys/bus/ccw/devices/0.0.4711', + 'No such file or directory') + # test the virtio-balloon device + exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo', + 'MemTotal: 115640 kB') + self.vm.cmd('human-monitor-command', command_line='balloon 96') + exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo', + 'MemTotal: 82872 kB') + self.vm.cmd('human-monitor-command', command_line='balloon 128') + exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo', + 'MemTotal: 115640 kB') + + + def test_s390x_fedora(self): + self.set_machine('s390-ccw-virtio') + + kernel_path = self.ASSET_F31_KERNEL.fetch() + + initrd_path = self.uncompress(self.ASSET_F31_INITRD, format="xz") + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + ' audit=0 ' + 'rd.plymouth=0 plymouth.enable=0 rd.rescue') + self.vm.add_args('-nographic', + '-smp', '4', + '-m', '512', + '-name', 'Some Guest Name', + '-uuid', '30de4fd9-b4d5-409e-86a5-09b387f70bfa', + '-kernel', kernel_path, + '-initrd', initrd_path, + '-append', kernel_command_line, + '-device', 'zpci,uid=7,target=n', + '-device', 'virtio-net-pci,id=n,mac=02:ca:fe:fa:ce:12', + '-device', 'virtio-rng-ccw,devno=fe.1.9876', + '-device', 'virtio-gpu-ccw,devno=fe.2.5432') + self.vm.launch() + self.wait_for_console_pattern('Kernel command line: %s' + % kernel_command_line) + self.wait_for_console_pattern('Entering emergency mode') + + # Some tests to see whether the CLI options have been considered: + self.log.info("Test whether QEMU CLI options have been considered") + exec_command_and_wait_for_pattern(self, 
+ 'while ! (dmesg | grep enP7p0s0) ; do sleep 1 ; done', + 'virtio_net virtio0 enP7p0s0: renamed') + exec_command_and_wait_for_pattern(self, 'lspci', + '0007:00:00.0 Class 0200: Device 1af4:1000') + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/net/enP7p0s0/address', + '02:ca:fe:fa:ce:12') + exec_command_and_wait_for_pattern(self, 'lscss', '0.1.9876') + exec_command_and_wait_for_pattern(self, 'lscss', '0.2.5432') + exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', + 'processors : 4') + exec_command_and_wait_for_pattern(self, 'grep MemTotal /proc/meminfo', + 'MemTotal: 499848 kB') + exec_command_and_wait_for_pattern(self, 'grep Name /proc/sysinfo', + 'Extended Name: Some Guest Name') + exec_command_and_wait_for_pattern(self, 'grep UUID /proc/sysinfo', + '30de4fd9-b4d5-409e-86a5-09b387f70bfa') + + # Disable blinking cursor, then write some stuff into the framebuffer. + # QEMU's PPM screendumps contain uncompressed 24-bit values, while the + # framebuffer uses 32-bit, so we pad our text with some spaces when + # writing to the framebuffer. Since the PPM is uncompressed, we then + # can simply read the written "magic bytes" back from the PPM file to + # check whether the framebuffer is working as expected. + # Unfortunately, this test is flaky, so we don't run it by default + if os.getenv('QEMU_TEST_FLAKY_TESTS'): + self.log.info("Test screendump of virtio-gpu device") + exec_command_and_wait_for_pattern(self, + 'while ! (dmesg | grep gpudrmfb) ; do sleep 1 ; done', + 'virtio_gpudrmfb frame buffer device') + exec_command_and_wait_for_pattern(self, + r'echo -e "\e[?25l" > /dev/tty0', ':/#') + exec_command_and_wait_for_pattern(self, 'for ((i=0;i<250;i++)); do ' + 'echo " The qu ick fo x j ump s o ver a laz y d og" >> fox.txt;' + 'done', + ':/#') + exec_command_and_wait_for_pattern(self, + 'dd if=fox.txt of=/dev/fb0 bs=1000 oflag=sync,nocache ; rm fox.txt', + '12+0 records out') + with tempfile.NamedTemporaryFile(suffix='.ppm', + prefix='qemu-scrdump-') as ppmfile: + self.vm.cmd('screendump', filename=ppmfile.name) + ppmfile.seek(0) + line = ppmfile.readline() + self.assertEqual(line, b"P6\n") + line = ppmfile.readline() + self.assertEqual(line, b"1280 800\n") + line = ppmfile.readline() + self.assertEqual(line, b"255\n") + line = ppmfile.readline(256) + self.assertEqual(line, b"The quick fox jumps over a lazy dog\n") + else: + self.log.info("Skipped flaky screendump of virtio-gpu device test") + + # Hot-plug a virtio-crypto device and see whether it gets accepted + self.log.info("Test hot-plug virtio-crypto device") + self.clear_guest_dmesg() + self.vm.cmd('object-add', qom_type='cryptodev-backend-builtin', + id='cbe0') + self.vm.cmd('device_add', driver='virtio-crypto-ccw', id='crypdev0', + cryptodev='cbe0', devno='fe.0.2342') + exec_command_and_wait_for_pattern(self, + 'while ! (dmesg -c | grep Accelerator.device) ; do' + ' sleep 1 ; done', 'Accelerator device is ready') + exec_command_and_wait_for_pattern(self, 'lscss', '0.0.2342') + self.vm.cmd('device_del', id='crypdev0') + self.vm.cmd('object-del', id='cbe0') + exec_command_and_wait_for_pattern(self, + 'while ! 
(dmesg -c | grep Start.virtcrypto_remove) ; do' + ' sleep 1 ; done', 'Start virtcrypto_remove.') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_s390x_pxelinux.py b/tests/functional/test_s390x_pxelinux.py new file mode 100755 index 00000000000..4fc33b8c46d --- /dev/null +++ b/tests/functional/test_s390x_pxelinux.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Functional test that checks the pxelinux.cfg network booting of a s390x VM +# (TFTP booting without config file is already tested by the pxe qtest, so +# we don't repeat that here). + +import os +import shutil + +from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern + + +pxelinux_cfg_contents='''# pxelinux.cfg style config file +default Debian +label Nonexisting +kernel kernel.notavailable +initrd initrd.notavailable +label Debian +kernel kernel.debian +initrd initrd.debian +append testoption=teststring +label Fedora +kernel kernel.fedora +''' + +class S390PxeLinux(QemuSystemTest): + + ASSET_DEBIAN_KERNEL = Asset( + ('https://snapshot.debian.org/archive/debian/' + '20201126T092837Z/dists/buster/main/installer-s390x/' + '20190702+deb10u6/images/generic/kernel.debian'), + 'd411d17c39ae7ad38d27534376cbe88b68b403c325739364122c2e6f1537e818') + + ASSET_DEBIAN_INITRD = Asset( + ('https://snapshot.debian.org/archive/debian/' + '20201126T092837Z/dists/buster/main/installer-s390x/' + '20190702+deb10u6/images/generic/initrd.debian'), + '836bbd0fe6a5ca81274c28c2b063ea315ce1868660866e9b60180c575fef9fd5') + + ASSET_FEDORA_KERNEL = Asset( + ('https://archives.fedoraproject.org/pub/archive' + '/fedora-secondary/releases/31/Server/s390x/os' + '/images/kernel.img'), + '480859574f3f44caa6cd35c62d70e1ac0609134e22ce2a954bbed9b110c06e0b') + + def pxelinux_launch(self, pl_name='default', extra_opts=None): + self.require_netdev('user') + self.set_machine('s390-ccw-virtio') + + debian_kernel = self.ASSET_DEBIAN_KERNEL.fetch() + debian_initrd = self.ASSET_DEBIAN_INITRD.fetch() + fedora_kernel = self.ASSET_FEDORA_KERNEL.fetch() + + # Prepare a folder for the TFTP "server": + tftpdir = self.scratch_file('tftp') + shutil.rmtree(tftpdir, ignore_errors=True) # Remove stale stuff + os.mkdir(tftpdir) + shutil.copy(debian_kernel, os.path.join(tftpdir, 'kernel.debian')) + shutil.copy(debian_initrd, os.path.join(tftpdir, 'initrd.debian')) + shutil.copy(fedora_kernel, os.path.join(tftpdir, 'kernel.fedora')) + + pxelinuxdir = self.scratch_file('tftp', 'pxelinux.cfg') + os.mkdir(pxelinuxdir) + + cfg_fname = self.scratch_file('tftp', 'pxelinux.cfg', pl_name) + with open(cfg_fname, 'w', encoding='utf-8') as f: + f.write(pxelinux_cfg_contents) + + virtio_net_dev = 'virtio-net-ccw,netdev=n1,bootindex=1' + if extra_opts: + virtio_net_dev += ',' + extra_opts + + self.vm.add_args('-m', '384', + '-netdev', f'user,id=n1,tftp={tftpdir}', + '-device', virtio_net_dev) + self.vm.set_console() + self.vm.launch() + + + def test_default(self): + self.pxelinux_launch() + # The kernel prints its arguments to the console, so we can use + # this to check whether the kernel parameters are correctly handled: + wait_for_console_pattern(self, 'testoption=teststring') + # Now also check that we've successfully loaded the initrd: + wait_for_console_pattern(self, 'Unpacking initramfs...') + wait_for_console_pattern(self, 'Run /init as init process') + + def test_mac(self): + self.pxelinux_launch(pl_name='01-02-ca-fe-ba-be-42', + extra_opts='mac=02:ca:fe:ba:be:42,loadparm=3') + 
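+        # loadparm=3 selects the third pxelinux.cfg entry ('Fedora'), so the
+        # Fedora 31 kernel banner below confirms that the MAC-named config
+        # file pxelinux.cfg/01-02-ca-fe-ba-be-42 was fetched and parsed.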
wait_for_console_pattern(self, 'Linux version 5.3.7-301.fc31.s390x') + + def test_uuid(self): + # Also add a non-bootable disk to check the fallback to network boot: + self.vm.add_args('-blockdev', 'null-co,size=65536,node-name=d1', + '-device', 'virtio-blk,drive=d1,bootindex=0,loadparm=1', + '-uuid', '550e8400-e29b-11d4-a716-446655441234') + self.pxelinux_launch(pl_name='550e8400-e29b-11d4-a716-446655441234') + wait_for_console_pattern(self, 'Debian 4.19.146-1 (2020-09-17)') + + def test_ip(self): + self.vm.add_args('-M', 'loadparm=3') + self.pxelinux_launch(pl_name='0A00020F') + wait_for_console_pattern(self, 'Linux version 5.3.7-301.fc31.s390x') + + def test_menu(self): + self.vm.add_args('-boot', 'menu=on,splash-time=10') + self.pxelinux_launch(pl_name='0A00') + wait_for_console_pattern(self, '[1] Nonexisting') + wait_for_console_pattern(self, '[2] Debian') + wait_for_console_pattern(self, '[3] Fedora') + wait_for_console_pattern(self, 'Debian 4.19.146-1 (2020-09-17)') + + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_s390x_replay.py b/tests/functional/test_s390x_replay.py new file mode 100755 index 00000000000..33b5843adae --- /dev/null +++ b/tests/functional/test_s390x_replay.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# +# Replay test that boots a Linux kernel on an s390x machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from replay_kernel import ReplayKernelBase + + +class S390xReplay(ReplayKernelBase): + + ASSET_KERNEL = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora-secondary/' + 'releases/29/Everything/s390x/os/images/kernel.img'), + 'dace03b8ae0c9f670ebb9b8d6ce5eb24b62987f346de8f1300a439bb00bb99e7') + + def test_s390_ccw_virtio(self): + self.set_machine('s390-ccw-virtio') + kernel_path = self.ASSET_KERNEL.fetch() + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=sclp0' + console_pattern = 'Kernel command line: %s' % kernel_command_line + self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=9) + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_s390x_topology.py b/tests/functional/test_s390x_topology.py new file mode 100755 index 00000000000..1b5dc651353 --- /dev/null +++ b/tests/functional/test_s390x_topology.py @@ -0,0 +1,415 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel and checks the console +# +# Copyright IBM Corp. 2023 +# +# Author: +# Pierre Morel +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import QemuSystemTest, Asset +from qemu_test import exec_command +from qemu_test import exec_command_and_wait_for_pattern +from qemu_test import wait_for_console_pattern + + +class S390CPUTopology(QemuSystemTest): + """ + S390x CPU topology consists of 4 topology layers, from bottom to top, + the cores, sockets, books and drawers and 2 modifiers attributes, + the entitlement and the dedication. + See: docs/system/s390x/cpu-topology.rst. + + S390x CPU topology is setup in different ways: + - implicitly from the '-smp' argument by completing each topology + level one after the other beginning with drawer 0, book 0 and + socket 0. 
+    - explicitly from the '-device' argument on the QEMU command line
+    - explicitly by hotplug of a new CPU using QMP or HMP
+    - it is modified by using QMP 'set-cpu-topology'
+
+    The S390x modifier attribute entitlement depends on the machine
+    polarization, which can be horizontal or vertical.
+    The polarization is changed on a request from the guest.
+    """
+    timeout = 90
+    event_timeout = 10
+
+    KERNEL_COMMON_COMMAND_LINE = ('printk.time=0 '
+                                  'root=/dev/ram '
+                                  'selinux=0 '
+                                  'rdinit=/bin/sh')
+    ASSET_F35_KERNEL = Asset(
+        ('https://archives.fedoraproject.org/pub/archive'
+         '/fedora-secondary/releases/35/Server/s390x/os'
+         '/images/kernel.img'),
+        '1f2dddfd11bb1393dd2eb2e784036fbf6fc11057a6d7d27f9eb12d3edc67ef73')
+
+    ASSET_F35_INITRD = Asset(
+        ('https://archives.fedoraproject.org/pub/archive'
+         '/fedora-secondary/releases/35/Server/s390x/os'
+         '/images/initrd.img'),
+        '1100145fbca00240c8c372ae4b89b48c99844bc189b3dfbc3f481dc60055ca46')
+
+    def wait_until_booted(self):
+        wait_for_console_pattern(self, 'no job control',
+                                 failure_message='Kernel panic - not syncing',
+                                 vm=None)
+
+    def check_topology(self, c, s, b, d, e, t):
+        res = self.vm.qmp('query-cpus-fast')
+        cpus = res['return']
+        for cpu in cpus:
+            core = cpu['props']['core-id']
+            socket = cpu['props']['socket-id']
+            book = cpu['props']['book-id']
+            drawer = cpu['props']['drawer-id']
+            entitlement = cpu.get('entitlement')
+            dedicated = cpu.get('dedicated')
+            if core == c:
+                self.assertEqual(drawer, d)
+                self.assertEqual(book, b)
+                self.assertEqual(socket, s)
+                self.assertEqual(entitlement, e)
+                self.assertEqual(dedicated, t)
+
+    def kernel_init(self):
+        """
+        We need a VM that supports CPU topology;
+        currently this is only the case when using KVM, not TCG.
+        We need a kernel supporting the CPU topology.
+        We need a minimal root filesystem with a shell.
+        """
+        self.require_accelerator("kvm")
+        kernel_path = self.ASSET_F35_KERNEL.fetch()
+        initrd_path = self.uncompress(self.ASSET_F35_INITRD, format="xz")
+
+        self.vm.set_console()
+        kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE
+        self.vm.add_args('-nographic',
+                         '-enable-kvm',
+                         '-cpu', 'max,ctop=on',
+                         '-m', '512',
+                         '-kernel', kernel_path,
+                         '-initrd', initrd_path,
+                         '-append', kernel_command_line)
+
+    def system_init(self):
+        self.log.info("System init")
+        exec_command_and_wait_for_pattern(self,
+            """ mount proc -t proc /proc;
+                mount sys -t sysfs /sys;
+                cat /sys/devices/system/cpu/dispatching """,
+            '0')
+
+    def test_single(self):
+        """
+        This test checks the simplest topology with a single CPU.
+        """
+        self.set_machine('s390-ccw-virtio')
+        self.kernel_init()
+        self.vm.launch()
+        self.wait_until_booted()
+        self.check_topology(0, 0, 0, 0, 'medium', False)
+
+    def test_default(self):
+        """
+        This test checks the implicit topology.
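+        With '-smp 13,drawers=2,books=2,sockets=3,cores=2,maxcpus=24' the
+        13 cores are expected to fill the three sockets of book 0 first
+        (cores 0-5), then the sockets of book 1 (cores 6-11), with core 12
+        spilling over into drawer 1.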
+ """ + self.set_machine('s390-ccw-virtio') + self.kernel_init() + self.vm.add_args('-smp', + '13,drawers=2,books=2,sockets=3,cores=2,maxcpus=24') + self.vm.launch() + self.wait_until_booted() + self.check_topology(0, 0, 0, 0, 'medium', False) + self.check_topology(1, 0, 0, 0, 'medium', False) + self.check_topology(2, 1, 0, 0, 'medium', False) + self.check_topology(3, 1, 0, 0, 'medium', False) + self.check_topology(4, 2, 0, 0, 'medium', False) + self.check_topology(5, 2, 0, 0, 'medium', False) + self.check_topology(6, 0, 1, 0, 'medium', False) + self.check_topology(7, 0, 1, 0, 'medium', False) + self.check_topology(8, 1, 1, 0, 'medium', False) + self.check_topology(9, 1, 1, 0, 'medium', False) + self.check_topology(10, 2, 1, 0, 'medium', False) + self.check_topology(11, 2, 1, 0, 'medium', False) + self.check_topology(12, 0, 0, 1, 'medium', False) + + def test_move(self): + """ + This test checks the topology modification by moving a CPU + to another socket: CPU 0 is moved from socket 0 to socket 2. + """ + self.set_machine('s390-ccw-virtio') + self.kernel_init() + self.vm.add_args('-smp', + '1,drawers=2,books=2,sockets=3,cores=2,maxcpus=24') + self.vm.launch() + self.wait_until_booted() + + self.check_topology(0, 0, 0, 0, 'medium', False) + res = self.vm.qmp('set-cpu-topology', + {'core-id': 0, 'socket-id': 2, 'entitlement': 'low'}) + self.assertEqual(res['return'], {}) + self.check_topology(0, 2, 0, 0, 'low', False) + + def test_dash_device(self): + """ + This test verifies that a CPU defined with the '-device' + command line option finds its right place inside the topology. + """ + self.set_machine('s390-ccw-virtio') + self.kernel_init() + self.vm.add_args('-smp', + '1,drawers=2,books=2,sockets=3,cores=2,maxcpus=24') + self.vm.add_args('-device', 'max-s390x-cpu,core-id=10') + self.vm.add_args('-device', + 'max-s390x-cpu,' + 'core-id=1,socket-id=0,book-id=1,drawer-id=1,entitlement=low') + self.vm.add_args('-device', + 'max-s390x-cpu,' + 'core-id=2,socket-id=0,book-id=1,drawer-id=1,entitlement=medium') + self.vm.add_args('-device', + 'max-s390x-cpu,' + 'core-id=3,socket-id=1,book-id=1,drawer-id=1,entitlement=high') + self.vm.add_args('-device', + 'max-s390x-cpu,' + 'core-id=4,socket-id=1,book-id=1,drawer-id=1') + self.vm.add_args('-device', + 'max-s390x-cpu,' + 'core-id=5,socket-id=2,book-id=1,drawer-id=1,dedicated=true') + + self.vm.launch() + self.wait_until_booted() + + self.check_topology(10, 2, 1, 0, 'medium', False) + self.check_topology(1, 0, 1, 1, 'low', False) + self.check_topology(2, 0, 1, 1, 'medium', False) + self.check_topology(3, 1, 1, 1, 'high', False) + self.check_topology(4, 1, 1, 1, 'medium', False) + self.check_topology(5, 2, 1, 1, 'high', True) + + + def guest_set_dispatching(self, dispatching): + exec_command(self, + f'echo {dispatching} > /sys/devices/system/cpu/dispatching') + self.vm.event_wait('CPU_POLARIZATION_CHANGE', self.event_timeout) + exec_command_and_wait_for_pattern(self, + 'cat /sys/devices/system/cpu/dispatching', dispatching) + + + def test_polarization(self): + """ + This test verifies that QEMU modifies the entitlement change after + several guest polarization change requests. 
+ """ + self.set_machine('s390-ccw-virtio') + self.kernel_init() + self.vm.launch() + self.wait_until_booted() + + self.system_init() + res = self.vm.qmp('query-s390x-cpu-polarization') + self.assertEqual(res['return']['polarization'], 'horizontal') + self.check_topology(0, 0, 0, 0, 'medium', False) + + self.guest_set_dispatching('1') + res = self.vm.qmp('query-s390x-cpu-polarization') + self.assertEqual(res['return']['polarization'], 'vertical') + self.check_topology(0, 0, 0, 0, 'medium', False) + + self.guest_set_dispatching('0') + res = self.vm.qmp('query-s390x-cpu-polarization') + self.assertEqual(res['return']['polarization'], 'horizontal') + self.check_topology(0, 0, 0, 0, 'medium', False) + + + def check_polarization(self, polarization): + #We need to wait for the change to have been propagated to the kernel + exec_command_and_wait_for_pattern(self, + "\n".join([ + "timeout 1 sh -c 'while true", + 'do', + ' syspath="/sys/devices/system/cpu/cpu0/polarization"', + ' polarization="$(cat "$syspath")" || exit', + f' if [ "$polarization" = "{polarization}" ]; then', + ' exit 0', + ' fi', + ' sleep 0.01', + #searched for strings mustn't show up in command, '' to obfuscate + "done' && echo succ''ess || echo fail''ure", + ]), + "success", "failure") + + + def test_entitlement(self): + """ + This test verifies that QEMU modifies the entitlement + after a guest request and that the guest sees the change. + """ + self.set_machine('s390-ccw-virtio') + self.kernel_init() + self.vm.launch() + self.wait_until_booted() + + self.system_init() + + self.check_polarization('horizontal') + self.check_topology(0, 0, 0, 0, 'medium', False) + + self.guest_set_dispatching('1') + self.check_polarization('vertical:medium') + self.check_topology(0, 0, 0, 0, 'medium', False) + + res = self.vm.qmp('set-cpu-topology', + {'core-id': 0, 'entitlement': 'low'}) + self.assertEqual(res['return'], {}) + self.check_polarization('vertical:low') + self.check_topology(0, 0, 0, 0, 'low', False) + + res = self.vm.qmp('set-cpu-topology', + {'core-id': 0, 'entitlement': 'medium'}) + self.assertEqual(res['return'], {}) + self.check_polarization('vertical:medium') + self.check_topology(0, 0, 0, 0, 'medium', False) + + res = self.vm.qmp('set-cpu-topology', + {'core-id': 0, 'entitlement': 'high'}) + self.assertEqual(res['return'], {}) + self.check_polarization('vertical:high') + self.check_topology(0, 0, 0, 0, 'high', False) + + self.guest_set_dispatching('0') + self.check_polarization("horizontal") + self.check_topology(0, 0, 0, 0, 'high', False) + + + def test_dedicated(self): + """ + This test verifies that QEMU adjusts the entitlement correctly when a + CPU is made dedicated. + QEMU retains the entitlement value when horizontal polarization is in effect. + For the guest, the field shows the effective value of the entitlement. 
+ """ + self.set_machine('s390-ccw-virtio') + self.kernel_init() + self.vm.launch() + self.wait_until_booted() + + self.system_init() + + self.check_polarization("horizontal") + + res = self.vm.qmp('set-cpu-topology', + {'core-id': 0, 'dedicated': True}) + self.assertEqual(res['return'], {}) + self.check_topology(0, 0, 0, 0, 'high', True) + self.check_polarization("horizontal") + + self.guest_set_dispatching('1') + self.check_topology(0, 0, 0, 0, 'high', True) + self.check_polarization("vertical:high") + + self.guest_set_dispatching('0') + self.check_topology(0, 0, 0, 0, 'high', True) + self.check_polarization("horizontal") + + + def test_socket_full(self): + """ + This test verifies that QEMU does not accept to overload a socket. + The socket-id 0 on book-id 0 already contains CPUs 0 and 1 and can + not accept any new CPU while socket-id 0 on book-id 1 is free. + """ + self.set_machine('s390-ccw-virtio') + self.kernel_init() + self.vm.add_args('-smp', + '3,drawers=2,books=2,sockets=3,cores=2,maxcpus=24') + self.vm.launch() + self.wait_until_booted() + + self.system_init() + + res = self.vm.qmp('set-cpu-topology', + {'core-id': 2, 'socket-id': 0, 'book-id': 0}) + self.assertEqual(res['error']['class'], 'GenericError') + + res = self.vm.qmp('set-cpu-topology', + {'core-id': 2, 'socket-id': 0, 'book-id': 1}) + self.assertEqual(res['return'], {}) + + def test_dedicated_error(self): + """ + This test verifies that QEMU refuses to lower the entitlement + of a dedicated CPU + """ + self.set_machine('s390-ccw-virtio') + self.kernel_init() + self.vm.launch() + self.wait_until_booted() + + self.system_init() + + res = self.vm.qmp('set-cpu-topology', + {'core-id': 0, 'dedicated': True}) + self.assertEqual(res['return'], {}) + + self.check_topology(0, 0, 0, 0, 'high', True) + + self.guest_set_dispatching('1') + + self.check_topology(0, 0, 0, 0, 'high', True) + + res = self.vm.qmp('set-cpu-topology', + {'core-id': 0, 'entitlement': 'low', 'dedicated': True}) + self.assertEqual(res['error']['class'], 'GenericError') + + res = self.vm.qmp('set-cpu-topology', + {'core-id': 0, 'entitlement': 'low'}) + self.assertEqual(res['error']['class'], 'GenericError') + + res = self.vm.qmp('set-cpu-topology', + {'core-id': 0, 'entitlement': 'medium', 'dedicated': True}) + self.assertEqual(res['error']['class'], 'GenericError') + + res = self.vm.qmp('set-cpu-topology', + {'core-id': 0, 'entitlement': 'medium'}) + self.assertEqual(res['error']['class'], 'GenericError') + + res = self.vm.qmp('set-cpu-topology', + {'core-id': 0, 'entitlement': 'low', 'dedicated': False}) + self.assertEqual(res['return'], {}) + + res = self.vm.qmp('set-cpu-topology', + {'core-id': 0, 'entitlement': 'medium', 'dedicated': False}) + self.assertEqual(res['return'], {}) + + def test_move_error(self): + """ + This test verifies that QEMU refuses to move a CPU to an + nonexistent location + """ + self.set_machine('s390-ccw-virtio') + self.kernel_init() + self.vm.launch() + self.wait_until_booted() + + self.system_init() + + res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'drawer-id': 1}) + self.assertEqual(res['error']['class'], 'GenericError') + + res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'book-id': 1}) + self.assertEqual(res['error']['class'], 'GenericError') + + res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'socket-id': 1}) + self.assertEqual(res['error']['class'], 'GenericError') + + self.check_topology(0, 0, 0, 0, 'medium', False) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git 
a/tests/functional/test_s390x_tuxrun.py b/tests/functional/test_s390x_tuxrun.py new file mode 100755 index 00000000000..8df3c6893b7 --- /dev/null +++ b/tests/functional/test_s390x_tuxrun.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunS390xTest(TuxRunBaselineTest): + + ASSET_S390X_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/s390/bzImage', + 'ee67e91db52a2aed104a7c72b2a08987c678f8179c029626789c35d6dd0fedf1') + ASSET_S390X_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/s390/rootfs.ext4.zst', + 'bff7971fc2fef56372d98afe4557b82fd0a785a241e44c29b058e577ad1bbb44') + + def test_s390(self): + self.set_machine('s390-ccw-virtio') + self.wait_for_shutdown=False + self.common_tuxrun(kernel_asset=self.ASSET_S390X_KERNEL, + rootfs_asset=self.ASSET_S390X_ROOTFS, + drive="virtio-blk-ccw", + haltmsg="Requesting system halt") + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_sh4_r2d.py b/tests/functional/test_sh4_r2d.py new file mode 100755 index 00000000000..03a648374ce --- /dev/null +++ b/tests/functional/test_sh4_r2d.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# +# Boot a Linux kernel on a r2d sh4 machine and check the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset, skipFlakyTest + + +class R2dTest(LinuxKernelTest): + + ASSET_DAY09 = Asset( + 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day09.tar.xz', + 'a61b44d2630a739d1380cc4ff4b80981d47ccfd5992f1484ccf48322c35f09ac') + + # This test has a 6-10% failure rate on various hosts that look + # like issues with a buggy kernel. + # XXX file tracking bug + @skipFlakyTest(bug_url=None) + def test_r2d(self): + self.set_machine('r2d') + self.archive_extract(self.ASSET_DAY09) + self.vm.add_args('-append', 'console=ttySC1') + self.launch_kernel(self.scratch_file('day09', 'zImage'), + console_index=1, + wait_for='QEMU advent calendar') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_sh4_tuxrun.py b/tests/functional/test_sh4_tuxrun.py new file mode 100755 index 00000000000..1748f8c7eff --- /dev/null +++ b/tests/functional/test_sh4_tuxrun.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. 
+# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset, exec_command_and_wait_for_pattern +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunSh4Test(TuxRunBaselineTest): + + ASSET_SH4_KERNEL = Asset( + 'https://storage.tuxboot.com/20230331/sh4/zImage', + '29d9b2aba604a0f53a5dc3b5d0f2b8e35d497de1129f8ee5139eb6fdf0db692f') + ASSET_SH4_ROOTFS = Asset( + 'https://storage.tuxboot.com/20230331/sh4/rootfs.ext4.zst', + '3592a7a3d5a641e8b9821449e77bc43c9904a56c30d45da0694349cfd86743fd') + + def test_sh4(self): + self.set_machine('r2d') + self.cpu='sh7785' + self.root='sda' + self.console='ttySC1' + + # The test is currently too unstable to do much in userspace + # so we skip common_tuxrun and do a minimal boot and shutdown. + (kernel, disk, dtb) = self.fetch_tuxrun_assets(self.ASSET_SH4_KERNEL, + self.ASSET_SH4_ROOTFS) + + # the console comes on the second serial port + self.prepare_run(kernel, disk, + "driver=ide-hd,bus=ide.0,unit=0", + console_index=1) + self.vm.launch() + + self.wait_for_console_pattern("tuxtest login:") + exec_command_and_wait_for_pattern(self, 'root', 'root@tuxtest:~#') + exec_command_and_wait_for_pattern(self, 'halt', + "reboot: System halted") + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_sh4eb_r2d.py b/tests/functional/test_sh4eb_r2d.py new file mode 100755 index 00000000000..473093bbe13 --- /dev/null +++ b/tests/functional/test_sh4eb_r2d.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# +# Boot a Linux kernel on a r2d sh4eb machine and check the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset +from qemu_test import exec_command_and_wait_for_pattern + + +class R2dEBTest(LinuxKernelTest): + + ASSET_TGZ = Asset( + 'https://landley.net/bin/mkroot/0.8.11/sh4eb.tgz', + 'be8c6cb5aef8406899dc5aa5e22b6aa45840eb886cdd3ced51555c10577ada2c') + + def test_sh4eb_r2d(self): + self.set_machine('r2d') + self.archive_extract(self.ASSET_TGZ) + self.vm.add_args('-append', 'console=ttySC1 noiotrap') + self.launch_kernel(self.scratch_file('sh4eb', 'linux-kernel'), + initrd=self.scratch_file('sh4eb', + 'initramfs.cpio.gz'), + console_index=1, wait_for='Type exit when done') + exec_command_and_wait_for_pattern(self, 'exit', 'Restarting system') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_sparc64_sun4u.py b/tests/functional/test_sparc64_sun4u.py new file mode 100755 index 00000000000..27ac2896590 --- /dev/null +++ b/tests/functional/test_sparc64_sun4u.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel and checks the console +# +# Copyright (c) 2020 Red Hat, Inc. +# +# Author: +# Thomas Huth +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. 
+ +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern + + +class Sun4uMachine(QemuSystemTest): + """Boots the Linux kernel and checks that the console is operational""" + + timeout = 90 + + ASSET_IMAGE = Asset( + ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/' + 'day23.tar.xz'), + 'a3ed92450704af244178351afd0e769776e7decb298e95a63abfd9a6e3f6c854') + + def test_sparc64_sun4u(self): + self.set_machine('sun4u') + kernel_file = self.archive_extract(self.ASSET_IMAGE, + member='day23/vmlinux') + self.vm.set_console() + self.vm.add_args('-kernel', kernel_file, + '-append', 'printk.time=0') + self.vm.launch() + wait_for_console_pattern(self, 'Starting logging: OK') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_sparc64_tuxrun.py b/tests/functional/test_sparc64_tuxrun.py new file mode 100755 index 00000000000..0d7b43dd74c --- /dev/null +++ b/tests/functional/test_sparc64_tuxrun.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunSparc64Test(TuxRunBaselineTest): + + ASSET_SPARC64_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/sparc64/vmlinux', + 'a04cfb2e70a264051d161fdd93aabf4b2a9472f2e435c14ed18c5848c5fed261') + ASSET_SPARC64_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/sparc64/rootfs.ext4.zst', + '479c3dc104c82b68be55e2c0c5c38cd473d0b37ad4badccde4775bb88ce34611') + + def test_sparc64(self): + self.set_machine('sun4u') + self.root='sda' + self.wait_for_shutdown=False + self.common_tuxrun(kernel_asset=self.ASSET_SPARC64_KERNEL, + rootfs_asset=self.ASSET_SPARC64_ROOTFS, + drive="driver=ide-hd,bus=ide.0,unit=0") + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_sparc_replay.py b/tests/functional/test_sparc_replay.py new file mode 100755 index 00000000000..865d6486f99 --- /dev/null +++ b/tests/functional/test_sparc_replay.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# +# Replay test that boots a Linux kernel on a sparc sun4m machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from replay_kernel import ReplayKernelBase + + +class SparcReplay(ReplayKernelBase): + + ASSET_DAY11 = Asset( + 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day11.tar.xz', + 'c776533ba756bf4dd3f1fc4c024fb50ef0d853e05c5f5ddf0900a32d1eaa49e0') + + def test_replay(self): + self.set_machine('SS-10') + kernel_path = self.archive_extract(self.ASSET_DAY11, + member="day11/zImage.elf") + self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE, + 'QEMU advent calendar') + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_sparc_sun4m.py b/tests/functional/test_sparc_sun4m.py new file mode 100755 index 00000000000..7cd28ebdd1f --- /dev/null +++ b/tests/functional/test_sparc_sun4m.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on a sparc sun4m machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + + +class 
Sun4mTest(LinuxKernelTest): + + ASSET_DAY11 = Asset( + 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day11.tar.xz', + 'c776533ba756bf4dd3f1fc4c024fb50ef0d853e05c5f5ddf0900a32d1eaa49e0') + + def test_sparc_ss20(self): + self.set_machine('SS-20') + self.archive_extract(self.ASSET_DAY11) + self.launch_kernel(self.scratch_file('day11', 'zImage.elf'), + wait_for='QEMU advent calendar') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_version.py b/tests/functional/test_version.py new file mode 100755 index 00000000000..3ab3b67f7e3 --- /dev/null +++ b/tests/functional/test_version.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# +# Version check example test +# +# Copyright (c) 2018 Red Hat, Inc. +# +# Author: +# Cleber Rosa +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + + +from qemu_test import QemuSystemTest + + +class Version(QemuSystemTest): + + def test_qmp_human_info_version(self): + self.set_machine('none') + self.vm.add_args('-nodefaults') + self.vm.launch() + res = self.vm.cmd('human-monitor-command', + command_line='info version') + self.assertRegex(res, r'^(\d+\.\d+\.\d)') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_virtio_balloon.py b/tests/functional/test_virtio_balloon.py new file mode 100755 index 00000000000..5877b6c408c --- /dev/null +++ b/tests/functional/test_virtio_balloon.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +# +# virtio-balloon tests +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +import time + +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern +from qemu_test import exec_command_and_wait_for_pattern + +UNSET_STATS_VALUE = 18446744073709551615 + + +class VirtioBalloonx86(QemuSystemTest): + + ASSET_KERNEL = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases' + '/31/Server/x86_64/os/images/pxeboot/vmlinuz'), + 'd4738d03dbbe083ca610d0821d0a8f1488bebbdccef54ce33e3adb35fda00129') + + ASSET_INITRD = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases' + '/31/Server/x86_64/os/images/pxeboot/initrd.img'), + '277cd6c7adf77c7e63d73bbb2cded8ef9e2d3a2f100000e92ff1f8396513cd8b') + + ASSET_DISKIMAGE = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases' + '/31/Cloud/x86_64/images/Fedora-Cloud-Base-31-1.9.x86_64.qcow2'), + 'e3c1b309d9203604922d6e255c2c5d098a309c2d46215d8fc026954f3c5c27a0') + + DEFAULT_KERNEL_PARAMS = ('root=/dev/vda1 console=ttyS0 net.ifnames=0 ' + 'rd.rescue quiet') + + def wait_for_console_pattern(self, success_message, vm=None): + wait_for_console_pattern( + self, + success_message, + failure_message="Kernel panic - not syncing", + vm=vm, + ) + + def mount_root(self): + self.wait_for_console_pattern('Entering emergency mode.') + prompt = '# ' + self.wait_for_console_pattern(prompt) + + # Synchronize on virtio-block driver creating the root device + exec_command_and_wait_for_pattern(self, + "while ! 
(dmesg -c | grep vda:) ; do sleep 1 ; done",
+            "vda1")
+
+        exec_command_and_wait_for_pattern(self, 'mount /dev/vda1 /sysroot',
+                                          prompt)
+        exec_command_and_wait_for_pattern(self, 'chroot /sysroot',
+                                          prompt)
+        exec_command_and_wait_for_pattern(self, "modprobe virtio-balloon",
+                                          prompt)
+
+    def assert_initial_stats(self):
+        ret = self.vm.qmp('qom-get',
+                          {'path': '/machine/peripheral/balloon',
+                           'property': 'guest-stats'})['return']
+        when = ret.get('last-update')
+        assert when == 0
+        stats = ret.get('stats')
+        for name, val in stats.items():
+            assert val == UNSET_STATS_VALUE
+
+    def assert_running_stats(self, then):
+        # We told QEMU to refresh stats every 100ms, but
+        # there can be a delay between the virtio-balloon driver
+        # being modprobed and seeing the first stats refresh.
+        # Retry a few times for robustness under heavy load
+        retries = 10
+        when = 0
+        while when == 0 and retries:
+            ret = self.vm.qmp('qom-get',
+                              {'path': '/machine/peripheral/balloon',
+                               'property': 'guest-stats'})['return']
+            when = ret.get('last-update')
+            if when == 0:
+                retries = retries - 1
+                time.sleep(0.5)
+
+        now = time.time()
+
+        assert when > then and when < now
+        stats = ret.get('stats')
+        # Stats we expect this particular kernel to have set
+        expectData = [
+            "stat-available-memory",
+            "stat-disk-caches",
+            "stat-free-memory",
+            "stat-htlb-pgalloc",
+            "stat-htlb-pgfail",
+            "stat-major-faults",
+            "stat-minor-faults",
+            "stat-swap-in",
+            "stat-swap-out",
+            "stat-total-memory",
+        ]
+        for name, val in stats.items():
+            if name in expectData:
+                assert val != UNSET_STATS_VALUE
+            else:
+                assert val == UNSET_STATS_VALUE
+
+    def test_virtio_balloon_stats(self):
+        self.set_machine('q35')
+        self.require_accelerator("kvm")
+        kernel_path = self.ASSET_KERNEL.fetch()
+        initrd_path = self.ASSET_INITRD.fetch()
+        diskimage_path = self.ASSET_DISKIMAGE.fetch()
+
+        self.vm.set_console()
+        self.vm.add_args("-S")
+        self.vm.add_args("-cpu", "max")
+        self.vm.add_args("-m", "2G")
+        # Slow down BIOS phase with boot menu, so that after a system
+        # reset, we can reliably catch the clean stats again in BIOS
+        # phase before the guest OS launches
+        self.vm.add_args("-boot", "menu=on")
+        self.vm.add_args("-accel", "kvm")
+        self.vm.add_args("-device", "virtio-balloon,id=balloon")
+        self.vm.add_args('-drive',
+                         f'file={diskimage_path},if=none,id=drv0,snapshot=on')
+        self.vm.add_args('-device', 'virtio-blk-pci,bus=pcie.0,' +
+                         'drive=drv0,id=virtio-disk0,bootindex=1')
+
+        self.vm.add_args(
+            "-kernel",
+            kernel_path,
+            "-initrd",
+            initrd_path,
+            "-append",
+            self.DEFAULT_KERNEL_PARAMS
+        )
+        self.vm.launch()
+
+        # Poll stats at 100ms
+        self.vm.qmp('qom-set',
+                    {'path': '/machine/peripheral/balloon',
+                     'property': 'guest-stats-polling-interval',
+                     'value': 100 })
+
+        # We've not run any guest code yet, neither BIOS nor guest,
+        # so stats should be all default values
+        self.assert_initial_stats()
+
+        self.vm.qmp('cont')
+
+        then = time.time()
+        self.mount_root()
+        self.assert_running_stats(then)
+
+        # Race window between these two commands, where we
+        # rely on '-boot menu=on' to (hopefully) ensure we're
+        # still executing the BIOS when QEMU processes the
+        # 'stop', and thus have not loaded the virtio-balloon
+        # driver in the guest
+        self.vm.qmp('system_reset')
+        self.vm.qmp('stop')
+
+        # If the above assumption held, we're in BIOS now and
+        # stats should be all back at their default values
+        self.assert_initial_stats()
+        self.vm.qmp('cont')
+
+        then = time.time()
+        self.mount_root()
+        self.assert_running_stats(then)
+
+
+if __name__ ==
'__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_virtio_gpu.py b/tests/functional/test_virtio_gpu.py new file mode 100755 index 00000000000..be96de24da2 --- /dev/null +++ b/tests/functional/test_virtio_gpu.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python3 +# +# virtio-gpu tests +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + + +from qemu_test import QemuSystemTest, Asset +from qemu_test import wait_for_console_pattern +from qemu_test import exec_command_and_wait_for_pattern +from qemu_test import is_readable_executable_file + + +import os +import socket +import subprocess + + +def pick_default_vug_bin(test): + bld_dir_path = test.build_file("contrib", "vhost-user-gpu", "vhost-user-gpu") + if is_readable_executable_file(bld_dir_path): + return bld_dir_path + + +class VirtioGPUx86(QemuSystemTest): + + KERNEL_COMMAND_LINE = "printk.time=0 console=ttyS0 rdinit=/bin/bash" + ASSET_KERNEL = Asset( + ("https://archives.fedoraproject.org/pub/archive/fedora" + "/linux/releases/33/Everything/x86_64/os/images" + "/pxeboot/vmlinuz"), + '2dc5fb5cfe9ac278fa45640f3602d9b7a08cc189ed63fd9b162b07073e4df397') + ASSET_INITRD = Asset( + ("https://archives.fedoraproject.org/pub/archive/fedora" + "/linux/releases/33/Everything/x86_64/os/images" + "/pxeboot/initrd.img"), + 'c49b97f893a5349e4883452178763e402bdc5caa8845b226a2d1329b5f356045') + + def wait_for_console_pattern(self, success_message, vm=None): + wait_for_console_pattern( + self, + success_message, + failure_message="Kernel panic - not syncing", + vm=vm, + ) + + def test_virtio_vga_virgl(self): + # FIXME: should check presence of virtio, virgl etc + self.require_accelerator('kvm') + + kernel_path = self.ASSET_KERNEL.fetch() + initrd_path = self.ASSET_INITRD.fetch() + + self.vm.set_console() + self.vm.add_args("-cpu", "host") + self.vm.add_args("-m", "2G") + self.vm.add_args("-machine", "pc,accel=kvm") + self.vm.add_args("-device", "virtio-vga-gl") + self.vm.add_args("-display", "egl-headless") + self.vm.add_args( + "-kernel", + kernel_path, + "-initrd", + initrd_path, + "-append", + self.KERNEL_COMMAND_LINE, + ) + try: + self.vm.launch() + except: + # TODO: probably fails because we are missing the VirGL features + self.skipTest("VirGL not enabled?") + + self.wait_for_console_pattern("as init process") + exec_command_and_wait_for_pattern( + self, "/usr/sbin/modprobe virtio_gpu", "features: +virgl +edid" + ) + + def test_vhost_user_vga_virgl(self): + # FIXME: should check presence of vhost-user-gpu, virgl, memfd etc + self.require_accelerator('kvm') + + vug = pick_default_vug_bin(self) + if not vug: + self.skipTest("Could not find vhost-user-gpu") + + kernel_path = self.ASSET_KERNEL.fetch() + initrd_path = self.ASSET_INITRD.fetch() + + # Create socketpair to connect proxy and remote processes + qemu_sock, vug_sock = socket.socketpair( + socket.AF_UNIX, socket.SOCK_STREAM + ) + os.set_inheritable(qemu_sock.fileno(), True) + os.set_inheritable(vug_sock.fileno(), True) + + self._vug_log_path = self.log_file("vhost-user-gpu.log") + self._vug_log_file = open(self._vug_log_path, "wb") + self.log.info('Complete vhost-user-gpu.log file can be ' + 'found at %s', self._vug_log_path) + + vugp = subprocess.Popen( + [vug, "--virgl", "--fd=%d" % vug_sock.fileno()], + stdin=subprocess.DEVNULL, + stdout=self._vug_log_file, + stderr=subprocess.STDOUT, + shell=False, + close_fds=False, + ) + self._vug_log_file.close() + + self.vm.set_console() + self.vm.add_args("-cpu", 
"host") + self.vm.add_args("-m", "2G") + self.vm.add_args("-object", "memory-backend-memfd,id=mem,size=2G") + self.vm.add_args("-machine", "pc,memory-backend=mem,accel=kvm") + self.vm.add_args("-chardev", "socket,id=vug,fd=%d" % qemu_sock.fileno()) + self.vm.add_args("-device", "vhost-user-vga,chardev=vug") + self.vm.add_args("-display", "egl-headless") + self.vm.add_args( + "-kernel", + kernel_path, + "-initrd", + initrd_path, + "-append", + self.KERNEL_COMMAND_LINE, + ) + try: + self.vm.launch() + except: + # TODO: probably fails because we are missing the VirGL features + self.skipTest("VirGL not enabled?") + self.wait_for_console_pattern("as init process") + exec_command_and_wait_for_pattern(self, "/usr/sbin/modprobe virtio_gpu", + "features: +virgl +edid") + self.vm.shutdown() + qemu_sock.close() + vug_sock.close() + vugp.terminate() + vugp.wait() + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_virtio_version.py b/tests/functional/test_virtio_version.py new file mode 100755 index 00000000000..a5ea73237f5 --- /dev/null +++ b/tests/functional/test_virtio_version.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python3 +""" +Check compatibility of virtio device types +""" +# Copyright (c) 2018 Red Hat, Inc. +# +# Author: +# Eduardo Habkost +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu.machine import QEMUMachine +from qemu_test import QemuSystemTest + +# Virtio Device IDs: +VIRTIO_NET = 1 +VIRTIO_BLOCK = 2 +VIRTIO_CONSOLE = 3 +VIRTIO_RNG = 4 +VIRTIO_BALLOON = 5 +VIRTIO_RPMSG = 7 +VIRTIO_SCSI = 8 +VIRTIO_9P = 9 +VIRTIO_RPROC_SERIAL = 11 +VIRTIO_CAIF = 12 +VIRTIO_GPU = 16 +VIRTIO_INPUT = 18 +VIRTIO_VSOCK = 19 +VIRTIO_CRYPTO = 20 + +PCI_VENDOR_ID_REDHAT_QUMRANET = 0x1af4 + +# Device IDs for legacy/transitional devices: +PCI_LEGACY_DEVICE_IDS = { + VIRTIO_NET: 0x1000, + VIRTIO_BLOCK: 0x1001, + VIRTIO_BALLOON: 0x1002, + VIRTIO_CONSOLE: 0x1003, + VIRTIO_SCSI: 0x1004, + VIRTIO_RNG: 0x1005, + VIRTIO_9P: 0x1009, + VIRTIO_VSOCK: 0x1012, +} + +def pci_modern_device_id(virtio_devid): + return virtio_devid + 0x1040 + +def devtype_implements(vm, devtype, implements): + return devtype in [d['name'] for d in + vm.cmd('qom-list-types', implements=implements)] + +def get_pci_interfaces(vm, devtype): + interfaces = ('pci-express-device', 'conventional-pci-device') + return [i for i in interfaces if devtype_implements(vm, devtype, i)] + +class VirtioVersionCheck(QemuSystemTest): + """ + Check if virtio-version-specific device types result in the + same device tree created by `disable-modern` and + `disable-legacy`. 
+ """ + + # just in case there are failures, show larger diff: + maxDiff = 4096 + + def run_device(self, devtype, opts=None, machine='pc'): + """ + Run QEMU with `-device DEVTYPE`, return device info from `query-pci` + """ + with QEMUMachine(self.qemu_bin) as vm: + vm.set_machine(machine) + if opts: + devtype += ',' + opts + vm.add_args('-device', '%s,id=devfortest' % (devtype)) + vm.add_args('-S') + vm.launch() + + pcibuses = vm.cmd('query-pci') + alldevs = [dev for bus in pcibuses for dev in bus['devices']] + devfortest = [dev for dev in alldevs + if dev['qdev_id'] == 'devfortest'] + return devfortest[0], get_pci_interfaces(vm, devtype) + + + def assert_devids(self, dev, devid, non_transitional=False): + self.assertEqual(dev['id']['vendor'], PCI_VENDOR_ID_REDHAT_QUMRANET) + self.assertEqual(dev['id']['device'], devid) + if non_transitional: + self.assertTrue(0x1040 <= dev['id']['device'] <= 0x107f) + self.assertGreaterEqual(dev['id']['subsystem'], 0x40) + + def check_all_variants(self, qemu_devtype, virtio_devid): + """Check if a virtio device type and its variants behave as expected""" + # Force modern mode: + dev_modern, _ = self.run_device(qemu_devtype, + 'disable-modern=off,disable-legacy=on') + self.assert_devids(dev_modern, pci_modern_device_id(virtio_devid), + non_transitional=True) + + # -non-transitional device types should be 100% equivalent to + # ,disable-modern=off,disable-legacy=on + dev_1_0, nt_ifaces = self.run_device('%s-non-transitional' % (qemu_devtype)) + self.assertEqual(dev_modern, dev_1_0) + + # Force transitional mode: + dev_trans, _ = self.run_device(qemu_devtype, + 'disable-modern=off,disable-legacy=off') + self.assert_devids(dev_trans, PCI_LEGACY_DEVICE_IDS[virtio_devid]) + + # Force legacy mode: + dev_legacy, _ = self.run_device(qemu_devtype, + 'disable-modern=on,disable-legacy=off') + self.assert_devids(dev_legacy, PCI_LEGACY_DEVICE_IDS[virtio_devid]) + + # No options: default to transitional on PC machine-type: + no_opts_pc, generic_ifaces = self.run_device(qemu_devtype) + self.assertEqual(dev_trans, no_opts_pc) + + #TODO: check if plugging on a PCI Express bus will make the + # device non-transitional + #no_opts_q35 = self.run_device(qemu_devtype, machine='q35') + #self.assertEqual(dev_modern, no_opts_q35) + + # -transitional device types should be 100% equivalent to + # ,disable-modern=off,disable-legacy=off + dev_trans, trans_ifaces = self.run_device('%s-transitional' % (qemu_devtype)) + self.assertEqual(dev_trans, dev_trans) + + # ensure the interface information is correct: + self.assertIn('conventional-pci-device', generic_ifaces) + self.assertIn('pci-express-device', generic_ifaces) + + self.assertIn('conventional-pci-device', nt_ifaces) + self.assertIn('pci-express-device', nt_ifaces) + + self.assertIn('conventional-pci-device', trans_ifaces) + self.assertNotIn('pci-express-device', trans_ifaces) + + + def test_conventional_devs(self): + self.set_machine('pc') + self.check_all_variants('virtio-net-pci', VIRTIO_NET) + # virtio-blk requires 'driver' parameter + #self.check_all_variants('virtio-blk-pci', VIRTIO_BLOCK) + self.check_all_variants('virtio-serial-pci', VIRTIO_CONSOLE) + self.check_all_variants('virtio-rng-pci', VIRTIO_RNG) + self.check_all_variants('virtio-balloon-pci', VIRTIO_BALLOON) + self.check_all_variants('virtio-scsi-pci', VIRTIO_SCSI) + # virtio-9p requires 'fsdev' parameter + #self.check_all_variants('virtio-9p-pci', VIRTIO_9P) + + def check_modern_only(self, qemu_devtype, virtio_devid): + """Check if a modern-only virtio device 
type behaves as expected""" + # Force modern mode: + dev_modern, _ = self.run_device(qemu_devtype, + 'disable-modern=off,disable-legacy=on') + self.assert_devids(dev_modern, pci_modern_device_id(virtio_devid), + non_transitional=True) + + # No options: should be modern anyway + dev_no_opts, ifaces = self.run_device(qemu_devtype) + self.assertEqual(dev_modern, dev_no_opts) + + self.assertIn('conventional-pci-device', ifaces) + self.assertIn('pci-express-device', ifaces) + + def test_modern_only_devs(self): + self.set_machine('pc') + self.check_modern_only('virtio-vga', VIRTIO_GPU) + self.check_modern_only('virtio-gpu-pci', VIRTIO_GPU) + self.check_modern_only('virtio-mouse-pci', VIRTIO_INPUT) + self.check_modern_only('virtio-tablet-pci', VIRTIO_INPUT) + self.check_modern_only('virtio-keyboard-pci', VIRTIO_INPUT) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_vnc.py b/tests/functional/test_vnc.py new file mode 100755 index 00000000000..f1dd1597cf1 --- /dev/null +++ b/tests/functional/test_vnc.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 +# +# Simple functional tests for VNC functionality +# +# Copyright (c) 2018 Red Hat, Inc. +# +# Author: +# Cleber Rosa +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +import socket + +from qemu.machine.machine import VMLaunchFailure +from qemu_test import QemuSystemTest +from qemu_test.ports import Ports + + +VNC_ADDR = '127.0.0.1' + +def check_connect(port: int) -> bool: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + try: + sock.connect((VNC_ADDR, port)) + except ConnectionRefusedError: + return False + + return True + +class Vnc(QemuSystemTest): + + def test_no_vnc_change_password(self): + self.set_machine('none') + self.vm.add_args('-nodefaults', '-S') + self.vm.launch() + + query_vnc_response = self.vm.qmp('query-vnc') + if 'error' in query_vnc_response: + self.assertEqual(query_vnc_response['error']['class'], + 'CommandNotFound') + self.skipTest('VNC support not available') + self.assertFalse(query_vnc_response['return']['enabled']) + + set_password_response = self.vm.qmp('change-vnc-password', + password='new_password') + self.assertIn('error', set_password_response) + self.assertEqual(set_password_response['error']['class'], + 'GenericError') + self.assertEqual(set_password_response['error']['desc'], + 'Could not set password') + + def launch_guarded(self): + try: + self.vm.launch() + except VMLaunchFailure as excp: + if "-vnc: invalid option" in excp.output: + self.skipTest("VNC support not available") + elif "Cipher backend does not support DES algorithm" in excp.output: + self.skipTest("No cryptographic backend available") + else: + self.log.info("unhandled launch failure: %s", excp.output) + raise excp + + def test_change_password_requires_a_password(self): + self.set_machine('none') + self.vm.add_args('-nodefaults', '-S', '-vnc', ':1,to=999') + self.launch_guarded() + self.assertTrue(self.vm.qmp('query-vnc')['return']['enabled']) + set_password_response = self.vm.qmp('change-vnc-password', + password='new_password') + self.assertIn('error', set_password_response) + self.assertEqual(set_password_response['error']['class'], + 'GenericError') + self.assertEqual(set_password_response['error']['desc'], + 'Could not set password') + + def test_change_password(self): + self.set_machine('none') + self.vm.add_args('-nodefaults', '-S', '-vnc', ':1,to=999,password=on') + self.launch_guarded() + 
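+        # In contrast to the previous test, password authentication is
+        # enabled here ('password=on'), so 'change-vnc-password' is
+        # expected to succeed.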
self.assertTrue(self.vm.qmp('query-vnc')['return']['enabled']) + self.vm.cmd('change-vnc-password', + password='new_password') + + def do_test_change_listen(self, a, b, c): + self.assertFalse(check_connect(a)) + self.assertFalse(check_connect(b)) + self.assertFalse(check_connect(c)) + + self.vm.add_args('-nodefaults', '-S', '-vnc', f'{VNC_ADDR}:{a - 5900}') + self.launch_guarded() + self.assertEqual(self.vm.qmp('query-vnc')['return']['service'], str(a)) + self.assertTrue(check_connect(a)) + self.assertFalse(check_connect(b)) + self.assertFalse(check_connect(c)) + + self.vm.cmd('display-update', type='vnc', + addresses=[{'type': 'inet', 'host': VNC_ADDR, + 'port': str(b)}, + {'type': 'inet', 'host': VNC_ADDR, + 'port': str(c)}]) + self.assertEqual(self.vm.qmp('query-vnc')['return']['service'], str(b)) + self.assertFalse(check_connect(a)) + self.assertTrue(check_connect(b)) + self.assertTrue(check_connect(c)) + + def test_change_listen(self): + self.set_machine('none') + with Ports() as ports: + a, b, c = ports.find_free_ports(3) + self.do_test_change_listen(a, b, c) + + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_x86_64_hotplug_blk.py b/tests/functional/test_x86_64_hotplug_blk.py new file mode 100755 index 00000000000..7ddbfefc210 --- /dev/null +++ b/tests/functional/test_x86_64_hotplug_blk.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +# +# Functional test that hotplugs a virtio blk disk and checks it on a Linux +# guest +# +# Copyright (c) 2021 Red Hat, Inc. +# Copyright (c) Yandex +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern + + +class HotPlugBlk(LinuxKernelTest): + + ASSET_KERNEL = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases' + '/31/Server/x86_64/os/images/pxeboot/vmlinuz'), + 'd4738d03dbbe083ca610d0821d0a8f1488bebbdccef54ce33e3adb35fda00129') + + ASSET_INITRD = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases' + '/31/Server/x86_64/os/images/pxeboot/initrd.img'), + '277cd6c7adf77c7e63d73bbb2cded8ef9e2d3a2f100000e92ff1f8396513cd8b') + + def blockdev_add(self) -> None: + self.vm.cmd('blockdev-add', **{ + 'driver': 'null-co', + 'size': 1073741824, + 'node-name': 'disk' + }) + + def assert_vda(self) -> None: + exec_command_and_wait_for_pattern(self, 'while ! 
test -e /sys/block/vda ;' + ' do sleep 0.2 ; done', '# ') + + def assert_no_vda(self) -> None: + exec_command_and_wait_for_pattern(self, 'while test -e /sys/block/vda ;' + ' do sleep 0.2 ; done', '# ') + + def plug(self) -> None: + args = { + 'driver': 'virtio-blk-pci', + 'drive': 'disk', + 'id': 'virtio-disk0', + 'bus': 'pci.1', + 'addr': '1', + } + + self.assert_no_vda() + self.vm.cmd('device_add', args) + self.wait_for_console_pattern('virtio_blk virtio0: [vda]') + self.assert_vda() + + def unplug(self) -> None: + self.vm.cmd('device_del', id='virtio-disk0') + + self.vm.event_wait('DEVICE_DELETED', 1.0, + match={'data': {'device': 'virtio-disk0'}}) + + self.assert_no_vda() + + def test(self) -> None: + self.require_accelerator('kvm') + self.set_machine('q35') + + self.vm.add_args('-accel', 'kvm') + self.vm.add_args('-device', 'pcie-pci-bridge,id=pci.1,bus=pcie.0') + self.vm.add_args('-m', '1G') + self.vm.add_args('-append', 'console=ttyS0 rd.rescue') + + self.launch_kernel(self.ASSET_KERNEL.fetch(), + self.ASSET_INITRD.fetch(), + wait_for='Entering emergency mode.') + self.wait_for_console_pattern('# ') + + self.blockdev_add() + + self.plug() + self.unplug() + + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_x86_64_hotplug_cpu.py b/tests/functional/test_x86_64_hotplug_cpu.py new file mode 100755 index 00000000000..7b9200ac2e8 --- /dev/null +++ b/tests/functional/test_x86_64_hotplug_cpu.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +# +# Functional test that hotplugs a CPU and checks it on a Linux guest +# +# Copyright (c) 2021 Red Hat, Inc. +# +# Author: +# Cleber Rosa +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import LinuxKernelTest, Asset, exec_command_and_wait_for_pattern + + +class HotPlugCPU(LinuxKernelTest): + + ASSET_KERNEL = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases' + '/31/Server/x86_64/os/images/pxeboot/vmlinuz'), + 'd4738d03dbbe083ca610d0821d0a8f1488bebbdccef54ce33e3adb35fda00129') + + ASSET_INITRD = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux/releases' + '/31/Server/x86_64/os/images/pxeboot/initrd.img'), + '277cd6c7adf77c7e63d73bbb2cded8ef9e2d3a2f100000e92ff1f8396513cd8b') + + def test_hotplug(self): + + self.require_accelerator('kvm') + self.vm.add_args('-accel', 'kvm') + self.vm.add_args('-cpu', 'Haswell') + self.vm.add_args('-smp', '1,sockets=1,cores=2,threads=1,maxcpus=2') + self.vm.add_args('-m', '1G') + self.vm.add_args('-append', 'console=ttyS0 rd.rescue') + + self.launch_kernel(self.ASSET_KERNEL.fetch(), + self.ASSET_INITRD.fetch(), + wait_for='Entering emergency mode.') + prompt = '# ' + self.wait_for_console_pattern(prompt) + + exec_command_and_wait_for_pattern(self, + 'cd /sys/devices/system/cpu/cpu0', + 'cpu0#') + exec_command_and_wait_for_pattern(self, + 'cd /sys/devices/system/cpu/cpu1', + 'No such file or directory') + + self.vm.cmd('device_add', + driver='Haswell-x86_64-cpu', + id='c1', + socket_id=0, + core_id=1, + thread_id=0) + self.wait_for_console_pattern('CPU1 has been hot-added') + + exec_command_and_wait_for_pattern(self, + 'cd /sys/devices/system/cpu/cpu1', + 'cpu1#') + + exec_command_and_wait_for_pattern(self, 'cd ..', prompt) + self.vm.cmd('device_del', id='c1') + + exec_command_and_wait_for_pattern(self, + 'while cd /sys/devices/system/cpu/cpu1 ;' + ' do sleep 0.2 ; done', + 'No such file or directory') + +if __name__ == '__main__': + 
LinuxKernelTest.main() diff --git a/tests/functional/test_x86_64_kvm_xen.py b/tests/functional/test_x86_64_kvm_xen.py new file mode 100755 index 00000000000..a5d445023c9 --- /dev/null +++ b/tests/functional/test_x86_64_kvm_xen.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python3 +# +# KVM Xen guest functional tests +# +# Copyright © 2021 Red Hat, Inc. +# Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Author: +# David Woodhouse +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu.machine import machine + +from qemu_test import QemuSystemTest, Asset, exec_command_and_wait_for_pattern +from qemu_test import wait_for_console_pattern + +class KVMXenGuest(QemuSystemTest): + + KERNEL_DEFAULT = 'printk.time=0 root=/dev/xvda console=ttyS0 quiet' + + kernel_path = None + kernel_params = None + + # Fetch assets from the kvm-xen-guest subdir of my shared test + # images directory on fileserver.linaro.org where you can find + # build instructions for how they where assembled. + ASSET_KERNEL = Asset( + ('https://fileserver.linaro.org/s/kE4nCFLdQcoBF9t/download?' + 'path=%2Fkvm-xen-guest&files=bzImage'), + 'ec0ad7bb8c33c5982baee0a75505fe7dbf29d3ff5d44258204d6307c6fe0132a') + + ASSET_ROOTFS = Asset( + ('https://fileserver.linaro.org/s/kE4nCFLdQcoBF9t/download?' + 'path=%2Fkvm-xen-guest&files=rootfs.ext4'), + 'b11045d649006c649c184e93339aaa41a8fe20a1a86620af70323252eb29e40b') + + def common_vm_setup(self): + # We also catch lack of KVM_XEN support if we fail to launch + self.require_accelerator("kvm") + self.require_netdev('user') + + self.vm.set_console() + + self.vm.add_args("-accel", "kvm,xen-version=0x4000a,kernel-irqchip=split") + self.vm.add_args("-smp", "2") + + self.kernel_path = self.ASSET_KERNEL.fetch() + self.rootfs = self.ASSET_ROOTFS.fetch() + + def run_and_check(self): + self.vm.add_args('-kernel', self.kernel_path, + '-append', self.kernel_params, + '-drive', f"file={self.rootfs},if=none,snapshot=on,format=raw,id=drv0", + '-device', 'xen-disk,drive=drv0,vdev=xvda', + '-device', 'virtio-net-pci,netdev=unet', + '-netdev', 'user,id=unet,hostfwd=:127.0.0.1:0-:22') + + try: + self.vm.launch() + except machine.VMLaunchFailure as e: + if "Xen HVM guest support not present" in e.output: + self.skipTest("KVM Xen support is not present " + "(need v5.12+ kernel with CONFIG_KVM_XEN)") + elif "Property 'kvm-accel.xen-version' not found" in e.output: + self.skipTest("QEMU not built with CONFIG_XEN_EMU support") + else: + raise e + + self.log.info('VM launched, waiting for sshd') + console_pattern = 'Starting dropbear sshd: OK' + wait_for_console_pattern(self, console_pattern, 'Oops') + self.log.info('sshd ready') + + exec_command_and_wait_for_pattern(self, 'cat /proc/cmdline', 'xen') + exec_command_and_wait_for_pattern(self, 'dmesg | grep "Grant table"', + 'Grant table initialized') + wait_for_console_pattern(self, '#', 'Oops') + + def test_kvm_xen_guest(self): + self.common_vm_setup() + + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks') + self.run_and_check() + exec_command_and_wait_for_pattern(self, + 'grep xen-pirq.*msi /proc/interrupts', + 'virtio0-output') + + def test_kvm_xen_guest_nomsi(self): + self.common_vm_setup() + + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks pci=nomsi') + self.run_and_check() + exec_command_and_wait_for_pattern(self, + 'grep xen-pirq.* /proc/interrupts', + 'virtio0') + + def test_kvm_xen_guest_noapic_nomsi(self): + self.common_vm_setup() + + self.kernel_params = 
(self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks noapic pci=nomsi') + self.run_and_check() + exec_command_and_wait_for_pattern(self, + 'grep xen-pirq /proc/interrupts', + 'virtio0') + + def test_kvm_xen_guest_vapic(self): + self.common_vm_setup() + self.vm.add_args('-cpu', 'host,+xen-vapic') + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks') + self.run_and_check() + exec_command_and_wait_for_pattern(self, + 'grep xen-pirq /proc/interrupts', + 'acpi') + wait_for_console_pattern(self, '#') + exec_command_and_wait_for_pattern(self, + 'grep PCI-MSI /proc/interrupts', + 'virtio0-output') + + def test_kvm_xen_guest_novector(self): + self.common_vm_setup() + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks' + + ' xen_no_vector_callback') + self.run_and_check() + exec_command_and_wait_for_pattern(self, + 'grep xen-platform-pci /proc/interrupts', + 'fasteoi') + + def test_kvm_xen_guest_novector_nomsi(self): + self.common_vm_setup() + + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks pci=nomsi' + + ' xen_no_vector_callback') + self.run_and_check() + exec_command_and_wait_for_pattern(self, + 'grep xen-platform-pci /proc/interrupts', + 'IO-APIC') + + def test_kvm_xen_guest_novector_noapic(self): + self.common_vm_setup() + self.kernel_params = (self.KERNEL_DEFAULT + + ' xen_emul_unplug=ide-disks' + + ' xen_no_vector_callback noapic') + self.run_and_check() + exec_command_and_wait_for_pattern(self, + 'grep xen-platform-pci /proc/interrupts', + 'XT-PIC') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_x86_64_replay.py b/tests/functional/test_x86_64_replay.py new file mode 100755 index 00000000000..27287d452dc --- /dev/null +++ b/tests/functional/test_x86_64_replay.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# +# Replay test that boots a Linux kernel on x86_64 machines +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from subprocess import check_call, DEVNULL + +from qemu_test import Asset, skipFlakyTest, get_qemu_img +from replay_kernel import ReplayKernelBase + + +class X86Replay(ReplayKernelBase): + + ASSET_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/x86_64/bzImage', + 'f57bfc6553bcd6e0a54aab86095bf642b33b5571d14e3af1731b18c87ed5aef8') + + ASSET_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/x86_64/rootfs.ext4.zst', + '4b8b2a99117519c5290e1202cb36eb6c7aaba92b357b5160f5970cf5fb78a751') + + def do_test_x86(self, machine, blkdevice, devroot): + self.require_netdev('user') + self.set_machine(machine) + self.cpu="Nehalem" + kernel_path = self.ASSET_KERNEL.fetch() + + raw_disk = self.uncompress(self.ASSET_ROOTFS) + disk = self.scratch_file('scratch.qcow2') + qemu_img = get_qemu_img(self) + check_call([qemu_img, 'create', '-f', 'qcow2', '-b', raw_disk, + '-F', 'raw', disk], stdout=DEVNULL, stderr=DEVNULL) + + args = ('-drive', 'file=%s,snapshot=on,id=hd0,if=none' % disk, + '-drive', 'driver=blkreplay,id=hd0-rr,if=none,image=hd0', + '-device', '%s,drive=hd0-rr' % blkdevice, + '-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22', + '-device', 'virtio-net,netdev=vnet', + '-object', 'filter-replay,id=replay,netdev=vnet') + + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + f"console=ttyS0 root=/dev/{devroot}") + console_pattern = 'Welcome to TuxTest' + self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5, + args=args) + + @skipFlakyTest('https://gitlab.com/qemu-project/qemu/-/issues/2094') + def 
test_pc(self): + self.do_test_x86('pc', 'virtio-blk', 'vda') + + def test_q35(self): + self.do_test_x86('q35', 'ide-hd', 'sda') + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/functional/test_x86_64_reverse_debug.py b/tests/functional/test_x86_64_reverse_debug.py new file mode 100755 index 00000000000..d713e91e144 --- /dev/null +++ b/tests/functional/test_x86_64_reverse_debug.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Reverse debugging test +# +# Copyright (c) 2020 ISP RAS +# +# Author: +# Pavel Dovgalyuk +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from qemu_test import skipIfMissingImports, skipFlakyTest +from reverse_debugging import ReverseDebugging + + +@skipIfMissingImports('avocado.utils') +class ReverseDebugging_X86_64(ReverseDebugging): + + REG_PC = 0x10 + REG_CS = 0x12 + def get_pc(self, g): + return self.get_reg_le(g, self.REG_PC) \ + + self.get_reg_le(g, self.REG_CS) * 0x10 + + @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/2922") + def test_x86_64_pc(self): + self.set_machine('pc') + # start with BIOS only + self.reverse_debugging() + + +if __name__ == '__main__': + ReverseDebugging.main() diff --git a/tests/functional/test_x86_64_tuxrun.py b/tests/functional/test_x86_64_tuxrun.py new file mode 100755 index 00000000000..fcbc62b1b0f --- /dev/null +++ b/tests/functional/test_x86_64_tuxrun.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# +# Functional test that boots known good tuxboot images the same way +# that tuxrun (www.tuxrun.org) does. This tool is used by things like +# the LKFT project to run regression tests on kernels. +# +# Copyright (c) 2023 Linaro Ltd. +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test.tuxruntest import TuxRunBaselineTest + +class TuxRunX86Test(TuxRunBaselineTest): + + ASSET_X86_64_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/x86_64/bzImage', + 'f57bfc6553bcd6e0a54aab86095bf642b33b5571d14e3af1731b18c87ed5aef8') + ASSET_X86_64_ROOTFS = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/x86_64/rootfs.ext4.zst', + '4b8b2a99117519c5290e1202cb36eb6c7aaba92b357b5160f5970cf5fb78a751') + + def test_x86_64(self): + self.set_machine('q35') + self.cpu="Nehalem" + self.root='sda' + self.wait_for_shutdown=False + self.common_tuxrun(kernel_asset=self.ASSET_X86_64_KERNEL, + rootfs_asset=self.ASSET_X86_64_ROOTFS, + drive="driver=ide-hd,bus=ide.0,unit=0") + +if __name__ == '__main__': + TuxRunBaselineTest.main() diff --git a/tests/functional/test_x86_cpu_model_versions.py b/tests/functional/test_x86_cpu_model_versions.py new file mode 100755 index 00000000000..36c968f1c0b --- /dev/null +++ b/tests/functional/test_x86_cpu_model_versions.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python3 +# +# Basic validation of x86 versioned CPU models and CPU model aliases +# +# Copyright (c) 2019 Red Hat Inc +# +# Author: +# Eduardo Habkost +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
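For test_x86_64_replay.py above: run_rr() comes from replay_kernel.py, which is not part of this hunk, so the following is only a sketch of the mechanism based on QEMU's documented record/replay options. The same command line is run twice, first recording non-deterministic inputs and then replaying them, and the console pattern must appear in both passes:

    # indicative flags only; the real helper builds these itself
    record = ['-icount', 'shift=5,rr=record,rrfile=replay.bin']
    replay = ['-icount', 'shift=5,rr=replay,rrfile=replay.bin']

The blkreplay block driver and the filter-replay netdev object wired up in args are what keep disk and network traffic deterministic between the two runs.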
See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, see . +# + +import re + +from qemu_test import QemuSystemTest + +class X86CPUModelAliases(QemuSystemTest): + """ + Validation of PC CPU model versions and CPU model aliases + """ + def validate_aliases(self, cpus): + for c in cpus.values(): + if 'alias-of' in c: + # all aliases must point to a valid CPU model name: + self.assertIn(c['alias-of'], cpus, + '%s.alias-of (%s) is not a valid CPU model name' % (c['name'], c['alias-of'])) + # aliases must not point to aliases + self.assertNotIn('alias-of', cpus[c['alias-of']], + '%s.alias-of (%s) points to another alias' % (c['name'], c['alias-of'])) + + # aliases must not be static + self.assertFalse(c['static']) + + def validate_variant_aliases(self, cpus): + # -noTSX, -IBRS and -IBPB variants of CPU models are special: + # they shouldn't have their own versions: + self.assertNotIn("Haswell-noTSX-v1", cpus, + "Haswell-noTSX shouldn't be versioned") + self.assertNotIn("Broadwell-noTSX-v1", cpus, + "Broadwell-noTSX shouldn't be versioned") + self.assertNotIn("Nehalem-IBRS-v1", cpus, + "Nehalem-IBRS shouldn't be versioned") + self.assertNotIn("Westmere-IBRS-v1", cpus, + "Westmere-IBRS shouldn't be versioned") + self.assertNotIn("SandyBridge-IBRS-v1", cpus, + "SandyBridge-IBRS shouldn't be versioned") + self.assertNotIn("IvyBridge-IBRS-v1", cpus, + "IvyBridge-IBRS shouldn't be versioned") + self.assertNotIn("Haswell-noTSX-IBRS-v1", cpus, + "Haswell-noTSX-IBRS shouldn't be versioned") + self.assertNotIn("Haswell-IBRS-v1", cpus, + "Haswell-IBRS shouldn't be versioned") + self.assertNotIn("Broadwell-noTSX-IBRS-v1", cpus, + "Broadwell-noTSX-IBRS shouldn't be versioned") + self.assertNotIn("Broadwell-IBRS-v1", cpus, + "Broadwell-IBRS shouldn't be versioned") + self.assertNotIn("Skylake-Client-IBRS-v1", cpus, + "Skylake-Client-IBRS shouldn't be versioned") + self.assertNotIn("Skylake-Server-IBRS-v1", cpus, + "Skylake-Server-IBRS shouldn't be versioned") + self.assertNotIn("EPYC-IBPB-v1", cpus, + "EPYC-IBPB shouldn't be versioned") + + def test_unversioned_alias(self): + """ + Check if unversioned CPU model is an alias pointing to right version + """ + self.set_machine('pc') + self.vm.add_args('-S') + self.vm.launch() + + cpus = dict((m['name'], m) for m in + self.vm.cmd('query-cpu-definitions')) + + self.assertFalse(cpus['Cascadelake-Server']['static'], + 'unversioned Cascadelake-Server CPU model must not be static') + self.assertEqual(cpus['Cascadelake-Server'].get('alias-of'), + 'Cascadelake-Server-v1', + 'Cascadelake-Server must be an alias of Cascadelake-Server-v1') + self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'], + 'Cascadelake-Server-v1 must not be an alias') + + self.assertFalse(cpus['qemu64']['static'], + 'unversioned qemu64 CPU model must not be static') + self.assertEqual(cpus['qemu64'].get('alias-of'), 'qemu64-v1', + 'qemu64 must be an alias of qemu64-v1') + self.assertNotIn('alias-of', cpus['qemu64-v1'], + 'qemu64-v1 must not be an alias') + + self.validate_variant_aliases(cpus) + + # On recent PC machines, -noTSX and -IBRS models should be aliases: + self.assertEqual(cpus["Haswell"].get('alias-of'), + "Haswell-v1", + "Haswell must be an alias") + self.assertEqual(cpus["Haswell-noTSX"].get('alias-of'), + "Haswell-v2", + "Haswell-noTSX must be an alias") + self.assertEqual(cpus["Haswell-IBRS"].get('alias-of'), + "Haswell-v3", + "Haswell-IBRS must be an 
alias") + self.assertEqual(cpus["Haswell-noTSX-IBRS"].get('alias-of'), + "Haswell-v4", + "Haswell-noTSX-IBRS must be an alias") + + self.assertEqual(cpus["Broadwell"].get('alias-of'), + "Broadwell-v1", + "Broadwell must be an alias") + self.assertEqual(cpus["Broadwell-noTSX"].get('alias-of'), + "Broadwell-v2", + "Broadwell-noTSX must be an alias") + self.assertEqual(cpus["Broadwell-IBRS"].get('alias-of'), + "Broadwell-v3", + "Broadwell-IBRS must be an alias") + self.assertEqual(cpus["Broadwell-noTSX-IBRS"].get('alias-of'), + "Broadwell-v4", + "Broadwell-noTSX-IBRS must be an alias") + + self.assertEqual(cpus["Nehalem"].get('alias-of'), + "Nehalem-v1", + "Nehalem must be an alias") + self.assertEqual(cpus["Nehalem-IBRS"].get('alias-of'), + "Nehalem-v2", + "Nehalem-IBRS must be an alias") + + self.assertEqual(cpus["Westmere"].get('alias-of'), + "Westmere-v1", + "Westmere must be an alias") + self.assertEqual(cpus["Westmere-IBRS"].get('alias-of'), + "Westmere-v2", + "Westmere-IBRS must be an alias") + + self.assertEqual(cpus["SandyBridge"].get('alias-of'), + "SandyBridge-v1", + "SandyBridge must be an alias") + self.assertEqual(cpus["SandyBridge-IBRS"].get('alias-of'), + "SandyBridge-v2", + "SandyBridge-IBRS must be an alias") + + self.assertEqual(cpus["IvyBridge"].get('alias-of'), + "IvyBridge-v1", + "IvyBridge must be an alias") + self.assertEqual(cpus["IvyBridge-IBRS"].get('alias-of'), + "IvyBridge-v2", + "IvyBridge-IBRS must be an alias") + + self.assertEqual(cpus["Skylake-Client"].get('alias-of'), + "Skylake-Client-v1", + "Skylake-Client must be an alias") + self.assertEqual(cpus["Skylake-Client-IBRS"].get('alias-of'), + "Skylake-Client-v2", + "Skylake-Client-IBRS must be an alias") + + self.assertEqual(cpus["Skylake-Server"].get('alias-of'), + "Skylake-Server-v1", + "Skylake-Server must be an alias") + self.assertEqual(cpus["Skylake-Server-IBRS"].get('alias-of'), + "Skylake-Server-v2", + "Skylake-Server-IBRS must be an alias") + + self.assertEqual(cpus["EPYC"].get('alias-of'), + "EPYC-v1", + "EPYC must be an alias") + self.assertEqual(cpus["EPYC-IBPB"].get('alias-of'), + "EPYC-v2", + "EPYC-IBPB must be an alias") + + self.validate_aliases(cpus) + + def test_none_alias(self): + """ + Check if unversioned CPU model is an alias pointing to some version + """ + self.set_machine('none') + self.vm.add_args('-S') + self.vm.launch() + + cpus = dict((m['name'], m) for m in + self.vm.cmd('query-cpu-definitions')) + + self.assertFalse(cpus['Cascadelake-Server']['static'], + 'unversioned Cascadelake-Server CPU model must not be static') + self.assertTrue(re.match('Cascadelake-Server-v[0-9]+', cpus['Cascadelake-Server']['alias-of']), + 'Cascadelake-Server must be an alias of versioned CPU model') + self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'], + 'Cascadelake-Server-v1 must not be an alias') + + self.assertFalse(cpus['qemu64']['static'], + 'unversioned qemu64 CPU model must not be static') + self.assertTrue(re.match('qemu64-v[0-9]+', cpus['qemu64']['alias-of']), + 'qemu64 must be an alias of versioned CPU model') + self.assertNotIn('alias-of', cpus['qemu64-v1'], + 'qemu64-v1 must not be an alias') + + self.validate_aliases(cpus) + + +class CascadelakeArchCapabilities(QemuSystemTest): + """ + Validation of Cascadelake arch-capabilities + """ + def get_cpu_prop(self, prop): + cpu_path = self.vm.cmd('query-cpus-fast')[0].get('qom-path') + return self.vm.cmd('qom-get', path=cpu_path, property=prop) + + def test(self): + self.set_machine('pc') + # machine-type only: + self.vm.add_args('-S') 
+ self.set_vm_arg('-cpu', + 'Cascadelake-Server,x-force-features=on,check=off,' + 'enforce=off') + self.vm.launch() + self.assertFalse(self.get_cpu_prop('arch-capabilities'), + 'pc + Cascadelake-Server should not have arch-capabilities') + + def test_unset(self): + self.set_machine('pc') + self.vm.add_args('-S') + self.set_vm_arg('-cpu', + 'Cascadelake-Server,x-force-features=on,check=off,' + 'enforce=off,-arch-capabilities') + self.vm.launch() + self.assertFalse(self.get_cpu_prop('arch-capabilities'), + 'pc + Cascadelake-Server,-arch-capabilities should not have arch-capabilities') + + def test_v2_unset(self): + self.set_machine('pc') + self.vm.add_args('-S') + self.set_vm_arg('-cpu', + 'Cascadelake-Server-v2,x-force-features=on,check=off,' + 'enforce=off,-arch-capabilities') + self.vm.launch() + self.assertFalse(self.get_cpu_prop('arch-capabilities'), + 'pc + Cascadelake-Server-v2,-arch-capabilities should not have arch-capabilities') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_xtensa_lx60.py b/tests/functional/test_xtensa_lx60.py new file mode 100755 index 00000000000..147c9208991 --- /dev/null +++ b/tests/functional/test_xtensa_lx60.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on an xtensa lx650 machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import LinuxKernelTest, Asset + + +class XTensaLX60Test(LinuxKernelTest): + + ASSET_DAY02 = Asset( + 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day02.tar.xz', + '68ff07f9b3fd3df36d015eb46299ba44748e94bfbb2d5295fddc1a8d4a9fd324') + + def test_xtensa_lx60(self): + self.set_machine('lx60') + self.cpu = 'dc233c' + self.archive_extract(self.ASSET_DAY02) + self.launch_kernel(self.scratch_file('day02', + 'santas-sleigh-ride.elf'), + wait_for='QEMU advent calendar') + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_xtensa_replay.py b/tests/functional/test_xtensa_replay.py new file mode 100755 index 00000000000..eb00a3b0044 --- /dev/null +++ b/tests/functional/test_xtensa_replay.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# +# Replay test that boots a Linux kernel on an xtensa lx650 machine +# and checks the console +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from replay_kernel import ReplayKernelBase + + +class XTensaReplay(ReplayKernelBase): + + ASSET_DAY02 = Asset( + 'https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/day02.tar.xz', + '68ff07f9b3fd3df36d015eb46299ba44748e94bfbb2d5295fddc1a8d4a9fd324') + + def test_replay(self): + self.set_machine('lx60') + self.cpu = 'dc233c' + kernel_path = self.archive_extract(self.ASSET_DAY02, + member='day02/santas-sleigh-ride.elf') + self.run_rr(kernel_path, self.REPLAY_KERNEL_COMMAND_LINE, + 'QEMU advent calendar') + + +if __name__ == '__main__': + ReplayKernelBase.main() diff --git a/tests/guest-debug/run-test.py b/tests/guest-debug/run-test.py index 368ff8a8903..75e9c92e036 100755 --- a/tests/guest-debug/run-test.py +++ b/tests/guest-debug/run-test.py @@ -27,11 +27,17 @@ def get_args(): parser.add_argument("--binary", help="Binary to debug", required=True) parser.add_argument("--test", help="GDB test script") + parser.add_argument('test_args', nargs='*', + help="Additional args for GDB test script. 
" + "The args should be preceded by -- to avoid confusion " + "with flags for runner script") parser.add_argument("--gdb", help="The gdb binary to use", default=None) parser.add_argument("--gdb-args", help="Additional gdb arguments") parser.add_argument("--output", help="A file to redirect output to") parser.add_argument("--stderr", help="A file to redirect stderr to") + parser.add_argument("--no-suspend", action="store_true", + help="Ask the binary to not wait for GDB connection") return parser.parse_args() @@ -69,10 +75,19 @@ def log(output, msg): # Launch QEMU with binary if "system" in args.qemu: + if args.no_suspend: + suspend = '' + else: + suspend = ' -S' cmd = f'{args.qemu} {args.qargs} {args.binary}' \ - f' -S -gdb unix:path={socket_name},server=on' + f'{suspend} -gdb unix:path={socket_name},server=on' else: - cmd = f'{args.qemu} {args.qargs} -g {socket_name} {args.binary}' + if args.no_suspend: + suspend = ',suspend=n' + else: + suspend = '' + cmd = f'{args.qemu} {args.qargs} -g {socket_name}{suspend}' \ + f' {args.binary}' log(output, "QEMU CMD: %s" % (cmd)) inferior = subprocess.Popen(shlex.split(cmd)) @@ -91,6 +106,8 @@ def log(output, msg): gdb_cmd += " -ex 'target remote %s'" % (socket_name) # finally the test script itself if args.test: + if args.test_args: + gdb_cmd += f" -ex \"py sys.argv={args.test_args}\"" gdb_cmd += " -x %s" % (args.test) diff --git a/tests/guest-debug/test_gdbstub.py b/tests/guest-debug/test_gdbstub.py index 46fbf98f0c6..4f08089e6a9 100644 --- a/tests/guest-debug/test_gdbstub.py +++ b/tests/guest-debug/test_gdbstub.py @@ -2,6 +2,7 @@ """ from __future__ import print_function +import argparse import gdb import os import sys @@ -10,6 +11,16 @@ fail_count = 0 +def gdb_exit(status): + gdb.execute(f"exit {status}") + + +class arg_parser(argparse.ArgumentParser): + def exit(self, status=None, message=""): + print("Wrong GDB script test argument! 
" + message) + gdb_exit(1) + + def report(cond, msg): """Report success/fail of a test""" if cond: @@ -33,11 +44,11 @@ def main(test, expected_arch=None): "connected to {}".format(expected_arch)) except (gdb.error, AttributeError): print("SKIP: not connected") - exit(0) + gdb_exit(0) if gdb.parse_and_eval("$pc") == 0: print("SKIP: PC not set") - exit(0) + gdb_exit(0) try: test() @@ -57,4 +68,4 @@ def main(test, expected_arch=None): pass print("All tests complete: {} failures".format(fail_count)) - gdb.execute(f"exit {fail_count}") + gdb_exit(fail_count) diff --git a/tests/include/meson.build b/tests/include/meson.build index 9abba308fa0..8e8d1ec4e69 100644 --- a/tests/include/meson.build +++ b/tests/include/meson.build @@ -13,4 +13,4 @@ test_qapi_outputs_extra = [ test_qapi_files_extra = custom_target('QAPI test (include)', output: test_qapi_outputs_extra, input: test_qapi_files, - command: 'true') + command: [python, '-c', '']) diff --git a/tests/lcitool/libvirt-ci b/tests/lcitool/libvirt-ci index 789b4601bce..18c4bfe02c4 160000 --- a/tests/lcitool/libvirt-ci +++ b/tests/lcitool/libvirt-ci @@ -1 +1 @@ -Subproject commit 789b4601bce4e01f43fdb6ad4ce5ab4e46674440 +Subproject commit 18c4bfe02c467e5639bf9a687139735ccd7a3fff diff --git a/tests/lcitool/mappings.yml b/tests/lcitool/mappings.yml index 03b974ad023..8f0e95e1c56 100644 --- a/tests/lcitool/mappings.yml +++ b/tests/lcitool/mappings.yml @@ -1,9 +1,17 @@ mappings: + # Too old on Ubuntu 22.04; we install it from cargo instead + bindgen: + Ubuntu2204: + flake8: OpenSUSELeap15: meson: OpenSUSELeap15: + # Use Meson from PyPI wherever Rust is enabled + Debian: + Fedora: + Ubuntu: python3: OpenSUSELeap15: python311-base @@ -60,10 +68,15 @@ mappings: python3-wheel: OpenSUSELeap15: python311-pip + rust: + Debian12: rustc-web + Ubuntu2204: rustc-1.77 + Ubuntu2404: rustc-1.77 + pypi_mappings: # Request more recent version meson: - default: meson==0.63.2 + default: meson==1.8.1 # Drop packages that need devel headers python3-numpy: diff --git a/tests/lcitool/projects/qemu.yml b/tests/lcitool/projects/qemu.yml index 0c85784259a..c07242f2728 100644 --- a/tests/lcitool/projects/qemu.yml +++ b/tests/lcitool/projects/qemu.yml @@ -3,6 +3,7 @@ packages: - alsa - bash - bc + - bindgen - bison - brlapi - bzip2 @@ -32,6 +33,7 @@ packages: - glusterfs - gnutls - gtk3 + - gtk-vnc - hostname - json-c - libaio @@ -41,6 +43,7 @@ packages: - libc-static - libcacard - libcap-ng + - libcbor - libcurl - libdrm - libepoxy @@ -100,6 +103,7 @@ packages: - python3-tomli - python3-venv - rpm2cpio + - rust - sdl2 - sdl2-image - sed @@ -118,6 +122,7 @@ packages: - usbredir - virglrenderer - vte + - vulkan-tools - which - xen - xorriso diff --git a/tests/lcitool/refresh b/tests/lcitool/refresh index ac803e34f15..d3488b2679e 100755 --- a/tests/lcitool/refresh +++ b/tests/lcitool/refresh @@ -116,6 +116,42 @@ debian12_extras = [ "ENV QEMU_CONFIGURE_OPTS --enable-netmap\n" ] +# Based on the hub.docker.com/library/rust Dockerfiles +fedora_rustup_nightly_extras = [ + "RUN dnf install -y wget\n", + "ENV RUSTUP_HOME=/usr/local/rustup CARGO_HOME=/usr/local/cargo\n", + "ENV RUSTC=/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/rustc\n", + "ENV RUSTDOC=/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/rustdoc\n", + "ENV CARGO=/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/cargo\n", + "RUN set -eux && \\\n", + " rustArch='x86_64-unknown-linux-gnu' && \\\n", + " 
rustupSha256='6aeece6993e902708983b209d04c0d1dbb14ebb405ddb87def578d41f920f56d' && \\\n", + ' url="https://static.rust-lang.org/rustup/archive/1.27.1/${rustArch}/rustup-init" && \\\n', + ' wget "$url" && \\\n', + ' echo "${rustupSha256} *rustup-init" | sha256sum -c - && \\\n', + " chmod +x rustup-init && \\\n", + " ./rustup-init -y --no-modify-path --profile default --default-toolchain nightly --default-host ${rustArch} && \\\n", + " chmod -R a+w $RUSTUP_HOME $CARGO_HOME && \\\n", + " /usr/local/cargo/bin/rustup --version && \\\n", + " /usr/local/cargo/bin/rustup run nightly cargo --version && \\\n", + " /usr/local/cargo/bin/rustup run nightly rustc --version && \\\n", + ' test "$CARGO" = "$(/usr/local/cargo/bin/rustup +nightly which cargo)" && \\\n', + ' test "$RUSTDOC" = "$(/usr/local/cargo/bin/rustup +nightly which rustdoc)" && \\\n', + ' test "$RUSTC" = "$(/usr/local/cargo/bin/rustup +nightly which rustc)"\n', + 'ENV PATH=$CARGO_HOME/bin:$PATH\n', + 'RUN /usr/local/cargo/bin/rustup run nightly cargo install bindgen-cli\n', + 'RUN $CARGO --list\n', +] + +ubuntu2204_rust_extras = [ + "ENV RUSTC=/usr/bin/rustc-1.77\n", + "ENV RUSTDOC=/usr/bin/rustdoc-1.77\n", + "ENV CARGO_HOME=/usr/local/cargo\n", + 'ENV PATH=$CARGO_HOME/bin:$PATH\n', + "RUN DEBIAN_FRONTEND=noninteractive eatmydata \\\n", + " apt install -y --no-install-recommends cargo\n", + 'RUN cargo install bindgen-cli\n', +] def cross_build(prefix, targets): conf = "ENV QEMU_CONFIGURE_OPTS --cross-prefix=%s\n" % (prefix) @@ -131,13 +167,20 @@ try: # # Standard native builds # - generate_dockerfile("alpine", "alpine-319") + generate_dockerfile("alpine", "alpine-321") generate_dockerfile("centos9", "centos-stream-9") generate_dockerfile("debian", "debian-12", trailer="".join(debian12_extras)) generate_dockerfile("fedora", "fedora-40") generate_dockerfile("opensuse-leap", "opensuse-leap-15") - generate_dockerfile("ubuntu2204", "ubuntu-2204") + generate_dockerfile("ubuntu2204", "ubuntu-2204", + trailer="".join(ubuntu2204_rust_extras)) + + # + # Non-fatal Rust-enabled build + # + generate_dockerfile("fedora-rust-nightly", "fedora-40", + trailer="".join(fedora_rustup_nightly_extras)) # # Cross compiling builds @@ -154,30 +197,24 @@ try: trailer=cross_build("aarch64-linux-gnu-", "aarch64-softmmu,aarch64-linux-user")) - # migration to bookworm stalled: https://lists.debian.org/debian-arm/2023/09/msg00006.html - generate_dockerfile("debian-armel-cross", "debian-11", - cross="armv6l", - trailer=cross_build("arm-linux-gnueabi-", - "arm-softmmu,arm-linux-user,armeb-linux-user")) - generate_dockerfile("debian-armhf-cross", "debian-12", cross="armv7l", trailer=cross_build("arm-linux-gnueabihf-", "arm-softmmu,arm-linux-user")) - generate_dockerfile("debian-i686-cross", "debian-11", + generate_dockerfile("debian-i686-cross", "debian-12", cross="i686", trailer=cross_build("i686-linux-gnu-", "x86_64-softmmu," "x86_64-linux-user," "i386-softmmu,i386-linux-user")) - generate_dockerfile("debian-mips64el-cross", "debian-11", + generate_dockerfile("debian-mips64el-cross", "debian-12", cross="mips64el", trailer=cross_build("mips64el-linux-gnuabi64-", "mips64el-softmmu,mips64el-linux-user")) - generate_dockerfile("debian-mipsel-cross", "debian-11", + generate_dockerfile("debian-mipsel-cross", "debian-12", cross="mipsel", trailer=cross_build("mipsel-linux-gnu-", "mipsel-softmmu,mipsel-linux-user")) @@ -187,7 +224,9 @@ try: trailer=cross_build("powerpc64le-linux-gnu-", "ppc64-softmmu,ppc64-linux-user")) - generate_dockerfile("debian-riscv64-cross", 
"debian-sid", + # while not yet a release architecture the packages are still + # build while part of testing + generate_dockerfile("debian-riscv64-cross", "debian-13", project="qemu-minimal", cross="riscv64", trailer=cross_build("riscv64-linux-gnu-", @@ -207,14 +246,13 @@ try: # # Cirrus packages lists for GitLab # - generate_cirrus("freebsd-13") - generate_cirrus("macos-13") + generate_cirrus("freebsd-14") generate_cirrus("macos-14") # # VM packages lists # - generate_pkglist("freebsd", "freebsd-13") + generate_pkglist("freebsd", "freebsd-14") # # Ansible package lists diff --git a/tests/meson.build b/tests/meson.build index 80dd3029cf7..c59619220f7 100644 --- a/tests/meson.build +++ b/tests/meson.build @@ -16,6 +16,8 @@ test_qapi_outputs = [ 'test-qapi-events-sub-sub-module.h', 'test-qapi-events.c', 'test-qapi-events.h', + 'test-qapi-features.c', + 'test-qapi-features.h', 'test-qapi-init-commands.c', 'test-qapi-init-commands.h', 'test-qapi-introspect.c', @@ -84,4 +86,5 @@ endif subdir('unit') subdir('qapi-schema') subdir('qtest') -subdir('migration') +subdir('migration-stress') +subdir('functional') diff --git a/tests/migration/guestperf-batch.py b/tests/migration-stress/guestperf-batch.py similarity index 100% rename from tests/migration/guestperf-batch.py rename to tests/migration-stress/guestperf-batch.py diff --git a/tests/migration/guestperf-plot.py b/tests/migration-stress/guestperf-plot.py similarity index 100% rename from tests/migration/guestperf-plot.py rename to tests/migration-stress/guestperf-plot.py diff --git a/tests/migration/guestperf.py b/tests/migration-stress/guestperf.py similarity index 100% rename from tests/migration/guestperf.py rename to tests/migration-stress/guestperf.py diff --git a/tests/qapi-schema/doc-non-first-section.out b/tests/migration-stress/guestperf/__init__.py similarity index 100% rename from tests/qapi-schema/doc-non-first-section.out rename to tests/migration-stress/guestperf/__init__.py diff --git a/tests/migration/guestperf/comparison.py b/tests/migration-stress/guestperf/comparison.py similarity index 89% rename from tests/migration/guestperf/comparison.py rename to tests/migration-stress/guestperf/comparison.py index 42cc0372d11..dee3ac25e46 100644 --- a/tests/migration/guestperf/comparison.py +++ b/tests/migration-stress/guestperf/comparison.py @@ -127,7 +127,7 @@ def __init__(self, name, scenarios): # varying numbers of channels Comparison("compr-multifd", scenarios = [ Scenario("compr-multifd-channels-4", - multifd=True, multifd_channels=2), + multifd=True, multifd_channels=4), Scenario("compr-multifd-channels-8", multifd=True, multifd_channels=8), Scenario("compr-multifd-channels-32", @@ -158,4 +158,17 @@ def __init__(self, name, scenarios): Scenario("compr-dirty-limit-50MB", dirty_limit=True, vcpu_dirty_limit=50), ]), + + # Looking at effect of multifd with + # different compression algorithms + Comparison("compr-multifd-compression", scenarios = [ + Scenario("compr-multifd-compression-zlib", + multifd=True, multifd_channels=2, multifd_compression="zlib"), + Scenario("compr-multifd-compression-zstd", + multifd=True, multifd_channels=2, multifd_compression="zstd"), + Scenario("compr-multifd-compression-qpl", + multifd=True, multifd_channels=2, multifd_compression="qpl"), + Scenario("compr-multifd-compression-uadk", + multifd=True, multifd_channels=2, multifd_compression="uadk"), + ]), ] diff --git a/tests/migration/guestperf/engine.py b/tests/migration-stress/guestperf/engine.py similarity index 91% rename from 
tests/migration/guestperf/engine.py rename to tests/migration-stress/guestperf/engine.py index 608d7270f65..d8462db7653 100644 --- a/tests/migration/guestperf/engine.py +++ b/tests/migration-stress/guestperf/engine.py @@ -24,13 +24,15 @@ import time from guestperf.progress import Progress, ProgressStats -from guestperf.report import Report +from guestperf.report import Report, ReportResult from guestperf.timings import TimingRecord, Timings sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'python')) from qemu.machine import QEMUMachine +# multifd supported compression algorithms +MULTIFD_CMP_ALGS = ("zlib", "zstd", "qpl", "uadk") class Engine(object): @@ -106,7 +108,8 @@ def _migrate_progress(self, vm): info.get("dirty-limit-ring-full-time", 0), ) - def _migrate(self, hardware, scenario, src, dst, connect_uri): + def _migrate(self, hardware, scenario, src, + dst, connect_uri, defer_migrate): src_qemu_time = [] src_vcpu_time = [] src_pid = src.get_pid() @@ -190,6 +193,12 @@ def _migrate(self, hardware, scenario, src, dst, connect_uri): scenario._compression_xbzrle_cache)) if scenario._multifd: + if (scenario._multifd_compression and + (scenario._multifd_compression not in MULTIFD_CMP_ALGS)): + raise Exception("unsupported multifd compression " + "algorithm: %s" % + scenario._multifd_compression) + resp = src.cmd("migrate-set-capabilities", capabilities = [ { "capability": "multifd", @@ -205,6 +214,12 @@ def _migrate(self, hardware, scenario, src, dst, connect_uri): resp = dst.cmd("migrate-set-parameters", multifd_channels=scenario._multifd_channels) + if scenario._multifd_compression: + resp = src.cmd("migrate-set-parameters", + multifd_compression=scenario._multifd_compression) + resp = dst.cmd("migrate-set-parameters", + multifd_compression=scenario._multifd_compression) + if scenario._dirty_limit: if not hardware._dirty_ring_size: raise Exception("dirty ring size must be configured when " @@ -220,6 +235,8 @@ def _migrate(self, hardware, scenario, src, dst, connect_uri): resp = src.cmd("migrate-set-parameters", vcpu_dirty_limit=scenario._vcpu_dirty_limit) + if defer_migrate: + resp = dst.cmd("migrate-incoming", uri=connect_uri) resp = src.cmd("migrate", uri=connect_uri) post_copy = False @@ -259,7 +276,11 @@ def _migrate(self, hardware, scenario, src, dst, connect_uri): src_vcpu_time.extend(self._vcpu_timing(src_pid, src_threads)) sleep_secs -= 1 - return [progress_history, src_qemu_time, src_vcpu_time] + result = ReportResult() + if progress._status == "completed" and not paused: + result = ReportResult(True) + + return [progress_history, src_qemu_time, src_vcpu_time, result] if self._verbose and (loop % 20) == 0: print("Iter %d: remain %5dMB of %5dMB (total %5dMB @ %5dMb/sec)" % ( @@ -373,11 +394,14 @@ def _get_common_args(self, hardware, tunnelled=False): def _get_src_args(self, hardware): return self._get_common_args(hardware) - def _get_dst_args(self, hardware, uri): + def _get_dst_args(self, hardware, uri, defer_migrate): tunnelled = False if self._dst_host != "localhost": tunnelled = True argv = self._get_common_args(hardware, tunnelled) + + if defer_migrate: + return argv + ["-incoming", "defer"] return argv + ["-incoming", uri] @staticmethod @@ -424,6 +448,7 @@ def _get_timings(self, vm): def run(self, hardware, scenario, result_dir=os.getcwd()): abs_result_dir = os.path.join(result_dir, scenario._name) + defer_migrate = False if self._transport == "tcp": uri = "tcp:%s:9000" % self._dst_host @@ -439,6 +464,9 @@ def run(self, hardware, scenario, 
result_dir=os.getcwd()): except: pass + if scenario._multifd: + defer_migrate = True + if self._dst_host != "localhost": dstmonaddr = ("localhost", 9001) else: @@ -452,7 +480,7 @@ def run(self, hardware, scenario, result_dir=os.getcwd()): monitor_address=srcmonaddr) dst = QEMUMachine(self._binary, - args=self._get_dst_args(hardware, uri), + args=self._get_dst_args(hardware, uri, defer_migrate), wrapper=self._get_dst_wrapper(hardware), name="qemu-dst-%d" % os.getpid(), monitor_address=dstmonaddr) @@ -461,10 +489,12 @@ def run(self, hardware, scenario, result_dir=os.getcwd()): src.launch() dst.launch() - ret = self._migrate(hardware, scenario, src, dst, uri) + ret = self._migrate(hardware, scenario, src, + dst, uri, defer_migrate) progress_history = ret[0] qemu_timings = ret[1] vcpu_timings = ret[2] + result = ret[3] if uri[0:5] == "unix:" and os.path.exists(uri[5:]): os.remove(uri[5:]) @@ -484,6 +514,7 @@ def run(self, hardware, scenario, result_dir=os.getcwd()): Timings(self._get_timings(src) + self._get_timings(dst)), Timings(qemu_timings), Timings(vcpu_timings), + result, self._binary, self._dst_host, self._kernel, self._initrd, self._transport, self._sleep) except Exception as e: diff --git a/tests/migration/guestperf/hardware.py b/tests/migration-stress/guestperf/hardware.py similarity index 100% rename from tests/migration/guestperf/hardware.py rename to tests/migration-stress/guestperf/hardware.py diff --git a/tests/migration/guestperf/plot.py b/tests/migration-stress/guestperf/plot.py similarity index 100% rename from tests/migration/guestperf/plot.py rename to tests/migration-stress/guestperf/plot.py diff --git a/tests/migration/guestperf/progress.py b/tests/migration-stress/guestperf/progress.py similarity index 100% rename from tests/migration/guestperf/progress.py rename to tests/migration-stress/guestperf/progress.py diff --git a/tests/migration/guestperf/report.py b/tests/migration-stress/guestperf/report.py similarity index 88% rename from tests/migration/guestperf/report.py rename to tests/migration-stress/guestperf/report.py index 1efd40c8680..e135e01be6e 100644 --- a/tests/migration/guestperf/report.py +++ b/tests/migration-stress/guestperf/report.py @@ -24,6 +24,22 @@ from guestperf.progress import Progress from guestperf.timings import Timings +class ReportResult(object): + + def __init__(self, success=False): + self._success = success + + def serialize(self): + return { + "success": self._success, + } + + @classmethod + def deserialize(cls, data): + return cls( + data["success"]) + + class Report(object): def __init__(self, @@ -33,6 +49,7 @@ def __init__(self, guest_timings, qemu_timings, vcpu_timings, + result, binary, dst_host, kernel, @@ -46,6 +63,7 @@ def __init__(self, self._guest_timings = guest_timings self._qemu_timings = qemu_timings self._vcpu_timings = vcpu_timings + self._result = result self._binary = binary self._dst_host = dst_host self._kernel = kernel @@ -61,6 +79,7 @@ def serialize(self): "guest_timings": self._guest_timings.serialize(), "qemu_timings": self._qemu_timings.serialize(), "vcpu_timings": self._vcpu_timings.serialize(), + "result": self._result.serialize(), "binary": self._binary, "dst_host": self._dst_host, "kernel": self._kernel, @@ -78,6 +97,7 @@ def deserialize(cls, data): Timings.deserialize(data["guest_timings"]), Timings.deserialize(data["qemu_timings"]), Timings.deserialize(data["vcpu_timings"]), + ReportResult.deserialize(data["result"]), data["binary"], data["dst_host"], data["kernel"], diff --git 
a/tests/migration/guestperf/scenario.py b/tests/migration-stress/guestperf/scenario.py similarity index 93% rename from tests/migration/guestperf/scenario.py rename to tests/migration-stress/guestperf/scenario.py index 154c4f5d5fa..4be7fafebf9 100644 --- a/tests/migration/guestperf/scenario.py +++ b/tests/migration-stress/guestperf/scenario.py @@ -30,7 +30,7 @@ def __init__(self, name, auto_converge=False, auto_converge_step=10, compression_mt=False, compression_mt_threads=1, compression_xbzrle=False, compression_xbzrle_cache=10, - multifd=False, multifd_channels=2, + multifd=False, multifd_channels=2, multifd_compression="", dirty_limit=False, x_vcpu_dirty_limit_period=500, vcpu_dirty_limit=1): @@ -61,6 +61,7 @@ def __init__(self, name, self._multifd = multifd self._multifd_channels = multifd_channels + self._multifd_compression = multifd_compression self._dirty_limit = dirty_limit self._x_vcpu_dirty_limit_period = x_vcpu_dirty_limit_period @@ -85,6 +86,7 @@ def serialize(self): "compression_xbzrle_cache": self._compression_xbzrle_cache, "multifd": self._multifd, "multifd_channels": self._multifd_channels, + "multifd_compression": self._multifd_compression, "dirty_limit": self._dirty_limit, "x_vcpu_dirty_limit_period": self._x_vcpu_dirty_limit_period, "vcpu_dirty_limit": self._vcpu_dirty_limit, @@ -109,4 +111,5 @@ def deserialize(cls, data): data["compression_xbzrle"], data["compression_xbzrle_cache"], data["multifd"], - data["multifd_channels"]) + data["multifd_channels"], + data["multifd_compression"]) diff --git a/tests/migration/guestperf/shell.py b/tests/migration-stress/guestperf/shell.py similarity index 97% rename from tests/migration/guestperf/shell.py rename to tests/migration-stress/guestperf/shell.py index c85d89efecb..63bbe3226c6 100644 --- a/tests/migration/guestperf/shell.py +++ b/tests/migration-stress/guestperf/shell.py @@ -46,7 +46,8 @@ def __init__(self): parser.add_argument("--binary", dest="binary", default="/usr/bin/qemu-system-x86_64") parser.add_argument("--dst-host", dest="dst_host", default="localhost") parser.add_argument("--kernel", dest="kernel", default="/boot/vmlinuz-%s" % platform.release()) - parser.add_argument("--initrd", dest="initrd", default="tests/migration/initrd-stress.img") + parser.add_argument("--initrd", dest="initrd", + default="tests/migration-stress/initrd-stress.img") parser.add_argument("--transport", dest="transport", default="unix") @@ -130,6 +131,8 @@ def __init__(self): action="store_true") parser.add_argument("--multifd-channels", dest="multifd_channels", default=2, type=int) + parser.add_argument("--multifd-compression", dest="multifd_compression", + default="") parser.add_argument("--dirty-limit", dest="dirty_limit", default=False, action="store_true") @@ -166,6 +169,7 @@ def get_scenario(self, args): multifd=args.multifd, multifd_channels=args.multifd_channels, + multifd_compression=args.multifd_compression, dirty_limit=args.dirty_limit, x_vcpu_dirty_limit_period=\ diff --git a/tests/migration/guestperf/timings.py b/tests/migration-stress/guestperf/timings.py similarity index 100% rename from tests/migration/guestperf/timings.py rename to tests/migration-stress/guestperf/timings.py diff --git a/tests/migration/initrd-stress.sh b/tests/migration-stress/initrd-stress.sh similarity index 100% rename from tests/migration/initrd-stress.sh rename to tests/migration-stress/initrd-stress.sh diff --git a/tests/migration/meson.build b/tests/migration-stress/meson.build similarity index 100% rename from tests/migration/meson.build rename to 
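With the guestperf changes above, a scenario that requests multifd compression (--multifd --multifd-compression zlib|zstd|qpl|uadk on the shell.py command line) makes the engine start the destination with -incoming defer and drive roughly the following QMP sequence, condensed here from the engine.py hunks (src/dst are the QEMUMachine handles; "zstd" is just one of the supported values):

    src.cmd("migrate-set-capabilities",
            capabilities=[{"capability": "multifd", "state": True}])
    dst.cmd("migrate-set-capabilities",
            capabilities=[{"capability": "multifd", "state": True}])
    src.cmd("migrate-set-parameters", multifd_channels=2)
    dst.cmd("migrate-set-parameters", multifd_channels=2)
    src.cmd("migrate-set-parameters", multifd_compression="zstd")
    dst.cmd("migrate-set-parameters", multifd_compression="zstd")
    dst.cmd("migrate-incoming", uri=connect_uri)   # only now, since -incoming defer
    src.cmd("migrate", uri=connect_uri)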
tests/migration-stress/meson.build diff --git a/tests/migration/stress.c b/tests/migration-stress/stress.c similarity index 100% rename from tests/migration/stress.c rename to tests/migration-stress/stress.c diff --git a/tests/migration/migration-test.h b/tests/migration/migration-test.h deleted file mode 100644 index 194df7df6f8..00000000000 --- a/tests/migration/migration-test.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2018 Red Hat, Inc. and/or its affiliates - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef MIGRATION_TEST_H -#define MIGRATION_TEST_H - -/* Common */ -#define TEST_MEM_PAGE_SIZE 4096 - -/* x86 */ -#define X86_TEST_MEM_START (1 * 1024 * 1024) -#define X86_TEST_MEM_END (100 * 1024 * 1024) - -/* S390 */ -#define S390_TEST_MEM_START (1 * 1024 * 1024) -#define S390_TEST_MEM_END (100 * 1024 * 1024) - -/* PPC */ -#define PPC_TEST_MEM_START (1 * 1024 * 1024) -#define PPC_TEST_MEM_END (100 * 1024 * 1024) -#define PPC_H_PUT_TERM_CHAR 0x58 - -/* ARM */ -#define ARM_TEST_MEM_START (0x40000000 + 1 * 1024 * 1024) -#define ARM_TEST_MEM_END (0x40000000 + 100 * 1024 * 1024) -#define ARM_MACH_VIRT_UART 0x09000000 -/* AArch64 kernel load address is 0x40080000, and the test memory starts at - * 0x40100000. So the maximum allowable kernel size is 512KB. - */ -#define ARM_TEST_MAX_KERNEL_SIZE (512 * 1024) - -#endif /* MIGRATION_TEST_H */ diff --git a/tests/qapi-schema/alternate-array.out b/tests/qapi-schema/alternate-array.out index a657d85738f..2f30973ac3f 100644 --- a/tests/qapi-schema/alternate-array.out +++ b/tests/qapi-schema/alternate-array.out @@ -1,7 +1,6 @@ module ./builtin object q_empty enum QType - prefix QTYPE member none member qnull member qnum diff --git a/tests/qapi-schema/comments.out b/tests/qapi-schema/comments.out index ce4f6a4f0f5..937070c2c42 100644 --- a/tests/qapi-schema/comments.out +++ b/tests/qapi-schema/comments.out @@ -1,7 +1,6 @@ module ./builtin object q_empty enum QType - prefix QTYPE member none member qnull member qnum diff --git a/tests/qapi-schema/doc-bad-section.err b/tests/qapi-schema/doc-bad-section.err deleted file mode 100644 index 785cacc08c4..00000000000 --- a/tests/qapi-schema/doc-bad-section.err +++ /dev/null @@ -1 +0,0 @@ -doc-bad-section.json:5:1: unexpected '=' markup in definition documentation diff --git a/tests/qapi-schema/doc-bad-section.json b/tests/qapi-schema/doc-bad-section.json deleted file mode 100644 index 8175d95867e..00000000000 --- a/tests/qapi-schema/doc-bad-section.json +++ /dev/null @@ -1,10 +0,0 @@ -# = section within an expression comment - -## -# @Enum: -# == No good here -# @one: The _one_ {and only} -# -# @two is undocumented -## -{ 'enum': 'Enum', 'data': [ 'one', 'two' ] } diff --git a/tests/qapi-schema/doc-good.json b/tests/qapi-schema/doc-good.json index f64bf38d854..fac13425b72 100644 --- a/tests/qapi-schema/doc-good.json +++ b/tests/qapi-schema/doc-good.json @@ -8,11 +8,18 @@ 'documentation-exceptions': [ 'Enum', 'Variant1', 'Alternate', 'cmd' ] } } ## -# = Section +# ******* +# Section +# ******* ## ## -# == Subsection +# Just text, no heading. +## + +## +# Subsection +# ========== # # *with emphasis* # @var {in braces} @@ -140,7 +147,8 @@ 'if': { 'not': { 'any': [ 'IFONE', 'IFTWO' ] } } } ## -# == Another subsection +# Another subsection +# ================== ## ## @@ -208,7 +216,7 @@ # # -> "this example" # -# <- "has no title" +# <- ... has no title ... 
## { 'command': 'cmd-boxed', 'boxed': true, 'data': 'Object', diff --git a/tests/qapi-schema/doc-good.out b/tests/qapi-schema/doc-good.out index 6d24f1127b9..04a55072646 100644 --- a/tests/qapi-schema/doc-good.out +++ b/tests/qapi-schema/doc-good.out @@ -1,7 +1,6 @@ module ./builtin object q_empty enum QType - prefix QTYPE member none member qnull member qnum @@ -56,10 +55,16 @@ event EVT_BOXED Object feature feat3 doc freeform body= -= Section +******* +Section +******* doc freeform body= -== Subsection +Just text, no heading. +doc freeform + body= +Subsection +========== *with emphasis* @var {in braces} @@ -111,7 +116,7 @@ The _one_ {and only}, description on the same line Also _one_ {and only} feature=enum-member-feat a member feature - section=None + section=Plain @two is undocumented doc symbol=Base body= @@ -153,7 +158,8 @@ description starts on the same line a feature doc freeform body= -== Another subsection +Another subsection +================== doc symbol=cmd body= @@ -169,15 +175,15 @@ description starts on the same line a feature feature=cmd-feat2 another feature - section=None + section=Plain .. note:: @arg3 is undocumented section=Returns @Object section=Errors some - section=TODO + section=Todo frobnicate - section=None + section=Plain .. admonition:: Notes - Lorem ipsum dolor sit amet @@ -210,12 +216,12 @@ If you're bored enough to read this, go see a video of boxed cats a feature feature=cmd-feat2 another feature - section=None + section=Plain .. qmp-example:: -> "this example" - <- "has no title" + <- ... has no title ... doc symbol=EVT_BOXED body= diff --git a/tests/qapi-schema/doc-good.txt b/tests/qapi-schema/doc-good.txt index cb37db606a6..74b73681d32 100644 --- a/tests/qapi-schema/doc-good.txt +++ b/tests/qapi-schema/doc-good.txt @@ -1,6 +1,8 @@ Section ******* +Just text, no heading. 
+ Subsection ========== @@ -35,249 +37,145 @@ Example: -> in <- out Examples: - *verbatim* - {braces} +Enum Enum + *Availability*: "IFCOND" -"Enum" (Enum) -------------- - - -Values -~~~~~~ - -"one" (**If: **"IFONE") - The _one_ {and only}, description on the same line - -"two" - Not documented - - -Features -~~~~~~~~ - -"enum-feat" - Also _one_ {and only} - -"enum-member-feat" - a member feature - -"two" is undocumented - - -If -~~ - -"IFCOND" - - -"Base" (Object) ---------------- - - -Members -~~~~~~~ - -"base1": "Enum" - description starts on a new line, minimally indented - - -If -~~ - -"IFALL1 and IFALL2" - - -"Variant1" (Object) -------------------- - -A paragraph - -Another paragraph - -"var1" is undocumented + Values: + * **one** -- The _one_ {and only}, description on the same line + * **two** -- Not documented -Members -~~~~~~~ + Features: + * **enum-feat** -- Also _one_ {and only} -"var1": "string" (**If: **"IFSTR") - Not documented + * **enum-member-feat** -- a member feature + "two" is undocumented -Features -~~~~~~~~ +Object Base + *Availability*: "IFALL1 and IFALL2" -"variant1-feat" - a feature + Members: + * **base1** ("Enum") -- description starts on a new line, + minimally indented -"member-feat" - a member feature +Object Variant1 + A paragraph -"Variant2" (Object) -------------------- + Another paragraph + "var1" is undocumented -"Object" (Object) ------------------ + Members: + * **var1** ("string") -- Not documented + Features: + * **variant1-feat** -- a feature -Members -~~~~~~~ + * **member-feat** -- a member feature -The members of "Base" -The members of "Variant1" when "base1" is ""one"" -The members of "Variant2" when "base1" is ""two"" (**If: **"IFONE or -IFTWO") +Object Variant2 -Features -~~~~~~~~ +Object Object -"union-feat1" - a feature + Members: + * The members of "Base". + * When "base1" is "one": The members of "Variant1". -"Alternate" (Alternate) ------------------------ + * When "base1" is "two": The members of "Variant2". 
+ Features: + * **union-feat1** -- a feature -Members -~~~~~~~ +Alternate Alternate + *Availability*: "not (IFONE or IFTWO)" -"i": "int" - description starts on the same line remainder indented the same "b" - is undocumented + Alternatives: + * **i** ("int") -- description starts on the same line remainder + indented the same "b" is undocumented -"b": "boolean" - Not documented + * **b** ("boolean") -- Not documented - -Features -~~~~~~~~ - -"alt-feat" - a feature - - -If -~~ - -"not (IFONE or IFTWO)" + Features: + * **alt-feat** -- a feature Another subsection ================== +Command cmd (Since: 2.10) -"cmd" (Command) ---------------- - - -Arguments -~~~~~~~~~ - -"arg1": "int" - description starts on a new line, indented - -"arg2": "string" (optional) - description starts on the same line remainder indented differently - -"arg3": "boolean" - Not documented - - -Features -~~~~~~~~ - -"cmd-feat1" - a feature + Arguments: + * **arg1** ("int") -- description starts on a new line, indented -"cmd-feat2" - another feature - -Note: - - "arg3" is undocumented - - -Returns -~~~~~~~ - -"Object" - - -Errors -~~~~~~ - -some - -Notes: - -* Lorem ipsum dolor sit amet - -* Ut enim ad minim veniam - -Duis aute irure dolor + * **arg2** ("string", *optional*) -- description starts on the + same line remainder indented differently -Example: Ideal fast-food burger situation: + * **arg3** ("boolean") -- Not documented - -> "in" - <- "out" + Features: + * **cmd-feat1** -- a feature -Examples: + * **cmd-feat2** -- another feature - - Not a QMP code block - - Merely a preformatted code block literal - It isn't even an rST list. - - *verbatim* - - {braces} + Note: -Note:: - Ceci n'est pas une note + "arg3" is undocumented + Return: + "Object" -- "Object" -Since -~~~~~ + Errors: + some -2.10 + Notes: + * Lorem ipsum dolor sit amet -"cmd-boxed" (Command) ---------------------- + * Ut enim ad minim veniam -If you're bored enough to read this, go see a video of boxed cats + Duis aute irure dolor + Example: Ideal fast-food burger situation: -Arguments -~~~~~~~~~ + -> "in" + <- "out" -The members of "Object" + Examples: -Features -~~~~~~~~ + - Not a QMP code block + - Merely a preformatted code block literal + It isn't even an rST list. + - *verbatim* + - {braces} -"cmd-feat1" - a feature + Note:: + Ceci n'est pas une note -"cmd-feat2" - another feature +Command cmd-boxed -Example:: + If you're bored enough to read this, go see a video of boxed cats - -> "this example" + Arguments: + * The members of "Object". - <- "has no title" + Features: + * **cmd-feat1** -- a feature + * **cmd-feat2** -- another feature -"EVT_BOXED" (Event) -------------------- + Example:: + -> "this example" -Arguments -~~~~~~~~~ + <- ... has no title ... -The members of "Object" +Event EVT_BOXED -Features -~~~~~~~~ + Members: + * The members of "Object". 
-"feat3" - a feature + Features: + * **feat3** -- a feature diff --git a/tests/qapi-schema/doc-non-first-section.err b/tests/qapi-schema/doc-non-first-section.err deleted file mode 100644 index eeced2bca71..00000000000 --- a/tests/qapi-schema/doc-non-first-section.err +++ /dev/null @@ -1 +0,0 @@ -doc-non-first-section.json:5:1: '=' heading must come first in a comment block diff --git a/tests/qapi-schema/doc-non-first-section.json b/tests/qapi-schema/doc-non-first-section.json deleted file mode 100644 index 1590876061d..00000000000 --- a/tests/qapi-schema/doc-non-first-section.json +++ /dev/null @@ -1,6 +0,0 @@ -# = section must be first line - -## -# -# = Not first -## diff --git a/tests/qapi-schema/empty.out b/tests/qapi-schema/empty.out index 3feb3f69d3d..d1981f85861 100644 --- a/tests/qapi-schema/empty.out +++ b/tests/qapi-schema/empty.out @@ -1,7 +1,6 @@ module ./builtin object q_empty enum QType - prefix QTYPE member none member qnull member qnum diff --git a/tests/qapi-schema/features-too-many.err b/tests/qapi-schema/features-too-many.err new file mode 100644 index 00000000000..bbbd6e52026 --- /dev/null +++ b/tests/qapi-schema/features-too-many.err @@ -0,0 +1,2 @@ +features-too-many.json: In command 'go-fish': +features-too-many.json:2: Maximum of 64 schema features is permitted diff --git a/tests/qapi-schema/features-too-many.json b/tests/qapi-schema/features-too-many.json new file mode 100644 index 00000000000..aab0a0b5f1b --- /dev/null +++ b/tests/qapi-schema/features-too-many.json @@ -0,0 +1,13 @@ +# Max 64 features, with 2 specials, so 63rd custom is invalid +{ 'command': 'go-fish', + 'features': [ + 'f00', 'f01', 'f02', 'f03', 'f04', 'f05', 'f06', 'f07', + 'f08', 'f09', 'f0a', 'f0b', 'f0c', 'f0d', 'f0e', 'f0f', + 'f10', 'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', + 'f18', 'f19', 'f1a', 'f1b', 'f1c', 'f1d', 'f1e', 'f1f', + 'f20', 'f21', 'f22', 'f23', 'f24', 'f25', 'f26', 'f27', + 'f28', 'f29', 'f2a', 'f2b', 'f2c', 'f2d', 'f2e', 'f2f', + 'f30', 'f31', 'f32', 'f33', 'f34', 'f35', 'f36', 'f37', + 'f38', 'f39', 'f3a', 'f3b', 'f3c', 'f3d', 'f3e' + ] +} diff --git a/tests/qapi-schema/features-too-many.out b/tests/qapi-schema/features-too-many.out new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/qapi-schema/include-repetition.out b/tests/qapi-schema/include-repetition.out index 16dbd9b8194..c564d278626 100644 --- a/tests/qapi-schema/include-repetition.out +++ b/tests/qapi-schema/include-repetition.out @@ -1,7 +1,6 @@ module ./builtin object q_empty enum QType - prefix QTYPE member none member qnull member qnum diff --git a/tests/qapi-schema/include-simple.out b/tests/qapi-schema/include-simple.out index 48e923bfbc8..ec8200ab18b 100644 --- a/tests/qapi-schema/include-simple.out +++ b/tests/qapi-schema/include-simple.out @@ -1,7 +1,6 @@ module ./builtin object q_empty enum QType - prefix QTYPE member none member qnull member qnum diff --git a/tests/qapi-schema/indented-expr.out b/tests/qapi-schema/indented-expr.out index 6a30ded3fa6..a7c22c3eefc 100644 --- a/tests/qapi-schema/indented-expr.out +++ b/tests/qapi-schema/indented-expr.out @@ -1,7 +1,6 @@ module ./builtin object q_empty enum QType - prefix QTYPE member none member qnull member qnum diff --git a/tests/qapi-schema/meson.build b/tests/qapi-schema/meson.build index 0f479d93170..c47025d16d8 100644 --- a/tests/qapi-schema/meson.build +++ b/tests/qapi-schema/meson.build @@ -61,7 +61,6 @@ schemas = [ 'doc-bad-event-arg.json', 'doc-bad-feature.json', 'doc-bad-indent.json', - 'doc-bad-section.json', 
'doc-bad-symbol.json', 'doc-bad-union-member.json', 'doc-before-include.json', @@ -105,6 +104,7 @@ schemas = [ 'event-case.json', 'event-member-invalid-dict.json', 'event-nest-struct.json', + 'features-too-many.json', 'features-bad-type.json', 'features-deprecated-type.json', 'features-duplicate-name.json', diff --git a/tests/qapi-schema/qapi-schema-test.out b/tests/qapi-schema/qapi-schema-test.out index e2f0981348b..4617eb4e98a 100644 --- a/tests/qapi-schema/qapi-schema-test.out +++ b/tests/qapi-schema/qapi-schema-test.out @@ -1,7 +1,6 @@ module ./builtin object q_empty enum QType - prefix QTYPE member none member qnull member qnum diff --git a/tests/qapi-schema/test-qapi.py b/tests/qapi-schema/test-qapi.py index 7e3f9f4aa1f..4be930228cc 100755 --- a/tests/qapi-schema/test-qapi.py +++ b/tests/qapi-schema/test-qapi.py @@ -96,17 +96,8 @@ def _print_variants(variants): @staticmethod def _print_if(ifcond, indent=4): - # TODO Drop this hack after replacing OrderedDict by plain - # dict (requires Python 3.7) - def _massage(subcond): - if isinstance(subcond, str): - return subcond - if isinstance(subcond, list): - return [_massage(val) for val in subcond] - return {key: _massage(val) for key, val in subcond.items()} - if ifcond.is_present(): - print('%sif %s' % (' ' * indent, _massage(ifcond.ifcond))) + print('%sif %s' % (' ' * indent, ifcond.ifcond)) @classmethod def _print_features(cls, features, indent=4): @@ -131,7 +122,7 @@ def test_frontend(fname): for feat, section in doc.features.items(): print(' feature=%s\n%s' % (feat, section.text)) for section in doc.sections: - print(' section=%s\n%s' % (section.tag, section.text)) + print(' section=%s\n%s' % (section.kind, section.text)) def open_test_result(dir_name, file_name, update): diff --git a/tests/qemu-iotests/024 b/tests/qemu-iotests/024 index 285f17e79f0..b29c76e1618 100755 --- a/tests/qemu-iotests/024 +++ b/tests/qemu-iotests/024 @@ -283,7 +283,7 @@ TEST_IMG=$BASE_OLD _make_test_img -b "$BASE_NEW" -F $IMGFMT \ CLUSTER_SIZE=$(( CLUSTER_SIZE * 2 )) TEST_IMG=$OVERLAY \ _make_test_img -b "$BASE_OLD" -F $IMGFMT $(( CLUSTER_SIZE * 6 )) -TEST_IMG=$OVERLAY _img_info +TEST_IMG=$OVERLAY _img_info | grep -v '^backing file format:' echo echo "Fill backing files with data" diff --git a/tests/qemu-iotests/024.out b/tests/qemu-iotests/024.out index e1e8eea8634..3d1e31927a2 100644 --- a/tests/qemu-iotests/024.out +++ b/tests/qemu-iotests/024.out @@ -214,7 +214,6 @@ file format: IMGFMT virtual size: 384 KiB (393216 bytes) cluster_size: 131072 backing file: TEST_DIR/subdir/t.IMGFMT.base_old -backing file format: IMGFMT Fill backing files with data diff --git a/tests/qemu-iotests/039.out b/tests/qemu-iotests/039.out index e52484d4be1..8fdbcc528aa 100644 --- a/tests/qemu-iotests/039.out +++ b/tests/qemu-iotests/039.out @@ -11,7 +11,7 @@ No errors were found on the image. 
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 wrote 512/512 bytes at offset 0 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) incompatible_features [0] ERROR cluster 5 refcount=0 reference=1 ERROR OFLAG_COPIED data cluster: l2_entry=8000000000050000 refcount=0 @@ -46,7 +46,7 @@ read 512/512 bytes at offset 0 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 wrote 512/512 bytes at offset 0 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) incompatible_features [0] ERROR cluster 5 refcount=0 reference=1 Rebuilding refcount structure @@ -60,7 +60,7 @@ incompatible_features [] Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 wrote 512/512 bytes at offset 0 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) incompatible_features [] No errors were found on the image. @@ -79,7 +79,7 @@ No errors were found on the image. Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 wrote 512/512 bytes at offset 0 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) incompatible_features [0] ERROR cluster 5 refcount=0 reference=1 ERROR OFLAG_COPIED data cluster: l2_entry=8000000000050000 refcount=0 @@ -89,7 +89,7 @@ Data may be corrupted, or further writes to the image may corrupt it. Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 wrote 512/512 bytes at offset 0 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) incompatible_features [] No errors were found on the image. 
*** done diff --git a/tests/qemu-iotests/041 b/tests/qemu-iotests/041 index 98d17b13883..8452845f448 100755 --- a/tests/qemu-iotests/041 +++ b/tests/qemu-iotests/041 @@ -1100,10 +1100,8 @@ class TestRepairQuorum(iotests.QMPTestCase): # Check the full error message now self.vm.shutdown() - log = self.vm.get_log() - log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log) + log = iotests.filter_qtest(self.vm.get_log()) log = re.sub(r'^Formatting.*\n', '', log) - log = re.sub(r'\n\[I \+\d+\.\d+\] CLOSED\n?$', '', log) log = re.sub(r'^%s: ' % os.path.basename(iotests.qemu_prog), '', log) self.assertEqual(log, diff --git a/tests/qemu-iotests/049.out b/tests/qemu-iotests/049.out index 34e1b452e6e..70c627538bc 100644 --- a/tests/qemu-iotests/049.out +++ b/tests/qemu-iotests/049.out @@ -98,8 +98,7 @@ qemu-img create -f qcow2 -o size=-1024 TEST_DIR/t.qcow2 qemu-img: TEST_DIR/t.qcow2: Value '-1024' is out of range for parameter 'size' qemu-img create -f qcow2 TEST_DIR/t.qcow2 -- -1k -qemu-img: Invalid image size specified. You may use k, M, G, T, P or E suffixes for -qemu-img: kilobytes, megabytes, gigabytes, terabytes, petabytes and exabytes. +qemu-img: Invalid image size specified: '-1k' qemu-img create -f qcow2 -o size=-1k TEST_DIR/t.qcow2 qemu-img: TEST_DIR/t.qcow2: Parameter 'size' expects a non-negative number below 2^64 @@ -107,8 +106,7 @@ Optional suffix k, M, G, T, P or E means kilo-, mega-, giga-, tera-, peta- and exabytes, respectively. qemu-img create -f qcow2 TEST_DIR/t.qcow2 -- 1kilobyte -qemu-img: Invalid image size specified. You may use k, M, G, T, P or E suffixes for -qemu-img: kilobytes, megabytes, gigabytes, terabytes, petabytes and exabytes. +qemu-img: Invalid image size specified: '1kilobyte' qemu-img create -f qcow2 -o size=1kilobyte TEST_DIR/t.qcow2 qemu-img: TEST_DIR/t.qcow2: Parameter 'size' expects a non-negative number below 2^64 @@ -116,8 +114,7 @@ Optional suffix k, M, G, T, P or E means kilo-, mega-, giga-, tera-, peta- and exabytes, respectively. qemu-img create -f qcow2 TEST_DIR/t.qcow2 -- foobar -qemu-img: Invalid image size specified. You may use k, M, G, T, P or E suffixes for -qemu-img: kilobytes, megabytes, gigabytes, terabytes, petabytes and exabytes. 
+qemu-img: Invalid image size specified: 'foobar' qemu-img create -f qcow2 -o size=foobar TEST_DIR/t.qcow2 qemu-img: TEST_DIR/t.qcow2: Parameter 'size' expects a non-negative number below 2^64 diff --git a/tests/qemu-iotests/051.pc.out b/tests/qemu-iotests/051.pc.out index 7e10c5fa1b0..f19b532eb9b 100644 --- a/tests/qemu-iotests/051.pc.out +++ b/tests/qemu-iotests/051.pc.out @@ -181,7 +181,7 @@ QEMU X.Y.Z monitor - type 'help' for more information Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device virtio-scsi,id=virtio-scsi1 -device scsi-hd,bus=virtio-scsi1.0,drive=disk,share-rw=on QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: -device scsi-hd,bus=virtio-scsi1.0,drive=disk,share-rw=on: Cannot change iothread of active block backend +(qemu) quit Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device virtio-blk-pci,drive=disk,iothread=thread0,share-rw=on QEMU X.Y.Z monitor - type 'help' for more information diff --git a/tests/qemu-iotests/061.out b/tests/qemu-iotests/061.out index 24c33add7ce..951c6bf3e62 100644 --- a/tests/qemu-iotests/061.out +++ b/tests/qemu-iotests/061.out @@ -118,7 +118,7 @@ No errors were found on the image. Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 wrote 131072/131072 bytes at offset 0 128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) magic 0x514649fb version 3 backing_file_offset 0x0 @@ -304,7 +304,7 @@ No errors were found on the image. Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 wrote 131072/131072 bytes at offset 0 128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) magic 0x514649fb version 3 backing_file_offset 0x0 diff --git a/tests/qemu-iotests/106 b/tests/qemu-iotests/106 index ae0fc466910..55548439aad 100755 --- a/tests/qemu-iotests/106 +++ b/tests/qemu-iotests/106 @@ -40,6 +40,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 _supported_fmt raw _supported_proto file fuse _supported_os Linux +_require_disk_usage # in kB CREATION_SIZE=128 diff --git a/tests/qemu-iotests/125 b/tests/qemu-iotests/125 index 46279d6b382..708e7c5ba21 100755 --- a/tests/qemu-iotests/125 +++ b/tests/qemu-iotests/125 @@ -35,7 +35,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 get_image_size_on_host() { - echo $(($(stat -c '%b * %B' "$TEST_IMG_FILE"))) + disk_usage "$TEST_IMG_FILE" } # get standard environment and filters diff --git a/tests/qemu-iotests/137.out b/tests/qemu-iotests/137.out index 86377c80cde..e19df5b6ba8 100644 --- a/tests/qemu-iotests/137.out +++ b/tests/qemu-iotests/137.out @@ -35,7 +35,7 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 qemu-io: Unsupported value 'blubb' for qcow2 option 'overlap-check'. 
Allowed are any of the following: none, constant, cached, all wrote 512/512 bytes at offset 0 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) OK: Dirty bit not set Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 qemu-io: Parameter 'lazy-refcounts' expects 'on' or 'off' diff --git a/tests/qemu-iotests/153 b/tests/qemu-iotests/153 index 9bc3be8f75c..1e02f6a6e34 100755 --- a/tests/qemu-iotests/153 +++ b/tests/qemu-iotests/153 @@ -63,7 +63,7 @@ _supported_proto file _run_cmd() { echo - (echo "$@"; "$@" 2>&1 1>/dev/null) | _filter_testdir + (echo "$@"; "$@" 2>&1 1>/dev/null) | _filter_testdir | _filter_qemu_img } _do_run_qemu() diff --git a/tests/qemu-iotests/153.out b/tests/qemu-iotests/153.out index ff8e55864a5..28e1a22952b 100644 --- a/tests/qemu-iotests/153.out +++ b/tests/qemu-iotests/153.out @@ -120,16 +120,16 @@ _qemu_img_wrapper compare -U TEST_DIR/t.qcow2 TEST_DIR/t.qcow2 _qemu_img_wrapper map -U TEST_DIR/t.qcow2 _qemu_img_wrapper amend -o size=32M -U TEST_DIR/t.qcow2 -qemu-img: unrecognized option '-U' -Try 'qemu-img --help' for more information +qemu-img amend: invalid option -- 'U' +Try 'qemu-img amend --help' for more information _qemu_img_wrapper commit -U TEST_DIR/t.qcow2 -qemu-img: unrecognized option '-U' -Try 'qemu-img --help' for more information +qemu-img commit: invalid option -- 'U' +Try 'qemu-img commit --help' for more information _qemu_img_wrapper resize -U TEST_DIR/t.qcow2 32M -qemu-img: unrecognized option '-U' -Try 'qemu-img --help' for more information +qemu-img resize: invalid option -- 'U' +Try 'qemu-img resize --help' for more information _qemu_img_wrapper rebase -U TEST_DIR/t.qcow2 -b TEST_DIR/t.qcow2.base -F qcow2 qemu-img: Could not open 'TEST_DIR/t.qcow2': Failed to get "write" lock @@ -244,16 +244,16 @@ _qemu_img_wrapper compare -U TEST_DIR/t.qcow2 TEST_DIR/t.qcow2 _qemu_img_wrapper map -U TEST_DIR/t.qcow2 _qemu_img_wrapper amend -o size=32M -U TEST_DIR/t.qcow2 -qemu-img: unrecognized option '-U' -Try 'qemu-img --help' for more information +qemu-img amend: invalid option -- 'U' +Try 'qemu-img amend --help' for more information _qemu_img_wrapper commit -U TEST_DIR/t.qcow2 -qemu-img: unrecognized option '-U' -Try 'qemu-img --help' for more information +qemu-img commit: invalid option -- 'U' +Try 'qemu-img commit --help' for more information _qemu_img_wrapper resize -U TEST_DIR/t.qcow2 32M -qemu-img: unrecognized option '-U' -Try 'qemu-img --help' for more information +qemu-img resize: invalid option -- 'U' +Try 'qemu-img resize --help' for more information _qemu_img_wrapper rebase -U TEST_DIR/t.qcow2 -b TEST_DIR/t.qcow2.base -F qcow2 qemu-img: Could not open 'TEST_DIR/t.qcow2': Failed to get "write" lock @@ -349,16 +349,16 @@ _qemu_img_wrapper compare -U TEST_DIR/t.qcow2 TEST_DIR/t.qcow2 _qemu_img_wrapper map -U TEST_DIR/t.qcow2 _qemu_img_wrapper amend -o size=32M -U TEST_DIR/t.qcow2 -qemu-img: unrecognized option '-U' -Try 'qemu-img --help' for more information +qemu-img amend: invalid option -- 'U' +Try 'qemu-img amend --help' for more information _qemu_img_wrapper commit -U TEST_DIR/t.qcow2 -qemu-img: unrecognized option '-U' -Try 'qemu-img --help' for more information +qemu-img commit: invalid option -- 'U' +Try 'qemu-img commit --help' for more information 
_qemu_img_wrapper resize -U TEST_DIR/t.qcow2 32M -qemu-img: unrecognized option '-U' -Try 'qemu-img --help' for more information +qemu-img resize: invalid option -- 'U' +Try 'qemu-img resize --help' for more information _qemu_img_wrapper rebase -U TEST_DIR/t.qcow2 -b TEST_DIR/t.qcow2.base -F qcow2 diff --git a/tests/qemu-iotests/165 b/tests/qemu-iotests/165 index b24907a62f3..b3b1709d71f 100755 --- a/tests/qemu-iotests/165 +++ b/tests/qemu-iotests/165 @@ -82,9 +82,7 @@ class TestPersistentDirtyBitmap(iotests.QMPTestCase): self.vm.shutdown() #catch 'Persistent bitmaps are lost' possible error - log = self.vm.get_log() - log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log) - log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log) + log = iotests.filter_qtest(self.vm.get_log()) if log: print(log) diff --git a/tests/qemu-iotests/172.out b/tests/qemu-iotests/172.out index 07eebf35836..146fc723883 100644 --- a/tests/qemu-iotests/172.out +++ b/tests/qemu-iotests/172.out @@ -68,9 +68,6 @@ floppy0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -125,9 +122,6 @@ ide1-cd0: [not inserted] floppy0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -183,9 +177,6 @@ floppy1 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -265,9 +256,6 @@ floppy0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -322,9 +310,6 @@ ide1-cd0: [not inserted] floppy0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -380,9 +365,6 @@ floppy1 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -422,9 +404,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -461,9 +440,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -519,9 +495,6 @@ none1 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -586,9 +559,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -644,9 +614,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2) ide1-cd0: 
[not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -702,9 +669,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -760,9 +724,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -827,9 +788,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -885,9 +843,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2.2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -930,9 +885,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -1106,9 +1058,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -1145,9 +1094,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -1187,9 +1133,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit @@ -1226,9 +1169,6 @@ none0 (NODE_NAME): TEST_DIR/t.qcow2 (qcow2) ide1-cd0: [not inserted] Attached to: /machine/unattached/device[N] Removable device: not locked, tray closed - -sd0: [not inserted] - Removable device: not locked, tray closed (qemu) quit diff --git a/tests/qemu-iotests/175 b/tests/qemu-iotests/175 index f74f053b719..bbbf550a5af 100755 --- a/tests/qemu-iotests/175 +++ b/tests/qemu-iotests/175 @@ -77,6 +77,7 @@ _supported_os Linux _default_cache_mode none _supported_cache_modes none directsync +_require_disk_usage size=$((1 * 1024 * 1024)) diff --git a/tests/qemu-iotests/178 b/tests/qemu-iotests/178 index 8df241ead83..463c59a77f1 100755 --- a/tests/qemu-iotests/178 +++ b/tests/qemu-iotests/178 @@ -58,7 +58,7 @@ $QEMU_IMG measure -f qcow2 # missing filename $QEMU_IMG measure -l snap1 # missing filename $QEMU_IMG measure -o , # invalid option list $QEMU_IMG measure -l snapshot.foo=bar # invalid snapshot option -$QEMU_IMG measure --output foo # invalid output format +$QEMU_IMG measure --output foo 2>&1 | _filter_qemu_img # invalid output format $QEMU_IMG measure --size -1 # invalid image size $QEMU_IMG measure -O foo "$TEST_IMG" # unknown image file format diff --git a/tests/qemu-iotests/178.out.qcow2 b/tests/qemu-iotests/178.out.qcow2 index fe193fd5f4f..61506b519f4 100644 --- 
a/tests/qemu-iotests/178.out.qcow2 +++ b/tests/qemu-iotests/178.out.qcow2 @@ -12,7 +12,8 @@ qemu-img: --image-opts, -f, and -l require a filename argument. qemu-img: Invalid option list: , qemu-img: Invalid parameter 'snapshot.foo' qemu-img: Failed in parsing snapshot param 'snapshot.foo=bar' -qemu-img: --output must be used with human or json as argument. +qemu-img: --output expects 'human' or 'json', not 'foo' +Try 'qemu-img measure --help' for more information qemu-img: Invalid image size specified. Must be between 0 and 9223372036854775807. qemu-img: Unknown file format 'foo' diff --git a/tests/qemu-iotests/178.out.raw b/tests/qemu-iotests/178.out.raw index 445e460fad9..6d994a433a6 100644 --- a/tests/qemu-iotests/178.out.raw +++ b/tests/qemu-iotests/178.out.raw @@ -12,7 +12,8 @@ qemu-img: --image-opts, -f, and -l require a filename argument. qemu-img: Invalid option list: , qemu-img: Invalid parameter 'snapshot.foo' qemu-img: Failed in parsing snapshot param 'snapshot.foo=bar' -qemu-img: --output must be used with human or json as argument. +qemu-img: --output expects 'human' or 'json', not 'foo' +Try 'qemu-img measure --help' for more information qemu-img: Invalid image size specified. Must be between 0 and 9223372036854775807. qemu-img: Unknown file format 'foo' diff --git a/tests/qemu-iotests/184.out b/tests/qemu-iotests/184.out index e8f631f8532..ef99bb2e9ab 100644 --- a/tests/qemu-iotests/184.out +++ b/tests/qemu-iotests/184.out @@ -26,6 +26,7 @@ Testing: { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "backing-image": { "virtual-size": 1073741824, @@ -40,6 +41,12 @@ Testing: }, "iops_wr": 0, "ro": false, + "children": [ + { + "node-name": "disk0", + "child": "file" + } + ], "node-name": "throttle0", "backing_file_depth": 1, "drv": "throttle", @@ -59,6 +66,7 @@ Testing: { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 1073741824, "filename": "null-co://", @@ -67,6 +75,8 @@ Testing: }, "iops_wr": 0, "ro": false, + "children": [ + ], "node-name": "disk0", "backing_file_depth": 0, "drv": "null-co", diff --git a/tests/qemu-iotests/191.out b/tests/qemu-iotests/191.out index c3309e4bc69..2a72ca7106e 100644 --- a/tests/qemu-iotests/191.out +++ b/tests/qemu-iotests/191.out @@ -114,6 +114,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "backing-image": { "virtual-size": 67108864, @@ -155,6 +156,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 197120, "filename": "TEST_DIR/t.IMGFMT.ovl2", @@ -183,6 +185,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "backing-image": { "virtual-size": 67108864, @@ -224,6 +227,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 197120, "filename": "TEST_DIR/t.IMGFMT", @@ -252,6 +256,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "backing-image": { "virtual-size": 67108864, @@ -293,6 +298,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 393216, "filename": "TEST_DIR/t.IMGFMT.mid", @@ -321,6 +327,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 67108864, "filename": 
"TEST_DIR/t.IMGFMT.base", @@ -350,6 +357,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 393216, "filename": "TEST_DIR/t.IMGFMT.base", @@ -521,6 +529,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "backing-image": { "virtual-size": 67108864, @@ -562,6 +571,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 197120, "filename": "TEST_DIR/t.IMGFMT.ovl2", @@ -590,6 +600,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "backing-image": { "backing-image": { @@ -642,6 +653,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 197120, "filename": "TEST_DIR/t.IMGFMT.ovl3", @@ -670,6 +682,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 67108864, "filename": "TEST_DIR/t.IMGFMT.base", @@ -699,6 +712,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 393216, "filename": "TEST_DIR/t.IMGFMT.base", @@ -727,6 +741,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "backing-image": { "virtual-size": 67108864, @@ -768,6 +783,7 @@ wrote 65536/65536 bytes at offset 1048576 { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 197120, "filename": "TEST_DIR/t.IMGFMT", diff --git a/tests/qemu-iotests/194 b/tests/qemu-iotests/194 index c0ce82dd257..e114c0b2695 100755 --- a/tests/qemu-iotests/194 +++ b/tests/qemu-iotests/194 @@ -34,6 +34,7 @@ with iotests.FilePath('source.img') as source_img_path, \ img_size = '1G' iotests.qemu_img_create('-f', iotests.imgfmt, source_img_path, img_size) + iotests.qemu_io('-f', iotests.imgfmt, '-c', 'write 512M 1M', source_img_path) iotests.qemu_img_create('-f', iotests.imgfmt, dest_img_path, img_size) iotests.log('Launching VMs...') @@ -61,7 +62,8 @@ with iotests.FilePath('source.img') as source_img_path, \ iotests.log('Waiting for `drive-mirror` to complete...') iotests.log(source_vm.event_wait('BLOCK_JOB_READY'), - filters=[iotests.filter_qmp_event]) + filters=[iotests.filter_qmp_event, + iotests.filter_block_job]) iotests.log('Starting migration...') capabilities = [{'capability': 'events', 'state': True}, @@ -87,7 +89,8 @@ with iotests.FilePath('source.img') as source_img_path, \ while True: event2 = source_vm.event_wait('BLOCK_JOB_COMPLETED') - iotests.log(event2, filters=[iotests.filter_qmp_event]) + iotests.log(event2, filters=[iotests.filter_qmp_event, + iotests.filter_block_job]) if event2['event'] == 'BLOCK_JOB_COMPLETED': iotests.log('Stopping the NBD server on destination...') iotests.log(dest_vm.qmp('nbd-server-stop')) diff --git a/tests/qemu-iotests/194.out b/tests/qemu-iotests/194.out index 376ed1d2e63..d02655a5147 100644 --- a/tests/qemu-iotests/194.out +++ b/tests/qemu-iotests/194.out @@ -7,17 +7,18 @@ Launching NBD server on destination... Starting `drive-mirror` on source... {"return": {}} Waiting for `drive-mirror` to complete... 
-{"data": {"device": "mirror-job0", "len": 1073741824, "offset": 1073741824, "speed": 0, "type": "mirror"}, "event": "BLOCK_JOB_READY", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"device": "mirror-job0", "len": "LEN", "offset": "OFFSET", "speed": 0, "type": "mirror"}, "event": "BLOCK_JOB_READY", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} Starting migration... {"return": {}} {"execute": "migrate-start-postcopy", "arguments": {}} {"return": {}} {"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "device"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} Gracefully ending the `drive-mirror` job on source... {"return": {}} -{"data": {"device": "mirror-job0", "len": 1073741824, "offset": 1073741824, "speed": 0, "type": "mirror"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"device": "mirror-job0", "len": "LEN", "offset": "OFFSET", "speed": 0, "type": "mirror"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} Stopping the NBD server on destination... {"return": {}} Wait for migration completion on target... diff --git a/tests/qemu-iotests/203.out b/tests/qemu-iotests/203.out index 9d4abba8c5d..8e58705e515 100644 --- a/tests/qemu-iotests/203.out +++ b/tests/qemu-iotests/203.out @@ -8,4 +8,5 @@ Starting migration... {"return": {}} {"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "device"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} diff --git a/tests/qemu-iotests/211.out b/tests/qemu-iotests/211.out index f02c75409ca..ff9f9a6913a 100644 --- a/tests/qemu-iotests/211.out +++ b/tests/qemu-iotests/211.out @@ -17,7 +17,7 @@ file format: IMGFMT virtual size: 128 MiB (134217728 bytes) cluster_size: 1048576 -[{"data": false, "depth": 0, "length": 134217728, "present": true, "start": 0, "zero": true}] +[{"compressed": false, "data": false, "depth": 0, "length": 134217728, "present": true, "start": 0, "zero": true}] === Successful image creation (explicit defaults) === {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "file", "filename": "TEST_DIR/PID-t.vdi", "size": 0}}} @@ -35,7 +35,7 @@ file format: IMGFMT virtual size: 64 MiB (67108864 bytes) cluster_size: 1048576 -[{"data": false, "depth": 0, "length": 67108864, "present": true, "start": 0, "zero": true}] +[{"compressed": false, "data": false, "depth": 0, "length": 67108864, "present": true, "start": 0, "zero": true}] === Successful image creation (with non-default options) === {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "file", "filename": "TEST_DIR/PID-t.vdi", "size": 0}}} @@ -53,7 +53,7 @@ file format: IMGFMT virtual size: 32 MiB (33554432 bytes) cluster_size: 1048576 -[{"data": true, "depth": 0, "length": 3072, "offset": 1024, "present": true, "start": 0, 
"zero": false}, {"data": true, "depth": 0, "length": 33551360, "offset": 4096, "present": true, "start": 3072, "zero": true}] +[{"compressed": false, "data": true, "depth": 0, "length": 3072, "offset": 1024, "present": true, "start": 0, "zero": false}, {"compressed": false, "data": true, "depth": 0, "length": 33551360, "offset": 4096, "present": true, "start": 3072, "zero": true}] === Invalid BlockdevRef === {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "vdi", "file": "this doesn't exist", "size": 33554432}}} diff --git a/tests/qemu-iotests/221 b/tests/qemu-iotests/221 index c463fd4b113..eba00b80adb 100755 --- a/tests/qemu-iotests/221 +++ b/tests/qemu-iotests/221 @@ -41,6 +41,7 @@ _supported_os Linux _default_cache_mode writeback _supported_cache_modes writeback writethrough unsafe +_require_disk_usage echo echo "=== Check mapping of unaligned raw image ===" diff --git a/tests/qemu-iotests/233.out b/tests/qemu-iotests/233.out index 1910f7df20f..d498d55e0e7 100644 --- a/tests/qemu-iotests/233.out +++ b/tests/qemu-iotests/233.out @@ -69,8 +69,8 @@ read 1048576/1048576 bytes at offset 1048576 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) == check TLS with authorization == -qemu-img: Could not open 'driver=nbd,host=127.0.0.1,port=PORT,tls-creds=tls0': Failed to read option reply: Cannot read from TLS channel: Software caused connection abort -qemu-img: Could not open 'driver=nbd,host=127.0.0.1,port=PORT,tls-creds=tls0': Failed to read option reply: Cannot read from TLS channel: Software caused connection abort +qemu-img: Could not open 'driver=nbd,host=127.0.0.1,port=PORT,tls-creds=tls0': Failed to read option reply: Cannot read from TLS channel: The TLS connection was non-properly terminated. +qemu-img: Could not open 'driver=nbd,host=127.0.0.1,port=PORT,tls-creds=tls0': Failed to read option reply: Cannot read from TLS channel: The TLS connection was non-properly terminated. == check TLS fail over UNIX with no hostname == qemu-img: Could not open 'driver=nbd,path=SOCK_DIR/qemu-nbd.sock,tls-creds=tls0': No hostname for certificate validation @@ -103,14 +103,14 @@ qemu-img: Could not open 'driver=nbd,path=SOCK_DIR/qemu-nbd.sock,tls-creds=tls0' qemu-nbd: TLS handshake failed: The TLS connection was non-properly terminated. == final server log == -qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: Software caused connection abort -qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: Software caused connection abort +qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: The TLS connection was non-properly terminated. +qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: The TLS connection was non-properly terminated. qemu-nbd: option negotiation failed: Verify failed: No certificate was found. qemu-nbd: option negotiation failed: Verify failed: No certificate was found. 
qemu-nbd: option negotiation failed: TLS x509 authz check for DISTINGUISHED-NAME is denied qemu-nbd: option negotiation failed: TLS x509 authz check for DISTINGUISHED-NAME is denied -qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: Software caused connection abort -qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: Software caused connection abort +qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: The TLS connection was non-properly terminated. +qemu-nbd: option negotiation failed: Failed to read opts magic: Cannot read from TLS channel: The TLS connection was non-properly terminated. qemu-nbd: option negotiation failed: TLS handshake failed: An illegal parameter has been received. qemu-nbd: option negotiation failed: TLS handshake failed: An illegal parameter has been received. *** done diff --git a/tests/qemu-iotests/234.out b/tests/qemu-iotests/234.out index ac8b64350c3..be3e138b585 100644 --- a/tests/qemu-iotests/234.out +++ b/tests/qemu-iotests/234.out @@ -10,6 +10,7 @@ Starting migration to B... {"return": {}} {"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "device"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} @@ -27,6 +28,7 @@ Starting migration back to A... 
{"return": {}} {"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "device"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} diff --git a/tests/qemu-iotests/240 b/tests/qemu-iotests/240 index 9b281e1dc03..f8af9ff648e 100755 --- a/tests/qemu-iotests/240 +++ b/tests/qemu-iotests/240 @@ -81,8 +81,6 @@ class TestCase(iotests.QMPTestCase): self.vm.qmp_log('device_del', id='scsi-hd0') self.vm.event_wait('DEVICE_DELETED') - self.vm.qmp_log('device_add', id='scsi-hd1', driver='scsi-hd', drive='hd0', bus="scsi1.0") - self.vm.qmp_log('device_del', id='scsi-hd1') self.vm.event_wait('DEVICE_DELETED') self.vm.qmp_log('blockdev-del', node_name='hd0') diff --git a/tests/qemu-iotests/240.out b/tests/qemu-iotests/240.out index 89ed25e506c..10dcc42e06d 100644 --- a/tests/qemu-iotests/240.out +++ b/tests/qemu-iotests/240.out @@ -46,10 +46,8 @@ {"execute": "device_add", "arguments": {"bus": "scsi0.0", "drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd0"}} {"return": {}} {"execute": "device_add", "arguments": {"bus": "scsi1.0", "drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd1"}} -{"error": {"class": "GenericError", "desc": "Cannot change iothread of active block backend"}} -{"execute": "device_del", "arguments": {"id": "scsi-hd0"}} {"return": {}} -{"execute": "device_add", "arguments": {"bus": "scsi1.0", "drive": "hd0", "driver": "scsi-hd", "id": "scsi-hd1"}} +{"execute": "device_del", "arguments": {"id": "scsi-hd0"}} {"return": {}} {"execute": "device_del", "arguments": {"id": "scsi-hd1"}} {"return": {}} diff --git a/tests/qemu-iotests/250 b/tests/qemu-iotests/250 index af48f83abac..c0a0dbc0ff1 100755 --- a/tests/qemu-iotests/250 +++ b/tests/qemu-iotests/250 @@ -52,11 +52,6 @@ _unsupported_imgopts data_file # bdrv_co_truncate(bs->file) call in qcow2_co_truncate(), which might succeed # anyway. -disk_usage() -{ - du --block-size=1 $1 | awk '{print $1}' -} - size=2100M _make_test_img -o "cluster_size=1M,preallocation=metadata" $size diff --git a/tests/qemu-iotests/253 b/tests/qemu-iotests/253 index 35039d20a89..6da85e6a113 100755 --- a/tests/qemu-iotests/253 +++ b/tests/qemu-iotests/253 @@ -41,6 +41,7 @@ _supported_os Linux _default_cache_mode none _supported_cache_modes none directsync +_require_disk_usage echo echo "=== Check mapping of unaligned raw image ===" diff --git a/tests/qemu-iotests/262.out b/tests/qemu-iotests/262.out index b8a2d3598d8..bd7706b84b1 100644 --- a/tests/qemu-iotests/262.out +++ b/tests/qemu-iotests/262.out @@ -8,6 +8,7 @@ Starting migration to B... 
{"return": {}} {"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "device"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} diff --git a/tests/qemu-iotests/273.out b/tests/qemu-iotests/273.out index 71843f02de6..c19753c685a 100644 --- a/tests/qemu-iotests/273.out +++ b/tests/qemu-iotests/273.out @@ -23,6 +23,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "backing-image": { "backing-image": { @@ -74,6 +75,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 197120, "filename": "TEST_DIR/t.IMGFMT", @@ -102,6 +104,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "backing-image": { "virtual-size": 197120, @@ -142,6 +145,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 197120, "filename": "TEST_DIR/t.IMGFMT.mid", @@ -170,6 +174,7 @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev { "iops_rd": 0, "detect_zeroes": "off", + "active": true, "image": { "virtual-size": 197120, "filename": "TEST_DIR/t.IMGFMT.base", diff --git a/tests/qemu-iotests/280.out b/tests/qemu-iotests/280.out index 546dbb4a68a..37411144caf 100644 --- a/tests/qemu-iotests/280.out +++ b/tests/qemu-iotests/280.out @@ -7,6 +7,7 @@ Enabling migration QMP events on VM... {"return": {}} {"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"status": "device"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} VM is now stopped: diff --git a/tests/qemu-iotests/302 b/tests/qemu-iotests/302 index a6d79e727b5..e980ec513f2 100755 --- a/tests/qemu-iotests/302 +++ b/tests/qemu-iotests/302 @@ -115,13 +115,22 @@ with tarfile.open(tar_file, "w") as tar: disk = tarfile.TarInfo("disk") disk.size = actual_size - tar.addfile(disk) - # 6. Shrink the tar to the actual size, aligned to 512 bytes. + # Since python 3.13 we cannot use addfile() to create the member header. + # Add the tarinfo directly using public but undocumented attributes. - tar_size = offset + (disk.size + 511) & ~511 - tar.fileobj.seek(tar_size) - tar.fileobj.truncate(tar_size) + buf = disk.tobuf(tar.format, tar.encoding, tar.errors) + tar.fileobj.write(buf) + tar.members.append(disk) + + # Update the offset and position to the location of the next member. 
+ + tar.offset = offset + (disk.size + 511) & ~511 + tar.fileobj.seek(tar.offset) + + # 6. Shrink the tar to the actual size. + + tar.fileobj.truncate(tar.offset) with tarfile.open(tar_file) as tar: members = [{"name": m.name, "size": m.size, "offset": m.offset_data} diff --git a/tests/qemu-iotests/308 b/tests/qemu-iotests/308 index ea81dc496a0..6eced3aefb9 100755 --- a/tests/qemu-iotests/308 +++ b/tests/qemu-iotests/308 @@ -51,6 +51,7 @@ _unsupported_fmt vpc _supported_proto file # We create the FUSE export manually _supported_os Linux # We need /dev/urandom +_require_disk_usage # $1: Export ID # $2: Options (beyond the node-name and ID) @@ -290,7 +291,7 @@ echo '--- Try growing non-growable export ---' # Get the current size so we can write beyond the EOF orig_len=$(get_proto_len "$EXT_MP" "$TEST_IMG") -orig_disk_usage=$(stat -c '%b' "$TEST_IMG") +orig_disk_usage=$(disk_usage "$TEST_IMG") # Should fail (exports are non-growable by default) # (Note that qemu-io can never write beyond the EOF, so we have to use @@ -312,7 +313,7 @@ else echo 'OK: Post-truncate image size is as expected' fi -new_disk_usage=$(stat -c '%b' "$TEST_IMG") +new_disk_usage=$(disk_usage "$TEST_IMG") if [ "$new_disk_usage" -gt "$orig_disk_usage" ]; then echo 'OK: Disk usage grew with fallocate' else diff --git a/tests/qemu-iotests/check b/tests/qemu-iotests/check index 56d88ca4236..545f9ec7bdd 100755 --- a/tests/qemu-iotests/check +++ b/tests/qemu-iotests/check @@ -84,7 +84,7 @@ def make_argparser() -> argparse.ArgumentParser: p.set_defaults(imgfmt='raw', imgproto='file') format_list = ['raw', 'bochs', 'cloop', 'parallels', 'qcow', 'qcow2', - 'qed', 'vdi', 'vpc', 'vhdx', 'vmdk', 'luks', 'dmg'] + 'qed', 'vdi', 'vpc', 'vhdx', 'vmdk', 'luks', 'dmg', 'vvfat'] g_fmt = p.add_argument_group( ' image format options', 'The following options set the IMGFMT environment variable. ' diff --git a/tests/qemu-iotests/common.filter b/tests/qemu-iotests/common.filter index fc3c64bcb8e..511a55b1e88 100644 --- a/tests/qemu-iotests/common.filter +++ b/tests/qemu-iotests/common.filter @@ -74,7 +74,7 @@ _filter_qemu_io() { _filter_win32 | \ gsed -e "s/[0-9]* ops\; [0-9/:. sec]* ([0-9/.inf]* [EPTGMKiBbytes]*\/sec and [0-9/.inf]* ops\/sec)/X ops\; XX:XX:XX.X (XXX YYY\/sec and XXX ops\/sec)/" \ - -e "s/: line [0-9][0-9]*: *[0-9][0-9]*\( Aborted\| Killed\)/:\1/" \ + -e "s/: line [0-9][0-9]*: *[0-9][0-9]*\( Aborted\| Killed\) \{2,\}/:\1 /" \ -e "s/qemu-io> //g" } @@ -86,6 +86,12 @@ _filter_qemu() -e $'s#\r##' # QEMU monitor uses \r\n line endings } +# replace occurrences of QEMU_IMG_PROG with "qemu-img" +_filter_qemu_img() +{ + sed -e "s#$QEMU_IMG_PROG#qemu-img#g" +} + # replace problematic QMP output like timestamps _filter_qmp() { diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc index 95c12577dd4..e977cb4eb61 100644 --- a/tests/qemu-iotests/common.rc +++ b/tests/qemu-iotests/common.rc @@ -140,6 +140,12 @@ _optstr_add() fi } +# report real disk usage for sparse files +disk_usage() +{ + du --block-size=1 "$1" | awk '{print $1}' +} + # Set the variables to the empty string to turn Valgrind off # for specific processes, e.g. # $ VALGRIND_QEMU_IO= ./check -qcow2 -valgrind 015 @@ -990,6 +996,36 @@ _require_large_file() rm "$FILENAME" } +# Check whether disk_usage can be reliably used. +_require_disk_usage() +{ + local unusable=false + # ZFS triggers known failures on this front; it does not immediately + # allocate files, and then aggressively compresses writes even when full + # allocation was requested. 
+ if [ -z "$TEST_IMG_FILE" ]; then + FILENAME="$TEST_IMG" + else + FILENAME="$TEST_IMG_FILE" + fi + if [ -e "$FILENAME" ]; then + echo "unwilling to overwrite existing file" + exit 1 + fi + $QEMU_IMG create -f raw "$FILENAME" 5M > /dev/null + if [ $(disk_usage "$FILENAME") -gt $((1024*1024)) ]; then + unusable=true + fi + $QEMU_IMG create -f raw -o preallocation=full "$FILENAME" 5M > /dev/null + if [ $(disk_usage "$FILENAME") -lt $((4*1024*1024)) ]; then + unusable=true + fi + rm -f "$FILENAME" + if $unusable; then + _notrun "file system on $TEST_DIR does not handle sparse files nicely" + fi +} + # Check that a set of devices is available in the QEMU binary # _require_devices() diff --git a/tests/qemu-iotests/fat16.py b/tests/qemu-iotests/fat16.py new file mode 100644 index 00000000000..7d2d0524133 --- /dev/null +++ b/tests/qemu-iotests/fat16.py @@ -0,0 +1,690 @@ +# A simple FAT16 driver that is used to test the `vvfat` driver in QEMU. +# +# Copyright (C) 2024 Amjad Alsharafi +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>. + +from typing import Callable, List, Optional, Protocol, Set +import string + +SECTOR_SIZE = 512 +DIRENTRY_SIZE = 32 +ALLOWED_FILE_CHARS = set( + "!#$%&'()-@^_`{}~" + string.digits + string.ascii_uppercase +) + + +class MBR: + def __init__(self, data: bytes): + assert len(data) == 512 + self.partition_table = [] + for i in range(4): + partition = data[446 + i * 16 : 446 + (i + 1) * 16] + self.partition_table.append( + { + "status": partition[0], + "start_head": partition[1], + "start_sector": partition[2] & 0x3F, + "start_cylinder": ((partition[2] & 0xC0) << 2) + | partition[3], + "type": partition[4], + "end_head": partition[5], + "end_sector": partition[6] & 0x3F, + "end_cylinder": ((partition[6] & 0xC0) << 2) + | partition[7], + "start_lba": int.from_bytes(partition[8:12], "little"), + "size": int.from_bytes(partition[12:16], "little"), + } + ) + + def __str__(self): + return "\n".join( + [ + f"{i}: {partition}" + for i, partition in enumerate(self.partition_table) + ] + ) + + +class FatBootSector: + # pylint: disable=too-many-instance-attributes + def __init__(self, data: bytes): + assert len(data) == 512 + self.bytes_per_sector = int.from_bytes(data[11:13], "little") + self.sectors_per_cluster = data[13] + self.reserved_sectors = int.from_bytes(data[14:16], "little") + self.fat_count = data[16] + self.root_entries = int.from_bytes(data[17:19], "little") + total_sectors_16 = int.from_bytes(data[19:21], "little") + self.media_descriptor = data[21] + self.sectors_per_fat = int.from_bytes(data[22:24], "little") + self.sectors_per_track = int.from_bytes(data[24:26], "little") + self.heads = int.from_bytes(data[26:28], "little") + self.hidden_sectors = int.from_bytes(data[28:32], "little") + total_sectors_32 = int.from_bytes(data[32:36], "little") + assert ( + total_sectors_16 == 0 or total_sectors_32 == 0 + ), "Both total sectors (16 and 32) fields are non-zero" + self.total_sectors = total_sectors_16 or
total_sectors_32 + self.drive_number = data[36] + self.volume_id = int.from_bytes(data[39:43], "little") + self.volume_label = data[43:54].decode("ascii").strip() + self.fs_type = data[54:62].decode("ascii").strip() + + def root_dir_start(self): + """ + Calculate the start sector of the root directory. + """ + return self.reserved_sectors + self.fat_count * self.sectors_per_fat + + def root_dir_size(self): + """ + Calculate the size of the root directory in sectors. + """ + return ( + self.root_entries * DIRENTRY_SIZE + self.bytes_per_sector - 1 + ) // self.bytes_per_sector + + def data_sector_start(self): + """ + Calculate the start sector of the data region. + """ + return self.root_dir_start() + self.root_dir_size() + + def first_sector_of_cluster(self, cluster: int) -> int: + """ + Calculate the first sector of the given cluster. + """ + return ( + self.data_sector_start() + (cluster - 2) * self.sectors_per_cluster + ) + + def cluster_bytes(self): + """ + Calculate the number of bytes in a cluster. + """ + return self.bytes_per_sector * self.sectors_per_cluster + + def __str__(self): + return ( + f"Bytes per sector: {self.bytes_per_sector}\n" + f"Sectors per cluster: {self.sectors_per_cluster}\n" + f"Reserved sectors: {self.reserved_sectors}\n" + f"FAT count: {self.fat_count}\n" + f"Root entries: {self.root_entries}\n" + f"Total sectors: {self.total_sectors}\n" + f"Media descriptor: {self.media_descriptor}\n" + f"Sectors per FAT: {self.sectors_per_fat}\n" + f"Sectors per track: {self.sectors_per_track}\n" + f"Heads: {self.heads}\n" + f"Hidden sectors: {self.hidden_sectors}\n" + f"Drive number: {self.drive_number}\n" + f"Volume ID: {self.volume_id}\n" + f"Volume label: {self.volume_label}\n" + f"FS type: {self.fs_type}\n" + ) + + +class FatDirectoryEntry: + # pylint: disable=too-many-instance-attributes + def __init__(self, data: bytes, sector: int, offset: int): + self.name = data[0:8].decode("ascii").strip() + self.ext = data[8:11].decode("ascii").strip() + self.attributes = data[11] + self.reserved = data[12] + self.create_time_tenth = data[13] + self.create_time = int.from_bytes(data[14:16], "little") + self.create_date = int.from_bytes(data[16:18], "little") + self.last_access_date = int.from_bytes(data[18:20], "little") + high_cluster = int.from_bytes(data[20:22], "little") + self.last_mod_time = int.from_bytes(data[22:24], "little") + self.last_mod_date = int.from_bytes(data[24:26], "little") + low_cluster = int.from_bytes(data[26:28], "little") + self.cluster = (high_cluster << 16) | low_cluster + self.size_bytes = int.from_bytes(data[28:32], "little") + + # extra (to help write back to disk) + self.sector = sector + self.offset = offset + + def as_bytes(self) -> bytes: + return ( + self.name.ljust(8, " ").encode("ascii") + + self.ext.ljust(3, " ").encode("ascii") + + self.attributes.to_bytes(1, "little") + + self.reserved.to_bytes(1, "little") + + self.create_time_tenth.to_bytes(1, "little") + + self.create_time.to_bytes(2, "little") + + self.create_date.to_bytes(2, "little") + + self.last_access_date.to_bytes(2, "little") + + (self.cluster >> 16).to_bytes(2, "little") + + self.last_mod_time.to_bytes(2, "little") + + self.last_mod_date.to_bytes(2, "little") + + (self.cluster & 0xFFFF).to_bytes(2, "little") + + self.size_bytes.to_bytes(4, "little") + ) + + def whole_name(self): + if self.ext: + return f"{self.name}.{self.ext}" + else: + return self.name + + def __str__(self): + return ( + f"Name: {self.name}\n" + f"Ext: {self.ext}\n" + f"Attributes: {self.attributes}\n" + 
f"Reserved: {self.reserved}\n" + f"Create time tenth: {self.create_time_tenth}\n" + f"Create time: {self.create_time}\n" + f"Create date: {self.create_date}\n" + f"Last access date: {self.last_access_date}\n" + f"Last mod time: {self.last_mod_time}\n" + f"Last mod date: {self.last_mod_date}\n" + f"Cluster: {self.cluster}\n" + f"Size: {self.size_bytes}\n" + ) + + def __repr__(self): + # convert to dict + return str(vars(self)) + + +class SectorReader(Protocol): + def __call__(self, start_sector: int, num_sectors: int = 1) -> bytes: ... + +# pylint: disable=broad-exception-raised +class Fat16: + def __init__( + self, + start_sector: int, + size: int, + sector_reader: SectorReader, + sector_writer: Callable[[int, bytes], None] + ): + self.start_sector = start_sector + self.size_in_sectors = size + self.sector_reader = sector_reader + self.sector_writer = sector_writer + + self.boot_sector = FatBootSector(self.sector_reader(start_sector, 1)) + + fat_size_in_sectors = ( + self.boot_sector.sectors_per_fat * self.boot_sector.fat_count + ) + self.fats = self.read_sectors( + self.boot_sector.reserved_sectors, fat_size_in_sectors + ) + self.fats_dirty_sectors: Set[int] = set() + + def read_sectors(self, start_sector: int, num_sectors: int) -> bytes: + return self.sector_reader(start_sector + self.start_sector, + num_sectors) + + def write_sectors(self, start_sector: int, data: bytes) -> None: + return self.sector_writer(start_sector + self.start_sector, data) + + def directory_from_bytes( + self, data: bytes, start_sector: int + ) -> List[FatDirectoryEntry]: + """ + Convert `bytes` into a list of `FatDirectoryEntry` objects. + Will ignore long file names. + Will stop when it encounters a 0x00 byte. + """ + + entries = [] + for i in range(0, len(data), DIRENTRY_SIZE): + entry = data[i : i + DIRENTRY_SIZE] + + current_sector = start_sector + (i // SECTOR_SIZE) + current_offset = i % SECTOR_SIZE + + if entry[0] == 0: + break + + if entry[0] == 0xE5: + # Deleted file + continue + + if entry[11] & 0xF == 0xF: + # Long file name + continue + + entries.append( + FatDirectoryEntry(entry, current_sector, current_offset) + ) + return entries + + def read_root_directory(self) -> List[FatDirectoryEntry]: + root_dir = self.read_sectors( + self.boot_sector.root_dir_start(), self.boot_sector.root_dir_size() + ) + return self.directory_from_bytes( + root_dir, self.boot_sector.root_dir_start() + ) + + def read_fat_entry(self, cluster: int) -> int: + """ + Read the FAT entry for the given cluster. + """ + fat_offset = cluster * 2 # FAT16 + return int.from_bytes(self.fats[fat_offset : fat_offset + 2], "little") + + def write_fat_entry(self, cluster: int, value: int) -> None: + """ + Write the FAT entry for the given cluster. + """ + fat_offset = cluster * 2 + self.fats = ( + self.fats[:fat_offset] + + value.to_bytes(2, "little") + + self.fats[fat_offset + 2 :] + ) + self.fats_dirty_sectors.add(fat_offset // SECTOR_SIZE) + + def flush_fats(self) -> None: + """ + Write the FATs back to the disk. + """ + for sector in self.fats_dirty_sectors: + data = self.fats[sector * SECTOR_SIZE : (sector + 1) * SECTOR_SIZE] + sector = self.boot_sector.reserved_sectors + sector + self.write_sectors(sector, data) + self.fats_dirty_sectors = set() + + def next_cluster(self, cluster: int) -> Optional[int]: + """ + Get the next cluster in the chain. + If its `None`, then its the last cluster. + The function will crash if the next cluster + is `FREE` (unexpected) or invalid entry. 
+ """ + fat_entry = self.read_fat_entry(cluster) + if fat_entry == 0: + raise Exception("Unexpected: FREE cluster") + if fat_entry == 1: + raise Exception("Unexpected: RESERVED cluster") + if fat_entry >= 0xFFF8: + return None + if fat_entry >= 0xFFF7: + raise Exception("Invalid FAT entry") + + return fat_entry + + def next_free_cluster(self) -> int: + """ + Find the next free cluster. + """ + # simple linear search + for i in range(2, 0xFFFF): + if self.read_fat_entry(i) == 0: + return i + raise Exception("No free clusters") + + def next_free_cluster_non_continuous(self) -> int: + """ + Find the next free cluster, but makes sure + that the cluster before and after it are not allocated. + """ + # simple linear search + before = False + for i in range(2, 0xFFFF): + if self.read_fat_entry(i) == 0: + if before and self.read_fat_entry(i + 1) == 0: + return i + else: + before = True + else: + before = False + + raise Exception("No free clusters") + + def read_cluster(self, cluster: int) -> bytes: + """ + Read the cluster at the given cluster. + """ + return self.read_sectors( + self.boot_sector.first_sector_of_cluster(cluster), + self.boot_sector.sectors_per_cluster, + ) + + def write_cluster(self, cluster: int, data: bytes) -> None: + """ + Write the cluster at the given cluster. + """ + assert len(data) == self.boot_sector.cluster_bytes() + self.write_sectors( + self.boot_sector.first_sector_of_cluster(cluster), + data, + ) + + def read_directory( + self, cluster: Optional[int] + ) -> List[FatDirectoryEntry]: + """ + Read the directory at the given cluster. + """ + entries = [] + while cluster is not None: + data = self.read_cluster(cluster) + entries.extend( + self.directory_from_bytes( + data, self.boot_sector.first_sector_of_cluster(cluster) + ) + ) + cluster = self.next_cluster(cluster) + return entries + + def add_direntry( + self, cluster: Optional[int], name: str, ext: str, attributes: int + ) -> FatDirectoryEntry: + """ + Add a new directory entry to the given cluster. + If the cluster is `None`, then it will be added to the root directory. 
+ """ + + def find_free_entry(data: bytes) -> Optional[int]: + for i in range(0, len(data), DIRENTRY_SIZE): + entry = data[i : i + DIRENTRY_SIZE] + if entry[0] == 0 or entry[0] == 0xE5: + return i + return None + + assert len(name) <= 8, "Name must be 8 characters or less" + assert len(ext) <= 3, "Ext must be 3 characters or less" + assert attributes % 0x15 != 0x15, "Invalid attributes" + + # initial dummy data + new_entry = FatDirectoryEntry(b"\0" * 32, 0, 0) + new_entry.name = name.ljust(8, " ") + new_entry.ext = ext.ljust(3, " ") + new_entry.attributes = attributes + new_entry.reserved = 0 + new_entry.create_time_tenth = 0 + new_entry.create_time = 0 + new_entry.create_date = 0 + new_entry.last_access_date = 0 + new_entry.last_mod_time = 0 + new_entry.last_mod_date = 0 + new_entry.cluster = self.next_free_cluster() + new_entry.size_bytes = 0 + + # mark as EOF + self.write_fat_entry(new_entry.cluster, 0xFFFF) + + if cluster is None: + for i in range(self.boot_sector.root_dir_size()): + sector_data = self.read_sectors( + self.boot_sector.root_dir_start() + i, 1 + ) + offset = find_free_entry(sector_data) + if offset is not None: + new_entry.sector = self.boot_sector.root_dir_start() + i + new_entry.offset = offset + self.update_direntry(new_entry) + return new_entry + else: + while cluster is not None: + data = self.read_cluster(cluster) + offset = find_free_entry(data) + if offset is not None: + new_entry.sector = ( + self.boot_sector.first_sector_of_cluster(cluster) + + (offset // SECTOR_SIZE)) + new_entry.offset = offset % SECTOR_SIZE + self.update_direntry(new_entry) + return new_entry + cluster = self.next_cluster(cluster) + + raise Exception("No free directory entries") + + def update_direntry(self, entry: FatDirectoryEntry) -> None: + """ + Write the directory entry back to the disk. + """ + sector = self.read_sectors(entry.sector, 1) + sector = ( + sector[: entry.offset] + + entry.as_bytes() + + sector[entry.offset + DIRENTRY_SIZE :] + ) + self.write_sectors(entry.sector, sector) + + def find_direntry(self, path: str) -> Optional[FatDirectoryEntry]: + """ + Find the directory entry for the given path. + """ + assert path[0] == "/", "Path must start with /" + + path = path[1:] # remove the leading / + parts = path.split("/") + directory = self.read_root_directory() + + current_entry = None + + for i, part in enumerate(parts): + is_last = i == len(parts) - 1 + + for entry in directory: + if entry.whole_name() == part: + current_entry = entry + break + if current_entry is None: + return None + + if is_last: + return current_entry + + if current_entry.attributes & 0x10 == 0: + raise Exception( + f"{current_entry.whole_name()} is not a directory" + ) + + directory = self.read_directory(current_entry.cluster) + + assert False, "Exited loop with is_last == False" + + def read_file(self, entry: Optional[FatDirectoryEntry]) -> Optional[bytes]: + """ + Read the content of the file at the given path. + """ + if entry is None: + return None + if entry.attributes & 0x10 != 0: + raise Exception(f"{entry.whole_name()} is a directory") + + data = b"" + cluster: Optional[int] = entry.cluster + while cluster is not None and len(data) <= entry.size_bytes: + data += self.read_cluster(cluster) + cluster = self.next_cluster(cluster) + return data[: entry.size_bytes] + + def truncate_file( + self, + entry: FatDirectoryEntry, + new_size: int, + allocate_non_continuous: bool = False, + ) -> None: + """ + Truncate the file at the given path to the new size. 
+ """ + if entry is None: + raise Exception("entry is None") + if entry.attributes & 0x10 != 0: + raise Exception(f"{entry.whole_name()} is a directory") + + def clusters_from_size(size: int) -> int: + return ( + size + self.boot_sector.cluster_bytes() - 1 + ) // self.boot_sector.cluster_bytes() + + # First, allocate new FATs if we need to + required_clusters = clusters_from_size(new_size) + current_clusters = clusters_from_size(entry.size_bytes) + + affected_clusters = set() + + # Keep at least one cluster, easier to manage this way + if required_clusters == 0: + required_clusters = 1 + if current_clusters == 0: + current_clusters = 1 + + cluster: Optional[int] + + if required_clusters > current_clusters: + # Allocate new clusters + cluster = entry.cluster + to_add = required_clusters + for _ in range(current_clusters - 1): + to_add -= 1 + assert cluster is not None, "Cluster is None" + affected_clusters.add(cluster) + cluster = self.next_cluster(cluster) + assert required_clusters > 0, "No new clusters to allocate" + assert cluster is not None, "Cluster is None" + assert ( + self.next_cluster(cluster) is None + ), "Cluster is not the last cluster" + + # Allocate new clusters + for _ in range(to_add - 1): + if allocate_non_continuous: + new_cluster = self.next_free_cluster_non_continuous() + else: + new_cluster = self.next_free_cluster() + self.write_fat_entry(cluster, new_cluster) + self.write_fat_entry(new_cluster, 0xFFFF) + cluster = new_cluster + + elif required_clusters < current_clusters: + # Truncate the file + cluster = entry.cluster + for _ in range(required_clusters - 1): + assert cluster is not None, "Cluster is None" + cluster = self.next_cluster(cluster) + assert cluster is not None, "Cluster is None" + + next_cluster = self.next_cluster(cluster) + # mark last as EOF + self.write_fat_entry(cluster, 0xFFFF) + # free the rest + while next_cluster is not None: + cluster = next_cluster + next_cluster = self.next_cluster(next_cluster) + self.write_fat_entry(cluster, 0) + + self.flush_fats() + + # verify number of clusters + cluster = entry.cluster + count = 0 + while cluster is not None: + count += 1 + affected_clusters.add(cluster) + cluster = self.next_cluster(cluster) + assert ( + count == required_clusters + ), f"Expected {required_clusters} clusters, got {count}" + + # update the size + entry.size_bytes = new_size + self.update_direntry(entry) + + # trigger every affected cluster + for cluster in affected_clusters: + first_sector = self.boot_sector.first_sector_of_cluster(cluster) + first_sector_data = self.read_sectors(first_sector, 1) + self.write_sectors(first_sector, first_sector_data) + + def write_file(self, entry: FatDirectoryEntry, data: bytes) -> None: + """ + Write the content of the file at the given path. 
+ """ + if entry is None: + raise Exception("entry is None") + if entry.attributes & 0x10 != 0: + raise Exception(f"{entry.whole_name()} is a directory") + + data_len = len(data) + + self.truncate_file(entry, data_len) + + cluster: Optional[int] = entry.cluster + while cluster is not None: + data_to_write = data[: self.boot_sector.cluster_bytes()] + if len(data_to_write) < self.boot_sector.cluster_bytes(): + old_data = self.read_cluster(cluster) + data_to_write += old_data[len(data_to_write) :] + + self.write_cluster(cluster, data_to_write) + data = data[self.boot_sector.cluster_bytes() :] + if len(data) == 0: + break + cluster = self.next_cluster(cluster) + + assert ( + len(data) == 0 + ), "Data was not written completely, clusters missing" + + def create_file(self, path: str) -> Optional[FatDirectoryEntry]: + """ + Create a new file at the given path. + """ + assert path[0] == "/", "Path must start with /" + + path = path[1:] # remove the leading / + + parts = path.split("/") + + directory_cluster = None + directory = self.read_root_directory() + + parts, filename = parts[:-1], parts[-1] + + for _, part in enumerate(parts): + current_entry = None + for entry in directory: + if entry.whole_name() == part: + current_entry = entry + break + if current_entry is None: + return None + + if current_entry.attributes & 0x10 == 0: + raise Exception( + f"{current_entry.whole_name()} is not a directory" + ) + + directory = self.read_directory(current_entry.cluster) + directory_cluster = current_entry.cluster + + # add new entry to the directory + + filename, ext = filename.split(".") + + if len(ext) > 3: + raise Exception("Ext must be 3 characters or less") + if len(filename) > 8: + raise Exception("Name must be 8 characters or less") + + for c in filename + ext: + + if c not in ALLOWED_FILE_CHARS: + raise Exception("Invalid character in filename") + + return self.add_direntry(directory_cluster, filename, ext, 0) diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py index ea48af4a7b6..05274772ce4 100644 --- a/tests/qemu-iotests/iotests.py +++ b/tests/qemu-iotests/iotests.py @@ -601,13 +601,23 @@ def filter_chown(msg): return chown_re.sub("chown UID:GID", msg) def filter_qmp_event(event): - '''Filter a QMP event dict''' + '''Filter the timestamp of a QMP event dict''' event = dict(event) if 'timestamp' in event: event['timestamp']['seconds'] = 'SECS' event['timestamp']['microseconds'] = 'USECS' return event +def filter_block_job(event): + '''Filter the offset and length of a QMP block job event dict''' + event = dict(event) + if 'data' in event: + if 'offset' in event['data']: + event['data']['offset'] = 'OFFSET' + if 'len' in event['data']: + event['data']['len'] = 'LEN' + return event + def filter_qmp(qmsg, filter_fn): '''Given a string filter, filter a QMP object's values. 
filter_fn takes a (key, value) pair.''' @@ -701,6 +711,10 @@ def _filter(_key, value): def filter_nbd_exports(output: str) -> str: return re.sub(r'((min|opt|max) block): [0-9]+', r'\1: XXX', output) +def filter_qtest(output: str) -> str: + output = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', output) + output = re.sub(r'\n?\[I \+\d+\.\d+\] CLOSED\n?$', '', output) + return output Msg = TypeVar('Msg', Dict[str, Any], List[Any], str) @@ -909,6 +923,10 @@ def add_incoming(self, addr): self._args.append(addr) return self + def add_paused(self): + self._args.append('-S') + return self + def hmp(self, command_line: str, use_log: bool = False) -> QMPMessage: cmd = 'human-monitor-command' kwargs: Dict[str, Any] = {'command-line': command_line} @@ -1614,10 +1632,13 @@ def write(self, arg=None): self.stream.write(arg) class ReproducibleTestRunner(unittest.TextTestRunner): - def __init__(self, stream: Optional[TextIO] = None, - resultclass: Type[unittest.TestResult] = - ReproducibleTestResult, - **kwargs: Any) -> None: + def __init__( + self, + stream: Optional[TextIO] = None, + resultclass: Type[unittest.TextTestResult] = + ReproducibleTestResult, + **kwargs: Any + ) -> None: rstream = ReproducibleStreamWrapper(stream or sys.stdout) super().__init__(stream=rstream, # type: ignore descriptions=True, diff --git a/tests/qemu-iotests/pylintrc b/tests/qemu-iotests/pylintrc index 05b75ee59bf..c5f4833e458 100644 --- a/tests/qemu-iotests/pylintrc +++ b/tests/qemu-iotests/pylintrc @@ -13,6 +13,7 @@ disable=invalid-name, no-else-return, too-few-public-methods, too-many-arguments, + too-many-positional-arguments, too-many-branches, too-many-lines, too-many-locals, diff --git a/tests/qemu-iotests/testenv.py b/tests/qemu-iotests/testenv.py index 96d69e56963..6326e46b7b1 100644 --- a/tests/qemu-iotests/testenv.py +++ b/tests/qemu-iotests/testenv.py @@ -240,9 +240,12 @@ def __init__(self, source_dir: str, build_dir: str, ('aarch64', 'virt'), ('avr', 'mega2560'), ('m68k', 'virt'), + ('or1k', 'virt'), ('riscv32', 'virt'), ('riscv64', 'virt'), ('rx', 'gdbsim-r5f562n8'), + ('sh4', 'r2d'), + ('sh4eb', 'r2d'), ('tricore', 'tricore_testboard') ) for suffix, machine in machine_map: @@ -255,7 +258,7 @@ def __init__(self, source_dir: str, build_dir: str, self.qemu_img_options = os.getenv('QEMU_IMG_OPTIONS') self.qemu_nbd_options = os.getenv('QEMU_NBD_OPTIONS') - is_generic = self.imgfmt not in ['bochs', 'cloop', 'dmg'] + is_generic = self.imgfmt not in ['bochs', 'cloop', 'dmg', 'vvfat'] self.imgfmt_generic = 'true' if is_generic else 'false' self.qemu_io_options = f'--cache {self.cachemode} --aio {self.aiomode}' diff --git a/tests/qemu-iotests/tests/backup-discard-source b/tests/qemu-iotests/tests/backup-discard-source index 2391b12acd3..17fef9c6d34 100755 --- a/tests/qemu-iotests/tests/backup-discard-source +++ b/tests/qemu-iotests/tests/backup-discard-source @@ -28,20 +28,14 @@ from iotests import qemu_img_create, qemu_img_map, qemu_io temp_img = os.path.join(iotests.test_dir, 'temp') source_img = os.path.join(iotests.test_dir, 'source') target_img = os.path.join(iotests.test_dir, 'target') -size = '1M' - - -def get_actual_size(vm, node_name): - nodes = vm.cmd('query-named-block-nodes', flat=True) - node = next(n for n in nodes if n['node-name'] == node_name) - return node['image']['actual-size'] +size = 1024 * 1024 class TestBackup(iotests.QMPTestCase): def setUp(self): - qemu_img_create('-f', iotests.imgfmt, source_img, size) - qemu_img_create('-f', iotests.imgfmt, temp_img, size) - qemu_img_create('-f', iotests.imgfmt, 
target_img, size) + qemu_img_create('-f', iotests.imgfmt, source_img, str(size)) + qemu_img_create('-f', iotests.imgfmt, temp_img, str(size)) + qemu_img_create('-f', iotests.imgfmt, target_img, str(size)) qemu_io('-c', 'write 0 1M', source_img) self.vm = iotests.VM() @@ -84,7 +78,12 @@ class TestBackup(iotests.QMPTestCase): } }) - self.assertLess(get_actual_size(self.vm, 'temp'), 512 * 1024) + self.bitmap = { + 'node': 'temp', + 'name': 'bitmap0' + } + + self.vm.cmd('block-dirty-bitmap-add', self.bitmap) def tearDown(self): # That should fail, because region is discarded @@ -98,7 +97,7 @@ class TestBackup(iotests.QMPTestCase): mapping = qemu_img_map(temp_img) self.assertEqual(len(mapping), 1) self.assertEqual(mapping[0]['start'], 0) - self.assertEqual(mapping[0]['length'], 1024 * 1024) + self.assertEqual(mapping[0]['length'], size) self.assertEqual(mapping[0]['data'], False) os.remove(temp_img) @@ -113,6 +112,13 @@ class TestBackup(iotests.QMPTestCase): self.vm.event_wait(name='BLOCK_JOB_COMPLETED') + def get_bitmap_count(self): + nodes = self.vm.cmd('query-named-block-nodes', flat=True) + temp = next(n for n in nodes if n['node-name'] == 'temp') + bitmap = temp['dirty-bitmaps'][0] + assert bitmap['name'] == self.bitmap['name'] + return bitmap['count'] + def test_discard_written(self): """ 1. Guest writes @@ -125,7 +131,7 @@ class TestBackup(iotests.QMPTestCase): self.assert_qmp(result, 'return', '') # Check that data is written to temporary image - self.assertGreater(get_actual_size(self.vm, 'temp'), 1024 * 1024) + self.assertEqual(self.get_bitmap_count(), size) self.do_backup() @@ -138,13 +144,18 @@ class TestBackup(iotests.QMPTestCase): """ self.do_backup() + # backup job did discard operation and pollute the bitmap, + # we have to clean the bitmap, to check next write + self.assertEqual(self.get_bitmap_count(), size) + self.vm.cmd('block-dirty-bitmap-clear', self.bitmap) + # Try trigger copy-before-write operation result = self.vm.hmp_qemu_io('cbw', 'write 0 1M') self.assert_qmp(result, 'return', '') # Check that data is not written to temporary image, as region # is discarded from copy-before-write process - self.assertLess(get_actual_size(self.vm, 'temp'), 512 * 1024) + self.assertEqual(self.get_bitmap_count(), 0) if __name__ == '__main__': diff --git a/tests/qemu-iotests/tests/commit-zero-blocks b/tests/qemu-iotests/tests/commit-zero-blocks new file mode 100755 index 00000000000..de00273e727 --- /dev/null +++ b/tests/qemu-iotests/tests/commit-zero-blocks @@ -0,0 +1,96 @@ +#!/usr/bin/env bash +# group: rw quick +# +# Test for commit of discarded blocks +# +# This tests committing a live snapshot where some of the blocks that +# are present in the base image are discarded in the intermediate image. +# This intends to check that these blocks are also discarded in the base +# image after the commit. +# +# Copyright (C) 2024 Vincent Vanlaer. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +# creator +owner=libvirt-e6954efa@volkihar.be + +seq=`basename $0` +echo "QA output created by $seq" + +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_qemu + _rm_test_img "${TEST_IMG}.base" + _rm_test_img "${TEST_IMG}.mid" + _cleanup_test_img +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +cd .. +. ./common.rc +. ./common.filter +. ./common.qemu + +_supported_fmt qcow2 +_supported_proto file + +size="1M" + +TEST_IMG="$TEST_IMG.base" _make_test_img $size +TEST_IMG="$TEST_IMG.mid" _make_test_img -b "$TEST_IMG.base" -F $IMGFMT $size +_make_test_img -b "${TEST_IMG}.mid" -F $IMGFMT $size + +$QEMU_IO -c "write -P 0x01 64k 128k" "$TEST_IMG.base" | _filter_qemu_io +$QEMU_IO -c "discard 64k 64k" "$TEST_IMG.mid" | _filter_qemu_io + +echo +echo "=== Base image info before commit ===" +TEST_IMG="${TEST_IMG}.base" _img_info | _filter_img_info +$QEMU_IMG map --output=json "$TEST_IMG.base" | _filter_qemu_img_map + +echo +echo "=== Middle image info before commit ===" +TEST_IMG="${TEST_IMG}.mid" _img_info | _filter_img_info +$QEMU_IMG map --output=json "$TEST_IMG.mid" | _filter_qemu_img_map + +echo +echo === Running QEMU Live Commit Test === +echo + +qemu_comm_method="qmp" +_launch_qemu -drive file="${TEST_IMG}",if=virtio,id=test +h=$QEMU_HANDLE + +_send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" "return" + +_send_qemu_cmd $h "{ 'execute': 'block-commit', + 'arguments': { 'device': 'test', + 'top': '"${TEST_IMG}.mid"', + 'base': '"${TEST_IMG}.base"'} }" '"status": "null"' + +_cleanup_qemu + +echo +echo "=== Base image info after commit ===" +TEST_IMG="${TEST_IMG}.base" _img_info | _filter_img_info +$QEMU_IMG map --output=json "$TEST_IMG.base" | _filter_qemu_img_map + +# success, all done +echo "*** done" +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/tests/commit-zero-blocks.out b/tests/qemu-iotests/tests/commit-zero-blocks.out new file mode 100644 index 00000000000..85bdc46aafd --- /dev/null +++ b/tests/qemu-iotests/tests/commit-zero-blocks.out @@ -0,0 +1,54 @@ +QA output created by commit-zero-blocks +Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=1048576 +Formatting 'TEST_DIR/t.IMGFMT.mid', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT.mid backing_fmt=IMGFMT +wrote 131072/131072 bytes at offset 65536 +128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +discard 65536/65536 bytes at offset 65536 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +=== Base image info before commit === +image: TEST_DIR/t.IMGFMT.base +file format: IMGFMT +virtual size: 1 MiB (1048576 bytes) +[{ "start": 0, "length": 65536, "depth": 0, "present": false, "zero": true, "data": false, "compressed": false}, +{ "start": 65536, "length": 131072, "depth": 0, "present": true, "zero": false, "data": true, "compressed": false, "offset": OFFSET}, +{ "start": 196608, "length": 851968, "depth": 0, "present": false, "zero": true, "data": false, "compressed": false}] + +=== Middle image info before commit === +image: TEST_DIR/t.IMGFMT.mid +file format: IMGFMT +virtual size: 1 MiB (1048576 bytes) +backing file: TEST_DIR/t.IMGFMT.base +backing file format: IMGFMT +[{ "start": 0, "length": 65536, "depth": 1, "present": false, "zero": true, "data": false, "compressed": false}, +{ "start": 65536, "length": 65536, "depth": 0, "present": true, "zero": true, "data": false, "compressed": false}, +{ "start": 131072, "length": 
65536, "depth": 1, "present": true, "zero": false, "data": true, "compressed": false, "offset": OFFSET}, +{ "start": 196608, "length": 851968, "depth": 1, "present": false, "zero": true, "data": false, "compressed": false}] + +=== Running QEMU Live Commit Test === + +{ 'execute': 'qmp_capabilities' } +{"return": {}} +{ 'execute': 'block-commit', + 'arguments': { 'device': 'test', + 'top': 'TEST_DIR/t.IMGFMT.mid', + 'base': 'TEST_DIR/t.IMGFMT.base'} } +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "test"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "test"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "test"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "test"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "test", "len": 1048576, "offset": 1048576, "speed": 0, "type": "commit"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "test"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "test"}} + +=== Base image info after commit === +image: TEST_DIR/t.IMGFMT.base +file format: IMGFMT +virtual size: 1 MiB (1048576 bytes) +[{ "start": 0, "length": 65536, "depth": 0, "present": false, "zero": true, "data": false, "compressed": false}, +{ "start": 65536, "length": 65536, "depth": 0, "present": true, "zero": true, "data": false, "compressed": false}, +{ "start": 131072, "length": 65536, "depth": 0, "present": true, "zero": false, "data": true, "compressed": false, "offset": OFFSET}, +{ "start": 196608, "length": 851968, "depth": 0, "present": false, "zero": true, "data": false, "compressed": false}] +*** done diff --git a/tests/qemu-iotests/tests/copy-before-write b/tests/qemu-iotests/tests/copy-before-write index d33bea577db..236cb8ac374 100755 --- a/tests/qemu-iotests/tests/copy-before-write +++ b/tests/qemu-iotests/tests/copy-before-write @@ -95,8 +95,69 @@ class TestCbwError(iotests.QMPTestCase): self.vm.shutdown() log = self.vm.get_log() - log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log) - log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log) + log = iotests.filter_qtest(log) + log = iotests.filter_qemu_io(log) + return log + + def do_cbw_error_via_blockdev_backup(self, on_cbw_error=None): + self.vm.cmd('blockdev-add', { + 'node-name': 'source', + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'file', + 'filename': source_img + } + }) + + self.vm.cmd('blockdev-add', { + 'node-name': 'target', + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'blkdebug', + 'image': { + 'driver': 'file', + 'filename': temp_img + }, + 'inject-error': [ + { + 'event': 'write_aio', + 'errno': 5, + 'immediately': False, + 'once': True + } + ] + } + }) + + blockdev_backup_options = { + 'device': 'source', + 'target': 'target', + 'sync': 'none', + 'job-id': 'job-id', + 'filter-node-name': 'cbw' + } + + if on_cbw_error: + blockdev_backup_options['on-cbw-error'] = on_cbw_error + + self.vm.cmd('blockdev-backup', blockdev_backup_options) + + self.vm.cmd('blockdev-add', { + 'node-name': 'access', + 'driver': 
'snapshot-access', + 'file': 'cbw' + }) + + result = self.vm.qmp('human-monitor-command', + command_line='qemu-io cbw "write 0 1M"') + self.assert_qmp(result, 'return', '') + + result = self.vm.qmp('human-monitor-command', + command_line='qemu-io access "read 0 1M"') + self.assert_qmp(result, 'return', '') + + self.vm.shutdown() + log = self.vm.get_log() log = iotests.filter_qemu_io(log) return log @@ -124,6 +185,39 @@ read failed: Permission denied write failed: Input/output error read 1048576/1048576 bytes at offset 0 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +""") + + def test_break_snapshot_policy_forwarding(self): + """Ensure CBW filter accepts break-snapshot policy + specified in blockdev-backup QMP command. + """ + log = self.do_cbw_error_via_blockdev_backup('break-snapshot') + self.assertEqual(log, """\ +wrote 1048576/1048576 bytes at offset 0 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read failed: Permission denied +""") + + def test_break_guest_write_policy_forwarding(self): + """Ensure CBW filter accepts break-guest-write policy + specified in blockdev-backup QMP command. + """ + log = self.do_cbw_error_via_blockdev_backup('break-guest-write') + self.assertEqual(log, """\ +write failed: Input/output error +read 1048576/1048576 bytes at offset 0 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +""") + + def test_default_on_cbw_error_policy_forwarding(self): + """Ensure break-guest-write policy is used by default when + on-cbw-error is not explicitly specified. + """ + log = self.do_cbw_error_via_blockdev_backup() + self.assertEqual(log, """\ +write failed: Input/output error +read 1048576/1048576 bytes at offset 0 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) """) def do_cbw_timeout(self, on_cbw_error): diff --git a/tests/qemu-iotests/tests/copy-before-write.out b/tests/qemu-iotests/tests/copy-before-write.out index 89968f35d78..2f7d3902f23 100644 --- a/tests/qemu-iotests/tests/copy-before-write.out +++ b/tests/qemu-iotests/tests/copy-before-write.out @@ -1,5 +1,5 @@ -.... +....... 
---------------------------------------------------------------------- -Ran 4 tests +Ran 7 tests OK diff --git a/tests/qemu-iotests/tests/graph-changes-while-io b/tests/qemu-iotests/tests/graph-changes-while-io index 194fda500e6..dca1167b6d1 100755 --- a/tests/qemu-iotests/tests/graph-changes-while-io +++ b/tests/qemu-iotests/tests/graph-changes-while-io @@ -27,6 +27,7 @@ from iotests import imgfmt, qemu_img, qemu_img_create, qemu_io, \ top = os.path.join(iotests.test_dir, 'top.img') +mid = os.path.join(iotests.test_dir, 'mid.img') nbd_sock = os.path.join(iotests.sock_dir, 'nbd.sock') @@ -57,6 +58,16 @@ class TestGraphChangesWhileIO(QMPTestCase): def tearDown(self) -> None: self.qsd.stop() + os.remove(top) + + def _wait_for_blockjob(self, status: str) -> None: + done = False + while not done: + for event in self.qsd.get_qmp().get_events(wait=10.0): + if event['event'] != 'JOB_STATUS_CHANGE': + continue + if event['data']['status'] == status: + done = True def test_blockdev_add_while_io(self) -> None: # Run qemu-img bench in the background @@ -116,15 +127,92 @@ class TestGraphChangesWhileIO(QMPTestCase): 'device': 'job0', }) - cancelled = False - while not cancelled: - for event in self.qsd.get_qmp().get_events(wait=10.0): - if event['event'] != 'JOB_STATUS_CHANGE': - continue - if event['data']['status'] == 'null': - cancelled = True + self._wait_for_blockjob('null') + + bench_thr.join() + + def test_remove_lower_snapshot_while_io(self) -> None: + # Run qemu-img bench in the background + bench_thr = Thread(target=do_qemu_img_bench, args=(100000, )) + bench_thr.start() + + # While I/O is performed on 'node0' node, consequently add 2 snapshots + # on top of it, then remove (commit) them starting from lower one. + while bench_thr.is_alive(): + # Recreate snapshot images on every iteration + qemu_img_create('-f', imgfmt, mid, '1G') + qemu_img_create('-f', imgfmt, top, '1G') + + self.qsd.cmd('blockdev-add', { + 'driver': imgfmt, + 'node-name': 'mid', + 'file': { + 'driver': 'file', + 'filename': mid + } + }) + + self.qsd.cmd('blockdev-snapshot', { + 'node': 'node0', + 'overlay': 'mid', + }) + + self.qsd.cmd('blockdev-add', { + 'driver': imgfmt, + 'node-name': 'top', + 'file': { + 'driver': 'file', + 'filename': top + } + }) + + self.qsd.cmd('blockdev-snapshot', { + 'node': 'mid', + 'overlay': 'top', + }) + + self.qsd.cmd('block-commit', { + 'job-id': 'commit-mid', + 'device': 'top', + 'top-node': 'mid', + 'base-node': 'node0', + 'auto-finalize': True, + 'auto-dismiss': False, + }) + + self._wait_for_blockjob('concluded') + self.qsd.cmd('job-dismiss', { + 'id': 'commit-mid', + }) + + self.qsd.cmd('block-commit', { + 'job-id': 'commit-top', + 'device': 'top', + 'top-node': 'top', + 'base-node': 'node0', + 'auto-finalize': True, + 'auto-dismiss': False, + }) + + self._wait_for_blockjob('ready') + self.qsd.cmd('job-complete', { + 'id': 'commit-top', + }) + + self._wait_for_blockjob('concluded') + self.qsd.cmd('job-dismiss', { + 'id': 'commit-top', + }) + + self.qsd.cmd('blockdev-del', { + 'node-name': 'mid' + }) + self.qsd.cmd('blockdev-del', { + 'node-name': 'top' + }) bench_thr.join() + os.remove(mid) if __name__ == '__main__': # Format must support raw backing files diff --git a/tests/qemu-iotests/tests/graph-changes-while-io.out b/tests/qemu-iotests/tests/graph-changes-while-io.out index fbc63e62f88..8d7e9967009 100644 --- a/tests/qemu-iotests/tests/graph-changes-while-io.out +++ b/tests/qemu-iotests/tests/graph-changes-while-io.out @@ -1,5 +1,5 @@ -.. +... 
---------------------------------------------------------------------- -Ran 2 tests +Ran 3 tests OK diff --git a/tests/qemu-iotests/tests/inactive-node-nbd b/tests/qemu-iotests/tests/inactive-node-nbd new file mode 100755 index 00000000000..a95b37e7962 --- /dev/null +++ b/tests/qemu-iotests/tests/inactive-node-nbd @@ -0,0 +1,303 @@ +#!/usr/bin/env python3 +# group: rw quick +# +# Copyright (C) Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +# Creator/Owner: Kevin Wolf + +import iotests + +from iotests import QemuIoInteractive +from iotests import filter_qemu_io, filter_qtest, filter_qmp_testfiles + +iotests.script_initialize(supported_fmts=['generic'], + supported_protocols=['file'], + supported_platforms=['linux']) + +def get_export(node_name='disk-fmt', allow_inactive=None): + exp = { + 'id': 'exp0', + 'type': 'nbd', + 'node-name': node_name, + 'writable': True, + } + + if allow_inactive is not None: + exp['allow-inactive'] = allow_inactive + + return exp + +def node_is_active(_vm, node_name): + nodes = _vm.cmd('query-named-block-nodes', flat=True) + node = next(n for n in nodes if n['node-name'] == node_name) + return node['active'] + +with iotests.FilePath('disk.img') as path, \ + iotests.FilePath('snap.qcow2') as snap_path, \ + iotests.FilePath('snap2.qcow2') as snap2_path, \ + iotests.FilePath('target.img') as target_path, \ + iotests.FilePath('nbd.sock', base_dir=iotests.sock_dir) as nbd_sock, \ + iotests.VM() as vm: + + img_size = '10M' + + iotests.log('Preparing disk...') + iotests.qemu_img_create('-f', iotests.imgfmt, path, img_size) + iotests.qemu_img_create('-f', iotests.imgfmt, target_path, img_size) + + iotests.qemu_img_create('-f', 'qcow2', '-b', path, '-F', iotests.imgfmt, + snap_path) + iotests.qemu_img_create('-f', 'qcow2', '-b', snap_path, '-F', 'qcow2', + snap2_path) + + iotests.log('Launching VM...') + vm.add_blockdev(f'file,node-name=disk-file,filename={path}') + vm.add_blockdev(f'{iotests.imgfmt},file=disk-file,node-name=disk-fmt,' + 'active=off') + vm.add_blockdev(f'file,node-name=target-file,filename={target_path}') + vm.add_blockdev(f'{iotests.imgfmt},file=target-file,node-name=target-fmt') + vm.add_blockdev(f'file,node-name=snap-file,filename={snap_path}') + vm.add_blockdev(f'file,node-name=snap2-file,filename={snap2_path}') + + # Actually running the VM activates all images + vm.add_paused() + + vm.launch() + vm.qmp_log('nbd-server-start', + addr={'type': 'unix', 'data':{'path': nbd_sock}}, + filters=[filter_qmp_testfiles]) + + iotests.log('\n=== Creating export of inactive node ===') + + iotests.log('\nExports activate nodes without allow-inactive') + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + vm.qmp_log('block-export-add', **get_export()) + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + vm.qmp_log('query-block-exports') + vm.qmp_log('block-export-del', id='exp0') + vm.event_wait('BLOCK_EXPORT_DELETED') + 
vm.qmp_log('query-block-exports') + + iotests.log('\nExports activate nodes with allow-inactive=false') + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False) + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + vm.qmp_log('block-export-add', **get_export(allow_inactive=False)) + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + vm.qmp_log('query-block-exports') + vm.qmp_log('block-export-del', id='exp0') + vm.event_wait('BLOCK_EXPORT_DELETED') + vm.qmp_log('query-block-exports') + + iotests.log('\nExport leaves nodes inactive with allow-inactive=true') + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False) + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + vm.qmp_log('block-export-add', **get_export(allow_inactive=True)) + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + vm.qmp_log('query-block-exports') + vm.qmp_log('block-export-del', id='exp0') + vm.event_wait('BLOCK_EXPORT_DELETED') + vm.qmp_log('query-block-exports') + + iotests.log('\n=== Inactivating node with existing export ===') + + iotests.log('\nInactivating nodes with an export fails without ' + 'allow-inactive') + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True) + vm.qmp_log('block-export-add', **get_export(node_name='disk-fmt')) + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False) + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + vm.qmp_log('query-block-exports') + vm.qmp_log('block-export-del', id='exp0') + vm.event_wait('BLOCK_EXPORT_DELETED') + vm.qmp_log('query-block-exports') + + iotests.log('\nInactivating nodes with an export fails with ' + 'allow-inactive=false') + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True) + vm.qmp_log('block-export-add', + **get_export(node_name='disk-fmt', allow_inactive=False)) + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False) + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + vm.qmp_log('query-block-exports') + vm.qmp_log('block-export-del', id='exp0') + vm.event_wait('BLOCK_EXPORT_DELETED') + vm.qmp_log('query-block-exports') + + iotests.log('\nInactivating nodes with an export works with ' + 'allow-inactive=true') + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True) + vm.qmp_log('block-export-add', + **get_export(node_name='disk-fmt', allow_inactive=True)) + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False) + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + vm.qmp_log('query-block-exports') + vm.qmp_log('block-export-del', id='exp0') + vm.event_wait('BLOCK_EXPORT_DELETED') + vm.qmp_log('query-block-exports') + + iotests.log('\n=== Inactive nodes with parent ===') + + iotests.log('\nInactivating nodes with an active parent fails') + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True) + vm.qmp_log('blockdev-set-active', node_name='disk-file', active=False) + iotests.log('disk-file active: %s' % node_is_active(vm, 'disk-file')) + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + + iotests.log('\nInactivating nodes with an inactive parent works') + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False) + vm.qmp_log('blockdev-set-active', node_name='disk-file', active=False) + iotests.log('disk-file active: %s' % node_is_active(vm, 'disk-file')) + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + + iotests.log('\nCreating active 
parent node with an inactive child fails') + vm.qmp_log('blockdev-add', driver='raw', file='disk-fmt', + node_name='disk-filter') + vm.qmp_log('blockdev-add', driver='raw', file='disk-fmt', + node_name='disk-filter', active=True) + + iotests.log('\nCreating inactive parent node with an inactive child works') + vm.qmp_log('blockdev-add', driver='raw', file='disk-fmt', + node_name='disk-filter', active=False) + vm.qmp_log('blockdev-del', node_name='disk-filter') + + iotests.log('\n=== Resizing an inactive node ===') + vm.qmp_log('block_resize', node_name='disk-fmt', size=16*1024*1024) + + iotests.log('\n=== Taking a snapshot of an inactive node ===') + + iotests.log('\nActive overlay over inactive backing file automatically ' + 'makes both inactive for compatibility') + vm.qmp_log('blockdev-add', driver='qcow2', node_name='snap-fmt', + file='snap-file', backing=None) + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) + vm.qmp_log('blockdev-snapshot', node='disk-fmt', overlay='snap-fmt') + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) + vm.qmp_log('blockdev-del', node_name='snap-fmt') + + iotests.log('\nInactive overlay over inactive backing file just works') + vm.qmp_log('blockdev-add', driver='qcow2', node_name='snap-fmt', + file='snap-file', backing=None, active=False) + vm.qmp_log('blockdev-snapshot', node='disk-fmt', overlay='snap-fmt') + + iotests.log('\n=== Block jobs with inactive nodes ===') + + iotests.log('\nStreaming into an inactive node') + vm.qmp_log('block-stream', device='snap-fmt', + filters=[iotests.filter_qmp_generated_node_ids]) + + iotests.log('\nCommitting an inactive root node (active commit)') + vm.qmp_log('block-commit', job_id='job0', device='snap-fmt', + filters=[iotests.filter_qmp_generated_node_ids]) + + iotests.log('\nCommitting an inactive intermediate node to inactive base') + vm.qmp_log('blockdev-add', driver='qcow2', node_name='snap2-fmt', + file='snap2-file', backing='snap-fmt', active=False) + + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) + iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt')) + + vm.qmp_log('block-commit', job_id='job0', device='snap2-fmt', + top_node='snap-fmt', + filters=[iotests.filter_qmp_generated_node_ids]) + + iotests.log('\nCommitting an inactive intermediate node to active base') + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True) + vm.qmp_log('block-commit', job_id='job0', device='snap2-fmt', + top_node='snap-fmt', + filters=[iotests.filter_qmp_generated_node_ids]) + + iotests.log('\nMirror from inactive source to active target') + vm.qmp_log('blockdev-mirror', job_id='job0', device='snap2-fmt', + target='target-fmt', sync='full', + filters=[iotests.filter_qmp_generated_node_ids]) + + iotests.log('\nMirror from active source to inactive target') + + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) + iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt')) + iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt')) + + # Activating snap2-fmt recursively activates the whole backing chain + vm.qmp_log('blockdev-set-active', node_name='snap2-fmt', active=True) + vm.qmp_log('blockdev-set-active', 
node_name='target-fmt', active=False) + + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) + iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt')) + iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt')) + + vm.qmp_log('blockdev-mirror', job_id='job0', device='snap2-fmt', + target='target-fmt', sync='full', + filters=[iotests.filter_qmp_generated_node_ids]) + + iotests.log('\nBackup from active source to inactive target') + + vm.qmp_log('blockdev-backup', job_id='job0', device='snap2-fmt', + target='target-fmt', sync='full', + filters=[iotests.filter_qmp_generated_node_ids]) + + iotests.log('\nBackup from inactive source to active target') + + # Inactivating snap2-fmt recursively inactivates the whole backing chain + vm.qmp_log('blockdev-set-active', node_name='snap2-fmt', active=False) + vm.qmp_log('blockdev-set-active', node_name='target-fmt', active=True) + + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) + iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt')) + iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt')) + + vm.qmp_log('blockdev-backup', job_id='job0', device='snap2-fmt', + target='target-fmt', sync='full', + filters=[iotests.filter_qmp_generated_node_ids]) + + iotests.log('\n=== Accessing export on inactive node ===') + + # Use the target node because it has the right image format and isn't the + # (read-only) backing file of a qcow2 node + vm.qmp_log('blockdev-set-active', node_name='target-fmt', active=False) + vm.qmp_log('block-export-add', + **get_export(node_name='target-fmt', allow_inactive=True)) + + # The read should succeed, everything else should fail gracefully + qemu_io = QemuIoInteractive('-f', 'raw', + f'nbd+unix:///target-fmt?socket={nbd_sock}') + iotests.log(qemu_io.cmd('read 0 64k'), filters=[filter_qemu_io]) + iotests.log(qemu_io.cmd('write 0 64k'), filters=[filter_qemu_io]) + iotests.log(qemu_io.cmd('write -z 0 64k'), filters=[filter_qemu_io]) + iotests.log(qemu_io.cmd('write -zu 0 64k'), filters=[filter_qemu_io]) + iotests.log(qemu_io.cmd('discard 0 64k'), filters=[filter_qemu_io]) + iotests.log(qemu_io.cmd('flush'), filters=[filter_qemu_io]) + iotests.log(qemu_io.cmd('map'), filters=[filter_qemu_io]) + qemu_io.close() + + iotests.log('\n=== Resuming VM activates all images ===') + vm.qmp_log('cont') + + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) + iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt')) + iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt')) + + iotests.log('\nShutting down...') + vm.shutdown() + log = vm.get_log() + if log: + iotests.log(log, [filter_qtest, filter_qemu_io]) diff --git a/tests/qemu-iotests/tests/inactive-node-nbd.out b/tests/qemu-iotests/tests/inactive-node-nbd.out new file mode 100644 index 00000000000..a458b4fc055 --- /dev/null +++ b/tests/qemu-iotests/tests/inactive-node-nbd.out @@ -0,0 +1,239 @@ +Preparing disk... +Launching VM... 
+{"execute": "nbd-server-start", "arguments": {"addr": {"data": {"path": "SOCK_DIR/PID-nbd.sock"}, "type": "unix"}}} +{"return": {}} + +=== Creating export of inactive node === + +Exports activate nodes without allow-inactive +disk-fmt active: False +{"execute": "block-export-add", "arguments": {"id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}} +{"return": {}} +disk-fmt active: True +{"execute": "query-block-exports", "arguments": {}} +{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]} +{"execute": "block-export-del", "arguments": {"id": "exp0"}} +{"return": {}} +{"execute": "query-block-exports", "arguments": {}} +{"return": []} + +Exports activate nodes with allow-inactive=false +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}} +{"return": {}} +disk-fmt active: False +{"execute": "block-export-add", "arguments": {"allow-inactive": false, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}} +{"return": {}} +disk-fmt active: True +{"execute": "query-block-exports", "arguments": {}} +{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]} +{"execute": "block-export-del", "arguments": {"id": "exp0"}} +{"return": {}} +{"execute": "query-block-exports", "arguments": {}} +{"return": []} + +Export leaves nodes inactive with allow-inactive=true +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}} +{"return": {}} +disk-fmt active: False +{"execute": "block-export-add", "arguments": {"allow-inactive": true, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}} +{"return": {}} +disk-fmt active: False +{"execute": "query-block-exports", "arguments": {}} +{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]} +{"execute": "block-export-del", "arguments": {"id": "exp0"}} +{"return": {}} +{"execute": "query-block-exports", "arguments": {}} +{"return": []} + +=== Inactivating node with existing export === + +Inactivating nodes with an export fails without allow-inactive +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}} +{"return": {}} +{"execute": "block-export-add", "arguments": {"id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}} +{"return": {}} +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}} +{"error": {"class": "GenericError", "desc": "Failed to inactivate node: Operation not permitted"}} +disk-fmt active: True +{"execute": "query-block-exports", "arguments": {}} +{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]} +{"execute": "block-export-del", "arguments": {"id": "exp0"}} +{"return": {}} +{"execute": "query-block-exports", "arguments": {}} +{"return": []} + +Inactivating nodes with an export fails with allow-inactive=false +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}} +{"return": {}} +{"execute": "block-export-add", "arguments": {"allow-inactive": false, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}} +{"return": {}} +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}} +{"error": {"class": "GenericError", "desc": "Failed to inactivate node: Operation not permitted"}} +disk-fmt active: True +{"execute": "query-block-exports", "arguments": {}} +{"return": [{"id": "exp0", "node-name": 
"disk-fmt", "shutting-down": false, "type": "nbd"}]} +{"execute": "block-export-del", "arguments": {"id": "exp0"}} +{"return": {}} +{"execute": "query-block-exports", "arguments": {}} +{"return": []} + +Inactivating nodes with an export works with allow-inactive=true +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}} +{"return": {}} +{"execute": "block-export-add", "arguments": {"allow-inactive": true, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}} +{"return": {}} +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}} +{"return": {}} +disk-fmt active: False +{"execute": "query-block-exports", "arguments": {}} +{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]} +{"execute": "block-export-del", "arguments": {"id": "exp0"}} +{"return": {}} +{"execute": "query-block-exports", "arguments": {}} +{"return": []} + +=== Inactive nodes with parent === + +Inactivating nodes with an active parent fails +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}} +{"return": {}} +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-file"}} +{"error": {"class": "GenericError", "desc": "Node has active parent node"}} +disk-file active: True +disk-fmt active: True + +Inactivating nodes with an inactive parent works +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}} +{"return": {}} +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-file"}} +{"return": {}} +disk-file active: False +disk-fmt active: False + +Creating active parent node with an inactive child fails +{"execute": "blockdev-add", "arguments": {"driver": "raw", "file": "disk-fmt", "node-name": "disk-filter"}} +{"error": {"class": "GenericError", "desc": "Inactive 'disk-fmt' can't be a file child of active 'disk-filter'"}} +{"execute": "blockdev-add", "arguments": {"active": true, "driver": "raw", "file": "disk-fmt", "node-name": "disk-filter"}} +{"error": {"class": "GenericError", "desc": "Inactive 'disk-fmt' can't be a file child of active 'disk-filter'"}} + +Creating inactive parent node with an inactive child works +{"execute": "blockdev-add", "arguments": {"active": false, "driver": "raw", "file": "disk-fmt", "node-name": "disk-filter"}} +{"return": {}} +{"execute": "blockdev-del", "arguments": {"node-name": "disk-filter"}} +{"return": {}} + +=== Resizing an inactive node === +{"execute": "block_resize", "arguments": {"node-name": "disk-fmt", "size": 16777216}} +{"error": {"class": "GenericError", "desc": "Permission 'resize' unavailable on inactive node"}} + +=== Taking a snapshot of an inactive node === + +Active overlay over inactive backing file automatically makes both inactive for compatibility +{"execute": "blockdev-add", "arguments": {"backing": null, "driver": "qcow2", "file": "snap-file", "node-name": "snap-fmt"}} +{"return": {}} +disk-fmt active: False +snap-fmt active: True +{"execute": "blockdev-snapshot", "arguments": {"node": "disk-fmt", "overlay": "snap-fmt"}} +{"return": {}} +disk-fmt active: False +snap-fmt active: False +{"execute": "blockdev-del", "arguments": {"node-name": "snap-fmt"}} +{"return": {}} + +Inactive overlay over inactive backing file just works +{"execute": "blockdev-add", "arguments": {"active": false, "backing": null, "driver": "qcow2", "file": "snap-file", "node-name": "snap-fmt"}} +{"return": {}} +{"execute": 
"blockdev-snapshot", "arguments": {"node": "disk-fmt", "overlay": "snap-fmt"}} +{"return": {}} + +=== Block jobs with inactive nodes === + +Streaming into an inactive node +{"execute": "block-stream", "arguments": {"device": "snap-fmt"}} +{"error": {"class": "GenericError", "desc": "Could not create node: Inactive 'snap-fmt' can't be a file child of active 'NODE_NAME'"}} + +Committing an inactive root node (active commit) +{"execute": "block-commit", "arguments": {"device": "snap-fmt", "job-id": "job0"}} +{"error": {"class": "GenericError", "desc": "Inactive 'snap-fmt' can't be a backing child of active 'NODE_NAME'"}} + +Committing an inactive intermediate node to inactive base +{"execute": "blockdev-add", "arguments": {"active": false, "backing": "snap-fmt", "driver": "qcow2", "file": "snap2-file", "node-name": "snap2-fmt"}} +{"return": {}} +disk-fmt active: False +snap-fmt active: False +snap2-fmt active: False +{"execute": "block-commit", "arguments": {"device": "snap2-fmt", "job-id": "job0", "top-node": "snap-fmt"}} +{"error": {"class": "GenericError", "desc": "Inactive 'snap-fmt' can't be a backing child of active 'NODE_NAME'"}} + +Committing an inactive intermediate node to active base +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}} +{"return": {}} +{"execute": "block-commit", "arguments": {"device": "snap2-fmt", "job-id": "job0", "top-node": "snap-fmt"}} +{"error": {"class": "GenericError", "desc": "Inactive 'snap-fmt' can't be a backing child of active 'NODE_NAME'"}} + +Mirror from inactive source to active target +{"execute": "blockdev-mirror", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}} +{"error": {"class": "GenericError", "desc": "Inactive 'snap2-fmt' can't be a backing child of active 'NODE_NAME'"}} + +Mirror from active source to inactive target +disk-fmt active: True +snap-fmt active: False +snap2-fmt active: False +target-fmt active: True +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "snap2-fmt"}} +{"return": {}} +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "target-fmt"}} +{"return": {}} +disk-fmt active: True +snap-fmt active: True +snap2-fmt active: True +target-fmt active: False +{"execute": "blockdev-mirror", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}} +{"error": {"class": "GenericError", "desc": "Permission 'write' unavailable on inactive node"}} + +Backup from active source to inactive target +{"execute": "blockdev-backup", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}} +{"error": {"class": "GenericError", "desc": "Could not create node: Inactive 'target-fmt' can't be a target child of active 'NODE_NAME'"}} + +Backup from inactive source to active target +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "snap2-fmt"}} +{"return": {}} +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "target-fmt"}} +{"return": {}} +disk-fmt active: False +snap-fmt active: False +snap2-fmt active: False +target-fmt active: True +{"execute": "blockdev-backup", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}} +{"error": {"class": "GenericError", "desc": "Could not create node: Inactive 'snap2-fmt' can't be a file child of active 'NODE_NAME'"}} + +=== Accessing export on inactive node === +{"execute": "blockdev-set-active", 
"arguments": {"active": false, "node-name": "target-fmt"}} +{"return": {}} +{"execute": "block-export-add", "arguments": {"allow-inactive": true, "id": "exp0", "node-name": "target-fmt", "type": "nbd", "writable": true}} +{"return": {}} +read 65536/65536 bytes at offset 0 +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +write failed: Operation not permitted + +write failed: Operation not permitted + +write failed: Operation not permitted + +discard failed: Operation not permitted + + +qemu-io: Failed to get allocation status: Operation not permitted + + +=== Resuming VM activates all images === +{"execute": "cont", "arguments": {}} +{"return": {}} +disk-fmt active: True +snap-fmt active: True +snap2-fmt active: True +target-fmt active: True + +Shutting down... + diff --git a/tests/qemu-iotests/tests/migrate-bitmaps-test b/tests/qemu-iotests/tests/migrate-bitmaps-test index f98e721e97d..8fb4099201d 100755 --- a/tests/qemu-iotests/tests/migrate-bitmaps-test +++ b/tests/qemu-iotests/tests/migrate-bitmaps-test @@ -122,11 +122,10 @@ class TestDirtyBitmapMigration(iotests.QMPTestCase): # catch 'Could not reopen qcow2 layer: Bitmap already exists' # possible error - log = self.vm_a.get_log() - log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log) - log = re.sub(r'^(wrote .* bytes at offset .*\n.*KiB.*ops.*sec.*\n){3}', + log = iotests.filter_qtest(self.vm_a.get_log()) + log = re.sub(r'^(wrote .* bytes at offset .*\n' + r'.*KiB.*ops.*sec.*\n?){3}', '', log) - log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log) self.assertEqual(log, '') # test that bitmap is still persistent diff --git a/tests/qemu-iotests/tests/mirror-sparse b/tests/qemu-iotests/tests/mirror-sparse new file mode 100755 index 00000000000..ee7101bd50e --- /dev/null +++ b/tests/qemu-iotests/tests/mirror-sparse @@ -0,0 +1,129 @@ +#!/usr/bin/env bash +# group: rw auto quick +# +# Test blockdev-mirror with raw sparse destination +# +# Copyright (C) 2025 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +seq="$(basename $0)" +echo "QA output created by $seq" + +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_test_img + _cleanup_qemu +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +cd .. +. ./common.rc +. ./common.filter +. ./common.qemu + +_supported_fmt qcow2 raw # Format of the source. 
dst is always raw file +_supported_proto file +_supported_os Linux +_require_o_direct +_require_disk_usage + +echo +echo "=== Initial image setup ===" +echo + +TEST_IMG="$TEST_IMG.base" _make_test_img 20M +$QEMU_IO -c 'w 8M 2M' -f $IMGFMT "$TEST_IMG.base" | _filter_qemu_io + +_launch_qemu \ + -blockdev '{"driver":"file", "cache":{"direct":true, "no-flush":false}, + "filename":"'"$TEST_IMG.base"'", "node-name":"src-file"}' \ + -blockdev '{"driver":"'$IMGFMT'", "node-name":"src", "file":"src-file"}' +h1=$QEMU_HANDLE +_send_qemu_cmd $h1 '{"execute": "qmp_capabilities"}' 'return' + +# Check several combinations; most should result in a sparse destination; +# the destination should only be fully allocated if pre-allocated +# and not punching holes due to detect-zeroes +# do_test creation discard zeroes result +do_test() { + creation=$1 + discard=$2 + zeroes=$3 + expected=$4 + +echo +echo "=== Testing creation=$creation discard=$discard zeroes=$zeroes ===" +echo + +rm -f $TEST_IMG +if test $creation = external; then + truncate --size=20M $TEST_IMG +else + _send_qemu_cmd $h1 '{"execute": "blockdev-create", "arguments": + {"options": {"driver":"file", "filename":"'$TEST_IMG'", + "size":'$((20*1024*1024))', "preallocation":"'$creation'"}, + "job-id":"job1"}}' 'concluded' + _send_qemu_cmd $h1 '{"execute": "job-dismiss", "arguments": + {"id": "job1"}}' 'return' +fi +_send_qemu_cmd $h1 '{"execute": "blockdev-add", "arguments": + {"node-name": "dst", "driver":"file", + "filename":"'$TEST_IMG'", "aio":"threads", + "auto-read-only":true, "discard":"'$discard'", + "detect-zeroes":"'$zeroes'"}}' 'return' +_send_qemu_cmd $h1 '{"execute":"blockdev-mirror", "arguments": + {"sync":"full", "device":"src", "target":"dst", + "job-id":"job2"}}' 'return' +_timed_wait_for $h1 '"ready"' +_send_qemu_cmd $h1 '{"execute": "job-complete", "arguments": + {"id":"job2"}}' 'return' \ + | _filter_block_job_offset | _filter_block_job_len +_send_qemu_cmd $h1 '{"execute": "blockdev-del", "arguments": + {"node-name": "dst"}}' 'return' \ + | _filter_block_job_offset | _filter_block_job_len +$QEMU_IMG compare -U -f $IMGFMT -F raw $TEST_IMG.base $TEST_IMG +# Some filesystems can fudge allocations for various reasons; rather +# than expecting precise 2M and 20M images, it is better to allow for slop. 
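+# Usage below 4M counts as sparse and above 19M as fully allocated; anything in between is reported as an unexpected size.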
+result=$(disk_usage $TEST_IMG) +if test $result -lt $((4*1024*1024)); then + actual=sparse +elif test $result -gt $((19*1024*1024)); then + actual=full +else + actual="unexpected size ($result)" +fi +echo "Destination is $actual; expected $expected" +} + +do_test external ignore off sparse +do_test external unmap off sparse +do_test external unmap unmap sparse +do_test off ignore off sparse +do_test off unmap off sparse +do_test off unmap unmap sparse +do_test full ignore off full +do_test full unmap off sparse +do_test full unmap unmap sparse + +_send_qemu_cmd $h1 '{"execute":"quit"}' '' + +# success, all done +echo '*** done' +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/tests/mirror-sparse.out b/tests/qemu-iotests/tests/mirror-sparse.out new file mode 100644 index 00000000000..2103b891c3f --- /dev/null +++ b/tests/qemu-iotests/tests/mirror-sparse.out @@ -0,0 +1,365 @@ +QA output created by mirror-sparse + +=== Initial image setup === + +Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=20971520 +wrote 2097152/2097152 bytes at offset 8388608 +2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +{"execute": "qmp_capabilities"} +{"return": {}} + +=== Testing creation=external discard=ignore zeroes=off === + +{"execute": "blockdev-add", "arguments": + {"node-name": "dst", "driver":"file", + "filename":"TEST_DIR/t.IMGFMT", "aio":"threads", + "auto-read-only":true, "discard":"ignore", + "detect-zeroes":"off"}} +{"return": {}} +{"execute":"blockdev-mirror", "arguments": + {"sync":"full", "device":"src", "target":"dst", + "job-id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}} +{"execute": "job-complete", "arguments": + {"id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"return": {}} +{"execute": "blockdev-del", "arguments": + {"node-name": "dst"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}} +{"return": {}} +Images are identical. 
+Destination is sparse; expected sparse + +=== Testing creation=external discard=unmap zeroes=off === + +{"execute": "blockdev-add", "arguments": + {"node-name": "dst", "driver":"file", + "filename":"TEST_DIR/t.IMGFMT", "aio":"threads", + "auto-read-only":true, "discard":"unmap", + "detect-zeroes":"off"}} +{"return": {}} +{"execute":"blockdev-mirror", "arguments": + {"sync":"full", "device":"src", "target":"dst", + "job-id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}} +{"execute": "job-complete", "arguments": + {"id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"return": {}} +{"execute": "blockdev-del", "arguments": + {"node-name": "dst"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}} +{"return": {}} +Images are identical. 
+Destination is sparse; expected sparse + +=== Testing creation=external discard=unmap zeroes=unmap === + +{"execute": "blockdev-add", "arguments": + {"node-name": "dst", "driver":"file", + "filename":"TEST_DIR/t.IMGFMT", "aio":"threads", + "auto-read-only":true, "discard":"unmap", + "detect-zeroes":"unmap"}} +{"return": {}} +{"execute":"blockdev-mirror", "arguments": + {"sync":"full", "device":"src", "target":"dst", + "job-id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}} +{"execute": "job-complete", "arguments": + {"id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"return": {}} +{"execute": "blockdev-del", "arguments": + {"node-name": "dst"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}} +{"return": {}} +Images are identical. 
+Destination is sparse; expected sparse + +=== Testing creation=off discard=ignore zeroes=off === + +{"execute": "blockdev-create", "arguments": + {"options": {"driver":"file", "filename":"TEST_DIR/t.IMGFMT", + "size":20971520, "preallocation":"off"}, + "job-id":"job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job1"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job1"}} +{"execute": "job-dismiss", "arguments": + {"id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job1"}} +{"return": {}} +{"execute": "blockdev-add", "arguments": + {"node-name": "dst", "driver":"file", + "filename":"TEST_DIR/t.IMGFMT", "aio":"threads", + "auto-read-only":true, "discard":"ignore", + "detect-zeroes":"off"}} +{"return": {}} +{"execute":"blockdev-mirror", "arguments": + {"sync":"full", "device":"src", "target":"dst", + "job-id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}} +{"execute": "job-complete", "arguments": + {"id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"return": {}} +{"execute": "blockdev-del", "arguments": + {"node-name": "dst"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}} +{"return": {}} +Images are identical. 
+Destination is sparse; expected sparse + +=== Testing creation=off discard=unmap zeroes=off === + +{"execute": "blockdev-create", "arguments": + {"options": {"driver":"file", "filename":"TEST_DIR/t.IMGFMT", + "size":20971520, "preallocation":"off"}, + "job-id":"job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job1"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job1"}} +{"execute": "job-dismiss", "arguments": + {"id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job1"}} +{"return": {}} +{"execute": "blockdev-add", "arguments": + {"node-name": "dst", "driver":"file", + "filename":"TEST_DIR/t.IMGFMT", "aio":"threads", + "auto-read-only":true, "discard":"unmap", + "detect-zeroes":"off"}} +{"return": {}} +{"execute":"blockdev-mirror", "arguments": + {"sync":"full", "device":"src", "target":"dst", + "job-id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}} +{"execute": "job-complete", "arguments": + {"id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"return": {}} +{"execute": "blockdev-del", "arguments": + {"node-name": "dst"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}} +{"return": {}} +Images are identical. 
+Destination is sparse; expected sparse + +=== Testing creation=off discard=unmap zeroes=unmap === + +{"execute": "blockdev-create", "arguments": + {"options": {"driver":"file", "filename":"TEST_DIR/t.IMGFMT", + "size":20971520, "preallocation":"off"}, + "job-id":"job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job1"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job1"}} +{"execute": "job-dismiss", "arguments": + {"id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job1"}} +{"return": {}} +{"execute": "blockdev-add", "arguments": + {"node-name": "dst", "driver":"file", + "filename":"TEST_DIR/t.IMGFMT", "aio":"threads", + "auto-read-only":true, "discard":"unmap", + "detect-zeroes":"unmap"}} +{"return": {}} +{"execute":"blockdev-mirror", "arguments": + {"sync":"full", "device":"src", "target":"dst", + "job-id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}} +{"execute": "job-complete", "arguments": + {"id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"return": {}} +{"execute": "blockdev-del", "arguments": + {"node-name": "dst"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}} +{"return": {}} +Images are identical. 
+Destination is sparse; expected sparse + +=== Testing creation=full discard=ignore zeroes=off === + +{"execute": "blockdev-create", "arguments": + {"options": {"driver":"file", "filename":"TEST_DIR/t.IMGFMT", + "size":20971520, "preallocation":"full"}, + "job-id":"job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job1"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job1"}} +{"execute": "job-dismiss", "arguments": + {"id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job1"}} +{"return": {}} +{"execute": "blockdev-add", "arguments": + {"node-name": "dst", "driver":"file", + "filename":"TEST_DIR/t.IMGFMT", "aio":"threads", + "auto-read-only":true, "discard":"ignore", + "detect-zeroes":"off"}} +{"return": {}} +{"execute":"blockdev-mirror", "arguments": + {"sync":"full", "device":"src", "target":"dst", + "job-id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}} +{"execute": "job-complete", "arguments": + {"id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"return": {}} +{"execute": "blockdev-del", "arguments": + {"node-name": "dst"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}} +{"return": {}} +Images are identical. 
+Destination is full; expected full + +=== Testing creation=full discard=unmap zeroes=off === + +{"execute": "blockdev-create", "arguments": + {"options": {"driver":"file", "filename":"TEST_DIR/t.IMGFMT", + "size":20971520, "preallocation":"full"}, + "job-id":"job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job1"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job1"}} +{"execute": "job-dismiss", "arguments": + {"id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job1"}} +{"return": {}} +{"execute": "blockdev-add", "arguments": + {"node-name": "dst", "driver":"file", + "filename":"TEST_DIR/t.IMGFMT", "aio":"threads", + "auto-read-only":true, "discard":"unmap", + "detect-zeroes":"off"}} +{"return": {}} +{"execute":"blockdev-mirror", "arguments": + {"sync":"full", "device":"src", "target":"dst", + "job-id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}} +{"execute": "job-complete", "arguments": + {"id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"return": {}} +{"execute": "blockdev-del", "arguments": + {"node-name": "dst"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}} +{"return": {}} +Images are identical. 
+Destination is sparse; expected sparse + +=== Testing creation=full discard=unmap zeroes=unmap === + +{"execute": "blockdev-create", "arguments": + {"options": {"driver":"file", "filename":"TEST_DIR/t.IMGFMT", + "size":20971520, "preallocation":"full"}, + "job-id":"job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job1"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job1"}} +{"execute": "job-dismiss", "arguments": + {"id": "job1"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job1"}} +{"return": {}} +{"execute": "blockdev-add", "arguments": + {"node-name": "dst", "driver":"file", + "filename":"TEST_DIR/t.IMGFMT", "aio":"threads", + "auto-read-only":true, "discard":"unmap", + "detect-zeroes":"unmap"}} +{"return": {}} +{"execute":"blockdev-mirror", "arguments": + {"sync":"full", "device":"src", "target":"dst", + "job-id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job2"}} +{"return": {}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "job2"}} +{"execute": "job-complete", "arguments": + {"id":"job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_READY", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"return": {}} +{"execute": "blockdev-del", "arguments": + {"node-name": "dst"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job2", "len": LEN, "offset": OFFSET, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "job2"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "job2"}} +{"return": {}} +Images are identical. +Destination is sparse; expected sparse +{"execute":"quit"} +*** done diff --git a/tests/qemu-iotests/tests/qcow2-encryption b/tests/qemu-iotests/tests/qcow2-encryption new file mode 100755 index 00000000000..95f6195ab86 --- /dev/null +++ b/tests/qemu-iotests/tests/qcow2-encryption @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# group: rw quick +# +# Test case for encryption support in qcow2 +# +# Copyright (C) 2025 Red Hat, Inc. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +# creator +owner=kwolf@redhat.com + +seq="$(basename $0)" +echo "QA output created by $seq" + +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_test_img +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +. ../common.rc +. ../common.filter + +# This tests qcow2-specific low-level functionality +_supported_fmt qcow2 +_supported_proto file +_require_working_luks + +IMG_SIZE=64M + +echo +echo "=== Create an encrypted image ===" +echo + +_make_test_img --object secret,id=sec0,data=123456 -o encrypt.format=luks,encrypt.key-secret=sec0 $IMG_SIZE +$PYTHON ../qcow2.py "$TEST_IMG" dump-header-exts +_img_info +$QEMU_IMG check \ + --object secret,id=sec0,data=123456 \ + --image-opts file.filename="$TEST_IMG",encrypt.key-secret=sec0 \ + | _filter_qemu_img_check + +echo +echo "=== Remove the header extension ===" +echo + +$PYTHON ../qcow2.py "$TEST_IMG" del-header-ext 0x0537be77 +$PYTHON ../qcow2.py "$TEST_IMG" dump-header-exts +_img_info +$QEMU_IMG check \ + --object secret,id=sec0,data=123456 \ + --image-opts file.filename="$TEST_IMG",encrypt.key-secret=sec0 2>&1 \ + | _filter_qemu_img_check \ + | _filter_testdir + +# success, all done +echo "*** done" +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/tests/qcow2-encryption.out b/tests/qemu-iotests/tests/qcow2-encryption.out new file mode 100644 index 00000000000..9b549dc2abb --- /dev/null +++ b/tests/qemu-iotests/tests/qcow2-encryption.out @@ -0,0 +1,32 @@ +QA output created by qcow2-encryption + +=== Create an encrypted image === + +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 +Header extension: +magic 0x537be77 (Crypto header) +length 16 +data + +Header extension: +magic 0x6803f857 (Feature table) +length 384 +data + +image: TEST_DIR/t.IMGFMT +file format: IMGFMT +virtual size: 64 MiB (67108864 bytes) +encrypted: yes +cluster_size: 65536 +No errors were found on the image. + +=== Remove the header extension === + +Header extension: +magic 0x6803f857 (Feature table) +length 384 +data + +qemu-img: Could not open 'TEST_DIR/t.IMGFMT': Missing CRYPTO header for crypt method 2 +qemu-img: Could not open 'file.filename=TEST_DIR/t.qcow2,encrypt.key-secret=sec0': Missing CRYPTO header for crypt method 2 +*** done diff --git a/tests/qemu-iotests/tests/qom-set-drive b/tests/qemu-iotests/tests/qom-set-drive new file mode 100755 index 00000000000..ec8ddac4fdd --- /dev/null +++ b/tests/qemu-iotests/tests/qom-set-drive @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 +# group: quick +# +# Test how changing the 'drive' property via 'qom-set' behaves. +# +# Copyright (C) Proxmox Server Solutions GmbH +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import os +import iotests +from iotests import imgfmt, log, qemu_img_create, QMPTestCase + +image_size = 1 * 1024 * 1024 +images = [os.path.join(iotests.test_dir, f'{i}.img') for i in range(0, 4)] + +class TestQOMSetDrive(QMPTestCase): + def setUp(self) -> None: + for image in images: + qemu_img_create('-f', imgfmt, image, str(image_size)) + + self.vm = iotests.VM() + for i, image in enumerate(images): + self.vm.add_blockdev(self.vm.qmp_to_opts({ + 'driver': imgfmt, + 'node-name': f'node{i}', + 'file': { + 'driver': 'file', + 'filename': image, + } + })) + self.vm.add_object('iothread,id=iothread0') + self.vm.add_device('virtio-scsi,iothread=iothread0') + self.vm.add_device('scsi-hd,id=iot,drive=node0') + self.vm.add_device('virtio-scsi') + self.vm.add_device('scsi-hd,id=no-iot,drive=node1') + self.vm.launch() + + def tearDown(self) -> None: + self.vm.shutdown() + for image in images: + os.remove(image) + + def test_qom_set_drive(self) -> None: + log(self.vm.qmp('qom-get', path='/machine/peripheral/iot', + property='drive')) + log(self.vm.qmp('qom-set', path='/machine/peripheral/iot', + property='drive', value='node2')) + log(self.vm.qmp('qom-get', path='/machine/peripheral/iot', + property='drive')) + + log(self.vm.qmp('qom-get', path='/machine/peripheral/no-iot', + property='drive')) + log(self.vm.qmp('qom-set', path='/machine/peripheral/no-iot', + property='drive', value='node3')) + log(self.vm.qmp('qom-get', path='/machine/peripheral/no-iot', + property='drive')) + +if __name__ == '__main__': + iotests.activate_logging() + # LUKS would require special key-secret handling in add_blockdevs() + iotests.main(supported_fmts=['generic'], + unsupported_fmts=['luks']) diff --git a/tests/qemu-iotests/tests/qom-set-drive.out b/tests/qemu-iotests/tests/qom-set-drive.out new file mode 100644 index 00000000000..7fc243dca6c --- /dev/null +++ b/tests/qemu-iotests/tests/qom-set-drive.out @@ -0,0 +1,11 @@ +{"return": "node0"} +{"error": {"class": "GenericError", "desc": "Different aio context is not supported for new node"}} +{"return": "node0"} +{"return": "node1"} +{"return": {}} +{"return": "node3"} +. +---------------------------------------------------------------------- +Ran 1 tests + +OK diff --git a/tests/qemu-iotests/tests/qsd-migrate b/tests/qemu-iotests/tests/qsd-migrate new file mode 100755 index 00000000000..a4c6592420c --- /dev/null +++ b/tests/qemu-iotests/tests/qsd-migrate @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +# group: rw quick +# +# Copyright (C) Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +# Creator/Owner: Kevin Wolf + +import iotests + +from iotests import filter_qemu_io, filter_qtest + +iotests.script_initialize(supported_fmts=['qcow2', 'qed', 'raw'], + supported_protocols=['file'], + supported_platforms=['linux']) + +with iotests.FilePath('disk.img') as path, \ + iotests.FilePath('nbd-src.sock', base_dir=iotests.sock_dir) as nbd_src, \ + iotests.FilePath('nbd-dst.sock', base_dir=iotests.sock_dir) as nbd_dst, \ + iotests.FilePath('migrate.sock', base_dir=iotests.sock_dir) as mig_sock, \ + iotests.VM(path_suffix="-src") as vm_src, \ + iotests.VM(path_suffix="-dst") as vm_dst: + + img_size = '10M' + + iotests.log('Preparing disk...') + iotests.qemu_img_create('-f', iotests.imgfmt, path, img_size) + + iotests.log('Launching source QSD...') + qsd_src = iotests.QemuStorageDaemon( + '--blockdev', f'file,node-name=disk-file,filename={path}', + '--blockdev', f'{iotests.imgfmt},file=disk-file,node-name=disk-fmt', + '--nbd-server', f'addr.type=unix,addr.path={nbd_src}', + '--export', 'nbd,id=exp0,node-name=disk-fmt,writable=true,' + 'allow-inactive=true', + qmp=True, + ) + + iotests.log('Launching source VM...') + vm_src.add_args('-blockdev', f'nbd,node-name=disk,server.type=unix,' + f'server.path={nbd_src},export=disk-fmt') + vm_src.add_args('-device', 'virtio-blk,drive=disk,id=virtio0') + vm_src.launch() + + iotests.log('Launching destination QSD...') + qsd_dst = iotests.QemuStorageDaemon( + '--blockdev', f'file,node-name=disk-file,filename={path},active=off', + '--blockdev', f'{iotests.imgfmt},file=disk-file,node-name=disk-fmt,' + f'active=off', + '--nbd-server', f'addr.type=unix,addr.path={nbd_dst}', + '--export', 'nbd,id=exp0,node-name=disk-fmt,writable=true,' + 'allow-inactive=true', + qmp=True, + instance_id='b', + ) + + iotests.log('Launching destination VM...') + vm_dst.add_args('-blockdev', f'nbd,node-name=disk,server.type=unix,' + f'server.path={nbd_dst},export=disk-fmt') + vm_dst.add_args('-device', 'virtio-blk,drive=disk,id=virtio0') + vm_dst.add_args('-incoming', f'unix:{mig_sock}') + vm_dst.launch() + + iotests.log('\nTest I/O on the source') + vm_src.hmp_qemu_io('virtio0/virtio-backend', 'write -P 0x11 0 4k', + use_log=True, qdev=True) + vm_src.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x11 0 4k', + use_log=True, qdev=True) + + iotests.log('\nStarting migration...') + + mig_caps = [ + {'capability': 'events', 'state': True}, + {'capability': 'pause-before-switchover', 'state': True}, + ] + vm_src.qmp_log('migrate-set-capabilities', capabilities=mig_caps) + vm_dst.qmp_log('migrate-set-capabilities', capabilities=mig_caps) + vm_src.qmp_log('migrate', uri=f'unix:{mig_sock}', + filters=[iotests.filter_qmp_testfiles]) + + vm_src.event_wait('MIGRATION', + match={'data': {'status': 'pre-switchover'}}) + + iotests.log('\nPre-switchover: Reconfigure QSD instances') + + iotests.log(qsd_src.qmp('blockdev-set-active', {'active': False})) + + # Reading is okay from both sides while the image is inactive. Note that + # the destination may have stale data until it activates the image, though. 
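+ # (Hence the destination read below deliberately omits the -P pattern
+ # check; only the source read is verified against the 0x11 data written
+ # earlier.)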
+ vm_src.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x11 0 4k', + use_log=True, qdev=True) + vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'read 0 4k', + use_log=True, qdev=True) + + iotests.log(qsd_dst.qmp('blockdev-set-active', {'active': True})) + + iotests.log('\nCompleting migration...') + + vm_src.qmp_log('migrate-continue', state='pre-switchover') + vm_dst.event_wait('MIGRATION', match={'data': {'status': 'completed'}}) + + iotests.log('\nTest I/O on the destination') + + # Now the destination must see what the source wrote + vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x11 0 4k', + use_log=True, qdev=True) + + # And be able to overwrite it + vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'write -P 0x22 0 4k', + use_log=True, qdev=True) + vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x22 0 4k', + use_log=True, qdev=True) + + iotests.log('\nDone') + + vm_src.shutdown() + iotests.log('\n--- vm_src log ---') + log = vm_src.get_log() + if log: + iotests.log(log, [filter_qtest, filter_qemu_io]) + qsd_src.stop() + + vm_dst.shutdown() + iotests.log('\n--- vm_dst log ---') + log = vm_dst.get_log() + if log: + iotests.log(log, [filter_qtest, filter_qemu_io]) + qsd_dst.stop() diff --git a/tests/qemu-iotests/tests/qsd-migrate.out b/tests/qemu-iotests/tests/qsd-migrate.out new file mode 100644 index 00000000000..4a5241e5d40 --- /dev/null +++ b/tests/qemu-iotests/tests/qsd-migrate.out @@ -0,0 +1,59 @@ +Preparing disk... +Launching source QSD... +Launching source VM... +Launching destination QSD... +Launching destination VM... + +Test I/O on the source +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"write -P 0x11 0 4k\""}} +{"return": ""} +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x11 0 4k\""}} +{"return": ""} + +Starting migration... +{"execute": "migrate-set-capabilities", "arguments": {"capabilities": [{"capability": "events", "state": true}, {"capability": "pause-before-switchover", "state": true}]}} +{"return": {}} +{"execute": "migrate-set-capabilities", "arguments": {"capabilities": [{"capability": "events", "state": true}, {"capability": "pause-before-switchover", "state": true}]}} +{"return": {}} +{"execute": "migrate", "arguments": {"uri": "unix:SOCK_DIR/PID-migrate.sock"}} +{"return": {}} + +Pre-switchover: Reconfigure QSD instances +{"return": {}} +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x11 0 4k\""}} +{"return": ""} +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read 0 4k\""}} +{"return": ""} +{"return": {}} + +Completing migration... 
+{"execute": "migrate-continue", "arguments": {"state": "pre-switchover"}} +{"return": {}} + +Test I/O on the destination +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x11 0 4k\""}} +{"return": ""} +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"write -P 0x22 0 4k\""}} +{"return": ""} +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x22 0 4k\""}} +{"return": ""} + +Done + +--- vm_src log --- +wrote 4096/4096 bytes at offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +--- vm_dst log --- +read 4096/4096 bytes at offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +wrote 4096/4096 bytes at offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 4096/4096 bytes at offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) diff --git a/tests/qemu-iotests/tests/vvfat b/tests/qemu-iotests/tests/vvfat new file mode 100755 index 00000000000..acdc6ce8fff --- /dev/null +++ b/tests/qemu-iotests/tests/vvfat @@ -0,0 +1,485 @@ +#!/usr/bin/env python3 +# group: rw vvfat +# +# Test vvfat driver implementation +# Here, we use a simple FAT16 implementation and check the behavior of +# the vvfat driver. +# +# Copyright (C) 2024 Amjad Alsharafi +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +import shutil +import iotests +from iotests import imgfmt, QMPTestCase +from fat16 import MBR, Fat16, DIRENTRY_SIZE + +filesystem = os.path.join(iotests.test_dir, "filesystem") + +nbd_sock = iotests.file_path("nbd.sock", base_dir=iotests.sock_dir) +nbd_uri = "nbd+unix:///disk?socket=" + nbd_sock + +SECTOR_SIZE = 512 + + +class TestVVFatDriver(QMPTestCase): + # pylint: disable=broad-exception-raised + def setUp(self) -> None: + if os.path.exists(filesystem): + if os.path.isdir(filesystem): + shutil.rmtree(filesystem) + else: + raise Exception(f"{filesystem} exists and is not a directory") + + os.mkdir(filesystem) + + # Add some text files to the filesystem + for i in range(10): + with open(os.path.join(filesystem, f"file{i}.txt"), + "w", encoding="ascii") as f: + f.write(f"Hello, world! {i}\n") + + # Add 2 large files, above the cluster size (8KB) + with open(os.path.join(filesystem, "large1.txt"), "wb") as f: + # write 'A' * 1KB, 'B' * 1KB, 'C' * 1KB, ... + for i in range(8 * 2): # two clusters + f.write(bytes([0x41 + i] * 1024)) + + with open(os.path.join(filesystem, "large2.txt"), "wb") as f: + # write 'A' * 1KB, 'B' * 1KB, 'C' * 1KB, ... 
+ for i in range(8 * 3): # 3 clusters + f.write(bytes([0x41 + i] * 1024)) + + self.vm = iotests.VM() + + self.vm.add_blockdev( + self.vm.qmp_to_opts( + { + "driver": imgfmt, + "node-name": "disk", + "rw": "true", + "fat-type": "16", + "dir": filesystem, + } + ) + ) + + self.vm.launch() + + self.vm.qmp_log("block-dirty-bitmap-add", **{ + "node": "disk", + "name": "bitmap0", + }) + + # attach nbd server + self.vm.qmp_log( + "nbd-server-start", + **{"addr": {"type": "unix", "data": {"path": nbd_sock}}}, + filters=[], + ) + + self.vm.qmp_log( + "nbd-server-add", + **{"device": "disk", "writable": True, "bitmap": "bitmap0"}, + ) + + self.qio = iotests.QemuIoInteractive("-f", "raw", nbd_uri) + + def tearDown(self) -> None: + self.qio.close() + self.vm.shutdown() + # print(self.vm.get_log()) + shutil.rmtree(filesystem) + + def read_sectors(self, sector: int, num: int = 1) -> bytes: + """ + Read `num` sectors starting from `sector` from the `disk`. + This uses `QemuIoInteractive` to read the sectors into `stdout` and + then parse the output. + """ + self.assertGreater(num, 0) + + # The output contains the content of the sector in hex dump format + # We need to extract the content from it + output = self.qio.cmd( + f"read -v {sector * SECTOR_SIZE} {num * SECTOR_SIZE}") + + # Each row is 16 bytes long, and we are writing `num` sectors + rows = num * SECTOR_SIZE // 16 + output_rows = output.split("\n")[:rows] + + hex_content = "".join( + [(row.split(": ")[1]).split(" ")[0] for row in output_rows] + ) + bytes_content = bytes.fromhex(hex_content) + + self.assertEqual(len(bytes_content), num * SECTOR_SIZE) + + return bytes_content + + def write_sectors(self, sector: int, data: bytes) -> None: + """ + Write `data` to the `disk` starting from `sector`. + This uses `QemuIoInteractive` to write the data into the disk. 
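+ The length of `data` must be a positive multiple of SECTOR_SIZE; the
+ bytes are staged in a temporary file and written via qemu-io's
+ "write -s".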
+ """ + + self.assertGreater(len(data), 0) + self.assertEqual(len(data) % SECTOR_SIZE, 0) + + temp_file = os.path.join(iotests.test_dir, "temp.bin") + with open(temp_file, "wb") as f: + f.write(data) + + self.qio.cmd( + f"write -s {temp_file} {sector * SECTOR_SIZE} {len(data)}" + ) + + os.remove(temp_file) + + def init_fat16(self): + mbr = MBR(self.read_sectors(0)) + return Fat16( + mbr.partition_table[0]["start_lba"], + mbr.partition_table[0]["size"], + self.read_sectors, + self.write_sectors, + ) + + # Tests + + def test_fat_filesystem(self): + """ + Test that vvfat produce a valid FAT16 and MBR sectors + """ + mbr = MBR(self.read_sectors(0)) + + self.assertEqual(mbr.partition_table[0]["status"], 0x80) + self.assertEqual(mbr.partition_table[0]["type"], 6) + + fat16 = Fat16( + mbr.partition_table[0]["start_lba"], + mbr.partition_table[0]["size"], + self.read_sectors, + self.write_sectors, + ) + self.assertEqual(fat16.boot_sector.bytes_per_sector, 512) + self.assertEqual(fat16.boot_sector.volume_label, "QEMU VVFAT") + + def test_read_root_directory(self): + """ + Test the content of the root directory + """ + fat16 = self.init_fat16() + + root_dir = fat16.read_root_directory() + + self.assertEqual(len(root_dir), 13) # 12 + 1 special file + + files = { + "QEMU VVF.AT": 0, # special empty file + "FILE0.TXT": 16, + "FILE1.TXT": 16, + "FILE2.TXT": 16, + "FILE3.TXT": 16, + "FILE4.TXT": 16, + "FILE5.TXT": 16, + "FILE6.TXT": 16, + "FILE7.TXT": 16, + "FILE8.TXT": 16, + "FILE9.TXT": 16, + "LARGE1.TXT": 0x2000 * 2, + "LARGE2.TXT": 0x2000 * 3, + } + + for entry in root_dir: + self.assertIn(entry.whole_name(), files) + self.assertEqual(entry.size_bytes, files[entry.whole_name()]) + + def test_direntry_as_bytes(self): + """ + Test if we can convert Direntry back to bytes, so that we can write it + back to the disk safely. + """ + fat16 = self.init_fat16() + + root_dir = fat16.read_root_directory() + first_entry_bytes = fat16.read_sectors( + fat16.boot_sector.root_dir_start(), 1) + + # The first entry won't be deleted, so we can compare it with the first + # entry in the root directory + self.assertEqual(root_dir[0].as_bytes(), + first_entry_bytes[:DIRENTRY_SIZE]) + + def test_read_files(self): + """ + Test reading the content of the files + """ + fat16 = self.init_fat16() + + for i in range(10): + file = fat16.find_direntry(f"/FILE{i}.TXT") + self.assertIsNotNone(file) + self.assertEqual( + fat16.read_file(file), f"Hello, world! {i}\n".encode("ascii") + ) + + # test large files + large1 = fat16.find_direntry("/LARGE1.TXT") + with open(os.path.join(filesystem, "large1.txt"), "rb") as f: + self.assertEqual(fat16.read_file(large1), f.read()) + + large2 = fat16.find_direntry("/LARGE2.TXT") + self.assertIsNotNone(large2) + with open(os.path.join(filesystem, "large2.txt"), "rb") as f: + self.assertEqual(fat16.read_file(large2), f.read()) + + def test_write_file_same_content_direct(self): + """ + Similar to `test_write_file_in_same_content`, but we write the file + directly clusters and thus we don't go through the modification of + direntry. 
+ """ + fat16 = self.init_fat16() + + file = fat16.find_direntry("/FILE0.TXT") + self.assertIsNotNone(file) + + data = fat16.read_cluster(file.cluster) + fat16.write_cluster(file.cluster, data) + + with open(os.path.join(filesystem, "file0.txt"), "rb") as f: + self.assertEqual(fat16.read_file(file), f.read()) + + def test_write_file_in_same_content(self): + """ + Test writing the same content to the file back to it + """ + fat16 = self.init_fat16() + + file = fat16.find_direntry("/FILE0.TXT") + self.assertIsNotNone(file) + + self.assertEqual(fat16.read_file(file), b"Hello, world! 0\n") + + fat16.write_file(file, b"Hello, world! 0\n") + self.assertEqual(fat16.read_file(file), b"Hello, world! 0\n") + + with open(os.path.join(filesystem, "file0.txt"), "rb") as f: + self.assertEqual(f.read(), b"Hello, world! 0\n") + + def test_modify_content_same_clusters(self): + """ + Test modifying the content of the file without changing the number of + clusters + """ + fat16 = self.init_fat16() + + file = fat16.find_direntry("/FILE0.TXT") + self.assertIsNotNone(file) + + new_content = b"Hello, world! Modified\n" + self.assertEqual(fat16.read_file(file), b"Hello, world! 0\n") + + fat16.write_file(file, new_content) + self.assertEqual(fat16.read_file(file), new_content) + + with open(os.path.join(filesystem, "file0.txt"), "rb") as f: + self.assertEqual(f.read(), new_content) + + def test_truncate_file_same_clusters_less(self): + """ + Test truncating the file without changing number of clusters + Test decreasing the file size + """ + fat16 = self.init_fat16() + + file = fat16.find_direntry("/FILE0.TXT") + self.assertIsNotNone(file) + + self.assertEqual(fat16.read_file(file), b"Hello, world! 0\n") + + fat16.truncate_file(file, 5) + new_content = fat16.read_file(file) + self.assertEqual(new_content, b"Hello") + + with open(os.path.join(filesystem, "file0.txt"), "rb") as f: + self.assertEqual(f.read(), new_content) + + def test_truncate_file_same_clusters_more(self): + """ + Test truncating the file without changing number of clusters + Test increase the file size + """ + fat16 = self.init_fat16() + + file = fat16.find_direntry("/FILE0.TXT") + self.assertIsNotNone(file) + + self.assertEqual(fat16.read_file(file), b"Hello, world! 0\n") + + fat16.truncate_file(file, 20) + new_content = fat16.read_file(file) + self.assertIsNotNone(new_content) + + # random pattern will be appended to the file, and its not always the + # same + self.assertEqual(new_content[:16], b"Hello, world! 
0\n") + self.assertEqual(len(new_content), 20) + + with open(os.path.join(filesystem, "file0.txt"), "rb") as f: + self.assertEqual(f.read(), new_content) + + def test_write_large_file(self): + """ + Test writing a large file + """ + fat16 = self.init_fat16() + + file = fat16.find_direntry("/LARGE1.TXT") + self.assertIsNotNone(file) + + # The content of LARGE1 is A * 1KB, B * 1KB, C * 1KB, ..., P * 1KB + # Lets change it to be Z * 1KB, Y * 1KB, X * 1KB, ..., K * 1KB + # without changing the number of clusters or filesize + new_content = b"".join([bytes([0x5A - i] * 1024) for i in range(16)]) + fat16.write_file(file, new_content) + self.assertEqual(fat16.read_file(file), new_content) + + with open(os.path.join(filesystem, "large1.txt"), "rb") as f: + self.assertEqual(f.read(), new_content) + + def test_truncate_file_change_clusters_less(self): + """ + Test truncating a file by reducing the number of clusters + """ + fat16 = self.init_fat16() + + file = fat16.find_direntry("/LARGE1.TXT") + self.assertIsNotNone(file) + + fat16.truncate_file(file, 1) + self.assertEqual(fat16.read_file(file), b"A") + + with open(os.path.join(filesystem, "large1.txt"), "rb") as f: + self.assertEqual(f.read(), b"A") + + def test_write_file_change_clusters_less(self): + """ + Test truncating a file by reducing the number of clusters + """ + fat16 = self.init_fat16() + + file = fat16.find_direntry("/LARGE2.TXT") + self.assertIsNotNone(file) + + new_content = b"X" * 8 * 1024 + b"Y" * 8 * 1024 + fat16.write_file(file, new_content) + self.assertEqual(fat16.read_file(file), new_content) + + with open(os.path.join(filesystem, "large2.txt"), "rb") as f: + self.assertEqual(f.read(), new_content) + + def test_write_file_change_clusters_more(self): + """ + Test truncating a file by increasing the number of clusters + """ + fat16 = self.init_fat16() + + file = fat16.find_direntry("/LARGE2.TXT") + self.assertIsNotNone(file) + + # from 3 clusters to 4 clusters + new_content = ( + b"W" * 8 * 1024 + + b"X" * 8 * 1024 + + b"Y" * 8 * 1024 + + b"Z" * 8 * 1024 + ) + fat16.write_file(file, new_content) + self.assertEqual(fat16.read_file(file), new_content) + + with open(os.path.join(filesystem, "large2.txt"), "rb") as f: + self.assertEqual(f.read(), new_content) + + def test_write_file_change_clusters_more_non_contiguous_2_mappings(self): + """ + Test truncating a file by increasing the number of clusters Here we + allocate the new clusters in a way that makes them non-contiguous so + that we will get 2 cluster mappings for the file + """ + fat16 = self.init_fat16() + + file = fat16.find_direntry("/LARGE1.TXT") + self.assertIsNotNone(file) + + # from 2 clusters to 3 clusters with non-contiguous allocation + fat16.truncate_file(file, 3 * 0x2000, allocate_non_continuous=True) + new_content = b"X" * 8 * 1024 + b"Y" * 8 * 1024 + b"Z" * 8 * 1024 + fat16.write_file(file, new_content) + self.assertEqual(fat16.read_file(file), new_content) + + with open(os.path.join(filesystem, "large1.txt"), "rb") as f: + self.assertEqual(f.read(), new_content) + + def test_write_file_change_clusters_more_non_contiguous_3_mappings(self): + """ + Test truncating a file by increasing the number of clusters Here we + allocate the new clusters in a way that makes them non-contiguous so + that we will get 3 cluster mappings for the file + """ + fat16 = self.init_fat16() + + file = fat16.find_direntry("/LARGE1.TXT") + self.assertIsNotNone(file) + + # from 2 clusters to 4 clusters with non-contiguous allocation + fat16.truncate_file(file, 4 * 0x2000, 
allocate_non_continuous=True) + new_content = ( + b"W" * 8 * 1024 + + b"X" * 8 * 1024 + + b"Y" * 8 * 1024 + + b"Z" * 8 * 1024 + ) + fat16.write_file(file, new_content) + self.assertEqual(fat16.read_file(file), new_content) + + with open(os.path.join(filesystem, "large1.txt"), "rb") as f: + self.assertEqual(f.read(), new_content) + + def test_create_file(self): + """ + Test creating a new file + """ + fat16 = self.init_fat16() + + new_file = fat16.create_file("/NEWFILE.TXT") + + self.assertIsNotNone(new_file) + self.assertEqual(new_file.size_bytes, 0) + + new_content = b"Hello, world! New file\n" + fat16.write_file(new_file, new_content) + self.assertEqual(fat16.read_file(new_file), new_content) + + with open(os.path.join(filesystem, "newfile.txt"), "rb") as f: + self.assertEqual(f.read(), new_content) + + # TODO: support deleting files + + +if __name__ == "__main__": + # This is a specific test for vvfat driver + iotests.main(supported_fmts=["vvfat"], supported_protocols=["file"]) diff --git a/tests/qemu-iotests/tests/vvfat.out b/tests/qemu-iotests/tests/vvfat.out new file mode 100755 index 00000000000..b6f257674e9 --- /dev/null +++ b/tests/qemu-iotests/tests/vvfat.out @@ -0,0 +1,5 @@ +................ +---------------------------------------------------------------------- +Ran 16 tests + +OK diff --git a/tests/qemu-iotests/tests/write-zeroes-unmap b/tests/qemu-iotests/tests/write-zeroes-unmap index 7cfeeaf8391..f90fb8e8d27 100755 --- a/tests/qemu-iotests/tests/write-zeroes-unmap +++ b/tests/qemu-iotests/tests/write-zeroes-unmap @@ -32,6 +32,7 @@ cd .. _supported_fmt raw _supported_proto file _supported_os Linux +_require_disk_usage create_test_image() { _make_test_img -f $IMGFMT 1m diff --git a/tests/qtest/acpi-utils.c b/tests/qtest/acpi-utils.c index 673fc975862..9dc24fbe5a0 100644 --- a/tests/qtest/acpi-utils.c +++ b/tests/qtest/acpi-utils.c @@ -156,5 +156,4 @@ uint64_t acpi_find_rsdp_address_uefi(QTestState *qts, uint64_t start, g_usleep(TEST_DELAY); } g_assert_not_reached(); - return 0; } diff --git a/tests/qtest/adm1266-test.c b/tests/qtest/adm1266-test.c index 6c312c499fc..5ae82062342 100644 --- a/tests/qtest/adm1266-test.c +++ b/tests/qtest/adm1266-test.c @@ -13,8 +13,8 @@ #include "libqtest-single.h" #include "libqos/qgraph.h" #include "libqos/i2c.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qnum.h" +#include "qobject/qdict.h" +#include "qobject/qnum.h" #include "qemu/bitops.h" #define TEST_ID "adm1266-test" diff --git a/tests/qtest/adm1272-test.c b/tests/qtest/adm1272-test.c index 63f8514801b..2abda8d5be4 100644 --- a/tests/qtest/adm1272-test.c +++ b/tests/qtest/adm1272-test.c @@ -12,8 +12,8 @@ #include "libqtest-single.h" #include "libqos/qgraph.h" #include "libqos/i2c.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qnum.h" +#include "qobject/qdict.h" +#include "qobject/qnum.h" #include "qemu/bitops.h" #define TEST_ID "adm1272-test" diff --git a/tests/qtest/ahci-test.c b/tests/qtest/ahci-test.c index 5a1923f721b..e8aabfc13f5 100644 --- a/tests/qtest/ahci-test.c +++ b/tests/qtest/ahci-test.c @@ -30,7 +30,7 @@ #include "libqos/ahci.h" #include "libqos/pci-pc.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/host-utils.h" #include "hw/pci/pci_ids.h" @@ -1881,7 +1881,6 @@ static void test_io_interface(gconstpointer opaque) sector = offset_sector(opts->offset, opts->address_type, bufsize); test_io_rw_interface(opts->address_type, opts->io_type, bufsize, sector); g_free(opts); - return; } static void create_ahci_io_test(enum IOMode type, enum 
AddrMode addr, diff --git a/tests/qtest/arm-cpu-features.c b/tests/qtest/arm-cpu-features.c index cfd6f773535..eb8ddebffbf 100644 --- a/tests/qtest/arm-cpu-features.c +++ b/tests/qtest/arm-cpu-features.c @@ -11,8 +11,8 @@ #include "qemu/osdep.h" #include "qemu/bitops.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qjson.h" +#include "qobject/qdict.h" +#include "qobject/qjson.h" /* * We expect the SVE max-vq to be 16. Also it must be <= 64 @@ -419,21 +419,28 @@ static void pauth_tests_default(QTestState *qts, const char *cpu_type) assert_has_feature_enabled(qts, cpu_type, "pauth"); assert_has_feature_disabled(qts, cpu_type, "pauth-impdef"); assert_has_feature_disabled(qts, cpu_type, "pauth-qarma3"); + assert_has_feature_disabled(qts, cpu_type, "pauth-qarma5"); assert_set_feature(qts, cpu_type, "pauth", false); assert_set_feature(qts, cpu_type, "pauth", true); assert_set_feature(qts, cpu_type, "pauth-impdef", true); assert_set_feature(qts, cpu_type, "pauth-impdef", false); assert_set_feature(qts, cpu_type, "pauth-qarma3", true); assert_set_feature(qts, cpu_type, "pauth-qarma3", false); + assert_set_feature(qts, cpu_type, "pauth-qarma5", true); + assert_set_feature(qts, cpu_type, "pauth-qarma5", false); assert_error(qts, cpu_type, - "cannot enable pauth-impdef or pauth-qarma3 without pauth", + "cannot enable pauth-impdef, pauth-qarma3 or pauth-qarma5 without pauth", "{ 'pauth': false, 'pauth-impdef': true }"); assert_error(qts, cpu_type, - "cannot enable pauth-impdef or pauth-qarma3 without pauth", + "cannot enable pauth-impdef, pauth-qarma3 or pauth-qarma5 without pauth", "{ 'pauth': false, 'pauth-qarma3': true }"); assert_error(qts, cpu_type, - "cannot enable both pauth-impdef and pauth-qarma3", - "{ 'pauth': true, 'pauth-impdef': true, 'pauth-qarma3': true }"); + "cannot enable pauth-impdef, pauth-qarma3 or pauth-qarma5 without pauth", + "{ 'pauth': false, 'pauth-qarma5': true }"); + assert_error(qts, cpu_type, + "cannot enable pauth-impdef, pauth-qarma3 and pauth-qarma5 at the same time", + "{ 'pauth': true, 'pauth-impdef': true, 'pauth-qarma3': true," + " 'pauth-qarma5': true }"); } static void test_query_cpu_model_expansion(const void *data) diff --git a/tests/qtest/aspeed-hace-utils.c b/tests/qtest/aspeed-hace-utils.c new file mode 100644 index 00000000000..0f7f911e5eb --- /dev/null +++ b/tests/qtest/aspeed-hace-utils.c @@ -0,0 +1,646 @@ +/* + * QTest testcase for the ASPEED Hash and Crypto Engine + * + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright 2021 IBM Corp. 
+ */ + +#include "qemu/osdep.h" +#include "libqtest.h" +#include "qemu/bitops.h" +#include "aspeed-hace-utils.h" + +/* + * Test vector is the ascii "abc" + * + * Expected results were generated using command line utitiles: + * + * echo -n -e 'abc' | dd of=/tmp/test + * for hash in sha512sum sha384sum sha256sum md5sum; do $hash /tmp/test; done + * + */ +static const uint8_t test_vector[3] = {0x61, 0x62, 0x63}; + +static const uint8_t test_result_sha512[64] = { + 0xdd, 0xaf, 0x35, 0xa1, 0x93, 0x61, 0x7a, 0xba, 0xcc, 0x41, 0x73, 0x49, + 0xae, 0x20, 0x41, 0x31, 0x12, 0xe6, 0xfa, 0x4e, 0x89, 0xa9, 0x7e, 0xa2, + 0x0a, 0x9e, 0xee, 0xe6, 0x4b, 0x55, 0xd3, 0x9a, 0x21, 0x92, 0x99, 0x2a, + 0x27, 0x4f, 0xc1, 0xa8, 0x36, 0xba, 0x3c, 0x23, 0xa3, 0xfe, 0xeb, 0xbd, + 0x45, 0x4d, 0x44, 0x23, 0x64, 0x3c, 0xe8, 0x0e, 0x2a, 0x9a, 0xc9, 0x4f, + 0xa5, 0x4c, 0xa4, 0x9f}; + +static const uint8_t test_result_sha384[48] = { + 0xcb, 0x00, 0x75, 0x3f, 0x45, 0xa3, 0x5e, 0x8b, 0xb5, 0xa0, 0x3d, 0x69, + 0x9a, 0xc6, 0x50, 0x07, 0x27, 0x2c, 0x32, 0xab, 0x0e, 0xde, 0xd1, 0x63, + 0x1a, 0x8b, 0x60, 0x5a, 0x43, 0xff, 0x5b, 0xed, 0x80, 0x86, 0x07, 0x2b, + 0xa1, 0xe7, 0xcc, 0x23, 0x58, 0xba, 0xec, 0xa1, 0x34, 0xc8, 0x25, 0xa7}; + +static const uint8_t test_result_sha256[32] = { + 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, + 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, + 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad}; + +static const uint8_t test_result_md5[16] = { + 0x90, 0x01, 0x50, 0x98, 0x3c, 0xd2, 0x4f, 0xb0, 0xd6, 0x96, 0x3f, 0x7d, + 0x28, 0xe1, 0x7f, 0x72}; + +/* + * The Scatter-Gather Test vector is the ascii "abc" "def" "ghi", broken + * into blocks of 3 characters as shown + * + * Expected results were generated using command line utitiles: + * + * echo -n -e 'abcdefghijkl' | dd of=/tmp/test + * for hash in sha512sum sha384sum sha256sum; do $hash /tmp/test; done + * + */ +static const uint8_t test_vector_sg1[6] = {0x61, 0x62, 0x63, 0x64, 0x65, 0x66}; +static const uint8_t test_vector_sg2[3] = {0x67, 0x68, 0x69}; +static const uint8_t test_vector_sg3[3] = {0x6a, 0x6b, 0x6c}; + +static const uint8_t test_result_sg_sha512[64] = { + 0x17, 0x80, 0x7c, 0x72, 0x8e, 0xe3, 0xba, 0x35, 0xe7, 0xcf, 0x7a, 0xf8, + 0x23, 0x11, 0x6d, 0x26, 0xe4, 0x1e, 0x5d, 0x4d, 0x6c, 0x2f, 0xf1, 0xf3, + 0x72, 0x0d, 0x3d, 0x96, 0xaa, 0xcb, 0x6f, 0x69, 0xde, 0x64, 0x2e, 0x63, + 0xd5, 0xb7, 0x3f, 0xc3, 0x96, 0xc1, 0x2b, 0xe3, 0x8b, 0x2b, 0xd5, 0xd8, + 0x84, 0x25, 0x7c, 0x32, 0xc8, 0xf6, 0xd0, 0x85, 0x4a, 0xe6, 0xb5, 0x40, + 0xf8, 0x6d, 0xda, 0x2e}; + +static const uint8_t test_result_sg_sha384[48] = { + 0x10, 0x3c, 0xa9, 0x6c, 0x06, 0xa1, 0xce, 0x79, 0x8f, 0x08, 0xf8, 0xef, + 0xf0, 0xdf, 0xb0, 0xcc, 0xdb, 0x56, 0x7d, 0x48, 0xb2, 0x85, 0xb2, 0x3d, + 0x0c, 0xd7, 0x73, 0x45, 0x46, 0x67, 0xa3, 0xc2, 0xfa, 0x5f, 0x1b, 0x58, + 0xd9, 0xcd, 0xf2, 0x32, 0x9b, 0xd9, 0x97, 0x97, 0x30, 0xbf, 0xaa, 0xff}; + +static const uint8_t test_result_sg_sha256[32] = { + 0xd6, 0x82, 0xed, 0x4c, 0xa4, 0xd9, 0x89, 0xc1, 0x34, 0xec, 0x94, 0xf1, + 0x55, 0x1e, 0x1e, 0xc5, 0x80, 0xdd, 0x6d, 0x5a, 0x6e, 0xcd, 0xe9, 0xf3, + 0xd3, 0x5e, 0x6e, 0x4a, 0x71, 0x7f, 0xbd, 0xe4}; + +/* + * The accumulative mode requires firmware to provide internal initial state + * and message padding (including length L at the end of padding). + * + * This test vector is a ascii text "abc" with padding message. 
+ * + * Expected results were generated using command line utitiles: + * + * echo -n -e 'abc' | dd of=/tmp/test + * for hash in sha512sum sha384sum sha256sum; do $hash /tmp/test; done + */ +static const uint8_t test_vector_accum_512[128] = { + 0x61, 0x62, 0x63, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18}; + +static const uint8_t test_vector_accum_384[128] = { + 0x61, 0x62, 0x63, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18}; + +static const uint8_t test_vector_accum_256[64] = { + 0x61, 0x62, 0x63, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18}; + +static const uint8_t test_result_accum_sha512[64] = { + 0xdd, 0xaf, 0x35, 0xa1, 0x93, 0x61, 0x7a, 0xba, 0xcc, 0x41, 0x73, 0x49, + 0xae, 0x20, 0x41, 0x31, 0x12, 0xe6, 0xfa, 0x4e, 0x89, 0xa9, 0x7e, 0xa2, + 0x0a, 0x9e, 0xee, 0xe6, 0x4b, 0x55, 0xd3, 0x9a, 0x21, 0x92, 0x99, 0x2a, + 0x27, 0x4f, 0xc1, 0xa8, 0x36, 0xba, 0x3c, 0x23, 0xa3, 0xfe, 0xeb, 0xbd, + 0x45, 0x4d, 0x44, 0x23, 0x64, 0x3c, 0xe8, 0x0e, 0x2a, 0x9a, 0xc9, 0x4f, + 0xa5, 0x4c, 0xa4, 0x9f}; + +static const uint8_t test_result_accum_sha384[48] = { + 0xcb, 0x00, 0x75, 0x3f, 0x45, 0xa3, 0x5e, 0x8b, 0xb5, 0xa0, 0x3d, 0x69, + 0x9a, 0xc6, 0x50, 0x07, 0x27, 0x2c, 0x32, 0xab, 0x0e, 0xde, 0xd1, 0x63, + 0x1a, 0x8b, 0x60, 0x5a, 0x43, 0xff, 0x5b, 0xed, 0x80, 0x86, 0x07, 0x2b, + 0xa1, 0xe7, 0xcc, 0x23, 0x58, 0xba, 0xec, 0xa1, 0x34, 0xc8, 0x25, 0xa7}; + +static const uint8_t test_result_accum_sha256[32] = { + 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, + 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, + 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad}; + +static void write_regs(QTestState *s, uint32_t base, uint64_t src, + uint32_t length, uint64_t out, uint32_t method) +{ + 
qtest_writel(s, base + HACE_HASH_SRC, extract64(src, 0, 32)); + qtest_writel(s, base + HACE_HASH_SRC_HI, extract64(src, 32, 32)); + qtest_writel(s, base + HACE_HASH_DIGEST, extract64(out, 0, 32)); + qtest_writel(s, base + HACE_HASH_DIGEST_HI, extract64(out, 32, 32)); + qtest_writel(s, base + HACE_HASH_DATA_LEN, length); + qtest_writel(s, base + HACE_HASH_CMD, HACE_SHA_BE_EN | method); +} + +void aspeed_test_md5(const char *machine, const uint32_t base, + const uint64_t src_addr) + +{ + QTestState *s = qtest_init(machine); + + uint64_t digest_addr = src_addr + 0x010000; + uint8_t digest[16] = {0}; + + /* Check engine is idle, no busy or irq bits set */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Write test vector into memory */ + qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector)); + + write_regs(s, base, src_addr, sizeof(test_vector), + digest_addr, HACE_ALGO_MD5); + + /* Check hash IRQ status is asserted */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); + + /* Clear IRQ status and check status is deasserted */ + qtest_writel(s, base + HACE_STS, 0x00000200); + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Read computed digest from memory */ + qtest_memread(s, digest_addr, digest, sizeof(digest)); + + /* Check result of computation */ + g_assert_cmpmem(digest, sizeof(digest), + test_result_md5, sizeof(digest)); + + qtest_quit(s); +} + +void aspeed_test_sha256(const char *machine, const uint32_t base, + const uint64_t src_addr) +{ + QTestState *s = qtest_init(machine); + + const uint64_t digest_addr = src_addr + 0x10000; + uint8_t digest[32] = {0}; + + /* Check engine is idle, no busy or irq bits set */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Write test vector into memory */ + qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector)); + + write_regs(s, base, src_addr, sizeof(test_vector), digest_addr, + HACE_ALGO_SHA256); + + /* Check hash IRQ status is asserted */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); + + /* Clear IRQ status and check status is deasserted */ + qtest_writel(s, base + HACE_STS, 0x00000200); + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Read computed digest from memory */ + qtest_memread(s, digest_addr, digest, sizeof(digest)); + + /* Check result of computation */ + g_assert_cmpmem(digest, sizeof(digest), + test_result_sha256, sizeof(digest)); + + qtest_quit(s); +} + +void aspeed_test_sha384(const char *machine, const uint32_t base, + const uint64_t src_addr) +{ + QTestState *s = qtest_init(machine); + + const uint64_t digest_addr = src_addr + 0x10000; + uint8_t digest[48] = {0}; + + /* Check engine is idle, no busy or irq bits set */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Write test vector into memory */ + qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector)); + + write_regs(s, base, src_addr, sizeof(test_vector), digest_addr, + HACE_ALGO_SHA384); + + /* Check hash IRQ status is asserted */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); + + /* Clear IRQ status and check status is deasserted */ + qtest_writel(s, base + HACE_STS, 0x00000200); + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Read computed digest from memory */ + qtest_memread(s, digest_addr, digest, sizeof(digest)); + + /* Check result of computation */ + g_assert_cmpmem(digest, sizeof(digest), + test_result_sha384, sizeof(digest)); + + qtest_quit(s); +} + +void aspeed_test_sha512(const char 
*machine, const uint32_t base, + const uint64_t src_addr) +{ + QTestState *s = qtest_init(machine); + + const uint64_t digest_addr = src_addr + 0x10000; + uint8_t digest[64] = {0}; + + /* Check engine is idle, no busy or irq bits set */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Write test vector into memory */ + qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector)); + + write_regs(s, base, src_addr, sizeof(test_vector), digest_addr, + HACE_ALGO_SHA512); + + /* Check hash IRQ status is asserted */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); + + /* Clear IRQ status and check status is deasserted */ + qtest_writel(s, base + HACE_STS, 0x00000200); + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Read computed digest from memory */ + qtest_memread(s, digest_addr, digest, sizeof(digest)); + + /* Check result of computation */ + g_assert_cmpmem(digest, sizeof(digest), + test_result_sha512, sizeof(digest)); + + qtest_quit(s); +} + +void aspeed_test_sha256_sg(const char *machine, const uint32_t base, + const uint64_t src_addr) +{ + QTestState *s = qtest_init(machine); + + const uint64_t src_addr_1 = src_addr + 0x10000; + const uint64_t src_addr_2 = src_addr + 0x20000; + const uint64_t src_addr_3 = src_addr + 0x30000; + const uint64_t digest_addr = src_addr + 0x40000; + uint8_t digest[32] = {0}; + struct AspeedSgList array[] = { + { cpu_to_le32(sizeof(test_vector_sg1)), + cpu_to_le32(src_addr_1) }, + { cpu_to_le32(sizeof(test_vector_sg2)), + cpu_to_le32(src_addr_2) }, + { cpu_to_le32(sizeof(test_vector_sg3) | SG_LIST_LEN_LAST), + cpu_to_le32(src_addr_3) }, + }; + + /* Check engine is idle, no busy or irq bits set */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Write test vector into memory */ + qtest_memwrite(s, src_addr_1, test_vector_sg1, sizeof(test_vector_sg1)); + qtest_memwrite(s, src_addr_2, test_vector_sg2, sizeof(test_vector_sg2)); + qtest_memwrite(s, src_addr_3, test_vector_sg3, sizeof(test_vector_sg3)); + qtest_memwrite(s, src_addr, array, sizeof(array)); + + write_regs(s, base, src_addr, + (sizeof(test_vector_sg1) + + sizeof(test_vector_sg2) + + sizeof(test_vector_sg3)), + digest_addr, HACE_ALGO_SHA256 | HACE_SG_EN); + + /* Check hash IRQ status is asserted */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); + + /* Clear IRQ status and check status is deasserted */ + qtest_writel(s, base + HACE_STS, 0x00000200); + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Read computed digest from memory */ + qtest_memread(s, digest_addr, digest, sizeof(digest)); + + /* Check result of computation */ + g_assert_cmpmem(digest, sizeof(digest), + test_result_sg_sha256, sizeof(digest)); + + qtest_quit(s); +} + +void aspeed_test_sha384_sg(const char *machine, const uint32_t base, + const uint64_t src_addr) +{ + QTestState *s = qtest_init(machine); + + const uint64_t src_addr_1 = src_addr + 0x10000; + const uint64_t src_addr_2 = src_addr + 0x20000; + const uint64_t src_addr_3 = src_addr + 0x30000; + const uint64_t digest_addr = src_addr + 0x40000; + uint8_t digest[48] = {0}; + struct AspeedSgList array[] = { + { cpu_to_le32(sizeof(test_vector_sg1)), + cpu_to_le32(src_addr_1) }, + { cpu_to_le32(sizeof(test_vector_sg2)), + cpu_to_le32(src_addr_2) }, + { cpu_to_le32(sizeof(test_vector_sg3) | SG_LIST_LEN_LAST), + cpu_to_le32(src_addr_3) }, + }; + + /* Check engine is idle, no busy or irq bits set */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Write test vector 
into memory */ + qtest_memwrite(s, src_addr_1, test_vector_sg1, sizeof(test_vector_sg1)); + qtest_memwrite(s, src_addr_2, test_vector_sg2, sizeof(test_vector_sg2)); + qtest_memwrite(s, src_addr_3, test_vector_sg3, sizeof(test_vector_sg3)); + qtest_memwrite(s, src_addr, array, sizeof(array)); + + write_regs(s, base, src_addr, + (sizeof(test_vector_sg1) + + sizeof(test_vector_sg2) + + sizeof(test_vector_sg3)), + digest_addr, HACE_ALGO_SHA384 | HACE_SG_EN); + + /* Check hash IRQ status is asserted */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); + + /* Clear IRQ status and check status is deasserted */ + qtest_writel(s, base + HACE_STS, 0x00000200); + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Read computed digest from memory */ + qtest_memread(s, digest_addr, digest, sizeof(digest)); + + /* Check result of computation */ + g_assert_cmpmem(digest, sizeof(digest), + test_result_sg_sha384, sizeof(digest)); + + qtest_quit(s); +} + +void aspeed_test_sha512_sg(const char *machine, const uint32_t base, + const uint64_t src_addr) +{ + QTestState *s = qtest_init(machine); + + const uint64_t src_addr_1 = src_addr + 0x10000; + const uint64_t src_addr_2 = src_addr + 0x20000; + const uint64_t src_addr_3 = src_addr + 0x30000; + const uint64_t digest_addr = src_addr + 0x40000; + uint8_t digest[64] = {0}; + struct AspeedSgList array[] = { + { cpu_to_le32(sizeof(test_vector_sg1)), + cpu_to_le32(src_addr_1) }, + { cpu_to_le32(sizeof(test_vector_sg2)), + cpu_to_le32(src_addr_2) }, + { cpu_to_le32(sizeof(test_vector_sg3) | SG_LIST_LEN_LAST), + cpu_to_le32(src_addr_3) }, + }; + + /* Check engine is idle, no busy or irq bits set */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Write test vector into memory */ + qtest_memwrite(s, src_addr_1, test_vector_sg1, sizeof(test_vector_sg1)); + qtest_memwrite(s, src_addr_2, test_vector_sg2, sizeof(test_vector_sg2)); + qtest_memwrite(s, src_addr_3, test_vector_sg3, sizeof(test_vector_sg3)); + qtest_memwrite(s, src_addr, array, sizeof(array)); + + write_regs(s, base, src_addr, + (sizeof(test_vector_sg1) + + sizeof(test_vector_sg2) + + sizeof(test_vector_sg3)), + digest_addr, HACE_ALGO_SHA512 | HACE_SG_EN); + + /* Check hash IRQ status is asserted */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); + + /* Clear IRQ status and check status is deasserted */ + qtest_writel(s, base + HACE_STS, 0x00000200); + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Read computed digest from memory */ + qtest_memread(s, digest_addr, digest, sizeof(digest)); + + /* Check result of computation */ + g_assert_cmpmem(digest, sizeof(digest), + test_result_sg_sha512, sizeof(digest)); + + qtest_quit(s); +} + +void aspeed_test_sha256_accum(const char *machine, const uint32_t base, + const uint64_t src_addr) +{ + QTestState *s = qtest_init(machine); + + const uint64_t buffer_addr = src_addr + 0x10000; + const uint64_t digest_addr = src_addr + 0x40000; + uint8_t digest[32] = {0}; + struct AspeedSgList array[] = { + { cpu_to_le32(sizeof(test_vector_accum_256) | SG_LIST_LEN_LAST), + cpu_to_le32(buffer_addr) }, + }; + + /* Check engine is idle, no busy or irq bits set */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Write test vector into memory */ + qtest_memwrite(s, buffer_addr, test_vector_accum_256, + sizeof(test_vector_accum_256)); + qtest_memwrite(s, src_addr, array, sizeof(array)); + + write_regs(s, base, src_addr, sizeof(test_vector_accum_256), + digest_addr, HACE_ALGO_SHA256 | 
HACE_SG_EN | HACE_ACCUM_EN); + + /* Check hash IRQ status is asserted */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); + + /* Clear IRQ status and check status is deasserted */ + qtest_writel(s, base + HACE_STS, 0x00000200); + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Read computed digest from memory */ + qtest_memread(s, digest_addr, digest, sizeof(digest)); + + /* Check result of computation */ + g_assert_cmpmem(digest, sizeof(digest), + test_result_accum_sha256, sizeof(digest)); + + qtest_quit(s); +} + +void aspeed_test_sha384_accum(const char *machine, const uint32_t base, + const uint64_t src_addr) +{ + QTestState *s = qtest_init(machine); + + const uint64_t buffer_addr = src_addr + 0x10000; + const uint64_t digest_addr = src_addr + 0x40000; + uint8_t digest[48] = {0}; + struct AspeedSgList array[] = { + { cpu_to_le32(sizeof(test_vector_accum_384) | SG_LIST_LEN_LAST), + cpu_to_le32(buffer_addr) }, + }; + + /* Check engine is idle, no busy or irq bits set */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Write test vector into memory */ + qtest_memwrite(s, buffer_addr, test_vector_accum_384, + sizeof(test_vector_accum_384)); + qtest_memwrite(s, src_addr, array, sizeof(array)); + + write_regs(s, base, src_addr, sizeof(test_vector_accum_384), + digest_addr, HACE_ALGO_SHA384 | HACE_SG_EN | HACE_ACCUM_EN); + + /* Check hash IRQ status is asserted */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); + + /* Clear IRQ status and check status is deasserted */ + qtest_writel(s, base + HACE_STS, 0x00000200); + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Read computed digest from memory */ + qtest_memread(s, digest_addr, digest, sizeof(digest)); + + /* Check result of computation */ + g_assert_cmpmem(digest, sizeof(digest), + test_result_accum_sha384, sizeof(digest)); + + qtest_quit(s); +} + +void aspeed_test_sha512_accum(const char *machine, const uint32_t base, + const uint64_t src_addr) +{ + QTestState *s = qtest_init(machine); + + const uint64_t buffer_addr = src_addr + 0x10000; + const uint64_t digest_addr = src_addr + 0x40000; + uint8_t digest[64] = {0}; + struct AspeedSgList array[] = { + { cpu_to_le32(sizeof(test_vector_accum_512) | SG_LIST_LEN_LAST), + cpu_to_le32(buffer_addr) }, + }; + + /* Check engine is idle, no busy or irq bits set */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Write test vector into memory */ + qtest_memwrite(s, buffer_addr, test_vector_accum_512, + sizeof(test_vector_accum_512)); + qtest_memwrite(s, src_addr, array, sizeof(array)); + + write_regs(s, base, src_addr, sizeof(test_vector_accum_512), + digest_addr, HACE_ALGO_SHA512 | HACE_SG_EN | HACE_ACCUM_EN); + + /* Check hash IRQ status is asserted */ + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); + + /* Clear IRQ status and check status is deasserted */ + qtest_writel(s, base + HACE_STS, 0x00000200); + g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); + + /* Read computed digest from memory */ + qtest_memread(s, digest_addr, digest, sizeof(digest)); + + /* Check result of computation */ + g_assert_cmpmem(digest, sizeof(digest), + test_result_accum_sha512, sizeof(digest)); + + qtest_quit(s); +} + +void aspeed_test_addresses(const char *machine, const uint32_t base, + const struct AspeedMasks *expected) +{ + QTestState *s = qtest_init(machine); + + /* + * Check command mode is zero, meaning engine is in direct access mode, + * as this affects the masking behavior of 
the HASH_SRC register. + */ + g_assert_cmphex(qtest_readl(s, base + HACE_CMD), ==, 0); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC), ==, 0); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC_HI), ==, 0); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST), ==, 0); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST_HI), ==, 0); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_KEY_BUFF), ==, 0); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_KEY_BUFF_HI), ==, 0); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DATA_LEN), ==, 0); + + /* Check that the address masking is correct */ + qtest_writel(s, base + HACE_HASH_SRC, 0xffffffff); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC), ==, expected->src); + + qtest_writel(s, base + HACE_HASH_SRC_HI, 0xffffffff); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC_HI), + ==, expected->src_hi); + + qtest_writel(s, base + HACE_HASH_DIGEST, 0xffffffff); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST), ==, + expected->dest); + + qtest_writel(s, base + HACE_HASH_DIGEST_HI, 0xffffffff); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST_HI), ==, + expected->dest_hi); + + qtest_writel(s, base + HACE_HASH_KEY_BUFF, 0xffffffff); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_KEY_BUFF), ==, + expected->key); + + qtest_writel(s, base + HACE_HASH_KEY_BUFF_HI, 0xffffffff); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_KEY_BUFF_HI), ==, + expected->key_hi); + + qtest_writel(s, base + HACE_HASH_DATA_LEN, 0xffffffff); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DATA_LEN), ==, + expected->len); + + /* Reset to zero */ + qtest_writel(s, base + HACE_HASH_SRC, 0); + qtest_writel(s, base + HACE_HASH_SRC_HI, 0); + qtest_writel(s, base + HACE_HASH_DIGEST, 0); + qtest_writel(s, base + HACE_HASH_DIGEST_HI, 0); + qtest_writel(s, base + HACE_HASH_KEY_BUFF, 0); + qtest_writel(s, base + HACE_HASH_KEY_BUFF_HI, 0); + qtest_writel(s, base + HACE_HASH_DATA_LEN, 0); + + /* Check that all bits are now zero */ + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC), ==, 0); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC_HI), ==, 0); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST), ==, 0); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST_HI), ==, 0); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_KEY_BUFF), ==, 0); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_KEY_BUFF_HI), ==, 0); + g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DATA_LEN), ==, 0); + + qtest_quit(s); +} + diff --git a/tests/qtest/aspeed-hace-utils.h b/tests/qtest/aspeed-hace-utils.h new file mode 100644 index 00000000000..c8b2ec45af2 --- /dev/null +++ b/tests/qtest/aspeed-hace-utils.h @@ -0,0 +1,84 @@ +/* + * QTest testcase for the ASPEED Hash and Crypto Engine + * + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright 2021 IBM Corp. 
+ */ + +#ifndef TESTS_ASPEED_HACE_UTILS_H +#define TESTS_ASPEED_HACE_UTILS_H + +#include "qemu/osdep.h" +#include "libqtest.h" +#include "qemu/bitops.h" + +#define HACE_CMD 0x10 +#define HACE_SHA_BE_EN BIT(3) +#define HACE_MD5_LE_EN BIT(2) +#define HACE_ALGO_MD5 0 +#define HACE_ALGO_SHA1 BIT(5) +#define HACE_ALGO_SHA224 BIT(6) +#define HACE_ALGO_SHA256 (BIT(4) | BIT(6)) +#define HACE_ALGO_SHA512 (BIT(5) | BIT(6)) +#define HACE_ALGO_SHA384 (BIT(5) | BIT(6) | BIT(10)) +#define HACE_SG_EN BIT(18) +#define HACE_ACCUM_EN BIT(8) + +#define HACE_STS 0x1c +#define HACE_RSA_ISR BIT(13) +#define HACE_CRYPTO_ISR BIT(12) +#define HACE_HASH_ISR BIT(9) +#define HACE_RSA_BUSY BIT(2) +#define HACE_CRYPTO_BUSY BIT(1) +#define HACE_HASH_BUSY BIT(0) +#define HACE_HASH_SRC 0x20 +#define HACE_HASH_DIGEST 0x24 +#define HACE_HASH_KEY_BUFF 0x28 +#define HACE_HASH_DATA_LEN 0x2c +#define HACE_HASH_CMD 0x30 +#define HACE_HASH_SRC_HI 0x90 +#define HACE_HASH_DIGEST_HI 0x94 +#define HACE_HASH_KEY_BUFF_HI 0x98 + +/* Scatter-Gather Hash */ +#define SG_LIST_LEN_LAST BIT(31) +struct AspeedSgList { + uint32_t len; + uint32_t addr; +} __attribute__ ((__packed__)); + +struct AspeedMasks { + uint32_t src; + uint32_t dest; + uint32_t key; + uint32_t len; + uint32_t src_hi; + uint32_t dest_hi; + uint32_t key_hi; +}; + +void aspeed_test_md5(const char *machine, const uint32_t base, + const uint64_t src_addr); +void aspeed_test_sha256(const char *machine, const uint32_t base, + const uint64_t src_addr); +void aspeed_test_sha384(const char *machine, const uint32_t base, + const uint64_t src_addr); +void aspeed_test_sha512(const char *machine, const uint32_t base, + const uint64_t src_addr); +void aspeed_test_sha256_sg(const char *machine, const uint32_t base, + const uint64_t src_addr); +void aspeed_test_sha384_sg(const char *machine, const uint32_t base, + const uint64_t src_addr); +void aspeed_test_sha512_sg(const char *machine, const uint32_t base, + const uint64_t src_addr); +void aspeed_test_sha256_accum(const char *machine, const uint32_t base, + const uint64_t src_addr); +void aspeed_test_sha384_accum(const char *machine, const uint32_t base, + const uint64_t src_addr); +void aspeed_test_sha512_accum(const char *machine, const uint32_t base, + const uint64_t src_addr); +void aspeed_test_addresses(const char *machine, const uint32_t base, + const struct AspeedMasks *expected); + +#endif /* TESTS_ASPEED_HACE_UTILS_H */ + diff --git a/tests/qtest/aspeed-smc-utils.c b/tests/qtest/aspeed-smc-utils.c new file mode 100644 index 00000000000..c27d09e767e --- /dev/null +++ b/tests/qtest/aspeed-smc-utils.c @@ -0,0 +1,686 @@ +/* + * QTest testcase for the M25P80 Flash (Using the Aspeed SPI + * Controller) + * + * Copyright (C) 2016 IBM Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/bswap.h" +#include "libqtest-single.h" +#include "qemu/bitops.h" +#include "aspeed-smc-utils.h" + +/* + * Use an explicit bswap for the values read/wrote to the flash region + * as they are BE and the Aspeed CPU is LE. + */ +static inline uint32_t make_be32(uint32_t data) +{ + return bswap32(data); +} + +static inline void spi_writel(const AspeedSMCTestData *data, uint64_t offset, + uint32_t value) +{ + qtest_writel(data->s, data->spi_base + offset, value); +} + +static inline uint32_t spi_readl(const AspeedSMCTestData *data, uint64_t offset) +{ + return qtest_readl(data->s, data->spi_base + offset); +} + +static inline void flash_writeb(const AspeedSMCTestData *data, uint64_t offset, + uint8_t value) +{ + qtest_writeb(data->s, data->flash_base + offset, value); +} + +static inline void flash_writel(const AspeedSMCTestData *data, uint64_t offset, + uint32_t value) +{ + qtest_writel(data->s, data->flash_base + offset, value); +} + +static inline uint8_t flash_readb(const AspeedSMCTestData *data, + uint64_t offset) +{ + return qtest_readb(data->s, data->flash_base + offset); +} + +static inline uint32_t flash_readl(const AspeedSMCTestData *data, + uint64_t offset) +{ + return qtest_readl(data->s, data->flash_base + offset); +} + +static void spi_conf(const AspeedSMCTestData *data, uint32_t value) +{ + uint32_t conf = spi_readl(data, R_CONF); + + conf |= value; + spi_writel(data, R_CONF, conf); +} + +static void spi_conf_remove(const AspeedSMCTestData *data, uint32_t value) +{ + uint32_t conf = spi_readl(data, R_CONF); + + conf &= ~value; + spi_writel(data, R_CONF, conf); +} + +static void spi_ce_ctrl(const AspeedSMCTestData *data, uint32_t value) +{ + uint32_t conf = spi_readl(data, R_CE_CTRL); + + conf |= value; + spi_writel(data, R_CE_CTRL, conf); +} + +static void spi_ctrl_setmode(const AspeedSMCTestData *data, uint8_t mode, + uint8_t cmd) +{ + uint32_t ctrl_reg = R_CTRL0 + data->cs * 4; + uint32_t ctrl = spi_readl(data, ctrl_reg); + ctrl &= ~(CTRL_USERMODE | 0xff << 16); + ctrl |= mode | (cmd << 16); + spi_writel(data, ctrl_reg, ctrl); +} + +static void spi_ctrl_start_user(const AspeedSMCTestData *data) +{ + uint32_t ctrl_reg = R_CTRL0 + data->cs * 4; + uint32_t ctrl = spi_readl(data, ctrl_reg); + + ctrl |= CTRL_USERMODE | CTRL_CE_STOP_ACTIVE; + spi_writel(data, ctrl_reg, ctrl); + + ctrl &= ~CTRL_CE_STOP_ACTIVE; + spi_writel(data, ctrl_reg, ctrl); +} + +static void spi_ctrl_stop_user(const AspeedSMCTestData *data) +{ + uint32_t ctrl_reg = R_CTRL0 + data->cs * 4; + uint32_t ctrl = spi_readl(data, ctrl_reg); + + ctrl |= CTRL_USERMODE | CTRL_CE_STOP_ACTIVE; + spi_writel(data, ctrl_reg, ctrl); +} + +static void spi_ctrl_set_io_mode(const AspeedSMCTestData *data, uint32_t value) +{ + uint32_t ctrl_reg = R_CTRL0 + data->cs * 4; + uint32_t ctrl = spi_readl(data, ctrl_reg); + uint32_t mode; + + mode = value & CTRL_IO_MODE_MASK; + ctrl &= ~CTRL_IO_MODE_MASK; + ctrl |= mode; + spi_writel(data, ctrl_reg, ctrl); +} + +static void flash_reset(const AspeedSMCTestData 
*data) +{ + spi_conf(data, 1 << (CONF_ENABLE_W0 + data->cs)); + + spi_ctrl_start_user(data); + flash_writeb(data, 0, RESET_ENABLE); + flash_writeb(data, 0, RESET_MEMORY); + flash_writeb(data, 0, WREN); + flash_writeb(data, 0, BULK_ERASE); + flash_writeb(data, 0, WRDI); + spi_ctrl_stop_user(data); + + spi_conf_remove(data, 1 << (CONF_ENABLE_W0 + data->cs)); +} + +static void read_page(const AspeedSMCTestData *data, uint32_t addr, + uint32_t *page) +{ + int i; + + spi_ctrl_start_user(data); + + flash_writeb(data, 0, EN_4BYTE_ADDR); + flash_writeb(data, 0, READ); + flash_writel(data, 0, make_be32(addr)); + + /* Continuous read are supported */ + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + page[i] = make_be32(flash_readl(data, 0)); + } + spi_ctrl_stop_user(data); +} + +static void read_page_mem(const AspeedSMCTestData *data, uint32_t addr, + uint32_t *page) +{ + int i; + + /* move out USER mode to use direct reads from the AHB bus */ + spi_ctrl_setmode(data, CTRL_READMODE, READ); + + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + page[i] = make_be32(flash_readl(data, addr + i * 4)); + } +} + +static void write_page_mem(const AspeedSMCTestData *data, uint32_t addr, + uint32_t write_value) +{ + spi_ctrl_setmode(data, CTRL_WRITEMODE, PP); + + for (int i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + flash_writel(data, addr + i * 4, write_value); + } +} + +static void assert_page_mem(const AspeedSMCTestData *data, uint32_t addr, + uint32_t expected_value) +{ + uint32_t page[FLASH_PAGE_SIZE / 4]; + read_page_mem(data, addr, page); + for (int i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, expected_value); + } +} + +void aspeed_smc_test_read_jedec(const void *data) +{ + const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data; + uint32_t jedec = 0x0; + + spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs)); + + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, JEDEC_READ); + jedec |= flash_readb(test_data, 0) << 16; + jedec |= flash_readb(test_data, 0) << 8; + jedec |= flash_readb(test_data, 0); + spi_ctrl_stop_user(test_data); + + flash_reset(test_data); + + g_assert_cmphex(jedec, ==, test_data->jedec_id); +} + +void aspeed_smc_test_erase_sector(const void *data) +{ + const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data; + uint32_t some_page_addr = test_data->page_addr; + uint32_t page[FLASH_PAGE_SIZE / 4]; + int i; + + spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs)); + + /* + * Previous page should be full of 0xffs after backend is + * initialized + */ + read_page(test_data, some_page_addr - FLASH_PAGE_SIZE, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, 0xffffffff); + } + + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, EN_4BYTE_ADDR); + flash_writeb(test_data, 0, WREN); + flash_writeb(test_data, 0, PP); + flash_writel(test_data, 0, make_be32(some_page_addr)); + + /* Fill the page with its own addresses */ + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + flash_writel(test_data, 0, make_be32(some_page_addr + i * 4)); + } + spi_ctrl_stop_user(test_data); + + /* Check the page is correctly written */ + read_page(test_data, some_page_addr, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, some_page_addr + i * 4); + } + + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, WREN); + flash_writeb(test_data, 0, EN_4BYTE_ADDR); + flash_writeb(test_data, 0, ERASE_SECTOR); + flash_writel(test_data, 0, make_be32(some_page_addr)); + 
spi_ctrl_stop_user(test_data); + + /* Check the page is erased */ + read_page(test_data, some_page_addr, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, 0xffffffff); + } + + flash_reset(test_data); +} + +void aspeed_smc_test_erase_all(const void *data) +{ + const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data; + uint32_t some_page_addr = test_data->page_addr; + uint32_t page[FLASH_PAGE_SIZE / 4]; + int i; + + spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs)); + + /* + * Previous page should be full of 0xffs after backend is + * initialized + */ + read_page(test_data, some_page_addr - FLASH_PAGE_SIZE, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, 0xffffffff); + } + + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, EN_4BYTE_ADDR); + flash_writeb(test_data, 0, WREN); + flash_writeb(test_data, 0, PP); + flash_writel(test_data, 0, make_be32(some_page_addr)); + + /* Fill the page with its own addresses */ + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + flash_writel(test_data, 0, make_be32(some_page_addr + i * 4)); + } + spi_ctrl_stop_user(test_data); + + /* Check the page is correctly written */ + read_page(test_data, some_page_addr, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, some_page_addr + i * 4); + } + + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, WREN); + flash_writeb(test_data, 0, BULK_ERASE); + spi_ctrl_stop_user(test_data); + + /* Check the page is erased */ + read_page(test_data, some_page_addr, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, 0xffffffff); + } + + flash_reset(test_data); +} + +void aspeed_smc_test_write_page(const void *data) +{ + const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data; + uint32_t my_page_addr = test_data->page_addr; + uint32_t some_page_addr = my_page_addr + FLASH_PAGE_SIZE; + uint32_t page[FLASH_PAGE_SIZE / 4]; + int i; + + spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs)); + + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, EN_4BYTE_ADDR); + flash_writeb(test_data, 0, WREN); + flash_writeb(test_data, 0, PP); + flash_writel(test_data, 0, make_be32(my_page_addr)); + + /* Fill the page with its own addresses */ + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + flash_writel(test_data, 0, make_be32(my_page_addr + i * 4)); + } + spi_ctrl_stop_user(test_data); + + /* Check what was written */ + read_page(test_data, my_page_addr, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, my_page_addr + i * 4); + } + + /* Check some other page. It should be full of 0xff */ + read_page(test_data, some_page_addr, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, 0xffffffff); + } + + flash_reset(test_data); +} + +void aspeed_smc_test_read_page_mem(const void *data) +{ + const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data; + uint32_t my_page_addr = test_data->page_addr; + uint32_t some_page_addr = my_page_addr + FLASH_PAGE_SIZE; + uint32_t page[FLASH_PAGE_SIZE / 4]; + int i; + + /* + * Enable 4BYTE mode for controller. + */ + spi_ce_ctrl(test_data, 1 << (CRTL_EXTENDED0 + test_data->cs)); + + /* Enable 4BYTE mode for flash. 
*/ + spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs)); + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, EN_4BYTE_ADDR); + flash_writeb(test_data, 0, WREN); + flash_writeb(test_data, 0, PP); + flash_writel(test_data, 0, make_be32(my_page_addr)); + + /* Fill the page with its own addresses */ + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + flash_writel(test_data, 0, make_be32(my_page_addr + i * 4)); + } + spi_ctrl_stop_user(test_data); + spi_conf_remove(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs)); + + /* Check what was written */ + read_page_mem(test_data, my_page_addr, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, my_page_addr + i * 4); + } + + /* Check some other page. It should be full of 0xff */ + read_page_mem(test_data, some_page_addr, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, 0xffffffff); + } + + flash_reset(test_data); +} + +void aspeed_smc_test_write_page_mem(const void *data) +{ + const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data; + uint32_t my_page_addr = test_data->page_addr; + uint32_t page[FLASH_PAGE_SIZE / 4]; + int i; + + /* + * Enable 4BYTE mode for controller. + */ + spi_ce_ctrl(test_data, 1 << (CRTL_EXTENDED0 + test_data->cs)); + + /* Enable 4BYTE mode for flash. */ + spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs)); + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, EN_4BYTE_ADDR); + flash_writeb(test_data, 0, WREN); + spi_ctrl_stop_user(test_data); + + /* move out USER mode to use direct writes to the AHB bus */ + spi_ctrl_setmode(test_data, CTRL_WRITEMODE, PP); + + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + flash_writel(test_data, my_page_addr + i * 4, + make_be32(my_page_addr + i * 4)); + } + + /* Check what was written */ + read_page_mem(test_data, my_page_addr, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, my_page_addr + i * 4); + } + + flash_reset(test_data); +} + +void aspeed_smc_test_read_status_reg(const void *data) +{ + const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data; + uint8_t r; + + spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs)); + + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, RDSR); + r = flash_readb(test_data, 0); + spi_ctrl_stop_user(test_data); + + g_assert_cmphex(r & SR_WEL, ==, 0); + g_assert(!qtest_qom_get_bool + (test_data->s, test_data->node, "write-enable")); + + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, WREN); + flash_writeb(test_data, 0, RDSR); + r = flash_readb(test_data, 0); + spi_ctrl_stop_user(test_data); + + g_assert_cmphex(r & SR_WEL, ==, SR_WEL); + g_assert(qtest_qom_get_bool + (test_data->s, test_data->node, "write-enable")); + + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, WRDI); + flash_writeb(test_data, 0, RDSR); + r = flash_readb(test_data, 0); + spi_ctrl_stop_user(test_data); + + g_assert_cmphex(r & SR_WEL, ==, 0); + g_assert(!qtest_qom_get_bool + (test_data->s, test_data->node, "write-enable")); + + flash_reset(test_data); +} + +void aspeed_smc_test_status_reg_write_protection(const void *data) +{ + const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data; + uint8_t r; + + spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs)); + + /* default case: WP# is high and SRWD is low -> status register writable */ + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, WREN); + /* test ability to write SRWD */ + flash_writeb(test_data, 0, WRSR); + 
flash_writeb(test_data, 0, SRWD); + flash_writeb(test_data, 0, RDSR); + r = flash_readb(test_data, 0); + spi_ctrl_stop_user(test_data); + g_assert_cmphex(r & SRWD, ==, SRWD); + + /* WP# high and SRWD high -> status register writable */ + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, WREN); + /* test ability to write SRWD */ + flash_writeb(test_data, 0, WRSR); + flash_writeb(test_data, 0, 0); + flash_writeb(test_data, 0, RDSR); + r = flash_readb(test_data, 0); + spi_ctrl_stop_user(test_data); + g_assert_cmphex(r & SRWD, ==, 0); + + /* WP# low and SRWD low -> status register writable */ + qtest_set_irq_in(test_data->s, test_data->node, "WP#", 0, 0); + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, WREN); + /* test ability to write SRWD */ + flash_writeb(test_data, 0, WRSR); + flash_writeb(test_data, 0, SRWD); + flash_writeb(test_data, 0, RDSR); + r = flash_readb(test_data, 0); + spi_ctrl_stop_user(test_data); + g_assert_cmphex(r & SRWD, ==, SRWD); + + /* WP# low and SRWD high -> status register NOT writable */ + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0 , WREN); + /* test ability to write SRWD */ + flash_writeb(test_data, 0, WRSR); + flash_writeb(test_data, 0, 0); + flash_writeb(test_data, 0, RDSR); + r = flash_readb(test_data, 0); + spi_ctrl_stop_user(test_data); + /* write is not successful */ + g_assert_cmphex(r & SRWD, ==, SRWD); + + qtest_set_irq_in(test_data->s, test_data->node, "WP#", 0, 1); + flash_reset(test_data); +} + +void aspeed_smc_test_write_block_protect(const void *data) +{ + const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data; + uint32_t sector_size = 65536; + uint32_t n_sectors = 512; + + spi_ce_ctrl(test_data, 1 << (CRTL_EXTENDED0 + test_data->cs)); + spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs)); + + uint32_t bp_bits = 0b0; + + for (int i = 0; i < 16; i++) { + bp_bits = ((i & 0b1000) << 3) | ((i & 0b0111) << 2); + + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, WREN); + flash_writeb(test_data, 0, BULK_ERASE); + flash_writeb(test_data, 0, WREN); + flash_writeb(test_data, 0, WRSR); + flash_writeb(test_data, 0, bp_bits); + flash_writeb(test_data, 0, EN_4BYTE_ADDR); + flash_writeb(test_data, 0, WREN); + spi_ctrl_stop_user(test_data); + + uint32_t num_protected_sectors = i ? MIN(1 << (i - 1), n_sectors) : 0; + uint32_t protection_start = n_sectors - num_protected_sectors; + uint32_t protection_end = n_sectors; + + for (int sector = 0; sector < n_sectors; sector++) { + uint32_t addr = sector * sector_size; + + assert_page_mem(test_data, addr, 0xffffffff); + write_page_mem(test_data, addr, make_be32(0xabcdef12)); + + uint32_t expected_value = protection_start <= sector + && sector < protection_end + ? 
0xffffffff : 0xabcdef12; + + assert_page_mem(test_data, addr, expected_value); + } + } + + flash_reset(test_data); +} + +void aspeed_smc_test_write_block_protect_bottom_bit(const void *data) +{ + const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data; + uint32_t sector_size = 65536; + uint32_t n_sectors = 512; + + spi_ce_ctrl(test_data, 1 << (CRTL_EXTENDED0 + test_data->cs)); + spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs)); + + /* top bottom bit is enabled */ + uint32_t bp_bits = 0b00100 << 3; + + for (int i = 0; i < 16; i++) { + bp_bits = (((i & 0b1000) | 0b0100) << 3) | ((i & 0b0111) << 2); + + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, WREN); + flash_writeb(test_data, 0, BULK_ERASE); + flash_writeb(test_data, 0, WREN); + flash_writeb(test_data, 0, WRSR); + flash_writeb(test_data, 0, bp_bits); + flash_writeb(test_data, 0, EN_4BYTE_ADDR); + flash_writeb(test_data, 0, WREN); + spi_ctrl_stop_user(test_data); + + uint32_t num_protected_sectors = i ? MIN(1 << (i - 1), n_sectors) : 0; + uint32_t protection_start = 0; + uint32_t protection_end = num_protected_sectors; + + for (int sector = 0; sector < n_sectors; sector++) { + uint32_t addr = sector * sector_size; + + assert_page_mem(test_data, addr, 0xffffffff); + write_page_mem(test_data, addr, make_be32(0xabcdef12)); + + uint32_t expected_value = protection_start <= sector + && sector < protection_end + ? 0xffffffff : 0xabcdef12; + + assert_page_mem(test_data, addr, expected_value); + } + } + + flash_reset(test_data); +} + +void aspeed_smc_test_write_page_qpi(const void *data) +{ + const AspeedSMCTestData *test_data = (const AspeedSMCTestData *)data; + uint32_t my_page_addr = test_data->page_addr; + uint32_t some_page_addr = my_page_addr + FLASH_PAGE_SIZE; + uint32_t page[FLASH_PAGE_SIZE / 4]; + uint32_t page_pattern[] = { + 0xebd8c134, 0x5da196bc, 0xae15e729, 0x5085ccdf + }; + int i; + + spi_conf(test_data, 1 << (CONF_ENABLE_W0 + test_data->cs)); + + spi_ctrl_start_user(test_data); + flash_writeb(test_data, 0, EN_4BYTE_ADDR); + flash_writeb(test_data, 0, WREN); + flash_writeb(test_data, 0, PP); + flash_writel(test_data, 0, make_be32(my_page_addr)); + + /* Set QPI mode */ + spi_ctrl_set_io_mode(test_data, CTRL_IO_QUAD_IO); + + /* Fill the page pattern */ + for (i = 0; i < ARRAY_SIZE(page_pattern); i++) { + flash_writel(test_data, 0, make_be32(page_pattern[i])); + } + + /* Fill the page with its own addresses */ + for (; i < FLASH_PAGE_SIZE / 4; i++) { + flash_writel(test_data, 0, make_be32(my_page_addr + i * 4)); + } + + /* Restore io mode */ + spi_ctrl_set_io_mode(test_data, 0); + spi_ctrl_stop_user(test_data); + + /* Check what was written */ + read_page(test_data, my_page_addr, page); + for (i = 0; i < ARRAY_SIZE(page_pattern); i++) { + g_assert_cmphex(page[i], ==, page_pattern[i]); + } + for (; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, my_page_addr + i * 4); + } + + /* Check some other page. It should be full of 0xff */ + read_page(test_data, some_page_addr, page); + for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { + g_assert_cmphex(page[i], ==, 0xffffffff); + } + + flash_reset(test_data); +} + diff --git a/tests/qtest/aspeed-smc-utils.h b/tests/qtest/aspeed-smc-utils.h new file mode 100644 index 00000000000..b07870f3b8f --- /dev/null +++ b/tests/qtest/aspeed-smc-utils.h @@ -0,0 +1,95 @@ +/* + * QTest testcase for the M25P80 Flash (Using the Aspeed SPI + * Controller) + * + * Copyright (C) 2016 IBM Corp. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef TESTS_ASPEED_SMC_UTILS_H +#define TESTS_ASPEED_SMC_UTILS_H + +#include "qemu/osdep.h" +#include "qemu/bswap.h" +#include "libqtest-single.h" +#include "qemu/bitops.h" + +/* + * ASPEED SPI Controller registers + */ +#define R_CONF 0x00 +#define CONF_ENABLE_W0 16 +#define R_CE_CTRL 0x04 +#define CRTL_EXTENDED0 0 /* 32 bit addressing for SPI */ +#define R_CTRL0 0x10 +#define CTRL_IO_QUAD_IO BIT(31) +#define CTRL_CE_STOP_ACTIVE BIT(2) +#define CTRL_READMODE 0x0 +#define CTRL_FREADMODE 0x1 +#define CTRL_WRITEMODE 0x2 +#define CTRL_USERMODE 0x3 +#define SR_WEL BIT(1) + +/* + * Flash commands + */ +enum { + JEDEC_READ = 0x9f, + RDSR = 0x5, + WRDI = 0x4, + BULK_ERASE = 0xc7, + READ = 0x03, + PP = 0x02, + WRSR = 0x1, + WREN = 0x6, + SRWD = 0x80, + RESET_ENABLE = 0x66, + RESET_MEMORY = 0x99, + EN_4BYTE_ADDR = 0xB7, + ERASE_SECTOR = 0xd8, +}; + +#define CTRL_IO_MODE_MASK (BIT(31) | BIT(30) | BIT(29) | BIT(28)) +#define FLASH_PAGE_SIZE 256 + +typedef struct AspeedSMCTestData { + QTestState *s; + uint64_t spi_base; + uint64_t flash_base; + uint32_t jedec_id; + char *tmp_path; + uint8_t cs; + const char *node; + uint32_t page_addr; +} AspeedSMCTestData; + +void aspeed_smc_test_read_jedec(const void *data); +void aspeed_smc_test_erase_sector(const void *data); +void aspeed_smc_test_erase_all(const void *data); +void aspeed_smc_test_write_page(const void *data); +void aspeed_smc_test_read_page_mem(const void *data); +void aspeed_smc_test_write_page_mem(const void *data); +void aspeed_smc_test_read_status_reg(const void *data); +void aspeed_smc_test_status_reg_write_protection(const void *data); +void aspeed_smc_test_write_block_protect(const void *data); +void aspeed_smc_test_write_block_protect_bottom_bit(const void *data); +void aspeed_smc_test_write_page_qpi(const void *data); + +#endif /* TESTS_ASPEED_SMC_UTILS_H */ diff --git a/tests/qtest/aspeed_gpio-test.c b/tests/qtest/aspeed_gpio-test.c index d38f51d7190..12675d4cbba 100644 --- a/tests/qtest/aspeed_gpio-test.c +++ b/tests/qtest/aspeed_gpio-test.c @@ -25,7 +25,7 @@ #include "qemu/osdep.h" #include "qemu/bitops.h" #include "qemu/timer.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "libqtest-single.h" #define AST2600_GPIO_BASE 0x1E780000 diff --git a/tests/qtest/aspeed_hace-test.c b/tests/qtest/aspeed_hace-test.c index ce86a44672e..38777020ca3 100644 --- a/tests/qtest/aspeed_hace-test.c +++ b/tests/qtest/aspeed_hace-test.c @@ -6,599 +6,222 
@@ */ #include "qemu/osdep.h" - #include "libqtest.h" #include "qemu/bitops.h" +#include "aspeed-hace-utils.h" -#define HACE_CMD 0x10 -#define HACE_SHA_BE_EN BIT(3) -#define HACE_MD5_LE_EN BIT(2) -#define HACE_ALGO_MD5 0 -#define HACE_ALGO_SHA1 BIT(5) -#define HACE_ALGO_SHA224 BIT(6) -#define HACE_ALGO_SHA256 (BIT(4) | BIT(6)) -#define HACE_ALGO_SHA512 (BIT(5) | BIT(6)) -#define HACE_ALGO_SHA384 (BIT(5) | BIT(6) | BIT(10)) -#define HACE_SG_EN BIT(18) -#define HACE_ACCUM_EN BIT(8) - -#define HACE_STS 0x1c -#define HACE_RSA_ISR BIT(13) -#define HACE_CRYPTO_ISR BIT(12) -#define HACE_HASH_ISR BIT(9) -#define HACE_RSA_BUSY BIT(2) -#define HACE_CRYPTO_BUSY BIT(1) -#define HACE_HASH_BUSY BIT(0) -#define HACE_HASH_SRC 0x20 -#define HACE_HASH_DIGEST 0x24 -#define HACE_HASH_KEY_BUFF 0x28 -#define HACE_HASH_DATA_LEN 0x2c -#define HACE_HASH_CMD 0x30 -/* Scatter-Gather Hash */ -#define SG_LIST_LEN_LAST BIT(31) -struct AspeedSgList { - uint32_t len; - uint32_t addr; -} __attribute__ ((__packed__)); - -/* - * Test vector is the ascii "abc" - * - * Expected results were generated using command line utitiles: - * - * echo -n -e 'abc' | dd of=/tmp/test - * for hash in sha512sum sha256sum md5sum; do $hash /tmp/test; done - * - */ -static const uint8_t test_vector[] = {0x61, 0x62, 0x63}; - -static const uint8_t test_result_sha512[] = { - 0xdd, 0xaf, 0x35, 0xa1, 0x93, 0x61, 0x7a, 0xba, 0xcc, 0x41, 0x73, 0x49, - 0xae, 0x20, 0x41, 0x31, 0x12, 0xe6, 0xfa, 0x4e, 0x89, 0xa9, 0x7e, 0xa2, - 0x0a, 0x9e, 0xee, 0xe6, 0x4b, 0x55, 0xd3, 0x9a, 0x21, 0x92, 0x99, 0x2a, - 0x27, 0x4f, 0xc1, 0xa8, 0x36, 0xba, 0x3c, 0x23, 0xa3, 0xfe, 0xeb, 0xbd, - 0x45, 0x4d, 0x44, 0x23, 0x64, 0x3c, 0xe8, 0x0e, 0x2a, 0x9a, 0xc9, 0x4f, - 0xa5, 0x4c, 0xa4, 0x9f}; +static const struct AspeedMasks ast1030_masks = { + .src = 0x7fffffff, + .dest = 0x7ffffff8, + .key = 0x7ffffff8, + .len = 0x0fffffff, +}; -static const uint8_t test_result_sha256[] = { - 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, - 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, - 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad}; +static const struct AspeedMasks ast2600_masks = { + .src = 0x7fffffff, + .dest = 0x7ffffff8, + .key = 0x7ffffff8, + .len = 0x0fffffff, +}; -static const uint8_t test_result_md5[] = { - 0x90, 0x01, 0x50, 0x98, 0x3c, 0xd2, 0x4f, 0xb0, 0xd6, 0x96, 0x3f, 0x7d, - 0x28, 0xe1, 0x7f, 0x72}; +static const struct AspeedMasks ast2500_masks = { + .src = 0x3fffffff, + .dest = 0x3ffffff8, + .key = 0x3fffffc0, + .len = 0x0fffffff, +}; -/* - * The Scatter-Gather Test vector is the ascii "abc" "def" "ghi", broken - * into blocks of 3 characters as shown - * - * Expected results were generated using command line utitiles: - * - * echo -n -e 'abcdefghijkl' | dd of=/tmp/test - * for hash in sha512sum sha256sum; do $hash /tmp/test; done - * - */ -static const uint8_t test_vector_sg1[] = {0x61, 0x62, 0x63, 0x64, 0x65, 0x66}; -static const uint8_t test_vector_sg2[] = {0x67, 0x68, 0x69}; -static const uint8_t test_vector_sg3[] = {0x6a, 0x6b, 0x6c}; - -static const uint8_t test_result_sg_sha512[] = { - 0x17, 0x80, 0x7c, 0x72, 0x8e, 0xe3, 0xba, 0x35, 0xe7, 0xcf, 0x7a, 0xf8, - 0x23, 0x11, 0x6d, 0x26, 0xe4, 0x1e, 0x5d, 0x4d, 0x6c, 0x2f, 0xf1, 0xf3, - 0x72, 0x0d, 0x3d, 0x96, 0xaa, 0xcb, 0x6f, 0x69, 0xde, 0x64, 0x2e, 0x63, - 0xd5, 0xb7, 0x3f, 0xc3, 0x96, 0xc1, 0x2b, 0xe3, 0x8b, 0x2b, 0xd5, 0xd8, - 0x84, 0x25, 0x7c, 0x32, 0xc8, 0xf6, 0xd0, 0x85, 0x4a, 0xe6, 0xb5, 0x40, - 0xf8, 0x6d, 0xda, 0x2e}; - -static const uint8_t 
test_result_sg_sha256[] = { - 0xd6, 0x82, 0xed, 0x4c, 0xa4, 0xd9, 0x89, 0xc1, 0x34, 0xec, 0x94, 0xf1, - 0x55, 0x1e, 0x1e, 0xc5, 0x80, 0xdd, 0x6d, 0x5a, 0x6e, 0xcd, 0xe9, 0xf3, - 0xd3, 0x5e, 0x6e, 0x4a, 0x71, 0x7f, 0xbd, 0xe4}; +static const struct AspeedMasks ast2400_masks = { + .src = 0x0fffffff, + .dest = 0x0ffffff8, + .key = 0x0fffffc0, + .len = 0x0fffffff, +}; -/* - * The accumulative mode requires firmware to provide internal initial state - * and message padding (including length L at the end of padding). - * - * This test vector is a ascii text "abc" with padding message. - * - * Expected results were generated using command line utitiles: - * - * echo -n -e 'abc' | dd of=/tmp/test - * for hash in sha512sum sha256sum; do $hash /tmp/test; done - */ -static const uint8_t test_vector_accum_512[] = { - 0x61, 0x62, 0x63, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18}; - -static const uint8_t test_vector_accum_256[] = { - 0x61, 0x62, 0x63, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18}; - -static const uint8_t test_result_accum_sha512[] = { - 0xdd, 0xaf, 0x35, 0xa1, 0x93, 0x61, 0x7a, 0xba, 0xcc, 0x41, 0x73, 0x49, - 0xae, 0x20, 0x41, 0x31, 0x12, 0xe6, 0xfa, 0x4e, 0x89, 0xa9, 0x7e, 0xa2, - 0x0a, 0x9e, 0xee, 0xe6, 0x4b, 0x55, 0xd3, 0x9a, 0x21, 0x92, 0x99, 0x2a, - 0x27, 0x4f, 0xc1, 0xa8, 0x36, 0xba, 0x3c, 0x23, 0xa3, 0xfe, 0xeb, 0xbd, - 0x45, 0x4d, 0x44, 0x23, 0x64, 0x3c, 0xe8, 0x0e, 0x2a, 0x9a, 0xc9, 0x4f, - 0xa5, 0x4c, 0xa4, 0x9f}; - -static const uint8_t test_result_accum_sha256[] = { - 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, - 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, - 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad}; - -static void write_regs(QTestState *s, uint32_t base, uint32_t src, - uint32_t length, uint32_t out, uint32_t method) +/* ast1030 */ +static void test_md5_ast1030(void) { - qtest_writel(s, base + HACE_HASH_SRC, src); - qtest_writel(s, base + HACE_HASH_DIGEST, out); - qtest_writel(s, base + HACE_HASH_DATA_LEN, length); - qtest_writel(s, base + HACE_HASH_CMD, HACE_SHA_BE_EN | method); + aspeed_test_md5("-machine ast1030-evb", 0x7e6d0000, 0x00000000); } -static void test_md5(const char *machine, const uint32_t base, - const uint32_t src_addr) - +static void test_sha256_ast1030(void) { - QTestState *s = qtest_init(machine); - - uint32_t digest_addr = src_addr + 0x01000000; - uint8_t digest[16] = {0}; - - /* Check engine is idle, no busy or irq bits set */ - 
g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Write test vector into memory */ - qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector)); - - write_regs(s, base, src_addr, sizeof(test_vector), digest_addr, HACE_ALGO_MD5); - - /* Check hash IRQ status is asserted */ - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); - - /* Clear IRQ status and check status is deasserted */ - qtest_writel(s, base + HACE_STS, 0x00000200); - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Read computed digest from memory */ - qtest_memread(s, digest_addr, digest, sizeof(digest)); - - /* Check result of computation */ - g_assert_cmpmem(digest, sizeof(digest), - test_result_md5, sizeof(digest)); - - qtest_quit(s); + aspeed_test_sha256("-machine ast1030-evb", 0x7e6d0000, 0x00000000); } -static void test_sha256(const char *machine, const uint32_t base, - const uint32_t src_addr) +static void test_sha256_sg_ast1030(void) { - QTestState *s = qtest_init(machine); - - const uint32_t digest_addr = src_addr + 0x1000000; - uint8_t digest[32] = {0}; - - /* Check engine is idle, no busy or irq bits set */ - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Write test vector into memory */ - qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector)); - - write_regs(s, base, src_addr, sizeof(test_vector), digest_addr, HACE_ALGO_SHA256); - - /* Check hash IRQ status is asserted */ - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); - - /* Clear IRQ status and check status is deasserted */ - qtest_writel(s, base + HACE_STS, 0x00000200); - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Read computed digest from memory */ - qtest_memread(s, digest_addr, digest, sizeof(digest)); - - /* Check result of computation */ - g_assert_cmpmem(digest, sizeof(digest), - test_result_sha256, sizeof(digest)); - - qtest_quit(s); + aspeed_test_sha256_sg("-machine ast1030-evb", 0x7e6d0000, 0x00000000); } -static void test_sha512(const char *machine, const uint32_t base, - const uint32_t src_addr) +static void test_sha384_ast1030(void) { - QTestState *s = qtest_init(machine); - - const uint32_t digest_addr = src_addr + 0x1000000; - uint8_t digest[64] = {0}; - - /* Check engine is idle, no busy or irq bits set */ - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Write test vector into memory */ - qtest_memwrite(s, src_addr, test_vector, sizeof(test_vector)); - - write_regs(s, base, src_addr, sizeof(test_vector), digest_addr, HACE_ALGO_SHA512); - - /* Check hash IRQ status is asserted */ - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); - - /* Clear IRQ status and check status is deasserted */ - qtest_writel(s, base + HACE_STS, 0x00000200); - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Read computed digest from memory */ - qtest_memread(s, digest_addr, digest, sizeof(digest)); - - /* Check result of computation */ - g_assert_cmpmem(digest, sizeof(digest), - test_result_sha512, sizeof(digest)); - - qtest_quit(s); + aspeed_test_sha384("-machine ast1030-evb", 0x7e6d0000, 0x00000000); } -static void test_sha256_sg(const char *machine, const uint32_t base, - const uint32_t src_addr) +static void test_sha384_sg_ast1030(void) { - QTestState *s = qtest_init(machine); - - const uint32_t src_addr_1 = src_addr + 0x1000000; - const uint32_t src_addr_2 = src_addr + 0x2000000; - const uint32_t src_addr_3 = src_addr + 0x3000000; - const uint32_t digest_addr = src_addr + 0x4000000; - uint8_t digest[32] = 
{0}; - struct AspeedSgList array[] = { - { cpu_to_le32(sizeof(test_vector_sg1)), - cpu_to_le32(src_addr_1) }, - { cpu_to_le32(sizeof(test_vector_sg2)), - cpu_to_le32(src_addr_2) }, - { cpu_to_le32(sizeof(test_vector_sg3) | SG_LIST_LEN_LAST), - cpu_to_le32(src_addr_3) }, - }; - - /* Check engine is idle, no busy or irq bits set */ - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Write test vector into memory */ - qtest_memwrite(s, src_addr_1, test_vector_sg1, sizeof(test_vector_sg1)); - qtest_memwrite(s, src_addr_2, test_vector_sg2, sizeof(test_vector_sg2)); - qtest_memwrite(s, src_addr_3, test_vector_sg3, sizeof(test_vector_sg3)); - qtest_memwrite(s, src_addr, array, sizeof(array)); - - write_regs(s, base, src_addr, - (sizeof(test_vector_sg1) - + sizeof(test_vector_sg2) - + sizeof(test_vector_sg3)), - digest_addr, HACE_ALGO_SHA256 | HACE_SG_EN); - - /* Check hash IRQ status is asserted */ - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); - - /* Clear IRQ status and check status is deasserted */ - qtest_writel(s, base + HACE_STS, 0x00000200); - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Read computed digest from memory */ - qtest_memread(s, digest_addr, digest, sizeof(digest)); - - /* Check result of computation */ - g_assert_cmpmem(digest, sizeof(digest), - test_result_sg_sha256, sizeof(digest)); - - qtest_quit(s); + aspeed_test_sha384_sg("-machine ast1030-evb", 0x7e6d0000, 0x00000000); } -static void test_sha512_sg(const char *machine, const uint32_t base, - const uint32_t src_addr) +static void test_sha512_ast1030(void) { - QTestState *s = qtest_init(machine); - - const uint32_t src_addr_1 = src_addr + 0x1000000; - const uint32_t src_addr_2 = src_addr + 0x2000000; - const uint32_t src_addr_3 = src_addr + 0x3000000; - const uint32_t digest_addr = src_addr + 0x4000000; - uint8_t digest[64] = {0}; - struct AspeedSgList array[] = { - { cpu_to_le32(sizeof(test_vector_sg1)), - cpu_to_le32(src_addr_1) }, - { cpu_to_le32(sizeof(test_vector_sg2)), - cpu_to_le32(src_addr_2) }, - { cpu_to_le32(sizeof(test_vector_sg3) | SG_LIST_LEN_LAST), - cpu_to_le32(src_addr_3) }, - }; - - /* Check engine is idle, no busy or irq bits set */ - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Write test vector into memory */ - qtest_memwrite(s, src_addr_1, test_vector_sg1, sizeof(test_vector_sg1)); - qtest_memwrite(s, src_addr_2, test_vector_sg2, sizeof(test_vector_sg2)); - qtest_memwrite(s, src_addr_3, test_vector_sg3, sizeof(test_vector_sg3)); - qtest_memwrite(s, src_addr, array, sizeof(array)); - - write_regs(s, base, src_addr, - (sizeof(test_vector_sg1) - + sizeof(test_vector_sg2) - + sizeof(test_vector_sg3)), - digest_addr, HACE_ALGO_SHA512 | HACE_SG_EN); - - /* Check hash IRQ status is asserted */ - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); - - /* Clear IRQ status and check status is deasserted */ - qtest_writel(s, base + HACE_STS, 0x00000200); - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Read computed digest from memory */ - qtest_memread(s, digest_addr, digest, sizeof(digest)); - - /* Check result of computation */ - g_assert_cmpmem(digest, sizeof(digest), - test_result_sg_sha512, sizeof(digest)); - - qtest_quit(s); + aspeed_test_sha512("-machine ast1030-evb", 0x7e6d0000, 0x00000000); } -static void test_sha256_accum(const char *machine, const uint32_t base, - const uint32_t src_addr) +static void test_sha512_sg_ast1030(void) { - QTestState *s = qtest_init(machine); - - const uint32_t 
buffer_addr = src_addr + 0x1000000; - const uint32_t digest_addr = src_addr + 0x4000000; - uint8_t digest[32] = {0}; - struct AspeedSgList array[] = { - { cpu_to_le32(sizeof(test_vector_accum_256) | SG_LIST_LEN_LAST), - cpu_to_le32(buffer_addr) }, - }; - - /* Check engine is idle, no busy or irq bits set */ - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Write test vector into memory */ - qtest_memwrite(s, buffer_addr, test_vector_accum_256, - sizeof(test_vector_accum_256)); - qtest_memwrite(s, src_addr, array, sizeof(array)); - - write_regs(s, base, src_addr, sizeof(test_vector_accum_256), - digest_addr, HACE_ALGO_SHA256 | HACE_SG_EN | HACE_ACCUM_EN); - - /* Check hash IRQ status is asserted */ - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); - - /* Clear IRQ status and check status is deasserted */ - qtest_writel(s, base + HACE_STS, 0x00000200); - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Read computed digest from memory */ - qtest_memread(s, digest_addr, digest, sizeof(digest)); - - /* Check result of computation */ - g_assert_cmpmem(digest, sizeof(digest), - test_result_accum_sha256, sizeof(digest)); - - qtest_quit(s); + aspeed_test_sha512_sg("-machine ast1030-evb", 0x7e6d0000, 0x00000000); } -static void test_sha512_accum(const char *machine, const uint32_t base, - const uint32_t src_addr) +static void test_sha256_accum_ast1030(void) { - QTestState *s = qtest_init(machine); - - const uint32_t buffer_addr = src_addr + 0x1000000; - const uint32_t digest_addr = src_addr + 0x4000000; - uint8_t digest[64] = {0}; - struct AspeedSgList array[] = { - { cpu_to_le32(sizeof(test_vector_accum_512) | SG_LIST_LEN_LAST), - cpu_to_le32(buffer_addr) }, - }; - - /* Check engine is idle, no busy or irq bits set */ - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Write test vector into memory */ - qtest_memwrite(s, buffer_addr, test_vector_accum_512, - sizeof(test_vector_accum_512)); - qtest_memwrite(s, src_addr, array, sizeof(array)); - - write_regs(s, base, src_addr, sizeof(test_vector_accum_512), - digest_addr, HACE_ALGO_SHA512 | HACE_SG_EN | HACE_ACCUM_EN); - - /* Check hash IRQ status is asserted */ - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0x00000200); - - /* Clear IRQ status and check status is deasserted */ - qtest_writel(s, base + HACE_STS, 0x00000200); - g_assert_cmphex(qtest_readl(s, base + HACE_STS), ==, 0); - - /* Read computed digest from memory */ - qtest_memread(s, digest_addr, digest, sizeof(digest)); - - /* Check result of computation */ - g_assert_cmpmem(digest, sizeof(digest), - test_result_accum_sha512, sizeof(digest)); - - qtest_quit(s); + aspeed_test_sha256_accum("-machine ast1030-evb", 0x7e6d0000, 0x00000000); } -struct masks { - uint32_t src; - uint32_t dest; - uint32_t len; -}; - -static const struct masks ast2600_masks = { - .src = 0x7fffffff, - .dest = 0x7ffffff8, - .len = 0x0fffffff, -}; - -static const struct masks ast2500_masks = { - .src = 0x3fffffff, - .dest = 0x3ffffff8, - .len = 0x0fffffff, -}; - -static const struct masks ast2400_masks = { - .src = 0x0fffffff, - .dest = 0x0ffffff8, - .len = 0x0fffffff, -}; - -static void test_addresses(const char *machine, const uint32_t base, - const struct masks *expected) +static void test_sha384_accum_ast1030(void) { - QTestState *s = qtest_init(machine); - - /* - * Check command mode is zero, meaning engine is in direct access mode, - * as this affects the masking behavior of the HASH_SRC register. 
- */ - g_assert_cmphex(qtest_readl(s, base + HACE_CMD), ==, 0); - g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC), ==, 0); - g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST), ==, 0); - g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DATA_LEN), ==, 0); - - - /* Check that the address masking is correct */ - qtest_writel(s, base + HACE_HASH_SRC, 0xffffffff); - g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC), ==, expected->src); - - qtest_writel(s, base + HACE_HASH_DIGEST, 0xffffffff); - g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST), ==, expected->dest); - - qtest_writel(s, base + HACE_HASH_DATA_LEN, 0xffffffff); - g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DATA_LEN), ==, expected->len); - - /* Reset to zero */ - qtest_writel(s, base + HACE_HASH_SRC, 0); - qtest_writel(s, base + HACE_HASH_DIGEST, 0); - qtest_writel(s, base + HACE_HASH_DATA_LEN, 0); + aspeed_test_sha384_accum("-machine ast1030-evb", 0x7e6d0000, 0x00000000); +} - /* Check that all bits are now zero */ - g_assert_cmphex(qtest_readl(s, base + HACE_HASH_SRC), ==, 0); - g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DIGEST), ==, 0); - g_assert_cmphex(qtest_readl(s, base + HACE_HASH_DATA_LEN), ==, 0); +static void test_sha512_accum_ast1030(void) +{ + aspeed_test_sha512_accum("-machine ast1030-evb", 0x7e6d0000, 0x00000000); +} - qtest_quit(s); +static void test_addresses_ast1030(void) +{ + aspeed_test_addresses("-machine ast1030-evb", 0x7e6d0000, &ast1030_masks); } /* ast2600 */ static void test_md5_ast2600(void) { - test_md5("-machine ast2600-evb", 0x1e6d0000, 0x80000000); + aspeed_test_md5("-machine ast2600-evb", 0x1e6d0000, 0x80000000); } static void test_sha256_ast2600(void) { - test_sha256("-machine ast2600-evb", 0x1e6d0000, 0x80000000); + aspeed_test_sha256("-machine ast2600-evb", 0x1e6d0000, 0x80000000); } static void test_sha256_sg_ast2600(void) { - test_sha256_sg("-machine ast2600-evb", 0x1e6d0000, 0x80000000); + aspeed_test_sha256_sg("-machine ast2600-evb", 0x1e6d0000, 0x80000000); +} + +static void test_sha384_ast2600(void) +{ + aspeed_test_sha384("-machine ast2600-evb", 0x1e6d0000, 0x80000000); +} + +static void test_sha384_sg_ast2600(void) +{ + aspeed_test_sha384_sg("-machine ast2600-evb", 0x1e6d0000, 0x80000000); } static void test_sha512_ast2600(void) { - test_sha512("-machine ast2600-evb", 0x1e6d0000, 0x80000000); + aspeed_test_sha512("-machine ast2600-evb", 0x1e6d0000, 0x80000000); } static void test_sha512_sg_ast2600(void) { - test_sha512_sg("-machine ast2600-evb", 0x1e6d0000, 0x80000000); + aspeed_test_sha512_sg("-machine ast2600-evb", 0x1e6d0000, 0x80000000); } static void test_sha256_accum_ast2600(void) { - test_sha256_accum("-machine ast2600-evb", 0x1e6d0000, 0x80000000); + aspeed_test_sha256_accum("-machine ast2600-evb", 0x1e6d0000, 0x80000000); +} + +static void test_sha384_accum_ast2600(void) +{ + aspeed_test_sha384_accum("-machine ast2600-evb", 0x1e6d0000, 0x80000000); } static void test_sha512_accum_ast2600(void) { - test_sha512_accum("-machine ast2600-evb", 0x1e6d0000, 0x80000000); + aspeed_test_sha512_accum("-machine ast2600-evb", 0x1e6d0000, 0x80000000); } static void test_addresses_ast2600(void) { - test_addresses("-machine ast2600-evb", 0x1e6d0000, &ast2600_masks); + aspeed_test_addresses("-machine ast2600-evb", 0x1e6d0000, &ast2600_masks); } /* ast2500 */ static void test_md5_ast2500(void) { - test_md5("-machine ast2500-evb", 0x1e6e3000, 0x80000000); + aspeed_test_md5("-machine ast2500-evb", 0x1e6e3000, 0x80000000); } static void test_sha256_ast2500(void) { - 
test_sha256("-machine ast2500-evb", 0x1e6e3000, 0x80000000); + aspeed_test_sha256("-machine ast2500-evb", 0x1e6e3000, 0x80000000); } static void test_sha512_ast2500(void) { - test_sha512("-machine ast2500-evb", 0x1e6e3000, 0x80000000); + aspeed_test_sha512("-machine ast2500-evb", 0x1e6e3000, 0x80000000); } static void test_addresses_ast2500(void) { - test_addresses("-machine ast2500-evb", 0x1e6e3000, &ast2500_masks); + aspeed_test_addresses("-machine ast2500-evb", 0x1e6e3000, &ast2500_masks); } /* ast2400 */ static void test_md5_ast2400(void) { - test_md5("-machine palmetto-bmc", 0x1e6e3000, 0x40000000); + aspeed_test_md5("-machine palmetto-bmc", 0x1e6e3000, 0x40000000); } static void test_sha256_ast2400(void) { - test_sha256("-machine palmetto-bmc", 0x1e6e3000, 0x40000000); + aspeed_test_sha256("-machine palmetto-bmc", 0x1e6e3000, 0x40000000); } static void test_sha512_ast2400(void) { - test_sha512("-machine palmetto-bmc", 0x1e6e3000, 0x40000000); + aspeed_test_sha512("-machine palmetto-bmc", 0x1e6e3000, 0x40000000); } static void test_addresses_ast2400(void) { - test_addresses("-machine palmetto-bmc", 0x1e6e3000, &ast2400_masks); + aspeed_test_addresses("-machine palmetto-bmc", 0x1e6e3000, &ast2400_masks); } int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); + qtest_add_func("ast1030/hace/addresses", test_addresses_ast1030); + qtest_add_func("ast1030/hace/sha512", test_sha512_ast1030); + qtest_add_func("ast1030/hace/sha384", test_sha384_ast1030); + qtest_add_func("ast1030/hace/sha256", test_sha256_ast1030); + qtest_add_func("ast1030/hace/md5", test_md5_ast1030); + + qtest_add_func("ast1030/hace/sha512_sg", test_sha512_sg_ast1030); + qtest_add_func("ast1030/hace/sha384_sg", test_sha384_sg_ast1030); + qtest_add_func("ast1030/hace/sha256_sg", test_sha256_sg_ast1030); + + qtest_add_func("ast1030/hace/sha512_accum", test_sha512_accum_ast1030); + qtest_add_func("ast1030/hace/sha384_accum", test_sha384_accum_ast1030); + qtest_add_func("ast1030/hace/sha256_accum", test_sha256_accum_ast1030); + qtest_add_func("ast2600/hace/addresses", test_addresses_ast2600); qtest_add_func("ast2600/hace/sha512", test_sha512_ast2600); + qtest_add_func("ast2600/hace/sha384", test_sha384_ast2600); qtest_add_func("ast2600/hace/sha256", test_sha256_ast2600); qtest_add_func("ast2600/hace/md5", test_md5_ast2600); qtest_add_func("ast2600/hace/sha512_sg", test_sha512_sg_ast2600); + qtest_add_func("ast2600/hace/sha384_sg", test_sha384_sg_ast2600); qtest_add_func("ast2600/hace/sha256_sg", test_sha256_sg_ast2600); qtest_add_func("ast2600/hace/sha512_accum", test_sha512_accum_ast2600); + qtest_add_func("ast2600/hace/sha384_accum", test_sha384_accum_ast2600); qtest_add_func("ast2600/hace/sha256_accum", test_sha256_accum_ast2600); qtest_add_func("ast2500/hace/addresses", test_addresses_ast2500); diff --git a/tests/qtest/aspeed_scu-test.c b/tests/qtest/aspeed_scu-test.c new file mode 100644 index 00000000000..ca09f9171f7 --- /dev/null +++ b/tests/qtest/aspeed_scu-test.c @@ -0,0 +1,231 @@ +/* + * QTest testcase for the ASPEED AST2500 and AST2600 SCU. + * + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2025 Tan Siewert + */ + +#include "qemu/osdep.h" +#include "libqtest-single.h" + +/* + * SCU base, as well as protection key are + * the same on AST2500 and 2600. 
+ */ +#define AST_SCU_BASE 0x1E6E2000 +#define AST_SCU_PROT_LOCK_STATE 0x0 +#define AST_SCU_PROT_LOCK_VALUE 0x2 +#define AST_SCU_PROT_UNLOCK_STATE 0x1 +#define AST_SCU_PROT_UNLOCK_VALUE 0x1688A8A8 + +#define AST2500_MACHINE "-machine ast2500-evb" +#define AST2500_SCU_PROT_REG 0x00 +#define AST2500_SCU_MISC_2_CONTROL_REG 0x4C + +#define AST2600_MACHINE "-machine ast2600-evb" +/* AST2600 has two protection registers */ +#define AST2600_SCU_PROT_REG 0x000 +#define AST2600_SCU_PROT_REG2 0x010 +#define AST2600_SCU_MISC_2_CONTROL_REG 0x0C4 + +#define TEST_LOCK_ARBITRARY_VALUE 0xABCDEFAB + +/** + * Assert that a given register matches an expected value. + * + * Reads the register and checks if its value equals the expected value. + * + * @param *s - QTest machine state + * @param reg - Address of the register to be checked + * @param expected - Expected register value + */ +static inline void assert_register_eq(QTestState *s, + uint32_t reg, + uint32_t expected) +{ + uint32_t value = qtest_readl(s, reg); + g_assert_cmphex(value, ==, expected); +} + +/** + * Assert that a given register does not match a specific value. + * + * Reads the register and checks that its value is not equal to the + * provided value. + * + * @param *s - QTest machine state + * @param reg - Address of the register to be checked + * @param not_expected - Value the register must not contain + */ +static inline void assert_register_neq(QTestState *s, + uint32_t reg, + uint32_t not_expected) +{ + uint32_t value = qtest_readl(s, reg); + g_assert_cmphex(value, !=, not_expected); +} + +/** + * Test whether the SCU can be locked and unlocked correctly. + * + * When testing multiple registers, this function assumes that writing + * to the first register also affects the others. However, writing to + * any other register only affects itself. + * + * @param *machine - input machine configuration, passed directly + * to QTest + * @param regs[] - List of registers to be checked + * @param regc - amount of arguments for registers to be checked + */ +static void test_protection_register(const char *machine, + const uint32_t regs[], + const int regc) +{ + QTestState *s = qtest_init(machine); + + for (int i = 0; i < regc; i++) { + uint32_t reg = regs[i]; + + qtest_writel(s, reg, AST_SCU_PROT_UNLOCK_VALUE); + assert_register_eq(s, reg, AST_SCU_PROT_UNLOCK_STATE); + + /** + * Check that other registers are unlocked too, if more + * than one is available. + */ + if (regc > 1 && i == 0) { + /* Initialise at 1 instead of 0 to skip first */ + for (int j = 1; j < regc; j++) { + uint32_t add_reg = regs[j]; + assert_register_eq(s, add_reg, AST_SCU_PROT_UNLOCK_STATE); + } + } + + /* Lock the register again */ + qtest_writel(s, reg, AST_SCU_PROT_LOCK_VALUE); + assert_register_eq(s, reg, AST_SCU_PROT_LOCK_STATE); + + /* And the same for locked state */ + if (regc > 1 && i == 0) { + /* Initialise at 1 instead of 0 to skip first */ + for (int j = 1; j < regc; j++) { + uint32_t add_reg = regs[j]; + assert_register_eq(s, add_reg, AST_SCU_PROT_LOCK_STATE); + } + } + } + + qtest_quit(s); +} + +static void test_2500_protection_register(void) +{ + uint32_t regs[] = { AST_SCU_BASE + AST2500_SCU_PROT_REG }; + + test_protection_register(AST2500_MACHINE, + regs, + ARRAY_SIZE(regs)); +} + +static void test_2600_protection_register(void) +{ + /** + * The AST2600 has two protection registers, both + * being required to be unlocked to do any operation. + * + * Modifying SCU000 also modifies SCU010, but modifying + * SCU010 only will keep SCU000 untouched. 
+ */ + uint32_t regs[] = { AST_SCU_BASE + AST2600_SCU_PROT_REG, + AST_SCU_BASE + AST2600_SCU_PROT_REG2 }; + + test_protection_register(AST2600_MACHINE, + regs, + ARRAY_SIZE(regs)); +} + +/** + * Test if SCU register writes are correctly allowed or blocked + * depending on the protection register state. + * + * The test first locks the protection register and verifies that + * writes to the target SCU register are rejected. It then unlocks + * the protection register and confirms that the written value is + * retained when unlocked. + * + * @param *machine - input machine configuration, passed directly + * to QTest + * @param protection_register - first SCU protection key register + * (only one for keeping it simple) + * @param test_register - Register to be used for writing arbitrary + * values + */ +static void test_write_permission_lock_state(const char *machine, + const uint32_t protection_register, + const uint32_t test_register) +{ + QTestState *s = qtest_init(machine); + + /* Arbitrary value to lock provided SCU protection register */ + qtest_writel(s, protection_register, AST_SCU_PROT_LOCK_VALUE); + + /* Ensure that the SCU is really locked */ + assert_register_eq(s, protection_register, AST_SCU_PROT_LOCK_STATE); + + /* Write a known arbitrary value to test that the write is blocked */ + qtest_writel(s, test_register, TEST_LOCK_ARBITRARY_VALUE); + + /* We do not want to have the written value to be saved */ + assert_register_neq(s, test_register, TEST_LOCK_ARBITRARY_VALUE); + + /** + * Unlock the SCU and verify that it can be written to. + * Assumes that the first SCU protection register is sufficient to + * unlock all protection registers, if multiple are present. + */ + qtest_writel(s, protection_register, AST_SCU_PROT_UNLOCK_VALUE); + assert_register_eq(s, protection_register, AST_SCU_PROT_UNLOCK_STATE); + + /* Write a known arbitrary value to test that the write works */ + qtest_writel(s, test_register, TEST_LOCK_ARBITRARY_VALUE); + + /* Ensure that the written value is retained */ + assert_register_eq(s, test_register, TEST_LOCK_ARBITRARY_VALUE); + + qtest_quit(s); +} + +static void test_2500_write_permission_lock_state(void) +{ + test_write_permission_lock_state( + AST2500_MACHINE, + AST_SCU_BASE + AST2500_SCU_PROT_REG, + AST_SCU_BASE + AST2500_SCU_MISC_2_CONTROL_REG + ); +} + +static void test_2600_write_permission_lock_state(void) +{ + test_write_permission_lock_state( + AST2600_MACHINE, + AST_SCU_BASE + AST2600_SCU_PROT_REG, + AST_SCU_BASE + AST2600_SCU_MISC_2_CONTROL_REG + ); +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + + qtest_add_func("/ast2500/scu/protection_register", + test_2500_protection_register); + qtest_add_func("/ast2600/scu/protection_register", + test_2600_protection_register); + + qtest_add_func("/ast2500/scu/write_permission_lock_state", + test_2500_write_permission_lock_state); + qtest_add_func("/ast2600/scu/write_permission_lock_state", + test_2600_write_permission_lock_state); + + return g_test_run(); +} diff --git a/tests/qtest/aspeed_smc-test.c b/tests/qtest/aspeed_smc-test.c index c713a3700b6..52a00e6f0a7 100644 --- a/tests/qtest/aspeed_smc-test.c +++ b/tests/qtest/aspeed_smc-test.c @@ -27,623 +27,211 @@ #include "qemu/bswap.h" #include "libqtest-single.h" #include "qemu/bitops.h" +#include "aspeed-smc-utils.h" -/* - * ASPEED SPI Controller registers - */ -#define R_CONF 0x00 -#define CONF_ENABLE_W0 (1 << 16) -#define R_CE_CTRL 0x04 -#define CRTL_EXTENDED0 0 /* 32 bit addressing for SPI */ -#define R_CTRL0 0x10 -#define 
CTRL_CE_STOP_ACTIVE (1 << 2) -#define CTRL_READMODE 0x0 -#define CTRL_FREADMODE 0x1 -#define CTRL_WRITEMODE 0x2 -#define CTRL_USERMODE 0x3 -#define SR_WEL BIT(1) - -#define ASPEED_FMC_BASE 0x1E620000 -#define ASPEED_FLASH_BASE 0x20000000 - -/* - * Flash commands - */ -enum { - JEDEC_READ = 0x9f, - RDSR = 0x5, - WRDI = 0x4, - BULK_ERASE = 0xc7, - READ = 0x03, - PP = 0x02, - WRSR = 0x1, - WREN = 0x6, - SRWD = 0x80, - RESET_ENABLE = 0x66, - RESET_MEMORY = 0x99, - EN_4BYTE_ADDR = 0xB7, - ERASE_SECTOR = 0xd8, -}; - -#define FLASH_JEDEC 0x20ba19 /* n25q256a */ -#define FLASH_SIZE (32 * 1024 * 1024) - -#define FLASH_PAGE_SIZE 256 - -/* - * Use an explicit bswap for the values read/wrote to the flash region - * as they are BE and the Aspeed CPU is LE. - */ -static inline uint32_t make_be32(uint32_t data) +static void test_palmetto_bmc(AspeedSMCTestData *data) { - return bswap32(data); -} - -static void spi_conf(uint32_t value) -{ - uint32_t conf = readl(ASPEED_FMC_BASE + R_CONF); - - conf |= value; - writel(ASPEED_FMC_BASE + R_CONF, conf); -} - -static void spi_conf_remove(uint32_t value) -{ - uint32_t conf = readl(ASPEED_FMC_BASE + R_CONF); - - conf &= ~value; - writel(ASPEED_FMC_BASE + R_CONF, conf); -} - -static void spi_ce_ctrl(uint32_t value) -{ - uint32_t conf = readl(ASPEED_FMC_BASE + R_CE_CTRL); - - conf |= value; - writel(ASPEED_FMC_BASE + R_CE_CTRL, conf); -} - -static void spi_ctrl_setmode(uint8_t mode, uint8_t cmd) -{ - uint32_t ctrl = readl(ASPEED_FMC_BASE + R_CTRL0); - ctrl &= ~(CTRL_USERMODE | 0xff << 16); - ctrl |= mode | (cmd << 16); - writel(ASPEED_FMC_BASE + R_CTRL0, ctrl); -} - -static void spi_ctrl_start_user(void) -{ - uint32_t ctrl = readl(ASPEED_FMC_BASE + R_CTRL0); - - ctrl |= CTRL_USERMODE | CTRL_CE_STOP_ACTIVE; - writel(ASPEED_FMC_BASE + R_CTRL0, ctrl); - - ctrl &= ~CTRL_CE_STOP_ACTIVE; - writel(ASPEED_FMC_BASE + R_CTRL0, ctrl); -} - -static void spi_ctrl_stop_user(void) -{ - uint32_t ctrl = readl(ASPEED_FMC_BASE + R_CTRL0); - - ctrl |= CTRL_USERMODE | CTRL_CE_STOP_ACTIVE; - writel(ASPEED_FMC_BASE + R_CTRL0, ctrl); -} - -static void flash_reset(void) -{ - spi_conf(CONF_ENABLE_W0); - - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, RESET_ENABLE); - writeb(ASPEED_FLASH_BASE, RESET_MEMORY); - writeb(ASPEED_FLASH_BASE, WREN); - writeb(ASPEED_FLASH_BASE, BULK_ERASE); - writeb(ASPEED_FLASH_BASE, WRDI); - spi_ctrl_stop_user(); - - spi_conf_remove(CONF_ENABLE_W0); -} - -static void test_read_jedec(void) -{ - uint32_t jedec = 0x0; - - spi_conf(CONF_ENABLE_W0); - - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, JEDEC_READ); - jedec |= readb(ASPEED_FLASH_BASE) << 16; - jedec |= readb(ASPEED_FLASH_BASE) << 8; - jedec |= readb(ASPEED_FLASH_BASE); - spi_ctrl_stop_user(); - - flash_reset(); - - g_assert_cmphex(jedec, ==, FLASH_JEDEC); -} - -static void read_page(uint32_t addr, uint32_t *page) -{ - int i; - - spi_ctrl_start_user(); - - writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); - writeb(ASPEED_FLASH_BASE, READ); - writel(ASPEED_FLASH_BASE, make_be32(addr)); - - /* Continuous read are supported */ - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - page[i] = make_be32(readl(ASPEED_FLASH_BASE)); - } - spi_ctrl_stop_user(); -} - -static void read_page_mem(uint32_t addr, uint32_t *page) -{ - int i; - - /* move out USER mode to use direct reads from the AHB bus */ - spi_ctrl_setmode(CTRL_READMODE, READ); - - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - page[i] = make_be32(readl(ASPEED_FLASH_BASE + addr + i * 4)); - } -} - -static void write_page_mem(uint32_t addr, uint32_t write_value) 
-{ - spi_ctrl_setmode(CTRL_WRITEMODE, PP); - - for (int i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - writel(ASPEED_FLASH_BASE + addr + i * 4, write_value); - } -} - -static void assert_page_mem(uint32_t addr, uint32_t expected_value) -{ - uint32_t page[FLASH_PAGE_SIZE / 4]; - read_page_mem(addr, page); - for (int i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, expected_value); - } -} - -static void test_erase_sector(void) -{ - uint32_t some_page_addr = 0x600 * FLASH_PAGE_SIZE; - uint32_t page[FLASH_PAGE_SIZE / 4]; - int i; - - spi_conf(CONF_ENABLE_W0); - - /* - * Previous page should be full of 0xffs after backend is - * initialized - */ - read_page(some_page_addr - FLASH_PAGE_SIZE, page); - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, 0xffffffff); - } - - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); - writeb(ASPEED_FLASH_BASE, WREN); - writeb(ASPEED_FLASH_BASE, PP); - writel(ASPEED_FLASH_BASE, make_be32(some_page_addr)); - - /* Fill the page with its own addresses */ - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - writel(ASPEED_FLASH_BASE, make_be32(some_page_addr + i * 4)); - } - spi_ctrl_stop_user(); - - /* Check the page is correctly written */ - read_page(some_page_addr, page); - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, some_page_addr + i * 4); - } - - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, WREN); - writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); - writeb(ASPEED_FLASH_BASE, ERASE_SECTOR); - writel(ASPEED_FLASH_BASE, make_be32(some_page_addr)); - spi_ctrl_stop_user(); - - /* Check the page is erased */ - read_page(some_page_addr, page); - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, 0xffffffff); - } - - flash_reset(); -} - -static void test_erase_all(void) -{ - uint32_t some_page_addr = 0x15000 * FLASH_PAGE_SIZE; - uint32_t page[FLASH_PAGE_SIZE / 4]; - int i; - - spi_conf(CONF_ENABLE_W0); - - /* - * Previous page should be full of 0xffs after backend is - * initialized - */ - read_page(some_page_addr - FLASH_PAGE_SIZE, page); - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, 0xffffffff); - } - - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); - writeb(ASPEED_FLASH_BASE, WREN); - writeb(ASPEED_FLASH_BASE, PP); - writel(ASPEED_FLASH_BASE, make_be32(some_page_addr)); - - /* Fill the page with its own addresses */ - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - writel(ASPEED_FLASH_BASE, make_be32(some_page_addr + i * 4)); - } - spi_ctrl_stop_user(); - - /* Check the page is correctly written */ - read_page(some_page_addr, page); - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, some_page_addr + i * 4); - } - - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, WREN); - writeb(ASPEED_FLASH_BASE, BULK_ERASE); - spi_ctrl_stop_user(); - - /* Check the page is erased */ - read_page(some_page_addr, page); - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, 0xffffffff); - } - - flash_reset(); -} - -static void test_write_page(void) -{ - uint32_t my_page_addr = 0x14000 * FLASH_PAGE_SIZE; /* beyond 16MB */ - uint32_t some_page_addr = 0x15000 * FLASH_PAGE_SIZE; - uint32_t page[FLASH_PAGE_SIZE / 4]; - int i; - - spi_conf(CONF_ENABLE_W0); - - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); - writeb(ASPEED_FLASH_BASE, WREN); - writeb(ASPEED_FLASH_BASE, PP); - writel(ASPEED_FLASH_BASE, make_be32(my_page_addr)); - - /* Fill the page with its own 
addresses */ - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - writel(ASPEED_FLASH_BASE, make_be32(my_page_addr + i * 4)); - } - spi_ctrl_stop_user(); - - /* Check what was written */ - read_page(my_page_addr, page); - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, my_page_addr + i * 4); - } - - /* Check some other page. It should be full of 0xff */ - read_page(some_page_addr, page); - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, 0xffffffff); - } - - flash_reset(); -} - -static void test_read_page_mem(void) -{ - uint32_t my_page_addr = 0x14000 * FLASH_PAGE_SIZE; /* beyond 16MB */ - uint32_t some_page_addr = 0x15000 * FLASH_PAGE_SIZE; - uint32_t page[FLASH_PAGE_SIZE / 4]; - int i; - - /* Enable 4BYTE mode for controller. This is should be strapped by - * HW for CE0 anyhow. - */ - spi_ce_ctrl(1 << CRTL_EXTENDED0); - - /* Enable 4BYTE mode for flash. */ - spi_conf(CONF_ENABLE_W0); - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); - writeb(ASPEED_FLASH_BASE, WREN); - writeb(ASPEED_FLASH_BASE, PP); - writel(ASPEED_FLASH_BASE, make_be32(my_page_addr)); - - /* Fill the page with its own addresses */ - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - writel(ASPEED_FLASH_BASE, make_be32(my_page_addr + i * 4)); - } - spi_ctrl_stop_user(); - spi_conf_remove(CONF_ENABLE_W0); - - /* Check what was written */ - read_page_mem(my_page_addr, page); - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, my_page_addr + i * 4); - } - - /* Check some other page. It should be full of 0xff */ - read_page_mem(some_page_addr, page); - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, 0xffffffff); - } - - flash_reset(); -} - -static void test_write_page_mem(void) -{ - uint32_t my_page_addr = 0x15000 * FLASH_PAGE_SIZE; - uint32_t page[FLASH_PAGE_SIZE / 4]; - int i; - - /* Enable 4BYTE mode for controller. This is should be strapped by - * HW for CE0 anyhow. - */ - spi_ce_ctrl(1 << CRTL_EXTENDED0); - - /* Enable 4BYTE mode for flash. 
*/ - spi_conf(CONF_ENABLE_W0); - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); - writeb(ASPEED_FLASH_BASE, WREN); - spi_ctrl_stop_user(); - - /* move out USER mode to use direct writes to the AHB bus */ - spi_ctrl_setmode(CTRL_WRITEMODE, PP); - - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - writel(ASPEED_FLASH_BASE + my_page_addr + i * 4, - make_be32(my_page_addr + i * 4)); - } - - /* Check what was written */ - read_page_mem(my_page_addr, page); - for (i = 0; i < FLASH_PAGE_SIZE / 4; i++) { - g_assert_cmphex(page[i], ==, my_page_addr + i * 4); - } - - flash_reset(); -} - -static void test_read_status_reg(void) -{ - uint8_t r; - - spi_conf(CONF_ENABLE_W0); - - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, RDSR); - r = readb(ASPEED_FLASH_BASE); - spi_ctrl_stop_user(); - - g_assert_cmphex(r & SR_WEL, ==, 0); - g_assert(!qtest_qom_get_bool - (global_qtest, "/machine/soc/fmc/ssi.0/child[0]", "write-enable")); - - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, WREN); - writeb(ASPEED_FLASH_BASE, RDSR); - r = readb(ASPEED_FLASH_BASE); - spi_ctrl_stop_user(); - - g_assert_cmphex(r & SR_WEL, ==, SR_WEL); - g_assert(qtest_qom_get_bool - (global_qtest, "/machine/soc/fmc/ssi.0/child[0]", "write-enable")); - - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, WRDI); - writeb(ASPEED_FLASH_BASE, RDSR); - r = readb(ASPEED_FLASH_BASE); - spi_ctrl_stop_user(); - - g_assert_cmphex(r & SR_WEL, ==, 0); - g_assert(!qtest_qom_get_bool - (global_qtest, "/machine/soc/fmc/ssi.0/child[0]", "write-enable")); + int ret; + int fd; - flash_reset(); -} + fd = g_file_open_tmp("qtest.m25p80.n25q256a.XXXXXX", &data->tmp_path, NULL); + g_assert(fd >= 0); + ret = ftruncate(fd, 32 * 1024 * 1024); + g_assert(ret == 0); + close(fd); -static void test_status_reg_write_protection(void) + data->s = qtest_initf("-m 256 -machine palmetto-bmc " + "-drive file=%s,format=raw,if=mtd", + data->tmp_path); + + /* fmc cs0 with n25q256a flash */ + data->flash_base = 0x20000000; + data->spi_base = 0x1E620000; + data->jedec_id = 0x20ba19; + data->cs = 0; + data->node = "/machine/soc/fmc/ssi.0/child[0]"; + /* beyond 16MB */ + data->page_addr = 0x14000 * FLASH_PAGE_SIZE; + + qtest_add_data_func("/ast2400/smc/read_jedec", + data, aspeed_smc_test_read_jedec); + qtest_add_data_func("/ast2400/smc/erase_sector", + data, aspeed_smc_test_erase_sector); + qtest_add_data_func("/ast2400/smc/erase_all", + data, aspeed_smc_test_erase_all); + qtest_add_data_func("/ast2400/smc/write_page", + data, aspeed_smc_test_write_page); + qtest_add_data_func("/ast2400/smc/read_page_mem", + data, aspeed_smc_test_read_page_mem); + qtest_add_data_func("/ast2400/smc/write_page_mem", + data, aspeed_smc_test_write_page_mem); + qtest_add_data_func("/ast2400/smc/read_status_reg", + data, aspeed_smc_test_read_status_reg); + qtest_add_data_func("/ast2400/smc/status_reg_write_protection", + data, aspeed_smc_test_status_reg_write_protection); + qtest_add_data_func("/ast2400/smc/write_block_protect", + data, aspeed_smc_test_write_block_protect); + qtest_add_data_func("/ast2400/smc/write_block_protect_bottom_bit", + data, aspeed_smc_test_write_block_protect_bottom_bit); +} + +static void test_ast2500_evb(AspeedSMCTestData *data) { - uint8_t r; - - spi_conf(CONF_ENABLE_W0); - - /* default case: WP# is high and SRWD is low -> status register writable */ - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, WREN); - /* test ability to write SRWD */ - writeb(ASPEED_FLASH_BASE, WRSR); - writeb(ASPEED_FLASH_BASE, SRWD); - writeb(ASPEED_FLASH_BASE, RDSR); - r = 
readb(ASPEED_FLASH_BASE); - spi_ctrl_stop_user(); - g_assert_cmphex(r & SRWD, ==, SRWD); - - /* WP# high and SRWD high -> status register writable */ - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, WREN); - /* test ability to write SRWD */ - writeb(ASPEED_FLASH_BASE, WRSR); - writeb(ASPEED_FLASH_BASE, 0); - writeb(ASPEED_FLASH_BASE, RDSR); - r = readb(ASPEED_FLASH_BASE); - spi_ctrl_stop_user(); - g_assert_cmphex(r & SRWD, ==, 0); - - /* WP# low and SRWD low -> status register writable */ - qtest_set_irq_in(global_qtest, - "/machine/soc/fmc/ssi.0/child[0]", "WP#", 0, 0); - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, WREN); - /* test ability to write SRWD */ - writeb(ASPEED_FLASH_BASE, WRSR); - writeb(ASPEED_FLASH_BASE, SRWD); - writeb(ASPEED_FLASH_BASE, RDSR); - r = readb(ASPEED_FLASH_BASE); - spi_ctrl_stop_user(); - g_assert_cmphex(r & SRWD, ==, SRWD); - - /* WP# low and SRWD high -> status register NOT writable */ - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, WREN); - /* test ability to write SRWD */ - writeb(ASPEED_FLASH_BASE, WRSR); - writeb(ASPEED_FLASH_BASE, 0); - writeb(ASPEED_FLASH_BASE, RDSR); - r = readb(ASPEED_FLASH_BASE); - spi_ctrl_stop_user(); - /* write is not successful */ - g_assert_cmphex(r & SRWD, ==, SRWD); + int ret; + int fd; - qtest_set_irq_in(global_qtest, - "/machine/soc/fmc/ssi.0/child[0]", "WP#", 0, 1); - flash_reset(); -} + fd = g_file_open_tmp("qtest.m25p80.mx25l25635e.XXXXXX", + &data->tmp_path, NULL); + g_assert(fd >= 0); + ret = ftruncate(fd, 32 * 1024 * 1024); + g_assert(ret == 0); + close(fd); -static void test_write_block_protect(void) + data->s = qtest_initf("-machine ast2500-evb " + "-drive file=%s,format=raw,if=mtd", + data->tmp_path); + + /* fmc cs0 with mx25l25635e flash */ + data->flash_base = 0x20000000; + data->spi_base = 0x1E620000; + data->jedec_id = 0xc22019; + data->cs = 0; + data->node = "/machine/soc/fmc/ssi.0/child[0]"; + /* beyond 16MB */ + data->page_addr = 0x14000 * FLASH_PAGE_SIZE; + + qtest_add_data_func("/ast2500/smc/read_jedec", + data, aspeed_smc_test_read_jedec); + qtest_add_data_func("/ast2500/smc/erase_sector", + data, aspeed_smc_test_erase_sector); + qtest_add_data_func("/ast2500/smc/erase_all", + data, aspeed_smc_test_erase_all); + qtest_add_data_func("/ast2500/smc/write_page", + data, aspeed_smc_test_write_page); + qtest_add_data_func("/ast2500/smc/read_page_mem", + data, aspeed_smc_test_read_page_mem); + qtest_add_data_func("/ast2500/smc/write_page_mem", + data, aspeed_smc_test_write_page_mem); + qtest_add_data_func("/ast2500/smc/read_status_reg", + data, aspeed_smc_test_read_status_reg); + qtest_add_data_func("/ast2500/smc/write_page_qpi", + data, aspeed_smc_test_write_page_qpi); +} + +static void test_ast2600_evb(AspeedSMCTestData *data) { - uint32_t sector_size = 65536; - uint32_t n_sectors = 512; - - spi_ce_ctrl(1 << CRTL_EXTENDED0); - spi_conf(CONF_ENABLE_W0); - - uint32_t bp_bits = 0b0; - - for (int i = 0; i < 16; i++) { - bp_bits = ((i & 0b1000) << 3) | ((i & 0b0111) << 2); - - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, WREN); - writeb(ASPEED_FLASH_BASE, BULK_ERASE); - writeb(ASPEED_FLASH_BASE, WREN); - writeb(ASPEED_FLASH_BASE, WRSR); - writeb(ASPEED_FLASH_BASE, bp_bits); - writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); - writeb(ASPEED_FLASH_BASE, WREN); - spi_ctrl_stop_user(); - - uint32_t num_protected_sectors = i ? 
MIN(1 << (i - 1), n_sectors) : 0; - uint32_t protection_start = n_sectors - num_protected_sectors; - uint32_t protection_end = n_sectors; - - for (int sector = 0; sector < n_sectors; sector++) { - uint32_t addr = sector * sector_size; - - assert_page_mem(addr, 0xffffffff); - write_page_mem(addr, make_be32(0xabcdef12)); - - uint32_t expected_value = protection_start <= sector - && sector < protection_end - ? 0xffffffff : 0xabcdef12; - - assert_page_mem(addr, expected_value); - } - } + int ret; + int fd; - flash_reset(); -} + fd = g_file_open_tmp("qtest.m25p80.mx66u51235f.XXXXXX", + &data->tmp_path, NULL); + g_assert(fd >= 0); + ret = ftruncate(fd, 64 * 1024 * 1024); + g_assert(ret == 0); + close(fd); -static void test_write_block_protect_bottom_bit(void) + data->s = qtest_initf("-machine ast2600-evb " + "-drive file=%s,format=raw,if=mtd", + data->tmp_path); + + /* fmc cs0 with mx66u51235f flash */ + data->flash_base = 0x20000000; + data->spi_base = 0x1E620000; + data->jedec_id = 0xc2253a; + data->cs = 0; + data->node = "/machine/soc/fmc/ssi.0/child[0]"; + /* beyond 16MB */ + data->page_addr = 0x14000 * FLASH_PAGE_SIZE; + + qtest_add_data_func("/ast2600/smc/read_jedec", + data, aspeed_smc_test_read_jedec); + qtest_add_data_func("/ast2600/smc/erase_sector", + data, aspeed_smc_test_erase_sector); + qtest_add_data_func("/ast2600/smc/erase_all", + data, aspeed_smc_test_erase_all); + qtest_add_data_func("/ast2600/smc/write_page", + data, aspeed_smc_test_write_page); + qtest_add_data_func("/ast2600/smc/read_page_mem", + data, aspeed_smc_test_read_page_mem); + qtest_add_data_func("/ast2600/smc/write_page_mem", + data, aspeed_smc_test_write_page_mem); + qtest_add_data_func("/ast2600/smc/read_status_reg", + data, aspeed_smc_test_read_status_reg); + qtest_add_data_func("/ast2600/smc/write_page_qpi", + data, aspeed_smc_test_write_page_qpi); +} + +static void test_ast1030_evb(AspeedSMCTestData *data) { - uint32_t sector_size = 65536; - uint32_t n_sectors = 512; - - spi_ce_ctrl(1 << CRTL_EXTENDED0); - spi_conf(CONF_ENABLE_W0); - - /* top bottom bit is enabled */ - uint32_t bp_bits = 0b00100 << 3; - - for (int i = 0; i < 16; i++) { - bp_bits = (((i & 0b1000) | 0b0100) << 3) | ((i & 0b0111) << 2); - - spi_ctrl_start_user(); - writeb(ASPEED_FLASH_BASE, WREN); - writeb(ASPEED_FLASH_BASE, BULK_ERASE); - writeb(ASPEED_FLASH_BASE, WREN); - writeb(ASPEED_FLASH_BASE, WRSR); - writeb(ASPEED_FLASH_BASE, bp_bits); - writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR); - writeb(ASPEED_FLASH_BASE, WREN); - spi_ctrl_stop_user(); - - uint32_t num_protected_sectors = i ? MIN(1 << (i - 1), n_sectors) : 0; - uint32_t protection_start = 0; - uint32_t protection_end = num_protected_sectors; - - for (int sector = 0; sector < n_sectors; sector++) { - uint32_t addr = sector * sector_size; - - assert_page_mem(addr, 0xffffffff); - write_page_mem(addr, make_be32(0xabcdef12)); - - uint32_t expected_value = protection_start <= sector - && sector < protection_end - ? 
0xffffffff : 0xabcdef12; + int ret; + int fd; - assert_page_mem(addr, expected_value); - } - } + fd = g_file_open_tmp("qtest.m25p80.w25q80bl.XXXXXX", + &data->tmp_path, NULL); + g_assert(fd >= 0); + ret = ftruncate(fd, 1 * 1024 * 1024); + g_assert(ret == 0); + close(fd); - flash_reset(); + data->s = qtest_initf("-machine ast1030-evb " + "-drive file=%s,format=raw,if=mtd", + data->tmp_path); + + /* fmc cs0 with w25q80bl flash */ + data->flash_base = 0x80000000; + data->spi_base = 0x7E620000; + data->jedec_id = 0xef4014; + data->cs = 0; + data->node = "/machine/soc/fmc/ssi.0/child[0]"; + /* beyond 512KB */ + data->page_addr = 0x800 * FLASH_PAGE_SIZE; + + qtest_add_data_func("/ast1030/smc/read_jedec", + data, aspeed_smc_test_read_jedec); + qtest_add_data_func("/ast1030/smc/erase_sector", + data, aspeed_smc_test_erase_sector); + qtest_add_data_func("/ast1030/smc/erase_all", + data, aspeed_smc_test_erase_all); + qtest_add_data_func("/ast1030/smc/write_page", + data, aspeed_smc_test_write_page); + qtest_add_data_func("/ast1030/smc/read_page_mem", + data, aspeed_smc_test_read_page_mem); + qtest_add_data_func("/ast1030/smc/write_page_mem", + data, aspeed_smc_test_write_page_mem); + qtest_add_data_func("/ast1030/smc/read_status_reg", + data, aspeed_smc_test_read_status_reg); + qtest_add_data_func("/ast1030/smc/write_page_qpi", + data, aspeed_smc_test_write_page_qpi); } int main(int argc, char **argv) { - g_autofree char *tmp_path = NULL; + AspeedSMCTestData palmetto_data; + AspeedSMCTestData ast2500_evb_data; + AspeedSMCTestData ast2600_evb_data; + AspeedSMCTestData ast1030_evb_data; int ret; - int fd; g_test_init(&argc, &argv, NULL); - fd = g_file_open_tmp("qtest.m25p80.XXXXXX", &tmp_path, NULL); - g_assert(fd >= 0); - ret = ftruncate(fd, FLASH_SIZE); - g_assert(ret == 0); - close(fd); - - global_qtest = qtest_initf("-m 256 -machine palmetto-bmc " - "-drive file=%s,format=raw,if=mtd", - tmp_path); - - qtest_add_func("/ast2400/smc/read_jedec", test_read_jedec); - qtest_add_func("/ast2400/smc/erase_sector", test_erase_sector); - qtest_add_func("/ast2400/smc/erase_all", test_erase_all); - qtest_add_func("/ast2400/smc/write_page", test_write_page); - qtest_add_func("/ast2400/smc/read_page_mem", test_read_page_mem); - qtest_add_func("/ast2400/smc/write_page_mem", test_write_page_mem); - qtest_add_func("/ast2400/smc/read_status_reg", test_read_status_reg); - qtest_add_func("/ast2400/smc/status_reg_write_protection", - test_status_reg_write_protection); - qtest_add_func("/ast2400/smc/write_block_protect", - test_write_block_protect); - qtest_add_func("/ast2400/smc/write_block_protect_bottom_bit", - test_write_block_protect_bottom_bit); - - flash_reset(); + test_palmetto_bmc(&palmetto_data); + test_ast2500_evb(&ast2500_evb_data); + test_ast2600_evb(&ast2600_evb_data); + test_ast1030_evb(&ast1030_evb_data); ret = g_test_run(); - qtest_quit(global_qtest); - unlink(tmp_path); + qtest_quit(palmetto_data.s); + qtest_quit(ast2500_evb_data.s); + qtest_quit(ast2600_evb_data.s); + qtest_quit(ast1030_evb_data.s); + unlink(palmetto_data.tmp_path); + unlink(ast2500_evb_data.tmp_path); + unlink(ast2600_evb_data.tmp_path); + unlink(ast1030_evb_data.tmp_path); + g_free(palmetto_data.tmp_path); + g_free(ast2500_evb_data.tmp_path); + g_free(ast2600_evb_data.tmp_path); + g_free(ast1030_evb_data.tmp_path); + return ret; } diff --git a/tests/qtest/ast2700-gpio-test.c b/tests/qtest/ast2700-gpio-test.c new file mode 100644 index 00000000000..eeae9bf11fc --- /dev/null +++ b/tests/qtest/ast2700-gpio-test.c @@ -0,0 +1,95 @@ +/* + 
* QTest testcase for the ASPEED AST2700 GPIO Controller. + * + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2024 ASPEED Technology Inc. + */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/timer.h" +#include "qobject/qdict.h" +#include "libqtest-single.h" + +#define AST2700_GPIO_BASE 0x14C0B000 +#define GPIOA0_CONTROL 0x180 + +static void test_output_pins(const char *machine, const uint32_t base) +{ + QTestState *s = qtest_init(machine); + uint32_t offset = 0; + uint32_t value = 0; + uint32_t pin = 0; + + for (char c = 'A'; c <= 'D'; c++) { + for (int i = 0; i < 8; i++) { + offset = base + (pin * 4); + + /* output direction and output hi */ + qtest_writel(s, offset, 0x00000003); + value = qtest_readl(s, offset); + g_assert_cmphex(value, ==, 0x00000003); + + /* output direction and output low */ + qtest_writel(s, offset, 0x00000002); + value = qtest_readl(s, offset); + g_assert_cmphex(value, ==, 0x00000002); + pin++; + } + } + + qtest_quit(s); +} + +static void test_input_pins(const char *machine, const uint32_t base) +{ + QTestState *s = qtest_init(machine); + char name[16]; + uint32_t offset = 0; + uint32_t value = 0; + uint32_t pin = 0; + + for (char c = 'A'; c <= 'D'; c++) { + for (int i = 0; i < 8; i++) { + sprintf(name, "gpio%c%d", c, i); + offset = base + (pin * 4); + /* input direction */ + qtest_writel(s, offset, 0); + + /* set input */ + qtest_qom_set_bool(s, "/machine/soc/gpio", name, true); + value = qtest_readl(s, offset); + g_assert_cmphex(value, ==, 0x00002000); + + /* clear input */ + qtest_qom_set_bool(s, "/machine/soc/gpio", name, false); + value = qtest_readl(s, offset); + g_assert_cmphex(value, ==, 0); + pin++; + } + } + + qtest_quit(s); +} + +static void test_2700_input_pins(void) +{ + test_input_pins("-machine ast2700-evb", + AST2700_GPIO_BASE + GPIOA0_CONTROL); +} + +static void test_2700_output_pins(void) +{ + test_output_pins("-machine ast2700-evb", + AST2700_GPIO_BASE + GPIOA0_CONTROL); +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + + qtest_add_func("/ast2700/gpio/input_pins", test_2700_input_pins); + qtest_add_func("/ast2700/gpio/output_pins", test_2700_output_pins); + + return g_test_run(); +} diff --git a/tests/qtest/ast2700-hace-test.c b/tests/qtest/ast2700-hace-test.c new file mode 100644 index 00000000000..a400e2962b0 --- /dev/null +++ b/tests/qtest/ast2700-hace-test.c @@ -0,0 +1,98 @@ +/* + * QTest testcase for the ASPEED Hash and Crypto Engine + * + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2025 ASPEED Technology Inc. 
+ */ + +#include "qemu/osdep.h" +#include "libqtest.h" +#include "qemu/bitops.h" +#include "aspeed-hace-utils.h" + +static const struct AspeedMasks as2700_masks = { + .src = 0x7fffffff, + .dest = 0x7ffffff8, + .key = 0x7ffffff8, + .len = 0x0fffffff, + .src_hi = 0x00000003, + .dest_hi = 0x00000003, + .key_hi = 0x00000003, +}; + +/* ast2700 */ +static void test_md5_ast2700(void) +{ + aspeed_test_md5("-machine ast2700a1-evb", 0x12070000, 0x400000000); +} + +static void test_sha256_ast2700(void) +{ + aspeed_test_sha256("-machine ast2700a1-evb", 0x12070000, 0x400000000); +} + +static void test_sha256_sg_ast2700(void) +{ + aspeed_test_sha256_sg("-machine ast2700a1-evb", 0x12070000, 0x400000000); +} + +static void test_sha384_ast2700(void) +{ + aspeed_test_sha384("-machine ast2700a1-evb", 0x12070000, 0x400000000); +} + +static void test_sha384_sg_ast2700(void) +{ + aspeed_test_sha384_sg("-machine ast2700a1-evb", 0x12070000, 0x400000000); +} + +static void test_sha512_ast2700(void) +{ + aspeed_test_sha512("-machine ast2700a1-evb", 0x12070000, 0x400000000); +} + +static void test_sha512_sg_ast2700(void) +{ + aspeed_test_sha512_sg("-machine ast2700a1-evb", 0x12070000, 0x400000000); +} + +static void test_sha256_accum_ast2700(void) +{ + aspeed_test_sha256_accum("-machine ast2700a1-evb", 0x12070000, 0x400000000); +} + +static void test_sha384_accum_ast2700(void) +{ + aspeed_test_sha384_accum("-machine ast2700a1-evb", 0x12070000, 0x400000000); +} + +static void test_sha512_accum_ast2700(void) +{ + aspeed_test_sha512_accum("-machine ast2700a1-evb", 0x12070000, 0x400000000); +} + +static void test_addresses_ast2700(void) +{ + aspeed_test_addresses("-machine ast2700a1-evb", 0x12070000, &as2700_masks); +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + + qtest_add_func("ast2700/hace/addresses", test_addresses_ast2700); + qtest_add_func("ast2700/hace/sha512", test_sha512_ast2700); + qtest_add_func("ast2700/hace/sha384", test_sha384_ast2700); + qtest_add_func("ast2700/hace/sha256", test_sha256_ast2700); + qtest_add_func("ast2700/hace/md5", test_md5_ast2700); + + qtest_add_func("ast2700/hace/sha512_sg", test_sha512_sg_ast2700); + qtest_add_func("ast2700/hace/sha384_sg", test_sha384_sg_ast2700); + qtest_add_func("ast2700/hace/sha256_sg", test_sha256_sg_ast2700); + + qtest_add_func("ast2700/hace/sha512_accum", test_sha512_accum_ast2700); + qtest_add_func("ast2700/hace/sha384_accum", test_sha384_accum_ast2700); + qtest_add_func("ast2700/hace/sha256_accum", test_sha256_accum_ast2700); + + return g_test_run(); +} diff --git a/tests/qtest/ast2700-smc-test.c b/tests/qtest/ast2700-smc-test.c new file mode 100644 index 00000000000..62d538d8a3a --- /dev/null +++ b/tests/qtest/ast2700-smc-test.c @@ -0,0 +1,72 @@ +/* + * QTest testcase for the M25P80 Flash using the ASPEED SPI Controller since + * AST2700. + * + * SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (C) 2024 ASPEED Technology Inc. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/bswap.h" +#include "libqtest-single.h" +#include "qemu/bitops.h" +#include "aspeed-smc-utils.h" + +static void test_ast2700_evb(AspeedSMCTestData *data) +{ + int ret; + int fd; + + fd = g_file_open_tmp("qtest.m25p80.w25q01jvq.XXXXXX", + &data->tmp_path, NULL); + g_assert(fd >= 0); + ret = ftruncate(fd, 128 * 1024 * 1024); + g_assert(ret == 0); + close(fd); + + data->s = qtest_initf("-machine ast2700-evb " + "-drive file=%s,format=raw,if=mtd", + data->tmp_path); + + /* fmc cs0 with w25q01jvq flash */ + data->flash_base = 0x100000000; + data->spi_base = 0x14000000; + data->jedec_id = 0xef4021; + data->cs = 0; + data->node = "/machine/soc/fmc/ssi.0/child[0]"; + /* beyond 64MB */ + data->page_addr = 0x40000 * FLASH_PAGE_SIZE; + + qtest_add_data_func("/ast2700/smc/read_jedec", + data, aspeed_smc_test_read_jedec); + qtest_add_data_func("/ast2700/smc/erase_sector", + data, aspeed_smc_test_erase_sector); + qtest_add_data_func("/ast2700/smc/erase_all", + data, aspeed_smc_test_erase_all); + qtest_add_data_func("/ast2700/smc/write_page", + data, aspeed_smc_test_write_page); + qtest_add_data_func("/ast2700/smc/read_page_mem", + data, aspeed_smc_test_read_page_mem); + qtest_add_data_func("/ast2700/smc/write_page_mem", + data, aspeed_smc_test_write_page_mem); + qtest_add_data_func("/ast2700/smc/read_status_reg", + data, aspeed_smc_test_read_status_reg); + qtest_add_data_func("/ast2700/smc/write_page_qpi", + data, aspeed_smc_test_write_page_qpi); +} + +int main(int argc, char **argv) +{ + AspeedSMCTestData ast2700_evb_data; + int ret; + + g_test_init(&argc, &argv, NULL); + + test_ast2700_evb(&ast2700_evb_data); + ret = g_test_run(); + + qtest_quit(ast2700_evb_data.s); + unlink(ast2700_evb_data.tmp_path); + g_free(ast2700_evb_data.tmp_path); + return ret; +} diff --git a/tests/qtest/bcm2835-i2c-test.c b/tests/qtest/bcm2835-i2c-test.c index 513ecce61dc..15991949260 100644 --- a/tests/qtest/bcm2835-i2c-test.c +++ b/tests/qtest/bcm2835-i2c-test.c @@ -81,7 +81,7 @@ static void test_i2c_read_write(gconstpointer data) g_assert_cmpint(i2cdata, ==, 0xde); i2cdata = readl(base_addr + BCM2835_I2C_FIFO); - g_assert_cmpint(i2cdata, ==, 0xad); + g_assert_cmpint(i2cdata, ==, 0xa0); /* Clear flags */ writel(base_addr + BCM2835_I2C_S, BCM2835_I2C_S_DONE | BCM2835_I2C_S_ERR | diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c index 36e5c0adde1..4fa8ac5096a 100644 --- a/tests/qtest/bios-tables-test.c +++ b/tests/qtest/bios-tables-test.c @@ -292,6 +292,7 @@ static void dump_aml_files(test_data *data, bool rebuild) g_free(aml_file); } + free_test_data(&exp_data); } static bool create_tmp_asl(AcpiSdtTable *sdt) @@ -959,7 +960,7 @@ static void test_acpi_piix4_tcg_bridge(void) free_test_data(&data); /* check that reboot/reset doesn't change any ACPI tables */ - qtest_qmp_send(data.qts, "{'execute':'system_reset' }"); + qtest_system_reset(data.qts); process_acpi_tables(&data); free_test_data(&data); } @@ -1216,7 +1217,7 @@ static void test_acpi_q35_multif_bridge(void) free_test_data(&data); /* check that reboot/reset doesn't change any ACPI tables */ - qtest_qmp_send(data.qts, "{'execute':'system_reset' }"); + qtest_system_reset(data.qts); process_acpi_tables(&data); free_test_data(&data); } @@ -1621,7 +1622,7 @@ static void test_acpi_aarch64_virt_tcg_memhp(void) .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", .cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2", .ram_start = 0x40000000ULL, - .scan_len = 256ULL * 1024 * 1024, + .scan_len = 256ULL 
* MiB, }; data.variant = ".memhp"; @@ -1642,6 +1643,54 @@ static void test_acpi_aarch64_virt_tcg_memhp(void) } +static void test_acpi_aarch64_virt_acpi_pci_hotplug(void) +{ + test_data data = { + .machine = "virt", + .arch = "aarch64", + .tcg_only = true, + .uefi_fl1 = "pc-bios/edk2-aarch64-code.fd", + .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", + .cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2", + .ram_start = 0x40000000ULL, + .scan_len = 256ULL * MiB, + .variant = ".acpipcihp", + }; + + /* Use ACPI PCI Hotplug */ + test_acpi_one(" -global acpi-ged.acpi-pci-hotplug-with-bridge-support=on" + " -cpu cortex-a57" + " -device pcie-root-port,id=pcie.1,bus=pcie.0,chassis=0,slot=1,addr=7.0" + " -device pci-testdev,bus=pcie.1", + &data); + + free_test_data(&data); +} + +static void test_acpi_aarch64_virt_pcie_root_port_hpoff(void) +{ + test_data data = { + .machine = "virt", + .arch = "aarch64", + .tcg_only = true, + .uefi_fl1 = "pc-bios/edk2-aarch64-code.fd", + .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", + .cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2", + .ram_start = 0x40000000ULL, + .scan_len = 256ULL * MiB, + .variant = ".hpoffacpiindex", + }; + + /* turn hotplug off on the pcie-root-port and use static acpi-index*/ + test_acpi_one(" -device pcie-root-port,id=pcie.1,chassis=0," + "slot=1,hotplug=off,addr=7.0" + " -device pci-testdev,bus=pcie.1,acpi-index=12" + " -cpu cortex-a57", + &data); + + free_test_data(&data); +} + static void test_acpi_microvm_prepare(test_data *data) { data->machine = "microvm"; @@ -1706,6 +1755,32 @@ static void test_acpi_microvm_ioapic2_tcg(void) free_test_data(&data); } +static void test_acpi_riscv64_virt_tcg_numamem(void) +{ + test_data data = { + .machine = "virt", + .arch = "riscv64", + .tcg_only = true, + .uefi_fl1 = "pc-bios/edk2-riscv-code.fd", + .uefi_fl2 = "pc-bios/edk2-riscv-vars.fd", + .cd = "tests/data/uefi-boot-images/bios-tables-test.riscv64.iso.qcow2", + .ram_start = 0x80000000ULL, + .scan_len = 128ULL * MiB, + }; + + data.variant = ".numamem"; + /* + * RHCT will have ISA string encoded. To reduce the effort + * of updating expected AML file for any new default ISA extension, + * use the profile rva22s64. 
+ */ + test_acpi_one(" -cpu rva22s64" + " -object memory-backend-ram,id=ram0,size=128M" + " -numa node,memdev=ram0", + &data); + free_test_data(&data); +} + static void test_acpi_aarch64_virt_tcg_numamem(void) { test_data data = { @@ -1716,7 +1791,7 @@ static void test_acpi_aarch64_virt_tcg_numamem(void) .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", .cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2", .ram_start = 0x40000000ULL, - .scan_len = 128ULL * 1024 * 1024, + .scan_len = 128ULL * MiB, }; data.variant = ".numamem"; @@ -1738,7 +1813,7 @@ static void test_acpi_aarch64_virt_tcg_pxb(void) .uefi_fl1 = "pc-bios/edk2-aarch64-code.fd", .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", .ram_start = 0x40000000ULL, - .scan_len = 128ULL * 1024 * 1024, + .scan_len = 128ULL * MiB, }; /* * While using -cdrom, the cdrom would auto plugged into pxb-pcie, @@ -1762,6 +1837,44 @@ static void test_acpi_aarch64_virt_tcg_pxb(void) free_test_data(&data); } +static void test_acpi_aarch64_virt_tcg_acpi_spcr(void) +{ + test_data data = { + .machine = "virt", + .arch = "aarch64", + .tcg_only = true, + .uefi_fl1 = "pc-bios/edk2-aarch64-code.fd", + .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", + .cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2", + .ram_start = 0x40000000ULL, + .scan_len = 128ULL * 1024 * 1024, + .variant = ".acpispcr", + }; + + test_acpi_one("-cpu cortex-a57 " + " -machine spcr=off", &data); + free_test_data(&data); +} + +static void test_acpi_riscv64_virt_tcg_acpi_spcr(void) +{ + test_data data = { + .machine = "virt", + .arch = "riscv64", + .tcg_only = true, + .uefi_fl1 = "pc-bios/edk2-riscv-code.fd", + .uefi_fl2 = "pc-bios/edk2-riscv-vars.fd", + .cd = "tests/data/uefi-boot-images/bios-tables-test.riscv64.iso.qcow2", + .ram_start = 0x80000000ULL, + .scan_len = 128ULL * 1024 * 1024, + .variant = ".acpispcr", + }; + + test_acpi_one("-cpu rva22s64 " + "-machine spcr=off", &data); + free_test_data(&data); +} + static void test_acpi_tcg_acpi_hmat(const char *machine, const char *arch) { test_data data = {}; @@ -1814,7 +1927,7 @@ static void test_acpi_aarch64_virt_tcg_acpi_hmat(void) .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", .cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2", .ram_start = 0x40000000ULL, - .scan_len = 128ULL * 1024 * 1024, + .scan_len = 128ULL * MiB, }; data.variant = ".acpihmatvirt"; @@ -1910,6 +2023,101 @@ static void test_acpi_q35_tcg_acpi_hmat_noinitiator(void) free_test_data(&data); } +/* Test intended to hit corner cases of SRAT and HMAT */ +static void test_acpi_q35_tcg_acpi_hmat_generic_x(void) +{ + test_data data = {}; + + data.machine = MACHINE_Q35; + data.arch = "x86"; + data.variant = ".acpihmat-generic-x"; + test_acpi_one(" -machine hmat=on,cxl=on" + " -smp 3,sockets=3" + " -m 128M,maxmem=384M,slots=2" + " -device pcie-root-port,chassis=1,id=pci.1" + " -device pci-testdev,bus=pci.1," + "multifunction=on,addr=00.0" + " -device pci-testdev,bus=pci.1,addr=00.1" + " -device pci-testdev,bus=pci.1,id=gidev,addr=00.2" + " -device pxb-cxl,bus_nr=64,bus=pcie.0,id=cxl.1" + " -object memory-backend-ram,size=64M,id=ram0" + " -object memory-backend-ram,size=64M,id=ram1" + " -numa node,nodeid=0,cpus=0,memdev=ram0" + " -numa node,nodeid=1" + " -object acpi-generic-initiator,id=gi0,pci-dev=gidev,node=1" + " -numa node,nodeid=2" + " -object acpi-generic-port,id=gp0,pci-bus=cxl.1,node=2" + " -numa node,nodeid=3,cpus=1" + " -numa node,nodeid=4,memdev=ram1" + " -numa node,nodeid=5,cpus=2" + " -numa hmat-lb,initiator=0,target=0,hierarchy=memory," + 
"data-type=access-latency,latency=10" + " -numa hmat-lb,initiator=0,target=0,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=800M" + " -numa hmat-lb,initiator=0,target=2,hierarchy=memory," + "data-type=access-latency,latency=100" + " -numa hmat-lb,initiator=0,target=2,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=200M" + " -numa hmat-lb,initiator=0,target=4,hierarchy=memory," + "data-type=access-latency,latency=100" + " -numa hmat-lb,initiator=0,target=4,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=200M" + " -numa hmat-lb,initiator=0,target=5,hierarchy=memory," + "data-type=access-latency,latency=200" + " -numa hmat-lb,initiator=0,target=5,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=400M" + " -numa hmat-lb,initiator=1,target=0,hierarchy=memory," + "data-type=access-latency,latency=500" + " -numa hmat-lb,initiator=1,target=0,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=100M" + " -numa hmat-lb,initiator=1,target=2,hierarchy=memory," + "data-type=access-latency,latency=50" + " -numa hmat-lb,initiator=1,target=2,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=400M" + " -numa hmat-lb,initiator=1,target=4,hierarchy=memory," + "data-type=access-latency,latency=50" + " -numa hmat-lb,initiator=1,target=4,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=800M" + " -numa hmat-lb,initiator=1,target=5,hierarchy=memory," + "data-type=access-latency,latency=500" + " -numa hmat-lb,initiator=1,target=5,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=100M" + " -numa hmat-lb,initiator=3,target=0,hierarchy=memory," + "data-type=access-latency,latency=20" + " -numa hmat-lb,initiator=3,target=0,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=400M" + " -numa hmat-lb,initiator=3,target=2,hierarchy=memory," + "data-type=access-latency,latency=80" + " -numa hmat-lb,initiator=3,target=2,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=200M" + " -numa hmat-lb,initiator=3,target=4,hierarchy=memory," + "data-type=access-latency,latency=80" + " -numa hmat-lb,initiator=3,target=4,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=200M" + " -numa hmat-lb,initiator=3,target=5,hierarchy=memory," + "data-type=access-latency,latency=20" + " -numa hmat-lb,initiator=3,target=5,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=400M" + " -numa hmat-lb,initiator=5,target=0,hierarchy=memory," + "data-type=access-latency,latency=20" + " -numa hmat-lb,initiator=5,target=0,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=400M" + " -numa hmat-lb,initiator=5,target=2,hierarchy=memory," + "data-type=access-latency,latency=80" + " -numa hmat-lb,initiator=5,target=4,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=200M" + " -numa hmat-lb,initiator=5,target=4,hierarchy=memory," + "data-type=access-latency,latency=80" + " -numa hmat-lb,initiator=5,target=2,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=200M" + " -numa hmat-lb,initiator=5,target=5,hierarchy=memory," + "data-type=access-latency,latency=10" + " -numa hmat-lb,initiator=5,target=5,hierarchy=memory," + "data-type=access-bandwidth,bandwidth=800M", + &data); + free_test_data(&data); +} + #ifdef CONFIG_POSIX static void test_acpi_erst(const char *machine, const char *arch) { @@ -1973,7 +2181,7 @@ static void test_acpi_riscv64_virt_tcg(void) .uefi_fl2 = "pc-bios/edk2-riscv-vars.fd", .cd = "tests/data/uefi-boot-images/bios-tables-test.riscv64.iso.qcow2", .ram_start = 0x80000000ULL, - .scan_len = 128ULL * 1024 * 1024, + 
.scan_len = 128ULL * MiB, }; /* @@ -1995,12 +2203,12 @@ static void test_acpi_aarch64_virt_tcg(void) .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", .cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2", .ram_start = 0x40000000ULL, - .scan_len = 128ULL * 1024 * 1024, + .scan_len = 128ULL * MiB, }; data.smbios_cpu_max_speed = 2900; data.smbios_cpu_curr_speed = 2700; - test_acpi_one("-cpu cortex-a57 " + test_acpi_one("-cpu cortex-a57 -machine ras=on " "-smbios type=4,max-speed=2900,current-speed=2700", &data); free_test_data(&data); } @@ -2016,7 +2224,7 @@ static void test_acpi_aarch64_virt_tcg_topology(void) .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", .cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2", .ram_start = 0x40000000ULL, - .scan_len = 128ULL * 1024 * 1024, + .scan_len = 128ULL * MiB, }; test_acpi_one("-cpu cortex-a57 " @@ -2024,6 +2232,25 @@ static void test_acpi_aarch64_virt_tcg_topology(void) free_test_data(&data); } +static void test_acpi_aarch64_virt_tcg_its_off(void) +{ + test_data data = { + .machine = "virt", + .arch = "aarch64", + .variant = ".its_off", + .tcg_only = true, + .uefi_fl1 = "pc-bios/edk2-aarch64-code.fd", + .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", + .cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2", + .ram_start = 0x40000000ULL, + .scan_len = 128ULL * 1024 * 1024, + }; + + test_acpi_one("-cpu cortex-a57 " + "-M gic-version=3,iommu=smmuv3,its=off", &data); + free_test_data(&data); +} + static void test_acpi_q35_viot(void) { test_data data = { @@ -2096,12 +2323,13 @@ static void test_acpi_aarch64_virt_viot(void) test_data data = { .machine = "virt", .arch = "aarch64", + .variant = ".viot", .tcg_only = true, .uefi_fl1 = "pc-bios/edk2-aarch64-code.fd", .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", .cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2", .ram_start = 0x40000000ULL, - .scan_len = 128ULL * 1024 * 1024, + .scan_len = 128ULL * MiB, }; test_acpi_one("-cpu cortex-a57 " @@ -2109,6 +2337,86 @@ static void test_acpi_aarch64_virt_viot(void) free_test_data(&data); } +static void test_acpi_aarch64_virt_smmuv3_legacy(void) +{ + test_data data = { + .machine = "virt", + .arch = "aarch64", + .tcg_only = true, + .uefi_fl1 = "pc-bios/edk2-aarch64-code.fd", + .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", + .ram_start = 0x40000000ULL, + .scan_len = 128ULL * MiB, + }; + + /* + * cdrom is plugged into scsi controller to avoid conflict + * with pxb-pcie. See comments in test_acpi_aarch64_virt_tcg_pxb() for + * details. + * + * The setup includes three PCIe root complexes, one of which has + * bypass_iommu enabled. The generated IORT table contains a single + * SMMUv3 node and a Root Complex node with three ID mappings. Two + * of the ID mappings have output references pointing to the SMMUv3 + * node and the remaining one points to ITS. 
+ */ + data.variant = ".smmuv3-legacy"; + test_acpi_one(" -device pcie-root-port,chassis=1,id=pci.1" + " -device virtio-scsi-pci,id=scsi0,bus=pci.1" + " -drive file=" + "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2," + "if=none,media=cdrom,id=drive-scsi0-0-0-1,readonly=on" + " -device scsi-cd,bus=scsi0.0,scsi-id=0," + "drive=drive-scsi0-0-0-1,id=scsi0-0-0-1,bootindex=1" + " -cpu cortex-a57" + " -M iommu=smmuv3" + " -device pxb-pcie,id=pcie.1,bus=pcie.0,bus_nr=0x10" + " -device pxb-pcie,id=pcie.2,bus=pcie.0,bus_nr=0x20,bypass_iommu=on", + &data); + free_test_data(&data); +} + +static void test_acpi_aarch64_virt_smmuv3_dev(void) +{ + test_data data = { + .machine = "virt", + .arch = "aarch64", + .tcg_only = true, + .uefi_fl1 = "pc-bios/edk2-aarch64-code.fd", + .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", + .ram_start = 0x40000000ULL, + .scan_len = 128ULL * MiB, + }; + + /* + * cdrom is plugged into scsi controller to avoid conflict + * with pxb-pcie. See comments in test_acpi_aarch64_virt_tcg_pxb() + * for details. + * + * The setup includes three PCie root complexes, two of which are + * connected to separate SMMUv3 devices. The resulting IORT table + * contains two SMMUv3 nodes and a Root Complex node with ID mappings + * of which two of the ID mappings have output references pointing + * to two different SMMUv3 nodes and the remaining ones pointing to + * ITS. + */ + data.variant = ".smmuv3-dev"; + test_acpi_one(" -device pcie-root-port,chassis=1,id=pci.1" + " -device virtio-scsi-pci,id=scsi0,bus=pci.1" + " -drive file=" + "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2," + "if=none,media=cdrom,id=drive-scsi0-0-0-1,readonly=on" + " -device scsi-cd,bus=scsi0.0,scsi-id=0," + "drive=drive-scsi0-0-0-1,id=scsi0-0-0-1,bootindex=1" + " -cpu cortex-a57" + " -device arm-smmuv3,primary-bus=pcie.0,id=smmuv3.0" + " -device pxb-pcie,id=pcie.1,bus=pcie.0,bus_nr=0x10" + " -device arm-smmuv3,primary-bus=pcie.1,id=smmuv3.1" + " -device pxb-pcie,id=pcie.2,bus=pcie.0,bus_nr=0x20", + &data); + free_test_data(&data); +} + #ifndef _WIN32 # define DEV_NULL "/dev/null" #else @@ -2285,7 +2593,7 @@ static void test_acpi_aarch64_virt_oem_fields(void) .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", .cd = "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2", .ram_start = 0x40000000ULL, - .scan_len = 128ULL * 1024 * 1024, + .scan_len = 128ULL * MiB, }; char *args; @@ -2298,6 +2606,74 @@ static void test_acpi_aarch64_virt_oem_fields(void) g_free(args); } +#define LOONGARCH64_INIT_TEST_DATA(data) \ + test_data data = { \ + .machine = "virt", \ + .arch = "loongarch64", \ + .tcg_only = true, \ + .uefi_fl1 = "pc-bios/edk2-loongarch64-code.fd", \ + .uefi_fl2 = "pc-bios/edk2-loongarch64-vars.fd", \ + .cd = "tests/data/uefi-boot-images/" \ + "bios-tables-test.loongarch64.iso.qcow2", \ + .ram_start = 0, \ + .scan_len = 128ULL * MiB, \ + } + +static void test_acpi_loongarch64_virt(void) +{ + LOONGARCH64_INIT_TEST_DATA(data); + + test_acpi_one("-cpu la464 ", &data); + free_test_data(&data); +} + +static void test_acpi_loongarch64_virt_topology(void) +{ + LOONGARCH64_INIT_TEST_DATA(data); + + data.variant = ".topology"; + test_acpi_one("-cpu la464 -smp sockets=1,cores=2,threads=2", &data); + free_test_data(&data); +} + +static void test_acpi_loongarch64_virt_numamem(void) +{ + LOONGARCH64_INIT_TEST_DATA(data); + + data.variant = ".numamem"; + test_acpi_one(" -cpu la464 -m 128" + " -object memory-backend-ram,id=ram0,size=64M" + " -object memory-backend-ram,id=ram1,size=64M" + " -numa 
node,memdev=ram0 -numa node,memdev=ram1" + " -numa dist,src=0,dst=1,val=21", + &data); + free_test_data(&data); +} + +static void test_acpi_loongarch64_virt_memhp(void) +{ + LOONGARCH64_INIT_TEST_DATA(data); + + data.variant = ".memhp"; + test_acpi_one(" -cpu la464 -m 128,slots=2,maxmem=256M" + " -object memory-backend-ram,id=ram0,size=128M", + &data); + free_test_data(&data); +} + +static void test_acpi_loongarch64_virt_oem_fields(void) +{ + LOONGARCH64_INIT_TEST_DATA(data); + char *args; + + args = test_acpi_create_args(&data, "-cpu la464 "OEM_TEST_ARGS); + data.qts = qtest_init(args); + test_acpi_load_tables(&data); + test_oem_fields(&data); + qtest_quit(data.qts); + free_test_data(&data); + g_free(args); +} int main(int argc, char *argv[]) { @@ -2388,6 +2764,8 @@ int main(int argc, char *argv[]) qtest_add_func("acpi/q35/nohpet", test_acpi_q35_tcg_nohpet); qtest_add_func("acpi/q35/acpihmat-noinitiator", test_acpi_q35_tcg_acpi_hmat_noinitiator); + qtest_add_func("acpi/q35/acpihmat-genericx", + test_acpi_q35_tcg_acpi_hmat_generic_x); /* i386 does not support memory hotplug */ if (strcmp(arch, "i386")) { @@ -2453,19 +2831,48 @@ int main(int argc, char *argv[]) test_acpi_aarch64_virt_tcg_acpi_hmat); qtest_add_func("acpi/virt/topology", test_acpi_aarch64_virt_tcg_topology); + qtest_add_func("acpi/virt/its_off", + test_acpi_aarch64_virt_tcg_its_off); qtest_add_func("acpi/virt/numamem", test_acpi_aarch64_virt_tcg_numamem); qtest_add_func("acpi/virt/memhp", test_acpi_aarch64_virt_tcg_memhp); + qtest_add_func("acpi/virt/acpipcihp", + test_acpi_aarch64_virt_acpi_pci_hotplug); + qtest_add_func("acpi/virt/hpoffacpiindex", + test_acpi_aarch64_virt_pcie_root_port_hpoff); qtest_add_func("acpi/virt/pxb", test_acpi_aarch64_virt_tcg_pxb); qtest_add_func("acpi/virt/oem-fields", test_acpi_aarch64_virt_oem_fields); + qtest_add_func("acpi/virt/acpispcr", + test_acpi_aarch64_virt_tcg_acpi_spcr); if (qtest_has_device("virtio-iommu-pci")) { qtest_add_func("acpi/virt/viot", test_acpi_aarch64_virt_viot); } + qtest_add_func("acpi/virt/smmuv3-legacy", + test_acpi_aarch64_virt_smmuv3_legacy); + if (qtest_has_device("arm-smmuv3")) { + qtest_add_func("acpi/virt/smmuv3-dev", + test_acpi_aarch64_virt_smmuv3_dev); + } } } else if (strcmp(arch, "riscv64") == 0) { if (has_tcg && qtest_has_device("virtio-blk-pci")) { qtest_add_func("acpi/virt", test_acpi_riscv64_virt_tcg); + qtest_add_func("acpi/virt/numamem", + test_acpi_riscv64_virt_tcg_numamem); + qtest_add_func("acpi/virt/acpispcr", + test_acpi_riscv64_virt_tcg_acpi_spcr); + } + } else if (strcmp(arch, "loongarch64") == 0) { + if (has_tcg) { + qtest_add_func("acpi/virt", test_acpi_loongarch64_virt); + qtest_add_func("acpi/virt/topology", + test_acpi_loongarch64_virt_topology); + qtest_add_func("acpi/virt/numamem", + test_acpi_loongarch64_virt_numamem); + qtest_add_func("acpi/virt/memhp", test_acpi_loongarch64_virt_memhp); + qtest_add_func("acpi/virt/oem-fields", + test_acpi_loongarch64_virt_oem_fields); } } ret = g_test_run(); diff --git a/tests/qtest/boot-order-test.c b/tests/qtest/boot-order-test.c index 8f2b6ef05a5..74d6b82dd20 100644 --- a/tests/qtest/boot-order-test.c +++ b/tests/qtest/boot-order-test.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "libqos/fw_cfg.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "standard-headers/linux/qemu_fw_cfg.h" typedef struct { @@ -31,7 +31,7 @@ static void test_a_boot_order(const char *machine, uint64_t actual; QTestState *qts; - if (machine && !qtest_has_machine(machine)) { + if 
(!qtest_has_machine(machine)) { g_test_skip("Machine is not available"); return; } @@ -40,12 +40,7 @@ static void test_a_boot_order(const char *machine, machine ?: "", test_args); actual = read_boot_order(qts); g_assert_cmphex(actual, ==, expected_boot); - qtest_qmp_assert_success(qts, "{ 'execute': 'system_reset' }"); - /* - * system_reset only requests reset. We get a RESET event after - * the actual reset completes. Need to wait for that. - */ - qtest_qmp_eventwait(qts, "RESET"); + qtest_system_reset(qts); actual = read_boot_order(qts); g_assert_cmphex(actual, ==, expected_reboot); qtest_quit(qts); @@ -107,7 +102,7 @@ static const boot_order_test test_cases_pc[] = { static void test_pc_boot_order(void) { - test_boot_orders(NULL, read_boot_order_pc, test_cases_pc); + test_boot_orders("pc", read_boot_order_pc, test_cases_pc); } static uint64_t read_boot_order_pmac(QTestState *qts) diff --git a/tests/qtest/boot-serial-test.c b/tests/qtest/boot-serial-test.c index 3b92fa5d506..a05d26ee996 100644 --- a/tests/qtest/boot-serial-test.c +++ b/tests/qtest/boot-serial-test.c @@ -70,18 +70,23 @@ static const uint8_t kernel_plml605[] = { }; static const uint8_t bios_raspi2[] = { - 0x08, 0x30, 0x9f, 0xe5, /* ldr r3,[pc,#8] Get base */ - 0x54, 0x20, 0xa0, 0xe3, /* mov r2,#'T' */ - 0x00, 0x20, 0xc3, 0xe5, /* strb r2,[r3] */ - 0xfb, 0xff, 0xff, 0xea, /* b loop */ - 0x00, 0x10, 0x20, 0x3f, /* 0x3f201000 = UART0 base addr */ + 0x10, 0x30, 0x9f, 0xe5, /* ldr r3, [pc, #16] Get &UART0 */ + 0x10, 0x20, 0x9f, 0xe5, /* ldr r2, [pc, #16] Get &CR */ + 0xb0, 0x23, 0xc3, 0xe1, /* strh r2, [r3, #48] Set CR */ + 0x54, 0x20, 0xa0, 0xe3, /* mov r2, #'T' */ + 0x00, 0x20, 0xc3, 0xe5, /* loop: strb r2, [r3] *TXDAT = 'T' */ + 0xff, 0xff, 0xff, 0xea, /* b -4 (loop) */ + 0x00, 0x10, 0x20, 0x3f, /* UART0: 0x3f201000 */ + 0x01, 0x01, 0x00, 0x00, /* CR: 0x101 = UARTEN|TXE */ }; static const uint8_t kernel_aarch64[] = { - 0x81, 0x0a, 0x80, 0x52, /* mov w1, #0x54 */ - 0x02, 0x20, 0xa1, 0xd2, /* mov x2, #0x9000000 */ - 0x41, 0x00, 0x00, 0x39, /* strb w1, [x2] */ - 0xfd, 0xff, 0xff, 0x17, /* b -12 (loop) */ + 0x02, 0x20, 0xa1, 0xd2, /* mov x2, #0x9000000 Load UART0 */ + 0x21, 0x20, 0x80, 0x52, /* mov w1, 0x101 CR = UARTEN|TXE */ + 0x41, 0x60, 0x00, 0x79, /* strh w1, [x2, #48] Set CR */ + 0x81, 0x0a, 0x80, 0x52, /* mov w1, #'T' */ + 0x41, 0x00, 0x00, 0x39, /* loop: strb w1, [x2] *TXDAT = 'T' */ + 0xff, 0xff, 0xff, 0x17, /* b -4 (loop) */ }; static const uint8_t kernel_nrf51[] = { @@ -184,8 +189,6 @@ static const testdef_t tests[] = { { "microblazeel", "petalogix-ml605", "", "TT", sizeof(kernel_plml605), kernel_plml605 }, { "arm", "raspi2b", "", "TT", sizeof(bios_raspi2), 0, bios_raspi2 }, - /* For hppa, force bios to output to serial by disabling graphics. 
*/ - { "hppa", "hppa", "-vga none", "SeaBIOS wants SYSTEM HALT" }, { "aarch64", "virt", "-cpu max", "TT", sizeof(kernel_aarch64), kernel_aarch64 }, { "arm", "microbit", "", "T", sizeof(kernel_nrf51), kernel_nrf51 }, diff --git a/tests/qtest/cdrom-test.c b/tests/qtest/cdrom-test.c index 5d89e62515c..56e2d283a9d 100644 --- a/tests/qtest/cdrom-test.c +++ b/tests/qtest/cdrom-test.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "libqtest.h" #include "boot-sector.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" static char isoimage[] = "cdrom-boot-iso-XXXXXX"; @@ -135,13 +135,35 @@ static void add_x86_tests(void) return; } - qtest_add_data_func("cdrom/boot/default", "-cdrom ", test_cdboot); - if (qtest_has_device("virtio-scsi-ccw")) { - qtest_add_data_func("cdrom/boot/virtio-scsi", - "-device virtio-scsi -device scsi-cd,drive=cdr " - "-blockdev file,node-name=cdr,filename=", - test_cdboot); + if (qtest_has_machine("pc")) { + qtest_add_data_func("cdrom/boot/default", "-cdrom ", test_cdboot); + if (qtest_has_device("virtio-scsi-ccw")) { + qtest_add_data_func("cdrom/boot/virtio-scsi", + "-device virtio-scsi -device scsi-cd,drive=cdr " + "-blockdev file,node-name=cdr,filename=", + test_cdboot); + } + + if (qtest_has_device("am53c974")) { + qtest_add_data_func("cdrom/boot/am53c974", + "-device am53c974 -device scsi-cd,drive=cd1 " + "-drive if=none,id=cd1,format=raw,file=", + test_cdboot); + } + if (qtest_has_device("dc390")) { + qtest_add_data_func("cdrom/boot/dc390", + "-device dc390 -device scsi-cd,drive=cd1 " + "-blockdev file,node-name=cd1,filename=", + test_cdboot); + } + if (qtest_has_device("lsi53c895a")) { + qtest_add_data_func("cdrom/boot/lsi53c895a", + "-device lsi53c895a -device scsi-cd,drive=cd1 " + "-blockdev file,node-name=cd1,filename=", + test_cdboot); + } } + /* * Unstable CI test under load * See https://lists.gnu.org/archive/html/qemu-devel/2019-02/msg05509.html @@ -150,35 +172,20 @@ static void add_x86_tests(void) qtest_add_data_func("cdrom/boot/isapc", "-M isapc " "-drive if=ide,media=cdrom,file=", test_cdboot); } - if (qtest_has_device("am53c974")) { - qtest_add_data_func("cdrom/boot/am53c974", - "-device am53c974 -device scsi-cd,drive=cd1 " - "-drive if=none,id=cd1,format=raw,file=", - test_cdboot); - } - if (qtest_has_device("dc390")) { - qtest_add_data_func("cdrom/boot/dc390", - "-device dc390 -device scsi-cd,drive=cd1 " - "-blockdev file,node-name=cd1,filename=", - test_cdboot); - } - if (qtest_has_device("lsi53c895a")) { - qtest_add_data_func("cdrom/boot/lsi53c895a", - "-device lsi53c895a -device scsi-cd,drive=cd1 " - "-blockdev file,node-name=cd1,filename=", - test_cdboot); - } - if (qtest_has_device("megasas")) { - qtest_add_data_func("cdrom/boot/megasas", "-M q35 " - "-device megasas -device scsi-cd,drive=cd1 " - "-blockdev file,node-name=cd1,filename=", - test_cdboot); - } - if (qtest_has_device("megasas-gen2")) { - qtest_add_data_func("cdrom/boot/megasas-gen2", "-M q35 " - "-device megasas-gen2 -device scsi-cd,drive=cd1 " - "-blockdev file,node-name=cd1,filename=", - test_cdboot); + + if (qtest_has_machine("q35")) { + if (qtest_has_device("megasas")) { + qtest_add_data_func("cdrom/boot/megasas", "-M q35 " + "-device megasas -device scsi-cd,drive=cd1 " + "-blockdev file,node-name=cd1,filename=", + test_cdboot); + } + if (qtest_has_device("megasas-gen2")) { + qtest_add_data_func("cdrom/boot/megasas-gen2", "-M q35 " + "-device megasas-gen2 -device scsi-cd,drive=cd1 " + "-blockdev file,node-name=cd1,filename=", + test_cdboot); + } } } @@ -206,6 +213,30 @@ 
static void add_s390x_tests(void) "-drive driver=null-co,read-zeroes=on,if=none,id=d1 " "-device virtio-blk,drive=d2,bootindex=1 " "-drive if=none,id=d2,media=cdrom,file=", test_cdboot); + qtest_add_data_func("cdrom/boot/as-fallback-device", + "-device virtio-serial -device virtio-scsi " + "-device virtio-blk,drive=d1,bootindex=1 " + "-drive driver=null-co,read-zeroes=on,if=none,id=d1 " + "-device virtio-blk,drive=d2,bootindex=2 " + "-drive if=none,id=d2,media=cdrom,file=", test_cdboot); + qtest_add_data_func("cdrom/boot/as-last-option", + "-device virtio-serial -device virtio-scsi " + "-device virtio-blk,drive=d1,bootindex=1 " + "-drive driver=null-co,read-zeroes=on,if=none,id=d1 " + "-device virtio-blk,drive=d2,bootindex=2 " + "-drive driver=null-co,read-zeroes=on,if=none,id=d2 " + "-device virtio-blk,drive=d3,bootindex=3 " + "-drive driver=null-co,read-zeroes=on,if=none,id=d3 " + "-device scsi-hd,drive=d4,bootindex=4 " + "-drive driver=null-co,read-zeroes=on,if=none,id=d4 " + "-device scsi-hd,drive=d5,bootindex=5 " + "-drive driver=null-co,read-zeroes=on,if=none,id=d5 " + "-device virtio-blk,drive=d6,bootindex=6 " + "-drive driver=null-co,read-zeroes=on,if=none,id=d6 " + "-device scsi-hd,drive=d7,bootindex=7 " + "-drive driver=null-co,read-zeroes=on,if=none,id=d7 " + "-device scsi-cd,drive=d8,bootindex=8 " + "-drive if=none,id=d8,media=cdrom,file=", test_cdboot); if (qtest_has_device("x-terminal3270")) { qtest_add_data_func("cdrom/boot/without-bootindex", "-device virtio-scsi -device virtio-serial " diff --git a/tests/qtest/cmsdk-apb-watchdog-test.c b/tests/qtest/cmsdk-apb-watchdog-test.c index 00b5dbbc812..cd0c6023617 100644 --- a/tests/qtest/cmsdk-apb-watchdog-test.c +++ b/tests/qtest/cmsdk-apb-watchdog-test.c @@ -15,14 +15,12 @@ */ #include "qemu/osdep.h" +#include "exec/hwaddr.h" #include "qemu/bitops.h" #include "libqtest-single.h" -/* - * lm3s811evb watchdog; at board startup this runs at 200MHz / 16 == 12.5MHz, - * which is 80ns per tick. - */ #define WDOG_BASE 0x40000000 +#define WDOG_BASE_MPS2 0x40008000 #define WDOGLOAD 0 #define WDOGVALUE 4 @@ -37,39 +35,97 @@ #define SYSDIV_SHIFT 23 #define SYSDIV_LENGTH 4 -static void test_watchdog(void) +#define WDOGLOAD_DEFAULT 0xFFFFFFFF +#define WDOGVALUE_DEFAULT 0xFFFFFFFF + +typedef struct CMSDKAPBWatchdogTestArgs { + int64_t tick; + hwaddr wdog_base; + const char *machine; +} CMSDKAPBWatchdogTestArgs; + +enum { + MACHINE_LM3S811EVB, + MACHINE_MPS2_AN385, +}; + +/* + * lm3s811evb watchdog; at board startup this runs at 200MHz / 16 == 12.5MHz, + * which is 80ns per tick. 
+ * + * IoTKit/ARMSSE dualtimer; driven at 25MHz in mps2-an385, so 40ns per tick + */ +static const CMSDKAPBWatchdogTestArgs machine_info[] = { + [MACHINE_LM3S811EVB] = { + .tick = 80, + .wdog_base = WDOG_BASE, + .machine = "lm3s811evb", + }, + [MACHINE_MPS2_AN385] = { + .tick = 40, + .wdog_base = WDOG_BASE_MPS2, + .machine = "mps2-an385", + }, +}; + +static void system_reset(QTestState *qtest) { - g_assert_cmpuint(readl(WDOG_BASE + WDOGRIS), ==, 0); + QDict *resp; - writel(WDOG_BASE + WDOGCONTROL, 1); - writel(WDOG_BASE + WDOGLOAD, 1000); + resp = qtest_qmp(qtest, "{'execute': 'system_reset'}"); + g_assert(qdict_haskey(resp, "return")); + qobject_unref(resp); + qtest_qmp_eventwait(qtest, "RESET"); +} + +static void test_watchdog(const void *ptr) +{ + const CMSDKAPBWatchdogTestArgs *args = ptr; + hwaddr wdog_base = args->wdog_base; + int64_t tick = args->tick; + g_autofree gchar *cmdline = g_strdup_printf("-machine %s", args->machine); + qtest_start(cmdline); + + g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0); + + writel(wdog_base + WDOGCONTROL, 1); + writel(wdog_base + WDOGLOAD, 1000); /* Step to just past the 500th tick */ - clock_step(500 * 80 + 1); - g_assert_cmpuint(readl(WDOG_BASE + WDOGRIS), ==, 0); - g_assert_cmpuint(readl(WDOG_BASE + WDOGVALUE), ==, 500); + clock_step(500 * tick + 1); + g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 500); /* Just past the 1000th tick: timer should have fired */ - clock_step(500 * 80); - g_assert_cmpuint(readl(WDOG_BASE + WDOGRIS), ==, 1); - g_assert_cmpuint(readl(WDOG_BASE + WDOGVALUE), ==, 0); + clock_step(500 * tick); + g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 1); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 0); /* VALUE reloads at following tick */ - clock_step(80); - g_assert_cmpuint(readl(WDOG_BASE + WDOGVALUE), ==, 1000); + clock_step(tick); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 1000); /* Writing any value to WDOGINTCLR clears the interrupt and reloads */ - clock_step(500 * 80); - g_assert_cmpuint(readl(WDOG_BASE + WDOGVALUE), ==, 500); - g_assert_cmpuint(readl(WDOG_BASE + WDOGRIS), ==, 1); - writel(WDOG_BASE + WDOGINTCLR, 0); - g_assert_cmpuint(readl(WDOG_BASE + WDOGVALUE), ==, 1000); - g_assert_cmpuint(readl(WDOG_BASE + WDOGRIS), ==, 0); + clock_step(500 * tick); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 500); + g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 1); + writel(wdog_base + WDOGINTCLR, 0); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 1000); + g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0); + + qtest_end(); } -static void test_clock_change(void) +/* + * This test can only be executed in the stellaris board since it relies on a + * component of the board to change the clocking parameters of the watchdog. + */ +static void test_clock_change(const void *ptr) { uint32_t rcc; + const CMSDKAPBWatchdogTestArgs *args = ptr; + g_autofree gchar *cmdline = g_strdup_printf("-machine %s", args->machine); + qtest_start(cmdline); /* * Test that writing to the stellaris board's RCC register to @@ -109,23 +165,231 @@ static void test_clock_change(void) writel(WDOG_BASE + WDOGINTCLR, 0); g_assert_cmpuint(readl(WDOG_BASE + WDOGVALUE), ==, 1000); g_assert_cmpuint(readl(WDOG_BASE + WDOGRIS), ==, 0); + + qtest_end(); } -int main(int argc, char **argv) +/* Tests the counter is not running after reset. 
*/ +static void test_watchdog_reset(const void *ptr) { - int r; + const CMSDKAPBWatchdogTestArgs *args = ptr; + hwaddr wdog_base = args->wdog_base; + int64_t tick = args->tick; + g_autofree gchar *cmdline = g_strdup_printf("-machine %s", args->machine); + qtest_start(cmdline); + g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0); - g_test_init(&argc, &argv, NULL); + g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT); + g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT); + + g_assert_cmphex(readl(wdog_base + WDOGCONTROL), ==, 0); + + /* + * The counter should not be running if WDOGCONTROL.INTEN has not been set, + * as it is the case after a cold reset. + */ + clock_step(15 * tick + 1); + g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT); + g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT); + + /* Let the counter run before reset */ + writel(wdog_base + WDOGLOAD, 3000); + writel(wdog_base + WDOGCONTROL, 1); + + /* Verify it is running */ + clock_step(1000 * tick + 1); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 3000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 2000); + + system_reset(global_qtest); - qtest_start("-machine lm3s811evb"); + /* Check defaults after reset */ + g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT); + g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT); - qtest_add_func("/cmsdk-apb-watchdog/watchdog", test_watchdog); - qtest_add_func("/cmsdk-apb-watchdog/watchdog_clock_change", - test_clock_change); + /* The counter should not be running after reset. */ + clock_step(1000 * tick + 1); + g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT); + g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT); - r = g_test_run(); + qtest_end(); +} + +/* + * Tests inten works as the counter enable based on this description: + * + * Enable the interrupt event, WDOGINT. Set HIGH to enable the counter and the + * interrupt, or LOW to disable the counter and interrupt. Reloads the counter + * from the value in WDOGLOAD when the interrupt is enabled, after previously + * being disabled. + */ +static void test_watchdog_inten(const void *ptr) +{ + const CMSDKAPBWatchdogTestArgs *args = ptr; + hwaddr wdog_base = args->wdog_base; + int64_t tick = args->tick; + g_autofree gchar *cmdline = g_strdup_printf("-machine %s", args->machine); + qtest_start(cmdline); + g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0); + + g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT); + g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT); + + /* + * When WDOGLOAD is written to, the count is immediately restarted from the + * new value. + * + * Note: the counter should not be running as long as WDOGCONTROL.INTEN is + * not set + */ + writel(wdog_base + WDOGLOAD, 4000); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 4000); + clock_step(500 * tick + 1); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 4000); + + /* Set HIGH WDOGCONTROL.INTEN to enable the counter and the interrupt */ + writel(wdog_base + WDOGCONTROL, 1); + clock_step(500 * tick + 1); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 3500); + + /* or LOW to disable the counter and interrupt. 
*/ + writel(wdog_base + WDOGCONTROL, 0); + clock_step(100 * tick); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 3500); + + /* + * Reloads the counter from the value in WDOGLOAD when the interrupt is + * enabled, after previously being disabled. + */ + writel(wdog_base + WDOGCONTROL, 1); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 4000); + + /* Test counter is still on */ + clock_step(50 * tick + 1); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 3950); + + /* + * When WDOGLOAD is written to, the count is immediately restarted from the + * new value. + * + * Note: the counter should be running since WDOGCONTROL.INTEN is set + */ + writel(wdog_base + WDOGLOAD, 5000); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 5000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 5000); + clock_step(4999 * tick + 1); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 5000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 1); + g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0); + + /* Finally disable and check the conditions don't change */ + writel(wdog_base + WDOGCONTROL, 0); + clock_step(10 * tick); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 5000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 1); + g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0); + + qtest_end(); +} + +/* + * Tests the following custom behavior: + * + * The Luminary version of this device ignores writes to this register after the + * guest has enabled interrupts (so they can only be disabled again via reset). + */ +static void test_watchdog_inten_luminary(const void *ptr) +{ + const CMSDKAPBWatchdogTestArgs *args = ptr; + hwaddr wdog_base = args->wdog_base; + int64_t tick = args->tick; + g_autofree gchar *cmdline = g_strdup_printf("-machine %s", args->machine); + qtest_start(cmdline); + g_assert_cmpuint(readl(wdog_base + WDOGRIS), ==, 0); + + g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT); + g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT); + + /* + * When WDOGLOAD is written to, the count is immediately restarted from the + * new value. 
+ * + * Note: the counter should not be running as long as WDOGCONTROL.INTEN is + * not set + */ + writel(wdog_base + WDOGLOAD, 4000); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 4000); + clock_step(500 * tick + 1); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 4000); + + /* Set HIGH WDOGCONTROL.INTEN to enable the counter and the interrupt */ + writel(wdog_base + WDOGCONTROL, 1); + clock_step(500 * tick + 1); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 3500); + + /* + * The Luminary version of this device ignores writes to this register after + * the guest has enabled interrupts + */ + writel(wdog_base + WDOGCONTROL, 0); + clock_step(100 * tick); + g_assert_cmpuint(readl(wdog_base + WDOGLOAD), ==, 4000); + g_assert_cmpuint(readl(wdog_base + WDOGVALUE), ==, 3400); + g_assert_cmphex(readl(wdog_base + WDOGCONTROL), ==, 0x1); + + /* They can only be disabled again via reset */ + system_reset(global_qtest); + + /* Check defaults after reset */ + g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT); + g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT); + g_assert_cmphex(readl(wdog_base + WDOGCONTROL), ==, 0); + + /* The counter should not be running after reset. */ + clock_step(1000 * tick + 1); + g_assert_cmphex(readl(wdog_base + WDOGLOAD), ==, WDOGLOAD_DEFAULT); + g_assert_cmphex(readl(wdog_base + WDOGVALUE), ==, WDOGVALUE_DEFAULT); qtest_end(); +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + g_test_set_nonfatal_assertions(); + + if (qtest_has_machine(machine_info[MACHINE_LM3S811EVB].machine)) { + qtest_add_data_func("/cmsdk-apb-watchdog/watchdog", + &machine_info[MACHINE_LM3S811EVB], test_watchdog); + qtest_add_data_func("/cmsdk-apb-watchdog/watchdog_clock_change", + &machine_info[MACHINE_LM3S811EVB], + test_clock_change); + qtest_add_data_func("/cmsdk-apb-watchdog/watchdog_reset", + &machine_info[MACHINE_LM3S811EVB], + test_watchdog_reset); + qtest_add_data_func("/cmsdk-apb-watchdog/watchdog_inten_luminary", + &machine_info[MACHINE_LM3S811EVB], + test_watchdog_inten_luminary); + } + if (qtest_has_machine(machine_info[MACHINE_MPS2_AN385].machine)) { + qtest_add_data_func("/cmsdk-apb-watchdog/watchdog_mps2", + &machine_info[MACHINE_MPS2_AN385], test_watchdog); + qtest_add_data_func("/cmsdk-apb-watchdog/watchdog_reset_mps2", + &machine_info[MACHINE_MPS2_AN385], + test_watchdog_reset); + qtest_add_data_func("/cmsdk-apb-watchdog/watchdog_inten", + &machine_info[MACHINE_MPS2_AN385], + test_watchdog_inten); + } - return r; + return g_test_run(); } diff --git a/tests/qtest/cpu-plug-test.c b/tests/qtest/cpu-plug-test.c index 7f5dd5f85a7..44d704680b1 100644 --- a/tests/qtest/cpu-plug-test.c +++ b/tests/qtest/cpu-plug-test.c @@ -10,8 +10,8 @@ #include "qemu/osdep.h" #include "libqtest-single.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" struct PlugTestData { char *machine; @@ -156,6 +156,28 @@ static void add_s390x_test_case(const char *mname) g_free(path); } +static void add_loongarch_test_case(const char *mname) +{ + char *path; + PlugTestData *data; + + data = g_new(PlugTestData, 1); + data->machine = g_strdup(mname); + data->cpu_model = "la464"; + data->device_model = g_strdup("la464-loongarch-cpu"); + data->sockets = 1; + data->cores = 3; + data->threads = 1; + 
data->maxcpus = data->sockets * data->cores * data->threads; + + path = g_strdup_printf("cpu-plug/%s/device-add/%ux%ux%u&maxcpus=%u", + mname, data->sockets, data->cores, + data->threads, data->maxcpus); + qtest_add_data_func_full(path, data, test_plug_with_device_add, + test_data_free); + g_free(path); +} + int main(int argc, char **argv) { const char *arch = qtest_get_arch(); @@ -168,6 +190,8 @@ int main(int argc, char **argv) qtest_cb_for_every_machine(add_pseries_test_case, g_test_quick()); } else if (g_str_equal(arch, "s390x")) { qtest_cb_for_every_machine(add_s390x_test_case, g_test_quick()); + } else if (g_str_equal(arch, "loongarch64")) { + add_loongarch_test_case("virt"); } return g_test_run(); diff --git a/tests/qtest/cxl-test.c b/tests/qtest/cxl-test.c index a6003318439..8fb7e58d4f1 100644 --- a/tests/qtest/cxl-test.c +++ b/tests/qtest/cxl-test.c @@ -19,6 +19,12 @@ "-device pxb-cxl,id=cxl.1,bus=pcie.0,bus_nr=53 " \ "-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=4G " +#define QEMU_VIRT_2PXB_CMD \ + "-machine virt,cxl=on -cpu max " \ + "-device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 " \ + "-device pxb-cxl,id=cxl.1,bus=pcie.0,bus_nr=53 " \ + "-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=4G " + #define QEMU_RP \ "-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 " @@ -197,25 +203,51 @@ static void cxl_2pxb_4rp_4t3d(void) qtest_end(); rmdir(tmpfs); } + +static void cxl_virt_2pxb_4rp_4t3d(void) +{ + g_autoptr(GString) cmdline = g_string_new(NULL); + g_autofree const char *tmpfs = NULL; + + tmpfs = g_dir_make_tmp("cxl-test-XXXXXX", NULL); + + g_string_printf(cmdline, QEMU_VIRT_2PXB_CMD QEMU_4RP QEMU_4T3D, + tmpfs, tmpfs, tmpfs, tmpfs, tmpfs, tmpfs, + tmpfs, tmpfs); + + qtest_start(cmdline->str); + qtest_end(); + rmdir(tmpfs); +} #endif /* CONFIG_POSIX */ int main(int argc, char **argv) { - g_test_init(&argc, &argv, NULL); + const char *arch = qtest_get_arch(); - qtest_add_func("/pci/cxl/basic_hostbridge", cxl_basic_hb); - qtest_add_func("/pci/cxl/basic_pxb", cxl_basic_pxb); - qtest_add_func("/pci/cxl/pxb_with_window", cxl_pxb_with_window); - qtest_add_func("/pci/cxl/pxb_x2_with_window", cxl_2pxb_with_window); - qtest_add_func("/pci/cxl/rp", cxl_root_port); - qtest_add_func("/pci/cxl/rp_x2", cxl_2root_port); + g_test_init(&argc, &argv, NULL); + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { + qtest_add_func("/pci/cxl/basic_hostbridge", cxl_basic_hb); + qtest_add_func("/pci/cxl/basic_pxb", cxl_basic_pxb); + qtest_add_func("/pci/cxl/pxb_with_window", cxl_pxb_with_window); + qtest_add_func("/pci/cxl/pxb_x2_with_window", cxl_2pxb_with_window); + qtest_add_func("/pci/cxl/rp", cxl_root_port); + qtest_add_func("/pci/cxl/rp_x2", cxl_2root_port); #ifdef CONFIG_POSIX - qtest_add_func("/pci/cxl/type3_device", cxl_t3d_deprecated); - qtest_add_func("/pci/cxl/type3_device_pmem", cxl_t3d_persistent); - qtest_add_func("/pci/cxl/type3_device_vmem", cxl_t3d_volatile); - qtest_add_func("/pci/cxl/type3_device_vmem_lsa", cxl_t3d_volatile_lsa); - qtest_add_func("/pci/cxl/rp_x2_type3_x2", cxl_1pxb_2rp_2t3d); - qtest_add_func("/pci/cxl/pxb_x2_root_port_x4_type3_x4", cxl_2pxb_4rp_4t3d); + qtest_add_func("/pci/cxl/type3_device", cxl_t3d_deprecated); + qtest_add_func("/pci/cxl/type3_device_pmem", cxl_t3d_persistent); + qtest_add_func("/pci/cxl/type3_device_vmem", cxl_t3d_volatile); + qtest_add_func("/pci/cxl/type3_device_vmem_lsa", cxl_t3d_volatile_lsa); + qtest_add_func("/pci/cxl/rp_x2_type3_x2", cxl_1pxb_2rp_2t3d); + 
qtest_add_func("/pci/cxl/pxb_x2_root_port_x4_type3_x4", + cxl_2pxb_4rp_4t3d); #endif + } else if (strcmp(arch, "aarch64") == 0) { +#ifdef CONFIG_POSIX + qtest_add_func("/pci/cxl/virt/pxb_x2_root_port_x4_type3_x4", + cxl_virt_2pxb_4rp_4t3d); +#endif + } + return g_test_run(); } diff --git a/tests/qtest/dbus-display-test.c b/tests/qtest/dbus-display-test.c index 0390bdcb41a..f7fc873bfb3 100644 --- a/tests/qtest/dbus-display-test.c +++ b/tests/qtest/dbus-display-test.c @@ -2,9 +2,14 @@ #include "qemu/sockets.h" #include "qemu/dbus.h" #include "qemu/sockets.h" +#include "glib.h" +#include "glibconfig.h" #include #include #include "libqtest.h" +#ifndef WIN32 +#include +#endif #include "ui/dbus-display1.h" static GDBusConnection* @@ -82,6 +87,7 @@ typedef struct TestDBusConsoleRegister { GThread *thread; GDBusConnection *listener_conn; GDBusObjectManagerServer *server; + bool with_map; } TestDBusConsoleRegister; static gboolean listener_handle_scanout( @@ -94,13 +100,49 @@ static gboolean listener_handle_scanout( GVariant *arg_data, TestDBusConsoleRegister *test) { + if (!test->with_map) { + g_main_loop_quit(test->loop); + } + + return DBUS_METHOD_INVOCATION_HANDLED; +} + +#ifndef WIN32 +static gboolean listener_handle_scanout_map( + QemuDBusDisplay1ListenerUnixMap *object, + GDBusMethodInvocation *invocation, + GUnixFDList *fd_list, + GVariant *arg_handle, + guint arg_offset, + guint arg_width, + guint arg_height, + guint arg_stride, + guint arg_pixman_format, + TestDBusConsoleRegister *test) +{ + int fd = -1; + gint32 handle = g_variant_get_handle(arg_handle); + g_autoptr(GError) error = NULL; + void *addr = NULL; + size_t len = arg_height * arg_stride; + + g_assert_cmpuint(g_unix_fd_list_get_length(fd_list), ==, 1); + fd = g_unix_fd_list_get(fd_list, handle, &error); + g_assert_no_error(error); + + addr = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, arg_offset); + g_assert_no_errno(addr == MAP_FAILED ? 
-1 : 0); + g_assert_no_errno(munmap(addr, len)); + g_main_loop_quit(test->loop); + close(fd); return DBUS_METHOD_INVOCATION_HANDLED; } +#endif static void -test_dbus_console_setup_listener(TestDBusConsoleRegister *test) +test_dbus_console_setup_listener(TestDBusConsoleRegister *test, bool with_map) { g_autoptr(GDBusObjectSkeleton) listener = NULL; g_autoptr(QemuDBusDisplay1ListenerSkeleton) iface = NULL; @@ -114,6 +156,25 @@ test_dbus_console_setup_listener(TestDBusConsoleRegister *test) NULL); g_dbus_object_skeleton_add_interface(listener, G_DBUS_INTERFACE_SKELETON(iface)); + if (with_map) { +#ifdef WIN32 + g_test_skip("map test lacking on win32"); + return; +#else + g_autoptr(QemuDBusDisplay1ListenerUnixMapSkeleton) iface_map = + QEMU_DBUS_DISPLAY1_LISTENER_UNIX_MAP_SKELETON( + qemu_dbus_display1_listener_unix_map_skeleton_new()); + + g_object_connect(iface_map, + "signal::handle-scanout-map", listener_handle_scanout_map, test, + NULL); + g_dbus_object_skeleton_add_interface(listener, + G_DBUS_INTERFACE_SKELETON(iface_map)); + g_object_set(iface, "interfaces", + (const gchar *[]) { "org.qemu.Display1.Listener.Unix.Map", NULL }, + NULL); +#endif + } g_dbus_object_manager_server_export(test->server, listener); g_dbus_object_manager_server_set_connection(test->server, test->listener_conn); @@ -145,7 +206,7 @@ test_dbus_console_registered(GObject *source_object, g_assert_no_error(err); test->listener_conn = g_thread_join(test->thread); - test_dbus_console_setup_listener(test); + test_dbus_console_setup_listener(test, test->with_map); } static gpointer @@ -155,7 +216,7 @@ test_dbus_p2p_server_setup_thread(gpointer data) } static void -test_dbus_display_console(void) +test_dbus_display_console(const void* data) { g_autoptr(GError) err = NULL; g_autoptr(GDBusConnection) conn = NULL; @@ -163,7 +224,7 @@ test_dbus_display_console(void) g_autoptr(GMainLoop) loop = NULL; QTestState *qts = NULL; int pair[2]; - TestDBusConsoleRegister test = { 0, }; + TestDBusConsoleRegister test = { 0, .with_map = GPOINTER_TO_INT(data) }; #ifdef WIN32 WSAPROTOCOL_INFOW info; g_autoptr(GVariant) listener = NULL; @@ -299,7 +360,8 @@ main(int argc, char **argv) g_test_init(&argc, &argv, NULL); qtest_add_func("/dbus-display/vm", test_dbus_display_vm); - qtest_add_func("/dbus-display/console", test_dbus_display_console); + qtest_add_data_func("/dbus-display/console", GINT_TO_POINTER(false), test_dbus_display_console); + qtest_add_data_func("/dbus-display/console/map", GINT_TO_POINTER(true), test_dbus_display_console); qtest_add_func("/dbus-display/keyboard", test_dbus_display_keyboard); return g_test_run(); diff --git a/tests/qtest/device-introspect-test.c b/tests/qtest/device-introspect-test.c index 587da59623d..f84cec51dc2 100644 --- a/tests/qtest/device-introspect-test.c +++ b/tests/qtest/device-introspect-test.c @@ -18,9 +18,9 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/qstring.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" #include "libqtest.h" const char common_args[] = "-nodefaults -machine none"; diff --git a/tests/qtest/device-plug-test.c b/tests/qtest/device-plug-test.c index c6f33153eb4..2707ee59f63 100644 --- a/tests/qtest/device-plug-test.c +++ b/tests/qtest/device-plug-test.c @@ -12,17 +12,8 @@ #include "qemu/osdep.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" - -static void system_reset(QTestState *qtest) -{ - QDict *resp; - - resp = qtest_qmp(qtest, "{'execute': 
'system_reset'}"); - g_assert(qdict_haskey(resp, "return")); - qobject_unref(resp); -} +#include "qobject/qdict.h" +#include "qobject/qstring.h" static void wait_device_deleted_event(QTestState *qtest, const char *id) { @@ -58,7 +49,7 @@ static void process_device_remove(QTestState *qtest, const char *id) * handled, removing the device. */ qtest_qmp_device_del_send(qtest, id); - system_reset(qtest); + qtest_system_reset_nowait(qtest); wait_device_deleted_event(qtest, id); } diff --git a/tests/qtest/dm163-test.c b/tests/qtest/dm163-test.c index 3161c9208d8..4c8e654af29 100644 --- a/tests/qtest/dm163-test.c +++ b/tests/qtest/dm163-test.c @@ -182,6 +182,8 @@ static void test_dm163_gpio_connection(void) g_assert_false(qtest_get_irq(qts, LAT_B)); g_assert_false(qtest_get_irq(qts, SELBK)); g_assert_false(qtest_get_irq(qts, RST_B)); + + qtest_quit(qts); } int main(int argc, char **argv) diff --git a/tests/qtest/drive_del-test.c b/tests/qtest/drive_del-test.c index 7b67a4bbee4..30d9451ddda 100644 --- a/tests/qtest/drive_del-test.c +++ b/tests/qtest/drive_del-test.c @@ -13,8 +13,8 @@ #include "qemu/osdep.h" #include "libqtest.h" #include "libqos/virtio.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" static const char *qvirtio_get_dev_type(void); @@ -154,15 +154,10 @@ static void device_add(QTestState *qts) static void device_del(QTestState *qts, bool and_reset) { - QDict *response; - qtest_qmp_device_del_send(qts, "dev0"); if (and_reset) { - response = qtest_qmp(qts, "{'execute': 'system_reset' }"); - g_assert(response); - g_assert(qdict_haskey(response, "return")); - qobject_unref(response); + qtest_system_reset_nowait(qts); } qtest_qmp_eventwait(qts, "DEVICE_DELETED"); diff --git a/tests/qtest/emc141x-test.c b/tests/qtest/emc141x-test.c index 8c866940916..a24103e2cd8 100644 --- a/tests/qtest/emc141x-test.c +++ b/tests/qtest/emc141x-test.c @@ -10,7 +10,7 @@ #include "libqtest-single.h" #include "libqos/qgraph.h" #include "libqos/i2c.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "hw/sensor/emc141x_regs.h" #define EMC1414_TEST_ID "emc1414-test" diff --git a/tests/qtest/fdc-test.c b/tests/qtest/fdc-test.c index 5e8fbda9dff..1b37a8a4d25 100644 --- a/tests/qtest/fdc-test.c +++ b/tests/qtest/fdc-test.c @@ -26,7 +26,7 @@ #include "libqtest-single.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #define DRIVE_FLOPPY_BLANK \ "-drive if=floppy,file=null-co://,file.read-zeroes=on,format=raw,size=1440k" @@ -552,7 +552,7 @@ static bool qtest_check_clang_sanitizer(void) #ifdef QEMU_SANITIZE_ADDRESS return true; #else - g_test_skip("QEMU not configured using --enable-sanitizers"); + g_test_skip("QEMU not configured using --enable-asan"); return false; #endif } diff --git a/tests/qtest/fuzz/fuzz.c b/tests/qtest/fuzz/fuzz.c index 9b9c9f9c36e..ca248a51a6c 100644 --- a/tests/qtest/fuzz/fuzz.c +++ b/tests/qtest/fuzz/fuzz.c @@ -17,9 +17,9 @@ #include "qemu/cutils.h" #include "qemu/datadir.h" -#include "sysemu/sysemu.h" -#include "sysemu/qtest.h" -#include "sysemu/runstate.h" +#include "system/system.h" +#include "system/qtest.h" +#include "system/runstate.h" #include "qemu/main-loop.h" #include "qemu/rcu.h" #include "tests/qtest/libqtest.h" @@ -41,6 +41,7 @@ static FuzzTargetList *fuzz_target_list; static FuzzTarget *fuzz_target; static QTestState *fuzz_qts; +int (*qemu_main)(void); void flush_events(QTestState *s) diff --git a/tests/qtest/fuzz/generic_fuzz.c b/tests/qtest/fuzz/generic_fuzz.c index 
d107a496da6..f12080eda49 100644 --- a/tests/qtest/fuzz/generic_fuzz.c +++ b/tests/qtest/fuzz/generic_fuzz.c @@ -20,8 +20,8 @@ #include "tests/qtest/libqos/pci-pc.h" #include "fuzz.h" #include "string.h" -#include "exec/memory.h" -#include "exec/ramblock.h" +#include "system/memory.h" +#include "system/ramblock.h" #include "hw/qdev-core.h" #include "hw/pci/pci.h" #include "hw/pci/pci_device.h" @@ -572,7 +572,6 @@ static void op_add_dma_pattern(QTestState *s, pattern p = {a.index, a.stride, len - sizeof(a), data + sizeof(a)}; p.index = a.index % p.len; g_array_append_val(dma_patterns, p); - return; } static void op_clear_dma_patterns(QTestState *s, diff --git a/tests/qtest/fuzz/qos_fuzz.c b/tests/qtest/fuzz/qos_fuzz.c index d3839bf9994..9afe8bf6d8b 100644 --- a/tests/qtest/fuzz/qos_fuzz.c +++ b/tests/qtest/fuzz/qos_fuzz.c @@ -19,7 +19,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qemu/main-loop.h" #include "tests/qtest/libqtest.h" diff --git a/tests/qtest/fuzz/qtest_wrappers.c b/tests/qtest/fuzz/qtest_wrappers.c index 0580f8df860..d7adcbe3fd2 100644 --- a/tests/qtest/fuzz/qtest_wrappers.c +++ b/tests/qtest/fuzz/qtest_wrappers.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "hw/core/cpu.h" -#include "exec/ioport.h" +#include "system/ioport.h" #include "fuzz.h" diff --git a/tests/qtest/fw_cfg-test.c b/tests/qtest/fw_cfg-test.c index 5dc807ba238..e48b34afa59 100644 --- a/tests/qtest/fw_cfg-test.c +++ b/tests/qtest/fw_cfg-test.c @@ -243,12 +243,6 @@ int main(int argc, char **argv) qtest_add_func("fw_cfg/ram_size", test_fw_cfg_ram_size); qtest_add_func("fw_cfg/nographic", test_fw_cfg_nographic); qtest_add_func("fw_cfg/nb_cpus", test_fw_cfg_nb_cpus); -#if 0 - qtest_add_func("fw_cfg/machine_id", test_fw_cfg_machine_id); - qtest_add_func("fw_cfg/kernel", test_fw_cfg_kernel); - qtest_add_func("fw_cfg/initrd", test_fw_cfg_initrd); - qtest_add_func("fw_cfg/boot_device", test_fw_cfg_boot_device); -#endif qtest_add_func("fw_cfg/max_cpus", test_fw_cfg_max_cpus); qtest_add_func("fw_cfg/numa", test_fw_cfg_numa); qtest_add_func("fw_cfg/boot_menu", test_fw_cfg_boot_menu); diff --git a/tests/qtest/hd-geo-test.c b/tests/qtest/hd-geo-test.c index d08bffad91d..41481a5e09b 100644 --- a/tests/qtest/hd-geo-test.c +++ b/tests/qtest/hd-geo-test.c @@ -17,7 +17,7 @@ #include "qemu/osdep.h" #include "qemu/bswap.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qlist.h" #include "libqtest.h" #include "libqos/fw_cfg.h" #include "libqos/libqos.h" @@ -900,7 +900,6 @@ static void test_override_hot_unplug(TestArgs *args, const char *devid, QTestState *qts; char *joined_args; QFWCFG *fw_cfg; - QDict *response; int i; joined_args = g_strjoinv(" ", args->argv); @@ -913,13 +912,7 @@ static void test_override_hot_unplug(TestArgs *args, const char *devid, /* unplug device an restart */ qtest_qmp_device_del_send(qts, devid); - response = qtest_qmp(qts, - "{ 'execute': 'system_reset', 'arguments': { }}"); - g_assert(response); - g_assert(!qdict_haskey(response, "error")); - qobject_unref(response); - - qtest_qmp_eventwait(qts, "RESET"); + qtest_system_reset(qts); read_bootdevices(fw_cfg, expected2); @@ -1074,17 +1067,26 @@ int main(int argc, char **argv) } } - qtest_add_func("hd-geo/ide/none", test_ide_none); - qtest_add_func("hd-geo/ide/drive/mbr/blank", test_ide_drive_mbr_blank); - qtest_add_func("hd-geo/ide/drive/mbr/lba", test_ide_drive_mbr_lba); - qtest_add_func("hd-geo/ide/drive/mbr/chs", test_ide_drive_mbr_chs); - 
qtest_add_func("hd-geo/ide/drive/cd_0", test_ide_drive_cd_0); - qtest_add_func("hd-geo/ide/device/mbr/blank", test_ide_device_mbr_blank); - qtest_add_func("hd-geo/ide/device/mbr/lba", test_ide_device_mbr_lba); - qtest_add_func("hd-geo/ide/device/mbr/chs", test_ide_device_mbr_chs); - qtest_add_func("hd-geo/ide/device/user/chs", test_ide_device_user_chs); - qtest_add_func("hd-geo/ide/device/user/chst", test_ide_device_user_chst); - if (have_qemu_img()) { + if (qtest_has_machine("pc")) { + qtest_add_func("hd-geo/ide/none", test_ide_none); + qtest_add_func("hd-geo/ide/drive/mbr/blank", test_ide_drive_mbr_blank); + qtest_add_func("hd-geo/ide/drive/mbr/lba", test_ide_drive_mbr_lba); + qtest_add_func("hd-geo/ide/drive/mbr/chs", test_ide_drive_mbr_chs); + qtest_add_func("hd-geo/ide/drive/cd_0", test_ide_drive_cd_0); + qtest_add_func("hd-geo/ide/device/mbr/blank", test_ide_device_mbr_blank); + qtest_add_func("hd-geo/ide/device/mbr/lba", test_ide_device_mbr_lba); + qtest_add_func("hd-geo/ide/device/mbr/chs", test_ide_device_mbr_chs); + qtest_add_func("hd-geo/ide/device/user/chs", test_ide_device_user_chs); + qtest_add_func("hd-geo/ide/device/user/chst", test_ide_device_user_chst); + } + + if (!have_qemu_img()) { + g_test_message("QTEST_QEMU_IMG not set or qemu-img missing; " + "skipping hd-geo/override/* tests"); + goto test_add_done; + } + + if (qtest_has_machine("pc")) { qtest_add_func("hd-geo/override/ide", test_override_ide); if (qtest_has_device("lsi53c895a")) { qtest_add_func("hd-geo/override/scsi", test_override_scsi); @@ -1104,30 +1106,26 @@ int main(int argc, char **argv) qtest_add_func("hd-geo/override/virtio_blk", test_override_virtio_blk); } + } - if (qtest_has_machine("q35")) { - qtest_add_func("hd-geo/override/sata", test_override_sata); - qtest_add_func("hd-geo/override/zero_chs_q35", - test_override_zero_chs_q35); - if (qtest_has_device("lsi53c895a")) { - qtest_add_func("hd-geo/override/scsi_q35", - test_override_scsi_q35); - } - if (qtest_has_device("virtio-scsi-pci")) { - qtest_add_func("hd-geo/override/scsi_hot_unplug_q35", - test_override_scsi_hot_unplug_q35); - } - if (qtest_has_device("virtio-blk-pci")) { - qtest_add_func("hd-geo/override/virtio_hot_unplug_q35", - test_override_virtio_hot_unplug_q35); - qtest_add_func("hd-geo/override/virtio_blk_q35", - test_override_virtio_blk_q35); - } - + if (qtest_has_machine("q35")) { + qtest_add_func("hd-geo/override/sata", test_override_sata); + qtest_add_func("hd-geo/override/zero_chs_q35", + test_override_zero_chs_q35); + if (qtest_has_device("lsi53c895a")) { + qtest_add_func("hd-geo/override/scsi_q35", + test_override_scsi_q35); + } + if (qtest_has_device("virtio-scsi-pci")) { + qtest_add_func("hd-geo/override/scsi_hot_unplug_q35", + test_override_scsi_hot_unplug_q35); + } + if (qtest_has_device("virtio-blk-pci")) { + qtest_add_func("hd-geo/override/virtio_hot_unplug_q35", + test_override_virtio_hot_unplug_q35); + qtest_add_func("hd-geo/override/virtio_blk_q35", + test_override_virtio_blk_q35); } - } else { - g_test_message("QTEST_QEMU_IMG not set or qemu-img missing; " - "skipping hd-geo/override/* tests"); } test_add_done: diff --git a/tests/qtest/ide-test.c b/tests/qtest/ide-test.c index 90ba6b298bd..ceee444a9ec 100644 --- a/tests/qtest/ide-test.c +++ b/tests/qtest/ide-test.c @@ -29,7 +29,7 @@ #include "libqos/libqos.h" #include "libqos/pci-pc.h" #include "libqos/malloc-pc.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/bswap.h" #include "hw/pci/pci_ids.h" #include "hw/pci/pci_regs.h" diff --git 
a/tests/qtest/intel-iommu-test.c b/tests/qtest/intel-iommu-test.c new file mode 100644 index 00000000000..c521b3796eb --- /dev/null +++ b/tests/qtest/intel-iommu-test.c @@ -0,0 +1,64 @@ +/* + * QTest testcase for intel-iommu + * + * Copyright (c) 2024 Intel, Inc. + * + * Author: Zhenzhong Duan + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "libqtest.h" +#include "hw/i386/intel_iommu_internal.h" + +#define CAP_STAGE_1_FIXED1 (VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | \ + VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS) +#define ECAP_STAGE_1_FIXED1 (VTD_ECAP_QI | VTD_ECAP_IR | VTD_ECAP_IRO | \ + VTD_ECAP_MHMV | VTD_ECAP_SMTS | VTD_ECAP_FLTS) + +static inline uint64_t vtd_reg_readq(QTestState *s, uint64_t offset) +{ + return qtest_readq(s, Q35_HOST_BRIDGE_IOMMU_ADDR + offset); +} + +static void test_intel_iommu_stage_1(void) +{ + uint8_t init_csr[DMAR_REG_SIZE]; /* register values */ + uint8_t post_reset_csr[DMAR_REG_SIZE]; /* register values */ + uint64_t cap, ecap, tmp; + QTestState *s; + + s = qtest_init("-M q35 -device intel-iommu,x-scalable-mode=on,x-flts=on"); + + cap = vtd_reg_readq(s, DMAR_CAP_REG); + g_assert((cap & CAP_STAGE_1_FIXED1) == CAP_STAGE_1_FIXED1); + + tmp = cap & VTD_CAP_SAGAW_MASK; + g_assert(tmp == (VTD_CAP_SAGAW_39bit | VTD_CAP_SAGAW_48bit)); + + tmp = VTD_MGAW_FROM_CAP(cap); + g_assert(tmp == VTD_HOST_AW_48BIT - 1); + + ecap = vtd_reg_readq(s, DMAR_ECAP_REG); + g_assert((ecap & ECAP_STAGE_1_FIXED1) == ECAP_STAGE_1_FIXED1); + + qtest_memread(s, Q35_HOST_BRIDGE_IOMMU_ADDR, init_csr, DMAR_REG_SIZE); + + qobject_unref(qtest_qmp(s, "{ 'execute': 'system_reset' }")); + qtest_qmp_eventwait(s, "RESET"); + + qtest_memread(s, Q35_HOST_BRIDGE_IOMMU_ADDR, post_reset_csr, DMAR_REG_SIZE); + /* Ensure registers are consistent after hard reset */ + g_assert(!memcmp(init_csr, post_reset_csr, DMAR_REG_SIZE)); + + qtest_quit(s); +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + qtest_add_func("/q35/intel-iommu/stage-1", test_intel_iommu_stage_1); + + return g_test_run(); +} diff --git a/tests/qtest/ipmi-bt-test.c b/tests/qtest/ipmi-bt-test.c index 383239bcd48..637732fd5aa 100644 --- a/tests/qtest/ipmi-bt-test.c +++ b/tests/qtest/ipmi-bt-test.c @@ -251,7 +251,7 @@ static void emu_msg_handler(void) msg[msg_len++] = 0xa0; write_emu_msg(msg, msg_len); } else { - g_assert(0); + g_assert_not_reached(); } } @@ -411,7 +411,7 @@ int main(int argc, char **argv) g_test_init(&argc, &argv, NULL); global_qtest = qtest_initf( - " -chardev socket,id=ipmi0,host=127.0.0.1,port=%d,reconnect=10" + " -chardev socket,id=ipmi0,host=127.0.0.1,port=%d,reconnect-ms=10000" " -device ipmi-bmc-extern,chardev=ipmi0,id=bmc0" " -device isa-ipmi-bt,bmc=bmc0", emu_port); qtest_irq_intercept_in(global_qtest, "ioapic"); diff --git a/tests/qtest/ipmi-kcs-test.c b/tests/qtest/ipmi-kcs-test.c index afc24dd3e46..3186c6ad64b 100644 --- a/tests/qtest/ipmi-kcs-test.c +++ b/tests/qtest/ipmi-kcs-test.c @@ -145,7 +145,7 @@ static void kcs_cmd(uint8_t *cmd, unsigned int cmd_len, break; default: - g_assert(0); + g_assert_not_reached(); } *rsp_len = j; } @@ -184,7 +184,7 @@ static void kcs_abort(uint8_t *cmd, unsigned int cmd_len, break; default: - g_assert(0); + g_assert_not_reached(); } /* Start the abort here */ diff --git a/tests/qtest/isl_pmbus_vr-test.c b/tests/qtest/isl_pmbus_vr-test.c index 5553ea410ac..1ff840c6b74 100644 --- a/tests/qtest/isl_pmbus_vr-test.c +++ b/tests/qtest/isl_pmbus_vr-test.c @@ -21,8 +21,8 @@ #include "libqtest-single.h" #include 
"libqos/qgraph.h" #include "libqos/i2c.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qnum.h" +#include "qobject/qdict.h" +#include "qobject/qnum.h" #include "qemu/bitops.h" #define TEST_ID "isl_pmbus_vr-test" diff --git a/tests/qtest/libqmp.c b/tests/qtest/libqmp.c index a89cab03c3b..16fe546885a 100644 --- a/tests/qtest/libqmp.c +++ b/tests/qtest/libqmp.c @@ -25,8 +25,8 @@ #include "qemu/cutils.h" #include "qemu/sockets.h" #include "qapi/error.h" -#include "qapi/qmp/json-parser.h" -#include "qapi/qmp/qjson.h" +#include "qobject/json-parser.h" +#include "qobject/qjson.h" #define SOCKET_MAX_FDS 16 diff --git a/tests/qtest/libqmp.h b/tests/qtest/libqmp.h index 3445b753ff4..4a931c93ab2 100644 --- a/tests/qtest/libqmp.h +++ b/tests/qtest/libqmp.h @@ -18,7 +18,7 @@ #ifndef LIBQMP_H #define LIBQMP_H -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" QDict *qmp_fd_receive(int fd); #ifndef _WIN32 diff --git a/tests/qtest/libqos/arm-imx25-pdk-machine.c b/tests/qtest/libqos/arm-imx25-pdk-machine.c index 8fe128fae86..2d8b7543439 100644 --- a/tests/qtest/libqos/arm-imx25-pdk-machine.c +++ b/tests/qtest/libqos/arm-imx25-pdk-machine.c @@ -23,6 +23,7 @@ #include "libqos-malloc.h" #include "qgraph.h" #include "i2c.h" +#include "hw/i2c/imx_i2c.h" #define ARM_PAGE_SIZE 4096 #define IMX25_PDK_RAM_START 0x80000000 @@ -50,7 +51,7 @@ static void *imx25_pdk_get_driver(void *object, const char *interface) static QOSGraphObject *imx25_pdk_get_device(void *obj, const char *device) { QIMX25PDKMachine *machine = obj; - if (!g_strcmp0(device, "imx.i2c")) { + if (!g_strcmp0(device, TYPE_IMX_I2C)) { return &machine->i2c_1.obj; } @@ -86,7 +87,7 @@ static void imx25_pdk_register_nodes(void) .extra_device_opts = "bus=i2c-bus.0" }; qos_node_create_machine("arm/imx25-pdk", qos_create_machine_arm_imx25_pdk); - qos_node_contains("arm/imx25-pdk", "imx.i2c", &edge, NULL); + qos_node_contains("arm/imx25-pdk", TYPE_IMX_I2C, &edge, NULL); } libqos_init(imx25_pdk_register_nodes); diff --git a/tests/qtest/libqos/arm-n800-machine.c b/tests/qtest/libqos/arm-n800-machine.c deleted file mode 100644 index 4e5afe0164b..00000000000 --- a/tests/qtest/libqos/arm-n800-machine.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * libqos driver framework - * - * Copyright (c) 2019 Red Hat, Inc. - * - * Author: Paolo Bonzini - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License version 2.1 as published by the Free Software Foundation. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see - */ - -#include "qemu/osdep.h" -#include "../libqtest.h" -#include "libqos-malloc.h" -#include "qgraph.h" -#include "i2c.h" - -#define ARM_PAGE_SIZE 4096 -#define N800_RAM_START 0x80000000 -#define N800_RAM_END 0x88000000 - -typedef struct QN800Machine QN800Machine; - -struct QN800Machine { - QOSGraphObject obj; - QGuestAllocator alloc; - OMAPI2C i2c_1; -}; - -static void *n800_get_driver(void *object, const char *interface) -{ - QN800Machine *machine = object; - if (!g_strcmp0(interface, "memory")) { - return &machine->alloc; - } - - fprintf(stderr, "%s not present in arm/n800\n", interface); - g_assert_not_reached(); -} - -static QOSGraphObject *n800_get_device(void *obj, const char *device) -{ - QN800Machine *machine = obj; - if (!g_strcmp0(device, "omap_i2c")) { - return &machine->i2c_1.obj; - } - - fprintf(stderr, "%s not present in arm/n800\n", device); - g_assert_not_reached(); -} - -static void n800_destructor(QOSGraphObject *obj) -{ - QN800Machine *machine = (QN800Machine *) obj; - alloc_destroy(&machine->alloc); -} - -static void *qos_create_machine_arm_n800(QTestState *qts) -{ - QN800Machine *machine = g_new0(QN800Machine, 1); - - alloc_init(&machine->alloc, 0, - N800_RAM_START, - N800_RAM_END, - ARM_PAGE_SIZE); - machine->obj.get_device = n800_get_device; - machine->obj.get_driver = n800_get_driver; - machine->obj.destructor = n800_destructor; - - omap_i2c_init(&machine->i2c_1, qts, 0x48070000); - return &machine->obj; -} - -static void n800_register_nodes(void) -{ - QOSGraphEdgeOptions edge = { - .extra_device_opts = "bus=i2c-bus.0" - }; - qos_node_create_machine("arm/n800", qos_create_machine_arm_n800); - qos_node_contains("arm/n800", "omap_i2c", &edge, NULL); -} - -libqos_init(n800_register_nodes); diff --git a/tests/qtest/libqos/fw_cfg.c b/tests/qtest/libqos/fw_cfg.c index 89f053ccacb..0ab3959171b 100644 --- a/tests/qtest/libqos/fw_cfg.c +++ b/tests/qtest/libqos/fw_cfg.c @@ -14,6 +14,8 @@ #include "qemu/osdep.h" #include "fw_cfg.h" +#include "malloc-pc.h" +#include "libqos-malloc.h" #include "../libqtest.h" #include "qemu/bswap.h" #include "hw/nvram/fw_cfg.h" @@ -60,6 +62,91 @@ static void mm_fw_cfg_select(QFWCFG *fw_cfg, uint16_t key) qtest_writew(fw_cfg->qts, fw_cfg->base, key); } +static void qfw_cfg_dma_transfer(QFWCFG *fw_cfg, QOSState *qs, void *address, + uint32_t length, uint32_t control) +{ + FWCfgDmaAccess access; + uint32_t addr; + uint64_t guest_access_addr; + uint64_t gaddr; + + /* create a data buffer in guest memory */ + gaddr = guest_alloc(&qs->alloc, length); + + if (control & FW_CFG_DMA_CTL_WRITE) { + qtest_bufwrite(fw_cfg->qts, gaddr, address, length); + } + access.address = cpu_to_be64(gaddr); + access.length = cpu_to_be32(length); + access.control = cpu_to_be32(control); + + /* now create a separate buffer in guest memory for 'access' */ + guest_access_addr = guest_alloc(&qs->alloc, sizeof(access)); + qtest_bufwrite(fw_cfg->qts, guest_access_addr, &access, sizeof(access)); + + /* write lower 32 bits of address */ + addr = cpu_to_be32((uint32_t)(uintptr_t)guest_access_addr); + qtest_outl(fw_cfg->qts, fw_cfg->base + 8, addr); + + /* write upper 32 bits of address */ + addr = cpu_to_be32((uint32_t)(uintptr_t)(guest_access_addr >> 32)); + qtest_outl(fw_cfg->qts, fw_cfg->base + 4, addr); + + g_assert(!(be32_to_cpu(access.control) & FW_CFG_DMA_CTL_ERROR)); + + if (control & FW_CFG_DMA_CTL_READ) { + qtest_bufread(fw_cfg->qts, gaddr, 
address, length); + } + + guest_free(&qs->alloc, guest_access_addr); + guest_free(&qs->alloc, gaddr); +} + +static void qfw_cfg_write_entry(QFWCFG *fw_cfg, QOSState *qs, uint16_t key, + void *buf, uint32_t len) +{ + qfw_cfg_select(fw_cfg, key); + qfw_cfg_dma_transfer(fw_cfg, qs, buf, len, FW_CFG_DMA_CTL_WRITE); +} + +static void qfw_cfg_read_entry(QFWCFG *fw_cfg, QOSState *qs, uint16_t key, + void *buf, uint32_t len) +{ + qfw_cfg_select(fw_cfg, key); + qfw_cfg_dma_transfer(fw_cfg, qs, buf, len, FW_CFG_DMA_CTL_READ); +} + +static bool find_pdir_entry(QFWCFG *fw_cfg, const char *filename, + uint16_t *sel, uint32_t *size) +{ + g_autofree unsigned char *filesbuf = NULL; + uint32_t count; + size_t dsize; + FWCfgFile *pdir_entry; + uint32_t i; + bool found = false; + + *size = 0; + *sel = 0; + + qfw_cfg_get(fw_cfg, FW_CFG_FILE_DIR, &count, sizeof(count)); + count = be32_to_cpu(count); + dsize = sizeof(uint32_t) + count * sizeof(struct fw_cfg_file); + filesbuf = g_malloc(dsize); + qfw_cfg_get(fw_cfg, FW_CFG_FILE_DIR, filesbuf, dsize); + pdir_entry = (FWCfgFile *)(filesbuf + sizeof(uint32_t)); + for (i = 0; i < count; ++i, ++pdir_entry) { + if (!strcmp(pdir_entry->name, filename)) { + *size = be32_to_cpu(pdir_entry->size); + *sel = be16_to_cpu(pdir_entry->select); + found = true; + break; + } + } + + return found; +} + /* * The caller need check the return value. When the return value is * nonzero, it means that some bytes have been transferred. @@ -73,37 +160,106 @@ static void mm_fw_cfg_select(QFWCFG *fw_cfg, uint16_t key) * populated, it has received only a starting slice of the fw_cfg file. */ size_t qfw_cfg_get_file(QFWCFG *fw_cfg, const char *filename, - void *data, size_t buflen) + void *data, size_t buflen) { - uint32_t count; - uint32_t i; - unsigned char *filesbuf = NULL; - size_t dsize; - FWCfgFile *pdir_entry; size_t filesize = 0; + uint32_t len; + uint16_t sel; - qfw_cfg_get(fw_cfg, FW_CFG_FILE_DIR, &count, sizeof(count)); - count = be32_to_cpu(count); - dsize = sizeof(uint32_t) + count * sizeof(struct fw_cfg_file); - filesbuf = g_malloc(dsize); - qfw_cfg_get(fw_cfg, FW_CFG_FILE_DIR, filesbuf, dsize); - pdir_entry = (FWCfgFile *)(filesbuf + sizeof(uint32_t)); - for (i = 0; i < count; ++i, ++pdir_entry) { - if (!strcmp(pdir_entry->name, filename)) { - uint32_t len = be32_to_cpu(pdir_entry->size); - uint16_t sel = be16_to_cpu(pdir_entry->select); - filesize = len; - if (len > buflen) { - len = buflen; - } - qfw_cfg_get(fw_cfg, sel, data, len); - break; + if (find_pdir_entry(fw_cfg, filename, &sel, &len)) { + filesize = len; + if (len > buflen) { + len = buflen; } + qfw_cfg_get(fw_cfg, sel, data, len); } - g_free(filesbuf); + return filesize; } +/* + * The caller need check the return value. When the return value is + * nonzero, it means that some bytes have been transferred. + * + * If the fw_cfg file in question is smaller than the allocated & passed-in + * buffer, then the first len bytes were read. + * + * If the fw_cfg file in question is larger than the passed-in + * buffer, then the return value explains how much was actually read. + * + * It is illegal to call this function if fw_cfg does not support DMA + * interface. The caller should ensure that DMA is supported before + * calling this function. + * + * Passed QOSState pointer qs must be initialized. qs->alloc must also be + * properly initialized. 
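The DMA path above works by placing a FWCfgDmaAccess control block in guest memory and writing its address to the fw_cfg DMA register, so file contents move through guest RAM instead of the byte-at-a-time data port. A minimal usage sketch of the new read helper, assuming the usual libqos PC boot helpers and a purely hypothetical "etc/test-file" entry (not part of this patch):

    QOSState *qs = qtest_pc_boot("-machine q35");
    QFWCFG *fw_cfg = pc_fw_cfg_init(qs->qts);
    char buf[64];

    /* only valid on a fw_cfg device that advertises FW_CFG_VERSION_DMA */
    size_t filesize = qfw_cfg_read_file(fw_cfg, qs, "etc/test-file",
                                        buf, sizeof(buf));
    g_assert_cmpuint(filesize, >, 0);

    pc_fw_cfg_uninit(fw_cfg);
    qtest_pc_shutdown(qs);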
+ */ +size_t qfw_cfg_read_file(QFWCFG *fw_cfg, QOSState *qs, const char *filename, + void *data, size_t buflen) +{ + uint32_t len = 0; + uint16_t sel; + uint32_t id; + + g_assert(qs); + g_assert(filename); + g_assert(data); + g_assert(buflen); + /* check if DMA is supported since we use DMA for read */ + id = qfw_cfg_get_u32(fw_cfg, FW_CFG_ID); + g_assert(id & FW_CFG_VERSION_DMA); + + if (find_pdir_entry(fw_cfg, filename, &sel, &len)) { + if (len > buflen) { + len = buflen; + } + qfw_cfg_read_entry(fw_cfg, qs, sel, data, len); + } + + return len; +} + +/* + * The caller need check the return value. When the return value is + * nonzero, it means that some bytes have been transferred. + * + * If the fw_cfg file in question is smaller than the allocated & passed-in + * buffer, then the buffer has been partially written. + * + * If the fw_cfg file in question is larger than the passed-in + * buffer, then the return value explains how much was actually written. + * + * It is illegal to call this function if fw_cfg does not support DMA + * interface. The caller should ensure that DMA is supported before + * calling this function. + * + * Passed QOSState pointer qs must be initialized. qs->alloc must also be + * properly initialized. + */ +size_t qfw_cfg_write_file(QFWCFG *fw_cfg, QOSState *qs, const char *filename, + void *data, size_t buflen) +{ + uint32_t len = 0; + uint16_t sel; + uint32_t id; + + g_assert(qs); + g_assert(filename); + g_assert(data); + g_assert(buflen); + /* write operation is only valid if DMA is supported */ + id = qfw_cfg_get_u32(fw_cfg, FW_CFG_ID); + g_assert(id & FW_CFG_VERSION_DMA); + + if (find_pdir_entry(fw_cfg, filename, &sel, &len)) { + if (len > buflen) { + len = buflen; + } + qfw_cfg_write_entry(fw_cfg, qs, sel, data, len); + } + return len; +} + static void mm_fw_cfg_read(QFWCFG *fw_cfg, void *data, size_t len) { uint8_t *ptr = data; diff --git a/tests/qtest/libqos/fw_cfg.h b/tests/qtest/libqos/fw_cfg.h index b0456a15df1..6d6ff09725d 100644 --- a/tests/qtest/libqos/fw_cfg.h +++ b/tests/qtest/libqos/fw_cfg.h @@ -14,6 +14,7 @@ #define LIBQOS_FW_CFG_H #include "../libqtest.h" +#include "libqos.h" typedef struct QFWCFG QFWCFG; @@ -33,7 +34,10 @@ uint32_t qfw_cfg_get_u32(QFWCFG *fw_cfg, uint16_t key); uint64_t qfw_cfg_get_u64(QFWCFG *fw_cfg, uint16_t key); size_t qfw_cfg_get_file(QFWCFG *fw_cfg, const char *filename, void *data, size_t buflen); - +size_t qfw_cfg_write_file(QFWCFG *fw_cfg, QOSState *qs, const char *filename, + void *data, size_t buflen); +size_t qfw_cfg_read_file(QFWCFG *fw_cfg, QOSState *qs, const char *filename, + void *data, size_t buflen); QFWCFG *mm_fw_cfg_init(QTestState *qts, uint64_t base); void mm_fw_cfg_uninit(QFWCFG *fw_cfg); QFWCFG *io_fw_cfg_init(QTestState *qts, uint16_t base); diff --git a/tests/qtest/libqos/generic-pcihost.c b/tests/qtest/libqos/generic-pcihost.c index 3124b0e46b2..4bbeb5ff508 100644 --- a/tests/qtest/libqos/generic-pcihost.c +++ b/tests/qtest/libqos/generic-pcihost.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "../libqtest.h" #include "generic-pcihost.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "hw/pci/pci_regs.h" #include "qemu/host-utils.h" diff --git a/tests/qtest/libqos/i2c-imx.c b/tests/qtest/libqos/i2c-imx.c index 710cb926d62..6d868e4cc4d 100644 --- a/tests/qtest/libqos/i2c-imx.c +++ b/tests/qtest/libqos/i2c-imx.c @@ -209,8 +209,8 @@ void imx_i2c_init(IMXI2C *s, QTestState *qts, uint64_t addr) static void imx_i2c_register_nodes(void) { - qos_node_create_driver("imx.i2c", NULL); - 
qos_node_produces("imx.i2c", "i2c-bus"); + qos_node_create_driver(TYPE_IMX_I2C, NULL); + qos_node_produces(TYPE_IMX_I2C, "i2c-bus"); } libqos_init(imx_i2c_register_nodes); diff --git a/tests/qtest/libqos/igb.c b/tests/qtest/libqos/igb.c index f40c4ec4cd2..ab3ef6f0c3b 100644 --- a/tests/qtest/libqos/igb.c +++ b/tests/qtest/libqos/igb.c @@ -104,10 +104,10 @@ static void igb_pci_start_hw(QOSGraphObject *obj) e1000e_macreg_write(&d->e1000e, E1000_RDT(0), 0); e1000e_macreg_write(&d->e1000e, E1000_RDH(0), 0); e1000e_macreg_write(&d->e1000e, E1000_RA, - le32_to_cpu(*(uint32_t *)address)); + ldl_le_p(address)); e1000e_macreg_write(&d->e1000e, E1000_RA + 4, E1000_RAH_AV | E1000_RAH_POOL_1 | - le16_to_cpu(*(uint16_t *)(address + 4))); + lduw_le_p(address + 4)); /* Set supported receive descriptor mode */ e1000e_macreg_write(&d->e1000e, diff --git a/tests/qtest/libqos/libqos-malloc.c b/tests/qtest/libqos/libqos-malloc.c index d7566972c40..c90f8f03f4f 100644 --- a/tests/qtest/libqos/libqos-malloc.c +++ b/tests/qtest/libqos/libqos-malloc.c @@ -342,5 +342,4 @@ void migrate_allocator(QGuestAllocator *src, QTAILQ_INIT(src->free); node = mlist_new(src->start, src->end - src->start); QTAILQ_INSERT_HEAD(src->free, node, MLIST_ENTNAME); - return; } diff --git a/tests/qtest/libqos/libqos.c b/tests/qtest/libqos/libqos.c index 5c0fa1f7c57..9b49d0d4ddc 100644 --- a/tests/qtest/libqos/libqos.c +++ b/tests/qtest/libqos/libqos.c @@ -2,7 +2,7 @@ #include "../libqtest.h" #include "libqos.h" #include "pci.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" /*** Test Setup & Teardown ***/ @@ -117,13 +117,14 @@ void migrate(QOSState *from, QOSState *to, const char *uri) g_assert(qdict_haskey(sub, "status")); st = qdict_get_str(sub, "status"); - /* "setup", "active", "completed", "failed", "cancelled" */ + /* "setup", "active", "device", "completed", "failed", "cancelled" */ if (strcmp(st, "completed") == 0) { qobject_unref(rsp); break; } if ((strcmp(st, "setup") == 0) || (strcmp(st, "active") == 0) + || (strcmp(st, "device") == 0) || (strcmp(st, "wait-unplug") == 0)) { qobject_unref(rsp); g_usleep(5000); diff --git a/tests/qtest/libqos/meson.build b/tests/qtest/libqos/meson.build index 1b2b2dbb22e..1ddaf7b095b 100644 --- a/tests/qtest/libqos/meson.build +++ b/tests/qtest/libqos/meson.build @@ -32,7 +32,6 @@ libqos_srcs = files( 'i2c-omap.c', 'igb.c', 'sdhci.c', - 'tpci200.c', 'virtio.c', 'virtio-balloon.c', 'virtio-blk.c', @@ -52,7 +51,6 @@ libqos_srcs = files( # qgraph machines: 'aarch64-xlnx-zcu102-machine.c', 'arm-imx25-pdk-machine.c', - 'arm-n800-machine.c', 'arm-raspi2-machine.c', 'arm-sabrelite-machine.c', 'arm-smdkc210-machine.c', @@ -68,6 +66,13 @@ if have_virtfs libqos_srcs += files('virtio-9p.c', 'virtio-9p-client.c') endif +if config_all_devices.has_key('CONFIG_RISCV_IOMMU') + libqos_srcs += files('riscv-iommu.c') +endif +if config_all_devices.has_key('CONFIG_TPCI200') + libqos_srcs += files('tpci200.c') +endif + libqos = static_library('qos', libqos_srcs + genh, build_by_default: false) diff --git a/tests/qtest/libqos/pci-pc.c b/tests/qtest/libqos/pci-pc.c index 96046287ac6..147009f4f44 100644 --- a/tests/qtest/libqos/pci-pc.c +++ b/tests/qtest/libqos/pci-pc.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "../libqtest.h" #include "pci-pc.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "hw/pci/pci_regs.h" #include "qemu/module.h" diff --git a/tests/qtest/libqos/pci.c b/tests/qtest/libqos/pci.c index b23d72346b6..a59197b9922 100644 --- a/tests/qtest/libqos/pci.c +++ 
b/tests/qtest/libqos/pci.c @@ -328,8 +328,6 @@ bool qpci_msix_pending(QPCIDevice *dev, uint16_t entry) g_assert(dev->msix_enabled); pba_entry = qpci_io_readl(dev, dev->msix_pba_bar, dev->msix_pba_off + off); - qpci_io_writel(dev, dev->msix_pba_bar, dev->msix_pba_off + off, - pba_entry & ~(1 << bit_n)); return (pba_entry & (1 << bit_n)) != 0; } diff --git a/tests/qtest/libqos/qgraph.h b/tests/qtest/libqos/qgraph.h index 1b5de02e7be..81fbfdd0e27 100644 --- a/tests/qtest/libqos/qgraph.h +++ b/tests/qtest/libqos/qgraph.h @@ -355,7 +355,7 @@ void qos_object_start_hw(QOSGraphObject *obj); QOSGraphObject *qos_machine_new(QOSGraphNode *node, QTestState *qts); /** - * qos_machine_new(): instantiate a new driver node + * qos_driver_new(): instantiate a new driver node * @node: A driver node to be instantiated * @parent: A #QOSGraphObject to be consumed by the new driver node * @alloc: An allocator to be used by the new driver node. diff --git a/tests/qtest/libqos/qos_external.c b/tests/qtest/libqos/qos_external.c index c6bb8bff09e..493ab747de9 100644 --- a/tests/qtest/libqos/qos_external.c +++ b/tests/qtest/libqos/qos_external.c @@ -19,11 +19,11 @@ #include "qemu/osdep.h" #include #include "../libqtest.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qbool.h" +#include "qobject/qstring.h" #include "qemu/module.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qlist.h" #include "libqos-malloc.h" #include "qgraph.h" #include "qgraph_internal.h" diff --git a/tests/qtest/libqos/riscv-iommu.c b/tests/qtest/libqos/riscv-iommu.c new file mode 100644 index 00000000000..01e3b31c0b7 --- /dev/null +++ b/tests/qtest/libqos/riscv-iommu.c @@ -0,0 +1,76 @@ +/* + * libqos driver riscv-iommu-pci framework + * + * Copyright (c) 2024 Ventana Micro Systems Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. 
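One behavioural fix a few hunks above is easy to miss: qpci_msix_pending() used to write the polled word back with the bit cleared, but the MSI-X Pending Bit Array is set and cleared by the device itself, so the helper is now a pure read and can be polled repeatedly without side effects. An illustrative (hypothetical) poll loop over vector 0:

    /* dev is an initialised QPCIDevice with MSI-X enabled via
     * qpci_msix_enable(); vector 0 is an arbitrary example. */
    while (!qpci_msix_pending(dev, 0)) {
        g_usleep(1000);
    }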
+ * + */ + +#include "qemu/osdep.h" +#include "../libqtest.h" +#include "qemu/module.h" +#include "qgraph.h" +#include "pci.h" +#include "riscv-iommu.h" + +static void *riscv_iommu_pci_get_driver(void *obj, const char *interface) +{ + QRISCVIOMMU *r_iommu_pci = obj; + + if (!g_strcmp0(interface, "pci-device")) { + return &r_iommu_pci->dev; + } + + fprintf(stderr, "%s not present in riscv_iommu_pci\n", interface); + g_assert_not_reached(); +} + +static void riscv_iommu_pci_start_hw(QOSGraphObject *obj) +{ + QRISCVIOMMU *pci = (QRISCVIOMMU *)obj; + qpci_device_enable(&pci->dev); +} + +static void riscv_iommu_pci_destructor(QOSGraphObject *obj) +{ + QRISCVIOMMU *pci = (QRISCVIOMMU *)obj; + qpci_iounmap(&pci->dev, pci->reg_bar); +} + +static void *riscv_iommu_pci_create(void *pci_bus, QGuestAllocator *alloc, + void *addr) +{ + QRISCVIOMMU *r_iommu_pci = g_new0(QRISCVIOMMU, 1); + QPCIBus *bus = pci_bus; + + qpci_device_init(&r_iommu_pci->dev, bus, addr); + r_iommu_pci->reg_bar = qpci_iomap(&r_iommu_pci->dev, 0, NULL); + + r_iommu_pci->obj.get_driver = riscv_iommu_pci_get_driver; + r_iommu_pci->obj.start_hw = riscv_iommu_pci_start_hw; + r_iommu_pci->obj.destructor = riscv_iommu_pci_destructor; + return &r_iommu_pci->obj; +} + +static void riscv_iommu_pci_register_nodes(void) +{ + QPCIAddress addr = { + .vendor_id = RISCV_IOMMU_PCI_VENDOR_ID, + .device_id = RISCV_IOMMU_PCI_DEVICE_ID, + .devfn = QPCI_DEVFN(1, 0), + }; + + QOSGraphEdgeOptions opts = { + .extra_device_opts = "addr=01.0", + }; + + add_qpci_address(&opts, &addr); + + qos_node_create_driver("riscv-iommu-pci", riscv_iommu_pci_create); + qos_node_produces("riscv-iommu-pci", "pci-device"); + qos_node_consumes("riscv-iommu-pci", "pci-bus", &opts); +} + +libqos_init(riscv_iommu_pci_register_nodes); diff --git a/tests/qtest/libqos/riscv-iommu.h b/tests/qtest/libqos/riscv-iommu.h new file mode 100644 index 00000000000..318db137999 --- /dev/null +++ b/tests/qtest/libqos/riscv-iommu.h @@ -0,0 +1,101 @@ +/* + * libqos driver riscv-iommu-pci framework + * + * Copyright (c) 2024 Ventana Micro Systems Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. + * + */ + +#ifndef TESTS_LIBQOS_RISCV_IOMMU_H +#define TESTS_LIBQOS_RISCV_IOMMU_H + +#include "qgraph.h" +#include "pci.h" +#include "qemu/bitops.h" + +#ifndef GENMASK_ULL +#define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h) + (l))) << (l)) +#endif + +/* + * RISC-V IOMMU uses PCI_VENDOR_ID_REDHAT 0x1b36 and + * PCI_DEVICE_ID_REDHAT_RISCV_IOMMU 0x0014. 
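Because the node above produces "pci-device" and consumes "pci-bus" at devfn 01.0, a test can reach the IOMMU purely through the qos graph; the registers it would poke are the RISCV_IOMMU_REG_* constants defined in the rest of this header. A minimal consumer sketch (test name and assertion are illustrative only, not taken from this patch):

    static void test_riscv_iommu_cap(void *obj, void *data,
                                     QGuestAllocator *alloc)
    {
        QRISCVIOMMU *iommu = obj;
        uint64_t cap = qpci_io_readq(&iommu->dev, iommu->reg_bar,
                                     RISCV_IOMMU_REG_CAP);

        /* the version field is expected to be non-zero on a working device */
        g_assert_cmpuint(cap & RISCV_IOMMU_CAP_VERSION, !=, 0);
    }

    static void register_riscv_iommu_test(void)
    {
        qos_add_test("cap", "riscv-iommu-pci", test_riscv_iommu_cap, NULL);
    }
    libqos_init(register_riscv_iommu_test);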
+ */ +#define RISCV_IOMMU_PCI_VENDOR_ID 0x1b36 +#define RISCV_IOMMU_PCI_DEVICE_ID 0x0014 +#define RISCV_IOMMU_PCI_DEVICE_CLASS 0x0806 + +/* Common field positions */ +#define RISCV_IOMMU_QUEUE_ENABLE BIT(0) +#define RISCV_IOMMU_QUEUE_INTR_ENABLE BIT(1) +#define RISCV_IOMMU_QUEUE_MEM_FAULT BIT(8) +#define RISCV_IOMMU_QUEUE_ACTIVE BIT(16) +#define RISCV_IOMMU_QUEUE_BUSY BIT(17) + +#define RISCV_IOMMU_REG_CAP 0x0000 +#define RISCV_IOMMU_CAP_VERSION GENMASK_ULL(7, 0) + +#define RISCV_IOMMU_REG_DDTP 0x0010 +#define RISCV_IOMMU_DDTP_BUSY BIT_ULL(4) +#define RISCV_IOMMU_DDTP_MODE GENMASK_ULL(3, 0) +#define RISCV_IOMMU_DDTP_MODE_OFF 0 + +#define RISCV_IOMMU_REG_CQCSR 0x0048 +#define RISCV_IOMMU_CQCSR_CQEN RISCV_IOMMU_QUEUE_ENABLE +#define RISCV_IOMMU_CQCSR_CIE RISCV_IOMMU_QUEUE_INTR_ENABLE +#define RISCV_IOMMU_CQCSR_CQON RISCV_IOMMU_QUEUE_ACTIVE +#define RISCV_IOMMU_CQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY + +#define RISCV_IOMMU_REG_FQCSR 0x004C +#define RISCV_IOMMU_FQCSR_FQEN RISCV_IOMMU_QUEUE_ENABLE +#define RISCV_IOMMU_FQCSR_FIE RISCV_IOMMU_QUEUE_INTR_ENABLE +#define RISCV_IOMMU_FQCSR_FQON RISCV_IOMMU_QUEUE_ACTIVE +#define RISCV_IOMMU_FQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY + +#define RISCV_IOMMU_REG_PQCSR 0x0050 +#define RISCV_IOMMU_PQCSR_PQEN RISCV_IOMMU_QUEUE_ENABLE +#define RISCV_IOMMU_PQCSR_PIE RISCV_IOMMU_QUEUE_INTR_ENABLE +#define RISCV_IOMMU_PQCSR_PQON RISCV_IOMMU_QUEUE_ACTIVE +#define RISCV_IOMMU_PQCSR_BUSY RISCV_IOMMU_QUEUE_BUSY + +#define RISCV_IOMMU_REG_IPSR 0x0054 + +#define RISCV_IOMMU_REG_IVEC 0x02F8 +#define RISCV_IOMMU_REG_IVEC_CIV GENMASK_ULL(3, 0) +#define RISCV_IOMMU_REG_IVEC_FIV GENMASK_ULL(7, 4) +#define RISCV_IOMMU_REG_IVEC_PMIV GENMASK_ULL(11, 8) +#define RISCV_IOMMU_REG_IVEC_PIV GENMASK_ULL(15, 12) + +#define RISCV_IOMMU_REG_CQB 0x0018 +#define RISCV_IOMMU_CQB_PPN_START 10 +#define RISCV_IOMMU_CQB_PPN_LEN 44 +#define RISCV_IOMMU_CQB_LOG2SZ_START 0 +#define RISCV_IOMMU_CQB_LOG2SZ_LEN 5 + +#define RISCV_IOMMU_REG_CQT 0x0024 + +#define RISCV_IOMMU_REG_FQB 0x0028 +#define RISCV_IOMMU_FQB_PPN_START 10 +#define RISCV_IOMMU_FQB_PPN_LEN 44 +#define RISCV_IOMMU_FQB_LOG2SZ_START 0 +#define RISCV_IOMMU_FQB_LOG2SZ_LEN 5 + +#define RISCV_IOMMU_REG_FQT 0x0034 + +#define RISCV_IOMMU_REG_PQB 0x0038 +#define RISCV_IOMMU_PQB_PPN_START 10 +#define RISCV_IOMMU_PQB_PPN_LEN 44 +#define RISCV_IOMMU_PQB_LOG2SZ_START 0 +#define RISCV_IOMMU_PQB_LOG2SZ_LEN 5 + +#define RISCV_IOMMU_REG_PQT 0x0044 + +typedef struct QRISCVIOMMU { + QOSGraphObject obj; + QPCIDevice dev; + QPCIBar reg_bar; +} QRISCVIOMMU; + +#endif diff --git a/tests/qtest/libqos/virtio-9p-client.c b/tests/qtest/libqos/virtio-9p-client.c index b8adc8d4b91..6ab4501c6e1 100644 --- a/tests/qtest/libqos/virtio-9p-client.c +++ b/tests/qtest/libqos/virtio-9p-client.c @@ -235,10 +235,11 @@ static const char *rmessage_name(uint8_t id) id == P9_RMKDIR ? "RMKDIR" : id == P9_RLCREATE ? "RLCREATE" : id == P9_RSYMLINK ? "RSYMLINK" : + id == P9_RGETATTR ? "RGETATTR" : id == P9_RLINK ? "RLINK" : id == P9_RUNLINKAT ? "RUNLINKAT" : id == P9_RFLUSH ? "RFLUSH" : - id == P9_RREADDIR ? "READDIR" : + id == P9_RREADDIR ? 
"RREADDIR" : ""; } @@ -556,6 +557,55 @@ void v9fs_rgetattr(P9Req *req, v9fs_attr *attr) v9fs_req_free(req); } +/* + * size[4] Tsetattr tag[2] fid[4] valid[4] mode[4] uid[4] gid[4] size[8] + * atime_sec[8] atime_nsec[8] mtime_sec[8] mtime_nsec[8] + */ +TSetAttrRes v9fs_tsetattr(TSetAttrOpt opt) +{ + P9Req *req; + uint32_t err; + + g_assert(opt.client); + + req = v9fs_req_init( + opt.client, 4/*fid*/ + 4/*valid*/ + 4/*mode*/ + 4/*uid*/ + 4/*gid*/ + + 8/*size*/ + 8/*atime_sec*/ + 8/*atime_nsec*/ + 8/*mtime_sec*/ + + 8/*mtime_nsec*/, P9_TSETATTR, opt.tag + ); + v9fs_uint32_write(req, opt.fid); + v9fs_uint32_write(req, (uint32_t) opt.attr.valid); + v9fs_uint32_write(req, opt.attr.mode); + v9fs_uint32_write(req, opt.attr.uid); + v9fs_uint32_write(req, opt.attr.gid); + v9fs_uint64_write(req, opt.attr.size); + v9fs_uint64_write(req, opt.attr.atime_sec); + v9fs_uint64_write(req, opt.attr.atime_nsec); + v9fs_uint64_write(req, opt.attr.mtime_sec); + v9fs_uint64_write(req, opt.attr.mtime_nsec); + v9fs_req_send(req); + + if (!opt.requestOnly) { + v9fs_req_wait_for_reply(req, NULL); + if (opt.expectErr) { + v9fs_rlerror(req, &err); + g_assert_cmpint(err, ==, opt.expectErr); + } else { + v9fs_rsetattr(req); + } + req = NULL; /* request was freed */ + } + + return (TSetAttrRes) { .req = req }; +} + +/* size[4] Rsetattr tag[2] */ +void v9fs_rsetattr(P9Req *req) +{ + v9fs_req_recv(req, P9_RSETATTR); + v9fs_req_free(req); +} + /* size[4] Treaddir tag[2] fid[4] offset[8] count[4] */ TReadDirRes v9fs_treaddir(TReadDirOpt opt) { diff --git a/tests/qtest/libqos/virtio-9p-client.h b/tests/qtest/libqos/virtio-9p-client.h index 78228eb97d9..e3221a31041 100644 --- a/tests/qtest/libqos/virtio-9p-client.h +++ b/tests/qtest/libqos/virtio-9p-client.h @@ -65,6 +65,16 @@ typedef struct v9fs_attr { #define P9_GETATTR_BASIC 0x000007ffULL /* Mask for fields up to BLOCKS */ #define P9_GETATTR_ALL 0x00003fffULL /* Mask for ALL fields */ +#define P9_SETATTR_MODE 0x00000001UL +#define P9_SETATTR_UID 0x00000002UL +#define P9_SETATTR_GID 0x00000004UL +#define P9_SETATTR_SIZE 0x00000008UL +#define P9_SETATTR_ATIME 0x00000010UL +#define P9_SETATTR_MTIME 0x00000020UL +#define P9_SETATTR_CTIME 0x00000040UL +#define P9_SETATTR_ATIME_SET 0x00000080UL +#define P9_SETATTR_MTIME_SET 0x00000100UL + struct V9fsDirent { v9fs_qid qid; uint64_t offset; @@ -182,6 +192,28 @@ typedef struct TGetAttrRes { P9Req *req; } TGetAttrRes; +/* options for 'Tsetattr' 9p request */ +typedef struct TSetAttrOpt { + /* 9P client being used (mandatory) */ + QVirtio9P *client; + /* user supplied tag number being returned with response (optional) */ + uint16_t tag; + /* file ID of file/dir whose attributes shall be modified (required) */ + uint32_t fid; + /* new attribute values to be set by 9p server */ + v9fs_attr attr; + /* only send Tsetattr request but not wait for a reply? (optional) */ + bool requestOnly; + /* do we expect an Rlerror response, if yes which error code? 
(optional) */ + uint32_t expectErr; +} TSetAttrOpt; + +/* result of 'Tsetattr' 9p request */ +typedef struct TSetAttrRes { + /* if requestOnly was set: request object for further processing */ + P9Req *req; +} TSetAttrRes; + /* options for 'Treaddir' 9p request */ typedef struct TReadDirOpt { /* 9P client being used (mandatory) */ @@ -470,6 +502,8 @@ TWalkRes v9fs_twalk(TWalkOpt opt); void v9fs_rwalk(P9Req *req, uint16_t *nwqid, v9fs_qid **wqid); TGetAttrRes v9fs_tgetattr(TGetAttrOpt); void v9fs_rgetattr(P9Req *req, v9fs_attr *attr); +TSetAttrRes v9fs_tsetattr(TSetAttrOpt opt); +void v9fs_rsetattr(P9Req *req); TReadDirRes v9fs_treaddir(TReadDirOpt); void v9fs_rreaddir(P9Req *req, uint32_t *count, uint32_t *nentries, struct V9fsDirent **entries); diff --git a/tests/qtest/libqos/virtio-pci-modern.c b/tests/qtest/libqos/virtio-pci-modern.c index 18d118866f0..4e67fcbd5d3 100644 --- a/tests/qtest/libqos/virtio-pci-modern.c +++ b/tests/qtest/libqos/virtio-pci-modern.c @@ -173,13 +173,11 @@ static bool get_config_isr_status(QVirtioDevice *d) static void wait_config_isr_status(QVirtioDevice *d, gint64 timeout_us) { - QVirtioPCIDevice *dev = container_of(d, QVirtioPCIDevice, vdev); gint64 start_time = g_get_monotonic_time(); - do { + while (!get_config_isr_status(d)) { g_assert(g_get_monotonic_time() - start_time <= timeout_us); - qtest_clock_step(dev->pdev->bus->qts, 100); - } while (!get_config_isr_status(d)); + } } static void queue_select(QVirtioDevice *d, uint16_t index) diff --git a/tests/qtest/libqos/virtio-pci.c b/tests/qtest/libqos/virtio-pci.c index 485b8f6b7e0..002bf8b8c2d 100644 --- a/tests/qtest/libqos/virtio-pci.c +++ b/tests/qtest/libqos/virtio-pci.c @@ -171,13 +171,11 @@ static bool qvirtio_pci_get_config_isr_status(QVirtioDevice *d) static void qvirtio_pci_wait_config_isr_status(QVirtioDevice *d, gint64 timeout_us) { - QVirtioPCIDevice *dev = container_of(d, QVirtioPCIDevice, vdev); gint64 start_time = g_get_monotonic_time(); - do { + while (!qvirtio_pci_get_config_isr_status(d)) { g_assert(g_get_monotonic_time() - start_time <= timeout_us); - qtest_clock_step(dev->pdev->bus->qts, 100); - } while (!qvirtio_pci_get_config_isr_status(d)); + } } static void qvirtio_pci_queue_select(QVirtioDevice *d, uint16_t index) diff --git a/tests/qtest/libqos/virtio-scmi.c b/tests/qtest/libqos/virtio-scmi.c index ce8f4d5c06e..6b5bd4db42f 100644 --- a/tests/qtest/libqos/virtio-scmi.c +++ b/tests/qtest/libqos/virtio-scmi.c @@ -1,7 +1,7 @@ /* * virtio-scmi nodes for testing * - * SPDX-FileCopyrightText: Linaro Ltd + * Copyright (c) Linaro Ltd. * SPDX-FileCopyrightText: Red Hat, Inc. 
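The Tsetattr/Rsetattr client support added above follows the same designated-initializer pattern as the other 9p request helpers. A hypothetical chmod-style call, where v9p and fid come from the surrounding test and 0600 is just an example mode:

    v9fs_tsetattr((TSetAttrOpt) {
        .client = v9p,
        .fid = fid,
        .attr = {
            .valid = P9_SETATTR_MODE,
            .mode = 0600,
        },
    });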
* SPDX-License-Identifier: GPL-2.0-or-later * diff --git a/tests/qtest/libqos/virtio.c b/tests/qtest/libqos/virtio.c index a21b6eee9cd..5a709d0bc59 100644 --- a/tests/qtest/libqos/virtio.c +++ b/tests/qtest/libqos/virtio.c @@ -25,49 +25,63 @@ */ static uint16_t qvirtio_readw(QVirtioDevice *d, QTestState *qts, uint64_t addr) { - uint16_t val = qtest_readw(qts, addr); + uint16_t val; - if (d->features & (1ull << VIRTIO_F_VERSION_1) && qtest_big_endian(qts)) { - val = bswap16(val); + if (d->features & (1ull << VIRTIO_F_VERSION_1)) { + qtest_memread(qts, addr, &val, sizeof(val)); + val = le16_to_cpu(val); + } else { + val = qtest_readw(qts, addr); } + return val; } static uint32_t qvirtio_readl(QVirtioDevice *d, QTestState *qts, uint64_t addr) { - uint32_t val = qtest_readl(qts, addr); + uint32_t val; - if (d->features & (1ull << VIRTIO_F_VERSION_1) && qtest_big_endian(qts)) { - val = bswap32(val); + if (d->features & (1ull << VIRTIO_F_VERSION_1)) { + qtest_memread(qts, addr, &val, sizeof(val)); + val = le32_to_cpu(val); + } else { + val = qtest_readl(qts, addr); } + return val; } static void qvirtio_writew(QVirtioDevice *d, QTestState *qts, uint64_t addr, uint16_t val) { - if (d->features & (1ull << VIRTIO_F_VERSION_1) && qtest_big_endian(qts)) { - val = bswap16(val); + if (d->features & (1ull << VIRTIO_F_VERSION_1)) { + val = cpu_to_le16(val); + qtest_memwrite(qts, addr, &val, sizeof(val)); + } else { + qtest_writew(qts, addr, val); } - qtest_writew(qts, addr, val); } static void qvirtio_writel(QVirtioDevice *d, QTestState *qts, uint64_t addr, uint32_t val) { - if (d->features & (1ull << VIRTIO_F_VERSION_1) && qtest_big_endian(qts)) { - val = bswap32(val); + if (d->features & (1ull << VIRTIO_F_VERSION_1)) { + val = cpu_to_le32(val); + qtest_memwrite(qts, addr, &val, sizeof(val)); + } else { + qtest_writel(qts, addr, val); } - qtest_writel(qts, addr, val); } static void qvirtio_writeq(QVirtioDevice *d, QTestState *qts, uint64_t addr, uint64_t val) { - if (d->features & (1ull << VIRTIO_F_VERSION_1) && qtest_big_endian(qts)) { - val = bswap64(val); + if (d->features & (1ull << VIRTIO_F_VERSION_1)) { + val = cpu_to_le64(val); + qtest_memwrite(qts, addr, &val, sizeof(val)); + } else { + qtest_writeq(qts, addr, val); } - qtest_writeq(qts, addr, val); } uint8_t qvirtio_config_readb(QVirtioDevice *d, uint64_t addr) @@ -170,7 +184,6 @@ void qvirtio_wait_queue_isr(QTestState *qts, QVirtioDevice *d, gint64 start_time = g_get_monotonic_time(); for (;;) { - qtest_clock_step(qts, 100); if (d->bus->get_queue_isr_status(d, vq)) { return; } @@ -192,7 +205,6 @@ uint8_t qvirtio_wait_status_byte_no_isr(QTestState *qts, QVirtioDevice *d, uint8_t val; while ((val = qtest_readb(qts, addr)) == 0xff) { - qtest_clock_step(qts, 100); g_assert(!d->bus->get_queue_isr_status(d, vq)); g_assert(g_get_monotonic_time() - start_time <= timeout_us); } @@ -219,14 +231,12 @@ void qvirtio_wait_used_elem(QTestState *qts, QVirtioDevice *d, for (;;) { uint32_t got_desc_idx; - qtest_clock_step(qts, 100); if (d->bus->get_queue_isr_status(d, vq) && qvirtqueue_get_buf(qts, vq, &got_desc_idx, len)) { g_assert_cmpint(got_desc_idx, ==, desc_idx); return; } - g_assert(g_get_monotonic_time() - start_time <= timeout_us); } } diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c index 1326e342911..94526b7f9cc 100644 --- a/tests/qtest/libqtest.c +++ b/tests/qtest/libqtest.c @@ -30,14 +30,15 @@ #include "libqtest.h" #include "libqmp.h" +#include "qemu/accel.h" #include "qemu/ctype.h" #include "qemu/cutils.h" #include "qemu/sockets.h" 
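The qvirtio_read*/write* changes above encode the virtio 1.0 rule that a VIRTIO_F_VERSION_1 device's memory-mapped fields are always little-endian, independent of guest endianness, so the helpers now move raw bytes and convert explicitly instead of second-guessing what qtest_readw()/qtest_writew() did. The resulting access pattern, shown here for a 16-bit read (sketch only):

    uint16_t val;

    qtest_memread(qts, addr, &val, sizeof(val));  /* raw bytes, no swapping */
    val = le16_to_cpu(val);                       /* modern device: always LE */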
-#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qstring.h" -#include "qapi/qmp/qbool.h" +#include "qobject/qdict.h" +#include "qobject/qjson.h" +#include "qobject/qlist.h" +#include "qobject/qstring.h" +#include "qobject/qbool.h" #define MAX_IRQ 256 @@ -75,6 +76,8 @@ struct QTestState { int fd; int qmp_fd; + int sock; + int qmpsock; pid_t qemu_pid; /* our child QEMU process */ int wstatus; #ifdef _WIN32 @@ -215,6 +218,22 @@ static void qtest_check_status(QTestState *s) #endif } +void qtest_system_reset_nowait(QTestState *s) +{ + /* Request the system reset, but do not wait for it to complete */ + qtest_qmp_assert_success(s, "{'execute': 'system_reset' }"); +} + +void qtest_system_reset(QTestState *s) +{ + qtest_system_reset_nowait(s); + /* + * Wait for the RESET event, which is sent once the system reset + * has actually completed. + */ + qtest_qmp_eventwait(s, "RESET"); +} + void qtest_wait_qemu(QTestState *s) { if (s->qemu_pid != -1) { @@ -442,18 +461,19 @@ static QTestState *G_GNUC_PRINTF(2, 3) qtest_spawn_qemu(const char *qemu_bin, return s; } +static char *qtest_socket_path(const char *suffix) +{ + return g_strdup_printf("%s/qtest-%d.%s", g_get_tmp_dir(), getpid(), suffix); +} + static QTestState *qtest_init_internal(const char *qemu_bin, - const char *extra_args) + const char *extra_args, + bool do_connect) { QTestState *s; int sock, qmpsock, i; - gchar *socket_path; - gchar *qmp_socket_path; - - socket_path = g_strdup_printf("%s/qtest-%d.sock", - g_get_tmp_dir(), getpid()); - qmp_socket_path = g_strdup_printf("%s/qtest-%d.qmp", - g_get_tmp_dir(), getpid()); + g_autofree gchar *socket_path = qtest_socket_path("sock"); + g_autofree gchar *qmp_socket_path = qtest_socket_path("qmp"); /* * It's possible that if an earlier test run crashed it might @@ -485,22 +505,19 @@ static QTestState *qtest_init_internal(const char *qemu_bin, qtest_client_set_rx_handler(s, qtest_client_socket_recv_line); qtest_client_set_tx_handler(s, qtest_client_socket_send); - s->fd = socket_accept(sock); - if (s->fd >= 0) { - s->qmp_fd = socket_accept(qmpsock); - } - unlink(socket_path); - unlink(qmp_socket_path); - g_free(socket_path); - g_free(qmp_socket_path); - - g_assert(s->fd >= 0 && s->qmp_fd >= 0); - s->rx = g_string_new(""); for (i = 0; i < MAX_IRQ; i++) { s->irq_level[i] = false; } + s->fd = -1; + s->qmp_fd = -1; + s->sock = sock; + s->qmpsock = qmpsock; + if (do_connect) { + qtest_connect(s); + } + /* * Stopping QEMU for debugging is not supported on Windows. 
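The qtest_system_reset()/qtest_system_reset_nowait() helpers added above centralise the reset-and-wait sequence that tests such as hd-geo-test previously open-coded as a raw system_reset QMP command followed by qtest_qmp_eventwait(). A minimal sketch of both variants, where qts is the test's QTestState:

    /* common case: reset and block until the RESET event arrives */
    qtest_system_reset(qts);

    /* when the test must watch for other events around the reset itself */
    qtest_system_reset_nowait(qts);
    /* ... consume whatever intermediate events matter to the test ... */
    qtest_qmp_eventwait(qts, "RESET");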
* @@ -514,40 +531,70 @@ static QTestState *qtest_init_internal(const char *qemu_bin, kill(s->qemu_pid, SIGSTOP); } #endif - return s; + + return s; } -QTestState *qtest_init_without_qmp_handshake(const char *extra_args) +void qtest_connect(QTestState *s) { - return qtest_init_internal(qtest_qemu_binary(NULL), extra_args); + g_autofree gchar *socket_path = qtest_socket_path("sock"); + g_autofree gchar *qmp_socket_path = qtest_socket_path("qmp"); + + g_assert(s->sock >= 0 && s->qmpsock >= 0); + s->fd = socket_accept(s->sock); + if (s->fd >= 0) { + s->qmp_fd = socket_accept(s->qmpsock); + } + unlink(socket_path); + unlink(qmp_socket_path); + g_assert(s->fd >= 0 && s->qmp_fd >= 0); + s->sock = s->qmpsock = -1; + /* ask endianness of the target */ + s->big_endian = qtest_query_target_endianness(s); } -QTestState *qtest_init_with_env_no_handshake(const char *var, - const char *extra_args) +QTestState *qtest_init_without_qmp_handshake(const char *extra_args) { - return qtest_init_internal(qtest_qemu_binary(var), extra_args); + return qtest_init_internal(qtest_qemu_binary(NULL), extra_args, true); } -QTestState *qtest_init_with_env(const char *var, const char *extra_args) +void qtest_qmp_handshake(QTestState *s, QList *capabilities) { - QTestState *s = qtest_init_internal(qtest_qemu_binary(var), extra_args); - QDict *greeting; - - /* ask endianness of the target */ - - s->big_endian = qtest_query_target_endianness(s); - /* Read the QMP greeting and then do the handshake */ - greeting = qtest_qmp_receive(s); + QDict *greeting = qtest_qmp_receive(s); qobject_unref(greeting); - qobject_unref(qtest_qmp(s, "{ 'execute': 'qmp_capabilities' }")); + if (capabilities) { + qtest_qmp_assert_success(s, + "{ 'execute': 'qmp_capabilities', " + "'arguments': { 'enable': %p } }", + qobject_ref(capabilities)); + } else { + qtest_qmp_assert_success(s, "{ 'execute': 'qmp_capabilities' }"); + } +} + +QTestState *qtest_init_ext(const char *var, const char *extra_args, + QList *capabilities, bool do_connect) +{ + QTestState *s = qtest_init_internal(qtest_qemu_binary(var), extra_args, + do_connect); + + if (do_connect) { + qtest_qmp_handshake(s, capabilities); + } else { + /* + * If the connection is delayed, the capabilities must be set + * at that moment. + */ + assert(!capabilities); + } return s; } QTestState *qtest_init(const char *extra_args) { - return qtest_init_with_env(NULL, extra_args); + return qtest_init_ext(NULL, extra_args, NULL, true); } QTestState *qtest_vinitf(const char *fmt, va_list ap) @@ -757,6 +804,7 @@ QDict *qtest_qmp_receive(QTestState *s) QDict *qtest_qmp_receive_dict(QTestState *s) { + g_assert(s->qmp_fd >= 0); return qmp_fd_receive(s->qmp_fd); } @@ -784,12 +832,14 @@ int qtest_socket_server(const char *socket_path) void qtest_qmp_vsend_fds(QTestState *s, int *fds, size_t fds_num, const char *fmt, va_list ap) { + g_assert(s->qmp_fd >= 0); qmp_fd_vsend_fds(s->qmp_fd, fds, fds_num, fmt, ap); } #endif void qtest_qmp_vsend(QTestState *s, const char *fmt, va_list ap) { + g_assert(s->qmp_fd >= 0); qmp_fd_vsend(s->qmp_fd, fmt, ap); } @@ -850,6 +900,7 @@ void qtest_qmp_send_raw(QTestState *s, const char *fmt, ...) { va_list ap; + g_assert(s->qmp_fd >= 0); va_start(ap, fmt); qmp_fd_vsend_raw(s->qmp_fd, fmt, ap); va_end(ap); @@ -953,15 +1004,62 @@ const char *qtest_get_arch(void) return end + 1; } +static bool qtest_qom_has_concrete_type(const char *parent_typename, + const char *child_typename, + QList **cached_list) +{ + QList *list = cached_list ? 
*cached_list : NULL; + const QListEntry *p; + QObject *qobj; + QString *qstr; + QDict *devinfo; + int idx; + + if (!list) { + QDict *resp; + QDict *args; + QTestState *qts = qtest_init("-machine none"); + + args = qdict_new(); + qdict_put_bool(args, "abstract", false); + qdict_put_str(args, "implements", parent_typename); + + resp = qtest_qmp(qts, "{'execute': 'qom-list-types', 'arguments': %p }", + args); + g_assert(qdict_haskey(resp, "return")); + list = qdict_get_qlist(resp, "return"); + qobject_ref(list); + qobject_unref(resp); + + qtest_quit(qts); + + if (cached_list) { + *cached_list = list; + } + } + + for (p = qlist_first(list), idx = 0; p; p = qlist_next(p), idx++) { + devinfo = qobject_to(QDict, qlist_entry_obj(p)); + g_assert(devinfo); + + qobj = qdict_get(devinfo, "name"); + g_assert(qobj); + qstr = qobject_to(QString, qobj); + g_assert(qstr); + if (g_str_equal(qstring_get_str(qstr), child_typename)) { + return true; + } + } + + return false; +} + bool qtest_has_accel(const char *accel_name) { - if (g_str_equal(accel_name, "tcg")) { -#if defined(CONFIG_TCG) - return true; -#else - return false; -#endif - } else if (g_str_equal(accel_name, "kvm")) { + static QList *list; + g_autofree char *accel_type = NULL; + + if (g_str_equal(accel_name, "kvm")) { int i; const char *arch = qtest_get_arch(); const char *targets[] = { CONFIG_KVM_TARGETS }; @@ -973,11 +1071,12 @@ bool qtest_has_accel(const char *accel_name) } } } - } else { - /* not implemented */ - g_assert_not_reached(); + return false; } - return false; + + accel_type = g_strconcat(accel_name, ACCEL_CLASS_SUFFIX, NULL); + + return qtest_qom_has_concrete_type("accel", accel_type, &list); } bool qtest_get_irq(QTestState *s, int num) @@ -1207,6 +1306,33 @@ uint64_t qtest_rtas_call(QTestState *s, const char *name, return 0; } +static void qtest_rsp_csr(QTestState *s, uint64_t *val) +{ + gchar **args; + uint64_t ret; + int rc; + + args = qtest_rsp_args(s, 3); + + rc = qemu_strtou64(args[1], NULL, 16, &ret); + g_assert(rc == 0); + rc = qemu_strtou64(args[2], NULL, 16, val); + g_assert(rc == 0); + + g_strfreev(args); +} + +uint64_t qtest_csr_call(QTestState *s, const char *name, + uint64_t cpu, int csr, + uint64_t *val) +{ + qtest_sendf(s, "csr %s 0x%"PRIx64" %d 0x%"PRIx64"\n", + name, cpu, csr, *val); + + qtest_rsp_csr(s, val); + return 0; +} + void qtest_add_func(const char *str, void (*fn)(void)) { gchar *path = g_strdup_printf("/%s/%s", qtest_get_arch(), str); @@ -1528,7 +1654,7 @@ static struct MachInfo *qtest_get_machines(const char *var) silence_spawn_log = !g_test_verbose(); - qts = qtest_init_with_env(qemu_var, "-machine none"); + qts = qtest_init_ext(qemu_var, "-machine none", NULL, true); response = qtest_qmp(qts, "{ 'execute': 'query-machines' }"); g_assert(response); list = qdict_get_qlist(response, "return"); @@ -1583,7 +1709,7 @@ static struct CpuModel *qtest_get_cpu_models(void) silence_spawn_log = !g_test_verbose(); - qts = qtest_init_with_env(NULL, "-machine none"); + qts = qtest_init_ext(NULL, "-machine none", NULL, true); response = qtest_qmp(qts, "{ 'execute': 'query-cpu-definitions' }"); g_assert(response); list = qdict_get_qlist(response, "return"); @@ -1653,7 +1779,9 @@ void qtest_cb_for_every_machine(void (*cb)(const char *machine), /* Ignore machines that cannot be used for qtests */ if (!strncmp("xenfv", machines[i].name, 5) || g_str_equal("xenpv", machines[i].name) || - g_str_equal("xenpvh", machines[i].name)) { + g_str_equal("xenpvh", machines[i].name) || + g_str_equal("vmapple", machines[i].name) || + 
g_str_equal("nitro-enclave", machines[i].name)) { continue; } if (!skip_old_versioned || @@ -1704,45 +1832,8 @@ bool qtest_has_machine(const char *machine) bool qtest_has_device(const char *device) { static QList *list; - const QListEntry *p; - QObject *qobj; - QString *qstr; - QDict *devinfo; - int idx; - - if (!list) { - QDict *resp; - QDict *args; - QTestState *qts = qtest_init("-machine none"); - - args = qdict_new(); - qdict_put_bool(args, "abstract", false); - qdict_put_str(args, "implements", "device"); - - resp = qtest_qmp(qts, "{'execute': 'qom-list-types', 'arguments': %p }", - args); - g_assert(qdict_haskey(resp, "return")); - list = qdict_get_qlist(resp, "return"); - qobject_ref(list); - qobject_unref(resp); - - qtest_quit(qts); - } - - for (p = qlist_first(list), idx = 0; p; p = qlist_next(p), idx++) { - devinfo = qobject_to(QDict, qlist_entry_obj(p)); - g_assert(devinfo); - qobj = qdict_get(devinfo, "name"); - g_assert(qobj); - qstr = qobject_to(QString, qobj); - g_assert(qstr); - if (g_str_equal(qstring_get_str(qstr), device)) { - return true; - } - } - - return false; + return qtest_qom_has_concrete_type("device", device, &list); } /* @@ -1923,7 +2014,6 @@ void qtest_client_inproc_recv(void *opaque, const char *str) qts->rx = g_string_new(NULL); } g_string_append(qts->rx, str); - return; } void qtest_qom_set_bool(QTestState *s, const char *path, const char *property, diff --git a/tests/qtest/libqtest.h b/tests/qtest/libqtest.h index c261b7e0b3b..b3f2e7fbefd 100644 --- a/tests/qtest/libqtest.h +++ b/tests/qtest/libqtest.h @@ -17,8 +17,9 @@ #ifndef LIBQTEST_H #define LIBQTEST_H -#include "qapi/qmp/qobject.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qobject.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" #include "libqmp.h" typedef struct QTestState QTestState; @@ -56,20 +57,22 @@ QTestState *qtest_vinitf(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0); QTestState *qtest_init(const char *extra_args); /** - * qtest_init_with_env: + * qtest_init_ext: * @var: Environment variable from where to take the QEMU binary * @extra_args: Other arguments to pass to QEMU. CAUTION: these * arguments are subject to word splitting and shell evaluation. + * @capabilities: list of QMP capabilities (strings) to enable + * @do_connect: connect to qemu monitor and qtest socket. * * Like qtest_init(), but use a different environment variable for the - * QEMU binary. + * QEMU binary, allow specify capabilities and skip connecting + * to QEMU monitor. * * Returns: #QTestState instance. */ -QTestState *qtest_init_with_env(const char *var, const char *extra_args); +QTestState *qtest_init_ext(const char *var, const char *extra_args, + QList *capabilities, bool do_connect); -QTestState *qtest_init_with_env_no_handshake(const char *var, - const char *extra_args); /** * qtest_init_without_qmp_handshake: * @extra_args: other arguments to pass to QEMU. CAUTION: these @@ -79,6 +82,22 @@ QTestState *qtest_init_with_env_no_handshake(const char *var, */ QTestState *qtest_init_without_qmp_handshake(const char *extra_args); +/** + * qtest_connect + * @s: #QTestState instance to connect + * Connect to qemu monitor and qtest socket, after skipping them in + * qtest_init_ext. Does not handshake with the monitor. + */ +void qtest_connect(QTestState *s); + +/** + * qtest_qmp_handshake: + * @s: #QTestState instance to operate on. + * @capabilities: list of QMP capabilities (strings) to enable + * Perform handshake after connecting to qemu monitor. 
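qtest_init_ext() folds the old qtest_init_with_env*() variants into one entry point and, together with qtest_connect() and qtest_qmp_handshake(), lets a test spawn QEMU first and attach to the qtest and QMP sockets later. A hypothetical delayed-connect flow (the command line is illustrative):

    QTestState *qts = qtest_init_ext(NULL, "-machine none", NULL, false);

    /* ... setup that must happen before the sockets are accepted ... */

    qtest_connect(qts);
    qtest_qmp_handshake(qts, NULL);   /* or pass a QList of capability strings */

    /* ... the test proper ... */
    qtest_quit(qts);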
+ */ +void qtest_qmp_handshake(QTestState *s, QList *capabilities); + /** * qtest_init_with_serial: * @extra_args: other arguments to pass to QEMU. CAUTION: these @@ -90,6 +109,31 @@ QTestState *qtest_init_without_qmp_handshake(const char *extra_args); */ QTestState *qtest_init_with_serial(const char *extra_args, int *sock_fd); +/** + * qtest_system_reset: + * @s: #QTestState instance to operate on. + * + * Send a "system_reset" command to the QEMU under test, and wait for + * the reset to complete before returning. + */ +void qtest_system_reset(QTestState *s); + +/** + * qtest_system_reset_nowait: + * @s: #QTestState instance to operate on. + * + * Send a "system_reset" command to the QEMU under test, but do not + * wait for the reset to complete before returning. The caller is + * responsible for waiting for either the RESET event or some other + * event of interest to them before proceeding. + * + * This function should only be used if you're specifically testing + * for some other event; in that case you can't use qtest_system_reset() + * because it will read and discard any other QMP events that arrive + * before the RESET event. + */ +void qtest_system_reset_nowait(QTestState *s); + /** * qtest_wait_qemu: * @s: #QTestState instance to operate on. @@ -342,7 +386,7 @@ QDict *qtest_qmp_event_ref(QTestState *s, const char *event); char *qtest_hmp(QTestState *s, const char *fmt, ...) G_GNUC_PRINTF(2, 3); /** - * qtest_hmpv: + * qtest_vhmp: * @s: #QTestState instance to operate on. * @fmt: HMP command to send to QEMU, formats arguments like vsprintf(). * @ap: HMP command arguments @@ -577,6 +621,20 @@ uint64_t qtest_rtas_call(QTestState *s, const char *name, uint32_t nargs, uint64_t args, uint32_t nret, uint64_t ret); +/** + * qtest_csr_call: + * @s: #QTestState instance to operate on. + * @name: name of the command to call. + * @cpu: hart number. + * @csr: CSR number. + * @val: Value for reading/writing. + * + * Call an RISC-V CSR read/write function + */ +uint64_t qtest_csr_call(QTestState *s, const char *name, + uint64_t cpu, int csr, + uint64_t *val); + /** * qtest_bufread: * @s: #QTestState instance to operate on. @@ -881,7 +939,7 @@ void qtest_qmp_assert_success(QTestState *qts, const char *fmt, ...) 
#ifndef _WIN32 /** - * qtest_qmp_fd_assert_success_ref: + * qtest_qmp_fds_assert_success_ref: * @qts: QTestState instance to operate on * @fds: the file descriptors to send * @nfds: number of @fds to send @@ -898,7 +956,7 @@ QDict *qtest_qmp_fds_assert_success_ref(QTestState *qts, int *fds, size_t nfds, G_GNUC_PRINTF(4, 5); /** - * qtest_qmp_fd_assert_success: + * qtest_qmp_fds_assert_success: * @qts: QTestState instance to operate on * @fds: the file descriptors to send * @nfds: number of @fds to send diff --git a/tests/qtest/lsm303dlhc-mag-test.c b/tests/qtest/lsm303dlhc-mag-test.c index 0f64e7fc67d..55ef4594f91 100644 --- a/tests/qtest/lsm303dlhc-mag-test.c +++ b/tests/qtest/lsm303dlhc-mag-test.c @@ -13,7 +13,7 @@ #include "libqtest-single.h" #include "libqos/qgraph.h" #include "libqos/i2c.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #define LSM303DLHC_MAG_TEST_ID "lsm303dlhc_mag-test" #define LSM303DLHC_MAG_REG_CRA 0x00 diff --git a/tests/qtest/m48t59-test.c b/tests/qtest/m48t59-test.c index 605797ab785..1e39a0e8f07 100644 --- a/tests/qtest/m48t59-test.c +++ b/tests/qtest/m48t59-test.c @@ -247,11 +247,6 @@ static void base_setup(void) base_year = 1968; base_machine = "SS-5"; use_mmio = true; - } else if (g_str_equal(arch, "ppc") || g_str_equal(arch, "ppc64")) { - base = 0xF0000000; - base_year = 1968; - base_machine = "ref405ep"; - use_mmio = true; } else { g_assert_not_reached(); } diff --git a/tests/qtest/machine-none-test.c b/tests/qtest/machine-none-test.c index 05da7bc72dc..b6a87d27edb 100644 --- a/tests/qtest/machine-none-test.c +++ b/tests/qtest/machine-none-test.c @@ -14,7 +14,7 @@ #include "qemu/cutils.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" struct arch2cpu { @@ -30,7 +30,6 @@ static struct arch2cpu cpus_map[] = { { "x86_64", "qemu64,apic-id=0" }, { "i386", "qemu32,apic-id=0" }, { "alpha", "ev67" }, - { "cris", "crisv32" }, { "m68k", "m5206" }, { "microblaze", "any" }, { "microblazeel", "any" }, diff --git a/tests/qtest/max34451-test.c b/tests/qtest/max34451-test.c index dbf6ddc8296..5e0878c9239 100644 --- a/tests/qtest/max34451-test.c +++ b/tests/qtest/max34451-test.c @@ -11,8 +11,8 @@ #include "libqtest-single.h" #include "libqos/qgraph.h" #include "libqos/i2c.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qnum.h" +#include "qobject/qdict.h" +#include "qobject/qnum.h" #include "qemu/bitops.h" #define TEST_ID "max34451-test" diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build index 2f0d3ef0809..669d07c06bd 100644 --- a/tests/qtest/meson.build +++ b/tests/qtest/meson.build @@ -1,11 +1,14 @@ slow_qtests = { + 'ahci-test': 150, 'aspeed_smc-test': 360, 'bios-tables-test' : 910, 'cdrom-test' : 610, 'device-introspect-test' : 720, + 'ide-test' : 120, 'migration-test' : 480, 'npcm7xx_pwm-test': 300, 'npcm7xx_watchdog_timer-test': 120, + 'qmp-cmd-test' : 120, 'qom-test' : 900, 'stm32l4x5_usart-test' : 600, 'test-hmp' : 240, @@ -49,7 +52,16 @@ qtests_filter = \ qtests_i386 = \ (slirp.found() ? ['pxe-test'] : []) + \ qtests_filter + \ - (have_tools ? ['ahci-test'] : []) + \ + (config_all_devices.has_key('CONFIG_ACPI_VMGENID') ? ['vmgenid-test'] : []) + \ + (config_all_devices.has_key('CONFIG_AHCI_ICH9') and have_tools ? ['ahci-test'] : []) + \ + (config_all_devices.has_key('CONFIG_AHCI_ICH9') ? ['tco-test'] : []) + \ + (config_all_devices.has_key('CONFIG_FDC_ISA') ? ['fdc-test'] : []) + \ + (config_all_devices.has_key('CONFIG_I440FX') ? ['fw_cfg-test'] : []) + \ + (config_all_devices.has_key('CONFIG_FW_CFG_DMA') ? 
['vmcoreinfo-test'] : []) + \ + (config_all_devices.has_key('CONFIG_I440FX') ? ['i440fx-test'] : []) + \ + (config_all_devices.has_key('CONFIG_I440FX') ? ['ide-test'] : []) + \ + (config_all_devices.has_key('CONFIG_I440FX') ? ['numa-test'] : []) + \ + (config_all_devices.has_key('CONFIG_I440FX') ? ['test-x86-cpuid-compat'] : []) + \ (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \ (config_all_devices.has_key('CONFIG_SGA') ? ['boot-serial-test'] : []) + \ (config_all_devices.has_key('CONFIG_ISA_IPMI_KCS') ? ['ipmi-kcs-test'] : []) + \ @@ -63,6 +75,7 @@ qtests_i386 = \ (config_all_devices.has_key('CONFIG_I82801B11') ? ['i82801b11-test'] : []) + \ (config_all_devices.has_key('CONFIG_IOH3420') ? ['ioh3420-test'] : []) + \ (config_all_devices.has_key('CONFIG_LPC_ICH9') ? ['lpc-ich9-test'] : []) + \ + (config_all_devices.has_key('CONFIG_MC146818RTC') ? ['rtc-test'] : []) + \ (config_all_devices.has_key('CONFIG_USB_UHCI') ? ['usb-hcd-uhci-test'] : []) + \ (config_all_devices.has_key('CONFIG_USB_UHCI') and \ config_all_devices.has_key('CONFIG_USB_EHCI') ? ['usb-hcd-ehci-test'] : []) + \ @@ -76,9 +89,12 @@ qtests_i386 = \ (config_all_devices.has_key('CONFIG_MEGASAS_SCSI_PCI') ? ['fuzz-megasas-test'] : []) + \ (config_all_devices.has_key('CONFIG_LSI_SCSI_PCI') ? ['fuzz-lsi53c895a-test'] : []) + \ (config_all_devices.has_key('CONFIG_VIRTIO_SCSI') ? ['fuzz-virtio-scsi-test'] : []) + \ + (config_all_devices.has_key('CONFIG_VIRTIO_BALLOON') ? ['virtio-balloon-test'] : []) + \ + (config_all_devices.has_key('CONFIG_Q35') ? ['q35-test'] : []) + \ (config_all_devices.has_key('CONFIG_SB16') ? ['fuzz-sb16-test'] : []) + \ (config_all_devices.has_key('CONFIG_SDHCI_PCI') ? ['fuzz-sdcard-test'] : []) + \ (config_all_devices.has_key('CONFIG_ESP_PCI') ? ['am53c974-test'] : []) + \ + (config_all_devices.has_key('CONFIG_VTD') ? ['intel-iommu-test'] : []) + \ (host_os != 'windows' and \ config_all_devices.has_key('CONFIG_ACPI_ERST') ? ['erst-test'] : []) + \ (config_all_devices.has_key('CONFIG_PCIE_PORT') and \ @@ -91,25 +107,16 @@ qtests_i386 = \ config_all_devices.has_key('CONFIG_PARALLEL') ? ['bios-tables-test'] : []) + \ qtests_pci + \ qtests_cxl + \ - ['fdc-test', - 'ide-test', + [ 'hd-geo-test', 'boot-order-test', - 'rtc-test', - 'i440fx-test', - 'fw_cfg-test', 'device-plug-test', 'drive_del-test', - 'tco-test', 'cpu-plug-test', - 'q35-test', - 'vmgenid-test', 'migration-test', - 'test-x86-cpuid-compat', - 'numa-test' ] -if dbus_display +if dbus_display and config_all_devices.has_key('CONFIG_VGA') qtests_i386 += ['dbus-display-test'] endif @@ -135,12 +142,15 @@ qtests_alpha = ['boot-serial-test'] + \ qtests_avr = [ 'boot-serial-test' ] -qtests_hppa = ['boot-serial-test'] + \ +qtests_hppa = \ qtests_filter + \ (config_all_devices.has_key('CONFIG_VGA') ? ['display-vga-test'] : []) qtests_loongarch64 = qtests_filter + \ - ['boot-serial-test', 'numa-test'] + (config_all_devices.has_key('CONFIG_LOONGARCH_VIRT') ? ['numa-test'] : []) + \ + (unpack_edk2_blobs ? ['bios-tables-test'] : []) + \ + ['boot-serial-test', + 'cpu-plug-test'] qtests_m68k = ['boot-serial-test'] + \ qtests_filter @@ -162,7 +172,6 @@ qtests_mips64el = qtests_mips qtests_ppc = \ qtests_filter + \ (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \ - (config_all_devices.has_key('CONFIG_M48T59') ? ['m48t59-test'] : []) + \ (config_all_accel.has_key('CONFIG_TCG') ? ['prom-env-test'] : []) + \ (config_all_accel.has_key('CONFIG_TCG') ? 
['boot-serial-test'] : []) + \ ['boot-order-test'] @@ -171,13 +180,15 @@ qtests_ppc64 = \ qtests_ppc + \ (config_all_devices.has_key('CONFIG_PSERIES') ? ['device-plug-test'] : []) + \ (config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-xscom-test'] : []) + \ + (config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-xive2-test'] : []) + \ (config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-spi-seeprom-test'] : []) + \ (config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-host-i2c-test'] : []) + \ + (config_all_devices.has_key('CONFIG_PSERIES') ? ['numa-test'] : []) + \ (config_all_devices.has_key('CONFIG_PSERIES') ? ['rtas-test'] : []) + \ (slirp.found() ? ['pxe-test'] : []) + \ (config_all_devices.has_key('CONFIG_USB_UHCI') ? ['usb-hcd-uhci-test'] : []) + \ (config_all_devices.has_key('CONFIG_USB_XHCI_NEC') ? ['usb-hcd-xhci-test'] : []) + \ - qtests_pci + ['migration-test', 'numa-test', 'cpu-plug-test', 'drive_del-test'] + qtests_pci + ['migration-test', 'cpu-plug-test', 'drive_del-test'] qtests_sh4 = (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) qtests_sh4eb = (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) @@ -198,13 +209,19 @@ qtests_npcm7xx = \ 'npcm7xx_sdhci-test', 'npcm7xx_smbus-test', 'npcm7xx_timer-test', - 'npcm7xx_watchdog_timer-test', - 'npcm_gmac-test'] + \ + 'npcm7xx_watchdog_timer-test'] + \ (slirp.found() ? ['npcm7xx_emc-test'] : []) +qtests_npcm8xx = \ + ['npcm_gmac-test'] qtests_aspeed = \ - ['aspeed_hace-test', - 'aspeed_smc-test', - 'aspeed_gpio-test'] + ['aspeed_gpio-test', + 'aspeed_hace-test', + 'aspeed_scu-test', + 'aspeed_smc-test'] +qtests_aspeed64 = \ + ['ast2700-gpio-test', + 'ast2700-hace-test', + 'ast2700-smc-test'] qtests_stm32l4x5 = \ ['stm32l4x5_exti-test', @@ -217,7 +234,8 @@ qtests_arm = \ (config_all_devices.has_key('CONFIG_MPS2') ? ['sse-timer-test'] : []) + \ (config_all_devices.has_key('CONFIG_CMSDK_APB_DUALTIMER') ? ['cmsdk-apb-dualtimer-test'] : []) + \ (config_all_devices.has_key('CONFIG_CMSDK_APB_TIMER') ? ['cmsdk-apb-timer-test'] : []) + \ - (config_all_devices.has_key('CONFIG_CMSDK_APB_WATCHDOG') ? ['cmsdk-apb-watchdog-test'] : []) + \ + (config_all_devices.has_key('CONFIG_STELLARIS') or + config_all_devices.has_key('CONFIG_MPS2') ? ['cmsdk-apb-watchdog-test'] : []) + \ (config_all_devices.has_key('CONFIG_PFLASH_CFI02') and config_all_devices.has_key('CONFIG_MUSICPAL') ? ['pflash-cfi02-test'] : []) + \ (config_all_devices.has_key('CONFIG_ASPEED_SOC') ? qtests_aspeed : []) + \ @@ -243,6 +261,9 @@ qtests_aarch64 = \ (config_all_devices.has_key('CONFIG_RASPI') ? ['bcm2835-dma-test', 'bcm2835-i2c-test'] : []) + \ (config_all_accel.has_key('CONFIG_TCG') and \ config_all_devices.has_key('CONFIG_TPM_TIS_I2C') ? ['tpm-tis-i2c-test'] : []) + \ + (config_all_devices.has_key('CONFIG_ASPEED_SOC') ? qtests_aspeed64 : []) + \ + (config_all_devices.has_key('CONFIG_NPCM8XX') ? qtests_npcm8xx : []) + \ + qtests_cxl + \ ['arm-cpu-features', 'numa-test', 'boot-serial-test', @@ -260,7 +281,7 @@ qtests_s390x = \ qtests_riscv32 = \ (config_all_devices.has_key('CONFIG_SIFIVE_E_AON') ? ['sifive-e-aon-watchdog-test'] : []) -qtests_riscv64 = \ +qtests_riscv64 = ['riscv-csr-test'] + \ (unpack_edk2_blobs ? 
['bios-tables-test'] : []) qos_test_ss = ss.source_set() @@ -272,7 +293,6 @@ qos_test_ss.add( 'e1000-test.c', 'eepro100-test.c', 'es1370-test.c', - 'ipoctal232-test.c', 'lsm303dlhc-mag-test.c', 'isl_pmbus_vr-test.c', 'max34451-test.c', @@ -283,6 +303,7 @@ qos_test_ss.add( 'pca9552-test.c', 'pci-test.c', 'pcnet-test.c', + 'rs5c372-test.c', 'sdhci-test.c', 'spapr-phb-test.c', 'tmp105-test.c', @@ -297,11 +318,15 @@ qos_test_ss.add( 'vmxnet3-test.c', 'igb-test.c', 'ufs-test.c', + 'riscv-iommu-test.c', ) if config_all_devices.has_key('CONFIG_VIRTIO_SERIAL') qos_test_ss.add(files('virtio-serial-test.c')) endif +if config_all_devices.has_key('CONFIG_IP_OCTAL_232') + qos_test_ss.add(files('ipoctal232-test.c')) +endif if host_os != 'windows' qos_test_ss.add(files('e1000e-test.c')) @@ -318,23 +343,44 @@ endif tpmemu_files = ['tpm-emu.c', 'tpm-util.c', 'tpm-tests.c'] -migration_files = [files('migration-helpers.c')] +migration_files = [files( + 'migration/bootfile.c', + 'migration/framework.c', + 'migration/migration-qmp.c', + 'migration/migration-util.c', + 'migration/compression-tests.c', + 'migration/cpr-tests.c', + 'migration/file-tests.c', + 'migration/misc-tests.c', + 'migration/precopy-tests.c', + 'migration/postcopy-tests.c', +)] + +migration_tls_files = [] if gnutls.found() - migration_files += [files('../unit/crypto-tls-psk-helpers.c'), gnutls] + migration_tls_files = [files('migration/tls-tests.c', + '../unit/crypto-tls-psk-helpers.c'), gnutls] if tasn1.found() - migration_files += [files('../unit/crypto-tls-x509-helpers.c'), tasn1] + migration_tls_files += [files('../unit/crypto-tls-x509-helpers.c'), tasn1] endif endif qtests = { + 'aspeed_hace-test': files('aspeed-hace-utils.c', 'aspeed_hace-test.c'), + 'aspeed_smc-test': files('aspeed-smc-utils.c', 'aspeed_smc-test.c'), + 'ast2700-hace-test': files('aspeed-hace-utils.c', 'ast2700-hace-test.c'), + 'ast2700-smc-test': files('aspeed-smc-utils.c', 'ast2700-smc-test.c'), 'bios-tables-test': [io, 'boot-sector.c', 'acpi-utils.c', 'tpm-emu.c'], 'cdrom-test': files('boot-sector.c'), - 'dbus-vmstate-test': files('migration-helpers.c') + dbus_vmstate1, + 'dbus-vmstate-test': files('migration/migration-qmp.c', + 'migration/migration-util.c') + dbus_vmstate1, 'erst-test': files('erst-test.c'), 'ivshmem-test': [rt, '../../contrib/ivshmem-server/ivshmem-server.c'], - 'migration-test': migration_files, + 'migration-test': migration_files + migration_tls_files, 'pxe-test': files('boot-sector.c'), + 'pnv-xive2-test': files('pnv-xive2-common.c', 'pnv-xive2-flush-sync.c', + 'pnv-xive2-nvpg_bar.c'), 'qos-test': [chardev, io, qos_test_ss.apply({}).sources()], 'tpm-crb-swtpm-test': [io, tpmemu_files], 'tpm-crb-test': [io, tpmemu_files], @@ -343,7 +389,7 @@ qtests = { 'tpm-tis-i2c-test': [io, tpmemu_files, 'qtest_aspeed.c'], 'tpm-tis-device-swtpm-test': [io, tpmemu_files, 'tpm-tis-util.c'], 'tpm-tis-device-test': [io, tpmemu_files, 'tpm-tis-util.c'], - 'virtio-net-failover': files('migration-helpers.c'), + 'virtio-net-failover': migration_files, 'vmgenid-test': files('boot-sector.c', 'acpi-utils.c'), 'netdev-socket': files('netdev-socket.c', '../unit/socket-helpers.c'), } @@ -351,7 +397,7 @@ qtests = { if vnc.found() gvnc = dependency('gvnc-1.0', method: 'pkg-config', required: false) if gvnc.found() - qtests += {'vnc-display-test': [gvnc]} + qtests += {'vnc-display-test': [gvnc, keymap_targets]} qtests_generic += [ 'vnc-display-test' ] endif endif @@ -369,6 +415,8 @@ foreach dir : target_dirs target_base = dir.split('-')[0] qtest_emulator = 
emulators['qemu-system-' + target_base] target_qtests = get_variable('qtests_' + target_base, []) + qtests_generic + has_kvm = ('CONFIG_KVM' in config_all_accel and host_os == 'linux' + and cpu == target_base and fs.exists('/dev/kvm')) test_deps = roms qtest_env = environment() @@ -402,11 +450,18 @@ foreach dir : target_dirs test: executable(test, src, dependencies: deps) } endif + + test_args = ['--tap', '-k'] + + if test == 'migration-test' and has_kvm + test_args += ['--full'] + endif + test('qtest-@0@/@1@'.format(target_base, test), qtest_executables[test], depends: [test_deps, qtest_emulator, emulator_modules], env: qtest_env, - args: ['--tap', '-k'], + args: test_args, protocol: 'tap', timeout: slow_qtests.get(test, 60), priority: slow_qtests.get(test, 60), diff --git a/tests/qtest/migration-helpers.c b/tests/qtest/migration-helpers.c deleted file mode 100644 index 84f49db85e0..00000000000 --- a/tests/qtest/migration-helpers.c +++ /dev/null @@ -1,533 +0,0 @@ -/* - * QTest migration helpers - * - * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates - * based on the vhost-user-test.c that is: - * Copyright (c) 2014 Virtual Open Systems Sarl. - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#include "qemu/osdep.h" -#include "qemu/ctype.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qapi-visit-sockets.h" -#include "qapi/qobject-input-visitor.h" -#include "qapi/error.h" -#include "qapi/qmp/qlist.h" -#include "qemu/cutils.h" -#include "qemu/memalign.h" - -#include "migration-helpers.h" - -/* - * Number of seconds we wait when looking for migration - * status changes, to avoid test suite hanging forever - * when things go wrong. Needs to be higher enough to - * avoid false positives on loaded hosts. 
- */ -#define MIGRATION_STATUS_WAIT_TIMEOUT 120 - -static char *SocketAddress_to_str(SocketAddress *addr) -{ - switch (addr->type) { - case SOCKET_ADDRESS_TYPE_INET: - return g_strdup_printf("tcp:%s:%s", - addr->u.inet.host, - addr->u.inet.port); - case SOCKET_ADDRESS_TYPE_UNIX: - return g_strdup_printf("unix:%s", - addr->u.q_unix.path); - case SOCKET_ADDRESS_TYPE_FD: - return g_strdup_printf("fd:%s", addr->u.fd.str); - case SOCKET_ADDRESS_TYPE_VSOCK: - return g_strdup_printf("vsock:%s:%s", - addr->u.vsock.cid, - addr->u.vsock.port); - default: - return g_strdup("unknown address type"); - } -} - -static QDict *SocketAddress_to_qdict(SocketAddress *addr) -{ - QDict *dict = qdict_new(); - - switch (addr->type) { - case SOCKET_ADDRESS_TYPE_INET: - qdict_put_str(dict, "type", "inet"); - qdict_put_str(dict, "host", addr->u.inet.host); - qdict_put_str(dict, "port", addr->u.inet.port); - break; - case SOCKET_ADDRESS_TYPE_UNIX: - qdict_put_str(dict, "type", "unix"); - qdict_put_str(dict, "path", addr->u.q_unix.path); - break; - case SOCKET_ADDRESS_TYPE_FD: - qdict_put_str(dict, "type", "fd"); - qdict_put_str(dict, "str", addr->u.fd.str); - break; - case SOCKET_ADDRESS_TYPE_VSOCK: - qdict_put_str(dict, "type", "vsock"); - qdict_put_str(dict, "cid", addr->u.vsock.cid); - qdict_put_str(dict, "port", addr->u.vsock.port); - break; - default: - g_assert_not_reached(); - break; - } - - return dict; -} - -static SocketAddress *migrate_get_socket_address(QTestState *who) -{ - QDict *rsp; - SocketAddressList *addrs; - SocketAddress *addr; - Visitor *iv = NULL; - QObject *object; - - rsp = migrate_query(who); - object = qdict_get(rsp, "socket-address"); - - iv = qobject_input_visitor_new(object); - visit_type_SocketAddressList(iv, NULL, &addrs, &error_abort); - addr = addrs->value; - visit_free(iv); - - qobject_unref(rsp); - return addr; -} - -static char * -migrate_get_connect_uri(QTestState *who) -{ - SocketAddress *addrs; - char *connect_uri; - - addrs = migrate_get_socket_address(who); - connect_uri = SocketAddress_to_str(addrs); - - qapi_free_SocketAddress(addrs); - return connect_uri; -} - -static QDict * -migrate_get_connect_qdict(QTestState *who) -{ - SocketAddress *addrs; - QDict *connect_qdict; - - addrs = migrate_get_socket_address(who); - connect_qdict = SocketAddress_to_qdict(addrs); - - qapi_free_SocketAddress(addrs); - return connect_qdict; -} - -static void migrate_set_ports(QTestState *to, QList *channel_list) -{ - QDict *addr; - QListEntry *entry; - const char *addr_port = NULL; - - addr = migrate_get_connect_qdict(to); - - QLIST_FOREACH_ENTRY(channel_list, entry) { - QDict *channel = qobject_to(QDict, qlist_entry_obj(entry)); - QDict *addrdict = qdict_get_qdict(channel, "addr"); - - if (qdict_haskey(addrdict, "port") && - qdict_haskey(addr, "port") && - (strcmp(qdict_get_str(addrdict, "port"), "0") == 0)) { - addr_port = qdict_get_str(addr, "port"); - qdict_put_str(addrdict, "port", g_strdup(addr_port)); - } - } - - qobject_unref(addr); -} - -bool migrate_watch_for_events(QTestState *who, const char *name, - QDict *event, void *opaque) -{ - QTestMigrationState *state = opaque; - - if (g_str_equal(name, "STOP")) { - state->stop_seen = true; - return true; - } else if (g_str_equal(name, "SUSPEND")) { - state->suspend_seen = true; - return true; - } else if (g_str_equal(name, "RESUME")) { - state->resume_seen = true; - return true; - } - - return false; -} - -void migrate_qmp_fail(QTestState *who, const char *uri, - const char *channels, const char *fmt, ...) 
-{ - va_list ap; - QDict *args, *err; - - va_start(ap, fmt); - args = qdict_from_vjsonf_nofail(fmt, ap); - va_end(ap); - - g_assert(!qdict_haskey(args, "uri")); - if (uri) { - qdict_put_str(args, "uri", uri); - } - - g_assert(!qdict_haskey(args, "channels")); - if (channels) { - QObject *channels_obj = qobject_from_json(channels, &error_abort); - qdict_put_obj(args, "channels", channels_obj); - } - - err = qtest_qmp_assert_failure_ref( - who, "{ 'execute': 'migrate', 'arguments': %p}", args); - - g_assert(qdict_haskey(err, "desc")); - - qobject_unref(err); -} - -/* - * Send QMP command "migrate". - * Arguments are built from @fmt... (formatted like - * qobject_from_jsonf_nofail()) with "uri": @uri spliced in. - */ -void migrate_qmp(QTestState *who, QTestState *to, const char *uri, - const char *channels, const char *fmt, ...) -{ - va_list ap; - QDict *args; - g_autofree char *connect_uri = NULL; - - va_start(ap, fmt); - args = qdict_from_vjsonf_nofail(fmt, ap); - va_end(ap); - - g_assert(!qdict_haskey(args, "uri")); - if (uri) { - qdict_put_str(args, "uri", uri); - } else if (!channels) { - connect_uri = migrate_get_connect_uri(to); - qdict_put_str(args, "uri", connect_uri); - } - - g_assert(!qdict_haskey(args, "channels")); - if (channels) { - QObject *channels_obj = qobject_from_json(channels, &error_abort); - QList *channel_list = qobject_to(QList, channels_obj); - migrate_set_ports(to, channel_list); - qdict_put_obj(args, "channels", channels_obj); - } - - qtest_qmp_assert_success(who, - "{ 'execute': 'migrate', 'arguments': %p}", args); -} - -void migrate_set_capability(QTestState *who, const char *capability, - bool value) -{ - qtest_qmp_assert_success(who, - "{ 'execute': 'migrate-set-capabilities'," - "'arguments': { " - "'capabilities': [ { " - "'capability': %s, 'state': %i } ] } }", - capability, value); -} - -void migrate_incoming_qmp(QTestState *to, const char *uri, const char *fmt, ...) 
-{ - va_list ap; - QDict *args, *rsp; - - va_start(ap, fmt); - args = qdict_from_vjsonf_nofail(fmt, ap); - va_end(ap); - - g_assert(!qdict_haskey(args, "uri")); - qdict_put_str(args, "uri", uri); - - /* This function relies on the event to work, make sure it's enabled */ - migrate_set_capability(to, "events", true); - - rsp = qtest_qmp(to, "{ 'execute': 'migrate-incoming', 'arguments': %p}", - args); - - if (!qdict_haskey(rsp, "return")) { - g_autoptr(GString) s = qobject_to_json_pretty(QOBJECT(rsp), true); - g_test_message("%s", s->str); - } - - g_assert(qdict_haskey(rsp, "return")); - qobject_unref(rsp); - - migration_event_wait(to, "setup"); -} - -/* - * Note: caller is responsible to free the returned object via - * qobject_unref() after use - */ -QDict *migrate_query(QTestState *who) -{ - return qtest_qmp_assert_success_ref(who, "{ 'execute': 'query-migrate' }"); -} - -QDict *migrate_query_not_failed(QTestState *who) -{ - const char *status; - QDict *rsp = migrate_query(who); - status = qdict_get_str(rsp, "status"); - if (g_str_equal(status, "failed")) { - g_printerr("query-migrate shows failed migration: %s\n", - qdict_get_str(rsp, "error-desc")); - } - g_assert(!g_str_equal(status, "failed")); - return rsp; -} - -/* - * Note: caller is responsible to free the returned object via - * g_free() after use - */ -static gchar *migrate_query_status(QTestState *who) -{ - QDict *rsp_return = migrate_query(who); - gchar *status = g_strdup(qdict_get_str(rsp_return, "status")); - - g_assert(status); - qobject_unref(rsp_return); - - return status; -} - -static bool check_migration_status(QTestState *who, const char *goal, - const char **ungoals) -{ - bool ready; - char *current_status; - const char **ungoal; - - current_status = migrate_query_status(who); - ready = strcmp(current_status, goal) == 0; - if (!ungoals) { - g_assert_cmpstr(current_status, !=, "failed"); - /* - * If looking for a state other than completed, - * completion of migration would cause the test to - * hang. - */ - if (strcmp(goal, "completed") != 0) { - g_assert_cmpstr(current_status, !=, "completed"); - } - } else { - for (ungoal = ungoals; *ungoal; ungoal++) { - g_assert_cmpstr(current_status, !=, *ungoal); - } - } - g_free(current_status); - return ready; -} - -void wait_for_migration_status(QTestState *who, - const char *goal, const char **ungoals) -{ - g_test_timer_start(); - while (!check_migration_status(who, goal, ungoals)) { - usleep(1000); - - g_assert(g_test_timer_elapsed() < MIGRATION_STATUS_WAIT_TIMEOUT); - } -} - -void wait_for_migration_complete(QTestState *who) -{ - wait_for_migration_status(who, "completed", NULL); -} - -void wait_for_migration_fail(QTestState *from, bool allow_active) -{ - g_test_timer_start(); - QDict *rsp_return; - char *status; - bool failed; - - do { - status = migrate_query_status(from); - bool result = !strcmp(status, "setup") || !strcmp(status, "failed") || - (allow_active && !strcmp(status, "active")); - if (!result) { - fprintf(stderr, "%s: unexpected status status=%s allow_active=%d\n", - __func__, status, allow_active); - } - g_assert(result); - failed = !strcmp(status, "failed"); - g_free(status); - - g_assert(g_test_timer_elapsed() < MIGRATION_STATUS_WAIT_TIMEOUT); - } while (!failed); - - /* Is the machine currently running? 
*/ - rsp_return = qtest_qmp_assert_success_ref(from, - "{ 'execute': 'query-status' }"); - g_assert(qdict_haskey(rsp_return, "running")); - g_assert(qdict_get_bool(rsp_return, "running")); - qobject_unref(rsp_return); -} - -char *find_common_machine_version(const char *mtype, const char *var1, - const char *var2) -{ - g_autofree char *type1 = qtest_resolve_machine_alias(var1, mtype); - g_autofree char *type2 = qtest_resolve_machine_alias(var2, mtype); - - g_assert(type1 && type2); - - if (g_str_equal(type1, type2)) { - /* either can be used */ - return g_strdup(type1); - } - - if (qtest_has_machine_with_env(var2, type1)) { - return g_strdup(type1); - } - - if (qtest_has_machine_with_env(var1, type2)) { - return g_strdup(type2); - } - - g_test_message("No common machine version for machine type '%s' between " - "binaries %s and %s", mtype, getenv(var1), getenv(var2)); - g_assert_not_reached(); -} - -char *resolve_machine_version(const char *alias, const char *var1, - const char *var2) -{ - const char *mname = g_getenv("QTEST_QEMU_MACHINE_TYPE"); - g_autofree char *machine_name = NULL; - - if (mname) { - const char *dash = strrchr(mname, '-'); - const char *dot = strrchr(mname, '.'); - - machine_name = g_strdup(mname); - - if (dash && dot) { - assert(qtest_has_machine(machine_name)); - return g_steal_pointer(&machine_name); - } - /* else: probably an alias, let it be resolved below */ - } else { - /* use the hardcoded alias */ - machine_name = g_strdup(alias); - } - - return find_common_machine_version(machine_name, var1, var2); -} - -typedef struct { - char *name; - void (*func)(void); -} MigrationTest; - -static void migration_test_destroy(gpointer data) -{ - MigrationTest *test = (MigrationTest *)data; - - g_free(test->name); - g_free(test); -} - -static void migration_test_wrapper(const void *data) -{ - MigrationTest *test = (MigrationTest *)data; - - g_test_message("Running /%s%s", qtest_get_arch(), test->name); - test->func(); -} - -void migration_test_add(const char *path, void (*fn)(void)) -{ - MigrationTest *test = g_new0(MigrationTest, 1); - - test->func = fn; - test->name = g_strdup(path); - - qtest_add_data_func_full(path, test, migration_test_wrapper, - migration_test_destroy); -} - -#ifdef O_DIRECT -/* - * Probe for O_DIRECT support on the filesystem. Since this is used - * for tests, be conservative, if anything fails, assume it's - * unsupported. - */ -bool probe_o_direct_support(const char *tmpfs) -{ - g_autofree char *filename = g_strdup_printf("%s/probe-o-direct", tmpfs); - int fd, flags = O_CREAT | O_RDWR | O_TRUNC | O_DIRECT; - void *buf; - ssize_t ret, len; - uint64_t offset; - - fd = open(filename, flags, 0660); - if (fd < 0) { - unlink(filename); - return false; - } - - /* - * Using 1MB alignment as conservative choice to satisfy any - * plausible architecture default page size, and/or filesystem - * alignment restrictions. - */ - len = 0x100000; - offset = 0x100000; - - buf = qemu_try_memalign(len, len); - g_assert(buf); - - ret = pwrite(fd, buf, len, offset); - unlink(filename); - g_free(buf); - - if (ret < 0) { - return false; - } - - return true; -} -#endif - -/* - * Wait for a "MIGRATION" event. This is what Libvirt uses to track - * migration status changes. 
- */ -void migration_event_wait(QTestState *s, const char *target) -{ - QDict *response, *data; - const char *status; - bool found; - - do { - response = qtest_qmp_eventwait_ref(s, "MIGRATION"); - data = qdict_get_qdict(response, "data"); - g_assert(data); - status = qdict_get_str(data, "status"); - found = (strcmp(status, target) == 0); - qobject_unref(response); - } while (!found); -} diff --git a/tests/qtest/migration-helpers.h b/tests/qtest/migration-helpers.h deleted file mode 100644 index 72dba369fbe..00000000000 --- a/tests/qtest/migration-helpers.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * QTest migration helpers - * - * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates - * based on the vhost-user-test.c that is: - * Copyright (c) 2014 Virtual Open Systems Sarl. - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#ifndef MIGRATION_HELPERS_H -#define MIGRATION_HELPERS_H - -#include "libqtest.h" - -typedef struct QTestMigrationState { - bool stop_seen; - bool resume_seen; - bool suspend_seen; - bool suspend_me; -} QTestMigrationState; - -bool migrate_watch_for_events(QTestState *who, const char *name, - QDict *event, void *opaque); - -G_GNUC_PRINTF(5, 6) -void migrate_qmp(QTestState *who, QTestState *to, const char *uri, - const char *channels, const char *fmt, ...); - -G_GNUC_PRINTF(3, 4) -void migrate_incoming_qmp(QTestState *who, const char *uri, - const char *fmt, ...); - -G_GNUC_PRINTF(4, 5) -void migrate_qmp_fail(QTestState *who, const char *uri, - const char *channels, const char *fmt, ...); - -void migrate_set_capability(QTestState *who, const char *capability, - bool value); - -QDict *migrate_query(QTestState *who); -QDict *migrate_query_not_failed(QTestState *who); - -void wait_for_migration_status(QTestState *who, - const char *goal, const char **ungoals); - -void wait_for_migration_complete(QTestState *who); - -void wait_for_migration_fail(QTestState *from, bool allow_active); - -char *find_common_machine_version(const char *mtype, const char *var1, - const char *var2); -char *resolve_machine_version(const char *alias, const char *var1, - const char *var2); -#ifdef O_DIRECT -bool probe_o_direct_support(const char *tmpfs); -#else -static inline bool probe_o_direct_support(const char *tmpfs) -{ - return false; -} -#endif -void migration_test_add(const char *path, void (*fn)(void)); -void migration_event_wait(QTestState *s, const char *target); - -#endif /* MIGRATION_HELPERS_H */ diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c index 70b606b8886..08936871741 100644 --- a/tests/qtest/migration-test.c +++ b/tests/qtest/migration-test.c @@ -11,4032 +11,56 @@ */ #include "qemu/osdep.h" +#include "migration/framework.h" +#include "qemu/module.h" -#include "libqtest.h" -#include "qapi/qmp/qdict.h" -#include "qemu/module.h" -#include "qemu/option.h" -#include "qemu/range.h" -#include "qemu/sockets.h" -#include "chardev/char.h" -#include "crypto/tlscredspsk.h" -#include "qapi/qmp/qlist.h" -#include "ppc-util.h" - -#include "migration-helpers.h" -#include "tests/migration/migration-test.h" -#ifdef CONFIG_GNUTLS -# include "tests/unit/crypto-tls-psk-helpers.h" -# ifdef CONFIG_TASN1 -# include "tests/unit/crypto-tls-x509-helpers.h" -# endif /* CONFIG_TASN1 */ -#endif /* CONFIG_GNUTLS */ - -/* For dirty ring test; so far only x86_64 is supported */ -#if defined(__linux__) && defined(HOST_X86_64) -#include "linux/kvm.h" -#endif - -unsigned start_address; -unsigned 
end_address; -static bool uffd_feature_thread_id; -static QTestMigrationState src_state; -static QTestMigrationState dst_state; - -/* - * An initial 3 MB offset is used as that corresponds - * to ~1 sec of data transfer with our bandwidth setting. - */ -#define MAGIC_OFFSET_BASE (3 * 1024 * 1024) -/* - * A further 1k is added to ensure we're not a multiple - * of TEST_MEM_PAGE_SIZE, thus avoid clash with writes - * from the migration guest workload. - */ -#define MAGIC_OFFSET_SHUFFLE 1024 -#define MAGIC_OFFSET (MAGIC_OFFSET_BASE + MAGIC_OFFSET_SHUFFLE) -#define MAGIC_MARKER 0xFEED12345678CAFEULL - -/* - * Dirtylimit stop working if dirty page rate error - * value less than DIRTYLIMIT_TOLERANCE_RANGE - */ -#define DIRTYLIMIT_TOLERANCE_RANGE 25 /* MB/s */ - -#define ANALYZE_SCRIPT "scripts/analyze-migration.py" -#define VMSTATE_CHECKER_SCRIPT "scripts/vmstate-static-checker.py" - -#define QEMU_VM_FILE_MAGIC 0x5145564d -#define FILE_TEST_FILENAME "migfile" -#define FILE_TEST_OFFSET 0x1000 -#define FILE_TEST_MARKER 'X' -#define QEMU_ENV_SRC "QTEST_QEMU_BINARY_SRC" -#define QEMU_ENV_DST "QTEST_QEMU_BINARY_DST" - -typedef enum PostcopyRecoveryFailStage { - /* - * "no failure" must be 0 as it's the default. OTOH, real failure - * cases must be >0 to make sure they trigger by a "if" test. - */ - POSTCOPY_FAIL_NONE = 0, - POSTCOPY_FAIL_CHANNEL_ESTABLISH, - POSTCOPY_FAIL_RECOVERY, - POSTCOPY_FAIL_MAX -} PostcopyRecoveryFailStage; - -#if defined(__linux__) -#include -#include -#endif - -#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD) -#include -#include -#include "qemu/userfaultfd.h" - -static bool ufd_version_check(void) -{ - struct uffdio_api api_struct; - uint64_t ioctl_mask; - - int ufd = uffd_open(O_CLOEXEC); - - if (ufd == -1) { - g_test_message("Skipping test: userfaultfd not available"); - return false; - } - - api_struct.api = UFFD_API; - api_struct.features = 0; - if (ioctl(ufd, UFFDIO_API, &api_struct)) { - g_test_message("Skipping test: UFFDIO_API failed"); - return false; - } - uffd_feature_thread_id = api_struct.features & UFFD_FEATURE_THREAD_ID; - - ioctl_mask = 1ULL << _UFFDIO_REGISTER | - 1ULL << _UFFDIO_UNREGISTER; - if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) { - g_test_message("Skipping test: Missing userfault feature"); - return false; - } - - return true; -} - -#else -static bool ufd_version_check(void) -{ - g_test_message("Skipping test: Userfault not available (builtdtime)"); - return false; -} - -#endif - -static char *tmpfs; -static char *bootpath; - -/* The boot file modifies memory area in [start_address, end_address) - * repeatedly. It outputs a 'B' at a fixed rate while it's still running. 
- */ -#include "tests/migration/i386/a-b-bootblock.h" -#include "tests/migration/aarch64/a-b-kernel.h" -#include "tests/migration/ppc64/a-b-kernel.h" -#include "tests/migration/s390x/a-b-bios.h" - -static void bootfile_create(char *dir, bool suspend_me) -{ - const char *arch = qtest_get_arch(); - unsigned char *content; - size_t len; - - bootpath = g_strdup_printf("%s/bootsect", dir); - if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { - /* the assembled x86 boot sector should be exactly one sector large */ - g_assert(sizeof(x86_bootsect) == 512); - x86_bootsect[SYM_suspend_me - SYM_start] = suspend_me; - content = x86_bootsect; - len = sizeof(x86_bootsect); - } else if (g_str_equal(arch, "s390x")) { - content = s390x_elf; - len = sizeof(s390x_elf); - } else if (strcmp(arch, "ppc64") == 0) { - content = ppc64_kernel; - len = sizeof(ppc64_kernel); - } else if (strcmp(arch, "aarch64") == 0) { - content = aarch64_kernel; - len = sizeof(aarch64_kernel); - g_assert(sizeof(aarch64_kernel) <= ARM_TEST_MAX_KERNEL_SIZE); - } else { - g_assert_not_reached(); - } - - FILE *bootfile = fopen(bootpath, "wb"); - - g_assert_cmpint(fwrite(content, len, 1, bootfile), ==, 1); - fclose(bootfile); -} - -static void bootfile_delete(void) -{ - unlink(bootpath); - g_free(bootpath); - bootpath = NULL; -} - -/* - * Wait for some output in the serial output file, - * we get an 'A' followed by an endless string of 'B's - * but on the destination we won't have the A (unless we enabled suspend/resume) - */ -static void wait_for_serial(const char *side) -{ - g_autofree char *serialpath = g_strdup_printf("%s/%s", tmpfs, side); - FILE *serialfile = fopen(serialpath, "r"); - - do { - int readvalue = fgetc(serialfile); - - switch (readvalue) { - case 'A': - /* Fine */ - break; - - case 'B': - /* It's alive! */ - fclose(serialfile); - return; - - case EOF: - fseek(serialfile, 0, SEEK_SET); - usleep(1000); - break; - - default: - fprintf(stderr, "Unexpected %d on %s serial\n", readvalue, side); - g_assert_not_reached(); - } - } while (true); -} - -static void wait_for_stop(QTestState *who, QTestMigrationState *state) -{ - if (!state->stop_seen) { - qtest_qmp_eventwait(who, "STOP"); - } -} - -static void wait_for_resume(QTestState *who, QTestMigrationState *state) -{ - if (!state->resume_seen) { - qtest_qmp_eventwait(who, "RESUME"); - } -} - -static void wait_for_suspend(QTestState *who, QTestMigrationState *state) -{ - if (state->suspend_me && !state->suspend_seen) { - qtest_qmp_eventwait(who, "SUSPEND"); - } -} - -/* - * It's tricky to use qemu's migration event capability with qtest, - * events suddenly appearing confuse the qmp()/hmp() responses. 
- */ - -static int64_t read_ram_property_int(QTestState *who, const char *property) -{ - QDict *rsp_return, *rsp_ram; - int64_t result; - - rsp_return = migrate_query_not_failed(who); - if (!qdict_haskey(rsp_return, "ram")) { - /* Still in setup */ - result = 0; - } else { - rsp_ram = qdict_get_qdict(rsp_return, "ram"); - result = qdict_get_try_int(rsp_ram, property, 0); - } - qobject_unref(rsp_return); - return result; -} - -static int64_t read_migrate_property_int(QTestState *who, const char *property) -{ - QDict *rsp_return; - int64_t result; - - rsp_return = migrate_query_not_failed(who); - result = qdict_get_try_int(rsp_return, property, 0); - qobject_unref(rsp_return); - return result; -} - -static uint64_t get_migration_pass(QTestState *who) -{ - return read_ram_property_int(who, "dirty-sync-count"); -} - -static void read_blocktime(QTestState *who) -{ - QDict *rsp_return; - - rsp_return = migrate_query_not_failed(who); - g_assert(qdict_haskey(rsp_return, "postcopy-blocktime")); - qobject_unref(rsp_return); -} - -/* - * Wait for two changes in the migration pass count, but bail if we stop. - */ -static void wait_for_migration_pass(QTestState *who) -{ - uint64_t pass, prev_pass = 0, changes = 0; - - while (changes < 2 && !src_state.stop_seen && !src_state.suspend_seen) { - usleep(1000); - pass = get_migration_pass(who); - changes += (pass != prev_pass); - prev_pass = pass; - } -} - -static void check_guests_ram(QTestState *who) -{ - /* Our ASM test will have been incrementing one byte from each page from - * start_address to < end_address in order. This gives us a constraint - * that any page's byte should be equal or less than the previous pages - * byte (mod 256); and they should all be equal except for one transition - * at the point where we meet the incrementer. (We're running this with - * the guest stopped). - */ - unsigned address; - uint8_t first_byte; - uint8_t last_byte; - bool hit_edge = false; - int bad = 0; - - qtest_memread(who, start_address, &first_byte, 1); - last_byte = first_byte; - - for (address = start_address + TEST_MEM_PAGE_SIZE; address < end_address; - address += TEST_MEM_PAGE_SIZE) - { - uint8_t b; - qtest_memread(who, address, &b, 1); - if (b != last_byte) { - if (((b + 1) % 256) == last_byte && !hit_edge) { - /* This is OK, the guest stopped at the point of - * incrementing the previous page but didn't get - * to us yet. 
- */ - hit_edge = true; - last_byte = b; - } else { - bad++; - if (bad <= 10) { - fprintf(stderr, "Memory content inconsistency at %x" - " first_byte = %x last_byte = %x current = %x" - " hit_edge = %x\n", - address, first_byte, last_byte, b, hit_edge); - } - } - } - } - if (bad >= 10) { - fprintf(stderr, "and in another %d pages", bad - 10); - } - g_assert(bad == 0); -} - -static void cleanup(const char *filename) -{ - g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, filename); - - unlink(path); -} - -static long long migrate_get_parameter_int(QTestState *who, - const char *parameter) -{ - QDict *rsp; - long long result; - - rsp = qtest_qmp_assert_success_ref( - who, "{ 'execute': 'query-migrate-parameters' }"); - result = qdict_get_int(rsp, parameter); - qobject_unref(rsp); - return result; -} - -static void migrate_check_parameter_int(QTestState *who, const char *parameter, - long long value) -{ - long long result; - - result = migrate_get_parameter_int(who, parameter); - g_assert_cmpint(result, ==, value); -} - -static void migrate_set_parameter_int(QTestState *who, const char *parameter, - long long value) -{ - qtest_qmp_assert_success(who, - "{ 'execute': 'migrate-set-parameters'," - "'arguments': { %s: %lld } }", - parameter, value); - migrate_check_parameter_int(who, parameter, value); -} - -static char *migrate_get_parameter_str(QTestState *who, - const char *parameter) -{ - QDict *rsp; - char *result; - - rsp = qtest_qmp_assert_success_ref( - who, "{ 'execute': 'query-migrate-parameters' }"); - result = g_strdup(qdict_get_str(rsp, parameter)); - qobject_unref(rsp); - return result; -} - -static void migrate_check_parameter_str(QTestState *who, const char *parameter, - const char *value) -{ - g_autofree char *result = migrate_get_parameter_str(who, parameter); - g_assert_cmpstr(result, ==, value); -} - -static void migrate_set_parameter_str(QTestState *who, const char *parameter, - const char *value) -{ - qtest_qmp_assert_success(who, - "{ 'execute': 'migrate-set-parameters'," - "'arguments': { %s: %s } }", - parameter, value); - migrate_check_parameter_str(who, parameter, value); -} - -static long long migrate_get_parameter_bool(QTestState *who, - const char *parameter) -{ - QDict *rsp; - int result; - - rsp = qtest_qmp_assert_success_ref( - who, "{ 'execute': 'query-migrate-parameters' }"); - result = qdict_get_bool(rsp, parameter); - qobject_unref(rsp); - return !!result; -} - -static void migrate_check_parameter_bool(QTestState *who, const char *parameter, - int value) -{ - int result; - - result = migrate_get_parameter_bool(who, parameter); - g_assert_cmpint(result, ==, value); -} - -static void migrate_set_parameter_bool(QTestState *who, const char *parameter, - int value) -{ - qtest_qmp_assert_success(who, - "{ 'execute': 'migrate-set-parameters'," - "'arguments': { %s: %i } }", - parameter, value); - migrate_check_parameter_bool(who, parameter, value); -} - -static void migrate_ensure_non_converge(QTestState *who) -{ - /* Can't converge with 1ms downtime + 3 mbs bandwidth limit */ - migrate_set_parameter_int(who, "max-bandwidth", 3 * 1000 * 1000); - migrate_set_parameter_int(who, "downtime-limit", 1); -} - -static void migrate_ensure_converge(QTestState *who) -{ - /* Should converge with 30s downtime + 1 gbs bandwidth limit */ - migrate_set_parameter_int(who, "max-bandwidth", 1 * 1000 * 1000 * 1000); - migrate_set_parameter_int(who, "downtime-limit", 30 * 1000); -} - -/* - * Our goal is to ensure that we run a single full migration - * iteration, and also dirty 
memory, ensuring that at least - * one further iteration is required. - * - * We can't directly synchronize with the start of a migration - * so we have to apply some tricks monitoring memory that is - * transferred. - * - * Initially we set the migration bandwidth to an insanely - * low value, with tiny max downtime too. This basically - * guarantees migration will never complete. - * - * This will result in a test that is unacceptably slow though, - * so we can't let the entire migration pass run at this speed. - * Our intent is to let it run just long enough that we can - * prove data prior to the marker has been transferred *AND* - * also prove this transferred data is dirty again. - * - * Before migration starts, we write a 64-bit magic marker - * into a fixed location in the src VM RAM. - * - * Then watch dst memory until the marker appears. This is - * proof that start_address -> MAGIC_OFFSET_BASE has been - * transferred. - * - * Finally we go back to the source and read a byte just - * before the marker until we see it flip in value. This - * is proof that start_address -> MAGIC_OFFSET_BASE - * is now dirty again. - * - * IOW, we're guaranteed at least a 2nd migration pass - * at this point. - * - * We can now let migration run at full speed to finish - * the test - */ -static void migrate_prepare_for_dirty_mem(QTestState *from) -{ - /* - * The guest workflow iterates from start_address to - * end_address, writing 1 byte every TEST_MEM_PAGE_SIZE - * bytes. - * - * IOW, if we write to mem at a point which is NOT - * a multiple of TEST_MEM_PAGE_SIZE, our write won't - * conflict with the migration workflow. - * - * We put in a marker here, that we'll use to determine - * when the data has been transferred to the dst. - */ - qtest_writeq(from, start_address + MAGIC_OFFSET, MAGIC_MARKER); -} - -static void migrate_wait_for_dirty_mem(QTestState *from, - QTestState *to) -{ - uint64_t watch_address = start_address + MAGIC_OFFSET_BASE; - uint64_t marker_address = start_address + MAGIC_OFFSET; - uint8_t watch_byte; - - /* - * Wait for the MAGIC_MARKER to get transferred, as an - * indicator that a migration pass has made some known - * amount of progress. - */ - do { - usleep(1000 * 10); - } while (qtest_readq(to, marker_address) != MAGIC_MARKER); - - - /* If suspended, src only iterates once, and watch_byte may never change */ - if (src_state.suspend_me) { - return; - } - - /* - * Now ensure that already transferred bytes are - * dirty again from the guest workload. Note the - * guest byte value will wrap around and by chance - * match the original watch_byte. 
This is harmless - * as we'll eventually see a different value if we - * keep watching - */ - watch_byte = qtest_readb(from, watch_address); - do { - usleep(1000 * 10); - } while (qtest_readb(from, watch_address) == watch_byte); -} - - -static void migrate_pause(QTestState *who) -{ - qtest_qmp_assert_success(who, "{ 'execute': 'migrate-pause' }"); -} - -static void migrate_continue(QTestState *who, const char *state) -{ - qtest_qmp_assert_success(who, - "{ 'execute': 'migrate-continue'," - " 'arguments': { 'state': %s } }", - state); -} - -static void migrate_recover(QTestState *who, const char *uri) -{ - qtest_qmp_assert_success(who, - "{ 'execute': 'migrate-recover', " - " 'id': 'recover-cmd', " - " 'arguments': { 'uri': %s } }", - uri); -} - -static void migrate_cancel(QTestState *who) -{ - qtest_qmp_assert_success(who, "{ 'execute': 'migrate_cancel' }"); -} - -static void migrate_postcopy_start(QTestState *from, QTestState *to) -{ - qtest_qmp_assert_success(from, "{ 'execute': 'migrate-start-postcopy' }"); - - wait_for_stop(from, &src_state); - qtest_qmp_eventwait(to, "RESUME"); -} - -typedef struct { - /* - * QTEST_LOG=1 may override this. When QTEST_LOG=1, we always dump errors - * unconditionally, because it means the user would like to be verbose. - */ - bool hide_stderr; - bool use_shmem; - /* only launch the target process */ - bool only_target; - /* Use dirty ring if true; dirty logging otherwise */ - bool use_dirty_ring; - const char *opts_source; - const char *opts_target; - /* suspend the src before migrating to dest. */ - bool suspend_me; -} MigrateStart; - -/* - * A hook that runs after the src and dst QEMUs have been - * created, but before the migration is started. This can - * be used to set migration parameters and capabilities. - * - * Returns: NULL, or a pointer to opaque state to be - * later passed to the TestMigrateFinishHook - */ -typedef void * (*TestMigrateStartHook)(QTestState *from, - QTestState *to); - -/* - * A hook that runs after the migration has finished, - * regardless of whether it succeeded or failed, but - * before QEMU has terminated (unless it self-terminated - * due to migration error) - * - * @opaque is a pointer to state previously returned - * by the TestMigrateStartHook if any, or NULL. - */ -typedef void (*TestMigrateFinishHook)(QTestState *from, - QTestState *to, - void *opaque); - -typedef struct { - /* Optional: fine tune start parameters */ - MigrateStart start; - - /* Required: the URI for the dst QEMU to listen on */ - const char *listen_uri; - - /* - * Optional: the URI for the src QEMU to connect to - * If NULL, then it will query the dst QEMU for its actual - * listening address and use that as the connect address. - * This allows for dynamically picking a free TCP port. - */ - const char *connect_uri; - - /* - * Optional: JSON-formatted list of src QEMU URIs. If a port is - * defined as '0' in any QDict key a value of '0' will be - * automatically converted to the correct destination port. - */ - const char *connect_channels; - - /* Optional: callback to run at start to set migration parameters */ - TestMigrateStartHook start_hook; - /* Optional: callback to run at finish to cleanup */ - TestMigrateFinishHook finish_hook; - - /* - * Optional: normally we expect the migration process to complete. - * - * There can be a variety of reasons and stages in which failure - * can happen during tests. 
- * - * If a failure is expected to happen at time of establishing - * the connection, then MIG_TEST_FAIL will indicate that the dst - * QEMU is expected to stay running and accept future migration - * connections. - * - * If a failure is expected to happen while processing the - * migration stream, then MIG_TEST_FAIL_DEST_QUIT_ERR will indicate - * that the dst QEMU is expected to quit with non-zero exit status - */ - enum { - /* This test should succeed, the default */ - MIG_TEST_SUCCEED = 0, - /* This test should fail, dest qemu should keep alive */ - MIG_TEST_FAIL, - /* This test should fail, dest qemu should fail with abnormal status */ - MIG_TEST_FAIL_DEST_QUIT_ERR, - /* The QMP command for this migration should fail with an error */ - MIG_TEST_QMP_ERROR, - } result; - - /* - * Optional: set number of migration passes to wait for, if live==true. - * If zero, then merely wait for a few MB of dirty data - */ - unsigned int iterations; - - /* - * Optional: whether the guest CPUs should be running during a precopy - * migration test. We used to always run with live but it took much - * longer so we reduced live tests to only the ones that have solid - * reason to be tested live-only. For each of the new test cases for - * precopy please provide justifications to use live explicitly (please - * refer to existing ones with live=true), or use live=off by default. - */ - bool live; - - /* Postcopy specific fields */ - void *postcopy_data; - bool postcopy_preempt; - PostcopyRecoveryFailStage postcopy_recovery_fail_stage; -} MigrateCommon; - -static int test_migrate_start(QTestState **from, QTestState **to, - const char *uri, MigrateStart *args) -{ - g_autofree gchar *arch_source = NULL; - g_autofree gchar *arch_target = NULL; - /* options for source and target */ - g_autofree gchar *arch_opts = NULL; - g_autofree gchar *cmd_source = NULL; - g_autofree gchar *cmd_target = NULL; - const gchar *ignore_stderr; - g_autofree char *shmem_opts = NULL; - g_autofree char *shmem_path = NULL; - const char *kvm_opts = NULL; - const char *arch = qtest_get_arch(); - const char *memory_size; - const char *machine_alias, *machine_opts = ""; - g_autofree char *machine = NULL; - - if (args->use_shmem) { - if (!g_file_test("/dev/shm", G_FILE_TEST_IS_DIR)) { - g_test_skip("/dev/shm is not supported"); - return -1; - } - } - - dst_state = (QTestMigrationState) { }; - src_state = (QTestMigrationState) { }; - bootfile_create(tmpfs, args->suspend_me); - src_state.suspend_me = args->suspend_me; - - if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { - memory_size = "150M"; - - if (g_str_equal(arch, "i386")) { - machine_alias = "pc"; - } else { - machine_alias = "q35"; - } - arch_opts = g_strdup_printf( - "-drive if=none,id=d0,file=%s,format=raw " - "-device ide-hd,drive=d0,secs=1,cyls=1,heads=1", bootpath); - start_address = X86_TEST_MEM_START; - end_address = X86_TEST_MEM_END; - } else if (g_str_equal(arch, "s390x")) { - memory_size = "128M"; - machine_alias = "s390-ccw-virtio"; - arch_opts = g_strdup_printf("-bios %s", bootpath); - start_address = S390_TEST_MEM_START; - end_address = S390_TEST_MEM_END; - } else if (strcmp(arch, "ppc64") == 0) { - memory_size = "256M"; - start_address = PPC_TEST_MEM_START; - end_address = PPC_TEST_MEM_END; - machine_alias = "pseries"; - machine_opts = "vsmt=8"; - arch_opts = g_strdup_printf( - "-nodefaults -machine " PSERIES_DEFAULT_CAPABILITIES " " - "-bios %s", bootpath); - } else if (strcmp(arch, "aarch64") == 0) { - memory_size = "150M"; - machine_alias = "virt"; - 
machine_opts = "gic-version=3"; - arch_opts = g_strdup_printf("-cpu max -kernel %s", bootpath); - start_address = ARM_TEST_MEM_START; - end_address = ARM_TEST_MEM_END; - } else { - g_assert_not_reached(); - } - - if (!getenv("QTEST_LOG") && args->hide_stderr) { -#ifndef _WIN32 - ignore_stderr = "2>/dev/null"; -#else - /* - * On Windows the QEMU executable is created via CreateProcess() and - * IO redirection does not work, so don't bother adding IO redirection - * to the command line. - */ - ignore_stderr = ""; -#endif - } else { - ignore_stderr = ""; - } - - if (args->use_shmem) { - shmem_path = g_strdup_printf("/dev/shm/qemu-%d", getpid()); - shmem_opts = g_strdup_printf( - "-object memory-backend-file,id=mem0,size=%s" - ",mem-path=%s,share=on -numa node,memdev=mem0", - memory_size, shmem_path); - } - - if (args->use_dirty_ring) { - kvm_opts = ",dirty-ring-size=4096"; - } - - if (!qtest_has_machine(machine_alias)) { - g_autofree char *msg = g_strdup_printf("machine %s not supported", machine_alias); - g_test_skip(msg); - return -1; - } - - machine = resolve_machine_version(machine_alias, QEMU_ENV_SRC, - QEMU_ENV_DST); - - g_test_message("Using machine type: %s", machine); - - cmd_source = g_strdup_printf("-accel kvm%s -accel tcg " - "-machine %s,%s " - "-name source,debug-threads=on " - "-m %s " - "-serial file:%s/src_serial " - "%s %s %s %s %s", - kvm_opts ? kvm_opts : "", - machine, machine_opts, - memory_size, tmpfs, - arch_opts ? arch_opts : "", - arch_source ? arch_source : "", - shmem_opts ? shmem_opts : "", - args->opts_source ? args->opts_source : "", - ignore_stderr); - if (!args->only_target) { - *from = qtest_init_with_env(QEMU_ENV_SRC, cmd_source); - qtest_qmp_set_event_callback(*from, - migrate_watch_for_events, - &src_state); - } - - cmd_target = g_strdup_printf("-accel kvm%s -accel tcg " - "-machine %s,%s " - "-name target,debug-threads=on " - "-m %s " - "-serial file:%s/dest_serial " - "-incoming %s " - "%s %s %s %s %s", - kvm_opts ? kvm_opts : "", - machine, machine_opts, - memory_size, tmpfs, uri, - arch_opts ? arch_opts : "", - arch_target ? arch_target : "", - shmem_opts ? shmem_opts : "", - args->opts_target ? args->opts_target : "", - ignore_stderr); - *to = qtest_init_with_env(QEMU_ENV_DST, cmd_target); - qtest_qmp_set_event_callback(*to, - migrate_watch_for_events, - &dst_state); - - /* - * Remove shmem file immediately to avoid memory leak in test failed case. - * It's valid because QEMU has already opened this file - */ - if (args->use_shmem) { - unlink(shmem_path); - } - - /* - * Always enable migration events. Libvirt always uses it, let's try - * to mimic as closer as that. 
- */ - migrate_set_capability(*from, "events", true); - migrate_set_capability(*to, "events", true); - - return 0; -} - -static void test_migrate_end(QTestState *from, QTestState *to, bool test_dest) -{ - unsigned char dest_byte_a, dest_byte_b, dest_byte_c, dest_byte_d; - - qtest_quit(from); - - if (test_dest) { - qtest_memread(to, start_address, &dest_byte_a, 1); - - /* Destination still running, wait for a byte to change */ - do { - qtest_memread(to, start_address, &dest_byte_b, 1); - usleep(1000 * 10); - } while (dest_byte_a == dest_byte_b); - - qtest_qmp_assert_success(to, "{ 'execute' : 'stop'}"); - - /* With it stopped, check nothing changes */ - qtest_memread(to, start_address, &dest_byte_c, 1); - usleep(1000 * 200); - qtest_memread(to, start_address, &dest_byte_d, 1); - g_assert_cmpint(dest_byte_c, ==, dest_byte_d); - - check_guests_ram(to); - } - - qtest_quit(to); - - cleanup("migsocket"); - cleanup("src_serial"); - cleanup("dest_serial"); - cleanup(FILE_TEST_FILENAME); -} - -#ifdef CONFIG_GNUTLS -struct TestMigrateTLSPSKData { - char *workdir; - char *workdiralt; - char *pskfile; - char *pskfilealt; -}; - -static void * -test_migrate_tls_psk_start_common(QTestState *from, - QTestState *to, - bool mismatch) -{ - struct TestMigrateTLSPSKData *data = - g_new0(struct TestMigrateTLSPSKData, 1); - - data->workdir = g_strdup_printf("%s/tlscredspsk0", tmpfs); - data->pskfile = g_strdup_printf("%s/%s", data->workdir, - QCRYPTO_TLS_CREDS_PSKFILE); - g_mkdir_with_parents(data->workdir, 0700); - test_tls_psk_init(data->pskfile); - - if (mismatch) { - data->workdiralt = g_strdup_printf("%s/tlscredspskalt0", tmpfs); - data->pskfilealt = g_strdup_printf("%s/%s", data->workdiralt, - QCRYPTO_TLS_CREDS_PSKFILE); - g_mkdir_with_parents(data->workdiralt, 0700); - test_tls_psk_init_alt(data->pskfilealt); - } - - qtest_qmp_assert_success(from, - "{ 'execute': 'object-add'," - " 'arguments': { 'qom-type': 'tls-creds-psk'," - " 'id': 'tlscredspsk0'," - " 'endpoint': 'client'," - " 'dir': %s," - " 'username': 'qemu'} }", - data->workdir); - - qtest_qmp_assert_success(to, - "{ 'execute': 'object-add'," - " 'arguments': { 'qom-type': 'tls-creds-psk'," - " 'id': 'tlscredspsk0'," - " 'endpoint': 'server'," - " 'dir': %s } }", - mismatch ? 
data->workdiralt : data->workdir); - - migrate_set_parameter_str(from, "tls-creds", "tlscredspsk0"); - migrate_set_parameter_str(to, "tls-creds", "tlscredspsk0"); - - return data; -} - -static void * -test_migrate_tls_psk_start_match(QTestState *from, - QTestState *to) -{ - return test_migrate_tls_psk_start_common(from, to, false); -} - -static void * -test_migrate_tls_psk_start_mismatch(QTestState *from, - QTestState *to) -{ - return test_migrate_tls_psk_start_common(from, to, true); -} - -static void -test_migrate_tls_psk_finish(QTestState *from, - QTestState *to, - void *opaque) -{ - struct TestMigrateTLSPSKData *data = opaque; - - test_tls_psk_cleanup(data->pskfile); - if (data->pskfilealt) { - test_tls_psk_cleanup(data->pskfilealt); - } - rmdir(data->workdir); - if (data->workdiralt) { - rmdir(data->workdiralt); - } - - g_free(data->workdiralt); - g_free(data->pskfilealt); - g_free(data->workdir); - g_free(data->pskfile); - g_free(data); -} - -#ifdef CONFIG_TASN1 -typedef struct { - char *workdir; - char *keyfile; - char *cacert; - char *servercert; - char *serverkey; - char *clientcert; - char *clientkey; -} TestMigrateTLSX509Data; - -typedef struct { - bool verifyclient; - bool clientcert; - bool hostileclient; - bool authzclient; - const char *certhostname; - const char *certipaddr; -} TestMigrateTLSX509; - -static void * -test_migrate_tls_x509_start_common(QTestState *from, - QTestState *to, - TestMigrateTLSX509 *args) -{ - TestMigrateTLSX509Data *data = g_new0(TestMigrateTLSX509Data, 1); - - data->workdir = g_strdup_printf("%s/tlscredsx5090", tmpfs); - data->keyfile = g_strdup_printf("%s/key.pem", data->workdir); - - data->cacert = g_strdup_printf("%s/ca-cert.pem", data->workdir); - data->serverkey = g_strdup_printf("%s/server-key.pem", data->workdir); - data->servercert = g_strdup_printf("%s/server-cert.pem", data->workdir); - if (args->clientcert) { - data->clientkey = g_strdup_printf("%s/client-key.pem", data->workdir); - data->clientcert = g_strdup_printf("%s/client-cert.pem", data->workdir); - } - - g_mkdir_with_parents(data->workdir, 0700); - - test_tls_init(data->keyfile); -#ifndef _WIN32 - g_assert(link(data->keyfile, data->serverkey) == 0); -#else - g_assert(CreateHardLink(data->serverkey, data->keyfile, NULL) != 0); -#endif - if (args->clientcert) { -#ifndef _WIN32 - g_assert(link(data->keyfile, data->clientkey) == 0); -#else - g_assert(CreateHardLink(data->clientkey, data->keyfile, NULL) != 0); -#endif - } - - TLS_ROOT_REQ_SIMPLE(cacertreq, data->cacert); - if (args->clientcert) { - TLS_CERT_REQ_SIMPLE_CLIENT(servercertreq, cacertreq, - args->hostileclient ? 
- QCRYPTO_TLS_TEST_CLIENT_HOSTILE_NAME : - QCRYPTO_TLS_TEST_CLIENT_NAME, - data->clientcert); - } - - TLS_CERT_REQ_SIMPLE_SERVER(clientcertreq, cacertreq, - data->servercert, - args->certhostname, - args->certipaddr); - - qtest_qmp_assert_success(from, - "{ 'execute': 'object-add'," - " 'arguments': { 'qom-type': 'tls-creds-x509'," - " 'id': 'tlscredsx509client0'," - " 'endpoint': 'client'," - " 'dir': %s," - " 'sanity-check': true," - " 'verify-peer': true} }", - data->workdir); - migrate_set_parameter_str(from, "tls-creds", "tlscredsx509client0"); - if (args->certhostname) { - migrate_set_parameter_str(from, "tls-hostname", args->certhostname); - } - - qtest_qmp_assert_success(to, - "{ 'execute': 'object-add'," - " 'arguments': { 'qom-type': 'tls-creds-x509'," - " 'id': 'tlscredsx509server0'," - " 'endpoint': 'server'," - " 'dir': %s," - " 'sanity-check': true," - " 'verify-peer': %i} }", - data->workdir, args->verifyclient); - migrate_set_parameter_str(to, "tls-creds", "tlscredsx509server0"); - - if (args->authzclient) { - qtest_qmp_assert_success(to, - "{ 'execute': 'object-add'," - " 'arguments': { 'qom-type': 'authz-simple'," - " 'id': 'tlsauthz0'," - " 'identity': %s} }", - "CN=" QCRYPTO_TLS_TEST_CLIENT_NAME); - migrate_set_parameter_str(to, "tls-authz", "tlsauthz0"); - } - - return data; -} - -/* - * The normal case: match server's cert hostname against - * whatever host we were telling QEMU to connect to (if any) - */ -static void * -test_migrate_tls_x509_start_default_host(QTestState *from, - QTestState *to) -{ - TestMigrateTLSX509 args = { - .verifyclient = true, - .clientcert = true, - .certipaddr = "127.0.0.1" - }; - return test_migrate_tls_x509_start_common(from, to, &args); -} - -/* - * The unusual case: the server's cert is different from - * the address we're telling QEMU to connect to (if any), - * so we must give QEMU an explicit hostname to validate - */ -static void * -test_migrate_tls_x509_start_override_host(QTestState *from, - QTestState *to) -{ - TestMigrateTLSX509 args = { - .verifyclient = true, - .clientcert = true, - .certhostname = "qemu.org", - }; - return test_migrate_tls_x509_start_common(from, to, &args); -} - -/* - * The unusual case: the server's cert is different from - * the address we're telling QEMU to connect to, and so we - * expect the client to reject the server - */ -static void * -test_migrate_tls_x509_start_mismatch_host(QTestState *from, - QTestState *to) -{ - TestMigrateTLSX509 args = { - .verifyclient = true, - .clientcert = true, - .certipaddr = "10.0.0.1", - }; - return test_migrate_tls_x509_start_common(from, to, &args); -} - -static void * -test_migrate_tls_x509_start_friendly_client(QTestState *from, - QTestState *to) -{ - TestMigrateTLSX509 args = { - .verifyclient = true, - .clientcert = true, - .authzclient = true, - .certipaddr = "127.0.0.1", - }; - return test_migrate_tls_x509_start_common(from, to, &args); -} - -static void * -test_migrate_tls_x509_start_hostile_client(QTestState *from, - QTestState *to) -{ - TestMigrateTLSX509 args = { - .verifyclient = true, - .clientcert = true, - .hostileclient = true, - .authzclient = true, - .certipaddr = "127.0.0.1", - }; - return test_migrate_tls_x509_start_common(from, to, &args); -} - -/* - * The case with no client certificate presented, - * and no server verification - */ -static void * -test_migrate_tls_x509_start_allow_anon_client(QTestState *from, - QTestState *to) -{ - TestMigrateTLSX509 args = { - .certipaddr = "127.0.0.1", - }; - return test_migrate_tls_x509_start_common(from, 
to, &args); -} - -/* - * The case with no client certificate presented, - * and server verification rejecting - */ -static void * -test_migrate_tls_x509_start_reject_anon_client(QTestState *from, - QTestState *to) -{ - TestMigrateTLSX509 args = { - .verifyclient = true, - .certipaddr = "127.0.0.1", - }; - return test_migrate_tls_x509_start_common(from, to, &args); -} - -static void -test_migrate_tls_x509_finish(QTestState *from, - QTestState *to, - void *opaque) -{ - TestMigrateTLSX509Data *data = opaque; - - test_tls_cleanup(data->keyfile); - g_free(data->keyfile); - - unlink(data->cacert); - g_free(data->cacert); - unlink(data->servercert); - g_free(data->servercert); - unlink(data->serverkey); - g_free(data->serverkey); - - if (data->clientcert) { - unlink(data->clientcert); - g_free(data->clientcert); - } - if (data->clientkey) { - unlink(data->clientkey); - g_free(data->clientkey); - } - - rmdir(data->workdir); - g_free(data->workdir); - - g_free(data); -} -#endif /* CONFIG_TASN1 */ -#endif /* CONFIG_GNUTLS */ - -static int migrate_postcopy_prepare(QTestState **from_ptr, - QTestState **to_ptr, - MigrateCommon *args) -{ - QTestState *from, *to; - - if (test_migrate_start(&from, &to, "defer", &args->start)) { - return -1; - } - - if (args->start_hook) { - args->postcopy_data = args->start_hook(from, to); - } - - migrate_set_capability(from, "postcopy-ram", true); - migrate_set_capability(to, "postcopy-ram", true); - migrate_set_capability(to, "postcopy-blocktime", true); - - if (args->postcopy_preempt) { - migrate_set_capability(from, "postcopy-preempt", true); - migrate_set_capability(to, "postcopy-preempt", true); - } - - migrate_ensure_non_converge(from); - - migrate_prepare_for_dirty_mem(from); - qtest_qmp_assert_success(to, "{ 'execute': 'migrate-incoming'," - " 'arguments': { " - " 'channels': [ { 'channel-type': 'main'," - " 'addr': { 'transport': 'socket'," - " 'type': 'inet'," - " 'host': '127.0.0.1'," - " 'port': '0' } } ] } }"); - - /* Wait for the first serial output from the source */ - wait_for_serial("src_serial"); - wait_for_suspend(from, &src_state); - - migrate_qmp(from, to, NULL, NULL, "{}"); - - migrate_wait_for_dirty_mem(from, to); - - *from_ptr = from; - *to_ptr = to; - - return 0; -} - -static void migrate_postcopy_complete(QTestState *from, QTestState *to, - MigrateCommon *args) -{ - wait_for_migration_complete(from); - - if (args->start.suspend_me) { - /* wakeup succeeds only if guest is suspended */ - qtest_qmp_assert_success(to, "{'execute': 'system_wakeup'}"); - } - - /* Make sure we get at least one "B" on destination */ - wait_for_serial("dest_serial"); - - if (uffd_feature_thread_id) { - read_blocktime(to); - } - - if (args->finish_hook) { - args->finish_hook(from, to, args->postcopy_data); - args->postcopy_data = NULL; - } - - test_migrate_end(from, to, true); -} - -static void test_postcopy_common(MigrateCommon *args) -{ - QTestState *from, *to; - - if (migrate_postcopy_prepare(&from, &to, args)) { - return; - } - migrate_postcopy_start(from, to); - migrate_postcopy_complete(from, to, args); -} - -static void test_postcopy(void) -{ - MigrateCommon args = { }; - - test_postcopy_common(&args); -} - -static void test_postcopy_suspend(void) -{ - MigrateCommon args = { - .start.suspend_me = true, - }; - - test_postcopy_common(&args); -} - -static void test_postcopy_preempt(void) -{ - MigrateCommon args = { - .postcopy_preempt = true, - }; - - test_postcopy_common(&args); -} - -#ifdef CONFIG_GNUTLS -static void test_postcopy_tls_psk(void) -{ - MigrateCommon 
args = { - .start_hook = test_migrate_tls_psk_start_match, - .finish_hook = test_migrate_tls_psk_finish, - }; - - test_postcopy_common(&args); -} - -static void test_postcopy_preempt_tls_psk(void) -{ - MigrateCommon args = { - .postcopy_preempt = true, - .start_hook = test_migrate_tls_psk_start_match, - .finish_hook = test_migrate_tls_psk_finish, - }; - - test_postcopy_common(&args); -} -#endif - -static void wait_for_postcopy_status(QTestState *one, const char *status) -{ - wait_for_migration_status(one, status, - (const char * []) { "failed", "active", - "completed", NULL }); -} - -static void postcopy_recover_fail(QTestState *from, QTestState *to, - PostcopyRecoveryFailStage stage) -{ -#ifndef _WIN32 - bool fail_early = (stage == POSTCOPY_FAIL_CHANNEL_ESTABLISH); - int ret, pair1[2], pair2[2]; - char c; - - g_assert(stage > POSTCOPY_FAIL_NONE && stage < POSTCOPY_FAIL_MAX); - - /* Create two unrelated socketpairs */ - ret = qemu_socketpair(PF_LOCAL, SOCK_STREAM, 0, pair1); - g_assert_cmpint(ret, ==, 0); - - ret = qemu_socketpair(PF_LOCAL, SOCK_STREAM, 0, pair2); - g_assert_cmpint(ret, ==, 0); - - /* - * Give the guests unpaired ends of the sockets, so they'll all blocked - * at reading. This mimics a wrong channel established. - */ - qtest_qmp_fds_assert_success(from, &pair1[0], 1, - "{ 'execute': 'getfd'," - " 'arguments': { 'fdname': 'fd-mig' }}"); - qtest_qmp_fds_assert_success(to, &pair2[0], 1, - "{ 'execute': 'getfd'," - " 'arguments': { 'fdname': 'fd-mig' }}"); - - /* - * Write the 1st byte as QEMU_VM_COMMAND (0x8) for the dest socket, to - * emulate the 1st byte of a real recovery, but stops from there to - * keep dest QEMU in RECOVER. This is needed so that we can kick off - * the recover process on dest QEMU (by triggering the G_IO_IN event). - * - * NOTE: this trick is not needed on src QEMUs, because src doesn't - * rely on an pre-existing G_IO_IN event, so it will always trigger the - * upcoming recovery anyway even if it can read nothing. - */ -#define QEMU_VM_COMMAND 0x08 - c = QEMU_VM_COMMAND; - ret = send(pair2[1], &c, 1, 0); - g_assert_cmpint(ret, ==, 1); - - if (stage == POSTCOPY_FAIL_CHANNEL_ESTABLISH) { - /* - * This will make src QEMU to fail at an early stage when trying to - * resume later, where it shouldn't reach RECOVER stage at all. - */ - close(pair1[1]); - } - - migrate_recover(to, "fd:fd-mig"); - migrate_qmp(from, to, "fd:fd-mig", NULL, "{'resume': true}"); - - /* - * Source QEMU has an extra RECOVER_SETUP phase, dest doesn't have it. - * Make sure it appears along the way. - */ - migration_event_wait(from, "postcopy-recover-setup"); - - if (fail_early) { - /* - * When fails at reconnection, src QEMU will automatically goes - * back to PAUSED state. Making sure there is an event in this - * case: Libvirt relies on this to detect early reconnection - * errors. - */ - migration_event_wait(from, "postcopy-paused"); - } else { - /* - * We want to test "fail later" at RECOVER stage here. Make sure - * both QEMU instances will go into RECOVER stage first, then test - * kicking them out using migrate-pause. - * - * Explicitly check the RECOVER event on src, that's what Libvirt - * relies on, rather than polling. - */ - migration_event_wait(from, "postcopy-recover"); - wait_for_postcopy_status(from, "postcopy-recover"); - - /* Need an explicit kick on src QEMU in this case */ - migrate_pause(from); - } - - /* - * For all failure cases, we'll reach such states on both sides now. - * Check them. 
- */ - wait_for_postcopy_status(from, "postcopy-paused"); - wait_for_postcopy_status(to, "postcopy-recover"); - - /* - * Kick dest QEMU out too. This is normally not needed in reality - * because when the channel is shutdown it should also happen on src. - * However here we used separate socket pairs so we need to do that - * explicitly. - */ - migrate_pause(to); - wait_for_postcopy_status(to, "postcopy-paused"); - - close(pair1[0]); - close(pair2[0]); - close(pair2[1]); - - if (stage != POSTCOPY_FAIL_CHANNEL_ESTABLISH) { - close(pair1[1]); - } -#endif -} - -static void test_postcopy_recovery_common(MigrateCommon *args) -{ - QTestState *from, *to; - g_autofree char *uri = NULL; - - /* Always hide errors for postcopy recover tests since they're expected */ - args->start.hide_stderr = true; - - if (migrate_postcopy_prepare(&from, &to, args)) { - return; - } - - /* Turn postcopy speed down, 4K/s is slow enough on any machines */ - migrate_set_parameter_int(from, "max-postcopy-bandwidth", 4096); - - /* Now we start the postcopy */ - migrate_postcopy_start(from, to); - - /* - * Wait until postcopy is really started; we can only run the - * migrate-pause command during a postcopy - */ - wait_for_migration_status(from, "postcopy-active", NULL); - - /* - * Manually stop the postcopy migration. This emulates a network - * failure with the migration socket - */ - migrate_pause(from); - - /* - * Wait for destination side to reach postcopy-paused state. The - * migrate-recover command can only succeed if destination machine - * is in the paused state - */ - wait_for_postcopy_status(to, "postcopy-paused"); - wait_for_postcopy_status(from, "postcopy-paused"); - - if (args->postcopy_recovery_fail_stage) { - /* - * Test when a wrong socket specified for recover, and then the - * ability to kick it out, and continue with a correct socket. 
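The blocking-read trick in postcopy_recover_fail() above needs nothing beyond ordinary AF_UNIX socketpairs: an end whose peer never writes simply has no data to read, and a single injected QEMU_VM_COMMAND byte (0x08) is enough to wake the destination's G_IO_IN handler. A standalone sketch of that behaviour, separate from the qtest framework:

    #include <assert.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int pair[2];
        char c = 0x08;   /* same value as QEMU_VM_COMMAND in the test */
        char buf;

        assert(socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == 0);

        /* nothing written yet: a reader on pair[0] would block (shown here
         * with a non-blocking read returning EAGAIN) */
        fcntl(pair[0], F_SETFL, O_NONBLOCK);
        assert(read(pair[0], &buf, 1) == -1 && errno == EAGAIN);

        /* injecting the single command byte wakes the reader, which is all
         * the test needs to kick the destination's recovery path */
        assert(write(pair[1], &c, 1) == 1);
        assert(read(pair[0], &buf, 1) == 1 && buf == 0x08);

        printf("first stream byte received: 0x%02x\n", buf);
        close(pair[0]);
        close(pair[1]);
        return 0;
    }

The test uses two unrelated socketpairs so that source and destination each hold an end whose peer is never connected to the other QEMU, which is what makes the channel look "wrong" until the recovery is retried properly.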
- */ - postcopy_recover_fail(from, to, args->postcopy_recovery_fail_stage); - /* continue with a good recovery */ - } - - /* - * Create a new socket to emulate a new channel that is different - * from the broken migration channel; tell the destination to - * listen to the new port - */ - uri = g_strdup_printf("unix:%s/migsocket-recover", tmpfs); - migrate_recover(to, uri); - - /* - * Try to rebuild the migration channel using the resume flag and - * the newly created channel - */ - migrate_qmp(from, to, uri, NULL, "{'resume': true}"); - - /* Restore the postcopy bandwidth to unlimited */ - migrate_set_parameter_int(from, "max-postcopy-bandwidth", 0); - - migrate_postcopy_complete(from, to, args); -} - -static void test_postcopy_recovery(void) -{ - MigrateCommon args = { }; - - test_postcopy_recovery_common(&args); -} - -static void test_postcopy_recovery_fail_handshake(void) -{ - MigrateCommon args = { - .postcopy_recovery_fail_stage = POSTCOPY_FAIL_RECOVERY, - }; - - test_postcopy_recovery_common(&args); -} - -static void test_postcopy_recovery_fail_reconnect(void) -{ - MigrateCommon args = { - .postcopy_recovery_fail_stage = POSTCOPY_FAIL_CHANNEL_ESTABLISH, - }; - - test_postcopy_recovery_common(&args); -} - -#ifdef CONFIG_GNUTLS -static void test_postcopy_recovery_tls_psk(void) -{ - MigrateCommon args = { - .start_hook = test_migrate_tls_psk_start_match, - .finish_hook = test_migrate_tls_psk_finish, - }; - - test_postcopy_recovery_common(&args); -} -#endif - -static void test_postcopy_preempt_recovery(void) -{ - MigrateCommon args = { - .postcopy_preempt = true, - }; - - test_postcopy_recovery_common(&args); -} - -#ifdef CONFIG_GNUTLS -/* This contains preempt+recovery+tls test altogether */ -static void test_postcopy_preempt_all(void) -{ - MigrateCommon args = { - .postcopy_preempt = true, - .start_hook = test_migrate_tls_psk_start_match, - .finish_hook = test_migrate_tls_psk_finish, - }; - - test_postcopy_recovery_common(&args); -} - -#endif - -static void test_baddest(void) -{ - MigrateStart args = { - .hide_stderr = true - }; - QTestState *from, *to; - - if (test_migrate_start(&from, &to, "tcp:127.0.0.1:0", &args)) { - return; - } - migrate_qmp(from, to, "tcp:127.0.0.1:0", NULL, "{}"); - wait_for_migration_fail(from, false); - test_migrate_end(from, to, false); -} - -#ifndef _WIN32 -static void test_analyze_script(void) -{ - MigrateStart args = { - .opts_source = "-uuid 11111111-1111-1111-1111-111111111111", - }; - QTestState *from, *to; - g_autofree char *uri = NULL; - g_autofree char *file = NULL; - int pid, wstatus; - const char *python = g_getenv("PYTHON"); - - if (!python) { - g_test_skip("PYTHON variable not set"); - return; - } - - /* dummy url */ - if (test_migrate_start(&from, &to, "tcp:127.0.0.1:0", &args)) { - return; - } - - /* - * Setting these two capabilities causes the "configuration" - * vmstate to include subsections for them. The script needs to - * parse those subsections properly. 
- */ - migrate_set_capability(from, "validate-uuid", true); - migrate_set_capability(from, "x-ignore-shared", true); - - file = g_strdup_printf("%s/migfile", tmpfs); - uri = g_strdup_printf("exec:cat > %s", file); - - migrate_ensure_converge(from); - migrate_qmp(from, to, uri, NULL, "{}"); - wait_for_migration_complete(from); - - pid = fork(); - if (!pid) { - close(1); - open("/dev/null", O_WRONLY); - execl(python, python, ANALYZE_SCRIPT, "-f", file, NULL); - g_assert_not_reached(); - } - - g_assert(waitpid(pid, &wstatus, 0) == pid); - if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus) != 0) { - g_test_message("Failed to analyze the migration stream"); - g_test_fail(); - } - test_migrate_end(from, to, false); - cleanup("migfile"); -} - -static void test_vmstate_checker_script(void) -{ - g_autofree gchar *cmd_src = NULL; - g_autofree gchar *cmd_dst = NULL; - g_autofree gchar *vmstate_src = NULL; - g_autofree gchar *vmstate_dst = NULL; - const char *machine_alias, *machine_opts = ""; - g_autofree char *machine = NULL; - const char *arch = qtest_get_arch(); - int pid, wstatus; - const char *python = g_getenv("PYTHON"); - - if (!getenv(QEMU_ENV_SRC) && !getenv(QEMU_ENV_DST)) { - g_test_skip("Test needs two different QEMU versions"); - return; - } - - if (!python) { - g_test_skip("PYTHON variable not set"); - return; - } - - if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { - if (g_str_equal(arch, "i386")) { - machine_alias = "pc"; - } else { - machine_alias = "q35"; - } - } else if (g_str_equal(arch, "s390x")) { - machine_alias = "s390-ccw-virtio"; - } else if (strcmp(arch, "ppc64") == 0) { - machine_alias = "pseries"; - } else if (strcmp(arch, "aarch64") == 0) { - machine_alias = "virt"; - } else { - g_assert_not_reached(); - } - - if (!qtest_has_machine(machine_alias)) { - g_autofree char *msg = g_strdup_printf("machine %s not supported", machine_alias); - g_test_skip(msg); - return; - } - - machine = resolve_machine_version(machine_alias, QEMU_ENV_SRC, - QEMU_ENV_DST); - - vmstate_src = g_strdup_printf("%s/vmstate-src", tmpfs); - vmstate_dst = g_strdup_printf("%s/vmstate-dst", tmpfs); - - cmd_dst = g_strdup_printf("-machine %s,%s -dump-vmstate %s", - machine, machine_opts, vmstate_dst); - cmd_src = g_strdup_printf("-machine %s,%s -dump-vmstate %s", - machine, machine_opts, vmstate_src); - - qtest_init_with_env_no_handshake(QEMU_ENV_SRC, cmd_src); - qtest_init_with_env_no_handshake(QEMU_ENV_DST, cmd_dst); - - pid = fork(); - if (!pid) { - close(1); - open("/dev/null", O_WRONLY); - execl(python, python, VMSTATE_CHECKER_SCRIPT, - "-s", vmstate_src, - "-d", vmstate_dst, - NULL); - g_assert_not_reached(); - } - - g_assert(waitpid(pid, &wstatus, 0) == pid); - if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus) != 0) { - g_test_message("Failed to run vmstate-static-checker.py"); - g_test_fail(); - } - - cleanup("vmstate-src"); - cleanup("vmstate-dst"); -} -#endif - -static void test_precopy_common(MigrateCommon *args) -{ - QTestState *from, *to; - void *data_hook = NULL; - - if (test_migrate_start(&from, &to, args->listen_uri, &args->start)) { - return; - } - - if (args->start_hook) { - data_hook = args->start_hook(from, to); - } - - /* Wait for the first serial output from the source */ - if (args->result == MIG_TEST_SUCCEED) { - wait_for_serial("src_serial"); - wait_for_suspend(from, &src_state); - } - - if (args->live) { - migrate_ensure_non_converge(from); - migrate_prepare_for_dirty_mem(from); - } else { - /* - * Testing non-live migration, we allow it to run at - * full speed to 
ensure short test case duration. - * For tests expected to fail, we don't need to - * change anything. - */ - if (args->result == MIG_TEST_SUCCEED) { - qtest_qmp_assert_success(from, "{ 'execute' : 'stop'}"); - wait_for_stop(from, &src_state); - migrate_ensure_converge(from); - } - } - - if (args->result == MIG_TEST_QMP_ERROR) { - migrate_qmp_fail(from, args->connect_uri, args->connect_channels, "{}"); - goto finish; - } - - migrate_qmp(from, to, args->connect_uri, args->connect_channels, "{}"); - - if (args->result != MIG_TEST_SUCCEED) { - bool allow_active = args->result == MIG_TEST_FAIL; - wait_for_migration_fail(from, allow_active); - - if (args->result == MIG_TEST_FAIL_DEST_QUIT_ERR) { - qtest_set_expected_status(to, EXIT_FAILURE); - } - } else { - if (args->live) { - /* - * For initial iteration(s) we must do a full pass, - * but for the final iteration, we need only wait - * for some dirty mem before switching to converge - */ - while (args->iterations > 1) { - wait_for_migration_pass(from); - args->iterations--; - } - migrate_wait_for_dirty_mem(from, to); - - migrate_ensure_converge(from); - - /* - * We do this first, as it has a timeout to stop us - * hanging forever if migration didn't converge - */ - wait_for_migration_complete(from); - - wait_for_stop(from, &src_state); - - } else { - wait_for_migration_complete(from); - /* - * Must wait for dst to finish reading all incoming - * data on the socket before issuing 'cont' otherwise - * it'll be ignored - */ - wait_for_migration_complete(to); - - qtest_qmp_assert_success(to, "{ 'execute' : 'cont'}"); - } - - wait_for_resume(to, &dst_state); - - if (args->start.suspend_me) { - /* wakeup succeeds only if guest is suspended */ - qtest_qmp_assert_success(to, "{'execute': 'system_wakeup'}"); - } - - wait_for_serial("dest_serial"); - } - -finish: - if (args->finish_hook) { - args->finish_hook(from, to, data_hook); - } - - test_migrate_end(from, to, args->result == MIG_TEST_SUCCEED); -} - -static void file_dirty_offset_region(void) -{ - g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME); - size_t size = FILE_TEST_OFFSET; - g_autofree char *data = g_new0(char, size); - - memset(data, FILE_TEST_MARKER, size); - g_assert(g_file_set_contents(path, data, size, NULL)); -} - -static void file_check_offset_region(void) -{ - g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME); - size_t size = FILE_TEST_OFFSET; - g_autofree char *expected = g_new0(char, size); - g_autofree char *actual = NULL; - uint64_t *stream_start; - - /* - * Ensure the skipped offset region's data has not been touched - * and the migration stream starts at the right place. - */ - - memset(expected, FILE_TEST_MARKER, size); - - g_assert(g_file_get_contents(path, &actual, NULL, NULL)); - g_assert(!memcmp(actual, expected, size)); - - stream_start = (uint64_t *)(actual + size); - g_assert_cmpint(cpu_to_be64(*stream_start) >> 32, ==, QEMU_VM_FILE_MAGIC); -} - -static void test_file_common(MigrateCommon *args, bool stop_src) -{ - QTestState *from, *to; - void *data_hook = NULL; - bool check_offset = false; - - if (test_migrate_start(&from, &to, args->listen_uri, &args->start)) { - return; - } - - /* - * File migration is never live. We can keep the source VM running - * during migration, but the destination will not be running - * concurrently. 
- */ - g_assert_false(args->live); - - if (g_strrstr(args->connect_uri, "offset=")) { - check_offset = true; - /* - * This comes before the start_hook because it's equivalent to - * a management application creating the file and writing to - * it so hooks should expect the file to be already present. - */ - file_dirty_offset_region(); - } - - if (args->start_hook) { - data_hook = args->start_hook(from, to); - } - - migrate_ensure_converge(from); - wait_for_serial("src_serial"); - - if (stop_src) { - qtest_qmp_assert_success(from, "{ 'execute' : 'stop'}"); - wait_for_stop(from, &src_state); - } - - if (args->result == MIG_TEST_QMP_ERROR) { - migrate_qmp_fail(from, args->connect_uri, NULL, "{}"); - goto finish; - } - - migrate_qmp(from, to, args->connect_uri, NULL, "{}"); - wait_for_migration_complete(from); - - /* - * We need to wait for the source to finish before starting the - * destination. - */ - migrate_incoming_qmp(to, args->connect_uri, "{}"); - wait_for_migration_complete(to); - - if (stop_src) { - qtest_qmp_assert_success(to, "{ 'execute' : 'cont'}"); - } - wait_for_resume(to, &dst_state); - - wait_for_serial("dest_serial"); - - if (check_offset) { - file_check_offset_region(); - } - -finish: - if (args->finish_hook) { - args->finish_hook(from, to, data_hook); - } - - test_migrate_end(from, to, args->result == MIG_TEST_SUCCEED); -} - -static void test_precopy_unix_plain(void) -{ - g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - MigrateCommon args = { - .listen_uri = uri, - .connect_uri = uri, - /* - * The simplest use case of precopy, covering smoke tests of - * get-dirty-log dirty tracking. - */ - .live = true, - }; - - test_precopy_common(&args); -} - -static void test_precopy_unix_suspend_live(void) -{ - g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - MigrateCommon args = { - .listen_uri = uri, - .connect_uri = uri, - /* - * despite being live, the test is fast because the src - * suspends immediately. - */ - .live = true, - .start.suspend_me = true, - }; - - test_precopy_common(&args); -} - -static void test_precopy_unix_suspend_notlive(void) -{ - g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - MigrateCommon args = { - .listen_uri = uri, - .connect_uri = uri, - .start.suspend_me = true, - }; - - test_precopy_common(&args); -} - -static void test_precopy_unix_dirty_ring(void) -{ - g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - MigrateCommon args = { - .start = { - .use_dirty_ring = true, - }, - .listen_uri = uri, - .connect_uri = uri, - /* - * Besides the precopy/unix basic test, cover dirty ring interface - * rather than get-dirty-log. 
- */ - .live = true, - }; - - test_precopy_common(&args); -} - -#ifdef CONFIG_GNUTLS -static void test_precopy_unix_tls_psk(void) -{ - g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = uri, - .start_hook = test_migrate_tls_psk_start_match, - .finish_hook = test_migrate_tls_psk_finish, - }; - - test_precopy_common(&args); -} - -#ifdef CONFIG_TASN1 -static void test_precopy_unix_tls_x509_default_host(void) -{ - g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - MigrateCommon args = { - .start = { - .hide_stderr = true, - }, - .connect_uri = uri, - .listen_uri = uri, - .start_hook = test_migrate_tls_x509_start_default_host, - .finish_hook = test_migrate_tls_x509_finish, - .result = MIG_TEST_FAIL_DEST_QUIT_ERR, - }; - - test_precopy_common(&args); -} - -static void test_precopy_unix_tls_x509_override_host(void) -{ - g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = uri, - .start_hook = test_migrate_tls_x509_start_override_host, - .finish_hook = test_migrate_tls_x509_finish, - }; - - test_precopy_common(&args); -} -#endif /* CONFIG_TASN1 */ -#endif /* CONFIG_GNUTLS */ - -#if 0 -/* Currently upset on aarch64 TCG */ -static void test_ignore_shared(void) -{ - g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - QTestState *from, *to; - - if (test_migrate_start(&from, &to, uri, false, true, NULL, NULL)) { - return; - } - - migrate_ensure_non_converge(from); - migrate_prepare_for_dirty_mem(from); - - migrate_set_capability(from, "x-ignore-shared", true); - migrate_set_capability(to, "x-ignore-shared", true); - - /* Wait for the first serial output from the source */ - wait_for_serial("src_serial"); - - migrate_qmp(from, to, uri, NULL, "{}"); - - migrate_wait_for_dirty_mem(from, to); - - wait_for_stop(from, &src_state); - - qtest_qmp_eventwait(to, "RESUME"); - - wait_for_serial("dest_serial"); - wait_for_migration_complete(from); - - /* Check whether shared RAM has been really skipped */ - g_assert_cmpint(read_ram_property_int(from, "transferred"), <, 1024 * 1024); - - test_migrate_end(from, to, true); -} -#endif - -static void * -test_migrate_xbzrle_start(QTestState *from, - QTestState *to) -{ - migrate_set_parameter_int(from, "xbzrle-cache-size", 33554432); - - migrate_set_capability(from, "xbzrle", true); - migrate_set_capability(to, "xbzrle", true); - - return NULL; -} - -static void test_precopy_unix_xbzrle(void) -{ - g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = uri, - .start_hook = test_migrate_xbzrle_start, - .iterations = 2, - /* - * XBZRLE needs pages to be modified when doing the 2nd+ round - * iteration to have real data pushed to the stream. 
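The comment above is why test_precopy_unix_xbzrle() runs live and forces extra iterations: XBZRLE transmits a delta of each page against the copy cached from an earlier pass, so a page that was not re-dirtied contributes nothing new to the stream. A deliberately simplified sketch of the delta idea (not QEMU's actual XBZRLE encoding):

    #include <stdio.h>
    #include <string.h>

    #define PAGE 64   /* tiny "page" for the demo */

    int main(void)
    {
        unsigned char prev[PAGE], cur[PAGE], delta[PAGE];
        int i, changed = 0;

        memset(prev, 0xAA, PAGE);        /* copy of the page already sent */
        memcpy(cur, prev, PAGE);         /* current page content */
        cur[3] ^= 0xFF;                  /* guest touched a single byte */

        for (i = 0; i < PAGE; i++) {
            delta[i] = prev[i] ^ cur[i];
            changed += delta[i] != 0;
        }

        /* only changed bytes carry information; an untouched page yields an
         * all-zero delta and nothing worth pushing to the stream */
        printf("%d of %d bytes changed since the last pass\n", changed, PAGE);
        return 0;
    }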
- */ - .live = true, - }; - - test_precopy_common(&args); -} - -static void test_precopy_file(void) -{ - g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, - FILE_TEST_FILENAME); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = "defer", - }; - - test_file_common(&args, true); -} - -#ifndef _WIN32 -static void fdset_add_fds(QTestState *qts, const char *file, int flags, - int num_fds, bool direct_io) -{ - for (int i = 0; i < num_fds; i++) { - int fd; - -#ifdef O_DIRECT - /* only secondary channels can use direct-io */ - if (direct_io && i != 0) { - flags |= O_DIRECT; - } -#endif - - fd = open(file, flags, 0660); - assert(fd != -1); - - qtest_qmp_fds_assert_success(qts, &fd, 1, "{'execute': 'add-fd', " - "'arguments': {'fdset-id': 1}}"); - close(fd); - } -} - -static void *file_offset_fdset_start_hook(QTestState *from, QTestState *to) -{ - g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME); - - fdset_add_fds(from, file, O_WRONLY, 1, false); - fdset_add_fds(to, file, O_RDONLY, 1, false); - - return NULL; -} - -static void test_precopy_file_offset_fdset(void) -{ - g_autofree char *uri = g_strdup_printf("file:/dev/fdset/1,offset=%d", - FILE_TEST_OFFSET); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = "defer", - .start_hook = file_offset_fdset_start_hook, - }; - - test_file_common(&args, false); -} -#endif - -static void test_precopy_file_offset(void) -{ - g_autofree char *uri = g_strdup_printf("file:%s/%s,offset=%d", tmpfs, - FILE_TEST_FILENAME, - FILE_TEST_OFFSET); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = "defer", - }; - - test_file_common(&args, false); -} - -static void test_precopy_file_offset_bad(void) -{ - /* using a value not supported by qemu_strtosz() */ - g_autofree char *uri = g_strdup_printf("file:%s/%s,offset=0x20M", - tmpfs, FILE_TEST_FILENAME); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = "defer", - .result = MIG_TEST_QMP_ERROR, - }; - - test_file_common(&args, false); -} - -static void *test_mode_reboot_start(QTestState *from, QTestState *to) -{ - migrate_set_parameter_str(from, "mode", "cpr-reboot"); - migrate_set_parameter_str(to, "mode", "cpr-reboot"); - - migrate_set_capability(from, "x-ignore-shared", true); - migrate_set_capability(to, "x-ignore-shared", true); - - return NULL; -} - -static void *migrate_mapped_ram_start(QTestState *from, QTestState *to) -{ - migrate_set_capability(from, "mapped-ram", true); - migrate_set_capability(to, "mapped-ram", true); - - return NULL; -} - -static void test_mode_reboot(void) -{ - g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, - FILE_TEST_FILENAME); - MigrateCommon args = { - .start.use_shmem = true, - .connect_uri = uri, - .listen_uri = "defer", - .start_hook = test_mode_reboot_start - }; - - test_file_common(&args, true); -} - -static void test_precopy_file_mapped_ram_live(void) -{ - g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, - FILE_TEST_FILENAME); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = "defer", - .start_hook = migrate_mapped_ram_start, - }; - - test_file_common(&args, false); -} - -static void test_precopy_file_mapped_ram(void) -{ - g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, - FILE_TEST_FILENAME); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = "defer", - .start_hook = migrate_mapped_ram_start, - }; - - test_file_common(&args, true); -} - -static void *migrate_multifd_mapped_ram_start(QTestState *from, QTestState *to) -{ - 
migrate_mapped_ram_start(from, to); - - migrate_set_parameter_int(from, "multifd-channels", 4); - migrate_set_parameter_int(to, "multifd-channels", 4); - - migrate_set_capability(from, "multifd", true); - migrate_set_capability(to, "multifd", true); - - return NULL; -} - -static void test_multifd_file_mapped_ram_live(void) -{ - g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, - FILE_TEST_FILENAME); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = "defer", - .start_hook = migrate_multifd_mapped_ram_start, - }; - - test_file_common(&args, false); -} - -static void test_multifd_file_mapped_ram(void) -{ - g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, - FILE_TEST_FILENAME); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = "defer", - .start_hook = migrate_multifd_mapped_ram_start, - }; - - test_file_common(&args, true); -} - -static void *multifd_mapped_ram_dio_start(QTestState *from, QTestState *to) -{ - migrate_multifd_mapped_ram_start(from, to); - - migrate_set_parameter_bool(from, "direct-io", true); - migrate_set_parameter_bool(to, "direct-io", true); - - return NULL; -} - -static void test_multifd_file_mapped_ram_dio(void) -{ - g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, - FILE_TEST_FILENAME); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = "defer", - .start_hook = multifd_mapped_ram_dio_start, - }; - - if (!probe_o_direct_support(tmpfs)) { - g_test_skip("Filesystem does not support O_DIRECT"); - return; - } - - test_file_common(&args, true); -} - -#ifndef _WIN32 -static void multifd_mapped_ram_fdset_end(QTestState *from, QTestState *to, - void *opaque) -{ - QDict *resp; - QList *fdsets; - - /* - * Remove the fdsets after migration, otherwise a second migration - * would fail due fdset reuse. - */ - qtest_qmp_assert_success(from, "{'execute': 'remove-fd', " - "'arguments': { 'fdset-id': 1}}"); - - /* - * Make sure no fdsets are left after migration, otherwise a - * second migration would fail due fdset reuse. 
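The direct-io variants above are skipped unless probe_o_direct_support() succeeds for the temporary directory; conceptually the probe just tries to open a scratch file with O_DIRECT and treats failure as lack of filesystem support. A standalone sketch of such a check (the path and messages are illustrative, not the helper's real implementation):

    #define _GNU_SOURCE            /* O_DIRECT is a GNU extension on Linux */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        const char *dir = argc > 1 ? argv[1] : ".";
        char path[4096];
        int fd;

        snprintf(path, sizeof(path), "%s/.o_direct_probe", dir);
        fd = open(path, O_CREAT | O_RDWR | O_DIRECT, 0600);
        if (fd < 0) {
            perror("O_DIRECT open failed");
            return 1;              /* the test would g_test_skip() here */
        }

        close(fd);
        unlink(path);
        printf("%s can host O_DIRECT files\n", dir);
        return 0;
    }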
- */ - resp = qtest_qmp(from, "{'execute': 'query-fdsets', " - "'arguments': {}}"); - g_assert(qdict_haskey(resp, "return")); - fdsets = qdict_get_qlist(resp, "return"); - g_assert(fdsets && qlist_empty(fdsets)); -} - -static void *multifd_mapped_ram_fdset_dio(QTestState *from, QTestState *to) -{ - g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME); - - fdset_add_fds(from, file, O_WRONLY, 2, true); - fdset_add_fds(to, file, O_RDONLY, 2, true); - - migrate_multifd_mapped_ram_start(from, to); - migrate_set_parameter_bool(from, "direct-io", true); - migrate_set_parameter_bool(to, "direct-io", true); - - return NULL; -} - -static void *multifd_mapped_ram_fdset(QTestState *from, QTestState *to) -{ - g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME); - - fdset_add_fds(from, file, O_WRONLY, 2, false); - fdset_add_fds(to, file, O_RDONLY, 2, false); - - migrate_multifd_mapped_ram_start(from, to); - - return NULL; -} - -static void test_multifd_file_mapped_ram_fdset(void) -{ - g_autofree char *uri = g_strdup_printf("file:/dev/fdset/1,offset=%d", - FILE_TEST_OFFSET); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = "defer", - .start_hook = multifd_mapped_ram_fdset, - .finish_hook = multifd_mapped_ram_fdset_end, - }; - - test_file_common(&args, true); -} - -static void test_multifd_file_mapped_ram_fdset_dio(void) -{ - g_autofree char *uri = g_strdup_printf("file:/dev/fdset/1,offset=%d", - FILE_TEST_OFFSET); - MigrateCommon args = { - .connect_uri = uri, - .listen_uri = "defer", - .start_hook = multifd_mapped_ram_fdset_dio, - .finish_hook = multifd_mapped_ram_fdset_end, - }; - - if (!probe_o_direct_support(tmpfs)) { - g_test_skip("Filesystem does not support O_DIRECT"); - return; - } - - test_file_common(&args, true); -} -#endif /* !_WIN32 */ - -static void test_precopy_tcp_plain(void) -{ - MigrateCommon args = { - .listen_uri = "tcp:127.0.0.1:0", - }; - - test_precopy_common(&args); -} - -static void *test_migrate_switchover_ack_start(QTestState *from, QTestState *to) -{ - - migrate_set_capability(from, "return-path", true); - migrate_set_capability(to, "return-path", true); - - migrate_set_capability(from, "switchover-ack", true); - migrate_set_capability(to, "switchover-ack", true); - - return NULL; -} - -static void test_precopy_tcp_switchover_ack(void) -{ - MigrateCommon args = { - .listen_uri = "tcp:127.0.0.1:0", - .start_hook = test_migrate_switchover_ack_start, - /* - * Source VM must be running in order to consider the switchover ACK - * when deciding to do switchover or not. 
- */ - .live = true, - }; - - test_precopy_common(&args); -} - -#ifdef CONFIG_GNUTLS -static void test_precopy_tcp_tls_psk_match(void) -{ - MigrateCommon args = { - .listen_uri = "tcp:127.0.0.1:0", - .start_hook = test_migrate_tls_psk_start_match, - .finish_hook = test_migrate_tls_psk_finish, - }; - - test_precopy_common(&args); -} - -static void test_precopy_tcp_tls_psk_mismatch(void) -{ - MigrateCommon args = { - .start = { - .hide_stderr = true, - }, - .listen_uri = "tcp:127.0.0.1:0", - .start_hook = test_migrate_tls_psk_start_mismatch, - .finish_hook = test_migrate_tls_psk_finish, - .result = MIG_TEST_FAIL, - }; - - test_precopy_common(&args); -} - -#ifdef CONFIG_TASN1 -static void test_precopy_tcp_tls_x509_default_host(void) -{ - MigrateCommon args = { - .listen_uri = "tcp:127.0.0.1:0", - .start_hook = test_migrate_tls_x509_start_default_host, - .finish_hook = test_migrate_tls_x509_finish, - }; - - test_precopy_common(&args); -} - -static void test_precopy_tcp_tls_x509_override_host(void) -{ - MigrateCommon args = { - .listen_uri = "tcp:127.0.0.1:0", - .start_hook = test_migrate_tls_x509_start_override_host, - .finish_hook = test_migrate_tls_x509_finish, - }; - - test_precopy_common(&args); -} - -static void test_precopy_tcp_tls_x509_mismatch_host(void) -{ - MigrateCommon args = { - .start = { - .hide_stderr = true, - }, - .listen_uri = "tcp:127.0.0.1:0", - .start_hook = test_migrate_tls_x509_start_mismatch_host, - .finish_hook = test_migrate_tls_x509_finish, - .result = MIG_TEST_FAIL_DEST_QUIT_ERR, - }; - - test_precopy_common(&args); -} - -static void test_precopy_tcp_tls_x509_friendly_client(void) -{ - MigrateCommon args = { - .listen_uri = "tcp:127.0.0.1:0", - .start_hook = test_migrate_tls_x509_start_friendly_client, - .finish_hook = test_migrate_tls_x509_finish, - }; - - test_precopy_common(&args); -} - -static void test_precopy_tcp_tls_x509_hostile_client(void) -{ - MigrateCommon args = { - .start = { - .hide_stderr = true, - }, - .listen_uri = "tcp:127.0.0.1:0", - .start_hook = test_migrate_tls_x509_start_hostile_client, - .finish_hook = test_migrate_tls_x509_finish, - .result = MIG_TEST_FAIL, - }; - - test_precopy_common(&args); -} - -static void test_precopy_tcp_tls_x509_allow_anon_client(void) -{ - MigrateCommon args = { - .listen_uri = "tcp:127.0.0.1:0", - .start_hook = test_migrate_tls_x509_start_allow_anon_client, - .finish_hook = test_migrate_tls_x509_finish, - }; - - test_precopy_common(&args); -} - -static void test_precopy_tcp_tls_x509_reject_anon_client(void) -{ - MigrateCommon args = { - .start = { - .hide_stderr = true, - }, - .listen_uri = "tcp:127.0.0.1:0", - .start_hook = test_migrate_tls_x509_start_reject_anon_client, - .finish_hook = test_migrate_tls_x509_finish, - .result = MIG_TEST_FAIL, - }; - - test_precopy_common(&args); -} -#endif /* CONFIG_TASN1 */ -#endif /* CONFIG_GNUTLS */ - -#ifndef _WIN32 -static void *test_migrate_fd_start_hook(QTestState *from, - QTestState *to) -{ - int ret; - int pair[2]; - - /* Create two connected sockets for migration */ - ret = qemu_socketpair(PF_LOCAL, SOCK_STREAM, 0, pair); - g_assert_cmpint(ret, ==, 0); - - /* Send the 1st socket to the target */ - qtest_qmp_fds_assert_success(to, &pair[0], 1, - "{ 'execute': 'getfd'," - " 'arguments': { 'fdname': 'fd-mig' }}"); - close(pair[0]); - - /* Start incoming migration from the 1st socket */ - migrate_incoming_qmp(to, "fd:fd-mig", "{}"); - - /* Send the 2nd socket to the target */ - qtest_qmp_fds_assert_success(from, &pair[1], 1, - "{ 'execute': 'getfd'," - " 'arguments': { 
'fdname': 'fd-mig' }}"); - close(pair[1]); - - return NULL; -} - -static void test_migrate_fd_finish_hook(QTestState *from, - QTestState *to, - void *opaque) -{ - QDict *rsp; - const char *error_desc; - - /* Test closing fds */ - /* We assume, that QEMU removes named fd from its list, - * so this should fail */ - rsp = qtest_qmp(from, "{ 'execute': 'closefd'," - " 'arguments': { 'fdname': 'fd-mig' }}"); - g_assert_true(qdict_haskey(rsp, "error")); - error_desc = qdict_get_str(qdict_get_qdict(rsp, "error"), "desc"); - g_assert_cmpstr(error_desc, ==, "File descriptor named 'fd-mig' not found"); - qobject_unref(rsp); - - rsp = qtest_qmp(to, "{ 'execute': 'closefd'," - " 'arguments': { 'fdname': 'fd-mig' }}"); - g_assert_true(qdict_haskey(rsp, "error")); - error_desc = qdict_get_str(qdict_get_qdict(rsp, "error"), "desc"); - g_assert_cmpstr(error_desc, ==, "File descriptor named 'fd-mig' not found"); - qobject_unref(rsp); -} - -static void test_migrate_precopy_fd_socket(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .connect_uri = "fd:fd-mig", - .start_hook = test_migrate_fd_start_hook, - .finish_hook = test_migrate_fd_finish_hook - }; - test_precopy_common(&args); -} - -static void *migrate_precopy_fd_file_start(QTestState *from, QTestState *to) -{ - g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME); - int src_flags = O_CREAT | O_RDWR; - int dst_flags = O_CREAT | O_RDWR; - int fds[2]; - - fds[0] = open(file, src_flags, 0660); - assert(fds[0] != -1); - - fds[1] = open(file, dst_flags, 0660); - assert(fds[1] != -1); - - - qtest_qmp_fds_assert_success(to, &fds[0], 1, - "{ 'execute': 'getfd'," - " 'arguments': { 'fdname': 'fd-mig' }}"); - - qtest_qmp_fds_assert_success(from, &fds[1], 1, - "{ 'execute': 'getfd'," - " 'arguments': { 'fdname': 'fd-mig' }}"); - - close(fds[0]); - close(fds[1]); - - return NULL; -} - -static void test_migrate_precopy_fd_file(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .connect_uri = "fd:fd-mig", - .start_hook = migrate_precopy_fd_file_start, - .finish_hook = test_migrate_fd_finish_hook - }; - test_file_common(&args, true); -} -#endif /* _WIN32 */ - -static void do_test_validate_uuid(MigrateStart *args, bool should_fail) -{ - g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - QTestState *from, *to; - - if (test_migrate_start(&from, &to, uri, args)) { - return; - } - - /* - * UUID validation is at the begin of migration. So, the main process of - * migration is not interesting for us here. Thus, set huge downtime for - * very fast migration. 
- */ - migrate_set_parameter_int(from, "downtime-limit", 1000000); - migrate_set_capability(from, "validate-uuid", true); - - /* Wait for the first serial output from the source */ - wait_for_serial("src_serial"); - - migrate_qmp(from, to, uri, NULL, "{}"); - - if (should_fail) { - qtest_set_expected_status(to, EXIT_FAILURE); - wait_for_migration_fail(from, true); - } else { - wait_for_migration_complete(from); - } - - test_migrate_end(from, to, false); -} - -static void test_validate_uuid(void) -{ - MigrateStart args = { - .opts_source = "-uuid 11111111-1111-1111-1111-111111111111", - .opts_target = "-uuid 11111111-1111-1111-1111-111111111111", - }; - - do_test_validate_uuid(&args, false); -} - -static void test_validate_uuid_error(void) -{ - MigrateStart args = { - .opts_source = "-uuid 11111111-1111-1111-1111-111111111111", - .opts_target = "-uuid 22222222-2222-2222-2222-222222222222", - .hide_stderr = true, - }; - - do_test_validate_uuid(&args, true); -} - -static void test_validate_uuid_src_not_set(void) -{ - MigrateStart args = { - .opts_target = "-uuid 22222222-2222-2222-2222-222222222222", - .hide_stderr = true, - }; - - do_test_validate_uuid(&args, false); -} - -static void test_validate_uuid_dst_not_set(void) -{ - MigrateStart args = { - .opts_source = "-uuid 11111111-1111-1111-1111-111111111111", - .hide_stderr = true, - }; - - do_test_validate_uuid(&args, false); -} - -static void do_test_validate_uri_channel(MigrateCommon *args) -{ - QTestState *from, *to; - - if (test_migrate_start(&from, &to, args->listen_uri, &args->start)) { - return; - } - - /* Wait for the first serial output from the source */ - wait_for_serial("src_serial"); - - /* - * 'uri' and 'channels' validation is checked even before the migration - * starts. - */ - migrate_qmp_fail(from, args->connect_uri, args->connect_channels, "{}"); - test_migrate_end(from, to, false); -} - -static void test_validate_uri_channels_both_set(void) -{ - MigrateCommon args = { - .start = { - .hide_stderr = true, - }, - .listen_uri = "defer", - .connect_uri = "tcp:127.0.0.1:0", - .connect_channels = "[ { 'channel-type': 'main'," - " 'addr': { 'transport': 'socket'," - " 'type': 'inet'," - " 'host': '127.0.0.1'," - " 'port': '0' } } ]", - }; - - do_test_validate_uri_channel(&args); -} - -static void test_validate_uri_channels_none_set(void) -{ - MigrateCommon args = { - .start = { - .hide_stderr = true, - }, - .listen_uri = "defer", - }; - - do_test_validate_uri_channel(&args); -} - -/* - * The way auto_converge works, we need to do too many passes to - * run this test. Auto_converge logic is only run once every - * three iterations, so: - * - * - 3 iterations without auto_converge enabled - * - 3 iterations with pct = 5 - * - 3 iterations with pct = 30 - * - 3 iterations with pct = 55 - * - 3 iterations with pct = 80 - * - 3 iterations with pct = 95 (max(95, 80 + 25)) - * - * To make things even worse, we need to run the initial stage at - * 3MB/s so we enter autoconverge even when host is (over)loaded. - */ -static void test_migrate_auto_converge(void) -{ - g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - MigrateStart args = {}; - QTestState *from, *to; - int64_t percentage; - - /* - * We want the test to be stable and as fast as possible. - * E.g., with 1Gb/s bandwidth migration may pass without throttling, - * so we need to decrease a bandwidth. 
- */ - const int64_t init_pct = 5, inc_pct = 25, max_pct = 95; - - if (test_migrate_start(&from, &to, uri, &args)) { - return; - } - - migrate_set_capability(from, "auto-converge", true); - migrate_set_parameter_int(from, "cpu-throttle-initial", init_pct); - migrate_set_parameter_int(from, "cpu-throttle-increment", inc_pct); - migrate_set_parameter_int(from, "max-cpu-throttle", max_pct); - - /* - * Set the initial parameters so that the migration could not converge - * without throttling. - */ - migrate_ensure_non_converge(from); - - /* To check remaining size after precopy */ - migrate_set_capability(from, "pause-before-switchover", true); - - /* Wait for the first serial output from the source */ - wait_for_serial("src_serial"); - - migrate_qmp(from, to, uri, NULL, "{}"); - - /* Wait for throttling begins */ - percentage = 0; - do { - percentage = read_migrate_property_int(from, "cpu-throttle-percentage"); - if (percentage != 0) { - break; - } - usleep(20); - g_assert_false(src_state.stop_seen); - } while (true); - /* The first percentage of throttling should be at least init_pct */ - g_assert_cmpint(percentage, >=, init_pct); - /* Now, when we tested that throttling works, let it converge */ - migrate_ensure_converge(from); - - /* - * Wait for pre-switchover status to check last throttle percentage - * and remaining. These values will be zeroed later - */ - wait_for_migration_status(from, "pre-switchover", NULL); - - /* The final percentage of throttling shouldn't be greater than max_pct */ - percentage = read_migrate_property_int(from, "cpu-throttle-percentage"); - g_assert_cmpint(percentage, <=, max_pct); - migrate_continue(from, "pre-switchover"); - - qtest_qmp_eventwait(to, "RESUME"); - - wait_for_serial("dest_serial"); - wait_for_migration_complete(from); - - test_migrate_end(from, to, true); -} - -static void * -test_migrate_precopy_tcp_multifd_start_common(QTestState *from, - QTestState *to, - const char *method) -{ - migrate_set_parameter_int(from, "multifd-channels", 16); - migrate_set_parameter_int(to, "multifd-channels", 16); - - migrate_set_parameter_str(from, "multifd-compression", method); - migrate_set_parameter_str(to, "multifd-compression", method); - - migrate_set_capability(from, "multifd", true); - migrate_set_capability(to, "multifd", true); - - /* Start incoming migration from the 1st socket */ - migrate_incoming_qmp(to, "tcp:127.0.0.1:0", "{}"); - - return NULL; -} - -static void * -test_migrate_precopy_tcp_multifd_start(QTestState *from, - QTestState *to) -{ - return test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); -} - -static void * -test_migrate_precopy_tcp_multifd_start_zero_page_legacy(QTestState *from, - QTestState *to) -{ - test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); - migrate_set_parameter_str(from, "zero-page-detection", "legacy"); - return NULL; -} - -static void * -test_migration_precopy_tcp_multifd_start_no_zero_page(QTestState *from, - QTestState *to) -{ - test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); - migrate_set_parameter_str(from, "zero-page-detection", "none"); - return NULL; -} - -static void * -test_migrate_precopy_tcp_multifd_zlib_start(QTestState *from, - QTestState *to) -{ - /* - * Overloading this test to also check that set_parameter does not error. - * This is also done in the tests for the other compression methods. 
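With the constants test_migrate_auto_converge() sets above (cpu-throttle-initial 5, cpu-throttle-increment 25, max-cpu-throttle 95), the throttle percentage should step through exactly the sequence the earlier comment lists: 5, 30, 55, 80, then clamp at 95. A small standalone check of that progression (the roughly-every-three-iterations cadence is taken from the comment, not re-derived from migration code):

    #include <stdio.h>

    int main(void)
    {
        const int init_pct = 5, inc_pct = 25, max_pct = 95;
        int pct = 0;
        int step;

        /* auto-converge only re-evaluates about every three iterations */
        for (step = 1; step <= 6; step++) {
            pct = pct ? pct + inc_pct : init_pct;
            if (pct > max_pct) {
                pct = max_pct;
            }
            printf("throttle step %d (roughly %d iterations in): %d%%\n",
                   step, step * 3, pct);
        }
        return 0;
    }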
- */ - migrate_set_parameter_int(from, "multifd-zlib-level", 2); - migrate_set_parameter_int(to, "multifd-zlib-level", 2); - - return test_migrate_precopy_tcp_multifd_start_common(from, to, "zlib"); -} - -#ifdef CONFIG_ZSTD -static void * -test_migrate_precopy_tcp_multifd_zstd_start(QTestState *from, - QTestState *to) -{ - migrate_set_parameter_int(from, "multifd-zstd-level", 2); - migrate_set_parameter_int(to, "multifd-zstd-level", 2); - - return test_migrate_precopy_tcp_multifd_start_common(from, to, "zstd"); -} -#endif /* CONFIG_ZSTD */ - -#ifdef CONFIG_QPL -static void * -test_migrate_precopy_tcp_multifd_qpl_start(QTestState *from, - QTestState *to) -{ - return test_migrate_precopy_tcp_multifd_start_common(from, to, "qpl"); -} -#endif /* CONFIG_QPL */ -#ifdef CONFIG_UADK -static void * -test_migrate_precopy_tcp_multifd_uadk_start(QTestState *from, - QTestState *to) -{ - return test_migrate_precopy_tcp_multifd_start_common(from, to, "uadk"); -} -#endif /* CONFIG_UADK */ - -static void test_multifd_tcp_uri_none(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .start_hook = test_migrate_precopy_tcp_multifd_start, - /* - * Multifd is more complicated than most of the features, it - * directly takes guest page buffers when sending, make sure - * everything will work alright even if guest page is changing. - */ - .live = true, - }; - test_precopy_common(&args); -} - -static void test_multifd_tcp_zero_page_legacy(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .start_hook = test_migrate_precopy_tcp_multifd_start_zero_page_legacy, - /* - * Multifd is more complicated than most of the features, it - * directly takes guest page buffers when sending, make sure - * everything will work alright even if guest page is changing. - */ - .live = true, - }; - test_precopy_common(&args); -} - -static void test_multifd_tcp_no_zero_page(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .start_hook = test_migration_precopy_tcp_multifd_start_no_zero_page, - /* - * Multifd is more complicated than most of the features, it - * directly takes guest page buffers when sending, make sure - * everything will work alright even if guest page is changing. 
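The zero-page-detection variants above ('legacy' versus 'none') control whether and where outgoing pages are scanned for the all-zero case, which can then be signalled with a tiny marker instead of a full page transfer. A much simplified standalone sketch of the underlying check (QEMU's real buffer_is_zero() is heavily optimised):

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* naive form of the scan that zero-page-detection enables */
    static bool page_is_zero(const unsigned char *page)
    {
        for (size_t i = 0; i < PAGE_SIZE; i++) {
            if (page[i]) {
                return false;
            }
        }
        return true;
    }

    int main(void)
    {
        static unsigned char page[PAGE_SIZE];   /* zero-initialised */

        printf("all zero: %d (send only a marker)\n", page_is_zero(page));
        page[123] = 0x42;
        printf("all zero: %d (send the full page)\n", page_is_zero(page));
        return 0;
    }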
- */ - .live = true, - }; - test_precopy_common(&args); -} - -static void test_multifd_tcp_channels_none(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .start_hook = test_migrate_precopy_tcp_multifd_start, - .live = true, - .connect_channels = "[ { 'channel-type': 'main'," - " 'addr': { 'transport': 'socket'," - " 'type': 'inet'," - " 'host': '127.0.0.1'," - " 'port': '0' } } ]", - }; - test_precopy_common(&args); -} - -static void test_multifd_tcp_zlib(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .start_hook = test_migrate_precopy_tcp_multifd_zlib_start, - }; - test_precopy_common(&args); -} - -#ifdef CONFIG_ZSTD -static void test_multifd_tcp_zstd(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .start_hook = test_migrate_precopy_tcp_multifd_zstd_start, - }; - test_precopy_common(&args); -} -#endif - -#ifdef CONFIG_QPL -static void test_multifd_tcp_qpl(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .start_hook = test_migrate_precopy_tcp_multifd_qpl_start, - }; - test_precopy_common(&args); -} -#endif - -#ifdef CONFIG_UADK -static void test_multifd_tcp_uadk(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .start_hook = test_migrate_precopy_tcp_multifd_uadk_start, - }; - test_precopy_common(&args); -} -#endif - -#ifdef CONFIG_GNUTLS -static void * -test_migrate_multifd_tcp_tls_psk_start_match(QTestState *from, - QTestState *to) -{ - test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); - return test_migrate_tls_psk_start_match(from, to); -} - -static void * -test_migrate_multifd_tcp_tls_psk_start_mismatch(QTestState *from, - QTestState *to) -{ - test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); - return test_migrate_tls_psk_start_mismatch(from, to); -} - -#ifdef CONFIG_TASN1 -static void * -test_migrate_multifd_tls_x509_start_default_host(QTestState *from, - QTestState *to) -{ - test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); - return test_migrate_tls_x509_start_default_host(from, to); -} - -static void * -test_migrate_multifd_tls_x509_start_override_host(QTestState *from, - QTestState *to) -{ - test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); - return test_migrate_tls_x509_start_override_host(from, to); -} - -static void * -test_migrate_multifd_tls_x509_start_mismatch_host(QTestState *from, - QTestState *to) -{ - test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); - return test_migrate_tls_x509_start_mismatch_host(from, to); -} - -static void * -test_migrate_multifd_tls_x509_start_allow_anon_client(QTestState *from, - QTestState *to) -{ - test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); - return test_migrate_tls_x509_start_allow_anon_client(from, to); -} - -static void * -test_migrate_multifd_tls_x509_start_reject_anon_client(QTestState *from, - QTestState *to) -{ - test_migrate_precopy_tcp_multifd_start_common(from, to, "none"); - return test_migrate_tls_x509_start_reject_anon_client(from, to); -} -#endif /* CONFIG_TASN1 */ - -static void test_multifd_tcp_tls_psk_match(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .start_hook = test_migrate_multifd_tcp_tls_psk_start_match, - .finish_hook = test_migrate_tls_psk_finish, - }; - test_precopy_common(&args); -} - -static void test_multifd_tcp_tls_psk_mismatch(void) -{ - MigrateCommon args = { - .start = { - .hide_stderr = true, - }, - .listen_uri = "defer", - .start_hook = test_migrate_multifd_tcp_tls_psk_start_mismatch, - .finish_hook = test_migrate_tls_psk_finish, - .result 
= MIG_TEST_FAIL, - }; - test_precopy_common(&args); -} - -#ifdef CONFIG_TASN1 -static void test_multifd_tcp_tls_x509_default_host(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .start_hook = test_migrate_multifd_tls_x509_start_default_host, - .finish_hook = test_migrate_tls_x509_finish, - }; - test_precopy_common(&args); -} - -static void test_multifd_tcp_tls_x509_override_host(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .start_hook = test_migrate_multifd_tls_x509_start_override_host, - .finish_hook = test_migrate_tls_x509_finish, - }; - test_precopy_common(&args); -} - -static void test_multifd_tcp_tls_x509_mismatch_host(void) -{ - /* - * This has different behaviour to the non-multifd case. - * - * In non-multifd case when client aborts due to mismatched - * cert host, the server has already started trying to load - * migration state, and so it exits with I/O failure. - * - * In multifd case when client aborts due to mismatched - * cert host, the server is still waiting for the other - * multifd connections to arrive so hasn't started trying - * to load migration state, and thus just aborts the migration - * without exiting. - */ - MigrateCommon args = { - .start = { - .hide_stderr = true, - }, - .listen_uri = "defer", - .start_hook = test_migrate_multifd_tls_x509_start_mismatch_host, - .finish_hook = test_migrate_tls_x509_finish, - .result = MIG_TEST_FAIL, - }; - test_precopy_common(&args); -} - -static void test_multifd_tcp_tls_x509_allow_anon_client(void) -{ - MigrateCommon args = { - .listen_uri = "defer", - .start_hook = test_migrate_multifd_tls_x509_start_allow_anon_client, - .finish_hook = test_migrate_tls_x509_finish, - }; - test_precopy_common(&args); -} - -static void test_multifd_tcp_tls_x509_reject_anon_client(void) -{ - MigrateCommon args = { - .start = { - .hide_stderr = true, - }, - .listen_uri = "defer", - .start_hook = test_migrate_multifd_tls_x509_start_reject_anon_client, - .finish_hook = test_migrate_tls_x509_finish, - .result = MIG_TEST_FAIL, - }; - test_precopy_common(&args); -} -#endif /* CONFIG_TASN1 */ -#endif /* CONFIG_GNUTLS */ - -/* - * This test does: - * source target - * migrate_incoming - * migrate - * migrate_cancel - * launch another target - * migrate - * - * And see that it works - */ -static void test_multifd_tcp_cancel(void) -{ - MigrateStart args = { - .hide_stderr = true, - }; - QTestState *from, *to, *to2; - - if (test_migrate_start(&from, &to, "defer", &args)) { - return; - } - - migrate_ensure_non_converge(from); - migrate_prepare_for_dirty_mem(from); - - migrate_set_parameter_int(from, "multifd-channels", 16); - migrate_set_parameter_int(to, "multifd-channels", 16); - - migrate_set_capability(from, "multifd", true); - migrate_set_capability(to, "multifd", true); - - /* Start incoming migration from the 1st socket */ - migrate_incoming_qmp(to, "tcp:127.0.0.1:0", "{}"); - - /* Wait for the first serial output from the source */ - wait_for_serial("src_serial"); - - migrate_qmp(from, to, NULL, NULL, "{}"); - - migrate_wait_for_dirty_mem(from, to); - - migrate_cancel(from); - - /* Make sure QEMU process "to" exited */ - qtest_set_expected_status(to, EXIT_FAILURE); - qtest_wait_qemu(to); - - args = (MigrateStart){ - .only_target = true, - }; - - if (test_migrate_start(&from, &to2, "defer", &args)) { - return; - } - - migrate_set_parameter_int(to2, "multifd-channels", 16); - - migrate_set_capability(to2, "multifd", true); - - /* Start incoming migration from the 1st socket */ - migrate_incoming_qmp(to2, 
"tcp:127.0.0.1:0", "{}"); - - wait_for_migration_status(from, "cancelled", NULL); - - migrate_ensure_non_converge(from); - - migrate_qmp(from, to2, NULL, NULL, "{}"); - - migrate_wait_for_dirty_mem(from, to2); - - migrate_ensure_converge(from); - - wait_for_stop(from, &src_state); - qtest_qmp_eventwait(to2, "RESUME"); - - wait_for_serial("dest_serial"); - wait_for_migration_complete(from); - test_migrate_end(from, to2, true); -} - -static void calc_dirty_rate(QTestState *who, uint64_t calc_time) -{ - qtest_qmp_assert_success(who, - "{ 'execute': 'calc-dirty-rate'," - "'arguments': { " - "'calc-time': %" PRIu64 "," - "'mode': 'dirty-ring' }}", - calc_time); -} - -static QDict *query_dirty_rate(QTestState *who) -{ - return qtest_qmp_assert_success_ref(who, - "{ 'execute': 'query-dirty-rate' }"); -} - -static void dirtylimit_set_all(QTestState *who, uint64_t dirtyrate) -{ - qtest_qmp_assert_success(who, - "{ 'execute': 'set-vcpu-dirty-limit'," - "'arguments': { " - "'dirty-rate': %" PRIu64 " } }", - dirtyrate); -} - -static void cancel_vcpu_dirty_limit(QTestState *who) -{ - qtest_qmp_assert_success(who, - "{ 'execute': 'cancel-vcpu-dirty-limit' }"); -} - -static QDict *query_vcpu_dirty_limit(QTestState *who) -{ - QDict *rsp; - - rsp = qtest_qmp(who, "{ 'execute': 'query-vcpu-dirty-limit' }"); - g_assert(!qdict_haskey(rsp, "error")); - g_assert(qdict_haskey(rsp, "return")); - - return rsp; -} - -static bool calc_dirtyrate_ready(QTestState *who) -{ - QDict *rsp_return; - gchar *status; - - rsp_return = query_dirty_rate(who); - g_assert(rsp_return); - - status = g_strdup(qdict_get_str(rsp_return, "status")); - g_assert(status); - - return g_strcmp0(status, "measuring"); -} - -static void wait_for_calc_dirtyrate_complete(QTestState *who, - int64_t time_s) -{ - int max_try_count = 10000; - usleep(time_s * 1000000); - - while (!calc_dirtyrate_ready(who) && max_try_count--) { - usleep(1000); - } - - /* - * Set the timeout with 10 s(max_try_count * 1000us), - * if dirtyrate measurement not complete, fail test. 
- */ - g_assert_cmpint(max_try_count, !=, 0); -} - -static int64_t get_dirty_rate(QTestState *who) -{ - QDict *rsp_return; - gchar *status; - QList *rates; - const QListEntry *entry; - QDict *rate; - int64_t dirtyrate; - - rsp_return = query_dirty_rate(who); - g_assert(rsp_return); - - status = g_strdup(qdict_get_str(rsp_return, "status")); - g_assert(status); - g_assert_cmpstr(status, ==, "measured"); - - rates = qdict_get_qlist(rsp_return, "vcpu-dirty-rate"); - g_assert(rates && !qlist_empty(rates)); - - entry = qlist_first(rates); - g_assert(entry); - - rate = qobject_to(QDict, qlist_entry_obj(entry)); - g_assert(rate); - - dirtyrate = qdict_get_try_int(rate, "dirty-rate", -1); - - qobject_unref(rsp_return); - return dirtyrate; -} - -static int64_t get_limit_rate(QTestState *who) -{ - QDict *rsp_return; - QList *rates; - const QListEntry *entry; - QDict *rate; - int64_t dirtyrate; - - rsp_return = query_vcpu_dirty_limit(who); - g_assert(rsp_return); - - rates = qdict_get_qlist(rsp_return, "return"); - g_assert(rates && !qlist_empty(rates)); - - entry = qlist_first(rates); - g_assert(entry); - - rate = qobject_to(QDict, qlist_entry_obj(entry)); - g_assert(rate); - - dirtyrate = qdict_get_try_int(rate, "limit-rate", -1); - - qobject_unref(rsp_return); - return dirtyrate; -} - -static QTestState *dirtylimit_start_vm(void) -{ - QTestState *vm = NULL; - g_autofree gchar *cmd = NULL; - - bootfile_create(tmpfs, false); - cmd = g_strdup_printf("-accel kvm,dirty-ring-size=4096 " - "-name dirtylimit-test,debug-threads=on " - "-m 150M -smp 1 " - "-serial file:%s/vm_serial " - "-drive file=%s,format=raw ", - tmpfs, bootpath); - - vm = qtest_init(cmd); - return vm; -} - -static void dirtylimit_stop_vm(QTestState *vm) -{ - qtest_quit(vm); - cleanup("vm_serial"); -} - -static void test_vcpu_dirty_limit(void) +static void parse_args(int *argc_p, char ***argv_p, bool *full_set) { - QTestState *vm; - int64_t origin_rate; - int64_t quota_rate; - int64_t rate ; - int max_try_count = 20; - int hit = 0; + int argc = *argc_p; + char **argv = *argv_p; + int i, j; - /* Start vm for vcpu dirtylimit test */ - vm = dirtylimit_start_vm(); - - /* Wait for the first serial output from the vm*/ - wait_for_serial("vm_serial"); - - /* Do dirtyrate measurement with calc time equals 1s */ - calc_dirty_rate(vm, 1); - - /* Sleep calc time and wait for calc dirtyrate complete */ - wait_for_calc_dirtyrate_complete(vm, 1); - - /* Query original dirty page rate */ - origin_rate = get_dirty_rate(vm); - - /* VM booted from bootsect should dirty memory steadily */ - assert(origin_rate != 0); - - /* Setup quota dirty page rate at half of origin */ - quota_rate = origin_rate / 2; - - /* Set dirtylimit */ - dirtylimit_set_all(vm, quota_rate); - - /* - * Check if set-vcpu-dirty-limit and query-vcpu-dirty-limit - * works literally - */ - g_assert_cmpint(quota_rate, ==, get_limit_rate(vm)); - - /* Sleep a bit to check if it take effect */ - usleep(2000000); - - /* - * Check if dirtylimit take effect realistically, set the - * timeout with 20 s(max_try_count * 1s), if dirtylimit - * doesn't take effect, fail test. 
- */ - while (--max_try_count) { - calc_dirty_rate(vm, 1); - wait_for_calc_dirtyrate_complete(vm, 1); - rate = get_dirty_rate(vm); - - /* - * Assume hitting if current rate is less - * than quota rate (within accepting error) - */ - if (rate < (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) { - hit = 1; - break; + j = 1; + for (i = 1; i < argc; i++) { + if (g_str_equal(argv[i], "--full")) { + *full_set = true; + continue; } - } - - g_assert_cmpint(hit, ==, 1); - - hit = 0; - max_try_count = 20; - - /* Check if dirtylimit cancellation take effect */ - cancel_vcpu_dirty_limit(vm); - while (--max_try_count) { - calc_dirty_rate(vm, 1); - wait_for_calc_dirtyrate_complete(vm, 1); - rate = get_dirty_rate(vm); - - /* - * Assume dirtylimit be canceled if current rate is - * greater than quota rate (within accepting error) - */ - if (rate > (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) { - hit = 1; - break; + argv[j++] = argv[i]; + if (i >= j) { + argv[i] = NULL; } } - - g_assert_cmpint(hit, ==, 1); - dirtylimit_stop_vm(vm); -} - -static void migrate_dirty_limit_wait_showup(QTestState *from, - const int64_t period, - const int64_t value) -{ - /* Enable dirty limit capability */ - migrate_set_capability(from, "dirty-limit", true); - - /* Set dirty limit parameters */ - migrate_set_parameter_int(from, "x-vcpu-dirty-limit-period", period); - migrate_set_parameter_int(from, "vcpu-dirty-limit", value); - - /* Make sure migrate can't converge */ - migrate_ensure_non_converge(from); - - /* To check limit rate after precopy */ - migrate_set_capability(from, "pause-before-switchover", true); - - /* Wait for the serial output from the source */ - wait_for_serial("src_serial"); -} - -/* - * This test does: - * source destination - * start vm - * start incoming vm - * migrate - * wait dirty limit to begin - * cancel migrate - * cancellation check - * restart incoming vm - * migrate - * wait dirty limit to begin - * wait pre-switchover event - * convergence condition check - * - * And see if dirty limit migration works correctly. - * This test case involves many passes, so it runs in slow mode only. - */ -static void test_migrate_dirty_limit(void) -{ - g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); - QTestState *from, *to; - int64_t remaining; - uint64_t throttle_us_per_full; - /* - * We want the test to be stable and as fast as possible. - * E.g., with 1Gb/s bandwidth migration may pass without dirty limit, - * so we need to decrease a bandwidth. - */ - const int64_t dirtylimit_period = 1000, dirtylimit_value = 50; - const int64_t max_bandwidth = 400000000; /* ~400Mb/s */ - const int64_t downtime_limit = 250; /* 250ms */ - /* - * We migrate through unix-socket (> 500Mb/s). - * Thus, expected migration speed ~= bandwidth limit (< 500Mb/s). 
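The constants above (max_bandwidth 400000000, downtime_limit 250 ms) feed the expected_threshold computed just below: convergence requires that the RAM still remaining at pre-switchover could be sent within one downtime window at the capped bandwidth, and the assertion then allows 1% slack on top. Worked numbers, assuming max-bandwidth is interpreted in bytes per second:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        const int64_t max_bandwidth = 400000000;   /* value set by the test */
        const int64_t downtime_limit = 250;        /* ms */

        int64_t expected_threshold = max_bandwidth * downtime_limit / 1000;
        int64_t allowed = expected_threshold + expected_threshold / 100;

        printf("expected_threshold = %" PRId64 "\n", expected_threshold); /* 100000000 */
        printf("remaining must be < %" PRId64 "\n", allowed);             /* 101000000 */
        return 0;
    }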
- * So, we can predict expected_threshold - */ - const int64_t expected_threshold = max_bandwidth * downtime_limit / 1000; - int max_try_count = 10; - MigrateCommon args = { - .start = { - .hide_stderr = true, - .use_dirty_ring = true, - }, - .listen_uri = uri, - .connect_uri = uri, - }; - - /* Start src, dst vm */ - if (test_migrate_start(&from, &to, args.listen_uri, &args.start)) { - return; - } - - /* Prepare for dirty limit migration and wait src vm show up */ - migrate_dirty_limit_wait_showup(from, dirtylimit_period, dirtylimit_value); - - /* Start migrate */ - migrate_qmp(from, to, args.connect_uri, NULL, "{}"); - - /* Wait for dirty limit throttle begin */ - throttle_us_per_full = 0; - while (throttle_us_per_full == 0) { - throttle_us_per_full = - read_migrate_property_int(from, "dirty-limit-throttle-time-per-round"); - usleep(100); - g_assert_false(src_state.stop_seen); - } - - /* Now cancel migrate and wait for dirty limit throttle switch off */ - migrate_cancel(from); - wait_for_migration_status(from, "cancelled", NULL); - - /* Check if dirty limit throttle switched off, set timeout 1ms */ - do { - throttle_us_per_full = - read_migrate_property_int(from, "dirty-limit-throttle-time-per-round"); - usleep(100); - g_assert_false(src_state.stop_seen); - } while (throttle_us_per_full != 0 && --max_try_count); - - /* Assert dirty limit is not in service */ - g_assert_cmpint(throttle_us_per_full, ==, 0); - - args = (MigrateCommon) { - .start = { - .only_target = true, - .use_dirty_ring = true, - }, - .listen_uri = uri, - .connect_uri = uri, - }; - - /* Restart dst vm, src vm already show up so we needn't wait anymore */ - if (test_migrate_start(&from, &to, args.listen_uri, &args.start)) { - return; - } - - /* Start migrate */ - migrate_qmp(from, to, args.connect_uri, NULL, "{}"); - - /* Wait for dirty limit throttle begin */ - throttle_us_per_full = 0; - while (throttle_us_per_full == 0) { - throttle_us_per_full = - read_migrate_property_int(from, "dirty-limit-throttle-time-per-round"); - usleep(100); - g_assert_false(src_state.stop_seen); - } - - /* - * The dirty limit rate should equals the return value of - * query-vcpu-dirty-limit if dirty limit cap set - */ - g_assert_cmpint(dirtylimit_value, ==, get_limit_rate(from)); - - /* Now, we have tested if dirty limit works, let it converge */ - migrate_set_parameter_int(from, "downtime-limit", downtime_limit); - migrate_set_parameter_int(from, "max-bandwidth", max_bandwidth); - - /* - * Wait for pre-switchover status to check if migration - * satisfy the convergence condition - */ - wait_for_migration_status(from, "pre-switchover", NULL); - - remaining = read_ram_property_int(from, "remaining"); - g_assert_cmpint(remaining, <, - (expected_threshold + expected_threshold / 100)); - - migrate_continue(from, "pre-switchover"); - - qtest_qmp_eventwait(to, "RESUME"); - - wait_for_serial("dest_serial"); - wait_for_migration_complete(from); - - test_migrate_end(from, to, true); -} - -static bool kvm_dirty_ring_supported(void) -{ -#if defined(__linux__) && defined(HOST_X86_64) - int ret, kvm_fd = open("/dev/kvm", O_RDONLY); - - if (kvm_fd < 0) { - return false; - } - - ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING); - close(kvm_fd); - - /* We test with 4096 slots */ - if (ret < 4096) { - return false; - } - - return true; -#else - return false; -#endif + *argc_p = j; } int main(int argc, char **argv) { - bool has_kvm, has_tcg; - bool has_uffd, is_x86; - const char *arch; - g_autoptr(GError) err = NULL; - const char *qemu_src = 
getenv(QEMU_ENV_SRC); - const char *qemu_dst = getenv(QEMU_ENV_DST); + MigrationTestEnv *env; int ret; + bool full_set = false; - g_test_init(&argc, &argv, NULL); - - /* - * The default QTEST_QEMU_BINARY must always be provided because - * that is what helpers use to query the accel type and - * architecture. - */ - if (qemu_src && qemu_dst) { - g_test_message("Only one of %s, %s is allowed", - QEMU_ENV_SRC, QEMU_ENV_DST); - exit(1); - } - - has_kvm = qtest_has_accel("kvm"); - has_tcg = qtest_has_accel("tcg"); - - if (!has_tcg && !has_kvm) { - g_test_skip("No KVM or TCG accelerator available"); - return 0; - } - - has_uffd = ufd_version_check(); - arch = qtest_get_arch(); - is_x86 = !strcmp(arch, "i386") || !strcmp(arch, "x86_64"); - - tmpfs = g_dir_make_tmp("migration-test-XXXXXX", &err); - if (!tmpfs) { - g_test_message("Can't create temporary directory in %s: %s", - g_get_tmp_dir(), err->message); - } - g_assert(tmpfs); + /* strip the --full option if it's present */ + parse_args(&argc, &argv, &full_set); + g_test_init(&argc, &argv, NULL); + env = migration_get_env(); + env->full_set = full_set; module_call_init(MODULE_INIT_QOM); - migration_test_add("/migration/bad_dest", test_baddest); -#ifndef _WIN32 - migration_test_add("/migration/analyze-script", test_analyze_script); - migration_test_add("/migration/vmstate-checker-script", - test_vmstate_checker_script); -#endif - - if (is_x86) { - migration_test_add("/migration/precopy/unix/suspend/live", - test_precopy_unix_suspend_live); - migration_test_add("/migration/precopy/unix/suspend/notlive", - test_precopy_unix_suspend_notlive); - } - - if (has_uffd) { - migration_test_add("/migration/postcopy/plain", test_postcopy); - migration_test_add("/migration/postcopy/recovery/plain", - test_postcopy_recovery); - migration_test_add("/migration/postcopy/preempt/plain", - test_postcopy_preempt); - migration_test_add("/migration/postcopy/preempt/recovery/plain", - test_postcopy_preempt_recovery); - migration_test_add("/migration/postcopy/recovery/double-failures/handshake", - test_postcopy_recovery_fail_handshake); - migration_test_add("/migration/postcopy/recovery/double-failures/reconnect", - test_postcopy_recovery_fail_reconnect); - if (is_x86) { - migration_test_add("/migration/postcopy/suspend", - test_postcopy_suspend); - } - } - - migration_test_add("/migration/precopy/unix/plain", - test_precopy_unix_plain); - migration_test_add("/migration/precopy/unix/xbzrle", - test_precopy_unix_xbzrle); - migration_test_add("/migration/precopy/file", - test_precopy_file); - migration_test_add("/migration/precopy/file/offset", - test_precopy_file_offset); -#ifndef _WIN32 - migration_test_add("/migration/precopy/file/offset/fdset", - test_precopy_file_offset_fdset); -#endif - migration_test_add("/migration/precopy/file/offset/bad", - test_precopy_file_offset_bad); - - /* - * Our CI system has problems with shared memory. - * Don't run this test until we find a workaround. 
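/*
 * Illustrative sketch (not part of the patch): the same in-place argv
 * compaction that the new parse_args() above performs for --full, shown
 * standalone so the option stripping is easy to follow before g_test_init()
 * sees the remaining arguments. All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool strip_flag(int *argc_p, char **argv, const char *flag)
{
    bool found = false;
    int i, j = 1;

    for (i = 1; i < *argc_p; i++) {
        if (strcmp(argv[i], flag) == 0) {
            found = true;          /* drop the flag, keep everything else */
            continue;
        }
        argv[j++] = argv[i];
    }
    argv[j] = NULL;                /* keep argv NULL-terminated */
    *argc_p = j;
    return found;
}

int main(int argc, char **argv)
{
    bool full = strip_flag(&argc, argv, "--full");

    /* a test harness would now pass the filtered argc/argv on */
    printf("full=%d argc=%d\n", full, argc);
    return 0;
}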
- */ - if (getenv("QEMU_TEST_FLAKY_TESTS")) { - migration_test_add("/migration/mode/reboot", test_mode_reboot); - } - - migration_test_add("/migration/precopy/file/mapped-ram", - test_precopy_file_mapped_ram); - migration_test_add("/migration/precopy/file/mapped-ram/live", - test_precopy_file_mapped_ram_live); - - migration_test_add("/migration/multifd/file/mapped-ram", - test_multifd_file_mapped_ram); - migration_test_add("/migration/multifd/file/mapped-ram/live", - test_multifd_file_mapped_ram_live); - - migration_test_add("/migration/multifd/file/mapped-ram/dio", - test_multifd_file_mapped_ram_dio); - -#ifndef _WIN32 - migration_test_add("/migration/multifd/file/mapped-ram/fdset", - test_multifd_file_mapped_ram_fdset); - migration_test_add("/migration/multifd/file/mapped-ram/fdset/dio", - test_multifd_file_mapped_ram_fdset_dio); -#endif - -#ifdef CONFIG_GNUTLS - migration_test_add("/migration/precopy/unix/tls/psk", - test_precopy_unix_tls_psk); - - if (has_uffd) { - /* - * NOTE: psk test is enough for postcopy, as other types of TLS - * channels are tested under precopy. Here what we want to test is the - * general postcopy path that has TLS channel enabled. - */ - migration_test_add("/migration/postcopy/tls/psk", - test_postcopy_tls_psk); - migration_test_add("/migration/postcopy/recovery/tls/psk", - test_postcopy_recovery_tls_psk); - migration_test_add("/migration/postcopy/preempt/tls/psk", - test_postcopy_preempt_tls_psk); - migration_test_add("/migration/postcopy/preempt/recovery/tls/psk", - test_postcopy_preempt_all); - } -#ifdef CONFIG_TASN1 - migration_test_add("/migration/precopy/unix/tls/x509/default-host", - test_precopy_unix_tls_x509_default_host); - migration_test_add("/migration/precopy/unix/tls/x509/override-host", - test_precopy_unix_tls_x509_override_host); -#endif /* CONFIG_TASN1 */ -#endif /* CONFIG_GNUTLS */ - - migration_test_add("/migration/precopy/tcp/plain", test_precopy_tcp_plain); - - migration_test_add("/migration/precopy/tcp/plain/switchover-ack", - test_precopy_tcp_switchover_ack); - -#ifdef CONFIG_GNUTLS - migration_test_add("/migration/precopy/tcp/tls/psk/match", - test_precopy_tcp_tls_psk_match); - migration_test_add("/migration/precopy/tcp/tls/psk/mismatch", - test_precopy_tcp_tls_psk_mismatch); -#ifdef CONFIG_TASN1 - migration_test_add("/migration/precopy/tcp/tls/x509/default-host", - test_precopy_tcp_tls_x509_default_host); - migration_test_add("/migration/precopy/tcp/tls/x509/override-host", - test_precopy_tcp_tls_x509_override_host); - migration_test_add("/migration/precopy/tcp/tls/x509/mismatch-host", - test_precopy_tcp_tls_x509_mismatch_host); - migration_test_add("/migration/precopy/tcp/tls/x509/friendly-client", - test_precopy_tcp_tls_x509_friendly_client); - migration_test_add("/migration/precopy/tcp/tls/x509/hostile-client", - test_precopy_tcp_tls_x509_hostile_client); - migration_test_add("/migration/precopy/tcp/tls/x509/allow-anon-client", - test_precopy_tcp_tls_x509_allow_anon_client); - migration_test_add("/migration/precopy/tcp/tls/x509/reject-anon-client", - test_precopy_tcp_tls_x509_reject_anon_client); -#endif /* CONFIG_TASN1 */ -#endif /* CONFIG_GNUTLS */ - - /* migration_test_add("/migration/ignore_shared", test_ignore_shared); */ -#ifndef _WIN32 - migration_test_add("/migration/precopy/fd/tcp", - test_migrate_precopy_fd_socket); - migration_test_add("/migration/precopy/fd/file", - test_migrate_precopy_fd_file); -#endif - migration_test_add("/migration/validate_uuid", test_validate_uuid); - 
migration_test_add("/migration/validate_uuid_error", - test_validate_uuid_error); - migration_test_add("/migration/validate_uuid_src_not_set", - test_validate_uuid_src_not_set); - migration_test_add("/migration/validate_uuid_dst_not_set", - test_validate_uuid_dst_not_set); - migration_test_add("/migration/validate_uri/channels/both_set", - test_validate_uri_channels_both_set); - migration_test_add("/migration/validate_uri/channels/none_set", - test_validate_uri_channels_none_set); - /* - * See explanation why this test is slow on function definition - */ - if (g_test_slow()) { - migration_test_add("/migration/auto_converge", - test_migrate_auto_converge); - if (g_str_equal(arch, "x86_64") && - has_kvm && kvm_dirty_ring_supported()) { - migration_test_add("/migration/dirty_limit", - test_migrate_dirty_limit); - } - } - migration_test_add("/migration/multifd/tcp/uri/plain/none", - test_multifd_tcp_uri_none); - migration_test_add("/migration/multifd/tcp/channels/plain/none", - test_multifd_tcp_channels_none); - migration_test_add("/migration/multifd/tcp/plain/zero-page/legacy", - test_multifd_tcp_zero_page_legacy); - migration_test_add("/migration/multifd/tcp/plain/zero-page/none", - test_multifd_tcp_no_zero_page); - migration_test_add("/migration/multifd/tcp/plain/cancel", - test_multifd_tcp_cancel); - migration_test_add("/migration/multifd/tcp/plain/zlib", - test_multifd_tcp_zlib); -#ifdef CONFIG_ZSTD - migration_test_add("/migration/multifd/tcp/plain/zstd", - test_multifd_tcp_zstd); -#endif -#ifdef CONFIG_QPL - migration_test_add("/migration/multifd/tcp/plain/qpl", - test_multifd_tcp_qpl); -#endif -#ifdef CONFIG_UADK - migration_test_add("/migration/multifd/tcp/plain/uadk", - test_multifd_tcp_uadk); -#endif -#ifdef CONFIG_GNUTLS - migration_test_add("/migration/multifd/tcp/tls/psk/match", - test_multifd_tcp_tls_psk_match); - migration_test_add("/migration/multifd/tcp/tls/psk/mismatch", - test_multifd_tcp_tls_psk_mismatch); -#ifdef CONFIG_TASN1 - migration_test_add("/migration/multifd/tcp/tls/x509/default-host", - test_multifd_tcp_tls_x509_default_host); - migration_test_add("/migration/multifd/tcp/tls/x509/override-host", - test_multifd_tcp_tls_x509_override_host); - migration_test_add("/migration/multifd/tcp/tls/x509/mismatch-host", - test_multifd_tcp_tls_x509_mismatch_host); - migration_test_add("/migration/multifd/tcp/tls/x509/allow-anon-client", - test_multifd_tcp_tls_x509_allow_anon_client); - migration_test_add("/migration/multifd/tcp/tls/x509/reject-anon-client", - test_multifd_tcp_tls_x509_reject_anon_client); -#endif /* CONFIG_TASN1 */ -#endif /* CONFIG_GNUTLS */ - - if (g_str_equal(arch, "x86_64") && has_kvm && kvm_dirty_ring_supported()) { - migration_test_add("/migration/dirty_ring", - test_precopy_unix_dirty_ring); - migration_test_add("/migration/vcpu_dirty_limit", - test_vcpu_dirty_limit); - } + migration_test_add_tls(env); + migration_test_add_compression(env); + migration_test_add_postcopy(env); + migration_test_add_file(env); + migration_test_add_precopy(env); + migration_test_add_cpr(env); + migration_test_add_misc(env); ret = g_test_run(); g_assert_cmpint(ret, ==, 0); - bootfile_delete(); - ret = rmdir(tmpfs); - if (ret != 0) { - g_test_message("unable to rmdir: path (%s): %s", - tmpfs, strerror(errno)); - } - g_free(tmpfs); + ret = migration_env_clean(env); return ret; } diff --git a/tests/migration/Makefile b/tests/qtest/migration/Makefile similarity index 100% rename from tests/migration/Makefile rename to tests/qtest/migration/Makefile diff --git 
a/tests/migration/aarch64/Makefile b/tests/qtest/migration/aarch64/Makefile similarity index 100% rename from tests/migration/aarch64/Makefile rename to tests/qtest/migration/aarch64/Makefile diff --git a/tests/migration/aarch64/a-b-kernel.S b/tests/qtest/migration/aarch64/a-b-kernel.S similarity index 100% rename from tests/migration/aarch64/a-b-kernel.S rename to tests/qtest/migration/aarch64/a-b-kernel.S diff --git a/tests/migration/aarch64/a-b-kernel.h b/tests/qtest/migration/aarch64/a-b-kernel.h similarity index 100% rename from tests/migration/aarch64/a-b-kernel.h rename to tests/qtest/migration/aarch64/a-b-kernel.h diff --git a/tests/qtest/migration/bootfile.c b/tests/qtest/migration/bootfile.c new file mode 100644 index 00000000000..fac059d11d7 --- /dev/null +++ b/tests/qtest/migration/bootfile.c @@ -0,0 +1,70 @@ +/* + * Guest code setup for migration tests + * + * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates + * based on the vhost-user-test.c that is: + * Copyright (c) 2014 Virtual Open Systems Sarl. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" + +/* + * The boot file modifies memory area in [start_address, end_address) + * repeatedly. It outputs a 'B' at a fixed rate while it's still running. + */ +#include "bootfile.h" +#include "i386/a-b-bootblock.h" +#include "aarch64/a-b-kernel.h" +#include "ppc64/a-b-kernel.h" +#include "s390x/a-b-bios.h" + +static char *bootpath; + +void bootfile_delete(void) +{ + if (!bootpath) { + return; + } + unlink(bootpath); + g_free(bootpath); + bootpath = NULL; +} + +char *bootfile_create(const char *arch, const char *dir, bool suspend_me) +{ + unsigned char *content; + size_t len; + + bootfile_delete(); + bootpath = g_strdup_printf("%s/bootsect", dir); + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { + /* the assembled x86 boot sector should be exactly one sector large */ + g_assert(sizeof(x86_bootsect) == 512); + x86_bootsect[SYM_suspend_me - SYM_start] = suspend_me; + content = x86_bootsect; + len = sizeof(x86_bootsect); + } else if (g_str_equal(arch, "s390x")) { + content = s390x_elf; + len = sizeof(s390x_elf); + } else if (strcmp(arch, "ppc64") == 0) { + content = ppc64_kernel; + len = sizeof(ppc64_kernel); + } else if (strcmp(arch, "aarch64") == 0) { + content = aarch64_kernel; + len = sizeof(aarch64_kernel); + g_assert(sizeof(aarch64_kernel) <= ARM_TEST_MAX_KERNEL_SIZE); + } else { + g_assert_not_reached(); + } + + FILE *bootfile = fopen(bootpath, "wb"); + + g_assert_cmpint(fwrite(content, len, 1, bootfile), ==, 1); + fclose(bootfile); + + return bootpath; +} diff --git a/tests/qtest/migration/bootfile.h b/tests/qtest/migration/bootfile.h new file mode 100644 index 00000000000..6d6a67386e1 --- /dev/null +++ b/tests/qtest/migration/bootfile.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2018 Red Hat, Inc. and/or its affiliates + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef BOOTFILE_H +#define BOOTFILE_H + +/* Common */ +#define TEST_MEM_PAGE_SIZE 4096 + +/* x86 */ +#define X86_TEST_MEM_START (1 * 1024 * 1024) +#define X86_TEST_MEM_END (100 * 1024 * 1024) + +/* S390 */ +#define S390_TEST_MEM_START (1 * 1024 * 1024) +#define S390_TEST_MEM_END (100 * 1024 * 1024) + +/* PPC */ +#define PPC_TEST_MEM_START (1 * 1024 * 1024) +#define PPC_TEST_MEM_END (100 * 1024 * 1024) +#define PPC_H_PUT_TERM_CHAR 0x58 + +/* ARM */ +#define ARM_TEST_MEM_START (0x40000000 + 1 * 1024 * 1024) +#define ARM_TEST_MEM_END (0x40000000 + 100 * 1024 * 1024) +#define ARM_MACH_VIRT_UART 0x09000000 +/* AArch64 kernel load address is 0x40080000, and the test memory starts at + * 0x40100000. So the maximum allowable kernel size is 512KB. + */ +#define ARM_TEST_MAX_KERNEL_SIZE (512 * 1024) + +void bootfile_delete(void); +char *bootfile_create(const char *arch, const char *dir, bool suspend_me); + +#endif /* BOOTFILE_H */ diff --git a/tests/qtest/migration/compression-tests.c b/tests/qtest/migration/compression-tests.c new file mode 100644 index 00000000000..b827665b8ec --- /dev/null +++ b/tests/qtest/migration/compression-tests.c @@ -0,0 +1,226 @@ +/* + * QTest testcases for migration compression + * + * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates + * based on the vhost-user-test.c that is: + * Copyright (c) 2014 Virtual Open Systems Sarl. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "libqtest.h" +#include "migration/framework.h" +#include "migration/migration-qmp.h" +#include "migration/migration-util.h" +#include "qemu/module.h" + + +static char *tmpfs; + +#ifdef CONFIG_ZSTD +static void * +migrate_hook_start_precopy_tcp_multifd_zstd(QTestState *from, + QTestState *to) +{ + migrate_set_parameter_int(from, "multifd-zstd-level", 2); + migrate_set_parameter_int(to, "multifd-zstd-level", 2); + + return migrate_hook_start_precopy_tcp_multifd_common(from, to, "zstd"); +} + +static void test_multifd_tcp_zstd(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + .start_hook = migrate_hook_start_precopy_tcp_multifd_zstd, + }; + test_precopy_common(&args); +} + +static void test_multifd_postcopy_tcp_zstd(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + .caps[MIGRATION_CAPABILITY_POSTCOPY_RAM] = true, + }, + .start_hook = migrate_hook_start_precopy_tcp_multifd_zstd, + }; + + test_precopy_common(&args); +} +#endif /* CONFIG_ZSTD */ + +#ifdef CONFIG_QATZIP +static void * +migrate_hook_start_precopy_tcp_multifd_qatzip(QTestState *from, + QTestState *to) +{ + migrate_set_parameter_int(from, "multifd-qatzip-level", 2); + migrate_set_parameter_int(to, "multifd-qatzip-level", 2); + + return migrate_hook_start_precopy_tcp_multifd_common(from, to, "qatzip"); +} + +static void test_multifd_tcp_qatzip(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + .start_hook = migrate_hook_start_precopy_tcp_multifd_qatzip, + }; + test_precopy_common(&args); +} +#endif + +#ifdef CONFIG_QPL +static void * +migrate_hook_start_precopy_tcp_multifd_qpl(QTestState *from, + QTestState *to) +{ + return migrate_hook_start_precopy_tcp_multifd_common(from, to, "qpl"); +} + +static void test_multifd_tcp_qpl(void) +{ + MigrateCommon args = { + .listen_uri = 
"defer", + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + .start_hook = migrate_hook_start_precopy_tcp_multifd_qpl, + }; + test_precopy_common(&args); +} +#endif /* CONFIG_QPL */ + +#ifdef CONFIG_UADK +static void * +migrate_hook_start_precopy_tcp_multifd_uadk(QTestState *from, + QTestState *to) +{ + return migrate_hook_start_precopy_tcp_multifd_common(from, to, "uadk"); +} + +static void test_multifd_tcp_uadk(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + .start_hook = migrate_hook_start_precopy_tcp_multifd_uadk, + }; + test_precopy_common(&args); +} +#endif /* CONFIG_UADK */ + +static void * +migrate_hook_start_xbzrle(QTestState *from, + QTestState *to) +{ + migrate_set_parameter_int(from, "xbzrle-cache-size", 33554432); + return NULL; +} + +static void test_precopy_unix_xbzrle(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = uri, + .start_hook = migrate_hook_start_xbzrle, + .iterations = 2, + .start = { + .caps[MIGRATION_CAPABILITY_XBZRLE] = true, + }, + /* + * XBZRLE needs pages to be modified when doing the 2nd+ round + * iteration to have real data pushed to the stream. + */ + .live = true, + }; + + test_precopy_common(&args); +} + +static void * +migrate_hook_start_precopy_tcp_multifd_zlib(QTestState *from, + QTestState *to) +{ + /* + * Overloading this test to also check that set_parameter does not error. + * This is also done in the tests for the other compression methods. + */ + migrate_set_parameter_int(from, "multifd-zlib-level", 2); + migrate_set_parameter_int(to, "multifd-zlib-level", 2); + + return migrate_hook_start_precopy_tcp_multifd_common(from, to, "zlib"); +} + +static void test_multifd_tcp_zlib(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + .start_hook = migrate_hook_start_precopy_tcp_multifd_zlib, + }; + test_precopy_common(&args); +} + +static void migration_test_add_compression_smoke(MigrationTestEnv *env) +{ + migration_test_add("/migration/multifd/tcp/plain/zlib", + test_multifd_tcp_zlib); +} + +void migration_test_add_compression(MigrationTestEnv *env) +{ + tmpfs = env->tmpfs; + + migration_test_add_compression_smoke(env); + + if (!env->full_set) { + return; + } + +#ifdef CONFIG_ZSTD + migration_test_add("/migration/multifd/tcp/plain/zstd", + test_multifd_tcp_zstd); + if (env->has_uffd) { + migration_test_add("/migration/multifd+postcopy/tcp/plain/zstd", + test_multifd_postcopy_tcp_zstd); + } +#endif + +#ifdef CONFIG_QATZIP + migration_test_add("/migration/multifd/tcp/plain/qatzip", + test_multifd_tcp_qatzip); +#endif + +#ifdef CONFIG_QPL + migration_test_add("/migration/multifd/tcp/plain/qpl", + test_multifd_tcp_qpl); +#endif + +#ifdef CONFIG_UADK + migration_test_add("/migration/multifd/tcp/plain/uadk", + test_multifd_tcp_uadk); +#endif + + if (g_test_slow()) { + migration_test_add("/migration/precopy/unix/xbzrle", + test_precopy_unix_xbzrle); + } +} diff --git a/tests/qtest/migration/cpr-tests.c b/tests/qtest/migration/cpr-tests.c new file mode 100644 index 00000000000..5e764a67876 --- /dev/null +++ b/tests/qtest/migration/cpr-tests.c @@ -0,0 +1,136 @@ +/* + * QTest testcases for CPR + * + * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates + * based on the vhost-user-test.c that is: + * Copyright (c) 2014 Virtual Open Systems Sarl. 
+ * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "libqtest.h" +#include "migration/framework.h" +#include "migration/migration-qmp.h" +#include "migration/migration-util.h" + + +static char *tmpfs; + +static void *migrate_hook_start_mode_reboot(QTestState *from, QTestState *to) +{ + migrate_set_parameter_str(from, "mode", "cpr-reboot"); + migrate_set_parameter_str(to, "mode", "cpr-reboot"); + + return NULL; +} + +static void test_mode_reboot(void) +{ + g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, + FILE_TEST_FILENAME); + MigrateCommon args = { + .start.use_shmem = true, + .connect_uri = uri, + .listen_uri = "defer", + .start_hook = migrate_hook_start_mode_reboot, + .start = { + .caps[MIGRATION_CAPABILITY_X_IGNORE_SHARED] = true, + }, + }; + + test_file_common(&args, true); +} + +static void *test_mode_transfer_start(QTestState *from, QTestState *to) +{ + migrate_set_parameter_str(from, "mode", "cpr-transfer"); + return NULL; +} + +/* + * cpr-transfer mode cannot use the target monitor prior to starting the + * migration, and cannot connect synchronously to the monitor, so defer + * the target connection. + */ +static void test_mode_transfer_common(bool incoming_defer) +{ + g_autofree char *cpr_path = g_strdup_printf("%s/cpr.sock", tmpfs); + g_autofree char *mig_path = g_strdup_printf("%s/migsocket", tmpfs); + g_autofree char *uri = g_strdup_printf("unix:%s", mig_path); + g_autofree char *opts_target = NULL; + + const char *opts = "-machine aux-ram-share=on -nodefaults"; + g_autofree const char *cpr_channel = g_strdup_printf( + "cpr,addr.transport=socket,addr.type=unix,addr.path=%s", + cpr_path); + + g_autofree char *connect_channels = g_strdup_printf( + "[ { 'channel-type': 'main'," + " 'addr': { 'transport': 'socket'," + " 'type': 'unix'," + " 'path': '%s' } } ]", + mig_path); + + /* + * Set up a UNIX domain socket for the CPR channel before + * launching the destination VM, to avoid timing issues + * during connection setup. + */ + int cpr_sockfd = qtest_socket_server(cpr_path); + g_assert(cpr_sockfd >= 0); + + opts_target = g_strdup_printf("-incoming cpr,addr.transport=socket," + "addr.type=fd,addr.str=%d %s", + cpr_sockfd, opts); + MigrateCommon args = { + .start.opts_source = opts, + .start.opts_target = opts_target, + .start.defer_target_connect = true, + .start.memory_backend = "-object memory-backend-memfd,id=pc.ram,size=%s" + " -machine memory-backend=pc.ram", + .listen_uri = incoming_defer ? "defer" : uri, + .connect_channels = connect_channels, + .cpr_channel = cpr_channel, + .start_hook = test_mode_transfer_start, + }; + + test_precopy_common(&args); +} + +static void test_mode_transfer(void) +{ + test_mode_transfer_common(NULL); +} + +static void test_mode_transfer_defer(void) +{ + test_mode_transfer_common(true); +} + +void migration_test_add_cpr(MigrationTestEnv *env) +{ + tmpfs = env->tmpfs; + + /* no tests in the smoke set for now */ + + if (!env->full_set) { + return; + } + + /* + * Our CI system has problems with shared memory. + * Don't run this test until we find a workaround. 
+ */ + if (getenv("QEMU_TEST_FLAKY_TESTS")) { + migration_test_add("/migration/mode/reboot", test_mode_reboot); + } + + if (env->has_kvm) { + migration_test_add("/migration/mode/transfer", test_mode_transfer); + migration_test_add("/migration/mode/transfer/defer", + test_mode_transfer_defer); + } +} diff --git a/tests/qtest/migration/file-tests.c b/tests/qtest/migration/file-tests.c new file mode 100644 index 00000000000..4d78ce08556 --- /dev/null +++ b/tests/qtest/migration/file-tests.c @@ -0,0 +1,341 @@ +/* + * QTest testcases for migration to file + * + * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates + * based on the vhost-user-test.c that is: + * Copyright (c) 2014 Virtual Open Systems Sarl. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "libqtest.h" +#include "migration/framework.h" +#include "migration/migration-qmp.h" +#include "migration/migration-util.h" +#include "qobject/qlist.h" + + +static char *tmpfs; + +static void test_precopy_file(void) +{ + g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, + FILE_TEST_FILENAME); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + }; + + test_file_common(&args, true); +} + +#ifndef _WIN32 +static void fdset_add_fds(QTestState *qts, const char *file, int flags, + int num_fds, bool direct_io) +{ + for (int i = 0; i < num_fds; i++) { + int fd; + +#ifdef O_DIRECT + /* only secondary channels can use direct-io */ + if (direct_io && i != 0) { + flags |= O_DIRECT; + } +#endif + + fd = open(file, flags, 0660); + assert(fd != -1); + + qtest_qmp_fds_assert_success(qts, &fd, 1, "{'execute': 'add-fd', " + "'arguments': {'fdset-id': 1}}"); + close(fd); + } +} + +static void *migrate_hook_start_file_offset_fdset(QTestState *from, + QTestState *to) +{ + g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME); + + fdset_add_fds(from, file, O_WRONLY, 1, false); + fdset_add_fds(to, file, O_RDONLY, 1, false); + + return NULL; +} + +static void test_precopy_file_offset_fdset(void) +{ + g_autofree char *uri = g_strdup_printf("file:/dev/fdset/1,offset=%d", + FILE_TEST_OFFSET); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + .start_hook = migrate_hook_start_file_offset_fdset, + }; + + test_file_common(&args, false); +} +#endif + +static void test_precopy_file_offset(void) +{ + g_autofree char *uri = g_strdup_printf("file:%s/%s,offset=%d", tmpfs, + FILE_TEST_FILENAME, + FILE_TEST_OFFSET); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + }; + + test_file_common(&args, false); +} + +static void test_precopy_file_offset_bad(void) +{ + /* using a value not supported by qemu_strtosz() */ + g_autofree char *uri = g_strdup_printf("file:%s/%s,offset=0x20M", + tmpfs, FILE_TEST_FILENAME); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + .result = MIG_TEST_QMP_ERROR, + }; + + test_file_common(&args, false); +} + +static void test_precopy_file_mapped_ram_live(void) +{ + g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, + FILE_TEST_FILENAME); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + .start = { + .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true, + }, + }; + + test_file_common(&args, false); +} + +static void test_precopy_file_mapped_ram(void) +{ + g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, + FILE_TEST_FILENAME); + MigrateCommon args = { + 
.connect_uri = uri, + .listen_uri = "defer", + .start = { + .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true, + }, + }; + + test_file_common(&args, true); +} + +static void test_multifd_file_mapped_ram_live(void) +{ + g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, + FILE_TEST_FILENAME); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true, + }, + }; + + test_file_common(&args, false); +} + +static void test_multifd_file_mapped_ram(void) +{ + g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, + FILE_TEST_FILENAME); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true, + }, + }; + + test_file_common(&args, true); +} + +static void *migrate_hook_start_multifd_mapped_ram_dio(QTestState *from, + QTestState *to) +{ + migrate_set_parameter_bool(from, "direct-io", true); + migrate_set_parameter_bool(to, "direct-io", true); + + return NULL; +} + +static void test_multifd_file_mapped_ram_dio(void) +{ + g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, + FILE_TEST_FILENAME); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + .start_hook = migrate_hook_start_multifd_mapped_ram_dio, + .start = { + .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true, + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + }; + + if (!probe_o_direct_support(tmpfs)) { + g_test_skip("Filesystem does not support O_DIRECT"); + return; + } + + test_file_common(&args, true); +} + +#ifndef _WIN32 +static void migrate_hook_end_multifd_mapped_ram_fdset(QTestState *from, + QTestState *to, + void *opaque) +{ + QDict *resp; + QList *fdsets; + + /* + * Remove the fdsets after migration, otherwise a second migration + * would fail due fdset reuse. + */ + qtest_qmp_assert_success(from, "{'execute': 'remove-fd', " + "'arguments': { 'fdset-id': 1}}"); + + /* + * Make sure no fdsets are left after migration, otherwise a + * second migration would fail due fdset reuse. 
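/*
 * Illustrative sketch (not part of the patch): the fdset lifecycle the
 * mapped-ram fdset tests rely on, condensed into one hypothetical helper
 * and assuming the same includes as file-tests.c. A file descriptor is
 * handed to QEMU with 'add-fd' into fdset 1, migration then refers to it
 * through the "file:/dev/fdset/1,offset=..." URI, and 'remove-fd' must
 * drop the set afterwards so the fdset id can be reused.
 */
static char *fdset_file_uri(QTestState *who, const char *path, int flags,
                            int offset)
{
    int fd = open(path, flags, 0660);

    g_assert(fd >= 0);
    qtest_qmp_fds_assert_success(who, &fd, 1, "{'execute': 'add-fd', "
                                 "'arguments': {'fdset-id': 1}}");
    close(fd);                     /* QEMU now owns its own duplicate */

    return g_strdup_printf("file:/dev/fdset/1,offset=%d", offset);
}

/* a test would call this for source and destination in its start_hook and
 * use the returned string as connect_uri; the end_hook then issues
 * 'remove-fd' as migrate_hook_end_multifd_mapped_ram_fdset() does above */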
+ */ + resp = qtest_qmp(from, "{'execute': 'query-fdsets', " + "'arguments': {}}"); + g_assert(qdict_haskey(resp, "return")); + fdsets = qdict_get_qlist(resp, "return"); + g_assert(fdsets && qlist_empty(fdsets)); + qobject_unref(resp); +} + +static void *migrate_hook_start_multifd_mapped_ram_fdset_dio(QTestState *from, + QTestState *to) +{ + g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME); + + fdset_add_fds(from, file, O_WRONLY, 2, true); + fdset_add_fds(to, file, O_RDONLY, 2, true); + + migrate_set_parameter_bool(from, "direct-io", true); + migrate_set_parameter_bool(to, "direct-io", true); + + return NULL; +} + +static void *migrate_hook_start_multifd_mapped_ram_fdset(QTestState *from, + QTestState *to) +{ + g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME); + + fdset_add_fds(from, file, O_WRONLY, 2, false); + fdset_add_fds(to, file, O_RDONLY, 2, false); + + return NULL; +} + +static void test_multifd_file_mapped_ram_fdset(void) +{ + g_autofree char *uri = g_strdup_printf("file:/dev/fdset/1,offset=%d", + FILE_TEST_OFFSET); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + .start_hook = migrate_hook_start_multifd_mapped_ram_fdset, + .end_hook = migrate_hook_end_multifd_mapped_ram_fdset, + .start = { + .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true, + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + }; + + test_file_common(&args, true); +} + +static void test_multifd_file_mapped_ram_fdset_dio(void) +{ + g_autofree char *uri = g_strdup_printf("file:/dev/fdset/1,offset=%d", + FILE_TEST_OFFSET); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = "defer", + .start_hook = migrate_hook_start_multifd_mapped_ram_fdset_dio, + .end_hook = migrate_hook_end_multifd_mapped_ram_fdset, + .start = { + .caps[MIGRATION_CAPABILITY_MAPPED_RAM] = true, + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + }; + + if (!probe_o_direct_support(tmpfs)) { + g_test_skip("Filesystem does not support O_DIRECT"); + return; + } + + test_file_common(&args, true); +} +#endif /* !_WIN32 */ + +static void migration_test_add_file_smoke(MigrationTestEnv *env) +{ + migration_test_add("/migration/precopy/file", + test_precopy_file); + + migration_test_add("/migration/multifd/file/mapped-ram/dio", + test_multifd_file_mapped_ram_dio); +} + +void migration_test_add_file(MigrationTestEnv *env) +{ + tmpfs = env->tmpfs; + + migration_test_add_file_smoke(env); + + if (!env->full_set) { + return; + } + + migration_test_add("/migration/precopy/file/offset", + test_precopy_file_offset); +#ifndef _WIN32 + migration_test_add("/migration/precopy/file/offset/fdset", + test_precopy_file_offset_fdset); +#endif + migration_test_add("/migration/precopy/file/offset/bad", + test_precopy_file_offset_bad); + + migration_test_add("/migration/precopy/file/mapped-ram", + test_precopy_file_mapped_ram); + migration_test_add("/migration/precopy/file/mapped-ram/live", + test_precopy_file_mapped_ram_live); + + migration_test_add("/migration/multifd/file/mapped-ram", + test_multifd_file_mapped_ram); + migration_test_add("/migration/multifd/file/mapped-ram/live", + test_multifd_file_mapped_ram_live); + +#ifndef _WIN32 + migration_test_add("/migration/multifd/file/mapped-ram/fdset", + test_multifd_file_mapped_ram_fdset); + migration_test_add("/migration/multifd/file/mapped-ram/fdset/dio", + test_multifd_file_mapped_ram_fdset_dio); +#endif +} diff --git a/tests/qtest/migration/framework.c b/tests/qtest/migration/framework.c new file mode 100644 index 
00000000000..407c9023c05 --- /dev/null +++ b/tests/qtest/migration/framework.c @@ -0,0 +1,1066 @@ +/* + * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates + * based on the vhost-user-test.c that is: + * Copyright (c) 2014 Virtual Open Systems Sarl. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" + +#include "chardev/char.h" +#include "crypto/tlscredspsk.h" +#include "libqtest.h" +#include "migration/bootfile.h" +#include "migration/framework.h" +#include "migration/migration-qmp.h" +#include "migration/migration-util.h" +#include "ppc-util.h" +#include "qapi/error.h" +#include "qobject/qjson.h" +#include "qobject/qlist.h" +#include "qemu/module.h" +#include "qemu/option.h" +#include "qemu/range.h" +#include "qemu/sockets.h" + + +#define QEMU_VM_FILE_MAGIC 0x5145564d +#define QEMU_ENV_SRC "QTEST_QEMU_BINARY_SRC" +#define QEMU_ENV_DST "QTEST_QEMU_BINARY_DST" +#define MULTIFD_TEST_CHANNELS 4 + +unsigned start_address; +unsigned end_address; +static QTestMigrationState src_state; +static QTestMigrationState dst_state; +static char *tmpfs; + +/* + * An initial 3 MB offset is used as that corresponds + * to ~1 sec of data transfer with our bandwidth setting. + */ +#define MAGIC_OFFSET_BASE (3 * 1024 * 1024) +/* + * A further 1k is added to ensure we're not a multiple + * of TEST_MEM_PAGE_SIZE, thus avoid clash with writes + * from the migration guest workload. + */ +#define MAGIC_OFFSET_SHUFFLE 1024 +#define MAGIC_OFFSET (MAGIC_OFFSET_BASE + MAGIC_OFFSET_SHUFFLE) +#define MAGIC_MARKER 0xFEED12345678CAFEULL + + +/* + * Wait for some output in the serial output file, + * we get an 'A' followed by an endless string of 'B's + * but on the destination we won't have the A (unless we enabled suspend/resume) + */ +void wait_for_serial(const char *side) +{ + g_autofree char *serialpath = g_strdup_printf("%s/%s", tmpfs, side); + FILE *serialfile = fopen(serialpath, "r"); + + do { + int readvalue = fgetc(serialfile); + + switch (readvalue) { + case 'A': + /* Fine */ + break; + + case 'B': + /* It's alive! */ + fclose(serialfile); + return; + + case EOF: + fseek(serialfile, 0, SEEK_SET); + usleep(1000); + break; + + default: + fprintf(stderr, "Unexpected %d on %s serial\n", readvalue, side); + g_assert_not_reached(); + } + } while (true); +} + +void migrate_prepare_for_dirty_mem(QTestState *from) +{ + /* + * The guest workflow iterates from start_address to + * end_address, writing 1 byte every TEST_MEM_PAGE_SIZE + * bytes. + * + * IOW, if we write to mem at a point which is NOT + * a multiple of TEST_MEM_PAGE_SIZE, our write won't + * conflict with the migration workflow. + * + * We put in a marker here, that we'll use to determine + * when the data has been transferred to the dst. + */ + qtest_writeq(from, start_address + MAGIC_OFFSET, MAGIC_MARKER); +} + +void migrate_wait_for_dirty_mem(QTestState *from, QTestState *to) +{ + uint64_t watch_address = start_address + MAGIC_OFFSET_BASE; + uint64_t marker_address = start_address + MAGIC_OFFSET; + uint8_t watch_byte; + + /* + * Wait for the MAGIC_MARKER to get transferred, as an + * indicator that a migration pass has made some known + * amount of progress. 
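/*
 * Illustrative sketch (not part of the patch): how a live test is expected
 * to combine the marker helpers defined in this file, assuming the same
 * includes as framework.c. The marker is planted before migration starts,
 * and the test only lets migration converge once the marker has been seen
 * on the destination, i.e. after at least one pass of real data transfer.
 * The wrapper function name is hypothetical.
 */
static void example_live_migration(QTestState *from, QTestState *to,
                                   const char *uri)
{
    migrate_ensure_non_converge(from);
    migrate_prepare_for_dirty_mem(from);     /* write MAGIC_MARKER on src */

    migrate_qmp(from, to, uri, NULL, "{}");

    /* returns once the marker and freshly dirtied data reached the dst */
    migrate_wait_for_dirty_mem(from, to);

    migrate_ensure_converge(from);
    wait_for_migration_complete(from);
}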
+ */ + do { + usleep(1000 * 10); + } while (qtest_readq(to, marker_address) != MAGIC_MARKER); + + + /* If suspended, src only iterates once, and watch_byte may never change */ + if (src_state.suspend_me) { + return; + } + + /* + * Now ensure that already transferred bytes are + * dirty again from the guest workload. Note the + * guest byte value will wrap around and by chance + * match the original watch_byte. This is harmless + * as we'll eventually see a different value if we + * keep watching + */ + watch_byte = qtest_readb(from, watch_address); + do { + usleep(1000 * 10); + } while (qtest_readb(from, watch_address) == watch_byte); +} + +static void check_guests_ram(QTestState *who) +{ + /* + * Our ASM test will have been incrementing one byte from each page from + * start_address to < end_address in order. This gives us a constraint + * that any page's byte should be equal or less than the previous pages + * byte (mod 256); and they should all be equal except for one transition + * at the point where we meet the incrementer. (We're running this with + * the guest stopped). + */ + unsigned address; + uint8_t first_byte; + uint8_t last_byte; + bool hit_edge = false; + int bad = 0; + + qtest_memread(who, start_address, &first_byte, 1); + last_byte = first_byte; + + for (address = start_address + TEST_MEM_PAGE_SIZE; address < end_address; + address += TEST_MEM_PAGE_SIZE) + { + uint8_t b; + qtest_memread(who, address, &b, 1); + if (b != last_byte) { + if (((b + 1) % 256) == last_byte && !hit_edge) { + /* + * This is OK, the guest stopped at the point of + * incrementing the previous page but didn't get + * to us yet. + */ + hit_edge = true; + last_byte = b; + } else { + bad++; + if (bad <= 10) { + fprintf(stderr, "Memory content inconsistency at %x" + " first_byte = %x last_byte = %x current = %x" + " hit_edge = %x\n", + address, first_byte, last_byte, b, hit_edge); + } + } + } + } + if (bad >= 10) { + fprintf(stderr, "and in another %d pages", bad - 10); + } + g_assert(bad == 0); +} + +static void cleanup(const char *filename) +{ + g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, filename); + + unlink(path); +} + +static QList *migrate_start_get_qmp_capabilities(const MigrateStart *args) +{ + QList *capabilities = NULL; + + if (args->oob) { + capabilities = qlist_new(); + qlist_append_str(capabilities, "oob"); + } + return capabilities; +} + +static void migrate_start_set_capabilities(QTestState *from, QTestState *to, + MigrateStart *args) +{ + /* + * MigrationCapability_lookup and MIGRATION_CAPABILITY_ constants + * are from qapi-types-migration.h. + */ + for (uint8_t i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { + if (!args->caps[i]) { + continue; + } + if (from) { + migrate_set_capability(from, + MigrationCapability_lookup.array[i], true); + } + if (to) { + migrate_set_capability(to, + MigrationCapability_lookup.array[i], true); + } + } + + /* + * Always enable migration events. Libvirt always uses it, let's try + * to mimic as closer as that. + */ + migrate_set_capability(from, "events", true); + if (!args->defer_target_connect) { + migrate_set_capability(to, "events", true); + } + + /* + * Default number of channels should be fine for most + * tests. Individual tests can override by calling + * migrate_set_parameter() directly. 
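/*
 * Illustrative sketch (not part of the patch): the invariant that
 * check_guests_ram() above enforces, restated over a plain byte array so
 * the single-wrap-edge rule is easier to see. Each entry is the test byte
 * of one guest page; the guest increments them in address order, so a
 * consistent snapshot contains at most one place where the value steps
 * down by exactly one (mod 256).
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool ram_snapshot_consistent(const uint8_t *page_byte, size_t pages)
{
    bool hit_edge = false;

    for (size_t i = 1; i < pages; i++) {
        if (page_byte[i] == page_byte[i - 1]) {
            continue;                      /* same as previous page: fine */
        }
        if ((uint8_t)(page_byte[i] + 1) == page_byte[i - 1] && !hit_edge) {
            hit_edge = true;               /* the incrementer stopped here */
            continue;
        }
        return false;                      /* any other difference is corruption */
    }
    return true;
}

int main(void)
{
    uint8_t ok[]  = { 7, 7, 7, 6, 6 };     /* one edge: consistent */
    uint8_t bad[] = { 7, 7, 5, 6, 6 };     /* steps down by two: corrupted */

    assert(ram_snapshot_consistent(ok, sizeof(ok)));
    assert(!ram_snapshot_consistent(bad, sizeof(bad)));
    return 0;
}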
+ */ + if (args->caps[MIGRATION_CAPABILITY_MULTIFD]) { + migrate_set_parameter_int(from, "multifd-channels", + MULTIFD_TEST_CHANNELS); + migrate_set_parameter_int(to, "multifd-channels", + MULTIFD_TEST_CHANNELS); + } + + return; +} + +int migrate_start(QTestState **from, QTestState **to, const char *uri, + MigrateStart *args) +{ + /* options for source and target */ + g_autofree gchar *arch_opts = NULL; + g_autofree gchar *cmd_source = NULL; + g_autofree gchar *cmd_target = NULL; + const gchar *ignore_stderr; + g_autofree char *shmem_opts = NULL; + g_autofree char *shmem_path = NULL; + const char *kvm_opts = NULL; + const char *arch = qtest_get_arch(); + const char *memory_size; + const char *machine_alias, *machine_opts = ""; + g_autofree char *machine = NULL; + const char *bootpath; + g_autoptr(QList) capabilities = migrate_start_get_qmp_capabilities(args); + g_autofree char *memory_backend = NULL; + const char *events; + + if (args->use_shmem) { + if (!g_file_test("/dev/shm", G_FILE_TEST_IS_DIR)) { + g_test_skip("/dev/shm is not supported"); + return -1; + } + } + + dst_state = (QTestMigrationState) { }; + src_state = (QTestMigrationState) { }; + bootpath = bootfile_create(arch, tmpfs, args->suspend_me); + src_state.suspend_me = args->suspend_me; + + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { + memory_size = "150M"; + + if (g_str_equal(arch, "i386")) { + machine_alias = "pc"; + } else { + machine_alias = "q35"; + } + arch_opts = g_strdup_printf( + "-drive if=none,id=d0,file=%s,format=raw " + "-device ide-hd,drive=d0,secs=1,cyls=1,heads=1", bootpath); + start_address = X86_TEST_MEM_START; + end_address = X86_TEST_MEM_END; + } else if (g_str_equal(arch, "s390x")) { + memory_size = "128M"; + machine_alias = "s390-ccw-virtio"; + arch_opts = g_strdup_printf("-bios %s", bootpath); + start_address = S390_TEST_MEM_START; + end_address = S390_TEST_MEM_END; + } else if (strcmp(arch, "ppc64") == 0) { + memory_size = "256M"; + start_address = PPC_TEST_MEM_START; + end_address = PPC_TEST_MEM_END; + machine_alias = "pseries"; + machine_opts = "vsmt=8"; + arch_opts = g_strdup_printf( + "-nodefaults -machine " PSERIES_DEFAULT_CAPABILITIES " " + "-bios %s", bootpath); + } else if (strcmp(arch, "aarch64") == 0) { + memory_size = "150M"; + machine_alias = "virt"; + machine_opts = "gic-version=3"; + arch_opts = g_strdup_printf("-cpu max -kernel %s", bootpath); + start_address = ARM_TEST_MEM_START; + end_address = ARM_TEST_MEM_END; + } else { + g_assert_not_reached(); + } + + if (!getenv("QTEST_LOG") && args->hide_stderr) { +#ifndef _WIN32 + ignore_stderr = "2>/dev/null"; +#else + /* + * On Windows the QEMU executable is created via CreateProcess() and + * IO redirection does not work, so don't bother adding IO redirection + * to the command line. 
+ */ + ignore_stderr = ""; +#endif + } else { + ignore_stderr = ""; + } + + if (args->use_shmem) { + shmem_path = g_strdup_printf("/dev/shm/qemu-%d", getpid()); + shmem_opts = g_strdup_printf( + "-object memory-backend-file,id=mem0,size=%s" + ",mem-path=%s,share=on -numa node,memdev=mem0", + memory_size, shmem_path); + } + + if (args->memory_backend) { + memory_backend = g_strdup_printf(args->memory_backend, memory_size); + } else { + memory_backend = g_strdup_printf("-m %s ", memory_size); + } + + if (args->use_dirty_ring) { + kvm_opts = ",dirty-ring-size=4096"; + } + + if (!qtest_has_machine(machine_alias)) { + g_autofree char *msg = g_strdup_printf("machine %s not supported", machine_alias); + g_test_skip(msg); + return -1; + } + + machine = resolve_machine_version(machine_alias, QEMU_ENV_SRC, + QEMU_ENV_DST); + + g_test_message("Using machine type: %s", machine); + + cmd_source = g_strdup_printf("-accel kvm%s -accel tcg " + "-machine %s,%s " + "-name source,debug-threads=on " + "%s " + "-serial file:%s/src_serial " + "%s %s %s %s", + kvm_opts ? kvm_opts : "", + machine, machine_opts, + memory_backend, tmpfs, + arch_opts ? arch_opts : "", + shmem_opts ? shmem_opts : "", + args->opts_source ? args->opts_source : "", + ignore_stderr); + if (!args->only_target) { + *from = qtest_init_ext(QEMU_ENV_SRC, cmd_source, capabilities, true); + qtest_qmp_set_event_callback(*from, + migrate_watch_for_events, + &src_state); + } + + /* + * If the monitor connection is deferred, enable events on the command line + * so none are missed. This is for testing only, do not set migration + * options like this in general. + */ + events = args->defer_target_connect ? "-global migration.x-events=on" : ""; + + cmd_target = g_strdup_printf("-accel kvm%s -accel tcg " + "-machine %s,%s " + "-name target,debug-threads=on " + "%s " + "-serial file:%s/dest_serial " + "-incoming %s " + "%s %s %s %s %s", + kvm_opts ? kvm_opts : "", + machine, machine_opts, + memory_backend, tmpfs, uri, + events, + arch_opts ? arch_opts : "", + shmem_opts ? shmem_opts : "", + args->opts_target ? args->opts_target : "", + ignore_stderr); + *to = qtest_init_ext(QEMU_ENV_DST, cmd_target, capabilities, + !args->defer_target_connect); + qtest_qmp_set_event_callback(*to, + migrate_watch_for_events, + &dst_state); + + /* + * Remove shmem file immediately to avoid memory leak in test failed case. 
+ * It's valid because QEMU has already opened this file + */ + if (args->use_shmem) { + unlink(shmem_path); + } + + migrate_start_set_capabilities(*from, *to, args); + + return 0; +} + +void migrate_end(QTestState *from, QTestState *to, bool test_dest) +{ + unsigned char dest_byte_a, dest_byte_b, dest_byte_c, dest_byte_d; + + qtest_quit(from); + + if (test_dest) { + qtest_memread(to, start_address, &dest_byte_a, 1); + + /* Destination still running, wait for a byte to change */ + do { + qtest_memread(to, start_address, &dest_byte_b, 1); + usleep(1000 * 10); + } while (dest_byte_a == dest_byte_b); + + qtest_qmp_assert_success(to, "{ 'execute' : 'stop'}"); + + /* With it stopped, check nothing changes */ + qtest_memread(to, start_address, &dest_byte_c, 1); + usleep(1000 * 200); + qtest_memread(to, start_address, &dest_byte_d, 1); + g_assert_cmpint(dest_byte_c, ==, dest_byte_d); + + check_guests_ram(to); + } + + qtest_quit(to); + + cleanup("migsocket"); + cleanup("cpr.sock"); + cleanup("src_serial"); + cleanup("dest_serial"); + cleanup(FILE_TEST_FILENAME); +} + +static int migrate_postcopy_prepare(QTestState **from_ptr, + QTestState **to_ptr, + MigrateCommon *args) +{ + QTestState *from, *to; + + /* set postcopy capabilities */ + args->start.caps[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME] = true; + args->start.caps[MIGRATION_CAPABILITY_POSTCOPY_RAM] = true; + + if (migrate_start(&from, &to, "defer", &args->start)) { + return -1; + } + + if (args->start_hook) { + args->postcopy_data = args->start_hook(from, to); + } + + migrate_ensure_non_converge(from); + migrate_prepare_for_dirty_mem(from); + qtest_qmp_assert_success(to, "{ 'execute': 'migrate-incoming'," + " 'arguments': { " + " 'channels': [ { 'channel-type': 'main'," + " 'addr': { 'transport': 'socket'," + " 'type': 'inet'," + " 'host': '127.0.0.1'," + " 'port': '0' } } ] } }"); + + /* Wait for the first serial output from the source */ + wait_for_serial("src_serial"); + wait_for_suspend(from, &src_state); + + migrate_qmp(from, to, NULL, NULL, "{}"); + + migrate_wait_for_dirty_mem(from, to); + + *from_ptr = from; + *to_ptr = to; + + return 0; +} + +static void migrate_postcopy_complete(QTestState *from, QTestState *to, + MigrateCommon *args) +{ + MigrationTestEnv *env = migration_get_env(); + + wait_for_migration_complete(from); + + if (args->start.suspend_me) { + /* wakeup succeeds only if guest is suspended */ + qtest_qmp_assert_success(to, "{'execute': 'system_wakeup'}"); + } + + /* Make sure we get at least one "B" on destination */ + wait_for_serial("dest_serial"); + + if (env->uffd_feature_thread_id) { + read_blocktime(to); + } + + if (args->end_hook) { + args->end_hook(from, to, args->postcopy_data); + args->postcopy_data = NULL; + } + + migrate_end(from, to, true); +} + +void test_postcopy_common(MigrateCommon *args) +{ + QTestState *from, *to; + + if (migrate_postcopy_prepare(&from, &to, args)) { + return; + } + migrate_postcopy_start(from, to, &src_state); + migrate_postcopy_complete(from, to, args); +} + +static void wait_for_postcopy_status(QTestState *one, const char *status) +{ + wait_for_migration_status(one, status, + (const char * []) { + "failed", "active", + "completed", NULL + }); +} + +static void postcopy_recover_fail(QTestState *from, QTestState *to, + PostcopyRecoveryFailStage stage) +{ +#ifndef _WIN32 + bool fail_early = (stage == POSTCOPY_FAIL_CHANNEL_ESTABLISH); + int ret, pair1[2], pair2[2]; + char c; + + g_assert(stage > POSTCOPY_FAIL_NONE && stage < POSTCOPY_FAIL_MAX); + + /* Create two unrelated socketpairs 
*/ + ret = qemu_socketpair(PF_LOCAL, SOCK_STREAM, 0, pair1); + g_assert_cmpint(ret, ==, 0); + + ret = qemu_socketpair(PF_LOCAL, SOCK_STREAM, 0, pair2); + g_assert_cmpint(ret, ==, 0); + + /* + * Give the guests unpaired ends of the sockets, so they'll all blocked + * at reading. This mimics a wrong channel established. + */ + qtest_qmp_fds_assert_success(from, &pair1[0], 1, + "{ 'execute': 'getfd'," + " 'arguments': { 'fdname': 'fd-mig' }}"); + qtest_qmp_fds_assert_success(to, &pair2[0], 1, + "{ 'execute': 'getfd'," + " 'arguments': { 'fdname': 'fd-mig' }}"); + + /* + * Write the 1st byte as QEMU_VM_COMMAND (0x8) for the dest socket, to + * emulate the 1st byte of a real recovery, but stops from there to + * keep dest QEMU in RECOVER. This is needed so that we can kick off + * the recover process on dest QEMU (by triggering the G_IO_IN event). + * + * NOTE: this trick is not needed on src QEMUs, because src doesn't + * rely on an pre-existing G_IO_IN event, so it will always trigger the + * upcoming recovery anyway even if it can read nothing. + */ +#define QEMU_VM_COMMAND 0x08 + c = QEMU_VM_COMMAND; + ret = send(pair2[1], &c, 1, 0); + g_assert_cmpint(ret, ==, 1); + + if (stage == POSTCOPY_FAIL_CHANNEL_ESTABLISH) { + /* + * This will make src QEMU to fail at an early stage when trying to + * resume later, where it shouldn't reach RECOVER stage at all. + */ + close(pair1[1]); + } + + migrate_recover(to, "fd:fd-mig"); + migrate_qmp(from, to, "fd:fd-mig", NULL, "{'resume': true}"); + + /* + * Source QEMU has an extra RECOVER_SETUP phase, dest doesn't have it. + * Make sure it appears along the way. + */ + migration_event_wait(from, "postcopy-recover-setup"); + + if (fail_early) { + /* + * When fails at reconnection, src QEMU will automatically goes + * back to PAUSED state. Making sure there is an event in this + * case: Libvirt relies on this to detect early reconnection + * errors. + */ + migration_event_wait(from, "postcopy-paused"); + } else { + /* + * We want to test "fail later" at RECOVER stage here. Make sure + * both QEMU instances will go into RECOVER stage first, then test + * kicking them out using migrate-pause. + * + * Explicitly check the RECOVER event on src, that's what Libvirt + * relies on, rather than polling. + */ + migration_event_wait(from, "postcopy-recover"); + wait_for_postcopy_status(from, "postcopy-recover"); + + /* Need an explicit kick on src QEMU in this case */ + migrate_pause(from); + } + + /* + * For all failure cases, we'll reach such states on both sides now. + * Check them. + */ + wait_for_postcopy_status(from, "postcopy-paused"); + wait_for_postcopy_status(to, "postcopy-recover"); + + /* + * Kick dest QEMU out too. This is normally not needed in reality + * because when the channel is shutdown it should also happen on src. + * However here we used separate socket pairs so we need to do that + * explicitly. 
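/*
 * Illustrative sketch (not part of the patch): the plumbing trick used by
 * postcopy_recover_fail() above, reduced to plain POSIX sockets. Each QEMU
 * gets one end of an unrelated socketpair, so both block on read; a single
 * 0x08 byte (QEMU_VM_COMMAND) pushed into the destination's pair is just
 * enough to fire its G_IO_IN handler without ever forming a valid channel.
 */
#include <assert.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    int pair[2];
    char c = 0x08;              /* QEMU_VM_COMMAND, first byte of a command */
    char got = 0;

    assert(socketpair(PF_LOCAL, SOCK_STREAM, 0, pair) == 0);

    /* one byte is enough to wake the reading side's event loop */
    assert(send(pair[1], &c, 1, 0) == 1);
    assert(recv(pair[0], &got, 1, 0) == 1);
    assert(got == 0x08);

    close(pair[0]);
    close(pair[1]);
    return 0;
}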
+ */ + migrate_pause(to); + wait_for_postcopy_status(to, "postcopy-paused"); + + close(pair1[0]); + close(pair2[0]); + close(pair2[1]); + + if (stage != POSTCOPY_FAIL_CHANNEL_ESTABLISH) { + close(pair1[1]); + } +#endif +} + +void test_postcopy_recovery_common(MigrateCommon *args) +{ + QTestState *from, *to; + g_autofree char *uri = NULL; + + /* + * Always enable OOB QMP capability for recovery tests, migrate-recover is + * executed out-of-band + */ + args->start.oob = true; + + /* Always hide errors for postcopy recover tests since they're expected */ + args->start.hide_stderr = true; + + if (migrate_postcopy_prepare(&from, &to, args)) { + return; + } + + /* Turn postcopy speed down, 4K/s is slow enough on any machines */ + migrate_set_parameter_int(from, "max-postcopy-bandwidth", 4096); + + /* Now we start the postcopy */ + migrate_postcopy_start(from, to, &src_state); + + /* + * Wait until postcopy is really started; we can only run the + * migrate-pause command during a postcopy + */ + wait_for_migration_status(from, "postcopy-active", NULL); + + /* + * Manually stop the postcopy migration. This emulates a network + * failure with the migration socket + */ + migrate_pause(from); + + /* + * Wait for destination side to reach postcopy-paused state. The + * migrate-recover command can only succeed if destination machine + * is in the paused state + */ + wait_for_postcopy_status(to, "postcopy-paused"); + wait_for_postcopy_status(from, "postcopy-paused"); + + if (args->postcopy_recovery_fail_stage) { + /* + * Test when a wrong socket specified for recover, and then the + * ability to kick it out, and continue with a correct socket. + */ + postcopy_recover_fail(from, to, args->postcopy_recovery_fail_stage); + /* continue with a good recovery */ + } + + /* + * Create a new socket to emulate a new channel that is different + * from the broken migration channel; tell the destination to + * listen to the new port + */ + uri = g_strdup_printf("unix:%s/migsocket-recover", tmpfs); + migrate_recover(to, uri); + + /* + * Try to rebuild the migration channel using the resume flag and + * the newly created channel + */ + migrate_qmp(from, to, uri, NULL, "{'resume': true}"); + + /* Restore the postcopy bandwidth to unlimited */ + migrate_set_parameter_int(from, "max-postcopy-bandwidth", 0); + + migrate_postcopy_complete(from, to, args); +} + +void test_precopy_common(MigrateCommon *args) +{ + QTestState *from, *to; + void *data_hook = NULL; + QObject *in_channels = NULL; + QObject *out_channels = NULL; + + g_assert(!args->cpr_channel || args->connect_channels); + + if (migrate_start(&from, &to, args->listen_uri, &args->start)) { + return; + } + + if (args->start_hook) { + data_hook = args->start_hook(from, to); + } + + /* Wait for the first serial output from the source */ + if (args->result == MIG_TEST_SUCCEED) { + wait_for_serial("src_serial"); + wait_for_suspend(from, &src_state); + } + + if (args->live) { + migrate_ensure_non_converge(from); + migrate_prepare_for_dirty_mem(from); + } else { + /* + * Testing non-live migration, we allow it to run at + * full speed to ensure short test case duration. + * For tests expected to fail, we don't need to + * change anything. + */ + if (args->result == MIG_TEST_SUCCEED) { + qtest_qmp_assert_success(from, "{ 'execute' : 'stop'}"); + wait_for_stop(from, &src_state); + migrate_ensure_converge(from); + } + } + + /* + * The cpr channel must be included in outgoing channels, but not in + * migrate-incoming channels. 
+ */ + if (args->connect_channels) { + if (args->start.defer_target_connect && + !strcmp(args->listen_uri, "defer")) { + in_channels = qobject_from_json(args->connect_channels, + &error_abort); + } + out_channels = qobject_from_json(args->connect_channels, &error_abort); + + if (args->cpr_channel) { + QList *channels_list = qobject_to(QList, out_channels); + QObject *obj = migrate_str_to_channel(args->cpr_channel); + + qlist_append(channels_list, obj); + } + } + + if (args->result == MIG_TEST_QMP_ERROR) { + migrate_qmp_fail(from, args->connect_uri, out_channels, "{}"); + goto finish; + } + + migrate_qmp(from, to, args->connect_uri, out_channels, "{}"); + + if (args->start.defer_target_connect) { + qtest_connect(to); + qtest_qmp_handshake(to, NULL); + if (!strcmp(args->listen_uri, "defer")) { + migrate_incoming_qmp(to, args->connect_uri, in_channels, "{}"); + } + } + + if (args->result != MIG_TEST_SUCCEED) { + bool allow_active = args->result == MIG_TEST_FAIL; + wait_for_migration_fail(from, allow_active); + + if (args->result == MIG_TEST_FAIL_DEST_QUIT_ERR) { + qtest_set_expected_status(to, EXIT_FAILURE); + } + } else { + if (args->live) { + /* + * For initial iteration(s) we must do a full pass, + * but for the final iteration, we need only wait + * for some dirty mem before switching to converge + */ + while (args->iterations > 1) { + wait_for_migration_pass(from, &src_state); + args->iterations--; + } + migrate_wait_for_dirty_mem(from, to); + + migrate_ensure_converge(from); + + /* + * We do this first, as it has a timeout to stop us + * hanging forever if migration didn't converge + */ + wait_for_migration_complete(from); + + wait_for_stop(from, &src_state); + + } else { + wait_for_migration_complete(from); + /* + * Must wait for dst to finish reading all incoming + * data on the socket before issuing 'cont' otherwise + * it'll be ignored + */ + wait_for_migration_complete(to); + + qtest_qmp_assert_success(to, "{ 'execute' : 'cont'}"); + } + + wait_for_resume(to, &dst_state); + + if (args->start.suspend_me) { + /* wakeup succeeds only if guest is suspended */ + qtest_qmp_assert_success(to, "{'execute': 'system_wakeup'}"); + } + + wait_for_serial("dest_serial"); + } + +finish: + if (args->end_hook) { + args->end_hook(from, to, data_hook); + } + + migrate_end(from, to, args->result == MIG_TEST_SUCCEED); +} + +static void file_dirty_offset_region(void) +{ + g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME); + size_t size = FILE_TEST_OFFSET; + g_autofree char *data = g_new0(char, size); + + memset(data, FILE_TEST_MARKER, size); + g_assert(g_file_set_contents(path, data, size, NULL)); +} + +static void file_check_offset_region(void) +{ + g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME); + size_t size = FILE_TEST_OFFSET; + g_autofree char *expected = g_new0(char, size); + g_autofree char *actual = NULL; + uint64_t *stream_start; + + /* + * Ensure the skipped offset region's data has not been touched + * and the migration stream starts at the right place. 
+ */ + + memset(expected, FILE_TEST_MARKER, size); + + g_assert(g_file_get_contents(path, &actual, NULL, NULL)); + g_assert(!memcmp(actual, expected, size)); + + stream_start = (uint64_t *)(actual + size); + g_assert_cmpint(cpu_to_be64(*stream_start) >> 32, ==, QEMU_VM_FILE_MAGIC); +} + +void test_file_common(MigrateCommon *args, bool stop_src) +{ + QTestState *from, *to; + void *data_hook = NULL; + bool check_offset = false; + + if (migrate_start(&from, &to, args->listen_uri, &args->start)) { + return; + } + + /* + * File migration is never live. We can keep the source VM running + * during migration, but the destination will not be running + * concurrently. + */ + g_assert_false(args->live); + + if (g_strrstr(args->connect_uri, "offset=")) { + check_offset = true; + /* + * This comes before the start_hook because it's equivalent to + * a management application creating the file and writing to + * it so hooks should expect the file to be already present. + */ + file_dirty_offset_region(); + } + + if (args->start_hook) { + data_hook = args->start_hook(from, to); + } + + migrate_ensure_converge(from); + wait_for_serial("src_serial"); + + if (stop_src) { + qtest_qmp_assert_success(from, "{ 'execute' : 'stop'}"); + wait_for_stop(from, &src_state); + } + + if (args->result == MIG_TEST_QMP_ERROR) { + migrate_qmp_fail(from, args->connect_uri, NULL, "{}"); + goto finish; + } + + migrate_qmp(from, to, args->connect_uri, NULL, "{}"); + wait_for_migration_complete(from); + + /* + * We need to wait for the source to finish before starting the + * destination. + */ + migrate_incoming_qmp(to, args->connect_uri, NULL, "{}"); + wait_for_migration_complete(to); + + if (stop_src) { + qtest_qmp_assert_success(to, "{ 'execute' : 'cont'}"); + } + wait_for_resume(to, &dst_state); + + wait_for_serial("dest_serial"); + + if (check_offset) { + file_check_offset_region(); + } + +finish: + if (args->end_hook) { + args->end_hook(from, to, data_hook); + } + + migrate_end(from, to, args->result == MIG_TEST_SUCCEED); +} + +void *migrate_hook_start_precopy_tcp_multifd_common(QTestState *from, + QTestState *to, + const char *method) +{ + migrate_set_parameter_str(from, "multifd-compression", method); + migrate_set_parameter_str(to, "multifd-compression", method); + + /* Start incoming migration from the 1st socket */ + migrate_incoming_qmp(to, "tcp:127.0.0.1:0", NULL, "{}"); + + return NULL; +} + +QTestMigrationState *get_src(void) +{ + return &src_state; +} + +MigrationTestEnv *migration_get_env(void) +{ + static MigrationTestEnv *env; + g_autoptr(GError) err = NULL; + + if (env) { + return env; + } + + env = g_new0(MigrationTestEnv, 1); + env->qemu_src = getenv(QEMU_ENV_SRC); + env->qemu_dst = getenv(QEMU_ENV_DST); + + /* + * The default QTEST_QEMU_BINARY must always be provided because + * that is what helpers use to query the accel type and + * architecture. 
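+ * QEMU_ENV_SRC and QEMU_ENV_DST may each override one side, but setting both at once is rejected below.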
+ */ + if (env->qemu_src && env->qemu_dst) { + g_test_message("Only one of %s, %s is allowed", + QEMU_ENV_SRC, QEMU_ENV_DST); + exit(1); + } + + env->has_kvm = qtest_has_accel("kvm"); + env->has_tcg = qtest_has_accel("tcg"); + + if (!env->has_tcg && !env->has_kvm) { + g_test_skip("No KVM or TCG accelerator available"); + return env; + } + + env->has_dirty_ring = kvm_dirty_ring_supported(); + env->has_uffd = ufd_version_check(&env->uffd_feature_thread_id); + env->arch = qtest_get_arch(); + env->is_x86 = !strcmp(env->arch, "i386") || !strcmp(env->arch, "x86_64"); + + env->tmpfs = g_dir_make_tmp("migration-test-XXXXXX", &err); + if (!env->tmpfs) { + g_test_message("Can't create temporary directory in %s: %s", + g_get_tmp_dir(), err->message); + } + g_assert(env->tmpfs); + + tmpfs = env->tmpfs; + + return env; +} + +int migration_env_clean(MigrationTestEnv *env) +{ + char *tmpfs; + int ret = 0; + + if (!env) { + return ret; + } + + bootfile_delete(); + + tmpfs = env->tmpfs; + ret = rmdir(tmpfs); + if (ret != 0) { + g_test_message("unable to rmdir: path (%s): %s", + tmpfs, strerror(errno)); + } + g_free(tmpfs); + + return ret; +} diff --git a/tests/qtest/migration/framework.h b/tests/qtest/migration/framework.h new file mode 100644 index 00000000000..01e425e64e2 --- /dev/null +++ b/tests/qtest/migration/framework.h @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates + * based on the vhost-user-test.c that is: + * Copyright (c) 2014 Virtual Open Systems Sarl. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef TEST_FRAMEWORK_H +#define TEST_FRAMEWORK_H + +#include "libqtest.h" +#include + +#define FILE_TEST_FILENAME "migfile" +#define FILE_TEST_OFFSET 0x1000 +#define FILE_TEST_MARKER 'X' + +typedef struct MigrationTestEnv { + bool has_kvm; + bool has_tcg; + bool has_uffd; + bool uffd_feature_thread_id; + bool has_dirty_ring; + bool is_x86; + bool full_set; + const char *arch; + const char *qemu_src; + const char *qemu_dst; + char *tmpfs; +} MigrationTestEnv; + +MigrationTestEnv *migration_get_env(void); +int migration_env_clean(MigrationTestEnv *env); + +/* + * A hook that runs after the src and dst QEMUs have been + * created, but before the migration is started. This can + * be used to set migration parameters and capabilities. + * + * Returns: NULL, or a pointer to opaque state to be + * later passed to the TestMigrateEndHook + */ +typedef void * (*TestMigrateStartHook)(QTestState *from, + QTestState *to); + +/* + * A hook that runs after the migration has finished, + * regardless of whether it succeeded or failed, but + * before QEMU has terminated (unless it self-terminated + * due to migration error) + * + * @opaque is a pointer to state previously returned + * by the TestMigrateStartHook if any, or NULL. + */ +typedef void (*TestMigrateEndHook)(QTestState *from, + QTestState *to, + void *opaque); + +/* + * Our goal is to ensure that we run a single full migration + * iteration, and also dirty memory, ensuring that at least + * one further iteration is required. + * + * We can't directly synchronize with the start of a migration + * so we have to apply some tricks monitoring memory that is + * transferred. + * + * Initially we set the migration bandwidth to an insanely + * low value, with tiny max downtime too. This basically + * guarantees migration will never complete. 
+ * + * This will result in a test that is unacceptably slow though, + * so we can't let the entire migration pass run at this speed. + * Our intent is to let it run just long enough that we can + * prove data prior to the marker has been transferred *AND* + * also prove this transferred data is dirty again. + * + * Before migration starts, we write a 64-bit magic marker + * into a fixed location in the src VM RAM. + * + * Then watch dst memory until the marker appears. This is + * proof that start_address -> MAGIC_OFFSET_BASE has been + * transferred. + * + * Finally we go back to the source and read a byte just + * before the marker until we see it flip in value. This + * is proof that start_address -> MAGIC_OFFSET_BASE + * is now dirty again. + * + * IOW, we're guaranteed at least a 2nd migration pass + * at this point. + * + * We can now let migration run at full speed to finish + * the test + */ +typedef struct { + /* + * QTEST_LOG=1 may override this. When QTEST_LOG=1, we always dump errors + * unconditionally, because it means the user would like to be verbose. + */ + bool hide_stderr; + bool use_shmem; + /* only launch the target process */ + bool only_target; + /* Use dirty ring if true; dirty logging otherwise */ + bool use_dirty_ring; + const char *opts_source; + const char *opts_target; + /* suspend the src before migrating to dest. */ + bool suspend_me; + /* enable OOB QMP capability */ + bool oob; + /* + * Format string for the main memory backend, containing one %s where the + * size is plugged in. If omitted, "-m %s" is used. + */ + const char *memory_backend; + + /* Do not connect to target monitor and qtest sockets in qtest_init */ + bool defer_target_connect; + + /* + * Migration capabilities to be set in both source and + * destination. For unilateral capabilities, use + * migration_set_capabilities(). + */ + bool caps[MIGRATION_CAPABILITY__MAX]; +} MigrateStart; + +typedef enum PostcopyRecoveryFailStage { + /* + * "no failure" must be 0 as it's the default. OTOH, real failure + * cases must be >0 to make sure they trigger by a "if" test. + */ + POSTCOPY_FAIL_NONE = 0, + POSTCOPY_FAIL_CHANNEL_ESTABLISH, + POSTCOPY_FAIL_RECOVERY, + POSTCOPY_FAIL_MAX +} PostcopyRecoveryFailStage; + +typedef struct { + /* Optional: fine tune start parameters */ + MigrateStart start; + + /* Required: the URI for the dst QEMU to listen on */ + const char *listen_uri; + + /* + * Optional: the URI for the src QEMU to connect to + * If NULL, then it will query the dst QEMU for its actual + * listening address and use that as the connect address. + * This allows for dynamically picking a free TCP port. + */ + const char *connect_uri; + + /* + * Optional: JSON-formatted list of src QEMU URIs. If a port is + * defined as '0' in any QDict key a value of '0' will be + * automatically converted to the correct destination port. + */ + const char *connect_channels; + + /* Optional: the cpr migration channel, in JSON or dotted keys format */ + const char *cpr_channel; + + /* Optional: callback to run at start to set migration parameters */ + TestMigrateStartHook start_hook; + /* Optional: callback to run at finish to cleanup */ + TestMigrateEndHook end_hook; + + /* + * Optional: normally we expect the migration process to complete. + * + * There can be a variety of reasons and stages in which failure + * can happen during tests. 
+ * + * If a failure is expected to happen at time of establishing + * the connection, then MIG_TEST_FAIL will indicate that the dst + * QEMU is expected to stay running and accept future migration + * connections. + * + * If a failure is expected to happen while processing the + * migration stream, then MIG_TEST_FAIL_DEST_QUIT_ERR will indicate + * that the dst QEMU is expected to quit with non-zero exit status + */ + enum { + /* This test should succeed, the default */ + MIG_TEST_SUCCEED = 0, + /* This test should fail, dest qemu should keep alive */ + MIG_TEST_FAIL, + /* This test should fail, dest qemu should fail with abnormal status */ + MIG_TEST_FAIL_DEST_QUIT_ERR, + /* The QMP command for this migration should fail with an error */ + MIG_TEST_QMP_ERROR, + } result; + + /* + * Optional: set number of migration passes to wait for, if live==true. + * If zero, then merely wait for a few MB of dirty data + */ + unsigned int iterations; + + /* + * Optional: whether the guest CPUs should be running during a precopy + * migration test. We used to always run with live but it took much + * longer so we reduced live tests to only the ones that have solid + * reason to be tested live-only. For each of the new test cases for + * precopy please provide justifications to use live explicitly (please + * refer to existing ones with live=true), or use live=off by default. + */ + bool live; + + /* Postcopy specific fields */ + void *postcopy_data; + PostcopyRecoveryFailStage postcopy_recovery_fail_stage; +} MigrateCommon; + +void wait_for_serial(const char *side); +void migrate_prepare_for_dirty_mem(QTestState *from); +void migrate_wait_for_dirty_mem(QTestState *from, QTestState *to); +int migrate_start(QTestState **from, QTestState **to, const char *uri, + MigrateStart *args); +void migrate_end(QTestState *from, QTestState *to, bool test_dest); + +void test_postcopy_common(MigrateCommon *args); +void test_postcopy_recovery_common(MigrateCommon *args); +void test_precopy_common(MigrateCommon *args); +void test_file_common(MigrateCommon *args, bool stop_src); +void *migrate_hook_start_precopy_tcp_multifd_common(QTestState *from, + QTestState *to, + const char *method); + +typedef struct QTestMigrationState QTestMigrationState; +QTestMigrationState *get_src(void); + +#ifdef CONFIG_GNUTLS +void migration_test_add_tls(MigrationTestEnv *env); +#else +static inline void migration_test_add_tls(MigrationTestEnv *env) {}; +#endif +void migration_test_add_compression(MigrationTestEnv *env); +void migration_test_add_postcopy(MigrationTestEnv *env); +void migration_test_add_file(MigrationTestEnv *env); +void migration_test_add_precopy(MigrationTestEnv *env); +void migration_test_add_cpr(MigrationTestEnv *env); +void migration_test_add_misc(MigrationTestEnv *env); + +#endif /* TEST_FRAMEWORK_H */ diff --git a/tests/migration/i386/Makefile b/tests/qtest/migration/i386/Makefile similarity index 100% rename from tests/migration/i386/Makefile rename to tests/qtest/migration/i386/Makefile diff --git a/tests/migration/i386/a-b-bootblock.S b/tests/qtest/migration/i386/a-b-bootblock.S similarity index 100% rename from tests/migration/i386/a-b-bootblock.S rename to tests/qtest/migration/i386/a-b-bootblock.S diff --git a/tests/migration/i386/a-b-bootblock.h b/tests/qtest/migration/i386/a-b-bootblock.h similarity index 100% rename from tests/migration/i386/a-b-bootblock.h rename to tests/qtest/migration/i386/a-b-bootblock.h diff --git a/tests/qtest/migration/migration-qmp.c b/tests/qtest/migration/migration-qmp.c new file 
mode 100644 index 00000000000..66dd369ba7e --- /dev/null +++ b/tests/qtest/migration/migration-qmp.c @@ -0,0 +1,525 @@ +/* + * QTest QMP helpers for migration + * + * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates + * based on the vhost-user-test.c that is: + * Copyright (c) 2014 Virtual Open Systems Sarl. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "libqtest.h" +#include "migration-qmp.h" +#include "migration-util.h" +#include "qapi/error.h" +#include "qapi/qapi-types-migration.h" +#include "qapi/qapi-visit-migration.h" +#include "qobject/qdict.h" +#include "qobject/qjson.h" +#include "qobject/qlist.h" +#include "qapi/qobject-input-visitor.h" +#include "qapi/qobject-output-visitor.h" + +/* + * Number of seconds we wait when looking for migration + * status changes, to avoid the test suite hanging forever + * when things go wrong. Needs to be high enough to + * avoid false positives on loaded hosts. + */ +#define MIGRATION_STATUS_WAIT_TIMEOUT 120 + +/* + * Wait for a "MIGRATION" event. This is what Libvirt uses to track + * migration status changes. + */ +void migration_event_wait(QTestState *s, const char *target) +{ + QDict *response, *data; + const char *status; + bool found; + + do { + response = qtest_qmp_eventwait_ref(s, "MIGRATION"); + data = qdict_get_qdict(response, "data"); + g_assert(data); + status = qdict_get_str(data, "status"); + found = (strcmp(status, target) == 0); + qobject_unref(response); + } while (!found); +} + +/* + * Convert a string representing a single channel to an object. + * @str may be in JSON or dotted keys format. + */ +QObject *migrate_str_to_channel(const char *str) +{ + Visitor *v; + MigrationChannel *channel; + QObject *obj; + + /* Create the channel */ + v = qobject_input_visitor_new_str(str, "channel-type", &error_abort); + visit_type_MigrationChannel(v, NULL, &channel, &error_abort); + visit_free(v); + + /* Create the object */ + v = qobject_output_visitor_new(&obj); + visit_type_MigrationChannel(v, NULL, &channel, &error_abort); + visit_complete(v, &obj); + visit_free(v); + + qapi_free_MigrationChannel(channel); + return obj; +} + +void migrate_qmp_fail(QTestState *who, const char *uri, + QObject *channels, const char *fmt, ...) +{ + va_list ap; + QDict *args, *err; + + va_start(ap, fmt); + args = qdict_from_vjsonf_nofail(fmt, ap); + va_end(ap); + + g_assert(!qdict_haskey(args, "uri")); + if (uri) { + qdict_put_str(args, "uri", uri); + } + + g_assert(!qdict_haskey(args, "channels")); + if (channels) { + qdict_put_obj(args, "channels", channels); + } + + err = qtest_qmp_assert_failure_ref( + who, "{ 'execute': 'migrate', 'arguments': %p}", args); + + g_assert(qdict_haskey(err, "desc")); + + qobject_unref(err); +} + +/* + * Send QMP command "migrate". + * Arguments are built from @fmt... (formatted like + * qobject_from_jsonf_nofail()) with "uri": @uri spliced in. + */ +void migrate_qmp(QTestState *who, QTestState *to, const char *uri, + QObject *channels, const char *fmt, ...)
+{ + va_list ap; + QDict *args; + g_autofree char *connect_uri = NULL; + + va_start(ap, fmt); + args = qdict_from_vjsonf_nofail(fmt, ap); + va_end(ap); + + g_assert(!qdict_haskey(args, "uri")); + if (uri) { + qdict_put_str(args, "uri", uri); + } else if (!channels) { + connect_uri = migrate_get_connect_uri(to); + qdict_put_str(args, "uri", connect_uri); + } + + g_assert(!qdict_haskey(args, "channels")); + if (channels) { + QList *channel_list = qobject_to(QList, channels); + migrate_set_ports(to, channel_list); + qdict_put_obj(args, "channels", channels); + } + + qtest_qmp_assert_success(who, + "{ 'execute': 'migrate', 'arguments': %p}", args); +} + +void migrate_set_capability(QTestState *who, const char *capability, + bool value) +{ + qtest_qmp_assert_success(who, + "{ 'execute': 'migrate-set-capabilities'," + "'arguments': { " + "'capabilities': [ { " + "'capability': %s, 'state': %i } ] } }", + capability, value); +} + +void migrate_incoming_qmp(QTestState *to, const char *uri, QObject *channels, + const char *fmt, ...) +{ + va_list ap; + QDict *args, *rsp; + + va_start(ap, fmt); + args = qdict_from_vjsonf_nofail(fmt, ap); + va_end(ap); + + g_assert(!qdict_haskey(args, "uri")); + if (uri) { + qdict_put_str(args, "uri", uri); + } + + g_assert(!qdict_haskey(args, "channels")); + if (channels) { + qdict_put_obj(args, "channels", channels); + } + + /* This function relies on the event to work, make sure it's enabled */ + migrate_set_capability(to, "events", true); + + rsp = qtest_qmp(to, "{ 'execute': 'migrate-incoming', 'arguments': %p}", + args); + + if (!qdict_haskey(rsp, "return")) { + g_autoptr(GString) s = qobject_to_json_pretty(QOBJECT(rsp), true); + g_test_message("%s", s->str); + } + + g_assert(qdict_haskey(rsp, "return")); + qobject_unref(rsp); + + migration_event_wait(to, "setup"); +} + +static bool check_migration_status(QTestState *who, const char *goal, + const char **ungoals) +{ + bool ready; + char *current_status; + const char **ungoal; + + current_status = migrate_query_status(who); + ready = strcmp(current_status, goal) == 0; + if (!ungoals) { + g_assert_cmpstr(current_status, !=, "failed"); + /* + * If looking for a state other than completed, + * completion of migration would cause the test to + * hang. + */ + if (strcmp(goal, "completed") != 0) { + g_assert_cmpstr(current_status, !=, "completed"); + } + } else { + for (ungoal = ungoals; *ungoal; ungoal++) { + g_assert_cmpstr(current_status, !=, *ungoal); + } + } + g_free(current_status); + return ready; +} + +void wait_for_migration_status(QTestState *who, + const char *goal, const char **ungoals) +{ + g_test_timer_start(); + while (!check_migration_status(who, goal, ungoals)) { + usleep(1000); + + g_assert(g_test_timer_elapsed() < MIGRATION_STATUS_WAIT_TIMEOUT); + } +} + +void wait_for_migration_complete(QTestState *who) +{ + wait_for_migration_status(who, "completed", NULL); +} + +void wait_for_migration_fail(QTestState *from, bool allow_active) +{ + g_test_timer_start(); + QDict *rsp_return; + char *status; + bool failed; + + do { + status = migrate_query_status(from); + bool result = !strcmp(status, "setup") || !strcmp(status, "failed") || + (allow_active && !strcmp(status, "active")); + if (!result) { + fprintf(stderr, "%s: unexpected status status=%s allow_active=%d\n", + __func__, status, allow_active); + } + g_assert(result); + failed = !strcmp(status, "failed"); + g_free(status); + + g_assert(g_test_timer_elapsed() < MIGRATION_STATUS_WAIT_TIMEOUT); + } while (!failed); + + /* Is the machine currently running? 
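A failed outgoing migration must leave the source guest running, so it should be.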
*/ + rsp_return = qtest_qmp_assert_success_ref(from, + "{ 'execute': 'query-status' }"); + g_assert(qdict_haskey(rsp_return, "running")); + g_assert(qdict_get_bool(rsp_return, "running")); + qobject_unref(rsp_return); +} + +void wait_for_stop(QTestState *who, QTestMigrationState *state) +{ + if (!state->stop_seen) { + qtest_qmp_eventwait(who, "STOP"); + } +} + +void wait_for_resume(QTestState *who, QTestMigrationState *state) +{ + if (!state->resume_seen) { + qtest_qmp_eventwait(who, "RESUME"); + } +} + +void wait_for_suspend(QTestState *who, QTestMigrationState *state) +{ + if (state->suspend_me && !state->suspend_seen) { + qtest_qmp_eventwait(who, "SUSPEND"); + } +} + +/* + * Note: caller is responsible to free the returned object via + * qobject_unref() after use + */ +QDict *migrate_query(QTestState *who) +{ + return qtest_qmp_assert_success_ref(who, "{ 'execute': 'query-migrate' }"); +} + +QDict *migrate_query_not_failed(QTestState *who) +{ + const char *status; + QDict *rsp = migrate_query(who); + status = qdict_get_str(rsp, "status"); + if (g_str_equal(status, "failed")) { + g_printerr("query-migrate shows failed migration: %s\n", + qdict_get_str(rsp, "error-desc")); + } + g_assert(!g_str_equal(status, "failed")); + return rsp; +} + +/* + * Note: caller is responsible to free the returned object via + * g_free() after use + */ +gchar *migrate_query_status(QTestState *who) +{ + QDict *rsp_return = migrate_query(who); + gchar *status = g_strdup(qdict_get_str(rsp_return, "status")); + + g_assert(status); + qobject_unref(rsp_return); + + return status; +} + +int64_t read_ram_property_int(QTestState *who, const char *property) +{ + QDict *rsp_return, *rsp_ram; + int64_t result; + + rsp_return = migrate_query_not_failed(who); + if (!qdict_haskey(rsp_return, "ram")) { + /* Still in setup */ + result = 0; + } else { + rsp_ram = qdict_get_qdict(rsp_return, "ram"); + result = qdict_get_try_int(rsp_ram, property, 0); + } + qobject_unref(rsp_return); + return result; +} + +int64_t read_migrate_property_int(QTestState *who, const char *property) +{ + QDict *rsp_return; + int64_t result; + + rsp_return = migrate_query_not_failed(who); + result = qdict_get_try_int(rsp_return, property, 0); + qobject_unref(rsp_return); + return result; +} + +uint64_t get_migration_pass(QTestState *who) +{ + return read_ram_property_int(who, "dirty-sync-count"); +} + +void read_blocktime(QTestState *who) +{ + QDict *rsp_return; + + rsp_return = migrate_query_not_failed(who); + g_assert(qdict_haskey(rsp_return, "postcopy-blocktime")); + g_assert(qdict_haskey(rsp_return, "postcopy-vcpu-blocktime")); + g_assert(qdict_haskey(rsp_return, "postcopy-latency")); + g_assert(qdict_haskey(rsp_return, "postcopy-latency-dist")); + g_assert(qdict_haskey(rsp_return, "postcopy-vcpu-latency")); + g_assert(qdict_haskey(rsp_return, "postcopy-non-vcpu-latency")); + qobject_unref(rsp_return); +} + +/* + * Wait for two changes in the migration pass count, but bail if we stop. 
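+ * Seeing dirty-sync-count change twice guarantees at least one new sync happened while we were watching.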
+ */ +void wait_for_migration_pass(QTestState *who, QTestMigrationState *src_state) +{ + uint64_t pass, prev_pass = 0, changes = 0; + + while (changes < 2 && !src_state->stop_seen && !src_state->suspend_seen) { + usleep(1000); + pass = get_migration_pass(who); + changes += (pass != prev_pass); + prev_pass = pass; + } +} + +static long long migrate_get_parameter_int(QTestState *who, + const char *parameter) +{ + QDict *rsp; + long long result; + + rsp = qtest_qmp_assert_success_ref( + who, "{ 'execute': 'query-migrate-parameters' }"); + result = qdict_get_int(rsp, parameter); + qobject_unref(rsp); + return result; +} + +static void migrate_check_parameter_int(QTestState *who, const char *parameter, + long long value) +{ + long long result; + + result = migrate_get_parameter_int(who, parameter); + g_assert_cmpint(result, ==, value); +} + +void migrate_set_parameter_int(QTestState *who, const char *parameter, + long long value) +{ + qtest_qmp_assert_success(who, + "{ 'execute': 'migrate-set-parameters'," + "'arguments': { %s: %lld } }", + parameter, value); + migrate_check_parameter_int(who, parameter, value); +} + +static char *migrate_get_parameter_str(QTestState *who, const char *parameter) +{ + QDict *rsp; + char *result; + + rsp = qtest_qmp_assert_success_ref( + who, "{ 'execute': 'query-migrate-parameters' }"); + result = g_strdup(qdict_get_str(rsp, parameter)); + qobject_unref(rsp); + return result; +} + +static void migrate_check_parameter_str(QTestState *who, const char *parameter, + const char *value) +{ + g_autofree char *result = migrate_get_parameter_str(who, parameter); + g_assert_cmpstr(result, ==, value); +} + +void migrate_set_parameter_str(QTestState *who, const char *parameter, + const char *value) +{ + qtest_qmp_assert_success(who, + "{ 'execute': 'migrate-set-parameters'," + "'arguments': { %s: %s } }", + parameter, value); + migrate_check_parameter_str(who, parameter, value); +} + +static long long migrate_get_parameter_bool(QTestState *who, + const char *parameter) +{ + QDict *rsp; + int result; + + rsp = qtest_qmp_assert_success_ref( + who, "{ 'execute': 'query-migrate-parameters' }"); + result = qdict_get_bool(rsp, parameter); + qobject_unref(rsp); + return !!result; +} + +static void migrate_check_parameter_bool(QTestState *who, const char *parameter, + int value) +{ + int result; + + result = migrate_get_parameter_bool(who, parameter); + g_assert_cmpint(result, ==, value); +} + +void migrate_set_parameter_bool(QTestState *who, const char *parameter, + int value) +{ + qtest_qmp_assert_success(who, + "{ 'execute': 'migrate-set-parameters'," + "'arguments': { %s: %i } }", + parameter, value); + migrate_check_parameter_bool(who, parameter, value); +} + +void migrate_ensure_non_converge(QTestState *who) +{ + /* Can't converge with 1ms downtime + 3 mbs bandwidth limit */ + migrate_set_parameter_int(who, "max-bandwidth", 3 * 1000 * 1000); + migrate_set_parameter_int(who, "downtime-limit", 1); +} + +void migrate_ensure_converge(QTestState *who) +{ + /* Should converge with 30s downtime + 1 gbs bandwidth limit */ + migrate_set_parameter_int(who, "max-bandwidth", 1 * 1000 * 1000 * 1000); + migrate_set_parameter_int(who, "downtime-limit", 30 * 1000); +} + +void migrate_pause(QTestState *who) +{ + qtest_qmp_assert_success(who, "{ 'execute': 'migrate-pause' }"); +} + +void migrate_continue(QTestState *who, const char *state) +{ + qtest_qmp_assert_success(who, + "{ 'execute': 'migrate-continue'," + " 'arguments': { 'state': %s } }", + state); +} + +void migrate_recover(QTestState 
*who, const char *uri) +{ + qtest_qmp_assert_success(who, + "{ 'exec-oob': 'migrate-recover', " + " 'id': 'recover-cmd', " + " 'arguments': { 'uri': %s } }", + uri); +} + +void migrate_cancel(QTestState *who) +{ + qtest_qmp_assert_success(who, "{ 'execute': 'migrate_cancel' }"); +} + +void migrate_postcopy_start(QTestState *from, QTestState *to, + QTestMigrationState *src_state) +{ + qtest_qmp_assert_success(from, "{ 'execute': 'migrate-start-postcopy' }"); + + wait_for_stop(from, src_state); + qtest_qmp_eventwait(to, "RESUME"); +} diff --git a/tests/qtest/migration/migration-qmp.h b/tests/qtest/migration/migration-qmp.h new file mode 100644 index 00000000000..faa8181d916 --- /dev/null +++ b/tests/qtest/migration/migration-qmp.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef MIGRATION_QMP_H +#define MIGRATION_QMP_H + +#include "migration-util.h" + +QObject *migrate_str_to_channel(const char *str); + +G_GNUC_PRINTF(4, 5) +void migrate_qmp_fail(QTestState *who, const char *uri, + QObject *channels, const char *fmt, ...); + +G_GNUC_PRINTF(5, 6) +void migrate_qmp(QTestState *who, QTestState *to, const char *uri, + QObject *channels, const char *fmt, ...); + +G_GNUC_PRINTF(4, 5) +void migrate_incoming_qmp(QTestState *who, const char *uri, + QObject *channels, const char *fmt, ...); + +void migration_event_wait(QTestState *s, const char *target); +void migrate_set_capability(QTestState *who, const char *capability, + bool value); +int64_t read_ram_property_int(QTestState *who, const char *property); +void migrate_set_parameter_int(QTestState *who, const char *parameter, + long long value); +void wait_for_stop(QTestState *who, QTestMigrationState *state); +void wait_for_resume(QTestState *who, QTestMigrationState *state); +void wait_for_suspend(QTestState *who, QTestMigrationState *state); +gchar *migrate_query_status(QTestState *who); +int64_t read_migrate_property_int(QTestState *who, const char *property); +uint64_t get_migration_pass(QTestState *who); +void read_blocktime(QTestState *who); +void wait_for_migration_pass(QTestState *who, QTestMigrationState *src_state); +void migrate_set_parameter_str(QTestState *who, const char *parameter, + const char *value); +void migrate_set_parameter_bool(QTestState *who, const char *parameter, + int value); +void migrate_ensure_non_converge(QTestState *who); +void migrate_ensure_converge(QTestState *who); +void migrate_pause(QTestState *who); +void migrate_continue(QTestState *who, const char *state); +void migrate_recover(QTestState *who, const char *uri); +void migrate_cancel(QTestState *who); +void migrate_postcopy_start(QTestState *from, QTestState *to, + QTestMigrationState *src_state); + +#endif /* MIGRATION_QMP_H */ diff --git a/tests/qtest/migration/migration-util.c b/tests/qtest/migration/migration-util.c new file mode 100644 index 00000000000..642cf50c8d8 --- /dev/null +++ b/tests/qtest/migration/migration-util.c @@ -0,0 +1,398 @@ +/* + * QTest migration utilities + * + * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates + * based on the vhost-user-test.c that is: + * Copyright (c) 2014 Virtual Open Systems Sarl. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/ctype.h" +#include "qapi/qapi-visit-sockets.h" +#include "qapi/qobject-input-visitor.h" +#include "qapi/error.h" +#include "qobject/qlist.h" +#include "qemu/cutils.h" +#include "qemu/memalign.h" + +#include "migration/bootfile.h" +#include "migration/migration-util.h" + +#if defined(__linux__) +#include +#include +#endif + +/* for uffd_version_check() */ +#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD) +#include +#include "qemu/userfaultfd.h" +#endif + +/* For dirty ring test; so far only x86_64 is supported */ +#if defined(__linux__) && defined(HOST_X86_64) +#include "linux/kvm.h" +#endif + + +static char *SocketAddress_to_str(SocketAddress *addr) +{ + switch (addr->type) { + case SOCKET_ADDRESS_TYPE_INET: + return g_strdup_printf("tcp:%s:%s", + addr->u.inet.host, + addr->u.inet.port); + case SOCKET_ADDRESS_TYPE_UNIX: + return g_strdup_printf("unix:%s", + addr->u.q_unix.path); + case SOCKET_ADDRESS_TYPE_FD: + return g_strdup_printf("fd:%s", addr->u.fd.str); + case SOCKET_ADDRESS_TYPE_VSOCK: + return g_strdup_printf("vsock:%s:%s", + addr->u.vsock.cid, + addr->u.vsock.port); + default: + return g_strdup("unknown address type"); + } +} + +static QDict *SocketAddress_to_qdict(SocketAddress *addr) +{ + QDict *dict = qdict_new(); + + switch (addr->type) { + case SOCKET_ADDRESS_TYPE_INET: + qdict_put_str(dict, "type", "inet"); + qdict_put_str(dict, "host", addr->u.inet.host); + qdict_put_str(dict, "port", addr->u.inet.port); + break; + case SOCKET_ADDRESS_TYPE_UNIX: + qdict_put_str(dict, "type", "unix"); + qdict_put_str(dict, "path", addr->u.q_unix.path); + break; + case SOCKET_ADDRESS_TYPE_FD: + qdict_put_str(dict, "type", "fd"); + qdict_put_str(dict, "str", addr->u.fd.str); + break; + case SOCKET_ADDRESS_TYPE_VSOCK: + qdict_put_str(dict, "type", "vsock"); + qdict_put_str(dict, "cid", addr->u.vsock.cid); + qdict_put_str(dict, "port", addr->u.vsock.port); + break; + default: + g_assert_not_reached(); + } + + return dict; +} + +static SocketAddressList *migrate_get_socket_address(QTestState *who) +{ + QDict *rsp; + SocketAddressList *addrs; + Visitor *iv = NULL; + QObject *object; + + rsp = migrate_query(who); + object = qdict_get(rsp, "socket-address"); + + iv = qobject_input_visitor_new(object); + visit_type_SocketAddressList(iv, NULL, &addrs, &error_abort); + visit_free(iv); + + qobject_unref(rsp); + return addrs; +} + +char *migrate_get_connect_uri(QTestState *who) +{ + SocketAddressList *addrs; + char *connect_uri; + + addrs = migrate_get_socket_address(who); + connect_uri = SocketAddress_to_str(addrs->value); + + qapi_free_SocketAddressList(addrs); + return connect_uri; +} + +static QDict * +migrate_get_connect_qdict(QTestState *who) +{ + SocketAddressList *addrs; + QDict *connect_qdict; + + addrs = migrate_get_socket_address(who); + connect_qdict = SocketAddress_to_qdict(addrs->value); + + qapi_free_SocketAddressList(addrs); + return connect_qdict; +} + +void migrate_set_ports(QTestState *to, QList *channel_list) +{ + g_autoptr(QDict) addr = NULL; + QListEntry *entry; + const char *addr_port = NULL; + + QLIST_FOREACH_ENTRY(channel_list, entry) { + QDict *channel = qobject_to(QDict, qlist_entry_obj(entry)); + QDict *addrdict = qdict_get_qdict(channel, "addr"); + + if (!qdict_haskey(addrdict, "port") || + strcmp(qdict_get_str(addrdict, "port"), "0")) { + continue; + } + + /* + * Fetch addr only if needed, so tests that are not yet connected to + * the monitor do not query it. Such tests cannot use port=0. 
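+ * The queried address is cached in @addr and reused for every channel in the list.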
+ */ + if (!addr) { + addr = migrate_get_connect_qdict(to); + } + + if (qdict_haskey(addr, "port")) { + addr_port = qdict_get_str(addr, "port"); + qdict_put_str(addrdict, "port", addr_port); + } + } +} + +bool migrate_watch_for_events(QTestState *who, const char *name, + QDict *event, void *opaque) +{ + QTestMigrationState *state = opaque; + + if (g_str_equal(name, "STOP")) { + state->stop_seen = true; + return true; + } else if (g_str_equal(name, "SUSPEND")) { + state->suspend_seen = true; + return true; + } else if (g_str_equal(name, "RESUME")) { + state->resume_seen = true; + return true; + } + + return false; +} + +char *find_common_machine_version(const char *mtype, const char *var1, + const char *var2) +{ + g_autofree char *type1 = qtest_resolve_machine_alias(var1, mtype); + g_autofree char *type2 = qtest_resolve_machine_alias(var2, mtype); + + g_assert(type1 && type2); + + if (g_str_equal(type1, type2)) { + /* either can be used */ + return g_strdup(type1); + } + + if (qtest_has_machine_with_env(var2, type1)) { + return g_strdup(type1); + } + + if (qtest_has_machine_with_env(var1, type2)) { + return g_strdup(type2); + } + + g_test_message("No common machine version for machine type '%s' between " + "binaries %s and %s", mtype, getenv(var1), getenv(var2)); + g_assert_not_reached(); +} + +char *resolve_machine_version(const char *alias, const char *var1, + const char *var2) +{ + const char *mname = g_getenv("QTEST_QEMU_MACHINE_TYPE"); + g_autofree char *machine_name = NULL; + + if (mname) { + const char *dash = strrchr(mname, '-'); + const char *dot = strrchr(mname, '.'); + + machine_name = g_strdup(mname); + + if (dash && dot) { + assert(qtest_has_machine(machine_name)); + return g_steal_pointer(&machine_name); + } + /* else: probably an alias, let it be resolved below */ + } else { + /* use the hardcoded alias */ + machine_name = g_strdup(alias); + } + + return find_common_machine_version(machine_name, var1, var2); +} + +typedef struct { + char *name; + void (*func)(void); + void (*func_full)(void *); +} MigrationTest; + +static void migration_test_destroy(gpointer data) +{ + MigrationTest *test = (MigrationTest *)data; + + g_free(test->name); + g_free(test); +} + +static void migration_test_wrapper(const void *data) +{ + MigrationTest *test = (MigrationTest *)data; + + g_test_message("Running /%s%s", qtest_get_arch(), test->name); + test->func(); +} + +void migration_test_add(const char *path, void (*fn)(void)) +{ + MigrationTest *test = g_new0(MigrationTest, 1); + + test->func = fn; + test->name = g_strdup(path); + + qtest_add_data_func_full(path, test, migration_test_wrapper, + migration_test_destroy); +} + +static void migration_test_wrapper_full(const void *data) +{ + MigrationTest *test = (MigrationTest *)data; + + g_test_message("Running /%s%s", qtest_get_arch(), test->name); + test->func_full(test->name); +} + +void migration_test_add_suffix(const char *path, const char *suffix, + void (*fn)(void *)) +{ + MigrationTest *test = g_new0(MigrationTest, 1); + + g_assert(g_str_has_suffix(path, "/")); + g_assert(!g_str_has_prefix(suffix, "/")); + + test->func_full = fn; + test->name = g_strconcat(path, suffix, NULL); + + qtest_add_data_func_full(test->name, test, migration_test_wrapper_full, + migration_test_destroy); +} + +#ifdef O_DIRECT +/* + * Probe for O_DIRECT support on the filesystem. Since this is used + * for tests, be conservative, if anything fails, assume it's + * unsupported. 
+ */ +bool probe_o_direct_support(const char *tmpfs) +{ + g_autofree char *filename = g_strdup_printf("%s/probe-o-direct", tmpfs); + int fd, flags = O_CREAT | O_RDWR | O_TRUNC | O_DIRECT; + void *buf; + ssize_t ret, len; + uint64_t offset; + + fd = open(filename, flags, 0660); + if (fd < 0) { + unlink(filename); + return false; + } + + /* + * Using 1MB alignment as a conservative choice to satisfy any + * plausible architecture default page size, and/or filesystem + * alignment restrictions. + */ + len = 0x100000; + offset = 0x100000; + + buf = qemu_try_memalign(len, len); + g_assert(buf); + memset(buf, 0, len); + + ret = pwrite(fd, buf, len, offset); + unlink(filename); + g_free(buf); + + if (ret < 0) { + return false; + } + + return true; +} +#endif + +#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD) +bool ufd_version_check(bool *uffd_feature_thread_id) +{ + struct uffdio_api api_struct; + uint64_t ioctl_mask; + + int ufd = uffd_open(O_CLOEXEC); + + if (ufd == -1) { + g_test_message("Skipping test: userfaultfd not available"); + return false; + } + + api_struct.api = UFFD_API; + api_struct.features = 0; + if (ioctl(ufd, UFFDIO_API, &api_struct)) { + g_test_message("Skipping test: UFFDIO_API failed"); + return false; + } + + if (uffd_feature_thread_id) { + *uffd_feature_thread_id = api_struct.features & UFFD_FEATURE_THREAD_ID; + } + + ioctl_mask = (1ULL << _UFFDIO_REGISTER | + 1ULL << _UFFDIO_UNREGISTER); + if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) { + g_test_message("Skipping test: Missing userfault feature"); + return false; + } + + return true; +} +#else +bool ufd_version_check(bool *uffd_feature_thread_id) +{ + g_test_message("Skipping test: Userfault not available (buildtime)"); + return false; +} +#endif + +bool kvm_dirty_ring_supported(void) +{ +#if defined(__linux__) && defined(HOST_X86_64) + int ret, kvm_fd = open("/dev/kvm", O_RDONLY); + + if (kvm_fd < 0) { + return false; + } + + ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING); + close(kvm_fd); + + /* We test with 4096 slots */ + if (ret < 4096) { + return false; + } + + return true; +#else + return false; +#endif +} diff --git a/tests/qtest/migration/migration-util.h b/tests/qtest/migration/migration-util.h new file mode 100644 index 00000000000..44815e9c42d --- /dev/null +++ b/tests/qtest/migration/migration-util.h @@ -0,0 +1,59 @@ +/* + * QTest migration helpers + * + * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates + * based on the vhost-user-test.c that is: + * Copyright (c) 2014 Virtual Open Systems Sarl. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory.
+ * + */ + +#ifndef MIGRATION_UTIL_H +#define MIGRATION_UTIL_H + +#include "libqtest.h" + +typedef struct QTestMigrationState { + bool stop_seen; + bool resume_seen; + bool suspend_seen; + bool suspend_me; +} QTestMigrationState; + +bool migrate_watch_for_events(QTestState *who, const char *name, + QDict *event, void *opaque); + +QDict *migrate_query(QTestState *who); +QDict *migrate_query_not_failed(QTestState *who); + +void wait_for_migration_status(QTestState *who, + const char *goal, const char **ungoals); + +void wait_for_migration_complete(QTestState *who); + +void wait_for_migration_fail(QTestState *from, bool allow_active); + +char *find_common_machine_version(const char *mtype, const char *var1, + const char *var2); +char *resolve_machine_version(const char *alias, const char *var1, + const char *var2); +#ifdef O_DIRECT +bool probe_o_direct_support(const char *tmpfs); +#else +static inline bool probe_o_direct_support(const char *tmpfs) +{ + return false; +} +#endif + +bool ufd_version_check(bool *uffd_feature_thread_id); +bool kvm_dirty_ring_supported(void); +void migration_test_add(const char *path, void (*fn)(void)); +void migration_test_add_suffix(const char *path, const char *suffix, + void (*fn)(void *)); +char *migrate_get_connect_uri(QTestState *who); +void migrate_set_ports(QTestState *to, QList *channel_list); + +#endif /* MIGRATION_UTIL_H */ diff --git a/tests/qtest/migration/misc-tests.c b/tests/qtest/migration/misc-tests.c new file mode 100644 index 00000000000..54995256d81 --- /dev/null +++ b/tests/qtest/migration/misc-tests.c @@ -0,0 +1,297 @@ +/* + * QTest testcases for migration + * + * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates + * based on the vhost-user-test.c that is: + * Copyright (c) 2014 Virtual Open Systems Sarl. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qobject/qjson.h" +#include "libqtest.h" +#include "migration/framework.h" +#include "migration/migration-qmp.h" +#include "migration/migration-util.h" + +#define ANALYZE_SCRIPT "scripts/analyze-migration.py" + +static char *tmpfs; + +static void test_baddest(void) +{ + MigrateStart args = { + .hide_stderr = true + }; + QTestState *from, *to; + + if (migrate_start(&from, &to, "tcp:127.0.0.1:0", &args)) { + return; + } + migrate_qmp(from, to, "tcp:127.0.0.1:0", NULL, "{}"); + wait_for_migration_fail(from, false); + migrate_end(from, to, false); +} + +#ifndef _WIN32 +static void test_analyze_script(void) +{ + MigrateStart args = { + .opts_source = "-uuid 11111111-1111-1111-1111-111111111111", + }; + QTestState *from, *to; + g_autofree char *uri = NULL; + g_autofree char *file = NULL; + int pid, wstatus; + const char *python = g_getenv("PYTHON"); + + if (!python) { + g_test_skip("PYTHON variable not set"); + return; + } + + /* dummy url */ + if (migrate_start(&from, &to, "tcp:127.0.0.1:0", &args)) { + return; + } + + /* + * Setting these two capabilities causes the "configuration" + * vmstate to include subsections for them. The script needs to + * parse those subsections properly. 
+ */ + migrate_set_capability(from, "validate-uuid", true); + migrate_set_capability(from, "x-ignore-shared", true); + + file = g_strdup_printf("%s/migfile", tmpfs); + uri = g_strdup_printf("exec:cat > %s", file); + + migrate_ensure_converge(from); + migrate_qmp(from, to, uri, NULL, "{}"); + wait_for_migration_complete(from); + + pid = fork(); + if (!pid) { + close(1); + open("/dev/null", O_WRONLY); + execl(python, python, ANALYZE_SCRIPT, "-f", file, NULL); + g_assert_not_reached(); + } + + g_assert(waitpid(pid, &wstatus, 0) == pid); + if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus) != 0) { + g_test_message("Failed to analyze the migration stream"); + g_test_fail(); + } + migrate_end(from, to, false); + unlink(file); +} +#endif + +static void test_ignore_shared(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + QTestState *from, *to; + MigrateStart args = { + .use_shmem = true, + .caps[MIGRATION_CAPABILITY_X_IGNORE_SHARED] = true, + }; + + if (migrate_start(&from, &to, uri, &args)) { + return; + } + + migrate_ensure_non_converge(from); + migrate_prepare_for_dirty_mem(from); + + /* Wait for the first serial output from the source */ + wait_for_serial("src_serial"); + + migrate_qmp(from, to, uri, NULL, "{}"); + + migrate_wait_for_dirty_mem(from, to); + + wait_for_stop(from, get_src()); + + qtest_qmp_eventwait(to, "RESUME"); + + wait_for_serial("dest_serial"); + wait_for_migration_complete(from); + + /* Check whether shared RAM has really been skipped */ + g_assert_cmpint( + read_ram_property_int(from, "transferred"), <, 4 * 1024 * 1024); + + migrate_end(from, to, true); +} + +static void do_test_validate_uuid(MigrateStart *args, bool should_fail) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + QTestState *from, *to; + + if (migrate_start(&from, &to, uri, args)) { + return; + } + + /* + * UUID validation happens at the beginning of migration, so the main + * migration process is not interesting for us here. Thus, set a huge + * downtime limit for a very fast migration.
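+ * (downtime-limit is in milliseconds; 1000000 permits up to ~17 minutes of downtime.)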
+ */ + migrate_set_parameter_int(from, "downtime-limit", 1000000); + migrate_set_capability(from, "validate-uuid", true); + + /* Wait for the first serial output from the source */ + wait_for_serial("src_serial"); + + migrate_qmp(from, to, uri, NULL, "{}"); + + if (should_fail) { + qtest_set_expected_status(to, EXIT_FAILURE); + wait_for_migration_fail(from, true); + } else { + wait_for_migration_complete(from); + } + + migrate_end(from, to, false); +} + +static void test_validate_uuid(void) +{ + MigrateStart args = { + .opts_source = "-uuid 11111111-1111-1111-1111-111111111111", + .opts_target = "-uuid 11111111-1111-1111-1111-111111111111", + }; + + do_test_validate_uuid(&args, false); +} + +static void test_validate_uuid_error(void) +{ + MigrateStart args = { + .opts_source = "-uuid 11111111-1111-1111-1111-111111111111", + .opts_target = "-uuid 22222222-2222-2222-2222-222222222222", + .hide_stderr = true, + }; + + do_test_validate_uuid(&args, true); +} + +static void test_validate_uuid_src_not_set(void) +{ + MigrateStart args = { + .opts_target = "-uuid 22222222-2222-2222-2222-222222222222", + .hide_stderr = true, + }; + + do_test_validate_uuid(&args, false); +} + +static void test_validate_uuid_dst_not_set(void) +{ + MigrateStart args = { + .opts_source = "-uuid 11111111-1111-1111-1111-111111111111", + .hide_stderr = true, + }; + + do_test_validate_uuid(&args, false); +} + +static void do_test_validate_uri_channel(MigrateCommon *args) +{ + QTestState *from, *to; + QObject *channels; + + if (migrate_start(&from, &to, args->listen_uri, &args->start)) { + return; + } + + /* Wait for the first serial output from the source */ + wait_for_serial("src_serial"); + + /* + * 'uri' and 'channels' validation is checked even before the migration + * starts. + */ + channels = args->connect_channels ? + qobject_from_json(args->connect_channels, &error_abort) : + NULL; + migrate_qmp_fail(from, args->connect_uri, channels, "{}"); + + migrate_end(from, to, false); +} + +static void test_validate_uri_channels_both_set(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .listen_uri = "defer", + .connect_uri = "tcp:127.0.0.1:0", + .connect_channels = ("[ { ""'channel-type': 'main'," + " 'addr': { 'transport': 'socket'," + " 'type': 'inet'," + " 'host': '127.0.0.1'," + " 'port': '0' } } ]"), + }; + + do_test_validate_uri_channel(&args); +} + +static void test_validate_uri_channels_none_set(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .listen_uri = "defer", + }; + + do_test_validate_uri_channel(&args); +} + +static void migration_test_add_misc_smoke(MigrationTestEnv *env) +{ +#ifndef _WIN32 + migration_test_add("/migration/analyze-script", test_analyze_script); +#endif +} + +void migration_test_add_misc(MigrationTestEnv *env) +{ + tmpfs = env->tmpfs; + + migration_test_add_misc_smoke(env); + + if (!env->full_set) { + return; + } + + migration_test_add("/migration/bad_dest", test_baddest); + + /* + * Our CI system has problems with shared memory. + * Don't run this test until we find a workaround. 
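+ * It can still be run explicitly by setting QEMU_TEST_FLAKY_TESTS in the environment.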
+ */ + if (getenv("QEMU_TEST_FLAKY_TESTS")) { + migration_test_add("/migration/ignore-shared", test_ignore_shared); + } + + migration_test_add("/migration/validate_uuid", test_validate_uuid); + migration_test_add("/migration/validate_uuid_error", + test_validate_uuid_error); + migration_test_add("/migration/validate_uuid_src_not_set", + test_validate_uuid_src_not_set); + migration_test_add("/migration/validate_uuid_dst_not_set", + test_validate_uuid_dst_not_set); + migration_test_add("/migration/validate_uri/channels/both_set", + test_validate_uri_channels_both_set); + migration_test_add("/migration/validate_uri/channels/none_set", + test_validate_uri_channels_none_set); +} diff --git a/tests/qtest/migration/postcopy-tests.c b/tests/qtest/migration/postcopy-tests.c new file mode 100644 index 00000000000..37735258437 --- /dev/null +++ b/tests/qtest/migration/postcopy-tests.c @@ -0,0 +1,149 @@ +/* + * QTest testcases for postcopy migration + * + * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates + * based on the vhost-user-test.c that is: + * Copyright (c) 2014 Virtual Open Systems Sarl. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "libqtest.h" +#include "migration/framework.h" +#include "migration/migration-util.h" +#include "qobject/qlist.h" +#include "qemu/module.h" +#include "qemu/option.h" +#include "qemu/range.h" +#include "qemu/sockets.h" + +static void test_postcopy(void) +{ + MigrateCommon args = { }; + + test_postcopy_common(&args); +} + +static void test_postcopy_suspend(void) +{ + MigrateCommon args = { + .start.suspend_me = true, + }; + + test_postcopy_common(&args); +} + +static void test_postcopy_preempt(void) +{ + MigrateCommon args = { + .start = { + .caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT] = true, + }, + }; + + test_postcopy_common(&args); +} + +static void test_postcopy_recovery(void) +{ + MigrateCommon args = { }; + + test_postcopy_recovery_common(&args); +} + +static void test_postcopy_recovery_fail_handshake(void) +{ + MigrateCommon args = { + .postcopy_recovery_fail_stage = POSTCOPY_FAIL_RECOVERY, + }; + + test_postcopy_recovery_common(&args); +} + +static void test_postcopy_recovery_fail_reconnect(void) +{ + MigrateCommon args = { + .postcopy_recovery_fail_stage = POSTCOPY_FAIL_CHANNEL_ESTABLISH, + }; + + test_postcopy_recovery_common(&args); +} + +static void test_postcopy_preempt_recovery(void) +{ + MigrateCommon args = { + .start = { + .caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT] = true, + }, + }; + + test_postcopy_recovery_common(&args); +} + +static void migration_test_add_postcopy_smoke(MigrationTestEnv *env) +{ + if (env->has_uffd) { + migration_test_add("/migration/postcopy/plain", test_postcopy); + migration_test_add("/migration/postcopy/recovery/plain", + test_postcopy_recovery); + migration_test_add("/migration/postcopy/preempt/plain", + test_postcopy_preempt); + } +} + +static void test_multifd_postcopy(void) +{ + MigrateCommon args = { + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + }; + + test_postcopy_common(&args); +} + +static void test_multifd_postcopy_preempt(void) +{ + MigrateCommon args = { + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + .caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT] = true, + }, + }; + + test_postcopy_common(&args); +} + +void migration_test_add_postcopy(MigrationTestEnv *env) +{ + migration_test_add_postcopy_smoke(env); + + if (!env->full_set) { + 
return; + } + + if (env->has_uffd) { + migration_test_add("/migration/postcopy/preempt/recovery/plain", + test_postcopy_preempt_recovery); + + migration_test_add( + "/migration/postcopy/recovery/double-failures/handshake", + test_postcopy_recovery_fail_handshake); + + migration_test_add( + "/migration/postcopy/recovery/double-failures/reconnect", + test_postcopy_recovery_fail_reconnect); + + migration_test_add("/migration/multifd+postcopy/plain", + test_multifd_postcopy); + migration_test_add("/migration/multifd+postcopy/preempt/plain", + test_multifd_postcopy_preempt); + if (env->is_x86) { + migration_test_add("/migration/postcopy/suspend", + test_postcopy_suspend); + } + } +} diff --git a/tests/migration/ppc64/Makefile b/tests/qtest/migration/ppc64/Makefile similarity index 100% rename from tests/migration/ppc64/Makefile rename to tests/qtest/migration/ppc64/Makefile diff --git a/tests/migration/ppc64/a-b-kernel.S b/tests/qtest/migration/ppc64/a-b-kernel.S similarity index 100% rename from tests/migration/ppc64/a-b-kernel.S rename to tests/qtest/migration/ppc64/a-b-kernel.S diff --git a/tests/migration/ppc64/a-b-kernel.h b/tests/qtest/migration/ppc64/a-b-kernel.h similarity index 100% rename from tests/migration/ppc64/a-b-kernel.h rename to tests/qtest/migration/ppc64/a-b-kernel.h diff --git a/tests/qtest/migration/precopy-tests.c b/tests/qtest/migration/precopy-tests.c new file mode 100644 index 00000000000..bb38292550c --- /dev/null +++ b/tests/qtest/migration/precopy-tests.c @@ -0,0 +1,1337 @@ +/* + * QTest testcase for precopy migration + * + * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates + * based on the vhost-user-test.c that is: + * Copyright (c) 2014 Virtual Open Systems Sarl. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "chardev/char.h" +#include "crypto/tlscredspsk.h" +#include "libqtest.h" +#include "migration/bootfile.h" +#include "migration/framework.h" +#include "migration/migration-qmp.h" +#include "migration/migration-util.h" +#include "ppc-util.h" +#include "qobject/qlist.h" +#include "qapi-types-migration.h" +#include "qemu/module.h" +#include "qemu/option.h" +#include "qemu/range.h" +#include "qemu/sockets.h" + + +/* + * Dirtylimit stops working if the dirty page rate error value is + * less than DIRTYLIMIT_TOLERANCE_RANGE. + */ +#define DIRTYLIMIT_TOLERANCE_RANGE 25 /* MB/s */ + +static char *tmpfs; + +static void test_precopy_unix_plain(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .listen_uri = uri, + .connect_uri = uri, + /* + * The simplest use case of precopy, covering smoke tests of + * get-dirty-log dirty tracking. + */ + .live = true, + }; + + test_precopy_common(&args); +} + +static void test_precopy_unix_suspend_live(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .listen_uri = uri, + .connect_uri = uri, + /* + * Despite being live, the test is fast because the src + * suspends immediately.
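+ * A suspended guest stops dirtying memory, so the migration converges right away.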
+ */ + .live = true, + .start.suspend_me = true, + }; + + test_precopy_common(&args); +} + +static void test_precopy_unix_suspend_notlive(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .listen_uri = uri, + .connect_uri = uri, + .start.suspend_me = true, + }; + + test_precopy_common(&args); +} + +static void test_precopy_unix_dirty_ring(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .start = { + .use_dirty_ring = true, + }, + .listen_uri = uri, + .connect_uri = uri, + /* + * Besides the precopy/unix basic test, cover dirty ring interface + * rather than get-dirty-log. + */ + .live = true, + }; + + test_precopy_common(&args); +} + +#ifdef CONFIG_RDMA + +#include + +/* + * During migration over RDMA, it will try to pin portions of guest memory, + * typically exceeding 100MB in this test, while the remainder will be + * transmitted as compressed zero pages. + * + * REQUIRED_MEMLOCK_SZ indicates the minimal mlock size in the current context. + */ +#define REQUIRED_MEMLOCK_SZ (128 << 20) /* 128MB */ + +/* check 'ulimit -l' */ +static bool mlock_check(void) +{ + uid_t uid; + struct rlimit rlim; + + uid = getuid(); + if (uid == 0) { + return true; + } + + if (getrlimit(RLIMIT_MEMLOCK, &rlim) != 0) { + return false; + } + + return rlim.rlim_cur >= REQUIRED_MEMLOCK_SZ; +} + +#define RDMA_MIGRATION_HELPER "scripts/rdma-migration-helper.sh" +static int new_rdma_link(char *buffer, bool ipv6) +{ + char cmd[256]; + bool verbose = g_getenv("QTEST_LOG"); + + snprintf(cmd, sizeof(cmd), "IP_FAMILY=%s %s detect %s", + ipv6 ? "ipv6" : "ipv4", RDMA_MIGRATION_HELPER, + verbose ? "" : "2>/dev/null"); + + FILE *pipe = popen(cmd, "r"); + if (pipe == NULL) { + perror("Failed to run script"); + return -1; + } + + int idx = 0; + while (fgets(buffer + idx, 128 - idx, pipe) != NULL) { + idx += strlen(buffer); + } + + int status = pclose(pipe); + if (status == -1) { + perror("Error reported by pclose()"); + return -1; + } else if (WIFEXITED(status)) { + return WEXITSTATUS(status); + } + + return -1; +} + +static void __test_precopy_rdma_plain(bool ipv6) +{ + char buffer[128] = {}; + + if (!mlock_check()) { + g_test_skip("'ulimit -l' is too small, require >=128M"); + return; + } + + if (new_rdma_link(buffer, ipv6)) { + g_test_skip("No rdma link available\n" + "# To enable the test:\n" + "# Run \'" RDMA_MIGRATION_HELPER " setup\' with root to " + "setup a new rdma/rxe link and rerun the test\n" + "# Optional: run 'scripts/rdma-migration-helper.sh clean' " + "to revert the 'setup'"); + return; + } + + /* + * TODO: query a free port instead of hard code. 
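+     * (a hard-coded port means concurrent runs of this test may collide)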
+ * 29200=('R'+'D'+'M'+'A')*100 + **/ + g_autofree char *uri = g_strdup_printf("rdma:%s:29200", buffer); + + MigrateCommon args = { + .listen_uri = uri, + .connect_uri = uri, + }; + + test_precopy_common(&args); +} + +static void test_precopy_rdma_plain(void) +{ + __test_precopy_rdma_plain(false); +} + +static void test_precopy_rdma_plain_ipv6(void) +{ + __test_precopy_rdma_plain(true); +} +#endif + +static void test_precopy_tcp_plain(void) +{ + MigrateCommon args = { + .listen_uri = "tcp:127.0.0.1:0", + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_switchover_ack(void) +{ + MigrateCommon args = { + .listen_uri = "tcp:127.0.0.1:0", + .start = { + .caps[MIGRATION_CAPABILITY_RETURN_PATH] = true, + .caps[MIGRATION_CAPABILITY_SWITCHOVER_ACK] = true, + }, + /* + * Source VM must be running in order to consider the switchover ACK + * when deciding to do switchover or not. + */ + .live = true, + }; + + test_precopy_common(&args); +} + +#ifndef _WIN32 +static void *migrate_hook_start_fd(QTestState *from, + QTestState *to) +{ + int ret; + int pair[2]; + + /* Create two connected sockets for migration */ + ret = qemu_socketpair(PF_LOCAL, SOCK_STREAM, 0, pair); + g_assert_cmpint(ret, ==, 0); + + /* Send the 1st socket to the target */ + qtest_qmp_fds_assert_success(to, &pair[0], 1, + "{ 'execute': 'getfd'," + " 'arguments': { 'fdname': 'fd-mig' }}"); + close(pair[0]); + + /* Start incoming migration from the 1st socket */ + migrate_incoming_qmp(to, "fd:fd-mig", NULL, "{}"); + + /* Send the 2nd socket to the target */ + qtest_qmp_fds_assert_success(from, &pair[1], 1, + "{ 'execute': 'getfd'," + " 'arguments': { 'fdname': 'fd-mig' }}"); + close(pair[1]); + + return NULL; +} + +static void migrate_hook_end_fd(QTestState *from, + QTestState *to, + void *opaque) +{ + QDict *rsp; + const char *error_desc; + + /* Test closing fds */ + /* + * We assume, that QEMU removes named fd from its list, + * so this should fail. 
+ */ + rsp = qtest_qmp(from, + "{ 'execute': 'closefd'," + " 'arguments': { 'fdname': 'fd-mig' }}"); + g_assert_true(qdict_haskey(rsp, "error")); + error_desc = qdict_get_str(qdict_get_qdict(rsp, "error"), "desc"); + g_assert_cmpstr(error_desc, ==, "File descriptor named 'fd-mig' not found"); + qobject_unref(rsp); + + rsp = qtest_qmp(to, + "{ 'execute': 'closefd'," + " 'arguments': { 'fdname': 'fd-mig' }}"); + g_assert_true(qdict_haskey(rsp, "error")); + error_desc = qdict_get_str(qdict_get_qdict(rsp, "error"), "desc"); + g_assert_cmpstr(error_desc, ==, "File descriptor named 'fd-mig' not found"); + qobject_unref(rsp); +} + +static void test_precopy_fd_socket(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .connect_uri = "fd:fd-mig", + .start_hook = migrate_hook_start_fd, + .end_hook = migrate_hook_end_fd, + }; + test_precopy_common(&args); +} + +static void *migrate_hook_start_precopy_fd_file(QTestState *from, + QTestState *to) +{ + g_autofree char *file = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME); + int src_flags = O_CREAT | O_RDWR; + int dst_flags = O_CREAT | O_RDWR; + int fds[2]; + + fds[0] = open(file, src_flags, 0660); + assert(fds[0] != -1); + + fds[1] = open(file, dst_flags, 0660); + assert(fds[1] != -1); + + + qtest_qmp_fds_assert_success(to, &fds[0], 1, + "{ 'execute': 'getfd'," + " 'arguments': { 'fdname': 'fd-mig' }}"); + + qtest_qmp_fds_assert_success(from, &fds[1], 1, + "{ 'execute': 'getfd'," + " 'arguments': { 'fdname': 'fd-mig' }}"); + + close(fds[0]); + close(fds[1]); + + return NULL; +} + +static void test_precopy_fd_file(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .connect_uri = "fd:fd-mig", + .start_hook = migrate_hook_start_precopy_fd_file, + .end_hook = migrate_hook_end_fd, + }; + test_file_common(&args, true); +} +#endif /* _WIN32 */ + +/* + * The way auto_converge works, we need to do too many passes to + * run this test. Auto_converge logic is only run once every + * three iterations, so: + * + * - 3 iterations without auto_converge enabled + * - 3 iterations with pct = 5 + * - 3 iterations with pct = 30 + * - 3 iterations with pct = 55 + * - 3 iterations with pct = 80 + * - 3 iterations with pct = 95 (max(95, 80 + 25)) + * + * To make things even worse, we need to run the initial stage at + * 3MB/s so we enter autoconverge even when host is (over)loaded. + */ +static void test_auto_converge(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateStart args = {}; + QTestState *from, *to; + int64_t percentage; + + /* + * We want the test to be stable and as fast as possible. + * E.g., with 1Gb/s bandwidth migration may pass without throttling, + * so we need to decrease a bandwidth. + */ + const int64_t init_pct = 5, inc_pct = 25, max_pct = 95; + uint64_t prev_dirty_sync_cnt, dirty_sync_cnt; + int max_try_count, hit = 0; + + if (migrate_start(&from, &to, uri, &args)) { + return; + } + + migrate_set_capability(from, "auto-converge", true); + migrate_set_parameter_int(from, "cpu-throttle-initial", init_pct); + migrate_set_parameter_int(from, "cpu-throttle-increment", inc_pct); + migrate_set_parameter_int(from, "max-cpu-throttle", max_pct); + + /* + * Set the initial parameters so that the migration could not converge + * without throttling. 
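+     * (migrate_ensure_non_converge() caps max-bandwidth at roughly 3MB/s,
+     * as noted in the dirty-sync comment further down)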
+ */ + migrate_ensure_non_converge(from); + + /* To check remaining size after precopy */ + migrate_set_capability(from, "pause-before-switchover", true); + + /* Wait for the first serial output from the source */ + wait_for_serial("src_serial"); + + migrate_qmp(from, to, uri, NULL, "{}"); + + /* Wait for throttling begins */ + percentage = 0; + do { + percentage = read_migrate_property_int(from, "cpu-throttle-percentage"); + if (percentage != 0) { + break; + } + usleep(20); + g_assert_false(get_src()->stop_seen); + } while (true); + /* The first percentage of throttling should be at least init_pct */ + g_assert_cmpint(percentage, >=, init_pct); + + /* + * End the loop when the dirty sync count greater than 1. + */ + while ((dirty_sync_cnt = get_migration_pass(from)) < 2) { + usleep(1000 * 1000); + } + + prev_dirty_sync_cnt = dirty_sync_cnt; + + /* + * The RAMBlock dirty sync count must changes in 5 seconds, here we set + * the timeout to 10 seconds to ensure it changes. + * + * Note that migrate_ensure_non_converge set the max-bandwidth to 3MB/s, + * while the qtest mem is >= 100MB, one iteration takes at least 33s (100/3) + * to complete; this ensures that the RAMBlock dirty sync occurs. + */ + max_try_count = 10; + while (--max_try_count) { + dirty_sync_cnt = get_migration_pass(from); + if (dirty_sync_cnt != prev_dirty_sync_cnt) { + hit = 1; + break; + } + prev_dirty_sync_cnt = dirty_sync_cnt; + sleep(1); + } + g_assert_cmpint(hit, ==, 1); + + /* Now, when we tested that throttling works, let it converge */ + migrate_ensure_converge(from); + + /* + * Wait for pre-switchover status to check last throttle percentage + * and remaining. These values will be zeroed later + */ + wait_for_migration_status(from, "pre-switchover", NULL); + + /* The final percentage of throttling shouldn't be greater than max_pct */ + percentage = read_migrate_property_int(from, "cpu-throttle-percentage"); + g_assert_cmpint(percentage, <=, max_pct); + migrate_continue(from, "pre-switchover"); + + qtest_qmp_eventwait(to, "RESUME"); + + wait_for_serial("dest_serial"); + wait_for_migration_complete(from); + + migrate_end(from, to, true); +} + +static void * +migrate_hook_start_precopy_tcp_multifd(QTestState *from, + QTestState *to) +{ + return migrate_hook_start_precopy_tcp_multifd_common(from, to, "none"); +} + +static void * +migrate_hook_start_precopy_tcp_multifd_zero_page_legacy(QTestState *from, + QTestState *to) +{ + migrate_hook_start_precopy_tcp_multifd_common(from, to, "none"); + migrate_set_parameter_str(from, "zero-page-detection", "legacy"); + return NULL; +} + +static void * +migrate_hook_start_precopy_tcp_multifd_no_zero_page(QTestState *from, + QTestState *to) +{ + migrate_hook_start_precopy_tcp_multifd_common(from, to, "none"); + migrate_set_parameter_str(from, "zero-page-detection", "none"); + return NULL; +} + +static void test_multifd_tcp_uri_none(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = migrate_hook_start_precopy_tcp_multifd, + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + /* + * Multifd is more complicated than most of the features, it + * directly takes guest page buffers when sending, make sure + * everything will work alright even if guest page is changing. 
+ */ + .live = true, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_zero_page_legacy(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = migrate_hook_start_precopy_tcp_multifd_zero_page_legacy, + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + /* + * Multifd is more complicated than most of the features, it + * directly takes guest page buffers when sending, make sure + * everything will work alright even if guest page is changing. + */ + .live = true, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_no_zero_page(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = migrate_hook_start_precopy_tcp_multifd_no_zero_page, + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + /* + * Multifd is more complicated than most of the features, it + * directly takes guest page buffers when sending, make sure + * everything will work alright even if guest page is changing. + */ + .live = true, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_channels_none(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = migrate_hook_start_precopy_tcp_multifd, + .live = true, + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + .connect_channels = ("[ { 'channel-type': 'main'," + " 'addr': { 'transport': 'socket'," + " 'type': 'inet'," + " 'host': '127.0.0.1'," + " 'port': '0' } } ]"), + }; + test_precopy_common(&args); +} + +/* + * This test does: + * source target + * migrate_incoming + * migrate + * migrate_cancel + * launch another target + * migrate + * + * And see that it works + */ +static void test_multifd_tcp_cancel(bool postcopy_ram) +{ + MigrateStart args = { + .hide_stderr = true, + }; + QTestState *from, *to, *to2; + + if (migrate_start(&from, &to, "defer", &args)) { + return; + } + + migrate_ensure_non_converge(from); + migrate_prepare_for_dirty_mem(from); + + if (postcopy_ram) { + migrate_set_capability(from, "postcopy-ram", true); + migrate_set_capability(to, "postcopy-ram", true); + } + + migrate_set_parameter_int(from, "multifd-channels", 16); + migrate_set_parameter_int(to, "multifd-channels", 16); + + migrate_set_capability(from, "multifd", true); + migrate_set_capability(to, "multifd", true); + + /* Start incoming migration from the 1st socket */ + migrate_incoming_qmp(to, "tcp:127.0.0.1:0", NULL, "{}"); + + /* Wait for the first serial output from the source */ + wait_for_serial("src_serial"); + + migrate_qmp(from, to, NULL, NULL, "{}"); + + migrate_wait_for_dirty_mem(from, to); + + migrate_cancel(from); + + /* Make sure QEMU process "to" exited */ + qtest_set_expected_status(to, EXIT_FAILURE); + qtest_wait_qemu(to); + qtest_quit(to); + + /* + * Ensure the source QEMU finishes its cancellation process before we + * proceed with the setup of the next migration. The migrate_start() + * function and others might want to interact with the source in a way that + * is not possible while the migration is not canceled properly. For + * example, setting migration capabilities when the migration is still + * running leads to an error. 
+ */ + wait_for_migration_status(from, "cancelled", NULL); + + args = (MigrateStart){ + .only_target = true, + }; + + if (migrate_start(&from, &to2, "defer", &args)) { + return; + } + + if (postcopy_ram) { + migrate_set_capability(to2, "postcopy-ram", true); + } + + migrate_set_parameter_int(to2, "multifd-channels", 16); + + migrate_set_capability(to2, "multifd", true); + + /* Start incoming migration from the 1st socket */ + migrate_incoming_qmp(to2, "tcp:127.0.0.1:0", NULL, "{}"); + + migrate_ensure_non_converge(from); + + migrate_qmp(from, to2, NULL, NULL, "{}"); + + migrate_wait_for_dirty_mem(from, to2); + + migrate_ensure_converge(from); + + wait_for_stop(from, get_src()); + qtest_qmp_eventwait(to2, "RESUME"); + + wait_for_serial("dest_serial"); + wait_for_migration_complete(from); + migrate_end(from, to2, true); +} + +static void test_multifd_precopy_tcp_cancel(void) +{ + test_multifd_tcp_cancel(false); +} + +static void test_multifd_postcopy_tcp_cancel(void) +{ + test_multifd_tcp_cancel(true); +} + +static void test_cancel_src_after_failed(QTestState *from, QTestState *to, + const char *uri, const char *phase) +{ + /* + * No migrate_incoming_qmp() at the start to force source into + * failed state during migrate_qmp(). + */ + + wait_for_serial("src_serial"); + migrate_ensure_converge(from); + + migrate_qmp(from, to, uri, NULL, "{}"); + + migration_event_wait(from, phase); + migrate_cancel(from); + + /* cancelling will not move the migration out of 'failed' */ + + wait_for_migration_status(from, "failed", + (const char * []) { "completed", NULL }); + + /* + * Not waiting for the destination because it never started + * migration. + */ +} + +static void test_cancel_src_after_cancelled(QTestState *from, QTestState *to, + const char *uri, const char *phase) +{ + migrate_incoming_qmp(to, uri, NULL, "{ 'exit-on-error': false }"); + + wait_for_serial("src_serial"); + migrate_ensure_converge(from); + + migrate_qmp(from, to, uri, NULL, "{}"); + + /* To move to cancelled/cancelling */ + migrate_cancel(from); + migration_event_wait(from, phase); + + /* The migrate_cancel under test */ + migrate_cancel(from); + + wait_for_migration_status(from, "cancelled", + (const char * []) { "completed", NULL }); + + wait_for_migration_status(to, "failed", + (const char * []) { "completed", NULL }); +} + +static void test_cancel_src_after_complete(QTestState *from, QTestState *to, + const char *uri, const char *phase) +{ + migrate_incoming_qmp(to, uri, NULL, "{ 'exit-on-error': false }"); + + wait_for_serial("src_serial"); + migrate_ensure_converge(from); + + migrate_qmp(from, to, uri, NULL, "{}"); + + migration_event_wait(from, phase); + migrate_cancel(from); + + /* + * qmp_migrate_cancel() exits early if migration is not running + * anymore, the status will not change to cancelled. 
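+     * Both sides are therefore expected to finish in "completed" below
+     * rather than "cancelled".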
+ */ + wait_for_migration_complete(from); + wait_for_migration_complete(to); +} + +static void test_cancel_src_after_none(QTestState *from, QTestState *to, + const char *uri, const char *phase) +{ + /* + * Test that cancelling without a migration happening does not + * affect subsequent migrations + */ + migrate_cancel(to); + + wait_for_serial("src_serial"); + migrate_cancel(from); + + migrate_incoming_qmp(to, uri, NULL, "{ 'exit-on-error': false }"); + + migrate_ensure_converge(from); + migrate_qmp(from, to, uri, NULL, "{}"); + + wait_for_migration_complete(from); + wait_for_migration_complete(to); +} + +static void test_cancel_src_pre_switchover(QTestState *from, QTestState *to, + const char *uri, const char *phase) +{ + migrate_set_capability(from, "pause-before-switchover", true); + migrate_set_capability(to, "pause-before-switchover", true); + + migrate_set_capability(from, "multifd", true); + migrate_set_capability(to, "multifd", true); + + migrate_incoming_qmp(to, uri, NULL, "{ 'exit-on-error': false }"); + + wait_for_serial("src_serial"); + migrate_ensure_converge(from); + + migrate_qmp(from, to, uri, NULL, "{}"); + + migration_event_wait(from, phase); + migrate_cancel(from); + migration_event_wait(from, "cancelling"); + + wait_for_migration_status(from, "cancelled", + (const char * []) { "completed", NULL }); + + wait_for_migration_status(to, "failed", + (const char * []) { "completed", NULL }); +} + +static void test_cancel_src_after_status(void *opaque) +{ + const char *test_path = opaque; + g_autofree char *phase = g_path_get_basename(test_path); + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + QTestState *from, *to; + MigrateStart args = { + .hide_stderr = true, + }; + + if (migrate_start(&from, &to, "defer", &args)) { + return; + } + + if (g_str_equal(phase, "cancelling") || + g_str_equal(phase, "cancelled")) { + test_cancel_src_after_cancelled(from, to, uri, phase); + + } else if (g_str_equal(phase, "completed")) { + test_cancel_src_after_complete(from, to, uri, phase); + + } else if (g_str_equal(phase, "failed")) { + test_cancel_src_after_failed(from, to, uri, phase); + + } else if (g_str_equal(phase, "none")) { + test_cancel_src_after_none(from, to, uri, phase); + + } else { + /* any state that comes before pre-switchover */ + test_cancel_src_pre_switchover(from, to, uri, phase); + } + + migrate_end(from, to, false); +} + +static void calc_dirty_rate(QTestState *who, uint64_t calc_time) +{ + qtest_qmp_assert_success(who, + "{ 'execute': 'calc-dirty-rate'," + "'arguments': { " + "'calc-time': %" PRIu64 "," + "'mode': 'dirty-ring' }}", + calc_time); +} + +static QDict *query_dirty_rate(QTestState *who) +{ + return qtest_qmp_assert_success_ref(who, + "{ 'execute': 'query-dirty-rate' }"); +} + +static void dirtylimit_set_all(QTestState *who, uint64_t dirtyrate) +{ + qtest_qmp_assert_success(who, + "{ 'execute': 'set-vcpu-dirty-limit'," + "'arguments': { " + "'dirty-rate': %" PRIu64 " } }", + dirtyrate); +} + +static void cancel_vcpu_dirty_limit(QTestState *who) +{ + qtest_qmp_assert_success(who, + "{ 'execute': 'cancel-vcpu-dirty-limit' }"); +} + +static QDict *query_vcpu_dirty_limit(QTestState *who) +{ + QDict *rsp; + + rsp = qtest_qmp(who, "{ 'execute': 'query-vcpu-dirty-limit' }"); + g_assert(!qdict_haskey(rsp, "error")); + g_assert(qdict_haskey(rsp, "return")); + + return rsp; +} + +static bool calc_dirtyrate_ready(QTestState *who) +{ + QDict *rsp_return; + const char *status; + bool ready; + + rsp_return = query_dirty_rate(who); + 
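/*
+     * query-dirty-rate reports status "measuring" while a measurement is
+     * still in progress; g_strcmp0() returns non-zero once the status has
+     * moved on (to "measured"), which is what we report as ready.
+     */
+    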
g_assert(rsp_return); + + status = qdict_get_str(rsp_return, "status"); + g_assert(status); + ready = g_strcmp0(status, "measuring"); + qobject_unref(rsp_return); + + return ready; +} + +static void wait_for_calc_dirtyrate_complete(QTestState *who, + int64_t time_s) +{ + int max_try_count = 10000; + usleep(time_s * 1000000); + + while (!calc_dirtyrate_ready(who) && max_try_count--) { + usleep(1000); + } + + /* + * Set the timeout with 10 s(max_try_count * 1000us), + * if dirtyrate measurement not complete, fail test. + */ + g_assert_cmpint(max_try_count, !=, 0); +} + +static int64_t get_dirty_rate(QTestState *who) +{ + QDict *rsp_return; + const char *status; + QList *rates; + const QListEntry *entry; + QDict *rate; + int64_t dirtyrate; + + rsp_return = query_dirty_rate(who); + g_assert(rsp_return); + + status = qdict_get_str(rsp_return, "status"); + g_assert(status); + g_assert_cmpstr(status, ==, "measured"); + + rates = qdict_get_qlist(rsp_return, "vcpu-dirty-rate"); + g_assert(rates && !qlist_empty(rates)); + + entry = qlist_first(rates); + g_assert(entry); + + rate = qobject_to(QDict, qlist_entry_obj(entry)); + g_assert(rate); + + dirtyrate = qdict_get_try_int(rate, "dirty-rate", -1); + + qobject_unref(rsp_return); + return dirtyrate; +} + +static int64_t get_limit_rate(QTestState *who) +{ + QDict *rsp_return; + QList *rates; + const QListEntry *entry; + QDict *rate; + int64_t dirtyrate; + + rsp_return = query_vcpu_dirty_limit(who); + g_assert(rsp_return); + + rates = qdict_get_qlist(rsp_return, "return"); + g_assert(rates && !qlist_empty(rates)); + + entry = qlist_first(rates); + g_assert(entry); + + rate = qobject_to(QDict, qlist_entry_obj(entry)); + g_assert(rate); + + dirtyrate = qdict_get_try_int(rate, "limit-rate", -1); + + qobject_unref(rsp_return); + return dirtyrate; +} + +static QTestState *dirtylimit_start_vm(void) +{ + QTestState *vm = NULL; + g_autofree gchar *cmd = NULL; + const char *bootpath; + + bootpath = bootfile_create(qtest_get_arch(), tmpfs, false); + cmd = g_strdup_printf("-accel kvm,dirty-ring-size=4096 " + "-name dirtylimit-test,debug-threads=on " + "-m 150M -smp 1 " + "-serial file:%s/vm_serial " + "-drive file=%s,format=raw ", + tmpfs, bootpath); + + vm = qtest_init(cmd); + return vm; +} + +static void dirtylimit_stop_vm(QTestState *vm) +{ + g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, "vm_serial"); + + qtest_quit(vm); + unlink(path); +} + +static void test_vcpu_dirty_limit(void) +{ + QTestState *vm; + int64_t origin_rate; + int64_t quota_rate; + int64_t rate ; + int max_try_count = 20; + int hit = 0; + + /* Start vm for vcpu dirtylimit test */ + vm = dirtylimit_start_vm(); + + /* Wait for the first serial output from the vm*/ + wait_for_serial("vm_serial"); + + /* Do dirtyrate measurement with calc time equals 1s */ + calc_dirty_rate(vm, 1); + + /* Sleep calc time and wait for calc dirtyrate complete */ + wait_for_calc_dirtyrate_complete(vm, 1); + + /* Query original dirty page rate */ + origin_rate = get_dirty_rate(vm); + + /* VM booted from bootsect should dirty memory steadily */ + assert(origin_rate != 0); + + /* Setup quota dirty page rate at half of origin */ + quota_rate = origin_rate / 2; + + /* Set dirtylimit */ + dirtylimit_set_all(vm, quota_rate); + + /* + * Check if set-vcpu-dirty-limit and query-vcpu-dirty-limit + * works literally + */ + g_assert_cmpint(quota_rate, ==, get_limit_rate(vm)); + + /* Sleep a bit to check if it take effect */ + usleep(2000000); + + /* + * Check if dirtylimit take effect realistically, set the + * timeout 
with 20s (max_try_count * 1s); if the dirty limit
+     * has not taken effect by then, fail the test.
+     */
+    while (--max_try_count) {
+        calc_dirty_rate(vm, 1);
+        wait_for_calc_dirtyrate_complete(vm, 1);
+        rate = get_dirty_rate(vm);
+
+        /*
+         * Consider the limit hit once the current rate drops below the
+         * quota rate (within the accepted error range).
+         */
+        if (rate < (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) {
+            hit = 1;
+            break;
+        }
+    }
+
+    g_assert_cmpint(hit, ==, 1);
+
+    hit = 0;
+    max_try_count = 20;
+
+    /* Check that cancelling the dirty limit takes effect */
+    cancel_vcpu_dirty_limit(vm);
+    while (--max_try_count) {
+        calc_dirty_rate(vm, 1);
+        wait_for_calc_dirtyrate_complete(vm, 1);
+        rate = get_dirty_rate(vm);
+
+        /*
+         * Consider the limit cancelled once the current rate rises back
+         * above the quota rate (within the accepted error range).
+         */
+        if (rate > (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) {
+            hit = 1;
+            break;
+        }
+    }
+
+    g_assert_cmpint(hit, ==, 1);
+    dirtylimit_stop_vm(vm);
+}
+
+static void migrate_dirty_limit_wait_showup(QTestState *from,
+                                            const int64_t period,
+                                            const int64_t value)
+{
+    /* Enable the dirty limit capability */
+    migrate_set_capability(from, "dirty-limit", true);
+
+    /* Set the dirty limit parameters */
+    migrate_set_parameter_int(from, "x-vcpu-dirty-limit-period", period);
+    migrate_set_parameter_int(from, "vcpu-dirty-limit", value);
+
+    /* Make sure the migration can't converge */
+    migrate_ensure_non_converge(from);
+
+    /* So we can check the limit rate after precopy */
+    migrate_set_capability(from, "pause-before-switchover", true);
+
+    /* Wait for the serial output from the source */
+    wait_for_serial("src_serial");
+}
+
+/*
+ * This test does:
+ *  source                      destination
+ *  start vm
+ *                              start incoming vm
+ *  migrate
+ *  wait dirty limit to begin
+ *  cancel migrate
+ *  cancellation check
+ *                              restart incoming vm
+ *  migrate
+ *  wait dirty limit to begin
+ *  wait pre-switchover event
+ *  convergence condition check
+ *
+ * And see if dirty limit migration works correctly.
+ * This test case involves many passes, so it runs in slow mode only.
+ */
+static void test_dirty_limit(void)
+{
+    g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+    QTestState *from, *to;
+    int64_t remaining;
+    uint64_t throttle_us_per_full;
+    /*
+     * We want the test to be stable and as fast as possible.  E.g., with
+     * 1Gb/s bandwidth the migration may pass without the dirty limit ever
+     * kicking in, so we need to lower the bandwidth.
+     */
+    const int64_t dirtylimit_period = 1000, dirtylimit_value = 50;
+    const int64_t max_bandwidth = 400000000; /* ~400MB/s */
+    const int64_t downtime_limit = 250; /* 250ms */
+    /*
+     * We migrate through a unix socket (> 500MB/s).
+     * Thus, expected migration speed ~= bandwidth limit (< 500MB/s).
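+     * With max_bandwidth = 400000000 and downtime_limit = 250ms, the
+     * threshold works out to 400000000 * 250 / 1000 = 100000000 bytes
+     * (~100MB) of RAM that may still be left to send at switchover.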
+ * So, we can predict expected_threshold + */ + const int64_t expected_threshold = max_bandwidth * downtime_limit / 1000; + int max_try_count = 10; + MigrateCommon args = { + .start = { + .hide_stderr = true, + .use_dirty_ring = true, + }, + .listen_uri = uri, + .connect_uri = uri, + }; + + /* Start src, dst vm */ + if (migrate_start(&from, &to, args.listen_uri, &args.start)) { + return; + } + + /* Prepare for dirty limit migration and wait src vm show up */ + migrate_dirty_limit_wait_showup(from, dirtylimit_period, dirtylimit_value); + + /* Start migrate */ + migrate_qmp(from, to, args.connect_uri, NULL, "{}"); + + /* Wait for dirty limit throttle begin */ + throttle_us_per_full = 0; + while (throttle_us_per_full == 0) { + throttle_us_per_full = + read_migrate_property_int(from, + "dirty-limit-throttle-time-per-round"); + usleep(100); + g_assert_false(get_src()->stop_seen); + } + + /* Now cancel migrate and wait for dirty limit throttle switch off */ + migrate_cancel(from); + wait_for_migration_status(from, "cancelled", NULL); + + /* destination always fails after cancel */ + migration_event_wait(to, "failed"); + qtest_set_expected_status(to, EXIT_FAILURE); + qtest_quit(to); + + /* Check if dirty limit throttle switched off, set timeout 1ms */ + do { + throttle_us_per_full = + read_migrate_property_int(from, + "dirty-limit-throttle-time-per-round"); + usleep(100); + g_assert_false(get_src()->stop_seen); + } while (throttle_us_per_full != 0 && --max_try_count); + + /* Assert dirty limit is not in service */ + g_assert_cmpint(throttle_us_per_full, ==, 0); + + args = (MigrateCommon) { + .start = { + .only_target = true, + .use_dirty_ring = true, + }, + .listen_uri = uri, + .connect_uri = uri, + }; + + /* Restart dst vm, src vm already show up so we needn't wait anymore */ + if (migrate_start(&from, &to, args.listen_uri, &args.start)) { + return; + } + + /* Start migrate */ + migrate_qmp(from, to, args.connect_uri, NULL, "{}"); + + /* Wait for dirty limit throttle begin */ + throttle_us_per_full = 0; + while (throttle_us_per_full == 0) { + throttle_us_per_full = + read_migrate_property_int(from, + "dirty-limit-throttle-time-per-round"); + usleep(100); + g_assert_false(get_src()->stop_seen); + } + + /* + * The dirty limit rate should equals the return value of + * query-vcpu-dirty-limit if dirty limit cap set + */ + g_assert_cmpint(dirtylimit_value, ==, get_limit_rate(from)); + + /* Now, we have tested if dirty limit works, let it converge */ + migrate_set_parameter_int(from, "downtime-limit", downtime_limit); + migrate_set_parameter_int(from, "max-bandwidth", max_bandwidth); + + /* + * Wait for pre-switchover status to check if migration + * satisfy the convergence condition + */ + wait_for_migration_status(from, "pre-switchover", NULL); + + remaining = read_ram_property_int(from, "remaining"); + g_assert_cmpint(remaining, <, + (expected_threshold + expected_threshold / 100)); + + migrate_continue(from, "pre-switchover"); + + qtest_qmp_eventwait(to, "RESUME"); + + wait_for_serial("dest_serial"); + wait_for_migration_complete(from); + + migrate_end(from, to, true); +} + +static void migration_test_add_precopy_smoke(MigrationTestEnv *env) +{ + if (env->is_x86) { + migration_test_add("/migration/precopy/unix/suspend/live", + test_precopy_unix_suspend_live); + migration_test_add("/migration/precopy/unix/suspend/notlive", + test_precopy_unix_suspend_notlive); + } + + migration_test_add("/migration/precopy/unix/plain", + test_precopy_unix_plain); + + 
migration_test_add("/migration/precopy/tcp/plain", test_precopy_tcp_plain); + migration_test_add("/migration/multifd/tcp/uri/plain/none", + test_multifd_tcp_uri_none); + migration_test_add("/migration/multifd/tcp/plain/cancel", + test_multifd_precopy_tcp_cancel); + if (env->has_uffd) { + migration_test_add("/migration/multifd+postcopy/tcp/plain/cancel", + test_multifd_postcopy_tcp_cancel); + } + +#ifdef CONFIG_RDMA + migration_test_add("/migration/precopy/rdma/plain", + test_precopy_rdma_plain); + migration_test_add("/migration/precopy/rdma/plain/ipv6", + test_precopy_rdma_plain_ipv6); +#endif +} + +void migration_test_add_precopy(MigrationTestEnv *env) +{ + tmpfs = env->tmpfs; + + migration_test_add_precopy_smoke(env); + + if (!env->full_set) { + return; + } + + migration_test_add("/migration/precopy/tcp/plain/switchover-ack", + test_precopy_tcp_switchover_ack); + +#ifndef _WIN32 + migration_test_add("/migration/precopy/fd/tcp", + test_precopy_fd_socket); + migration_test_add("/migration/precopy/fd/file", + test_precopy_fd_file); +#endif + + /* + * See explanation why this test is slow on function definition + */ + if (g_test_slow()) { + migration_test_add("/migration/auto_converge", + test_auto_converge); + if (g_str_equal(env->arch, "x86_64") && + env->has_kvm && env->has_dirty_ring) { + migration_test_add("/dirty_limit", + test_dirty_limit); + } + } + migration_test_add("/migration/multifd/tcp/channels/plain/none", + test_multifd_tcp_channels_none); + migration_test_add("/migration/multifd/tcp/plain/zero-page/legacy", + test_multifd_tcp_zero_page_legacy); + migration_test_add("/migration/multifd/tcp/plain/zero-page/none", + test_multifd_tcp_no_zero_page); + if (g_str_equal(env->arch, "x86_64") + && env->has_kvm && env->has_dirty_ring) { + + migration_test_add("/migration/dirty_ring", + test_precopy_unix_dirty_ring); + if (qtest_has_machine("pc") && g_test_slow()) { + migration_test_add("/migration/vcpu_dirty_limit", + test_vcpu_dirty_limit); + } + } + + /* ensure new status don't go unnoticed */ + assert(MIGRATION_STATUS__MAX == 15); + + for (int i = MIGRATION_STATUS_NONE; i < MIGRATION_STATUS__MAX; i++) { + switch (i) { + case MIGRATION_STATUS_DEVICE: /* happens too fast */ + case MIGRATION_STATUS_WAIT_UNPLUG: /* no support in tests */ + case MIGRATION_STATUS_COLO: /* no support in tests */ + case MIGRATION_STATUS_POSTCOPY_ACTIVE: /* postcopy can't be cancelled */ + case MIGRATION_STATUS_POSTCOPY_PAUSED: + case MIGRATION_STATUS_POSTCOPY_RECOVER_SETUP: + case MIGRATION_STATUS_POSTCOPY_RECOVER: + continue; + default: + migration_test_add_suffix("/migration/cancel/src/after/", + MigrationStatus_str(i), + test_cancel_src_after_status); + } + } +} diff --git a/tests/migration/s390x/Makefile b/tests/qtest/migration/s390x/Makefile similarity index 100% rename from tests/migration/s390x/Makefile rename to tests/qtest/migration/s390x/Makefile diff --git a/tests/migration/s390x/a-b-bios.c b/tests/qtest/migration/s390x/a-b-bios.c similarity index 100% rename from tests/migration/s390x/a-b-bios.c rename to tests/qtest/migration/s390x/a-b-bios.c diff --git a/tests/migration/s390x/a-b-bios.h b/tests/qtest/migration/s390x/a-b-bios.h similarity index 100% rename from tests/migration/s390x/a-b-bios.h rename to tests/qtest/migration/s390x/a-b-bios.h diff --git a/tests/qtest/migration/tls-tests.c b/tests/qtest/migration/tls-tests.c new file mode 100644 index 00000000000..21e9fec87d8 --- /dev/null +++ b/tests/qtest/migration/tls-tests.c @@ -0,0 +1,871 @@ +/* + * QTest testcases for TLS migration + * + * 
Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates + * based on the vhost-user-test.c that is: + * Copyright (c) 2014 Virtual Open Systems Sarl. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "crypto/tlscredspsk.h" +#include "libqtest.h" +#include "migration/framework.h" +#include "migration/migration-qmp.h" +#include "migration/migration-util.h" + +#include "tests/unit/crypto-tls-psk-helpers.h" +#ifdef CONFIG_TASN1 +# include "tests/unit/crypto-tls-x509-helpers.h" +#endif /* CONFIG_TASN1 */ + + +struct TestMigrateTLSPSKData { + char *workdir; + char *workdiralt; + char *pskfile; + char *pskfilealt; +}; + +static char *tmpfs; + +static void * +migrate_hook_start_tls_psk_common(QTestState *from, + QTestState *to, + bool mismatch) +{ + struct TestMigrateTLSPSKData *data = + g_new0(struct TestMigrateTLSPSKData, 1); + + data->workdir = g_strdup_printf("%s/tlscredspsk0", tmpfs); + data->pskfile = g_strdup_printf("%s/%s", data->workdir, + QCRYPTO_TLS_CREDS_PSKFILE); + g_mkdir_with_parents(data->workdir, 0700); + test_tls_psk_init(data->pskfile); + + if (mismatch) { + data->workdiralt = g_strdup_printf("%s/tlscredspskalt0", tmpfs); + data->pskfilealt = g_strdup_printf("%s/%s", data->workdiralt, + QCRYPTO_TLS_CREDS_PSKFILE); + g_mkdir_with_parents(data->workdiralt, 0700); + test_tls_psk_init_alt(data->pskfilealt); + } + + qtest_qmp_assert_success(from, + "{ 'execute': 'object-add'," + " 'arguments': { 'qom-type': 'tls-creds-psk'," + " 'id': 'tlscredspsk0'," + " 'endpoint': 'client'," + " 'dir': %s," + " 'username': 'qemu'} }", + data->workdir); + + qtest_qmp_assert_success(to, + "{ 'execute': 'object-add'," + " 'arguments': { 'qom-type': 'tls-creds-psk'," + " 'id': 'tlscredspsk0'," + " 'endpoint': 'server'," + " 'dir': %s } }", + mismatch ? 
data->workdiralt : data->workdir); + + migrate_set_parameter_str(from, "tls-creds", "tlscredspsk0"); + migrate_set_parameter_str(to, "tls-creds", "tlscredspsk0"); + + return data; +} + +static void * +migrate_hook_start_tls_psk_match(QTestState *from, + QTestState *to) +{ + return migrate_hook_start_tls_psk_common(from, to, false); +} + +static void * +migrate_hook_start_tls_psk_mismatch(QTestState *from, + QTestState *to) +{ + return migrate_hook_start_tls_psk_common(from, to, true); +} + +static void +migrate_hook_end_tls_psk(QTestState *from, + QTestState *to, + void *opaque) +{ + struct TestMigrateTLSPSKData *data = opaque; + + test_tls_psk_cleanup(data->pskfile); + if (data->pskfilealt) { + test_tls_psk_cleanup(data->pskfilealt); + } + rmdir(data->workdir); + if (data->workdiralt) { + rmdir(data->workdiralt); + } + + g_free(data->workdiralt); + g_free(data->pskfilealt); + g_free(data->workdir); + g_free(data->pskfile); + g_free(data); +} + +#ifdef CONFIG_TASN1 +typedef struct { + char *workdir; + char *keyfile; + char *cacert; + char *servercert; + char *serverkey; + char *clientcert; + char *clientkey; +} TestMigrateTLSX509Data; + +typedef struct { + bool verifyclient; + bool clientcert; + bool hostileclient; + bool authzclient; + const char *certhostname; + const char *certipaddr; +} TestMigrateTLSX509; + +static void * +migrate_hook_start_tls_x509_common(QTestState *from, + QTestState *to, + TestMigrateTLSX509 *args) +{ + TestMigrateTLSX509Data *data = g_new0(TestMigrateTLSX509Data, 1); + + data->workdir = g_strdup_printf("%s/tlscredsx5090", tmpfs); + data->keyfile = g_strdup_printf("%s/key.pem", data->workdir); + + data->cacert = g_strdup_printf("%s/ca-cert.pem", data->workdir); + data->serverkey = g_strdup_printf("%s/server-key.pem", data->workdir); + data->servercert = g_strdup_printf("%s/server-cert.pem", data->workdir); + if (args->clientcert) { + data->clientkey = g_strdup_printf("%s/client-key.pem", data->workdir); + data->clientcert = g_strdup_printf("%s/client-cert.pem", data->workdir); + } + + g_mkdir_with_parents(data->workdir, 0700); + + test_tls_init(data->keyfile); +#ifndef _WIN32 + g_assert(link(data->keyfile, data->serverkey) == 0); +#else + g_assert(CreateHardLink(data->serverkey, data->keyfile, NULL) != 0); +#endif + if (args->clientcert) { +#ifndef _WIN32 + g_assert(link(data->keyfile, data->clientkey) == 0); +#else + g_assert(CreateHardLink(data->clientkey, data->keyfile, NULL) != 0); +#endif + } + + TLS_ROOT_REQ_SIMPLE(cacertreq, data->cacert); + if (args->clientcert) { + TLS_CERT_REQ_SIMPLE_CLIENT(servercertreq, cacertreq, + args->hostileclient ? 
+ QCRYPTO_TLS_TEST_CLIENT_HOSTILE_NAME : + QCRYPTO_TLS_TEST_CLIENT_NAME, + data->clientcert); + test_tls_deinit_cert(&servercertreq); + } + + TLS_CERT_REQ_SIMPLE_SERVER(clientcertreq, cacertreq, + data->servercert, + args->certhostname, + args->certipaddr); + test_tls_deinit_cert(&clientcertreq); + test_tls_deinit_cert(&cacertreq); + + qtest_qmp_assert_success(from, + "{ 'execute': 'object-add'," + " 'arguments': { 'qom-type': 'tls-creds-x509'," + " 'id': 'tlscredsx509client0'," + " 'endpoint': 'client'," + " 'dir': %s," + " 'sanity-check': true," + " 'verify-peer': true} }", + data->workdir); + migrate_set_parameter_str(from, "tls-creds", "tlscredsx509client0"); + if (args->certhostname) { + migrate_set_parameter_str(from, "tls-hostname", args->certhostname); + } + + qtest_qmp_assert_success(to, + "{ 'execute': 'object-add'," + " 'arguments': { 'qom-type': 'tls-creds-x509'," + " 'id': 'tlscredsx509server0'," + " 'endpoint': 'server'," + " 'dir': %s," + " 'sanity-check': true," + " 'verify-peer': %i} }", + data->workdir, args->verifyclient); + migrate_set_parameter_str(to, "tls-creds", "tlscredsx509server0"); + + if (args->authzclient) { + qtest_qmp_assert_success(to, + "{ 'execute': 'object-add'," + " 'arguments': { 'qom-type': 'authz-simple'," + " 'id': 'tlsauthz0'," + " 'identity': %s} }", + "CN=" QCRYPTO_TLS_TEST_CLIENT_NAME); + migrate_set_parameter_str(to, "tls-authz", "tlsauthz0"); + } + + return data; +} + +/* + * The normal case: match server's cert hostname against + * whatever host we were telling QEMU to connect to (if any) + */ +static void * +migrate_hook_start_tls_x509_default_host(QTestState *from, + QTestState *to) +{ + TestMigrateTLSX509 args = { + .verifyclient = true, + .clientcert = true, + .certipaddr = "127.0.0.1" + }; + return migrate_hook_start_tls_x509_common(from, to, &args); +} + +/* + * The unusual case: the server's cert is different from + * the address we're telling QEMU to connect to (if any), + * so we must give QEMU an explicit hostname to validate + */ +static void * +migrate_hook_start_tls_x509_override_host(QTestState *from, + QTestState *to) +{ + TestMigrateTLSX509 args = { + .verifyclient = true, + .clientcert = true, + .certhostname = "qemu.org", + }; + return migrate_hook_start_tls_x509_common(from, to, &args); +} + +/* + * The unusual case: the server's cert is different from + * the address we're telling QEMU to connect to, and so we + * expect the client to reject the server + */ +static void * +migrate_hook_start_tls_x509_mismatch_host(QTestState *from, + QTestState *to) +{ + TestMigrateTLSX509 args = { + .verifyclient = true, + .clientcert = true, + .certipaddr = "10.0.0.1", + }; + return migrate_hook_start_tls_x509_common(from, to, &args); +} + +static void * +migrate_hook_start_tls_x509_friendly_client(QTestState *from, + QTestState *to) +{ + TestMigrateTLSX509 args = { + .verifyclient = true, + .clientcert = true, + .authzclient = true, + .certipaddr = "127.0.0.1", + }; + return migrate_hook_start_tls_x509_common(from, to, &args); +} + +static void * +migrate_hook_start_tls_x509_hostile_client(QTestState *from, + QTestState *to) +{ + TestMigrateTLSX509 args = { + .verifyclient = true, + .clientcert = true, + .hostileclient = true, + .authzclient = true, + .certipaddr = "127.0.0.1", + }; + return migrate_hook_start_tls_x509_common(from, to, &args); +} + +/* + * The case with no client certificate presented, + * and no server verification + */ +static void * +migrate_hook_start_tls_x509_allow_anon_client(QTestState *from, + QTestState *to) +{ 
+ TestMigrateTLSX509 args = { + .certipaddr = "127.0.0.1", + }; + return migrate_hook_start_tls_x509_common(from, to, &args); +} + +/* + * The case with no client certificate presented, + * and server verification rejecting + */ +static void * +migrate_hook_start_tls_x509_reject_anon_client(QTestState *from, + QTestState *to) +{ + TestMigrateTLSX509 args = { + .verifyclient = true, + .certipaddr = "127.0.0.1", + }; + return migrate_hook_start_tls_x509_common(from, to, &args); +} + +static void +migrate_hook_end_tls_x509(QTestState *from, + QTestState *to, + void *opaque) +{ + TestMigrateTLSX509Data *data = opaque; + + test_tls_cleanup(data->keyfile); + g_free(data->keyfile); + + unlink(data->cacert); + g_free(data->cacert); + unlink(data->servercert); + g_free(data->servercert); + unlink(data->serverkey); + g_free(data->serverkey); + + if (data->clientcert) { + unlink(data->clientcert); + g_free(data->clientcert); + } + if (data->clientkey) { + unlink(data->clientkey); + g_free(data->clientkey); + } + + rmdir(data->workdir); + g_free(data->workdir); + + g_free(data); +} +#endif /* CONFIG_TASN1 */ + +static void test_postcopy_tls_psk(void) +{ + MigrateCommon args = { + .start_hook = migrate_hook_start_tls_psk_match, + .end_hook = migrate_hook_end_tls_psk, + }; + + test_postcopy_common(&args); +} + +static void test_postcopy_preempt_tls_psk(void) +{ + MigrateCommon args = { + .start_hook = migrate_hook_start_tls_psk_match, + .end_hook = migrate_hook_end_tls_psk, + .start = { + .caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT] = true, + }, + }; + + test_postcopy_common(&args); +} + +static void test_postcopy_recovery_tls_psk(void) +{ + MigrateCommon args = { + .start_hook = migrate_hook_start_tls_psk_match, + .end_hook = migrate_hook_end_tls_psk, + }; + + test_postcopy_recovery_common(&args); +} + +static void test_multifd_postcopy_recovery_tls_psk(void) +{ + MigrateCommon args = { + .start_hook = migrate_hook_start_tls_psk_match, + .end_hook = migrate_hook_end_tls_psk, + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + }; + + test_postcopy_recovery_common(&args); +} + +/* This contains preempt+recovery+tls test altogether */ +static void test_postcopy_preempt_all(void) +{ + MigrateCommon args = { + .start_hook = migrate_hook_start_tls_psk_match, + .end_hook = migrate_hook_end_tls_psk, + .start = { + .caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT] = true, + }, + }; + + test_postcopy_recovery_common(&args); +} + +static void test_multifd_postcopy_preempt_recovery_tls_psk(void) +{ + MigrateCommon args = { + .start_hook = migrate_hook_start_tls_psk_match, + .end_hook = migrate_hook_end_tls_psk, + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + .caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT] = true, + }, + }; + + test_postcopy_recovery_common(&args); +} + +static void test_precopy_unix_tls_psk(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = uri, + .start_hook = migrate_hook_start_tls_psk_match, + .end_hook = migrate_hook_end_tls_psk, + }; + + test_precopy_common(&args); +} + +#ifdef CONFIG_TASN1 +static void test_precopy_unix_tls_x509_default_host(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .connect_uri = uri, + .listen_uri = uri, + .start_hook = migrate_hook_start_tls_x509_default_host, + .end_hook = migrate_hook_end_tls_x509, + .result = MIG_TEST_FAIL_DEST_QUIT_ERR, + }; + + 
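/*
+     * Connecting over a UNIX socket leaves no hostname for the client to
+     * validate the server certificate against, so the handshake is
+     * expected to fail and the destination quits (hence
+     * MIG_TEST_FAIL_DEST_QUIT_ERR above).
+     */
+    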
test_precopy_common(&args); +} + +static void test_precopy_unix_tls_x509_override_host(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .connect_uri = uri, + .listen_uri = uri, + .start_hook = migrate_hook_start_tls_x509_override_host, + .end_hook = migrate_hook_end_tls_x509, + }; + + test_precopy_common(&args); +} +#endif /* CONFIG_TASN1 */ + +static void test_precopy_tcp_tls_psk_match(void) +{ + MigrateCommon args = { + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = migrate_hook_start_tls_psk_match, + .end_hook = migrate_hook_end_tls_psk, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_psk_mismatch(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = migrate_hook_start_tls_psk_mismatch, + .end_hook = migrate_hook_end_tls_psk, + .result = MIG_TEST_FAIL, + }; + + test_precopy_common(&args); +} + +#ifdef CONFIG_TASN1 +static void test_precopy_tcp_tls_x509_default_host(void) +{ + MigrateCommon args = { + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = migrate_hook_start_tls_x509_default_host, + .end_hook = migrate_hook_end_tls_x509, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_x509_override_host(void) +{ + MigrateCommon args = { + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = migrate_hook_start_tls_x509_override_host, + .end_hook = migrate_hook_end_tls_x509, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_x509_mismatch_host(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = migrate_hook_start_tls_x509_mismatch_host, + .end_hook = migrate_hook_end_tls_x509, + .result = MIG_TEST_FAIL_DEST_QUIT_ERR, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_x509_friendly_client(void) +{ + MigrateCommon args = { + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = migrate_hook_start_tls_x509_friendly_client, + .end_hook = migrate_hook_end_tls_x509, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_x509_hostile_client(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = migrate_hook_start_tls_x509_hostile_client, + .end_hook = migrate_hook_end_tls_x509, + .result = MIG_TEST_FAIL, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_x509_allow_anon_client(void) +{ + MigrateCommon args = { + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = migrate_hook_start_tls_x509_allow_anon_client, + .end_hook = migrate_hook_end_tls_x509, + }; + + test_precopy_common(&args); +} + +static void test_precopy_tcp_tls_x509_reject_anon_client(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + }, + .listen_uri = "tcp:127.0.0.1:0", + .start_hook = migrate_hook_start_tls_x509_reject_anon_client, + .end_hook = migrate_hook_end_tls_x509, + .result = MIG_TEST_FAIL, + }; + + test_precopy_common(&args); +} +#endif /* CONFIG_TASN1 */ + +static void * +migrate_hook_start_multifd_tcp_tls_psk_match(QTestState *from, + QTestState *to) +{ + migrate_hook_start_precopy_tcp_multifd_common(from, to, "none"); + return migrate_hook_start_tls_psk_match(from, to); +} + +static void * +migrate_hook_start_multifd_tcp_tls_psk_mismatch(QTestState *from, + QTestState *to) +{ + migrate_hook_start_precopy_tcp_multifd_common(from, to, "none"); + return migrate_hook_start_tls_psk_mismatch(from, to); 
+} + +#ifdef CONFIG_TASN1 +static void * +migrate_hook_start_multifd_tls_x509_default_host(QTestState *from, + QTestState *to) +{ + migrate_hook_start_precopy_tcp_multifd_common(from, to, "none"); + return migrate_hook_start_tls_x509_default_host(from, to); +} + +static void * +migrate_hook_start_multifd_tls_x509_override_host(QTestState *from, + QTestState *to) +{ + migrate_hook_start_precopy_tcp_multifd_common(from, to, "none"); + return migrate_hook_start_tls_x509_override_host(from, to); +} + +static void * +migrate_hook_start_multifd_tls_x509_mismatch_host(QTestState *from, + QTestState *to) +{ + migrate_hook_start_precopy_tcp_multifd_common(from, to, "none"); + return migrate_hook_start_tls_x509_mismatch_host(from, to); +} + +static void * +migrate_hook_start_multifd_tls_x509_allow_anon_client(QTestState *from, + QTestState *to) +{ + migrate_hook_start_precopy_tcp_multifd_common(from, to, "none"); + return migrate_hook_start_tls_x509_allow_anon_client(from, to); +} + +static void * +migrate_hook_start_multifd_tls_x509_reject_anon_client(QTestState *from, + QTestState *to) +{ + migrate_hook_start_precopy_tcp_multifd_common(from, to, "none"); + return migrate_hook_start_tls_x509_reject_anon_client(from, to); +} +#endif /* CONFIG_TASN1 */ + +static void test_multifd_tcp_tls_psk_match(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = migrate_hook_start_multifd_tcp_tls_psk_match, + .end_hook = migrate_hook_end_tls_psk, + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_tls_psk_mismatch(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + .listen_uri = "defer", + .start_hook = migrate_hook_start_multifd_tcp_tls_psk_mismatch, + .end_hook = migrate_hook_end_tls_psk, + .result = MIG_TEST_FAIL, + }; + test_precopy_common(&args); +} + +static void test_multifd_postcopy_tcp_tls_psk_match(void) +{ + MigrateCommon args = { + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + .caps[MIGRATION_CAPABILITY_POSTCOPY_RAM] = true, + }, + .listen_uri = "defer", + .start_hook = migrate_hook_start_multifd_tcp_tls_psk_match, + .end_hook = migrate_hook_end_tls_psk, + }; + + test_precopy_common(&args); +} + +#ifdef CONFIG_TASN1 +static void test_multifd_tcp_tls_x509_default_host(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = migrate_hook_start_multifd_tls_x509_default_host, + .end_hook = migrate_hook_end_tls_x509, + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_tls_x509_override_host(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = migrate_hook_start_multifd_tls_x509_override_host, + .end_hook = migrate_hook_end_tls_x509, + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_tls_x509_mismatch_host(void) +{ + /* + * This has different behaviour to the non-multifd case. + * + * In non-multifd case when client aborts due to mismatched + * cert host, the server has already started trying to load + * migration state, and so it exits with I/O failure. + * + * In multifd case when client aborts due to mismatched + * cert host, the server is still waiting for the other + * multifd connections to arrive so hasn't started trying + * to load migration state, and thus just aborts the migration + * without exiting. 
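+     * That is why this test expects MIG_TEST_FAIL here rather than the
+     * MIG_TEST_FAIL_DEST_QUIT_ERR used by the non-multifd variant.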
+ */ + MigrateCommon args = { + .start = { + .hide_stderr = true, + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + .listen_uri = "defer", + .start_hook = migrate_hook_start_multifd_tls_x509_mismatch_host, + .end_hook = migrate_hook_end_tls_x509, + .result = MIG_TEST_FAIL, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_tls_x509_allow_anon_client(void) +{ + MigrateCommon args = { + .listen_uri = "defer", + .start_hook = migrate_hook_start_multifd_tls_x509_allow_anon_client, + .end_hook = migrate_hook_end_tls_x509, + .start = { + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + }; + test_precopy_common(&args); +} + +static void test_multifd_tcp_tls_x509_reject_anon_client(void) +{ + MigrateCommon args = { + .start = { + .hide_stderr = true, + .caps[MIGRATION_CAPABILITY_MULTIFD] = true, + }, + .listen_uri = "defer", + .start_hook = migrate_hook_start_multifd_tls_x509_reject_anon_client, + .end_hook = migrate_hook_end_tls_x509, + .result = MIG_TEST_FAIL, + }; + test_precopy_common(&args); +} +#endif /* CONFIG_TASN1 */ + +static void migration_test_add_tls_smoke(MigrationTestEnv *env) +{ + migration_test_add("/migration/precopy/tcp/tls/psk/match", + test_precopy_tcp_tls_psk_match); +} + +void migration_test_add_tls(MigrationTestEnv *env) +{ + tmpfs = env->tmpfs; + + migration_test_add_tls_smoke(env); + + if (!env->full_set) { + return; + } + + migration_test_add("/migration/precopy/unix/tls/psk", + test_precopy_unix_tls_psk); + + if (env->has_uffd) { + /* + * NOTE: psk test is enough for postcopy, as other types of TLS + * channels are tested under precopy. Here what we want to test is the + * general postcopy path that has TLS channel enabled. + */ + migration_test_add("/migration/postcopy/tls/psk", + test_postcopy_tls_psk); + migration_test_add("/migration/postcopy/recovery/tls/psk", + test_postcopy_recovery_tls_psk); + migration_test_add("/migration/postcopy/preempt/tls/psk", + test_postcopy_preempt_tls_psk); + migration_test_add("/migration/postcopy/preempt/recovery/tls/psk", + test_postcopy_preempt_all); + migration_test_add("/migration/multifd+postcopy/recovery/tls/psk", + test_multifd_postcopy_recovery_tls_psk); + migration_test_add( + "/migration/multifd+postcopy/preempt/recovery/tls/psk", + test_multifd_postcopy_preempt_recovery_tls_psk); + } +#ifdef CONFIG_TASN1 + migration_test_add("/migration/precopy/unix/tls/x509/default-host", + test_precopy_unix_tls_x509_default_host); + migration_test_add("/migration/precopy/unix/tls/x509/override-host", + test_precopy_unix_tls_x509_override_host); +#endif /* CONFIG_TASN1 */ + + migration_test_add("/migration/precopy/tcp/tls/psk/mismatch", + test_precopy_tcp_tls_psk_mismatch); +#ifdef CONFIG_TASN1 + migration_test_add("/migration/precopy/tcp/tls/x509/default-host", + test_precopy_tcp_tls_x509_default_host); + migration_test_add("/migration/precopy/tcp/tls/x509/override-host", + test_precopy_tcp_tls_x509_override_host); + migration_test_add("/migration/precopy/tcp/tls/x509/mismatch-host", + test_precopy_tcp_tls_x509_mismatch_host); + migration_test_add("/migration/precopy/tcp/tls/x509/friendly-client", + test_precopy_tcp_tls_x509_friendly_client); + migration_test_add("/migration/precopy/tcp/tls/x509/hostile-client", + test_precopy_tcp_tls_x509_hostile_client); + migration_test_add("/migration/precopy/tcp/tls/x509/allow-anon-client", + test_precopy_tcp_tls_x509_allow_anon_client); + migration_test_add("/migration/precopy/tcp/tls/x509/reject-anon-client", + test_precopy_tcp_tls_x509_reject_anon_client); +#endif /* 
CONFIG_TASN1 */ + + migration_test_add("/migration/multifd/tcp/tls/psk/match", + test_multifd_tcp_tls_psk_match); + migration_test_add("/migration/multifd/tcp/tls/psk/mismatch", + test_multifd_tcp_tls_psk_mismatch); + if (env->has_uffd) { + migration_test_add("/migration/multifd+postcopy/tcp/tls/psk/match", + test_multifd_postcopy_tcp_tls_psk_match); + } +#ifdef CONFIG_TASN1 + migration_test_add("/migration/multifd/tcp/tls/x509/default-host", + test_multifd_tcp_tls_x509_default_host); + migration_test_add("/migration/multifd/tcp/tls/x509/override-host", + test_multifd_tcp_tls_x509_override_host); + migration_test_add("/migration/multifd/tcp/tls/x509/mismatch-host", + test_multifd_tcp_tls_x509_mismatch_host); + migration_test_add("/migration/multifd/tcp/tls/x509/allow-anon-client", + test_multifd_tcp_tls_x509_allow_anon_client); + migration_test_add("/migration/multifd/tcp/tls/x509/reject-anon-client", + test_multifd_tcp_tls_x509_reject_anon_client); +#endif /* CONFIG_TASN1 */ +} diff --git a/tests/qtest/netdev-socket.c b/tests/qtest/netdev-socket.c index fc7d11961ea..b731af0ad95 100644 --- a/tests/qtest/netdev-socket.c +++ b/tests/qtest/netdev-socket.c @@ -11,7 +11,7 @@ #include #include "../unit/socket-helpers.h" #include "libqtest.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qstring.h" #include "qemu/sockets.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-visit-sockets.h" @@ -204,7 +204,7 @@ static void test_stream_unix_reconnect(void) qts1 = qtest_initf("-nodefaults -M none " "-netdev stream,server=false,id=st0,addr.type=unix," - "addr.path=%s,reconnect=1", path); + "addr.path=%s,reconnect-ms=1000", path); wait_stream_connected(qts0, "st0", &addr); g_assert_cmpint(addr->type, ==, SOCKET_ADDRESS_TYPE_UNIX); diff --git a/tests/qtest/npcm7xx_adc-test.c b/tests/qtest/npcm7xx_adc-test.c index e751a72e367..8bc89b8a8b9 100644 --- a/tests/qtest/npcm7xx_adc-test.c +++ b/tests/qtest/npcm7xx_adc-test.c @@ -18,7 +18,7 @@ #include "qemu/bitops.h" #include "qemu/timer.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #define REF_HZ (25000000) diff --git a/tests/qtest/npcm7xx_emc-test.c b/tests/qtest/npcm7xx_emc-test.c index 2e1a1a6d702..eeedb27ee68 100644 --- a/tests/qtest/npcm7xx_emc-test.c +++ b/tests/qtest/npcm7xx_emc-test.c @@ -16,8 +16,8 @@ #include "qemu/osdep.h" #include "libqos/libqos.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qnum.h" +#include "qobject/qdict.h" +#include "qobject/qnum.h" #include "qemu/bitops.h" #include "qemu/iov.h" diff --git a/tests/qtest/npcm7xx_pwm-test.c b/tests/qtest/npcm7xx_pwm-test.c index b53a43c4171..052ea876621 100644 --- a/tests/qtest/npcm7xx_pwm-test.c +++ b/tests/qtest/npcm7xx_pwm-test.c @@ -17,8 +17,8 @@ #include "qemu/osdep.h" #include "qemu/bitops.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qnum.h" +#include "qobject/qdict.h" +#include "qobject/qnum.h" static int verbosity_level; diff --git a/tests/qtest/npcm7xx_timer-test.c b/tests/qtest/npcm7xx_timer-test.c index 58f58c2f71b..43711049ca7 100644 --- a/tests/qtest/npcm7xx_timer-test.c +++ b/tests/qtest/npcm7xx_timer-test.c @@ -465,7 +465,6 @@ static void test_periodic_interrupt(gconstpointer test_data) int i; tim_reset(td); - clock_step_next(); tim_write_ticr(td, count); tim_write_tcsr(td, CEN | IE | MODE_PERIODIC | PRESCALE(ps)); diff --git a/tests/qtest/npcm7xx_watchdog_timer-test.c b/tests/qtest/npcm7xx_watchdog_timer-test.c index 981b853c99d..521ea789f18 100644 --- a/tests/qtest/npcm7xx_watchdog_timer-test.c 
+++ b/tests/qtest/npcm7xx_watchdog_timer-test.c @@ -18,7 +18,7 @@ #include "qemu/timer.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #define WTCR_OFFSET 0x1c #define REF_HZ (25000000) diff --git a/tests/qtest/npcm_gmac-test.c b/tests/qtest/npcm_gmac-test.c index c28b471ab20..1317da2cd7c 100644 --- a/tests/qtest/npcm_gmac-test.c +++ b/tests/qtest/npcm_gmac-test.c @@ -36,7 +36,7 @@ typedef struct TestData { const GMACModule *module; } TestData; -/* Values extracted from hw/arm/npcm7xx.c */ +/* Values extracted from hw/arm/npcm8xx.c */ static const GMACModule gmac_module_list[] = { { .irq = 14, @@ -46,6 +46,14 @@ static const GMACModule gmac_module_list[] = { .irq = 15, .base_addr = 0xf0804000 }, + { + .irq = 16, + .base_addr = 0xf0806000 + }, + { + .irq = 17, + .base_addr = 0xf0808000 + } }; /* Returns the index of the GMAC module. */ @@ -174,18 +182,32 @@ static uint32_t gmac_read(QTestState *qts, const GMACModule *mod, return qtest_readl(qts, mod->base_addr + regno); } +static uint16_t pcs_read(QTestState *qts, const GMACModule *mod, + NPCMRegister regno) +{ + uint32_t write_value = (regno & 0x3ffe00) >> 9; + qtest_writel(qts, PCS_BASE_ADDRESS + NPCM_PCS_IND_AC_BA, write_value); + uint32_t read_offset = regno & 0x1ff; + return qtest_readl(qts, PCS_BASE_ADDRESS + read_offset); +} + /* Check that GMAC registers are reset to default value */ static void test_init(gconstpointer test_data) { const TestData *td = test_data; const GMACModule *mod = td->module; - QTestState *qts = qtest_init("-machine npcm750-evb"); + QTestState *qts = qtest_init("-machine npcm845-evb"); #define CHECK_REG32(regno, value) \ do { \ g_assert_cmphex(gmac_read(qts, mod, (regno)), ==, (value)); \ } while (0) +#define CHECK_REG_PCS(regno, value) \ + do { \ + g_assert_cmphex(pcs_read(qts, mod, (regno)), ==, (value)); \ + } while (0) + CHECK_REG32(NPCM_DMA_BUS_MODE, 0x00020100); CHECK_REG32(NPCM_DMA_XMT_POLL_DEMAND, 0); CHECK_REG32(NPCM_DMA_RCV_POLL_DEMAND, 0); @@ -235,6 +257,63 @@ static void test_init(gconstpointer test_data) CHECK_REG32(NPCM_GMAC_PTP_TAR, 0); CHECK_REG32(NPCM_GMAC_PTP_TTSR, 0); + if (mod->base_addr == 0xf0802000) { + CHECK_REG_PCS(NPCM_PCS_SR_CTL_ID1, 0x699e); + CHECK_REG_PCS(NPCM_PCS_SR_CTL_ID2, 0); + CHECK_REG_PCS(NPCM_PCS_SR_CTL_STS, 0x8000); + + CHECK_REG_PCS(NPCM_PCS_SR_MII_CTRL, 0x1140); + CHECK_REG_PCS(NPCM_PCS_SR_MII_STS, 0x0109); + CHECK_REG_PCS(NPCM_PCS_SR_MII_DEV_ID1, 0x699e); + CHECK_REG_PCS(NPCM_PCS_SR_MII_DEV_ID2, 0x0ced0); + CHECK_REG_PCS(NPCM_PCS_SR_MII_AN_ADV, 0x0020); + CHECK_REG_PCS(NPCM_PCS_SR_MII_LP_BABL, 0); + CHECK_REG_PCS(NPCM_PCS_SR_MII_AN_EXPN, 0); + CHECK_REG_PCS(NPCM_PCS_SR_MII_EXT_STS, 0xc000); + + CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_ABL, 0x0003); + CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_TX_MAX_DLY_LWR, 0x0038); + CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_TX_MAX_DLY_UPR, 0); + CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_TX_MIN_DLY_LWR, 0x0038); + CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_TX_MIN_DLY_UPR, 0); + CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_RX_MAX_DLY_LWR, 0x0058); + CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_RX_MAX_DLY_UPR, 0); + CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_RX_MIN_DLY_LWR, 0x0048); + CHECK_REG_PCS(NPCM_PCS_SR_TIM_SYNC_RX_MIN_DLY_UPR, 0); + + CHECK_REG_PCS(NPCM_PCS_VR_MII_MMD_DIG_CTRL1, 0x2400); + CHECK_REG_PCS(NPCM_PCS_VR_MII_AN_CTRL, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_AN_INTR_STS, 0x000a); + CHECK_REG_PCS(NPCM_PCS_VR_MII_TC, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_DBG_CTRL, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_EEE_MCTRL0, 0x899c); + 
CHECK_REG_PCS(NPCM_PCS_VR_MII_EEE_TXTIMER, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_EEE_RXTIMER, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_LINK_TIMER_CTRL, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_EEE_MCTRL1, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_DIG_STS, 0x0010); + CHECK_REG_PCS(NPCM_PCS_VR_MII_ICG_ERRCNT1, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MISC_STS, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_RX_LSTS, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_TX_BSTCTRL0, 0x00a); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_TX_LVLCTRL0, 0x007f); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_TX_GENCTRL0, 0x0001); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_TX_GENCTRL1, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_TX_STS, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_RX_GENCTRL0, 0x0100); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_RX_GENCTRL1, 0x1100); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_RX_LOS_CTRL0, 0x000e); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_MPLL_CTRL0, 0x0100); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_MPLL_CTRL1, 0x0032); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_MPLL_STS, 0x0001); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_MISC_CTRL2, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_LVL_CTRL, 0x0019); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_MISC_CTRL0, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_MP_MISC_CTRL1, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_DIG_CTRL2, 0); + CHECK_REG_PCS(NPCM_PCS_VR_MII_DIG_ERRCNT_SEL, 0); + } + qtest_quit(qts); } @@ -242,7 +321,7 @@ static void gmac_add_test(const char *name, const TestData* td, GTestDataFunc fn) { g_autofree char *full_name = g_strdup_printf( - "npcm7xx_gmac/gmac[%d]/%s", gmac_module_index(td->module), name); + "npcm8xx_gmac/gmac[%d]/%s", gmac_module_index(td->module), name); qtest_add_data_func(full_name, td, fn); } diff --git a/tests/qtest/numa-test.c b/tests/qtest/numa-test.c index ede418963cb..d657f389474 100644 --- a/tests/qtest/numa-test.c +++ b/tests/qtest/numa-test.c @@ -11,8 +11,8 @@ #include "qemu/osdep.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" static char *make_cli(const GString *generic_cli, const char *test_cli) { @@ -162,7 +162,7 @@ static void pc_numa_cpu(const void *data) } else if (socket == 1 && core == 1 && thread == 1) { g_assert_cmpint(node, ==, 1); } else { - g_assert(false); + g_assert_not_reached(); } qobject_unref(e); } @@ -207,7 +207,7 @@ static void spapr_numa_cpu(const void *data) } else if (core == 3) { g_assert_cmpint(node, ==, 1); } else { - g_assert(false); + g_assert_not_reached(); } qobject_unref(e); } @@ -257,7 +257,7 @@ static void aarch64_numa_cpu(const void *data) } else if (socket == 1 && cluster == 0 && core == 0 && thread == 0) { g_assert_cmpint(node, ==, 0); } else { - g_assert(false); + g_assert_not_reached(); } qobject_unref(e); } @@ -305,7 +305,7 @@ static void loongarch64_numa_cpu(const void *data) } else if (socket == 1 && core == 0 && thread == 0) { g_assert_cmpint(node, ==, 0); } else { - g_assert(false); + g_assert_not_reached(); } qobject_unref(e); } @@ -367,7 +367,7 @@ static void pc_dynamic_cpu_cfg(const void *data) } else if (socket == 1) { g_assert_cmpint(node, ==, 0); } else { - g_assert(false); + g_assert_not_reached(); } qobject_unref(e); } diff --git a/tests/qtest/pnv-host-i2c-test.c b/tests/qtest/pnv-host-i2c-test.c index 7f64d597ac1..51e613ebdcb 100644 --- a/tests/qtest/pnv-host-i2c-test.c +++ b/tests/qtest/pnv-host-i2c-test.c @@ -191,12 +191,10 @@ static uint8_t pnv_i2c_pca9554_read_pins(PnvI2cDev *dev) { uint8_t send_buf[1]; uint8_t recv_buf[1]; - uint8_t inputs; send_buf[0] = PCA9554_INPUT; pnv_i2c_send(dev, send_buf, 1); 
pnv_i2c_recv(dev, recv_buf, 1); - inputs = recv_buf[0]; - return inputs; + return recv_buf[0]; } static void pnv_i2c_pca9554_flip_polarity(PnvI2cDev *dev) diff --git a/tests/qtest/pnv-spi-seeprom-test.c b/tests/qtest/pnv-spi-seeprom-test.c index 57f20af76e1..600493c425d 100644 --- a/tests/qtest/pnv-spi-seeprom-test.c +++ b/tests/qtest/pnv-spi-seeprom-test.c @@ -92,7 +92,7 @@ static void test_spi_seeprom(const void *data) qts = qtest_initf("-machine powernv10 -smp 2,cores=2," "threads=1 -accel tcg,thread=single -nographic " "-blockdev node-name=pib_spic2,driver=file," - "filename=%s -device 25csm04,bus=pnv-spi-bus.2,cs=0," + "filename=%s -device 25csm04,bus=chip0.spi.2,cs=0," "drive=pib_spic2", tmp_path); spi_seeprom_transaction(qts, chip); qtest_quit(qts); diff --git a/tests/qtest/pnv-xive2-common.c b/tests/qtest/pnv-xive2-common.c new file mode 100644 index 00000000000..bf2bce00567 --- /dev/null +++ b/tests/qtest/pnv-xive2-common.c @@ -0,0 +1,190 @@ +/* + * QTest testcase for PowerNV 10 interrupt controller (xive2) + * - Common functions for XIVE2 tests + * + * Copyright (c) 2024, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "libqtest.h" + +#include "pnv-xive2-common.h" + + +static uint64_t pnv_xscom_addr(uint32_t pcba) +{ + return P10_XSCOM_BASE | ((uint64_t) pcba << 3); +} + +static uint64_t pnv_xive_xscom_addr(uint32_t reg) +{ + return pnv_xscom_addr(XIVE_XSCOM + reg); +} + +uint64_t pnv_xive_xscom_read(QTestState *qts, uint32_t reg) +{ + return qtest_readq(qts, pnv_xive_xscom_addr(reg)); +} + +void pnv_xive_xscom_write(QTestState *qts, uint32_t reg, uint64_t val) +{ + qtest_writeq(qts, pnv_xive_xscom_addr(reg), val); +} + +static void xive_get_struct(QTestState *qts, uint64_t src, void *dest, + size_t size) +{ + uint8_t *destination = (uint8_t *)dest; + size_t i; + + for (i = 0; i < size; i++) { + *(destination + i) = qtest_readb(qts, src + i); + } +} + +static void xive_copy_struct(QTestState *qts, void *src, uint64_t dest, + size_t size) +{ + uint8_t *source = (uint8_t *)src; + size_t i; + + for (i = 0; i < size; i++) { + qtest_writeb(qts, dest + i, *(source + i)); + } +} + +uint64_t xive_get_queue_addr(uint32_t end_index) +{ + return XIVE_QUEUE_MEM + (uint64_t)end_index * XIVE_QUEUE_SIZE; +} + +uint8_t get_esb(QTestState *qts, uint32_t index, uint8_t page, + uint32_t offset) +{ + uint64_t addr; + + addr = XIVE_ESB_ADDR + ((uint64_t)index << (XIVE_PAGE_SHIFT + 1)); + if (page == 1) { + addr += 1 << XIVE_PAGE_SHIFT; + } + return qtest_readb(qts, addr + offset); +} + +void set_esb(QTestState *qts, uint32_t index, uint8_t page, + uint32_t offset, uint32_t val) +{ + uint64_t addr; + + addr = XIVE_ESB_ADDR + ((uint64_t)index << (XIVE_PAGE_SHIFT + 1)); + if (page == 1) { + addr += 1 << XIVE_PAGE_SHIFT; + } + return qtest_writel(qts, addr + offset, cpu_to_be32(val)); +} + +void get_nvp(QTestState *qts, uint32_t index, Xive2Nvp* nvp) +{ + uint64_t addr = XIVE_NVP_MEM + (uint64_t)index * sizeof(Xive2Nvp); + xive_get_struct(qts, addr, nvp, sizeof(Xive2Nvp)); +} + +void set_nvp(QTestState *qts, uint32_t index, uint8_t first) +{ + uint64_t nvp_addr; + Xive2Nvp nvp; + uint64_t report_addr; + + nvp_addr = XIVE_NVP_MEM + (uint64_t)index * sizeof(Xive2Nvp); + report_addr = (XIVE_REPORT_MEM + (uint64_t)index * XIVE_REPORT_SIZE) >> 8; + + memset(&nvp, 0, sizeof(nvp)); + nvp.w0 = xive_set_field32(NVP2_W0_VALID, 0, 1); + nvp.w0 = xive_set_field32(NVP2_W0_PGOFIRST, nvp.w0, first); + nvp.w6 = xive_set_field32(NVP2_W6_REPORTING_LINE, nvp.w6, + 
(report_addr >> 24) & 0xfffffff); + nvp.w7 = xive_set_field32(NVP2_W7_REPORTING_LINE, nvp.w7, + report_addr & 0xffffff); + xive_copy_struct(qts, &nvp, nvp_addr, sizeof(nvp)); +} + +static uint64_t get_cl_pair_addr(Xive2Nvp *nvp) +{ + uint64_t upper = xive_get_field32(0x0fffffff, nvp->w6); + uint64_t lower = xive_get_field32(0xffffff00, nvp->w7); + return (upper << 32) | (lower << 8); +} + +void get_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair) +{ + uint64_t addr = get_cl_pair_addr(nvp); + xive_get_struct(qts, addr, cl_pair, XIVE_REPORT_SIZE); +} + +void set_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair) +{ + uint64_t addr = get_cl_pair_addr(nvp); + xive_copy_struct(qts, cl_pair, addr, XIVE_REPORT_SIZE); +} + +void set_nvg(QTestState *qts, uint32_t index, uint8_t next) +{ + uint64_t nvg_addr; + Xive2Nvgc nvg; + + nvg_addr = XIVE_NVG_MEM + (uint64_t)index * sizeof(Xive2Nvgc); + + memset(&nvg, 0, sizeof(nvg)); + nvg.w0 = xive_set_field32(NVGC2_W0_VALID, 0, 1); + nvg.w0 = xive_set_field32(NVGC2_W0_PGONEXT, nvg.w0, next); + xive_copy_struct(qts, &nvg, nvg_addr, sizeof(nvg)); +} + +void set_eas(QTestState *qts, uint32_t index, uint32_t end_index, + uint32_t data) +{ + uint64_t eas_addr; + Xive2Eas eas; + + eas_addr = XIVE_EAS_MEM + (uint64_t)index * sizeof(Xive2Eas); + + memset(&eas, 0, sizeof(eas)); + eas.w = xive_set_field64(EAS2_VALID, 0, 1); + eas.w = xive_set_field64(EAS2_END_INDEX, eas.w, end_index); + eas.w = xive_set_field64(EAS2_END_DATA, eas.w, data); + xive_copy_struct(qts, &eas, eas_addr, sizeof(eas)); +} + +void set_end(QTestState *qts, uint32_t index, uint32_t nvp_index, + uint8_t priority, bool i) +{ + uint64_t end_addr, queue_addr, queue_hi, queue_lo; + uint8_t queue_size; + Xive2End end; + + end_addr = XIVE_END_MEM + (uint64_t)index * sizeof(Xive2End); + queue_addr = xive_get_queue_addr(index); + queue_hi = (queue_addr >> 32) & END2_W2_EQ_ADDR_HI; + queue_lo = queue_addr & END2_W3_EQ_ADDR_LO; + queue_size = ctz16(XIVE_QUEUE_SIZE) - 12; + + memset(&end, 0, sizeof(end)); + end.w0 = xive_set_field32(END2_W0_VALID, 0, 1); + end.w0 = xive_set_field32(END2_W0_ENQUEUE, end.w0, 1); + end.w0 = xive_set_field32(END2_W0_UCOND_NOTIFY, end.w0, 1); + end.w0 = xive_set_field32(END2_W0_BACKLOG, end.w0, 1); + + end.w1 = xive_set_field32(END2_W1_GENERATION, 0, 1); + + end.w2 = cpu_to_be32(queue_hi); + + end.w3 = cpu_to_be32(queue_lo); + end.w3 = xive_set_field32(END2_W3_QSIZE, end.w3, queue_size); + + end.w6 = xive_set_field32(END2_W6_IGNORE, 0, i); + end.w6 = xive_set_field32(END2_W6_VP_OFFSET, end.w6, nvp_index); + + end.w7 = xive_set_field32(END2_W7_F0_PRIORITY, 0, priority); + xive_copy_struct(qts, &end, end_addr, sizeof(end)); +} + diff --git a/tests/qtest/pnv-xive2-common.h b/tests/qtest/pnv-xive2-common.h new file mode 100644 index 00000000000..2077c05ebc7 --- /dev/null +++ b/tests/qtest/pnv-xive2-common.h @@ -0,0 +1,112 @@ +/* + * QTest testcase for PowerNV 10 interrupt controller (xive2) + * + * Copyright (c) 2024, IBM Corporation. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef TEST_PNV_XIVE2_COMMON_H +#define TEST_PNV_XIVE2_COMMON_H + +#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit)) +#define PPC_BIT32(bit) (0x80000000 >> (bit)) +#define PPC_BIT8(bit) (0x80 >> (bit)) +#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) +#define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be)) | \ + PPC_BIT32(bs)) +#include "qemu/bswap.h" +#include "hw/intc/pnv_xive2_regs.h" +#include "hw/ppc/xive_regs.h" +#include "hw/ppc/xive2_regs.h" + +/* + * sizing: + * 128 interrupts + * => ESB BAR range: 16M + * 256 ENDs + * => END BAR range: 16M + * 256 VPs + * => NVPG,NVC BAR range: 32M + */ +#define MAX_IRQS 128 +#define MAX_ENDS 256 +#define MAX_VPS 256 + +#define XIVE_PAGE_SHIFT 16 + +#define XIVE_TRIGGER_PAGE 0 +#define XIVE_EOI_PAGE 1 + +#define XIVE_IC_ADDR 0x0006030200000000ull +#define XIVE_IC_TM_INDIRECT (XIVE_IC_ADDR + (256 << XIVE_PAGE_SHIFT)) +#define XIVE_IC_BAR ((0x3ull << 62) | XIVE_IC_ADDR) +#define XIVE_TM_BAR 0xc006030203180000ull +#define XIVE_ESB_ADDR 0x0006050000000000ull +#define XIVE_ESB_BAR ((0x3ull << 62) | XIVE_ESB_ADDR) +#define XIVE_END_BAR 0xc006060000000000ull +#define XIVE_NVPG_ADDR 0x0006040000000000ull +#define XIVE_NVPG_BAR ((0x3ull << 62) | XIVE_NVPG_ADDR) +#define XIVE_NVC_ADDR 0x0006030208000000ull +#define XIVE_NVC_BAR ((0x3ull << 62) | XIVE_NVC_ADDR) + +/* + * Memory layout + * A check is done when a table is configured to ensure that the max + * size of the resource fits in the table. + */ +#define XIVE_VST_SIZE 0x10000ull /* must be at least 4k */ + +#define XIVE_MEM_START 0x10000000ull +#define XIVE_ESB_MEM XIVE_MEM_START +#define XIVE_EAS_MEM (XIVE_ESB_MEM + XIVE_VST_SIZE) +#define XIVE_END_MEM (XIVE_EAS_MEM + XIVE_VST_SIZE) +#define XIVE_NVP_MEM (XIVE_END_MEM + XIVE_VST_SIZE) +#define XIVE_NVG_MEM (XIVE_NVP_MEM + XIVE_VST_SIZE) +#define XIVE_NVC_MEM (XIVE_NVG_MEM + XIVE_VST_SIZE) +#define XIVE_SYNC_MEM (XIVE_NVC_MEM + XIVE_VST_SIZE) +#define XIVE_QUEUE_MEM (XIVE_SYNC_MEM + XIVE_VST_SIZE) +#define XIVE_QUEUE_SIZE 4096 /* per End */ +#define XIVE_REPORT_MEM (XIVE_QUEUE_MEM + XIVE_QUEUE_SIZE * MAX_VPS) +#define XIVE_REPORT_SIZE 256 /* two cache lines per NVP */ +#define XIVE_MEM_END (XIVE_REPORT_MEM + XIVE_REPORT_SIZE * MAX_VPS) + +#define P10_XSCOM_BASE 0x000603fc00000000ull +#define XIVE_XSCOM 0x2010800ull + +#define XIVE_ESB_RESET 0b00 +#define XIVE_ESB_OFF 0b01 +#define XIVE_ESB_PENDING 0b10 +#define XIVE_ESB_QUEUED 0b11 + +#define XIVE_ESB_GET 0x800 +#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */ +#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */ +#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */ +#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */ + +#define XIVE_ESB_STORE_EOI 0x400 /* Store */ + + +extern uint64_t pnv_xive_xscom_read(QTestState *qts, uint32_t reg); +extern void pnv_xive_xscom_write(QTestState *qts, uint32_t reg, uint64_t val); +extern uint64_t xive_get_queue_addr(uint32_t end_index); +extern uint8_t get_esb(QTestState *qts, uint32_t index, uint8_t page, + uint32_t offset); +extern void set_esb(QTestState *qts, uint32_t index, uint8_t page, + uint32_t offset, uint32_t val); +extern void get_nvp(QTestState *qts, uint32_t index, Xive2Nvp* nvp); +extern void set_nvp(QTestState *qts, uint32_t index, uint8_t first); +extern void get_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair); +extern void set_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair); +extern void set_nvg(QTestState *qts, uint32_t index, uint8_t next); +extern void 
set_eas(QTestState *qts, uint32_t index, uint32_t end_index, + uint32_t data); +extern void set_end(QTestState *qts, uint32_t index, uint32_t nvp_index, + uint8_t priority, bool i); + + +void test_flush_sync_inject(QTestState *qts); +void test_nvpg_bar(QTestState *qts); + +#endif /* TEST_PNV_XIVE2_COMMON_H */ diff --git a/tests/qtest/pnv-xive2-flush-sync.c b/tests/qtest/pnv-xive2-flush-sync.c new file mode 100644 index 00000000000..142826bad0f --- /dev/null +++ b/tests/qtest/pnv-xive2-flush-sync.c @@ -0,0 +1,205 @@ +/* + * QTest testcase for PowerNV 10 interrupt controller (xive2) + * - Test cache flush/queue sync injection + * + * Copyright (c) 2024, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "libqtest.h" + +#include "pnv-xive2-common.h" +#include "hw/intc/pnv_xive2_regs.h" +#include "hw/ppc/xive_regs.h" +#include "hw/ppc/xive2_regs.h" + +#define PNV_XIVE2_QUEUE_IPI 0x00 +#define PNV_XIVE2_QUEUE_HW 0x01 +#define PNV_XIVE2_QUEUE_NXC 0x02 +#define PNV_XIVE2_QUEUE_INT 0x03 +#define PNV_XIVE2_QUEUE_OS 0x04 +#define PNV_XIVE2_QUEUE_POOL 0x05 +#define PNV_XIVE2_QUEUE_HARD 0x06 +#define PNV_XIVE2_CACHE_ENDC 0x08 +#define PNV_XIVE2_CACHE_ESBC 0x09 +#define PNV_XIVE2_CACHE_EASC 0x0a +#define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO 0x10 +#define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO 0x11 +#define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI 0x12 +#define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI 0x13 +#define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI 0x14 +#define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI 0x15 +#define PNV_XIVE2_CACHE_NXC 0x18 + +#define PNV_XIVE2_SYNC_IPI 0x000 +#define PNV_XIVE2_SYNC_HW 0x080 +#define PNV_XIVE2_SYNC_NxC 0x100 +#define PNV_XIVE2_SYNC_INT 0x180 +#define PNV_XIVE2_SYNC_OS_ESC 0x200 +#define PNV_XIVE2_SYNC_POOL_ESC 0x280 +#define PNV_XIVE2_SYNC_HARD_ESC 0x300 +#define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO 0x800 +#define PNV_XIVE2_SYNC_NXC_LD_LCL_CO 0x880 +#define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI 0x900 +#define PNV_XIVE2_SYNC_NXC_ST_LCL_CI 0x980 +#define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI 0xA00 +#define PNV_XIVE2_SYNC_NXC_ST_RMT_CI 0xA80 + + +static uint64_t get_sync_addr(uint32_t src_pir, int ic_topo_id, int type) +{ + int thread_nr = src_pir & 0x7f; + uint64_t addr = XIVE_SYNC_MEM + thread_nr * 512 + ic_topo_id * 32 + type; + return addr; +} + +static uint8_t get_sync(QTestState *qts, uint32_t src_pir, int ic_topo_id, + int type) +{ + uint64_t addr = get_sync_addr(src_pir, ic_topo_id, type); + return qtest_readb(qts, addr); +} + +static void clr_sync(QTestState *qts, uint32_t src_pir, int ic_topo_id, + int type) +{ + uint64_t addr = get_sync_addr(src_pir, ic_topo_id, type); + qtest_writeb(qts, addr, 0x0); +} + +static void inject_cache_flush(QTestState *qts, int ic_topo_id, + uint64_t scom_addr) +{ + (void)ic_topo_id; + pnv_xive_xscom_write(qts, scom_addr, 0); +} + +static void inject_queue_sync(QTestState *qts, int ic_topo_id, uint64_t offset) +{ + (void)ic_topo_id; + uint64_t addr = XIVE_IC_ADDR + (VST_SYNC << XIVE_PAGE_SHIFT) + offset; + qtest_writeq(qts, addr, 0); +} + +static void inject_op(QTestState *qts, int ic_topo_id, int type) +{ + switch (type) { + case PNV_XIVE2_QUEUE_IPI: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_IPI); + break; + case PNV_XIVE2_QUEUE_HW: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_HW); + break; + case PNV_XIVE2_QUEUE_NXC: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NxC); + break; + case PNV_XIVE2_QUEUE_INT: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_INT); + break; + case PNV_XIVE2_QUEUE_OS: + inject_queue_sync(qts, 
ic_topo_id, PNV_XIVE2_SYNC_OS_ESC); + break; + case PNV_XIVE2_QUEUE_POOL: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_POOL_ESC); + break; + case PNV_XIVE2_QUEUE_HARD: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_HARD_ESC); + break; + case PNV_XIVE2_CACHE_ENDC: + inject_cache_flush(qts, ic_topo_id, X_VC_ENDC_FLUSH_INJECT); + break; + case PNV_XIVE2_CACHE_ESBC: + inject_cache_flush(qts, ic_topo_id, X_VC_ESBC_FLUSH_INJECT); + break; + case PNV_XIVE2_CACHE_EASC: + inject_cache_flush(qts, ic_topo_id, X_VC_EASC_FLUSH_INJECT); + break; + case PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_LD_LCL_NCO); + break; + case PNV_XIVE2_QUEUE_NXC_LD_LCL_CO: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_LD_LCL_CO); + break; + case PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_LCL_NCI); + break; + case PNV_XIVE2_QUEUE_NXC_ST_LCL_CI: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_LCL_CI); + break; + case PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_RMT_NCI); + break; + case PNV_XIVE2_QUEUE_NXC_ST_RMT_CI: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_RMT_CI); + break; + case PNV_XIVE2_CACHE_NXC: + inject_cache_flush(qts, ic_topo_id, X_PC_NXC_FLUSH_INJECT); + break; + default: + g_assert_not_reached(); + break; + } +} + +const uint8_t xive_inject_tests[] = { + PNV_XIVE2_QUEUE_IPI, + PNV_XIVE2_QUEUE_HW, + PNV_XIVE2_QUEUE_NXC, + PNV_XIVE2_QUEUE_INT, + PNV_XIVE2_QUEUE_OS, + PNV_XIVE2_QUEUE_POOL, + PNV_XIVE2_QUEUE_HARD, + PNV_XIVE2_CACHE_ENDC, + PNV_XIVE2_CACHE_ESBC, + PNV_XIVE2_CACHE_EASC, + PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO, + PNV_XIVE2_QUEUE_NXC_LD_LCL_CO, + PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI, + PNV_XIVE2_QUEUE_NXC_ST_LCL_CI, + PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI, + PNV_XIVE2_QUEUE_NXC_ST_RMT_CI, + PNV_XIVE2_CACHE_NXC, +}; + +void test_flush_sync_inject(QTestState *qts) +{ + int ic_topo_id = 0; + + /* + * Writes performed by qtest are not done in the context of a thread. + * This means that QEMU XIVE code doesn't have a way to determine what + * thread is originating the write. In order to allow for some testing, + * QEMU XIVE code will assume a PIR of 0 when unable to determine the + * source thread for cache flush and queue sync inject operations. + * See hw/intc/pnv_xive2.c: pnv_xive2_inject_notify() for details. 
+ */ + int src_pir = 0; + int test_nr; + uint8_t byte; + + g_test_message("========================================================="); + g_test_message("Starting cache flush/queue sync injection tests..."); + + for (test_nr = 0; test_nr < sizeof(xive_inject_tests); + test_nr++) { + int op_type = xive_inject_tests[test_nr]; + + g_test_message("Running test %d", test_nr); + + /* start with status byte set to 0 */ + clr_sync(qts, src_pir, ic_topo_id, op_type); + byte = get_sync(qts, src_pir, ic_topo_id, op_type); + g_assert_cmphex(byte, ==, 0); + + /* request cache flush or queue sync operation */ + inject_op(qts, ic_topo_id, op_type); + + /* verify that status byte was written to 0xff */ + byte = get_sync(qts, src_pir, ic_topo_id, op_type); + g_assert_cmphex(byte, ==, 0xff); + + clr_sync(qts, src_pir, ic_topo_id, op_type); + } +} + diff --git a/tests/qtest/pnv-xive2-nvpg_bar.c b/tests/qtest/pnv-xive2-nvpg_bar.c new file mode 100644 index 00000000000..6ac8d36c821 --- /dev/null +++ b/tests/qtest/pnv-xive2-nvpg_bar.c @@ -0,0 +1,152 @@ +/* + * QTest testcase for PowerNV 10 interrupt controller (xive2) + * - Test NVPG BAR MMIO operations + * + * Copyright (c) 2024, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "libqtest.h" + +#include "pnv-xive2-common.h" + +#define NVPG_BACKLOG_OP_SHIFT 10 +#define NVPG_BACKLOG_PRIO_SHIFT 4 + +#define XIVE_PRIORITY_MAX 7 + +enum NVx { + NVP, + NVG, + NVC +}; + +typedef enum { + INCR_STORE = 0b100, + INCR_LOAD = 0b000, + DECR_STORE = 0b101, + DECR_LOAD = 0b001, + READ_x = 0b010, + READ_y = 0b011, +} backlog_op; + +static uint32_t nvpg_backlog_op(QTestState *qts, backlog_op op, + enum NVx type, uint64_t index, + uint8_t priority, uint8_t delta) +{ + uint64_t addr, offset; + uint32_t count = 0; + + switch (type) { + case NVP: + addr = XIVE_NVPG_ADDR + (index << (XIVE_PAGE_SHIFT + 1)); + break; + case NVG: + addr = XIVE_NVPG_ADDR + (index << (XIVE_PAGE_SHIFT + 1)) + + (1 << XIVE_PAGE_SHIFT); + break; + case NVC: + addr = XIVE_NVC_ADDR + (index << XIVE_PAGE_SHIFT); + break; + default: + g_assert_not_reached(); + } + + offset = (op & 0b11) << NVPG_BACKLOG_OP_SHIFT; + offset |= priority << NVPG_BACKLOG_PRIO_SHIFT; + if (op >> 2) { + qtest_writeb(qts, addr + offset, delta); + } else { + count = qtest_readw(qts, addr + offset); + } + return count; +} + +void test_nvpg_bar(QTestState *qts) +{ + uint32_t nvp_target = 0x11; + uint32_t group_target = 0x17; /* size 16 */ + uint32_t vp_irq = 33, group_irq = 47; + uint32_t vp_end = 3, group_end = 97; + uint32_t vp_irq_data = 0x33333333; + uint32_t group_irq_data = 0x66666666; + uint8_t vp_priority = 0, group_priority = 5; + uint32_t vp_count[XIVE_PRIORITY_MAX + 1] = { 0 }; + uint32_t group_count[XIVE_PRIORITY_MAX + 1] = { 0 }; + uint32_t count, delta; + uint8_t i; + + g_test_message("========================================================="); + g_test_message("Testing NVPG BAR operations"); + + set_nvg(qts, group_target, 0); + set_nvp(qts, nvp_target, 0x04); + set_nvp(qts, group_target, 0x04); + + /* + * Setup: trigger a VP-specific interrupt and a group interrupt + * so that the backlog counters are initialized to something else + * than 0 for at least one priority level + */ + set_eas(qts, vp_irq, vp_end, vp_irq_data); + set_end(qts, vp_end, nvp_target, vp_priority, false /* group */); + + set_eas(qts, group_irq, group_end, group_irq_data); + set_end(qts, group_end, group_target, group_priority, true /* group */); + + get_esb(qts, vp_irq, XIVE_EOI_PAGE, 
XIVE_ESB_SET_PQ_00); + set_esb(qts, vp_irq, XIVE_TRIGGER_PAGE, 0, 0); + vp_count[vp_priority]++; + + get_esb(qts, group_irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); + set_esb(qts, group_irq, XIVE_TRIGGER_PAGE, 0, 0); + group_count[group_priority]++; + + /* check the initial counters */ + for (i = 0; i <= XIVE_PRIORITY_MAX; i++) { + count = nvpg_backlog_op(qts, READ_x, NVP, nvp_target, i, 0); + g_assert_cmpuint(count, ==, vp_count[i]); + + count = nvpg_backlog_op(qts, READ_y, NVG, group_target, i, 0); + g_assert_cmpuint(count, ==, group_count[i]); + } + + /* do a few ops on the VP. Counter can only be 0 and 1 */ + vp_priority = 2; + delta = 7; + nvpg_backlog_op(qts, INCR_STORE, NVP, nvp_target, vp_priority, delta); + vp_count[vp_priority] = 1; + count = nvpg_backlog_op(qts, INCR_LOAD, NVP, nvp_target, vp_priority, 0); + g_assert_cmpuint(count, ==, vp_count[vp_priority]); + count = nvpg_backlog_op(qts, READ_y, NVP, nvp_target, vp_priority, 0); + g_assert_cmpuint(count, ==, vp_count[vp_priority]); + + count = nvpg_backlog_op(qts, DECR_LOAD, NVP, nvp_target, vp_priority, 0); + g_assert_cmpuint(count, ==, vp_count[vp_priority]); + vp_count[vp_priority] = 0; + nvpg_backlog_op(qts, DECR_STORE, NVP, nvp_target, vp_priority, delta); + count = nvpg_backlog_op(qts, READ_x, NVP, nvp_target, vp_priority, 0); + g_assert_cmpuint(count, ==, vp_count[vp_priority]); + + /* do a few ops on the group */ + group_priority = 2; + delta = 9; + /* can't go negative */ + nvpg_backlog_op(qts, DECR_STORE, NVG, group_target, group_priority, delta); + count = nvpg_backlog_op(qts, READ_y, NVG, group_target, group_priority, 0); + g_assert_cmpuint(count, ==, 0); + nvpg_backlog_op(qts, INCR_STORE, NVG, group_target, group_priority, delta); + group_count[group_priority] += delta; + count = nvpg_backlog_op(qts, INCR_LOAD, NVG, group_target, + group_priority, delta); + g_assert_cmpuint(count, ==, group_count[group_priority]); + group_count[group_priority]++; + + count = nvpg_backlog_op(qts, DECR_LOAD, NVG, group_target, + group_priority, delta); + g_assert_cmpuint(count, ==, group_count[group_priority]); + group_count[group_priority]--; + count = nvpg_backlog_op(qts, READ_x, NVG, group_target, group_priority, 0); + g_assert_cmpuint(count, ==, group_count[group_priority]); +} diff --git a/tests/qtest/pnv-xive2-test.c b/tests/qtest/pnv-xive2-test.c new file mode 100644 index 00000000000..5313d4ef18b --- /dev/null +++ b/tests/qtest/pnv-xive2-test.c @@ -0,0 +1,585 @@ +/* + * QTest testcase for PowerNV 10 interrupt controller (xive2) + * - Test irq to hardware thread + * - Test 'Pull Thread Context to Odd Thread Reporting Line' + * - Test irq to hardware group + * - Test irq to hardware group going through backlog + * - Test irq to pool thread + * + * Copyright (c) 2024, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "libqtest.h" + +#include "pnv-xive2-common.h" +#include "hw/intc/pnv_xive2_regs.h" +#include "hw/ppc/xive_regs.h" +#include "hw/ppc/xive2_regs.h" + +#define SMT 4 /* some tests will break if less than 4 */ + + +static void set_table(QTestState *qts, uint64_t type, uint64_t addr) +{ + uint64_t vsd, size, log_size; + + /* + * First, let's make sure that all the resources used fit in the + * given table. 
+ */ + switch (type) { + case VST_ESB: + size = MAX_IRQS / 4; + break; + case VST_EAS: + size = MAX_IRQS * 8; + break; + case VST_END: + size = MAX_ENDS * 32; + break; + case VST_NVP: + case VST_NVG: + case VST_NVC: + size = MAX_VPS * 32; + break; + case VST_SYNC: + size = 64 * 1024; + break; + default: + g_assert_not_reached(); + } + + g_assert_cmpuint(size, <=, XIVE_VST_SIZE); + log_size = ctzl(XIVE_VST_SIZE) - 12; + + vsd = ((uint64_t) VSD_MODE_EXCLUSIVE) << 62 | addr | log_size; + pnv_xive_xscom_write(qts, X_VC_VSD_TABLE_ADDR, type << 48); + pnv_xive_xscom_write(qts, X_VC_VSD_TABLE_DATA, vsd); + + if (type != VST_EAS && type != VST_IC && type != VST_ERQ) { + pnv_xive_xscom_write(qts, X_PC_VSD_TABLE_ADDR, type << 48); + pnv_xive_xscom_write(qts, X_PC_VSD_TABLE_DATA, vsd); + } +} + +static void set_tima8(QTestState *qts, uint32_t pir, uint32_t offset, + uint8_t b) +{ + uint64_t ic_addr; + + ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT); + qtest_writeb(qts, ic_addr + offset, b); +} + +static void set_tima32(QTestState *qts, uint32_t pir, uint32_t offset, + uint32_t l) +{ + uint64_t ic_addr; + + ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT); + qtest_writel(qts, ic_addr + offset, l); +} + +static uint8_t get_tima8(QTestState *qts, uint32_t pir, uint32_t offset) +{ + uint64_t ic_addr; + + ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT); + return qtest_readb(qts, ic_addr + offset); +} + +static uint16_t get_tima16(QTestState *qts, uint32_t pir, uint32_t offset) +{ + uint64_t ic_addr; + + ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT); + return qtest_readw(qts, ic_addr + offset); +} + +static uint32_t get_tima32(QTestState *qts, uint32_t pir, uint32_t offset) +{ + uint64_t ic_addr; + + ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT); + return qtest_readl(qts, ic_addr + offset); +} + +static void reset_pool_threads(QTestState *qts) +{ + uint8_t first_group = 0; + int i; + + for (i = 0; i < SMT; i++) { + uint32_t nvp_idx = 0x100 + i; + set_nvp(qts, nvp_idx, first_group); + set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD0, 0x000000ff); + set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD1, 0); + set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | nvp_idx); + } +} + +static void reset_hw_threads(QTestState *qts) +{ + uint8_t first_group = 0; + uint32_t w1 = 0x000000ff; + int i; + + if (SMT >= 4) { + /* define 2 groups of 2, part of a bigger group of size 4 */ + set_nvg(qts, 0x80, 0x02); + set_nvg(qts, 0x82, 0x02); + set_nvg(qts, 0x81, 0); + first_group = 0x01; + w1 = 0x000300ff; + } + + for (i = 0; i < SMT; i++) { + set_nvp(qts, 0x80 + i, first_group); + set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD0, 0x00ff00ff); + set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD1, w1); + set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD2, 0x80000000); + } +} + +static void reset_state(QTestState *qts) +{ + size_t mem_used = XIVE_MEM_END - XIVE_MEM_START; + + qtest_memset(qts, XIVE_MEM_START, 0, mem_used); + reset_hw_threads(qts); + reset_pool_threads(qts); +} + +static void init_xive(QTestState *qts) +{ + uint64_t val1, val2, range; + + /* + * We can take a few shortcuts here, as we know the default values + * used for xive initialization + */ + + /* + * Set the BARs. + * We reuse the same values used by firmware to ease debug. + */ + pnv_xive_xscom_write(qts, X_CQ_IC_BAR, XIVE_IC_BAR); + pnv_xive_xscom_write(qts, X_CQ_TM_BAR, XIVE_TM_BAR); + + /* ESB and NVPG use 2 pages per resource. 
The others only one page */ + range = (MAX_IRQS << 17) >> 25; + val1 = XIVE_ESB_BAR | range; + pnv_xive_xscom_write(qts, X_CQ_ESB_BAR, val1); + + range = (MAX_ENDS << 16) >> 25; + val1 = XIVE_END_BAR | range; + pnv_xive_xscom_write(qts, X_CQ_END_BAR, val1); + + range = (MAX_VPS << 17) >> 25; + val1 = XIVE_NVPG_BAR | range; + pnv_xive_xscom_write(qts, X_CQ_NVPG_BAR, val1); + + range = (MAX_VPS << 16) >> 25; + val1 = XIVE_NVC_BAR | range; + pnv_xive_xscom_write(qts, X_CQ_NVC_BAR, val1); + + /* + * Enable hw threads. + * We check the value written. Useless with current + * implementation, but it validates the xscom read path and it's + * what the hardware procedure says + */ + val1 = 0xF000000000000000ull; /* core 0, 4 threads */ + pnv_xive_xscom_write(qts, X_TCTXT_EN0, val1); + val2 = pnv_xive_xscom_read(qts, X_TCTXT_EN0); + g_assert_cmphex(val1, ==, val2); + + /* Memory tables */ + set_table(qts, VST_ESB, XIVE_ESB_MEM); + set_table(qts, VST_EAS, XIVE_EAS_MEM); + set_table(qts, VST_END, XIVE_END_MEM); + set_table(qts, VST_NVP, XIVE_NVP_MEM); + set_table(qts, VST_NVG, XIVE_NVG_MEM); + set_table(qts, VST_NVC, XIVE_NVC_MEM); + set_table(qts, VST_SYNC, XIVE_SYNC_MEM); + + reset_hw_threads(qts); + reset_pool_threads(qts); +} + +static void test_hw_irq(QTestState *qts) +{ + uint32_t irq = 2; + uint32_t irq_data = 0x600df00d; + uint32_t end_index = 5; + uint32_t target_pir = 1; + uint32_t target_nvp = 0x80 + target_pir; + uint8_t priority = 5; + uint32_t reg32; + uint16_t reg16; + uint8_t pq, nsr, cppr; + + g_test_message("========================================================="); + g_test_message("Testing irq %d to hardware thread %d", irq, target_pir); + + /* irq config */ + set_eas(qts, irq, end_index, irq_data); + set_end(qts, end_index, target_nvp, priority, false /* group */); + + /* enable and trigger irq */ + get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); + set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0); + + /* check irq is raised on cpu */ + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); + g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING); + + reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0); + nsr = reg32 >> 24; + cppr = (reg32 >> 16) & 0xFF; + g_assert_cmphex(nsr, ==, 0x80); + g_assert_cmphex(cppr, ==, 0xFF); + + /* ack the irq */ + reg16 = get_tima16(qts, target_pir, TM_SPC_ACK_HV_REG); + nsr = reg16 >> 8; + cppr = reg16 & 0xFF; + g_assert_cmphex(nsr, ==, 0x80); + g_assert_cmphex(cppr, ==, priority); + + /* check irq data is what was configured */ + reg32 = qtest_readl(qts, xive_get_queue_addr(end_index)); + g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff)); + + /* End Of Interrupt */ + set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0); + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); + g_assert_cmpuint(pq, ==, XIVE_ESB_RESET); + + /* reset CPPR */ + set_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_CPPR, 0xFF); + reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0); + nsr = reg32 >> 24; + cppr = (reg32 >> 16) & 0xFF; + g_assert_cmphex(nsr, ==, 0x00); + g_assert_cmphex(cppr, ==, 0xFF); +} + +static void test_pool_irq(QTestState *qts) +{ + uint32_t irq = 2; + uint32_t irq_data = 0x600d0d06; + uint32_t end_index = 5; + uint32_t target_pir = 1; + uint32_t target_nvp = 0x100 + target_pir; + uint8_t priority = 5; + uint32_t reg32; + uint16_t reg16; + uint8_t pq, nsr, cppr, ipb; + + g_test_message("========================================================="); + g_test_message("Testing irq %d to pool thread %d", irq, target_pir); + + /* irq 
config */ + set_eas(qts, irq, end_index, irq_data); + set_end(qts, end_index, target_nvp, priority, false /* group */); + + /* enable and trigger irq */ + get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); + set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0); + + /* check irq is raised on cpu */ + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); + g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING); + + /* check TIMA values in the PHYS ring (shared by POOL ring) */ + reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0); + nsr = reg32 >> 24; + cppr = (reg32 >> 16) & 0xFF; + g_assert_cmphex(nsr, ==, 0x40); + g_assert_cmphex(cppr, ==, 0xFF); + + /* check TIMA values in the POOL ring */ + reg32 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD0); + nsr = reg32 >> 24; + cppr = (reg32 >> 16) & 0xFF; + ipb = (reg32 >> 8) & 0xFF; + g_assert_cmphex(nsr, ==, 0); + g_assert_cmphex(cppr, ==, 0); + g_assert_cmphex(ipb, ==, 0x80 >> priority); + + /* ack the irq */ + reg16 = get_tima16(qts, target_pir, TM_SPC_ACK_HV_REG); + nsr = reg16 >> 8; + cppr = reg16 & 0xFF; + g_assert_cmphex(nsr, ==, 0x40); + g_assert_cmphex(cppr, ==, priority); + + /* check irq data is what was configured */ + reg32 = qtest_readl(qts, xive_get_queue_addr(end_index)); + g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff)); + + /* check IPB is cleared in the POOL ring */ + reg32 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD0); + ipb = (reg32 >> 8) & 0xFF; + g_assert_cmphex(ipb, ==, 0); + + /* End Of Interrupt */ + set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0); + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); + g_assert_cmpuint(pq, ==, XIVE_ESB_RESET); + + /* reset CPPR */ + set_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_CPPR, 0xFF); + reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0); + nsr = reg32 >> 24; + cppr = (reg32 >> 16) & 0xFF; + g_assert_cmphex(nsr, ==, 0x00); + g_assert_cmphex(cppr, ==, 0xFF); +} + +#define XIVE_ODD_CL 0x80 +static void test_pull_thread_ctx_to_odd_thread_cl(QTestState *qts) +{ + uint32_t target_pir = 1; + uint32_t target_nvp = 0x80 + target_pir; + Xive2Nvp nvp; + uint8_t cl_pair[XIVE_REPORT_SIZE]; + uint32_t qw1w0, qw3w0, qw1w2, qw2w2; + uint8_t qw3b8; + uint32_t cl_word; + uint32_t word2; + + g_test_message("========================================================="); + g_test_message("Testing 'Pull Thread Context to Odd Thread Reporting " \ + "Line'"); + + /* clear odd cache line prior to pull operation */ + memset(cl_pair, 0, sizeof(cl_pair)); + get_nvp(qts, target_nvp, &nvp); + set_cl_pair(qts, &nvp, cl_pair); + + /* Read some values from TIMA that we expect to see in cacheline */ + qw1w0 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD0); + qw3w0 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0); + qw1w2 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD2); + qw2w2 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD2); + qw3b8 = get_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD2); + + /* Execute the pull operation */ + set_tima8(qts, target_pir, TM_SPC_PULL_PHYS_CTX_OL, 0); + + /* Verify odd cache line values match TIMA after pull operation */ + get_cl_pair(qts, &nvp, cl_pair); + memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW1_OS + TM_WORD0], 4); + g_assert_cmphex(qw1w0, ==, be32_to_cpu(cl_word)); + memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW3_HV_PHYS + TM_WORD0], 4); + g_assert_cmphex(qw3w0, ==, be32_to_cpu(cl_word)); + memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW1_OS + TM_WORD2], 4); + g_assert_cmphex(qw1w2, ==, 
be32_to_cpu(cl_word)); + memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW2_HV_POOL + TM_WORD2], 4); + g_assert_cmphex(qw2w2, ==, be32_to_cpu(cl_word)); + g_assert_cmphex(qw3b8, ==, + cl_pair[XIVE_ODD_CL + TM_QW3_HV_PHYS + TM_WORD2]); + + /* Verify that all TIMA valid bits for target thread are cleared */ + word2 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD2); + g_assert_cmphex(xive_get_field32(TM_QW1W2_VO, word2), ==, 0); + word2 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD2); + g_assert_cmphex(xive_get_field32(TM_QW2W2_VP, word2), ==, 0); + word2 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD2); + g_assert_cmphex(xive_get_field32(TM_QW3W2_VT, word2), ==, 0); +} + +static void test_hw_group_irq(QTestState *qts) +{ + uint32_t irq = 100; + uint32_t irq_data = 0xdeadbeef; + uint32_t end_index = 23; + uint32_t chosen_one; + uint32_t target_nvp = 0x81; /* group size = 4 */ + uint8_t priority = 6; + uint32_t reg32; + uint16_t reg16; + uint8_t pq, nsr, cppr; + + g_test_message("========================================================="); + g_test_message("Testing irq %d to hardware group of size 4", irq); + + /* irq config */ + set_eas(qts, irq, end_index, irq_data); + set_end(qts, end_index, target_nvp, priority, true /* group */); + + /* enable and trigger irq */ + get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); + set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0); + + /* check irq is raised on cpu */ + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); + g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING); + + /* find the targeted vCPU */ + for (chosen_one = 0; chosen_one < SMT; chosen_one++) { + reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0); + nsr = reg32 >> 24; + if (nsr == 0x82) { + break; + } + } + g_assert_cmphex(chosen_one, <, SMT); + cppr = (reg32 >> 16) & 0xFF; + g_assert_cmphex(nsr, ==, 0x82); + g_assert_cmphex(cppr, ==, 0xFF); + + /* ack the irq */ + reg16 = get_tima16(qts, chosen_one, TM_SPC_ACK_HV_REG); + nsr = reg16 >> 8; + cppr = reg16 & 0xFF; + g_assert_cmphex(nsr, ==, 0x82); + g_assert_cmphex(cppr, ==, priority); + + /* check irq data is what was configured */ + reg32 = qtest_readl(qts, xive_get_queue_addr(end_index)); + g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff)); + + /* End Of Interrupt */ + set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0); + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); + g_assert_cmpuint(pq, ==, XIVE_ESB_RESET); + + /* reset CPPR */ + set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, 0xFF); + reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0); + nsr = reg32 >> 24; + cppr = (reg32 >> 16) & 0xFF; + g_assert_cmphex(nsr, ==, 0x00); + g_assert_cmphex(cppr, ==, 0xFF); +} + +static void test_hw_group_irq_backlog(QTestState *qts) +{ + uint32_t irq = 31; + uint32_t irq_data = 0x01234567; + uint32_t end_index = 129; + uint32_t target_nvp = 0x81; /* group size = 4 */ + uint32_t chosen_one = 3; + uint8_t blocking_priority, priority = 3; + uint32_t reg32; + uint16_t reg16; + uint8_t pq, nsr, cppr, lsmfb, i; + + g_test_message("========================================================="); + g_test_message("Testing irq %d to hardware group of size 4 going " \ + "through backlog", + irq); + + /* + * set current priority of all threads in the group to something + * higher than what we're about to trigger + */ + blocking_priority = priority - 1; + for (i = 0; i < SMT; i++) { + set_tima8(qts, i, TM_QW3_HV_PHYS + TM_CPPR, blocking_priority); + } + + /* irq config */ + set_eas(qts, irq, end_index, 
irq_data); + set_end(qts, end_index, target_nvp, priority, true /* group */); + + /* enable and trigger irq */ + get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); + set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0); + + /* check irq is raised on cpu */ + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); + g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING); + + /* check no interrupt is pending on the 2 possible targets */ + for (i = 0; i < SMT; i++) { + reg32 = get_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD0); + nsr = reg32 >> 24; + cppr = (reg32 >> 16) & 0xFF; + lsmfb = reg32 & 0xFF; + g_assert_cmphex(nsr, ==, 0x0); + g_assert_cmphex(cppr, ==, blocking_priority); + g_assert_cmphex(lsmfb, ==, priority); + } + + /* lower priority of one thread */ + set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, priority + 1); + + /* check backlogged interrupt is presented */ + reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0); + nsr = reg32 >> 24; + cppr = (reg32 >> 16) & 0xFF; + g_assert_cmphex(nsr, ==, 0x82); + g_assert_cmphex(cppr, ==, priority + 1); + + /* ack the irq */ + reg16 = get_tima16(qts, chosen_one, TM_SPC_ACK_HV_REG); + nsr = reg16 >> 8; + cppr = reg16 & 0xFF; + g_assert_cmphex(nsr, ==, 0x82); + g_assert_cmphex(cppr, ==, priority); + + /* check irq data is what was configured */ + reg32 = qtest_readl(qts, xive_get_queue_addr(end_index)); + g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff)); + + /* End Of Interrupt */ + set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0); + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); + g_assert_cmpuint(pq, ==, XIVE_ESB_RESET); + + /* reset CPPR */ + set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, 0xFF); + reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0); + nsr = reg32 >> 24; + cppr = (reg32 >> 16) & 0xFF; + lsmfb = reg32 & 0xFF; + g_assert_cmphex(nsr, ==, 0x00); + g_assert_cmphex(cppr, ==, 0xFF); + g_assert_cmphex(lsmfb, ==, 0xFF); +} + +static void test_xive(void) +{ + QTestState *qts; + + qts = qtest_initf("-M powernv10 -smp %d,cores=1,threads=%d -nographic " + "-nodefaults -serial mon:stdio -S " + "-d guest_errors -trace '*xive*'", + SMT, SMT); + init_xive(qts); + + test_hw_irq(qts); + + /* omit reset_state here and use settings from test_hw_irq */ + test_pull_thread_ctx_to_odd_thread_cl(qts); + + reset_state(qts); + test_pool_irq(qts); + + reset_state(qts); + test_hw_group_irq(qts); + + reset_state(qts); + test_hw_group_irq_backlog(qts); + + reset_state(qts); + test_flush_sync_inject(qts); + + reset_state(qts); + test_nvpg_bar(qts); + + qtest_quit(qts); +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + qtest_add_func("xive2", test_xive); + return g_test_run(); +} diff --git a/tests/qtest/pvpanic-pci-test.c b/tests/qtest/pvpanic-pci-test.c index dc021c2fdf7..f788a44dbed 100644 --- a/tests/qtest/pvpanic-pci-test.c +++ b/tests/qtest/pvpanic-pci-test.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "libqos/pci.h" #include "libqos/pci-pc.h" #include "hw/misc/pvpanic.h" diff --git a/tests/qtest/pvpanic-test.c b/tests/qtest/pvpanic-test.c index d49d2ba9313..5606baf47be 100644 --- a/tests/qtest/pvpanic-test.c +++ b/tests/qtest/pvpanic-test.c @@ -9,7 +9,7 @@ #include "qemu/osdep.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "hw/misc/pvpanic.h" static void test_panic_nopause(void) diff --git a/tests/qtest/q35-test.c b/tests/qtest/q35-test.c index 
c922d81bc02..62fff49fc89 100644 --- a/tests/qtest/q35-test.c +++ b/tests/qtest/q35-test.c @@ -14,7 +14,7 @@ #include "libqos/pci.h" #include "libqos/pci-pc.h" #include "hw/pci-host/q35.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #define TSEG_SIZE_TEST_GUEST_RAM_MBYTES 128 @@ -83,7 +83,6 @@ static void test_smram_lock(void) { QPCIBus *pcibus; QPCIDevice *pcidev; - QDict *response; QTestState *qts; qts = qtest_init("-M q35"); @@ -107,10 +106,7 @@ static void test_smram_lock(void) g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == false); /* reset */ - response = qtest_qmp(qts, "{'execute': 'system_reset', 'arguments': {} }"); - g_assert(response); - g_assert(!qdict_haskey(response, "error")); - qobject_unref(response); + qtest_system_reset(qts); /* check open is settable again */ smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN, false); @@ -194,7 +190,6 @@ static void test_smram_smbase_lock(void) { QPCIBus *pcibus; QPCIDevice *pcidev; - QDict *response; QTestState *qts; int i; @@ -237,10 +232,7 @@ static void test_smram_smbase_lock(void) } /* reset */ - response = qtest_qmp(qts, "{'execute': 'system_reset', 'arguments': {} }"); - g_assert(response); - g_assert(!qdict_haskey(response, "error")); - qobject_unref(response); + qtest_system_reset(qts); /* check RAM at SMBASE is available after reset */ g_assert_cmpint(qtest_readb(qts, SMBASE), ==, SMRAM_TEST_PATTERN); @@ -254,41 +246,6 @@ static void test_smram_smbase_lock(void) qtest_quit(qts); } -static void test_without_smram_base(void) -{ - QPCIBus *pcibus; - QPCIDevice *pcidev; - QTestState *qts; - int i; - - qts = qtest_init("-M pc-q35-4.1"); - - pcibus = qpci_new_pc(qts, NULL); - g_assert(pcibus != NULL); - - pcidev = qpci_device_find(pcibus, 0); - g_assert(pcidev != NULL); - - /* check that RAM is accessible */ - qtest_writeb(qts, SMBASE, SMRAM_TEST_PATTERN); - g_assert_cmpint(qtest_readb(qts, SMBASE), ==, SMRAM_TEST_PATTERN); - - /* check that writing to 0x9c succeeds */ - for (i = 0; i <= 0xff; i++) { - qpci_config_writeb(pcidev, MCH_HOST_BRIDGE_F_SMBASE, i); - g_assert(qpci_config_readb(pcidev, MCH_HOST_BRIDGE_F_SMBASE) == i); - } - - /* check that RAM is still accessible */ - qtest_writeb(qts, SMBASE, SMRAM_TEST_PATTERN + 1); - g_assert_cmpint(qtest_readb(qts, SMBASE), ==, (SMRAM_TEST_PATTERN + 1)); - - g_free(pcidev); - qpci_free_pc(pcibus); - - qtest_quit(qts); -} - int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); @@ -301,6 +258,6 @@ int main(int argc, char **argv) qtest_add_data_func("/q35/tseg-size/ext/16mb", &tseg_ext_16mb, test_tseg_size); qtest_add_func("/q35/smram/smbase_lock", test_smram_smbase_lock); - qtest_add_func("/q35/smram/legacy_smbase", test_without_smram_base); + return g_test_run(); } diff --git a/tests/qtest/qmp-cmd-test.c b/tests/qtest/qmp-cmd-test.c index 2c15f609584..cf718761861 100644 --- a/tests/qtest/qmp-cmd-test.c +++ b/tests/qtest/qmp-cmd-test.c @@ -14,7 +14,7 @@ #include "libqtest.h" #include "qapi/error.h" #include "qapi/qapi-visit-introspect.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qapi/qobject-input-visitor.h" const char common_args[] = "-nodefaults -machine none"; @@ -51,7 +51,6 @@ static int query_error_class(const char *cmd) { "x-query-usb", ERROR_CLASS_GENERIC_ERROR }, /* Only valid with accel=tcg */ { "x-query-jit", ERROR_CLASS_GENERIC_ERROR }, - { "x-query-opcount", ERROR_CLASS_GENERIC_ERROR }, { "xen-event-list", ERROR_CLASS_GENERIC_ERROR }, { NULL, -1 } }; @@ -100,6 +99,7 @@ static bool query_is_ignored(const char 
*cmd) /* Success depends on target arch: */ "query-cpu-definitions", /* arm, i386, ppc, s390x */ "query-gic-capabilities", /* arm */ + "query-s390x-cpu-polarization", /* s390x */ /* Success depends on target-specific build configuration: */ "query-pci", /* CONFIG_PCI */ "x-query-virtio", /* CONFIG_VIRTIO */ diff --git a/tests/qtest/qmp-test.c b/tests/qtest/qmp-test.c index 22957fa49c2..edf08867874 100644 --- a/tests/qtest/qmp-test.c +++ b/tests/qtest/qmp-test.c @@ -14,10 +14,10 @@ #include "libqtest.h" #include "qapi/error.h" #include "qapi/qapi-visit-control.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" #include "qapi/qobject-input-visitor.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qstring.h" const char common_args[] = "-nodefaults -machine none"; diff --git a/tests/qtest/qom-test.c b/tests/qtest/qom-test.c index d677f87c8e2..4ade1c728c0 100644 --- a/tests/qtest/qom-test.c +++ b/tests/qtest/qom-test.c @@ -9,13 +9,121 @@ #include "qemu/osdep.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qstring.h" #include "qemu/cutils.h" #include "libqtest.h" +#define RAM_NAME "node0" +#define RAM_SIZE 65536 + static int verbosity_level; +/* + * Verify that the /object/RAM_NAME 'size' property is RAM_SIZE. + */ +static void test_list_get_value(QTestState *qts) +{ + QDict *args = qdict_new(); + g_autoptr(QDict) response = NULL; + g_autoptr(QList) paths = qlist_new(); + QListEntry *entry, *prop_entry; + const char *prop_name; + QList *properties, *return_list; + QDict *obj; + + qlist_append_str(paths, "/objects/" RAM_NAME); + qdict_put_obj(args, "paths", QOBJECT(qlist_copy(paths))); + response = qtest_qmp(qts, "{ 'execute': 'qom-list-get'," + " 'arguments': %p }", args); + g_assert(response); + g_assert(qdict_haskey(response, "return")); + return_list = qobject_to(QList, qdict_get(response, "return")); + + entry = QTAILQ_FIRST(&return_list->head); + obj = qobject_to(QDict, qlist_entry_obj(entry)); + g_assert(qdict_haskey(obj, "properties")); + properties = qobject_to(QList, qdict_get(obj, "properties")); + + QLIST_FOREACH_ENTRY(properties, prop_entry) { + QDict *prop = qobject_to(QDict, qlist_entry_obj(prop_entry)); + + g_assert(qdict_haskey(prop, "name")); + g_assert(qdict_haskey(prop, "value")); + + prop_name = qdict_get_str(prop, "name"); + if (!strcmp(prop_name, "type")) { + g_assert_cmpstr(qdict_get_str(prop, "value"), ==, + "memory-backend-ram"); + + } else if (!strcmp(prop_name, "size")) { + g_assert_cmpint(qdict_get_int(prop, "value"), ==, RAM_SIZE); + } + } +} + +static void test_list_get(QTestState *qts, QList *paths) +{ + QListEntry *entry, *prop_entry, *path_entry; + g_autoptr(QDict) response = NULL; + QDict *args = qdict_new(); + QDict *prop; + QList *return_list; + + if (verbosity_level >= 2) { + g_test_message("Obtaining properties for paths:"); + QLIST_FOREACH_ENTRY(paths, path_entry) { + QString *qstr = qobject_to(QString, qlist_entry_obj(path_entry)); + g_test_message(" %s", qstring_get_str(qstr)); + } + } + + qdict_put_obj(args, "paths", QOBJECT(qlist_copy(paths))); + response = qtest_qmp(qts, "{ 'execute': 'qom-list-get'," + " 'arguments': %p }", args); + g_assert(response); + g_assert(qdict_haskey(response, "return")); + return_list = qobject_to(QList, qdict_get(response, "return")); + g_assert(!qlist_empty(return_list)); + + path_entry = QTAILQ_FIRST(&paths->head); + QLIST_FOREACH_ENTRY(return_list, entry) { + QDict 
*obj = qobject_to(QDict, qlist_entry_obj(entry)); + g_assert(qdict_haskey(obj, "properties")); + QList *properties = qobject_to(QList, qdict_get(obj, "properties")); + bool has_child = false; + + QLIST_FOREACH_ENTRY(properties, prop_entry) { + prop = qobject_to(QDict, qlist_entry_obj(prop_entry)); + g_assert(qdict_haskey(prop, "name")); + g_assert(qdict_haskey(prop, "type")); + has_child |= strstart(qdict_get_str(prop, "type"), "child<", NULL); + } + + if (has_child) { + /* build a list of child paths */ + QString *qstr = qobject_to(QString, qlist_entry_obj(path_entry)); + const char *path = qstring_get_str(qstr); + g_autoptr(QList) child_paths = qlist_new(); + + QLIST_FOREACH_ENTRY(properties, prop_entry) { + prop = qobject_to(QDict, qlist_entry_obj(prop_entry)); + if (strstart(qdict_get_str(prop, "type"), "child<", NULL)) { + g_autofree char *child_path = g_strdup_printf( + "%s/%s", path, qdict_get_str(prop, "name")); + qlist_append_str(child_paths, child_path); + } + } + + /* fetch props for all children with one qom-list-get call */ + test_list_get(qts, child_paths); + } + + path_entry = QTAILQ_NEXT(path_entry, next); + } +} + static void test_properties(QTestState *qts, const char *path, bool recurse) { char *child_path; @@ -85,11 +193,28 @@ static void test_machine(gconstpointer data) const char *machine = data; QDict *response; QTestState *qts; + g_autoptr(QList) paths = qlist_new(); + + qts = qtest_initf("-machine %s -object memory-backend-ram,id=%s,size=%d", + machine, RAM_NAME, RAM_SIZE); - qts = qtest_initf("-machine %s", machine); + if (g_test_slow()) { + /* Make sure we can get the machine class properties: */ + g_autofree char *qom_machine = g_strdup_printf("%s-machine", machine); + + response = qtest_qmp(qts, "{ 'execute': 'qom-list-properties'," + " 'arguments': { 'typename': %s } }", + qom_machine); + g_assert(response); + qobject_unref(response); + } test_properties(qts, "/machine", true); + qlist_append_str(paths, "/machine"); + test_list_get(qts, paths); + test_list_get_value(qts); + response = qtest_qmp(qts, "{ 'execute': 'quit' }"); g_assert(qdict_haskey(response, "return")); qobject_unref(response); diff --git a/tests/qtest/qos-test.c b/tests/qtest/qos-test.c index 114f6bef273..abfd4b9512d 100644 --- a/tests/qtest/qos-test.c +++ b/tests/qtest/qos-test.c @@ -20,7 +20,7 @@ #include #include "libqtest-single.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/module.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-visit-machine.h" @@ -103,8 +103,7 @@ static void restart_qemu_or_continue(char *path) old_path = g_strdup(path); qtest_start(path); } else { /* if cmd line is the same, reset the guest */ - qobject_unref(qmp("{ 'execute': 'system_reset' }")); - qmp_eventwait("RESET"); + qtest_system_reset(global_qtest); } } diff --git a/tests/qtest/readconfig-test.c b/tests/qtest/readconfig-test.c index 760f974e631..c6f32a4e144 100644 --- a/tests/qtest/readconfig-test.c +++ b/tests/qtest/readconfig-test.c @@ -13,10 +13,10 @@ #include "qapi/qapi-visit-machine.h" #include "qapi/qapi-visit-qom.h" #include "qapi/qapi-visit-ui.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" #include "qapi/qobject-input-visitor.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qstring.h" #include "qemu/units.h" static QTestState *qtest_init_with_config(const char *cfgdata) diff --git a/tests/qtest/riscv-csr-test.c b/tests/qtest/riscv-csr-test.c new file mode 100644 
index 00000000000..ff5c29e6c6f --- /dev/null +++ b/tests/qtest/riscv-csr-test.c @@ -0,0 +1,56 @@ +/* + * QTest testcase for RISC-V CSRs + * + * Copyright (c) 2024 Syntacore. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" +#include "libqtest.h" + +#define CSR_MVENDORID 0xf11 +#define CSR_MISELECT 0x350 + +static void run_test_csr(void) +{ + uint64_t res; + uint64_t val = 0; + + QTestState *qts = qtest_init("-machine virt -cpu veyron-v1"); + + res = qtest_csr_call(qts, "get_csr", 0, CSR_MVENDORID, &val); + + g_assert_cmpint(res, ==, 0); + g_assert_cmpint(val, ==, 0x61f); + + val = 0xff; + res = qtest_csr_call(qts, "set_csr", 0, CSR_MISELECT, &val); + + g_assert_cmpint(res, ==, 0); + + val = 0; + res = qtest_csr_call(qts, "get_csr", 0, CSR_MISELECT, &val); + + g_assert_cmpint(res, ==, 0); + g_assert_cmpint(val, ==, 0xff); + + qtest_quit(qts); +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + + qtest_add_func("/cpu/csr", run_test_csr); + + return g_test_run(); +} diff --git a/tests/qtest/riscv-iommu-test.c b/tests/qtest/riscv-iommu-test.c new file mode 100644 index 00000000000..df0c7813ec0 --- /dev/null +++ b/tests/qtest/riscv-iommu-test.c @@ -0,0 +1,210 @@ +/* + * QTest testcase for RISC-V IOMMU + * + * Copyright (c) 2024 Ventana Micro Systems Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "libqtest-single.h" +#include "qemu/module.h" +#include "libqos/qgraph.h" +#include "libqos/riscv-iommu.h" +#include "hw/pci/pci_regs.h" + +static uint32_t riscv_iommu_read_reg32(QRISCVIOMMU *r_iommu, int reg_offset) +{ + return qpci_io_readl(&r_iommu->dev, r_iommu->reg_bar, reg_offset); +} + +static uint64_t riscv_iommu_read_reg64(QRISCVIOMMU *r_iommu, int reg_offset) +{ + return qpci_io_readq(&r_iommu->dev, r_iommu->reg_bar, reg_offset); +} + +static void riscv_iommu_write_reg32(QRISCVIOMMU *r_iommu, int reg_offset, + uint32_t val) +{ + qpci_io_writel(&r_iommu->dev, r_iommu->reg_bar, reg_offset, val); +} + +static void riscv_iommu_write_reg64(QRISCVIOMMU *r_iommu, int reg_offset, + uint64_t val) +{ + qpci_io_writeq(&r_iommu->dev, r_iommu->reg_bar, reg_offset, val); +} + +static void test_pci_config(void *obj, void *data, QGuestAllocator *t_alloc) +{ + QRISCVIOMMU *r_iommu = obj; + QPCIDevice *dev = &r_iommu->dev; + uint16_t vendorid, deviceid, classid; + + vendorid = qpci_config_readw(dev, PCI_VENDOR_ID); + deviceid = qpci_config_readw(dev, PCI_DEVICE_ID); + classid = qpci_config_readw(dev, PCI_CLASS_DEVICE); + + g_assert_cmpuint(vendorid, ==, RISCV_IOMMU_PCI_VENDOR_ID); + g_assert_cmpuint(deviceid, ==, RISCV_IOMMU_PCI_DEVICE_ID); + g_assert_cmpuint(classid, ==, RISCV_IOMMU_PCI_DEVICE_CLASS); +} + +static void test_reg_reset(void *obj, void *data, QGuestAllocator *t_alloc) +{ + QRISCVIOMMU *r_iommu = obj; + uint64_t cap; + uint32_t reg; + + cap = riscv_iommu_read_reg64(r_iommu, RISCV_IOMMU_REG_CAP); + g_assert_cmpuint(cap & RISCV_IOMMU_CAP_VERSION, ==, 0x10); + + reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_CQCSR); + g_assert_cmpuint(reg & RISCV_IOMMU_CQCSR_CQEN, ==, 0); + g_assert_cmpuint(reg & RISCV_IOMMU_CQCSR_CIE, ==, 0); + g_assert_cmpuint(reg & RISCV_IOMMU_CQCSR_CQON, ==, 0); + g_assert_cmpuint(reg & RISCV_IOMMU_CQCSR_BUSY, ==, 0); + + reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_FQCSR); + g_assert_cmpuint(reg & RISCV_IOMMU_FQCSR_FQEN, ==, 0); + g_assert_cmpuint(reg & RISCV_IOMMU_FQCSR_FIE, ==, 0); + g_assert_cmpuint(reg & RISCV_IOMMU_FQCSR_FQON, ==, 0); + g_assert_cmpuint(reg & RISCV_IOMMU_FQCSR_BUSY, ==, 0); + + reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_PQCSR); + g_assert_cmpuint(reg & RISCV_IOMMU_PQCSR_PQEN, ==, 0); + g_assert_cmpuint(reg & RISCV_IOMMU_PQCSR_PIE, ==, 0); + g_assert_cmpuint(reg & RISCV_IOMMU_PQCSR_PQON, ==, 0); + g_assert_cmpuint(reg & RISCV_IOMMU_PQCSR_BUSY, ==, 0); + + reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_DDTP); + g_assert_cmpuint(reg & RISCV_IOMMU_DDTP_BUSY, ==, 0); + g_assert_cmpuint(reg & RISCV_IOMMU_DDTP_MODE, ==, + RISCV_IOMMU_DDTP_MODE_OFF); + + reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_IPSR); + g_assert_cmpuint(reg, ==, 0); +} + +/* + * Common timeout-based poll for CQCSR, FQCSR and PQCSR. All + * their ON bits are mapped as RISCV_IOMMU_QUEUE_ACTIVE (16), + */ +static void qtest_wait_for_queue_active(QRISCVIOMMU *r_iommu, + uint32_t queue_csr) +{ + QTestState *qts = global_qtest; + guint64 timeout_us = 2 * 1000 * 1000; + gint64 start_time = g_get_monotonic_time(); + uint32_t reg; + + for (;;) { + qtest_clock_step(qts, 100); + + reg = riscv_iommu_read_reg32(r_iommu, queue_csr); + if (reg & RISCV_IOMMU_QUEUE_ACTIVE) { + break; + } + g_assert(g_get_monotonic_time() - start_time <= timeout_us); + } +} + +/* + * Goes through the queue activation procedures of chapter 6.2, + * "Guidelines for initialization", of the RISCV-IOMMU spec. 
+ */ +static void test_iommu_init_queues(void *obj, void *data, + QGuestAllocator *t_alloc) +{ + QRISCVIOMMU *r_iommu = obj; + uint64_t reg64, q_addr; + uint32_t reg; + int k = 2; + + reg64 = riscv_iommu_read_reg64(r_iommu, RISCV_IOMMU_REG_CAP); + g_assert_cmpuint(reg64 & RISCV_IOMMU_CAP_VERSION, ==, 0x10); + + /* + * Program the command queue. Write 0xF to civ, fiv, pmiv and + * piv. With the current PCI device impl we expect 2 writable + * bits for each (k = 2) since we have N = 4 total vectors (2^k). + */ + riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_IVEC, 0xFFFF); + reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_IVEC); + g_assert_cmpuint(reg & RISCV_IOMMU_REG_IVEC_CIV, ==, 0x3); + g_assert_cmpuint(reg & RISCV_IOMMU_REG_IVEC_FIV, ==, 0x30); + g_assert_cmpuint(reg & RISCV_IOMMU_REG_IVEC_PMIV, ==, 0x300); + g_assert_cmpuint(reg & RISCV_IOMMU_REG_IVEC_PIV, ==, 0x3000); + + /* Alloc a 4*16 bytes buffer and use it to set cqb */ + q_addr = guest_alloc(t_alloc, 4 * 16); + reg64 = 0; + deposit64(reg64, RISCV_IOMMU_CQB_PPN_START, + RISCV_IOMMU_CQB_PPN_LEN, q_addr); + deposit64(reg64, RISCV_IOMMU_CQB_LOG2SZ_START, + RISCV_IOMMU_CQB_LOG2SZ_LEN, k - 1); + riscv_iommu_write_reg64(r_iommu, RISCV_IOMMU_REG_CQB, reg64); + + /* cqt = 0, cqcsr.cqen = 1, poll cqcsr.cqon until it reads 1 */ + riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_CQT, 0); + + reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_CQCSR); + reg |= RISCV_IOMMU_CQCSR_CQEN; + riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_CQCSR, reg); + + qtest_wait_for_queue_active(r_iommu, RISCV_IOMMU_REG_CQCSR); + + /* + * Program the fault queue. Alloc a 4*32 bytes (instead of 4*16) + * buffer and use it to set fqb. + */ + q_addr = guest_alloc(t_alloc, 4 * 32); + reg64 = 0; + deposit64(reg64, RISCV_IOMMU_FQB_PPN_START, + RISCV_IOMMU_FQB_PPN_LEN, q_addr); + deposit64(reg64, RISCV_IOMMU_FQB_LOG2SZ_START, + RISCV_IOMMU_FQB_LOG2SZ_LEN, k - 1); + riscv_iommu_write_reg64(r_iommu, RISCV_IOMMU_REG_FQB, reg64); + + /* fqt = 0, fqcsr.fqen = 1, poll fqcsr.fqon until it reads 1 */ + riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_FQT, 0); + + reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_FQCSR); + reg |= RISCV_IOMMU_FQCSR_FQEN; + riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_FQCSR, reg); + + qtest_wait_for_queue_active(r_iommu, RISCV_IOMMU_REG_FQCSR); + + /* + * Program the page-request queue. Alloc a 4*16 bytes buffer + * and use it to set pqb. 
+ */ + q_addr = guest_alloc(t_alloc, 4 * 16); + reg64 = 0; + deposit64(reg64, RISCV_IOMMU_PQB_PPN_START, + RISCV_IOMMU_PQB_PPN_LEN, q_addr); + deposit64(reg64, RISCV_IOMMU_PQB_LOG2SZ_START, + RISCV_IOMMU_PQB_LOG2SZ_LEN, k - 1); + riscv_iommu_write_reg64(r_iommu, RISCV_IOMMU_REG_PQB, reg64); + + /* pqt = 0, pqcsr.pqen = 1, poll pqcsr.pqon until it reads 1 */ + riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_PQT, 0); + + reg = riscv_iommu_read_reg32(r_iommu, RISCV_IOMMU_REG_PQCSR); + reg |= RISCV_IOMMU_PQCSR_PQEN; + riscv_iommu_write_reg32(r_iommu, RISCV_IOMMU_REG_PQCSR, reg); + + qtest_wait_for_queue_active(r_iommu, RISCV_IOMMU_REG_PQCSR); +} + +static void register_riscv_iommu_test(void) +{ + qos_add_test("pci_config", "riscv-iommu-pci", test_pci_config, NULL); + qos_add_test("reg_reset", "riscv-iommu-pci", test_reg_reset, NULL); + qos_add_test("iommu_init_queues", "riscv-iommu-pci", + test_iommu_init_queues, NULL); +} + +libqos_init(register_riscv_iommu_test); diff --git a/tests/qtest/rs5c372-test.c b/tests/qtest/rs5c372-test.c new file mode 100644 index 00000000000..0f6a9b68b9f --- /dev/null +++ b/tests/qtest/rs5c372-test.c @@ -0,0 +1,43 @@ +/* + * QTest testcase for the RS5C372 RTC + * + * Copyright (c) 2025 Bernhard Beschow + * + * Based on ds1338-test.c + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/bcd.h" +#include "libqos/i2c.h" + +#define RS5C372_ADDR 0x32 + +static void rs5c372_read_date(void *obj, void *data, QGuestAllocator *alloc) +{ + QI2CDevice *i2cdev = obj; + + uint8_t resp[0x10]; + time_t now = time(NULL); + struct tm *utc = gmtime(&now); + + i2c_read_block(i2cdev, 0, resp, sizeof(resp)); + + /* check retrieved time against local time */ + g_assert_cmpuint(from_bcd(resp[5]), == , utc->tm_mday); + g_assert_cmpuint(from_bcd(resp[6]), == , 1 + utc->tm_mon); + g_assert_cmpuint(2000 + from_bcd(resp[7]), == , 1900 + utc->tm_year); +} + +static void rs5c372_register_nodes(void) +{ + QOSGraphEdgeOptions opts = { }; + add_qi2c_address(&opts, &(QI2CAddress) { RS5C372_ADDR }); + + qos_node_create_driver("rs5c372", i2c_device_create); + qos_node_consumes("rs5c372", "i2c-bus", &opts); + qos_add_test("read_date", "rs5c372", rs5c372_read_date, NULL); +} + +libqos_init(rs5c372_register_nodes); diff --git a/tests/qtest/rtl8139-test.c b/tests/qtest/rtl8139-test.c index eedf90f65af..55f671f2f59 100644 --- a/tests/qtest/rtl8139-test.c +++ b/tests/qtest/rtl8139-test.c @@ -65,7 +65,7 @@ PORT(IntrMask, w, 0x3c) PORT(IntrStatus, w, 0x3E) PORT(TimerInt, l, 0x54) -#define fatal(...) do { g_test_message(__VA_ARGS__); g_assert(0); } while (0) +#define fatal(...) do { g_test_message(__VA_ARGS__); g_assert_not_reached(); } while (0) static void test_timer(void) { diff --git a/tests/qtest/stm32l4x5.h b/tests/qtest/stm32l4x5.h new file mode 100644 index 00000000000..2d21cc666cc --- /dev/null +++ b/tests/qtest/stm32l4x5.h @@ -0,0 +1,42 @@ +/* + * QTest testcase header for STM32L4X5 : + * used for consolidating common objects in stm32l4x5_*-test.c + * + * Copyright (c) 2024 Arnaud Minier + * Copyright (c) 2024 Inès Varhol + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "libqtest.h" + +/* copied from clock.h */ +#define CLOCK_PERIOD_1SEC (1000000000llu << 32) +#define CLOCK_PERIOD_FROM_HZ(hz) (((hz) != 0) ? CLOCK_PERIOD_1SEC / (hz) : 0u) +/* + * MSI (4 MHz) is used as system clock source after startup + * from Reset. 
+ * AHB, APB1 and APB2 prescalers are set to 1 at reset. + */ +#define SYSCLK_PERIOD CLOCK_PERIOD_FROM_HZ(4000000) +#define RCC_AHB2ENR 0x4002104C +#define RCC_APB1ENR1 0x40021058 +#define RCC_APB1ENR2 0x4002105C +#define RCC_APB2ENR 0x40021060 + + +static inline uint64_t get_clock_period(QTestState *qts, const char *path) +{ + uint64_t clock_period = 0; + QDict *r; + + r = qtest_qmp(qts, "{ 'execute': 'qom-get', 'arguments':" + " { 'path': %s, 'property': 'qtest-clock-period'} }", path); + g_assert_false(qdict_haskey(r, "error")); + clock_period = qdict_get_int(r, "return"); + qobject_unref(r); + return clock_period; +} + + diff --git a/tests/qtest/stm32l4x5_gpio-test.c b/tests/qtest/stm32l4x5_gpio-test.c index 72a78234066..3c6ea71febf 100644 --- a/tests/qtest/stm32l4x5_gpio-test.c +++ b/tests/qtest/stm32l4x5_gpio-test.c @@ -10,6 +10,7 @@ #include "qemu/osdep.h" #include "libqtest-single.h" +#include "stm32l4x5.h" #define GPIO_BASE_ADDR 0x48000000 #define GPIO_SIZE 0x400 @@ -168,14 +169,6 @@ static uint32_t reset(uint32_t gpio, unsigned int offset) return 0x0; } -static void system_reset(void) -{ - QDict *r; - r = qtest_qmp(global_qtest, "{'execute': 'system_reset'}"); - g_assert_false(qdict_haskey(r, "error")); - qobject_unref(r); -} - static void test_idr_reset_value(void) { /* @@ -213,7 +206,7 @@ static void test_idr_reset_value(void) gpio_writel(GPIO_H, OTYPER, 0xDEADBEEF); gpio_writel(GPIO_H, PUPDR, 0xDEADBEEF); - system_reset(); + qtest_system_reset(global_qtest); uint32_t moder = gpio_readl(GPIO_A, MODER); uint32_t odr = gpio_readl(GPIO_A, ODR); @@ -505,6 +498,26 @@ static void test_bsrr_brr(const void *data) gpio_writel(gpio, ODR, reset(gpio, ODR)); } +static void test_clock_enable(void) +{ + /* + * For each GPIO, enable its clock in RCC + * and check that its clock period changes to SYSCLK_PERIOD + */ + unsigned int gpio_id; + + for (uint32_t gpio = GPIO_A; gpio <= GPIO_H; gpio += GPIO_B - GPIO_A) { + gpio_id = get_gpio_id(gpio); + g_autofree char *path = g_strdup_printf("/machine/soc/gpio%c/clk", + gpio_id + 'a'); + g_assert_cmpuint(get_clock_period(global_qtest, path), ==, 0); + /* Enable the gpio clock */ + writel(RCC_AHB2ENR, readl(RCC_AHB2ENR) | (0x1 << gpio_id)); + g_assert_cmpuint(get_clock_period(global_qtest, path), ==, + SYSCLK_PERIOD); + } +} + int main(int argc, char **argv) { int ret; @@ -556,6 +569,8 @@ int main(int argc, char **argv) qtest_add_data_func("stm32l4x5/gpio/test_bsrr_brr2", test_data(GPIO_D, 0), test_bsrr_brr); + qtest_add_func("stm32l4x5/gpio/test_clock_enable", + test_clock_enable); qtest_start("-machine b-l475e-iot01a"); ret = g_test_run(); diff --git a/tests/qtest/stm32l4x5_syscfg-test.c b/tests/qtest/stm32l4x5_syscfg-test.c index 258417cd889..376c80e31ca 100644 --- a/tests/qtest/stm32l4x5_syscfg-test.c +++ b/tests/qtest/stm32l4x5_syscfg-test.c @@ -10,6 +10,7 @@ #include "qemu/osdep.h" #include "libqtest-single.h" +#include "stm32l4x5.h" #define SYSCFG_BASE_ADDR 0x40010000 #define SYSCFG_MEMRMP 0x00 @@ -26,7 +27,9 @@ #define INVALID_ADDR 0x2C /* SoC forwards GPIOs to SysCfg */ -#define SYSCFG "/machine/soc" +#define SOC "/machine/soc" +#define SYSCFG "/machine/soc/syscfg" +#define SYSCFG_CLK "/machine/soc/syscfg/clk" #define EXTI "/machine/soc/exti" static void syscfg_writel(unsigned int offset, uint32_t value) @@ -41,15 +44,7 @@ static uint32_t syscfg_readl(unsigned int offset) static void syscfg_set_irq(int num, int level) { - qtest_set_irq_in(global_qtest, SYSCFG, NULL, num, level); -} - -static void system_reset(void) -{ - QDict *response; - 
response = qtest_qmp(global_qtest, "{'execute': 'system_reset'}"); - g_assert(qdict_haskey(response, "return")); - qobject_unref(response); + qtest_set_irq_in(global_qtest, SOC, NULL, num, level); } static void test_reset(void) @@ -179,7 +174,7 @@ static void test_set_only_bits(void) syscfg_writel(SYSCFG_SWPR2, 0x00000000); g_assert_cmphex(syscfg_readl(SYSCFG_SWPR2), ==, 0xFFFFFFFF); - system_reset(); + qtest_system_reset(global_qtest); } static void test_clear_only_bits(void) @@ -191,7 +186,7 @@ static void test_clear_only_bits(void) syscfg_writel(SYSCFG_CFGR1, 0x00000001); g_assert_cmphex(syscfg_readl(SYSCFG_CFGR1), ==, 0x00000000); - system_reset(); + qtest_system_reset(global_qtest); } static void test_interrupt(void) @@ -301,6 +296,17 @@ static void test_irq_gpio_multiplexer(void) syscfg_writel(SYSCFG_EXTICR1, 0x00000000); } +static void test_clock_enable(void) +{ + g_assert_cmpuint(get_clock_period(global_qtest, SYSCFG_CLK), ==, 0); + + /* Enable SYSCFG clock */ + writel(RCC_APB2ENR, readl(RCC_APB2ENR) | (0x1 << 0)); + + g_assert_cmpuint(get_clock_period(global_qtest, SYSCFG_CLK), ==, + SYSCLK_PERIOD); +} + int main(int argc, char **argv) { int ret; @@ -325,6 +331,8 @@ int main(int argc, char **argv) test_irq_pin_multiplexer); qtest_add_func("stm32l4x5/syscfg/test_irq_gpio_multiplexer", test_irq_gpio_multiplexer); + qtest_add_func("stm32l4x5/syscfg/test_clock_enable", + test_clock_enable); qtest_start("-machine b-l475e-iot01a"); ret = g_test_run(); diff --git a/tests/qtest/stm32l4x5_usart-test.c b/tests/qtest/stm32l4x5_usart-test.c index 89025182331..98a7472307f 100644 --- a/tests/qtest/stm32l4x5_usart-test.c +++ b/tests/qtest/stm32l4x5_usart-test.c @@ -12,6 +12,7 @@ #include "libqtest.h" #include "hw/misc/stm32l4x5_rcc_internals.h" #include "hw/registerfields.h" +#include "stm32l4x5.h" #define RCC_BASE_ADDR 0x40021000 /* Use USART 1 ADDR, assume the others work the same */ @@ -36,6 +37,8 @@ REG32(GTPR, 0x10) REG32(RTOR, 0x14) REG32(RQR, 0x18) REG32(ISR, 0x1C) + FIELD(ISR, REACK, 22, 1) + FIELD(ISR, TEACK, 21, 1) FIELD(ISR, TXE, 7, 1) FIELD(ISR, RXNE, 5, 1) FIELD(ISR, ORE, 3, 1) @@ -191,7 +194,7 @@ static void init_uart(QTestState *qts) /* Enable the transmitter, the receiver and the USART. */ qtest_writel(qts, (USART1_BASE_ADDR + A_CR1), - R_CR1_UE_MASK | R_CR1_RE_MASK | R_CR1_TE_MASK); + cr1 | R_CR1_UE_MASK | R_CR1_RE_MASK | R_CR1_TE_MASK); } static void test_write_read(void) @@ -202,6 +205,8 @@ static void test_write_read(void) qtest_writel(qts, USART1_BASE_ADDR + A_TDR, 0xFFFFFFFF); const uint32_t tdr = qtest_readl(qts, USART1_BASE_ADDR + A_TDR); g_assert_cmpuint(tdr, ==, 0x000001FF); + + qtest_quit(qts); } static void test_receive_char(void) @@ -296,10 +301,65 @@ static void test_send_str(void) qtest_quit(qts); } -int main(int argc, char **argv) +static void test_ack(void) { - int ret; + uint32_t cr1; + uint32_t isr; + QTestState *qts = qtest_init("-M b-l475e-iot01a"); + + init_uart(qts); + + cr1 = qtest_readl(qts, (USART1_BASE_ADDR + A_CR1)); + /* Disable the transmitter and receiver. */ + qtest_writel(qts, (USART1_BASE_ADDR + A_CR1), + cr1 & ~(R_CR1_RE_MASK | R_CR1_TE_MASK)); + + /* Test ISR ACK for transmitter and receiver disabled */ + isr = qtest_readl(qts, (USART1_BASE_ADDR + A_ISR)); + g_assert_false(isr & R_ISR_TEACK_MASK); + g_assert_false(isr & R_ISR_REACK_MASK); + + /* Enable the transmitter and receiver. 
*/ + qtest_writel(qts, (USART1_BASE_ADDR + A_CR1), + cr1 | (R_CR1_RE_MASK | R_CR1_TE_MASK)); + + /* Test ISR ACK for transmitter and receiver disabled */ + isr = qtest_readl(qts, (USART1_BASE_ADDR + A_ISR)); + g_assert_true(isr & R_ISR_TEACK_MASK); + g_assert_true(isr & R_ISR_REACK_MASK); + + qtest_quit(qts); +} + +static void check_clock(QTestState *qts, const char *path, uint32_t rcc_reg, + uint32_t reg_offset) +{ + g_assert_cmpuint(get_clock_period(qts, path), ==, 0); + qtest_writel(qts, rcc_reg, qtest_readl(qts, rcc_reg) | (0x1 << reg_offset)); + g_assert_cmpuint(get_clock_period(qts, path), ==, SYSCLK_PERIOD); +} + +static void test_clock_enable(void) +{ + /* + * For each USART device, enable its clock in RCC + * and check that its clock frequency is SYSCLK_PERIOD + */ + QTestState *qts = qtest_init("-M b-l475e-iot01a"); + + check_clock(qts, "machine/soc/usart[0]/clk", RCC_APB2ENR, 14); + check_clock(qts, "machine/soc/usart[1]/clk", RCC_APB1ENR1, 17); + check_clock(qts, "machine/soc/usart[2]/clk", RCC_APB1ENR1, 18); + check_clock(qts, "machine/soc/uart[0]/clk", RCC_APB1ENR1, 19); + check_clock(qts, "machine/soc/uart[1]/clk", RCC_APB1ENR1, 20); + check_clock(qts, "machine/soc/lpuart1/clk", RCC_APB1ENR2, 0); + + qtest_quit(qts); +} + +int main(int argc, char **argv) +{ g_test_init(&argc, &argv, NULL); g_test_set_nonfatal_assertions(); @@ -308,8 +368,8 @@ int main(int argc, char **argv) qtest_add_func("stm32l4x5/usart/send_char", test_send_char); qtest_add_func("stm32l4x5/usart/receive_str", test_receive_str); qtest_add_func("stm32l4x5/usart/send_str", test_send_str); - ret = g_test_run(); - - return ret; + qtest_add_func("stm32l4x5/usart/ack", test_ack); + qtest_add_func("stm32l4x5/usart/clock_enable", test_clock_enable); + return g_test_run(); } diff --git a/tests/qtest/tco-test.c b/tests/qtest/tco-test.c index 0547d411738..20ccefabcbf 100644 --- a/tests/qtest/tco-test.c +++ b/tests/qtest/tco-test.c @@ -12,7 +12,7 @@ #include "libqtest.h" #include "libqos/pci.h" #include "libqos/pci-pc.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "hw/pci/pci_regs.h" #include "hw/southbridge/ich9.h" #include "hw/acpi/ich9.h" diff --git a/tests/qtest/test-filter-mirror.c b/tests/qtest/test-filter-mirror.c index f3865f75196..723d2c2f291 100644 --- a/tests/qtest/test-filter-mirror.c +++ b/tests/qtest/test-filter-mirror.c @@ -10,7 +10,7 @@ #include "qemu/osdep.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/iov.h" #include "qemu/sockets.h" #include "qemu/error-report.h" diff --git a/tests/qtest/test-filter-redirector.c b/tests/qtest/test-filter-redirector.c index a77d5fd8ec0..a996a80c1c0 100644 --- a/tests/qtest/test-filter-redirector.c +++ b/tests/qtest/test-filter-redirector.c @@ -52,7 +52,7 @@ #include "qemu/osdep.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/iov.h" #include "qemu/sockets.h" #include "qemu/error-report.h" diff --git a/tests/qtest/test-netfilter.c b/tests/qtest/test-netfilter.c index b09ef7fae98..326d4bd85f1 100644 --- a/tests/qtest/test-netfilter.c +++ b/tests/qtest/test-netfilter.c @@ -10,7 +10,7 @@ #include "qemu/osdep.h" #include "libqtest-single.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" /* add a netfilter to a netdev and then remove it */ static void add_one_netfilter(void) diff --git a/tests/qtest/test-x86-cpuid-compat.c b/tests/qtest/test-x86-cpuid-compat.c index b9e7e5ef7b5..456e2af6657 100644 --- a/tests/qtest/test-x86-cpuid-compat.c +++ 
b/tests/qtest/test-x86-cpuid-compat.c @@ -1,8 +1,8 @@ #include "qemu/osdep.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qbool.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qnum.h" +#include "qobject/qbool.h" #include "libqtest-single.h" static char *get_cpu0_qom_path(void) @@ -193,7 +193,6 @@ static void add_feature_test(const char *name, const char *cpu, args->bitnr = bitnr; args->expected_value = expected_value; qtest_add_data_func(name, args, test_feature_flag); - return; } static void test_plus_minus_subprocess(void) @@ -357,19 +356,6 @@ int main(int argc, char **argv) "486", "xstore=on", "pc-i440fx-2.7", "xlevel2", 0); } - /* - * QEMU 2.3.0 had auto-level enabled for CPUID[7], already, - * and the compat code that sets default level shouldn't - * disable the auto-level=7 code: - */ - if (qtest_has_machine("pc-i440fx-2.3")) { - add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/off", - "Penryn", NULL, "pc-i440fx-2.3", - "level", 4); - add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/on", - "Penryn", "erms=on", "pc-i440fx-2.3", - "level", 7); - } if (qtest_has_machine("pc-i440fx-2.9")) { add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/off", "Conroe", NULL, "pc-i440fx-2.9", @@ -379,25 +365,6 @@ int main(int argc, char **argv) "level", 10); } - /* - * xlevel doesn't have any feature that triggers auto-level - * code on old machine-types. Just check that the compat code - * is working correctly: - */ - if (qtest_has_machine("pc-i440fx-2.3")) { - add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.3", - "SandyBridge", NULL, "pc-i440fx-2.3", - "xlevel", 0x8000000a); - } - if (qtest_has_machine("pc-i440fx-2.4")) { - add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-off", - "SandyBridge", NULL, "pc-i440fx-2.4", - "xlevel", 0x80000008); - add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-on", - "SandyBridge", "svm=on,npt=on", "pc-i440fx-2.4", - "xlevel", 0x80000008); - } - /* Test feature parsing */ add_feature_test("x86/cpuid/features/plus", "486", "+arat", diff --git a/tests/qtest/tmp105-test.c b/tests/qtest/tmp105-test.c index 3678646df52..3b114a50f55 100644 --- a/tests/qtest/tmp105-test.c +++ b/tests/qtest/tmp105-test.c @@ -12,7 +12,7 @@ #include "libqtest-single.h" #include "libqos/qgraph.h" #include "libqos/i2c.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "hw/sensor/tmp105_regs.h" #define TMP105_TEST_ID "tmp105-test" @@ -100,9 +100,9 @@ static void send_and_receive(void *obj, void *data, QGuestAllocator *alloc) g_assert_cmphex(value, ==, 0x14f0); i2c_set16(i2cdev, TMP105_REG_T_LOW, 0x1234); - g_assert_cmphex(i2c_get16(i2cdev, TMP105_REG_T_LOW), ==, 0x1234); + g_assert_cmphex(i2c_get16(i2cdev, TMP105_REG_T_LOW), ==, 0x1230); i2c_set16(i2cdev, TMP105_REG_T_HIGH, 0x4231); - g_assert_cmphex(i2c_get16(i2cdev, TMP105_REG_T_HIGH), ==, 0x4231); + g_assert_cmphex(i2c_get16(i2cdev, TMP105_REG_T_HIGH), ==, 0x4230); } static void tmp105_register_nodes(void) diff --git a/tests/qtest/tpm-emu.c b/tests/qtest/tpm-emu.c index 2bf8ff4c86e..9e4c2005d0e 100644 --- a/tests/qtest/tpm-emu.c +++ b/tests/qtest/tpm-emu.c @@ -16,8 +16,8 @@ #include "backends/tpm/tpm_ioctl.h" #include "io/channel-socket.h" #include "qapi/error.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qlist.h" +#include "qobject/qstring.h" #include "tpm-emu.h" void tpm_emu_test_wait_cond(TPMTestState *s) diff --git a/tests/qtest/tpm-emu.h b/tests/qtest/tpm-emu.h index 
712cee9b7a5..59c8009f4c5 100644 --- a/tests/qtest/tpm-emu.h +++ b/tests/qtest/tpm-emu.h @@ -21,7 +21,7 @@ #include "qemu/sockets.h" #include "io/channel.h" -#include "sysemu/tpm.h" +#include "system/tpm.h" #include "libqtest.h" struct tpm_hdr { diff --git a/tests/qtest/tpm-tests.c b/tests/qtest/tpm-tests.c index fb94496bbd8..197714f8d99 100644 --- a/tests/qtest/tpm-tests.c +++ b/tests/qtest/tpm-tests.c @@ -114,7 +114,7 @@ void tpm_test_swtpm_migration_test(const char *src_tpm_path, sizeof(tpm_pcrread_resp)); tpm_util_migrate(src_qemu, uri); - tpm_util_wait_for_migration_complete(src_qemu); + tpm_util_wait_for_migration_complete(dst_qemu); tpm_util_pcrread(dst_qemu, tx, tpm_pcrread_resp, sizeof(tpm_pcrread_resp)); diff --git a/tests/qtest/tpm-util.c b/tests/qtest/tpm-util.c index 1c0319e6e70..2cb2dd47961 100644 --- a/tests/qtest/tpm-util.c +++ b/tests/qtest/tpm-util.c @@ -18,7 +18,7 @@ #include "hw/acpi/tpm.h" #include "libqtest.h" #include "tpm-util.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" void tpm_util_crb_transfer(QTestState *s, const unsigned char *req, size_t req_size, diff --git a/tests/qtest/ufs-test.c b/tests/qtest/ufs-test.c index 82ec3f06715..4867ccf08a1 100644 --- a/tests/qtest/ufs-test.c +++ b/tests/qtest/ufs-test.c @@ -8,13 +8,14 @@ #include "qemu/osdep.h" #include "qemu/module.h" -#include "qemu/units.h" #include "libqtest.h" #include "libqos/qgraph.h" #include "libqos/pci.h" #include "scsi/constants.h" #include "block/ufs.h" +#include "qemu/bitmap.h" +#define DWORD_BYTE 4 /* Test images sizes in Bytes */ #define TEST_IMAGE_SIZE (64 * 1024 * 1024) /* Timeout for various operations, in seconds. */ @@ -26,6 +27,12 @@ #define UTP_COMMAND_DESCRIPTOR_SIZE 4096 #define UTP_RESPONSE_UPIU_OFFSET 1024 #define UTP_PRDT_UPIU_OFFSET 2048 +#define UTRD_TEST_SLOT 0 +#define UFS_MAX_CMD_DESC 32 +/* Constants for MCQ */ +#define TEST_QID 0 +#define QUEUE_SIZE 32 +#define UFS_MCQ_MAX_QNUM 32 typedef struct QUfs QUfs; @@ -34,12 +41,22 @@ struct QUfs { QPCIDevice dev; QPCIBar bar; - uint64_t utrlba; - uint64_t utmrlba; + DECLARE_BITMAP(cmd_desc_bitmap, UFS_MAX_CMD_DESC); uint64_t cmd_desc_addr; uint64_t data_buffer_addr; bool enabled; + bool support_mcq; + + /* for legacy doorbell mode */ + uint64_t utrlba; + + /* for mcq mode */ + uint32_t maxq; + uint64_t sqlba[UFS_MCQ_MAX_QNUM]; + uint64_t cqlba[UFS_MCQ_MAX_QNUM]; + uint64_t sqdao[UFS_MCQ_MAX_QNUM]; + uint64_t cqdao[UFS_MCQ_MAX_QNUM]; }; static inline uint32_t ufs_rreg(QUfs *ufs, size_t offset) @@ -52,6 +69,24 @@ static inline void ufs_wreg(QUfs *ufs, size_t offset, uint32_t value) qpci_io_writel(&ufs->dev, ufs->bar, offset, value); } +static int alloc_cmd_desc_slot(QUfs *ufs) +{ + int slot = find_first_zero_bit(ufs->cmd_desc_bitmap, UFS_MAX_CMD_DESC); + if (slot == UFS_MAX_CMD_DESC) { + g_assert_not_reached(); + } + set_bit(slot, ufs->cmd_desc_bitmap); + return slot; +} + +static void release_cmd_desc_slot(QUfs *ufs, int slot) +{ + if (!test_bit(slot, ufs->cmd_desc_bitmap)) { + g_assert_not_reached(); + } + clear_bit(slot, ufs->cmd_desc_bitmap); +} + static void ufs_wait_for_irq(QUfs *ufs) { uint64_t end_time; @@ -64,14 +99,11 @@ static void ufs_wait_for_irq(QUfs *ufs) } while (is == 0 && g_get_monotonic_time() < end_time); } -static UtpTransferReqDesc ufs_build_req_utrd(uint64_t cmd_desc_addr, - uint8_t slot, +static UtpTransferReqDesc ufs_build_req_utrd(uint64_t command_desc_base_addr, uint32_t data_direction, uint16_t prd_table_length) { UtpTransferReqDesc req = { 0 }; - uint64_t command_desc_base_addr = - cmd_desc_addr 
+ slot * UTP_COMMAND_DESCRIPTOR_SIZE; req.header.dword_0 = cpu_to_le32(1 << 28 | data_direction | UFS_UTP_REQ_DESC_INT_CMD); @@ -88,80 +120,140 @@ static UtpTransferReqDesc ufs_build_req_utrd(uint64_t cmd_desc_addr, return req; } -static void ufs_send_nop_out(QUfs *ufs, uint8_t slot, - UtpTransferReqDesc *utrd_out, UtpUpiuRsp *rsp_out) +static enum UtpOcsCodes +__ufs_send_transfer_request_doorbell(QUfs *ufs, uint8_t lun, + const UtpTransferReqDesc *utrd) { - /* Build up utp transfer request descriptor */ - UtpTransferReqDesc utrd = ufs_build_req_utrd(ufs->cmd_desc_addr, slot, - UFS_UTP_NO_DATA_TRANSFER, 0); - uint64_t utrd_addr = ufs->utrlba + slot * sizeof(UtpTransferReqDesc); + uint64_t utrd_addr = + ufs->utrlba + UTRD_TEST_SLOT * sizeof(UtpTransferReqDesc); + UtpTransferReqDesc utrd_result; + + qtest_memwrite(ufs->dev.bus->qts, utrd_addr, utrd, sizeof(*utrd)); + + /* Ring the doorbell */ + ufs_wreg(ufs, A_UTRLDBR, 1); + ufs_wait_for_irq(ufs); + g_assert_true(FIELD_EX32(ufs_rreg(ufs, A_IS), IS, UTRCS)); + ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, UTRCS, 1)); + + /* Handle completed command */ + qtest_memread(ufs->dev.bus->qts, utrd_addr, &utrd_result, + sizeof(utrd_result)); + return le32_to_cpu(utrd_result.header.dword_2) & 0xf; +} + +static enum UtpOcsCodes +__ufs_send_transfer_request_mcq(QUfs *ufs, uint8_t lun, + const UtpTransferReqDesc *utrd) +{ + uint32_t sqtp = ufs_rreg(ufs, ufs->sqdao[TEST_QID] + 0x4); + uint64_t utrd_addr = ufs->sqlba[TEST_QID] + sqtp; + uint32_t cqhp; + uint64_t cqentry_addr; + UfsCqEntry cqentry; + + qtest_memwrite(ufs->dev.bus->qts, utrd_addr, utrd, sizeof(*utrd)); + + /* Insert a new entry into the submission queue */ + sqtp = ufs_rreg(ufs, ufs->sqdao[TEST_QID] + 0x4); + sqtp = (sqtp + sizeof(UfsSqEntry)) % (QUEUE_SIZE * sizeof(UfsSqEntry)); + ufs_wreg(ufs, ufs->sqdao[TEST_QID] + 0x4, sqtp); + ufs_wait_for_irq(ufs); + g_assert_true(FIELD_EX32(ufs_rreg(ufs, A_IS), IS, CQES)); + ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, CQES, 1)); + + /* Handle the completed command from the completion queue */ + cqhp = ufs_rreg(ufs, ufs->cqdao[TEST_QID]); + cqentry_addr = ufs->cqlba[TEST_QID] + cqhp; + qtest_memread(ufs->dev.bus->qts, cqentry_addr, &cqentry, sizeof(cqentry)); + ufs_wreg(ufs, ufs->cqdao[TEST_QID], cqhp); + + return cqentry.status; +} + +static enum UtpOcsCodes +ufs_send_transfer_request_sync(QUfs *ufs, uint8_t lun, + const UtpTransferReqDesc *utrd) +{ + if (ufs->support_mcq) { + return __ufs_send_transfer_request_mcq(ufs, lun, utrd); + } + + return __ufs_send_transfer_request_doorbell(ufs, lun, utrd); +} + +static enum UtpOcsCodes ufs_send_nop_out(QUfs *ufs, UtpUpiuRsp *rsp_out) +{ + int cmd_desc_slot = alloc_cmd_desc_slot(ufs); uint64_t req_upiu_addr = - ufs->cmd_desc_addr + slot * UTP_COMMAND_DESCRIPTOR_SIZE; + ufs->cmd_desc_addr + cmd_desc_slot * UTP_COMMAND_DESCRIPTOR_SIZE; uint64_t rsp_upiu_addr = req_upiu_addr + UTP_RESPONSE_UPIU_OFFSET; - qtest_memwrite(ufs->dev.bus->qts, utrd_addr, &utrd, sizeof(utrd)); /* Build up request upiu */ UtpUpiuReq req_upiu = { 0 }; req_upiu.header.trans_type = UFS_UPIU_TRANSACTION_NOP_OUT; - req_upiu.header.task_tag = slot; + req_upiu.header.task_tag = cmd_desc_slot; qtest_memwrite(ufs->dev.bus->qts, req_upiu_addr, &req_upiu, sizeof(req_upiu)); - /* Ring Doorbell */ - ufs_wreg(ufs, A_UTRLDBR, 1); - ufs_wait_for_irq(ufs); - g_assert_true(FIELD_EX32(ufs_rreg(ufs, A_IS), IS, UTRCS)); - ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, UTRCS, 1)); + /* Build up utp transfer request descriptor */ + UtpTransferReqDesc utrd = + 
ufs_build_req_utrd(req_upiu_addr, UFS_UTP_NO_DATA_TRANSFER, 0); + + /* Send Transfer Request */ + enum UtpOcsCodes ret = ufs_send_transfer_request_sync(ufs, 0, &utrd); - qtest_memread(ufs->dev.bus->qts, utrd_addr, utrd_out, sizeof(*utrd_out)); qtest_memread(ufs->dev.bus->qts, rsp_upiu_addr, rsp_out, sizeof(*rsp_out)); + release_cmd_desc_slot(ufs, cmd_desc_slot); + return ret; } -static void ufs_send_query(QUfs *ufs, uint8_t slot, uint8_t query_function, - uint8_t query_opcode, uint8_t idn, uint8_t index, - UtpTransferReqDesc *utrd_out, UtpUpiuRsp *rsp_out) +static enum UtpOcsCodes ufs_send_query(QUfs *ufs, uint8_t query_function, + uint8_t query_opcode, uint8_t idn, + uint8_t index, uint8_t selector, + uint32_t attr_value, UtpUpiuRsp *rsp_out) { - /* Build up utp transfer request descriptor */ - UtpTransferReqDesc utrd = ufs_build_req_utrd(ufs->cmd_desc_addr, slot, - UFS_UTP_NO_DATA_TRANSFER, 0); - uint64_t utrd_addr = ufs->utrlba + slot * sizeof(UtpTransferReqDesc); + int cmd_desc_slot = alloc_cmd_desc_slot(ufs); uint64_t req_upiu_addr = - ufs->cmd_desc_addr + slot * UTP_COMMAND_DESCRIPTOR_SIZE; + ufs->cmd_desc_addr + cmd_desc_slot * UTP_COMMAND_DESCRIPTOR_SIZE; uint64_t rsp_upiu_addr = req_upiu_addr + UTP_RESPONSE_UPIU_OFFSET; - qtest_memwrite(ufs->dev.bus->qts, utrd_addr, &utrd, sizeof(utrd)); /* Build up request upiu */ UtpUpiuReq req_upiu = { 0 }; req_upiu.header.trans_type = UFS_UPIU_TRANSACTION_QUERY_REQ; req_upiu.header.query_func = query_function; - req_upiu.header.task_tag = slot; + req_upiu.header.task_tag = cmd_desc_slot; /* - * QEMU UFS does not currently support Write descriptor and Write attribute, + * QEMU UFS does not currently support Write descriptor, * so the value of data_segment_length is always 0. */ req_upiu.header.data_segment_length = 0; req_upiu.qr.opcode = query_opcode; req_upiu.qr.idn = idn; req_upiu.qr.index = index; + req_upiu.qr.selector = selector; + req_upiu.qr.value = cpu_to_be32(attr_value); + req_upiu.qr.length = UFS_QUERY_DESC_MAX_SIZE; qtest_memwrite(ufs->dev.bus->qts, req_upiu_addr, &req_upiu, sizeof(req_upiu)); - /* Ring Doorbell */ - ufs_wreg(ufs, A_UTRLDBR, 1); - ufs_wait_for_irq(ufs); - g_assert_true(FIELD_EX32(ufs_rreg(ufs, A_IS), IS, UTRCS)); - ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, UTRCS, 1)); + /* Build up utp transfer request descriptor */ + UtpTransferReqDesc utrd = + ufs_build_req_utrd(req_upiu_addr, UFS_UTP_NO_DATA_TRANSFER, 0); + + /* Send Transfer Request */ + enum UtpOcsCodes ret = ufs_send_transfer_request_sync(ufs, 0, &utrd); - qtest_memread(ufs->dev.bus->qts, utrd_addr, utrd_out, sizeof(*utrd_out)); qtest_memread(ufs->dev.bus->qts, rsp_upiu_addr, rsp_out, sizeof(*rsp_out)); + release_cmd_desc_slot(ufs, cmd_desc_slot); + return ret; } -static void ufs_send_scsi_command(QUfs *ufs, uint8_t slot, uint8_t lun, - const uint8_t *cdb, const uint8_t *data_in, - size_t data_in_len, uint8_t *data_out, - size_t data_out_len, - UtpTransferReqDesc *utrd_out, - UtpUpiuRsp *rsp_out) +static enum UtpOcsCodes +ufs_send_scsi_command(QUfs *ufs, uint8_t lun, const uint8_t *cdb, + const uint8_t *data_in, size_t data_in_len, + uint8_t *data_out, size_t data_out_len, + UtpUpiuRsp *rsp_out) { /* Build up PRDT */ @@ -171,8 +263,9 @@ static void ufs_send_scsi_command(QUfs *ufs, uint8_t slot, uint8_t lun, uint8_t flags; uint16_t prd_table_length, i; uint32_t data_direction, data_len; + int cmd_desc_slot = alloc_cmd_desc_slot(ufs); uint64_t req_upiu_addr = - ufs->cmd_desc_addr + slot * UTP_COMMAND_DESCRIPTOR_SIZE; + ufs->cmd_desc_addr + cmd_desc_slot * 
UTP_COMMAND_DESCRIPTOR_SIZE; uint64_t prdt_addr = req_upiu_addr + UTP_PRDT_UPIU_OFFSET; g_assert_true(data_in_len < MAX_PRD_ENTRY_COUNT * PRD_ENTRY_DATA_SIZE); @@ -214,36 +307,33 @@ static void ufs_send_scsi_command(QUfs *ufs, uint8_t slot, uint8_t lun, qtest_memwrite(ufs->dev.bus->qts, prdt_addr, entries, prd_table_length * sizeof(UfshcdSgEntry)); - /* Build up utp transfer request descriptor */ - UtpTransferReqDesc utrd = ufs_build_req_utrd( - ufs->cmd_desc_addr, slot, data_direction, prd_table_length); - uint64_t utrd_addr = ufs->utrlba + slot * sizeof(UtpTransferReqDesc); uint64_t rsp_upiu_addr = req_upiu_addr + UTP_RESPONSE_UPIU_OFFSET; - qtest_memwrite(ufs->dev.bus->qts, utrd_addr, &utrd, sizeof(utrd)); /* Build up request upiu */ UtpUpiuReq req_upiu = { 0 }; req_upiu.header.trans_type = UFS_UPIU_TRANSACTION_COMMAND; req_upiu.header.flags = flags; req_upiu.header.lun = lun; - req_upiu.header.task_tag = slot; + req_upiu.header.task_tag = cmd_desc_slot; req_upiu.sc.exp_data_transfer_len = cpu_to_be32(data_len); memcpy(req_upiu.sc.cdb, cdb, UFS_CDB_SIZE); qtest_memwrite(ufs->dev.bus->qts, req_upiu_addr, &req_upiu, sizeof(req_upiu)); - /* Ring Doorbell */ - ufs_wreg(ufs, A_UTRLDBR, 1); - ufs_wait_for_irq(ufs); - g_assert_true(FIELD_EX32(ufs_rreg(ufs, A_IS), IS, UTRCS)); - ufs_wreg(ufs, A_IS, FIELD_DP32(0, IS, UTRCS, 1)); + /* Build up utp transfer request descriptor */ + UtpTransferReqDesc utrd = + ufs_build_req_utrd(req_upiu_addr, data_direction, prd_table_length); + + /* Send Transfer Request */ + enum UtpOcsCodes ret = ufs_send_transfer_request_sync(ufs, lun, &utrd); - qtest_memread(ufs->dev.bus->qts, utrd_addr, utrd_out, sizeof(*utrd_out)); qtest_memread(ufs->dev.bus->qts, rsp_upiu_addr, rsp_out, sizeof(*rsp_out)); if (data_out_len) { qtest_memread(ufs->dev.bus->qts, ufs->data_buffer_addr, data_out, data_out_len); } + release_cmd_desc_slot(ufs, cmd_desc_slot); + return ret; } /** @@ -253,10 +343,10 @@ static void ufs_send_scsi_command(QUfs *ufs, uint8_t slot, uint8_t lun, static void ufs_init(QUfs *ufs, QGuestAllocator *alloc) { uint64_t end_time; - uint32_t nutrs, nutmrs; + uint32_t nutrs; uint32_t hcs, is, ucmdarg2, cap; uint32_t hce = 0, ie = 0; - UtpTransferReqDesc utrd; + enum UtpOcsCodes ocs; UtpUpiuRsp rsp_upiu; ufs->bar = qpci_iomap(&ufs->dev, 0, NULL); @@ -301,9 +391,12 @@ static void ufs_init(QUfs *ufs, QGuestAllocator *alloc) hcs = ufs_rreg(ufs, A_HCS); g_assert_true(FIELD_EX32(hcs, HCS, DP)); g_assert_true(FIELD_EX32(hcs, HCS, UTRLRDY)); - g_assert_true(FIELD_EX32(hcs, HCS, UTMRLRDY)); g_assert_true(FIELD_EX32(hcs, HCS, UCRDY)); + /* Check MCQ support */ + cap = ufs_rreg(ufs, A_CAP); + ufs->support_mcq = FIELD_EX32(cap, CAP, MCQS); + /* Enable all interrupt functions */ ie = FIELD_DP32(ie, IE, UTRCE, 1); ie = FIELD_DP32(ie, IE, UEE, 1); @@ -316,44 +409,89 @@ static void ufs_init(QUfs *ufs, QGuestAllocator *alloc) ie = FIELD_DP32(ie, IE, HCFEE, 1); ie = FIELD_DP32(ie, IE, SBFEE, 1); ie = FIELD_DP32(ie, IE, CEFEE, 1); + if (ufs->support_mcq) { + ie = FIELD_DP32(ie, IE, CQEE, 1); + } ufs_wreg(ufs, A_IE, ie); ufs_wreg(ufs, A_UTRIACR, 0); - /* Enable transfer request and task management request */ - cap = ufs_rreg(ufs, A_CAP); - nutrs = FIELD_EX32(cap, CAP, NUTRS) + 1; - nutmrs = FIELD_EX32(cap, CAP, NUTMRS) + 1; + /* Enable transfer request */ ufs->cmd_desc_addr = - guest_alloc(alloc, nutrs * UTP_COMMAND_DESCRIPTOR_SIZE); + guest_alloc(alloc, UFS_MAX_CMD_DESC * UTP_COMMAND_DESCRIPTOR_SIZE); ufs->data_buffer_addr = guest_alloc(alloc, MAX_PRD_ENTRY_COUNT * PRD_ENTRY_DATA_SIZE); 
- ufs->utrlba = guest_alloc(alloc, nutrs * sizeof(UtpTransferReqDesc)); - ufs->utmrlba = guest_alloc(alloc, nutmrs * sizeof(UtpTaskReqDesc)); - ufs_wreg(ufs, A_UTRLBA, ufs->utrlba & 0xffffffff); - ufs_wreg(ufs, A_UTRLBAU, ufs->utrlba >> 32); - ufs_wreg(ufs, A_UTMRLBA, ufs->utmrlba & 0xffffffff); - ufs_wreg(ufs, A_UTMRLBAU, ufs->utmrlba >> 32); - ufs_wreg(ufs, A_UTRLRSR, 1); - ufs_wreg(ufs, A_UTMRLRSR, 1); + if (ufs->support_mcq) { + uint32_t mcqcap, qid, qcfgptr, mcq_reg_offset; + uint32_t cqattr = 0, sqattr = 0; + + mcqcap = ufs_rreg(ufs, A_MCQCAP); + qcfgptr = FIELD_EX32(mcqcap, MCQCAP, QCFGPTR); + ufs->maxq = FIELD_EX32(mcqcap, MCQCAP, MAXQ) + 1; + for (qid = 0; qid < ufs->maxq; ++qid) { + ufs->sqlba[qid] = + guest_alloc(alloc, QUEUE_SIZE * sizeof(UtpTransferReqDesc)); + ufs->cqlba[qid] = + guest_alloc(alloc, QUEUE_SIZE * sizeof(UtpTransferReqDesc)); + mcq_reg_offset = qcfgptr * 0x200 + qid * 0x40; + + ufs_wreg(ufs, mcq_reg_offset + A_SQLBA, + ufs->sqlba[qid] & 0xffffffff); + ufs_wreg(ufs, mcq_reg_offset + A_SQUBA, ufs->sqlba[qid] >> 32); + ufs_wreg(ufs, mcq_reg_offset + A_CQLBA, + ufs->cqlba[qid] & 0xffffffff); + ufs_wreg(ufs, mcq_reg_offset + A_CQUBA, ufs->cqlba[qid] >> 32); + + /* Enable Completion Queue */ + cqattr = FIELD_DP32(cqattr, CQATTR, CQEN, 1); + cqattr = FIELD_DP32(cqattr, CQATTR, SIZE, + QUEUE_SIZE * sizeof(UtpTransferReqDesc) / + DWORD_BYTE); + ufs_wreg(ufs, mcq_reg_offset + A_CQATTR, cqattr); + + /* Enable Submission Queue */ + sqattr = FIELD_DP32(sqattr, SQATTR, SQEN, 1); + sqattr = FIELD_DP32(sqattr, SQATTR, SIZE, + QUEUE_SIZE * sizeof(UtpTransferReqDesc) / + DWORD_BYTE); + sqattr = FIELD_DP32(sqattr, SQATTR, CQID, qid); + ufs_wreg(ufs, mcq_reg_offset + A_SQATTR, sqattr); + + /* Cache head & tail pointer */ + ufs->sqdao[qid] = ufs_rreg(ufs, mcq_reg_offset + A_SQDAO); + ufs->cqdao[qid] = ufs_rreg(ufs, mcq_reg_offset + A_CQDAO); + } + } else { + nutrs = FIELD_EX32(cap, CAP, NUTRS) + 1; + ufs->utrlba = guest_alloc(alloc, nutrs * sizeof(UtpTransferReqDesc)); + + ufs_wreg(ufs, A_UTRLBA, ufs->utrlba & 0xffffffff); + ufs_wreg(ufs, A_UTRLBAU, ufs->utrlba >> 32); + ufs_wreg(ufs, A_UTRLRSR, 1); + } /* Send nop out to test transfer request */ - ufs_send_nop_out(ufs, 0, &utrd, &rsp_upiu); - g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS); + ocs = ufs_send_nop_out(ufs, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); /* Set fDeviceInit flag via query request */ - ufs_send_query(ufs, 0, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST, - UFS_UPIU_QUERY_OPCODE_SET_FLAG, - UFS_QUERY_FLAG_IDN_FDEVICEINIT, 0, &utrd, &rsp_upiu); - g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS); + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST, + UFS_UPIU_QUERY_OPCODE_SET_FLAG, + UFS_QUERY_FLAG_IDN_FDEVICEINIT, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); /* Wait for device to reset */ end_time = g_get_monotonic_time() + TIMEOUT_SECONDS * G_TIME_SPAN_SECOND; do { qtest_clock_step(ufs->dev.bus->qts, 100); - ufs_send_query(ufs, 0, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, - UFS_UPIU_QUERY_OPCODE_READ_FLAG, - UFS_QUERY_FLAG_IDN_FDEVICEINIT, 0, &utrd, &rsp_upiu); + ocs = + ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_FLAG, + UFS_QUERY_FLAG_IDN_FDEVICEINIT, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, + 
UFS_COMMAND_RESULT_SUCCESS); } while (be32_to_cpu(rsp_upiu.qr.value) != 0 && g_get_monotonic_time() < end_time); g_assert_cmpuint(be32_to_cpu(rsp_upiu.qr.value), ==, 0); @@ -364,8 +502,15 @@ static void ufs_init(QUfs *ufs, QGuestAllocator *alloc) static void ufs_exit(QUfs *ufs, QGuestAllocator *alloc) { if (ufs->enabled) { - guest_free(alloc, ufs->utrlba); - guest_free(alloc, ufs->utmrlba); + if (ufs->support_mcq) { + for (uint32_t qid = 0; qid < ufs->maxq; ++qid) { + guest_free(alloc, ufs->sqlba[qid]); + guest_free(alloc, ufs->cqlba[qid]); + } + } else { + guest_free(alloc, ufs->utrlba); + } + guest_free(alloc, ufs->cmd_desc_addr); guest_free(alloc, ufs->data_buffer_addr); } @@ -428,15 +573,15 @@ static void ufstest_init(void *obj, void *data, QGuestAllocator *alloc) const uint8_t request_sense_cdb[UFS_CDB_SIZE] = { REQUEST_SENSE, }; - UtpTransferReqDesc utrd; + enum UtpOcsCodes ocs; UtpUpiuRsp rsp_upiu; ufs_init(ufs, alloc); /* Check REPORT_LUNS */ - ufs_send_scsi_command(ufs, 0, 0, report_luns_cdb, NULL, 0, buf, sizeof(buf), - &utrd, &rsp_upiu); - g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS); + ocs = ufs_send_scsi_command(ufs, 0, report_luns_cdb, NULL, 0, buf, + sizeof(buf), &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, GOOD); /* LUN LIST LENGTH should be 8, in big endian */ g_assert_cmpuint(buf[3], ==, 8); @@ -444,15 +589,15 @@ static void ufstest_init(void *obj, void *data, QGuestAllocator *alloc) g_assert_cmpuint(buf[9], ==, 0); /* Clear Unit Attention */ - ufs_send_scsi_command(ufs, 0, 0, request_sense_cdb, NULL, 0, buf, - sizeof(buf), &utrd, &rsp_upiu); - g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS); + ocs = ufs_send_scsi_command(ufs, 0, request_sense_cdb, NULL, 0, buf, + sizeof(buf), &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, CHECK_CONDITION); /* Check TEST_UNIT_READY */ - ufs_send_scsi_command(ufs, 0, 0, test_unit_ready_cdb, NULL, 0, NULL, 0, - &utrd, &rsp_upiu); - g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS); + ocs = ufs_send_scsi_command(ufs, 0, test_unit_ready_cdb, NULL, 0, NULL, 0, + &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, GOOD); ufs_exit(ufs, alloc); @@ -494,22 +639,22 @@ static void ufstest_read_write(void *obj, void *data, QGuestAllocator *alloc) WRITE_10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00 }; uint32_t block_size; - UtpTransferReqDesc utrd; + enum UtpOcsCodes ocs; UtpUpiuRsp rsp_upiu; const int test_lun = 1; ufs_init(ufs, alloc); /* Clear Unit Attention */ - ufs_send_scsi_command(ufs, 0, test_lun, request_sense_cdb, NULL, 0, - read_buf, sizeof(read_buf), &utrd, &rsp_upiu); - g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS); + ocs = ufs_send_scsi_command(ufs, test_lun, request_sense_cdb, NULL, 0, + read_buf, sizeof(read_buf), &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, CHECK_CONDITION); /* Read capacity */ - ufs_send_scsi_command(ufs, 0, test_lun, read_capacity_cdb, NULL, 0, - read_buf, sizeof(read_buf), &utrd, &rsp_upiu); - g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS); + ocs = ufs_send_scsi_command(ufs, test_lun, read_capacity_cdb, NULL, 0, + read_buf, sizeof(read_buf), &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); g_assert_cmpuint(rsp_upiu.header.scsi_status, 
==, UFS_COMMAND_RESULT_SUCCESS); block_size = ldl_be_p(&read_buf[8]); @@ -517,16 +662,16 @@ static void ufstest_read_write(void *obj, void *data, QGuestAllocator *alloc) /* Write data */ memset(write_buf, 0xab, block_size); - ufs_send_scsi_command(ufs, 0, test_lun, write_cdb, write_buf, block_size, - NULL, 0, &utrd, &rsp_upiu); - g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS); + ocs = ufs_send_scsi_command(ufs, test_lun, write_cdb, write_buf, block_size, + NULL, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, UFS_COMMAND_RESULT_SUCCESS); /* Read data and verify */ - ufs_send_scsi_command(ufs, 0, test_lun, read_cdb, NULL, 0, read_buf, - block_size, &utrd, &rsp_upiu); - g_assert_cmpuint(le32_to_cpu(utrd.header.dword_2), ==, UFS_OCS_SUCCESS); + ocs = ufs_send_scsi_command(ufs, test_lun, read_cdb, NULL, 0, read_buf, + block_size, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); g_assert_cmpuint(rsp_upiu.header.scsi_status, ==, UFS_COMMAND_RESULT_SUCCESS); g_assert_cmpint(memcmp(read_buf, write_buf, block_size), ==, 0); @@ -534,6 +679,384 @@ static void ufstest_read_write(void *obj, void *data, QGuestAllocator *alloc) ufs_exit(ufs, alloc); } +static void ufstest_query_flag_request(void *obj, void *data, + QGuestAllocator *alloc) +{ + QUfs *ufs = obj; + + enum UtpOcsCodes ocs; + UtpUpiuRsp rsp_upiu; + ufs_init(ufs, alloc); + + /* Read read-only flag */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_FLAG, + UFS_QUERY_FLAG_IDN_FDEVICEINIT, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.opcode, ==, UFS_UPIU_QUERY_OPCODE_READ_FLAG); + g_assert_cmpuint(rsp_upiu.qr.idn, ==, UFS_QUERY_FLAG_IDN_FDEVICEINIT); + g_assert_cmpuint(rsp_upiu.qr.value, ==, be32_to_cpu(0)); + + /* Flag Set, Clear, Toggle Test with fDeviceLifeSpanModeEn */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_FLAG, + UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE, 0, 0, 0, + &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, be32_to_cpu(0)); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST, + UFS_UPIU_QUERY_OPCODE_SET_FLAG, + UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE, 0, 0, 0, + &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, be32_to_cpu(1)); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST, + UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG, + UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE, 0, 0, 0, + &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, be32_to_cpu(0)); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST, + UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG, + UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE, 0, 0, 0, + &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, be32_to_cpu(1)); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST, + 
UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG, + UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE, 0, 0, 0, + &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, be32_to_cpu(0)); + + /* Read Write-only Flag (Intended Failure) */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_FLAG, + UFS_QUERY_FLAG_IDN_PURGE_ENABLE, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR); + g_assert_cmpuint(rsp_upiu.header.response, ==, + UFS_QUERY_RESULT_NOT_READABLE); + + /* Write Read-Only Flag (Intended Failure) */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST, + UFS_UPIU_QUERY_OPCODE_SET_FLAG, + UFS_QUERY_FLAG_IDN_BUSY_RTC, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR); + g_assert_cmpuint(rsp_upiu.header.response, ==, + UFS_QUERY_RESULT_NOT_WRITEABLE); + + ufs_exit(ufs, alloc); +} + +static void ufstest_query_attr_request(void *obj, void *data, + QGuestAllocator *alloc) +{ + QUfs *ufs = obj; + + enum UtpOcsCodes ocs; + UtpUpiuRsp rsp_upiu; + ufs_init(ufs, alloc); + + /* Read Readable Attributes*/ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_ATTR, + UFS_QUERY_ATTR_IDN_BOOT_LU_EN, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.opcode, ==, UFS_UPIU_QUERY_OPCODE_READ_ATTR); + g_assert_cmpuint(rsp_upiu.qr.idn, ==, UFS_QUERY_ATTR_IDN_BOOT_LU_EN); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00)); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_ATTR, + UFS_QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00)); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_ATTR, + UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, 0, + &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00)); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_ATTR, + UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND, 0, 0, 0, + &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(160)); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_ATTR, + UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND, 0, 0, 0, + &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(60)); + + /* Write Writable Attributes & Read Again */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST, + UFS_UPIU_QUERY_OPCODE_WRITE_ATTR, + UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, 0x03, + &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x03)); 
+ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST, + UFS_UPIU_QUERY_OPCODE_WRITE_ATTR, + UFS_QUERY_ATTR_IDN_EE_CONTROL, 0, 0, 0x07, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x07)); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_ATTR, + UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x03)); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_ATTR, + UFS_QUERY_ATTR_IDN_EE_CONTROL, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x07)); + + /* Write Invalid Value (Intended Error) */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST, + UFS_UPIU_QUERY_OPCODE_WRITE_ATTR, + UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, 0x10, + &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR); + g_assert_cmpuint(rsp_upiu.header.response, ==, + UFS_QUERY_RESULT_INVALID_VALUE); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_ATTR, + UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x03)); + + /* Read Write-Only Attribute (Intended Error) */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_ATTR, + UFS_QUERY_ATTR_IDN_SECONDS_PASSED, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR); + g_assert_cmpuint(rsp_upiu.header.response, ==, + UFS_QUERY_RESULT_NOT_READABLE); + + /* Write Read-Only Attribute (Intended Error) */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST, + UFS_UPIU_QUERY_OPCODE_WRITE_ATTR, + UFS_QUERY_ATTR_IDN_POWER_MODE, 0, 0, 0x01, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR); + g_assert_cmpuint(rsp_upiu.header.response, ==, + UFS_QUERY_RESULT_NOT_WRITEABLE); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_ATTR, + UFS_QUERY_ATTR_IDN_POWER_MODE, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00)); + + /* Reset Written Attributes */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST, + UFS_UPIU_QUERY_OPCODE_WRITE_ATTR, + UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00)); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST, + UFS_UPIU_QUERY_OPCODE_WRITE_ATTR, + UFS_QUERY_ATTR_IDN_EE_CONTROL, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, 
==, cpu_to_be32(0x00)); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_ATTR, + UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00)); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_ATTR, + UFS_QUERY_ATTR_IDN_EE_CONTROL, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.value, ==, cpu_to_be32(0x00)); + + ufs_exit(ufs, alloc); +} + +static void ufstest_query_desc_request(void *obj, void *data, + QGuestAllocator *alloc) +{ + QUfs *ufs = obj; + + enum UtpOcsCodes ocs; + UtpUpiuRsp rsp_upiu; + ufs_init(ufs, alloc); + + /* Write Descriptor is not supported yet */ + + /* Read Device Descriptor */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_DEVICE, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.opcode, ==, UFS_UPIU_QUERY_OPCODE_READ_DESC); + g_assert_cmpuint(rsp_upiu.qr.idn, ==, UFS_QUERY_DESC_IDN_DEVICE); + g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(DeviceDescriptor)); + g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_DEVICE); + + /* Read Configuration Descriptor is not supported yet*/ + + /* Read Unit Descriptor */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_UNIT, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(UnitDescriptor)); + g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_UNIT); + g_assert_cmpuint(rsp_upiu.qr.data[2], ==, 0); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_UNIT, 1, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(UnitDescriptor)); + g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_UNIT); + g_assert_cmpuint(rsp_upiu.qr.data[2], ==, 1); + + ocs = + ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, UFS_QUERY_DESC_IDN_UNIT, + UFS_UPIU_RPMB_WLUN, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(RpmbUnitDescriptor)); + g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_UNIT); + g_assert_cmpuint(rsp_upiu.qr.data[2], ==, UFS_UPIU_RPMB_WLUN); + + /* Read Interconnect Descriptor */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_INTERCONNECT, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(InterconnectDescriptor)); + 
g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_INTERCONNECT); + + /* Read String Descriptor */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_STRING, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.data[0], ==, 0x12); + g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_STRING); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_STRING, 1, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.data[0], ==, 0x22); + g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_STRING); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_STRING, 4, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.data[0], ==, 0x0a); + g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_STRING); + + /* Read Geometry Descriptor */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_GEOMETRY, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(GeometryDescriptor)); + g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_GEOMETRY); + + /* Read Power Descriptor */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_POWER, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.data[0], ==, + sizeof(PowerParametersDescriptor)); + g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_POWER); + + /* Read Health Descriptor */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_HEALTH, 0, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_SUCCESS); + g_assert_cmpuint(rsp_upiu.header.response, ==, UFS_COMMAND_RESULT_SUCCESS); + g_assert_cmpuint(rsp_upiu.qr.data[0], ==, sizeof(DeviceHealthDescriptor)); + g_assert_cmpuint(rsp_upiu.qr.data[1], ==, UFS_QUERY_DESC_IDN_HEALTH); + + /* Invalid Index (Intended Failure) */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_UNIT, 4, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR); + g_assert_cmpuint(rsp_upiu.header.response, ==, + UFS_QUERY_RESULT_INVALID_INDEX); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_STRING, 5, 0, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR); + g_assert_cmpuint(rsp_upiu.header.response, ==, + UFS_QUERY_RESULT_INVALID_INDEX); + + /* Invalid Selector (Intended Failure) */ + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_DEVICE, 0, 1, 0, 
&rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR); + g_assert_cmpuint(rsp_upiu.header.response, ==, + UFS_QUERY_RESULT_INVALID_SELECTOR); + + ocs = ufs_send_query(ufs, UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST, + UFS_UPIU_QUERY_OPCODE_READ_DESC, + UFS_QUERY_DESC_IDN_STRING, 0, 1, 0, &rsp_upiu); + g_assert_cmpuint(ocs, ==, UFS_OCS_INVALID_CMD_TABLE_ATTR); + g_assert_cmpuint(rsp_upiu.header.response, ==, + UFS_QUERY_RESULT_INVALID_SELECTOR); + + ufs_exit(ufs, alloc); +} + static void drive_destroy(void *path) { unlink(path); @@ -575,12 +1098,16 @@ static void ufs_register_nodes(void) QOSGraphEdgeOptions edge_opts = { .before_cmd_line = "-blockdev null-co,node-name=drv0,read-zeroes=on", .after_cmd_line = "-device ufs-lu,bus=ufs0,drive=drv0,lun=0", - .extra_device_opts = "addr=04.0,id=ufs0,nutrs=32,nutmrs=8" + .extra_device_opts = "addr=04.0,id=ufs0" }; - QOSGraphTestOptions io_test_opts = { - .before = ufs_blk_test_setup, - }; + QOSGraphTestOptions io_test_opts = { .before = ufs_blk_test_setup, + .edge.extra_device_opts = + "mcq=false,nutrs=32,nutmrs=8" }; + + QOSGraphTestOptions mcq_test_opts = { .before = ufs_blk_test_setup, + .edge.extra_device_opts = + "mcq=true,mcq-maxq=1" }; add_qpci_address(&edge_opts, &(QPCIAddress){ .devfn = QPCI_DEVFN(4, 0) }); @@ -600,7 +1127,14 @@ static void ufs_register_nodes(void) return; } qos_add_test("init", "ufs", ufstest_init, NULL); - qos_add_test("read-write", "ufs", ufstest_read_write, &io_test_opts); + qos_add_test("legacy-read-write", "ufs", ufstest_read_write, &io_test_opts); + qos_add_test("mcq-read-write", "ufs", ufstest_read_write, &mcq_test_opts); + qos_add_test("query-flag", "ufs", ufstest_query_flag_request, + &io_test_opts); + qos_add_test("query-attribute", "ufs", ufstest_query_attr_request, + &io_test_opts); + qos_add_test("query-desciptor", "ufs", ufstest_query_desc_request, + &io_test_opts); } libqos_init(ufs_register_nodes); diff --git a/tests/qtest/vhost-user-test.c b/tests/qtest/vhost-user-test.c index d6075001e7c..75cb3e44b21 100644 --- a/tests/qtest/vhost-user-test.c +++ b/tests/qtest/vhost-user-test.c @@ -12,7 +12,7 @@ #include "libqtest-single.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/config-file.h" #include "qemu/option.h" #include "qemu/range.h" @@ -20,7 +20,7 @@ #include "chardev/char-fe.h" #include "qemu/memfd.h" #include "qemu/module.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "libqos/libqos.h" #include "libqos/pci-pc.h" #include "libqos/virtio-pci.h" @@ -920,7 +920,7 @@ static void wait_for_rings_started(TestServer *s, size_t count) static inline void test_server_connect(TestServer *server) { - test_server_create_chr(server, ",reconnect=1"); + test_server_create_chr(server, ",reconnect-ms=1000"); } static gboolean @@ -1043,7 +1043,8 @@ static void test_multiqueue(void *obj, void *arg, QGuestAllocator *alloc) static uint64_t vu_net_get_features(TestServer *s) { - uint64_t features = 0x1ULL << VHOST_F_LOG_ALL | + uint64_t features = 0x1ULL << VIRTIO_F_VERSION_1 | + 0x1ULL << VHOST_F_LOG_ALL | 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES; if (s->queues > 1) { diff --git a/tests/qtest/virtio-9p-test.c b/tests/qtest/virtio-9p-test.c index 3c8cd235cf0..ac38ccf5951 100644 --- a/tests/qtest/virtio-9p-test.c +++ b/tests/qtest/virtio-9p-test.c @@ -20,6 +20,7 @@ #define tversion(...) v9fs_tversion((TVersionOpt) __VA_ARGS__) #define tattach(...) v9fs_tattach((TAttachOpt) __VA_ARGS__) #define tgetattr(...) 
v9fs_tgetattr((TGetAttrOpt) __VA_ARGS__) +#define tsetattr(...) v9fs_tsetattr((TSetAttrOpt) __VA_ARGS__) #define treaddir(...) v9fs_treaddir((TReadDirOpt) __VA_ARGS__) #define tlopen(...) v9fs_tlopen((TLOpenOpt) __VA_ARGS__) #define twrite(...) v9fs_twrite((TWriteOpt) __VA_ARGS__) @@ -693,6 +694,64 @@ static void fs_unlinkat_hardlink(void *obj, void *data, g_assert(stat(real_file, &st_real) == 0); } +static void fs_use_after_unlink(void *obj, void *data, + QGuestAllocator *t_alloc) +{ + QVirtio9P *v9p = obj; + v9fs_set_allocator(t_alloc); + static const uint32_t write_count = P9_MAX_SIZE / 2; + g_autofree char *real_file = virtio_9p_test_path("09/doa_file"); + g_autofree char *buf = g_malloc0(write_count); + struct stat st_file; + struct v9fs_attr attr; + uint32_t fid_file; + uint32_t count; + + tattach({ .client = v9p }); + + /* create a file "09/doa_file" and make sure it exists and is regular */ + tmkdir({ .client = v9p, .atPath = "/", .name = "09" }); + tlcreate({ .client = v9p, .atPath = "09", .name = "doa_file" }); + g_assert(stat(real_file, &st_file) == 0); + g_assert((st_file.st_mode & S_IFMT) == S_IFREG); + + /* request a FID for that regular file that we can work with next */ + fid_file = twalk({ + .client = v9p, .fid = 0, .path = "09/doa_file" + }).newfid; + g_assert(fid_file != 0); + + /* now first open the file in write mode before ... */ + tlopen({ .client = v9p, .fid = fid_file, .flags = O_WRONLY }); + /* ... removing the file from file system */ + tunlinkat({ .client = v9p, .atPath = "09", .name = "doa_file" }); + + /* file is removed, but we still have it open, so this should succeed */ + tgetattr({ + .client = v9p, .fid = fid_file, .request_mask = P9_GETATTR_BASIC, + .rgetattr.attr = &attr + }); + count = twrite({ + .client = v9p, .fid = fid_file, .offset = 0, .count = write_count, + .data = buf + }).count; + g_assert_cmpint(count, ==, write_count); + + /* truncate file to (arbitrarily chosen) size 2001 */ + tsetattr({ + .client = v9p, .fid = fid_file, .attr = (v9fs_attr) { + .valid = P9_SETATTR_SIZE, + .size = 2001 + } + }); + /* truncate apparently succeeded, let's double-check the size */ + tgetattr({ + .client = v9p, .fid = fid_file, .request_mask = P9_GETATTR_BASIC, + .rgetattr.attr = &attr + }); + g_assert_cmpint(attr.size, ==, 2001); +} + static void cleanup_9p_local_driver(void *data) { /* remove previously created test dir when test is completed */ @@ -758,6 +817,8 @@ static void register_virtio_9p_test(void) qos_add_test("local/hardlink_file", "virtio-9p", fs_hardlink_file, &opts); qos_add_test("local/unlinkat_hardlink", "virtio-9p", fs_unlinkat_hardlink, &opts); + qos_add_test("local/use_after_unlink", "virtio-9p", fs_use_after_unlink, + &opts); } libqos_init(register_virtio_9p_test); diff --git a/tests/qtest/virtio-balloon-test.c b/tests/qtest/virtio-balloon-test.c new file mode 100644 index 00000000000..ecdd363b06c --- /dev/null +++ b/tests/qtest/virtio-balloon-test.c @@ -0,0 +1,57 @@ +/* + * QTest test cases for virtio balloon device + * + * Copyright (c) 2024 Gao Shiyuan + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "libqtest.h" +#include "standard-headers/linux/virtio_balloon.h" + +/* + * https://gitlab.com/qemu-project/qemu/-/issues/2576 + * Used to trigger: + * virtio_address_space_lookup: Assertion `mrs.mr' failed. 
+ */ +static void oss_fuzz_71649(void) +{ + QTestState *s = qtest_init("-device virtio-balloon -machine q35" + " -nodefaults"); + + qtest_outl(s, 0xcf8, 0x80000890); + qtest_outl(s, 0xcfc, 0x2); + qtest_outl(s, 0xcf8, 0x80000891); + qtest_inl(s, 0xcfc); + qtest_quit(s); +} + +static void query_stats(void) +{ + QTestState *s = qtest_init("-device virtio-balloon,id=balloon" + " -nodefaults"); + QDict *ret = qtest_qmp_assert_success_ref( + s, + "{ 'execute': 'qom-get', 'arguments': " \ + "{ 'path': '/machine/peripheral/balloon', " \ + " 'property': 'guest-stats' } }"); + QDict *stats = qdict_get_qdict(ret, "stats"); + + /* We expect 1 entry in the dict for each known kernel stat */ + assert(qdict_size(stats) == VIRTIO_BALLOON_S_NR); + + qobject_unref(ret); + qtest_quit(s); +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + + qtest_add_func("virtio-balloon/oss_fuzz_71649", oss_fuzz_71649); + qtest_add_func("virtio-balloon/query-stats", query_stats); + + return g_test_run(); +} + diff --git a/tests/qtest/virtio-iommu-test.c b/tests/qtest/virtio-iommu-test.c index afb225971d2..98ffa27912c 100644 --- a/tests/qtest/virtio-iommu-test.c +++ b/tests/qtest/virtio-iommu-test.c @@ -105,7 +105,7 @@ static int send_map(QTestState *qts, QVirtioIOMMU *v_iommu, QVirtQueue *vq = v_iommu->vq; uint64_t ro_addr, wr_addr; uint32_t free_head; - struct virtio_iommu_req_map req; + struct virtio_iommu_req_map req = {}; size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail); size_t wr_size = sizeof(struct virtio_iommu_req_tail); struct virtio_iommu_req_tail buffer; @@ -147,7 +147,7 @@ static int send_unmap(QTestState *qts, QVirtioIOMMU *v_iommu, QVirtQueue *vq = v_iommu->vq; uint64_t ro_addr, wr_addr; uint32_t free_head; - struct virtio_iommu_req_unmap req; + struct virtio_iommu_req_unmap req = {}; size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail); size_t wr_size = sizeof(struct virtio_iommu_req_tail); struct virtio_iommu_req_tail buffer; diff --git a/tests/qtest/virtio-net-failover.c b/tests/qtest/virtio-net-failover.c index 73dfabc2728..5baf81c3e6d 100644 --- a/tests/qtest/virtio-net-failover.c +++ b/tests/qtest/virtio-net-failover.c @@ -11,10 +11,11 @@ #include "libqtest.h" #include "libqos/pci.h" #include "libqos/pci-pc.h" -#include "migration-helpers.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qjson.h" +#include "migration/migration-qmp.h" +#include "migration/migration-util.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qjson.h" #include "libqos/malloc-pc.h" #include "libqos/virtio-pci.h" #include "hw/pci/pci.h" @@ -772,7 +773,7 @@ static void test_migrate_in(gconstpointer opaque) check_one_card(qts, true, "standby0", MAC_STANDBY0); check_one_card(qts, false, "primary0", MAC_PRIMARY0); - migrate_incoming_qmp(qts, uri, "{}"); + migrate_incoming_qmp(qts, uri, NULL, "{}"); resp = get_failover_negociated_event(qts); g_assert_cmpstr(qdict_get_str(resp, "device-id"), ==, "standby0"); @@ -894,7 +895,7 @@ static void test_off_migrate_in(gconstpointer opaque) check_one_card(qts, true, "standby0", MAC_STANDBY0); check_one_card(qts, true, "primary0", MAC_PRIMARY0); - migrate_incoming_qmp(qts, uri, "{}"); + migrate_incoming_qmp(qts, uri, NULL, "{}"); check_one_card(qts, true, "standby0", MAC_STANDBY0); check_one_card(qts, true, "primary0", MAC_PRIMARY0); @@ -1021,7 +1022,7 @@ static void test_guest_off_migrate_in(gconstpointer opaque) check_one_card(qts, true, "standby0", MAC_STANDBY0); 
check_one_card(qts, false, "primary0", MAC_PRIMARY0); - migrate_incoming_qmp(qts, uri, "{}"); + migrate_incoming_qmp(qts, uri, NULL, "{}"); check_one_card(qts, true, "standby0", MAC_STANDBY0); check_one_card(qts, false, "primary0", MAC_PRIMARY0); @@ -1746,7 +1747,7 @@ static void test_multi_in(gconstpointer opaque) check_one_card(qts, true, "standby1", MAC_STANDBY1); check_one_card(qts, false, "primary1", MAC_PRIMARY1); - migrate_incoming_qmp(qts, uri, "{}"); + migrate_incoming_qmp(qts, uri, NULL, "{}"); resp = get_failover_negociated_event(qts); g_assert_cmpstr(qdict_get_str(resp, "device-id"), ==, "standby0"); diff --git a/tests/qtest/virtio-net-test.c b/tests/qtest/virtio-net-test.c index 2df75c9780c..60e5229a3de 100644 --- a/tests/qtest/virtio-net-test.c +++ b/tests/qtest/virtio-net-test.c @@ -11,7 +11,7 @@ #include "libqtest-single.h" #include "qemu/iov.h" #include "qemu/module.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "hw/virtio/virtio-net.h" #include "libqos/qgraph.h" #include "libqos/virtio-net.h" diff --git a/tests/qtest/vmcoreinfo-test.c b/tests/qtest/vmcoreinfo-test.c new file mode 100644 index 00000000000..dcf3b5ae058 --- /dev/null +++ b/tests/qtest/vmcoreinfo-test.c @@ -0,0 +1,90 @@ +/* + * qtest vmcoreinfo test case + * + * Copyright Red Hat. 2025. + * + * Authors: + * Ani Sinha + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "libqos/libqos-pc.h" +#include "libqtest.h" +#include "standard-headers/linux/qemu_fw_cfg.h" +#include "libqos/fw_cfg.h" +#include "qemu/bswap.h" +#include "hw/misc/vmcoreinfo.h" + +static void test_vmcoreinfo_write_basic(void) +{ + QFWCFG *fw_cfg; + QOSState *qs; + FWCfgVMCoreInfo info; + size_t filesize; + uint16_t guest_format; + uint16_t host_format; + uint32_t size; + uint64_t paddr; + + qs = qtest_pc_boot("-device vmcoreinfo"); + fw_cfg = pc_fw_cfg_init(qs->qts); + + memset(&info, 0 , sizeof(info)); + /* read vmcoreinfo and read back the host format */ + filesize = qfw_cfg_read_file(fw_cfg, qs, FW_CFG_VMCOREINFO_FILENAME, + &info, sizeof(info)); + g_assert_cmpint(filesize, ==, sizeof(info)); + + host_format = le16_to_cpu(info.host_format); + g_assert_cmpint(host_format, ==, FW_CFG_VMCOREINFO_FORMAT_ELF); + + memset(&info, 0 , sizeof(info)); + info.guest_format = cpu_to_le16(FW_CFG_VMCOREINFO_FORMAT_ELF); + info.size = cpu_to_le32(1 * MiB); + info.paddr = cpu_to_le64(0xffffff00); + info.host_format = cpu_to_le16(host_format); + + /* write the values to the host */ + filesize = qfw_cfg_write_file(fw_cfg, qs, FW_CFG_VMCOREINFO_FILENAME, + &info, sizeof(info)); + g_assert_cmpint(filesize, ==, sizeof(info)); + + memset(&info, 0 , sizeof(info)); + + /* now read back the values we wrote and compare that they are the same */ + filesize = qfw_cfg_read_file(fw_cfg, qs, FW_CFG_VMCOREINFO_FILENAME, + &info, sizeof(info)); + g_assert_cmpint(filesize, ==, sizeof(info)); + + size = le32_to_cpu(info.size); + paddr = le64_to_cpu(info.paddr); + guest_format = le16_to_cpu(info.guest_format); + + g_assert_cmpint(size, ==, 1 * MiB); + g_assert_cmpint(paddr, ==, 0xffffff00); + g_assert_cmpint(guest_format, ==, FW_CFG_VMCOREINFO_FORMAT_ELF); + + pc_fw_cfg_uninit(fw_cfg); + qtest_shutdown(qs); +} + +int main(int argc, char **argv) +{ + const char *arch = qtest_get_arch(); + + g_test_init(&argc, &argv, NULL); + + if (strcmp(arch, "i386") && strcmp(arch, "x86_64")) { + /* skip for non-x86 */ + 
exit(EXIT_SUCCESS); + } + + qtest_add_func("vmcoreinfo/basic-write", + test_vmcoreinfo_write_basic); + + return g_test_run(); +} diff --git a/tests/qtest/vmgenid-test.c b/tests/qtest/vmgenid-test.c index 29fee9e7c00..33e96b7c559 100644 --- a/tests/qtest/vmgenid-test.c +++ b/tests/qtest/vmgenid-test.c @@ -15,7 +15,7 @@ #include "boot-sector.h" #include "acpi-utils.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #define VGID_GUID "324e6eaf-d1d1-4bf6-bf41-b9bb6c91fb87" #define VMGENID_GUID_OFFSET 40 /* allow space for @@ -61,7 +61,7 @@ static uint32_t acpi_find_vgia(QTestState *qts) /* The GUID is written at a fixed offset into the fw_cfg file * in order to implement the "OVMF SDT Header probe suppressor" - * see docs/specs/vmgenid.txt for more details + * see docs/specs/vmgenid.rst for more details */ guid_offset = le32_to_cpu(vgia_val) + VMGENID_GUID_OFFSET; g_free(table_aml); diff --git a/tests/qtest/wdt_ib700-test.c b/tests/qtest/wdt_ib700-test.c index 797288d939f..17547571629 100644 --- a/tests/qtest/wdt_ib700-test.c +++ b/tests/qtest/wdt_ib700-test.c @@ -9,7 +9,7 @@ #include "qemu/osdep.h" #include "libqtest.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/timer.h" static void qmp_check_no_event(QTestState *s) diff --git a/tests/tcg/Makefile.target b/tests/tcg/Makefile.target index 452a2cde65e..af72903f898 100644 --- a/tests/tcg/Makefile.target +++ b/tests/tcg/Makefile.target @@ -90,6 +90,7 @@ CFLAGS= LDFLAGS= QEMU_OPTS= +CHECK_PLUGIN_OUTPUT_COMMAND= # If TCG debugging, or TCI is enabled things are a lot slower @@ -102,9 +103,14 @@ ifeq ($(filter %-softmmu, $(TARGET)),) # then the target. If there are common tests shared between # sub-targets (e.g. ARM & AArch64) then it is up to # $(TARGET_NAME)/Makefile.target to include the common parent -# architecture in its VPATH. +# architecture in its VPATH. However some targets are so minimal we +# can't even build the multiarch tests. +ifneq ($(filter $(TARGET_NAME),aarch64_be),) +-include $(SRC_PATH)/tests/tcg/$(TARGET_NAME)/Makefile.target +else -include $(SRC_PATH)/tests/tcg/multiarch/Makefile.target -include $(SRC_PATH)/tests/tcg/$(TARGET_NAME)/Makefile.target +endif # Add the common build options CFLAGS+=-Wall -Werror -O0 -g -fno-strict-aliasing @@ -121,8 +127,14 @@ else # build options for bare programs are usually pretty different. They # are expected to provide their own build recipes. EXTRA_CFLAGS += -ffreestanding -fno-stack-protector + +# We skip the multiarch tests if the target hasn't provided a boot.S +MULTIARCH_SOFTMMU_TARGETS = i386 alpha aarch64 arm loongarch64 s390x x86_64 + +ifneq ($(filter $(TARGET_NAME),$(MULTIARCH_SOFTMMU_TARGETS)),) -include $(SRC_PATH)/tests/tcg/minilib/Makefile.target -include $(SRC_PATH)/tests/tcg/multiarch/system/Makefile.softmmu-target +endif -include $(SRC_PATH)/tests/tcg/$(TARGET_NAME)/Makefile.softmmu-target endif @@ -145,26 +157,62 @@ ifeq ($(CONFIG_PLUGIN),y) PLUGIN_SRC=$(SRC_PATH)/tests/tcg/plugins PLUGIN_LIB=../plugins VPATH+=$(PLUGIN_LIB) -PLUGINS=$(patsubst %.c, lib%.so, $(notdir $(wildcard $(PLUGIN_SRC)/*.c))) +# Some plugins need to be disabled for all tests to avoid exponential explosion. +# For example, libpatch.so only needs to run against the arch-specific patch +# target test, so we explicitly run it in the arch-specific Makefile. 
+DISABLE_PLUGINS=libpatch.so + +# Likewise don't bother with the syscall plugin for softmmu +ifneq ($(filter %-softmmu, $(TARGET)),) +DISABLE_PLUGINS += libsyscall.so +endif + +PLUGINS=$(filter-out $(DISABLE_PLUGINS), \ + $(patsubst %.c, lib%.so, $(notdir $(wildcard $(PLUGIN_SRC)/*.c)))) + +strip-plugin = $(wordlist 1, 1, $(subst -with-, ,$1)) +extract-plugin = $(wordlist 2, 2, $(subst -with-, ,$1)) +extract-test = $(subst run-plugin-,,$(wordlist 1, 1, $(subst -with-, ,$1))) # We need to ensure expand the run-plugin-TEST-with-PLUGIN # pre-requistes manually here as we can't use stems to handle it. We # only expand MULTIARCH_TESTS which are common on most of our targets -# to avoid an exponential explosion as new tests are added. We also -# add some special helpers the run-plugin- rules can use below. +# and rotate the plugins so we don't grow too out of control as new +# tests are added. Plugins that need to run with a specific test +# should ensure they add their combination to EXTRA_RUNS. ifneq ($(MULTIARCH_TESTS),) -$(foreach p,$(PLUGINS), \ - $(foreach t,$(MULTIARCH_TESTS),\ - $(eval run-plugin-$(t)-with-$(p): $t $p) \ - $(eval RUN_TESTS+=run-plugin-$(t)-with-$(p)))) + +# Extract extra tests from the extra test+plugin combination. +EXTRA_TESTS_WITH_PLUGIN=$(foreach test, \ + $(EXTRA_RUNS_WITH_PLUGIN),$(call extract-test,$(test))) +# Exclude tests that were specified to run with specific plugins from the tests +# which can run with any plugin combination, so we don't run it twice. +MULTIARCH_TESTS:=$(filter-out $(EXTRA_TESTS_WITH_PLUGIN), $(MULTIARCH_TESTS)) + +NUM_PLUGINS := $(words $(PLUGINS)) +NUM_TESTS := $(words $(MULTIARCH_TESTS)) + +define mod_plus_one + $(shell $(PYTHON) -c "print( ($(1) % $(2)) + 1 )") +endef + +# Rules for running tests with any plugin combination, i.e., no specific plugin. +$(foreach _idx, $(shell seq 1 $(NUM_TESTS)), \ + $(eval _test := $(word $(_idx), $(MULTIARCH_TESTS))) \ + $(eval _plugin := $(word $(call mod_plus_one, $(_idx), $(NUM_PLUGINS)), $(PLUGINS))) \ + $(eval run-plugin-$(_test)-with-$(_plugin): $(_test) $(_plugin)) \ + $(eval RUN_TESTS+=run-plugin-$(_test)-with-$(_plugin))) + +# Rules for running extra tests with specific plugins. +$(foreach f,$(EXTRA_RUNS_WITH_PLUGIN), \ + $(eval $(f): $(call extract-test,$(f)) $(call extract-plugin,$(f)))) + endif # MULTIARCH_TESTS endif # CONFIG_PLUGIN -strip-plugin = $(wordlist 1, 1, $(subst -with-, ,$1)) -extract-plugin = $(wordlist 2, 2, $(subst -with-, ,$1)) - RUN_TESTS+=$(EXTRA_RUNS) +RUN_TESTS+=$(EXTRA_RUNS_WITH_PLUGIN) # Some plugins need additional arguments above the default to fully # exercise things. We can define them on a per-test basis here. 
@@ -172,13 +220,17 @@ run-plugin-%-with-libmem.so: PLUGIN_ARGS=$(COMMA)inline=true ifeq ($(filter %-softmmu, $(TARGET)),) run-%: % - $(call run-test, $<, $(QEMU) $(QEMU_OPTS) $<) + $(call run-test, $<, env QEMU=$(QEMU) $(QEMU) $(QEMU_OPTS) $<) run-plugin-%: - $(call run-test, $@, $(QEMU) $(QEMU_OPTS) \ + $(call run-test, $@, env QEMU=$(QEMU) $(QEMU) $(QEMU_OPTS) \ -plugin $(PLUGIN_LIB)/$(call extract-plugin,$@)$(PLUGIN_ARGS) \ -d plugin -D $*.pout \ $(call strip-plugin,$<)) + $(if $(CHECK_PLUGIN_OUTPUT_COMMAND), \ + $(call quiet-command, $(CHECK_PLUGIN_OUTPUT_COMMAND) $*.pout, \ + TEST, check plugin $(call extract-plugin,$@) output \ + with $(call strip-plugin,$<))) else run-%: % $(call run-test, $<, \ @@ -193,6 +245,10 @@ run-plugin-%: -plugin $(PLUGIN_LIB)/$(call extract-plugin,$@)$(PLUGIN_ARGS) \ -d plugin -D $*.pout \ $(QEMU_OPTS) $(call strip-plugin,$<)) + $(if $(CHECK_PLUGIN_OUTPUT_COMMAND), \ + $(call quiet-command, $(CHECK_PLUGIN_OUTPUT_COMMAND) $*.pout, \ + TEST, check plugin $(call extract-plugin,$@) output \ + with $(call strip-plugin,$<))) endif gdb-%: % diff --git a/tests/tcg/aarch64/Makefile.softmmu-target b/tests/tcg/aarch64/Makefile.softmmu-target index 139e04d15f0..f7a7d2b800f 100644 --- a/tests/tcg/aarch64/Makefile.softmmu-target +++ b/tests/tcg/aarch64/Makefile.softmmu-target @@ -2,14 +2,22 @@ # Aarch64 system tests # -AARCH64_SYSTEM_SRC=$(SRC_PATH)/tests/tcg/aarch64/system +AARCH64_SRC=$(SRC_PATH)/tests/tcg/aarch64 +AARCH64_SYSTEM_SRC=$(AARCH64_SRC)/system + VPATH+=$(AARCH64_SYSTEM_SRC) # These objects provide the basic boot code and helper functions for all tests CRT_OBJS=boot.o -AARCH64_TEST_SRCS=$(wildcard $(AARCH64_SYSTEM_SRC)/*.c) -AARCH64_TESTS = $(patsubst $(AARCH64_SYSTEM_SRC)/%.c, %, $(AARCH64_TEST_SRCS)) +AARCH64_TEST_C_SRCS=$(wildcard $(AARCH64_SYSTEM_SRC)/*.c) +AARCH64_TEST_S_SRCS=$(AARCH64_SYSTEM_SRC)/mte.S + +AARCH64_C_TESTS = $(patsubst $(AARCH64_SYSTEM_SRC)/%.c, %, $(AARCH64_TEST_C_SRCS)) +AARCH64_S_TESTS = $(patsubst $(AARCH64_SYSTEM_SRC)/%.S, %, $(AARCH64_TEST_S_SRCS)) + +AARCH64_TESTS = $(AARCH64_C_TESTS) +AARCH64_TESTS += $(AARCH64_S_TESTS) CRT_PATH=$(AARCH64_SYSTEM_SRC) LINK_SCRIPT=$(AARCH64_SYSTEM_SRC)/kernel.ld @@ -21,7 +29,8 @@ LDFLAGS+=-static -nostdlib $(CRT_OBJS) $(MINILIB_OBJS) -lgcc config-cc.mak: Makefile $(quiet-@)( \ - $(call cc-option,-march=armv8.3-a, CROSS_CC_HAS_ARMV8_3)) 3> config-cc.mak + $(call cc-option,-march=armv8.3-a, CROSS_CC_HAS_ARMV8_3); \ + $(call cc-option,-march=armv8.5-a+memtag, CROSS_CC_HAS_ARMV8_MTE)) 3> config-cc.mak -include config-cc.mak # building head blobs @@ -59,7 +68,8 @@ run-plugin-semiconsole-with-%: semiconsole # vtimer test needs EL2 QEMU_EL2_MACHINE=-machine virt,virtualization=on,gic-version=2 -cpu cortex-a57 -smp 4 -run-vtimer: QEMU_OPTS=$(QEMU_EL2_MACHINE) $(QEMU_BASE_ARGS) -kernel +QEMU_EL2_BASE_ARGS=-semihosting-config enable=on,target=native,chardev=output,arg="2" +run-vtimer: QEMU_OPTS=$(QEMU_EL2_MACHINE) $(QEMU_EL2_BASE_ARGS) -kernel # Simple Record/Replay Test .PHONY: memory-record @@ -82,9 +92,44 @@ EXTRA_RUNS+=run-memory-replay ifneq ($(CROSS_CC_HAS_ARMV8_3),) pauth-3: CFLAGS += $(CROSS_CC_HAS_ARMV8_3) +# This test explicitly checks the output of the pauth operation so we +# must force the use of the QARMA5 algorithm for it. 
+run-pauth-3: QEMU_BASE_MACHINE=-M virt -cpu max,pauth-qarma5=on -display none else pauth-3: $(call skip-test, "BUILD of $@", "missing compiler support") run-pauth-3: $(call skip-test, "RUN of pauth-3", "not built") endif + +ifneq ($(CROSS_CC_HAS_ARMV8_MTE),) +QEMU_MTE_ENABLED_MACHINE=-M virt,mte=on -cpu max -display none +QEMU_OPTS_WITH_MTE_ON = $(QEMU_MTE_ENABLED_MACHINE) $(QEMU_BASE_ARGS) -kernel +mte: CFLAGS+=-march=armv8.5-a+memtag +mte: mte.S $(LINK_SCRIPT) $(CRT_OBJS) $(MINILIB_OBJS) + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) + +run-mte: QEMU_OPTS=$(QEMU_OPTS_WITH_MTE_ON) +run-mte: mte + +ifeq ($(GDB_SUPPORTS_MTE_IN_BAREMETAL),y) +run-gdbstub-mte: QEMU_OPTS=$(QEMU_OPTS_WITH_MTE_ON) +run-gdbstub-mte: mte + $(call run-test, $@, $(GDB_SCRIPT) \ + --output run-gdbstub-mte.out \ + --gdb $(GDB) \ + --qemu $(QEMU) --qargs "-chardev null$(COMMA)id=output $(QEMU_OPTS)" \ + --bin $< --test $(AARCH64_SRC)/gdbstub/test-mte.py -- --mode=system, \ + gdbstub MTE support) + +EXTRA_RUNS += run-gdbstub-mte +else # !GDB_SUPPORTS_MTE_IN_BAREMETAL +run-gdbstub-mte: + $(call skip-test, "RUN of gdbstub-mte", "GDB does not support MTE in baremetal!") +endif +else # !CROSS_CC_HAS_ARMV8_MTE +mte: + $(call skip-test, "BUILD of $@", "missing compiler support") +run-mte: + $(call skip-test, "RUN of mte", "not built") +endif diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target index 8cc62eb4561..16ddcf4f883 100644 --- a/tests/tcg/aarch64/Makefile.target +++ b/tests/tcg/aarch64/Makefile.target @@ -83,7 +83,8 @@ test-aes: CFLAGS += -O -march=armv8-a+aes test-aes: test-aes-main.c.inc # Vector SHA1 -sha1-vector: CFLAGS=-O3 +# Work around compiler false-positive warning, as we do for the 'sha1' test +sha1-vector: CFLAGS=-O3 -Wno-stringop-overread sha1-vector: sha1.c $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) run-sha1-vector: sha1-vector run-sha1 @@ -138,7 +139,8 @@ run-gdbstub-mte: mte-8 $(call run-test, $@, $(GDB_SCRIPT) \ --gdb $(GDB) \ --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ - --bin $< --test $(AARCH64_SRC)/gdbstub/test-mte.py, \ + --bin $< --test $(AARCH64_SRC)/gdbstub/test-mte.py \ + -- --mode=user, \ gdbstub MTE support) EXTRA_RUNS += run-gdbstub-mte diff --git a/tests/tcg/aarch64/gdbstub/test-mte.py b/tests/tcg/aarch64/gdbstub/test-mte.py index 66f9c25f8a4..9ad98e7a54c 100644 --- a/tests/tcg/aarch64/gdbstub/test-mte.py +++ b/tests/tcg/aarch64/gdbstub/test-mte.py @@ -1,34 +1,59 @@ from __future__ import print_function # # Test GDB memory-tag commands that exercise the stubs for the qIsAddressTagged, -# qMemTag, and QMemTag packets. Logical tag-only commands rely on local -# operations, hence don't exercise any stub. +# qMemTag, and QMemTag packets, which are used for manipulating allocation tags. +# Logical tags-related commands rely on local operations, hence don't exercise +# any stub and so are not used in this test. # -# The test consists in breaking just after a atag() call (which sets the -# allocation tag -- see mte-8.c for details) and setting/getting tags in -# different memory locations and ranges starting at the address of the array -# 'a'. +# The test consists in breaking just after a tag is set in a specific memory +# chunk, and then using the GDB 'memory-tagging' subcommands to set/get tags in +# different memory locations and ranges in the MTE-enabled memory chunk. 
# # This is launched via tests/guest-debug/run-test.py # -import gdb +try: + import gdb +except ModuleNotFoundError: + from sys import exit + exit("This script must be launched via tests/guest-debug/run-test.py!") import re -from test_gdbstub import main, report +from sys import argv +from test_gdbstub import arg_parser, main, report -PATTERN_0 = "Memory tags for address 0x[0-9a-f]+ match \\(0x[0-9a-f]+\\)." -PATTERN_1 = ".*(0x[0-9a-f]+)" +PATTERN_0 = r"Memory tags for address 0x[0-9a-f]+ match \(0x[0-9a-f]+\)." +PATTERN_1 = r".*(0x[0-9a-f]+)" def run_test(): - gdb.execute("break 95", False, True) + p = arg_parser(prog="test-mte.py", description="TCG MTE tests.") + p.add_argument("--mode", help="Run test for QEMU system or user mode.", + required=True, choices=['system','user']) + + args = p.parse_args(args=argv) + + if args.mode == "system": + # Break address: where to break before performing the tests + # See mte.S for details about this label. + ba = "main_end" + # Tagged address: the start of the MTE-enabled memory chunk to be tested + # 'tagged_addr' (x1) is a pointer to the MTE-enabled page. See mte.S. + ta = "$x1" + else: # mode="user" + # Line 95 in mte-8.c + ba = "95" + # 'a' array. See mte-8.c + ta = "a" + + gdb.execute(f"break {ba}", False, True) gdb.execute("continue", False, True) + try: - # Test if we can check correctly that the allocation tag for - # array 'a' matches the logical tag after atag() is called. - co = gdb.execute("memory-tag check a", False, True) + # Test if we can check correctly that the allocation tag for the address + # in {ta} matches the logical tag in {ta}. + co = gdb.execute(f"memory-tag check {ta}", False, True) tags_match = re.findall(PATTERN_0, co, re.MULTILINE) if tags_match: report(True, f"{tags_match[0]}") @@ -39,20 +64,20 @@ def run_test(): # tags rely on local operation and so don't exercise any stub. # Set the allocation tag for the first granule (16 bytes) of - # address starting at 'a' address to a known value, i.e. 0x04. - gdb.execute("memory-tag set-allocation-tag a 1 04", False, True) + # address starting at {ta} address to a known value, i.e. 0x04. + gdb.execute(f"memory-tag set-allocation-tag {ta} 1 04", False, True) # Then set the allocation tag for the second granule to a known # value, i.e. 0x06. This tests that contiguous tag granules are - # set correct and don't run over each other. - gdb.execute("memory-tag set-allocation-tag a+16 1 06", False, True) + # set correctly and don't run over each other. + gdb.execute(f"memory-tag set-allocation-tag {ta}+16 1 06", False, True) # Read the known values back and check if they remain the same. - co = gdb.execute("memory-tag print-allocation-tag a", False, True) + co = gdb.execute(f"memory-tag print-allocation-tag {ta}", False, True) first_tag = re.match(PATTERN_1, co)[1] - co = gdb.execute("memory-tag print-allocation-tag a+16", False, True) + co = gdb.execute(f"memory-tag print-allocation-tag {ta}+16", False, True) second_tag = re.match(PATTERN_1, co)[1] if first_tag == "0x4" and second_tag == "0x6": @@ -61,15 +86,15 @@ def run_test(): report(False, "Can't set/print allocation tags!") # Now test fill pattern by setting a whole page with a pattern. - gdb.execute("memory-tag set-allocation-tag a 4096 0a0b", False, True) + gdb.execute(f"memory-tag set-allocation-tag {ta} 4096 0a0b", False, True) # And read back the tags of the last two granules in page so # we also test if the pattern is set correctly up to the end of # the page. 
- co = gdb.execute("memory-tag print-allocation-tag a+4096-32", False, True) + co = gdb.execute(f"memory-tag print-allocation-tag {ta}+4096-32", False, True) tag = re.match(PATTERN_1, co)[1] - co = gdb.execute("memory-tag print-allocation-tag a+4096-16", False, True) + co = gdb.execute(f"memory-tag print-allocation-tag {ta}+4096-16", False, True) last_tag = re.match(PATTERN_1, co)[1] if tag == "0xa" and last_tag == "0xb": @@ -78,8 +103,8 @@ def run_test(): report(False, "Fill pattern failed!") except gdb.error: - # This usually happens because a GDB version that does not - # support memory tagging was used to run the test. + # This usually happens because a GDB version that does not support + # memory tagging was used to run the test. report(False, "'memory-tag' command failed!") diff --git a/tests/tcg/aarch64/system/boot.S b/tests/tcg/aarch64/system/boot.S index 501685d0ece..8bfa4e4efc7 100644 --- a/tests/tcg/aarch64/system/boot.S +++ b/tests/tcg/aarch64/system/boot.S @@ -16,6 +16,7 @@ #define semihosting_call hlt 0xf000 #define SYS_WRITEC 0x03 /* character to debug channel */ #define SYS_WRITE0 0x04 /* string to debug channel */ +#define SYS_GET_CMDLINE 0x15 /* get command line */ #define SYS_EXIT 0x18 .align 12 @@ -70,22 +71,172 @@ lower_a32_sync: lower_a32_irq: lower_a32_fiq: lower_a32_serror: + adr x1, .unexp_excp +exit_msg: mov x0, SYS_WRITE0 - adr x1, .error - semihosting_call - mov x0, SYS_EXIT - mov x1, 1 semihosting_call + mov x0, 1 /* EXIT_FAILURE */ + bl _exit /* never returns */ .section .rodata -.error: - .string "Terminated by exception.\n" +.unexp_excp: + .string "Unexpected exception.\n" +.high_el_msg: + .string "Started in lower EL than requested.\n" +.unexp_el0: + .string "Started in invalid EL.\n" + + .align 8 +.get_cmd: + .quad cmdline + .quad 128 .text .align 4 .global __start __start: + /* + * Initialise the stack for whatever EL we are in before + * anything else, we need it to be able to _exit cleanly. + * It's smaller than the stack we pass to the C code but we + * don't need much. + */ + adrp x0, system_stack_end + add x0, x0, :lo12:system_stack_end + mov sp, x0 + + /* + * The test can set the semihosting command line to the target + * EL needed for the test. However if no semihosting args are set we will + * end up with -kernel/-append data (see semihosting_arg_fallback). + * Keep the normalised target in w11. + */ + mov x0, SYS_GET_CMDLINE + adr x1, .get_cmd + semihosting_call + adrp x10, cmdline + add x10, x10, :lo12:cmdline + ldrb w11, [x10] + + /* sanity check, normalise char to EL, clamp to 1 if outside range */ + subs w11, w11, #'0' + b.lt el_default + cmp w11, #3 + b.gt el_default + b 1f + +el_high: + adr x1, .high_el_msg + b exit_msg + +el_default: + mov w11, #1 + +1: + /* Determine current Exception Level */ + mrs x0, CurrentEL + lsr x0, x0, #2 /* CurrentEL[3:2] contains the current EL */ + + /* Are we already in a lower EL than we want? */ + cmp w11, w0 + bgt el_high + + /* Branch based on current EL */ + cmp x0, #3 + b.eq setup_el3 + cmp x0, #2 + b.eq setup_el2 + cmp x0, #1 + b.eq at_testel /* Already at EL1, skip transition */ + + /* Should not be at EL0 - error out */ + adr x1, .unexp_el0 + b exit_msg + +setup_el3: + /* Ensure we trap if we get anything wrong */ + adr x0, vector_table + msr vbar_el3, x0 + + /* Does the test want to be at EL3? 
*/ + cmp w11, #3 + beq at_testel + + /* Configure EL3 to for lower states (EL2 or EL1) */ + mrs x0, scr_el3 + orr x0, x0, #(1 << 10) /* RW = 1: EL2/EL1 execution state is AArch64 */ + orr x0, x0, #(1 << 0) /* NS = 1: Non-secure state */ + msr scr_el3, x0 + + /* + * We need to check if EL2 is actually enabled via ID_AA64PFR0_EL1, + * otherwise we should just jump straight to EL1. + */ + mrs x0, id_aa64pfr0_el1 + ubfx x0, x0, #8, #4 /* Extract EL2 field (bits 11:8) */ + cbz x0, el2_not_present /* If field is 0 no EL2 */ + + + /* Prepare SPSR for exception return to EL2 */ + mov x0, #0x3c9 /* DAIF bits and EL2h mode (9) */ + msr spsr_el3, x0 + + /* Set EL2 entry point */ + adr x0, setup_el2 + msr elr_el3, x0 + + /* Return to EL2 */ + eret + +el2_not_present: + /* Initialize SCTLR_EL1 with reset value */ + msr sctlr_el1, xzr + + /* Set EL1 entry point */ + adr x0, at_testel + msr elr_el3, x0 + + /* Prepare SPSR for exception return to EL1h with interrupts masked */ + mov x0, #0x3c5 /* DAIF bits and EL1h mode (5) */ + msr spsr_el3, x0 + + isb /* Synchronization barrier */ + eret /* Jump to EL1 */ + +setup_el2: + /* Ensure we trap if we get anything wrong */ + adr x0, vector_table + msr vbar_el2, x0 + + /* Does the test want to be at EL2? */ + cmp w11, #2 + beq at_testel + + /* Configure EL2 to allow transition to EL1 */ + mrs x0, hcr_el2 + orr x0, x0, #(1 << 31) /* RW = 1: EL1 execution state is AArch64 */ + msr hcr_el2, x0 + + /* Initialize SCTLR_EL1 with reset value */ + msr sctlr_el1, xzr + + /* Set EL1 entry point */ + adr x0, at_testel + msr elr_el2, x0 + + /* Prepare SPSR for exception return to EL1 */ + mov x0, #(0x5 << 0) /* EL1h (SPx), with interrupts disabled */ + msr spsr_el2, x0 + + /* Return to EL1 */ + eret + + /* + * At the target EL for the test, usually EL1. Note we still + * set everything up as if we were at EL1. + */ +at_testel: /* Installs a table of exception vectors to catch and handle all exceptions by terminating the process with a diagnostic. */ adr x0, vector_table @@ -101,7 +252,7 @@ __start: * maps RAM to the first Gb. The stage2 tables have two 2mb * translation block entries covering a series of adjacent * 4k pages. - */ + */ /* Stage 1 entry: indexed by IA[38:30] */ adr x1, . /* phys address */ @@ -135,6 +286,17 @@ __start: orr x1, x1, x3 str x1, [x2] /* 2nd 2mb (.data & .bss)*/ + /* Third block: at 'mte_page', set in kernel.ld */ + adrp x1, mte_page + add x1, x1, :lo12:mte_page + bic x1, x1, #(1 << 21) - 1 + and x4, x1, x5 + add x2, x0, x4, lsr #(21 - 3) + /* attr(AF, NX, block, AttrIndx=Attr1) */ + ldr x3, =(3 << 53) | 0x401 | (1 << 2) + orr x1, x1, x3 + str x1, [x2] + /* Setup/enable the MMU. */ /* @@ -188,7 +350,8 @@ __start: orr x0, x0, #(3 << 16) msr cpacr_el1, x0 - /* Setup some stack space and enter the test code. + /* + * Setup some stack space before we enter the test code. * Assume everything except the return value is garbage when we * return, we won't need it. 
*/ @@ -223,6 +386,11 @@ __sys_outc: ret .data + + .align 8 +cmdline: + .space 128, 0 + .align 12 /* Translation table @@ -236,6 +404,10 @@ ttb_stage2: .space 4096, 0 .align 12 +system_stack: + .space 4096, 0 +system_stack_end: + stack: .space 65536, 0 stack_end: diff --git a/tests/tcg/aarch64/system/feat-xs.c b/tests/tcg/aarch64/system/feat-xs.c new file mode 100644 index 00000000000..f310fc837e0 --- /dev/null +++ b/tests/tcg/aarch64/system/feat-xs.c @@ -0,0 +1,27 @@ +/* + * FEAT_XS Test + * + * Copyright (c) 2024 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include <inttypes.h> +#include <minilib.h> + +int main(void) +{ + uint64_t isar1; + + asm volatile ("mrs %0, id_aa64isar1_el1" : "=r"(isar1)); + if (((isar1 >> 56) & 0xf) < 1) { + ml_printf("FEAT_XS not supported by CPU"); + return 1; + } + /* VMALLE1NXS */ + asm volatile (".inst 0xd508971f"); + /* VMALLE1OSNXS */ + asm volatile (".inst 0xd508911f"); + + return 0; +} diff --git a/tests/tcg/aarch64/system/kernel.ld b/tests/tcg/aarch64/system/kernel.ld index 7b3a76dcbf3..aef043e31db 100644 --- a/tests/tcg/aarch64/system/kernel.ld +++ b/tests/tcg/aarch64/system/kernel.ld @@ -1,23 +1,32 @@ ENTRY(__start) -SECTIONS -{ - /* virt machine, RAM starts at 1gb */ - . = (1 << 30); +MEMORY { + /* On virt machine RAM starts at 1 GiB. */ + + /* Align text and rodata to the 1st 2 MiB chunk. */ + TXT (rx) : ORIGIN = 1 << 30, LENGTH = 2M + /* Align r/w data to the 2nd 2 MiB chunk. */ + DAT (rw) : ORIGIN = (1 << 30) + 2M, LENGTH = 2M + /* Align the MTE-enabled page to the 3rd 2 MiB chunk. */ + TAG (rw) : ORIGIN = (1 << 30) + 4M, LENGTH = 2M +} + +SECTIONS { .text : { *(.text) - } - .rodata : { *(.rodata) - } - /* align r/w section to next 2mb */ - . = ALIGN(1 << 21); + } >TXT .data : { *(.data) - } - .bss : { *(.bss) - } + } >DAT + .tag : { + /* + * Symbol 'mte_page' is used in boot.S to setup the PTE and in the mte.S + * test as the address that the MTE instructions operate on. + */ + mte_page = .; + } >TAG /DISCARD/ : { *(.ARM.attributes) } diff --git a/tests/tcg/aarch64/system/mte.S b/tests/tcg/aarch64/system/mte.S new file mode 100644 index 00000000000..b611240a95c --- /dev/null +++ b/tests/tcg/aarch64/system/mte.S @@ -0,0 +1,109 @@ +/* + * Code to help test the MTE gdbstubs in system mode. + * + * Copyright (c) 2024 Linaro Limited + * + * Author: Gustavo Romero + * + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#define addr x0 /* Ptr to the start of the MTE-enabled page. */ +#define tagged_addr x1 /* 'addr' ptr with a random-generated tag added. */ +#define tmp0 x2 /* Scratch register. */ +#define tmp1 x3 /* Scratch register. */ +#define tmp2 x4 /* Scratch register. */ +#define tmp3 x5 /* Scratch register. */ + + .file "mte.S" + + .text + .align 4 + + .globl main + .type main, @function + +main: + /* + * Set MAIR_EL1 (Memory Attribute Index Register). In boot.S, the + * attribute index for .mte_page is set to point to MAIR_EL1 field Attr1 + * (AttrIndx=Attr1), so set Attr1 as Tagged Normal (MTE) to enable MTE + * on this page. + * + * Attr1 = 0xF0 => Tagged Normal (MTE) + */ + mrs tmp0, mair_el1 + orr tmp0, tmp0, (0xF0 << 8) + msr mair_el1, tmp0 + + /* + * Set TCR_EL1 (Translation Control Registers) to ignore the top byte + * in the translated addresses so it can be used to keep the tags. 
+ * + * TBI0[37] = 0b1 => Top Byte ignored and used for tagged addresses + */ + mrs tmp1, tcr_el1 + orr tmp1, tmp1, (1 << 37) + msr tcr_el1, tmp1 + + /* + * Set SCTLR_EL1 (System Control Register) to enable the use of MTE + * insns., like stg & friends, and to enable synchronous exception in + * case of a tag mismatch, i.e., when the logical tag in 'tagged_addr' + * is different from the allocation tag related to 'addr' address. + * + * ATA[43] = 0b1 => Enable access to allocation tags at EL1 + * TCF[41:40] = 0b01 => Tag Check Faults cause a synchronous exception + * + */ + mrs tmp2, sctlr_el1 + mov tmp3, (1 << 43) | (1 << 40) + orr tmp2, tmp2, tmp3 + msr sctlr_el1, tmp2 + + isb + + /* + * MTE-enabled page resides at the 3rd 2MB chunk in the second 1GB + * block, i.e., at 0x40400000 address. See .mte_page section in boot.S + * and kernel.ld (where the address is effectively computed). + * + * Load .mte_page address into 'addr' register. + */ + adrp addr, mte_page + add addr, addr, :lo12:mte_page + + /* + * Set GCR for random tag generation. 0xA5 is just a random value to set + * GCR != 0 so the tag generated by 'irg' insn. is not zero, which is + * more interesting for the tests than when tag is zero. + */ + mov tmp0, 0xA5 + msr gcr_el1, tmp0 + + /* + * Generate a logical tag, add it to 'addr' address and put it into + * 'tagged_addr'. + */ + irg tagged_addr, addr + + /* + * Store the generated tag to memory region pointed to by 'addr', i.e. + * set the allocation tag for granule at 'addr'. The tag is extracted + * by stg from tagged_addr pointer. + */ + stg tagged_addr, [addr] + + /* + * Store a random value (0xdeadbeef) to tagged_addr address. This must + * not cause any Tag Check Fault since logical tag in tagged_addr and + * allocation tag associated with the memory pointed by tagged_addr are + * set the same, otherwise something is off and the test fails -- an + * exception is generated. + */ + ldr tmp1, =0xdeadbeef + str tmp1, [tagged_addr] + + /* This label is used by GDB Python script test-mte.py. */ +main_end: + ret diff --git a/tests/tcg/aarch64_be/Makefile.target b/tests/tcg/aarch64_be/Makefile.target new file mode 100644 index 00000000000..cbe5fa0b2db --- /dev/null +++ b/tests/tcg/aarch64_be/Makefile.target @@ -0,0 +1,17 @@ +# -*- Mode: makefile -*- +# +# A super basic AArch64 BE makefile. As we don't have any big-endian +# libc available the best we can do is a basic Hello World. 
+ +AARCH64BE_SRC=$(SRC_PATH)/tests/tcg/aarch64_be +VPATH += $(AARCH64BE_SRC) + +AARCH64BE_TEST_SRCS=$(notdir $(wildcard $(AARCH64BE_SRC)/*.c)) +AARCH64BE_TESTS=$(AARCH64BE_TEST_SRCS:.c=) +#MULTIARCH_TESTS = $(MULTIARCH_SRCS:.c=) + +# We need to specify big-endian cflags +CFLAGS +=-mbig-endian -ffreestanding +LDFLAGS +=-nostdlib + +TESTS += $(AARCH64BE_TESTS) diff --git a/tests/tcg/aarch64_be/hello.c b/tests/tcg/aarch64_be/hello.c new file mode 100644 index 00000000000..a9b2ab45de9 --- /dev/null +++ b/tests/tcg/aarch64_be/hello.c @@ -0,0 +1,35 @@ +/* + * Non-libc syscall hello world for Aarch64 BE + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#define __NR_write 64 +#define __NR_exit 93 + +int write(int fd, char *buf, int len) +{ + register int x0 __asm__("x0") = fd; + register char *x1 __asm__("x1") = buf; + register int x2 __asm__("x2") = len; + register int x8 __asm__("x8") = __NR_write; + + asm volatile("svc #0" : : "r"(x0), "r"(x1), "r"(x2), "r"(x8)); + + return len; +} + +void exit(int ret) +{ + register int x0 __asm__("x0") = ret; + register int x8 __asm__("x8") = __NR_exit; + + asm volatile("svc #0" : : "r"(x0), "r"(x8)); + __builtin_unreachable(); +} + +void _start(void) +{ + write(1, "Hello World\n", 12); + exit(0); +} diff --git a/tests/tcg/alpha/Makefile.softmmu-target b/tests/tcg/alpha/Makefile.softmmu-target index a0eca4d6eac..a944102a3ce 100644 --- a/tests/tcg/alpha/Makefile.softmmu-target +++ b/tests/tcg/alpha/Makefile.softmmu-target @@ -28,7 +28,7 @@ LDFLAGS+=-static -nostdlib $(CRT_OBJS) $(MINILIB_OBJS) -lgcc %: %.c $(LINK_SCRIPT) $(CRT_OBJS) $(MINILIB_OBJS) $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) -memory: CFLAGS+=-DCHECK_UNALIGNED=0 +memory: CFLAGS+=-DCHECK_UNALIGNED=0 -mbwx # Running QEMU_OPTS+=-serial chardev:output -kernel diff --git a/tests/tcg/alpha/Makefile.target b/tests/tcg/alpha/Makefile.target index fdd7ddf64ec..36d8ed1eaea 100644 --- a/tests/tcg/alpha/Makefile.target +++ b/tests/tcg/alpha/Makefile.target @@ -12,4 +12,7 @@ test-cmov: EXTRA_CFLAGS=-DTEST_CMOV test-cmov: test-cond.c $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) +# Force generation of byte read/write +test-plugin-mem-access: CFLAGS+=-mbwx + run-test-cmov: test-cmov diff --git a/tests/tcg/arm/Makefile.target b/tests/tcg/arm/Makefile.target index 06ddf3e04fc..6189d7a0e24 100644 --- a/tests/tcg/arm/Makefile.target +++ b/tests/tcg/arm/Makefile.target @@ -20,13 +20,6 @@ ARM_TESTS = hello-arm hello-arm: CFLAGS+=-marm -ffreestanding -fno-stack-protector hello-arm: LDFLAGS+=-nostdlib -# IWMXT floating point extensions -ARM_TESTS += test-arm-iwmmxt -# Clang assembler does not support IWMXT, so use the external assembler. 
-test-arm-iwmmxt: CFLAGS += -marm -march=iwmmxt -mabi=aapcs -mfpu=fpv4-sp-d16 $(CROSS_CC_HAS_FNIA) -test-arm-iwmmxt: test-arm-iwmmxt.S - $(CC) $(CFLAGS) -Wa,--noexecstack $< -o $@ $(LDFLAGS) - # Float-convert Tests ARM_TESTS += fcvt fcvt: LDFLAGS += -lm @@ -68,7 +61,8 @@ endif ARM_TESTS += commpage # Vector SHA1 -sha1-vector: CFLAGS=-O3 +# Work around compiler false-positive warning, as we do for the 'sha1' test +sha1-vector: CFLAGS=-O3 -Wno-stringop-overread sha1-vector: sha1.c $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS) run-sha1-vector: sha1-vector run-sha1 diff --git a/tests/tcg/arm/README b/tests/tcg/arm/README index e6307116e23..aceccc127f7 100644 --- a/tests/tcg/arm/README +++ b/tests/tcg/arm/README @@ -4,8 +4,3 @@ hello-arm --------- A very simple inline assembly, write syscall based hello world - -test-arm-iwmmxt ---------------- - -A simple test case for older iwmmxt extended ARMs diff --git a/tests/tcg/arm/test-arm-iwmmxt.S b/tests/tcg/arm/test-arm-iwmmxt.S deleted file mode 100644 index d647f9404ae..00000000000 --- a/tests/tcg/arm/test-arm-iwmmxt.S +++ /dev/null @@ -1,49 +0,0 @@ -@ Checks whether iwMMXt is functional. -.code 32 -.globl main - -main: -ldr r0, =data0 -ldr r1, =data1 -ldr r2, =data2 -#ifndef FPA -wldrd wr0, [r0, #0] -wldrd wr1, [r0, #8] -wldrd wr2, [r1, #0] -wldrd wr3, [r1, #8] -wsubb wr2, wr2, wr0 -wsubb wr3, wr3, wr1 -wldrd wr0, [r2, #0] -wldrd wr1, [r2, #8] -waddb wr0, wr0, wr2 -waddb wr1, wr1, wr3 -wstrd wr0, [r2, #0] -wstrd wr1, [r2, #8] -#else -ldfe f0, [r0, #0] -ldfe f1, [r0, #8] -ldfe f2, [r1, #0] -ldfe f3, [r1, #8] -adfdp f2, f2, f0 -adfdp f3, f3, f1 -ldfe f0, [r2, #0] -ldfe f1, [r2, #8] -adfd f0, f0, f2 -adfd f1, f1, f3 -stfe f0, [r2, #0] -stfe f1, [r2, #8] -#endif -mov r0, #1 -mov r1, r2 -mov r2, #0x11 -swi #0x900004 -mov r0, #0 -swi #0x900001 - -.data -data0: -.string "aaaabbbbccccdddd" -data1: -.string "bbbbccccddddeeee" -data2: -.string "hvLLWs\x1fsdrs9\x1fNJ-\n" diff --git a/tests/tcg/cris/.gdbinit b/tests/tcg/cris/.gdbinit deleted file mode 100644 index 5e8c1d32f32..00000000000 --- a/tests/tcg/cris/.gdbinit +++ /dev/null @@ -1,11 +0,0 @@ -b main -b _fail -b exit -display /i $pc -display /x $srp -display /x $r0 -display /x $r1 -display /x $r2 -display /x $r3 -display /x $r4 -display /t $ccs diff --git a/tests/tcg/cris/Makefile.target b/tests/tcg/cris/Makefile.target deleted file mode 100644 index 713e2a5b6cd..00000000000 --- a/tests/tcg/cris/Makefile.target +++ /dev/null @@ -1,62 +0,0 @@ -# -*- Mode: makefile -*- -# -# Cris tests -# -# Currently we can only build the "bare" tests with the docker -# supplied cross-compiler. -# - -CRIS_SRC = $(SRC_PATH)/tests/tcg/cris/bare -CRIS_ALL = $(wildcard $(CRIS_SRC)/*.s) -CRIS_TESTS = $(patsubst $(CRIS_SRC)/%.s, %, $(CRIS_ALL)) -# Filter out common blobs and broken tests -CRIS_BROKEN_TESTS = crt check_jsr -# upstream GCC doesn't support v32 -CRIS_BROKEN_TESTS += check_mcp check_mulv32 check_addiv32 check_movpmv32 -CRIS_BROKEN_TESTS += check_movprv32 check_clearfv32 check_movemrv32 check_bas -CRIS_BROKEN_TESTS += check_lapc check_movei -# no sure why -CRIS_BROKEN_TESTS += check_scc check_xarith - -CRIS_USABLE_TESTS = $(filter-out $(CRIS_BROKEN_TESTS), $(CRIS_TESTS)) -CRIS_RUNS = $(patsubst %, run-%, $(CRIS_USABLE_TESTS)) - -# override the list of tests, as we can't build the multiarch tests -TESTS = $(CRIS_USABLE_TESTS) -EXTRA_RUNS = -VPATH = $(CRIS_SRC) - -AS = $(CC) -x assembler-with-cpp -LD = $(CC) - -# we rely on GCC inline:ing the stuff we tell it to in many places here. 
-CFLAGS = -Winline -Wall -g -O2 -static -fno-stack-protector -NOSTDFLAGS = -nostartfiles -nostdlib -ASFLAGS += -mcpu=v10 -g -Wa,-I,$(SRC_PATH)/tests/tcg/cris/bare -CRT_FILES = crt.o sys.o - -# stop make deleting crt files if build fails -.PRECIOUS: $(CRT_FILES) - -%.o: %.c - $(CC) -c $< -o $@ - -%.o: %.s - $(AS) $(ASFLAGS) -c $< -o $@ - -%: %.s $(CRT_FILES) - $(CC) $(ASFLAGS) $< -o $@ $(LDFLAGS) $(NOSTDFLAGS) $(CRT_FILES) - -# The default CPU breaks (possibly as it's max?) so force crisv17 -QEMU_OPTS=-cpu crisv17 - -# Additional runners to run under GNU SIM -CRIS_RUNS_ON_SIM=$(patsubst %, %-on-sim, $(CRIS_RUNS)) -SIMG:=cris-axis-linux-gnu-run - -# e.g.: make -f ../../tests/tcg/Makefile run-check_orm-on-sim -run-%-on-sim: - $(call run-test, $<, $(SIMG) $<) - -# We don't currently support the multiarch tests -undefine MULTIARCH_TESTS diff --git a/tests/tcg/cris/README b/tests/tcg/cris/README deleted file mode 100644 index 2e65a76f10b..00000000000 --- a/tests/tcg/cris/README +++ /dev/null @@ -1 +0,0 @@ -Test-suite for the cris port. Heavily based on the test-suite for the CRIS port of sim by Hans-Peter Nilsson. diff --git a/tests/tcg/cris/bare/check_addcv17.s b/tests/tcg/cris/bare/check_addcv17.s deleted file mode 100644 index 52ef7a97169..00000000000 --- a/tests/tcg/cris/bare/check_addcv17.s +++ /dev/null @@ -1,65 +0,0 @@ -# mach: crisv17 - - .include "testutils.inc" - - .macro addc Rs Rd inc=0 -# Create the instruction manually since there is no assembler support yet - .word (\Rd << 12) | \Rs | (\inc << 10) | 0x09a0 - .endm - - start - - .data -mem1: - .dword 0x0 -mem2: - .dword 0x12345678 - - .text - move.d mem1,r4 - clearf nzvc - addc 4 3 - test_cc 0 1 0 0 - checkr3 0 - - move.d mem1,r4 - clearf nzvc - ax - addc 4 3 - test_cc 0 0 0 0 - checkr3 0 - - move.d mem1,r4 - clearf nzvc - setf c - addc 4 3 - test_cc 0 0 0 0 - checkr3 1 - - move.d mem2,r4 - moveq 2, r3 - clearf nzvc - setf c - addc 4 3 - test_cc 0 0 0 0 - checkr3 1234567b - - move.d mem2,r5 - clearf nzvc - cmp.d r4,r5 - test_cc 0 1 0 0 - - move.d mem2,r4 - moveq 2, r3 - clearf nzvc - addc 4 3 inc=1 - test_cc 0 0 0 0 - checkr3 1234567a - - move.d mem2,r5 - clearf nzvc - addq 4,r5 - cmp.d r4,r5 - test_cc 0 1 0 0 - - quit diff --git a/tests/tcg/cris/bare/check_addi.s b/tests/tcg/cris/bare/check_addi.s deleted file mode 100644 index a00dec02af8..00000000000 --- a/tests/tcg/cris/bare/check_addi.s +++ /dev/null @@ -1,57 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 0\n1\n2\n4\nbe02460f\n69d035a6\nc16c14d4\n - - .include "testutils.inc" - start - moveq 0,r3 - moveq 0,r4 - clearf zcvn - addi r4.b,r3 - test_cc 0 0 0 0 - checkr3 0 - - moveq 0,r3 - moveq 1,r4 - setf zcvn - addi r4.b,r3 - test_cc 1 1 1 1 - checkr3 1 - - moveq 0,r3 - moveq 1,r4 - setf cv - clearf zn - addi r4.w,r3 - test_cc 0 0 1 1 - checkr3 2 - - moveq 0,r3 - moveq 1,r4 - clearf cv - setf zn - addi r4.d,r3 - test_cc 1 1 0 0 - checkr3 4 - - move.d 0x12345678,r3 - move.d 0xabcdef97,r4 - clearf cn - setf zv - addi r4.b,r3 - test_cc 0 1 1 0 - checkr3 be02460f - - move.d 0x12345678,r3 - move.d 0xabcdef97,r4 - setf cn - clearf zv - addi r4.w,r3 - test_cc 1 0 0 1 - checkr3 69d035a6 - - move.d 0x12345678,r3 - move.d 0xabcdef97,r4 - addi r4.d,r3 - checkr3 c16c14d4 - - quit diff --git a/tests/tcg/cris/bare/check_addiv32.s b/tests/tcg/cris/bare/check_addiv32.s deleted file mode 100644 index 20ba25d2192..00000000000 --- a/tests/tcg/cris/bare/check_addiv32.s +++ /dev/null @@ -1,62 +0,0 @@ -# mach: crisv32 -# output: 
4455aa77\n4455aa77\nee19ccff\nff22\n4455aa77\nff224455\n55aa77ff\n - - .include "testutils.inc" - .data -x: - .dword 0x55aa77ff - .dword 0xccff2244 - .dword 0x88ccee19 - - start - setf cv - moveq -1,r0 - move.d x-32768,r5 - move.d 32769,r6 - addi r6.b,r5,acr - test_cc 0 0 1 1 - move.d [acr],r3 - checkr3 4455aa77 - - addu.w 32771,r5 - setf znvc - moveq -1,r8 - addi r8.w,r5,acr - test_cc 1 1 1 1 - move.d [acr],r3 - checkr3 4455aa77 - - moveq 5,r10 - clearf znvc - addi r10.b,acr,acr - test_cc 0 0 0 0 - move.d [acr],r3 - checkr3 ee19ccff - - subq 1,r5 - move.d r5,r8 - subq 1,r8 - moveq 1,r9 - addi r9.d,r8,acr - test_cc 0 0 0 0 - movu.w [acr],r3 - checkr3 ff22 - - moveq -2,r11 - addi r11.w,acr,acr - move.d [acr],r3 - checkr3 4455aa77 - - moveq 5,r9 - addi r9.d,acr,acr - subq 18,acr - move.d [acr],r3 - checkr3 ff224455 - - move.d -76789888/4,r12 - addi r12.d,r5,acr - add.d 76789886,acr - move.d [acr],r3 - checkr3 55aa77ff - - quit diff --git a/tests/tcg/cris/bare/check_addm.s b/tests/tcg/cris/bare/check_addm.s deleted file mode 100644 index efece9f538d..00000000000 --- a/tests/tcg/cris/bare/check_addm.s +++ /dev/null @@ -1,96 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 1\n1\n1fffe\nfffffffe\ncc463bdb\nffff0001\n1\nfffe\nfedafffe\n78133bdb\nffffff01\n1\nfe\nfeda49fe\n781344db\n781344d0\n - - .include "testutils.inc" - .data -x: - .dword 2,-1,0xffff,-1,0x5432f789 - .word 2,-1,0xffff,0xf789 - .byte 2,0xff,0x89 - .byte 0x7e - - start - moveq -1,r3 - move.d x,r5 - add.d [r5+],r3 - test_cc 0 0 0 1 - checkr3 1 - - moveq 2,r3 - add.d [r5],r3 - test_cc 0 0 0 1 - addq 4,r5 - checkr3 1 - - move.d 0xffff,r3 - add.d [r5+],r3 - test_cc 0 0 0 0 - checkr3 1fffe - - moveq -1,r3 - add.d [r5+],r3 - test_cc 1 0 0 1 - checkr3 fffffffe - - move.d 0x78134452,r3 - add.d [r5+],r3 - test_cc 1 0 1 0 - checkr3 cc463bdb - - moveq -1,r3 - add.w [r5+],r3 - test_cc 0 0 0 1 - checkr3 ffff0001 - - moveq 2,r3 - add.w [r5+],r3 - test_cc 0 0 0 1 - checkr3 1 - - move.d 0xffff,r3 - add.w [r5],r3 - test_cc 1 0 0 1 - checkr3 fffe - - move.d 0xfedaffff,r3 - add.w [r5+],r3 - test_cc 1 0 0 1 - checkr3 fedafffe - - move.d 0x78134452,r3 - add.w [r5+],r3 - test_cc 0 0 0 1 - checkr3 78133bdb - - moveq -1,r3 - add.b [r5],r3 - test_cc 0 0 0 1 - addq 1,r5 - checkr3 ffffff01 - - moveq 2,r3 - add.b [r5],r3 - test_cc 0 0 0 1 - checkr3 1 - - move.d 0xff,r3 - add.b [r5],r3 - test_cc 1 0 0 1 - checkr3 fe - - move.d 0xfeda49ff,r3 - add.b [r5+],r3 - test_cc 1 0 0 1 - checkr3 feda49fe - - move.d 0x78134452,r3 - add.b [r5+],r3 - test_cc 1 0 0 0 - checkr3 781344db - - move.d 0x78134452,r3 - add.b [r5],r3 - test_cc 1 0 1 0 - checkr3 781344d0 - - quit diff --git a/tests/tcg/cris/bare/check_addq.s b/tests/tcg/cris/bare/check_addq.s deleted file mode 100644 index e6f874f9b2b..00000000000 --- a/tests/tcg/cris/bare/check_addq.s +++ /dev/null @@ -1,47 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: ffffffff\n0\n1\n100\n10000\n47\n67\na6\n80000001\n - - .include "testutils.inc" - start - moveq -2,r3 - addq 1,r3 - test_cc 1 0 0 0 - checkr3 ffffffff - - addq 1,r3 - test_cc 0 1 0 1 - checkr3 0 - - addq 1,r3 - test_cc 0 0 0 0 - checkr3 1 - - move.d 0xff,r3 - addq 1,r3 - test_cc 0 0 0 0 - checkr3 100 - - move.d 0xffff,r3 - addq 1,r3 - test_cc 0 0 0 0 - checkr3 10000 - - move.d 0x42,r3 - addq 5,r3 - test_cc 0 0 0 0 - checkr3 47 - - addq 32,r3 - test_cc 0 0 0 0 - checkr3 67 - - addq 63,r3 - test_cc 0 0 0 0 - checkr3 a6 - - move.d 0x7ffffffe,r3 - addq 3,r3 - test_cc 1 0 1 0 - checkr3 80000001 - - quit diff --git 
a/tests/tcg/cris/bare/check_addr.s b/tests/tcg/cris/bare/check_addr.s deleted file mode 100644 index 7f55cdc1b5e..00000000000 --- a/tests/tcg/cris/bare/check_addr.s +++ /dev/null @@ -1,96 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 1\n1\n1fffe\nfffffffe\ncc463bdb\nffff0001\n1\nfffe\nfedafffe\n78133bdb\nffffff01\n1\nfe\nfeda49fe\n781344db\n - - .include "testutils.inc" - start - moveq -1,r3 - moveq 2,r4 - add.d r4,r3 - test_cc 0 0 0 1 - checkr3 1 - - moveq 2,r3 - moveq -1,r4 - add.d r4,r3 - test_cc 0 0 0 1 - checkr3 1 - - move.d 0xffff,r4 - move.d r4,r3 - add.d r4,r3 - test_cc 0 0 0 0 - checkr3 1fffe - - moveq -1,r4 - move.d r4,r3 - add.d r4,r3 - test_cc 1 0 0 1 - checkr3 fffffffe - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - add.d r4,r3 - test_cc 1 0 1 0 - checkr3 cc463bdb - - moveq -1,r3 - moveq 2,r4 - add.w r4,r3 - test_cc 0 0 0 1 - checkr3 ffff0001 - - moveq 2,r3 - moveq -1,r4 - add.w r4,r3 - test_cc 0 0 0 1 - checkr3 1 - - move.d 0xffff,r4 - move.d r4,r3 - add.w r4,r3 - test_cc 1 0 0 1 - checkr3 fffe - - move.d 0xfedaffff,r4 - move.d r4,r3 - add.w r4,r3 - test_cc 1 0 0 1 - checkr3 fedafffe - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - add.w r4,r3 - test_cc 0 0 0 1 - checkr3 78133bdb - - moveq -1,r3 - moveq 2,r4 - add.b r4,r3 - test_cc 0 0 0 1 - checkr3 ffffff01 - - moveq 2,r3 - moveq -1,r4 - add.b r4,r3 - test_cc 0 0 0 1 - checkr3 1 - - move.d 0xff,r4 - move.d r4,r3 - add.b r4,r3 - test_cc 1 0 0 1 - checkr3 fe - - move.d 0xfeda49ff,r4 - move.d r4,r3 - add.b r4,r3 - test_cc 1 0 0 1 - checkr3 feda49fe - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - add.b r4,r3 - test_cc 1 0 0 0 - checkr3 781344db - - quit diff --git a/tests/tcg/cris/bare/check_addxc.s b/tests/tcg/cris/bare/check_addxc.s deleted file mode 100644 index 09c8355bf8f..00000000000 --- a/tests/tcg/cris/bare/check_addxc.s +++ /dev/null @@ -1,91 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 1\n1\n101\n10001\n100fe\n1fffe\nfffe\nfffe\nfffffffe\nfe\nfffffffe\n781344db\n781343db\n78143bdb\n78133bdb\n800000ed\n0\n - - .include "testutils.inc" - start - moveq 2,r3 - adds.b 0xff,r3 - test_cc 0 0 0 1 - checkr3 1 - - moveq 2,r3 - adds.w 0xffff,r3 - test_cc 0 0 0 1 - checkr3 1 - - moveq 2,r3 - addu.b 0xff,r3 - checkr3 101 - - moveq 2,r3 - move.d 0xffffffff,r4 - addu.w -1,r3 - test_cc 0 0 0 0 - checkr3 10001 - - move.d 0xffff,r3 - addu.b -1,r3 - test_cc 0 0 0 0 - checkr3 100fe - - move.d 0xffff,r3 - addu.w -1,r3 - test_cc 0 0 0 0 - checkr3 1fffe - - move.d 0xffff,r3 - adds.b 0xff,r3 - test_cc 0 0 0 1 - checkr3 fffe - - move.d 0xffff,r3 - adds.w 0xffff,r3 - test_cc 0 0 0 1 - checkr3 fffe - - moveq -1,r3 - adds.b 0xff,r3 - test_cc 1 0 0 1 - checkr3 fffffffe - - moveq -1,r3 - adds.w 0xff,r3 - test_cc 0 0 0 1 - checkr3 fe - - moveq -1,r3 - adds.w 0xffff,r3 - test_cc 1 0 0 1 - checkr3 fffffffe - - move.d 0x78134452,r3 - addu.b 0x89,r3 - test_cc 0 0 0 0 - checkr3 781344db - - move.d 0x78134452,r3 - adds.b 0x89,r3 - test_cc 0 0 0 1 - checkr3 781343db - - move.d 0x78134452,r3 - addu.w 0xf789,r3 - test_cc 0 0 0 0 - checkr3 78143bdb - - move.d 0x78134452,r3 - adds.w 0xf789,r3 - test_cc 0 0 0 1 - checkr3 78133bdb - - move.d 0x7fffffee,r3 - addu.b 0xff,r3 - test_cc 1 0 1 0 - checkr3 800000ed - - move.d 0x1,r3 - adds.w 0xffff,r3 - test_cc 0 1 0 1 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_addxm.s b/tests/tcg/cris/bare/check_addxm.s deleted file mode 100644 index 7563494b991..00000000000 --- a/tests/tcg/cris/bare/check_addxm.s +++ /dev/null @@ -1,106 +0,0 @@ -# mach: crisv0 crisv3 crisv8 
crisv10 crisv32 -# output: 1\n1\n101\n10001\n100fe\n1fffe\nfffe\nfffe\nfffffffe\nfe\nfffffffe\n781344db\n781343db\n78143bdb\n78133bdb\n800000ed\n0\n - - .include "testutils.inc" - .data -x: - .byte 0xff - .word 0xffff - .word 0xff - .word 0xffff - .byte 0x89 - .word 0xf789 - .byte 0xff - .word 0xffff - - start - moveq 2,r3 - move.d x,r5 - adds.b [r5+],r3 - test_cc 0 0 0 1 - checkr3 1 - - moveq 2,r3 - adds.w [r5+],r3 - test_cc 0 0 0 1 - checkr3 1 - - moveq 2,r3 - subq 3,r5 - addu.b [r5+],r3 - test_cc 0 0 0 0 - checkr3 101 - - moveq 2,r3 - addu.w [r5+],r3 - subq 3,r5 - test_cc 0 0 0 0 - checkr3 10001 - - move.d 0xffff,r3 - addu.b [r5],r3 - test_cc 0 0 0 0 - checkr3 100fe - - move.d 0xffff,r3 - addu.w [r5],r3 - test_cc 0 0 0 0 - checkr3 1fffe - - move.d 0xffff,r3 - adds.b [r5],r3 - test_cc 0 0 0 1 - checkr3 fffe - - move.d 0xffff,r3 - adds.w [r5],r3 - test_cc 0 0 0 1 - checkr3 fffe - - moveq -1,r3 - adds.b [r5],r3 - test_cc 1 0 0 1 - addq 3,r5 - checkr3 fffffffe - - moveq -1,r3 - adds.w [r5+],r3 - test_cc 0 0 0 1 - checkr3 fe - - moveq -1,r3 - adds.w [r5+],r3 - test_cc 1 0 0 1 - checkr3 fffffffe - - move.d 0x78134452,r3 - addu.b [r5],r3 - test_cc 0 0 0 0 - checkr3 781344db - - move.d 0x78134452,r3 - adds.b [r5+],r3 - test_cc 0 0 0 1 - checkr3 781343db - - move.d 0x78134452,r3 - addu.w [r5],r3 - test_cc 0 0 0 0 - checkr3 78143bdb - - move.d 0x78134452,r3 - adds.w [r5+],r3 - test_cc 0 0 0 1 - checkr3 78133bdb - - move.d 0x7fffffee,r3 - addu.b [r5+],r3 - test_cc 1 0 1 0 - checkr3 800000ed - - move.d 0x1,r3 - adds.w [r5+],r3 - test_cc 0 1 0 1 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_addxr.s b/tests/tcg/cris/bare/check_addxr.s deleted file mode 100644 index 7f55cdc1b5e..00000000000 --- a/tests/tcg/cris/bare/check_addxr.s +++ /dev/null @@ -1,96 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 1\n1\n1fffe\nfffffffe\ncc463bdb\nffff0001\n1\nfffe\nfedafffe\n78133bdb\nffffff01\n1\nfe\nfeda49fe\n781344db\n - - .include "testutils.inc" - start - moveq -1,r3 - moveq 2,r4 - add.d r4,r3 - test_cc 0 0 0 1 - checkr3 1 - - moveq 2,r3 - moveq -1,r4 - add.d r4,r3 - test_cc 0 0 0 1 - checkr3 1 - - move.d 0xffff,r4 - move.d r4,r3 - add.d r4,r3 - test_cc 0 0 0 0 - checkr3 1fffe - - moveq -1,r4 - move.d r4,r3 - add.d r4,r3 - test_cc 1 0 0 1 - checkr3 fffffffe - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - add.d r4,r3 - test_cc 1 0 1 0 - checkr3 cc463bdb - - moveq -1,r3 - moveq 2,r4 - add.w r4,r3 - test_cc 0 0 0 1 - checkr3 ffff0001 - - moveq 2,r3 - moveq -1,r4 - add.w r4,r3 - test_cc 0 0 0 1 - checkr3 1 - - move.d 0xffff,r4 - move.d r4,r3 - add.w r4,r3 - test_cc 1 0 0 1 - checkr3 fffe - - move.d 0xfedaffff,r4 - move.d r4,r3 - add.w r4,r3 - test_cc 1 0 0 1 - checkr3 fedafffe - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - add.w r4,r3 - test_cc 0 0 0 1 - checkr3 78133bdb - - moveq -1,r3 - moveq 2,r4 - add.b r4,r3 - test_cc 0 0 0 1 - checkr3 ffffff01 - - moveq 2,r3 - moveq -1,r4 - add.b r4,r3 - test_cc 0 0 0 1 - checkr3 1 - - move.d 0xff,r4 - move.d r4,r3 - add.b r4,r3 - test_cc 1 0 0 1 - checkr3 fe - - move.d 0xfeda49ff,r4 - move.d r4,r3 - add.b r4,r3 - test_cc 1 0 0 1 - checkr3 feda49fe - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - add.b r4,r3 - test_cc 1 0 0 0 - checkr3 781344db - - quit diff --git a/tests/tcg/cris/bare/check_andc.s b/tests/tcg/cris/bare/check_andc.s deleted file mode 100644 index a947b773c97..00000000000 --- a/tests/tcg/cris/bare/check_andc.s +++ /dev/null @@ -1,80 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 
2\n2\nffff\nffffffff\n50124400\nffff0002\n2\nfffff\nfedaff0f\n78134400\nffffff02\n2\nf02\n78134401\n78134400\n - - .include "testutils.inc" - start - moveq -1,r3 - and.d 2,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - moveq 2,r3 - and.d -1,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - move.d 0xffff,r3 - and.d 0xffff,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - moveq -1,r3 - and.d -1,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0x78134452,r3 - and.d 0x5432f789,r3 - test_move_cc 0 0 0 0 - checkr3 50124400 - - moveq -1,r3 - and.w 2,r3 - test_move_cc 0 0 0 0 - checkr3 ffff0002 - - moveq 2,r3 - and.w -1,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - move.d 0xfffff,r3 - and.w 0xffff,r3 - test_move_cc 1 0 0 0 - checkr3 fffff - - move.d 0xfedaffaf,r3 - and.w 0xff5f,r3 - test_move_cc 1 0 0 0 - checkr3 fedaff0f - - move.d 0x78134452,r3 - and.w 0xf789,r3 - test_move_cc 0 0 0 0 - checkr3 78134400 - - moveq -1,r3 - and.b 2,r3 - test_move_cc 0 0 0 0 - checkr3 ffffff02 - - moveq 2,r3 - and.b -1,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - move.d 0xfa7,r3 - and.b 0x5a,r3 - test_move_cc 0 0 0 0 - checkr3 f02 - - move.d 0x78134453,r3 - and.b 0x89,r3 - test_move_cc 0 0 0 0 - checkr3 78134401 - - and.b 0,r3 - test_move_cc 0 1 0 0 - checkr3 78134400 - - quit diff --git a/tests/tcg/cris/bare/check_andm.s b/tests/tcg/cris/bare/check_andm.s deleted file mode 100644 index 93858863fe5..00000000000 --- a/tests/tcg/cris/bare/check_andm.s +++ /dev/null @@ -1,90 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 2\n2\nffff\nffffffff\n50124400\nffff0002\n2\nfffff\nfedaff0f\n78134400\nffffff02\n2\nf02\n78134401\n78134400\n - - .include "testutils.inc" - .data -x: - .dword 2,-1,0xffff,-1,0x5432f789 - .word 2,-1,0xffff,0xff5f,0xf789 - .byte 2,-1,0x5a,0x89,0 - - start - moveq -1,r3 - move.d x,r5 - and.d [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 2 - - moveq 2,r3 - and.d [r5],r3 - test_move_cc 0 0 0 0 - addq 4,r5 - checkr3 2 - - move.d 0xffff,r3 - and.d [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - moveq -1,r3 - and.d [r5+],r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0x78134452,r3 - and.d [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 50124400 - - moveq -1,r3 - and.w [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 ffff0002 - - moveq 2,r3 - and.w [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 2 - - move.d 0xfffff,r3 - and.w [r5],r3 - test_move_cc 1 0 0 0 - addq 2,r5 - checkr3 fffff - - move.d 0xfedaffaf,r3 - and.w [r5+],r3 - test_move_cc 1 0 0 0 - checkr3 fedaff0f - - move.d 0x78134452,r3 - and.w [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 78134400 - - moveq -1,r3 - and.b [r5],r3 - test_move_cc 0 0 0 0 - addq 1,r5 - checkr3 ffffff02 - - moveq 2,r3 - and.b [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 2 - - move.d 0xfa7,r3 - and.b [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 f02 - - move.d 0x78134453,r3 - and.b [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 78134401 - - and.b [r5],r3 - test_move_cc 0 1 0 0 - checkr3 78134400 - - quit diff --git a/tests/tcg/cris/bare/check_andq.s b/tests/tcg/cris/bare/check_andq.s deleted file mode 100644 index 55aa7b06077..00000000000 --- a/tests/tcg/cris/bare/check_andq.s +++ /dev/null @@ -1,46 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 2\n2\nffff\nffffffff\n1f\nffffffe0\n78134452\n0\n - - .include "testutils.inc" - start - moveq -1,r3 - andq 2,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - moveq 2,r3 - andq -1,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - move.d 0xffff,r3 - andq -1,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - moveq -1,r3 - andq -1,r3 - test_move_cc 1 0 0 0 - checkr3 
ffffffff - - moveq -1,r3 - andq 31,r3 - test_move_cc 0 0 0 0 - checkr3 1f - - moveq -1,r3 - andq -32,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffe0 - - move.d 0x78134457,r3 - andq -14,r3 - test_move_cc 0 0 0 0 - checkr3 78134452 - - moveq 0,r3 - andq -14,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_andr.s b/tests/tcg/cris/bare/check_andr.s deleted file mode 100644 index 61aa1dc32f2..00000000000 --- a/tests/tcg/cris/bare/check_andr.s +++ /dev/null @@ -1,95 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 2\n2\nffff\nffffffff\n50124400\nffff0002\n2\nfffff\nfedaff0f\n78134400\nffffff02\n2\nf02\n78134401\n78134400\n - - .include "testutils.inc" - start - moveq -1,r3 - moveq 2,r4 - and.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - moveq 2,r3 - moveq -1,r4 - and.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - move.d 0xffff,r4 - move.d r4,r3 - and.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - moveq -1,r4 - move.d r4,r3 - and.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - and.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 50124400 - - moveq -1,r3 - moveq 2,r4 - and.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffff0002 - - moveq 2,r3 - moveq -1,r4 - and.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - move.d 0xfffff,r3 - move.d 0xffff,r4 - and.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 fffff - - move.d 0xfedaffaf,r3 - move.d 0xff5f,r4 - and.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 fedaff0f - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - and.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 78134400 - - moveq -1,r3 - moveq 2,r4 - and.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffffff02 - - moveq 2,r3 - moveq -1,r4 - and.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - move.d 0x5a,r4 - move.d 0xfa7,r3 - and.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 f02 - - move.d 0x5432f789,r4 - move.d 0x78134453,r3 - and.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 78134401 - - moveq 0,r7 - and.b r7,r3 - test_move_cc 0 1 0 0 - checkr3 78134400 - - quit diff --git a/tests/tcg/cris/bare/check_asr.s b/tests/tcg/cris/bare/check_asr.s deleted file mode 100644 index 0a02ae6f7eb..00000000000 --- a/tests/tcg/cris/bare/check_asr.s +++ /dev/null @@ -1,230 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: ffffffff\n1\nffffffff\nffffffff\n5a67f\nffffffff\nffffffff\nffffffff\nf699fc67\nffffffff\n1\nffffffff\nffffffff\n5a67f\nda67ffff\nda67ffff\nda67ffff\nda67fc67\nffffffff\nffffffff\n1\nffffffff\nffffffff\n5a670007\nda67f1ff\nda67f1ff\nda67f1ff\nda67f1e7\nffffffff\nffffffff\n1\nffffffff\nffffffff\nffffffff\n5a67f1ff\n5a67f1f9\n0\n5a670000\n - - .include "testutils.inc" - start - moveq -1,r3 - asrq 0,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - asrq 1,r3 - test_move_cc 0 0 0 0 - checkr3 1 - - moveq -1,r3 - asrq 31,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq -1,r3 - asrq 15,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0x5a67f19f,r3 - asrq 12,r3 - test_move_cc 0 0 0 0 - checkr3 5a67f - - move.d 0xda67f19f,r3 - move.d 31,r4 - asr.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0xda67f19f,r3 - move.d 32,r4 - asr.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0xda67f19f,r3 - move.d 33,r4 - asr.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0xda67f19f,r3 - move.d 66,r4 - asr.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 f699fc67 - - moveq -1,r3 - moveq 0,r4 - asr.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - moveq 1,r4 - asr.d r4,r3 - test_move_cc 
0 0 0 0 - checkr3 1 - - moveq -1,r3 - moveq 31,r4 - asr.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq -1,r3 - moveq 15,r4 - asr.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0x5a67f19f,r3 - moveq 12,r4 - asr.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 5a67f - - move.d 0xda67f19f,r3 - move.d 31,r4 - asr.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 da67ffff - - move.d 0xda67f19f,r3 - move.d 32,r4 - asr.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 da67ffff - - move.d 0xda67f19f,r3 - move.d 33,r4 - asr.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 da67ffff - - move.d 0xda67f19f,r3 - move.d 66,r4 - asr.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 da67fc67 - - moveq -1,r3 - moveq 0,r4 - asr.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq -1,r3 - moveq 1,r4 - asr.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - moveq 1,r4 - asr.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 1 - - moveq -1,r3 - moveq 31,r4 - asr.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq -1,r3 - moveq 15,r4 - asr.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0x5a67719f,r3 - moveq 12,r4 - asr.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 5a670007 - - move.d 0xda67f19f,r3 - move.d 31,r4 - asr.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 da67f1ff - - move.d 0xda67f19f,r3 - move.d 32,r4 - asr.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 da67f1ff - - move.d 0xda67f19f,r3 - move.d 33,r4 - asr.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 da67f1ff - - move.d 0xda67f19f,r3 - move.d 66,r4 - asr.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 da67f1e7 - - moveq -1,r3 - moveq 0,r4 - asr.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq -1,r3 - moveq 1,r4 - asr.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - moveq 1,r4 - asr.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 1 - - moveq -1,r3 - moveq 31,r4 - asr.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq -1,r3 - moveq 15,r4 - asr.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq -1,r3 - moveq 7,r4 - asr.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - -; FIXME: was wrong. - move.d 0x5a67f19f,r3 - moveq 12,r4 - asr.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 5a67f1ff - -; FIXME: was wrong. - move.d 0x5a67f19f,r3 - moveq 4,r4 - asr.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 5a67f1f9 - - move.d 0x5a67f19f,r3 - asrq 31,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - move.d 0x5a67419f,r3 - moveq 16,r4 - asr.w r4,r3 - test_move_cc 0 1 0 0 - checkr3 5a670000 - - quit diff --git a/tests/tcg/cris/bare/check_ba.s b/tests/tcg/cris/bare/check_ba.s deleted file mode 100644 index 873a4086c5f..00000000000 --- a/tests/tcg/cris/bare/check_ba.s +++ /dev/null @@ -1,93 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: a\n - - - .set smalloffset,0 - .set largeoffset,0 - - - .macro fail - jump _fail - .endm - - .global main -main: - moveq 0,$r3 - -; Short forward branch. - ba 0f - addq 1,$r3 - fail - -; Max short forward branch. -1: - ba 2f - addq 1,$r3 - fail - -; Short backward branch. -0: - ba 1b - addq 1,$r3 - fail - - .space 254-2+smalloffset+1b-.,0 - moveq 0,$r3 - -2: -; Transit branch (long). - ba 3f - addq 1,$r3 - fail - - moveq 0,$r3 -4: -; Long forward branch. - ba 5f - addq 1,$r3 - fail - - .space 256-2-smalloffset+4b-.,0 - - moveq 0,$r3 - -; Max short backward branch. -3: - ba 4b - addq 1,$r3 - fail - -5: -; Max long forward branch. - ba 6f - addq 1,$r3 - fail - - .space 32766+largeoffset-2+5b-.,0 - - moveq 0,$r3 -6: -; Transit branch. 
- ba 7f - addq 1,$r3 - fail - - moveq 0,$r3 -9: - jsr pass - nop - -; Transit branch. - moveq 0,$r3 -7: - ba 8f - addq 1,$r3 - fail - - .space 32768-largeoffset+9b-.,0 - -8: -; Max long backward branch. - ba 9b - addq 1,$r3 - fail diff --git a/tests/tcg/cris/bare/check_bas.s b/tests/tcg/cris/bare/check_bas.s deleted file mode 100644 index 11929d42020..00000000000 --- a/tests/tcg/cris/bare/check_bas.s +++ /dev/null @@ -1,102 +0,0 @@ -# mach: crisv32 -# output: 0\n0\n0\nfb349abc\n0\n12124243\n0\n0\neab5baad\n0\nefb37832\n - - .include "testutils.inc" - start -x: - setf zncv - bsr 0f - nop -0: - test_cc 1 1 1 1 - move srp,r3 - sub.d 0b,r3 - checkr3 0 - - bas 1f,mof - moveq 0,r0 -6: - nop - quit - -2: - move srp,r3 - sub.d 3f,r3 - checkr3 0 - move srp,r4 - subq 4,r4 - move.d [r4],r3 - checkr3 fb349abc - - basc 4f,mof - nop - .dword 0x12124243 -7: - nop - quit - -8: - move mof,r3 - sub.d 7f,r3 - checkr3 0 - - move mof,r4 - subq 4,r4 - move.d [r4],r3 - checkr3 eab5baad - - jasc 9f,mof - nop - .dword 0xefb37832 -0: - quit - - quit -9: - move mof,r3 - sub.d 0b,r3 - checkr3 0 - - move mof,r4 - subq 4,r4 - move.d [r4],r3 - checkr3 efb37832 - - quit - -4: - move mof,r3 - sub.d 7b,r3 - checkr3 0 - move mof,r4 - subq 4,r4 - move.d [r4],r3 - checkr3 12124243 - basc 5f,bz - moveq 0,r3 - .dword 0x7634aeba - quit - - .space 32770,0 -1: - move mof,r3 - sub.d 6b,r3 - checkr3 0 - - bsrc 2b - nop - .dword 0xfb349abc -3: - - quit - -5: - move mof,r3 - sub.d 7b,r3 - checkr3 0 - move.d 8b,r6 - jasc r6,mof - nop - .dword 0xeab5baad -7: - quit diff --git a/tests/tcg/cris/bare/check_bcc.s b/tests/tcg/cris/bare/check_bcc.s deleted file mode 100644 index c57ffa6fa35..00000000000 --- a/tests/tcg/cris/bare/check_bcc.s +++ /dev/null @@ -1,197 +0,0 @@ - .global main - .type main, @function -main: - clearf nzvc - setf nzv - bcc 0f - addq 1, $r3 - jump dofail - -0: - clearf nzvc - setf nzv - bcs dofail - addq 1,$r3 - - clearf nzvc - setf ncv - bne 1f - addq 1, $r3 - -fail: -dofail: - jump _fail - -1: - clearf nzvc - setf ncv - beq dofail - addq 1,$r3 - - clearf nzvc - setf ncz - bvc 2f - addq 1,$r3 - jump dofail - -2: - clearf nzvc - setf ncz - bvs dofail - addq 1,$r3 - - clearf nzvc - setf vcz - bpl 3f - addq 1,$r3 - jump fail -3: - clearf nzvc - setf vcz - bmi dofail - addq 1,$r3 - - clearf nzvc - setf nv - bls dofail - addq 1,$r3 - - clearf nzvc - setf nv - bhi 4f - addq 1,$r3 - jump dofail - -4: - clearf nzvc - setf zc - bge 5f - addq 1,$r3 - jump dofail - -5: - clearf nzvc - setf zc - blt dofail - addq 1,$r3 - - clearf nzvc - setf c - bgt 6f - addq 1,$r3 - jump fail - -6: - clearf nzvc - setf c - ble dofail - addq 1,$r3 - -;;;;;;;;;; - - setf nzvc - clearf nzv - bcc dofail - addq 1,$r3 - - setf nzvc - clearf nzv - bcs 0f - addq 1,$r3 - jump fail - -0: - setf nzvc - clearf ncv - bne dofail - addq 1,$r3 - - setf nzvc - clearf ncv - beq 1f - addq 1,$r3 - jump fail - -1: - setf nzvc - clearf ncz - bvc dofail - addq 1,$r3 - - setf nzvc - clearf ncz - bvs 2f - addq 1,$r3 - jump fail - -2: - setf nzvc - clearf vcz - bpl dofail - addq 1,$r3 - - setf nzvc - clearf vcz - bmi 3f - addq 1,$r3 - jump fail - -3: - setf nzvc - clearf nv - bls 4f - addq 1,$r3 - jump fail - -4: - setf nzvc - clearf nv - bhi dofail - addq 1,$r3 - - setf zvc - clearf nzc - bge dofail - addq 1,$r3 - - setf nzc - clearf vzc - blt 5f - addq 1,$r3 - jump fail - -5: - setf nzvc - clearf c - bgt dofail - addq 1,$r3 - - setf nzvc - clearf c - ble 6f - addq 1,$r3 - jump fail - -6: - ; do a forward branch. 
- ba 2f - nop - .fill 100 -1: - ba 3f - nop - .fill 800 -2: - ba 1b - nop - .fill 1024 -3: - - moveq 31, $r0 -1: bne 1b - subq 1, $r0 - - jsr pass - moveq 0, $r10 - ret - nop diff --git a/tests/tcg/cris/bare/check_boundc.s b/tests/tcg/cris/bare/check_boundc.s deleted file mode 100644 index fb9e5bc905b..00000000000 --- a/tests/tcg/cris/bare/check_boundc.s +++ /dev/null @@ -1,101 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 2\n2\nffff\nffffffff\n5432f789\n2\nffff\n2\nffff\nffff\nf789\n2\n2\nff\nff\nff\n89\n0\nff\n - - .include "testutils.inc" - start - moveq -1,r3 - moveq 2,r4 - bound.d 2,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - moveq 2,r3 - bound.d 0xffffffff,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - move.d 0xffff,r3 - bound.d 0xffff,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - moveq -1,r3 - bound.d 0xffffffff,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0x78134452,r3 - bound.d 0x5432f789,r3 - test_move_cc 0 0 0 0 - checkr3 5432f789 - - moveq -1,r3 - bound.w 2,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - moveq -1,r3 - bound.w 0xffff,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - moveq 2,r3 - bound.w 0xffff,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - move.d 0xffff,r3 - bound.w 0xffff,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - move.d 0xfedaffff,r3 - bound.w 0xffff,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - move.d 0x78134452,r3 - bound.w 0xf789,r3 - test_move_cc 0 0 0 0 - checkr3 f789 - - moveq -1,r3 - bound.b 2,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - moveq 2,r3 - bound.b 0xff,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - moveq -1,r3 - bound.b 0xff,r3 - test_move_cc 0 0 0 0 - checkr3 ff - - move.d 0xff,r3 - bound.b 0xff,r3 - test_move_cc 0 0 0 0 - checkr3 ff - - move.d 0xfeda49ff,r3 - bound.b 0xff,r3 - test_move_cc 0 0 0 0 - checkr3 ff - - move.d 0x78134452,r3 - bound.b 0x89,r3 - test_move_cc 0 0 0 0 - checkr3 89 - - bound.w 0,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - move.d 0xffff,r3 - bound.b -1,r3 - test_move_cc 0 0 0 0 - checkr3 ff - - quit diff --git a/tests/tcg/cris/bare/check_boundr.s b/tests/tcg/cris/bare/check_boundr.s deleted file mode 100644 index 5c50cc5f6ad..00000000000 --- a/tests/tcg/cris/bare/check_boundr.s +++ /dev/null @@ -1,125 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 2\n2\nffff\nffffffff\n5432f789\n2\n2\nffff\nffff\nffff\nf789\n2\n2\nff\nff\n89\nfeda4953\nfeda4962\n0\n0\n - - .include "testutils.inc" - start - moveq -1,r3 - moveq 2,r4 - bound.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - moveq 2,r3 - moveq -1,r4 - bound.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - move.d 0xffff,r4 - move.d r4,r3 - bound.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - moveq -1,r4 - move.d r4,r3 - bound.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - bound.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 5432f789 - - moveq -1,r3 - moveq 2,r4 - bound.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - moveq 2,r3 - moveq -1,r4 - bound.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - moveq -1,r3 - bound.w r3,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - move.d 0xffff,r4 - move.d r4,r3 - bound.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - move.d 0xfedaffff,r4 - move.d r4,r3 - bound.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - bound.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 f789 - - moveq -1,r3 - moveq 2,r4 - bound.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - moveq 2,r3 - moveq -1,r4 - bound.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 2 - - move.d 0xff,r4 
- move.d r4,r3 - bound.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 ff - - move.d 0xfeda49ff,r4 - move.d r4,r3 - bound.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 ff - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - bound.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 89 - - move.d 0xfeda4956,r3 - move.d 0xfeda4953,r4 - bound.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 feda4953 - - move.d 0xfeda4962,r3 - move.d 0xfeda4963,r4 - bound.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 feda4962 - - move.d 0xfeda4956,r3 - move.d 0,r4 - bound.d r4,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - move.d 0xfeda4956,r4 - move.d 0,r3 - bound.d r4,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_btst.s b/tests/tcg/cris/bare/check_btst.s deleted file mode 100644 index 485deb20066..00000000000 --- a/tests/tcg/cris/bare/check_btst.s +++ /dev/null @@ -1,96 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 1111\n - - .include "testutils.inc" - start - clearf nzvc - moveq -1,r3 - .if 1 ;..asm.arch.cris.v32 - .else - setf vc - .endif - btstq 0,r3 - test_cc 1 0 0 0 - - moveq 2,r3 - btstq 1,r3 - test_cc 1 0 0 0 - - moveq 4,r3 - btstq 1,r3 - test_cc 0 1 0 0 - - moveq -1,r3 - btstq 31,r3 - test_cc 1 0 0 0 - - move.d 0x5a67f19f,r3 - btstq 12,r3 - test_cc 1 0 0 0 - - move.d 0xda67f19f,r3 - move.d 29,r4 - btst r4,r3 - test_cc 0 0 0 0 - - move.d 0xda67f19f,r3 - move.d 32,r4 - btst r4,r3 - test_cc 1 0 0 0 - - move.d 0xda67f191,r3 - move.d 33,r4 - btst r4,r3 - test_cc 0 0 0 0 - - moveq -1,r3 - moveq 0,r4 - btst r4,r3 - test_cc 1 0 0 0 - - moveq 2,r3 - moveq 1,r4 - btst r4,r3 - test_cc 1 0 0 0 - - moveq -1,r3 - moveq 31,r4 - btst r4,r3 - test_cc 1 0 0 0 - - moveq 4,r3 - btstq 1,r3 - test_cc 0 1 0 0 - - moveq -1,r3 - moveq 15,r4 - btst r4,r3 - test_cc 1 0 0 0 - - move.d 0x5a67f19f,r3 - moveq 12,r4 - btst r4,r3 - test_cc 1 0 0 0 - - move.d 0x5a678000,r3 - moveq 11,r4 - btst r4,r3 - test_cc 0 1 0 0 - - move.d 0x5a67f19f,r3 - btst r3,r3 - test_cc 0 0 0 0 - - move.d 0x1111,r3 - checkr3 1111 - - ; check that X gets cleared and that only the NZ flags are touched. - ;; move.d 0xff, $r0 - ;; move $r0, $ccs - ;; btst r3,r3 - ;; move $ccs, $r0 - ;; and.d 0xff, $r0 - ;; cmp.d 0xe3, $r0 - ;; test_cc 0 1 0 0 - - quit diff --git a/tests/tcg/cris/bare/check_clearfv32.s b/tests/tcg/cris/bare/check_clearfv32.s deleted file mode 100644 index 4e91360273c..00000000000 --- a/tests/tcg/cris/bare/check_clearfv32.s +++ /dev/null @@ -1,19 +0,0 @@ -# mach: crisv32 -# output: ef\nef\n - -; Check that "clearf x" doesn't trivially fail. - - .include "testutils.inc" - start - setf puixnzvc - clearf x ; Actually, x would be cleared by almost-all other insns. - move ccs,r3 - and.d 0xff, $r3 - checkr3 ef - - setf puixnzvc - moveq 0, $r3 ; moveq should only clear the xflag. - move ccs,r3 - and.d 0xff, $r3 - checkr3 ef - quit diff --git a/tests/tcg/cris/bare/check_clrjmp1.s b/tests/tcg/cris/bare/check_clrjmp1.s deleted file mode 100644 index 45a7005e24d..00000000000 --- a/tests/tcg/cris/bare/check_clrjmp1.s +++ /dev/null @@ -1,36 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: ffffff00\n - -; A bug resulting in a non-effectual clear.b discovered running the GCC -; testsuite; jump actually wrote to p0. - - .include "testutils.inc" - - start - jump 1f - nop - .p2align 8 -1: - move.d y,r4 - - .if 0 ;0 == ..asm.arch.cris.v32 -; There was a bug causing this insn to set special register p0 -; (byte-clear) to 8 (low 8 bits of location after insn). 
- jump [r4+] - .endif - -1: - move.d 0f,r4 - -; The corresponding bug would cause this insn too, to set p0. - jump r4 - nop - quit -0: - moveq -1,r3 - clear.b r3 - checkr3 ffffff00 - quit - -y: - .dword 1b diff --git a/tests/tcg/cris/bare/check_cmp-2.s b/tests/tcg/cris/bare/check_cmp-2.s deleted file mode 100644 index 414d3705179..00000000000 --- a/tests/tcg/cris/bare/check_cmp-2.s +++ /dev/null @@ -1,15 +0,0 @@ - - -.include "testutils.inc" - - start - - move.d 4294967283, $r0 - move.d $r0, $r10 - cmp.d $r0, $r10 - beq 1f - move.d $r10, $r3 - fail -1: - pass - quit diff --git a/tests/tcg/cris/bare/check_cmpc.s b/tests/tcg/cris/bare/check_cmpc.s deleted file mode 100644 index 267c9ba8c01..00000000000 --- a/tests/tcg/cris/bare/check_cmpc.s +++ /dev/null @@ -1,86 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: ffffffff\n2\nffff\nffffffff\n78134452\nffffffff\n2\nffff\nfedaffff\n78134452\nffffffff\n2\nff\nfeda49ff\n78134452\n85649282\n - - .include "testutils.inc" - start - moveq -1,r3 - cmp.d -2,r3 - test_cc 0 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - cmp.d 1,r3 - test_cc 0 0 0 0 - checkr3 2 - - move.d 0xffff,r3 - cmp.d -0xffff,r3 - test_cc 0 0 0 1 - checkr3 ffff - - moveq -1,r3 - cmp.d 1,r3 - test_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0x78134452,r3 - cmp.d -0x5432f789,r3 - test_cc 1 0 1 1 - checkr3 78134452 - - moveq -1,r3 - cmp.w -2,r3 - test_cc 0 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - cmp.w 1,r3 - test_cc 0 0 0 0 - checkr3 2 - - move.d 0xffff,r3 - cmp.w 1,r3 - test_cc 1 0 0 0 - checkr3 ffff - - move.d 0xfedaffff,r3 - cmp.w 1,r3 - test_cc 1 0 0 0 - checkr3 fedaffff - - move.d 0x78134452,r3 - cmp.w 0x877,r3 - test_cc 0 0 0 0 - checkr3 78134452 - - moveq -1,r3 - cmp.b -2,r3 - test_cc 0 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - cmp.b 1,r3 - test_cc 0 0 0 0 - checkr3 2 - - move.d 0xff,r3 - cmp.b 1,r3 - test_cc 1 0 0 0 - checkr3 ff - - move.d 0xfeda49ff,r3 - cmp.b 1,r3 - test_cc 1 0 0 0 - checkr3 feda49ff - - move.d 0x78134452,r3 - cmp.b 0x77,r3 - test_cc 1 0 0 1 - checkr3 78134452 - - move.d 0x85649282,r3 - cmp.b 0x82,r3 - test_cc 0 1 0 0 - checkr3 85649282 - - quit diff --git a/tests/tcg/cris/bare/check_cmpm.s b/tests/tcg/cris/bare/check_cmpm.s deleted file mode 100644 index e4dde15b32e..00000000000 --- a/tests/tcg/cris/bare/check_cmpm.s +++ /dev/null @@ -1,96 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: ffffffff\n2\nffff\nffffffff\n78134452\nffffffff\n2\nffff\nfedaffff\n78134452\nffffffff\n2\nff\nfeda49ff\n78134452\n85649222\n - - .include "testutils.inc" - .data -x: - .dword -2,1,-0xffff,1,-0x5432f789 - .word -2,1,1,0x877 - .byte -2,1,0x77 - .byte 0x22 - - start - moveq -1,r3 - move.d x,r5 - cmp.d [r5+],r3 - test_cc 0 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - cmp.d [r5],r3 - test_cc 0 0 0 0 - addq 4,r5 - checkr3 2 - - move.d 0xffff,r3 - cmp.d [r5+],r3 - test_cc 0 0 0 1 - checkr3 ffff - - moveq -1,r3 - cmp.d [r5+],r3 - test_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0x78134452,r3 - cmp.d [r5+],r3 - test_cc 1 0 1 1 - checkr3 78134452 - - moveq -1,r3 - cmp.w [r5+],r3 - test_cc 0 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - cmp.w [r5+],r3 - test_cc 0 0 0 0 - checkr3 2 - - move.d 0xffff,r3 - cmp.w [r5],r3 - test_cc 1 0 0 0 - checkr3 ffff - - move.d 0xfedaffff,r3 - cmp.w [r5+],r3 - test_cc 1 0 0 0 - checkr3 fedaffff - - move.d 0x78134452,r3 - cmp.w [r5+],r3 - test_cc 0 0 0 0 - checkr3 78134452 - - moveq -1,r3 - cmp.b [r5],r3 - test_cc 0 0 0 0 - addq 1,r5 - checkr3 ffffffff - - moveq 2,r3 - cmp.b [r5],r3 - test_cc 0 0 0 0 - checkr3 2 - - move.d 0xff,r3 - cmp.b 
[r5],r3 - test_cc 1 0 0 0 - checkr3 ff - - move.d 0xfeda49ff,r3 - cmp.b [r5+],r3 - test_cc 1 0 0 0 - checkr3 feda49ff - - move.d 0x78134452,r3 - cmp.b [r5+],r3 - test_cc 1 0 0 1 - checkr3 78134452 - - move.d 0x85649222,r3 - cmp.b [r5],r3 - test_cc 0 1 0 0 - checkr3 85649222 - - quit diff --git a/tests/tcg/cris/bare/check_cmpq.s b/tests/tcg/cris/bare/check_cmpq.s deleted file mode 100644 index 5469141c916..00000000000 --- a/tests/tcg/cris/bare/check_cmpq.s +++ /dev/null @@ -1,75 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: 1\n1\n1\n1f\n1f\nffffffe1\nffffffe1\nffffffe0\n0\n0\nffffffff\nffffffff\n10000\n100\n5678900\n - - .include "testutils.inc" - start - moveq 1,r3 - cmpq 1,r3 - test_cc 0 1 0 0 - checkr3 1 - - cmpq -1,r3 - test_cc 0 0 0 1 - checkr3 1 - - cmpq 31,r3 - test_cc 1 0 0 1 - checkr3 1 - - moveq 31,r3 - cmpq 31,r3 - test_cc 0 1 0 0 - checkr3 1f - - cmpq -31,r3 - test_cc 0 0 0 1 - checkr3 1f - - movs.b -31,r3 - cmpq -31,r3 - test_cc 0 1 0 0 - checkr3 ffffffe1 - - cmpq -32,r3 - test_cc 0 0 0 0 - checkr3 ffffffe1 - - movs.b -32,r3 - cmpq -32,r3 - test_cc 0 1 0 0 - checkr3 ffffffe0 - - moveq 0,r3 - cmpq 1,r3 - test_cc 1 0 0 1 - checkr3 0 - - cmpq -32,r3 - test_cc 0 0 0 1 - checkr3 0 - - moveq -1,r3 - cmpq 1,r3 - test_cc 1 0 0 0 - checkr3 ffffffff - - cmpq -1,r3 - test_cc 0 1 0 0 - checkr3 ffffffff - - move.d 0x10000,r3 - cmpq 1,r3 - test_cc 0 0 0 0 - checkr3 10000 - - move.d 0x100,r3 - cmpq 1,r3 - test_cc 0 0 0 0 - checkr3 100 - - move.d 0x5678900,r3 - cmpq 7,r3 - test_cc 0 0 0 0 - checkr3 5678900 - - quit diff --git a/tests/tcg/cris/bare/check_cmpr.s b/tests/tcg/cris/bare/check_cmpr.s deleted file mode 100644 index b30af7a5385..00000000000 --- a/tests/tcg/cris/bare/check_cmpr.s +++ /dev/null @@ -1,102 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: ffffffff\n2\nffff\nffffffff\n78134452\nffffffff\n2\nffff\nfedaffff\n78134452\nffffffff\n2\nff\nfeda49ff\n78134452\n85649222\n - - .include "testutils.inc" - start - moveq -1,r3 - moveq -2,r4 - cmp.d r4,r3 - test_cc 0 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - moveq 1,r4 - cmp.d r4,r3 - test_cc 0 0 0 0 - checkr3 2 - - move.d 0xffff,r3 - move.d -0xffff,r4 - cmp.d r4,r3 - test_cc 0 0 0 1 - checkr3 ffff - - moveq 1,r4 - moveq -1,r3 - cmp.d r4,r3 - test_cc 1 0 0 0 - checkr3 ffffffff - - move.d -0x5432f789,r4 - move.d 0x78134452,r3 - cmp.d r4,r3 - test_cc 1 0 1 1 - checkr3 78134452 - - moveq -1,r3 - moveq -2,r4 - cmp.w r4,r3 - test_cc 0 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - moveq 1,r4 - cmp.w r4,r3 - test_cc 0 0 0 0 - checkr3 2 - - move.d 0xffff,r3 - move.d -0xffff,r4 - cmp.w r4,r3 - test_cc 1 0 0 0 - checkr3 ffff - - move.d 0xfedaffff,r3 - move.d -0xfedaffff,r4 - cmp.w r4,r3 - test_cc 1 0 0 0 - checkr3 fedaffff - - move.d -0x5432f789,r4 - move.d 0x78134452,r3 - cmp.w r4,r3 - test_cc 0 0 0 0 - checkr3 78134452 - - moveq -1,r3 - moveq -2,r4 - cmp.b r4,r3 - test_cc 0 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - moveq 1,r4 - cmp.b r4,r3 - test_cc 0 0 0 0 - checkr3 2 - - move.d -0xff,r4 - move.d 0xff,r3 - cmp.b r4,r3 - test_cc 1 0 0 0 - checkr3 ff - - move.d -0xfeda49ff,r4 - move.d 0xfeda49ff,r3 - cmp.b r4,r3 - test_cc 1 0 0 0 - checkr3 feda49ff - - move.d -0x5432f789,r4 - move.d 0x78134452,r3 - cmp.b r4,r3 - test_cc 1 0 0 1 - checkr3 78134452 - - move.d 0x85649222,r3 - move.d 0x77445622,r4 - cmp.b r4,r3 - test_cc 0 1 0 0 - checkr3 85649222 - - quit diff --git a/tests/tcg/cris/bare/check_cmpxc.s b/tests/tcg/cris/bare/check_cmpxc.s deleted file mode 100644 index b237a931752..00000000000 --- 
a/tests/tcg/cris/bare/check_cmpxc.s +++ /dev/null @@ -1,92 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 2\n2\n2\n2\nffff\nffff\nffff\nffff\nffffffff\nffffffff\nffffffff\n78134452\n78134452\n78134452\n78134452\n4452\n80000032\n - - .include "testutils.inc" - start - moveq 2,r3 - cmps.b 0xff,r3 - test_cc 0 0 0 1 - checkr3 2 - - moveq 2,r3 - cmps.w 0xffff,r3 - test_cc 0 0 0 1 - checkr3 2 - - moveq 2,r3 - cmpu.b 0xff,r3 - test_cc 1 0 0 1 - checkr3 2 - - moveq 2,r3 - move.d 0xffffffff,r4 - cmpu.w -1,r3 - test_cc 1 0 0 1 - checkr3 2 - - move.d 0xffff,r3 - cmpu.b -1,r3 - test_cc 0 0 0 0 - checkr3 ffff - - move.d 0xffff,r3 - cmpu.w -1,r3 - test_cc 0 1 0 0 - checkr3 ffff - - move.d 0xffff,r3 - cmps.b 0xff,r3 - test_cc 0 0 0 1 - checkr3 ffff - - move.d 0xffff,r3 - cmps.w 0xffff,r3 - test_cc 0 0 0 1 - checkr3 ffff - - moveq -1,r3 - cmps.b 0xff,r3 - test_cc 0 1 0 0 - checkr3 ffffffff - - moveq -1,r3 - cmps.w 0xff,r3 - test_cc 1 0 0 0 - checkr3 ffffffff - - moveq -1,r3 - cmps.w 0xffff,r3 - test_cc 0 1 0 0 - checkr3 ffffffff - - move.d 0x78134452,r3 - cmpu.b 0x89,r3 - test_cc 0 0 0 0 - checkr3 78134452 - - move.d 0x78134452,r3 - cmps.b 0x89,r3 - test_cc 0 0 0 1 - checkr3 78134452 - - move.d 0x78134452,r3 - cmpu.w 0xf789,r3 - test_cc 0 0 0 0 - checkr3 78134452 - - move.d 0x78134452,r3 - cmps.w 0xf789,r3 - test_cc 0 0 0 1 - checkr3 78134452 - - move.d 0x4452,r3 - cmps.w 0x8002,r3 - test_cc 0 0 0 1 - checkr3 4452 - - move.d 0x80000032,r3 - cmpu.w 0x764,r3 - test_cc 0 0 1 0 - checkr3 80000032 - - quit diff --git a/tests/tcg/cris/bare/check_cmpxm.s b/tests/tcg/cris/bare/check_cmpxm.s deleted file mode 100644 index 87ea5bf8e31..00000000000 --- a/tests/tcg/cris/bare/check_cmpxm.s +++ /dev/null @@ -1,106 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 2\n2\n2\n2\nffff\nffff\nffff\nffff\nffffffff\nffffffff\nffffffff\n78134452\n78134452\n78134452\n78134452\n4452\n80000032\n - - .include "testutils.inc" - .data -x: - .byte 0xff - .word 0xffff - .word 0xff - .word 0xffff - .byte 0x89 - .word 0xf789 - .word 0x8002 - .word 0x764 - - start - moveq 2,r3 - move.d x,r5 - cmps.b [r5+],r3 - test_cc 0 0 0 1 - checkr3 2 - - moveq 2,r3 - cmps.w [r5+],r3 - test_cc 0 0 0 1 - checkr3 2 - - moveq 2,r3 - subq 3,r5 - cmpu.b [r5+],r3 - test_cc 1 0 0 1 - checkr3 2 - - moveq 2,r3 - cmpu.w [r5+],r3 - test_cc 1 0 0 1 - subq 3,r5 - checkr3 2 - - move.d 0xffff,r3 - cmpu.b [r5],r3 - test_cc 0 0 0 0 - checkr3 ffff - - move.d 0xffff,r3 - cmpu.w [r5],r3 - test_cc 0 1 0 0 - checkr3 ffff - - move.d 0xffff,r3 - cmps.b [r5],r3 - test_cc 0 0 0 1 - checkr3 ffff - - move.d 0xffff,r3 - cmps.w [r5],r3 - test_cc 0 0 0 1 - checkr3 ffff - - moveq -1,r3 - cmps.b [r5],r3 - test_cc 0 1 0 0 - addq 3,r5 - checkr3 ffffffff - - moveq -1,r3 - cmps.w [r5+],r3 - test_cc 1 0 0 0 - checkr3 ffffffff - - moveq -1,r3 - cmps.w [r5+],r3 - test_cc 0 1 0 0 - checkr3 ffffffff - - move.d 0x78134452,r3 - cmpu.b [r5],r3 - test_cc 0 0 0 0 - checkr3 78134452 - - move.d 0x78134452,r3 - cmps.b [r5+],r3 - test_cc 0 0 0 1 - checkr3 78134452 - - move.d 0x78134452,r3 - cmpu.w [r5],r3 - test_cc 0 0 0 0 - checkr3 78134452 - - move.d 0x78134452,r3 - cmps.w [r5+],r3 - test_cc 0 0 0 1 - checkr3 78134452 - - move.d 0x4452,r3 - cmps.w [r5+],r3 - test_cc 0 0 0 1 - checkr3 4452 - - move.d 0x80000032,r3 - cmpu.w [r5+],r3 - test_cc 0 0 1 0 - checkr3 80000032 - - quit diff --git a/tests/tcg/cris/bare/check_dstep.s b/tests/tcg/cris/bare/check_dstep.s deleted file mode 100644 index bd43b838ea9..00000000000 --- a/tests/tcg/cris/bare/check_dstep.s +++ /dev/null @@ 
-1,42 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: fffffffc\n4\nffff\nfffffffe\n9bf3911b\n0\n - - .include "testutils.inc" - start - moveq -1,r3 - moveq 2,r4 - dstep r4,r3 - test_move_cc 1 0 0 0 - checkr3 fffffffc - - moveq 2,r3 - moveq -1,r4 - dstep r4,r3 - test_move_cc 0 0 0 0 - checkr3 4 - - move.d 0xffff,r4 - move.d r4,r3 - dstep r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - moveq -1,r4 - move.d r4,r3 - dstep r4,r3 - test_move_cc 1 0 0 0 - checkr3 fffffffe - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - dstep r4,r3 - test_move_cc 1 0 0 0 - checkr3 9bf3911b - - move.d 0xffff,r3 - move.d 0x1fffe,r4 - dstep r4,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_jsr.s b/tests/tcg/cris/bare/check_jsr.s deleted file mode 100644 index 10602378731..00000000000 --- a/tests/tcg/cris/bare/check_jsr.s +++ /dev/null @@ -1,85 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: 0\n0\n0\n0\n0\n0\n - -# Test that jsr Rn and jsr [PC+] work. - - .include "testutils.inc" - start -x: - move.d 0f,r6 - setf nzvc - jsr r6 - .if 1; ..asm.arch.cris.v32 - nop - .endif -0: - test_move_cc 1 1 1 1 - move srp,r3 - sub.d 0b,r3 - checkr3 0 - - move.d 1f,r0 - setf nzvc - jsr r0 - .if 1 ; ..asm.arch.cris.v32 - moveq 0,r0 - .endif -6: - nop - quit - -2: - test_move_cc 0 0 0 0 - move srp,r3 - sub.d 3f,r3 - checkr3 0 - jsr 4f - .if 1 ; ..asm.arch.cris.v32 - nop - .endif -7: - nop - quit - -8: - move srp,r3 - sub.d 7b,r3 - checkr3 0 - quit - -4: - move srp,r3 - sub.d 7b,r3 - checkr3 0 - move.d 5f,r3 - jump r3 - .if 1; ..asm.arch.cris.v32 - moveq 0,r3 - .endif - quit - - .space 32770,0 -1: - test_move_cc 1 1 1 1 - move srp,r3 - sub.d 6b,r3 - checkr3 0 - - clearf cznv - jsr 2b - .if 1; ..asm.arch.cris.v32 - nop - .endif -3: - - quit - -5: - move srp,r3 - sub.d 7b,r3 - checkr3 0 - jump 8b - .if 1 ; ..asm.arch.cris.v32 - nop - .endif - quit diff --git a/tests/tcg/cris/bare/check_lapc.s b/tests/tcg/cris/bare/check_lapc.s deleted file mode 100644 index 9a6150b749e..00000000000 --- a/tests/tcg/cris/bare/check_lapc.s +++ /dev/null @@ -1,78 +0,0 @@ -# mach: crisv32 -# output: 0\n0\nfffffffa\nfffffffe\nffffffda\n1e\n1e\n0\n - -.include "testutils.inc" - -; To accommodate dumpr3 with more than one instruction, keep it -; out of lapc operand ranges and difference calculations. 
- - start - lapc.d 0f,r3 -0: - sub.d .,r3 - checkr3 0 - - lapcq 0f,r3 -0: - sub.d .,r3 - checkr3 0 - - lapc.d .,r3 - sub.d .,r3 - checkr3 fffffffa - - lapcq .,r3 - sub.d .,r3 - checkr3 fffffffe - -0: - .rept 16 - nop - .endr - lapc.d 0b,r3 - sub.d .,r3 - checkr3 ffffffda - - setf zcvn - lapc.d 0f,r3 - test_cc 1 1 1 1 - sub.d .,r3 - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop -0: - checkr3 1e -0: - lapcq 0f,r3 - sub.d 0b,r3 - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop - nop -0: - checkr3 1e - clearf cn - setf zv -1: - lapcq .,r3 - test_cc 0 1 1 0 - sub.d 1b,r3 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_lsl.s b/tests/tcg/cris/bare/check_lsl.s deleted file mode 100644 index 9e2ddd7cd00..00000000000 --- a/tests/tcg/cris/bare/check_lsl.s +++ /dev/null @@ -1,217 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: ffffffff\n4\n80000000\nffff8000\n7f19f000\n80000000\n0\n0\n699fc67c\nffffffff\n4\n80000000\nffff8000\n7f19f000\nda670000\nda670000\nda670000\nda67c67c\nffffffff\nfffafffe\n4\nffff0000\nffff8000\n5a67f000\nda67f100\nda67f100\nda67f100\nda67f17c\nfff3faff\nfff3fafe\n4\nffffff00\nffffff00\nffffff80\n5a67f100\n5a67f1f0\n - - .include "testutils.inc" - start - moveq -1,r3 - lslq 0,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - lslq 1,r3 - test_move_cc 0 0 0 0 - checkr3 4 - - moveq -1,r3 - lslq 31,r3 - test_move_cc 1 0 0 0 - checkr3 80000000 - - moveq -1,r3 - lslq 15,r3 - test_move_cc 1 0 0 0 - checkr3 ffff8000 - - move.d 0x5a67f19f,r3 - lslq 12,r3 - test_move_cc 0 0 0 0 - checkr3 7f19f000 - - move.d 0xda67f19f,r3 - move.d 31,r4 - lsl.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 80000000 - - move.d 0xda67f19f,r3 - move.d 32,r4 - lsl.d r4,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - move.d 0xda67f19f,r3 - move.d 33,r4 - lsl.d r4,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - move.d 0xda67f19f,r3 - move.d 66,r4 - lsl.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 699fc67c - - moveq -1,r3 - moveq 0,r4 - lsl.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - moveq 1,r4 - lsl.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 4 - - moveq -1,r3 - moveq 31,r4 - lsl.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 80000000 - - moveq -1,r3 - moveq 15,r4 - lsl.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffff8000 - - move.d 0x5a67f19f,r3 - moveq 12,r4 - lsl.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 7f19f000 - - move.d 0xda67f19f,r3 - move.d 31,r4 - lsl.w r4,r3 - test_move_cc 0 1 0 0 - checkr3 da670000 - - move.d 0xda67f19f,r3 - move.d 32,r4 - lsl.w r4,r3 - test_move_cc 0 1 0 0 - checkr3 da670000 - - move.d 0xda67f19f,r3 - move.d 33,r4 - lsl.w r4,r3 - test_move_cc 0 1 0 0 - checkr3 da670000 - - move.d 0xda67f19f,r3 - move.d 66,r4 - lsl.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 da67c67c - - moveq -1,r3 - moveq 0,r4 - lsl.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0xfffaffff,r3 - moveq 1,r4 - lsl.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 fffafffe - - moveq 2,r3 - moveq 1,r4 - lsl.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 4 - - moveq -1,r3 - moveq 31,r4 - lsl.w r4,r3 - test_move_cc 0 1 0 0 - checkr3 ffff0000 - - moveq -1,r3 - moveq 15,r4 - lsl.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffff8000 - - move.d 0x5a67f19f,r3 - moveq 12,r4 - lsl.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 5a67f000 - - move.d 0xda67f19f,r3 - move.d 31,r4 - lsl.b r4,r3 - test_move_cc 0 1 0 0 - checkr3 da67f100 - - move.d 0xda67f19f,r3 - move.d 32,r4 - lsl.b r4,r3 - test_move_cc 0 1 0 0 - checkr3 da67f100 - - move.d 0xda67f19f,r3 - move.d 33,r4 - lsl.b r4,r3 - 
test_move_cc 0 1 0 0 - checkr3 da67f100 - - move.d 0xda67f19f,r3 - move.d 66,r4 - lsl.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 da67f17c - - move.d 0xfff3faff,r3 - moveq 0,r4 - lsl.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 fff3faff - - move.d 0xfff3faff,r3 - moveq 1,r4 - lsl.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 fff3fafe - - moveq 2,r3 - moveq 1,r4 - lsl.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 4 - - moveq -1,r3 - moveq 31,r4 - lsl.b r4,r3 - test_move_cc 0 1 0 0 - checkr3 ffffff00 - - moveq -1,r3 - moveq 15,r4 - lsl.b r4,r3 - test_move_cc 0 1 0 0 - checkr3 ffffff00 - - moveq -1,r3 - moveq 7,r4 - lsl.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffff80 - - move.d 0x5a67f19f,r3 - moveq 12,r4 - lsl.b r4,r3 - test_move_cc 0 1 0 0 - checkr3 5a67f100 - - move.d 0x5a67f19f,r3 - moveq 4,r4 - lsl.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 5a67f1f0 - - quit diff --git a/tests/tcg/cris/bare/check_lsr.s b/tests/tcg/cris/bare/check_lsr.s deleted file mode 100644 index 18fdbef9b28..00000000000 --- a/tests/tcg/cris/bare/check_lsr.s +++ /dev/null @@ -1,218 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: ffffffff\n1\n1\n1ffff\n5a67f\n1\n0\n0\n3699fc67\nffffffff\n1\n1\n1ffff\n5a67f\nda670000\nda670000\nda670000\nda673c67\nffffffff\nffff7fff\n1\nffff0000\nffff0001\n5a67000f\nda67f100\nda67f100\nda67f100\nda67f127\nffffffff\nffffff7f\n1\nffffff00\nffffff00\nffffff01\n5a67f100\n5a67f109\n - - .include "testutils.inc" - start - moveq -1,r3 - lsrq 0,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - lsrq 1,r3 - test_move_cc 0 0 0 0 - checkr3 1 - - moveq -1,r3 - lsrq 31,r3 - test_move_cc 0 0 0 0 - checkr3 1 - - moveq -1,r3 - lsrq 15,r3 - test_move_cc 0 0 0 0 - checkr3 1ffff - - move.d 0x5a67f19f,r3 - lsrq 12,r3 - test_move_cc 0 0 0 0 - checkr3 5a67f - - move.d 0xda67f19f,r3 - move.d 31,r4 - lsr.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 1 - - move.d 0xda67f19f,r3 - move.d 32,r4 - lsr.d r4,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - move.d 0xda67f19f,r3 - move.d 33,r4 - lsr.d r4,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - move.d 0xda67f19f,r3 - move.d 66,r4 - lsr.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 3699fc67 - - moveq -1,r3 - moveq 0,r4 - lsr.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq 2,r3 - moveq 1,r4 - lsr.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 1 - - moveq -1,r3 - moveq 31,r4 - lsr.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 1 - - moveq -1,r3 - moveq 15,r4 - lsr.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 1ffff - - move.d 0x5a67f19f,r3 - moveq 12,r4 - lsr.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 5a67f - - move.d 0xda67f19f,r3 - move.d 31,r4 - lsr.w r4,r3 - test_move_cc 0 1 0 0 - checkr3 da670000 - - move.d 0xda67f19f,r3 - move.d 32,r4 - lsr.w r4,r3 - test_move_cc 0 1 0 0 - checkr3 da670000 - - move.d 0xda67f19f,r3 - move.d 33,r4 - lsr.w r4,r3 - test_move_cc 0 1 0 0 - checkr3 da670000 - - move.d 0xda67f19f,r3 - move.d 66,r4 - lsr.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 da673c67 - - moveq -1,r3 - moveq 0,r4 - lsr.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq -1,r3 - moveq 1,r4 - lsr.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffff7fff - - moveq 2,r3 - moveq 1,r4 - lsr.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 1 - -;; FIXME: this was wrong. Z should be set. 
- moveq -1,r3 - moveq 31,r4 - lsr.w r4,r3 - test_move_cc 0 1 0 0 - checkr3 ffff0000 - - moveq -1,r3 - moveq 15,r4 - lsr.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffff0001 - - move.d 0x5a67f19f,r3 - moveq 12,r4 - lsr.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 5a67000f - - move.d 0xda67f19f,r3 - move.d 31,r4 - lsr.b r4,r3 - test_move_cc 0 1 0 0 - checkr3 da67f100 - - move.d 0xda67f19f,r3 - move.d 32,r4 - lsr.b r4,r3 - test_move_cc 0 1 0 0 - checkr3 da67f100 - - move.d 0xda67f19f,r3 - move.d 33,r4 - lsr.b r4,r3 - test_move_cc 0 1 0 0 - checkr3 da67f100 - - move.d 0xda67f19f,r3 - move.d 66,r4 - lsr.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 da67f127 - - moveq -1,r3 - moveq 0,r4 - lsr.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq -1,r3 - moveq 1,r4 - lsr.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffffff7f - - moveq 2,r3 - moveq 1,r4 - lsr.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 1 - - moveq -1,r3 - moveq 31,r4 - lsr.b r4,r3 - test_move_cc 0 1 0 0 - checkr3 ffffff00 - - moveq -1,r3 - moveq 15,r4 - lsr.b r4,r3 - test_move_cc 0 1 0 0 - checkr3 ffffff00 - - moveq -1,r3 - moveq 7,r4 - lsr.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffffff01 - - move.d 0x5a67f19f,r3 - moveq 12,r4 - lsr.b r4,r3 - test_move_cc 0 1 0 0 - checkr3 5a67f100 - - move.d 0x5a67f19f,r3 - moveq 4,r4 - lsr.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 5a67f109 - - quit diff --git a/tests/tcg/cris/bare/check_mcp.s b/tests/tcg/cris/bare/check_mcp.s deleted file mode 100644 index e65ccddfd46..00000000000 --- a/tests/tcg/cris/bare/check_mcp.s +++ /dev/null @@ -1,49 +0,0 @@ -# mach: crisv32 -# output: fffffffe\n1\n1ffff\nfffffffe\ncc463bdc\n4c463bdc\n0\n - - .include "testutils.inc" - start - -; Set R, clear C. - move 0x100,ccs - moveq -5,r3 - move 2,mof - mcp mof,r3 - test_cc 1 0 0 0 - checkr3 fffffffe - - moveq 2,r3 - move -1,srp - mcp srp,r3 - test_cc 0 0 0 0 - checkr3 1 - - move 0xffff,srp - move srp,r3 - mcp srp,r3 - test_cc 0 0 0 0 - checkr3 1ffff - - move -1,mof - move mof,r3 - mcp mof,r3 - test_cc 1 0 0 0 - checkr3 fffffffe - - move 0x5432f789,mof - move.d 0x78134452,r3 - mcp mof,r3 - test_cc 1 0 1 0 - checkr3 cc463bdc - - move 0x80000000,srp - mcp srp,r3 - test_cc 0 0 1 0 - checkr3 4c463bdc - - move 0xb3b9c423,srp - mcp srp,r3 - test_cc 0 1 0 0 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_movdelsr1.s b/tests/tcg/cris/bare/check_movdelsr1.s deleted file mode 100644 index 300cc877422..00000000000 --- a/tests/tcg/cris/bare/check_movdelsr1.s +++ /dev/null @@ -1,33 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: aa117acd\n -# output: eeaabb42\n - -; Bug with move to special register in delay slot, due to -; special flush-insn-cache simulator use. Ordinary move worked; -; special register caused branch to fail. - - .include "testutils.inc" - start - move -1,srp - - move.d 0xaa117acd,r1 - moveq 3,r9 - cmpq 1,r9 - bhi 0f - move.d r1,r3 - - fail -0: - checkr3 aa117acd - - move.d 0xeeaabb42,r1 - moveq 3,r9 - cmpq 1,r9 - bhi 0f - move r1,srp - - fail -0: - move srp,r3 - checkr3 eeaabb42 - quit diff --git a/tests/tcg/cris/bare/check_movecr.s b/tests/tcg/cris/bare/check_movecr.s deleted file mode 100644 index da8ec262842..00000000000 --- a/tests/tcg/cris/bare/check_movecr.s +++ /dev/null @@ -1,37 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: ffffff42\n94\nffff4321\n9234\n76543210\n76540000\n - -; Move constant byte, word, dword to register. Check that no extension is -; performed, that only part of the register is set. 
- - .include "testutils.inc" - startnostack - moveq -1,r3 - move.b 0x42,r3 - test_move_cc 0 0 0 0 - checkr3 ffffff42 - - moveq 0,r3 - move.b 0x94,r3 - test_move_cc 1 0 0 0 - checkr3 94 - - moveq -1,r3 - move.w 0x4321,r3 - test_move_cc 0 0 0 0 - checkr3 ffff4321 - - moveq 0,r3 - move.w 0x9234,r3 - test_move_cc 1 0 0 0 - checkr3 9234 - - move.d 0x76543210,r3 - test_move_cc 0 0 0 0 - checkr3 76543210 - - move.w 0,r3 - test_move_cc 0 1 0 0 - checkr3 76540000 - - quit diff --git a/tests/tcg/cris/bare/check_movei.s b/tests/tcg/cris/bare/check_movei.s deleted file mode 100644 index bbfa633373d..00000000000 --- a/tests/tcg/cris/bare/check_movei.s +++ /dev/null @@ -1,50 +0,0 @@ -# mach: crisv32 -# output: fffffffe\n -# output: fffffffe\n - -; Check basic integral-write semantics regarding flags. - - .include "testutils.inc" - start - - move.d 0, $r3 -; A write that works. Check that flags are set correspondingly. - move.d d,r4 - ;; store to bring it into the tlb with the right prot bits - move.d r3,[r4] - moveq -2,r5 - setf c - clearf p - move.d [r4],r3 - ax - move.d r5,[r4] - move.d [r4],r3 - - bcc 0f - nop - fail - -0: - checkr3 fffffffe - -; A write that fails; check flags too. - move.d d,r4 - moveq 23,r5 - setf p - clearf c - move.d [r4],r3 - ax - move.d r5,[r4] - move.d [r4],r3 - - bcs 0f - nop - fail - -0: - checkr3 fffffffe - quit - - .data -d: - .dword 42424242 diff --git a/tests/tcg/cris/bare/check_movemr.s b/tests/tcg/cris/bare/check_movemr.s deleted file mode 100644 index 88489dee319..00000000000 --- a/tests/tcg/cris/bare/check_movemr.s +++ /dev/null @@ -1,78 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: 12345678\n10234567\n12345678\n12344567\n12344523\n76543210\nffffffaa\naa\n9911\nffff9911\n78\n56\n3456\n6712\n - - .include "testutils.inc" - start - - .data -mem1: - .dword 0x12345678 -mem2: - .word 0x4567 -mem3: - .byte 0x23 - .dword 0x76543210 - .byte 0xaa,0x11,0x99 - - .text - move.d mem1,r2 - move.d [r2],r3 - test_move_cc 0 0 0 0 - checkr3 12345678 - - move.d mem2,r3 - move.d [r3],r3 - test_move_cc 0 0 0 0 - checkr3 10234567 - - move.d mem1,r2 - move.d [r2+],r3 - test_move_cc 0 0 0 0 - checkr3 12345678 - - move.w [r2+],r3 - test_move_cc 0 0 0 0 - checkr3 12344567 - - move.b [r2+],r3 - test_move_cc 0 0 0 0 - checkr3 12344523 - - move.d [r2+],r3 - test_move_cc 0 0 0 0 - checkr3 76543210 - - movs.b [r2],r3 - test_move_cc 1 0 0 0 - checkr3 ffffffaa - - movu.b [r2+],r3 - test_move_cc 0 0 0 0 - checkr3 aa - - movu.w [r2],r3 - test_move_cc 0 0 0 0 - checkr3 9911 - - movs.w [r2+],r3 - test_move_cc 1 0 0 0 - checkr3 ffff9911 - - move.d mem1,r13 - movs.b [r13+],r3 - test_move_cc 0 0 0 0 - checkr3 78 - - movu.b [r13],r3 - test_move_cc 0 0 0 0 - checkr3 56 - - movs.w [r13+],r3 - test_move_cc 0 0 0 0 - checkr3 3456 - - movu.w [r13+],r3 - test_move_cc 0 0 0 0 - checkr3 6712 - - quit diff --git a/tests/tcg/cris/bare/check_movemrv32.s b/tests/tcg/cris/bare/check_movemrv32.s deleted file mode 100644 index 53950abd5bd..00000000000 --- a/tests/tcg/cris/bare/check_movemrv32.s +++ /dev/null @@ -1,96 +0,0 @@ -# mach: crisv32 -# output: 15\n7\n2\nffff1234\nb\n16\nf\n2\nffffffef\nf\nffff1234\nf\nfffffff4\nd\nfffffff2\n10\nfffffff2\nd\n - - .include "testutils.inc" - .data -x: - .dword 8,9,10,11 -y: - .dword -12,13,-14,15,16 - - start - moveq 7,r0 - moveq 2,r1 - move.d 0xffff1234,r2 - moveq 21,r3 - move.d x,r4 - setf zcvn - movem r2,[r4+] - test_cc 1 1 1 1 - subq 12,r4 - - checkr3 15 - - move.d [r4+],r3 - checkr3 7 - - move.d [r4+],r3 - checkr3 2 - - move.d [r4+],r3 - checkr3 ffff1234 - - move.d 
[r4+],r3 - checkr3 b - - subq 16,r4 - moveq 22,r0 - moveq 15,r1 - clearf zcvn - movem r0,[r4] - test_cc 0 0 0 0 - move.d [r4+],r3 - checkr3 16 - - move.d r1,r3 - checkr3 f - - move.d [r4+],r3 - checkr3 2 - - subq 8,r4 - moveq 10,r2 - moveq -17,r0 - clearf zc - setf vn - movem r1,[r4] - test_cc 1 0 1 0 - move.d [r4+],r3 - checkr3 ffffffef - - move.d [r4+],r3 - checkr3 f - - move.d [r4+],r3 - checkr3 ffff1234 - - move.d y,r4 - setf zc - clearf vn - movem [r4+],r3 - test_cc 0 1 0 1 - checkr3 f - - move.d r0,r3 - checkr3 fffffff4 - - move.d r1,r3 - checkr3 d - - move.d r2,r3 - checkr3 fffffff2 - - move.d [r4],r3 - checkr3 10 - - subq 8,r4 - setf zcvn - movem [r4+],r0 - test_cc 1 1 1 1 - move.d r0,r3 - checkr3 fffffff2 - - move.d r1,r3 - checkr3 d - - quit diff --git a/tests/tcg/cris/bare/check_mover.s b/tests/tcg/cris/bare/check_mover.s deleted file mode 100644 index b4db595d64b..00000000000 --- a/tests/tcg/cris/bare/check_mover.s +++ /dev/null @@ -1,28 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: ffffff05\nffff0005\n5\nffffff00\n - -; Move between registers. Check that just the subreg is copied. - - .include "testutils.inc" - startnostack - moveq -30,r3 - moveq 5,r4 - move.b r4,r3 - test_move_cc 0 0 0 0 ; FIXME - checkr3 ffffff05 - - move.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffff0005 - - move.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 5 - - moveq -1,r3 - moveq 0,r4 - move.b r4,r3 - test_move_cc 0 1 0 0 - checkr3 ffffff00 - - quit diff --git a/tests/tcg/cris/bare/check_moverm.s b/tests/tcg/cris/bare/check_moverm.s deleted file mode 100644 index eabc9588d42..00000000000 --- a/tests/tcg/cris/bare/check_moverm.s +++ /dev/null @@ -1,45 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: 7823fec2\n10231879\n102318fe\n - - .include "testutils.inc" - start - - .data -mem1: - .dword 0x12345678 -mem2: - .word 0x4567 -mem3: - .byte 0x23 - .dword 0x76543210 - .byte 0xaa,0x11,0x99 - - .text - move.d mem1,r2 - move.d 0x7823fec2,r4 - setf nzvc - move.d r4,[r2+] - test_cc 1 1 1 1 - subq 4,r2 - move.d [r2],r3 - checkr3 7823fec2 - - move.d mem2,r3 - move.d 0x45231879,r4 - clearf nzvc - move.w r4,[r3] - test_cc 0 0 0 0 - move.d [r3],r3 - checkr3 10231879 - - move.d mem2,r2 - moveq -2,r4 - clearf nc - setf zv - move.b r4,[r2+] - test_cc 0 1 1 0 - subq 1,r2 - move.d [r2],r3 - checkr3 102318fe - - quit diff --git a/tests/tcg/cris/bare/check_movmp.s b/tests/tcg/cris/bare/check_movmp.s deleted file mode 100644 index 7fc11f064dd..00000000000 --- a/tests/tcg/cris/bare/check_movmp.s +++ /dev/null @@ -1,131 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: ffffff00\nffff0000\n0\nffffff00\nffff0000\n0\nffffff00\nffff0000\n0\nbb113344\n664433aa\ncc557788\nabcde012\nabcde000\n77880000\n0\n - -# Test generic "move Ps,[]" and "move [],Pd" insns; the ones with -# functionality common to all models. - - .include "testutils.inc" - start - - .data -filler: - .byte 0xaa - .word 0x4433 - .dword 0x55778866 - .byte 0xcc - - .text -; Test that writing to zero-registers is a nop - .if 0 - ; We used to just ignore the writes, but now an error is emitted. We - ; keep the test-code but disabled, in case we need to change this again. - move 0xaa,p0 - move 0x4433,p4 - move 0x55774433,p8 - .endif - - moveq -1,r3 - setf zcvn - clear.b r3 - test_cc 1 1 1 1 - checkr3 ffffff00 - - moveq -1,r3 - clearf zcvn - clear.w r3 - test_cc 0 0 0 0 - checkr3 ffff0000 - - moveq -1,r3 - clear.d r3 - checkr3 0 - -; "Write" using ordinary memory references too. - .if 0 ; See ".if 0" above. 
- move.d filler,r6 - move [r6],p0 - move [r6],p4 - move [r6],p8 - .endif - -# ffffff00\nffff0000\n0\nffffff00\nffff0000\n0\nbb113344\n664433aa\ncc557788\nabcde012\nabcde000\n77880000\n0\n - - moveq -1,r3 - clear.b r3 - checkr3 ffffff00 - - moveq -1,r3 - clear.w r3 - checkr3 ffff0000 - - moveq -1,r3 - clear.d r3 - checkr3 0 - -; And postincremented. - .if 0 ; See ".if 0" above. - move [r6+],p0 - move [r6+],p4 - move [r6+],p8 - .endif - -# ffffff00\nffff0000\n0\nbb113344\n664433aa\ncc557788\nabcde012\nabcde000\n77880000\n0\n - - moveq -1,r3 - clear.b r3 - checkr3 ffffff00 - - moveq -1,r3 - clear.w r3 - checkr3 ffff0000 - - moveq -1,r3 - clear.d r3 - checkr3 0 - -; Now see that we can write to the registers too. -# bb113344\n664433aa\ncc557788\nabcde012\nabcde000\n77880000\n0\n -; [PC+] - move.d filler,r9 - move 0xbb113344,srp - move srp,r3 - checkr3 bb113344 - -; [R+] - move [r9+],srp - move srp,r3 - checkr3 664433aa - -; [R] - move [r9],srp - move srp,r3 - checkr3 cc557788 - -; And check writing to memory, clear and srp. - - move.d filler,r9 - move 0xabcde012,srp - setf zcvn - move srp,[r9+] - test_cc 1 1 1 1 - subq 4,r9 - move.d [r9],r3 - checkr3 abcde012 - - clearf zcvn - clear.b [r9] - test_cc 0 0 0 0 - move.d [r9],r3 - checkr3 abcde000 - - addq 2,r9 - clear.w [r9+] - subq 2,r9 - move.d [r9],r3 - checkr3 77880000 - - clear.d [r9] - move.d [r9],r3 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_movpmv32.s b/tests/tcg/cris/bare/check_movpmv32.s deleted file mode 100644 index daf0970e4a6..00000000000 --- a/tests/tcg/cris/bare/check_movpmv32.s +++ /dev/null @@ -1,35 +0,0 @@ -# mach: crisv32 -# output: 11223320\nbb113344\naa557711\n - -# Test v32-specific special registers. FIXME: more registers. - - .include "testutils.inc" - start - .data -store: - .dword 0x11223344 - .dword 0x77665544 - - .text - moveq -1,r3 - move.d store,r4 - move vr,[r4] - move [r4+],mof - move mof,r3 - checkr3 11223320 - - moveq -1,r3 - clearf zcvn - move 0xbb113344,mof - test_cc 0 0 0 0 - move mof,r3 - checkr3 bb113344 - - setf zcvn - move 0xaa557711,mof - test_cc 1 1 1 1 - move mof,[r4] - move.d [r4],r3 - checkr3 aa557711 - - quit diff --git a/tests/tcg/cris/bare/check_movpr.s b/tests/tcg/cris/bare/check_movpr.s deleted file mode 100644 index eef9bdb4fba..00000000000 --- a/tests/tcg/cris/bare/check_movpr.s +++ /dev/null @@ -1,28 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: ffffff00\nffff0000\n0\nbb113344\n - -# Test generic "move Ps,Rd" and "move Rs,Pd" insns; the ones with -# functionality common to all models. - - .include "testutils.inc" - start - moveq -1,r3 - clear.b r3 - checkr3 ffffff00 - - moveq -1,r3 - clear.w r3 - checkr3 ffff0000 - - moveq -1,r3 - clear.d r3 - checkr3 0 - - moveq -1,r3 - move.d 0xbb113344,r4 - setf zcvn - move r4,srp - move srp,r3 - test_cc 1 1 1 1 - checkr3 bb113344 - quit diff --git a/tests/tcg/cris/bare/check_movprv32.s b/tests/tcg/cris/bare/check_movprv32.s deleted file mode 100644 index d0d90e12465..00000000000 --- a/tests/tcg/cris/bare/check_movprv32.s +++ /dev/null @@ -1,21 +0,0 @@ -# mach: crisv32 -# output: ffffff20\nbb113344\n - -# Test v32-specific special registers. FIXME: more registers. 
- - .include "testutils.inc" - start - moveq -1,r3 - setf zcvn - move vr,r3 - test_cc 1 1 1 1 - checkr3 ffffff20 - - moveq -1,r3 - move.d 0xbb113344,r4 - clearf cvnz - move r4,mof - test_cc 0 0 0 0 - move mof,r3 - checkr3 bb113344 - quit diff --git a/tests/tcg/cris/bare/check_movscr.s b/tests/tcg/cris/bare/check_movscr.s deleted file mode 100644 index 53c8ce6b50d..00000000000 --- a/tests/tcg/cris/bare/check_movscr.s +++ /dev/null @@ -1,29 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: 42\nffffff85\n7685\nffff8765\n0\n - -; Move constant byte, word, dword to register. Check that sign-extension -; is performed. - - .include "testutils.inc" - start - moveq -1,r3 - movs.b 0x42,r3 - checkr3 42 - - movs.b 0x85,r3 - test_move_cc 1 0 0 0 - checkr3 ffffff85 - - movs.w 0x7685,r3 - test_move_cc 0 0 0 0 - checkr3 7685 - - movs.w 0x8765,r3 - test_move_cc 1 0 0 0 - checkr3 ffff8765 - - movs.w 0,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_movsm.s b/tests/tcg/cris/bare/check_movsm.s deleted file mode 100644 index 7074336e788..00000000000 --- a/tests/tcg/cris/bare/check_movsm.s +++ /dev/null @@ -1,44 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: 5\nfffffff5\n5\nfffffff5\n0\n - -; Movs between registers. Check that sign-extension is performed and the -; full register is set. - - .include "testutils.inc" - - .data -x: - .byte 5,-11 - .word 5,-11 - .word 0 - - start - move.d x,r5 - - moveq -1,r3 - movs.b [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 5 - - moveq 0,r3 - movs.b [r5],r3 - test_move_cc 1 0 0 0 - addq 1,r5 - checkr3 fffffff5 - - moveq -1,r3 - movs.w [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 5 - - moveq 0,r3 - movs.w [r5],r3 - test_move_cc 1 0 0 0 - addq 2,r5 - checkr3 fffffff5 - - movs.w [r5],r3 - test_move_cc 0 1 0 0 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_movsr.s b/tests/tcg/cris/bare/check_movsr.s deleted file mode 100644 index d1889a7a1b7..00000000000 --- a/tests/tcg/cris/bare/check_movsr.s +++ /dev/null @@ -1,46 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: 5\nfffffff5\n5\nfffffff5\n0\n - -; Movs between registers. Check that sign-extension is performed and the -; full register is set. - - .include "testutils.inc" - start - moveq -1,r5 - moveq 5,r4 - move.b r4,r5 - moveq -1,r3 - movs.b r5,r3 - test_move_cc 0 0 0 0 - checkr3 5 - - moveq 0,r5 - moveq -11,r4 - move.b r4,r5 - moveq 0,r3 - movs.b r5,r3 - test_move_cc 1 0 0 0 - checkr3 fffffff5 - - moveq -1,r5 - moveq 5,r4 - move.w r4,r5 - moveq -1,r3 - movs.w r5,r3 - test_move_cc 0 0 0 0 - checkr3 5 - - moveq 0,r5 - moveq -11,r4 - move.w r4,r5 - moveq 0,r3 - movs.w r5,r3 - test_move_cc 1 0 0 0 - checkr3 fffffff5 - - moveq 0,r5 - movs.b r5,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_movucr.s b/tests/tcg/cris/bare/check_movucr.s deleted file mode 100644 index 7c8487d1a24..00000000000 --- a/tests/tcg/cris/bare/check_movucr.s +++ /dev/null @@ -1,33 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: 42\n85\n7685\n8765\n0\n - -; Move constant byte, word, dword to register. Check that zero-extension -; is performed. 
- - .include "testutils.inc" - start - moveq -1,r3 - movu.b 0x42,r3 - test_move_cc 0 0 0 0 - checkr3 42 - - moveq -1,r3 - movu.b 0x85,r3 - test_move_cc 0 0 0 0 - checkr3 85 - - moveq -1,r3 - movu.w 0x7685,r3 - test_move_cc 0 0 0 0 - checkr3 7685 - - moveq -1,r3 - movu.w 0x8765,r3 - test_move_cc 0 0 0 0 - checkr3 8765 - - movu.b 0,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_movum.s b/tests/tcg/cris/bare/check_movum.s deleted file mode 100644 index 038e539463b..00000000000 --- a/tests/tcg/cris/bare/check_movum.s +++ /dev/null @@ -1,40 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: 5\nf5\n5\nfff5\n0\n - -; Movu between registers. Check that zero-extension is performed and the -; full register is set. - - .include "testutils.inc" - - .data -x: - .byte 5,-11 - .word 5,-11 - .word 0 - - start - move.d x,r5 - - movu.b [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 5 - - movu.b [r5],r3 - test_move_cc 0 0 0 0 - addq 1,r5 - checkr3 f5 - - movu.w [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 5 - - movu.w [r5],r3 - test_move_cc 0 0 0 0 - addq 2,r5 - checkr3 fff5 - - movu.w [r5],r3 - test_move_cc 0 1 0 0 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_movur.s b/tests/tcg/cris/bare/check_movur.s deleted file mode 100644 index 3ecf475f757..00000000000 --- a/tests/tcg/cris/bare/check_movur.s +++ /dev/null @@ -1,45 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: 5\nf5\n5\nfff5\n0\n - -; Movu between registers. Check that zero-extension is performed and the -; full register is set. - - .include "testutils.inc" - start - moveq -1,r5 - moveq 5,r4 - move.b r4,r5 - moveq -1,r3 - movu.b r5,r3 - test_move_cc 0 0 0 0 - checkr3 5 - - moveq 0,r5 - moveq -11,r4 - move.b r4,r5 - moveq -1,r3 - movu.b r5,r3 - test_move_cc 0 0 0 0 - checkr3 f5 - - moveq -1,r5 - moveq 5,r4 - move.w r4,r5 - moveq -1,r3 - movu.w r5,r3 - test_move_cc 0 0 0 0 - checkr3 5 - - moveq 0,r5 - moveq -11,r4 - move.w r4,r5 - moveq -1,r3 - movu.w r5,r3 - test_move_cc 0 0 0 0 - checkr3 fff5 - - movu.w 0,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_mulv32.s b/tests/tcg/cris/bare/check_mulv32.s deleted file mode 100644 index f379358765b..00000000000 --- a/tests/tcg/cris/bare/check_mulv32.s +++ /dev/null @@ -1,51 +0,0 @@ -# mach: crisv32 -# output: fffffffe\n -# output: ffffffff\n -# output: fffffffe\n -# output: 1\n -# output: fffffffe\n -# output: ffffffff\n -# output: fffffffe\n -# output: 1\n - -; Check that carry is not modified on v32. 
- - .include "testutils.inc" - start - moveq -1,r3 - moveq 2,r4 - setf c - muls.d r4,r3 - test_cc 1 0 0 1 - checkr3 fffffffe - move mof,r3 - checkr3 ffffffff - - moveq -1,r3 - moveq 2,r4 - setf c - mulu.d r4,r3 - test_cc 0 0 1 1 - checkr3 fffffffe - move mof,r3 - checkr3 1 - - moveq -1,r3 - moveq 2,r4 - clearf c - muls.d r4,r3 - test_cc 1 0 0 0 - checkr3 fffffffe - move mof,r3 - checkr3 ffffffff - - moveq -1,r3 - moveq 2,r4 - clearf c - mulu.d r4,r3 - test_cc 0 0 1 0 - checkr3 fffffffe - move mof,r3 - checkr3 1 - - quit diff --git a/tests/tcg/cris/bare/check_mulx.s b/tests/tcg/cris/bare/check_mulx.s deleted file mode 100644 index a7a1f82a829..00000000000 --- a/tests/tcg/cris/bare/check_mulx.s +++ /dev/null @@ -1,257 +0,0 @@ -# mach: crisv10 crisv32 -# output: fffffffe\nffffffff\nfffffffe\n1\nfffffffe\nffffffff\nfffffffe\n1\nfffe0001\n0\nfffe0001\n0\n1\n0\n1\nfffffffe\n193eade2\n277e3a49\n193eade2\n277e3a49\nfffffffe\nffffffff\n1fffe\n0\nfffffffe\nffffffff\n1fffe\n0\n1\n0\nfffe0001\n0\nfdbdade2\nffffffff\n420fade2\n0\nfffffffe\nffffffff\n1fe\n0\nfffffffe\nffffffff\n1fe\n0\n1\n0\nfe01\n0\n1\n0\nfe01\n0\nffffd9e2\nffffffff\n2be2\n0\n0\n0\n0\n0\n - - .include "testutils.inc" - start - - .align 4 - moveq -1,r3 - moveq 2,r4 - muls.d r4,r3 - test_cc 1 0 0 0 - checkr3 fffffffe - move mof,r3 - checkr3 ffffffff - - .align 4 - moveq -1,r3 - moveq 2,r4 - mulu.d r4,r3 - test_cc 0 0 1 0 - checkr3 fffffffe - move mof,r3 - checkr3 1 - - .align 4 - moveq 2,r3 - moveq -1,r4 - muls.d r4,r3 - test_cc 1 0 0 0 - checkr3 fffffffe - move mof,r3 - checkr3 ffffffff - - .align 4 - moveq 2,r3 - moveq -1,r4 - mulu.d r4,r3 - test_cc 0 0 1 0 - checkr3 fffffffe - move mof,r3 - checkr3 1 - - move.d 0xffff,r4 - move.d r4,r3 - muls.d r4,r3 - test_cc 0 0 1 0 - checkr3 fffe0001 - move mof,r3 - checkr3 0 - - move.d 0xffff,r4 - move.d r4,r3 - mulu.d r4,r3 - test_cc 0 0 0 0 - checkr3 fffe0001 - move mof,r3 - checkr3 0 - - moveq -1,r4 - move.d r4,r3 - muls.d r4,r3 - test_cc 0 0 0 0 - checkr3 1 - move mof,r3 - checkr3 0 - - moveq -1,r4 - move.d r4,r3 - mulu.d r4,r3 - test_cc 1 0 1 0 - checkr3 1 - move mof,r3 - checkr3 fffffffe - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - muls.d r4,r3 - test_cc 0 0 1 0 - checkr3 193eade2 - move mof,r3 - checkr3 277e3a49 - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - mulu.d r4,r3 - test_cc 0 0 1 0 - checkr3 193eade2 - move mof,r3 - checkr3 277e3a49 - - move.d 0xffff,r3 - moveq 2,r4 - muls.w r4,r3 - test_cc 1 0 0 0 - checkr3 fffffffe - move mof,r3 - checkr3 ffffffff - - moveq -1,r3 - moveq 2,r4 - mulu.w r4,r3 - test_cc 0 0 0 0 - checkr3 1fffe - move mof,r3 - checkr3 0 - nop - - moveq 2,r3 - move.d 0xffff,r4 - muls.w r4,r3 - test_cc 1 0 0 0 - checkr3 fffffffe - move mof,r3 - checkr3 ffffffff - - moveq 2,r3 - moveq -1,r4 - mulu.w r4,r3 - test_cc 0 0 0 0 - checkr3 1fffe - move mof,r3 - checkr3 0 - - move.d 0xffff,r4 - move.d r4,r3 - muls.w r4,r3 - test_cc 0 0 0 0 - checkr3 1 - move mof,r3 - checkr3 0 - - moveq -1,r4 - move.d r4,r3 - mulu.w r4,r3 - test_cc 0 0 0 0 - checkr3 fffe0001 - move mof,r3 - checkr3 0 - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - muls.w r4,r3 - test_cc 1 0 0 0 - checkr3 fdbdade2 - move mof,r3 - checkr3 ffffffff - nop - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - mulu.w r4,r3 - test_cc 0 0 0 0 - checkr3 420fade2 - move mof,r3 - checkr3 0 - nop - - move.d 0xff,r3 - moveq 2,r4 - muls.b r4,r3 - test_cc 1 0 0 0 - checkr3 fffffffe - move mof,r3 - checkr3 ffffffff - - moveq -1,r3 - moveq 2,r4 - mulu.b r4,r3 - test_cc 0 0 0 0 - checkr3 1fe - move mof,r3 - checkr3 0 - - 
moveq 2,r3 - moveq -1,r4 - muls.b r4,r3 - test_cc 1 0 0 0 - checkr3 fffffffe - move mof,r3 - checkr3 ffffffff - - moveq 2,r3 - moveq -1,r4 - mulu.b r4,r3 - test_cc 0 0 0 0 - checkr3 1fe - move mof,r3 - checkr3 0 - - move.d 0xff,r4 - move.d r4,r3 - muls.b r4,r3 - test_cc 0 0 0 0 - checkr3 1 - move mof,r3 - checkr3 0 - nop - - moveq -1,r4 - move.d r4,r3 - mulu.b r4,r3 - test_cc 0 0 0 0 - checkr3 fe01 - move mof,r3 - checkr3 0 - nop - - move.d 0xfeda49ff,r4 - move.d r4,r3 - muls.b r4,r3 - test_cc 0 0 0 0 - checkr3 1 - move mof,r3 - checkr3 0 - nop - - move.d 0xfeda49ff,r4 - move.d r4,r3 - mulu.b r4,r3 - test_cc 0 0 0 0 - checkr3 fe01 - move mof,r3 - checkr3 0 - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - muls.b r4,r3 - test_cc 1 0 0 0 - checkr3 ffffd9e2 - move mof,r3 - checkr3 ffffffff - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - mulu.b r4,r3 - test_cc 0 0 0 0 - checkr3 2be2 - move mof,r3 - checkr3 0 - - moveq 0,r3 - move.d 0xf87f4aeb,r4 - muls.d r4,r3 - test_cc 0 1 0 0 - checkr3 0 - move mof,r3 - checkr3 0 - - move.d 0xf87f4aeb,r3 - moveq 0,r4 - mulu.d r4,r3 - test_cc 0 1 0 0 - checkr3 0 - move mof,r3 - checkr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_neg.s b/tests/tcg/cris/bare/check_neg.s deleted file mode 100644 index 963c4b6f5ea..00000000000 --- a/tests/tcg/cris/bare/check_neg.s +++ /dev/null @@ -1,104 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: ffffffff\nffffffff\n0\n80000000\n1\nba987655\nffff\nffff\n0\n89ab8000\nffff0001\n45677655\nff\nff\n0\n89abae80\nffffff01\n45678955\n - - .include "testutils.inc" - start - moveq 0,r3 - moveq 1,r4 - neg.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq 1,r3 - moveq 0,r4 - neg.d r3,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - -;; FIXME: this was wrong. - moveq 0,r3 - neg.d r3,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - move.d 0x80000000,r3 - neg.d r3,r3 - test_move_cc 1 0 0 0 - checkr3 80000000 - - moveq -1,r3 - neg.d r3,r3 - test_move_cc 0 0 0 0 - checkr3 1 - - move.d 0x456789ab,r3 - neg.d r3,r3 - test_move_cc 1 0 0 0 - checkr3 ba987655 - - moveq 0,r3 - moveq 1,r4 - neg.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffff - - moveq 1,r3 - moveq 0,r4 - neg.w r3,r3 - test_move_cc 1 0 0 0 - checkr3 ffff - - moveq 0,r3 - neg.w r3,r3 - test_move_cc 0 1 0 0 - checkr3 0 - - move.d 0x89ab8000,r3 - neg.w r3,r3 - test_move_cc 1 0 0 0 - checkr3 89ab8000 - - moveq -1,r3 - neg.w r3,r3 - test_move_cc 0 0 0 0 - checkr3 ffff0001 - - move.d 0x456789ab,r3 - neg.w r3,r3 - test_move_cc 0 0 0 0 - checkr3 45677655 - - moveq 0,r3 - moveq 1,r4 - neg.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 ff - - moveq 1,r3 - moveq 0,r4 - neg.b r3,r3 - test_move_cc 1 0 0 0 - checkr3 ff - - moveq 0,r3 - neg.b r3,r3 - test_move_cc 0 1 0 0 - checkr3 0 - -;; FIXME: was wrong. 
- move.d 0x89abae80,r3 - neg.b r3,r3 - test_move_cc 1 0 0 1 - checkr3 89abae80 - - moveq -1,r3 - neg.b r3,r3 - test_move_cc 0 0 0 0 - checkr3 ffffff01 - - move.d 0x456789ab,r3 - neg.b r3,r3 - test_move_cc 0 0 0 0 - checkr3 45678955 - - quit diff --git a/tests/tcg/cris/bare/check_not.s b/tests/tcg/cris/bare/check_not.s deleted file mode 100644 index 33bcf155e56..00000000000 --- a/tests/tcg/cris/bare/check_not.s +++ /dev/null @@ -1,31 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: fffffffe\nfffffffd\nffff0f00\n0\n87ecbbad\n - - .include "testutils.inc" - start - moveq 1,r3 - not r3 - test_move_cc 1 0 0 0 - checkr3 fffffffe - - moveq 2,r3 - not r3 - test_move_cc 1 0 0 0 - checkr3 fffffffd - - move.d 0xf0ff,r3 - not r3 - test_move_cc 1 0 0 0 - checkr3 ffff0f00 - - moveq -1,r3 - not r3 - test_move_cc 0 1 0 0 - checkr3 0 - - move.d 0x78134452,r3 - not r3 - test_move_cc 1 0 0 0 - checkr3 87ecbbad - - quit diff --git a/tests/tcg/cris/bare/check_orc.s b/tests/tcg/cris/bare/check_orc.s deleted file mode 100644 index c733f036a2b..00000000000 --- a/tests/tcg/cris/bare/check_orc.s +++ /dev/null @@ -1,71 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 3\n3\nffff\nffffffff\n7c33f7db\nffff0003\n3\nfedaffff\n7813f7db\n3\n3\nfeb\n781344db\n - - .include "testutils.inc" - start - moveq 1,r3 - or.d 2,r3 - test_move_cc 0 0 0 0 - checkr3 3 - - moveq 2,r3 - or.d 1,r3 - test_move_cc 0 0 0 0 - checkr3 3 - - move.d 0xf0ff,r3 - or.d 0xff0f,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - moveq -1,r3 - or.d -1,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0x78134452,r3 - or.d 0x5432f789,r3 - test_move_cc 0 0 0 0 - checkr3 7c33f7db - - move.d 0xffff0001,r3 - or.w 2,r3 - test_move_cc 0 0 0 0 - checkr3 ffff0003 - - moveq 2,r3 - or.w 1,r3 - test_move_cc 0 0 0 0 - checkr3 3 - - move.d 0xfedaffaf,r3 - or.w 0xff5f,r3 - test_move_cc 1 0 0 0 - checkr3 fedaffff - - move.d 0x78134452,r3 - or.w 0xf789,r3 - test_move_cc 1 0 0 0 - checkr3 7813f7db - - moveq 1,r3 - or.b 2,r3 - test_move_cc 0 0 0 0 - checkr3 3 - - moveq 2,r3 - or.b 1,r3 - test_move_cc 0 0 0 0 - checkr3 3 - - move.d 0xfa3,r3 - or.b 0x4a,r3 - test_move_cc 1 0 0 0 - checkr3 feb - - move.d 0x78134453,r3 - or.b 0x89,r3 - test_move_cc 1 0 0 0 - checkr3 781344db - - quit diff --git a/tests/tcg/cris/bare/check_orm.s b/tests/tcg/cris/bare/check_orm.s deleted file mode 100644 index ee723a6aa02..00000000000 --- a/tests/tcg/cris/bare/check_orm.s +++ /dev/null @@ -1,75 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 3\n3\nffff\nffffffff\n7c33f7db\nffff0003\n3\nfedaffff\n7813f7db\n3\n3\nfeb\n781344db\n - - .include "testutils.inc" - .data -x: - .dword 2,1,0xff0f,-1,0x5432f789 - .word 2,1,0xff5f,0xf789 - .byte 2,1,0x4a,0x89 - - start - moveq 1,r3 - move.d x,r5 - or.d [r5+],r3 - checkr3 3 - - moveq 2,r3 - or.d [r5],r3 - addq 4,r5 - checkr3 3 - - move.d 0xf0ff,r3 - or.d [r5+],r3 - checkr3 ffff - - moveq -1,r3 - or.d [r5+],r3 - checkr3 ffffffff - - move.d 0x78134452,r3 - or.d [r5+],r3 - checkr3 7c33f7db - - move.d 0xffff0001,r3 - or.w [r5+],r3 - checkr3 ffff0003 - - moveq 2,r3 - or.w [r5],r3 - addq 2,r5 - test_move_cc 0 0 0 0 - checkr3 3 - - move.d 0xfedaffaf,r3 - or.w [r5+],r3 - test_move_cc 1 0 0 0 - checkr3 fedaffff - - move.d 0x78134452,r3 - or.w [r5+],r3 - test_move_cc 1 0 0 0 - checkr3 7813f7db - - moveq 1,r3 - or.b [r5+],r3 - test_move_cc 0 0 0 0 - checkr3 3 - - moveq 2,r3 - or.b [r5],r3 - addq 1,r5 - test_move_cc 0 0 0 0 - checkr3 3 - - move.d 0xfa3,r3 - or.b [r5+],r3 - test_move_cc 1 0 0 0 - checkr3 feb - - move.d 
0x78134453,r3 - or.b [r5],r3 - test_move_cc 1 0 0 0 - checkr3 781344db - - quit diff --git a/tests/tcg/cris/bare/check_orq.s b/tests/tcg/cris/bare/check_orq.s deleted file mode 100644 index 5060edc72d9..00000000000 --- a/tests/tcg/cris/bare/check_orq.s +++ /dev/null @@ -1,41 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 3\n3\nffffffff\nffffffff\n1f\nffffffe0\n7813445e\n - - .include "testutils.inc" - start - moveq 1,r3 - orq 2,r3 - test_move_cc 0 0 0 0 - checkr3 3 - - moveq 2,r3 - orq 1,r3 - test_move_cc 0 0 0 0 - checkr3 3 - - move.d 0xf0ff,r3 - orq -1,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq 0,r3 - orq -1,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - moveq 0,r3 - orq 31,r3 - test_move_cc 0 0 0 0 - checkr3 1f - - moveq 0,r3 - orq -32,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffe0 - - move.d 0x78134452,r3 - orq 12,r3 - test_move_cc 0 0 0 0 - checkr3 7813445e - - quit diff --git a/tests/tcg/cris/bare/check_orr.s b/tests/tcg/cris/bare/check_orr.s deleted file mode 100644 index a514c11bc90..00000000000 --- a/tests/tcg/cris/bare/check_orr.s +++ /dev/null @@ -1,84 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 3\n3\nffff\nffffffff\n7c33f7db\nffff0003\n3\nfedaffff\n7813f7db\n3\n3\nfeb\n781344db\n - - .include "testutils.inc" - start - moveq 1,r3 - moveq 2,r4 - or.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 3 - - moveq 2,r3 - moveq 1,r4 - or.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 3 - - move.d 0xff0f,r4 - move.d 0xf0ff,r3 - or.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffff - - moveq -1,r4 - move.d r4,r3 - or.d r4,r3 - test_move_cc 1 0 0 0 - checkr3 ffffffff - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - or.d r4,r3 - test_move_cc 0 0 0 0 - checkr3 7c33f7db - - move.d 0xffff0001,r3 - moveq 2,r4 - or.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 ffff0003 - - moveq 2,r3 - move.d 0xffff0001,r4 - or.w r4,r3 - test_move_cc 0 0 0 0 - checkr3 3 - - move.d 0xfedaffaf,r3 - move.d 0xffffff5f,r4 - or.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 fedaffff - - move.d 0x5432f789,r4 - move.d 0x78134452,r3 - or.w r4,r3 - test_move_cc 1 0 0 0 - checkr3 7813f7db - - moveq 1,r3 - move.d 0xffffff02,r4 - or.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 3 - - moveq 2,r3 - moveq 1,r4 - or.b r4,r3 - test_move_cc 0 0 0 0 - checkr3 3 - - move.d 0x4a,r4 - move.d 0xfa3,r3 - or.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 feb - - move.d 0x5432f789,r4 - move.d 0x78134453,r3 - or.b r4,r3 - test_move_cc 1 0 0 0 - checkr3 781344db - - quit diff --git a/tests/tcg/cris/bare/check_ret.s b/tests/tcg/cris/bare/check_ret.s deleted file mode 100644 index b44fb25933a..00000000000 --- a/tests/tcg/cris/bare/check_ret.s +++ /dev/null @@ -1,25 +0,0 @@ -# mach: crisv3 crisv8 crisv10 -# output: 3\n - -# Test that ret works. 
- - .include "testutils.inc" - start -x: - moveq 0,r3 - jsr z -w: - quit -y: - addq 1,r3 - checkr3 3 - quit - -z: - addq 1,r3 - move srp,r2 - add.d y-w,r2 - move r2,srp - ret - addq 1,r3 - quit diff --git a/tests/tcg/cris/bare/check_scc.s b/tests/tcg/cris/bare/check_scc.s deleted file mode 100644 index 4a8674cc1a2..00000000000 --- a/tests/tcg/cris/bare/check_scc.s +++ /dev/null @@ -1,95 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 1\n0\n1\n0\n1\n0\n1\n0\n0\n1\n1\n0\n1\n0\n1\n0\n1\n0\n0\n1\n0\n1\n1\n0\n1\n0\n0\n1\n1\n0\n1\n1\n0\n - - .include "testutils.inc" - - .macro lcheckr3 v - move $ccs, $r9 - checkr3 \v - move $r9, $ccs - .endm - - start - clearf nzvc - scc r3 - lcheckr3 1 - scs r3 - lcheckr3 0 - sne r3 - lcheckr3 1 - seq r3 - lcheckr3 0 - svc r3 - lcheckr3 1 - svs r3 - lcheckr3 0 - spl r3 - lcheckr3 1 - smi r3 - lcheckr3 0 - sls r3 - lcheckr3 0 - shi r3 - lcheckr3 1 - sge r3 - lcheckr3 1 - slt r3 - lcheckr3 0 - sgt r3 - lcheckr3 1 - sle r3 - lcheckr3 0 - sa r3 - lcheckr3 1 - setf nzvc - scc r3 - lcheckr3 0 - scs r3 - lcheckr3 1 - sne r3 - lcheckr3 0 - svc r3 - lcheckr3 0 - svs r3 - lcheckr3 1 - spl r3 - lcheckr3 0 - smi r3 - lcheckr3 1 - sls r3 - lcheckr3 1 - shi r3 - lcheckr3 0 - sge r3 - lcheckr3 1 - slt r3 - lcheckr3 0 - sgt r3 - lcheckr3 0 - sle r3 - lcheckr3 1 - sa r3 - lcheckr3 1 - clearf n - sge r3 - lcheckr3 0 - slt r3 - lcheckr3 1 - - .if 1 ;..asm.arch.cris.v32 - setf p - ssb r3 - .else - moveq 1,r3 - .endif - lcheckr3 1 - - .if 1 ;..asm.arch.cris.v32 - clearf p - ssb r3 - .else - moveq 0,r3 - .endif - lcheckr3 0 - - quit diff --git a/tests/tcg/cris/bare/check_subc.s b/tests/tcg/cris/bare/check_subc.s deleted file mode 100644 index e34b5448e25..00000000000 --- a/tests/tcg/cris/bare/check_subc.s +++ /dev/null @@ -1,87 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 1\n1\n1fffe\nfffffffe\ncc463bdb\nffff0001\n1\nfffe\nfedafffe\n78133bdb\nffffff01\n1\nfe\nfeda49fe\n781344db\n85649200\n - - .include "testutils.inc" - start - - moveq -1,r3 - sub.d -2,r3 - test_cc 0 0 0 0 - checkr3 1 - - moveq 2,r3 - sub.d 1,r3 - test_cc 0 0 0 0 - checkr3 1 - - move.d 0xffff,r3 - sub.d -0xffff,r3 - test_cc 0 0 0 1 - checkr3 1fffe - - moveq -1,r3 - sub.d 1,r3 - test_cc 1 0 0 0 - checkr3 fffffffe - - move.d 0x78134452,r3 - sub.d -0x5432f789,r3 - test_cc 1 0 1 1 - checkr3 cc463bdb - - moveq -1,r3 - sub.w -2,r3 - test_cc 0 0 0 0 - checkr3 ffff0001 - - moveq 2,r3 - sub.w 1,r3 - test_cc 0 0 0 0 - checkr3 1 - - move.d 0xffff,r3 - sub.w 1,r3 - test_cc 1 0 0 0 - checkr3 fffe - - move.d 0xfedaffff,r3 - sub.w 1,r3 - test_cc 1 0 0 0 - checkr3 fedafffe - - move.d 0x78134452,r3 - sub.w 0x877,r3 - test_cc 0 0 0 0 - checkr3 78133bdb - - moveq -1,r3 - sub.b -2,r3 - test_cc 0 0 0 0 - checkr3 ffffff01 - - moveq 2,r3 - sub.b 1,r3 - test_cc 0 0 0 0 - checkr3 1 - - move.d 0xff,r3 - sub.b 1,r3 - test_cc 1 0 0 0 - checkr3 fe - - move.d 0xfeda49ff,r3 - sub.b 1,r3 - test_cc 1 0 0 0 - checkr3 feda49fe - - move.d 0x78134452,r3 - sub.b 0x77,r3 - test_cc 1 0 0 1 - checkr3 781344db - - move.d 0x85649282,r3 - sub.b 0x82,r3 - test_cc 0 1 0 0 - checkr3 85649200 - - quit diff --git a/tests/tcg/cris/bare/check_subm.s b/tests/tcg/cris/bare/check_subm.s deleted file mode 100644 index e07ea02dd4c..00000000000 --- a/tests/tcg/cris/bare/check_subm.s +++ /dev/null @@ -1,96 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 1\n1\n1fffe\nfffffffe\ncc463bdb\nffff0001\n1\nfffe\nfedafffe\n78133bdb\nffffff01\n1\nfe\nfeda49fe\n781344db\n85649200\n - - .include "testutils.inc" - .data -x: - .dword 
-2,1,-0xffff,1,-0x5432f789 - .word -2,1,1,0x877 - .byte -2,1,0x77 - .byte 0x22 - - start - moveq -1,r3 - move.d x,r5 - sub.d [r5+],r3 - test_cc 0 0 0 0 - checkr3 1 - - moveq 2,r3 - sub.d [r5],r3 - test_cc 0 0 0 0 - addq 4,r5 - checkr3 1 - - move.d 0xffff,r3 - sub.d [r5+],r3 - test_cc 0 0 0 1 - checkr3 1fffe - - moveq -1,r3 - sub.d [r5+],r3 - test_cc 1 0 0 0 - checkr3 fffffffe - - move.d 0x78134452,r3 - sub.d [r5+],r3 - test_cc 1 0 1 1 - checkr3 cc463bdb - - moveq -1,r3 - sub.w [r5+],r3 - test_cc 0 0 0 0 - checkr3 ffff0001 - - moveq 2,r3 - sub.w [r5+],r3 - test_cc 0 0 0 0 - checkr3 1 - - move.d 0xffff,r3 - sub.w [r5],r3 - test_cc 1 0 0 0 - checkr3 fffe - - move.d 0xfedaffff,r3 - sub.w [r5+],r3 - test_cc 1 0 0 0 - checkr3 fedafffe - - move.d 0x78134452,r3 - sub.w [r5+],r3 - test_cc 0 0 0 0 - checkr3 78133bdb - - moveq -1,r3 - sub.b [r5],r3 - test_cc 0 0 0 0 - addq 1,r5 - checkr3 ffffff01 - - moveq 2,r3 - sub.b [r5],r3 - test_cc 0 0 0 0 - checkr3 1 - - move.d 0xff,r3 - sub.b [r5],r3 - test_cc 1 0 0 0 - checkr3 fe - - move.d 0xfeda49ff,r3 - sub.b [r5+],r3 - test_cc 1 0 0 0 - checkr3 feda49fe - - move.d 0x78134452,r3 - sub.b [r5+],r3 - test_cc 1 0 0 1 - checkr3 781344db - - move.d 0x85649222,r3 - sub.b [r5],r3 - test_cc 0 1 0 0 - checkr3 85649200 - - quit diff --git a/tests/tcg/cris/bare/check_subq.s b/tests/tcg/cris/bare/check_subq.s deleted file mode 100644 index 9e34fa31ab3..00000000000 --- a/tests/tcg/cris/bare/check_subq.s +++ /dev/null @@ -1,52 +0,0 @@ -# mach: crisv3 crisv8 crisv10 crisv32 -# output: 0\nffffffff\nfffffffe\nffff\nff\n56788f9\n56788d9\n567889a\n0\n7ffffffc\n - - .include "testutils.inc" - start - moveq 1,r3 - subq 1,r3 - test_cc 0 1 0 0 - checkr3 0 - - subq 1,r3 - test_cc 1 0 0 1 - checkr3 ffffffff - - subq 1,r3 - test_cc 1 0 0 0 - checkr3 fffffffe - - move.d 0x10000,r3 - subq 1,r3 - test_cc 0 0 0 0 - checkr3 ffff - - move.d 0x100,r3 - subq 1,r3 - test_cc 0 0 0 0 - checkr3 ff - - move.d 0x5678900,r3 - subq 7,r3 - test_cc 0 0 0 0 - checkr3 56788f9 - - subq 32,r3 - test_cc 0 0 0 0 - checkr3 56788d9 - - subq 63,r3 - test_cc 0 0 0 0 - checkr3 567889a - - move.d 34,r3 - subq 34,r3 - test_cc 0 1 0 0 - checkr3 0 - - move.d 0x80000024,r3 - subq 40,r3 - test_cc 0 0 1 0 - checkr3 7ffffffc - - quit diff --git a/tests/tcg/cris/bare/check_subr.s b/tests/tcg/cris/bare/check_subr.s deleted file mode 100644 index 742fbc8915e..00000000000 --- a/tests/tcg/cris/bare/check_subr.s +++ /dev/null @@ -1,102 +0,0 @@ -# mach: crisv0 crisv3 crisv8 crisv10 crisv32 -# output: 1\n1\n1fffe\nfffffffe\ncc463bdb\nffff0001\n1\nfffe\nfedafffe\n78133bdb\nffffff01\n1\nfe\nfeda49fe\n781344db\n85649200\n - - .include "testutils.inc" - start - moveq -1,r3 - moveq -2,r4 - sub.d r4,r3 - test_cc 0 0 0 0 - checkr3 1 - - moveq 2,r3 - moveq 1,r4 - sub.d r4,r3 - test_cc 0 0 0 0 - checkr3 1 - - move.d 0xffff,r3 - move.d -0xffff,r4 - sub.d r4,r3 - test_cc 0 0 0 1 - checkr3 1fffe - - moveq 1,r4 - moveq -1,r3 - sub.d r4,r3 - test_cc 1 0 0 0 - checkr3 fffffffe - - move.d -0x5432f789,r4 - move.d 0x78134452,r3 - sub.d r4,r3 - test_cc 1 0 1 1 - checkr3 cc463bdb - - moveq -1,r3 - moveq -2,r4 - sub.w r4,r3 - test_cc 0 0 0 0 - checkr3 ffff0001 - - moveq 2,r3 - moveq 1,r4 - sub.w r4,r3 - test_cc 0 0 0 0 - checkr3 1 - - move.d 0xffff,r3 - move.d -0xffff,r4 - sub.w r4,r3 - test_cc 1 0 0 0 - checkr3 fffe - - move.d 0xfedaffff,r3 - move.d -0xfedaffff,r4 - sub.w r4,r3 - test_cc 1 0 0 0 - checkr3 fedafffe - - move.d -0x5432f789,r4 - move.d 0x78134452,r3 - sub.w r4,r3 - test_cc 0 0 0 0 - checkr3 78133bdb - - moveq -1,r3 - moveq -2,r4 - 
sub.b r4,r3 - test_cc 0 0 0 0 - checkr3 ffffff01 - - moveq 2,r3 - moveq 1,r4 - sub.b r4,r3 - test_cc 0 0 0 0 - checkr3 1 - - move.d -0xff,r4 - move.d 0xff,r3 - sub.b r4,r3 - test_cc 1 0 0 0 - checkr3 fe - - move.d -0xfeda49ff,r4 - move.d 0xfeda49ff,r3 - sub.b r4,r3 - test_cc 1 0 0 0 - checkr3 feda49fe - - move.d -0x5432f789,r4 - move.d 0x78134452,r3 - sub.b r4,r3 - test_cc 1 0 0 1 - checkr3 781344db - - move.d 0x85649222,r3 - move.d 0x77445622,r4 - sub.b r4,r3 - test_cc 0 1 0 0 - checkr3 85649200 - - quit diff --git a/tests/tcg/cris/bare/check_xarith.s b/tests/tcg/cris/bare/check_xarith.s deleted file mode 100644 index 80038b2ab92..00000000000 --- a/tests/tcg/cris/bare/check_xarith.s +++ /dev/null @@ -1,72 +0,0 @@ - -.include "testutils.inc" - - start - - moveq -1, $r0 - moveq 0, $r1 - addq 1, $r0 - ax - addq 0, $r1 - - move.d $r0, $r3 - checkr3 0 - move.d $r1, $r3 - checkr3 1 - - move.d 0, $r0 - moveq -1, $r1 - subq 1, $r0 - ax - subq 0, $r1 - - move.d $r0, $r3 - checkr3 ffffffff - move.d $r1, $r3 - checkr3 fffffffe - - - moveq -1, $r0 - moveq -1, $r1 - cmpq -1, $r0 - ax - cmpq -1, $r1 - beq 1f - nop - fail -1: - cmpq 0, $r0 - ax - cmpq -1, $r1 - bne 1f - nop - fail -1: - - ;; test for broken X sequence, run it several times. - moveq 8, $r0 -1: - moveq 0, $r3 - move.d $r0, $r1 - andq 1, $r1 - lslq 4, $r1 - moveq 1, $r2 - or.d $r1, $r2 - ba 2f - move $r2, $ccs -2: - addq 0, $r3 - move.d $r0, $r4 - move.d $r1, $r5 - move.d $r2, $r6 - move.d $r3, $r7 - lsrq 4, $r1 - move.d $r1, $r8 - xor $r1, $r3 - checkr3 0 - subq 1, $r0 - bne 1b - nop - - pass - quit diff --git a/tests/tcg/cris/bare/crt.s b/tests/tcg/cris/bare/crt.s deleted file mode 100644 index af027d74755..00000000000 --- a/tests/tcg/cris/bare/crt.s +++ /dev/null @@ -1,13 +0,0 @@ - .data -_stack_start: - .space 8192, 0 -_stack_end: - .text - .global _start -_start: - move.d _stack_end, $sp - jsr main - nop - moveq 0, $r10 - jump exit - nop diff --git a/tests/tcg/cris/bare/sys.c b/tests/tcg/cris/bare/sys.c deleted file mode 100644 index 1644eecc334..00000000000 --- a/tests/tcg/cris/bare/sys.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Helper functions for CRIS system tests - * - * There is no libc and only a limited set of headers. 
- */ - -#include - -void exit(int status) -{ - register unsigned int callno asm ("r9") = 1; /* NR_exit */ - - asm volatile ("break 13\n" - : /* no outputs */ - : "r" (callno) - : "memory"); - while (1) { - /* do nothing */ - }; -} - -size_t write(int fd, const void *buf, size_t count) -{ - register unsigned int callno asm ("r9") = 4; /* NR_write */ - register unsigned int r10 asm ("r10") = fd; - register const void *r11 asm ("r11") = buf; - register size_t r12 asm ("r12") = count; - register unsigned int r asm ("r10"); - - asm volatile ("break 13\n" - : "=r" (r) - : "r" (callno), "0" (r10), "r" (r11), "r" (r12) - : "memory"); - - return r; -} - -static inline int mystrlen(char *s) -{ - int i = 0; - while (s[i]) { - i++; - } - return i; -} - - -void pass(void) -{ - char s[] = "passed.\n"; - write(1, s, sizeof(s) - 1); - exit(0); -} - -void _fail(char *reason) -{ - char s[] = "\nfailed: "; - int len = mystrlen(reason); - write(1, s, sizeof(s) - 1); - write(1, reason, len); - write(1, "\n", 1); - exit(1); -} diff --git a/tests/tcg/cris/bare/testutils.inc b/tests/tcg/cris/bare/testutils.inc deleted file mode 100644 index aa1641b2e6b..00000000000 --- a/tests/tcg/cris/bare/testutils.inc +++ /dev/null @@ -1,117 +0,0 @@ - .syntax no_register_prefix - - .macro start - .text - .global main -main: - .endm - - .macro quit - jump pass - nop - .endm - - .macro pass - jump pass - nop - .endm - - .macro startnostack - start - .endm - - .macro fail - .data -99: - .asciz " checkr3 failed\n" - .text - move.d 99b, $r10 - jsr _fail - nop - .endm - - .macro checkr3 val - cmp.d 0x\val, $r3 - beq 100f - nop - .data -99: - .asciz "checkr3 failed\n" - .text - move.d 99b, $r10 - jsr _fail - nop -100: - .endm - -; Test the condition codes - .macro test_cc N Z V C - .if \N - bpl 9f - nop - .else - bmi 9f - nop - .endif - .if \Z - bne 9f - nop - .else - beq 9f - nop - .endif - .if \V - bvc 9f - nop - .else - bvs 9f - nop - .endif - .if \C - bcc 9f - nop - .else - bcs 9f - nop - .endif - ba 8f - nop -9: - .data -99: - .asciz "test_move_cc failed\n" - .text - move.d 99b, $r10 - jsr _fail - nop -8: - .endm - - - .macro test_move_cc N Z V C - .if \N - bpl 9f - nop - .else - bmi 9f - nop - .endif - .if \Z - bne 9f - nop - .else - beq 9f - nop - .endif - ba 8f - nop -9: - .data -99: - .asciz "test_move_cc failed\n" - .text - move.d 99b, $r10 - jsr _fail - nop -8: - .endm diff --git a/tests/tcg/cris/libc/check_abs.c b/tests/tcg/cris/libc/check_abs.c deleted file mode 100644 index 08b67b6ef0c..00000000000 --- a/tests/tcg/cris/libc/check_abs.c +++ /dev/null @@ -1,40 +0,0 @@ -#include -#include -#include -#include "sys.h" -#include "crisutils.h" - -static always_inline int cris_abs(int n) -{ - int r; - asm ("abs\t%1, %0\n" : "=r" (r) : "r" (n)); - return r; -} - -static always_inline void -verify_abs(int val, int res, - const int n, const int z, const int v, const int c) -{ - int r; - - cris_tst_cc_init(); - r = cris_abs(val); - cris_tst_cc(n, z, v, c); - if (r != res) - err(); -} - -int main(void) -{ - verify_abs(-1, 1, 0, 0, 0, 0); - verify_abs(0x80000000, 0x80000000, 1, 0, 0, 0); - verify_abs(0x7fffffff, 0x7fffffff, 0, 0, 0, 0); - verify_abs(42, 42, 0, 0, 0, 0); - verify_abs(1, 1, 0, 0, 0, 0); - verify_abs(0xffff, 0xffff, 0, 0, 0, 0); - verify_abs(0xffff, 0xffff, 0, 0, 0, 0); - verify_abs(-31, 0x1f, 0, 0, 0, 0); - verify_abs(0, 0, 0, 1, 0, 0); - pass(); - return 0; -} diff --git a/tests/tcg/cris/libc/check_addc.c b/tests/tcg/cris/libc/check_addc.c deleted file mode 100644 index fc3fb1faa80..00000000000 --- 
a/tests/tcg/cris/libc/check_addc.c +++ /dev/null @@ -1,58 +0,0 @@ -#include -#include -#include -#include "sys.h" -#include "crisutils.h" - -static always_inline int cris_addc(int a, const int b) -{ - asm ("addc\t%1, %0\n" : "+r" (a) : "r" (b)); - return a; -} - -#define verify_addc(a, b, res, n, z, v, c) \ -{ \ - int r; \ - r = cris_addc((a), (b)); \ - cris_tst_cc((n), (z), (v), (c)); \ - if (r != (res)) \ - err(); \ -} - -int main(void) -{ - cris_tst_cc_init(); - asm volatile ("clearf cz"); - verify_addc(0, 0, 0, 0, 0, 0, 0); - - cris_tst_cc_init(); - asm volatile ("setf z"); - verify_addc(0, 0, 0, 0, 1, 0, 0); - - cris_tst_cc_init(); - asm volatile ("setf cz"); - verify_addc(0, 0, 1, 0, 0, 0, 0); - cris_tst_cc_init(); - asm volatile ("clearf c"); - verify_addc(-1, 2, 1, 0, 0, 0, 1); - - cris_tst_cc_init(); - asm volatile ("clearf nzv"); - asm volatile ("setf c"); - verify_addc(-1, 2, 2, 0, 0, 0, 1); - - cris_tst_cc_init(); - asm volatile ("setf c"); - verify_addc(0xffff, 0xffff, 0x1ffff, 0, 0, 0, 0); - - cris_tst_cc_init(); - asm volatile ("clearf nzvc"); - verify_addc(-1, -1, 0xfffffffe, 1, 0, 0, 1); - - cris_tst_cc_init(); - asm volatile ("setf c"); - verify_addc(0x78134452, 0x5432f789, 0xcc463bdc, 1, 0, 1, 0); - - pass(); - return 0; -} diff --git a/tests/tcg/cris/libc/check_addcm.c b/tests/tcg/cris/libc/check_addcm.c deleted file mode 100644 index b355ba164fb..00000000000 --- a/tests/tcg/cris/libc/check_addcm.c +++ /dev/null @@ -1,85 +0,0 @@ -#include -#include -#include -#include "sys.h" -#include "crisutils.h" - -/* need to avoid acr as source here. */ -static always_inline int cris_addc_m(int a, const int *b) -{ - asm volatile ("addc [%1], %0\n" : "+r" (a) : "r" (b)); - return a; -} - -/* 'b' is a crisv32 constrain to avoid postinc with $acr. */ -static always_inline int cris_addc_pi_m(int a, int **b) -{ - asm volatile ("addc [%1+], %0\n" : "+r" (a), "+b" (*b)); - return a; -} - -#define verify_addc_m(a, b, res, n, z, v, c) \ -{ \ - int r; \ - r = cris_addc_m((a), (b)); \ - cris_tst_cc((n), (z), (v), (c)); \ - if (r != (res)) \ - err(); \ -} - -#define verify_addc_pi_m(a, b, res, n, z, v, c) \ -{ \ - int r; \ - r = cris_addc_pi_m((a), (b)); \ - cris_tst_cc((n), (z), (v), (c)); \ - if (r != (res)) \ - err(); \ -} - -int x[] = { 0, 0, 2, -1, 0xffff, -1, 0x5432f789}; - -int main(void) -{ - int *p = (void *)&x[0]; -#if 1 - cris_tst_cc_init(); - asm volatile ("clearf cz"); - verify_addc_m(0, p, 0, 0, 0, 0, 0); - - cris_tst_cc_init(); - asm volatile ("setf z"); - verify_addc_m(0, p, 0, 0, 1, 0, 0); - - cris_tst_cc_init(); - asm volatile ("setf c"); - verify_addc_m(0, p, 1, 0, 0, 0, 0); - - cris_tst_cc_init(); - asm volatile ("clearf c"); - verify_addc_pi_m(0, &p, 0, 0, 1, 0, 0); - - p = &x[1]; - cris_tst_cc_init(); - asm volatile ("setf c"); - verify_addc_pi_m(0, &p, 1, 0, 0, 0, 0); - - if (p != &x[2]) - err(); - - cris_tst_cc_init(); - asm volatile ("clearf c"); - verify_addc_pi_m(-1, &p, 1, 0, 0, 0, 1); - - if (p != &x[3]) - err(); -#endif - p = &x[3]; - /* TODO: investigate why this one fails. */ - cris_tst_cc_init(); - asm volatile ("setf c"); - verify_addc_m(2, p, 2, 0, 0, 0, 1); - p += 4; - - pass(); - return 0; -} diff --git a/tests/tcg/cris/libc/check_addo.c b/tests/tcg/cris/libc/check_addo.c deleted file mode 100644 index 4235e5fc65c..00000000000 --- a/tests/tcg/cris/libc/check_addo.c +++ /dev/null @@ -1,125 +0,0 @@ -#include -#include -#include -#include "sys.h" -#include "crisutils.h" - -/* this would be better to do in asm, it's an orgy in GCC inline asm now. 
*/ - -#define cris_addo_b(o, v) \ - asm volatile ("addo.b\t[%0], %1, $acr\n" : : "r" (o), "r" (v) : "acr"); -#define cris_addo_w(o, v) \ - asm volatile ("addo.w\t[%0], %1, $acr\n" : : "r" (o), "r" (v) : "acr"); -#define cris_addo_d(o, v) \ - asm volatile ("addo.d\t[%0], %1, $acr\n" : : "r" (o), "r" (v) : "acr"); -#define cris_addo_pi_b(o, v) \ - asm volatile ("addo.b\t[%0+], %1, $acr\n" \ - : "+b" (o): "r" (v) : "acr"); -#define cris_addo_pi_w(o, v) \ - asm volatile ("addo.w\t[%0+], %1, $acr\n" \ - : "+b" (o): "r" (v) : "acr"); -#define cris_addo_pi_d(o, v) \ - asm volatile ("addo.d\t[%0+], %1, $acr\n" \ - : "+b" (o): "r" (v) : "acr"); - -struct { - uint32_t v1; - uint16_t v2; - uint32_t v3; - uint8_t v4; - uint8_t v5; - uint16_t v6; - uint32_t v7; -} y = { - 32769, - -1, - 5, - 3, -4, - 2, - -76789887 -}; - -static int x[3] = {0x55aa77ff, 0xccff2244, 0x88ccee19}; - -int main(void) -{ - int *r; - unsigned char *t, *p; - - /* Note, this test-case will trig an unaligned access, partly - to x[0] and to [x1]. */ - t = (unsigned char *)x; - t -= 32768; - p = (unsigned char *) &y.v1; - mb(); /* don't reorder anything beyond here. */ - cris_tst_cc_init(); - asm volatile ("setf\tzvnc\n"); - cris_addo_pi_d(p, t); - cris_tst_cc(1, 1, 1, 1); - asm volatile ("move.d\t$acr, %0\n" : "=r" (r)); - if (*r != 0x4455aa77) - err(); - - - t += 32770; - mb(); /* don't reorder anything beyond here. */ - cris_tst_cc_init(); - asm volatile ("setf\tzvnc\n"); - cris_addo_pi_w(p, t); - cris_tst_cc(1, 1, 1, 1); - asm volatile ("move.d\t$acr, %0\n" : "=r" (r)); - if (*r != 0x4455aa77) - err(); - - mb(); /* don't reorder anything beyond here. */ - cris_tst_cc_init(); - asm volatile ("setf\tzvnc\n"); - cris_addo_d(p, r); - cris_tst_cc(1, 1, 1, 1); - p += 4; - asm volatile ("move.d\t$acr, %0\n" : "=r" (r)); - if (*r != 0xee19ccff) - err(); - - mb(); /* don't reorder anything beyond here. */ - cris_tst_cc_init(); - asm volatile ("setf\tzvnc\n"); - cris_addo_pi_b(p, t); - cris_tst_cc(0, 0, 0, 0); - asm volatile ("move.d\t$acr, %0\n" : "=r" (r)); - if (*(uint16_t*)r != 0xff22) - err(); - - mb(); /* don't reorder anything beyond here. */ - cris_tst_cc_init(); - asm volatile ("setf\tzvnc\n"); - cris_addo_b(p, r); - cris_tst_cc(1, 1, 1, 1); - p += 1; - asm volatile ("move.d\t$acr, %0\n" : "=r" (r)); - if (*r != 0x4455aa77) - err(); - - mb(); /* don't reorder anything beyond here. */ - cris_tst_cc_init(); - asm volatile ("setf\tzvnc\n"); - cris_addo_w(p, r); - cris_tst_cc(1, 1, 1, 1); - p += 2; - asm volatile ("move.d\t$acr, %0\n" : "=r" (r)); - if (*r != 0xff224455) - err(); - - mb(); /* don't reorder anything beyond here. */ - cris_tst_cc_init(); - asm volatile ("setf\tzvnc\n"); - cris_addo_pi_d(p, t); - cris_tst_cc(0, 0, 0, 0); - asm volatile ("move.d\t$acr, %0\n" : "=r" (r)); - r = (void*)(((char *)r) + 76789885); - if (*r != 0x55aa77ff) - err(); - - pass(); - return 0; -} diff --git a/tests/tcg/cris/libc/check_addoq.c b/tests/tcg/cris/libc/check_addoq.c deleted file mode 100644 index ed509e27e0e..00000000000 --- a/tests/tcg/cris/libc/check_addoq.c +++ /dev/null @@ -1,44 +0,0 @@ -#include -#include -#include -#include "sys.h" -#include "crisutils.h" - -/* this would be better to do in asm, it's an orgy in GCC inline asm now. */ - -/* ACR will be clobbered. 
*/ -#define cris_addoq(o, v) \ - asm volatile ("addoq\t%1, %0, $acr\n" : : "r" (v), "i" (o) : "acr"); - - -int main(void) -{ - int x[3] = {0x55aa77ff, 0xccff2244, 0x88ccee19}; - int *p, *t = x + 1; - - cris_tst_cc_init(); - asm volatile ("setf\tzvnc\n"); - cris_addoq(0, t); - cris_tst_cc(1, 1, 1, 1); - asm volatile ("move.d\t$acr, %0\n" : "=r" (p)); - if (*p != 0xccff2244) - err(); - - cris_tst_cc_init(); - asm volatile ("setf\tzvnc\n"); - cris_addoq(4, t); - cris_tst_cc(0, 0, 0, 0); - asm volatile ("move.d\t$acr, %0\n" : "=r" (p)); - if (*p != 0x88ccee19) - err(); - - cris_tst_cc_init(); - asm volatile ("clearf\tzvnc\n"); - cris_addoq(-8, t + 1); - cris_tst_cc(0, 0, 0, 0); - asm volatile ("move.d\t$acr, %0\n" : "=r" (p)); - if (*p != 0x55aa77ff) - err(); - pass(); - return 0; -} diff --git a/tests/tcg/cris/libc/check_bound.c b/tests/tcg/cris/libc/check_bound.c deleted file mode 100644 index d956ab9adec..00000000000 --- a/tests/tcg/cris/libc/check_bound.c +++ /dev/null @@ -1,142 +0,0 @@ -#include -#include -#include -#include "sys.h" -#include "crisutils.h" - -static always_inline int cris_bound_b(int v, int b) -{ - int r = v; - asm ("bound.b\t%1, %0\n" : "+r" (r) : "ri" (b)); - return r; -} - -static always_inline int cris_bound_w(int v, int b) -{ - int r = v; - asm ("bound.w\t%1, %0\n" : "+r" (r) : "ri" (b)); - return r; -} - -static always_inline int cris_bound_d(int v, int b) -{ - int r = v; - asm ("bound.d\t%1, %0\n" : "+r" (r) : "ri" (b)); - return r; -} - -int main(void) -{ - int r; - - cris_tst_cc_init(); - r = cris_bound_d(-1, 2); - cris_tst_cc(0, 0, 0, 0); - if (r != 2) - err(); - - cris_tst_cc_init(); - r = cris_bound_d(2, 0xffffffff); - cris_tst_cc(0, 0, 0, 0); - if (r != 2) - err(); - - cris_tst_cc_init(); - r = cris_bound_d(0xffff, 0xffff); - cris_tst_cc(0, 0, 0, 0); - if (r != 0xffff) - err(); - - cris_tst_cc_init(); - r = cris_bound_d(-1, 0xffffffff); - cris_tst_cc(1, 0, 0, 0); - if (r != 0xffffffff) - err(); - - cris_tst_cc_init(); - r = cris_bound_d(0x78134452, 0x5432f789); - cris_tst_cc(0, 0, 0, 0); - if (r != 0x5432f789) - err(); - - cris_tst_cc_init(); - r = cris_bound_w(-1, 2); - cris_tst_cc(0, 0, 0, 0); - if (r != 2) - err(); - - cris_tst_cc_init(); - r = cris_bound_w(-1, 0xffff); - cris_tst_cc(0, 0, 0, 0); - if (r != 0xffff) - err(); - - cris_tst_cc_init(); - r = cris_bound_w(2, 0xffff); - cris_tst_cc(0, 0, 0, 0); - if (r != 2) - err(); - - cris_tst_cc_init(); - r = cris_bound_w(0xfedaffff, 0xffff); - cris_tst_cc(0, 0, 0, 0); - if (r != 0xffff) - err(); - - cris_tst_cc_init(); - r = cris_bound_w(0x78134452, 0xf789); - cris_tst_cc(0, 0, 0, 0); - if (r != 0xf789) - err(); - - cris_tst_cc_init(); - r = cris_bound_b(-1, 2); - cris_tst_cc(0, 0, 0, 0); - if (r != 2) - err(); - - cris_tst_cc_init(); - r = cris_bound_b(2, 0xff); - cris_tst_cc(0, 0, 0, 0); - if (r != 2) - err(); - - cris_tst_cc_init(); - r = cris_bound_b(-1, 0xff); - cris_tst_cc(0, 0, 0, 0); - if (r != 0xff) - err(); - - cris_tst_cc_init(); - r = cris_bound_b(0xff, 0xff); - cris_tst_cc(0, 0, 0, 0); - if (r != 0xff) - err(); - - cris_tst_cc_init(); - r = cris_bound_b(0xfeda49ff, 0xff); - cris_tst_cc(0, 0, 0, 0); - if (r != 0xff) - err(); - - cris_tst_cc_init(); - r = cris_bound_b(0x78134452, 0x89); - cris_tst_cc(0, 0, 0, 0); - if (r != 0x89) - err(); - - cris_tst_cc_init(); - r = cris_bound_w(0x78134452, 0); - cris_tst_cc(0, 1, 0, 0); - if (r != 0) - err(); - - cris_tst_cc_init(); - r = cris_bound_b(0xffff, -1); - cris_tst_cc(0, 0, 0, 0); - if (r != 0xff) - err(); - - pass(); - return 0; -} diff --git 
a/tests/tcg/cris/libc/check_ftag.c b/tests/tcg/cris/libc/check_ftag.c deleted file mode 100644 index aaa5c971159..00000000000 --- a/tests/tcg/cris/libc/check_ftag.c +++ /dev/null @@ -1,37 +0,0 @@ -#include -#include -#include -#include "sys.h" -#include "crisutils.h" - -static always_inline void cris_ftag_i(unsigned int x) -{ - register unsigned int v asm("$r10") = x; - asm ("ftagi\t[%0]\n" : : "r" (v) ); -} -static always_inline void cris_ftag_d(unsigned int x) -{ - register unsigned int v asm("$r10") = x; - asm ("ftagd\t[%0]\n" : : "r" (v) ); -} -static always_inline void cris_fidx_i(unsigned int x) -{ - register unsigned int v asm("$r10") = x; - asm ("fidxi\t[%0]\n" : : "r" (v) ); -} -static always_inline void cris_fidx_d(unsigned int x) -{ - register unsigned int v asm("$r10") = x; - asm ("fidxd\t[%0]\n" : : "r" (v) ); -} - - -int main(void) -{ - cris_ftag_i(0); - cris_ftag_d(0); - cris_fidx_i(0); - cris_fidx_d(0); - pass(); - return 0; -} diff --git a/tests/tcg/cris/libc/check_gcctorture_pr28634-1.c b/tests/tcg/cris/libc/check_gcctorture_pr28634-1.c deleted file mode 100644 index 45ecd159b3c..00000000000 --- a/tests/tcg/cris/libc/check_gcctorture_pr28634-1.c +++ /dev/null @@ -1,15 +0,0 @@ -/* PR rtl-optimization/28634. On targets with delayed branches, - dbr_schedule could do the next iteration's addition in the - branch delay slot, then subtract the value again if the branch - wasn't taken. This can lead to rounding errors. */ -int x = -1; -int y = 1; -int -main (void) -{ - while (y > 0) - y += x; - if (y != x + 1) - abort (); - exit (0); -} diff --git a/tests/tcg/cris/libc/check_gcctorture_pr28634.c b/tests/tcg/cris/libc/check_gcctorture_pr28634.c deleted file mode 100644 index a0c525497d7..00000000000 --- a/tests/tcg/cris/libc/check_gcctorture_pr28634.c +++ /dev/null @@ -1,15 +0,0 @@ -/* PR rtl-optimization/28634. On targets with delayed branches, - dbr_schedule could do the next iteration's addition in the - branch delay slot, then subtract the value again if the branch - wasn't taken. This can lead to rounding errors. */ -double x = -0x1.0p53; -double y = 1; -int -main (void) -{ - while (y > 0) - y += x; - if (y != x + 1) - abort (); - exit (0); -} diff --git a/tests/tcg/cris/libc/check_glibc_kernelversion.c b/tests/tcg/cris/libc/check_glibc_kernelversion.c deleted file mode 100644 index 7aada899115..00000000000 --- a/tests/tcg/cris/libc/check_glibc_kernelversion.c +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Check the lz insn. - */ - -#include -#include -#include -#include "sys.h" - -#define __LINUX_KERNEL_VERSION 131584 - -#define DL_SYSDEP_OSCHECK(FATAL) \ - do { \ - /* Test whether the kernel is new enough. This test is only \ - performed if the library is not compiled to run on all \ - kernels. */ \ - if (__LINUX_KERNEL_VERSION > 0) \ - { \ - char bufmem[64]; \ - char *buf = bufmem; \ - unsigned int version; \ - int parts; \ - char *cp; \ - struct utsname uts; \ - \ - /* Try the uname syscall */ \ - if (__uname (&uts)) \ - { \ - /* This was not successful. Now try reading the /proc \ - filesystem. */ \ - ssize_t reslen; \ - int fd = __open ("/proc/sys/kernel/osrelease", O_RDONLY); \ - if (fd == -1 \ - || (reslen = __read (fd, bufmem, sizeof (bufmem))) <= 0) \ - /* This also didn't work. We give up since we cannot \ - make sure the library can actually work. */ \ - FATAL ("FATAL: cannot determine library version\n"); \ - __close (fd); \ - buf[MIN (reslen, (ssize_t) sizeof (bufmem) - 1)] = '\0'; \ - } \ - else \ - buf = uts.release; \ - \ - /* Now convert it into a number. 
The string consists of at most \ - three parts. */ \ - version = 0; \ - parts = 0; \ - cp = buf; \ - while ((*cp >= '0') && (*cp <= '9')) \ - { \ - unsigned int here = *cp++ - '0'; \ - \ - while ((*cp >= '0') && (*cp <= '9')) \ - { \ - here *= 10; \ - here += *cp++ - '0'; \ - } \ - \ - ++parts; \ - version <<= 8; \ - version |= here; \ - \ - if (*cp++ != '.') \ - /* Another part following? */ \ - break; \ - } \ - \ - if (parts < 3) \ - version <<= 8 * (3 - parts); \ - \ - /* Now we can test with the required version. */ \ - if (version < __LINUX_KERNEL_VERSION) \ - /* Not sufficient. */ \ - FATAL ("FATAL: kernel too old\n"); \ - \ - _dl_osversion = version; \ - } \ - } while (0) - -int main(void) -{ - char bufmem[64] = "2.6.22"; - char *buf = bufmem; - unsigned int version; - int parts; - char *cp; - - version = 0; - parts = 0; - cp = buf; - while ((*cp >= '0') && (*cp <= '9')) - { - unsigned int here = *cp++ - '0'; - - while ((*cp >= '0') && (*cp <= '9')) - { - here *= 10; - here += *cp++ - '0'; - } - - ++parts; - version <<= 8; - version |= here; - - if (*cp++ != '.') - /* Another part following? */ - break; - } - - if (parts < 3) - version <<= 8 * (3 - parts); - if (version < __LINUX_KERNEL_VERSION) - err(); - pass(); - exit(0); -} diff --git a/tests/tcg/cris/libc/check_hello.c b/tests/tcg/cris/libc/check_hello.c deleted file mode 100644 index fb403ba9961..00000000000 --- a/tests/tcg/cris/libc/check_hello.c +++ /dev/null @@ -1,7 +0,0 @@ -#include -#include -int main () -{ - printf ("pass\n"); - exit (0); -} diff --git a/tests/tcg/cris/libc/check_int64.c b/tests/tcg/cris/libc/check_int64.c deleted file mode 100644 index 69caec1bb24..00000000000 --- a/tests/tcg/cris/libc/check_int64.c +++ /dev/null @@ -1,47 +0,0 @@ -#include -#include -#include -#include "sys.h" -#include "crisutils.h" - - -static always_inline int64_t add64(const int64_t a, const int64_t b) -{ - return a + b; -} - -static always_inline int64_t sub64(const int64_t a, const int64_t b) -{ - return a - b; -} - -int main(void) -{ - int64_t a = 1; - int64_t b = 2; - - /* FIXME: add some tests. */ - a = add64(a, b); - if (a != 3) - err(); - - a = sub64(a, b); - if (a != 1) - err(); - - a = add64(a, -4); - if (a != -3) - err(); - - a = add64(a, 3); - if (a != 0) - err(); - - a = 0; - a = sub64(a, 1); - if (a != -1) - err(); - - pass(); - return 0; -} diff --git a/tests/tcg/cris/libc/check_lz.c b/tests/tcg/cris/libc/check_lz.c deleted file mode 100644 index bf051a6b550..00000000000 --- a/tests/tcg/cris/libc/check_lz.c +++ /dev/null @@ -1,49 +0,0 @@ -#include -#include -#include -#include "sys.h" - -static always_inline int cris_lz(int x) -{ - int r; - asm ("lz\t%1, %0\n" : "=r" (r) : "r" (x)); - return r; -} - -void check_lz(void) -{ - int i; - - if (cris_lz(0) != 32) - err(); - if (cris_lz(1) != 31) - err(); - if (cris_lz(2) != 30) - err(); - if (cris_lz(4) != 29) - err(); - if (cris_lz(8) != 28) - err(); - - /* try all positions with a single bit. */ - for (i = 1; i < 32; i++) { - if (cris_lz(1 << (i-1)) != (32 - i)) - err(); - } - - /* try all positions with all bits. */ - for (i = 1; i < 32; i++) { - /* split up this computation to clarify it. 
*/ - uint32_t val; - val = (unsigned int)-1 >> (32 - i); - if (cris_lz(val) != (32 - i)) - err(); - } -} - -int main(void) -{ - check_lz(); - pass(); - exit(0); -} diff --git a/tests/tcg/cris/libc/check_mapbrk.c b/tests/tcg/cris/libc/check_mapbrk.c deleted file mode 100644 index 1aff7622bc2..00000000000 --- a/tests/tcg/cris/libc/check_mapbrk.c +++ /dev/null @@ -1,39 +0,0 @@ -#include -#include - -/* Basic sanity check that syscalls to implement malloc (brk, mmap2, - munmap) are trivially functional. */ - -int main () -{ - void *p1, *p2, *p3, *p4, *p5, *p6; - - if ((p1 = malloc (8100)) == NULL - || (p2 = malloc (16300)) == NULL - || (p3 = malloc (4000)) == NULL - || (p4 = malloc (500)) == NULL - || (p5 = malloc (1023*1024)) == NULL - || (p6 = malloc (8191*1024)) == NULL) - { - printf ("fail\n"); - exit (1); - } - - free (p1); - free (p2); - free (p3); - free (p4); - free (p5); - free (p6); - - p1 = malloc (64000); - if (p1 == NULL) - { - printf ("fail\n"); - exit (1); - } - free (p1); - - printf ("pass\n"); - exit (0); -} diff --git a/tests/tcg/cris/libc/check_mmap1.c b/tests/tcg/cris/libc/check_mmap1.c deleted file mode 100644 index b803f0c431e..00000000000 --- a/tests/tcg/cris/libc/check_mmap1.c +++ /dev/null @@ -1,48 +0,0 @@ -/* -#notarget: cris*-*-elf -*/ - -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include - -int main (int argc, char *argv[]) -{ - int fd = open (argv[0], O_RDONLY); - struct stat sb; - int size; - void *a; - const char *str = "a string you'll only find in the program"; - - if (fd == -1) - { - perror ("open"); - abort (); - } - - if (fstat (fd, &sb) < 0) - { - perror ("fstat"); - abort (); - } - - size = sb.st_size; - - /* We want to test mmapping a size that isn't exactly a page. */ - if ((size & 8191) == 0) - size--; - - a = mmap (NULL, size, PROT_READ, MAP_PRIVATE, fd, 0); - - if (memmem (a, size, str, strlen (str) + 1) == NULL) - abort (); - - printf ("pass\n"); - exit (0); -} diff --git a/tests/tcg/cris/libc/check_mmap2.c b/tests/tcg/cris/libc/check_mmap2.c deleted file mode 100644 index 35139a0ed9a..00000000000 --- a/tests/tcg/cris/libc/check_mmap2.c +++ /dev/null @@ -1,48 +0,0 @@ -/* -#notarget: cris*-*-elf -*/ - -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include - -int main (int argc, char *argv[]) -{ - int fd = open (argv[0], O_RDONLY); - struct stat sb; - int size; - void *a; - const char *str = "a string you'll only find in the program"; - - if (fd == -1) - { - perror ("open"); - abort (); - } - - if (fstat (fd, &sb) < 0) - { - perror ("fstat"); - abort (); - } - - size = sb.st_size; - - /* We want to test mmapping a size that isn't exactly a page. */ - if ((size & 8191) == 0) - size--; - - a = mmap (NULL, size, PROT_READ, MAP_SHARED, fd, 0); - - if (memmem (a, size, str, strlen (str) + 1) == NULL) - abort (); - - printf ("pass\n"); - exit (0); -} diff --git a/tests/tcg/cris/libc/check_mmap3.c b/tests/tcg/cris/libc/check_mmap3.c deleted file mode 100644 index cb890ef1200..00000000000 --- a/tests/tcg/cris/libc/check_mmap3.c +++ /dev/null @@ -1,33 +0,0 @@ -/* -#notarget: cris*-*-elf -*/ - -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include - -int main (int argc, char *argv[]) -{ - volatile unsigned char *a; - - /* Check that we can map a non-multiple of a page and still get a full page. 
*/ - a = mmap (NULL, 0x4c, PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (a == NULL || a == (unsigned char *) -1) - abort (); - - a[0] = 0xbe; - a[8191] = 0xef; - memset ((char *) a + 1, 0, 8190); - - if (a[0] != 0xbe || a[8191] != 0xef) - abort (); - - printf ("pass\n"); - exit (0); -} diff --git a/tests/tcg/cris/libc/check_moveq.c b/tests/tcg/cris/libc/check_moveq.c deleted file mode 100644 index 80f2dff6ab2..00000000000 --- a/tests/tcg/cris/libc/check_moveq.c +++ /dev/null @@ -1,51 +0,0 @@ -#include -#include -#include -#include "sys.h" -#include "crisutils.h" - -#define cris_moveq(dst, src) \ - asm volatile ("moveq %1, %0\n" : "=r" (dst) : "i" (src)); - - - -int main(void) -{ - int t; - - cris_tst_cc_init(); - asm volatile ("setf\tzvnc\n"); - cris_moveq(t, 10); - cris_tst_cc(1, 1, 1, 1); - if (t != 10) - err(); - - /* make sure moveq doesn't clobber the zflag. */ - cris_tst_cc_init(); - asm volatile ("setf vnc\n"); - asm volatile ("clearf z\n"); - cris_moveq(t, 0); - cris_tst_cc(1, 0, 1, 1); - if (t != 0) - err(); - - /* make sure moveq doesn't clobber the nflag. - Also check large immediates */ - cris_tst_cc_init(); - asm volatile ("setf zvc\n"); - asm volatile ("clearf n\n"); - cris_moveq(t, -31); - cris_tst_cc(0, 1, 1, 1); - if (t != -31) - err(); - - cris_tst_cc_init(); - asm volatile ("setf nzvc\n"); - cris_moveq(t, 31); - cris_tst_cc(1, 1, 1, 1); - if (t != 31) - err(); - - pass(); - return 0; -} diff --git a/tests/tcg/cris/libc/check_openpf1.c b/tests/tcg/cris/libc/check_openpf1.c deleted file mode 100644 index 251d26eec2e..00000000000 --- a/tests/tcg/cris/libc/check_openpf1.c +++ /dev/null @@ -1,38 +0,0 @@ -/* Check that --sysroot is applied to open(2). -#sim: --sysroot=@exedir@ - - We assume, with EXE being the name of the executable: - - The simulator executes with cwd the same directory where the executable - is located (so argv[0] contains a plain filename without directory - components). - - There's no /EXE on the host file system. */ - -#include -#include -#include -#include -int main (int argc, char *argv[]) -{ - char *fnam = argv[0]; - FILE *f; - if (argv[0][0] != '/') - { - fnam = malloc (strlen (argv[0]) + 2); - if (fnam == NULL) - abort (); - strcpy (fnam, "/"); - strcat (fnam, argv[0]); - } - - f = fopen (fnam, "rb"); - if (f == NULL) - abort (); - fclose(f); - - /* Cover another execution path. */ - if (fopen ("/nonexistent", "rb") != NULL - || errno != ENOENT) - abort (); - printf ("pass\n"); - return 0; -} diff --git a/tests/tcg/cris/libc/check_openpf2.c b/tests/tcg/cris/libc/check_openpf2.c deleted file mode 100644 index 5d56189f8ea..00000000000 --- a/tests/tcg/cris/libc/check_openpf2.c +++ /dev/null @@ -1,16 +0,0 @@ -/* Check that the simulator has chdir:ed to the --sysroot argument -#sim: --sysroot=@srcdir@ - (or that --sysroot is applied to relative file paths). */ - -#include -#include -#include -int main (int argc, char *argv[]) -{ - FILE *f = fopen ("check_openpf2.c", "rb"); - if (f == NULL) - abort (); - fclose(f); - printf ("pass\n"); - return 0; -} diff --git a/tests/tcg/cris/libc/check_openpf3.c b/tests/tcg/cris/libc/check_openpf3.c deleted file mode 100644 index 557adee92de..00000000000 --- a/tests/tcg/cris/libc/check_openpf3.c +++ /dev/null @@ -1,49 +0,0 @@ -/* Basic file operations (rename, unlink); once without sysroot. We - also test that the simulator has chdir:ed to PREFIX, when defined. 
*/ - -#include -#include -#include -#include -#include -#include - -#ifndef PREFIX -#define PREFIX -#endif - -void err (const char *s) -{ - perror (s); - abort (); -} - -int main (int argc, char *argv[]) -{ - FILE *f; - struct stat buf; - - unlink (PREFIX "testfoo2.tmp"); - - f = fopen ("testfoo1.tmp", "w"); - if (f == NULL) - err ("open"); - fclose (f); - - if (rename (PREFIX "testfoo1.tmp", PREFIX "testfoo2.tmp") != 0) - err ("rename"); - - if (stat (PREFIX "testfoo2.tmp", &buf) != 0 - || !S_ISREG (buf.st_mode)) - err ("stat 1"); - - if (stat ("testfoo2.tmp", &buf) != 0 - || !S_ISREG (buf.st_mode)) - err ("stat 2"); - - if (unlink (PREFIX "testfoo2.tmp") != 0) - err ("unlink"); - - printf ("pass\n"); - return 0; -} diff --git a/tests/tcg/cris/libc/check_openpf5.c b/tests/tcg/cris/libc/check_openpf5.c deleted file mode 100644 index 1f86ea283d4..00000000000 --- a/tests/tcg/cris/libc/check_openpf5.c +++ /dev/null @@ -1,56 +0,0 @@ -/* Check that TRT happens when error on too many opened files. -#notarget: cris*-*-elf -#sim: --sysroot=@exedir@ -*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -int main (int argc, char *argv[]) -{ - int i; - int filemax; - -#ifdef OPEN_MAX - filemax = OPEN_MAX; -#else - filemax = sysconf (_SC_OPEN_MAX); -#endif - - char *fn = malloc (strlen (argv[0]) + 2); - if (fn == NULL) - abort (); - strcpy (fn, "/"); - strcat (fn, argv[0]); - - for (i = 0; i < filemax + 1; i++) - { - if (open (fn, O_RDONLY) < 0) - { - /* Shouldn't happen too early. */ - if (i < filemax - 3 - 1) - { - fprintf (stderr, "i: %d\n", i); - abort (); - } - if (errno != EMFILE) - { - perror ("open"); - abort (); - } - goto ok; - } - } - abort (); - -ok: - printf ("pass\n"); - exit (0); -} diff --git a/tests/tcg/cris/libc/check_settls1.c b/tests/tcg/cris/libc/check_settls1.c deleted file mode 100644 index 3abc3a9ea83..00000000000 --- a/tests/tcg/cris/libc/check_settls1.c +++ /dev/null @@ -1,45 +0,0 @@ -#include -#include -#include -#include - -#include - -#ifndef SYS_set_thread_area -#define SYS_set_thread_area 243 -#endif - -int main (void) -{ - unsigned long tp, old_tp; - int ret; - - asm volatile ("move $pid,%0" : "=r" (old_tp)); - old_tp &= ~0xff; - - ret = syscall (SYS_set_thread_area, 0xf0); - if (ret != -1 || errno != EINVAL) { - syscall (SYS_set_thread_area, old_tp); - perror ("Invalid thread area accepted:"); - abort(); - } - - ret = syscall (SYS_set_thread_area, 0xeddeed00); - if (ret != 0) { - perror ("Valid thread area not accepted: "); - abort (); - } - - asm volatile ("move $pid,%0" : "=r" (tp)); - tp &= ~0xff; - syscall (SYS_set_thread_area, old_tp); - - if (tp != 0xeddeed00) { - * (volatile int *) 0 = 0; - perror ("tls2"); - abort (); - } - - printf ("pass\n"); - return EXIT_SUCCESS; -} diff --git a/tests/tcg/cris/libc/check_sigalrm.c b/tests/tcg/cris/libc/check_sigalrm.c deleted file mode 100644 index 39fa8d9bacd..00000000000 --- a/tests/tcg/cris/libc/check_sigalrm.c +++ /dev/null @@ -1,26 +0,0 @@ -#include -#include -#include -#include - -#define MAGIC (0xdeadbeef) - -int s = 0; -void sighandler(int sig) -{ - s = MAGIC; -} - -int main(int argc, char **argv) -{ - int p; - - p = getpid(); - signal(SIGALRM, sighandler); - kill(p, SIGALRM); - if (s != MAGIC) - return EXIT_FAILURE; - - printf ("passed\n"); - return EXIT_SUCCESS; -} diff --git a/tests/tcg/cris/libc/check_stat1.c b/tests/tcg/cris/libc/check_stat1.c deleted file mode 100644 index 2e2cae51df0..00000000000 --- a/tests/tcg/cris/libc/check_stat1.c +++ /dev/null @@ -1,16 
+0,0 @@ -#include -#include -#include -#include -#include - -int main (void) -{ - struct stat buf; - - if (stat (".", &buf) != 0 - || !S_ISDIR (buf.st_mode)) - abort (); - printf ("pass\n"); - exit (0); -} diff --git a/tests/tcg/cris/libc/check_stat2.c b/tests/tcg/cris/libc/check_stat2.c deleted file mode 100644 index e36172ed257..00000000000 --- a/tests/tcg/cris/libc/check_stat2.c +++ /dev/null @@ -1,20 +0,0 @@ -/* -#notarget: cris*-*-elf -*/ - -#include -#include -#include -#include -#include - -int main (void) -{ - struct stat buf; - - if (lstat (".", &buf) != 0 - || !S_ISDIR (buf.st_mode)) - abort (); - printf ("pass\n"); - exit (0); -} diff --git a/tests/tcg/cris/libc/check_stat3.c b/tests/tcg/cris/libc/check_stat3.c deleted file mode 100644 index 36a9d5d2744..00000000000 --- a/tests/tcg/cris/libc/check_stat3.c +++ /dev/null @@ -1,25 +0,0 @@ -/* Simulator options: -#sim: --sysroot=@exedir@ -*/ -#include -#include -#include -#include -#include -#include - -int main (int argc, char *argv[]) -{ - char path[1024] = "/"; - struct stat buf; - - strncat(path, argv[0], sizeof(path) - 2); - if (stat (".", &buf) != 0 - || !S_ISDIR (buf.st_mode)) - abort (); - if (stat (path, &buf) != 0 - || !S_ISREG (buf.st_mode)) - abort (); - printf ("pass\n"); - exit (0); -} diff --git a/tests/tcg/cris/libc/check_stat4.c b/tests/tcg/cris/libc/check_stat4.c deleted file mode 100644 index 04f21fe7c4f..00000000000 --- a/tests/tcg/cris/libc/check_stat4.c +++ /dev/null @@ -1,27 +0,0 @@ -/* Simulator options: -#notarget: cris*-*-elf -#sim: --sysroot=@exedir@ -*/ - -#include -#include -#include -#include -#include -#include - -int main (int argc, char *argv[]) -{ - char path[1024] = "/"; - struct stat buf; - - strncat(path, argv[0], sizeof(path) - 2); - if (lstat (".", &buf) != 0 - || !S_ISDIR (buf.st_mode)) - abort (); - if (lstat (path, &buf) != 0 - || !S_ISREG (buf.st_mode)) - abort (); - printf ("pass\n"); - exit (0); -} diff --git a/tests/tcg/cris/libc/check_swap.c b/tests/tcg/cris/libc/check_swap.c deleted file mode 100644 index 9a68c1e5d76..00000000000 --- a/tests/tcg/cris/libc/check_swap.c +++ /dev/null @@ -1,76 +0,0 @@ -#include -#include -#include -#include "sys.h" -#include "crisutils.h" - -#define N 8 -#define W 4 -#define B 2 -#define R 1 - -static always_inline int cris_swap(const int mode, int x) -{ - switch (mode) - { - case N: asm ("swapn\t%0\n" : "+r" (x) : "0" (x)); break; - case W: asm ("swapw\t%0\n" : "+r" (x) : "0" (x)); break; - case B: asm ("swapb\t%0\n" : "+r" (x) : "0" (x)); break; - case R: asm ("swapr\t%0\n" : "+r" (x) : "0" (x)); break; - case B|R: asm ("swapbr\t%0\n" : "+r" (x) : "0" (x)); break; - case W|R: asm ("swapwr\t%0\n" : "+r" (x) : "0" (x)); break; - case W|B: asm ("swapwb\t%0\n" : "+r" (x) : "0" (x)); break; - case W|B|R: asm ("swapwbr\t%0\n" : "+r" (x) : "0" (x)); break; - case N|R: asm ("swapnr\t%0\n" : "+r" (x) : "0" (x)); break; - case N|B: asm ("swapnb\t%0\n" : "+r" (x) : "0" (x)); break; - case N|B|R: asm ("swapnbr\t%0\n" : "+r" (x) : "0" (x)); break; - case N|W: asm ("swapnw\t%0\n" : "+r" (x) : "0" (x)); break; - default: - err(); - break; - } - return x; -} - -/* Made this a macro to be able to pick up the location of the errors. */ -#define verify_swap(mode, val, expected, n, z) \ -do { \ - int r; \ - cris_tst_cc_init(); \ - r = cris_swap(mode, val); \ - cris_tst_mov_cc(n, z); \ - if (r != expected) \ - err(); \ -} while(0) - -void check_swap(void) -{ - /* Some of these numbers are borrowed from GDB's cris sim - testsuite. 
*/ - if (cris_swap(N, 0) != 0xffffffff) - err(); - if (cris_swap(W, 0x12345678) != 0x56781234) - err(); - if (cris_swap(B, 0x12345678) != 0x34127856) - err(); - - verify_swap(R, 0x78134452, 0x1ec8224a, 0, 0); - verify_swap(B, 0x78134452, 0x13785244, 0, 0); - verify_swap(B|R, 0x78134452, 0xc81e4a22, 1, 0); - verify_swap(W, 0x78134452, 0x44527813, 0, 0); - verify_swap(W|R, 0x78134452, 0x224a1ec8, 0, 0); - verify_swap(W|B|R, 0x78134452, 0x4a22c81e, 0, 0); - verify_swap(N, 0x78134452, 0x87ecbbad, 1, 0); - verify_swap(N|R, 0x78134452, 0xe137ddb5, 1, 0); - verify_swap(N|B, 0x78134452, 0xec87adbb, 1, 0); - verify_swap(N|B|R, 0x78134452, 0x37e1b5dd, 0, 0); - verify_swap(N|W, 0x78134452, 0xbbad87ec, 1, 0); - verify_swap(N|B|R, 0xffffffff, 0, 0, 1); -} - -int main(void) -{ - check_swap(); - pass(); - return 0; -} diff --git a/tests/tcg/cris/libc/check_time2.c b/tests/tcg/cris/libc/check_time2.c deleted file mode 100644 index 20b69b4f60b..00000000000 --- a/tests/tcg/cris/libc/check_time2.c +++ /dev/null @@ -1,18 +0,0 @@ -/* CB_SYS_time doesn't implement the Linux time syscall; the return - value isn't written to the argument. */ - -#include -#include -#include - -int -main (void) -{ - time_t x = (time_t) -1; - time_t t = time (&x); - - if (t == (time_t) -1 || t != x) - abort (); - printf ("pass\n"); - exit (0); -} diff --git a/tests/tcg/cris/libc/crisutils.h b/tests/tcg/cris/libc/crisutils.h deleted file mode 100644 index bbbe6c55405..00000000000 --- a/tests/tcg/cris/libc/crisutils.h +++ /dev/null @@ -1,76 +0,0 @@ -#ifndef CRISUTILS_H -#define CRISUTILS_H 1 - -static char *tst_cc_loc = NULL; - -#define cris_tst_cc_init() \ -do { tst_cc_loc = "test_cc failed at " CURRENT_LOCATION; } while(0) - -/* We need a real symbol to signal error. */ -void _err(void) { - if (!tst_cc_loc) - tst_cc_loc = "tst_cc_failed\n"; - _fail(tst_cc_loc); -} - -static always_inline void cris_tst_cc_n1(void) -{ - asm volatile ("bpl _err\n" - "nop\n"); -} -static always_inline void cris_tst_cc_n0(void) -{ - asm volatile ("bmi _err\n" - "nop\n"); -} - -static always_inline void cris_tst_cc_z1(void) -{ - asm volatile ("bne _err\n" - "nop\n"); -} -static always_inline void cris_tst_cc_z0(void) -{ - asm volatile ("beq _err\n" - "nop\n"); -} -static always_inline void cris_tst_cc_v1(void) -{ - asm volatile ("bvc _err\n" - "nop\n"); -} -static always_inline void cris_tst_cc_v0(void) -{ - asm volatile ("bvs _err\n" - "nop\n"); -} - -static always_inline void cris_tst_cc_c1(void) -{ - asm volatile ("bcc _err\n" - "nop\n"); -} -static always_inline void cris_tst_cc_c0(void) -{ - asm volatile ("bcs _err\n" - "nop\n"); -} - -static always_inline void cris_tst_mov_cc(int n, int z) -{ - if (n) cris_tst_cc_n1(); else cris_tst_cc_n0(); - if (z) cris_tst_cc_z1(); else cris_tst_cc_z0(); - asm volatile ("" : : "g" (_err)); -} - -static always_inline void cris_tst_cc(const int n, const int z, - const int v, const int c) -{ - if (n) cris_tst_cc_n1(); else cris_tst_cc_n0(); - if (z) cris_tst_cc_z1(); else cris_tst_cc_z0(); - if (v) cris_tst_cc_v1(); else cris_tst_cc_v0(); - if (c) cris_tst_cc_c1(); else cris_tst_cc_c0(); - asm volatile ("" : : "g" (_err)); -} - -#endif diff --git a/tests/tcg/cris/libc/sys.h b/tests/tcg/cris/libc/sys.h deleted file mode 100644 index 3dd47bb6730..00000000000 --- a/tests/tcg/cris/libc/sys.h +++ /dev/null @@ -1,18 +0,0 @@ -#include - -#define STRINGIFY(x) #x -#define TOSTRING(x) STRINGIFY(x) - -#define always_inline inline __attribute__((always_inline)) - -#define CURRENT_LOCATION __FILE__ ":" TOSTRING(__LINE__) - 
-#define err() \ -{ \ - _fail("at " CURRENT_LOCATION " "); \ -} - -#define mb() asm volatile ("" : : : "memory") - -void pass(void); -void _fail(char *reason); diff --git a/tests/tcg/hexagon/usr.c b/tests/tcg/hexagon/usr.c index 92bc86a2134..f0b23d312b3 100644 --- a/tests/tcg/hexagon/usr.c +++ b/tests/tcg/hexagon/usr.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2022-2023 Qualcomm Innovation Center, Inc. All Rights Reserved. + * Copyright(c) 2022-2024 Qualcomm Innovation Center, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -1007,6 +1007,11 @@ int main() TEST_P_OP_R(conv_sf2d_chop, SF_QNaN, 0xffffffffffffffffULL, USR_FPINVF); TEST_P_OP_R(conv_sf2d_chop, SF_SNaN, 0xffffffffffffffffULL, USR_FPINVF); + TEST_R_OP_R(conv_sf2uw, SF_zero_neg, 0, USR_CLEAR); + TEST_R_OP_R(conv_sf2uw_chop, SF_zero_neg, 0, USR_CLEAR); + TEST_P_OP_R(conv_sf2ud, SF_zero_neg, 0, USR_CLEAR); + TEST_P_OP_R(conv_sf2ud_chop, SF_zero_neg, 0, USR_CLEAR); + TEST_R_OP_P(conv_df2sf, DF_QNaN, SF_HEX_NaN, USR_CLEAR); TEST_R_OP_P(conv_df2sf, DF_SNaN, SF_HEX_NaN, USR_FPINVF); TEST_R_OP_P(conv_df2uw, DF_QNaN, 0xffffffff, USR_FPINVF); @@ -1020,6 +1025,11 @@ int main() TEST_R_OP_P(conv_df2uw_chop, DF_QNaN, 0xffffffff, USR_FPINVF); TEST_R_OP_P(conv_df2uw_chop, DF_SNaN, 0xffffffff, USR_FPINVF); + TEST_R_OP_P(conv_df2uw, DF_zero_neg, 0, USR_CLEAR); + TEST_R_OP_P(conv_df2uw_chop, DF_zero_neg, 0, USR_CLEAR); + TEST_P_OP_P(conv_df2ud, DF_zero_neg, 0, USR_CLEAR); + TEST_P_OP_P(conv_df2ud_chop, DF_zero_neg, 0, USR_CLEAR); + /* Test for typo in HELPER(conv_df2uw_chop) */ TEST_R_OP_P(conv_df2uw_chop, 0xffffff7f00000001ULL, 0xffffffff, USR_FPINVF); diff --git a/tests/tcg/i386/Makefile.target b/tests/tcg/i386/Makefile.target index bbe2c44b2a7..f1df40411b0 100644 --- a/tests/tcg/i386/Makefile.target +++ b/tests/tcg/i386/Makefile.target @@ -22,7 +22,7 @@ run-test-i386-sse-exceptions: QEMU_OPTS += -cpu max test-i386-pcmpistri: CFLAGS += -msse4.2 run-test-i386-pcmpistri: QEMU_OPTS += -cpu max -test-i386-bmi2: CFLAGS=-O2 +test-i386-bmi2: CFLAGS=-O2 -fwrapv run-test-i386-bmi2: QEMU_OPTS += -cpu max test-i386-adcox: CFLAGS=-O2 diff --git a/tests/tcg/i386/test-avx.c b/tests/tcg/i386/test-avx.c index 230e6d84b84..80fe363cfc2 100644 --- a/tests/tcg/i386/test-avx.c +++ b/tests/tcg/i386/test-avx.c @@ -244,7 +244,7 @@ v4di indexd = {0x00000002ffffffcdull, 0xfffffff500000010ull, 0x0000003afffffff0ull, 0x000000000000000eull}; v4di gather_mem[0x20]; -_Static_assert(sizeof(gather_mem) == 1024); +_Static_assert(sizeof(gather_mem) == 1024, "gather_mem not expected size"); void init_f16reg(v4di *r) { diff --git a/tests/tcg/i386/test-i386-adcox.c b/tests/tcg/i386/test-i386-adcox.c index 16169efff82..a717064adb0 100644 --- a/tests/tcg/i386/test-i386-adcox.c +++ b/tests/tcg/i386/test-i386-adcox.c @@ -29,7 +29,7 @@ void test_adox_adcx(uint32_t in_c, uint32_t in_o, REG adcx_operand, REG adox_ope "adcx %3, %1;" "pushf; pop %0" : "+r" (flags), "+r" (out_adcx), "+r" (out_adox) - : "r" ((REG)-1), "0" (flags), "1" (out_adcx), "2" (out_adox)); + : "r" ((REG) - 1), "0" (flags), "1" (out_adcx), "2" (out_adox)); assert(out_adcx == in_c + adcx_operand - 1); assert(out_adox == in_o + adox_operand - 1); @@ -53,8 +53,8 @@ void test_adcx_adox(uint32_t in_c, uint32_t in_o, REG adcx_operand, REG adox_ope "adcx %3, %1;" "adox %3, %2;" "pushf; pop %0" - : "+r" (flags), "+r" (out_adcx), "+r" (out_adox) - : "r" ((REG)-1), "0" (flags), "1" (out_adcx), "2" (out_adox)); + : 
"+r"(flags), "+r"(out_adcx), "+r"(out_adox) + : "r" ((REG)-1)); assert(out_adcx == in_c + adcx_operand - 1); assert(out_adox == in_o + adox_operand - 1); diff --git a/tests/tcg/loongarch64/system/kernel.ld b/tests/tcg/loongarch64/system/kernel.ld index f1a7c0168c5..56d8588f1a3 100644 --- a/tests/tcg/loongarch64/system/kernel.ld +++ b/tests/tcg/loongarch64/system/kernel.ld @@ -3,7 +3,7 @@ ENTRY(_start) SECTIONS { /* Linux kernel legacy start address. */ - . = 0x9000000000200000; + . = 0x200000; _text = .; .text : { *(.text) diff --git a/tests/tcg/loongarch64/system/regdef.h b/tests/tcg/loongarch64/system/regdef.h index faa09b2377c..b586b4e86d3 100644 --- a/tests/tcg/loongarch64/system/regdef.h +++ b/tests/tcg/loongarch64/system/regdef.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2021 Loongson Technology Corporation Limited */ diff --git a/tests/tcg/mips/include/wrappers_mips64r6.h b/tests/tcg/mips/include/wrappers_mips64r6.h index d1e5edb632e..33d03de50b2 100644 --- a/tests/tcg/mips/include/wrappers_mips64r6.h +++ b/tests/tcg/mips/include/wrappers_mips64r6.h @@ -23,6 +23,7 @@ #ifndef WRAPPERS_MIPS64R6_H #define WRAPPERS_MIPS64R6_H +#include #define DO_MIPS64R6__RD__RS(suffix, mnemonic) \ static inline void do_mips64r6_##suffix(const void *input, \ @@ -80,4 +81,35 @@ DO_MIPS64R6__RD__RS_RT(DMULU, dmulu) DO_MIPS64R6__RD__RS_RT(DMUHU, dmuhu) +#define DO_MIPS64R6__RT__RS_RT(suffix, mnemonic) \ +static inline void do_mips64r6_##suffix(const void *input1, \ + const void *input2, \ + void *output) \ +{ \ + if (strncmp(#mnemonic, "crc32", 5) == 0) \ + __asm__ volatile ( \ + ".set crc\n\t" \ + ); \ + \ + __asm__ volatile ( \ + "ld $t1, 0(%0)\n\t" \ + "ld $t2, 0(%1)\n\t" \ + #mnemonic " $t2, $t1, $t2\n\t" \ + "sd $t2, 0(%2)\n\t" \ + : \ + : "r" (input1), "r" (input2), "r" (output) \ + : "t0", "t1", "t2", "memory" \ + ); \ +} + +DO_MIPS64R6__RT__RS_RT(CRC32B, crc32b) +DO_MIPS64R6__RT__RS_RT(CRC32H, crc32h) +DO_MIPS64R6__RT__RS_RT(CRC32W, crc32w) +DO_MIPS64R6__RT__RS_RT(CRC32D, crc32d) + +DO_MIPS64R6__RT__RS_RT(CRC32CB, crc32cb) +DO_MIPS64R6__RT__RS_RT(CRC32CH, crc32ch) +DO_MIPS64R6__RT__RS_RT(CRC32CW, crc32cw) +DO_MIPS64R6__RT__RS_RT(CRC32CD, crc32cd) + #endif diff --git a/tests/tcg/mips/user/isa/mips64r6/crc/Makefile b/tests/tcg/mips/user/isa/mips64r6/crc/Makefile new file mode 100644 index 00000000000..b7f5811a5e9 --- /dev/null +++ b/tests/tcg/mips/user/isa/mips64r6/crc/Makefile @@ -0,0 +1,40 @@ +# +# Test program for MIPS64R6 CRC32 instructions +# +# Copyright (C) 2025 Aleksandar Rakic +# +# SPDX-License-Identifier: GPL-2.0-or-later +# + +ifndef PREFIX + $(error "PREFIX not set, please export GNU Toolchain install directory.") +endif + +ifndef SYSROOT + $(error "SYSROOT not set, please export GNU Toolchain system root directory.") +endif + +SIM = ../../../../../../../build/qemu-mips64 +SIM_FLAGS = -L $(SYSROOT) + +CC = $(PREFIX)/bin/mips64-r6-linux-gnu-gcc + +TESTCASES = test_mips64r6_crc32b.tst +TESTCASES += test_mips64r6_crc32h.tst +TESTCASES += test_mips64r6_crc32w.tst +TESTCASES += test_mips64r6_crc32d.tst +TESTCASES += test_mips64r6_crc32cb.tst +TESTCASES += test_mips64r6_crc32ch.tst +TESTCASES += test_mips64r6_crc32cw.tst +TESTCASES += test_mips64r6_crc32cd.tst + +all: $(TESTCASES) + @for case in $(TESTCASES); do \ + echo $(SIM) $(SIM_FLAGS) ./$$case; \ + $(SIM) $(SIM_FLAGS) ./$$case; \ + echo $(RM) -rf ./$$case; \ + $(RM) -rf ./$$case; \ + done + +%.tst: %.c + $(CC) $< -o $@ diff --git 
a/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32b.c b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32b.c new file mode 100644 index 00000000000..bb1f3f69249 --- /dev/null +++ b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32b.c @@ -0,0 +1,142 @@ +/* + * Test program for MIPS64R6 instruction CRC32B + * + * Copyright (C) 2019 Wave Computing, Inc. + * Copyright (C) 2019 Aleksandar Markovic + * Copyright (C) 2025 Aleksandar Rakic + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include + +#include "../../../../include/wrappers_mips64r6.h" +#include "../../../../include/test_inputs_64.h" +#include "../../../../include/test_utils_64.h" + +#define TEST_COUNT_TOTAL (PATTERN_INPUTS_64_COUNT + RANDOM_INPUTS_64_COUNT) + +int32_t main(void) +{ + char *isa_ase_name = "mips64r6"; + char *group_name = "CRC with reversed polynomial 0xEDB88320"; + char *instruction_name = "CRC32B"; + int32_t ret; + uint32_t i, j; + struct timeval start, end; + double elapsed_time; + + uint64_t b64_result[TEST_COUNT_TOTAL]; + uint64_t b64_expect[TEST_COUNT_TOTAL] = { + 0x0000000000ffffffULL, /* 0 */ + 0x000000002d02ef8dULL, + 0x000000001bab0fd1ULL, + 0x0000000036561fa3ULL, + 0xffffffffbf1caddaULL, + 0xffffffff92e1bda8ULL, + 0x00000000278c7949ULL, + 0x000000000a71693bULL, + 0x000000002dfd1072ULL, /* 8 */ + 0x0000000000000000ULL, + 0x0000000036a9e05cULL, + 0x000000001b54f02eULL, + 0xffffffff921e4257ULL, + 0xffffffffbfe35225ULL, + 0x000000000a8e96c4ULL, + 0x00000000277386b6ULL, + 0x000000001bfe5a84ULL, /* 16 */ + 0x0000000036034af6ULL, + 0x0000000000aaaaaaULL, + 0x000000002d57bad8ULL, + 0xffffffffa41d08a1ULL, + 0xffffffff89e018d3ULL, + 0x000000003c8ddc32ULL, + 0x000000001170cc40ULL, + 0x0000000036fcb509ULL, /* 24 */ + 0x000000001b01a57bULL, + 0x000000002da84527ULL, + 0x0000000000555555ULL, + 0xffffffff891fe72cULL, + 0xffffffffa4e2f75eULL, + 0x00000000118f33bfULL, + 0x000000003c7223cdULL, + 0xffffffffbf2f9ee9ULL, /* 32 */ + 0xffffffff92d28e9bULL, + 0xffffffffa47b6ec7ULL, + 0xffffffff89867eb5ULL, + 0x0000000000ccccccULL, + 0x000000002d31dcbeULL, + 0xffffffff985c185fULL, + 0xffffffffb5a1082dULL, + 0xffffffff922d7164ULL, /* 40 */ + 0xffffffffbfd06116ULL, + 0xffffffff8979814aULL, + 0xffffffffa4849138ULL, + 0x000000002dce2341ULL, + 0x0000000000333333ULL, + 0xffffffffb55ef7d2ULL, + 0xffffffff98a3e7a0ULL, + 0x0000000027fdbe55ULL, /* 48 */ + 0x000000000a00ae27ULL, + 0x000000003ca94e7bULL, + 0x0000000011545e09ULL, + 0xffffffff981eec70ULL, + 0xffffffffb5e3fc02ULL, + 0x00000000008e38e3ULL, + 0x000000002d732891ULL, + 0x000000000aff51d8ULL, /* 56 */ + 0x00000000270241aaULL, + 0x0000000011aba1f6ULL, + 0x000000003c56b184ULL, + 0xffffffffb51c03fdULL, + 0xffffffff98e1138fULL, + 0x000000002d8cd76eULL, + 0x000000000071c71cULL, + 0x0000000000286255ULL, /* 64 */ + 0x00000000784a5a65ULL, + 0xffffffff9bdd0d3bULL, + 0xffffffffe7e61ce5ULL, + 0x00000000782fabf7ULL, + 0x00000000004d93c7ULL, + 0xffffffffe3dac499ULL, + 0xffffffff9fe1d547ULL, + 0xffffffff9b4ca0e5ULL, /* 72 */ + 0xffffffffe32e98d5ULL, + 0x0000000000b9cf8bULL, + 0x000000007c82de55ULL, + 0xffffffffe7904f52ULL, + 0xffffffff9ff27762ULL, + 0x000000007c65203cULL, + 0x00000000005e31e2ULL, + }; + + gettimeofday(&start, NULL); + + for (i = 0; i < PATTERN_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < PATTERN_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32B(b64_pattern + i, b64_pattern + j, + b64_result + (PATTERN_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + for (i = 0; i < RANDOM_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < 
RANDOM_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32B(b64_random + i, b64_random + j, + b64_result + (((PATTERN_INPUTS_64_SHORT_COUNT) * + (PATTERN_INPUTS_64_SHORT_COUNT)) + + RANDOM_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + gettimeofday(&end, NULL); + + elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; + elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; + + ret = check_results_64(isa_ase_name, group_name, instruction_name, + TEST_COUNT_TOTAL, elapsed_time, b64_result, + b64_expect); + + return ret; +} diff --git a/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32cb.c b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32cb.c new file mode 100644 index 00000000000..1439d44bf20 --- /dev/null +++ b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32cb.c @@ -0,0 +1,142 @@ +/* + * Test program for MIPS64R6 instruction CRC32CB + * + * Copyright (C) 2019 Wave Computing, Inc. + * Copyright (C) 2019 Aleksandar Markovic + * Copyright (C) 2025 Aleksandar Rakic + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include + +#include "../../../../include/wrappers_mips64r6.h" +#include "../../../../include/test_inputs_64.h" +#include "../../../../include/test_utils_64.h" + +#define TEST_COUNT_TOTAL (PATTERN_INPUTS_64_COUNT + RANDOM_INPUTS_64_COUNT) + +int32_t main(void) +{ + char *isa_ase_name = "mips64r6"; + char *group_name = "CRC with reversed polynomial 0x82F63B78"; + char *instruction_name = "CRC32CB"; + int32_t ret; + uint32_t i, j; + struct timeval start, end; + double elapsed_time; + + uint64_t b64_result[TEST_COUNT_TOTAL]; + uint64_t b64_expect[TEST_COUNT_TOTAL] = { + 0x0000000000ffffffULL, /* 0 */ + 0xffffffffad7d5351ULL, + 0x00000000647e6465ULL, + 0xffffffffc9fcc8cbULL, + 0x00000000237f7689ULL, + 0xffffffff8efdda27ULL, + 0xffffffff837defedULL, + 0x000000002eff4343ULL, + 0xffffffffad82acaeULL, /* 8 */ + 0x0000000000000000ULL, + 0xffffffffc9033734ULL, + 0x0000000064819b9aULL, + 0xffffffff8e0225d8ULL, + 0x0000000023808976ULL, + 0x000000002e00bcbcULL, + 0xffffffff83821012ULL, + 0x00000000642b3130ULL, /* 16 */ + 0xffffffffc9a99d9eULL, + 0x0000000000aaaaaaULL, + 0xffffffffad280604ULL, + 0x0000000047abb846ULL, + 0xffffffffea2914e8ULL, + 0xffffffffe7a92122ULL, + 0x000000004a2b8d8cULL, + 0xffffffffc9566261ULL, /* 24 */ + 0x0000000064d4cecfULL, + 0xffffffffadd7f9fbULL, + 0x0000000000555555ULL, + 0xffffffffead6eb17ULL, + 0x00000000475447b9ULL, + 0x000000004ad47273ULL, + 0xffffffffe756deddULL, + 0x00000000234c45baULL, /* 32 */ + 0xffffffff8ecee914ULL, + 0x0000000047cdde20ULL, + 0xffffffffea4f728eULL, + 0x0000000000ccccccULL, + 0xffffffffad4e6062ULL, + 0xffffffffa0ce55a8ULL, + 0x000000000d4cf906ULL, + 0xffffffff8e3116ebULL, /* 40 */ + 0x0000000023b3ba45ULL, + 0xffffffffeab08d71ULL, + 0x00000000473221dfULL, + 0xffffffffadb19f9dULL, + 0x0000000000333333ULL, + 0x000000000db306f9ULL, + 0xffffffffa031aa57ULL, + 0xffffffff830c28f1ULL, /* 48 */ + 0x000000002e8e845fULL, + 0xffffffffe78db36bULL, + 0x000000004a0f1fc5ULL, + 0xffffffffa08ca187ULL, + 0x000000000d0e0d29ULL, + 0x00000000008e38e3ULL, + 0xffffffffad0c944dULL, + 0x000000002e717ba0ULL, /* 56 */ + 0xffffffff83f3d70eULL, + 0x000000004af0e03aULL, + 0xffffffffe7724c94ULL, + 0x000000000df1f2d6ULL, + 0xffffffffa0735e78ULL, + 0xffffffffadf36bb2ULL, + 0x000000000071c71cULL, + 0x0000000000286255ULL, /* 64 */ + 0xffffffffcbefd6b4ULL, + 0xffffffffc334e94fULL, + 0xffffffffac268ec5ULL, + 0xffffffffcb8a2726ULL, + 0x00000000004d93c7ULL, + 0x000000000896ac3cULL, + 0x000000006784cbb6ULL, + 0xffffffffc3a54491ULL, /* 72 */ + 
0x000000000862f070ULL, + 0x0000000000b9cf8bULL, + 0x000000006faba801ULL, + 0xffffffffac50dd72ULL, + 0x0000000067976993ULL, + 0x000000006f4c5668ULL, + 0x00000000005e31e2ULL, + }; + + gettimeofday(&start, NULL); + + for (i = 0; i < PATTERN_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < PATTERN_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32CB(b64_pattern + i, b64_pattern + j, + b64_result + (PATTERN_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + for (i = 0; i < RANDOM_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < RANDOM_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32CB(b64_random + i, b64_random + j, + b64_result + (((PATTERN_INPUTS_64_SHORT_COUNT) * + (PATTERN_INPUTS_64_SHORT_COUNT)) + + RANDOM_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + gettimeofday(&end, NULL); + + elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; + elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; + + ret = check_results_64(isa_ase_name, group_name, instruction_name, + TEST_COUNT_TOTAL, elapsed_time, b64_result, + b64_expect); + + return ret; +} diff --git a/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32cd.c b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32cd.c new file mode 100644 index 00000000000..bf258e0696d --- /dev/null +++ b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32cd.c @@ -0,0 +1,142 @@ +/* + * Test program for MIPS64R6 instruction CRC32CD + * + * Copyright (C) 2019 Wave Computing, Inc. + * Copyright (C) 2019 Aleksandar Markovic + * Copyright (C) 2025 Aleksandar Rakic + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include + +#include "../../../../include/wrappers_mips64r6.h" +#include "../../../../include/test_inputs_64.h" +#include "../../../../include/test_utils_64.h" + +#define TEST_COUNT_TOTAL (PATTERN_INPUTS_64_COUNT + RANDOM_INPUTS_64_COUNT) + +int32_t main(void) +{ + char *isa_ase_name = "mips64r6"; + char *group_name = "CRC with reversed polynomial 0x82F63B78"; + char *instruction_name = "CRC32CD"; + int32_t ret; + uint32_t i, j; + struct timeval start, end; + double elapsed_time; + + uint64_t b64_result[TEST_COUNT_TOTAL]; + uint64_t b64_expect[TEST_COUNT_TOTAL] = { + 0xffffffffb798b438ULL, /* 0 */ + 0xffffffffc44ff94dULL, + 0xffffffff992a70ebULL, + 0xffffffffeafd3d9eULL, + 0x000000005152da26ULL, + 0x0000000022859753ULL, + 0x0000000015cb6d32ULL, + 0x00000000661c2047ULL, + 0x0000000073d74d75ULL, /* 8 */ + 0x0000000000000000ULL, + 0x000000005d6589a6ULL, + 0x000000002eb2c4d3ULL, + 0xffffffff951d236bULL, + 0xffffffffe6ca6e1eULL, + 0xffffffffd184947fULL, + 0xffffffffa253d90aULL, + 0x0000000008f9ceacULL, /* 16 */ + 0x000000007b2e83d9ULL, + 0x00000000264b0a7fULL, + 0x00000000559c470aULL, + 0xffffffffee33a0b2ULL, + 0xffffffff9de4edc7ULL, + 0xffffffffaaaa17a6ULL, + 0xffffffffd97d5ad3ULL, + 0xffffffffccb637e1ULL, /* 24 */ + 0xffffffffbf617a94ULL, + 0xffffffffe204f332ULL, + 0xffffffff91d3be47ULL, + 0x000000002a7c59ffULL, + 0x0000000059ab148aULL, + 0x000000006ee5eeebULL, + 0x000000001d32a39eULL, + 0x0000000021e3b01bULL, /* 32 */ + 0x000000005234fd6eULL, + 0x000000000f5174c8ULL, + 0x000000007c8639bdULL, + 0xffffffffc729de05ULL, + 0xffffffffb4fe9370ULL, + 0xffffffff83b06911ULL, + 0xfffffffff0672464ULL, + 0xffffffffe5ac4956ULL, /* 40 */ + 0xffffffff967b0423ULL, + 0xffffffffcb1e8d85ULL, + 0xffffffffb8c9c0f0ULL, + 0x0000000003662748ULL, + 0x0000000070b16a3dULL, + 0x0000000047ff905cULL, + 0x000000003428dd29ULL, + 0xffffffffb89d59a6ULL, /* 48 */ + 0xffffffffcb4a14d3ULL, + 0xffffffff962f9d75ULL, + 0xffffffffe5f8d000ULL, + 0x000000005e5737b8ULL, + 
0x000000002d807acdULL, + 0x000000001ace80acULL, + 0x000000006919cdd9ULL, + 0x000000007cd2a0ebULL, /* 56 */ + 0x000000000f05ed9eULL, + 0x0000000052606438ULL, + 0x0000000021b7294dULL, + 0xffffffff9a18cef5ULL, + 0xffffffffe9cf8380ULL, + 0xffffffffde8179e1ULL, + 0xffffffffad563494ULL, + 0x000000003a358bb3ULL, /* 64 */ + 0xffffffff975446ebULL, + 0x0000000041d37ad6ULL, + 0x000000004be84fe1ULL, + 0xffffffff9671b1b3ULL, + 0x000000003b107cebULL, + 0xffffffffed9740d6ULL, + 0xffffffffe7ac75e1ULL, + 0xffffffffa1489696ULL, /* 72 */ + 0x000000000c295bceULL, + 0xffffffffdaae67f3ULL, + 0xffffffffd09552c4ULL, + 0x0000000042bd7071ULL, + 0xffffffffefdcbd29ULL, + 0x00000000395b8114ULL, + 0x000000003360b423ULL, + }; + + gettimeofday(&start, NULL); + + for (i = 0; i < PATTERN_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < PATTERN_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32CD(b64_pattern + i, b64_pattern + j, + b64_result + (PATTERN_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + for (i = 0; i < RANDOM_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < RANDOM_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32CD(b64_random + i, b64_random + j, + b64_result + (((PATTERN_INPUTS_64_SHORT_COUNT) * + (PATTERN_INPUTS_64_SHORT_COUNT)) + + RANDOM_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + gettimeofday(&end, NULL); + + elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; + elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; + + ret = check_results_64(isa_ase_name, group_name, instruction_name, + TEST_COUNT_TOTAL, elapsed_time, b64_result, + b64_expect); + + return ret; +} diff --git a/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32ch.c b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32ch.c new file mode 100644 index 00000000000..0e7b67732e0 --- /dev/null +++ b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32ch.c @@ -0,0 +1,142 @@ +/* + * Test program for MIPS64R6 instruction CRC32CH + * + * Copyright (C) 2019 Wave Computing, Inc. 
+ * Copyright (C) 2019 Aleksandar Markovic + * Copyright (C) 2025 Aleksandar Rakic + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include + +#include "../../../../include/wrappers_mips64r6.h" +#include "../../../../include/test_inputs_64.h" +#include "../../../../include/test_utils_64.h" + +#define TEST_COUNT_TOTAL (PATTERN_INPUTS_64_COUNT + RANDOM_INPUTS_64_COUNT) + +int32_t main(void) +{ + char *isa_ase_name = "mips64r6"; + char *group_name = "CRC with reversed polynomial 0x82F63B78"; + char *instruction_name = "CRC32CH"; + int32_t ret; + uint32_t i, j; + struct timeval start, end; + double elapsed_time; + + uint64_t b64_result[TEST_COUNT_TOTAL]; + uint64_t b64_expect[TEST_COUNT_TOTAL] = { + 0x000000000000ffffULL, /* 0 */ + 0x000000000e9e77d2ULL, + 0xfffffffff92eaa4bULL, + 0xfffffffff7b02266ULL, + 0x00000000571acc93ULL, + 0x00000000598444beULL, + 0xfffffffff1e6ca77ULL, + 0xffffffffff78425aULL, + 0x000000000e9e882dULL, /* 8 */ + 0x0000000000000000ULL, + 0xfffffffff7b0dd99ULL, + 0xfffffffff92e55b4ULL, + 0x000000005984bb41ULL, + 0x00000000571a336cULL, + 0xffffffffff78bda5ULL, + 0xfffffffff1e63588ULL, + 0xfffffffff92eff1eULL, /* 16 */ + 0xfffffffff7b07733ULL, + 0x000000000000aaaaULL, + 0x000000000e9e2287ULL, + 0xffffffffae34cc72ULL, + 0xffffffffa0aa445fULL, + 0x0000000008c8ca96ULL, + 0x00000000065642bbULL, + 0xfffffffff7b088ccULL, /* 24 */ + 0xfffffffff92e00e1ULL, + 0x000000000e9edd78ULL, + 0x0000000000005555ULL, + 0xffffffffa0aabba0ULL, + 0xffffffffae34338dULL, + 0x000000000656bd44ULL, + 0x0000000008c83569ULL, + 0x00000000571affa0ULL, /* 32 */ + 0x000000005984778dULL, + 0xffffffffae34aa14ULL, + 0xffffffffa0aa2239ULL, + 0x000000000000ccccULL, + 0x000000000e9e44e1ULL, + 0xffffffffa6fcca28ULL, + 0xffffffffa8624205ULL, + 0x0000000059848872ULL, /* 40 */ + 0x00000000571a005fULL, + 0xffffffffa0aaddc6ULL, + 0xffffffffae3455ebULL, + 0x000000000e9ebb1eULL, + 0x0000000000003333ULL, + 0xffffffffa862bdfaULL, + 0xffffffffa6fc35d7ULL, + 0xfffffffff1e6bbb0ULL, /* 48 */ + 0xffffffffff78339dULL, + 0x0000000008c8ee04ULL, + 0x0000000006566629ULL, + 0xffffffffa6fc88dcULL, + 0xffffffffa86200f1ULL, + 0x0000000000008e38ULL, + 0x000000000e9e0615ULL, + 0xffffffffff78cc62ULL, /* 56 */ + 0xfffffffff1e6444fULL, + 0x00000000065699d6ULL, + 0x0000000008c811fbULL, + 0xffffffffa862ff0eULL, + 0xffffffffa6fc7723ULL, + 0x000000000e9ef9eaULL, + 0x00000000000071c7ULL, + 0x0000000000002862ULL, /* 64 */ + 0x000000001190c4cfULL, + 0x000000007b7fdbbeULL, + 0xffffffff9204da99ULL, + 0x000000001190a13eULL, + 0x0000000000004d93ULL, + 0x000000006aef52e2ULL, + 0xffffffff839453c5ULL, + 0x000000007b7f4a13ULL, /* 72 */ + 0x000000006aefa6beULL, + 0x000000000000b9cfULL, + 0xffffffffe97bb8e8ULL, + 0xffffffff9204accaULL, + 0xffffffff83944067ULL, + 0xffffffffe97b5f16ULL, + 0x0000000000005e31ULL, + }; + + gettimeofday(&start, NULL); + + for (i = 0; i < PATTERN_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < PATTERN_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32CH(b64_pattern + i, b64_pattern + j, + b64_result + (PATTERN_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + for (i = 0; i < RANDOM_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < RANDOM_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32CH(b64_random + i, b64_random + j, + b64_result + (((PATTERN_INPUTS_64_SHORT_COUNT) * + (PATTERN_INPUTS_64_SHORT_COUNT)) + + RANDOM_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + gettimeofday(&end, NULL); + + elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; + elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; + + ret = 
check_results_64(isa_ase_name, group_name, instruction_name, + TEST_COUNT_TOTAL, elapsed_time, b64_result, + b64_expect); + + return ret; +} diff --git a/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32cw.c b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32cw.c new file mode 100644 index 00000000000..f7110b3a0a4 --- /dev/null +++ b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32cw.c @@ -0,0 +1,142 @@ +/* + * Test program for MIPS64R6 instruction CRC32CW + * + * Copyright (C) 2019 Wave Computing, Inc. + * Copyright (C) 2019 Aleksandar Markovic + * Copyright (C) 2025 Aleksandar Rakic + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include + +#include "../../../../include/wrappers_mips64r6.h" +#include "../../../../include/test_inputs_64.h" +#include "../../../../include/test_utils_64.h" + +#define TEST_COUNT_TOTAL (PATTERN_INPUTS_64_COUNT + RANDOM_INPUTS_64_COUNT) + +int32_t main(void) +{ + char *isa_ase_name = "mips64r6"; + char *group_name = "CRC with reversed polynomial 0x82F63B78"; + char *instruction_name = "CRC32CW"; + int32_t ret; + uint32_t i, j; + struct timeval start, end; + double elapsed_time; + + uint64_t b64_result[TEST_COUNT_TOTAL]; + uint64_t b64_expect[TEST_COUNT_TOTAL] = { + 0x0000000000000000ULL, /* 0 */ + 0xffffffffb798b438ULL, + 0xffffffff91d3be47ULL, + 0x00000000264b0a7fULL, + 0x0000000070b16a3dULL, + 0xffffffffc729de05ULL, + 0x0000000063c5950aULL, + 0xffffffffd45d2132ULL, + 0xffffffffb798b438ULL, /* 8 */ + 0x0000000000000000ULL, + 0x00000000264b0a7fULL, + 0xffffffff91d3be47ULL, + 0xffffffffc729de05ULL, + 0x0000000070b16a3dULL, + 0xffffffffd45d2132ULL, + 0x0000000063c5950aULL, + 0xffffffff91d3be47ULL, /* 16 */ + 0x00000000264b0a7fULL, + 0x0000000000000000ULL, + 0xffffffffb798b438ULL, + 0xffffffffe162d47aULL, + 0x0000000056fa6042ULL, + 0xfffffffff2162b4dULL, + 0x00000000458e9f75ULL, + 0x00000000264b0a7fULL, /* 24 */ + 0xffffffff91d3be47ULL, + 0xffffffffb798b438ULL, + 0x0000000000000000ULL, + 0x0000000056fa6042ULL, + 0xffffffffe162d47aULL, + 0x00000000458e9f75ULL, + 0xfffffffff2162b4dULL, + 0x0000000070b16a3dULL, /* 32 */ + 0xffffffffc729de05ULL, + 0xffffffffe162d47aULL, + 0x0000000056fa6042ULL, + 0x0000000000000000ULL, + 0xffffffffb798b438ULL, + 0x000000001374ff37ULL, + 0xffffffffa4ec4b0fULL, + 0xffffffffc729de05ULL, /* 40 */ + 0x0000000070b16a3dULL, + 0x0000000056fa6042ULL, + 0xffffffffe162d47aULL, + 0xffffffffb798b438ULL, + 0x0000000000000000ULL, + 0xffffffffa4ec4b0fULL, + 0x000000001374ff37ULL, + 0x0000000063c5950aULL, /* 48 */ + 0xffffffffd45d2132ULL, + 0xfffffffff2162b4dULL, + 0x00000000458e9f75ULL, + 0x000000001374ff37ULL, + 0xffffffffa4ec4b0fULL, + 0x0000000000000000ULL, + 0xffffffffb798b438ULL, + 0xffffffffd45d2132ULL, /* 56 */ + 0x0000000063c5950aULL, + 0x00000000458e9f75ULL, + 0xfffffffff2162b4dULL, + 0xffffffffa4ec4b0fULL, + 0x000000001374ff37ULL, + 0xffffffffb798b438ULL, + 0x0000000000000000ULL, + 0x0000000000000000ULL, /* 64 */ + 0xffffffffea0755b2ULL, + 0x0000000008b188e6ULL, + 0xffffffffff3cc8d9ULL, + 0xffffffffea0755b2ULL, + 0x0000000000000000ULL, + 0xffffffffe2b6dd54ULL, + 0x00000000153b9d6bULL, + 0x0000000008b188e6ULL, /* 72 */ + 0xffffffffe2b6dd54ULL, + 0x0000000000000000ULL, + 0xfffffffff78d403fULL, + 0xffffffffff3cc8d9ULL, + 0x00000000153b9d6bULL, + 0xfffffffff78d403fULL, + 0x0000000000000000ULL, + }; + + gettimeofday(&start, NULL); + + for (i = 0; i < PATTERN_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < PATTERN_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32CW(b64_pattern + i, b64_pattern + j, + 
b64_result + (PATTERN_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + for (i = 0; i < RANDOM_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < RANDOM_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32CW(b64_random + i, b64_random + j, + b64_result + (((PATTERN_INPUTS_64_SHORT_COUNT) * + (PATTERN_INPUTS_64_SHORT_COUNT)) + + RANDOM_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + gettimeofday(&end, NULL); + + elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; + elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; + + ret = check_results_64(isa_ase_name, group_name, instruction_name, + TEST_COUNT_TOTAL, elapsed_time, b64_result, + b64_expect); + + return ret; +} diff --git a/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32d.c b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32d.c new file mode 100644 index 00000000000..e391be803f7 --- /dev/null +++ b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32d.c @@ -0,0 +1,142 @@ +/* + * Test program for MIPS64R6 instruction CRC32D + * + * Copyright (C) 2019 Wave Computing, Inc. + * Copyright (C) 2019 Aleksandar Markovic + * Copyright (C) 2025 Aleksandar Rakic + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include + +#include "../../../../include/wrappers_mips64r6.h" +#include "../../../../include/test_inputs_64.h" +#include "../../../../include/test_utils_64.h" + +#define TEST_COUNT_TOTAL (PATTERN_INPUTS_64_COUNT + RANDOM_INPUTS_64_COUNT) + +int32_t main(void) +{ + char *isa_ase_name = "mips64r6"; + char *group_name = "CRC with reversed polynomial 0xEDB88320"; + char *instruction_name = "CRC32D"; + int32_t ret; + uint32_t i, j; + struct timeval start, end; + double elapsed_time; + + uint64_t b64_result[TEST_COUNT_TOTAL]; + uint64_t b64_expect[TEST_COUNT_TOTAL] = { + 0xffffffffdebb20e3ULL, /* 0 */ + 0x0000000044660075ULL, + 0x000000001e20c2aeULL, + 0xffffffff84fde238ULL, + 0x00000000281d7ce7ULL, + 0xffffffffb2c05c71ULL, + 0xffffffffd660a024ULL, + 0x000000004cbd80b2ULL, + 0xffffffff9add2096ULL, /* 8 */ + 0x0000000000000000ULL, + 0x000000005a46c2dbULL, + 0xffffffffc09be24dULL, + 0x000000006c7b7c92ULL, + 0xfffffffff6a65c04ULL, + 0xffffffff9206a051ULL, + 0x0000000008db80c7ULL, + 0x000000005449dd0fULL, /* 16 */ + 0xffffffffce94fd99ULL, + 0xffffffff94d23f42ULL, + 0x000000000e0f1fd4ULL, + 0xffffffffa2ef810bULL, + 0x000000003832a19dULL, + 0x000000005c925dc8ULL, + 0xffffffffc64f7d5eULL, + 0x00000000102fdd7aULL, /* 24 */ + 0xffffffff8af2fdecULL, + 0xffffffffd0b43f37ULL, + 0x000000004a691fa1ULL, + 0xffffffffe689817eULL, + 0x000000007c54a1e8ULL, + 0x0000000018f45dbdULL, + 0xffffffff82297d2bULL, + 0xffffffffa7157447ULL, /* 32 */ + 0x000000003dc854d1ULL, + 0x00000000678e960aULL, + 0xfffffffffd53b69cULL, + 0x0000000051b32843ULL, + 0xffffffffcb6e08d5ULL, + 0xffffffffafcef480ULL, + 0x000000003513d416ULL, + 0xffffffffe3737432ULL, /* 40 */ + 0x0000000079ae54a4ULL, + 0x0000000023e8967fULL, + 0xffffffffb935b6e9ULL, + 0x0000000015d52836ULL, + 0xffffffff8f0808a0ULL, + 0xffffffffeba8f4f5ULL, + 0x000000007175d463ULL, + 0x000000007a6adc3eULL, /* 48 */ + 0xffffffffe0b7fca8ULL, + 0xffffffffbaf13e73ULL, + 0x00000000202c1ee5ULL, + 0xffffffff8ccc803aULL, + 0x000000001611a0acULL, + 0x0000000072b15cf9ULL, + 0xffffffffe86c7c6fULL, + 0x000000003e0cdc4bULL, /* 56 */ + 0xffffffffa4d1fcddULL, + 0xfffffffffe973e06ULL, + 0x00000000644a1e90ULL, + 0xffffffffc8aa804fULL, + 0x000000005277a0d9ULL, + 0x0000000036d75c8cULL, + 0xffffffffac0a7c1aULL, + 0xffffffffed857593ULL, /* 64 */ + 0xffffffffe0b6f95fULL, + 0x00000000253b462cULL, + 0xffffffffe15579b9ULL, + 
0x0000000074897c83ULL, + 0x0000000079baf04fULL, + 0xffffffffbc374f3cULL, + 0x00000000785970a9ULL, + 0xffffffffa6bae0a9ULL, /* 72 */ + 0xffffffffab896c65ULL, + 0x000000006e04d316ULL, + 0xffffffffaa6aec83ULL, + 0x000000005ae171feULL, + 0x0000000057d2fd32ULL, + 0xffffffff925f4241ULL, + 0x0000000056317dd4ULL, + }; + + gettimeofday(&start, NULL); + + for (i = 0; i < PATTERN_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < PATTERN_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32D(b64_pattern + i, b64_pattern + j, + b64_result + (PATTERN_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + for (i = 0; i < RANDOM_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < RANDOM_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32D(b64_random + i, b64_random + j, + b64_result + (((PATTERN_INPUTS_64_SHORT_COUNT) * + (PATTERN_INPUTS_64_SHORT_COUNT)) + + RANDOM_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + gettimeofday(&end, NULL); + + elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; + elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; + + ret = check_results_64(isa_ase_name, group_name, instruction_name, + TEST_COUNT_TOTAL, elapsed_time, b64_result, + b64_expect); + + return ret; +} diff --git a/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32h.c b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32h.c new file mode 100644 index 00000000000..100f02c7dda --- /dev/null +++ b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32h.c @@ -0,0 +1,142 @@ +/* + * Test program for MIPS64R6 instruction CRC32H + * + * Copyright (C) 2019 Wave Computing, Inc. + * Copyright (C) 2019 Aleksandar Markovic + * Copyright (C) 2025 Aleksandar Rakic + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include + +#include "../../../../include/wrappers_mips64r6.h" +#include "../../../../include/test_inputs_64.h" +#include "../../../../include/test_utils_64.h" + +#define TEST_COUNT_TOTAL (PATTERN_INPUTS_64_COUNT + RANDOM_INPUTS_64_COUNT) + +int32_t main(void) +{ + char *isa_ase_name = "mips64r6"; + char *group_name = "CRC with reversed polynomial 0xEDB88320"; + char *instruction_name = "CRC32H"; + int32_t ret; + uint32_t i, j; + struct timeval start, end; + double elapsed_time; + + uint64_t b64_result[TEST_COUNT_TOTAL]; + uint64_t b64_expect[TEST_COUNT_TOTAL] = { + 0x000000000000ffffULL, /* 0 */ + 0xffffffffbe2612ffULL, + 0xffffffffdccda6c0ULL, + 0x0000000062eb4bc0ULL, + 0x000000004bbbc8eaULL, + 0xfffffffff59d25eaULL, + 0x0000000022259ac0ULL, + 0xffffffff9c0377c0ULL, + 0xffffffffbe26ed00ULL, /* 8 */ + 0x0000000000000000ULL, + 0x0000000062ebb43fULL, + 0xffffffffdccd593fULL, + 0xfffffffff59dda15ULL, + 0x000000004bbb3715ULL, + 0xffffffff9c03883fULL, + 0x000000002225653fULL, + 0xffffffffdccdf395ULL, /* 16 */ + 0x0000000062eb1e95ULL, + 0x000000000000aaaaULL, + 0xffffffffbe2647aaULL, + 0xffffffff9776c480ULL, + 0x0000000029502980ULL, + 0xfffffffffee896aaULL, + 0x0000000040ce7baaULL, + 0x0000000062ebe16aULL, /* 24 */ + 0xffffffffdccd0c6aULL, + 0xffffffffbe26b855ULL, + 0x0000000000005555ULL, + 0x000000002950d67fULL, + 0xffffffff97763b7fULL, + 0x0000000040ce8455ULL, + 0xfffffffffee86955ULL, + 0x000000004bbbfbd9ULL, /* 32 */ + 0xfffffffff59d16d9ULL, + 0xffffffff9776a2e6ULL, + 0x0000000029504fe6ULL, + 0x000000000000ccccULL, + 0xffffffffbe2621ccULL, + 0x00000000699e9ee6ULL, + 0xffffffffd7b873e6ULL, + 0xfffffffff59de926ULL, /* 40 */ + 0x000000004bbb0426ULL, + 0x000000002950b019ULL, + 0xffffffff97765d19ULL, + 0xffffffffbe26de33ULL, + 0x0000000000003333ULL, + 0xffffffffd7b88c19ULL, + 0x00000000699e6119ULL, + 
0x000000002225eb07ULL, /* 48 */ + 0xffffffff9c030607ULL, + 0xfffffffffee8b238ULL, + 0x0000000040ce5f38ULL, + 0x00000000699edc12ULL, + 0xffffffffd7b83112ULL, + 0x0000000000008e38ULL, + 0xffffffffbe266338ULL, + 0xffffffff9c03f9f8ULL, /* 56 */ + 0x00000000222514f8ULL, + 0x0000000040cea0c7ULL, + 0xfffffffffee84dc7ULL, + 0xffffffffd7b8ceedULL, + 0x00000000699e23edULL, + 0xffffffffbe269cc7ULL, + 0x00000000000071c7ULL, + 0x0000000000002862ULL, /* 64 */ + 0x0000000026a17af6ULL, + 0xffffffffaa919152ULL, + 0xffffffffcb865590ULL, + 0x0000000026a11f07ULL, + 0x0000000000004d93ULL, + 0xffffffff8c30a637ULL, + 0xffffffffed2762f5ULL, + 0xffffffffaa9100ffULL, /* 72 */ + 0xffffffff8c30526bULL, + 0x000000000000b9cfULL, + 0x0000000061177d0dULL, + 0xffffffffcb8623c3ULL, + 0xffffffffed277157ULL, + 0x0000000061179af3ULL, + 0x0000000000005e31ULL + }; + + gettimeofday(&start, NULL); + + for (i = 0; i < PATTERN_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < PATTERN_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32H(b64_pattern + i, b64_pattern + j, + b64_result + (PATTERN_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + for (i = 0; i < RANDOM_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < RANDOM_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32H(b64_random + i, b64_random + j, + b64_result + (((PATTERN_INPUTS_64_SHORT_COUNT) * + (PATTERN_INPUTS_64_SHORT_COUNT)) + + RANDOM_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + gettimeofday(&end, NULL); + + elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; + elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; + + ret = check_results_64(isa_ase_name, group_name, instruction_name, + TEST_COUNT_TOTAL, elapsed_time, b64_result, + b64_expect); + + return ret; +} diff --git a/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32w.c b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32w.c new file mode 100644 index 00000000000..b4f5f4bbee2 --- /dev/null +++ b/tests/tcg/mips/user/isa/mips64r6/crc/test_mips64r6_crc32w.c @@ -0,0 +1,142 @@ +/* + * Test program for MIPS64R6 instruction CRC32W + * + * Copyright (C) 2019 Wave Computing, Inc. 
+ * Copyright (C) 2019 Aleksandar Markovic + * Copyright (C) 2025 Aleksandar Rakic + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include + +#include "../../../../include/wrappers_mips64r6.h" +#include "../../../../include/test_inputs_64.h" +#include "../../../../include/test_utils_64.h" + +#define TEST_COUNT_TOTAL (PATTERN_INPUTS_64_COUNT + RANDOM_INPUTS_64_COUNT) + +int32_t main(void) +{ + char *isa_ase_name = "mips64r6"; + char *group_name = "CRC with reversed polynomial 0xEDB88320"; + char *instruction_name = "CRC32W"; + int32_t ret; + uint32_t i, j; + struct timeval start, end; + double elapsed_time; + + uint64_t b64_result[TEST_COUNT_TOTAL]; + uint64_t b64_expect[TEST_COUNT_TOTAL] = { + 0x0000000000000000ULL, /* 0 */ + 0xffffffffdebb20e3ULL, + 0x000000004a691fa1ULL, + 0xffffffff94d23f42ULL, + 0xffffffff8f0808a0ULL, + 0x0000000051b32843ULL, + 0x0000000065069dceULL, + 0xffffffffbbbdbd2dULL, + 0xffffffffdebb20e3ULL, /* 8 */ + 0x0000000000000000ULL, + 0xffffffff94d23f42ULL, + 0x000000004a691fa1ULL, + 0x0000000051b32843ULL, + 0xffffffff8f0808a0ULL, + 0xffffffffbbbdbd2dULL, + 0x0000000065069dceULL, + 0x000000004a691fa1ULL, /* 16 */ + 0xffffffff94d23f42ULL, + 0x0000000000000000ULL, + 0xffffffffdebb20e3ULL, + 0xffffffffc5611701ULL, + 0x000000001bda37e2ULL, + 0x000000002f6f826fULL, + 0xfffffffff1d4a28cULL, + 0xffffffff94d23f42ULL, /* 24 */ + 0x000000004a691fa1ULL, + 0xffffffffdebb20e3ULL, + 0x0000000000000000ULL, + 0x000000001bda37e2ULL, + 0xffffffffc5611701ULL, + 0xfffffffff1d4a28cULL, + 0x000000002f6f826fULL, + 0xffffffff8f0808a0ULL, /* 32 */ + 0x0000000051b32843ULL, + 0xffffffffc5611701ULL, + 0x000000001bda37e2ULL, + 0x0000000000000000ULL, + 0xffffffffdebb20e3ULL, + 0xffffffffea0e956eULL, + 0x0000000034b5b58dULL, + 0x0000000051b32843ULL, /* 40 */ + 0xffffffff8f0808a0ULL, + 0x000000001bda37e2ULL, + 0xffffffffc5611701ULL, + 0xffffffffdebb20e3ULL, + 0x0000000000000000ULL, + 0x0000000034b5b58dULL, + 0xffffffffea0e956eULL, + 0x0000000065069dceULL, /* 48 */ + 0xffffffffbbbdbd2dULL, + 0x000000002f6f826fULL, + 0xfffffffff1d4a28cULL, + 0xffffffffea0e956eULL, + 0x0000000034b5b58dULL, + 0x0000000000000000ULL, + 0xffffffffdebb20e3ULL, + 0xffffffffbbbdbd2dULL, /* 56 */ + 0x0000000065069dceULL, + 0xfffffffff1d4a28cULL, + 0x000000002f6f826fULL, + 0x0000000034b5b58dULL, + 0xffffffffea0e956eULL, + 0xffffffffdebb20e3ULL, + 0x0000000000000000ULL, + 0x0000000000000000ULL, /* 64 */ + 0xffffffff90485967ULL, + 0x000000006dfb974aULL, + 0x00000000083e4538ULL, + 0xffffffff90485967ULL, + 0x0000000000000000ULL, + 0xfffffffffdb3ce2dULL, + 0xffffffff98761c5fULL, + 0x000000006dfb974aULL, /* 72 */ + 0xfffffffffdb3ce2dULL, + 0x0000000000000000ULL, + 0x0000000065c5d272ULL, + 0x00000000083e4538ULL, + 0xffffffff98761c5fULL, + 0x0000000065c5d272ULL, + 0x0000000000000000ULL, + }; + + gettimeofday(&start, NULL); + + for (i = 0; i < PATTERN_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < PATTERN_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32W(b64_pattern + i, b64_pattern + j, + b64_result + (PATTERN_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + for (i = 0; i < RANDOM_INPUTS_64_SHORT_COUNT; i++) { + for (j = 0; j < RANDOM_INPUTS_64_SHORT_COUNT; j++) { + do_mips64r6_CRC32W(b64_random + i, b64_random + j, + b64_result + (((PATTERN_INPUTS_64_SHORT_COUNT) * + (PATTERN_INPUTS_64_SHORT_COUNT)) + + RANDOM_INPUTS_64_SHORT_COUNT * i + j)); + } + } + + gettimeofday(&end, NULL); + + elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; + elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; + + ret = 
check_results_64(isa_ase_name, group_name, instruction_name, + TEST_COUNT_TOTAL, elapsed_time, b64_result, + b64_expect); + + return ret; +} diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target index 5e3391ec9d2..8dc65d7a064 100644 --- a/tests/tcg/multiarch/Makefile.target +++ b/tests/tcg/multiarch/Makefile.target @@ -29,6 +29,7 @@ run-float_%: float_% $(call run-test,$<, $(QEMU) $(QEMU_OPTS) $<) $(call conditional-diff-out,$<,$(SRC_PATH)/tests/tcg/$(TARGET_NAME)/$<.ref) +fnmsub: LDFLAGS+=-lm testthread: LDFLAGS+=-lpthread @@ -42,6 +43,17 @@ munmap-pthread: LDFLAGS+=-pthread vma-pthread: CFLAGS+=-pthread vma-pthread: LDFLAGS+=-pthread +sigreturn-sigmask: CFLAGS+=-pthread +sigreturn-sigmask: LDFLAGS+=-pthread + +# GCC versions 12/13/14/15 at least incorrectly complain about +# "'SHA1Transform' reading 64 bytes from a region of size 0"; see the gcc bug +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106709 +# Since this is just a standard piece of library code we've borrowed for a +# TCG test case, suppress the warning rather than trying to modify the +# code to work around the compiler. +sha1: CFLAGS+=-Wno-stringop-overread -Wno-unknown-warning-option + # The vma-pthread seems very sensitive on gitlab and we currently # don't know if its exposing a real bug or the test is flaky. ifneq ($(GITLAB_CI),) @@ -127,6 +139,13 @@ run-gdbstub-follow-fork-mode-parent: follow-fork-mode --bin $< --test $(MULTIARCH_SRC)/gdbstub/follow-fork-mode-parent.py, \ following parents on fork) +run-gdbstub-late-attach: late-attach + $(call run-test, $@, env LATE_ATTACH_PY=1 $(GDB_SCRIPT) \ + --gdb $(GDB) \ + --qemu $(QEMU) --qargs "$(QEMU_OPTS)" --no-suspend \ + --bin $< --test $(MULTIARCH_SRC)/gdbstub/late-attach.py, \ + attaching to a running process) + else run-gdbstub-%: $(call skip-test, "gdbstub test $*", "need working gdb with $(patsubst -%,,$(TARGET_NAME)) support") @@ -136,7 +155,7 @@ EXTRA_RUNS += run-gdbstub-sha1 run-gdbstub-qxfer-auxv-read \ run-gdbstub-registers run-gdbstub-prot-none \ run-gdbstub-catch-syscalls run-gdbstub-follow-fork-mode-child \ run-gdbstub-follow-fork-mode-parent \ - run-gdbstub-qxfer-siginfo-read + run-gdbstub-qxfer-siginfo-read run-gdbstub-late-attach # ARM Compatible Semi Hosting Tests # @@ -170,5 +189,20 @@ run-plugin-semiconsole-with-%: TESTS += semihosting semiconsole endif +test-plugin-mem-access: CFLAGS+=-pthread -O0 +test-plugin-mem-access: LDFLAGS+=-pthread -O0 + +ifeq ($(CONFIG_PLUGIN),y) +# Test plugin memory access instrumentation +run-plugin-test-plugin-mem-access-with-libmem.so: \ + PLUGIN_ARGS=$(COMMA)print-accesses=true +run-plugin-test-plugin-mem-access-with-libmem.so: \ + CHECK_PLUGIN_OUTPUT_COMMAND= \ + $(SRC_PATH)/tests/tcg/multiarch/check-plugin-output.sh \ + $(QEMU) $< + +EXTRA_RUNS_WITH_PLUGIN += run-plugin-test-plugin-mem-access-with-libmem.so +endif + # Update TESTS TESTS += $(MULTIARCH_TESTS) diff --git a/tests/tcg/multiarch/check-plugin-output.sh b/tests/tcg/multiarch/check-plugin-output.sh new file mode 100755 index 00000000000..80607f04b5d --- /dev/null +++ b/tests/tcg/multiarch/check-plugin-output.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +# This script runs a given executable using qemu, and compare its standard +# output with an expected plugin output. +# Each line of output is searched (as a regexp) in the expected plugin output. 
+ +set -euo pipefail + +die() +{ + echo "$@" 1>&2 + exit 1 +} + +check() +{ + file=$1 + pattern=$2 + grep "$pattern" "$file" > /dev/null || die "\"$pattern\" not found in $file" +} + +[ $# -eq 3 ] || die "usage: qemu_bin exe plugin_out_file" + +qemu_bin=$1; shift +exe=$1;shift +plugin_out=$1; shift + +expected() +{ + $qemu_bin $exe || + die "running $exe failed" +} + +expected | while read line; do + check "$plugin_out" "$line" +done diff --git a/tests/tcg/multiarch/fnmsub.c b/tests/tcg/multiarch/fnmsub.c new file mode 100644 index 00000000000..15dd41d3bd2 --- /dev/null +++ b/tests/tcg/multiarch/fnmsub.c @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include +#include +#include + +union U { + double d; + unsigned long long l; +}; + +union U x = { .l = 0x4ff0000000000000ULL }; +union U y = { .l = 0x2ff0000000000000ULL }; +union U r; + +int main() +{ +#ifdef FE_DOWNWARD + fesetround(FE_DOWNWARD); + +#if defined(__loongarch__) + asm("fnmsub.d %0, %1, %1, %2" : "=f"(r.d) : "f"(x.d), "f"(y.d)); +#elif defined(__powerpc64__) + asm("fnmsub %0,%1,%1,%2" : "=f"(r.d) : "f"(x.d), "f"(y.d)); +#elif defined(__s390x__) && 0 /* need -march=z14 */ + asm("vfnms %0,%1,%1,%2,0,3" : "=f"(r.d) : "f"(x.d), "f"(y.d)); +#else + r.d = -fma(x.d, x.d, -y.d); +#endif + + if (r.l != 0xdfefffffffffffffULL) { + printf("r = %.18a (%016llx)\n", r.d, r.l); + return 1; + } +#endif + return 0; +} diff --git a/tests/tcg/multiarch/gdbstub/interrupt.py b/tests/tcg/multiarch/gdbstub/interrupt.py index 90a45b5140a..2d5654d1540 100644 --- a/tests/tcg/multiarch/gdbstub/interrupt.py +++ b/tests/tcg/multiarch/gdbstub/interrupt.py @@ -8,7 +8,7 @@ # import gdb -from test_gdbstub import main, report +from test_gdbstub import gdb_exit, main, report def check_interrupt(thread): @@ -49,7 +49,7 @@ def run_test(): """ if len(gdb.selected_inferior().threads()) == 1: print("SKIP: set to run on a single thread") - exit(0) + gdb_exit(0) gdb.execute("set scheduler-locking on") for thread in gdb.selected_inferior().threads(): diff --git a/tests/tcg/multiarch/gdbstub/late-attach.py b/tests/tcg/multiarch/gdbstub/late-attach.py new file mode 100644 index 00000000000..1d40efb5ec8 --- /dev/null +++ b/tests/tcg/multiarch/gdbstub/late-attach.py @@ -0,0 +1,28 @@ +"""Test attaching GDB to a running process. + +SPDX-License-Identifier: GPL-2.0-or-later +""" +from test_gdbstub import main, report + + +def run_test(): + """Run through the tests one by one""" + try: + phase = gdb.parse_and_eval("phase").string() + except gdb.error: + # Assume the guest did not reach main(). 
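+        # (Depending on how quickly GDB attached, "phase" may not have a value
+        # yet; treat that like phase == "start" and run to the sigwait below.)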
+        phase = "start"
+
+    if phase == "start":
+        gdb.execute("break sigwait")
+        gdb.execute("continue")
+        phase = gdb.parse_and_eval("phase").string()
+    report(phase == "sigwait", "{} == \"sigwait\"".format(phase))
+
+    gdb.execute("signal SIGUSR1")
+
+    exitcode = int(gdb.parse_and_eval("$_exitcode"))
+    report(exitcode == 0, "{} == 0".format(exitcode))
+
+
+main(run_test)
diff --git a/tests/tcg/multiarch/gdbstub/prot-none.py b/tests/tcg/multiarch/gdbstub/prot-none.py
index 7e264589cb8..51082a30e40 100644
--- a/tests/tcg/multiarch/gdbstub/prot-none.py
+++ b/tests/tcg/multiarch/gdbstub/prot-none.py
@@ -5,7 +5,7 @@ SPDX-License-Identifier: GPL-2.0-or-later
 """
 import ctypes
-from test_gdbstub import main, report
+from test_gdbstub import gdb_exit, main, report
 
 
 def probe_proc_self_mem():
@@ -22,7 +22,7 @@ def run_test():
     """Run through the tests one by one"""
     if not probe_proc_self_mem():
         print("SKIP: /proc/self/mem is not usable")
-        exit(0)
+        gdb_exit(0)
     gdb.Breakpoint("break_here")
     gdb.execute("continue")
     val = gdb.parse_and_eval("*(char[2] *)q").string()
diff --git a/tests/tcg/multiarch/gdbstub/test-proc-mappings.py b/tests/tcg/multiarch/gdbstub/test-proc-mappings.py
index 564613fabf0..6eb6ebf7b17 100644
--- a/tests/tcg/multiarch/gdbstub/test-proc-mappings.py
+++ b/tests/tcg/multiarch/gdbstub/test-proc-mappings.py
@@ -3,22 +3,17 @@ This runs as a sourced script (via -x, via run-test.py)."""
 from __future__ import print_function
 import gdb
-from test_gdbstub import main, report
+from test_gdbstub import gdb_exit, main, report
 
 
 def run_test():
     """Run through the tests one by one"""
-    try:
-        mappings = gdb.execute("info proc mappings", False, True)
-    except gdb.error as exc:
-        exc_str = str(exc)
-        if "Not supported on this target." in exc_str:
-            # Detect failures due to an outstanding issue with how GDB handles
-            # the x86_64 QEMU's target.xml, which does not contain the
-            # definition of orig_rax. Skip the test in this case.
-            print("SKIP: {}".format(exc_str))
-            return
-        raise
+    if gdb.selected_inferior().architecture().name() == "m68k":
+        # m68k GDB supports only GDB_OSABI_SVR4, but GDB_OSABI_LINUX is
+        # required for the info proc support (see set_gdbarch_info_proc()).
+        print("SKIP: m68k GDB does not support GDB_OSABI_LINUX")
+        gdb_exit(0)
+    mappings = gdb.execute("info proc mappings", False, True)
     report(isinstance(mappings, str), "Fetched the mappings from the inferior")
     # Broken with host page size > guest page size
     # report("/sha1" in mappings, "Found the test binary name in the mappings")
diff --git a/tests/tcg/multiarch/late-attach.c b/tests/tcg/multiarch/late-attach.c
new file mode 100644
index 00000000000..20a364034b5
--- /dev/null
+++ b/tests/tcg/multiarch/late-attach.c
@@ -0,0 +1,41 @@
+/*
+ * Test attaching GDB to a running process.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static const char *phase = "start";
+
+int main(void)
+{
+    sigset_t set;
+    int sig;
+
+    assert(sigfillset(&set) == 0);
+    assert(sigprocmask(SIG_BLOCK, &set, NULL) == 0);
+
+    /* Let GDB know it can send SIGUSR1. */
+    phase = "sigwait";
+    if (getenv("LATE_ATTACH_PY")) {
+        assert(sigwait(&set, &sig) == 0);
+        if (sig != SIGUSR1) {
+            fprintf(stderr, "Unexpected signal %d\n", sig);
+            return EXIT_FAILURE;
+        }
+    }
+
+    /* Check that the guest does not see host_interrupt_signal. */
+    assert(sigpending(&set) == 0);
+    for (sig = 1; sig < NSIG; sig++) {
+        if (sigismember(&set, sig)) {
+            fprintf(stderr, "Unexpected signal %d\n", sig);
+            return EXIT_FAILURE;
+        }
+    }
+
+    return EXIT_SUCCESS;
+}
diff --git a/tests/tcg/multiarch/linux/linux-sigrtminmax.c b/tests/tcg/multiarch/linux/linux-sigrtminmax.c
new file mode 100644
index 00000000000..a7059aacd9c
--- /dev/null
+++ b/tests/tcg/multiarch/linux/linux-sigrtminmax.c
@@ -0,0 +1,74 @@
+/*
+ * Test the lowest and the highest real-time signals.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/* For hexagon and microblaze. */
+#ifndef __SIGRTMIN
+#define __SIGRTMIN 32
+#endif
+
+extern char **environ;
+
+static bool seen_sigrtmin, seen_sigrtmax;
+
+static void handle_signal(int sig)
+{
+    if (sig == SIGRTMIN) {
+        seen_sigrtmin = true;
+    } else if (sig == SIGRTMAX) {
+        seen_sigrtmax = true;
+    } else {
+        _exit(1);
+    }
+}
+
+int main(int argc, char **argv)
+{
+    char *qemu = getenv("QEMU");
+    struct sigaction act;
+
+    assert(qemu);
+
+    if (!getenv("QEMU_RTSIG_MAP")) {
+        char **new_argv = malloc((argc + 2) * sizeof(char *));
+        int tsig1, hsig1, count1, tsig2, hsig2, count2;
+        char rt_sigmap[64];
+
+        /* Re-exec with a mapping that includes SIGRTMIN and SIGRTMAX. */
+        new_argv[0] = qemu;
+        memcpy(&new_argv[1], argv, (argc + 1) * sizeof(char *));
+        tsig1 = __SIGRTMIN;
+        /* The host must have a few signals starting from this one. */
+        hsig1 = 36;
+        count1 = SIGRTMIN - __SIGRTMIN + 1;
+        tsig2 = SIGRTMAX;
+        hsig2 = hsig1 + count1;
+        count2 = 1;
+        snprintf(rt_sigmap, sizeof(rt_sigmap), "%d %d %d,%d %d %d",
+                 tsig1, hsig1, count1, tsig2, hsig2, count2);
+        setenv("QEMU_RTSIG_MAP", rt_sigmap, 0);
+        assert(execve(new_argv[0], new_argv, environ) == 0);
+        return EXIT_FAILURE;
+    }
+
+    memset(&act, 0, sizeof(act));
+    act.sa_handler = handle_signal;
+    assert(sigaction(SIGRTMIN, &act, NULL) == 0);
+    assert(sigaction(SIGRTMAX, &act, NULL) == 0);
+
+    assert(kill(getpid(), SIGRTMIN) == 0);
+    assert(seen_sigrtmin);
+    assert(kill(getpid(), SIGRTMAX) == 0);
+    assert(seen_sigrtmax);
+
+    return EXIT_SUCCESS;
+}
diff --git a/tests/tcg/multiarch/test-vma.c b/tests/tcg/multiarch/linux/test-vma.c
similarity index 100%
rename from tests/tcg/multiarch/test-vma.c
rename to tests/tcg/multiarch/linux/test-vma.c
diff --git a/tests/tcg/multiarch/sigreturn-sigmask.c b/tests/tcg/multiarch/sigreturn-sigmask.c
new file mode 100644
index 00000000000..e6cc904898d
--- /dev/null
+++ b/tests/tcg/multiarch/sigreturn-sigmask.c
@@ -0,0 +1,51 @@
+/*
+ * Test that sigreturn() does not corrupt the signal mask.
+ * Block SIGUSR2 and handle SIGUSR1.
+ * Then sigwait() SIGUSR2, which relies on it remaining blocked.
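+ * (Rough background sketch: entering the SIGUSR1 handler saves the current
+ * mask, and sigreturn() is supposed to restore it on return; if that restore
+ * dropped the blocked SIGUSR2, the sigwait() below could miss the signal the
+ * helper thread sends.)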
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include +#include +#include +#include + +int seen_sig = -1; + +static void signal_func(int sig) +{ + seen_sig = sig; +} + +static void *thread_func(void *arg) +{ + kill(getpid(), SIGUSR2); + return NULL; +} + +int main(void) +{ + struct sigaction act = { + .sa_handler = signal_func, + }; + pthread_t thread; + sigset_t set; + int sig; + + assert(sigaction(SIGUSR1, &act, NULL) == 0); + + assert(sigemptyset(&set) == 0); + assert(sigaddset(&set, SIGUSR2) == 0); + assert(sigprocmask(SIG_BLOCK, &set, NULL) == 0); + + kill(getpid(), SIGUSR1); + assert(seen_sig == SIGUSR1); + + assert(pthread_create(&thread, NULL, thread_func, NULL) == 0); + assert(sigwait(&set, &sig) == 0); + assert(sig == SIGUSR2); + assert(pthread_join(thread, NULL) == 0); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/multiarch/system/Makefile.softmmu-target b/tests/tcg/multiarch/system/Makefile.softmmu-target index 32dc0f98306..98c4eda5e00 100644 --- a/tests/tcg/multiarch/system/Makefile.softmmu-target +++ b/tests/tcg/multiarch/system/Makefile.softmmu-target @@ -6,6 +6,11 @@ # architecture to add to the test dependencies and deal with the # complications of building. # +# To support the multiarch guests the target arch needs to provide a +# boot.S that jumps to main and provides a __sys_outc functions. +# Remember to update MULTIARCH_SOFTMMU_TARGETS in the tcg test +# Makefile.target when this is done. +# MULTIARCH_SRC=$(SRC_PATH)/tests/tcg/multiarch MULTIARCH_SYSTEM_SRC=$(MULTIARCH_SRC)/system @@ -65,3 +70,12 @@ endif MULTIARCH_RUNS += run-gdbstub-memory run-gdbstub-interrupt \ run-gdbstub-untimely-packet run-gdbstub-registers + +ifeq ($(CONFIG_PLUGIN),y) +# Test plugin memory access instrumentation +run-plugin-memory-with-libmem.so: memory libmem.so +run-plugin-memory-with-libmem.so: PLUGIN_ARGS=$(COMMA)region-summary=true +run-plugin-memory-with-libmem.so: CHECK_PLUGIN_OUTPUT_COMMAND=$(MULTIARCH_SYSTEM_SRC)/validate-memory-counts.py $@.out + +EXTRA_RUNS_WITH_PLUGIN += run-plugin-memory-with-libmem.so +endif diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c index 6eb2eb16f7f..7508f6b916d 100644 --- a/tests/tcg/multiarch/system/memory.c +++ b/tests/tcg/multiarch/system/memory.c @@ -20,20 +20,28 @@ # error "Target does not specify CHECK_UNALIGNED" #endif +uint32_t test_read_count; +uint32_t test_write_count; + #define MEM_PAGE_SIZE 4096 /* nominal 4k "pages" */ #define TEST_SIZE (MEM_PAGE_SIZE * 4) /* 4 pages */ #define ARRAY_SIZE(x) ((sizeof(x) / sizeof((x)[0]))) -__attribute__((aligned(MEM_PAGE_SIZE))) +__attribute__((aligned(TEST_SIZE))) static uint8_t test_data[TEST_SIZE]; typedef void (*init_ufn) (int offset); typedef bool (*read_ufn) (int offset); typedef bool (*read_sfn) (int offset, bool nf); -static void pdot(int count) +static void pdot(int count, bool write) { + if (write) { + test_write_count++; + } else { + test_read_count++; + } if (count % 128 == 0) { ml_printf("."); } @@ -63,12 +71,14 @@ static void init_test_data_u8(int unused_offset) int i; (void)(unused_offset); - ml_printf("Filling test area with u8:"); + ml_printf("Filling test area with u8 (%p):", ptr); + for (i = 0; i < TEST_SIZE; i++) { *ptr++ = BYTE_NEXT(count); - pdot(i); + pdot(i, true); } - ml_printf("done\n"); + + ml_printf("done %d @ %p\n", i, ptr); } /* @@ -91,10 +101,11 @@ static void init_test_data_s8(bool neg_first) neg_first ? 
"neg first" : "pos first"); for (i = 0; i < TEST_SIZE / 2; i++) { *ptr++ = get_byte(i, neg_first); + pdot(i, true); *ptr++ = get_byte(i, !neg_first); - pdot(i); + pdot(i, true); } - ml_printf("done\n"); + ml_printf("done %d @ %p\n", i * 2, ptr); } /* @@ -105,9 +116,19 @@ static void reset_start_data(int offset) { uint32_t *ptr = (uint32_t *) &test_data[0]; int i; + + if (!offset) { + return; + } + + ml_printf("Flushing %d bytes from %p: ", offset, ptr); + for (i = 0; i < offset; i++) { *ptr++ = 0; + pdot(i, true); } + + ml_printf("done %d @ %p\n", i, ptr); } static void init_test_data_u16(int offset) @@ -117,17 +138,17 @@ static void init_test_data_u16(int offset) const int max = (TEST_SIZE - offset) / sizeof(word); int i; - ml_printf("Filling test area with u16 (offset %d, %p):", offset, ptr); - reset_start_data(offset); + ml_printf("Filling test area with u16 (offset %d, %p):", offset, ptr); + for (i = 0; i < max; i++) { uint16_t low = BYTE_NEXT(count), high = BYTE_NEXT(count); word = BYTE_SHIFT(high, 1) | BYTE_SHIFT(low, 0); *ptr++ = word; - pdot(i); + pdot(i, true); } - ml_printf("done @ %p\n", ptr); + ml_printf("done %d @ %p\n", i, ptr); } static void init_test_data_u32(int offset) @@ -137,21 +158,22 @@ static void init_test_data_u32(int offset) const int max = (TEST_SIZE - offset) / sizeof(word); int i; - ml_printf("Filling test area with u32 (offset %d, %p):", offset, ptr); - reset_start_data(offset); + ml_printf("Filling test area with u32 (offset %d, %p):", offset, ptr); + for (i = 0; i < max; i++) { uint32_t b4 = BYTE_NEXT(count), b3 = BYTE_NEXT(count); uint32_t b2 = BYTE_NEXT(count), b1 = BYTE_NEXT(count); word = BYTE_SHIFT(b1, 3) | BYTE_SHIFT(b2, 2) | BYTE_SHIFT(b3, 1) | BYTE_SHIFT(b4, 0); *ptr++ = word; - pdot(i); + pdot(i, true); } - ml_printf("done @ %p\n", ptr); + ml_printf("done %d @ %p\n", i, ptr); } +#if __SIZEOF_POINTER__ >= 8 static void init_test_data_u64(int offset) { uint8_t count = 0; @@ -159,10 +181,10 @@ static void init_test_data_u64(int offset) const int max = (TEST_SIZE - offset) / sizeof(word); int i; - ml_printf("Filling test area with u64 (offset %d, %p):", offset, ptr); - reset_start_data(offset); + ml_printf("Filling test area with u64 (offset %d, %p):", offset, ptr); + for (i = 0; i < max; i++) { uint64_t b8 = BYTE_NEXT(count), b7 = BYTE_NEXT(count); uint64_t b6 = BYTE_NEXT(count), b5 = BYTE_NEXT(count); @@ -172,10 +194,11 @@ static void init_test_data_u64(int offset) BYTE_SHIFT(b4, 4) | BYTE_SHIFT(b5, 3) | BYTE_SHIFT(b6, 2) | BYTE_SHIFT(b7, 1) | BYTE_SHIFT(b8, 0); *ptr++ = word; - pdot(i); + pdot(i, true); } - ml_printf("done @ %p\n", ptr); + ml_printf("done %d @ %p\n", i, ptr); } +#endif static bool read_test_data_u16(int offset) { @@ -194,11 +217,11 @@ static bool read_test_data_u16(int offset) ml_printf("Error %d < %d\n", high, low); return false; } else { - pdot(i); + pdot(i, false); } } - ml_printf("done @ %p\n", ptr); + ml_printf("done %d @ %p\n", i, ptr); return true; } @@ -236,13 +259,14 @@ static bool read_test_data_u32(int offset) ml_printf("Error %d, %d, %d, %d", b1, b2, b3, b4); return false; } else { - pdot(i); + pdot(i, false); } } - ml_printf("done @ %p\n", ptr); + ml_printf("done %d @ %p\n", i, ptr); return true; } +#if __SIZEOF_POINTER__ >= 8 static bool read_test_data_u64(int offset) { uint64_t word, *ptr = (uint64_t *)&test_data[offset]; @@ -290,17 +314,22 @@ static bool read_test_data_u64(int offset) b1, b2, b3, b4, b5, b6, b7, b8); return false; } else { - pdot(i); + pdot(i, false); } } - ml_printf("done @ %p\n", ptr); + 
ml_printf("done %d @ %p\n", i, ptr); return true; } +#endif /* Read the test data and verify at various offsets */ -read_ufn read_ufns[] = { read_test_data_u16, - read_test_data_u32, - read_test_data_u64 }; +read_ufn read_ufns[] = { + read_test_data_u16, + read_test_data_u32, +#if __SIZEOF_POINTER__ >= 8 + read_test_data_u64 +#endif +}; bool do_unsigned_reads(int start_off) { @@ -357,15 +386,17 @@ static bool read_test_data_s8(int offset, bool neg_first) second = *ptr++; if (neg_first && first < 0 && second > 0) { - pdot(i); + pdot(i, false); + pdot(i, false); } else if (!neg_first && first > 0 && second < 0) { - pdot(i); + pdot(i, false); + pdot(i, false); } else { ml_printf("Error %d %c %d\n", first, neg_first ? '<' : '>', second); return false; } } - ml_printf("done @ %p\n", ptr); + ml_printf("done %d @ %p\n", i * 2, ptr); return true; } @@ -390,15 +421,15 @@ static bool read_test_data_s16(int offset, bool neg_first) int32_t data = *ptr++; if (neg_first && data < 0) { - pdot(i); + pdot(i, false); } else if (!neg_first && data > 0) { - pdot(i); + pdot(i, false); } else { ml_printf("Error %d %c 0\n", data, neg_first ? '<' : '>'); return false; } } - ml_printf("done @ %p\n", ptr); + ml_printf("done %d @ %p\n", i, ptr); return true; } @@ -423,15 +454,15 @@ static bool read_test_data_s32(int offset, bool neg_first) int64_t data = *ptr++; if (neg_first && data < 0) { - pdot(i); + pdot(i, false); } else if (!neg_first && data > 0) { - pdot(i); + pdot(i, false); } else { ml_printf("Error %d %c 0\n", data, neg_first ? '<' : '>'); return false; } } - ml_printf("done @ %p\n", ptr); + ml_printf("done %d @ %p\n", i, ptr); return true; } @@ -465,16 +496,23 @@ bool do_signed_reads(bool neg_first) return ok; } -init_ufn init_ufns[] = { init_test_data_u8, - init_test_data_u16, - init_test_data_u32, - init_test_data_u64 }; +init_ufn init_ufns[] = { + init_test_data_u8, + init_test_data_u16, + init_test_data_u32, +#if __SIZEOF_POINTER__ >= 8 + init_test_data_u64 +#endif +}; int main(void) { int i; bool ok = true; + ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]); + ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]); + /* Run through the unsigned tests first */ for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) { ok = do_unsigned_test(init_ufns[i]); @@ -490,6 +528,8 @@ int main(void) ok = do_signed_reads(true); } + ml_printf("Test data read: %lu\n", (unsigned long)test_read_count); + ml_printf("Test data write: %lu\n", (unsigned long)test_write_count); ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED"); return ok ? 0 : -1; } diff --git a/tests/tcg/multiarch/system/validate-memory-counts.py b/tests/tcg/multiarch/system/validate-memory-counts.py new file mode 100755 index 00000000000..5b8bbf3ef37 --- /dev/null +++ b/tests/tcg/multiarch/system/validate-memory-counts.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python3 +# +# validate-memory-counts.py: check we instrumented memory properly +# +# This program takes two inputs: +# - the mem plugin output +# - the memory binary output +# +# Copyright (C) 2024 Linaro Ltd +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import sys +from argparse import ArgumentParser + +def extract_counts(path): + """ + Load the output from path and extract the lines containing: + + Test data start: 0x40214000 + Test data end: 0x40218001 + Test data read: 2522280 + Test data write: 262111 + + From the stream of data. Extract the values for use in the + validation function. 
+ """ + start_address = None + end_address = None + read_count = 0 + write_count = 0 + with open(path, 'r') as f: + for line in f: + if line.startswith("Test data start:"): + start_address = int(line.split(':')[1].strip(), 16) + elif line.startswith("Test data end:"): + end_address = int(line.split(':')[1].strip(), 16) + elif line.startswith("Test data read:"): + read_count = int(line.split(':')[1].strip()) + elif line.startswith("Test data write:"): + write_count = int(line.split(':')[1].strip()) + return start_address, end_address, read_count, write_count + + +def parse_plugin_output(path, start, end): + """ + Load the plugin output from path in the form of: + + Region Base, Reads, Writes, Seen all + 0x0000000040004000, 31093, 0, false + 0x0000000040214000, 2522280, 278579, true + 0x0000000040000000, 137398, 0, false + 0x0000000040210000, 54727397, 33721956, false + + And extract the ranges that match test data start and end and + return the results. + """ + total_reads = 0 + total_writes = 0 + seen_all = False + + with open(path, 'r') as f: + next(f) # Skip the header + for line in f: + + if line.startswith("Region Base"): + continue + + parts = line.strip().split(', ') + if len(parts) != 4: + continue + + region_base = int(parts[0], 16) + reads = int(parts[1]) + writes = int(parts[2]) + + if start <= region_base < end: # Checking if within range + total_reads += reads + total_writes += writes + seen_all = parts[3] == "true" + + return total_reads, total_writes, seen_all + +def main() -> None: + """ + Process the arguments, injest the program and plugin out and + verify they match up and report if they do not. + """ + parser = ArgumentParser(description="Validate memory instrumentation") + parser.add_argument('test_output', + help="The output from the test itself") + parser.add_argument('plugin_output', + help="The output from memory plugin") + parser.add_argument('--bss-cleared', + action='store_true', + help='Assume bss was cleared (and adjusts counts).') + + args = parser.parse_args() + + # Extract counts from memory binary + start, end, exp_reads, exp_writes = extract_counts(args.test_output) + + # Some targets clear BSS before running but the test doesn't know + # that so we adjust it by the size of the test region. + if args.bss_cleared: + exp_writes += 16384 + + if start is None or end is None: + print("Failed to test_data boundaries from output.") + sys.exit(1) + + # Parse plugin output + preads, pwrites, seen_all = parse_plugin_output(args.plugin_output, + start, end) + + if not seen_all: + print("Fail: didn't instrument all accesses to test_data.") + sys.exit(1) + + # Compare and report + if preads == exp_reads and pwrites == exp_writes: + sys.exit(0) + else: + print("Fail: The memory reads and writes count does not match.") + print(f"Expected Reads: {exp_reads}, Actual Reads: {preads}") + print(f"Expected Writes: {exp_writes}, Actual Writes: {pwrites}") + sys.exit(1) + +if __name__ == "__main__": + main() diff --git a/tests/tcg/multiarch/test-plugin-mem-access.c b/tests/tcg/multiarch/test-plugin-mem-access.c new file mode 100644 index 00000000000..057b9aac9f6 --- /dev/null +++ b/tests/tcg/multiarch/test-plugin-mem-access.c @@ -0,0 +1,177 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Check if we detect all memory accesses expected using plugin API. + * Used in conjunction with ./check-plugin-mem-access.sh check script. + * Output of this program is the list of patterns expected in plugin output. + * + * 8,16,32 load/store are tested for all arch. 
+ * 64,128 load/store are tested for aarch64/x64. + * atomic operations (8,16,32,64) are tested for x64 only. + */ + +#include +#include +#include +#include + +#if defined(__x86_64__) +#include +#elif defined(__aarch64__) +#include +#endif /* __x86_64__ */ + +static void *data; + +/* ,store_u8,.*,8,store,0xf1 */ +#define PRINT_EXPECTED(function, type, value, action) \ +do { \ + printf(",%s,.*,%d,%s,%s\n", \ + #function, (int) sizeof(type) * 8, action, value); \ +} \ +while (0) + +#define DEFINE_STORE(name, type, value) \ + \ +static void print_expected_store_##name(void) \ +{ \ + PRINT_EXPECTED(store_##name, type, #value, "store"); \ +} \ + \ +static void store_##name(void) \ +{ \ + *((type *)data) = value; \ + print_expected_store_##name(); \ +} + +#define DEFINE_ATOMIC_OP(name, type, value) \ + \ +static void print_expected_atomic_op_##name(void) \ +{ \ + PRINT_EXPECTED(atomic_op_##name, type, "0x0*42", "load"); \ + PRINT_EXPECTED(atomic_op_##name, type, #value, "store"); \ +} \ + \ +static void atomic_op_##name(void) \ +{ \ + *((type *)data) = 0x42; \ + __sync_val_compare_and_swap((type *)data, 0x42, value); \ + print_expected_atomic_op_##name(); \ +} + +#define DEFINE_LOAD(name, type, value) \ + \ +static void print_expected_load_##name(void) \ +{ \ + PRINT_EXPECTED(load_##name, type, #value, "load"); \ +} \ + \ +static void load_##name(void) \ +{ \ + \ + /* volatile forces load to be generated. */ \ + volatile type src = *((type *) data); \ + volatile type dest = src; \ + (void)src, (void)dest; \ + print_expected_load_##name(); \ +} + +DEFINE_STORE(u8, uint8_t, 0xf1) +DEFINE_LOAD(u8, uint8_t, 0xf1) +DEFINE_STORE(u16, uint16_t, 0xf123) +DEFINE_LOAD(u16, uint16_t, 0xf123) +DEFINE_STORE(u32, uint32_t, 0xff112233) +DEFINE_LOAD(u32, uint32_t, 0xff112233) + +#if defined(__x86_64__) || defined(__aarch64__) +DEFINE_STORE(u64, uint64_t, 0xf123456789abcdef) +DEFINE_LOAD(u64, uint64_t, 0xf123456789abcdef) + +static void print_expected_store_u128(void) +{ + PRINT_EXPECTED(store_u128, __int128, + "0xf122334455667788f123456789abcdef", "store"); +} + +static void store_u128(void) +{ +#ifdef __x86_64__ + _mm_store_si128(data, _mm_set_epi32(0xf1223344, 0x55667788, + 0xf1234567, 0x89abcdef)); +#else + const uint32_t init[4] = {0x89abcdef, 0xf1234567, 0x55667788, 0xf1223344}; + uint32x4_t vec = vld1q_u32(init); + vst1q_u32(data, vec); +#endif /* __x86_64__ */ + print_expected_store_u128(); +} + +static void print_expected_load_u128(void) +{ + PRINT_EXPECTED(load_u128, __int128, + "0xf122334455667788f123456789abcdef", "load"); +} + +static void load_u128(void) +{ +#ifdef __x86_64__ + __m128i var = _mm_load_si128(data); +#else + uint32x4_t var = vld1q_u32(data); +#endif + (void) var; + print_expected_load_u128(); +} +#endif /* __x86_64__ || __aarch64__ */ + +#if defined(__x86_64__) +DEFINE_ATOMIC_OP(u8, uint8_t, 0xf1) +DEFINE_ATOMIC_OP(u16, uint16_t, 0xf123) +DEFINE_ATOMIC_OP(u32, uint32_t, 0xff112233) +DEFINE_ATOMIC_OP(u64, uint64_t, 0xf123456789abcdef) +#endif /* __x86_64__ */ + +static void *f(void *p) +{ + return NULL; +} + +int main(void) +{ + /* + * We force creation of a second thread to enable cpu flag CF_PARALLEL. + * This will generate atomic operations when needed. 
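+     * (Hedged aside: without CF_PARALLEL a single-threaded user-mode run may
+     * translate __sync_val_compare_and_swap() through TCG's non-parallel
+     * fallback, so the atomic_op_* helpers would not exercise the same
+     * code path the plugin is meant to observe.)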
+ */ + pthread_t thread; + pthread_create(&thread, NULL, &f, NULL); + pthread_join(thread, NULL); + + /* allocate storage up to 128 bits */ + data = malloc(16); + + store_u8(); + load_u8(); + + store_u16(); + load_u16(); + + store_u32(); + load_u32(); + +#if defined(__x86_64__) || defined(__aarch64__) + store_u64(); + load_u64(); + + store_u128(); + load_u128(); +#endif /* __x86_64__ || __aarch64__ */ + +#if defined(__x86_64__) + atomic_op_u8(); + atomic_op_u16(); + atomic_op_u32(); + atomic_op_u64(); +#endif /* __x86_64__ */ + + free(data); +} diff --git a/tests/tcg/plugins/insn.c b/tests/tcg/plugins/insn.c index baf2d07205d..0c723cb9ed8 100644 --- a/tests/tcg/plugins/insn.c +++ b/tests/tcg/plugins/insn.c @@ -150,10 +150,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu( insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_count, 1); } else { - uint64_t vaddr = qemu_plugin_insn_vaddr(insn); qemu_plugin_register_vcpu_insn_exec_cb( - insn, vcpu_insn_exec_before, QEMU_PLUGIN_CB_NO_REGS, - GUINT_TO_POINTER(vaddr)); + insn, vcpu_insn_exec_before, QEMU_PLUGIN_CB_NO_REGS, NULL); } if (do_size) { diff --git a/tests/tcg/plugins/mem.c b/tests/tcg/plugins/mem.c index b650dddcce1..9649bce99ca 100644 --- a/tests/tcg/plugins/mem.c +++ b/tests/tcg/plugins/mem.c @@ -12,6 +12,16 @@ #include #include +/* + * plugins should not include anything from QEMU aside from the + * API header. However as this is a test plugin to exercise the + * internals of QEMU and we want to avoid needless code duplication we + * do so here. bswap.h is pretty self-contained although it needs a + * few things provided by compiler.h. + */ +#include +#include +#include #include QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; @@ -21,13 +31,52 @@ typedef struct { uint64_t io_count; } CPUCount; +typedef struct { + uint64_t vaddr; + const char *sym; +} InsnInfo; + +/* + * For the "memory" system test we need to track accesses to + * individual regions. We mirror the data written to the region and + * then check when it is read that it matches up. + * + * We do this as regions rather than pages to save on complications + * with page crossing and the fact the test only cares about the + * test_data region. + */ +static uint64_t region_size = 4096 * 4; +static uint64_t region_mask; + +typedef struct { + uint64_t region_address; + uint64_t reads; + uint64_t writes; + uint8_t *data; + /* Did we see every write and read with correct values? */ + bool seen_all; +} RegionInfo; + static struct qemu_plugin_scoreboard *counts; static qemu_plugin_u64 mem_count; static qemu_plugin_u64 io_count; -static bool do_inline, do_callback; +static bool do_inline, do_callback, do_print_accesses, do_region_summary; static bool do_haddr; static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW; + +static GMutex lock; +static GHashTable *regions; + +static gint addr_order(gconstpointer a, gconstpointer b, gpointer d) +{ + RegionInfo *na = (RegionInfo *) a; + RegionInfo *nb = (RegionInfo *) b; + + return na->region_address > nb->region_address ? 
1 : -1; +} + + static void plugin_exit(qemu_plugin_id_t id, void *p) { g_autoptr(GString) out = g_string_new(""); @@ -41,9 +90,145 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) qemu_plugin_u64_sum(io_count)); } qemu_plugin_outs(out->str); + + + if (do_region_summary) { + GList *counts = g_hash_table_get_values(regions); + + counts = g_list_sort_with_data(counts, addr_order, NULL); + + g_string_printf(out, "Region Base, Reads, Writes, Seen all\n"); + + if (counts && g_list_next(counts)) { + for (/* counts */; counts; counts = counts->next) { + RegionInfo *ri = (RegionInfo *) counts->data; + + g_string_append_printf(out, + "0x%016"PRIx64", " + "%"PRId64", %"PRId64", %s\n", + ri->region_address, + ri->reads, + ri->writes, + ri->seen_all ? "true" : "false"); + } + } + qemu_plugin_outs(out->str); + } + qemu_plugin_scoreboard_free(counts); } +/* + * Update the region tracking info for the access. We split up accesses + * that span regions even though the plugin infrastructure will deliver + * it as a single access. + */ +static void update_region_info(uint64_t region, uint64_t offset, + qemu_plugin_meminfo_t meminfo, + qemu_plugin_mem_value value, + unsigned size) +{ + bool be = qemu_plugin_mem_is_big_endian(meminfo); + bool is_store = qemu_plugin_mem_is_store(meminfo); + RegionInfo *ri; + bool unseen_data = false; + + g_assert(offset + size <= region_size); + + g_mutex_lock(&lock); + ri = (RegionInfo *) g_hash_table_lookup(regions, ®ion); + + if (!ri) { + ri = g_new0(RegionInfo, 1); + ri->region_address = region; + ri->data = g_malloc0(region_size); + ri->seen_all = true; + g_hash_table_insert(regions, &ri->region_address, ri); + } + + if (is_store) { + ri->writes++; + } else { + ri->reads++; + } + + switch (value.type) { + case QEMU_PLUGIN_MEM_VALUE_U8: + if (is_store) { + ri->data[offset] = value.data.u8; + } else if (ri->data[offset] != value.data.u8) { + unseen_data = true; + } + break; + case QEMU_PLUGIN_MEM_VALUE_U16: + { + uint16_t *p = (uint16_t *) &ri->data[offset]; + if (is_store) { + if (be) { + stw_be_p(p, value.data.u16); + } else { + stw_le_p(p, value.data.u16); + } + } else { + uint16_t val = be ? lduw_be_p(p) : lduw_le_p(p); + unseen_data = val != value.data.u16; + } + break; + } + case QEMU_PLUGIN_MEM_VALUE_U32: + { + uint32_t *p = (uint32_t *) &ri->data[offset]; + if (is_store) { + if (be) { + stl_be_p(p, value.data.u32); + } else { + stl_le_p(p, value.data.u32); + } + } else { + uint32_t val = be ? ldl_be_p(p) : ldl_le_p(p); + unseen_data = val != value.data.u32; + } + break; + } + case QEMU_PLUGIN_MEM_VALUE_U64: + { + uint64_t *p = (uint64_t *) &ri->data[offset]; + if (is_store) { + if (be) { + stq_be_p(p, value.data.u64); + } else { + stq_le_p(p, value.data.u64); + } + } else { + uint64_t val = be ? ldq_be_p(p) : ldq_le_p(p); + unseen_data = val != value.data.u64; + } + break; + } + case QEMU_PLUGIN_MEM_VALUE_U128: + /* non in test so skip */ + break; + default: + g_assert_not_reached(); + } + + /* + * This is expected for regions initialised by QEMU (.text etc) but we + * expect to see all data read and written to the test_data region + * of the memory test. 
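+     * (Note: the warning below is only printed for the first mismatch in a
+     * region, since seen_all is cleared afterwards; validate-memory-counts.py
+     * then reports a failure if the test_data region did not stay "true".)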
+ */ + if (unseen_data && ri->seen_all) { + g_autoptr(GString) error = g_string_new("Warning: "); + g_string_append_printf(error, "0x%016"PRIx64":%"PRId64 + " read an un-instrumented value\n", + region, offset); + qemu_plugin_outs(error->str); + ri->seen_all = false; + } + + g_mutex_unlock(&lock); +} + static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo, uint64_t vaddr, void *udata) { @@ -58,6 +243,53 @@ static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo, } else { qemu_plugin_u64_add(mem_count, cpu_index, 1); } + + if (do_region_summary) { + uint64_t region = vaddr & ~region_mask; + uint64_t offset = vaddr & region_mask; + qemu_plugin_mem_value value = qemu_plugin_mem_get_value(meminfo); + unsigned size = 1 << qemu_plugin_mem_size_shift(meminfo); + + update_region_info(region, offset, meminfo, value, size); + } +} + +static void print_access(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo, + uint64_t vaddr, void *udata) +{ + InsnInfo *insn_info = udata; + unsigned size = 8 << qemu_plugin_mem_size_shift(meminfo); + const char *type = qemu_plugin_mem_is_store(meminfo) ? "store" : "load"; + qemu_plugin_mem_value value = qemu_plugin_mem_get_value(meminfo); + uint64_t hwaddr = + qemu_plugin_hwaddr_phys_addr(qemu_plugin_get_hwaddr(meminfo, vaddr)); + g_autoptr(GString) out = g_string_new(""); + g_string_printf(out, + "0x%"PRIx64",%s,0x%"PRIx64",0x%"PRIx64",%d,%s,", + insn_info->vaddr, insn_info->sym, + vaddr, hwaddr, size, type); + switch (value.type) { + case QEMU_PLUGIN_MEM_VALUE_U8: + g_string_append_printf(out, "0x%02"PRIx8, value.data.u8); + break; + case QEMU_PLUGIN_MEM_VALUE_U16: + g_string_append_printf(out, "0x%04"PRIx16, value.data.u16); + break; + case QEMU_PLUGIN_MEM_VALUE_U32: + g_string_append_printf(out, "0x%08"PRIx32, value.data.u32); + break; + case QEMU_PLUGIN_MEM_VALUE_U64: + g_string_append_printf(out, "0x%016"PRIx64, value.data.u64); + break; + case QEMU_PLUGIN_MEM_VALUE_U128: + g_string_append_printf(out, "0x%016"PRIx64"%016"PRIx64, + value.data.u128.high, value.data.u128.low); + break; + default: + g_assert_not_reached(); + } + g_string_append_printf(out, "\n"); + qemu_plugin_outs(out->str); } static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) @@ -74,11 +306,21 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) QEMU_PLUGIN_INLINE_ADD_U64, mem_count, 1); } - if (do_callback) { + if (do_callback || do_region_summary) { qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem, QEMU_PLUGIN_CB_NO_REGS, rw, NULL); } + if (do_print_accesses) { + /* we leak this pointer, to avoid locking to keep track of it */ + InsnInfo *insn_info = g_malloc(sizeof(InsnInfo)); + const char *sym = qemu_plugin_insn_symbol(insn); + insn_info->sym = sym ? 
sym : ""; + insn_info->vaddr = qemu_plugin_insn_vaddr(insn); + qemu_plugin_register_vcpu_mem_cb(insn, print_access, + QEMU_PLUGIN_CB_NO_REGS, + rw, (void *) insn_info); + } } } @@ -117,6 +359,18 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, fprintf(stderr, "boolean argument parsing failed: %s\n", opt); return -1; } + } else if (g_strcmp0(tokens[0], "print-accesses") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], + &do_print_accesses)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } + } else if (g_strcmp0(tokens[0], "region-summary") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], + &do_region_summary)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } } else { fprintf(stderr, "option parsing failed: %s\n", opt); return -1; @@ -129,6 +383,19 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, return -1; } + if (do_print_accesses) { + g_autoptr(GString) out = g_string_new(""); + g_string_printf(out, + "insn_vaddr,insn_symbol,mem_vaddr,mem_hwaddr," + "access_size,access_type,mem_value\n"); + qemu_plugin_outs(out->str); + } + + if (do_region_summary) { + region_mask = (region_size - 1); + regions = g_hash_table_new(g_int64_hash, g_int64_equal); + } + counts = qemu_plugin_scoreboard_new(sizeof(CPUCount)); mem_count = qemu_plugin_scoreboard_u64_in_struct( counts, CPUCount, mem_count); diff --git a/tests/tcg/plugins/meson.build b/tests/tcg/plugins/meson.build index f847849b1b7..61a007d9e74 100644 --- a/tests/tcg/plugins/meson.build +++ b/tests/tcg/plugins/meson.build @@ -1,13 +1,12 @@ t = [] if get_option('plugins') - foreach i : ['bb', 'empty', 'inline', 'insn', 'mem', 'syscall'] + foreach i : ['bb', 'empty', 'inline', 'insn', 'mem', 'reset', 'syscall', 'patch'] if host_os == 'windows' t += shared_module(i, files(i + '.c') + '../../../contrib/plugins/win32_linker.c', include_directories: '../../../include/qemu', link_depends: [win32_qemu_plugin_api_lib], - link_args: ['-Lplugins', '-lqemu_plugin_api'], + link_args: win32_qemu_plugin_api_link_flags, dependencies: glib) - else t += shared_module(i, files(i + '.c'), include_directories: '../../../include/qemu', @@ -18,5 +17,7 @@ endif if t.length() > 0 alias_target('test-plugins', t) else - run_target('test-plugins', command: find_program('true')) + run_target('test-plugins', command: [python, '-c', '']) endif + +plugin_modules += t diff --git a/tests/tcg/plugins/patch.c b/tests/tcg/plugins/patch.c new file mode 100644 index 00000000000..111c5c1f169 --- /dev/null +++ b/tests/tcg/plugins/patch.c @@ -0,0 +1,251 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This plugin patches instructions matching a pattern to a different + * instruction as they execute + * + */ + +#include "glib.h" +#include "glibconfig.h" + +#include +#include +#include + +QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; + +static bool use_hwaddr; +static GByteArray *target_data; +static GByteArray *patch_data; + +/** + * Parse a string of hexadecimal digits into a GByteArray. 
The string must be + * even length + */ +static GByteArray *str_to_bytes(const char *str) +{ + size_t len = strlen(str); + + if (len == 0 || len % 2 != 0) { + return NULL; + } + + GByteArray *bytes = g_byte_array_new(); + char byte[3] = {0}; + guint8 value = 0; + + for (size_t i = 0; i < len; i += 2) { + byte[0] = str[i]; + byte[1] = str[i + 1]; + value = (guint8)g_ascii_strtoull(byte, NULL, 16); + g_byte_array_append(bytes, &value, 1); + } + + return bytes; +} + +static void patch_hwaddr(unsigned int vcpu_index, void *userdata) +{ + uintptr_t addr = (uintptr_t) userdata; + g_autoptr(GString) str = g_string_new(NULL); + g_string_printf(str, "patching: @0x%" + PRIxPTR "\n", + addr); + qemu_plugin_outs(str->str); + + enum qemu_plugin_hwaddr_operation_result result = + qemu_plugin_write_memory_hwaddr(addr, patch_data); + + + if (result != QEMU_PLUGIN_HWADDR_OPERATION_OK) { + g_autoptr(GString) errmsg = g_string_new(NULL); + g_string_printf(errmsg, "Failed to write memory: %d\n", result); + qemu_plugin_outs(errmsg->str); + return; + } + + GByteArray *read_data = g_byte_array_new(); + + result = qemu_plugin_read_memory_hwaddr(addr, read_data, + patch_data->len); + + qemu_plugin_outs("Reading memory...\n"); + + if (result != QEMU_PLUGIN_HWADDR_OPERATION_OK) { + g_autoptr(GString) errmsg = g_string_new(NULL); + g_string_printf(errmsg, "Failed to read memory: %d\n", result); + qemu_plugin_outs(errmsg->str); + return; + } + + if (memcmp(patch_data->data, read_data->data, patch_data->len) != 0) { + qemu_plugin_outs("Failed to read back written data\n"); + } + + qemu_plugin_outs("Success!\n"); + + return; +} + +static void patch_vaddr(unsigned int vcpu_index, void *userdata) +{ + uintptr_t addr = (uintptr_t) userdata; + uint64_t hwaddr = 0; + if (!qemu_plugin_translate_vaddr(addr, &hwaddr)) { + qemu_plugin_outs("Failed to translate vaddr\n"); + return; + } + g_autoptr(GString) str = g_string_new(NULL); + g_string_printf(str, "patching: @0x%" + PRIxPTR " hw: @0x%" PRIx64 "\n", + addr, hwaddr); + qemu_plugin_outs(str->str); + + qemu_plugin_outs("Writing memory (vaddr)...\n"); + + if (!qemu_plugin_write_memory_vaddr(addr, patch_data)) { + qemu_plugin_outs("Failed to write memory\n"); + return; + } + + qemu_plugin_outs("Reading memory (vaddr)...\n"); + + g_autoptr(GByteArray) read_data = g_byte_array_new(); + + if (!qemu_plugin_read_memory_vaddr(addr, read_data, patch_data->len)) { + qemu_plugin_outs("Failed to read memory\n"); + return; + } + + if (memcmp(patch_data->data, read_data->data, patch_data->len) != 0) { + qemu_plugin_outs("Failed to read back written data\n"); + } + + qemu_plugin_outs("Success!\n"); + + return; +} + +/* + * Callback on translation of a translation block. + */ +static void vcpu_tb_trans_cb(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) +{ + g_autoptr(GByteArray) insn_data = g_byte_array_new(); + uintptr_t addr = 0; + + for (size_t i = 0; i < qemu_plugin_tb_n_insns(tb); i++) { + struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); + uint64_t vaddr = qemu_plugin_insn_vaddr(insn); + + if (use_hwaddr) { + uint64_t hwaddr = 0; + if (!qemu_plugin_translate_vaddr(vaddr, &hwaddr)) { + qemu_plugin_outs("Failed to translate vaddr\n"); + continue; + } + /* + * As we cannot emulate 64 bit systems on 32 bit hosts we + * should never see the top bits set, hence we can safely + * cast to uintptr_t. 
+ */ + g_assert(hwaddr <= UINTPTR_MAX); + addr = (uintptr_t) hwaddr; + } else { + g_assert(vaddr <= UINTPTR_MAX); + addr = (uintptr_t) vaddr; + } + + g_byte_array_set_size(insn_data, qemu_plugin_insn_size(insn)); + qemu_plugin_insn_data(insn, insn_data->data, insn_data->len); + + if (insn_data->len >= target_data->len && + !memcmp(insn_data->data, target_data->data, + MIN(target_data->len, insn_data->len))) { + if (use_hwaddr) { + qemu_plugin_register_vcpu_tb_exec_cb(tb, patch_hwaddr, + QEMU_PLUGIN_CB_NO_REGS, + (void *) addr); + } else { + qemu_plugin_register_vcpu_tb_exec_cb(tb, patch_vaddr, + QEMU_PLUGIN_CB_NO_REGS, + (void *) addr); + } + } + } +} + +static void usage(void) +{ + fprintf(stderr, "Usage: ,target=,patch=" + "[,use_hwaddr=true|false]"); +} + +/* + * Called when the plugin is installed + */ +QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, + const qemu_info_t *info, int argc, + char **argv) +{ + + use_hwaddr = true; + target_data = NULL; + patch_data = NULL; + + if (argc > 4) { + usage(); + return -1; + } + + for (size_t i = 0; i < argc; i++) { + char *opt = argv[i]; + g_auto(GStrv) tokens = g_strsplit(opt, "=", 2); + if (g_strcmp0(tokens[0], "use_hwaddr") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &use_hwaddr)) { + fprintf(stderr, + "Failed to parse boolean argument use_hwaddr\n"); + return -1; + } + } else if (g_strcmp0(tokens[0], "target") == 0) { + target_data = str_to_bytes(tokens[1]); + if (!target_data) { + fprintf(stderr, + "Failed to parse target bytes.\n"); + return -1; + } + } else if (g_strcmp0(tokens[0], "patch") == 0) { + patch_data = str_to_bytes(tokens[1]); + if (!patch_data) { + fprintf(stderr, "Failed to parse patch bytes.\n"); + return -1; + } + } else { + fprintf(stderr, "Unknown argument: %s\n", tokens[0]); + usage(); + return -1; + } + } + + if (!target_data) { + fprintf(stderr, "target argument is required\n"); + usage(); + return -1; + } + + if (!patch_data) { + fprintf(stderr, "patch argument is required\n"); + usage(); + return -1; + } + + if (target_data->len != patch_data->len) { + fprintf(stderr, "Target and patch data must be the same length\n"); + return -1; + } + + qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans_cb); + + return 0; +} diff --git a/tests/tcg/plugins/reset.c b/tests/tcg/plugins/reset.c new file mode 100644 index 00000000000..1be8be2a4b2 --- /dev/null +++ b/tests/tcg/plugins/reset.c @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2025 Linaro Ltd + * + * Test the reset/uninstall cycle of a plugin. 
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <glib.h>
+
+#include <qemu-plugin.h>
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+static qemu_plugin_id_t plugin_id;
+static bool was_reset;
+static bool was_uninstalled;
+
+static void after_uninstall(qemu_plugin_id_t id)
+{
+    g_assert(was_reset && !was_uninstalled);
+    qemu_plugin_outs("uninstall done\n");
+    was_uninstalled = true;
+}
+
+static void tb_exec_after_reset(unsigned int vcpu_index, void *userdata)
+{
+    g_assert(was_reset && !was_uninstalled);
+    qemu_plugin_uninstall(plugin_id, after_uninstall);
+}
+
+static void tb_trans_after_reset(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+    g_assert(was_reset && !was_uninstalled);
+    qemu_plugin_register_vcpu_tb_exec_cb(tb, tb_exec_after_reset,
+                                         QEMU_PLUGIN_CB_NO_REGS, NULL);
+}
+
+static void after_reset(qemu_plugin_id_t id)
+{
+    g_assert(!was_reset && !was_uninstalled);
+    qemu_plugin_outs("reset done\n");
+    was_reset = true;
+    qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans_after_reset);
+}
+
+static void tb_exec_before_reset(unsigned int vcpu_index, void *userdata)
+{
+    g_assert(!was_reset && !was_uninstalled);
+    qemu_plugin_reset(plugin_id, after_reset);
+}
+
+static void tb_trans_before_reset(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+    g_assert(!was_reset && !was_uninstalled);
+    qemu_plugin_register_vcpu_tb_exec_cb(tb, tb_exec_before_reset,
+                                         QEMU_PLUGIN_CB_NO_REGS, NULL);
+}
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
+                                           const qemu_info_t *info,
+                                           int argc, char **argv)
+{
+    plugin_id = id;
+    qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans_before_reset);
+    return 0;
+}
+
+/* Since we uninstall the plugin, we can't use qemu_plugin_register_atexit_cb,
+ * so we use destructor attribute instead.
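+ * The intended sequence, as a reading aid: tb_trans_before_reset ->
+ * tb_exec_before_reset -> qemu_plugin_reset() -> after_reset ->
+ * tb_trans_after_reset -> tb_exec_after_reset -> qemu_plugin_uninstall() ->
+ * after_uninstall; the destructor below then checks both flags were set.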
*/ +static void __attribute__((destructor)) on_plugin_exit(void) +{ + g_assert(was_reset && was_uninstalled); + qemu_plugin_outs("plugin exit\n"); +} diff --git a/tests/tcg/plugins/syscall.c b/tests/tcg/plugins/syscall.c index 72e1a5bf901..42801f5c863 100644 --- a/tests/tcg/plugins/syscall.c +++ b/tests/tcg/plugins/syscall.c @@ -22,23 +22,109 @@ typedef struct { int64_t errors; } SyscallStats; +struct SyscallInfo { + const char *name; + int64_t write_sysno; +}; + +static const struct SyscallInfo arch_syscall_info[] = { + { "aarch64", 64 }, + { "aarch64_be", 64 }, + { "alpha", 4 }, + { "arm", 4 }, + { "armeb", 4 }, + { "avr", -1 }, + { "hexagon", 64 }, + { "hppa", -1 }, + { "i386", 4 }, + { "loongarch64", -1 }, + { "m68k", 4 }, + { "microblaze", 4 }, + { "microblazeel", 4 }, + { "mips", 1 }, + { "mips64", 1 }, + { "mips64el", 1 }, + { "mipsel", 1 }, + { "mipsn32", 1 }, + { "mipsn32el", 1 }, + { "or1k", -1 }, + { "ppc", 4 }, + { "ppc64", 4 }, + { "ppc64le", 4 }, + { "riscv32", 64 }, + { "riscv64", 64 }, + { "rx", -1 }, + { "s390x", -1 }, + { "sh4", -1 }, + { "sh4eb", -1 }, + { "sparc", 4 }, + { "sparc32plus", 4 }, + { "sparc64", 4 }, + { "tricore", -1 }, + { "x86_64", 1 }, + { "xtensa", 13 }, + { "xtensaeb", 13 }, + { NULL, -1 }, +}; + static GMutex lock; static GHashTable *statistics; +static GByteArray *memory_buffer; +static bool do_log_writes; +static int64_t write_sysno = -1; static SyscallStats *get_or_create_entry(int64_t num) { SyscallStats *entry = - (SyscallStats *) g_hash_table_lookup(statistics, GINT_TO_POINTER(num)); + (SyscallStats *) g_hash_table_lookup(statistics, &num); if (!entry) { entry = g_new0(SyscallStats, 1); entry->num = num; - g_hash_table_insert(statistics, GINT_TO_POINTER(num), (gpointer) entry); + g_hash_table_insert(statistics, &entry->num, entry); } return entry; } +/* + * Hex-dump a GByteArray to the QEMU plugin output in the format: + * 61 63 63 65 6c 09 09 20 20 20 66 70 75 09 09 09 | accel.....fpu... 
+ * 20 6d 6f 64 75 6c 65 2d 63 6f 6d 6d 6f 6e 2e 63 | .module-common.c + */ +static void hexdump(const GByteArray *data) +{ + g_autoptr(GString) out = g_string_new(""); + + for (guint index = 0; index < data->len; index += 16) { + for (guint col = 0; col < 16; col++) { + if (index + col < data->len) { + g_string_append_printf(out, "%02x ", data->data[index + col]); + } else { + g_string_append(out, " "); + } + } + + g_string_append(out, " | "); + + for (guint col = 0; col < 16; col++) { + if (index + col >= data->len) { + break; + } + + if (g_ascii_isgraph(data->data[index + col])) { + g_string_append_printf(out, "%c", data->data[index + col]); + } else { + g_string_append(out, "."); + } + } + + g_string_append(out, "\n"); + } + + qemu_plugin_outs(out->str); +} + static void vcpu_syscall(qemu_plugin_id_t id, unsigned int vcpu_index, int64_t num, uint64_t a1, uint64_t a2, uint64_t a3, uint64_t a4, uint64_t a5, @@ -54,6 +140,14 @@ static void vcpu_syscall(qemu_plugin_id_t id, unsigned int vcpu_index, g_autofree gchar *out = g_strdup_printf("syscall #%" PRIi64 "\n", num); qemu_plugin_outs(out); } + + if (do_log_writes && num == write_sysno) { + if (qemu_plugin_read_memory_vaddr(a2, memory_buffer, a3)) { + hexdump(memory_buffer); + } else { + fprintf(stderr, "Error reading memory from vaddr %"PRIu64"\n", a2); + } + } } static void vcpu_syscall_ret(qemu_plugin_id_t id, unsigned int vcpu_idx, @@ -86,7 +180,7 @@ static void print_entry(gpointer val, gpointer user_data) qemu_plugin_outs(out); } -static gint comp_func(gconstpointer ea, gconstpointer eb) +static gint comp_func(gconstpointer ea, gconstpointer eb, gpointer d) { SyscallStats *ent_a = (SyscallStats *) ea; SyscallStats *ent_b = (SyscallStats *) eb; @@ -103,7 +197,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) g_mutex_lock(&lock); GList *entries = g_hash_table_get_values(statistics); - entries = g_list_sort(entries, comp_func); + entries = g_list_sort_with_data(entries, comp_func, NULL); qemu_plugin_outs("syscall no. 
calls errors\n"); g_list_foreach(entries, print_entry, NULL); @@ -127,6 +221,10 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_print)) { fprintf(stderr, "boolean argument parsing failed: %s\n", opt); } + } else if (g_strcmp0(tokens[0], "log_writes") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_log_writes)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + } } else { fprintf(stderr, "unsupported argument: %s\n", argv[i]); return -1; @@ -134,7 +232,25 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, } if (!do_print) { - statistics = g_hash_table_new_full(NULL, g_direct_equal, NULL, g_free); + statistics = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, g_free); + } + + if (do_log_writes) { + for (const struct SyscallInfo *syscall_info = arch_syscall_info; + syscall_info->name != NULL; syscall_info++) { + + if (g_strcmp0(syscall_info->name, info->target_name) == 0) { + write_sysno = syscall_info->write_sysno; + break; + } + } + + if (write_sysno == -1) { + fprintf(stderr, "write syscall number not found\n"); + return -1; + } + + memory_buffer = g_byte_array_new(); } qemu_plugin_register_vcpu_syscall_cb(id, vcpu_syscall); diff --git a/tests/tcg/ppc64/Makefile.target b/tests/tcg/ppc64/Makefile.target index 509a20be2b0..0d058b26006 100644 --- a/tests/tcg/ppc64/Makefile.target +++ b/tests/tcg/ppc64/Makefile.target @@ -6,7 +6,7 @@ VPATH += $(SRC_PATH)/tests/tcg/ppc64 config-cc.mak: Makefile $(quiet-@)( \ - $(call cc-option,-mpower8-vector, CROSS_CC_HAS_POWER8_VECTOR); \ + $(call cc-option,-mcpu=power8, CROSS_CC_HAS_CPU_POWER8); \ $(call cc-option,-mpower10, CROSS_CC_HAS_POWER10)) 3> config-cc.mak -include config-cc.mak @@ -23,15 +23,15 @@ run-threadcount: threadcount run-plugin-threadcount-with-%: $(call skip-test, $<, "BROKEN (flaky with clang) ") -ifneq ($(CROSS_CC_HAS_POWER8_VECTOR),) +ifneq ($(CROSS_CC_HAS_CPU_POWER8),) PPC64_TESTS=bcdsub non_signalling_xscv endif -$(PPC64_TESTS): CFLAGS += -mpower8-vector +$(PPC64_TESTS): CFLAGS += -mcpu=power8 -ifneq ($(CROSS_CC_HAS_POWER8_VECTOR),) +ifneq ($(CROSS_CC_HAS_CPU_POWER8),) PPC64_TESTS += vsx_f2i_nan endif -vsx_f2i_nan: CFLAGS += -mpower8-vector -I$(SRC_PATH)/include +vsx_f2i_nan: CFLAGS += -mcpu=power8 -I$(SRC_PATH)/include PPC64_TESTS += mtfsf PPC64_TESTS += mffsce @@ -55,4 +55,9 @@ PPC64_TESTS += signal_save_restore_xer PPC64_TESTS += xxspltw PPC64_TESTS += test-aes +# ppc64 ABI uses function descriptors, and thus, QEMU can't find symbol for a +# given instruction. Thus, we don't check output of mem-access plugin. 
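+# (With no symbol the ",function_name," part of the patterns the test prints,
+# e.g. ",store_u8,.*,8,store,0xf1", would presumably never match, hence the
+# empty CHECK_PLUGIN_OUTPUT_COMMAND below.)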
+run-plugin-test-plugin-mem-access-with-libmem.so: \ + CHECK_PLUGIN_OUTPUT_COMMAND= + TESTS += $(PPC64_TESTS) diff --git a/tests/tcg/riscv64/Makefile.softmmu-target b/tests/tcg/riscv64/Makefile.softmmu-target index 7c1d44d3f43..3ca595335dd 100644 --- a/tests/tcg/riscv64/Makefile.softmmu-target +++ b/tests/tcg/riscv64/Makefile.softmmu-target @@ -20,5 +20,9 @@ EXTRA_RUNS += run-issue1060 run-issue1060: issue1060 $(call run-test, $<, $(QEMU) $(QEMU_OPTS)$<) +EXTRA_RUNS += run-test-mepc-masking +run-test-mepc-masking: test-mepc-masking + $(call run-test, $<, $(QEMU) $(QEMU_OPTS)$<) + # We don't currently support the multiarch system tests undefine MULTIARCH_TESTS diff --git a/tests/tcg/riscv64/test-mepc-masking.S b/tests/tcg/riscv64/test-mepc-masking.S new file mode 100644 index 00000000000..fccd2a7ac46 --- /dev/null +++ b/tests/tcg/riscv64/test-mepc-masking.S @@ -0,0 +1,73 @@ +/* + * Test for MEPC masking bug fix + * + * This test verifies that MEPC properly masks the lower bits according + * to the RISC-V specification when vectored mode bits from STVEC are + * written to MEPC. + */ + + .option norvc + + .text + .global _start +_start: + /* Set up machine trap vector */ + lla t0, machine_trap_handler + csrw mtvec, t0 + + /* Set STVEC with vectored mode (mode bits = 01) */ + li t0, 0x80004001 + csrw stvec, t0 + + /* Clear medeleg to handle exceptions in M-mode */ + csrw medeleg, zero + + /* Trigger illegal instruction exception */ + .word 0xffffffff + +test_completed: + /* Exit with result in a0 */ + /* a0 = 0: success (bits [1:0] were masked) */ + /* a0 != 0: failure (some bits were not masked) */ + j _exit + +machine_trap_handler: + /* Check if illegal instruction (mcause = 2) */ + csrr t0, mcause + li t1, 2 + bne t0, t1, skip_test + + /* Test: Copy STVEC (with mode bits) to MEPC */ + csrr t0, stvec /* t0 = 0x80004001 */ + csrw mepc, t0 /* Write to MEPC */ + csrr t1, mepc /* Read back MEPC */ + + /* Check if bits [1:0] are masked (IALIGN=32 without RVC) */ + andi a0, t1, 3 /* a0 = 0 if both bits masked correctly */ + + /* Set correct return address */ + lla t0, test_completed + csrw mepc, t0 + +skip_test: + mret + +/* Exit with semihosting */ +_exit: + lla a1, semiargs + li t0, 0x20026 /* ADP_Stopped_ApplicationExit */ + sd t0, 0(a1) + sd a0, 8(a1) + li a0, 0x20 /* TARGET_SYS_EXIT_EXTENDED */ + + /* Semihosting call sequence */ + .balign 16 + slli zero, zero, 0x1f + ebreak + srai zero, zero, 0x7 + j . + + .data + .balign 8 +semiargs: + .space 16 diff --git a/tests/tcg/s390x/Makefile.softmmu-target b/tests/tcg/s390x/Makefile.softmmu-target index f60f94b090e..8cd4667c63b 100644 --- a/tests/tcg/s390x/Makefile.softmmu-target +++ b/tests/tcg/s390x/Makefile.softmmu-target @@ -1,8 +1,9 @@ S390X_SRC=$(SRC_PATH)/tests/tcg/s390x VPATH+=$(S390X_SRC) -QEMU_OPTS+=-action panic=exit-failure -nographic $(EXTFLAGS) -kernel +# EXTFLAGS can be passed by the user, e.g. 
to override the --accel +QEMU_OPTS+=-action panic=exit-failure -nographic -serial chardev:output $(EXTFLAGS) -kernel LINK_SCRIPT=$(S390X_SRC)/softmmu.ld -CFLAGS+=-ggdb -O0 +CFLAGS+=-ggdb -O0 -I$(SRC_PATH)/include/hw/s390x/ipl/ LDFLAGS=-nostdlib -static %.o: %.S @@ -41,8 +42,15 @@ $(ASM_TESTS): LDFLAGS += -Wl,-T$(LINK_SCRIPT) -Wl,--build-id=none $(ASM_TESTS): $(LINK_SCRIPT) TESTS += $(ASM_TESTS) +MULTIARCH_TESTS += mvc-smc S390X_MULTIARCH_RUNTIME_OBJS = head64.o console.o $(MINILIB_OBJS) $(MULTIARCH_TESTS): $(S390X_MULTIARCH_RUNTIME_OBJS) $(MULTIARCH_TESTS): LDFLAGS += $(S390X_MULTIARCH_RUNTIME_OBJS) -$(MULTIARCH_TESTS): CFLAGS += $(MINILIB_INC) +$(MULTIARCH_TESTS): CFLAGS += $(MINILIB_INC) \ + -I$(SRC_PATH)/roms/SLOF/lib/libc/include/ memory: CFLAGS += -DCHECK_UNALIGNED=0 + +# s390x clears the BSS section so we need to account for that +run-plugin-memory-with-libmem.so: \ + CHECK_PLUGIN_OUTPUT_COMMAND=$(MULTIARCH_SYSTEM_SRC)/validate-memory-counts.py \ + --bss-cleared $@.out diff --git a/tests/tcg/s390x/Makefile.target b/tests/tcg/s390x/Makefile.target index a8f86c94498..da5fe71a407 100644 --- a/tests/tcg/s390x/Makefile.target +++ b/tests/tcg/s390x/Makefile.target @@ -48,6 +48,7 @@ TESTS+=lae TESTS+=cvd TESTS+=cvb TESTS+=ts +TESTS+=ex-smc cdsg: CFLAGS+=-pthread cdsg: LDFLAGS+=-pthread @@ -73,8 +74,11 @@ $(Z13_TESTS): CFLAGS+=-march=z13 -O2 TESTS+=$(Z13_TESTS) ifneq ($(CROSS_CC_HAS_Z14),) -Z14_TESTS=vfminmax +Z14_TESTS=fma vfminmax +fma: float.h +fma: LDFLAGS+=-lm vfminmax: LDFLAGS+=-lm +vfminmax: float.h $(Z14_TESTS): CFLAGS+=-march=z14 -O2 TESTS+=$(Z14_TESTS) endif diff --git a/tests/tcg/s390x/console.c b/tests/tcg/s390x/console.c index d43ce3f44b4..6c26f04949f 100644 --- a/tests/tcg/s390x/console.c +++ b/tests/tcg/s390x/console.c @@ -4,7 +4,10 @@ * * SPDX-License-Identifier: GPL-2.0-or-later */ + #include "../../../pc-bios/s390-ccw/sclp.c" +#include "../../../roms/SLOF/lib/libc/string/memset.c" +#include "../../../roms/SLOF/lib/libc/string/memcpy.c" void __sys_outc(char c) { diff --git a/tests/tcg/s390x/ex-smc.c b/tests/tcg/s390x/ex-smc.c new file mode 100644 index 00000000000..f403640d6ba --- /dev/null +++ b/tests/tcg/s390x/ex-smc.c @@ -0,0 +1,57 @@ +/* + * Test modifying an EXECUTE target. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include + +/* Make sure we exercise the same EXECUTE instruction. */ +extern void execute(unsigned char *insn, unsigned char mask, + unsigned long *r1_r5); +asm(".globl execute\n" + "execute:\n" + "lg %r1,0(%r4)\n" + "lg %r5,8(%r4)\n" + "ex %r3,0(%r2)\n" + "stg %r5,8(%r4)\n" + "stg %r1,0(%r4)\n" + "br %r14\n"); + +/* Define an RWX EXECUTE target. */ +extern unsigned char lgfi[]; +asm(".pushsection .rwx,\"awx\",@progbits\n" + ".globl lgfi\n" + "lgfi: lgfi %r0,0\n" + ".popsection\n"); + +int main(void) +{ + unsigned long r1_r5[2]; + + /* Create an initial TB. */ + r1_r5[0] = -1; + r1_r5[1] = -1; + execute(lgfi, 1 << 4, r1_r5); + assert(r1_r5[0] == 0); + assert(r1_r5[1] == -1); + + /* Test changing the mask. */ + execute(lgfi, 5 << 4, r1_r5); + assert(r1_r5[0] == 0); + assert(r1_r5[1] == 0); + + /* Test changing the target. */ + lgfi[5] = 42; + execute(lgfi, 5 << 4, r1_r5); + assert(r1_r5[0] == 0); + assert(r1_r5[1] == 42); + + /* Test changing both the mask and the target. 
*/ + lgfi[5] = 24; + execute(lgfi, 1 << 4, r1_r5); + assert(r1_r5[0] == 24); + assert(r1_r5[1] == 42); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/s390x/float.h b/tests/tcg/s390x/float.h new file mode 100644 index 00000000000..9d1682b8fc5 --- /dev/null +++ b/tests/tcg/s390x/float.h @@ -0,0 +1,104 @@ +/* + * Helpers for floating-point tests. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef FLOAT_H +#define FLOAT_H + +/* + * Floating-point value classes. + */ +#define N_FORMATS 3 +#define CLASS_MINUS_INF 0 +#define CLASS_MINUS_FN 1 +#define CLASS_MINUS_ZERO 2 +#define CLASS_PLUS_ZERO 3 +#define CLASS_PLUS_FN 4 +#define CLASS_PLUS_INF 5 +#define CLASS_QNAN 6 +#define CLASS_SNAN 7 +#define N_SIGNED_CLASSES 8 +static const size_t float_sizes[N_FORMATS] = { + /* M4 == 2: short */ 4, + /* M4 == 3: long */ 8, + /* M4 == 4: extended */ 16, +}; +static const size_t e_bits[N_FORMATS] = { + /* M4 == 2: short */ 8, + /* M4 == 3: long */ 11, + /* M4 == 4: extended */ 15, +}; +struct float_class { + size_t n; + unsigned char v[2][16]; +}; +static const struct float_class signed_floats[N_FORMATS][N_SIGNED_CLASSES] = { + /* M4 == 2: short */ + { + /* -inf */ {1, {{0xff, 0x80, 0x00, 0x00}}}, + /* -Fn */ {2, {{0xc2, 0x28, 0x00, 0x00}, + {0xc2, 0x29, 0x00, 0x00}}}, + /* -0 */ {1, {{0x80, 0x00, 0x00, 0x00}}}, + /* +0 */ {1, {{0x00, 0x00, 0x00, 0x00}}}, + /* +Fn */ {2, {{0x42, 0x28, 0x00, 0x00}, + {0x42, 0x2a, 0x00, 0x00}}}, + /* +inf */ {1, {{0x7f, 0x80, 0x00, 0x00}}}, + /* QNaN */ {2, {{0x7f, 0xff, 0xff, 0xff}, + {0x7f, 0xff, 0xff, 0xfe}}}, + /* SNaN */ {2, {{0x7f, 0xbf, 0xff, 0xff}, + {0x7f, 0xbf, 0xff, 0xfd}}}, + }, + + /* M4 == 3: long */ + { + /* -inf */ {1, {{0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}}, + /* -Fn */ {2, {{0xc0, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0xc0, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}}, + /* -0 */ {1, {{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}}, + /* +0 */ {1, {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}}, + /* +Fn */ {2, {{0x40, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x40, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}}, + /* +inf */ {1, {{0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}}, + /* QNaN */ {2, {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}}}, + /* SNaN */ {2, {{0x7f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + {0x7f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd}}}, + }, + + /* M4 == 4: extended */ + { + /* -inf */ {1, {{0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}}, + /* -Fn */ {2, {{0xc0, 0x04, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0xc0, 0x04, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}}, + /* -0 */ {1, {{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}}, + /* +0 */ {1, {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}}, + /* +Fn */ {2, {{0x40, 0x04, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x40, 0x04, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}}, + /* +inf */ {1, {{0x7f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}}, + /* QNaN */ {2, {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}}}, + /* SNaN */ {2, {{0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + {0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd}}}, + }, +}; +static const unsigned char default_nans[N_FORMATS][16] = { + /* M4 == 2: short */ {0x7f, 0xc0, 0x00, 0x00}, + /* M4 == 3: long */ {0x7f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + /* M4 == 4: extended */ {0x7f, 0xff, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, +}; + +static void dump_v(FILE *f, const void *v, size_t n) +{ + for (int i = 0; i < n; i++) { + fprintf(f, "%02x", ((const unsigned char *)v)[i]); + } +} + +static void snan_to_qnan(char *v, int fmt) +{ + size_t bit = 1 + e_bits[fmt]; + v[bit / 8] |= 1 << (7 - (bit % 8)); +} + +#endif diff --git a/tests/tcg/s390x/fma.c b/tests/tcg/s390x/fma.c new file mode 100644 index 00000000000..6872f59a7a6 --- /dev/null +++ b/tests/tcg/s390x/fma.c @@ -0,0 +1,233 @@ +/* + * Test floating-point multiply-and-add instructions. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include +#include +#include +#include +#include "float.h" + +union val { + float e; + double d; + long double x; + char buf[16]; +}; + +/* + * PoP tables as close to the original as possible. + */ +static const char *table1[N_SIGNED_CLASSES][N_SIGNED_CLASSES] = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "P(+inf)", "P(+inf)", "Xi: T(dNaN)", "Xi: T(dNaN)", "P(-inf)", "P(-inf)", "P(b)", "Xi: T(b*)"}, + {/* -Fn */ "P(+inf)", "P(a*b)", "P(+0)", "P(-0)", "P(a*b)", "P(-inf)", "P(b)", "Xi: T(b*)"}, + {/* -0 */ "Xi: T(dNaN)", "P(+0)", "P(+0)", "P(-0)", "P(-0)", "Xi: T(dNaN)", "P(b)", "Xi: T(b*)"}, + {/* +0 */ "Xi: T(dNaN)", "P(-0)", "P(-0)", "P(+0)", "P(+0)", "Xi: T(dNaN)", "P(b)", "Xi: T(b*)"}, + {/* +Fn */ "P(-inf)", "P(a*b)", "P(-0)", "P(+0)", "P(a*b)", "P(+inf)", "P(b)", "Xi: T(b*)"}, + {/* +inf */ "P(-inf)", "P(-inf)", "Xi: T(dNaN)", "Xi: T(dNaN)", "P(+inf)", "P(+inf)", "P(b)", "Xi: T(b*)"}, + {/* QNaN */ "P(a)", "P(a)", "P(a)", "P(a)", "P(a)", "P(a)", "P(a)", "Xi: T(b*)"}, + {/* SNaN */ "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)", "Xi: T(a*)"}, +}; + +static const char *table2[N_SIGNED_CLASSES][N_SIGNED_CLASSES] = { + /* -inf -Fn -0 +0 +Fn +inf QNaN SNaN */ + {/* -inf */ "T(-inf)", "T(-inf)", "T(-inf)", "T(-inf)", "T(-inf)", "Xi: T(dNaN)", "T(c)", "Xi: T(c*)"}, + {/* -Fn */ "T(-inf)", "R(p+c)", "R(p)", "R(p)", "R(p+c)", "T(+inf)", "T(c)", "Xi: T(c*)"}, + {/* -0 */ "T(-inf)", "R(c)", "T(-0)", "Rezd", "R(c)", "T(+inf)", "T(c)", "Xi: T(c*)"}, + {/* +0 */ "T(-inf)", "R(c)", "Rezd", "T(+0)", "R(c)", "T(+inf)", "T(c)", "Xi: T(c*)"}, + {/* +Fn */ "T(-inf)", "R(p+c)", "R(p)", "R(p)", "R(p+c)", "T(+inf)", "T(c)", "Xi: T(c*)"}, + {/* +inf */ "Xi: T(dNaN)", "T(+inf)", "T(+inf)", "T(+inf)", "T(+inf)", "T(+inf)", "T(c)", "Xi: T(c*)"}, + {/* QNaN */ "T(p)", "T(p)", "T(p)", "T(p)", "T(p)", "T(p)", "T(p)", "Xi: T(c*)"}, + /* SNaN: can't happen */ +}; + +static void interpret_tables(union val *r, bool *xi, int fmt, + int cls_a, const union val *a, + int cls_b, const union val *b, + int cls_c, const union val *c) +{ + const char *spec1 = table1[cls_a][cls_b]; + const char *spec2; + union val p; + int cls_p; + + *xi = false; + + if (strcmp(spec1, "P(-inf)") == 0) { + cls_p = CLASS_MINUS_INF; + } else if (strcmp(spec1, "P(+inf)") == 0) { + cls_p = CLASS_PLUS_INF; + } else if (strcmp(spec1, "P(-0)") == 0) { + cls_p 
= CLASS_MINUS_ZERO; + } else if (strcmp(spec1, "P(+0)") == 0) { + cls_p = CLASS_PLUS_ZERO; + } else if (strcmp(spec1, "P(a)") == 0) { + cls_p = cls_a; + memcpy(&p, a, sizeof(p)); + } else if (strcmp(spec1, "P(b)") == 0) { + cls_p = cls_b; + memcpy(&p, b, sizeof(p)); + } else if (strcmp(spec1, "P(a*b)") == 0) { + /* + * In the general case splitting fma into multiplication and addition + * doesn't work, but this is the case with our test inputs. + */ + cls_p = cls_a == cls_b ? CLASS_PLUS_FN : CLASS_MINUS_FN; + switch (fmt) { + case 0: + p.e = a->e * b->e; + break; + case 1: + p.d = a->d * b->d; + break; + case 2: + p.x = a->x * b->x; + break; + default: + fprintf(stderr, "Unsupported fmt: %d\n", fmt); + exit(1); + } + } else if (strcmp(spec1, "Xi: T(dNaN)") == 0) { + memcpy(r, default_nans[fmt], sizeof(*r)); + *xi = true; + return; + } else if (strcmp(spec1, "Xi: T(a*)") == 0) { + memcpy(r, a, sizeof(*r)); + snan_to_qnan(r->buf, fmt); + *xi = true; + return; + } else if (strcmp(spec1, "Xi: T(b*)") == 0) { + memcpy(r, b, sizeof(*r)); + snan_to_qnan(r->buf, fmt); + *xi = true; + return; + } else { + fprintf(stderr, "Unsupported spec1: %s\n", spec1); + exit(1); + } + + spec2 = table2[cls_p][cls_c]; + if (strcmp(spec2, "T(-inf)") == 0) { + memcpy(r, signed_floats[fmt][CLASS_MINUS_INF].v[0], sizeof(*r)); + } else if (strcmp(spec2, "T(+inf)") == 0) { + memcpy(r, signed_floats[fmt][CLASS_PLUS_INF].v[0], sizeof(*r)); + } else if (strcmp(spec2, "T(-0)") == 0) { + memcpy(r, signed_floats[fmt][CLASS_MINUS_ZERO].v[0], sizeof(*r)); + } else if (strcmp(spec2, "T(+0)") == 0 || strcmp(spec2, "Rezd") == 0) { + memcpy(r, signed_floats[fmt][CLASS_PLUS_ZERO].v[0], sizeof(*r)); + } else if (strcmp(spec2, "R(c)") == 0 || strcmp(spec2, "T(c)") == 0) { + memcpy(r, c, sizeof(*r)); + } else if (strcmp(spec2, "R(p)") == 0 || strcmp(spec2, "T(p)") == 0) { + memcpy(r, &p, sizeof(*r)); + } else if (strcmp(spec2, "R(p+c)") == 0 || strcmp(spec2, "T(p+c)") == 0) { + switch (fmt) { + case 0: + r->e = p.e + c->e; + break; + case 1: + r->d = p.d + c->d; + break; + case 2: + r->x = p.x + c->x; + break; + default: + fprintf(stderr, "Unsupported fmt: %d\n", fmt); + exit(1); + } + } else if (strcmp(spec2, "Xi: T(dNaN)") == 0) { + memcpy(r, default_nans[fmt], sizeof(*r)); + *xi = true; + } else if (strcmp(spec2, "Xi: T(c*)") == 0) { + memcpy(r, c, sizeof(*r)); + snan_to_qnan(r->buf, fmt); + *xi = true; + } else { + fprintf(stderr, "Unsupported spec2: %s\n", spec2); + exit(1); + } +} + +struct iter { + int fmt; + int cls[3]; + int val[3]; +}; + +static bool iter_next(struct iter *it) +{ + int i; + + for (i = 2; i >= 0; i--) { + if (++it->val[i] != signed_floats[it->fmt][it->cls[i]].n) { + return true; + } + it->val[i] = 0; + + if (++it->cls[i] != N_SIGNED_CLASSES) { + return true; + } + it->cls[i] = 0; + } + + return ++it->fmt != N_FORMATS; +} + +int main(void) +{ + int ret = EXIT_SUCCESS; + struct iter it = {}; + + do { + size_t n = float_sizes[it.fmt]; + union val a, b, c, exp, res; + bool xi_exp, xi; + + memcpy(&a, signed_floats[it.fmt][it.cls[0]].v[it.val[0]], sizeof(a)); + memcpy(&b, signed_floats[it.fmt][it.cls[1]].v[it.val[1]], sizeof(b)); + memcpy(&c, signed_floats[it.fmt][it.cls[2]].v[it.val[2]], sizeof(c)); + + interpret_tables(&exp, &xi_exp, it.fmt, + it.cls[1], &b, it.cls[2], &c, it.cls[0], &a); + + memcpy(&res, &a, sizeof(res)); + feclearexcept(FE_ALL_EXCEPT); + switch (it.fmt) { + case 0: + asm("maebr %[a],%[b],%[c]" + : [a] "+f" (res.e) : [b] "f" (b.e), [c] "f" (c.e)); + break; + case 1: + asm("madbr %[a],%[b],%[c]" 
+ : [a] "+f" (res.d) : [b] "f" (b.d), [c] "f" (c.d)); + break; + case 2: + asm("wfmaxb %[a],%[c],%[b],%[a]" + : [a] "+v" (res.x) : [b] "v" (b.x), [c] "v" (c.x)); + break; + default: + fprintf(stderr, "Unsupported fmt: %d\n", it.fmt); + exit(1); + } + xi = fetestexcept(FE_ALL_EXCEPT) == FE_INVALID; + + if (memcmp(&res, &exp, n) != 0 || xi != xi_exp) { + fprintf(stderr, "[ FAILED ] "); + dump_v(stderr, &b, n); + fprintf(stderr, " * "); + dump_v(stderr, &c, n); + fprintf(stderr, " + "); + dump_v(stderr, &a, n); + fprintf(stderr, ": actual="); + dump_v(stderr, &res, n); + fprintf(stderr, "/%d, expected=", (int)xi); + dump_v(stderr, &exp, n); + fprintf(stderr, "/%d\n", (int)xi_exp); + ret = EXIT_FAILURE; + } + } while (iter_next(&it)); + + return ret; +} diff --git a/tests/tcg/s390x/mvc-smc.c b/tests/tcg/s390x/mvc-smc.c new file mode 100644 index 00000000000..d68f60caa85 --- /dev/null +++ b/tests/tcg/s390x/mvc-smc.c @@ -0,0 +1,82 @@ +/* + * Test modifying code using the MVC instruction. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include + +#define PAGE_SIZE 4096 +#define BR_14_SIZE 2 +#define RWX_OFFSET 2 + +static unsigned char rw[PAGE_SIZE + BR_14_SIZE]; +static unsigned char rwx[RWX_OFFSET + sizeof(rw)] + __attribute__((aligned(PAGE_SIZE))); + +typedef unsigned long (*function_t)(unsigned long); + +static int emit_function(unsigned char *p, int n) +{ + int i = 0, val = 0; + + while (i < n - 2) { + /* aghi %r2,1 */ + p[i++] = 0xa7; + p[i++] = 0x2b; + p[i++] = 0x00; + p[i++] = 0x01; + val++; + } + + /* br %r14 */ + p[i++] = 0x07; + p[i++] = 0xfe; + + return val; +} + +static void memcpy_mvc(void *dest, void *src, unsigned long n) +{ + while (n >= 256) { + asm("mvc 0(256,%[dest]),0(%[src])" + : + : [dest] "a" (dest) + , [src] "a" (src) + : "memory"); + dest += 256; + src += 256; + n -= 256; + } + asm("exrl %[n],0f\n" + "j 1f\n" + "0: mvc 0(1,%[dest]),0(%[src])\n" + "1:" + : + : [dest] "a" (dest) + , [src] "a" (src) + , [n] "a" (n) + : "memory"); +} + +int main(void) +{ + int expected, size; + + /* Create a TB. */ + size = sizeof(rwx) - RWX_OFFSET - 4; + expected = emit_function(rwx + RWX_OFFSET, size); + if (((function_t)(rwx + RWX_OFFSET))(0) != expected) { + return 1; + } + + /* Overwrite the TB. */ + size += 4; + expected = emit_function(rw, size); + memcpy_mvc(rwx + RWX_OFFSET, rw, size); + if (((function_t)(rwx + RWX_OFFSET))(0) != expected) { + return 2; + } + + return 0; +} diff --git a/tests/tcg/s390x/vfminmax.c b/tests/tcg/s390x/vfminmax.c index 22629df160e..e66285f7625 100644 --- a/tests/tcg/s390x/vfminmax.c +++ b/tests/tcg/s390x/vfminmax.c @@ -4,6 +4,8 @@ #include #include +#include "float.h" + /* * vfmin/vfmax instruction execution. */ @@ -21,98 +23,21 @@ static void vfminmax(unsigned int op, unsigned int m4, unsigned int m5, unsigned int m6, void *v1, const void *v2, const void *v3) { - insn[3] = (m6 << 4) | m5; - insn[4] = (m4 << 4) | 0x0e; - insn[5] = op; + insn[3] = (m6 << 4) | m5; + insn[4] = (m4 << 4) | 0x0e; + insn[5] = op; asm("vl %%v25,%[v2]\n" "vl %%v26,%[v3]\n" "ex 0,%[insn]\n" "vst %%v24,%[v1]\n" : [v1] "=m" (*(char (*)[16])v1) - : [v2] "m" (*(char (*)[16])v2) - , [v3] "m" (*(char (*)[16])v3) - , [insn] "m"(insn) + : [v2] "m" (*(const char (*)[16])v2) + , [v3] "m" (*(const char (*)[16])v3) + , [insn] "m" (insn) : "v24", "v25", "v26"); } -/* - * Floating-point value classes. 
- */ -#define N_FORMATS 3 -#define N_SIGNED_CLASSES 8 -static const size_t float_sizes[N_FORMATS] = { - /* M4 == 2: short */ 4, - /* M4 == 3: long */ 8, - /* M4 == 4: extended */ 16, -}; -static const size_t e_bits[N_FORMATS] = { - /* M4 == 2: short */ 8, - /* M4 == 3: long */ 11, - /* M4 == 4: extended */ 15, -}; -static const unsigned char signed_floats[N_FORMATS][N_SIGNED_CLASSES][2][16] = { - /* M4 == 2: short */ - { - /* -inf */ {{0xff, 0x80, 0x00, 0x00}, - {0xff, 0x80, 0x00, 0x00}}, - /* -Fn */ {{0xc2, 0x28, 0x00, 0x00}, - {0xc2, 0x29, 0x00, 0x00}}, - /* -0 */ {{0x80, 0x00, 0x00, 0x00}, - {0x80, 0x00, 0x00, 0x00}}, - /* +0 */ {{0x00, 0x00, 0x00, 0x00}, - {0x00, 0x00, 0x00, 0x00}}, - /* +Fn */ {{0x42, 0x28, 0x00, 0x00}, - {0x42, 0x2a, 0x00, 0x00}}, - /* +inf */ {{0x7f, 0x80, 0x00, 0x00}, - {0x7f, 0x80, 0x00, 0x00}}, - /* QNaN */ {{0x7f, 0xff, 0xff, 0xff}, - {0x7f, 0xff, 0xff, 0xfe}}, - /* SNaN */ {{0x7f, 0xbf, 0xff, 0xff}, - {0x7f, 0xbf, 0xff, 0xfd}}, - }, - - /* M4 == 3: long */ - { - /* -inf */ {{0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - /* -Fn */ {{0xc0, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0xc0, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - /* -0 */ {{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - /* +0 */ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - /* +Fn */ {{0x40, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0x40, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - /* +inf */ {{0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - /* QNaN */ {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}}, - /* SNaN */ {{0x7f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - {0x7f, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd}}, - }, - - /* M4 == 4: extended */ - { - /* -inf */ {{0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - /* -Fn */ {{0xc0, 0x04, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0xc0, 0x04, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - /* -0 */ {{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - /* +0 */ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - /* +Fn */ {{0x40, 0x04, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0x40, 0x04, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - /* +inf */ {{0x7f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0x7f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - /* QNaN */ {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}}, - /* SNaN */ {{0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff}, - {0x7f, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd}}, - }, -}; - /* * PoP tables as close to the original as possible. */ @@ -285,13 +210,6 @@ struct signed_test { }, }; -static void dump_v(FILE *f, const void *v, size_t n) -{ - for (int i = 0; i < n; i++) { - fprintf(f, "%02x", ((const unsigned char *)v)[i]); - } -} - static int signed_test(struct signed_test *test, int m4, int m5, const void *v1_exp, bool xi_exp, const void *v2, const void *v3) @@ -320,10 +238,28 @@ static int signed_test(struct signed_test *test, int m4, int m5, return 0; } -static void snan_to_qnan(char *v, int m4) +struct iter { + int cls[2]; + int val[2]; +}; + +static bool iter_next(struct iter *it, int fmt) { - size_t bit = 1 + e_bits[m4 - 2]; - v[bit / 8] |= 1 << (7 - (bit % 8)); + int i; + + for (i = 1; i >= 0; i--) { + if (++it->val[i] != signed_floats[fmt][it->cls[i]].n) { + return true; + } + it->val[i] = 0; + + if (++it->cls[i] != N_SIGNED_CLASSES) { + return true; + } + it->cls[i] = 0; + } + + return false; } int main(void) @@ -333,72 +269,71 @@ int main(void) for (i = 0; i < sizeof(signed_tests) / sizeof(signed_tests[0]); i++) { struct signed_test *test = &signed_tests[i]; - int m4; + int fmt; - for (m4 = 2; m4 <= 4; m4++) { - const unsigned char (*floats)[2][16] = signed_floats[m4 - 2]; - size_t float_size = float_sizes[m4 - 2]; + for (fmt = 0; fmt < N_FORMATS; fmt++) { + size_t float_size = float_sizes[fmt]; + int m4 = fmt + 2; int m5; for (m5 = 0; m5 <= 8; m5 += 8) { char v1_exp[16], v2[16], v3[16]; bool xi_exp = false; + struct iter it = {}; int pos = 0; - int i2; - for (i2 = 0; i2 < N_SIGNED_CLASSES * 2; i2++) { - int i3; + do { + const char *spec = test->table[it.cls[0]][it.cls[1]]; - for (i3 = 0; i3 < N_SIGNED_CLASSES * 2; i3++) { - const char *spec = test->table[i2 / 2][i3 / 2]; + memcpy(&v2[pos], + signed_floats[fmt][it.cls[0]].v[it.val[0]], + float_size); + memcpy(&v3[pos], + signed_floats[fmt][it.cls[1]].v[it.val[1]], + float_size); + if (strcmp(spec, "T(a)") == 0 || + strcmp(spec, "Xi: T(a)") == 0) { + memcpy(&v1_exp[pos], &v2[pos], float_size); + } else if (strcmp(spec, "T(b)") == 0 || + strcmp(spec, "Xi: T(b)") == 0) { + memcpy(&v1_exp[pos], &v3[pos], float_size); + } else if (strcmp(spec, "Xi: T(a*)") == 0) { + memcpy(&v1_exp[pos], &v2[pos], float_size); + snan_to_qnan(&v1_exp[pos], fmt); + } else if (strcmp(spec, "Xi: T(b*)") == 0) { + memcpy(&v1_exp[pos], &v3[pos], float_size); + snan_to_qnan(&v1_exp[pos], fmt); + } else if (strcmp(spec, "T(M(a,b))") == 0) { + /* + * Comparing floats is risky, since the compiler might + * generate the same instruction that we are testing. + * Compare ints instead. This works, because we get + * here only for +-Fn, and the corresponding test + * values have identical exponents. 
+ */ + int v2_int = *(int *)&v2[pos]; + int v3_int = *(int *)&v3[pos]; - memcpy(&v2[pos], floats[i2 / 2][i2 % 2], float_size); - memcpy(&v3[pos], floats[i3 / 2][i3 % 2], float_size); - if (strcmp(spec, "T(a)") == 0 || - strcmp(spec, "Xi: T(a)") == 0) { + if ((v2_int < v3_int) == + ((test->op == VFMIN) != (v2_int < 0))) { memcpy(&v1_exp[pos], &v2[pos], float_size); - } else if (strcmp(spec, "T(b)") == 0 || - strcmp(spec, "Xi: T(b)") == 0) { - memcpy(&v1_exp[pos], &v3[pos], float_size); - } else if (strcmp(spec, "Xi: T(a*)") == 0) { - memcpy(&v1_exp[pos], &v2[pos], float_size); - snan_to_qnan(&v1_exp[pos], m4); - } else if (strcmp(spec, "Xi: T(b*)") == 0) { - memcpy(&v1_exp[pos], &v3[pos], float_size); - snan_to_qnan(&v1_exp[pos], m4); - } else if (strcmp(spec, "T(M(a,b))") == 0) { - /* - * Comparing floats is risky, since the compiler - * might generate the same instruction that we are - * testing. Compare ints instead. This works, - * because we get here only for +-Fn, and the - * corresponding test values have identical - * exponents. - */ - int v2_int = *(int *)&v2[pos]; - int v3_int = *(int *)&v3[pos]; - - if ((v2_int < v3_int) == - ((test->op == VFMIN) != (v2_int < 0))) { - memcpy(&v1_exp[pos], &v2[pos], float_size); - } else { - memcpy(&v1_exp[pos], &v3[pos], float_size); - } } else { - fprintf(stderr, "Unexpected spec: %s\n", spec); - return 1; + memcpy(&v1_exp[pos], &v3[pos], float_size); } - xi_exp |= spec[0] == 'X'; - pos += float_size; + } else { + fprintf(stderr, "Unexpected spec: %s\n", spec); + return 1; + } + xi_exp |= spec[0] == 'X'; + pos += float_size; - if ((m5 & 8) || pos == 16) { - ret |= signed_test(test, m4, m5, - v1_exp, xi_exp, v2, v3); - pos = 0; - xi_exp = false; - } + if ((m5 & 8) || pos == 16) { + ret |= signed_test(test, m4, m5, + v1_exp, xi_exp, v2, v3); + pos = 0; + xi_exp = false; } - } + } while (iter_next(&it, fmt)); if (pos != 0) { ret |= signed_test(test, m4, m5, v1_exp, xi_exp, v2, v3); diff --git a/tests/tcg/x86_64/Makefile.softmmu-target b/tests/tcg/x86_64/Makefile.softmmu-target index ef6bcb4dc74..4e65f58b570 100644 --- a/tests/tcg/x86_64/Makefile.softmmu-target +++ b/tests/tcg/x86_64/Makefile.softmmu-target @@ -1,13 +1,11 @@ # -# x86 system tests -# -# This currently builds only for i386. The common C code is built -# with standard compiler flags however so we can support both by -# adding additional boot files for x86_64. 
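(Editorial aside on the vfminmax/fma expectation code above, which compares float encodings as plain integers. The following standalone sketch is not part of the patch; it only illustrates why the trick works, assuming 32-bit integer access to the IEEE-754 encoding just as the test's *(int *) casts do: for finite values the signed integer ordering of the bit patterns matches the floating-point ordering when the operands are positive and is exactly reversed when they are negative, which is why the test XORs the comparison result with (v2_int < 0).)

#include <assert.h>
#include <stdint.h>
#include <string.h>

static int32_t float_bits(float f)
{
    int32_t i;

    /* Reinterpret the IEEE-754 single-precision encoding as an integer. */
    memcpy(&i, &f, sizeof(i));
    return i;
}

int main(void)
{
    /* Positive operands: integer order matches float order. */
    assert((float_bits(1.5f) < float_bits(2.5f)) == (1.5f < 2.5f));
    /* Negative operands: integer order is the reverse of float order. */
    assert((float_bits(-1.5f) < float_bits(-2.5f)) == !(-1.5f < -2.5f));
    return 0;
}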
+# x86_64 system tests # -I386_SYSTEM_SRC=$(SRC_PATH)/tests/tcg/i386/system X64_SYSTEM_SRC=$(SRC_PATH)/tests/tcg/x86_64/system +X64_SYSTEM_TESTS=$(patsubst $(X64_SYSTEM_SRC)/%.c, %, $(wildcard $(X64_SYSTEM_SRC)/*.c)) + +VPATH+=$(X64_SYSTEM_SRC) # These objects provide the basic boot code and helper functions for all tests CRT_OBJS=boot.o @@ -18,7 +16,7 @@ LDFLAGS=-Wl,-T$(LINK_SCRIPT) -Wl,-melf_x86_64 CFLAGS+=-nostdlib -ggdb -O0 $(MINILIB_INC) LDFLAGS+=-static -nostdlib $(CRT_OBJS) $(MINILIB_OBJS) -lgcc -TESTS+=$(MULTIARCH_TESTS) +TESTS+=$(MULTIARCH_TESTS) $(X64_SYSTEM_TESTS) EXTRA_RUNS+=$(MULTIARCH_RUNS) # building head blobs @@ -35,3 +33,12 @@ memory: CFLAGS+=-DCHECK_UNALIGNED=1 # Running QEMU_OPTS+=-device isa-debugcon,chardev=output -device isa-debug-exit,iobase=0xf4,iosize=0x4 -kernel + +ifeq ($(CONFIG_PLUGIN),y) +run-plugin-patch-target-with-libpatch.so: \ + PLUGIN_ARGS=$(COMMA)target=ffc0$(COMMA)patch=9090$(COMMA)use_hwaddr=true +run-plugin-patch-target-with-libpatch.so: \ + CHECK_PLUGIN_OUTPUT_COMMAND=$(X64_SYSTEM_SRC)/validate-patch.py $@.out +run-plugin-patch-target-with-libpatch.so: patch-target libpatch.so +EXTRA_RUNS_WITH_PLUGIN+=run-plugin-patch-target-with-libpatch.so +endif diff --git a/tests/tcg/x86_64/Makefile.target b/tests/tcg/x86_64/Makefile.target index eda9bd7396c..be20fc64e88 100644 --- a/tests/tcg/x86_64/Makefile.target +++ b/tests/tcg/x86_64/Makefile.target @@ -16,6 +16,9 @@ X86_64_TESTS += noexec X86_64_TESTS += cmpxchg X86_64_TESTS += adox X86_64_TESTS += test-1648 +X86_64_TESTS += test-2175 +X86_64_TESTS += cross-modifying-code +X86_64_TESTS += fma TESTS=$(MULTIARCH_TESTS) $(X86_64_TESTS) test-x86_64 else TESTS=$(MULTIARCH_TESTS) @@ -26,6 +29,9 @@ adox: CFLAGS=-O2 run-test-i386-ssse3: QEMU_OPTS += -cpu max run-plugin-test-i386-ssse3-%: QEMU_OPTS += -cpu max +cross-modifying-code: CFLAGS+=-pthread +cross-modifying-code: LDFLAGS+=-pthread + test-x86_64: LDFLAGS+=-lm -lc test-x86_64: test-i386.c test-i386.h test-i386-shift.h test-i386-muldiv.h $(CC) $(CFLAGS) $< -o $@ $(LDFLAGS) diff --git a/tests/tcg/x86_64/cross-modifying-code.c b/tests/tcg/x86_64/cross-modifying-code.c new file mode 100644 index 00000000000..2704df6061c --- /dev/null +++ b/tests/tcg/x86_64/cross-modifying-code.c @@ -0,0 +1,80 @@ +/* + * Test patching code, running in one thread, from another thread. + * + * Intel SDM calls this "cross-modifying code" and recommends a special + * sequence, which requires both threads to cooperate. + * + * Linux kernel uses a different sequence that does not require cooperation and + * involves patching the first byte with int3. + * + * Finally, there is user-mode software out there that simply uses atomics, and + * that seems to be good enough in practice. Test that QEMU has no problems + * with this as well. 
+ */ + +#include +#include +#include +#include + +void add1_or_nop(long *x); +asm(".pushsection .rwx,\"awx\",@progbits\n" + ".globl add1_or_nop\n" + /* addq $0x1,(%rdi) */ + "add1_or_nop: .byte 0x48, 0x83, 0x07, 0x01\n" + "ret\n" + ".popsection\n"); + +#define THREAD_WAIT 0 +#define THREAD_PATCH 1 +#define THREAD_STOP 2 + +static void *thread_func(void *arg) +{ + int val = 0x0026748d; /* nop */ + + while (true) { + switch (__atomic_load_n((int *)arg, __ATOMIC_SEQ_CST)) { + case THREAD_WAIT: + break; + case THREAD_PATCH: + val = __atomic_exchange_n((int *)&add1_or_nop, val, + __ATOMIC_SEQ_CST); + break; + case THREAD_STOP: + return NULL; + default: + assert(false); + __builtin_unreachable(); + } + } +} + +#define INITIAL 42 +#define COUNT 1000000 + +int main(void) +{ + int command = THREAD_WAIT; + pthread_t thread; + long x = 0; + int err; + int i; + + err = pthread_create(&thread, NULL, &thread_func, &command); + assert(err == 0); + + __atomic_store_n(&command, THREAD_PATCH, __ATOMIC_SEQ_CST); + for (i = 0; i < COUNT; i++) { + add1_or_nop(&x); + } + __atomic_store_n(&command, THREAD_STOP, __ATOMIC_SEQ_CST); + + err = pthread_join(thread, NULL); + assert(err == 0); + + assert(x >= INITIAL); + assert(x <= INITIAL + COUNT); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/x86_64/fma.c b/tests/tcg/x86_64/fma.c new file mode 100644 index 00000000000..34219614c0a --- /dev/null +++ b/tests/tcg/x86_64/fma.c @@ -0,0 +1,116 @@ +/* + * Test some fused multiply add corner cases. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include +#include +#include +#include + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +/* + * Perform one "n * m + a" operation using the vfmadd insn and return + * the result; on return *mxcsr_p is set to the bottom 6 bits of MXCSR + * (the Flag bits). If ftz is true then we set MXCSR.FTZ while doing + * the operation. + * We print the operation and its results to stdout. + */ +static uint64_t do_fmadd(uint64_t n, uint64_t m, uint64_t a, + bool ftz, uint32_t *mxcsr_p) +{ + uint64_t r; + uint32_t mxcsr = 0; + uint32_t ftz_bit = ftz ? 
(1 << 15) : 0; + uint32_t saved_mxcsr = 0; + + asm volatile("stmxcsr %[saved_mxcsr]\n" + "stmxcsr %[mxcsr]\n" + "andl $0xffff7fc0, %[mxcsr]\n" + "orl %[ftz_bit], %[mxcsr]\n" + "ldmxcsr %[mxcsr]\n" + "movq %[a], %%xmm0\n" + "movq %[m], %%xmm1\n" + "movq %[n], %%xmm2\n" + /* xmm0 = xmm0 + xmm2 * xmm1 */ + "vfmadd231sd %%xmm1, %%xmm2, %%xmm0\n" + "movq %%xmm0, %[r]\n" + "stmxcsr %[mxcsr]\n" + "ldmxcsr %[saved_mxcsr]\n" + : [r] "=r" (r), [mxcsr] "=m" (mxcsr), + [saved_mxcsr] "=m" (saved_mxcsr) + : [n] "r" (n), [m] "r" (m), [a] "r" (a), + [ftz_bit] "r" (ftz_bit) + : "xmm0", "xmm1", "xmm2"); + *mxcsr_p = mxcsr & 0x3f; + printf("vfmadd132sd 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 + " = 0x%" PRIx64 " MXCSR flags 0x%" PRIx32 "\n", + n, m, a, r, *mxcsr_p); + return r; +} + +typedef struct testdata { + /* Input n, m, a */ + uint64_t n; + uint64_t m; + uint64_t a; + bool ftz; + /* Expected result */ + uint64_t expected_r; + /* Expected low 6 bits of MXCSR (the Flag bits) */ + uint32_t expected_mxcsr; +} testdata; + +static testdata tests[] = { + { 0, 0x7ff0000000000000, 0x7ff000000000aaaa, false, /* 0 * Inf + SNaN */ + 0x7ff800000000aaaa, 1 }, /* Should be QNaN and does raise Invalid */ + { 0, 0x7ff0000000000000, 0x7ff800000000aaaa, false, /* 0 * Inf + QNaN */ + 0x7ff800000000aaaa, 0 }, /* Should be QNaN and does *not* raise Invalid */ + /* + * These inputs give a result which is tiny before rounding but which + * becomes non-tiny after rounding. x86 is a "detect tininess after + * rounding" architecture, so it should give a non-denormal result and + * not set the Underflow flag (only the Precision flag for an inexact + * result). + */ + { 0x3fdfffffffffffff, 0x001fffffffffffff, 0x801fffffffffffff, false, + 0x8010000000000000, 0x20 }, + /* + * Flushing of denormal outputs to zero should also happen after + * rounding, so setting FTZ should not affect the result or the flags. + */ + { 0x3fdfffffffffffff, 0x001fffffffffffff, 0x801fffffffffffff, true, + 0x8010000000000000, 0x20 }, /* Enabling FTZ shouldn't change flags */ + /* + * normal * 0 + a denormal. With FTZ disabled this gives an exact + * result (equal to the input denormal) that has consumed the denormal. + */ + { 0x3cc8000000000000, 0x0000000000000000, 0x8008000000000000, false, + 0x8008000000000000, 0x2 }, /* Denormal */ + /* + * With FTZ enabled, this consumes the denormal, returns zero (because + * flushed) and indicates also Underflow and Precision. + */ + { 0x3cc8000000000000, 0x0000000000000000, 0x8008000000000000, true, + 0x8000000000000000, 0x32 }, /* Precision, Underflow, Denormal */ +}; + +int main(void) +{ + bool passed = true; + for (int i = 0; i < ARRAY_SIZE(tests); i++) { + uint32_t mxcsr; + uint64_t r = do_fmadd(tests[i].n, tests[i].m, tests[i].a, + tests[i].ftz, &mxcsr); + if (r != tests[i].expected_r) { + printf("expected result 0x%" PRIx64 "\n", tests[i].expected_r); + passed = false; + } + if (mxcsr != tests[i].expected_mxcsr) { + printf("expected MXCSR flags 0x%x\n", tests[i].expected_mxcsr); + passed = false; + } + } + return passed ? 0 : 1; +} diff --git a/tests/tcg/x86_64/system/patch-target.c b/tests/tcg/x86_64/system/patch-target.c new file mode 100644 index 00000000000..8c2b6f4ba78 --- /dev/null +++ b/tests/tcg/x86_64/system/patch-target.c @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This test target increments a value 100 times. The patcher converts the + * inc instruction to a nop, so it only increments the value once. 
+ * + */ +#include + +int main(void) +{ + ml_printf("Running test...\n"); + unsigned int x = 0; + for (int i = 0; i < 100; i++) { + asm volatile ( + "inc %[x]" + : [x] "+a" (x) + ); + } + ml_printf("Value: %d\n", x); + return 0; +} diff --git a/tests/tcg/x86_64/system/validate-patch.py b/tests/tcg/x86_64/system/validate-patch.py new file mode 100755 index 00000000000..700950eae55 --- /dev/null +++ b/tests/tcg/x86_64/system/validate-patch.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 +# +# validate-patch.py: check the patch applies +# +# This program takes two inputs: +# - the plugin output +# - the binary output +# +# Copyright (C) 2024 +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import sys +from argparse import ArgumentParser + +def main() -> None: + """ + Process the arguments, injest the program and plugin out and + verify they match up and report if they do not. + """ + parser = ArgumentParser(description="Validate patch") + parser.add_argument('test_output', + help="The output from the test itself") + parser.add_argument('plugin_output', + help="The output from plugin") + args = parser.parse_args() + + with open(args.test_output, 'r') as f: + test_data = f.read() + with open(args.plugin_output, 'r') as f: + plugin_data = f.read() + if "Value: 1" in test_data: + sys.exit(0) + else: + sys.exit(1) + +if __name__ == "__main__": + main() + diff --git a/tests/tcg/x86_64/test-2175.c b/tests/tcg/x86_64/test-2175.c new file mode 100644 index 00000000000..aafd037bce2 --- /dev/null +++ b/tests/tcg/x86_64/test-2175.c @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* See https://gitlab.com/qemu-project/qemu/-/issues/2185 */ + +#include + +int test_setc(unsigned int x, unsigned int y) +{ + asm("blsi %1, %0; setc %b0" : "+r"(x) : "r"(y)); + return (unsigned char)x; +} + +int test_pushf(unsigned int x, unsigned int y) +{ + asm("blsi %1, %0; pushf; pop %q0" : "+r"(x) : "r"(y)); + return x & 1; +} + +int main() +{ + assert(test_setc(1, 0xedbf530a)); + assert(test_pushf(1, 0xedbf530a)); + return 0; +} + diff --git a/tests/uefi-test-tools/Makefile b/tests/uefi-test-tools/Makefile index f4eaebd8ff6..8ee6fb3571f 100644 --- a/tests/uefi-test-tools/Makefile +++ b/tests/uefi-test-tools/Makefile @@ -12,7 +12,7 @@ edk2_dir := ../../roms/edk2 images_dir := ../data/uefi-boot-images -emulation_targets := arm aarch64 i386 x86_64 riscv64 +emulation_targets := arm aarch64 i386 x86_64 riscv64 loongarch64 uefi_binaries := bios-tables-test intermediate_suffixes := .efi .fat .iso.raw @@ -56,7 +56,8 @@ Build/%.iso.raw: Build/%.fat # stripped from, the argument. 
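(Editorial aside: the map_*_to_uefi chain defined just below, which this change extends with a loongarch64 entry, is in effect a lookup table from QEMU emulation-target names to EDK2 architecture identifiers. A minimal C sketch of the same mapping, not part of the patch, included only to make the nested $(subst) calls easier to read:)

#include <stdio.h>
#include <string.h>

/* Name pairs taken from the map_*_to_uefi definitions in this Makefile. */
static const char *const target_to_uefi[][2] = {
    { "arm",         "ARM" },
    { "aarch64",     "AA64" },
    { "riscv64",     "RISCV64" },
    { "loongarch64", "LOONGARCH64" },   /* the entry added here */
    { "i386",        "IA32" },
    { "x86_64",      "X64" },
};

static const char *map_to_uefi(const char *target)
{
    for (size_t i = 0; i < sizeof(target_to_uefi) / sizeof(target_to_uefi[0]); i++) {
        if (strcmp(target, target_to_uefi[i][0]) == 0) {
            return target_to_uefi[i][1];
        }
    }
    return target;   /* unmatched text passes through unchanged, as with $(subst) */
}

int main(void)
{
    printf("%s\n", map_to_uefi("loongarch64"));   /* prints LOONGARCH64 */
    return 0;
}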
map_arm_to_uefi = $(subst arm,ARM,$(1)) map_aarch64_to_uefi = $(subst aarch64,AA64,$(call map_arm_to_uefi,$(1))) -map_riscv64_to_uefi = $(subst riscv64,RISCV64,$(call map_aarch64_to_uefi,$(1))) +map_loongarch64_to_uefi = $(subst loongarch64,LOONGARCH64,$(call map_aarch64_to_uefi,$(1))) +map_riscv64_to_uefi = $(subst riscv64,RISCV64,$(call map_loongarch64_to_uefi,$(1))) map_i386_to_uefi = $(subst i386,IA32,$(call map_riscv64_to_uefi,$(1))) map_x86_64_to_uefi = $(subst x86_64,X64,$(call map_i386_to_uefi,$(1))) map_to_uefi = $(subst .,,$(call map_x86_64_to_uefi,$(1))) diff --git a/tests/uefi-test-tools/UefiTestToolsPkg/UefiTestToolsPkg.dsc b/tests/uefi-test-tools/UefiTestToolsPkg/UefiTestToolsPkg.dsc index 0902fd3c731..facf8df1fa9 100644 --- a/tests/uefi-test-tools/UefiTestToolsPkg/UefiTestToolsPkg.dsc +++ b/tests/uefi-test-tools/UefiTestToolsPkg/UefiTestToolsPkg.dsc @@ -19,7 +19,7 @@ PLATFORM_VERSION = 0.1 PLATFORM_NAME = UefiTestTools SKUID_IDENTIFIER = DEFAULT - SUPPORTED_ARCHITECTURES = ARM|AARCH64|IA32|X64|RISCV64 + SUPPORTED_ARCHITECTURES = ARM|AARCH64|IA32|X64|RISCV64|LOONGARCH64 BUILD_TARGETS = DEBUG [BuildOptions.IA32] @@ -65,6 +65,10 @@ [LibraryClasses.RISCV64] BaseMemoryLib|MdePkg/Library/BaseMemoryLib/BaseMemoryLib.inf +[LibraryClasses.LOONGARCH64] + BaseMemoryLib|MdePkg/Library/BaseMemoryLib/BaseMemoryLib.inf + StackCheckLib|MdePkg/Library/StackCheckLibNull/StackCheckLibNull.inf + [PcdsFixedAtBuild] gEfiMdePkgTokenSpaceGuid.PcdDebugPrintErrorLevel|0x8040004F gEfiMdePkgTokenSpaceGuid.PcdDebugPropertyMask|0x2F diff --git a/tests/uefi-test-tools/uefi-test-build.config b/tests/uefi-test-tools/uefi-test-build.config index a4c61fc97aa..8bf48266345 100644 --- a/tests/uefi-test-tools/uefi-test-build.config +++ b/tests/uefi-test-tools/uefi-test-build.config @@ -21,6 +21,16 @@ dest = ./Build arch = AARCH64 cpy1 = AARCH64/BiosTablesTest.efi bios-tables-test.aarch64.efi +#################################################################################### +# loongarch64 + +[build.loongarch64] +conf = UefiTestToolsPkg/UefiTestToolsPkg.dsc +plat = UefiTestTools +dest = ./Build +arch = LOONGARCH64 +cpy1 = LOONGARCH64/BiosTablesTest.efi bios-tables-test.loongarch64.efi + #################################################################################### # riscv64 diff --git a/tests/unit/check-block-qdict.c b/tests/unit/check-block-qdict.c index 751c58e7373..0036d85cfaa 100644 --- a/tests/unit/check-block-qdict.c +++ b/tests/unit/check-block-qdict.c @@ -9,8 +9,8 @@ #include "qemu/osdep.h" #include "block/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnum.h" +#include "qobject/qlist.h" +#include "qobject/qnum.h" #include "qapi/error.h" static void qdict_defaults_test(void) diff --git a/tests/unit/check-qdict.c b/tests/unit/check-qdict.c index b5efa859b0b..a1312be30a8 100644 --- a/tests/unit/check-qdict.c +++ b/tests/unit/check-qdict.c @@ -11,9 +11,9 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" /* * Public Interface test-cases diff --git a/tests/unit/check-qjson.c b/tests/unit/check-qjson.c index a89293ce51b..780a3654d08 100644 --- a/tests/unit/check-qjson.c +++ b/tests/unit/check-qjson.c @@ -14,12 +14,12 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qlit.h" -#include "qapi/qmp/qnull.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" 
+#include "qobject/qbool.h" +#include "qobject/qjson.h" +#include "qobject/qlit.h" +#include "qobject/qnull.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" #include "qemu/unicode.h" static QString *from_json_str(const char *jstr, bool single, Error **errp) diff --git a/tests/unit/check-qlist.c b/tests/unit/check-qlist.c index 3cd0ccbf198..1388aeede35 100644 --- a/tests/unit/check-qlist.c +++ b/tests/unit/check-qlist.c @@ -11,8 +11,8 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qnum.h" +#include "qobject/qlist.h" /* * Public Interface test-cases diff --git a/tests/unit/check-qlit.c b/tests/unit/check-qlit.c index bd6798d9122..ea7a0d9119e 100644 --- a/tests/unit/check-qlit.c +++ b/tests/unit/check-qlit.c @@ -9,12 +9,12 @@ #include "qemu/osdep.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qlit.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qlit.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" static QLitObject qlit = QLIT_QDICT(((QLitDictEntry[]) { { "foo", QLIT_QNUM(42) }, diff --git a/tests/unit/check-qnull.c b/tests/unit/check-qnull.c index 5ceacc65d7f..724a66d0bd2 100644 --- a/tests/unit/check-qnull.c +++ b/tests/unit/check-qnull.c @@ -8,7 +8,7 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/qnull.h" +#include "qobject/qnull.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qobject-output-visitor.h" #include "qapi/error.h" diff --git a/tests/unit/check-qnum.c b/tests/unit/check-qnum.c index bf7fe45bacc..a40120e8d3d 100644 --- a/tests/unit/check-qnum.c +++ b/tests/unit/check-qnum.c @@ -14,7 +14,7 @@ #include "qemu/osdep.h" -#include "qapi/qmp/qnum.h" +#include "qobject/qnum.h" /* * Public Interface test-cases diff --git a/tests/unit/check-qobject.c b/tests/unit/check-qobject.c index 022b7c74fe5..ccb25660f26 100644 --- a/tests/unit/check-qobject.c +++ b/tests/unit/check-qobject.c @@ -9,12 +9,12 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnull.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qnull.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" #include diff --git a/tests/unit/check-qom-interface.c b/tests/unit/check-qom-interface.c index c99be97ed8b..86ae5f6c3b1 100644 --- a/tests/unit/check-qom-interface.c +++ b/tests/unit/check-qom-interface.c @@ -38,7 +38,7 @@ static const TypeInfo test_if_info = { #define PATTERN 0xFAFBFCFD -static void test_class_init(ObjectClass *oc, void *data) +static void test_class_init(ObjectClass *oc, const void *data) { TestIfClass *tc = TEST_IF_CLASS(oc); @@ -52,7 +52,7 @@ static const TypeInfo direct_impl_info = { .name = TYPE_DIRECT_IMPL, .parent = TYPE_OBJECT, .class_init = test_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_TEST_IF }, { } } diff --git a/tests/unit/check-qom-proplist.c b/tests/unit/check-qom-proplist.c index 79d4a8b89d3..ee3c6fb32b1 100644 --- a/tests/unit/check-qom-proplist.c +++ b/tests/unit/check-qom-proplist.c @@ -22,8 +22,8 @@ #include "qapi/error.h" #include "qapi/qobject-input-visitor.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qobject.h" +#include "qobject/qdict.h" 
+#include "qobject/qobject.h" #include "qom/object.h" #include "qemu/module.h" #include "qemu/option.h" @@ -135,7 +135,7 @@ static void dummy_init(Object *obj) } -static void dummy_class_init(ObjectClass *cls, void *data) +static void dummy_class_init(ObjectClass *cls, const void *data) { object_class_property_add_str(cls, "sv", dummy_get_sv, @@ -164,7 +164,7 @@ static const TypeInfo dummy_info = { .instance_finalize = dummy_finalize, .class_size = sizeof(DummyObjectClass), .class_init = dummy_class_init, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } @@ -264,7 +264,7 @@ static void dummy_dev_unparent(Object *obj) object_unparent(OBJECT(dev->bus)); } -static void dummy_dev_class_init(ObjectClass *klass, void *opaque) +static void dummy_dev_class_init(ObjectClass *klass, const void *opaque) { klass->unparent = dummy_dev_unparent; } @@ -288,7 +288,7 @@ static void dummy_bus_unparent(Object *obj) object_unparent(OBJECT(bus->backend)); } -static void dummy_bus_class_init(ObjectClass *klass, void *opaque) +static void dummy_bus_class_init(ObjectClass *klass, const void *opaque) { klass->unparent = dummy_bus_unparent; } @@ -610,7 +610,7 @@ static void test_dummy_delchild(void) static void test_qom_partial_path(void) { Object *root = object_get_objects_root(); - Object *cont1 = container_get(root, "/cont1"); + Object *cont1 = object_property_add_new_container(root, "cont1"); Object *obj1 = object_new(TYPE_DUMMY); Object *obj2a = object_new(TYPE_DUMMY); Object *obj2b = object_new(TYPE_DUMMY); diff --git a/tests/unit/check-qstring.c b/tests/unit/check-qstring.c index bd861f4f8b4..2e6a00570fd 100644 --- a/tests/unit/check-qstring.c +++ b/tests/unit/check-qstring.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qstring.h" /* * Public Interface test-cases diff --git a/tests/unit/crypto-tls-x509-helpers.c b/tests/unit/crypto-tls-x509-helpers.c index 3e74ec5b5d4..2daecc416c6 100644 --- a/tests/unit/crypto-tls-x509-helpers.c +++ b/tests/unit/crypto-tls-x509-helpers.c @@ -135,6 +135,7 @@ void test_tls_init(const char *keyfile) void test_tls_cleanup(const char *keyfile) { asn1_delete_structure(&pkix_asn1); + gnutls_x509_privkey_deinit(privkey); unlink(keyfile); } @@ -502,8 +503,7 @@ void test_tls_write_cert_chain(const char *filename, g_free(buffer); } - -void test_tls_discard_cert(QCryptoTLSTestCertReq *req) +void test_tls_deinit_cert(QCryptoTLSTestCertReq *req) { if (!req->crt) { return; @@ -511,6 +511,15 @@ void test_tls_discard_cert(QCryptoTLSTestCertReq *req) gnutls_x509_crt_deinit(req->crt); req->crt = NULL; +} + +void test_tls_discard_cert(QCryptoTLSTestCertReq *req) +{ + if (!req->crt) { + return; + } + + test_tls_deinit_cert(req); if (getenv("QEMU_TEST_DEBUG_CERTS") == NULL) { unlink(req->filename); diff --git a/tests/unit/crypto-tls-x509-helpers.h b/tests/unit/crypto-tls-x509-helpers.h index 562c1606535..2a0f7c04fd8 100644 --- a/tests/unit/crypto-tls-x509-helpers.h +++ b/tests/unit/crypto-tls-x509-helpers.h @@ -73,6 +73,12 @@ void test_tls_generate_cert(QCryptoTLSTestCertReq *req, void test_tls_write_cert_chain(const char *filename, gnutls_x509_crt_t *certs, size_t ncerts); +/* + * Deinitialize the QCryptoTLSTestCertReq, but don't delete the certificate + * file on disk. (The caller is then responsible for doing that themselves. 
+ */ +void test_tls_deinit_cert(QCryptoTLSTestCertReq *req); +/* Deinit the QCryptoTLSTestCertReq, and delete the certificate file */ void test_tls_discard_cert(QCryptoTLSTestCertReq *req); void test_tls_init(const char *keyfile); diff --git a/tests/unit/meson.build b/tests/unit/meson.build index 490ab8182dc..d5248ae51de 100644 --- a/tests/unit/meson.build +++ b/tests/unit/meson.build @@ -47,6 +47,7 @@ tests = { 'test-logging': [], 'test-qapi-util': [], 'test-interval-tree': [], + 'test-fifo': [], } if have_system or have_tools @@ -115,15 +116,13 @@ if have_block if host_os != 'windows' tests += { 'test-image-locking': [testblock], - 'test-nested-aio-poll': [testblock], + 'test-nested-aio-poll': [], } endif if config_host_data.get('CONFIG_REPLICATION') tests += {'test-replication': [testblock]} endif - if nettle.found() or gcrypt.found() - tests += {'test-crypto-pbkdf': [io]} - endif + tests += {'test-crypto-pbkdf': [io]} endif if have_system diff --git a/tests/unit/ptimer-test.c b/tests/unit/ptimer-test.c index 04b5f4e3d03..08240594bbd 100644 --- a/tests/unit/ptimer-test.c +++ b/tests/unit/ptimer-test.c @@ -763,6 +763,33 @@ static void check_oneshot_with_load_0(gconstpointer arg) ptimer_free(ptimer); } +static void check_freq_more_than_1000M(gconstpointer arg) +{ + const uint8_t *policy = arg; + ptimer_state *ptimer = ptimer_init(ptimer_trigger, NULL, *policy); + bool no_round_down = (*policy & PTIMER_POLICY_NO_COUNTER_ROUND_DOWN); + + triggered = false; + + ptimer_transaction_begin(ptimer); + ptimer_set_freq(ptimer, 2000000000); + ptimer_set_limit(ptimer, 8, 1); + ptimer_run(ptimer, 1); + ptimer_transaction_commit(ptimer); + + qemu_clock_step(3); + + g_assert_cmpuint(ptimer_get_count(ptimer), ==, no_round_down ? 3 : 2); + g_assert_false(triggered); + + qemu_clock_step(1); + + g_assert_cmpuint(ptimer_get_count(ptimer), ==, 0); + g_assert_true(triggered); + + ptimer_free(ptimer); +} + static void add_ptimer_tests(uint8_t policy) { char policy_name[256] = ""; @@ -857,6 +884,12 @@ static void add_ptimer_tests(uint8_t policy) policy_name), g_memdup2(&policy, 1), check_oneshot_with_load_0, g_free); g_free(tmp); + + g_test_add_data_func_full( + tmp = g_strdup_printf("/ptimer/freq_more_than_1000M policy=%s", + policy_name), + g_memdup2(&policy, 1), check_freq_more_than_1000M, g_free); + g_free(tmp); } static void add_all_ptimer_policies_comb_tests(void) diff --git a/tests/unit/socket-helpers.c b/tests/unit/socket-helpers.c index f3439cc4e52..37db24f72a1 100644 --- a/tests/unit/socket-helpers.c +++ b/tests/unit/socket-helpers.c @@ -170,5 +170,4 @@ void socket_check_afunix_support(bool *has_afunix) if (*has_afunix) { close(fd); } - return; } diff --git a/tests/unit/test-aio-multithread.c b/tests/unit/test-aio-multithread.c index 08d4570ccb1..0ead6bf34ad 100644 --- a/tests/unit/test-aio-multithread.c +++ b/tests/unit/test-aio-multithread.c @@ -305,7 +305,9 @@ static void mcs_mutex_lock(void) prev = qatomic_xchg(&mutex_head, id); if (prev != -1) { qatomic_set(&nodes[prev].next, id); - qemu_futex_wait(&nodes[id].locked, 1); + while (qatomic_read(&nodes[id].locked) == 1) { + qemu_futex_wait(&nodes[id].locked, 1); + } } } @@ -328,7 +330,7 @@ static void mcs_mutex_unlock(void) /* Wake up the next in line. 
*/ next = qatomic_read(&nodes[id].next); nodes[next].locked = 0; - qemu_futex_wake(&nodes[next].locked, 1); + qemu_futex_wake_single(&nodes[next].locked); } static void test_multi_fair_mutex_entry(void *opaque) diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c index 666880472b8..43b0ba8648a 100644 --- a/tests/unit/test-bdrv-drain.c +++ b/tests/unit/test-bdrv-drain.c @@ -25,7 +25,7 @@ #include "qemu/osdep.h" #include "block/block_int.h" #include "block/blockjob_int.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qapi/error.h" #include "qemu/main-loop.h" #include "iothread.h" @@ -193,7 +193,9 @@ static BlockBackend * no_coroutine_fn test_setup(void) blk_insert_bs(blk, bs, &error_abort); backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort); + bdrv_graph_wrlock_drained(); bdrv_set_backing_hd(bs, backing, &error_abort); + bdrv_graph_wrunlock(); bdrv_unref(backing); bdrv_unref(bs); @@ -386,7 +388,9 @@ static void test_nested(void) backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort); backing_s = backing->opaque; + bdrv_graph_wrlock_drained(); bdrv_set_backing_hd(bs, backing, &error_abort); + bdrv_graph_wrunlock(); for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) { for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) { @@ -632,6 +636,8 @@ typedef struct TestBlockJob { BlockDriverState *bs; int run_ret; int prepare_ret; + + /* Accessed with atomics */ bool running; bool should_complete; } TestBlockJob; @@ -667,10 +673,10 @@ static int coroutine_fn test_job_run(Job *job, Error **errp) /* We are running the actual job code past the pause point in * job_co_entry(). */ - s->running = true; + qatomic_set(&s->running, true); job_transition_to_ready(&s->common.job); - while (!s->should_complete) { + while (!qatomic_read(&s->should_complete)) { /* Avoid job_sleep_ns() because it marks the job as !busy. We want to * emulate some actual activity (probably some I/O) here so that drain * has to wait for this activity to stop. 
*/ @@ -685,7 +691,7 @@ static int coroutine_fn test_job_run(Job *job, Error **errp) static void test_job_complete(Job *job, Error **errp) { TestBlockJob *s = container_of(job, TestBlockJob, common.job); - s->should_complete = true; + qatomic_set(&s->should_complete, true); } BlockJobDriver test_job_driver = { @@ -722,7 +728,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, BlockJob *job; TestBlockJob *tjob; IOThread *iothread = NULL; - int ret; + int ret = -1; src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR, &error_abort); @@ -731,10 +737,12 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay", BDRV_O_RDWR, &error_abort); + bdrv_graph_wrlock_drained(); bdrv_set_backing_hd(src_overlay, src, &error_abort); bdrv_unref(src); bdrv_set_backing_hd(src, src_backing, &error_abort); bdrv_unref(src_backing); + bdrv_graph_wrunlock(); blk_src = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); blk_insert_bs(blk_src, src_overlay, &error_abort); @@ -770,7 +778,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, tjob->bs = src; job = &tjob->common; - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort); bdrv_graph_wrunlock(); @@ -791,7 +799,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, /* job_co_entry() is run in the I/O thread, wait for the actual job * code to start (we don't want to catch the job in the pause point in * job_co_entry(). */ - while (!tjob->running) { + while (!qatomic_read(&tjob->running)) { aio_poll(qemu_get_aio_context(), false); } } @@ -799,7 +807,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, WITH_JOB_LOCK_GUARD() { g_assert_cmpint(job->job.pause_count, ==, 0); g_assert_false(job->job.paused); - g_assert_true(tjob->running); + g_assert_true(qatomic_read(&tjob->running)); g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */ } @@ -825,7 +833,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, * * paused is reset in the I/O thread, wait for it */ - while (job->job.paused) { + while (job_is_paused(&job->job)) { aio_poll(qemu_get_aio_context(), false); } } @@ -858,7 +866,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, * * paused is reset in the I/O thread, wait for it */ - while (job->job.paused) { + while (job_is_paused(&job->job)) { aio_poll(qemu_get_aio_context(), false); } } @@ -951,7 +959,7 @@ static void bdrv_test_top_close(BlockDriverState *bs) { BdrvChild *c, *next_c; - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) { bdrv_unref_child(bs, c); } @@ -1012,7 +1020,9 @@ static void coroutine_fn test_co_delete_by_drain(void *opaque) bdrv_graph_co_rdlock(); QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) { bdrv_graph_co_rdunlock(); + bdrv_drain_all_begin(); bdrv_co_unref_child(bs, c); + bdrv_drain_all_end(); bdrv_graph_co_rdlock(); } bdrv_graph_co_rdunlock(); @@ -1045,7 +1055,7 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete, null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, &error_abort); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); bdrv_graph_wrunlock(); @@ -1056,7 +1066,7 @@ static void do_test_delete_by_drain(bool 
detach_instead_of_delete, &error_abort); child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS; /* Takes our reference to child_bs */ - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child", &child_of_bds, BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY, @@ -1067,7 +1077,7 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete, * (for detach_instead_of_delete == true) */ null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, &error_abort); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); bdrv_graph_wrunlock(); @@ -1153,7 +1163,7 @@ static void no_coroutine_fn detach_indirect_bh(void *opaque) bdrv_dec_in_flight(data->child_b->bs); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_unref_child(data->parent_b, data->child_b); bdrv_ref(data->c); @@ -1258,7 +1268,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb) /* Set child relationships */ bdrv_ref(b); bdrv_ref(a); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds, BDRV_CHILD_DATA, &error_abort); child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds, @@ -1394,14 +1404,10 @@ static void test_set_aio_context(void) bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR, &error_abort); - bdrv_drained_begin(bs); bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort); - bdrv_drained_end(bs); - bdrv_drained_begin(bs); bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort); bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort); - bdrv_drained_end(bs); bdrv_unref(bs); iothread_join(a); @@ -1411,10 +1417,12 @@ static void test_set_aio_context(void) typedef struct TestDropBackingBlockJob { BlockJob common; - bool should_complete; bool *did_complete; BlockDriverState *detach_also; BlockDriverState *bs; + + /* Accessed with atomics */ + bool should_complete; } TestDropBackingBlockJob; static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp) @@ -1422,7 +1430,7 @@ static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp) TestDropBackingBlockJob *s = container_of(job, TestDropBackingBlockJob, common.job); - while (!s->should_complete) { + while (!qatomic_read(&s->should_complete)) { job_sleep_ns(job, 0); } @@ -1434,8 +1442,10 @@ static void test_drop_backing_job_commit(Job *job) TestDropBackingBlockJob *s = container_of(job, TestDropBackingBlockJob, common.job); + bdrv_graph_wrlock_drained(); bdrv_set_backing_hd(s->bs, NULL, &error_abort); bdrv_set_backing_hd(s->detach_also, NULL, &error_abort); + bdrv_graph_wrunlock(); *s->did_complete = true; } @@ -1528,7 +1538,9 @@ static void test_blockjob_commit_by_drained_end(void) snprintf(name, sizeof(name), "parent-node-%i", i); bs_parents[i] = bdrv_new_open_driver(&bdrv_test, name, BDRV_O_RDWR, &error_abort); + bdrv_graph_wrlock_drained(); bdrv_set_backing_hd(bs_parents[i], bs_child, &error_abort); + bdrv_graph_wrunlock(); } job = block_job_create("job", &test_drop_backing_job_driver, NULL, @@ -1541,7 +1553,7 @@ static void test_blockjob_commit_by_drained_end(void) job_start(&job->common.job); - job->should_complete = true; + qatomic_set(&job->should_complete, true); bdrv_drained_begin(bs_child); g_assert(!job_has_completed); bdrv_drained_end(bs_child); @@ -1557,15 +1569,17 @@ static void test_blockjob_commit_by_drained_end(void) typedef struct TestSimpleBlockJob { 
BlockJob common; - bool should_complete; bool *did_complete; + + /* Accessed with atomics */ + bool should_complete; } TestSimpleBlockJob; static int coroutine_fn test_simple_job_run(Job *job, Error **errp) { TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job); - while (!s->should_complete) { + while (!qatomic_read(&s->should_complete)) { job_sleep_ns(job, 0); } @@ -1675,13 +1689,13 @@ static void test_drop_intermediate_poll(void) job_node = bdrv_new_open_driver(&bdrv_test, "job-node", BDRV_O_RDWR, &error_abort); + bdrv_graph_wrlock_drained(); bdrv_set_backing_hd(job_node, chain[1], &error_abort); /* * Establish the chain last, so the chain links are the first * elements in the BDS.parents lists */ - bdrv_graph_wrlock(); for (i = 0; i < 3; i++) { if (i) { /* Takes the reference to chain[i - 1] */ @@ -1700,7 +1714,7 @@ static void test_drop_intermediate_poll(void) job->did_complete = &job_has_completed; job_start(&job->common.job); - job->should_complete = true; + qatomic_set(&job->should_complete, true); g_assert(!job_has_completed); ret = bdrv_drop_intermediate(chain[1], chain[0], NULL, false); @@ -1936,7 +1950,7 @@ static void do_test_replace_child_mid_drain(int old_drain_count, new_child_bs->total_sectors = 1; bdrv_ref(old_child_bs); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds, BDRV_CHILD_COW, &error_abort); bdrv_graph_wrunlock(); diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c index cafc023db42..567db99e4fa 100644 --- a/tests/unit/test-bdrv-graph-mod.c +++ b/tests/unit/test-bdrv-graph-mod.c @@ -22,7 +22,7 @@ #include "qapi/error.h" #include "qemu/main-loop.h" #include "block/block_int.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" static BlockDriver bdrv_pass_through = { .format_name = "pass-through", @@ -137,7 +137,7 @@ static void test_update_perm_tree(void) blk_insert_bs(root, bs, &error_abort); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_attach_child(filter, bs, "child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); bdrv_graph_wrunlock(); @@ -202,9 +202,9 @@ static void test_should_update_child(void) blk_insert_bs(root, bs, &error_abort); + bdrv_graph_wrlock_drained(); bdrv_set_backing_hd(target, bs, &error_abort); - bdrv_graph_wrlock(); g_assert(target->backing->bs == bs); bdrv_attach_child(filter, target, "target", &child_of_bds, BDRV_CHILD_DATA, &error_abort); @@ -244,7 +244,7 @@ static void test_parallel_exclusive_write(void) bdrv_ref(base); bdrv_ref(fl1); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_attach_child(top, fl1, "backing", &child_of_bds, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); @@ -363,7 +363,7 @@ static void test_parallel_perm_update(void) */ bdrv_ref(base); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_attach_child(top, ws, "file", &child_of_bds, BDRV_CHILD_DATA, &error_abort); c_fl1 = bdrv_attach_child(ws, fl1, "first", &child_of_bds, @@ -430,7 +430,7 @@ static void test_append_greedy_filter(void) BlockDriverState *base = no_perm_node("base"); BlockDriverState *fl = exclusive_writer_node("fl1"); - bdrv_graph_wrlock(); + bdrv_graph_wrlock_drained(); bdrv_attach_child(top, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); diff --git a/tests/unit/test-block-backend.c b/tests/unit/test-block-backend.c index 2fb1a444bd4..4257b3f8153 100644 --- a/tests/unit/test-block-backend.c +++ b/tests/unit/test-block-backend.c @@ 
-24,7 +24,7 @@ #include "qemu/osdep.h" #include "block/block.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qapi/error.h" #include "qemu/main-loop.h" diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c index 3766d5de6be..e26b3be5939 100644 --- a/tests/unit/test-block-iothread.c +++ b/tests/unit/test-block-iothread.c @@ -26,9 +26,9 @@ #include "block/block.h" #include "block/block_int-global-state.h" #include "block/blockjob_int.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/main-loop.h" #include "iothread.h" @@ -63,7 +63,7 @@ bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset, bool exact, } static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs, - bool want_zero, + unsigned int mode, int64_t offset, int64_t count, int64_t *pnum, int64_t *map, BlockDriverState **file) @@ -745,7 +745,7 @@ static void test_propagate_mirror(void) AioContext *main_ctx = qemu_get_aio_context(); BlockDriverState *src, *target, *filter; BlockBackend *blk; - Job *job; + Job *job = NULL; Error *local_err = NULL; /* Create src and target*/ diff --git a/tests/unit/test-blockjob-txn.c b/tests/unit/test-blockjob-txn.c index d3b0bb24bec..118503a8e87 100644 --- a/tests/unit/test-blockjob-txn.c +++ b/tests/unit/test-blockjob-txn.c @@ -14,8 +14,8 @@ #include "qapi/error.h" #include "qemu/main-loop.h" #include "block/blockjob_int.h" -#include "sysemu/block-backend.h" -#include "qapi/qmp/qdict.h" +#include "system/block-backend.h" +#include "qobject/qdict.h" typedef struct { BlockJob common; diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c index fe3e0d2d38c..abdbe4b8350 100644 --- a/tests/unit/test-blockjob.c +++ b/tests/unit/test-blockjob.c @@ -14,8 +14,8 @@ #include "qapi/error.h" #include "qemu/main-loop.h" #include "block/blockjob_int.h" -#include "sysemu/block-backend.h" -#include "qapi/qmp/qdict.h" +#include "system/block-backend.h" +#include "qobject/qdict.h" #include "iothread.h" static const BlockJobDriver test_block_job_driver = { diff --git a/tests/unit/test-char.c b/tests/unit/test-char.c index f273ce52261..f30a39f61ff 100644 --- a/tests/unit/test-char.c +++ b/tests/unit/test-char.c @@ -1,15 +1,16 @@ #include "qemu/osdep.h" #include +#include "qapi/error.h" #include "qemu/config-file.h" #include "qemu/module.h" #include "qemu/option.h" #include "qemu/sockets.h" #include "chardev/char-fe.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qapi/error.h" #include "qapi/qapi-commands-char.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qom/qom-qobject.h" #include "io/channel-socket.h" #include "qapi/qobject-input-visitor.h" @@ -184,6 +185,21 @@ static void char_mux_test(void) char *data; FeHandler h1 = { 0, false, 0, false, }, h2 = { 0, false, 0, false, }; CharBackend chr_be1, chr_be2; + Error *error = NULL; + + /* Create mux and chardev to be immediately removed */ + opts = qemu_opts_create(qemu_find_opts("chardev"), "mux-label", + 1, &error_abort); + qemu_opt_set(opts, "backend", "ringbuf", &error_abort); + qemu_opt_set(opts, "size", "128", &error_abort); + qemu_opt_set(opts, "mux", "on", &error_abort); + chr = qemu_chr_new_from_opts(opts, NULL, &error_abort); + g_assert_nonnull(chr); + qemu_opts_del(opts); + + /* Remove just created mux and chardev */ + qmp_chardev_remove("mux-label", &error_abort); + qmp_chardev_remove("mux-label-base", 
&error_abort); opts = qemu_opts_create(qemu_find_opts("chardev"), "mux-label", 1, &error_abort); @@ -334,9 +350,412 @@ static void char_mux_test(void) g_free(data); qemu_chr_fe_deinit(&chr_be1, false); - qemu_chr_fe_deinit(&chr_be2, true); + + qmp_chardev_remove("mux-label", &error); + g_assert_cmpstr(error_get_pretty(error), ==, "Chardev 'mux-label' is busy"); + error_free(error); + + qemu_chr_fe_deinit(&chr_be2, false); + qmp_chardev_remove("mux-label", &error_abort); } +static void char_hub_test(void) +{ + QemuOpts *opts; + Chardev *hub, *chr1, *chr2, *base; + char *data; + FeHandler h = { 0, false, 0, false, }; + Error *error = NULL; + CharBackend chr_be; + int ret, i; + +#define RB_SIZE 128 + + /* + * Create invalid hub + * 1. Create hub without a 'chardevs.N' defined (expect error) + */ + opts = qemu_opts_create(qemu_find_opts("chardev"), "hub0", + 1, &error_abort); + qemu_opt_set(opts, "backend", "hub", &error_abort); + hub = qemu_chr_new_from_opts(opts, NULL, &error); + g_assert_cmpstr(error_get_pretty(error), ==, + "hub: 'chardevs' list is not defined"); + error_free(error); + error = NULL; + qemu_opts_del(opts); + + /* + * Create invalid hub + * 1. Create chardev with embedded mux: 'mux=on' + * 2. Create hub which refers mux + * 3. Create hub which refers chardev already attached + * to the mux (already in use, expect error) + */ + opts = qemu_opts_create(qemu_find_opts("chardev"), "chr0", + 1, &error_abort); + qemu_opt_set(opts, "mux", "on", &error_abort); + qemu_opt_set(opts, "backend", "ringbuf", &error_abort); + qemu_opt_set(opts, "size", stringify(RB_SIZE), &error_abort); + base = qemu_chr_new_from_opts(opts, NULL, &error_abort); + g_assert_nonnull(base); + qemu_opts_del(opts); + + opts = qemu_opts_create(qemu_find_opts("chardev"), "hub0", + 1, &error_abort); + qemu_opt_set(opts, "backend", "hub", &error_abort); + qemu_opt_set(opts, "chardevs.0", "chr0", &error_abort); + hub = qemu_chr_new_from_opts(opts, NULL, &error); + g_assert_cmpstr(error_get_pretty(error), ==, + "hub: multiplexers and hub devices can't be " + "stacked, check chardev 'chr0', chardev should " + "not be a hub device or have 'mux=on' enabled"); + error_free(error); + error = NULL; + qemu_opts_del(opts); + + opts = qemu_opts_create(qemu_find_opts("chardev"), "hub0", + 1, &error_abort); + qemu_opt_set(opts, "backend", "hub", &error_abort); + qemu_opt_set(opts, "chardevs.0", "chr0-base", &error_abort); + hub = qemu_chr_new_from_opts(opts, NULL, &error); + g_assert_cmpstr(error_get_pretty(error), ==, + "chardev 'chr0-base' is already in use"); + error_free(error); + error = NULL; + qemu_opts_del(opts); + + /* Finalize chr0 */ + qmp_chardev_remove("chr0", &error_abort); + + /* + * Create invalid hub with more than maximum allowed backends + * 1. Create more than maximum allowed 'chardevs.%d' options for + * hub (expect error) + */ + opts = qemu_opts_create(qemu_find_opts("chardev"), "hub0", + 1, &error_abort); + for (i = 0; i < 10; i++) { + char key[32], val[32]; + + snprintf(key, sizeof(key), "chardevs.%d", i); + snprintf(val, sizeof(val), "chr%d", i); + qemu_opt_set(opts, key, val, &error); + if (error) { + char buf[64]; + + snprintf(buf, sizeof(buf), "Invalid parameter 'chardevs.%d'", i); + g_assert_cmpstr(error_get_pretty(error), ==, buf); + error_free(error); + break; + } + } + g_assert_nonnull(error); + error = NULL; + qemu_opts_del(opts); + + /* + * Create hub with 2 backend chardevs and 1 frontend and perform + * data aggregation + * 1. Create 2 ringbuf backend chardevs + * 2. Create 1 frontend + * 3. 
Create hub which refers 2 backend chardevs + * 4. Attach hub to a frontend + * 5. Attach hub to a frontend second time (expect error) + * 6. Perform data aggregation + * 7. Remove chr1 ("chr1 is busy", expect error) + * 8. Remove hub0 ("hub0 is busy", expect error); + * 9. Finilize frontend, hub and backend chardevs in correct order + */ + + /* Create first chardev */ + opts = qemu_opts_create(qemu_find_opts("chardev"), "chr1", + 1, &error_abort); + qemu_opt_set(opts, "backend", "ringbuf", &error_abort); + qemu_opt_set(opts, "size", stringify(RB_SIZE), &error_abort); + chr1 = qemu_chr_new_from_opts(opts, NULL, &error_abort); + g_assert_nonnull(chr1); + qemu_opts_del(opts); + + /* Create second chardev */ + opts = qemu_opts_create(qemu_find_opts("chardev"), "chr2", + 1, &error_abort); + qemu_opt_set(opts, "backend", "ringbuf", &error_abort); + qemu_opt_set(opts, "size", stringify(RB_SIZE), &error_abort); + chr2 = qemu_chr_new_from_opts(opts, NULL, &error_abort); + g_assert_nonnull(chr2); + qemu_opts_del(opts); + + /* Create hub0 and refer 2 backend chardevs */ + opts = qemu_opts_create(qemu_find_opts("chardev"), "hub0", + 1, &error_abort); + qemu_opt_set(opts, "backend", "hub", &error_abort); + qemu_opt_set(opts, "chardevs.0", "chr1", &error_abort); + qemu_opt_set(opts, "chardevs.1", "chr2", &error_abort); + hub = qemu_chr_new_from_opts(opts, NULL, &error_abort); + g_assert_nonnull(hub); + qemu_opts_del(opts); + + /* Attach hub to a frontend */ + qemu_chr_fe_init(&chr_be, hub, &error_abort); + qemu_chr_fe_set_handlers(&chr_be, + fe_can_read, + fe_read, + fe_event, + NULL, + &h, + NULL, true); + + /* Fails second time */ + qemu_chr_fe_init(&chr_be, hub, &error); + g_assert_cmpstr(error_get_pretty(error), ==, "chardev 'hub0' is already in use"); + error_free(error); + error = NULL; + + /* Write to backend, chr1 */ + base = qemu_chr_find("chr1"); + g_assert_cmpint(qemu_chr_be_can_write(base), !=, 0); + + qemu_chr_be_write(base, (void *)"hello", 6); + g_assert_cmpint(h.read_count, ==, 6); + g_assert_cmpstr(h.read_buf, ==, "hello"); + h.read_count = 0; + + /* Write to backend, chr2 */ + base = qemu_chr_find("chr2"); + g_assert_cmpint(qemu_chr_be_can_write(base), !=, 0); + + qemu_chr_be_write(base, (void *)"olleh", 6); + g_assert_cmpint(h.read_count, ==, 6); + g_assert_cmpstr(h.read_buf, ==, "olleh"); + h.read_count = 0; + + /* Write to frontend, chr_be */ + ret = qemu_chr_fe_write(&chr_be, (void *)"heyhey", 6); + g_assert_cmpint(ret, ==, 6); + + data = qmp_ringbuf_read("chr1", RB_SIZE, false, 0, &error_abort); + g_assert_cmpint(strlen(data), ==, 6); + g_assert_cmpstr(data, ==, "heyhey"); + g_free(data); + + data = qmp_ringbuf_read("chr2", RB_SIZE, false, 0, &error_abort); + g_assert_cmpint(strlen(data), ==, 6); + g_assert_cmpstr(data, ==, "heyhey"); + g_free(data); + + /* Can't be removed, depends on hub0 */ + qmp_chardev_remove("chr1", &error); + g_assert_cmpstr(error_get_pretty(error), ==, "Chardev 'chr1' is busy"); + error_free(error); + error = NULL; + + /* Can't be removed, depends on frontend chr_be */ + qmp_chardev_remove("hub0", &error); + g_assert_cmpstr(error_get_pretty(error), ==, "Chardev 'hub0' is busy"); + error_free(error); + error = NULL; + + /* Finalize frontend */ + qemu_chr_fe_deinit(&chr_be, false); + + /* Finalize hub0 */ + qmp_chardev_remove("hub0", &error_abort); + + /* Finalize backend chardevs */ + qmp_chardev_remove("chr1", &error_abort); + qmp_chardev_remove("chr2", &error_abort); + +#ifndef _WIN32 + /* + * Create 3 backend chardevs to simulate EAGAIN and watcher. 
+ * Mainly copied from char_pipe_test(). + * 1. Create 2 ringbuf backend chardevs + * 2. Create 1 pipe backend chardev + * 3. Create 1 frontend + * 4. Create hub which refers 2 backend chardevs + * 5. Attach hub to a frontend + * 6. Perform data aggregation and check watcher + * 7. Finilize frontend, hub and backend chardevs in correct order + */ + { + gchar *tmp_path = g_dir_make_tmp("qemu-test-char.XXXXXX", NULL); + gchar *in, *out, *pipe = g_build_filename(tmp_path, "pipe", NULL); + Chardev *chr3; + int fd, len; + char buf[128]; + + in = g_strdup_printf("%s.in", pipe); + if (mkfifo(in, 0600) < 0) { + abort(); + } + out = g_strdup_printf("%s.out", pipe); + if (mkfifo(out, 0600) < 0) { + abort(); + } + + /* Create first chardev */ + opts = qemu_opts_create(qemu_find_opts("chardev"), "chr1", + 1, &error_abort); + qemu_opt_set(opts, "backend", "ringbuf", &error_abort); + qemu_opt_set(opts, "size", stringify(RB_SIZE), &error_abort); + chr1 = qemu_chr_new_from_opts(opts, NULL, &error_abort); + g_assert_nonnull(chr1); + qemu_opts_del(opts); + + /* Create second chardev */ + opts = qemu_opts_create(qemu_find_opts("chardev"), "chr2", + 1, &error_abort); + qemu_opt_set(opts, "backend", "ringbuf", &error_abort); + qemu_opt_set(opts, "size", stringify(RB_SIZE), &error_abort); + chr2 = qemu_chr_new_from_opts(opts, NULL, &error_abort); + g_assert_nonnull(chr2); + qemu_opts_del(opts); + + /* Create third chardev */ + opts = qemu_opts_create(qemu_find_opts("chardev"), "chr3", + 1, &error_abort); + qemu_opt_set(opts, "backend", "pipe", &error_abort); + qemu_opt_set(opts, "path", pipe, &error_abort); + chr3 = qemu_chr_new_from_opts(opts, NULL, &error_abort); + g_assert_nonnull(chr3); + + /* Create hub0 and refer 3 backend chardevs */ + opts = qemu_opts_create(qemu_find_opts("chardev"), "hub0", + 1, &error_abort); + qemu_opt_set(opts, "backend", "hub", &error_abort); + qemu_opt_set(opts, "chardevs.0", "chr1", &error_abort); + qemu_opt_set(opts, "chardevs.1", "chr2", &error_abort); + qemu_opt_set(opts, "chardevs.2", "chr3", &error_abort); + hub = qemu_chr_new_from_opts(opts, NULL, &error_abort); + g_assert_nonnull(hub); + qemu_opts_del(opts); + + /* Attach hub to a frontend */ + qemu_chr_fe_init(&chr_be, hub, &error_abort); + qemu_chr_fe_set_handlers(&chr_be, + fe_can_read, + fe_read, + fe_event, + NULL, + &h, + NULL, true); + + /* Write to frontend, chr_be */ + ret = qemu_chr_fe_write(&chr_be, (void *)"thisis", 6); + g_assert_cmpint(ret, ==, 6); + + data = qmp_ringbuf_read("chr1", RB_SIZE, false, 0, &error_abort); + g_assert_cmpint(strlen(data), ==, 6); + g_assert_cmpstr(data, ==, "thisis"); + g_free(data); + + data = qmp_ringbuf_read("chr2", RB_SIZE, false, 0, &error_abort); + g_assert_cmpint(strlen(data), ==, 6); + g_assert_cmpstr(data, ==, "thisis"); + g_free(data); + + fd = open(out, O_RDWR); + ret = read(fd, buf, sizeof(buf)); + g_assert_cmpint(ret, ==, 6); + buf[ret] = 0; + g_assert_cmpstr(buf, ==, "thisis"); + close(fd); + + /* Add watch. 0 indicates no watches if nothing to wait for */ + ret = qemu_chr_fe_add_watch(&chr_be, G_IO_OUT | G_IO_HUP, + NULL, NULL); + g_assert_cmpint(ret, ==, 0); + + /* + * Write to frontend, chr_be, until EAGAIN. Make sure length is + * power of two to fit nicely the whole pipe buffer. 
+ */ + len = 0; + while ((ret = qemu_chr_fe_write(&chr_be, (void *)"thisisit", 8)) + != -1) { + len += ret; + } + g_assert_cmpint(errno, ==, EAGAIN); + + /* Further all writes should cause EAGAIN */ + ret = qemu_chr_fe_write(&chr_be, (void *)"b", 1); + g_assert_cmpint(ret, ==, -1); + g_assert_cmpint(errno, ==, EAGAIN); + + /* + * Add watch. Non 0 indicates we have a blocked chardev, which + * can wakes us up when write is possible. + */ + ret = qemu_chr_fe_add_watch(&chr_be, G_IO_OUT | G_IO_HUP, + NULL, NULL); + g_assert_cmpint(ret, !=, 0); + g_source_remove(ret); + + /* Drain pipe and ring buffers */ + fd = open(out, O_RDWR); + while ((ret = read(fd, buf, MIN(sizeof(buf), len))) != -1 && len > 0) { + len -= ret; + } + close(fd); + + data = qmp_ringbuf_read("chr1", RB_SIZE, false, 0, &error_abort); + g_assert_cmpint(strlen(data), ==, 128); + g_free(data); + + data = qmp_ringbuf_read("chr2", RB_SIZE, false, 0, &error_abort); + g_assert_cmpint(strlen(data), ==, 128); + g_free(data); + + /* + * Now we are good to go, first repeat "lost" sequence, which + * was already consumed and drained by the ring buffers, but + * pipe have not recieved that yet. + */ + ret = qemu_chr_fe_write(&chr_be, (void *)"thisisit", 8); + g_assert_cmpint(ret, ==, 8); + + ret = qemu_chr_fe_write(&chr_be, (void *)"streamisrestored", 16); + g_assert_cmpint(ret, ==, 16); + + data = qmp_ringbuf_read("chr1", RB_SIZE, false, 0, &error_abort); + g_assert_cmpint(strlen(data), ==, 16); + /* Only last 16 bytes, see big comment above */ + g_assert_cmpstr(data, ==, "streamisrestored"); + g_free(data); + + data = qmp_ringbuf_read("chr2", RB_SIZE, false, 0, &error_abort); + g_assert_cmpint(strlen(data), ==, 16); + /* Only last 16 bytes, see big comment above */ + g_assert_cmpstr(data, ==, "streamisrestored"); + g_free(data); + + fd = open(out, O_RDWR); + ret = read(fd, buf, sizeof(buf)); + g_assert_cmpint(ret, ==, 24); + buf[ret] = 0; + /* Both 8 and 16 bytes */ + g_assert_cmpstr(buf, ==, "thisisitstreamisrestored"); + close(fd); + + g_free(in); + g_free(out); + g_free(tmp_path); + g_free(pipe); + + /* Finalize frontend */ + qemu_chr_fe_deinit(&chr_be, false); + + /* Finalize hub0 */ + qmp_chardev_remove("hub0", &error_abort); + + /* Finalize backend chardevs */ + qmp_chardev_remove("chr1", &error_abort); + qmp_chardev_remove("chr2", &error_abort); + qmp_chardev_remove("chr3", &error_abort); + } +#endif +} static void websock_server_read(void *opaque, const uint8_t *buf, int size) { @@ -574,7 +993,7 @@ static void char_udp_test_internal(Chardev *reuse_chr, int sock) struct sockaddr_in other; SocketIdleData d = { 0, }; Chardev *chr; - CharBackend *be; + CharBackend stack_be, *be = &stack_be; socklen_t alen = sizeof(other); int ret; char buf[10]; @@ -590,7 +1009,6 @@ static void char_udp_test_internal(Chardev *reuse_chr, int sock) chr = qemu_chr_new("client", tmp, NULL); g_assert_nonnull(chr); - be = g_alloca(sizeof(CharBackend)); qemu_chr_fe_init(be, chr, &error_abort); } @@ -1485,6 +1903,7 @@ int main(int argc, char **argv) g_test_add_func("/char/invalid", char_invalid_test); g_test_add_func("/char/ringbuf", char_ringbuf_test); g_test_add_func("/char/mux", char_mux_test); + g_test_add_func("/char/hub", char_hub_test); #ifdef _WIN32 g_test_add_func("/char/console/subprocess", char_console_test_subprocess); g_test_add_func("/char/console", char_console_test); @@ -1523,18 +1942,18 @@ int main(int argc, char **argv) static CharSocketClientTestConfig client2 ## name = \ { addr, NULL, true, false, char_socket_event }; \ static 
CharSocketClientTestConfig client3 ## name = \ - { addr, ",reconnect=1", false, false, char_socket_event }; \ + { addr, ",reconnect-ms=1000", false, false, char_socket_event }; \ static CharSocketClientTestConfig client4 ## name = \ - { addr, ",reconnect=1", true, false, char_socket_event }; \ + { addr, ",reconnect-ms=1000", true, false, char_socket_event }; \ static CharSocketClientTestConfig client5 ## name = \ { addr, NULL, false, true, char_socket_event }; \ static CharSocketClientTestConfig client6 ## name = \ { addr, NULL, true, true, char_socket_event }; \ static CharSocketClientTestConfig client7 ## name = \ - { addr, ",reconnect=1", true, false, \ + { addr, ",reconnect-ms=1000", true, false, \ char_socket_event_with_error }; \ static CharSocketClientTestConfig client8 ## name = \ - { addr, ",reconnect=1", false, false, char_socket_event }; \ + { addr, ",reconnect-ms=1000", false, false, char_socket_event };\ g_test_add_data_func("/char/socket/client/mainloop/" # name, \ &client1 ##name, char_socket_client_test); \ g_test_add_data_func("/char/socket/client/wait-conn/" # name, \ diff --git a/tests/unit/test-crypto-afsplit.c b/tests/unit/test-crypto-afsplit.c index 00a7c180fd6..45e9046bf63 100644 --- a/tests/unit/test-crypto-afsplit.c +++ b/tests/unit/test-crypto-afsplit.c @@ -26,7 +26,7 @@ typedef struct QCryptoAFSplitTestData QCryptoAFSplitTestData; struct QCryptoAFSplitTestData { const char *path; - QCryptoHashAlgorithm hash; + QCryptoHashAlgo hash; uint32_t stripes; size_t blocklen; const uint8_t *key; @@ -36,7 +36,7 @@ struct QCryptoAFSplitTestData { static QCryptoAFSplitTestData test_data[] = { { .path = "/crypto/afsplit/sha256/5", - .hash = QCRYPTO_HASH_ALG_SHA256, + .hash = QCRYPTO_HASH_ALGO_SHA256, .stripes = 5, .blocklen = 32, .key = (const uint8_t *) @@ -68,7 +68,7 @@ static QCryptoAFSplitTestData test_data[] = { }, { .path = "/crypto/afsplit/sha256/5000", - .hash = QCRYPTO_HASH_ALG_SHA256, + .hash = QCRYPTO_HASH_ALGO_SHA256, .stripes = 5000, .blocklen = 16, .key = (const uint8_t *) @@ -77,7 +77,7 @@ static QCryptoAFSplitTestData test_data[] = { }, { .path = "/crypto/afsplit/sha1/1000", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .stripes = 1000, .blocklen = 32, .key = (const uint8_t *) @@ -88,7 +88,7 @@ static QCryptoAFSplitTestData test_data[] = { }, { .path = "/crypto/afsplit/sha256/big", - .hash = QCRYPTO_HASH_ALG_SHA256, + .hash = QCRYPTO_HASH_ALGO_SHA256, .stripes = 1000, .blocklen = 64, .key = (const uint8_t *) diff --git a/tests/unit/test-crypto-akcipher.c b/tests/unit/test-crypto-akcipher.c index 4f1f4214dd2..53c2211ba8e 100644 --- a/tests/unit/test-crypto-akcipher.c +++ b/tests/unit/test-crypto-akcipher.c @@ -692,7 +692,7 @@ struct QCryptoAkCipherTestData { static QCryptoRSAKeyTestData rsakey_test_data[] = { { .path = "/crypto/akcipher/rsakey-1024-public", - .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC, .key = rsa1024_public_key, .keylen = sizeof(rsa1024_public_key), .is_valid_key = true, @@ -700,7 +700,7 @@ static QCryptoRSAKeyTestData rsakey_test_data[] = { }, { .path = "/crypto/akcipher/rsakey-1024-private", - .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE, .key = rsa1024_private_key, .keylen = sizeof(rsa1024_private_key), .is_valid_key = true, @@ -708,7 +708,7 @@ static QCryptoRSAKeyTestData rsakey_test_data[] = { }, { .path = "/crypto/akcipher/rsakey-2048-public", - .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key_type = 
QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC, .key = rsa2048_public_key, .keylen = sizeof(rsa2048_public_key), .is_valid_key = true, @@ -716,7 +716,7 @@ static QCryptoRSAKeyTestData rsakey_test_data[] = { }, { .path = "/crypto/akcipher/rsakey-2048-private", - .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE, .key = rsa2048_private_key, .keylen = sizeof(rsa2048_private_key), .is_valid_key = true, @@ -724,56 +724,56 @@ static QCryptoRSAKeyTestData rsakey_test_data[] = { }, { .path = "/crypto/akcipher/rsakey-public-lack-elem", - .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC, .key = rsa_public_key_lack_element, .keylen = sizeof(rsa_public_key_lack_element), .is_valid_key = false, }, { .path = "/crypto/akcipher/rsakey-private-lack-elem", - .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE, .key = rsa_private_key_lack_element, .keylen = sizeof(rsa_private_key_lack_element), .is_valid_key = false, }, { .path = "/crypto/akcipher/rsakey-public-empty-elem", - .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC, .key = rsa_public_key_empty_element, .keylen = sizeof(rsa_public_key_empty_element), .is_valid_key = false, }, { .path = "/crypto/akcipher/rsakey-private-empty-elem", - .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE, .key = rsa_private_key_empty_element, .keylen = sizeof(rsa_private_key_empty_element), .is_valid_key = false, }, { .path = "/crypto/akcipher/rsakey-public-empty-key", - .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC, .key = NULL, .keylen = 0, .is_valid_key = false, }, { .path = "/crypto/akcipher/rsakey-private-empty-key", - .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE, .key = NULL, .keylen = 0, .is_valid_key = false, }, { .path = "/crypto/akcipher/rsakey-public-invalid-length-val", - .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC, .key = rsa_public_key_invalid_length_val, .keylen = sizeof(rsa_public_key_invalid_length_val), .is_valid_key = false, }, { .path = "/crypto/akcipher/rsakey-public-extra-elem", - .key_type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + .key_type = QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC, .key = rsa_public_key_extra_elem, .keylen = sizeof(rsa_public_key_extra_elem), .is_valid_key = false, @@ -785,9 +785,9 @@ static QCryptoAkCipherTestData akcipher_test_data[] = { { .path = "/crypto/akcipher/rsa1024-raw", .opt = { - .alg = QCRYPTO_AKCIPHER_ALG_RSA, + .alg = QCRYPTO_AK_CIPHER_ALGO_RSA, .u.rsa = { - .padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW, + .padding_alg = QCRYPTO_RSA_PADDING_ALGO_RAW, }, }, .pub_key = rsa1024_public_key, @@ -805,10 +805,10 @@ static QCryptoAkCipherTestData akcipher_test_data[] = { { .path = "/crypto/akcipher/rsa1024-pkcs1", .opt = { - .alg = QCRYPTO_AKCIPHER_ALG_RSA, + .alg = QCRYPTO_AK_CIPHER_ALGO_RSA, .u.rsa = { - .padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1, - .hash_alg = QCRYPTO_HASH_ALG_SHA1, + .padding_alg = QCRYPTO_RSA_PADDING_ALGO_PKCS1, + .hash_alg = QCRYPTO_HASH_ALGO_SHA1, }, }, .pub_key = rsa1024_public_key, @@ -830,9 +830,9 @@ static QCryptoAkCipherTestData akcipher_test_data[] = { { .path = "/crypto/akcipher/rsa2048-raw", .opt = { - .alg = QCRYPTO_AKCIPHER_ALG_RSA, + .alg = QCRYPTO_AK_CIPHER_ALGO_RSA, .u.rsa = { - .padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW, + .padding_alg = 
QCRYPTO_RSA_PADDING_ALGO_RAW, }, }, .pub_key = rsa2048_public_key, @@ -850,10 +850,10 @@ static QCryptoAkCipherTestData akcipher_test_data[] = { { .path = "/crypto/akcipher/rsa2048-pkcs1", .opt = { - .alg = QCRYPTO_AKCIPHER_ALG_RSA, + .alg = QCRYPTO_AK_CIPHER_ALGO_RSA, .u.rsa = { - .padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1, - .hash_alg = QCRYPTO_HASH_ALG_SHA1, + .padding_alg = QCRYPTO_RSA_PADDING_ALGO_PKCS1, + .hash_alg = QCRYPTO_HASH_ALGO_SHA1, }, }, .pub_key = rsa2048_public_key, @@ -885,12 +885,12 @@ static void test_akcipher(const void *opaque) return; } pub_key = qcrypto_akcipher_new(&data->opt, - QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC, + QCRYPTO_AK_CIPHER_KEY_TYPE_PUBLIC, data->pub_key, data->pub_key_len, &error_abort); g_assert(pub_key != NULL); priv_key = qcrypto_akcipher_new(&data->opt, - QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE, + QCRYPTO_AK_CIPHER_KEY_TYPE_PRIVATE, data->priv_key, data->priv_key_len, &error_abort); g_assert(priv_key != NULL); @@ -944,10 +944,10 @@ static void test_rsakey(const void *opaque) { const QCryptoRSAKeyTestData *data = (const QCryptoRSAKeyTestData *)opaque; QCryptoAkCipherOptions opt = { - .alg = QCRYPTO_AKCIPHER_ALG_RSA, + .alg = QCRYPTO_AK_CIPHER_ALGO_RSA, .u.rsa = { - .padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1, - .hash_alg = QCRYPTO_HASH_ALG_SHA1, + .padding_alg = QCRYPTO_RSA_PADDING_ALGO_PKCS1, + .hash_alg = QCRYPTO_HASH_ALGO_SHA1, } }; g_autoptr(QCryptoAkCipher) key = qcrypto_akcipher_new( diff --git a/tests/unit/test-crypto-block.c b/tests/unit/test-crypto-block.c index 42cfab60679..3ac7f17b2a0 100644 --- a/tests/unit/test-crypto-block.c +++ b/tests/unit/test-crypto-block.c @@ -39,14 +39,14 @@ #endif static QCryptoBlockCreateOptions qcow_create_opts = { - .format = Q_CRYPTO_BLOCK_FORMAT_QCOW, + .format = QCRYPTO_BLOCK_FORMAT_QCOW, .u.qcow = { .key_secret = (char *)"sec0", }, }; static QCryptoBlockOpenOptions qcow_open_opts = { - .format = Q_CRYPTO_BLOCK_FORMAT_QCOW, + .format = QCRYPTO_BLOCK_FORMAT_QCOW, .u.qcow = { .key_secret = (char *)"sec0", }, @@ -55,7 +55,7 @@ static QCryptoBlockOpenOptions qcow_open_opts = { #ifdef TEST_LUKS static QCryptoBlockOpenOptions luks_open_opts = { - .format = Q_CRYPTO_BLOCK_FORMAT_LUKS, + .format = QCRYPTO_BLOCK_FORMAT_LUKS, .u.luks = { .key_secret = (char *)"sec0", }, @@ -64,7 +64,7 @@ static QCryptoBlockOpenOptions luks_open_opts = { /* Creation with all default values */ static QCryptoBlockCreateOptions luks_create_opts_default = { - .format = Q_CRYPTO_BLOCK_FORMAT_LUKS, + .format = QCRYPTO_BLOCK_FORMAT_LUKS, .u.luks = { .key_secret = (char *)"sec0", }, @@ -73,33 +73,33 @@ static QCryptoBlockCreateOptions luks_create_opts_default = { /* ...and with explicit values */ static QCryptoBlockCreateOptions luks_create_opts_aes256_cbc_plain64 = { - .format = Q_CRYPTO_BLOCK_FORMAT_LUKS, + .format = QCRYPTO_BLOCK_FORMAT_LUKS, .u.luks = { .key_secret = (char *)"sec0", .has_cipher_alg = true, - .cipher_alg = QCRYPTO_CIPHER_ALG_AES_256, + .cipher_alg = QCRYPTO_CIPHER_ALGO_AES_256, .has_cipher_mode = true, .cipher_mode = QCRYPTO_CIPHER_MODE_CBC, .has_ivgen_alg = true, - .ivgen_alg = QCRYPTO_IVGEN_ALG_PLAIN64, + .ivgen_alg = QCRYPTO_IV_GEN_ALGO_PLAIN64, }, }; static QCryptoBlockCreateOptions luks_create_opts_aes256_cbc_essiv = { - .format = Q_CRYPTO_BLOCK_FORMAT_LUKS, + .format = QCRYPTO_BLOCK_FORMAT_LUKS, .u.luks = { .key_secret = (char *)"sec0", .has_cipher_alg = true, - .cipher_alg = QCRYPTO_CIPHER_ALG_AES_256, + .cipher_alg = QCRYPTO_CIPHER_ALGO_AES_256, .has_cipher_mode = true, .cipher_mode = QCRYPTO_CIPHER_MODE_CBC, 
.has_ivgen_alg = true, - .ivgen_alg = QCRYPTO_IVGEN_ALG_ESSIV, + .ivgen_alg = QCRYPTO_IV_GEN_ALGO_ESSIV, .has_ivgen_hash_alg = true, - .ivgen_hash_alg = QCRYPTO_HASH_ALG_SHA256, + .ivgen_hash_alg = QCRYPTO_HASH_ALGO_SHA256, .has_hash_alg = true, - .hash_alg = QCRYPTO_HASH_ALG_SHA1, + .hash_alg = QCRYPTO_HASH_ALGO_SHA1, }, }; #endif /* TEST_LUKS */ @@ -112,12 +112,12 @@ static struct QCryptoBlockTestData { bool expect_header; - QCryptoCipherAlgorithm cipher_alg; + QCryptoCipherAlgo cipher_alg; QCryptoCipherMode cipher_mode; - QCryptoHashAlgorithm hash_alg; + QCryptoHashAlgo hash_alg; - QCryptoIVGenAlgorithm ivgen_alg; - QCryptoHashAlgorithm ivgen_hash; + QCryptoIVGenAlgo ivgen_alg; + QCryptoHashAlgo ivgen_hash; bool slow; } test_data[] = { @@ -128,10 +128,10 @@ static struct QCryptoBlockTestData { .expect_header = false, - .cipher_alg = QCRYPTO_CIPHER_ALG_AES_128, + .cipher_alg = QCRYPTO_CIPHER_ALGO_AES_128, .cipher_mode = QCRYPTO_CIPHER_MODE_CBC, - .ivgen_alg = QCRYPTO_IVGEN_ALG_PLAIN64, + .ivgen_alg = QCRYPTO_IV_GEN_ALGO_PLAIN64, }, #ifdef TEST_LUKS { @@ -141,11 +141,11 @@ static struct QCryptoBlockTestData { .expect_header = true, - .cipher_alg = QCRYPTO_CIPHER_ALG_AES_256, + .cipher_alg = QCRYPTO_CIPHER_ALGO_AES_256, .cipher_mode = QCRYPTO_CIPHER_MODE_XTS, - .hash_alg = QCRYPTO_HASH_ALG_SHA256, + .hash_alg = QCRYPTO_HASH_ALGO_SHA256, - .ivgen_alg = QCRYPTO_IVGEN_ALG_PLAIN64, + .ivgen_alg = QCRYPTO_IV_GEN_ALGO_PLAIN64, .slow = true, }, @@ -156,11 +156,11 @@ static struct QCryptoBlockTestData { .expect_header = true, - .cipher_alg = QCRYPTO_CIPHER_ALG_AES_256, + .cipher_alg = QCRYPTO_CIPHER_ALGO_AES_256, .cipher_mode = QCRYPTO_CIPHER_MODE_CBC, - .hash_alg = QCRYPTO_HASH_ALG_SHA256, + .hash_alg = QCRYPTO_HASH_ALGO_SHA256, - .ivgen_alg = QCRYPTO_IVGEN_ALG_PLAIN64, + .ivgen_alg = QCRYPTO_IV_GEN_ALGO_PLAIN64, .slow = true, }, @@ -171,12 +171,12 @@ static struct QCryptoBlockTestData { .expect_header = true, - .cipher_alg = QCRYPTO_CIPHER_ALG_AES_256, + .cipher_alg = QCRYPTO_CIPHER_ALGO_AES_256, .cipher_mode = QCRYPTO_CIPHER_MODE_CBC, - .hash_alg = QCRYPTO_HASH_ALG_SHA1, + .hash_alg = QCRYPTO_HASH_ALGO_SHA1, - .ivgen_alg = QCRYPTO_IVGEN_ALG_ESSIV, - .ivgen_hash = QCRYPTO_HASH_ALG_SHA256, + .ivgen_alg = QCRYPTO_IV_GEN_ALGO_ESSIV, + .ivgen_hash = QCRYPTO_HASH_ALGO_SHA256, .slow = true, }, @@ -572,8 +572,15 @@ int main(int argc, char **argv) g_assert(qcrypto_init(NULL) == 0); for (i = 0; i < G_N_ELEMENTS(test_data); i++) { - if (test_data[i].open_opts->format == Q_CRYPTO_BLOCK_FORMAT_LUKS && + if (test_data[i].open_opts->format == QCRYPTO_BLOCK_FORMAT_LUKS && !qcrypto_hash_supports(test_data[i].hash_alg)) { + g_printerr("# skip unsupported %s\n", + QCryptoHashAlgo_str(test_data[i].hash_alg)); + continue; + } + if (!qcrypto_cipher_supports(QCRYPTO_CIPHER_ALGO_AES_128, + QCRYPTO_CIPHER_MODE_CBC)) { + g_printerr("# skip unsupported aes-128:cbc\n"); continue; } if (!test_data[i].slow || diff --git a/tests/unit/test-crypto-cipher.c b/tests/unit/test-crypto-cipher.c index f5152e569dd..1331d558cf2 100644 --- a/tests/unit/test-crypto-cipher.c +++ b/tests/unit/test-crypto-cipher.c @@ -27,7 +27,7 @@ typedef struct QCryptoCipherTestData QCryptoCipherTestData; struct QCryptoCipherTestData { const char *path; - QCryptoCipherAlgorithm alg; + QCryptoCipherAlgo alg; QCryptoCipherMode mode; const char *key; const char *plaintext; @@ -43,7 +43,7 @@ static QCryptoCipherTestData test_data[] = { { /* NIST F.1.1 ECB-AES128.Encrypt */ .path = "/crypto/cipher/aes-ecb-128", - .alg = QCRYPTO_CIPHER_ALG_AES_128, + .alg = 
QCRYPTO_CIPHER_ALGO_AES_128, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "2b7e151628aed2a6abf7158809cf4f3c", .plaintext = @@ -60,7 +60,7 @@ static QCryptoCipherTestData test_data[] = { { /* NIST F.1.3 ECB-AES192.Encrypt */ .path = "/crypto/cipher/aes-ecb-192", - .alg = QCRYPTO_CIPHER_ALG_AES_192, + .alg = QCRYPTO_CIPHER_ALGO_AES_192, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b", .plaintext = @@ -77,7 +77,7 @@ static QCryptoCipherTestData test_data[] = { { /* NIST F.1.5 ECB-AES256.Encrypt */ .path = "/crypto/cipher/aes-ecb-256", - .alg = QCRYPTO_CIPHER_ALG_AES_256, + .alg = QCRYPTO_CIPHER_ALGO_AES_256, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "603deb1015ca71be2b73aef0857d7781" @@ -96,7 +96,7 @@ static QCryptoCipherTestData test_data[] = { { /* NIST F.2.1 CBC-AES128.Encrypt */ .path = "/crypto/cipher/aes-cbc-128", - .alg = QCRYPTO_CIPHER_ALG_AES_128, + .alg = QCRYPTO_CIPHER_ALGO_AES_128, .mode = QCRYPTO_CIPHER_MODE_CBC, .key = "2b7e151628aed2a6abf7158809cf4f3c", .iv = "000102030405060708090a0b0c0d0e0f", @@ -114,7 +114,7 @@ static QCryptoCipherTestData test_data[] = { { /* NIST F.2.3 CBC-AES128.Encrypt */ .path = "/crypto/cipher/aes-cbc-192", - .alg = QCRYPTO_CIPHER_ALG_AES_192, + .alg = QCRYPTO_CIPHER_ALGO_AES_192, .mode = QCRYPTO_CIPHER_MODE_CBC, .key = "8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b", .iv = "000102030405060708090a0b0c0d0e0f", @@ -132,7 +132,7 @@ static QCryptoCipherTestData test_data[] = { { /* NIST F.2.5 CBC-AES128.Encrypt */ .path = "/crypto/cipher/aes-cbc-256", - .alg = QCRYPTO_CIPHER_ALG_AES_256, + .alg = QCRYPTO_CIPHER_ALGO_AES_256, .mode = QCRYPTO_CIPHER_MODE_CBC, .key = "603deb1015ca71be2b73aef0857d7781" @@ -156,7 +156,7 @@ static QCryptoCipherTestData test_data[] = { * ciphertext in ECB and CBC modes */ .path = "/crypto/cipher/des-ecb-56-one-block", - .alg = QCRYPTO_CIPHER_ALG_DES, + .alg = QCRYPTO_CIPHER_ALGO_DES, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "80c4a2e691d5b3f7", .plaintext = "70617373776f7264", @@ -165,7 +165,7 @@ static QCryptoCipherTestData test_data[] = { { /* See previous comment */ .path = "/crypto/cipher/des-cbc-56-one-block", - .alg = QCRYPTO_CIPHER_ALG_DES, + .alg = QCRYPTO_CIPHER_ALGO_DES, .mode = QCRYPTO_CIPHER_MODE_CBC, .key = "80c4a2e691d5b3f7", .iv = "0000000000000000", @@ -174,7 +174,7 @@ static QCryptoCipherTestData test_data[] = { }, { .path = "/crypto/cipher/des-ecb-56", - .alg = QCRYPTO_CIPHER_ALG_DES, + .alg = QCRYPTO_CIPHER_ALGO_DES, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "80c4a2e691d5b3f7", .plaintext = @@ -191,7 +191,7 @@ static QCryptoCipherTestData test_data[] = { { /* Borrowed from linux-kernel crypto/testmgr.h */ .path = "/crypto/cipher/3des-cbc", - .alg = QCRYPTO_CIPHER_ALG_3DES, + .alg = QCRYPTO_CIPHER_ALGO_3DES, .mode = QCRYPTO_CIPHER_MODE_CBC, .key = "e9c0ff2e760b6424444d995a12d640c0" @@ -220,7 +220,7 @@ static QCryptoCipherTestData test_data[] = { { /* Borrowed from linux-kernel crypto/testmgr.h */ .path = "/crypto/cipher/3des-ecb", - .alg = QCRYPTO_CIPHER_ALG_3DES, + .alg = QCRYPTO_CIPHER_ALGO_3DES, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "0123456789abcdef5555555555555555" @@ -233,7 +233,7 @@ static QCryptoCipherTestData test_data[] = { { /* Borrowed from linux-kernel crypto/testmgr.h */ .path = "/crypto/cipher/3des-ctr", - .alg = QCRYPTO_CIPHER_ALG_3DES, + .alg = QCRYPTO_CIPHER_ALGO_3DES, .mode = QCRYPTO_CIPHER_MODE_CTR, .key = "9cd6f39cb95a67005a67002dceeb2dce" @@ -308,7 +308,7 @@ static QCryptoCipherTestData test_data[] = { { /* RFC 2144, Appendix B.1 */ .path = 
"/crypto/cipher/cast5-128", - .alg = QCRYPTO_CIPHER_ALG_CAST5_128, + .alg = QCRYPTO_CIPHER_ALGO_CAST5_128, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "0123456712345678234567893456789A", .plaintext = "0123456789abcdef", @@ -317,7 +317,7 @@ static QCryptoCipherTestData test_data[] = { { /* libgcrypt serpent.c */ .path = "/crypto/cipher/serpent-128", - .alg = QCRYPTO_CIPHER_ALG_SERPENT_128, + .alg = QCRYPTO_CIPHER_ALGO_SERPENT_128, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "00000000000000000000000000000000", .plaintext = "d29d576fcea3a3a7ed9099f29273d78e", @@ -326,7 +326,7 @@ static QCryptoCipherTestData test_data[] = { { /* libgcrypt serpent.c */ .path = "/crypto/cipher/serpent-192", - .alg = QCRYPTO_CIPHER_ALG_SERPENT_192, + .alg = QCRYPTO_CIPHER_ALGO_SERPENT_192, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "00000000000000000000000000000000" "0000000000000000", @@ -336,7 +336,7 @@ static QCryptoCipherTestData test_data[] = { { /* libgcrypt serpent.c */ .path = "/crypto/cipher/serpent-256a", - .alg = QCRYPTO_CIPHER_ALG_SERPENT_256, + .alg = QCRYPTO_CIPHER_ALGO_SERPENT_256, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "00000000000000000000000000000000" "00000000000000000000000000000000", @@ -346,7 +346,7 @@ static QCryptoCipherTestData test_data[] = { { /* libgcrypt serpent.c */ .path = "/crypto/cipher/serpent-256b", - .alg = QCRYPTO_CIPHER_ALG_SERPENT_256, + .alg = QCRYPTO_CIPHER_ALGO_SERPENT_256, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "00000000000000000000000000000000" "00000000000000000000000000000000", @@ -356,7 +356,7 @@ static QCryptoCipherTestData test_data[] = { { /* Twofish paper "Known Answer Test" */ .path = "/crypto/cipher/twofish-128", - .alg = QCRYPTO_CIPHER_ALG_TWOFISH_128, + .alg = QCRYPTO_CIPHER_ALGO_TWOFISH_128, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "d491db16e7b1c39e86cb086b789f5419", .plaintext = "019f9809de1711858faac3a3ba20fbc3", @@ -365,7 +365,7 @@ static QCryptoCipherTestData test_data[] = { { /* Twofish paper "Known Answer Test", I=3 */ .path = "/crypto/cipher/twofish-192", - .alg = QCRYPTO_CIPHER_ALG_TWOFISH_192, + .alg = QCRYPTO_CIPHER_ALGO_TWOFISH_192, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "88b2b2706b105e36b446bb6d731a1e88" "efa71f788965bd44", @@ -375,7 +375,7 @@ static QCryptoCipherTestData test_data[] = { { /* Twofish paper "Known Answer Test", I=4 */ .path = "/crypto/cipher/twofish-256", - .alg = QCRYPTO_CIPHER_ALG_TWOFISH_256, + .alg = QCRYPTO_CIPHER_ALGO_TWOFISH_256, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "d43bb7556ea32e46f2a282b7d45b4e0d" "57ff739d4dc92c1bd7fc01700cc8216f", @@ -386,7 +386,7 @@ static QCryptoCipherTestData test_data[] = { { /* SM4, GB/T 32907-2016, Appendix A.1 */ .path = "/crypto/cipher/sm4", - .alg = QCRYPTO_CIPHER_ALG_SM4, + .alg = QCRYPTO_CIPHER_ALGO_SM4, .mode = QCRYPTO_CIPHER_MODE_ECB, .key = "0123456789abcdeffedcba9876543210", .plaintext = @@ -398,7 +398,7 @@ static QCryptoCipherTestData test_data[] = { { /* #1 32 byte key, 32 byte PTX */ .path = "/crypto/cipher/aes-xts-128-1", - .alg = QCRYPTO_CIPHER_ALG_AES_128, + .alg = QCRYPTO_CIPHER_ALGO_AES_128, .mode = QCRYPTO_CIPHER_MODE_XTS, .key = "00000000000000000000000000000000" @@ -415,7 +415,7 @@ static QCryptoCipherTestData test_data[] = { { /* #2, 32 byte key, 32 byte PTX */ .path = "/crypto/cipher/aes-xts-128-2", - .alg = QCRYPTO_CIPHER_ALG_AES_128, + .alg = QCRYPTO_CIPHER_ALGO_AES_128, .mode = QCRYPTO_CIPHER_MODE_XTS, .key = "11111111111111111111111111111111" @@ -432,7 +432,7 @@ static QCryptoCipherTestData test_data[] = { { /* #5 from xts.7, 32 byte key, 32 byte PTX */ .path = 
"/crypto/cipher/aes-xts-128-3", - .alg = QCRYPTO_CIPHER_ALG_AES_128, + .alg = QCRYPTO_CIPHER_ALGO_AES_128, .mode = QCRYPTO_CIPHER_MODE_XTS, .key = "fffefdfcfbfaf9f8f7f6f5f4f3f2f1f0" @@ -449,7 +449,7 @@ static QCryptoCipherTestData test_data[] = { { /* #4, 32 byte key, 512 byte PTX */ .path = "/crypto/cipher/aes-xts-128-4", - .alg = QCRYPTO_CIPHER_ALG_AES_128, + .alg = QCRYPTO_CIPHER_ALGO_AES_128, .mode = QCRYPTO_CIPHER_MODE_XTS, .key = "27182818284590452353602874713526" @@ -528,7 +528,7 @@ static QCryptoCipherTestData test_data[] = { * which is incompatible with XTS */ .path = "/crypto/cipher/cast5-xts-128", - .alg = QCRYPTO_CIPHER_ALG_CAST5_128, + .alg = QCRYPTO_CIPHER_ALGO_CAST5_128, .mode = QCRYPTO_CIPHER_MODE_XTS, .key = "27182818284590452353602874713526" @@ -537,7 +537,7 @@ static QCryptoCipherTestData test_data[] = { { /* NIST F.5.1 CTR-AES128.Encrypt */ .path = "/crypto/cipher/aes-ctr-128", - .alg = QCRYPTO_CIPHER_ALG_AES_128, + .alg = QCRYPTO_CIPHER_ALGO_AES_128, .mode = QCRYPTO_CIPHER_MODE_CTR, .key = "2b7e151628aed2a6abf7158809cf4f3c", .iv = "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", @@ -555,7 +555,7 @@ static QCryptoCipherTestData test_data[] = { { /* NIST F.5.3 CTR-AES192.Encrypt */ .path = "/crypto/cipher/aes-ctr-192", - .alg = QCRYPTO_CIPHER_ALG_AES_192, + .alg = QCRYPTO_CIPHER_ALGO_AES_192, .mode = QCRYPTO_CIPHER_MODE_CTR, .key = "8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b", .iv = "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", @@ -573,7 +573,7 @@ static QCryptoCipherTestData test_data[] = { { /* NIST F.5.5 CTR-AES256.Encrypt */ .path = "/crypto/cipher/aes-ctr-256", - .alg = QCRYPTO_CIPHER_ALG_AES_256, + .alg = QCRYPTO_CIPHER_ALGO_AES_256, .mode = QCRYPTO_CIPHER_MODE_CTR, .key = "603deb1015ca71be2b73aef0857d7781" "1f352c073b6108d72d9810a30914dff4", @@ -750,7 +750,7 @@ static void test_cipher_null_iv(void) uint8_t ciphertext[32] = { 0 }; cipher = qcrypto_cipher_new( - QCRYPTO_CIPHER_ALG_AES_256, + QCRYPTO_CIPHER_ALGO_AES_256, QCRYPTO_CIPHER_MODE_CBC, key, sizeof(key), &error_abort); @@ -779,7 +779,7 @@ static void test_cipher_short_plaintext(void) int ret; cipher = qcrypto_cipher_new( - QCRYPTO_CIPHER_ALG_AES_256, + QCRYPTO_CIPHER_ALGO_AES_256, QCRYPTO_CIPHER_MODE_CBC, key, sizeof(key), &error_abort); @@ -823,16 +823,21 @@ int main(int argc, char **argv) g_test_add_data_func(test_data[i].path, &test_data[i], test_cipher); } else { g_printerr("# skip unsupported %s:%s\n", - QCryptoCipherAlgorithm_str(test_data[i].alg), + QCryptoCipherAlgo_str(test_data[i].alg), QCryptoCipherMode_str(test_data[i].mode)); } } - g_test_add_func("/crypto/cipher/null-iv", - test_cipher_null_iv); + if (qcrypto_cipher_supports(QCRYPTO_CIPHER_ALGO_AES_256, + QCRYPTO_CIPHER_MODE_CBC)) { + g_test_add_func("/crypto/cipher/null-iv", + test_cipher_null_iv); - g_test_add_func("/crypto/cipher/short-plaintext", - test_cipher_short_plaintext); + g_test_add_func("/crypto/cipher/short-plaintext", + test_cipher_short_plaintext); + } else { + g_printerr("# skip unsupported aes-256:cbc\n"); + } return g_test_run(); } diff --git a/tests/unit/test-crypto-hash.c b/tests/unit/test-crypto-hash.c index 1f4abb822b6..8fee1593f94 100644 --- a/tests/unit/test-crypto-hash.c +++ b/tests/unit/test-crypto-hash.c @@ -1,6 +1,7 @@ /* * QEMU Crypto hash algorithms * + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates * Copyright (c) 2015 Red Hat, Inc. 
* * This library is free software; you can redistribute it and/or @@ -42,6 +43,9 @@ "63b54e4cb2d2032b393994aa263c0dbb" \ "e00a9f2fe9ef6037352232a1eec55ee7" #define OUTPUT_RIPEMD160 "f3d658fad3fdfb2b52c9369cf0d441249ddfa8a0" +#ifdef CONFIG_CRYPTO_SM3 +#define OUTPUT_SM3 "d4a97db105b477b84c4f20ec9c31a6c814e2705a0b83a5a89748d75f0ef456a1" +#endif #define OUTPUT_MD5_B64 "Yo0gY3FWMDWrjvYvSSveyQ==" #define OUTPUT_SHA1_B64 "sudPJnWKOkIeUJzuBFJEt4dTzAI=" @@ -54,32 +58,45 @@ "7sVe5w==" #define OUTPUT_RIPEMD160_B64 "89ZY+tP9+ytSyTac8NRBJJ3fqKA=" +#ifdef CONFIG_CRYPTO_SM3 +#define OUTPUT_SM3_B64 "1Kl9sQW0d7hMTyDsnDGmyBTicFoLg6Wol0jXXw70VqE=" +#endif + static const char *expected_outputs[] = { - [QCRYPTO_HASH_ALG_MD5] = OUTPUT_MD5, - [QCRYPTO_HASH_ALG_SHA1] = OUTPUT_SHA1, - [QCRYPTO_HASH_ALG_SHA224] = OUTPUT_SHA224, - [QCRYPTO_HASH_ALG_SHA256] = OUTPUT_SHA256, - [QCRYPTO_HASH_ALG_SHA384] = OUTPUT_SHA384, - [QCRYPTO_HASH_ALG_SHA512] = OUTPUT_SHA512, - [QCRYPTO_HASH_ALG_RIPEMD160] = OUTPUT_RIPEMD160, + [QCRYPTO_HASH_ALGO_MD5] = OUTPUT_MD5, + [QCRYPTO_HASH_ALGO_SHA1] = OUTPUT_SHA1, + [QCRYPTO_HASH_ALGO_SHA224] = OUTPUT_SHA224, + [QCRYPTO_HASH_ALGO_SHA256] = OUTPUT_SHA256, + [QCRYPTO_HASH_ALGO_SHA384] = OUTPUT_SHA384, + [QCRYPTO_HASH_ALGO_SHA512] = OUTPUT_SHA512, + [QCRYPTO_HASH_ALGO_RIPEMD160] = OUTPUT_RIPEMD160, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = OUTPUT_SM3, +#endif }; static const char *expected_outputs_b64[] = { - [QCRYPTO_HASH_ALG_MD5] = OUTPUT_MD5_B64, - [QCRYPTO_HASH_ALG_SHA1] = OUTPUT_SHA1_B64, - [QCRYPTO_HASH_ALG_SHA224] = OUTPUT_SHA224_B64, - [QCRYPTO_HASH_ALG_SHA256] = OUTPUT_SHA256_B64, - [QCRYPTO_HASH_ALG_SHA384] = OUTPUT_SHA384_B64, - [QCRYPTO_HASH_ALG_SHA512] = OUTPUT_SHA512_B64, - [QCRYPTO_HASH_ALG_RIPEMD160] = OUTPUT_RIPEMD160_B64, + [QCRYPTO_HASH_ALGO_MD5] = OUTPUT_MD5_B64, + [QCRYPTO_HASH_ALGO_SHA1] = OUTPUT_SHA1_B64, + [QCRYPTO_HASH_ALGO_SHA224] = OUTPUT_SHA224_B64, + [QCRYPTO_HASH_ALGO_SHA256] = OUTPUT_SHA256_B64, + [QCRYPTO_HASH_ALGO_SHA384] = OUTPUT_SHA384_B64, + [QCRYPTO_HASH_ALGO_SHA512] = OUTPUT_SHA512_B64, + [QCRYPTO_HASH_ALGO_RIPEMD160] = OUTPUT_RIPEMD160_B64, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = OUTPUT_SM3_B64, +#endif }; static const int expected_lens[] = { - [QCRYPTO_HASH_ALG_MD5] = 16, - [QCRYPTO_HASH_ALG_SHA1] = 20, - [QCRYPTO_HASH_ALG_SHA224] = 28, - [QCRYPTO_HASH_ALG_SHA256] = 32, - [QCRYPTO_HASH_ALG_SHA384] = 48, - [QCRYPTO_HASH_ALG_SHA512] = 64, - [QCRYPTO_HASH_ALG_RIPEMD160] = 20, + [QCRYPTO_HASH_ALGO_MD5] = 16, + [QCRYPTO_HASH_ALGO_SHA1] = 20, + [QCRYPTO_HASH_ALGO_SHA224] = 28, + [QCRYPTO_HASH_ALGO_SHA256] = 32, + [QCRYPTO_HASH_ALGO_SHA384] = 48, + [QCRYPTO_HASH_ALGO_SHA512] = 64, + [QCRYPTO_HASH_ALGO_RIPEMD160] = 20, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = 32, +#endif }; static const char hex[] = "0123456789abcdef"; @@ -122,7 +139,7 @@ static void test_hash_prealloc(void) size_t i; for (i = 0; i < G_N_ELEMENTS(expected_outputs) ; i++) { - uint8_t *result; + uint8_t *result, *origresult; size_t resultlen; int ret; size_t j; @@ -132,7 +149,7 @@ static void test_hash_prealloc(void) } resultlen = expected_lens[i]; - result = g_new0(uint8_t, resultlen); + origresult = result = g_new0(uint8_t, resultlen); ret = qcrypto_hash_bytes(i, INPUT_TEXT, @@ -141,7 +158,8 @@ static void test_hash_prealloc(void) &resultlen, &error_fatal); g_assert(ret == 0); - + /* Validate that our pre-allocated pointer was not replaced */ + g_assert(result == origresult); g_assert(resultlen == expected_lens[i]); for (j = 0; j < resultlen; j++) { 
g_assert(expected_outputs[i][j * 2] == hex[(result[j] >> 4) & 0xf]); @@ -241,6 +259,50 @@ static void test_hash_base64(void) } } +static void test_hash_accumulate(void) +{ + size_t i; + + for (i = 0; i < G_N_ELEMENTS(expected_outputs) ; i++) { + g_autoptr(QCryptoHash) hash = NULL; + struct iovec iov[] = { + { .iov_base = (char *)INPUT_TEXT1, .iov_len = strlen(INPUT_TEXT1) }, + { .iov_base = (char *)INPUT_TEXT2, .iov_len = strlen(INPUT_TEXT2) }, + { .iov_base = (char *)INPUT_TEXT3, .iov_len = strlen(INPUT_TEXT3) }, + }; + g_autofree uint8_t *result = NULL; + size_t resultlen = 0; + int ret; + size_t j; + + if (!qcrypto_hash_supports(i)) { + continue; + } + + hash = qcrypto_hash_new(i, &error_fatal); + g_assert(hash != NULL); + + /* Add each iovec to the hash context separately */ + for (j = 0; j < G_N_ELEMENTS(iov); j++) { + ret = qcrypto_hash_updatev(hash, + &iov[j], 1, + &error_fatal); + + g_assert(ret == 0); + } + + ret = qcrypto_hash_finalize_bytes(hash, &result, &resultlen, + &error_fatal); + + g_assert(ret == 0); + g_assert(resultlen == expected_lens[i]); + for (j = 0; j < resultlen; j++) { + g_assert(expected_outputs[i][j * 2] == hex[(result[j] >> 4) & 0xf]); + g_assert(expected_outputs[i][j * 2 + 1] == hex[result[j] & 0xf]); + } + } +} + int main(int argc, char **argv) { int ret = qcrypto_init(&error_fatal); @@ -252,5 +314,6 @@ int main(int argc, char **argv) g_test_add_func("/crypto/hash/prealloc", test_hash_prealloc); g_test_add_func("/crypto/hash/digest", test_hash_digest); g_test_add_func("/crypto/hash/base64", test_hash_base64); + g_test_add_func("/crypto/hash/accumulate", test_hash_accumulate); return g_test_run(); } diff --git a/tests/unit/test-crypto-hmac.c b/tests/unit/test-crypto-hmac.c index 23eb724d942..20c60eb9d8b 100644 --- a/tests/unit/test-crypto-hmac.c +++ b/tests/unit/test-crypto-hmac.c @@ -27,43 +27,43 @@ typedef struct QCryptoHmacTestData QCryptoHmacTestData; struct QCryptoHmacTestData { - QCryptoHashAlgorithm alg; + QCryptoHashAlgo alg; const char *hex_digest; }; static QCryptoHmacTestData test_data[] = { { - .alg = QCRYPTO_HASH_ALG_MD5, + .alg = QCRYPTO_HASH_ALGO_MD5, .hex_digest = "ede9cb83679ba82d88fbeae865b3f8fc", }, { - .alg = QCRYPTO_HASH_ALG_SHA1, + .alg = QCRYPTO_HASH_ALGO_SHA1, .hex_digest = "c7b5a631e3aac975c4ededfcd346e469" "dbc5f2d1", }, { - .alg = QCRYPTO_HASH_ALG_SHA224, + .alg = QCRYPTO_HASH_ALGO_SHA224, .hex_digest = "5f768179dbb29ca722875d0f461a2e2f" "597d0210340a84df1a8e9c63", }, { - .alg = QCRYPTO_HASH_ALG_SHA256, + .alg = QCRYPTO_HASH_ALGO_SHA256, .hex_digest = "3798f363c57afa6edaffe39016ca7bad" "efd1e670afb0e3987194307dec3197db", }, { - .alg = QCRYPTO_HASH_ALG_SHA384, + .alg = QCRYPTO_HASH_ALGO_SHA384, .hex_digest = "d218680a6032d33dccd9882d6a6a7164" "64f26623be257a9b2919b185294f4a49" "9e54b190bfd6bc5cedd2cd05c7e65e82", }, { - .alg = QCRYPTO_HASH_ALG_SHA512, + .alg = QCRYPTO_HASH_ALGO_SHA512, .hex_digest = "835a4f5b3750b4c1fccfa88da2f746a4" "900160c9f18964309bb736c13b59491b" @@ -71,11 +71,19 @@ static QCryptoHmacTestData test_data[] = { "94c4ba26862b2dadb59b7ede1d08d53e", }, { - .alg = QCRYPTO_HASH_ALG_RIPEMD160, + .alg = QCRYPTO_HASH_ALGO_RIPEMD160, .hex_digest = "94964ed4c1155b62b668c241d67279e5" "8a711676", }, +#ifdef CONFIG_CRYPTO_SM3 + { + .alg = QCRYPTO_HASH_ALGO_SM3, + .hex_digest = + "760e3799332bc913819b930085360ddb" + "c05529261313d5b15b75bab4fd7ae91e", + }, +#endif }; static const char hex[] = "0123456789abcdef"; @@ -126,7 +134,7 @@ static void test_hmac_prealloc(void) for (i = 0; i < G_N_ELEMENTS(test_data); i++) { 
QCryptoHmacTestData *data = &test_data[i]; QCryptoHmac *hmac = NULL; - uint8_t *result = NULL; + uint8_t *result = NULL, *origresult = NULL; size_t resultlen = 0; const char *exp_output = NULL; int ret; @@ -139,7 +147,7 @@ static void test_hmac_prealloc(void) exp_output = data->hex_digest; resultlen = strlen(exp_output) / 2; - result = g_new0(uint8_t, resultlen); + origresult = result = g_new0(uint8_t, resultlen); hmac = qcrypto_hmac_new(data->alg, (const uint8_t *)KEY, strlen(KEY), &error_fatal); @@ -149,6 +157,8 @@ static void test_hmac_prealloc(void) strlen(INPUT_TEXT), &result, &resultlen, &error_fatal); g_assert(ret == 0); + /* Validate that our pre-allocated pointer was not replaced */ + g_assert(result == origresult); exp_output = data->hex_digest; for (j = 0; j < resultlen; j++) { diff --git a/tests/unit/test-crypto-ivgen.c b/tests/unit/test-crypto-ivgen.c index 29630ed348a..bc9ffe34e74 100644 --- a/tests/unit/test-crypto-ivgen.c +++ b/tests/unit/test-crypto-ivgen.c @@ -26,9 +26,9 @@ struct QCryptoIVGenTestData { const char *path; uint64_t sector; - QCryptoIVGenAlgorithm ivalg; - QCryptoHashAlgorithm hashalg; - QCryptoCipherAlgorithm cipheralg; + QCryptoIVGenAlgo ivalg; + QCryptoHashAlgo hashalg; + QCryptoCipherAlgo cipheralg; const uint8_t *key; size_t nkey; const uint8_t *iv; @@ -38,7 +38,7 @@ struct QCryptoIVGenTestData { { "/crypto/ivgen/plain/1", .sector = 0x1, - .ivalg = QCRYPTO_IVGEN_ALG_PLAIN, + .ivalg = QCRYPTO_IV_GEN_ALGO_PLAIN, .iv = (const uint8_t *)"\x01\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00", .niv = 16, @@ -47,7 +47,7 @@ struct QCryptoIVGenTestData { { "/crypto/ivgen/plain/1f2e3d4c", .sector = 0x1f2e3d4cULL, - .ivalg = QCRYPTO_IVGEN_ALG_PLAIN, + .ivalg = QCRYPTO_IV_GEN_ALGO_PLAIN, .iv = (const uint8_t *)"\x4c\x3d\x2e\x1f\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00", .niv = 16, @@ -56,7 +56,7 @@ struct QCryptoIVGenTestData { { "/crypto/ivgen/plain/1f2e3d4c5b6a7988", .sector = 0x1f2e3d4c5b6a7988ULL, - .ivalg = QCRYPTO_IVGEN_ALG_PLAIN, + .ivalg = QCRYPTO_IV_GEN_ALGO_PLAIN, .iv = (const uint8_t *)"\x88\x79\x6a\x5b\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00", .niv = 16, @@ -65,7 +65,7 @@ struct QCryptoIVGenTestData { { "/crypto/ivgen/plain64/1", .sector = 0x1, - .ivalg = QCRYPTO_IVGEN_ALG_PLAIN64, + .ivalg = QCRYPTO_IV_GEN_ALGO_PLAIN64, .iv = (const uint8_t *)"\x01\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00", .niv = 16, @@ -74,7 +74,7 @@ struct QCryptoIVGenTestData { { "/crypto/ivgen/plain64/1f2e3d4c", .sector = 0x1f2e3d4cULL, - .ivalg = QCRYPTO_IVGEN_ALG_PLAIN64, + .ivalg = QCRYPTO_IV_GEN_ALGO_PLAIN64, .iv = (const uint8_t *)"\x4c\x3d\x2e\x1f\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00", .niv = 16, @@ -83,7 +83,7 @@ struct QCryptoIVGenTestData { { "/crypto/ivgen/plain64/1f2e3d4c5b6a7988", .sector = 0x1f2e3d4c5b6a7988ULL, - .ivalg = QCRYPTO_IVGEN_ALG_PLAIN64, + .ivalg = QCRYPTO_IV_GEN_ALGO_PLAIN64, .iv = (const uint8_t *)"\x88\x79\x6a\x5b\x4c\x3d\x2e\x1f" "\x00\x00\x00\x00\x00\x00\x00\x00", .niv = 16, @@ -92,9 +92,9 @@ struct QCryptoIVGenTestData { { "/crypto/ivgen/essiv/1", .sector = 0x1, - .ivalg = QCRYPTO_IVGEN_ALG_ESSIV, - .cipheralg = QCRYPTO_CIPHER_ALG_AES_128, - .hashalg = QCRYPTO_HASH_ALG_SHA256, + .ivalg = QCRYPTO_IV_GEN_ALGO_ESSIV, + .cipheralg = QCRYPTO_CIPHER_ALGO_AES_128, + .hashalg = QCRYPTO_HASH_ALGO_SHA256, .key = (const uint8_t *)"\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .nkey = 16, @@ -106,9 +106,9 @@ struct QCryptoIVGenTestData { { 
"/crypto/ivgen/essiv/1f2e3d4c", .sector = 0x1f2e3d4cULL, - .ivalg = QCRYPTO_IVGEN_ALG_ESSIV, - .cipheralg = QCRYPTO_CIPHER_ALG_AES_128, - .hashalg = QCRYPTO_HASH_ALG_SHA256, + .ivalg = QCRYPTO_IV_GEN_ALGO_ESSIV, + .cipheralg = QCRYPTO_CIPHER_ALGO_AES_128, + .hashalg = QCRYPTO_HASH_ALGO_SHA256, .key = (const uint8_t *)"\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .nkey = 16, @@ -120,9 +120,9 @@ struct QCryptoIVGenTestData { { "/crypto/ivgen/essiv/1f2e3d4c5b6a7988", .sector = 0x1f2e3d4c5b6a7988ULL, - .ivalg = QCRYPTO_IVGEN_ALG_ESSIV, - .cipheralg = QCRYPTO_CIPHER_ALG_AES_128, - .hashalg = QCRYPTO_HASH_ALG_SHA256, + .ivalg = QCRYPTO_IV_GEN_ALGO_ESSIV, + .cipheralg = QCRYPTO_CIPHER_ALGO_AES_128, + .hashalg = QCRYPTO_HASH_ALGO_SHA256, .key = (const uint8_t *)"\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .nkey = 16, @@ -166,7 +166,7 @@ int main(int argc, char **argv) size_t i; g_test_init(&argc, &argv, NULL); for (i = 0; i < G_N_ELEMENTS(test_data); i++) { - if (test_data[i].ivalg == QCRYPTO_IVGEN_ALG_ESSIV && + if (test_data[i].ivalg == QCRYPTO_IV_GEN_ALGO_ESSIV && !qcrypto_hash_supports(test_data[i].hashalg)) { continue; } diff --git a/tests/unit/test-crypto-pbkdf.c b/tests/unit/test-crypto-pbkdf.c index 43c417f6b43..ddb7244e213 100644 --- a/tests/unit/test-crypto-pbkdf.c +++ b/tests/unit/test-crypto-pbkdf.c @@ -25,14 +25,13 @@ #include #endif -#if ((defined(CONFIG_NETTLE) || defined(CONFIG_GCRYPT)) && \ - (defined(_WIN32) || defined(RUSAGE_THREAD))) +#if defined(_WIN32) || defined(RUSAGE_THREAD) || defined(CONFIG_DARWIN) #include "crypto/pbkdf.h" typedef struct QCryptoPbkdfTestData QCryptoPbkdfTestData; struct QCryptoPbkdfTestData { const char *path; - QCryptoHashAlgorithm hash; + QCryptoHashAlgo hash; unsigned int iterations; const char *key; size_t nkey; @@ -53,7 +52,7 @@ static QCryptoPbkdfTestData test_data[] = { /* RFC 3962 test data */ { .path = "/crypto/pbkdf/rfc3962/sha1/iter1", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 1, .key = "password", .nkey = 8, @@ -67,7 +66,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/rfc3962/sha1/iter2", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 2, .key = "password", .nkey = 8, @@ -81,7 +80,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/rfc3962/sha1/iter1200a", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 1200, .key = "password", .nkey = 8, @@ -95,7 +94,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/rfc3962/sha1/iter5", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 5, .key = "password", .nkey = 8, @@ -109,7 +108,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/rfc3962/sha1/iter1200b", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 1200, .key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", @@ -124,7 +123,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/rfc3962/sha1/iter1200c", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 1200, .key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", @@ -139,7 +138,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/rfc3962/sha1/iter50", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 50, .key = "\360\235\204\236", 
/* g-clef ("\xf09d849e) */ .nkey = 4, @@ -155,7 +154,7 @@ static QCryptoPbkdfTestData test_data[] = { /* RFC-6070 test data */ { .path = "/crypto/pbkdf/rfc6070/sha1/iter1", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 1, .key = "password", .nkey = 8, @@ -167,7 +166,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/rfc6070/sha1/iter2", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 2, .key = "password", .nkey = 8, @@ -179,7 +178,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/rfc6070/sha1/iter4096", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 4096, .key = "password", .nkey = 8, @@ -191,7 +190,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/rfc6070/sha1/iter16777216", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 16777216, .key = "password", .nkey = 8, @@ -204,7 +203,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/rfc6070/sha1/iter4096a", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 4096, .key = "passwordPASSWORDpassword", .nkey = 24, @@ -217,7 +216,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/rfc6070/sha1/iter4096b", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 4096, .key = "pass\0word", .nkey = 9, @@ -232,7 +231,7 @@ static QCryptoPbkdfTestData test_data[] = { { /* empty password test. */ .path = "/crypto/pbkdf/nonrfc/sha1/iter2", - .hash = QCRYPTO_HASH_ALG_SHA1, + .hash = QCRYPTO_HASH_ALGO_SHA1, .iterations = 2, .key = "", .nkey = 0, @@ -245,7 +244,7 @@ static QCryptoPbkdfTestData test_data[] = { { /* Password exceeds block size test */ .path = "/crypto/pbkdf/nonrfc/sha256/iter1200", - .hash = QCRYPTO_HASH_ALG_SHA256, + .hash = QCRYPTO_HASH_ALGO_SHA256, .iterations = 1200, .key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", @@ -260,7 +259,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/nonrfc/sha512/iter1200", - .hash = QCRYPTO_HASH_ALG_SHA512, + .hash = QCRYPTO_HASH_ALGO_SHA512, .iterations = 1200, .key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" @@ -277,7 +276,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/nonrfc/sha224/iter1200", - .hash = QCRYPTO_HASH_ALG_SHA224, + .hash = QCRYPTO_HASH_ALGO_SHA224, .iterations = 1200, .key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" @@ -294,7 +293,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/nonrfc/sha384/iter1200", - .hash = QCRYPTO_HASH_ALG_SHA384, + .hash = QCRYPTO_HASH_ALGO_SHA384, .iterations = 1200, .key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" @@ -311,7 +310,7 @@ static QCryptoPbkdfTestData test_data[] = { }, { .path = "/crypto/pbkdf/nonrfc/ripemd160/iter1200", - .hash = QCRYPTO_HASH_ALG_RIPEMD160, + .hash = QCRYPTO_HASH_ALGO_RIPEMD160, .iterations = 1200, .key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" @@ -326,10 +325,26 @@ static QCryptoPbkdfTestData test_data[] = { "\xce\xbf\x91\x14\x8b\x5c\x48\x41", .nout = 32 }, +#ifdef CONFIG_CRYPTO_SM3 + { + .path = "/crypto/pbkdf/nonrfc/sm3/iter2", + .hash = QCRYPTO_HASH_ALGO_SM3, + .iterations = 2, + .key = "password", + .nkey = 8, + .salt = "ATHENA.MIT.EDUraeburn", + .nsalt = 21, + .out = 
"\x48\x71\x1b\x58\xa3\xcb\xce\x06" + "\xba\xad\x77\xa8\xb5\xb9\xd8\x07" + "\x6a\xe2\xb3\x5b\x95\xce\xc8\xce" + "\xe7\xb1\xcb\xee\x61\xdf\x04\xea", + .nout = 32 + }, +#endif #if 0 { .path = "/crypto/pbkdf/nonrfc/whirlpool/iter1200", - .hash = QCRYPTO_HASH_ALG_WHIRLPOOL, + .hash = QCRYPTO_HASH_ALGO_WHIRLPOOL, .iterations = 1200, .key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", @@ -394,7 +409,7 @@ static void test_pbkdf(const void *opaque) } -static void test_pbkdf_timing(void) +static void test_pbkdf_timing_sha256(void) { uint8_t key[32]; uint8_t salt[32]; @@ -403,7 +418,7 @@ static void test_pbkdf_timing(void) memset(key, 0x5d, sizeof(key)); memset(salt, 0x7c, sizeof(salt)); - iters = qcrypto_pbkdf2_count_iters(QCRYPTO_HASH_ALG_SHA256, + iters = qcrypto_pbkdf2_count_iters(QCRYPTO_HASH_ALGO_SHA256, key, sizeof(key), salt, sizeof(salt), 32, @@ -422,14 +437,18 @@ int main(int argc, char **argv) g_assert(qcrypto_init(NULL) == 0); for (i = 0; i < G_N_ELEMENTS(test_data); i++) { + if (!qcrypto_pbkdf2_supports(test_data[i].hash)) { + continue; + } + if (!test_data[i].slow || g_test_slow()) { g_test_add_data_func(test_data[i].path, &test_data[i], test_pbkdf); } } - if (g_test_slow()) { - g_test_add_func("/crypt0/pbkdf/timing", test_pbkdf_timing); + if (g_test_slow() && qcrypto_pbkdf2_supports(QCRYPTO_HASH_ALGO_SHA256)) { + g_test_add_func("/crypt0/pbkdf/timing/sha256", test_pbkdf_timing_sha256); } return g_test_run(); diff --git a/tests/unit/test-crypto-secret.c b/tests/unit/test-crypto-secret.c index ffd13ff70e8..fc32a01747b 100644 --- a/tests/unit/test-crypto-secret.c +++ b/tests/unit/test-crypto-secret.c @@ -22,6 +22,7 @@ #include "crypto/init.h" #include "crypto/secret.h" +#include "crypto/cipher.h" #include "qapi/error.h" #include "qemu/module.h" #if defined(CONFIG_KEYUTILS) && defined(CONFIG_SECRET_KEYRING) @@ -597,18 +598,21 @@ int main(int argc, char **argv) g_test_add_func("/crypto/secret/conv/utf8/base64", test_secret_conv_utf8_base64); - g_test_add_func("/crypto/secret/crypt/raw", - test_secret_crypt_raw); - g_test_add_func("/crypto/secret/crypt/base64", - test_secret_crypt_base64); - g_test_add_func("/crypto/secret/crypt/shortkey", - test_secret_crypt_short_key); - g_test_add_func("/crypto/secret/crypt/shortiv", - test_secret_crypt_short_iv); - g_test_add_func("/crypto/secret/crypt/missingiv", - test_secret_crypt_missing_iv); - g_test_add_func("/crypto/secret/crypt/badiv", - test_secret_crypt_bad_iv); + if (qcrypto_cipher_supports(QCRYPTO_CIPHER_ALGO_AES_128, + QCRYPTO_CIPHER_MODE_CBC)) { + g_test_add_func("/crypto/secret/crypt/raw", + test_secret_crypt_raw); + g_test_add_func("/crypto/secret/crypt/base64", + test_secret_crypt_base64); + g_test_add_func("/crypto/secret/crypt/shortkey", + test_secret_crypt_short_key); + g_test_add_func("/crypto/secret/crypt/shortiv", + test_secret_crypt_short_iv); + g_test_add_func("/crypto/secret/crypt/missingiv", + test_secret_crypt_missing_iv); + g_test_add_func("/crypto/secret/crypt/badiv", + test_secret_crypt_bad_iv); + } return g_test_run(); } diff --git a/tests/unit/test-crypto-tlssession.c b/tests/unit/test-crypto-tlssession.c index 3395f73560f..554054e9344 100644 --- a/tests/unit/test-crypto-tlssession.c +++ b/tests/unit/test-crypto-tlssession.c @@ -158,8 +158,7 @@ static void test_crypto_tls_session_psk(void) rv = qcrypto_tls_session_handshake(serverSess, &error_abort); g_assert(rv >= 0); - if (qcrypto_tls_session_get_handshake_status(serverSess) == - QCRYPTO_TLS_HANDSHAKE_COMPLETE) { + if (rv == 
QCRYPTO_TLS_HANDSHAKE_COMPLETE) { serverShake = true; } } @@ -167,8 +166,7 @@ static void test_crypto_tls_session_psk(void) rv = qcrypto_tls_session_handshake(clientSess, &error_abort); g_assert(rv >= 0); - if (qcrypto_tls_session_get_handshake_status(clientSess) == - QCRYPTO_TLS_HANDSHAKE_COMPLETE) { + if (rv == QCRYPTO_TLS_HANDSHAKE_COMPLETE) { clientShake = true; } } @@ -352,8 +350,7 @@ static void test_crypto_tls_session_x509(const void *opaque) rv = qcrypto_tls_session_handshake(serverSess, &error_abort); g_assert(rv >= 0); - if (qcrypto_tls_session_get_handshake_status(serverSess) == - QCRYPTO_TLS_HANDSHAKE_COMPLETE) { + if (rv == QCRYPTO_TLS_HANDSHAKE_COMPLETE) { serverShake = true; } } @@ -361,8 +358,7 @@ static void test_crypto_tls_session_x509(const void *opaque) rv = qcrypto_tls_session_handshake(clientSess, &error_abort); g_assert(rv >= 0); - if (qcrypto_tls_session_get_handshake_status(clientSess) == - QCRYPTO_TLS_HANDSHAKE_COMPLETE) { + if (rv == QCRYPTO_TLS_HANDSHAKE_COMPLETE) { clientShake = true; } } diff --git a/tests/unit/test-fifo.c b/tests/unit/test-fifo.c new file mode 100644 index 00000000000..14153c41fa0 --- /dev/null +++ b/tests/unit/test-fifo.c @@ -0,0 +1,449 @@ +/* + * Fifo8 tests + * + * Copyright 2024 Mark Cave-Ayland + * + * Authors: + * Mark Cave-Ayland + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "migration/vmstate.h" +#include "qemu/fifo8.h" + +const VMStateInfo vmstate_info_uint32; +const VMStateInfo vmstate_info_buffer; + + +static void test_fifo8_pop_bufptr_wrap(void) +{ + Fifo8 fifo; + uint8_t data_in1[] = { 0x1, 0x2, 0x3, 0x4 }; + uint8_t data_in2[] = { 0x5, 0x6, 0x7, 0x8, 0x9, 0xa }; + const uint8_t *buf; + uint32_t count; + + fifo8_create(&fifo, 8); + /* + * head --v-- tail used = 0 + * FIFO: [ . . . . . . . . ] + */ + + fifo8_push_all(&fifo, data_in1, sizeof(data_in1)); + /* + * head --v ]-- tail used = 4 + * FIFO: [ 1 2 3 4 . . . . ] + */ + buf = fifo8_pop_bufptr(&fifo, 2, &count); + /* + * head --v ]-- tail used = 2 + * FIFO: [ 1 2 3 4 . . . . ] + * buf --^ count = 2 + */ + g_assert(count == 2); + g_assert(buf[0] == 0x1 && buf[1] == 0x2); + + fifo8_push_all(&fifo, data_in2, sizeof(data_in2)); + /* + * tail --]v-- head used = 8 + * FIFO: [ 9 a 3 4 5 6 7 8 ] + */ + buf = fifo8_pop_bufptr(&fifo, 8, &count); + /* + * head --v ]-- tail used = 2 + * FIFO: [ 9 a 3 4 5 6 7 8 ] + * buf --^ count = 6 + */ + g_assert(count == 6); + g_assert(buf[0] == 0x3 && buf[1] == 0x4 && buf[2] == 0x5 && + buf[3] == 0x6 && buf[4] == 0x7 && buf[5] == 0x8); + + g_assert(fifo8_num_used(&fifo) == 2); + fifo8_destroy(&fifo); +} + +static void test_fifo8_pop_bufptr(void) +{ + Fifo8 fifo; + uint8_t data_in[] = { 0x1, 0x2, 0x3, 0x4 }; + const uint8_t *buf; + uint32_t count; + + fifo8_create(&fifo, 8); + /* + * head --v-- tail used = 0 + * FIFO: [ . . . . . . . . ] + */ + + fifo8_push_all(&fifo, data_in, sizeof(data_in)); + /* + * head --v ]-- tail used = 4 + * FIFO: [ 1 2 3 4 . . . . ] + */ + buf = fifo8_pop_bufptr(&fifo, 2, &count); + /* + * head --v ]-- tail used = 2 + * FIFO: [ 1 2 3 4 . . . . 
] + * buf --^ count = 2 + */ + g_assert(count == 2); + g_assert(buf[0] == 0x1 && buf[1] == 0x2); + + g_assert(fifo8_num_used(&fifo) == 2); + fifo8_destroy(&fifo); +} + +static void test_fifo8_peek_bufptr_wrap(void) +{ + Fifo8 fifo; + uint8_t data_in1[] = { 0x1, 0x2, 0x3, 0x4 }; + uint8_t data_in2[] = { 0x5, 0x6, 0x7, 0x8, 0x9, 0xa }; + const uint8_t *buf; + uint32_t count; + + fifo8_create(&fifo, 8); + /* + * head --v-- tail used = 0 + * FIFO: { . . . . . . . . } + */ + + fifo8_push_all(&fifo, data_in1, sizeof(data_in1)); + /* + * head --v ]-- tail used = 4 + * FIFO: { 1 2 3 4 . . . . } + */ + buf = fifo8_peek_bufptr(&fifo, 2, &count); + /* + * head --v ]-- tail used = 4 + * FIFO: { 1 2 3 4 . . . . } + * buf: [ 1 2 ] count = 2 + */ + g_assert(count == 2); + g_assert(buf[0] == 0x1 && buf[1] == 0x2); + + buf = fifo8_pop_bufptr(&fifo, 2, &count); + /* + * head --v ]-- tail used = 2 + * FIFO: { 1 2 3 4 . . . . } + * buf: [ 1 2 ] count = 2 + */ + g_assert(count == 2); + g_assert(buf[0] == 0x1 && buf[1] == 0x2); + fifo8_push_all(&fifo, data_in2, sizeof(data_in2)); + /* + * tail ---]v-- head used = 8 + * FIFO: { 9 a 3 4 5 6 7 8 } + */ + + buf = fifo8_peek_bufptr(&fifo, 8, &count); + /* + * tail --]v-- head used = 8 + * FIFO: { 9 a 3 4 5 6 7 8 } + * buf: [ 3 4 5 6 7 8 ] count = 6 + */ + g_assert(count == 6); + g_assert(buf[0] == 0x3 && buf[1] == 0x4 && buf[2] == 0x5 && + buf[3] == 0x6 && buf[4] == 0x7 && buf[5] == 0x8); + + g_assert(fifo8_num_used(&fifo) == 8); + fifo8_destroy(&fifo); +} + +static void test_fifo8_peek_bufptr(void) +{ + Fifo8 fifo; + uint8_t data_in[] = { 0x1, 0x2, 0x3, 0x4 }; + const uint8_t *buf; + uint32_t count; + + fifo8_create(&fifo, 8); + /* + * head --v-- tail used = 0 + * FIFO: { . . . . . . . . } + */ + + fifo8_push_all(&fifo, data_in, sizeof(data_in)); + /* + * head --v ]-- tail used = 4 + * FIFO: { 1 2 3 4 . . . . } + */ + buf = fifo8_peek_bufptr(&fifo, 2, &count); + /* + * head --v ]-- tail used = 4 + * FIFO: { 1 2 3 4 . . . . } + * buf: [ 1 2 ] count = 2 + */ + g_assert(count == 2); + g_assert(buf[0] == 0x1 && buf[1] == 0x2); + + g_assert(fifo8_num_used(&fifo) == 4); + fifo8_destroy(&fifo); +} + +static void test_fifo8_pop_buf_wrap(void) +{ + Fifo8 fifo; + uint8_t data_in1[] = { 0x1, 0x2, 0x3, 0x4 }; + uint8_t data_in2[] = { 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }; + uint8_t data_out[4]; + int count; + + fifo8_create(&fifo, 8); + /* + * head --v-- tail used = 0 + * FIFO: { . . . . . . . . } + */ + + fifo8_push_all(&fifo, data_in1, sizeof(data_in1)); + /* + * head --v ]-- tail used = 4 + * FIFO: { 1 2 3 4 . . . . } + */ + fifo8_pop_buf(&fifo, NULL, 4); + /* + * tail --]v-- head used = 0 + * FIFO: [ 1 2 3 4 . . . . ] + */ + + fifo8_push_all(&fifo, data_in2, sizeof(data_in2)); + /* + * tail --]v-- head used = 8 + * FIFO: { 9 a b c 5 6 7 8 } + */ + count = fifo8_pop_buf(&fifo, NULL, 4); + /* + * head --v ]-- tail used = 4 + * FIFO: { 9 a b c 5 6 7 8 } + */ + g_assert(count == 4); + count = fifo8_pop_buf(&fifo, data_out, 4); + /* + * tail --]v-- head used = 0 + * FIFO: { 9 a b c 5 6 7 8 } + */ + g_assert(count == 4); + g_assert(data_out[0] == 0x9 && data_out[1] == 0xa && + data_out[2] == 0xb && data_out[3] == 0xc); + + g_assert(fifo8_num_used(&fifo) == 0); + fifo8_destroy(&fifo); +} + +static void test_fifo8_pop_buf(void) +{ + Fifo8 fifo; + uint8_t data_in[] = { 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8 }; + uint8_t data_out[] = { 0xff, 0xff, 0xff, 0xff }; + int count; + + fifo8_create(&fifo, 8); + /* + * head --v-- tail used = 0 + * FIFO: { . . . . . . . . 
} + */ + + fifo8_push_all(&fifo, data_in, sizeof(data_in)); + /* + * head --v ]-- tail used = 4 + * FIFO: { 1 2 3 4 . . . . } + */ + count = fifo8_pop_buf(&fifo, NULL, 4); + /* + * tail --]v-- head used = 0 + * FIFO: { 1 2 3 4 . . . . } + */ + g_assert(count == 4); + count = fifo8_pop_buf(&fifo, data_out, 4); + g_assert(data_out[0] == 0x5 && data_out[1] == 0x6 && + data_out[2] == 0x7 && data_out[3] == 0x8); + + g_assert(fifo8_num_used(&fifo) == 0); + fifo8_destroy(&fifo); +} + +static void test_fifo8_peek_buf_wrap(void) +{ + Fifo8 fifo; + uint8_t data_in1[] = { 0x1, 0x2, 0x3, 0x4 }; + uint8_t data_in2[] = { 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }; + uint8_t data_out[8]; + int count; + + fifo8_create(&fifo, 8); + /* + * head --v-- tail used = 0 + * FIFO: { . . . . . . . . } + */ + + fifo8_push_all(&fifo, data_in1, sizeof(data_in1)); + /* + * head --v ]-- tail used = 4 + * FIFO: { 1 2 3 4 . . . . } + */ + fifo8_pop_buf(&fifo, NULL, 4); + /* + * tail --]v-- head used = 0 + * FIFO: { 1 2 3 4 . . . . } + */ + + fifo8_push_all(&fifo, data_in2, sizeof(data_in2)); + /* + * tail --]v-- head used = 8 + * FIFO: { 9 a b c 5 6 7 8 } + */ + count = fifo8_peek_buf(&fifo, NULL, 4); + g_assert(count == 4); + count = fifo8_peek_buf(&fifo, data_out, 4); + /* + * tail --]v-- head used = 8 + * FIFO: { 9 a b c 5 6 7 8 } + * buf: [ 5 6 7 8 ] count = 4 + */ + g_assert(count == 4); + g_assert(data_out[0] == 0x5 && data_out[1] == 0x6 && + data_out[2] == 0x7 && data_out[3] == 0x8); + + count = fifo8_peek_buf(&fifo, data_out, 8); + /* + * tail --]v-- head used = 8 + * FIFO: { 9 a b c 5 6 7 8 } + * buf: [ 5 6 7 8 9 a b c ] count = 8 + */ + g_assert(count == 8); + g_assert(data_out[0] == 0x5 && data_out[1] == 0x6 && + data_out[2] == 0x7 && data_out[3] == 0x8); + g_assert(data_out[4] == 0x9 && data_out[5] == 0xa && + data_out[6] == 0xb && data_out[7] == 0xc); + + g_assert(fifo8_num_used(&fifo) == 8); + fifo8_destroy(&fifo); +} + +static void test_fifo8_peek_buf(void) +{ + Fifo8 fifo; + uint8_t data_in[] = { 0x1, 0x2, 0x3, 0x4 }; + uint8_t data_out[] = { 0xff, 0xff, 0xff, 0xff }; + int count; + + fifo8_create(&fifo, 8); + /* + * head --v-- tail used = 0 + * FIFO: { . . . . . . . . } + */ + + fifo8_push_all(&fifo, data_in, sizeof(data_in)); + /* + * head --v ]-- tail used = 4 + * FIFO: { 1 2 3 4 . . . . } + */ + count = fifo8_peek_buf(&fifo, NULL, 4); + g_assert(count == 4); + + g_assert(data_out[0] == 0xff && data_out[1] == 0xff && + data_out[2] == 0xff && data_out[3] == 0xff); + count = fifo8_peek_buf(&fifo, data_out, 4); + /* + * head --v ]-- tail used = 4 + * FIFO: { 1 2 3 4 . . . . } + * buf: [ 1 2 3 4 ] count = 4 + */ + g_assert(count == 4); + g_assert(data_out[0] == 0x1 && data_out[1] == 0x2 && + data_out[2] == 0x3 && data_out[3] == 0x4); + + g_assert(fifo8_num_used(&fifo) == 4); + fifo8_destroy(&fifo); +} + +static void test_fifo8_peek(void) +{ + Fifo8 fifo; + uint8_t c; + + fifo8_create(&fifo, 8); + /* + * head --v-- tail used = 0 + * FIFO: { . . . . . . . . } + */ + fifo8_push(&fifo, 0x1); + /* + * head --v]-- tail used = 1 + * FIFO: { 1 . . . . . . . } + */ + fifo8_push(&fifo, 0x2); + /* + * head --v ]-- tail used = 2 + * FIFO: { 1 2 . . . . . . } + */ + + c = fifo8_peek(&fifo); + g_assert(c == 0x1); + fifo8_pop(&fifo); + /* + * head --v]-- tail used = 1 + * FIFO: { 1 2 . . . . . . 
} + */ + c = fifo8_peek(&fifo); + g_assert(c == 0x2); + + g_assert(fifo8_num_used(&fifo) == 1); + fifo8_destroy(&fifo); +} + +static void test_fifo8_pushpop(void) +{ + Fifo8 fifo; + uint8_t c; + + fifo8_create(&fifo, 8); + /* + * head --v-- tail used = 0 + * FIFO: { . . . . . . . . } + */ + fifo8_push(&fifo, 0x1); + /* + * head --v]-- tail used = 1 + * FIFO: { 1 . . . . . . . } + */ + fifo8_push(&fifo, 0x2); + /* + * head --v ]-- tail used = 2 + * FIFO: { 1 2 . . . . . . } + */ + + c = fifo8_pop(&fifo); + /* + * head --v]-- tail used = 1 + * FIFO: { 1 2 . . . . . . } + */ + g_assert(c == 0x1); + c = fifo8_pop(&fifo); + /* + * tail --]v-- head used = 0 + * FIFO: { 1 2 . . . . . . } + */ + g_assert(c == 0x2); + + g_assert(fifo8_num_used(&fifo) == 0); + fifo8_destroy(&fifo); +} + +int main(int argc, char *argv[]) +{ + g_test_init(&argc, &argv, NULL); + g_test_add_func("/fifo8/pushpop", test_fifo8_pushpop); + g_test_add_func("/fifo8/peek", test_fifo8_peek); + g_test_add_func("/fifo8/peek_buf", test_fifo8_peek_buf); + g_test_add_func("/fifo8/peek_buf_wrap", test_fifo8_peek_buf_wrap); + g_test_add_func("/fifo8/pop_buf", test_fifo8_pop_buf); + g_test_add_func("/fifo8/pop_buf_wrap", test_fifo8_pop_buf_wrap); + g_test_add_func("/fifo8/peek_bufptr", test_fifo8_peek_bufptr); + g_test_add_func("/fifo8/peek_bufptr_wrap", test_fifo8_peek_bufptr_wrap); + g_test_add_func("/fifo8/pop_bufptr", test_fifo8_pop_bufptr); + g_test_add_func("/fifo8/pop_bufptr_wrap", test_fifo8_pop_bufptr_wrap); + return g_test_run(); +} diff --git a/tests/unit/test-forward-visitor.c b/tests/unit/test-forward-visitor.c index eea8ffc0720..aad1c89f13d 100644 --- a/tests/unit/test-forward-visitor.c +++ b/tests/unit/test-forward-visitor.c @@ -12,8 +12,8 @@ #include "qapi/forward-visitor.h" #include "qapi/qobject-input-visitor.h" #include "qapi/error.h" -#include "qapi/qmp/qobject.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qobject.h" +#include "qobject/qdict.h" #include "test-qapi-visit.h" #include "qemu/keyval.h" diff --git a/tests/unit/test-image-locking.c b/tests/unit/test-image-locking.c index 2624cec6a04..019195f8992 100644 --- a/tests/unit/test-image-locking.c +++ b/tests/unit/test-image-locking.c @@ -26,9 +26,9 @@ #include "qemu/osdep.h" #include "block/block.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/main-loop.h" static BlockBackend *open_image(const char *path, diff --git a/tests/unit/test-io-channel-socket.c b/tests/unit/test-io-channel-socket.c index b964bb202db..dc7be96e9cd 100644 --- a/tests/unit/test-io-channel-socket.c +++ b/tests/unit/test-io-channel-socket.c @@ -506,7 +506,7 @@ static void test_io_channel_unix_listen_cleanup(void) { QIOChannelSocket *ioc; struct sockaddr_un un; - int sock; + int sock, ret = 0; #define TEST_SOCKET "test-io-channel-socket.sock" @@ -519,7 +519,9 @@ static void test_io_channel_unix_listen_cleanup(void) un.sun_family = AF_UNIX; snprintf(un.sun_path, sizeof(un.sun_path), "%s", TEST_SOCKET); unlink(TEST_SOCKET); - bind(sock, (struct sockaddr *)&un, sizeof(un)); + ret = bind(sock, (struct sockaddr *)&un, sizeof(un)); + g_assert_cmpint(ret, ==, 0); + ioc->fd = sock; ioc->localAddrLen = sizeof(ioc->localAddr); getsockname(sock, (struct sockaddr *)&ioc->localAddr, diff --git a/tests/unit/test-keyval.c b/tests/unit/test-keyval.c index 4dc52c7a1a8..c6e8f4fe370 100644 --- a/tests/unit/test-keyval.c +++ b/tests/unit/test-keyval.c @@ -13,9 +13,9 @@ #include 
"qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qstring.h" #include "qapi/qobject-input-visitor.h" #include "test-qapi-visit.h" #include "qemu/cutils.h" diff --git a/tests/unit/test-qdev-global-props.c b/tests/unit/test-qdev-global-props.c index c8862cac5fe..33062762a67 100644 --- a/tests/unit/test-qdev-global-props.c +++ b/tests/unit/test-qdev-global-props.c @@ -46,13 +46,12 @@ struct MyType { uint32_t prop2; }; -static Property static_props[] = { +static const Property static_props[] = { DEFINE_PROP_UINT32("prop1", MyType, prop1, PROP_DEFAULT), DEFINE_PROP_UINT32("prop2", MyType, prop2, PROP_DEFAULT), - DEFINE_PROP_END_OF_LIST() }; -static void static_prop_class_init(ObjectClass *oc, void *data) +static void static_prop_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -72,6 +71,26 @@ static const TypeInfo subclass_type = { .parent = TYPE_STATIC_PROPS, }; +/* + * Initialize a fake machine, being prepared for future tests. + * + * All the tests later (even if to be run in subprocesses.. which will + * inherit the global states of the parent process) will try to create qdev + * and realize the device. + * + * Realization of such anonymous qdev (with no parent object) requires both + * the machine object and its "unattached" container to be at least present. + */ +static void test_init_machine(void) +{ + /* This is a fake machine - it doesn't need to be a machine object */ + Object *machine = object_property_add_new_container( + object_get_root(), "machine"); + + /* This container must exist for anonymous qdevs to realize() */ + object_property_add_new_container(machine, "unattached"); +} + /* Test simple static property setting to default value */ static void test_static_prop_subprocess(void) { @@ -158,7 +177,7 @@ static void dynamic_instance_init(Object *obj) NULL, NULL); } -static void dynamic_class_init(ObjectClass *oc, void *data) +static void dynamic_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -174,7 +193,7 @@ static const TypeInfo dynamic_prop_type = { .class_init = dynamic_class_init, }; -static void hotplug_class_init(ObjectClass *oc, void *data) +static void hotplug_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -190,7 +209,7 @@ static const TypeInfo hotplug_type = { .class_init = hotplug_class_init, }; -static void nohotplug_class_init(ObjectClass *oc, void *data) +static void nohotplug_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -295,6 +314,8 @@ int main(int argc, char **argv) type_register_static(&nohotplug_type); type_register_static(&nondevice_type); + test_init_machine(); + g_test_add_func("/qdev/properties/static/default/subprocess", test_static_prop_subprocess); g_test_add_func("/qdev/properties/static/default", diff --git a/tests/unit/test-qemu-opts.c b/tests/unit/test-qemu-opts.c index 828d40e9283..8d03a69f7cd 100644 --- a/tests/unit/test-qemu-opts.c +++ b/tests/unit/test-qemu-opts.c @@ -12,8 +12,8 @@ #include "qemu/option.h" #include "qemu/option_int.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qstring.h" #include "qemu/config-file.h" diff --git a/tests/unit/test-qga.c b/tests/unit/test-qga.c index 8cddf5dc37b..587e30c7e48 100644 --- 
a/tests/unit/test-qga.c +++ b/tests/unit/test-qga.c @@ -5,8 +5,8 @@ #include #include "../qtest/libqtest.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" typedef struct { char *test_dir; @@ -332,6 +332,22 @@ static void test_qga_get_fsinfo(gconstpointer fix) } } +static void test_qga_get_load(gconstpointer fix) +{ + const TestFixture *fixture = fix; + g_autoptr(QDict) ret = NULL; + QDict *load; + + ret = qmp_fd(fixture->fd, "{'execute': 'guest-get-load'}"); + g_assert_nonnull(ret); + qmp_assert_no_error(ret); + + load = qdict_get_qdict(ret, "return"); + g_assert(qdict_haskey(load, "load1m")); + g_assert(qdict_haskey(load, "load5m")); + g_assert(qdict_haskey(load, "load15m")); +} + static void test_qga_get_memory_block_info(gconstpointer fix) { const TestFixture *fixture = fix; @@ -1105,6 +1121,7 @@ int main(int argc, char **argv) g_test_add_data_func("/qga/get-vcpus", &fix, test_qga_get_vcpus); } g_test_add_data_func("/qga/get-fsinfo", &fix, test_qga_get_fsinfo); + g_test_add_data_func("/qga/get-load", &fix, test_qga_get_load); g_test_add_data_func("/qga/get-memory-block-info", &fix, test_qga_get_memory_block_info); g_test_add_data_func("/qga/get-memory-blocks", &fix, diff --git a/tests/unit/test-qgraph.c b/tests/unit/test-qgraph.c index 334c76c8e71..ca1d60fc189 100644 --- a/tests/unit/test-qgraph.c +++ b/tests/unit/test-qgraph.c @@ -44,7 +44,6 @@ static void *driverfunct(void *obj, QGuestAllocator *machine, void *arg) static void testfunct(void *obj, void *arg, QGuestAllocator *alloc) { - return; } static void check_interface(const char *interface) diff --git a/tests/unit/test-qmp-cmds.c b/tests/unit/test-qmp-cmds.c index 6d52b4e5d81..ad538868863 100644 --- a/tests/unit/test-qmp-cmds.c +++ b/tests/unit/test-qmp-cmds.c @@ -1,9 +1,9 @@ #include "qemu/osdep.h" #include "qapi/compat-policy.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qjson.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" #include "qapi/error.h" #include "qapi/qobject-input-visitor.h" #include "tests/test-qapi-types.h" diff --git a/tests/unit/test-qmp-event.c b/tests/unit/test-qmp-event.c index 08e95a382bd..2aac27163d8 100644 --- a/tests/unit/test-qmp-event.c +++ b/tests/unit/test-qmp-event.c @@ -15,11 +15,11 @@ #include "qapi/compat-policy.h" #include "qapi/error.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" +#include "qobject/qjson.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" #include "qapi/qmp-event.h" #include "test-qapi-events.h" #include "test-qapi-emit-events.h" diff --git a/tests/unit/test-qobject-input-visitor.c b/tests/unit/test-qobject-input-visitor.c index 024e26c49e9..84bdcdf702e 100644 --- a/tests/unit/test-qobject-input-visitor.c +++ b/tests/unit/test-qobject-input-visitor.c @@ -17,12 +17,12 @@ #include "qapi/qapi-visit-introspect.h" #include "qapi/qobject-input-visitor.h" #include "test-qapi-visit.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qnull.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" -#include "qapi/qmp/qjson.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" +#include "qobject/qnull.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" +#include "qobject/qjson.h" 
#include "test-qapi-introspect.h" #include "qapi/qapi-introspect.h" @@ -720,7 +720,7 @@ static void test_visitor_in_union_in_union(TestInputVisitorData *data, visit_type_TestUnionInUnion(v, NULL, &tmp, &error_abort); g_assert_cmpint(tmp->type, ==, TEST_UNION_ENUM_VALUE_A); - g_assert_cmpint(tmp->u.value_a.type_a, ==, TEST_UNION_ENUMA_VALUE_A1); + g_assert_cmpint(tmp->u.value_a.type_a, ==, TEST_UNION_ENUM_A_VALUE_A1); g_assert_cmpint(tmp->u.value_a.u.value_a1.integer, ==, 2); g_assert_cmpint(strcmp(tmp->u.value_a.u.value_a1.name, "fish"), ==, 0); @@ -734,7 +734,7 @@ static void test_visitor_in_union_in_union(TestInputVisitorData *data, visit_type_TestUnionInUnion(v, NULL, &tmp, &error_abort); g_assert_cmpint(tmp->type, ==, TEST_UNION_ENUM_VALUE_A); - g_assert_cmpint(tmp->u.value_a.type_a, ==, TEST_UNION_ENUMA_VALUE_A2); + g_assert_cmpint(tmp->u.value_a.type_a, ==, TEST_UNION_ENUM_A_VALUE_A2); g_assert_cmpint(tmp->u.value_a.u.value_a2.integer, ==, 1729); g_assert_cmpint(tmp->u.value_a.u.value_a2.size, ==, 87539319); diff --git a/tests/unit/test-qobject-output-visitor.c b/tests/unit/test-qobject-output-visitor.c index 1535b3ad17b..407ab9ed505 100644 --- a/tests/unit/test-qobject-output-visitor.c +++ b/tests/unit/test-qobject-output-visitor.c @@ -15,12 +15,12 @@ #include "qapi/error.h" #include "qapi/qobject-output-visitor.h" #include "test-qapi-visit.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qnull.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qnull.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" typedef struct TestOutputVisitorData { Visitor *ov; @@ -359,7 +359,7 @@ static void test_visitor_out_union_in_union(TestOutputVisitorData *data, TestUnionInUnion *tmp = g_new0(TestUnionInUnion, 1); tmp->type = TEST_UNION_ENUM_VALUE_A; - tmp->u.value_a.type_a = TEST_UNION_ENUMA_VALUE_A1; + tmp->u.value_a.type_a = TEST_UNION_ENUM_A_VALUE_A1; tmp->u.value_a.u.value_a1.integer = 42; tmp->u.value_a.u.value_a1.name = g_strdup("fish"); @@ -377,7 +377,7 @@ static void test_visitor_out_union_in_union(TestOutputVisitorData *data, visitor_reset(data); tmp = g_new0(TestUnionInUnion, 1); tmp->type = TEST_UNION_ENUM_VALUE_A; - tmp->u.value_a.type_a = TEST_UNION_ENUMA_VALUE_A2; + tmp->u.value_a.type_a = TEST_UNION_ENUM_A_VALUE_A2; tmp->u.value_a.u.value_a2.integer = 1729; tmp->u.value_a.u.value_a2.size = 87539319; diff --git a/tests/unit/test-replication.c b/tests/unit/test-replication.c index 5d2003b8ced..3aa98e6f565 100644 --- a/tests/unit/test-replication.c +++ b/tests/unit/test-replication.c @@ -11,13 +11,13 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "qemu/option.h" #include "qemu/main-loop.h" #include "block/replication.h" #include "block/block_int.h" #include "block/qdict.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" #define IMG_SIZE (64 * 1024 * 1024) diff --git a/tests/unit/test-resv-mem.c b/tests/unit/test-resv-mem.c index cd8f7318ccc..4de2d042d14 100644 --- a/tests/unit/test-resv-mem.c +++ b/tests/unit/test-resv-mem.c @@ -10,7 +10,7 @@ #include "qemu/osdep.h" #include "qemu/range.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qemu/reserved-region.h" #define DEBUG 0 diff --git a/tests/unit/test-seccomp.c b/tests/unit/test-seccomp.c index bab93fd6dab..71d40834391 100644 --- 
a/tests/unit/test-seccomp.c +++ b/tests/unit/test-seccomp.c @@ -21,7 +21,7 @@ #include "qemu/osdep.h" #include "qemu/config-file.h" #include "qemu/option.h" -#include "sysemu/seccomp.h" +#include "system/seccomp.h" #include "qapi/error.h" #include "qemu/module.h" diff --git a/tests/unit/test-smp-parse.c b/tests/unit/test-smp-parse.c index f9bccb56abc..326045ecbb3 100644 --- a/tests/unit/test-smp-parse.c +++ b/tests/unit/test-smp-parse.c @@ -924,7 +924,7 @@ static void unsupported_params_init(const MachineClass *mc, SMPTestData *data) } } -static void machine_base_class_init(ObjectClass *oc, void *data) +static void machine_base_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -934,7 +934,8 @@ static void machine_base_class_init(ObjectClass *oc, void *data) mc->name = g_strdup(SMP_MACHINE_NAME); } -static void machine_generic_invalid_class_init(ObjectClass *oc, void *data) +static void machine_generic_invalid_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -943,21 +944,22 @@ static void machine_generic_invalid_class_init(ObjectClass *oc, void *data) mc->max_cpus = MAX_CPUS - 1; } -static void machine_with_modules_class_init(ObjectClass *oc, void *data) +static void machine_with_modules_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); mc->smp_props.modules_supported = true; } -static void machine_with_dies_class_init(ObjectClass *oc, void *data) +static void machine_with_dies_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); mc->smp_props.dies_supported = true; } -static void machine_with_modules_dies_class_init(ObjectClass *oc, void *data) +static void machine_with_modules_dies_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -965,28 +967,29 @@ static void machine_with_modules_dies_class_init(ObjectClass *oc, void *data) mc->smp_props.dies_supported = true; } -static void machine_with_clusters_class_init(ObjectClass *oc, void *data) +static void machine_with_clusters_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); mc->smp_props.clusters_supported = true; } -static void machine_with_books_class_init(ObjectClass *oc, void *data) +static void machine_with_books_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); mc->smp_props.books_supported = true; } -static void machine_with_drawers_class_init(ObjectClass *oc, void *data) +static void machine_with_drawers_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); mc->smp_props.drawers_supported = true; } -static void machine_with_drawers_books_class_init(ObjectClass *oc, void *data) +static void machine_with_drawers_books_class_init(ObjectClass *oc, + const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -994,7 +997,7 @@ static void machine_with_drawers_books_class_init(ObjectClass *oc, void *data) mc->smp_props.books_supported = true; } -static void machine_full_topo_class_init(ObjectClass *oc, void *data) +static void machine_full_topo_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); diff --git a/tests/unit/test-thread-pool.c b/tests/unit/test-thread-pool.c index 1483e53473d..33407b595d3 100644 --- a/tests/unit/test-thread-pool.c +++ b/tests/unit/test-thread-pool.c @@ -43,10 +43,10 @@ static void done_cb(void *opaque, int ret) active--; } -static void test_submit(void) +static void 
test_submit_no_complete(void) { WorkerTestData data = { .n = 0 }; - thread_pool_submit(worker_cb, &data); + thread_pool_submit_aio(worker_cb, &data, NULL, NULL); while (data.n == 0) { aio_poll(ctx, true); } @@ -236,7 +236,7 @@ int main(int argc, char **argv) ctx = qemu_get_current_aio_context(); g_test_init(&argc, &argv, NULL); - g_test_add_func("/thread-pool/submit", test_submit); + g_test_add_func("/thread-pool/submit-no-complete", test_submit_no_complete); g_test_add_func("/thread-pool/submit-aio", test_submit_aio); g_test_add_func("/thread-pool/submit-co", test_submit_co); g_test_add_func("/thread-pool/submit-many", test_submit_many); diff --git a/tests/unit/test-throttle.c b/tests/unit/test-throttle.c index 24032a02667..dfa61c75ea4 100644 --- a/tests/unit/test-throttle.c +++ b/tests/unit/test-throttle.c @@ -21,7 +21,7 @@ #include "qemu/main-loop.h" #include "qemu/module.h" #include "block/throttle-groups.h" -#include "sysemu/block-backend.h" +#include "system/block-backend.h" static AioContext *ctx; static LeakyBucket bkt; diff --git a/tests/unit/test-timed-average.c b/tests/unit/test-timed-average.c index 82c92500df8..747ed1ee60e 100644 --- a/tests/unit/test-timed-average.c +++ b/tests/unit/test-timed-average.c @@ -11,7 +11,7 @@ */ #include "qemu/osdep.h" -#include "sysemu/cpu-timers.h" +#include "system/cpu-timers.h" #include "qemu/timed-average.h" /* This is the clock for QEMU_CLOCK_VIRTUAL */ diff --git a/tests/unit/test-util-sockets.c b/tests/unit/test-util-sockets.c index 4c9dd0b2717..ee66d727c38 100644 --- a/tests/unit/test-util-sockets.c +++ b/tests/unit/test-util-sockets.c @@ -332,6 +332,220 @@ static void test_socket_unix_abstract(void) #endif /* CONFIG_LINUX */ +static void inet_parse_test_helper(const char *str, + InetSocketAddress *exp_addr, bool success) +{ + InetSocketAddress addr; + Error *error = NULL; + + int rc = inet_parse(&addr, str, &error); + + if (success) { + if (error) { + error_report_err(error); + } + g_assert_cmpint(rc, ==, 0); + } else { + error_free(error); + g_assert_cmpint(rc, <, 0); + } + if (exp_addr != NULL) { + g_assert_cmpstr(addr.host, ==, exp_addr->host); + g_assert_cmpstr(addr.port, ==, exp_addr->port); + /* Own members: */ + g_assert_cmpint(addr.has_numeric, ==, exp_addr->has_numeric); + g_assert_cmpint(addr.numeric, ==, exp_addr->numeric); + g_assert_cmpint(addr.has_to, ==, exp_addr->has_to); + g_assert_cmpint(addr.to, ==, exp_addr->to); + g_assert_cmpint(addr.has_ipv4, ==, exp_addr->has_ipv4); + g_assert_cmpint(addr.ipv4, ==, exp_addr->ipv4); + g_assert_cmpint(addr.has_ipv6, ==, exp_addr->has_ipv6); + g_assert_cmpint(addr.ipv6, ==, exp_addr->ipv6); + g_assert_cmpint(addr.has_keep_alive, ==, exp_addr->has_keep_alive); + g_assert_cmpint(addr.keep_alive, ==, exp_addr->keep_alive); +#ifdef HAVE_TCP_KEEPCNT + g_assert_cmpint(addr.has_keep_alive_count, ==, + exp_addr->has_keep_alive_count); + g_assert_cmpint(addr.keep_alive_count, ==, + exp_addr->keep_alive_count); +#endif +#ifdef HAVE_TCP_KEEPIDLE + g_assert_cmpint(addr.has_keep_alive_idle, ==, + exp_addr->has_keep_alive_idle); + g_assert_cmpint(addr.keep_alive_idle, ==, + exp_addr->keep_alive_idle); +#endif +#ifdef HAVE_TCP_KEEPINTVL + g_assert_cmpint(addr.has_keep_alive_interval, ==, + exp_addr->has_keep_alive_interval); + g_assert_cmpint(addr.keep_alive_interval, ==, + exp_addr->keep_alive_interval); +#endif +#ifdef HAVE_IPPROTO_MPTCP + g_assert_cmpint(addr.has_mptcp, ==, exp_addr->has_mptcp); + g_assert_cmpint(addr.mptcp, ==, exp_addr->mptcp); +#endif + } + + g_free(addr.host); + 
g_free(addr.port); +} + +static void test_inet_parse_nohost_good(void) +{ + char host[] = ""; + char port[] = "5000"; + InetSocketAddress exp_addr = { + .host = host, + .port = port, + }; + inet_parse_test_helper(":5000", &exp_addr, true); +} + +static void test_inet_parse_empty_bad(void) +{ + inet_parse_test_helper("", NULL, false); +} + +static void test_inet_parse_only_colon_bad(void) +{ + inet_parse_test_helper(":", NULL, false); +} + +static void test_inet_parse_ipv4_good(void) +{ + char host[] = "127.0.0.1"; + char port[] = "5000"; + InetSocketAddress exp_addr = { + .host = host, + .port = port, + }; + inet_parse_test_helper("127.0.0.1:5000", &exp_addr, true); +} + +static void test_inet_parse_ipv4_noport_bad(void) +{ + inet_parse_test_helper("127.0.0.1", NULL, false); +} + +static void test_inet_parse_ipv6_good(void) +{ + char host[] = "::1"; + char port[] = "5000"; + InetSocketAddress exp_addr = { + .host = host, + .port = port, + }; + inet_parse_test_helper("[::1]:5000", &exp_addr, true); +} + +static void test_inet_parse_ipv6_noend_bad(void) +{ + inet_parse_test_helper("[::1", NULL, false); +} + +static void test_inet_parse_ipv6_noport_bad(void) +{ + inet_parse_test_helper("[::1]:", NULL, false); +} + +static void test_inet_parse_ipv6_empty_bad(void) +{ + inet_parse_test_helper("[]:5000", NULL, false); +} + +static void test_inet_parse_hostname_good(void) +{ + char host[] = "localhost"; + char port[] = "5000"; + InetSocketAddress exp_addr = { + .host = host, + .port = port, + }; + inet_parse_test_helper("localhost:5000", &exp_addr, true); +} + +static void test_inet_parse_all_options_good(void) +{ + char host[] = "::1"; + char port[] = "5000"; + InetSocketAddress exp_addr = { + .host = host, + .port = port, + .has_numeric = true, + .numeric = true, + .has_to = true, + .to = 5006, + .has_ipv4 = true, + .ipv4 = false, + .has_ipv6 = true, + .ipv6 = true, + .has_keep_alive = true, + .keep_alive = true, +#ifdef HAVE_TCP_KEEPCNT + .has_keep_alive_count = true, + .keep_alive_count = 10, +#endif +#ifdef HAVE_TCP_KEEPIDLE + .has_keep_alive_idle = true, + .keep_alive_idle = 60, +#endif +#ifdef HAVE_TCP_KEEPINTVL + .has_keep_alive_interval = true, + .keep_alive_interval = 30, +#endif +#ifdef HAVE_IPPROTO_MPTCP + .has_mptcp = true, + .mptcp = false, +#endif + }; + inet_parse_test_helper( + "[::1]:5000,numeric=on,to=5006,ipv4=off,ipv6=on,keep-alive=on" +#ifdef HAVE_TCP_KEEPCNT + ",keep-alive-count=10" +#endif +#ifdef HAVE_TCP_KEEPIDLE + ",keep-alive-idle=60" +#endif +#ifdef HAVE_TCP_KEEPINTVL + ",keep-alive-interval=30" +#endif +#ifdef HAVE_IPPROTO_MPTCP + ",mptcp=off" +#endif + , &exp_addr, true); +} + +static void test_inet_parse_all_implicit_bool_good(void) +{ + char host[] = "::1"; + char port[] = "5000"; + InetSocketAddress exp_addr = { + .host = host, + .port = port, + .has_numeric = true, + .numeric = true, + .has_to = true, + .to = 5006, + .has_ipv4 = true, + .ipv4 = true, + .has_ipv6 = true, + .ipv6 = true, + .has_keep_alive = true, + .keep_alive = true, +#ifdef HAVE_IPPROTO_MPTCP + .has_mptcp = true, + .mptcp = true, +#endif + }; + inet_parse_test_helper( + "[::1]:5000,numeric,to=5006,ipv4,ipv6,keep-alive" +#ifdef HAVE_IPPROTO_MPTCP + ",mptcp" +#endif + , &exp_addr, true); +} + int main(int argc, char **argv) { bool has_ipv4, has_ipv6; @@ -377,6 +591,31 @@ int main(int argc, char **argv) test_socket_unix_abstract); #endif + g_test_add_func("/util/socket/inet-parse/nohost-good", + test_inet_parse_nohost_good); + g_test_add_func("/util/socket/inet-parse/empty-bad", + 
test_inet_parse_empty_bad); + g_test_add_func("/util/socket/inet-parse/only-colon-bad", + test_inet_parse_only_colon_bad); + g_test_add_func("/util/socket/inet-parse/ipv4-good", + test_inet_parse_ipv4_good); + g_test_add_func("/util/socket/inet-parse/ipv4-noport-bad", + test_inet_parse_ipv4_noport_bad); + g_test_add_func("/util/socket/inet-parse/ipv6-good", + test_inet_parse_ipv6_good); + g_test_add_func("/util/socket/inet-parse/ipv6-noend-bad", + test_inet_parse_ipv6_noend_bad); + g_test_add_func("/util/socket/inet-parse/ipv6-noport-bad", + test_inet_parse_ipv6_noport_bad); + g_test_add_func("/util/socket/inet-parse/ipv6-empty-bad", + test_inet_parse_ipv6_empty_bad); + g_test_add_func("/util/socket/inet-parse/hostname-good", + test_inet_parse_hostname_good); + g_test_add_func("/util/socket/inet-parse/all-options-good", + test_inet_parse_all_options_good); + g_test_add_func("/util/socket/inet-parse/all-bare-bool-good", + test_inet_parse_all_implicit_bool_good); + end: return g_test_run(); } diff --git a/tests/unit/test-visitor-serialization.c b/tests/unit/test-visitor-serialization.c index c2056c3eaa8..2d365999fc8 100644 --- a/tests/unit/test-visitor-serialization.c +++ b/tests/unit/test-visitor-serialization.c @@ -16,8 +16,8 @@ #include "test-qapi-visit.h" #include "qapi/error.h" -#include "qapi/qmp/qjson.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qjson.h" +#include "qobject/qstring.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qobject-output-visitor.h" #include "qapi/string-input-visitor.h" diff --git a/tests/unit/test-xs-node.c b/tests/unit/test-xs-node.c index ac94e7ed6c2..2f447a73fb8 100644 --- a/tests/unit/test-xs-node.c +++ b/tests/unit/test-xs-node.c @@ -212,7 +212,7 @@ static void compare_tx(gpointer key, gpointer val, gpointer opaque) printf("Comparison failure in TX %u after serdes:\n", tx_id); dump_ref("Original", t1->root, 0); dump_ref("Deserialised", t2->root, 0); - g_assert(0); + g_assert_not_reached(); } g_assert(t1->nr_nodes == t2->nr_nodes); } @@ -257,7 +257,7 @@ static void check_serdes(XenstoreImplState *s) printf("Comparison failure in main tree after serdes:\n"); dump_ref("Original", s->root, 0); dump_ref("Deserialised", s2->root, 0); - g_assert(0); + g_assert_not_reached(); } nr_transactions1 = g_hash_table_size(s->transactions); diff --git a/tests/unit/test-yank.c b/tests/unit/test-yank.c index e6c036a64d9..4acfb2f3f62 100644 --- a/tests/unit/test-yank.c +++ b/tests/unit/test-yank.c @@ -14,7 +14,7 @@ #include "qemu/module.h" #include "qemu/option.h" #include "chardev/char-fe.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qapi/error.h" #include "qapi/qapi-commands-char.h" #include "qapi/qapi-types-char.h" diff --git a/tests/vm/Makefile.include b/tests/vm/Makefile.include index 13ed80f72d8..14188bba1c6 100644 --- a/tests/vm/Makefile.include +++ b/tests/vm/Makefile.include @@ -64,23 +64,24 @@ endif @echo " vm-boot-ssh- - Boot guest and login via ssh" @echo @echo "Special variables:" - @echo " BUILD_TARGET=foo - Override the build target" - @echo " DEBUG=1 - Enable verbose output on host and interactive debugging" - @echo ' EXTRA_CONFIGURE_OPTS="..." 
- Pass to configure step' - @echo " J=[0..9]* - Override the -jN parameter for make commands" - @echo " LOG_CONSOLE=1 - Log console to file in: ~/.cache/qemu-vm " - @echo " USE_TCG=1 - Use TCG for cross-arch images" - @echo " QEMU=/path/to/qemu - Change path to QEMU binary" + @echo " BUILD_TARGET=foo - Override the build target" + @echo " DEBUG=1 - Enable verbose output on host and interactive debugging" + @echo " ROOT_USER=1 - Login as root user for interactive shell" + @echo ' EXTRA_CONFIGURE_OPTS="..." - Pass to configure step' + @echo " J=[0..9]* - Override the -jN parameter for make commands" + @echo " LOG_CONSOLE=1 - Log console to file in: ~/.cache/qemu-vm " + @echo " USE_TCG=1 - Use TCG for cross-arch images" + @echo " QEMU=/path/to/qemu - Change path to QEMU binary" ifeq ($(HAVE_PYTHON_YAML),yes) - @echo " QEMU_CONFIG=/path/conf.yml - Change path to VM configuration .yml file." + @echo " QEMU_CONFIG=/path/conf.yml - Change path to VM configuration .yml file." else @echo " (install python3-yaml to enable support for yaml file to configure a VM.)" endif - @echo " See conf_example_*.yml for file format details." - @echo " QEMU_IMG=/path/to/qemu-img - Change path to qemu-img tool" - @echo " QEMU_LOCAL=1 - Use QEMU binary local to this build." - @echo " TARGET_LIST=a,b,c - Override target list in builds" - @echo " V=1 - Enable verbose output on host and guest commands" + @echo " See conf_example_*.yml for file format details." + @echo " QEMU_IMG=/path/to/qemu-img - Change path to qemu-img tool" + @echo " QEMU_LOCAL=1 - Use QEMU binary local to this build." + @echo " TARGET_LIST=a,b,c - Override target list in builds" + @echo " V=1 - Enable verbose output on host and guest commands" vm-build-all: $(addprefix vm-build-, $(IMAGES)) @@ -141,6 +142,6 @@ vm-boot-ssh-%: $(IMAGES_DIR)/%.img $(VM_VENV) $(if $(EFI_AARCH64),--efi-aarch64 $(EFI_AARCH64)) \ $(if $(LOG_CONSOLE),--log-console) \ --image "$<" \ - --interactive \ + $(if $(ROOT_USER),--interactive-root,--interactive) \ false, \ " VM-BOOT-SSH $*") || true diff --git a/tests/vm/README b/tests/vm/README index f9c04cc0e75..14ac323309e 100644 --- a/tests/vm/README +++ b/tests/vm/README @@ -1 +1 @@ -See docs/devel/testing.rst for help. +See docs/devel/testing/main.rst for help. diff --git a/tests/vm/basevm.py b/tests/vm/basevm.py index 4a1af04b9a7..9e879e966a3 100644 --- a/tests/vm/basevm.py +++ b/tests/vm/basevm.py @@ -83,7 +83,7 @@ class BaseVM(object): # command to halt the guest, can be overridden by subclasses poweroff = "poweroff" # Time to wait for shutdown to finish. - shutdown_timeout_default = 30 + shutdown_timeout_default = 90 # enable IPv6 networking ipv6 = True # This is the timeout on the wait for console bytes. @@ -520,8 +520,7 @@ def get_qemu_path(arch, build_path=None): if "QEMU" in os.environ: qemu_path = os.environ["QEMU"] elif build_path: - qemu_path = os.path.join(build_path, arch + "-softmmu") - qemu_path = os.path.join(qemu_path, "qemu-system-" + arch) + qemu_path = os.path.join(build_path, "qemu-system-" + arch) else: # Default is to use system path for qemu. qemu_path = "qemu-system-" + arch @@ -613,8 +612,11 @@ def get_default_jobs(): parser.add_argument("--source-path", default=None, help="Path of source directory, "\ "for finding additional files. 
") - parser.add_argument("--interactive", "-I", action="store_true", - help="Interactively run command") + int_ops = parser.add_mutually_exclusive_group() + int_ops.add_argument("--interactive", "-I", action="store_true", + help="Interactively run command") + int_ops.add_argument("--interactive-root", action="store_true", + help="Interactively run command as root") parser.add_argument("--snapshot", "-s", action="store_true", help="run tests with a snapshot") parser.add_argument("--genisoimage", default="genisoimage", @@ -676,6 +678,8 @@ def main(vmcls, config=None): exitcode = 3 if args.interactive: vm.ssh() + elif args.interactive_root: + vm.ssh_root() if not args.snapshot: vm.graceful_shutdown() diff --git a/tests/vm/freebsd b/tests/vm/freebsd index 1247f40a385..2e96c9eba52 100755 --- a/tests/vm/freebsd +++ b/tests/vm/freebsd @@ -28,8 +28,8 @@ class FreeBSDVM(basevm.BaseVM): name = "freebsd" arch = "x86_64" - link = "https://download.freebsd.org/releases/CI-IMAGES/13.2-RELEASE/amd64/Latest/FreeBSD-13.2-RELEASE-amd64-BASIC-CI.raw.xz" - csum = "a4fb3b6c7b75dd4d58fb0d75e4caf72844bffe0ca00e66459c028b198ffb3c0e" + link = "https://download.freebsd.org/releases/CI-IMAGES/14.3-RELEASE/amd64/Latest/FreeBSD-14.3-RELEASE-amd64-BASIC-CI.raw.xz" + csum = "ec0f5a4bbe63aa50a725d9fee0f1931f850e9a21cbebdadb991df00f168d6805" size = "20G" BUILD_SCRIPT = """ @@ -39,7 +39,7 @@ class FreeBSDVM(basevm.BaseVM): mkdir src build; cd src; tar -xf /dev/vtbd1; cd ../build; - ../src/configure --python=python3.9 --extra-ldflags=-L/usr/local/lib \ + ../src/configure --extra-ldflags=-L/usr/local/lib \ --extra-cflags=-I/usr/local/include {configure_opts}; gmake --output-sync -j{jobs} {target} {verbose}; """ diff --git a/tests/vm/generated/freebsd.json b/tests/vm/generated/freebsd.json index 2a361cecd05..c03e1cd5863 100644 --- a/tests/vm/generated/freebsd.json +++ b/tests/vm/generated/freebsd.json @@ -5,7 +5,7 @@ "make": "/usr/local/bin/gmake", "ninja": "/usr/local/bin/ninja", "packaging_command": "pkg", - "pip3": "/usr/local/bin/pip-3.8", + "pip3": "/usr/local/bin/pip", "pkgs": [ "alsa-lib", "bash", @@ -13,7 +13,7 @@ "bzip2", "ca_root_nss", "capstone4", - "ccache", + "ccache4", "cmocka", "ctags", "curl", @@ -29,6 +29,7 @@ "gmake", "gnutls", "gsed", + "gtk-vnc", "gtk3", "json-c", "libepoxy", @@ -54,12 +55,14 @@ "py311-numpy", "py311-pillow", "py311-pip", + "py311-pyyaml", "py311-sphinx", "py311-sphinx_rtd_theme", "py311-tomli", - "py311-yaml", "python3", "rpm2cpio", + "rust", + "rust-bindgen-cli", "sdl2", "sdl2_image", "snappy", @@ -70,6 +73,7 @@ "usbredir", "virglrenderer", "vte3", + "vulkan-tools", "xorriso", "zstd" ], diff --git a/tests/vm/openbsd b/tests/vm/openbsd index 49cab087825..2ea86a01bad 100755 --- a/tests/vm/openbsd +++ b/tests/vm/openbsd @@ -22,8 +22,8 @@ class OpenBSDVM(basevm.BaseVM): name = "openbsd" arch = "x86_64" - link = "https://cdn.openbsd.org/pub/OpenBSD/7.5/amd64/install75.iso" - csum = "034435c6e27405d5a7fafb058162943c194eb793dafdc412c08d49bb56b3892a" + link = "https://cdn.openbsd.org/pub/OpenBSD/7.7/amd64/install77.iso" + csum = "da0106e39463f015524dca806f407c37a9bdd17e6dfffe533b06a2dd2edd8a27" size = "20G" pkgs = [ # tools @@ -32,7 +32,6 @@ class OpenBSDVM(basevm.BaseVM): "pkgconf", "bzip2", "xz", "ninja", - "py3-tomli", # gnu tools "bash", diff --git a/tools/i386/qemu-vmsr-helper.c b/tools/i386/qemu-vmsr-helper.c index a35dcb88a3f..5f19a48cbd2 100644 --- a/tools/i386/qemu-vmsr-helper.c +++ b/tools/i386/qemu-vmsr-helper.c @@ -71,7 +71,6 @@ static void compute_default_paths(void) static int 
is_intel_processor(void) { - int result; int ebx, ecx, edx; /* Execute CPUID instruction with eax=0 (basic identification) */ @@ -87,9 +86,7 @@ static int is_intel_processor(void) * 0x49656e69 = "ineI" * 0x6c65746e = "ntel" */ - result = (ebx == 0x756e6547) && (edx == 0x49656e69) && (ecx == 0x6c65746e); - - return result; + return (ebx == 0x756e6547) && (edx == 0x49656e69) && (ecx == 0x6c65746e); } static int is_rapl_enabled(void) diff --git a/trace-events b/trace-events index 9cb96f64c4a..3ec8a6c7202 100644 --- a/trace-events +++ b/trace-events @@ -30,13 +30,6 @@ breakpoint_insert(int cpu_index, uint64_t pc, int flags) "cpu=%d pc=0x%" PRIx64 breakpoint_remove(int cpu_index, uint64_t pc, int flags) "cpu=%d pc=0x%" PRIx64 " flags=0x%x" breakpoint_singlestep(int cpu_index, int enabled) "cpu=%d enable=%d" -# dma-helpers.c -dma_blk_io(void *dbs, void *bs, int64_t offset, bool to_dev) "dbs=%p bs=%p offset=%" PRId64 " to_dev=%d" -dma_aio_cancel(void *dbs) "dbs=%p" -dma_complete(void *dbs, int ret, void *cb) "dbs=%p ret=%d cb=%p" -dma_blk_cb(void *dbs, int ret) "dbs=%p ret=%d" -dma_map_wait(void *dbs) "dbs=%p" - # job.c job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)" job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)" diff --git a/trace/control-target.c b/trace/control-target.c index 97f21e476d2..57ceac21084 100644 --- a/trace/control-target.c +++ b/trace/control-target.c @@ -8,9 +8,6 @@ */ #include "qemu/osdep.h" -#include "qemu/lockable.h" -#include "cpu.h" -#include "trace/trace-root.h" #include "trace/control.h" diff --git a/trace/control.c b/trace/control.c index ef107829ac0..1c8c50064af 100644 --- a/trace/control.c +++ b/trace/control.c @@ -27,7 +27,6 @@ #include "qemu/error-report.h" #include "qemu/config-file.h" #include "monitor/monitor.h" -#include "trace/trace-root.h" int trace_events_enabled_count; diff --git a/trace/meson.build b/trace/meson.build index c3412dc0ba5..9c42a57a053 100644 --- a/trace/meson.build +++ b/trace/meson.build @@ -1,12 +1,10 @@ -system_ss.add(files('trace-hmp-cmds.c')) - -specific_ss.add(files('control-target.c')) +system_ss.add(files('control-target.c', 'trace-hmp-cmds.c')) trace_events_files = [] foreach item : [ '.' ] + trace_events_subdirs + qapi_trace_events if item in qapi_trace_events trace_events_file = item - group_name = item.full_path().split('/')[-1].underscorify() + group_name = fs.name(item).underscorify() else trace_events_file = meson.project_source_root() / item / 'trace-events' group_name = item == '.' ? 'root' : item.underscorify() @@ -59,10 +57,11 @@ foreach item : [ '.' ] + trace_events_subdirs + qapi_trace_events endif endforeach +cat = [ python, '-c', 'import fileinput; [print(line, end="") for line in fileinput.input()]', '@INPUT@' ] trace_events_all = custom_target('trace-events-all', output: 'trace-events-all', input: trace_events_files, - command: [ 'cat', '@INPUT@' ], + command: cat, capture: true, install: get_option('trace_backends') != [ 'nop' ], install_dir: qemu_datadir) diff --git a/trace/simple.c b/trace/simple.c index 18af590cf7b..c0aba00cb7f 100644 --- a/trace/simple.c +++ b/trace/simple.c @@ -366,7 +366,7 @@ void st_set_trace_file(const char *file) /* Type cast needed for Windows where getpid() returns an int. 
*/ trace_file_name = g_strdup_printf(CONFIG_TRACE_FILE "-" FMT_pid, (pid_t)getpid()); } else { - trace_file_name = g_strdup_printf("%s", file); + trace_file_name = g_strdup(file); } st_set_trace_file_enabled(saved_enable); diff --git a/trace/trace-hmp-cmds.c b/trace/trace-hmp-cmds.c index d38dd600dee..45f4335ff5d 100644 --- a/trace/trace-hmp-cmds.c +++ b/trace/trace-hmp-cmds.c @@ -27,7 +27,7 @@ #include "monitor/monitor.h" #include "qapi/error.h" #include "qapi/qapi-commands-trace.h" -#include "qapi/qmp/qdict.h" +#include "qobject/qdict.h" #include "trace/control.h" #ifdef CONFIG_TRACE_SIMPLE #include "trace/simple.h" diff --git a/ui/clipboard.c b/ui/clipboard.c index 132086eb135..ec00a0b8ec7 100644 --- a/ui/clipboard.c +++ b/ui/clipboard.c @@ -1,4 +1,5 @@ #include "qemu/osdep.h" +#include "system/runstate.h" #include "ui/clipboard.h" #include "trace.h" @@ -7,8 +8,62 @@ static NotifierList clipboard_notifiers = static QemuClipboardInfo *cbinfo[QEMU_CLIPBOARD_SELECTION__COUNT]; +static VMChangeStateEntry *cb_change_state_entry = NULL; + +static bool cb_reset_serial_on_resume = false; + +static const VMStateDescription vmstate_cbcontent = { + .name = "clipboard/content", + .version_id = 0, + .minimum_version_id = 0, + .fields = (const VMStateField[]) { + VMSTATE_BOOL(available, QemuClipboardContent), + VMSTATE_BOOL(requested, QemuClipboardContent), + VMSTATE_UINT32(size, QemuClipboardContent), + VMSTATE_VBUFFER_ALLOC_UINT32(data, QemuClipboardContent, 0, 0, size), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_cbinfo = { + .name = "clipboard", + .version_id = 0, + .minimum_version_id = 0, + .fields = (const VMStateField[]) { + VMSTATE_INT32(selection, QemuClipboardInfo), + VMSTATE_BOOL(has_serial, QemuClipboardInfo), + VMSTATE_UINT32(serial, QemuClipboardInfo), + VMSTATE_STRUCT_ARRAY(types, QemuClipboardInfo, QEMU_CLIPBOARD_TYPE__COUNT, 0, vmstate_cbcontent, QemuClipboardContent), + VMSTATE_END_OF_LIST() + } +}; + +static void qemu_clipboard_change_state(void *opaque, bool running, RunState state) +{ + int i; + + if (!running) { + return; + } + + if (cb_reset_serial_on_resume) { + qemu_clipboard_reset_serial(); + } + + for (i = 0; i < QEMU_CLIPBOARD_SELECTION__COUNT; i++) { + if (cbinfo[i]) { + qemu_clipboard_update(cbinfo[i]); + } + } + +} + void qemu_clipboard_peer_register(QemuClipboardPeer *peer) { + if (cb_change_state_entry == NULL) { + cb_change_state_entry = qemu_add_vm_change_state_handler(qemu_clipboard_change_state, NULL); + } + notifier_list_add(&clipboard_notifiers, &peer->notifier); } @@ -83,7 +138,9 @@ void qemu_clipboard_update(QemuClipboardInfo *info) } } - notifier_list_notify(&clipboard_notifiers, ¬ify); + if (runstate_is_running() || runstate_check(RUN_STATE_SUSPENDED)) { + notifier_list_notify(&clipboard_notifiers, ¬ify); + } if (cbinfo[info->selection] != info) { qemu_clipboard_info_unref(cbinfo[info->selection]); @@ -163,7 +220,12 @@ void qemu_clipboard_reset_serial(void) info->serial = 0; } } - notifier_list_notify(&clipboard_notifiers, ¬ify); + + if (runstate_is_running() || runstate_check(RUN_STATE_SUSPENDED)) { + notifier_list_notify(&clipboard_notifiers, ¬ify); + } else { + cb_reset_serial_on_resume = true; + } } void qemu_clipboard_set_data(QemuClipboardPeer *peer, diff --git a/ui/cocoa.m b/ui/cocoa.m index 4c2dd335323..23b7a736d70 100644 --- a/ui/cocoa.m +++ b/ui/cocoa.m @@ -34,15 +34,15 @@ #include "ui/console.h" #include "ui/input.h" #include "ui/kbd-state.h" -#include "sysemu/sysemu.h" -#include "sysemu/runstate.h" -#include 
"sysemu/runstate-action.h" -#include "sysemu/cpu-throttle.h" +#include "system/system.h" +#include "system/runstate.h" +#include "system/runstate-action.h" +#include "system/cpu-throttle.h" #include "qapi/error.h" #include "qapi/qapi-commands-block.h" #include "qapi/qapi-commands-machine.h" #include "qapi/qapi-commands-misc.h" -#include "sysemu/blockdev.h" +#include "system/blockdev.h" #include "qemu-version.h" #include "qemu/cutils.h" #include "qemu/main-loop.h" @@ -73,6 +73,8 @@ int height; } QEMUScreen; +@class QemuCocoaPasteboardTypeOwner; + static void cocoa_update(DisplayChangeListener *dcl, int x, int y, int w, int h); @@ -107,6 +109,7 @@ static void cocoa_switch(DisplayChangeListener *dcl, static NSInteger cbchangecount = -1; static QemuClipboardInfo *cbinfo; static QemuEvent cbevent; +static QemuCocoaPasteboardTypeOwner *cbowner; // Utility functions to run specified code block with the BQL held typedef void (^CodeBlock)(void); @@ -639,6 +642,9 @@ - (void) updateBounds [self setBoundsSize:NSMakeSize(screen.width, screen.height)]; } +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + - (void) updateUIInfoLocked { /* Must be called with the BQL, i.e. via updateUIInfo */ @@ -685,6 +691,8 @@ - (void) updateUIInfoLocked dpy_set_ui_info(dcl.con, &info, TRUE); } +#pragma clang diagnostic pop + - (void) updateUIInfo { if (!allow_events) { @@ -1321,8 +1329,10 @@ - (void) dealloc { COCOA_DEBUG("QemuCocoaAppController: dealloc\n"); - if (cocoaView) - [cocoaView release]; + [cocoaView release]; + [cbowner release]; + cbowner = nil; + [super dealloc]; } @@ -1938,8 +1948,6 @@ - (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)t @end -static QemuCocoaPasteboardTypeOwner *cbowner; - static void cocoa_clipboard_notify(Notifier *notifier, void *data); static void cocoa_clipboard_request(QemuClipboardInfo *info, QemuClipboardType type); @@ -2002,43 +2010,8 @@ static void cocoa_clipboard_request(QemuClipboardInfo *info, } } -/* - * The startup process for the OSX/Cocoa UI is complicated, because - * OSX insists that the UI runs on the initial main thread, and so we - * need to start a second thread which runs the qemu_default_main(): - * in main(): - * in cocoa_display_init(): - * assign cocoa_main to qemu_main - * create application, menus, etc - * in cocoa_main(): - * create qemu-main thread - * enter OSX run loop - */ - -static void *call_qemu_main(void *opaque) -{ - int status; - - COCOA_DEBUG("Second thread: calling qemu_default_main()\n"); - bql_lock(); - status = qemu_default_main(); - bql_unlock(); - COCOA_DEBUG("Second thread: qemu_default_main() returned, exiting\n"); - [cbowner release]; - exit(status); -} - static int cocoa_main(void) { - QemuThread thread; - - COCOA_DEBUG("Entered %s()\n", __func__); - - bql_unlock(); - qemu_thread_create(&thread, "qemu_main", call_qemu_main, - NULL, QEMU_THREAD_DETACHED); - - // Start the main event loop COCOA_DEBUG("Main thread: entering OSX run loop\n"); [NSApp run]; COCOA_DEBUG("Main thread: left OSX run loop, which should never happen\n"); @@ -2120,8 +2093,6 @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts) COCOA_DEBUG("qemu_cocoa: cocoa_display_init\n"); - qemu_main = cocoa_main; - // Pull this console process up to being a fully-fledged graphical // app with a menubar and Dock icon ProcessSerialNumber psn = { 0, kCurrentProcess }; @@ -2185,6 +2156,12 @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts) 
qemu_clipboard_peer_register(&cbpeer); [pool release]; + + /* + * The Cocoa UI will run the NSApplication runloop on the main thread + * rather than the default Core Foundation one. + */ + qemu_main = cocoa_main; } static QemuDisplay qemu_display_cocoa = { diff --git a/ui/console-gl.c b/ui/console-gl.c index 103b954017f..403fc36fbdf 100644 --- a/ui/console-gl.c +++ b/ui/console-gl.c @@ -25,6 +25,7 @@ * THE SOFTWARE. */ #include "qemu/osdep.h" +#include "qemu/error-report.h" #include "ui/console.h" #include "ui/shader.h" @@ -96,6 +97,53 @@ void surface_gl_create_texture(QemuGLShader *gls, glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); } +bool surface_gl_create_texture_from_fd(DisplaySurface *surface, + int fd, GLuint *texture, + GLuint *mem_obj) +{ + unsigned long size = surface_stride(surface) * surface_height(surface); + GLenum err = glGetError(); + *texture = 0; + *mem_obj = 0; + + if (!epoxy_has_gl_extension("GL_EXT_memory_object") || + !epoxy_has_gl_extension("GL_EXT_memory_object_fd")) { + error_report("spice: required OpenGL extensions not supported: " + "GL_EXT_memory_object and GL_EXT_memory_object_fd"); + return false; + } + +#ifdef GL_EXT_memory_object_fd + glCreateMemoryObjectsEXT(1, mem_obj); + glImportMemoryFdEXT(*mem_obj, size, GL_HANDLE_TYPE_OPAQUE_FD_EXT, fd); + + err = glGetError(); + if (err != GL_NO_ERROR) { + error_report("spice: cannot import memory object from fd"); + goto cleanup_mem; + } + + glGenTextures(1, texture); + glBindTexture(GL_TEXTURE_2D, *texture); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_TILING_EXT, GL_LINEAR_TILING_EXT); + glTexStorageMem2DEXT(GL_TEXTURE_2D, 1, GL_RGBA8, surface_width(surface), + surface_height(surface), *mem_obj, 0); + err = glGetError(); + if (err != GL_NO_ERROR) { + error_report("spice: cannot create texture from memory object"); + goto cleanup_tex_and_mem; + } + return true; + +cleanup_tex_and_mem: + glDeleteTextures(1, texture); +cleanup_mem: + glDeleteMemoryObjectsEXT(1, mem_obj); + +#endif + return false; +} + void surface_gl_update_texture(QemuGLShader *gls, DisplaySurface *surface, int x, int y, int w, int h) @@ -136,6 +184,12 @@ void surface_gl_destroy_texture(QemuGLShader *gls, } glDeleteTextures(1, &surface->texture); surface->texture = 0; +#ifdef GL_EXT_memory_object_fd + if (surface->mem_obj) { + glDeleteMemoryObjectsEXT(1, &surface->mem_obj); + surface->mem_obj = 0; + } +#endif } void surface_gl_setup_viewport(QemuGLShader *gls, diff --git a/ui/console-vc.c b/ui/console-vc.c index 8393d532e7f..830842064d6 100644 --- a/ui/console-vc.c +++ b/ui/console-vc.c @@ -42,6 +42,8 @@ enum TTYState { TTY_STATE_NORM, TTY_STATE_ESC, TTY_STATE_CSI, + TTY_STATE_G0, + TTY_STATE_G1, }; typedef struct QemuTextConsole { @@ -88,6 +90,7 @@ struct VCChardev { int esc_params[MAX_ESC_PARAMS]; int nb_esc_params; TextAttributes t_attrib; /* currently active text attributes */ + TextAttributes t_attrib_saved; int x_saved, y_saved; }; typedef struct VCChardev VCChardev; @@ -615,10 +618,9 @@ static void vc_put_one(VCChardev *vc, int ch) static void vc_respond_str(VCChardev *vc, const char *buf) { - while (*buf) { - vc_put_one(vc, *buf); - buf++; - } + QemuTextConsole *s = vc->console; + + qemu_chr_be_write(s->chr, (const uint8_t *)buf, strlen(buf)); } /* set cursor, checking bounds */ @@ -643,12 +645,119 @@ static void vc_set_cursor(VCChardev *vc, int x, int y) s->y = y; } +/** + * vc_csi_P() - (DCH) deletes one or more characters from the cursor + * position to the right. 
As characters are deleted, the remaining + * characters between the cursor and right margin move to the + * left. Character attributes move with the characters. + */ +static void vc_csi_P(struct VCChardev *vc, unsigned int nr) +{ + QemuTextConsole *s = vc->console; + TextCell *c1, *c2; + unsigned int x1, x2, y; + unsigned int end, len; + + if (!nr) { + nr = 1; + } + if (nr > s->width - s->x) { + nr = s->width - s->x; + if (!nr) { + return; + } + } + + x1 = s->x; + x2 = s->x + nr; + len = s->width - x2; + if (len) { + y = (s->y_base + s->y) % s->total_height; + c1 = &s->cells[y * s->width + x1]; + c2 = &s->cells[y * s->width + x2]; + memmove(c1, c2, len * sizeof(*c1)); + for (end = x1 + len; x1 < end; x1++) { + vc_update_xy(vc, x1, s->y); + } + } + /* Clear the rest */ + for (; x1 < s->width; x1++) { + vc_clear_xy(vc, x1, s->y); + } +} + +/** + * vc_csi_at() - (ICH) inserts `nr` blank characters with the default + * character attribute. The cursor remains at the beginning of the + * blank characters. Text between the cursor and right margin moves to + * the right. Characters scrolled past the right margin are lost. + */ +static void vc_csi_at(struct VCChardev *vc, unsigned int nr) +{ + QemuTextConsole *s = vc->console; + TextCell *c1, *c2; + unsigned int x1, x2, y; + unsigned int end, len; + + if (!nr) { + nr = 1; + } + if (nr > s->width - s->x) { + nr = s->width - s->x; + if (!nr) { + return; + } + } + + x1 = s->x + nr; + x2 = s->x; + len = s->width - x1; + if (len) { + y = (s->y_base + s->y) % s->total_height; + c1 = &s->cells[y * s->width + x1]; + c2 = &s->cells[y * s->width + x2]; + memmove(c1, c2, len * sizeof(*c1)); + for (end = x1 + len; x1 < end; x1++) { + vc_update_xy(vc, x1, s->y); + } + } + /* Insert blanks */ + for (x1 = s->x; x1 < s->x + nr; x1++) { + vc_clear_xy(vc, x1, s->y); + } +} + +/** + * vc_save_cursor() - saves cursor position and character attributes. + */ +static void vc_save_cursor(VCChardev *vc) +{ + QemuTextConsole *s = vc->console; + + vc->x_saved = s->x; + vc->y_saved = s->y; + vc->t_attrib_saved = vc->t_attrib; +} + +/** + * vc_restore_cursor() - restores cursor position and character + * attributes from saved state. 
+ */ +static void vc_restore_cursor(VCChardev *vc) +{ + QemuTextConsole *s = vc->console; + + s->x = vc->x_saved; + s->y = vc->y_saved; + vc->t_attrib = vc->t_attrib_saved; +} + static void vc_putchar(VCChardev *vc, int ch) { QemuTextConsole *s = vc->console; int i; int x, y; - char response[40]; + g_autofree char *response = NULL; switch(vc->state) { case TTY_STATE_NORM: @@ -694,6 +803,16 @@ static void vc_putchar(VCChardev *vc, int ch) vc->esc_params[i] = 0; vc->nb_esc_params = 0; vc->state = TTY_STATE_CSI; + } else if (ch == '(') { + vc->state = TTY_STATE_G0; + } else if (ch == ')') { + vc->state = TTY_STATE_G1; + } else if (ch == '7') { + vc_save_cursor(vc); + vc->state = TTY_STATE_NORM; + } else if (ch == '8') { + vc_restore_cursor(vc); + vc->state = TTY_STATE_NORM; } else { vc->state = TTY_STATE_NORM; } @@ -810,6 +929,9 @@ static void vc_putchar(VCChardev *vc, int ch) break; } break; + case 'P': + vc_csi_P(vc, vc->esc_params[0]); + break; case 'm': vc_handle_escape(vc); break; @@ -821,22 +943,20 @@ static void vc_putchar(VCChardev *vc, int ch) break; case 6: /* report cursor position */ - sprintf(response, "\033[%d;%dR", - (s->y_base + s->y) % s->total_height + 1, - s->x + 1); + response = g_strdup_printf("\033[%d;%dR", + s->y + 1, s->x + 1); vc_respond_str(vc, response); break; } break; case 's': - /* save cursor position */ - vc->x_saved = s->x; - vc->y_saved = s->y; + vc_save_cursor(vc); break; case 'u': - /* restore cursor position */ - s->x = vc->x_saved; - s->y = vc->y_saved; + vc_restore_cursor(vc); + break; + case '@': + vc_csi_at(vc, vc->esc_params[0]); break; default: trace_console_putchar_unhandled(ch); @@ -844,6 +964,16 @@ static void vc_putchar(VCChardev *vc, int ch) } break; } + break; + case TTY_STATE_G0: /* set character sets */ + case TTY_STATE_G1: /* set character sets */ + switch (ch) { + case 'B': + /* Latin-1 map */ + break; + } + vc->state = TTY_STATE_NORM; + break; } } @@ -906,7 +1036,7 @@ qemu_text_console_finalize(Object *obj) } static void -qemu_text_console_class_init(ObjectClass *oc, void *data) +qemu_text_console_class_init(ObjectClass *oc, const void *data) { if (!cursor_timer) { cursor_timer = timer_new_ms(QEMU_CLOCK_REALTIME, cursor_timer_cb, NULL); @@ -935,7 +1065,7 @@ qemu_fixed_text_console_finalize(Object *obj) } static void -qemu_fixed_text_console_class_init(ObjectClass *oc, void *data) +qemu_fixed_text_console_class_init(ObjectClass *oc, const void *data) { } @@ -1051,7 +1181,7 @@ static void vc_chr_parse(QemuOpts *opts, ChardevBackend *backend, Error **errp) } } -static void char_vc_class_init(ObjectClass *oc, void *data) +static void char_vc_class_init(ObjectClass *oc, const void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); @@ -1073,6 +1203,6 @@ void qemu_console_early_init(void) { /* set the default vc driver */ if (!object_class_by_name(TYPE_CHARDEV_VC)) { - type_register(&char_vc_type_info); + type_register_static(&char_vc_type_info); } } diff --git a/ui/console.c b/ui/console.c index e8f0083af71..2d00828c538 100644 --- a/ui/console.c +++ b/ui/console.c @@ -35,8 +35,9 @@ #include "qemu/option.h" #include "chardev/char.h" #include "trace.h" -#include "exec/memory.h" +#include "system/memory.h" #include "qom/object.h" +#include "qemu/memfd.h" #include "console-priv.h" @@ -400,7 +401,7 @@ qemu_console_finalize(Object *obj) } static void -qemu_console_class_init(ObjectClass *oc, void *data) +qemu_console_class_init(ObjectClass *oc, const void *data) { } @@ -436,7 +437,7 @@ qemu_graphic_console_prop_get_head(Object *obj, Visitor *v, const char 
*name, } static void -qemu_graphic_console_class_init(ObjectClass *oc, void *data) +qemu_graphic_console_class_init(ObjectClass *oc, const void *data) { object_class_property_add_link(oc, "device", TYPE_DEVICE, offsetof(QemuGraphicConsole, device), @@ -452,60 +453,26 @@ qemu_graphic_console_init(Object *obj) { } -#ifdef WIN32 -void qemu_displaysurface_win32_set_handle(DisplaySurface *surface, - HANDLE h, uint32_t offset) +void qemu_displaysurface_set_share_handle(DisplaySurface *surface, + qemu_pixman_shareable handle, + uint32_t offset) { - assert(!surface->handle); + assert(surface->share_handle == SHAREABLE_NONE); - surface->handle = h; - surface->handle_offset = offset; -} - -static void -win32_pixman_image_destroy(pixman_image_t *image, void *data) -{ - DisplaySurface *surface = data; - - if (!surface->handle) { - return; - } + surface->share_handle = handle; + surface->share_handle_offset = offset; - assert(surface->handle_offset == 0); - - qemu_win32_map_free( - pixman_image_get_data(surface->image), - surface->handle, - &error_warn - ); } -#endif DisplaySurface *qemu_create_displaysurface(int width, int height) { - DisplaySurface *surface; - void *bits = NULL; -#ifdef WIN32 - HANDLE handle = NULL; -#endif - trace_displaysurface_create(width, height); -#ifdef WIN32 - bits = qemu_win32_map_alloc(width * height * 4, &handle, &error_abort); -#endif - - surface = qemu_create_displaysurface_from( + return qemu_create_displaysurface_from( width, height, PIXMAN_x8r8g8b8, - width * 4, bits + width * 4, NULL ); - surface->flags = QEMU_ALLOCATED_FLAG; - -#ifdef WIN32 - qemu_displaysurface_win32_set_handle(surface, handle, 0); -#endif - return surface; } DisplaySurface *qemu_create_displaysurface_from(int width, int height, @@ -515,15 +482,25 @@ DisplaySurface *qemu_create_displaysurface_from(int width, int height, DisplaySurface *surface = g_new0(DisplaySurface, 1); trace_displaysurface_create_from(surface, width, height, format); - surface->image = pixman_image_create_bits(format, - width, height, - (void *)data, linesize); - assert(surface->image != NULL); -#ifdef WIN32 - pixman_image_set_destroy_function(surface->image, - win32_pixman_image_destroy, surface); -#endif + surface->share_handle = SHAREABLE_NONE; + + if (data) { + surface->image = pixman_image_create_bits(format, + width, height, + (void *)data, linesize); + } else { + qemu_pixman_image_new_shareable(&surface->image, + &surface->share_handle, + "displaysurface", + format, + width, + height, + linesize, + &error_abort); + surface->flags = QEMU_ALLOCATED_FLAG; + } + assert(surface->image != NULL); return surface; } @@ -532,6 +509,7 @@ DisplaySurface *qemu_create_displaysurface_pixman(pixman_image_t *image) DisplaySurface *surface = g_new0(DisplaySurface, 1); trace_displaysurface_create_pixman(surface); + surface->share_handle = SHAREABLE_NONE; surface->image = pixman_image_ref(image); return surface; @@ -1182,7 +1160,7 @@ DisplayState *init_displaystate(void) * all QemuConsoles are created and the order / numbering * doesn't change any more */ name = g_strdup_printf("console[%d]", con->index); - object_property_add_child(container_get(object_get_root(), "/backend"), + object_property_add_child(object_get_container("backend"), name, OBJECT(con)); g_free(name); } @@ -1408,9 +1386,7 @@ char *qemu_console_get_label(QemuConsole *con) object_get_typename(c->device), c->head); } else { - return g_strdup_printf("%s", dev->id ? - dev->id : - object_get_typename(c->device)); + return g_strdup(dev->id ? 
: object_get_typename(c->device)); } } return g_strdup("VGA"); @@ -1632,4 +1608,9 @@ void qemu_display_help(void) printf("%s\n", DisplayType_str(dpys[idx]->type)); } } + printf("\n" + "Some display backends support suboptions, which can be set with\n" + " -display backend,option=value,option=value...\n" + "For a short list of the suboptions for each display, see the " + "top-level -help output; more detail is in the documentation.\n"); } diff --git a/ui/curses.c b/ui/curses.c index ec61615f7c1..161f78c35c3 100644 --- a/ui/curses.c +++ b/ui/curses.c @@ -36,9 +36,9 @@ #include "qemu/module.h" #include "ui/console.h" #include "ui/input.h" -#include "sysemu/sysemu.h" +#include "system/system.h" -#if defined(__APPLE__) || defined(__OpenBSD__) +#ifdef __APPLE__ #define _XOPEN_SOURCE_EXTENDED 1 #endif @@ -265,7 +265,8 @@ static int curses2foo(const int _curses2foo[], const int _curseskey2foo[], static void curses_refresh(DisplayChangeListener *dcl) { - int chr, keysym, keycode, keycode_alt; + wint_t chr = 0; + int keysym, keycode, keycode_alt; enum maybe_keycode maybe_keycode = CURSES_KEYCODE; curses_winch_check(); @@ -284,8 +285,9 @@ static void curses_refresh(DisplayChangeListener *dcl) /* while there are any pending key strokes to process */ chr = console_getch(&maybe_keycode); - if (chr == -1) + if (chr == WEOF) { break; + } #ifdef KEY_RESIZE /* this shouldn't occur when we use a custom SIGWINCH handler */ @@ -304,9 +306,9 @@ static void curses_refresh(DisplayChangeListener *dcl) /* alt or esc key */ if (keycode == 1) { enum maybe_keycode next_maybe_keycode = CURSES_KEYCODE; - int nextchr = console_getch(&next_maybe_keycode); + wint_t nextchr = console_getch(&next_maybe_keycode); - if (nextchr != -1) { + if (nextchr != WEOF) { chr = nextchr; maybe_keycode = next_maybe_keycode; keycode_alt = ALT; diff --git a/ui/cursor.c b/ui/cursor.c index dd3853320d7..6e23244fbe6 100644 --- a/ui/cursor.c +++ b/ui/cursor.c @@ -197,30 +197,6 @@ void cursor_set_mono(QEMUCursor *c, } } -void cursor_get_mono_image(QEMUCursor *c, int foreground, uint8_t *image) -{ - uint32_t *data = c->data; - uint8_t bit; - int x,y,bpl; - - bpl = cursor_get_mono_bpl(c); - memset(image, 0, bpl * c->height); - for (y = 0; y < c->height; y++) { - bit = 0x80; - for (x = 0; x < c->width; x++, data++) { - if (((*data & 0xff000000) == 0xff000000) && - ((*data & 0x00ffffff) == foreground)) { - image[x/8] |= bit; - } - bit >>= 1; - if (bit == 0) { - bit = 0x80; - } - } - image += bpl; - } -} - void cursor_get_mono_mask(QEMUCursor *c, int transparent, uint8_t *mask) { uint32_t *data = c->data; diff --git a/ui/dbus-chardev.c b/ui/dbus-chardev.c index 1d3a7122a11..d05dddaf81b 100644 --- a/ui/dbus-chardev.c +++ b/ui/dbus-chardev.c @@ -106,7 +106,7 @@ dbus_chardev_init(DBusDisplay *dpy) dpy->notifier.notify = dbus_display_on_notify; dbus_display_notifier_add(&dpy->notifier); - object_child_foreach(container_get(object_get_root(), "/chardevs"), + object_child_foreach(object_get_container("chardevs"), dbus_display_chardev_foreach, dpy); } @@ -269,7 +269,7 @@ dbus_chr_parse(QemuOpts *opts, ChardevBackend *backend, } static void -char_dbus_class_init(ObjectClass *oc, void *data) +char_dbus_class_init(ObjectClass *oc, const void *data) { DBusChardevClass *klass = DBUS_CHARDEV_CLASS(oc); ChardevClass *cc = CHARDEV_CLASS(oc); diff --git a/ui/dbus-clipboard.c b/ui/dbus-clipboard.c index fbb043abca2..6787a776687 100644 --- a/ui/dbus-clipboard.c +++ b/ui/dbus-clipboard.c @@ -26,7 +26,7 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" 
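/*
 * Illustrative aside, not part of the patch: why the curses input loop
 * above now stores console_getch()'s result in a wint_t and compares it
 * against WEOF instead of -1.  wint_t is an unsigned type on common libc
 * implementations, so an "== -1" test only works through implicit integer
 * conversion and is not guaranteed to match the end-of-input sentinel on
 * every platform; WEOF is the value actually defined for wide-character
 * I/O.  The check() helper below is hypothetical and exists only for this
 * demonstration.
 */
#include <stdio.h>
#include <wchar.h>

static void check(wint_t chr)
{
    if (chr == WEOF) {
        /* portable "no more input" test */
        puts("no pending input");
    } else {
        printf("got character U+%04X\n", (unsigned int)chr);
    }
}

int main(void)
{
    check(WEOF);
    check(L'q');
    return 0;
}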
#include "qom/object_interfaces.h" -#include "sysemu/sysemu.h" +#include "system/system.h" #include "qapi/error.h" #include "trace.h" diff --git a/ui/dbus-console.c b/ui/dbus-console.c index 578b67f62b1..85e215ef233 100644 --- a/ui/dbus-console.c +++ b/ui/dbus-console.c @@ -41,7 +41,7 @@ struct _DBusDisplayConsole { DisplayChangeListener dcl; DBusDisplay *display; - GHashTable *listeners; + GPtrArray *listeners; QemuDBusDisplay1Console *iface; QemuDBusDisplay1Keyboard *iface_kbd; @@ -142,8 +142,7 @@ dbus_display_console_init(DBusDisplayConsole *object) { DBusDisplayConsole *ddc = DBUS_DISPLAY_CONSOLE(object); - ddc->listeners = g_hash_table_new_full(g_str_hash, g_str_equal, - NULL, g_object_unref); + ddc->listeners = g_ptr_array_new_with_free_func(g_object_unref); ddc->dcl.ops = &dbus_console_dcl_ops; } @@ -157,7 +156,7 @@ dbus_display_console_dispose(GObject *object) g_clear_object(&ddc->iface_mouse); g_clear_object(&ddc->iface_kbd); g_clear_object(&ddc->iface); - g_clear_pointer(&ddc->listeners, g_hash_table_unref); + g_clear_pointer(&ddc->listeners, g_ptr_array_unref); g_clear_pointer(&ddc->kbd, qkbd_state_free); G_OBJECT_CLASS(dbus_display_console_parent_class)->dispose(object); @@ -179,7 +178,7 @@ listener_vanished_cb(DBusDisplayListener *listener) trace_dbus_listener_vanished(name); - g_hash_table_remove(ddc->listeners, name); + g_ptr_array_remove_fast(ddc->listeners, listener); qkbd_state_lift_all_keys(ddc->kbd); } @@ -267,16 +266,6 @@ dbus_console_register_listener(DBusDisplayConsole *ddc, DBusDisplayListener *listener; int fd; - if (sender && g_hash_table_contains(ddc->listeners, sender)) { - g_dbus_method_invocation_return_error( - invocation, - DBUS_DISPLAY_ERROR, - DBUS_DISPLAY_ERROR_INVALID, - "`%s` is already registered!", - sender); - return DBUS_METHOD_INVOCATION_HANDLED; - } - #ifdef G_OS_WIN32 if (!dbus_win32_import_socket(invocation, arg_listener, &fd)) { return DBUS_METHOD_INVOCATION_HANDLED; @@ -316,10 +305,16 @@ dbus_console_register_listener(DBusDisplayConsole *ddc, #endif ); + GDBusConnectionFlags flags = + G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER; +#ifdef WIN32 + flags |= G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_ALLOW_ANONYMOUS; +#endif + listener_conn = g_dbus_connection_new_sync( G_IO_STREAM(socket_conn), guid, - G_DBUS_CONNECTION_FLAGS_AUTHENTICATION_SERVER, + flags, NULL, NULL, &err); if (err) { error_report("Failed to setup peer connection: %s", err->message); @@ -331,9 +326,7 @@ dbus_console_register_listener(DBusDisplayConsole *ddc, return DBUS_METHOD_INVOCATION_HANDLED; } - g_hash_table_insert(ddc->listeners, - (gpointer)dbus_display_listener_get_bus_name(listener), - listener); + g_ptr_array_add(ddc->listeners, listener); g_object_connect(listener_conn, "swapped-signal::closed", listener_vanished_cb, listener, NULL); diff --git a/ui/dbus-display1.xml b/ui/dbus-display1.xml index ce35d64eea1..4a41a7e0f34 100644 --- a/ui/dbus-display1.xml +++ b/ui/dbus-display1.xml @@ -469,6 +469,53 @@ + + + + + + + + + + + + + + + + + + + + + + + + @@ -513,6 +561,7 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + load ev->value + * ev->value = old value | EV_FREE + * cond_broadcast() + * unlock(); } + * } if (done == 0) + * // qemu_event_wait() not called + */ + qatomic_set(&ev->value, qatomic_load_acquire(&ev->value) | EV_FREE); +#endif +} + +void qemu_event_wait(QemuEvent *ev) +{ + assert(ev->initialized); + +#ifdef HAVE_FUTEX + while (true) { + /* + * qemu_event_wait must synchronize with qemu_event_set even if it does + * not go down the slow path, so this 
load-acquire is needed that + * synchronizes with the first memory barrier in qemu_event_set(). + */ + unsigned value = qatomic_load_acquire(&ev->value); + if (value == EV_SET) { + break; + } + + if (value == EV_FREE) { + /* + * Leave the event reset and tell qemu_event_set that there are + * waiters. No need to retry, because there cannot be a concurrent + * busy->free transition. After the CAS, the event will be either + * set or busy. + * + * This cmpxchg doesn't have particular ordering requirements if it + * succeeds (moving the store earlier can only cause + * qemu_event_set() to issue _more_ wakeups), the failing case needs + * acquire semantics like the load above. + */ + if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) { + break; + } + } + + /* + * This is the final check for a concurrent set, so it does need + * a smp_mb() pairing with the second barrier of qemu_event_set(). + * The barrier is inside the FUTEX_WAIT system call. + */ + qemu_futex_wait(ev, EV_BUSY); + } +#else + pthread_mutex_lock(&ev->lock); + while (qatomic_read(&ev->value) != EV_SET) { + pthread_cond_wait(&ev->cond, &ev->lock); + } + pthread_mutex_unlock(&ev->lock); +#endif +} diff --git a/util/fdmon-epoll.c b/util/fdmon-epoll.c index c6413cb18fe..9fb8800dde8 100644 --- a/util/fdmon-epoll.c +++ b/util/fdmon-epoll.c @@ -5,6 +5,7 @@ #include "qemu/osdep.h" #include +#include "qemu/lockcnt.h" #include "qemu/rcu_queue.h" #include "aio-posix.h" diff --git a/util/fifo8.c b/util/fifo8.c index 1ffa19d900e..a26da66ad2c 100644 --- a/util/fifo8.c +++ b/util/fifo8.c @@ -71,18 +71,27 @@ uint8_t fifo8_pop(Fifo8 *fifo) return ret; } -static const uint8_t *fifo8_peekpop_buf(Fifo8 *fifo, uint32_t max, - uint32_t *numptr, bool do_pop) +uint8_t fifo8_peek(Fifo8 *fifo) +{ + assert(fifo->num > 0); + return fifo->data[fifo->head]; +} + +static const uint8_t *fifo8_peekpop_bufptr(Fifo8 *fifo, uint32_t max, + uint32_t skip, uint32_t *numptr, + bool do_pop) { uint8_t *ret; - uint32_t num; + uint32_t num, head; assert(max > 0 && max <= fifo->num); - num = MIN(fifo->capacity - fifo->head, max); - ret = &fifo->data[fifo->head]; + assert(skip <= fifo->num); + head = (fifo->head + skip) % fifo->capacity; + num = MIN(fifo->capacity - head, max); + ret = &fifo->data[head]; if (do_pop) { - fifo->head += num; + fifo->head = head + num; fifo->head %= fifo->capacity; fifo->num -= num; } @@ -94,15 +103,16 @@ static const uint8_t *fifo8_peekpop_buf(Fifo8 *fifo, uint32_t max, const uint8_t *fifo8_peek_bufptr(Fifo8 *fifo, uint32_t max, uint32_t *numptr) { - return fifo8_peekpop_buf(fifo, max, numptr, false); + return fifo8_peekpop_bufptr(fifo, max, 0, numptr, false); } const uint8_t *fifo8_pop_bufptr(Fifo8 *fifo, uint32_t max, uint32_t *numptr) { - return fifo8_peekpop_buf(fifo, max, numptr, true); + return fifo8_peekpop_bufptr(fifo, max, 0, numptr, true); } -uint32_t fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen) +static uint32_t fifo8_peekpop_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen, + bool do_pop) { const uint8_t *buf; uint32_t n1, n2 = 0; @@ -113,7 +123,7 @@ uint32_t fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen) } len = destlen; - buf = fifo8_pop_bufptr(fifo, len, &n1); + buf = fifo8_peekpop_bufptr(fifo, len, 0, &n1, do_pop); if (dest) { memcpy(dest, buf, n1); } @@ -122,7 +132,7 @@ uint32_t fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen) len -= n1; len = MIN(len, fifo8_num_used(fifo)); if (len) { - buf = fifo8_pop_bufptr(fifo, len, &n2); + buf = fifo8_peekpop_bufptr(fifo, len, do_pop ? 
0 : n1, &n2, do_pop); if (dest) { memcpy(&dest[n1], buf, n2); } @@ -131,6 +141,16 @@ uint32_t fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen) return n1 + n2; } +uint32_t fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen) +{ + return fifo8_peekpop_buf(fifo, dest, destlen, true); +} + +uint32_t fifo8_peek_buf(Fifo8 *fifo, uint8_t *dest, uint32_t destlen) +{ + return fifo8_peekpop_buf(fifo, dest, destlen, false); +} + void fifo8_drop(Fifo8 *fifo, uint32_t len) { len -= fifo8_pop_buf(fifo, NULL, len); diff --git a/util/hbitmap.c b/util/hbitmap.c index 6d6e1b595d9..d9a1dabc630 100644 --- a/util/hbitmap.c +++ b/util/hbitmap.c @@ -949,7 +949,7 @@ char *hbitmap_sha256(const HBitmap *bitmap, Error **errp) size_t size = bitmap->sizes[HBITMAP_LEVELS - 1] * sizeof(unsigned long); char *data = (char *)bitmap->levels[HBITMAP_LEVELS - 1]; char *hash = NULL; - qcrypto_hash_digest(QCRYPTO_HASH_ALG_SHA256, data, size, &hash, errp); + qcrypto_hash_digest(QCRYPTO_HASH_ALGO_SHA256, data, size, &hash, errp); return hash; } diff --git a/util/hexdump.c b/util/hexdump.c index ae0d4992dcf..f29ffceb746 100644 --- a/util/hexdump.c +++ b/util/hexdump.c @@ -15,6 +15,7 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" +#include "qemu/host-utils.h" static inline char hexdump_nibble(unsigned x) { @@ -97,3 +98,20 @@ void qemu_hexdump(FILE *fp, const char *prefix, } } + +void qemu_hexdump_to_buffer(char *restrict buffer, size_t buffer_size, + const uint8_t *restrict data, size_t data_size) +{ + size_t i; + uint64_t required_buffer_size; + bool overflow = umul64_overflow(data_size, 2, &required_buffer_size); + overflow |= uadd64_overflow(required_buffer_size, 1, &required_buffer_size); + assert(!overflow && buffer_size >= required_buffer_size); + + for (i = 0; i < data_size; i++) { + uint8_t val = data[i]; + *(buffer++) = hexdump_nibble(val >> 4); + *(buffer++) = hexdump_nibble(val & 0xf); + } + *buffer = '\0'; +} diff --git a/util/iov.c b/util/iov.c index 7e73948f5e3..f8536f0474a 100644 --- a/util/iov.c +++ b/util/iov.c @@ -3,6 +3,7 @@ * * Copyright IBM, Corp. 2007, 2008 * Copyright (C) 2010 Red Hat, Inc. + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates * * Author(s): * Anthony Liguori @@ -36,7 +37,6 @@ size_t iov_from_buf_full(const struct iovec *iov, unsigned int iov_cnt, offset -= iov[i].iov_len; } } - assert(offset == 0); return done; } @@ -55,7 +55,6 @@ size_t iov_to_buf_full(const struct iovec *iov, const unsigned int iov_cnt, offset -= iov[i].iov_len; } } - assert(offset == 0); return done; } @@ -74,7 +73,6 @@ size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt, offset -= iov[i].iov_len; } } - assert(offset == 0); return done; } @@ -92,7 +90,8 @@ size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt) /* helper function for iov_send_recv() */ static ssize_t -do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send) +do_send_recv(int sockfd, int flags, struct iovec *iov, unsigned iov_cnt, + bool do_send) { #ifdef CONFIG_POSIX ssize_t ret; @@ -102,8 +101,8 @@ do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send) msg.msg_iovlen = iov_cnt; do { ret = do_send - ? sendmsg(sockfd, &msg, 0) - : recvmsg(sockfd, &msg, 0); + ? sendmsg(sockfd, &msg, flags) + : recvmsg(sockfd, &msg, flags); } while (ret < 0 && errno == EINTR); return ret; #else @@ -114,8 +113,8 @@ do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send) ssize_t off = 0; while (i < iov_cnt) { ssize_t r = do_send - ? 
send(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, 0) - : recv(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, 0); + ? send(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, flags) + : recv(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, flags); if (r > 0) { ret += r; off += r; @@ -144,6 +143,15 @@ do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send) ssize_t iov_send_recv(int sockfd, const struct iovec *_iov, unsigned iov_cnt, size_t offset, size_t bytes, bool do_send) +{ + return iov_send_recv_with_flags(sockfd, 0, _iov, iov_cnt, offset, bytes, + do_send); +} + +ssize_t iov_send_recv_with_flags(int sockfd, int sockflags, + const struct iovec *_iov, + unsigned iov_cnt, size_t offset, + size_t bytes, bool do_send) { ssize_t total = 0; ssize_t ret; @@ -192,11 +200,11 @@ ssize_t iov_send_recv(int sockfd, const struct iovec *_iov, unsigned iov_cnt, assert(iov[niov].iov_len > tail); orig_len = iov[niov].iov_len; iov[niov++].iov_len = tail; - ret = do_send_recv(sockfd, iov, niov, do_send); + ret = do_send_recv(sockfd, sockflags, iov, niov, do_send); /* Undo the changes above before checking for errors */ iov[niov-1].iov_len = orig_len; } else { - ret = do_send_recv(sockfd, iov, niov, do_send); + ret = do_send_recv(sockfd, sockflags, iov, niov, do_send); } if (offset) { iov[0].iov_base -= offset; @@ -266,7 +274,6 @@ unsigned iov_copy(struct iovec *dst_iov, unsigned int dst_iov_cnt, bytes -= len; offset = 0; } - assert(offset == 0); return j; } @@ -337,7 +344,6 @@ size_t qemu_iovec_concat_iov(QEMUIOVector *dst, soffset -= src_iov[i].iov_len; } } - assert(soffset == 0); /* offset beyond end of src */ return done; } diff --git a/util/iova-tree.c b/util/iova-tree.c index 536789797e4..5b0c95ff158 100644 --- a/util/iova-tree.c +++ b/util/iova-tree.c @@ -115,13 +115,6 @@ const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map) return args.result; } -const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova) -{ - const DMAMap map = { .iova = iova, .size = 0 }; - - return iova_tree_find(tree, &map); -} - static inline void iova_tree_insert_internal(GTree *gtree, DMAMap *range) { /* Key and value are sharing the same range data */ @@ -148,22 +141,6 @@ int iova_tree_insert(IOVATree *tree, const DMAMap *map) return IOVA_OK; } -static gboolean iova_tree_traverse(gpointer key, gpointer value, - gpointer data) -{ - iova_tree_iterator iterator = data; - DMAMap *map = key; - - g_assert(key == value); - - return iterator(map); -} - -void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator) -{ - g_tree_foreach(tree->tree, iova_tree_traverse, iterator); -} - void iova_tree_remove(IOVATree *tree, DMAMap map) { const DMAMap *overlap; @@ -280,3 +257,49 @@ void iova_tree_destroy(IOVATree *tree) g_tree_destroy(tree->tree); g_free(tree); } + +static int gpa_tree_compare(gconstpointer a, gconstpointer b, gpointer data) +{ + const DMAMap *m1 = a, *m2 = b; + + if (m1->translated_addr > m2->translated_addr + m2->size) { + return 1; + } + + if (m1->translated_addr + m1->size < m2->translated_addr) { + return -1; + } + + /* Overlapped */ + return 0; +} + +IOVATree *gpa_tree_new(void) +{ + IOVATree *gpa_tree = g_new0(IOVATree, 1); + + gpa_tree->tree = g_tree_new_full(gpa_tree_compare, NULL, g_free, NULL); + + return gpa_tree; +} + +int gpa_tree_insert(IOVATree *tree, const DMAMap *map) +{ + DMAMap *new; + + if (map->translated_addr + map->size < map->translated_addr || + map->perm == IOMMU_NONE) { + return IOVA_ERR_INVALID; + } + + /* We don't 
allow inserting ranges that overlap with existing ones */ + if (iova_tree_find(tree, map)) { + return IOVA_ERR_OVERLAP; + } + + new = g_new0(DMAMap, 1); + memcpy(new, map, sizeof(*new)); + iova_tree_insert_internal(tree->tree, new); + + return IOVA_OK; +} diff --git a/util/keyval.c b/util/keyval.c index 66a5b4740f1..a70629a4812 100644 --- a/util/keyval.c +++ b/util/keyval.c @@ -91,9 +91,9 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" +#include "qobject/qstring.h" #include "qemu/cutils.h" #include "qemu/keyval.h" #include "qemu/help_option.h" diff --git a/util/lockcnt.c b/util/lockcnt.c index 5da36946b1b..92c9f8ceca8 100644 --- a/util/lockcnt.c +++ b/util/lockcnt.c @@ -7,14 +7,16 @@ * Paolo Bonzini */ #include "qemu/osdep.h" +#include "qemu/lockcnt.h" #include "qemu/thread.h" #include "qemu/atomic.h" #include "trace.h" -#ifdef CONFIG_LINUX -#include "qemu/futex.h" +#ifdef HAVE_FUTEX -/* On Linux, bits 0-1 are a futex-based lock, bits 2-31 are the counter. +/* + * When futex is available, bits 0-1 are a futex-based lock, bits 2-31 are the + * counter. * For the mutex algorithm see Ulrich Drepper's "Futexes Are Tricky" (ok, * this is not the most relaxing citation I could make...). It is similar * to mutex2 in the paper. @@ -105,7 +107,7 @@ static bool qemu_lockcnt_cmpxchg_or_wait(QemuLockCnt *lockcnt, int *val, static void lockcnt_wake(QemuLockCnt *lockcnt) { trace_lockcnt_futex_wake(lockcnt); - qemu_futex_wake(&lockcnt->count, 1); + qemu_futex_wake_single(&lockcnt->count); } void qemu_lockcnt_inc(QemuLockCnt *lockcnt) diff --git a/util/log.c b/util/log.c index 6219819855c..abdcb6b3111 100644 --- a/util/log.c +++ b/util/log.c @@ -145,10 +145,28 @@ void qemu_log_unlock(FILE *logfile) void qemu_log(const char *fmt, ...) { - FILE *f = qemu_log_trylock(); + FILE *f; + g_autofree const char *timestr = NULL; + + /* + * Prepare the timestamp *outside* the logging + * lock so it better reflects when the message + * was emitted if we are delayed acquiring the + * mutex + */ + if (message_with_timestamp) { + g_autoptr(GDateTime) dt = g_date_time_new_now_utc(); + timestr = g_date_time_format_iso8601(dt); + } + + f = qemu_log_trylock(); if (f) { va_list ap; + if (timestr) { + fprintf(f, "%s ", timestr); + } + va_start(ap, fmt); vfprintf(f, fmt, ap); va_end(ap); @@ -503,6 +521,8 @@ const QEMULogItem qemu_log_items[] = { "open a separate log file per thread; filename must contain '%d'" }, { CPU_LOG_TB_VPU, "vpu", "include VPU registers in the 'cpu' logging" }, + { LOG_INVALID_MEM, "invalid_mem", + "log invalid memory accesses" }, { 0, NULL, NULL }, }; @@ -556,3 +576,15 @@ void qemu_print_log_usage(FILE *f) fprintf(f, "\nUse \"-d trace:help\" to get a list of trace events.\n\n"); #endif } + +#ifdef CONFIG_HAVE_RUST +ssize_t rust_fwrite(const void *ptr, size_t size, size_t nmemb, FILE *stream) +{ + /* + * Same as fwrite, but return -errno because Rust libc does not provide + * portable access to errno. :( + */ + int ret = fwrite(ptr, size, nmemb, stream); + return ret < 0 ? 
-errno : 0; +} +#endif diff --git a/util/main-loop.c b/util/main-loop.c index a0386cfeb60..51aeb2432e7 100644 --- a/util/main-loop.c +++ b/util/main-loop.c @@ -26,8 +26,9 @@ #include "qapi/error.h" #include "qemu/cutils.h" #include "qemu/timer.h" -#include "sysemu/cpu-timers.h" -#include "sysemu/replay.h" +#include "system/cpu-timers.h" +#include "exec/icount.h" +#include "system/replay.h" #include "qemu/main-loop.h" #include "block/aio.h" #include "block/thread-pool.h" @@ -212,7 +213,6 @@ static void main_loop_init(EventLoopBase *base, Error **errp) main_loop_update_params(base, errp); mloop = m; - return; } static bool main_loop_can_be_deleted(EventLoopBase *base) @@ -220,7 +220,7 @@ static bool main_loop_can_be_deleted(EventLoopBase *base) return false; } -static void main_loop_class_init(ObjectClass *oc, void *class_data) +static void main_loop_class_init(ObjectClass *oc, const void *class_data) { EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(oc); diff --git a/util/memfd.c b/util/memfd.c index 4a3c07e0bee..07beab174dc 100644 --- a/util/memfd.c +++ b/util/memfd.c @@ -28,6 +28,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "qemu/memfd.h" #include "qemu/host-utils.h" @@ -149,11 +150,15 @@ void *qemu_memfd_alloc(const char *name, size_t size, unsigned int seals, void qemu_memfd_free(void *ptr, size_t size, int fd) { if (ptr) { - munmap(ptr, size); + if (munmap(ptr, size) != 0) { + error_report("memfd munmap() failed: %s", strerror(errno)); + } } if (fd != -1) { - close(fd); + if (close(fd) != 0) { + error_report("memfd close() failed: %s", strerror(errno)); + } } } @@ -189,17 +194,27 @@ bool qemu_memfd_alloc_check(void) /** * qemu_memfd_check(): * - * Check if host supports memfd. + * Check if host supports memfd. Cache the answer for the common case flags=0. */ bool qemu_memfd_check(unsigned int flags) { #ifdef CONFIG_LINUX - int mfd = memfd_create("test", flags | MFD_CLOEXEC); + int mfd; + static int memfd_check = MEMFD_TODO; + if (!flags && memfd_check != MEMFD_TODO) { + return memfd_check; + } + + mfd = memfd_create("test", flags | MFD_CLOEXEC); if (mfd >= 0) { close(mfd); - return true; } + if (!flags) { + memfd_check = (mfd >= 0) ? 
MEMFD_OK : MEMFD_KO; + } + return (mfd >= 0); + #endif return false; diff --git a/util/meson.build b/util/meson.build index 5d8bef98912..35029380a37 100644 --- a/util/meson.build +++ b/util/meson.build @@ -11,7 +11,9 @@ if host_os != 'windows' endif util_ss.add(files('compatfd.c')) util_ss.add(files('event_notifier-posix.c')) - util_ss.add(files('mmap-alloc.c')) + if host_os != 'emscripten' + util_ss.add(files('mmap-alloc.c')) + endif freebsd_dep = [] if host_os == 'freebsd' freebsd_dep = util @@ -25,7 +27,7 @@ else util_ss.add(files('event_notifier-win32.c')) util_ss.add(files('oslib-win32.c')) util_ss.add(files('qemu-thread-win32.c')) - util_ss.add(winmm, pathcch) + util_ss.add(winmm, pathcch, synchronization) endif util_ss.add(when: linux_io_uring, if_true: files('fdmon-io_uring.c')) if glib_has_gslice @@ -33,6 +35,7 @@ if glib_has_gslice endif util_ss.add(files('defer-call.c')) util_ss.add(files('envlist.c', 'path.c', 'module.c')) +util_ss.add(files('event.c')) util_ss.add(files('host-utils.c')) util_ss.add(files('bitmap.c', 'bitops.c')) util_ss.add(files('fifo8.c')) @@ -84,6 +87,8 @@ if have_block or have_ga util_ss.add(files('qemu-coroutine.c', 'qemu-coroutine-lock.c', 'qemu-coroutine-io.c')) util_ss.add(files(f'coroutine-@coroutine_backend@.c')) util_ss.add(files('thread-pool.c', 'qemu-timer.c')) +endif +if have_block or have_ga or have_user util_ss.add(files('qemu-sockets.c')) endif if have_block @@ -129,4 +134,6 @@ elif cpu in ['ppc', 'ppc64'] util_ss.add(files('cpuinfo-ppc.c')) elif cpu in ['riscv32', 'riscv64'] util_ss.add(files('cpuinfo-riscv.c')) +elif cpu == 's390x' + util_ss.add(files('s390x_pci_mmio.c')) endif diff --git a/util/module.c b/util/module.c index 32e263163c7..1aa2079d013 100644 --- a/util/module.c +++ b/util/module.c @@ -234,7 +234,7 @@ int module_load(const char *prefix, const char *name, Error **errp) search_dir = getenv("QEMU_MODULE_DIR"); if (search_dir != NULL) { - dirs[n_dirs++] = g_strdup_printf("%s", search_dir); + dirs[n_dirs++] = g_strdup(search_dir); } dirs[n_dirs++] = get_relocated_path(CONFIG_QEMU_MODDIR); @@ -354,13 +354,13 @@ int module_load_qom(const char *type, Error **errp) void module_load_qom_all(void) { const QemuModinfo *modinfo; - Error *local_err = NULL; if (module_loaded_qom_all) { return; } for (modinfo = module_info; modinfo->name != NULL; modinfo++) { + Error *local_err = NULL; if (!modinfo->objs) { continue; } diff --git a/util/oslib-posix.c b/util/oslib-posix.c index b090fe0eed0..4ff577e5de6 100644 --- a/util/oslib-posix.c +++ b/util/oslib-posix.c @@ -31,7 +31,7 @@ #include -#include "sysemu/sysemu.h" +#include "system/system.h" #include "trace.h" #include "qapi/error.h" #include "qemu/error-report.h" @@ -58,6 +58,7 @@ #include #endif +#include "qemu/memalign.h" #include "qemu/mmap-alloc.h" #define MAX_MEM_PREALLOC_THREAD_COUNT 16 @@ -111,6 +112,21 @@ int qemu_get_thread_id(void) #endif } +int qemu_kill_thread(int tid, int sig) +{ +#if defined(__linux__) + return syscall(__NR_tgkill, getpid(), tid, sig); +#elif defined(__FreeBSD__) + return thr_kill2(getpid(), tid, sig); +#elif defined(__NetBSD__) + return _lwp_kill(tid, sig); +#elif defined(__OpenBSD__) + return thrkill(tid, sig, NULL); +#else + return kill(tid, sig); +#endif +} + int qemu_daemon(int nochdir, int noclose) { return daemon(nochdir, noclose); @@ -195,11 +211,21 @@ void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared, const uint32_t qemu_map_flags = (shared ? QEMU_MAP_SHARED : 0) | (noreserve ? 
QEMU_MAP_NORESERVE : 0); size_t align = QEMU_VMALLOC_ALIGN; +#ifndef EMSCRIPTEN void *ptr = qemu_ram_mmap(-1, size, align, qemu_map_flags, 0); if (ptr == MAP_FAILED) { return NULL; } +#else + /* + * qemu_ram_mmap is not implemented for Emscripten. Use qemu_memalign + * for the anonymous allocation. noreserve is ignored as there is no swap + * space on Emscripten, and shared is ignored as there is no other + * processes on Emscripten. + */ + void *ptr = qemu_memalign(align, size); +#endif if (alignment) { *alignment = align; @@ -212,7 +238,16 @@ void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared, void qemu_anon_ram_free(void *ptr, size_t size) { trace_qemu_anon_ram_free(ptr, size); +#ifndef EMSCRIPTEN qemu_ram_munmap(-1, ptr, size); +#else + /* + * qemu_ram_munmap is not implemented for Emscripten and qemu_memalign + * was used for the allocation. Use the corresponding freeing function + * here. + */ + qemu_vfree(ptr); +#endif } void qemu_socket_set_block(int fd) @@ -573,7 +608,15 @@ bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, { static gsize initialized; int ret; +#ifndef EMSCRIPTEN size_t hpagesize = qemu_fd_getpagesize(fd); +#else + /* + * mmap-alloc.c is excluded from Emscripten build, so qemu_fd_getpagesize + * is unavailable. Fallback to the lower level implementation. + */ + size_t hpagesize = qemu_real_host_page_size(); +#endif size_t numpages = DIV_ROUND_UP(sz, hpagesize); bool use_madv_populate_write; struct sigaction act; @@ -807,3 +850,179 @@ int qemu_msync(void *addr, size_t length, int fd) return msync(addr, length, MS_SYNC); } + +static bool qemu_close_all_open_fd_proc(const int *skip, unsigned int nskip) +{ + struct dirent *de; + int fd, dfd; + DIR *dir; + unsigned int skip_start = 0, skip_end = nskip; + + dir = opendir("/proc/self/fd"); + if (!dir) { + /* If /proc is not mounted, there is nothing that can be done. */ + return false; + } + /* Avoid closing the directory. */ + dfd = dirfd(dir); + + for (de = readdir(dir); de; de = readdir(dir)) { + bool close_fd = true; + + if (de->d_name[0] == '.') { + continue; + } + fd = atoi(de->d_name); + if (fd == dfd) { + continue; + } + + for (unsigned int i = skip_start; i < skip_end; i++) { + if (fd < skip[i]) { + /* We are below the next skipped fd, break */ + break; + } else if (fd == skip[i]) { + close_fd = false; + /* Restrict the range as we found fds matching start/end */ + if (i == skip_start) { + skip_start++; + } else if (i == skip_end) { + skip_end--; + } + break; + } + } + + if (close_fd) { + close(fd); + } + } + closedir(dir); + + return true; +} + +static bool qemu_close_all_open_fd_close_range(const int *skip, + unsigned int nskip, + int open_max) +{ +#ifdef CONFIG_CLOSE_RANGE + int max_fd = open_max - 1; + int first = 0, last; + unsigned int cur_skip = 0; + int ret; + + do { + /* Find the start boundary of the range to close */ + while (cur_skip < nskip && first == skip[cur_skip]) { + cur_skip++; + first++; + } + + /* Find the upper boundary of the range to close */ + last = max_fd; + if (cur_skip < nskip) { + last = skip[cur_skip] - 1; + last = MIN(last, max_fd); + } + + /* With the adjustments to the range, we might be done. 
*/ + if (first > last) { + break; + } + + ret = close_range(first, last, 0); + if (ret < 0) { + return false; + } + + first = last + 1; + } while (last < max_fd); + + return true; +#else + return false; +#endif +} + +static void qemu_close_all_open_fd_fallback(const int *skip, unsigned int nskip, + int open_max) +{ + unsigned int cur_skip = 0; + + /* Fallback */ + for (int i = 0; i < open_max; i++) { + if (cur_skip < nskip && i == skip[cur_skip]) { + cur_skip++; + continue; + } + close(i); + } +} + +/* + * Close all open file descriptors. + */ +void qemu_close_all_open_fd(const int *skip, unsigned int nskip) +{ + int open_max = sysconf(_SC_OPEN_MAX); + + assert(skip != NULL || nskip == 0); + + if (!qemu_close_all_open_fd_close_range(skip, nskip, open_max) && + !qemu_close_all_open_fd_proc(skip, nskip)) { + qemu_close_all_open_fd_fallback(skip, nskip, open_max); + } +} + +int qemu_shm_alloc(size_t size, Error **errp) +{ + g_autoptr(GString) shm_name = g_string_new(NULL); + int fd, oflag, cur_sequence; + static int sequence; + mode_t mode; + + cur_sequence = qatomic_fetch_inc(&sequence); + + /* + * Let's use `mode = 0` because we don't want other processes to open our + * memory unless we share the file descriptor with them. + */ + mode = 0; + oflag = O_RDWR | O_CREAT | O_EXCL; + + /* + * Some operating systems allow creating anonymous POSIX shared memory + * objects (e.g. FreeBSD provides the SHM_ANON constant), but this is not + * defined by POSIX, so let's create a unique name. + * + * From Linux's shm_open(3) man-page: + * For portable use, a shared memory object should be identified + * by a name of the form /somename;" + */ + g_string_printf(shm_name, "/qemu-" FMT_pid "-shm-%d", getpid(), + cur_sequence); + + fd = shm_open(shm_name->str, oflag, mode); + if (fd < 0) { + error_setg_errno(errp, errno, + "failed to create POSIX shared memory"); + return -1; + } + + /* + * We have the file descriptor, so we no longer need to expose the + * POSIX shared memory object. However it will remain allocated as long as + * there are file descriptors pointing to it. 
+ */ + shm_unlink(shm_name->str); + + if (ftruncate(fd, size) == -1) { + error_setg_errno(errp, errno, + "failed to resize POSIX shared memory to %zu", size); + close(fd); + return -1; + } + + return fd; +} diff --git a/util/oslib-win32.c b/util/oslib-win32.c index b623830d624..b7351634ece 100644 --- a/util/oslib-win32.c +++ b/util/oslib-win32.c @@ -877,3 +877,9 @@ void qemu_win32_map_free(void *ptr, HANDLE h, Error **errp) } CloseHandle(h); } + +int qemu_shm_alloc(size_t size, Error **errp) +{ + error_setg(errp, "Shared memory is not supported."); + return -1; +} diff --git a/util/qemu-co-shared-resource.c b/util/qemu-co-shared-resource.c index a66cc07e75b..752eb5a1c5f 100644 --- a/util/qemu-co-shared-resource.c +++ b/util/qemu-co-shared-resource.c @@ -66,12 +66,6 @@ static bool co_try_get_from_shres_locked(SharedResource *s, uint64_t n) return false; } -bool co_try_get_from_shres(SharedResource *s, uint64_t n) -{ - QEMU_LOCK_GUARD(&s->lock); - return co_try_get_from_shres_locked(s, n); -} - void coroutine_fn co_get_from_shres(SharedResource *s, uint64_t n) { assert(n <= s->total); diff --git a/util/qemu-config.c b/util/qemu-config.c index a90c18dad25..d1fc49c507d 100644 --- a/util/qemu-config.c +++ b/util/qemu-config.c @@ -1,8 +1,8 @@ #include "qemu/osdep.h" #include "block/qdict.h" /* for qdict_extract_subqdict() */ #include "qapi/error.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qlist.h" +#include "qobject/qdict.h" +#include "qobject/qlist.h" #include "qemu/error-report.h" #include "qemu/option.h" #include "qemu/config-file.h" diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c index eb4eebefdfb..64d6264fc74 100644 --- a/util/qemu-coroutine.c +++ b/util/qemu-coroutine.c @@ -136,7 +136,7 @@ static Coroutine *coroutine_pool_get_local(void) static void coroutine_pool_refill_local(void) { CoroutinePool *local_pool = get_ptr_local_pool(); - CoroutinePoolBatch *batch; + CoroutinePoolBatch *batch = NULL; WITH_QEMU_LOCK_GUARD(&global_pool_lock) { batch = QSLIST_FIRST(&global_pool); diff --git a/util/qemu-option.c b/util/qemu-option.c index 201f7a87f34..770300dff12 100644 --- a/util/qemu-option.c +++ b/util/qemu-option.c @@ -27,10 +27,10 @@ #include "qapi/error.h" #include "qemu/error-report.h" -#include "qapi/qmp/qbool.h" -#include "qapi/qmp/qdict.h" -#include "qapi/qmp/qnum.h" -#include "qapi/qmp/qstring.h" +#include "qobject/qbool.h" +#include "qobject/qdict.h" +#include "qobject/qnum.h" +#include "qobject/qstring.h" #include "qapi/qmp/qerror.h" #include "qemu/option_int.h" #include "qemu/cutils.h" diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c index 60c44b2b56b..4773755fd58 100644 --- a/util/qemu-sockets.c +++ b/util/qemu-sockets.c @@ -30,6 +30,7 @@ #include "qapi/qobject-input-visitor.h" #include "qapi/qobject-output-visitor.h" #include "qemu/cutils.h" +#include "qemu/option.h" #include "trace.h" #ifndef AI_ADDRCONFIG @@ -44,6 +45,14 @@ # define AI_NUMERICSERV 0 #endif +/* + * On macOS TCP_KEEPIDLE is available under a different name, TCP_KEEPALIVE. 
+ * https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/bsd/man/man4/tcp.4#L172 + */ +#if defined(TCP_KEEPALIVE) && !defined(TCP_KEEPIDLE) +# define TCP_KEEPIDLE TCP_KEEPALIVE +#endif + static int inet_getport(struct addrinfo *e) { @@ -205,6 +214,58 @@ static int try_bind(int socket, InetSocketAddress *saddr, struct addrinfo *e) #endif } +static int inet_set_sockopts(int sock, InetSocketAddress *saddr, Error **errp) +{ + if (saddr->keep_alive) { + int keep_alive = 1; + int ret = setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, + &keep_alive, sizeof(keep_alive)); + + if (ret < 0) { + error_setg_errno(errp, errno, + "Unable to set keep-alive option on socket"); + return -1; + } +#ifdef HAVE_TCP_KEEPCNT + if (saddr->has_keep_alive_count && saddr->keep_alive_count) { + int keep_count = saddr->keep_alive_count; + ret = setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT, &keep_count, + sizeof(keep_count)); + if (ret < 0) { + error_setg_errno(errp, errno, + "Unable to set TCP keep-alive count option on socket"); + return -1; + } + } +#endif +#ifdef HAVE_TCP_KEEPIDLE + if (saddr->has_keep_alive_idle && saddr->keep_alive_idle) { + int keep_idle = saddr->keep_alive_idle; + ret = setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE, &keep_idle, + sizeof(keep_idle)); + if (ret < 0) { + error_setg_errno(errp, errno, + "Unable to set TCP keep-alive idle option on socket"); + return -1; + } + } +#endif +#ifdef HAVE_TCP_KEEPINTVL + if (saddr->has_keep_alive_interval && saddr->keep_alive_interval) { + int keep_interval = saddr->keep_alive_interval; + ret = setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL, &keep_interval, + sizeof(keep_interval)); + if (ret < 0) { + error_setg_errno(errp, errno, + "Unable to set TCP keep-alive interval option on socket"); + return -1; + } + } +#endif + } + return 0; +} + static int inet_listen_saddr(InetSocketAddress *saddr, int port_offset, int num, @@ -220,12 +281,6 @@ static int inet_listen_saddr(InetSocketAddress *saddr, int saved_errno = 0; bool socket_created = false; - if (saddr->keep_alive) { - error_setg(errp, "keep-alive option is not supported for passive " - "sockets"); - return -1; - } - memset(&ai,0, sizeof(ai)); ai.ai_flags = AI_PASSIVE; if (saddr->has_numeric && saddr->numeric) { @@ -287,11 +342,20 @@ static int inet_listen_saddr(InetSocketAddress *saddr, port_min = inet_getport(e); port_max = saddr->has_to ? saddr->to + port_offset : port_min; for (p = port_min; p <= port_max; p++) { + if (slisten >= 0) { + /* + * We have a socket we tried with the previous port. It cannot + * be rebound, we need to close it and create a new one. + */ + close(slisten); + slisten = -1; + } inet_setport(e, p); slisten = create_fast_reuse_socket(e); if (slisten < 0) { - /* First time we expect we might fail to create the socket + /* + * First time we expect we might fail to create the socket * eg if 'e' has AF_INET6 but ipv6 kmod is not loaded. * Later iterations should always succeed if first iteration * worked though, so treat that as fatal. 
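/*
 * Illustrative aside, not taken from the patch: the sequence of socket
 * options that the new inet_set_sockopts() helper above applies to both
 * connecting and (now also) listening TCP sockets.  SO_KEEPALIVE enables
 * probing, and the optional TCP_KEEPCNT / TCP_KEEPIDLE / TCP_KEEPINTVL
 * values tune how many probes are sent, after how much idle time, and how
 * far apart.  The helper name and the example values are placeholders;
 * on macOS the idle knob is spelled TCP_KEEPALIVE, which the mapping above
 * turns into TCP_KEEPIDLE.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int demo_enable_keepalive(int fd, int count, int idle, int interval)
{
    int on = 1;

    if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0) {
        return -1;
    }
#ifdef TCP_KEEPCNT
    if (count > 0 &&
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(count)) < 0) {
        return -1;
    }
#endif
#ifdef TCP_KEEPIDLE
    if (idle > 0 &&
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) < 0) {
        return -1;
    }
#endif
#ifdef TCP_KEEPINTVL
    if (interval > 0 &&
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL,
                   &interval, sizeof(interval)) < 0) {
        return -1;
    }
#endif
    return 0;
}

/*
 * Example: demo_enable_keepalive(sock, 5, 60, 10) sends up to 5 probes,
 * starting after 60 seconds of idle time, spaced 10 seconds apart.
 */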
@@ -301,40 +365,41 @@ static int inet_listen_saddr(InetSocketAddress *saddr, } else { error_setg_errno(errp, errno, "Failed to recreate failed listening socket"); - goto listen_failed; + goto fail; } } socket_created = true; rc = try_bind(slisten, saddr, e); if (rc < 0) { - if (errno != EADDRINUSE) { - error_setg_errno(errp, errno, "Failed to bind socket"); - goto listen_failed; - } - } else { - if (!listen(slisten, num)) { - goto listen_ok; + if (errno == EADDRINUSE) { + /* This port is already used, try the next one */ + continue; } - if (errno != EADDRINUSE) { - error_setg_errno(errp, errno, "Failed to listen on socket"); - goto listen_failed; + error_setg_errno(errp, errno, "Failed to bind socket"); + goto fail; + } + if (listen(slisten, num)) { + if (errno == EADDRINUSE) { + /* This port is already used, try the next one */ + continue; } + error_setg_errno(errp, errno, "Failed to listen on socket"); + goto fail; + } + /* We have a listening socket */ + if (inet_set_sockopts(slisten, saddr, errp) < 0) { + goto fail; } - /* Someone else managed to bind to the same port and beat us - * to listen on it! Socket semantics does not allow us to - * recover from this situation, so we need to recreate the - * socket to allow bind attempts for subsequent ports: - */ - close(slisten); - slisten = -1; + freeaddrinfo(res); + return slisten; } } error_setg_errno(errp, errno, socket_created ? "Failed to find an available port" : "Failed to create a socket"); -listen_failed: +fail: saved_errno = errno; if (slisten >= 0) { close(slisten); @@ -342,10 +407,6 @@ static int inet_listen_saddr(InetSocketAddress *saddr, freeaddrinfo(res); errno = saved_errno; return -1; - -listen_ok: - freeaddrinfo(res); - return slisten; } #ifdef _WIN32 @@ -367,7 +428,6 @@ static int inet_connect_addr(const InetSocketAddress *saddr, addr->ai_family); return -1; } - socket_set_fast_reuse(sock); /* connect to peer */ do { @@ -476,16 +536,9 @@ int inet_connect_saddr(InetSocketAddress *saddr, Error **errp) return sock; } - if (saddr->keep_alive) { - int val = 1; - int ret = setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, - &val, sizeof(val)); - - if (ret < 0) { - error_setg_errno(errp, errno, "Unable to set KEEPALIVE"); - close(sock); - return -1; - } + if (inet_set_sockopts(sock, saddr, errp) < 0) { + close(sock); + return -1; } return sock; @@ -592,141 +645,146 @@ static int inet_dgram_saddr(InetSocketAddress *sraddr, return -1; } -/* compatibility wrapper */ -static int inet_parse_flag(const char *flagname, const char *optstr, bool *val, - Error **errp) -{ - char *end; - size_t len; - - end = strstr(optstr, ","); - if (end) { - if (end[1] == ',') { /* Reject 'ipv6=on,,foo' */ - error_setg(errp, "error parsing '%s' flag '%s'", flagname, optstr); - return -1; - } - len = end - optstr; - } else { - len = strlen(optstr); - } - if (len == 0 || (len == 3 && strncmp(optstr, "=on", len) == 0)) { - *val = true; - } else if (len == 4 && strncmp(optstr, "=off", len) == 0) { - *val = false; - } else { - error_setg(errp, "error parsing '%s' flag '%s'", flagname, optstr); - return -1; - } - return 0; -} +static QemuOptsList inet_opts = { + .name = "InetSocketAddress", + .head = QTAILQ_HEAD_INITIALIZER(inet_opts.head), + .implied_opt_name = "addr", + .desc = { + { + .name = "addr", + .type = QEMU_OPT_STRING, + }, + { + .name = "numeric", + .type = QEMU_OPT_BOOL, + }, + { + .name = "to", + .type = QEMU_OPT_NUMBER, + }, + { + .name = "ipv4", + .type = QEMU_OPT_BOOL, + }, + { + .name = "ipv6", + .type = QEMU_OPT_BOOL, + }, + { + .name = 
"keep-alive", + .type = QEMU_OPT_BOOL, + }, +#ifdef HAVE_TCP_KEEPCNT + { + .name = "keep-alive-count", + .type = QEMU_OPT_NUMBER, + }, +#endif +#ifdef HAVE_TCP_KEEPIDLE + { + .name = "keep-alive-idle", + .type = QEMU_OPT_NUMBER, + }, +#endif +#ifdef HAVE_TCP_KEEPINTVL + { + .name = "keep-alive-interval", + .type = QEMU_OPT_NUMBER, + }, +#endif +#ifdef HAVE_IPPROTO_MPTCP + { + .name = "mptcp", + .type = QEMU_OPT_BOOL, + }, +#endif + { /* end of list */ } + }, +}; int inet_parse(InetSocketAddress *addr, const char *str, Error **errp) { - const char *optstr, *h; - char host[65]; - char port[33]; - int to; - int pos; - char *begin; - + QemuOpts *opts = qemu_opts_parse(&inet_opts, str, true, errp); + if (!opts) { + return -1; + } memset(addr, 0, sizeof(*addr)); /* parse address */ - if (str[0] == ':') { - /* no host given */ - host[0] = '\0'; - if (sscanf(str, ":%32[^,]%n", port, &pos) != 1) { - error_setg(errp, "error parsing port in address '%s'", str); - return -1; - } - } else if (str[0] == '[') { + const char *addr_str = qemu_opt_get(opts, "addr"); + if (!addr_str) { + error_setg(errp, "error parsing address ''"); + return -1; + } + if (str[0] == '[') { /* IPv6 addr */ - if (sscanf(str, "[%64[^]]]:%32[^,]%n", host, port, &pos) != 2) { - error_setg(errp, "error parsing IPv6 address '%s'", str); + const char *ip_end = strstr(addr_str, "]:"); + if (!ip_end || ip_end - addr_str < 2 || strlen(ip_end) < 3) { + error_setg(errp, "error parsing IPv6 address '%s'", addr_str); return -1; } + addr->host = g_strndup(addr_str + 1, ip_end - addr_str - 1); + addr->port = g_strdup(ip_end + 2); } else { - /* hostname or IPv4 addr */ - if (sscanf(str, "%64[^:]:%32[^,]%n", host, port, &pos) != 2) { - error_setg(errp, "error parsing address '%s'", str); + /* no host, hostname or IPv4 addr */ + const char *port = strchr(addr_str, ':'); + if (!port || strlen(port) < 2) { + error_setg(errp, "error parsing address '%s'", addr_str); return -1; } + addr->host = g_strndup(addr_str, port - addr_str); + addr->port = g_strdup(port + 1); } - addr->host = g_strdup(host); - addr->port = g_strdup(port); - /* parse options */ - optstr = str + pos; - h = strstr(optstr, ",to="); - if (h) { - h += 4; - if (sscanf(h, "%d%n", &to, &pos) != 1 || - (h[pos] != '\0' && h[pos] != ',')) { - error_setg(errp, "error parsing to= argument"); - return -1; - } + if (qemu_opt_find(opts, "numeric")) { + addr->has_numeric = true, + addr->numeric = qemu_opt_get_bool(opts, "numeric", false); + } + if (qemu_opt_find(opts, "to")) { addr->has_to = true; - addr->to = to; + addr->to = qemu_opt_get_number(opts, "to", 0); } - begin = strstr(optstr, ",ipv4"); - if (begin) { - if (inet_parse_flag("ipv4", begin + 5, &addr->ipv4, errp) < 0) { - return -1; - } + if (qemu_opt_find(opts, "ipv4")) { addr->has_ipv4 = true; + addr->ipv4 = qemu_opt_get_bool(opts, "ipv4", false); } - begin = strstr(optstr, ",ipv6"); - if (begin) { - if (inet_parse_flag("ipv6", begin + 5, &addr->ipv6, errp) < 0) { - return -1; - } + if (qemu_opt_find(opts, "ipv6")) { addr->has_ipv6 = true; + addr->ipv6 = qemu_opt_get_bool(opts, "ipv6", false); } - begin = strstr(optstr, ",keep-alive"); - if (begin) { - if (inet_parse_flag("keep-alive", begin + strlen(",keep-alive"), - &addr->keep_alive, errp) < 0) - { - return -1; - } + if (qemu_opt_find(opts, "keep-alive")) { addr->has_keep_alive = true; + addr->keep_alive = qemu_opt_get_bool(opts, "keep-alive", false); + } +#ifdef HAVE_TCP_KEEPCNT + if (qemu_opt_find(opts, "keep-alive-count")) { + addr->has_keep_alive_count = true; + 
addr->keep_alive_count = qemu_opt_get_number(opts, "keep-alive-count", 0); + } +#endif +#ifdef HAVE_TCP_KEEPIDLE + if (qemu_opt_find(opts, "keep-alive-idle")) { + addr->has_keep_alive_idle = true; + addr->keep_alive_idle = qemu_opt_get_number(opts, "keep-alive-idle", 0); } +#endif +#ifdef HAVE_TCP_KEEPINTVL + if (qemu_opt_find(opts, "keep-alive-interval")) { + addr->has_keep_alive_interval = true; + addr->keep_alive_interval = qemu_opt_get_number(opts, "keep-alive-interval", 0); + } +#endif #ifdef HAVE_IPPROTO_MPTCP - begin = strstr(optstr, ",mptcp"); - if (begin) { - if (inet_parse_flag("mptcp", begin + strlen(",mptcp"), - &addr->mptcp, errp) < 0) - { - return -1; - } + if (qemu_opt_find(opts, "mptcp")) { addr->has_mptcp = true; + addr->mptcp = qemu_opt_get_bool(opts, "mptcp", 0); } #endif return 0; } -/** - * Create a blocking socket and connect it to an address. - * - * @str: address string - * @errp: set in case of an error - * - * Returns -1 in case of error, file descriptor on success - **/ -int inet_connect(const char *str, Error **errp) -{ - int sock = -1; - InetSocketAddress *addr = g_new(InetSocketAddress, 1); - - if (!inet_parse(addr, str, errp)) { - sock = inet_connect_saddr(addr, errp); - } - qapi_free_InetSocketAddress(addr); - return sock; -} - #ifdef CONFIG_AF_VSOCK static bool vsock_parse_vaddr_to_sockaddr(const VsockSocketAddress *vaddr, struct sockaddr_vm *svm, @@ -1421,21 +1479,6 @@ SocketAddress *socket_local_address(int fd, Error **errp) } -SocketAddress *socket_remote_address(int fd, Error **errp) -{ - struct sockaddr_storage ss; - socklen_t sslen = sizeof(ss); - - if (getpeername(fd, (struct sockaddr *)&ss, &sslen) < 0) { - error_setg_errno(errp, errno, "%s", - "Unable to query remote socket address"); - return NULL; - } - - return socket_sockaddr_to_address(&ss, sslen, errp); -} - - SocketAddress *socket_address_flatten(SocketAddressLegacy *addr_legacy) { SocketAddress *addr; diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c index b2e26e21205..ba725444ba6 100644 --- a/util/qemu-thread-posix.c +++ b/util/qemu-thread-posix.c @@ -317,154 +317,6 @@ void qemu_sem_wait(QemuSemaphore *sem) qemu_mutex_unlock(&sem->mutex); } -#ifdef __linux__ -#include "qemu/futex.h" -#else -static inline void qemu_futex_wake(QemuEvent *ev, int n) -{ - assert(ev->initialized); - pthread_mutex_lock(&ev->lock); - if (n == 1) { - pthread_cond_signal(&ev->cond); - } else { - pthread_cond_broadcast(&ev->cond); - } - pthread_mutex_unlock(&ev->lock); -} - -static inline void qemu_futex_wait(QemuEvent *ev, unsigned val) -{ - assert(ev->initialized); - pthread_mutex_lock(&ev->lock); - if (ev->value == val) { - pthread_cond_wait(&ev->cond, &ev->lock); - } - pthread_mutex_unlock(&ev->lock); -} -#endif - -/* Valid transitions: - * - free->set, when setting the event - * - busy->set, when setting the event, followed by qemu_futex_wake - * - set->free, when resetting the event - * - free->busy, when waiting - * - * set->busy does not happen (it can be observed from the outside but - * it really is set->free->busy). - * - * busy->free provably cannot happen; to enforce it, the set->free transition - * is done with an OR, which becomes a no-op if the event has concurrently - * transitioned to free or busy. - */ - -#define EV_SET 0 -#define EV_FREE 1 -#define EV_BUSY -1 - -void qemu_event_init(QemuEvent *ev, bool init) -{ -#ifndef __linux__ - pthread_mutex_init(&ev->lock, NULL); - pthread_cond_init(&ev->cond, NULL); -#endif - - ev->value = (init ? 
EV_SET : EV_FREE); - ev->initialized = true; -} - -void qemu_event_destroy(QemuEvent *ev) -{ - assert(ev->initialized); - ev->initialized = false; -#ifndef __linux__ - pthread_mutex_destroy(&ev->lock); - pthread_cond_destroy(&ev->cond); -#endif -} - -void qemu_event_set(QemuEvent *ev) -{ - assert(ev->initialized); - - /* - * Pairs with both qemu_event_reset() and qemu_event_wait(). - * - * qemu_event_set has release semantics, but because it *loads* - * ev->value we need a full memory barrier here. - */ - smp_mb(); - if (qatomic_read(&ev->value) != EV_SET) { - int old = qatomic_xchg(&ev->value, EV_SET); - - /* Pairs with memory barrier in kernel futex_wait system call. */ - smp_mb__after_rmw(); - if (old == EV_BUSY) { - /* There were waiters, wake them up. */ - qemu_futex_wake(ev, INT_MAX); - } - } -} - -void qemu_event_reset(QemuEvent *ev) -{ - assert(ev->initialized); - - /* - * If there was a concurrent reset (or even reset+wait), - * do nothing. Otherwise change EV_SET->EV_FREE. - */ - qatomic_or(&ev->value, EV_FREE); - - /* - * Order reset before checking the condition in the caller. - * Pairs with the first memory barrier in qemu_event_set(). - */ - smp_mb__after_rmw(); -} - -void qemu_event_wait(QemuEvent *ev) -{ - unsigned value; - - assert(ev->initialized); - - /* - * qemu_event_wait must synchronize with qemu_event_set even if it does - * not go down the slow path, so this load-acquire is needed that - * synchronizes with the first memory barrier in qemu_event_set(). - * - * If we do go down the slow path, there is no requirement at all: we - * might miss a qemu_event_set() here but ultimately the memory barrier in - * qemu_futex_wait() will ensure the check is done correctly. - */ - value = qatomic_load_acquire(&ev->value); - if (value != EV_SET) { - if (value == EV_FREE) { - /* - * Leave the event reset and tell qemu_event_set that there are - * waiters. No need to retry, because there cannot be a concurrent - * busy->free transition. After the CAS, the event will be either - * set or busy. - * - * This cmpxchg doesn't have particular ordering requirements if it - * succeeds (moving the store earlier can only cause qemu_event_set() - * to issue _more_ wakeups), the failing case needs acquire semantics - * like the load above. - */ - if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) { - return; - } - } - - /* - * This is the final check for a concurrent set, so it does need - * a smp_mb() pairing with the second barrier of qemu_event_set(). - * The barrier is inside the FUTEX_WAIT system call. - */ - qemu_futex_wait(ev, EV_BUSY); - } -} - static __thread NotifierList thread_exit; /* diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c index a7fe3cc345f..ca2e0b512e2 100644 --- a/util/qemu-thread-win32.c +++ b/util/qemu-thread-win32.c @@ -231,135 +231,6 @@ void qemu_sem_wait(QemuSemaphore *sem) } } -/* Wrap a Win32 manual-reset event with a fast userspace path. The idea - * is to reset the Win32 event lazily, as part of a test-reset-test-wait - * sequence. Such a sequence is, indeed, how QemuEvents are used by - * RCU and other subsystems! - * - * Valid transitions: - * - free->set, when setting the event - * - busy->set, when setting the event, followed by SetEvent - * - set->free, when resetting the event - * - free->busy, when waiting - * - * set->busy does not happen (it can be observed from the outside but - * it really is set->free->busy). 
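Not part of the patch: a minimal sketch of the test-reset-test-wait sequence described in the comment above (the way RCU and other subsystems use QemuEvent), assuming the usual API from "qemu/thread.h"; the work_ready/work_pending names and the producer/consumer split are illustrative only.

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"

static QemuEvent work_ready;   /* qemu_event_init(&work_ready, false) at startup */
static bool work_pending;      /* written by the producer thread */

/* Consumer: reset first, then re-check the condition, and only then wait. */
static void wait_for_work(void)
{
    for (;;) {
        qemu_event_reset(&work_ready);      /* set -> free */
        if (qatomic_read(&work_pending)) {
            return;                         /* condition already true */
        }
        qemu_event_wait(&work_ready);       /* free -> busy, then block */
    }
}

/* Producer: publish the condition before setting the event. */
static void post_work(void)
{
    qatomic_set(&work_pending, true);
    qemu_event_set(&work_ready);            /* free/busy -> set, wakes waiters */
}

Resetting before the check is what keeps the free->busy transition safe: a qemu_event_set() racing with the check can at worst cause one spurious pass through the loop, never a lost wakeup.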
- * - * busy->free provably cannot happen; to enforce it, the set->free transition - * is done with an OR, which becomes a no-op if the event has concurrently - * transitioned to free or busy (and is faster than cmpxchg). - */ - -#define EV_SET 0 -#define EV_FREE 1 -#define EV_BUSY -1 - -void qemu_event_init(QemuEvent *ev, bool init) -{ - /* Manual reset. */ - ev->event = CreateEvent(NULL, TRUE, TRUE, NULL); - ev->value = (init ? EV_SET : EV_FREE); - ev->initialized = true; -} - -void qemu_event_destroy(QemuEvent *ev) -{ - assert(ev->initialized); - ev->initialized = false; - CloseHandle(ev->event); -} - -void qemu_event_set(QemuEvent *ev) -{ - assert(ev->initialized); - - /* - * Pairs with both qemu_event_reset() and qemu_event_wait(). - * - * qemu_event_set has release semantics, but because it *loads* - * ev->value we need a full memory barrier here. - */ - smp_mb(); - if (qatomic_read(&ev->value) != EV_SET) { - int old = qatomic_xchg(&ev->value, EV_SET); - - /* Pairs with memory barrier after ResetEvent. */ - smp_mb__after_rmw(); - if (old == EV_BUSY) { - /* There were waiters, wake them up. */ - SetEvent(ev->event); - } - } -} - -void qemu_event_reset(QemuEvent *ev) -{ - assert(ev->initialized); - - /* - * If there was a concurrent reset (or even reset+wait), - * do nothing. Otherwise change EV_SET->EV_FREE. - */ - qatomic_or(&ev->value, EV_FREE); - - /* - * Order reset before checking the condition in the caller. - * Pairs with the first memory barrier in qemu_event_set(). - */ - smp_mb__after_rmw(); -} - -void qemu_event_wait(QemuEvent *ev) -{ - unsigned value; - - assert(ev->initialized); - - /* - * qemu_event_wait must synchronize with qemu_event_set even if it does - * not go down the slow path, so this load-acquire is needed that - * synchronizes with the first memory barrier in qemu_event_set(). - * - * If we do go down the slow path, there is no requirement at all: we - * might miss a qemu_event_set() here but ultimately the memory barrier in - * qemu_futex_wait() will ensure the check is done correctly. - */ - value = qatomic_load_acquire(&ev->value); - if (value != EV_SET) { - if (value == EV_FREE) { - /* - * Here the underlying kernel event is reset, but qemu_event_set is - * not yet going to call SetEvent. However, there will be another - * check for EV_SET below when setting EV_BUSY. At that point it - * is safe to call WaitForSingleObject. - */ - ResetEvent(ev->event); - - /* - * It is not clear whether ResetEvent provides this barrier; kernel - * APIs (KeResetEvent/KeClearEvent) do not. Better safe than sorry! - */ - smp_mb(); - - /* - * Leave the event reset and tell qemu_event_set that there are - * waiters. No need to retry, because there cannot be a concurrent - * busy->free transition. After the CAS, the event will be either - * set or busy. - */ - if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) { - return; - } - } - - /* - * ev->value is now EV_BUSY. Since we didn't observe EV_SET, - * qemu_event_set() must observe EV_BUSY and call SetEvent(). - */ - WaitForSingleObject(ev->event, INFINITE); - } -} - struct QemuThreadData { /* Passed to win32_start_routine. 
*/ void *(*start_routine)(void *); diff --git a/util/qemu-timer.c b/util/qemu-timer.c index 213114be68c..1fb48be281a 100644 --- a/util/qemu-timer.c +++ b/util/qemu-timer.c @@ -26,9 +26,10 @@ #include "qemu/main-loop.h" #include "qemu/timer.h" #include "qemu/lockable.h" -#include "sysemu/cpu-timers.h" -#include "sysemu/replay.h" -#include "sysemu/cpus.h" +#include "system/cpu-timers.h" +#include "exec/icount.h" +#include "system/replay.h" +#include "system/cpus.h" #ifdef CONFIG_POSIX #include @@ -182,7 +183,7 @@ bool qemu_clock_has_timers(QEMUClockType type) bool timerlist_expired(QEMUTimerList *timer_list) { - int64_t expire_time; + int64_t expire_time = 0; if (!qatomic_read(&timer_list->active_timers)) { return false; @@ -212,7 +213,7 @@ bool qemu_clock_expired(QEMUClockType type) int64_t timerlist_deadline_ns(QEMUTimerList *timer_list) { int64_t delta; - int64_t expire_time; + int64_t expire_time = 0; if (!qatomic_read(&timer_list->active_timers)) { return -1; @@ -286,16 +287,6 @@ int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask) return deadline; } -QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list) -{ - return timer_list->clock->type; -} - -QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type) -{ - return main_loop_tlg.tl[type]; -} - void timerlist_notify(QEMUTimerList *timer_list) { if (timer_list->notify_cb) { @@ -419,10 +410,6 @@ static bool timer_mod_ns_locked(QEMUTimerList *timer_list, static void timerlist_rearm(QEMUTimerList *timer_list) { - /* Interrupt execution to force deadline recalculation. */ - if (icount_enabled() && timer_list->clock->type == QEMU_CLOCK_VIRTUAL) { - icount_start_warp_timer(); - } timerlist_notify(timer_list); } @@ -461,7 +448,7 @@ void timer_mod_ns(QEMUTimer *ts, int64_t expire_time) void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time) { QEMUTimerList *timer_list = ts->timer_list; - bool rearm; + bool rearm = false; WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) { if (ts->expire_time == -1 || ts->expire_time > expire_time) { diff --git a/util/qht.c b/util/qht.c index 92c6b787593..208c2f4b329 100644 --- a/util/qht.c +++ b/util/qht.c @@ -367,7 +367,6 @@ void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap) qht_map_lock_buckets(map); qht_unlock(ht); *pmap = map; - return; } /* diff --git a/util/rcu.c b/util/rcu.c index fa32c942e4b..b703c86f15a 100644 --- a/util/rcu.c +++ b/util/rcu.c @@ -20,8 +20,8 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * License along with this library; if not, see + * . * * IBM's contributions to this file may be relicensed under LGPLv2 or later. */ diff --git a/util/s390x_pci_mmio.c b/util/s390x_pci_mmio.c new file mode 100644 index 00000000000..5ab24fa474c --- /dev/null +++ b/util/s390x_pci_mmio.c @@ -0,0 +1,146 @@ +/* + * s390x PCI MMIO definitions + * + * Copyright 2025 IBM Corp. 
+ * Author(s): Farhan Ali + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include +#include "qemu/s390x_pci_mmio.h" +#include "elf.h" + +union register_pair { + unsigned __int128 pair; + struct { + uint64_t even; + uint64_t odd; + }; +}; + +static bool is_mio_supported; + +static __attribute__((constructor)) void check_is_mio_supported(void) +{ + is_mio_supported = !!(qemu_getauxval(AT_HWCAP) & HWCAP_S390_PCI_MIO); +} + +static uint64_t s390x_pcilgi(const void *ioaddr, size_t len) +{ + union register_pair ioaddr_len = { .even = (uint64_t)ioaddr, + .odd = len }; + uint64_t val; + int cc; + + asm volatile( + /* pcilgi */ + ".insn rre,0xb9d60000,%[val],%[ioaddr_len]\n" + "ipm %[cc]\n" + "srl %[cc],28\n" + : [cc] "=d"(cc), [val] "=d"(val), + [ioaddr_len] "+d"(ioaddr_len.pair) :: "cc"); + + if (cc) { + val = -1ULL; + } + + return val; +} + +static void s390x_pcistgi(void *ioaddr, uint64_t val, size_t len) +{ + union register_pair ioaddr_len = {.even = (uint64_t)ioaddr, .odd = len}; + + asm volatile ( + /* pcistgi */ + ".insn rre,0xb9d40000,%[val],%[ioaddr_len]\n" + : [ioaddr_len] "+d" (ioaddr_len.pair) + : [val] "d" (val) + : "cc", "memory"); +} + +uint8_t s390x_pci_mmio_read_8(const void *ioaddr) +{ + uint8_t val = 0; + + if (is_mio_supported) { + val = s390x_pcilgi(ioaddr, sizeof(val)); + } else { + syscall(__NR_s390_pci_mmio_read, ioaddr, &val, sizeof(val)); + } + return val; +} + +uint16_t s390x_pci_mmio_read_16(const void *ioaddr) +{ + uint16_t val = 0; + + if (is_mio_supported) { + val = s390x_pcilgi(ioaddr, sizeof(val)); + } else { + syscall(__NR_s390_pci_mmio_read, ioaddr, &val, sizeof(val)); + } + return val; +} + +uint32_t s390x_pci_mmio_read_32(const void *ioaddr) +{ + uint32_t val = 0; + + if (is_mio_supported) { + val = s390x_pcilgi(ioaddr, sizeof(val)); + } else { + syscall(__NR_s390_pci_mmio_read, ioaddr, &val, sizeof(val)); + } + return val; +} + +uint64_t s390x_pci_mmio_read_64(const void *ioaddr) +{ + uint64_t val = 0; + + if (is_mio_supported) { + val = s390x_pcilgi(ioaddr, sizeof(val)); + } else { + syscall(__NR_s390_pci_mmio_read, ioaddr, &val, sizeof(val)); + } + return val; +} + +void s390x_pci_mmio_write_8(void *ioaddr, uint8_t val) +{ + if (is_mio_supported) { + s390x_pcistgi(ioaddr, val, sizeof(val)); + } else { + syscall(__NR_s390_pci_mmio_write, ioaddr, &val, sizeof(val)); + } +} + +void s390x_pci_mmio_write_16(void *ioaddr, uint16_t val) +{ + if (is_mio_supported) { + s390x_pcistgi(ioaddr, val, sizeof(val)); + } else { + syscall(__NR_s390_pci_mmio_write, ioaddr, &val, sizeof(val)); + } +} + +void s390x_pci_mmio_write_32(void *ioaddr, uint32_t val) +{ + if (is_mio_supported) { + s390x_pcistgi(ioaddr, val, sizeof(val)); + } else { + syscall(__NR_s390_pci_mmio_write, ioaddr, &val, sizeof(val)); + } +} + +void s390x_pci_mmio_write_64(void *ioaddr, uint64_t val) +{ + if (is_mio_supported) { + s390x_pcistgi(ioaddr, val, sizeof(val)); + } else { + syscall(__NR_s390_pci_mmio_write, ioaddr, &val, sizeof(val)); + } +} diff --git a/util/thread-context.c b/util/thread-context.c index 2bc7883b9e0..0146154fa56 100644 --- a/util/thread-context.c +++ b/util/thread-context.c @@ -273,7 +273,7 @@ static void thread_context_instance_complete(UserCreatable *uc, Error **errp) } } -static void thread_context_class_init(ObjectClass *oc, void *data) +static void thread_context_class_init(ObjectClass *oc, const void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); @@ -319,7 +319,7 @@ static const TypeInfo thread_context_info = { .instance_size 
= sizeof(ThreadContext), .instance_init = thread_context_instance_init, .instance_finalize = thread_context_instance_finalize, - .interfaces = (InterfaceInfo[]) { + .interfaces = (const InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } diff --git a/util/thread-pool.c b/util/thread-pool.c index 27eb777e855..d2ead6b7285 100644 --- a/util/thread-pool.c +++ b/util/thread-pool.c @@ -23,9 +23,9 @@ #include "block/thread-pool.h" #include "qemu/main-loop.h" -static void do_spawn_thread(ThreadPool *pool); +static void do_spawn_thread(ThreadPoolAio *pool); -typedef struct ThreadPoolElement ThreadPoolElement; +typedef struct ThreadPoolElementAio ThreadPoolElementAio; enum ThreadState { THREAD_QUEUED, @@ -33,9 +33,9 @@ enum ThreadState { THREAD_DONE, }; -struct ThreadPoolElement { +struct ThreadPoolElementAio { BlockAIOCB common; - ThreadPool *pool; + ThreadPoolAio *pool; ThreadPoolFunc *func; void *arg; @@ -47,13 +47,13 @@ struct ThreadPoolElement { int ret; /* Access to this list is protected by lock. */ - QTAILQ_ENTRY(ThreadPoolElement) reqs; + QTAILQ_ENTRY(ThreadPoolElementAio) reqs; /* This list is only written by the thread pool's mother thread. */ - QLIST_ENTRY(ThreadPoolElement) all; + QLIST_ENTRY(ThreadPoolElementAio) all; }; -struct ThreadPool { +struct ThreadPoolAio { AioContext *ctx; QEMUBH *completion_bh; QemuMutex lock; @@ -62,10 +62,10 @@ struct ThreadPool { QEMUBH *new_thread_bh; /* The following variables are only accessed from one AioContext. */ - QLIST_HEAD(, ThreadPoolElement) head; + QLIST_HEAD(, ThreadPoolElementAio) head; /* The following variables are protected by lock. */ - QTAILQ_HEAD(, ThreadPoolElement) request_list; + QTAILQ_HEAD(, ThreadPoolElementAio) request_list; int cur_threads; int idle_threads; int new_threads; /* backlog of threads we need to create */ @@ -76,14 +76,14 @@ struct ThreadPool { static void *worker_thread(void *opaque) { - ThreadPool *pool = opaque; + ThreadPoolAio *pool = opaque; qemu_mutex_lock(&pool->lock); pool->pending_threads--; do_spawn_thread(pool); while (pool->cur_threads <= pool->max_threads) { - ThreadPoolElement *req; + ThreadPoolElementAio *req; int ret; if (QTAILQ_EMPTY(&pool->request_list)) { @@ -131,7 +131,7 @@ static void *worker_thread(void *opaque) return NULL; } -static void do_spawn_thread(ThreadPool *pool) +static void do_spawn_thread(ThreadPoolAio *pool) { QemuThread t; @@ -148,14 +148,14 @@ static void do_spawn_thread(ThreadPool *pool) static void spawn_thread_bh_fn(void *opaque) { - ThreadPool *pool = opaque; + ThreadPoolAio *pool = opaque; qemu_mutex_lock(&pool->lock); do_spawn_thread(pool); qemu_mutex_unlock(&pool->lock); } -static void spawn_thread(ThreadPool *pool) +static void spawn_thread(ThreadPoolAio *pool) { pool->cur_threads++; pool->new_threads++; @@ -173,8 +173,8 @@ static void spawn_thread(ThreadPool *pool) static void thread_pool_completion_bh(void *opaque) { - ThreadPool *pool = opaque; - ThreadPoolElement *elem, *next; + ThreadPoolAio *pool = opaque; + ThreadPoolElementAio *elem, *next; defer_call_begin(); /* cb() may use defer_call() to coalesce work */ @@ -184,8 +184,8 @@ static void thread_pool_completion_bh(void *opaque) continue; } - trace_thread_pool_complete(pool, elem, elem->common.opaque, - elem->ret); + trace_thread_pool_complete_aio(pool, elem, elem->common.opaque, + elem->ret); QLIST_REMOVE(elem, all); if (elem->common.cb) { @@ -217,10 +217,10 @@ static void thread_pool_completion_bh(void *opaque) static void thread_pool_cancel(BlockAIOCB *acb) { - ThreadPoolElement *elem = (ThreadPoolElement *)acb; 
- ThreadPool *pool = elem->pool; + ThreadPoolElementAio *elem = (ThreadPoolElementAio *)acb; + ThreadPoolAio *pool = elem->pool; - trace_thread_pool_cancel(elem, elem->common.opaque); + trace_thread_pool_cancel_aio(elem, elem->common.opaque); QEMU_LOCK_GUARD(&pool->lock); if (elem->state == THREAD_QUEUED) { @@ -234,16 +234,16 @@ static void thread_pool_cancel(BlockAIOCB *acb) } static const AIOCBInfo thread_pool_aiocb_info = { - .aiocb_size = sizeof(ThreadPoolElement), + .aiocb_size = sizeof(ThreadPoolElementAio), .cancel_async = thread_pool_cancel, }; BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg, BlockCompletionFunc *cb, void *opaque) { - ThreadPoolElement *req; + ThreadPoolElementAio *req; AioContext *ctx = qemu_get_current_aio_context(); - ThreadPool *pool = aio_get_thread_pool(ctx); + ThreadPoolAio *pool = aio_get_thread_pool(ctx); /* Assert that the thread submitting work is the same running the pool */ assert(pool->ctx == qemu_get_current_aio_context()); @@ -256,7 +256,7 @@ BlockAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg, QLIST_INSERT_HEAD(&pool->head, req, all); - trace_thread_pool_submit(pool, req, arg); + trace_thread_pool_submit_aio(pool, req, arg); qemu_mutex_lock(&pool->lock); if (pool->idle_threads == 0 && pool->cur_threads < pool->max_threads) { @@ -290,12 +290,7 @@ int coroutine_fn thread_pool_submit_co(ThreadPoolFunc *func, void *arg) return tpc.ret; } -void thread_pool_submit(ThreadPoolFunc *func, void *arg) -{ - thread_pool_submit_aio(func, arg, NULL, NULL); -} - -void thread_pool_update_params(ThreadPool *pool, AioContext *ctx) +void thread_pool_update_params(ThreadPoolAio *pool, AioContext *ctx) { qemu_mutex_lock(&pool->lock); @@ -322,7 +317,7 @@ void thread_pool_update_params(ThreadPool *pool, AioContext *ctx) qemu_mutex_unlock(&pool->lock); } -static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx) +static void thread_pool_init_one(ThreadPoolAio *pool, AioContext *ctx) { if (!ctx) { ctx = qemu_get_aio_context(); @@ -342,14 +337,14 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx) thread_pool_update_params(pool, ctx); } -ThreadPool *thread_pool_new(AioContext *ctx) +ThreadPoolAio *thread_pool_new_aio(AioContext *ctx) { - ThreadPool *pool = g_new(ThreadPool, 1); + ThreadPoolAio *pool = g_new(ThreadPoolAio, 1); thread_pool_init_one(pool, ctx); return pool; } -void thread_pool_free(ThreadPool *pool) +void thread_pool_free_aio(ThreadPoolAio *pool) { if (!pool) { return; @@ -379,3 +374,122 @@ void thread_pool_free(ThreadPool *pool) qemu_mutex_destroy(&pool->lock); g_free(pool); } + +struct ThreadPool { + GThreadPool *t; + size_t cur_work; + QemuMutex cur_work_lock; + QemuCond all_finished_cond; +}; + +typedef struct { + ThreadPoolFunc *func; + void *opaque; + GDestroyNotify opaque_destroy; +} ThreadPoolElement; + +static void thread_pool_func(gpointer data, gpointer user_data) +{ + ThreadPool *pool = user_data; + g_autofree ThreadPoolElement *el = data; + + el->func(el->opaque); + + if (el->opaque_destroy) { + el->opaque_destroy(el->opaque); + } + + QEMU_LOCK_GUARD(&pool->cur_work_lock); + + assert(pool->cur_work > 0); + pool->cur_work--; + + if (pool->cur_work == 0) { + qemu_cond_signal(&pool->all_finished_cond); + } +} + +ThreadPool *thread_pool_new(void) +{ + ThreadPool *pool = g_new(ThreadPool, 1); + + pool->cur_work = 0; + qemu_mutex_init(&pool->cur_work_lock); + qemu_cond_init(&pool->all_finished_cond); + + pool->t = g_thread_pool_new(thread_pool_func, pool, 0, TRUE, NULL); + /* + * 
g_thread_pool_new() can only return errors if initial thread(s) + * creation fails but we ask for 0 initial threads above. + */ + assert(pool->t); + + return pool; +} + +void thread_pool_free(ThreadPool *pool) +{ + /* + * With _wait = TRUE this effectively waits for all + * previously submitted work to complete first. + */ + g_thread_pool_free(pool->t, FALSE, TRUE); + + qemu_cond_destroy(&pool->all_finished_cond); + qemu_mutex_destroy(&pool->cur_work_lock); + + g_free(pool); +} + +void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, + void *opaque, GDestroyNotify opaque_destroy) +{ + ThreadPoolElement *el = g_new(ThreadPoolElement, 1); + + el->func = func; + el->opaque = opaque; + el->opaque_destroy = opaque_destroy; + + WITH_QEMU_LOCK_GUARD(&pool->cur_work_lock) { + pool->cur_work++; + } + + /* + * Ignore the return value since this function can only return errors + * if creation of an additional thread fails but even in this case the + * provided work is still getting queued (just for the existing threads). + */ + g_thread_pool_push(pool->t, el, NULL); +} + +void thread_pool_submit_immediate(ThreadPool *pool, ThreadPoolFunc *func, + void *opaque, GDestroyNotify opaque_destroy) +{ + thread_pool_submit(pool, func, opaque, opaque_destroy); + thread_pool_adjust_max_threads_to_work(pool); +} + +void thread_pool_wait(ThreadPool *pool) +{ + QEMU_LOCK_GUARD(&pool->cur_work_lock); + + while (pool->cur_work > 0) { + qemu_cond_wait(&pool->all_finished_cond, + &pool->cur_work_lock); + } +} + +bool thread_pool_set_max_threads(ThreadPool *pool, + int max_threads) +{ + assert(max_threads > 0); + + return g_thread_pool_set_max_threads(pool->t, max_threads, NULL); +} + +bool thread_pool_adjust_max_threads_to_work(ThreadPool *pool) +{ + QEMU_LOCK_GUARD(&pool->cur_work_lock); + + return thread_pool_set_max_threads(pool, pool->cur_work); +} diff --git a/util/timed-average.c b/util/timed-average.c index 2b49d532cee..5b5c22afd88 100644 --- a/util/timed-average.c +++ b/util/timed-average.c @@ -8,10 +8,12 @@ * Benoît Canet * Alberto Garcia * + * SPDX-License-Identifier: GPL-2.0-or-later + * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or - * (at your option) version 3 or any later version. + * (at your option) any later version. 
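Not part of the patch: a short usage sketch of the generic, GLib-backed ThreadPool API added above (thread_pool_new(), thread_pool_submit(), thread_pool_wait(), thread_pool_free()), assuming the declarations live in block/thread-pool.h as in this tree; do_compress() and the jobs array are made-up caller code.

#include "qemu/osdep.h"
#include "block/thread-pool.h"

/* A ThreadPoolFunc; the generic pool ignores the return value. */
static int do_compress(void *opaque)
{
    /* ... CPU-bound work on opaque ... */
    return 0;
}

static void compress_all(void **jobs, size_t n_jobs)
{
    ThreadPool *pool = thread_pool_new();

    thread_pool_set_max_threads(pool, 4);   /* cap the worker count */

    for (size_t i = 0; i < n_jobs; i++) {
        /* NULL destroy notifier: the caller keeps ownership of jobs[i]. */
        thread_pool_submit(pool, do_compress, jobs[i], NULL);
    }

    thread_pool_wait(pool);                 /* blocks until cur_work reaches 0 */
    thread_pool_free(pool);
}

Unlike the AIO pool, nothing here touches an AioContext, so these helpers can be driven from any thread.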
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/util/trace-events b/util/trace-events index 49a4962e188..bd8f25fb592 100644 --- a/util/trace-events +++ b/util/trace-events @@ -14,9 +14,9 @@ aio_co_schedule_bh_cb(void *ctx, void *co) "ctx %p co %p" reentrant_aio(void *ctx, const char *name) "ctx %p name %s" # thread-pool.c -thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p" -thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d" -thread_pool_cancel(void *req, void *opaque) "req %p opaque %p" +thread_pool_submit_aio(void *pool, void *req, void *opaque) "pool %p req %p opaque %p" +thread_pool_complete_aio(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d" +thread_pool_cancel_aio(void *req, void *opaque) "req %p opaque %p" # buffer.c buffer_resize(const char *buf, size_t olen, size_t len) "%s: old %zd, new %zd" diff --git a/util/userfaultfd.c b/util/userfaultfd.c index 1b2fa949d4d..2396104f23e 100644 --- a/util/userfaultfd.c +++ b/util/userfaultfd.c @@ -240,7 +240,7 @@ int uffd_change_protection(int uffd_fd, void *addr, uint64_t length, * Copy range of source pages to the destination to resolve * missing page fault somewhere in the destination range. * - * Returns 0 on success, negative value in case of an error + * Returns 0 on success, -errno in case of an error * * @uffd_fd: UFFD file descriptor * @dst_addr: destination base address @@ -259,10 +259,11 @@ int uffd_copy_page(int uffd_fd, void *dst_addr, void *src_addr, uffd_copy.mode = dont_wake ? UFFDIO_COPY_MODE_DONTWAKE : 0; if (ioctl(uffd_fd, UFFDIO_COPY, &uffd_copy)) { + int e = errno; error_report("uffd_copy_page() failed: dst_addr=%p src_addr=%p length=%" PRIu64 " mode=%" PRIx64 " errno=%i", dst_addr, src_addr, - length, (uint64_t) uffd_copy.mode, errno); - return -1; + length, (uint64_t) uffd_copy.mode, e); + return -e; } return 0; @@ -273,7 +274,7 @@ int uffd_copy_page(int uffd_fd, void *dst_addr, void *src_addr, * * Fill range pages with zeroes to resolve missing page fault within the range. * - * Returns 0 on success, negative value in case of an error + * Returns 0 on success, -errno in case of an error * * @uffd_fd: UFFD file descriptor * @addr: base address @@ -289,10 +290,11 @@ int uffd_zero_page(int uffd_fd, void *addr, uint64_t length, bool dont_wake) uffd_zeropage.mode = dont_wake ? UFFDIO_ZEROPAGE_MODE_DONTWAKE : 0; if (ioctl(uffd_fd, UFFDIO_ZEROPAGE, &uffd_zeropage)) { + int e = errno; error_report("uffd_zero_page() failed: addr=%p length=%" PRIu64 " mode=%" PRIx64 " errno=%i", addr, length, - (uint64_t) uffd_zeropage.mode, errno); - return -1; + (uint64_t) uffd_zeropage.mode, e); + return -e; } return 0; @@ -306,7 +308,7 @@ int uffd_zero_page(int uffd_fd, void *addr, uint64_t length, bool dont_wake) * via UFFD-IO IOCTLs with MODE_DONTWAKE flag set, then after that all waits * for the whole memory range are satisfied in a single call to uffd_wakeup(). 
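Not part of the patch: a sketch of the batched-resolve pattern the comment above describes, combined with the new -errno return convention; uffd_fd, dst, src, npages and page_size are assumed to be supplied by the caller.

#include "qemu/osdep.h"
#include "qemu/userfaultfd.h"
#include "qemu/error-report.h"

/*
 * Resolve a run of missing-page faults with dont_wake set on every copy,
 * then wake the whole destination range with a single uffd_wakeup().
 */
static int copy_region(int uffd_fd, void *dst, void *src,
                       size_t npages, size_t page_size)
{
    for (size_t i = 0; i < npages; i++) {
        int ret = uffd_copy_page(uffd_fd,
                                 (uint8_t *)dst + i * page_size,
                                 (uint8_t *)src + i * page_size,
                                 page_size, true /* dont_wake */);
        if (ret < 0) {
            /* ret is now -errno rather than a bare -1 */
            error_report("uffd copy failed: %s", strerror(-ret));
            return ret;
        }
    }
    return uffd_wakeup(uffd_fd, dst, npages * page_size);
}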
* - * Returns 0 on success, negative value in case of an error + * Returns 0 on success, -errno in case of an error * * @uffd_fd: UFFD file descriptor * @addr: base address @@ -320,9 +322,10 @@ int uffd_wakeup(int uffd_fd, void *addr, uint64_t length) uffd_range.len = length; if (ioctl(uffd_fd, UFFDIO_WAKE, &uffd_range)) { + int e = errno; error_report("uffd_wakeup() failed: addr=%p length=%" PRIu64 " errno=%i", - addr, length, errno); - return -1; + addr, length, e); + return -e; } return 0; @@ -355,31 +358,3 @@ int uffd_read_events(int uffd_fd, struct uffd_msg *msgs, int count) return (int) (res / sizeof(struct uffd_msg)); } - -/** - * uffd_poll_events: poll UFFD file descriptor for read - * - * Returns true if events are available for read, false otherwise - * - * @uffd_fd: UFFD file descriptor - * @tmo: timeout value - */ -bool uffd_poll_events(int uffd_fd, int tmo) -{ - int res; - struct pollfd poll_fd = { .fd = uffd_fd, .events = POLLIN, .revents = 0 }; - - do { - res = poll(&poll_fd, 1, tmo); - } while (res < 0 && errno == EINTR); - - if (res == 0) { - return false; - } - if (res < 0) { - error_report("uffd_poll_events() failed: errno=%i", errno); - return false; - } - - return (poll_fd.revents & POLLIN) != 0; -} diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c index f8bab46c68f..fdff042ab46 100644 --- a/util/vfio-helpers.c +++ b/util/vfio-helpers.c @@ -16,7 +16,7 @@ #include "qapi/error.h" #include "exec/ramlist.h" #include "exec/cpu-common.h" -#include "exec/memory.h" +#include "system/memory.h" #include "trace.h" #include "qemu/error-report.h" #include "standard-headers/linux/pci_regs.h"
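Not part of the patch: a sketch tying back to the inet_parse()/QemuOpts rework earlier in this patch, showing the kind of option string the new inet_opts table accepts; the host, port and keep-alive values are invented, and keep-alive-idle is only parsed on hosts where HAVE_TCP_KEEPIDLE is defined.

#include "qemu/osdep.h"
#include "qemu/sockets.h"
#include "qapi/error.h"

/* Open a TCP connection described by a single option string. */
static int connect_with_keepalive(Error **errp)
{
    InetSocketAddress *addr = g_new(InetSocketAddress, 1);
    int fd = -1;

    /* "addr" is the implied option; the rest map to inet_opts entries. */
    if (inet_parse(addr, "[::1]:5900,ipv6=on,keep-alive=on,keep-alive-idle=60",
                   errp) == 0) {
        fd = inet_connect_saddr(addr, errp);   /* applies the keep-alive options */
    }
    qapi_free_InetSocketAddress(addr);
    return fd;
}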